{"code": "def CreateRetryTask(self):\n    with self._lock:\n        abandoned_task = self._GetTaskPendingRetry()\n        if (not abandoned_task):\n            return None\n        retry_task = abandoned_task.CreateRetryTask()\n        logger.debug('Retrying task {0:s} as {1:s}.'.format(abandoned_task.identifier, retry_task.identifier))\n        self._tasks_queued[retry_task.identifier] = retry_task\n        self._total_number_of_tasks += 1\n        self.SampleTaskStatus(retry_task, 'created_retry')\n        return retry_task", "docstring": "Creates a task that to retry a previously abandoned task.\n\nReturns:\nTask: a task that was abandoned but should be retried or None if there are\nno abandoned tasks that should be retried.", "source": "codesearchnet"}
{"code": "def hr_dp010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `hr_dp010`'.format(value))\n    self._hr_dp010 = value", "docstring": "Corresponds to IDD Field `hr_dp010`\nhumidity ratio corresponding to\nDew-point temperature corresponding to 1.0,% annual cumulative frequency of occurrence\ncalculated at the standard atmospheric pressure at elevation of station\n\nArgs:\nvalue (float): value for IDD Field `hr_dp010`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def upload_entities_tsv(namespace, workspace, entities_tsv):\n    if isinstance(entities_tsv, string_types):\n        with open(entities_tsv, 'r') as tsv:\n            entity_data = tsv.read()\n    elif isinstance(entities_tsv, io.StringIO):\n        entity_data = entities_tsv.getvalue()\n    else:\n        raise ValueError('Unsupported input type.')\n    return upload_entities(namespace, workspace, entity_data)", "docstring": "Upload entities from a tsv loadfile.\n\nFile-based wrapper for api.upload_entities().\nA loadfile is a tab-separated text file with a header row\ndescribing entity type and attribute names, followed by\nrows of entities and their attribute values.\n\nEx:\nentity:participant_id   age   alive\nparticipant_23           25       Y\nparticipant_27           35       N\n\nArgs:\nnamespace (str): project to which workspace belongs\nworkspace (str): Workspace name\nentities_tsv (file): FireCloud loadfile, see format above", "source": "codesearchnet"}
{"code": "def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix='meta'):\n    if self._checkpoints_to_be_deleted:\n        p = self._checkpoints_to_be_deleted.pop(0)\n        should_keep = p[1] > self._next_checkpoint_time\n        if should_keep:\n            self._next_checkpoint_time += self.saver_def.keep_checkpoint_every_n_hours * 3600\n            return\n        try:\n            checkpoint_management.remove_checkpoint(self._CheckpointFilename(p), self.saver_def.version, meta_graph_suffix)\n        except Exception as e:\n            logging.warning('Ignoring: %s', str(e))", "docstring": "Deletes old checkpoints if necessary.\n\n`self._checkpoints_to_be_deleted` is going to contain checkpoints that are\nover `max_to_keep`.  They are going to be deleted.  If\n`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint\nevery `N` hours. For example, if `N` is 0.5, an additional checkpoint is\nkept for every 0.5 hours of training; if `N` is 10, an additional\ncheckpoint is kept for every 10 hours of training.\n\nArgs:\nmeta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.", "source": "github-repos"}
{"code": "def set_margin(self, top=40, bottom=30, left=50, right=10, buffer_size=8):\n        \n        self.set_integer(\"top\", top)\n        self.set_integer(\"bottom\", bottom)\n        self.set_integer(\"left\", left)\n        self.set_integer(\"right\", right)\n        self.set_integer(\"buffer\", buffer_size)", "docstring": "Set margin of the chart.\n\nArgs:\ntop (int): size of top margin in pixels.\nbottom (int): size of bottom margin in pixels.\nleft (int): size of left margin in pixels.\nright (int): size of right margin in pixels.\nbuffer_size (int): buffer size in pixels between the chart and margins.", "source": "juraj-google-style"}
{"code": "def setRightsHolder(self, pid, userId, serialVersion, vendorSpecific=None):\n        \n        response = self.setRightsHolderResponse(\n            pid, userId, serialVersion, vendorSpecific\n        )\n        return self._read_boolean_response(response)", "docstring": "See Also: setRightsHolderResponse()\n\nArgs:\npid:\nuserId:\nserialVersion:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_subport_statistics(self, id_or_uri, port_name, subport_number):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/statistics/{0}/subport/{1}\".format(port_name, subport_number)\n        return self._client.get(uri)", "docstring": "Gets the subport statistics on an interconnect.\n\nArgs:\nid_or_uri:  Can be either the interconnect id or the interconnect uri.\nport_name (str): A specific port name of an interconnect.\nsubport_number (int): The subport.\n\nReturns:\ndict: The statistics for the interconnect that matches id, port_name, and subport_number.", "source": "juraj-google-style"}
{"code": "def Decrypt(self, encrypted_data):\n    \n    decrypted_data = self._rc4_cipher.decrypt(encrypted_data)\n    return decrypted_data, b''", "docstring": "Decrypts the encrypted data.\n\nArgs:\nencrypted_data (bytes): encrypted data.\n\nReturns:\ntuple[bytes,bytes]: decrypted data and remaining encrypted data.", "source": "juraj-google-style"}
{"code": "def _fuse_awq_mlp(model, current_module_name, fuse_module_names, module, target_cls):\n    if len(fuse_module_names) == 0:\n        return\n    if hasattr(module, fuse_module_names[0]):\n        gate_proj = getattr(module, fuse_module_names[0])\n        up_proj = getattr(module, fuse_module_names[1])\n        down_proj = getattr(module, fuse_module_names[2])\n        previous_device = gate_proj.qweight.device\n        config = model.config.get_text_config(decoder=True)\n        hidden_act = config.hidden_act\n        activation_fn = ACT2FN[hidden_act]\n        new_module = target_cls(gate_proj, down_proj, up_proj, activation_fn)\n        parent_name, child_name = current_module_name.rsplit('.', 1)\n        parent = model.get_submodule(parent_name)\n        setattr(parent, child_name, new_module.to(previous_device))\n        del gate_proj, up_proj, down_proj", "docstring": "Fuse the MLP layers into a target class using autoawq\n\nArgs:\nmodel (`~PreTrainedModel`):\nThe input pretrained model\ncurrent_module_name (`str`):\nThe current submodule name\nfuse_module_names (`List[str]`):\nThe list of module names to fuse. For the MLP layers it has to be an array\nof length 3 that consists of the 3 MLP layers in the order (gate (dense layer post-attention) / up / down layers)\nmodule (`nn.Module`):\nThe pytorch parent module that has layernorm modules to fuse\ntarget_cls (`~autoawq.QuantFusedMLP`):\nThe `QuantFusedMLP` class as it only supports that class\nfor now.", "source": "github-repos"}
{"code": "def parse_test_files(filepattern):\n    for path in glob.glob(filepattern):\n        with open(path) as fin:\n            suite_name = os.path.splitext(os.path.basename(path))[0].title().replace('-', '') + 'Test'\n            print(path, suite_name)\n            methods = dict(create_test_methods(yaml.load(fin, Loader=yaml_transform.SafeLineLoader)))\n            globals()[suite_name] = type(suite_name, (unittest.TestCase,), methods)", "docstring": "Parses YAML test files and dynamically creates test cases.\n\nThis function iterates through all files matching the given glob pattern.\nFor each YAML file found, it:\n1. Reads the file content.\n2. Determines a test suite name based on the file name.\n3. Calls `create_test_methods` to generate test methods from the\nYAML specification.\n4. Dynamically creates a new TestCase class (inheriting from\n`unittest.TestCase`) and populates it with the generated test methods.\n5. Adds this newly created TestCase class to the global scope, making it\ndiscoverable by the unittest framework.\n\nArgs:\nfilepattern (str): A glob pattern specifying the YAML test files to parse.\nFor example, 'path/to/tests/*.yaml'.", "source": "github-repos"}
{"code": "def __init__(self, assign_defaults=(), method_name=None):\n    \n    super(self.__class__, self).__init__(assign_defaults=assign_defaults,\n                                         method_name=method_name)", "docstring": "Assigns arguments to the decorator.\n\nArgs:\nassign_defaults: A sequence of strings for the default values that should\nbe provided. Defaults are shared across methods.\nmethod_name: If provided, use this as the method_name instead of the\nwrapped function's name.", "source": "juraj-google-style"}
{"code": "def grow(script, iterations=1):\n    \n    filter_xml = '  <filter name=\"Dilate Selection\"/>\\n'\n    for _ in range(iterations):\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Grow (dilate, expand) the current set of selected faces\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\niterations (int): the number of times to grow the selection.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.ListKnowledgeBases = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.KnowledgeBases/ListKnowledgeBases',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.ListKnowledgeBasesRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.ListKnowledgeBasesResponse.FromString,\n        )\n    self.GetKnowledgeBase = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.KnowledgeBases/GetKnowledgeBase',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.GetKnowledgeBaseRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.KnowledgeBase.FromString,\n        )\n    self.CreateKnowledgeBase = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.KnowledgeBases/CreateKnowledgeBase',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.CreateKnowledgeBaseRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.KnowledgeBase.FromString,\n        )\n    self.DeleteKnowledgeBase = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.KnowledgeBases/DeleteKnowledgeBase',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_knowledge__base__pb2.DeleteKnowledgeBaseRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def SetLines(self, lines):\n    (self._cli_lines, self._cli_cols) = TerminalSize()\n    if lines:\n        self._cli_lines = int(lines)", "docstring": "Set number of screen lines.\n\nArgs:\nlines: An int, number of lines. If None, use terminal dimensions.\n\nRaises:\nValueError, TypeError: Not a valid integer representation.", "source": "codesearchnet"}
{"code": "def delete_value(hive, key, vname=None, use_32bit_registry=False):\n    return __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)", "docstring": "r'''\nDelete a registry value entry or the default value for a key.\n\nArgs:\n\nhive (str):\nThe name of the hive. Can be one of the following\n\n- HKEY_LOCAL_MACHINE or HKLM\n- HKEY_CURRENT_USER or HKCU\n- HKEY_USER or HKU\n- HKEY_CLASSES_ROOT or HKCR\n- HKEY_CURRENT_CONFIG or HKCC\n\nkey (str):\nThe key (looks like a path) to the value name.\n\nvname (str):\nThe value name. These are the individual name/data pairs under the\nkey. If not passed, the key (Default) value will be deleted.\n\nuse_32bit_registry (bool):\nDeletes the 32bit portion of the registry on 64bit installations. On\n32bit machines this is ignored.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\\\Salt' 'version'", "source": "codesearchnet"}
{"code": "def set_topic(self, topic):\n        \n        if not topic:\n            topic = ''\n        result = self._connection.put(\"room/%s\" % self.id, {\"room\": {\"topic\": topic}})\n        if result[\"success\"]:\n            self._load()\n\n        return result[\"success\"]", "docstring": "Set the room topic.\n\nArgs:\ntopic (str): Topic\n\nReturns:\nbool. Success", "source": "juraj-google-style"}
{"code": "def lu_factor(x):\n    if any_symbolic_tensors((x,)):\n        return LuFactor().symbolic_call(x)\n    return _lu_factor(x)", "docstring": "Computes the lower-upper decomposition of a square matrix.\n\nArgs:\nx: A tensor of shape `(..., M, M)`.\n\nReturns:\nA tuple of two tensors: a tensor of shape `(..., M, M)` containing the\nlower and upper triangular matrices and a tensor of shape `(..., M)`\ncontaining the pivots.", "source": "github-repos"}
{"code": "def remove(self, dic):\n    for kw in dic:\n        removePair = Pair(kw, dic[kw])\n        self._remove([removePair])", "docstring": "remove the pair by passing a identical dict\n\nArgs:\ndic (dict): key and value", "source": "codesearchnet"}
{"code": "def CreateServiceProto(job):\n  \n  service = rdf_client.OSXServiceInformation(\n      label=job.get(\"Label\"),\n      program=job.get(\"Program\"),\n      sessiontype=job.get(\"LimitLoadToSessionType\"),\n      lastexitstatus=int(job[\"LastExitStatus\"]),\n      timeout=int(job[\"TimeOut\"]),\n      ondemand=bool(job[\"OnDemand\"]))\n\n  for arg in job.get(\"ProgramArguments\", \"\", stringify=False):\n    \n    service.args.Append(str(arg))\n\n  mach_dict = job.get(\"MachServices\", {}, stringify=False)\n  for key, value in iteritems(mach_dict):\n    service.machservice.Append(\"%s:%s\" % (key, value))\n\n  job_mach_dict = job.get(\"PerJobMachServices\", {}, stringify=False)\n  for key, value in iteritems(job_mach_dict):\n    service.perjobmachservice.Append(\"%s:%s\" % (key, value))\n\n  if \"PID\" in job:\n    service.pid = job[\"PID\"].value\n\n  return service", "docstring": "Create the Service protobuf.\n\nArgs:\njob: Launchdjobdict from servicemanagement framework.\n\nReturns:\nsysinfo_pb2.OSXServiceInformation proto", "source": "juraj-google-style"}
{"code": "def read_file(path):\n    \n    gen = textfile.read_separated_lines_generator(path, max_columns=6,\n                                                  ignore_lines_starting_with=[';;'])\n\n    utterances = collections.defaultdict(list)\n\n    for record in gen:\n        values = record[1:len(record)]\n\n        for i in range(len(values)):\n            if i == 1 or i == 2 or i == 4:\n                values[i] = float(values[i])\n\n        utterances[record[0]].append(values)\n\n    return utterances", "docstring": "Reads a ctm file.\n\nArgs:\npath (str): Path to the file\n\nReturns:\n(dict): Dictionary with entries.\n\nExample::\n\n>>> read_file('/path/to/file.txt')\n{\n'wave-ab': [\n['1', 0.00, 0.07, 'HI', 1],\n['1', 0.09, 0.08, 'AH', 1]\n],\n'wave-xy': [\n['1', 0.00, 0.07, 'HI', 1],\n['1', 0.09, 0.08, 'AH', 1]\n]\n}", "source": "juraj-google-style"}
{"code": "def __init__(self, table_name, dataset, schema, project):\n    beam.PTransform.__init__(self)\n    self.table_name = table_name\n    self.dataset = dataset\n    self.schema = schema\n    self.project = project", "docstring": "Initializes the transform.\nArgs:\ntable_name: Name of the BigQuery table to use.\ndataset: Name of the dataset to use.\nschema: Dictionary in the format {'column_name': 'bigquery_type'}\nproject: Name of the Cloud project containing BigQuery table.", "source": "github-repos"}
{"code": "def fit(self, *args):\n        \n        data = list(zip(*args))\n        self.save()\n        if self._fit_batch_size is None:\n            raise ConfigError(\"in order to use fit() method\"\n                              \" set `fit_batch_size` parameter\")\n        bs = int(self._fit_batch_size)\n        data_len = len(data)\n        num_batches = self._fit_max_batches or ((data_len - 1) \n\n        avg_loss = 0.\n        best_loss = float('inf')\n        lrs, losses = [], []\n        _lr_find_schedule = DecayScheduler(start_val=self._fit_learning_rate[0],\n                                           end_val=self._fit_learning_rate[1],\n                                           dec_type=\"exponential\",\n                                           num_it=num_batches)\n        self._lr = _lr_find_schedule.start_val\n        self._mom = 0.\n        self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)\n        best_lr = _lr_find_schedule.start_val\n        for i in range(num_batches):\n            batch_start = (i * bs) % data_len\n            batch_end = batch_start + bs\n            report = self.train_on_batch(*zip(*data[batch_start:batch_end]))\n            if not isinstance(report, dict):\n                report = {'loss': report}\n            \n            avg_loss = self._fit_beta*avg_loss + (1 - self._fit_beta)*report['loss']\n            smoothed_loss = avg_loss / (1 - self._fit_beta**(i + 1))\n            lrs.append(self._lr)\n            losses.append(smoothed_loss)\n            log.info(f\"Batch {i}/{num_batches}: smooth_loss = {smoothed_loss}\"\n                     f\", lr = {self._lr}, best_lr = {best_lr}\")\n            if math.isnan(smoothed_loss) or (smoothed_loss > 4 * best_loss):\n                break\n            if (smoothed_loss < best_loss) and (i >= self._fit_min_batches):\n                best_loss = smoothed_loss\n                best_lr = self._lr\n            self._lr = _lr_find_schedule.next_val()\n            self._update_graph_variables(learning_rate=self._lr)\n\n            if i >= num_batches:\n                break\n        \n        end_val = self._get_best(lrs, losses)\n\n        start_val = end_val\n        if self._lr_schedule.dec_type in (DecayType.ONECYCLE, DecayType.TRAPEZOID):\n            start_val = end_val / self._fit_learning_rate_div\n        elif self._lr_schedule.dec_type in (DecayType.POLYNOMIAL, DecayType.EXPONENTIAL,\n                                            DecayType.LINEAR, DecayType.COSINE):\n            start_val = end_val\n            end_val = end_val / self._fit_learning_rate_div\n        self._lr_schedule = DecayScheduler(start_val=start_val,\n                                           end_val=end_val,\n                                           num_it=self._lr_schedule.nb,\n                                           dec_type=self._lr_schedule.dec_type,\n                                           extra=self._lr_schedule.extra)\n        log.info(f\"Found best learning rate value = {best_lr}\"\n                 f\", setting new learning rate schedule with {self._lr_schedule}.\")\n\n        self.load()\n        self._lr = self._lr_schedule.start_val\n        self._mom = self._mom_schedule.start_val\n        self._update_graph_variables(learning_rate=self._lr, momentum=self._mom)\n        return {'smoothed_loss': losses, 'learning_rate': lrs}", "docstring": "Find the best learning rate schedule, and set obtained values of learning rate\nand momentum for further model training. 
Best learning rate will be divided\nby `fit_learning_rate_div` for further training model.\n\nArgs:\n*args: arguments\n\nReturns:", "source": "juraj-google-style"}
{"code": "def distance_to_angle(distance, units='metric'):\n    if (units in ('km', 'metric')):\n        pass\n    elif (units in ('sm', 'imperial', 'US customary')):\n        distance *= STATUTE_MILE\n    elif (units in ('nm', 'nautical')):\n        distance *= NAUTICAL_MILE\n    else:\n        raise ValueError(('Unknown units type %r' % units))\n    return math.degrees((distance / BODY_RADIUS))", "docstring": "Convert a distance in to an angle along a great circle.\n\nArgs:\ndistance (float): Distance to convert to degrees\nunits (str): Unit type to be used for distances\n\nReturns:\nfloat: Angle in degrees\n\nRaises:\nValueError: Unknown value for ``units``", "source": "codesearchnet"}
{"code": "def __call__(self, inputs, state, scope=None):\n    return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.__call__, scope=scope)", "docstring": "Runs the RNN cell step computation.\n\nWe assume that the wrapped RNNCell is being built within its `__call__`\nmethod. We directly use the wrapped cell's `__call__` in the overridden\nwrapper `__call__` method.\n\nThis allows to use the wrapped cell and the non-wrapped cell equivalently\nwhen using `__call__`.\n\nArgs:\ninputs: A tensor with wrapped cell's input.\nstate: A tensor or tuple of tensors with wrapped cell's state.\nscope: VariableScope for the subgraph created in the wrapped cells'\n`__call__`.\n\nReturns:\nA pair containing:\n\n- Output: A tensor with cell's output.\n- New state: A tensor or tuple of tensors with new wrapped cell's state.", "source": "github-repos"}
{"code": "def get_yield_stress(self, n):\n    comp = root(self.get_stability_criteria, (- 1), args=n)\n    tens = root(self.get_stability_criteria, 1, args=n)\n    return (comp.x, tens.x)", "docstring": "Gets the yield stress for a given direction\n\nArgs:\nn (3x1 array-like): direction for which to find the\nyield stress", "source": "codesearchnet"}
{"code": "def from_json(cls, name, spec):\n        \n        if \"run\" not in spec:\n            raise TuneError(\"No trainable specified!\")\n\n        \n        \n        if \"env\" in spec:\n            spec[\"config\"] = spec.get(\"config\", {})\n            spec[\"config\"][\"env\"] = spec[\"env\"]\n            del spec[\"env\"]\n\n        spec = copy.deepcopy(spec)\n\n        run_value = spec.pop(\"run\")\n        try:\n            exp = cls(name, run_value, **spec)\n        except TypeError:\n            raise TuneError(\"Improper argument from JSON: {}.\".format(spec))\n        return exp", "docstring": "Generates an Experiment object from JSON.\n\nArgs:\nname (str): Name of Experiment.\nspec (dict): JSON configuration of experiment.", "source": "juraj-google-style"}
{"code": "def ContainsAddressStr(self, address):\n        \n        for key, contract in self._contracts.items():\n            if contract.Address == address:\n                return True\n        return False", "docstring": "Determine if the wallet contains the address.\n\nArgs:\naddress (str): a string representing the public key.\n\nReturns:\nbool: True, if the address is present in the wallet. False otherwise.", "source": "juraj-google-style"}
{"code": "def min(self):\n    if self.is_quantized or self.base_dtype in (bool, string, complex64, complex128):\n        raise TypeError(f'Cannot find minimum value of {self} with {('quantized type' if self.is_quantized else 'type')} {self.base_dtype}.')\n    try:\n        return ml_dtypes.finfo(self.as_numpy_dtype).min\n    except:\n        try:\n            return ml_dtypes.iinfo(self.as_numpy_dtype).min\n        except:\n            raise TypeError(f'Cannot find minimum value of {self}.')", "docstring": "Returns the minimum representable value in this data type.\n\nRaises:\nTypeError: if this is a non-numeric, unordered, or quantized type.", "source": "github-repos"}
{"code": "def case_to_clinVars(self, case_id):\n        \n        query = dict(case_id=case_id, csv_type='variant')\n        clinvar_objs = list(self.clinvar_collection.find(query))\n        submitted_vars = {}\n        for clinvar in clinvar_objs:\n            submitted_vars[clinvar.get('local_id')] = clinvar\n\n        return submitted_vars", "docstring": "Get all variants included in clinvar submissions for a case\n\nArgs:\ncase_id(str): a case _id\n\nReturns:\nsubmission_variants(dict): keys are variant ids and values are variant submission objects", "source": "juraj-google-style"}
{"code": "def build(self):\n    return copy.deepcopy(self._options)", "docstring": "Build a profiling option.\n\nReturns:\nA dict of profiling options.", "source": "github-repos"}
{"code": "def user_ban(channel, user):\n    \n\n    username = user.name\n    if isinstance(user, discord.Member):\n        if user.nick is not None:\n            username = user.nick\n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"Banned {}\".format(username),\n        \"{} has been banned from this server\".format(username),\n        modulename=modulename\n    )\n\n    return gui", "docstring": "Creates an embed UI containing an user warning message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nuser (discord.User): The user to ban\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def get_all_distributions_by_type(dist, metric_id):\n    submit_timestamp = time.time()\n    dist_types = ['count', 'max', 'min', 'sum', 'mean']\n    distribution_dicts = []\n    for dist_type in dist_types:\n        try:\n            distribution_dicts.append(get_distribution_dict(dist_type, submit_timestamp, dist, metric_id))\n        except ValueError:\n            continue\n    return distribution_dicts", "docstring": "Creates new list of objects with type of each distribution\nmetric value.\n\nArgs:\ndist(object): DistributionMetric object to be parsed\nmetric_id(uuid): id of the current test run\nReturns:\nlist of :class:`DistributionMetric` objects", "source": "github-repos"}
{"code": "def get_specification(version: str) -> Mapping[(str, Any)]:\n    spec_dir = config['bel']['lang']['specifications']\n    spec_dict = {}\n    bel_versions = get_bel_versions()\n    if (version not in bel_versions):\n        log.error('Cannot get unknown version BEL specification')\n        return {'error': 'unknown version of BEL'}\n    version_underscored = version.replace('.', '_')\n    json_fn = f'{spec_dir}/bel_v{version_underscored}.json'\n    with open(json_fn, 'r') as f:\n        spec_dict = json.load(f)\n    return spec_dict", "docstring": "Get BEL Specification\n\nThe json file this depends on is generated by belspec_yaml2json as\npart of the update_specifications function\n\nArgs:\nversion: e.g. 2.0.0 where the filename", "source": "codesearchnet"}
{"code": "def to_diff_dict(self) -> Dict[str, Any]:\n    config_dict = self.to_dict()\n    default_config_dict = BitsAndBytesConfig().to_dict()\n    serializable_config_dict = {}\n    for key, value in config_dict.items():\n        if value != default_config_dict[key]:\n            serializable_config_dict[key] = value\n    return serializable_config_dict", "docstring": "Removes all attributes from config which correspond to the default config attributes for better readability and\nserializes to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def combine(specs):\n    new_specs = {}\n    for spec in specs:\n        if (new_specs.get(spec, None) is None):\n            new_specs[spec] = spec\n        else:\n            new_specs[spec].add(spec)\n    return list(new_specs.values())", "docstring": "Combine package specifications' limitations.\n\nArgs:\nspecs (list of PackageSpec): the package specifications.\n\nReturns:\nlist of PackageSpec: the new, merged list of PackageSpec.", "source": "codesearchnet"}
{"code": "def dbclass(self, value):\n        \n\n        if not is_valid_dbclass(value):\n            raise AttributeError(\"'{}' is not a valid database type\".format(value))\n\n        self._class = value\n        self._connectionXML.set('class', value)", "docstring": "Set the connection's dbclass property.\n\nArgs:\nvalue:  New dbclass value. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def is_periodically_contiguous(self):\n    edges = self.sites_at_edges()\n    is_contiguous = [False, False, False]\n    along_x = any([(s2 in s1.p_neighbours) for s1 in edges[0] for s2 in edges[1]])\n    along_y = any([(s2 in s1.p_neighbours) for s1 in edges[2] for s2 in edges[3]])\n    along_z = any([(s2 in s1.p_neighbours) for s1 in edges[4] for s2 in edges[5]])\n    return (along_x, along_y, along_z)", "docstring": "logical check whether a cluster connects with itself across the\nsimulation periodic boundary conditions.\n\nArgs:\nnone\n\nReturns\n( Bool, Bool, Bool ): Contiguity along the x, y, and z coordinate axes", "source": "codesearchnet"}
{"code": "def get_element(source, path, separator=r'[/.]'):\n    \n    return _get_element_by_names(source, re.split(separator, path))", "docstring": "Given a dict and path '/' or '.' separated. Digs into de dict to retrieve\nthe specified element.\n\nArgs:\nsource (dict): set of nested objects in which the data will be searched\npath (string): '/' or '.' string with attribute names", "source": "juraj-google-style"}
{"code": "def get_cot_artifacts(context):\n    artifacts = {}\n    filepaths = filepaths_in_dir(context.config['artifact_dir'])\n    hash_alg = context.config['chain_of_trust_hash_algorithm']\n    for filepath in sorted(filepaths):\n        path = os.path.join(context.config['artifact_dir'], filepath)\n        sha = get_hash(path, hash_alg=hash_alg)\n        artifacts[filepath] = {hash_alg: sha}\n    return artifacts", "docstring": "Generate the artifact relative paths and shas for the chain of trust.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict: a dictionary of {\"path/to/artifact\": {\"hash_alg\": \"...\"}, ...}", "source": "codesearchnet"}
{"code": "def filter_(predicate, *structures, **kwargs):\n    flatten = kwargs.pop('flatten', False)\n    assert (not kwargs), 'filter() got unexpected keyword arguments.'\n\n    def impl(predicate, *structures):\n        if (len(structures) == 0):\n            return structures\n        if all((isinstance(s, (tuple, list)) for s in structures)):\n            if (len(set((len(x) for x in structures))) > 1):\n                raise ValueError('Cannot merge tuples or lists of different length.')\n            if (len(structures) > 1):\n                filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))\n            else:\n                filtered = (impl(predicate, x) for x in structures[0])\n            if hasattr(structures[0], '_fields'):\n                filtered = ((x if (x != ()) else None) for x in filtered)\n                return type(structures[0])(*filtered)\n            else:\n                filtered = (x for x in filtered if ((not isinstance(x, (tuple, list, dict))) or x))\n                return type(structures[0])(filtered)\n        if all((isinstance(s, dict) for s in structures)):\n            if (len(set((frozenset(x.keys()) for x in structures))) > 1):\n                raise ValueError('Cannot merge dicts with different keys.')\n            if (len(structures) > 1):\n                filtered = {k: impl(predicate, *(s[k] for s in structures)) for k in structures[0]}\n            else:\n                filtered = {k: impl(predicate, v) for (k, v) in structures[0].items()}\n            filtered = {k: v for (k, v) in filtered.items() if ((not isinstance(v, (tuple, list, dict))) or v)}\n            return type(structures[0])(filtered)\n        if (len(structures) > 1):\n            return (structures if predicate(*structures) else ())\n        else:\n            return (structures[0] if predicate(structures[0]) else ())\n    result = impl(predicate, *structures)\n    if flatten:\n        result = flatten_(result)\n    return result", "docstring": "Select elements of a nested structure based on a predicate function.\n\nIf multiple structures are provided as input, their structure must match and\nthe function will be applied to corresponding groups of elements. The nested\nstructure can consist of any combination of lists, tuples, and dicts.\n\nArgs:\npredicate: The function to determine whether an element should be kept.\nReceives one argument for every structure that is provided.\n*structures: One of more nested structures.\nflatten: Whether to flatten the resulting structure into a tuple. Keys of\ndictionaries will be discarded.\n\nReturns:\nNested structure.", "source": "codesearchnet"}
{"code": "def init(self, force_deploy=False):\n    machines = self.provider_conf.machines\n    networks = self.provider_conf.networks\n    _networks = []\n    for network in networks:\n        ipnet = IPNetwork(network.cidr)\n        _networks.append({'netpool': list(ipnet)[10:(- 10)], 'cidr': network.cidr, 'roles': network.roles, 'gateway': ipnet.ip})\n    vagrant_machines = []\n    vagrant_roles = {}\n    j = 0\n    for machine in machines:\n        for _ in range(machine.number):\n            vagrant_machine = {'name': ('enos-%s' % j), 'cpu': machine.flavour_desc['core'], 'mem': machine.flavour_desc['mem'], 'ips': [n['netpool'].pop() for n in _networks]}\n            vagrant_machines.append(vagrant_machine)\n            for role in machine.roles:\n                vagrant_roles.setdefault(role, []).append(vagrant_machine)\n            j = (j + 1)\n    logger.debug(vagrant_roles)\n    loader = FileSystemLoader(searchpath=TEMPLATE_DIR)\n    env = Environment(loader=loader, autoescape=True)\n    template = env.get_template('Vagrantfile.j2')\n    vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf)\n    vagrantfile_path = os.path.join(os.getcwd(), 'Vagrantfile')\n    with open(vagrantfile_path, 'w') as f:\n        f.write(vagrantfile)\n    v_env = dict(os.environ)\n    v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend\n    v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env)\n    if force_deploy:\n        v.destroy()\n    v.up()\n    v.provision()\n    roles = {}\n    for (role, machines) in vagrant_roles.items():\n        for machine in machines:\n            keyfile = v.keyfile(vm_name=machine['name'])\n            port = v.port(vm_name=machine['name'])\n            address = v.hostname(vm_name=machine['name'])\n            roles.setdefault(role, []).append(Host(address, alias=machine['name'], user=self.provider_conf.user, port=port, keyfile=keyfile))\n    networks = [{'cidr': str(n['cidr']), 'start': str(n['netpool'][0]), 'end': str(n['netpool'][(- 1)]), 'dns': '8.8.8.8', 'gateway': str(n['gateway']), 'roles': n['roles']} for n in _networks]\n    logger.debug(roles)\n    logger.debug(networks)\n    return (roles, networks)", "docstring": "Reserve and deploys the vagrant boxes.\n\nArgs:\nforce_deploy (bool): True iff new machines should be started", "source": "codesearchnet"}
{"code": "def __init__(self, root, case_sensitive=True):\n    \n    root = os.path.normpath(root)\n\n    if not root:\n      errstr = 'root path must not be empty (\\'.\\' for current directory)'\n      raise ValueError(errstr)\n\n    ensure_directory_exists(root)\n\n    self.root_path = root\n    self.case_sensitive = bool(case_sensitive)", "docstring": "Initialize the datastore with given root directory `root`.\n\nArgs:\nroot: A path at which to mount this filesystem datastore.", "source": "juraj-google-style"}
{"code": "def matches(self, regex: str) -> 'Builder':\n    param_nodes = self._function_args_to_nodes(self.node, [regex])\n    return self._to_builder(_evaluation.MatchesFunction(self.node.context, self.node, param_nodes))", "docstring": "The FHIRPath matches() function.\n\nArgs:\nregex: a regular expression to match against the parent element.\n\nReturns:\nAn expression that evaluates to True if the parent matches the given\nregular expression.", "source": "github-repos"}
{"code": "def fetch(self, customer_id, token_id, data={}, **kwargs):\n        \n        url = \"{}/{}/tokens/{}\".format(self.base_url, customer_id, token_id)\n        return self.get_url(url, data, **kwargs)", "docstring": "Fetch Token for given Id and given customer Id\n\nArgs:\ncustomer_id : Customer Id for which tokens have to be fetched\ntoken_id    : Id for which TOken object has to be fetched\n\nReturns:\nToken dict for given token Id", "source": "juraj-google-style"}
{"code": "def realpath(path):\n    if (path == '~'):\n        return userdir\n    if (path == '/'):\n        return sysroot\n    if path.startswith('/'):\n        return os.path.abspath(path)\n    if path.startswith('~/'):\n        return os.path.expanduser(path)\n    if path.startswith('./'):\n        return os.path.abspath(os.path.join(os.path.curdir, path[2:]))\n    return os.path.abspath(path)", "docstring": "Create the real absolute path for the given path.\n\nAdd supports for userdir & / supports.\n\nArgs:\n* path: pathname to use for realpath.\n\nReturns:\nPlatform independent real absolute path.", "source": "codesearchnet"}
{"code": "def sample_dynamic_prior(self, samples, batch_size, length, fixed=False):\n    if fixed:\n        sample_batch_size = 1\n    else:\n        sample_batch_size = batch_size\n    (sample, state) = self.dynamic_prior.zero_state([samples, sample_batch_size])\n    locs = []\n    scale_diags = []\n    sample_list = []\n    for _ in range(length):\n        (dist, state) = self.dynamic_prior(sample, state)\n        sample = dist.sample()\n        locs.append(dist.parameters['loc'])\n        scale_diags.append(dist.parameters['scale_diag'])\n        sample_list.append(sample)\n    sample = tf.stack(sample_list, axis=2)\n    loc = tf.stack(locs, axis=2)\n    scale_diag = tf.stack(scale_diags, axis=2)\n    if fixed:\n        sample = (sample + tf.zeros([batch_size, 1, 1]))\n    return (sample, tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag))", "docstring": "Sample the dynamic latent prior.\n\nArgs:\nsamples: Number of samples to draw from the latent distribution.\nbatch_size: Number of sequences to sample.\nlength: Number of timesteps to sample for each sequence.\nfixed: Boolean for whether or not to share the same random\nsample across all sequences.\n\nReturns:\nA tuple of a sample tensor of shape [samples, batch_size, length\nlatent_size], and a MultivariateNormalDiag distribution from which\nthe tensor was sampled, with event shape [latent_size], and batch\nshape [samples, 1, length] if fixed or [samples, batch_size,\nlength] otherwise.", "source": "codesearchnet"}
{"code": "def colorize(text, messageType=None):\n    formattedText = str(text)\n    if ('ERROR' in messageType):\n        formattedText = (colorama.Fore.RED + formattedText)\n    elif ('WARNING' in messageType):\n        formattedText = (colorama.Fore.YELLOW + formattedText)\n    elif ('SUCCESS' in messageType):\n        formattedText = (colorama.Fore.GREEN + formattedText)\n    elif ('INFO' in messageType):\n        formattedText = (colorama.Fore.BLUE + formattedText)\n    if ('BOLD' in messageType):\n        formattedText = (colorama.Style.BRIGHT + formattedText)\n    return (formattedText + colorama.Style.RESET_ALL)", "docstring": "Function that colorizes a message.\n\nArgs:\n-----\ntext: The string to be colorized.\nmessageType: Possible options include \"ERROR\", \"WARNING\", \"SUCCESS\",\n\"INFO\" or \"BOLD\".\n\nReturns:\n--------\nstring: Colorized if the option is correct, including a tag at the end\nto reset the formatting.", "source": "codesearchnet"}
{"code": "def _change_precision(self, val, base=0):\n        \n        if not isinstance(val, int):\n            raise TypeError('The first argument must be an integer.')\n        val = round(abs(val))\n        val = (lambda num: base if is_num(num) else num)(val)\n        return val", "docstring": "Check and normalise the value of precision (must be positive integer).\n\nArgs:\nval (INT): must be positive integer\nbase (INT): Description\n\nReturns:\nVAL (INT): Description", "source": "juraj-google-style"}
{"code": "def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:\n    signature(func).bind(*args, **kwargs)\n    return func", "docstring": "Check if the request's arguments match a function's signature.\n\nRaises TypeError exception if arguments cannot be passed to a function.\n\nArgs:\nfunc: The function to check.\nargs: Positional arguments.\nkwargs: Keyword arguments.\n\nRaises:\nTypeError: If the arguments cannot be passed to the function.", "source": "codesearchnet"}
{"code": "def mark_checked(tensors):\n\n    def _mark_checked(tensor):\n        tensor._keras_history_checked = True\n    nest.map_structure(_mark_checked, tensors)", "docstring": "Marks that these Tensors should not be tracked.\n\nThis prevents Layers from attempting to create TensorFlowOpLayers\nfor these Tensors.\n\nArgs:\ntensors: An arbitrary structure of Tensors.", "source": "github-repos"}
{"code": "def filter_tests(output_file: str, filters: List[str]):\n    if not os.path.isfile(output_file):\n        print('No test file found.')\n        return\n    with open(output_file, 'r', encoding='utf-8') as f:\n        test_files = f.read().split(' ')\n    if len(test_files) == 0 or test_files == ['']:\n        print('No tests to filter.')\n        return\n    if test_files == ['tests']:\n        test_files = [os.path.join('tests', f) for f in os.listdir('tests') if f not in ['__init__.py'] + filters]\n    else:\n        test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters]\n    with open(output_file, 'w', encoding='utf-8') as f:\n        f.write(' '.join(test_files))", "docstring": "Reads the content of the output file and filters out all the tests in a list of given folders.\n\nArgs:\noutput_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher.\nfilters (`List[str]`): A list of folders to filter.", "source": "github-repos"}
{"code": "def parse_structure(self, store_in_memory=False):\n        \n        \n        if not self.structure_file:\n            log.error('{}: no structure file, unable to parse'.format(self.id))\n            return None\n        else:\n            \n            structure = StructureIO(self.structure_path, self.file_type)\n\n            \n            structure_chains = [x.id for x in structure.first_model.child_list]\n            self.add_chain_ids(structure_chains)\n            self.get_structure_seqs(structure.first_model)\n\n            \n            if not self.mapped_chains:\n                self.add_mapped_chain_ids(structure_chains)\n\n            if store_in_memory:\n                self.parsed = True\n                self.structure = structure\n\n            return structure", "docstring": "Read the 3D coordinates of a structure file and return it as a Biopython Structure object.\nAlso create ChainProp objects in the chains attribute for each chain in the first model.\n\nArgs:\nstore_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``.\n\nReturns:\nStructure: Biopython Structure object", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor):\n    hidden_states = self.w_in(hidden_states)\n    if self.dropout is not None:\n        hidden_states = self.dropout(hidden_states)\n    hidden_states = self.w_out(hidden_states)\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)", "source": "github-repos"}
{"code": "def system(self) -> 'EFBChat':\n    self.chat_name = 'System'\n    self.chat_alias = None\n    self.chat_uid = EFBChat.SYSTEM_ID\n    self.chat_type = ChatType.System\n    return self", "docstring": "Set the chat as a system chat.\nOnly set for channel-level and group-level system chats.\n\nReturns:\nEFBChat: This object.", "source": "codesearchnet"}
{"code": "def add_authorization_policy(access_token, ck_id, oid):\n    path = '/ContentKeys'\n    body = (('{\"AuthorizationPolicyId\":\"' + oid) + '\"}')\n    return helper_add(access_token, ck_id, path, body)", "docstring": "Add Media Service Authorization Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nck_id (str): A Media Service Asset Content Key ID.\noptions_id (str): A Media Service OID.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def attention_bias_local(length, max_backward, max_forward):\n  \n  band = common_layers.ones_matrix_band_part(\n      length,\n      length,\n      max_backward,\n      max_forward,\n      out_shape=[1, 1, length, length])\n  return -1e9 * (1.0 - band)", "docstring": "Create an bias tensor to be added to attention logits.\n\nA position may attend to positions at most max_distance from it,\nforward and backwards.\n\nThis does not actually save any computation.\n\nArgs:\nlength: int\nmax_backward: int, maximum distance backward to attend. Negative values\nindicate unlimited.\nmax_forward: int, maximum distance forward to attend. Negative values\nindicate unlimited.\n\nReturns:\na `Tensor` with shape [1, 1, length, length].", "source": "juraj-google-style"}
{"code": "def loss_l2(self, l2=0):\n        \n        if isinstance(l2, (int, float)):\n            D = l2 * torch.eye(self.d)\n        else:\n            D = torch.diag(torch.from_numpy(l2))\n\n        \n        return torch.norm(D @ (self.mu - self.mu_init)) ** 2", "docstring": "L2 loss centered around mu_init, scaled optionally per-source.\n\nIn other words, diagonal Tikhonov regularization,\n||D(\\mu-\\mu_{init})||_2^2\nwhere D is diagonal.\n\nArgs:\n- l2: A float or np.array representing the per-source regularization\nstrengths to use", "source": "juraj-google-style"}
{"code": "def _SimpleEncoder(wire_type, encode_value, compute_value_size):\n\n    def SpecificEncoder(field_number, is_repeated, is_packed):\n        if is_packed:\n            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)\n            local_EncodeVarint = _EncodeVarint\n\n            def EncodePackedField(write, value):\n                write(tag_bytes)\n                size = 0\n                for element in value:\n                    size += compute_value_size(element)\n                local_EncodeVarint(write, size)\n                for element in value:\n                    encode_value(write, element)\n            return EncodePackedField\n        elif is_repeated:\n            tag_bytes = TagBytes(field_number, wire_type)\n\n            def EncodeRepeatedField(write, value):\n                for element in value:\n                    write(tag_bytes)\n                    encode_value(write, element)\n            return EncodeRepeatedField\n        else:\n            tag_bytes = TagBytes(field_number, wire_type)\n\n            def EncodeField(write, value):\n                write(tag_bytes)\n                return encode_value(write, value)\n            return EncodeField\n    return SpecificEncoder", "docstring": "Return a constructor for an encoder for fields of a particular type.\n\nArgs:\nwire_type:  The field's wire type, for encoding tags.\nencode_value:  A function which encodes an individual value, e.g.\n_EncodeVarint().\ncompute_value_size:  A function which computes the size of an individual\nvalue, e.g. _VarintSize().", "source": "codesearchnet"}
{"code": "def event_date(self, event_date):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    event_date = self._utils.format_datetime(event_date, date_format='%Y-%m-%dT%H:%M:%SZ')\n    self._data['eventDate'] = event_date\n    request = {'eventDate': event_date}\n    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Updates the event_date.\n\nArgs:\nevent_date: Converted to %Y-%m-%dT%H:%M:%SZ date format.\n\nReturns:", "source": "codesearchnet"}
{"code": "def _flip(image, flip_index, scope_name):\n    with ops.name_scope(None, scope_name, [image]):\n        image = ops.convert_to_tensor(image, name='image')\n        image = _AssertAtLeast3DImage(image)\n        shape = image.get_shape()\n\n        def f_rank3():\n            return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))\n\n        def f_rank4():\n            return array_ops.reverse(image, [flip_index + 1])\n        if shape.ndims is None:\n            rank = array_ops.rank(image)\n            return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n        elif shape.ndims == 3:\n            return f_rank3()\n        elif shape.ndims == 4:\n            return f_rank4()\n        else:\n            raise ValueError(\"'image' (shape %s)must have either 3 or 4 dimensions.\" % shape)", "docstring": "Flip an image either horizontally or vertically.\n\nOutputs the contents of `image` flipped along the dimension `flip_index`.\n\nSee also `reverse()`.\n\nArgs:\nimage: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\nof shape `[height, width, channels]`.\nflip_index: 0 For vertical, 1 for horizontal.\nscope_name: string, scope name.\n\nReturns:\nA `Tensor` of the same type and shape as `image`.\n\nRaises:\nValueError: if the shape of `image` not supported.", "source": "github-repos"}
{"code": "def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache):\n    \n    if not data_type_definition:\n      raise errors.FormatError('Missing data type definition')\n\n    members = getattr(data_type_definition, 'members', None)\n    if not members:\n      raise errors.FormatError('Invalid data type definition missing members')\n\n    data_type_maps = []\n\n    members_data_size = 0\n    for member_definition in members:\n      if isinstance(member_definition, data_types.MemberDataTypeDefinition):\n        member_definition = member_definition.member_data_type_definition\n\n      if (data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE and\n          member_definition.byte_order == definitions.BYTE_ORDER_NATIVE):\n        \n        \n        member_definition = copy.copy(member_definition)\n        member_definition.name = '_{0:s}_{1:s}'.format(\n            data_type_definition.name, member_definition.name)\n        member_definition.byte_order = data_type_definition.byte_order\n\n      if member_definition.name not in data_type_map_cache:\n        data_type_map = DataTypeMapFactory.CreateDataTypeMapByType(\n            member_definition)\n        data_type_map_cache[member_definition.name] = data_type_map\n\n      data_type_map = data_type_map_cache[member_definition.name]\n      if members_data_size is not None:\n        if not isinstance(member_definition, data_types.PaddingDefinition):\n          byte_size = member_definition.GetByteSize()\n        else:\n          _, byte_size = divmod(\n              members_data_size, member_definition.alignment_size)\n          if byte_size > 0:\n            byte_size = member_definition.alignment_size - byte_size\n\n          data_type_map.byte_size = byte_size\n\n        if byte_size is None:\n          members_data_size = None\n        else:\n          members_data_size += byte_size\n\n      data_type_maps.append(data_type_map)\n\n    return data_type_maps", "docstring": "Retrieves the member data type maps.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\ndata_type_map_cache (dict[str, DataTypeMap]): cached data type maps.\n\nReturns:\nlist[DataTypeMap]: member data type maps.\n\nRaises:\nFormatError: if the data type maps cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def check_error_output(tst, out, command_prefix, args):\n    tst.assertGreater(len(out.lines), 2)\n    tst.assertStartsWith(out.lines[0], 'Error occurred during handling of command: %s %s' % (command_prefix, ' '.join(args)))", "docstring": "Check RichTextLines output from invalid/erroneous commands.\n\nArgs:\ntst: A test_util.TensorFlowTestCase instance.\nout: The RichTextLines object to be checked.\ncommand_prefix: The command prefix of the command that caused the error.\nargs: The arguments (excluding prefix) of the command that caused the error.", "source": "github-repos"}
{"code": "def GetRealValue(self, value):\n    assert value.op.type not in ['Variable', 'VariableV2']\n    real_value = self._history_map.get(value.name)\n    if real_value is None:\n        cur_value = value\n        cur_grad_state = self\n        while True:\n            enter_op = util.GetLoopConstantEnter(cur_value)\n            if enter_op:\n                cur_value = enter_op.inputs[0]\n                cur_grad_state = cur_grad_state.outer_grad_state\n                if cur_grad_state is None:\n                    real_value = self._grad_context.AddValue(cur_value)\n                    break\n            elif constant_op.is_constant(cur_value):\n                real_value = constant_op.constant(tensor_util.constant_value(cur_value), dtype=cur_value.dtype)\n                break\n            else:\n                self._grad_context.Exit()\n                history_value = cur_grad_state.AddForwardAccumulator(cur_value)\n                self._grad_context.Enter()\n                break\n        if real_value is None:\n            real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value, cur_value)\n            if cur_grad_state != self:\n                real_value = self._grad_context.AddValue(real_value)\n        self._history_map[value.name] = real_value\n    return real_value", "docstring": "Get the real value of `value`.\n\nIf backprop \"uses\" a value produced by forward inference, an accumulator\nis added in the forward loop to accumulate its values.  We use the\naccumulated value. This method must be called in the grad loop context.\n`value` must be in forward and needed for backprop.\n\nArgs:\nvalue: A tensor to be captured.\n\nReturns:\nThe same tensor obtained from the saved history.", "source": "github-repos"}
{"code": "def GenerateTableHtml(items, keys_to_print, display_index=True):\n    html = ''\n    html += '<table><tr>\\n'\n    html += '<tr>\\n'\n    if display_index:\n        html += '<th>index</th>'\n    for h, mapper in keys_to_print:\n        html += '<th>%s</th>' % h\n    html += '</tr>\\n'\n    for idx, tensor in enumerate(items):\n        html += '<tr>\\n'\n        if display_index:\n            html += '<td>%d</td>' % idx\n        for h, mapper in keys_to_print:\n            val = tensor[h] if h in tensor else None\n            val = val if mapper is None else mapper(val)\n            html += '<td>%s</td>\\n' % val\n        html += '</tr>\\n'\n    html += '</table>\\n'\n    return html", "docstring": "Given a list of object values and keys to print, make an HTML table.\n\nArgs:\nitems: Items to print an array of dicts.\nkeys_to_print: (key, display_fn). `key` is a key in the object. i.e.\nitems[0][key] should exist. display_fn is the mapping function on display.\ni.e. the displayed html cell will have the string returned by\n`mapping_fn(items[0][key])`.\ndisplay_index: add a column which is the index of each row in `items`.\n\nReturns:\nAn html table.", "source": "github-repos"}
{"code": "def get_dim_label(js_dict, dim, input='dataset'):\n    if (input == 'dataset'):\n        input = js_dict['dimension'][dim]\n        label_col = 'label'\n    elif (input == 'dimension'):\n        label_col = js_dict['label']\n        input = js_dict\n    else:\n        raise ValueError\n    try:\n        dim_label = input['category']['label']\n    except KeyError:\n        dim_index = get_dim_index(js_dict, dim)\n        dim_label = pd.concat([dim_index['id'], dim_index['id']], axis=1)\n        dim_label.columns = ['id', 'label']\n    else:\n        dim_label = pd.DataFrame(list(zip(dim_label.keys(), dim_label.values())), index=dim_label.keys(), columns=['id', label_col])\n    try:\n        dim_index = input['category']['index']\n    except KeyError:\n        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])), index=[0], columns=['id', 'index'])\n    else:\n        if (type(dim_index) is list):\n            dim_index = pd.DataFrame(list(zip(dim_index, range(0, len(dim_index)))), index=dim_index, columns=['id', 'index'])\n        else:\n            dim_index = pd.DataFrame(list(zip(dim_index.keys(), dim_index.values())), index=dim_index.keys(), columns=['id', 'index'])\n    dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')\n    return dim_label", "docstring": "Get label from a given dimension.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\ndim (string): dimension name obtained from JSON file.\n\nReturns:\ndim_label(pandas.DataFrame): DataFrame with label-based dimension data.", "source": "codesearchnet"}
{"code": "def iterate_sequences(consumer_fn, output_template, sequences, length, chunk_length=None, batch_size=None, num_epochs=1, padding_value=0):\n    if (not length.shape[0].value):\n        raise ValueError('Batch size of length tensor must be set.')\n    num_sequences = length.shape[0].value\n    sequences = dict(sequence=sequences, length=length)\n    dataset = tf.data.Dataset.from_tensor_slices(sequences)\n    dataset = dataset.repeat(num_epochs)\n    if chunk_length:\n        dataset = dataset.map(remove_padding).flat_map((lambda x: tf.data.Dataset.from_tensor_slices(chunk_sequence(x, chunk_length, padding_value))))\n        num_chunks = tf.reduce_sum((((length - 1) \n    else:\n        num_chunks = num_sequences\n    if batch_size:\n        dataset = dataset.shuffle((num_sequences \n    dataset = dataset.batch((batch_size or num_sequences))\n    dataset = dataset.prefetch(num_epochs)\n    iterator = dataset.make_initializable_iterator()\n    with tf.control_dependencies([iterator.initializer]):\n        num_batches = ((num_epochs * num_chunks) \n        return tf.scan((lambda _1, index: consumer_fn(iterator.get_next())), tf.range(num_batches), output_template, parallel_iterations=1)", "docstring": "Iterate over batches of chunks of sequences for multiple epochs.\n\nThe batch dimension of the length tensor must be set because it is used to\ninfer buffer sizes.\n\nArgs:\nconsumer_fn: Function creating the operation to process the data.\noutput_template: Nested tensors of same shape and dtype as outputs.\nsequences: Nested collection of tensors with batch and time dimension.\nlength: Tensor containing the length for each sequence.\nchunk_length: Split sequences into chunks of this size; optional.\nbatch_size: Split epochs into batches of this size; optional.\nnum_epochs: How many times to repeat over the data.\npadding_value: Value used for padding the last chunk after the sequence.\n\nRaises:\nValueError: Unknown batch size of the length tensor.\n\nReturns:\nConcatenated nested tensors returned by the consumer.", "source": "codesearchnet"}
{"code": "def update_ref(profile, ref, sha):\n    resource = ('/refs/' + ref)\n    payload = {'sha': sha}\n    data = api.patch_request(profile, resource, payload)\n    return prepare(data)", "docstring": "Point a ref to a new SHA.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nref\nThe ref to update, e.g., ``heads/my-feature-branch``.\n\nsha\nThe SHA of the commit to point the ref to.\n\nReturns\nA dict with data about the ref.", "source": "codesearchnet"}
{"code": "def get_json(self):\n    try:\n        usernotes = self.subreddit.wiki[self.page_name].content_md\n        notes = json.loads(usernotes)\n    except NotFound:\n        self._init_notes()\n    else:\n        if (notes['ver'] != self.schema):\n            raise RuntimeError('Usernotes schema is v{0}, puni requires v{1}'.format(notes['ver'], self.schema))\n        self.cached_json = self._expand_json(notes)\n    return self.cached_json", "docstring": "Get the JSON stored on the usernotes wiki page.\n\nReturns a dict representation of the usernotes (with the notes BLOB\ndecoded).\n\nRaises:\nRuntimeError if the usernotes version is incompatible with this\nversion of puni.", "source": "codesearchnet"}
{"code": "def set_members(self, name, members, mode=None):\n    commands = list()\n    grpid = re.search('(\\\\d+)', name).group()\n    current_members = self.get_members(name)\n    lacp_mode = self.get_lacp_mode(name)\n    if (mode and (mode != lacp_mode)):\n        lacp_mode = mode\n        self.set_lacp_mode(grpid, lacp_mode)\n    for member in set(current_members).difference(members):\n        commands.append(('interface %s' % member))\n        commands.append(('no channel-group %s' % grpid))\n    for member in set(members).difference(current_members):\n        commands.append(('interface %s' % member))\n        commands.append(('channel-group %s mode %s' % (grpid, lacp_mode)))\n    return (self.configure(commands) if commands else True)", "docstring": "Configures the array of member interfaces for the Port-Channel\n\nArgs:\nname(str): The Port-Channel interface name to configure the member\ninterfaces\n\nmembers(list): The list of Ethernet interfaces that should be\nmember interfaces\n\nmode(str): The LACP mode to configure the member interfaces to.\nValid values are 'on, 'passive', 'active'. When there are\nexisting channel-group members and their lacp mode differs\nfrom this attribute, all of those members will be removed and\nthen re-added using the specified lacp mode. If this attribute\nis omitted, the existing lacp mode will be used for new\nmember additions.\n\nReturns:\nTrue if the operation succeeds otherwise False", "source": "codesearchnet"}
{"code": "def get_firmware(self):\n    firmware_uri = '{}/firmware'.format(self.data['uri'])\n    return self._helper.do_get(firmware_uri)", "docstring": "Gets baseline firmware information for a SAS Logical Interconnect.\n\nReturns:\ndict: SAS Logical Interconnect Firmware.", "source": "codesearchnet"}
{"code": "def create(configs):\n    \n    if not configs:\n        raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)\n    elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN:\n        ads = get_all_instances()\n    elif not isinstance(configs, list):\n        raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)\n    elif isinstance(configs[0], dict):\n        \n        ads = get_instances_with_configs(configs)\n    elif isinstance(configs[0], basestring):\n        \n        ads = get_instances(configs)\n    else:\n        raise Error('No valid config found in: %s' % configs)\n    valid_ad_identifiers = list_adb_devices() + list_adb_devices_by_usb_id()\n\n    for ad in ads:\n        if ad.serial not in valid_ad_identifiers:\n            raise DeviceError(ad, 'Android device is specified in config but'\n                              ' is not attached.')\n    _start_services_on_ads(ads)\n    return ads", "docstring": "Creates AndroidDevice controller objects.\n\nArgs:\nconfigs: A list of dicts, each representing a configuration for an\nAndroid device.\n\nReturns:\nA list of AndroidDevice objects.", "source": "juraj-google-style"}
{"code": "def one_hot_class_label_loss(top_out,\n                             targets,\n                             model_hparams,\n                             vocab_size,\n                             weights_fn):\n  \n  del model_hparams, vocab_size  \n  loss_scale = tf.losses.softmax_cross_entropy(\n      onehot_labels=targets, logits=top_out)\n  weights = weights_fn(targets)\n  loss_denom = tf.reduce_sum(weights)\n  return loss_scale, loss_denom", "docstring": "Apply softmax cross-entropy between outputs and targets.\n\nArgs:\ntop_out: logits Tensor with shape [batch, ?, ?, num_classes]\ntargets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\nweights_fn:\n\nReturns:\nloss_scale (cross-entropy), loss_denom", "source": "juraj-google-style"}
{"code": "def is_orthogonal(\n        matrix: np.ndarray,\n        *,\n        rtol: float = 1e-5,\n        atol: float = 1e-8) -> bool:\n    \n    return (matrix.shape[0] == matrix.shape[1] and\n            np.all(np.imag(matrix) == 0) and\n            np.allclose(matrix.dot(matrix.T), np.eye(matrix.shape[0]),\n                        rtol=rtol,\n                        atol=atol))", "docstring": "Determines if a matrix is approximately orthogonal.\n\nA matrix is orthogonal if it's square and real and its transpose is its\ninverse.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is orthogonal within the given tolerance.", "source": "juraj-google-style"}
{"code": "def __init__(self, input_spec):\n\n    def maybe_parse_byte_size(s):\n        return parse_byte_size(s) if isinstance(s, str) else int(s)\n    self._num_records = input_spec['numRecords']\n    self._key_size = maybe_parse_byte_size(input_spec.get('keySizeBytes', 1))\n    self._hot_key_fraction = input_spec.get('hotKeyFraction', 0)\n    self._num_hot_keys = input_spec.get('numHotKeys', 0)\n    self._value_size = maybe_parse_byte_size(input_spec.get('valueSizeBytes', 1))\n    self._total_size = self.element_size * self._num_records\n    self._initial_splitting = input_spec['bundleSizeDistribution']['type'] if 'bundleSizeDistribution' in input_spec else 'const'\n    if self._initial_splitting != 'const' and self._initial_splitting != 'zipf':\n        raise ValueError('Only const and zipf distributions are supported for determining sizes of bundles produced by initial splitting. Received: %s', self._initial_splitting)\n    self._initial_splitting_num_bundles = input_spec['forceNumInitialBundles'] if 'forceNumInitialBundles' in input_spec else 0\n    if self._initial_splitting == 'zipf':\n        self._initial_splitting_distribution_parameter = input_spec['bundleSizeDistribution']['param']\n        if self._initial_splitting_distribution_parameter < 1:\n            raise ValueError('Parameter for a Zipf distribution must be larger than 1. Received %r.', self._initial_splitting_distribution_parameter)\n    else:\n        self._initial_splitting_distribution_parameter = 0\n    self._dynamic_splitting = 'none' if 'splitPointFrequencyRecords' in input_spec and input_spec['splitPointFrequencyRecords'] == 0 else 'perfect'\n    if 'delayDistribution' in input_spec:\n        if input_spec['delayDistribution']['type'] != 'const':\n            raise ValueError(\"SyntheticSource currently only supports delay distributions of type 'const'. Received %s.\", input_spec['delayDistribution']['type'])\n        self._sleep_per_input_record_sec = float(input_spec['delayDistribution']['const']) / 1000\n        if self._sleep_per_input_record_sec and self._sleep_per_input_record_sec < 0.001:\n            raise ValueError('Sleep time per input record must be at least 1e-3. Received: %r', self._sleep_per_input_record_sec)\n    else:\n        self._sleep_per_input_record_sec = 0\n    self.gen_algo = input_spec.get('algorithm', None)\n    if self.gen_algo not in (None, 'builtin', 'lcg'):\n        raise ValueError('Unknown algorithm for input_spec: %s. Supported algorithms are \"builtin\" and \"lcg\".', self.gen_algo)", "docstring": "Initiates a synthetic source.\n\nArgs:\ninput_spec: Input specification of the source. See corresponding option in\nfunction 'parse_args()' below for more details.\nRaises:\nValueError: if input parameters are invalid.", "source": "github-repos"}
{"code": "def _consolidate_numeric_values(row_index_to_values, min_consolidation_fraction, debug_info):\n    type_counts = collections.Counter()\n    for numeric_values in row_index_to_values.values():\n        type_counts.update(_get_all_types(numeric_values))\n    if not type_counts:\n        return {}\n    max_count = max(type_counts.values())\n    if max_count < len(row_index_to_values) * min_consolidation_fraction:\n        return {}\n    valid_types = set()\n    for value_type, count in type_counts.items():\n        if count == max_count:\n            valid_types.add(value_type)\n    if len(valid_types) > 1:\n        assert DATE_TYPE in valid_types\n        max_type = DATE_TYPE\n    else:\n        max_type = next(iter(valid_types))\n    new_row_index_to_value = {}\n    for index, values in row_index_to_values.items():\n        for value in values:\n            if _get_value_type(value) == max_type:\n                new_row_index_to_value[index] = value\n                break\n    return new_row_index_to_value", "docstring": "Finds the most common numeric values in a column and returns them\n\nArgs:\nrow_index_to_values:\nFor each row index all the values in that cell.\nmin_consolidation_fraction:\nFraction of cells that need to have consolidated value.\ndebug_info:\nAdditional information only used for logging\n\nReturns:\nFor each row index the first value that matches the most common value. Rows that don't have a matching value\nare dropped. Empty list if values can't be consolidated.", "source": "github-repos"}
{"code": "def setup(self, paths=None):\n    if (not paths):\n        self.state.add_error('No `paths` argument provided in recipe, bailing', critical=True)\n    else:\n        self._paths = [path.strip() for path in paths.strip().split(',')]", "docstring": "Sets up the _paths attribute.\n\nArgs:\npaths: Comma-separated list of strings representing the paths to collect.", "source": "codesearchnet"}
{"code": "def scan(self, proxy_scanner, expected_num=20, val_thr_num=4, queue_timeout=3, val_timeout=5, out_file='proxies.json'):\n    try:\n        proxy_scanner.scan()\n        self.logger.info('starting {} threads to validating proxies...'.format(val_thr_num))\n        val_threads = []\n        for i in range(val_thr_num):\n            t = threading.Thread(name='val-{:0>2d}'.format((i + 1)), target=self.validate, kwargs=dict(proxy_scanner=proxy_scanner, expected_num=expected_num, queue_timeout=queue_timeout, val_timeout=val_timeout))\n            t.daemon = True\n            val_threads.append(t)\n            t.start()\n        for t in val_threads:\n            t.join()\n        self.logger.info('Proxy scanning done!')\n    except:\n        raise\n    finally:\n        if (out_file is not None):\n            self.save(out_file)", "docstring": "Scan and validate proxies\n\nFirstly, call the `scan` method of `proxy_scanner`, then using multiple\nthreads to validate them.\n\nArgs:\nproxy_scanner: A ProxyScanner object.\nexpected_num: Max number of valid proxies to be scanned.\nval_thr_num: Number of threads used for validating proxies.\nqueue_timeout: Timeout for getting a proxy from the queue.\nval_timeout: An integer passed to `is_valid` as argument `timeout`.\nout_file: A string or None. If not None, the proxies will be saved\ninto `out_file`.", "source": "codesearchnet"}
{"code": "def Snapshot(self, request, global_params=None):\n    config = self.GetMethodConfig('Snapshot')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Snapshot the state of a streaming job.\n\nArgs:\nrequest: (DataflowProjectsLocationsJobsSnapshotRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Snapshot) The response message.", "source": "github-repos"}
{"code": "def use_value_spec(self, value_spec: Optional[pg_typing.Dict], allow_partial: bool=False) -> 'Dict':\n    if value_spec is None:\n        self._value_spec = None\n        self._accessor_writable = True\n        return self\n    if not isinstance(value_spec, pg_typing.Dict):\n        raise ValueError(self._error_message(f'Value spec for list must be a `pg.typing.Dict` object. Encountered: {value_spec!r}'))\n    if self._value_spec and self._value_spec != value_spec:\n        raise RuntimeError(self._error_message(f'Dict is already bound with a different value spec: {self._value_spec}. New value spec: {value_spec}.'))\n    self._allow_partial = allow_partial\n    if flags.is_type_check_enabled():\n        value_spec.apply(self, allow_partial=base.accepts_partial(self), child_transform=base.symbolic_transform_fn(self._allow_partial), root_path=self.sym_path)\n    else:\n        self._value_spec = value_spec\n    return self", "docstring": "Applies a ``pg.typing.Dict`` as the value spec for current dict.\n\nArgs:\nvalue_spec: A Dict ValueSpec to apply to this Dict.\nIf current Dict is schema-less (whose immediate members are not\nvalidated against schema), and `value_spec` is not None, the value spec\nwill be applied to the Dict.\nOr else if current Dict is already symbolic (whose immediate members\nare under the constraint of a Dict value spec), and `value_spec` is\nNone, current Dict will become schema-less. However, the schema\nconstraints for non-immediate members will remain.\nallow_partial: Whether allow partial dict based on the schema. This flag\nwill override allow_partial flag in __init__ for spec-less Dict.\n\nReturns:\nSelf.\n\nRaises:\nValueError: validation failed due to value error.\nRuntimeError: Dict is already bound with another spec.\nTypeError: type errors during validation.\nKeyError: key errors during validation.", "source": "github-repos"}
{"code": "def get_address_coords(self, address):\n        \n        url = \"https:\n        r = requests.get(url)\n        r.raise_for_status()\n        results = r.json()['results']\n        lat = results[0]['geometry']['location']['lat']\n        lng = results[0]['geometry']['location']['lng']\n        return lat, lng", "docstring": "Use the google geocoder to get latitude and longitude for an address string\n\nArgs:\naddress: any address string\n\nReturns:\nA tuple of (lat,lng)", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.ListIntents = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/ListIntents',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.ListIntentsResponse.FromString,\n        )\n    self.GetIntent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/GetIntent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.GetIntentRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,\n        )\n    self.CreateIntent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/CreateIntent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.CreateIntentRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,\n        )\n    self.UpdateIntent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/UpdateIntent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.UpdateIntentRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.Intent.FromString,\n        )\n    self.DeleteIntent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/DeleteIntent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.DeleteIntentRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.BatchUpdateIntents = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/BatchUpdateIntents',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchUpdateIntentsRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.BatchDeleteIntents = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Intents/BatchDeleteIntents',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.BatchDeleteIntentsRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _ReadEntry(self, line):\n    line = line.split()\n    map_entry = automount.AutomountMapEntry()\n    try:\n        map_entry.key = line[0]\n        if len(line) > 2:\n            map_entry.options = line[1]\n            map_entry.location = line[2]\n        else:\n            map_entry.location = line[1]\n    except IndexError:\n        return None\n    return map_entry", "docstring": "Return an AutomountMapEntry from a record in the target cache.\n\nArgs:\nline: A string from a file cache.\n\nReturns:\nAn AutomountMapEntry if the line is successfully parsed, None otherwise.", "source": "github-repos"}
{"code": "def reduce(cls, requirements: Iterable['FetchRequirement']) -> 'FetchRequirement':\n    return reduce((lambda x, y: (x | y)), requirements, cls.NONE)", "docstring": "Reduce a set of fetch requirements into a single requirement.\n\nArgs:\nrequirements: The set of fetch requirements.", "source": "codesearchnet"}
{"code": "def publishApp(self, app_info, map_info=None, fsInfo=None):\n        \n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        appDet = None\n        try:\n            app_results = []\n            if isinstance(app_info, list):\n                for appDet in app_info:\n                    app_results.append(self._publishAppLogic(appDet=appDet,map_info=map_info,fsInfo=fsInfo))\n            else:\n                app_results.append(self._publishAppLogic(appDet=app_info,map_info=map_info,fsInfo=fsInfo))\n            return app_results\n\n        except (common.ArcRestHelperError) as e:\n            raise e\n        except Exception as e:\n\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                \"function\": \"publishApp\",\n                \"line\": line,\n                \"filename\":  filename,\n                \"synerror\": synerror,\n            })\n\n        finally:\n            appDet = None\n\n            del appDet\n            gc.collect()", "docstring": "Publishes apps to AGOL/Portal\n\nArgs:\napp_info (list): A list of JSON configuration apps to publish.\nmap_info (list): Defaults to ``None``.\nfsInfo (list): Defaults to ``None``.\nReturns:\ndict: A dictionary of results objects.", "source": "juraj-google-style"}
{"code": "def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:\n    kwargs.update({'user': user})\n    return self.api_call('users.getPresence', http_verb='GET', params=kwargs)", "docstring": "Gets user presence information.\n\nArgs:\nuser (str): User to get presence info on. Defaults to the authed user.\ne.g. 'W1234567890'", "source": "codesearchnet"}
{"code": "def _make_unique_slug(slug: str, language: str, is_unique: Callable[[str], bool]) -> str:\n        \n\n        index = 1\n        unique_slug = slug\n\n        while not is_unique(unique_slug, language):\n            unique_slug = '%s-%d' % (slug, index)\n            index += 1\n\n        return unique_slug", "docstring": "Guarentees that the specified slug is unique by appending\na number until it is unique.\n\nArguments:\nslug:\nThe slug to make unique.\n\nis_unique:\nFunction that can be called to verify\nwhether the generate slug is unique.\n\nReturns:\nA guarenteed unique slug.", "source": "juraj-google-style"}
{"code": "def getdoc(object):\n    return _inspect.getdoc(object)", "docstring": "TFDecorator-aware replacement for inspect.getdoc.\n\nArgs:\nobject: An object, possibly decorated.\n\nReturns:\nThe docstring associated with the object.\n\nThe outermost-decorated object is intended to have the most complete\ndocumentation, so the decorated parameter is not unwrapped.", "source": "github-repos"}
{"code": "def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0):\n    mask = input_ids.ne(padding_idx).int()\n    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n    return incremental_indices.long() + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding\nsymbols are ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\nx: torch.Tensor x:\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def features(self):\n        \n        buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n        self._dll.JLINKARM_GetFeatureString(buf)\n\n        result = ctypes.string_at(buf).decode().strip()\n        if len(result) == 0:\n            return list()\n\n        return result.split(', ')", "docstring": "Returns a list of the J-Link embedded features.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of strings, each a feature.  Example:\n``[ 'RDI', 'FlashBP', 'FlashDL', 'JFlash', 'GDB' ]``", "source": "juraj-google-style"}
{"code": "def __init__(self, value=b''):\n        \n        super(DigestValue, self).__init__(value, Tags.DIGEST_VALUE)", "docstring": "Construct a DigestValue object.\n\nArgs:\nvalue (bytes): The bytes of the hash. Optional, defaults to\nthe empty byte string.", "source": "juraj-google-style"}
{"code": "def get_weights(self):\n    with self.distribute_strategy.scope():\n        return super(Model, self).get_weights()", "docstring": "Retrieves the weights of the model.\n\nReturns:\nA flat list of Numpy arrays.", "source": "github-repos"}
{"code": "class OrderedEnqueuer(PyDatasetEnqueuer):\n\n    def __init__(self, py_dataset, workers=1, use_multiprocessing=False, max_queue_size=10, shuffle=False):\n        super().__init__(py_dataset, workers, use_multiprocessing, max_queue_size)\n        self.shuffle = shuffle\n        if self.py_dataset.num_batches is None:\n            self.indices = itertools.count()\n\n    def _get_executor_init(self, workers):\n        \n\n        def pool_fn(seqs):\n            pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue()))\n            _DATA_POOLS.add(pool)\n            return pool\n        return pool_fn\n\n    def _run(self):\n        \n        try:\n            if self.py_dataset.num_batches is not None:\n                indices = range(self.py_dataset.num_batches)\n                if self.shuffle:\n                    indices = list(indices)\n                    random.shuffle(indices)\n                self.indices = iter(indices)\n            self._send_py_dataset()\n            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n                while self.is_running():\n                    try:\n                        i = next(self.indices)\n                        self.future_queue.put(executor.apply_async(get_index, (self.uid, i)), block=True)\n                    except StopIteration:\n                        break\n        except Exception as e:\n            self.future_queue.put(e)\n\n    def get(self):\n        \n        while self.is_running():\n            try:\n                inputs = self.ready_queue.get(block=False)\n                yield inputs\n                continue\n            except queue.Empty:\n                pass\n            try:\n                value = self.future_queue.get(block=True, timeout=5)\n                self.future_queue.task_done()\n                if isinstance(value, Exception):\n                    raise value\n                inputs = value.get()\n                if inputs is not None:\n                    yield inputs\n            except queue.Empty:\n                pass\n            except Exception as e:\n                self.stop(drain_queue_and_join=True)\n                raise e\n        raise ValueError('Iterator called after `on_epoch_end` or before `on_epoch_begin`.')", "docstring": "Builds a Enqueuer from a PyDataset.\n\nArgs:\npy_dataset: A `keras.utils.PyDataset` object.\nuse_multiprocessing: use multiprocessing if True, otherwise threading\nshuffle: whether to shuffle the data at the beginning of each epoch", "source": "github-repos"}
{"code": "def flatten(l):\n    for el in l:\n        if (isinstance(el, Iterable) and (not isinstance(el, (str, bytes))) and (not isinstance(el, dict))):\n            (yield from flatten(el))\n        else:\n            (yield el)", "docstring": "Flatten a multi-deminision list and return a iterable\n\nNote that dict and str will not be expanded, instead, they will be kept as a single element.\n\nArgs:\nl (list): The list needs to be flattened\n\nReturns:\nA iterable of flattened list. To have a list instead use ``list(flatten(l))``", "source": "codesearchnet"}
{"code": "def _build_recursive_hd_scatter(input_tensors, devices):\n    num_devices = len(devices)\n    num_hops = int(math.log(num_devices, 2))\n    assert num_devices == 2 ** num_hops, 'num_devices must be a power of 2'\n    chunks = input_tensors\n    for h in reversed(range(0, num_hops)):\n        span = 2 ** h\n        group_size = span * 2\n        new_chunks = [[] for _ in devices]\n        for d in range(0, num_devices):\n            if d % group_size >= group_size / 2:\n                continue\n            left_idx = d\n            right_idx = d + span\n            left_dev = devices[left_idx]\n            right_dev = devices[right_idx]\n            with ops.device(left_dev):\n                new_chunks[left_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0)\n            with ops.device(right_dev):\n                new_chunks[right_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0)\n        chunks = new_chunks\n    return chunks", "docstring": "Construct the scatter phase of recursive halving-doubling all-reduce.\n\nArgs:\ninput_tensors: list of `tf.Tensor` that are fully-reduced shards.\ndevices: a list of strings naming the devices on which the reconstituted\nfull tensors should be placed.\n\nReturns:\nlist of `tf.Tensor` which are the fully reduced tensors.", "source": "github-repos"}
{"code": "async def executor(func, *args, **kwargs):\n    \n    def syncfunc():\n        return func(*args, **kwargs)\n\n    loop = asyncio.get_running_loop()\n    return await loop.run_in_executor(None, syncfunc)", "docstring": "Execute a function in an executor thread.\n\nArgs:\ntodo ((func,args,kwargs)): A todo tuple.", "source": "juraj-google-style"}
{"code": "def merge_with(self, other):\n    other = as_dimension(other)\n    self.assert_is_convertible_with(other)\n    if (self._value is None):\n        return Dimension(other.value)\n    else:\n        return Dimension(self._value)", "docstring": "Returns a Dimension that combines the information in `self` and `other`.\n\nDimensions are combined as follows:\n\n```python\ntf.Dimension(n)   .merge_with(tf.Dimension(n))    == tf.Dimension(n)\ntf.Dimension(n)   .merge_with(tf.Dimension(None)) == tf.Dimension(n)\ntf.Dimension(None).merge_with(tf.Dimension(n))    == tf.Dimension(n)\ntf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)\ntf.Dimension(n)   .merge_with(tf.Dimension(m))  # raises ValueError for n != m\n```\n\nArgs:\nother: Another Dimension.\n\nReturns:\nA Dimension containing the combined information of `self` and\n`other`.\n\nRaises:\nValueError: If `self` and `other` are not convertible (see\nis_convertible_with).", "source": "codesearchnet"}
{"code": "def find_vulnerabilities(cfg_list, blackbox_mapping_file, sources_and_sinks_file, interactive=False, nosec_lines=defaultdict(set)):\n    vulnerabilities = list()\n    definitions = parse(sources_and_sinks_file)\n    with open(blackbox_mapping_file) as infile:\n        blackbox_mapping = json.load(infile)\n    for cfg in cfg_list:\n        find_vulnerabilities_in_cfg(cfg, definitions, Lattice(cfg.nodes), blackbox_mapping, vulnerabilities, interactive, nosec_lines)\n    if interactive:\n        with open(blackbox_mapping_file, 'w') as outfile:\n            json.dump(blackbox_mapping, outfile, indent=4)\n    return vulnerabilities", "docstring": "Find vulnerabilities in a list of CFGs from a trigger_word_file.\n\nArgs:\ncfg_list(list[CFG]): the list of CFGs to scan.\nblackbox_mapping_file(str)\nsources_and_sinks_file(str)\ninteractive(bool): determines if we ask the user about blackbox functions not in the mapping file.\nReturns:\nA list of vulnerabilities.", "source": "codesearchnet"}
{"code": "def stop_requested(self):\n    return self._stop_requested", "docstring": "Returns whether a stop is requested or not.\n\nIf true, `MonitoredSession` stops iterations.\nReturns:\nA `bool`", "source": "github-repos"}
{"code": "def draw(self, current_time, frame_time):\n        \n        self.set_default_viewport()\n        self.timeline.draw(current_time, frame_time, self.fbo)", "docstring": "Draws a frame. Internally it calls the\nconfigured timeline's draw method.\n\nArgs:\ncurrent_time (float): The current time (preferrably always from the configured timer class)\nframe_time (float): The duration of the previous frame in seconds", "source": "juraj-google-style"}
{"code": "def _ParseDateTimeValue(self, byte_stream, file_offset):\n    \n    datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')\n\n    try:\n      value = self._ReadStructureFromByteStream(\n          byte_stream, file_offset, datetime_value_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse datetime value with error: {0!s}'.format(exception))\n\n    direction_from_utc = chr(value.direction_from_utc)\n    rfc2579_date_time_tuple = (\n        value.year, value.month, value.day_of_month,\n        value.hours, value.minutes, value.seconds, value.deciseconds,\n        direction_from_utc, value.hours_from_utc, value.minutes_from_utc)\n    return dfdatetime_rfc2579_date_time.RFC2579DateTime(\n        rfc2579_date_time_tuple=rfc2579_date_time_tuple)", "docstring": "Parses a CUPS IPP RFC2579 date-time value from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nfile_offset (int): offset of the attribute data relative to the start of\nthe file-like object.\n\nReturns:\ndfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.\n\nRaises:\nParseError: when the RFC2579 date-time value cannot be parsed.", "source": "juraj-google-style"}
{"code": "def failure_reason(self, failure_index=None):\n    (phase, _) = self._get_failed_phase(failure_index)\n    return phase.failure_reason", "docstring": "Get the reason for a failure.\n\nArgs:\nfailure_index: Index of the fail to return the graph for (can be\nnegative). If None, the most appropriate failure is chosen\naccording to these rules:\n- If the fail is cyclic, the most recent fail (the one containing\nthe cycle) is used;\n- If a callback has caused a failure, the most recent fail is used;\n- Otherwise, the first fail is used.\n\nReturns:\nA `FailureReason` subclass instance describing the failure.", "source": "codesearchnet"}
{"code": "def SetTimezone(self, timezone):\n    if (not timezone):\n        return\n    try:\n        self._timezone = pytz.timezone(timezone)\n    except pytz.UnknownTimeZoneError:\n        raise ValueError('Unsupported timezone: {0:s}'.format(timezone))", "docstring": "Sets the timezone.\n\nArgs:\ntimezone (str): timezone.\n\nRaises:\nValueError: if the timezone is not supported.", "source": "codesearchnet"}
{"code": "def malware(self, malware, password, file_name):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        self._data['malware'] = malware\n        self._data['password'] = password\n        self._data['fileName'] = file_name\n        request = {'malware': malware, 'password': password, 'fileName': file_name}\n        return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Uploads to malware vault.\n\nArgs:\nmalware:\npassword:\nfile_name:", "source": "juraj-google-style"}
{"code": "def bbox_rotate(bbox, angle, rows, cols, interpolation):\n    \n    scale = cols / float(rows)\n    x = np.array([bbox[0], bbox[2], bbox[2], bbox[0]])\n    y = np.array([bbox[1], bbox[1], bbox[3], bbox[3]])\n    x = x - 0.5\n    y = y - 0.5\n    angle = np.deg2rad(angle)\n    x_t = (np.cos(angle) * x * scale + np.sin(angle) * y) / scale\n    y_t = (-np.sin(angle) * x * scale + np.cos(angle) * y)\n    x_t = x_t + 0.5\n    y_t = y_t + 0.5\n    return [min(x_t), min(y_t), max(x_t), max(y_t)]", "docstring": "Rotates a bounding box by angle degrees\n\nArgs:\nbbox (tuple): A tuple (x_min, y_min, x_max, y_max).\nangle (int): Angle of rotation in degrees\nrows (int): Image rows.\ncols (int): Image cols.\ninterpolation (int): interpolation method.\n\nreturn a tuple (x_min, y_min, x_max, y_max)", "source": "juraj-google-style"}
{"code": "def delete_channel(self, channel_name, project_name, dataset_name):\n        \n        return self.resources.delete_channel(channel_name, project_name,\n                                             dataset_name)", "docstring": "Deletes a channel given its name, name of its project\n, and name of its dataset.\n\nArguments:\nchannel_name (str): Channel name\nproject_name (str): Project name\ndataset_name (str): Dataset name\n\nReturns:\nbool: True if channel deleted, False if not", "source": "juraj-google-style"}
{"code": "def index_data(self, data, index_name, doc_type):\n        \n\n        \n        \n        if not isinstance(data, dict):\n            raise RuntimeError('Index failed, data needs to be a dict!')\n\n        try:\n            self.els_search.index(index=index_name, doc_type=doc_type, body=data)\n        except Exception, error:\n            print 'Index failed: %s' % str(error)\n            raise RuntimeError('Index failed: %s' % str(error))", "docstring": "Take an arbitrary dictionary of data and index it with ELS.\n\nArgs:\ndata: data to be Indexed. Should be a dictionary.\nindex_name: Name of the index.\ndoc_type: The type of the document.\n\nRaises:\nRuntimeError: When the Indexing fails.", "source": "juraj-google-style"}
{"code": "def set_device_name(self, new_name):\n    device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n    if (device_name is None):\n        logger.warn('Failed to find handle for device name')\n        return False\n    if (len(new_name) > MAX_DEVICE_NAME_LEN):\n        logger.error('Device name exceeds maximum length ({} > {})'.format(len(new_name), MAX_DEVICE_NAME_LEN))\n        return False\n    if self.dongle._write_attribute(self.conn_handle, device_name, new_name.encode('ascii')):\n        self.name = new_name\n        return True\n    return False", "docstring": "Sets a new BLE device name for this SK8.\n\nArgs:\nnew_name (str): the new device name as an ASCII string, max 20 characters.\n\nReturns:\nTrue if the name was updated successfully, False otherwise.", "source": "codesearchnet"}
{"code": "def add_to_tensor(self, mat, name='add_to_tensor'):\n    with self._name_scope(name):\n        mat = tensor_conversion.convert_to_tensor_v2_with_dispatch(mat, name='mat')\n        mat_diag = array_ops.matrix_diag_part(mat)\n        new_diag = 1 + mat_diag\n        return array_ops.matrix_set_diag(mat, new_diag)", "docstring": "Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.\n\nArgs:\nmat:  `Tensor` with same `dtype` and shape broadcastable to `self`.\nname:  A name to give this `Op`.\n\nReturns:\nA `Tensor` with broadcast shape and same `dtype` as `self`.", "source": "github-repos"}
{"code": "def _as_indexed_slices(x, optimize=True):\n    if not isinstance(x, (tensor_lib.Tensor, indexed_slices.IndexedSlices)):\n        raise TypeError(f'Not a Tensor or IndexedSlices: {type(x)}.')\n    if isinstance(x, indexed_slices.IndexedSlices):\n        return x\n    x_shape = array_ops.shape_internal(x, optimize=optimize)\n    return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape)", "docstring": "Convert 'x' to IndexedSlices.\n\nConvert a dense Tensor to a block-sparse IndexedSlices.\n\nArgs:\nx: Either a Tensor object, or an IndexedSlices object.\noptimize: if true, attempt to optimize the conversion of 'x'.\n\nReturns:\nAn IndexedSlices object.\n\nRaises:\nTypeError: If 'x' is not a Tensor or an IndexedSlices object.", "source": "github-repos"}
{"code": "def register_event(self, *names):\n        \n        for name in names:\n            if name in self.__events:\n                continue\n            self.__events[name] = Event(name)", "docstring": "Registers new events after instance creation\n\nArgs:\n*names (str): Name or names of the events to register", "source": "juraj-google-style"}
{"code": "def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True, shard_ctx=None, slice_ctx=None):\n    if ((obj is None) or (not isinstance(obj, shard_life_cycle._ShardLifeCycle))):\n        return\n    shard_context = (shard_ctx or self.shard_context)\n    slice_context = (slice_ctx or self.slice_context)\n    if begin_slice:\n        if (slice_id == 0):\n            obj.begin_shard(shard_context)\n        obj.begin_slice(slice_context)\n    else:\n        obj.end_slice(slice_context)\n        if last_slice:\n            obj.end_shard(shard_context)", "docstring": "Makes sure shard life cycle interface are respected.\n\nArgs:\nobj: the obj that may have implemented _ShardLifeCycle.\nslice_id: current slice_id\nlast_slice: whether this is the last slice.\nbegin_slice: whether this is the beginning or the end of a slice.\nshard_ctx: shard ctx for dependency injection. If None, it will be read\nfrom self.\nslice_ctx: slice ctx for dependency injection. If None, it will be read\nfrom self.", "source": "codesearchnet"}
{"code": "def destringize(self, string):\n    m = segment_destr_pattern.match(string)\n    self.genome_id = int(m.group(1))\n    self.chr_id = int(m.group(2))\n    self.direction = m.group(3)\n    self.left = int(m.group(4))\n    self.right = int(m.group(5))", "docstring": "Get RNF values for this segment from its textual representation and\nsave them into this object.\n\nArgs:\nstring (str): Textual representation of a segment.", "source": "codesearchnet"}
{"code": "def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):\n    diff = np.abs((arr1.astype(np.int) - arr2), dtype=np.int)\n    return np.maximum((diff - min_diff), 0).astype(dtype)", "docstring": "Point-wise, hinge loss-like, difference between arrays.\n\nArgs:\narr1: integer array to compare.\narr2: integer array to compare.\nmin_diff: minimal difference taken into consideration.\ndtype: dtype of returned array.\n\nReturns:\narray", "source": "codesearchnet"}
{"code": "def set_wd_noise(self, wd_noise):\n    if isinstance(wd_noise, bool):\n        wd_noise = str(wd_noise)\n    if ((wd_noise.lower() == 'yes') or (wd_noise.lower() == 'true')):\n        wd_noise = 'True'\n    elif ((wd_noise.lower() == 'no') or (wd_noise.lower() == 'false')):\n        wd_noise = 'False'\n    elif (wd_noise.lower() == 'both'):\n        wd_noise = 'Both'\n    else:\n        raise ValueError('wd_noise must be yes, no, True, False, or Both.')\n    self.sensitivity_input.add_wd_noise = wd_noise\n    return", "docstring": "Add White Dwarf Background Noise\n\nThis adds the White Dwarf (WD) Background noise. This can either do calculations with,\nwithout, or with and without WD noise.\n\nArgs:\nwd_noise (bool or str, optional): Add or remove WD background noise. First option is to\nhave only calculations with the wd_noise. For this, use `yes` or True.\nSecond option is no WD noise. For this, use `no` or False. For both calculations\nwith and without WD noise, use `both`.\n\nRaises:\nValueError: Input value is not one of the options.", "source": "codesearchnet"}
{"code": "def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None:\n    existing_plugin = cls.available.get(name)\n    if (existing_plugin is None):\n        cls.available[name] = plugin\n    elif (existing_plugin != plugin):\n        raise ConnectionPluginAlreadyRegistered(f\"Connection plugin {plugin.__name__} can't be registered as {name!r} because plugin {existing_plugin.__name__} was already registered under this name\")", "docstring": "Registers a connection plugin with a specified name\n\nArgs:\nname: name of the connection plugin to register\nplugin: defined connection plugin class\n\nRaises:\n:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if\nanother plugin with the specified name was already registered", "source": "codesearchnet"}
{"code": "def validate(self, value):\n    cast_callback = (self.cast_callback if self.cast_callback else self.cast_type)\n    try:\n        return (value if isinstance(value, self.cast_type) else cast_callback(value))\n    except Exception:\n        raise NodeTypeError('Invalid value `{}` for {}.'.format(value, self.cast_type))", "docstring": "Base validation method. Check if type is valid, or try brute casting.\n\nArgs:\nvalue (object): A value for validation.\n\nReturns:\nBase_type instance.\n\nRaises:\nSchemaError, if validation or type casting fails.", "source": "codesearchnet"}
{"code": "def parse_html(self, text):\n        \n        bs = BeautifulSoup(text, \"html5lib\")\n        file_reg = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)\n        tags = bs.findAll('img')\n\n        for tag in tags:\n            \n            src_text = tag.get(\"src\") or \"\"\n            formatted_src_match = file_reg.search(src_text)\n            src_text = formatted_src_match.group(2) if formatted_src_match else src_text\n\n            alt_text = tag.get(\"alt\") or \"\"\n            tag.replaceWith(\"![{alt}]({src})\".format(alt=alt_text, src=src_text))\n        return html.unescape(bs.find('body').renderContents().decode('utf-8'))", "docstring": "parse_html: Properly formats any img tags that might be in content\nArgs:\ntext (str): text to parse\nReturns: string with properly formatted images", "source": "juraj-google-style"}
{"code": "def make_mutant_tuples(example_protos, original_feature, index_to_mutate, viz_params):\n    mutant_features = make_mutant_features(original_feature, index_to_mutate, viz_params)\n    mutant_examples = []\n    for example_proto in example_protos:\n        for mutant_feature in mutant_features:\n            copied_example = copy.deepcopy(example_proto)\n            feature_name = mutant_feature.original_feature.feature_name\n            try:\n                feature_list = proto_value_for_feature(copied_example, feature_name)\n                if (index_to_mutate is None):\n                    new_values = mutant_feature.mutant_value\n                else:\n                    new_values = list(feature_list)\n                    new_values[index_to_mutate] = mutant_feature.mutant_value\n                del feature_list[:]\n                feature_list.extend(new_values)\n                mutant_examples.append(copied_example)\n            except (ValueError, IndexError):\n                mutant_examples.append(copied_example)\n    return (mutant_features, mutant_examples)", "docstring": "Return a list of `MutantFeatureValue`s and a list of mutant Examples.\n\nArgs:\nexample_protos: The examples to mutate.\noriginal_feature: A `OriginalFeatureList` that encapsulates the feature to\nmutate.\nindex_to_mutate: The index of the int64_list or float_list to mutate.\nviz_params: A `VizParams` object that contains the UI state of the request.\n\nReturns:\nA list of `MutantFeatureValue`s and a list of mutant examples.", "source": "codesearchnet"}
{"code": "def children(self, sourcepath, recursive=True):\n        \n        return self._get_recursive_dependancies(\n            self._CHILDREN_MAP,\n            sourcepath,\n            recursive=True\n        )", "docstring": "Recursively find all children that are imported from the given source\npath.\n\nArgs:\nsourcepath (str): Source file path to search for.\n\nKeyword Arguments:\nrecursive (bool): Switch to enabled recursive finding (if True).\nDefault to True.\n\nReturns:\nset: List of finded parents path.", "source": "juraj-google-style"}
{"code": "def __init__(self, cache: PagedAttentionCache, config: PretrainedConfig, generation_config: GenerationConfig, input_queue: queue.Queue, output_queue: queue.Queue, stop_event: threading.Event, model_device: torch.device, model_dtype: torch.dtype, scheduler: Scheduler, streaming: bool=False, manual_eviction: bool=False):\n    self.cache = cache\n    self.config = config\n    self.generation_config = generation_config\n    self.input_queue = input_queue\n    self.output_queue = output_queue\n    self.stop_event = stop_event\n    self.model_device = model_device\n    self.model_dtype = model_dtype\n    self.scheduler = scheduler\n    self.streaming = streaming\n    self.manual_eviction = manual_eviction\n    self.requests_in_batch: List[RequestState] = []\n    self._configure_batch_parameters()\n    self.metrics = ContinuousBatchProcessorMetrics(self.max_batch_tokens)\n    self.setup_static_tensors()", "docstring": "Initialize the continuous batch processor.\n\nArgs:\ncache: The paged attention cache to use\ngeneration_config: The generation configuration\ninput_queue: Queue for incoming requests\noutput_queue: Queue for outgoing results\nstop_event: Event to signal processing should stop\nmodel_device: Device for model inputs/outputs\nmodel_dtype: Data type for model inputs/outputs\nstreaming: Whether to stream tokens as they're generated", "source": "github-repos"}
{"code": "def distribute_dataset(self, dataset):\n    raise NotImplementedError()", "docstring": "Create a distributed dataset instance from the original user dataset.\n\nArgs:\ndataset: the original global dataset instance. Only\n`tf.data.Dataset` is supported at the moment.\n\nReturns:\na sharded `tf.data.Dataset` instance, which will produce data for\nthe current local worker/process.", "source": "github-repos"}
{"code": "def create_parser(default_name: str) -> argparse.ArgumentParser:\n    argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n    argparser.add_argument('-H', '--host', help='Host to which the app binds. [%(default)s]', default='0.0.0.0')\n    argparser.add_argument('-p', '--port', help='Port to which the app binds. [%(default)s]', default=5000, type=int)\n    argparser.add_argument('-o', '--output', help='Logging output. [%(default)s]')\n    argparser.add_argument('-n', '--name', help='Service name. This will be used as prefix for all endpoints. [%(default)s]', default=default_name)\n    argparser.add_argument('--debug', help='Run the app in debug mode. [%(default)s]', action='store_true')\n    argparser.add_argument('--eventbus-host', help='Hostname at which the eventbus can be reached [%(default)s]', default='eventbus')\n    argparser.add_argument('--eventbus-port', help='Port at which the eventbus can be reached [%(default)s]', default=5672, type=int)\n    return argparser", "docstring": "Creates the default brewblox_service ArgumentParser.\nService-agnostic arguments are added.\n\nThe parser allows calling code to add additional arguments before using it in create_app()\n\nArgs:\ndefault_name (str):\ndefault value for the --name commandline argument.\n\nReturns:\nargparse.ArgumentParser: a Python ArgumentParser with defaults set.", "source": "codesearchnet"}
{"code": "def ctc_label_dense_to_sparse(labels, label_lengths):\n    label_shape = array_ops.shape(labels)\n    num_batches_tns = array_ops_stack.stack([label_shape[0]])\n    max_num_labels_tns = array_ops_stack.stack([label_shape[1]])\n\n    def range_less_than(old_input, current_input):\n        return array_ops.expand_dims(math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill(max_num_labels_tns, current_input)\n    init = math_ops.cast(array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)\n    dense_mask = functional_ops.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)\n    dense_mask = dense_mask[:, 0, :]\n    label_array = array_ops.reshape(array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns), label_shape)\n    label_ind = array_ops.boolean_mask(label_array, dense_mask)\n    batch_array = array_ops.transpose(array_ops.reshape(array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns), reverse(label_shape, 0)))\n    batch_ind = array_ops.boolean_mask(batch_array, dense_mask)\n    indices = array_ops.transpose(array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))\n    vals_sparse = array_ops.gather_nd(labels, indices)\n    return sparse_tensor.SparseTensor(math_ops.cast(indices, dtypes_module.int64), vals_sparse, math_ops.cast(label_shape, dtypes_module.int64))", "docstring": "Converts CTC labels from dense to sparse.\n\nArgs:\nlabels: dense CTC labels.\nlabel_lengths: length of the labels.\n\nReturns:\nA sparse tensor representation of the labels.", "source": "github-repos"}
{"code": "def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):\n    key = ('charset' if (not unique_glyphs) else 'ownCharset')\n    internals_dir = os.path.dirname(os.path.abspath(__file__))\n    target = os.path.join(internals_dir, namFilename)\n    result = readNamelist(target, unique_glyphs, cache)\n    return result[key]", "docstring": "Returns the set of codepoints contained in a given Namelist file.\n\nThis is a replacement CodepointsInSubset and implements the \"#$ include\"\nheader format.\n\nArgs:\nnamFilename: The path to the  Namelist file.\nunique_glyphs: Optional, whether to only include glyphs unique to subset.\nReturns:\nA set containing the glyphs in the subset.", "source": "codesearchnet"}
{"code": "def populate_readme(revision, rtd_version, **extra_kwargs):\n    with open(TEMPLATE_FILE, 'r') as file_obj:\n        template = file_obj.read()\n    img_prefix = IMG_PREFIX.format(revision=revision)\n    extra_links = EXTRA_LINKS.format(rtd_version=rtd_version, revision=revision)\n    docs_img = DOCS_IMG.format(rtd_version=rtd_version)\n    bernstein_basis = BERNSTEIN_BASIS_PLAIN.format(img_prefix=img_prefix)\n    bezier_defn = BEZIER_DEFN_PLAIN.format(img_prefix=img_prefix)\n    sum_to_unity = SUM_TO_UNITY_PLAIN.format(img_prefix=img_prefix)\n    template_kwargs = {'code_block1': PLAIN_CODE_BLOCK, 'code_block2': PLAIN_CODE_BLOCK, 'code_block3': PLAIN_CODE_BLOCK, 'testcleanup': '', 'toctree': '', 'bernstein_basis': bernstein_basis, 'bezier_defn': bezier_defn, 'sum_to_unity': sum_to_unity, 'img_prefix': img_prefix, 'extra_links': extra_links, 'docs': '|docs| ', 'docs_img': docs_img, 'pypi': '\\n\\n|pypi| ', 'pypi_img': PYPI_IMG, 'versions': '|versions|\\n\\n', 'versions_img': VERSIONS_IMG, 'rtd_version': rtd_version, 'revision': revision, 'circleci_badge': CIRCLECI_BADGE, 'circleci_path': '', 'travis_badge': TRAVIS_BADGE, 'travis_path': '', 'appveyor_badge': APPVEYOR_BADGE, 'appveyor_path': '', 'coveralls_badge': COVERALLS_BADGE, 'coveralls_path': COVERALLS_PATH, 'zenodo': '|zenodo|', 'zenodo_img': ZENODO_IMG, 'joss': ' |JOSS|', 'joss_img': JOSS_IMG}\n    template_kwargs.update(**extra_kwargs)\n    readme_contents = template.format(**template_kwargs)\n    readme_contents = INLINE_MATH_EXPR.sub(inline_math, readme_contents)\n    sphinx_modules = []\n    to_replace = functools.partial(mod_replace, sphinx_modules=sphinx_modules)\n    readme_contents = MOD_EXPR.sub(to_replace, readme_contents)\n    if (sphinx_modules != ['bezier.curve', 'bezier.surface']):\n        raise ValueError('Unexpected sphinx_modules', sphinx_modules)\n    sphinx_docs = []\n    to_replace = functools.partial(doc_replace, sphinx_docs=sphinx_docs)\n    readme_contents = DOC_EXPR.sub(to_replace, readme_contents)\n    if (sphinx_docs != ['python/reference/bezier', 'development']):\n        raise ValueError('Unexpected sphinx_docs', sphinx_docs)\n    return readme_contents", "docstring": "Populate README template with values.\n\nArgs:\nrevision (str): The branch, commit, etc. being referred to (e.g.\n``master``).\nrtd_version (str): The version to use for RTD (Read the Docs) links\n(e.g. ``latest``).\nextra_kwargs (Dict[str, str]): Over-ride for template arguments.\n\nReturns:\nstr: The populated README contents.\n\nRaises:\nValueError: If the ``sphinx_modules`` encountered are not as expected.\nValueError: If the ``sphinx_docs`` encountered are not as expected.", "source": "codesearchnet"}
{"code": "def generate_password_hash(password, salt, N=(1 << 14), r=8, p=1, buflen=64):\n    if PYTHON2:\n        password = password.encode('utf-8')\n        salt = salt.encode('utf-8')\n    pw_hash = scrypt_hash(password, salt, N, r, p, buflen)\n    return enbase64(pw_hash)", "docstring": "Generate password hash givin the password string and salt.\n\nArgs:\n- ``password``: Password string.\n- ``salt`` : Random base64 encoded string.\n\nOptional args:\n- ``N`` : the CPU cost, must be a power of 2 greater than 1, defaults to 1 << 14.\n- ``r`` : the memory cost, defaults to 8.\n- ``p`` : the parallelization parameter, defaults to 1.\n\nThe parameters r, p, and buflen must satisfy r * p < 2^30 and\nbuflen <= (2^32 - 1) * 32.\n\nThe recommended parameters for interactive logins as of 2009 are N=16384,\nr=8, p=1. Remember to use a good random salt.\n\nReturns:\n- base64 encoded scrypt hash.", "source": "codesearchnet"}
{"code": "def create_container_instance_group(access_token, subscription_id, resource_group, container_group_name, container_list, location, ostype='Linux', port=80, iptype='public'):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API])\n    container_group_body = {'location': location}\n    properties = {'osType': ostype}\n    properties['containers'] = container_list\n    ipport = {'protocol': 'TCP'}\n    ipport['port'] = port\n    ipaddress = {'ports': [ipport]}\n    ipaddress['type'] = iptype\n    properties['ipAddress'] = ipaddress\n    container_group_body['properties'] = properties\n    body = json.dumps(container_group_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a new container group with a list of containers specifified by container_list.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ncontainer_group_name (str): Name of container instance group.\ncontainer_list (list): A list of container properties. Use create_container_definition to\ncreate each container property set.\nlocation (str): Azure data center location. E.g. westus.\nostype (str): Container operating system type. Linux or Windows.\nport (int): TCP port number. E.g. 8080.\niptype (str): Type of IP address. E.g. public.\n\nReturns:\nHTTP response with JSON body of container group.", "source": "codesearchnet"}
{"code": "def method(*args, **kwargs):\n    \n    assert len(args) == 0\n    assert len(kwargs) == 1\n    assert \"num_return_vals\" in kwargs\n    num_return_vals = kwargs[\"num_return_vals\"]\n\n    def annotate_method(method):\n        method.__ray_num_return_vals__ = num_return_vals\n        return method\n\n    return annotate_method", "docstring": "Annotate an actor method.\n\n.. code-block:: python\n\n@ray.remote\nclass Foo(object):\n@ray.method(num_return_vals=2)\ndef bar(self):\nreturn 1, 2\n\nf = Foo.remote()\n\n_, _ = f.bar.remote()\n\nArgs:\nnum_return_vals: The number of object IDs that should be returned by\ninvocations of this actor method.", "source": "juraj-google-style"}
{"code": "def deploy_template_uri_param_uri(access_token, subscription_id, resource_group, deployment_name, template_uri, parameters_uri):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API])\n    properties = {'templateLink': {'uri': template_uri}}\n    properties['mode'] = 'Incremental'\n    properties['parametersLink'] = {'uri': parameters_uri}\n    template_body = {'properties': properties}\n    body = json.dumps(template_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Deploy a template with both template and parameters referenced by URIs.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ndeployment_name (str): A name you give to the deployment.\ntemplate_uri (str): URI which points to a JSON template (e.g. github raw location).\nparameters_uri (str): URI which points to a JSON parameters file (e.g. github raw location).\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def temp44(msg):\n    \n    d = hex2bin(data(msg))\n\n    sign = int(d[23])\n    value = bin2int(d[24:34])\n\n    if sign:\n        value = value - 1024\n\n    temp = value * 0.25   \n    temp = round(temp, 2)\n\n    temp_alternative = value * 0.125   \n    temp_alternative = round(temp, 3)\n\n    return temp, temp_alternative", "docstring": "Static air temperature.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nfloat, float: temperature and alternative temperature in Celsius degree.\nNote: Two values returns due to what seems to be an inconsistancy\nerror in ICAO 9871 (2008) Appendix A-67.", "source": "juraj-google-style"}
{"code": "def regularize_cost_from_collection(name='regularize_cost'):\n    ctx = get_current_tower_context()\n    if (not ctx.is_training):\n        return tf.constant(0, dtype=tf.float32, name=('empty_' + name))\n    if ctx.has_own_variables:\n        losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES)\n    else:\n        losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)\n    if (len(losses) > 0):\n        logger.info('regularize_cost_from_collection() found {} regularizers in REGULARIZATION_LOSSES collection.'.format(len(losses)))\n\n        def maploss(l):\n            assert l.dtype.is_floating, l\n            if (l.dtype != tf.float32):\n                l = tf.cast(l, tf.float32)\n            return l\n        losses = [maploss(l) for l in losses]\n        reg_loss = tf.add_n(losses, name=name)\n        return reg_loss\n    else:\n        return tf.constant(0, dtype=tf.float32, name=('empty_' + name))", "docstring": "Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.\nIf in replicated mode, will only regularize variables created within the current tower.\n\nArgs:\nname (str): the name of the returned tensor\n\nReturns:\ntf.Tensor: a scalar, the total regularization cost.", "source": "codesearchnet"}
{"code": "def register_views(self, app):\n        \n        self.add_resource(LoginRedirectView, '/auth/login')\n        self.add_resource(LogoutRedirectView, '/auth/logout')\n\n        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:\n            cls = entry_point.load()\n            app.available_auth_systems[cls.name] = cls\n\n            if app.register_auth_system(cls):\n                for vcls in cls.views:\n                    self.add_resource(vcls, *vcls.URLS)\n                    logger.debug('Registered auth system view {} for paths: {}'.format(\n                        cls.__name__,\n                        ', '.join(vcls.URLS)\n                    ))\n\n        if not app.active_auth_system:\n            logger.error('No auth systems active, please enable an auth system and then start the system again')\n            sys.exit(-1)\n\n        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']:\n            view = entry_point.load()\n            self.add_resource(view, *view.URLS)\n            app.register_menu_item(view.MENU_ITEMS)\n\n            logger.debug('Registered view {} for paths: {}'.format(view.__name__, ', '.join(view.URLS)))", "docstring": "Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask\n\nArgs:\napp (`CINQFlask`): CINQFlask object to register views for\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def __init__(self, scope, parent, expression=None):\n        \n        CodeStatement.__init__(self, scope, parent)\n        self.expression = expression", "docstring": "Constructor for expression statements.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\n\nKwargs:\nexpression (CodeExpression): The expression of this statement.", "source": "juraj-google-style"}
{"code": "def find_executable_batch_size(function: Optional[callable]=None, starting_batch_size: int=128, auto_find_batch_size: bool=False):\n    if function is None:\n        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size)\n    if auto_find_batch_size:\n        requires_backends(find_executable_batch_size, 'accelerate')\n        from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size\n        return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size)\n    return functools.partial(function, batch_size=starting_batch_size)", "docstring": "Args:\nA basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or\nCUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as\nits first argument.\nfunction (`callable`, *optional*)\nA function to wrap\nstarting_batch_size (`int`, *optional*)\nThe batch size to try and fit into memory\nauto_find_batch_size (`bool`, *optional*)\nIf False, will just execute `function`", "source": "github-repos"}
{"code": "def visualize_training(images_val, reconstructed_images_val, random_images_val, log_dir, prefix, viz_n=10):\n    save_imgs(images_val[:viz_n], os.path.join(log_dir, '{}_inputs.png'.format(prefix)))\n    save_imgs(reconstructed_images_val[:viz_n], os.path.join(log_dir, '{}_reconstructions.png'.format(prefix)))\n    if (random_images_val is not None):\n        save_imgs(random_images_val[:viz_n], os.path.join(log_dir, '{}_prior_samples.png'.format(prefix)))", "docstring": "Helper method to save images visualizing model reconstructions.\n\nArgs:\nimages_val: Numpy array containing a batch of input images.\nreconstructed_images_val: Numpy array giving the expected output\n(mean) of the decoder.\nrandom_images_val: Optionally, a Numpy array giving the expected output\n(mean) of decoding samples from the prior, or `None`.\nlog_dir: The directory to write images (Python `str`).\nprefix: A specific label for the saved visualizations, which\ndetermines their filenames (Python `str`).\nviz_n: The number of images from each batch to visualize (Python `int`).", "source": "codesearchnet"}
{"code": "def from_string(cls, s, name=None, modules=None, active=None):\n        \n        r = cls(name=name, modules=modules, active=active)\n        _parse_repp(s.splitlines(), r, None)\n        return r", "docstring": "Instantiate a REPP from a string.\n\nArgs:\nname (str, optional): the name of the REPP module\nmodules (dict, optional): a mapping from identifiers to\nREPP modules\nactive (iterable, optional): an iterable of default module\nactivations", "source": "juraj-google-style"}
{"code": "def success(channel, title, datapacks):\n    \n\n    \n    gui = ui_embed.UI(\n        channel,\n        title,\n        \"\",\n        modulename=modulename,\n        datapacks=datapacks\n    )\n\n    return gui", "docstring": "Creates an embed UI containing the help message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\ntitle (str): The title of the embed\ndatapacks (list): The hex value\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def convert(self, vroot, entry_variables):\n        \n        self.graph_info = GraphInfo(vroot)\n        self.entry_variables = entry_variables\n\n        with nn.parameter_scope(self.name):\n            \n            for func in self.graph_info.funcs:\n                o = self._identity_conversion(func)\n            self.end_variable = o\n        return self.end_variable", "docstring": "All functions are replaced with the same `new` function.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj-google-style"}
{"code": "def refresh_access_token(self, refresh_token):\n    request = self._get_request()\n    response = request.post(self.OAUTH_TOKEN_URL, {'grant_type': 'refresh_token', 'refresh_token': refresh_token})\n    self.auth = HSAccessTokenAuth.from_response(response)\n    return self.auth.access_token", "docstring": "Refreshes the current access token.\n\nGets a new access token, updates client auth and returns it.\n\nArgs:\n\nrefresh_token (str): Refresh token to use\n\nReturns:\nThe new access token", "source": "codesearchnet"}
{"code": "def min_row_dist_sum_idx(dists):\n    \n    row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)\n    return row_sums.argmin()", "docstring": "Find the index of the row with the minimum row distance sum\n\nThis should return the index of the row index with the least distance overall\nto all other rows.\n\nArgs:\ndists (np.array): must be square distance matrix\n\nReturns:\nint: index of row with min dist row sum", "source": "juraj-google-style"}
{"code": "def highlight_code(text, lexer_name='python', **kwargs):\n    lexer_name = {'py': 'python', 'h': 'cpp', 'cpp': 'cpp', 'cxx': 'cpp', 'c': 'cpp'}.get(lexer_name.replace('.', ''), lexer_name)\n    try:\n        import pygments\n        import pygments.lexers\n        import pygments.formatters\n        import pygments.formatters.terminal\n        if sys.platform.startswith('win32'):\n            import colorama\n            colorama.init()\n        formater = pygments.formatters.terminal.TerminalFormatter(bg='dark')\n        lexer = pygments.lexers.get_lexer_by_name(lexer_name, **kwargs)\n        new_text = pygments.highlight(text, lexer, formater)\n    except ImportError:\n        import warnings\n        warnings.warn('pygments is not installed, code will not be highlighted')\n        new_text = text\n    return new_text", "docstring": "Highlights a block of text using ANSI tags based on language syntax.\n\nArgs:\ntext (str): plain text to highlight\nlexer_name (str): name of language\n**kwargs: passed to pygments.lexers.get_lexer_by_name\n\nReturns:\nstr: text : highlighted text\nIf pygments is not installed, the plain text is returned.\n\nCommandLine:\npython -c \"import pygments.formatters; print(list(pygments.formatters.get_all_formatters()))\"\n\nExample:\n>>> import ubelt as ub\n>>> text = 'import ubelt as ub; print(ub)'\n>>> new_text = ub.highlight_code(text)\n>>> print(new_text)", "source": "codesearchnet"}
{"code": "def get(self, workflow_id):\n    try:\n        db = self._client[self.database]\n        fs = GridFSProxy(GridFS(db.unproxied_object))\n        return DataStoreDocument(db[WORKFLOW_DATA_COLLECTION_NAME], fs, workflow_id)\n    except ConnectionFailure:\n        raise DataStoreNotConnected()", "docstring": "Returns the document for the given workflow id.\n\nArgs:\nworkflow_id (str): The id of the document that represents a workflow run.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.\n\nReturns:\nDataStoreDocument: The document for the given workflow id.", "source": "codesearchnet"}
{"code": "def _BatchNormWithGlobalNormalizationGrad(op: ops.Operation, grad):\n    dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad, op.get_attr('variance_epsilon'), op.get_attr('scale_after_normalization'))\n    return (dx, dm, dv, db, dg)", "docstring": "Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.\n\nWe do not backprop anything for the mean and var intentionally as they are\nnot being trained with backprop in the operation.\n\nArgs:\nop: The BatchNormOp for which we need to generate gradients.\ngrad: Tensor.  The gradients passed to the BatchNormOp.\n\nReturns:\ndx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))\ndm: Backprop for mean, which is\nsum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))\ndv: Backprop for variance, which is\nsum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)\ndb: Backprop for beta, which is grad reduced in all except the\nlast dimension.\ndg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))", "source": "github-repos"}
{"code": "def get_resize_output_image_size(image, size, input_data_format) -> Tuple[int, int]:\n    height, width = get_image_size(image, channel_dim=input_data_format)\n    min_len = size['shortest_edge']\n    max_len = size['longest_edge']\n    aspect_ratio = width / height\n    if width >= height and width > max_len:\n        width = max_len\n        height = int(width / aspect_ratio)\n    elif height > width and height > max_len:\n        height = max_len\n        width = int(height * aspect_ratio)\n    height = max(height, min_len)\n    width = max(width, min_len)\n    return (height, width)", "docstring": "Get the output size of the image after resizing given a dictionary specifying the max and min sizes.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image containing the keys \"shortest_edge\" and \"longest_edge\".\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\nThe output size of the image after resizing.", "source": "github-repos"}
{"code": "def tokenize(self, text):\n    output_tokens = []\n    for token in whitespace_tokenize(text):\n        chars = list(token)\n        if len(chars) > self.max_input_chars_per_word:\n            output_tokens.append(self.unk_token)\n            continue\n        is_bad = False\n        start = 0\n        sub_tokens = []\n        while start < len(chars):\n            end = len(chars)\n            cur_substr = None\n            while start < end:\n                substr = ''.join(chars[start:end])\n                if start > 0:\n                    substr = '\n                if substr in self.vocab:\n                    cur_substr = substr\n                    break\n                end -= 1\n            if cur_substr is None:\n                is_bad = True\n                break\n            sub_tokens.append(cur_substr)\n            start = end\n        if is_bad:\n            output_tokens.append(self.unk_token)\n        else:\n            output_tokens.extend(sub_tokens)\n    return output_tokens", "docstring": "Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform\ntokenization using the given vocabulary.\n\nFor example, `input = \"unaffable\"` will return as output `[\"un\", \"##aff\", \"##able\"]`.\n\nArgs:\ntext: A single token or whitespace separated tokens. This should have\nalready been passed through *BasicTokenizer*.\n\nReturns:\nA list of wordpiece tokens.", "source": "github-repos"}
{"code": "def add_file_locations(self, file_locations=[]):\n    if (not hasattr(self, '__file_locations__')):\n        self.__file_locations__ = copy.copy(file_locations)\n    else:\n        self.__file_locations__ += copy.copy(file_locations)", "docstring": "Adds a list of file locations to the current list\n\nArgs:\nfile_locations: list of file location tuples", "source": "codesearchnet"}
{"code": "def mark_parent_tasks_as_failed(self, task_name, flush_logs=False):\n        \n        for existing_task_name in self.tasks:\n            if existing_task_name == task_name:\n                break\n\n            if flush_logs:\n                self.tasks[existing_task_name].clear()\n\n            self.tasks[existing_task_name].failed = True\n\n        self.mark_main_tasks_as_failed()", "docstring": "Marks all the parent tasks as failed\n\nArgs:\ntask_name (str): Name of the child task\nflush_logs (bool): If ``True`` will discard all the logs form\nparent tasks\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def sync_results(vcs, signature):\n    results_directory = _get_results_directory(vcs, signature)\n    if (not os.path.exists(results_directory)):\n        raise ResultsNotFoundError\n    with open(os.path.join(results_directory, 'patterns'), 'r') as f:\n        patterns = f.read().strip().split()\n    includes = ['--include={}'.format(x) for x in patterns]\n    cmd = ((['rsync', '-r'] + includes) + ['--exclude=*', os.path.join(results_directory, 'results', ''), os.path.join(vcs.path, '')])\n    subprocess.check_call(cmd)", "docstring": "Sync the saved results for `signature` back to the project.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (str)\nRaises:\nResultsNotFoundError", "source": "codesearchnet"}
{"code": "def predict(self, X=None, **kwargs):\n    context = {'X': X}\n    context.update(kwargs)\n    last_block_name = list(self.blocks.keys())[(- 1)]\n    for (block_name, block) in self.blocks.items():\n        LOGGER.debug('Producing block %s', block_name)\n        try:\n            produce_args = self._get_block_args(block_name, block.produce_args, context)\n            outputs = block.produce(**produce_args)\n            if (block_name != last_block_name):\n                output_dict = self._get_outputs(block_name, outputs, block.produce_output)\n                context.update(output_dict)\n        except Exception:\n            LOGGER.exception('Exception caught producing MLBlock %s', block_name)\n            raise\n    return outputs", "docstring": "Produce predictions using the blocks of this pipeline.\n\nSequentially call the `produce` method of each block, capturing the\noutputs before calling the next one.\n\nDuring the whole process a context dictionary is built, where both the\npassed arguments and the captured outputs of the `produce` methods\nare stored, and from which the arguments for the next `produce` calls\nwill be taken.\n\nArgs:\nX: Data which the pipeline will use to make predictions.\n**kwargs: Any additional keyword arguments will be directly added\nto the context dictionary and available for the blocks.", "source": "codesearchnet"}
{"code": "def _get_target_encoder(self, x, y):\n        \n\n        assert len(x) == len(y)\n\n        \n        df = pd.DataFrame({y.name: y, x.name: x.fillna(NAN_INT)})\n        return df.groupby(x.name)[y.name].mean().to_dict()", "docstring": "Return a mapping from categories to average target values.\nArgs:\nx (pandas.Series): a categorical column to encode.\ny (pandas.Series): the target column\nReturns:\ntarget_encoder (dict): mapping from categories to average target values", "source": "juraj-google-style"}
{"code": "def unit_pos_to_spot(unit_pos) -> ParkingSpot:\n    min_ = 50\n    res = None\n    for airport in parkings:\n        for spot in parkings[airport]:\n            spot_pos = parkings[airport][spot]\n            dist = math.hypot((unit_pos[0] - spot_pos[0]), (unit_pos[1] - spot_pos[1]))\n            if (dist < min_):\n                min_ = dist\n                res = ParkingSpot(airport=airport, spot=spot)\n    return res", "docstring": "Translates a unit position to a known parking spot\n\nArgs:\nunit_pos: unit position as Vec2\n\nReturns: ParkingSpot object", "source": "codesearchnet"}
{"code": "def write_build_statement(self, module, action, deps, imports, suffix):\n    output = path_utils.join(self.pyi_dir, _module_to_output_path(module) + '.pyi' + suffix)\n    logging.info('%s %s\\n  imports: %s\\n  deps: %s\\n  output: %s', action, module.name, imports, deps, output)\n    if deps:\n        deps = ' | ' + ' '.join((escape_ninja_path(dep) for dep in deps))\n    else:\n        deps = ''\n    with open(self.ninja_file, 'a') as f:\n        f.write('build {output}: {action} {input}{deps}\\n  imports = {imports}\\n  module = {module}\\n'.format(output=escape_ninja_path(output), action=action, input=escape_ninja_path(module.full_path), deps=deps, imports=escape_ninja_path(imports), module=module.name))\n    return output", "docstring": "Write a build statement for the given module.\n\nArgs:\nmodule: A module_utils.Module object.\naction: An Action object.\ndeps: The module's dependencies.\nimports: An imports file.\nsuffix: An output file suffix.\n\nReturns:\nThe expected output of the build statement.", "source": "github-repos"}
{"code": "def process_data_fn(self, inputs: dict[str, common_types.ConsistentTensorType]) -> dict[str, common_types.ConsistentTensorType]:\n    outputs = inputs.copy()\n    for transform in self.transforms:\n        columns = transform.columns\n        for col in columns:\n            intermediate_result = transform(outputs[col], output_column_name=col)\n            for key, value in intermediate_result.items():\n                outputs[key] = value\n    return outputs", "docstring": "This method is used in the AnalyzeAndTransformDataset step. It applies\nthe transforms to the `inputs` in sequential order on the columns\nprovided for a given transform.\nArgs:\ninputs: A dictionary of column names and data.\nReturns:\nA dictionary of column names and transformed data.", "source": "github-repos"}
{"code": "def resolve(node, source_info, graphs):\n    visitor = TreeAnnotator(source_info, graphs)\n    node = visitor.visit(node)\n    return node", "docstring": "Resolves reaching definitions for each symbol.\n\nArgs:\nnode: ast.AST\nsource_info: transformer.SourceInfo\ngraphs: Dict[ast.FunctionDef, cfg.Graph]\nReturns:\nast.AST", "source": "github-repos"}
{"code": "def remove(self, force=False):\n    return self.client.api.remove_node(self.id, force=force)", "docstring": "Remove this node from the swarm.\n\nArgs:\nforce (bool): Force remove an active node. Default: `False`\n\nReturns:\n`True` if the request was successful.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the node doesn't exist in the swarm.\n\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def l2_loss(tensor, weight=1.0, scope=None):\n  \n  with tf.name_scope(scope, 'L2Loss', [tensor]):\n    weight = tf.convert_to_tensor(weight,\n                                  dtype=tensor.dtype.base_dtype,\n                                  name='loss_weight')\n    loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')\n    tf.add_to_collection(LOSSES_COLLECTION, loss)\n    return loss", "docstring": "Define a L2Loss, useful for regularize, i.e. weight decay.\n\nArgs:\ntensor: tensor to regularize.\nweight: an optional weight to modulate the loss.\nscope: Optional scope for name_scope.\n\nReturns:\nthe L2 loss op.", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.ones(shape, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\nsupported.\n**kwargs: Additional keyword arguments.\n\nRaises:\nValuesError: If the dtype is not numeric or boolean.", "source": "github-repos"}
{"code": "def getUrlMeta(self, url):\n        \n        return self.conn(\"GET\", SkypeConnection.API_URL, params={\"url\": url},\n                         auth=SkypeConnection.Auth.Authorize).json()", "docstring": "Retrieve various metadata associated with a URL, as seen by Skype.\n\nArgs:\nurl (str): address to ping for info\n\nReturns:\ndict: metadata for the website queried", "source": "juraj-google-style"}
{"code": "def merge(profile, head, base, commit_message=None):\n    if (not commit_message):\n        commit_message = (((('Merged ' + head) + ' into ') + base) + '.')\n    payload = {'base': base, 'head': head, 'commit_message': commit_message}\n    response = api.post_merge_request(profile, payload)\n    data = None\n    if (response.status_code == 201):\n        json_data = response.json()\n        data = prepare(json_data)\n    return data", "docstring": "Merge the head of a branch into the base branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nhead\nThe head to merge. It can be a SHA, or a branch name.\n\nbase\nThe name of the branch to merge the specified head into.\n\ncommit_message\nThe message to give for the commit.\n\nReturns:\nA dict with data about the merge.", "source": "codesearchnet"}
{"code": "def add(self, profile_datum):\n    self.total_op_time += profile_datum.op_time\n    self.total_exec_time += profile_datum.exec_time\n    device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n    device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n    if device_and_node in self._node_to_exec_count:\n        self._node_to_exec_count[device_and_node] += 1\n    else:\n        self._node_to_exec_count[device_and_node] = 1", "docstring": "Accumulate a new instance of ProfileDatum.\n\nArgs:\nprofile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to\naccumulate to this object.", "source": "github-repos"}
{"code": "def _WaitForStartup(self, deadline):\n    \n    start = time.time()\n    sleep = 0.05\n\n    def Elapsed():\n      return time.time() - start\n\n    while True:\n      try:\n        response, _ = self._http.request(self._host)\n        if response.status == 200:\n          logging.info('emulator responded after %f seconds', Elapsed())\n          return True\n      except (socket.error, httplib.ResponseNotReady):\n        pass\n      if Elapsed() >= deadline:\n        \n        return False\n      else:\n        time.sleep(sleep)\n        sleep *= 2", "docstring": "Waits for the emulator to start.\n\nArgs:\ndeadline: deadline in seconds\n\nReturns:\nTrue if the emulator responds within the deadline, False otherwise.", "source": "juraj-google-style"}
{"code": "def Initialize(self, filename=None, data=None, fd=None, reset=True, must_exist=False, parser=ConfigFileParser):\n    self.FlushCache()\n    if reset:\n        self.raw_data = collections.OrderedDict()\n        self.writeback_data = collections.OrderedDict()\n        self.writeback = None\n        self.initialized = False\n    if (fd is not None):\n        self.parser = self.LoadSecondaryConfig(parser=parser(fd=fd))\n    elif (filename is not None):\n        self.parser = self.LoadSecondaryConfig(filename)\n        if (must_exist and (not self.parser.parsed)):\n            raise ConfigFormatError(('Unable to parse config file %s' % filename))\n    elif (data is not None):\n        self.parser = self.LoadSecondaryConfig(parser=parser(data=data))\n    elif must_exist:\n        raise RuntimeError('Registry path not provided.')\n    self.initialized = True", "docstring": "Initializes the config manager.\n\nThis method is used to add more config options to the manager. The config\ncan be given as one of the parameters as described in the Args section.\n\nArgs:\nfilename: The name of the configuration file to use.\ndata: The configuration given directly as a long string of data.\nfd: A file descriptor of a configuration file.\nreset: If true, the previous configuration will be erased.\nmust_exist: If true the data source must exist and be a valid\nconfiguration file, or we raise an exception.\nparser: The parser class to use (i.e. the format of the file). If not\nspecified guess from the filename.\n\nRaises:\nRuntimeError: No configuration was passed in any of the parameters.\n\nConfigFormatError: Raised when the configuration file is invalid or does\nnot exist..", "source": "codesearchnet"}
{"code": "def write_additional(self, productversion, channel):\n    self.fileobj.seek(self.additional_offset)\n    extras = extras_header.build(dict(count=1, sections=[dict(channel=six.u(channel), productversion=six.u(productversion), size=(((len(channel) + len(productversion)) + 2) + 8), padding=b'')]))\n    self.fileobj.write(extras)\n    self.last_offset = self.fileobj.tell()", "docstring": "Write the additional information to the MAR header.\n\nArgs:\nproductversion (str): product and version string\nchannel (str): channel string", "source": "codesearchnet"}
{"code": "def inv(self, q_data, max_iterations=100, tollerance=1e-05):\n    q_data = numpy.asfarray(q_data)\n    assert numpy.all(((q_data >= 0) & (q_data <= 1))), 'sanitize your inputs!'\n    shape = q_data.shape\n    q_data = q_data.reshape(len(self), (- 1))\n    x_data = evaluation.evaluate_inverse(self, q_data)\n    (lower, upper) = evaluation.evaluate_bound(self, x_data)\n    x_data = numpy.clip(x_data, a_min=lower, a_max=upper)\n    x_data = x_data.reshape(shape)\n    return x_data", "docstring": "Inverse Rosenblatt transformation.\n\nIf possible the transformation is done analytically. If not possible,\ntransformation is approximated using an algorithm that alternates\nbetween Newton-Raphson and binary search.\n\nArgs:\nq_data (numpy.ndarray):\nProbabilities to be inverse. If any values are outside ``[0,\n1]``, error will be raised. ``q_data.shape`` must be compatible\nwith distribution shape.\nmax_iterations (int):\nIf approximation is used, this sets the maximum number of\nallowed iterations in the Newton-Raphson algorithm.\ntollerance (float):\nIf approximation is used, this set the error tolerance level\nrequired to define a sample as converged.\n\nReturns:\n(numpy.ndarray):\nInverted probability values where\n``out.shape == q_data.shape``.", "source": "codesearchnet"}
{"code": "def one_step(self, current_state, previous_kernel_results):\n    previous_step_size_assign = ([] if (self.step_size_update_fn is None) else (previous_kernel_results.extra.step_size_assign if mcmc_util.is_list_like(previous_kernel_results.extra.step_size_assign) else [previous_kernel_results.extra.step_size_assign]))\n    with tf.control_dependencies(previous_step_size_assign):\n        (next_state, kernel_results) = self._impl.one_step(current_state, previous_kernel_results)\n        if (self.step_size_update_fn is not None):\n            step_size_assign = self.step_size_update_fn(self.step_size, kernel_results)\n            kernel_results = kernel_results._replace(extra=HamiltonianMonteCarloExtraKernelResults(step_size_assign=step_size_assign))\n        return (next_state, kernel_results)", "docstring": "Runs one iteration of Hamiltonian Monte Carlo.\n\nArgs:\ncurrent_state: `Tensor` or Python `list` of `Tensor`s representing the\ncurrent state(s) of the Markov chain(s). The first `r` dimensions index\nindependent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\nprevious_kernel_results: `collections.namedtuple` containing `Tensor`s\nrepresenting values from previous calls to this function (or from the\n`bootstrap_results` function.)\n\nReturns:\nnext_state: Tensor or Python list of `Tensor`s representing the state(s)\nof the Markov chain(s) after taking exactly one step. Has same type and\nshape as `current_state`.\nkernel_results: `collections.namedtuple` of internal calculations used to\nadvance the chain.\n\nRaises:\nValueError: if there isn't one `step_size` or a list with same length as\n`current_state`.", "source": "codesearchnet"}
{"code": "def easeInOutBack(n, s=1.70158):\n    _checkRange(n)\n    n = (n * 2)\n    if (n < 1):\n        s *= 1.525\n        return (0.5 * ((n * n) * (((s + 1) * n) - s)))\n    else:\n        n -= 2\n        s *= 1.525\n        return (0.5 * (((n * n) * (((s + 1) * n) + s)) + 2))", "docstring": "A \"back-in\" tween function that overshoots both the start and destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def download_sifts_xml(pdb_id, outdir='', force_rerun=False):\n    baseURL = 'ftp:\n    filename = '{}.xml.gz'.format(pdb_id.lower())\n    outfile = op.join(outdir, (filename.split('.')[0] + '.sifts.xml'))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        response = urlopen((baseURL + filename))\n        with open(outfile, 'wb') as f:\n            f.write(gzip.decompress(response.read()))\n    return outfile", "docstring": "Download the SIFTS file for a PDB ID.\n\nArgs:\npdb_id (str): PDB ID\noutdir (str): Output directory, current working directory if not specified.\nforce_rerun (bool): If the file should be downloaded again even if it exists\n\nReturns:\nstr: Path to downloaded file", "source": "codesearchnet"}
{"code": "def wrap_warnings(logger):\n\n    def decorator(func):\n\n        @functools.wraps(func)\n        def new_func(*args, **kwargs):\n            showwarning = warnings.showwarning\n            warnings.showwarning = warn_logging(logger)\n            try:\n                return func(*args, **kwargs)\n            finally:\n                warnings.showwarning = showwarning\n        return new_func\n    return decorator", "docstring": "Have the function patch `warnings.showwarning` with the given logger.\n\nArguments:\nlogger (~logging.logger): the logger to wrap warnings with when\nthe decorated function is called.\n\nReturns:\n`function`: a decorator function.", "source": "codesearchnet"}
{"code": "def search(self, search_phrase, limit=None):\n        \n        query, query_params = self._make_query_from_terms(search_phrase, limit=limit)\n\n        self._parsed_query = (str(query), query_params)\n\n        if query is not None:\n\n            self.backend.library.database.set_connection_search_path()\n\n            results = self.execute(query, **query_params)\n\n            for result in results:\n                vid, dataset_vid, score = result\n                yield PartitionSearchResult(\n                    vid=vid, dataset_vid=dataset_vid, score=score)", "docstring": "Finds partitions by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to generate. None means without limit.\n\nGenerates:\nPartitionSearchResult instances.", "source": "juraj-google-style"}
{"code": "def flatten(index, name='segmented_flatten'):\n    batch_size = tf.reduce_prod(index.batch_shape())\n    offset = tf.range(batch_size) * index.num_segments\n    offset = tf.reshape(offset, index.batch_shape())\n    for _ in range(index.batch_dims, index.indices.shape.rank):\n        offset = tf.expand_dims(offset, -1)\n    indices = tf.cast(offset, index.indices.dtype) + index.indices\n    return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0)", "docstring": "Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements\ndistinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with\n`num_segments` multiplied by the number of elements in the batch.\n\nArgs:\nindex: IndexMap to flatten.\nname: Name for the TensorFlow operation.\n\nReturns:\nThe flattened IndexMap.", "source": "github-repos"}
{"code": "def transform_to_mods_multimono(marc_xml, uuid, url):\n    \n    marc_xml = _read_content_or_path(marc_xml)\n\n    transformed = xslt_transformation(\n        marc_xml,\n        _absolute_template_path(\"MARC21toMultiMonographTitle.xsl\")\n    )\n\n    return _apply_postprocessing(\n        marc_xml=marc_xml,\n        xml=transformed,\n        func=mods_postprocessor.postprocess_multi_mono,\n        uuid=uuid,\n        url=url,\n    )", "docstring": "Convert `marc_xml` to multimonograph MODS data format.\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "juraj-google-style"}
{"code": "def stop(self, threads=None, close_summary_writer=True, ignore_live_threads=False):\n    self._coord.request_stop()\n    try:\n        self._coord.join(threads, stop_grace_period_secs=self._stop_grace_secs, ignore_live_threads=ignore_live_threads)\n    finally:\n        if close_summary_writer and self._summary_writer:\n            self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))\n            self._summary_writer.close()\n            self._graph_added_to_summary = False", "docstring": "Stop the services and the coordinator.\n\nThis does not close the session.\n\nArgs:\nthreads: Optional list of threads to join with the coordinator.  If\n`None`, defaults to the threads running the standard services, the\nthreads started for `QueueRunners`, and the threads started by the\n`loop()` method.  To wait on additional threads, pass the list in this\nparameter.\nclose_summary_writer: Whether to close the `summary_writer`.  Defaults to\n`True` if the summary writer was created by the supervisor, `False`\notherwise.\nignore_live_threads: If `True` ignores threads that remain running after a\ngrace period when joining threads via the coordinator, instead of\nraising a RuntimeError.", "source": "github-repos"}
{"code": "class RTDetrHungarianMatcher(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        requires_backends(self, ['scipy'])\n        self.class_cost = config.matcher_class_cost\n        self.bbox_cost = config.matcher_bbox_cost\n        self.giou_cost = config.matcher_giou_cost\n        self.use_focal_loss = config.use_focal_loss\n        self.alpha = config.matcher_alpha\n        self.gamma = config.matcher_gamma\n        if self.class_cost == self.bbox_cost == self.giou_cost == 0:\n            raise ValueError(\"All costs of the Matcher can't be 0\")\n\n    @torch.no_grad()\n    def forward(self, outputs, targets):\n        \n        batch_size, num_queries = outputs['logits'].shape[:2]\n        out_bbox = outputs['pred_boxes'].flatten(0, 1)\n        target_ids = torch.cat([v['class_labels'] for v in targets])\n        target_bbox = torch.cat([v['boxes'] for v in targets])\n        if self.use_focal_loss:\n            out_prob = F.sigmoid(outputs['logits'].flatten(0, 1))\n            out_prob = out_prob[:, target_ids]\n            neg_cost_class = (1 - self.alpha) * out_prob ** self.gamma * -(1 - out_prob + 1e-08).log()\n            pos_cost_class = self.alpha * (1 - out_prob) ** self.gamma * -(out_prob + 1e-08).log()\n            class_cost = pos_cost_class - neg_cost_class\n        else:\n            out_prob = outputs['logits'].flatten(0, 1).softmax(-1)\n            class_cost = -out_prob[:, target_ids]\n        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)\n        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))\n        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost\n        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()\n        sizes = [len(v['boxes']) for v in targets]\n        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]\n        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]", "docstring": "This class computes an assignment between the targets and the predictions of the network\n\nFor efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more\npredictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are\nun-matched (and thus treated as non-objects).\n\nArgs:\nconfig: RTDetrConfig", "source": "github-repos"}
{"code": "def add_options(cls, parser):\n        \n        kwargs = {'action': 'store', 'default': '', 'parse_from_config': True,\n                  'comma_separated_list': True}\n        for num in range(cls.min_check, cls.max_check):\n            parser.add_option(None, \"--filename_check{}\".format(num), **kwargs)", "docstring": "Required by flake8\nadd the possible options, called first\n\nArgs:\nparser (OptionsManager):", "source": "juraj-google-style"}
{"code": "def __init__(self, header, metadata, content):\n        \n        self.header = header\n        self.metadata = metadata\n        self.content = content\n        self._buffers = []", "docstring": "Initialize a new message from header, metadata, and content\ndictionaries.\n\nTo assemble a message from existing JSON fragments, use the\n``assemble`` method.\n\nTo create new messages with automatically generated headers,\nuse subclass ``create`` methods.\n\nArgs:\nheader (JSON-like) :\n\nmetadata (JSON-like) :\n\ncontent (JSON-like) :", "source": "juraj-google-style"}
{"code": "def NamedPlaceholders(iterable):\n    placeholders = ', '.join(('%({})s'.format(key) for key in sorted(iterable)))\n    return '({})'.format(placeholders)", "docstring": "Returns named placeholders from all elements of the given iterable.\n\nUse this function for VALUES of MySQL INSERTs.\n\nTo account for Iterables with undefined order (dicts before Python 3.6),\nthis function sorts column names.\n\nExamples:\n>>> NamedPlaceholders({\"password\": \"foo\", \"name\": \"bar\"})\nu'(%(name)s, %(password)s)'\n\nArgs:\niterable: The iterable of strings to be used as placeholder keys.\n\nReturns:\nA string containing a tuple of comma-separated, sorted, named, placeholders.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(FVDEFileSystem, self).__init__(resolver_context)\n    self._fvde_volume = None\n    self._file_object = None", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def find_replace_string(obj, find, replace):\n    \n    try:\n        strobj = str(obj)\n        newStr =  string.replace(strobj, find, replace)\n        if newStr == strobj:\n            return obj\n        else:\n            return newStr\n\n    except:\n        line, filename, synerror = trace()\n        raise ArcRestHelperError({\n                    \"function\": \"find_replace_string\",\n                    \"line\": line,\n                    \"filename\":  filename,\n                    \"synerror\": synerror,\n                                    }\n                                    )\n    finally:\n        pass", "docstring": "Performs a string.replace() on the input object.\n\nArgs:\nobj (object): The object to find/replace. It will be cast to ``str``.\nfind (str): The string to search for.\nreplace (str): The string to replace with.\nReturns:\nstr: The replaced string.", "source": "juraj-google-style"}
{"code": "def add_unique_id(self):\n    uid = 0\n    for feature in self._data['features']:\n        if feature['properties'].get('id'):\n            raise Exception('one of the features already had an id field')\n        feature['properties']['id'] = uid\n        uid += 1", "docstring": "Adds a unique id property to each feature.\n\nRaises:\n\n- An Exception if any of the features already\nhave an \"id\" field.", "source": "codesearchnet"}
{"code": "def register_backend(name, backend, allow_overwrite=False):\n        \n        if hasattr(Circuit, \"run_with_\" + name):\n            if allow_overwrite:\n                warnings.warn(f\"Circuit has attribute `run_with_{name}`.\")\n            else:\n                raise ValueError(f\"Circuit has attribute `run_with_{name}`.\")\n        if not allow_overwrite:\n            if name in BACKENDS:\n                raise ValueError(f\"Backend '{name}' is already registered as backend.\")\n        BACKENDS[name] = backend", "docstring": "Register new backend.\n\nArgs:\nname (str): The name of backend.\ngateclass (type): The type object of backend\nallow_overwrite (bool, optional): If True, allow to overwrite the existing backend.\nOtherwise, raise the ValueError.\n\nRaises:\nValueError: The name is duplicated with existing backend.\nWhen `allow_overwrite=True`, this error is not raised.", "source": "juraj-google-style"}
{"code": "def P(value, bits=None, endian=None, target=None):\n    return globals()[('P%d' % _get_bits(bits, target))](value, endian=endian, target=target)", "docstring": "Pack an unsigned pointer for a given target.\n\nArgs:\nvalue(int): The value to pack.\nbits(:class:`~pwnypack.target.Target.Bits`): Override the default\nword size. If ``None`` it will look at the word size of\n``target``.\nendian(:class:`~pwnypack.target.Target.Endian`): Override the default\nbyte order. If ``None``, it will look at the byte order of\nthe ``target`` argument.\ntarget(:class:`~pwnypack.target.Target`): Override the default byte\norder. If ``None``, it will look at the byte order of\nthe global :data:`~pwnypack.target.target`.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 *,\n                 start_msg: Optional[str] = None,\n                 end_msg: Optional[str] = None,\n                 start_no_nl: bool = False):\n        \n        if start_msg is None and end_msg is None:\n            raise ValueError(\n                \"At least one of 'start_msg' and 'end_msg' must be specified.\")\n        self._raise_if_not_none_nor_string(start_msg, \"start_msg\")\n        self._raise_if_not_none_nor_string(end_msg, \"end_msg\")\n        self._start_msg = start_msg\n        self._end_msg = end_msg\n        self._start_no_nl = start_no_nl", "docstring": "Note that both arguments are keyword only arguments.\n\nArgs:\nstart_msg: A message to print before the function runs.  end_msg: A\nmessage to print after the function has finished.  start_no_nl: If\nTrue, no newline is appended after the start_msg.", "source": "juraj-google-style"}
{"code": "def scrape(text, ptype=None):\n    \n\n    for ruletype, rule, info in scrape_types:\n        if ptype and ptype != ruletype:\n            continue\n        regx = regexes.get(ruletype)\n        for valu in regx.findall(text):\n            yield (ruletype, valu)", "docstring": "Scrape types from a blob of text and return node tuples.\n\nArgs:\ntext (str): Text to scrape.\nptype (str): Optional ptype to scrape. If present, only scrape rules which match the provided type.\n\nReturns:\n(str, str): Yield tuples of type, valu strings.", "source": "juraj-google-style"}
{"code": "def block_reducible(cm, nodes1, nodes2):\n    if ((not nodes1) or (not nodes2)):\n        return True\n    cm = cm[np.ix_(nodes1, nodes2)]\n    if ((not cm.sum(0).all()) or (not cm.sum(1).all())):\n        return True\n    if ((len(nodes1) > 1) and (len(nodes2) > 1)):\n        return block_cm(cm)\n    return False", "docstring": "Return whether connections from ``nodes1`` to ``nodes2`` are reducible.\n\nArgs:\ncm (np.ndarray): The network's connectivity matrix.\nnodes1 (tuple[int]): Source nodes\nnodes2 (tuple[int]): Sink nodes", "source": "codesearchnet"}
{"code": "def has_access(user, required_roles, match_all=True):\n    if (ROLE_ADMIN in user.roles):\n        return True\n    if isinstance(required_roles, str):\n        if (required_roles in user.roles):\n            return True\n        return False\n    if match_all:\n        for role in required_roles:\n            if (role not in user.roles):\n                return False\n        return True\n    else:\n        for role in required_roles:\n            if (role in user.roles):\n                return True\n        return False", "docstring": "Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply\n\nArgs:\nuser (:obj:`User`): User object\nrequired_roles (`list` of `str`): List of roles that the user must have applied\nmatch_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will\nreturn `True`\n\nReturns:\n`bool`", "source": "codesearchnet"}
{"code": "def save_csv(X, y, path):\n    if sparse.issparse(X):\n        X = X.todense()\n    np.savetxt(path, np.hstack((y.reshape(((- 1), 1)), X)), delimiter=',')", "docstring": "Save data as a CSV file.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector.\npath (str): Path to the CSV file to save data.", "source": "codesearchnet"}
{"code": "def CreateDynamicDisplayAdSettings(client, opener):\n  \n  media_service = client.GetService('MediaService', 'v201809')\n\n  logo = {\n      'xsi_type': 'Image',\n      'mediaId': _CreateImage(media_service, opener, 'https:\n  }\n\n  dynamic_settings = {\n      'landscapeLogoImage': logo,\n      'pricePrefix': 'as low as',\n      'promoText': 'Free shipping!'\n  }\n\n  return dynamic_settings", "docstring": "Creates dynamic display ad settings.\n\nArgs:\nclient: an AdWordsClient instance.\nopener: an OpenerDirector instance.\n\nReturns:\nA dict containing the dynamic display ad settings.", "source": "juraj-google-style"}
{"code": "def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):\n    (x, next_iteration, deltas, improvement, last_improvement, estimated_improvement) = super(LineSearch, self).tf_step(x, iteration, deltas, improvement, last_improvement, estimated_improvement)\n    next_x = [(t + delta) for (t, delta) in zip(x, deltas)]\n    if (self.mode == 'linear'):\n        next_deltas = deltas\n        next_estimated_improvement = (estimated_improvement + self.estimated_incr)\n    elif (self.mode == 'exponential'):\n        next_deltas = [(delta * self.parameter) for delta in deltas]\n        next_estimated_improvement = (estimated_improvement * self.parameter)\n    target_value = self.fn_x(next_deltas)\n    next_improvement = tf.divide(x=(target_value - self.base_value), y=tf.maximum(x=next_estimated_improvement, y=util.epsilon))\n    return (next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement)", "docstring": "Iteration loop body of the line search algorithm.\n\nArgs:\nx: Current solution estimate $x_t$.\niteration: Current iteration counter $t$.\ndeltas: Current difference $x_t - x'$.\nimprovement: Current improvement $(f(x_t) - f(x')) / v'$.\nlast_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\nestimated_improvement: Current estimated value $v'$.\n\nReturns:\nUpdated arguments for next iteration.", "source": "codesearchnet"}
{"code": "def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding):\n    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n\n    def _SetupVal(data_format, use_gpu):\n        with test_util.device(use_gpu):\n            t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n            t2 = constant_op.constant(x2, shape=filter_in_sizes)\n            strides = [1] + conv_strides + [1]\n            if data_format == 'NCHW':\n                t1 = test_util.NHWCToNCHW(t1)\n                strides = test_util.NHWCToNCHW(strides)\n            conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format)\n            if data_format == 'NCHW':\n                conv = test_util.NCHWToNHWC(conv)\n            return conv\n    tensors = []\n    for data_format, use_gpu in GetTestConfigs():\n        tensors.append(_SetupVal(data_format, use_gpu))\n    values = self.evaluate(tensors)\n    for i in range(1, len(values)):\n        self.assertAllClose(values[0], values[i], rtol=0.001, atol=0.001)", "docstring": "Verifies that CPU and GPU produce the same values.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in\n[kernel_rows, kernel_cols, input_depth, output_depth].\nconv_strides: [row_stride, col_stride] for the convolution;\npadding: Padding type.", "source": "github-repos"}
{"code": "def from_yaml(cls, yaml_path, filename=None):\n    if filename:\n        yaml_path = os.path.join(os.path.dirname(yaml_path), filename)\n    assert yaml_path.endswith('.yaml'), ('Expected a/path/to/<yamlname>.yaml, got %r' % yaml_path)\n    yamlname = os.path.basename(yaml_path)[:(- 5)]\n    log.debug('Parsing %s', yaml_path)\n    with open(yaml_path) as f:\n        text = f.read()\n    ds = yaml.load(text, Loader=yaml.RoundTripLoader)\n    docstring = None\n    sections = []\n    for d in ds:\n        assert (len(d) == 1), ('Expected section length 1, got %d' % len(d))\n        lineno = (d._yaml_line_col.line + 1)\n        name = list(d)[0]\n        sections.append(cls(yaml_path, lineno, name, d[name]))\n        if (name == 'builtin.defines.docstring'):\n            docstring = d[name]['value']\n    return (sections, yamlname, docstring)", "docstring": "Split a dictionary into parameters controllers parts blocks defines\n\nArgs:\nyaml_path (str): File path to YAML file, or a file in the same dir\nfilename (str): If give, use this filename as the last element in\nthe yaml_path (so yaml_path can be __file__)\n\nReturns:\ntuple: (sections, yamlname, docstring) where sections is a\nlist of created sections", "source": "codesearchnet"}
{"code": "def read_var_str(self, max_size=sys.maxsize):\n        \n        length = self.read_var_int(max_size)\n        return self.unpack(str(length) + 's', length)", "docstring": "Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.\n\nArgs:\nmax_size (int): (Optional) maximum number of bytes to read.\n\nReturns:\nbytes:", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The GetAttributes response payload is missing the unique identifier field.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        for attribute in self._attributes:\n            attribute.write(local_buffer, kmip_version=kmip_version)\n    elif self._attributes:\n        template_attribute = objects.TemplateAttribute(attributes=self.attributes)\n        attributes = objects.convert_template_attribute_to_attributes(template_attribute)\n        attributes.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The GetAttributes response payload is missing the attributes list.')\n    self.length = local_buffer.length()\n    super(GetAttributesResponsePayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the GetAttributes response payload to a\nstream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def submit(self, port_id, tuple_):\n    port_index = self._splpy_output_ports[port_id]\n    ec._submit(self, port_index, tuple_)", "docstring": "Submit a tuple to the output port.\n\nThe value to be submitted (``tuple_``) can be a ``None`` (nothing will be submitted),\n``tuple``, ``dict` or ``list`` of those types. For details\non how the ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`.\n\nArgs:\nport_id: Identifier of the port specified in the\n``output_ports`` parameter of the ``@spl.primitive_operator``\ndecorator.\ntuple_: Tuple (or tuples) to be submitted to the output port.", "source": "codesearchnet"}
{"code": "def _RetryLoop(self, func, timeout=None):\n    timeout = (timeout or self.DEFAULT_TIMEOUT)\n    deadline = (time.time() + timeout)\n    sleep = 1\n    while True:\n        try:\n            return func(timeout)\n        except grpc.RpcError:\n            if ((time.time() + sleep) > deadline):\n                raise\n            time.sleep(sleep)\n            sleep *= 2\n            timeout = (deadline - time.time())", "docstring": "Retries an operation until success or deadline.\n\nArgs:\n\nfunc: The function to run. Must take a timeout, in seconds, as a single\nparameter. If it raises grpc.RpcError and deadline has not be reached,\nit will be run again.\n\ntimeout: Retries will continue until timeout seconds have passed.", "source": "codesearchnet"}
{"code": "def fasta_format_check(fasta_path, logger):\n    \n\n    header_count = 0\n    line_count = 1\n    nt_count = 0\n    with open(fasta_path) as f:\n        for l in f:\n            l = l.strip()\n            if l == '':\n                continue\n            if l[0] == '>':\n                header_count += 1\n                continue\n            if header_count == 0 and l[0] != '>':\n                error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with \">\" expected.' \\\n                    .format(line_count=line_count)\n                logger.error(error_msg)\n                raise Exception(error_msg)\n            non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES\n\n            if len(non_nucleotide_chars_in_line) > 0:\n                error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \\\n                    .format(line=line_count,\n                            non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))\n                logger.error(error_msg)\n                raise Exception(error_msg)\n            nt_count += len(l)\n            line_count += 1\n\n        if nt_count == 0:\n            error_msg = 'File \"{}\" does not contain any nucleotide sequence.'.format(fasta_path)\n            logger.error(error_msg)\n            raise Exception(error_msg)\n\n        logger.info('Valid FASTA format \"{}\" ({} bp)'.format(fasta_path, nt_count))", "docstring": "Check that a file is valid FASTA format.\n\n- First non-blank line needs to begin with a '>' header character.\n- Sequence can only contain valid IUPAC nucleotide characters\n\nArgs:\nfasta_str (str): FASTA file contents string\n\nRaises:\nException: If invalid FASTA format", "source": "juraj-google-style"}
{"code": "def button_number(self):\n    if (self.type != EventType.TABLET_PAD_BUTTON):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_tablet_pad_get_button_number(self._handle)", "docstring": "The button number that triggered this event, starting at 0.\n\nFor events that are not of type\n:attr:`~libinput.constant.Event.TABLET_PAD_BUTTON`,\nthis property raises :exc:`AttributeError`.\n\nNote that the number returned is a generic sequential button number\nand not a semantic button code as defined in ``linux/input.h``.\nSee `Tablet pad button numbers`_ for more details.\n\nReturns:\nint: The button triggering this event.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def ProcessCompletedRequests(self, notification, thread_pool):\n    \n    \n    \n    \n    \n    with queue_manager.QueueManager(token=self.token) as manager:\n      for request, _ in manager.FetchCompletedRequests(\n          self.session_id, timestamp=(0, notification.timestamp)):\n        \n        \n        if request.HasField(\"request\"):\n          manager.DeQueueClientRequest(request.request)\n\n    processing = []\n    while True:\n      try:\n        \n        \n        for request, responses in self.queue_manager.FetchCompletedResponses(\n            self.session_id, timestamp=(0, notification.timestamp)):\n\n          if request.id == 0 or not responses:\n            continue\n\n          \n          \n          if len(responses) != responses[-1].response_id:\n            \n            \n            \n            if request.transmission_count < 5:\n              stats_collector_instance.Get().IncrementCounter(\n                  \"grr_request_retransmission_count\")\n              request.transmission_count += 1\n              self.QueueRequest(request)\n            break\n\n          \n          self.hunt_obj.HeartBeat()\n          self._Process(\n              request, responses, thread_pool=thread_pool, events=processing)\n\n          \n          \n          self.queue_manager.DeleteRequest(request)\n          self.context.next_processed_request += 1\n\n        \n        return\n\n      except queue_manager.MoreDataException:\n        \n        for event in processing:\n          event.wait()\n\n        \n        \n        self.FlushMessages()\n        self.hunt_obj.Flush()\n        continue\n\n      finally:\n        \n        for event in processing:\n          event.wait()", "docstring": "Go through the list of requests and process the completed ones.\n\nWe take a snapshot in time of all requests and responses for this hunt. We\nthen process as many completed requests as possible. If responses are not\nquite here we leave it for next time.\n\nArgs:\nnotification: The notification object that triggered this processing.\nthread_pool: The thread pool to process the responses on.", "source": "juraj-google-style"}
{"code": "def update_mongo_compound_variants(self, bulk):\n    requests = []\n    for var_id in bulk:\n        var_obj = bulk[var_id]\n        if (not var_obj.get('compounds')):\n            continue\n        operation = pymongo.UpdateOne({'_id': var_obj['_id']}, {'$set': {'compounds': var_obj['compounds']}})\n        requests.append(operation)\n    if (not requests):\n        return\n    try:\n        self.variant_collection.bulk_write(requests, ordered=False)\n    except BulkWriteError as err:\n        LOG.warning('Updating compounds failed')\n        raise err", "docstring": "Update the compound information for a bulk of variants in the database\n\nArgs:\nbulk(dict): {'_id': scout.models.Variant}", "source": "codesearchnet"}
{"code": "def request(url, args=None, data=None, headers=None, method=None, credentials=None, raw_response=False, stats=None):\n    if (headers is None):\n        headers = {}\n    headers['user-agent'] = 'GoogleCloudDataLab/1.0'\n    if (args is not None):\n        qs = urllib.parse.urlencode(args)\n        url = ((url + '?') + qs)\n    if (data is not None):\n        if (method is None):\n            method = 'POST'\n        if (data != ''):\n            if ('Content-Type' not in headers):\n                data = json.dumps(data)\n                headers['Content-Type'] = 'application/json'\n        headers['Content-Length'] = str(len(data))\n    elif (method == 'POST'):\n        headers['Content-Length'] = '0'\n    if (method is None):\n        method = 'GET'\n    http = Http.http\n    if (credentials is not None):\n        http = copy.copy(http)\n        http = google_auth_httplib2.AuthorizedHttp(credentials)\n    if (stats is not None):\n        stats['duration'] = datetime.datetime.utcnow()\n    response = None\n    try:\n        log.debug(('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals()))\n        (response, content) = http.request(url, method=method, body=data, headers=headers)\n        if (200 <= response.status < 300):\n            if raw_response:\n                return content\n            if (type(content) == str):\n                return json.loads(content)\n            else:\n                return json.loads(str(content, encoding='UTF-8'))\n        else:\n            raise RequestException(response.status, content)\n    except ValueError:\n        raise Exception('Failed to process HTTP response.')\n    except httplib2.HttpLib2Error:\n        raise Exception('Failed to send HTTP request.')\n    finally:\n        if (stats is not None):\n            stats['data_size'] = len(data)\n            stats['status'] = response.status\n            stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds()", "docstring": "Issues HTTP requests.\n\nArgs:\nurl: the URL to request.\nargs: optional query string arguments.\ndata: optional data to be sent within the request.\nheaders: optional headers to include in the request.\nmethod: optional HTTP method to use. If unspecified this is inferred\n(GET or POST) based on the existence of request data.\ncredentials: optional set of credentials to authorize the request.\nraw_response: whether the raw response content should be returned as-is.\nstats: an optional dictionary that, if provided, will be populated with some\nuseful info about the request, like 'duration' in seconds and 'data_size' in\nbytes. These may be useful optimizing the access to rate-limited APIs.\nReturns:\nThe parsed response object.\nRaises:\nException when the HTTP request fails or the response cannot be processed.", "source": "codesearchnet"}
{"code": "def _project_to_part_level(hist: Hist, outliers_removal_axis: OutliersRemovalAxis) -> Hist:\n    import ROOT\n    if isinstance(hist, (ROOT.TH2, ROOT.TH3)):\n        projection_information: Dict[(str, Any)] = {}\n        output_object = _OutputObject(None)\n        projector = projectors.HistProjector(observable_to_project_from=hist, output_observable=output_object, output_attribute_name='output', projection_name_format='outliers_removal_hist', projection_information=projection_information)\n        projector.projection_axes.append(projectors.HistAxisRange(axis_type=outliers_removal_axis, axis_range_name='outliers_removal_axis', min_val=projectors.HistAxisRange.apply_func_to_find_bin(None, 1), max_val=projectors.HistAxisRange.apply_func_to_find_bin(ROOT.TAxis.GetNbins)))\n        projector.project()\n        return output_object.output\n    return hist", "docstring": "Project the input histogram to the particle level axis.\n\nArgs:\nhist: Histogram to check for outliers.\noutliers_removal_axis: Axis along which outliers removal will be performed. Usually\nthe particle level aixs.\nReturns:\nThe histogram to check for outliers.", "source": "codesearchnet"}
{"code": "def write_message(self, message, timeout):\n    with self._writer_lock:\n        self._transport.write(message.header, timeout.remaining_ms)\n        if timeout.has_expired():\n            _LOG.warning('Timed out between AdbMessage header and data, sending data anyway with 10ms timeout')\n            timeout = timeouts.PolledTimeout.from_millis(10)\n        self._transport.write(message.data, timeout.remaining_ms)", "docstring": "Send the given message over this transport.\n\nArgs:\nmessage: The AdbMessage to send.\ntimeout: Use this timeout for the entire write operation, it should be an\ninstance of timeouts.PolledTimeout.", "source": "codesearchnet"}
{"code": "def transpose(self, name=None):\n    \n    if name is None:\n      name = self.module_name + \"_transpose\"\n\n    if self._data_format == DATA_FORMAT_NHWC:\n      stride = self._stride[1:-1]\n    else:  \n      stride = self._stride[2:]\n\n    return Conv2D(output_channels=lambda: self.input_channels,\n                  kernel_shape=self._kernel_shape,\n                  stride=stride,\n                  padding=self._padding,\n                  use_bias=self._use_bias,\n                  initializers=self._initializers,\n                  partitioners=self._partitioners,\n                  regularizers=self._regularizers,\n                  data_format=self._data_format,\n                  custom_getter=self._custom_getter,\n                  name=name)", "docstring": "Returns matching `Conv2D` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n`Conv2D` module.", "source": "juraj-google-style"}
{"code": "def visit_Call(self, node):\n    if (self.depth == 0):\n        return node\n    if (self.ignore_exceptions is None):\n        ignore_exceptions = ast.Name('None', ast.Load())\n    else:\n        ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load())\n    catch_exception_type = (self.catch_exception if self.catch_exception else 'None')\n    catch_exception = ast.Name(catch_exception_type, ast.Load())\n    depth = ast.Num(((self.depth - 1) if (self.depth > 0) else (- 1)))\n    debug_node_name = ast.Name('debug', ast.Load())\n    call_extra_parameters = ([] if IS_PYTHON_3 else [None, None])\n    node.func = ast.Call(debug_node_name, [node.func, ignore_exceptions, catch_exception, depth], [], *call_extra_parameters)\n    return node", "docstring": "Propagate 'debug' wrapper into inner function calls if needed.\n\nArgs:\nnode (ast.AST): node statement to surround.", "source": "codesearchnet"}
{"code": "def sum(x, axis=None, keepdims=False):\n    return math_ops.reduce_sum(x, axis, keepdims)", "docstring": "Sum of the values in a tensor, alongside the specified axis.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to sum over.\nkeepdims: A boolean, whether to keep the dimensions or not.\nIf `keepdims` is `False`, the rank of the tensor is reduced\nby 1. If `keepdims` is `True`,\nthe reduced dimension is retained with length 1.\n\nReturns:\nA tensor with sum of `x`.", "source": "github-repos"}
{"code": "def _encrypt_asymmetric(self, encryption_algorithm, encryption_key, plain_text, padding_method, hashing_algorithm=None):\n    if (encryption_algorithm == enums.CryptographicAlgorithm.RSA):\n        if (padding_method == enums.PaddingMethod.OAEP):\n            hash_algorithm = self._encryption_hash_algorithms.get(hashing_algorithm)\n            if (hash_algorithm is None):\n                raise exceptions.InvalidField(\"The hashing algorithm '{0}' is not supported for asymmetric encryption.\".format(hashing_algorithm))\n            padding_method = asymmetric_padding.OAEP(mgf=asymmetric_padding.MGF1(algorithm=hash_algorithm()), algorithm=hash_algorithm(), label=None)\n        elif (padding_method == enums.PaddingMethod.PKCS1v15):\n            padding_method = asymmetric_padding.PKCS1v15()\n        else:\n            raise exceptions.InvalidField(\"The padding method '{0}' is not supported for asymmetric encryption.\".format(padding_method))\n        backend = default_backend()\n        try:\n            public_key = backend.load_der_public_key(encryption_key)\n        except Exception:\n            try:\n                public_key = backend.load_pem_public_key(encryption_key)\n            except Exception:\n                raise exceptions.CryptographicFailure('The public key bytes could not be loaded.')\n        cipher_text = public_key.encrypt(plain_text, padding_method)\n        return {'cipher_text': cipher_text}\n    else:\n        raise exceptions.InvalidField(\"The cryptographic algorithm '{0}' is not supported for asymmetric encryption.\".format(encryption_algorithm))", "docstring": "Encrypt data using asymmetric encryption.\n\nArgs:\nencryption_algorithm (CryptographicAlgorithm): An enumeration\nspecifying the asymmetric encryption algorithm to use for\nencryption. Required.\nencryption_key (bytes): The bytes of the public key to use for\nencryption. Required.\nplain_text (bytes): The bytes to be encrypted. Required.\npadding_method (PaddingMethod): An enumeration specifying the\npadding method to use with the asymmetric encryption\nalgorithm. Required.\nhashing_algorithm (HashingAlgorithm): An enumeration specifying\nthe hashing algorithm to use with the encryption padding\nmethod. Required, if the padding method is OAEP. Optional\notherwise, defaults to None.\n\nReturns:\ndict: A dictionary containing the encrypted data, with at least\nthe following key/value field:\n* cipher_text - the bytes of the encrypted data\n\nRaises:\nInvalidField: Raised when the algorithm is unsupported or the\nlength is incompatible with the algorithm.\nCryptographicFailure: Raised when the key generation process\nfails.", "source": "codesearchnet"}
{"code": "def _find_scalar_and_max_depth(pylist):\n    if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:\n        scalar_depth = None\n        max_depth = 1\n        for child in pylist:\n            child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child)\n            if child_scalar_depth is not None:\n                if scalar_depth is not None and scalar_depth != child_scalar_depth + 1:\n                    raise ValueError('all scalar values must have the same nesting depth')\n                scalar_depth = child_scalar_depth + 1\n            max_depth = max(max_depth, child_max_depth + 1)\n        return (scalar_depth, max_depth)\n    return (0, 0)", "docstring": "Finds nesting depth of scalar values in pylist.\n\nArgs:\npylist: A nested python `list` or `tuple`.\n\nReturns:\nA tuple `(scalar_depth, max_depth)`.  `scalar_depth` is the nesting\ndepth of scalar values in `pylist`, or `None` if `pylist` contains no\nscalars.  `max_depth` is the maximum depth of `pylist` (including\nempty lists).\n\nRaises:\nValueError: If pylist has inconsistent nesting depths for scalars.", "source": "github-repos"}
{"code": "def _build(self, inputs):\n    shape_inputs = inputs.get_shape().as_list()\n    rank = len(shape_inputs)\n    full_multiples = ([1] * rank)\n    for (dim, multiple) in zip(self._dims, self._multiples):\n        full_multiples[dim] = multiple\n    return tf.tile(inputs, multiples=full_multiples)", "docstring": "Connects the `TileByDim` module into the graph.\n\nArgs:\ninputs: `Tensor` to tile.\n\nReturns:\nThe tiled tensor.", "source": "codesearchnet"}
{"code": "def vq_gating(x, num_experts, k, bneck, hparams=None, name='vq_gating'):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        if hparams.use_scales:\n            scales = tf.get_variable('scales', [num_experts], tf.float32, initializer=tf.ones_initializer())\n            scales = tf.nn.softmax(scales)\n            hparams.scales = scales\n        input_size = x.get_shape().as_list()[(- 1)]\n        batch_size = common_layers.shape_list(x)[0]\n        if (k > 1):\n            x = tf.layers.dense(x, (input_size * k))\n            x = tf.reshape(x, [(batch_size * k), input_size])\n        inputs = tf.expand_dims(x, axis=1)\n        inputs = tf.expand_dims(inputs, axis=1)\n        hparams.z_size = int(math.log(num_experts, 2))\n        hparams.hidden_size = input_size\n        hparams.top_k = k\n        d = bneck.discrete_bottleneck(inputs)\n        centroids = None\n        exp_discrete = d['discrete']\n        embed_lookup = d['embed']\n        extra_loss = d['loss']\n        if hparams.residual_centroids:\n            centroids = embed_lookup(exp_discrete)\n        top_k_indices = tf.squeeze(exp_discrete, axis=1)\n        tf.summary.histogram('discrete_counts', top_k_indices)\n        if (k > 1):\n            top_k_indices = tf.reshape(top_k_indices, [batch_size, k])\n        top_k_gates = tf.ones([batch_size, k])\n        gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, num_experts)\n        count_per_expert = tf.reduce_sum(gates, axis=0)\n        if hparams.use_scales:\n            scale_loss = tf.reduce_mean((tf.to_float(count_per_expert) * scales))\n            extra_loss += scale_loss\n        if common_layers.should_generate_summaries():\n            tf.summary.histogram('vq_loss', extra_loss)\n            tf.summary.historgram('scale_loss', scale_loss)\n        return (gates, extra_loss, centroids)", "docstring": "VQ gating.\n\nArgs:\nx: input Tensor with shape [batch_size, input_size]\nnum_experts: an integer\nk: an integer - number of experts per example\nbneck: a bottleneck object\nhparams: optional hparams\nname: an optional string\n\nReturns:\ngates: a Tensor with shape [batch_size, num_experts]\nload: a Tensor with shape [num_experts]", "source": "codesearchnet"}
{"code": "def __init__(self, state_transition: Callable, current_state: Optional[State]) -> None:\n        \n        if not callable(state_transition):\n            raise ValueError('state_transition must be a callable')\n\n        self.state_transition = state_transition\n        self.current_state = current_state", "docstring": "Initialize the state manager.\n\nArgs:\nstate_transition: function that can apply a StateChange message.\ncurrent_state: current application state.", "source": "juraj-google-style"}
{"code": "def conforms(self, instance, format):\n        \n\n        try:\n            self.check(instance, format)\n        except FormatError:\n            return False\n        else:\n            return True", "docstring": "Check whether the instance conforms to the given format.\n\nArguments:\n\ninstance (*any primitive type*, i.e. str, number, bool):\n\nThe instance to check\n\nformat (str):\n\nThe format that instance should conform to\n\nReturns:\n\nbool: whether it conformed", "source": "juraj-google-style"}
{"code": "def print_result_for_plain_cgi_script_from_tuple(\n        contenttype_headers_content: WSGI_TUPLE_TYPE,\n        status: str = '200 OK') -> None:\n    \n    contenttype, headers, content = contenttype_headers_content\n    print_result_for_plain_cgi_script(contenttype, headers, content, status)", "docstring": "Writes HTTP result to stdout.\n\nArgs:\ncontenttype_headers_content:\nthe tuple ``(contenttype, extraheaders, data)``\nstatus:\nHTTP status message (default ``\"200 OK``)", "source": "juraj-google-style"}
{"code": "def create_issues_report(self, timeout=-1):\n        \n        uri = \"{}/issues/\".format(self.data[\"uri\"])\n        return self._helper.create_report(uri, timeout)", "docstring": "Creates an unexpected zoning report for a SAN.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\nlist: A list of FCIssueResponse dict.", "source": "juraj-google-style"}
{"code": "def add_callback(self, name, func):\n        \n\n        if name == 'on_scan':\n            events = ['device_seen']\n            def callback(_conn_string, _conn_id, _name, event):\n                func(self.id, event, event.get('validity_period', 60))\n        elif name == 'on_report':\n            events = ['report', 'broadcast']\n            def callback(_conn_string, conn_id, _name, event):\n                func(conn_id, event)\n        elif name == 'on_trace':\n            events = ['trace']\n            def callback(_conn_string, conn_id, _name, event):\n                func(conn_id, event)\n        elif name == 'on_disconnect':\n            events = ['disconnection']\n            def callback(_conn_string, conn_id, _name, _event):\n                func(self.id, conn_id)\n        else:\n            raise ArgumentError(\"Unknown callback type {}\".format(name))\n\n        self._adapter.register_monitor([None], events, callback)", "docstring": "Add a callback when device events happen.\n\nArgs:\nname (str): currently support 'on_scan' and 'on_disconnect'\nfunc (callable): the function that should be called", "source": "juraj-google-style"}
{"code": "def get_os(detailed=False):\n    \n    try:\n\n        os_type = platform.system()\n\n        if os_type == 'Linux':\n            os_detail = platform.uname()\n            distribution = platform.linux_distribution()\n            HOME = os.environ['HOME']\n            username = os.getenv('USER')\n        elif os_type == 'Windows':\n            username = os.getenv('username')\n            HOME = 'C:\\\\Users\\\\' + username\n        elif os_type == 'Java':\n            logger.warning('Unsupported OS. No information')\n    except OSError as e:\n        raise e\n    except Exception as e:\n        logger.exception(\n            '%s: problem determining local os environment %s' %\n            (inspect.stack()[0][3], str(e))\n            )\n    if detailed and os_type == 'Linux':\n        return {\n                'os_type': os_type,\n                'os_detail': os_detail,\n                'linux_distribution': distribution,\n                'HOME': HOME\n            }\n    elif detailed and os_type == 'Windows':\n        return {\n                'os_type': os_type,\n                'platform': platform,\n                'HOME': HOME\n            }\n    elif not detailed:\n        return {'os_type': os_type}", "docstring": "Summary:\nRetrieve local operating system environment characteristics\nArgs:\n:user (str): USERNAME, only required when run on windows os\nReturns:\nTYPE: dict object containing key, value pairs describing\nos information", "source": "juraj-google-style"}
{"code": "def render_template(template, out_dir='.', context=None):\n    template_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'templates', template)\n    files = []\n    empty_dirs = []\n    for (dirpath, _, filenames) in os.walk(template_directory):\n        if (len(filenames) == 0):\n            empty_dirs.append(os.path.relpath(dirpath, template_directory))\n        else:\n            files.extend([os.path.join(dirpath, filepath) for filepath in filenames])\n    for source_file in files:\n        with open(source_file, 'r') as file:\n            template = Template(file.read())\n            template_rendered = template.render(**(context or {}))\n            source_relpath = os.path.relpath(source_file, template_directory)\n            filename = os.path.join(out_dir, source_relpath)\n            filename_rendered = Template(filename).render(**context)\n            source_dir = os.path.dirname(filename_rendered)\n            if (not os.path.exists(source_dir)):\n                os.makedirs(source_dir)\n            with open(filename_rendered, 'w') as target_file:\n                target_file.write(template_rendered)\n    for dirpath in empty_dirs:\n        try:\n            dirname = os.path.join(out_dir, dirpath)\n            dirname_rendered = Template(dirname).render(**context)\n            if (not os.path.exists(dirname_rendered)):\n                os.makedirs(dirname_rendered)\n        except OSError as exc:\n            if ((exc.errno == errno.EEXIST) and os.path.isdir(dirpath)):\n                pass\n            else:\n                raise", "docstring": "This function renders the template desginated by the argument to the\ndesignated directory using the given context.\n\nArgs:\ntemplate (string) : the source template to use (relative to ./templates)\nout_dir (string) : the name of the output directory\ncontext (dict) : the template rendering context", "source": "codesearchnet"}
{"code": "def _SkipFieldMessage(tokenizer):\n    if tokenizer.TryConsume('<'):\n        delimiter = '>'\n    else:\n        tokenizer.Consume('{')\n        delimiter = '}'\n    while ((not tokenizer.LookingAt('>')) and (not tokenizer.LookingAt('}'))):\n        _SkipField(tokenizer)\n    tokenizer.Consume(delimiter)", "docstring": "Skips over a field message.\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.", "source": "codesearchnet"}
{"code": "def _get_left_right_blocks(x):\n    (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth) = common_layers.shape_list(x)\n    x_left_right_blocks = tf.slice(x, [0, 1, 0, 0, 0, 0], [(- 1), (x_num_outer_h_blocks - 2), (- 1), (- 1), (- 1), (- 1)])\n    num_blocks_h = ((x_num_outer_h_blocks - 2) \n    x_left_right_blocks = tf.reshape(x_left_right_blocks, [(- 1), num_blocks_h, 2, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth])\n    x_left_right_blocks = tf.transpose(x_left_right_blocks, [0, 1, 3, 2, 4, 5, 6])\n    x_left_right_blocks = tf.reshape(x_left_right_blocks, [(- 1), num_blocks_h, x_num_outer_w_blocks, (2 * x_memory_flange_h), x_memory_flange_w, depth])\n    (x_left_blocks, x_right_blocks) = _split_along_width(x_left_right_blocks)\n    return (x_left_blocks, x_right_blocks)", "docstring": "Helper function. Assumes that memory_flange is half of query sizes.\n\nThis function splits the tensor of width 'n' into two halves, where the\nfirst half gets the width indices 0, 2, 4.. and the second half gets the\nwidth indices 3, 5, ... We also fuse two blocks along the h dimension.\n\nArgs:\nx: a 6-d tensor.\n\nReturns:\nx_left_blocks, x_right_blocks: Two 6-d tensors", "source": "codesearchnet"}
{"code": "def get_colorscale(cmap, levels=None, cmin=None, cmax=None):\n    \n    ncolors = levels if isinstance(levels, int) else None\n    if isinstance(levels, list):\n        ncolors = len(levels) - 1\n        if isinstance(cmap, list) and len(cmap) != ncolors:\n            raise ValueError('The number of colors in the colormap '\n                             'must match the intervals defined in the '\n                             'color_levels, expected %d colors found %d.'\n                             % (ncolors, len(cmap)))\n    try:\n        palette = process_cmap(cmap, ncolors)\n    except Exception as e:\n        colorscale = colors.PLOTLY_SCALES.get(cmap)\n        if colorscale is None:\n            raise e\n        return colorscale\n\n    if isinstance(levels, int):\n        colorscale = []\n        scale = np.linspace(0, 1, levels+1)\n        for i in range(levels+1):\n            if i == 0:\n                colorscale.append((scale[0], palette[i]))\n            elif i == levels:\n                colorscale.append((scale[-1], palette[-1]))\n            else:\n                colorscale.append((scale[i], palette[i-1]))\n                colorscale.append((scale[i], palette[i]))\n        return colorscale\n    elif isinstance(levels, list):\n        palette, (cmin, cmax) = color_intervals(\n            palette, levels, clip=(cmin, cmax))\n    return colors.make_colorscale(palette)", "docstring": "Converts a cmap spec to a plotly colorscale\n\nArgs:\ncmap: A recognized colormap by name or list of colors\nlevels: A list or integer declaring the color-levels\ncmin: The lower bound of the color range\ncmax: The upper bound of the color range\n\nReturns:\nA valid plotly colorscale", "source": "juraj-google-style"}
{"code": "def WriteTo(self, values):\n    \n    try:\n      return self._struct.pack(*values)\n    except (TypeError, struct.error) as exception:\n      raise IOError('Unable to write stream with error: {0!s}'.format(\n          exception))", "docstring": "Writes values to a byte stream.\n\nArgs:\nvalues (tuple[object, ...]): values to copy to the byte stream.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nIOError: if byte stream cannot be written.\nOSError: if byte stream cannot be read.", "source": "juraj-google-style"}
{"code": "def insert_feasible_configurations(cur, feasible_configurations, encoded_data=None):\n    if (encoded_data is None):\n        encoded_data = {}\n    if ('num_variables' not in encoded_data):\n        encoded_data['num_variables'] = len(next(iter(feasible_configurations)))\n    if ('num_feasible_configurations' not in encoded_data):\n        encoded_data['num_feasible_configurations'] = len(feasible_configurations)\n    if (('feasible_configurations' not in encoded_data) or ('energies' not in encoded_data)):\n        encoded = {_serialize_config(config): en for (config, en) in feasible_configurations.items()}\n        (configs, energies) = zip(*sorted(encoded.items()))\n        encoded_data['feasible_configurations'] = json.dumps(configs, separators=(',', ':'))\n        encoded_data['energies'] = json.dumps(energies, separators=(',', ':'))\n    insert = '\\n            INSERT OR IGNORE INTO feasible_configurations(\\n                num_variables,\\n                num_feasible_configurations,\\n                feasible_configurations,\\n                energies)\\n            VALUES (\\n                :num_variables,\\n                :num_feasible_configurations,\\n                :feasible_configurations,\\n                :energies);\\n            '\n    cur.execute(insert, encoded_data)", "docstring": "Insert a group of feasible configurations into the cache.\n\nArgs:\ncur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function\nis meant to be run within a :obj:`with` statement.\nfeasible_configurations (dict[tuple[int]): The set of feasible\nconfigurations. Each key should be a tuple of variable assignments.\nThe values are the relative energies.\nencoded_data (dict, optional): If a dictionary is provided, it\nwill be populated with the serialized data. This is useful for\npreventing encoding the same information many times.\n\nExamples:\n>>> feasible_configurations = {(-1, -1): 0.0, (+1, +1): 0.0}\n>>> with pmc.cache_connect(':memory:') as cur:\n...     pmc.insert_feasible_configurations(cur, feasible_configurations)", "source": "codesearchnet"}
{"code": "def auto_flexdock(self, binding_residues, radius, ligand_path=None, force_rerun=False):\n    log.debug('\\n{}: running DOCK6...\\n\\tBinding residues: {}\\n\\tBinding residues radius: {}\\n\\tLigand to dock: {}\\n'.format(self.id, binding_residues, radius, op.basename(ligand_path)))\n    self.dockprep(force_rerun=force_rerun)\n    self.protein_only_and_noH(force_rerun=force_rerun)\n    self.dms_maker(force_rerun=force_rerun)\n    self.sphgen(force_rerun=force_rerun)\n    self.binding_site_mol2(residues=binding_residues, force_rerun=force_rerun)\n    self.sphere_selector_using_residues(radius=radius, force_rerun=force_rerun)\n    self.showbox(force_rerun=force_rerun)\n    self.grid(force_rerun=force_rerun)\n    if ligand_path:\n        self.do_dock6_flexible(ligand_path=ligand_path, force_rerun=force_rerun)", "docstring": "Run DOCK6 on a PDB file, given its binding residues and a radius around them.\n\nProvide a path to a ligand to dock a ligand to it. If no ligand is provided, DOCK6 preparations will be run on\nthat structure file.\n\nArgs:\nbinding_residues (str): Comma separated string of residues (eg: '144,170,199')\nradius (int, float): Radius around binding residues to dock to\nligand_path (str): Path to ligand (mol2 format) to dock to protein\nforce_rerun (bool): If method should be rerun even if output files exist", "source": "codesearchnet"}
{"code": "def execute_async_script(self, script, *args):\n    return self._execute(Command.EXECUTE_ASYNC_SCRIPT, {'script': script, 'args': list(args)})", "docstring": "Execute JavaScript Asynchronously in current context.\n\nSupport:\nWeb(WebView)\n\nArgs:\nscript: The JavaScript to execute.\n*args: Arguments for your JavaScript.\n\nReturns:\nReturns the return value of the function.", "source": "codesearchnet"}
{"code": "def find_rule(condition):\n        \n        final_condition = re.sub('{{.*}}', '42', condition)\n        ast_tokens = Condition.get_tokens(final_condition)\n        ast_compressed_tokens = Condition.compress_tokens(ast_tokens)\n\n        name = 'undefined'\n        function = lambda tokens: False\n\n        if len(ast_compressed_tokens) > 0:\n            for rule in Condition.RULES:\n                if Condition.match_tokens(ast_compressed_tokens, rule['types']):\n                    name = rule['name']\n                    function = rule['evaluate']\n                    break\n        return name, ast_tokens, function", "docstring": "Find rule for given condition.\n\nArgs:\ncondition (str): Python condition as string.\n\nReturns:\nstr, list, function: found rule name, list of AST tokens for condition\nand verification function.", "source": "juraj-google-style"}
{"code": "def _step(time, output_ta_t, *states):\n    current_input = tuple((ta[time] for ta in input_ta))\n    current_input = tree.pack_sequence_as(inputs, current_input)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_new_state = tree.flatten(new_states)\n    flat_output = tree.flatten(output)\n    ta_index_to_write = time if return_all_outputs else 0\n    for ta, out in zip(output_ta_t, flat_output):\n        ta[ta_index_to_write] = out\n    new_states = tree.pack_sequence_as(initial_states, flat_new_state)\n    return (time + 1, output_ta_t) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1,output_ta_t) + tuple(new_states)`", "source": "github-repos"}
{"code": "def json(self, attribs=None, recurse=True, ignorelist=False):\n    jsonnode = {}\n    jsonnode['type'] = self.XMLTAG\n    if self.id:\n        jsonnode['id'] = self.id\n    if self.set:\n        jsonnode['set'] = self.set\n    if self.cls:\n        jsonnode['class'] = self.cls\n    if self.annotator:\n        jsonnode['annotator'] = self.annotator\n    if self.annotatortype:\n        if (self.annotatortype == AnnotatorType.AUTO):\n            jsonnode['annotatortype'] = 'auto'\n        elif (self.annotatortype == AnnotatorType.MANUAL):\n            jsonnode['annotatortype'] = 'manual'\n    if (self.confidence is not None):\n        jsonnode['confidence'] = self.confidence\n    if self.n:\n        jsonnode['n'] = self.n\n    if self.auth:\n        jsonnode['auth'] = self.auth\n    if self.datetime:\n        jsonnode['datetime'] = self.datetime.strftime('%Y-%m-%dT%H:%M:%S')\n    if recurse:\n        jsonnode['children'] = []\n        if self.TEXTCONTAINER:\n            jsonnode['text'] = self.text()\n        if self.PHONCONTAINER:\n            jsonnode['phon'] = self.phon()\n        for child in self:\n            if (self.TEXTCONTAINER and isstring(child)):\n                jsonnode['children'].append(child)\n            elif (not self.PHONCONTAINER):\n                ignore = False\n                if ignorelist:\n                    for e in ignorelist:\n                        if isinstance(child, e):\n                            ignore = True\n                            break\n                if (not ignore):\n                    jsonnode['children'].append(child.json(attribs, recurse, ignorelist))\n    if attribs:\n        for attrib in attribs:\n            jsonnode[attrib] = attribs\n    return jsonnode", "docstring": "Serialises the FoLiA element and all its contents to a Python dictionary suitable for serialisation to JSON.\n\nExample::\n\nimport json\njson.dumps(word.json())\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def finalize():\n    if config_is_locked():\n        raise RuntimeError('Finalize called twice (config already locked).')\n    bindings = {}\n    for hook in _FINALIZE_HOOKS:\n        new_bindings = hook(_CONFIG)\n        if (new_bindings is not None):\n            for (key, value) in six.iteritems(new_bindings):\n                pbk = ParsedBindingKey(key)\n                if (pbk in bindings):\n                    err_str = 'Received conflicting updates when running {}.'\n                    raise ValueError(err_str.format(hook))\n                bindings[pbk] = value\n    for (pbk, value) in six.iteritems(bindings):\n        bind_parameter(pbk, value)\n    _set_config_is_locked(True)", "docstring": "A function that should be called after parsing all Gin config files.\n\nCalling this function allows registered \"finalize hooks\" to inspect (and\npotentially modify) the Gin config, to provide additional functionality. Hooks\nshould not modify the configuration object they receive directly; instead,\nthey should return a dictionary mapping Gin binding keys to (new or updated)\nvalues. This way, all hooks see the config as originally parsed.\n\nRaises:\nRuntimeError: If the config is already locked.\nValueError: If two or more hooks attempt to modify or introduce bindings for\nthe same key. Since it is difficult to control the order in which hooks\nare registered, allowing this could yield unpredictable behavior.", "source": "codesearchnet"}
{"code": "def __init__(self, api_key, db_path='/tmp/gsb_v4.db',\n                 discard_fair_use_policy=False, platforms=None, timeout=10):\n        \n        self.api_client = SafeBrowsingApiClient(api_key, discard_fair_use_policy=discard_fair_use_policy)\n        self.storage = SqliteStorage(db_path, timeout=timeout)\n        self.platforms = platforms", "docstring": "Constructor.\n\nArgs:\napi_key: string, a key for API authentication.\ndb_path: string, path to SQLite DB file to store cached data.\ndiscard_fair_use_policy: boolean, disable request frequency throttling (only for testing).\nplatforms: list, threat lists to look up, default includes all platforms.\ntimeout: seconds to wait for Sqlite DB to become unlocked from concurrent WRITE transaction.", "source": "juraj-google-style"}
{"code": "def __init__(self, spec, meta_graph, trainable, checkpoint_path, name):\n    \n    self._spec = spec\n    self._meta_graph = meta_graph\n    self._trainable = trainable\n    self._checkpoint_path = checkpoint_path\n\n    register_ops_if_needed({\n        op.name for op in self._meta_graph.meta_info_def.stripped_op_list.op})\n\n    \n    \n    \n    \n    \n    \n    with tf.init_scope():\n      self._init_state(name)", "docstring": "Private constructor.\n\nArgs:\nspec: _ModuleSpec instance.\nmeta_graph: MetaGraphDef to use\ntrainable: whether module is trainable.\ncheckpoint_path: None or a string to the variables checkpoints.\nname: variable and scope name where to instantiate the Module. Must be an\nunused name scope.", "source": "juraj-google-style"}
{"code": "def _get_suffix(path):\n    \n    suffix = os.path.basename(path).split(\".\")[-1]\n\n    if \"/\" in suffix:\n        raise UserWarning(\"Filename can't contain '/' in suffix (%s)!\" % path)\n\n    return suffix", "docstring": "Return suffix from `path`.\n\n``/home/xex/somefile.txt`` --> ``txt``.\n\nArgs:\npath (str): Full file path.\n\nReturns:\nstr: Suffix.\n\nRaises:\nUserWarning: When ``/`` is detected in suffix.", "source": "juraj-google-style"}
{"code": "def sheets_tab_rename(config, auth, sheet_url_or_name, old_sheet_tab, new_sheet_tab):\n    sheet_id, tab_id = sheets_tab_id(config, auth, sheet_url_or_name, old_sheet_tab)\n    if tab_id is not None:\n        sheets_batch_update(config, auth, sheet_url_or_name, {'requests': [{'updateSheetProperties': {'properties': {'sheetId': tab_id, 'title': new_sheet_tab}, 'fields': 'title'}}]})", "docstring": "Rename a tab in a sheet.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nurl_or_name - one of: URL, document title, or id\nold_sheet_tab - name of tab to get id for\nnew_sheet_tab - name of tab to get id for\n\nNo Return", "source": "github-repos"}
{"code": "def prepare_request(url: Union[(str, methods)], data: Optional[MutableMapping], headers: Optional[MutableMapping], global_headers: MutableMapping, token: str, as_json: Optional[bool]=None) -> Tuple[(str, Union[(str, MutableMapping)], MutableMapping)]:\n    if isinstance(url, methods):\n        as_json = (as_json or url.value[3])\n        real_url = url.value[0]\n    else:\n        real_url = url\n        as_json = False\n    if (not headers):\n        headers = {**global_headers}\n    else:\n        headers = {**global_headers, **headers}\n    payload: Optional[Union[(str, MutableMapping)]] = None\n    if (real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json)):\n        (payload, headers) = _prepare_json_request(data, token, headers)\n    elif (real_url.startswith(ROOT_URL) and (not as_json)):\n        payload = _prepare_form_encoded_request(data, token)\n    else:\n        real_url = (ROOT_URL + real_url)\n        payload = _prepare_form_encoded_request(data, token)\n    return (real_url, payload, headers)", "docstring": "Prepare outgoing request\n\nCreate url, headers, add token to the body and if needed json encode it\n\nArgs:\nurl: :class:`slack.methods` item or string of url\ndata: Outgoing data\nheaders: Custom headers\nglobal_headers: Global headers\ntoken: Slack API token\nas_json: Post JSON to the slack API\nReturns:\n:py:class:`tuple` (url, body, headers)", "source": "codesearchnet"}
{"code": "def find_element(self, name, type=ElementType.ANY):\n        \n\n        for e in self.e_list:\n            \n            if type.value and not e['elementType'] == type:\n                continue\n            if e[\"name\"] == name:\n                uri = self.uri\n                uri.eid = e[\"id\"]\n                return uri", "docstring": "Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.\n\nArgs:\nname: str\nthe name of the element.\n\nReturns:\n- onshapepy.uri of the element", "source": "juraj-google-style"}
{"code": "def __init__(self, action_type=None, ethertype=None):\n        \n        super().__init__(action_type, length=8)\n        self.ethertype = ethertype", "docstring": "Create a ActionPush with the optional parameters below.\n\nArgs:\naction_type (:class:`ActionType`): indicates which tag will be\npushed (VLAN, MPLS, PBB).\nethertype (int): indicates the Ethertype of the new tag.", "source": "juraj-google-style"}
{"code": "def show_confidence_band(self, value):\n        \n        if not isinstance(values, list):\n            raise TypeError(\"show_confidence_band must be a list of strings\")\n\n        self.options[\"show_confidence_band\"] = values", "docstring": "Show confidence band?\nSee metricsgraphics documentation\nArgs:\nvalue (list): strings\n\nRaises:\nTypeError: show_confidence_band must be a list of strings.", "source": "juraj-google-style"}
{"code": "def include(self, scheduled_operation: ScheduledOperation):\n        \n        collisions = self.query(time=scheduled_operation.time,\n                                duration=scheduled_operation.duration,\n                                qubits=scheduled_operation.operation.qubits)\n        if collisions:\n            raise ValueError('Operation {} has collisions: {}'.format(\n                scheduled_operation.operation, collisions))\n        self.scheduled_operations.add(scheduled_operation)\n        self._max_duration = max(self._max_duration,\n                                 scheduled_operation.duration)", "docstring": "Adds a scheduled operation to the schedule.\n\nArgs:\nscheduled_operation: The operation to add.\n\nRaises:\nValueError:\nThe operation collided with something already in the schedule.", "source": "juraj-google-style"}
{"code": "def get_qemu_info(path, backing_chain=False, fail_on_error=True):\n    cmd = ['qemu-img', 'info', '--output=json', path]\n    if backing_chain:\n        cmd.insert((- 1), '--backing-chain')\n    result = run_command_with_validation(cmd, fail_on_error, msg='Failed to get info for {}'.format(path))\n    return json.loads(result.out)", "docstring": "Get info on a given qemu disk\n\nArgs:\npath(str): Path to the required disk\nbacking_chain(boo): if true, include also info about\nthe image predecessors.\nReturn:\nobject: if backing_chain == True then a list of dicts else a dict", "source": "codesearchnet"}
{"code": "def _add_imports_to_env(self, raw_api):\n        \n        for namespace, desc in raw_api:\n            for item in desc:\n                if isinstance(item, AstImport):\n                    if namespace.name == item.target:\n                        raise InvalidSpec('Cannot import current namespace.',\n                                          item.lineno, item.path)\n                    if item.target not in self.api.namespaces:\n                        raise InvalidSpec(\n                            'Namespace %s is not defined in any spec.' %\n                            quote(item.target),\n                            item.lineno, item.path)\n                    env = self._get_or_create_env(namespace.name)\n                    imported_env = self._get_or_create_env(item.target)\n                    if namespace.name in imported_env:\n                        \n                        \n                        raise InvalidSpec(\n                            'Circular import of namespaces %s and %s '\n                            'detected.' %\n                            (quote(namespace.name), quote(item.target)),\n                            item.lineno, item.path)\n                    env[item.target] = imported_env", "docstring": "Scans raw parser output for import declarations. Checks if the imports\nare valid, and then creates a reference to the namespace in the\nenvironment.\n\nArgs:\nraw_api (Tuple[Namespace, List[stone.stone.parser._Element]]):\nNamespace paired with raw parser output.", "source": "juraj-google-style"}
{"code": "def set_status(self, status, msg):\n        \n        \n        if len(msg) > 2000:\n            msg = msg[:2000]\n            msg += \"\\n... snip ...\\n\"\n\n        \n        if self.status == self.S_LOCKED or status == self.S_LOCKED:\n            err_msg = (\n                 \"Locked files must be explicitly unlocked before calling set_status but\\n\"\n                 \"task.status = %s, input status = %s\" % (self.status, status))\n            raise RuntimeError(err_msg)\n\n        status = Status.as_status(status)\n\n        changed = True\n        if hasattr(self, \"_status\"):\n            changed = (status != self._status)\n\n        self._status = status\n\n        if status == self.S_RUN:\n            \n            if self.datetimes.start is None:\n                self.datetimes.start = datetime.datetime.now()\n\n        \n        if changed:\n            if status == self.S_SUB:\n                self.datetimes.submission = datetime.datetime.now()\n                self.history.info(\"Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s \" % (\n                    self.mpi_procs, self.omp_threads, self.mem_per_proc.to(\"Gb\"), msg))\n\n            elif status == self.S_OK:\n                self.history.info(\"Task completed %s\", msg)\n\n            elif status == self.S_ABICRITICAL:\n                self.history.info(\"Status set to S_ABI_CRITICAL due to: %s\", msg)\n\n            else:\n                self.history.info(\"Status changed to %s. msg: %s\", status, msg)\n\n        \n        \n        \n        \n        if status == self.S_DONE:\n            \n            self._on_done()\n\n        if status == self.S_OK:\n            \n            if not self.finalized:\n                self._on_ok()\n\n                \n                if self.gc is not None and self.gc.policy == \"task\":\n                    self.clean_output_files()\n\n            if self.status == self.S_OK:\n                \n                self.send_signal(self.S_OK)\n\n        return status", "docstring": "Set and return the status of the task.\n\nArgs:\nstatus: Status object or string representation of the status\nmsg: string with human-readable message used in the case of errors.", "source": "juraj-google-style"}
{"code": "def query(self, minhash, k):\n    if (k <= 0):\n        raise ValueError('k must be positive')\n    if (len(minhash) < (self.k * self.l)):\n        raise ValueError('The num_perm of MinHash out of range')\n    results = set()\n    r = self.k\n    while (r > 0):\n        for key in self._query(minhash, r, self.l):\n            results.add(key)\n            if (len(results) >= k):\n                return list(results)\n        r -= 1\n    return list(results)", "docstring": "Return the approximate top-k keys that have the highest\nJaccard similarities to the query set.\n\nArgs:\nminhash (datasketch.MinHash): The MinHash of the query set.\nk (int): The maximum number of keys to return.\n\nReturns:\n`list` of at most k keys.", "source": "codesearchnet"}
{"code": "def get_facets(self):\n    return dict([(facet.attrib['path'], [term.text for term in facet.findall('term')]) for facet in self._content.findall('facet')])", "docstring": "Get facets from the response.\n\nReturns:\nA dict where requested facet paths are keys and a list of coresponding terms are values.", "source": "codesearchnet"}
{"code": "def constcase(text, acronyms=None):\n    \n    words, _case, _sep = case_parse.parse_case(text, acronyms)\n    return '_'.join([w.upper() for w in words])", "docstring": "Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE).\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> constcase(\"hello world\")\n'HELLO_WORLD'\n>>> constcase(\"helloHTMLWorld\", True, [\"HTML\"])\n'HELLO_HTML_WORLD'", "source": "juraj-google-style"}
{"code": "def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=None):\n    if sparse:\n        raise NotImplementedError('SparseDataFrame is not implemented. To contribute to Modin, please visit github.com/modin-project/modin.')\n    if (not isinstance(data, DataFrame)):\n        ErrorMessage.default_to_pandas('`get_dummies` on non-DataFrame')\n        return DataFrame(pandas.get_dummies(data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype))\n    else:\n        new_manager = data._query_compiler.get_dummies(columns, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)\n        return DataFrame(query_compiler=new_manager)", "docstring": "Convert categorical variable into indicator variables.\n\nArgs:\ndata (array-like, Series, or DataFrame): data to encode.\nprefix (string, [string]): Prefix to apply to each encoded column\nlabel.\nprefix_sep (string, [string]): Separator between prefix and value.\ndummy_na (bool): Add a column to indicate NaNs.\ncolumns: Which columns to encode.\nsparse (bool): Not Implemented: If True, returns SparseDataFrame.\ndrop_first (bool): Whether to remove the first level of encoded data.\ndtype: The dtype for the get_dummies call.\n\nReturns:\nDataFrame or one-hot encoded data.", "source": "codesearchnet"}
{"code": "def _bind_length_handlers(tids, user_handler, lns):\n    for tid in tids:\n        for ln in lns:\n            type_octet = _gen_type_octet(tid, ln)\n            ion_type = _TID_VALUE_TYPE_TABLE[tid]\n            if ((ln == 1) and (ion_type is IonType.STRUCT)):\n                handler = partial(_ordered_struct_start_handler, partial(user_handler, ion_type))\n            elif (ln < _LENGTH_FIELD_FOLLOWS):\n                handler = partial(user_handler, ion_type, ln)\n            else:\n                handler = partial(_var_uint_field_handler, partial(user_handler, ion_type))\n            _HANDLER_DISPATCH_TABLE[type_octet] = handler", "docstring": "Binds a set of handlers with the given factory.\n\nArgs:\ntids (Sequence[int]): The Type IDs to bind to.\nuser_handler (Callable): A function that takes as its parameters\n:class:`IonType`, ``length``, and the ``ctx`` context\nreturning a co-routine.\nlns (Sequence[int]): The low-nibble lengths to bind to.", "source": "codesearchnet"}
{"code": "def create_file(self, filename):\n    \n    self.response.write('Creating file %s\\n' % filename)\n\n    write_retry_params = gcs.RetryParams(backoff_factor=1.1)\n    gcs_file = gcs.open(filename,\n                        'w',\n                        content_type='text/plain',\n                        options={'x-goog-meta-foo': 'foo',\n                                 'x-goog-meta-bar': 'bar'},\n                        retry_params=write_retry_params)\n    gcs_file.write('abcde\\n')\n    gcs_file.write('f'*1024*4 + '\\n')\n    gcs_file.close()\n    self.tmp_filenames_to_clean_up.append(filename)", "docstring": "Create a file.\n\nThe retry_params specified in the open call will override the default\nretry params for this particular file handle.\n\nArgs:\nfilename: filename.", "source": "juraj-google-style"}
{"code": "def get(self, save_path, dataset=None):\n        \n        if dataset is None:\n            selected_dataset = self._present_options()\n        else:\n            selected_dataset = dataset\n\n        save_path_full = join(save_path, selected_dataset.split('.')[0])\n\n        if isdir(save_path_full):\n            warn(\"\\n'{0}' already exists. Voiding Download.\".format(\n                save_path_full))\n        else:\n            self._print('Downloading Data...')\n            url = \"{0}/{1}\".format(self.url, selected_dataset)\n            self._download_data(url, save_path=save_path)\n\n        return abspath(save_path_full)", "docstring": "Download a dataset.\n\nArgs:\nsave_path : str\nA directory to save the data to.\ndataset : str, optional\nA specific dataset to download.\nNote: this must include the file extension.\nIf None, options will be presented for you\nto choose from.\n\nReturns:\nsave_path_full : str\nThe absolute path to the downloaded data.", "source": "juraj-google-style"}
{"code": "def Matches(self, file_entry):\n    \n    location = getattr(file_entry.path_spec, 'location', None)\n    if not location:\n      return None\n\n    if '.' not in location:\n      return False\n\n    _, _, extension = location.rpartition('.')\n    return extension.lower() in self._extensions", "docstring": "Compares the file entry against the filter.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to compare.\n\nReturns:\nbool: True if the file entry matches the filter, False if not or\nNone if the filter does not apply.", "source": "juraj-google-style"}
{"code": "def load_graph(path: str, squeeze: bool=False) -> Tuple[Union[EventSetNode, Dict[str, EventSetNode]], Union[EventSetNode, Dict[str, EventSetNode]]]:\n    g = _load_graph(path=path)\n    inputs = g.named_inputs\n    outputs = g.named_outputs\n    assert inputs is not None\n    assert outputs is not None\n    if squeeze and len(inputs) == 1:\n        inputs = list(inputs.values())[0]\n    if squeeze and len(outputs) == 1:\n        outputs = list(outputs.values())[0]\n    return (inputs, outputs)", "docstring": "Loads a Temporian graph from a file.\n\nSee [`tp.save()`][temporian.save] and\n[`tp.save_graph()`][temporian.save_graph] for usage examples.\n\nArgs:\npath: File path to load from.\nsqueeze: If true, and if the input/output contains a single EventSetNode,\nreturns an EventSetNode (instead of a dictionary of EventSetNodes).\n\nReturns:\nInput and output EventSetNodes.", "source": "github-repos"}
{"code": "def create_cells(headers, schema_fields, values=None, row_number=None):\n    fillvalue = '_fillvalue'\n    is_header_row = (values is None)\n    cells = []\n    iterator = zip_longest(headers, schema_fields, (values or []), fillvalue=fillvalue)\n    for (column_number, (header, field, value)) in enumerate(iterator, start=1):\n        if (header == fillvalue):\n            header = None\n        elif is_header_row:\n            value = header\n        if (field == fillvalue):\n            field = None\n        if (value == fillvalue):\n            value = None\n        elif (value is None):\n            value = ''\n        cell = create_cell(header, value, field, column_number, row_number)\n        cells.append(cell)\n    return cells", "docstring": "Create list of cells from headers, fields and values.\n\nArgs:\nheaders (List[str]): The headers values.\nschema_fields (List[tableschema.field.Field]): The tableschema\nfields.\nvalues (List[Any], optional): The cells values. If not specified,\nthe created cells will have the same values as their\ncorresponding headers. This is useful for specifying headers\ncells.\nIf the list has any `None` values, as is the case on empty\ncells, the resulting Cell will have an empty string value. If\nthe `values` list has a different length than the `headers`,\nthe resulting Cell will have value `None`.\nrow_number (int, optional): The row number.\n\nReturns:\nList[dict]: List of cells.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.receive = channel.unary_stream(\n        '/predix.eventhub.Subscriber/receive',\n        request_serializer=EventHub__pb2.SubscriptionRequest.SerializeToString,\n        response_deserializer=EventHub__pb2.Message.FromString,\n        )\n    self.receiveWithAcks = channel.stream_stream(\n        '/predix.eventhub.Subscriber/receiveWithAcks',\n        request_serializer=EventHub__pb2.SubscriptionResponse.SerializeToString,\n        response_deserializer=EventHub__pb2.Message.FromString,\n        )\n    self.subscribe = channel.stream_stream(\n        '/predix.eventhub.Subscriber/subscribe',\n        request_serializer=EventHub__pb2.SubscriptionAcks.SerializeToString,\n        response_deserializer=EventHub__pb2.SubscriptionMessage.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def probe_characteristics(self, conn_id, handle, services):\n    self._command_task.async_command(['_probe_characteristics', handle, services], self._probe_characteristics_finished, {'connection_id': conn_id, 'handle': handle, 'services': services})", "docstring": "Probe a device for all characteristics defined in its GATT table\n\nThis routine must be called after probe_services and passed the services dictionary\nproduced by that method.\n\nArgs:\nhandle (int): a handle to the connection on the BLED112 dongle\nconn_id (int): a unique identifier for this connection on the DeviceManager\nthat owns this adapter.\nservices (dict): A dictionary of GATT services produced by probe_services()", "source": "codesearchnet"}
{"code": "def new(self, name, *args, **kwargs):\n    if (name in self._instance_map):\n        raise ValueError('Instance {0} is already initialized'.format(name))\n    instance = self._class_map[name](*args, **kwargs)\n    self._instance_map[name] = instance\n    return instance", "docstring": "Create an instance.\n\nArgs:\nname (str): The name of the class\nargs: The arguments to pass to the class.\nkwargs: The keyword arguments to pass to the class.\n\nReturns:\ninstance", "source": "codesearchnet"}
{"code": "def get(self, key, default=None):\n    if isinstance(key, six.text_type):\n        return self.__mapping.get(key, None)\n    if (not isinstance(key, int)):\n        raise TypeError('Key must be int or Unicode sequence.')\n    if (key == 0):\n        return SYMBOL_ZERO_TOKEN\n    index = (key - 1)\n    if ((index < 0) or (key > len(self))):\n        return default\n    return self.__symbols[index]", "docstring": "Returns a token by text or local ID, with a default.\n\nA given text image may be associated with more than one symbol ID.  This will return the first definition.\n\nNote:\nUser defined symbol IDs are always one-based.  Symbol zero is a special symbol that\nalways has no text.\n\nArgs:\nkey (unicode | int):  The key to lookup.\ndefault(Optional[SymbolToken]): The default to return if the key is not found\n\nReturns:\nSymbolToken: The token associated with the key or the default if it doesn't exist.", "source": "codesearchnet"}
{"code": "def __init__(self, vocab, shift):\n    \n    self.shift = shift\n    alphabet = vocab\n    shifted_alphabet = deque(alphabet)\n    shifted_alphabet.rotate(shift)\n    self.encrypt = dict(zip(alphabet, list(shifted_alphabet)))\n    self.decrypt = dict(zip(list(shifted_alphabet), alphabet))", "docstring": "Initialize shift layer.\n\nArgs:\nvocab: (list of String) the vocabulary\nshift: (Integer) the amount of shift apply to the alphabet.\nPositive number implies shift to the right, negative number\nimplies shift to the left.", "source": "juraj-google-style"}
{"code": "def expand(self, words):\n    return words | beam.combiners.Count.PerElement() | beam.FlatMap(extract_prefixes) | beam.combiners.Top.LargestPerKey(self._count)", "docstring": "Compute the most common words for each possible prefixes.\n\nArgs:\nwords: a PCollection of strings\n\nReturns:\nA PCollection of most common words with each prefix, in the form\n(prefix, [(count, word), (count, word), ...])", "source": "github-repos"}
{"code": "def __init__(self, batch_env):\n    \n    super(PyFuncBatchEnv, self).__init__(batch_env.observation_space,\n                                         batch_env.action_space)\n    self._batch_env = batch_env\n    with tf.variable_scope(\"env_temporary\"):\n      self._observ = tf.Variable(\n          tf.zeros((self._batch_env.batch_size,) + self.observ_shape,\n                   self.observ_dtype),\n          name=\"observ\", trainable=False)", "docstring": "Batch of environments inside the TensorFlow graph.\n\nArgs:\nbatch_env: Batch environment.", "source": "juraj-google-style"}
{"code": "def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:\n    if masks.numel() == 0:\n        return torch.zeros((0, 4), device=masks.device)\n    h, w = masks.shape[-2:]\n    y = torch.arange(0, h, dtype=torch.float32, device=masks.device)\n    x = torch.arange(0, w, dtype=torch.float32, device=masks.device)\n    y, x = torch.meshgrid(y, x, indexing='ij')\n    x_mask = masks * torch.unsqueeze(x, 0)\n    x_max = x_mask.view(x_mask.shape[0], -1).max(-1)[0]\n    x_min = torch.where(masks, x.unsqueeze(0), torch.tensor(100000000.0, device=masks.device)).view(masks.shape[0], -1).min(-1)[0]\n    y_mask = masks * torch.unsqueeze(y, 0)\n    y_max = y_mask.view(y_mask.shape[0], -1).max(-1)[0]\n    y_min = torch.where(masks, y.unsqueeze(0), torch.tensor(100000000.0, device=masks.device)).view(masks.shape[0], -1).min(-1)[0]\n    return torch.stack([x_min, y_min, x_max, y_max], 1)", "docstring": "Compute the bounding boxes around the provided panoptic segmentation masks.\n\nArgs:\nmasks: masks in format `[number_masks, height, width]` where N is the number of masks\n\nReturns:\nboxes: bounding boxes in format `[number_masks, 4]` in xyxy format", "source": "github-repos"}
{"code": "def subscribe(self, exchange_name: str, routing: str, exchange_type: ExchangeType_='topic', on_message: EVENT_CALLBACK_=None) -> EventSubscription:\n    sub = EventSubscription(exchange_name, routing, exchange_type, on_message=on_message)\n    if (self._pending is not None):\n        self._pending.put_nowait(sub)\n    else:\n        self._pending_pre_async.append(sub)\n        LOGGER.info(f'Deferred event bus subscription: [{sub}]')\n    self._lazy_listen()\n    return sub", "docstring": "Adds a new event subscription to the listener.\n\nActual queue declaration to the remote message server is done when connected.\nIf the listener is not currently connected, it defers declaration.\n\nAll existing subscriptions are redeclared on the remote if `EventListener`\nloses and recreates the connection.\n\nArgs:\nexchange_name (str):\nName of the AMQP exchange. Messages are always published to a specific exchange.\n\nrouting (str):\nFilter messages passing through the exchange.\nA routing key is a '.'-separated string, and accepts '#' and '*' wildcards.\n\nexchange_type (ExchangeType_, optional):\nIf the exchange does not yet exist, it will be created with this type.\nDefault is `topic`, acceptable values are `topic`, `fanout`, or `direct`.\n\non_message (EVENT_CALLBACK_, optional):\nThe function to be called when a new message is received.\nIf `on_message` is none, it will default to logging the message.\n\nReturns:\nEventSubscription:\nThe newly created subscription.\nThis value can safely be discarded: EventListener keeps its own reference.", "source": "codesearchnet"}
{"code": "def to_control_flow_context_def(self, context_def, export_scope=None):\n    raise NotImplementedError('Abstract method')", "docstring": "Serializes this into `context_def`.\n\nArgs:\ncontext_def: a `ControlFlowContextDef` protocol buffer.\nexport_scope: Optional `string`. Name scope to remove.", "source": "github-repos"}
{"code": "def _compute_nfp_uniform(l, u, cum_counts, sizes):\n    \n    if l > u:\n        raise ValueError(\"l must be less or equal to u\")\n    if l == 0:\n        n = cum_counts[u]\n    else:\n        n = cum_counts[u]-cum_counts[l-1]\n    return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])", "docstring": "Computes the expected number of false positives caused by using\nu to approximate set sizes in the interval [l, u], assuming uniform\ndistribution of set sizes within the interval.\n\nArgs:\nl: the lower bound on set sizes.\nu: the upper bound on set sizes.\ncum_counts: the complete cummulative distribution of set sizes.\nsizes: the complete domain of set sizes.\n\nReturn (float): the expected number of false positives.", "source": "juraj-google-style"}
{"code": "def _extract_options(config, options, *args):\n    extract = {}\n    for key in args:\n        if (key not in args):\n            continue\n        extract[key] = config[key]\n        option = getattr(options, key, None)\n        if (option is not None):\n            extract[key] = option\n    return extract", "docstring": "Extract options values from a configparser, optparse pair.\n\nOptions given on command line take precedence over options read in the\nconfiguration file.\n\nArgs:\nconfig (dict): option values read from a config file through\nconfigparser\noptions (optparse.Options): optparse 'options' object containing options\nvalues from the command line\n*args (str tuple): name of the options to extract", "source": "codesearchnet"}
{"code": "def _get_params(mcs, bases, namespace):\n        \n        params = [\n            (name, namespace.pop(name))\n            for name, attribute\n            in list(namespace.items())\n            if isinstance(attribute, BaseParam)\n        ]\n\n        for base in reversed(bases):\n            if hasattr(base, mcs._params_storage_key):\n                params = list(\n                    getattr(base, mcs._params_storage_key).items()\n                ) + params\n\n        return OrderedDict(params)", "docstring": "Create params dictionary to be used in resource class namespace.\n\nPop all parameter objects from attributes dict (namespace)\nand store them under _params_storage_key atrribute.\nAlso collect all params from base classes in order that ensures\nparams can be overriden.\n\nArgs:\nbases: all base classes of created resource class\nnamespace (dict): namespace as dictionary of attributes", "source": "juraj-google-style"}
{"code": "def value_of(self, value: Union[(sympy.Basic, float, str)]) -> Union[(sympy.Basic, float)]:\n    if isinstance(value, str):\n        return self.param_dict.get(value, sympy.Symbol(value))\n    if isinstance(value, sympy.Basic):\n        if (sys.version_info.major < 3):\n            d = {k.encode(): v for (k, v) in self.param_dict.items()}\n            v = value.subs(d)\n        else:\n            v = value.subs(self.param_dict)\n        return (v if v.free_symbols else float(v))\n    return value", "docstring": "Attempt to resolve a Symbol or name or float to its assigned value.\n\nIf unable to resolve a sympy.Symbol, returns it unchanged.\nIf unable to resolve a name, returns a sympy.Symbol with that name.\n\nArgs:\nvalue: The sympy.Symbol or name or float to try to resolve into just\na float.\n\nReturns:\nThe value of the parameter as resolved by this resolver.", "source": "codesearchnet"}
{"code": "def are_you_sure(msg=''):\n    print(msg)\n    from utool import util_arg\n    from utool import util_str\n    override = util_arg.get_argflag(('--yes', '--y', '-y'))\n    if override:\n        print('accepting based on command line flag')\n        return True\n    valid_ans = ['yes', 'y']\n    valid_prompt = util_str.conj_phrase(valid_ans, 'or')\n    ans = input(('Are you sure?\\n Enter %s to accept\\n' % valid_prompt))\n    return (ans.lower() in valid_ans)", "docstring": "r\"\"\"\nPrompts user to accept or checks command line for -y\n\nArgs:\nmsg (str):\n\nReturns:\nbool: accept or not", "source": "codesearchnet"}
{"code": "def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):\n  \n  encrypted_root_plist = key_chain.GetCredential(\n      path_spec, 'encrypted_root_plist')\n  if encrypted_root_plist:\n    fvde_volume.read_encrypted_root_plist(encrypted_root_plist)\n\n  password = key_chain.GetCredential(path_spec, 'password')\n  if password:\n    fvde_volume.set_password(password)\n\n  recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n  if recovery_password:\n    fvde_volume.set_recovery_password(recovery_password)\n\n  fvde_volume.open_file_object(file_object)", "docstring": "Opens the FVDE volume using the path specification.\n\nArgs:\nfvde_volume (pyfvde.volume): FVDE volume.\npath_spec (PathSpec): path specification.\nfile_object (FileIO): file-like object.\nkey_chain (KeyChain): key chain.", "source": "juraj-google-style"}
{"code": "class RandomNormal(Initializer):\n\n    def __init__(self, mean=0.0, stddev=0.05, seed=None):\n        self.mean = mean\n        self.stddev = stddev\n        self.seed = seed\n        self._random_generator = _RandomGenerator(seed)\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        \n        _validate_kwargs(self.__class__.__name__, kwargs)\n        dtype = _assert_float_dtype(_get_dtype(dtype))\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)\n\n    def get_config(self):\n        return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed}", "docstring": "Initializer that generates tensors with a normal distribution.\n\nAlso available via the shortcut function\n`tf.keras.initializers.random_normal`.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)\n>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\nArgs:\nmean: a python scalar or a scalar tensor. Mean of the random values to\ngenerate.\nstddev: a python scalar or a scalar tensor. Standard deviation of the random\nvalues to generate.\nseed: A Python integer. An initializer created with a given seed will\nalways produce the same random tensor for a given shape and dtype.", "source": "github-repos"}
{"code": "def init_config(self, config):\n    self.config.update(config)\n    self.config.setdefault('LDAP_PORT', 389)\n    self.config.setdefault('LDAP_HOST', None)\n    self.config.setdefault('LDAP_USE_SSL', False)\n    self.config.setdefault('LDAP_READONLY', True)\n    self.config.setdefault('LDAP_CHECK_NAMES', True)\n    self.config.setdefault('LDAP_BIND_DIRECT_CREDENTIALS', False)\n    self.config.setdefault('LDAP_BIND_DIRECT_PREFIX', '')\n    self.config.setdefault('LDAP_BIND_DIRECT_SUFFIX', '')\n    self.config.setdefault('LDAP_BIND_DIRECT_GET_USER_INFO', True)\n    self.config.setdefault('LDAP_ALWAYS_SEARCH_BIND', False)\n    self.config.setdefault('LDAP_BASE_DN', '')\n    self.config.setdefault('LDAP_BIND_USER_DN', None)\n    self.config.setdefault('LDAP_BIND_USER_PASSWORD', None)\n    self.config.setdefault('LDAP_SEARCH_FOR_GROUPS', True)\n    self.config.setdefault('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND', False)\n    self.config.setdefault('LDAP_USER_DN', '')\n    self.config.setdefault('LDAP_GROUP_DN', '')\n    self.config.setdefault('LDAP_BIND_AUTHENTICATION_TYPE', 'SIMPLE')\n    self.config.setdefault('LDAP_USER_SEARCH_SCOPE', 'LEVEL')\n    self.config.setdefault('LDAP_USER_OBJECT_FILTER', '(objectclass=person)')\n    self.config.setdefault('LDAP_USER_LOGIN_ATTR', 'uid')\n    self.config.setdefault('LDAP_USER_RDN_ATTR', 'uid')\n    self.config.setdefault('LDAP_GET_USER_ATTRIBUTES', ldap3.ALL_ATTRIBUTES)\n    self.config.setdefault('LDAP_GROUP_SEARCH_SCOPE', 'LEVEL')\n    self.config.setdefault('LDAP_GROUP_OBJECT_FILTER', '(objectclass=group)')\n    self.config.setdefault('LDAP_GROUP_MEMBERS_ATTR', 'uniqueMember')\n    self.config.setdefault('LDAP_GET_GROUP_ATTRIBUTES', ldap3.ALL_ATTRIBUTES)\n    self.config.setdefault('LDAP_ADD_SERVER', True)\n    if self.config['LDAP_ADD_SERVER']:\n        self.add_server(hostname=self.config['LDAP_HOST'], port=self.config['LDAP_PORT'], use_ssl=self.config['LDAP_USE_SSL'])", "docstring": "Configures this extension with a given configuration dictionary.\nThis allows use of this extension without a flask app.\n\nArgs:\nconfig (dict): A dictionary with configuration keys", "source": "codesearchnet"}
{"code": "def strip_graph_default_valued_attrs(meta_graph_def):\n    op_name_to_function = {}\n    for function_def in meta_graph_def.graph_def.library.function:\n        op_name_to_function[function_def.signature.name] = function_def\n\n    def _strip_node_default_valued_attrs(node_def):\n        \n        if node_def.op in op_name_to_function:\n            return\n        op_def = op_def_registry.get(node_def.op)\n        if op_def is None:\n            return\n        attrs_to_strip = set()\n        for attr_name, attr_value in node_def.attr.items():\n            if _is_default_attr_value(op_def, attr_name, attr_value):\n                attrs_to_strip.add(attr_name)\n        for attr in attrs_to_strip:\n            del node_def.attr[attr]\n    for node_def in meta_graph_def.graph_def.node:\n        _strip_node_default_valued_attrs(node_def)\n    for function_def in meta_graph_def.graph_def.library.function:\n        for function_node_def in function_def.node_def:\n            _strip_node_default_valued_attrs(function_node_def)\n    meta_graph_def.meta_info_def.stripped_default_attrs = True", "docstring": "Strips default valued attributes for node defs in given MetaGraphDef.\n\nThis method also sets `meta_info_def.stripped_default_attrs` in the given\n`MetaGraphDef` proto to True.\n\nArgs:\nmeta_graph_def: `MetaGraphDef` protocol buffer\n\nReturns:\nNone.", "source": "github-repos"}
{"code": "def select(self, cols, mode='list'):\n        \n        if isinstance(cols, stringtypes):\n            cols = _split_cols(cols)\n        if not cols:\n            cols = [f.name for f in self.fields]\n        return select_rows(cols, self, mode=mode)", "docstring": "Select columns from each row in the table.\n\nSee :func:`select_rows` for a description of how to use the\n*mode* parameter.\n\nArgs:\ncols: an iterable of Field (column) names\nmode: how to return the data", "source": "juraj-google-style"}
{"code": "def scale(self, scalar, ignored_terms=None):\n    if (ignored_terms is None):\n        ignored_terms = set()\n    else:\n        ignored_terms = {asfrozenset(term) for term in ignored_terms}\n    for term in self:\n        if (term not in ignored_terms):\n            self[term] *= scalar", "docstring": "Multiply the polynomial by the given scalar.\n\nArgs:\nscalar (number):\nValue to multiply the polynomial by.\n\nignored_terms (iterable, optional):\nBiases associated with these terms are not scaled.", "source": "codesearchnet"}
{"code": "def WaitUntilDone(self, timeout=None):\n    f = utils.Poll(generator=self.Get, condition=(lambda f: (f.data.state != f.data.RUNNING)), timeout=timeout)\n    if (f.data.state != f.data.TERMINATED):\n        raise errors.FlowFailedError(('Flow %s (%s) failed: %s' % (self.flow_id, self.client_id, f.data.context.current_state)))\n    return f", "docstring": "Wait until the flow completes.\n\nArgs:\ntimeout: timeout in seconds. None means default timeout (1 hour). 0 means\nno timeout (wait forever).\n\nReturns:\nFresh flow object.\nRaises:\nPollTimeoutError: if timeout is reached.\nFlowFailedError: if the flow is not successful.", "source": "codesearchnet"}
{"code": "def predict_proba(self, a, b, device=None):\n    device = SETTINGS.get_default(device=device)\n    if (self.model is None):\n        print('Model has to be trained before doing any predictions')\n        raise ValueError\n    if (len(np.array(a).shape) == 1):\n        a = np.array(a).reshape(((- 1), 1))\n        b = np.array(b).reshape(((- 1), 1))\n    m = np.hstack((a, b))\n    m = scale(m)\n    m = m.astype('float32')\n    m = th.from_numpy(m).t().unsqueeze(0)\n    if th.cuda.is_available():\n        m = m.cuda()\n    return ((self.model(m).data.cpu().numpy() - 0.5) * 2)", "docstring": "Infer causal directions using the trained NCC pairwise model.\n\nArgs:\na (numpy.ndarray): Variable 1\nb (numpy.ndarray): Variable 2\ndevice (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``)\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "codesearchnet"}
{"code": "def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True,\n                            role=settings.DEFAULT_ASSISTANT_ROLE):\n        \n        \n        \n        \n        name = os.path.splitext(os.path.basename(source))[0]\n        yaml_checker.check(source, y)\n        assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant,\n            fully_loaded=fully_loaded, role=role)\n\n        return assistant", "docstring": "Constructs instance of YamlAssistant loaded from given structure y, loaded\nfrom source file source.\n\nArgs:\nsource: path to assistant source file\ny: loaded yaml structure\nsuperassistant: superassistant of this assistant\nReturns:\nYamlAssistant instance constructed from y with source file source\nRaises:\nYamlError: if the assistant is malformed", "source": "juraj-google-style"}
{"code": "def __init__(self, hash_queue, hash_analysis_queue, **kwargs):\n    \n    super(ViperAnalyzer, self).__init__(\n        hash_queue, hash_analysis_queue, **kwargs)\n    self._checked_for_old_python_version = False\n    self._host = None\n    self._port = None\n    self._protocol = None\n    self._url = None", "docstring": "Initializes a Viper hash analyzer.\n\nArgs:\nhash_queue (Queue.queue): contains hashes to be analyzed.\nhash_analysis_queue (Queue.queue): that the analyzer will append\nHashAnalysis objects this queue.", "source": "juraj-google-style"}
{"code": "def from_service_account_info(cls, info, **kwargs):\n    signer = _service_account_info.from_dict(info, require=['client_email', 'token_uri'])\n    return cls._from_signer_and_info(signer, info, **kwargs)", "docstring": "Creates a Credentials instance from parsed service account info.\n\nArgs:\ninfo (Mapping[str, str]): The service account info in Google\nformat.\nkwargs: Additional arguments to pass to the constructor.\n\nReturns:\ngoogle.auth.service_account.Credentials: The constructed\ncredentials.\n\nRaises:\nValueError: If the info is not in the expected format.", "source": "codesearchnet"}
{"code": "def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):\n    cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]\n    pipeline = []\n    group = {'$group': {'_id': '$status', 'count': {'$sum': 1}}}\n    subquery = {}\n    if (institute_id and slice_query):\n        subquery = adapter.cases(owner=institute_id, name_query=slice_query, yield_query=True)\n    elif institute_id:\n        subquery = adapter.cases(owner=institute_id, yield_query=True)\n    elif slice_query:\n        subquery = adapter.cases(name_query=slice_query, yield_query=True)\n    query = ({'$match': subquery} if subquery else {})\n    if query:\n        pipeline.append(query)\n    pipeline.append(group)\n    res = adapter.case_collection.aggregate(pipeline)\n    for status_group in res:\n        cases.append({'status': status_group['_id'], 'count': status_group['count'], 'percent': (status_group['count'] / total_cases)})\n    return cases", "docstring": "Return the information about case groups\n\nArgs:\nstore(adapter.MongoAdapter)\ntotal_cases(int): Total number of cases\nslice_query(str): Query to filter cases to obtain statistics for.\n\nReturns:\ncases(dict):", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    eos = [self.eos_token_id]\n    if token_ids_1 is None:\n        return len(token_ids_0 + eos) * [0]\n    return len(token_ids_0 + eos + token_ids_1 + eos) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make\nuse of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def check_plugin(self, plugin):\n    vcf_section = self[plugin]\n    try:\n        vcf_field = vcf_section['field']\n        if (not (vcf_field in self.vcf_columns)):\n            raise ValidateError('field has to be in {0}\\nWrong field name in plugin: {1}'.format(self.vcf_columns, plugin))\n        if (vcf_field == 'INFO'):\n            try:\n                info_key = vcf_section['info_key']\n                if (info_key == 'CSQ'):\n                    try:\n                        csq_key = vcf_section['csq_key']\n                    except KeyError:\n                        raise ValidateError(\"CSQ entrys has to refer to an csq field.\\nRefer with keyword 'csq_key'\\ncsq_key is missing in section: {0}\".format(plugin))\n            except KeyError:\n                raise ValidateError(\"INFO entrys has to refer to an INFO field.\\nRefer with keyword 'info_key'\\ninfo_key is missing in section: {0}\".format(plugin))\n    except KeyError:\n        raise ValidateError(\"Vcf entrys have to refer to a field in the VCF with keyword 'field'.\\nMissing keyword 'field' in plugin: {0}\".format(plugin))\n    try:\n        data_type = vcf_section['data_type']\n        if (not (data_type in self.data_types)):\n            raise ValidateError('data_type has to be in {0}\\nWrong data_type in plugin: {1}'.format(self.data_types, plugin))\n    except KeyError:\n        raise ValidateError(\"Vcf entrys have to refer to a data type in the VCF with keyword 'data_type'.\\nMissing data_type in plugin: {0}\".format(plugin))\n    separators = vcf_section.get('separators', None)\n    if separators:\n        if (len(separators) == 1):\n            self[plugin]['separators'] = list(separators)\n    elif (data_type != 'flag'):\n        raise ValidateError('If data_type != flag the separators have to be definedMissing separators in plugin: {0}'.format(plugin))\n    record_rule = vcf_section.get('record_rule', None)\n    if record_rule:\n        if (not (record_rule in ['min', 'max'])):\n            raise ValidateError('Record rules have to be in {0}\\nWrong record_rule in plugin: {1}'.format(['min', 'max'], plugin))\n    else:\n        self.logger.info(\"Setting record rule to default: 'max'\")\n    return True", "docstring": "Check if the section is in the proper format vcf format.\n\nArgs:\nvcf_section (dict): The information from a vcf section\n\nReturns:\nTrue is it is in the proper format", "source": "codesearchnet"}
{"code": "def _enumerate_cores(bounds: List[int], ring_bounds: List[int], ring_sizes: List[int], host_bounds: List[int], host_sizes: List[int]) -> List[List[int]]:\n    if not bounds:\n        return [[]]\n    partials = _enumerate_cores(bounds[:-1], ring_bounds[:-1], ring_sizes[:-1], host_bounds[:-1], host_sizes[:-1])\n    results = []\n    for ring_i in range(0, bounds[-1], ring_bounds[-1]):\n        for ring_j in range(0, len(partials), ring_sizes[-1]):\n            for host_i in range(ring_i, ring_i + ring_bounds[-1], host_bounds[-1]):\n                for host_j in range(ring_j, ring_j + ring_sizes[-1], host_sizes[-1]):\n                    for i in range(host_i, host_i + host_bounds[-1]):\n                        for j in range(host_j, host_j + host_sizes[-1]):\n                            results.append(partials[j] + [i])\n    return results", "docstring": "Enumerates cores within `bounds` from fatest to slowest varying axes.\n\nArgs:\nbounds: Upper bounds of axes, from fastest to slowest varying.\nring_bounds: Upper bounds of ring size per axis in the same axis order.\nring_sizes: Number consecutive cores in the ring built so far, cumulatively.\nhost_bounds: Number of axis values per host in the same axis order.\nhost_sizes: Number consecutive cores on one host, cumulatively.\n\nReturns:\nCores represented as a list of 4 integers in the same axis order.", "source": "github-repos"}
{"code": "def new_partition(self, table, **kwargs):\n        \n\n        from . import Partition\n\n        \n\n        if isinstance(table, string_types):\n            table = self.table(table)\n\n        if 'sequence_id' in kwargs:\n            sequence_id = kwargs['sequence_id']\n            del kwargs['sequence_id']\n        else:\n            sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition)\n\n        p = Partition(\n            t_vid=table.vid,\n            table_name=table.name,\n            sequence_id=sequence_id,\n            dataset=self,\n            d_vid=self.vid,\n            **kwargs\n        )\n\n\n        p.update_id()\n\n        return p", "docstring": "Creates new partition and returns it.\n\nArgs:\ntable (orm.Table):\n\nReturns:\norm.Partition", "source": "juraj-google-style"}
{"code": "def import_settings(self, filename):\n        \n\n        if not os.path.isfile(filename):\n            self._logger.log(\n                'error',\n                'File: {} not found, continuing with default settings'.format(\n                    filename\n                )\n            )\n        else:\n            with open(filename, 'r') as jsonFile:\n                data = json.load(jsonFile)\n                self._value_ranges = data['valueRanges']\n                self._best_values = data['best_values']\n                self._best_values = []\n                for index, value in enumerate(data['best_values']):\n                    if self._value_ranges[index] == 'int':\n                        self._best_values.append(int(value))\n                    else:\n                        self._best_values.append(float(value))\n                self.minimize = data['minimize']\n                self.num_employers = data['num_employers']\n                self._best_score = float(data['best_score'])\n                self.limit = data['limit']", "docstring": "Import settings from a JSON file\n\nArgs:\nfilename (string): name of the file to import from", "source": "juraj-google-style"}
{"code": "def get_members(self, **query_params):\n    members = self.get_members_json(self.base_uri, query_params=query_params)\n    members_list = []\n    for member_json in members:\n        members_list.append(self.create_member(member_json))\n    return members_list", "docstring": "Get all members attached to this organisation. Returns a list of\nMember objects\n\nReturns:\nlist(Member): The members attached to this organisation", "source": "codesearchnet"}
{"code": "def __init__(self, code, component_trace):\n    super().__init__(code)\n    self.trace = component_trace", "docstring": "Constructs a FireExit exception.\n\nArgs:\ncode: (int) Exit code for the Fire CLI.\ncomponent_trace: (FireTrace) The trace for the Fire command.", "source": "github-repos"}
{"code": "def conditionally_create_kms_key(role_name, service_type):\n  \n  if service_type not in KMS_SERVICE_TYPES:\n    print_if_verbose(\"not eligible for kms; service_type: {} is not valid for kms\".format(service_type))\n    return\n\n  \n  key_alias = role_name.replace('.', '_')\n\n  try:\n    kms_key = CLIENTS[\"kms\"].describe_key(KeyId='alias/{}'.format(key_alias))\n  except ClientError as error:\n    if error.response['Error']['Code'] == 'NotFoundException':\n      kms_key = None\n    else:\n      fail(\"Exception describing KMS key: {} {}\".format(role_name, error))\n\n  if service_type == \"aws_fixture\":\n    kms_key_policy =  + CONTEXT.account_id + \n  else:\n    formatted_principal = '\"AWS\": \"arn:aws:iam::{}:role/{}\"'.format(CONTEXT.account_id, role_name)\n    kms_key_policy =  + CONTEXT.account_id +  + formatted_principal +  + CONTEXT.account_id +  + CONTEXT.account_id + \n\n  if not kms_key:\n    print(\"Create KMS key: {}\".format(key_alias))\n    if CONTEXT.commit:\n      \n      \n      create_key_failures = 0\n      while create_key_failures <= 5:\n        try:\n          new_kms_key = CLIENTS[\"kms\"].create_key(\n            Policy=kms_key_policy,\n            Description='Master Key for {}'.format(role_name)\n          )\n          break\n        except ClientError as error:\n          if error.response['Error']['Code'] == 'MalformedPolicyDocumentException':\n            if create_key_failures == 5:\n              fail(\"Exception creating kms key: {} {}\".format(role_name, error))\n            else:\n              create_key_failures += 1\n              time.sleep(5)\n          else:\n            fail(\"Exception creating kms key: {} {}\".format(role_name, error))\n\n      \n      try:\n        CLIENTS[\"kms\"].create_alias(\n          AliasName='alias/{}'.format(key_alias),\n          TargetKeyId=new_kms_key['KeyMetadata']['KeyId']\n        )\n      except ClientError as error:\n        fail(\"Exception creating alias for kms key: {} {}\".format(role_name, error))\n  else:\n    print_if_verbose(\"KMS key already exists: {}\".format(key_alias))", "docstring": "Create KMS Master Key for encryption/decryption of sensitive values in cf templates and latebind configs\nArgs:\nrole_name: name of the role that kms key is being created for; it will be given decrypt privileges.\nservice_type: service registry service type: 'aws_ec2', 'aws_fixture', 'aws_lambda', or 'http_service'", "source": "juraj-google-style"}
{"code": "def annotated(func, name=None):\n    \n\n    if hasattr(func, 'metadata'):\n        if name is not None:\n            func.metadata = AnnotatedMetadata(func, name)\n        return func\n\n    func.metadata = AnnotatedMetadata(func, name)\n\n    func.finalizer = False\n    func.takes_cmdline = False\n    func.decorated = False\n    func.context = False\n\n    return func", "docstring": "Mark a function as callable from the command line.\n\nThis function is meant to be called as decorator.  This function\nalso initializes metadata about the function's arguments that is\nbuilt up by the param decorator.\n\nArgs:\nfunc (callable): The function that we wish to mark as callable\nfrom the command line.\nname (str): Optional string that will override the function's\nbuilt-in name.", "source": "juraj-google-style"}
{"code": "def create_cloudtrail(self, region):\n    ct = self.session.client('cloudtrail', region_name=region)\n    self.create_sns_topic(region)\n    ct.create_trail(Name=self.trail_name, S3BucketName=self.bucket_name, S3KeyPrefix=self.account.account_name, IsMultiRegionTrail=True, IncludeGlobalServiceEvents=True, SnsTopicName=self.topic_name)\n    self.subscribe_sns_topic_to_sqs(region)\n    auditlog(event='cloudtrail.create_cloudtrail', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    self.log.info('Created CloudTrail for {} in {} ({})'.format(self.account, region, self.bucket_name))", "docstring": "Creates a new CloudTrail Trail\n\nArgs:\nregion (str): Name of the AWS region\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def update_compliance_all(self, information, timeout=-1):\n        \n\n        uri = self.URI + \"/compliance\"\n        result = self._helper.update(information, uri, timeout=timeout)\n\n        return result", "docstring": "Returns SAS Logical Interconnects to a consistent state. The current SAS Logical Interconnect state is\ncompared to the associated SAS Logical Interconnect group.\n\nArgs:\ninformation: Can be either the resource ID or URI.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: SAS Logical Interconnect.", "source": "juraj-google-style"}
{"code": "def _grad_fn(ys, xs, args, func_graph):\n    grad_ys = args[3:]\n    grad_outs = gradients_util._GradientsHelper(ys, xs, grad_ys=grad_ys, src_graph=func_graph, unconnected_gradients='zero')\n    assert all((g is not None for g in grad_outs))\n    counter = args[0]\n    maximum_iterations = args[1]\n    total_iters = args[2]\n    return [counter + 1, maximum_iterations, total_iters] + grad_outs", "docstring": "Computes the gradient of `func_graph` in the current graph.\n\nThis function builds the gradient graph of the corresponding forward-pass\n`func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs.\n\nArgs:\nys: A `Tensor` or list of tensors to be differentiated.\nxs: A `Tensor` or list of tensors to be used for differentiation.\nargs: The input arguments.\nargs[0] - Loop counter\nargs[1] - Total number of iterations.\nargs[2] - maximum_iterations.\nargs[3:] - Incoming gradients for `ys`.\nfunc_graph: function.FuncGraph. The corresponding forward-pass function.\n\nReturns:\nThe output gradient Tensors.", "source": "github-repos"}
{"code": "def _parse_networks(service_list: dict) -> list:\n    networks = []\n    for n_values in service_list['networks'].values():\n        for (n_key, n_value) in n_values.items():\n            if ('name' in n_key):\n                networks.append(n_value)\n    return networks", "docstring": "Parse network key.\n\nArgs:\nservice_list (dict): Service configurations\n\nReturns:\nlist, List of networks", "source": "codesearchnet"}
{"code": "def timeseries_from_mat(filename, varname=None, fs=1.0):\n    \n    import scipy.io as sio\n    if varname is None:\n        mat_dict = sio.loadmat(filename)\n        if len(mat_dict) > 1:\n            raise ValueError('Must specify varname: file contains '\n                             'more than one variable. ')\n    else:\n        mat_dict = sio.loadmat(filename, variable_names=(varname,))\n        array = mat_dict.popitem()[1]\n    return Timeseries(array, fs=fs)", "docstring": "load a multi-channel Timeseries from a MATLAB .mat file\n\nArgs:\nfilename (str): .mat file to load\nvarname (str): variable name. only needed if there is more than one\nvariable saved in the .mat file\nfs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)\n\nReturns:\nTimeseries", "source": "juraj-google-style"}
{"code": "def create_view(self, state_root_hash=None):\n    if (state_root_hash is None):\n        state_root_hash = INIT_ROOT_KEY\n    merkle_db = MerkleDatabase(self._database, merkle_root=state_root_hash)\n    return StateView(merkle_db)", "docstring": "Creates a StateView for the given state root hash.\n\nArgs:\nstate_root_hash (str): The state root hash of the state view\nto return.  If None, returns the state view for the\nReturns:\nStateView: state view locked to the given root hash.", "source": "codesearchnet"}
{"code": "def module_name_from_path(folder_name, verbose=False):\n    folder_name = folder_name.split('.pyc')[0]\n    folder_name = folder_name.split('.py')[0]\n    folder_name = os.path.normpath(folder_name)\n    path = (folder_name + '/')\n    package = get_python_package(path)\n    module = []\n    if verbose:\n        print(('folder_name', folder_name))\n    while True:\n        path = os.path.dirname(path)\n        module.append(os.path.basename(path))\n        if (os.path.basename(path) == package):\n            path = os.path.dirname(path)\n            break\n        if (os.path.dirname(path) == path):\n            (path, module) = (None, None)\n            break\n        if verbose:\n            print(('path', path, os.path.dirname(path)))\n        if verbose:\n            print(('module', module))\n    if verbose:\n        print(('module', module))\n    module.reverse()\n    module = '.'.join(module)\n    return (module, path)", "docstring": "takes in a path to a folder or file and return the module path and the path to the module\n\nthe module is idenitified by\nthe path being in os.path, e.g. if /Users/Projects/Python/ is in os.path,\nthen folder_name = '/Users/PycharmProjects/pylabcontrol/pylabcontrol/scripts/script_dummy.pyc'\nreturns '/Users/PycharmProjects/' as the path and pylabcontrol.scripts.script_dummy as the module\n\nArgs:\nfolder_name: path to a file of the form\n'/Users/PycharmProjects/pylabcontrol/pylabcontrol/scripts/script_dummy.pyc'\n\nReturns:\nmodule: a string of the form, e.g. pylabcontrol.scripts.script_dummy ...\npath: a string with the path to the module, e.g. /Users/PycharmProjects/", "source": "codesearchnet"}
{"code": "def patch_traces(self, traces, project_id=None):\n        \n        if project_id is None:\n            project_id = self.project\n\n        self.trace_api.patch_traces(project_id=project_id, traces=traces)", "docstring": "Sends new traces to Stackdriver Trace or updates existing traces.\n\nArgs:\ntraces (dict): Required. The traces to be patched in the API call.\n\nproject_id (Optional[str]): ID of the Cloud project where the trace\ndata is stored.", "source": "juraj-google-style"}
{"code": "def mkdirs(self, path):\n    pass", "docstring": "Recursively create directories for the provided path.\n\nArgs:\npath: string path of the directory structure that should be created\n\nRaises:\nIOError: if leaf directory already exists.", "source": "github-repos"}
{"code": "def dbmax_stddev(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmax_stddev`'.format(value))\n    self._dbmax_stddev = value", "docstring": "Corresponds to IDD Field `dbmax_stddev`\nStandard deviation of extreme annual maximum dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax_stddev`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_unparsed_moves_from_last_n_games(games, games_nr, n, moves=(2 ** 21), shuffle=True, column_family=TFEXAMPLE, column='example', values_only=True):\n    (ct_r, ct_nr) = (9, 1)\n    ct_total = (ct_r + ct_nr)\n    fr_r = (ct_r / ct_total)\n    fr_nr = (ct_nr / ct_total)\n    resign = games.moves_from_last_n_games(math.ceil((n * fr_r)), math.ceil((moves * fr_r)), shuffle, column_family, column)\n    no_resign = games_nr.moves_from_last_n_games(math.floor((n * fr_nr)), math.floor((moves * fr_nr)), shuffle, column_family, column)\n    selection = np.array((([0] * ct_r) + ([1] * ct_nr)), dtype=np.int64)\n    choice = tf.data.Dataset.from_tensor_slices(selection).repeat().take(moves)\n    ds = tf.contrib.data.choose_from_datasets([resign, no_resign], choice)\n    if shuffle:\n        ds = ds.shuffle((len(selection) * 2))\n    if values_only:\n        ds = ds.map((lambda row_name, s: s))\n    return ds", "docstring": "Get a dataset of serialized TFExamples from the last N games.\n\nArgs:\ngames, games_nr: GameQueues of the regular selfplay and calibration\n(aka 'no resign') games to sample from.\nn:  an integer indicating how many past games should be sourced.\nmoves:  an integer indicating how many moves should be sampled\nfrom those N games.\ncolumn_family:  name of the column family containing move examples.\ncolumn:  name of the column containing move examples.\nshuffle:  if True, shuffle the selected move examples.\nvalues_only: if True, return only column values, no row keys.\n\nReturns:\nA dataset containing no more than `moves` examples, sampled\nrandomly from the last `n` games in the table.", "source": "codesearchnet"}
{"code": "def write_merged_bioassembly(inpath, outdir, outname, force_rerun=False):\n    outpath = outfile = op.join(outdir, (outname + '.pdb'))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=op.join(outdir, (outname + '.pdb'))):\n        s = StructProp('Model merging', structure_path=inpath, file_type='pdb')\n        ss = s.parse_structure()\n        merge_all_models_into_first_model(ss.structure)\n        outpath = ss.write_pdb(custom_name=outname, out_dir=outdir, force_rerun=force_rerun)\n    else:\n        return outpath", "docstring": "Utility to take as input a bioassembly file and merge all its models into multiple chains in a single model.\n\nArgs:\ninfile (str): Path to input PDB file with multiple models that represent an oligomeric form of a structure.\noutdir (str): Path to output directory\noutname (str): New filename of structure file\nforce_rerun (bool): If a new PDB should be written if the file exists\n\nReturns:\nstr: Path to newly written PDB file.", "source": "codesearchnet"}
{"code": "def get(self, param=None, must=[APIKEY]):\n        \n        param = {} if param is None else param\n        r = self.verify_param(param, must)\n        if not r.is_succ():\n            return r\n        handle = CommonResultHandler(lambda rsp: {VERSION_V1:rsp.get(USER), VERSION_V2:rsp}[self.version()])\n        return self.path('get.json').post(param, handle, r)", "docstring": "查账户信息\n\n参数名 类型 是否必须 描述 示例\napikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526\n\nArgs:\nparam: (Optional)\nResults:\nResult", "source": "juraj-google-style"}
{"code": "def _compute_dtype(self):\n    return self._dtype_policy.compute_dtype", "docstring": "The layer's compute dtype.\n\nUnless mixed-precision is used, this is the same as `Layer.dtype`.\n\nIf self._autocast is True, layer's will cast floating-point inputs to this.\n\nReturns:\nThe layer's compute dtype.", "source": "github-repos"}
{"code": "def get_nanopub_urls(ns_root_url: str=None, start_dt: str=None) -> dict:\n    if (not ns_root_url):\n        ns_root_url = config['bel_api']['servers']['nanopubstore']\n    url = f'{ns_root_url}/nanopubs/timed'\n    if (not start_dt):\n        start_dt = get_nanopubstore_start_dt(ns_root_url)\n    params = {'startTime': start_dt, 'published': True}\n    r = bel.utils.get_url(url, params=params, cache=False)\n    if (r and (r.status_code == 200)):\n        data = r.json()\n        new_start_dt = data['queryTime']\n        update_nanopubstore_start_dt(ns_root_url, new_start_dt)\n        nanopub_urls = {'modified': [], 'deleted': []}\n        for nid in data['deleteddata']:\n            nanopub_urls['deleted'].append(f'{ns_root_url}/nanopubs/{nid}')\n        for nid in data['data']:\n            nanopub_urls['modified'].append(f'{ns_root_url}/nanopubs/{nid}')\n        return nanopub_urls\n    else:\n        log.error(f'Bad request to Nanopubstore', url=url, status=r.status_code, type='api_request')\n        return {}", "docstring": "Get modified and deleted nanopub urls\n\nLimited by last datetime retrieved (start_dt).  Modified includes new and updated nanopubs\n\nReturns:\ndict: {'modified': [], 'deleted': []}", "source": "codesearchnet"}
{"code": "def _reduce_sum_grad(op, grad):\n    if op.get_attr('reduction') != b'sum':\n        raise LookupError('No gradient defined for NcclAllReduce except for reduction=\"sum\".')\n    _check_device(grad, expected=op.device)\n    with ops.device(op.device):\n        result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)\n    return [result] * len(op.inputs)", "docstring": "The gradients for input `Operation` of `reduce_sum`.\n\nArgs:\nop: The `sum send` `Operation` that we are differentiating.\ngrad: Gradient with respect to the output of the `reduce_sum` op.\n\nReturns:\nThe gradient with respect to the input of `reduce_sum` op.\n\nRaises:\nLookupError: If the reduction attribute of op is not `sum`.", "source": "github-repos"}
{"code": "def enable_imu_streaming(self, enabled_imus, enabled_sensors=SENSOR_ALL):\n    imus_enabled = 0\n    for imu in enabled_imus:\n        imus_enabled |= (1 << imu)\n    if (enabled_sensors == 0):\n        logger.warn('Not enabling IMUs, no sensors enabled!')\n        return False\n    if (not self.dongle._enable_imu_streaming(self, imus_enabled, enabled_sensors)):\n        logger.warn('Failed to enable IMU streaming (imus_enabled={}, enabled_sensors={}'.format(imus_enabled, enabled_sensors))\n        return False\n    self.enabled_imus = enabled_imus\n    self.enabled_sensors = enabled_sensors\n    return True", "docstring": "Configures and enables IMU sensor data streaming.\n\nNOTE: only one streaming mode can be active at any time, so e.g. if you\nwant to stream IMU data, you must disable SK8-ExtAna streaming first.\n\nArgs:\nenabled_imus (list): a list of distinct ints in the range `0`-`4`\ninclusive identifying the IMU. `0` is the SK8 itself, and\n`1`-`4` are the subsidiary IMUs on the USB chain, starting\nfrom the end closest to the SK8.\nenabled_sensors (int): to save battery, you can choose to enable some\nor all of the sensors on each enabled IMU. By default, the\naccelerometer, magnetometer, and gyroscope are all enabled. Pass\na bitwise OR of one or more of :const:`SENSOR_ACC`,\n:const:`SENSOR_MAG`, and :const:`SENSOR_GYRO` to gain finer\ncontrol over the active sensors.\n\nReturns:\nbool. True if successful, False if an error occurred.", "source": "codesearchnet"}
{"code": "def Begin(self, function_name):\n    self.in_a_function = True\n    self.lines_in_function = 0\n    self.current_function = function_name", "docstring": "Start analyzing function body.\n\nArgs:\nfunction_name: The name of the function being tracked.", "source": "codesearchnet"}
{"code": "def input(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.')\n    return self._get_node_attribute_at_index(0, 'input_tensors', 'input')", "docstring": "Retrieves the input tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one input,\ni.e. if it is connected to one incoming layer.\n\nReturns:\nInput tensor or list of input tensors.\n\nRaises:\nRuntimeError: If called in Eager mode.\nAttributeError: If no inbound nodes are found.", "source": "github-repos"}
{"code": "def get_i_name(self, num, is_oai=None):\n    if (num not in (1, 2)):\n        raise ValueError('`num` parameter have to be 1 or 2!')\n    if (is_oai is None):\n        is_oai = self.oai_marc\n    i_name = ('ind' if (not is_oai) else 'i')\n    return (i_name + str(num))", "docstring": "This method is used mainly internally, but it can be handy if you work\nwith with raw MARC XML object and not using getters.\n\nArgs:\nnum (int): Which indicator you need (1/2).\nis_oai (bool/None): If None, :attr:`.oai_marc` is\nused.\n\nReturns:\nstr: current name of ``i1``/``ind1`` parameter based on \\\n:attr:`oai_marc` property.", "source": "codesearchnet"}
{"code": "async def get_in_tree_template(link):\n    context = link.context\n    source_url = get_source_url(link)\n    if (not source_url.endswith(('.yml', '.yaml'))):\n        raise CoTError(\"{} source url {} doesn't end in .yml or .yaml!\".format(link.name, source_url))\n    tmpl = (await load_json_or_yaml_from_url(context, source_url, os.path.join(context.config['work_dir'], '{}_taskcluster.yml'.format(link.name))))\n    return tmpl", "docstring": "Get the in-tree json-e template for a given link.\n\nBy convention, this template is SOURCE_REPO/.taskcluster.yml.\n\nArgs:\nlink (LinkOfTrust): the parent link to get the source url from.\n\nRaises:\nCoTError: on non-yaml `source_url`\nKeyError: on non-well-formed source template\n\nReturns:\ndict: the first task in the template.", "source": "codesearchnet"}
{"code": "def assert_shape_match(shape1, shape2):\n    shape1 = tf.TensorShape(shape1)\n    shape2 = tf.TensorShape(shape2)\n    if ((shape1.ndims is None) or (shape2.ndims is None)):\n        raise ValueError(('Shapes must have known rank. Got %s and %s.' % (shape1.ndims, shape2.ndims)))\n    shape1.assert_same_rank(shape2)\n    shape1.assert_is_compatible_with(shape2)", "docstring": "Ensure the shape1 match the pattern given by shape2.\n\nEx:\nassert_shape_match((64, 64, 3), (None, None, 3))\n\nArgs:\nshape1 (tuple): Static shape\nshape2 (tuple): Dynamic shape (can contain None)", "source": "codesearchnet"}
{"code": "def plot_densities(self, ax=None, **kwargs):\n        \n        ax, fig, plt = get_ax_fig_plt(ax)\n\n        ax.grid(True)\n        ax.set_xlabel('r [Bohr]')\n        \n\n        for i, den_name in enumerate([\"ae_core_density\", \"pseudo_core_density\"]):\n            rden = getattr(self, den_name)\n            label = \"$n_c$\" if i == 1 else r\"$\\tilde{n}_c$\"\n            ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)\n\n        ax.legend(loc=\"best\")\n\n        return fig", "docstring": "Plot the PAW densities.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n`matplotlib` figure", "source": "juraj-google-style"}
{"code": "def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):\n    \n    if(layer_index < 0 or layer_index > self.num_hidden_layers):\n      raise ValueError('Invalid layer index')\n\n    layer_type = self.layer_types[layer_index]\n    weight = self.weights[layer_index]\n    if is_abs:\n      weight = tf.abs(weight)\n    if is_transpose:\n      vector = tf.reshape(vector, self.output_shapes[layer_index])\n    else:\n      vector = tf.reshape(vector, self.input_shapes[layer_index])\n\n    if layer_type in {'ff', 'ff_relu'}:\n      if is_transpose:\n        weight = tf.transpose(weight)\n      return_vector = tf.matmul(weight, vector)\n    elif layer_type in {'conv', 'conv_relu'}:\n      if is_transpose:\n        return_vector = tf.nn.conv2d_transpose(vector,\n                                               weight,\n                                               output_shape=self.input_shapes[layer_index],\n                                               strides=[1, self.cnn_params[layer_index]['stride'],\n                                                        self.cnn_params[layer_index]['stride'], 1],\n                                               padding=self.cnn_params[layer_index]['padding'])\n      else:\n        return_vector = tf.nn.conv2d(vector,\n                                     weight,\n                                     strides=[1, self.cnn_params[layer_index]['stride'],\n                                              self.cnn_params[layer_index]['stride'], 1],\n                                     padding=self.cnn_params[layer_index]['padding'])\n    else:\n      raise NotImplementedError('Unsupported layer type: {0}'.format(layer_type))\n    if is_transpose:\n      return tf.reshape(return_vector, (self.sizes[layer_index], 1))\n    return tf.reshape(return_vector, (self.sizes[layer_index + 1], 1))", "docstring": "Performs forward pass through the layer weights at layer_index.\n\nArgs:\nvector: vector that has to be passed through in forward pass\nlayer_index: index of the layer\nis_transpose: whether the weights of the layer have to be transposed\nis_abs: whether to take the absolute value of the weights\n\nReturns:\ntensor that corresponds to the forward pass through the layer\nRaises:\nValueError: if the layer_index is negative or more than num hidden layers", "source": "juraj-google-style"}
{"code": "def compress_artifact_if_supported(artifact_path):\n    (content_type, encoding) = guess_content_type_and_encoding(artifact_path)\n    log.debug('\"{}\" is encoded with \"{}\" and has mime/type \"{}\"'.format(artifact_path, encoding, content_type))\n    if ((encoding is None) and (content_type in _GZIP_SUPPORTED_CONTENT_TYPE)):\n        log.info('\"{}\" can be gzip\\'d. Compressing...'.format(artifact_path))\n        with open(artifact_path, 'rb') as f_in:\n            text_content = f_in.read()\n        with gzip.open(artifact_path, 'wb') as f_out:\n            f_out.write(text_content)\n        encoding = 'gzip'\n        log.info('\"{}\" compressed'.format(artifact_path))\n    else:\n        log.debug('\"{}\" is not supported for compression.'.format(artifact_path))\n    return (content_type, encoding)", "docstring": "Compress artifacts with GZip if they're known to be supported.\n\nThis replaces the artifact given by a gzip binary.\n\nArgs:\nartifact_path (str): the path to compress\n\nReturns:\ncontent_type, content_encoding (tuple):  Type and encoding of the file. Encoding equals 'gzip' if compressed.", "source": "codesearchnet"}
{"code": "def _version_from_file(path_to_version, default_version=DEFAULT_VERSION):\n    version_filepath = os.path.join(path_to_version, 'version.txt')\n    if (not os.path.isfile(version_filepath)):\n        warnings.warn('Unable to resolve current version', exceptions.ProsperDefaultVersionWarning)\n        return default_version\n    with open(version_filepath, 'r') as v_fh:\n        data = v_fh.read()\n    return data", "docstring": "for PyPI installed versions, just get data from file\n\nArgs:\npath_to_version (str): abspath to dir where version.txt exists\ndefault_version (str): fallback version in case of error\n\nReturns:\nstr: current working version", "source": "codesearchnet"}
{"code": "def DeregisterPathSpec(cls, path_spec_type):\n    type_indicator = path_spec_type.TYPE_INDICATOR\n    if (type_indicator not in cls._path_spec_types):\n        raise KeyError('Path specification type: {0:s} not set.'.format(type_indicator))\n    del cls._path_spec_types[type_indicator]\n    if (type_indicator in cls._system_level_type_indicators):\n        del cls._system_level_type_indicators[type_indicator]", "docstring": "Deregisters a path specification.\n\nArgs:\npath_spec_type (type): path specification type.\n\nRaises:\nKeyError: if path specification is not registered.", "source": "codesearchnet"}
{"code": "def __init__(self, macs=[], bt_device=''):\n        \n\n        self._run_flag = RunFlag()\n        self._subjects = []\n\n        m = Manager()\n        q = m.Queue()\n\n        \n        self._shared_data = m.dict()\n        self._shared_data['run_flag'] = True\n\n        \n        notify_thread = Thread(target=RuuviTagReactive._data_update, args=(self._subjects, q, self._run_flag))\n        notify_thread.start()\n\n        \n        executor = ProcessPoolExecutor(1)\n        executor.submit(_run_get_data_background, macs, q, self._shared_data, bt_device)", "docstring": "Start background process for get_datas and async task for notifying all subscribed observers\n\nArgs:\nmacs (list): MAC addresses\nbt_device (string): Bluetooth device id", "source": "juraj-google-style"}
{"code": "def member_create(self, params, member_id):\n        \n        member_config = params.get('rsParams', {})\n        server_id = params.pop('server_id', None)\n        version = params.pop('version', self._version)\n        proc_params = {'replSet': self.repl_id}\n        proc_params.update(params.get('procParams', {}))\n        if self.enable_ipv6:\n            enable_ipv6_single(proc_params)\n        \n        proc_params = self._strip_auth(proc_params)\n\n        \n        server_id = self._servers.create(\n            name='mongod',\n            procParams=proc_params,\n            sslParams=self.sslParams,\n            version=version,\n            server_id=server_id\n        )\n        member_config.update({\"_id\": member_id,\n                              \"host\": self._servers.hostname(server_id)})\n        return member_config", "docstring": "start new mongod instances as part of replica set\nArgs:\nparams - member params\nmember_id - member index\n\nreturn member config", "source": "juraj-google-style"}
{"code": "def __init__(self, words=None):\n    \n\n    words = self.sanitize_words(words)\n    self.word_id = {w:i for i, w in enumerate(words)}\n    self.id_word = {i:w for w,i in iteritems(self.word_id)}", "docstring": "Build attributes word_id and id_word from input.\n\nArgs:\nwords (list): list of sorted words according to frequency.", "source": "juraj-google-style"}
{"code": "def cast_to_common_dtype(tensors):\n    highest_float = None\n    highest_float_size = -1\n    for x in tensors:\n        dtype = backend.standardize_dtype(x.dtype)\n        if is_float(dtype):\n            if highest_float is None or dtype_size(dtype) > highest_float_size:\n                highest_float = dtype\n                highest_float_size = dtype_size(dtype)\n            elif dtype == 'float16' and highest_float == 'bfloat16':\n                highest_float = 'float32'\n                highest_float_size = dtype_size(highest_float)\n    if highest_float:\n        tensors = [ops.cast(x, highest_float) for x in tensors]\n    return tensors", "docstring": "Cast a list of tensors to a common dtype.\n\nIf any tensor is floating-point, they will all be casted to the most-precise\nfloating-point dtype. Otherwise the tensors are not casted.\n\nArgs:\ntensors: A list of tensors.\n\nReturns:\nSame list, casted to a common dtype.", "source": "github-repos"}
{"code": "def create_game(self, map_name, bot_difficulty=sc_pb.VeryEasy, bot_race=sc_common.Random, bot_first=False):\n    self._controller.ping()\n    map_inst = maps.get(map_name)\n    map_data = map_inst.data(self._run_config)\n    if (map_name not in self._saved_maps):\n        self._controller.save_map(map_inst.path, map_data)\n        self._saved_maps.add(map_name)\n    create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data), disable_fog=False)\n    if (not bot_first):\n        create.player_setup.add(type=sc_pb.Participant)\n    create.player_setup.add(type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)\n    if bot_first:\n        create.player_setup.add(type=sc_pb.Participant)\n    self._controller.create_game(create)", "docstring": "Create a game, one remote agent vs the specified bot.\n\nArgs:\nmap_name: The map to use.\nbot_difficulty: The difficulty of the bot to play against.\nbot_race: The race for the bot.\nbot_first: Whether the bot should be player 1 (else is player 2).", "source": "codesearchnet"}
{"code": "def _commit_change(alias_table, export_path=None, post_commit=True):\n    \n    with open(export_path or GLOBAL_ALIAS_PATH, 'w+') as alias_config_file:\n        alias_table.write(alias_config_file)\n        if post_commit:\n            alias_config_file.seek(0)\n            alias_config_hash = hashlib.sha1(alias_config_file.read().encode('utf-8')).hexdigest()\n            AliasManager.write_alias_config_hash(alias_config_hash)\n            collided_alias = AliasManager.build_collision_table(alias_table.sections())\n            AliasManager.write_collided_alias(collided_alias)\n            build_tab_completion_table(alias_table)", "docstring": "Record changes to the alias table.\nAlso write new alias config hash and collided alias, if any.\n\nArgs:\nalias_table: The alias table to commit.\nexport_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH.\npost_commit: True if we want to perform some extra actions after writing alias to file.", "source": "juraj-google-style"}
{"code": "def SetIndexName(self, index_name):\n    \n    self._index_name = index_name\n    logger.debug('Elasticsearch index name: {0:s}'.format(index_name))", "docstring": "Set the index name.\n\nArgs:\nindex_name (str): name of the index.", "source": "juraj-google-style"}
{"code": "def value(self):\n    return self._snapshot", "docstring": "Returns the last snapshot of this variable.\n\nYou usually do not need to call this method as all ops that need the value\nof the variable call it automatically through a `convert_to_tensor()` call.\n\nReturns a `Tensor` which holds the value of the variable.  You can not\nassign a new value to this tensor as it is not a reference to the variable.\n\nTo avoid copies, if the consumer of the returned value is on the same device\nas the variable, this actually returns the live value of the variable, not\na copy.  Updates to the variable are seen by the consumer.  If the consumer\nis on a different device it will get a copy of the variable.\n\nReturns:\nA `Tensor` containing the value of the variable.", "source": "github-repos"}
{"code": "def __init__(self, cache_address):\n    \n    super(CacheAddress, self).__init__()\n    self.block_number = None\n    self.block_offset = None\n    self.block_size = None\n    self.filename = None\n    self.value = cache_address\n\n    if cache_address & 0x80000000:\n      self.is_initialized = 'True'\n    else:\n      self.is_initialized = 'False'\n\n    self.file_type = (cache_address & 0x70000000) >> 28\n    if not cache_address == 0x00000000:\n      if self.file_type == self.FILE_TYPE_SEPARATE:\n        file_selector = cache_address & 0x0fffffff\n        self.filename = 'f_{0:06x}'.format(file_selector)\n\n      elif self.file_type in self._BLOCK_DATA_FILE_TYPES:\n        file_selector = (cache_address & 0x00ff0000) >> 16\n        self.filename = 'data_{0:d}'.format(file_selector)\n\n        file_block_size = self._FILE_TYPE_BLOCK_SIZES[self.file_type]\n        self.block_number = cache_address & 0x0000ffff\n        self.block_size = (cache_address & 0x03000000) >> 24\n        self.block_size *= file_block_size\n        self.block_offset = 8192 + (self.block_number * file_block_size)", "docstring": "Initializes a cache address.\n\nArgs:\ncache_address (int): cache address.", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    self._last_month = 0\n    self._year_use = parser_mediator.GetEstimatedYear()\n\n    try:\n      structure = self.SECURITYD_LINE.parseString(line)\n    except pyparsing.ParseException:\n      logger.debug('Not a MacOS securityd log file')\n      return False\n\n    time_elements_tuple = self._GetTimeElementsTuple(structure)\n\n    try:\n      dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      logger.debug(\n          'Not a MacOS securityd log file, invalid date and time: {0!s}'.format(\n              structure.date_time))\n      return False\n\n    self._last_month = time_elements_tuple[1]\n\n    return True", "docstring": "Verify that this file is a securityd log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"}
{"code": "def get_next_as_list(self, name=None):\n    del name\n    with ops.device(self._worker):\n        return self._format_data_list_with_options(self._iterator.get_next())", "docstring": "Get next element from the underlying iterator.\n\nRuns the iterator get_next() within a device scope. Since this doesn't use\nget_next_as_optional(), it is considerably faster than get_next_as_list(),\nbut it raises EOFError if any of the device doesn't get any data.\n\nArgs:\nname: not used.\n\nReturns:\nA list consisting of the next data from each device.", "source": "github-repos"}
{"code": "def delete_media_service_rg(access_token, subscription_id, rgname, msname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', rgname,\n                        '/providers/microsoft.media/mediaservices/', msname,\n                        '?api-version=', MEDIA_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a media service.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nmsname (str): Media service name.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def _format_parameter_error_message(name: str, sig: Signature, num_params: int) -> str:\n    if (num_params == 0):\n        plural = 's'\n        missing = 2\n        arguments = \"'slack' and 'event'\"\n    else:\n        plural = ''\n        missing = 1\n        arguments = \"'event'\"\n    return f'{name}{sig} missing {missing} required positional argument{plural}: {arguments}'", "docstring": "Format an error message for missing positional arguments.\n\nArgs:\nname: The function name.\nsig: The function's signature.\nnum_params: The number of function parameters.\n\nReturns:\nstr: A formatted error message.", "source": "codesearchnet"}
{"code": "def UpdateLease(self, duration):\n    if (not self.locked):\n        raise LockError(('Object must be locked to update the lease: %s.' % self.urn))\n    if (self.CheckLease() == 0):\n        self._RaiseLockError('UpdateLease')\n    self.transaction.UpdateLease(duration)", "docstring": "Updates the lease and flushes the object.\n\nThe lease is set to expire after the \"duration\" time from the present\nmoment.\nThis method is supposed to be used when operation that requires locking\nmay run for a time that exceeds the lease time specified in OpenWithLock().\nSee flows/hunts locking for an example.\n\nArgs:\nduration: Integer number of seconds. Lease expiry time will be set to\n\"time.time() + duration\".\n\nRaises:\nLockError: if the object is not currently locked or the lease has\nexpired.", "source": "codesearchnet"}
{"code": "def get_timing_signal(length, min_timescale=1, max_timescale=10000.0, num_timescales=16):\n    positions = to_float(tf.range(length))\n    log_timescale_increment = (math.log((max_timescale / min_timescale)) / (num_timescales - 1))\n    inv_timescales = (min_timescale * tf.exp((to_float(tf.range(num_timescales)) * (- log_timescale_increment))))\n    scaled_time = (tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0))\n    return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)", "docstring": "Create Tensor of sinusoids of different frequencies.\n\nArgs:\nlength: Length of the Tensor to create, i.e. Number of steps.\nmin_timescale: a float\nmax_timescale: a float\nnum_timescales: an int\n\nReturns:\nTensor of shape (length, 2*num_timescales)", "source": "codesearchnet"}
{"code": "def relevant_connections(n, _from, to):\n    \n    cm = np.zeros((n, n))\n\n    \n    \n    if not _from or not to:\n        return cm\n\n    cm[np.ix_(_from, to)] = 1\n    return cm", "docstring": "Construct a connectivity matrix.\n\nArgs:\nn (int): The dimensions of the matrix\n_from (tuple[int]): Nodes with outgoing connections to ``to``\nto (tuple[int]): Nodes with incoming connections from ``_from``\n\nReturns:\nnp.ndarray: An |n x n| connectivity matrix with the |i,jth| entry is\n``1`` if |i| is in ``_from`` and |j| is in ``to``, and 0 otherwise.", "source": "juraj-google-style"}
{"code": "def add_virtual_loss(self, up_to):\n    self.losses_applied += 1\n    loss = self.position.to_play\n    self.W += loss\n    if ((self.parent is None) or (self is up_to)):\n        return\n    self.parent.add_virtual_loss(up_to)", "docstring": "Propagate a virtual loss up to the root node.\n\nArgs:\nup_to: The node to propagate until. (Keep track of this! You'll\nneed it to reverse the virtual loss later.)", "source": "codesearchnet"}
{"code": "def calc_stats(prices):\n    \n    if isinstance(prices, pd.Series):\n        return PerformanceStats(prices)\n    elif isinstance(prices, pd.DataFrame):\n        return GroupStats(*[prices[x] for x in prices.columns])\n    else:\n        raise NotImplementedError('Unsupported type')", "docstring": "Calculates performance stats of a given object.\n\nIf object is Series, a PerformanceStats object is\nreturned. If object is DataFrame, a GroupStats object\nis returned.\n\nArgs:\n* prices (Series, DataFrame): Set of prices", "source": "juraj-google-style"}
{"code": "def _project_THn(self, hist: Hist) -> Any:\n    projection_axes = [axis.axis_type.value for axis in self.projection_axes]\n    if (len(projection_axes) == 2):\n        projection_axes.reverse()\n    args = (projection_axes + ['E'])\n    logger.debug(f'hist: {hist.GetName()} args: {args}')\n    if (len(projection_axes) > 3):\n        projected_hist = hist.ProjectionND(*args)\n    else:\n        projected_hist = hist.Projection(*args)\n    return projected_hist", "docstring": "Perform the actual THn -> THn or TH1 projection.\n\nThis projection could be to 1D, 2D, 3D, or ND.\n\nArgs:\nhist (ROOT.THnBase): Histogram from which the projections should be performed.\nReturns:\nROOT.THnBase or ROOT.TH1: The projected histogram.", "source": "codesearchnet"}
{"code": "def lineReceived(self, line):\n    while self._in_header:\n        if line:\n            self._headers.append(line)\n        else:\n            (http, status, message) = self._headers[0].split(' ', 2)\n            status = int(status)\n            if (status == 200):\n                self.factory.get_stream().connected()\n            else:\n                self.factory.continueTrying = 0\n                self.transport.loseConnection()\n                self.factory.get_stream().disconnected(RuntimeError(status, message))\n                return\n            self._in_header = False\n        break\n    else:\n        try:\n            self._len_expected = int(line, 16)\n            self.setRawMode()\n        except:\n            pass", "docstring": "Callback issued by twisted when new line arrives.\n\nArgs:\nline (str): Incoming line", "source": "codesearchnet"}
{"code": "def depth_may_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_may_average_ground_temperature`'.format(value))\n    self._depth_may_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_may_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_may_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor):\n    ln_outputs = self.layernorm_before_ffn(hidden_states)\n    outputs = self.ffn(ln_outputs)\n    if self.dropout is not None:\n        outputs = self.dropout(outputs)\n    hidden_states = hidden_states + outputs\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):\nHidden states before feed forward layer.", "source": "github-repos"}
{"code": "def get_filtered_normalized_events(self):\n    user_image = google_v2_operations.get_action_image(self._op, _ACTION_USER_COMMAND)\n    need_ok = google_v2_operations.is_success(self._op)\n    events = {}\n    for event in google_v2_operations.get_events(self._op):\n        if self._filter(event):\n            continue\n        (mapped, match) = self._map(event)\n        name = mapped['name']\n        if (name == 'ok'):\n            if ((not need_ok) or ('ok' in events)):\n                continue\n        if (name == 'pulling-image'):\n            if (match.group(1) != user_image):\n                continue\n        events[name] = mapped\n    return sorted(events.values(), key=operator.itemgetter('start-time'))", "docstring": "Filter the granular v2 events down to events of interest.\n\nFilter through the large number of granular events returned by the\npipelines API, and extract only those that are interesting to a user. This\nis implemented by filtering out events which are known to be uninteresting\n(i.e. the default actions run for every job) and by explicitly matching\nspecific events which are interesting and mapping those to v1 style naming.\n\nEvents which are not whitelisted or blacklisted will still be output,\nmeaning any events which are added in the future won't be masked.\nWe don't want to suppress display of events that we don't recognize.\nThey may be important.\n\nReturns:\nA list of maps containing the normalized, filtered events.", "source": "codesearchnet"}
{"code": "def tokens(self):\n    return self._tokens", "docstring": "Access the tokens contained within this line.\n\nThe caller must not modify the tokens list returned by this method.\n\nReturns:\nList of tokens in this line.", "source": "github-repos"}
{"code": "def info(self, show_defaults=False):\n        \n        pprinter = PrettyPrinter(show_options=True, show_defaults=show_defaults)\n        print(pprinter.pprint(self._obj))", "docstring": "Prints a repr of the object including any applied options.\n\nArgs:\nshow_defaults: Whether to include default options", "source": "juraj-google-style"}
{"code": "def get_all(self, seq_set: SequenceSet) -> Sequence[Tuple[(int, CachedMessage)]]:\n    if seq_set.uid:\n        all_uids = (seq_set.flatten(self.max_uid) & self._uids)\n        return [(seq, self._cache[uid]) for (seq, uid) in enumerate(self._sorted, 1) if (uid in all_uids)]\n    else:\n        all_seqs = seq_set.flatten(self.exists)\n        return [(seq, self._cache[uid]) for (seq, uid) in enumerate(self._sorted, 1) if (seq in all_seqs)]", "docstring": "Return the cached messages, and their sequence numbers, for the\ngiven sequence set.\n\nArgs:\nseq_set: The message sequence set.", "source": "codesearchnet"}
{"code": "def _yellowfin(self):\n    yellowfin_ops = []\n    curv_range_ops = self._curvature_range()\n    yellowfin_ops += curv_range_ops\n    grad_var_ops = self._grad_variance()\n    yellowfin_ops += grad_var_ops\n    dist_to_opt_ops = self._dist_to_opt()\n    yellowfin_ops += dist_to_opt_ops\n    self._mu = tf.identity(tf.cond(self._do_tune, self._get_mu_tensor, (lambda : self._mu_var)))\n    with tf.control_dependencies([self._mu]):\n        self._lr = tf.identity(tf.cond(self._do_tune, self._get_lr_tensor, (lambda : self._lr_var)))\n    with tf.control_dependencies([self._mu, self._lr]):\n        self._mu = ((self._beta * self._mu_var) + ((1 - self._beta) * self._mu))\n        self._lr = ((self._beta * self._lr_var) + ((1 - self._beta) * self._lr))\n        yellowfin_ops.append(tf.assign(self._mu_var, self._mu))\n        yellowfin_ops.append(tf.assign(self._lr_var, self._lr))\n    yellowfin_ops = tf.group(*yellowfin_ops)\n    return yellowfin_ops", "docstring": "YellowFin auto-tuning optimizer based on momentum SGD.\n\nReturns:\nYF ops\n(Curvature range,\nGrad_variance,\nDist_to_opt,\nSingle-Step,\nAuto-Tuning)", "source": "codesearchnet"}
{"code": "def __init__(self, app=None):\n        \n        self._key = None\n        self._endpoint_uri = None\n        self._channel = None\n        self._requests_middleware = None\n        self._trace_log_handler = None\n        self._exception_telemetry_client = None\n\n        if app:\n            self.init_app(app)", "docstring": "Initialize a new instance of the extension.\n\nArgs:\napp (flask.Flask). the Flask application for which to initialize the extension.", "source": "juraj-google-style"}
{"code": "def convert_slow_tokenizer(transformer_tokenizer, from_tiktoken=False) -> Tokenizer:\n    tokenizer_class_name = transformer_tokenizer.__class__.__name__\n    if tokenizer_class_name in SLOW_TO_FAST_CONVERTERS and (not from_tiktoken):\n        converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name]\n        return converter_class(transformer_tokenizer).converted()\n    else:\n        try:\n            logger.info('Converting from Tiktoken')\n            return TikTokenConverter(vocab_file=transformer_tokenizer.vocab_file, additional_special_tokens=transformer_tokenizer.additional_special_tokens).converted()\n        except Exception:\n            raise ValueError(f'Converting from SentencePiece and Tiktoken failed, if a converter for SentencePiece is available, provide a model path with a SentencePiece tokenizer.model file.Currently available slow->fast converters: {list(SLOW_TO_FAST_CONVERTERS.keys())}')", "docstring": "Utilities to convert a slow tokenizer instance in a fast tokenizer instance.\n\nArgs:\ntransformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]):\nInstance of a slow tokenizer to convert in the backend tokenizer for\n[`~tokenization_utils_base.PreTrainedTokenizerFast`].\nfrom_tiktoken (bool, optional): Whether to use the `tiktoken` library to convert the tokenizer instead of sentencepiece.\nDefaults to False.\n\nReturn:\nA instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a\n[`~tokenization_utils_base.PreTrainedTokenizerFast`]", "source": "github-repos"}
{"code": "def convert_predictions_to_image_summaries(hook_args):\n    decode_hparams = hook_args.decode_hparams\n    if (not decode_hparams.display_decoded_images):\n        return []\n    predictions = hook_args.predictions[0]\n    all_summaries = []\n    rand_predictions = np.random.choice(predictions, size=10)\n    for (ind, prediction) in enumerate(rand_predictions):\n        output_summary = image_to_tf_summary_value(prediction['outputs'], tag=('%d_output' % ind))\n        input_summary = image_to_tf_summary_value(prediction['inputs'], tag=('%d_input' % ind))\n        all_summaries.append(input_summary)\n        all_summaries.append(output_summary)\n    return all_summaries", "docstring": "Optionally converts images from hooks_args to image summaries.\n\nArgs:\nhook_args: DecodeHookArgs namedtuple\nReturns:\nsummaries: list of tf.Summary values if hook_args.decode_hpara", "source": "codesearchnet"}
{"code": "def __add__(self, other):\n    \n    if (isinstance(other, LazyAllreduceSum) and\n        self.mesh_impl == other.mesh_impl and\n        self.mesh_axes == other.mesh_axes):\n      return LazyAllreduceSum(\n          self.mesh_impl,\n          self.mesh_impl.slicewise(\n              tf.add, self.laid_out_input, other.laid_out_input),\n          self.mesh_axes,\n          add_counter_fn=self.add_counter_fn)\n    else:\n      return self.mesh_impl.slicewise(\n          tf.add, self.to_laid_out_tensor(), other.to_laid_out_tensor())", "docstring": "Add to another LazyAllreduceSum.\n\nArgs:\nother: a LazyAllreduceSum or a LaidOutTensor\nReturns:\na LazyAllreduceSum or a LaidOutTensor", "source": "juraj-google-style"}
{"code": "def cudnn_bi_lstm(units, n_hidden, seq_lengths=None, n_layers=1, trainable_initial_states=False, name='cudnn_bi_gru', reuse=False):\n    with tf.variable_scope(name, reuse=reuse):\n        if (seq_lengths is None):\n            seq_lengths = (tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1])\n        with tf.variable_scope('Forward'):\n            (h_fw, (h_fw_last, c_fw_last)) = cudnn_lstm_wrapper(units, n_hidden, n_layers=n_layers, trainable_initial_states=trainable_initial_states, seq_lengths=seq_lengths)\n        with tf.variable_scope('Backward'):\n            reversed_units = tf.reverse_sequence(units, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)\n            (h_bw, (h_bw_last, c_bw_last)) = cudnn_lstm_wrapper(reversed_units, n_hidden, n_layers=n_layers, trainable_initial_states=trainable_initial_states, seq_lengths=seq_lengths)\n            h_bw = tf.reverse_sequence(h_bw, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)\n        return ((h_fw, h_bw), ((h_fw_last, c_fw_last), (h_bw_last, c_bw_last)))", "docstring": "Fast CuDNN Bi-LSTM implementation\n\nArgs:\nunits: tf.Tensor with dimensions [B x T x F], where\nB - batch size\nT - number of tokens\nF - features\nn_hidden: dimensionality of hidden state\nseq_lengths: number of tokens in each sample in the batch\nn_layers: number of layers\ntrainable_initial_states: whether to create a special trainable variable\nto initialize the hidden states of the network or use just zeros\nname: name of the variable scope to use\nreuse:whether to reuse already initialized variable\n\nReturns:\nh - all hidden states along T dimension,\ntf.Tensor with dimensionality [B x T x F]\nh_last - last hidden state, tf.Tensor with dimensionality [B x H * 2]\nwhere H - number of hidden units\nc_last - last cell state, tf.Tensor with dimensionality [B x H * 2]\nwhere H - number of hidden units", "source": "codesearchnet"}
{"code": "def findall(self, title=None):\n        \n        if title is None:\n            return list(self)\n        files = backend.iterfiles(self._drive, name=title)\n        return [self[id] for id, _ in files]", "docstring": "Fetch and return a list of spreadsheets with the given title.\n\nArgs:\ntitle(str): title/name of the spreadsheets to return, or ``None`` for all\nReturns:\nlist: list of new SpreadSheet instances (possibly empty)", "source": "juraj-google-style"}
{"code": "def get_reconstructed_band_structure(list_bs, efermi=None):\n    if (efermi is None):\n        efermi = (sum([b.efermi for b in list_bs]) / len(list_bs))\n    kpoints = []\n    labels_dict = {}\n    rec_lattice = list_bs[0].lattice_rec\n    nb_bands = min([list_bs[i].nb_bands for i in range(len(list_bs))])\n    kpoints = np.concatenate([[k.frac_coords for k in bs.kpoints] for bs in list_bs])\n    dicts = [bs.labels_dict for bs in list_bs]\n    labels_dict = {k: v.frac_coords for d in dicts for (k, v) in d.items()}\n    eigenvals = {}\n    eigenvals[Spin.up] = np.concatenate([bs.bands[Spin.up][:nb_bands] for bs in list_bs], axis=1)\n    if list_bs[0].is_spin_polarized:\n        eigenvals[Spin.down] = np.concatenate([bs.bands[Spin.down][:nb_bands] for bs in list_bs], axis=1)\n    projections = {}\n    if (len(list_bs[0].projections) != 0):\n        projs = [bs.projections[Spin.up][:nb_bands] for bs in list_bs]\n        projections[Spin.up] = np.concatenate(projs, axis=1)\n        if list_bs[0].is_spin_polarized:\n            projs = [bs.projections[Spin.down][:nb_bands] for bs in list_bs]\n            projections[Spin.down] = np.concatenate(projs, axis=1)\n    if isinstance(list_bs[0], BandStructureSymmLine):\n        return BandStructureSymmLine(kpoints, eigenvals, rec_lattice, efermi, labels_dict, structure=list_bs[0].structure, projections=projections)\n    else:\n        return BandStructure(kpoints, eigenvals, rec_lattice, efermi, labels_dict, structure=list_bs[0].structure, projections=projections)", "docstring": "This method takes a list of band structures and reconstructs\none band structure object from all of them.\n\nThis is typically very useful when you split non self consistent\nband structure runs in several independent jobs and want to merge back\nthe results\n\nArgs:\nlist_bs: A list of BandStructure or BandStructureSymmLine objects.\nefermi: The Fermi energy of the reconstructed band structure. If\nNone is assigned an average of all the Fermi energy in each\nobject in the list_bs is used.\n\nReturns:\nA BandStructure or BandStructureSymmLine object (depending on\nthe type of the list_bs objects)", "source": "codesearchnet"}
{"code": "def deprecated_internal_set_learning_phase(value):\n    global _GRAPH_LEARNING_PHASES\n    if value not in {0, 1}:\n        raise ValueError('Expected learning phase to be 0 or 1.')\n    with ops.init_scope():\n        if context.executing_eagerly():\n            _DUMMY_EAGER_GRAPH.learning_phase_is_set = True\n            _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value\n        _GRAPH_LEARNING_PHASES[get_graph()] = value", "docstring": "A deprecated internal implementation of set_learning_phase.\n\nThis method is an internal-only version of `set_learning_phase` that\ndoes not raise a deprecation error. It is required because\nsaved_model needs to keep working with user code that uses the deprecated\nlearning phase methods until those APIs are fully removed from the public API.\n\nSpecifically SavedModel saving needs to make sure the learning phase is 0\nduring tracing even if users overwrote it to a different value.\n\nBut, we don't want to raise deprecation warnings for users when savedmodel\nsets learning phase just for compatibility with code that relied on\nexplicitly setting the learning phase for other values.\n\nArgs:\nvalue: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train\n\nRaises:\nValueError: if `value` is neither `0` nor `1`.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_offset = 0\n\n    try:\n      timestamp, event_data = self._ReadEntry(\n          parser_mediator, file_object, file_offset)\n    except errors.ParseError as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse first utmp entry with error: {0!s}'.format(\n              exception))\n\n    if not event_data.username:\n      raise errors.UnableToParseFile(\n          'Unable to parse first utmp entry with error: missing username')\n\n    if not timestamp:\n      raise errors.UnableToParseFile(\n          'Unable to parse first utmp entry with error: missing timestamp')\n\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_START)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    file_offset = file_object.tell()\n    file_size = file_object.get_size()\n\n    while file_offset < file_size:\n      if parser_mediator.abort:\n        break\n\n      try:\n        timestamp, event_data = self._ReadEntry(\n            parser_mediator, file_object, file_offset)\n      except errors.ParseError:\n        \n        break\n\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_START)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      file_offset = file_object.tell()", "docstring": "Parses an utmp file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def from_str(cls, input_string, fmt):\n    from pymatgen.io.xyz import XYZ\n    from pymatgen.io.gaussian import GaussianInput\n    if (fmt.lower() == 'xyz'):\n        m = XYZ.from_string(input_string).molecule\n    elif (fmt in ['gjf', 'g03', 'g09', 'com', 'inp']):\n        m = GaussianInput.from_string(input_string).molecule\n    elif (fmt == 'json'):\n        d = json.loads(input_string)\n        return cls.from_dict(d)\n    elif (fmt == 'yaml'):\n        import ruamel.yaml as yaml\n        d = yaml.safe_load(input_string)\n        return cls.from_dict(d)\n    else:\n        from pymatgen.io.babel import BabelMolAdaptor\n        m = BabelMolAdaptor.from_string(input_string, file_format=fmt).pymatgen_mol\n    return cls.from_sites(m)", "docstring": "Reads the molecule from a string.\n\nArgs:\ninput_string (str): String to parse.\nfmt (str): Format to output to. Defaults to JSON unless filename\nis provided. If fmt is specifies, it overrides whatever the\nfilename is. Options include \"xyz\", \"gjf\", \"g03\", \"json\". If\nyou have OpenBabel installed, any of the formats supported by\nOpenBabel. Non-case sensitive.\n\nReturns:\nIMolecule or Molecule.", "source": "codesearchnet"}
{"code": "def steps(self, goal):\n        \n\n        path = self.path(goal)\n        for i in range(len(path) - 1):\n            yield path[i], path[i + 1]", "docstring": "Get the list of individual relations leading to the targeted node\n\nArgs:\ngoal (str): Name of the targeted node\nReturn:\nlist of tuple of Node", "source": "juraj-google-style"}
{"code": "def apply_filter(self, expr, value):\n    if self.skip(value):\n        return expr\n    if (not self._valid_value(value)):\n        msg = 'Invalid value {value} passed to filter {name} - '.format(value=repr(value), name=self.name)\n        if (self.default is not None):\n            warn((msg + 'defaulting to {}'.format(self.default)))\n            value = self.default\n        else:\n            warn((msg + 'skipping'))\n            return expr\n    return self.func(expr, value)", "docstring": "Returns the given expression filtered by the given value.\n\nArgs:\nexpr (xpath.expression.AbstractExpression): The expression to filter.\nvalue (object): The desired value with which the expression should be filtered.\n\nReturns:\nxpath.expression.AbstractExpression: The filtered expression.", "source": "codesearchnet"}
{"code": "def from_json_str(cls, json_str):\n    return cls.from_json(json.loads(json_str, cls=JsonDecoder))", "docstring": "Convert json string representation into class instance.\n\nArgs:\njson_str: json representation as string.\n\nReturns:\nNew instance of the class with data loaded from json string.", "source": "codesearchnet"}
{"code": "def new(namespace, name, protected=False, attributes=dict(), api_url=fapi.PROD_API_ROOT):\n    r = fapi.create_workspace(namespace, name, protected, attributes, api_url)\n    fapi._check_response_code(r, 201)\n    return Workspace(namespace, name, api_url)", "docstring": "Create a new FireCloud workspace.\n\nReturns:\nWorkspace: A new FireCloud workspace\n\nRaises:\nFireCloudServerError: API call failed.", "source": "codesearchnet"}
{"code": "async def _check_resolver_ans(self, dns_answer_list, record_name, record_data_list, record_ttl, record_type_code):\n    type_filtered_list = [ans for ans in dns_answer_list if (ans.qtype == record_type_code)]\n    if (len(type_filtered_list) != len(record_data_list)):\n        return False\n    for rec in type_filtered_list:\n        conditions = [(rec.name == record_name), (rec.ttl == record_ttl), (rec.data in record_data_list)]\n        if (not all(conditions)):\n            return False\n    return True", "docstring": "Check if resolver answer is equal to record data.\n\nArgs:\ndns_answer_list (list): DNS answer list contains record objects.\nrecord_name (str): Record name.\nrecord_data_list (list): List of data values for the record.\nrecord_ttl (int): Record time-to-live info.\nrecord_type_code (int): Record type code.\n\nReturns:\nboolean indicating if DNS answer data is equal to record data.", "source": "codesearchnet"}
{"code": "def verify_repo_matches_url(repo, url):\n    repo_parts = urlparse(repo)\n    url_parts = urlparse(url)\n    errors = []\n    repo_path_parts = repo_parts.path.split('/')\n    url_path_parts = url_parts.path.split('/')\n    if (repo_parts.hostname != url_parts.hostname):\n        errors.append(\"verify_repo_matches_url: Hostnames don't match! {} {}\".format(repo_parts.hostname, url_parts.hostname))\n    if ((not url_parts.path.startswith(repo_parts.path)) or (url_path_parts[:len(repo_path_parts)] != repo_path_parts)):\n        errors.append(\"verify_repo_matches_url: Paths don't match! {} {}\".format(repo_parts.path, url_parts.path))\n    if errors:\n        log.warning('\\n'.join(errors))\n        return False\n    return True", "docstring": "Verify ``url`` is a part of ``repo``.\n\nWe were using ``startswith()`` for a while, which isn't a good comparison.\nThis function allows us to ``urlparse`` and compare host and path.\n\nArgs:\nrepo (str): the repo url\nurl (str): the url to verify is part of the repo\n\nReturns:\nbool: ``True`` if the repo matches the url.", "source": "codesearchnet"}
{"code": "def PyParseJoinList(string, location, tokens):\n    join_list = []\n    for token in tokens:\n        try:\n            join_list.append(str(token))\n        except UnicodeDecodeError:\n            join_list.append(repr(token))\n    tokens[0] = ''.join(join_list)\n    del tokens[1:]", "docstring": "Return a joined token from a list of tokens.\n\nThis is a callback method for pyparsing setParseAction that modifies\nthe returned token list to join all the elements in the list to a single\ntoken.\n\nArgs:\nstring (str): original string.\nlocation (int): location in the string where the match was made.\ntokens (list[str]): extracted tokens, where the string to be converted\nis stored.", "source": "codesearchnet"}
{"code": "def _Parse(self, template):\n    if (not template):\n        raise TextFSMTemplateError('Null template.')\n    self._ParseFSMVariables(template)\n    while self._ParseFSMState(template):\n        pass\n    self._ValidateFSM()", "docstring": "Parses template file for FSM structure.\n\nArgs:\ntemplate: Valid template file.\n\nRaises:\nTextFSMTemplateError: If template file syntax is invalid.", "source": "codesearchnet"}
{"code": "def write_log(self, message):\n    if (self._is_write_log and self.log_file and (not self.log_file.closed)):\n        self.log_file.write((message + '\\n'))", "docstring": "Write a line to the VM instruction log file.\n\nArgs:\nmessage (str): string message to write to file.", "source": "codesearchnet"}
{"code": "def transform_list_to_dict(list):\n    \n\n    ret = {}\n\n    for value in list:\n        if isinstance(value, dict):\n            ret.update(value)\n        else:\n            ret[str(value)] = True\n\n    return ret", "docstring": "Transforms a list into a dictionary, putting values as keys\nArgs:\nid:\nReturns:\ndict: dictionary built", "source": "juraj-google-style"}
{"code": "def indent(s, n_spaces=2, initial=True):\n    \n    i = ' '*n_spaces\n    t = s.replace('\\n', '\\n%s' % i)\n    if initial:\n        t = i + t\n    return t", "docstring": "Indent all new lines\nArgs:\nn_spaces: number of spaces to use for indentation\ninitial: whether or not to start with an indent", "source": "juraj-google-style"}
{"code": "def to_maildir(self, flags: Iterable[Union[(bytes, Flag)]]) -> str:\n    codes = []\n    for flag in flags:\n        if isinstance(flag, bytes):\n            flag = Flag(flag)\n        from_sys = self._from_sys.get(flag)\n        if (from_sys is not None):\n            codes.append(from_sys)\n        else:\n            from_kwd = self._from_kwd.get(flag)\n            if (from_kwd is not None):\n                codes.append(from_kwd)\n    return ''.join(codes)", "docstring": "Return the string of letter codes that are used to map to defined\nIMAP flags and keywords.\n\nArgs:\nflags: The flags and keywords to map.", "source": "codesearchnet"}
{"code": "def _is_flag(cls, arg):\n    if (arg == '--'):\n        return False\n    if (not arg.startswith('-')):\n        return False\n    if arg.startswith('--'):\n        first_char = arg[2]\n    else:\n        first_char = arg[1]\n    if (not first_char.isalpha()):\n        return False\n    return True", "docstring": "Check if an argument is a flag.\n\nA flag starts with - or -- and the next character must be a letter\nfollowed by letters, numbers, - or _.  Currently we only check the\nalpha'ness of the first non-dash character to make sure we're not just\nlooking at a negative number.\n\nReturns:\nbool: Whether the argument is a flag.", "source": "codesearchnet"}
{"code": "def regexp(__string: str, __pattern: str, __repl: Union[(Callable, str)], *, count: int=0, flags: int=0) -> str:\n    return re.sub(__pattern, __repl, __string, count, flags)", "docstring": "Jinja filter for regexp replacements.\n\nSee :func:`re.sub` for documentation.\n\nReturns:\nText with substitutions applied", "source": "codesearchnet"}
{"code": "def get_and_check_project(valid_vcs_rules, source_url):\n    project_path = match_url_regex(valid_vcs_rules, source_url, match_url_path_callback)\n    if (project_path is None):\n        raise ValueError('Unknown repo for source url {}!'.format(source_url))\n    project = project_path.split('/')[(- 1)]\n    return project", "docstring": "Given vcs rules and a source_url, return the project.\n\nThe project is in the path, but is the repo name.\n`releases/mozilla-beta` is the path; `mozilla-beta` is the project.\n\nArgs:\nvalid_vcs_rules (tuple of frozendicts): the valid vcs rules, per\n``match_url_regex``.\nsource_url (str): the source url to find the project for.\n\nRaises:\nRuntimeError: on failure to find the project.\n\nReturns:\nstr: the project.", "source": "codesearchnet"}
{"code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension=None) -> tuple[int, int]:\n    if channel_dim is None:\n        channel_dim = infer_channel_dimension_format(image)\n    if channel_dim == ChannelDimension.FIRST:\n        return (image.shape[-2], image.shape[-1])\n    elif channel_dim == ChannelDimension.LAST:\n        return (image.shape[-3], image.shape[-2])\n    else:\n        raise ValueError(f'Unsupported data format: {channel_dim}')", "docstring": "Returns the (height, width) dimensions of the image.\n\nArgs:\nimage (`np.ndarray`):\nThe image to get the dimensions of.\nchannel_dim (`ChannelDimension`, *optional*):\nWhich dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\nReturns:\nA tuple of the image's height and width.", "source": "github-repos"}
{"code": "def wait_until(what, times=-1):\n    \n    while times:\n        logger.info('Waiting times left %d', times)\n        try:\n            if what() is True:\n                return True\n        except:\n            logger.exception('Wait failed')\n        else:\n            logger.warning('Trial[%d] failed', times)\n        times -= 1\n        time.sleep(1)\n\n    return False", "docstring": "Wait until `what` return True\n\nArgs:\nwhat (Callable[bool]): Call `wait()` again and again until it returns True\ntimes (int): Maximum times of trials before giving up\n\nReturns:\nTrue if success, False if times threshold reached", "source": "juraj-google-style"}
{"code": "def from_der(der):\n    d = get_bytes(der)\n    if (len(d) < 8):\n        raise ValueError('DER signature string is too short.')\n    if (len(d) > 72):\n        raise ValueError('DER signature string is too long.')\n    if (d[0] != 48):\n        raise ValueError('DER signature does not start with 0x30.')\n    if (d[1] != len(d[2:])):\n        raise ValueError('DER signature length incorrect.')\n    total_length = d[1]\n    if (d[2] != 2):\n        raise ValueError('DER signature no 1st int marker.')\n    if ((d[3] <= 0) or (d[3] > (total_length - 7))):\n        raise ValueError('DER signature incorrect R length.')\n    rlen = d[3]\n    s_magic_index = (4 + rlen)\n    rb = d[4:s_magic_index]\n    if ((rb[0] & 128) != 0):\n        raise ValueError('DER signature R is negative.')\n    if ((len(rb) > 1) and (rb[0] == 0) and ((rb[1] & 128) != 128)):\n        raise ValueError('DER signature R is excessively padded.')\n    r = int.from_bytes(rb, 'big')\n    if (d[s_magic_index] != 2):\n        raise ValueError('DER signature no 2nd int marker.')\n    slen_index = (s_magic_index + 1)\n    slen = d[slen_index]\n    if ((slen <= 0) or (slen > (len(d) - (slen_index + 1)))):\n        raise ValueError('DER signature incorrect S length.')\n    sb = d[(slen_index + 1):]\n    if ((sb[0] & 128) != 0):\n        raise ValueError('DER signature S is negative.')\n    if ((len(sb) > 1) and (sb[0] == 0) and ((sb[1] & 128) != 128)):\n        raise ValueError('DER signature S is excessively padded.')\n    s = int.from_bytes(sb, 'big')\n    if ((r < 1) or (r >= bitcoin_curve.n)):\n        raise ValueError('DER signature R is not between 1 and N - 1.')\n    if ((s < 1) or (s >= bitcoin_curve.n)):\n        raise ValueError('DER signature S is not between 1 and N - 1.')\n    return Signature(r, s)", "docstring": "Decodes a Signature that was DER-encoded.\n\nArgs:\nder (bytes or str): The DER encoding to be decoded.\n\nReturns:\nSignature: The deserialized signature.", "source": "codesearchnet"}
{"code": "def __init__(self, label=None, edge_length=None):\n        \n        self.children = list()         \n        self.parent = None             \n        self.label = label             \n        self.edge_length = edge_length", "docstring": "``Node`` constructor\n\nArgs:\n``label`` (``str``): Label of this ``Node``\n\n``edge_length`` (``float``): Length of the edge incident to this ``Node``\n\nReturns:\n``Node`` object", "source": "juraj-google-style"}
{"code": "def ack(self, items):\n    for item in items:\n        time_to_ack = item.time_to_ack\n        if (time_to_ack is not None):\n            self._manager.ack_histogram.add(time_to_ack)\n    ack_ids = [item.ack_id for item in items]\n    request = types.StreamingPullRequest(ack_ids=ack_ids)\n    self._manager.send(request)\n    self.drop(items)", "docstring": "Acknowledge the given messages.\n\nArgs:\nitems(Sequence[AckRequest]): The items to acknowledge.", "source": "codesearchnet"}
{"code": "def lower_dict_keys(origin_dict):\n    if ((not origin_dict) or (not isinstance(origin_dict, dict))):\n        return origin_dict\n    return {key.lower(): value for (key, value) in origin_dict.items()}", "docstring": "convert keys in dict to lower case\n\nArgs:\norigin_dict (dict): mapping data structure\n\nReturns:\ndict: mapping with all keys lowered.\n\nExamples:\n>>> origin_dict = {\n\"Name\": \"\",\n\"Request\": \"\",\n\"URL\": \"\",\n\"METHOD\": \"\",\n\"Headers\": \"\",\n\"Data\": \"\"\n}\n>>> lower_dict_keys(origin_dict)\n{\n\"name\": \"\",\n\"request\": \"\",\n\"url\": \"\",\n\"method\": \"\",\n\"headers\": \"\",\n\"data\": \"\"\n}", "source": "codesearchnet"}
{"code": "def change_password(self, username, newpassword, raise_on_error=False):\n    response = self._put((self.rest_url + '/user/password'), data=json.dumps({'value': newpassword}), params={'username': username})\n    if response.ok:\n        return True\n    if raise_on_error:\n        raise RuntimeError(response.json()['message'])\n    return False", "docstring": "Change new password for a user\n\nArgs:\nusername: The account username.\n\nnewpassword: The account new password.\n\nraise_on_error: optional (default: False)\n\nReturns:\nTrue: Succeeded\nFalse: If unsuccessful", "source": "codesearchnet"}
{"code": "def _unpack_zip(self, file_obj, path):\n        \n        old_cwd = os.getcwd()\n        os.chdir(path)\n\n        zip_obj = zipfile.ZipFile(file_obj)\n        for cnt, zip_info in enumerate(zip_obj.infolist()):\n            zip_obj.extract(zip_info)\n\n            if cnt + 1 > self.max_zipfiles:\n                os.chdir(old_cwd)\n\n                msg = \"Too many files in .zip \"\n                msg += \"(self.max_zipfiles == {}, but {} given).\".format(\n                    self.max_zipfiles,\n                    cnt + 1,\n                )\n                raise ValueError(msg)\n\n        os.chdir(old_cwd)", "docstring": "Unpack .zip archive in `file_obj` to given `path`. Make sure, that it\nfits into limits (see :attr:`._max_zipfiles` for details).\n\nArgs:\nfile_obj (file): Opened file-like object.\npath (str): Path into which the .zip will be unpacked.\n\nRaises:\nValueError: If there is too many files in .zip archive.", "source": "juraj-google-style"}
{"code": "def yield_typed(obj_or_cls):\n    \n    if not isinstance(obj_or_cls, type):\n        obj_or_cls = type(obj_or_cls)\n    for attrname in dir(obj_or_cls):\n        if hasattr(obj_or_cls, attrname):\n            attr = getattr(obj_or_cls, attrname)\n            \n            if (isinstance(attr, property) and isinstance(attr.__doc__, six.string_types)\n                and \"__typed__\" in attr.__doc__):\n                yield attrname", "docstring": "Generator that yields typed object names of the class (or object's class).\n\nArgs:\nobj_or_cls (object): Class object or instance of class\n\nReturns:\nname (array): Names of class attributes that are strongly typed", "source": "juraj-google-style"}
{"code": "def is_alive(self, container: Container) -> bool:\n    uid = container.uid\n    return ((uid in self.__dockerc) and (self.__dockerc[uid].status == 'running'))", "docstring": "Determines whether a given container is still alive.\n\nReturns:\n`True` if the underlying Docker container for the given BugZoo\ncontainer is still alive, otherwise `False`.", "source": "codesearchnet"}
{"code": "def cast(cls, x, dtype):\n    return x.astype(dtype)", "docstring": "Cast a tensor to a different dtype.\n\nOnly called on a full array as provided by the user.\n\nArgs:\nx: the tensor to cast.\nReturns: the cast tensor.", "source": "github-repos"}
{"code": "def monkey_patch(enabled=True):\n    if enabled:\n        Image.open = imdirect_open\n    else:\n        Image.open = pil_open", "docstring": "Monkey patching PIL.Image.open method\n\nArgs:\nenabled (bool): If the monkey patch should be activated or deactivated.", "source": "codesearchnet"}
{"code": "def memory_zones(self):\n    count = self.num_memory_zones()\n    if (count == 0):\n        return list()\n    buf = (structs.JLinkMemoryZone * count)()\n    res = self._dll.JLINK_GetMemZones(buf, count)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return list(buf)", "docstring": "Gets all memory zones supported by the current target.\n\nSome targets support multiple memory zones.  This function provides the\nability to get a list of all the memory zones to facilate using the\nmemory zone routing functions.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of all the memory zones as ``JLinkMemoryZone`` structures.\n\nRaises:\nJLinkException: on hardware errors.", "source": "codesearchnet"}
{"code": "def on_connected(self, connection):\n        \n        log.info('PikaClient: connected to RabbitMQ')\n        self.connected = True\n        self.in_channel = self.connection.channel(self.on_channel_open)", "docstring": "AMQP connection callback.\nCreates input channel.\n\nArgs:\nconnection: AMQP connection", "source": "juraj-google-style"}
{"code": "def walk_files_for(paths, supported_extensions):\n    for path in paths:\n        for (root, _, files) in os.walk(path):\n            if Application.ignore_path(root.replace(path, '')):\n                continue\n            for filename in files:\n                extension = os.path.splitext(filename)[1]\n                if (extension in supported_extensions):\n                    (yield (path, os.path.join(root, filename), extension))", "docstring": "Iterating files for given extensions.\n\nArgs:\nsupported_extensions (list): supported file extentsion for which to check loc and com.\n\nReturns:\nstr: yield each full path and filename found.", "source": "codesearchnet"}
{"code": "def take_screenshot(webdriver, file_name):\n    folder_location = os.path.join(ProjectUtils.get_project_root(), WebScreenShotUtil.SCREEN_SHOT_LOCATION)\n    WebScreenShotUtil.__capture_screenshot(webdriver, folder_location, (file_name + '.png'))", "docstring": "Captures a screenshot.\n\nArgs:\nwebdriver (WebDriver) - Selenium webdriver.\nfile_name (str) - File name to save screenshot as.", "source": "codesearchnet"}
{"code": "def create_var_in_main(name: str, value: Any, watch: bool=True) -> Tuple[str, Any]:\n    setattr(importlib.import_module('__main__'), name, value)\n    if watch:\n        from apache_beam.runners.interactive import interactive_environment as ie\n        ie.current_env().watch({name: value})\n    return (name, value)", "docstring": "Declares a variable in the main module.\n\nArgs:\nname: the variable name in the main module.\nvalue: the value of the variable.\nwatch: whether to watch it in the interactive environment.\nReturns:\nA 2-entry tuple of the variable name and value.", "source": "github-repos"}
{"code": "def remove(self, key):\n        \n\n        data = self._load_file()\n        del data[key]\n        self._save_file(data)", "docstring": "Remove a key from the data store\n\nArgs:\nkey (string): The key to remove\n\nRaises:\nKeyError: if the key was not found", "source": "juraj-google-style"}
{"code": "def _GetFlagsDefinedByModule(self, module):\n    \n    if not isinstance(module, str):\n      module = module.__name__\n\n    return list(self.FlagsByModuleDict().get(module, []))", "docstring": "Returns the list of flags defined by a module.\n\nArgs:\nmodule: A module object or a module name (a string).\n\nReturns:\nA new list of Flag objects.  Caller may update this list as he\nwishes: none of those changes will affect the internals of this\nFlagValue object.", "source": "juraj-google-style"}
{"code": "def extract(self, text: str) -> List[Extraction]:\n        \n\n        doc = self._tokenizer.tokenize_to_spacy_doc(text)\n        self._load_matcher()\n\n        matches = [x for x in self._matcher(doc) if x[1] != x[2]]\n        pos_filtered_matches = []\n        neg_filtered_matches = []\n        for idx, start, end in matches:\n            span_doc = self._tokenizer.tokenize_to_spacy_doc(doc[start:end].text)\n            this_spacy_rule = self._matcher.get(idx)\n            relations = self._find_relation(span_doc, this_spacy_rule)\n            rule_id, _ = self._hash_map[idx]\n            this_rule = self._rule_lst[rule_id]\n            if self._filter_match(doc[start:end], relations, this_rule.patterns):\n                value = self._form_output(doc[start:end], this_rule.output_format, relations, this_rule.patterns)\n                if this_rule.polarity:\n                    pos_filtered_matches.append((start, end, value, rule_id, relations))\n                else:\n                    neg_filtered_matches.append((start, end, value, rule_id, relations))\n\n        return_lst = []\n        if pos_filtered_matches:\n            longest_lst_pos = self._get_longest(pos_filtered_matches)\n            if neg_filtered_matches:\n                longest_lst_neg = self._get_longest(neg_filtered_matches)\n                return_lst = self._reject_neg(longest_lst_pos, longest_lst_neg)\n            else:\n                return_lst = longest_lst_pos\n\n        extractions = []\n        for (start, end, value, rule_id, relation) in return_lst:\n            this_extraction = Extraction(value=value,\n                                         extractor_name=self.name,\n                                         start_token=start,\n                                         end_token=end,\n                                         start_char=doc[start].idx,\n                                         end_char=doc[end-1].idx+len(doc[end-1]),\n                                         rule_id=rule_id.split(\"rule_id\n                                         match_mapping=relation)\n            extractions.append(this_extraction)\n\n        return extractions", "docstring": "Extract from text\n\nArgs:\ntext (str): input str to be extracted.\n\nReturns:\nList[Extraction]: the list of extraction or the empty list if there are no matches.", "source": "juraj-google-style"}
{"code": "def DEFINE_float(flag_name, default_value, docstring, required=False):  \n    \n    _define_helper(flag_name, default_value, docstring, float, required)", "docstring": "Defines a flag of type 'float'.\nArgs:\nflag_name: The name of the flag as a string.\ndefault_value: The default value the flag should take as a float.\ndocstring: A helpful message explaining the use of the flag.", "source": "juraj-google-style"}
{"code": "def SpinTimes(spin, bias):\n    \n    if not isinstance(spin, int):\n        raise TypeError('spin must be an int')\n    if spin == -1:\n        return Times(Real((-1, 1)), bias)  \n    elif spin == 1:\n        \n        return bias\n    else:\n        raise ValueError('expected spins to be -1., or 1.')", "docstring": "Define our own multiplication for bias times spins. This allows for\ncleaner log code as well as value checking.\n\nArgs:\nspin (int): -1 or 1\nbias (:class:`pysmt.shortcuts.Symbol`): The bias\n\nReturns:\nspins * bias", "source": "juraj-google-style"}
{"code": "async def build_task_dependencies(chain, task, name, my_task_id):\n    log.info('build_task_dependencies {} {}'.format(name, my_task_id))\n    if (name.count(':') > chain.context.config['max_chain_length']):\n        raise CoTError('Too deep recursion!\\n{}'.format(name))\n    sorted_dependencies = find_sorted_task_dependencies(task, name, my_task_id)\n    for (task_name, task_id) in sorted_dependencies:\n        if (task_id not in chain.dependent_task_ids()):\n            link = LinkOfTrust(chain.context, task_name, task_id)\n            json_path = link.get_artifact_full_path('task.json')\n            try:\n                task_defn = (await chain.context.queue.task(task_id))\n                link.task = task_defn\n                chain.links.append(link)\n                makedirs(os.path.dirname(json_path))\n                with open(json_path, 'w') as fh:\n                    fh.write(format_json(task_defn))\n                (await build_task_dependencies(chain, task_defn, task_name, task_id))\n            except TaskclusterFailure as exc:\n                raise CoTError(str(exc))", "docstring": "Recursively build the task dependencies of a task.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\ntask (dict): the task definition to operate on.\nname (str): the name of the task to operate on.\nmy_task_id (str): the taskId of the task to operate on.\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def absnormpath(self, path):\n    path = self.normcase(path)\n    cwd = self._matching_string(path, self.cwd)\n    if (not path):\n        path = self.path_separator\n    elif (not self._starts_with_root_path(path)):\n        root_name = self._matching_string(path, self.root.name)\n        empty = self._matching_string(path, '')\n        path = self._path_separator(path).join(((((cwd != root_name) and cwd) or empty), path))\n    if (path == self._matching_string(path, '.')):\n        path = cwd\n    return self.normpath(path)", "docstring": "Absolutize and minimalize the given path.\n\nForces all relative paths to be absolute, and normalizes the path to\neliminate dot and empty components.\n\nArgs:\npath:  Path to normalize.\n\nReturns:\nThe normalized path relative to the current working directory,\nor the root directory if path is empty.", "source": "codesearchnet"}
{"code": "def step(self, actions, step_mul=None):\n    if (self._state == environment.StepType.LAST):\n        return self.reset()\n    skip = (not self._ensure_available_actions)\n    self._parallel.run(((c.act, f.transform_action(o.observation, a, skip_available=skip)) for (c, f, o, a) in zip(self._controllers, self._features, self._obs, actions)))\n    self._state = environment.StepType.MID\n    return self._step(step_mul)", "docstring": "Apply actions, step the world forward, and return observations.\n\nArgs:\nactions: A list of actions meeting the action spec, one per agent.\nstep_mul: If specified, use this rather than the environment's default.\n\nReturns:\nA tuple of TimeStep namedtuples, one per agent.", "source": "codesearchnet"}
{"code": "def _LookupClassReferences(serializable_ast, module_map, self_name):\n    class_lookup = visitors.LookupExternalTypes(module_map, self_name=self_name)\n    raw_ast = serializable_ast.ast\n    decorators = {d.type.name for c in raw_ast.classes + raw_ast.functions for d in c.decorators}\n    for node in serializable_ast.class_type_nodes or ():\n        try:\n            class_lookup.allow_functions = node.name in decorators\n            if node is not class_lookup.VisitClassType(node):\n                serializable_ast = serializable_ast.Replace(class_type_nodes=None)\n                break\n        except KeyError as e:\n            raise UnrestorableDependencyError(f'Unresolved class: {str(e)!r}.') from e\n    if serializable_ast.class_type_nodes is None:\n        try:\n            raw_ast = raw_ast.Visit(class_lookup)\n        except KeyError as e:\n            raise UnrestorableDependencyError(f'Unresolved class: {str(e)!r}.') from e\n    serializable_ast = serializable_ast.Replace(ast=raw_ast)\n    return serializable_ast", "docstring": "Fills .cls references in serializable_ast.ast with ones from module_map.\n\nAlready filled references are not changed. References to the module self._name\nare not filled. Setting self_name=None will fill all references.\n\nArgs:\nserializable_ast: A SerializableAst instance.\nmodule_map: Used to resolve ClassType.cls links to already loaded modules.\nThe loaded module will be added to the dict.\nself_name: A string representation of a module which should not be resolved,\nfor example: \"foo.bar.module1\" or None to resolve all modules.\n\nReturns:\nA SerializableAst with an updated .ast. .class_type_nodes is set to None\nif any of the Nodes needed to be regenerated.", "source": "github-repos"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(RevocationReason, self).read(istream, kmip_version=kmip_version)\n    tstream = BytearrayStream(istream.read(self.length))\n    self.revocation_code = RevocationReasonCode()\n    self.revocation_code.read(tstream, kmip_version=kmip_version)\n    if self.is_tag_next(Tags.REVOCATION_MESSAGE, tstream):\n        self.revocation_message = TextString()\n        self.revocation_message.read(tstream, kmip_version=kmip_version)\n    self.is_oversized(tstream)\n    self.validate()", "docstring": "Read the data encoding the RevocationReason object and decode it\ninto its constituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates an existing `BitbucketServerConfig`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBitbucketServerConfigsPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def indent(self, node, dirty=True):\n    if node.subitems:\n        return\n    self._subitems[node.id] = node\n    node.super_list_item_id = self.id\n    node.parent_item = self\n    if dirty:\n        node.touch(True)", "docstring": "Indent an item. Does nothing if the target has subitems.\n\nArgs:\nnode (gkeepapi.node.ListItem): Item to indent.\ndirty (bool): Whether this node should be marked dirty.", "source": "codesearchnet"}
{"code": "def serialize_attrs(self, *args):\n    cls = type(self)\n    result = {}\n    for a in args:\n        if (hasattr(cls, a) and (a not in cls.attrs_forbidden_for_serialization())):\n            val = getattr(self, a)\n            if is_list_like(val):\n                result[a] = list(val)\n            else:\n                result[a] = val\n    return result", "docstring": "Converts and instance to a dictionary with only the specified\nattributes as keys\n\nArgs:\n*args (list): The attributes to serialize\n\nExamples:\n\n>>> customer = Customer.create(name=\"James Bond\", email=\"007@mi.com\",\nphone=\"007\", city=\"London\")\n>>> customer.serialize_attrs('name', 'email')\n{'name': u'James Bond', 'email': u'007@mi.com'}", "source": "codesearchnet"}
{"code": "def bias_add(x, bias, data_format=None):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    bias_shape = int_shape(bias)\n    if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:\n        raise ValueError('Unexpected bias dimensions %d, expect to be 1 or %d dimensions' % (len(bias_shape), ndim(x) - 1))\n    if len(bias_shape) == 1:\n        if data_format == 'channels_first':\n            return nn.bias_add(x, bias, data_format='NCHW')\n        return nn.bias_add(x, bias, data_format='NHWC')\n    if ndim(x) in (3, 4, 5):\n        if data_format == 'channels_first':\n            bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]\n            return x + reshape(bias, bias_reshape_axis)\n        return x + reshape(bias, (1,) + bias_shape)\n    return nn.bias_add(x, bias)", "docstring": "Adds a bias vector to a tensor.\n\nArgs:\nx: Tensor or variable.\nbias: Bias tensor to add.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\nReturns:\nOutput tensor.\n\nRaises:\nValueError: In one of the two cases below:\n1. invalid `data_format` argument.\n2. invalid bias shape.\nthe bias should be either a vector or\na tensor with ndim(x) - 1 dimension", "source": "github-repos"}
{"code": "def boolmask(indices, maxval=None):\n    if (maxval is None):\n        indices = list(indices)\n        maxval = (max(indices) + 1)\n    mask = ([False] * maxval)\n    for index in indices:\n        mask[index] = True\n    return mask", "docstring": "Constructs a list of booleans where an item is True if its position is in\n`indices` otherwise it is False.\n\nArgs:\nindices (list): list of integer indices\n\nmaxval (int): length of the returned list. If not specified\nthis is inferred from `indices`\n\nNote:\nIn the future the arg `maxval` may change its name to `shape`\n\nReturns:\nlist: mask: list of booleans. mask[idx] is True if idx in indices\n\nExample:\n>>> import ubelt as ub\n>>> indices = [0, 1, 4]\n>>> mask = ub.boolmask(indices, maxval=6)\n>>> assert mask == [True, True, False, False, True, False]\n>>> mask = ub.boolmask(indices)\n>>> assert mask == [True, True, False, False, True]", "source": "codesearchnet"}
{"code": "def get_vpc_id(account, region):\n    \n    url = '{0}/networks/aws'.format(API_URL)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n\n    if not response.ok:\n        raise SpinnakerVPCNotFound(response.text)\n\n    vpcs = response.json()\n\n    for vpc in vpcs:\n        LOG.debug('VPC: %(name)s, %(account)s, %(region)s => %(id)s', vpc)\n        if 'name' in vpc and all([vpc['name'] == 'vpc', vpc['account'] == account, vpc['region'] == region]):\n            LOG.info('Found VPC ID for %s in %s: %s', account, region, vpc['id'])\n            vpc_id = vpc['id']\n            break\n    else:\n        LOG.fatal('VPC list: %s', vpcs)\n        raise SpinnakerVPCIDNotFound('No VPC available for {0} [{1}].'.format(account, region))\n\n    return vpc_id", "docstring": "Get VPC ID configured for ``account`` in ``region``.\n\nArgs:\naccount (str): AWS account name.\nregion (str): Region name, e.g. us-east-1.\n\nReturns:\nstr: VPC ID for the requested ``account`` in ``region``.\n\nRaises:\n:obj:`foremast.exceptions.SpinnakerVPCIDNotFound`: VPC ID not found for\n``account`` in ``region``.\n:obj:`foremast.exceptions.SpinnakerVPCNotFound`: Spinnaker has no VPCs\nconfigured.", "source": "juraj-google-style"}
{"code": "def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:\n        \n        pass", "docstring": "Transforms an object to a new type.\n\nArgs:\ntarget_type: The type to be converted to.\nvalue: The object to be transformed.\ncontext: The context of the transformation (mutable).", "source": "juraj-google-style"}
{"code": "def make_mutant_tuples(example_protos, original_feature, index_to_mutate,\n                       viz_params):\n  \n  mutant_features = make_mutant_features(original_feature, index_to_mutate,\n                                         viz_params)\n  mutant_examples = []\n  for example_proto in example_protos:\n    for mutant_feature in mutant_features:\n      copied_example = copy.deepcopy(example_proto)\n      feature_name = mutant_feature.original_feature.feature_name\n\n      try:\n        feature_list = proto_value_for_feature(copied_example, feature_name)\n        if index_to_mutate is None:\n          new_values = mutant_feature.mutant_value\n        else:\n          new_values = list(feature_list)\n          new_values[index_to_mutate] = mutant_feature.mutant_value\n\n        del feature_list[:]\n        feature_list.extend(new_values)\n        mutant_examples.append(copied_example)\n      except (ValueError, IndexError):\n        \n        \n        \n        \n        mutant_examples.append(copied_example)\n\n  return mutant_features, mutant_examples", "docstring": "Return a list of `MutantFeatureValue`s and a list of mutant Examples.\n\nArgs:\nexample_protos: The examples to mutate.\noriginal_feature: A `OriginalFeatureList` that encapsulates the feature to\nmutate.\nindex_to_mutate: The index of the int64_list or float_list to mutate.\nviz_params: A `VizParams` object that contains the UI state of the request.\n\nReturns:\nA list of `MutantFeatureValue`s and a list of mutant examples.", "source": "juraj-google-style"}
{"code": "def get_by_block(self, block_number):\n        \n        blocklist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK).snapshot()\n        block_bytes = block_number.to_bytes(4, 'little')\n        results = []\n        for val in blocklist_snapshot.iterator(prefix=block_bytes, include_key=False):\n            event = SmartContractEvent.FromByteArray(val)\n            results.append(event)\n\n        return results", "docstring": "Look up notifications for a block\nArgs:\nblock_number (int): height of block to search for notifications\n\nReturns:\nlist: a list of notifications", "source": "juraj-google-style"}
{"code": "def _convert_to_eval_metric(metric_fn):\n\n    def problem_metric_fn(*args):\n        \"Returns an aggregation of the metric_fn's returned values.\"\n        (scores, weights) = metric_fn(*args)\n        return tf.metrics.mean(scores, weights)\n    return problem_metric_fn", "docstring": "Wrap a metric fn that returns scores and weights as an eval metric fn.\n\nThe input metric_fn returns values for the current batch. The wrapper\naggregates the return values collected over all of the batches evaluated.\n\nArgs:\nmetric_fn: function that returns scores and weights for the current batch's\nlogits and predicted labels.\n\nReturns:\nfunction that aggregates the scores and weights from metric_fn.", "source": "codesearchnet"}
{"code": "class DPTReassembleStage(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList()\n        if config.is_hybrid:\n            self._init_reassemble_dpt_hybrid(config)\n        else:\n            self._init_reassemble_dpt(config)\n        self.neck_ignore_stages = config.neck_ignore_stages\n\n    def _init_reassemble_dpt_hybrid(self, config):\n        \n        for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):\n            if i <= 1:\n                self.layers.append(nn.Identity())\n            elif i > 1:\n                self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))\n        if config.readout_type != 'project':\n            raise ValueError(f'Readout type {config.readout_type} is not supported for DPT-Hybrid.')\n        self.readout_projects = nn.ModuleList()\n        hidden_size = _get_backbone_hidden_size(config)\n        for i in range(len(config.neck_hidden_sizes)):\n            if i <= 1:\n                self.readout_projects.append(nn.Sequential(nn.Identity()))\n            elif i > 1:\n                self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))\n\n    def _init_reassemble_dpt(self, config):\n        for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):\n            self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))\n        if config.readout_type == 'project':\n            self.readout_projects = nn.ModuleList()\n            hidden_size = _get_backbone_hidden_size(config)\n            for _ in range(len(config.neck_hidden_sizes)):\n                self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n        \n        out = []\n        for i, hidden_state in enumerate(hidden_states):\n            if i not in self.neck_ignore_stages:\n                cls_token, hidden_state = (hidden_state[:, 0], hidden_state[:, 1:])\n                batch_size, sequence_length, num_channels = hidden_state.shape\n                if patch_height is not None and patch_width is not None:\n                    hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)\n                else:\n                    size = torch_int(sequence_length ** 0.5)\n                    hidden_state = hidden_state.reshape(batch_size, size, size, num_channels)\n                hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()\n                feature_shape = hidden_state.shape\n                if self.config.readout_type == 'project':\n                    hidden_state = hidden_state.flatten(2).permute((0, 2, 1))\n                    readout = cls_token.unsqueeze(1).expand_as(hidden_state)\n                    hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1))\n                    hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape)\n                elif self.config.readout_type == 'add':\n                    hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1)\n                    hidden_state = hidden_state.reshape(feature_shape)\n                hidden_state = self.layers[i](hidden_state)\n            
out.append(hidden_state)\n        return out", "docstring": "This class reassembles the hidden states of the backbone into image-like feature representations at various\nresolutions.\n\nThis happens in 3 stages:\n1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to\n`config.readout_type`.\n2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.\n3. Resizing the spatial dimensions (height, width).\n\nArgs:\nconfig (`[DPTConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def getctime(self, path):\n    try:\n        file_obj = self.filesystem.resolve(path)\n    except IOError:\n        self.filesystem.raise_os_error(errno.ENOENT)\n    return file_obj.st_ctime", "docstring": "Returns the creation time of the fake file.\n\nArgs:\npath: the path to fake file.\n\nReturns:\n(int, float) the creation time of the fake file in number of\nseconds since the epoch.\n\nRaises:\nOSError: if the file does not exist.", "source": "codesearchnet"}
{"code": "def metadata(self, path):\n    try:\n        file_metadata = self._blobstorageIO()._status(path)\n        return FileMetadata(path, file_metadata['size'], file_metadata['last_updated'])\n    except Exception as e:\n        raise BeamIOError('Metadata operation failed', {path: e})", "docstring": "Fetch metadata fields of a file on the FileSystem.\n\nArgs:\npath: string path of a file.\n\nReturns:\n:class:`~apache_beam.io.filesystem.FileMetadata`.\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def _build_command(self, python_executable, lib_dir_fq, proxy_enabled):\n    exe_command = [os.path.expanduser(python_executable), '-m', 'pip', 'install', '-r', self.requirements_file, '--ignore-installed', '--quiet', '--target', lib_dir_fq]\n    if self.args.no_cache_dir:\n        exe_command.append('--no-cache-dir')\n    if proxy_enabled:\n        trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']\n        for host in trusted_hosts:\n            exe_command.append('--trusted-host')\n            exe_command.append(host)\n    return exe_command", "docstring": "Build the pip command for installing dependencies.\n\nArgs:\npython_executable (str): The fully qualified path of the Python executable.\nlib_dir_fq (str): The fully qualified path of the lib directory.\n\nReturns:\nlist: The Python pip command with all required args.", "source": "codesearchnet"}
{"code": "def start(logdir, options=None):\n    global _profiler\n    with _profiler_lock:\n        if _profiler is not None:\n            raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')\n        _profiler = _pywrap_profiler.ProfilerSession()\n        try:\n            opts = dict(options._asdict()) if options is not None else {}\n            _profiler.start(logdir, opts)\n        except errors.AlreadyExistsError:\n            logging.warning('Another profiler session is running which is probably created by profiler server. Please avoid using profiler server and profiler APIs at the same time.')\n            raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')\n        except Exception:\n            _profiler = None\n            raise", "docstring": "Start profiling TensorFlow performance.\n\nArgs:\nlogdir: Profiling results log directory.\noptions: `ProfilerOptions` namedtuple to specify miscellaneous profiler\noptions. See example usage below.\n\nRaises:\nAlreadyExistsError: If a profiling session is already running.\n\nExample usage:\n```python\noptions = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,\npython_tracer_level = 1,\ndevice_tracer_level = 1)\ntf.profiler.experimental.start('logdir_path', options = options)\n# Training code here\ntf.profiler.experimental.stop()\n```\n\nTo view the profiling results, launch TensorBoard and point it to `logdir`.\nOpen your browser and go to `localhost:6006/#profile` to view profiling\nresults.", "source": "github-repos"}
{"code": "def add_weatherdata(self, data):\n    if (not isinstance(data, WeatherData)):\n        raise ValueError('Weather data need to be of type WeatherData')\n    self._data['WEATHER DATA'].append(data)", "docstring": "Appends weather data.\n\nArgs:\ndata (WeatherData): weather data object", "source": "codesearchnet"}
{"code": "def launch_R_script(template, arguments, output_function=None, verbose=True, debug=False):\n    id = str(uuid.uuid4())\n    os.makedirs((('/tmp/cdt_R_script_' + id) + '/'))\n    try:\n        scriptpath = (('/tmp/cdt_R_script_' + id) + '/instance_{}'.format(os.path.basename(template)))\n        copy(template, scriptpath)\n        with fileinput.FileInput(scriptpath, inplace=True) as file:\n            for line in file:\n                mline = line\n                for elt in arguments:\n                    mline = mline.replace(elt, arguments[elt])\n                print(mline, end='')\n        if (output_function is None):\n            output = subprocess.call('Rscript --vanilla {}'.format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n        else:\n            if verbose:\n                process = subprocess.Popen('Rscript --vanilla {}'.format(scriptpath), shell=True)\n            else:\n                process = subprocess.Popen('Rscript --vanilla {}'.format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n            process.wait()\n            output = output_function()\n    except Exception as e:\n        if (not debug):\n            rmtree((('/tmp/cdt_R_script_' + id) + '/'))\n        raise e\n    except KeyboardInterrupt:\n        if (not debug):\n            rmtree((('/tmp/cdt_R_script_' + id) + '/'))\n        raise KeyboardInterrupt\n    if (not debug):\n        rmtree((('/tmp/cdt_R_script_' + id) + '/'))\n    return output", "docstring": "Launch an R script, starting from a template and replacing text in file\nbefore execution.\n\nArgs:\ntemplate (str): path to the template of the R script\narguments (dict): Arguments that modify the template's placeholders\nwith arguments\noutput_function (function): Function to execute **after** the execution\nof the R script, and its output is returned by this function. Used\ntraditionally as a function to retrieve the results of the\nexecution.\nverbose (bool): Sets the verbosity of the R subprocess.\ndebug (bool): If True, the generated scripts are not deleted.\n\nReturn:\nReturns the output of the ``output_function`` if not `None`\nelse `True` or `False` depending on whether the execution was\nsuccessful.", "source": "codesearchnet"}
{"code": "def poweroff_server(self, server=None, server_id=None):\n        \n        sid = server_id if server_id is not None else server.sid\n        if sid is None:\n            raise Exception('No Server Specified.')\n        json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))\n        json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)\n        return True if json_obj['Success'] is 'True' else False", "docstring": "Poweroff a VM. If possible to pass the VM object or simply the ID\nof the VM that we want to turn on.\nArgs:\nserver: VM Object that represent the VM to power off,\nserver_id: Int or Str representing the ID of the VM to power off.\nReturns:\nreturn True if json_obj['Success'] is 'True' else False", "source": "juraj-google-style"}
{"code": "def from_json(cls, jsonmsg):\n        \n        import json\n        msg = json.loads(jsonmsg)\n        obj = cls(**msg)\n        obj.validate()\n        return obj", "docstring": "Create an object directly from a JSON string.\n\nApplies general validation after creating the\nobject to check whether all required fields are\npresent.\n\nArgs:\njsonmsg (str): An object encoded as a JSON string\n\nReturns:\nAn object of the generated type\n\nRaises:\nValidationError: if `jsonmsg` does not match the schema\n`cls` was generated from", "source": "juraj-google-style"}
{"code": "def add_jpeg_decoding(module_spec):\n  \n  input_height, input_width = hub.get_expected_image_size(module_spec)\n  input_depth = hub.get_num_image_channels(module_spec)\n  jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n  \n  decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,\n                                                        tf.float32)\n  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n  resize_shape = tf.stack([input_height, input_width])\n  resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n  resized_image = tf.image.resize_bilinear(decoded_image_4d,\n                                           resize_shape_as_int)\n  return jpeg_data, resized_image", "docstring": "Adds operations that perform JPEG decoding and resizing to the graph..\n\nArgs:\nmodule_spec: The hub.ModuleSpec for the image module being used.\n\nReturns:\nTensors for the node to feed JPEG data into, and the output of the\npreprocessing steps.", "source": "juraj-google-style"}
{"code": "def read_graph_execution_trace(self, graph_execution_trace_digest):\n    debug_event = self._reader.read_graph_execution_traces_event(graph_execution_trace_digest.locator)\n    return self._graph_execution_trace_from_debug_event_proto(debug_event, graph_execution_trace_digest.locator)", "docstring": "Read the detailed graph execution trace.\n\nArgs:\ngraph_execution_trace_digest: A `GraphExecutionTraceDigest` object.\n\nReturns:\nThe corresponding `GraphExecutionTrace` object.", "source": "github-repos"}
{"code": "def _ParseFileData(self, knowledge_base, file_object):\n    \n    plist_file = plist.PlistFile()\n\n    try:\n      plist_file.Read(file_object)\n\n    except IOError as exception:\n      raise errors.PreProcessFail(\n          'Unable to read: {0:s} with error: {1!s}'.format(\n              self.ARTIFACT_DEFINITION_NAME, exception))\n\n    if not plist_file.root_key:\n      raise errors.PreProcessFail((\n          'Unable to read: {0:s} with error: missing root key').format(\n              self.ARTIFACT_DEFINITION_NAME))\n\n    matches = []\n\n    self._FindKeys(plist_file.root_key, self._PLIST_KEYS, matches)\n    if not matches:\n      raise errors.PreProcessFail(\n          'Unable to read: {0:s} with error: no such keys: {1:s}.'.format(\n              self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS)))\n\n    name = None\n    value = None\n    for name, value in matches:\n      if value:\n        break\n\n    if value is None:\n      raise errors.PreProcessFail((\n          'Unable to read: {0:s} with error: no values found for keys: '\n          '{1:s}.').format(\n              self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS)))\n\n    self._ParsePlistKeyValue(knowledge_base, name, value)", "docstring": "Parses file content (data) for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_object (dfvfs.FileIO): file-like object that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def select_by_value(self, value):\n        \n        self._selected_key = None\n        self._selected_item = None\n        for k in self.children:\n            item = self.children[k]\n            item.attributes['selected'] = False\n            if value == item.get_value():\n                self._selected_key = k\n                self._selected_item = item\n                self._selected_item.attributes['selected'] = True", "docstring": "Selects an item by the text content of the child.\n\nArgs:\nvalue (str): Text content of the item that have to be selected.", "source": "juraj-google-style"}
{"code": "def format_page(self, page, link_resolver, output):\n        \n        debug('Formatting page %s' % page.link.ref, 'formatting')\n\n        if output:\n            actual_output = os.path.join(output,\n                                         'html')\n            if not os.path.exists(actual_output):\n                os.makedirs(actual_output)\n        else:\n            actual_output = None\n\n        page.format(self.formatter, link_resolver, actual_output)", "docstring": "Called by `project.Project.format_page`, to leave full control\nto extensions over the formatting of the pages they are\nresponsible of.\n\nArgs:\npage: tree.Page, the page to format.\nlink_resolver: links.LinkResolver, object responsible\nfor resolving links potentially mentioned in `page`\noutput: str, path to the output directory.", "source": "juraj-google-style"}
{"code": "def _lm_numdiff_jacobian(eval_func, nmr_params, nmr_observations):\n    \n    return SimpleCLFunction.from_string(r + str(nmr_params) +  + str(nmr_observations) + , dependencies=[eval_func, SimpleCLFunction.from_string( + str(nmr_observations) +  + eval_func.get_cl_function_name() +  + eval_func.get_cl_function_name() + ), SimpleCLFunction.from_string( + str(nmr_observations) +  + eval_func.get_cl_function_name() +  + eval_func.get_cl_function_name() + ), SimpleCLFunction.from_string( + str(nmr_observations) +  + eval_func.get_cl_function_name() +  + eval_func.get_cl_function_name() + )])", "docstring": "Get a numerical differentiated Jacobian function.\n\nThis computes the Jacobian of the observations (function vector) with respect to the parameters.\n\nArgs:\neval_func (mot.lib.cl_function.CLFunction): the evaluation function\nnmr_params (int): the number of parameters\nnmr_observations (int): the number of observations (the length of the function vector).\n\nReturns:\nmot.lib.cl_function.CLFunction: CL function for numerically estimating the Jacobian.", "source": "juraj-google-style"}
{"code": "def apply_product_config(config):\n    cot_product = config['cot_product']\n    for key in config:\n        if (isinstance(config[key], Mapping) and ('by-cot-product' in config[key])):\n            try:\n                config[key] = config[key]['by-cot-product'][cot_product]\n            except KeyError:\n                raise ConfigError('Product {} not specified for key {}'.format(cot_product, key))\n    return config", "docstring": "Apply config values that are keyed by `cot_product`.\n\nThis modifies the passed in configuration.\n\nArgs:\nconfig dict: the config to apply cot_product keying too\n\nReturns: dict", "source": "codesearchnet"}
{"code": "def _get_query_argument(args, cell, env):\n  \n  sql_arg = args.get('query', None)\n  if sql_arg is None:\n    \n    if not isinstance(cell, basestring):\n      raise Exception('Expected a --query argument or inline SQL')\n    return bigquery.Query(cell, env=env)\n\n  item = google.datalab.utils.commands.get_notebook_item(sql_arg)\n  if isinstance(item, bigquery.Query):\n    return item\n  else:\n    raise Exception('Expected a query object, got %s.' % type(item))", "docstring": "Get a query argument to a cell magic.\n\nThe query is specified with args['query']. We look that up and if it is a BQ query\nobject, just return it. If it is a string, build a query object out of it and return\nthat\n\nArgs:\nargs: the dictionary of magic arguments.\ncell: the cell contents which can be variable value overrides (if args has a 'query'\nvalue) or inline SQL otherwise.\nenv: a dictionary that is used for looking up variable values.\n\nReturns:\nA Query object.", "source": "juraj-google-style"}
{"code": "def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None):\n        \n\n        def gamma_cdf(x, a, loc, b):\n            if a == 0 or b == 0:\n                cdf = np.ones(x.shape)\n            else:\n                cdf = gamma.cdf(x, a, loc, b)\n            return cdf\n\n        crps_obj = DistributedCRPS(self.dist_thresholds)\n        if query is not None:\n            sub_forecasts = self.matched_forecasts[model_type][model_name].query(query)\n            sub_forecasts = sub_forecasts.reset_index(drop=True)\n            condition_forecasts = self.matched_forecasts[\"condition\"][condition_model_name].query(query)\n            condition_forecasts = condition_forecasts.reset_index(drop=True)\n        else:\n            sub_forecasts = self.matched_forecasts[model_type][model_name]\n            condition_forecasts = self.matched_forecasts[\"condition\"][condition_model_name]\n        if sub_forecasts.shape[0] > 0:\n            if model_type == \"dist\":\n                forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.dist_thresholds.size))\n                for f in range(sub_forecasts.shape[0]):\n                    condition_prob = condition_forecasts.loc[f, self.forecast_bins[\"condition\"][0]]\n                    if condition_prob >= condition_threshold:\n                        f_params = [0, 0, 0]\n                    else:\n                        f_params = sub_forecasts[self.forecast_bins[model_type]].values[f]\n                    forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params[0], f_params[1], f_params[2])\n                obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params)\n                                    for params in sub_forecasts[self.type_cols[model_type]].values])\n                crps_obj.update(forecast_cdfs, obs_cdfs)\n            else:\n                crps_obj.update(sub_forecasts[self.forecast_bins[model_type].astype(str)].values,\n                                sub_forecasts[self.type_cols[model_type]].values)\n\n        return crps_obj", "docstring": "Calculates the cumulative ranked probability score (CRPS) on the forecast data.\n\nArgs:\nmodel_type: model type being evaluated.\nmodel_name: machine learning model being evaluated.\ncondition_model_name: Name of the hail/no-hail model being evaluated\ncondition_threshold: Threshold for using hail size CDF\nquery: pandas query string to filter the forecasts based on the metadata\n\n\nReturns:\na DistributedCRPS object", "source": "juraj-google-style"}
{"code": "def declare(self, name, description=None, **kwargs):\n    \n    if not self._is_valid_key(name):\n      raise self.InvalidKeyError(\n          'Invalid key name, must begin with a lowercase letter', name)\n    if name in self._declarations:\n      raise self.KeyAlreadyDeclaredError(\n          'Configuration key already declared', name)\n    self._declarations[name] = self.Declaration(\n        name, description=description, **kwargs)", "docstring": "Declare a configuration key with the given name.\n\nArgs:\nname: Configuration key to declare, must not have been already declared.\ndescription: If provided, use this as the description for this key.\n**kwargs: Other kwargs to pass to the Declaration, only default_value\nis currently supported.", "source": "juraj-google-style"}
{"code": "def mtf_transformer_paper_lm(size):\n    n = (2 ** size)\n    hparams = mtf_transformer_base_lm()\n    hparams.batch_size = 256\n    hparams.d_model = 1024\n    hparams.d_ff = int((8192 * n))\n    hparams.d_kv = 256\n    hparams.num_heads = int((8 * n))\n    hparams.shared_embedding_and_softmax_weights = False\n    hparams.learning_rate_decay_steps = 13600\n    return hparams", "docstring": "Config for language-model experiments.\n\nTrain these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs)\n\nThe size parameter is an integer that controls the number of heads and the\nsize of the size of the feedforward hidden layers.  Increasing size by 1\ndoubles each of these.\n\nResults:\nsize   params/10^9  log-ppl(per-token)\n-1     0.14         3.209\n0      0.22         3.119\n1      0.37         3.037\n2      0.67         2.969\n3      1.28         2.912\n4      2.48         2.874\n5      4.90         2.871\n\n(to get word-level log-ppl, multiply by 1.1078)\n\nArgs:\nsize: an integer\nReturns:\na hparams object", "source": "codesearchnet"}
{"code": "def getall(self):\n    users = self.users_re.findall(self.config, re.M)\n    resources = dict()\n    for user in users:\n        resources.update(self._parse_username(user))\n    return resources", "docstring": "Returns all local users configuration as a resource dict\n\nReturns:\ndict: A dict of usernames with a nested resource dict object", "source": "codesearchnet"}
{"code": "def get_parameters(self, grad_only=True):\n        \n        params = OrderedDict()\n\n        for v in self.get_modules():\n            if not isinstance(v, tuple):\n                continue\n            prefix, module = v\n            for k, v in module.__dict__.items():\n                if not isinstance(v, nn.Variable):\n                    continue\n                pname = k\n                name = \"{}/{}\".format(prefix, pname)\n                if grad_only and v.need_grad == False:\n                    continue\n                params[name] = v\n        return params", "docstring": "Get parameters.\nArgs:\ngrad_only (bool, optional): Return parameters with `need_grad` option as `True`.\nIf you set this option as `False`, All parameters are returned. Default is `True`.\nReturns:\ndict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabla.Variable`).", "source": "juraj-google-style"}
{"code": "def to_variant(dataset: DatasetV2):\n    return dataset._variant_tensor", "docstring": "Returns a variant representing the given dataset.\n\nArgs:\ndataset: A `tf.data.Dataset`.\n\nReturns:\nA scalar `tf.variant` tensor representing the given dataset.", "source": "github-repos"}
{"code": "def new(self, val):\n        \n        if len(self.things) >= self.max_things:\n            raise LimitationError('too many things')\n        self.things.add(val)\n        return val", "docstring": "Add a new value to me.\n\nArgs:\nval (LispVal): The value to be added.\n\nReturns:\nLispVal: The added value.\n\nRaises:\n~parthial.errs.LimitationError: If I already contain the maximum\nnumber of elements.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.BatchWriteSpans = channel.unary_unary(\n            \"/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans\",\n            request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2.BatchWriteSpansRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.CreateSpan = channel.unary_unary(\n            \"/google.devtools.cloudtrace.v2.TraceService/CreateSpan\",\n            request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_trace__pb2.Span.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _match_elements(dom, matches):\n    \n    out = {}\n    for key, content in matches.items():\n        pattern = content[\"data\"].strip()\n        if \"\\n\" in pattern:\n            pattern = pattern.split()\n            transformer = lambda x: x.strip().split()\n        else:\n            transformer = lambda x: x.strip()\n\n        matching_elements = _locate_element(\n            dom,\n            pattern,\n            transformer=transformer\n        )\n\n        not_found_msg = content.get(\"notfoundmsg\", \"\").replace(\"$name\", key)\n        if not not_found_msg.strip():\n            not_found_msg = \"Can't locate variable '%s' with content '%s'!\" % (\n                key,\n                pattern,\n            )\n        content[\"notfoundmsg\"] = not_found_msg\n\n        \n        tagname = content.get(\"tagname\", \"\").strip().lower()\n        if tagname:\n            matching_elements = filter(\n                lambda x: x.getTagName().strip().lower() == tagname,\n                matching_elements\n            )\n\n        if not matching_elements:\n            raise UserWarning(not_found_msg)\n\n        if len(matching_elements) > 1:\n            raise UserWarning(\n                \"Ambigious content '%s'!\" % content\n                + \"Content was found in multiple elements!\"\n            )\n\n        out[key] = matching_elements[0]\n\n    return out", "docstring": "Find location of elements matching patterns specified in `matches`.\n\nArgs:\ndom (obj): HTMLElement DOM tree.\nmatches (dict): Structure: ``{\"var\": {\"data\": \"match\", ..}, ..}``.\n\nReturns:\ndict: Structure: ``{\"var\": {\"data\": HTMLElement_obj, ..}, ..}``", "source": "juraj-google-style"}
{"code": "def _GetRecord(self, offset, record_size):\n    record_header = '<4sLQQL'\n    get4 = (lambda x: struct.unpack('<L', self.input_dat[x:(x + 4)])[0])\n    url_offset = struct.unpack('B', self.input_dat[(offset + 52):(offset + 53)])[0]\n    if (url_offset in [255, 254]):\n        return None\n    data_offset = get4((offset + 68))\n    data_size = get4((offset + 72))\n    start_pos = (offset + data_offset)\n    data = struct.unpack('{0}s'.format(data_size), self.input_dat[start_pos:(start_pos + data_size)])[0]\n    fmt = record_header\n    unknown_size = (url_offset - struct.calcsize(fmt))\n    fmt += '{0}s'.format(unknown_size)\n    fmt += '{0}s'.format((record_size - struct.calcsize(fmt)))\n    dat = struct.unpack(fmt, self.input_dat[offset:(offset + record_size)])\n    (header, blocks, mtime, ctime, ftime, _, url) = dat\n    url = url.split(b'\\x00')[0].decode('utf-8')\n    if mtime:\n        mtime = ((mtime \n    if ctime:\n        ctime = ((ctime \n    return {'header': header, 'blocks': blocks, 'urloffset': url_offset, 'data_offset': data_offset, 'data_size': data_size, 'data': data, 'mtime': mtime, 'ctime': ctime, 'ftime': ftime, 'url': url}", "docstring": "Retrieve a single record from the file.\n\nArgs:\noffset: offset from start of input_dat where header starts\nrecord_size: length of the header according to file (untrusted)\n\nReturns:\nA dict containing a single browser history record.", "source": "codesearchnet"}
{"code": "def start(self, auto_register=True):\n        \n        return self.container.start_agent(agent=self, auto_register=auto_register)", "docstring": "Tells the container to start this agent.\nIt returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.\n\nArgs:\nauto_register (bool): register the agent in the server (Default value = True)", "source": "juraj-google-style"}
{"code": "def _on_response_message(self, sequence, topic, message):\n    try:\n        conn_key = self._find_connection(topic)\n        context = self.conns.get_context(conn_key)\n    except ArgumentError:\n        self._logger.warn('Dropping message that does not correspond with a known connection, message=%s', message)\n        return\n    if (('client' in message) and (message['client'] != self.name)):\n        self._logger.debug('Dropping message that is for another client %s, we are %s', message['client'], self.name)\n    if messages.DisconnectionResponse.matches(message):\n        self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None))\n    elif messages.OpenInterfaceResponse.matches(message):\n        self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))\n    elif messages.RPCResponse.matches(message):\n        rpc_message = messages.RPCResponse.verify(message)\n        self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None))\n    elif messages.ProgressNotification.matches(message):\n        progress_callback = context.get('progress_callback', None)\n        if (progress_callback is not None):\n            progress_callback(message['done_count'], message['total_count'])\n    elif messages.ScriptResponse.matches(message):\n        if ('progress_callback' in context):\n            del context['progress_callback']\n        self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))\n    elif messages.DisconnectionNotification.matches(message):\n        try:\n            conn_key = self._find_connection(topic)\n            conn_id = self.conns.get_connection_id(conn_key)\n        except ArgumentError:\n            self._logger.warn('Dropping disconnect notification that does not correspond with a known connection, topic=%s', topic)\n            return\n        self.conns.unexpected_disconnect(conn_key)\n        self._trigger_callback('on_disconnect', self.id, conn_id)\n    else:\n        self._logger.warn('Invalid response message received, message=%s', message)", "docstring": "Process a response message received\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "codesearchnet"}
{"code": "def paint(self):\n    snippet = {'fill-opacity': VectorStyle.get_style_value(self.opacity), 'fill-color': VectorStyle.get_style_value(self.color), 'fill-outline-color': VectorStyle.get_style_value(self.outline_color)}\n    if self.translate:\n        snippet['fill-translate'] = self.translate\n    return snippet", "docstring": "Renders a javascript snippet suitable for use as a mapbox-gl fill paint entry\n\nReturns:\nA dict that can be converted to a mapbox-gl javascript paint snippet", "source": "codesearchnet"}
{"code": "def remove_context(self, name):\n        \n        self._context(name)\n        del self.contexts[name]\n        self._flush_tools()", "docstring": "Remove a context from the suite.\n\nArgs:\nname (str): Name of the context to remove.", "source": "juraj-google-style"}
{"code": "def map_arg(**maps):\n    \n    def deco(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            if six.PY2:\n                argmap = inspect.getcallargs(func, *args, **kwargs)\n            else:\n                \n                sig = inspect.signature(func)\n                argmap = sig.bind_partial(*args, **kwargs).arguments\n            for k, map_func in six.iteritems(maps):\n                if k in argmap:\n                    argmap[k] = map_func(argmap[k])\n            return func(**argmap)\n        return wrapper\n    return deco", "docstring": "Apply a mapping on certain argument before calling the original function.\n\nArgs:\nmaps (dict): {argument_name: map_func}", "source": "juraj-google-style"}
{"code": "def _test_end(self, result, e):\n    if self.begin_time is not None:\n        self.end_time = utils.get_current_epoch_time()\n    self.result = result\n    if e:\n        self.termination_signal = ExceptionRecord(e)", "docstring": "Marks the end of the test logic.\n\nArgs:\nresult: One of the TEST_RESULT enums in TestResultEnums.\ne: A test termination signal (usually an exception object). It can\nbe any exception instance or of any subclass of\nmobly.signals.TestSignal.", "source": "github-repos"}
{"code": "async def attach_url(self, url: str, description: str = None) -> Attachment:\n        \n        return await self._attach(url=url, description=description)", "docstring": "add an url as an attachment\n\n|methcoro|\n\nArgs:\nurl: url you want to add\ndescription: *optional* description for your attachment\n\nReturns:\nAttachment:\n\nRaises:\nValueError: url must not be None\nAPIException", "source": "juraj-google-style"}
{"code": "def CloseExpression(clean_lines, linenum, pos):\n    line = clean_lines.elided[linenum]\n    if ((line[pos] not in '({[<') or Match('<[<=]', line[pos:])):\n        return (line, clean_lines.NumLines(), (- 1))\n    (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])\n    if (end_pos > (- 1)):\n        return (line, linenum, end_pos)\n    while (stack and (linenum < (clean_lines.NumLines() - 1))):\n        linenum += 1\n        line = clean_lines.elided[linenum]\n        (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)\n        if (end_pos > (- 1)):\n            return (line, linenum, end_pos)\n    return (line, clean_lines.NumLines(), (- 1))", "docstring": "If input points to ( or { or [ or <, finds the position that closes it.\n\nIf lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the\nlinenum/pos that correspond to the closing of the expression.\n\nTODO(unknown): cpplint spends a fair bit of time matching parentheses.\nIdeally we would want to index all opening and closing parentheses once\nand have CloseExpression be just a simple lookup, but due to preprocessor\ntricks, this is not so easy.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\npos: A position on the line.\n\nReturns:\nA tuple (line, linenum, pos) pointer *past* the closing brace, or\n(line, len(lines), -1) if we never find a close.  Note we ignore\nstrings and comments when matching; and the line we return is the\n'cleansed' line at linenum.", "source": "codesearchnet"}
{"code": "def __init__(self, dataset=None, worker=None, devices=None, components=None, element_spec=None, options=None, canonicalize_devices=None):\n    if worker is None or devices is None:\n        raise ValueError('Both `worker` and `devices` should be provided')\n    error_message = 'Either `dataset` or both `components` and `element_spec` need to be provided.'\n    self._options = options\n    self._canonicalize_devices = canonicalize_devices\n    if dataset is None:\n        if components is None or element_spec is None:\n            raise ValueError(error_message)\n        self._element_spec = element_spec\n        self._worker = worker\n        self._devices = devices\n        self._iterator = components[0]\n    else:\n        if components is not None or element_spec is not None:\n            raise ValueError(error_message)\n        super(_SingleWorkerOwnedDatasetIterator, self).__init__(dataset, worker, devices, self._options)", "docstring": "Create iterator for the `dataset` to fetch data to worker's `devices` .\n\n`OwnedMultiDeviceIterator` is used to prefetch input to the devices on the\ngiven worker. The lifetime of this iterator is tied to the encompassing\npython object. Once we go out of scope of the python object or return from\na tf.function the underlying iterator resource is deleted.\n\nArgs:\ndataset: A `tf.data.Dataset` instance.\nworker: Worker on which ops should be created.\ndevices: Distribute data from `dataset` to these devices.\ncomponents: Tensor components to construct the\n_SingleWorkerOwnedDatasetIterator from.\nelement_spec: A nested structure of `TypeSpec` objects that represents the\ntype specification of elements of the iterator.\noptions: `tf.distribute.InputOptions` used to control options on how this\ndataset is distributed.\ncanonicalize_devices: Whether to canonicalize devices for workers fully or\npartially. If False, it will partially canonicalize devices by removing\njob and task.", "source": "github-repos"}
{"code": "def _guess_fmt_from_bytes(inp):\n    \n    stripped = inp.strip()\n    fmt = None\n    ini_section_header_re = re.compile(b'^\\[([\\w-]+)\\]')\n\n    if len(stripped) == 0:\n        \n        fmt = 'yaml'\n    else:\n        if stripped.startswith(b'<'):\n            fmt = 'xml'\n        else:\n            for l in stripped.splitlines():\n                line = l.strip()\n                \n                \n                if not line.startswith(b'\n                    break\n            \n            if ini_section_header_re.match(line):\n                fmt = 'ini'\n            else:\n                \n                \n                fmt = 'yaml'\n\n    return fmt", "docstring": "Try to guess format of given bytestring.\n\nArgs:\ninp: byte string to guess format of\nReturns:\nguessed format", "source": "juraj-google-style"}
{"code": "def inspect_config(self, id):\n        \n        url = self._url('/configs/{0}', id)\n        return self._result(self._get(url), True)", "docstring": "Retrieve config metadata\n\nArgs:\nid (string): Full ID of the config to inspect\n\nReturns (dict): A dictionary of metadata\n\nRaises:\n:py:class:`docker.errors.NotFound`\nif no config with that ID exists", "source": "juraj-google-style"}
{"code": "def formula_html(self, reversed_=False):\n        \n        if self.H_count == 1:\n            text = \"H\"\n        elif self.H_count > 1:\n            text = \"H<sub>{}</sub>\".format(self.H_count)\n        else:\n            text = \"\"\n        seq = [self.symbol, text, self.charge_sign_html()]\n        if reversed_:\n            seq = reversed(seq)\n        return \"\".join(seq)", "docstring": "Chemical formula HTML\n\nArgs:\nreversed (bool): reversed text for leftmost atom groups", "source": "juraj-google-style"}
{"code": "def load_with_vocab(fin, vocab, dtype=np.float32):\n    arr = None\n    for line in fin:\n        try:\n            (token, v) = _parse_line(line, dtype)\n        except (ValueError, IndexError):\n            raise ParseError((b'Parsing error in line: ' + line))\n        if (token in vocab):\n            if (arr is None):\n                arr = np.empty((len(vocab), len(v)), dtype=dtype)\n                arr.fill(np.NaN)\n            elif (arr.shape[1] != len(v)):\n                raise ParseError((b'Vector size did not match in line: ' + line))\n            arr[(vocab[token], :)] = np.array(v, dtype=dtype).reshape(1, (- 1))\n    return arr", "docstring": "Load word embedding file with predefined vocabulary\n\nArgs:\nfin (File): File object to read. File should be open for reading ascii.\nvocab (dict): Mapping from words (``bytes``) to vector indices\n(``int``).\ndtype (numpy.dtype): Element data type to use for the array.\n\nReturns:\nnumpy.ndarray: Word embedding representation vectors", "source": "codesearchnet"}
{"code": "def add_path(self, path, path_filter=None):\n        \n        for root, _, files in os.walk(path):\n            for filename in files:\n                full_path_and_filename = os.path.join(root, filename)\n                if path_filter is None or path_filter(full_path_and_filename):\n                    relative_path_and_filename = full_path_and_filename.replace(path + '/', '')\n                    with open(full_path_and_filename, 'rb') as handle:\n                        self.files[relative_path_and_filename] = b64encode(handle.read()).decode('utf-8')", "docstring": "Adding all files from given path to the object.\n\nArgs:\npath (str): valid, existing directory", "source": "juraj-google-style"}
{"code": "def assertProtoEqual(self, a: message.Message, b: message.Message, check_initialized: bool=True, normalize_numbers: bool=False, msg: Optional[str]=None) -> None:\n    pool = descriptor_pool.Default()\n    if isinstance(a, str):\n        a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)\n    for pb in (a, b):\n        if check_initialized:\n            errors = pb.FindInitializationErrors()\n            if errors:\n                cast(absltest.TestCase, self).fail(f'Initialization errors: {errors}\\n{pb}')\n        if normalize_numbers:\n            normalize_number_fields(pb)\n    cast(absltest.TestCase, self).assertMultiLineEqual(text_format.MessageToString(a, descriptor_pool=pool), text_format.MessageToString(b, descriptor_pool=pool), msg=msg)", "docstring": "Fails with a useful error if a and b aren't equal.\n\nComparison of repeated fields matches the semantics of\nunittest.TestCase.assertEqual(), ie order and extra duplicates fields matter.\n\nArgs:\nself: absltest.TestCase\na: proto2 PB instance, or text string representing one.\nb: proto2 PB instance -- message.Message or subclass thereof.\ncheck_initialized: boolean, whether to fail if either a or b isn't\ninitialized.\nnormalize_numbers: boolean, whether to normalize types and precision of\nnumbers before comparison.\nmsg: if specified, is used as the error message on failure.", "source": "github-repos"}
{"code": "def _build_network_on_replica(model, mode, inputs=None, targets=None):\n    from tensorflow.python.keras import models\n    from tensorflow.python.keras.engine import sequential\n    if isinstance(model, sequential.Sequential):\n        updated_model = models._clone_sequential_model(model, input_tensors=inputs, layer_fn=models.share_weights)\n    else:\n        updated_model = models._clone_functional_model(model, input_tensors=inputs, layer_fn=models.share_weights)\n        updated_model._callable_losses = model._callable_losses\n\n    def _upcast_low_precision_outputs(output):\n        if output.dtype == dtypes.bfloat16:\n            return math_ops.cast(output, dtypes.float32)\n        else:\n            return output\n    updated_model.outputs = [_upcast_low_precision_outputs(o) for o in updated_model.outputs]\n    if isinstance(targets, tuple):\n        targets = nest.flatten(targets)\n    if mode == ModeKeys.PREDICT and inputs is not None:\n        _custom_compile_for_predict(updated_model)\n    else:\n        updated_model.compile(model.optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics(model._compile_weighted_metrics), target_tensors=targets)\n    return updated_model", "docstring": "Build an updated model on replicas.\n\nWe create a new Keras model while sharing the variables from the old graph.\nBuilding a new sub-graph is required since the original keras model creates\nplaceholders for the input and the output that are not accessible till we\ncall iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.\n\nThe sharing of weights and layers between the old and the new model guarantee\nthat we're using Strategy variables and any updates on either model are\nreflected correctly in callbacks and loop iterations.\n\nWe need to make sure we share the optimizers between the old and the new model\nas well so that optimizer state is not lost if the user is running fit\nmultiple times.\n\nArgs:\nmodel: Model to be replicated across Replicas\nmode: Which of fit/eval/predict is building the distributed network\ninputs: Input variables to be passed to the model\ntargets: Target tensor to be passed to model.compile\n\nReturns:\nA new model with shared layers with the old model.", "source": "github-repos"}
{"code": "def _sobol_generating_matrices(dim: types.IntTensor, log_num_results: types.IntTensor, num_digits: types.IntTensor, dtype=None) -> types.IntTensor:\n    global _INITIAL_DIRECTION_NUMBERS\n    global _PRIMITIVE_POLYNOMIAL_COEFFICIENTS\n    dtype = dtype or tf.int32\n    zero = tf.constant(0, dtype=dtype)\n    indices = tf.cast(tf.range(0, log_num_results), dtype)\n    dimensions = tf.range(0, dim)\n    directions = tf.convert_to_tensor(_INITIAL_DIRECTION_NUMBERS, dtype=dtype, name='direction_numbers')\n    padding = log_num_results - utils.get_shape(directions)[0]\n    padding = tf.math.maximum(zero, padding)\n    directions = tf.pad(directions, [[zero, padding], [zero, zero]])\n    directions = directions[:log_num_results]\n    directions = tf.gather(directions, dimensions, axis=1)\n    directions = tf.cast(tf.transpose(directions), dtype)\n    polynomial = tf.convert_to_tensor(_PRIMITIVE_POLYNOMIAL_COEFFICIENTS, dtype=dtype, name='polynomial_coefficients')\n    polynomial = tf.cast(tf.gather(polynomial, tf.expand_dims(dimensions, axis=1)), dtype)\n    degree = tf.cast(tf.math.floor(utils.log2(tf.cast(polynomial, dtype=tf.float32))), dtype=dtype)\n    initial_matrices = tf.bitwise.left_shift(directions, tf.cast(tf.expand_dims(num_digits - 1 - indices, axis=0), dtype))\n\n    def loop_predicate_fn(matrix_values, column):\n        del matrix_values\n        return column < log_num_results - 1\n\n    def loop_body_fn(matrices, column):\n        column_values = tf.gather(matrices, [column], axis=1)\n        should_be_updated = tf.logical_and(tf.less_equal(tf.math.maximum(degree, column + 1), indices), tf.less_equal(indices, column + degree))\n        updated_matrices = tf.bitwise.bitwise_xor(tf.where(tf.equal(indices, column + degree), tf.bitwise.right_shift(column_values, degree), matrices), utils.filter_tensor(column_values, polynomial, column + degree - indices))\n        returned_matrices = tf.where(should_be_updated, updated_matrices, matrices)\n        return (returned_matrices, column + 1)\n    matrices, _ = tf.while_loop(loop_predicate_fn, loop_body_fn, loop_vars=(initial_matrices, tf.constant(0, dtype)), maximum_iterations=tf.cast(log_num_results, tf.int32) - 1)\n    return matrices", "docstring": "Returns all Sobol generating matrices.\n\nArgs:\ndim: Positive scalar `Tensor` with rank 0 representing the event size of\npoints which can be sampled from the resulting generating matrix.\nlog_num_results: Positive scalar `Tensor` with rank 0 representing the\nbase-2 logarithm of the maximum number of points which can be sampled from\nthe resulting generating matrix.\nnum_digits: Positive scalar `Tensor` with rank 0 representing the base-2\nprecision of points which can be sampled from the resulting generating\nmatrix.\ndtype: Optional `dtype`. The `dtype` of the output `Tensor` (either a signed\nor unsigned integer `dtype`).\nDefault value: `None` which maps to `int32`.\n\nReturns:\nA scalar `Tensor` with shape `(dim, ceil(log2(num_results)))`.", "source": "github-repos"}
{"code": "def FindFirst(cls, setting_matcher, device_matcher=None, **kwargs):\n    try:\n        return next(cls.FindDevices(setting_matcher, device_matcher=device_matcher, **kwargs))\n    except StopIteration:\n        raise usb_exceptions.DeviceNotFoundError('No device available, or it is in the wrong configuration.')", "docstring": "Find and return the first matching device.\n\nArgs:\nsetting_matcher: See cls.FindDevices.\ndevice_matcher: See cls.FindDevices.\n**kwargs: See cls.FindDevices.\n\nReturns:\nAn instance of UsbHandle.\n\nRaises:\nDeviceNotFoundError: Raised if the device is not available.", "source": "codesearchnet"}
{"code": "def no_llvm(*args, uid=0, gid=0, **kwargs):\n    \n    uchroot_cmd = no_args()\n    uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)]\n    return uchroot_cmd[args]", "docstring": "Return a customizable uchroot command.\n\nThe command will be executed inside a uchroot environment.\n\nArgs:\nargs: List of additional arguments for uchroot (typical: mounts)\nReturn:\nchroot_cmd", "source": "juraj-google-style"}
{"code": "def process_dimensions(kdims, vdims):\n    \n    dimensions = {}\n    for group, dims in [('kdims', kdims), ('vdims', vdims)]:\n        if dims is None:\n            continue\n        elif isinstance(dims, (tuple, basestring, Dimension, dict)):\n            dims = [dims]\n        elif not isinstance(dims, list):\n            raise ValueError(\"%s argument expects a Dimension or list of dimensions, \"\n                             \"specified as tuples, strings, dictionaries or Dimension \"\n                             \"instances, not a %s type. Ensure you passed the data as the \"\n                             \"first argument.\" % (group, type(dims).__name__))\n        for dim in dims:\n            if not isinstance(dim, (tuple, basestring, Dimension, dict)):\n                raise ValueError('Dimensions must be defined as a tuple, '\n                                 'string, dictionary or Dimension instance, '\n                                 'found a %s type.' % type(dim).__name__)\n        dimensions[group] = [asdim(d) for d in dims]\n    return dimensions", "docstring": "Converts kdims and vdims to Dimension objects.\n\nArgs:\nkdims: List or single key dimension(s) specified as strings,\ntuples dicts or Dimension objects.\nvdims: List or single value dimension(s) specified as strings,\ntuples dicts or Dimension objects.\n\nReturns:\nDictionary containing kdims and vdims converted to Dimension\nobjects:\n\n{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]", "source": "juraj-google-style"}
{"code": "def GetArtifactParserDependencies(rdf_artifact):\n  \n  deps = set()\n  processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)\n  for p in processors:\n    deps.update(p.knowledgebase_dependencies)\n  return deps", "docstring": "Return the set of knowledgebase path dependencies required by the parser.\n\nArgs:\nrdf_artifact: RDF artifact object.\n\nReturns:\nA set of strings for the required kb objects e.g.\n[\"users.appdata\", \"systemroot\"]", "source": "juraj-google-style"}
{"code": "def recursively_convert_to_json_serializable(test_obj):\n    try:\n        if ((not isinstance(test_obj, list)) and np.isnan(test_obj)):\n            return None\n    except TypeError:\n        pass\n    except ValueError:\n        pass\n    if isinstance(test_obj, (string_types, integer_types, float, bool)):\n        return test_obj\n    elif isinstance(test_obj, dict):\n        new_dict = {}\n        for key in test_obj:\n            new_dict[str(key)] = recursively_convert_to_json_serializable(test_obj[key])\n        return new_dict\n    elif isinstance(test_obj, (list, tuple, set)):\n        new_list = []\n        for val in test_obj:\n            new_list.append(recursively_convert_to_json_serializable(val))\n        return new_list\n    elif isinstance(test_obj, (np.ndarray, pd.Index)):\n        return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]\n    elif (test_obj is None):\n        return test_obj\n    elif isinstance(test_obj, (datetime.datetime, datetime.date)):\n        return str(test_obj)\n    elif np.issubdtype(type(test_obj), np.bool_):\n        return bool(test_obj)\n    elif (np.issubdtype(type(test_obj), np.integer) or np.issubdtype(type(test_obj), np.uint)):\n        return int(test_obj)\n    elif np.issubdtype(type(test_obj), np.floating):\n        return float(round(test_obj, sys.float_info.dig))\n    elif isinstance(test_obj, pd.DataFrame):\n        return recursively_convert_to_json_serializable(test_obj.to_dict(orient='records'))\n    elif isinstance(test_obj, decimal.Decimal):\n        return float(test_obj)\n    else:\n        raise TypeError(('%s is of type %s which cannot be serialized.' % (str(test_obj), type(test_obj).__name__)))", "docstring": "Helper function to convert a dict object to one that is serializable\n\nArgs:\ntest_obj: an object to attempt to convert a corresponding json-serializable object\n\nReturns:\n(dict) A converted test_object\n\nWarning:\ntest_obj may also be converted in place.", "source": "codesearchnet"}
{"code": "def create_endpoints_csv_file(self, timeout=(- 1)):\n    uri = '{}/endpoints/'.format(self.data['uri'])\n    return self._helper.do_post(uri, {}, timeout, None)", "docstring": "Creates an endpoints CSV file for a SAN.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Endpoint CSV File Response.", "source": "codesearchnet"}
{"code": "def collections(self, page_size=None):\n    iterator = self._client._firestore_api.list_collection_ids(self._document_path, page_size=page_size, metadata=self._client._rpc_metadata)\n    iterator.document = self\n    iterator.item_to_value = _item_to_collection_ref\n    return iterator", "docstring": "List subcollections of the current document.\n\nArgs:\npage_size (Optional[int]]): The maximum number of collections\nin each page of results from this request. Non-positive values\nare ignored. Defaults to a sensible value set by the API.\n\nReturns:\nSequence[~.firestore_v1beta1.collection.CollectionReference]:\niterator of subcollections of the current document. If the\ndocument does not exist at the time of `snapshot`, the\niterator will be empty", "source": "codesearchnet"}
{"code": "def set_package_releases(self, project_name, versions):\n        \n        self.packages[project_name] = sorted(versions, reverse=True)", "docstring": "Storage package information in ``self.packages``\n\nArgs:\nproject_name (str): This will be used as a the key in the\ndictionary.\nversions (list): List of ``str`` representing the available\nversions of a project.", "source": "juraj-google-style"}
{"code": "def Parse(self, rdf_data):\n    \n    if self._filter:\n      return list(self._filter.Parse(rdf_data, self.expression))\n    return rdf_data", "docstring": "Process rdf data through the filter.\n\nFilters sift data according to filter rules. Data that passes the filter\nrule is kept, other data is dropped.\n\nIf no filter method is provided, the data is returned as a list.\nOtherwise, a items that meet filter conditions are returned in a list.\n\nArgs:\nrdf_data: Host data that has already been processed by a Parser into RDF.\n\nReturns:\nA list containing data items that matched the filter rules.", "source": "juraj-google-style"}
{"code": "def start(self, request: Request) -> Response:\n        \n        if self._session_state != SessionState.ready:\n            raise RuntimeError('Session not ready')\n\n        response = Response()\n\n        yield from self._prepare_fetch(request, response)\n\n        response.file_transfer_size = yield from self._fetch_size(request)\n\n        if request.restart_value:\n            try:\n                yield from self._commander.restart(request.restart_value)\n                response.restart_value = request.restart_value\n            except FTPServerError:\n                _logger.debug('Could not restart file.', exc_info=1)\n\n        yield from self._open_data_stream()\n\n        command = Command('RETR', request.file_path)\n\n        yield from self._begin_stream(command)\n\n        self._session_state = SessionState.file_request_sent\n\n        return response", "docstring": "Start a file or directory listing download.\n\nArgs:\nrequest: Request.\n\nReturns:\nA Response populated with the initial data connection reply.\n\nOnce the response is received, call :meth:`download`.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def get_minimum_indentation(text):\n    r\n    lines = text.split('\\n')\n    indentations = [get_indentation(line_)\n                    for line_ in lines  if len(line_.strip()) > 0]\n    if len(indentations) == 0:\n        return 0\n    return min(indentations)", "docstring": "r\"\"\"\nreturns the number of preceding spaces\n\nArgs:\ntext (str): unicode text\n\nReturns:\nint: indentation\n\nCommandLine:\npython -m utool.util_str --exec-get_minimum_indentation --show\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> import utool as ut\n>>> text = '    foo\\n   bar'\n>>> result = get_minimum_indentation(text)\n>>> print(result)\n3", "source": "juraj-google-style"}
{"code": "def deliver_tx(self, raw_transaction):\n        \n\n        self.abort_if_abci_chain_is_not_synced()\n\n        logger.debug('deliver_tx: %s', raw_transaction)\n        transaction = self.bigchaindb.is_valid_transaction(\n            decode_transaction(raw_transaction), self.block_transactions)\n\n        if not transaction:\n            logger.debug('deliver_tx: INVALID')\n            return ResponseDeliverTx(code=CodeTypeError)\n        else:\n            logger.debug('storing tx')\n            self.block_txn_ids.append(transaction.id)\n            self.block_transactions.append(transaction)\n            return ResponseDeliverTx(code=CodeTypeOk)", "docstring": "Validate the transaction before mutating the state.\n\nArgs:\nraw_tx: a raw string (in bytes) transaction.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               batch_size=1000,\n               threadpool_prefix=\"batch_processor\",\n               threadpool_size=10):\n    \n    super(BatchConverter, self).__init__()\n    self.batch_size = batch_size\n    self.threadpool_prefix = threadpool_prefix\n    self.threadpool_size = threadpool_size", "docstring": "BatchProcessor constructor.\n\nArgs:\nbatch_size: All the values will be processed in batches of this size.\nthreadpool_prefix: Prefix that will be used in thread pool's threads\nnames.\nthreadpool_size: Size of a thread pool that will be used. If\nthreadpool_size is 0, no threads will be used and all conversions will\nbe done in the current thread.", "source": "juraj-google-style"}
{"code": "def get_type_parameters(self, annot, seen=None):\n    seen = seen or set()\n    if annot in seen or not annot.formal:\n        return []\n    if isinstance(annot, mixin.NestedAnnotation):\n        seen = seen | {annot}\n    if isinstance(annot, abstract.TypeParameter):\n        return [annot]\n    elif isinstance(annot, abstract.TupleClass):\n        annots = []\n        for idx in range(annot.tuple_length):\n            annots.extend(self.get_type_parameters(annot.formal_type_parameters[idx], seen))\n        return annots\n    elif isinstance(annot, mixin.NestedAnnotation):\n        return sum((self.get_type_parameters(t, seen) for _, t in annot.get_inner_types()), [])\n    return []", "docstring": "Returns all the TypeParameter instances that appear in the annotation.\n\nNote that if you just need to know whether or not the annotation contains\ntype parameters, you can check its `.formal` attribute.\n\nArgs:\nannot: An annotation.\nseen: A seen set.", "source": "github-repos"}
{"code": "def get_account(self, address, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_ACCOUNT_STATE, params=[address], id=id, endpoint=endpoint)", "docstring": "Look up an account on the blockchain.  Sample output:\n\nArgs:\naddress: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def parse_method_configs(interface_config):\n    retry_codes_map = {name: retry_codes for (name, retry_codes) in six.iteritems(interface_config.get('retry_codes', {}))}\n    retry_params_map = {name: retry_params for (name, retry_params) in six.iteritems(interface_config.get('retry_params', {}))}\n    method_configs = {}\n    for (method_name, method_params) in six.iteritems(interface_config.get('methods', {})):\n        retry_params_name = method_params.get('retry_params_name')\n        if (retry_params_name is not None):\n            retry_params = retry_params_map[retry_params_name]\n            retry_ = _retry_from_retry_config(retry_params, retry_codes_map[method_params['retry_codes_name']])\n            timeout_ = _timeout_from_retry_config(retry_params)\n        else:\n            retry_ = None\n            timeout_ = timeout.ConstantTimeout((method_params['timeout_millis'] / _MILLIS_PER_SECOND))\n        method_configs[method_name] = MethodConfig(retry=retry_, timeout=timeout_)\n    return method_configs", "docstring": "Creates default retry and timeout objects for each method in a gapic\ninterface config.\n\nArgs:\ninterface_config (Mapping): The interface config section of the full\ngapic library config. For example, If the full configuration has\nan interface named ``google.example.v1.ExampleService`` you would\npass in just that interface's configuration, for example\n``gapic_config['interfaces']['google.example.v1.ExampleService']``.\n\nReturns:\nMapping[str, MethodConfig]: A mapping of RPC method names to their\nconfiguration.", "source": "codesearchnet"}
{"code": "def get_option(option_name, section_name=\"main\", default=_sentinel, cfg_file=cfg_file):\n    \n    defaults = get_defaults()\n\n    \n    \n    \n    \n\n    \n    \n    if default != _sentinel:\n        my_defaults = {option_name: default}\n    else:\n        my_defaults = defaults.get('section_name', {})\n\n    \n    parser = get_parser(cfg_file)\n    return parser.get(section_name, option_name, vars=my_defaults)", "docstring": "Returns a specific option specific in a config file\n\nArguments:\noption_name  -- Name of the option (example host_name)\nsection_name -- Which section of the config (default: name)\n\nexamples:\n>>> get_option(\"some option\", default=\"default result\")\n'default result'", "source": "juraj-google-style"}
{"code": "def get_connection_string(params, hide_password=True):\n    connection_string = (params['driver'] + ':\n    user = params.get('user', None)\n    password = params.get('password', None)\n    host = params.get('host', None)\n    port = params.get('port', None)\n    database = params.get('database', None)\n    if (database is None):\n        raise ValueError(\"Field 'database' of connection parameters cannot be None.\")\n    if ((password is None) and (user is not None)):\n        password = Client._get_password(params)\n        if (password is None):\n            raise RuntimeError('Password not defined and not available in keyring.')\n    if (host is not None):\n        if (user is not None):\n            connection_string += user\n            if (len(password) > 0):\n                if hide_password:\n                    connection_string += ':[password hidden]'\n                else:\n                    connection_string += (':' + password)\n            connection_string += '@'\n        connection_string += host\n        if (port is not None):\n            connection_string += (':' + str(port))\n    connection_string += ('/' + database)\n    return connection_string", "docstring": "Get a database connection string\n\nArgs:\nparams (dict): database configuration, as defined in :mod:`ozelot.config`\nhide_password (bool): if True, the password is hidden in the returned string\n(use this for logging purposes).\n\nReturns:\nstr: connection string", "source": "codesearchnet"}
{"code": "def has_course_mode(self, course_run_id, mode):\n    course_modes = self.get_course_modes(course_run_id)\n    return any((course_mode for course_mode in course_modes if (course_mode['slug'] == mode)))", "docstring": "Query the Enrollment API to see whether a course run has a given course mode available.\n\nArguments:\ncourse_run_id (str): The string value of the course run's unique identifier\n\nReturns:\nbool: Whether the course run has the given mode avaialble for enrollment.", "source": "codesearchnet"}
{"code": "def load_words(self, words):\n        \n        self._dictionary.update([word.lower() for word in words])\n        self._update_dictionary()", "docstring": "Load a list of words from which to generate a word frequency list\n\nArgs:\nwords (list): The list of words to be loaded", "source": "juraj-google-style"}
{"code": "def firmware_drivers(self):\n    if (not self.__firmware_drivers):\n        self.__firmware_drivers = FirmwareDrivers(self.__connection)\n    return self.__firmware_drivers", "docstring": "Gets the FirmwareDrivers API client.\n\nReturns:\nFirmwareDrivers:", "source": "codesearchnet"}
{"code": "def contains_peroxide(structure, relative_cutoff=1.1):\n    \n    ox_type = oxide_type(structure, relative_cutoff)\n    if ox_type == \"peroxide\":\n        return True\n    else:\n        return False", "docstring": "Determines if a structure contains peroxide anions.\n\nArgs:\nstructure (Structure): Input structure.\nrelative_cutoff: The peroxide bond distance is 1.49 Angstrom.\nRelative_cutoff * 1.49 stipulates the maximum distance two O\natoms must be to each other to be considered a peroxide.\n\nReturns:\nBoolean indicating if structure contains a peroxide anion.", "source": "juraj-google-style"}
{"code": "def schedule(self, callback, *args, **kwargs):\n        \n        self._executor.submit(callback, *args, **kwargs)", "docstring": "Schedule the callback to be called asynchronously in a thread pool.\n\nArgs:\ncallback (Callable): The function to call.\nargs: Positional arguments passed to the function.\nkwargs: Key-word arguments passed to the function.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def from_http_response(response):\n    \n    try:\n        payload = response.json()\n    except ValueError:\n        payload = {\"error\": {\"message\": response.text or \"unknown error\"}}\n\n    error_message = payload.get(\"error\", {}).get(\"message\", \"unknown error\")\n    errors = payload.get(\"error\", {}).get(\"errors\", ())\n\n    message = \"{method} {url}: {error}\".format(\n        method=response.request.method, url=response.request.url, error=error_message\n    )\n\n    exception = from_http_status(\n        response.status_code, message, errors=errors, response=response\n    )\n    return exception", "docstring": "Create a :class:`GoogleAPICallError` from a :class:`requests.Response`.\n\nArgs:\nresponse (requests.Response): The HTTP response.\n\nReturns:\nGoogleAPICallError: An instance of the appropriate subclass of\n:class:`GoogleAPICallError`, with the message and errors populated\nfrom the response.", "source": "juraj-google-style"}
{"code": "def _IsText(self, bytes_in, encoding=None):\n    is_text = True\n    if isinstance(bytes_in, py2to3.UNICODE_TYPE):\n        return is_text\n    for value in bytes_in:\n        if py2to3.PY_2:\n            value = ord(value)\n        if (not (31 < value < 128)):\n            is_text = False\n            break\n    if is_text:\n        return is_text\n    try:\n        bytes_in.decode('utf-8')\n        return True\n    except UnicodeDecodeError:\n        pass\n    if encoding:\n        try:\n            bytes_in.decode(encoding)\n            return True\n        except LookupError:\n            logger.error('Unsupported encoding: {0:s}'.format(encoding))\n        except UnicodeDecodeError:\n            pass\n    return False", "docstring": "Examine the bytes in and determine if they are indicative of text.\n\nParsers need quick and at least semi reliable method of discovering whether\nor not a particular byte stream is text or resembles text or not. This can\nbe used in text parsers to determine if a file is a text file or not for\ninstance.\n\nThe method assumes the byte sequence is either ASCII, UTF-8, UTF-16 or\nmethod supplied character encoding. Otherwise it will make the assumption\nthe byte sequence is not text, but a byte sequence.\n\nArgs:\nbytes_in (bytes|str): byte stream to examine.\nencoding (Optional[str]): encoding to test, if not defined ASCII and\nUTF-8 are tried.\n\nReturns:\nbool: True if the bytes stream contains text.", "source": "codesearchnet"}
{"code": "def __getitem__(self, item):\n        \n        depth = item.count('.') + 1\n        parts = item.split('.', 1)\n        for m in self.modules:\n            if parts[0] == m.name:\n                if depth == 1:\n                    return m\n        for p in self.packages:\n            if parts[0] == p.name:\n                if depth == 1:\n                    return p\n                item = p.get(parts[1])\n                if item:\n                    return item\n        raise KeyError(item)", "docstring": "Return the corresponding Package or Module object.\n\nArgs:\nitem (str): name of the package/module, dot-separated.\n\nReturns:\nPackage/Module: corresponding object.", "source": "juraj-google-style"}
{"code": "def compile_intermediate_cpfs(self,\n                                  scope: Dict[str, TensorFluent],\n                                  batch_size: Optional[int] = None,\n                                  noise: Optional[Noise] = None) -> List[CPFPair]:\n        \n        interm_fluents = []\n\n        with self.graph.as_default():\n            with tf.name_scope('intermediate_cpfs'):\n\n                for cpf in self.rddl.domain.intermediate_cpfs:\n                    cpf_noise = noise.get(cpf.name, None) if noise is not None else None\n\n                    name_scope = utils.identifier(cpf.name)\n                    with tf.name_scope(name_scope):\n                        t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)\n\n                    interm_fluents.append((cpf.name, t))\n                    scope[cpf.name] = t\n\n        return interm_fluents", "docstring": "Compiles the intermediate fluent CPFs given the current `state` and `action` scope.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nA list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def _convert_to_json(self, response):\n    try:\n        return response.json()\n    except ValueError:\n        logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(response.request.url, response.text))\n    return None", "docstring": "Converts response to JSON.\nIf the response cannot be converted to JSON then `None` is returned.\n\nArgs:\nresponse - An object of type `requests.models.Response`\nReturns:\nResponse in JSON format if the response can be converted to JSON. `None` otherwise.", "source": "codesearchnet"}
{"code": "def truthyAttrs(cls):\n        \n        def __bool__(self):\n            return bool(any(getattr(self, attr) for attr in self.attrs))\n\n        cls.__bool__ = cls.__nonzero__ = __bool__\n        return cls", "docstring": "Class decorator: override __bool__ to set truthiness based on any attr being present.\n\nArgs:\ncls (class): class to decorate\n\nReturns:\nclass: same, but modified, class", "source": "juraj-google-style"}
{"code": "def insert(cls, cur, table: str, values: dict):\n    keys = cls._COMMA.join(values.keys())\n    value_place_holder = (cls._PLACEHOLDER * len(values))\n    query = cls._insert_string.format(table, keys, value_place_holder[:(- 1)])\n    (yield from cur.execute(query, tuple(values.values())))\n    return (yield from cur.fetchone())", "docstring": "Creates an insert statement with only chosen fields\n\nArgs:\ntable: a string indicating the name of the table\nvalues: a dict of fields and values to be inserted\n\nReturns:\nA 'Record' object with table columns as properties", "source": "codesearchnet"}
{"code": "def add(self, promise, bitoffset, *, _offsetideal=None):\n        \n        \n        \n        if _offsetideal is None:\n            _offsetideal = bitoffset\n        if isinstance(promise, TDOPromise):\n            newpromise = promise.makesubatoffset(\n                bitoffset, _offsetideal=_offsetideal)\n            self._promises.append(newpromise)\n        elif isinstance(promise, TDOPromiseCollection):\n            for p in promise._promises:\n                self.add(p, bitoffset, _offsetideal=_offsetideal)", "docstring": "Add a promise to the promise collection at an optional offset.\n\nArgs:\npromise: A TDOPromise to add to this collection.\nbitoffset: An integer offset for this new promise in the collection.\n_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control.", "source": "juraj-google-style"}
{"code": "def TransformerLM(vocab_size,\n                  feature_depth=512,\n                  feedforward_depth=2048,\n                  num_layers=6,\n                  num_heads=8,\n                  dropout=0.1,\n                  max_len=2048,\n                  mode='train'):\n  \n  return layers.Serial(\n      layers.ShiftRight(),\n      layers.Embedding(feature_depth, vocab_size),\n      layers.Dropout(rate=dropout, mode=mode),\n      layers.PositionalEncoding(max_len=max_len),\n      layers.Serial(*[DecoderLayer(feature_depth, feedforward_depth, num_heads,\n                                   dropout, mode)\n                      for _ in range(num_layers)]),\n      layers.LayerNorm(),\n      layers.Dense(vocab_size),\n      layers.LogSoftmax()\n  )", "docstring": "Transformer language model (only uses the decoder part of Transformer).\n\nArgs:\nvocab_size: int: vocab size\nfeature_depth: int:  depth of embedding\nfeedforward_depth: int: depth of feed-forward layer\nnum_layers: int: number of encoder/decoder layers\nnum_heads: int: number of attention heads\ndropout: float: dropout rate (how much to drop out)\nmax_len: int: maximum symbol length for positional encoding\nmode: str: 'train' or 'eval'\n\nReturns:\nthe layer.", "source": "juraj-google-style"}
{"code": "def read_schema(path):\n    result = schema_pb2.Schema()\n    contents = file_io.read_file_to_string(path)\n    text_format.Parse(contents, result)\n    return result", "docstring": "Reads a schema from the provided location.\n\nArgs:\npath: The location of the file holding a serialized Schema proto.\n\nReturns:\nAn instance of Schema or None if the input argument is None", "source": "github-repos"}
{"code": "def process_sequence(sequence,\n                     rules=None,\n                     skip_non_vietnamese=True):\n    \n    result = \"\"\n    raw = result\n    result_parts = []\n    if rules is None:\n        rules = get_telex_definition()\n\n    accepted_chars = _accepted_chars(rules)\n\n    for key in sequence:\n        if key not in accepted_chars:\n            result_parts.append(result)\n            result_parts.append(key)\n            result = \"\"\n            raw = \"\"\n        else:\n            result, raw = process_key(\n                string=result,\n                key=key,\n                fallback_sequence=raw,\n                rules=rules,\n                skip_non_vietnamese=skip_non_vietnamese)\n\n    result_parts.append(result)\n    return ''.join(result_parts)", "docstring": "\\\nConvert a key sequence into a Vietnamese string with diacritical marks.\n\nArgs:\nrules (optional): see docstring for process_key().\nskip_non_vietnamese (optional): see docstring for process_key().\n\nIt even supports continous key sequences connected by separators.\ni.e. process_sequence('con meof.ddieen') should work.", "source": "juraj-google-style"}
{"code": "def image_load(filename: str) -> tcod.image.Image:\n    \n    return tcod.image.Image._from_cdata(\n        ffi.gc(lib.TCOD_image_load(_bytes(filename)), lib.TCOD_image_delete)\n    )", "docstring": "Load an image file into an Image instance and return it.\n\nArgs:\nfilename (AnyStr): Path to a .bmp or .png image file.", "source": "juraj-google-style"}
{"code": "def from_raw(self, file_names=None, **kwargs):\n    if file_names:\n        self.file_names = file_names\n    if (not isinstance(file_names, (list, tuple))):\n        self.file_names = [file_names]\n    raw_file_loader = self.loader\n    set_number = 0\n    test = None\n    counter = 0\n    self.logger.debug('start iterating through file(s)')\n    for f in self.file_names:\n        self.logger.debug('loading raw file:')\n        self.logger.debug(f'{f}')\n        new_tests = raw_file_loader(f, **kwargs)\n        if new_tests:\n            if (test is not None):\n                self.logger.debug('continuing reading files...')\n                _test = self._append(test[set_number], new_tests[set_number])\n                if (not _test):\n                    self.logger.warning(f'EMPTY TEST: {f}')\n                    continue\n                test[set_number] = _test\n                self.logger.debug('added this test - started merging')\n                for j in range(len(new_tests[set_number].raw_data_files)):\n                    raw_data_file = new_tests[set_number].raw_data_files[j]\n                    file_size = new_tests[set_number].raw_data_files_length[j]\n                    test[set_number].raw_data_files.append(raw_data_file)\n                    test[set_number].raw_data_files_length.append(file_size)\n                    counter += 1\n                    if (counter > 10):\n                        self.logger.debug('ERROR? Too many files to merge')\n                        raise ValueError('Too many files to merge - could be a p2-p3 zip thing')\n            else:\n                self.logger.debug('getting data from first file')\n                if new_tests[set_number].no_data:\n                    self.logger.debug('NO DATA')\n                else:\n                    test = new_tests\n        else:\n            self.logger.debug('NOTHING LOADED')\n    self.logger.debug('finished loading the raw-files')\n    test_exists = False\n    if test:\n        if test[0].no_data:\n            self.logging.debug('the first dataset (or only dataset) loaded from the raw data file is empty')\n        else:\n            test_exists = True\n    if test_exists:\n        if (not prms.Reader.sorted_data):\n            self.logger.debug('sorting data')\n            test[set_number] = self._sort_data(test[set_number])\n        self.datasets.append(test[set_number])\n    else:\n        self.logger.warning('No new datasets added!')\n    self.number_of_datasets = len(self.datasets)\n    self.status_datasets = self._validate_datasets()\n    self._invent_a_name()\n    return self", "docstring": "Load a raw data-file.\n\nArgs:\nfile_names (list of raw-file names): uses CellpyData.file_names if\nNone. If the list contains more than one file name, then the\nruns will be merged together.", "source": "codesearchnet"}
{"code": "def _initialize_splittable_dimensions(self, mtf_graph):\n    \n    all_mtf_dimension_names = set()  \n    for mtf_operation in mtf_graph.operations:\n      for mtf_tensor in mtf_operation.outputs:\n        for mtf_dimension in mtf_tensor.shape.dims:\n          if not re.match(r\"_anonymous_\\d*\", mtf_dimension.name):\n            all_mtf_dimension_names.add(mtf_dimension.name)\n\n    unsplittable_mtf_dimension_names = set()  \n    for mtf_operation in mtf_graph.operations:\n      unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)\n\n    return all_mtf_dimension_names - unsplittable_mtf_dimension_names", "docstring": "Initializer for self._splittable_mtf_dimension_names.\n\nArgs:\nmtf_graph: an mtf.Graph.\n\nReturns:\nA set(string) of the names of Mesh TensorFlow dimensions that may be\nassigned in a layout.", "source": "juraj-google-style"}
{"code": "def exhaustive_fragment_check(self, ontology: pd.DataFrame, iri_curie_fragment_predicate: str='iri', cross_reference_iris: bool=False, cross_reference_fragments: bool=False, diff: bool=True) -> Tuple[list]:\n    (inside, outside) = ([], [])\n    header = (['Index'] + list(ontology.columns))\n    for row in ontology.itertuples():\n        row = {header[i]: val for (i, val) in enumerate(row)}\n        entity_suffix = row[iri_curie_fragment_predicate]\n        if isinstance(entity_suffix, list):\n            if (len(entity_suffix) != 0):\n                exit('Need to have only 1 iri in the cell from the onotology.')\n            else:\n                entity_suffix = entity_suffix[0]\n        entity_fragment = self.extract_fragment(entity_suffix)\n        ilx_rows = self.fragment2rows.get(entity_fragment)\n        if (cross_reference_fragments and ilx_rows):\n            ilx_rows = [row for row in ilx_rows if (entity_fragment.lower() in row['iri'].lower())]\n        if (cross_reference_iris and ilx_rows):\n            ilx_rows = [row for row in ilx_rows if (entity_suffix.rsplit('/', 1)[(- 1)].lower() in row['iri'].lower())]\n        if ilx_rows:\n            inside.append({'external_ontology_row': row, 'ilx_rows': ilx_rows})\n        else:\n            outside.append(row)\n    if diff:\n        diff = self.__exhaustive_diff(inside)\n        return (inside, outside, diff)\n    return (inside, outside)", "docstring": "All entities with conflicting fragments gets a full diff to see if they belong\n\nArgs:\nontology: pandas DataFrame created from an ontology where the colnames are predicates\nand if classes exist it is also thrown into a the colnames.\niri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame for iri\nDefault is \"iri\" for graph2pandas module\ndiff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2\nReturns:\ninside: entities that are inside of InterLex\noutside: entities NOT in InterLex\ndiff (optional): List[List[dict]]... so complicated but usefull diff between matches only", "source": "codesearchnet"}
{"code": "def detect_language(index_page):\n    \n    dom = dhtmlparser.parseString(index_page)\n\n    clean_content = dhtmlparser.removeTags(dom)\n\n    lang = None\n    try:\n        lang = langdetect.detect(clean_content)\n    except UnicodeDecodeError:\n        lang = langdetect.detect(clean_content.decode(\"utf-8\"))\n\n    return SourceString(\n        lang,\n        source=\"langdetect\"\n    )", "docstring": "Detect `languages` using `langdetect` library.\n\nArgs:\nindex_page (str): HTML content of the page you wish to analyze.\n\nReturns:\nobj: One :class:`.SourceString` object.", "source": "juraj-google-style"}
{"code": "def reverse_transform(self, tables, table_metas=None, missing=None):\n    if (missing is None):\n        missing = self.missing\n    else:\n        self.missing = missing\n        warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform'), DeprecationWarning)\n    reverse = {}\n    for table_name in tables:\n        table = tables[table_name]\n        if (table_metas is None):\n            table_meta = self.table_dict[table_name][1]\n        else:\n            table_meta = table_metas[table_name]\n        reverse[table_name] = self.reverse_transform_table(table, table_meta)\n    return reverse", "docstring": "Transform data back to its original format.\n\nArgs:\ntables(dict):   mapping of table names to `tuple` where each tuple is on the form\n(`pandas.DataFrame`, `dict`). The `DataFrame` contains the transformed\ndata and the `dict` the corresponding meta information.\nIf not specified, the tables will be retrieved using the meta_file.\n\ntable_metas(dict):  Full metadata file for the dataset.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\ndict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).", "source": "codesearchnet"}
{"code": "def abi_to_fasta(input, output):\n    \n    direcs = [input, ]\n    \n    zip_files = list_files(input, ['zip'])\n    if zip_files:\n        direcs.extend(_process_zip_files(zip_files))\n    \n    for d in direcs:\n        files = list_files(d, ['ab1', 'abi'])\n        seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]\n        \n        fastas = ['>{}\\n{}'.format(s.id, str(s.seq)) for s in seqs]\n        ofile = os.path.basename(os.path.normpath(d)) + '.fasta'\n        opath = os.path.join(output, ofile)\n        open(opath, 'w').write('\\n'.join(fastas))", "docstring": "Converts ABI or AB1 files to FASTA format.\n\nArgs:\n\ninput (str): Path to a file or directory containing abi/ab1 files or\nzip archives of abi/ab1 files\n\noutput (str): Path to a directory for the output FASTA files", "source": "juraj-google-style"}
{"code": "def torque_off(self):\n    data = []\n    data.append(10)\n    data.append(self.servoid)\n    data.append(RAM_WRITE_REQ)\n    data.append(TORQUE_CONTROL_RAM)\n    data.append(1)\n    data.append(0)\n    send_data(data)", "docstring": "Set the torques of Herkulex to zero\n\nIn this mode, position control and velocity control\nwill not work, enable torque before that. Also the\nservo shaft is freely movable\n\nArgs:\nnone", "source": "codesearchnet"}
{"code": "def __init__(self, model_handler: ModelHandler[ExampleT, PredictionT, Any], clock, metrics_namespace, load_model_at_runtime: bool=False, model_tag: str='RunInference'):\n    self._model_handler = model_handler\n    self._shared_model_handle = shared.Shared()\n    self._clock = clock\n    self._model = None\n    self._metrics_namespace = metrics_namespace\n    self._load_model_at_runtime = load_model_at_runtime\n    self._side_input_path = None\n    self._model_tag = model_tag\n    self._cur_tag = model_tag", "docstring": "A DoFn implementation generic to frameworks.\n\nArgs:\nmodel_handler: An implementation of ModelHandler.\nclock: A clock implementing time_ns. *Used for unit testing.*\nmetrics_namespace: Namespace of the transform to collect metrics.\nload_model_at_runtime: Bool to indicate if model loading should be\ndeferred to runtime - for example if we are depending on side\ninputs to get the model path or we want to enforce a timeout on\nmodel loading.\nmodel_tag: Tag to use to disambiguate models in multi-model settings.", "source": "github-repos"}
{"code": "def _apply_transformation(inputs):\n    (ts, transformation, extend_collection, clear_redo) = inputs\n    new = ts.append_transformation(transformation, extend_collection, clear_redo=clear_redo)\n    o = [ts]\n    if new:\n        o.extend(new)\n    return o", "docstring": "Helper method for multiprocessing of apply_transformation. Must not be\nin the class so that it can be pickled.\n\nArgs:\ninputs: Tuple containing the transformed structure, the transformation\nto be applied, a boolean indicating whether to extend the\ncollection, and a boolean indicating whether to clear the redo\n\nReturns:\nList of output structures (the modified initial structure, plus\nany new structures created by a one-to-many transformation)", "source": "codesearchnet"}
{"code": "def _resize_output_size_rescale_to_max_len(height: int, width: int, min_len: Optional[int]=1, max_len: Optional[int]=None) -> Tuple[int, int]:\n    max_len = max(height, width) if max_len is None else max_len\n    aspect_ratio = width / height\n    if width >= height:\n        width = max_len\n        height = int(width / aspect_ratio)\n        if height % 2 != 0:\n            height += 1\n    elif height > width:\n        height = max_len\n        width = int(height * aspect_ratio)\n        if width % 2 != 0:\n            width += 1\n    height = max(height, min_len)\n    width = max(width, min_len)\n    return (height, width)", "docstring": "Get the output size of the image after resizing given a dictionary specifying the max and min sizes.\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nmin_len (`int`, *optional*, defaults to 1):\nMinimum size of the output image.\nmax_len (`int`, *optional*, defaults to the maximum size of the image):\nMaximum size of the output image.\nReturns:\nThe output size of the image after resizing.", "source": "github-repos"}
{"code": "def validate(self, definition, version=None, strict=False):\n        \n        if not HAS_KUBERNETES_VALIDATE:\n            raise KubernetesValidateMissing()\n\n        errors = list()\n        warnings = list()\n        try:\n            if version is None:\n                try:\n                    version = self.version['kubernetes']['gitVersion']\n                except KeyError:\n                    version = kubernetes_validate.latest_version()\n            kubernetes_validate.validate(definition, version, strict)\n        except kubernetes_validate.utils.ValidationError as e:\n            errors.append(\"resource definition validation error at %s: %s\" % ('.'.join([str(item) for item in e.path]), e.message))  \n        except VersionNotSupportedError as e:\n            errors.append(\"Kubernetes version %s is not supported by kubernetes-validate\" % version)\n        except kubernetes_validate.utils.SchemaNotFoundError as e:\n            warnings.append(\"Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)\" %\n                            (e.kind, e.api_version, e.version))\n        return warnings, errors", "docstring": "validate checks a kubernetes resource definition\n\nArgs:\ndefinition (dict): resource definition\nversion (str): version of kubernetes to validate against\nstrict (bool): whether unexpected additional properties should be considered errors\n\nReturns:\nwarnings (list), errors (list): warnings are missing validations, errors are validation failures", "source": "juraj-google-style"}
{"code": "def get_max_res_without_distortion(image_size: Tuple[int, int], target_size: Tuple[int, int]) -> Tuple[int, int]:\n    original_height, original_width = image_size\n    target_height, target_width = target_size\n    scale_w = target_width / original_width\n    scale_h = target_height / original_height\n    if scale_w < scale_h:\n        new_width = target_width\n        new_height = min(math.floor(original_height * scale_w), target_height)\n    else:\n        new_height = target_height\n        new_width = min(math.floor(original_width * scale_h), target_width)\n    return (new_height, new_width)", "docstring": "Determines the maximum resolution to which an image can be resized to without distorting its\naspect ratio, based on the target resolution.\n\nArgs:\nimage_size (Tuple[int, int]): The original resolution of the image (height, width).\ntarget_resolution (Tuple[int, int]): The desired resolution to fit the image into (height, width).\nReturns:\nTuple[int, int]: The optimal dimensions (height, width) to which the image should be resized.\nExample:\n>>> _get_max_res_without_distortion([200, 300], target_size = [450, 200])\n(134, 200)\n>>> _get_max_res_without_distortion([800, 600], target_size = [450, 1300])\n(450, 338)", "source": "github-repos"}
{"code": "def encode_texts(self, texts, unknown_token='<UNK>', verbose=1, **kwargs):\n    if (not self.has_vocab):\n        raise ValueError('You need to build the vocabulary using `build_vocab` before using `encode_texts`')\n    if (unknown_token and (unknown_token not in self.special_token)):\n        raise ValueError(((('Your special token (' + unknown_token) + ') to replace unknown words is not in the list of special token: ') + self.special_token))\n    progbar = Progbar(len(texts), verbose=verbose, interval=0.25)\n    encoded_texts = []\n    for token_data in self.token_generator(texts, **kwargs):\n        (indices, token) = (token_data[:(- 1)], token_data[(- 1)])\n        token_idx = self._token2idx.get(token)\n        if ((token_idx is None) and unknown_token):\n            token_idx = self.special_token.index(unknown_token)\n        if (token_idx is not None):\n            utils._append(encoded_texts, indices, token_idx)\n        progbar.update(indices[0])\n    progbar.update(len(texts))\n    return encoded_texts", "docstring": "Encodes the given texts using internal vocabulary with optionally applied encoding options. See\n``apply_encoding_options` to set various options.\n\nArgs:\ntexts: The list of text items to encode.\nunknown_token: The token to replace words that out of vocabulary. If none, those words are omitted.\nverbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)\n**kwargs: The kwargs for `token_generator`.\n\nReturns:\nThe encoded texts.", "source": "codesearchnet"}
{"code": "def _refine_candidate(self, width, height):\n        \n        packer = newPacker(PackingMode.Offline, PackingBin.BFF, \n                pack_algo=self._pack_algo, sort_algo=SORT_LSIDE,\n                rotation=self._rotation)\n        packer.add_bin(width, height)\n       \n        for r in self._rectangles:\n            packer.add_rect(*r)\n\n        packer.pack()\n\n        \n        if len(packer[0]) != len(self._rectangles):\n            return None\n\n        \n        new_height = max(packer[0], key=lambda x: x.top).top\n        return(width, new_height, packer)", "docstring": "Use bottom-left packing algorithm to find a lower height for the\ncontainer.\n\nArguments:\nwidth\nheight\n\nReturns:\ntuple (width, height, PackingAlgorithm):", "source": "juraj-google-style"}
{"code": "def __init__(self, filters):\n        \n        self.filters = filters\n        super().__init__(', '.join(repr(f) for f in filters))", "docstring": "Initialization of instances:\n\nArgs:\nfilters (list): the invalid filter names.\n\nAttributes:\nfilters (list): the invalid filter names.", "source": "juraj-google-style"}
{"code": "def InitializeDownload(self, http_request, http=None, client=None):\n        \n        self.EnsureUninitialized()\n        if http is None and client is None:\n            raise exceptions.UserError('Must provide client or http.')\n        http = http or client.http\n        if client is not None:\n            http_request.url = client.FinalizeTransferUrl(http_request.url)\n        url = http_request.url\n        if self.auto_transfer:\n            end_byte = self.__ComputeEndByte(0)\n            self.__SetRangeHeader(http_request, 0, end_byte)\n            response = http_wrapper.MakeRequest(\n                self.bytes_http or http, http_request)\n            if response.status_code not in self._ACCEPTABLE_STATUSES:\n                raise exceptions.HttpError.FromResponse(response)\n            self.__initial_response = response\n            self.__SetTotal(response.info)\n            url = response.info.get('content-location', response.request_url)\n        if client is not None:\n            url = client.FinalizeTransferUrl(url)\n        self._Initialize(http, url)\n        \n        \n        if self.auto_transfer:\n            self.StreamInChunks()", "docstring": "Initialize this download by making a request.\n\nArgs:\nhttp_request: The HttpRequest to use to initialize this download.\nhttp: The httplib2.Http instance for this request.\nclient: If provided, let this client process the final URL before\nsending any additional requests. If client is provided and\nhttp is not, client.http will be used instead.", "source": "juraj-google-style"}
{"code": "def compute_index(self, axis, data_object, compute_diff=True):\n\n    def pandas_index_extraction(df, axis):\n        if (not axis):\n            return df.index\n        else:\n            try:\n                return df.columns\n            except AttributeError:\n                return pandas.Index([])\n    index_obj = (self.index if (not axis) else self.columns)\n    old_blocks = (self.data if compute_diff else None)\n    new_indices = data_object.get_indices(axis=axis, index_func=(lambda df: pandas_index_extraction(df, axis)), old_blocks=old_blocks)\n    return (index_obj[new_indices] if compute_diff else new_indices)", "docstring": "Computes the index after a number of rows have been removed.\n\nNote: In order for this to be used properly, the indexes must not be\nchanged before you compute this.\n\nArgs:\naxis: The axis to extract the index from.\ndata_object: The new data object to extract the index from.\ncompute_diff: True to use `self` to compute the index from self\nrather than data_object. This is used when the dimension of the\nindex may have changed, but the deleted rows/columns are\nunknown.\n\nReturns:\nA new pandas.Index object.", "source": "codesearchnet"}
{"code": "def field_mask(original, modified):\n    if ((original is None) and (modified is None)):\n        return field_mask_pb2.FieldMask()\n    if ((original is None) and (modified is not None)):\n        original = copy.deepcopy(modified)\n        original.Clear()\n    if ((modified is None) and (original is not None)):\n        modified = copy.deepcopy(original)\n        modified.Clear()\n    if (type(original) != type(modified)):\n        raise ValueError('expected that both original and modified should be of the same type, received \"{!r}\" and \"{!r}\".'.format(type(original), type(modified)))\n    return field_mask_pb2.FieldMask(paths=_field_mask_helper(original, modified))", "docstring": "Create a field mask by comparing two messages.\n\nArgs:\noriginal (~google.protobuf.message.Message): the original message.\nIf set to None, this field will be interpretted as an empty\nmessage.\nmodified (~google.protobuf.message.Message): the modified message.\nIf set to None, this field will be interpretted as an empty\nmessage.\n\nReturns:\ngoogle.protobuf.field_mask_pb2.FieldMask: field mask that contains\nthe list of field names that have different values between the two\nmessages. If the messages are equivalent, then the field mask is empty.\n\nRaises:\nValueError: If the ``original`` or ``modified`` are not the same type.", "source": "codesearchnet"}
{"code": "def extract_signature(func, ignore_first=False):\n    \n    sig_params = get_signature_params(func)\n\n    if ignore_first:\n        if len(sig_params) == 0:\n            raise Exception(\"Methods must take a 'self' argument, but the \"\n                            \"method '{}' does not have one.\".format(\n                                func.__name__))\n        sig_params = sig_params[1:]\n\n    \n    arg_names = []\n    arg_defaults = []\n    arg_is_positionals = []\n    keyword_names = set()\n    for arg_name, parameter in sig_params:\n        arg_names.append(arg_name)\n        arg_defaults.append(parameter.default)\n        arg_is_positionals.append(parameter.kind == parameter.VAR_POSITIONAL)\n        if parameter.kind == Parameter.POSITIONAL_OR_KEYWORD:\n            \n            keyword_names.add(arg_name)\n\n    return FunctionSignature(arg_names, arg_defaults, arg_is_positionals,\n                             keyword_names, func.__name__)", "docstring": "Extract the function signature from the function.\n\nArgs:\nfunc: The function whose signature should be extracted.\nignore_first: True if the first argument should be ignored. This should\nbe used when func is a method of a class.\n\nReturns:\nA function signature object, which includes the names of the keyword\narguments as well as their default values.", "source": "juraj-google-style"}
{"code": "def pretty_print_config_to_json(self, services, hostname=None):\n    \n    descriptor = self.get_config_dict(services, hostname)\n    return json.dumps(descriptor, sort_keys=True, indent=2,\n                      separators=(',', ': '))", "docstring": "JSON string description of a protorpc.remote.Service in API format.\n\nArgs:\nservices: Either a single protorpc.remote.Service or a list of them\nthat implements an api/version.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\nstring, The API descriptor document as a JSON string.", "source": "juraj-google-style"}
{"code": "def dp004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dp004`'.format(value))\n\n        self._dp004 = value", "docstring": "Corresponds to IDD Field `dp004`\nDew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `dp004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def snap_mismatched_borders(script, edge_dist_ratio=0.01, unify_vert=True):\n    filter_xml = ''.join(['  <filter name=\"Snap Mismatched Borders\">\\n', '    <Param name=\"EdgeDistRatio\" ', 'value=\"{}\" '.format(edge_dist_ratio), 'description=\"Edge Distance Ratio\" ', 'type=\"RichFloat\" ', '/>\\n', '    <Param name=\"UnifyVertices\" ', 'value=\"{}\" '.format(str(unify_vert).lower()), 'description=\"UnifyVertices\" ', 'type=\"RichBool\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Try to snap together adjacent borders that are slightly mismatched.\n\nThis situation can happen on badly triangulated adjacent patches defined by\nhigh order surfaces. For each border vertex the filter snaps it onto the\nclosest boundary edge only if it is closest of edge_legth*threshold. When\nvertex is snapped the corresponding face it split and a new vertex is\ncreated.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nedge_dist_ratio (float): Collapse edge when the edge / distance ratio\nis greater than this value. E.g. for default value 1000 two\nstraight border edges are collapsed if the central vertex dist from\nthe straight line composed by the two edges less than a 1/1000 of\nthe sum of the edges length. Larger values enforce that only\nvertexes very close to the line are removed.\nunify_vert (bool): If true the snap vertices are welded together.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def _get_spec(self) -> dict:\n        \n        if self.spec:\n            return self.spec\n        self.spec = requests.get(self.SPEC_URL.format(self.version)).json()\n        return self.spec", "docstring": "Fetches the OpenAPI spec from the server.\n\nIf the spec has already been fetched, the cached version is returned instead.\n\nArgS:\nNone\n\nReturns:\nOpenAPI spec data", "source": "juraj-google-style"}
{"code": "def protoc_command(lang, output_dir, proto_path, refactored_dir):\n    \n    proto_files = glob.glob(os.path.join(refactored_dir, '*.proto'))\n    cmd = ['protoc', '-I', proto_path, '--{}_out'.format(lang), output_dir]\n    cmd.extend(proto_files)\n    print(' '.join(cmd))\n    p = subprocess.Popen(\n        cmd, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin,\n        cwd=proto_path)\n    p.communicate()", "docstring": "Runs the \"protoc\" command on the refactored Protobuf files to generate\nthe source python/python3 files.\n\nArgs:\nlang (str): the language to compile with \"protoc\"\n(i.e. python, python3)\noutput_dir (str): the output directory for the generated source files\nproto_path (str): the root protobuf build path in which to run \"protoc\"\nrefactored_dir (str): the input directory of the Protobuf files", "source": "juraj-google-style"}
{"code": "def local_reduction_attention(x, block_length, multihead_params):\n  \n\n  @expert_utils.add_name_scope()\n  def dot_product_self_local_attention_flattened(q, k, v):\n    \n    _, num_head, _, depth = q.get_shape().as_list()\n\n    \n    def pad_and_reshape(x):\n      \n      length_x = common_layers.shape_list(x)[2]\n      \n      \n      x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]])\n      x = tf.reshape(\n          x,\n          [\n              common_layers.shape_list(x)[0],  \n              num_head,  \n              common_layers.shape_list(x)[2] \n              block_length,  \n              depth,  \n          ])\n      return x\n\n    q, k, v = [pad_and_reshape(t) for t in (q, k, v)]\n\n    \n    logits = tf.matmul(q, k, transpose_b=True)\n    logits = tf.reshape(\n        logits,\n        [\n            common_layers.shape_list(logits)[0],  \n            num_head,  \n            common_layers.shape_list(logits)[2],  \n            block_length**2,  \n        ])\n    weights = tf.nn.softmax(logits)\n    weights = tf.reshape(\n        weights,\n        [\n            common_layers.shape_list(weights)[0],  \n            num_head,  \n            common_layers.shape_list(weights)[2],  \n            block_length,\n            block_length,  \n        ])\n    weights = tf.reduce_sum(weights, axis=3, keep_dims=True)  \n    v_out = tf.matmul(weights, v)  \n    v_out = tf.squeeze(v_out, axis=3)\n    return v_out\n\n  return multihead_attention(\n      x,\n      None,\n      bias=None,\n      output_depth=x.get_shape().as_list()[-1],\n      attention_type=dot_product_self_local_attention_flattened,\n      **multihead_params)", "docstring": "Reduce the length dimension using self attention.\n\nArgs:\nx (tf.Tensor): float32 of shape [batch, length, depth]\nblock_length (int): Block length for local attention (Compression factor)\nmultihead_params (dict): parameters for multihead attention\n\nReturns:\ntf.Tensor: Compressed tensor of shape [batch, length // factor, depth]", "source": "juraj-google-style"}
{"code": "def register(self, obj, value):\n    if obj in self._registry:\n        raise KeyError(f'{type(obj)} has already been registered.')\n    self._registry[obj] = value", "docstring": "Registers a Python object within the registry.\n\nArgs:\nobj: The object to add to the registry.\nvalue: The stored value for the 'obj' type.\n\nRaises:\nKeyError: If the same obj is used twice.", "source": "github-repos"}
{"code": "def task_done(self, message):\n    topic_partition = (message.topic, message.partition)\n    if (topic_partition not in self._topics):\n        logger.warning('Unrecognized topic/partition in task_done message: {0}:{1}'.format(*topic_partition))\n        return False\n    offset = message.offset\n    prev_done = self._offsets.task_done[topic_partition]\n    if ((prev_done is not None) and (offset != (prev_done + 1))):\n        logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1', offset, prev_done)\n    prev_commit = self._offsets.commit[topic_partition]\n    if ((prev_commit is not None) and ((offset + 1) <= prev_commit)):\n        logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d', offset, prev_commit)\n    self._offsets.task_done[topic_partition] = offset\n    if self._does_auto_commit_messages():\n        self._incr_auto_commit_message_count()\n    if self._should_auto_commit():\n        self.commit()\n    return True", "docstring": "Mark a fetched message as consumed.\n\nOffsets for messages marked as \"task_done\" will be stored back\nto the kafka cluster for this consumer group on commit()\n\nArguments:\nmessage (KafkaMessage): the message to mark as complete\n\nReturns:\nTrue, unless the topic-partition for this message has not\nbeen configured for the consumer. In normal operation, this\nshould not happen. But see github issue 364.", "source": "codesearchnet"}
{"code": "def _str_to_ord(content, weights):\n    ordinal = 0\n    for (i, c) in enumerate(content):\n        ordinal += ((weights[i] * _ALPHABET.index(c)) + 1)\n    return ordinal", "docstring": "Converts a string to its lexicographical order.\n\nArgs:\ncontent: the string to convert. Of type str.\nweights: weights from _get_weights.\n\nReturns:\nan int or long that represents the order of this string. \"\" has order 0.", "source": "codesearchnet"}
{"code": "def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):\n    if trailing_slash:\n        base_path = (base_path.rsplit('/', 1)[0] + '/')\n        test_path = (test_path.rsplit('/', 1)[0] + '/')\n    else:\n        if (not base_path.endswith('/')):\n            base_path += '/'\n        if (not test_path.endswith('/')):\n            test_path += '/'\n    if wildcards:\n        return fnmatch.fnmatchcase(test_path, base_path)\n    else:\n        return test_path.startswith(base_path)", "docstring": "Return whether the a path is a subpath of another.\n\nArgs:\nbase_path: The base path\ntest_path: The path which we are testing\ntrailing_slash: If True, the trailing slash is treated with importance.\nFor example, ``/images/`` is a directory while ``/images`` is a\nfile.\nwildcards: If True, globbing wildcards are matched against paths", "source": "codesearchnet"}
{"code": "def copy(self, effects=None, target=None):\n    warning = 'File.copy method is deprecated and will be\\n            removed in 4.0.0.\\n            Please use `create_local_copy`\\n            and `create_remote_copy` instead.\\n        '\n    logger.warn('API Warning: {0}'.format(warning))\n    if (target is not None):\n        return self.create_remote_copy(target, effects)\n    else:\n        return self.create_local_copy(effects)", "docstring": "Creates a File Copy on Uploadcare or Custom Storage.\n\nFile.copy method is deprecated and will be removed in 4.0.0.\nPlease use `create_local_copy` and `create_remote_copy` instead.\n\nArgs:\n- effects:\nAdds CDN image effects. If ``self.default_effects`` property\nis set effects will be combined with default effects.\n- target:\nName of a custom storage connected to your project.\nUploadcare storage is used if target is absent.", "source": "codesearchnet"}
{"code": "def limitReal(x, max_denominator=1000000):\n    \n    f = Fraction(x).limit_denominator(max_denominator)\n    return Real((f.numerator, f.denominator))", "docstring": "Creates an pysmt Real constant from x.\n\nArgs:\nx (number): A number to be cast to a pysmt constant.\nmax_denominator (int, optional): The maximum size of the denominator.\nDefault 1000000.\n\nReturns:\nA Real constant with the given value and the denominator limited.", "source": "juraj-google-style"}
{"code": "def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):\n    batch_size, mask_seq_len = hidden_states.shape[:2]\n    indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)\n    bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)\n    mask = hidden_states.new_ones((batch_size, mask_seq_len))\n    mask = mask.masked_fill(bool_mask, 0)\n    return mask", "docstring": "Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that\nstops at the corresponding element in `seq_lens`.\nArgs:\nhidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):\nThe sequences to mask, where `*` is any number of sequence-specific dimensions including none.\nseq_lens (`torch.Tensor` of shape `(batch)`:\nEach element represents the length of the sequence at the same index in `hidden_states`\nReturns:\n`torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`", "source": "github-repos"}
{"code": "def set_settings(self, settings):\n    for (k, v) in settings.items():\n        setattr(self, k, v)", "docstring": "Set every given settings as object attributes.\n\nArgs:\nsettings (dict): Dictionnary of settings.", "source": "codesearchnet"}
{"code": "def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting dropout ...')\n    if (names == 'short'):\n        tf_name = ('DO' + random_string(6))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    dropout = keras.layers.Dropout(rate=params['ratio'], name=tf_name)\n    layers[scope_name] = dropout(layers[inputs[0]])", "docstring": "Convert dropout.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def __call__(self, kl_fn):\n    \n    if not callable(kl_fn):\n      raise TypeError(\"kl_fn must be callable, received: %s\" % kl_fn)\n    if self._key in _DIVERGENCES:\n      raise ValueError(\"KL(%s || %s) has already been registered to: %s\"\n                       % (self._key[0].__name__, self._key[1].__name__,\n                          _DIVERGENCES[self._key]))\n    _DIVERGENCES[self._key] = kl_fn\n    return kl_fn", "docstring": "Perform the KL registration.\n\nArgs:\nkl_fn: The function to use for the KL divergence.\n\nReturns:\nkl_fn\n\nRaises:\nTypeError: if kl_fn is not a callable.\nValueError: if a KL divergence function has already been registered for\nthe given argument classes.", "source": "juraj-google-style"}
{"code": "def run_with_time_limit(self, cmd, time_limit=SUBMISSION_TIME_LIMIT):\n    if (time_limit < 0):\n        return self.run_without_time_limit(cmd)\n    container_name = str(uuid.uuid4())\n    cmd = ([DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME, '--detach', '--name', container_name] + cmd)\n    logging.info('Docker command: %s', ' '.join(cmd))\n    logging.info('Time limit %d seconds', time_limit)\n    retval = subprocess.call(cmd)\n    start_time = time.time()\n    elapsed_time_sec = 0\n    while is_docker_still_running(container_name):\n        elapsed_time_sec = int((time.time() - start_time))\n        if (elapsed_time_sec < time_limit):\n            time.sleep(1)\n        else:\n            kill_docker_container(container_name)\n            logging.warning('Submission was killed because run out of time')\n    logging.info('Elapsed time of submission: %d', elapsed_time_sec)\n    logging.info('Docker retval: %d', retval)\n    if (retval != 0):\n        logging.warning('Docker returned non-zero retval: %d', retval)\n        raise WorkerError(('Docker returned non-zero retval ' + str(retval)))\n    return elapsed_time_sec", "docstring": "Runs docker command and enforces time limit.\n\nArgs:\ncmd: list with the command line arguments which are passed to docker\nbinary after run\ntime_limit: time limit, in seconds. Negative value means no limit.\n\nReturns:\nhow long it took to run submission in seconds\n\nRaises:\nWorkerError: if error occurred during execution of the submission", "source": "codesearchnet"}
{"code": "def ListClients(self, request, timeout=None):\n    \n    return self._RetryLoop(\n        lambda t: self._stub.ListClients(request, timeout=t))", "docstring": "Provides basic information about Fleetspeak clients.\n\nArgs:\nrequest: fleetspeak.admin.ListClientsRequest\n\ntimeout: How many seconds to try for.\n\nReturns: fleetspeak.admin.ListClientsResponse", "source": "juraj-google-style"}
{"code": "def _ScaleAndTranslateGrad(op, grad):\n    grad0 = gen_image_ops.scale_and_translate_grad(grad, op.inputs[0], op.inputs[2], op.inputs[3], kernel_type=op.get_attr('kernel_type'), antialias=op.get_attr('antialias'))\n    return [grad0, None, None, None]", "docstring": "The derivatives for ScaleAndTranslate transformation op.\n\nArgs:\nop: The ScaleAndTranslate op.\ngrad: The tensor representing the gradient w.r.t. the output.\n\nReturns:\nThe gradients w.r.t. the input.", "source": "github-repos"}
{"code": "def delete(self):\n    if self.exists():\n        try:\n            self._api.buckets_delete(self._name)\n        except Exception as e:\n            raise e", "docstring": "Deletes the bucket.\n\nRaises:\nException if there was an error deleting the bucket.", "source": "codesearchnet"}
{"code": "def _add_string_to_commastring(self, field, string):\n        \n        \n        if string in self._get_stringlist_from_commastring(field):\n            return False\n        strings = '%s,%s' % (self.data.get(field, ''), string)\n        if strings[0] == ',':\n            strings = strings[1:]\n        self.data[field] = strings\n        return True", "docstring": "Add a string to a comma separated list of strings\n\nArgs:\nfield (str): Field containing comma separated list\nstring (str): String to add\n\nReturns:\nbool: True if string added or False if string already present", "source": "juraj-google-style"}
{"code": "class MeanSquaredError(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='mean_squared_error', dtype=None):\n        super().__init__(fn=mean_squared_error, name=name, dtype=dtype)\n        self._direction = 'down'\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the mean squared error between `y_true` and `y_pred`.\n\nFormula:\n\n```python\nloss = mean(square(y_true - y_pred))\n```\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExample:\n>>> m = keras.metrics.MeanSquaredError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result()\n0.25", "source": "github-repos"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def read_up_to(self, queue, num_records, name=None):\n    if isinstance(queue, tensor_lib.Tensor):\n        queue_ref = queue\n    else:\n        queue_ref = queue.queue_ref\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_read_up_to_v2(self._reader_ref, queue_ref, num_records, name=name)\n    else:\n        old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)\n        return gen_io_ops.reader_read_up_to(self._reader_ref, old_queue_op, num_records, name=name)", "docstring": "Returns up to num_records (key, value) pairs produced by a reader.\n\nWill dequeue a work unit from queue if necessary (e.g., when the\nReader needs to start reading from a new file since it has\nfinished with the previous file).\nIt may return less than num_records even before the last batch.\n\nArgs:\nqueue: A Queue or a mutable string Tensor representing a handle\nto a Queue, with string work items.\nnum_records: Number of records to read.\nname: A name for the operation (optional).\n\nReturns:\nA tuple of Tensors (keys, values).\nkeys: A 1-D string Tensor.\nvalues: A 1-D string Tensor.", "source": "github-repos"}
{"code": "def fetch(self, order_id, data={}, **kwargs):\n        \n        return super(Order, self).fetch(order_id, data, **kwargs)", "docstring": "Fetch Order for given Id\n\nArgs:\norder_id : Id for which order object has to be retrieved\n\nReturns:\nOrder dict for given order Id", "source": "juraj-google-style"}
{"code": "def __init__(self, num_groups=2):\n    if num_groups < 1:\n        raise ValueError(f'Argument `num_groups` must be a positive integer. Received: num_groups={num_groups}')\n    self._ready = threading.Condition(threading.Lock())\n    self._num_groups = num_groups\n    self._group_member_counts = [0] * self._num_groups", "docstring": "Initialize a group lock.\n\nArgs:\nnum_groups: The number of groups that will be accessing the resource under\nconsideration. Should be a positive number.\n\nReturns:\nA group lock that can then be used to synchronize code.\n\nRaises:\nValueError: If num_groups is less than 1.", "source": "github-repos"}
{"code": "def _on_connection_open(self, connection):\n    _log.info('Successfully opened connection to %s', connection.params.host)\n    self._channel = connection.channel(on_open_callback=self._on_channel_open)", "docstring": "Callback invoked when the connection is successfully established.\n\nArgs:\nconnection (pika.connection.SelectConnection): The newly-estabilished\nconnection.", "source": "codesearchnet"}
{"code": "def find_pip(pip_version=None, python_version=None):\n    pip_exe = 'pip'\n    try:\n        context = create_context(pip_version, python_version)\n    except BuildError as e:\n        from rez.backport.shutilwhich import which\n        pip_exe = which('pip')\n        if pip_exe:\n            print_warning((\"pip rez package could not be found; system 'pip' command (%s) will be used instead.\" % pip_exe))\n            context = None\n        else:\n            raise e\n    return (pip_exe, context)", "docstring": "Find a pip exe using the given python version.\n\nReturns:\n2-tuple:\nstr: pip executable;\n`ResolvedContext`: Context containing pip, or None if we fell back\nto system pip.", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, cache=None, database=None, **kwargs):\n    \n    if database is None:\n      raise ValueError('Invalid database.')\n\n    for table_name, callback_method in iter(self._tables.items()):\n      if parser_mediator.abort:\n        break\n\n      if not callback_method:\n        \n        \n        continue\n\n      callback = getattr(self, callback_method, None)\n      if callback is None:\n        logger.warning(\n            '[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(\n                self.NAME, callback_method, table_name))\n        continue\n\n      esedb_table = database.get_table_by_name(table_name)\n      if not esedb_table:\n        logger.warning('[{0:s}] missing table: {1:s}'.format(\n            self.NAME, table_name))\n        continue\n\n      \n      \n      \n      callback(\n          parser_mediator, cache=cache, database=database, table=esedb_table,\n          **kwargs)", "docstring": "Extracts event objects from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache.\ndatabase (Optional[pyesedb.file]): ESE database.\n\nRaises:\nValueError: If the database attribute is not valid.", "source": "juraj-google-style"}
{"code": "def read(cls, five9, external_id):\n    results = cls.search(five9, {cls.__uid_field__: external_id})\n    if (not results):\n        return None\n    return results[0]", "docstring": "Return a record singleton for the ID.\n\nArgs:\nfive9 (five9.Five9): The authenticated Five9 remote.\nexternal_id (mixed): The identified on Five9. This should be the\nvalue that is in the ``__uid_field__`` field on the record.\n\nReturns:\nBaseModel: The record, if found. Otherwise ``None``", "source": "codesearchnet"}
{"code": "class PatchTSMixerNormLayer(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        self.norm_mlp = config.norm_mlp\n        if 'batch' in config.norm_mlp.lower():\n            self.norm = PatchTSMixerBatchNorm(config)\n        else:\n            self.norm = nn.LayerNorm(config.d_model, eps=config.norm_eps)\n\n    def forward(self, inputs: torch.Tensor):\n        \n        if 'batch' in self.norm_mlp.lower():\n            inputs_reshaped = torch.reshape(inputs, (inputs.shape[0] * inputs.shape[1], inputs.shape[2], inputs.shape[3]))\n            inputs_reshaped = self.norm(inputs_reshaped)\n            inputs = torch.reshape(inputs_reshaped, inputs.shape)\n        else:\n            inputs = self.norm(inputs)\n        return inputs", "docstring": "Normalization block\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def fill(self, text):\n        \n        def _fill(elem):  \n            elem.clear()\n            elem.send_keys(text)\n\n        self.map(_fill, u'fill({!r})'.format(text)).execute()", "docstring": "Set the text value of each matched element to `text`.\n\nExample usage:\n\n.. code:: python\n\n# Set the text of the first element matched by the query to \"Foo\"\nq.first.fill('Foo')\n\nArgs:\ntext (str): The text used to fill the element (usually a text field or text area).\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_operation_mtf_dimension_names(self, operation_name):\n    mtf_dimension_names = set()\n    for tensor_name in self.get_operation_input_names(operation_name):\n        mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(tensor_name))\n    for tensor_name in self.get_operation_output_names(operation_name):\n        mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(tensor_name))\n    return mtf_dimension_names", "docstring": "The Mesh TensorFlow dimensions associated with an operation.\n\nArgs:\noperation_name: a string, name of an operation in the graph.\n\nReturns:\na set(string), the names of Mesh TensorFlow dimensions.", "source": "codesearchnet"}
{"code": "def list(self, **kwargs):\n    resp = self.client.api.volumes(**kwargs)\n    if (not resp.get('Volumes')):\n        return []\n    return [self.prepare_model(obj) for obj in resp['Volumes']]", "docstring": "List volumes. Similar to the ``docker volume ls`` command.\n\nArgs:\nfilters (dict): Server-side list filtering options.\n\nReturns:\n(list of :py:class:`Volume`): The volumes.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def isClose(x, y, relative_tolerance):\n    if math.isnan(x) or math.isnan(y):\n        return math.isnan(x) == math.isnan(y)\n    if math.isinf(x) or math.isinf(y):\n        return x == y\n    return abs(x - y) <= relative_tolerance * max(abs(x), abs(y))", "docstring": "Returns True if x is close to y given the relative tolerance or if x and y are both inf, both -inf, or both NaNs.\n\nThis function does not distinguish between signalling and non-signalling NaN.\n\nArgs:\nx: float value to be compared\ny: float value to be compared\nrelative_tolerance: float. The allowable difference between the two values\nbeing compared is determined by multiplying the relative tolerance by the\nmaximum of the two values. If this is not provided, then all floats are\ncompared using string comparison.", "source": "github-repos"}
{"code": "def add_positional_embedding_nd(x, max_length, name=None):\n    with tf.name_scope('add_positional_embedding_nd'):\n        x_shape = common_layers.shape_list(x)\n        num_dims = (len(x_shape) - 2)\n        depth = x_shape[(- 1)]\n        base_shape = (([1] * (num_dims + 1)) + [depth])\n        base_start = ([0] * (num_dims + 2))\n        base_size = (([(- 1)] + ([1] * num_dims)) + [depth])\n        for i in range(num_dims):\n            shape = base_shape[:]\n            start = base_start[:]\n            size = base_size[:]\n            shape[(i + 1)] = max_length\n            size[(i + 1)] = x_shape[(i + 1)]\n            var = tf.get_variable((name + ('_%d' % i)), shape, initializer=tf.random_normal_initializer(0, (depth ** (- 0.5))))\n            var = (var * (depth ** 0.5))\n            x += tf.slice(var, start, size)\n        return x", "docstring": "Adds n-dimensional positional embedding.\n\nThe embeddings add to all positional dimensions of the tensor.\n\nArgs:\nx: Tensor with shape [batch, p1 ... pn, depth]. It has n positional\ndimensions, i.e., 1 for text, 2 for images, 3 for video, etc.\nmax_length: int representing static maximum size of any dimension.\nname: str representing name of the embedding tf.Variable.\n\nReturns:\nTensor of same shape as x.", "source": "codesearchnet"}
{"code": "def char_spacing(self, dots):\n        \n        if dots in range(0,127):\n            self.send(chr(27)+chr(32)+chr(dots))\n        else:\n            raise RuntimeError('Invalid dot amount in function charSpacing')", "docstring": "Specifes character spacing in dots.\n\nArgs:\ndots: the character spacing you desire, in dots\nReturns:\nNone\nRaises:\nRuntimeError: Invalid dot amount.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_or_key_list: Optional[Union[Any, List[Any]]]=None, parent: Optional['KeyPath']=None):\n    if key_or_key_list is None:\n        key_or_key_list = []\n    elif not isinstance(key_or_key_list, (tuple, list)):\n        key_or_key_list = [key_or_key_list]\n    keys = []\n    if parent:\n        keys.extend(parent.keys)\n    keys.extend(key_or_key_list)\n    self._keys = keys\n    self._path_str = None", "docstring": "Constructor.\n\nArgs:\nkey_or_key_list: A single object as key, or a list/tuple of objects\nas keys in the path.\nWhen string types or StrKey objects are used as key, dot ('.') is used\nas the delimiter, otherwise square brackets ('[]') is used as the\ndelimiter when formatting a KeyPath.\nFor object type key, str(object) will be used to represent the key in\nstring form.\nparent: Parent KeyPath.", "source": "github-repos"}
{"code": "def create_all(cls, list_of_kwargs):\n    try:\n        return cls.add_all([(cls.new(**kwargs) if (kwargs is not None) else None) for kwargs in list_of_kwargs])\n    except:\n        cls.session.rollback()\n        raise", "docstring": "Batch method for creating a list of instances\n\nArgs:\nlist_of_kwargs(list of dicts): hereA list of dicts where\neach dict denotes the keyword args that you would pass\nto the create method separately\n\nExamples:\n\n>>> Customer.create_all([\n... {'name': 'Vicky', 'age': 34, 'user_id': 1},\n... {'name': 'Ron', 'age': 40, 'user_id': 1, 'gender': 'Male'}])", "source": "codesearchnet"}
{"code": "def _compute_fans(shape):\n    if len(shape) < 1:\n        fan_in = fan_out = 1\n    elif len(shape) == 1:\n        fan_in = fan_out = shape[0]\n    elif len(shape) == 2:\n        fan_in = shape[0]\n        fan_out = shape[1]\n    else:\n        receptive_field_size = 1\n        for dim in shape[:-2]:\n            receptive_field_size *= dim\n        fan_in = shape[-2] * receptive_field_size\n        fan_out = shape[-1] * receptive_field_size\n    return (int(fan_in), int(fan_out))", "docstring": "Computes the number of input and output units for a weight shape.\n\nArgs:\nshape: Integer shape tuple or TF tensor shape.\n\nReturns:\nA tuple of integer scalars (fan_in, fan_out).", "source": "github-repos"}
{"code": "def _CanSkipDataStream(self, file_entry, data_stream):\n    if file_entry.IsFile():\n        return False\n    if data_stream.IsDefault():\n        return True\n    return False", "docstring": "Determines if analysis and extraction of a data stream can be skipped.\n\nThis is used to prevent Plaso trying to run analyzers or extract content\nfrom a pipe or socket it encounters while processing a mounted filesystem.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to consider for skipping.\ndata_stream (dfvfs.DataStream): data stream to consider for skipping.\n\nReturns:\nbool: True if the data stream can be skipped.", "source": "codesearchnet"}
{"code": "def get(self, name):\n    interface = name\n    if (not interface):\n        raise ValueError('Vrrp.get(): interface must contain a value.')\n    config = self.get_block(('interface %s' % interface))\n    if (config is None):\n        return config\n    match = set(re.findall('^\\\\s+(?:no |)vrrp (\\\\d+)', config, re.M))\n    if (not match):\n        return None\n    result = dict()\n    for vrid in match:\n        subd = dict()\n        subd.update(self._parse_delay_reload(config, vrid))\n        subd.update(self._parse_description(config, vrid))\n        subd.update(self._parse_enable(config, vrid))\n        subd.update(self._parse_ip_version(config, vrid))\n        subd.update(self._parse_mac_addr_adv_interval(config, vrid))\n        subd.update(self._parse_preempt(config, vrid))\n        subd.update(self._parse_preempt_delay_min(config, vrid))\n        subd.update(self._parse_preempt_delay_reload(config, vrid))\n        subd.update(self._parse_primary_ip(config, vrid))\n        subd.update(self._parse_priority(config, vrid))\n        subd.update(self._parse_secondary_ip(config, vrid))\n        subd.update(self._parse_timers_advertise(config, vrid))\n        subd.update(self._parse_track(config, vrid))\n        subd.update(self._parse_bfd_ip(config, vrid))\n        result.update({int(vrid): subd})\n    return (result if result else None)", "docstring": "Get the vrrp configurations for a single node interface\n\nArgs:\nname (string): The name of the interface for which vrrp\nconfigurations will be retrieved.\n\nReturns:\nA dictionary containing the vrrp configurations on the interface.\nReturns None if no vrrp configurations are defined or\nif the interface is not configured.", "source": "codesearchnet"}
{"code": "def __init__(self, name, *value):\n        \n        self.name = name\n        self.key = name\n        self.value = name if len(value) != 1 else value[0]\n        self.description = \"Matches {!r} and maps it to {!r}\".format(name, self.value)", "docstring": "Initialize Keywords\nArgs:\nname  -- keyword name\nvalue -- Optional value, otherwise name is used\n\nvalue is setup as *value to detect if the parameter is supplied, while\nstill supporting None. If no value is supplied then name should be used.\nIf any value is supplied (even None), then that value is used instead", "source": "juraj-google-style"}
{"code": "def map_exp_ids(self, exp, positions=False):\n        \n        if positions:\n            exp = [('%s_%s' % (\n                self.indexed_string.word(x[0]),\n                '-'.join(\n                    map(str,\n                        self.indexed_string.string_position(x[0])))), x[1])\n                   for x in exp]\n        else:\n            exp = [(self.indexed_string.word(x[0]), x[1]) for x in exp]\n        return exp", "docstring": "Maps ids to words or word-position strings.\n\nArgs:\nexp: list of tuples [(id, weight), (id,weight)]\npositions: if True, also return word positions\n\nReturns:\nlist of tuples (word, weight), or (word_positions, weight) if\nexamples: ('bad', 1) or ('bad_3-6-12', 1)", "source": "juraj-google-style"}
{"code": "def bbox(lat, lon, dist):\n    \n    latr = math.radians(lat)\n    lonr = math.radians(lon)\n\n    rad = r_mm\n    prad = rad * math.cos(latr)\n\n    latd = dist / rad\n    lond = dist / prad\n\n    latmin = math.degrees(latr - latd)\n    latmax = math.degrees(latr + latd)\n    lonmin = math.degrees(lonr - lond)\n    lonmax = math.degrees(lonr + lond)\n\n    return (latmin, latmax, lonmin, lonmax)", "docstring": "Calculate a min/max bounding box for the circle defined by lalo/dist.\n\nArgs:\nlat (float): The latitude in degrees\nlon (float): The longitude in degrees\ndist (int): A distance in geo:dist base units (mm)\n\nReturns:\n(float,float,float,float): (latmin, latmax, lonmin, lonmax)", "source": "juraj-google-style"}
{"code": "def trading_dates(start, end, calendar='US'):\n    kw = dict(start=pd.Timestamp(start, tz='UTC').date(), end=pd.Timestamp(end, tz='UTC').date())\n    us_cal = getattr(sys.modules[__name__], f'{calendar}TradingCalendar')()\n    return pd.bdate_range(**kw).drop(us_cal.holidays(**kw))", "docstring": "Trading dates for given exchange\n\nArgs:\nstart: start date\nend: end date\ncalendar: exchange as string\n\nReturns:\npd.DatetimeIndex: datetime index\n\nExamples:\n>>> bus_dates = ['2018-12-24', '2018-12-26', '2018-12-27']\n>>> trd_dates = trading_dates(start='2018-12-23', end='2018-12-27')\n>>> assert len(trd_dates) == len(bus_dates)\n>>> assert pd.Series(trd_dates == pd.DatetimeIndex(bus_dates)).all()", "source": "codesearchnet"}
{"code": "def __init__(self, service_endpoint_uri=None):\n        \n        self._send_interval = 1.0\n        self._send_remaining_time = 0\n        self._send_time = 3.0\n        self._lock_send_remaining_time = Lock()\n        SenderBase.__init__(self, service_endpoint_uri or DEFAULT_ENDPOINT_URL)", "docstring": "Initializes a new instance of the class.\n\nArgs:\nsender (String) service_endpoint_uri the address of the service to send telemetry data to.", "source": "juraj-google-style"}
{"code": "def get_user(self, user):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_user(user)", "docstring": "Get user's data (first and last name, email, etc).\n\nArgs:\nuser (string): User name.\n\nReturns:\n(dictionary): User's data encoded in a dictionary.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def ReadByte(self, do_ord=True):\n        \n        try:\n            if do_ord:\n                return ord(self.stream.read(1))\n            return self.stream.read(1)\n        except Exception as e:\n            logger.error(\"ord expected character but got none\")\n        return 0", "docstring": "Read a single byte.\n\nArgs:\ndo_ord (bool): (default True) convert the byte to an ordinal first.\n\nReturns:\nbytes: a single byte if successful. 0 (int) if an exception occurred.", "source": "juraj-google-style"}
{"code": "def work_request(self, worker_name, md5, subkeys=None):\n        \n\n        \n        work_results = self._recursive_work_resolver(worker_name, md5)\n\n        \n        if subkeys:\n            if isinstance(subkeys, str):\n                subkeys = [subkeys]\n            try:\n                sub_results = {}\n                for subkey in subkeys:\n                    tmp = work_results[worker_name]\n\n                    \n                    for key in subkey.split('.')[:-1]:\n                        tmp = tmp[key]\n\n                    \n                    key = subkey.split('.')[-1]\n                    if key == '*':\n                        for key in tmp.keys():\n                            sub_results[key] = tmp[key]\n                    else:\n                        sub_results[key] = tmp[key]\n\n                \n                work_results = sub_results\n\n            except (KeyError, TypeError):\n                raise RuntimeError('Could not get one or more subkeys for: %s' % (work_results))\n\n        \n        return self.data_store.clean_for_serialization(work_results)", "docstring": "Make a work request for an existing stored sample.\nArgs:\nworker_name: 'strings', 'pe_features', whatever\nmd5: the md5 of the sample (or sample_set!)\nsubkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all)\nReturns:\nThe output of the worker.", "source": "juraj-google-style"}
{"code": "def view(filepath):\n    \n    try:\n        view_func = getattr(view, PLATFORM)\n    except AttributeError:\n        raise RuntimeError('platform %r not supported' % PLATFORM)\n    view_func(filepath)", "docstring": "Open filepath with its default viewing application (platform-specific).\n\nArgs:\nfilepath: Path to the file to open in viewer.\nRaises:\nRuntimeError: If the current platform is not supported.", "source": "juraj-google-style"}
{"code": "def GetAnalyzerInstance(cls, analyzer_name):\n    \n    analyzer_name = analyzer_name.lower()\n    if analyzer_name not in cls._analyzer_classes:\n      raise KeyError(\n          'analyzer class not set for name: {0:s}.'.format(analyzer_name))\n\n    analyzer_class = cls._analyzer_classes[analyzer_name]\n    return analyzer_class()", "docstring": "Retrieves an instance of a specific analyzer.\n\nArgs:\nanalyzer_name (str): name of the analyzer to retrieve.\n\nReturns:\nBaseAnalyzer: analyzer instance.\n\nRaises:\nKeyError: if analyzer class is not set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_value: Optional[Tuple[tf.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, **kwargs) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:\n    if 'padding_mask' in kwargs:\n        warnings.warn('Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`')\n    residual = hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.post_attention_layernorm(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`, *optional*): attention mask of size\n`(batch, sequence_length)` where padding elements are indicated by 0.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\npast_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states", "source": "github-repos"}
{"code": "def load_snippet(self, name, package, config=None):\n    if hasattr(self, name):\n        raise SnippetError(self, 'Attribute \"%s\" already exists, please use a different name.' % name)\n    self.services.snippets.add_snippet_client(name, package, config=config)", "docstring": "Starts the snippet apk with the given package name and connects.\n\nExamples:\n\n.. code-block:: python\n\nad.load_snippet(\nname='maps', package='com.google.maps.snippets')\nad.maps.activateZoom('3')\n\nArgs:\nname: string, the attribute name to which to attach the snippet\nclient. E.g. `name='maps'` attaches the snippet client to\n`ad.maps`.\npackage: string, the package name of the snippet apk to connect to.\nconfig: snippet_client_v2.Config, the configuration object for\ncontrolling the snippet behaviors. See the docstring of the `Config`\nclass for supported configurations.\n\nRaises:\nSnippetError: Illegal load operations are attempted.", "source": "github-repos"}
{"code": "def period_end_day(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `period_end_day`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `period_end_day`')\n\n        self._period_end_day = value", "docstring": "Corresponds to IDD Field `period_end_day`\n\nArgs:\nvalue (str): value for IDD Field `period_end_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def relu_density_logit(x, reduce_dims):\n  \n  frac = tf.reduce_mean(to_float(x > 0.0), reduce_dims)\n  scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))\n  return scaled", "docstring": "logit(density(x)).\n\nUseful for histograms.\n\nArgs:\nx: a Tensor, typically the output of tf.relu\nreduce_dims: a list of dimensions\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def decode_image_tokens(self, image_tokens: torch.LongTensor, height: int, width: int):\n    sequences = image_tokens[:, :-3].view(-1, height, width + 1)\n    image_tokens = self.vocabulary_mapping.convert_bpe2img(sequences)\n    image = self.vqmodel.decode(image_tokens)\n    return image", "docstring": "Decodes generated image tokens from language model to continuous pixel values\nwith VQGAN module via upsampling.\n\nArgs:\nimage_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):\nThe tensors corresponding to the input images.\nheight (`int`):\nHeight of the generated image before upsampling.\nwidth (`int`):\nWidth of the generated image before upsampling.", "source": "github-repos"}
{"code": "def __init__(self, contents):\n    \n    precondition.AssertOptionalType(contents, Text)\n    self.contents = contents", "docstring": "Initialise the parser, presenting file contents to parse.\n\nArgs:\ncontents: file contents that are to be parsed.", "source": "juraj-google-style"}
{"code": "def raster_binarization(given_value, rasterfilename):\n    origin_raster = RasterUtilClass.read_raster(rasterfilename)\n    binary_raster = numpy.where((origin_raster.data == given_value), 1, 0)\n    return binary_raster", "docstring": "Make the raster into binarization.\n\nThe opening and closing are based on binary image. Therefore we need to\nmake the raster into binarization.\n\nArgs:\ngiven_value: The given value's pixels will be value in 1,\nother pixels will be value in 0.\nrasterfilename: The initial rasterfilena,e.\n\nReturns:\nbinary_raster: Raster after binarization.", "source": "codesearchnet"}
{"code": "def _parse_error_message(self, message):\n    msg = message['error']['message']\n    code = message['error']['code']\n    err = None\n    out = None\n    if ('data' in message['error']):\n        err = ' '.join(message['error']['data'][(- 1)]['errors'])\n        out = message['error']['data']\n    return (code, msg, err, out)", "docstring": "Parses the eAPI failure response message\n\nThis method accepts an eAPI failure message and parses the necesary\nparts in order to generate a CommandError.\n\nArgs:\nmessage (str): The error message to parse\n\nReturns:\ntuple: A tuple that consists of the following:\n* code: The error code specified in the failure message\n* message: The error text specified in the failure message\n* error: The error text from the command that generated the\nerror (the last command that ran)\n* output: A list of all output from all commands", "source": "codesearchnet"}
{"code": "def load_profiles_from_file(self, fqfn):\n    if self.args.verbose:\n        print('Loading profiles from File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, fqfn))\n    with open(fqfn, 'r+') as fh:\n        data = json.load(fh)\n        for profile in data:\n            self.profile_update(profile)\n            if (self.args.action == 'validate'):\n                self.validate(profile)\n        fh.seek(0)\n        fh.write(json.dumps(data, indent=2, sort_keys=True))\n        fh.truncate()\n    for d in data:\n        if (d.get('profile_name') in self.profiles):\n            self.handle_error('Found a duplicate profile name ({}).'.format(d.get('profile_name')))\n        self.profiles.setdefault(d.get('profile_name'), {'data': d, 'ij_filename': d.get('install_json'), 'fqfn': fqfn})", "docstring": "Load profiles from file.\n\nArgs:\nfqfn (str): Fully qualified file name.", "source": "codesearchnet"}
{"code": "def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):\n    \n    \n    cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]\n    \n    pipeline = []\n    group = {'$group' : {'_id': '$status', 'count': {'$sum': 1}}}\n\n    subquery = {}\n    if institute_id and slice_query:\n        subquery = adapter.cases(owner=institute_id, name_query=slice_query,\n                              yield_query=True)\n    elif institute_id:\n        subquery = adapter.cases(owner=institute_id, yield_query=True)\n    elif slice_query:\n        subquery = adapter.cases(name_query=slice_query, yield_query=True)\n\n    query = {'$match': subquery} if subquery else {}\n\n    if query:\n        pipeline.append(query)\n\n    pipeline.append(group)\n    res = adapter.case_collection.aggregate(pipeline)\n\n    for status_group in res:\n        cases.append({'status': status_group['_id'],\n                      'count': status_group['count'],\n                      'percent': status_group['count'] / total_cases})\n\n    return cases", "docstring": "Return the information about case groups\n\nArgs:\nstore(adapter.MongoAdapter)\ntotal_cases(int): Total number of cases\nslice_query(str): Query to filter cases to obtain statistics for.\n\nReturns:\ncases(dict):", "source": "juraj-google-style"}
{"code": "def create_storage_account(access_token, subscription_id, rgname, account_name, location, storage_type='Standard_LRS'):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts/', account_name, '?api-version=', STORAGE_API])\n    storage_body = {'location': location}\n    storage_body['sku'] = {'name': storage_type}\n    storage_body['kind'] = 'Storage'\n    body = json.dumps(storage_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a new storage account in the named resource group, with the named location.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new storage account.\nlocation (str): Azure data center location. E.g. westus.\nstorage_type (str): Premium or Standard, local or globally redundant.\nDefaults to Standard_LRS.\n\nReturns:\nHTTP response. JSON body of storage account properties.", "source": "codesearchnet"}
{"code": "def restore(self, sess, save_path):\n        \n\n        if self._saver is None:\n            raise TensorForceError(\"register_saver_ops should be called before restore\")\n        self._saver.restore(sess=sess, save_path=save_path)", "docstring": "Restores the values of the managed variables from disk location.\n\nArgs:\nsess: The session for which to save the managed variables.\nsave_path: The path used to save the data to.", "source": "juraj-google-style"}
{"code": "def CheckAccess(filename, clean_lines, linenum, nesting_state, error):\n  \n  line = clean_lines.elided[linenum]  \n\n  matched = Match((r'\\s*(DISALLOW_COPY_AND_ASSIGN|'\n                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)\n  if not matched:\n    return\n  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):\n    if nesting_state.stack[-1].access != 'private':\n      error(filename, linenum, 'readability/constructors', 3,\n            '%s must be in the private: section' % matched.group(1))\n\n  else:\n    \n    \n    \n    \n    pass", "docstring": "Checks for improper use of DISALLOW* macros.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def saveplot(fig, *name_args, close=True, **name_kwargs):\n    oname = out_name(*name_args, **name_kwargs)\n    fig.savefig('{}.{}'.format(oname, conf.plot.format), format=conf.plot.format, bbox_inches='tight')\n    if close:\n        plt.close(fig)", "docstring": "Save matplotlib figure.\n\nYou need to provide :data:`stem` as a positional or keyword argument (see\n:func:`out_name`).\n\nArgs:\nfig (:class:`matplotlib.figure.Figure`): matplotlib figure.\nclose (bool): whether to close the figure.\nname_args: positional arguments passed on to :func:`out_name`.\nname_kwargs: keyword arguments passed on to :func:`out_name`.", "source": "codesearchnet"}
{"code": "def _parameterize_obj(obj):\n    if isinstance(obj, Mapping):\n        return dict(((key, _parameterize_obj(value)) for (key, value) in obj.items()))\n    elif isinstance(obj, bytes):\n        return _parameterize_string(obj.decode('utf8'))\n    elif isinstance(obj, str):\n        return _parameterize_string(obj)\n    elif isinstance(obj, Sequence):\n        return list((_parameterize_obj(item) for item in obj))\n    else:\n        return obj", "docstring": "Recursively parameterize all strings contained in an object.\n\nParameterizes all values of a Mapping, all items of a Sequence, an\nunicode string, or pass other objects through unmodified.\n\nByte strings will be interpreted as UTF-8.\n\nArgs:\nobj: data to parameterize\n\nReturn:\nA parameterized object to be included in a CloudFormation template.\nMappings are converted to `dict`, Sequences are converted to  `list`,\nand strings possibly replaced by compositions of function calls.", "source": "codesearchnet"}
{"code": "def anm_score(self, x, y):\n    gp = GaussianProcessRegressor().fit(x, y)\n    y_predict = gp.predict(x)\n    indepscore = normalized_hsic((y_predict - y), x)\n    return indepscore", "docstring": "Compute the fitness score of the ANM model in the x->y direction.\n\nArgs:\na (numpy.ndarray): Variable seen as cause\nb (numpy.ndarray): Variable seen as effect\n\nReturns:\nfloat: ANM fit score", "source": "codesearchnet"}
{"code": "def getStreamNetworkAsWkt(self, session, withNodes=True):\n    wkt_list = []\n    for link in self.streamLinks:\n        wkt_link = link.getAsWkt(session)\n        if wkt_link:\n            wkt_list.append(wkt_link)\n        if withNodes:\n            for node in link.nodes:\n                wkt_node = node.getAsWkt(session)\n                if wkt_node:\n                    wkt_list.append(wkt_node)\n    return 'GEOMCOLLECTION ({0})'.format(', '.join(wkt_list))", "docstring": "Retrieve the stream network geometry in Well Known Text format.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nwithNodes (bool, optional): Include nodes. Defaults to False.\n\nReturns:\nstr: Well Known Text string.", "source": "codesearchnet"}
{"code": "def gaussian_noise(x, severity=1):\n    c = [0.08, 0.12, 0.18, 0.26, 0.38][(severity - 1)]\n    x = (np.array(x) / 255.0)\n    x_clip = (np.clip((x + np.random.normal(size=x.shape, scale=c)), 0, 1) * 255)\n    return around_and_astype(x_clip)", "docstring": "Gaussian noise corruption to images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.", "source": "codesearchnet"}
{"code": "def _NoBlankLinesBeforeCurrentToken(text, cur_token, prev_token):\n    cur_token_lineno = cur_token.lineno\n    if cur_token.is_comment:\n        cur_token_lineno -= cur_token.value.count('\\n')\n    num_newlines = text.count('\\n') if not prev_token.is_comment else 0\n    return prev_token.lineno + num_newlines == cur_token_lineno - 1", "docstring": "Determine if there are no blank lines before the current token.\n\nThe previous token is a docstring or comment. The prev_token_lineno is the\nstart of the text of that token. Counting the number of newlines in its text\ngives us the extent and thus where the line number of the end of the\ndocstring or comment. After that, we just compare it to the current token's\nline number to see if there are blank lines between them.\n\nArguments:\ntext: (unicode) The text of the docstring or comment before the current\ntoken.\ncur_token: (format_token.FormatToken) The current token in the logical line.\nprev_token: (format_token.FormatToken) The previous token in the logical\nline.\n\nReturns:\nTrue if there is no blank line before the current token.", "source": "github-repos"}
{"code": "def search_artists_by_name(self, artist_name: str, limit: int=5) -> List[NameExternalIDPair]:\n    response: requests.Response = requests.get(self._API_URL_TEMPLATE.format('search'), params={'q': artist_name, 'type': 'artist', 'limit': limit}, headers={'Authorization': 'Bearer {}'.format(self._token.access_token)})\n    response.raise_for_status()\n    if (not response.text):\n        return []\n    result: List[NameExternalIDPair] = []\n    data: List[Dict] = response.json()['artists']['items']\n    for artist in data:\n        artist = NameExternalIDPair(artist['name'].strip(), artist['id'].strip())\n        if ((not artist.name) or (not artist.external_id)):\n            raise SpotifyClientError('Name or ID is missing')\n        result.append(artist)\n    return result", "docstring": "Returns zero or more artist name - external ID pairs that match the specified artist name.\n\nArguments:\nartist_name (str): The artist name to search in the Spotify API.\nlimit (int): The maximum number of results to return.\n\nReturns:\nZero or more artist name - external ID pairs.\n\nRaises:\nrequests.HTTPError: If an HTTP error occurred during the request.\nSpotifyClientError: If an invalid item is found.", "source": "codesearchnet"}
{"code": "def initialize_repository(path, spor_dir='.spor'):\n    \n    path = pathlib.Path(path)\n    spor_path = path / spor_dir\n    if spor_path.exists():\n        raise ValueError('spor directory already exists: {}'.format(spor_path))\n    spor_path.mkdir()\n\n    return Repository(path, spor_dir)", "docstring": "Initialize a spor repository in `path` if one doesn't already exist.\n\nArgs:\npath: Path to any file or directory within the repository.\nspor_dir: The name of the directory containing spor data.\n\nReturns: A `Repository` instance.\n\nRaises:\nValueError: A repository already exists at `path`.", "source": "juraj-google-style"}
{"code": "def __init__(self, buffer_size=8, max_workers=5, client=None, credential=None):\n    self.buffer_size = buffer_size\n    self.max_workers = max_workers\n    self.client = client or DicomApiHttpClient()\n    self.credential = credential", "docstring": "Initializes DicomSearch.\nArgs:\nbuffer_size: # type: Int. Size of the request buffer.\nmax_workers: # type: Int. Maximum number of threads a worker can\ncreate. If it is set to one, all the request will be processed\nsequentially in a worker.\nclient: # type: object. If it is specified, all the Api calls will\nmade by this client instead of the default one (DicomApiHttpClient).\ncredential: # type: Google credential object, if it is specified, the\nHttp client will use it to create sessions instead of the default.", "source": "github-repos"}
{"code": "def pause(self, device):\n    resp = self.post('pause', params={'device': device}, return_response=True)\n    error = resp.text\n    if (not error):\n        error = None\n    return {'success': (resp.status_code == requests.codes.ok), 'error': error}", "docstring": "Pause the given device.\n\nArgs:\ndevice (str): Device ID.\n\nReturns:\ndict: with keys ``success`` and ``error``.", "source": "codesearchnet"}
{"code": "def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):\n    (ax, ay) = item_a.center_of_mass(time_a)\n    (bx, by) = item_b.center_of_mass(time_b)\n    if (time_a < time_b):\n        bx = (bx - item_b.u)\n        by = (by - item_b.v)\n    else:\n        ax = (ax - item_a.u)\n        ay = (ay - item_a.v)\n    return (np.minimum(np.sqrt((((ax - bx) ** 2) + ((ay - by) ** 2))), max_value) / float(max_value))", "docstring": "Centroid distance with motion corrections.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def _format_output(kernel_restart, packages, verbose, restartable, nonrestartable, restartservicecommands, restartinitcommands):\n    if (not verbose):\n        packages = (restartable + nonrestartable)\n        if kernel_restart:\n            packages.append('System restart required.')\n        return packages\n    else:\n        ret = ''\n        if kernel_restart:\n            ret = 'System restart required.\\n\\n'\n        if packages:\n            ret += 'Found {0} processes using old versions of upgraded files.\\n'.format(len(packages))\n            ret += 'These are the packages:\\n'\n        if restartable:\n            ret += 'Of these, {0} seem to contain systemd service definitions or init scripts which can be used to restart them:\\n'.format(len(restartable))\n            for package in restartable:\n                ret += (package + ':\\n')\n                for program in packages[package]['processes']:\n                    ret += (program + '\\n')\n            if restartservicecommands:\n                ret += '\\n\\nThese are the systemd services:\\n'\n                ret += '\\n'.join(restartservicecommands)\n            if restartinitcommands:\n                ret += '\\n\\nThese are the initd scripts:\\n'\n                ret += '\\n'.join(restartinitcommands)\n        if nonrestartable:\n            ret += '\\n\\nThese processes {0} do not seem to have an associated init script to restart them:\\n'.format(len(nonrestartable))\n            for package in nonrestartable:\n                ret += (package + ':\\n')\n                for program in packages[package]['processes']:\n                    ret += (program + '\\n')\n    return ret", "docstring": "Formats the output of the restartcheck module.\n\nReturns:\nString - formatted output.\n\nArgs:\nkernel_restart: indicates that newer kernel is instaled\npackages: list of packages that should be restarted\nverbose: enables extensive output\nrestartable: list of restartable packages\nnonrestartable: list of non-restartable packages\nrestartservicecommands: list of commands to restart services\nrestartinitcommands: list of commands to restart init.d scripts", "source": "codesearchnet"}
{"code": "def get_cartesian_coords(self, fractional_coords: Vector3Like) -> np.ndarray:\n    return dot(fractional_coords, self._matrix)", "docstring": "Returns the cartesian coordinates given fractional coordinates.\n\nArgs:\nfractional_coords (3x1 array): Fractional coords.\n\nReturns:\nCartesian coordinates", "source": "codesearchnet"}
{"code": "def _set_advertising_data(self, packet_type, data):\n    payload = struct.pack(('<BB%ss' % len(data)), packet_type, len(data), bytes(data))\n    response = self._send_command(6, 9, payload)\n    (result,) = unpack('<H', response.payload)\n    if (result != 0):\n        return (False, {'reason': 'Error code from BLED112 setting advertising data', 'code': result})\n    return (True, None)", "docstring": "Set the advertising data for advertisements sent out by this bled112\n\nArgs:\npacket_type (int): 0 for advertisement, 1 for scan response\ndata (bytearray): the data to set", "source": "codesearchnet"}
{"code": "def _sign_operation(op):\n    md5 = hashlib.md5()\n    md5.update(op.consumerId.encode('utf-8'))\n    md5.update(b'\\x00')\n    md5.update(op.operationName.encode('utf-8'))\n    if op.labels:\n        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n    return md5.digest()", "docstring": "Obtains a signature for an operation in a ReportRequest.\n\nArgs:\nop (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an\noperation used in a `ReportRequest`\n\nReturns:\nstring: a unique signature for that operation", "source": "codesearchnet"}
{"code": "def check_signature_supported(func, warn=False):\n    function_name = func.__name__\n    sig_params = get_signature_params(func)\n    has_kwargs_param = False\n    has_kwonly_param = False\n    for (keyword_name, parameter) in sig_params:\n        if (parameter.kind == Parameter.VAR_KEYWORD):\n            has_kwargs_param = True\n        if (parameter.kind == Parameter.KEYWORD_ONLY):\n            has_kwonly_param = True\n    if has_kwargs_param:\n        message = 'The function {} has a **kwargs argument, which is currently not supported.'.format(function_name)\n        if warn:\n            logger.warning(message)\n        else:\n            raise Exception(message)\n    if has_kwonly_param:\n        message = 'The function {} has a keyword only argument (defined after * or *args), which is currently not supported.'.format(function_name)\n        if warn:\n            logger.warning(message)\n        else:\n            raise Exception(message)", "docstring": "Check if we support the signature of this function.\n\nWe currently do not allow remote functions to have **kwargs. We also do not\nsupport keyword arguments in conjunction with a *args argument.\n\nArgs:\nfunc: The function whose signature should be checked.\nwarn: If this is true, a warning will be printed if the signature is\nnot supported. If it is false, an exception will be raised if the\nsignature is not supported.\n\nRaises:\nException: An exception is raised if the signature is not supported.", "source": "codesearchnet"}
{"code": "def __init__(self, yaml_definition=None):\n    \n    definitions_registry = registry.DataTypeDefinitionsRegistry()\n\n    if yaml_definition:\n      definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n\n      file_object = io.BytesIO(yaml_definition)\n      definitions_reader.ReadFileObject(definitions_registry, file_object)\n\n    super(DataTypeFabric, self).__init__(definitions_registry)", "docstring": "Initializes a data type fabric.\n\nArgs:\nyaml_definition (str): YAML formatted data type definitions.", "source": "juraj-google-style"}
{"code": "def get_mnemonics(self, mnemonics, uwis=None, alias=None):\n        \n        \n        uwis = uwis or self.uwis\n        wells = [w for w in self.__list if w.uwi in uwis]\n        all_wells = []\n        for w in wells:\n            this_well = [w.get_mnemonic(m, alias=alias) for m in mnemonics]\n            all_wells.append(this_well)\n        return all_wells", "docstring": "Looks at all the wells in turn and returns the highest thing\nin the alias table.\n\nArgs:\nmnemonics (list)\nalias (dict)\n\nReturns:\nlist. A list of lists.", "source": "juraj-google-style"}
{"code": "def expect_exitstatus(self, exit_status):\n    self.expect_end()\n    logger.debug(\"Checking exit status of '{0}', output so far: {1}\".format(self.name, self.get_output()))\n    if (self._spawn.exitstatus is None):\n        raise WrongExitStatusException(instance=self, expected=exit_status, output=self.get_output())\n    if (self._spawn.exitstatus is not exit_status):\n        raise WrongExitStatusException(instance=self, expected=exit_status, got=self._spawn.exitstatus, output=self.get_output())", "docstring": "Wait for the running program to finish and expect some exit status.\n\nArgs:\nexit_status (int):  The expected exit status.\n\nRaises:\nWrongExitStatusException: The produced exit status is not the expected one.", "source": "codesearchnet"}
{"code": "def score(text, *score_functions):\n    if (not score_functions):\n        raise ValueError('score_functions must not be empty')\n    return statistics.mean((func(text) for func in score_functions))", "docstring": "Score ``text`` using ``score_functions``.\n\nExamples:\n>>> score(\"abc\", function_a)\n>>> score(\"abc\", function_a, function_b)\n\nArgs:\ntext (str): The text to score\n*score_functions (variable length argument list): functions to score with\n\nReturns:\nArithmetic mean of scores\n\nRaises:\nValueError: If score_functions is empty", "source": "codesearchnet"}
{"code": "def is_disconnected(self, node_id):\n    conn = self._conns.get(node_id)\n    if (conn is None):\n        return False\n    return conn.disconnected()", "docstring": "Check whether the node connection has been disconnected or failed.\n\nA disconnected node has either been closed or has failed. Connection\nfailures are usually transient and can be resumed in the next ready()\ncall, but there are cases where transient failures need to be caught\nand re-acted upon.\n\nArguments:\nnode_id (int): the id of the node to check\n\nReturns:\nbool: True iff the node exists and is disconnected", "source": "codesearchnet"}
{"code": "def _ParseStringOption(cls, options, argument_name, default_value=None):\n    \n    argument_value = getattr(options, argument_name, None)\n    if argument_value is None:\n      return default_value\n\n    if isinstance(argument_value, py2to3.BYTES_TYPE):\n      encoding = sys.stdin.encoding\n\n      \n      if not encoding:\n        encoding = locale.getpreferredencoding()\n      if not encoding:\n        encoding = cls._PREFERRED_ENCODING\n\n      try:\n        argument_value = argument_value.decode(encoding)\n      except UnicodeDecodeError as exception:\n        raise errors.BadConfigOption((\n            'Unable to convert option: {0:s} to Unicode with error: '\n            '{1!s}.').format(argument_name, exception))\n\n    elif not isinstance(argument_value, py2to3.UNICODE_TYPE):\n      raise errors.BadConfigOption(\n          'Unsupported option: {0:s} string type required.'.format(\n              argument_name))\n\n    return argument_value", "docstring": "Parses a string command line argument.\n\nArgs:\noptions (argparse.Namespace): parser options.\nargument_name (str): name of the command line argument.\ndefault_value (Optional[str]): default value of the command line argument.\n\nReturns:\nstr: command line argument value or the default value if the command line\nargument is not set\n\nRaises:\nBadConfigOption: if the command line argument value cannot be converted\nto a Unicode string.", "source": "juraj-google-style"}
{"code": "def f(a=1, b=2):\n    return a + b", "docstring": "Compute the sum.\n\nArgs:\na: an integer.\nb: another integer.\n\nReturns:\nSum of two integers.", "source": "github-repos"}
{"code": "def set(self, key, value):\n    data = self._load_file()\n    data[key] = value\n    self._save_file(data)", "docstring": "Set the value of a key\n\nArgs:\nkey (string): The key used to store this value\nvalue (string): The value to store", "source": "codesearchnet"}
{"code": "def clean_program(self):\n    program_id = self.cleaned_data[self.Fields.PROGRAM].strip()\n    if (not program_id):\n        return None\n    try:\n        client = CourseCatalogApiClient(self._user, self._enterprise_customer.site)\n        program = (client.get_program_by_uuid(program_id) or client.get_program_by_title(program_id))\n    except MultipleProgramMatchError as exc:\n        raise ValidationError(ValidationMessages.MULTIPLE_PROGRAM_MATCH.format(program_count=exc.programs_matched))\n    except (HttpClientError, HttpServerError):\n        raise ValidationError(ValidationMessages.INVALID_PROGRAM_ID.format(program_id=program_id))\n    if (not program):\n        raise ValidationError(ValidationMessages.INVALID_PROGRAM_ID.format(program_id=program_id))\n    if (program['status'] != ProgramStatuses.ACTIVE):\n        raise ValidationError(ValidationMessages.PROGRAM_IS_INACTIVE.format(program_id=program_id, status=program['status']))\n    return program", "docstring": "Clean program.\n\nTry obtaining program treating form value as program UUID or title.\n\nReturns:\ndict: Program information if program found", "source": "codesearchnet"}
{"code": "def get(name, *default):\n    global g_config\n    curr = g_config\n    for part in name.split('.'):\n        if (part in curr):\n            curr = curr[part]\n        elif default:\n            return default[0]\n        else:\n            raise AttributeError(\"Config value '{}' does not exist\".format(name))\n    return curr", "docstring": "Get config value with the given name and optional default.\n\nArgs:\nname (str):\nThe name of the config value.\n*default (Any):\nIf given and the key doesn't not exist, this will be returned\ninstead. If it's not given and the config value does not exist,\nAttributeError will be raised\n\nReturns:\nThe requested config value. This is one of the global values defined\nin this file. If the value does not exist it will return `default` if\ngive or raise `AttributeError`.\n\nRaises:\nAttributeError: If the value does not exist and `default` was not given.", "source": "codesearchnet"}
{"code": "def get_all(cls, include_disabled=True):\n        \n        if cls == BaseAccount:\n            raise InquisitorError('get_all on BaseAccount is not supported')\n\n        account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id\n        qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)\n\n        if not include_disabled:\n            qry = qry.filter(Account.enabled == 1)\n\n        accounts = qry.find(Account.account_type_id == account_type_id)\n\n        return {res.account_id: cls(res) for res in accounts}", "docstring": "Returns a list of all accounts of a given type\n\nArgs:\ninclude_disabled (`bool`): Include disabled accounts. Default: `True`\n\nReturns:\nlist of account objects", "source": "juraj-google-style"}
{"code": "def _op_to_matrix(self, op: Optional[ops.Operation], qubits: Tuple[(ops.Qid, ...)]) -> Optional[np.ndarray]:\n    (q1, q2) = qubits\n    matrix = protocols.unitary(op, None)\n    if (matrix is None):\n        return None\n    assert (op is not None)\n    if (op.qubits == qubits):\n        return matrix\n    if (op.qubits == (q2, q1)):\n        return MergeInteractions._flip_kron_order(matrix)\n    if (op.qubits == (q1,)):\n        return np.kron(matrix, np.eye(2))\n    if (op.qubits == (q2,)):\n        return np.kron(np.eye(2), matrix)\n    return None", "docstring": "Determines the effect of an operation on the given qubits.\n\nIf the operation is a 1-qubit operation on one of the given qubits,\nor a 2-qubit operation on both of the given qubits, and also the\noperation has a known matrix, then a matrix is returned. Otherwise None\nis returned.\n\nArgs:\nop: The operation to understand.\nqubits: The qubits we care about. Order determines matrix tensor\norder.\n\nReturns:\nNone, or else a matrix equivalent to the effect of the operation.", "source": "codesearchnet"}
{"code": "def tf_step(self, time, variables, source_variables, **kwargs):\n        \n        assert all(util.shape(source) == util.shape(target) for source, target in zip(source_variables, variables))\n\n        last_sync = tf.get_variable(\n            name='last-sync',\n            shape=(),\n            dtype=tf.int64,\n            initializer=tf.constant_initializer(value=(-self.sync_frequency), dtype=tf.int64),\n            trainable=False\n        )\n\n        def sync():\n            deltas = list()\n            for source_variable, target_variable in zip(source_variables, variables):\n                delta = self.update_weight * (source_variable - target_variable)\n                deltas.append(delta)\n\n            applied = self.apply_step(variables=variables, deltas=deltas)\n            last_sync_updated = last_sync.assign(value=time)\n\n            with tf.control_dependencies(control_inputs=(applied, last_sync_updated)):\n                \n                return [delta + 0.0 for delta in deltas]\n\n        def no_sync():\n            deltas = list()\n            for variable in variables:\n                delta = tf.zeros(shape=util.shape(variable))\n                deltas.append(delta)\n            return deltas\n\n        do_sync = (time - last_sync >= self.sync_frequency)\n        return tf.cond(pred=do_sync, true_fn=sync, false_fn=no_sync)", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\ntime: Time tensor.\nvariables: List of variables to optimize.\nsource_variables: List of source variables to synchronize with.\n**kwargs: Additional arguments, not used.\n\nReturns:\nList of delta tensors corresponding to the updates for each optimized variable.", "source": "juraj-google-style"}
{"code": "def encode_field(self, field, value):\n    \n    \n    \n    if (isinstance(field, messages.IntegerField) and\n        field.variant in (messages.Variant.INT64,\n                          messages.Variant.UINT64,\n                          messages.Variant.SINT64)):\n      if value not in (None, [], ()):\n        \n        if isinstance(value, list):\n          value = [str(subvalue) for subvalue in value]\n        else:\n          value = str(value)\n        return value\n\n    return super(EndpointsProtoJson, self).encode_field(field, value)", "docstring": "Encode a python field value to a JSON value.\n\nArgs:\nfield: A ProtoRPC field instance.\nvalue: A python value supported by field.\n\nReturns:\nA JSON serializable value appropriate for field.", "source": "juraj-google-style"}
{"code": "def update(self, **kwargs):\n        \n        for arg in kwargs:\n            if hasattr(self, arg):\n                setattr(self, arg, kwargs[arg])\n            else:\n                raise ValueError(\"Invalid RayParams parameter in\"\n                                 \" update: %s\" % arg)\n\n        self._check_usage()", "docstring": "Update the settings according to the keyword arguments.\n\nArgs:\nkwargs: The keyword arguments to set corresponding fields.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states = self.self_attn(hidden_states=hidden_states, cu_seqlens=cu_seqlens)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.fc1(hidden_states)\n    hidden_states = self.activation_fn(hidden_states)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = residual + hidden_states\n    if hidden_states.dtype == torch.float16:\n        clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _RegisterFlagByModuleId(self, module_id, flag):\n    \n    flags_by_module_id = self.FlagsByModuleIdDict()\n    flags_by_module_id.setdefault(module_id, []).append(flag)", "docstring": "Records the module that defines a specific flag.\n\nArgs:\nmodule_id: An int, the ID of the Python module.\nflag: A Flag object, a flag that is key to the module.", "source": "juraj-google-style"}
{"code": "def response_list(data, key):\n    \n    if key not in data:\n        return None\n\n    if isinstance(data[key], list):\n        return data[key]\n\n    else:\n        return [data[key],]", "docstring": "Obtain the relevant response data in a list.\n\nIf the response does not already contain the result in a list, a new one\nwill be created to ease iteration in the parser methods.\n\nArgs:\ndata (dict): API response.\nkey (str): Attribute of the response that contains the result values.\n\nReturns:\nList of response items (usually dict) or None if the key is not present.", "source": "juraj-google-style"}
{"code": "def set_servo_speed(self, goalspeed, led):\n    if (goalspeed > 0):\n        goalspeed_msb = ((int(goalspeed) & 65280) >> 8)\n        goalspeed_lsb = (int(goalspeed) & 255)\n    elif (goalspeed < 0):\n        goalspeed_msb = (64 + (255 - ((int(goalspeed) & 65280) >> 8)))\n        goalspeed_lsb = (abs(goalspeed) & 255)\n    data = []\n    data.append(12)\n    data.append(self.servoid)\n    data.append(I_JOG_REQ)\n    data.append(goalspeed_lsb)\n    data.append(goalspeed_msb)\n    data.append((2 | led))\n    data.append(self.servoid)\n    data.append(0)\n    send_data(data)", "docstring": "Set the Herkulex in continuous rotation mode\n\nArgs:\n\ngoalspeed (int): the speed , range -1023 to 1023\nled (int): the LED color\n0x00 LED off\n0x04 GREEN\n0x08 BLUE\n0x10 RED", "source": "codesearchnet"}
{"code": "def add_filter(ds, patterns):\n    \n    if not plugins.is_datasource(ds):\n        raise Exception(\"Filters are applicable only to datasources.\")\n\n    delegate = dr.get_delegate(ds)\n\n    if delegate.raw:\n        raise Exception(\"Filters aren't applicable to raw datasources.\")\n\n    if not delegate.filterable:\n        raise Exception(\"Filters aren't applicable to %s.\" % dr.get_name(ds))\n\n    if ds in _CACHE:\n        del _CACHE[ds]\n    if isinstance(patterns, six.string_types):\n        FILTERS[ds].add(patterns)\n    elif isinstance(patterns, list):\n        FILTERS[ds] |= set(patterns)\n    elif isinstance(patterns, set):\n        FILTERS[ds] |= patterns\n    else:\n        raise TypeError(\"patterns must be string, list, or set.\")", "docstring": "Add a filter or list of filters to a datasource. A filter is a simple\nstring, and it matches if it is contained anywhere within a line.\n\nArgs:\nds (@datasource component): The datasource to filter\npatterns (str, [str]): A string, list of strings, or set of strings to\nadd to the datasource's filters.", "source": "juraj-google-style"}
{"code": "def _ReadStringDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_STRING_MEMBER\n    else:\n        supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_STRING\n    definition_object = self._ReadElementSequenceDataTypeDefinition(definitions_registry, definition_values, data_types.StringDefinition, definition_name, supported_definition_values)\n    encoding = definition_values.get('encoding', None)\n    if (not encoding):\n        error_message = 'missing encoding'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object.encoding = encoding\n    return definition_object", "docstring": "Reads a string data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nStringDefinition: string data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def _viz_prototype(self, vis_fn):\n        \n        def _viz_logger(*args, **kwargs):\n            self.win = vis_fn(*args,\n                              win=self.win,\n                              env=self.env,\n                              opts=self.opts,\n                              **kwargs)\n        return _viz_logger", "docstring": "Outputs a function which will log the arguments to Visdom in an appropriate way.\n\nArgs:\nvis_fn: A function, such as self.vis.image", "source": "juraj-google-style"}
{"code": "def get_golden_chunk_records():\n    pattern = os.path.join(fsdb.golden_chunk_dir(), '*.zz')\n    return sorted(tf.gfile.Glob(pattern), reverse=True)[:FLAGS.window_size]", "docstring": "Return up to num_records of golden chunks to train on.\n\nReturns:\nA list of golden chunks up to num_records in length, sorted by path.", "source": "codesearchnet"}
{"code": "def print_debug(*args, **kwargs):\n    \n    if WTF_CONFIG_READER.get(\"debug\", False) == True:\n        print(*args, **kwargs)", "docstring": "Print if and only if the debug flag is set true in the config.yaml file.\n\nArgs:\nargs : var args of print arguments.", "source": "juraj-google-style"}
{"code": "def delete_idx_status(self, rdf_class):\n        \n\n        sparql_template = \n        rdf_types = [rdf_class.uri] + [item.uri\n                                       for item in rdf_class.subclasses]\n        sparql = sparql_template.format(\"\\n\\t\\t\".join(rdf_types))\n        log.warn(\"Deleting index status for %s\", rdf_class.uri)\n        return self.tstore_conn.update_query(sparql)", "docstring": "Removes all of the index status triples from the datastore\n\nArgs:\n-----\nrdf_class: The class of items to remove the status from", "source": "juraj-google-style"}
{"code": "def __init__(self, volume, layers=None):\n        \n        if isinstance(volume, string_types):\n            volume = nb.load(volume)\n        self.volume = volume\n        data = self.volume.get_data()\n        self.dims = data.shape\n        self.vox_dims = self.get_header().get_zooms()\n        self.full = np.float64(data.ravel())\n        self.global_mask = np.where(self.full)\n\n        self.reset()\n        if layers is not None:\n            self.add(layers)", "docstring": "Initialize a new Masker.\nArgs:\nvolume: A volume indicating the global space within which all\nsubsequent layers must reside. Any voxel in the mask with a\nnon-zero valid is considered valid for analyses. Can be either\nan image filename or a NiBabel image.\nlayers: Optional masking layers to add; see docstring for add().", "source": "juraj-google-style"}
{"code": "def lu_slogdet(LU):\n    r\n    LU = (asarray(LU[0], float), asarray(LU[1], float))\n    adet = _sum(log(_abs(LU[0].diagonal())))\n\n    s = prod(sign(LU[0].diagonal()))\n\n    nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype=\"int32\"))\n\n    odd = nrows_exchange % 2 == 1\n    if odd:\n        s *= -1.0\n\n    return (s, adet)", "docstring": "r\"\"\"Natural logarithm of a LU decomposition.\n\nArgs:\nLU (tuple): LU decomposition.\n\nReturns:\ntuple: sign and log-determinant.", "source": "juraj-google-style"}
{"code": "def reindex(self):\n    _map = dict(zip(self.micro_indices, reindex(self.micro_indices)))\n    partition = tuple((tuple((_map[index] for index in group)) for group in self.partition))\n    output_indices = tuple((_map[i] for i in self.output_indices))\n    return Blackbox(partition, output_indices)", "docstring": "Squeeze the indices of this blackboxing to ``0..n``.\n\nReturns:\nBlackbox: a new, reindexed |Blackbox|.\n\nExample:\n>>> partition = ((3,), (2, 4))\n>>> output_indices = (2, 3)\n>>> blackbox = Blackbox(partition, output_indices)\n>>> blackbox.reindex()\nBlackbox(partition=((1,), (0, 2)), output_indices=(0, 1))", "source": "codesearchnet"}
{"code": "def create_volume(self, volume_name: str, driver_spec: str=None):\n    if driver_spec:\n        driver = driver_spec\n    else:\n        driver = 'local'\n    if (not self._manager):\n        raise RuntimeError('Services can only be deleted on swarm manager nodes')\n    self._client.volumes.create(name=volume_name, driver=driver)", "docstring": "Create new docker volumes.\n\nOnly the manager nodes can create a volume\n\nArgs:\nvolume_name (string): Name for the new docker volume\ndriver_spec (string): Driver for the docker volume", "source": "codesearchnet"}
{"code": "def write_double(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sd' % endian, value)", "docstring": "Pack the value as a double and write 8 bytes to the stream.\n\nArgs:\nvalue (number): the value to write to the stream.\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def get_signature_def_map(saved_model_dir, tag_set):\n    meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n    return meta_graph.signature_def", "docstring": "Gets SignatureDef map from a MetaGraphDef in a SavedModel.\n\nReturns the SignatureDef map for the given tag-set in the SavedModel\ndirectory.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect or execute.\ntag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in\nstring format, separated by ','. For tag-set contains multiple tags, all\ntags must be passed in.\n\nReturns:\nA SignatureDef map that maps from string keys to SignatureDefs.", "source": "github-repos"}
{"code": "def _geodetic_to_cartesian(cls, lat, lon, alt):\n        \n        C = Earth.r / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n        S = Earth.r * (1 - Earth.e ** 2) / np.sqrt(1 - (Earth.e * np.sin(lat)) ** 2)\n        r_d = (C + alt) * np.cos(lat)\n        r_k = (S + alt) * np.sin(lat)\n\n        norm = np.sqrt(r_d ** 2 + r_k ** 2)\n        return norm * np.array([\n            np.cos(lat) * np.cos(lon),\n            np.cos(lat) * np.sin(lon),\n            np.sin(lat)\n        ])", "docstring": "Conversion from latitude, longitude and altitude coordinates to\ncartesian with respect to an ellipsoid\n\nArgs:\nlat (float): Latitude in radians\nlon (float): Longitude in radians\nalt (float): Altitude to sea level in meters\n\nReturn:\nnumpy.array: 3D element (in meters)", "source": "juraj-google-style"}
{"code": "def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):\n    outputs = nest.flatten(outputs)\n    targets = nest.flatten(targets)\n    metric_results = []\n    if targets:\n        if len(model._targets) != len(targets):\n            new_targets = [None if t is None else targets.pop(0) for t in model._targets]\n            targets = new_targets\n        metric_results = model._handle_metrics(outputs, targets=targets, sample_weights=sample_weights, masks=masks, return_weighted_and_unweighted_metrics=True, skip_target_masks=model._prepare_skip_target_masks())\n    metric_results.extend([m.result() for m in model.metrics if m not in model._compile_metric_functions])\n    return metric_results", "docstring": "Calculates the metrics for each output of the given model.\n\nArgs:\nmodel: The model on which metrics are being calculated.\noutputs: The outputs of the given model.\ntargets: The predictions or targets of the given model.\nsample_weights: Optional list of sample weights for each output.\nmasks: Optional list of masks for each output.\n\nReturns:\nReturns the metric results for each output of the model.", "source": "github-repos"}
{"code": "def Write(self, string):\n    \n    try:\n      \n      \n      encoded_string = codecs.encode(string, self._encoding, self._errors)\n    except UnicodeEncodeError:\n      if self._errors == 'strict':\n        logger.error(\n            'Unable to properly write output due to encoding error. '\n            'Switching to error tolerant encoding which can result in '\n            'non Basic Latin (C0) characters to be replaced with \"?\" or '\n            '\"\\\\ufffd\".')\n        self._errors = 'replace'\n\n      encoded_string = codecs.encode(string, self._encoding, self._errors)\n\n    self._file_object.write(encoded_string)", "docstring": "Writes a string to the output.\n\nArgs:\nstring (str): output.", "source": "juraj-google-style"}
{"code": "def get_room_messages(self, room_id, token, direction, limit=10, to=None):\n        \n        query = {\n            \"roomId\": room_id,\n            \"from\": token,\n            \"dir\": direction,\n            \"limit\": limit,\n        }\n\n        if to:\n            query[\"to\"] = to\n\n        return self._send(\"GET\", \"/rooms/{}/messages\".format(quote(room_id)),\n                          query_params=query, api_path=\"/_matrix/client/r0\")", "docstring": "Perform GET /rooms/{roomId}/messages.\n\nArgs:\nroom_id (str): The room's id.\ntoken (str): The token to start returning events from.\ndirection (str):  The direction to return events from. One of: [\"b\", \"f\"].\nlimit (int): The maximum number of events to return.\nto (str): The token to stop returning events at.", "source": "juraj-google-style"}
{"code": "def signbit(x):\n    if any_symbolic_tensors((x,)):\n        return Signbit().symbolic_call(x)\n    return backend.numpy.signbit(x)", "docstring": "Return the sign bit of the elements of `x`.\n\nThe output boolean tensor contains `True` where the sign of `x` is negative,\nand `False` otherwise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput boolean tensor of same shape as `x`.", "source": "github-repos"}
{"code": "def __init__(self, model):\n        \n        self._model_id = None\n        if model is not None:\n            self._model_id = model.id", "docstring": "Create a new base event.\n\nArgs:\n\nmodel (Model) : a Bokeh model to register event callbacks on", "source": "juraj-google-style"}
{"code": "def add_exit_callback_to_default_func_graph(fn) -> None:\n    default_graph = get_default_graph()\n    if not default_graph._building_function:\n        raise RuntimeError('Cannot add scope exit callbacks when not building a function.  Default graph: {}'.format(default_graph))\n    default_graph._add_scope_exit_callback(fn)", "docstring": "Add a callback to run when the default function graph goes out of scope.\n\nUsage:\n\n```python\n@tf.function\ndef fn(x, v):\nexpensive = expensive_object(v)\nadd_exit_callback_to_default_func_graph(lambda: expensive.release())\nreturn g(x, expensive)\n\nfn(x=tf.constant(...), v=...)\n# `expensive` has been released.\n```\n\nArgs:\nfn: A callable that takes no arguments and whose output is ignored.\nTo be executed when exiting func graph scope.\n\nRaises:\nRuntimeError: If executed when the current default graph is not a FuncGraph,\nor not currently executing in function creation mode (e.g., if inside\nan init_scope).", "source": "github-repos"}
{"code": "def minimize(self, session=None, feed_dict=None, fetches=None, step_callback=None, loss_callback=None, **run_kwargs):\n    session = (session or ops.get_default_session())\n    feed_dict = (feed_dict or {})\n    fetches = (fetches or [])\n    loss_callback = (loss_callback or (lambda *fetches: None))\n    step_callback = (step_callback or (lambda xk: None))\n    self._initialize_updated_shapes(session)\n    loss_grad_func = self._make_eval_func([self._loss, self._packed_loss_grad], session, feed_dict, fetches, loss_callback)\n    equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict, fetches)\n    equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads, session, feed_dict, fetches)\n    inequality_funcs = self._make_eval_funcs(self._inequalities, session, feed_dict, fetches)\n    inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads, session, feed_dict, fetches)\n    initial_packed_var_val = session.run(self._packed_var)\n    packed_var_val = self._minimize(initial_val=initial_packed_var_val, loss_grad_func=loss_grad_func, equality_funcs=equality_funcs, equality_grad_funcs=equality_grad_funcs, inequality_funcs=inequality_funcs, inequality_grad_funcs=inequality_grad_funcs, packed_bounds=self._packed_bounds, step_callback=step_callback, optimizer_kwargs=self.optimizer_kwargs)\n    var_vals = [packed_var_val[packing_slice] for packing_slice in self._packing_slices]\n    session.run(self._var_updates, feed_dict=dict(zip(self._update_placeholders, var_vals)), **run_kwargs)", "docstring": "Minimize a scalar `Tensor`.\n\nVariables subject to optimization are updated in-place at the end of\noptimization.\n\nNote that this method does *not* just return a minimization `Op`, unlike\n`Optimizer.minimize()`; instead it actually performs minimization by\nexecuting commands to control a `Session`.\n\nArgs:\nsession: A `Session` instance.\nfeed_dict: A feed dict to be passed to calls to `session.run`.\nfetches: A list of `Tensor`s to fetch and supply to `loss_callback`\nas positional arguments.\nstep_callback: A function to be called at each optimization step;\narguments are the current values of all optimization variables\nflattened into a single vector.\nloss_callback: A function to be called every time the loss and gradients\nare computed, with evaluated fetches supplied as positional arguments.\n**run_kwargs: kwargs to pass to `session.run`.", "source": "codesearchnet"}
{"code": "def _extractPayload(response, slaveaddress, mode, functioncode):\n    BYTEPOSITION_FOR_ASCII_HEADER = 0\n    BYTEPOSITION_FOR_SLAVEADDRESS = 0\n    BYTEPOSITION_FOR_FUNCTIONCODE = 1\n    NUMBER_OF_RESPONSE_STARTBYTES = 2\n    NUMBER_OF_CRC_BYTES = 2\n    NUMBER_OF_LRC_BYTES = 1\n    BITNUMBER_FUNCTIONCODE_ERRORINDICATION = 7\n    MINIMAL_RESPONSE_LENGTH_RTU = (NUMBER_OF_RESPONSE_STARTBYTES + NUMBER_OF_CRC_BYTES)\n    MINIMAL_RESPONSE_LENGTH_ASCII = 9\n    _checkString(response, description='response')\n    _checkSlaveaddress(slaveaddress)\n    _checkMode(mode)\n    _checkFunctioncode(functioncode, None)\n    plainresponse = response\n    if (mode == MODE_ASCII):\n        if (len(response) < MINIMAL_RESPONSE_LENGTH_ASCII):\n            raise ValueError('Too short Modbus ASCII response (minimum length {} bytes). Response: {!r}'.format(MINIMAL_RESPONSE_LENGTH_ASCII, response))\n    elif (len(response) < MINIMAL_RESPONSE_LENGTH_RTU):\n        raise ValueError('Too short Modbus RTU response (minimum length {} bytes). Response: {!r}'.format(MINIMAL_RESPONSE_LENGTH_RTU, response))\n    if (mode == MODE_ASCII):\n        if (response[BYTEPOSITION_FOR_ASCII_HEADER] != _ASCII_HEADER):\n            raise ValueError('Did not find header ({!r}) as start of ASCII response. The plain response is: {!r}'.format(_ASCII_HEADER, response))\n        elif (response[(- len(_ASCII_FOOTER)):] != _ASCII_FOOTER):\n            raise ValueError('Did not find footer ({!r}) as end of ASCII response. The plain response is: {!r}'.format(_ASCII_FOOTER, response))\n        response = response[1:(- 2)]\n        if ((len(response) % 2) != 0):\n            template = ('Stripped ASCII frames should have an even number of bytes, but is {} bytes. ' + 'The stripped response is: {!r} (plain response: {!r})')\n            raise ValueError(template.format(len(response), response, plainresponse))\n        response = _hexdecode(response)\n    if (mode == MODE_ASCII):\n        calculateChecksum = _calculateLrcString\n        numberOfChecksumBytes = NUMBER_OF_LRC_BYTES\n    else:\n        calculateChecksum = _calculateCrcString\n        numberOfChecksumBytes = NUMBER_OF_CRC_BYTES\n    receivedChecksum = response[(- numberOfChecksumBytes):]\n    responseWithoutChecksum = response[0:(len(response) - numberOfChecksumBytes)]\n    calculatedChecksum = calculateChecksum(responseWithoutChecksum)\n    if (receivedChecksum != calculatedChecksum):\n        template = 'Checksum error in {} mode: {!r} instead of {!r} . The response is: {!r} (plain response: {!r})'\n        text = template.format(mode, receivedChecksum, calculatedChecksum, response, plainresponse)\n        raise ValueError(text)\n    responseaddress = ord(response[BYTEPOSITION_FOR_SLAVEADDRESS])\n    if (responseaddress != slaveaddress):\n        raise ValueError('Wrong return slave address: {} instead of {}. The response is: {!r}'.format(responseaddress, slaveaddress, response))\n    receivedFunctioncode = ord(response[BYTEPOSITION_FOR_FUNCTIONCODE])\n    if (receivedFunctioncode == _setBitOn(functioncode, BITNUMBER_FUNCTIONCODE_ERRORINDICATION)):\n        raise ValueError('The slave is indicating an error. The response is: {!r}'.format(response))\n    elif (receivedFunctioncode != functioncode):\n        raise ValueError('Wrong functioncode: {} instead of {}. 
The response is: {!r}'.format(receivedFunctioncode, functioncode, response))\n    firstDatabyteNumber = NUMBER_OF_RESPONSE_STARTBYTES\n    if (mode == MODE_ASCII):\n        lastDatabyteNumber = (len(response) - NUMBER_OF_LRC_BYTES)\n    else:\n        lastDatabyteNumber = (len(response) - NUMBER_OF_CRC_BYTES)\n    payload = response[firstDatabyteNumber:lastDatabyteNumber]\n    return payload", "docstring": "Extract the payload data part from the slave's response.\n\nArgs:\n* response (str): The raw response byte string from the slave.\n* slaveaddress (int): The adress of the slave. Used here for error checking only.\n* mode (str): The modbus protcol mode (MODE_RTU or MODE_ASCII)\n* functioncode (int): Used here for error checking only.\n\nReturns:\nThe payload part of the *response* string.\n\nRaises:\nValueError, TypeError. Raises an exception if there is any problem with the received address, the functioncode or the CRC.\n\nThe received response should have the format:\n* RTU Mode: slaveaddress byte + functioncode byte + payloaddata + CRC (which is two bytes)\n* ASCII Mode: header (:) + slaveaddress byte + functioncode byte + payloaddata + LRC (which is two characters) + footer (CRLF)\n\nFor development purposes, this function can also be used to extract the payload from the request sent TO the slave.", "source": "codesearchnet"}
{"code": "def list(self, pattern='*'):\n    \n    if self._group_dict is None:\n      self._group_dict = collections.OrderedDict(\n          (group.id, group) for group in self._client.list_groups())\n\n    return [group for group in self._group_dict.values()\n            if fnmatch.fnmatch(group.display_name, pattern)]", "docstring": "Returns a list of groups that match the filters.\n\nArgs:\npattern: An optional pattern to filter the groups based on their display\nname. This can include Unix shell-style wildcards. E.g.\n``\"Production*\"``.\n\nReturns:\nA list of Group objects that match the filters.", "source": "juraj-google-style"}
{"code": "def _get_rest_doc(self, request, start_response):\n    api = request.body_json['api']\n    version = request.body_json['version']\n    generator = discovery_generator.DiscoveryGenerator(request=request)\n    services = [s for s in self._backend.api_services if ((s.api_info.name == api) and (s.api_info.api_version == version))]\n    doc = generator.pretty_print_config_to_json(services)\n    if (not doc):\n        error_msg = ('Failed to convert .api to discovery doc for version %s of api %s' % (version, api))\n        _logger.error('%s', error_msg)\n        return util.send_wsgi_error_response(error_msg, start_response)\n    return self._send_success_response(doc, start_response)", "docstring": "Sends back HTTP response with API directory.\n\nThis calls start_response and returns the response body.  It will return\nthe discovery doc for the requested api/version.\n\nArgs:\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string, the response body.", "source": "codesearchnet"}
{"code": "def _data_from_df(df):\n        \n        _df = df.copy()\n\n        \n        if isinstance(df.columns, pd.MultiIndex):\n            try:\n                _df.columns = ['_'.join(col) for col in _df.columns.values]\n            except TypeError:\n                raise TypeError('Could not flatten MultiIndex columns. '\n                                'use string column names or flatten manually')\n        \n        if isinstance(df.columns, pd.CategoricalIndex):\n            _df.columns = df.columns.tolist()\n        \n        index_name = ColumnDataSource._df_index_name(df)\n        if index_name == 'index':\n            _df.index = pd.Index(_df.index.values)\n        else:\n            _df.index = pd.Index(_df.index.values, name=index_name)\n        _df.reset_index(inplace=True)\n\n        tmp_data = {c: v.values for c, v in _df.iteritems()}\n\n        new_data = {}\n        for k, v in tmp_data.items():\n            new_data[k] = v\n\n        return new_data", "docstring": "Create a ``dict`` of columns from a Pandas ``DataFrame``,\nsuitable for creating a ColumnDataSource.\n\nArgs:\ndf (DataFrame) : data to convert\n\nReturns:\ndict[str, np.array]", "source": "juraj-google-style"}
{"code": "def cdot(L, out=None):\n    L = asarray(L, float)\n    layout_error = 'Wrong matrix layout.'\n    if (L.ndim != 2):\n        raise ValueError(layout_error)\n    if (L.shape[0] != L.shape[1]):\n        raise ValueError(layout_error)\n    if (out is None):\n        out = empty((L.shape[0], L.shape[1]), float)\n    return einsum('ij,kj->ik', L, L, out=out)", "docstring": "r\"\"\"Product of a Cholesky matrix with itself transposed.\n\nArgs:\nL (array_like): Cholesky matrix.\nout (:class:`numpy.ndarray`, optional): copy result to.\n\nReturns:\n:class:`numpy.ndarray`: :math:`\\mathrm L\\mathrm L^\\intercal`.", "source": "codesearchnet"}
{"code": "def _sendMouseEvent(ev, x, y, dwData=0):\n    \n    assert x != None and y != None, 'x and y cannot be set to None'\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    width, height = _size()\n    convertedX = 65536 * x \n    convertedY = 65536 * y \n    ctypes.windll.user32.mouse_event(ev, ctypes.c_long(convertedX), ctypes.c_long(convertedY), dwData, 0)", "docstring": "The helper function that actually makes the call to the mouse_event()\nwin32 function.\n\nArgs:\nev (int): The win32 code for the mouse event. Use one of the MOUSEEVENTF_*\nconstants for this argument.\nx (int): The x position of the mouse event.\ny (int): The y position of the mouse event.\ndwData (int): The argument for mouse_event()'s dwData parameter. So far\nthis is only used by mouse scrolling.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_type(name, env, non_generic):\n    if (name in env):\n        if isinstance(env[name], MultiType):\n            return clone(env[name])\n        return fresh(env[name], non_generic)\n    else:\n        print('W: Undefined symbol {0}'.format(name))\n        return TypeVariable()", "docstring": "Get the type of identifier name from the type environment env.\n\nArgs:\nname: The identifier name\nenv: The type environment mapping from identifier names to types\nnon_generic: A set of non-generic TypeVariables\n\nRaises:\nParseError: Raised if name is an undefined symbol in the type\nenvironment.", "source": "codesearchnet"}
{"code": "def _stride(stride_spec):\n    if (stride_spec is None):\n        return [1, 1, 1, 1]\n    elif isinstance(stride_spec, tf.compat.integral_types):\n        return [1, stride_spec, stride_spec, 1]\n    elif (len(stride_spec) == 1):\n        return [1, stride_spec[0], stride_spec[0], 1]\n    elif (len(stride_spec) == 2):\n        return [1, stride_spec[0], stride_spec[1], 1]\n    else:\n        assert (len(stride_spec) == 4)\n        return stride_spec", "docstring": "Expands the stride spec into a length 4 list.\n\nArgs:\nstride_spec: If length 0, 1 or 2 then assign the inner dimensions, otherwise\nreturn stride_spec if it is length 4.\nReturns:\nA length 4 list.", "source": "codesearchnet"}
{"code": "def parse_sv_frequencies(variant):\n    frequency_keys = ['clingen_cgh_benignAF', 'clingen_cgh_benign', 'clingen_cgh_pathogenicAF', 'clingen_cgh_pathogenic', 'clingen_ngi', 'clingen_ngiAF', 'swegen', 'swegenAF', 'decipherAF', 'decipher']\n    sv_frequencies = {}\n    for key in frequency_keys:\n        value = variant.INFO.get(key, 0)\n        if ('AF' in key):\n            value = float(value)\n        else:\n            value = int(value)\n        if (value > 0):\n            sv_frequencies[key] = value\n    return sv_frequencies", "docstring": "Parsing of some custom sv frequencies\n\nThese are very specific at the moment, this will hopefully get better over time when the\nfield of structural variants is more developed.\n\nArgs:\nvariant(cyvcf2.Variant)\n\nReturns:\nsv_frequencies(dict)", "source": "codesearchnet"}
{"code": "def prompt(self, message, text_input=False, timeout_s=None, cli_color=''):\n    self.start_prompt(message, text_input, cli_color)\n    return self.wait_for_prompt(timeout_s)", "docstring": "Display a prompt and wait for a response.\n\nArgs:\nmessage: A string to be presented to the user.\ntext_input: A boolean indicating whether the user must respond with text.\ntimeout_s: Seconds to wait before raising a PromptUnansweredError.\ncli_color: An ANSI color code, or the empty string.\n\nReturns:\nA string response, or the empty string if text_input was False.\n\nRaises:\nMultiplePromptsError: There was already an existing prompt.\nPromptUnansweredError: Timed out waiting for the user to respond.", "source": "codesearchnet"}
{"code": "def __init__(self, pos_filename, interval=2):\n        \n        if not pos_filename:\n            pos_filename = os.path.join(os.getcwd(),\n                                        'mysqlbinlog2blinker.binlog.pos')\n        self.pos_storage_filename = pos_filename\n        assert self.pos_storage_filename\n        self.interval = interval\n\n        self._log_file = None\n        self._log_pos = None\n        self._pos_changed = False\n\n        self.save_log_pos_thread_stop_flag = threading.Event()\n        self.save_log_pos_thread = \\\n            threading.Thread(target=self._save_log_pos_thread_runner)\n        self.save_log_pos_thread.daemon = True", "docstring": "Create instance of FileBasedBinlogPosMemory\n\nArgs:\npos_filename (str|None): position storage file. None will makes\n*mysqlbinlog2blinker.binlog.pos* at current working dir\ninterval (float): the interval in second", "source": "juraj-google-style"}
{"code": "def shapes_match(a, b):\n  \n  if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):\n    if len(a) != len(b):\n      return False\n    return all([shapes_match(ia, ib) for ia, ib in zip(a, b)])\n  elif isinstance(a, dict) and isinstance(b, dict):\n    if len(a) != len(b):\n      return False\n    match = True\n    for (ak, av), (bk, bv) in zip(a.items(), b.items()):\n      match = match and all([ak == bk and shapes_match(av, bv)])\n    return match\n  else:\n    shape_checker = shape_checkers[(type(a), type(b))]\n    return shape_checker(a, b)", "docstring": "Recursively check if shapes of object `a` and `b` match.\n\nWill walk lists, tuples and dicts.\n\nArgs:\na: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict)\nto check for matching shapes against `b`.\nb: object to check for matching shape against `a`.\n\nReturns:\nA boolean indicating whether the shapes of `a` and `b` match.", "source": "juraj-google-style"}
{"code": "def bootstrap_results(self, init_state):\n    \n    with tf.compat.v1.name_scope(\n        name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'),\n        values=[init_state]):\n      replica_results = [\n          self.replica_kernels[i].bootstrap_results(init_state)\n          for i in range(self.num_replica)\n      ]\n\n      init_state_parts = (\n          list(init_state)\n          if mcmc_util.is_list_like(init_state) else [init_state])\n\n      \n      replica_states = [[\n          tf.convert_to_tensor(value=s) for s in init_state_parts\n      ] for i in range(self.num_replica)]\n\n      if not mcmc_util.is_list_like(init_state):\n        replica_states = [s[0] for s in replica_states]\n\n      return ReplicaExchangeMCKernelResults(\n          replica_states=replica_states,\n          replica_results=replica_results,\n          sampled_replica_states=replica_states,\n          sampled_replica_results=replica_results,\n      )", "docstring": "Returns an object with the same type as returned by `one_step`.\n\nArgs:\ninit_state: `Tensor` or Python `list` of `Tensor`s representing the\ninitial state(s) of the Markov chain(s).\n\nReturns:\nkernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of\n`Tensor`s representing internal calculations made within this function.\nThis inculdes replica states.", "source": "juraj-google-style"}
{"code": "def dec(self, byts):\n        \n        envl = s_msgpack.un(byts)\n        iv = envl.get('iv', b'')\n        asscd = envl.get('asscd', b'')\n        data = envl.get('data', b'')\n\n        decryptor = AESGCM(self.ekey)\n\n        try:\n            data = decryptor.decrypt(iv, data, asscd)\n        except Exception:\n            logger.exception('Error decrypting data')\n            return None\n        return data", "docstring": "Decode an envelope dict and decrypt the given bytes.\n\nArgs:\nbyts (bytes): Bytes to decrypt.\n\nReturns:\nbytes: Decrypted message.", "source": "juraj-google-style"}
{"code": "def scan_storage(self, area_name, callable, start=0, stop=None):\n    if (area_name == u'storage'):\n        data = self.storage_data\n    elif (area_name == u'streaming'):\n        data = self.streaming_data\n    else:\n        raise ArgumentError(('Unknown area name in scan_storage (%s) should be storage or streaming' % area_name))\n    if (len(data) == 0):\n        return 0\n    if (stop is None):\n        stop = (len(data) - 1)\n    elif (stop >= len(data)):\n        raise ArgumentError('Given stop offset is greater than the highest offset supported', length=len(data), stop_offset=stop)\n    scanned = 0\n    for i in range(start, (stop + 1)):\n        scanned += 1\n        should_break = callable(i, data[i])\n        if (should_break is True):\n            break\n    return scanned", "docstring": "Iterate over streaming or storage areas, calling callable.\n\nArgs:\narea_name (str): Either 'storage' or 'streaming' to indicate which\nstorage area to scan.\ncallable (callable): A function that will be called as (offset, reading)\nfor each reading between start_offset and end_offset (inclusive).  If\nthe scan function wants to stop early it can return True.  If it returns\nanything else (including False or None), scanning will continue.\nstart (int): Optional offset to start at (included in scan).\nstop (int): Optional offset to end at (included in scan).\n\nReturns:\nint: The number of entries scanned.", "source": "codesearchnet"}
{"code": "def contains(self, x: int, y: int) -> bool:\n        \n        return (\n            self.x <= x < self.x + self.width\n            and self.y <= y < self.y + self.height\n        )", "docstring": "Returns True if this node contains these coordinates.\n\nArgs:\nx (int): X position to check.\ny (int): Y position to check.\n\nReturns:\nbool: True if this node contains these coordinates.\nOtherwise False.", "source": "juraj-google-style"}
{"code": "def set_generation_type(self, num_processors=(- 1), num_splits=1000, verbose=(- 1)):\n    self.parallel_input.num_processors = num_processors\n    self.parallel_input.num_splits = num_splits\n    self.parallel_input.verbose = verbose\n    return", "docstring": "Change generation type.\n\nChoose weather to generate the data in parallel or on a single processor.\n\nArgs:\nnum_processors (int or None, optional): Number of parallel processors to use.\nIf ``num_processors==-1``, this will use multiprocessing module and use\navailable cpus. If single generation is desired, num_processors is set\nto ``None``. Default is -1.\nnum_splits (int, optional): Number of binaries to run during each process.\nDefault is 1000.\nverbose (int, optional): Describes the notification of when parallel processes\nare finished. Value describes cadence of process completion notifications.\nIf ``verbose == -1``, no notifications are given. Default is -1.", "source": "codesearchnet"}
{"code": "def __init__(self, name: Text, num_replicas: int, pivot: ops.Operation):\n    super(TPUReplicateContext, self).__init__()\n    self._num_replicas = num_replicas\n    self._outer_device_function_stack = None\n    self._oc_dev_fn_stack = None\n    self._outside_compilation_cluster = None\n    self._is_map_outside_compilation = False\n    self._outside_compilation_v2_context = None\n    self._outside_compilation_counter = 0\n    self._in_gradient_colocation = None\n    self._gradient_colocation_stack = []\n    self._host_compute_core = []\n    self._name = name\n    self._tpu_replicate_attr = attr_value_pb2.AttrValue(s=compat.as_bytes(self._name))\n    self._unsupported_ops = []\n    self._pivot = pivot\n    self._replicated_vars = {}", "docstring": "Builds a new TPUReplicateContext.\n\nArgs:\nname: a unique name for the context, used to populate the `_tpu_replicate`\nattribute.\nnum_replicas: an integer that gives the number of replicas for the\ncomputation.\npivot: a pivot node. Nodes in the TPUReplicateContext that do not have any\ninputs will have a control dependency on the pivot node. This ensures\nthat nodes are correctly included in any enclosing control flow\ncontexts.", "source": "github-repos"}
{"code": "def categorical_accuracy(y_true, y_pred):\n    return math_ops.cast(math_ops.equal(math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)), backend.floatx())", "docstring": "Calculates how often predictions match one-hot labels.\n\nStandalone usage:\n>>> y_true = [[0, 0, 1], [0, 1, 0]]\n>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]\n>>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)\n>>> assert m.shape == (2,)\n>>> m.numpy()\narray([0., 1.], dtype=float32)\n\nYou can provide logits of classes as `y_pred`, since argmax of\nlogits and probabilities are same.\n\nArgs:\ny_true: One-hot ground truth values.\ny_pred: The prediction values.\n\nReturns:\nCategorical accuracy values.", "source": "github-repos"}
{"code": "def build_album_art_full_uri(self, url):\n    if (not url.startswith(('http:', 'https:'))):\n        url = ((('http:\n    return url", "docstring": "Ensure an Album Art URI is an absolute URI.\n\nArgs:\nurl (str): the album art URI.\n\nReturns:\nstr: An absolute URI.", "source": "codesearchnet"}
{"code": "def verify(self, message, signature):\n        \n        message = _helpers._to_bytes(message, encoding='utf-8')\n        try:\n            return rsa.pkcs1.verify(message, signature, self._pubkey)\n        except (ValueError, rsa.pkcs1.VerificationError):\n            return False", "docstring": "Verifies a message against a signature.\n\nArgs:\nmessage: string or bytes, The message to verify. If string, will be\nencoded to bytes as utf-8.\nsignature: string or bytes, The signature on the message. If\nstring, will be encoded to bytes as utf-8.\n\nReturns:\nTrue if message was signed by the private key associated with the\npublic key that this object was constructed with.", "source": "juraj-google-style"}
{"code": "def load_supported_categories(categories_path: str):\n    global _load_supported_categories\n    if _load_supported_categories:\n        return\n    with open(categories_path, encoding='utf-8') as supported_categories:\n        yaml_object = yaml.load(supported_categories.read(), Loader=yaml.SafeLoader)\n    Tag.Config.supported_categories = yaml_object[TagFields.categories]\n    _load_supported_categories = True", "docstring": "Load the list of supported categories from categories_path file\ninto Tag model config\n\nArgs:\ncategories_path: path to the file with categories.", "source": "github-repos"}
{"code": "def detect_format(program, attributes) -> str:\n\n    def fmt(attr):\n        '\\n            For internal use only.\\n        '\n        return ((attr.array_length * attr.dimension), attr.shape)\n    return ' '.join((('%d%s' % fmt(program[a])) for a in attributes))", "docstring": "Detect format for vertex attributes.\nThe format returned does not contain padding.\n\nArgs:\nprogram (Program): The program.\nattributes (list): A list of attribute names.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def clear_cached_modules(modules: py_utils.StrOrStrList, *, recursive: bool=True, verbose: bool=False, invalidate: bool=True) -> None:\n    modules_to_clear = get_module_names(modules, recursive=recursive)\n    if not modules_to_clear:\n        return\n    modules = set(py_utils.normalize_str_to_list(modules))\n    for module_name in modules_to_clear:\n        if verbose:\n            print(f'Clearing {module_name}')\n        invalidate_curr = invalidate and (not module_name.startswith('etils'))\n        if invalidate_curr or module_name in modules:\n            _clear_parent_module_attr(module_name)\n        if invalidate_curr:\n            _invalidate_module(sys.modules[module_name])\n        del sys.modules[module_name]\n    for cleanup in typing._cleanups:\n        cleanup()", "docstring": "Clear the `sys.modules` cache.\n\nHelpful for interactive development to reload from Jupyter notebook the\ncode we're currently editing (without having to restart the notebook kernel).\n\nUsage:\n\n```python\necolab.clear_cached_modules(['visu3d', 'other_module.submodule'])\n\nimport visu3d\nimport other_module.submodule\n```\n\nArgs:\nmodules: List of modules to clear\nrecursive: Whether submodules are cleared too\nverbose: Whether to display the list of modules cleared.\ninvalidate: If `True` (default), the instances of the module will raise an\nerror when used (to avoid using 2 versions of a module at the same time)", "source": "github-repos"}
{"code": "def BuildFindSpecs(self, environment_variables=None):\n    path_attributes = {}\n    if environment_variables:\n        for environment_variable in environment_variables:\n            attribute_name = environment_variable.name.lower()\n            attribute_value = environment_variable.value\n            if (not isinstance(attribute_value, py2to3.STRING_TYPES)):\n                continue\n            if ((len(attribute_value) > 2) and (attribute_value[1] == ':')):\n                (_, _, attribute_value) = attribute_value.rpartition(':')\n            if attribute_value.startswith('\\\\'):\n                attribute_value = attribute_value.replace('\\\\', '/')\n            path_attributes[attribute_name] = attribute_value\n    find_specs = []\n    with open(self._path, 'r') as file_object:\n        for line in file_object:\n            line = line.strip()\n            if line.startswith('\n                continue\n            if path_attributes:\n                try:\n                    line = line.format(**path_attributes)\n                except KeyError as exception:\n                    logger.error('Unable to expand path filter: {0:s} with error: {1!s}'.format(line, exception))\n                    continue\n            if (not line.startswith('/')):\n                logger.warning('The path filter must be defined as an absolute path: {0:s}'.format(line))\n                continue\n            path_segments = line.split('/')\n            path_segments.pop(0)\n            if (not path_segments[(- 1)]):\n                logger.warning('Empty last path segment in path filter: {0:s}'.format(line))\n                continue\n            find_spec = file_system_searcher.FindSpec(location_regex=path_segments, case_sensitive=False)\n            find_specs.append(find_spec)\n    return find_specs", "docstring": "Build find specification from a filter file.\n\nArgs:\nenvironment_variables (Optional[list[EnvironmentVariableArtifact]]):\nenvironment variables.\n\nReturns:\nlist[dfvfs.FindSpec]: find specification.", "source": "codesearchnet"}
{"code": "def __init__(self, key_dtype, value_dtype):\n    self._key_dtype = dtypes.as_dtype(key_dtype)\n    self._value_dtype = dtypes.as_dtype(value_dtype)\n    super(LookupInterface, self).__init__()", "docstring": "Construct a lookup table interface.\n\nArgs:\nkey_dtype: The table key type.\nvalue_dtype: The table value type.", "source": "github-repos"}
{"code": "def market(self, accountID, **kwargs):\n        \n        return self.create(\n            accountID,\n            order=MarketOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to create a Market Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a MarketOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def sget_timestamp(self, cycle, step, dataset_number=None):\n    dataset_number = self._validate_dataset_number(dataset_number)\n    if (dataset_number is None):\n        self._report_empty_dataset()\n        return\n    cycle_index_header = self.headers_normal.cycle_index_txt\n    timestamp_header = self.headers_normal.test_time_txt\n    step_index_header = self.headers_normal.step_index_txt\n    test = self.datasets[dataset_number].dfdata\n    if isinstance(step, (list, tuple)):\n        warnings.warn(f'The varialbe step is a list.Should be an integer.{step}')\n        step = step[0]\n    c = test[((test[cycle_index_header] == cycle) & (test[step_index_header] == step))]\n    if (not self.is_empty(c)):\n        t = c[timestamp_header]\n        return t\n    else:\n        return pd.Series()", "docstring": "Returns timestamp for cycle, step.\n\nConvinience function; same as issuing\ndfdata[(dfdata[cycle_index_header] == cycle) &\n(dfdata[step_index_header] == step)][timestamp_header]\n\nArgs:\ncycle: cycle number\nstep: step number\ndataset_number: the dataset number (automatic selection if None)\n\nReturns:\npandas.Series", "source": "codesearchnet"}
{"code": "def assert_no_text(self, *args, **kwargs):\n    query = TextQuery(*args, **kwargs)\n\n    @self.synchronize(wait=query.wait)\n    def assert_no_text():\n        count = query.resolve_for(self)\n        if (matches_count(count, query.options) and ((count > 0) or expects_none(query.options))):\n            raise ExpectationNotMet(query.negative_failure_message)\n        return True\n    return assert_no_text()", "docstring": "Asserts that the page or current node doesn't have the given text content, ignoring any\nHTML tags.\n\nArgs:\n*args: Variable length argument list for :class:`TextQuery`.\n**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "codesearchnet"}
{"code": "def create_ondemand_streaming_locator(access_token, encoded_asset_id, pid, starttime=None):\n    \n    path = '/Locators'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    if starttime is None:\n        body = '{ \\\n\t\t\t\"AccessPolicyId\":\"' + pid + '\", \\\n\t\t\t\"AssetId\":\"' + encoded_asset_id + '\", \\\n\t\t\t\"Type\": \"2\" \\\n    }'\n    else:\n        body = '{ \\\n\t\t\t\"AccessPolicyId\":\"' + pid + '\", \\\n\t\t\t\"AssetId\":\"' + encoded_asset_id + '\", \\\n\t\t\t\"StartTime\":\"' + str(starttime) + '\", \\\n\t\t\t\"Type\": \"2\" \\\n\t\t}'\n    return do_ams_post(endpoint, path, body, access_token, \"json_only\")", "docstring": "Create Media Service OnDemand Streaming Locator.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nencoded_asset_id (str): A Media Service Encoded Asset ID.\npid (str): A Media Service Encoded PID.\nstarttime (str): A Media Service Starttime.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def load_all_yamls(cls, directories):\n        \n        yaml_files = []\n        loaded_yamls = {}\n\n        for d in directories:\n            if d.startswith('/home') and not os.path.exists(d):\n                os.makedirs(d)\n            for dirname, subdirs, files in os.walk(d):\n                yaml_files.extend(map(lambda x: os.path.join(dirname, x),\n                                      filter(lambda x: x.endswith('.yaml'), files)))\n\n        for f in yaml_files:\n            loaded_yamls[f] = cls.load_yaml_by_path(f)\n\n        return loaded_yamls", "docstring": "Loads yaml files from all given directories.\n\nArgs:\ndirectories: list of directories to search\nReturns:\ndict of {fullpath: loaded_yaml_structure}", "source": "juraj-google-style"}
{"code": "async def client_event_handler(self, client_id, event_tuple, user_data):\n    (conn_string, event_name, _event) = event_tuple\n    self._logger.debug('Ignoring event %s from device %s forwarded for client %s', event_name, conn_string, client_id)\n    return None", "docstring": "Method called to actually send an event to a client.\n\nUsers of this class should override this method to actually forward\ndevice events to their clients.  It is called with the client_id\npassed to (or returned from) :meth:`setup_client` as well as the\nuser_data object that was included there.\n\nThe event tuple is a 3-tuple of:\n\n- connection string\n- event name\n- event object\n\nIf you override this to be acoroutine, it will be awaited.  The\ndefault implementation just logs the event.\n\nArgs:\nclient_id (str): The client_id that this event should be forwarded\nto.\nevent_tuple (tuple): The connection_string, event_name and event_object\nthat should be forwarded.\nuser_data (object): Any user data that was passed to setup_client.", "source": "codesearchnet"}
{"code": "class FlaxSampleOutput(ModelOutput):\n    sequences: Optional[jnp.ndarray] = None", "docstring": "Flax Base class for outputs of decoder-only generation models using sampling.\n\n\nArgs:\nsequences (`jnp.ndarray` of shape `(batch_size, max_length)`):\nThe generated sequences.", "source": "github-repos"}
{"code": "def riak_multi_get(self, key_list_tuple):\n        \n        pool = PyokoMG()\n        objs = self._client.multiget(key_list_tuple, pool=pool)\n        pool.stop()\n        return objs", "docstring": "Sends given tuples of list to multiget method and took riak objs' keys and data. For each\nmultiget call, separate pools are used and after execution, pools are stopped.\nArgs:\nkey_list_tuple(list of tuples): [('bucket_type','bucket','riak_key')]\n\nExample:\n[('models','personel','McAPchPZzB6RVJ8QI2XSVQk4mUR')]\n\nReturns:\nobjs(tuple): obj's key and obj's value", "source": "juraj-google-style"}
{"code": "def heightmap_add_hill(\n    hm: np.ndarray, x: float, y: float, radius: float, height: float\n) -> None:\n    \n    lib.TCOD_heightmap_add_hill(_heightmap_cdata(hm), x, y, radius, height)", "docstring": "Add a hill (a half spheroid) at given position.\n\nIf height == radius or -radius, the hill is a half-sphere.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nx (float): The x position at the center of the new hill.\ny (float): The y position at the center of the new hill.\nradius (float): The size of the new hill.\nheight (float): The height or depth of the new hill.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, freevars, extra_locals):\n    self._name = name\n    self._freevars = freevars\n    self._extra_locals = extra_locals\n    self._unbound_factory = None\n    self.module = None\n    self.source_map = None", "docstring": "Creates a new factory for a Python function.\n\nArgs:\nname: The function name.\nfreevars: The list of non-global free variables for the function.\nextra_locals: Dict[Text, Any], names and values for custom variables that\nare accessible to the generated code as local variables.", "source": "github-repos"}
{"code": "def get_cached_or_new(url, new=False):\n    \n    garbage_collection()\n\n    old_req = DATABASE.get(url)\n\n    if old_req and not new:\n        return old_req\n\n    if not (url.startswith(\"http:\n        raise ValueError(\"Invalid URL `%s`!\" % url)\n\n    req = RequestInfo(url=url)\n    DATABASE[url] = req\n\n    return req", "docstring": "Look into the database and return :class:`RequestInfo` if the `url` was\nalready analyzed, or create and return new instance, if not.\n\nIf the `new` is set to True, always create new instance.\n\nArgs:\nurl (str): URL of the analyzed resource.\nnew (bool, default False): Force new instance?\n\nReturns:\nobj: :class:`RequestInfo` instance.", "source": "juraj-google-style"}
{"code": "def fill_rect(self, rect):\n    check_int_err(lib.SDL_RenderFillRect(self._ptr, rect._ptr))", "docstring": "Fill a rectangle on the current rendering target with the drawing color.\n\nArgs:\nrect (Rect): The destination rectangle, or None to fill the entire rendering target.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def EnsureAstName(ast, module_name, fix=False):\n    raw_ast = ast.ast\n    if fix and module_name != raw_ast.name:\n        ast = ast.Replace(class_type_nodes=None)\n        ast = ast.Replace(ast=raw_ast.Visit(visitors.RenameModuleVisitor(raw_ast.name, module_name)))\n    else:\n        assert module_name == raw_ast.name\n    return ast", "docstring": "Verify that serializable_ast has the name module_name, or repair it.\n\nArgs:\nast: An instance of SerializableAst.\nmodule_name: The name under which ast.ast should be loaded.\nfix: If this function should repair the wrong name.\n\nReturns:\nThe updated SerializableAst.", "source": "github-repos"}
{"code": "def _show_tag_sets(saved_model_dir):\n    tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)\n    print('The given SavedModel contains the following tag-sets:')\n    for tag_set in sorted(tag_sets):\n        print('%r' % ', '.join(sorted(tag_set)))", "docstring": "Prints the tag-sets stored in SavedModel directory.\n\nPrints all the tag-sets for MetaGraphs stored in SavedModel directory.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect.", "source": "github-repos"}
{"code": "def transmute_sites(self, old_site_label, new_site_label, n_sites_to_change):\n    selected_sites = self.select_sites(old_site_label)\n    for site in random.sample(selected_sites, n_sites_to_change):\n        site.label = new_site_label\n    self.site_labels = set([site.label for site in self.sites])", "docstring": "Selects a random subset of sites with a specific label and gives them a different label.\n\nArgs:\nold_site_label (String or List(String)): Site label(s) of the sites to be modified..\nnew_site_label (String):                 Site label to be applied to the modified sites.\nn_sites_to_change (Int):                 Number of sites to modify.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def output_types(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)", "docstring": "Returns the type of each component of an element of this iterator.\n\nReturns:\nA (nested) structure of `tf.DType` objects corresponding to each component\nof an element of this dataset.", "source": "github-repos"}
{"code": "def _CheckFormatTokenSubtypes(self, llines, list_of_expected):\n    actual = []\n    for lline in llines:\n        filtered_values = [(ft.value, ft.subtypes) for ft in lline.tokens if ft.name not in pytree_utils.NONSEMANTIC_TOKENS]\n        if filtered_values:\n            actual.append(filtered_values)\n    self.assertEqual(list_of_expected, actual)", "docstring": "Check that the tokens in the LogicalLines have the expected subtypes.\n\nArgs:\nllines: list of LogicalLine.\nlist_of_expected: list of (name, subtype) pairs. Non-semantic tokens are\nfiltered out from the expected values.", "source": "github-repos"}
{"code": "def _dynamic_range_quantize(src_saved_model_path: str, dst_saved_model_path: str, quantization_options: _QuantizationOptions) -> autotrackable.AutoTrackable:\n    mode_str = 'dynamic-range quantization'\n    if _is_qat_saved_model(src_saved_model_path):\n        raise ValueError('The models trained with quantization-aware training (QAT) is not supported for %s.' % mode_str)\n    logging.info('Running post-training %s on model: %s', mode_str, src_saved_model_path)\n    logging.info('QuantizationOptions: \\n%s', quantization_options)\n    signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, quantization_options.signature_keys, quantization_options.tags)\n    pywrap_quantize_model.quantize_ptq_dynamic_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quantization_options.SerializeToString(), signature_keys=list(quantization_options.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())\n    return saved_model_load.load(dst_saved_model_path)", "docstring": "Quantizes the given SavedModel via post-training dynamic range quantization.\n\nArgs:\nsrc_saved_model_path: Path to the saved model.\ndst_saved_model_path: The path to save the output SavedModel. The directory\nwill be overwritten if not empty.\nquantization_options: QuantizationOptions proto describing quantization\nrelated config.\n\nReturns:\nA SavedModel object with TF quantization applied.\n\nRaises:\nValueError: when the model is QAT model.", "source": "github-repos"}
{"code": "def _attempt_shard_retry(self, shard_state, tstate):\n    \n    shard_attempts = shard_state.retries + 1\n\n    if shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS:\n      logging.warning(\n          \"Shard attempt %s exceeded %s max attempts.\",\n          shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)\n      return self._TASK_DIRECTIVE.FAIL_TASK\n    if tstate.output_writer and (\n        not tstate.output_writer._supports_shard_retry(tstate)):\n      logging.warning(\"Output writer %s does not support shard retry.\",\n                      tstate.output_writer.__class__.__name__)\n      return self._TASK_DIRECTIVE.FAIL_TASK\n\n    shard_state.reset_for_retry()\n    logging.warning(\"Shard %s attempt %s failed with up to %s attempts.\",\n                    shard_state.shard_id,\n                    shard_state.retries,\n                    parameters.config.SHARD_MAX_ATTEMPTS)\n    output_writer = None\n    if tstate.output_writer:\n      output_writer = tstate.output_writer.create(\n          tstate.mapreduce_spec, shard_state.shard_number, shard_attempts + 1)\n    tstate.reset_for_retry(output_writer)\n    return self._TASK_DIRECTIVE.RETRY_SHARD", "docstring": "Whether to retry shard.\n\nThis method may modify shard_state and tstate to prepare for retry or fail.\n\nArgs:\nshard_state: model.ShardState for current shard.\ntstate: model.TransientShardState for current shard.\n\nReturns:\nA _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.\nFAIL_TASK otherwise.", "source": "juraj-google-style"}
{"code": "def on_predict_batch_begin(self, batch, logs=None):", "docstring": "Called at the beginning of a batch in `predict` methods.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict, contains the return value of `model.predict_step`,\nit typically returns a dict with a key 'outputs' containing\nthe model's outputs.", "source": "github-repos"}
{"code": "def __init__(self, path_elements: List[Union[str, int]], parent: Optional['Key']=None, project: Optional[str]=None, namespace: Optional[str]=None):\n    self.path_elements = tuple(path_elements)\n    self.parent = parent\n    self.namespace = namespace\n    self.project = project", "docstring": "Represents a Datastore key.\n\nThe partition ID is represented by its components: namespace and project.\nIf key has a parent, project and namespace should either be unset or match\nthe parent's.\n\nArgs:\npath_elements: (list of str and int) Key path: an alternating sequence of\nkind and identifier. The kind must be of type ``str`` and identifier may\nbe a ``str`` or an ``int``.\nIf the last identifier is omitted this is an incomplete key, which is\nunsupported in ``WriteToDatastore`` and ``DeleteFromDatastore``.\nSee :class:`google.cloud.datastore.key.Key` for more details.\nparent: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`)\n(optional) Parent for this key.\nproject: (str) Project ID. Required unless set by parent.\nnamespace: (str) (optional) Namespace ID", "source": "github-repos"}
{"code": "def vec_size(nodes, s_val):\n    r\n    result_vec = evaluate_multi(nodes, np.asfortranarray([s_val]))\n    \n    return np.linalg.norm(result_vec[:, 0], ord=2)", "docstring": "r\"\"\"Compute :math:`\\|B(s)\\|_2`.\n\n.. note::\n\nThis is a helper for :func:`_compute_length` and does not have\na Fortran speedup.\n\nIntended to be used with ``functools.partial`` to fill in the\nvalue of ``nodes`` and create a callable that only accepts ``s_val``.\n\nArgs:\nnodes (numpy.ndarray): The nodes defining a curve.\ns_val (float): Parameter to compute :math:`B(s)`.\n\nReturns:\nfloat: The norm of :math:`B(s)`.", "source": "juraj-google-style"}
{"code": "def pre_ref_resolution_callback(self, other_model):\n        \n        \n        filename = other_model._tx_filename\n        assert (filename)\n        other_model._tx_model_repository = \\\n            GlobalModelRepository(self.all_models)\n        self.all_models.filename_to_model[filename] = other_model", "docstring": "(internal: used to store a model after parsing into the repository)\n\nArgs:\nother_model: the parsed model\n\nReturns:\nnothing", "source": "juraj-google-style"}
{"code": "def get_indices(self, axis=0, index_func=None, old_blocks=None):\n    ErrorMessage.catch_bugs_and_request_email((not callable(index_func)))\n    func = self.preprocess_func(index_func)\n    if (axis == 0):\n        new_indices = ([idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [])\n        if (old_blocks is not None):\n            cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum()\n        else:\n            cumulative_block_lengths = np.array(self.block_lengths).cumsum()\n    else:\n        new_indices = ([idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [])\n        if (old_blocks is not None):\n            cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum()\n        else:\n            cumulative_block_lengths = np.array(self.block_widths).cumsum()\n    full_indices = (new_indices[0] if len(new_indices) else new_indices)\n    if (old_blocks is not None):\n        for i in range(len(new_indices)):\n            if ((i == 0) or (len(new_indices[i]) == 0)):\n                continue\n            try:\n                append_val = (new_indices[i] + cumulative_block_lengths[(i - 1)])\n            except TypeError:\n                append_val = new_indices[i]\n            full_indices = full_indices.append(append_val)\n    else:\n        full_indices = full_indices.append(new_indices[1:])\n    return full_indices", "docstring": "This gets the internal indices stored in the partitions.\n\nNote: These are the global indices of the object. This is mostly useful\nwhen you have deleted rows/columns internally, but do not know\nwhich ones were deleted.\n\nArgs:\naxis: This axis to extract the labels. (0 - index, 1 - columns).\nindex_func: The function to be used to extract the function.\nold_blocks: An optional previous object that this object was\ncreated from. This is used to compute the correct offsets.\n\nReturns:\nA Pandas Index object.", "source": "codesearchnet"}
{"code": "def InnermostClass(self):\n    for i in range(len(self.stack), 0, (- 1)):\n        classinfo = self.stack[(i - 1)]\n        if isinstance(classinfo, _ClassInfo):\n            return classinfo\n    return None", "docstring": "Get class info on the top of the stack.\n\nReturns:\nA _ClassInfo object if we are inside a class, or None otherwise.", "source": "codesearchnet"}
{"code": "def get(self, uri: str) -> Optional[_T]:\n    resource = self.resources_by_uri.get(uri)\n    if resource is None:\n        return None\n    if isinstance(resource, self.proto_cls):\n        return resource\n    parsed = self._parse_resource(uri, resource)\n    self.resources_by_uri[uri] = parsed\n    return parsed", "docstring": "Retrieves a protocol buffer for the resource with the given uri.\n\nArgs:\nuri: URI of the resource to retrieve.\n\nReturns:\nA protocol buffer for the resource or `None` if the `uri` is not present\nin the ResourceCollection.\n\nRaises:\nRuntimeError: The resource could not be found or the retrieved resource\ndid not have the expected URL. The .zip file may have changed on disk.", "source": "github-repos"}
{"code": "def _read_arg(arg):\n    if (arg is None):\n        arg_out = arg\n    else:\n        if ((len(arg) == 1) and os.path.exists(arg[0])):\n            arg_out = grp.read(arg[0])\n        else:\n            arg_out = arg\n        assert isinstance(arg_out, list), 'arg_out must be a list.'\n        assert (type(arg_out[0]) == str), 'arg_out must be a list of strings.'\n    return arg_out", "docstring": "If arg is a list with 1 element that corresponds to a valid file path, use\nset_io.grp to read the grp file. Otherwise, check that arg is a list of strings.\n\nArgs:\narg (list or None)\n\nReturns:\narg_out (list or None)", "source": "codesearchnet"}
{"code": "def folderExist(self, name, folders):\n        \n        if name is not None and name != '':\n\n            folderID = None\n\n            for folder in folders:\n                if folder['title'].lower() == name.lower():\n                    return True\n\n            del folders\n\n            return folderID\n\n        else:\n            return False", "docstring": "Determines if a folder exists, case insensitively.\n\nArgs:\nname (str): The name of the folder to check.\nfolders (list): A list of folder dicts to check against. The dicts must contain\nthe key:value pair ``title``.\nReturns:\nbool: ``True`` if the folder exists in the list, ``False`` otherwise.", "source": "juraj-google-style"}
{"code": "def save(self, config=None):\n    if (config is not None):\n        clist = [config]\n    else:\n        clist = [self._system_config, self._global_config, self._repo_config, self._local_config]\n    for conf in clist:\n        if (conf.filename is None):\n            continue\n        try:\n            logger.debug(\"Writing '{}'.\".format(conf.filename))\n            dname = os.path.dirname(os.path.abspath(conf.filename))\n            try:\n                os.makedirs(dname)\n            except OSError as exc:\n                if (exc.errno != errno.EEXIST):\n                    raise\n            conf.write()\n        except Exception as exc:\n            msg = \"failed to write config '{}'\".format(conf.filename)\n            raise ConfigError(msg, exc)", "docstring": "Saves config to config files.\n\nArgs:\nconfig (configobj.ConfigObj): optional config object to save.\n\nRaises:\ndvc.config.ConfigError: thrown if failed to write config file.", "source": "codesearchnet"}
{"code": "def lineno(self):\n    return self.first.lineno", "docstring": "Return the line number of this logical line.\n\nReturns:\nThe line number of the first token in this logical line.", "source": "github-repos"}
{"code": "def convert_md_to_rst(md_path, rst_temp_path):\n    \n    \n    command = \"pandoc --write=rst --output=%s %s\" % (rst_temp_path, md_path)\n    print(\"converting with pandoc: %s to %s\\n-->%s\" % (md_path, rst_temp_path,\n                                                       command))\n\n    if os.path.exists(rst_temp_path):\n        os.remove(rst_temp_path)\n\n    os.system(command)\n\n    if not os.path.exists(rst_temp_path):\n        s = (\"Error running: %s\\n\"\n             \"  Did you install pandoc per the %s docstring?\" % (command,\n                                                                 __file__))\n        sys.exit(s)\n\n    return read(rst_temp_path)", "docstring": "Convert the contents of a file from Markdown to reStructuredText.\n\nReturns the converted text as a Unicode string.\n\nArguments:\n\nmd_path: a path to a UTF-8 encoded Markdown file to convert.\n\nrst_temp_path: a temporary path to which to write the converted contents.", "source": "juraj-google-style"}
{"code": "def close(self):\n    if self.reuse:\n        logger.debug('Ipcontroller not shutting down: reuse enabled')\n        return\n    if (self.mode == 'manual'):\n        logger.debug('Ipcontroller not shutting down: Manual mode')\n        return\n    try:\n        pgid = os.getpgid(self.proc.pid)\n        os.killpg(pgid, signal.SIGTERM)\n        time.sleep(0.2)\n        os.killpg(pgid, signal.SIGKILL)\n        try:\n            self.proc.wait(timeout=1)\n            x = self.proc.returncode\n            if (x == 0):\n                logger.debug('Controller exited with {0}'.format(x))\n            else:\n                logger.error('Controller exited with {0}. May require manual cleanup'.format(x))\n        except subprocess.TimeoutExpired:\n            logger.warn('Ipcontroller process:{0} cleanup failed. May require manual cleanup'.format(self.proc.pid))\n    except Exception as e:\n        logger.warn('Failed to kill the ipcontroller process[{0}]: {1}'.format(self.proc.pid, e))", "docstring": "Terminate the controller process and its child processes.\n\nArgs:\n- None", "source": "codesearchnet"}
{"code": "def no_company_with_insufficient_companies_house_data(value):\n    \n\n    for prefix, name in company_types_with_insufficient_companies_house_data:\n        if value.upper().startswith(prefix):\n            raise ValidationError(\n                MESSAGE_INSUFFICIENT_DATA, params={'name': name}\n            )", "docstring": "Confirms that the company number is not for for a company that\nCompanies House does not hold information on.\n\nArgs:\nvalue (string): The company number to check.\n\nRaises:\ndjango.forms.ValidationError", "source": "juraj-google-style"}
{"code": "def _poll_once(self, timeout_ms, max_records):\n    self._coordinator.poll()\n    if (not self._subscription.has_all_fetch_positions()):\n        self._update_fetch_positions(self._subscription.missing_fetch_positions())\n    (records, partial) = self._fetcher.fetched_records(max_records)\n    if records:\n        if (not partial):\n            self._fetcher.send_fetches()\n        return records\n    self._fetcher.send_fetches()\n    timeout_ms = min(timeout_ms, (self._coordinator.time_to_next_poll() * 1000))\n    self._client.poll(timeout_ms=timeout_ms)\n    if self._coordinator.need_rejoin():\n        return {}\n    (records, _) = self._fetcher.fetched_records(max_records)\n    return records", "docstring": "Do one round of polling. In addition to checking for new data, this does\nany needed heart-beating, auto-commits, and offset updates.\n\nArguments:\ntimeout_ms (int): The maximum time in milliseconds to block.\n\nReturns:\ndict: Map of topic to list of records (may be empty).", "source": "codesearchnet"}
{"code": "def listen_tcp(cls, host='', port=0, echo=False):\n        \n\n        return cls(TCPServerSocketChannel(host, port), echo=echo)", "docstring": "Set up a :class:`TCPServerSocketChannel` and create a :class:`Flow`\ninstance for it.\n\nArgs:\nhost(str): The hostname or IP address to bind to.\nport(int): The port number to listen on.\necho(bool): Whether to echo read/written data to stdout by default.\n\nReturns:\n:class:`Flow`: A Flow instance initialised with the TCP socket\nchannel.", "source": "juraj-google-style"}
{"code": "def getModelSummaryAsGeoJson(self, session, withStreamNetwork=True, withNodes=False):\n        \n        \n        watershedMaskCard = self.getCard('WATERSHED_MASK')\n        maskFilename = watershedMaskCard.value\n        maskExtension = maskFilename.strip('\"').split('.')[1]\n\n        maskMap = session.query(RasterMapFile).\\\n                          filter(RasterMapFile.projectFile == self).\\\n                          filter(RasterMapFile.fileExtension == maskExtension).\\\n                          one()\n\n        \n        statement = .format('raster', maskMap.tableName, maskMap.id)\n\n        result = session.execute(statement)\n\n        maskMapJsonPolygon = ''\n        for row in result:\n            maskMapJsonPolygon = row.polygon\n\n        jsonString = maskMapJsonPolygon\n\n        if withStreamNetwork:\n            \n            channelInputFile = self.channelInputFile\n\n            if channelInputFile is not None:\n                \n                jsonStreamNetwork = channelInputFile.getStreamNetworkAsGeoJson(session=session, withNodes=withNodes)\n\n                \n                featureCollection = json.loads(jsonStreamNetwork)\n                jsonMaskMapObjects = json.loads(maskMapJsonPolygon)\n\n                \n                maskFeature = {\"type\": \"Feature\",\n                               \"geometry\": jsonMaskMapObjects,\n                               \"properties\": {},\n                               \"id\": maskMap.id}\n\n                \n                tempFeatures = featureCollection['features']\n                tempFeatures.append(maskFeature)\n                featureCollection['features'] = tempFeatures\n\n                \n                jsonString = json.dumps(featureCollection)\n\n        return jsonString", "docstring": "Retrieve a GeoJSON representation of the model. Includes vectorized mask map and stream network.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nwithStreamNetwork (bool, optional): Include stream network. Defaults to True.\nwithNodes (bool, optional): Include nodes. Defaults to False.\n\nReturns:\nstr: GeoJSON string", "source": "juraj-google-style"}
{"code": "def _CheckKeyPath(self, registry_key, search_depth):\n    if (self._key_path_segments is None):\n        return False\n    if ((search_depth < 0) or (search_depth > self._number_of_key_path_segments)):\n        return False\n    if (search_depth == 0):\n        segment_name = ''\n    else:\n        segment_name = self._key_path_segments[(search_depth - 1)]\n        if self._is_regex:\n            if isinstance(segment_name, py2to3.STRING_TYPES):\n                flags = ((re.DOTALL | re.IGNORECASE) | re.UNICODE)\n                try:\n                    segment_name = '^{0:s}$'.format(segment_name)\n                    segment_name = re.compile(segment_name, flags=flags)\n                except sre_constants.error:\n                    return False\n                self._key_path_segments[(search_depth - 1)] = segment_name\n        else:\n            segment_name = segment_name.lower()\n            self._key_path_segments[(search_depth - 1)] = segment_name\n    if (search_depth > 0):\n        if self._is_regex:\n            if (not segment_name.match(registry_key.name)):\n                return False\n        elif (segment_name != registry_key.name.lower()):\n            return False\n    return True", "docstring": "Checks the key path find specification.\n\nArgs:\nregistry_key (WinRegistryKey): Windows Registry key.\nsearch_depth (int): number of key path segments to compare.\n\nReturns:\nbool: True if the Windows Registry key matches the find specification,\nFalse if not.", "source": "codesearchnet"}
{"code": "def ensure_files(self, filenames):\n    logger.debug('Testing {0} for the following files: {1}'.format(self.working_dir, filenames))\n    dircontent = os.listdir(self.working_dir)\n    for fname in filenames:\n        if (fname not in dircontent):\n            return False\n    return True", "docstring": "Checks the student submission for specific files.\n\nArgs:\nfilenames (tuple): The list of file names to be cjecked for.\n\nReturns:\nbool: Indicator if all files are found in the student archive.", "source": "codesearchnet"}
{"code": "def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT], postprocess_fn: Callable[[PredictionT], PostProcessT]):\n    self._base = base\n    self._env_vars = getattr(base, '_env_vars', {})\n    self._postprocess_fn = postprocess_fn", "docstring": "A ModelHandler that has a preprocessing function associated with it.\n\nArgs:\nbase: An implementation of the underlying model handler.\npostprocess_fn: the preprocessing function to use.", "source": "github-repos"}
{"code": "def import_file_object(filename):\n    \n    try:\n        handle = open(filename, 'r')\n        file_obj = handle.read()\n        dict_obj = json.loads(file_obj)\n\n    except IOError as e:\n        logger.critical(\n            'import_file_object: %s error opening %s' % (str(e), str(filename))\n        )\n        raise e\n    except ValueError:\n        logger.info(\n            '%s: import_file_object: %s not json. file object returned' %\n            (inspect.stack()[0][3], str(filename))\n        )\n        return file_obj    \n    return dict_obj", "docstring": "Summary:\nImports block filesystem object\nArgs:\n:filename (str): block filesystem object\nReturns:\ndictionary obj (valid json file), file data object", "source": "juraj-google-style"}
{"code": "def __init__(self, config, input_size=None):\n    super().__init__()\n    dim = config.hidden_size\n    num_heads = config.num_attention_heads\n    self.num_heads = num_heads\n    head_dim = dim \n    self.scale = head_dim ** (-0.5)\n    self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)\n    self.proj = nn.Linear(dim, dim)\n    self.use_relative_position_embeddings = config.use_relative_position_embeddings\n    if self.use_relative_position_embeddings:\n        self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))\n        self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))", "docstring": "Args:\nconfig (`VitDetConfig`):\nModel configuration.\ninput_size (`Tuple[int]`, *optional*):\nInput resolution, only required in case relative position embeddings are added.", "source": "github-repos"}
{"code": "def from_json(cls, json_data):\n        \n        if not isinstance(json_data, dict):\n            json_data = json.loads(_helpers._from_bytes(json_data))\n\n        private_key_pkcs8_pem = None\n        pkcs12_val = json_data.get(_PKCS12_KEY)\n        password = None\n        if pkcs12_val is None:\n            private_key_pkcs8_pem = json_data['_private_key_pkcs8_pem']\n            signer = crypt.Signer.from_string(private_key_pkcs8_pem)\n        else:\n            \n            \n            \n            pkcs12_val = base64.b64decode(pkcs12_val)\n            password = json_data['_private_key_password']\n            signer = crypt.Signer.from_string(pkcs12_val, password)\n\n        credentials = cls(\n            json_data['_service_account_email'],\n            signer,\n            scopes=json_data['_scopes'],\n            private_key_id=json_data['_private_key_id'],\n            client_id=json_data['client_id'],\n            user_agent=json_data['_user_agent'],\n            **json_data['_kwargs']\n        )\n        if private_key_pkcs8_pem is not None:\n            credentials._private_key_pkcs8_pem = private_key_pkcs8_pem\n        if pkcs12_val is not None:\n            credentials._private_key_pkcs12 = pkcs12_val\n        if password is not None:\n            credentials._private_key_password = password\n        credentials.invalid = json_data['invalid']\n        credentials.access_token = json_data['access_token']\n        credentials.token_uri = json_data['token_uri']\n        credentials.revoke_uri = json_data['revoke_uri']\n        token_expiry = json_data.get('token_expiry', None)\n        if token_expiry is not None:\n            credentials.token_expiry = datetime.datetime.strptime(\n                token_expiry, client.EXPIRY_FORMAT)\n        return credentials", "docstring": "Deserialize a JSON-serialized instance.\n\nInverse to :meth:`to_json`.\n\nArgs:\njson_data: dict or string, Serialized JSON (as a string or an\nalready parsed dictionary) representing a credential.\n\nReturns:\nServiceAccountCredentials from the serialized data.", "source": "juraj-google-style"}
{"code": "def wind44(msg):\n    d = hex2bin(data(msg))\n    status = int(d[4])\n    if (not status):\n        return None\n    speed = bin2int(d[5:14])\n    direction = ((bin2int(d[14:23]) * 180.0) / 256.0)\n    return (round(speed, 0), round(direction, 1))", "docstring": "Wind speed and direction.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\n(int, float): speed (kt), direction (degree)", "source": "codesearchnet"}
{"code": "def es_mapping(cls, base_class=None, role='rdf_class', **kwargs):\n        \n\n        def _prop_filter(prop, value, **kwargs):\n            \n\n            try:\n                use_prop = len(set(value.owl_inverseOf) - parent_props) > 0\n            except AttributeError:\n                use_prop = True\n            \n            \n            if prop in nested_props and use_prop:\n                return True\n            return False\n        if not base_class:\n            base_class = cls\n        es_map = {}\n        \n        if kwargs.get(\"depth\"): \n            kwargs['depth'] += 1\n            initial = False\n        else:\n            initial = True\n            kwargs['depth'] = 1\n            kwargs['class'] = cls.__name__\n            kwargs['class_obj'] = cls\n        if kwargs.get('class_obj'):\n            parent_props = set(cls.properties)\n        else:\n            parent_props = set()\n        if role == 'rdf_class':\n            es_map = {}\n            es_map = {prop: value.es_mapping(base_class) \\\n                      for prop, value in cls.properties.items()}\n\n        elif role == 'es_Nested':\n            \n            \n            if cls == base_class:\n                nested_props = LABEL_FIELDS\n            else:\n                nested_props = cls.es_defs.get('kds_esNestedProps',\n                                               list(cls.properties.keys()))\n            es_map = {prop: value.es_mapping(base_class, **kwargs) \\\n                      for prop, value in cls.properties.items() \\\n                      if _prop_filter(prop, value, **kwargs)}\n        ref_map = {\n            \"type\" : \"keyword\"\n        }\n        lower_map = {\n            \"type\": \"text\",\n            \"fields\": {\n                \"lower\": es_idx_types['es_Lower']['lower'],\n                'keyword': {'type': 'keyword'}\n            }\n        }\n        ignore_map = {\n            \"index\": False,\n            \"type\": \"text\"\n        }\n        if cls == base_class:\n            es_map['label'] = ref_map\n            es_map['value'] = lower_map\n\n        if cls.cls_defs.get('kds_storageType',[None])[0] != \"blanknode\" \\\n                and cls == base_class:\n            es_map['id'] = ref_map\n            es_map['uri'] = ref_map\n        rml_procs = cls.es_defs.get(\"kds_esRmlProcessor\", [])\n        rml_procs = [proc for proc in rml_procs\n                     if role == 'rdf_class' or\n                     proc['force']]\n        if rml_procs:\n            rml_maps = {}\n            for rml in rml_procs:\n                rml_maps[rml['name']] = ignore_map\n            if rml_maps:\n                es_map['rml_map'] = {\"properties\": rml_maps}\n                \n        return es_map", "docstring": "Returns the es mapping for the class\n\nargs:\n-----\nbase_class: The root class being indexed\nrole: the role states how the class should be mapped depending\nupon whether it is used as a subject of an object. options\nare es_Nested or rdf_class", "source": "juraj-google-style"}
{"code": "def VerifyMaps(self, conf):\n    retval = 0\n    for map_name in conf.maps:\n        self.log.info('Verifying map: %s.', map_name)\n        if map_name == config.MAP_NETGROUP:\n            self.log.info('The netgroup map does not support enumeration, skipping.')\n            continue\n        if map_name == config.MAP_AUTOMOUNT:\n            self.log.info('The automount map does not support enumeration, skipping.')\n            continue\n        try:\n            nss_map = nss.GetMap(map_name)\n        except error.UnsupportedMap:\n            self.log.warning('Verification of %s map is unsupported!', map_name)\n            continue\n        self.log.debug('built NSS map of %d entries', len(nss_map))\n        cache_options = conf.options[map_name].cache\n        cache = cache_factory.Create(cache_options, map_name)\n        try:\n            cache_map = cache.GetMap()\n        except error.CacheNotFound:\n            self.log.error('Cache missing!')\n            retval += 1\n            continue\n        self.log.debug('built cache map of %d entries', len(cache_map))\n        missing_entries = 0\n        for map_entry in cache_map:\n            if map_entry not in nss_map:\n                self.log.info('The following entry is present in the cache but not availible via NSS! %s', map_entry.name)\n                self.log.debug('missing entry data: %s', map_entry)\n                missing_entries += 1\n        if missing_entries > 0:\n            self.log.warning('Missing %d entries in %s map', missing_entries, map_name)\n            retval += 1\n    return retval", "docstring": "Compare each configured map against data retrieved from NSS.\n\nFor each configured map, build a Map object from NSS and compare\nit against a Map object retrieved directly from the cache.  We\nexpect the cache Map to be a subset of the nss Map due to possible\ninclusion of other NSS map types (e.g. files, nis, ldap, etc).\n\nThis could be done via series of get*nam calls, however at this\ntime it appears to be more efficient to grab them in bulk and use\nthe Map.__contains__() membership test.\n\nArgs:\nconf: nss_cache.config.Config object\n\nReturns:\ncount of failures when verifying", "source": "github-repos"}
{"code": "def to_string(cls, error_code):\n        \n        if error_code == cls.ILLEGAL_COMMAND:\n            return 'Failed to erase sector.'\n        return super(JLinkEraseErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\ncls (JLinkEraseErrors): the ``JLinkEraseErrors`` class\nerror_code (int): error code to convert\n\nReturns:\nAn error string corresponding to the error code.\n\nRaises:\nValueError: if the error code is invalid.", "source": "juraj-google-style"}
{"code": "def __init__(self, session_creator, hooks, should_recover, stop_grace_period_secs=120):\n    self._graph_was_finalized = ops.get_default_graph().finalized\n    self._hooks = hooks or []\n    for h in self._hooks:\n        h.begin()\n    worker_context = distribute_coordinator_context.get_current_worker_context()\n    if not session_creator and worker_context:\n        session_creator = worker_context.session_creator()\n    self._coordinated_creator = self._CoordinatedSessionCreator(session_creator=session_creator or ChiefSessionCreator(), hooks=self._hooks, stop_grace_period_secs=stop_grace_period_secs)\n    if should_recover:\n        self._sess = _RecoverableSession(self._coordinated_creator)\n    else:\n        self._sess = self._coordinated_creator.create_session()", "docstring": "Sets up a Monitored or Hooked Session.\n\nArgs:\nsession_creator: A factory object to create session. Typically a\n`ChiefSessionCreator` or a `WorkerSessionCreator`.\nhooks: An iterable of `SessionRunHook' objects.\nshould_recover: A bool. Indicates whether to recover from `AbortedError`\nand `UnavailableError` or not.\nstop_grace_period_secs: Number of seconds given to threads to stop after\n`close()` has been called.", "source": "github-repos"}
{"code": "def get(self, key, mem_map=True):\n        \n        self.raise_error_if_not_open()\n\n        if key in self._file:\n            data = self._file[key]\n\n            if not mem_map:\n                data = data[()]\n\n            return data\n        else:\n            return None", "docstring": "Read and return the data stored for the given key.\n\nArgs:\nkey (str): The key to read the data from.\nmem_map (bool): If ``True`` returns the data as\nmemory-mapped array, otherwise a copy is returned.\n\nNote:\nThe container has to be opened in advance.\n\nReturns:\nnumpy.ndarray: The stored data.", "source": "juraj-google-style"}
{"code": "def setFilter(self, search):\n    if (not isinstance(search, DataSearch)):\n        raise TypeError('The given parameter must an `qtpandas.DataSearch` object')\n    self._search = search\n    self.layoutAboutToBeChanged.emit()\n    if (self._dataFrameOriginal is not None):\n        self._dataFrame = self._dataFrameOriginal\n    self._dataFrameOriginal = self._dataFrame.copy()\n    self._search.setDataFrame(self._dataFrame)\n    (searchIndex, valid) = self._search.search()\n    if valid:\n        self._dataFrame = self._dataFrame[searchIndex]\n        self.layoutChanged.emit()\n    else:\n        self.clearFilter()\n        self.layoutChanged.emit()\n    self.dataFrameChanged.emit()", "docstring": "Apply a filter and hide rows.\n\nThe filter must be a `DataSearch` object, which evaluates a python\nexpression.\nIf there was an error while parsing the expression, the data will remain\nunfiltered.\n\nArgs:\nsearch(qtpandas.DataSearch): data search object to use.\n\nRaises:\nTypeError: An error is raised, if the given parameter is not a\n`DataSearch` object.", "source": "codesearchnet"}
{"code": "def check_dihedral(self, construction_table):\n        \n        c_table = construction_table\n        angles = self.get_angle_degrees(c_table.iloc[3:, :].values)\n        problem_index = np.nonzero((175 < angles) | (angles < 5))[0]\n        rename = dict(enumerate(c_table.index[3:]))\n        problem_index = [rename[i] for i in problem_index]\n        return problem_index", "docstring": "Checks, if the dihedral defining atom is colinear.\n\nChecks for each index starting from the third row of the\n``construction_table``, if the reference atoms are colinear.\n\nArgs:\nconstruction_table (pd.DataFrame):\n\nReturns:\nlist: A list of problematic indices.", "source": "juraj-google-style"}
{"code": "def _AddTokenOnNewline(self, dry_run, must_split):\n    current = self.next_token\n    previous = current.previous_token\n    self.column = self._GetNewlineColumn()\n    if not dry_run:\n        indent_level = self.line.depth\n        spaces = self.column\n        if spaces:\n            spaces -= indent_level * style.Get('INDENT_WIDTH')\n        current.AddWhitespacePrefix(newlines_before=1, spaces=spaces, indent_level=indent_level)\n    if not current.is_comment:\n        self.stack[-1].last_space = self.column\n    self.lowest_level_on_line = self.paren_level\n    if previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope()):\n        dedent = (style.Get('CONTINUATION_INDENT_WIDTH'), 0)[style.Get('INDENT_CLOSING_BRACKETS')]\n        self.stack[-1].closing_scope_indent = max(0, self.stack[-1].indent - dedent)\n        self.stack[-1].split_before_closing_bracket = True\n    penalty = current.split_penalty\n    if must_split:\n        return penalty\n    if previous.is_pseudo and previous.value == '(':\n        penalty += 50\n    if current.value not in {'if', 'for'}:\n        last = self.stack[-1]\n        last.num_line_splits += 1\n        penalty += style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits\n    if current.OpensScope() and previous.OpensScope():\n        pprev = previous.previous_token\n        if not pprev or not pprev.is_name:\n            penalty += 10\n    return penalty + 10", "docstring": "Adds a line break and necessary indentation.\n\nAppends the next token to the state and updates information necessary for\nindentation.\n\nArguments:\ndry_run: (bool) Don't commit whitespace changes to the FormatToken if\nTrue.\nmust_split: (bool) A newline was required before this token.\n\nReturns:\nThe split penalty for splitting after the current state.", "source": "github-repos"}
{"code": "def from_string(key, password=b'notasecret'):\n        \n        key = _helpers._to_bytes(key)\n        parsed_pem_key = _helpers._parse_pem_key(key)\n        if parsed_pem_key:\n            pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)\n        else:\n            password = _helpers._to_bytes(password, encoding='utf-8')\n            pkey = crypto.load_pkcs12(key, password).get_privatekey()\n        return OpenSSLSigner(pkey)", "docstring": "Construct a Signer instance from a string.\n\nArgs:\nkey: string, private key in PKCS12 or PEM format.\npassword: string, password for the private key file.\n\nReturns:\nSigner instance.\n\nRaises:\nOpenSSL.crypto.Error if the key can't be parsed.", "source": "juraj-google-style"}
{"code": "def UploadSignedConfigBlob(content,\n                           aff4_path,\n                           client_context=None,\n                           limit=None,\n                           token=None):\n  \n  if limit is None:\n    limit = config.CONFIG[\"Datastore.maximum_blob_size\"]\n\n  \n  \n  if client_context is None:\n    \n    client_context = [\"Platform:Windows\", \"Client Context\"]\n\n  config.CONFIG.Validate(\n      parameters=\"PrivateKeys.executable_signing_private_key\")\n\n  signing_key = config.CONFIG.Get(\n      \"PrivateKeys.executable_signing_private_key\", context=client_context)\n\n  verification_key = config.CONFIG.Get(\n      \"Client.executable_signing_public_key\", context=client_context)\n\n  signed_binary_utils.WriteSignedBinary(\n      rdfvalue.RDFURN(aff4_path),\n      content,\n      signing_key,\n      public_key=verification_key,\n      chunk_size=limit,\n      token=token)\n\n  logging.info(\"Uploaded to %s\", aff4_path)", "docstring": "Upload a signed blob into the datastore.\n\nArgs:\ncontent: File content to upload.\naff4_path: aff4 path to upload to.\nclient_context: The configuration contexts to use.\nlimit: The maximum size of the chunk to use.\ntoken: A security token.\n\nRaises:\nIOError: On failure to write.", "source": "juraj-google-style"}
{"code": "def helper_list(access_token, oid, path):\n    \n    if oid != \"\":\n        path = ''.join([path, \"('\", oid, \"')\"])\n    endpoint = ''.join([ams_rest_endpoint, path])\n    return do_ams_get(endpoint, path, access_token)", "docstring": "Helper Function to list a URL path.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\noid (str): An OID.\npath (str): A URL Path.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def run(self, fn, args=None, kwargs=None):\n    _check_initialization()\n    multi_process_lib.Process()\n    if self._runner is None:\n        self._start()\n    fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL)\n    for conn in self._conn.values():\n        conn.send((fn, args or [], kwargs or {}))\n    process_statuses = []\n    for (task_type, task_id), conn in self._conn.items():\n        logging.info('Waiting for the result from %s-%d', task_type, task_id)\n        try:\n            process_statuses.append(conn.recv())\n        except EOFError:\n            self.shutdown()\n            raise RuntimeError('Unexpected EOF. Worker process may have died. Please report a bug')\n    return_values = []\n    for process_status in process_statuses:\n        assert isinstance(process_status, _ProcessStatusInfo)\n        if not process_status.is_successful:\n            six.reraise(*process_status.exc_info)\n        if process_status.return_value is not None:\n            return_values.append(process_status.return_value)\n    return return_values", "docstring": "Runs `fn` with `args` and `kwargs` on all jobs.\n\nArgs:\nfn: The function to be run.\nargs: Optional positional arguments to be supplied in `fn`.\nkwargs: Optional keyword arguments to be supplied in `fn`.\n\nReturns:\nA list of return values.", "source": "github-repos"}
{"code": "def __init__(self, num_experts, gates):\n    \n    self._gates = gates\n    self._num_experts = num_experts\n\n    where = tf.to_int32(tf.where(tf.transpose(gates) > 0))\n    self._expert_index, self._batch_index = tf.unstack(where, num=2, axis=1)\n    self._part_sizes_tensor = tf.reduce_sum(tf.to_int32(gates > 0), [0])\n    self._nonzero_gates = tf.gather(\n        tf.reshape(self._gates, [-1]),\n        self._batch_index * num_experts + self._expert_index)", "docstring": "Create a SparseDispatcher.\n\nArgs:\nnum_experts: an integer.\ngates: a `Tensor` of shape `[batch_size, num_experts]`.\n\nReturns:\na SparseDispatcher", "source": "juraj-google-style"}
{"code": "def compile(self,\n                container: Container,\n                verbose: bool = False\n                ) -> CompilationOutcome:\n        \n        \n        bug = self.__installation.bugs[container.bug]\n        return bug.compiler.compile(self, container, verbose=verbose)", "docstring": "Attempts to compile the program inside a given container.\n\nParams:\nverbose: specifies whether to print the stdout and stderr produced\nby the compilation command to the stdout. If `True`, then the\nstdout and stderr will be printed.\n\nReturns:\na summary of the outcome of the compilation attempt.", "source": "juraj-google-style"}
{"code": "def add(self, pattern_txt):\n    self.patterns[len(pattern_txt)] = pattern_txt\n    low = 0\n    high = (len(pattern_txt) - 1)\n    while (not pattern_txt[low]):\n        low += 1\n    while (not pattern_txt[high]):\n        high -= 1\n    min_pattern = pattern_txt[low:(high + 1)]\n    self.min_patterns[len(min_pattern)] = min_pattern", "docstring": "Add a pattern to the list.\n\nArgs:\npattern_txt (str list): the pattern, as a list of lines.", "source": "codesearchnet"}
{"code": "def quote_identifier(identifier: str,\n                     mixed: Union[SQLCompiler, Engine, Dialect]) -> str:\n    \n    \n    return get_preparer(mixed).quote(identifier)", "docstring": "Converts an SQL identifier to a quoted version, via the SQL dialect in\nuse.\n\nArgs:\nidentifier: the identifier to be quoted\nmixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or\n:class:`Dialect` object\n\nReturns:\nthe quoted identifier", "source": "juraj-google-style"}
{"code": "def _make_inputs_match(branch_graphs, branch_inputs):\n    assert len(branch_graphs) == len(branch_inputs)\n    added_inputs = set()\n    new_inputs = []\n    for branch_in in branch_inputs:\n        for tensor in branch_in:\n            tensor_id = ops.tensor_id(tensor)\n            if tensor_id not in added_inputs:\n                added_inputs.add(tensor_id)\n                new_inputs.append(tensor)\n    for branch_graph, branch_in in zip(branch_graphs, branch_inputs):\n        input_ids = [ops.tensor_id(t) for t in branch_in]\n        branch_input_to_param = dict(zip(input_ids, branch_graph.inputs))\n        input_list = []\n        for in_t in new_inputs:\n            param = branch_input_to_param.get(ops.tensor_id(in_t))\n            if param is None:\n                param = _create_dummy_input(branch_graph, in_t)\n            input_list.append(param)\n        branch_graph.inputs = input_list\n        branch_graph.function_captures.reset_captures(new_inputs, branch_graph.inputs)\n    return new_inputs", "docstring": "Modifies branch_graphs so they have the same input signature.\n\nThis method reorders and/or adds parameters to each graph in branch_graphs so\nthey have the same input signature, and updates the 'inputs' and 'captured'\nfields of each graph accordingly. It uses the input tensors from the outer\ngraph to avoid duplicating shared arguments.\n\nArgs:\nbranch_graphs: a `list` of `FuncGraph`\nbranch_inputs: a `list` of `list`s of `Tensor`s in the outer graph. The\ninputs for the corresponding graph in `branch_graphs`.\n\nReturns:\nA new list of Tensors from the outer graph that are the new inputs for each\nbranch_graph. This is a deduped version of `sum(branch_inputs)`.", "source": "github-repos"}
{"code": "def create(self, data={}, **kwargs):\n    url = self.base_url\n    return self.post_url(url, data, **kwargs)", "docstring": "Create Virtual Account from given dict\n\nArgs:\nParam for Creating Virtual Account\n\nReturns:\nVirtual Account dict", "source": "codesearchnet"}
{"code": "class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='mean_absolute_percentage_error', dtype=None):\n        super().__init__(mean_absolute_percentage_error, name, dtype=dtype)\n        self._direction = 'down'\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes mean absolute percentage error between `y_true` and `y_pred`.\n\nFormula:\n\n```python\nloss = 100 * mean(abs((y_true - y_pred) / y_true))\n```\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExamples:\n>>> m = keras.metrics.MeanAbsolutePercentageError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result()\n250000000.0\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result()\n500000000.0\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[keras.metrics.MeanAbsolutePercentageError()])\n```", "source": "github-repos"}
{"code": "def anti_clobber_dir_path(dir_path, suffix='.d'):\n    \n    dir_path = os.path.normpath(dir_path)\n    parts = dir_path.split(os.sep)\n\n    for index in range(len(parts)):\n        test_path = os.sep.join(parts[:index + 1])\n\n        if os.path.isfile(test_path):\n            parts[index] += suffix\n\n            return os.sep.join(parts)\n\n    return dir_path", "docstring": "Return a directory path free of filenames.\n\nArgs:\ndir_path (str): A directory path.\nsuffix (str): The suffix to append to the part of the path that is\na file.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def _try_recover(self, trial, error_msg):\n    try:\n        self.trial_executor.stop_trial(trial, error=(error_msg is not None), error_msg=error_msg, stop_logger=False)\n        trial.result_logger.flush()\n        if self.trial_executor.has_resources(trial.resources):\n            logger.info('Attempting to recover trial state from last checkpoint.')\n            self.trial_executor.start_trial(trial)\n            if (trial.status == Trial.ERROR):\n                raise RuntimeError('Trial did not start correctly.')\n        else:\n            logger.debug('Notifying Scheduler and requeueing trial.')\n            self._requeue_trial(trial)\n    except Exception:\n        logger.exception('Error recovering trial from checkpoint, abort.')\n        self._scheduler_alg.on_trial_error(self, trial)\n        self._search_alg.on_trial_complete(trial.trial_id, error=True)", "docstring": "Tries to recover trial.\n\nNotifies SearchAlgorithm and Scheduler if failure to recover.\n\nArgs:\ntrial (Trial): Trial to recover.\nerror_msg (str): Error message from prior to invoking this method.", "source": "codesearchnet"}
{"code": "def GetRelativePath(self, path_spec):\n    location = getattr(path_spec, 'location', None)\n    if (location is None):\n        raise errors.PathSpecError('Path specification missing location.')\n    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(self._file_system.type_indicator):\n        if (not location.startswith(self._mount_point.location)):\n            raise errors.PathSpecError('Path specification does not contain mount point.')\n    else:\n        if (not hasattr(path_spec, 'parent')):\n            raise errors.PathSpecError('Path specification missing parent.')\n        if (path_spec.parent != self._mount_point):\n            raise errors.PathSpecError('Path specification does not contain mount point.')\n    path_segments = self._file_system.SplitPath(location)\n    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(self._file_system.type_indicator):\n        mount_point_path_segments = self._file_system.SplitPath(self._mount_point.location)\n        path_segments = path_segments[len(mount_point_path_segments):]\n    return '{0:s}{1:s}'.format(self._file_system.PATH_SEPARATOR, self._file_system.PATH_SEPARATOR.join(path_segments))", "docstring": "Returns the relative path based on a resolved path specification.\n\nThe relative path is the location of the upper most path specification.\nThe the location of the mount point is stripped off if relevant.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nstr: corresponding relative path or None if the relative path could not\nbe determined.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def recipients(self, notification_type, recipients, priority='Low'):\n    self._notification_type = notification_type\n    self._recipients = recipients\n    self._priority = priority\n    self._is_organization = False", "docstring": "Set vars for the passed in data. Used for one or more recipient notification.\n\n.. code-block:: javascript\n\n{\n\"notificationType\": notification_type,\n\"priority\": priority\n\"isOrganization\": false,\n\"recipients\": recipients\n}\n\nArgs:\nnotification_type (str): The type of notification being sent.\nrecipients (str): A comma delimited string of recipients.\npriority (str): The priority: Low, Medium, High.", "source": "codesearchnet"}
{"code": "def apply(self, data, path=None, applicator=None):\n        \n\n        if applicator:\n            applicator.pset = self\n        else:\n            applicator = Applicator(self)\n\n        return applicator.apply(data, path=path)", "docstring": "Apply permissions in this set to the provided data, effectively\nremoving all keys from it are not permissioned to be viewed\n\nArguments:\n\ndata -- dict of data\n\nReturns:\n\nCleaned data", "source": "juraj-google-style"}
{"code": "def _convert_metadata(data):\n    \n    def compose(val, arguments=None):\n        \n        if val is None:\n            return None\n\n        if not arguments:\n            return val\n\n        arguments[\"\n        return arguments\n\n    conspect = data.get(\"conspect\", {})\n    author_name = data.get(\"author\", {}).get(\"name\")\n    author_code = data.get(\"author\", {}).get(\"code\")\n\n    metadata = odict[\n        \"dc:title\": data.get(\"title\"),\n        \"dcterms:alternative\": data.get(\"subtitle\"),\n        \"dc:creator\": compose(author_name, {\"@id\": author_code}),\n        \"dc:publisher\": data.get(\"publisher\"),\n        \"dc:description\": data.get(\"annotation\"),\n        \"dc:coverage\": compose(data.get(\"place\"), {\"@xml:lang\": \"cze\"}),\n        \"dc:language\": compose(data.get(\"language\"), {\"@schema\": \"ISO 639-2\"}),\n        \"dcterms:created\": data.get(\"from_year\"),\n        \"dcterms:accrualperiodicity\": compose(\n            data.get(\"periodicity\"),\n            {\"@xml:lang\": \"cze\"}\n        ),\n        \"dc:identifier\": [\n            {\"@rdf:resource\": data[\"url\"]},\n            compose(data.get(\"issn\"), {\"@xsi:type\": \"ISSN\"}),\n            compose(conspect.get(\"mdt\"), {\"@xsi:type\": \"MDT\"}),\n            compose(conspect.get(\"ddc\"), {\"@xsi:type\": \"DDC\"}),\n        ],\n        \"dc:subject\": [\n            compose(conspect.get(\"mdt\"), {\"@xsi:type\": \"dcterms:UDC\"}),\n            compose(conspect.get(\"ddc\"), {\"@xsi:type\": \"dcterms:DDC\"}),\n        ],\n    ]\n\n    def pick_keywords(data, source):\n        \n        return [\n            x[\"zahlavi\"]\n            for x in data.get(source, [])\n            if x.get(\"zahlavi\")\n        ]\n\n    \n    \n    cz_keywords = pick_keywords(data, \"cz_keywords\")\n    en_keywords = pick_keywords(data, \"en_keywords\")\n\n    if cz_keywords:\n        metadata[\"dc:subject\"].append({\n            \"@xml:lang\": \"cz\",\n            \"\n        })\n    if en_keywords:\n        metadata[\"dc:subject\"].append({\n            \"@xml:lang\": \"en\",\n            \"\n        })\n\n    \n    metadata[\"dc:identifier\"] = [x for x in metadata[\"dc:identifier\"] if x]\n    metadata[\"dc:subject\"] = [x for x in metadata[\"dc:subject\"] if x]\n\n    return metadata", "docstring": "Convert metadata from WA-KAT to Dublin core dictionary like structure,\nwhich may be easily converted to xml using :mod:`xmltodict` module.\n\nArgs:\ndata (dict): Nested WA-KAT data. See tests for example.\n\nReturns:\ndict: Dict in dublin core format.", "source": "juraj-google-style"}
{"code": "def _InstallRpm(self, path):\n    \n\n    pid = os.fork()\n    if pid == 0:\n      \n\n      cmd = \"/bin/rpm\"\n      cmd_args = [cmd, \"-U\", \"--replacepkgs\", \"--replacefiles\", path]\n\n      \n      \n      env = os.environ.copy()\n      env.pop(\"LD_LIBRARY_PATH\", None)\n      env.pop(\"PYTHON_PATH\", None)\n\n      \n      os.execve(cmd, cmd_args, env)\n\n    else:\n      \n      \n      \n      time.sleep(1000)", "docstring": "Client update for rpm based distros.\n\nUpgrading rpms is a bit more tricky than upgrading deb packages since there\nis a preinstall script that kills the running GRR daemon and, thus, also\nthe installer process. We need to make sure we detach the child process\nproperly and therefore cannot use client_utils_common.Execute().\n\nArgs:\npath: Path to the .rpm.", "source": "juraj-google-style"}
{"code": "def visit_membership(self, relation: _evaluation.MembershipRelationNode) -> _sql_data_types.Select:\n    lhs_result = self.visit(relation.left)\n    rhs_result = self.visit(relation.right)\n    in_lhs = lhs_result if isinstance(relation, _evaluation.InNode) else rhs_result\n    in_rhs = rhs_result if isinstance(relation, _evaluation.InNode) else lhs_result\n    sql_expr = f'({in_lhs.as_operand()}) IN ({in_rhs.as_operand()})'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_expr, _sql_data_type=_sql_data_types.Boolean, _sql_alias='mem_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath membership relation to Spark SQL.\n\nFor the `IN` relation, the LHS operand is assumed to be a collection of a\nsingle value. For 'CONTAINS', the RHS operand is assumed to be a collection\nof a single value. Equality is handled in the visit_equality function.\n\nArgs:\nrelation: The FHIRPath AST `MembershipRelation` node.\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def is_coord_subset(subset, superset, atol=1e-8):\n    \n    c1 = np.array(subset)\n    c2 = np.array(superset)\n    is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)\n    any_close = np.any(is_close, axis=-1)\n    return np.all(any_close)", "docstring": "Tests if all coords in subset are contained in superset.\nDoesn't use periodic boundary conditions\n\nArgs:\nsubset, superset: List of coords\n\nReturns:\nTrue if all of subset is in superset.", "source": "juraj-google-style"}
{"code": "def pop(self):\n    if not self.layers:\n        raise TypeError('There are no layers in the model.')\n    layer = self._self_tracked_trackables.pop()\n    self._layer_call_argspecs.pop(layer)\n    if not self.layers:\n        self.outputs = None\n        self.inputs = None\n        self.built = False\n        self._inferred_input_shape = None\n        self._has_explicit_input_shape = False\n        self._graph_initialized = False\n    elif self._graph_initialized:\n        self.layers[-1]._outbound_nodes = []\n        self.outputs = [self.layers[-1].output]\n        self._init_graph_network(self.inputs, self.outputs)\n        self.built = True", "docstring": "Removes the last layer in the model.\n\nRaises:\nTypeError: if there are no layers in the model.", "source": "github-repos"}
{"code": "def sg_queue_context(sess=None):\n    r\n\n    \n    sess = tf.get_default_session() if sess is None else sess\n\n    \n    coord = tf.train.Coordinator()\n    try:\n        \n        threads = tf.train.start_queue_runners(sess, coord)\n        yield\n    finally:\n        \n        coord.request_stop()\n        \n        coord.join(threads)", "docstring": "r\"\"\"Context helper for queue routines.\n\nArgs:\nsess: A session to open queues. If not specified, a new session is created.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def from_string(cls, string_input):\n        \n        directives = []\n        tasks = []\n        charge = None\n        spin_multiplicity = None\n        title = None\n        basis_set = None\n        basis_set_option = None\n        theory_directives = {}\n        geom_options = None\n        symmetry_options = None\n        memory_options = None\n        lines = string_input.strip().split(\"\\n\")\n        while len(lines) > 0:\n            l = lines.pop(0).strip()\n            if l == \"\":\n                continue\n\n            toks = l.split()\n            if toks[0].lower() == \"geometry\":\n                geom_options = toks[1:]\n                l = lines.pop(0).strip()\n                toks = l.split()\n                if toks[0].lower() == \"symmetry\":\n                    symmetry_options = toks[1:]\n                    l = lines.pop(0).strip()\n                \n                species = []\n                coords = []\n                while l.lower() != \"end\":\n                    toks = l.split()\n                    species.append(toks[0])\n                    coords.append([float(i) for i in toks[1:]])\n                    l = lines.pop(0).strip()\n                mol = Molecule(species, coords)\n            elif toks[0].lower() == \"charge\":\n                charge = int(toks[1])\n            elif toks[0].lower() == \"title\":\n                title = l[5:].strip().strip(\"\\\"\")\n            elif toks[0].lower() == \"basis\":\n                \n                l = lines.pop(0).strip()\n                basis_set = {}\n                while l.lower() != \"end\":\n                    toks = l.split()\n                    basis_set[toks[0]] = toks[-1].strip(\"\\\"\")\n                    l = lines.pop(0).strip()\n            elif toks[0].lower() in NwTask.theories:\n                \n                if len(toks) > 1:\n                    basis_set_option = toks[1]\n                \n                theory = toks[0].lower()\n                l = lines.pop(0).strip()\n                theory_directives[theory] = {}\n                while l.lower() != \"end\":\n                    toks = l.split()\n                    theory_directives[theory][toks[0]] = toks[-1]\n                    if toks[0] == \"mult\":\n                        spin_multiplicity = float(toks[1])\n                    l = lines.pop(0).strip()\n            elif toks[0].lower() == \"task\":\n                tasks.append(\n                    NwTask(charge=charge,\n                           spin_multiplicity=spin_multiplicity,\n                           title=title, theory=toks[1],\n                           operation=toks[2], basis_set=basis_set,\n                           basis_set_option=basis_set_option,\n                           theory_directives=theory_directives.get(toks[1])))\n            elif toks[0].lower() == \"memory\":\n                    memory_options = ' '.join(toks[1:])\n            else:\n                directives.append(l.strip().split())\n\n        return NwInput(mol, tasks=tasks, directives=directives,\n                       geometry_options=geom_options,\n                       symmetry_options=symmetry_options,\n                       memory_options=memory_options)", "docstring": "Read an NwInput from a string. Currently tested to work with\nfiles generated from this class itself.\n\nArgs:\nstring_input: string_input to parse.\n\nReturns:\nNwInput object", "source": "juraj-google-style"}
{"code": "def discretize(self, data):\n        \n        ret = data.copy()\n        for feature in self.lambdas:\n            if len(data.shape) == 1:\n                ret[feature] = int(self.lambdas[feature](ret[feature]))\n            else:\n                ret[:, feature] = self.lambdas[feature](\n                    ret[:, feature]).astype(int)\n        return ret", "docstring": "Discretizes the data.\nArgs:\ndata: numpy 2d or 1d array\nReturns:\nnumpy array of same dimension, discretized.", "source": "juraj-google-style"}
{"code": "def add_message(self, message_type):\n    \n    name = self.__normalized_name(message_type)\n    if name not in self.__schemas:\n      \n      self.__schemas[name] = None\n      schema = self.__message_to_schema(message_type)\n      self.__schemas[name] = schema\n    return name", "docstring": "Add a new message.\n\nArgs:\nmessage_type: protorpc.message.Message class to be parsed.\n\nReturns:\nstring, The JSON Schema id.\n\nRaises:\nKeyError if the Schema id for this message_type would collide with the\nSchema id of a different message_type that was already added.", "source": "juraj-google-style"}
{"code": "def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False):\n    format_version = metadata_values.get('format_version', None)\n    if (not format_version):\n        raise IOError('Missing format version.')\n    try:\n        format_version = int(format_version, 10)\n    except (TypeError, ValueError):\n        raise IOError('Invalid format version: {0!s}.'.format(format_version))\n    if ((not check_readable_only) and (format_version != cls._FORMAT_VERSION)):\n        raise IOError('Format version: {0:d} is not supported.'.format(format_version))\n    if (format_version < cls._COMPATIBLE_FORMAT_VERSION):\n        raise IOError('Format version: {0:d} is too old and no longer supported.'.format(format_version))\n    if (format_version > cls._FORMAT_VERSION):\n        raise IOError('Format version: {0:d} is too new and not yet supported.'.format(format_version))\n    metadata_values['format_version'] = format_version\n    compression_format = metadata_values.get('compression_format', None)\n    if (compression_format not in definitions.COMPRESSION_FORMATS):\n        raise IOError('Unsupported compression format: {0:s}'.format(compression_format))\n    serialization_format = metadata_values.get('serialization_format', None)\n    if (serialization_format != definitions.SERIALIZER_FORMAT_JSON):\n        raise IOError('Unsupported serialization format: {0:s}'.format(serialization_format))\n    storage_type = metadata_values.get('storage_type', None)\n    if (storage_type not in definitions.STORAGE_TYPES):\n        raise IOError('Unsupported storage type: {0:s}'.format(storage_type))", "docstring": "Checks the storage metadata.\n\nArgs:\nmetadata_values (dict[str, str]): metadata values per key.\ncheck_readable_only (Optional[bool]): whether the store should only be\nchecked to see if it can be read. If False, the store will be checked\nto see if it can be read and written to.\n\nRaises:\nIOError: if the format version or the serializer format is not supported.\nOSError: if the format version or the serializer format is not supported.", "source": "codesearchnet"}
{"code": "def __write_to_hdf5_light(self, filename_out, *args, **kwargs):\n    block_size = 0\n    with h5py.File(filename_out, 'w') as h5:\n        h5.attrs[b'CLASS'] = b'FILTERBANK'\n        h5.attrs[b'VERSION'] = b'1.0'\n        if HAS_BITSHUFFLE:\n            bs_compression = bitshuffle.h5.H5FILTER\n            bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)\n        else:\n            bs_compression = None\n            bs_compression_opts = None\n            logger.warning('Warning: bitshuffle not found. No compression applied.')\n        dset = h5.create_dataset('data', data=self.data, compression=bs_compression, compression_opts=bs_compression_opts)\n        dset_mask = h5.create_dataset('mask', shape=self.file_shape, compression=bs_compression, compression_opts=bs_compression_opts, dtype='uint8')\n        dset.dims[0].label = b'frequency'\n        dset.dims[1].label = b'feed_id'\n        dset.dims[2].label = b'time'\n        dset_mask.dims[0].label = b'frequency'\n        dset_mask.dims[1].label = b'feed_id'\n        dset_mask.dims[2].label = b'time'\n        for (key, value) in self.header.items():\n            dset.attrs[key] = value", "docstring": "Write data to HDF5 file in one go.\n\nArgs:\nfilename_out (str): Name of output file", "source": "codesearchnet"}
{"code": "def set_vocabulary(self, vocabulary, idf_weights=None):\n    if self.output_mode == 'tf_idf':\n        if idf_weights is None:\n            raise ValueError(\"`idf_weights` must be set if output_mode is 'tf_idf'.\")\n    elif idf_weights is not None:\n        raise ValueError(f\"`idf_weights` should only be set if output_mode is `'tf_idf'`. Received: output_mode={self.output_mode} and idf_weights={idf_weights}\")\n    if isinstance(vocabulary, str):\n        if not tf.io.gfile.exists(vocabulary):\n            raise ValueError(f'Vocabulary file {vocabulary} does not exist.')\n        if self.output_mode == 'tf_idf':\n            raise ValueError(\"output_mode `'tf_idf'` does not support loading a vocabulary from file.\")\n        self.lookup_table = self._lookup_table_from_file(vocabulary)\n        self._record_vocabulary_size()\n        return\n    if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or tf.is_tensor(idf_weights)):\n        raise RuntimeError(f'Cannot set a tensor vocabulary on layer {self.name} when not executing eagerly. Create this layer or call `set_vocabulary()` outside of any traced function.')\n    if tf.is_tensor(vocabulary):\n        vocabulary = self._tensor_vocab_to_numpy(vocabulary)\n    elif isinstance(vocabulary, (list, tuple)):\n        vocabulary = np.array(vocabulary)\n    if tf.is_tensor(idf_weights):\n        idf_weights = idf_weights.numpy()\n    elif isinstance(idf_weights, (list, tuple)):\n        idf_weights = np.array(idf_weights)\n    if vocabulary.size == 0:\n        raise ValueError(f'Cannot set an empty vocabulary. Received: vocabulary={vocabulary}')\n    oov_start = self._oov_start_index()\n    token_start = self._token_start_index()\n    special_tokens = [self.mask_token] * oov_start + [self.oov_token] * self.num_oov_indices\n    found_special_tokens = np.array_equal(special_tokens, vocabulary[:token_start])\n    if found_special_tokens:\n        tokens = vocabulary[token_start:]\n    else:\n        tokens = vocabulary\n    repeated_tokens = self._find_repeated_tokens(tokens)\n    if repeated_tokens:\n        raise ValueError(f'The passed vocabulary has at least one repeated term. Please uniquify your dataset. The repeated terms are: {repeated_tokens}')\n    if self.mask_token is not None and self.mask_token in tokens:\n        mask_index = np.argwhere(vocabulary == self.mask_token)[-1]\n        raise ValueError(f'Found reserved mask token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: mask_token={self.mask_token} at vocabulary index {mask_index}')\n    if self.oov_token is not None and self.invert and (self.oov_token in tokens):\n        oov_index = np.argwhere(vocabulary == self.oov_token)[-1]\n        raise ValueError(f'Found reserved OOV token at unexpected location in `vocabulary`. Note that passed `vocabulary` does not need to include the OOV and mask tokens. Either remove all mask and OOV tokens, or include them only at the start of the vocabulary in precisely this order: {special_tokens}. Received: oov_token={self.oov_token} at vocabulary index {oov_index}')\n    new_vocab_size = token_start + len(tokens)\n    if self.max_tokens is not None and new_vocab_size > self.max_tokens:\n        raise ValueError(f'Attempted to set a vocabulary larger than the maximum vocab size. 
Received vocabulary size is {new_vocab_size}; `max_tokens` is {self.max_tokens}.')\n    self.lookup_table = self._lookup_table_from_tokens(tokens)\n    self._record_vocabulary_size()\n    if self.output_mode == 'tf_idf' and idf_weights is not None:\n        if len(vocabulary) != len(idf_weights):\n            raise ValueError(f'`idf_weights` must be the same length as vocabulary. len(idf_weights) is {len(idf_weights)}; len(vocabulary) is {len(vocabulary)}')\n        idf_weights = self._convert_to_ndarray(idf_weights)\n        if idf_weights.ndim != 1:\n            raise ValueError(f'TF-IDF data must be a 1-index array. Received: type(idf_weights)={type(idf_weights)}')\n        if found_special_tokens:\n            front_padding = 0\n            front_padding_value = 0\n        else:\n            front_padding = token_start\n            front_padding_value = np.average(idf_weights)\n        back_padding_value = 0\n        if self.pad_to_max_tokens and self.max_tokens is not None:\n            back_padding = self.max_tokens - front_padding - len(idf_weights)\n        else:\n            back_padding = 0\n        weights = np.pad(idf_weights, (front_padding, back_padding), 'constant', constant_values=(front_padding_value, back_padding_value))\n        weights = tf.convert_to_tensor(weights, dtype=backend.floatx())\n        self.idf_weights = tf.Variable(weights, trainable=False)\n        self.idf_weights_const = self.idf_weights.value()", "docstring": "Sets vocabulary (and optionally document frequency) for this layer.\n\nThis method sets the vocabulary and idf weights for this layer directly,\ninstead of analyzing a dataset through `adapt`. It should be used\nwhenever the vocab (and optionally document frequency) information is\nalready known.  If vocabulary data is already present in the layer, this\nmethod will replace it.\n\nArgs:\nvocabulary: Either an array or a string path to a text file.\nIf passing an array, can pass a tuple, list,\n1D numpy array, or 1D tensor containing the vocbulary terms.\nIf passing a file path, the file should contain one line\nper term in the vocabulary.\nidf_weights: A tuple, list, 1D numpy array, or 1D tensor\nof inverse document frequency weights with equal\nlength to vocabulary. Must be set if `output_mode`\nis `\"tf_idf\"`. Should not be set otherwise.", "source": "github-repos"}
{"code": "def _set_default_attr(self, default_attr):\n        \n        for attr, val in six.iteritems(default_attr):\n            if getattr(self, attr, None) is None:\n                setattr(self, attr, val)", "docstring": "Sets default attributes when None.\n\nArgs:\ndefault_attr: dict. Key-val of attr, default-value.", "source": "juraj-google-style"}
{"code": "def convert(recursive=False, optional_features=None, user_requested=True, conversion_ctx=ag_ctx.NullCtx()):\n\n    def decorator(f):\n        \n\n        def wrapper(*args, **kwargs):\n            \n            options = converter.ConversionOptions(recursive=recursive, user_requested=user_requested, optional_features=optional_features)\n            try:\n                with conversion_ctx:\n                    return converted_call(f, args, kwargs, options=options)\n            except Exception as e:\n                if hasattr(e, 'ag_error_metadata'):\n                    raise e.ag_error_metadata.to_exception(e)\n                else:\n                    raise\n        if inspect.isfunction(f) or inspect.ismethod(f):\n            wrapper = functools.update_wrapper(wrapper, f)\n        decorated_wrapper = tf_decorator.make_decorator(f, wrapper)\n        return autograph_artifact(decorated_wrapper)\n    return decorator", "docstring": "Decorator that compiles a function to use TensorFlow ops.\n\nThe decorator is dynamic - it recompiles the target whenever the decorated\nfunction is called. This means the parameter values are known at conversion.\nIt also means that repeated calls with different types of parameters will be\ncorrectly processed.\n\nArgs:\nrecursive: bool, whether to recursively convert any functions or classes\nthat the converted function may use.\noptional_features: converted.Feature, allows toggling optional or\nexperimental features. When set to None, only the core features are\nenabled.\nuser_requested: bool, whether this is a function that the user explicitly\nasked to be converted. See ConversionOptions.user_requested.\nconversion_ctx: Optional ag_ctx.ControlStatusCtx, the Autograph context in\nwhich `f` is used.\n\nReturns:\nCallable, a decorator that converts the given function into an equivalent\nfunction that uses TensorFlow ops.", "source": "github-repos"}
{"code": "def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):\n    for d in directories:\n        if (d.startswith(os.path.expanduser('~')) and (not os.path.exists(d))):\n            os.makedirs(d)\n        possible_path = os.path.join(d, rel_path)\n        if os.path.exists(possible_path):\n            loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)\n            if (loaded is not None):\n                return (possible_path, cls.load_yaml_by_path(possible_path))\n    return None", "docstring": "Load a yaml file with path that is relative to one of given directories.\n\nArgs:\ndirectories: list of directories to search\nname: relative path of the yaml file to load\nlog_debug: log all messages as debug\nReturns:\ntuple (fullpath, loaded yaml structure) or None if not found", "source": "codesearchnet"}
{"code": "def format_tasks(tasks):\n  \n  return ['%d : %s (%s)' % (task.key.id(),\n                            task.description,\n                            ('done' if task.done\n                             else 'created %s' % task.created))\n          for task in tasks]", "docstring": "Converts a list of tasks to a list of string representations.\n\nArgs:\ntasks: A list of the tasks to convert.\nReturns:\nA list of string formatted tasks.", "source": "juraj-google-style"}
{"code": "def check_panels(adapter, panels, default_panels=None):\n    \n    default_panels = default_panels or []\n    panels_exist = True\n    for panel in default_panels:\n        if panel not in panels:\n            log.warning(\"Default panels have to be defined in panels\")\n            panels_exist = False\n    for panel in panels:\n        if not adapter.gene_panel(panel):\n            log.warning(\"Panel {} does not exist in database\".format(panel))\n            panels_exist = False\n    return panels_exist", "docstring": "Make sure that the gene panels exist in the database\nAlso check if the default panels are defined in gene panels\n\nArgs:\nadapter(MongoAdapter)\npanels(list(str)): A list with panel names\n\nReturns:\npanels_exists(bool)", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    cross_attn_present_key_value = None\n    cross_attn_weights = None\n    if encoder_hidden_states is not None:\n        residual = hidden_states\n        hidden_states = self.encoder_attn_layer_norm(hidden_states)\n        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n        hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value)\n        hidden_states = self.dropout(hidden_states, training=training)\n        hidden_states = residual + hidden_states\n        present_key_value = present_key_value + cross_attn_present_key_value\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*\nattention_mask (`tf.Tensor`): attention mask of size\n*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\nencoder_hidden_states (`tf.Tensor`):\ncross attention input to the layer of shape *(batch, seq_len, embed_dim)*\nencoder_attention_mask (`tf.Tensor`): encoder attention mask of size\n*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n*(decoder_attention_heads,)*\ncross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.\n*(decoder_attention_heads,)*\npast_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states", "source": "github-repos"}
{"code": "def callsign(msg):\n    \n\n    if common.typecode(msg) < 1 or common.typecode(msg) > 4:\n        raise RuntimeError(\"%s: Not a identification message\" % msg)\n\n    chars = '\n    msgbin = common.hex2bin(msg)\n    csbin = msgbin[40:96]\n\n    cs = ''\n    cs += chars[common.bin2int(csbin[0:6])]\n    cs += chars[common.bin2int(csbin[6:12])]\n    cs += chars[common.bin2int(csbin[12:18])]\n    cs += chars[common.bin2int(csbin[18:24])]\n    cs += chars[common.bin2int(csbin[24:30])]\n    cs += chars[common.bin2int(csbin[30:36])]\n    cs += chars[common.bin2int(csbin[36:42])]\n    cs += chars[common.bin2int(csbin[42:48])]\n\n    \n    \n    cs = cs.replace('\n    return cs", "docstring": "Aircraft callsign\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nstring: callsign", "source": "juraj-google-style"}
{"code": "def save_yaml(dictionary, path, pretty=False, sortkeys=False):\n    \n    \n    if sortkeys:\n        dictionary = dict(dictionary)\n    with open(path, 'w') as f:\n        if pretty:\n            pyaml.dump(dictionary, f)\n        else:\n            yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)", "docstring": "Save dictionary to YAML file preserving order if it is an OrderedDict\n\nArgs:\ndictionary (Dict): Python dictionary to save\npath (str): Path to YAML file\npretty (bool): Whether to pretty print. Defaults to False.\nsortkeys (bool): Whether to sort dictionary keys. Defaults to False.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def pretty_print_counters(counters):\n    totals = collections.defaultdict(int)\n    for (name, val) in counters:\n        prefixes = ([name[:i] for i in xrange(len(name)) if (name[i] == '/')] + [name])\n        for p in prefixes:\n            totals[p] += val\n    parts = []\n    for (name, val) in sorted(six.iteritems(totals)):\n        parts.append(((' ' * name.count('/')) + ('%s: %.3g' % (name, val))))\n    return '\\n'.join(parts)", "docstring": "print counters hierarchically.\n\nEach counter is a pair of a string and a number.\nThe string can have slashes, meaning that the number also counts towards\neach prefix.  e.g.  \"parameters/trainable\" counts towards both \"parameters\"\nand \"parameters/trainable\".\n\nArgs:\ncounters: a list of (string, number) pairs\n\nReturns:\na string", "source": "codesearchnet"}
{"code": "def get_latex_figure_str(fpath_list, caption_str=None, label_str=None,\n                         width_str=r'\\textwidth', height_str=None, nCols=None,\n                         dpath=None, colpos_sep=' ', nlsep='',\n                         use_sublbls=None, use_frame=False):\n    r\n    import utool as ut\n\n    if nCols is None:\n        nCols = len(fpath_list)\n\n    USE_SUBFIGURE = True\n\n    if width_str is not None:\n        colwidth = (1.0 / nCols)\n        if USE_SUBFIGURE:\n            colwidth *= .95\n            graphics_sizestr = ('%.2f' % (colwidth,)) + width_str\n        else:\n            graphics_sizestr = '[width=%.1f%s]' % (colwidth, width_str)\n    elif height_str is not None:\n        graphics_sizestr = '[height=%s]' % (height_str)\n    else:\n        graphics_sizestr =  ''\n\n    if dpath is not None:\n        fpath_list = [ut.relpath_unix(fpath_, dpath) for fpath_ in fpath_list]\n\n    if USE_SUBFIGURE:\n        \n        \n        \n        graphics_list = []\n        sublbl_prefix = label_str if label_str is not None else ''\n        for count, fpath in enumerate(fpath_list):\n            \n            CHRLBLS = True\n            if CHRLBLS:\n                \n                subchar = chr(65 + count)\n            else:\n                subchar = str(count)\n            parts = []\n            subfigure_str = ''\n            if len(fpath_list) > 1:\n                parts.append('\\\\begin{subfigure}[h]{' + graphics_sizestr + '}')\n                parts.append('\\\\centering')\n            graphics_part = '\\\\includegraphics[width=%s]{%s}' % (width_str, fpath,)\n            if use_frame:\n                parts.append('\\\\fbox{%s}' % (graphics_part,))\n            else:\n                parts.append(graphics_part)\n            if use_sublbls is True or use_sublbls is None and len(fpath_list) > 1:\n                parts.append('\\\\caption{}\\\\label{sub:' + sublbl_prefix + subchar + '}')\n            if len(fpath_list) > 1:\n                parts.append('\\\\end{subfigure}')\n            subfigure_str = ''.join(parts)\n            graphics_list.append(subfigure_str)\n    else:\n        if True:\n            graphics_list = [\n                r'\\includegraphics%s{%s}\\captionof{figure}{%s}' % (\n                    graphics_sizestr, fpath, 'fd',\n                    \n                    \n                )\n                for count, fpath in enumerate(fpath_list)]\n        else:\n            graphics_list = [r'\\includegraphics%s{%s}' % (graphics_sizestr, fpath,) for fpath in fpath_list]\n        \n    \n\n    \n    NL = '\\n'\n    if USE_SUBFIGURE:\n        col_spacer_mid = NL + '~~' + '% --' + NL\n        col_spacer_end = NL + r'\\\\' + '% --' + NL\n    else:\n        col_spacer_mid = NL + '&' + NL\n        col_spacer_end = NL + r'\\\\' + nlsep + NL\n    sep_list = [\n        col_spacer_mid  if count % nCols > 0 else col_spacer_end\n        for count in range(1, len(graphics_list) + 1)\n    ]\n    if len(sep_list) > 0:\n        sep_list[-1] = ''\n    graphics_list_ = [graphstr + sep for graphstr, sep in zip(graphics_list, sep_list)]\n\n    \n    graphics_body = ''.join(graphics_list_)\n    header_str = colpos_sep.join(['c'] * nCols)\n\n    if USE_SUBFIGURE:\n        figure_body = graphics_body\n    else:\n        figure_body =  ut.codeblock(\n            r\n        ) % (header_str, graphics_body)\n    if caption_str is not None:\n        \n        if label_str is not None:\n            figure_body += '\\n\\caption[%s]{%s}' % (label_str, 
caption_str,)\n        else:\n            figure_body += '\\n\\caption{%s}' % (caption_str,)\n    if label_str is not None:\n        figure_body += '\\n\\label{fig:%s}' % (label_str,)\n    \n    \n    \n    figure_fmtstr = ut.codeblock(\n        r\n    )\n    figure_str = figure_fmtstr % (figure_body)\n    return figure_str", "docstring": "r\"\"\"\nArgs:\nfpath_list (list):\ndpath (str): directory relative to main tex file\n\nReturns:\nstr: figure_str\n\nCommandLine:\npython -m utool.util_latex --test-get_latex_figure_str\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_latex import *  # NOQA\n>>> fpath_list = ['figures/foo.png']\n>>> figure_str = get_latex_figure_str(fpath_list)\n>>> result = str(figure_str)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def send_msg_to_webhook(self, message):\n    payload = {'content': message}\n    header = {'Content-Type': 'application/json'}\n    try:\n        request = requests.post(self.api_url, headers=header, json=payload)\n        request.raise_for_status()\n    except Exception as error_msg:\n        warning_msg = (('EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' + '\\n\\texception={0}'.format(repr(error_msg))) + '\\n\\tmessage={0}'.format(message))\n        warnings.warn(warning_msg, exceptions.WebhookFailedEmitWarning)", "docstring": "separated Requests logic for easier testing\n\nArgs:\nmessage (str): actual logging string to be passed to REST endpoint\n\nTodo:\n* Requests.text/json return for better testing options", "source": "codesearchnet"}
{"code": "def instantiate_resolver(self, name, args):\n        \n        if name not in self._known_resolvers:\n            raise ArgumentError(\"Attempting to instantiate unknown dependency resolver\", name=name)\n\n        return self._known_resolvers[name](args)", "docstring": "Directly instantiate a dependency resolver by name with the given arguments\n\nArgs:\nname (string): The name of the class that we want to instantiate\nargs (dict): The arguments to pass to the resolver factory\n\nReturns:\nDependencyResolver", "source": "juraj-google-style"}
{"code": "def export_node(self, n) -> Dict[str, Union[str, List[str]]]:\n        \n        node_dict = {\n            \"name\": n[0],\n            \"units\": _get_units(n[0]),\n            \"dtype\": _get_dtype(n[0]),\n            \"arguments\": list(self.predecessors(n[0])),\n        }\n\n        if not n[1].get(\"indicators\") is None:\n            for indicator in n[1][\"indicators\"].values():\n                if \"dataset\" in indicator.__dict__:\n                    del indicator.__dict__[\"dataset\"]\n\n            node_dict[\"indicators\"] = [\n                _process_datetime(indicator.__dict__)\n                for indicator in n[1][\"indicators\"].values()\n            ]\n        else:\n            node_dict[\"indicators\"] = None\n\n        return node_dict", "docstring": "Return dict suitable for exporting to JSON.\n\nArgs:\nn: A dict representing the data in a networkx AnalysisGraph node.\n\nReturns:\nThe node dict with additional fields for name, units, dtype, and\narguments.", "source": "juraj-google-style"}
{"code": "def _CreateOutputFileHandles(self, output_type):\n    gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type)\n    gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, 'wb', self.GZIP_COMPRESSION_LEVEL, gzip_filehandle_parent)\n    self.temp_output_trackers[output_type] = TempOutputTracker(output_type=output_type, gzip_filehandle=gzip_filehandle, gzip_filehandle_parent=gzip_filehandle_parent)\n    return self.temp_output_trackers[output_type]", "docstring": "Creates a new gzipped output tempfile for the output type.\n\nWe write to JSON data to gzip_filehandle to get compressed data. We hold a\nreference to the original filehandle (gzip_filehandle_parent) so we can pass\nthe gzip data to bigquery.\n\nArgs:\noutput_type: string of export type to be used in filename. e.g.\nExportedFile\n\nReturns:\nA TempOutputTracker object", "source": "codesearchnet"}
{"code": "def __init__(self, match_type=MatchType.OFPMT_OXM, oxm_match_fields=None):\n        \n        super().__init__()\n        self.match_type = match_type\n        self.oxm_match_fields = oxm_match_fields or OxmMatchFields()\n\n        self._update_match_length()", "docstring": "Describe the flow match header structure.\n\nArgs:\nmatch_type (MatchType): One of OFPMT_* (MatchType) items.\nlength (int): Length of Match (excluding padding) followed by\nExactly (length - 4) (possibly 0) bytes containing\nOXM TLVs, then exactly ((length + 7)/8*8 - length)\n(between 0 and 7) bytes of all-zero bytes.\noxm_fields (OxmMatchFields): Sample description.", "source": "juraj-google-style"}
{"code": "class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin):\n\n    def __init__(self, config: MaskFormerSwinConfig):\n        super().__init__(config)\n        super()._init_backbone(config)\n        self.model = MaskFormerSwinModel(config)\n        if 'stem' in self.out_features:\n            raise ValueError(\"This backbone does not support 'stem' in the `out_features`.\")\n        self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))]\n        self.hidden_states_norms = nn.ModuleList([nn.LayerNorm(num_channels) for num_channels in self.num_features[1:]])\n        self.post_init()\n\n    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        outputs = self.model(pixel_values, output_hidden_states=True, output_attentions=output_attentions, return_dict=True)\n        hidden_states = outputs.hidden_states[1:]\n        spatial_dimensions: Tuple[Tuple[int, int]] = outputs.hidden_states_spatial_dimensions\n        feature_maps = ()\n        for i, (hidden_state, stage, (height, width)) in enumerate(zip(hidden_states, self.stage_names[1:], spatial_dimensions)):\n            norm = self.hidden_states_norms[i]\n            hidden_state_unpolled = hidden_state[-1]\n            hidden_state_norm = norm(hidden_state_unpolled)\n            batch_size, _, hidden_size = hidden_state_norm.shape\n            hidden_state_permuted = hidden_state_norm.permute(0, 2, 1).view((batch_size, hidden_size, height, width)).contiguous()\n            if stage in self.out_features:\n                feature_maps += (hidden_state_permuted,)\n        if not return_dict:\n            output = (feature_maps,)\n            if output_hidden_states:\n                output += (outputs.hidden_states,)\n            if output_attentions:\n                output += (outputs.attentions,)\n            return output\n        return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)", "docstring": "MaskFormerSwin backbone, designed especially for the MaskFormer framework.\n\nThis classes reshapes `hidden_states` from (`batch_size, sequence_length, hidden_size)` to (`batch_size,\nnum_channels, height, width)`). It also adds additional layernorms after each stage.\n\nArgs:\nconfig (`MaskFormerSwinConfig`):\nThe configuration used by [`MaskFormerSwinModel`].", "source": "github-repos"}
{"code": "def normalize_full_name_false(decl):\n    \n    if decl.cache.normalized_full_name_false is None:\n        decl.cache.normalized_full_name_false = normalize(\n            declaration_utils.full_name(decl, with_defaults=False))\n    return decl.cache.normalized_full_name_false", "docstring": "Cached variant of normalize\n\nArgs:\ndecl (declaration.declaration_t): the declaration\n\nReturns:\nstr: normalized name", "source": "juraj-google-style"}
{"code": "def step(self, actions, step_mul=None):\n    \n    if self._state == environment.StepType.LAST:\n      return self.reset()\n\n    skip = not self._ensure_available_actions\n    self._parallel.run(\n        (c.act, f.transform_action(o.observation, a, skip_available=skip))\n        for c, f, o, a in zip(\n            self._controllers, self._features, self._obs, actions))\n\n    self._state = environment.StepType.MID\n    return self._step(step_mul)", "docstring": "Apply actions, step the world forward, and return observations.\n\nArgs:\nactions: A list of actions meeting the action spec, one per agent.\nstep_mul: If specified, use this rather than the environment's default.\n\nReturns:\nA tuple of TimeStep namedtuples, one per agent.", "source": "juraj-google-style"}
{"code": "def parse_result(line):\n    \n    \n    if line.startswith(\"Problem\"):\n        raise RuntimeError(\"Login credentials seems to be wrong\")\n\n    result = {\n        'p_value': None,\n        'gene_symbols': [],\n        'disease_nr': None,\n        'disease_source': None,\n        'description': None,\n        'raw_line': line\n    }\n    \n    result['raw_line'] = line.rstrip()\n    result_line = line.rstrip().split('\\t')\n    \n    try:\n        result['p_value'] = float(result_line[0])\n    except ValueError:\n        pass\n\n    try:\n        medical_litterature = result_line[2].split(':')\n        result['disease_source'] = medical_litterature[0]\n        result['disease_nr'] = int(medical_litterature[1])\n    except IndexError:\n        pass\n\n    try:\n        description = result_line[3]\n        result['description'] = description\n    except IndexError:\n        pass\n\n    if len(result_line) > 4:\n        for gene_symbol in result_line[4].split(','):\n            result['gene_symbols'].append(gene_symbol.strip())\n\n    return result", "docstring": "Parse the result line of a phenomizer request.\n\nArguments:\nline (str): A raw output line from phenomizer\n\nReturns:\nresult (dict): A dictionary with the phenomizer info:\n{\n'p_value': float,\n'gene_symbols': list(str),\n'disease_nr': int,\n'disease_source': str,\n'description': str,\n'raw_line': str\n}", "source": "juraj-google-style"}
{"code": "def screenshot(self, filename=None):\n        \n        image = self.d.screenshot()\n        if self.rotation:\n            method = getattr(Image, 'ROTATE_{}'.format(self.rotation*90))\n            image = image.transpose(method)\n        if filename:\n            image.save(filename)\n        return image", "docstring": "Take ios screenshot\nArgs:\n- filename(string): optional\nReturns:\nPIL.Image object", "source": "juraj-google-style"}
{"code": "def extend_webfont_settings(webfont_settings):\n    if (not webfont_settings.get('fontdir_path', False)):\n        raise IcomoonSettingsError(\"Webfont settings miss the required key item 'fontdir_path'\")\n    if (not webfont_settings.get('csspart_path', False)):\n        webfont_settings['csspart_path'] = None\n    return webfont_settings", "docstring": "Validate a webfont settings and optionally fill missing ``csspart_path``\noption.\n\nArgs:\nwebfont_settings (dict): Webfont settings (an item value from\n``settings.ICOMOON_WEBFONTS``).\n\nReturns:\ndict: Webfont settings", "source": "codesearchnet"}
{"code": "def _get_break_loop_node(break_node):\n    loop_nodes = (astroid.For, astroid.While)\n    parent = break_node.parent\n    while ((not isinstance(parent, loop_nodes)) or (break_node in getattr(parent, 'orelse', []))):\n        break_node = parent\n        parent = parent.parent\n        if (parent is None):\n            break\n    return parent", "docstring": "Returns the loop node that holds the break node in arguments.\n\nArgs:\nbreak_node (astroid.Break): the break node of interest.\n\nReturns:\nastroid.For or astroid.While: the loop node holding the break node.", "source": "codesearchnet"}
{"code": "def long_id(self, sample):\n        \n        if self.grid == 'WAC':\n            lon = self.CENTER_LONGITUDE + (sample - self.SAMPLE_PROJECTION_OFFSET - 1)\\\n                * self.MAP_SCALE * 1e-3 / (self.A_AXIS_RADIUS * np.cos(self.CENTER_LATITUDE * np.pi / 180.0))\n            return lon * 180 / np.pi\n        else:\n            lon = float(self.CENTER_LONGITUDE) + \\\n                (sample - float(self.SAMPLE_PROJECTION_OFFSET) - 1)\\\n                / float(self.MAP_RESOLUTION)\n            return lon", "docstring": "Return the corresponding longitude\n\nArgs:\nsample (int): sample number on a line\n\nReturns:\nCorreponding longidude in degree", "source": "juraj-google-style"}
{"code": "def add_user_to_template(self, template_id, account_id=None, email_address=None):\n        \n        return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address)", "docstring": "Gives the specified Account access to the specified Template\n\nArgs:\n\ntemplate_id (str):      The id of the template to give the account access to\n\naccount_id (str):       The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided.\n\nemail_address (str):    The email address of the account to give access to.\n\nReturns:\nA Template object", "source": "juraj-google-style"}
{"code": "def getlines(self, bufnr=None):\n        \n        buf = self._vim.buffers[bufnr] if bufnr else self._vim.current.buffer\n        return buf[:]", "docstring": "Get all lines of a buffer as a list.\n\nArgs:\nbufnr (Optional[int]): A Vim buffer number, current if ``None``.\n\nReturns:\nList[str]", "source": "juraj-google-style"}
{"code": "def convert_segmentation_to_rle(segmentation):\n    segment_ids = torch.unique(segmentation)\n    run_length_encodings = []\n    for idx in segment_ids:\n        mask = torch.where(segmentation == idx, 1, 0)\n        rle = binary_mask_to_rle(mask)\n        run_length_encodings.append(rle)\n    return run_length_encodings", "docstring": "Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.\n\nArgs:\nsegmentation (`torch.Tensor` or `numpy.array`):\nA segmentation map of shape `(height, width)` where each value denotes a segment or class id.\nReturns:\n`List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.", "source": "github-repos"}
{"code": "def properties(self):\n    props = {}\n    for line in self.adb_shell(['getprop']).splitlines():\n        m = _PROP_PATTERN.match(line)\n        if m:\n            props[m.group('key')] = m.group('value')\n    return props", "docstring": "Android Properties, extracted from `adb shell getprop`\n\nReturns:\ndict of props, for\nexample:\n\n{'ro.bluetooth.dun': 'true'}", "source": "codesearchnet"}
{"code": "def _get_audios_and_audio_lengths(self, audios: AudioInput) -> Sequence['torch.Tensor', Sequence[int]]:\n    requires_backends(self, ['torch'])\n    if isinstance(audios, np.ndarray):\n        audios = torch.from_numpy(audios)\n    elif isinstance(audios, Sequence) and isinstance(audios[0], np.ndarray):\n        audios = [torch.from_numpy(arr) for arr in audios]\n    if isinstance(audios, torch.Tensor):\n        if audios.ndim == 1:\n            audios = audios.unsqueeze(0)\n        if not torch.is_floating_point(audios):\n            raise ValueError('Invalid audio provided. Audio should be a floating point between 0 and 1')\n        if audios.shape[0] > 1:\n            logger.warning('Audio samples are already collated; assuming they all have the same length')\n        lengths = [audios.shape[-1]] * audios.shape[0]\n        return (audios, lengths)\n    elif isinstance(audios, Sequence) and isinstance(audios[0], torch.Tensor):\n        if not torch.is_floating_point(audios[0]):\n            raise ValueError('Invalid audio provided. Audio should be a floating point between 0 and 1')\n        lengths = [audio.shape[-1] for audio in audios]\n        padding = [max(lengths) - length for length in lengths]\n        audios = [audio.view(1, -1) for audio in audios]\n        padded = [torch.nn.functional.pad(audio, (0, pad)) for audio, pad in zip(audios, padding)]\n        audios = torch.cat(padded, dim=0)\n        return (audios, lengths)\n    raise TypeError('Invalid audio provided. Audio should be a one or more torch tensors or numpy arrays')", "docstring": "Coerces audio inputs to torch tensors and extracts audio lengths prior to stacking.\n\nArgs:\naudios (`AudioInput`):\nAudio sequence, numpy array, or torch tensor.", "source": "github-repos"}
{"code": "def get_neighbors(self, site, r):\n        \n        nn = self.get_sites_in_sphere(site.coords, r)\n        return [(s, dist) for (s, dist) in nn if site != s]", "docstring": "Get all neighbors to a site within a sphere of radius r.  Excludes the\nsite itself.\n\nArgs:\nsite (Site): Site at the center of the sphere.\nr (float): Radius of sphere.\n\nReturns:\n[(site, dist) ...] since most of the time, subsequent processing\nrequires the distance.", "source": "juraj-google-style"}
{"code": "def get_message(routing_key, properties, body):\n    if (properties.headers is None):\n        _log.error('Message (body=%r) arrived without headers. A publisher is misbehaving!', body)\n        properties.headers = {}\n    try:\n        MessageClass = get_class(properties.headers['fedora_messaging_schema'])\n    except KeyError:\n        _log.error('Message (headers=%r, body=%r) arrived without a schema header. A publisher is misbehaving!', properties.headers, body)\n        MessageClass = Message\n    try:\n        severity = properties.headers['fedora_messaging_severity']\n    except KeyError:\n        _log.error('Message (headers=%r, body=%r) arrived without a severity. A publisher is misbehaving! Defaulting to INFO.', properties.headers, body)\n        severity = INFO\n    if (properties.content_encoding is None):\n        _log.error('Message arrived without a content encoding')\n        properties.content_encoding = 'utf-8'\n    try:\n        body = body.decode(properties.content_encoding)\n    except UnicodeDecodeError as e:\n        _log.error('Unable to decode message body %r with %s content encoding', body, properties.content_encoding)\n        raise ValidationError(e)\n    try:\n        body = json.loads(body)\n    except ValueError as e:\n        _log.error('Failed to load message body %r, %r', body, e)\n        raise ValidationError(e)\n    message = MessageClass(body=body, topic=routing_key, properties=properties, severity=severity)\n    try:\n        message.validate()\n        _log.debug('Successfully validated message %r', message)\n    except jsonschema.exceptions.ValidationError as e:\n        _log.error('Message validation of %r failed: %r', message, e)\n        raise ValidationError(e)\n    return message", "docstring": "Construct a Message instance given the routing key, the properties and the\nbody received from the AMQP broker.\n\nArgs:\nrouting_key (str): The AMQP routing key (will become the message topic)\nproperties (pika.BasicProperties): the AMQP properties\nbody (bytes): The encoded message body\n\nRaises:\nValidationError: If Message validation failed or message body\ndocoding/loading is impossible.", "source": "codesearchnet"}
{"code": "def Value(self, p):\n        \n        if p < 0 or p > 1:\n            raise ValueError('Probability p must be in range [0, 1]')\n\n        if p == 0: return self.xs[0]\n        if p == 1: return self.xs[-1]\n        index = bisect.bisect(self.ps, p)\n        if p == self.ps[index - 1]:\n            return self.xs[index - 1]\n        else:\n            return self.xs[index]", "docstring": "Returns InverseCDF(p), the value that corresponds to probability p.\n\nArgs:\np: number in the range [0, 1]\n\nReturns:\nnumber value", "source": "juraj-google-style"}
{"code": "def compose(f, *fs):\n    rfs = list(chain([f], fs))\n    rfs.reverse()\n\n    def composed(*args, **kwargs):\n        return reduce((lambda result, fn: fn(result)), rfs[1:], rfs[0](*args, **kwargs))\n    return composed", "docstring": "Compose functions right to left.\n\ncompose(f, g, h)(x) -> f(g(h(x)))\n\nArgs:\nf, *fs: The head and rest of a sequence of callables. The\nrightmost function passed can accept any arguments and\nthe returned function will have the same signature as\nthis last provided function.  All preceding functions\nmust be unary.\n\nReturns:\nThe composition of the argument functions. The returned\nfunction will accept the same arguments as the rightmost\npassed in function.", "source": "codesearchnet"}
{"code": "def change_kernel(self, kernel, return_dict=True):\n    if (type(kernel) != Kernel):\n        raise BadKernelObject('Use Kernel object')\n    return self._perform_action({'type': 'change_kernel', 'kernel': kernel.id}, return_dict)", "docstring": "Change the kernel to a new one\n\nArgs:\nkernel : instance of digitalocean.Kernel.Kernel\n\nOptional Args:\nreturn_dict (bool): Return a dict when True (default),\notherwise return an Action.\n\nReturns dict or Action", "source": "codesearchnet"}
{"code": "def disable(self):\n    self.client.api.disable_plugin(self.name)\n    self.reload()", "docstring": "Disable the plugin.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def has_all_finite_radius_neurites(data_wrapper, threshold=0.0):\n    db = data_wrapper.data_block\n    neurite_ids = np.in1d(db[(:, COLS.TYPE)], POINT_TYPE.NEURITES)\n    zero_radius_ids = (db[(:, COLS.R)] <= threshold)\n    bad_pts = np.array(db[(neurite_ids & zero_radius_ids)][(:, COLS.ID)], dtype=int).tolist()\n    return CheckResult((len(bad_pts) == 0), bad_pts)", "docstring": "Check that all points with neurite type have a finite radius\n\nReturns:\nCheckResult with result and list of IDs of neurite points with zero radius", "source": "codesearchnet"}
{"code": "def build(X_df=None, y_df=None):\n    \n    if X_df is None:\n        X_df, _ = load_data()\n    if y_df is None:\n        _, y_df = load_data()\n\n    features = get_contrib_features()\n    mapper_X = ballet.feature.make_mapper(features)\n    X = mapper_X.fit_transform(X_df)\n\n    encoder_y = get_target_encoder()\n    y = encoder_y.fit_transform(y_df)\n\n    return {\n        'X_df': X_df,\n        'features': features,\n        'mapper_X': mapper_X,\n        'X': X,\n        'y_df': y_df,\n        'encoder_y': encoder_y,\n        'y': y,\n    }", "docstring": "Build features and target\n\nArgs:\nX_df (DataFrame): raw variables\ny_df (DataFrame): raw target\n\nReturns:\ndict with keys X_df, features, mapper_X, X, y_df, encoder_y, y", "source": "juraj-google-style"}
{"code": "def find1(self, kw: YangIdentifier, arg: str = None,\n              pref: YangIdentifier = None,\n              required: bool = False) -> Optional[\"Statement\"]:\n        \n        for sub in self.substatements:\n            if (sub.keyword == kw and sub.prefix == pref and\n                    (arg is None or sub.argument == arg)):\n                return sub\n        if required:\n            raise StatementNotFound(str(self), kw)", "docstring": "Return first substatement with the given parameters.\n\nArgs:\nkw: Statement keyword (local part for extensions).\narg: Argument (all arguments will match if ``None``).\npref: Keyword prefix (``None`` for built-in statements).\nrequired: Should an exception be raised on failure?\n\nRaises:\nStatementNotFound: If `required` is ``True`` and the\nstatement is not found.", "source": "juraj-google-style"}
{"code": "def _validate_first_message(cls, msg):\n    data = cls._unpack_message(msg)\n    logger.debug(data)\n    if (data != cls.RTM_HANDSHAKE):\n        raise SlackApiError('Unexpected response: {!r}'.format(data))\n    logger.info('Joined real-time messaging.')", "docstring": "Check the first message matches the expected handshake.\n\nNote:\nThe handshake is provided as :py:attr:`RTM_HANDSHAKE`.\n\nArguments:\nmsg (:py:class:`aiohttp.Message`): The message to validate.\n\nRaises:\n:py:class:`SlackApiError`: If the data doesn't match the\nexpected handshake.", "source": "codesearchnet"}
{"code": "def attribute(self, attr_type, attr_value, displayed=False, source=None, unique=True, formatter=None):\n    attr = Attribute(attr_type, attr_value, displayed, source, formatter)\n    if (unique == 'Type'):\n        for attribute_data in self._attributes:\n            if (attribute_data.type == attr_type):\n                attr = attribute_data\n                break\n        else:\n            self._attributes.append(attr)\n    elif (unique is True):\n        for attribute_data in self._attributes:\n            if ((attribute_data.type == attr_type) and (attribute_data.value == attr.value)):\n                attr = attribute_data\n                break\n        else:\n            self._attributes.append(attr)\n    elif (unique is False):\n        self._attributes.append(attr)\n    return attr", "docstring": "Return instance of Attribute\n\nunique:\n* False - Attribute type:value can be duplicated.\n* Type - Attribute type has to be unique (e.g., only 1 Description Attribute).\n* True - Attribute type:value combo must be unique.\n\nArgs:\nattr_type (str): The ThreatConnect defined attribute type.\nattr_value (str): The value for this attribute.\ndisplayed (bool, default:false): If True the supported attribute will be marked for\ndisplay.\nsource (str, optional): The source value for this attribute.\nunique (bool|string, optional): Control attribute creation.\nformatter (method, optional): A method that takes a single attribute value and returns a\nsingle formatted value.\n\nReturns:\nobj: An instance of Attribute.", "source": "codesearchnet"}
{"code": "def DownloadDir(aff4_path, output_dir, bufsize=8192, preserve_path=True):\n  \n  if not os.path.isdir(output_dir):\n    os.makedirs(output_dir)\n  fd = aff4.FACTORY.Open(aff4_path)\n  for child in fd.OpenChildren():\n    if preserve_path:\n      \n      full_dir = utils.JoinPath(output_dir, child.urn.Path())\n      full_dir = os.path.dirname(full_dir)\n      if not os.path.isdir(full_dir):\n        os.makedirs(full_dir)\n      outfile = os.path.join(full_dir, child.urn.Basename())\n    else:\n      outfile = os.path.join(output_dir, child.urn.Basename())\n    logging.info(u\"Downloading %s to %s\", child.urn, outfile)\n    with open(outfile, \"wb\") as out_fd:\n      try:\n        buf = child.Read(bufsize)\n        while buf:\n          out_fd.write(buf)\n          buf = child.Read(bufsize)\n      except IOError as e:\n        logging.error(\"Failed to read %s. Err: %s\", child.urn, e)", "docstring": "Take an aff4 path and download all files in it to output_dir.\n\nArgs:\naff4_path: Any aff4 path as a string\noutput_dir: A local directory to write to, will be created if not there.\nbufsize: Buffer size to use.\npreserve_path: If set all paths will be created.  Note that this works for\ncollections as well. It will download all files in the collection.  This\nonly downloads files that are already in the datastore, it doesn't queue\nanything on the client.", "source": "juraj-google-style"}
{"code": "def get_enterprise_user_id(self, obj):\n    enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()\n    return (enterprise_learner and enterprise_learner.id)", "docstring": "Get enterprise user id from user object.\n\nArguments:\nobj (User): Django User object\n\nReturns:\n(int): Primary Key identifier for enterprise user object.", "source": "codesearchnet"}
{"code": "def get(issue_id, issue_type_id):\n        \n        return db.Issue.find_one(\n            Issue.issue_id == issue_id,\n            Issue.issue_type_id == issue_type_id\n        )", "docstring": "Return issue by ID\n\nArgs:\nissue_id (str): Unique Issue identifier\nissue_type_id (str): Type of issue to get\n\nReturns:\n:obj:`Issue`: Returns Issue object if found, else None", "source": "juraj-google-style"}
{"code": "def which(self, cmd, parent_environ=None, fallback=False):\n    env = self.get_environ(parent_environ=parent_environ)\n    path = which(cmd, env=env)\n    if (fallback and (path is None)):\n        path = which(cmd)\n    return path", "docstring": "Find a program in the resolved environment.\n\nArgs:\ncmd: String name of the program to find.\nparent_environ: Environment to interpret the context within,\ndefaults to os.environ if None.\nfallback: If True, and the program is not found in the context,\nthe current environment will then be searched.\n\nReturns:\nPath to the program, or None if the program was not found.", "source": "codesearchnet"}
{"code": "def bytes_to_readable_str(num_bytes, include_b=False):\n  \n\n  if num_bytes is None:\n    return str(num_bytes)\n  if num_bytes < 1024:\n    result = \"%d\" % num_bytes\n  elif num_bytes < 1048576:\n    result = \"%.2fk\" % (num_bytes / float(1 << 10))\n  elif num_bytes < 1073741824:\n    result = \"%.2fM\" % (num_bytes / float(1 << 20))\n  else:\n    result = \"%.2fG\" % (num_bytes / float(1 << 30))\n\n  if include_b:\n    result += \"B\"\n  return result", "docstring": "Generate a human-readable string representing number of bytes.\n\nThe units B, kB, MB and GB are used.\n\nArgs:\nnum_bytes: (`int` or None) Number of bytes.\ninclude_b: (`bool`) Include the letter B at the end of the unit.\n\nReturns:\n(`str`) A string representing the number of bytes in a human-readable way,\nincluding a unit at the end.", "source": "juraj-google-style"}
{"code": "def pack(value, nbits=None):\n    if (nbits is None):\n        nbits = (pack_size(value) * BITS_PER_BYTE)\n    elif (nbits <= 0):\n        raise ValueError('Given number of bits must be greater than 0.')\n    buf_size = int(math.ceil((nbits / float(BITS_PER_BYTE))))\n    buf = (ctypes.c_uint8 * buf_size)()\n    for (idx, _) in enumerate(buf):\n        buf[idx] = ((value >> (idx * BITS_PER_BYTE)) & 255)\n    return buf", "docstring": "Packs a given value into an array of 8-bit unsigned integers.\n\nIf ``nbits`` is not present, calculates the minimal number of bits required\nto represent the given ``value``.  The result is little endian.\n\nArgs:\nvalue (int): the integer value to pack\nnbits (int): optional number of bits to use to represent the value\n\nReturns:\nAn array of ``ctypes.c_uint8`` representing the packed ``value``.\n\nRaises:\nValueError: if ``value < 0`` and ``nbits`` is ``None`` or ``nbits <= 0``.\nTypeError: if ``nbits`` or ``value`` are not numbers.", "source": "codesearchnet"}
{"code": "def _ReadPropertySet(self, property_set):\n    for property_section in property_set.sections:\n        if (property_section.class_identifier != self._CLASS_IDENTIFIER):\n            continue\n        for property_value in property_section.properties:\n            property_name = self._PROPERTY_NAMES.get(property_value.identifier, None)\n            if (not property_name):\n                property_name = '0x{0:04}'.format(property_value.identifier)\n            value = self._GetValueAsObject(property_value)\n            if self._PROPERTY_VALUE_MAPPINGS:\n                value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get(property_name, None)\n                if value_callback_name:\n                    value_callback_method = getattr(self, value_callback_name, None)\n                    if value_callback_method:\n                        value = value_callback_method(value)\n            if (property_name in self._DATE_TIME_PROPERTIES):\n                properties_dict = self.date_time_properties\n                value = dfdatetime_filetime.Filetime(timestamp=value)\n            else:\n                properties_dict = self._properties\n            if (property_name not in properties_dict):\n                properties_dict[property_name] = value", "docstring": "Reads properties from a property set.\n\nArgs:\nproperty_set (pyolecf.property_set): OLECF property set.", "source": "codesearchnet"}
{"code": "def _to_backend_layout(tensor_layout):\n    if tensor_layout.device_mesh is None:\n        raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.')\n    partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)\n    jax_mesh = tensor_layout.device_mesh.backend_mesh\n    return jax.sharding.NamedSharding(jax_mesh, partition_spec)", "docstring": "Convert the TensorLayout to JAX backend specific Sharding.\n\nArgs:\ntensor_layout: TensorLayout instance to convert.\n\nReturns:\nA `jax.sharding.NamedSharding` instance.", "source": "github-repos"}
{"code": "def _GetScanner(self, specification_store, signature_identifiers):\n    \n    if not specification_store:\n      return None\n\n    scanner_object = pysigscan.scanner()\n\n    for format_specification in specification_store.specifications:\n      if format_specification.identifier not in signature_identifiers:\n        continue\n\n      for signature in format_specification.signatures:\n        pattern_offset = signature.offset\n        if pattern_offset is None:\n          signature_flags = pysigscan.signature_flags.NO_OFFSET\n        elif pattern_offset < 0:\n          pattern_offset *= -1\n          signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END\n        else:\n          signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START\n\n        scanner_object.add_signature(\n            signature.identifier, pattern_offset, signature.pattern,\n            signature_flags)\n\n      self._signature_identifiers.append(format_specification.identifier)\n\n    return scanner_object", "docstring": "Initializes the scanner form the specification store.\n\nArgs:\nspecification_store (FormatSpecificationStore): a specification store.\nsignature_identifiers (list[str]): signature identifiers.\n\nReturns:\npysigscan.scanner: signature scanner or None.", "source": "juraj-google-style"}
{"code": "def ReadArtifactDefinitionValues(self, artifact_definition_values):\n    \n    if not artifact_definition_values:\n      raise errors.FormatError('Missing artifact definition values.')\n\n    different_keys = (\n        set(artifact_definition_values) - definitions.TOP_LEVEL_KEYS)\n    if different_keys:\n      different_keys = ', '.join(different_keys)\n      raise errors.FormatError('Undefined keys: {0:s}'.format(different_keys))\n\n    name = artifact_definition_values.get('name', None)\n    if not name:\n      raise errors.FormatError('Invalid artifact definition missing name.')\n\n    \n    description = artifact_definition_values.get('doc', None)\n    if not description:\n      raise errors.FormatError(\n          'Invalid artifact definition: {0:s} missing description.'.format(\n              name))\n\n    artifact_definition = artifact.ArtifactDefinition(\n        name, description=description)\n\n    if artifact_definition_values.get('collectors', []):\n      raise errors.FormatError(\n          'Invalid artifact definition: {0:s} still uses collectors.'.format(\n              name))\n\n    urls = artifact_definition_values.get('urls', [])\n    if not isinstance(urls, list):\n      raise errors.FormatError(\n          'Invalid artifact definition: {0:s} urls is not a list.'.format(\n              name))\n\n    \n    artifact_definition.conditions = artifact_definition_values.get(\n        'conditions', [])\n    artifact_definition.provides = artifact_definition_values.get(\n        'provides', [])\n    self._ReadLabels(artifact_definition_values, artifact_definition, name)\n    self._ReadSupportedOS(artifact_definition_values, artifact_definition, name)\n    artifact_definition.urls = urls\n    self._ReadSources(artifact_definition_values, artifact_definition, name)\n\n    return artifact_definition", "docstring": "Reads an artifact definition from a dictionary.\n\nArgs:\nartifact_definition_values (dict[str, object]): artifact definition\nvalues.\n\nReturns:\nArtifactDefinition: an artifact definition.\n\nRaises:\nFormatError: if the format of the artifact definition is not set\nor incorrect.", "source": "juraj-google-style"}
{"code": "def update_fitness(objective_function, particle):\n    \n    fitness = objective_function(particle.position)\n    best_fitness = particle.best_fitness\n    cmp = comparator(fitness)\n    if best_fitness is None or cmp(fitness, best_fitness):\n        best_position = particle.position\n        return particle._replace(fitness=fitness,\n                                 best_fitness=fitness,\n                                 best_position=best_position)\n    else:\n        return particle._replace(fitness=fitness)", "docstring": "Calculates and updates the fitness and best_fitness of a particle.\n\nFitness is calculated using the 'problem.fitness' function.\n\nArgs:\nproblem: The optimization problem encapsulating the fitness function\nand optimization type.\nparticle: cipy.algorithms.pso.Particle: Particle to update the fitness\nfor.\n\nReturns:\ncipy.algorithms.pso.Particle: A new particle with the updated fitness.", "source": "juraj-google-style"}
{"code": "def delete_storage_account(access_token, subscription_id, rgname, account_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.Storage/storageAccounts/', account_name,\n                        '?api-version=', STORAGE_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a storage account in the specified resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new storage account.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def validate_split(query):\n    if query.order:\n        raise SplitNotPossibleError('Query cannot have any sort orders.')\n    if query.limit is not None:\n        raise SplitNotPossibleError('Query cannot have a limit set.')\n    for filter in query.filters:\n        if isinstance(filter[1], ValueProvider):\n            filter_operator = filter[1].get()\n        else:\n            filter_operator = filter[1]\n        if filter_operator in ['<', '<=', '>', '>=']:\n            raise SplitNotPossibleError('Query cannot have any inequality filters.')", "docstring": "Verifies that the given query can be properly scattered.\n\nNote that equality and ancestor filters are allowed, however they may result\nin inefficient sharding.\n\nRaises:\nQuerySplitterError if split could not be performed owing to query\nparameters.", "source": "github-repos"}
{"code": "def FormatTree(tree, style_config=None, lines=None):\n    style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))\n    comment_splicer.SpliceComments(tree)\n    continuation_splicer.SpliceContinuations(tree)\n    subtype_assigner.AssignSubtypes(tree)\n    identify_container.IdentifyContainers(tree)\n    split_penalty.ComputeSplitPenalties(tree)\n    blank_line_calculator.CalculateBlankLines(tree)\n    llines = pytree_unwrapper.UnwrapPyTree(tree)\n    for lline in llines:\n        lline.CalculateFormattingInformation()\n    lines = _LineRangesToSet(lines)\n    _MarkLinesToFormat(llines, lines)\n    return reformatter.Reformat(_SplitSemicolons(llines), lines)", "docstring": "Format a parsed lib2to3 pytree.\n\nThis provides an alternative entry point to YAPF.\n\nArguments:\ntree: (pytree.Node) The root of the pytree to format.\nstyle_config: (string) Either a style name or a path to a file that contains\nformatting style settings. If None is specified, use the default style\nas set in style.DEFAULT_STYLE_FACTORY\nlines: (list of tuples of integers) A list of tuples of lines, [start, end],\nthat we want to format. The lines are 1-based indexed. It can be used by\nthird-party code (e.g., IDEs) when reformatting a snippet of code rather\nthan a whole file.\n\nReturns:\nThe source formatted according to the given formatting style.", "source": "github-repos"}
{"code": "def merkleroot(hashes):\n    \n    \n    \n    \n    \n    \n    if not hashes:\n        return sha3_256(b'').hexdigest()\n    \n    if len(hashes) == 1:\n        return hexlify(hashes[0]).decode()\n    if len(hashes) % 2 == 1:\n        hashes.append(hashes[-1])\n    parent_hashes = [\n        sha3_256(hashes[i] + hashes[i+1]).digest()\n        for i in range(0, len(hashes)-1, 2)\n    ]\n    return merkleroot(parent_hashes)", "docstring": "Computes the merkle root for a given list.\n\nArgs:\nhashes (:obj:`list` of :obj:`bytes`): The leaves of the tree.\n\nReturns:\nstr: Merkle root in hexadecimal form.", "source": "juraj-google-style"}
{"code": "def resnet18(pretrained=False, **kwargs):\n    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n    return model", "docstring": "Constructs a ResNet-18 model.\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "codesearchnet"}
{"code": "def source_lines(self, host_name, file_path):\n    offset = self._host_name_file_path_to_offset[host_name, file_path]\n    return list(self._reader.read_source_files_event(offset).source_file.lines)", "docstring": "Read the line-by-line content of a source file.\n\nArgs:\nhost_name: Host name on which the source file is located.\nfile_path: File path at which the source file is located.\n\nReturns:\nLines of the source file as a `list` of `str`s.", "source": "github-repos"}
{"code": "def update_acmg(self, institute_obj, case_obj, user_obj, link, variant_obj, acmg_str):\n    self.create_event(institute=institute_obj, case=case_obj, user=user_obj, link=link, category='variant', verb='acmg', variant=variant_obj, subject=variant_obj['display_name'])\n    LOG.info('Setting ACMG to {} for: {}'.format(acmg_str, variant_obj['display_name']))\n    if (acmg_str is None):\n        updated_variant = self.variant_collection.find_one_and_update({'_id': variant_obj['_id']}, {'$unset': {'acmg_classification': 1}}, return_document=pymongo.ReturnDocument.AFTER)\n    else:\n        updated_variant = self.variant_collection.find_one_and_update({'_id': variant_obj['_id']}, {'$set': {'acmg_classification': REV_ACMG_MAP[acmg_str]}}, return_document=pymongo.ReturnDocument.AFTER)\n    LOG.debug('Variant updated')\n    return updated_variant", "docstring": "Create an event for updating the ACMG classification of a variant.\n\nArguments:\ninstitute_obj (dict): A Institute object\ncase_obj (dict): Case object\nuser_obj (dict): A User object\nlink (str): The url to be used in the event\nvariant_obj (dict): A variant object\nacmg_str (str): The new ACMG classification string\n\nReturns:\nupdated_variant", "source": "codesearchnet"}
{"code": "def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None):\n    if not isinstance(outputs, dict):\n        outputs = {single_output_default_name: outputs}\n    output_dict = {}\n    for key, value in outputs.items():\n        error_name = error_label or single_output_default_name\n        key = self._check_output_key(key, error_name)\n        if not isinstance(value, tensor.Tensor):\n            raise ValueError('{} output value must be a Tensor; got {}.'.format(error_name, value))\n        output_dict[key] = value\n    return output_dict", "docstring": "Wraps raw tensors as dicts and checks type.\n\nNote that we create a new dict here so that we can overwrite the keys\nif necessary.\n\nArgs:\noutputs: A `Tensor` or a dict of string to `Tensor`.\nsingle_output_default_name: A string key for use in the output dict\nif the provided `outputs` is a raw tensor.\nerror_label: descriptive string for use in error messages. If none,\nsingle_output_default_name will be used.\n\nReturns:\nA dict of tensors\n\nRaises:\nValueError: if the outputs dict keys are not strings or tuples of strings\nor the values are not Tensors.", "source": "github-repos"}
{"code": "def traverse_inorder(self, leaves=True, internal=True):\n        \n        c = self; s = deque(); done = False\n        while not done:\n            if c is None:\n                if len(s) == 0:\n                    done = True\n                else:\n                    c = s.pop()\n                    if (leaves and c.is_leaf()) or (internal and not c.is_leaf()):\n                        yield c\n                    if len(c.children) == 0:\n                        c = None\n                    elif len(c.children) == 2:\n                        c = c.children[1]\n                    else:\n                        raise RuntimeError(INORDER_NONBINARY)\n            else:\n                s.append(c)\n                if len(c.children) == 0:\n                    c = None\n                elif len(c.children) == 2:\n                    c = c.children[0]\n                else:\n                    raise RuntimeError(INORDER_NONBINARY)", "docstring": "Perform an inorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def find_module_id_defining_flag(self, flagname, default=None):\n    \n    registered_flag = self._flags().get(flagname)\n    if registered_flag is None:\n      return default\n    for module_id, flags in six.iteritems(self.flags_by_module_id_dict()):\n      for flag in flags:\n        \n        \n        \n        if (flag.name == registered_flag.name and\n            flag.short_name == registered_flag.short_name):\n          return module_id\n    return default", "docstring": "Return the ID of the module defining this flag, or default.\n\nArgs:\nflagname: str, name of the flag to lookup.\ndefault: Value to return if flagname is not defined. Defaults\nto None.\n\nReturns:\nThe ID of the module which registered the flag with this name.\nIf no such module exists (i.e. no flag with this name exists),\nwe return default.", "source": "juraj-google-style"}
{"code": "def exec_python(attr, src, executable=\"python\"):\n    \n    import subprocess\n\n    if isinstance(src, basestring):\n        src = [src]\n\n    p = popen([executable, \"-c\", \"; \".join(src)],\n              stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    out, err = p.communicate()\n\n    if p.returncode:\n        from rez.exceptions import InvalidPackageError\n        raise InvalidPackageError(\n            \"Error determining package attribute '%s':\\n%s\" % (attr, err))\n\n    return out.strip()", "docstring": "Runs a python subproc to calculate a package attribute.\n\nArgs:\nattr (str): Name of package attribute being created.\nsrc (list of str): Python code to execute, will be converted into\nsemicolon-delimited single line of code.\n\nReturns:\nstr: Output of python process.", "source": "juraj-google-style"}
{"code": "class RunScoreAndLearn(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, detector: AnomalyDetector):\n        self._detector = detector\n\n    def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        return input | beam.ParDo(_ScoreAndLearnDoFn(self._detector.to_spec()))", "docstring": "Applies the _ScoreAndLearnDoFn to a PCollection of data.\n\nThis PTransform scores and learns from data points using an anomaly\ndetection model.\n\nArgs:\ndetector: The anomaly detection model to use.", "source": "github-repos"}
{"code": "def _attempt_shard_retry(self, shard_state, tstate):\n    shard_attempts = (shard_state.retries + 1)\n    if (shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS):\n        logging.warning('Shard attempt %s exceeded %s max attempts.', shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)\n        return self._TASK_DIRECTIVE.FAIL_TASK\n    if (tstate.output_writer and (not tstate.output_writer._supports_shard_retry(tstate))):\n        logging.warning('Output writer %s does not support shard retry.', tstate.output_writer.__class__.__name__)\n        return self._TASK_DIRECTIVE.FAIL_TASK\n    shard_state.reset_for_retry()\n    logging.warning('Shard %s attempt %s failed with up to %s attempts.', shard_state.shard_id, shard_state.retries, parameters.config.SHARD_MAX_ATTEMPTS)\n    output_writer = None\n    if tstate.output_writer:\n        output_writer = tstate.output_writer.create(tstate.mapreduce_spec, shard_state.shard_number, (shard_attempts + 1))\n    tstate.reset_for_retry(output_writer)\n    return self._TASK_DIRECTIVE.RETRY_SHARD", "docstring": "Whether to retry shard.\n\nThis method may modify shard_state and tstate to prepare for retry or fail.\n\nArgs:\nshard_state: model.ShardState for current shard.\ntstate: model.TransientShardState for current shard.\n\nReturns:\nA _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.\nFAIL_TASK otherwise.", "source": "codesearchnet"}
{"code": "def update(w: jax.Array, scores: jax.Array, rows: jax.Array, cols: jax.Array, Y: jax.Array) -> typing.Tuple[jax.Array, jax.Array, int, float]:\n    N = w.shape[0]\n    M = scores.shape[0]\n    res = w.dot(Y) - jax.ops.segment_sum((w * (2 * Y - 1)).take(rows), cols, M)\n    err = 0.5 - jnp.abs(res - 0.5)\n    best_feature_index: int = err.argmin()\n    positivity: bool = res.at[best_feature_index].get() < 0.5\n    err_min = err.at[best_feature_index].get()\n    amount: float = jnp.log((1 - err_min) / (err_min + EPS))\n    X_best = jnp.zeros(N, dtype=bool).at[jnp.where(cols == best_feature_index, rows, N)].set(True, mode='drop')\n    w = w * jnp.exp(amount * (Y ^ X_best == positivity))\n    w = w / w.sum()\n    score = amount * (2 * positivity - 1)\n    scores = scores.at[best_feature_index].add(score)\n    return (w, scores, best_feature_index, score)", "docstring": "Calculates the new weight vector and the contribution scores.\n\nArgs:\nw (jax.Array): A weight vector.\nscores (JAX array): Contribution scores of features.\nrows (jax.Array): Row indices of True values in the input data.\ncols (jax.Array): Column indices of True values in the input data.\nY (jax.Array): The target output.\n\n\nReturns:\nA tuple of following items:\n- w (jax.Array): The new weight vector.\n- scores (JAX array): The new contribution scores.\n- best_feature_index (int): The index of the best feature.\n- score (float): The newly added score for the best feature.", "source": "github-repos"}
{"code": "def blocks(self, name):\n        \n        b = self._blocks(name)\n        if b:\n            return b\n        return self._blocks(name.replace('?>?', ' '))", "docstring": "Search for defined blocks recursively.\nAllow '>' to be ignored. '.a .b' == '.a > .b'\nArgs:\nname (string): Search term\nReturns:\nBlock object OR False", "source": "juraj-google-style"}
{"code": "def __init__(self, z=None, x=None, label=None):\n        r\n        if label is not None:\n            a = Pauli.from_label(label)\n            self._z = a.z\n            self._x = a.x\n        else:\n            self._init_from_bool(z, x)", "docstring": "r\"\"\"Make the Pauli object.\n\nNote that, for the qubit index:\n- Order of z, x vectors is q_0 ... q_{n-1},\n- Order of pauli label is q_{n-1} ... q_0\n\nE.g.,\n- z and x vectors: z = [z_0 ... z_{n-1}], x = [x_0 ... x_{n-1}]\n- a pauli is $P_{n-1} \\otimes ... \\otimes P_0$\n\nArgs:\nz (numpy.ndarray): boolean, z vector\nx (numpy.ndarray): boolean, x vector\nlabel (str): pauli label", "source": "juraj-google-style"}
{"code": "def DumpMany(objs):\n  \n  precondition.AssertIterableType(objs, object)\n\n  text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True)\n\n  if compatibility.PY2:\n    text = text.decode(\"utf-8\")\n\n  return text", "docstring": "Stringifies a sequence of Python objects to a multi-document YAML.\n\nArgs:\nobjs: An iterable of Python objects to convert to YAML.\n\nReturns:\nA multi-document YAML representation of the given objects.", "source": "juraj-google-style"}
{"code": "def build(cls, value: object, binary: bool = False,\n              fallback: object = None) -> Union[Nil, 'String']:\n        \n        if value is None:\n            if fallback is None:\n                return Nil()\n            else:\n                return cls.build(fallback, binary)\n        elif not value:\n            return QuotedString(b'')\n        elif isinstance(value, bytes):\n            ascii_ = value\n        elif isinstance(value, memoryview):\n            ascii_ = bytes(value)\n        elif hasattr(value, '__bytes__'):\n            ascii_ = bytes(cast(SupportsBytes, value))\n        elif isinstance(value, str) or hasattr(value, '__str__'):\n            value = str(value)\n            try:\n                ascii_ = bytes(value, 'ascii')\n            except UnicodeEncodeError:\n                ascii_ = bytes(value, 'utf-8', 'replace')\n                return LiteralString(ascii_, binary)\n        else:\n            raise TypeError(value)\n        if not binary and len(ascii_) < 64 \\\n                and b'\\n' not in ascii_ \\\n                and b'\\x00' not in ascii_:\n            return QuotedString(ascii_)\n        else:\n            return LiteralString(ascii_, binary)", "docstring": "Produce either a :class:`QuotedString` or :class:`LiteralString`\nbased on the contents of ``data``. This is useful to improve\nreadability of response data.\n\nArgs:\nvalue: The string to serialize.\nbinary: True if the string should be transmitted as binary.\nfallback: The default value to use if ``value`` is None.", "source": "juraj-google-style"}
{"code": "def token_network_connect(\n            self,\n            registry_address: PaymentNetworkID,\n            token_address: TokenAddress,\n            funds: TokenAmount,\n            initial_channel_target: int = 3,\n            joinable_funds_target: float = 0.4,\n    ) -> None:\n        \n        if not is_binary_address(registry_address):\n            raise InvalidAddress('registry_address must be a valid address in binary')\n        if not is_binary_address(token_address):\n            raise InvalidAddress('token_address must be a valid address in binary')\n\n        token_network_identifier = views.get_token_network_identifier_by_token_address(\n            chain_state=views.state_from_raiden(self.raiden),\n            payment_network_id=registry_address,\n            token_address=token_address,\n        )\n\n        connection_manager = self.raiden.connection_manager_for_token_network(\n            token_network_identifier,\n        )\n\n        has_enough_reserve, estimated_required_reserve = has_enough_gas_reserve(\n            raiden=self.raiden,\n            channels_to_open=initial_channel_target,\n        )\n\n        if not has_enough_reserve:\n            raise InsufficientGasReserve((\n                'The account balance is below the estimated amount necessary to '\n                'finish the lifecycles of all active channels. A balance of at '\n                f'least {estimated_required_reserve} wei is required.'\n            ))\n\n        connection_manager.connect(\n            funds=funds,\n            initial_channel_target=initial_channel_target,\n            joinable_funds_target=joinable_funds_target,\n        )", "docstring": "Automatically maintain channels open for the given token network.\n\nArgs:\ntoken_address: the ERC20 token network to connect to.\nfunds: the amount of funds that can be used by the ConnectionMananger.\ninitial_channel_target: number of channels to open proactively.\njoinable_funds_target: fraction of the funds that will be used to join\nchannels opened by other participants.", "source": "juraj-google-style"}
{"code": "def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):\n        \n\n        if not current_app.testing:  \n\n            \n            from flask_sendmail import Message\n            message = Message(\n                subject,\n                recipients=[recipient],\n                html=html_message,\n                body=text_message)\n\n            \n            self.mail.send(message)", "docstring": "Send email message via Flask-Sendmail.\n\nArgs:\nrecipient: Email address or tuple of (Name, Email-address).\nsubject: Subject line.\nhtml_message: The message body in HTML.\ntext_message: The message body in plain text.", "source": "juraj-google-style"}
{"code": "def save_scan_plot(self, filename='scan.pdf', img_format='pdf', coords=None):\n    plt = self.get_scan_plot(coords)\n    plt.savefig(filename, format=img_format)", "docstring": "Save matplotlib plot of the potential energy surface to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.\ncoords: internal coordinate name to use as abcissa.", "source": "codesearchnet"}
{"code": "def _set_textarea(el, value):\n        \n        if isinstance(value, dict):\n            el.text = value[\"val\"]\n        elif type(value) in [list, tuple]:\n            el.text = \"\\n\\n\".join(\n                \"-- %s --\\n%s\" % (item[\"source\"], item[\"val\"])\n                for item in value\n            )\n        else:\n            el.text = value", "docstring": "Set content of given textarea element `el` to `value`.\n\nArgs:\nel (obj): Reference to textarea element you wish to set.\nvalue (obj/list): Value to which the `el` will be set.", "source": "juraj-google-style"}
{"code": "def testRaggedOneHotMatchesArrayOpsOneHot(self, indices_shape, depth, on_value=None, off_value=None, axis=None, dtype=None):\n    indices_shape = tensor_shape.as_shape(indices_shape)\n    indices = np.random.randint(depth + 1, size=indices_shape)\n    expected = array_ops.one_hot(indices, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype)\n    for ragged_rank in range(1, len(indices_shape)):\n        if axis is not None and 0 <= axis <= ragged_rank:\n            continue\n        ragged_indices = ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=ragged_rank)\n        result = ragged_array_ops.ragged_one_hot(ragged_indices, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype)\n        self.assertAllEqual(result.to_tensor(), expected)", "docstring": "Tests that tf.one_hot gives the same result for ragged & uniform tensors.\n\nRuns tf.one_hot with a uniform tensor, and compares the output with the\nresults of calling tf.one_hot with ragged version of that tensor with\nvarying ragged ranks.\n\nArgs:\nindices_shape: Shape for `indices` arg to `tf.one_hot`\ndepth: `depth` arg to `tf.one_hot`\non_value: `on_value` arg to `tf.one_hot`\noff_value: `off_value` arg to `tf.one_hot`\naxis: `axis` arg to `tf.one_hot`\ndtype: `dtype` arg to `tf.one_hot`", "source": "github-repos"}
{"code": "def __call__(self, request: beam.Row, *args, **kwargs):\n    if self.entity_row_fn:\n        entity_dict = self.entity_row_fn(request)\n    else:\n        request_dict = request._asdict()\n        entity_dict = {self.entity_id: request_dict[self.entity_id]}\n    feature_values = self.store.get_online_features(features=self.features, entity_rows=[entity_dict], full_feature_names=self.full_feature_names).to_dict()\n    response_dict = {k: v[0] for k, v in feature_values.items()}\n    return (request, beam.Row(**response_dict))", "docstring": "Fetches feature values for an entity-id from the Feast feature store.\n\nArgs:\nrequest: the input `beam.Row` to enrich.", "source": "github-repos"}
{"code": "def _PreprocessSources(self, extraction_engine):\n    \n    logger.debug('Starting preprocessing.')\n\n    try:\n      artifacts_registry = engine.BaseEngine.BuildArtifactsRegistry(\n          self._artifact_definitions_path, self._custom_artifacts_path)\n      extraction_engine.PreprocessSources(\n          artifacts_registry, self._source_path_specs,\n          resolver_context=self._resolver_context)\n\n    except IOError as exception:\n      logger.error('Unable to preprocess with error: {0!s}'.format(exception))\n\n    logger.debug('Preprocessing done.')", "docstring": "Preprocesses the sources.\n\nArgs:\nextraction_engine (BaseEngine): extraction engine to preprocess\nthe sources.", "source": "juraj-google-style"}
{"code": "def set_volume(percentage):\n    if ((percentage > 100) or (percentage < 0)):\n        raise ValueError('percentage must be an integer between 0 and 100')\n    if (system.get_name() == 'windows'):\n        pass\n    elif (system.get_name() == 'mac'):\n        volume_int = (percentage / 10)\n        sp.Popen(['osascript', '-e', ('set Volume %d' % volume_int)]).wait()\n    else:\n        formatted = (str(percentage) + '%')\n        sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()", "docstring": "Set the volume.\n\nSets the volume to a given percentage (integer between 0 and 100).\n\nArgs:\npercentage (int): The percentage (as a 0 to 100 integer) to set the volume to.\n\nRaises:\nValueError: if the percentage is >100 or <0.", "source": "codesearchnet"}
{"code": "def authenticate(self, user, password):\n    request = Request(AUTH_URL)\n    request.add_header('X-Simperium-API-Key', API_KEY)\n    if (sys.version_info < (3, 3)):\n        request.add_data(json.dumps({'username': user, 'password': password}))\n    else:\n        request.data = json.dumps({'username': user, 'password': password}).encode()\n    try:\n        res = urllib2.urlopen(request).read()\n        token = json.loads(res.decode('utf-8'))['access_token']\n    except HTTPError:\n        raise SimplenoteLoginFailed('Login to Simplenote API failed!')\n    except IOError:\n        token = None\n    return token", "docstring": "Method to get simplenote auth token\n\nArguments:\n- user (string):     simplenote email address\n- password (string): simplenote password\n\nReturns:\nSimplenote API token as string", "source": "codesearchnet"}
{"code": "def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting pooling ...')\n\n    if names == 'short':\n        tf_name = 'P' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    if 'kernel_shape' in params:\n        height, width = params['kernel_shape']\n    else:\n        height, width = params['kernel_size']\n\n    if 'strides' in params:\n        stride_height, stride_width = params['strides']\n    else:\n        stride_height, stride_width = params['stride']\n\n    if 'pads' in params:\n        padding_h, padding_w, _, _ = params['pads']\n    else:\n        padding_h, padding_w = params['padding']\n\n    input_name = inputs[0]\n    pad = 'valid' \n\n    if height % 2 == 1 and width % 2 == 1 and \\\n       height \n       stride_height == 1 and stride_width == 1:\n        pad = 'same'\n    else:\n        padding_name = tf_name + '_pad'\n        padding_layer = keras.layers.ZeroPadding2D(\n            padding=(padding_h, padding_w),\n            name=padding_name\n        )\n        layers[padding_name] = padding_layer(layers[inputs[0]])\n        input_name = padding_name\n\n    \n    pooling = keras.layers.AveragePooling2D(\n        pool_size=(height, width),\n        strides=(stride_height, stride_width),\n        padding=pad,\n        name=tf_name,\n        data_format='channels_first'\n    )\n\n    layers[scope_name] = pooling(layers[input_name])", "docstring": "Convert Average pooling.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def __init__(self, interval=3600):\n        \n        self.interval = interval\n        self.start_time = datetime.datetime.now()\n        self.chk_counter = 0", "docstring": "Initializes the handler with an interval.\n\nArgs:\ninterval (int): Interval at which to checkpoint in seconds.\nDefaults to 3600 (1 hr).", "source": "juraj-google-style"}
{"code": "def inv_logistic(y: Union[float, np.ndarray],\n                 k: float,\n                 theta: float) -> Optional[float]:\n    r\n    if y is None or k is None or theta is None:\n        return None\n    \n    return (np.log((1 / y) - 1) / -k) + theta", "docstring": "r\"\"\"\nInverse standard logistic function:\n\n.. math::\n\nx = ( log( \\frac {1} {y} - 1) / -k ) + \\theta\n\nArgs:\ny: :math:`y`\nk: :math:`k`\ntheta: :math:`\\theta`\n\nReturns:\n:math:`x`", "source": "juraj-google-style"}
{"code": "def sign(self, private_keys):\n    if ((private_keys is None) or (not isinstance(private_keys, list))):\n        raise TypeError('`private_keys` must be a list instance')\n\n    def gen_public_key(private_key):\n        public_key = private_key.get_verifying_key().encode()\n        return public_key.decode()\n    key_pairs = {gen_public_key(PrivateKey(private_key)): PrivateKey(private_key) for private_key in private_keys}\n    tx_dict = self.to_dict()\n    tx_dict = Transaction._remove_signatures(tx_dict)\n    tx_serialized = Transaction._to_str(tx_dict)\n    for (i, input_) in enumerate(self.inputs):\n        self.inputs[i] = self._sign_input(input_, tx_serialized, key_pairs)\n    self._hash()\n    return self", "docstring": "Fulfills a previous Transaction's Output by signing Inputs.\n\nNote:\nThis method works only for the following Cryptoconditions\ncurrently:\n- Ed25519Fulfillment\n- ThresholdSha256\nFurthermore, note that all keys required to fully sign the\nTransaction have to be passed to this method. A subset of all\nwill cause this method to fail.\n\nArgs:\nprivate_keys (:obj:`list` of :obj:`str`): A complete list of\nall private keys needed to sign all Fulfillments of this\nTransaction.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Transaction`", "source": "codesearchnet"}
{"code": "def begin_abort(self, root_pipeline_key, abort_message):\n    \n    def txn():\n      pipeline_record = db.get(root_pipeline_key)\n      if pipeline_record is None:\n        logging.warning(\n            'Tried to abort root pipeline ID \"%s\" but it does not exist.',\n            root_pipeline_key.name())\n        raise db.Rollback()\n      if pipeline_record.status == _PipelineRecord.ABORTED:\n        logging.warning(\n            'Tried to abort root pipeline ID \"%s\"; already in state: %s',\n            root_pipeline_key.name(), pipeline_record.status)\n        raise db.Rollback()\n      if pipeline_record.abort_requested:\n        logging.warning(\n            'Tried to abort root pipeline ID \"%s\"; abort signal already sent.',\n            root_pipeline_key.name())\n        raise db.Rollback()\n\n      pipeline_record.abort_requested = True\n      pipeline_record.abort_message = abort_message\n      pipeline_record.put()\n\n      task = taskqueue.Task(\n          url=self.fanout_abort_handler_path,\n          params=dict(root_pipeline_key=root_pipeline_key))\n      task.add(queue_name=self.queue_name, transactional=True)\n      return True\n\n    return db.run_in_transaction(txn)", "docstring": "Kicks off the abort process for a root pipeline and all its children.\n\nArgs:\nroot_pipeline_key: db.Key of the root pipeline to abort.\nabort_message: Message explaining why the abort happened, only saved\ninto the root pipeline.\n\nReturns:\nTrue if the abort signal was sent successfully; False otherwise.", "source": "juraj-google-style"}
{"code": "def base_name_from_image(image):\n    m = re.match('^(.+/)?([^:/]+)(:[^:]+)?$', image)\n    algo_name = (m.group(2) if m else image)\n    return algo_name", "docstring": "Extract the base name of the image to use as the 'algorithm name' for the job.\n\nArgs:\nimage (str): Image name.\n\nReturns:\nstr: Algorithm name, as extracted from the image name.", "source": "codesearchnet"}
{"code": "def get_stored_version(connection):\n    \n\n    if connection.engine.name == 'sqlite':\n        version = connection.execute('PRAGMA user_version').fetchone()[0]\n        if version == 0:\n            raise VersionIsNotStored\n        return version\n    elif connection.engine.name == 'postgresql':\n        try:\n            r = connection\\\n                .execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME))\\\n                .fetchone()\n            if not r:\n                raise VersionIsNotStored\n\n            version = r[0]\n\n        except ProgrammingError:\n            \n            raise VersionIsNotStored\n        return version\n    else:\n        raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name))", "docstring": "Returns database version.\n\nArgs:\nconnection (sqlalchemy connection):\n\nRaises: Assuming user_version pragma (sqlite case) and user_version table (postgresql case)\nexist because they created with the database creation.\n\nReturns:\nint: version of the database.", "source": "juraj-google-style"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_TF_Tensor'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def __contains__(self, func):\n        \n        return any((func is mw.func) or (mw.is_subchain and func in mw.func)\n                   for mw in self.mw_list)", "docstring": "Returns whether the function is stored anywhere in the middleware chain.\n\nThis runs recursively though any subchains.\n\nArgs:\nfunc (callable): A function which may be present in the chain\n\nReturns:\nbool: True if func is a function contained anywhere in the chain.", "source": "juraj-google-style"}
{"code": "def _remove_lines(self, lines, sublist_lengths, num_to_remove):\n    curr = 0\n    result = []\n    for offset in sublist_lengths:\n        end = curr + offset\n        start = min(curr + num_to_remove, end)\n        result += lines[start:end]\n        curr += offset\n    return result", "docstring": "Utility function to remove num_to_remove lines from each sublist.\n\nArgs:\nlines: list of items.\nsublist_lengths: list of integers representing length of sublist\ncorresponding to each source file.\nnum_to_remove: number of lines to remove from each sublist.\nReturns:\nremaining lines.", "source": "github-repos"}
{"code": "def add_transcript(self, transcript):\n    logger.debug('Adding transcript {0} to variant {1}'.format(transcript, self['variant_id']))\n    self['transcripts'].append(transcript)", "docstring": "Add the information transcript\n\nThis adds a transcript dict to variant['transcripts']\n\nArgs:\ntranscript (dict): A transcript dictionary", "source": "codesearchnet"}
{"code": "def create_assembly(self, did, wid, name='My Assembly'):\n        \n\n        payload = {\n            'name': name\n        }\n\n        return self._api.request('post', '/api/assemblies/d/' + did + '/w/' + wid, body=payload)", "docstring": "Creates a new assembly element in the specified document / workspace.\n\nArgs:\n- did (str): Document ID\n- wid (str): Workspace ID\n- name (str, default='My Assembly')\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def cds_score(self, x_te, y_te):\n        \n        if type(x_te) == np.ndarray:\n            x_te, y_te = pd.Series(x_te.reshape(-1)), pd.Series(y_te.reshape(-1))\n        xd, yd = discretized_sequences(x_te,  y_te,  self.ffactor, self.maxdev)\n        cx = Counter(xd)\n        cy = Counter(yd)\n        yrange = sorted(cy.keys())\n        ny = len(yrange)\n        py = np.array([cy[i] for i in yrange], dtype=float)\n        py = py / py.sum()\n        pyx = []\n        for a in cx:\n            if cx[a] > self.minc:\n                yx = y_te[xd == a]\n                \n                \n                \n                \n                if count_unique(y_te) > len_discretized_values(y_te, \"Numerical\", self.ffactor, self.maxdev):\n\n                    yx = (yx - np.mean(yx)) / np.std(y_te)\n                    yx = discretized_sequence(yx, \"Numerical\", self.ffactor, self.maxdev, norm=False)\n                    cyx = Counter(yx.astype(int))\n                    pyxa = np.array([cyx[i] for i in discretized_values(y_te, \"Numerical\", self.ffactor, self.maxdev)],\n                                    dtype=float)\n\n                else:\n                    cyx = Counter(yx)\n                    pyxa = [cyx[i] for i in yrange]\n                    pyxax = np.array([0] * (ny - 1) + pyxa + [0] * (ny - 1), dtype=float)\n                    xcorr = [sum(py * pyxax[i:i + ny]) for i in range(2 * ny - 1)]\n                    imax = xcorr.index(max(xcorr))\n                    pyxa = np.array([0] * (2 * ny - 2 - imax) + pyxa + [0] * imax, dtype=float)\n                assert pyxa.sum() == cx[a]\n                pyxa = pyxa / pyxa.sum()\n\n                pyx.append(pyxa)\n\n        if len(pyx) == 0:\n            return 0\n\n        pyx = np.array(pyx)\n        pyx = pyx - pyx.mean(axis=0)\n        return np.std(pyx)", "docstring": "Computes the cds statistic from variable 1 to variable 2\n\nArgs:\nx_te (numpy.ndarray): Variable 1\ny_te (numpy.ndarray): Variable 2\n\nReturns:\nfloat: CDS fit score", "source": "juraj-google-style"}
{"code": "def remove_list_duplicates(lista, unique=False):\n    \n    result = []\n    allready = []\n\n    for elem in lista:\n        if elem not in result:\n            result.append(elem)\n        else:\n            allready.append(elem)\n\n    if unique:\n        for elem in allready:\n            result = list(filter((elem).__ne__, result))\n\n    return result", "docstring": "Remove duplicated elements in a list.\nArgs:\nlista: List with elements to clean duplicates.", "source": "juraj-google-style"}
{"code": "def __init__(self, skype=None):\n        \n        self.skype = skype\n        self.synced = False\n        self.cache = {}", "docstring": "Create a new container object.  The :attr:`synced` state and internal :attr:`cache` are initialised here.\n\nArgs:\nskype (Skype): parent Skype instance", "source": "juraj-google-style"}
{"code": "def __init__(self, ad):\n        \n        super(Sl4aClient, self).__init__(app_name=_APP_NAME, ad=ad)\n        self._ad = ad\n        self.ed = None\n        self._adb = ad.adb", "docstring": "Initializes an Sl4aClient.\n\nArgs:\nad: AndroidDevice object.", "source": "juraj-google-style"}
{"code": "def filesizes(images):\n    while True:\n        img = (yield marv.pull(images))\n        if (img is None):\n            break\n        (yield marv.push(img.size))", "docstring": "Stat filesize of files.\n\nArgs:\nimages: stream of marv image files\n\nReturns:\nStream of filesizes", "source": "codesearchnet"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(GetAttributeListRequestPayload, self).read(\n            input_buffer,\n            kmip_version=kmip_version\n        )\n        local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n\n        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n            self._unique_identifier = primitives.TextString(\n                tag=enums.Tags.UNIQUE_IDENTIFIER\n            )\n            self._unique_identifier.read(\n                local_buffer,\n                kmip_version=kmip_version\n            )\n        else:\n            self._unique_identifier = None\n\n        self.is_oversized(local_buffer)", "docstring": "Read the data encoding the GetAttributeList request payload and decode\nit into its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):\n    \n    if isinstance(rationale, basestring) and max_length is not None and len(rationale) > max_length:\n        return rationale[0:max_length], True\n    else:\n        return rationale, False", "docstring": "Truncates the rationale for analytics event emission if necessary\n\nArgs:\nrationale (string): the string value of the rationale\nmax_length (int): the max length for truncation\n\nReturns:\ntruncated_value (string): the possibly truncated version of the rationale\nwas_truncated (bool): returns true if the rationale is truncated", "source": "juraj-google-style"}
{"code": "def greedy_coloring(adj):\n    \n\n    \n    coloring = {}\n    colors = {}\n    possible_colors = {n: set(range(len(adj))) for n in adj}\n    while possible_colors:\n\n        \n        n = min(possible_colors, key=lambda n: len(possible_colors[n]))\n\n        \n        color = min(possible_colors[n])\n        coloring[n] = color\n        if color not in colors:\n            colors[color] = {n}\n        else:\n            colors[color].add(n)\n\n        \n        for neighbor in adj[n]:\n            if neighbor in possible_colors and color in possible_colors[neighbor]:\n                possible_colors[neighbor].remove(color)\n\n        \n        del possible_colors[n]\n\n    return coloring, colors", "docstring": "Determines a vertex coloring.\n\nArgs:\nadj (dict): The edge structure of the graph to be colored.\n`adj` should be of the form {node: neighbors, ...} where\nneighbors is a set.\n\nReturns:\ndict: the coloring {node: color, ...}\ndict: the colors {color: [node, ...], ...}\n\nNote:\nThis is a greedy heuristic: the resulting coloring is not\nnecessarily minimal.", "source": "juraj-google-style"}
{"code": "def _load_credentials_from_file(filename):\n    if (not os.path.exists(filename)):\n        raise exceptions.DefaultCredentialsError('File {} was not found.'.format(filename))\n    with io.open(filename, 'r') as file_obj:\n        try:\n            info = json.load(file_obj)\n        except ValueError as caught_exc:\n            new_exc = exceptions.DefaultCredentialsError('File {} is not a valid json file.'.format(filename), caught_exc)\n            six.raise_from(new_exc, caught_exc)\n    credential_type = info.get('type')\n    if (credential_type == _AUTHORIZED_USER_TYPE):\n        from google.auth import _cloud_sdk\n        try:\n            credentials = _cloud_sdk.load_authorized_user_credentials(info)\n        except ValueError as caught_exc:\n            msg = 'Failed to load authorized user credentials from {}'.format(filename)\n            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)\n            six.raise_from(new_exc, caught_exc)\n        _warn_about_problematic_credentials(credentials)\n        return (credentials, None)\n    elif (credential_type == _SERVICE_ACCOUNT_TYPE):\n        from google.oauth2 import service_account\n        try:\n            credentials = service_account.Credentials.from_service_account_info(info)\n        except ValueError as caught_exc:\n            msg = 'Failed to load service account credentials from {}'.format(filename)\n            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)\n            six.raise_from(new_exc, caught_exc)\n        return (credentials, info.get('project_id'))\n    else:\n        raise exceptions.DefaultCredentialsError('The file {file} does not have a valid type. Type is {type}, expected one of {valid_types}.'.format(file=filename, type=credential_type, valid_types=_VALID_TYPES))", "docstring": "Loads credentials from a file.\n\nThe credentials file must be a service account key or stored authorized\nuser credentials.\n\nArgs:\nfilename (str): The full path to the credentials file.\n\nReturns:\nTuple[google.auth.credentials.Credentials, Optional[str]]: Loaded\ncredentials and the project ID. Authorized user credentials do not\nhave the project ID information.\n\nRaises:\ngoogle.auth.exceptions.DefaultCredentialsError: if the file is in the\nwrong format or is missing.", "source": "codesearchnet"}
{"code": "class PipedPipelineDataFormat(PipelineDataFormat):\n\n    def __iter__(self):\n        for line in sys.stdin:\n            if '\\t' in line:\n                line = line.split('\\t')\n                if self.column:\n                    yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}\n                else:\n                    yield tuple(line)\n            else:\n                yield line\n\n    def save(self, data: dict):\n        \n        print(data)\n\n    def save_binary(self, data: Union[dict, List[dict]]) -> str:\n        if self.output_path is None:\n            raise KeyError('When using piped input on pipeline outputting large object requires an output file path. Please provide such output path through --output argument.')\n        return super().save_binary(data)", "docstring": "Read data from piped input to the python process. For multi columns data, columns should separated by\n\nIf columns are provided, then the output will be a dictionary with {column_x: value_x}\n\nArgs:\noutput_path (`str`): Where to save the outgoing data.\ninput_path (`str`): Where to look for the input data.\ncolumn (`str`): The column to read.\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to overwrite the `output_path`.", "source": "github-repos"}
{"code": "def write(self, output='jsonstat'):\n        \n\n        if output == 'jsonstat':\n            return json.dumps(OrderedDict(self), cls=NumpyEncoder)\n        elif output == 'dataframe':\n            return get_dim_label(self, self['label'], 'dimension')\n        else:\n            raise ValueError(\"Allowed arguments are 'jsonstat' or 'dataframe'\")", "docstring": "Writes data from a Dataset object to JSONstat or Pandas Dataframe.\nArgs:\noutput(string): can accept 'jsonstat' or 'dataframe'\n\nReturns:\nSerialized JSONstat or a Pandas Dataframe,depending on the \\\n'output' parameter.", "source": "juraj-google-style"}
{"code": "def ParseTextToDicts(self, *args, **kwargs):\n    \n\n    result_lists = self.ParseText(*args, **kwargs)\n    result_dicts = []\n\n    for row in result_lists:\n      result_dicts.append(dict(zip(self.header, row)))\n\n    return result_dicts", "docstring": "Calls ParseText and turns the result into list of dicts.\n\nList items are dicts of rows, dict key is column header and value is column\nvalue.\n\nArgs:\ntext: (str), Text to parse with embedded newlines.\neof: (boolean), Set to False if we are parsing only part of the file.\nSuppresses triggering EOF state.\n\nRaises:\nTextFSMError: An error occurred within the FSM.\n\nReturns:\nList of dicts.", "source": "juraj-google-style"}
{"code": "def GetFileSystemReferenceCount(self, path_spec):\n    \n    identifier = self._GetFileSystemCacheIdentifier(path_spec)\n    cache_value = self._file_system_cache.GetCacheValue(identifier)\n    if not cache_value:\n      return None\n\n    return cache_value.reference_count", "docstring": "Retrieves the reference count of a cached file system object.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: reference count or None if there is no file system object for\nthe corresponding path specification cached.", "source": "juraj-google-style"}
{"code": "def ReceiveMessagesRelationalFlows(self, client_id, messages):\n    now = time.time()\n    unprocessed_msgs = []\n    message_handler_requests = []\n    dropped_count = 0\n    for (session_id, msgs) in iteritems(collection.Group(messages, operator.attrgetter('session_id'))):\n        leftover_msgs = self.HandleWellKnownFlows(msgs)\n        for msg in leftover_msgs:\n            if ((msg.auth_state != msg.AuthorizationState.AUTHENTICATED) and (msg.session_id != self.unauth_allowed_session_id)):\n                dropped_count += 1\n                continue\n            if (session_id in queue_manager.session_id_map):\n                message_handler_requests.append(rdf_objects.MessageHandlerRequest(client_id=msg.source.Basename(), handler_name=queue_manager.session_id_map[session_id], request_id=msg.response_id, request=msg.payload))\n            else:\n                unprocessed_msgs.append(msg)\n    if dropped_count:\n        logging.info('Dropped %d unauthenticated messages for %s', dropped_count, client_id)\n    if unprocessed_msgs:\n        flow_responses = []\n        for message in unprocessed_msgs:\n            flow_responses.append(rdf_flow_objects.FlowResponseForLegacyResponse(message))\n        data_store.REL_DB.WriteFlowResponses(flow_responses)\n        for msg in unprocessed_msgs:\n            if (msg.type == rdf_flows.GrrMessage.Type.STATUS):\n                stat = rdf_flows.GrrStatus(msg.payload)\n                if (stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED):\n                    crash_details = rdf_client.ClientCrash(client_id=client_id, session_id=msg.session_id, backtrace=stat.backtrace, crash_message=stat.error_message, nanny_status=stat.nanny_status, timestamp=rdfvalue.RDFDatetime.Now())\n                    events.Events.PublishEvent('ClientCrash', crash_details, token=self.token)\n    if message_handler_requests:\n        data_store.REL_DB.WriteMessageHandlerRequests(message_handler_requests)\n    logging.debug('Received %s messages from %s in %s sec', len(messages), client_id, (time.time() - now))", "docstring": "Receives and processes messages for flows stored in the relational db.\n\nArgs:\nclient_id: The client which sent the messages.\nmessages: A list of GrrMessage RDFValues.", "source": "codesearchnet"}
{"code": "def Insert(self, request, global_params=None):\n    config = self.GetMethodConfig('Insert')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new, empty table in the dataset.\n\nArgs:\nrequest: (BigqueryTablesInsertRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Table) The response message.", "source": "github-repos"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    artifact_filters = cls._ParseStringOption(options, 'artifact_filter_string')\n    artifact_filters_file = cls._ParseStringOption(\n        options, 'artifact_filters_file')\n    filter_file = cls._ParseStringOption(options, 'file_filter')\n\n    if artifact_filters and artifact_filters_file:\n      raise errors.BadConfigOption(\n          'Please only specify artifact definition names in a file '\n          'or on the command line.')\n\n    if (artifact_filters_file or artifact_filters) and filter_file:\n      raise errors.BadConfigOption(\n          'Please do not specify both artifact definitions and legacy filters.')\n\n    if artifact_filters_file and os.path.isfile(artifact_filters_file):\n      with open(artifact_filters_file) as file_object:\n        file_content = file_object.read()\n        artifact_filters = file_content.splitlines()\n    elif artifact_filters:\n      artifact_filters = [name.strip() for name in artifact_filters.split(',')]\n\n    setattr(configuration_object, '_artifact_filters', artifact_filters)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: if the required artifact definitions are not defined.", "source": "juraj-google-style"}
{"code": "def invitation_backend(backend=None, namespace=None):\n    \n    \n    backend = backend or ORGS_INVITATION_BACKEND\n    class_module, class_name = backend.rsplit(\".\", 1)\n    mod = import_module(class_module)\n    return getattr(mod, class_name)(namespace=namespace)", "docstring": "Returns a specified invitation backend\n\nArgs:\nbackend: dotted path to the invitation backend class\nnamespace: URL namespace to use\n\nReturns:\nan instance of an InvitationBackend", "source": "juraj-google-style"}
{"code": "def __init__(self, direction, edge_name, optional=False, within_optional_scope=False):\n        \n        super(Traverse, self).__init__(\n            direction, edge_name, optional=optional, within_optional_scope=within_optional_scope)\n        self.direction = direction\n        self.edge_name = edge_name\n        self.optional = optional\n        \n        self.within_optional_scope = within_optional_scope\n        self.validate()", "docstring": "Create a new Traverse block in the given direction and across the given edge.\n\nArgs:\ndirection: string, 'in' or 'out'\nedge_name: string obeying variable name rules (see validate_safe_string).\noptional: optional bool, specifying whether the traversal to the given location\nis optional (i.e. non-filtering) or mandatory (filtering).\n\nReturns:\nnew Traverse object", "source": "juraj-google-style"}
{"code": "def main(argv=None):\n    args = parse_mobly_cli_args(argv)\n    test_class = _find_test_class()\n    if args.list_tests:\n        _print_test_names(test_class)\n        sys.exit(0)\n    test_configs = config_parser.load_test_config_file(args.config, args.test_bed)\n    tests = None\n    if args.tests:\n        tests = args.tests\n    console_level = logging.DEBUG if args.verbose else logging.INFO\n    ok = True\n    for config in test_configs:\n        runner = TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name)\n        with runner.mobly_logger(console_level=console_level):\n            runner.add_test_class(config, test_class, tests)\n            try:\n                runner.run()\n                ok = runner.results.is_all_pass and ok\n            except signals.TestAbortAll:\n                pass\n            except Exception:\n                logging.exception('Exception when executing %s.', config.testbed_name)\n                ok = False\n    if not ok:\n        sys.exit(1)", "docstring": "Execute the test class in a test module.\n\nThis is the default entry point for running a test script file directly.\nIn this case, only one test class in a test script is allowed.\n\nTo make your test script executable, add the following to your file:\n\n.. code-block:: python\n\nfrom mobly import test_runner\n...\nif __name__ == '__main__':\ntest_runner.main()\n\nIf you want to implement your own cli entry point, you could use function\nexecute_one_test_class(test_class, test_config, test_identifier)\n\nArgs:\nargv: A list that is then parsed as cli args. If None, defaults to cli\ninput.", "source": "github-repos"}
{"code": "def nb_r_deriv(r, data_row):\n    \n    n = len(data_row)\n    d = sum(digamma(data_row + r)) - n*digamma(r) + n*np.log(r/(r+np.mean(data_row)))\n    return d", "docstring": "Derivative of log-likelihood wrt r (formula from wikipedia)\n\nArgs:\nr (float): the R paramemter in the NB distribution\ndata_row (array): 1d array of length cells", "source": "juraj-google-style"}
{"code": "def transformer_text_encoder(inputs,\n                             target_space,\n                             hparams,\n                             name=None):\n  \n  with tf.variable_scope(name, default_name=\"transformer_text_encoder\"):\n    inputs = common_layers.flatten4d3d(inputs)\n    [\n        encoder_input,\n        encoder_self_attention_bias,\n        ed,\n    ] = transformer_layers.transformer_prepare_encoder(\n        inputs, target_space=target_space, hparams=hparams)\n    encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)\n    encoder_output = transformer_layers.transformer_encoder(\n        encoder_input, encoder_self_attention_bias, hparams)\n    return encoder_output, ed", "docstring": "Transformer text encoder over inputs with unmasked full attention.\n\nArgs:\ninputs: Tensor of shape [batch, length, 1, hparams.hidden_size].\ntarget_space: int. Used for encoding inputs under a target space id.\nhparams: HParams.\nname: string, variable scope.\n\nReturns:\nencoder_output: Tensor of shape [batch, length, hparams.hidden_size].\ned: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias\nfor any padded tokens.", "source": "juraj-google-style"}
{"code": "def insert(self, index, item):\n        \n        if not self:\n            list.append(self, item)\n        elif item.__class__ == self[0].__class__:\n            list.insert(self, index, item)\n        else:\n            raise exceptions.WrongListItemType(item.__class__.__name__,\n                                               self[0].__class__.__name__)", "docstring": "Insert an item at the specified index.\n\nArgs:\nindex (int): Position to insert the item.\nitem: Item to be inserted.\n\nRaises:\n:exc:`~.exceptions.WrongListItemType`: If an item has a different\ntype than the first item to be stored.", "source": "juraj-google-style"}
{"code": "def from_audio_encoder_config(cls, audio_encoder_config: PretrainedConfig, **kwargs):\n    return cls(audio_encoder_config=audio_encoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`MoshiConfig`] (or a derived class) from an audio encoder configuration.\n\nReturns:\n[`MoshiConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def ensure_resource_data(self, update_data=False):\n        \n        \n        if not any(key in self.data for key in self.UNIQUE_IDENTIFIERS):\n            raise exceptions.HPOneViewMissingUniqueIdentifiers(MISSING_UNIQUE_IDENTIFIERS)\n\n        \n        if not update_data:\n            return\n\n        resource_data = None\n\n        if 'uri' in self.UNIQUE_IDENTIFIERS and self.data.get('uri'):\n            resource_data = self._helper.do_get(self.data['uri'])\n        else:\n            for identifier in self.UNIQUE_IDENTIFIERS:\n                identifier_value = self.data.get(identifier)\n\n                if identifier_value:\n                    result = self.get_by(identifier, identifier_value)\n                    if result and isinstance(result, list):\n                        resource_data = result[0]\n                        break\n\n        if resource_data:\n            self.data.update(resource_data)\n        else:\n            raise exceptions.HPOneViewResourceNotFound(RESOURCE_DOES_NOT_EXIST)", "docstring": "Retrieves data from OneView and updates resource object.\n\nArgs:\nupdate_data: Flag to update resource data when it is required.", "source": "juraj-google-style"}
{"code": "def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):\n    mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)\n    incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask\n    return incremental_indices + self.padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding\nsymbols are ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\ninput_ids: tf.Tensor\nReturns: tf.Tensor", "source": "github-repos"}
{"code": "def get_change_point_config(params: Dict[str, Any]) -> ChangePointConfig:\n    return ChangePointConfig(min_runs_between_change_points=params.get('min_runs_between_change_points', constants._DEFAULT_MIN_RUNS_BETWEEN_CHANGE_POINTS), num_runs_in_change_point_window=params.get('num_runs_in_change_point_window', constants._DEFAULT_NUM_RUMS_IN_CHANGE_POINT_WINDOW))", "docstring": "Args:\nparams: Dict containing parameters to run change point analysis.\nReturns:\nChangePointConfig object containing change point analysis parameters.", "source": "github-repos"}
{"code": "def __init__(self, file_object, delete_tempfile=True, journal_mode=\"DELETE\"):\n    \n    self.file_object = file_object\n    self.journal_mode = journal_mode\n\n    \n    \n    \n    if hasattr(self.file_object, \"name\"):\n      self.name = self.file_object.name\n      self._delete_file = False\n    else:\n      self._delete_file = delete_tempfile\n      with tempfile.NamedTemporaryFile(delete=False) as fd:\n        self.name = fd.name\n        data = file_object.read(65536)\n        while data:\n          fd.write(data)\n          data = file_object.read(65536)", "docstring": "Init.\n\nArgs:\nfile_object: A file like object.\ndelete_tempfile: If we create a tempfile, should we delete it when\nwe're done.\njournal_mode: If set to \"WAL\" a \"Write-Ahead Log\" is created.", "source": "juraj-google-style"}
{"code": "def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):\n    if (tensors_to_log is None):\n        tensors_to_log = _TENSORS_TO_LOG\n    return tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=every_n_iter)", "docstring": "Function to get LoggingTensorHook.\n\nArgs:\nevery_n_iter: `int`, print the values of `tensors` once every N local\nsteps taken on the current worker.\ntensors_to_log: List of tensor names or dictionary mapping labels to tensor\nnames. If not set, log _TENSORS_TO_LOG by default.\n**kwargs: a dictionary of arguments to LoggingTensorHook.\n\nReturns:\nReturns a LoggingTensorHook with a standard set of tensors that will be\nprinted to stdout.", "source": "codesearchnet"}
{"code": "def recoverURL(self, url):\n    self.setUserAgent()\n    if ('https:\n        self.setProxy(protocol='https')\n    else:\n        self.setProxy(protocol='http')\n    if ('.onion' in url):\n        try:\n            pass\n        except:\n            pass\n        url = url.replace('.onion', '.onion.cab')\n    try:\n        recurso = self.br.open(url)\n    except:\n        return None\n    html = recurso.read()\n    return html", "docstring": "Public method to recover a resource.\n\nArgs:\n-----\nurl: The URL to be collected.\n\nReturns:\n--------\nReturns a resource that has to be read, for instance, with html = self.br.read()", "source": "codesearchnet"}
{"code": "def skip(self, count, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import skip_op\n    return skip_op._skip(self, count, name)", "docstring": "Creates a `Dataset` that skips `count` elements from this dataset.\n\n>>> dataset = tf.data.Dataset.range(10)\n>>> dataset = dataset.skip(7)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[7, 8, 9]\n\nArgs:\ncount: A `tf.int64` scalar `tf.Tensor`, representing the number of\nelements of this dataset that should be skipped to form the new dataset.\nIf `count` is greater than the size of this dataset, the new dataset\nwill contain no elements.  If `count` is -1, skips the entire dataset.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA new `Dataset` with the transformation applied as described above.", "source": "github-repos"}
{"code": "def create(self, request, desc, files, public=False):\n        \n        request.data = json.dumps({\n                \"description\": desc,\n                \"public\": public,\n                \"files\": files,\n                })\n        return self.send(request).json()['html_url']", "docstring": "Creates a gist\n\nArguments:\nrequest: an initial request object\ndesc:    the gist description\nfiles:   a list of files to add to the gist\npublic:  a flag to indicate whether the gist is public or not\n\nReturns:\nThe URL to the newly created gist.", "source": "juraj-google-style"}
{"code": "def compute_centroid(points):\n    lats = [p[1] for p in points]\n    lons = [p[0] for p in points]\n    return Point(np.mean(lats), np.mean(lons), None)", "docstring": "Computes the centroid of set of points\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nReturns:\n:obj:`Point`", "source": "codesearchnet"}
{"code": "def get_asn_verbose_dns(self, asn=None):\n        \n\n        if asn[0:2] != 'AS':\n\n            asn = 'AS{0}'.format(asn)\n\n        zone = '{0}.asn.cymru.com'.format(asn)\n\n        try:\n\n            log.debug('ASN verbose query for {0}'.format(zone))\n            data = self.dns_resolver.query(zone, 'TXT')\n            return str(data[0])\n\n        except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers,\n                dns.resolver.NoAnswer, dns.exception.Timeout) as e:\n\n            raise ASNLookupError(\n                'ASN lookup failed (DNS {0}) for {1}.'.format(\n                    e.__class__.__name__, asn)\n            )\n\n        except:  \n\n            raise ASNLookupError(\n                'ASN lookup failed for {0}.'.format(asn)\n            )", "docstring": "The function for retrieving the information for an ASN from\nCymru via port 53 (DNS). This is needed since IP to ASN mapping via\nCymru DNS does not return the ASN Description like Cymru Whois does.\n\nArgs:\nasn (:obj:`str`): The AS number (required).\n\nReturns:\nstr: The raw ASN data.\n\nRaises:\nASNLookupError: The ASN lookup failed.", "source": "juraj-google-style"}
{"code": "def _initialize_global_state(self, redis_address, redis_password=None, timeout=20):\n    self.redis_client = services.create_redis_client(redis_address, redis_password)\n    start_time = time.time()\n    num_redis_shards = None\n    redis_shard_addresses = []\n    while ((time.time() - start_time) < timeout):\n        num_redis_shards = self.redis_client.get('NumRedisShards')\n        if (num_redis_shards is None):\n            print('Waiting longer for NumRedisShards to be populated.')\n            time.sleep(1)\n            continue\n        num_redis_shards = int(num_redis_shards)\n        if (num_redis_shards < 1):\n            raise Exception('Expected at least one Redis shard, found {}.'.format(num_redis_shards))\n        redis_shard_addresses = self.redis_client.lrange('RedisShards', start=0, end=(- 1))\n        if (len(redis_shard_addresses) != num_redis_shards):\n            print('Waiting longer for RedisShards to be populated.')\n            time.sleep(1)\n            continue\n        break\n    if ((time.time() - start_time) >= timeout):\n        raise Exception('Timed out while attempting to initialize the global state. num_redis_shards = {}, redis_shard_addresses = {}'.format(num_redis_shards, redis_shard_addresses))\n    self.redis_clients = []\n    for shard_address in redis_shard_addresses:\n        self.redis_clients.append(services.create_redis_client(shard_address.decode(), redis_password))", "docstring": "Initialize the GlobalState object by connecting to Redis.\n\nIt's possible that certain keys in Redis may not have been fully\npopulated yet. In this case, we will retry this method until they have\nbeen populated or we exceed a timeout.\n\nArgs:\nredis_address: The Redis address to connect.\nredis_password: The password of the redis server.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False, output_router_logits: bool=False) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n    hidden_states = self.attn_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.ff_layer_norm(hidden_states)\n    if self.is_sparse:\n        hidden_states, router_states = self.ffn(hidden_states, attention_mask)\n    else:\n        hidden_states, router_states = (self.ffn(hidden_states), None)\n    hidden_states = self.ff_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):\n        clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    if output_router_logits:\n        outputs += (router_states,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\ninput to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`):\nattention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very\nlarge negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _ReadRecordAttributeValueOffset(self, file_object, file_offset, number_of_attribute_values):\n    offsets_data_size = (number_of_attribute_values * 4)\n    offsets_data = file_object.read(offsets_data_size)\n    context = dtfabric_data_maps.DataTypeMapContext(values={'number_of_attribute_values': number_of_attribute_values})\n    data_type_map = self._GetDataTypeMap('keychain_record_attribute_value_offsets')\n    try:\n        attribute_value_offsets = self._ReadStructureFromByteStream(offsets_data, file_offset, data_type_map, context=context)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map record attribute value offsets data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    return attribute_value_offsets", "docstring": "Reads the record attribute value offsets.\n\nArgs:\nfile_object (file): file-like object.\nfile_offset (int): offset of the record attribute values offsets relative\nto the start of the file.\nnumber_of_attribute_values (int): number of attribute values.\n\nReturns:\nkeychain_record_attribute_value_offsets: record attribute value offsets.\n\nRaises:\nParseError: if the record attribute value offsets cannot be read.", "source": "codesearchnet"}
{"code": "def get_events_for_subscription(access_token, subscription_id, start_timestamp):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/microsoft.insights/eventtypes/management/values?api-version=',\n                        INSIGHTS_API, '&$filter=eventTimestamp ge \\'', start_timestamp, '\\''])\n    return do_get(endpoint, access_token)", "docstring": "Get the insights evens for a subsctipion since the specific timestamp.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nstart_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'.\nReturns:\nHTTP response. JSON body of insights events.", "source": "juraj-google-style"}
{"code": "def build_phenotype(phenotype_id, adapter):\n    \n    phenotype_obj = {}\n    phenotype = adapter.hpo_term(phenotype_id)\n    if phenotype:\n        phenotype_obj['phenotype_id'] = phenotype['hpo_id']\n        phenotype_obj['feature'] = phenotype['description']\n    return phenotype", "docstring": "Build a small phenotype object\n\nBuild a dictionary with phenotype_id and description\n\nArgs:\nphenotype_id (str): The phenotype id\nadapter (scout.adapter.MongoAdapter)\n\nReturns:\nphenotype_obj (dict):\n\ndict(\nphenotype_id = str,\nfeature = str, # description of phenotype\n)", "source": "juraj-google-style"}
{"code": "def get_properties(properties_file='raw.properties.json', env=None, region=None):\n    \n    with open(properties_file, 'rt') as file_handle:\n        properties = json.load(file_handle)\n\n    env_properties = properties.get(env, properties)\n    contents = env_properties.get(region, env_properties)\n    LOG.debug('Found properties for %s:\\n%s', env, contents)\n    return contents", "docstring": "Get contents of _properties_file_ for the _env_.\n\nArgs:\nproperties_file (str): File name of `create-configs` JSON output.\nenv (str): Environment to read optionally.\nregion (str): Region to get specific configs for.\n\nReturns:\ndict: JSON loaded Application properties for _env_.\nNone: Given _env_ was not found in `create-configs` JSON output.", "source": "juraj-google-style"}
{"code": "def synthesize(self, duration, tick_frequency):\n        \n        sr = self.samplerate.samples_per_second\n        \n        tick = np.random.uniform(low=-1., high=1., size=int(sr * .1))\n        tick *= np.linspace(1, 0, len(tick))\n        \n        samples = np.zeros(int(sr * (duration / Seconds(1))))\n        ticks_per_second = Seconds(1) / tick_frequency\n        \n        step = int(sr \n        for i in range(0, len(samples), step):\n            size = len(samples[i:i + len(tick)])\n            samples[i:i + len(tick)] += tick[:size]\n        return AudioSamples(samples, self.samplerate)", "docstring": "Synthesize periodic \"ticks\", generated from white noise and an envelope\n\nArgs:\nduration (numpy.timedelta64): The total duration of the sound to be\nsynthesized\ntick_frequency (numpy.timedelta64): The frequency of the ticking\nsound", "source": "juraj-google-style"}
{"code": "def _api_scrape(json_inp, ndx):\n    try:\n        headers = json_inp['resultSets'][ndx]['headers']\n        values = json_inp['resultSets'][ndx]['rowSet']\n    except KeyError:\n        try:\n            headers = json_inp['resultSet'][ndx]['headers']\n            values = json_inp['resultSet'][ndx]['rowSet']\n        except KeyError:\n            headers = json_inp['resultSet']['headers']\n            values = json_inp['resultSet']['rowSet']\n    if HAS_PANDAS:\n        return DataFrame(values, columns=headers)\n    else:\n        return [dict(zip(headers, value)) for value in values]", "docstring": "Internal method to streamline the getting of data from the json\n\nArgs:\njson_inp (json): json input from our caller\nndx (int): index where the data is located in the api\n\nReturns:\nIf pandas is present:\nDataFrame (pandas.DataFrame): data set from ndx within the\nAPI's json\nelse:\nA dictionary of both headers and values from the page", "source": "codesearchnet"}
{"code": "def _guess_format_from_extension(ext):\n    ext = ext.strip('.')\n    formats = []\n    for fmt in FILE_FORMATS:\n        if (ext in FILE_FORMATS[fmt]):\n            formats.append(fmt)\n    if ((formats == []) or (len(formats) > 1)):\n        return False\n    return formats[0]", "docstring": "Guess the appropriate data type from file extension.\n\nArguments:\next:        The file extension (period optional)\n\nReturns:\nString. The format (without leading period),\nor False if none was found or couldn't be guessed", "source": "codesearchnet"}
{"code": "def block_view(self, mri):\n    controller = self.get_controller(mri)\n    block = controller.block_view(weakref.proxy(self))\n    return block", "docstring": "Get a view of a block\n\nArgs:\nmri: The mri of the controller hosting the block\n\nReturns:\nBlock: The block we control", "source": "codesearchnet"}
{"code": "def get_model_details(self, model_name):\n    full_name = model_name\n    if (not model_name.startswith('projects/')):\n        full_name = ('projects/%s/models/%s' % (self._project_id, model_name))\n    return self._api.projects().models().get(name=full_name).execute()", "docstring": "Get details of the specified model from CloudML Service.\n\nArgs:\nmodel_name: the name of the model. It can be a model full name\n(\"projects/[project_id]/models/[model_name]\") or just [model_name].\nReturns: a dictionary of the model details.", "source": "codesearchnet"}
{"code": "def render_dictionary(data, headers=None):\n  \n  return IPython.core.display.HTML(_html.HtmlBuilder.render_table(data, headers))", "docstring": "Return a dictionary list formatted as a HTML table.\n\nArgs:\ndata: the dictionary list\nheaders: the keys in the dictionary to use as table columns, in order.", "source": "juraj-google-style"}
{"code": "def create_index(index_name, index_config, client):\n    client.create(index=index_name, body=index_config)", "docstring": "Creates an index with a given configuration\n\nArgs:\nindex_name (str): Name of the index you want to create\nindex_config (dict) configuration for the index\nclient (Elasticsearch.IndicesClient) the Elasticsearch client", "source": "codesearchnet"}
{"code": "def StartMergeTaskStorage(self, task):\n    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n        raise IOError('Unsupported storage type.')\n    if (not self._merge_task_storage_path):\n        raise IOError('Missing merge task storage path.')\n    merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)\n    if (not os.path.isfile(merge_storage_file_path)):\n        raise IOError('Merge task storage path is not a file.')\n    return self._CreateTaskStorageMergeReader(merge_storage_file_path)", "docstring": "Starts a merge of a task storage with the session storage.\n\nArgs:\ntask (Task): task.\n\nReturns:\nStorageMergeReader: storage merge reader of the task storage.\n\nRaises:\nIOError: if the storage file cannot be opened or\nif the storage type is not supported or\nif the temporary path for the task storage does not exist or\nif the temporary path for the task storage doe not refers to a file.\nOSError: if the storage file cannot be opened or\nif the storage type is not supported or\nif the temporary path for the task storage does not exist or\nif the temporary path for the task storage doe not refers to a file.", "source": "codesearchnet"}
{"code": "def post(cls, payload):\n        \n        if not isinstance(payload, dict):\n            raise ValueError(\"The 'payload' parameter must be provided a dictionary object.\")\n        payload = cls.set_id_in_fkeys(payload)\n        payload = cls.check_boolean_fields(payload)\n        payload = cls.add_model_name_to_payload(payload)\n        \n        payload = cls.prepost_hooks(payload)\n        cls.debug_logger.debug(\"POSTING payload {}\".format(json.dumps(payload, indent=4)))\n        res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)\n        cls.write_response_html_to_file(res,\"bob.html\")\n        if not res.ok:\n            cls.log_error(res.text)\n            res_json = res.json()\n            if \"exception\" in res_json:\n                exc_type = res_json[\"exception\"]\n                if exc_type == \"ActiveRecord::RecordNotUnique\":\n                    raise RecordNotUnique()\n        res.raise_for_status()\n        res = res.json()\n        cls.log_post(res)\n        cls.debug_logger.debug(\"Success\")\n        return res", "docstring": "Posts the data to the specified record.\n\nArgs:\npayload: `dict`. This will be JSON-formatted prior to sending the request.\n\nReturns:\n`dict`. The JSON formatted response.\n\nRaises:\n`Requests.exceptions.HTTPError`: The status code is not ok.\n`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique.", "source": "juraj-google-style"}
{"code": "def get_dimension_index(self, dimension):\n    if isinstance(dimension, int):\n        if ((dimension < (self.ndims + len(self.vdims))) or (dimension < len(self.dimensions()))):\n            return dimension\n        else:\n            return IndexError('Dimension index out of bounds')\n    dim = dimension_name(dimension)\n    try:\n        dimensions = (self.kdims + self.vdims)\n        return [i for (i, d) in enumerate(dimensions) if (d == dim)][0]\n    except IndexError:\n        raise Exception(('Dimension %s not found in %s.' % (dim, self.__class__.__name__)))", "docstring": "Get the index of the requested dimension.\n\nArgs:\ndimension: Dimension to look up by name or by index\n\nReturns:\nInteger index of the requested dimension", "source": "codesearchnet"}
{"code": "def process_subj_or_pred(self, component: Union[(URIRef, str)]) -> URIRef:\n    if ('http' in component):\n        prefix = self.find_prefix(component)\n        if prefix:\n            self.process_prefix(prefix)\n        return URIRef(component)\n    elif (':' in component):\n        (presumed_prefix, info) = component.split(':', 1)\n        namespace: Union[(Namespace, None)] = self.process_prefix(presumed_prefix)\n        if (not namespace):\n            exit((component + \": qname namespace does't exist yet.\"))\n        return namespace[info]\n    exit((component + ': is not a valid subject or predicate'))", "docstring": "Adds viable uri from iri or expands viable qname to iri to be triple ready\n\nNeed to have a viable qualified name (qname) in order to use a qname. You can make it\nviable by either add the namespace beforehand with add_namespace(s) or if its already\nin the local common_namespaces preloaded.\n\nArgs:\ncomponent: entity subject or predicate to be expanded or have its uri saved.\n\nReturns:\nrdflib URIRef ready subject or predicate to be put into a triple.\n\nRaises:\nSystemExit: When expecting a qname to be expanded, but is not valid or if\ncomponent is not a qualified name or a iri.", "source": "codesearchnet"}
{"code": "def read_analysis(self, file_handle):\n        \n        start = self.annotation['__header__']['analysis start']\n        end = self.annotation['__header__']['analysis end']\n        if start != 0 and end != 0:\n            file_handle.seek(start, 0)\n            self._analysis = file_handle.read(end - start)\n        else:\n            self._analysis = None", "docstring": "Read the ANALYSIS segment of the FCS file and store it in self.analysis.\n\nWarning: This has never been tested with an actual fcs file that contains an\nanalysis segment.\n\nArgs:\nfile_handle: buffer containing FCS data", "source": "juraj-google-style"}
{"code": "def initialize(self, map_arr, start_point_label='S', end_point_label='G', wall_label='\n    np.set_printoptions(threshold=np.inf)\n    self.__agent_label = agent_label\n    self.__map_arr = map_arr\n    self.__start_point_label = start_point_label\n    start_arr_tuple = np.where((self.__map_arr == self.__start_point_label))\n    (x_arr, y_arr) = start_arr_tuple\n    self.__start_point_tuple = (x_arr[0], y_arr[0])\n    end_arr_tuple = np.where((self.__map_arr == self.__end_point_label))\n    (x_arr, y_arr) = end_arr_tuple\n    self.__end_point_tuple = (x_arr[0], y_arr[0])\n    self.__wall_label = wall_label\n    for x in range(self.__map_arr.shape[1]):\n        for y in range(self.__map_arr.shape[0]):\n            if (((x, y) == self.__start_point_tuple) or ((x, y) == self.__end_point_tuple)):\n                continue\n            arr_value = self.__map_arr[y][x]\n            if (arr_value == self.__wall_label):\n                continue\n            self.save_r_df((x, y), float(arr_value))", "docstring": "Initialize map of maze and setup reward value.\n\nArgs:\nmap_arr:              Map. the 2d- `np.ndarray`.\nstart_point_label:    Label of start point.\nend_point_label:      Label of end point.\nwall_label:           Label of wall.\nagent_label:          Label of agent.", "source": "codesearchnet"}
{"code": "def _detect(self):\n    results = []\n    for contract in self.contracts:\n        shadows = self.detect_builtin_shadowing_definitions(contract)\n        if shadows:\n            for shadow in shadows:\n                shadow_type = shadow[0]\n                shadow_object = shadow[1]\n                local_variable_parent = shadow[2]\n                local_variable_path = (contract.name + '.')\n                if (local_variable_parent is not None):\n                    local_variable_path += (local_variable_parent.name + '.')\n                local_variable_path += shadow_object.name\n                info = '{} ({} @ {}) shadows built-in symbol \"{}\"\\n'.format(local_variable_path, shadow_type, shadow_object.source_mapping_str, shadow_object.name)\n                json = self.generate_json_result(info)\n                if (shadow_type in [self.SHADOWING_FUNCTION, self.SHADOWING_MODIFIER, self.SHADOWING_EVENT]):\n                    self.add_function_to_json(shadow_object, json)\n                elif (shadow_type in [self.SHADOWING_STATE_VARIABLE, self.SHADOWING_LOCAL_VARIABLE]):\n                    self.add_variable_to_json(shadow_object, json)\n                results.append(json)\n    return results", "docstring": "Detect shadowing of built-in symbols\n\nRecursively visit the calls\nReturns:\nlist: {'vuln', 'filename,'contract','func', 'shadow'}", "source": "codesearchnet"}
{"code": "def print_tensor(self, args, screen_info=None):\n    parsed = self._arg_parsers['print_tensor'].parse_args(args)\n    np_printoptions = cli_shared.numpy_printoptions_from_screen_info(screen_info)\n    highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)\n    tensor_name, tensor_slicing = command_parser.parse_tensor_name_with_slicing(parsed.tensor_name)\n    node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)\n    if self._debug_dump.loaded_partition_graphs() and (not self._debug_dump.node_exists(node_name)):\n        output = cli_shared.error('Node \"%s\" does not exist in partition graphs' % node_name)\n        _add_main_menu(output, node_name=None, enable_list_tensors=True, enable_print_tensor=False)\n        return output\n    watch_keys = self._debug_dump.debug_watch_keys(node_name)\n    if output_slot is None:\n        output_slots = set()\n        for watch_key in watch_keys:\n            output_slots.add(int(watch_key.split(':')[1]))\n        if len(output_slots) == 1:\n            output_slot = list(output_slots)[0]\n        else:\n            lines = ['Node \"%s\" generated debug dumps from %s output slots:' % (node_name, len(output_slots)), 'Please specify the output slot: %s:x.' % node_name]\n            output = debugger_cli_common.RichTextLines(lines)\n            _add_main_menu(output, node_name=node_name, enable_list_tensors=True, enable_print_tensor=False)\n            return output\n    matching_data = []\n    for watch_key in watch_keys:\n        debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n        for datum in debug_tensor_data:\n            if datum.output_slot == output_slot:\n                matching_data.append(datum)\n    if not matching_data:\n        output = cli_shared.error('Tensor \"%s\" did not generate any dumps.' % parsed.tensor_name)\n    elif len(matching_data) == 1:\n        if parsed.number <= 0:\n            output = cli_shared.format_tensor(matching_data[0].get_tensor(), matching_data[0].watch_key, np_printoptions, print_all=parsed.print_all, tensor_slicing=tensor_slicing, highlight_options=highlight_options, include_numeric_summary=parsed.numeric_summary, write_path=parsed.write_path)\n        else:\n            output = cli_shared.error('Invalid number (%d) for tensor %s, which generated one dump.' 
% (parsed.number, parsed.tensor_name))\n        _add_main_menu(output, node_name=node_name, enable_print_tensor=False)\n    else:\n        if parsed.number < 0:\n            lines = ['Tensor \"%s\" generated %d dumps:' % (parsed.tensor_name, len(matching_data))]\n            font_attr_segs = {}\n            for i, datum in enumerate(matching_data):\n                rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0\n                lines.append('\n                command = 'print_tensor %s -n %d' % (parsed.tensor_name, i)\n                font_attr_segs[len(lines) - 1] = [(len(lines[-1]) - len(datum.watch_key), len(lines[-1]), debugger_cli_common.MenuItem(None, command))]\n            lines.append('')\n            lines.append('You can use the -n (--number) flag to specify which dump to print.')\n            lines.append('For example:')\n            lines.append('  print_tensor %s -n 0' % parsed.tensor_name)\n            output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)\n        elif parsed.number >= len(matching_data):\n            output = cli_shared.error('Specified number (%d) exceeds the number of available dumps (%d) for tensor %s' % (parsed.number, len(matching_data), parsed.tensor_name))\n        else:\n            output = cli_shared.format_tensor(matching_data[parsed.number].get_tensor(), matching_data[parsed.number].watch_key + ' (dump \n        _add_main_menu(output, node_name=node_name, enable_print_tensor=False)\n    return output", "docstring": "Command handler for print_tensor.\n\nPrint value of a given dumped tensor.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines object.", "source": "github-repos"}
{"code": "def move_file(src, dest):\n    \n    try:\n        os.replace(src, dest)\n    except Exception as ex_replace:\n        logger.error(f\"error moving file {src} to \"\n                     f\"{dest}. {ex_replace}\")\n        raise", "docstring": "Move source file to destination.\n\nOverwrites dest.\n\nArgs:\nsrc: str or path-like. source file\ndest: str or path-like. destination file\n\nReturns:\nNone.\n\nRaises:\nFileNotFoundError: out path parent doesn't exist.\nOSError: if any IO operations go wrong.", "source": "juraj-google-style"}
{"code": "def load_pkl(filenames):\n    \n    if not isinstance(filenames, (list, tuple)):\n        filenames = [filenames]\n    times = []\n    for name in filenames:\n        name = str(name)\n        with open(name, 'rb') as file:\n            loaded_obj = pickle.load(file)\n            if not isinstance(loaded_obj, Times):\n                raise TypeError(\"At least one loaded object is not a Times data object.\")\n            times.append(loaded_obj)\n    return times if len(times) > 1 else times[0]", "docstring": "Unpickle file contents.\n\nArgs:\nfilenames (str): Can be one or a list or tuple of filenames to retrieve.\n\nReturns:\nTimes: A single object, or from a collection of filenames, a list of Times objects.\n\nRaises:\nTypeError: If any loaded object is not a Times object.", "source": "juraj-google-style"}
{"code": "def begin_operation(self, conn_or_internal_id, op_name, callback, timeout):\n    data = {'id': conn_or_internal_id, 'callback': callback, 'operation_name': op_name}\n    action = ConnectionAction('begin_operation', data, timeout=timeout, sync=False)\n    self._actions.put(action)", "docstring": "Begin an operation on a connection\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id\nop_name (string): The name of the operation that we are starting (stored in\nthe connection's microstate)\ncallback (callable): Callback to call when this disconnection attempt either\nsucceeds or fails\ntimeout (float): How long to allow this connection attempt to proceed\nwithout timing it out (in seconds)", "source": "codesearchnet"}
{"code": "def individual(self, ind_id=None):\n        \n        for ind_obj in self.individual_objs:\n            if ind_obj.ind_id == ind_id:\n                return ind_obj\n        return None", "docstring": "Return a individual object\n\nArgs:\nind_id (str): A individual id\n\nReturns:\nindividual (puzzle.models.individual)", "source": "juraj-google-style"}
{"code": "def on(self, event_name, *args, **kwargs):\n        \n        def decorator(f):\n            self.add_event_handler(event_name, f, *args, **kwargs)\n            return f\n        return decorator", "docstring": "Decorator shortcut for add_event_handler.\n\nArgs:\nevent_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.Events` or\nany `event_name` added by :meth:`~ignite.engine.Engine.register_events`.\n*args: optional args to be passed to `handler`.\n**kwargs: optional keyword args to be passed to `handler`.", "source": "juraj-google-style"}
{"code": "def NCHW_VECT_CToNHWC(input_shape_or_tensor: Union[tensor_lib.Tensor, list[int]]) -> Union[tensor_lib.Tensor, list[int]]:\n    permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}\n    is_tensor = isinstance(input_shape_or_tensor, tensor_lib.Tensor)\n    input_shape: list[int] = input_shape_or_tensor.shape.as_list() if is_tensor else input_shape_or_tensor\n    if input_shape[-1] != 4:\n        raise ValueError('Last dimension of NCHW_VECT_C must be 4.')\n    permutation = permutations[len(input_shape)]\n    nhwc_shape = [input_shape[a] for a in permutation[:-1]]\n    nhwc_shape[-1] *= input_shape[-1]\n    if is_tensor:\n        t = array_ops.transpose(input_shape_or_tensor, permutation)\n        return array_ops.reshape(t, nhwc_shape)\n    else:\n        return nhwc_shape", "docstring": "Transforms the input from the NCHW_VECT_C layout to NHWC layout.\n\nNote: Does not include de-quantization or type conversion steps, which should\nbe applied beforehand.\n\nArgs:\ninput_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape\n\nReturns:\ntensor or shape array transformed into NHWC\n\nRaises:\nValueError: if last dimension of `input_shape_or_tensor` is not 4.", "source": "github-repos"}
{"code": "def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:\n    \n\n    for func in spec_dict[\"functions\"][\"signatures\"]:\n        for i, sig in enumerate(spec_dict[\"functions\"][\"signatures\"][func][\"signatures\"]):\n            args = sig[\"arguments\"]\n            req_args = []\n            pos_args = []\n            opt_args = []\n            mult_args = []\n            for arg in args:\n                \n                if arg.get(\"multiple\", False):\n                    if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n                        mult_args.extend(arg.get(\"values\", []))\n                    elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n                        \n                        mult_args.append(arg[\"type\"])\n\n                \n                elif arg.get(\"optional\", False) and arg.get(\"position\", False):\n                    if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n                        pos_args.append(arg.get(\"values\", []))\n                    elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n                        pos_args.append(arg[\"type\"])\n\n                \n                elif arg.get(\"optional\", False):\n                    if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n                        opt_args.extend(arg.get(\"values\", []))\n                    elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n                        opt_args.append(arg[\"type\"])\n\n                \n                else:\n                    if arg[\"type\"] in [\"Function\", \"Modifier\"]:\n                        req_args.append(arg.get(\"values\", []))\n                    elif arg[\"type\"] in [\"StrArgNSArg\", \"NSArg\", \"StrArg\"]:\n                        req_args.append(arg[\"type\"])\n\n            spec_dict[\"functions\"][\"signatures\"][func][\"signatures\"][i][\"req_args\"] = copy.deepcopy(\n                req_args\n            )\n            spec_dict[\"functions\"][\"signatures\"][func][\"signatures\"][i][\"pos_args\"] = copy.deepcopy(\n                pos_args\n            )\n            spec_dict[\"functions\"][\"signatures\"][func][\"signatures\"][i][\"opt_args\"] = copy.deepcopy(\n                opt_args\n            )\n            spec_dict[\"functions\"][\"signatures\"][func][\"signatures\"][i][\n                \"mult_args\"\n            ] = copy.deepcopy(mult_args)\n\n    return spec_dict", "docstring": "Enhance function signatures\n\nAdd required and optional objects to signatures objects for semantic validation\nsupport.\n\nArgs:\nspec_dict (Mapping[str, Any]): bel specification dictionary\n\nReturns:\nMapping[str, Any]: return enhanced bel specification dict", "source": "juraj-google-style"}
{"code": "def gfortran_search_path(library_dirs):\n    \n    cmd = (\"gfortran\", \"-print-search-dirs\")\n    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n    return_code = process.wait()\n    \n    if return_code != 0:\n        return library_dirs\n\n    cmd_output = process.stdout.read().decode(\"utf-8\")\n    \n    search_lines = cmd_output.strip().split(\"\\n\")\n    library_lines = [\n        line[len(FORTRAN_LIBRARY_PREFIX) :]\n        for line in search_lines\n        if line.startswith(FORTRAN_LIBRARY_PREFIX)\n    ]\n    if len(library_lines) != 1:\n        msg = GFORTRAN_MISSING_LIBS.format(cmd_output)\n        print(msg, file=sys.stderr)\n        return library_dirs\n\n    \n    library_line = library_lines[0]\n    accepted = set(library_dirs)\n    for part in library_line.split(os.pathsep):\n        full_path = os.path.abspath(part.strip())\n        if os.path.isdir(full_path):\n            accepted.add(full_path)\n        else:\n            \n            msg = GFORTRAN_BAD_PATH.format(full_path)\n            print(msg, file=sys.stderr)\n    return sorted(accepted)", "docstring": "Get the library directory paths for ``gfortran``.\n\nLooks for ``libraries: =`` in the output of ``gfortran -print-search-dirs``\nand then parses the paths. If this fails for any reason, this method will\nprint an error and return ``library_dirs``.\n\nArgs:\nlibrary_dirs (List[str]): Existing library directories.\n\nReturns:\nList[str]: The library directories for ``gfortran``.", "source": "juraj-google-style"}
{"code": "def copy(source, destination):\n    \n    if os.path.isdir(source):\n        return __copytree(source, destination)\n    else:\n        return __copyfile2(source, destination)", "docstring": "Copy file or directory.\n\nArgs:\nsource (str): Source file or directory\ndestination (str): Destination file or directory (where to copy).\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def _check_version(self, root):\n        \n        version = self._get_version(root)\n        supported = [StrictVersion(x) for x in\n                     self.supported_versions(root.tag)]\n\n        if version in supported:\n            return\n\n        error = \"Document version ({0}) not in supported versions ({1})\"\n        raise UnsupportedVersionError(\n            message=error.format(version, supported),\n            expected=supported,\n            found=version\n        )", "docstring": "Ensure the root element is a supported version.\n\nArgs:\nroot (etree.Element)\n\nRaises:\nUnsupportedVersionError", "source": "juraj-google-style"}
{"code": "def validate_seeded_answers(answers, options, algo):\n    if (algo['name'] == 'simple'):\n        return validate_seeded_answers_simple(answers, options, algo)\n    elif (algo['name'] == 'random'):\n        return validate_seeded_answers_random(answers)\n    else:\n        raise UnknownChooseAnswerAlgorithm()", "docstring": "Validate answers based on selection algorithm\n\nThis is called when instructor setup the tool and providing seeded answers to the question.\nThis function is trying to validate if instructor provided enough seeds for a give algorithm.\ne.g. we require 1 seed for each option in simple algorithm and at least 1 seed for random\nalgorithm. Because otherwise, the first student won't be able to see the answers on the\nsecond step where he/she suppose to compare and review other students answers.\n\nArgs:\nanswers (list): list of dict that contain seeded answers\noptions (dict): all options that should exist in the answers\nalgo (str): selection algorithm\n\nReturns:\nNone if successful, otherwise error message", "source": "codesearchnet"}
{"code": "def run(self, steps=None):\n    try:\n        while (self.instruction_pointer < len(self.code)):\n            self.step()\n            if (steps is not None):\n                steps -= 1\n                if (steps == 0):\n                    break\n    except StopIteration:\n        pass\n    except EOFError:\n        pass\n    return self", "docstring": "Run threaded code in machine.\n\nArgs:\nsteps: If specified, run that many number of instructions before\nstopping.", "source": "codesearchnet"}
{"code": "def is_displayed(target):\n    \n    is_displayed = getattr(target, 'is_displayed', None)\n    if not is_displayed or not callable(is_displayed):\n        raise TypeError('Target has no attribute \\'is_displayed\\' or not callable')\n    if not is_displayed():\n        raise WebDriverException('element not visible')", "docstring": "Assert whether the target is displayed\n\nArgs:\ntarget(WebElement): WebElement Object.\n\nReturns:\nReturn True if the element is displayed or return False otherwise.", "source": "juraj-google-style"}
{"code": "def replace_variables(self, text):\n    variables = {'python-executable': str(((self._venv_path / 'bin') / 'python'))}\n    return text.format(**variables)", "docstring": "Replace variable placeholders in `text` with values from the virtual env.\n\nThe variables are:\n- {python-executable}\n\nArgs:\ntext: The text to do replacment int.\n\nReturns: The text after replacement.", "source": "codesearchnet"}
{"code": "def askInitial():\n    return inquirer.prompt([inquirer.Text('inputPath', message=\"What's the path of your input file (eg input.csv)\"), inquirer.List('year', message='What year are you in', choices=[1, 2, 3, 4]), inquirer.Checkbox('whatToDo', message='What can I do for you (select with your spacebar)', choices=['Get your weighted average', 'Get your rank in the year', 'Reformat results by module and output to csv', 'Plot the results by module'])])", "docstring": "Asks the user for what it wants the script to do\n\nReturns:\n[dictionary] -- answers to the questions", "source": "codesearchnet"}
{"code": "def _create_dir_path(self, file_hash, path=None, hash_list=None):\n        \n        \n        if hash_list is None:\n            hash_list = list(file_hash)\n\n        if not hash_list:\n            raise IOError(\"Directory structure is too full!\")\n\n        \n        if not path:\n            path = os.path.join(\n                self.path,\n                hash_list.pop(0)\n            )\n\n        \n        if not os.path.exists(path):\n            os.mkdir(path)\n            return self._create_dir_path(\n                file_hash=file_hash,\n                path=path,\n                hash_list=hash_list\n            )\n\n        files = os.listdir(path)\n\n        \n        if file_hash in files:\n            return path\n\n        \n        if len(files) < self.dir_limit:\n            return path\n\n        \n        return self._create_dir_path(\n            file_hash=file_hash,\n            path=os.path.join(path, hash_list.pop(0)),\n            hash_list=hash_list\n        )", "docstring": "Create proper filesystem paths for given `file_hash`.\n\nArgs:\nfile_hash (str): Hash of the file for which the path should be\ncreated.\npath (str, default None): Recursion argument, don't set this.\nhash_list (list, default None): Recursion argument, don't set this.\n\nReturns:\nstr: Created path.", "source": "juraj-google-style"}
{"code": "def search(pattern):\n\n    def match(napp):\n        'Whether a NApp metadata matches the pattern.'\n        username = napp.get('username', napp.get('author'))\n        strings = (['{}/{}'.format(username, napp.get('name')), napp.get('description')] + napp.get('tags'))\n        return any((pattern.match(string) for string in strings))\n    napps = NAppsClient().get_napps()\n    return [napp for napp in napps if match(napp)]", "docstring": "Search all server NApps matching pattern.\n\nArgs:\npattern (str): Python regular expression.", "source": "codesearchnet"}
{"code": "def fetched_records(self, max_records=None):\n    if (max_records is None):\n        max_records = self.config['max_poll_records']\n    assert (max_records > 0)\n    drained = collections.defaultdict(list)\n    records_remaining = max_records\n    while (records_remaining > 0):\n        if (not self._next_partition_records):\n            if (not self._completed_fetches):\n                break\n            completion = self._completed_fetches.popleft()\n            self._next_partition_records = self._parse_fetched_data(completion)\n        else:\n            records_remaining -= self._append(drained, self._next_partition_records, records_remaining)\n    return (dict(drained), bool(self._completed_fetches))", "docstring": "Returns previously fetched records and updates consumed offsets.\n\nArguments:\nmax_records (int): Maximum number of records returned. Defaults\nto max_poll_records configuration.\n\nRaises:\nOffsetOutOfRangeError: if no subscription offset_reset_strategy\nCorruptRecordException: if message crc validation fails (check_crcs\nmust be set to True)\nRecordTooLargeError: if a message is larger than the currently\nconfigured max_partition_fetch_bytes\nTopicAuthorizationError: if consumer is not authorized to fetch\nmessages from the topic\n\nReturns: (records (dict), partial (bool))\nrecords: {TopicPartition: [messages]}\npartial: True if records returned did not fully drain any pending\npartition requests. This may be useful for choosing when to\npipeline additional fetch requests.", "source": "codesearchnet"}
{"code": "def print_search_results(self, search_results, buf=sys.stdout):\n        \n        formatted_lines = self.format_search_results(search_results)\n        pr = Printer(buf)\n\n        for txt, style in formatted_lines:\n            pr(txt, style)", "docstring": "Print formatted search results.\n\nArgs:\nsearch_results (list of `ResourceSearchResult`): Search to format.", "source": "juraj-google-style"}
{"code": "def _is_magical_field(self, model_instance, field, is_insert: bool):\n    old_value = getattr(model_instance, field.name, None)\n    field.pre_save(model_instance, is_insert)\n    new_value = getattr(model_instance, field.name, None)\n    return (old_value != new_value)", "docstring": "Verifies whether this field is gonna modify something\non its own.\n\n\"Magical\" means that a field modifies the field value\nduring the pre_save.\n\nArguments:\nmodel_instance:\nThe model instance the field is defined on.\n\nfield:\nThe field to get of whether the field is\nmagical.\n\nis_insert:\nPretend whether this is an insert?\n\nReturns:\nTrue when this field modifies something.", "source": "codesearchnet"}
{"code": "def delete(self, domain, type_name, search_command):\n        \n        return self._request(domain, type_name, search_command, 'DELETE', None)", "docstring": "Delete entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.", "source": "juraj-google-style"}
{"code": "def get_users(self, capacity=None):\n        \n        \n        users = list()\n        usersdicts = self.data.get('users')\n        if usersdicts is not None:\n            for userdata in usersdicts:\n                if capacity is not None and userdata['capacity'] != capacity:\n                    continue\n                id = userdata.get('id')\n                if id is None:\n                    id = userdata['name']\n                user = hdx.data.user.User.read_from_hdx(id, configuration=self.configuration)\n                user['capacity'] = userdata['capacity']\n                users.append(user)\n        return users", "docstring": "Returns the organization's users.\n\nArgs:\ncapacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.\nReturns:\nList[User]: Organization's users.", "source": "juraj-google-style"}
{"code": "def delete_keyvault(access_token, subscription_id, rgname, vault_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.KeyVault/vaults/', vault_name,\n                        '?api-version=', KEYVAULT_API])\n    return do_delete(endpoint, access_token)", "docstring": "Deletes a key vault in the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nvault_name (str): Name of the new key vault.\n\nReturns:\nHTTP response. 200 OK.", "source": "juraj-google-style"}
{"code": "async def setvolume(self, value):\n    self.logger.debug('volume command')\n    if (self.state != 'ready'):\n        return\n    logger.debug('Volume command received')\n    if (value == '+'):\n        if (self.volume < 100):\n            self.statuslog.debug('Volume up')\n            self.volume = ((10 * (self.volume \n            self.volumelog.info(str(self.volume))\n            try:\n                self.streamer.volume = (self.volume / 100)\n            except AttributeError:\n                pass\n        else:\n            self.statuslog.warning('Already at maximum volume')\n    elif (value == '-'):\n        if (self.volume > 0):\n            self.statuslog.debug('Volume down')\n            self.volume = ((10 * ((self.volume + 9) \n            self.volumelog.info(str(self.volume))\n            try:\n                self.streamer.volume = (self.volume / 100)\n            except AttributeError:\n                pass\n        else:\n            self.statuslog.warning('Already at minimum volume')\n    else:\n        try:\n            value = int(value)\n        except ValueError:\n            self.statuslog.error('Volume argument must be +, -, or a %')\n        else:\n            if (0 <= value <= 200):\n                self.statuslog.debug('Setting volume')\n                self.volume = value\n                self.volumelog.info(str(self.volume))\n                try:\n                    self.streamer.volume = (self.volume / 100)\n                except AttributeError:\n                    pass\n            else:\n                self.statuslog.error('Volume must be between 0 and 200')\n    self.write_volume()", "docstring": "The volume command\n\nArgs:\nvalue (str): The value to set the volume to", "source": "codesearchnet"}
{"code": "def archive(self, output_path):\n        \n\n        if self.path is None:\n            raise ArgumentError(\"Cannot archive a recipe yet without a reference to its original yaml file in self.path\")\n\n        outfile = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)\n\n        outfile.write(self.path, arcname=\"recipe_script.yaml\")\n\n        written_files = set()\n\n        for _factory, args, _resources, files in self.steps:\n            for arg_name in files:\n                file_path = args[arg_name]\n\n                if file_path in written_files:\n                    continue\n\n                if os.path.basename(file_path) != file_path:\n                    raise ArgumentError(\"Cannot archive a recipe yet that references file not in the same directory as the recipe\")\n\n                full_path = os.path.join(os.path.dirname(self.path), file_path)\n                outfile.write(full_path, arcname=file_path)\n                written_files.add(file_path)", "docstring": "Archive this recipe and all associated files into a .ship archive.\n\nArgs:\noutput_path (str): The path where the .ship file should be saved.", "source": "juraj-google-style"}
{"code": "def ReadSystemConfigurationArtifact(\n      self, system_configuration, session_identifier=CURRENT_SESSION):\n    \n    if system_configuration.code_page:\n      try:\n        self.SetCodepage(system_configuration.code_page)\n      except ValueError:\n        logger.warning(\n            'Unsupported codepage: {0:s}, defaulting to {1:s}'.format(\n                system_configuration.code_page, self._codepage))\n\n    self._hostnames[session_identifier] = system_configuration.hostname\n\n    self.SetValue('keyboard_layout', system_configuration.keyboard_layout)\n\n    self.SetValue('operating_system', system_configuration.operating_system)\n    self.SetValue(\n        'operating_system_product',\n        system_configuration.operating_system_product)\n    self.SetValue(\n        'operating_system_version',\n        system_configuration.operating_system_version)\n\n    if system_configuration.time_zone:\n      try:\n        self.SetTimeZone(system_configuration.time_zone)\n      except ValueError:\n        logger.warning(\n            'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(\n                system_configuration.time_zone, self.timezone.zone))\n\n    self._user_accounts[session_identifier] = {\n        user_account.username: user_account\n        for user_account in system_configuration.user_accounts}", "docstring": "Reads the knowledge base values from a system configuration artifact.\n\nNote that this overwrites existing values in the knowledge base.\n\nArgs:\nsystem_configuration (SystemConfigurationArtifact): system configuration\nartifact.\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.", "source": "juraj-google-style"}
{"code": "def populate_ast_nsarg_defaults(ast, belast, species_id=None):\n    \n\n    if isinstance(ast, NSArg):\n        given_term_id = \"{}:{}\".format(ast.namespace, ast.value)\n\n        r = bel.terms.terms.get_normalized_terms(given_term_id)\n        ast.canonical = r[\"canonical\"]\n        ast.decanonical = r[\"decanonical\"]\n\n        r = bel.terms.terms.get_terms(ast.canonical)\n\n        if len(r) > 0:\n            ast.species_id = r[0].get(\"species_id\", False)\n            ast.species_label = r[0].get(\"species_label\", False)\n\n        \n        \n        \n        if ast.species_id and species_id is None:\n            species_id = ast.species_id\n            belast.species.add((ast.species_id, ast.species_label))\n\n        elif ast.species_id and species_id and species_id != ast.species_id:\n            belast.species_id = False\n            belast.species_label = False\n\n    \n    if hasattr(ast, \"args\"):\n        for arg in ast.args:\n            populate_ast_nsarg_defaults(arg, belast, species_id)\n\n    return ast", "docstring": "Recursively populate NSArg AST entries for default (de)canonical values\n\nThis was added specifically for the BEL Pipeline. It is designed to\nrun directly against ArangoDB and not through the BELAPI.\n\nArgs:\nast (BEL): BEL AST\n\nReturns:\nBEL: BEL AST", "source": "juraj-google-style"}
{"code": "def update_load_balancer(access_token, subscription_id, resource_group, lb_name, body):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/loadBalancers/', lb_name,\n                        '?api-version=', NETWORK_API])\n    return do_put(endpoint, body, access_token)", "docstring": "Updates a load balancer model, i.e. PUT an updated LB body.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nlb_name (str): Name of the new load balancer.\nbody (str): JSON body of an updated load balancer.\n\nReturns:\nHTTP response. Load Balancer JSON body.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, d):\n        \n        sites = [Site.from_dict(sd) for sd in d[\"sites\"]]\n        charge = d.get(\"charge\", 0)\n        spin_multiplicity = d.get(\"spin_multiplicity\")\n        return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)", "docstring": "Reconstitute a Molecule object from a dict representation created using\nas_dict().\n\nArgs:\nd (dict): dict representation of Molecule.\n\nReturns:\nMolecule object", "source": "juraj-google-style"}
{"code": "def configure(self, sbi_config: str):\n        \n        \n        config_dict = json.loads(sbi_config)\n        self.debug_stream('SBI configuration:\\n%s',\n                          json.dumps(config_dict, indent=2))\n        try:\n            sbi = Subarray(self.get_name()).configure_sbi(config_dict)\n        except jsonschema.exceptions.ValidationError as error:\n            return json.dumps(dict(path=error.absolute_path.__str__(),\n                                   schema_path=error.schema_path.__str__(),\n                                   message=error.message), indent=2)\n        except RuntimeError as error:\n            return json.dumps(dict(error=str(error)), indent=2)\n        return 'Accepted SBI: {}'.format(sbi.id)", "docstring": "Configure an SBI for this subarray.\n\nArgs:\nsbi_config (str): SBI configuration JSON\n\nReturns:\nstr,", "source": "juraj-google-style"}
{"code": "def sanger_variants(self, institute_id=None, case_id=None):\n    query = {'validation': {'$exists': True}}\n    if institute_id:\n        query['institute_id'] = institute_id\n    if case_id:\n        query['case_id'] = case_id\n    return self.variant_collection.find(query)", "docstring": "Return all variants with sanger information\n\nArgs:\ninstitute_id(str)\ncase_id(str)\n\nReturns:\nres(pymongo.Cursor): A Cursor with all variants with sanger activity", "source": "codesearchnet"}
{"code": "def bessel_k0(x, name=None):\n    with ops.name_scope(name, 'bessel_k0', [x]):\n        return gen_special_math_ops.bessel_k0(x)", "docstring": "Computes the Bessel k0 function of `x` element-wise.\n\nModified Bessel function of order 0.\n\nIt is preferable to use the numerically stabler function `k0e(x)` instead.\n\n>>> tf.math.special.bessel_k0([0.5, 1., 2., 4.]).numpy()\narray([0.92441907, 0.42102444, 0.11389387, 0.01115968], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.k0\n@end_compatibility", "source": "github-repos"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    device_cache = match.get('DeviceCache', {})\n    for device, value in iter(device_cache.items()):\n      name = value.get('Name', '')\n      if name:\n        name = ''.join(('Name:', name))\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.root = '/DeviceCache'\n\n      datetime_value = value.get('LastInquiryUpdate', None)\n      if datetime_value:\n        event_data.desc = ' '.join(\n            filter(None, ('Bluetooth Discovery', name)))\n        event_data.key = '{0:s}/LastInquiryUpdate'.format(device)\n\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        if device in match.get('PairedDevices', []):\n          event_data.desc = 'Paired:True {0:s}'.format(name)\n          event_data.key = device\n\n          event = time_events.PythonDatetimeEvent(\n              datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      datetime_value = value.get('LastNameUpdate', None)\n      if datetime_value:\n        event_data.desc = ' '.join(filter(None, ('Device Name Set', name)))\n        event_data.key = '{0:s}/LastNameUpdate'.format(device)\n\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      datetime_value = value.get('LastServicesUpdate', None)\n      if datetime_value:\n        event_data.desc = ' '.join(filter(None, ('Services Updated', name)))\n        event_data.key = '{0:s}/LastServicesUpdate'.format(device)\n\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant BT entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def _call_unittest_assertion(assertion_method, *args, msg=None, extras=None, **kwargs):\n    my_msg = None\n    try:\n        assertion_method(*args, **kwargs)\n    except AssertionError as e:\n        my_msg = str(e)\n        if msg:\n            my_msg = f'{my_msg} {msg}'\n    if my_msg is not None:\n        raise signals.TestFailure(my_msg, extras=extras)", "docstring": "Wrapper for converting a unittest assertion into a Mobly one.\n\nArgs:\nassertion_method: unittest.TestCase assertion method to call.\n*args: Positional arguments for the assertion call.\nmsg: A string that adds additional info about the failure.\nextras: An optional field for extra information to be included in\ntest result.\n**kwargs: Keyword arguments for the assertion call.", "source": "github-repos"}
{"code": "def noisy_identity_kernel_initializer(base_num_channels, stddev=1e-08):\n\n    def _noisy_identity_kernel_initializer(shape, dtype=tf.float32, partition_info=None):\n        'Constructs a noisy identity kernel.\\n\\n    Args:\\n      shape: List of integers. Represents shape of result.\\n      dtype: data type for values in result.\\n      partition_info: Partition information for initializer functions. Ignored.\\n\\n    Returns:\\n      Tensor of desired shape and dtype such that applying it as a convolution\\n        kernel results in a noisy near-identity operation.\\n\\n    Raises:\\n      ValueError: If shape does not define a valid kernel.\\n                  If filter width and height differ.\\n                  If filter width and height are not odd numbers.\\n                  If number of input and output channels are not multiples of\\n                    base_num_channels.\\n    '\n        if (len(shape) != 4):\n            raise ValueError('Convolution kernels must be rank 4.')\n        (filter_height, filter_width, in_channels, out_channels) = shape\n        if (filter_width != filter_height):\n            raise ValueError('Noisy identity initializer only works for square filters.')\n        if ((filter_width % 2) != 1):\n            raise ValueError('Noisy identity initializer requires filters have odd height and width.')\n        if (((in_channels % base_num_channels) != 0) or ((out_channels % base_num_channels) != 0)):\n            raise ValueError('in_channels and out_channels must both be multiples of base_num_channels.')\n        middle_pixel = (filter_height \n        is_middle_pixel = tf.logical_and(tf.equal(_range_along_dimension(0, shape), middle_pixel), tf.equal(_range_along_dimension(1, shape), middle_pixel))\n        is_same_channel_multiple = tf.equal(tf.floordiv((_range_along_dimension(2, shape) * base_num_channels), in_channels), tf.floordiv((_range_along_dimension(3, shape) * base_num_channels), out_channels))\n        noise = tf.truncated_normal(shape, stddev=stddev, dtype=dtype)\n        return tf.where(tf.logical_and(is_same_channel_multiple, is_middle_pixel), (tf.ones(shape, dtype=dtype) * (base_num_channels / out_channels)), noise)\n    return _noisy_identity_kernel_initializer", "docstring": "Build an initializer for constructing near-identity convolution kernels.\n\nConstruct a convolution kernel where in_channels and out_channels are\nmultiples of base_num_channels, but need not be equal. This initializer is\nessentially the same as identity_kernel_initializer, except that magnitude\nis \"spread out\" across multiple copies of the input.\n\nArgs:\nbase_num_channels: int. Number that divides both in_channels and\nout_channels.\nstddev: float. Standard deviation of truncated normal noise added to\noff-entries to break ties.\n\nReturns:\nInitializer function for building a noisy identity kernel.", "source": "codesearchnet"}
{"code": "def _CheckIsLink(self, file_entry):\n    if (definitions.FILE_ENTRY_TYPE_LINK not in self._file_entry_types):\n        return False\n    return file_entry.IsLink()", "docstring": "Checks the is_link find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"}
{"code": "def _other_wrapper(self, name, writing):\n        \n        io_attr = getattr(self._io, name)\n\n        def other_wrapper(*args, **kwargs):\n            \n            write_seek = self._io.tell()\n            ret_value = io_attr(*args, **kwargs)\n            if write_seek != self._io.tell():\n                self._read_seek = self._io.tell()\n                self._read_whence = 0\n            if not writing or not IS_PY2:\n                return ret_value\n\n        return other_wrapper", "docstring": "Wrap a stream attribute in an other_wrapper.\n\nArgs:\nname: the name of the stream attribute to wrap.\n\nReturns:\nother_wrapper which is described below.", "source": "juraj-google-style"}
{"code": "def get_pkg_module_names(package_path):\n    module_names = set()\n    for (fobj, modname, _) in pkgutil.iter_modules(path=[package_path]):\n        filename = os.path.join(fobj.path, ('%s.py' % modname))\n        if os.path.exists(filename):\n            module_names.add(os.path.abspath(filename))\n    return module_names", "docstring": "Returns module filenames from package.\n\nArgs:\npackage_path: Path to Python package.\nReturns:\nA set of module filenames.", "source": "codesearchnet"}
{"code": "def _process_contains_filter_directive(filter_operation_info, location, context, parameters):\n    filtered_field_type = filter_operation_info.field_type\n    filtered_field_name = filter_operation_info.field_name\n    base_field_type = strip_non_null_from_type(filtered_field_type)\n    if (not isinstance(base_field_type, GraphQLList)):\n        raise GraphQLCompilationError(u'Cannot apply \"contains\" to non-list type {}'.format(filtered_field_type))\n    argument_inferred_type = strip_non_null_from_type(base_field_type.of_type)\n    (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)\n    filter_predicate = expressions.BinaryComposition(u'contains', expressions.LocalField(filtered_field_name), argument_expression)\n    if (non_existence_expression is not None):\n        filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)\n    return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks if the directive arg is contained in the field.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nparameters: list of 1 element, specifying the collection in which the value must exist;\nif the collection is optional and missing, the check will return True\n\nReturns:\na Filter basic block that performs the contains check", "source": "codesearchnet"}
{"code": "def _FormatIPCPermToken(self, token_data):\n    return {'user_id': token_data.user_identifier, 'group_id': token_data.group_identifier, 'creator_user_id': token_data.creator_user_identifier, 'creator_group_id': token_data.creator_group_identifier, 'access': token_data.access_mode}", "docstring": "Formats an IPC permissions token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def __init__(self, parent):\n        \n\n        super(ModuleFrame, self).__init__(parent)\n        logger.debug(\"Initialising module tabs\")\n\n        \n        style = ttk.Style()\n        style.configure(\"Module.TFrame\", background=\"white\")\n\n        self.module_buttons = {}\n        self.current_button = None\n\n        \n        self.module_list = ttk.Frame(self, width=150, style=\"Module.TFrame\")\n        self.module_list.grid(column=0, row=0, padx=0, pady=0, sticky=\"W E N S\")\n        self.module_list.columnconfigure(0, weight=1)\n        self.module_list.rowconfigure(0, weight=0)\n        self.module_list.rowconfigure(1, weight=1)\n        \n        header = tk.Label(self.module_list, text=\"Modules\", bg=\"white\", fg=\"\n        header.grid(column=0, row=0, padx=0, pady=0, sticky=\"W E N\")\n        \n        self.module_selection = ttk.Frame(self.module_list, style=\"Module.TFrame\")\n        self.module_selection.grid(column=0, row=1, padx=0, pady=0, sticky=\"W E N S\")\n        self.module_selection.columnconfigure(0, weight=1)\n        \n        self.module_ui = ttk.Frame(self)\n        self.module_ui.grid(column=1, row=0, padx=0, pady=0, sticky=\"W E N S\")\n        self.module_ui.columnconfigure(0, weight=1)\n        self.module_ui.rowconfigure(0, weight=1)\n\n        self.clear_modules()\n\n        \n        self.columnconfigure(0, minsize=150)\n        self.columnconfigure(1, weight=1)\n        self.rowconfigure(0, weight=1)", "docstring": "Create a new module frame and add it to the given parent.\n\nArgs:\nparent: A tk or ttk object", "source": "juraj-google-style"}
{"code": "def call(self, inputs):\n    \n    del inputs  \n    with tf.compat.v1.name_scope(self._name):\n      return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)", "docstring": "Runs the model to generate multivariate normal distribution.\n\nArgs:\ninputs: Unused.\n\nReturns:\nA MultivariateNormalDiag distribution with event shape\n[dimensions], batch shape [], and sample shape [sample_shape,\ndimensions].", "source": "juraj-google-style"}
{"code": "def __init__(self, rot_mats: Optional[torch.Tensor]=None, quats: Optional[torch.Tensor]=None, normalize_quats: bool=True):\n    if rot_mats is None and quats is None or (rot_mats is not None and quats is not None):\n        raise ValueError('Exactly one input argument must be specified')\n    if rot_mats is not None and rot_mats.shape[-2:] != (3, 3) or (quats is not None and quats.shape[-1] != 4):\n        raise ValueError('Incorrectly shaped rotation matrix or quaternion')\n    if quats is not None:\n        quats = quats.to(dtype=torch.float32)\n    if rot_mats is not None:\n        rot_mats = rot_mats.to(dtype=torch.float32)\n    if quats is not None and normalize_quats:\n        quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n    self._rot_mats = rot_mats\n    self._quats = quats", "docstring": "Args:\nrot_mats:\nA [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats\nquats:\nA [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit\nquaternion\nnormalize_quats:\nIf quats is specified, whether to normalize quats", "source": "github-repos"}
{"code": "def reaction_charge(reaction, compound_charge):\n    \n\n    charge_sum = 0.0\n    for compound, value in reaction.compounds:\n        charge = compound_charge.get(compound.name, float('nan'))\n        charge_sum += charge * float(value)\n    return charge_sum", "docstring": "Calculate the overall charge for the specified reaction.\n\nArgs:\nreaction: :class:`psamm.reaction.Reaction`.\ncompound_charge: a map from each compound to charge values.", "source": "juraj-google-style"}
{"code": "def get_repository(self, path):\n        \n        \n        parts = path.split('@', 1)\n        if len(parts) == 1:\n            parts = (\"filesystem\", parts[0])\n\n        repo_type, location = parts\n        if repo_type == \"filesystem\":\n            \n            \n            \n            \n            location = os.path.abspath(location)\n\n        normalised_path = \"%s@%s\" % (repo_type, location)\n        return self._get_repository(normalised_path)", "docstring": "Get a package repository.\n\nArgs:\npath (str): Entry from the 'packages_path' config setting. This may\nsimply be a path (which is managed by the 'filesystem' package\nrepository plugin), or a string in the form \"type@location\",\nwhere 'type' identifies the repository plugin type to use.\n\nReturns:\n`PackageRepository` instance.", "source": "juraj-google-style"}
{"code": "def on_modified(self, event):\n        \n        self._logger.debug('Detected modify event on watched path: %s', event.src_path)\n\n        self._process_event(event)", "docstring": "Function called everytime a new file is modified.\n\nArgs:\nevent: Event to process.", "source": "juraj-google-style"}
{"code": "def _unschedule_sending_init_updates(self):\n    LOG.debug('Un-scheduling sending of initial Non-RTC UPDATEs (init. UPDATEs already sent: %s)', self._sent_init_non_rtc_update)\n    if self._rtc_eor_timer:\n        self._rtc_eor_timer.stop()\n        self._rtc_eor_timer = None\n        return True\n    return False", "docstring": "Un-schedules sending of initial updates\n\nStops the timer if set for sending initial updates.\nReturns:\n- True if timer was stopped\n- False if timer was already stopped and nothing was done", "source": "codesearchnet"}
{"code": "def CheckForBadCharacters(filename, lines, error):\n    for (linenum, line) in enumerate(lines):\n        if (u'�' in line):\n            error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).')\n        if ('\\x00' in line):\n            error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')", "docstring": "Logs an error for each line containing bad characters.\n\nTwo kinds of bad characters:\n\n1. Unicode replacement characters: These indicate that either the file\ncontained invalid UTF-8 (likely) or Unicode replacement characters (which\nit shouldn't).  Note that it's possible for this to throw off line\nnumbering if the invalid UTF-8 occurred adjacent to a newline.\n\n2. NUL bytes.  These are problematic for some tools.\n\nArgs:\nfilename: The name of the current file.\nlines: An array of strings, each representing a line of the file.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def _set_weights(instance, symbolic_weights, weight_values, name, skip_mismatch=False):\n    for i, weight_value in enumerate(weight_values):\n        expected_shape = symbolic_weights[i].shape\n        received_shape = weight_value.shape\n        if expected_shape != received_shape:\n            if skip_mismatch:\n                warnings.warn(f'Skipping loading weights for {name}due to mismatch in shape for weight {symbolic_weights[i].path}. Weight expects shape {expected_shape}. Received saved weight with shape {received_shape}', stacklevel=2)\n                continue\n            raise ValueError(f'Shape mismatch in {name}for weight {symbolic_weights[i].path}. Weight expects shape {expected_shape}. Received saved weight with shape {received_shape}')\n        symbolic_weights[i].assign(weight_value)\n    if hasattr(instance, 'finalize_state') and symbolic_weights:\n        instance.finalize_state()", "docstring": "Safely set weights into a model or a layer.\n\nArgs:\ninstance: Model or layer instance,\nsymbolic_weights: symbolic tensors representing\nthe weights of the variables to load,\nweight_values: values of the weights to load,\nskip_mismatch: Boolean, whether to skip loading of weights\nwhere there is a mismatch in the shape of the weights,\nname: name used to identify the group.\n\nRaises:\nValueError: in case of mismatch between provided\nmodel/layer and weights.", "source": "github-repos"}
{"code": "def create_store(reducer, initial_state=None, enhancer=None):\n    \n    if enhancer is not None:\n        if not hasattr(enhancer, '__call__'):\n            raise TypeError('Expected the enhancer to be a function.')\n        return enhancer(create_store)(reducer, initial_state)\n\n    if not hasattr(reducer, '__call__'):\n        raise TypeError('Expected the reducer to be a function.')\n\n    \n    current_reducer = [reducer]\n    current_state = [initial_state]\n    current_listeners = [[]]\n    next_listeners = [current_listeners[0]]\n    is_dispatching = [False]\n\n    def ensure_can_mutate_next_listeners():\n        if next_listeners[0] == current_listeners[0]:\n            next_listeners[0] = current_listeners[0][:]\n\n    def get_state():\n        return current_state[0]\n\n    def subscribe(listener):\n        if not hasattr(listener, '__call__'):\n            raise TypeError('Expected listener to be a function.')\n\n        is_subscribed = [True]  \n\n        ensure_can_mutate_next_listeners()\n        next_listeners[0].append(listener)\n\n        def unsubcribe():\n            if not is_subscribed[0]:\n                return\n            is_subscribed[0] = False\n\n            ensure_can_mutate_next_listeners()\n            index = next_listeners[0].index(listener)\n            next_listeners[0].pop(index)\n\n        return unsubcribe\n\n    def dispatch(action):\n        if not isinstance(action, dict):\n            raise TypeError('Actions must be a dict. '\n                            'Use custom middleware for async actions.')\n\n        if action.get('type') is None:\n            raise ValueError('Actions must have a non-None \"type\" property. '\n                             'Have you misspelled a constant?')\n\n        if is_dispatching[0]:\n            raise Exception('Reducers may not dispatch actions.')\n\n        try:\n            is_dispatching[0] = True\n            current_state[0] = current_reducer[0](current_state[0], action)\n        finally:\n            is_dispatching[0] = False\n\n        listeners = current_listeners[0] = next_listeners[0]\n        for listener in listeners:\n            listener()\n\n        return action\n\n    def replace_reducer(next_reducer):\n        if not hasattr(next_reducer, '__call__'):\n            raise TypeError('Expected next_reducer to be a function')\n\n        current_reducer[0] = next_reducer\n        dispatch({'type': ActionTypes.INIT})\n\n    dispatch({'type': ActionTypes.INIT})\n\n    return StoreDict(\n        dispatch=dispatch,\n        subscribe=subscribe,\n        get_state=get_state,\n        replace_reducer=replace_reducer,\n    )", "docstring": "redux in a nutshell.\n\nobservable has been omitted.\n\nArgs:\nreducer: root reducer function for the state tree\ninitial_state: optional initial state data\nenhancer: optional enhancer function for middleware etc.\n\nReturns:\na Pydux store", "source": "juraj-google-style"}
{"code": "def shape4d(a, data_format='NHWC'):\n    \n    s2d = shape2d(a)\n    if get_data_format(data_format, False) == 'NHWC':\n        return [1] + s2d + [1]\n    else:\n        return [1, 1] + s2d", "docstring": "Ensuer a 4D shape, to use with 4D symbolic functions.\n\nArgs:\na: a int or tuple/list of length 2\n\nReturns:\nlist: of length 4. if ``a`` is a int, return ``[1, a, a, 1]``\nor ``[1, 1, a, a]`` depending on data_format.", "source": "juraj-google-style"}
{"code": "def __init__(self, event_type, event_data):\n        \n\n        self.type = event_type\n        self.data = event_data", "docstring": "Creates a new event.\n\nArgs:\nevent_type (int): the type of the event, see\n:class:`~bigchaindb.events.EventTypes`\nevent_data (obj): the data of the event.", "source": "juraj-google-style"}
{"code": "def set(self, name, valu):\n        \n        byts = s_msgpack.en(valu)\n        lkey = self.pref + name.encode('utf8')\n        self.slab.put(lkey, byts, db=self.db)\n        self.info[name] = valu", "docstring": "Set a name in the SlabDict.\n\nArgs:\nname (str): The key name.\nvalu (obj): A msgpack compatible value.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids('.')]\n    if self.padding_side == 'right':\n        return cls + token_ids_0 + question_suffix + sep + token_ids_1 + sep\n    else:\n        return cls + token_ids_0 + sep + token_ids_1 + question_suffix + sep", "docstring": "Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special\ntokens. A Splinter sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences for question answering: `[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nThe question token IDs if pad_on_right, else context tokens IDs\ntoken_ids_1 (`List[int]`, *optional*):\nThe context token IDs if pad_on_right, else question token IDs\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def pick_env_and_run_and_report(self, env: env_tools.PreparedEnv, env_py2: Optional[env_tools.PreparedEnv], verbose: bool, previous_failures: Set['Check']) -> CheckResult:\n    env.report_status_to_github('pending', 'Running...', self.context())\n    chosen_env = cast(env_tools.PreparedEnv, (env_py2 if self.needs_python2_env() else env))\n    os.chdir(cast(str, chosen_env.destination_directory))\n    result = self.run(chosen_env, verbose, previous_failures)\n    if (result.unexpected_error is not None):\n        env.report_status_to_github('error', 'Unexpected error.', self.context())\n    else:\n        env.report_status_to_github(('success' if result.success else 'failure'), result.message, self.context())\n    return result", "docstring": "Evaluates this check in python 3 or 2.7, and reports to github.\n\nIf the prepared environments are not linked to a github repository,\nwith a known access token, reporting to github is skipped.\n\nArgs:\nenv: A prepared python 3 environment.\nenv_py2: A prepared python 2.7 environment.\nverbose: When set, more progress output is produced.\nprevious_failures: Checks that have already run and failed.\n\nReturns:\nA CheckResult instance.", "source": "codesearchnet"}
{"code": "def finish_connection(self, conn_or_internal_id, successful, failure_reason=None):\n        \n\n        data = {\n            'id': conn_or_internal_id,\n            'success': successful,\n            'failure_reason': failure_reason\n        }\n\n        action = ConnectionAction('finish_connection', data, sync=False)\n        self._actions.put(action)", "docstring": "Finish a connection attempt\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id\nsuccessful (bool): Whether this connection attempt was successful\nfailure_reason (string): If this connection attempt failed, an optional reason\nfor the failure.", "source": "juraj-google-style"}
{"code": "def DisplayTree(node, children, level=0):\n  \n  value = ''\n  node_type = ''\n\n  if 'caseValue' in node:\n    case_value = node['caseValue']\n    node_type = case_value['ProductDimension.Type']\n\n    if node_type == 'ProductCanonicalCondition':\n      value = (case_value['condition'] if 'condition' in case_value\n               else 'OTHER')\n    elif node_type == 'ProductBiddingCategory':\n      value = '%s(%s)' % (case_value['type'], case_value['value']\n                          if 'value' in case_value else 'OTHER')\n    else:\n      value = (case_value['value'] if 'value' in case_value else 'OTHER')\n\n  print ('%sid: %s, node_type: %s, value: %s\\n'\n         % (' ' * level, node['id'], node_type, value))\n\n  for child_node in children[node['id']]:\n    DisplayTree(child_node, children, level + 1)", "docstring": "Recursively display a node and each of its children.\n\nArgs:\nnode: The node we're displaying the children of.\nchildren: Children of the parent node.\nlevel: How deep in the tree we are.", "source": "juraj-google-style"}
{"code": "def xfrange(start, stop, step=1, maxSize=(- 1)):\n    if (start <= stop):\n        (stop, step) = ((stop + 1), abs(step))\n    else:\n        (stop, step) = ((stop - 1), (- abs(step)))\n    if (maxSize >= 0):\n        size = lenRange(start, stop, step)\n        if (size > maxSize):\n            raise exceptions.MaxSizeException(('Size %d > %s (MAX_FRAME_SIZE)' % (size, maxSize)))\n    return (f for f in xrange(start, stop, step))", "docstring": "Returns a generator that yields the frames from start to stop, inclusive.\nIn other words it adds or subtracts a frame, as necessary, to return the\nstop value as well, if the stepped range would touch that value.\n\nArgs:\nstart (int):\nstop (int):\nstep (int): Note that the sign will be ignored\nmaxSize (int):\n\nReturns:\ngenerator:\n\nRaises:\n:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded", "source": "codesearchnet"}
{"code": "def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n    residual = latents\n    latents = self.input_latents_norm(latents)\n    context = self.input_context_norm(context)\n    latents, self_attn_weights, present_key_value = self.self_attn(latents=latents, context=context, attention_mask=attention_mask)\n    latents = residual + latents\n    residual = latents\n    latents = self.post_attention_layernorm(latents)\n    latents = self.mlp(latents)\n    latents = residual + latents\n    outputs = (latents,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nlatents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\ncontext (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n`(batch, sequence_length)` where padding elements are indicated by 0.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\npast_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states", "source": "github-repos"}
{"code": "def gemini_query(self, query_id):\n        \n        logger.debug(\"Looking for query with id {0}\".format(query_id))\n        return self.query(GeminiQuery).filter_by(id=query_id).first()", "docstring": "Return a gemini query\n\nArgs:\nname (str)", "source": "juraj-google-style"}
{"code": "def _PromptUserForAPFSVolumeIdentifiers(self, volume_system, volume_identifiers):\n    print_header = True\n    while True:\n        if print_header:\n            self._PrintAPFSVolumeIdentifiersOverview(volume_system, volume_identifiers)\n            print_header = False\n        lines = self._textwrapper.wrap(self._USER_PROMPT_APFS)\n        self._output_writer.Write('\\n'.join(lines))\n        self._output_writer.Write('\\n\\nVolume identifiers: ')\n        try:\n            selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='apfs')\n            if ((not selected_volumes) or (not set(selected_volumes).difference(volume_identifiers))):\n                break\n        except ValueError:\n            pass\n        self._output_writer.Write('\\n')\n        lines = self._textwrapper.wrap('Unsupported volume identifier(s), please try again or abort with Ctrl^C.')\n        self._output_writer.Write('\\n'.join(lines))\n        self._output_writer.Write('\\n\\n')\n    return selected_volumes", "docstring": "Prompts the user to provide APFS volume identifiers.\n\nArgs:\nvolume_system (dfvfs.APFSVolumeSystem): volume system.\nvolume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix or None.", "source": "codesearchnet"}
{"code": "def variant(self, document_id, gene_panels=None, case_id=None):\n    query = {}\n    if case_id:\n        query['case_id'] = case_id\n        query['variant_id'] = document_id\n    else:\n        query['_id'] = document_id\n    variant_obj = self.variant_collection.find_one(query)\n    if variant_obj:\n        variant_obj = self.add_gene_info(variant_obj, gene_panels)\n        if (variant_obj['chromosome'] in ['X', 'Y']):\n            variant_obj['is_par'] = is_par(variant_obj['chromosome'], variant_obj['position'])\n    return variant_obj", "docstring": "Returns the specified variant.\n\nArguments:\ndocument_id : A md5 key that represents the variant or \"variant_id\"\ngene_panels(List[GenePanel])\ncase_id (str): case id (will search with \"variant_id\")\n\nReturns:\nvariant_object(Variant): A odm variant object", "source": "codesearchnet"}
{"code": "def get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses):\n    \n    ret = []\n    \n    pool = {int(k): v for k, v in pool.items()}\n    seeded = {'seeded'+str(index): answer for index, answer in enumerate(seeded_answers)}\n    merged_pool = seeded.keys()\n\n    for key in pool:\n        merged_pool += pool[key].keys()\n\n    \n    random.shuffle(merged_pool)\n    \n    student_id = get_student_item_dict()['student_id']\n\n    for student in merged_pool:\n        if len(ret) >= num_responses:\n            \n            break\n        elif student == student_id:\n            \n            continue\n\n        if student.startswith('seeded'):\n            option = seeded[student]['answer']\n            rationale = seeded[student]['rationale']\n        else:\n            student_item = get_student_item_dict(student)\n            submission = sas_api.get_answers_for_student(student_item)\n            rationale = submission.get_rationale(0)\n            option = submission.get_vote(0)\n        ret.append({'option': option, 'rationale': rationale})\n\n    return {\"answers\": ret}", "docstring": "Get answers from others with random algorithm, which randomly select answer from the pool.\n\nStudent may get three answers for option 1 or one answer for option 1 and two answers for option 2.\n\nArgs:\nsee `get_other_answers`\nnum_responses (int): the number of responses to be returned. This value may not be\nrespected if there is not enough answers to return\n\nReturns:\ndict: answers based on the selection algorithm", "source": "juraj-google-style"}
{"code": "def distinct(self, selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call distinct() on a closed Queryable.')\n    if (not is_callable(selector)):\n        raise TypeError('distinct() parameter selector={0} is not callable'.format(repr(selector)))\n    return self._create(self._generate_distinct_result(selector))", "docstring": "Eliminate duplicate elements from a sequence.\n\nNote: This method uses deferred execution.\n\nArgs:\nselector: An optional single argument function the result of which\nis the value compared for uniqueness against elements already\nconsumed. If omitted, the element value itself is compared for\nuniqueness.\n\nReturns:\nUnique elements of the source sequence as determined by the\nselector function.  Note that it is unprojected elements that are\nreturned, even if a selector was provided.\n\nRaises:\nValueError: If the Queryable is closed.\nTypeError: If the selector is not callable.", "source": "codesearchnet"}
{"code": "def SetUsername(self, username):\n    self._username = username\n    logger.debug('Elasticsearch username: {0!s}'.format(username))", "docstring": "Sets the username.\n\nArgs:\nusername (str): username to authenticate with.", "source": "codesearchnet"}
{"code": "def transform_to_mods_multimono(marc_xml, uuid, url):\n    marc_xml = _read_content_or_path(marc_xml)\n    transformed = xslt_transformation(marc_xml, _absolute_template_path('MARC21toMultiMonographTitle.xsl'))\n    return _apply_postprocessing(marc_xml=marc_xml, xml=transformed, func=mods_postprocessor.postprocess_multi_mono, uuid=uuid, url=url)", "docstring": "Convert `marc_xml` to multimonograph MODS data format.\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "codesearchnet"}
{"code": "def get_link(self, task_id):\n    links = [x for x in self.links if (x.task_id == task_id)]\n    if (len(links) != 1):\n        raise CoTError('No single Link matches task_id {}!\\n{}'.format(task_id, self.dependent_task_ids()))\n    return links[0]", "docstring": "Get a ``LinkOfTrust`` by task id.\n\nArgs:\ntask_id (str): the task id to find.\n\nReturns:\nLinkOfTrust: the link matching the task id.\n\nRaises:\nCoTError: if no ``LinkOfTrust`` matches.", "source": "codesearchnet"}
{"code": "def listen(self):\n    logger.info(('Listening on port ' + str(self.listener.listen_port)))\n    self.listener.listen()", "docstring": "Starts the client listener to listen for server responses.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def CompileReport(self, mediator):\n    \n    report_text = [\n        'Sessionize plugin identified {0:d} sessions and '\n        'applied {1:d} tags.'.format(\n            len(self._events_per_session), self._number_of_event_tags)]\n    for session, event_count in enumerate(self._events_per_session):\n      report_text.append('\\tSession {0:d}: {1:d} events'.format(\n          session, event_count))\n    report_text = '\\n'.join(report_text)\n    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: analysis report.", "source": "juraj-google-style"}
{"code": "def __setattr__(self, name: str, value: Any) -> None:\n    if name.startswith('_'):\n        super().__setattr__(name, value)\n    else:\n        self[name] = value", "docstring": "Set attribute of this Dict.\n\nNOTE(daiyip): When setting attributes, public attributes (not started with\n'_') are set as dict fields, while private attributes (started with '_') are\nset on the object instance.\n\nArgs:\nname: Name of attribute.\nvalue: Value of attribute.", "source": "github-repos"}
{"code": "def get_height_rect(self, x: int, y: int, width: int, height: int, string: str) -> int:\n    string_ = string.encode('utf-8')\n    return int(lib.get_height_rect(self.console_c, x, y, width, height, string_, len(string_)))", "docstring": "Return the height of this text word-wrapped into this rectangle.\n\nArgs:\nx (int): The x coordinate from the left.\ny (int): The y coordinate from the top.\nwidth (int): Maximum width to render the text.\nheight (int): Maximum lines to render the text.\nstring (str): A Unicode string.\n\nReturns:\nint: The number of lines of text once word-wrapped.", "source": "codesearchnet"}
{"code": "def lock(self, key, client):\n    self.key = key\n    self.client = client", "docstring": "Set the key that will be used to ensure messages come from one party\n\nArgs:\nkey (string): The key used to validate future messages\nclient (string): A string that will be returned to indicate who\nlocked this device.", "source": "codesearchnet"}
{"code": "def parse_from_string(string, version_type):\n    if not re.search('[0-9]+\\\\.[0-9]+\\\\.[a-zA-Z0-9]+', string):\n        raise RuntimeError('Invalid version string: %s' % string)\n    major, minor, extension = string.split('.', 2)\n    extension_split = extension.split('-', 1)\n    patch = extension_split[0]\n    if len(extension_split) == 2:\n        identifier_string = '-' + extension_split[1]\n    else:\n        identifier_string = ''\n    return Version(major, minor, patch, identifier_string, version_type)", "docstring": "Returns version object from Semver string.\n\nArgs:\nstring: version string\nversion_type: version parameter\n\nRaises:\nRuntimeError: If the version string is not valid.", "source": "github-repos"}
{"code": "def get_input_shapes_map(input_tensors):\n    input_arrays = [tensor[0] for tensor in input_tensors]\n    input_shapes_list = []\n    for _, shape, _ in input_tensors:\n        dims = None\n        if shape:\n            dims = [dim.value for dim in shape.dims]\n        input_shapes_list.append(dims)\n    input_shapes = {name: shape for name, shape in zip(input_arrays, input_shapes_list) if shape}\n    return input_shapes", "docstring": "Gets a map of input names to shapes.\n\nArgs:\ninput_tensors: List of input tensor tuples `(name, shape, type)`.\n\nReturns:\n{string : list of integers}.", "source": "github-repos"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = utils.BytearrayStream()\n\n        if self._unique_identifier is not None:\n            self._unique_identifier.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._offset is not None:\n            self._offset.write(local_stream, kmip_version=kmip_version)\n        if self._template_attribute is not None:\n            self._template_attribute.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(RekeyRequestPayload, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Rekey request payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def load_identity_signer(key_dir, key_name):\n    \n    key_path = os.path.join(key_dir, '{}.priv'.format(key_name))\n\n    if not os.path.exists(key_path):\n        raise LocalConfigurationError(\n            \"No such signing key file: {}\".format(key_path))\n    if not os.access(key_path, os.R_OK):\n        raise LocalConfigurationError(\n            \"Key file is not readable: {}\".format(key_path))\n\n    LOGGER.info('Loading signing key: %s', key_path)\n    try:\n        with open(key_path, 'r') as key_file:\n            private_key_str = key_file.read().strip()\n    except IOError as e:\n        raise LocalConfigurationError(\n            \"Could not load key file: {}\".format(str(e)))\n\n    try:\n        private_key = Secp256k1PrivateKey.from_hex(private_key_str)\n    except signing.ParseError as e:\n        raise LocalConfigurationError(\n            \"Invalid key in file {}: {}\".format(key_path, str(e)))\n\n    context = signing.create_context('secp256k1')\n    crypto_factory = CryptoFactory(context)\n    return crypto_factory.new_signer(private_key)", "docstring": "Loads a private key from the key directory, based on a validator's\nidentity.\n\nArgs:\nkey_dir (str): The path to the key directory.\nkey_name (str): The name of the key to load.\n\nReturns:\nSigner: the cryptographic signer for the key", "source": "juraj-google-style"}
{"code": "def add_gripper(self, arm_name, gripper):\n        \n        if arm_name in self.grippers:\n            raise ValueError(\"Attempts to add multiple grippers to one body\")\n\n        arm_subtree = self.worldbody.find(\".\n\n        for actuator in gripper.actuator:\n\n            if actuator.get(\"name\") is None:\n                raise XMLError(\"Actuator has no name\")\n\n            if not actuator.get(\"name\").startswith(\"gripper\"):\n                raise XMLError(\n                    \"Actuator name {} does not have prefix 'gripper'\".format(\n                        actuator.get(\"name\")\n                    )\n                )\n\n        for body in gripper.worldbody:\n            arm_subtree.append(body)\n\n        self.merge(gripper, merge_body=False)\n        self.grippers[arm_name] = gripper", "docstring": "Mounts gripper to arm.\n\nThrows error if robot already has a gripper or gripper type is incorrect.\n\nArgs:\narm_name (str): name of arm mount\ngripper (MujocoGripper instance): gripper MJCF model", "source": "juraj-google-style"}
{"code": "def clean_all(G, settings):\n    \n    quiet = settings[\"quiet\"]\n    recon = settings[\"recon\"]\n    sprint = settings[\"sprint\"]\n    error = settings[\"error\"]\n    all_outputs = []\n    for node in G.nodes(data=True):\n        if \"output\" in node[1]:\n            for item in get_all_outputs(node[1]):\n                all_outputs.append(item)\n    all_outputs.append(\".shastore\")\n    retcode = 0\n    for item in sorted(all_outputs):\n        if os.path.isfile(item):\n            if recon:\n                sprint(\"Would remove file: {}\".format(item))\n                continue\n            sprint(\"Attempting to remove file '{}'\", level=\"verbose\")\n            try:\n                os.remove(item)\n                sprint(\"Removed file\", level=\"verbose\")\n            except:\n                errmes = \"Error: file '{}' failed to be removed\"\n                error(errmes.format(item))\n                retcode = 1\n    if not retcode and not recon:\n        sprint(\"All clean\", color=True)\n    return retcode", "docstring": "Removes all the output files from all targets. Takes\nthe graph as the only argument\n\nArgs:\nThe networkx graph object\nThe settings dictionary\n\nReturns:\n0 if successful\n1 if removing even one file failed", "source": "juraj-google-style"}
{"code": "def DEFINE_integer(flag_name, default_value, docstring, required=False):  \n    \n    _define_helper(flag_name, default_value, docstring, int, required)", "docstring": "Defines a flag of type 'int'.\nArgs:\nflag_name: The name of the flag as a string.\ndefault_value: The default value the flag should take as an int.\ndocstring: A helpful message explaining the use of the flag.", "source": "juraj-google-style"}
{"code": "def umount(self, forced=True):\n    if self.is_mounted():\n        if is_osx():\n            cmd = ['/usr/sbin/diskutil', 'unmount', self.connection['mount_point']]\n            if forced:\n                cmd.insert(2, 'force')\n            subprocess.check_call(cmd)\n        else:\n            cmd = ['umount', self.connection['mount_point']]\n            if forced:\n                cmd.insert(1, '-f')\n            subprocess.check_call(cmd)", "docstring": "Try to unmount our mount point.\n\nDefaults to using forced method. If OS is Linux, it will not\ndelete the mount point.\n\nArgs:\nforced: Bool whether to force the unmount. Default is True.", "source": "codesearchnet"}
{"code": "def encode_bqm_as_qp(solver, linear, quadratic):\n    active = active_qubits(linear, quadratic)\n    nan = float('nan')\n    lin = [uniform_get(linear, qubit, (0 if (qubit in active) else nan)) for qubit in solver._encoding_qubits]\n    lin = base64.b64encode(struct.pack(('<' + ('d' * len(lin))), *lin))\n    quad = [(quadratic.get((q1, q2), 0) + quadratic.get((q2, q1), 0)) for (q1, q2) in solver._encoding_couplers if ((q1 in active) and (q2 in active))]\n    quad = base64.b64encode(struct.pack(('<' + ('d' * len(quad))), *quad))\n    return {'format': 'qp', 'lin': lin.decode('utf-8'), 'quad': quad.decode('utf-8')}", "docstring": "Encode the binary quadratic problem for submission to a given solver,\nusing the `qp` format for data.\n\nArgs:\nsolver (:class:`dwave.cloud.solver.Solver`):\nThe solver used.\n\nlinear (dict[variable, bias]/list[variable, bias]):\nLinear terms of the model.\n\nquadratic (dict[(variable, variable), bias]):\nQuadratic terms of the model.\n\nReturns:\nencoded submission dictionary", "source": "codesearchnet"}
{"code": "def downsample(data, percent):\n    \n    n_genes = data.shape[0]\n    n_cells = data.shape[1]\n    new_data = data.copy()\n    total_count = float(data.sum())\n    to_remove = total_count*percent\n    \n    cell_sums = data.sum(0).astype(float)\n    \n    cell_gene_probs = data/cell_sums\n    \n    cell_probs = np.array(cell_sums/total_count).flatten()\n    cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)\n    for i, num_selected in enumerate(cells_selected):\n        cell_gene = np.array(cell_gene_probs[:,i]).flatten()\n        genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)\n        if sparse.issparse(data):\n            genes_selected = sparse.csc_matrix(genes_selected).T\n        new_data[:,i] -= genes_selected\n    new_data[new_data < 0] = 0\n    return new_data", "docstring": "downsample the data by removing a given percentage of the reads.\n\nArgs:\ndata: genes x cells array or sparse matrix\npercent: float between 0 and 1", "source": "juraj-google-style"}
{"code": "def _write_entries(self, stream, entries, converter, properties=None):\n        \n        def iter_entries():\n            for c in entries:\n                entry = converter(c)\n                if entry is None:\n                    continue\n                if properties is not None:\n                    entry = OrderedDict(\n                        (key, value) for key, value in iteritems(entry)\n                        if key == 'id' or key in properties)\n                yield entry\n\n        self._dump(stream, list(iter_entries()))", "docstring": "Write iterable of entries as YAML object to stream.\n\nArgs:\nstream: File-like object.\nentries: Iterable of entries.\nconverter: Conversion function from entry to YAML object.\nproperties: Set of compartment properties to output (or None to\noutput all).", "source": "juraj-google-style"}
{"code": "def _get_rules_from_aws(self):\n    list_of_rules = list()\n    if self.profile:\n        boto3.setup_default_session(profile_name=self.profile)\n    if self.region:\n        ec2 = boto3.client('ec2', region_name=self.region)\n    else:\n        ec2 = boto3.client('ec2')\n    security_groups = ec2.describe_security_groups(Filters=self.filters)\n    for group in security_groups['SecurityGroups']:\n        group_dict = dict()\n        group_dict['id'] = group['GroupId']\n        group_dict['name'] = group['GroupName']\n        group_dict['description'] = group.get('Description', None)\n        if (group.get('IpPermissions', None) or group.get('IpPermissionsEgress', None)):\n            group_dict['rules'] = list()\n        for rule in group.get('IpPermissions', None):\n            rule_dict = self._build_rule(rule)\n            rule_dict['direction'] = 'INGRESS'\n            group_dict['rules'].append(rule_dict)\n        for rule in group.get('IpPermissionsEgress', None):\n            rule_dict = self._build_rule(rule)\n            rule_dict['direction'] = 'EGRESS'\n            group_dict['rules'].append(rule_dict)\n        list_of_rules.append(group_dict)\n    return list_of_rules", "docstring": "Load the EC2 security rules off AWS into a list of dict.\n\nReturns:\nlist", "source": "codesearchnet"}
{"code": "def __getitem__(self, item: Union[Timestamp, slice]):\n        \n        if isinstance(item, slice):\n            if item.step:\n                raise ValueError('Step not supported.')\n            start = cast(Timestamp, item.start)\n            stop = cast(Timestamp, item.stop)\n            return self.query(time=start, duration=stop - start)\n        return self.query(time=item, include_query_end_time=True)", "docstring": "Finds operations overlapping a given time or time slice.\n\nArgs:\nitem: Either a Timestamp or a slice containing start and stop\nTimestamps.\n\nReturns:\nThe scheduled operations that occurs during the given time.", "source": "juraj-google-style"}
{"code": "def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]]=None) -> DataLoader:\n    if eval_dataset is None and self.eval_dataset is None:\n        raise ValueError('Trainer: evaluation requires an eval_dataset.')\n    dataloader_key = eval_dataset if isinstance(eval_dataset, str) else 'eval'\n    if hasattr(self, '_eval_dataloaders') and dataloader_key in self._eval_dataloaders and self.args.dataloader_persistent_workers:\n        return self.accelerator.prepare(self._eval_dataloaders[dataloader_key])\n    eval_dataset = self.eval_dataset[eval_dataset] if isinstance(eval_dataset, str) else eval_dataset if eval_dataset is not None else self.eval_dataset\n    return self._get_dataloader(dataset=eval_dataset, description='Evaluation', batch_size=self.args.eval_batch_size, sampler_fn=self._get_eval_sampler, dataloader_key=dataloader_key)", "docstring": "Returns the evaluation [`~torch.utils.data.DataLoader`].\n\nSubclass and override this method if you want to inject some custom behavior.\n\nArgs:\neval_dataset (`str` or `torch.utils.data.Dataset`, *optional*):\nIf a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed.", "source": "github-repos"}
{"code": "def process(self, element):\n    return re.findall(\"[\\\\w\\\\']+\", element, re.UNICODE)", "docstring": "Returns an iterator over the words of this element.\nThe element is a line of text.  If the line is blank, note that, too.\nArgs:\nelement: the element being processed\nReturns:\nThe processed element.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:\n    residual = hidden_states\n    hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, _ = self.decomp1(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states, _ = self.decomp2(hidden_states)\n    hidden_states = self.final_layer_norm(hidden_states)\n    if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):\n        clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def init_cache(self, batch_size, max_length):\n    input_ids = jnp.ones((batch_size, max_length))\n    attention_mask = jnp.ones_like(input_ids)\n    position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)\n    init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True)\n    return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.", "source": "github-repos"}
{"code": "def remove_phenotype(self, institute, case, user, link, phenotype_id,\n                         is_group=False):\n        \n        LOG.info(\"Removing HPO term from case {0}\".format(case['display_name']))\n\n        if is_group:\n            updated_case = self.case_collection.find_one_and_update(\n                {'_id': case['_id']},\n                {\n                    '$pull': {\n                        'phenotype_terms': {'phenotype_id': phenotype_id},\n                        'phenotype_groups': {'phenotype_id': phenotype_id},\n                    },\n                },\n                return_document=pymongo.ReturnDocument.AFTER\n            )\n\n        else:\n            updated_case = self.case_collection.find_one_and_update(\n                {'_id': case['_id']},\n                {\n                    '$pull': {\n                        'phenotype_terms': {'phenotype_id': phenotype_id},\n                    },\n                },\n                return_document=pymongo.ReturnDocument.AFTER\n            )\n\n        LOG.info(\"Creating event for removing phenotype term {0}\" \\\n                    \" from case {1}\".format(phenotype_id, case['display_name']))\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='case',\n            verb='remove_phenotype',\n            subject=case['display_name']\n        )\n\n        LOG.debug(\"Case updated\")\n        return updated_case", "docstring": "Remove an existing phenotype from a case\n\nArgs:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (dict): The url to be used in the event\nphenotype_id (str): A phenotype id\n\nReturns:\nupdated_case(dict)", "source": "juraj-google-style"}
{"code": "def makedir(self, dir_name, mode=PERM_DEF):\n        \n        dir_name = make_string_path(dir_name)\n        ends_with_sep = self.ends_with_path_separator(dir_name)\n        dir_name = self._path_without_trailing_separators(dir_name)\n        if not dir_name:\n            self.raise_os_error(errno.ENOENT, '')\n\n        if self.is_windows_fs:\n            dir_name = self.absnormpath(dir_name)\n        parent_dir, _ = self.splitpath(dir_name)\n        if parent_dir:\n            base_dir = self.normpath(parent_dir)\n            ellipsis = self._matching_string(\n                parent_dir, self.path_separator + '..')\n            if parent_dir.endswith(ellipsis) and not self.is_windows_fs:\n                base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis)\n            if not self.exists(base_dir):\n                self.raise_os_error(errno.ENOENT, base_dir)\n\n        dir_name = self.absnormpath(dir_name)\n        if self.exists(dir_name, check_link=True):\n            if self.is_windows_fs and dir_name == self.path_separator:\n                error_nr = errno.EACCES\n            else:\n                error_nr = errno.EEXIST\n            if ends_with_sep and self.is_macos and not self.exists(dir_name):\n                \n                self.remove_object(dir_name)\n            else:\n                self.raise_os_error(error_nr, dir_name)\n        head, tail = self.splitpath(dir_name)\n\n        self.add_object(\n            head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))", "docstring": "Create a leaf Fake directory.\n\nArgs:\ndir_name: (str) Name of directory to create.\nRelative paths are assumed to be relative to '/'.\nmode: (int) Mode to create directory with.  This argument defaults\nto 0o777. The umask is applied to this mode.\n\nRaises:\nOSError: if the directory name is invalid or parent directory is\nread only or as per :py:meth:`add_object`.", "source": "juraj-google-style"}
{"code": "def InjectString(self, codestring, wait_for_completion=True):\n    if (self.inferior.is_running and self.inferior.gdb.IsAttached()):\n        try:\n            self.inferior.gdb.InjectString(self.inferior.position, codestring, wait_for_completion=wait_for_completion)\n        except RuntimeError:\n            (exc_type, exc_value, exc_traceback) = sys.exc_info()\n            traceback.print_exception(exc_type, exc_value, exc_traceback)\n    else:\n        logging.error('Not attached to any process.')", "docstring": "Try to inject python code into current thread.\n\nArgs:\ncodestring: Python snippet to execute in inferior. (may contain newlines)\nwait_for_completion: Block until execution of snippet has completed.", "source": "codesearchnet"}
{"code": "def NetshStaticIp(interface,\n                  ip=u'127.0.0.9',\n                  subnet=u'255.255.255.255',\n                  gw=u'127.0.0.1'):\n  \n  args = [\n      '/c', 'netsh', 'interface', 'ip', 'set', 'address', interface, 'static',\n      ip, subnet, gw, '1'\n  ]\n  \n  res = client_utils_common.Execute(\n      'cmd', args, time_limit=-1, bypass_whitelist=True)\n  return res", "docstring": "Changes interface to a staticly set IP.\n\nSets IP configs to local if no paramaters passed.\n\nArgs:\ninterface: Name of the interface.\nip: IP address.\nsubnet: Subnet mask.\ngw: IP address of the default gateway.\n\nReturns:\nA tuple of stdout, stderr, exit_status.", "source": "juraj-google-style"}
{"code": "def check_satpy(readers=None, writers=None, extras=None):\n    \n    from satpy.readers import configs_for_reader\n    from satpy.writers import configs_for_writer\n\n    print('Readers')\n    print('=======')\n    for reader, res in sorted(check_yaml_configs(configs_for_reader(reader=readers), 'reader').items()):\n        print(reader + ': ', res)\n    print()\n\n    print('Writers')\n    print('=======')\n    for writer, res in sorted(check_yaml_configs(configs_for_writer(writer=writers), 'writer').items()):\n        print(writer + ': ', res)\n    print()\n\n    print('Extras')\n    print('======')\n    module_names = extras if extras is not None else ('cartopy', 'geoviews')\n    for module_name, res in sorted(_check_import(module_names).items()):\n        print(module_name + ': ', res)\n    print()", "docstring": "Check the satpy readers and writers for correct installation.\n\nArgs:\nreaders (list or None): Limit readers checked to those specified\nwriters (list or None): Limit writers checked to those specified\nextras (list or None): Limit extras checked to those specified\n\nReturns: bool\nTrue if all specified features were successfully loaded.", "source": "juraj-google-style"}
{"code": "def _remove(self, removeList, selfValue):\n    for removeValue in removeList:\n        print(removeValue, removeList)\n        removeEverything(removeValue, selfValue)", "docstring": "Remove elements from a list by matching the elements in the other list.\n\nThis method only looks inside current instance's value, not recursive.\nThere is no need for a recursive one anyway.\nMatch by == operation.\n\nArgs:\nremoveList (list): The list of matching elements.\nselfValue (list): The list you remove value from. Usually ``self.value``", "source": "codesearchnet"}
{"code": "def clone_with_git(repo_uri, dest_path):\n    \n    log.info('Cloning git repo %s to %s', repo_uri, dest_path)\n    git.Repo.clone_from(repo_uri, dest_path, depth=1)", "docstring": "Create a clone by cloning a git repository.\n\nArgs:\nrepo_uri: The URI of the git repository to clone.\ndest_path: The location to clone to.", "source": "juraj-google-style"}
{"code": "def makesubatoffset(self, bitoffset, *, _offsetideal=None):\n    if (_offsetideal is None):\n        _offsetideal = bitoffset\n    if (bitoffset is 0):\n        return self\n    newpromise = TDOPromiseCollection(self._chain)\n    for promise in self._promises:\n        newpromise.add(promise, bitoffset, _offsetideal=_offsetideal)\n    return newpromise", "docstring": "Create a copy of this PromiseCollection with an offset applied to each contained promise and register each with their parent.\n\nIf this promise's primitive is being merged with another\nprimitive, a new subpromise may be required to keep track of\nthe new offset of data coming from the new primitive.\n\nArgs:\nbitoffset: An integer offset of the data in the new primitive.\n_offsetideal: An integer offset to use if the associated primitive supports arbitrary TDO control.\n\nReturns:\nA new TDOPromiseCollection registered with this promise\ncollection, and with the correct offset.", "source": "codesearchnet"}
{"code": "def view(self, vleaf, fpath=None, cleanup=True, format=None):\n    graph = self.create_graphviz_digraph(vleaf, format=format)\n    graph.view(fpath, cleanup=cleanup)", "docstring": "View the graph.\n\nArgs:\nvleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.\nfpath (`str`): The file path used to save.\ncleanup (`bool`): Clean up the source file after rendering. Default is True.\nformat (str):\nForce overwrite ``format`` (``'pdf', 'png', ...)``) configuration.", "source": "codesearchnet"}
{"code": "def fill(self, name_or_slot, value):\n    if isinstance(name_or_slot, basestring):\n        slot = getattr(self.outputs, name_or_slot)\n    elif isinstance(name_or_slot, Slot):\n        slot = name_or_slot\n    else:\n        raise UnexpectedPipelineError(('Could not fill invalid output name: %r' % name_or_slot))\n    if (not slot._exists):\n        raise SlotNotDeclaredError(('Cannot fill output with name \"%s\" that was just declared within the Pipeline context.' % slot.name))\n    self._context.fill_slot(self._pipeline_key, slot, value)", "docstring": "Fills an output slot required by this Pipeline.\n\nArgs:\nname_or_slot: The name of the slot (a string) or Slot record to fill.\nvalue: The serializable value to assign to this slot.\n\nRaises:\nUnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError\nif trying to output to a slot that was not declared ahead of time.", "source": "codesearchnet"}
{"code": "def check_version_info(redis_client):\n    \n    redis_reply = redis_client.get(\"VERSION_INFO\")\n\n    \n    \n    if redis_reply is None:\n        return\n\n    true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))\n    version_info = _compute_version_info()\n    if version_info != true_version_info:\n        node_ip_address = ray.services.get_node_ip_address()\n        error_message = (\"Version mismatch: The cluster was started with:\\n\"\n                         \"    Ray: \" + true_version_info[0] + \"\\n\"\n                         \"    Python: \" + true_version_info[1] + \"\\n\"\n                         \"    Pyarrow: \" + str(true_version_info[2]) + \"\\n\"\n                         \"This process on node \" + node_ip_address +\n                         \" was started with:\" + \"\\n\"\n                         \"    Ray: \" + version_info[0] + \"\\n\"\n                         \"    Python: \" + version_info[1] + \"\\n\"\n                         \"    Pyarrow: \" + str(version_info[2]))\n        if version_info[:2] != true_version_info[:2]:\n            raise Exception(error_message)\n        else:\n            logger.warning(error_message)", "docstring": "Check if various version info of this process is correct.\n\nThis will be used to detect if workers or drivers are started using\ndifferent versions of Python, pyarrow, or Ray. If the version\ninformation is not present in Redis, then no check is done.\n\nArgs:\nredis_client: A client for the primary Redis shard.\n\nRaises:\nException: An exception is raised if there is a version mismatch.", "source": "juraj-google-style"}
{"code": "def create_cloudwatch_log_event(app_name, env, region, rules):\n    \n\n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('logs')\n\n    log_group = rules.get('log_group')\n    filter_name = rules.get('filter_name')\n    filter_pattern = rules.get('filter_pattern')\n\n    if not log_group:\n        LOG.critical('Log group is required and no \"log_group\" is defined!')\n        raise InvalidEventConfiguration('Log group is required and no \"log_group\" is defined!')\n\n    if not filter_name:\n        LOG.critical('Filter name is required and no filter_name is defined!')\n        raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')\n\n    if filter_pattern is None:\n        LOG.critical('Filter pattern is required and no filter_pattern is defined!')\n        raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')\n\n    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)\n\n    statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(\" \", \"_\"))\n    principal = 'logs.{}.amazonaws.com'.format(region)\n    account_id = get_env_credential(env=env)['accountId']\n    source_arn = \"arn:aws:logs:{0}:{1}:log-group:{2}:*\".format(region, account_id, log_group)\n    add_lambda_permissions(\n        function=lambda_alias_arn,\n        statement_id=statement_id,\n        action='lambda:InvokeFunction',\n        principal=principal,\n        source_arn=source_arn,\n        env=env,\n        region=region)\n\n    cloudwatch_client.put_subscription_filter(\n        logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn)\n\n    LOG.info(\"Created Cloudwatch log event with filter: %s\", filter_pattern)", "docstring": "Create cloudwatch log event for lambda from rules.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\nrules (str): Trigger rules from the settings", "source": "juraj-google-style"}
{"code": "def list_load_balancers(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Network/',\n                        '/loadBalancers?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List the load balancers in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of load balancer list with properties.", "source": "juraj-google-style"}
{"code": "def grep_projects(tofind_list, user_profile=None, verbose=True, new=False, **kwargs):\n    import utool as ut\n    user_profile = ensure_user_profile(user_profile)\n    print('user_profile = {!r}'.format(user_profile))\n    kwargs = kwargs.copy()\n    colored = kwargs.pop('colored', True)\n    grepkw = {}\n    grepkw['greater_exclude_dirs'] = user_profile.project_exclude_dirs\n    grepkw['exclude_dirs'] = user_profile.project_exclude_dirs\n    grepkw['dpath_list'] = user_profile.project_dpaths\n    grepkw['include_patterns'] = user_profile.project_include_patterns\n    grepkw['exclude_patterns'] = user_profile.project_exclude_patterns\n    grepkw.update(kwargs)\n    msg_list1 = []\n    msg_list2 = []\n    print_ = msg_list1.append\n    print_('Greping Projects')\n    print_(('tofind_list = %s' % (ut.repr4(tofind_list, nl=True),)))\n    if verbose:\n        print('\\n'.join(msg_list1))\n    grep_result = ut.grep(tofind_list, **grepkw)\n    (found_fpath_list, found_lines_list, found_lxs_list) = grep_result\n    reflags = grepkw.get('reflags', 0)\n    _exprs_flags = [ut.extend_regex2(expr, reflags) for expr in tofind_list]\n    extended_regex_list = ut.take_column(_exprs_flags, 0)\n    reflags_list = ut.take_column(_exprs_flags, 1)\n    reflags = reflags_list[0]\n    resultstr = ut.make_grep_resultstr(grep_result, extended_regex_list, reflags, colored=colored)\n    msg_list2.append(resultstr)\n    print_ = msg_list2.append\n    print_('====================')\n    print_(('found_fpath_list = ' + ut.repr4(found_fpath_list)))\n    print_('')\n    if verbose:\n        print('\\n'.join(msg_list2))\n    msg_list = (msg_list1 + msg_list2)\n    if new:\n        return GrepResult(found_fpath_list, found_lines_list, found_lxs_list, extended_regex_list, reflags)\n    else:\n        return msg_list", "docstring": "r\"\"\"\nGreps the projects defined in the current UserProfile\n\nArgs:\ntofind_list (list):\nuser_profile (None): (default = None)\n\nKwargs:\nuser_profile\n\nCommandLine:\npython -m utool --tf grep_projects grep_projects\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_project import *  # NOQA\n>>> import utool as ut\n>>> import sys\n>>> tofind_list = ut.get_argval('--find', type_=list,\n>>>                             default=[sys.argv[-1]])\n>>> grep_projects(tofind_list)", "source": "codesearchnet"}
{"code": "def build_info(self):\n    if self.is_bootloader:\n        self.log.error('Device is in fastboot mode, could not get build info.')\n        return\n    info = {}\n    info['build_id'] = self.adb.getprop('ro.build.id')\n    info['build_type'] = self.adb.getprop('ro.build.type')\n    return info", "docstring": "Get the build info of this Android device, including build id and\nbuild type.\n\nThis is not available if the device is in bootloader mode.\n\nReturns:\nA dict with the build info of this Android device, or None if the\ndevice is in bootloader mode.", "source": "codesearchnet"}
{"code": "def set_nsxcontroller_ip(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        ip_addr = str((kwargs.pop('ip_addr', None)))\n        nsxipaddress = ip_interface(unicode(ip_addr))\n        if nsxipaddress.version != 4:\n            raise ValueError('NSX Controller ip must be IPV4')\n\n        ip_args = dict(name=name, address=ip_addr)\n        method_name = 'nsx_controller_connection_addr_address'\n        method_class = self._brocade_tunnels\n        nsxcontroller_attr = getattr(method_class, method_name)\n        config = nsxcontroller_attr(**ip_args)\n        output = self._callback(config)\n        return output", "docstring": "Set nsx-controller IP\n\nArgs:\nIP (str): IPV4 address.\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def run_amylpred2(self, seq, outdir, run_amylmuts=False):\n        \n        outdir_amylpred = op.join(outdir, 'AMYLPRED2_results')\n        if not op.exists(outdir_amylpred):\n            os.mkdir(outdir_amylpred)\n\n        url = \"http:\n        cj = CookieJar()\n        opener = build_opener(HTTPCookieProcessor(cj))\n        formdata = {\"email\": self.email, \"password\": self.password}\n        data_encoded = urlencode(formdata)\n        data_encoded = data_encoded.encode('ASCII')\n        response = opener.open(url, data_encoded)\n\n        \n        methods = ['AGGRESCAN', 'NETCSSP', 'PAFIG', 'APD', 'AMYLPATTERN',\n                   'SECSTR', 'BSC', 'WALTZ', 'CONFENERGY', 'TANGO']\n\n        if run_amylmuts:\n            methods.append('AMYLMUTS')\n\n        output = {}\n        timeCounts = 0\n\n        for met in methods:\n            \n            existing_results = glob.glob(op.join(outdir_amylpred, '*_{}.txt'.format(met)))\n            if existing_results:\n                results_file = existing_results[0]\n            else:\n                values = {'seq_data': seq, 'method': met}\n                data = urlencode(values)\n                data = data.encode('ASCII')\n                url_input = \"http:\n                response = opener.open(url_input, data)\n                result = str(response.read())\n                ind = str.find(result, 'Job ID')\n                result2 = result[ind:ind + 50]\n                ind1 = str.find(result2, ':')\n                ind2 = str.find(result2, '<BR>')\n                job_id = result2[ind1 + 2:ind2]\n\n                \n                url_result = 'http:\n                print(url_result)\n                print(\"Waiting for %s results\" % met, end='.')\n                while True:\n                    result = urlopen(url_result).read()\n                    if not result:\n                        time.sleep(1)\n                        timeCounts += 1\n                        print('.', end='')\n                    else:\n                        response = requests.get(url_result)\n                        break\n                results_file = op.join(outdir_amylpred, \"{}_{}.txt\".format(url_result.split('/')[-1].strip('.txt'), met))\n                with open(results_file, \"wb\") as handle:\n                    for data in response.iter_content():\n                        handle.write(data)\n                print(\"\")\n\n            method, hits = self.parse_method_results(results_file, met)\n            \n            output[met] = hits\n            \n            \n            \n        if timeCounts != 0:\n            print(\"Time spent: %d seconds\" % timeCounts)\n        return output", "docstring": "Run all methods on the AMYLPRED2 web server for an amino acid sequence and gather results.\n\nResult files are cached in ``/path/to/outdir/AMYLPRED2_results``.\n\nArgs:\nseq (str): Amino acid sequence as a string\noutdir (str): Directory to where output files should be saved\nrun_amylmuts (bool): If AMYLMUTS method should be run, default False\n\nReturns:\ndict: Result for each method run", "source": "juraj-google-style"}
{"code": "def tensor_rank(self, name='tensor_rank'):\n    with self._name_scope(name):\n        return self.shape.ndims", "docstring": "Rank (in the sense of tensors) of matrix corresponding to this operator.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\nPython integer, or None if the tensor rank is undefined.", "source": "github-repos"}
{"code": "def open_workshared_model(self, model_path, central=False, detached=False, keep_worksets=True, audit=False, show_workset_config=1):\n    if detached:\n        if audit:\n            if keep_worksets:\n                self._add_entry(templates.CENTRAL_OPEN_DETACH_AUDIT.format(model_path=model_path, workset_config=show_workset_config))\n            else:\n                self._add_entry(templates.CENTRAL_OPEN_DETACH_AUDIT_DISCARD.format(model_path=model_path, workset_config=show_workset_config))\n        elif keep_worksets:\n            self._add_entry(templates.CENTRAL_OPEN_DETACH.format(model_path=model_path, workset_config=show_workset_config))\n        else:\n            self._add_entry(templates.CENTRAL_OPEN_DETACH_DISCARD.format(model_path=model_path, workset_config=show_workset_config))\n    elif central:\n        if audit:\n            self._add_entry(templates.CENTRAL_OPEN_AUDIT.format(model_path=model_path, workset_config=show_workset_config))\n        else:\n            self._add_entry(templates.CENTRAL_OPEN.format(model_path=model_path, workset_config=show_workset_config))\n    elif audit:\n        self._add_entry(templates.WORKSHARED_OPEN_AUDIT.format(model_path=model_path, workset_config=show_workset_config))\n    else:\n        self._add_entry(templates.WORKSHARED_OPEN.format(model_path=model_path, workset_config=show_workset_config))", "docstring": "Append a open workshared model entry to the journal.\n\nThis instructs Revit to open a workshared model.\n\nArgs:\nmodel_path (str): full path to workshared model\ncentral (bool): if True opens central model and not local\ndetached (bool): if True opens a detached model\nkeep_worksets (bool): if True keeps worksets when detaching\naudit (bool): if True audits the model when opening", "source": "codesearchnet"}
{"code": "def SetCTypesForLibrary(libname, fn_table):\n  \n  libpath = ctypes.util.find_library(libname)\n  if not libpath:\n    raise ErrorLibNotFound('Library %s not found' % libname)\n\n  lib = ctypes.cdll.LoadLibrary(libpath)\n\n  \n  for (function, args, result) in fn_table:\n    f = getattr(lib, function)\n    f.argtypes = args\n    f.restype = result\n\n  return lib", "docstring": "Set function argument types and return types for an ObjC library.\n\nArgs:\nlibname: Library name string\nfn_table: List of (function, [arg types], return types) tuples\nReturns:\nctypes.CDLL with types set according to fn_table\nRaises:\nErrorLibNotFound: Can't find specified lib", "source": "juraj-google-style"}
{"code": "def _MergeEntities(self, a, b):\n    \n    if a.shape_id != b.shape_id:\n      raise MergeError('shape_id must be the same')\n\n    distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2],\n                                                    b.points[0][:2]),\n                   ApproximateDistanceBetweenPoints(a.points[-1][:2],\n                                                    b.points[-1][:2]))\n    if distance > self.largest_shape_distance:\n      raise MergeError('The shape endpoints are too far away: %.1fm '\n                       '(largest_shape_distance is %.1fm)' %\n                       (distance, self.largest_shape_distance))\n\n    return self._Migrate(b, self.feed_merger.b_schedule, False)", "docstring": "Merges the shapes by taking the new shape.\n\nArgs:\na: The first transitfeed.Shape instance.\nb: The second transitfeed.Shape instance.\n\nReturns:\nThe merged shape.\n\nRaises:\nMergeError: If the ids are different or if the endpoints are further\nthan largest_shape_distance apart.", "source": "juraj-google-style"}
{"code": "def add_tools(self, *tools):\n        \n        for tool in tools:\n            if not isinstance(tool, Tool):\n                raise ValueError(\"All arguments to add_tool must be Tool subclasses.\")\n\n            self.toolbar.tools.append(tool)", "docstring": "Adds tools to the plot.\n\nArgs:\n*tools (Tool) : the tools to add to the Plot\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_snapshot_by(self, volume_id_or_uri, field, value):\n    uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n    return self._client.get_by(field, value, uri=uri)", "docstring": "Gets all snapshots that match the filter.\n\nThe search is case-insensitive.\n\nArgs:\nvolume_id_or_uri: Can be either the volume id or the volume uri.\nfield: Field name to filter.\nvalue: Value to filter.\n\nReturns:\nlist: Snapshots", "source": "codesearchnet"}
{"code": "def get_file_size(file_object):\n    position = file_object.tell()\n    file_object.seek(0, 2)\n    file_size = file_object.tell()\n    file_object.seek(position, 0)\n    return file_size", "docstring": "Returns the size, in bytes, of a file. Expects an object that supports\nseek and tell methods.\n\nArgs:\nfile_object (file_object) - The object that represents the file\n\nReturns:\n(int): size of the file, in bytes", "source": "codesearchnet"}
{"code": "class BlipEncoder(nn.Module):\n\n    def __init__(self, config: BlipConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`BlipEncoderLayer`].\n\nArgs:\nconfig (`BlipConfig`):\nThe corresponding vision configuration for the `BlipEncoder`.", "source": "github-repos"}
{"code": "def add_controller(self, controller, timeout=None):\n        \n        \n        assert controller.mri not in self._controllers, \\\n            \"Controller already exists for %s\" % controller.mri\n        self._controllers[controller.mri] = controller\n        controller.setup(self)\n        if self.state:\n            should_publish = self._start_controllers([controller], timeout)\n            if self.state == STARTED and should_publish:\n                self._publish_controllers(timeout)", "docstring": "Add a controller to be hosted by this process\n\nArgs:\ncontroller (Controller): Its controller\ntimeout (float): Maximum amount of time to wait for each spawned\nobject. None means forever", "source": "juraj-google-style"}
{"code": "def GetCoinAssets(self):\n    assets = set()\n    for coin in self.GetCoins():\n        assets.add(coin.Output.AssetId)\n    return list(assets)", "docstring": "Get asset ids of all coins present in the wallet.\n\nReturns:\nlist: of UInt256 asset id's.", "source": "codesearchnet"}
{"code": "def forward(self, input_ids: torch.Tensor, cache_position: torch.Tensor):\n    _, seqlen = input_ids.shape\n    position_ids = cache_position.unsqueeze(0)\n    past_key_values = self.static_cache\n    outs = self.model(input_ids=input_ids, attention_mask=None, position_ids=position_ids, cache_position=cache_position, past_key_values=past_key_values, use_cache=True)\n    return outs.logits", "docstring": "Forward pass of the module, which is compatible with the ExecuTorch runtime.\n\nArgs:\ninput_ids (`torch.Tensor`): Tensor representing current input token id to the module.\ncache_position (`torch.Tensor`): Tensor representing current input position in the cache.\n\nReturns:\ntorch.Tensor: Logits output from the model.\n\nThis forward adapter serves two primary purposes:\n\n1. **Making the Model `torch.export`-Compatible**:\nThe adapter hides unsupported objects, such as the `Cache`, from the graph inputs and outputs,\nenabling the model to be exportable using `torch.export` without encountering issues.\n\n2. **Ensuring Compatibility with `ExecuTorch` runtime**:\nThe adapter matches the model's forward signature with that in `executorch/extension/llm/runner`,\nensuring that the exported model can be executed in `ExecuTorch` out-of-the-box.", "source": "github-repos"}
{"code": "def APFSContainerPathSpecGetVolumeIndex(path_spec):\n  \n  volume_index = getattr(path_spec, 'volume_index', None)\n  if volume_index is not None:\n    return volume_index\n\n  location = getattr(path_spec, 'location', None)\n  if location is None or not location.startswith('/apfs'):\n    return None\n\n  try:\n    volume_index = int(location[5:], 10) - 1\n  except (TypeError, ValueError):\n    volume_index = None\n\n  if volume_index is None or volume_index < 0 or volume_index > 99:\n    volume_index = None\n\n  return volume_index", "docstring": "Retrieves the volume index from the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: volume index or None if the index cannot be determined.", "source": "juraj-google-style"}
{"code": "def remove_attribute(self, attribute: str) -> None:\n        \n        attr_index = self.__attr_index(attribute)\n        if attr_index is not None:\n            self.yaml_node.value.pop(attr_index)", "docstring": "Remove an attribute from the node.\n\nUse only if is_mapping() returns True.\n\nArgs:\nattribute: The name of the attribute to remove.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_state):\n    projected_latents = self.in_proj(hidden_state)\n    quantized_representation, audio_codes = self.decode_latents(projected_latents)\n    commitment_loss = F.mse_loss(projected_latents, quantized_representation.detach(), reduction='mean')\n    codebook_loss = F.mse_loss(quantized_representation, projected_latents.detach(), reduction='mean')\n    quantized_representation = projected_latents + (quantized_representation - projected_latents).detach()\n    quantized_representation = self.out_proj(quantized_representation)\n    return (quantized_representation, commitment_loss, codebook_loss, audio_codes, projected_latents)", "docstring": "Quantizes the input tensor using a fixed codebook and returns the corresponding codebook vectors.\n\nArgs:\nhidden_state (`torch.FloatTensor` of shape `(batch_size, dimension, time_steps)`):\nInput tensor.\n\nReturns:\nquantized_representation (`torch.Tensor`of shape `(batch_size, dimension, time_steps)`):\nQuantized continuous representation of input.\ncommitment_loss (`torch.FloatTensor`of shape `(1)`):\nCommitment loss to train encoder to predict vectors closer to codebook entries.\ncodebook_loss (`torch.FloatTensor`of shape `(1)`):\nCodebook loss to update the codebook.\naudio_codes (`torch.LongTensor` of shape `(batch_size, time_steps)`):\nCodebook indices for each codebook, quantized discrete representation of input.\nprojected_latents (torch.FloatTensor of shape `(batch_size, num_codebooks * dimension, time_steps)`):\nProjected latents (continuous representation of input before quantization).", "source": "github-repos"}
{"code": "def plot_points(points, lattice=None, coords_are_cartesian=False, fold=False, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax3d_fig_plt(ax)\n    if ('color' not in kwargs):\n        kwargs['color'] = 'b'\n    if (((not coords_are_cartesian) or fold) and (lattice is None)):\n        raise ValueError('coords_are_cartesian False or fold True require the lattice')\n    for p in points:\n        if fold:\n            p = fold_point(p, lattice, coords_are_cartesian=coords_are_cartesian)\n        elif (not coords_are_cartesian):\n            p = lattice.get_cartesian_coords(p)\n        ax.scatter(*p, **kwargs)\n    return (fig, ax)", "docstring": "Adds Points to a matplotlib Axes\n\nArgs:\npoints: list of coordinates\nlattice: Lattice object used to convert from reciprocal to cartesian coordinates\ncoords_are_cartesian: Set to True if you are providing\ncoordinates in cartesian coordinates. Defaults to False.\nRequires lattice if False.\nfold: whether the points should be folded inside the first Brillouin Zone.\nDefaults to False. Requires lattice if True.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: kwargs passed to the matplotlib function 'scatter'. Color defaults to blue\n\nReturns:\nmatplotlib figure and matplotlib ax", "source": "codesearchnet"}
{"code": "def _config_for_enable_caching_device(rnn_cell):\n    default_enable_caching_device = ops.executing_eagerly_outside_functions()\n    if rnn_cell._enable_caching_device != default_enable_caching_device:\n        return {'enable_caching_device': rnn_cell._enable_caching_device}\n    return {}", "docstring": "Return the dict config for RNN cell wrt to enable_caching_device field.\n\nSince enable_caching_device is a internal implementation detail for speed up\nthe RNN variable read when running on the multi remote worker setting, we\ndon't want this config to be serialized constantly in the JSON. We will only\nserialize this field when a none default value is used to create the cell.\nArgs:\nrnn_cell: the RNN cell for serialize.\n\nReturns:\nA dict which contains the JSON config for enable_caching_device value or\nempty dict if the enable_caching_device value is same as the default value.", "source": "github-repos"}
{"code": "def _pull_out_unaffected_blocks_lhs(lhs, rest, out_port, in_port):\n    \n\n    _, block_index = lhs.index_in_block(out_port)\n\n    bs = lhs.block_structure\n\n    nbefore, nblock, nafter = (sum(bs[:block_index]),\n                               bs[block_index],\n                               sum(bs[block_index + 1:]))\n    before, block, after = lhs.get_blocks((nbefore, nblock, nafter))\n\n    if before != cid(nbefore) or after != cid(nafter):\n        outer_lhs = before + cid(nblock - 1) + after\n        inner_lhs = cid(nbefore) + block + cid(nafter)\n        return outer_lhs << Feedback.create(\n                SeriesProduct.create(inner_lhs, *rest),\n                out_port=out_port, in_port=in_port)\n    elif block == cid(nblock):\n        outer_lhs = before + cid(nblock - 1) + after\n        return outer_lhs << Feedback.create(\n                SeriesProduct.create(*rest),\n                out_port=out_port, in_port=in_port)\n    raise CannotSimplify()", "docstring": "In a self-Feedback of a series product, where the left-most operand is\nreducible, pull all non-trivial blocks outside of the feedback.\n\nArgs:\nlhs (Circuit): The reducible circuit\nrest (tuple): The other SeriesProduct operands\nout_port (int): The feedback output port index\nin_port (int): The feedback input port index\n\nReturns:\nCircuit: The simplified circuit", "source": "juraj-google-style"}
{"code": "def get_most_unrolled_urls(tweet):\n    \n    unrolled_urls = []\n    for url in get_tweet_links(tweet):\n        if url.get(\"unwound\", {\"url\": None}).get(\"url\", None) is not None:\n            unrolled_urls.append(url[\"unwound\"][\"url\"])\n        elif url.get(\"expanded_url\", None) is not None:\n            unrolled_urls.append(url[\"expanded_url\"])\n        else:\n            unrolled_urls.append(url[\"url\"])\n    return unrolled_urls", "docstring": "For each url included in the Tweet \"urls\", get the most unrolled\nversion available. Only return 1 url string per url in tweet.tweet_links\nIn order of preference for \"most unrolled\"\n(keys from the dict at tweet.tweet_links): \\n\n1. `unwound`/`url` \\n\n2. `expanded_url` \\n\n3. `url`\n\nArgs:\ntweet (Tweet): A Tweet object or dict\n\nReturns:\nlist (list of strings): a list of the most unrolled url available", "source": "juraj-google-style"}
{"code": "def StartService(service_name):\n    try:\n        win32serviceutil.StartService(service_name)\n        logging.info(\"Service '%s' started.\", service_name)\n    except pywintypes.error as e:\n        if (getattr(e, 'winerror', None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST):\n            logging.debug(\"Tried to start '%s', but the service is not installed.\", service_name)\n        else:\n            logging.exception(\"Encountered error trying to start '%s':\", service_name)", "docstring": "Start a Windows service with the given name.\n\nArgs:\nservice_name: string The name of the service to be started.", "source": "codesearchnet"}
{"code": "def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):\n    return Kpoints('Automatic kpoint scheme', 0, Kpoints.supported_modes.Monkhorst, kpts=[kpts], kpts_shift=shift)", "docstring": "Convenient static constructor for an automatic Monkhorst pack Kpoint\ngrid.\n\nArgs:\nkpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice\nvectors. Defaults to (2,2,2)\nshift: Shift to be applied to the kpoints. Defaults to (0,0,0).\n\nReturns:\nKpoints object", "source": "codesearchnet"}
{"code": "def plot(self, **plot_kwargs: Any) -> None:\n    fig = plt.figure()\n    plt.plot(self._num_cfds_seq, self._gnd_state_probs, 'ro-', figure=fig, **plot_kwargs)\n    plt.xlabel('Number of Cliffords', figure=fig)\n    plt.ylabel('Ground State Probability', figure=fig)\n    fig.show()", "docstring": "Plots the average ground state probability vs the number of\nCliffords in the RB study.\n\nArgs:\n**plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.", "source": "codesearchnet"}
{"code": "def _strip_unnecessary_contents_from_stack(result, processed):\n  \n  \n  if isinstance(result, (PrettyTensor, Loss)):\n    if result.is_sequence():\n      for tensor in result.sequence:\n        _strip_unnecessary_contents_from_stack(tensor, processed)\n        return\n    else:\n      result = result.tensor\n  if hasattr(result, 'op'):\n    result = result.op\n  if result in processed:\n    return\n  else:\n    processed.add(result)\n  trace = []\n  found = False\n  for f, line_no, method, _ in result._traceback:\n    if (method in ('_replace_deferred', '_construct') and\n        f.endswith('pretty_tensor_class.py')):\n      found = True\n      continue\n    trace.append((f, line_no, method, {}))\n  result._traceback = trace\n\n  \n  \n  if not found:\n    return\n  for inp in result.inputs:\n    _strip_unnecessary_contents_from_stack(inp, processed)", "docstring": "Remove the distracting lines from the stored tracebacks.\n\nThis also reduces memory overhead by removing the frame contents. This is very\nimportant when doing long unrolls.\n\nArgs:\nresult: The result to process.\nprocessed: A set of already processed nodes, used to stop early.", "source": "juraj-google-style"}
{"code": "def color_lerp(\n    c1: Tuple[int, int, int], c2: Tuple[int, int, int], a: float\n) -> Color:\n    \n    return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a))", "docstring": "Return the linear interpolation between two colors.\n\n``a`` is the interpolation value, with 0 returing ``c1``,\n1 returning ``c2``, and 0.5 returing a color halfway between both.\n\nArgs:\nc1 (Union[Tuple[int, int, int], Sequence[int]]):\nThe first color.  At a=0.\nc2 (Union[Tuple[int, int, int], Sequence[int]]):\nThe second color.  At a=1.\na (float): The interpolation value,\n\nReturns:\nColor: The interpolated Color.", "source": "juraj-google-style"}
{"code": "def group_pairs(pair_list):\n    \n    \n    groupid_to_items = defaultdict(list)\n    \n    for item, groupid in pair_list:\n        groupid_to_items[groupid].append(item)\n    return groupid_to_items", "docstring": "Groups a list of items using the first element in each pair as the item and\nthe second element as the groupid.\n\nArgs:\npair_list (list): list of 2-tuples (item, groupid)\n\nReturns:\ndict: groupid_to_items: maps a groupid to a list of items\n\nSeeAlso:\ngroup_items", "source": "juraj-google-style"}
{"code": "def update(self, **kwargs):\n    kwargs = {k: (np.array(v) if isinstance(v, (int, float)) else v) for (k, v) in kwargs.items()}\n    self.args.update(kwargs)", "docstring": "Update the model arguments with additional arguments.\n\nArgs:\nkwargs (dict): Optional keyword arguments to add to prior args.", "source": "codesearchnet"}
{"code": "def command_factory(command):\n    \n    def communicate(body={}, root_dir=None):\n        \n\n        client = connect_socket(root_dir)\n        body['mode'] = command\n        \n        \n        if 'func' in body:\n            del body['func']\n        data_string = pickle.dumps(body, -1)\n        client.send(data_string)\n\n        \n        response = receive_data(client)\n        return response\n    return communicate", "docstring": "A factory which returns functions for direct daemon communication.\n\nThis factory will create a function which sends a payload to the daemon\nand returns the unpickled object which is returned by the daemon.\n\nArgs:\ncommand (string): The type of payload this should be. This determines\nas what kind of instruction this will be interpreted by the daemon.\nReturns:\nfunction: The created function.", "source": "juraj-google-style"}
{"code": "def validated_value(self, raw_value):\n    value = self.value(raw_value)\n    try:\n        for validator in self.validators:\n            validator(value)\n    except:\n        raise\n    else:\n        return value", "docstring": "Return parsed parameter value and run validation handlers.\n\nError message included in exception will be included in http error\nresponse\n\nArgs:\nvalue: raw parameter value to parse validate\n\nReturns:\nNone\n\nNote:\nConcept of validation for params is understood here as a process\nof checking if data of valid type (successfully parsed/processed by\n``.value()`` handler) does meet some other constraints\n(lenght, bounds, uniqueness, etc.). It will internally call its\n``value()`` handler.", "source": "codesearchnet"}
{"code": "def transform_verbosity(self, description, use_verbose_format):\n        \n        if use_verbose_format is False:\n            description = description.replace(\n                _(\", every minute\"), '')\n            description = description.replace(_(\", every hour\"), '')\n            description = description.replace(_(\", every day\"), '')\n        return description", "docstring": "Transforms the verbosity of the expression description by stripping verbosity from original description\nArgs:\ndescription: The description to transform\nuse_verbose_format: If True, will leave description as it, if False, will strip verbose parts\nsecond_expression: Seconds part\nReturns:\nThe transformed description with proper verbosity", "source": "juraj-google-style"}
{"code": "def url(self, url):\n        \n        if url and url.endswith('/'):\n            url = url[:-1]\n\n        self._url = url", "docstring": "Set API URL endpoint\n\nArgs:\nurl: the url of the API endpoint", "source": "juraj-google-style"}
{"code": "def _FormatDateTime(self, event):\n    \n    try:\n      return timelib.Timestamp.CopyToIsoFormat(\n          event.timestamp, timezone=self._output_mediator.timezone,\n          raise_error=True)\n\n    except (OverflowError, ValueError) as exception:\n      self._ReportEventError(event, (\n          'unable to copy timestamp: {0!s} to a human readable date and time '\n          'with error: {1!s}. Defaulting to: \"0000-00-00T00:00:00\"').format(\n              event.timestamp, exception))\n\n      return '0000-00-00T00:00:00'", "docstring": "Formats the date and time in ISO 8601 format.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: date and time field.", "source": "juraj-google-style"}
{"code": "def assign_seat(self, seat):\n    rc = self._libinput.libinput_udev_assign_seat(self._li, seat.encode())\n    assert (rc == 0), 'Failed to assign {}'.format(seat)", "docstring": "Assign a seat to this libinput context.\n\nNew devices or the removal of existing devices will appear as events\nwhen iterating over :meth:`~libinput.LibInput.get_event`.\n\n:meth:`assign_seat` succeeds even if no input devices are\ncurrently available on this seat, or if devices are available but fail\nto open. Devices that do not have the minimum capabilities to be\nrecognized as pointer, keyboard or touch device are ignored. Such\ndevices and those that failed to open are ignored until the next call\nto :meth:`~libinput.LibInput.resume`.\n\nWarning:\nThis method may only be called once per context.\nArgs:\nseat (str): A seat identifier.", "source": "codesearchnet"}
{"code": "def __init__(self, status_address, bundle_process_cache=None, state_cache=None, enable_heap_dump=False, worker_id=None, log_lull_timeout_ns=DEFAULT_LOG_LULL_TIMEOUT_NS):\n    self._alive = True\n    self._bundle_process_cache = bundle_process_cache\n    self._state_cache = state_cache\n    ch = GRPCChannelFactory.insecure_channel(status_address)\n    grpc.channel_ready_future(ch).result(timeout=60)\n    self._status_channel = grpc.intercept_channel(ch, WorkerIdInterceptor(worker_id))\n    self._status_stub = beam_fn_api_pb2_grpc.BeamFnWorkerStatusStub(self._status_channel)\n    self._responses = queue.Queue()\n    self.log_lull_timeout_ns = log_lull_timeout_ns\n    self._last_full_thread_dump_secs = 0.0\n    self._last_lull_logged_secs = 0.0\n    self._server = threading.Thread(target=lambda: self._serve(), name='fn_api_status_handler')\n    self._server.daemon = True\n    self._enable_heap_dump = enable_heap_dump\n    self._server.start()\n    self._lull_logger = threading.Thread(target=lambda: self._log_lull_in_bundle_processor(self._bundle_process_cache), name='lull_operation_logger')\n    self._lull_logger.daemon = True\n    self._lull_logger.start()", "docstring": "Initialize FnApiWorkerStatusHandler.\n\nArgs:\nstatus_address: The URL Runner uses to host the WorkerStatus server.\nbundle_process_cache: The BundleProcessor cache dict from sdk worker.\nstate_cache: The StateCache form sdk worker.", "source": "github-repos"}
{"code": "def format_color(text, color, use_color_setting):\n    if (not use_color_setting):\n        return text\n    else:\n        return '{}{}{}'.format(color, text, NORMAL)", "docstring": "Format text with color.\n\nArgs:\ntext - Text to be formatted with color if `use_color`\ncolor - The color start string\nuse_color_setting - Whether or not to color", "source": "codesearchnet"}
{"code": "def _cart_dists(self, s1, s2, avg_lattice, mask, normalization, lll_frac_tol=None):\n    if (len(s2) > len(s1)):\n        raise ValueError('s1 must be larger than s2')\n    if (mask.shape != (len(s2), len(s1))):\n        raise ValueError('mask has incorrect shape')\n    (vecs, d_2) = pbc_shortest_vectors(avg_lattice, s2, s1, mask, return_d2=True, lll_frac_tol=lll_frac_tol)\n    lin = LinearAssignment(d_2)\n    s = lin.solution\n    short_vecs = vecs[(np.arange(len(s)), s)]\n    translation = np.average(short_vecs, axis=0)\n    f_translation = avg_lattice.get_fractional_coords(translation)\n    new_d2 = np.sum(((short_vecs - translation) ** 2), axis=(- 1))\n    return (((new_d2 ** 0.5) * normalization), f_translation, s)", "docstring": "Finds a matching in cartesian space. Finds an additional\nfractional translation vector to minimize RMS distance\n\nArgs:\ns1, s2: numpy arrays of fractional coordinates. len(s1) >= len(s2)\navg_lattice: Lattice on which to calculate distances\nmask: numpy array of booleans. mask[i, j] = True indicates\nthat s2[i] cannot be matched to s1[j]\nnormalization (float): inverse normalization length\n\nReturns:\nDistances from s2 to s1, normalized by (V/Natom) ^ 1/3\nFractional translation vector to apply to s2.\nMapping from s1 to s2, i.e. with numpy slicing, s1[mapping] => s2", "source": "codesearchnet"}
{"code": "def xpath(self, exact=None):\n        \n\n        exact = exact if exact is not None else self.exact\n\n        if isinstance(self.expression, AbstractExpression):\n            expression = self._apply_expression_filters(self.expression)\n\n            return to_xpath(expression, exact=exact)\n        else:\n            return str_(self.expression)", "docstring": "Returns the XPath query for this selector.\n\nArgs:\nexact (bool, optional): Whether to exactly match text.\n\nReturns:\nstr: The XPath query for this selector.", "source": "juraj-google-style"}
{"code": "def getTraitCorrCoef(self,term_i=None):\n        \n        cov = self.getTraitCovar(term_i)\n        stds = sp.sqrt(cov.diagonal())[:,sp.newaxis]\n        RV = cov / stds / stds.T\n        return RV", "docstring": "Return the estimated trait correlation coefficient matrix for term_i (or the total if term_i is None)\nTo retrieve the trait covariance matrix use \\see getTraitCovar\n\nArgs:\nterm_i:     index of the random effect term we want to retrieve the correlation coefficients\nReturns:\nestimated trait correlation coefficient matrix", "source": "juraj-google-style"}
{"code": "def metaclass(*metaclasses):\n\n    def _inner(cls):\n        metabases = tuple(collections.OrderedDict(((c, None) for c in (metaclasses + (type(cls),)))).keys())\n        _Meta = metabases[0]\n        for base in metabases[1:]:\n\n            class _Meta(base, _Meta):\n                pass\n        return six.add_metaclass(_Meta)(cls)\n    return _inner", "docstring": "Create the class using all metaclasses.\n\nArgs:\nmetaclasses: A tuple of metaclasses that will be used to generate and\nreplace a specified class.\n\nReturns:\nA decorator that will recreate the class using the specified\nmetaclasses.", "source": "codesearchnet"}
{"code": "def system_info(url, auth, verify_ssl):\n    \n    sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n    sysinfo_response.raise_for_status()\n    return sysinfo_response.json()", "docstring": "Retrieve SDC system information.\n\nArgs:\nurl (str): the host url.\nauth (tuple): a tuple of username, and password.", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    stores = match.get('Stores', {})\n    for (volume_name, volume) in iter(stores.items()):\n        datetime_value = volume.get('CreationDate', None)\n        if (not datetime_value):\n            continue\n        partial_path = volume['PartialPath']\n        event_data = plist_event.PlistTimeEventData()\n        event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(volume_name, partial_path)\n        event_data.key = ''\n        event_data.root = '/Stores'\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Volume Configuration Spotlight entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def compose_back(self, input_circuit, edge_map=None):\n    edge_map = (edge_map or {})\n    if (len(set(edge_map.values())) != len(edge_map)):\n        raise DAGCircuitError('duplicates in wire_map')\n    add_qregs = self._check_edgemap_registers(edge_map, input_circuit.qregs, self.qregs)\n    for qreg in add_qregs:\n        self.add_qreg(qreg)\n    add_cregs = self._check_edgemap_registers(edge_map, input_circuit.cregs, self.cregs)\n    for creg in add_cregs:\n        self.add_creg(creg)\n    self._check_wiremap_validity(edge_map, input_circuit.input_map, self.output_map)\n    for nd in input_circuit.topological_nodes():\n        if (nd.type == 'in'):\n            m_wire = edge_map.get(nd.wire, nd.wire)\n            if (m_wire not in self.output_map):\n                raise DAGCircuitError(('wire %s[%d] not in self' % (m_wire[0].name, m_wire[1])))\n            if (nd.wire not in input_circuit.wires):\n                raise DAGCircuitError(('inconsistent wire type for %s[%d] in input_circuit' % (nd.wire[0].name, nd.wire[1])))\n        elif (nd.type == 'out'):\n            pass\n        elif (nd.type == 'op'):\n            condition = self._map_condition(edge_map, nd.condition)\n            self._check_condition(nd.name, condition)\n            m_qargs = list(map((lambda x: edge_map.get(x, x)), nd.qargs))\n            m_cargs = list(map((lambda x: edge_map.get(x, x)), nd.cargs))\n            self.apply_operation_back(nd.op, m_qargs, m_cargs, condition)\n        else:\n            raise DAGCircuitError(('bad node type %s' % nd.type))", "docstring": "Apply the input circuit to the output of this circuit.\n\nThe two bases must be \"compatible\" or an exception occurs.\nA subset of input qubits of the input circuit are mapped\nto a subset of output qubits of this circuit.\n\nArgs:\ninput_circuit (DAGCircuit): circuit to append\nedge_map (dict): map {(Register, int): (Register, int)}\nfrom the output wires of input_circuit to input wires\nof self.\n\nRaises:\nDAGCircuitError: if missing, duplicate or incosistent wire", "source": "codesearchnet"}
{"code": "def get_gpu_count():\n    key = 'gpu_count_no_sudo'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting GPU count:\\n %s' % str(err))\n    return out.strip(b'\\n')", "docstring": "Retrieves total number of GPU's available in the system.\n\nReturns:\nInteger that is the total # of GPU's found.", "source": "github-repos"}
{"code": "def _TensorArrayGatherGrad(op: ops.Operation, grad):\n    handle = op.inputs[0]\n    indices = op.inputs[1]\n    flow = op.inputs[2]\n    dtype = op.get_attr('dtype')\n    grad_source = _GetGradSource(grad)\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    u_g = g.scatter(indices, grad)\n    return [None, None, u_g.flow]", "docstring": "Gradient for TensorArrayGather.\n\nArgs:\nop: Forward TensorArrayGather op.\ngrad: Gradient `Tensor` to TensorArrayGather.\n\nReturns:\nA flow `Tensor`, which can be used in control dependencies to\nforce the write of `grad` to the gradient `TensorArray`.", "source": "github-repos"}
{"code": "def prepare_lazy_data(content, functions_mapping=None, check_variables_set=None, cached=False):\n    if ((content is None) or isinstance(content, (numeric_types, bool, type))):\n        return content\n    elif isinstance(content, (list, set, tuple)):\n        return [prepare_lazy_data(item, functions_mapping, check_variables_set, cached) for item in content]\n    elif isinstance(content, dict):\n        parsed_content = {}\n        for (key, value) in content.items():\n            parsed_key = prepare_lazy_data(key, functions_mapping, check_variables_set, cached)\n            parsed_value = prepare_lazy_data(value, functions_mapping, check_variables_set, cached)\n            parsed_content[parsed_key] = parsed_value\n        return parsed_content\n    elif isinstance(content, basestring):\n        if (not is_var_or_func_exist(content)):\n            return content.replace('$$', '$')\n        functions_mapping = (functions_mapping or {})\n        check_variables_set = (check_variables_set or set())\n        content = content.strip()\n        content = LazyString(content, functions_mapping, check_variables_set, cached)\n    return content", "docstring": "make string in content as lazy object with functions_mapping\n\nRaises:\nexceptions.VariableNotFound: if any variable undefined in check_variables_set", "source": "codesearchnet"}
{"code": "def AddContract(self, contract):\n        \n        super(UserWallet, self).AddContract(contract)\n\n        try:\n            db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())\n            db_contract.delete_instance()\n        except Exception as e:\n            logger.debug(\"contract does not exist yet\")\n\n        sh = bytes(contract.ScriptHash.ToArray())\n        address, created = Address.get_or_create(ScriptHash=sh)\n        address.IsWatchOnly = False\n        address.save()\n        db_contract = Contract.create(RawData=contract.ToArray(),\n                                      ScriptHash=contract.ScriptHash.ToBytes(),\n                                      PublicKeyHash=contract.PublicKeyHash.ToBytes(),\n                                      Address=address,\n                                      Account=self.__dbaccount)\n\n        logger.debug(\"Creating db contract %s \" % db_contract)\n\n        db_contract.save()", "docstring": "Add a contract to the database.\n\nArgs:\ncontract(neo.SmartContract.Contract): a Contract instance.", "source": "juraj-google-style"}
{"code": "def get_keys_from_ldap(self, username=None):\n    result_dict = {}\n    filter = ['(sshPublicKey=*)']\n    if (username is not None):\n        filter.append('(uid={})'.format(username))\n    attributes = ['uid', 'sshPublicKey']\n    results = self.client.search(filter, attributes)\n    for result in results:\n        result_dict[result.uid.value] = result.sshPublicKey.values\n    return result_dict", "docstring": "Fetch keys from ldap.\n\nArgs:\nusername Username associated with keys to fetch (optional)\n\nReturns:\nArray of dictionaries in '{username: [public keys]}' format", "source": "codesearchnet"}
{"code": "def rotate(self, matrix, tol=1e-3):\n        \n        matrix = SquareTensor(matrix)\n        if not matrix.is_rotation(tol):\n            raise ValueError(\"Rotation matrix is not valid.\")\n        sop = SymmOp.from_rotation_and_translation(matrix,\n                                                   [0., 0., 0.])\n        return self.transform(sop)", "docstring": "Applies a rotation directly, and tests input matrix to ensure a valid\nrotation.\n\nArgs:\nmatrix (3x3 array-like): rotation matrix to be applied to tensor\ntol (float): tolerance for testing rotation matrix validity", "source": "juraj-google-style"}
{"code": "def get_help_datapacks(module_name, server_prefix):\n    \n\n    _dir = os.path.realpath(\n        os.path.join(os.getcwd(), os.path.dirname(__file__)))\n    module_dir = \"{}/../{}\".format(_dir, module_name, \"_help.json\")\n    if os.path.isdir(module_dir):\n        module_help_path = \"{}/{}\".format(module_dir, \"_help.json\")\n\n        if os.path.isfile(module_help_path):\n            return helptools.get_help_datapacks(module_help_path, server_prefix)\n        else:\n            return [(\"Help\", \"{} does not have a help.json file\".format(module_name), False)]\n    else:\n        return [(\"Help\", \"No module found called {}\".format(module_name), False)]", "docstring": "Get the help datapacks for a module\n\nArgs:\nmodule_name (str): The module to get help data for\nserver_prefix (str): The command prefix for this server\n\nReturns:\ndatapacks (list): The help datapacks for the module", "source": "juraj-google-style"}
{"code": "def getLogger(self, component_name: str=None) -> logging.Logger:\n    logger_name = (self.root + (component_name if component_name else 'generic'))\n    _logger = self.loggers.get(logger_name)\n    if (not _logger):\n        _logger = logging.getLogger(logger_name)\n        stdio_handler = logging.StreamHandler()\n        stdio_handler.setFormatter(LogFormatter())\n        stdio_handler.setLevel(logging.INFO)\n        _logger.addHandler(stdio_handler)\n        _logger.setLevel(logging.DEBUG)\n        self.loggers[logger_name] = _logger\n    return _logger", "docstring": "Get the logger instance matching ``component_name`` or create a new one if non-existent.\n\nArgs:\ncomponent_name: a neo-python component name. e.g. network, vm, db\n\nReturns:\na logger for the specified component.", "source": "codesearchnet"}
{"code": "def parsed_stack(self, value):\n        \n        if value == self._defaults['parsedStack'] and 'parsedStack' in self._values:\n            del self._values['parsedStack']\n        else:\n            self._values['parsedStack'] = value", "docstring": "The parsed_stack property.\n\nArgs:\nvalue (list). the property value.", "source": "juraj-google-style"}
{"code": "def info(self, **kwargs):\n    path = self._get_path('info')\n    kwargs.update({'session_id': self.session_id})\n    response = self._GET(path, kwargs)\n    self.id = response['id']\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the basic information for an account.\n\nCall this method first, before calling other Account methods.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def describe(obj: Any, denylist: Collection[Any], leaves_only: bool=False) -> str:\n    if get_ignore_reason(obj, denylist):\n        return '{}{}'.format(get_ignore_reason(obj, denylist), type(obj))\n    if tf_inspect.isframe(obj):\n        return 'frame: {}'.format(tf_inspect.getframeinfo(obj))\n    elif tf_inspect.ismodule(obj):\n        return 'module: {}'.format(obj.__name__)\n    elif leaves_only:\n        return '{}, {}'.format(type(obj), id(obj))\n    elif isinstance(obj, list):\n        return 'list({}): {}'.format(id(obj), [describe(e, denylist, leaves_only=True) for e in obj])\n    elif isinstance(obj, tuple):\n        return 'tuple({}): {}'.format(id(obj), [describe(e, denylist, leaves_only=True) for e in obj])\n    elif isinstance(obj, dict):\n        return 'dict({}): {} keys'.format(id(obj), len(obj.keys()))\n    elif tf_inspect.isfunction(obj):\n        return 'function({}) {}; globals ID: {}'.format(id(obj), obj.__name__, id(obj.__globals__))\n    else:\n        return '{}, {}'.format(type(obj), id(obj))", "docstring": "Returns a custom human-readable summary of obj.\n\nArgs:\nobj: the value to describe.\ndenylist: same as denylist in get_ignore_reason.\nleaves_only: boolean flag used when calling describe recursively. Useful\nfor summarizing collections.", "source": "github-repos"}
{"code": "def __normalized_name(self, message_type):\n    name = message_type.definition_name()\n    split_name = re.split('[^0-9a-zA-Z]', name)\n    normalized = ''.join(((part[0].upper() + part[1:]) for part in split_name if part))\n    previous = self.__normalized_names.get(normalized)\n    if previous:\n        if (previous != name):\n            raise KeyError(('Both %s and %s normalize to the same schema name: %s' % (name, previous, normalized)))\n    else:\n        self.__normalized_names[normalized] = name\n    return normalized", "docstring": "Normalized schema name.\n\nGenerate a normalized schema name, taking the class name and stripping out\neverything but alphanumerics, and camel casing the remaining words.\nA normalized schema name is a name that matches [a-zA-Z][a-zA-Z0-9]*\n\nArgs:\nmessage_type: protorpc.message.Message class being parsed.\n\nReturns:\nA string, the normalized schema name.\n\nRaises:\nKeyError: A collision was found between normalized names.", "source": "codesearchnet"}
{"code": "def ctc_state_log_probs(seq_lengths, max_seq_length):\n    batch_size = _get_dim(seq_lengths, 0)\n    num_label_states = max_seq_length + 1\n    num_duration_states = 2\n    num_states = num_duration_states * num_label_states\n    log_0 = math_ops.cast(math_ops.log(math_ops.cast(0, dtypes.float64) + 1e-307), dtypes.float32)\n    initial_state_log_probs = array_ops.one_hot(indices=array_ops.zeros([batch_size], dtype=dtypes.int32), depth=num_states, on_value=0.0, off_value=log_0, axis=1)\n    label_final_state_mask = array_ops.one_hot(seq_lengths, depth=num_label_states, axis=0)\n    duration_final_state_mask = array_ops.ones([num_duration_states, 1, batch_size])\n    final_state_mask = duration_final_state_mask * label_final_state_mask\n    final_state_log_probs = (1.0 - final_state_mask) * log_0\n    final_state_log_probs = array_ops.reshape(final_state_log_probs, [num_states, batch_size])\n    return (initial_state_log_probs, array_ops.transpose(final_state_log_probs))", "docstring": "Computes CTC alignment initial and final state log probabilities.\n\nCreate the initial/final state values directly as log values to avoid\nhaving to take a float64 log on tpu (which does not exist).\n\nArgs:\nseq_lengths: int tensor of shape [batch_size], seq lengths in the batch.\nmax_seq_length: int, max sequence length possible.\n\nReturns:\ninitial_state_log_probs, final_state_log_probs", "source": "github-repos"}
{"code": "def SetCredentials(self, password=None, username=None):\n    \n    if password:\n      self._password = password\n    if username:\n      self._user = username", "docstring": "Sets the database credentials.\n\nArgs:\npassword (Optional[str]): password to access the database.\nusername (Optional[str]): username to access the database.", "source": "juraj-google-style"}
{"code": "def pack_eager_tensors(tensors, ctx=None) -> EagerTensor:\n    if not isinstance(tensors, list):\n        raise TypeError(f'tensors must be a list, but got a {type(tensors)}')\n    if not tensors:\n        raise ValueError('Cannot pack an empty list of tensors.')\n    dtype = tensors[0].dtype\n    shape = tensors[0].shape\n    handle_data = tensors[0]._handle_data\n    is_resource = dtype == dtypes.resource\n    for i in range(len(tensors)):\n        t = tensors[i]\n        if not isinstance(t, EagerTensor):\n            raise TypeError(f'All tensors being packed must be EagerTensor. Found an item of type {type(t)}.')\n        if t.dtype != dtype:\n            raise ValueError(f'All tensors being packed should have the same dtype {dtype}, but the {i}-th tensor is of dtype {t.dtype}')\n        if t.shape != shape:\n            raise ValueError(f'All tensors being packed should have the same shape {shape}, but the {i}-th tensor is of shape {t.shape}')\n        if is_resource and t._handle_data != handle_data:\n            raise ValueError(f'All tensors being packed should have the same handle data {handle_data}, but the {i}-th tensor is of handle data {t._handle_data}')\n    if ctx is None:\n        ctx = context.context()\n    packed_tensor = ctx.pack_eager_tensors(tensors)\n    if handle_data is not None:\n        packed_tensor._handle_data = handle_data\n\n    def grad_fun(_):\n        raise ValueError('Computing gradients through pack_eager_tensors is not supported.')\n    record.record_operation('pack_eager_tensors', [packed_tensor], tensors, grad_fun)\n    return packed_tensor", "docstring": "Pack multiple `EagerTensor`s of the same dtype and shape.\n\nArgs:\ntensors: a list of EagerTensors to pack.\nctx: context.context().\n\nReturns:\nA packed EagerTensor.", "source": "github-repos"}
{"code": "def evaluated_variants(self, case_id):\n    query = {'$and': [{'case_id': case_id}, {'$or': [{'acmg_classification': {'$exists': True}}, {'manual_rank': {'$exists': True}}, {'dismiss_variant': {'$exists': True}}]}]}\n    variants = {}\n    for var in self.variant_collection.find(query):\n        variants[var['variant_id']] = self.add_gene_info(var)\n    event_query = {'$and': [{'case': case_id}, {'category': 'variant'}, {'verb': 'comment'}]}\n    comment_variants = {event['variant_id'] for event in self.event_collection.find(event_query)}\n    for var_id in comment_variants:\n        if (var_id in variants):\n            continue\n        variant_obj = self.variant(var_id, case_id=case_id)\n        if (not variant_obj):\n            continue\n        variant_obj['is_commented'] = True\n        variants[var_id] = variant_obj\n    return variants.values()", "docstring": "Returns variants that has been evaluated\n\nReturn all variants, snvs/indels and svs from case case_id\nwhich have a entry for 'acmg_classification', 'manual_rank', 'dismiss_variant'\nor if they are commented.\n\nArgs:\ncase_id(str)\n\nReturns:\nvariants(iterable(Variant))", "source": "codesearchnet"}
{"code": "def create_symbol(self, *args, **kwargs):\n        \n        if not kwargs.get('project_name'):\n            kwargs['project_name'] = self.project.project_name\n\n        sym = self.app.database.create_symbol(*args, **kwargs)\n        if sym:\n            \n            if type(sym) != Symbol:\n                self._created_symbols[sym.filename].add(sym.unique_name)\n\n        return sym", "docstring": "Extensions that discover and create instances of `symbols.Symbol`\nshould do this through this method, as it will keep an index\nof these which can be used when generating a \"naive index\".\n\nSee `database.Database.create_symbol` for more\ninformation.\n\nArgs:\nargs: see `database.Database.create_symbol`\nkwargs: see `database.Database.create_symbol`\n\nReturns:\nsymbols.Symbol: the created symbol, or `None`.", "source": "juraj-google-style"}
{"code": "def safe_url(self, url, errors='strict'):\n        \n        if url is not None:\n            url = quote(self.s(url, errors=errors), safe='~')\n        return url", "docstring": "URL encode value for safe HTTP request.\n\nArgs:\nurl (string): The string to URL Encode.\n\nReturns:\n(string): The urlencoded string.", "source": "juraj-google-style"}
{"code": "def _check_required_fields(self, object_type, ignore_fields):\n    for field in self.configuration[object_type]['required_fields']:\n        if ((field not in self.data) and (field not in ignore_fields)):\n            raise HDXError(('Field %s is missing in %s!' % (field, object_type)))", "docstring": "Helper method to check that metadata for HDX object is complete\n\nArgs:\nignore_fields (List[str]): Any fields to ignore in the check\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _AssertValidators(self, validators):\n    \n    for validator in sorted(\n        validators, key=lambda validator: validator.insertion_index):\n      try:\n        validator.verify(self)\n      except exceptions.ValidationError as e:\n        message = validator.print_flags_with_values(self)\n        raise exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))", "docstring": "Assert if all validators in the list are satisfied.\n\nAsserts validators in the order they were created.\nArgs:\nvalidators: Iterable(validators.Validator), validators to be\nverified\nRaises:\nAttributeError: if validators work with a non-existing flag.\nIllegalFlagValueError: if validation fails for at least one validator", "source": "juraj-google-style"}
{"code": "def register_rml_def(self,\n                         location_type,\n                         location,\n                         filename=None,\n                         **kwargs):\n        \n        if location_type == 'directory':\n            self.register_directory(location, **kwargs)\n        elif location_type == 'filepath':\n            if not os.path.exists(location):\n                raise OSError(\"File not found\", location)\n            if os.path.isfile(location):\n                self.register_rml(location)\n            elif filename:\n                new_loc = os.path.join(location, filename)\n                if not os.path.exists(new_loc):\n                    raise OSError(\"File not found\", new_loc)\n                elif os.path.isfile(new_loc):\n                    self.register_rml(new_loc)\n            else:\n                raise OSError(\"File not found\", location)\n        elif location_type.startswith('package'):\n            pkg_path = \\\n                    importlib.util.find_spec(\\\n                            location).submodule_search_locations[0]\n            if location_type.endswith('_all'):\n                self.register_directory(pkg_path, **kwargs)\n            elif location_type.endswith('_file'):\n                filepath = os.path.join(pkg_path, filename)\n                self.register_rml(filepath, **kwargs)\n            else:\n                raise NotImplementedError", "docstring": "Registers the rml file locations for easy access\n\nArgs:\n-----\nlocation_type: ['package_all',\n'package_file',\n'directory',\n'filepath']\nlocation: The correlated location string based on the location_type\nfilename: Optional, associated with 'package_file' location_type\n\nkwargs:\n-------\ninclude_subfolders: Boolean", "source": "juraj-google-style"}
{"code": "def get_corrections_dict(self, entry):\n    corrections = {}\n    for c in self.corrections:\n        val = c.get_correction(entry)\n        if (val != 0):\n            corrections[str(c)] = val\n    return corrections", "docstring": "Returns the corrections applied to a particular entry.\n\nArgs:\nentry: A ComputedEntry object.\n\nReturns:\n({correction_name: value})", "source": "codesearchnet"}
{"code": "def write_fasta_file(seq_records, outname, outdir=None, outext='.faa', force_rerun=False):\n    if (not outdir):\n        outdir = ''\n    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        SeqIO.write(seq_records, outfile, 'fasta')\n    return outfile", "docstring": "Write a FASTA file for a SeqRecord or a list of SeqRecord objects.\n\nArgs:\nseq_records (SeqRecord, list): SeqRecord or a list of SeqRecord objects\noutname: Name of the output file which will have outext appended to it\noutdir: Path to directory to output sequences to\noutext: Extension of FASTA file, default \".faa\"\nforce_rerun: If file should be overwritten if it exists\n\nReturns:\nstr: Path to output FASTA file.", "source": "codesearchnet"}
{"code": "def CreateStorageWriterForFile(cls, session, path):\n    if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path):\n        return sqlite_writer.SQLiteStorageFileWriter(session, path)\n    return None", "docstring": "Creates a storage writer based on the file.\n\nArgs:\nsession (Session): session the storage changes are part of.\npath (str): path to the storage file.\n\nReturns:\nStorageWriter: a storage writer or None if the storage file cannot be\nopened or the storage format is not supported.", "source": "codesearchnet"}
{"code": "def run_command(command, input_data=None, out_pipe=subprocess.PIPE, err_pipe=subprocess.PIPE, env=None, **kwargs):\n    if (env is None):\n        env = os.environ.copy()\n    with LogTask(('Run command: %s' % ' '.join((('\"%s\"' % arg) for arg in command))), logger=LOGGER, level='debug') as task:\n        command_result = _run_command(command=command, input_data=input_data, out_pipe=out_pipe, err_pipe=err_pipe, env=env, uuid=task.uuid, **kwargs)\n        return command_result", "docstring": "Runs a command non-interactively\n\nArgs:\ncommand(list of str): args of the command to execute, including the\ncommand itself as command[0] as `['ls', '-l']`\ninput_data(str): If passed, will feed that data to the subprocess\nthrough stdin\nout_pipe(int or file): File descriptor as passed to\n:ref:subprocess.Popen to use as stdout\nerr_pipe(int or file): File descriptor as passed to\n:ref:subprocess.Popen to use as stderr\nenv(dict of str:str): If set, will use the given dict as env for the\nsubprocess\n**kwargs: Any other keyword args passed will be passed to the\n:ref:subprocess.Popen call\n\nReturns:\nlago.utils.CommandStatus: result of the interactive execution", "source": "codesearchnet"}
{"code": "def DtypeToNumberConverter(self, dtype):\n    \n    if np.issubdtype(dtype, np.datetime64):\n\n      def DatetimesToNumbers(dt_list):\n        return np.array([pd.Timestamp(dt).value for dt in dt_list])\n\n      return DatetimesToNumbers\n    elif np.issubdtype(dtype, np.timedelta64):\n\n      def TimedetlasToNumbers(td_list):\n        return np.array([pd.Timedelta(td).value for td in td_list])\n\n      return TimedetlasToNumbers\n    else:\n      return None", "docstring": "Converts a Numpy dtype to a converter method if applicable.\nThe converter method takes in a numpy array of objects of the provided\ndtype\nand returns a numpy array of the numbers backing that object for\nstatistical\nanalysis. Returns None if no converter is necessary.\nArgs:\ndtype: The numpy dtype to make a converter for.\nReturns:\nThe converter method or None.", "source": "juraj-google-style"}
{"code": "def _create_zeros_for_none_grads(forward_graphs, grad_graphs):\n    assert len(forward_graphs) == len(grad_graphs)\n    branch_outputs = [g.structured_outputs for g in grad_graphs]\n    num_outputs_per_branch = [len(outs) for outs in branch_outputs]\n    assert len(set(num_outputs_per_branch)) == 1, num_outputs_per_branch\n    for output_idx, branch_outs in enumerate(zip(*branch_outputs)):\n        if any((t is None for t in branch_outs)) and any((t is not None for t in branch_outs)):\n            for branch_index, t in enumerate(branch_outs):\n                if t is None:\n                    with grad_graphs[branch_index].as_default():\n                        zeros = default_gradient.zeros_like(forward_graphs[branch_index].inputs[output_idx])\n                        grad_graphs[branch_index].structured_outputs[output_idx] = zeros\n    for grad_graph in grad_graphs:\n        grad_graph.outputs = [t for t in func_graph_module.flatten(grad_graph.structured_outputs) if t is not None]", "docstring": "Creates zeros for None out grads if at least one branch has non-None grad.\n\nArgs:\nforward_graphs: List of forward FuncGraphs.\ngrad_graphs: List of grad FuncGraphs.", "source": "github-repos"}
{"code": "def unique_array(arr):\n    if (not len(arr)):\n        return np.asarray(arr)\n    elif pd:\n        if (isinstance(arr, np.ndarray) and (arr.dtype.kind not in 'MO')):\n            return pd.unique(arr)\n        values = []\n        for v in arr:\n            if (isinstance(v, datetime_types) and (not isinstance(v, cftime_types))):\n                v = pd.Timestamp(v).to_datetime64()\n            values.append(v)\n        return pd.unique(values)\n    else:\n        arr = np.asarray(arr)\n        (_, uniq_inds) = np.unique(arr, return_index=True)\n        return arr[np.sort(uniq_inds)]", "docstring": "Returns an array of unique values in the input order.\n\nArgs:\narr (np.ndarray or list): The array to compute unique values on\n\nReturns:\nA new array of unique values", "source": "codesearchnet"}
{"code": "def calculate_character_to_length_mapping(\n        measurer: text_measurer.TextMeasurer,\n        characters: Iterable[str]) -> Mapping[str, float]:\n    \n    char_to_length = {}\n\n    for c in characters:\n        char_to_length[c] = measurer.text_width(c)\n    return char_to_length", "docstring": "Return a mapping between each given character and its length.\n\nArgs:\nmeasurer: The TextMeasurer used to measure the width of the text in\npixels.\ncharacters: The characters to measure e.g. \"ml\".\n\nReturns:\nA mapping from the given characters to their length in pixels, as\ndetermined by 'measurer' e.g. {'m': 5.2, 'l', 1.2}.", "source": "juraj-google-style"}
{"code": "def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):\n    super(Dataset, self).update_from_json(path)\n    self.separate_resources()", "docstring": "Update dataset metadata with static metadata from JSON file\n\nArgs:\npath (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n    pass", "docstring": "Returns an IdWeightPair.\n\n`IdWeightPair` is a pair of `SparseTensor`s which represents ids and\nweights.\n\n`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`\n`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a\n`SparseTensor` of `float` or `None` to indicate all weights should be\ntaken to be 1. If specified, `weight_tensor` must have exactly the same\nshape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing\noutput of a `VarLenFeature` which is a ragged matrix.\n\nArgs:\ninputs: A `LazyBuilder` as a cache to get input tensors required to create\n`IdWeightPair`.\nweight_collections: List of graph collections to which variables (if any\nwill be created) are added.\ntrainable: If `True` also add variables to the graph collection\n`GraphKeys.TRAINABLE_VARIABLES` (see `tf.compat.v1.get_variable`).", "source": "github-repos"}
{"code": "def next_population(self, population, fitnesses):\n        \n        return common.make_population(self._population_size,\n                                      self._generate_solution)", "docstring": "Make a new population after each optimization iteration.\n\nArgs:\npopulation: The population current population of solutions.\nfitnesses: The fitness associated with each solution in the population\nReturns:\nlist; a list of solutions.", "source": "juraj-google-style"}
{"code": "def sgn_prod(p1, p2):\n    phase = Pauli._prod_phase(p1, p2)\n    new_pauli = (p1 * p2)\n    return (new_pauli, phase)", "docstring": "r\"\"\"\nMultiply two Paulis and track the phase.\n\n$P_3 = P_1 \\otimes P_2$: X*Y\n\nArgs:\np1 (Pauli): pauli 1\np2 (Pauli): pauli 2\n\nReturns:\nPauli: the multiplied pauli\ncomplex: the sign of the multiplication, 1, -1, 1j or -1j", "source": "codesearchnet"}
{"code": "class RedisEnrichmentHandler(EnrichmentSourceHandler[beam.Row, beam.Row]):\n\n    def __init__(self, redis_host: str, redis_port: int, index_name: str='embeddings-index', vector_field: str='text_vector', return_fields: list=['id', 'title', 'url', 'text'], hybrid_fields: str='*', k: int=2):\n        self.redis_host = redis_host\n        self.redis_port = redis_port\n        self.index_name = index_name\n        self.vector_field = vector_field\n        self.return_fields = return_fields\n        self.hybrid_fields = hybrid_fields\n        self.k = k\n        self.client = None\n\n    def __enter__(self):\n        \n        self.client = redis.Redis(host=self.redis_host, port=self.redis_port)\n\n    def __call__(self, request: beam.Row, *args, **kwargs):\n        \n        embedded_query = request['text']\n        base_query = f'{self.hybrid_fields}=>[KNN {self.k} @{self.vector_field} $vector AS vector_score]'\n        query = Query(base_query).return_fields(*self.return_fields).paging(0, self.k).dialect(2)\n        params_dict = {'vector': np.array(embedded_query).astype(dtype=np.float32).tobytes()}\n        results = self.client.ft(self.index_name).search(query, params_dict)\n        return (beam.Row(text=embedded_query), beam.Row(docs=results.docs))", "docstring": "A handler for :class:`apache_beam.transforms.enrichment.Enrichment`\ntransform to interact with redis vector DB.\n\nArgs:\nredis_host (str): Redis Host to connect to redis DB\nredis_port (int): Redis Port to connect to redis DB\nindex_name (str): Index Name created for searching in Redis DB\nvector_field (str): vector field to compute similarity score in vector DB\nreturn_fields (list): returns list of similar text and its embeddings\nhybrid_fields (str): fields to be selected\nk (int): Value of K in KNN algorithm for searching in redis", "source": "github-repos"}
{"code": "def _check_job_status(self, job, desc, status_key_name):\n        \n        status = desc[status_key_name]\n        \n        status = _STATUS_CODE_TABLE.get(status, status)\n\n        if status != 'Completed' and status != 'Stopped':\n            reason = desc.get('FailureReason', '(No reason provided)')\n            job_type = status_key_name.replace('JobStatus', ' job')\n            raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason))", "docstring": "Check to see if the job completed successfully and, if not, construct and\nraise a ValueError.\n\nArgs:\njob (str): The name of the job to check.\ndesc (dict[str, str]): The result of ``describe_training_job()``.\nstatus_key_name (str): Status key name to check for.\n\nRaises:\nValueError: If the training job fails.", "source": "juraj-google-style"}
{"code": "def _find_best_fit(self, pbin):\n        \n        fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items())\n        fit = (f for f in fit if f[0] is not None)\n        try:\n            _, rect = min(fit, key=self.first_item)\n            return rect\n        except ValueError:\n            return None", "docstring": "Return best fitness rectangle from rectangles packing _sorted_rect list\n\nArguments:\npbin (PackingAlgorithm): Packing bin\n\nReturns:\nkey of the rectangle with best fitness", "source": "juraj-google-style"}
{"code": "def process_exception(self, e, uuid, routing_key, body, tb=None):\n        \n        \n        msg = e.message if hasattr(e, \"message\") else str(e)\n        exception_type = str(e.__class__)\n        exception_name = str(e.__class__.__name__)\n\n        print \"Sending exception %s: %s for UUID %s.\" % (\n            exception_name,\n            msg,\n            uuid\n        )\n\n        self.sendMessage(\n            self.output_exchange,\n            routing_key,\n            str(body),\n            properties=pika.BasicProperties(\n                content_type=\"application/text\",\n                delivery_mode=2,\n                headers={\n                    \"exception\": msg,\n                    \"exception_type\": exception_type,\n                    \"exception_name\": exception_name,\n                    \"traceback\": tb,\n                    \"UUID\": uuid\n                }\n            )\n        )", "docstring": "Callback called when exception was raised.\n\nThis method serializes the exception and sends it over AMQP back\nto caller.\n\nArgs:\ne (obj): Instance of the exception.\nuuid (str): UUID of the message that caused the exception to raise.\nrouting_key (str): Which routing key was used.\nbody (str): Body of the exception - the longer text.\ntb (str, default None): Traceback (stacktrace)v of the exception.", "source": "juraj-google-style"}
{"code": "def AddFile(self, path, file_data):\n    \n    if self.file_system.FileEntryExistsByPath(path):\n      raise ValueError('Path: {0:s} already set.'.format(path))\n\n    self._AddParentDirectories(path)\n    self.file_system.AddFileEntry(path, file_data=file_data)", "docstring": "Adds a \"regular\" file to the fake file system.\n\nNote that this function will create parent directories if needed.\n\nArgs:\npath (str): path of the file within the fake file system.\nfile_data (bytes): data of the file.\n\nRaises:\nValueError: if the path is already set.", "source": "juraj-google-style"}
{"code": "def generate_panel(self, img):\n    plt.figure(figsize=(14, 6))\n    ax = plt.gca()\n    fig = plt.gcf()\n    plt.subplot(122)\n    data_save = np.zeros_like(self.postcard)\n    self.roll_best = np.zeros((4, 2))\n    for i in range(4):\n        g = np.where((self.qs == i))[0]\n        wh = np.where((self.times[g] > 54947))\n        self.roll_best[i] = self.do_rolltest(g, wh)\n    self.do_photometry()\n    for i in range(4):\n        g = np.where((self.qs == i))[0]\n        plt.errorbar(self.times[g], self.obs_flux[g], yerr=self.flux_uncert[i], fmt=fmt[i])\n    plt.xlabel('Time', fontsize=20)\n    plt.ylabel('Relative Flux', fontsize=20)\n    plt.subplot(121)\n    implot = plt.imshow(img, interpolation='nearest', cmap='gray', vmin=(98000 * 52), vmax=(104000 * 52))\n    cid = fig.canvas.mpl_connect('button_press_event', self.onclick)\n    plt.show(block=True)", "docstring": "Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions\nand generally not called by the user directly.\n\nArgs:\nimg: The data frame to be passed through to be plotted. A cutout of the ``integrated_postcard``", "source": "codesearchnet"}
{"code": "def credit_note(request, note_id, access_code=None):\n    note_id = int(note_id)\n    current_note = CreditNoteController.for_id_or_404(note_id)\n    apply_form = forms.ApplyCreditNoteForm(current_note.credit_note.invoice.user, (request.POST or None), prefix='apply_note')\n    refund_form = forms.ManualCreditNoteRefundForm((request.POST or None), prefix='refund_note')\n    cancellation_fee_form = forms.CancellationFeeForm((request.POST or None), prefix='cancellation_fee')\n    if (request.POST and apply_form.is_valid()):\n        inv_id = apply_form.cleaned_data['invoice']\n        invoice = commerce.Invoice.objects.get(pk=inv_id)\n        current_note.apply_to_invoice(invoice)\n        messages.success(request, ('Applied credit note %d to invoice.' % note_id))\n        return redirect('invoice', invoice.id)\n    elif (request.POST and refund_form.is_valid()):\n        refund_form.instance.entered_by = request.user\n        refund_form.instance.parent = current_note.credit_note\n        refund_form.save()\n        messages.success(request, 'Applied manual refund to credit note.')\n        refund_form = forms.ManualCreditNoteRefundForm(prefix='refund_note')\n    elif (request.POST and cancellation_fee_form.is_valid()):\n        percentage = cancellation_fee_form.cleaned_data['percentage']\n        invoice = current_note.cancellation_fee(percentage)\n        messages.success(request, ('Generated cancellation fee for credit note %d.' % note_id))\n        return redirect('invoice', invoice.invoice.id)\n    data = {'credit_note': current_note.credit_note, 'apply_form': apply_form, 'refund_form': refund_form, 'cancellation_fee_form': cancellation_fee_form}\n    return render(request, 'registrasion/credit_note.html', data)", "docstring": "Displays a credit note.\n\nIf ``request`` is a ``POST`` request, forms for applying or refunding\na credit note will be processed.\n\nThis view requires a login, and the logged in user must be staff.\n\nArguments:\nnote_id (castable to int): The ID of the credit note to view.\n\nReturns:\nrender or redirect:\nIf the \"apply to invoice\" form is correctly processed, redirect to\nthat invoice, otherwise, render ``registration/credit_note.html``\nwith the following data::\n\n{\n\"credit_note\": models.commerce.CreditNote(),\n\"apply_form\": form,  # A form for applying credit note\n# to an invoice.\n\"refund_form\": form, # A form for applying a *manual*\n# refund of the credit note.\n\"cancellation_fee_form\" : form, # A form for generating an\n# invoice with a\n# cancellation fee\n}", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = self.attn_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    cross_attn_present_key_value = None\n    cross_attn_weights = None\n    if encoder_hidden_states is not None:\n        residual = hidden_states\n        hidden_states = self.cross_attention_layer_norm(hidden_states)\n        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n        hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_value=cross_attn_past_key_value, attention_mask=encoder_attention_mask, output_attentions=output_attentions)\n        hidden_states = self.attn_dropout(hidden_states)\n        hidden_states = residual + hidden_states\n        present_key_value += cross_attn_present_key_value\n    residual = hidden_states\n    hidden_states = self.ffn_layer_norm(hidden_states)\n    hidden_states = self.ffn(hidden_states)\n    hidden_states = self.ffn_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states, present_key_value)\n    if output_attentions:\n        outputs += (self_attn_weights, cross_attn_weights)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\ninput to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`):\nattention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very\nlarge negative values.\nencoder_hidden_states (`torch.FloatTensor`):\ncross attention input to the layer of shape `(batch, seq_len, embed_dim)`\nencoder_attention_mask (`torch.FloatTensor`):\nencoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by\nvery large negative values.\npast_key_value (`Tuple(torch.FloatTensor)`):\ncached past key and value projection states\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _FormatDateTime(self, event):\n    try:\n        return timelib.Timestamp.CopyToIsoFormat(event.timestamp, timezone=self._output_mediator.timezone, raise_error=True)\n    except (OverflowError, ValueError) as exception:\n        self._ReportEventError(event, 'unable to copy timestamp: {0!s} to a human readable date and time with error: {1!s}. Defaulting to: \"0000-00-00T00:00:00\"'.format(event.timestamp, exception))\n        return '0000-00-00T00:00:00'", "docstring": "Formats the date and time in ISO 8601 format.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: date and time field.", "source": "codesearchnet"}
{"code": "def oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):\n    \n    return self._simple_command(\n        'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)", "docstring": "Executes an OEM command on the device.\n\nArgs:\ncommand: The command to execute, such as 'poweroff' or 'bootconfig read'.\ntimeout_ms: Optional timeout in milliseconds to wait for a response.\ninfo_cb: See Download. Messages vary based on command.\nReturns:\nThe final response from the device.", "source": "juraj-google-style"}
{"code": "def lengths( self ):\n        \n        return( np.array( [ math.sqrt( sum( row**2 ) ) for row in self.matrix ] ) )", "docstring": "The cell lengths.\n\nArgs:\nNone\n\nReturns:\n(np.array(a,b,c)): The cell lengths.", "source": "juraj-google-style"}
{"code": "def get_inspection_units(logdir='', event_file='', tag=''):\n  \n  if logdir:\n    subdirs = io_wrapper.GetLogdirSubdirectories(logdir)\n    inspection_units = []\n    for subdir in subdirs:\n      generator = itertools.chain(*[\n          generator_from_event_file(os.path.join(subdir, f))\n          for f in tf.io.gfile.listdir(subdir)\n          if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))\n      ])\n      inspection_units.append(InspectionUnit(\n          name=subdir,\n          generator=generator,\n          field_to_obs=get_field_to_observations_map(generator, tag)))\n    if inspection_units:\n      print('Found event files in:\\n{}\\n'.format('\\n'.join(\n          [u.name for u in inspection_units])))\n    elif io_wrapper.IsTensorFlowEventsFile(logdir):\n      print(\n          'It seems that {} may be an event file instead of a logdir. If this '\n          'is the case, use --event_file instead of --logdir to pass '\n          'it in.'.format(logdir))\n    else:\n      print('No event files found within logdir {}'.format(logdir))\n    return inspection_units\n  elif event_file:\n    generator = generator_from_event_file(event_file)\n    return [InspectionUnit(\n        name=event_file,\n        generator=generator,\n        field_to_obs=get_field_to_observations_map(generator, tag))]\n  return []", "docstring": "Returns a list of InspectionUnit objects given either logdir or event_file.\n\nIf logdir is given, the number of InspectionUnits should equal the\nnumber of directories or subdirectories that contain event files.\n\nIf event_file is given, the number of InspectionUnits should be 1.\n\nArgs:\nlogdir: A log directory that contains event files.\nevent_file: Or, a particular event file path.\ntag: An optional tag name to query for.\n\nReturns:\nA list of InspectionUnit objects.", "source": "juraj-google-style"}
{"code": "def sg_restore(sess, save_path, category=''):\n    if (not isinstance(category, (tuple, list))):\n        category = [category]\n    var_list = {}\n    for cat in category:\n        for t in tf.global_variables():\n            if t.name.startswith(cat):\n                var_list[t.name[:(- 2)]] = t\n    saver = tf.train.Saver(var_list)\n    saver.restore(sess, save_path)", "docstring": "r\"\"\" Restores previously saved variables.\n\nArgs:\nsess: A `Session` to use to restore the parameters.\nsave_path: Path where parameters were previously saved.\ncategory: A `String` to filter variables starts with given category.\n\nReturns:", "source": "codesearchnet"}
{"code": "def report_clean(rows):\n    print('DBM Report Clean')\n    first = True\n    last = False\n    date = None\n    for row in rows:\n        if row == ['No data returned by the reporting service.']:\n            break\n        if not row or row[0] is None or row[0] == '':\n            break\n        if first:\n            try:\n                date_column = row.index('Date')\n                row[date_column] = 'Report_Day'\n            except ValueError:\n                pass\n            row = [column_header_sanitize(cell) for cell in row]\n        else:\n            row = [cell.replace('/', '-') if isinstance(cell, str) and len(cell) == 4 + 1 + 2 + 1 + 2 and (cell[4] == '/') and (cell[7] == '/') else cell for cell in row]\n        row = ['' if cell.strip() in ('Unknown', '-') else '1000' if cell == '< 1000' else cell for cell in row]\n        yield row\n        first = False", "docstring": "Helper to fix DBM report issues for BigQuery and ensure schema compliance.\n\nMemory efficiently cleans each row by fixing:\n* Strips header and footer to preserve only data rows.\n* Changes 'Date' to 'Report_Day' to avoid using reserved name in BigQuery.\n* Changes date values to use '-' instead of '/' for BigQuery compatibility.\n* Changes columns '-' and 'Unknown' to NULL\n* Changes '< 1000' to 1000\n\nUsage example:\n\n```\nfilename, report = report_file(...)\nrows = report_to_rows(report)\nrows = report_clean(rows)\n```\n\nArgs:\n* rows: (iterator) Rows to clean.\n\nReturns:\n* Iterator of cleaned rows.", "source": "github-repos"}
{"code": "def array_to_img(x, data_format=None, scale=True, dtype=None):\n    data_format = backend.standardize_data_format(data_format)\n    if dtype is None:\n        dtype = backend.floatx()\n    if pil_image is None:\n        raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')\n    x = np.asarray(x, dtype=dtype)\n    if x.ndim != 3:\n        raise ValueError(f'Expected image array to have rank 3 (single image). Got array with shape: {x.shape}')\n    if data_format == 'channels_first':\n        x = x.transpose(1, 2, 0)\n    if scale:\n        x = x - np.min(x)\n        x_max = np.max(x)\n        if x_max != 0:\n            x /= x_max\n        x *= 255\n    if x.shape[2] == 4:\n        return pil_image.fromarray(x.astype('uint8'), 'RGBA')\n    elif x.shape[2] == 3:\n        return pil_image.fromarray(x.astype('uint8'), 'RGB')\n    elif x.shape[2] == 1:\n        if np.max(x) > 255:\n            return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')\n        return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')\n    else:\n        raise ValueError(f'Unsupported channel number: {x.shape[2]}')", "docstring": "Converts a 3D NumPy array to a PIL Image instance.\n\nExample:\n\n```python\nfrom PIL import Image\nimg = np.random.random(size=(100, 100, 3))\npil_img = keras.utils.array_to_img(img)\n```\n\nArgs:\nx: Input data, in any form that can be converted to a NumPy array.\ndata_format: Image data format, can be either `\"channels_first\"` or\n`\"channels_last\"`. Defaults to `None`, in which case the global\nsetting `keras.backend.image_data_format()` is used (unless you\nchanged it, it defaults to `\"channels_last\"`).\nscale: Whether to rescale the image such that minimum and maximum values\nare 0 and 255 respectively. Defaults to `True`.\ndtype: Dtype to use. `None` means the global setting\n`keras.backend.floatx()` is used (unless you changed it, it\ndefaults to `\"float32\"`). Defaults to `None`.\n\nReturns:\nA PIL Image instance.", "source": "github-repos"}
{"code": "def add_untagged_ok(self, text: MaybeBytes,\n                        code: Optional[ResponseCode] = None) -> None:\n        \n        response = ResponseOk(b'*', text, code)\n        self.add_untagged(response)", "docstring": "Add an untagged ``OK`` response.\n\nSee Also:\n:meth:`.add_untagged`, :class:`ResponseOk`\n\nArgs:\ntext: The response text.\ncode: Optional response code.", "source": "juraj-google-style"}
{"code": "def load_drops(self, dropin):\n    obj = load_object(dropin)\n    try:\n        drops = getattr(obj, self.drops_type)\n    except AttributeError:\n        try:\n            drops = load_object(('%s.%s' % (dropin, self.drops_type)))\n        except ImportError:\n            drops = None\n    if hasattr(drops, '__drops__'):\n        drops = drops.__drops__\n    if callable(drops):\n        drops = drops(self.app)\n    return (drops or [])", "docstring": "Load `drops` from the given dropin.\n\nArgs:\ndropin (string): path of a dropin, e.g. dropin.auth\n\nReturns:\nAn iterable contains the drops object in the given dropin\n\nThis method load drops object by some sort of convension. For example, assuming\nwe want to load drops type `models` from dropin `dropin.articls`. The drops are\ndiscoveried with the following sequence::\n\nimport dropin.articles\ndrops = dropin.articles.models\n\nif anything goes wrong, next try is ::\n\nimport dropin.articles.models as drops\n\nif the current drops object has attribute **__drops__** ::\n\ndrops = drops.__drops__\n\nif the current drops object is a callable ::\n\ndrops = drops()\n\nif not drops was found, an empty list is returned.", "source": "codesearchnet"}
{"code": "def _add_input_deps(self, executor, args, kwargs):\n        \n\n        \n        if executor == 'data_manager':\n            return args, kwargs\n\n        inputs = kwargs.get('inputs', [])\n        for idx, f in enumerate(inputs):\n            if isinstance(f, File) and f.is_remote():\n                inputs[idx] = self.data_manager.stage_in(f, executor)\n\n        for kwarg, f in kwargs.items():\n            if isinstance(f, File) and f.is_remote():\n                kwargs[kwarg] = self.data_manager.stage_in(f, executor)\n\n        newargs = list(args)\n        for idx, f in enumerate(newargs):\n            if isinstance(f, File) and f.is_remote():\n                newargs[idx] = self.data_manager.stage_in(f, executor)\n\n        return tuple(newargs), kwargs", "docstring": "Look for inputs of the app that are remote files. Submit stage_in\napps for such files and replace the file objects in the inputs list with\ncorresponding DataFuture objects.\n\nArgs:\n- executor (str) : executor where the app is going to be launched\n- args (List) : Positional args to app function\n- kwargs (Dict) : Kwargs to app function", "source": "juraj-google-style"}
{"code": "def add_send_message(self, connection, send_message):\n        \n        self._send_message[connection] = send_message\n        LOGGER.debug(\"Added send_message function \"\n                     \"for connection %s\", connection)", "docstring": "Adds a send_message function to the Dispatcher's\ndictionary of functions indexed by connection.\n\nArgs:\nconnection (str): A locally unique identifier\nprovided by the receiver of messages.\nsend_message (fn): The method that should be called\nby the dispatcher to respond to messages which\narrive via connection.", "source": "juraj-google-style"}
{"code": "def _compile_control_flow_expression(self,\n                                         expr: Expression,\n                                         scope: Dict[str, TensorFluent],\n                                         batch_size: Optional[int] = None,\n                                         noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:\n        \n        etype = expr.etype\n        args = expr.args\n        if etype[1] == 'if':\n            condition = self._compile_expression(args[0], scope, batch_size, noise)\n            true_case = self._compile_expression(args[1], scope, batch_size, noise)\n            false_case = self._compile_expression(args[2], scope, batch_size, noise)\n            fluent = TensorFluent.if_then_else(condition, true_case, false_case)\n        else:\n            raise ValueError('Invalid control flow expression:\\n{}'.format(expr))\n        return fluent", "docstring": "Compile a control flow expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "juraj-google-style"}
{"code": "def plot_labels(ax, label_fontsize=14, xlabel=None, xlabel_arg=None, ylabel=None, ylabel_arg=None, zlabel=None, zlabel_arg=None):\n    xlabel = (xlabel if (xlabel is not None) else (ax.get_xlabel() or 'X'))\n    ylabel = (ylabel if (ylabel is not None) else (ax.get_ylabel() or 'Y'))\n    xlabel_arg = dict_if_none(xlabel_arg)\n    ylabel_arg = dict_if_none(ylabel_arg)\n    ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)\n    ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)\n    if hasattr(ax, 'zaxis'):\n        zlabel = (zlabel if (zlabel is not None) else (ax.get_zlabel() or 'Z'))\n        zlabel_arg = dict_if_none(zlabel_arg)\n        ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)", "docstring": "Sets the labels options of a matplotlib plot\n\nArgs:\nax: matplotlib axes\nlabel_fontsize(int): Size of the labels' font\nxlabel(str): The xlabel for the figure\nxlabel_arg(dict):  Passsed into matplotlib as xlabel arguments\nylabel(str): The ylabel for the figure\nylabel_arg(dict):  Passsed into matplotlib as ylabel arguments\nzlabel(str): The zlabel for the figure\nzlabel_arg(dict):  Passsed into matplotlib as zlabel arguments", "source": "codesearchnet"}
{"code": "def addRow(self, *value):\n    if ((len(value) == 1) and isinstance(value[0], (tuple, list))):\n        value = value[0]\n    assert (len(value) == self.getNumCols())\n    self._impl.addRow(Tuple(value)._impl)", "docstring": "Add a row to the DataFrame. The size of the tuple must be equal to the\ntotal number of columns in the dataframe.\n\nArgs:\nvalue: A single argument with a tuple containing all the values\nfor the row to be added, or multiple arguments with the values for\neach column.", "source": "codesearchnet"}
{"code": "def upgrade(self, remote=None):\n    if self.enabled:\n        raise errors.DockerError('Plugin must be disabled before upgrading.')\n    if (remote is None):\n        remote = self.name\n    privileges = self.client.api.plugin_privileges(remote)\n    for d in self.client.api.upgrade_plugin(self.name, remote, privileges):\n        (yield d)\n    self._reload()", "docstring": "Upgrade the plugin.\n\nArgs:\nremote (string): Remote reference to upgrade to. The\n``:latest`` tag is optional and is the default if omitted.\nDefault: this plugin's name.\n\nReturns:\nA generator streaming the decoded API logs", "source": "codesearchnet"}
{"code": "def _create_pseudo_names(tensors, prefix):\n\n    def one_index(ele):\n        if isinstance(ele, int):\n            return ele + 1\n        return ele\n    flat_paths = list(nest.yield_flat_paths(tensors))\n    flat_paths = nest.map_structure(one_index, flat_paths)\n    names = []\n    for path in flat_paths:\n        if not path:\n            name = prefix + '1'\n        else:\n            name = '_'.join((str(p) for p in path))\n            if isinstance(path[0], int):\n                name = prefix + name\n        names.append(name)\n    return names", "docstring": "Creates pseudo {input | output} names for subclassed Models.\n\nWarning: this function should only be used to define default\nnames for `Metics` and `SavedModel`. No other use cases should\nrely on a `Model`'s input or output names.\n\nExample with dict:\n\n`{'a': [x1, x2], 'b': x3}` becomes:\n`['a_1', 'a_2', 'b']`\n\nExample with list:\n\n`[x, y]` becomes:\n`['output_1', 'output_2']`\n\nArgs:\ntensors: `Model`'s outputs or inputs.\nprefix: 'output_' for outputs, 'input_' for inputs.\n\nReturns:\nFlattened list of pseudo names.", "source": "github-repos"}
{"code": "def add_element(self, element):\n    if isinstance(element, BaseExpression):\n        element.set_parent(self._working_fragment)\n        self._working_fragment.elements.append(element)\n        return self\n    else:\n        return self.add_operator(element)", "docstring": "Add an element of type ``Operator``, ``Constraint``, or\n``Expression`` to the ``Expression``.\n\nArgs:\nelement: ``Constraint``, ``Expression``, or ``Operator``.\n\nReturns:\nExpression: ``self``\n\nRaises:\nFiqlObjectException: Element is not a valid type.", "source": "codesearchnet"}
{"code": "def _bytestringToFloat(bytestring, numberOfRegisters=2):\n    \n    _checkString(bytestring, minlength=4, maxlength=8, description='bytestring')\n    _checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')\n\n    numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n\n    formatcode = '>'  \n    if numberOfRegisters == 2:\n        formatcode += 'f'  \n    elif numberOfRegisters == 4:\n        formatcode += 'd'  \n    else:\n        raise ValueError('Wrong number of registers! Given value is {0!r}'.format(numberOfRegisters))\n\n    if len(bytestring) != numberOfBytes:\n        raise ValueError('Wrong length of the byte string! Given value is {0!r}, and numberOfRegisters is {1!r}.'.\\\n            format(bytestring, numberOfRegisters))\n\n    return _unpack(formatcode, bytestring)", "docstring": "Convert a four-byte string to a float.\n\nFloats are stored in two or more consecutive 16-bit registers in the slave.\n\nFor discussion on precision, number of bits, number of registers, the range, byte order\nand on alternative names, see :func:`minimalmodbus._floatToBytestring`.\n\nArgs:\n* bytestring (str): A string of length 4 or 8.\n* numberOfRegisters (int): Can be 2 or 4.\n\nReturns:\nA float.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def sanger_variants(self, institute_id=None, case_id=None):\n        \n        query = {'validation': {'$exists': True}}\n        if institute_id:\n            query['institute_id'] = institute_id\n        if case_id:\n            query['case_id'] = case_id\n\n        return self.variant_collection.find(query)", "docstring": "Return all variants with sanger information\n\nArgs:\ninstitute_id(str)\ncase_id(str)\n\nReturns:\nres(pymongo.Cursor): A Cursor with all variants with sanger activity", "source": "juraj-google-style"}
{"code": "def __init__(self, ascii_codepage='cp1252', key_path_prefix=''):\n    \n    super(WinRegistryFile, self).__init__()\n    self._ascii_codepage = ascii_codepage\n    self._key_path_prefix = key_path_prefix\n    self._key_path_prefix_length = len(key_path_prefix)\n    self._key_path_prefix_upper = key_path_prefix.upper()", "docstring": "Initializes a Windows Registry file.\n\nArgs:\nascii_codepage (Optional[str]): ASCII string codepage.\nkey_path_prefix (Optional[str]): Windows Registry key path prefix.", "source": "juraj-google-style"}
{"code": "def int_to_bit(self, x_int, num_bits, base=2):\n    \n    x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))\n    \n    x_labels = [\n        tf.floormod(\n            tf.floordiv(tf.to_int32(x_l),\n                        tf.to_int32(base)**i), tf.to_int32(base))\n        for i in range(num_bits)]\n    res = tf.concat(x_labels, axis=-1)\n    return tf.to_float(res)", "docstring": "Turn x_int representing numbers into a bitwise (lower-endian) tensor.\n\nArgs:\nx_int: Tensor containing integer to be converted into base\nnotation.\nnum_bits: Number of bits in the representation.\nbase: Base of the representation.\n\nReturns:\nCorresponding number expressed in base.", "source": "juraj-google-style"}
{"code": "def get(self, key, default='', stringify=True):\n    obj = self.__getitem__(key)\n    if (obj is None):\n        obj = default\n    elif stringify:\n        obj = str(obj)\n    return obj", "docstring": "Returns dictionary values or default.\n\nArgs:\nkey: string. Dictionary key to look up.\ndefault: string. Return this value if key not found.\nstringify: bool. Force all return values to string for compatibility\nreasons.\nReturns:\npython-wrapped CF object or default if not found.", "source": "codesearchnet"}
{"code": "def _cell_magic(line, query):\n    args = magic_arguments.parse_argstring(_cell_magic, line)\n    params = []\n    if (args.params is not None):\n        try:\n            params = _helpers.to_query_parameters(ast.literal_eval(''.join(args.params)))\n        except Exception:\n            raise SyntaxError('--params is not a correctly formatted JSON string or a JSON serializable dictionary')\n    project = (args.project or context.project)\n    client = bigquery.Client(project=project, credentials=context.credentials)\n    bqstorage_client = _make_bqstorage_client((args.use_bqstorage_api or context.use_bqstorage_api), context.credentials)\n    job_config = bigquery.job.QueryJobConfig()\n    job_config.query_parameters = params\n    job_config.use_legacy_sql = args.use_legacy_sql\n    query_job = _run_query(client, query, job_config)\n    if (not args.verbose):\n        display.clear_output()\n    result = query_job.to_dataframe(bqstorage_client=bqstorage_client)\n    if args.destination_var:\n        IPython.get_ipython().push({args.destination_var: result})\n    else:\n        return result", "docstring": "Underlying function for bigquery cell magic\n\nNote:\nThis function contains the underlying logic for the 'bigquery' cell\nmagic. This function is not meant to be called directly.\n\nArgs:\nline (str): \"%%bigquery\" followed by arguments as required\nquery (str): SQL query to run\n\nReturns:\npandas.DataFrame: the query results.", "source": "codesearchnet"}
{"code": "def __init__(self, partitioned_dim_sizes, inner_dim_sizes, dim_size_dtype=None):\n    assert isinstance(partitioned_dim_sizes, (list, tuple))\n    with ops.name_scope(None, 'RaggedTensorDynamicShape', (partitioned_dim_sizes, inner_dim_sizes)):\n        partitioned_dim_sizes = tuple((ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) for i, size in enumerate(partitioned_dim_sizes)))\n        inner_dim_sizes = ops.convert_to_tensor(inner_dim_sizes, name='inner_dim_sizes')\n        if partitioned_dim_sizes:\n            for axis, dimension_size in enumerate(partitioned_dim_sizes):\n                if dimension_size.shape.ndims is None:\n                    raise ValueError('rank of partitioned_dim_sizes[%d] is unknown' % axis)\n                dimension_size.shape.with_rank_at_most(1)\n            if partitioned_dim_sizes[0].shape.ndims == 1:\n                raise ValueError('outermost partitioned dimension must be uniform')\n            if partitioned_dim_sizes[-1].shape.ndims == 0:\n                raise ValueError('innermost partitioned dimension must be ragged')\n        inner_dim_sizes.shape.assert_has_rank(1)\n        if dim_size_dtype is None:\n            dim_size_dtypes = set((p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1))\n            if not dim_size_dtypes:\n                dim_size_dtype = dtypes.int64\n            elif len(dim_size_dtypes) == 1:\n                dim_size_dtype = dim_size_dtypes.pop()\n            else:\n                if not ragged_config.auto_cast_partition_dtype():\n                    raise ValueError('partitioned_dim_sizes must have matching dtypes')\n                dim_size_dtype = dtypes.int64\n        partitioned_dim_sizes = tuple((math_ops.cast(p, dim_size_dtype) for p in partitioned_dim_sizes))\n        inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)\n        self._partitioned_dim_sizes = partitioned_dim_sizes\n        self._inner_dim_sizes = inner_dim_sizes", "docstring": "Creates a RaggedTensorDynamicShape.\n\nArgs:\npartitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for\neach partitioned dimension.  If dimension `d` is uniform, then\n`partitioned_dim_sizes[d]` must be an integer scalar, specifying the\nsize of all slices across dimension `d`.  If dimension `d` is ragged,\nthen `partitioned_dim_sizes[d]` must be an integer vector, specifying\nthe size of each slice across dimension `d`.\ninner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the\nnumber of inner dimensions.  `inner_dim_sizes[n]` is the size of all\nslices across the `n`th inner dimension (which is the\n`(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor.\ndim_size_dtype: dtype for dimension sizes.  If not specified, then it\nis chosen based on the dtypes of `partitioned_dim_sizes` and\n`inner_dim_sizes`.", "source": "github-repos"}
{"code": "def asdatetime(self, naive=True):\n    \n    args = list(self.timetuple()[0:6])+[self.microsecond]\n    if not naive:\n      args.append(self.tzinfo)\n    return datetime.datetime(*args)", "docstring": "Return this datetime_tz as a datetime object.\n\nArgs:\nnaive: Return *without* any tz info.\n\nReturns:\nThis datetime_tz as a datetime object.", "source": "juraj-google-style"}
{"code": "def ParseHeader(table):\n    precondition.AssertIterableType(table, dict)\n    prototype = None\n    for row in table:\n        columns = list(iterkeys(row))\n        if (prototype is None):\n            prototype = columns\n        elif (prototype != columns):\n            message = \"Expected columns '{expected}', got '{actual}' for table {json}\"\n            message = message.format(expected=prototype, actual=columns, json=table)\n            raise ValueError(message)\n    result = rdf_osquery.OsqueryHeader()\n    for name in (prototype or []):\n        result.columns.append(rdf_osquery.OsqueryColumn(name=name))\n    return result", "docstring": "Parses header of osquery output.\n\nArgs:\ntable: A table in a \"parsed JSON\" representation.\n\nReturns:\nA parsed `rdf_osquery.OsqueryHeader` instance.", "source": "codesearchnet"}
{"code": "def search_artists_by_name(self, artist_name: str, limit: int = 5) -> List[NameExternalIDPair]:\n        \n        response: requests.Response = requests.get(\n            self._API_URL_TEMPLATE.format(\"search\"),\n            params={\"q\": artist_name, \"type\": \"artist\", \"limit\": limit},\n            headers={\"Authorization\": \"Bearer {}\".format(self._token.access_token)}\n        )\n\n        \n\n        response.raise_for_status()\n        if not response.text:\n            return []\n\n        result: List[NameExternalIDPair] = []\n        data: List[Dict] = response.json()[\"artists\"][\"items\"]\n        for artist in data:\n            artist = NameExternalIDPair(artist[\"name\"].strip(), artist[\"id\"].strip())\n            if not artist.name or not artist.external_id:\n                raise SpotifyClientError(\"Name or ID is missing\")\n            result.append(artist)\n\n        return result", "docstring": "Returns zero or more artist name - external ID pairs that match the specified artist name.\n\nArguments:\nartist_name (str): The artist name to search in the Spotify API.\nlimit (int): The maximum number of results to return.\n\nReturns:\nZero or more artist name - external ID pairs.\n\nRaises:\nrequests.HTTPError: If an HTTP error occurred during the request.\nSpotifyClientError: If an invalid item is found.", "source": "juraj-google-style"}
{"code": "def convert_line_endings(filename: str, to_unix: bool = False,\n                         to_windows: bool = False) -> None:\n    \n    assert to_unix != to_windows\n    with open(filename, \"rb\") as f:\n        contents = f.read()\n    windows_eol = b\"\\r\\n\"  \n    unix_eol = b\"\\n\"  \n    if to_unix:\n        log.info(\"Converting from Windows to UNIX line endings: {!r}\",\n                 filename)\n        src = windows_eol\n        dst = unix_eol\n    else:  \n        log.info(\"Converting from UNIX to Windows line endings: {!r}\",\n                 filename)\n        src = unix_eol\n        dst = windows_eol\n        if windows_eol in contents:\n            log.info(\"... already contains at least one Windows line ending; \"\n                     \"probably converted before; skipping\")\n            return\n    contents = contents.replace(src, dst)\n    with open(filename, \"wb\") as f:\n        f.write(contents)", "docstring": "Converts a file (in place) from UNIX to Windows line endings, or the\nreverse.\n\nArgs:\nfilename: filename to modify (in place)\nto_unix: convert Windows (CR LF) to UNIX (LF)\nto_windows: convert UNIX (LF) to Windows (CR LF)", "source": "juraj-google-style"}
{"code": "def HeartBeat(self):\n    if (self.allow_overruns or (not self.job.lifetime)):\n        return\n    runtime = (rdfvalue.RDFDatetime.Now() - self.run_state.started_at)\n    if (runtime > self.lifetime):\n        raise LifetimeExceededError(('Cronjob run has exceeded the maximum runtime of %s.' % self.lifetime))", "docstring": "Terminates a cronjob-run if it has exceeded its maximum runtime.\n\nThis is a no-op for cronjobs that allow overruns.\n\nRaises:\nLifetimeExceededError: If the cronjob has exceeded its maximum runtime.", "source": "codesearchnet"}
{"code": "def Insert(self, key, value, row_index):\n    if (row_index < 0):\n        row_index += len(self)\n    if (not (0 <= row_index < len(self))):\n        raise IndexError(('Index \"%s\" is out of bounds.' % row_index))\n    new_row = Row()\n    for idx in self.header:\n        if (self.index(idx) == row_index):\n            new_row[key] = value\n        new_row[idx] = self[idx]\n    self._keys = new_row.header\n    self._values = new_row.values\n    del new_row\n    self._BuildIndex()", "docstring": "Inserts new values at a specified offset.\n\nArgs:\nkey: string for header value.\nvalue: string for a data value.\nrow_index: Offset into row for data.\n\nRaises:\nIndexError: If the offset is out of bands.", "source": "codesearchnet"}
{"code": "def static_uniform_row_length(self):\n    if self._uniform_row_length is not None:\n        return tensor_util.constant_value(self._uniform_row_length)\n    return None", "docstring": "The number of values in each row of this partition, if statically known.\n\nReturns:\nThe number of values in each row of this partition as an `int` (if\nstatically known); or `None` (otherwise).", "source": "github-repos"}
{"code": "def _is_sequence_right_padded(mask):\n    max_seq_length = mask.shape[1]\n    count_of_true = torch.sum(mask, dim=1)\n    batch_size = mask.shape[0]\n    indices = torch.arange(max_seq_length, device=mask.device).repeat(batch_size, 1)\n    right_padded_mask = indices < count_of_true.unsqueeze(1)\n    return torch.all(mask == right_padded_mask)", "docstring": "Check the mask tensor and see if it right padded.\n\ncuDNN uses the sequence length param to skip the tailing\ntimestep. If the data is left padded, or not a strict right padding (has\nmasked value in the middle of the sequence), then cuDNN won't work\nproperly in those cases.\n\nLeft padded data: [[False, False, True, True, True]].\nRight padded data: [[True, True, True, False, False]].\nMixture of mask/unmasked data: [[True, False, True, False, False]].\n\nNote that for the mixed data example above, the actually data RNN should see\nare those 2 Trues (index 0 and 2), the index 1 False should be ignored and\nnot pollute the internal states.\n\nArgs:\nmask: the Boolean tensor with shape [batch, timestep]\n\nReturns:\nboolean scalar tensor, whether the mask is strictly right padded.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor:\n    hidden_states = self.patch_embed(hidden_states)\n    rotary_pos_emb = self.rot_pos_emb(grid_thw)\n    window_index, cu_window_seqlens = self.get_window_index(grid_thw)\n    cu_window_seqlens = torch.tensor(cu_window_seqlens, device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)\n    cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)\n    seq_len, _ = hidden_states.size()\n    hidden_states = hidden_states.reshape(seq_len \n    hidden_states = hidden_states[window_index, :, :]\n    hidden_states = hidden_states.reshape(seq_len, -1)\n    rotary_pos_emb = rotary_pos_emb.reshape(seq_len \n    rotary_pos_emb = rotary_pos_emb[window_index, :, :]\n    rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)\n    emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)\n    position_embeddings = (emb.cos(), emb.sin())\n    cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)\n    cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)\n    for layer_num, blk in enumerate(self.blocks):\n        if layer_num in self.fullatt_block_indexes:\n            cu_seqlens_now = cu_seqlens\n        else:\n            cu_seqlens_now = cu_window_seqlens\n        if self.gradient_checkpointing and self.training:\n            hidden_states = self._gradient_checkpointing_func(blk.__call__, hidden_states, cu_seqlens_now, None, position_embeddings)\n        else:\n            hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings)\n    hidden_states = self.merger(hidden_states)\n    reverse_indices = torch.argsort(window_index)\n    hidden_states = hidden_states[reverse_indices, :]\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):\nThe final hidden states of the model.\ngrid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):\nThe temporal, height and width of feature shape of each image in LLM.\n\nReturns:\n`torch.Tensor`: hidden_states.", "source": "github-repos"}
{"code": "def add_all_transport_reactions(model, boundaries, allow_duplicates=False):\n    \n\n    all_reactions = {}\n    if not allow_duplicates:\n        \n        \n        for rxnid in model.database.reactions:\n            rx = model.database.get_reaction(rxnid)\n            all_reactions[rx] = rxnid\n\n    boundary_pairs = set()\n    for source, dest in boundaries:\n        if source != dest:\n            boundary_pairs.add(tuple(sorted((source, dest))))\n\n    added = set()\n    added_pairs = set()\n    initial_compounds = set(model.compounds)\n    reactions = set(model.database.reactions)\n    for compound in initial_compounds:\n        for c1, c2 in boundary_pairs:\n            compound1 = compound.in_compartment(c1)\n            compound2 = compound.in_compartment(c2)\n            pair = compound1, compound2\n            if pair in added_pairs:\n                continue\n\n            rxnid_tp = create_transport_id(reactions, compound1, compound2)\n\n            reaction_tp = Reaction(Direction.Both, {\n                compound1: -1,\n                compound2: 1\n            })\n            if reaction_tp not in all_reactions:\n                model.database.set_reaction(rxnid_tp, reaction_tp)\n                reactions.add(rxnid_tp)\n            else:\n                rxnid_tp = all_reactions[reaction_tp]\n\n            if not model.has_reaction(rxnid_tp):\n                added.add(rxnid_tp)\n            model.add_reaction(rxnid_tp)\n            added_pairs.add(pair)\n\n    return added", "docstring": "Add all transport reactions to database and to model.\n\nAdd transport reactions for all boundaries. Boundaries are defined\nby pairs (2-tuples) of compartment IDs. Transport reactions are\nadded for all compounds in the model, not just for compounds in the\ntwo boundary compartments.\n\nArgs:\nmodel: :class:`psamm.metabolicmodel.MetabolicModel`.\nboundaries: Set of compartment boundary pairs.\n\nReturns:\nSet of IDs of reactions that were added.", "source": "juraj-google-style"}
{"code": "def fdatasync(self, file_des):\n        \n        \n        if self.filesystem.is_windows_fs or self.filesystem.is_macos:\n            raise AttributeError(\"module 'os' has no attribute 'fdatasync'\")\n        if 0 <= file_des < NR_STD_STREAMS:\n            self.filesystem.raise_os_error(errno.EINVAL)\n        self.filesystem.get_open_file(file_des)", "docstring": "Perform fdatasync for a fake file (in other words, do nothing).\n\nArgs:\nfile_des: The file descriptor of the open file.\n\nRaises:\nOSError: file_des is an invalid file descriptor.\nTypeError: file_des is not an integer.", "source": "juraj-google-style"}
{"code": "def _flatten_dict(original_dict):\n    flat_dict = {}\n    for (key, value) in original_dict.items():\n        if isinstance(value, dict):\n            for (name, tensor) in value.items():\n                if isinstance(tensor, dict):\n                    raise ValueError('flatten_dict only handles 2 levels of nesting.')\n                flat_key = ((('__' + key) + '_') + name)\n                flat_dict[flat_key] = tensor\n        else:\n            flat_dict[key] = value\n    return flat_dict", "docstring": "Flatten dict of dicts into a single dict with appropriate prefixes.\n\nHandles only 2 levels of nesting in the original dict.\n\nArgs:\noriginal_dict: Dict which may contain one or more dicts.\nReturns:\nflat_dict: Dict without any nesting. Any dicts in the original dict have\ntheir keys as prefixes in the new dict.\nRaises:\nValueError if the original dict has more than two levels of nesting.", "source": "codesearchnet"}
{"code": "def analyze_results(import_dict_objects: Dict[str, List[str]], type_hint_objects: Dict[str, List[str]]) -> List[str]:\n\n    def find_duplicates(seq):\n        return [k for k, v in collections.Counter(seq).items() if v > 1]\n    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):\n        return ['Both sides of the init do not have the same backends!']\n    errors = []\n    for key in import_dict_objects.keys():\n        duplicate_imports = find_duplicates(import_dict_objects[key])\n        if duplicate_imports:\n            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')\n        duplicate_type_hints = find_duplicates(type_hint_objects[key])\n        if duplicate_type_hints:\n            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')\n        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):\n            name = 'base imports' if key == 'none' else f'{key} backend'\n            errors.append(f'Differences for {name}:')\n            for a in type_hint_objects[key]:\n                if a not in import_dict_objects[key]:\n                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')\n            for a in import_dict_objects[key]:\n                if a not in type_hint_objects[key]:\n                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')\n    return errors", "docstring": "Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.\n\nArgs:\nimport_dict_objects (`Dict[str, List[str]]`):\nA dictionary mapping backend names (`\"none\"` for the objects independent of any specific backend) to\nlist of imported objects.\ntype_hint_objects (`Dict[str, List[str]]`):\nA dictionary mapping backend names (`\"none\"` for the objects independent of any specific backend) to\nlist of imported objects.\n\nReturns:\n`List[str]`: The list of errors corresponding to mismatches.", "source": "github-repos"}
{"code": "def assert_same_float_dtype(tensors=None, dtype=None):\n    if tensors:\n        dtype = _assert_same_base_type(tensors, dtype)\n    if (not dtype):\n        dtype = tf.float32\n    elif (not is_floating(dtype)):\n        raise ValueError('Expected floating point type, got {}.'.format(dtype))\n    return dtype", "docstring": "Validate and return float type based on `tensors` and `dtype`.\n\nFor ops such as matrix multiplication, inputs and weights must be of the\nsame float type. This function validates that all `tensors` are the same type,\nvalidates that type is `dtype` (if supplied), and returns the type. Type must\nbe a floating point type. If neither `tensors` nor `dtype` is supplied,\nthe function will return `dtypes.float32`.\n\nArgs:\ntensors: Tensors of input values. Can include `None` elements, which will\nbe ignored.\ndtype: Expected type.\n\nReturns:\nValidated type.\n\nRaises:\nValueError: if neither `tensors` nor `dtype` is supplied, or result is not\nfloat, or the common type of the inputs is not a floating point type.", "source": "codesearchnet"}
{"code": "def conv_json(self, uri_format=\"sparql_uri\", add_ids=False):\n        \n        def convert_item(ivalue):\n            \n            nvalue = ivalue\n            if isinstance(ivalue, BaseRdfDataType):\n                if ivalue.type == 'uri':\n                    if ivalue.startswith(\"pyuri\") and uri_format == \"pyuri\":\n                        nvalue = getattr(ivalue, \"sparql\")\n                    else:\n                        nvalue = getattr(ivalue, uri_format)\n                else:\n                    nvalue = ivalue.to_json\n            elif isinstance(ivalue, RdfClassBase):\n                if ivalue.subject.type == \"uri\":\n                    nvalue = ivalue.conv_json(uri_format, add_ids)\n                elif ivalue.subject.type == \"bnode\":\n                    nvalue = ivalue.conv_json(uri_format, add_ids)\n            elif isinstance(ivalue, list):\n                nvalue = []\n                for item in ivalue:\n                    temp = convert_item(item)\n                    nvalue.append(temp)\n            return nvalue\n\n        rtn_val = {key: convert_item(value) for key, value in self.items()}\n        \n        if add_ids:\n\n            if self.subject.type == 'uri':\n                rtn_val['uri'] = self.subject.sparql_uri\n                rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest()\n        \n        return rtn_val", "docstring": "converts the class to a json compatable python dictionary\n\nArgs:\nuri_format('sparql_uri','pyuri'): The format that uri values will\nbe returned\n\nReturns:\ndict: a json compatabile python dictionary", "source": "juraj-google-style"}
{"code": "def deserialize_sparse_tensors(tensors, types, shapes, classes):\n    ret = nest.pack_sequence_as(types, [sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims) if c is sparse_tensor.SparseTensor else tensor for tensor, ty, shape, c in zip(nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes), nest.flatten(classes))])\n    return ret", "docstring": "Deserializes sparse tensors.\n\nArgs:\ntensors: a structure of tensors to deserialize.\ntypes: a structure that holds information about types of `tensors`\nshapes: a structure that holds information about shapes of `tensors`\nclasses: a structure of objects that identify the dataset item classes\n\nReturns:\n`tensors` with any serialized sparse tensors replaced by their deserialized\nversion.", "source": "github-repos"}
{"code": "def list_keyvaults_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.KeyVault/vaults',\n                        '?api-version=', KEYVAULT_API])\n    return do_get_next(endpoint, access_token)", "docstring": "Lists key vaults belonging to this subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. 200 OK.", "source": "juraj-google-style"}
{"code": "def get_package(self, name) -> 'EffectPackage':\n    (name, cls_name) = parse_package_string(name)\n    try:\n        return self.package_map[name]\n    except KeyError:\n        raise EffectError(\"No package '{}' registered\".format(name))", "docstring": "Get a package by python path. Can also contain path to an effect.\n\nArgs:\nname (str): Path to effect package or effect\n\nReturns:\nThe requested EffectPackage\n\nRaises:\nEffectError when no package is found", "source": "codesearchnet"}
{"code": "def update_file(filename, result, content, indent):\n    parts = re.split('---+', content, 2)\n    frontmatter = yaml.safe_load(parts[1])\n    frontmatter['counts'] = result['counts']\n    parts[1] = '\\n{}'.format(yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))\n    result = '---'.join(parts)\n    with open(filename, 'wb') as f:\n        f.write(result.encode('utf-8'))\n    print('{} updated.'.format(filename))", "docstring": "Updates a Jekyll file to contain the counts form an object\n\nThis just converts the results to YAML and adds to the Jekyll frontmatter.\n\nArgs:\nfilename: the Jekyll file to update\nresult: the results object from `wc`\ncontent: the contents of the original file\nindent: the indentation level for dumping YAML", "source": "codesearchnet"}
{"code": "def read_array(self, key, embedded=True):\n        \n        return self.read(key, True, embedded)", "docstring": "Alias for read method that will read any type (e.g., String, KeyValue) and always\nreturn array.\n\nArgs:\nkey (string): The variable to read from the DB.\nembedded (boolean): Resolve embedded variables.\n\nReturns:\n(any): Results retrieved from DB", "source": "juraj-google-style"}
{"code": "def SubtractFromBalance(self, assetId, fixed8_val):\n    found = False\n    for (key, balance) in self.Balances.items():\n        if (key == assetId):\n            self.Balances[assetId] = (self.Balances[assetId] - fixed8_val)\n            found = True\n    if (not found):\n        self.Balances[assetId] = (fixed8_val * Fixed8((- 1)))", "docstring": "Subtract amount to the specified balance.\n\nArgs:\nassetId (UInt256):\nfixed8_val (Fixed8): amount to add.", "source": "codesearchnet"}
{"code": "def launch_run(self, command, project=None, entity=None, run_id=None):\n    query = gql('\\n        mutation launchRun(\\n            $entity: String\\n            $model: String\\n            $runId: String\\n            $image: String\\n            $command: String\\n            $patch: String\\n            $cwd: String\\n            $datasets: [String]\\n        ) {\\n            launchRun(input: {id: $runId, entityName: $entity, patch: $patch, modelName: $model,\\n                image: $image, command: $command, datasets: $datasets, cwd: $cwd}) {\\n                podName\\n                status\\n                runId\\n            }\\n        }\\n        ')\n    patch = BytesIO()\n    if self.git.dirty:\n        self.git.repo.git.execute(['git', 'diff'], output_stream=patch)\n        patch.seek(0)\n    cwd = '.'\n    if self.git.enabled:\n        cwd = (cwd + os.getcwd().replace(self.git.repo.working_dir, ''))\n    return self.gql(query, variable_values={'entity': (entity or self.settings('entity')), 'model': (project or self.settings('project')), 'command': command, 'runId': run_id, 'patch': patch.read().decode('utf8'), 'cwd': cwd})", "docstring": "Launch a run in the cloud.\n\nArgs:\ncommand (str): The command to run\nprogram (str): The file to run\nproject (str): The project to scope the runs to\nentity (str, optional): The entity to scope this project to.  Defaults to public models\nrun_id (str, optional): The run_id to scope to\n\nReturns:\n[{\"podName\",\"status\"}]", "source": "codesearchnet"}
{"code": "def reverse(self):\n    if self.closed():\n        raise ValueError('Attempt to call reverse() on a closed Queryable.')\n    try:\n        r = reversed(self._iterable)\n        return self._create(r)\n    except TypeError:\n        pass\n    return self._create(self._generate_reverse_result())", "docstring": "Returns the sequence reversed.\n\nNote: This method uses deferred execution, but the whole source\nsequence is consumed once execution commences.\n\nReturns:\nThe source sequence in reverse order.\n\nRaises:\nValueError: If the Queryable is closed().", "source": "codesearchnet"}
{"code": "def edit_profile(request):\n    (form, handled) = _handle_profile(request, 'profile')\n    if (handled and (not form.errors)):\n        messages.success(request, 'Your attendee profile was updated.')\n        return redirect('dashboard')\n    data = {'form': form}\n    return render(request, 'registrasion/profile_form.html', data)", "docstring": "View for editing an attendee's profile\n\nThe user must be logged in to edit their profile.\n\nReturns:\nredirect or render:\nIn the case of a ``POST`` request, it'll redirect to ``dashboard``,\nor otherwise, it will render ``registrasion/profile_form.html``\nwith data::\n\n{\n\"form\": form,  # Instance of ATTENDEE_PROFILE_FORM.\n}", "source": "codesearchnet"}
{"code": "def _snapshot_tensor(self, tensor):\n    snapshot_variable = self._create_or_get_tensor_values_cache(tensor.name, tensor.op.graph, tensor.shape.as_list(), tensor.dtype)\n    return state_ops.assign(snapshot_variable, tensor).op", "docstring": "Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable.\n\nArgs:\ntensor: tensor whose values will be stored in a new tf.Variable.\nReturns:\nAn assignment operation.", "source": "github-repos"}
{"code": "def past_stop_threshold(stop_threshold, eval_metric):\n    if (stop_threshold is None):\n        return False\n    if (not isinstance(stop_threshold, numbers.Number)):\n        raise ValueError('Threshold for checking stop conditions must be a number.')\n    if (not isinstance(eval_metric, numbers.Number)):\n        raise ValueError('Eval metric being checked against stop conditions must be a number.')\n    if (eval_metric >= stop_threshold):\n        tf.logging.info('Stop threshold of {} was passed with metric value {}.'.format(stop_threshold, eval_metric))\n        return True\n    return False", "docstring": "Return a boolean representing whether a model should be stopped.\n\nArgs:\nstop_threshold: float, the threshold above which a model should stop\ntraining.\neval_metric: float, the current value of the relevant metric to check.\n\nReturns:\nTrue if training should stop, False otherwise.\n\nRaises:\nValueError: if either stop_threshold or eval_metric is not a number", "source": "codesearchnet"}
{"code": "def find_latest_change_point_index(metric_values: List[Union[float, int]]):\n    change_points_indices = find_change_points(metric_values)\n    change_points_indices = filter_change_points_by_median_threshold(metric_values, change_points_indices)\n    if not change_points_indices:\n        return None\n    change_points_indices.sort()\n    change_point_index = change_points_indices[-1]\n    if is_edge_change_point(change_point_index, len(metric_values), constants._EDGE_SEGMENT_SIZE):\n        logging.info('The change point %s is located at the edge of the data with an edge segment size of %s. This change point will be ignored for now, awaiting additional data. Should the change point persist after gathering more data, an alert will be raised.' % (change_point_index, constants._EDGE_SEGMENT_SIZE))\n        return None\n    return change_point_index", "docstring": "Args:\nmetric_values: Metric values used to run change point analysis.\nReturns:\nint: Right most change point index observed on metric_values.", "source": "github-repos"}
{"code": "def open_connection(self, connection: str, configuration: Config, hostname: Optional[str]=None, username: Optional[str]=None, password: Optional[str]=None, port: Optional[int]=None, platform: Optional[str]=None, extras: Optional[Dict[(str, Any)]]=None, default_to_host_attributes: bool=True) -> ConnectionPlugin:\n    if (connection in self.connections):\n        raise ConnectionAlreadyOpen(connection)\n    self.connections[connection] = self.connections.get_plugin(connection)()\n    if default_to_host_attributes:\n        conn_params = self.get_connection_parameters(connection)\n        self.connections[connection].open(hostname=(hostname if (hostname is not None) else conn_params.hostname), username=(username if (username is not None) else conn_params.username), password=(password if (password is not None) else conn_params.password), port=(port if (port is not None) else conn_params.port), platform=(platform if (platform is not None) else conn_params.platform), extras=(extras if (extras is not None) else conn_params.extras), configuration=configuration)\n    else:\n        self.connections[connection].open(hostname=hostname, username=username, password=password, port=port, platform=platform, extras=extras, configuration=configuration)\n    return self.connections[connection]", "docstring": "Open a new connection.\n\nIf ``default_to_host_attributes`` is set to ``True`` arguments will default to host\nattributes if not specified.\n\nRaises:\nAttributeError: if it's unknown how to establish a connection for the given type\n\nReturns:\nAn already established connection", "source": "codesearchnet"}
{"code": "def __le__(self, other):\n    \n    if not isinstance(other, interface.DateTimeValues):\n      raise ValueError('Other not an instance of DateTimeValues')\n\n    return isinstance(other, Never)", "docstring": "Determines if the date time values are less than or equal to other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are greater than or equal to other.\n\nRaises:\nValueError: if other is not an instance of DateTimeValues.", "source": "juraj-google-style"}
{"code": "def parse(self, filename):\n        \n        filehandle = storage.open_vos_or_local(filename, \"rb\")\n        assert filehandle is not None, \"Failed to open file {} \".format(filename)\n        filestr = filehandle.read()\n        filehandle.close()\n\n        assert filestr is not None, \"File contents are None\"\n\n        observations = self._parse_observation_list(filestr)\n\n        self._parse_observation_headers(filestr, observations)\n\n        sys_header = self._parse_system_header(filestr)\n\n        sources = self._parse_source_data(filestr, observations)\n\n        return AstromData(observations, sys_header, sources, discovery_only=self.discovery_only)", "docstring": "Parses a file into an AstromData structure.\n\nArgs:\nfilename: str\nThe name of the file whose contents will be parsed.\n\nReturns:\ndata: AstromData\nThe file contents extracted into a data structure for programmatic\naccess.", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor=None):\n    batch_size, num_images, num_channels, height, width = pixel_values.shape\n    pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])\n    nb_values_per_image = pixel_values.shape[1:].numel()\n    real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image\n    if not any(real_images_inds):\n        real_images_inds[0] = True\n    pixel_values = pixel_values[real_images_inds].contiguous()\n    if pixel_attention_mask is None:\n        pixel_attention_mask = torch.ones(size=[pixel_values.shape[i] for i in (0, 2, 3)], dtype=torch.bool, device=pixel_values.device)\n    else:\n        pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])\n        pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()\n    patch_size = self.config.vision_config.patch_size\n    patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)\n    patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)\n    patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()\n    image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)\n    image_hidden_states = image_hidden_states.last_hidden_state\n    image_hidden_states = self.connector(image_hidden_states)\n    return image_hidden_states", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\npixel_attention_mask (`torch.LongTensor`, *optional*):\nThe attention mask indicating padded regions in the image.", "source": "github-repos"}
{"code": "def lex_index(n, k, lst):\n    if (len(lst) != k):\n        raise VisualizationError('list should have length k')\n    comb = list(map((lambda x: ((n - 1) - x)), lst))\n    dualm = sum([n_choose_k(comb[((k - 1) - i)], (i + 1)) for i in range(k)])\n    return int(dualm)", "docstring": "Return  the lex index of a combination..\n\nArgs:\nn (int): the total number of options .\nk (int): The number of elements.\nlst (list): list\n\nReturns:\nint: returns int index for lex order\n\nRaises:\nVisualizationError: if length of list is not equal to k", "source": "codesearchnet"}
{"code": "def argsort2(indexable, key=None, reverse=False):\n    if isinstance(indexable, dict):\n        vk_iter = ((v, k) for (k, v) in indexable.items())\n    else:\n        vk_iter = ((v, k) for (k, v) in enumerate(indexable))\n    if (key is None):\n        indices = [k for (v, k) in sorted(vk_iter, reverse=reverse)]\n    else:\n        indices = [k for (v, k) in sorted(vk_iter, key=(lambda vk: key(vk[0])), reverse=reverse)]\n    return indices", "docstring": "Returns the indices that would sort a indexable object.\n\nThis is similar to np.argsort, but it is written in pure python and works\non both lists and dictionaries.\n\nArgs:\nindexable (list or dict): indexable to sort by\n\nReturns:\nlist: indices: list of indices such that sorts the indexable\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> import utool as ut\n>>> # argsort works on dicts\n>>> dict_ = indexable = {'a': 3, 'b': 2, 'c': 100}\n>>> indices = ut.argsort2(indexable)\n>>> assert list(ut.take(dict_, indices)) == sorted(dict_.values())\n>>> # argsort works on lists\n>>> indexable = [100, 2, 432, 10]\n>>> indices = ut.argsort2(indexable)\n>>> assert list(ut.take(indexable, indices)) == sorted(indexable)\n>>> # argsort works on iterators\n>>> indexable = reversed(range(100))\n>>> indices = ut.argsort2(indexable)\n>>> assert indices[0] == 99", "source": "codesearchnet"}
{"code": "def _FindKeys(self, key, names, matches):\n    \n    for name, subkey in iter(key.items()):\n      if name in names:\n        matches.append((name, subkey))\n\n      if isinstance(subkey, dict):\n        self._FindKeys(subkey, names, matches)", "docstring": "Searches the plist key hierarchy for keys with matching names.\n\nIf a match is found a tuple of the key name and value is added to\nthe matches list.\n\nArgs:\nkey (dict[str, object]): plist key.\nnames (list[str]): names of the keys to match.\nmatches (list[str]): keys with matching names.", "source": "juraj-google-style"}
{"code": "def _expand_str_alias(path_cfg, alias_dict, overriding_kargs):\n    \n\n    \n    \n\n    new_path_cfg = alias_dict[path_cfg]\n    \n\n    new_overriding_kargs = dict(alias=path_cfg)\n    \n\n    new_overriding_kargs.update(overriding_kargs)\n    \n\n    return expand_path_cfg(new_path_cfg, alias_dict,new_overriding_kargs)", "docstring": "expand a path config given as a string\n\nArgs:\npath_cfg (str): an alias\nalias_dict (dict):\noverriding_kargs (dict):", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        images_kwargs = AriaProcessorKwargs._defaults.get('images_kwargs', {})\n        images_kwargs.update(kwargs)\n        max_size = images_kwargs.get('max_image_size', None) or self.image_processor.max_image_size\n        num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]\n        num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches]\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def check_error_response(self, body, status):\n    status_code = int(status.split(' ', 1)[0])\n    if (status_code >= 300):\n        raise errors.BackendError(body, status)", "docstring": "Raise an exception if the response from the backend was an error.\n\nArgs:\nbody: A string containing the backend response body.\nstatus: A string containing the backend response status.\n\nRaises:\nBackendError if the response is an error.", "source": "codesearchnet"}
{"code": "def hex_to_name(hexx):\n    \n    for n, h in defaults.COLOURS.items():\n        if (len(n) > 1) and (h == hexx.upper()):\n            return n.lower()\n    return None", "docstring": "Convert hex to a color name, using matplotlib's colour names.\n\nArgs:\nhexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\nstr: The name of the colour, or None if not found.", "source": "juraj-google-style"}
{"code": "def _fetch_preprocessing_callback(fetch):\n    if isinstance(fetch, ops.Operation):\n        operation_fetches.append(fetch)\n        return fetch\n    elif isinstance(fetch, meta_graph_pb2.TensorInfo):\n        tensor_infos.append(fetch)\n        decoded = _get_element_from_tensor_info(fetch, self._func_graph)\n        if tensor_util.is_tf_type(decoded) or isinstance(decoded, composite_tensor.CompositeTensor):\n            tensor_fetches.append(decoded)\n        else:\n            operation_fetches.append(decoded)\n        return decoded\n    elif isinstance(fetch, (tensor_lib.Tensor, composite_tensor.CompositeTensor)):\n        tensor_fetches.append(fetch)\n        return fetch\n    else:\n        graph_element = self.graph.as_graph_element(fetch)\n        return _fetch_preprocessing_callback(graph_element)", "docstring": "Extract out lists of ops, tensors, and tensor type info.\n\nTurns TensorInfos into Tensors in the original `fetches` structure.\nAlso extracts ops from `fetches`.\n\nArgs:\nfetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or\nstring identifying a Tensor or Operation.\n\nReturns:\n`fetch` converted to a Tensor.", "source": "github-repos"}
{"code": "def string_set(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingStringSet':\n    namespace = Metrics.get_namespace(namespace)\n    return Metrics.DelegatingStringSet(MetricName(namespace, name))", "docstring": "Obtains or creates a String set metric.\n\nString set metrics are restricted to string values.\n\nArgs:\nnamespace: A class or string that gives the namespace to a metric\nname: A string that gives a unique name to a metric\n\nReturns:\nA StringSet object.", "source": "github-repos"}
{"code": "def set_colourtemp(self, colourtemp):\n        \n        if not 0 <= colourtemp <= 255:\n            raise ValueError(\"The colour temperature needs to be between 0 and 255.\")\n\n        payload = self.generate_payload(SET, {self.DPS_INDEX_COLOURTEMP: colourtemp})\n        data = self._send_receive(payload)\n        return data", "docstring": "Set the colour temperature of an rgb bulb.\n\nArgs:\ncolourtemp(int): Value for the colour temperature (0-255).", "source": "juraj-google-style"}
{"code": "def resize_for_vision_encoder(self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n    height, width = get_image_size(image, channel_dim=input_data_format)\n    aspect_ratio = width / height\n    if width >= height:\n        width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size\n        height = int(width / aspect_ratio)\n        height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size\n    elif height > width:\n        height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size\n        width = int(height * aspect_ratio)\n        width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size\n    new_size = {'height': height, 'width': width}\n    return self.resize(image, size=new_size, resample=resample, input_data_format=input_data_format, data_format=data_format)", "docstring": "Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.\nArgs:\nimage (`np.ndarray`):\nImages to resize.\nvision_encoder_max_size (`int`):\nMaximum size of the output image. If the image is larger than this size, it will be split into\npatches of this size, and the original image will be concatenated with the patches, resized to max_size.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):\nResampling filter to use when resizing the image.\ndata_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the output image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred", "source": "github-repos"}
{"code": "def update(self, properties=None, description=None):\n    cv = ApplicationConfiguration._props(properties=properties, description=description)\n    res = self.rest_client.session.patch(self.rest_self, headers={'Accept': 'application/json', 'Content-Type': 'application/json'}, json=cv)\n    _handle_http_errors(res)\n    self.json_rep = res.json()\n    return self", "docstring": "Update this application configuration.\n\nTo create or update a property provide its key-value\npair in `properties`.\n\nTo delete a property provide its key with the value ``None``\nin properties.\n\nArgs:\nproperties (dict): Property values to be updated. If ``None`` the properties are unchanged.\ndescription (str): Description for the configuration. If ``None`` the description is unchanged.\n\nReturns:\nApplicationConfiguration: self", "source": "codesearchnet"}
{"code": "def _adjusted_script_code(self, script):\n    script_code = ByteData()\n    if (script[0] == (len(script) - 1)):\n        return script\n    script_code += VarInt(len(script))\n    script_code += script\n    return script_code", "docstring": "Checks if the script code pased in to the sighash function is already\nlength-prepended\nThis will break if there's a redeem script that's just a pushdata\nThat won't happen in practice\n\nArgs:\nscript (bytes): the spend script\nReturns:\n(bytes): the length-prepended script (if necessary)", "source": "codesearchnet"}
{"code": "def add(self, data, name=None):\n    if (name is None):\n        n = len(self.data)\n        while (('Series %d' % n) in self.data):\n            n += 1\n        name = ('Series %d' % n)\n    self.data[name] = data\n    return name", "docstring": "Appends a new column of data to the data source.\n\nArgs:\ndata (seq) : new data to add\nname (str, optional) : column name to use.\nIf not supplied, generate a name of the form \"Series ####\"\n\nReturns:\nstr:  the column name used", "source": "codesearchnet"}
{"code": "def end_of_chunk(prev_tag, tag, prev_type, type_):\n    chunk_end = False\n    if (prev_tag == 'E'):\n        chunk_end = True\n    if (prev_tag == 'S'):\n        chunk_end = True\n    if ((prev_tag == 'B') and (tag == 'B')):\n        chunk_end = True\n    if ((prev_tag == 'B') and (tag == 'S')):\n        chunk_end = True\n    if ((prev_tag == 'B') and (tag == 'O')):\n        chunk_end = True\n    if ((prev_tag == 'I') and (tag == 'B')):\n        chunk_end = True\n    if ((prev_tag == 'I') and (tag == 'S')):\n        chunk_end = True\n    if ((prev_tag == 'I') and (tag == 'O')):\n        chunk_end = True\n    if ((prev_tag != 'O') and (prev_tag != '.') and (prev_type != type_)):\n        chunk_end = True\n    return chunk_end", "docstring": "Checks if a chunk ended between the previous and current word.\n\nArgs:\nprev_tag: previous chunk tag.\ntag: current chunk tag.\nprev_type: previous type.\ntype_: current type.\n\nReturns:\nchunk_end: boolean.", "source": "codesearchnet"}
{"code": "def dv(self, orb):\n    orb = orb.copy(form='cartesian')\n    if (self.frame == 'QSW'):\n        mat = to_qsw(orb).T\n    elif (self.frame == 'TNW'):\n        mat = to_tnw(orb).T\n    else:\n        mat = np.identity(3)\n    return (mat @ self._dv)", "docstring": "Computation of the velocity increment in the reference frame of the orbit\n\nArgs:\norb (Orbit):\nReturn:\nnumpy.array: Velocity increment, length 3", "source": "codesearchnet"}
{"code": "def _prepare_for_training(self, job_name=None):\n        \n        super(Framework, self)._prepare_for_training(job_name=job_name)\n\n        \n        \n        if self.source_dir and not self.source_dir.lower().startswith('s3:\n            validate_source_dir(self.entry_point, self.source_dir)\n\n        \n        \n        local_code = get_config_value('local.local_code', self.sagemaker_session.config)\n        if self.sagemaker_session.local_mode and local_code:\n            \n            if self.source_dir is None:\n                self.source_dir = os.path.dirname(self.entry_point)\n            self.entry_point = os.path.basename(self.entry_point)\n\n            code_dir = 'file:\n            script = self.entry_point\n        else:\n            self.uploaded_code = self._stage_user_code_in_s3()\n            code_dir = self.uploaded_code.s3_prefix\n            script = self.uploaded_code.script_name\n\n        \n        self._hyperparameters[DIR_PARAM_NAME] = code_dir\n        self._hyperparameters[SCRIPT_PARAM_NAME] = script\n        self._hyperparameters[CLOUDWATCH_METRICS_PARAM_NAME] = self.enable_cloudwatch_metrics\n        self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level\n        self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name\n        self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name", "docstring": "Set hyperparameters needed for training. This method will also validate ``source_dir``.\n\nArgs:\n* job_name (str): Name of the training job to be created. If not specified, one is generated,\nusing the base name given to the constructor if applicable.", "source": "juraj-google-style"}
{"code": "def _ExtractJQuery(self, jquery_raw):\n    data_part = ''\n    if (not jquery_raw):\n        return {}\n    if ('[' in jquery_raw):\n        (_, _, first_part) = jquery_raw.partition('[')\n        (data_part, _, _) = first_part.partition(']')\n    elif jquery_raw.startswith('\n        (_, _, first_part) = jquery_raw.partition('{')\n        data_part = '{{{0:s}'.format(first_part)\n    elif ('({' in jquery_raw):\n        (_, _, first_part) = jquery_raw.partition('(')\n        (data_part, _, _) = first_part.rpartition(')')\n    if (not data_part):\n        return {}\n    try:\n        data_dict = json.loads(data_part)\n    except ValueError:\n        return {}\n    return data_dict", "docstring": "Extracts values from a JQuery string.\n\nArgs:\njquery_raw (str): JQuery string.\n\nReturns:\ndict[str, str]: extracted values.", "source": "codesearchnet"}
{"code": "def extract_subtree(self, node):\n        \n        if not isinstance(node, Node):\n            raise TypeError(\"node must be a Node\")\n        r = self.root; self.root = node; o = copy(self); self.root = r; return o", "docstring": "Return a copy of the subtree rooted at ``node``\n\nArgs:\n``node`` (``Node``): The root of the desired subtree\n\nReturns:\n``Tree``: A copy of the subtree rooted at ``node``", "source": "juraj-google-style"}
{"code": "def get_thread(self, thread_key):\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.threads_suffix,\n\t\t\t\t\t\tthread_key\n\t\t\t\t\t\t])\n\t\treturn self._req('get', uri)", "docstring": "Gets a thread specified by thread_key\nArgs:\nthread_key \t\tthread to get\nreturns \t\ta thread dict", "source": "juraj-google-style"}
{"code": "def _get_dataset_showcase_dict(self, showcase):\n        \n        \n        if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):\n            if 'id' not in showcase:\n                showcase = hdx.data.showcase.Showcase.read_from_hdx(showcase['name'])\n            showcase = showcase['id']\n        elif not isinstance(showcase, str):\n            raise HDXError('Type %s cannot be added as a showcase!' % type(showcase).__name__)\n        if is_valid_uuid(showcase) is False:\n            raise HDXError('%s is not a valid showcase id!' % showcase)\n        return {'package_id': self.data['id'], 'showcase_id': showcase}", "docstring": "Get dataset showcase dict\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary\n\nReturns:\ndict: dataset showcase dict", "source": "juraj-google-style"}
{"code": "def is50or60(msg, spd_ref, trk_ref, alt_ref):\n\n    def vxy(v, angle):\n        vx = (v * np.sin(np.radians(angle)))\n        vy = (v * np.cos(np.radians(angle)))\n        return (vx, vy)\n    if (not (bds50.is50(msg) and bds60.is60(msg))):\n        return None\n    h50 = bds50.trk50(msg)\n    v50 = bds50.gs50(msg)\n    if ((h50 is None) or (v50 is None)):\n        return 'BDS50,BDS60'\n    h60 = bds60.hdg60(msg)\n    m60 = bds60.mach60(msg)\n    i60 = bds60.ias60(msg)\n    if ((h60 is None) or ((m60 is None) and (i60 is None))):\n        return 'BDS50,BDS60'\n    m60 = (np.nan if (m60 is None) else m60)\n    i60 = (np.nan if (i60 is None) else i60)\n    XY5 = vxy((v50 * aero.kts), h50)\n    XY6m = vxy(aero.mach2tas(m60, (alt_ref * aero.ft)), h60)\n    XY6i = vxy(aero.cas2tas((i60 * aero.kts), (alt_ref * aero.ft)), h60)\n    allbds = ['BDS50', 'BDS60', 'BDS60']\n    X = np.array([XY5, XY6m, XY6i])\n    Mu = np.array(vxy((spd_ref * aero.kts), trk_ref))\n    try:\n        dist = np.linalg.norm((X - Mu), axis=1)\n        BDS = allbds[np.nanargmin(dist)]\n    except ValueError:\n        return 'BDS50,BDS60'\n    return BDS", "docstring": "Use reference ground speed and trk to determine BDS50 and DBS60.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\nspd_ref (float): reference speed (ADS-B ground speed), kts\ntrk_ref (float): reference track (ADS-B track angle), deg\nalt_ref (float): reference altitude (ADS-B altitude), ft\n\nReturns:\nString or None: BDS version, or possible versions, or None if nothing matches.", "source": "codesearchnet"}
{"code": "def CompressedHistograms(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.CompressedHistograms(tag)", "docstring": "Retrieve the compressed histogram events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.CompressedHistogramEvents`.", "source": "codesearchnet"}
{"code": "def get_itasser_models(self, homology_raw_dir, custom_itasser_name_mapping=None, outdir=None, force_rerun=False):\n    counter = 0\n    for g in tqdm(self.genes):\n        if (custom_itasser_name_mapping and (g.id in custom_itasser_name_mapping)):\n            hom_id = custom_itasser_name_mapping[g.id]\n            if (not op.exists(op.join(homology_raw_dir, hom_id))):\n                hom_id = g.id\n        else:\n            hom_id = g.id\n        new_itasser_name = (hom_id + '_model1')\n        orig_itasser_dir = op.join(homology_raw_dir, hom_id)\n        try:\n            itasser_prop = g.protein.load_itasser_folder(ident=hom_id, itasser_folder=orig_itasser_dir, organize=True, outdir=outdir, organize_name=new_itasser_name, force_rerun=force_rerun)\n        except OSError:\n            log.debug('{}: homology model folder unavailable'.format(g.id))\n            continue\n        except IOError:\n            log.debug('{}: homology model unavailable'.format(g.id))\n            continue\n        if itasser_prop.structure_file:\n            counter += 1\n        else:\n            log.debug('{}: homology model file unavailable, perhaps modelling did not finish'.format(g.id))\n    log.info('Completed copying of {} I-TASSER models to GEM-PRO directory. See the \"df_homology_models\" attribute for a summary dataframe.'.format(counter))", "docstring": "Copy generated I-TASSER models from a directory to the GEM-PRO directory.\n\nArgs:\nhomology_raw_dir (str): Root directory of I-TASSER folders.\ncustom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.\nInput a dict of {model_gene: ITASSER_folder}.\noutdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories\nwere not created initially\nforce_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory", "source": "codesearchnet"}
{"code": "def get_node(self, role: str, default=None) -> BioCNode:\n        \n        return next((node for node in self.nodes if node.role == role), default)", "docstring": "Get the first node with role\n\nArgs:\nrole: role\ndefault: node returned instead of raising StopIteration\n\nReturns:\nthe first node with role", "source": "juraj-google-style"}
{"code": "def create_from_binary(cls, mft_config, binary_data, entry_number):\n    bin_view = memoryview(binary_data)\n    entry = None\n    if (bin_view[0:4] != b'\\x00\\x00\\x00\\x00'):\n        try:\n            header = MFTHeader.create_from_binary(mft_config.ignore_signature_check, bin_view[:MFTHeader.get_representation_size()])\n        except HeaderError as e:\n            e.update_entry_number(entry_number)\n            e.update_entry_binary(binary_data)\n            raise\n        entry = cls(header, _defaultdict(list))\n        if (header.mft_record != entry_number):\n            _MOD_LOGGER.warning(\"The MFT entry number doesn't match. %d != %d\", entry_number, header.mft_record)\n        if (len(binary_data) != header.entry_alloc_len):\n            _MOD_LOGGER.error('Expected MFT size is different than entry size.')\n            raise EntryError(f'Expected MFT size ({len(binary_data)}) is different than entry size ({header.entry_alloc_len}).', binary_data, entry_number)\n        if mft_config.apply_fixup_array:\n            apply_fixup_array(bin_view, header.fx_offset, header.fx_count, header.entry_alloc_len)\n        entry._load_attributes(mft_config, bin_view[header.first_attr_offset:])\n    bin_view.release()\n    return entry", "docstring": "Creates a MFTEntry from a binary stream. It correctly process\nthe binary data extracting the MFTHeader, all the attributes and the\nslack information from the binary stream.\n\nThe binary data WILL be changed to apply the fixup array.\n\nArgs:\nmft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells\nhow the library will interpret data.\nbinary_data (bytearray) - A binary stream with the data to extract.\nThis has to be a writeable and support the memoryview call\nentry_number (int) - The entry number for this entry\n\nReturns:\nMFTEntry: If the object is empty, returns None, otherwise, new object MFTEntry", "source": "codesearchnet"}
{"code": "def near(point, dist, points):\n    for cmpt in points:\n        if (haversine(point, cmpt) <= dist):\n            return True\n    return False", "docstring": "Determine if the given point is within dist of any of points.\n\nArgs:\npoint ((float,float)): A latitude, longitude float tuple.\ndist (int): A distance in mm ( base units )\npoints (list): A list of latitude, longitude float tuples to compare against.", "source": "codesearchnet"}
{"code": "def __init__(self, structure_matcher=StructureMatcher(\n                 comparator=ElementComparator()), symprec=None):\n        \n        self.symprec = symprec\n        self.structure_list = defaultdict(list)\n        if isinstance(structure_matcher, dict):\n            self.structure_matcher = StructureMatcher.from_dict(structure_matcher)\n        else:\n            self.structure_matcher = structure_matcher", "docstring": "Remove duplicate structures based on the structure matcher\nand symmetry (if symprec is given).\n\nArgs:\nstructure_matcher: Provides a structure matcher to be used for\nstructure comparison.\nsymprec: The precision in the symmetry finder algorithm if None (\ndefault value), no symmetry check is performed and only the\nstructure matcher is used. A recommended value is 1e-5.", "source": "juraj-google-style"}
{"code": "def setDocumentedBy(self, documented_pid, documenting_pid):\n    self._check_initialized()\n    documented_id = self.getObjectByPid(documented_pid)\n    documenting_id = self.getObjectByPid(documenting_pid)\n    self.add((documented_id, CITO.isDocumentedBy, documenting_id))", "docstring": "Add a CiTO, the Citation Typing Ontology, triple asserting that\n``documented_pid`` isDocumentedBy ``documenting_pid``.\n\nAdds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``\n\nArgs:\ndocumented_pid: str\nPID of a Science Object that is documented by ``documenting_pid``.\n\ndocumenting_pid: str\nPID of a Science Object that documents ``documented_pid``.", "source": "codesearchnet"}
{"code": "def cp(src, dst):\n    if isdir(src):\n        if isdir(dst):\n            rm(dst)\n        shutil.copytree(src, dst)\n    elif isfile(src):\n        shutil.copy(src, dst)\n    else:\n        raise IOError(\"Source '{0}' not found\".format(src))", "docstring": "Copy a file or directory.\n\nIf source is a directory, this recursively copies the directory\nand its contents. If the destination is a directory, then this\ncreates a copy of the source in the destination directory with the\nsame basename.\n\nIf the destination already exists, this will attempt to overwrite\nit.\n\nArguments:\n\nsrc (string): path to the source file or directory.\ndst (string): path to the destination file or directory.\n\nRaises:\n\nIOError: if source does not exist.", "source": "codesearchnet"}
{"code": "def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor]=None, dim=None) -> torch.Tensor:\n    if weights is not None:\n        weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))\n        sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)\n        return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights\n    else:\n        return input_tensor.mean(dim=dim)", "docstring": "Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,\nmeaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.\n\nArgs:\ninput_tensor (`torch.FloatTensor`):\nInput tensor, of which the average must be computed.\nweights (`torch.FloatTensor`, *optional*):\nWeights tensor, of the same shape as `input_tensor`.\ndim (`int`, *optional*):\nThe dim along which to average `input_tensor`.\n\nReturns:\n`torch.FloatTensor`: The tensor with values averaged along the specified `dim`.", "source": "github-repos"}
{"code": "def transform_table(self, table, table_meta, missing=None):\n    if (missing is None):\n        missing = self.missing\n    else:\n        self.missing = missing\n        warnings.warn(DEPRECATION_MESSAGE.format('transform_table'), DeprecationWarning)\n    content = {}\n    columns = []\n    table_name = table_meta['name']\n    for field in table_meta['fields']:\n        column_name = field['name']\n        if (missing and table[column_name].isnull().any()):\n            null_transformer = transformers.NullTransformer(field)\n            clean_column = null_transformer.fit_transform(table[column_name])\n            null_name = ('?' + column_name)\n            columns.append(null_name)\n            content[null_name] = clean_column[null_name].values\n            column = clean_column[column_name]\n        else:\n            column = table[column_name].to_frame()\n        transformer = self.transformers[(table_name, column_name)]\n        content[column_name] = transformer.transform(column)[column_name].values\n        columns.append(column_name)\n    return pd.DataFrame(content, columns=columns)", "docstring": "Apply the stored transformers to `table`.\n\nArgs:\ntable(pandas.DataFrame):     Contents of the table to be transformed.\n\ntable_meta(dict):   Metadata for the given table.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\npandas.DataFrame: Transformed table.", "source": "codesearchnet"}
{"code": "def sort_response(response: Dict[str, Any]) -> OrderedDict:\n    \n    root_order = [\"jsonrpc\", \"result\", \"error\", \"id\"]\n    error_order = [\"code\", \"message\", \"data\"]\n    req = OrderedDict(sorted(response.items(), key=lambda k: root_order.index(k[0])))\n    if \"error\" in response:\n        req[\"error\"] = OrderedDict(\n            sorted(response[\"error\"].items(), key=lambda k: error_order.index(k[0]))\n        )\n    return req", "docstring": "Sort the keys in a JSON-RPC response object.\n\nThis has no effect other than making it nicer to read. Useful in Python 3.5 only,\ndictionaries are already sorted in newer Python versions.\n\nExample::\n\n>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))\n{\"jsonrpc\": \"2.0\", \"result\": 5, \"id\": 1}\n\nArgs:\nresponse: Deserialized JSON-RPC response.\n\nReturns:\nThe same response, sorted in an OrderedDict.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def job_monitor(job, interval=None, monitor_async=False, quiet=False, output=sys.stdout):\n    if (interval is None):\n        _interval_set = False\n        interval = 2\n    else:\n        _interval_set = True\n    if _NOTEBOOK_ENV:\n        if monitor_async:\n            try:\n                import ipywidgets as widgets\n            except ImportError:\n                raise ImportError('These functions  need ipywidgets. Run \"pip install ipywidgets\" before.')\n            from qiskit.tools.jupyter.jupyter_magics import _html_checker\n            style = 'font-size:16px;'\n            header = \"<p style='{style}'>Job Status: %s </p>\".format(style=style)\n            status = widgets.HTML(value=(header % job.status().value))\n            display(status)\n            thread = threading.Thread(target=_html_checker, args=(job, interval, status, header))\n            thread.start()\n        else:\n            _text_checker(job, interval, _interval_set, quiet=quiet, output=output)\n    else:\n        if monitor_async:\n            raise QiskitError('monitor_async only available in Jupyter notebooks.')\n        _text_checker(job, interval, _interval_set, quiet=quiet, output=output)", "docstring": "Monitor the status of a IBMQJob instance.\n\nArgs:\njob (BaseJob): Job to monitor.\ninterval (int): Time interval between status queries.\nmonitor_async (bool): Monitor asyncronously (in Jupyter only).\nquiet (bool): If True, do not print status messages.\noutput (file): The file like object to write status messages to.\nBy default this is sys.stdout.\n\nRaises:\nQiskitError: When trying to run async outside of Jupyter\nImportError: ipywidgets not available for notebook.", "source": "codesearchnet"}
{"code": "def set_filename_and_line_from_caller(self, offset: int=0) -> int:\n    retcode = self.SUCCESS\n    frame = inspect.currentframe()\n    if not frame:\n        return self.FAILURE\n    frame = cast(types.FrameType, frame)\n    for _ in range(offset + 1):\n        parent = frame.f_back\n        if parent is None:\n            retcode = self.HEURISTIC_USED\n            break\n        parent = cast(types.FrameType, parent)\n        frame = parent\n    self.filename = frame.f_code.co_filename\n    self.lineno = cast(int, frame.f_lineno)\n    return retcode", "docstring": "Set filename and line using the caller's stack frame.\n\nIf the requested stack information is not available, a heuristic may\nbe applied and self.HEURISTIC USED will be returned.  If the heuristic\nfails then no change will be made to the filename and lineno members\n(None by default) and self.FAILURE will be returned.\n\nArgs:\noffset: Integer.  If 0, the caller's stack frame is used.  If 1,\nthe caller's caller's stack frame is used.  Larger values are\npermissible but if out-of-range (larger than the number of stack\nframes available) the outermost stack frame will be used.\n\nReturns:\nTraceableObject.SUCCESS if appropriate stack information was found,\nTraceableObject.HEURISTIC_USED if the offset was larger than the stack,\nand TraceableObject.FAILURE if the stack was empty.", "source": "github-repos"}
{"code": "def are_equal(self, mol1, mol2):\n        \n        b1 = set(self._get_bonds(mol1))\n        b2 = set(self._get_bonds(mol2))\n        return b1 == b2", "docstring": "Compare the bond table of the two molecules.\n\nArgs:\nmol1: first molecule. pymatgen Molecule object.\nmol2: second moleculs. pymatgen Molecule objec.", "source": "juraj-google-style"}
{"code": "def load_yaml_config(conf_file):\n    \n    \n    global g_config\n\n    with open(conf_file) as fp:\n        \n        g_config = util.yaml_load(fp)\n\n        \n        \n        src_dir = get_path('src_dir', None)\n        if src_dir is not None:\n            sys.path.insert(0, src_dir)\n\n        for cmd in get('commands', []):\n            _import(cmd)", "docstring": "Load a YAML configuration.\n\nThis will not update the configuration but replace it entirely.\n\nArgs:\nconf_file (str):\nPath to the YAML config. This function will not check the file name\nor extension and will just crash if the given file does not exist or\nis not a valid YAML file.", "source": "juraj-google-style"}
{"code": "def inspect_image(self, image):\n    return self._result(self._get(self._url('/images/{0}/json', image)), True)", "docstring": "Get detailed information about an image. Similar to the ``docker\ninspect`` command, but only for images.\n\nArgs:\nimage (str): The image to inspect\n\nReturns:\n(dict): Similar to the output of ``docker inspect``, but as a\nsingle dict\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def initial_value(self):\n    return self._initial_value", "docstring": "Returns the Tensor used as the initial value for the variable.\n\nNote that this is different from `initialized_value()` which runs\nthe op that initializes the variable before returning its value.\nThis method returns the tensor that is used by the op that initializes\nthe variable.\n\nReturns:\nA `Tensor`.", "source": "github-repos"}
{"code": "def __init__(self, choices=None, validator=None, **kwargs):\n    \n    self.choices = choices\n    subvalidator = validator or String()\n    self.validator = List(validator=subvalidator)\n\n    \n    for choice in self.choices:\n      subvalidator.Validate(choice)\n    super(MultiChoice, self).__init__(**kwargs)", "docstring": "Create a multichoice object and validate choices.\n\nArgs:\nchoices: list of available choices\nvalidator: validator to use for each of the list *items* the validator for\nthe top level is a list.\n**kwargs: passed through to parent class.", "source": "juraj-google-style"}
{"code": "def load_model_using_search_path(\n            self, filename, model, search_path, is_main_model=False,\n            encoding='utf8', add_to_local_models=True):\n        \n        if (model):\n            self.update_model_in_repo_based_on_filename(model)\n        for the_path in search_path:\n            full_filename = join(the_path, filename)\n            \n            if exists(full_filename):\n                the_metamodel = \\\n                    MetaModelProvider.get_metamodel(model, full_filename)\n                return self.load_model(the_metamodel,\n                                       full_filename,\n                                       is_main_model,\n                                       encoding=encoding,\n                                       add_to_local_models=add_to_local_models)\n\n        raise IOError(\n            errno.ENOENT, os.strerror(errno.ENOENT), filename)", "docstring": "add a new model to all relevant objects\n\nArgs:\nfilename: models to be loaded\nmodel: model holding the loaded models in its _tx_model_repository\nfield (may be None).\nsearch_path: list of search directories.\n\nReturns:\nthe loaded model", "source": "juraj-google-style"}
{"code": "def generate_func_call(name, args=None, kwargs=None):\n    \n    all_args = []\n    if args:\n        all_args.extend(args)\n    if kwargs:\n        all_args.extend('{}={}'.format(k, v)\n                        for k, v in kwargs if v is not None)\n    return '{}({})'.format(name, ', '.join(all_args))", "docstring": "Generates code to call a function.\n\nArgs:\nname (str): The function name.\nargs (list[str]): Each positional argument.\nkwargs (list[tuple]): Each tuple is (arg: str, value: str). If\nvalue is None, then the keyword argument is omitted. Otherwise,\nif the value is not a string, then str() is called on it.\n\nReturns:\nstr: Code to call a function.", "source": "juraj-google-style"}
{"code": "def metadata(self) -> Dict[str, Any]:\n    return self._metadata", "docstring": "Metadata of this field.\n\nMetadata is defined as a dict type, so we can add multiple annotations\nto a field.\n\nuserdata = field.metadata.get('userdata', None):\n\nReturns:\nMetadata of this field as a dict.", "source": "github-repos"}
{"code": "def eval(x):\n    return get_value(to_dense(x))", "docstring": "Evaluates the value of a variable.\n\nArgs:\nx: A variable.\n\nReturns:\nA Numpy array.\n\nExamples:\n\n>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),\n...                                  dtype='float32')\n>>> tf.keras.backend.eval(kvar)\narray([[1.,  2.],\n[3.,  4.]], dtype=float32)", "source": "github-repos"}
{"code": "def minimum(x1, x2, output_shape=None, name=None):\n    output_shape = convert_to_shape(output_shape)\n    with tf.name_scope(name, default_name='minimum'):\n        (x1, x2) = binary_arguments_to_tensors(x1, x2)\n        return MinMaxOperation(tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape)).outputs[0]", "docstring": "Binary minimum with broadcsting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def compare_with_existing(self, region='us-east-1', onetime=False):\n        \n        pipelines = self.get_existing_pipelines()\n        pipeline_id = None\n        found = False\n        for pipeline in pipelines:\n            correct_app_and_region = (pipeline['application'] == self.app_name) and (region in pipeline['name'])\n            if onetime:\n                onetime_str = \"(onetime-{})\".format(self.environments[0])\n                if correct_app_and_region and onetime_str in pipeline['name']:\n                    found = True\n            elif correct_app_and_region:\n                found = True\n\n            if found:\n                self.log.info('Existing pipeline found - %s', pipeline['name'])\n                pipeline_id = pipeline['id']\n                break\n        else:\n            self.log.info('No existing pipeline found')\n\n        return pipeline_id", "docstring": "Compare desired pipeline with existing pipelines.\n\nArgs:\nregion (str): Region of desired pipeline.\nonetime (bool): Looks for different pipeline if Onetime\n\nReturns:\nstr: pipeline_id if existing, empty string of not.", "source": "juraj-google-style"}
{"code": "def _send_notification(self, handle, value):\n    value_len = len(value)\n    value = bytes(value)\n    payload = struct.pack(('<BHB%ds' % value_len), 255, handle, value_len, value)\n    response = self._send_command(2, 5, payload)\n    (result,) = unpack('<H', response.payload)\n    if (result != 0):\n        return (False, {'reason': 'Error code from BLED112 notifying a value', 'code': result, 'handle': handle, 'value': value})\n    return (True, None)", "docstring": "Send a notification to all connected clients on a characteristic\n\nArgs:\nhandle (int): The handle we wish to notify on\nvalue (bytearray): The value we wish to send", "source": "codesearchnet"}
{"code": "def GetMetadata(self, metadata_key='', recursive=True, timeout=None, retry=True):\n    return self._HandleMetadataUpdate(metadata_key=metadata_key, recursive=recursive, wait=False, timeout=timeout, retry=retry)", "docstring": "Retrieve the contents of metadata server for a metadata key.\n\nArgs:\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\ntimeout: int, timeout in seconds for returning metadata output.\nretry: bool, True if we should retry on failure.\n\nReturns:\njson, the deserialized contents of the metadata server or None if error.", "source": "codesearchnet"}
{"code": "def __init__(self, name, pivot):\n    super(XLACompileContext, self).__init__()\n    self._name = name\n    self._name_as_bytes = compat.as_bytes(name)\n    self._unsupported_ops = []\n    self._pivot = pivot", "docstring": "Builds a new XLACompileContext.\n\nArgs:\nname: a unique name for the context, used to populate the\n`_xla_compile_id` attribute.\npivot: a pivot node. Nodes in the XLACompileContext that do not have any\ninputs will have a control dependency on the pivot node. This ensures\nthat nodes are correctly included in any enclosing control flow\ncontexts.", "source": "github-repos"}
{"code": "def get_html_content(id_: str) -> str:\n    try:\n        node = nodes.Node.from_id(id_)\n        return node.inner_html\n    except Exception as e:\n        epy.reraise(e, prefix='`ecolab.inspect` internal error. Please report an issue.\\n')", "docstring": "Returns the inner content of the block id.\n\nIs called the first time a block is expanded.\n\nArgs:\nid_: Id of the block to load\n\nReturns:\nThe html to add.", "source": "github-repos"}
{"code": "def remove(self, processor_identity):\n    with self._condition:\n        processor_types = self._identities.get(processor_identity)\n        if (processor_types is None):\n            LOGGER.warning('transaction processor with identity %s tried to unregister but was not registered', processor_identity)\n            return\n        for processor_type in processor_types:\n            if (processor_type not in self._processors):\n                LOGGER.warning('processor type %s not a known processor type but is associated with identity %s', processor_type, processor_identity)\n                continue\n            self._processors[processor_type].remove_processor(processor_identity=processor_identity)\n            if (not self._processors[processor_type]):\n                del self._processors[processor_type]", "docstring": "Removes all of the Processors for\na particular transaction processor zeromq identity.\n\nArgs:\nprocessor_identity (str): The zeromq identity of the transaction\nprocessor.", "source": "codesearchnet"}
{"code": "def log_error(cls, msg):\n        \n        cls.error_logger.error(msg)\n        cls.debug_logger.debug(msg)", "docstring": "Logs the provided error message to both the error logger and the debug logger logging\ninstances.\n\nArgs:\nmsg: `str`. The error message to log.", "source": "juraj-google-style"}
{"code": "def dist_point_line(p, l1, l2):\n    cross_prod = np.cross((l2 - l1), (p - l1))\n    return (np.linalg.norm(cross_prod) / np.linalg.norm((l2 - l1)))", "docstring": "compute the orthogonal distance between from the line that goes through\nthe points l1, l2 and the point p\n\nArgs:\np, l1, l2 : iterable\npoint\nindices 0, 1, 2 corresponding to cartesian coordinates", "source": "codesearchnet"}
{"code": "def check_column(state, name, missing_msg=None, expand_msg=None):\n    if (missing_msg is None):\n        missing_msg = \"We expected to find a column named `{{name}}` in the result of your query, but couldn't.\"\n    if (expand_msg is None):\n        expand_msg = 'Have another look at your query result. '\n    msg_kwargs = {'name': name}\n    has_result(state)\n    stu_res = state.student_result\n    sol_res = state.solution_result\n    if (name not in sol_res):\n        raise BaseException(('name %s not in solution column names' % name))\n    if (name not in stu_res):\n        _msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)\n        state.do_test(_msg)\n    return state.to_child(append_message={'msg': expand_msg, 'kwargs': msg_kwargs}, student_result={name: stu_res[name]}, solution_result={name: sol_res[name]})", "docstring": "Zoom in on a particular column in the query result, by name.\n\nAfter zooming in on a column, which is represented as a single-column query result,\nyou can use ``has_equal_value()`` to verify whether the column in the solution query result\nmatches the column in student query result.\n\nArgs:\nname: name of the column to zoom in on.\nmissing_msg: if specified, this overrides the automatically generated feedback\nmessage in case the column is missing in the student query result.\nexpand_msg: if specified, this overrides the automatically generated feedback\nmessage that is prepended to feedback messages that are thrown\nfurther in the SCT chain.\n\n:Example:\n\nSuppose we are testing the following SELECT statements\n\n* solution: ``SELECT artist_id as id, name FROM artists``\n* student : ``SELECT artist_id, name       FROM artists``\n\nWe can write the following SCTs: ::\n\n# fails, since no column named id in student result\nEx().check_column('id')\n\n# passes, since a column named name is in student_result\nEx().check_column('name')", "source": "codesearchnet"}
{"code": "def setup_session(self, server, hooks, graph_default_context):\n        \n        if self.execution_type == \"distributed\":\n            \n            \n            session_creator = tf.train.ChiefSessionCreator(\n                scaffold=self.scaffold,\n                master=server.target,\n                config=self.session_config,\n                checkpoint_dir=None,\n                checkpoint_filename_with_path=None\n            )\n            \n            \n            \n            \n            \n            \n            \n\n            \n            self.monitored_session = tf.train.MonitoredSession(\n                session_creator=session_creator,\n                hooks=hooks,\n                stop_grace_period_secs=120  \n            )\n            \n            if self.tf_session_dump_dir != \"\":\n                self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)\n        else:\n            \n            self.monitored_session = tf.train.SingularMonitoredSession(\n                hooks=hooks,\n                scaffold=self.scaffold,\n                master='',  \n                config=self.session_config,  \n                checkpoint_dir=None\n            )\n\n        if graph_default_context:\n            graph_default_context.__exit__(None, None, None)\n        self.graph.finalize()\n\n        \n        self.monitored_session.__enter__()\n        self.session = self.monitored_session._tf_sess()", "docstring": "Creates and then enters the session for this model (finalizes the graph).\n\nArgs:\nserver (tf.train.Server): The tf.train.Server object to connect to (None for single execution).\nhooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.\ngraph_default_context: The graph as_default() context that we are currently in.", "source": "juraj-google-style"}
{"code": "def add_custom_column_spec(self, spec: ColumnSpec) -> 'ColumnSpecsBuilder':\n    self._specs.append(spec)\n    return self", "docstring": "Add a custom :class:`.ColumnSpec` to the builder.\n\nUse this method when you need complete control over the :class:`.ColumnSpec`\n, including custom value extraction and type handling.\n\nArgs:\nspec: A :class:`.ColumnSpec` instance defining the column name, type,\nvalue extraction, and optional SQL type casting.\n\nReturns:\nSelf for method chaining\n\nExamples:\nCustom text column from chunk metadata:\n\n>>> builder.add_custom_column_spec(\n...     ColumnSpec.text(\n...         name=\"source_and_id\",\n...         value_fn=lambda chunk:         ...             f\"{chunk.metadata.get('source')}_{chunk.id}\"\n...     )\n... )", "source": "github-repos"}
{"code": "def load(self, fobj, index=None):\n        \n        if index is None:\n            index = self._get_tab_index()\n        page = self.pages[index]\n\n        if fobj is None:\n            return\n\n\n        if not isinstance(fobj, tuple(page.clss_load)):\n            raise RuntimeError('Object to load must be in {0!s} (not a {1!s})'.format(\n             [x.__name__ for x in page.clss_load], fobj.__class__.__name__))\n\n        page.editor.load(fobj)\n        self._update_gui_text_tabs()", "docstring": "Loads given DataFile object. **tolerant with None**\n\nArgs:\nfobj: object of one of accepted classes\nindex: tab index to load fobj into. If not passed, loads into current tab", "source": "juraj-google-style"}
{"code": "def horizontal_infrared_radiation_intensity(self, value=9999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `horizontal_infrared_radiation_intensity`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `horizontal_infrared_radiation_intensity`')\n    self._horizontal_infrared_radiation_intensity = value", "docstring": "Corresponds to IDD Field `horizontal_infrared_radiation_intensity`\n\nArgs:\nvalue (float): value for IDD Field `horizontal_infrared_radiation_intensity`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def run(self, circuit):\n    name = circuit.name\n    dag = circuit_to_dag(circuit)\n    del circuit\n    for passset in self.working_list:\n        for pass_ in passset:\n            dag = self._do_pass(pass_, dag, passset.options)\n    circuit = dag_to_circuit(dag)\n    circuit.name = name\n    return circuit", "docstring": "Run all the passes on a QuantumCircuit\n\nArgs:\ncircuit (QuantumCircuit): circuit to transform via all the registered passes\n\nReturns:\nQuantumCircuit: Transformed circuit.", "source": "codesearchnet"}
{"code": "def __init__(self, func, name=None, indexed=None,\n               repeated=None, verbose_name=None):\n    \n    super(ComputedProperty, self).__init__(name=name, indexed=indexed,\n                                           repeated=repeated,\n                                           verbose_name=verbose_name)\n    self._func = func", "docstring": "Constructor.\n\nArgs:\nfunc: A function that takes one argument, the model instance, and returns\na calculated value.", "source": "juraj-google-style"}
{"code": "def modified_files(root, tracked_only=False, commit=None):\n    assert os.path.isabs(root), ('Root has to be absolute, got: %s' % root)\n    command = ['hg', 'status']\n    if commit:\n        command.append(('--change=%s' % commit))\n    status_lines = subprocess.check_output(command).decode('utf-8').split(os.linesep)\n    modes = ['M', 'A']\n    if (not tracked_only):\n        modes.append('\\\\?')\n    modes_str = '|'.join(modes)\n    modified_file_status = utils.filter_lines(status_lines, ('(?P<mode>%s) (?P<filename>.+)' % modes_str), groups=('filename', 'mode'))\n    return dict(((os.path.join(root, filename), mode) for (filename, mode) in modified_file_status))", "docstring": "Returns a list of files that has been modified since the last commit.\n\nArgs:\nroot: the root of the repository, it has to be an absolute path.\ntracked_only: exclude untracked files when True.\ncommit: SHA1 of the commit. If None, it will get the modified files in the\nworking copy.\n\nReturns: a dictionary with the modified files as keys, and additional\ninformation as value. In this case it adds the status returned by\nhg status.", "source": "codesearchnet"}
{"code": "def parse_config_file(config_file, skip_unknown=False):\n  \n  for reader, existence_check in _FILE_READERS:\n    if existence_check(config_file):\n      with reader(config_file) as f:\n        parse_config(f, skip_unknown=skip_unknown)\n        return\n  raise IOError('Unable to open file: {}'.format(config_file))", "docstring": "Parse a Gin config file.\n\nArgs:\nconfig_file: The path to a Gin config file.\nskip_unknown: A boolean indicating whether unknown configurables and imports\nshould be skipped instead of causing errors (alternatively a list of\nconfigurable names to skip if unknown). See `parse_config` for additional\ndetails.\n\nRaises:\nIOError: If `config_file` cannot be read using any register file reader.", "source": "juraj-google-style"}
{"code": "def ParseOptions(self, options):\n    \n    self._ParseInformationalOptions(options)\n\n    self._verbose = getattr(options, 'verbose', False)\n\n    self._output_filename = getattr(options, 'write', None)\n\n    argument_helper_names = ['process_resources', 'storage_file']\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=argument_helper_names)\n\n    \n    if not self._storage_file_path:\n      raise errors.BadConfigOption('Missing storage file option.')\n\n    if not os.path.isfile(self._storage_file_path):\n      raise errors.BadConfigOption(\n          'No such storage file: {0:s}.'.format(self._storage_file_path))\n\n    compare_storage_file_path = self.ParseStringOption(\n        options, 'compare_storage_file')\n    if compare_storage_file_path:\n      if not os.path.isfile(compare_storage_file_path):\n        raise errors.BadConfigOption(\n            'No such storage file: {0:s}.'.format(compare_storage_file_path))\n\n      self._compare_storage_file_path = compare_storage_file_path\n      self.compare_storage_information = True\n\n    self._output_format = self.ParseStringOption(options, 'output_format')\n\n    if self._output_filename:\n      if os.path.exists(self._output_filename):\n        raise errors.BadConfigOption(\n            'Output file already exists: {0:s}.'.format(self._output_filename))\n      output_file_object = open(self._output_filename, 'wb')\n      self._output_writer = tools.FileObjectOutputWriter(output_file_object)\n\n    self._EnforceProcessMemoryLimit(self._process_memory_limit)", "docstring": "Parses the options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def genome_name_from_fasta_path(fasta_path):\n    \n    filename = os.path.basename(fasta_path)\n    return re.sub(r'(\\.fa$)|(\\.fas$)|(\\.fasta$)|(\\.fna$)|(\\.\\w{1,}$)', '', filename)", "docstring": "Extract genome name from fasta filename\n\nGet the filename without directory and remove the file extension.\n\nExample:\nWith fasta file path ``/path/to/genome_1.fasta``::\n\nfasta_path = '/path/to/genome_1.fasta'\ngenome_name = genome_name_from_fasta_path(fasta_path)\nprint(genome_name)\n# => \"genome_1\"\n\nArgs:\nfasta_path (str): fasta file path\n\nReturns:\nstr: genome name", "source": "juraj-google-style"}
{"code": "async def remove(self, *, node_id: str, force: bool=False) -> Mapping[(str, Any)]:\n    params = {'force': force}\n    response = (await self.docker._query_json('nodes/{node_id}'.format(node_id=node_id), method='DELETE', params=params))\n    return response", "docstring": "Remove a node from a swarm.\n\nArgs:\nnode_id: The ID or name of the node", "source": "codesearchnet"}
{"code": "def __init__(self, options):\n        \n        self._options = options\n        self._tasks = {}\n        self._task_lock = threading.RLock()", "docstring": "Constructor.\n\nArgs:\noptions (gax.BundleOptions): configures strategy this instance\nuses when executing bundled functions.", "source": "juraj-google-style"}
{"code": "def decompress_decoder_2d(x, hparams, name=None):\n  \n  return decompress_decoder(x, hparams,\n                            strides=(2, 2),\n                            kernel=(hparams.kernel_size, hparams.kernel_size),\n                            name=name)", "docstring": "Decoder that decompresses 2-D inputs by 2**num_compress_steps.\n\nArgs:\nx: Tensor of shape [batch, compress_height, compress_width, channels].\nhparams: HParams.\nname: string, variable scope.\n\nReturns:\nTensor of shape [batch, height, width, hparams.hidden_size].", "source": "juraj-google-style"}
{"code": "def _NormalizedVolumeIdentifiers(\n      self, volume_system, volume_identifiers, prefix='v'):\n    \n    normalized_volume_identifiers = []\n    for volume_identifier in volume_identifiers:\n      if isinstance(volume_identifier, int):\n        volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n\n      elif not volume_identifier.startswith(prefix):\n        try:\n          volume_identifier = int(volume_identifier, 10)\n          volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n        except (TypeError, ValueError):\n          pass\n\n      try:\n        volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n      except KeyError:\n        volume = None\n\n      if not volume:\n        raise errors.ScannerError(\n            'Volume missing for identifier: {0:s}.'.format(volume_identifier))\n\n      normalized_volume_identifiers.append(volume_identifier)\n\n    return normalized_volume_identifiers", "docstring": "Normalizes volume identifiers.\n\nArgs:\nvolume_system (VolumeSystem): volume system.\nvolume_identifiers (list[int|str]): allowed volume identifiers, formatted\nas an integer or string with prefix.\nprefix (Optional[str]): volume identifier prefix.\n\nReturns:\nlist[str]: volume identifiers with prefix.\n\nRaises:\nScannerError: if the volume identifier is not supported or no volume\ncould be found that corresponds with the identifier.", "source": "juraj-google-style"}
{"code": "def get_device_name():\n    return context().device_name", "docstring": "Get the device name for the current thread.\n\nReturns:\nThe device name for the current thread.", "source": "github-repos"}
{"code": "def extension_to_message(extension: message.Message, message_cls: Type[_T]) -> _T:\n    msg = message_cls()\n    add_extension_to_message(extension, msg)\n    return msg", "docstring": "Serializes a provided FHIR extension into a message of type message_cls.\n\nThis function is a convenience wrapper around add_extension_to_message.\n\nArgs:\nextension: The FHIR extension to serialize.\nmessage_cls: The type of protobuf message to serialize extension to.\n\nReturns:\nA message of type message_cls.", "source": "github-repos"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    event = event_values.get('event', None)\n    if event:\n      event_values['event_map'] = self.EVENT_NAMES.get(event, 'Unknown')\n\n    category = event_values.get('cat', None)\n    if category:\n      event_values['category_map'] = self.CATEGORY_NAMES.get(\n          category, 'Unknown')\n\n    action = event_values.get('action0', None)\n    if action:\n      event_values['action0_map'] = self.ACTION_0_NAMES.get(action, 'Unknown')\n\n    action = event_values.get('action1', None)\n    if action:\n      event_values['action1_map'] = self.ACTION_1_2_NAMES.get(\n          action, 'Unknown')\n\n    action = event_values.get('action2', None)\n    if action:\n      event_values['action2_map'] = self.ACTION_1_2_NAMES.get(\n          action, 'Unknown')\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def convert_dict_to_compatible_tensor(values, targets):\n    result = {}\n    for (key, value) in sorted(values.items()):\n        result[key] = _convert_to_compatible_tensor(value, targets[key], error_prefix=(\"Can't convert %r\" % key))\n    return result", "docstring": "Converts dict `values` in tensors that are compatible with `targets`.\n\nArgs:\nvalues: A dict to objects to convert with same keys as `targets`.\ntargets: A dict returned by `parse_tensor_info_map`.\n\nReturns:\nA map with the same keys as `values` but values converted into\nTensor/SparseTensors that can be fed into `protomap`.\n\nRaises:\nTypeError: If it fails to convert.", "source": "codesearchnet"}
{"code": "def extract_report_spec(service, label_is_supported=label_descriptor.KnownLabels.is_supported, metric_is_supported=metric_descriptor.KnownMetrics.is_supported):\n    resource_descs = service.monitoredResources\n    labels_dict = {}\n    logs = set()\n    if service.logging:\n        logs = _add_logging_destinations(service.logging.producerDestinations, resource_descs, service.logs, labels_dict, label_is_supported)\n    metrics_dict = {}\n    monitoring = service.monitoring\n    if monitoring:\n        for destinations in (monitoring.consumerDestinations, monitoring.producerDestinations):\n            _add_monitoring_destinations(destinations, resource_descs, service.metrics, metrics_dict, metric_is_supported, labels_dict, label_is_supported)\n    return (logs, metrics_dict.keys(), labels_dict.keys())", "docstring": "Obtains the used logs, metrics and labels from a service.\n\nlabel_is_supported and metric_is_supported are filter functions used to\ndetermine if label_descriptors or metric_descriptors found in the service\nare supported.\n\nArgs:\nservice (:class:`endpoints_management.gen.servicecontrol_v1_messages.Service`):\na service instance\nlabel_is_supported (:func): determines if a given label is supported\nmetric_is_supported (:func): determines if a given metric is supported\n\nReturn:\ntuple: (\nlogs (set[string}), # the logs to report to\nmetrics (list[string]), # the metrics to use\nlabels (list[string]) # the labels to add\n)", "source": "codesearchnet"}
{"code": "def remove_handler(self, name):\n    index = None\n    for (i, h) in enumerate(self.capture_handlers):\n        if (h['name'] == name):\n            index = i\n    if (index is not None):\n        self.capture_handlers[index]['logger'].close()\n        del self.capture_handlers[index]", "docstring": "Remove a handler given a name\n\nNote, if multiple handlers have the same name the last matching\ninstance in the handler list will be removed.\n\nArgs:\nname:\nThe name of the handler to remove", "source": "codesearchnet"}
{"code": "def from_string(cls, string, format_=None, fps=None, **kwargs):\n    fp = io.StringIO(string)\n    return cls.from_file(fp, format_, fps=fps, **kwargs)", "docstring": "Load subtitle file from string.\n\nSee :meth:`SSAFile.load()` for full description.\n\nArguments:\nstring (str): Subtitle file in a string. Note that the string\nmust be Unicode (in Python 2).\n\nReturns:\nSSAFile\n\nExample:\n>>> text = '''\n... 1\n... 00:00:00,000 --> 00:00:05,000\n... An example SubRip file.\n... '''\n>>> subs = SSAFile.from_string(text)", "source": "codesearchnet"}
{"code": "def get_block(self, parent, config='running_config'):\n        \n        try:\n            parent = r'^%s$' % parent\n            return self.node.section(parent, config=config)\n        except TypeError:\n            return None", "docstring": "Scans the config and returns a block of code\n\nArgs:\nparent (str): The parent string to search the config for and\nreturn the block\nconfig (str): A text config string to be searched. Default\nis to search the running-config of the Node.\n\nReturns:\nA string object that represents the block from the config.  If\nthe parent string is not found, then this method will\nreturn None.", "source": "juraj-google-style"}
{"code": "def _mark_func_graph_as_unsaveable(graph, learning_phase):\n    if graph.building_function and is_placeholder(learning_phase):\n        graph.mark_as_unsaveable('The keras learning phase placeholder was used inside a function. Exporting placeholders is not supported when saving out a SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` in the function to set the learning phase to a constant value.')", "docstring": "Mark func graph as unsaveable due to use of symbolic keras learning phase.\n\nFunctions that capture the symbolic learning phase cannot be exported to\nSavedModel. Mark the funcgraph as unsaveable, so that an error will be raised\nif it is exported.\n\nArgs:\ngraph: Graph or FuncGraph object.\nlearning_phase: Learning phase placeholder or int defined in the graph.", "source": "github-repos"}
{"code": "def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:\n    boxes = center_to_corners_format(boxes)\n    height, width = image_size\n    boxes = boxes * torch.tensor([[width, height, width, height]])\n    return boxes", "docstring": "Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1]\nto Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates.\n\nArgs:\nboxes (torch.Tensor): Bounding boxes in YOLO format\nimage_size (Tuple[int, int]): Image size in format (height, width)\n\nReturns:\ntorch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max)", "source": "github-repos"}
{"code": "def _ReadFormatDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        error_message = 'data type not supported as member'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object = self._ReadLayoutDataTypeDefinition(definitions_registry, definition_values, data_types.FormatDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_FORMAT)\n    definition_object.metadata = definition_values.get('metadata', {})\n    attributes = definition_values.get('attributes', None)\n    if attributes:\n        unsupported_attributes = set(attributes.keys()).difference(self._SUPPORTED_ATTRIBUTES_FORMAT)\n        if unsupported_attributes:\n            error_message = 'unsupported attributes: {0:s}'.format(', '.join(unsupported_attributes))\n            raise errors.DefinitionReaderError(definition_name, error_message)\n        byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)\n        if (byte_order not in definitions.BYTE_ORDERS):\n            error_message = 'unsupported byte-order attribute: {0!s}'.format(byte_order)\n            raise errors.DefinitionReaderError(definition_name, error_message)\n        definition_object.byte_order = byte_order\n    return definition_object", "docstring": "Reads a format data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nFormatDefinition: format definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def GetIndentLevel(line):\n  \n  indent = Match(r'^( *)\\S', line)\n  if indent:\n    return len(indent.group(1))\n  else:\n    return 0", "docstring": "Return the number of leading spaces in line.\n\nArgs:\nline: A string to check.\n\nReturns:\nAn integer count of leading spaces, possibly zero.", "source": "juraj-google-style"}
{"code": "def add_payload(self, key, val, append=False):\n        \n        self._request.add_payload(key, val, append)", "docstring": "Add a key value pair to payload for this request.\n\n.. Note:: For ``_search`` you can pass a search argument. (e.g. _search?summary=1.1.1.1).\n\nArgs:\nkey (string): The payload key\nval (string): The payload value\nappend (bool): Indicate whether the value should be appended", "source": "juraj-google-style"}
{"code": "def to_dict(self):\n    return dict(addr=self.addr, protocol=self.protocol, weight=self.weight, last_checked=self.last_checked)", "docstring": "convert detailed proxy info into a dict\n\nReturns:\ndict: A dict with four keys: ``addr``, ``protocol``,\n``weight`` and ``last_checked``", "source": "codesearchnet"}
{"code": "def add_redistribution(self, protocol, route_map_name=None):\n        \n        protocols = ['bgp', 'rip', 'static', 'connected']\n        if protocol not in protocols:\n            raise ValueError('redistributed protocol must be'\n                             'bgp, connected, rip or static')\n        if route_map_name is None:\n            cmd = 'redistribute {}'.format(protocol)\n        else:\n            cmd = 'redistribute {} route-map {}'.format(protocol,\n                                                        route_map_name)\n        return self.configure_ospf(cmd)", "docstring": "Adds a protocol redistribution to OSPF\n\nArgs:\nprotocol (str):  protocol to redistribute\nroute_map_name (str): route-map to be used to\nfilter the protocols\nReturns:\nbool: True if the command completes successfully\nException:\nValueError:  This will be raised if the protocol pass is not one\nof the following: [rip, bgp, static, connected]", "source": "juraj-google-style"}
{"code": "def reorder(miz_file_path: typing.Union[str, Path],\n                target_dir: typing.Union[str, Path],\n                skip_options_file: bool,\n                ):\n        \n\n        miz_file_path = Path(miz_file_path).absolute()\n        if not miz_file_path.exists():\n            raise FileNotFoundError(miz_file_path)\n        if not miz_file_path.is_file():\n            raise ValueError(f'not a file: {miz_file_path}')\n\n        target_dir_path = Path(target_dir).absolute()\n        if not target_dir_path.exists():\n            target_dir_path.mkdir(parents=True)\n        else:\n            if not target_dir_path.is_dir():\n                raise ValueError(f'not a directory: {target_dir_path}')\n\n        LOGGER.debug('re-ordering miz file: %s', miz_file_path)\n        LOGGER.debug('destination folder: %s', target_dir)\n        LOGGER.debug('%s option file', \"skipping\" if skip_options_file else \"including\")\n\n        if not target_dir_path.exists():\n            LOGGER.debug('creating directory %s', target_dir_path)\n            target_dir_path.mkdir(exist_ok=True)\n\n        Miz._do_reorder(miz_file_path, skip_options_file, target_dir_path)", "docstring": "Re-orders a miz file into a folder (flattened)\n\nArgs:\nmiz_file_path: source miz file\ntarget_dir: folder to flatten the content into\nskip_options_file: do not re-order option file", "source": "juraj-google-style"}
{"code": "def get_interpolated_value(self, energy, integrated=False):\n        \n        inter = {}\n        for spin in self.cohp:\n            if not integrated:\n                inter[spin] = get_linear_interpolated_value(self.energies,\n                                                            self.cohp[spin],\n                                                            energy)\n            elif self.icohp is not None:\n                inter[spin] = get_linear_interpolated_value(self.energies,\n                                                            self.icohp[spin],\n                                                            energy)\n            else:\n                raise ValueError(\"ICOHP is empty.\")\n        return inter", "docstring": "Returns the COHP for a particular energy.\n\nArgs:\nenergy: Energy to return the COHP value for.", "source": "juraj-google-style"}
{"code": "def report_file(config, auth, report_id=None, name=None, timeout=60, chunksize=DBM_CHUNKSIZE):\n    storage_path = report_fetch(config, auth, report_id, name, timeout)\n    if storage_path == False:\n        return (None, None)\n    elif storage_path == True:\n        return ('report_running.csv', None)\n    else:\n        filename = RE_FILENAME.search(storage_path).groups(0)[0]\n        if chunksize:\n            if config.verbose:\n                print('REPORT FILE STREAM:', storage_path)\n            return (filename, response_utf8_stream(urlopen(storage_path), chunksize))\n        else:\n            if config.verbose:\n                print('REPORT FILE SINGLE:', storage_path)\n            return (filename, urlopen(storage_path).read().decode('UTF-8'))", "docstring": "Retrieves most recent DBM file by name or ID, if in progress, waits for it to complete.\n\nTimeout is in minutes ( retries will happen at 1 minute interval, default\ntotal time is 60 minutes )\nIf chunksize is set to None then the whole file is downloaded at once.\n\nArgs:\n* auth: (string) Either user or service.\n* report_id: (int) ID of DCm report to fetch ( either or name ).\n* name: (string) Name of report to fetch ( either or report_id ).\n* timeout: (int) Minutes to wait for in progress report before giving up.\n* chunksize: (int) number of bytes to download at a time, for memory\nconstrained systems.\n\nReturns:\n* (filename, iterator) if file exists and is ready to download in chunks.\n* (filename, file) if file exists and chunking is off.\n* ('report_running.csv', None) if report is in progress.\n* (None, None) if file does not exist.", "source": "github-repos"}
{"code": "def market_exact(self, session, start_time: str, end_time: str) -> Session:\n        \n        if session not in self.exch: return SessNA\n        ss = self.exch[session]\n\n        same_day = ss[0] < ss[-1]\n\n        if not start_time: s_time = ss[0]\n        else:\n            s_time = param.to_hour(start_time)\n            if same_day: s_time = max(s_time, ss[0])\n\n        if not end_time: e_time = ss[-1]\n        else:\n            e_time = param.to_hour(end_time)\n            if same_day: e_time = min(e_time, ss[-1])\n\n        if same_day and (s_time > e_time): return SessNA\n        return Session(start_time=s_time, end_time=e_time)", "docstring": "Explicitly specify start time and end time\n\nArgs:\nsession: predefined session\nstart_time: start time in terms of HHMM string\nend_time: end time in terms of HHMM string\n\nReturns:\nSession of start_time and end_time", "source": "juraj-google-style"}
{"code": "def _GetFileByPath(self, key_path_upper):\n    (key_path_prefix, registry_file) = self._GetCachedFileByPath(key_path_upper)\n    if (not registry_file):\n        for mapping in self._GetFileMappingsByPath(key_path_upper):\n            try:\n                registry_file = self._OpenFile(mapping.windows_path)\n            except IOError:\n                registry_file = None\n            if (not registry_file):\n                continue\n            if (not key_path_prefix):\n                key_path_prefix = mapping.key_path_prefix\n            self.MapFile(key_path_prefix, registry_file)\n            key_path_prefix = key_path_prefix.upper()\n            break\n    return (key_path_prefix, registry_file)", "docstring": "Retrieves a Windows Registry file for a specific path.\n\nArgs:\nkey_path_upper (str): Windows Registry key path, in upper case with\na resolved root key alias.\n\nReturns:\ntuple: consists:\n\nstr: upper case key path prefix\nWinRegistryFile: corresponding Windows Registry file or None if not\navailable.", "source": "codesearchnet"}
{"code": "def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n    return self._get_dataloader(dataset=test_dataset, description='test', batch_size=self.args.eval_batch_size, sampler_fn=self._get_eval_sampler)", "docstring": "Returns the test [`~torch.utils.data.DataLoader`].\n\nSubclass and override this method if you want to inject some custom behavior.\n\nArgs:\ntest_dataset (`torch.utils.data.Dataset`, *optional*):\nThe test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the\n`model.forward()` method are automatically removed. It must implement `__len__`.", "source": "github-repos"}
{"code": "def create_nanopubs_fh(output_fn: str):\n    (json_flag, jsonl_flag, yaml_flag) = (False, False, False)\n    if output_fn:\n        if re.search('gz$', output_fn):\n            out_fh = gzip.open(output_fn, 'wt')\n        else:\n            out_fh = click.open_file(output_fn, mode='wt')\n        if re.search('ya?ml', output_fn):\n            yaml_flag = True\n        elif (('jsonl' in output_fn) or ('-' == output_fn)):\n            jsonl_flag = True\n        elif ('json' in output_fn):\n            json_flag = True\n    else:\n        out_fh = sys.stdout\n    return (out_fh, yaml_flag, jsonl_flag, json_flag)", "docstring": "Create Nanopubs output filehandle\n\n\\b\nIf output fn is '-' will write JSONlines to STDOUT\nIf output fn has *.gz, will written as a gzip file\nIf output fn has *.jsonl*, will written as a JSONLines file\nIF output fn has *.json*, will be written as a JSON file\nIf output fn has *.yaml* or *.yml*,  will be written as a YAML file\n\nArgs:\noutput_fn: Name of output file\n\nReturns:\n(filehandle, yaml_flag, jsonl_flag, json_flag)", "source": "codesearchnet"}
{"code": "def add_info_field(self, field):\n        \n        if field in self.info_dict:\n            msg = \"New info field [{}] already exists.\".format(field)\n            raise KeyError(msg)\n\n        if \"=\" in field:\n            key, value = field.split(\"=\")\n            self.info_dict[key] = value\n        else:\n            self.info_dict[field] = field\n\n        self._join_info_fields()", "docstring": "Adds new info field (flag or key=value pair).\n\nArgs:\nfield: String flag (e.g. \"SOMATIC\") or key-value (\"NEW_DP=42\")\n\nRaises:\nKeyError: if info field already exists", "source": "juraj-google-style"}
{"code": "def AddEventData(self, event_data):\n    \n    self._RaiseIfNotWritable()\n\n    event_data = self._PrepareAttributeContainer(event_data)\n\n    identifier = event_data.GetIdentifier()\n    lookup_key = identifier.CopyToString()\n    self._event_data[lookup_key] = event_data", "docstring": "Adds event data.\n\nArgs:\nevent_data (EventData): event data.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def getUrlMeta(self, url):\n    return self.conn('GET', SkypeConnection.API_URL, params={'url': url}, auth=SkypeConnection.Auth.Authorize).json()", "docstring": "Retrieve various metadata associated with a URL, as seen by Skype.\n\nArgs:\nurl (str): address to ping for info\n\nReturns:\ndict: metadata for the website queried", "source": "codesearchnet"}
{"code": "def _apply_merge_op_and_or_mask(self, op_fn, inputs):\n    output = None\n    output_mask = None\n    for x in inputs:\n        mask = backend.get_keras_mask(x)\n        if mask is not None:\n            mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x))\n        if output is None:\n            output = x\n            output_mask = mask\n            continue\n        if mask is not None:\n            x = ops.where(mask, x, output)\n        if output_mask is not None:\n            output = ops.where(output_mask, output, x)\n        if mask is not None and output_mask is not None:\n            output_mask = ops.logical_or(output_mask, mask)\n        else:\n            output_mask = None\n        output = op_fn(output, x)\n    if output_mask is not None:\n        output_mask = ops.any(output_mask, axis=-1, keepdims=False)\n        backend.set_keras_mask(output, output_mask)\n    return output", "docstring": "Merge a set of inputs by applying `op_fn` and ORing the masks.\n\nWe use this for `Minimum` and `Maximum` as it handles the fact that\nthere is no identity element. If applicable, the mask obtained by ORing\nall masks is set on the output.\n\nArgs:\nop_fn: binary operation to apply to tensor pair.\ninputs: array of tensors to apply operation on.", "source": "github-repos"}
{"code": "def get_dos(self, partial_dos=False, npts_mu=10000, T=None):\n        \n        spin = self.data.spin if isinstance(self.data.spin,int) else 1\n\n        energies, densities, vvdos, cdos = BL.BTPDOS(self.eband, self.vvband, npts=npts_mu)\n        if T is not None:\n            densities = BL.smoothen_DOS(energies, densities, T)\n\n        tdos = Dos(self.efermi / units.eV, energies / units.eV,\n                   {Spin(spin): densities})\n\n        if partial_dos:\n            tdos = self.get_partial_doses(tdos=tdos, npts_mu=npts_mu, T=T)\n\n        return tdos", "docstring": "Return a Dos object interpolating bands\n\nArgs:\npartial_dos: if True, projections will be interpolated as well\nand partial doses will be return. Projections must be available\nin the loader.\nnpts_mu: number of energy points of the Dos\nT: parameter used to smooth the Dos", "source": "juraj-google-style"}
{"code": "def get_pattern_link_topattern(self, patternnumber):\n        \n        _checkPatternNumber(patternnumber)\n\n        address = _calculateRegisterAddress('linkpattern', patternnumber)\n        return self.read_register(address)", "docstring": "Get the 'linked pattern' value for a given pattern.\n\nArgs:\npatternnumber (integer): From 0-7\n\nReturns:\nThe 'linked pattern' value (int).", "source": "juraj-google-style"}
{"code": "def cache_file(symbol, func, has_date, root, date_type='date'):\n    \n    cur_mod = sys.modules[func.__module__]\n    data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC'\n    cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False)\n\n    if has_date:\n        if hasattr(cur_mod, 'FILE_WITH_DATE'):\n            file_fmt = getattr(cur_mod, 'FILE_WITH_DATE')\n        else:\n            file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq'\n    else:\n        if hasattr(cur_mod, 'FILE_NO_DATE'):\n            file_fmt = getattr(cur_mod, 'FILE_NO_DATE')\n        else:\n            file_fmt = '{root}/{typ}/{symbol}.parq'\n\n    return data_file(\n        file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol\n    )", "docstring": "Data file\n\nArgs:\nsymbol: symbol\nfunc: use function to categorize data\nhas_date: contains date in data file\nroot: root path\ndate_type: parameters pass to utils.cur_time, [date, time, time_path, ...]\n\nReturns:\nstr: date file", "source": "juraj-google-style"}
{"code": "def request_and_check(self, url, method='get', expected_content_type=None, **kwargs):\n    assert (method in ['get', 'post'])\n    result = self.driver.request(method, url, **kwargs)\n    if (result.status_code != requests.codes.ok):\n        raise RuntimeError(('Error requesting %r, status = %d' % (url, result.status_code)))\n    if (expected_content_type is not None):\n        content_type = result.headers.get('content-type', '')\n        if (not re.match(expected_content_type, content_type)):\n            raise RuntimeError(('Error requesting %r, content type %r does not match %r' % (url, content_type, expected_content_type)))\n    return result", "docstring": "Performs a request, and checks that the status is OK, and that the\ncontent-type matches expectations.\n\nArgs:\nurl: URL to request\nmethod: either 'get' or 'post'\nexpected_content_type: prefix to match response content-type against\n**kwargs: passed to the request method directly.\n\nRaises:\nRuntimeError if status_code does not match.", "source": "codesearchnet"}
{"code": "def initialize_logger(debug):\n    level = (logging.DEBUG if debug else logging.INFO)\n    logger = logging.getLogger('cucco')\n    logger.setLevel(level)\n    formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s')\n    console_handler = logging.StreamHandler()\n    console_handler.setLevel(level)\n    console_handler.setFormatter(formatter)\n    logger.addHandler(console_handler)\n    return logger", "docstring": "Set up logger to be used by the library.\n\nArgs:\ndebug: Wheter to use debug level or not.\n\nReturns:\nA logger ready to be used.", "source": "codesearchnet"}
{"code": "def process_configs(file_lookup, app_config_format, pipeline_config):\n    app_configs = collections.defaultdict(dict)\n    for env in ENVS:\n        file_json = app_config_format.format(env=env)\n        try:\n            env_config = file_lookup.json(filename=file_json)\n            app_configs[env] = apply_region_configs(env_config)\n        except FileNotFoundError:\n            LOG.critical('Application configuration not available for %s.', env)\n            continue\n    try:\n        app_configs['pipeline'] = file_lookup.json(filename=pipeline_config)\n    except FileNotFoundError:\n        LOG.warning('Unable to process pipeline.json. Using defaults.')\n        app_configs['pipeline'] = {'env': ['stage', 'prod']}\n    LOG.debug('Application configs:\\n%s', app_configs)\n    return app_configs", "docstring": "Processes the configs from lookup sources.\n\nArgs:\nfile_lookup (FileLookup): Source to look for file/config\napp_config_format (str): The format for application config files.\npipeline_config (str): Name/path of the pipeline config\n\nReturns:\ndict: Retreived application config", "source": "codesearchnet"}
{"code": "def lit(literal: Sequence[Input], *literals: Sequence[Sequence[Input]]) -> Parser:\n    if (len(literals) > 0):\n        return AlternativeParser(options.handle_literal(literal), *map(options.handle_literal, literals))\n    else:\n        return options.handle_literal(literal)", "docstring": "Match a literal sequence.\n\nIn the `TextParsers`` context, this matches the literal string\nprovided. In the ``GeneralParsers`` context, this matches a sequence of\ninput.\n\nIf multiple literals are provided, they are treated as alternatives. e.g.\n``lit('+', '-')`` is the same as ``lit('+') | lit('-')``.\n\nArgs:\nliteral: A literal to match\n*literals: Alternative literals to match\n\nReturns:\nA ``LiteralParser`` in the ``GeneralContext``, a ``LiteralStringParser``\nin the ``TextParsers`` context, and an ``AlternativeParser`` if multiple\narguments are provided.", "source": "codesearchnet"}
{"code": "def list(name, default=None, allow_none=False, fallback=None, separator=','):\n    value = read(name, default, allow_none, fallback=fallback)\n    if isinstance(value, builtins.list):\n        return value\n    elif isinstance(value, builtins.str):\n        return _str_to_list(value, separator)\n    elif ((value is None) and allow_none):\n        return None\n    else:\n        return [builtins.str(value)]", "docstring": "Get a list of strings or the default.\n\nThe individual list elements are whitespace-stripped.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)\nseparator: The list item separator character or pattern", "source": "codesearchnet"}
{"code": "def validate_and_copy_one_submission(self, submission_path):\n    if os.path.exists(self.download_dir):\n        shutil.rmtree(self.download_dir)\n    os.makedirs(self.download_dir)\n    if os.path.exists(self.validate_dir):\n        shutil.rmtree(self.validate_dir)\n    os.makedirs(self.validate_dir)\n    logging.info(((('\\n' + ('\n    local_path = self.copy_submission_locally(submission_path)\n    metadata = self.base_validator.validate_submission(local_path)\n    if (not metadata):\n        logging.error('Submission \"%s\" is INVALID', submission_path)\n        self.stats.add_failure()\n        return\n    submission_type = metadata['type']\n    container_name = metadata['container_gpu']\n    logging.info('Submission \"%s\" is VALID', submission_path)\n    self.list_of_containers.add(container_name)\n    self.stats.add_success(submission_type)\n    if self.do_copy:\n        submission_id = '{0:04}'.format(self.cur_submission_idx)\n        self.cur_submission_idx += 1\n        self.copy_submission_to_destination(submission_path, TYPE_TO_DIR[submission_type], submission_id)\n        self.id_to_path_mapping[submission_id] = submission_path", "docstring": "Validates one submission and copies it to target directory.\n\nArgs:\nsubmission_path: path in Google Cloud Storage of the submission file", "source": "codesearchnet"}
{"code": "def gunzip_file(gz_path, new_path):\n    if tf.gfile.Exists(new_path):\n        tf.logging.info(('File %s already exists, skipping unpacking' % new_path))\n        return\n    tf.logging.info(('Unpacking %s to %s' % (gz_path, new_path)))\n    mode = (stat.S_IRWXU or stat.S_IXGRP or stat.S_IRGRP or stat.S_IROTH)\n    os.chmod(os.path.dirname(new_path), mode)\n    with gzip.open(gz_path, 'rb') as gz_file:\n        with tf.gfile.GFile(new_path, mode='wb') as new_file:\n            for line in gz_file:\n                new_file.write(line)", "docstring": "Unzips from gz_path into new_path.\n\nArgs:\ngz_path: path to the zipped file.\nnew_path: path to where the file will be unzipped.", "source": "codesearchnet"}
{"code": "def assert_equal_graph_def_v1(actual: graph_pb2.GraphDef, expected: graph_pb2.GraphDef, checkpoint_v2: bool=False, hash_table_shared_name: bool=False) -> None:\n    assert_equal_graph_def(actual, expected, checkpoint_v2, hash_table_shared_name)", "docstring": "Asserts that two `GraphDef`s are (mostly) the same.\n\nCompares two `GraphDef` protos for equality, ignoring versions and ordering of\nnodes, attrs, and control inputs.  Node names are used to match up nodes\nbetween the graphs, so the naming of nodes must be consistent.\n\nArgs:\nactual: The `GraphDef` we have.\nexpected: The `GraphDef` we expected.\ncheckpoint_v2: boolean determining whether to ignore randomized attribute\nvalues that appear in V2 checkpoints.\nhash_table_shared_name: boolean determining whether to ignore randomized\nshared_names that appear in HashTableV2 op defs.\n\nRaises:\nAssertionError: If the `GraphDef`s do not match.\nTypeError: If either argument is not a `GraphDef`.", "source": "github-repos"}
{"code": "def __init__(self, timestamp=None):\n    \n    super(OLEAutomationDate, self).__init__()\n    self._precision = definitions.PRECISION_1_MICROSECOND\n    self._timestamp = timestamp", "docstring": "Initializes an OLE Automation date.\n\nArgs:\ntimestamp (Optional[float]): OLE Automation date.", "source": "juraj-google-style"}
{"code": "def coerce_to_pendulum(x: PotentialDatetimeType,\n                       assume_local: bool = False) -> Optional[DateTime]:\n    \n    if not x:  \n        return None\n    if isinstance(x, DateTime):\n        return x\n    tz = get_tz_local() if assume_local else get_tz_utc()\n    if isinstance(x, datetime.datetime):\n        return pendulum.instance(x, tz=tz)  \n    elif isinstance(x, datetime.date):\n        \n        \n        \n        midnight = DateTime.min.time()\n        dt = DateTime.combine(x, midnight)\n        return pendulum.instance(dt, tz=tz)  \n    elif isinstance(x, str):\n        return pendulum.parse(x, tz=tz)  \n    else:\n        raise ValueError(\"Don't know how to convert to DateTime: \"\n                         \"{!r}\".format(x))", "docstring": "Converts something to a :class:`pendulum.DateTime`.\n\nArgs:\nx: something that may be coercible to a datetime\nassume_local: if ``True``, assume local timezone; if ``False``, assume\nUTC\n\nReturns:\na :class:`pendulum.DateTime`, or ``None``.\n\nRaises:\npendulum.parsing.exceptions.ParserError: if a string fails to parse\nValueError: if no conversion possible", "source": "juraj-google-style"}
{"code": "def _RemoveAllFlagAppearances(self, name):\n    \n    flag_dict = self.FlagDict()\n    if name not in flag_dict:\n      raise exceptions.UnrecognizedFlagError(name)\n    flag = flag_dict[name]\n    names_to_remove = {name}\n    names_to_remove.add(flag.name)\n    if flag.short_name:\n      names_to_remove.add(flag.short_name)\n    for n in names_to_remove:\n      self.__delattr__(n)", "docstring": "Removes flag with name for all appearances.\n\nA flag can be registered with its long name and an optional short name.\nThis method removes both of them. This is different than __delattr__.\n\nArgs:\nname: Either flag's long name or short name.\n\nRaises:\nUnrecognizedFlagError: When flag name is not found.", "source": "juraj-google-style"}
{"code": "def get_link_or_none(pattern_name, request, view_kwargs=None):\n    from is_core.patterns import reverse_pattern\n    pattern = reverse_pattern(pattern_name)\n    assert (pattern is not None), 'Invalid pattern name {}'.format(pattern_name)\n    if pattern.has_permission('get', request, view_kwargs=view_kwargs):\n        return pattern.get_url_string(request, view_kwargs=view_kwargs)\n    else:\n        return None", "docstring": "Helper that generate URL prom pattern name and kwargs and check if current request has permission to open the URL.\nIf not None is returned.\n\nArgs:\npattern_name (str): slug which is used for view registratin to pattern\nrequest (django.http.request.HttpRequest): Django request object\nview_kwargs (dict): list of kwargs necessary for URL generator\n\nReturns:", "source": "codesearchnet"}
{"code": "def _get_jwt_for_audience(self, audience):\n        \n        token, expiry = self._cache.get(audience, (None, None))\n\n        if token is None or expiry < _helpers.utcnow():\n            token, expiry = self._make_jwt_for_audience(audience)\n            self._cache[audience] = token, expiry\n\n        return token", "docstring": "Get a JWT For a given audience.\n\nIf there is already an existing, non-expired token in the cache for\nthe audience, that token is used. Otherwise, a new token will be\ncreated.\n\nArgs:\naudience (str): The intended audience.\n\nReturns:\nbytes: The encoded JWT.", "source": "juraj-google-style"}
{"code": "def outputs(self) -> Mapping[str, Mapping[int, str]]:\n    common_outputs = self._tasks_to_common_outputs[self.task]\n    return copy.deepcopy(common_outputs)", "docstring": "Mapping containing the axis definition of the output tensors to provide to the model\n\nReturns:\nFor each output: its name associated to the axes symbolic name and the axis position within the tensor", "source": "github-repos"}
{"code": "def _get(self, obj):\n    if (not hasattr(obj, '_property_values')):\n        raise RuntimeError((\"Cannot get a property value '%s' from a %s instance before HasProps.__init__\" % (self.name, obj.__class__.__name__)))\n    if (self.name not in obj._property_values):\n        return self._get_default(obj)\n    else:\n        return obj._property_values[self.name]", "docstring": "Internal implementation of instance attribute access for the\n``BasicPropertyDescriptor`` getter.\n\nIf the value has not been explicitly set by a user, return that\nvalue. Otherwise, return the default.\n\nArgs:\nobj (HasProps) : the instance to get a value of this property for\n\nReturns:\nobject\n\nRaises:\nRuntimeError\nIf the |HasProps| instance has not yet been initialized, or if\nthis descriptor is on a class that is not a |HasProps|.", "source": "codesearchnet"}
{"code": "def add_error(self, error):\n    self._count += 1\n    self._record.add_error('expect@%s+%s' % (time.time(), self._count), error)", "docstring": "Record an error from expect APIs.\n\nThis method generates a position stamp for the expect. The stamp is\ncomposed of a timestamp and the number of errors recorded so far.\n\nArgs:\nerror: Exception or signals.ExceptionRecord, the error to add.", "source": "github-repos"}
{"code": "def _assertOpOutputMatchesExpected(self, op, inp, expected, equality_test=None, rtol=0.001, atol=1e-05):\n    with self.session() as session:\n        with self.test_scope():\n            pinp = array_ops.placeholder(dtypes.as_dtype(inp.dtype), inp.shape, name='a')\n            output = op(pinp)\n        result = session.run(output, {pinp: inp})\n        if equality_test is None:\n            self.assertEqual(output.dtype, expected.dtype)\n            self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)\n        else:\n            equality_test(result, expected, rtol=rtol, atol=atol)", "docstring": "Verifies that 'op' produces 'expected' when fed input 'inp' .\n\nArgs:\nop: operator to test\ninp: numpy input array to use as input to 'op'.\nexpected: numpy array representing the expected output of 'op'.\nequality_test: either None, or a function that tests two numpy arrays for\nequality. If None, self.assertAllClose is used.\nrtol: relative tolerance for equality test.\natol: absolute tolerance for equality test.", "source": "github-repos"}
{"code": "def animation(frame_function: types.FrameFunction) -> types.Animation:\n    animation_ = core.Animation(frame_function)\n\n    @functools.wraps(frame_function)\n    def wrapper(*args, **kwargs):\n        return animation_(*args, **kwargs)\n    return wrapper", "docstring": "Turn a FrameFunction into an Animation.\n\nArgs:\nframe_function: A function that returns a FrameGenerator.\n\nReturns:\nan Animation decorator function.", "source": "codesearchnet"}
{"code": "def GetAnalysisStatusUpdateCallback(self):\n    if (self._mode == self.MODE_LINEAR):\n        return self._PrintAnalysisStatusUpdateLinear\n    if (self._mode == self.MODE_WINDOW):\n        return self._PrintAnalysisStatusUpdateWindow\n    return None", "docstring": "Retrieves the analysis status update callback function.\n\nReturns:\nfunction: status update callback function or None if not available.", "source": "codesearchnet"}
{"code": "def get(self):\n    return self._quantile_tracker.get()", "docstring": "Calculates and returns the median (q = 0.5).\n\nReturns:\nfloat: The median of the values in the window setting specified in the\ninternal quantile tracker. Returns NaN if the window is empty.", "source": "github-repos"}
{"code": "def integer_key_convert(dictin, dropfailedkeys=False):\n    \n    \n    return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)", "docstring": "Convert keys of dictionary to integers\n\nArgs:\ndictin (DictUpperBound): Input dictionary\ndropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.\n\nReturns:\nDict: Dictionary with keys converted to integers", "source": "juraj-google-style"}
{"code": "def decrypt(self, message):\n        \n\n        \n        message = json.loads(message)\n\n        \n        unencrypted_msg = []\n\n        for line in message:\n\n            \n            enc_line = binascii.a2b_base64(line)\n\n            \n            unencrypted_line = rsa.decrypt(enc_line, self.private_key)\n\n            unencrypted_msg.append(unencrypted_line)\n\n        \n        unencrypted_msg = \"\".join(unencrypted_msg)\n\n        return unencrypted_msg", "docstring": "Decrypts a string using our own private key object.\n\nArgs:\nmessage (string): The string of the message to decrypt.\n\nReturns:\nThe unencrypted string.", "source": "juraj-google-style"}
{"code": "def get_repo_data(saltenv='base'):\n    repo_details = _get_repo_details(saltenv)\n    if (repo_details.winrepo_age == (- 1)):\n        log.debug('No winrepo.p cache file. Refresh pkg db now.')\n        refresh_db(saltenv=saltenv)\n    if ('winrepo.data' in __context__):\n        log.trace('get_repo_data returning results from __context__')\n        return __context__['winrepo.data']\n    else:\n        log.trace('get_repo_data called reading from disk')\n    try:\n        serial = salt.payload.Serial(__opts__)\n        with salt.utils.files.fopen(repo_details.winrepo_file, 'rb') as repofile:\n            try:\n                repodata = salt.utils.data.decode((serial.loads(repofile.read()) or {}))\n                __context__['winrepo.data'] = repodata\n                return repodata\n            except Exception as exc:\n                log.exception(exc)\n                return {}\n    except IOError as exc:\n        log.error('Not able to read repo file')\n        log.exception(exc)\n        return {}", "docstring": "Returns the existing package metadata db. Will create it, if it does not\nexist, however will not refresh it.\n\nArgs:\nsaltenv (str): Salt environment. Default ``base``\n\nReturns:\ndict: A dict containing contents of metadata db.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' pkg.get_repo_data", "source": "codesearchnet"}
{"code": "def evaluate_repeatedly(self, accuracy, num_steps, feed_vars=(), feed_data=None, summary_tag=None, evaluation_times=(- 1)):\n    current_checkpoint = None\n    try:\n        for i in itertools.count(0):\n            with self.session() as sess:\n                current_checkpoint = self.load_new_checkpoint_when_available(sess, current_checkpoint)\n                self._run_init_test_vars_op()\n                accuracy_result = self.evaluate_model(accuracy, num_steps, summary_tag=summary_tag, print_every=0, feed_vars=feed_vars, feed_data=feed_data)\n                if (not summary_tag):\n                    print(('[%d] %s' % (sess.run(bookkeeper.global_step()), accuracy_result)))\n                if ((i + 1) == evaluation_times):\n                    return accuracy_result\n    finally:\n        print('Shutting down')\n        sys.stdout.flush()\n        self.stop_queues()", "docstring": "Runs the evaluation in a loop for `evaluation_times`.\n\nOn each iteration, `evaluate_model` is called with the supplied arguments.\nThis manages the queue threads itself.\n\nArgs:\naccuracy: The metric that is being evaluated.\nnum_steps: The number of steps to run in the evaluator.\nfeed_vars: A list or tuple of the variables that will be fed.\nfeed_data: A generator that produces tuples of the same length as\nfeed_vars.\nsummary_tag: If provided, the final result of each evaluation will be\npublished to this tag.\nevaluation_times: Run this loop for this many times or forever if it is\n`-1`.\n\nReturns:\nThe final evaluation result from `evaluate_model` if `evaluation_times`\never ends.", "source": "codesearchnet"}
{"code": "def filter_aliases(alias_table):\n    \n    for alias in alias_table.sections():\n        if alias_table.has_option(alias, 'command'):\n            yield (alias.split()[0], remove_pos_arg_placeholders(alias_table.get(alias, 'command')))", "docstring": "Filter aliases that does not have a command field in the configuration file.\n\nArgs:\nalias_table: The alias table.\n\nYield:\nA tuple with [0] being the first word of the alias and\n[1] being the command that the alias points to.", "source": "juraj-google-style"}
{"code": "def reload(self, napps=None):\n        \n        client = NAppsClient(self._config)\n        client.reload_napps(napps)", "docstring": "Reload a NApp or all NApps.\n\nArgs:\nnapps (list): NApp list to be reloaded.\nRaises:\nrequests.HTTPError: When there's a server error.", "source": "juraj-google-style"}
{"code": "def prepare_to_run_task(context, claim_task):\n    \n    current_task_info = {}\n    context.claim_task = claim_task\n    current_task_info['taskId'] = get_task_id(claim_task)\n    current_task_info['runId'] = get_run_id(claim_task)\n    log.info(\"Going to run taskId {taskId} runId {runId}!\".format(\n        **current_task_info\n    ))\n    context.write_json(\n        os.path.join(context.config['work_dir'], 'current_task_info.json'),\n        current_task_info, \"Writing current task info to {path}...\"\n    )\n    return current_task_info", "docstring": "Given a `claim_task` json dict, prepare the `context` and `work_dir`.\n\nSet `context.claim_task`, and write a `work_dir/current_task_info.json`\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nclaim_task (dict): the claim_task dict.\n\nReturns:\ndict: the contents of `current_task_info.json`", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.LeaseGrant = channel.unary_unary(\n        '/etcdserverpb.Lease/LeaseGrant',\n        request_serializer=rpc__pb2.LeaseGrantRequest.SerializeToString,\n        response_deserializer=rpc__pb2.LeaseGrantResponse.FromString,\n        )\n    self.LeaseRevoke = channel.unary_unary(\n        '/etcdserverpb.Lease/LeaseRevoke',\n        request_serializer=rpc__pb2.LeaseRevokeRequest.SerializeToString,\n        response_deserializer=rpc__pb2.LeaseRevokeResponse.FromString,\n        )\n    self.LeaseKeepAlive = channel.stream_stream(\n        '/etcdserverpb.Lease/LeaseKeepAlive',\n        request_serializer=rpc__pb2.LeaseKeepAliveRequest.SerializeToString,\n        response_deserializer=rpc__pb2.LeaseKeepAliveResponse.FromString,\n        )\n    self.LeaseTimeToLive = channel.unary_unary(\n        '/etcdserverpb.Lease/LeaseTimeToLive',\n        request_serializer=rpc__pb2.LeaseTimeToLiveRequest.SerializeToString,\n        response_deserializer=rpc__pb2.LeaseTimeToLiveResponse.FromString,\n        )\n    self.LeaseLeases = channel.unary_unary(\n        '/etcdserverpb.Lease/LeaseLeases',\n        request_serializer=rpc__pb2.LeaseLeasesRequest.SerializeToString,\n        response_deserializer=rpc__pb2.LeaseLeasesResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def apply_to_tensor(self, tensor, assign_tuple_sharding=False, use_sharding_op=False, unspecified_dims=None):\n    if unspecified_dims:\n        assert use_sharding_op and (not assign_tuple_sharding)\n    proto = self._proto\n    if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():\n        if assign_tuple_sharding:\n            proto = self._create_tuple_proto(num_outputs=1)\n        tensor._set_xla_sharding(proto)\n        return tensor\n    if use_sharding_op:\n        if assign_tuple_sharding:\n            proto = self._create_tuple_proto(num_outputs=1)\n            tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString())\n        else:\n            tensor = tf2xla.sharding(tensor, sharding=proto.SerializeToString(), unspecified_dims=unspecified_dims or [])\n    elif assign_tuple_sharding or len(tensor.op.outputs) > 1:\n        proto = self._get_or_create_tuple_proto(tensor.op)\n        tuple_shardings = list(proto.tuple_shardings)\n        tuple_shardings[tensor.value_index] = self._proto\n        proto = xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.TUPLE, tuple_shardings=tuple_shardings)\n    tensor.op._set_attr('_XlaSharding', attr_value_pb2.AttrValue(s=proto.SerializeToString()))\n    return tensor", "docstring": "Applies this Sharding attribute to `tensor`.\n\nArgs:\ntensor: A tf.Tensor to split.\nassign_tuple_sharding: If the sharding type should be a tuple.\nuse_sharding_op: Whether to create a sharding op on `tensor`.\nunspecified_dims: An optional list of dimensions unspecified.\n\nReturns:\nThe tensor with Sharding attribute.", "source": "github-repos"}
{"code": "def choose_branch(exclude=None):\n    if (exclude is None):\n        master = conf.get('git.master_branch', 'master')\n        develop = conf.get('git.devel_branch', 'develop')\n        exclude = {master, develop}\n    branches = list((set(git.branches()) - exclude))\n    for (i, branch_name) in enumerate(branches):\n        shell.cprint('<90>[{}] <33>{}'.format((i + 1), branch_name))\n    choice = 0\n    while ((choice < 1) or (choice > len(branches))):\n        prompt = 'Pick a base branch from the above [1-{}]'.format(len(branches))\n        choice = click.prompt(prompt, value_proc=int)\n        if (not (1 <= choice <= len(branches))):\n            fmt = 'Invalid choice {}, you must pick a number between {} and {}'\n            log.err(fmt.format(choice, 1, len(branches)))\n    return branches[(choice - 1)]", "docstring": "Show the user a menu to pick a branch from the existing ones.\n\nArgs:\nexclude (list[str]):\nList of branch names to exclude from the menu. By default it will\nexclude master and develop branches. To show all branches pass an\nempty array here.\n\nReturns:\nstr: The name of the branch chosen by the user. If the user inputs an\ninvalid choice, he will be asked again (and again) until he picks a\na valid branch.", "source": "codesearchnet"}
{"code": "def decode(self, ids, strip_extraneous=False):\n    if strip_extraneous:\n        ids = strip_ids(ids, list(range((self._num_reserved_ids or 0))))\n    return unicode_to_native(tokenizer.decode(self._subtoken_ids_to_tokens(ids)))", "docstring": "Converts a sequence of subtoken ids to a native string.\n\nArgs:\nids: a list of integers in the range [0, vocab_size)\nstrip_extraneous: bool, whether to strip off extraneous tokens\n(EOS and PAD).\n\nReturns:\na native string", "source": "codesearchnet"}
{"code": "def read_html_file(data_dir, fileroot, encoding=None):\n    \n    fname = os.path.join(\n        data_dir, RAW_HTML_DIRNAME, fileroot + RAW_HTML_EXT)\n    encodings = (encoding,) if encoding else ('utf-8', 'iso-8859-1')  \n    for encoding in encodings:\n        try:\n            with io.open(fname, mode='rt', encoding=encoding) as f:\n                raw_html = f.read()\n            break\n        except (UnicodeDecodeError, UnicodeError):\n            raw_html = None\n\n    return ftfy.fix_encoding(raw_html).strip()", "docstring": "Read the HTML file corresponding to identifier ``fileroot``\nin the raw HTML directory below the root ``data_dir``.\n\nArgs:\ndata_dir (str)\nfileroot (str)\nencoding (str)\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def write_pattern(lines_per_file, no_data=False, return_filenames=False):\n    temp_dir = tempfile.mkdtemp()\n    all_data = []\n    file_name = None\n    start_index = 0\n    for i in range(len(lines_per_file)):\n        file_name, data = write_data(lines_per_file[i], no_data=no_data, directory=temp_dir, prefix='mytemp')\n        if return_filenames:\n            all_data.extend(zip([file_name] * len(data), data))\n        else:\n            all_data.extend(data)\n        start_index += lines_per_file[i]\n    assert file_name\n    return (file_name[:file_name.rfind(os.path.sep)] + os.path.sep + 'mytemp*', all_data)", "docstring": "Writes a pattern of temporary files.\n\nArgs:\nlines_per_file (List[int]): The number of lines to write per file.\nno_data (bool): If :data:`True`, empty lines will be written, otherwise\neach line will contain a concatenation of b'line' and the line number.\nreturn_filenames (bool): If True, returned list will contain\n(filename, data) pairs.\n\nReturns:\nTuple[str, List[Union[str, (str, str)]]]: A tuple of the filename pattern\nand a list of the utf-8 decoded written data or (filename, data) pairs.", "source": "github-repos"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_buffer = utils.BytearrayStream()\n\n        if self._maximum_items:\n            self._maximum_items.write(local_buffer, kmip_version=kmip_version)\n\n        if self._offset_items:\n            self._offset_items.write(local_buffer, kmip_version=kmip_version)\n\n        if self._storage_status_mask:\n            self._storage_status_mask.write(\n                local_buffer,\n                kmip_version=kmip_version\n            )\n\n        if self._object_group_member:\n            self._object_group_member.write(\n                local_buffer,\n                kmip_version=kmip_version\n            )\n\n        if kmip_version < enums.KMIPVersion.KMIP_2_0:\n            if self._attributes:\n                for attribute in self.attributes:\n                    attribute.write(\n                        local_buffer,\n                        kmip_version=kmip_version\n                    )\n        else:\n            if self._attributes:\n                \n                template_attribute = objects.TemplateAttribute(\n                    attributes=self.attributes\n                )\n                attributes = objects.convert_template_attribute_to_attributes(\n                    template_attribute\n                )\n                attributes.write(local_buffer, kmip_version=kmip_version)\n            else:\n                raise exceptions.InvalidField(\n                    \"The Locate request payload is missing the attributes \"\n                    \"list.\"\n                )\n\n        self.length = local_buffer.length()\n        super(LocateRequestPayload, self).write(\n            output_buffer,\n            kmip_version=kmip_version\n        )\n        output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the Locate request payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def line_count(fn):\n    with open(fn) as f:\n        for (i, l) in enumerate(f):\n            pass\n    return (i + 1)", "docstring": "Get line count of file\n\nArgs:\nfn (str): Path to file\n\nReturn:\nNumber of lines in file (int)", "source": "codesearchnet"}
{"code": "async def update_read_timestamp(self, read_timestamp=None):\n    if (read_timestamp is None):\n        read_timestamp = (self.events[(- 1)].timestamp if self.events else datetime.datetime.now(datetime.timezone.utc))\n    if (read_timestamp > self.latest_read_timestamp):\n        logger.info('Setting {} latest_read_timestamp from {} to {}'.format(self.id_, self.latest_read_timestamp, read_timestamp))\n        state = self._conversation.self_conversation_state\n        state.self_read_state.latest_read_timestamp = parsers.to_timestamp(read_timestamp)\n        try:\n            (await self._client.update_watermark(hangouts_pb2.UpdateWatermarkRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), last_read_timestamp=parsers.to_timestamp(read_timestamp))))\n        except exceptions.NetworkError as e:\n            logger.warning('Failed to update read timestamp: {}'.format(e))\n            raise", "docstring": "Update the timestamp of the latest event which has been read.\n\nThis method will avoid making an API request if it will have no effect.\n\nArgs:\nread_timestamp (datetime.datetime): (optional) Timestamp to set.\nDefaults to the timestamp of the newest event.\n\nRaises:\n.NetworkError: If the timestamp cannot be updated.", "source": "codesearchnet"}
{"code": "def check_origin(self, origin):\n        \n        from ..util import check_whitelist\n        parsed_origin = urlparse(origin)\n        origin_host = parsed_origin.netloc.lower()\n\n        allowed_hosts = self.application.websocket_origins\n        if settings.allowed_ws_origin():\n            allowed_hosts = set(settings.allowed_ws_origin())\n\n        allowed = check_whitelist(origin_host, allowed_hosts)\n        if allowed:\n            return True\n        else:\n            log.error(\"Refusing websocket connection from Origin '%s'; \\\n                      use --allow-websocket-origin=%s or set BOKEH_ALLOW_WS_ORIGIN=%s to permit this; currently we allow origins %r\",\n                      origin, origin_host, origin_host, allowed_hosts)\n            return False", "docstring": "Implement a check_origin policy for Tornado to call.\n\nThe supplied origin will be compared to the Bokeh server whitelist. If the\norigin is not allow, an error will be logged and ``False`` will be returned.\n\nArgs:\norigin (str) :\nThe URL of the connection origin\n\nReturns:\nbool, True if the connection is allowed, False otherwise", "source": "juraj-google-style"}
{"code": "def _check_initialized(self):\n    baddies = self._find_uninitialized()\n    if baddies:\n        raise datastore_errors.BadValueError(('Entity has uninitialized properties: %s' % ', '.join(baddies)))", "docstring": "Internal helper to check for uninitialized properties.\n\nRaises:\nBadValueError if it finds any.", "source": "codesearchnet"}
{"code": "def get_average_voltage(self, min_voltage=None, max_voltage=None):\n    pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)\n    if (len(pairs_in_range) == 0):\n        return 0\n    total_cap_in_range = sum([p.mAh for p in pairs_in_range])\n    total_edens_in_range = sum([(p.mAh * p.voltage) for p in pairs_in_range])\n    return (total_edens_in_range / total_cap_in_range)", "docstring": "Average voltage for path satisfying between a min and max voltage.\n\nArgs:\nmin_voltage (float): The minimum allowable voltage for a given\nstep.\nmax_voltage (float): The maximum allowable voltage allowable for a\ngiven step.\n\nReturns:\nAverage voltage in V across the insertion path (a subset of the\npath can be chosen by the optional arguments)", "source": "codesearchnet"}
{"code": "def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:\n    batch_size_and_num_heads, seq_length, _ = x.shape\n    batch_size = batch_size_and_num_heads \n    x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)\n    x = x.permute(0, 2, 1, 3)\n    return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)", "docstring": "Merge heads together over the last dimension\n\nArgs:\nx (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim]\n\nReturns:\ntorch.tensor: [batch_size, seq_length, num_heads * head_dim]", "source": "github-repos"}
{"code": "def _prepare_replacement(self, replaced, key):\n    repl = self.replacements[key]\n    new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)\n    if isinstance(new_nodes, gast.AST):\n        new_nodes = [new_nodes]\n    return new_nodes", "docstring": "Prepares a replacement AST that's safe to swap in for a node.\n\nArgs:\nreplaced: ast.AST, the node being replaced\nkey: Hashable, the key of the replacement AST\nReturns:\nast.AST, the replacement AST", "source": "github-repos"}
{"code": "def plot_spectra_overlapped(ss, title=None, setup=_default_setup):\n    plt.figure()\n    draw_spectra_overlapped(ss, title, setup)\n    plt.show()", "docstring": "Plots one or more spectra in the same plot.\n\nArgs:\nss: list of Spectrum objects\ntitle=None: window title\nsetup: PlotSpectrumSetup object", "source": "codesearchnet"}
{"code": "def attention_bias_proximal(length):\n  \n  r = tf.to_float(tf.range(length))\n  diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1)\n  return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0)", "docstring": "Bias for self-attention to encourage attention to close positions.\n\nArgs:\nlength: an integer scalar.\n\nReturns:\na Tensor with shape [1, 1, length, length]", "source": "juraj-google-style"}
{"code": "def get_likelihood(self, uni_matrix):\n    uni_dim = uni_matrix.shape[1]\n    num_edge = len(self.edges)\n    values = np.zeros([1, num_edge])\n    new_uni_matrix = np.empty([uni_dim, uni_dim])\n    for i in range(num_edge):\n        edge = self.edges[i]\n        (value, left_u, right_u) = edge.get_likelihood(uni_matrix)\n        new_uni_matrix[(edge.L, edge.R)] = left_u\n        new_uni_matrix[(edge.R, edge.L)] = right_u\n        values[(0, i)] = np.log(value)\n    return (np.sum(values), new_uni_matrix)", "docstring": "Compute likelihood of the tree given an U matrix.\n\nArgs:\nuni_matrix(numpy.array): univariate matrix to evaluate likelihood on.\n\nReturns:\ntuple[float, numpy.array]:\nlikelihood of the current tree, next level conditional univariate matrix", "source": "codesearchnet"}
{"code": "class AriaGroupedExpertsMLP(nn.Module):\n\n    def __init__(self, config: AriaTextConfig) -> None:\n        super().__init__()\n        self.config = config\n        self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)\n        self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)\n\n    def forward(self, permuted_tokens, tokens_per_expert):\n        \n        fc1_output = self.fc1(permuted_tokens, tokens_per_expert)\n        projection, gate = torch.chunk(fc1_output, 2, dim=-1)\n        fc1_output = nn.functional.silu(projection) * gate\n        fc2_output = self.fc2(fc1_output, tokens_per_expert)\n        return fc2_output", "docstring": "Grouped MLP module for Mixture of Experts.\n\nArgs:\nconfig (`AriaTextConfig`):\nConfiguration object for the model.", "source": "github-repos"}
{"code": "def _step(time, output_ta_t, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = tf.nest.pack_sequence_as(inputs, current_input)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_state = tf.nest.flatten(states)\n    flat_new_state = tf.nest.flatten(new_states)\n    for state, new_state in zip(flat_state, flat_new_state):\n        if isinstance(new_state, tf.Tensor):\n            new_state.set_shape(state.shape)\n    flat_output = tf.nest.flatten(output)\n    ta_index_to_write = time if return_all_outputs else 0\n    output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_output)))\n    new_states = tf.nest.pack_sequence_as(initial_states, flat_new_state)\n    return (time + 1, output_ta_t) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1,output_ta_t) + tuple(new_states)`", "source": "github-repos"}
{"code": "def get_instances(serials):\n    \n    results = []\n    for s in serials:\n        results.append(AndroidDevice(s))\n    return results", "docstring": "Create AndroidDevice instances from a list of serials.\n\nArgs:\nserials: A list of android device serials.\n\nReturns:\nA list of AndroidDevice objects.", "source": "juraj-google-style"}
{"code": "def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n    pts = pts - self._trans\n    return self._rots.invert_apply(pts)", "docstring": "Applies the inverse of the transformation to a coordinate tensor.\n\nArgs:\npts: A [*, 3] coordinate tensor\nReturns:\nThe transformed points.", "source": "github-repos"}
{"code": "def time_to_jump(self):\n    k_tot = (rate_prefactor * np.sum(self.p))\n    return ((- (1.0 / k_tot)) * math.log(random.random()))", "docstring": "The timestep until the next jump.\n\nArgs:\nNone\n\nReturns:\n(Float): The timestep until the next jump.", "source": "codesearchnet"}
{"code": "def get_type_name_in_language(cls, type_name, sub_type, language):\n    if (language in cls.type_methods_cache):\n        m = cls.type_methods_cache[language]\n        if (not m):\n            return type_name\n        return m(type_name)\n    (found, method) = load_language_plugins(language, 'get_type_name')\n    if found:\n        cls.type_methods_cache[language] = method\n        if method:\n            return method(type_name, sub_type)\n        else:\n            return type_name\n    module = importlib.import_module(('.lang.%s' % language), package='monolithe.generators')\n    if (not hasattr(module, 'get_type_name')):\n        cls.type_methods_cache[language] = None\n        return type_name\n    method = getattr(module, 'get_type_name')\n    cls.type_methods_cache[language] = method\n    return method(type_name, sub_type)", "docstring": "Get the type for the given language\n\nArgs:\ntype_name (str): the type to convert\nlanguage (str): the language to use\n\nReturns:\na type name in the given language\n\nExample:\nget_type_name_in_language(\"Varchar\", \"python\")\n>>> str", "source": "codesearchnet"}
{"code": "def get_handler(progname, fmt=None, datefmt=None, project_id=None, credentials=None, debug_thread_worker=False, **_):\n    builder = CloudLoggingHandlerBuilder(progname, fmt=fmt, datefmt=datefmt, project_id=project_id, credentials=credentials, debug_thread_worker=debug_thread_worker)\n    return builder.get_handler()", "docstring": "Helper function to create a Stackdriver handler.\n\nSee `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments\nand supported keyword arguments.\n\nReturns:\n(obj): Instance of `google.cloud.logging.handlers.\nCloudLoggingHandler`", "source": "codesearchnet"}
{"code": "def create_temp_parfile(self):\n    output_dir = os.path.dirname(self.output_filename)\n    return tempfile.NamedTemporaryFile(dir=output_dir, delete=False)", "docstring": "Create the first part of a parfile.\n\nReturns:\nA file-like object with a 'name' attribute", "source": "github-repos"}
{"code": "def dot_distance(t1, t2, name=None):\n  \n  with tf.name_scope(name, 'dot_distance', [t1, t2]) as scope:\n    return -dot_product(t1, t2, name=scope)", "docstring": "dot \"distance\" between t1 and t2.\n\nArgs:\nt1: A tensor.\nt2: A tensor that is the same size as t1.\nname: Optional name for this op.\nReturns:\nThe dot distance between t1 and t2.", "source": "juraj-google-style"}
{"code": "def diff_prof(step):\n    \n    rbot, rtop = misc.get_rbounds(step)\n    rad = step.rprof['r'].values + rbot\n    tprof = step.rprof['Tmean'].values\n    diff = (tprof[:-1] - tprof[1:]) / (rad[1:] - rad[:-1])\n    \n    diff = np.insert(diff, 0, (1 - tprof[0]) / (rad[0] - rbot))\n    \n    diff = np.append(diff, tprof[-1] / (rtop - rad[-1]))\n    \n    return diff, np.append(rad, rtop)", "docstring": "Diffusion.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the diffusion and the radial position\nat which it is evaluated.", "source": "juraj-google-style"}
{"code": "def add_vcf_info(keyword, variant_line=None, variant_dict=None, annotation=None):\n    \n    logger = logging.getLogger(__name__)\n    \n    if annotation:\n        new_info = '{0}={1}'.format(keyword, annotation)\n    else:\n        new_info = keyword\n    \n    logger.debug(\"Adding new variant information {0}\".format(new_info))\n    \n    fixed_variant = None\n    \n    if variant_line:\n        logger.debug(\"Adding information to a variant line\")\n        splitted_variant = variant_line.rstrip('\\n').split('\\t')\n        logger.debug(\"Adding information to splitted variant line\")\n        old_info = splitted_variant[7]\n        if old_info == '.':\n            splitted_variant[7] = new_info\n        else:\n            splitted_variant[7] = \"{0};{1}\".format(splitted_variant[7], new_info)\n        \n        fixed_variant = '\\t'.join(splitted_variant)\n    \n    elif variant_dict:\n        logger.debug(\"Adding information to a variant dict\")\n        old_info = variant_dict['INFO']\n        if old_info == '.':\n            variant_dict['INFO'] = new_info\n        else:\n            variant_dict['INFO'] = \"{0};{1}\".format(old_info, new_info)\n        fixed_variant = variant_dict\n    \n    return fixed_variant", "docstring": "Add information to the info field of a vcf variant line.\n\nArguments:\nvariant_line (str): A vcf formatted variant line\nkeyword (str): The info field key\nannotation (str): If the annotation is a key, value pair\nthis is the string that represents the value\n\nReturns:\nfixed_variant : str if variant line, or dict if variant_dict", "source": "juraj-google-style"}
{"code": "def run(self, host: str = '0.0.0.0', port: int = 8080):\n        \n        self._loop.run_until_complete(self._configure_plugins())\n        web.run_app(self._app, host=host, port=port)", "docstring": "Start sirbot\n\nConfigure sirbot and start the aiohttp.web.Application\n\nArgs:\nhost (str): host\nport (int): port", "source": "juraj-google-style"}
{"code": "def do_phonefy(self, query, **kwargs):\n        \n        results = []\n\n        test = self.check_phonefy(query, kwargs)\n\n        if test:\n            r = {\n                \"type\": \"i3visio.phone\",\n                \"value\": self.platformName + \" - \" + query,\n                \"attributes\": []\n            }\n\n            try:\n                aux = {\n                    \"type\": \"i3visio.uri\",\n                    \"value\": self.createURL(query, mode=\"phonefy\"),\n                    \"attributes\": []\n                }\n                r[\"attributes\"].append(aux)\n            except:\n                pass\n\n            aux = {\n                \"type\": \"i3visio.platform\",\n                \"value\": self.platformName,\n                \"attributes\": []\n            }\n            r[\"attributes\"].append(aux)\n\n            \n            r[\"attributes\"] += self.process_phonefy(test)\n            results.append(r)\n            \n        return results", "docstring": "Verifying a phonefy query in this platform.\n\nThis might be redefined in any class inheriting from Platform.\n\nArgs:\n-----\nquery: The element to be searched.\n\nReturn:\n-------\nA list of elements to be appended.", "source": "juraj-google-style"}
{"code": "def report_clean(rows):\n    print('DCM REPORT CLEAN')\n    first = True\n    last = False\n    for row in rows:\n        if row and row[0] == 'Report Fields':\n            break\n    for row in rows:\n        if 'No data returned by the reporting service.' in row:\n            break\n        if not row or row[0] == 'Grand Total:':\n            break\n        if first:\n            try:\n                date_column = row.index('Date')\n                row[date_column] = 'Report_Day'\n            except ValueError:\n                pass\n            row = [column_header_sanitize(cell) for cell in row]\n        row = ['' if cell.strip() in ('(not set)', '-') else cell for cell in row]\n        yield row\n        first = False", "docstring": "Helper to fix DCM report issues for BigQuery and ensure schema compliance.\n\nMemory efficiently cleans each row by fixing:\n* Strips header and footer to preserve only data rows.\n* Changes 'Date' to 'Report_Day' to avoid using reserved name in BigQuery.\n* removes '-' as columns\n* Changes data format to match data studio if datastusio=True.\n\nUsage example:\n\n```\nfilename, report = report_file(...)\nrows = report_to_rows(report)\nrows = report_clean(rows)\n```\n\nArgs:\n* rows: (iterator) Rows to clean.\n\nReturns:\n* Iterator of cleaned rows.", "source": "github-repos"}
{"code": "def _make_fake_dataset_fn(initial_delay_us, remainder_delay_us):\n\n    def fake_dataset_fn(unused):\n        \n        del unused\n\n        def make_dataset(time_us, num_elements):\n            dataset = dataset_ops.Dataset.range(num_elements)\n            if time_us > 0:\n                dataset = dataset.apply(testing.sleep(time_us))\n            return dataset\n        if not initial_delay_us:\n            return make_dataset(remainder_delay_us, 100)\n        return make_dataset(initial_delay_us, 0).concatenate(make_dataset(remainder_delay_us, 100))\n    return fake_dataset_fn", "docstring": "Returns a dataset that emulates a remote storage data source.\n\nReturns a dataset factory which creates a dataset with 100 elements that\nemulates the performance characteristic of a file-based dataset stored in a\nremote storage. In particular, the first element will take an order of\nmagnitude longer to produce than the remaining elements (100ms vs. 1ms).\n\nArgs:\ninitial_delay_us: How long to wait before producing the first element.\nremainder_delay_us: How long to wait before producing subsequent elements.", "source": "github-repos"}
{"code": "def __init__(self, model: PreTrainedModel, max_batch_size: int=1, max_cache_len: int=4096):\n    super().__init__()\n    if not hasattr(model.config, 'use_cache') or model.config.use_cache is False:\n        raise ValueError('The model must have caching enabled to be performant.')\n    if hasattr(model.config, 'layer_types') and getattr(model.config, 'sliding_window', None) is not None:\n        self.model = TorchExportableModuleWithHybridCache(model, max_batch_size, max_cache_len)\n    else:\n        logging.info('Using `StaticCache` for export as `layer_types` is not specified or `sliding_window` is `null` in the config.')\n        self.model = TorchExportableModuleWithStaticCache(model)", "docstring": "Initializes the exportable module with `HybridCache`.\n\nArgs:\nmodel (`PreTrainedModel`): The pretrained model to wrap.\nmax_batch_size (int): Maximum batch size for the cache.\nmax_cache_len (int): Maximum sequence length for the cache.\n\nRaises:\nValueError: If the model is configured with a unsupported cache implementation.", "source": "github-repos"}
{"code": "def highlight_html(code: str) -> str:\n    theme = resource_utils.resource_import('static/highlight.css', module='etils.ecolab')\n    html_str = '\\n  {theme}\\n  <script src=\"\n    html_str = epy.dedent(html_str)\n    html_str = html_str.format(theme=theme, code=html.escape(code))\n    return html_str", "docstring": "Add Python syntax highlighting to a Python code string.\n\nUsage:\n\nExample:\n\n```python\n@dataclasses.dataclass\nclass A:\nx: int\n\ndef _repr_html_(self) -> str:\nfrom etils import ecolab  # Lazy-import ecolab\n\nreturn ecolab.highlight_html(repr(self))\n\n```\n\nArgs:\ncode: The string to wrap\n\nReturns:\nThe HTML string representation", "source": "github-repos"}
{"code": "def nPr(n, r):\n    \n    f = math.factorial\n    return int(f(n) / f(n-r))", "docstring": "Calculates nPr.\n\nArgs:\nn (int): total number of items.\nr (int): items to permute\n\nReturns:\nnPr.", "source": "juraj-google-style"}
{"code": "def unpause(self, container):\n        \n        url = self._url('/containers/{0}/unpause', container)\n        res = self._post(url)\n        self._raise_for_status(res)", "docstring": "Unpause all processes within a container.\n\nArgs:\ncontainer (str): The container to unpause", "source": "juraj-google-style"}
{"code": "def attributes(self, main_type, sub_type, unique_id, owner=None, params=None):\n        \n\n        params = params or {}\n\n        if owner:\n            params['owner'] = owner\n\n        if not sub_type:\n            url = '/v2/{}/{}/attributes'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}/attributes'.format(main_type, sub_type, unique_id)\n\n        for a in self._iterate(url, params, 'attribute'):\n            yield a", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def forward(self, input_points: Optional[Tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n    sparse_embeddings = None\n    batch_size = 1\n    target_device = self.shared_embedding.positional_embedding.device\n    if input_points is not None:\n        batch_size, point_batch_size = input_points.shape[:2]\n        if input_labels is None:\n            raise ValueError('If points are provided, labels must also be provided.')\n        point_embeddings = self._embed_points(input_points, input_labels, pad=input_boxes is None)\n        sparse_embeddings = point_embeddings\n    if input_boxes is not None:\n        batch_size = input_boxes.shape[0]\n        box_embeddings = self._embed_boxes(input_boxes)\n        if sparse_embeddings is None:\n            sparse_embeddings = box_embeddings\n        else:\n            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)\n    if input_masks is not None:\n        dense_embeddings = self.mask_embed(input_masks)\n    else:\n        dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1])\n    if sparse_embeddings is None:\n        sparse_embeddings = torch.zeros((batch_size, 1, 1, self.hidden_size), device=target_device)\n    return (sparse_embeddings, dense_embeddings)", "docstring": "Embeds different types of prompts, returning both sparse and dense embeddings.\n\nArgs:\npoints (`torch.Tensor`, *optional*):\npoint coordinates and labels to embed.\nboxes (`torch.Tensor`, *optional*):\nboxes to embed\nmasks (`torch.Tensor`, *optional*):\nmasks to embed", "source": "github-repos"}
{"code": "def name_based_save(mesh: layout_lib.Mesh, checkpoint_prefix: Union[str, tensor_lib.Tensor], name_tensor_dict: Dict[str, Union[tensor_lib.Tensor, tf_variables.Variable]]):\n    if not context.executing_eagerly():\n        raise ValueError('name based save must run eagerly.')\n    ordered_name_tensor_dict = name_tensor_dict\n    if not isinstance(name_tensor_dict, collections.OrderedDict):\n        ordered_name_tensor_dict = collections.OrderedDict(name_tensor_dict)\n    checkpoint_prefix = api.pack([checkpoint_prefix] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=0))\n    tensor_names = api.pack([list(ordered_name_tensor_dict.keys())] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))\n    sharded_save(mesh, file_prefix=checkpoint_prefix, tensor_names=tensor_names, shape_and_slices=[''] * len(ordered_name_tensor_dict), tensors=list(ordered_name_tensor_dict.values()))", "docstring": "Saves name based Tensor into a Checkpoint.\n\nThe function prepares the input dictionary to the format of a `sharded_save`,\nso that it can take advantage of DTensor SPMD based distributed save.\n\nSame as restore, the function only supports saving on the single mesh.\n\nArgs:\nmesh: The single mesh that all Tensors would be restored to.\ncheckpoint_prefix : The prefix of checkpoint to be restored.\nname_tensor_dict: A ordered dictionary of tensor_names to a DTensor. The\nDTensor shape/dtype must match the tensors being saved/restored for now.", "source": "github-repos"}
{"code": "def generate_identified_filename(filename: Path, identifier: str) -> Path:\n    return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)", "docstring": "Append a string-identifier at the end (before the extension, if any) to the provided filepath\n\nArgs:\nfilename: pathlib.Path The actual path object we would like to add an identifier suffix\nidentifier: The suffix to add\n\nReturns: String with concatenated identifier at the end of the filename", "source": "github-repos"}
{"code": "def _normalize_feature_columns(feature_columns):\n    if isinstance(feature_columns, fc_types.FeatureColumn):\n        feature_columns = [feature_columns]\n    if isinstance(feature_columns, collections_abc.Iterator):\n        feature_columns = list(feature_columns)\n    if isinstance(feature_columns, dict):\n        raise ValueError('Expected feature_columns to be iterable, found dict.')\n    for column in feature_columns:\n        if not isinstance(column, fc_types.FeatureColumn):\n            raise ValueError('Items of feature_columns must be a FeatureColumn. Given (type {}): {}.'.format(type(column), column))\n    if not feature_columns:\n        raise ValueError('feature_columns must not be empty.')\n    name_to_column = {}\n    for column in feature_columns:\n        if column.name in name_to_column:\n            raise ValueError('Duplicate feature column name found for columns: {} and {}. This usually means that these columns refer to same base feature. Either one must be discarded or a duplicated but renamed item must be inserted in features dict.'.format(column, name_to_column[column.name]))\n        name_to_column[column.name] = column\n    return sorted(feature_columns, key=lambda x: x.name)", "docstring": "Normalizes the `feature_columns` input.\n\nThis method converts the `feature_columns` to list type as best as it can. In\naddition, verifies the type and other parts of feature_columns, required by\ndownstream library.\n\nArgs:\nfeature_columns: The raw feature columns, usually passed by users.\n\nReturns:\nThe normalized feature column list.\n\nRaises:\nValueError: for any invalid inputs, such as empty, duplicated names, etc.", "source": "github-repos"}
{"code": "def add_variant(self, variant):\n        \n        LOG.debug(\"Upserting variant: {0}\".format(variant.get('_id')))\n        \n        update = self._get_update(variant)\n        \n        message = self.db.variant.update_one(\n            {'_id': variant['_id']},\n            update,\n            upsert=True\n        )\n        if message.modified_count == 1:\n            LOG.debug(\"Variant %s was updated\", variant.get('_id'))\n        else:\n            LOG.debug(\"Variant was added to database for first time\")\n        return", "docstring": "Add a variant to the variant collection\n\nIf the variant exists we update the count else we insert a new variant object.\n\nArgs:\nvariant (dict): A variant dictionary", "source": "juraj-google-style"}
{"code": "def take_at_most_n_seconds(time_s, func, *args, **kwargs):\n    thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n    thread.start()\n    thread.join(time_s)\n    if thread.is_alive():\n        return False\n    return True", "docstring": "A function that returns whether a function call took less than time_s.\n\nNOTE: The function call is not killed and will run indefinitely if hung.\n\nArgs:\ntime_s: Maximum amount of time to take.\nfunc: Function to call.\n*args: Arguments to call the function with.\n**kwargs: Keyword arguments to call the function with.\nReturns:\nTrue if the function finished in less than time_s seconds.", "source": "codesearchnet"}
{"code": "def split(self, desired_bundle_size: int, start_position: Optional[Any]=None, stop_position: Optional[Any]=None) -> Iterator[SourceBundle]:\n    raise NotImplementedError", "docstring": "Splits the source into a set of bundles.\n\nBundles should be approximately of size ``desired_bundle_size`` bytes.\n\nArgs:\ndesired_bundle_size: the desired size (in bytes) of the bundles returned.\nstart_position: if specified the given position must be used as the\nstarting position of the first bundle.\nstop_position: if specified the given position must be used as the ending\nposition of the last bundle.\nReturns:\nan iterator of objects of type 'SourceBundle' that gives information about\nthe generated bundles.", "source": "github-repos"}
{"code": "def verify_mfa(self, mfa_token):\n        \n        response = self.resource.verify_mfa({'mfa_token': mfa_token})\n        return (response['valid'] == True or response['valid'] == 'true')", "docstring": "Verify an SMS or TOTP MFA token for this user.\n\nArgs:\nmfa_token (str): An alphanumeric code from either a User's TOTP\napplication or sent to them via SMS.\n\nReturns:\nTrue if the mfa_token is valid, False otherwise.", "source": "juraj-google-style"}
{"code": "def compile(self, ops):\n        \n\n        def _compile():\n            code = []\n\n            for op in ops:\n                if isinstance(op, SyscallInvoke):\n                    code.extend(self.syscall(op))\n                elif isinstance(op, LoadRegister):\n                    code.extend(self.reg_load(op.register, op.value))\n                elif isinstance(op, str):\n                    code.extend(op.split('\\n'))\n                else:\n                    raise ValueError('No idea how to assemble \"%s\"' % repr(op))\n            return ['\\t%s' % line for line in code]\n\n        \n        _compile()\n        return '\\n'.join(self.finalize(self.data_finalizer(_compile(), self.data))) + '\\n'", "docstring": "Translate a list of operations into its assembler source.\n\nArguments:\nops(list): A list of shellcode operations.\n\nReturns:\nstr: The assembler source code that implements the shellcode.", "source": "juraj-google-style"}
{"code": "def multiplier_with_docstring(num, rate=2):\n    return num * rate", "docstring": "Multiplies num by rate.\n\nArgs:\nnum (int): the num you want to multiply\nrate (int): the rate for multiplication\nReturns:\nMultiplication of num by rate", "source": "github-repos"}
{"code": "def forecast(stl, fc_func, steps=10, seasonal=False, **fc_func_kwargs):\n    forecast_array = np.array([])\n    trend_array = stl.trend\n    for step in range(steps):\n        pred = fc_func(np.append(trend_array, forecast_array), **fc_func_kwargs)\n        forecast_array = np.append(forecast_array, pred)\n    col_name = fc_func.__name__\n    observed_timedelta = (stl.observed.index[(- 1)] - stl.observed.index[(- 2)])\n    forecast_idx_start = (stl.observed.index[(- 1)] + observed_timedelta)\n    forecast_idx = pd.date_range(start=forecast_idx_start, periods=steps, freq=pd.tseries.frequencies.to_offset(observed_timedelta))\n    if seasonal:\n        seasonal_ix = 0\n        max_correlation = (- np.inf)\n        detrended_array = np.asanyarray((stl.observed - stl.trend)).squeeze()\n        for (i, x) in enumerate(stl.period_averages):\n            if (i == 0):\n                detrended_slice = detrended_array[(- len(stl.period_averages)):]\n            else:\n                detrended_slice = detrended_array[(- (len(stl.period_averages) + i)):(- i)]\n            this_correlation = np.correlate(detrended_slice, stl.period_averages)[0]\n            if (this_correlation > max_correlation):\n                max_correlation = this_correlation\n                seasonal_ix = i\n        rolled_period_averages = np.roll(stl.period_averages, (- seasonal_ix))\n        tiled_averages = np.tile(rolled_period_averages, ((steps \n        forecast_array += tiled_averages\n        col_name += '+seasonal'\n    forecast_frame = pd.DataFrame(data=forecast_array, index=forecast_idx)\n    forecast_frame.columns = [col_name]\n    return forecast_frame", "docstring": "Forecast the given decomposition ``stl`` forward by ``steps`` steps using the forecasting\nfunction ``fc_func``, optionally including the calculated seasonality.\n\nThis is an additive model, Y[t] = T[t] + S[t] + e[t]\n\nArgs:\nstl (a modified statsmodels.tsa.seasonal.DecomposeResult): STL decomposition of observed time\nseries created using the ``stldecompose.decompose()`` method.\nfc_func (function): Function which takes an array of observations and returns a single\nvalued forecast for the next point.\nsteps (int, optional): Number of forward steps to include in the forecast\nseasonal (bool, optional): Include seasonal component in forecast\nfc_func_kwargs: keyword arguments\nAll remaining arguments are passed to the forecasting function ``fc_func``\n\nReturns:\nforecast_frame (pd.Dataframe): A ``pandas.Dataframe`` containing forecast values and a\nDatetimeIndex matching the observed index.", "source": "codesearchnet"}
{"code": "def get_unverified_claims(token):\n    \n    try:\n        claims = jws.get_unverified_claims(token)\n    except:\n        raise JWTError('Error decoding token claims.')\n\n    try:\n        claims = json.loads(claims.decode('utf-8'))\n    except ValueError as e:\n        raise JWTError('Invalid claims string: %s' % e)\n\n    if not isinstance(claims, Mapping):\n        raise JWTError('Invalid claims string: must be a json object')\n\n    return claims", "docstring": "Returns the decoded claims without verification of any kind.\n\nArgs:\ntoken (str): A signed JWT to decode the headers from.\n\nReturns:\ndict: The dict representation of the token claims.\n\nRaises:\nJWTError: If there is an exception decoding the token.", "source": "juraj-google-style"}
{"code": "def star(self, input_string):\n        \n        if input_string != self.epsilon and input_string != self.empty:\n            return \"(\" + input_string + \")*\"\n        else:\n            return \"\"", "docstring": "Kleene star operation\nArgs:\ninput_string (str): The string that the kleene star will be made\nReturns:\nstr: The applied Kleene star operation on the input string", "source": "juraj-google-style"}
{"code": "def play_human(env):\n    try:\n        play(env, fps=env.metadata['video.frames_per_second'])\n    except KeyboardInterrupt:\n        pass\n    env.close()", "docstring": "Play the environment using keyboard as a human.\n\nArgs:\nenv (gym.Env): the initialized gym environment to play\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def index_of_coincidence(*texts):\n    if (not texts):\n        raise ValueError('texts must not be empty')\n    return statistics.mean((_calculate_index_of_coincidence(frequency_analyze(text), len(text)) for text in texts))", "docstring": "Calculate the index of coincidence for one or more ``texts``.\nThe results are averaged over multiple texts to return the delta index of coincidence.\n\nExamples:\n>>> index_of_coincidence(\"aabbc\")\n0.2\n\n>>> index_of_coincidence(\"aabbc\", \"abbcc\")\n0.2\n\nArgs:\n*texts (variable length argument list): The texts to analyze\n\nReturns:\nDecimal value of the index of coincidence\n\nRaises:\nValueError: If texts is empty\nValueError: If any text is less that 2 character long", "source": "codesearchnet"}
{"code": "def run_shell_command(state, host, command, get_pty=False, timeout=None, print_output=False, **command_kwargs):\n    command = make_command(command, **command_kwargs)\n    logger.debug('--> Running command on localhost: {0}'.format(command))\n    if print_output:\n        print('{0}>>> {1}'.format(host.print_prefix, command))\n    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n    stdout_reader = gevent.spawn(read_buffer, process.stdout, print_output=print_output, print_func=(lambda line: '{0}{1}'.format(host.print_prefix, line)))\n    stderr_reader = gevent.spawn(read_buffer, process.stderr, print_output=print_output, print_func=(lambda line: '{0}{1}'.format(host.print_prefix, click.style(line, 'red'))))\n    greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)\n    if (len(greenlets) != 2):\n        stdout_reader.kill()\n        stderr_reader.kill()\n        raise timeout_error()\n    stdout = stdout_reader.get()\n    stderr = stderr_reader.get()\n    logger.debug('--> Waiting for exit status...')\n    process.wait()\n    process.stdout.close()\n    logger.debug('--> Command exit status: {0}'.format(process.returncode))\n    return ((process.returncode == 0), stdout, stderr)", "docstring": "Execute a command on the local machine.\n\nArgs:\nstate (``pyinfra.api.State`` obj): state object for this command\nhostname (string): hostname of the target\ncommand (string): actual command to execute\nsudo (boolean): whether to wrap the command with sudo\nsudo_user (string): user to sudo to\nget_pty (boolean): whether to get a PTY before executing the command\nenv (dict): envrionment variables to set\ntimeout (int): timeout for this command to complete before erroring\n\nReturns:\ntuple: (exit_code, stdout, stderr)\nstdout and stderr are both lists of strings from each buffer.", "source": "codesearchnet"}
{"code": "def get_events_for_blocks(self, blocks, subscriptions):\n    events = []\n    for blkw in blocks:\n        events.extend(self.get_events_for_block(blkw, subscriptions))\n    return events", "docstring": "Get a list of events associated with all the blocks.\n\nArgs:\nblocks (list of BlockWrapper): The blocks to search for events that\nmatch each subscription.\nsubscriptions (list of EventSubscriptions): EventFilter and\nevent type to filter events.\n\nReturns (list of Events): The Events associated which each block id.\n\nRaises:\nKeyError A receipt is missing from the receipt store.", "source": "codesearchnet"}
{"code": "def FormatSOAPDateTime(value):\n  \n  value_date = value['date']\n  return '%s-%s-%s %s:%s:%s (%s)' % (\n      value_date['year'], value_date['month'], value_date['day'],\n      value['hour'], value['minute'], value['second'], value['timeZoneId'])", "docstring": "Format a SOAP DateTime object for printing.\n\nArgs:\nvalue: The DateTime object to format.\n\nReturns:\nA string representing the value.", "source": "juraj-google-style"}
{"code": "def to_valid_state_vector(state_rep: Union[(int, np.ndarray)], num_qubits: int, dtype: Type[np.number]=np.complex64) -> np.ndarray:\n    if isinstance(state_rep, np.ndarray):\n        if (len(state_rep) != (2 ** num_qubits)):\n            raise ValueError('initial state was of size {} but expected state for {} qubits'.format(len(state_rep), num_qubits))\n        state = state_rep\n    elif isinstance(state_rep, int):\n        if (state_rep < 0):\n            raise ValueError('initial_state must be positive')\n        elif (state_rep >= (2 ** num_qubits)):\n            raise ValueError('initial state was {} but expected state for {} qubits'.format(state_rep, num_qubits))\n        else:\n            state = np.zeros((2 ** num_qubits), dtype=dtype)\n            state[state_rep] = 1.0\n    else:\n        raise TypeError('initial_state was not of type int or ndarray')\n    validate_normalized_state(state, num_qubits, dtype)\n    return state", "docstring": "Verifies the state_rep is valid and converts it to ndarray form.\n\nThis method is used to support passing in an integer representing a\ncomputational basis state or a full wave function as a representation of\na state.\n\nArgs:\nstate_rep: If an int, the state returned is the state corresponding to\na computational basis state. If an numpy array this is the full\nwave function. Both of these are validated for the given number\nof qubits, and the state must be properly normalized and of the\nappropriate dtype.\nnum_qubits: The number of qubits for the state. The state_rep must be\nvalid for this number of qubits.\ndtype: The numpy dtype of the state, will be used when creating the\nstate for a computational basis state, or validated against if\nstate_rep is a numpy array.\n\nReturns:\nA numpy ndarray corresponding to the state on the given number of\nqubits.\n\nRaises:\nValueError if the state is not valid.", "source": "codesearchnet"}
{"code": "def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None, flag_values=FLAGS, **args):\n    parser = FloatParser(lower_bound, upper_bound)\n    serializer = ArgumentSerializer()\n    DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)", "docstring": "Registers a flag whose value can be a list of arbitrary floats.\n\nUse the flag on the command line multiple times to place multiple\nfloat values into the list.  The 'default' may be a single float\n(which will be converted into a single-element list) or a list of\nfloats.\n\nArgs:\nname: A string, the flag name.\ndefault: The default value of the flag.\nhelp: A help string.\nlower_bound: float, min values of the flag.\nupper_bound: float, max values of the flag.\nflag_values: FlagValues object with which the flag will be registered.\n**args: Dictionary with extra keyword args that are passed to the\nFlag __init__.", "source": "codesearchnet"}
{"code": "def _parse_mode(self, config):\n        \n        value = re.search(r'switchport mode (\\w+)', config, re.M)\n        return dict(mode=value.group(1))", "docstring": "Scans the specified config and parses the switchport mode value\n\nArgs:\nconfig (str): The interface configuration block to scan\n\nReturns:\ndict: A Python dict object with the value of switchport mode.\nThe dict returned is intended to be merged into the resource\ndict", "source": "juraj-google-style"}
{"code": "def get_path(self, path: str, data: dict) -> Tuple[dict, dict]:\n        \n        path = self._insert_vars(path, data)\n        path = self.BASE_URL + path\n        data = self.cache.check(path)\n        if data:\n            return data\n        self._try_refresh_access_token()\n        r = self.session.get(path)\n        self.cache.set(r)\n        return r.json()", "docstring": "Queries the ESI by an endpoint URL.\n\nThis method is not marked \"private\" as it _can_ be used\nby consuming code, but it's probably easier to call the\n`get_op` method instead.\n\nArgs:\npath: raw ESI URL path\ndata: data to insert into the URL\n\nReturns:\nESI data", "source": "juraj-google-style"}
{"code": "def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1, overwrite=False, max_threads=10):\n    if (not isinstance(dir_obj, aff4.AFF4Volume)):\n        return\n    thread_pool = threadpool.ThreadPool.Factory('Downloader', max_threads)\n    thread_pool.Start()\n    for sub_file_entry in dir_obj.OpenChildren():\n        path_elements = [target_dir]\n        sub_target_dir = u'/'.join(path_elements)\n        try:\n            if isinstance(sub_file_entry, aff4.AFF4Stream):\n                args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token, overwrite)\n                thread_pool.AddTask(target=CopyAFF4ToLocal, args=args, name='Downloader')\n            elif ('Container' in sub_file_entry.behaviours):\n                if (depth >= max_depth):\n                    continue\n                try:\n                    os.makedirs(sub_target_dir)\n                except OSError:\n                    pass\n                RecursiveDownload(sub_file_entry, sub_target_dir, overwrite=overwrite, depth=(depth + 1))\n        except IOError:\n            logging.exception('Unable to download %s', sub_file_entry.urn)\n        finally:\n            sub_file_entry.Close()\n    if (depth <= 1):\n        thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)", "docstring": "Recursively downloads a file entry to the target path.\n\nArgs:\ndir_obj: An aff4 object that contains children.\ntarget_dir: Full path of the directory to write to.\nmax_depth: Depth to download to. 1 means just the directory itself.\ndepth: Current depth of recursion.\noverwrite: Should we overwrite files that exist.\nmax_threads: Use this many threads to do the downloads.", "source": "codesearchnet"}
{"code": "def load_template(path_or_buffer):\n    from itertools import groupby\n    from operator import itemgetter\n    path_or_buffer = _stringify_path(path_or_buffer)\n    if is_file_like(path_or_buffer):\n        templates = json.load(path_or_buffer)\n    else:\n        with open(path_or_buffer, 'r') as f:\n            templates = json.load(f)\n    options = []\n    grouper = itemgetter('page', 'extraction_method')\n    for (key, grp) in groupby(sorted(templates, key=grouper), grouper):\n        tmp_options = [_convert_template_option(e) for e in grp]\n        if (len(tmp_options) == 1):\n            options.append(tmp_options[0])\n            continue\n        option = tmp_options[0]\n        areas = [e.get('area') for e in tmp_options]\n        option['area'] = areas\n        option['multiple_tables'] = True\n        options.append(option)\n    return options", "docstring": "Build tabula-py option from template file\n\nArgs:\nfile_like_obj: File like object of Tabula app template\n\nReturns:\n`obj`:dict: tabula-py options", "source": "codesearchnet"}
{"code": "def add_metric(self, value, aggregation=None, name=None):\n    if aggregation is not None and aggregation != 'mean':\n        raise ValueError('We currently support only `mean` sample-wise metric aggregation. You provided aggregation=`%s`' % aggregation)\n    from_metric_obj = hasattr(value, '_metric_obj')\n    is_symbolic = tf_utils.is_symbolic_tensor(value)\n    in_call_context = base_layer_utils.call_context().in_call\n    if name is None and (not from_metric_obj):\n        raise ValueError(\"Please provide a name for your metric like `self.add_metric(tf.reduce_sum(inputs), name='mean_activation', aggregation='mean')`\")\n    elif from_metric_obj:\n        name = value._metric_obj.name\n    if in_call_context:\n        self._symbolic_add_metric(value, aggregation, name)\n    else:\n        if not is_symbolic:\n            raise ValueError('Expected a symbolic Tensor for the metric value, received: ' + str(value))\n        if not getattr(self, '_is_graph_network', False):\n            with backend.get_graph().as_default():\n                self._symbolic_add_metric(value, aggregation, name)\n            return\n        if from_metric_obj:\n            raise ValueError('Using the result of calling a `Metric` object when calling `add_metric` on a Functional Model is not supported. Please pass the Tensor to monitor directly.')\n        self._graph_network_add_metric(value, aggregation, name)", "docstring": "Adds metric tensor to the layer.\n\nArgs:\nvalue: Metric tensor.\naggregation: Sample-wise metric reduction function. If `aggregation=None`,\nit indicates that the metric tensor provided has been aggregated\nalready. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by\n`model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the\ngiven metric tensor will be sample-wise reduced using `mean` function.\neg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',\naggregation='mean')`.\nname: String metric name.\n\nRaises:\nValueError: If `aggregation` is anything other than None or `mean`.", "source": "github-repos"}
{"code": "def cast(x, dtype):\n    dtype = backend.standardize_dtype(dtype)\n    if any_symbolic_tensors((x,)):\n        return Cast(dtype=dtype)(x)\n    return backend.core.cast(x, dtype)", "docstring": "Cast a tensor to the desired dtype.\n\nArgs:\nx: A tensor or variable.\ndtype: The target type.\n\nReturns:\nA tensor of the specified `dtype`.\n\nExample:\n\n>>> x = keras.ops.arange(4)\n>>> x = keras.ops.cast(x, dtype=\"float16\")", "source": "github-repos"}
{"code": "def get_pipeline_options(project: str, job_name: str, mode: str, num_workers: int=cfg.NUM_WORKERS, streaming: bool=True) -> PipelineOptions:\n    job_name = f'{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}'\n    staging_bucket = f'gs:\n    dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': 'us-central1', 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py', 'streaming': streaming}\n    if num_workers:\n        dataflow_options.update({'num_workers': num_workers})\n    return PipelineOptions(flags=[], **dataflow_options)", "docstring": "Function to retrieve the pipeline options.\nArgs:\nproject: GCP project to run on\nmode: Indicator to run local, cloud or template\nnum_workers: Number of Workers for running the job parallely\nmax_num_workers: Maximum number of workers running the job parallely\nReturns:\nDataflow pipeline options", "source": "github-repos"}
{"code": "def _send_trace(self, chunk=None):\n    self._trace_sm_running = True\n    if (chunk is None):\n        chunk = self._next_tracing_chunk(20)\n    if ((chunk is None) or (len(chunk) == 0)):\n        self._trace_sm_running = False\n        return\n    try:\n        self._send_notification(TracingChar.value_handle, chunk)\n        self._defer(self._send_trace)\n    except bable_interface.BaBLEException as err:\n        if (err.packet.status == 'Rejected'):\n            time.sleep(0.05)\n            self._defer(self._send_trace, [chunk])\n        else:\n            self._audit('ErrorStreamingTrace')\n            self._logger.exception('Error while tracing data')", "docstring": "Stream tracing data to the ble client in 20 byte chunks\n\nArgs:\nchunk (bytearray): A chunk that should be sent instead of requesting a\nnew chunk from the pending reports.", "source": "codesearchnet"}
{"code": "def check_video_availability(request, video_id):\n    api = Api()\n    api.authenticate()\n    availability = api.check_upload_status(video_id)\n    if (availability is not True):\n        data = {'success': False}\n    else:\n        data = {'success': True}\n    return HttpResponse(json.dumps(data), content_type='application/json')", "docstring": "Controls the availability of the video. Newly uploaded videos are in processing stage.\nAnd others might be rejected.\n\nReturns:\njson response", "source": "codesearchnet"}
{"code": "def __init__(self, counter_factory, state_sampler, declaring_step, input_index):\n    super().__init__(counter_factory, state_sampler)\n    self.declaring_step = declaring_step\n    self.input_index = input_index\n    self.update_current_step()", "docstring": "Create a side input read counter.\n\nArgs:\ncounter_factory: A counters.CounterFactory to create byte counters.\nstate_sampler: A statesampler.StateSampler to transition into read states.\ndeclaring_step: A string with the step name of the step that directly\nreceives the side input initially.\ninput_index: The index of the side input in the list of inputs of the\ndeclaring step.\n\nThe side input is uniquely identified by (declaring_step, input_index);\nwhere declaring_step is the step that receives the PCollectionView as a\nside input, and input_index is the index of the PCollectionView within\nthe list of inputs.", "source": "github-repos"}
{"code": "def _eval_comparison(self, ident: tuple[str, int | slice | None], op: str, value: str | int | tuple[int, ...]) -> bool:\n    name, key = ident\n    if name == 'sys.version_info':\n        if key is None:\n            key = slice(None, None, None)\n        if isinstance(key, int) and (not isinstance(value, int)):\n            raise _ParseError('an element of sys.version_info must be compared to an integer')\n        if isinstance(key, slice) and (not _is_int_tuple(value)):\n            raise _ParseError('sys.version_info must be compared to a tuple of integers')\n        try:\n            actual = self._options.python_version[key]\n        except IndexError as e:\n            raise _ParseError(str(e)) from e\n        if isinstance(key, slice):\n            actual = _three_tuple(actual)\n            value = _three_tuple(value)\n    elif name == 'sys.platform':\n        if not isinstance(value, str):\n            raise _ParseError('sys.platform must be compared to a string')\n        valid_cmps = (cmp_slots.EQ, cmp_slots.NE)\n        if op not in valid_cmps:\n            raise _ParseError('sys.platform must be compared using %s or %s' % valid_cmps)\n        actual = self._options.platform\n    else:\n        raise _ParseError(f'Unsupported condition: {name!r}.')\n    return cmp_slots.COMPARES[op](actual, value)", "docstring": "Evaluate a comparison and return a bool.\n\nArgs:\nident: A tuple of a dotted name string and an optional __getitem__ key.\nop: One of the comparison operator strings in cmp_slots.COMPARES.\nvalue: The value to be compared against.\n\nReturns:\nThe boolean result of the comparison.\n\nRaises:\nParseError: If the comparison cannot be evaluated.", "source": "github-repos"}
{"code": "def _get_two_lines(f):\n    \n    l0 = f.readline()\n    l1 = f.readline()\n    return l0, l1", "docstring": "Get the first and second lines\nArgs:\nf (filelike): File that is opened for ascii.\n\nReturns:\nbytes", "source": "juraj-google-style"}
{"code": "def concurrent_exec(func, param_list):\n    with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:\n        future_to_params = {executor.submit(func, *p): p for p in param_list}\n        return_vals = []\n        for future in concurrent.futures.as_completed(future_to_params):\n            params = future_to_params[future]\n            try:\n                return_vals.append(future.result())\n            except Exception as exc:\n                logging.exception('{} generated an exception: {}'.format(params, traceback.format_exc()))\n                return_vals.append(exc)\n        return return_vals", "docstring": "Executes a function with different parameters pseudo-concurrently.\n\nThis is basically a map function. Each element (should be an iterable) in\nthe param_list is unpacked and passed into the function. Due to Python's\nGIL, there's no true concurrency. This is suited for IO-bound tasks.\n\nArgs:\nfunc: The function that parforms a task.\nparam_list: A list of iterables, each being a set of params to be\npassed into the function.\n\nReturns:\nA list of return values from each function execution. If an execution\ncaused an exception, the exception object will be the corresponding\nresult.", "source": "codesearchnet"}
{"code": "def set_channel_created(self, channel_link, channel_id):\n        \n        self.channel_link = channel_link\n        self.channel_id = channel_id\n        self.__record_progress(Status.PUBLISH_CHANNEL if config.PUBLISH else Status.DONE)", "docstring": "set_channel_created: records progress after creating channel on Kolibri Studio\nArgs:\nchannel_link (str): link to uploaded channel\nchannel_id (str): id of channel that has been uploaded\nReturns: None", "source": "juraj-google-style"}
{"code": "def validate(cls, mapper_spec):\n    \n    writer_spec = cls.get_params(mapper_spec, allow_old=False)\n\n    \n    if cls.BUCKET_NAME_PARAM not in writer_spec:\n      raise errors.BadWriterParamsError(\n          \"%s is required for Google Cloud Storage\" %\n          cls.BUCKET_NAME_PARAM)\n    try:\n      cloudstorage.validate_bucket_name(\n          writer_spec[cls.BUCKET_NAME_PARAM])\n    except ValueError, error:\n      raise errors.BadWriterParamsError(\"Bad bucket name, %s\" % (error))\n\n    \n    cls._generate_filename(writer_spec, \"name\", \"id\", 0)\n    cls._generate_filename(writer_spec, \"name\", \"id\", 0, 1, 0)", "docstring": "Validate mapper specification.\n\nArgs:\nmapper_spec: an instance of model.MapperSpec.\n\nRaises:\nBadWriterParamsError: if the specification is invalid for any reason such\nas missing the bucket name or providing an invalid bucket name.", "source": "juraj-google-style"}
{"code": "def get_cqz(self, callsign, timestamp=timestamp_now):\n    return self.get_all(callsign, timestamp)[const.CQZ]", "docstring": "Returns CQ Zone of a callsign\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: containing the callsign's CQ Zone\n\nRaises:\nKeyError: no CQ Zone found for callsign", "source": "codesearchnet"}
{"code": "def get_best_blockhash(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_BEST_BLOCK_HASH, id=id, endpoint=endpoint)", "docstring": "Get the hash of the highest block\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def targets(self, module):\n    if (module not in self.module_targets):\n        raise BuildError('Could not find module in targets()', module=module)\n    return [self.find(x, module) for x in self.module_targets[module]]", "docstring": "Find the targets for a given module.\n\nReturns:\nlist: A sequence of all of the targets for the specified module.", "source": "codesearchnet"}
{"code": "def debug(text):\n    frame = inspect.currentframe().f_back\n    module = frame.f_globals['__name__']\n    func = frame.f_code.co_name\n    msg = ('%s.%s: %s' % (module, func, text))\n    _LOGGER.debug(msg)", "docstring": "Log a message to syslog and stderr\n\nArgs:\ntext (str): The string object to print", "source": "codesearchnet"}
{"code": "def reminders_info(self, *, reminder: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'reminder': reminder})\n    return self.api_call('reminders.info', http_verb='GET', params=kwargs)", "docstring": "Gets information about a reminder.\n\nArgs:\nreminder (str): The ID of the reminder. e.g. 'Rm12345678'", "source": "codesearchnet"}
{"code": "def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TwitterAndroidContactEventData()\n    event_data.query = query\n    event_data.identifier = self._GetRowValue(query_hash, row, '_id')\n    event_data.user_identifier = self._GetRowValue(query_hash, row, 'user_id')\n    event_data.username = self._GetRowValue(query_hash, row, 'username')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.description = self._GetRowValue(query_hash, row, 'description')\n    event_data.web_url = self._GetRowValue(query_hash, row, 'web_url')\n    event_data.location = self._GetRowValue(query_hash, row, 'location')\n    event_data.followers = self._GetRowValue(query_hash, row, 'followers')\n    event_data.friends = self._GetRowValue(query_hash, row, 'friends')\n    event_data.statuses = self._GetRowValue(query_hash, row, 'statuses')\n    event_data.image_url = self._GetRowValue(query_hash, row, 'image_url')\n\n    timestamp = self._GetRowValue(query_hash, row, 'profile_created')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'updated')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_UPDATE)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'friendship_time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a status row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def load_text(self, text, tokenizer=None):\n    if tokenizer:\n        words = [x.lower() for x in tokenizer(text)]\n    else:\n        words = self.tokenize(text)\n    self._dictionary.update(words)\n    self._update_dictionary()", "docstring": "Load text from which to generate a word frequency list\n\nArgs:\ntext (str): The text to be loaded\ntokenizer (function): The function to use to tokenize a string", "source": "codesearchnet"}
{"code": "def configure_vrf(self, vrf_name, commands):\n    commands = make_iterable(commands)\n    commands.insert(0, ('vrf definition %s' % vrf_name))\n    return self.configure(commands)", "docstring": "Configures the specified VRF using commands\n\nArgs:\nvrf_name (str): The VRF name to configure\ncommands: The list of commands to configure\n\nReturns:\nTrue if the commands completed successfully", "source": "codesearchnet"}
{"code": "def deserialize(proto):\n    _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None))\n    if type_registrations is not None:\n        for type_registration in type_registrations:\n            if type_registration.should_load(proto):\n                return (type_registration.from_proto(proto), type_registration.setter)\n    return None", "docstring": "Create a trackable object from a SavedUserObject proto.\n\nArgs:\nproto: A SavedUserObject to deserialize.\n\nReturns:\nA tuple of (trackable, assignment_fn) where assignment_fn has the same\nsignature as setattr and should be used to add dependencies to\n`trackable` when they are available.", "source": "github-repos"}
{"code": "def do_post(endpoint, body, access_token):\n    \n    headers = {\"content-type\": \"application/json\", \"Authorization\": 'Bearer ' + access_token}\n    headers['User-Agent'] = get_user_agent()\n    return requests.post(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP POST request and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\nbody (str): JSON body of information to post.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _find_human_readable_labels(synsets, synset_to_human):\n    humans = []\n    for s in synsets:\n        assert (s in synset_to_human), ('Failed to find: %s' % s)\n        humans.append(synset_to_human[s])\n    return humans", "docstring": "Build a list of human-readable labels.\n\nArgs:\nsynsets: list of strings; each string is a unique WordNet ID.\nsynset_to_human: dict of synset to human labels, e.g.,\n'n02119022' --> 'red fox, Vulpes vulpes'\n\nReturns:\nList of human-readable strings corresponding to each synset.", "source": "codesearchnet"}
{"code": "def finish(self, exitcode):\n    self._queue.put(self.Finish(exitcode))\n    self._thread.join()", "docstring": "Cleans up.\n\nAnything pushed after finish will be dropped.\n\nArgs:\nexitcode: The exitcode of the watched process.", "source": "codesearchnet"}
{"code": "def set_current(self, current):\n        \n        self.current = current\n        self.input = current.input\n        \n        \n        self.output = current.output\n        self.cmd = current.task_data['cmd']\n\n        if self.cmd and NEXT_CMD_SPLITTER in self.cmd:\n            self.cmd, self.next_cmd = self.cmd.split(NEXT_CMD_SPLITTER)\n        else:\n            self.next_cmd = None", "docstring": "Creates some aliases for attributes of ``current``.\n\nArgs:\ncurrent: :attr:`~zengine.engine.WFCurrent` object.", "source": "juraj-google-style"}
{"code": "def to_cmd_args(mapping):  \n    \n\n    sorted_keys = sorted(mapping.keys())\n\n    def arg_name(obj):\n        string = _decode(obj)\n        if string:\n            return u'--%s' % string if len(string) > 1 else u'-%s' % string\n        else:\n            return u''\n\n    arg_names = [arg_name(argument) for argument in sorted_keys]\n\n    def arg_value(value):\n        if hasattr(value, 'items'):\n            map_items = ['%s=%s' % (k, v) for k, v in sorted(value.items())]\n            return ','.join(map_items)\n        return _decode(value)\n\n    arg_values = [arg_value(mapping[key]) for key in sorted_keys]\n\n    items = zip(arg_names, arg_values)\n\n    return [item for item in itertools.chain.from_iterable(items)]", "docstring": "Transform a dictionary in a list of cmd arguments.\nExample:\n>>>args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25})\n>>>\n>>>print(args)\n['--model_dir', '/opt/ml/model', '--batch_size', 25]\nArgs:\nmapping (dict[str, object]): A Python mapping.\nReturns:\n(list): List of cmd arguments", "source": "juraj-google-style"}
{"code": "class TimmWrapperImageProcessor(BaseImageProcessor):\n    main_input_name = 'pixel_values'\n\n    def __init__(self, pretrained_cfg: Dict[str, Any], architecture: Optional[str]=None, **kwargs):\n        requires_backends(self, 'timm')\n        super().__init__(architecture=architecture)\n        self.data_config = timm.data.resolve_data_config(pretrained_cfg, model=None, verbose=False)\n        self.val_transforms = timm.data.create_transform(**self.data_config, is_training=False)\n        self.train_transforms = timm.data.create_transform(**self.data_config, is_training=True)\n        self._not_supports_tensor_input = any((transform.__class__.__name__ == 'ToTensor' for transform in self.val_transforms.transforms))\n\n    def to_dict(self) -> Dict[str, Any]:\n        \n        output = super().to_dict()\n        output.pop('train_transforms', None)\n        output.pop('val_transforms', None)\n        output.pop('_not_supports_tensor_input', None)\n        return output\n\n    @classmethod\n    def get_image_processor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n        \n        image_processor_filename = kwargs.pop('image_processor_filename', 'config.json')\n        return super().get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs)\n\n    def preprocess(self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]]='pt') -> BatchFeature:\n        \n        if return_tensors != 'pt':\n            raise ValueError(f\"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}\")\n        if self._not_supports_tensor_input and isinstance(images, torch.Tensor):\n            images = images.cpu().numpy()\n        if isinstance(images, torch.Tensor):\n            images = self.val_transforms(images)\n            images = images.unsqueeze(0) if images.ndim == 3 else images\n        else:\n            images = make_list_of_images(images)\n            images = [to_pil_image(image) for image in images]\n            images = torch.stack([self.val_transforms(image) for image in images])\n        return BatchFeature({'pixel_values': images}, tensor_type=return_tensors)\n\n    def save_pretrained(self, *args, **kwargs):\n        logger.warning_once('The `save_pretrained` method is disabled for TimmWrapperImageProcessor. The image processor configuration is saved directly in `config.json` when `save_pretrained` is called for saving the model.')", "docstring": "Wrapper class for timm models to be used within transformers.\n\nArgs:\npretrained_cfg (`Dict[str, Any]`):\nThe configuration of the pretrained model used to resolve evaluation and\ntraining transforms.\narchitecture (`Optional[str]`, *optional*):\nName of the architecture of the model.", "source": "github-repos"}
{"code": "def from_lengths_and_angles(abc: List[float], ang: List[float]):\n        \n        return Lattice.from_parameters(abc[0], abc[1], abc[2], ang[0], ang[1], ang[2])", "docstring": "Create a Lattice using unit cell lengths and angles (in degrees).\n\nArgs:\nabc (3x1 array): Lattice parameters, e.g. (4, 4, 5).\nang (3x1 array): Lattice angles in degrees, e.g., (90,90,120).\n\nReturns:\nA Lattice with the specified lattice parameters.", "source": "juraj-google-style"}
{"code": "def find_sanitiser_nodes(sanitiser, sanitisers_in_file):\n    for sanitiser_tuple in sanitisers_in_file:\n        if (sanitiser == sanitiser_tuple.trigger_word):\n            (yield sanitiser_tuple.cfg_node)", "docstring": "Find nodes containing a particular sanitiser.\n\nArgs:\nsanitiser(string): sanitiser to look for.\nsanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser.\n\nReturns:\nIterable of sanitiser nodes.", "source": "codesearchnet"}
{"code": "def load(self, data):\n        \n        resp = self.client.api.load_image(data)\n        images = []\n        for chunk in resp:\n            if 'stream' in chunk:\n                match = re.search(\n                    r'(^Loaded image ID: |^Loaded image: )(.+)$',\n                    chunk['stream']\n                )\n                if match:\n                    image_id = match.group(2)\n                    images.append(image_id)\n            if 'error' in chunk:\n                raise ImageLoadError(chunk['error'])\n\n        return [self.get(i) for i in images]", "docstring": "Load an image that was previously saved using\n:py:meth:`~docker.models.images.Image.save` (or ``docker save``).\nSimilar to ``docker load``.\n\nArgs:\ndata (binary): Image data to be loaded.\n\nReturns:\n(list of :py:class:`Image`): The images.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _parse(self, stream):\n        \n        builddata = json.load(stream)\n        log.debug('This is a JSON build file.')\n\n        if 'targets' not in builddata:\n            log.warn('Warning: No targets defined here.')\n            return\n\n        for tdata in builddata['targets']:\n            \n            target = address.new(target=tdata.pop('name'),\n                                 repo=self.target.repo,\n                                 path=self.target.path)\n            \n            if target in self.node and 'target_obj' in self.node[target]:\n                raise error.ButcherError(\n                    'Target is defined more than once: %s', target)\n\n            rule_obj = targets.new(name=target,\n                                   ruletype=tdata.pop('type'),\n                                   **tdata)\n\n            log.debug('New target: %s', target)\n            self.add_node(target, {'target_obj': rule_obj})\n\n            \n            for dep in rule_obj.composed_deps() or []:\n                d_target = address.new(dep)\n                if not d_target.repo:  \n                    d_target.repo = self.target.repo\n                if d_target.repo == self.target.repo and not d_target.path:\n                    d_target.path = self.target.path\n                if d_target not in self.nodes():\n                    self.add_node(d_target)\n                log.debug('New dep: %s -> %s', target, d_target)\n                self.add_edge(target, d_target)", "docstring": "Parse a JSON BUILD file.\n\nArgs:\nbuilddata: dictionary of buildfile data\nreponame: name of the repo that it came from\npath: directory path within the repo", "source": "juraj-google-style"}
{"code": "def _preprocess_conv3d_input(x, data_format):\n    tf_data_format = 'NDHWC'\n    if data_format == 'channels_first':\n        if not _has_nchw_support():\n            x = array_ops.transpose(x, (0, 2, 3, 4, 1))\n        else:\n            tf_data_format = 'NCDHW'\n    return (x, tf_data_format)", "docstring": "Transpose and cast the input before the conv3d.\n\nArgs:\nx: input tensor.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def get(self):\n    raise NotImplementedError()", "docstring": "Get the current tracking value.\n\nReturns:\nThe current tracked value, the type of which depends on the specific\ntracker implementation.", "source": "github-repos"}
{"code": "def run(self):\n    qclog = open(self.qclog_file, 'w')\n    p = subprocess.Popen(self.current_command, stdout=qclog)\n    return p", "docstring": "Perform the actual QChem run.\n\nReturns:\n(subprocess.Popen) Used for monitoring.", "source": "codesearchnet"}
{"code": "def num_nodes(self, leaves=True, internal=True):\n        \n        if not isinstance(leaves, bool):\n            raise TypeError(\"leaves must be a bool\")\n        if not isinstance(internal, bool):\n            raise TypeError(\"internal must be a bool\")\n        num = 0\n        for node in self.traverse_preorder():\n            if (leaves and node.is_leaf()) or (internal and not node.is_leaf()):\n                num += 1\n        return num", "docstring": "Compute the total number of selected nodes in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``\n\nReturns:\n``int``: The total number of selected nodes in this ``Tree``", "source": "juraj-google-style"}
{"code": "def plot_dendrogram(ax, obj, show_diameters=True):\n    dnd = Dendrogram(obj, show_diameters=show_diameters)\n    dnd.generate()\n    _render_dendrogram(dnd, ax, 0.0)\n    ax.set_title('Morphology Dendrogram')\n    ax.set_xlabel('micrometers (um)')\n    ax.set_ylabel('micrometers (um)')\n    ax.set_aspect('auto')\n    ax.legend()", "docstring": "Dendrogram of `obj`\n\nArgs:\nobj: Neuron or tree \\\nneurom.Neuron, neurom.Tree\nshow_diameters : boolean \\\nDetermines if node diameters will \\\nbe show or not.", "source": "codesearchnet"}
{"code": "def handle_error(self, error, download_request):\n        \n        if hasattr(error, \"errno\") and error.errno == errno.EACCES:\n            self.handle_certificate_problem(str(error))\n        else:\n            self.handle_general_download_error(str(error), download_request)", "docstring": "Checks what error occured and looks for an appropriate solution.\n\nArgs:\nerror: Exception\nThe error that has occured.\ndownload_request:\nThe request which resulted in the error.", "source": "juraj-google-style"}
{"code": "def source(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `source`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `source`')\n\n        self._source = value", "docstring": "Corresponds to IDD Field `source`\n\nArgs:\nvalue (str): value for IDD Field `source`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def GetPasswdMap(self, since=None):\n    return PasswdUpdateGetter().GetUpdates(self._GetClient(), self.conf['bucket'], self.conf['passwd_object'], since)", "docstring": "Return the passwd map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of passwd.PasswdMap", "source": "github-repos"}
{"code": "def sample_mgrid(self, mgrid: np.array) -> np.array:\n    mgrid = np.ascontiguousarray(mgrid, np.float32)\n    if (mgrid.shape[0] != self.dimensions):\n        raise ValueError(('mgrid.shape[0] must equal self.dimensions, %r[0] != %r' % (mgrid.shape, self.dimensions)))\n    out = np.ndarray(mgrid.shape[1:], np.float32)\n    if (mgrid.shape[1:] != out.shape):\n        raise ValueError(('mgrid.shape[1:] must equal out.shape, %r[1:] != %r' % (mgrid.shape, out.shape)))\n    lib.NoiseSampleMeshGrid(self._tdl_noise_c, out.size, ffi.cast('float*', mgrid.ctypes.data), ffi.cast('float*', out.ctypes.data))\n    return out", "docstring": "Sample a mesh-grid array and return the result.\n\nThe :any:`sample_ogrid` method performs better as there is a lot of\noverhead when working with large mesh-grids.\n\nArgs:\nmgrid (numpy.ndarray): A mesh-grid array of points to sample.\nA contiguous array of type `numpy.float32` is preferred.\n\nReturns:\nnumpy.ndarray: An array of sampled points.\n\nThis array has the shape: ``mgrid.shape[:-1]``.\nThe ``dtype`` is `numpy.float32`.", "source": "codesearchnet"}
{"code": "def angle_3points(p0, p1, p2):\n    vec1 = vector(p1, p0)\n    vec2 = vector(p2, p0)\n    return math.atan2(np.linalg.norm(np.cross(vec1, vec2)), np.dot(vec1, vec2))", "docstring": "compute the angle in radians between three 3D points\n\nCalculated as the angle between p1-p0 and p2-p0.\n\nArgs:\np0, p1, p2:  indexable objects with\nindices 0, 1, 2 corresponding to 3D cartesian coordinates.\n\nReturns:\nAngle in radians between (p1-p0) and (p2-p0).\n0.0 if p0==p1 or p0==p2.", "source": "codesearchnet"}
{"code": "def print_tree_deps_of(module, all_edges=None):\n    if all_edges is None:\n        all_edges = create_reverse_dependency_tree()\n    tree = get_tree_starting_at(module, all_edges)\n    lines = [(tree[0], tree[0])]\n    for index in range(1, len(tree)):\n        edges = tree[index]\n        start_edges = {edge[0] for edge in edges}\n        for start in start_edges:\n            end_edges = {edge[1] for edge in edges if edge[0] == start}\n            pos = 0\n            while lines[pos][1] != start:\n                pos += 1\n            lines = lines[:pos + 1] + [(' ' * (2 * index) + end, end) for end in end_edges] + lines[pos + 1:]\n    for line in lines:\n        print(line[0])", "docstring": "Prints the tree of modules depending on a given module.\n\nArgs:\nmodule (`str`): The module that will be the root of the subtree we want.\nall_eges (`List[Tuple[str, str]]`, *optional*):\nThe list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed.", "source": "github-repos"}
{"code": "def trade_day(dt, cal='US'):\n    \n    from xone import calendar\n\n    dt = pd.Timestamp(dt).date()\n    return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]", "docstring": "Latest trading day w.r.t given dt\n\nArgs:\ndt: date of reference\ncal: trading calendar\n\nReturns:\npd.Timestamp: last trading day\n\nExamples:\n>>> trade_day('2018-12-25').strftime('%Y-%m-%d')\n'2018-12-24'", "source": "juraj-google-style"}
{"code": "def set_source_interface(self, name):\n    cmd = self.command_builder('ntp source', value=name)\n    return self.configure(cmd)", "docstring": "Assign the NTP source on the node\n\nArgs:\nname (string): The interface port that specifies the NTP source.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def _inter_manager_operations(self, other, how_to_join, func):\n        \n        reindexed_self, reindexed_other_list, joined_index = self.copartition(\n            0, other, how_to_join, False\n        )\n        \n        reindexed_other = reindexed_other_list[0]\n        new_columns = self._join_index_objects(\n            0, other.columns, how_to_join, sort=False\n        )\n        \n        \n        \n        \n        self_cols = self.columns\n        other_cols = other.columns\n\n        def inter_data_op_builder(left, right, func):\n            left.columns = self_cols\n            right.columns = other_cols\n            \n            \n            left.index = pandas.RangeIndex(len(left.index))\n            right.index = pandas.RangeIndex(len(right.index))\n            result = func(left, right)\n            result.columns = pandas.RangeIndex(len(result.columns))\n            return result\n\n        new_data = reindexed_self.inter_data_operation(\n            1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other\n        )\n        return self.__constructor__(new_data, joined_index, new_columns)", "docstring": "Inter-data operations (e.g. add, sub).\n\nArgs:\nother: The other Manager for the operation.\nhow_to_join: The type of join to join to make (e.g. right, outer).\n\nReturns:\nNew DataManager with new data and index.", "source": "juraj-google-style"}
{"code": "def GetArtifactCollectorArgs(flow_args, knowledge_base):\n  \n  args = rdf_artifacts.ClientArtifactCollectorArgs()\n  args.knowledge_base = knowledge_base\n\n  args.apply_parsers = flow_args.apply_parsers\n  args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors\n  args.max_file_size = flow_args.max_file_size\n  args.use_tsk = flow_args.use_tsk\n\n  if not flow_args.recollect_knowledge_base:\n    artifact_names = flow_args.artifact_list\n  else:\n    artifact_names = GetArtifactsForCollection(knowledge_base.os,\n                                               flow_args.artifact_list)\n\n  expander = ArtifactExpander(knowledge_base, flow_args.path_type,\n                              flow_args.max_file_size)\n  for artifact_name in artifact_names:\n    rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)\n    if not MeetsConditions(knowledge_base, rdf_artifact):\n      continue\n    if artifact_name in expander.processed_artifacts:\n      continue\n    requested_by_user = artifact_name in flow_args.artifact_list\n    for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user):\n      args.artifacts.append(expanded_artifact)\n  return args", "docstring": "Prepare bundle of artifacts and their dependencies for the client.\n\nArgs:\nflow_args: An `ArtifactCollectorFlowArgs` instance.\nknowledge_base: contains information about the client\n\nReturns:\nrdf value object containing a list of extended artifacts and the\nknowledge base", "source": "juraj-google-style"}
{"code": "def combine_columns(columns):\n    columns_zipped = itertools.zip_longest(*columns)\n    return ''.join((x for zipped in columns_zipped for x in zipped if x))", "docstring": "Combine ``columns`` into a single string.\n\nExample:\n>>> combine_columns(['eape', 'xml'])\n'example'\n\nArgs:\ncolumns (iterable): ordered columns to combine\n\nReturns:\nString of combined columns", "source": "codesearchnet"}
{"code": "def at(self, instant):\n        \n\n        for event in self:\n            if event.begin <= instant <= event.end:\n                yield event", "docstring": "Iterates (in chronological order) over all events that are occuring during `instant`.\n\nArgs:\ninstant (Arrow object)", "source": "juraj-google-style"}
{"code": "def list_experiments(self, collection_name):\n        \n        exp = ExperimentResource(\n            name='', collection_name=collection_name, coord_frame='foo')\n        return self._list_resource(exp)", "docstring": "List all experiments that belong to a collection.\n\nArgs:\ncollection_name (string): Name of the parent collection.\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def var(x, axis=None, keepdims=False):\n    if x.dtype.base_dtype == dtypes_module.bool:\n        x = math_ops.cast(x, floatx())\n    return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)", "docstring": "Variance of a tensor, alongside the specified axis.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to compute the variance.\nkeepdims: A boolean, whether to keep the dimensions or not.\nIf `keepdims` is `False`, the rank of the tensor is reduced\nby 1. If `keepdims` is `True`,\nthe reduced dimension is retained with length 1.\n\nReturns:\nA tensor with the variance of elements of `x`.", "source": "github-repos"}
{"code": "def make_lda_variational(activation, num_topics, layer_sizes):\n    encoder_net = tf.keras.Sequential()\n    for num_hidden_units in layer_sizes:\n        encoder_net.add(tf.keras.layers.Dense(num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n    encoder_net.add(tf.keras.layers.Dense(num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n\n    def lda_variational(bag_of_words):\n        concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))\n        return ed.Dirichlet(concentration=concentration, name='topics_posterior')\n    return lda_variational", "docstring": "Creates the variational distribution for LDA.\n\nArgs:\nactivation: Activation function to use.\nnum_topics: The number of topics.\nlayer_sizes: The number of hidden units per layer in the encoder.\n\nReturns:\nlda_variational: A function that takes a bag-of-words Tensor as\ninput and returns a distribution over topics.", "source": "codesearchnet"}
{"code": "def get_nested_dmaps(dmap):\n    if (not isinstance(dmap, DynamicMap)):\n        return []\n    dmaps = [dmap]\n    for o in dmap.callback.inputs:\n        dmaps.extend(get_nested_dmaps(o))\n    return list(set(dmaps))", "docstring": "Recurses DynamicMap to find DynamicMaps inputs\n\nArgs:\ndmap: DynamicMap to recurse to look for DynamicMap inputs\n\nReturns:\nList of DynamicMap instances that were found", "source": "codesearchnet"}
{"code": "def custom_line_color_map(self, values):\n        \n        if not isinstance(values, list):\n            raise TypeError(\"custom_line_color_map must be a list\")\n\n        self.options[\"custom_line_color_map\"] = values", "docstring": "Set the custom line color map.\n\nArgs:\nvalues (list): list of colors.\n\nRaises:\nTypeError: Custom line color map must be a list.", "source": "juraj-google-style"}
{"code": "def _GetNameFromProduct(self):\n    product = (self.product or '')\n    product = product.split(' ')\n    product_lower_case = [segment.lower() for segment in product]\n    number_of_segments = len(product)\n    if ('windows' in product_lower_case):\n        segment_index = (product_lower_case.index('windows') + 1)\n        if (product_lower_case[segment_index] in ('(r)', 'server')):\n            segment_index += 1\n        suffix_segment_index = (segment_index + 1)\n        if ((suffix_segment_index < number_of_segments) and (product_lower_case[suffix_segment_index] == 'r2')):\n            return 'Windows {0:s} R2'.format(product[segment_index])\n        return 'Windows {0:s}'.format(product[segment_index])\n    return None", "docstring": "Determines the predefined operating system name from the product.\n\nReturns:\nstr: operating system name, such as \"macOS Mojave\" or \"Windows XP\" or\nNone if the name cannot be determined. This value is used to\nprogrammatically link a parser preset to an operating system and\ntherefore must be one of predefined values.", "source": "codesearchnet"}
{"code": "def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads):\n        \n        \n        with tf.name_scope('apply_gradients'):\n            var_update_ops = []\n            for vid, (g, v) in enumerate(ps_var_grads):\n                \n                apply_gradient_op = opt.apply_gradients([(g, v)])\n                barrier = self._add_sync_queues_and_barrier(\n                    'param_update_barrier_{}'.format(vid), [apply_gradient_op])\n                with tf.control_dependencies([barrier]), \\\n                        tf.device(self.cpu_device):\n                    updated_value = v.read_value()\n                    for towerid in range(self.nr_gpu):\n                        var_update_ops.append(\n                            raw_grad_list[towerid][vid][1].assign(updated_value))\n            return var_update_ops", "docstring": "Apply averaged gradients to ps vars, and then copy the updated\nvariables back to each tower.\n\nArgs:\nraw_grad_list: Ngpu x Nvar x 2 gradient list from all towers\nps_var_grads: Nvar x 2 (grad, ps_var)\n\nReturns:\nlist of copy ops", "source": "juraj-google-style"}
{"code": "def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n    return self._sess.run(fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)", "docstring": "Run ops in the monitored session.\n\nThis method is completely compatible with the `tf.Session.run()` method.\n\nArgs:\nfetches: Same as `tf.Session.run()`.\nfeed_dict: Same as `tf.Session.run()`.\noptions: Same as `tf.Session.run()`.\nrun_metadata: Same as `tf.Session.run()`.\n\nReturns:\nSame as `tf.Session.run()`.", "source": "github-repos"}
{"code": "def get_incomplete_penetrance_genes(hpo_lines):\n    \n    genes = parse_hpo_genes(hpo_lines)\n    incomplete_penetrance_genes = set()\n    for hgnc_symbol in genes:\n        if genes[hgnc_symbol].get('incomplete_penetrance'):\n            incomplete_penetrance_genes.add(hgnc_symbol)\n    return incomplete_penetrance_genes", "docstring": "Get a set with all genes that have incomplete penetrance according to HPO\n\nArgs:\nhpo_lines(iterable(str))\n\nReturns:\nincomplete_penetrance_genes(set): A set with the hgnc symbols of all\ngenes with incomplete penetrance", "source": "juraj-google-style"}
{"code": "def add_menu_item(self, command, title):\n        \n        m_item = Gtk.MenuItem()\n        m_item.set_label(title)\n        m_item.connect('activate', command)\n        self.menu.append(m_item)\n        self.menu.show_all()", "docstring": "Add mouse right click menu item.\nArgs:\ncommand (callable): function that will be called after left mouse click on title\ntitle (str): label that will be shown in menu", "source": "juraj-google-style"}
{"code": "def _find_paths_referenced(self) -> Tuple[Optional[str], Collection[str]]:", "docstring": "Finds paths for any elements referenced in this expression.\n\nRecursively builds paths by visiting each node in the tree. Returns a tuple\nof (context, paths) where `context` is an identifier which may be part of a\ndotted path completed by its parent and `paths` are the dotted paths found\nso far.\n\nImplementations must recursively call this method for all child nodes.\n\nReturns:\nA tuple of (context, paths) as described above.", "source": "github-repos"}
{"code": "def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:\n    return self._path_open(path, 'rb', mime_type, compression_type)", "docstring": "Returns a read channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def process_input_data(filename, imager, grid_data, grid_norm, grid_weights):\n    ms = oskar.MeasurementSet.open(filename)\n    block_start = 0\n    num_rows = ms.num_rows\n    num_baselines = ((ms.num_stations * (ms.num_stations - 1)) \n    while (block_start < num_rows):\n        block_size = (num_rows - block_start)\n        if (block_size > num_baselines):\n            block_size = num_baselines\n        uvw = ms.read_column('UVW', block_start, block_size)\n        vis_weights = ms.read_column('WEIGHT', block_start, block_size)\n        if (ms.num_pols == 4):\n            vis_weights = (0.5 * (vis_weights[(:, 0)] + vis_weights[(:, 3)]))\n        for j in range(ms.num_channels):\n            coords = ((uvw * (ms.freq_start_hz + (j * ms.freq_inc_hz))) / 299792458.0)\n            vis_data = None\n            if (not imager.coords_only):\n                vis_data = ms.read_vis(block_start, j, 1, block_size)\n                if (ms.num_pols == 4):\n                    vis_data = (0.5 * (vis_data[(0, :, 0)] + vis_data[(0, :, 3)]))\n            grid_norm = imager.update_plane(coords[(:, 0)], coords[(:, 1)], coords[(:, 2)], vis_data, vis_weights, grid_data, grid_norm, grid_weights)\n        block_start += block_size\n    return grid_norm", "docstring": "Reads visibility data from a Measurement Set.\n\nThe visibility grid or weights grid is updated accordingly.\n\nVisibility data are read from disk in blocks of size num_baselines.\n\nArgs:\nfilename (str):                    Name of Measurement Set to open.\nimager (oskar.Imager):             Handle to configured imager.\ngrid_data (numpy.ndarray or None): Visibility grid to populate.\ngrid_norm (float)                  Current grid normalisation.\ngrid_weights (numpy.ndarray):      Weights grid to populate or read.\n\nReturns:\ngrid_norm (float):                 Updated grid normalisation.", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, **kwargs):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n    image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs)\n    if isinstance(vision_feature_layer, int):\n        selected_image_feature = image_outputs.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        selected_image_feature = torch.cat(hs_pool, dim=-1)\n    image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes)\n    downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size\n    split_sizes = [height \n    image_features = torch.split(image_features.squeeze(0), split_sizes)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):\nThe tensors corresponding to the input images.\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nimage_sizes (`torch.Tensor`, *optional*):\nTensor containing the image sizes as returned by the processor.\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def dump_tree(self, statement=None, indent_level=0):\n        \n\n        out = u\"\"\n\n        indent = u\" \"*indent_level\n\n        if statement is None:\n            for root_statement in self.statements:\n                out += self.dump_tree(root_statement, indent_level)\n        else:\n            out += indent + str(statement) + u'\\n'\n\n            if len(statement.children) > 0:\n                for child in statement.children:\n                    out += self.dump_tree(child, indent_level=indent_level+4)\n\n        return out", "docstring": "Dump the AST for this parsed file.\n\nArgs:\nstatement (SensorGraphStatement): the statement to print\nif this function is called recursively.\nindent_level (int): The number of spaces to indent this\nstatement.  Used for recursively printing blocks of\nstatements.\nReturns:\nstr: The AST for this parsed sg file as a nested\ntree with one node per line and blocks indented.", "source": "juraj-google-style"}
{"code": "def screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180, filename: _PATH = '/sdcard/demo.mp4') -> None:\n        \n        self._execute('-s', self.device_sn, 'shell',\n                      'screenrecord', '--bit-rate', str(bit_rate), '--time-limit', str(time_limit), filename)", "docstring": "Recording the display of devices running Android 4.4 (API level 19) and higher.\n\nArgs:\nbit_rate:You can increase the bit rate to improve video quality, but doing so results in larger movie files.\ntime_limit: Sets the maximum recording time, in seconds, and the maximum value is 180 (3 minutes).", "source": "juraj-google-style"}
{"code": "def add(reader, writer, column, start, stop, value):\n    for (i, row) in enumerate(reader):\n        if ((i >= start) and (i <= stop)):\n            row[column] = (type(value)(row[column]) + value)\n        writer.appendRecord(row)", "docstring": "Adds a value over a range of rows.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream object to write output data to.\ncolumn: The column of data to modify.\nstart: The first row in the range to modify.\nend: The last row in the range to modify.\nvalue: The value to add.", "source": "codesearchnet"}
{"code": "def _append_commands(dct, module_name, commands):\n    for command in commands:\n        entry_point = '{command}{subcommand} = {module}{callable}'.format(command=command.command, subcommand=(':{}'.format(command.subcommand) if command.subcommand else ''), module=module_name, callable=(':{}'.format(command.callable) if command.callable else ''))\n        dct.setdefault(command.command, set()).add(entry_point)", "docstring": "Append entry point strings representing the given Command objects.\n\nArgs:\ndct: The dictionary to append with entry point strings. Each key will\nbe a primary command with a value containing a list of entry point\nstrings representing a Command.\nmodule_name: The name of the module in which the command object\nresides.\ncommands: A list of Command objects to convert to entry point strings.", "source": "codesearchnet"}
{"code": "def get_last(self, num=10):\n    max_item = self.get_max_item()\n    urls = [urljoin(self.item_url, f'{i}.json') for i in range(((max_item - num) + 1), (max_item + 1))]\n    result = self._run_async(urls=urls)\n    return [Item(r) for r in result if r]", "docstring": "Returns last `num` of HN stories\n\nDownloads all the HN articles and returns them as Item objects\n\nReturns:\n`list` object containing ids of HN stories.", "source": "codesearchnet"}
{"code": "def read_raster(raster_file):\n    ds = gdal_Open(raster_file)\n    band = ds.GetRasterBand(1)\n    data = band.ReadAsArray()\n    xsize = band.XSize\n    ysize = band.YSize\n    nodata_value = band.GetNoDataValue()\n    geotrans = ds.GetGeoTransform()\n    dttype = band.DataType\n    srs = osr_SpatialReference()\n    srs.ImportFromWkt(ds.GetProjection())\n    if (nodata_value is None):\n        nodata_value = DEFAULT_NODATA\n    band = None\n    ds = None\n    return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype)", "docstring": "Read raster by GDAL.\n\nArgs:\nraster_file: raster file path.\n\nReturns:\nRaster object.", "source": "codesearchnet"}
{"code": "def list_mapped_classes():\n        \n        cls_dict = {key: value\n                    for key, value in MODULE.rdfclass.__dict__.items()\n                    if not isinstance(value, RdfConfigManager)\n                    and key not in ['properties']\n                    and hasattr(value, 'es_defs')\n                    and value.es_defs.get('kds_esIndex')}\n        new_dict = {}\n        \n        \n        \n        \n        potential_maps = set([cls_.__name__ for cls_ in cls_dict.values()])\n        for name, cls_ in cls_dict.items():\n            parents = set(cls_.hierarchy)\n            if len(parents.intersection(potential_maps)) <= 1:\n                new_dict[name] = cls_\n        return new_dict", "docstring": "Returns all the rdfclasses that have and associated elasticsearch\nmapping\n\nArgs:\nNone", "source": "juraj-google-style"}
{"code": "def parse_topology(ml_log, log=None, ml_version='1.3.4BETA', print_output=False):\n    topology = {'manifold': True, 'non_manifold_E': 0, 'non_manifold_V': 0}\n    with open(ml_log) as fread:\n        for line in fread:\n            if ('V:' in line):\n                vert_edge_face = line.replace('V:', ' ').replace('E:', ' ').replace('F:', ' ').split()\n                topology['vert_num'] = int(vert_edge_face[0])\n                topology['edge_num'] = int(vert_edge_face[1])\n                topology['face_num'] = int(vert_edge_face[2])\n            if ('Unreferenced Vertices' in line):\n                topology['unref_vert_num'] = int(line.split()[2])\n            if ('Boundary Edges' in line):\n                topology['boundry_edge_num'] = int(line.split()[2])\n            if ('Mesh is composed by' in line):\n                topology['part_num'] = int(line.split()[4])\n            if ('non 2-manifold mesh' in line):\n                topology['manifold'] = False\n            if ('non two manifold edges' in line):\n                topology['non_manifold_edge'] = int(line.split()[2])\n            if ('non two manifold vertexes' in line):\n                topology['non_manifold_vert'] = int(line.split()[2])\n            if ('Genus is' in line):\n                topology['genus'] = line.split()[2]\n                if (topology['genus'] != 'undefined'):\n                    topology['genus'] = int(topology['genus'])\n            if ('holes' in line):\n                topology['hole_num'] = line.split()[2]\n                if (topology['hole_num'] == 'a'):\n                    topology['hole_num'] = 'undefined'\n                else:\n                    topology['hole_num'] = int(topology['hole_num'])\n    for (key, value) in topology.items():\n        if (log is not None):\n            log_file = open(log, 'a')\n            log_file.write('{:16} = {}\\n'.format(key, value))\n            log_file.close()\n        elif print_output:\n            print('{:16} = {}'.format(key, value))\n    return topology", "docstring": "Parse the ml_log file generated by the measure_topology function.\n\nArgs:\nml_log (str): MeshLab log file to parse\nlog (str): filename to log output\n\nReturns:\ndict: dictionary with the following keys:\nvert_num (int): number of vertices\nedge_num (int): number of edges\nface_num (int): number of faces\nunref_vert_num (int): number or unreferenced vertices\nboundry_edge_num (int): number of boundary edges\npart_num (int): number of parts (components) in the mesh.\nmanifold (bool): True if mesh is two-manifold, otherwise false.\nnon_manifold_edge (int): number of non_manifold edges.\nnon_manifold_vert (int): number of non-manifold verices\ngenus (int or str): genus of the mesh, either a number or\n'undefined' if the mesh is non-manifold.\nholes (int or str): number of holes in the mesh, either a number\nor 'undefined' if the mesh is non-manifold.", "source": "codesearchnet"}
{"code": "def WriteMap(self, map_data=None, force_write=False):\n    if map_data is None:\n        writable_map = self.data\n    else:\n        writable_map = map_data\n    entries_written = self.Write(writable_map)\n    if entries_written is None:\n        self.log.warning('cache write failed, exiting')\n        return 1\n    if force_write or self.Verify(entries_written):\n        self._Commit()\n        self.WriteIndex()\n        return 0\n    self.log.warning('verification failed, exiting')\n    return 1", "docstring": "Write a map to disk.\n\nArgs:\nmap_data: optional Map object to overwrite our current data with.\nforce_write: optional flag to indicate verification checks can be\nignored.\n\nReturns:\n0 if succesful, 1 if not", "source": "github-repos"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    self._checkClosed()\n    if whence == os.SEEK_SET:\n        self._position = offset\n    elif whence == os.SEEK_CUR:\n        self._position += offset\n    elif whence == os.SEEK_END:\n        self._position = self._downloader.size + offset\n    else:\n        raise ValueError('Whence mode %r is invalid.' % whence)\n    self._position = min(self._position, self._downloader.size)\n    self._position = max(self._position, 0)\n    return self._position", "docstring": "Set the stream's current offset.\n\nNote if the new offset is out of bound, it is adjusted to either 0 or EOF.\n\nArgs:\noffset: seek offset as number.\nwhence: seek mode. Supported modes are os.SEEK_SET (absolute seek),\nos.SEEK_CUR (seek relative to the current position), and os.SEEK_END\n(seek relative to the end, offset should be negative).\n\nRaises:\n``ValueError``: When this stream is closed or if whence is invalid.", "source": "github-repos"}
{"code": "def api_client(connection, client_class=xbahn.api.Client):\n    \n\n    return client_class(\n        link=xbahn.connection.link.Link(\n            \n            receive=connection,\n            \n            send=connection\n        )\n    )", "docstring": "Establishes an API client for one-way communication\nconnection with an API Server\n\nArguments:\n- connection (xbahn.connection.Connection)\n\nKeyword Arguments:\n- client_class (xbahn.api.Client): if supplied use this class to initantiate\nthe client object. If omitted will use xbahn.api.Client.\n\nReturns:\n- client_class: client instance", "source": "juraj-google-style"}
{"code": "def get_max_muO2(self, min_voltage=None, max_voltage=None):\n        \n        data = []\n        for pair in self._select_in_voltage_range(min_voltage, max_voltage):\n            if pair.muO2_discharge is not None:\n                data.extend([d['chempot'] for d in pair.muO2_discharge])\n            if pair.muO2_charge is not None:\n                data.extend([d['chempot'] for d in pair.muO2_discharge])\n        return max(data) if len(data) > 0 else None", "docstring": "Maximum critical oxygen chemical potential along path.\n\nArgs:\nmin_voltage: The minimum allowable voltage.\nmax_voltage: The maximum allowable voltage.\n\nReturns:\nMaximum critical oxygen chemical of all compounds along the\ninsertion path (a subset of the path can be chosen by the optional\narguments).", "source": "juraj-google-style"}
{"code": "def from_attrs(cls, desired_attrs=None, except_attrs=None,\n                   critical_attrs=None):\n        \n        if isinstance(desired_attrs, roids.OID):\n            desired_attrs = set([desired_attrs])\n        if isinstance(except_attrs, roids.OID):\n            except_attrs = set([except_attrs])\n        if isinstance(critical_attrs, roids.OID):\n            critical_attrs = set([critical_attrs])\n\n        if rfc5587 is None:\n            raise NotImplementedError(\"Your GSSAPI implementation does not \"\n                                      \"have support for RFC 5587\")\n\n        mechs = rfc5587.indicate_mechs_by_attrs(desired_attrs,\n                                                except_attrs,\n                                                critical_attrs)\n        return (cls(mech) for mech in mechs)", "docstring": "Get a generator of mechanisms supporting the specified attributes. See\nRFC 5587's :func:`indicate_mechs_by_attrs` for more information.\n\nArgs:\ndesired_attrs ([OID]): Desired attributes\nexcept_attrs ([OID]): Except attributes\ncritical_attrs ([OID]): Critical attributes\n\nReturns:\n[Mechanism]: A set of mechanisms having the desired features.\n\nRaises:\nGSSError\n\n:requires-ext:`rfc5587`", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding, expected, use_gpu, v2=False, one_dim=False, use_negative_input=False):\n    for data_format, use_gpu_2 in GetTestConfigs(include_nchw_vect_c=True, one_dimensional=one_dim):\n        if use_gpu_2 == use_gpu:\n            self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding, data_format, expected, use_gpu, v2, use_negative_input)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Function to be called, co.MaxPool, co.AvgPool,\nor the Lua version.\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\nexpected: An array containing the expected operation outputs.\nuse_gpu: Whether we are running on GPU.\nv2: Whether to use v2 version.\none_dim: If one dimensional pools should be done instead of two\ndimensional pools.\nuse_negative_input: If the input values should be negative.", "source": "github-repos"}
{"code": "def barycentric_coords(coords, simplex):\n    coords = np.atleast_2d(coords)\n    t = (np.transpose(simplex[(:(- 1), :)]) - np.transpose(simplex[((- 1), :)])[(:, None)])\n    all_but_one = np.transpose(np.linalg.solve(t, np.transpose((coords - simplex[(- 1)]))))\n    last_coord = (1 - np.sum(all_but_one, axis=(- 1))[(:, None)])\n    return np.append(all_but_one, last_coord, axis=(- 1))", "docstring": "Converts a list of coordinates to barycentric coordinates, given a\nsimplex with d+1 points. Only works for d >= 2.\n\nArgs:\ncoords: list of n coords to transform, shape should be (n,d)\nsimplex: list of coordinates that form the simplex, shape should be\n(d+1, d)\n\nReturns:\na LIST of barycentric coordinates (even if the original input was 1d)", "source": "codesearchnet"}
{"code": "def compute_verdict(self, results):\n    if (results['class'] in self.reject_classes):\n        threshold = self.reject_classes[results['class']]\n        if (float(results['confidence']) >= threshold):\n            logger.debug('<{0}> Suggesting to reject the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))\n            return self.VERDICT_REJECT\n    if (results['class'] in self.quarantine_classes):\n        threshold = self.quarantine_classes[results['class']]\n        if (float(results['confidence']) >= threshold):\n            logger.debug('<{0}> Suggesting to quarantine the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))\n            return self.VERDICT_QUARANTINE\n    if (results['class'] in self.accept_classes):\n        threshold = self.accept_classes[results['class']]\n        if (float(results['confidence']) >= threshold):\n            logger.debug('<{0}> Suggesting to accept the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))\n            return self.VERDICT_ACCEPT\n    logger.debug('<{0}> Suggesting to accept the message, no verdict class matched DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))\n    return self.VERDICT_ACCEPT", "docstring": "Match results to the configured reject, quarantine and accept classes,\nand return a verdict based on that.\n\nThe verdict classes are matched in the order: reject_classes,\nquarantine_classes, accept_classes. This means that you can configure\ndifferent verdicts for different confidence results, for instance:\nreject_classes= Spam:0.99       # Reject obvious spam\nquarantine_classes = Spam:0.7   # Quarantine spam with confidence\n#   between 0.7 and 0.99\naccept_classes = Spam           # Accept low confidence spam (good\n#   for FP and retraining)\n\nArgs:\nresults -- A results dictionary from DspamClient.", "source": "codesearchnet"}
{"code": "def get_dns_zone_ids(env='dev', facing='internal'):\n    \n    client = boto3.Session(profile_name=env).client('route53')\n\n    zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN]))\n\n    zone_ids = []\n    for zone in zones['HostedZones']:\n        LOG.debug('Found Hosted Zone: %s', zone)\n\n        if facing == 'external' or zone['Config']['PrivateZone']:\n            LOG.info('Using %(Id)s for \"%(Name)s\", %(Config)s', zone)\n            zone_ids.append(zone['Id'])\n\n    LOG.debug('Zone IDs: %s', zone_ids)\n    return zone_ids", "docstring": "Get Route 53 Hosted Zone IDs for _env_.\n\nArgs:\nenv (str): Deployment environment.\nfacing (str): Type of ELB, external or internal.\n\nReturns:\nlist: Hosted Zone IDs for _env_. Only *PrivateZone* when _facing_ is\ninternal.", "source": "juraj-google-style"}
{"code": "def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan):\n    grads = [g for g, _ in grad_and_vars]\n    grad = math_ops.add_n(grads)\n    if use_mean and len(grads) > 1:\n        grad = array_ops.multiply(grad, 1.0 / len(grads))\n    v = grad_and_vars[0][1]\n    if check_inf_nan:\n        has_nan_or_inf = array_ops.logical_not(array_ops.reduce_all(array_ops.is_finite(grads)))\n        return ((grad, v), has_nan_or_inf)\n    else:\n        return ((grad, v), None)", "docstring": "Calculate the average gradient for a shared variable across all replicas.\n\nNote that this function provides a synchronization point across all replicas.\n\nArgs:\ngrad_and_vars: A list or tuple of (gradient, variable) tuples. Each\n(gradient, variable) pair within the outer list represents the gradient\nof the variable calculated for a single replica, and the number of pairs\nequals the number of replicas.\nuse_mean: if True, mean is taken, else sum of gradients is taken.\ncheck_inf_nan: check grads for nans and infs.\n\nReturns:\nThe tuple ([(average_gradient, variable),], has_nan_or_inf) where the\ngradient has been averaged across all replicas. The variable is chosen\nfrom the first replica. The has_nan_or_inf indicates the grads has nan or\ninf.", "source": "github-repos"}
{"code": "def get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):\n    collection_def = meta_graph_def_to_load.collection_def\n    asset_tensor_dict = {}\n    asset_protos = []\n    if meta_graph_def_to_load.asset_file_def:\n        asset_protos = meta_graph_def_to_load.asset_file_def\n    elif constants.ASSETS_KEY in collection_def:\n        assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value\n        for asset_any_proto in assets_any_proto:\n            asset_proto = meta_graph_pb2.AssetFileDef()\n            asset_any_proto.Unpack(asset_proto)\n            asset_protos.append(asset_proto)\n    assets_directory = file_io.join(compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY))\n    for asset_proto in asset_protos:\n        tensor_name = asset_proto.tensor_info.name\n        if import_scope:\n            tensor_name = '%s/%s' % (import_scope, tensor_name)\n        asset_tensor_dict[tensor_name] = file_io.join(compat.as_bytes(assets_directory), compat.as_bytes(asset_proto.filename))\n    return asset_tensor_dict", "docstring": "Gets the asset tensors, if defined in the meta graph def to load.\n\nArgs:\nexport_dir: Directory where the SavedModel is located.\nmeta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.\nimport_scope: Optional `string` -- if specified, prepend this followed by\n'/' to all returned asset tensor names.\n\nReturns:\nA dictionary of asset tensors, keyed by the name of the asset tensor. The\nvalue in the map corresponds to the absolute path of the asset file.", "source": "github-repos"}
{"code": "def get_dataframe(self, force_computation=False):\n        \n        \n        if self.df is not None and not force_computation: return self.df\n\n        self.df = self.fetch(self.context)\n\n        \n        self.df = self.preprocess(self.df)\n        self.transform(self.df)\n\n        return self.df", "docstring": "Preprocesses then transforms the return of fetch().\n\nArgs:\nforce_computation (bool, optional) : Defaults to False. If set to True, forces the computation of DataFrame at each call.\n\nReturns:\npandas.DataFrame: Preprocessed and transformed DataFrame.", "source": "juraj-google-style"}
{"code": "def _get_block_publisher(self, state_hash):\n        \n        state_view = self._state_view_factory.create_view(state_hash)\n        try:\n            class BatchPublisher:\n                def send(self, transactions):\n                    \n                    \n                    \n                    \n                    \n                    \n                    \n                    \n                    raise InvalidGenesisConsensusError(\n                        'Consensus cannot send transactions during genesis.')\n\n            consensus = ConsensusFactory.get_configured_consensus_module(\n                NULL_BLOCK_IDENTIFIER,\n                state_view)\n            return consensus.BlockPublisher(\n                BlockCache(self._block_store),\n                state_view_factory=self._state_view_factory,\n                batch_publisher=BatchPublisher(),\n                data_dir=self._data_dir,\n                config_dir=self._config_dir,\n                validator_id=self._identity_signer.get_public_key().as_hex())\n        except UnknownConsensusModuleError as e:\n            raise InvalidGenesisStateError(e)", "docstring": "Returns the block publisher based on the consensus module set by the\n\"sawtooth_settings\" transaction family.\n\nArgs:\nstate_hash (str): The current state root hash for reading settings.\n\nRaises:\nInvalidGenesisStateError: if any errors occur getting the\nBlockPublisher.", "source": "juraj-google-style"}
{"code": "def sendfrom(self, user_id, dest_address, amount, minconf=1):\n    amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)\n    txhash = self.rpc.call('sendfrom', user_id, dest_address, float(str(amount)), minconf)\n    self.logger.debug(('Send %s %s from %s to %s' % (str(amount), self.coin, str(user_id), dest_address)))\n    self.logger.debug(('Transaction hash: %s' % txhash))\n    return txhash", "docstring": "Send coins from user's account.\n\nArgs:\nuser_id (str): this user's unique identifier\ndest_address (str): address which is to receive coins\namount (str or Decimal): amount to send (eight decimal points)\nminconf (int): ensure the account has a valid balance using this\nmany confirmations (default=1)\n\nReturns:\nstr: transaction ID", "source": "codesearchnet"}
{"code": "def merge(self: 'FetchResponse', other: 'FetchResponse') \\\n            -> 'FetchResponse':\n        \n        if self.seq != other.seq:\n            raise ValueError(other)\n        new_data = OrderedDict(self.data)\n        new_data.update(other.data)\n        return FetchResponse(self.seq, list(new_data.items()))", "docstring": "Merge the other FETCH response, adding any fetch attributes that do\nnot already exist in this FETCH response. For example::\n\n* 3 FETCH (UID 119)\n* 3 FETCH (FLAGS (\\\\Seen))\n\nWould merge into::\n\n* 3 FETCH (UID 119 FLAGS (\\\\Seen))\n\nArgs:\nother: The other response to merge.", "source": "juraj-google-style"}
{"code": "def is_ipython_subprocess() -> bool:\n    return False", "docstring": "Check if we are in a sub-process launched from within a `ipython` terminal.\n\nReturns:\n`True` only if we are in ipython terminal (e.g. `ml_python`) and inside\na sub-process.", "source": "github-repos"}
{"code": "def parse_input(self):\n    if self._text:\n        lines = iter(self._text.splitlines())\n    elif self._file:\n        lines = self._file\n    else:\n        lines = ()\n    sample_lines = []\n    for line in lines:\n        if (len(sample_lines) > 100):\n            break\n        sample_lines.append(line)\n    lines = itertools.chain(sample_lines, lines)\n    self.guess_type(sample_lines)\n    datetime_format = wpull.protocol.ftp.ls.date.guess_datetime_format(sample_lines)\n    self.set_datetime_format(datetime_format)\n    return self.parse(lines)", "docstring": "Parse the listings.\n\nReturns:\niter: A iterable of :class:`.ftp.ls.listing.FileEntry`", "source": "codesearchnet"}
{"code": "def ScanForStorageMediaImage(self, source_path_spec):\n    \n    try:\n      type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(\n          source_path_spec, resolver_context=self._resolver_context)\n    except RuntimeError as exception:\n      raise errors.BackEndError((\n          'Unable to process source path specification with error: '\n          '{0!s}').format(exception))\n\n    if not type_indicators:\n      \n      \n      file_system = resolver.Resolver.OpenFileSystem(\n          source_path_spec, resolver_context=self._resolver_context)\n      raw_path_spec = path_spec_factory.Factory.NewPathSpec(\n          definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)\n\n      try:\n        \n        \n        glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)\n      except errors.PathSpecError:\n        glob_results = None\n\n      file_system.Close()\n\n      if not glob_results:\n        return None\n\n      return raw_path_spec\n\n    if len(type_indicators) > 1:\n      raise errors.BackEndError(\n          'Unsupported source found more than one storage media image types.')\n\n    return path_spec_factory.Factory.NewPathSpec(\n        type_indicators[0], parent=source_path_spec)", "docstring": "Scans the path specification for a supported storage media image format.\n\nArgs:\nsource_path_spec (PathSpec): source path specification.\n\nReturns:\nPathSpec: storage media image path specification or None if no supported\nstorage media image type was found.\n\nRaises:\nBackEndError: if the source cannot be scanned or more than one storage\nmedia image type is found.", "source": "juraj-google-style"}
{"code": "def new_netting_channel(\n            self,\n            partner: Address,\n            settle_timeout: int,\n            given_block_identifier: BlockSpecification,\n    ) -> ChannelID:\n        \n        checking_block = self.client.get_checking_block()\n        self._new_channel_preconditions(\n            partner=partner,\n            settle_timeout=settle_timeout,\n            block_identifier=given_block_identifier,\n        )\n        log_details = {\n            'peer1': pex(self.node_address),\n            'peer2': pex(partner),\n        }\n        gas_limit = self.proxy.estimate_gas(\n            checking_block,\n            'openChannel',\n            participant1=self.node_address,\n            participant2=partner,\n            settle_timeout=settle_timeout,\n        )\n        if not gas_limit:\n            self.proxy.jsonrpc_client.check_for_insufficient_eth(\n                transaction_name='openChannel',\n                transaction_executed=False,\n                required_gas=GAS_REQUIRED_FOR_OPEN_CHANNEL,\n                block_identifier=checking_block,\n            )\n            self._new_channel_postconditions(\n                partner=partner,\n                block=checking_block,\n            )\n\n            log.critical('new_netting_channel call will fail', **log_details)\n            raise RaidenUnrecoverableError('Creating a new channel will fail')\n\n        log.debug('new_netting_channel called', **log_details)\n        \n        \n        if gas_limit and partner not in self.open_channel_transactions:\n            new_open_channel_transaction = AsyncResult()\n            self.open_channel_transactions[partner] = new_open_channel_transaction\n            gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_OPEN_CHANNEL)\n            try:\n                transaction_hash = self.proxy.transact(\n                    'openChannel',\n                    gas_limit,\n                    participant1=self.node_address,\n                    participant2=partner,\n                    settle_timeout=settle_timeout,\n                )\n                self.client.poll(transaction_hash)\n                receipt_or_none = check_transaction_threw(self.client, transaction_hash)\n                if receipt_or_none:\n                    self._new_channel_postconditions(\n                        partner=partner,\n                        block=receipt_or_none['blockNumber'],\n                    )\n                    log.critical('new_netting_channel failed', **log_details)\n                    raise RaidenUnrecoverableError('creating new channel failed')\n\n            except Exception as e:\n                log.critical('new_netting_channel failed', **log_details)\n                new_open_channel_transaction.set_exception(e)\n                raise\n            else:\n                new_open_channel_transaction.set(transaction_hash)\n            finally:\n                self.open_channel_transactions.pop(partner, None)\n        else:\n            \n            self.open_channel_transactions[partner].get()\n\n        channel_identifier: ChannelID = self._detail_channel(\n            participant1=self.node_address,\n            participant2=partner,\n            block_identifier='latest',\n        ).channel_identifier\n        log_details['channel_identifier'] = str(channel_identifier)\n        log.info('new_netting_channel successful', **log_details)\n\n        return channel_identifier", "docstring": "Creates a new channel in the TokenNetwork 
contract.\n\nArgs:\npartner: The peer to open the channel with.\nsettle_timeout: The settle timeout to use for this channel.\ngiven_block_identifier: The block identifier of the state change that\nprompted this proxy action\n\nReturns:\nThe ChannelID of the new netting channel.", "source": "juraj-google-style"}
{"code": "def get_results_as_numpy_array(self, parameter_space, result_parsing_function, runs):\n    return np.array(self.get_space(self.db.get_complete_results(), {}, parameter_space, runs, result_parsing_function))", "docstring": "Return the results relative to the desired parameter space in the form\nof a numpy array.\n\nArgs:\nparameter_space (dict): dictionary containing\nparameter/list-of-values pairs.\nresult_parsing_function (function): user-defined function, taking a\nresult dictionary as argument, that can be used to parse the\nresult files and return a list of values.\nruns (int): number of runs to gather for each parameter\ncombination.", "source": "codesearchnet"}
{"code": "def default_batch_size(self) -> int:\n    return OnnxConfig.default_fixed_batch", "docstring": "The default batch size to use if no other indication\n\nReturns:\nInteger > 0", "source": "github-repos"}
{"code": "def append(self, annotation):\n        \n        self._annotations[annotation.id] = annotation\n        self._dirty = True\n        return annotation", "docstring": "Add an annotation.\n\nArgs:\nannotation (gkeepapi.node.Annotation): An Annotation object.\n\nReturns:\ngkeepapi.node.Annotation: The Annotation.", "source": "juraj-google-style"}
{"code": "def pack_small_tensors(tower_grads, max_bytes=0):\n    \n    assert max_bytes >= 0\n    orig_grads = [g for g, _ in tower_grads[0]]\n    \n    assert all(g.dtype == tf.float32 for g in orig_grads)\n    sizes = [4 * g.shape.num_elements() for g in orig_grads]\n    print_stats(sizes)\n    small_ranges = []\n    large_indices = []\n    new_sizes = []\n\n    def end_interval(indices, small_ranges, large_indices):\n        if len(indices) > 1:\n            small_ranges.insert(0, [indices[0], indices[-1]])\n        else:\n            large_indices.insert(0, indices[0])\n\n    cur_range = []\n    cur_size = 0\n    for i, s in reversed(list(enumerate(sizes))):\n        if cur_size > max_bytes:\n            end_interval(cur_range, small_ranges, large_indices)\n            new_sizes.insert(0, cur_size)\n            cur_range = []\n            cur_size = 0\n        cur_range.insert(0, i)\n        cur_size += s\n    end_interval(cur_range, small_ranges, large_indices)\n    new_sizes.insert(0, cur_size)\n\n    print_stats(new_sizes)\n    num_gv = len(orig_grads)\n    packing = {}\n    if len(small_ranges):\n        new_tower_grads = []\n        for dev_idx, gv_list in enumerate(tower_grads):\n            assert len(gv_list) == num_gv, (\n                \"Possible cause: \"\n                \"Networks constructed on different workers \"\n                \"don't have the same number of variables. \"\n                \"If you use tf.GraphKeys or tf.global_variables() \"\n                \"with multiple graphs per worker during network \"\n                \"construction, you need to use \"\n                \"appropriate scopes, see \"\n                \"https:\n            new_gv_list = []\n            for r in small_ranges:\n                key = \"%d:%d\" % (dev_idx, len(new_gv_list))\n                new_gv_list.append((pack_range(key, packing, gv_list, r),\n                                    \"packing_var_placeholder\"))\n            for i in large_indices:\n                new_gv_list.append(gv_list[i])\n            new_tower_grads.append(new_gv_list)\n        return new_tower_grads, packing\n    else:\n        return tower_grads, None", "docstring": "Concatenate gradients together more intelligently.\n\nDoes binpacking\nArgs:\ntower_grads: List of lists of (gradient, variable) tuples.\nmax_bytes: Int giving max number of bytes in a tensor that\nmay be considered small.", "source": "juraj-google-style"}
{"code": "def init_algebra(*, default_hs_cls='LocalSpace'):\n    from qnet.algebra.core.hilbert_space_algebra import LocalSpace\n    from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression\n    default_hs_cls = getattr(importlib.import_module('qnet'), default_hs_cls)\n    if issubclass(default_hs_cls, LocalSpace):\n        QuantumExpression._default_hs_cls = default_hs_cls\n    else:\n        raise TypeError('default_hs_cls must be a subclass of LocalSpace')", "docstring": "Initialize the algebra system\n\nArgs:\ndefault_hs_cls (str): The name of the :class:`.LocalSpace` subclass\nthat should be used when implicitly creating Hilbert spaces, e.g.\nin :class:`.OperatorSymbol`", "source": "codesearchnet"}
{"code": "def add_documents(self, docs):\n    for sent in docs:\n        sent = map(self.process_token, sent)\n        self._token_count.update(sent)", "docstring": "Update dictionary from a collection of documents. Each document is a list\nof tokens.\n\nArgs:\ndocs (list): documents to add.", "source": "codesearchnet"}
{"code": "def parse_data_types_from_doc_ref(api, doc, namespace_context, ignore_missing_entries=False):\n    output = []\n    (data_types, routes_by_ns) = parse_data_types_and_routes_from_doc_ref(api, doc, namespace_context, ignore_missing_entries=ignore_missing_entries)\n    for d in data_types:\n        output.append(d)\n    for (ns_name, routes) in routes_by_ns.items():\n        try:\n            ns = api.namespaces[ns_name]\n            for r in routes:\n                for d in ns.get_route_io_data_types_for_route(r):\n                    output.append(d)\n        except KeyError:\n            if (not ignore_missing_entries):\n                raise\n    return output", "docstring": "Given a documentation string, parse it and return all references to other\ndata types. If there are references to routes, include also the data types of\nthose routes.\n\nArgs:\n- api: The API containing this doc ref.\n- doc: The documentation string to parse.\n- namespace_context: The namespace name relative to this documentation.\n- ignore_missing_entries: If set, this will skip references to nonexistent data types instead\nof raising an exception.\n\nReturns:\n- a list of referenced data types", "source": "codesearchnet"}
{"code": "def _find_max_beta_token_len():\n    max_beta_len = (- 1)\n    for (beta, uni) in _map.BETACODE_MAP.items():\n        if (len(beta) > max_beta_len):\n            max_beta_len = len(beta)\n    return max_beta_len", "docstring": "Finds the maximum length of a single betacode token.\n\nReturns:\nThe length of the longest key in the betacode map, which corresponds to the\nlongest single betacode token.", "source": "codesearchnet"}
{"code": "def _validate_alias_name(alias_name):\n    if (not alias_name):\n        raise CLIError(EMPTY_ALIAS_ERROR)\n    if (not re.match('^[a-zA-Z]', alias_name)):\n        raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))", "docstring": "Check if the alias name is valid.\n\nArgs:\nalias_name: The name of the alias to validate.", "source": "codesearchnet"}
{"code": "def get_objects_from_form(variant_ids, form_fields, object_type):\n    submission_fields = []\n    if (object_type == 'variant'):\n        submission_fields = CLINVAR_HEADER\n    else:\n        submission_fields = CASEDATA_HEADER\n    submission_objects = []\n    for variant_id in variant_ids:\n        subm_obj = {}\n        if ((object_type == 'casedata') and (('casedata_' + variant_id) not in form_fields)):\n            continue\n        subm_obj['csv_type'] = object_type\n        subm_obj['case_id'] = form_fields.get('case_id')\n        subm_obj['category'] = form_fields.get(('category@' + variant_id))\n        for (key, values) in submission_fields.items():\n            field_value = form_fields.get(((key + '@') + variant_id))\n            if (field_value and (not (field_value == '-'))):\n                if (key == 'ref_seq'):\n                    refseq_raw = field_value.split('|')\n                    subm_obj['ref_seq'] = refseq_raw[0]\n                    subm_obj['hgvs'] = refseq_raw[1]\n                else:\n                    subm_obj[key] = field_value\n        if (object_type == 'casedata'):\n            subm_obj['_id'] = ((((str(subm_obj['case_id']) + '_') + variant_id) + '_') + str(subm_obj['individual_id']))\n        else:\n            subm_obj['_id'] = ((str(subm_obj['case_id']) + '_') + variant_id)\n        submission_objects.append(subm_obj)\n    return submission_objects", "docstring": "Extract the objects to be saved in the clinvar database collection.\nobject_type param specifies if these objects are variant or casedata objects\n\nArgs:\nvariant_ids(list): list of database variant ids\nform_fields(dict): it's the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER\nobject_type(str): either 'variant' or 'case_data'\n\nReturns:\nsubmission_objects(list): list of submission objects of either type 'variant' or 'casedata'", "source": "codesearchnet"}
{"code": "def piece_to_id(input, model_file=None, model_proto=None, name=None):\n  \n\n  return _gen_sentencepiece_processor_op.sentencepiece_piece_to_id(\n      input, model_file=model_file, model_proto=model_proto, name=name)", "docstring": "Converts piece into vocabulary id.\n\nArgs:\ninput: An arbitrary tensor of string.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of int32 with the same shape as input.", "source": "juraj-google-style"}
{"code": "def __init__(self, *schedules: List[Union[ScheduleComponent, Tuple[int, ScheduleComponent]]],\n                 name: str = None):\n        \n        self._name = name\n        try:\n            timeslots = []\n            children = []\n            for sched_pair in schedules:\n                \n                if not isinstance(sched_pair, (list, tuple)):\n                    sched_pair = (0, sched_pair)\n                \n                sched_pair = tuple(sched_pair)\n                insert_time, sched = sched_pair\n                sched_timeslots = sched.timeslots\n                if insert_time:\n                    sched_timeslots = sched_timeslots.shift(insert_time)\n                timeslots.append(sched_timeslots.timeslots)\n                children.append(sched_pair)\n\n            self._timeslots = TimeslotCollection(*itertools.chain(*timeslots))\n            self._children = tuple(children)\n\n        except PulseError as ts_err:\n            raise PulseError('Child schedules {0} overlap.'.format(schedules)) from ts_err", "docstring": "Create empty schedule.\n\nArgs:\n*schedules: Child Schedules of this parent Schedule. May either be passed as\nthe list of schedules, or a list of (start_time, schedule) pairs\nname: Name of this schedule\n\nRaises:\nPulseError: If timeslots intercept.", "source": "juraj-google-style"}
{"code": "def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient:\n    if (len(servers) > 1):\n        sorted_servers = [server_url for (server_url, _) in sort_servers_closest(servers)]\n        log.info('Automatically selecting matrix homeserver based on RTT', sorted_servers=sorted_servers)\n    elif (len(servers) == 1):\n        sorted_servers = servers\n    else:\n        raise TransportError('No valid servers list given')\n    last_ex = None\n    for server_url in sorted_servers:\n        server_url: str = server_url\n        client = GMatrixClient(server_url, *args, **kwargs)\n        try:\n            client.api._send('GET', '/versions', api_path='/_matrix/client')\n        except MatrixError as ex:\n            log.warning('Selected server not usable', server_url=server_url, _exception=ex)\n            last_ex = ex\n        else:\n            break\n    else:\n        raise TransportError('Unable to find a reachable Matrix server. Please check your network connectivity.') from last_ex\n    return client", "docstring": "Given a list of possible servers, chooses the closest available and create a GMatrixClient\n\nParams:\nservers: list of servers urls, with scheme (http or https)\nRest of args and kwargs are forwarded to GMatrixClient constructor\nReturns:\nGMatrixClient instance for one of the available servers", "source": "codesearchnet"}
{"code": "def migrate_config(self, current_config, config_to_migrate, always_update, update_defaults):\n    value = self._search_config_for_possible_names(current_config)\n    self._update_config(config_to_migrate, value, always_update, update_defaults)", "docstring": "Migrate config value in current_config, updating config_to_migrate.\n\nGiven the current_config object, it will attempt to find a value\nbased on all the names given. If no name could be found, then it\nwill simply set the value to the default.\n\nIf a value is found and is in the list of previous_defaults, it will\neither update or keep the old value based on if update_defaults is\nset.\n\nIf a non-default value is set it will either keep this value or update\nit based on if ``always_update`` is true.\n\nArgs:\ncurrent_config (dict): Current configuration.\nconfig_to_migrate (dict): Config to update.\nalways_update (bool): Always update value.\nupdate_defaults (bool): Update values found in previous_defaults", "source": "codesearchnet"}
{"code": "def sort_imports_in_all_inits(check_only=True):\n    failures = []\n    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):\n        if '__init__.py' in files:\n            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)\n            if result:\n                failures = [os.path.join(root, '__init__.py')]\n    if len(failures) > 0:\n        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')", "docstring": "Sort the imports defined in the `_import_structure` of all inits in the repo.\n\nArgs:\ncheck_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.", "source": "github-repos"}
{"code": "def __getitem__(self, key):\n    if self._dims is not None:\n        if isinstance(key, slice):\n            return TensorShape(self._dims[key])\n        elif self._v2_behavior:\n            return self._dims[key]\n        else:\n            return self.dims[key]\n    elif isinstance(key, slice):\n        start = key.start if key.start is not None else 0\n        stop = key.stop\n        if key.step is not None:\n            raise ValueError('Steps are not yet handled')\n        if stop is None:\n            return unknown_shape()\n        elif start < 0 or stop < 0:\n            return unknown_shape()\n        else:\n            return unknown_shape(rank=stop - start)\n    elif self._v2_behavior:\n        return None\n    else:\n        return Dimension(None)", "docstring": "Returns the value of a dimension or a shape, depending on the key.\n\nArgs:\nkey: If `key` is an integer, returns the dimension at that index;\notherwise if `key` is a slice, returns a TensorShape whose dimensions\nare those selected by the slice from `self`.\n\nReturns:\nAn integer if `key` is an integer, or a `TensorShape` if `key` is a\nslice.\n\nRaises:\nValueError: If `key` is a slice and `self` is completely unknown and\nthe step is set.", "source": "github-repos"}
{"code": "def translations(self, **kwargs):\n    path = self._get_id_path('translations')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the translations for a specific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def _is_composite_function(self, func: function_pb2.FunctionDef) -> bool:\n    return func.signature.name.startswith('composite_')", "docstring": "Determine whether a FunctionDef is composite function.\n\nArgs:\nfunc: A FunctionDef object.\n\nReturns:\nTrue iff `func` is composte function.", "source": "github-repos"}
{"code": "def read_chunk_header(self):\n    try:\n        chunk_size_hex = (yield from self._connection.readline())\n    except ValueError as error:\n        raise ProtocolError('Invalid chunk size: {0}'.format(error)) from error\n    if (not chunk_size_hex.endswith(b'\\n')):\n        raise NetworkError('Connection closed.')\n    try:\n        chunk_size = int(chunk_size_hex.split(b';', 1)[0].strip(), 16)\n    except ValueError as error:\n        raise ProtocolError('Invalid chunk size: {0}'.format(error)) from error\n    if (chunk_size < 0):\n        raise ProtocolError('Chunk size cannot be negative.')\n    self._chunk_size = self._bytes_left = chunk_size\n    return (chunk_size, chunk_size_hex)", "docstring": "Read a single chunk's header.\n\nReturns:\ntuple: 2-item tuple with the size of the content in the chunk and\nthe raw header byte string.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def _call_with_structured_signature(self, args, kwargs):\n    bound_args = function_type_utils.canonicalize_function_inputs(args, kwargs, self.function_type)\n    filtered_flat_args = self.function_type.unpack_inputs(bound_args)\n    return self._call_flat(filtered_flat_args, captured_inputs=self.captured_inputs)", "docstring": "Executes the wrapped function with the structured signature.\n\nArgs:\nargs: Positional arguments to the concrete function.\nkwargs: Keyword arguments to the concrete function.\n\nReturns:\nThe result of applying the function on the Tensors/Variables contained in\n`args` and `kwargs`.\nRaises:\nTypeError: if `args` and `kwargs` do not match the structured signature\nof this `ConcreteFunction`.", "source": "github-repos"}
{"code": "def run(self, args):\n        \n        jlink = self.create_jlink(args)\n        erased = jlink.erase()\n        print('Bytes Erased: %d' % erased)", "docstring": "Erases the device connected to the J-Link.\n\nArgs:\nself (EraseCommand): the ``EraseCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def get_ssm_parameter(parameter_name):\n    \n    try:\n        response = boto3.client('ssm').get_parameters(\n            Names=[parameter_name],\n            WithDecryption=True\n        )\n\n        return response.get('Parameters', None)[0].get('Value', '')\n    except Exception:\n        pass\n\n    return ''", "docstring": "Get the decrypted value of an SSM parameter\n\nArgs:\nparameter_name - the name of the stored parameter of interest\n\nReturn:\nValue if allowed and present else None", "source": "juraj-google-style"}
{"code": "def get_min_instability(self, min_voltage=None, max_voltage=None):\n    data = []\n    for pair in self._select_in_voltage_range(min_voltage, max_voltage):\n        if (pair.decomp_e_charge is not None):\n            data.append(pair.decomp_e_charge)\n        if (pair.decomp_e_discharge is not None):\n            data.append(pair.decomp_e_discharge)\n    return (min(data) if (len(data) > 0) else None)", "docstring": "The minimum instability along a path for a specific voltage range.\n\nArgs:\nmin_voltage: The minimum allowable voltage.\nmax_voltage: The maximum allowable voltage.\n\nReturns:\nMinimum decomposition energy of all compounds along the insertion\npath (a subset of the path can be chosen by the optional arguments)", "source": "codesearchnet"}
{"code": "def isloaded(self, name):\n    if (name is None):\n        return True\n    if isinstance(name, str):\n        return (name in [x.__module__ for x in self])\n    if isinstance(name, Iterable):\n        return set(name).issubset([x.__module__ for x in self])\n    return False", "docstring": "Checks if given hook module has been loaded\n\nArgs:\nname (str): The name of the module to check\n\nReturns:\nbool.  The return code::\n\nTrue -- Loaded\nFalse -- Not Loaded", "source": "codesearchnet"}
{"code": "def parse(filename, encoding=None):\n    with open(filename, encoding=encoding) as source:\n        for line in source:\n            for word in line.split():\n                (yield word)", "docstring": "!DEMO!\nSimple file parsing generator\n\nArgs:\nfilename: absolute or relative path to file on disk\nencoding: encoding string that is passed to open function", "source": "codesearchnet"}
{"code": "def number_of_shards(self):\n    return self._sharding_policies[0].number_of_shards", "docstring": "Gets the number of shards to use for the InfeedQueue.\n\nReturns:\nNumber of shards or None if the number of shards has not been set.", "source": "github-repos"}
{"code": "def getAsGeoJson(self, session):\n        \n        statement = .format(self.geometryColumnName,\n                               self.tableName,\n                               self.id)\n\n        result = session.execute(statement)\n\n        for row in result:\n            return row.json", "docstring": "Retrieve the geometry in GeoJSON format.\n\nThis method is a veneer for an SQL query that calls the ``ST_AsGeoJSON()`` function on the geometry column.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\n\nReturns:\nstr: GeoJSON string representation of geometry.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    self._file_object = file_object", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:\n    old_embedding_dim = shape_list(old_embeddings)[1]\n    init_range = getattr(self.config, 'initializer_range', 0.02)\n    embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)\n    new_embeddings = self.add_weight(name=old_embeddings.name.split(':')[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32)\n    init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())\n    new_embeddings.assign(init_embeddings)\n    return new_embeddings", "docstring": "Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly\ninitialized vectors at the end. Reducing the size will remove vectors from the end\n\nArgs:\nold_embeddings (`tf.Variable`):\nOld embeddings to be resized.\nnew_num_tokens (`int`, *optional*):\nNew number of tokens in the embedding matrix.\n\nIncreasing the size will add newly initialized vectors at the end. Reducing the size will remove\nvectors from the end. If not provided or `None`, just returns a pointer to the input tokens\n`tf.Variable` module of the model without doing anything.\n\nReturn:\n`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is\n`None`", "source": "github-repos"}
{"code": "def _CheckFileEntryType(self, file_entry):\n    if (not self._file_entry_types):\n        return None\n    return (self._CheckIsDevice(file_entry) or self._CheckIsDirectory(file_entry) or self._CheckIsFile(file_entry) or self._CheckIsLink(file_entry) or self._CheckIsPipe(file_entry) or self._CheckIsSocket(file_entry))", "docstring": "Checks the file entry type find specifications.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if\nnot or None if no file entry type specification is defined.", "source": "codesearchnet"}
{"code": "def apply_operation(self, symmop, fractional=False):\n        \n        if not fractional:\n            self._lattice = Lattice([symmop.apply_rotation_only(row)\n                                     for row in self._lattice.matrix])\n\n            def operate_site(site):\n                new_cart = symmop.operate(site.coords)\n                new_frac = self._lattice.get_fractional_coords(new_cart)\n                return PeriodicSite(site.species, new_frac,\n                                    self._lattice,\n                                    properties=site.properties)\n\n        else:\n            new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)\n            self._lattice = Lattice(new_latt)\n\n            def operate_site(site):\n                return PeriodicSite(site.species,\n                                    symmop.operate(site.frac_coords),\n                                    self._lattice,\n                                    properties=site.properties)\n\n        self._sites = [operate_site(s) for s in self._sites]", "docstring": "Apply a symmetry operation to the structure and return the new\nstructure. The lattice is operated by the rotation matrix only.\nCoords are operated in full and then transformed to the new lattice.\n\nArgs:\nsymmop (SymmOp): Symmetry operation to apply.\nfractional (bool): Whether the symmetry operation is applied in\nfractional space. Defaults to False, i.e., symmetry operation\nis applied in cartesian coordinates.", "source": "juraj-google-style"}
{"code": "def peek_all(self, model_class):\n    if self._cache:\n        return self._cache.get_records(model_class.__name__)\n    else:\n        return []", "docstring": "Return a list of models from the local cache.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\n\nReturns:\nlist: A list of instances of you model_class or and empty list.", "source": "codesearchnet"}
{"code": "def get_interpolated_value(self, energy):\n    f = {}\n    for spin in self.densities.keys():\n        f[spin] = get_linear_interpolated_value(self.energies, self.densities[spin], energy)\n    return f", "docstring": "Returns interpolated density for a particular energy.\n\nArgs:\nenergy: Energy to return the density for.", "source": "codesearchnet"}
{"code": "async def vsetup(self, author):\n        \n\n        if self.vready:\n            logger.warning(\"Attempt to init voice when already initialised\")\n            return\n\n        if self.state != 'starting':\n            logger.error(\"Attempt to init from wrong state ('{}'), must be 'starting'.\".format(self.state))\n            return\n\n        self.logger.debug(\"Setting up voice\")\n\n        \n        self.vchannel = author.voice.voice_channel\n        if self.vchannel:\n            self.statuslog.info(\"Connecting to voice\")\n            try:\n                self.vclient = await client.join_voice_channel(self.vchannel)\n            except discord.ClientException as e:\n                logger.exception(e)\n                self.statuslog.warning(\"I'm already connected to a voice channel.\")\n                return\n            except discord.opus.OpusNotLoaded as e:\n                logger.exception(e)\n                logger.error(\"Could not load Opus. This is an error with your FFmpeg setup.\")\n                self.statuslog.error(\"Could not load Opus.\")\n                return\n            except discord.DiscordException as e:\n                logger.exception(e)\n                self.statuslog.error(\"I couldn't connect to the voice channel. Check my permissions.\")\n                return\n            except Exception as e:\n                self.statuslog.error(\"Internal error connecting to voice, disconnecting.\")\n                logger.error(\"Error connecting to voice {}\".format(e))\n                return\n        else:\n            self.statuslog.error(\"You're not connected to a voice channel.\")\n            return\n\n        self.vready = True", "docstring": "Creates the voice client\n\nArgs:\nauthor (discord.Member): The user that the voice ui will seek", "source": "juraj-google-style"}
{"code": "def get_variant_by_name(self, name):\n        \n        results = []\n\n        try:\n            for info, dosage in self._bgen.get_variant(name):\n                results.append(Genotypes(\n                    Variant(\n                        info.name,\n                        CHROM_STR_ENCODE.get(info.chrom, info.chrom),\n                        info.pos,\n                        [info.a1, info.a2],\n                    ),\n                    dosage,\n                    reference=info.a1,\n                    coded=info.a2,\n                    multiallelic=False,\n                ))\n\n        except ValueError:\n            logging.variant_name_not_found(name)\n\n        return results", "docstring": "Get the genotype of a marker using it's name.\n\nArgs:\nname (str): The name of the marker.\n\nReturns:\nlist: A list of Genotypes.", "source": "juraj-google-style"}
{"code": "def __ComputeEndByte(self, start, end=None, use_chunks=True):\n    end_byte = end\n    if ((start < 0) and (not self.total_size)):\n        return end_byte\n    if use_chunks:\n        alternate = ((start + self.chunksize) - 1)\n        if (end_byte is not None):\n            end_byte = min(end_byte, alternate)\n        else:\n            end_byte = alternate\n    if self.total_size:\n        alternate = (self.total_size - 1)\n        if (end_byte is not None):\n            end_byte = min(end_byte, alternate)\n        else:\n            end_byte = alternate\n    return end_byte", "docstring": "Compute the last byte to fetch for this request.\n\nThis is all based on the HTTP spec for Range and\nContent-Range.\n\nNote that this is potentially confusing in several ways:\n* the value for the last byte is 0-based, eg \"fetch 10 bytes\nfrom the beginning\" would return 9 here.\n* if we have no information about size, and don't want to\nuse the chunksize, we'll return None.\nSee the tests for more examples.\n\nArgs:\nstart: byte to start at.\nend: (int or None, default: None) Suggested last byte.\nuse_chunks: (bool, default: True) If False, ignore self.chunksize.\n\nReturns:\nLast byte to use in a Range header, or None.", "source": "codesearchnet"}
{"code": "def is_insert_grad_of_statement(node):\n  \n  tangent_calls = [anno.getanno(item.context_expr, 'func', None)\n                   is utils.insert_grad_of for item in node.items]\n  if all(tangent_calls):\n    return True\n  elif any(tangent_calls):\n    raise ValueError\n  else:\n    return False", "docstring": "Check whether a context manager calls `insert_grad_of`.\n\nArgs:\nnode: The context manager node.\n\nReturns:\nWhether or not this node contains `insert_grad_of` calls.\n\nRaises:\nValueError: If the `insert_grad_of` calls are mixed with other calls.", "source": "juraj-google-style"}
{"code": "def add_to_cache(cls, remote_info, container):\n    if (not isinstance(container, cls)):\n        raise TypeError(('%r not an instance of %r, could not be added to cache.' % (container, cls)))\n    if (remote_info in cls.__remote_info_cache):\n        raise KeyError('Cache has collision but should not.')\n    cls.__remote_info_cache[remote_info] = container", "docstring": "Adds a ResourceContainer to a cache tying it to a protorpc method.\n\nArgs:\nremote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding\nto a method.\ncontainer: An instance of ResourceContainer.\n\nRaises:\nTypeError: if the container is not an instance of cls.\nKeyError: if the remote method has been reference by a container before.\nThis created remote method should never occur because a remote method\nis created once.", "source": "codesearchnet"}
{"code": "def AddIndex(self, path_segment_index):\n    if (path_segment_index in self._weight_per_index):\n        raise ValueError('Path segment index already set.')\n    self._weight_per_index[path_segment_index] = 0", "docstring": "Adds a path segment index and sets its weight to 0.\n\nArgs:\npath_segment_index: an integer containing the path segment index.\n\nRaises:\nValueError: if the path segment weights already contains\nthe path segment index.", "source": "codesearchnet"}
{"code": "def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):\n    \n    try:\n        \n        sailthru_response = sailthru_client.api_get(\"user\", {\"id\": email, \"fields\": {\"vars\": 1}})\n        if not sailthru_response.is_ok():\n            error = sailthru_response.get_error()\n            logger.error(\"Error attempting to read user record from Sailthru: %s\", error.get_message())\n            return not can_retry_sailthru_request(error)\n\n        response_json = sailthru_response.json\n\n        unenroll_list = []\n        if response_json and \"vars\" in response_json and response_json[\"vars\"] \\\n           and \"unenrolled\" in response_json[\"vars\"]:\n            unenroll_list = response_json[\"vars\"][\"unenrolled\"]\n\n        changed = False\n        \n        if unenroll:\n            if course_url not in unenroll_list:\n                unenroll_list.append(course_url)\n                changed = True\n\n        \n        elif course_url in unenroll_list:\n            unenroll_list.remove(course_url)\n            changed = True\n\n        if changed:\n            \n            sailthru_response = sailthru_client.api_post(\n                'user', {'id': email, 'key': 'email', 'vars': {'unenrolled': unenroll_list}})\n\n            if not sailthru_response.is_ok():\n                error = sailthru_response.get_error()\n                logger.error(\"Error attempting to update user record in Sailthru: %s\", error.get_message())\n                return not can_retry_sailthru_request(error)\n\n        return True\n\n    except SailthruClientError as exc:\n        logger.exception(\"Exception attempting to update user record for %s in Sailthru - %s\", email, text_type(exc))\n        return False", "docstring": "Maintain a list of courses the user has unenrolled from in the Sailthru user record\n\nArguments:\nsailthru_client (object): SailthruClient\nemail (str): user's email address\ncourse_url (str): LMS url for course info page.\nunenroll (boolean): True if unenrolling, False if enrolling\n\nReturns:\nFalse if retryable error, else True", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if self._wrapping_method:\n        self._wrapping_method.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid struct missing the wrapping method attribute.')\n    if self._encryption_key_information:\n        self._encryption_key_information.write(local_stream, kmip_version=kmip_version)\n    if self._mac_signature_key_information:\n        self._mac_signature_key_information.write(local_stream, kmip_version=kmip_version)\n    if self._attribute_names:\n        for unique_identifier in self._attribute_names:\n            unique_identifier.write(local_stream, kmip_version=kmip_version)\n    if self._encoding_option:\n        self._encoding_option.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(KeyWrappingSpecification, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the KeyWrappingSpecification struct to a\nstream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def is_diagonal(matrix: np.ndarray, *, atol: float=1e-08) -> bool:\n    matrix = np.copy(matrix)\n    for i in range(min(matrix.shape)):\n        matrix[(i, i)] = 0\n    return tolerance.all_near_zero(matrix, atol=atol)", "docstring": "Determines if a matrix is a approximately diagonal.\n\nA matrix is diagonal if i!=j implies m[i,j]==0.\n\nArgs:\nmatrix: The matrix to check.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is diagonal within the given tolerance.", "source": "codesearchnet"}
{"code": "def validate(self):\n    for schema in (self.headers_schema, Message.headers_schema):\n        _log.debug('Validating message headers \"%r\" with schema \"%r\"', self._headers, schema)\n        jsonschema.validate(self._headers, schema)\n    for schema in (self.body_schema, Message.body_schema):\n        _log.debug('Validating message body \"%r\" with schema \"%r\"', self.body, schema)\n        jsonschema.validate(self.body, schema)", "docstring": "Validate the headers and body with the message schema, if any.\n\nIn addition to the user-provided schema, all messages are checked against\nthe base schema which requires certain message headers and the that body\nbe a JSON object.\n\n.. warning:: This method should not be overridden by sub-classes.\n\nRaises:\njsonschema.ValidationError: If either the message headers or the message body\nare invalid.\njsonschema.SchemaError: If either the message header schema or the message body\nschema are invalid.", "source": "codesearchnet"}
{"code": "def fetch(self, payment_id, data={}, **kwargs):\n        \n        return super(Payment, self).fetch(payment_id, data, **kwargs)", "docstring": "Fetch Payment for given Id\n\nArgs:\npayment_id : Id for which payment object has to be retrieved\n\nReturns:\nPayment dict for given payment Id", "source": "juraj-google-style"}
{"code": "def switch_window(self, window_id: int):\n    \n\n    \n    \n    if window_id not in self.tmux_available_window_ids:\n      for i in range(max(self.tmux_available_window_ids)+1, window_id+1):\n        self._run_raw(f'tmux new-window -t {self.tmux_session} -d')\n\n        tmux_window = self.tmux_session + ':' + str(i)\n        cmd = shlex.quote(f'cd {self.taskdir}')\n        tmux_cmd = f'tmux send-keys -t {tmux_window} {cmd} Enter'\n        self._run_raw(tmux_cmd)\n        self.tmux_available_window_ids.append(i)\n\n    self.tmux_window_id = window_id", "docstring": "Switches currently active tmux window for given task. 0 is the default window\nArgs:\nwindow_id: integer id of tmux window to use", "source": "juraj-google-style"}
{"code": "def on_channel_open(self, channel):\n        \n        self.in_channel.exchange_declare(exchange='input_exc', type='topic', durable=True)\n        channel.queue_declare(callback=self.on_input_queue_declare, queue=self.INPUT_QUEUE_NAME)", "docstring": "Input channel creation callback\nQueue declaration done here\n\nArgs:\nchannel: input channel", "source": "juraj-google-style"}
{"code": "class SquaredHinge(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='squared_hinge', dtype=None):\n        super().__init__(fn=squared_hinge, name=name, dtype=dtype)\n        self._direction = 'down'\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the hinge metric between `y_true` and `y_pred`.\n\n`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are\nprovided we will convert them to -1 or 1.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = keras.metrics.SquaredHinge()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])\n>>> m.result()\n1.86\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],\n...                sample_weight=[1, 0])\n>>> m.result()\n1.46", "source": "github-repos"}
{"code": "def supports_suggested_actions(channel_id: str, button_cnt: int=100) -> bool:\n    max_actions = {Channels.facebook: 10, Channels.skype: 10, Channels.line: 13, Channels.kik: 20, Channels.telegram: 100, Channels.slack: 100, Channels.emulator: 100, Channels.direct_line: 100, Channels.webchat: 100}\n    return ((button_cnt <= max_actions[channel_id]) if (channel_id in max_actions) else False)", "docstring": "Determine if a number of Suggested Actions are supported by a Channel.\n\nArgs:\nchannel_id (str): The Channel to check the if Suggested Actions are supported in.\nbutton_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel.\n\nReturns:\nbool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not support that number of Suggested Actions.", "source": "codesearchnet"}
{"code": "def _add_length_constrain(token_lst: List[Dict], lengths: List) -> List[Dict]:\n        \n\n        result = []\n        for a_token in token_lst:\n            for length in lengths:\n                if type(length) == str and length and length.isdigit():\n                    a_token[attrs.LENGTH] = int(length)\n                    result.append(copy.deepcopy(a_token))\n                elif type(length) == int:\n                    a_token[attrs.LENGTH] = int(length)\n                    result.append(copy.deepcopy(a_token))\n        return result", "docstring": "Add length constrain for some token type, create cross production\nArgs:\ntoken_lst: List[Dict]\nlengths: List\n\nReturns: List[Dict]", "source": "juraj-google-style"}
{"code": "def memory_write16(self, addr, data, zone=None):\n    return self.memory_write(addr, data, zone, 16)", "docstring": "Writes half-words to memory of a target system.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of half-words to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of half-words written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "codesearchnet"}
{"code": "def create(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:\n    dirname = os.path.dirname(path)\n    if dirname:\n        os.makedirs(os.path.dirname(path), exist_ok=True)\n    return self._path_open(path, 'wb', mime_type, compression_type)", "docstring": "Returns a write channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def __init__(self, component1=None, component2=None):\n        \n        if component1 is None and component2 is not None:\n            component1 = component2\n            component2 = None\n\n        self._llhead = None\n        self._lltail = None\n        \n        \n\n        if isinstance(component1, CompositeBitarray):\n            self._llhead = component1._llhead\n            self._lltail = component1._lltail\n            self._offset = component1._offset\n            self._tailbitsused = component1._tailbitsused\n            self._length = len(component1)\n        else:\n            self._llhead = self._lltail = _DLLNode(component1)\n            self._offset = 0\n            self._tailbitsused = len(component1)\n            self._length = self._tailbitsused\n\n\n        if component2 is not None:\n            oldtail = self._lltail\n            if isinstance(component2, CompositeBitarray):\n                if self._lltail is component2._llhead:\n                    if self._tail_end != component2._offset:\n                        raise ProteusDataJoinError()\n\n                    if component2._is_single_llnode:\n                        self._tailbitsused += component2._tailbitsused\n                    else:\n                        self._tailbitsused = component2._tailbitsused\n                    self._lltail = component2._lltail\n                    self._length += len(component2)\n                elif self._lltail.next is component2._llhead and\\\n                         self._tailoffset == 0 and\\\n                         component2._offset == 0:\n                    self._lltail = component2._lltail\n                    self._tailbitsused = component2._tailbitsused\n                    self._length += len(component2)\n                elif component2._llhead.prev is not None or\\\n                     self._lltail.next is not None or\\\n                     component2._offset or self._tailoffset or\\\n                     self._llhead is component2._lltail:\n                    \n                    \n                    \n                    raise ProteusDataJoinError()\n                else:\n                    self._length += len(component2)\n                    self._lltail.next = component2._llhead\n                    self._lltail = component2._lltail\n                    self._tailbitsused = component2._tailbitsused\n            else:\n                if self._tailoffset or self._lltail.next is not None:\n                    raise ProteusDataJoinError()\n                self._tailbitsused = len(component2)\n                self._length += self._tailbitsused\n                node = _DLLNode(component2)\n                node.prev = self._lltail\n                self._lltail = node\n\n\n            \n            \n            \n            \n            \n            if oldtail is not self._llhead or self._offset == 0:\n                self._do_merge(oldtail)", "docstring": "Create a bitarray object that stores its components by reference).\n\nArgs:\n*components: Any number of bitarray instances to store in this composition.", "source": "juraj-google-style"}
{"code": "def NewEvent(type: str, id: UUID=None, data: JsonDict=None, metadata: JsonDict=None) -> NewEventData:\n    return NewEventData((id or uuid4()), type, data, metadata)", "docstring": "Build the data structure for a new event.\n\nArgs:\ntype: An event type.\nid: The uuid identifier for the event.\ndata: A dict containing data for the event. These data\nmust be json serializable.\nmetadata: A dict containing metadata about the event.\nThese must be json serializable.", "source": "codesearchnet"}
{"code": "def crt(self, mp, mq):\n        \n        u = (mq - mp) * self.p_inverse % self.q\n        return mp + (u * self.p)", "docstring": "The Chinese Remainder Theorem as needed for decryption. Returns the solution modulo n=pq.\n\nArgs:\nmp(int): the solution modulo p.\nmq(int): the solution modulo q.", "source": "juraj-google-style"}
{"code": "def save(self, representative_dataset: RepresentativeDatasetMapping) -> Mapping[str, _RepresentativeDatasetFile]:\n    raise NotImplementedError('Method \"save\" is not implemented.')", "docstring": "Saves the representative dataset.\n\nArgs:\nrepresentative_dataset: RepresentativeDatasetMapping which is a\nsignature_def_key -> representative dataset mapping.", "source": "github-repos"}
{"code": "def handle_subscribed_event(self, event_obj, event_name):\n        \n        handler, args = self.handlers[event_name]\n        self.executor.submit(handler, event_obj, *args)", "docstring": "Execute the registered handler of an event.\n\nRetrieve the handler and its arguments, and execute the handler in a\nnew thread.\n\nArgs:\nevent_obj: Json object of the event.\nevent_name: Name of the event to call handler for.", "source": "juraj-google-style"}
{"code": "def group_associations_types(self, group_type, api_entity=None, api_branch=None, params=None):\n        \n        if params is None:\n            params = {}\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        target = self._tcex.ti.group(group_type)\n\n        for gat in self.tc_requests.group_associations_types(\n            self.api_type,\n            self.api_sub_type,\n            self.unique_id,\n            target,\n            api_entity=api_entity,\n            api_branch=api_branch,\n            owner=self.owner,\n            params=params,\n        ):\n            yield gat", "docstring": "Gets the group association from a Indicator/Group/Victim\n\nArgs:\ngroup_type:\napi_entity:\napi_branch:\nparams:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _list_profile_sort_key(profile_datum, sort_by):\n    if sort_by == SORT_OPS_BY_OP_NAME:\n        return profile_datum.node_exec_stats.node_name\n    elif sort_by == SORT_OPS_BY_OP_TYPE:\n        return profile_datum.op_type\n    elif sort_by == SORT_OPS_BY_LINE:\n        return profile_datum.file_line_func\n    elif sort_by == SORT_OPS_BY_OP_TIME:\n        return profile_datum.op_time\n    elif sort_by == SORT_OPS_BY_EXEC_TIME:\n        return profile_datum.node_exec_stats.all_end_rel_micros\n    else:\n        return profile_datum.node_exec_stats.all_start_micros", "docstring": "Get a profile_datum property to sort by in list_profile command.\n\nArgs:\nprofile_datum: A `ProfileDatum` object.\nsort_by: (string) indicates a value to sort by.\nMust be one of SORT_BY* constants.\n\nReturns:\nprofile_datum property to sort by.", "source": "github-repos"}
{"code": "def _process_counter_example(self, mma, w_string):\n        \n        w_string = self._find_bad_transition(mma, w_string)\n\n        diff = len(w_string)\n        same = 0\n        while True:\n            i = (same + diff) / 2\n            access_string = self._run_in_hypothesis(mma, w_string, i)\n            is_diff = self._check_suffix(w_string, access_string, i)\n            if is_diff:\n                diff = i\n            else:\n                same = i\n            if diff - same == 1:\n                break\n        exp = w_string[diff:]\n\n        self.observation_table.em_vector.append(exp)\n        for row in self.observation_table.sm_vector + self.observation_table.smi_vector:\n            self._fill_table_entry(row, exp)", "docstring": "Process a counterexample in the Rivest-Schapire way.\nArgs:\nmma (DFA): The hypothesis automaton\nw_string (str): The examined string to be consumed\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def raw_sql(cls, cur, query: str, values: tuple):\n    (yield from cur.execute(query, values))\n    return (yield from cur.fetchall())", "docstring": "Run a raw sql query\n\nArgs:\nquery : query string to execute\nvalues : tuple of values to be used with the query\n\nReturns:\nresult of query as list of named tuple", "source": "codesearchnet"}
{"code": "def pack_sequence_as(structure, flat_sequence):\n    flat_sequence = list(flat_sequence)\n    flattened_structure = nest.flatten(structure, expand_composites=True)\n    if len(flattened_structure) != len(flat_sequence):\n        raise ValueError('Mismatch in element count')\n    for i in range(len(flat_sequence)):\n        if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):\n            flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(old_ta=flattened_structure[i], flow=flat_sequence[i])\n    return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)", "docstring": "Like `nest.pack_sequence_as` but also builds TensorArrays from flows.\n\nArgs:\nstructure: The structure to pack into. May contain Tensors,\nCompositeTensors, or TensorArrays.\nflat_sequence: An iterable containing tensors.\n\nReturns:\nA nested structure.\n\nRaises:\nAssertionError if `structure` and `flat_sequence` are not compatible.", "source": "github-repos"}
{"code": "def _process_example_section(func_documentation, func, parent_class, class_name, model_name_lowercase, config_class, checkpoint, indent_level):\n    from transformers.models import auto as auto_module\n    example_docstring = ''\n    if func_documentation is not None and (match := re.search('(?m)^([ \\\\t]*)(?=Example)', func_documentation)):\n        example_docstring = func_documentation[match.start():]\n        example_docstring = '\\n' + set_min_indent(example_docstring, indent_level + 4)\n    elif parent_class is None and model_name_lowercase is not None:\n        task = f'({'|'.join(PT_SAMPLE_DOCSTRINGS.keys())})'\n        model_task = re.search(task, class_name)\n        CONFIG_MAPPING = auto_module.configuration_auto.CONFIG_MAPPING\n        if (checkpoint_example := checkpoint) is None:\n            try:\n                checkpoint_example = get_checkpoint_from_config_class(CONFIG_MAPPING[model_name_lowercase])\n            except KeyError:\n                if model_name_lowercase in HARDCODED_CONFIG_FOR_MODELS:\n                    CONFIG_MAPPING_NAMES = auto_module.configuration_auto.CONFIG_MAPPING_NAMES\n                    config_class_name = HARDCODED_CONFIG_FOR_MODELS[model_name_lowercase]\n                    if config_class_name in CONFIG_MAPPING_NAMES.values():\n                        model_name_for_auto_config = [k for k, v in CONFIG_MAPPING_NAMES.items() if v == config_class_name][0]\n                        if model_name_for_auto_config in CONFIG_MAPPING:\n                            checkpoint_example = get_checkpoint_from_config_class(CONFIG_MAPPING[model_name_for_auto_config])\n        if model_task is not None:\n            if checkpoint_example is not None:\n                example_annotation = ''\n                task = model_task.group()\n                example_annotation = PT_SAMPLE_DOCSTRINGS[task].format(model_class=class_name, checkpoint=checkpoint_example, expected_output='...', expected_loss='...', qa_target_start_index=14, qa_target_end_index=15, mask='<mask>')\n                example_docstring = set_min_indent(example_annotation, indent_level + 4)\n            else:\n                print(f\"🚨 No checkpoint found for {class_name}.{func.__name__}. 
Please add a `checkpoint` arg to `auto_docstring` or add one in {config_class}'s docstring\")\n        else:\n            for name_model_list_for_task in MODELS_TO_PIPELINE:\n                model_list_for_task = getattr(auto_module.modeling_auto, name_model_list_for_task)\n                if class_name in model_list_for_task.values():\n                    pipeline_name = MODELS_TO_PIPELINE[name_model_list_for_task]\n                    example_annotation = PIPELINE_TASKS_TO_SAMPLE_DOCSTRINGS[pipeline_name].format(model_class=class_name, checkpoint=checkpoint_example, expected_output='...', expected_loss='...', qa_target_start_index=14, qa_target_end_index=15)\n                    example_docstring = set_min_indent(example_annotation, indent_level + 4)\n                    break\n    return example_docstring", "docstring": "Process the example section of the docstring.\n\nArgs:\nfunc_documentation (`str`): Existing function documentation (manually specified in the docstring)\nfunc (`function`): Function being processed\nparent_class (`class`): Parent class of the function\nclass_name (`str`): Name of the class\nmodel_name_lowercase (`str`): Lowercase model name\nconfig_class (`str`): Config class for the model\ncheckpoint: Checkpoint to use in examples\nindent_level (`int`): Indentation level", "source": "github-repos"}
{"code": "def gff3_verifier(entries, line=None):\n    \n\n    regex = r'^[a-zA-Z0-9.:^*$@!+_?-|]+\\t.+\\t.+\\t\\d+\\t\\d+\\t' \\\n            + r'\\d*\\.?\\d*\\t[+-.]\\t[.0-2]\\t.+{0}$'.format(os.linesep)\n    delimiter = r'\\t'\n\n    for entry in entries:\n        try:\n            entry_verifier([entry.write()], regex, delimiter)\n        except FormatError as error:\n            \n            if line:\n                intro = 'Line {0}'.format(str(line))\n            elif error.part == 0:\n                intro = 'Entry with source {0}'.format(entry.source)\n            else:\n                intro = 'Entry with Sequence ID {0}'.format(entry.seqid)\n\n            \n            if error.part == 0:\n                msg = '{0} has no Sequence ID'.format(intro)\n            elif error.part == 1:\n                msg = '{0} has no source'.format(intro)\n            elif error.part == 2:\n                msg = '{0} has non-numerical characters in type'.format(intro)\n            elif error.part == 3:\n                msg = '{0} has non-numerical characters in ' \\\n                      'start position'.format(intro)\n            elif error.part == 4:\n                msg = '{0} has non-numerical characters in ' \\\n                      'end position'.format(intro)\n            elif error.part == 5:\n                msg = '{0} has non-numerical characters in score'.format(intro)\n            elif error.part == 6:\n                msg = '{0} strand not in [+-.]'.format(intro)\n            elif error.part == 7:\n                msg = '{0} phase not in [.0-2]'.format(intro)\n            elif error.part == 8:\n                msg = '{0} has no attributes'.format(intro)\n            else:\n                msg = 'Unknown Error: Likely a Bug'\n            raise FormatError(message=msg)\n\n        if line:\n            line += 1", "docstring": "Raises error if invalid GFF3 format detected\n\nArgs:\nentries (list): A list of GFF3Entry instances\n\nline (int): Line number of first entry\n\nRaises:\nFormatError: Error when GFF3 format incorrect with descriptive message", "source": "juraj-google-style"}
{"code": "def parse_object_like_triples(self):\n    self.rdf.triples = SimpleNamespace()\n    for (s, p, o) in self.rdf.graph:\n        (ns_prefix, ns_uri, predicate) = self.rdf.graph.compute_qname(p)\n        if (not hasattr(self.rdf.triples, ns_prefix)):\n            setattr(self.rdf.triples, ns_prefix, SimpleNamespace())\n        if (not hasattr(getattr(self.rdf.triples, ns_prefix), predicate)):\n            setattr(getattr(self.rdf.triples, ns_prefix), predicate, [])\n        getattr(getattr(self.rdf.triples, ns_prefix), predicate).append(o)", "docstring": "method to parse triples from self.rdf.graph for object-like\naccess\n\nArgs:\nNone\n\nReturns:\nNone: sets self.rdf.triples", "source": "codesearchnet"}
{"code": "def match_next_flag(tt_flags, pos):\n    match = _FLAG_DOUBLE_QUOTE_PAT.match(tt_flags, pos)\n    if match:\n        return (match, True)\n    match = _FLAG_SINGLE_QUOTE_PAT.match(tt_flags, pos)\n    if match:\n        return (match, True)\n    match = _FLAG_NO_QUOTE_PAT.match(tt_flags, pos)\n    if match:\n        return (match, True)\n    match = _FLAG_NO_EQUAL_PAT.match(tt_flags, pos)\n    if match:\n        return (match, False)\n    return (None, False)", "docstring": "Returns the match for the next TensorTracer flag.\n\nArgs:\ntt_flags: a string that contains the flags.\npos: where in flags to start the search.\n\nReturns:\nA pair where the first element is the regular-expression\nmatch found and the second element indicates if the match\nhas a value.", "source": "github-repos"}
{"code": "def _AnsiCmd(command_list):\n  \n  if not isinstance(command_list, list):\n    raise ValueError('Invalid list: %s' % command_list)\n  \n  \n  for sgr in command_list:\n    if sgr.lower() not in SGR:\n      raise ValueError('Invalid or unsupported SGR name: %s' % sgr)\n  \n  command_str = [str(SGR[x.lower()]) for x in command_list]\n  \n  return '\\033[%sm' % (';'.join(command_str))", "docstring": "Takes a list of SGR values and formats them as an ANSI escape sequence.\n\nArgs:\ncommand_list: List of strings, each string represents an SGR value.\ne.g. 'fg_blue', 'bg_yellow'\n\nReturns:\nThe ANSI escape sequence.\n\nRaises:\nValueError: if a member of command_list does not map to a valid SGR value.", "source": "juraj-google-style"}
{"code": "def charges(self, num, charge_id=None, **kwargs):\n    baseuri = (self._BASE_URI + 'company/{}/charges'.format(num))\n    if (charge_id is not None):\n        baseuri += '/{}'.format(charge_id)\n        res = self.session.get(baseuri, params=kwargs)\n    else:\n        res = self.session.get(baseuri, params=kwargs)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for charges against a company by company number.\n\nArgs:\nnum (str): Company number to search on.\ntransaction (Optional[str]): Filing record number.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "codesearchnet"}
{"code": "def write(self, destination, filename, content):\n    if (not os.path.exists(destination)):\n        try:\n            os.makedirs(destination)\n        except:\n            pass\n    filepath = ('%s/%s' % (destination, filename))\n    f = open(filepath, 'w+')\n    f.write(content)\n    f.close()", "docstring": "Write a file at the specific destination with the content.\n\nArgs:\ndestination (string): the destination location\nfilename (string): the filename that will be written\ncontent (string): the content of the filename", "source": "codesearchnet"}
{"code": "def get_key_flags_for_module(self, module):\n    \n    if not isinstance(module, str):\n      module = module.__name__\n\n    \n    \n    \n    key_flags = self._get_flags_defined_by_module(module)\n\n    \n    for flag in self.key_flags_by_module_dict().get(module, []):\n      if flag not in key_flags:\n        key_flags.append(flag)\n    return key_flags", "docstring": "Returns the list of key flags for a module.\n\nArgs:\nmodule: module|str, the module to get key flags from.\n\nReturns:\n[Flag], a new list of Flag instances.  Caller may update this list as\ndesired: none of those changes will affect the internals of this\nFlagValue instance.", "source": "juraj-google-style"}
{"code": "def contains(self, sub):\n        \n        sub = sub.lower()\n        found_words = set()\n\n        res = cgaddag.gdg_contains(self.gdg, sub.encode(encoding=\"ascii\"))\n        tmp = res\n\n        while tmp:\n            word = tmp.contents.str.decode(\"ascii\")\n            found_words.add(word)\n            tmp = tmp.contents.next\n\n        cgaddag.gdg_destroy_result(res)\n        return list(found_words)", "docstring": "Find all words containing a substring.\n\nArgs:\nsub: A substring to be searched for.\n\nReturns:\nA list of all words found.", "source": "juraj-google-style"}
{"code": "def get(self, txn_id):\n    if (txn_id not in self._receipt_db):\n        raise KeyError('Unknown transaction id {}'.format(txn_id))\n    txn_receipt_bytes = self._receipt_db[txn_id]\n    txn_receipt = TransactionReceipt()\n    txn_receipt.ParseFromString(txn_receipt_bytes)\n    return txn_receipt", "docstring": "Returns the TransactionReceipt\n\nArgs:\ntxn_id (str): the id of the transaction for which the receipt\nshould be retrieved.\n\nReturns:\nTransactionReceipt: The receipt for the given transaction id.\n\nRaises:\nKeyError: if the transaction id is unknown.", "source": "codesearchnet"}
{"code": "def mt_excel_files(store, case_obj, temp_excel_dir):\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\n    samples = case_obj.get('individuals')\n    query = {'chrom': 'MT'}\n    mt_variants = list(store.variants(case_id=case_obj['_id'], query=query, nr_of_variants=(- 1), sort_key='position'))\n    written_files = 0\n    for sample in samples:\n        sample_id = sample['individual_id']\n        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)\n        document_name = ('.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx')\n        workbook = Workbook(os.path.join(temp_excel_dir, document_name))\n        Report_Sheet = workbook.add_worksheet()\n        row = 0\n        for (col, field) in enumerate(MT_EXPORT_HEADER):\n            Report_Sheet.write(row, col, field)\n        for (row, line) in enumerate(sample_lines, 1):\n            for (col, field) in enumerate(line):\n                Report_Sheet.write(row, col, field)\n        workbook.close()\n        if os.path.exists(os.path.join(temp_excel_dir, document_name)):\n            written_files += 1\n    return written_files", "docstring": "Collect MT variants and format line of a MT variant report\nto be exported in excel format\n\nArgs:\nstore(adapter.MongoAdapter)\ncase_obj(models.Case)\ntemp_excel_dir(os.Path): folder where the temp excel files are written to\n\nReturns:\nwritten_files(int): the number of files written to temp_excel_dir", "source": "codesearchnet"}
{"code": "def DisableInterfaces(interface):\n    set_tested_versions = ['vista', '2008']\n    set_args = ['/c', 'netsh', 'set', 'interface', interface, 'DISABLED']\n    host_version = platform.platform().lower()\n    for version in set_tested_versions:\n        if (host_version.find(version) != (- 1)):\n            res = client_utils_common.Execute('cmd', set_args, time_limit=(- 1), bypass_whitelist=True)\n            return res\n    return ('', 'Command not available for this version.', 99, '')", "docstring": "Tries to disable an interface.  Only works on Vista and 7.\n\nArgs:\ninterface: Name of the interface to disable.\n\nReturns:\nres which is a tuple of (stdout, stderr, exit_status, time_taken).", "source": "codesearchnet"}
{"code": "def get_resize_output_image_size(input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:\n    image_size = get_image_size(input_image, input_data_format)\n    if isinstance(size, (list, tuple)):\n        return size\n    return get_size_with_aspect_ratio(image_size, size, max_size)", "docstring": "Computes the output image size given the input image size and the desired output size. If the desired output size\nis a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output\nimage size is computed by keeping the aspect ratio of the input image size.\n\nArgs:\ninput_image (`np.ndarray`):\nThe image to resize.\nsize (`int` or `Tuple[int, int]` or `List[int]`):\nThe desired output size.\nmax_size (`int`, *optional*):\nThe maximum allowed output size.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input image.", "source": "github-repos"}
{"code": "def apply_operation(self, symmop):\n        \n\n        def operate_site(site):\n            new_cart = symmop.operate(site.coords)\n            return Site(site.species, new_cart,\n                        properties=site.properties)\n\n        self._sites = [operate_site(s) for s in self._sites]", "docstring": "Apply a symmetry operation to the molecule.\n\nArgs:\nsymmop (SymmOp): Symmetry operation to apply.", "source": "juraj-google-style"}
{"code": "def chdir(self, target_directory):\n        \n        target_directory = self.filesystem.resolve_path(\n            target_directory, allow_fd=True)\n        self.filesystem.confirmdir(target_directory)\n        directory = self.filesystem.resolve(target_directory)\n        \n        \n        if not is_root() and not directory.st_mode | PERM_EXE:\n            self.filesystem.raise_os_error(errno.EACCES, directory)\n        self.filesystem.cwd = target_directory", "docstring": "Change current working directory to target directory.\n\nArgs:\ntarget_directory: The path to new current working directory.\n\nRaises:\nOSError: if user lacks permission to enter the argument directory\nor if the target is not a directory.", "source": "juraj-google-style"}
{"code": "def _resolve_non_literal_route(self, method, path):\n        \n        for route_dict in (self._wildcard, self._regex):\n            if method in route_dict:\n                for route in reversed(route_dict[method]):\n                    callback_data = route.match(path)\n                    if callback_data is not None:\n                        return callback_data\n        return None", "docstring": "Resolve a request to a wildcard or regex route handler.\n\nArguments:\nmethod (str): HTTP method name, e.g. GET, POST, etc.\npath (str): Request path\n\nReturns:\ntuple or None: A tuple of three items:\n\n1. Route handler (callable)\n2. Positional arguments (list)\n3. Keyword arguments (dict)\n\n``None`` if no route matches the request.", "source": "juraj-google-style"}
{"code": "def message(self, tree, spins, subtheta, auxvars):\n        \n        energy_sources = set()\n        for v, children in tree.items():\n            aux = auxvars[v]\n\n            assert all(u in spins for u in self._ancestors[v])\n\n            \n            \n            \n            def energy_contributions():\n                yield subtheta.linear[v]\n\n                for u, bias in subtheta.adj[v].items():\n                    if u in spins:\n                        yield SpinTimes(spins[u], bias)\n\n            plus_energy = Plus(energy_contributions())\n            minus_energy = SpinTimes(-1, plus_energy)\n\n            \n            if children:\n                \n                spins[v] = 1\n                plus_energy = Plus(plus_energy, self.message(children, spins, subtheta, auxvars))\n                spins[v] = -1\n                minus_energy = Plus(minus_energy, self.message(children, spins, subtheta, auxvars))\n                del spins[v]\n\n            \n            m = FreshSymbol(REAL)\n\n            ancestor_aux = {auxvars[u] if spins[u] > 0 else Not(auxvars[u])\n                            for u in self._ancestors[v]}\n            plus_aux = And({aux}.union(ancestor_aux))\n            minus_aux = And({Not(aux)}.union(ancestor_aux))\n\n            self.assertions.update({LE(m, plus_energy),\n                                    LE(m, minus_energy),\n                                    Implies(plus_aux, GE(m, plus_energy)),\n                                    Implies(minus_aux, GE(m, minus_energy))\n                                    })\n\n            energy_sources.add(m)\n\n        return Plus(energy_sources)", "docstring": "Determine the energy of the elimination tree.\n\nArgs:\ntree (dict): The current elimination tree\nspins (dict): The current fixed spins\nsubtheta (dict): Theta with spins fixed.\nauxvars (dict): The auxiliary variables for the given spins.\n\nReturns:\nThe formula for the energy of the tree.", "source": "juraj-google-style"}
{"code": "def create_streaming_endpoint(access_token, name, description=\"New Streaming Endpoint\", \\\nscale_units=\"1\"):\n    \n    path = '/StreamingEndpoints'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = '{ \\\n\t\t\"Id\":null, \\\n\t\t\"Name\":\"' + name + '\", \\\n\t\t\"Description\":\"' + description + '\", \\\n\t\t\"Created\":\"0001-01-01T00:00:00\", \\\n\t\t\"LastModified\":\"0001-01-01T00:00:00\", \\\n\t\t\"State\":null, \\\n\t\t\"HostName\":null, \\\n\t\t\"ScaleUnits\":\"' + scale_units + '\", \\\n\t\t\"CrossSiteAccessPolicies\":{ \\\n\t\t\t\"ClientAccessPolicy\":\"<access-policy><cross-domain-access><policy><allow-from http-request-headers=\\\\\"*\\\\\"><domain uri=\\\\\"http:\n\t\t\t\"CrossDomainPolicy\":\"<?xml version=\\\\\"1.0\\\\\"?><!DOCTYPE cross-domain-policy SYSTEM \\\\\"http:\n\t\t} \\\n\t}'\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Streaming Endpoint.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nname (str): A Media Service Streaming Endpoint Name.\ndescription (str): A Media Service Streaming Endpoint Description.\nscale_units (str): A Media Service Scale Units Number.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def as_dict(self, verbosity=0):\n        \n        species_list = []\n        for spec, occu in self._species.items():\n            d = spec.as_dict()\n            del d[\"@module\"]\n            del d[\"@class\"]\n            d[\"occu\"] = occu\n            species_list.append(d)\n\n        d = {\"species\": species_list,\n             \"abc\": [float(c) for c in self._frac_coords],\n             \"lattice\": self._lattice.as_dict(verbosity=verbosity),\n             \"@module\": self.__class__.__module__,\n             \"@class\": self.__class__.__name__}\n\n        if verbosity > 0:\n            d[\"xyz\"] = [float(c) for c in self.coords]\n            d[\"label\"] = self.species_string\n\n        d[\"properties\"] = self.properties\n        return d", "docstring": "Json-serializable dict representation of PeriodicSite.\n\nArgs:\nverbosity (int): Verbosity level. Default of 0 only includes the\nmatrix representation. Set to 1 for more details such as\ncartesian coordinates, etc.", "source": "juraj-google-style"}
{"code": "def init_database(connection=None, dbname=None):\n    \n\n    connection = connection or connect()\n    dbname = dbname or bigchaindb.config['database']['name']\n\n    create_database(connection, dbname)\n    create_tables(connection, dbname)", "docstring": "Initialize the configured backend for use with BigchainDB.\n\nCreates a database with :attr:`dbname` with any required tables\nand supporting indexes.\n\nArgs:\nconnection (:class:`~bigchaindb.backend.connection.Connection`): an\nexisting connection to use to initialize the database.\nCreates one if not given.\ndbname (str): the name of the database to create.\nDefaults to the database name given in the BigchainDB\nconfiguration.", "source": "juraj-google-style"}
{"code": "def save_imgs(x, fname):\n    n = x.shape[0]\n    fig = figure.Figure(figsize=(n, 1), frameon=False)\n    canvas = backend_agg.FigureCanvasAgg(fig)\n    for i in range(n):\n        ax = fig.add_subplot(1, n, (i + 1))\n        ax.imshow(x[i].squeeze(), interpolation='none', cmap=cm.get_cmap('binary'))\n        ax.axis('off')\n    canvas.print_figure(fname, format='png')\n    print(('saved %s' % fname))", "docstring": "Helper method to save a grid of images to a PNG file.\n\nArgs:\nx: A numpy array of shape [n_images, height, width].\nfname: The filename to write to (including extension).", "source": "codesearchnet"}
{"code": "def CreateSharedBudget(client):\n    budget_service = client.GetService('BudgetService', version='v201809')\n    budget = {'name': ('Shared Interplanetary Budget \n    operation = {'operator': 'ADD', 'operand': budget}\n    response = budget_service.mutate([operation])\n    return response['value'][0]", "docstring": "Creates an explicit budget to be used only to create the Campaign.\n\nArgs:\nclient: AdWordsClient the client to run the example with.\n\nReturns:\ndict An object representing a shared budget.", "source": "codesearchnet"}
{"code": "def map_into_course(self, course_key):\n        \n        return self.replace(usage_key=self.usage_key.map_into_course(course_key))", "docstring": "Return a new :class:`UsageKey` or :class:`AssetKey` representing this usage inside the\ncourse identified by the supplied :class:`CourseKey`. It returns the same type as\n`self`\n\nArgs:\ncourse_key (:class:`CourseKey`): The course to map this object into.\n\nReturns:\nA new :class:`CourseObjectMixin` instance.", "source": "juraj-google-style"}
{"code": "def generate_mediation_matrix(dsm):\n    cat = dsm.categories\n    ent = dsm.entities\n    size = dsm.size[0]\n    if (not cat):\n        cat = (['appmodule'] * size)\n    packages = [e.split('.')[0] for e in ent]\n    mediation_matrix = [[0 for _ in range(size)] for _ in range(size)]\n    for i in range(0, size):\n        for j in range(0, size):\n            if (cat[i] == 'framework'):\n                if (cat[j] == 'framework'):\n                    mediation_matrix[i][j] = (- 1)\n                else:\n                    mediation_matrix[i][j] = 0\n            elif (cat[i] == 'corelib'):\n                if ((cat[j] in ('framework', 'corelib')) or ent[i].startswith((packages[j] + '.')) or (i == j)):\n                    mediation_matrix[i][j] = (- 1)\n                else:\n                    mediation_matrix[i][j] = 0\n            elif (cat[i] == 'applib'):\n                if ((cat[j] in ('framework', 'corelib', 'applib')) or ent[i].startswith((packages[j] + '.')) or (i == j)):\n                    mediation_matrix[i][j] = (- 1)\n                else:\n                    mediation_matrix[i][j] = 0\n            elif (cat[i] == 'appmodule'):\n                if ((cat[j] in ('framework', 'corelib', 'applib', 'broker', 'data')) or ent[i].startswith((packages[j] + '.')) or (i == j)):\n                    mediation_matrix[i][j] = (- 1)\n                else:\n                    mediation_matrix[i][j] = 0\n            elif (cat[i] == 'broker'):\n                if ((cat[j] in ('appmodule', 'corelib', 'framework')) or ent[i].startswith((packages[j] + '.')) or (i == j)):\n                    mediation_matrix[i][j] = (- 1)\n                else:\n                    mediation_matrix[i][j] = 0\n            elif (cat[i] == 'data'):\n                if ((cat[j] == 'framework') or (i == j)):\n                    mediation_matrix[i][j] = (- 1)\n                else:\n                    mediation_matrix[i][j] = 0\n            else:\n                raise DesignStructureMatrixError(('Mediation matrix value NOT generated for %s:%s' % (i, j)))\n    return mediation_matrix", "docstring": "Generate the mediation matrix of the given matrix.\n\nRules for mediation matrix generation:\n\nSet -1 for items NOT to be considered\nSet 0 for items which MUST NOT be present\nSet 1 for items which MUST be present\n\nEach module has optional dependencies to itself.\n\n- Framework has optional dependency to all framework items (-1),\nand to nothing else.\n- Core libraries have dependencies to framework.\nDependencies to other core libraries are tolerated.\n- Application libraries have dependencies to framework.\nDependencies to other core or application libraries are tolerated.\nNo dependencies to application modules.\n- Application modules have dependencies to framework and libraries.\nDependencies to other application modules\nshould be mediated over a broker.\nDependencies to data are tolerated.\n- Data have no dependencies at all\n(but framework/libraries would be tolerated).\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to generate\nthe mediation matrix for.", "source": "codesearchnet"}
{"code": "def coordinate_tensor(shape, axis):\n  \n  if axis < 0:\n    axis = tf.size(shape) + axis  \n\n  r = tf.range(shape[axis])\n  r_shape = tf.one_hot(\n      axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32)\n  return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)", "docstring": "Return a tensor with given shape containing coordinate along given axis.\n\nArgs:\nshape: a Tensor representing the shape of the output Tensor\naxis: an integer\n\nReturns:\nA tensor with shape shape and type tf.int32, where each elements its\ncoordinate along the given axis.", "source": "juraj-google-style"}
{"code": "def duplicated_initializer(tc, init, graph_seed, shape=None):\n    if shape is None:\n        shape = [100]\n    with tc.test_session(graph=ops.Graph()):\n        random_seed.set_random_seed(graph_seed)\n        t1 = init(shape).eval()\n        t2 = init(shape).eval()\n        return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)", "docstring": "Tests duplicated random initializer within the same graph.\n\nThis test generates two random kernels from the same initializer to the same\ngraph, and checks if the results are close enough. Even given the same global,\nseed, two different instances of random kernels should generate different\nresults.\n\nArgs:\ntc: An instance of TensorFlowTestCase.\ninit: An Initializer that generates a tensor of a given shape\ngraph_seed: A graph-level seed to use.\nshape: Shape of the tensor to initialize or `None` to use a vector of length\n100.\n\nReturns:\nTrue or False as determined by test.", "source": "github-repos"}
{"code": "def calc_control_outputs(self, graph):\n    control_outputs = {}\n    for op in graph.get_operations():\n        for control_input in op.control_inputs:\n            if control_input not in control_outputs:\n                control_outputs[control_input] = set()\n            control_outputs[control_input].add(op)\n    return control_outputs", "docstring": "Returns the map of control_outputs for a given graph.\n\nArgs:\ngraph: The graph to parse.\n\nReturns:\nA map of the control outputs.", "source": "github-repos"}
{"code": "def generate_token(key, user_id, action_id='', when=None):\n    digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))\n    digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))\n    digester.update(DELIMITER)\n    digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))\n    digester.update(DELIMITER)\n    when = _helpers._to_bytes(str((when or int(time.time()))), encoding='utf-8')\n    digester.update(when)\n    digest = digester.digest()\n    token = base64.urlsafe_b64encode(((digest + DELIMITER) + when))\n    return token", "docstring": "Generates a URL-safe token for the given user, action, time tuple.\n\nArgs:\nkey: secret key to use.\nuser_id: the user ID of the authenticated user.\naction_id: a string identifier of the action they requested\nauthorization for.\nwhen: the time in seconds since the epoch at which the user was\nauthorized for this action. If not set the current time is used.\n\nReturns:\nA string XSRF protection token.", "source": "codesearchnet"}
{"code": "def get_completions(self, context_word, prefix):\n    if context_word not in self._comp_dict:\n        return (None, None)\n    comp_items = self._comp_dict[context_word]\n    comp_items = sorted([item for item in comp_items if item.startswith(prefix)])\n    return (comp_items, self._common_prefix(comp_items))", "docstring": "Get the tab completions given a context word and a prefix.\n\nArgs:\ncontext_word: The context word.\nprefix: The prefix of the incomplete word.\n\nReturns:\n(1) None if no registered context matches the context_word.\nA list of str for the matching completion items. Can be an empty list\nof a matching context exists, but no completion item matches the\nprefix.\n(2) Common prefix of all the words in the first return value. If the\nfirst return value is None, this return value will be None, too. If\nthe first return value is not None, i.e., a list, this return value\nwill be a str, which can be an empty str if there is no common\nprefix among the items of the list.", "source": "github-repos"}
{"code": "def sap_sid_nr(broker):\n    insts = broker[DefaultSpecs.saphostctrl_listinstances].content\n    hn = broker[DefaultSpecs.hostname].content[0].split('.')[0].strip()\n    results = set()\n    for ins in insts:\n        ins_splits = ins.split(' - ')\n        if (ins_splits[2].strip() == hn):\n            results.add((ins_splits[0].split()[(- 1)].lower(), ins_splits[1].strip()))\n    return list(results)", "docstring": "Get the SID and Instance Number\n\nTypical output of saphostctrl_listinstances::\n# /usr/sap/hostctrl/exe/saphostctrl -function ListInstances\nInst Info : SR1 - 01 - liuxc-rhel7-hana-ent - 749, patch 418, changelist 1816226\n\nReturns:\n(list): List of tuple of SID and Instance Number.", "source": "codesearchnet"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteUInt256(self.PrevHash)\n        writer.WriteUInt16(self.PrevIndex)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def make_block(cls, header: str='', content: str | dict[str, Any] | list[Any] | tuple[Any, ...]=(), *, braces: Union[str, tuple[str, str]]='(', equal: str='=', limit: int=20) -> str:\n    if isinstance(braces, str):\n        braces = _BRACE_TO_BRACES[braces]\n    brace_start, brace_end = braces\n    if isinstance(content, str):\n        content = [content]\n    if isinstance(content, dict):\n        parts = [f'{k}{equal}{pretty_repr(v)}' for k, v in content.items()]\n    elif isinstance(content, (list, tuple)):\n        parts = [pretty_repr(v) for v in content]\n    else:\n        raise TypeError(f'Invalid fields {type(content)}')\n    collapse = len(parts) <= 1\n    if any(('\\n' in p for p in parts)):\n        collapse = False\n    elif sum((len(p) for p in parts)) <= limit:\n        collapse = True\n    lines = cls()\n    lines += f'{header}{brace_start}'\n    with lines.indent():\n        if collapse:\n            lines += ', '.join(parts)\n        else:\n            for p in parts:\n                lines += f'{p},'\n    lines += f'{brace_end}'\n    return lines.join(collapse=collapse)", "docstring": "Util function to create a code block.\n\nExample:\n\n```python\nepy.Lines.make_block('A', {}) == 'A()'\nepy.Lines.make_block('A', {'x': '1'}) == 'A(x=1)'\nepy.Lines.make_block('A', {'x': '1', 'y': '2'}) == '''A(\nx=1,\ny=2,\n)'''\n```\n\nPattern is as:\n\n```\n{header}{braces[0]}\n{k}={v},\n...\n{braces[1]}\n```\n\nArgs:\nheader: Prefix before the brace\ncontent: Dict of key to values. One line will be displayed per item if\n`len(content) > 1`. Otherwise the code is collapsed\nbraces: Brace type (`(`, `[`, `{`), can be tuple for custom open/close.\nequal: The separator (`=`, `: `)\nlimit: Strings smaller than this will be collapsed\n\nReturns:\nThe block string", "source": "github-repos"}
{"code": "def reduce_concat(self, x):\n    return self.reduce(lambda y: y, x)", "docstring": "Performs a concat reduction on `x` across pfor iterations.\n\nNote that this currently may not work inside a control flow construct.\nArgs:\nx: an unvectorized Tensor.\n\nReturns:\nA Tensor that has rank one higher than `x`. The value is the vectorized\nversion of `x`, i.e. stacking the value of `x` across different pfor\niterations.", "source": "github-repos"}
{"code": "def store_state(node, reaching, defined, stack):\n    defs = [def_ for def_ in reaching if (not isinstance(def_[1], gast.arguments))]\n    if (not len(defs)):\n        return node\n    (reaching, original_defs) = zip(*defs)\n    assignments = []\n    for id_ in (set(reaching) - defined):\n        assignments.append(quoting.quote('{} = None'.format(id_)))\n    store = []\n    load = []\n    for (id_, def_) in zip(reaching, original_defs):\n        if (isinstance(def_, gast.Assign) and ('tangent.Stack()' in quoting.unquote(def_.value))):\n            (push, pop, op_id) = get_push_pop_stack()\n        else:\n            (push, pop, op_id) = get_push_pop()\n        store.append(template.replace('push(_stack, val, op_id)', push=push, val=id_, _stack=stack, op_id=op_id))\n        load.append(template.replace('val = pop(_stack, op_id)', pop=pop, val=id_, _stack=stack, op_id=op_id))\n    (body, return_) = (node.body[0].body[:(- 1)], node.body[0].body[(- 1)])\n    node.body[0].body = (((assignments + body) + store) + [return_])\n    node.body[1].body = (load[::(- 1)] + node.body[1].body)\n    return node", "docstring": "Push the final state of the primal onto the stack for the adjoint.\n\nPython's scoping rules make it possible for variables to not be defined in\ncertain blocks based on the control flow path taken at runtime. In order to\nmake sure we don't try to push non-existing variables onto the stack, we\ndefined these variables explicitly (by assigning `None` to them) at the\nbeginning of the function.\n\nAll the variables that reach the return statement are pushed onto the\nstack, and in the adjoint they are popped off in reverse order.\n\nArgs:\nnode: A module with the primal and adjoint function definitions as returned\nby `reverse_ad`.\nreaching: The variable definitions that reach the end of the primal.\ndefined: The variables defined at the end of the primal.\nstack: The stack node to use for storing and restoring state.\n\nReturns:\nnode: A node with the requisite pushes and pops added to make sure that\nstate is transferred between primal and adjoint split motion calls.", "source": "codesearchnet"}
{"code": "def new(self, user_id, tokens=None, user_data=None, valid_until=None, client_ip=None, encoding='utf-8'):\n    if (valid_until is None):\n        valid_until = (int(time.time()) + TicketFactory._DEFAULT_TIMEOUT)\n    else:\n        valid_until = int(valid_until)\n    user_id = ulp.quote(user_id)\n    token_str = ''\n    if tokens:\n        token_str = ','.join((ulp.quote(t) for t in tokens))\n    user_str = ('' if (not user_data) else ulp.quote(user_data))\n    ip = (self._DEFAULT_IP if (client_ip is None) else ip_address(client_ip))\n    data0 = ((bytes([ip.version]) + ip.packed) + pack('>I', valid_until))\n    data1 = '\\x00'.join((user_id, token_str, user_str)).encode(encoding)\n    digest = self._hexdigest(data0, data1)\n    parts = ('{0}{1:08x}{2}'.format(digest, valid_until, user_id), token_str, user_str)\n    return '!'.join(parts)", "docstring": "Creates a new authentication ticket.\n\nArgs:\nuser_id: User id to store in ticket (stored in plain text)\ntokens: Optional sequence of token strings to store in the ticket\n(stored in plain text).\nuser_data: Optional user data to store in the ticket (string like\nobject stored in plain text)\nvalid_until: Expiration time of ticket as a integer (typically\ntime.time() + seconds).\nclient_ip: Optional string or ip_address.IPAddress of the client.\nencoding: Optional encoding type that is used when hashing the\nstrings passed to the function\n\nReturns:\nA ticket string that can later be used to identify the user", "source": "codesearchnet"}
{"code": "def _truncate_float(matchobj, format_str='0.2g'):\n    if matchobj.group(0):\n        return format(float(matchobj.group(0)), format_str)\n    return ''", "docstring": "Truncate long floats\n\nArgs:\nmatchobj (re.Match): contains original float\nformat_str (str): format specifier\nReturns:\nstr: returns truncated float", "source": "codesearchnet"}
{"code": "def rtt_control(self, command, config):\n    config_byref = (ctypes.byref(config) if (config is not None) else None)\n    res = self._dll.JLINK_RTTERMINAL_Control(command, config_byref)\n    if (res < 0):\n        raise errors.JLinkRTTException(res)\n    return res", "docstring": "Issues an RTT Control command.\n\nAll RTT control is done through a single API call which expects\nspecifically laid-out configuration structures.\n\nArgs:\nself (JLink): the ``JLink`` instance\ncommand (int): the command to issue (see enums.JLinkRTTCommand)\nconfig (ctypes type): the configuration to pass by reference.\n\nReturns:\nAn integer containing the result of the command.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not path_spec:\n      raise ValueError('Missing path specification.')\n\n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    self._gzip_file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n    file_size = self._gzip_file_object.get_size()\n\n    self._gzip_file_object.seek(0, os.SEEK_SET)\n\n    uncompressed_data_offset = 0\n    next_member_offset = 0\n\n    while next_member_offset < file_size:\n      member = gzipfile.GzipMember(\n          self._gzip_file_object, next_member_offset, uncompressed_data_offset)\n      uncompressed_data_offset = (\n          uncompressed_data_offset + member.uncompressed_data_size)\n      self._members_by_end_offset[uncompressed_data_offset] = member\n      self.uncompressed_data_size += member.uncompressed_data_size\n      next_member_offset = member.member_end_offset", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (Optional[PathSpec]): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def get_overlaps(self, offset, length):\n    if (''.join([chunk.word for chunk in self])[offset] == ' '):\n        offset += 1\n    index = 0\n    result = ChunkList()\n    for chunk in self:\n        if ((offset < (index + len(chunk.word))) and (index < (offset + length))):\n            result.append(chunk)\n        index += len(chunk.word)\n    return result", "docstring": "Returns chunks overlapped with the given range.\n\nArgs:\noffset (int): Begin offset of the range.\nlength (int): Length of the range.\n\nReturns:\nOverlapped chunks. (:obj:`budou.chunk.ChunkList`)", "source": "codesearchnet"}
{"code": "def GetLogdirSubdirectories(path):\n  \n  if not tf.io.gfile.exists(path):\n    \n    return ()\n\n  if not tf.io.gfile.isdir(path):\n    raise ValueError('GetLogdirSubdirectories: path exists and is not a '\n                     'directory, %s' % path)\n\n  if IsCloudPath(path):\n    \n    \n    logger.info(\n        'GetLogdirSubdirectories: Starting to list directories via glob-ing.')\n    traversal_method = ListRecursivelyViaGlobbing\n  else:\n    \n    \n    logger.info(\n        'GetLogdirSubdirectories: Starting to list directories via walking.')\n    traversal_method = ListRecursivelyViaWalking\n\n  return (\n      subdir\n      for (subdir, files) in traversal_method(path)\n      if any(IsTensorFlowEventsFile(f) for f in files)\n  )", "docstring": "Obtains all subdirectories with events files.\n\nThe order of the subdirectories returned is unspecified. The internal logic\nthat determines order varies by scenario.\n\nArgs:\npath: The path to a directory under which to find subdirectories.\n\nReturns:\nA tuple of absolute paths of all subdirectories each with at least 1 events\nfile directly within the subdirectory.\n\nRaises:\nValueError: If the path passed to the method exists and is not a directory.", "source": "juraj-google-style"}
{"code": "def get_lambda_arn(app, account, region):\n    session = boto3.Session(profile_name=account, region_name=region)\n    lambda_client = session.client('lambda')\n    lambda_arn = None\n    paginator = lambda_client.get_paginator('list_functions')\n    for lambda_functions in paginator.paginate():\n        for lambda_function in lambda_functions['Functions']:\n            if (lambda_function['FunctionName'] == app):\n                lambda_arn = lambda_function['FunctionArn']\n                LOG.debug('Lambda ARN for lambda function %s is %s.', app, lambda_arn)\n                break\n        if lambda_arn:\n            break\n    if (not lambda_arn):\n        LOG.fatal('Lambda function with name %s not found in %s %s', app, account, region)\n        raise LambdaFunctionDoesNotExist('Lambda function with name {0} not found in {1} {2}'.format(app, account, region))\n    return lambda_arn", "docstring": "Get lambda ARN.\n\nArgs:\naccount (str): AWS account name.\nregion (str): Region name, e.g. us-east-1\napp (str): Lambda function name\n\nReturns:\nstr: ARN for requested lambda function", "source": "codesearchnet"}
{"code": "def register_recipe(cls, recipe):\n    recipe_name = recipe.contents['name']\n    cls._recipe_classes[recipe_name] = (recipe.contents, recipe.args, recipe.__doc__)", "docstring": "Registers a dftimewolf recipe.\n\nArgs:\nrecipe: imported python module representing the recipe.", "source": "codesearchnet"}
{"code": "def index_of(self, file_path, line_number, called_function_name, called_file_path, called_function_start_line):\n    location_key = (file_path, called_function_name, line_number)\n    if location_key in self._location_key_to_location:\n        location = self._location_key_to_location[location_key]\n        return location.id\n    else:\n        location_index = len(self._location_key_to_location) + 1\n        location = profile_pb2.Location()\n        location.id = location_index\n        self._location_key_to_location[location_key] = location\n        line = location.line.add()\n        line.function_id = self._functions.index_of(called_file_path, called_function_name, called_function_start_line)\n        line.line = line_number\n        return location_index", "docstring": "Returns index of the location, adding the location if needed.\n\nArgs:\nfile_path: (string) Path to file that makes the call.\nline_number: (integer) Call line number.\ncalled_function_name: (string) Function name of the function called at\n`file_path` and `line_number`.\ncalled_file_path: (string) Path to file where the called function is\ndefined.\ncalled_function_start_line: (integer) Start line number of called\nfunction definition in `called_file_path` file.\n\nReturns:\nIndex of location.", "source": "github-repos"}
{"code": "def on_value_event(self, event):\n    \n    if not event.summary.value:\n      logger.info('The summary of the event lacks a value.')\n      return None\n\n    \n    \n    watch_key = event.summary.value[0].node_name\n    tensor_value = debug_data.load_tensor_from_event(event)\n    device_name = _extract_device_name_from_event(event)\n    node_name, output_slot, debug_op = (\n        event.summary.value[0].node_name.split(':'))\n    maybe_base_expanded_node_name = (\n        self._run_states.get_maybe_base_expanded_node_name(node_name,\n                                                           self._run_key,\n                                                           device_name))\n    self._tensor_store.add(watch_key, tensor_value)\n    self._outgoing_channel.put(_comm_tensor_data(\n        device_name, node_name, maybe_base_expanded_node_name, output_slot,\n        debug_op, tensor_value, event.wall_time))\n\n    logger.info('on_value_event(): waiting for client ack (tensors)...')\n    self._incoming_channel.get()\n    logger.info('on_value_event(): client ack received (tensor).')\n\n    \n    \n    if self._is_debug_node_in_breakpoints(event.summary.value[0].node_name):\n      logger.info('Sending empty EventReply for breakpoint: %s',\n                      event.summary.value[0].node_name)\n      \n      return debug_service_pb2.EventReply()\n    return None", "docstring": "Records the summary values based on an updated message from the debugger.\n\nLogs an error message if writing the event to disk fails.\n\nArgs:\nevent: The Event proto to be processed.", "source": "juraj-google-style"}
{"code": "def create(self, name, description='', whitelisted_container_task_types=None, whitelisted_executable_task_types=None):\n    if (whitelisted_container_task_types is None):\n        whitelisted_container_task_types = []\n    if (whitelisted_executable_task_types is None):\n        whitelisted_executable_task_types = []\n    request_url = (self._client.base_api_url + self.list_url)\n    data_to_post = {'name': name, 'description': description, 'whitelisted_container_task_types': whitelisted_container_task_types, 'whitelisted_executable_task_types': whitelisted_executable_task_types}\n    response = self._client.session.post(request_url, data=data_to_post)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Create a task whitelist.\n\nArgs:\nname (str): The name of the task whitelist.\ndescription (str, optional): A description of the task whitelist.\nwhitelisted_container_task_types (list, optional): A list of\nwhitelisted container task type IDs.\nwhitelisted_executable_task_types (list, optional): A list\nof whitelisted executable task type IDs.\n\nReturns:\n:class:`saltant.models.task_whitelist.TaskWhitelist`:\nA task whitelist model instance representing the task\nwhitelist just created.", "source": "codesearchnet"}
{"code": "def __eq__(self, other):\n        \n        if isinstance(other, DocumentReference):\n            return self._client == other._client and self._path == other._path\n        else:\n            return NotImplemented", "docstring": "Equality check against another instance.\n\nArgs:\nother (Any): A value to compare against.\n\nReturns:\nUnion[bool, NotImplementedType]: Indicating if the values are\nequal.", "source": "juraj-google-style"}
{"code": "def create_profiler_ui(graph, run_metadata, ui_type='readline', on_ui_exit=None, config=None):\n    del config\n    analyzer = ProfileAnalyzer(graph, run_metadata)\n    cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)\n    cli.register_command_handler('list_profile', analyzer.list_profile, analyzer.get_help('list_profile'), prefix_aliases=['lp'])\n    cli.register_command_handler('print_source', analyzer.print_source, analyzer.get_help('print_source'), prefix_aliases=['ps'])\n    return cli", "docstring": "Create an instance of ReadlineUI based on a `tf.Graph` and `RunMetadata`.\n\nArgs:\ngraph: Python `Graph` object.\nrun_metadata: A `RunMetadata` protobuf object.\nui_type: (str) requested UI type, e.g., \"readline\".\non_ui_exit: (`Callable`) the callback to be called when the UI exits.\nconfig: An instance of `cli_config.CLIConfig`.\n\nReturns:\n(base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer\ncommands and tab-completions registered.", "source": "github-repos"}
{"code": "def exit_hook(callable, once=True):\n  r\n  if once and callable in ExitHooks:\n    return\n\n  ExitHooks.append(callable)", "docstring": "r\"\"\"A decorator that makes the decorated function to run while ec exits.\n\nArgs:\ncallable (callable): The target callable.\nonce (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True.\n\nNote:\nHooks are processedd in a LIFO order.", "source": "juraj-google-style"}
{"code": "def generate(cache_fn):\n    if (not os.path.exists(cache_fn)):\n        ((print >> sys.stderr), (\"Can't access `%s`!\" % cache_fn))\n        sys.exit(1)\n    with SqliteDict(cache_fn) as db:\n        for item in _pick_keywords(db):\n            (yield item)", "docstring": "Go thru `cache_fn` and filter keywords. Store them in `keyword_list.json`.\n\nArgs:\ncache_fn (str): Path to the file with cache.\n\nReturns:\nlist: List of :class:`KeywordInfo` objects.", "source": "codesearchnet"}
{"code": "def definition_package(cls):\n    outer_definition = cls.message_definition()\n    if (not outer_definition):\n        return util.get_package_for_module(cls.__module__)\n    return outer_definition.definition_package()", "docstring": "Helper method for creating creating the package of a definition.\n\nReturns:\nName of package that definition belongs to.", "source": "codesearchnet"}
{"code": "def shift_time(start_time, mins) -> str:\n    s_time = pd.Timestamp(start_time)\n    e_time = (s_time + (np.sign(mins) * pd.Timedelta(f'00:{abs(mins)}:00')))\n    return e_time.strftime('%H:%M')", "docstring": "Shift start time by mins\n\nArgs:\nstart_time: start time in terms of HH:MM string\nmins: number of minutes (+ / -)\n\nReturns:\nend time in terms of HH:MM string", "source": "codesearchnet"}
{"code": "def findall_operations_with_gate_type(self, gate_type: Type[T_DESIRED_GATE_TYPE]) -> Iterable[Tuple[(int, ops.GateOperation, T_DESIRED_GATE_TYPE)]]:\n    result = self.findall_operations((lambda operation: bool(ops.op_gate_of_type(operation, gate_type))))\n    for (index, op) in result:\n        gate_op = cast(ops.GateOperation, op)\n        (yield (index, gate_op, cast(T_DESIRED_GATE_TYPE, gate_op.gate)))", "docstring": "Find the locations of all gate operations of a given type.\n\nArgs:\ngate_type: The type of gate to find, e.g. XPowGate or\nMeasurementGate.\n\nReturns:\nAn iterator (index, operation, gate)'s for operations with the given\ngate type.", "source": "codesearchnet"}
{"code": "def image_data_format():\n    return _IMAGE_DATA_FORMAT", "docstring": "Return the default image data format convention.\n\nReturns:\nA string, either `'channels_first'` or `'channels_last'`.\n\nExample:\n\n>>> keras.config.image_data_format()\n'channels_last'", "source": "github-repos"}
{"code": "def create_write_transform(self) -> beam.PTransform[Chunk, Any]:\n    raise NotImplementedError(type(self))", "docstring": "Creates a PTransform that writes embeddings to the vector database.\n\nReturns:\nA PTransform that accepts PCollection[Chunk] and writes the chunks'\nembeddings and metadata to the configured vector database.\nThe transform should handle:\n- Converting Chunk format to database schema\n- Setting up database connection/client\n- Writing with appropriate batching/error handling", "source": "github-repos"}
{"code": "def load(cls, campaign_dir, ns_path=None, runner_type='Auto', optimized=True, check_repo=True):\n    if (ns_path is not None):\n        ns_path = os.path.abspath(ns_path)\n    campaign_dir = os.path.abspath(campaign_dir)\n    db = DatabaseManager.load(campaign_dir)\n    script = db.get_script()\n    runner = None\n    if (ns_path is not None):\n        runner = CampaignManager.create_runner(ns_path, script, runner_type, optimized)\n    return cls(db, runner, check_repo)", "docstring": "Load an existing simulation campaign.\n\nNote that specifying an ns-3 installation is not compulsory when using\nthis method: existing results will be available, but in order to run\nadditional simulations it will be necessary to specify a\nSimulationRunner object, and assign it to the CampaignManager.\n\nArgs:\ncampaign_dir (str): path to the directory in which to save the\nsimulation campaign database.\nns_path (str): path to the ns-3 installation to employ in this\ncampaign.\nrunner_type (str): implementation of the SimulationRunner to use.\nValue can be: SimulationRunner (for running sequential\nsimulations locally), ParallelRunner (for running parallel\nsimulations locally), GridRunner (for running simulations using\na DRMAA-compatible parallel task scheduler).\noptimized (bool): whether to configure the runner to employ an\noptimized ns-3 build.", "source": "codesearchnet"}
{"code": "def ReadFromFile(self, path):\n    self._definitions = {}\n    with open(path, 'r') as file_object:\n        for preset_definition in self._ReadPresetsFromFileObject(file_object):\n            self._definitions[preset_definition.name] = preset_definition", "docstring": "Reads parser and parser plugin presets from a file.\n\nArgs:\npath (str): path of file that contains the the parser and parser plugin\npresets configuration.\n\nRaises:\nMalformedPresetError: if one or more plugin preset definitions are\nmalformed.", "source": "codesearchnet"}
{"code": "def _invalid_triple_quote(self, quote, row, col=None):\n        \n        self.add_message(\n            'invalid-triple-quote',\n            line=row,\n            args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)),\n            **self.get_offset(col)\n        )", "docstring": "Add a message for an invalid triple quote.\n\nArgs:\nquote: The quote characters that were found.\nrow: The row number the quote characters were found on.\ncol: The column the quote characters were found on.", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    image_num_patches = [image_size_to_num_patches(image_size=imsize, grid_pinpoints=self.config.image_grid_pinpoints, patch_size=self.config.vision_config.image_size) for imsize in image_sizes]\n    if pixel_values.dim() == 5:\n        _pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]\n        pixel_values = torch.cat(_pixel_values_list, dim=0)\n    elif pixel_values.dim() != 4:\n        raise ValueError(f'pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions')\n    image_features = self.vision_tower(pixel_values, output_hidden_states=True)\n    if isinstance(vision_feature_layer, int):\n        selected_image_feature = image_features.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        selected_image_feature = torch.cat(hs_pool, dim=-1)\n    if vision_feature_select_strategy == 'default':\n        selected_image_feature = selected_image_feature[:, 1:]\n    elif vision_feature_select_strategy == 'full':\n        selected_image_feature = selected_image_feature\n    image_features = self.multi_modal_projector(selected_image_feature)\n    image_features = torch.split(image_features, image_num_patches, dim=0)\n    image_features, feature_lens = self.pack_image_features(image_features, image_sizes, vision_feature_select_strategy, image_newline=self.image_newline)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, num_patches, channels, height, width)`)\nThe tensors corresponding to the input images.\nimage_sizes (`torch.Tensor` of shape `(num_images, 2)`)\nActual image size of each images (H, W).\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`, *optional*):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nimage_features (List[`torch.Tensor`]): List of image feature tensor, each contains all the visual feature of all patches\nand are of shape `(num_patches, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def merge_config(\n    config: Mapping[str, Any],\n    override_config: Mapping[str, Any] = None,\n    override_config_fn: str = None,\n) -> Mapping[str, Any]:\n    \n\n    if override_config_fn:\n        with open(override_config_fn, \"r\") as f:\n            override_config = yaml.load(f, Loader=yaml.SafeLoader)\n\n    if not override_config:\n        log.info(\"Missing override_config\")\n\n    return functools.reduce(rec_merge, (config, override_config))", "docstring": "Override config with additional configuration in override_config or override_config_fn\n\nUsed in script to merge CLI options with Config\n\nArgs:\nconfig: original configuration\noverride_config: new configuration to override/extend current config\noverride_config_fn: new configuration filename as YAML file", "source": "juraj-google-style"}
{"code": "def replace(self, i, species, coords=None, coords_are_cartesian=False, properties=None):\n    if (coords is None):\n        frac_coords = self[i].frac_coords\n    elif coords_are_cartesian:\n        frac_coords = self._lattice.get_fractional_coords(coords)\n    else:\n        frac_coords = coords\n    new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties)\n    self._sites[i] = new_site", "docstring": "Replace a single site. Takes either a species or a dict of species and\noccupations.\n\nArgs:\ni (int): Index of the site in the _sites list.\nspecies (species-like): Species of replacement site\ncoords (3x1 array): Coordinates of replacement site. If None,\nthe current coordinates are assumed.\ncoords_are_cartesian (bool): Whether coordinates are cartesian.\nDefaults to False.\nproperties (dict): Properties associated with the site.", "source": "codesearchnet"}
{"code": "def rename_next_state_fluent(name: str) -> str:\n    i = name.index('/')\n    functor = name[:(i - 1)]\n    arity = name[(i + 1):]\n    return '{}/{}'.format(functor, arity)", "docstring": "Returns next state fluent canonical name.\n\nArgs:\nname (str): The current state fluent name.\n\nReturns:\nstr: The next state fluent name.", "source": "codesearchnet"}
{"code": "def add_update(self, updates, inputs=None):\n    if inputs is not None:\n        tf_logging.warning('`add_update` `inputs` kwarg has been deprecated. You no longer need to pass a value to `inputs` as it is being automatically inferred.')\n    call_context = base_layer_utils.call_context()\n    if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context() and (not call_context.saving):\n        return\n    updates = generic_utils.to_list(updates)\n    if call_context.in_call:\n        relevant_inputs = call_context.inputs\n    else:\n        inbound_nodes = getattr(self, '_inbound_nodes', [])\n        relevant_inputs = [node.input_tensors for node in inbound_nodes]\n\n    def process_update(x):\n        \n        if callable(x):\n            update = lambda: process_update(x())\n            return update()\n        elif isinstance(x, ops.Operation):\n            update = x\n        elif hasattr(x, 'op'):\n            update = x.op\n        else:\n            update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n        reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n        update._unconditional_update = update not in reachable\n        return update\n    updates = [process_update(x) for x in updates]\n    self._updates.extend(updates)", "docstring": "Add update op(s), potentially dependent on layer inputs.\n\nWeight updates (for instance, the updates of the moving mean and variance\nin a BatchNormalization layer) may be dependent on the inputs passed\nwhen calling a layer. Hence, when reusing the same layer on\ndifferent inputs `a` and `b`, some entries in `layer.updates` may be\ndependent on `a` and some on `b`. This method automatically keeps track\nof dependencies.\n\nThe `get_updates_for` method allows to retrieve the updates relevant to a\nspecific set of inputs.\n\nThis call is ignored when eager execution is enabled (in that case, variable\nupdates are run on the fly and thus do not need to be tracked for later\nexecution).\n\nArgs:\nupdates: Update op, or list/tuple of update ops, or zero-arg callable\nthat returns an update op. A zero-arg callable should be passed in\norder to disable running the updates by setting `trainable=False`\non this Layer, when executing in Eager mode.\ninputs: Deprecated, will be automatically inferred.", "source": "github-repos"}
{"code": "def get_module_functions(modules):\n    module_fns = set()\n    for module in modules:\n        for key in dir(module):\n            attr = getattr(module, key)\n            if isinstance(attr, (types.BuiltinFunctionType, types.FunctionType, numpy.ufunc)):\n                module_fns.add(attr)\n    return module_fns", "docstring": "Finds functions that do not have implemented derivatives.\n\nArgs:\nmodules: A list of Python modules. Functions contained in these modules\nwill be checked for membership in 'implemented', and if not found,\nwill be added to an 'unimplemented' set\nimplemented: A Python object containing implemented derivatives. A function\nshould be checkable for membership using the `fn in implemented` syntax.\n\nReturns:\nmodule_fns: A set of functions, builtins or ufuncs in `modules`.", "source": "codesearchnet"}
{"code": "def unarchive_user(self, user_id):\n    url = (self.record_url + '/unarchive')\n    res = requests.patch(url=url, json={'user_id': user_id}, headers=HEADERS, verify=False)\n    self.write_response_html_to_file(res, 'bob.html')\n    res.raise_for_status()", "docstring": "Unarchives the user with the specified user ID.\n\nArgs:\nuser_id: `int`. The ID of the user to unarchive.\n\nReturns:\n`NoneType`: None.", "source": "codesearchnet"}
{"code": "def numeric_summary(tensor):\n\n    def _counts_summary(counts, skip_zeros=True, total_count=None):\n        \n        if skip_zeros:\n            counts = [(count_key, count_val) for count_key, count_val in counts if count_val]\n        max_common_len = 0\n        for count_key, count_val in counts:\n            count_val_str = str(count_val)\n            common_len = max(len(count_key) + 1, len(count_val_str) + 1)\n            max_common_len = max(common_len, max_common_len)\n        key_line = debugger_cli_common.RichLine('|')\n        val_line = debugger_cli_common.RichLine('|')\n        for count_key, count_val in counts:\n            count_val_str = str(count_val)\n            key_line += _pad_string_to_length(count_key, max_common_len)\n            val_line += _pad_string_to_length(count_val_str, max_common_len)\n        key_line += ' |'\n        val_line += ' |'\n        if total_count is not None:\n            total_key_str = 'total'\n            total_val_str = str(total_count)\n            max_common_len = max(len(total_key_str) + 1, len(total_val_str))\n            total_key_str = _pad_string_to_length(total_key_str, max_common_len)\n            total_val_str = _pad_string_to_length(total_val_str, max_common_len)\n            key_line += total_key_str + ' |'\n            val_line += total_val_str + ' |'\n        return debugger_cli_common.rich_text_lines_from_rich_line_list([key_line, val_line])\n    if not isinstance(tensor, np.ndarray) or not np.size(tensor):\n        return debugger_cli_common.RichTextLines(['No numeric summary available due to empty tensor.'])\n    elif np.issubdtype(tensor.dtype, np.floating) or np.issubdtype(tensor.dtype, np.complexfloating) or np.issubdtype(tensor.dtype, np.integer):\n        counts = [('nan', np.sum(np.isnan(tensor))), ('-inf', np.sum(np.isneginf(tensor))), ('-', np.sum(np.logical_and(tensor < 0.0, np.logical_not(np.isneginf(tensor))))), ('0', np.sum(tensor == 0.0)), ('+', np.sum(np.logical_and(tensor > 0.0, np.logical_not(np.isposinf(tensor))))), ('+inf', np.sum(np.isposinf(tensor)))]\n        output = _counts_summary(counts, total_count=np.size(tensor))\n        valid_array = tensor[np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]\n        if np.size(valid_array):\n            stats = [('min', np.min(valid_array)), ('max', np.max(valid_array)), ('mean', np.mean(valid_array)), ('std', np.std(valid_array))]\n            output.extend(_counts_summary(stats, skip_zeros=False))\n        return output\n    elif tensor.dtype == np.bool_:\n        counts = [('False', np.sum(tensor == 0)), ('True', np.sum(tensor > 0))]\n        return _counts_summary(counts, total_count=np.size(tensor))\n    else:\n        return debugger_cli_common.RichTextLines(['No numeric summary available due to tensor dtype: %s.' % tensor.dtype])", "docstring": "Get a text summary of a numeric tensor.\n\nThis summary is only available for numeric (int*, float*, complex*) and\nBoolean tensors.\n\nArgs:\ntensor: (`numpy.ndarray`) the tensor value object to be summarized.\n\nReturns:\nThe summary text as a `RichTextLines` object. If the type of `tensor` is not\nnumeric or Boolean, a single-line `RichTextLines` object containing a\nwarning message will reflect that.", "source": "github-repos"}
{"code": "def ReadPathInfoHistory(self, client_id, path_type, components):\n    histories = self.ReadPathInfosHistories(client_id, path_type, [components])\n    return histories[components]", "docstring": "Reads a collection of hash and stat entry for given path.\n\nArgs:\nclient_id: An identifier string for a client.\npath_type: A type of a path to retrieve path history for.\ncomponents: A tuple of path components corresponding to path to retrieve\ninformation for.\n\nReturns:\nA list of `rdf_objects.PathInfo` ordered by timestamp in ascending order.", "source": "codesearchnet"}
{"code": "def _data_from_df(df):\n    _df = df.copy()\n    if isinstance(df.columns, pd.MultiIndex):\n        try:\n            _df.columns = ['_'.join(col) for col in _df.columns.values]\n        except TypeError:\n            raise TypeError('Could not flatten MultiIndex columns. use string column names or flatten manually')\n    if isinstance(df.columns, pd.CategoricalIndex):\n        _df.columns = df.columns.tolist()\n    index_name = ColumnDataSource._df_index_name(df)\n    if (index_name == 'index'):\n        _df.index = pd.Index(_df.index.values)\n    else:\n        _df.index = pd.Index(_df.index.values, name=index_name)\n    _df.reset_index(inplace=True)\n    tmp_data = {c: v.values for (c, v) in _df.iteritems()}\n    new_data = {}\n    for (k, v) in tmp_data.items():\n        new_data[k] = v\n    return new_data", "docstring": "Create a ``dict`` of columns from a Pandas ``DataFrame``,\nsuitable for creating a ColumnDataSource.\n\nArgs:\ndf (DataFrame) : data to convert\n\nReturns:\ndict[str, np.array]", "source": "codesearchnet"}
{"code": "def DotProductAttention(query, key, value, mask, dropout, mode, rng):\n    depth = np.shape(query)[(- 1)]\n    dots = (np.matmul(query, np.swapaxes(key, (- 1), (- 2))) / np.sqrt(depth))\n    if (mask is not None):\n        dots = np.where(mask, dots, (- 1000000000.0))\n    dots = np.exp((dots - backend.logsumexp(dots, axis=(- 1), keepdims=True)))\n    if (dropout >= 1.0):\n        raise ValueError('Dropout rates must be lower than 1.')\n    if ((dropout is not None) and (dropout > 0.0) and (mode == 'train')):\n        keep = backend.random.bernoulli(rng, (1.0 - dropout), dots.shape)\n        dots = np.where(keep, (dots / (1.0 - dropout)), 0)\n    out = np.matmul(dots, value)\n    return out", "docstring": "Core dot product self-attention.\n\nArgs:\nquery: array of representations\nkey: array of representations\nvalue: array of representations\nmask: attention-mask, gates attention\ndropout: float: dropout rate\nmode: 'eval' or 'train': whether to use dropout\nrng: JAX PRNGKey: subkey for disposable use\n\nReturns:\nSelf attention for q, k, v arrays.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 intervals: List[Interval] = None,\n                 no_overlap: bool = True,\n                 no_contiguous: bool = True) -> None:\n        \n        \n        \n        \n        self.intervals = [] if intervals is None else list(intervals)\n        self.no_overlap = no_overlap\n        self.no_contiguous = no_contiguous\n        for i in self.intervals:\n            if not isinstance(i, Interval):\n                raise TypeError(\n                    \"IntervalList creation failed: contents are not all \"\n                    \"Interval: {}\".format(repr(self.intervals)))\n        self._tidy()", "docstring": "Creates the :class:`IntervalList`.\n\nArgs:\nintervals: optional list of :class:`Interval` objects to\nincorporate into the :class:`IntervalList`\nno_overlap: merge intervals that overlap (now and on subsequent\naddition)?\nno_contiguous: if ``no_overlap`` is set, merge intervals that are\ncontiguous too?", "source": "juraj-google-style"}
{"code": "def rebuild_tree(cls, session, tree_id):\n    session.query(cls).filter_by(tree_id=tree_id).update({cls.left: 0, cls.right: 0, cls.level: 0})\n    top = session.query(cls).filter_by(parent_id=None).filter_by(tree_id=tree_id).one()\n    top.left = left = 1\n    top.right = right = 2\n    top.level = level = cls.get_default_level()\n\n    def recursive(children, left, right, level):\n        level = (level + 1)\n        for (i, node) in enumerate(children):\n            same_level_right = children[(i - 1)].right\n            left = (left + 1)\n            if (i > 0):\n                left = (left + 1)\n            if same_level_right:\n                left = (same_level_right + 1)\n            right = (left + 1)\n            node.left = left\n            node.right = right\n            parent = node.parent\n            j = 0\n            while parent:\n                parent.right = ((right + 1) + j)\n                parent = parent.parent\n                j += 1\n            node.level = level\n            recursive(node.children, left, right, level)\n    recursive(top.children, left, right, level)", "docstring": "This method rebuid tree.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session\ntree_id (int or str): id of tree\n\nExample:\n\n* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_rebuild`", "source": "codesearchnet"}
{"code": "def Format(self, format_string, rdf):\n    result = []\n    for (literal_text, field_name, _, _) in self.parse(format_string):\n        if literal_text:\n            result.append(literal_text)\n        if (field_name is not None):\n            rslts = []\n            objs = self.expander(rdf, field_name)\n            for o in objs:\n                rslts.extend(self.FanOut(o))\n            result.append(','.join(rslts))\n    return ''.join(result)", "docstring": "Apply string formatting templates to rdf data.\n\nUses some heuristics to coerce rdf values into a form compatible with string\nformatter rules. Repeated items are condensed into a single comma separated\nlist. Unlike regular string.Formatter operations, we use objectfilter\nexpansion to fully acquire the target attribute in one pass, rather than\nrecursing down each element of the attribute tree.\n\nArgs:\nformat_string: A format string specification.\nrdf: The rdf value to be formatted.\n\nReturns:\nA string of formatted data.", "source": "codesearchnet"}
{"code": "def path_in_cache(self, filename, metahash):\n        \n        cpath = self._genpath(filename, metahash)\n        if os.path.exists(cpath):\n            return cpath\n        else:\n            raise CacheMiss", "docstring": "Generates the path to a file in the mh cache.\n\nThe generated path does not imply the file's existence!\n\nArgs:\nfilename: Filename relative to buildroot\nrule: A targets.SomeBuildRule object\nmetahash: hash object", "source": "juraj-google-style"}
{"code": "def build_from_token_counts(self, token_counts, min_count, num_iterations=4):\n    self._init_alphabet_from_tokens(six.iterkeys(token_counts))\n    self._init_subtokens_from_list(list(self._alphabet))\n    if (min_count < 1):\n        min_count = 1\n    for i in xrange(num_iterations):\n        subtoken_counts = collections.defaultdict(int)\n        for (token, count) in six.iteritems(token_counts):\n            escaped_token = _escape_token(token, self._alphabet)\n            subtokens = self._escaped_token_to_subtoken_strings(escaped_token)\n            start = 0\n            for subtoken in subtokens:\n                for end in xrange((start + 1), (len(escaped_token) + 1)):\n                    new_subtoken = escaped_token[start:end]\n                    subtoken_counts[new_subtoken] += count\n                start += len(subtoken)\n        len_to_subtoken_strings = []\n        for (subtoken_string, count) in six.iteritems(subtoken_counts):\n            lsub = len(subtoken_string)\n            if (count >= min_count):\n                while (len(len_to_subtoken_strings) <= lsub):\n                    len_to_subtoken_strings.append(set())\n                len_to_subtoken_strings[lsub].add(subtoken_string)\n        new_subtoken_strings = []\n        for lsub in xrange((len(len_to_subtoken_strings) - 1), 0, (- 1)):\n            subtoken_strings = len_to_subtoken_strings[lsub]\n            for subtoken_string in subtoken_strings:\n                count = subtoken_counts[subtoken_string]\n                if (count >= min_count):\n                    if (subtoken_string not in self._alphabet):\n                        new_subtoken_strings.append((count, subtoken_string))\n                    for l in xrange(1, lsub):\n                        subtoken_counts[subtoken_string[:l]] -= count\n        new_subtoken_strings.extend(((subtoken_counts.get(a, 0), a) for a in self._alphabet))\n        new_subtoken_strings.sort(reverse=True)\n        self._init_subtokens_from_list([subtoken for (_, subtoken) in new_subtoken_strings])", "docstring": "Train a SubwordTextTokenizer based on a dictionary of word counts.\n\nArgs:\ntoken_counts: a dictionary of Unicode strings to int.\nmin_count: an integer - discard subtokens with lower counts.\nnum_iterations: an integer; how many iterations of refinement.", "source": "codesearchnet"}
{"code": "def interruptRead(self, endpoint, size, timeout = 100):\n        r\n        return self.dev.read(endpoint, size, timeout)", "docstring": "r\"\"\"Performs a interrupt read request to the endpoint specified.\n\nArguments:\nendpoint: endpoint number.\nsize: number of bytes to read.\ntimeout: operation timeout in milliseconds. (default: 100)\nReturns a tuple with the data read.", "source": "juraj-google-style"}
{"code": "def get_values(js_dict, value='value'):\n    \n\n    values = js_dict[value]\n    if type(values) is list:\n        if type(values[0]) is not dict or tuple:\n            return values\n    \n    values = {int(key): value for (key, value) in values.items()}\n\n    if js_dict.get('size'):\n        max_val = np.prod(np.array((js_dict['size'])))\n    else:\n        max_val = np.prod(np.array((js_dict['dimension']['size'])))\n    vals = max_val * [None]\n    for (key, value) in values.items():\n        vals[key] = value\n\n    values = vals\n    return values", "docstring": "Get values from input data.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\nvalue (string, optional): name of the value column. Defaults to 'value'.\n\nReturns:\nvalues (list): list of dataset values.", "source": "juraj-google-style"}
{"code": "def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):\n    \n    if max_procs is None: max_procs = cpu_count()\n    kw_arr = saturate_kwargs(keys=keys, **kwargs)\n    if len(kw_arr) == 0: return\n\n    if isinstance(affinity, int):\n        win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)\n\n    task_queue = queue.Queue()\n    while len(kw_arr) > 0:\n        for _ in range(max_procs):\n            if len(kw_arr) == 0: break\n            kw = kw_arr.pop(0)\n            p = Process(target=func, kwargs=kw)\n            p.start()\n            sys.stdout.flush()\n            task_queue.put(p)\n            if show_proc:\n                signature = ', '.join([f'{k}={v}' for k, v in kw.items()])\n                print(f'[{func.__name__}] ({signature})')\n        while not task_queue.empty():\n            p = task_queue.get()\n            p.join()", "docstring": "Provide interface for multiprocessing\n\nArgs:\nfunc: callable functions\nkeys: keys in kwargs that want to use process\nmax_procs: max number of processes\nshow_proc: whether to show process\naffinity: CPU affinity\n**kwargs: kwargs for func", "source": "juraj-google-style"}
{"code": "class PerceiverOneHotPreprocessor(AbstractPreprocessor):\n\n    def __init__(self, config: PerceiverConfig) -> None:\n        super().__init__()\n        self.config: PerceiverConfig = config\n\n    @property\n    def num_channels(self) -> int:\n        return self.config.num_labels\n\n    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True):\n        inputs = inputs[:, None, :]\n        return (inputs, None, inputs)", "docstring": "One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input.\n\nArgs:\nconfig ([`PerceiverConfig`]):\nModel configuration.", "source": "github-repos"}
{"code": "def copy_count_a(input_a, *other_inputs, **kwargs):\n    count = input_a.count()\n    input_a.skip_all()\n    for input_x in other_inputs:\n        input_x.skip_all()\n    return [IOTileReading(0, 0, count)]", "docstring": "Copy the latest reading from input a into the output.\n\nAll other inputs are skipped to that after this function\nruns there are no readings left in any of the input walkers\neven if no output is generated.\n\nReturns:\nlist(IOTileReading)", "source": "codesearchnet"}
{"code": "def ParseShadowEntry(self, line):\n    \n    fields = (\"login\", \"passwd\", \"last_change\", \"min_age\", \"max_age\",\n              \"warn_time\", \"inactivity\", \"expire\", \"reserved\")\n    if line:\n      rslt = dict(zip(fields, line.split(\":\")))\n      pw_entry = self.shadow.setdefault(rslt[\"login\"], rdf_client.PwEntry())\n      pw_entry.store = self.shadow_store\n      pw_entry.hash_type = self.GetHashType(rslt[\"passwd\"])\n      \n      last_change = rslt.get(\"last_change\")\n      if last_change:\n        pw_entry.age = int(last_change)\n      max_age = rslt.get(\"max_age\")\n      if max_age:\n        pw_entry.max_age = int(max_age)", "docstring": "Extract the user accounts in /etc/shadow.\n\nIdentifies the users in /etc/shadow and several attributes of their account,\nincluding how their password is crypted and password aging characteristics.\n\nArgs:\nline: An entry of the shadow file.", "source": "juraj-google-style"}
{"code": "def try_get_column(column_name, node, context):\n    \n    selectable = get_node_selectable(node, context)\n    if not hasattr(selectable, 'c'):\n        raise AssertionError(\n            u'Selectable \"{}\" does not have a column collection. Context is {}.'.format(\n                selectable, context))\n    return selectable.c.get(column_name, None)", "docstring": "Attempt to get a column by name from the selectable.\n\nArgs:\ncolumn_name: str, name of the column to retrieve.\nnode: SqlNode, the node the column is being retrieved for.\ncontext: CompilationContext, compilation specific metadata.\n\nReturns:\nOptional[column], the SQLAlchemy column if found, None otherwise.", "source": "juraj-google-style"}
{"code": "def _testMultipleReduceJoin(self, input_array, axis, separator=' '):\n    with self.cached_session():\n        output = string_ops.reduce_join(inputs=input_array, axis=axis, keep_dims=False, separator=separator)\n        output_keep_dims = string_ops.reduce_join(inputs=input_array, axis=axis, keep_dims=True, separator=separator)\n        truth = input_array\n        for index in axis:\n            truth = string_ops.reduce_join(inputs=truth, axis=index, keep_dims=True, separator=separator)\n        if not axis:\n            truth = constant_op.constant(truth)\n        truth_squeezed = array_ops.squeeze(truth, axis=axis)\n        output_array = self.evaluate(output)\n        output_keep_dims_array = self.evaluate(output_keep_dims)\n        truth_array = self.evaluate(truth)\n        truth_squeezed_array = self.evaluate(truth_squeezed)\n    self.assertAllEqualUnicode(truth_array, output_keep_dims_array)\n    self.assertAllEqualUnicode(truth_squeezed_array, output_array)\n    self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())\n    self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())", "docstring": "Tests reduce_join for one input and multiple axes.\n\nDoes so by comparing the output to that from nested reduce_string_joins.\nThe correctness of single-dimension reduce_join is verified by other\ntests below using _testReduceJoin.\n\nArgs:\ninput_array: The input to test.\naxis: The indices to reduce.\nseparator: The separator to use when joining.", "source": "github-repos"}
{"code": "def _convert_template_option(template):\n    \n\n    option = {}\n    extraction_method = template.get('extraction_method')\n    if extraction_method == 'guess':\n        option['guess'] = True\n    elif extraction_method == 'lattice':\n        option['lattice'] = True\n    elif extraction_method == 'stream':\n        option['stream'] = True\n\n    option['pages'] = template.get('page')\n    option['area'] = [round(template['y1'], 3), round(template['x1'], 3), round(template['y2'], 3), round(template['x2'], 3)]\n\n    return option", "docstring": "Convert Tabula app template to tabula-py option\n\nArgs:\ntemplate (dict): Tabula app template\n\nReturns:\n`obj`:dict: tabula-py option", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def iter_compress(item_iter, flag_iter):\n    true_items = (item for (item, flag) in zip(item_iter, flag_iter) if flag)\n    return true_items", "docstring": "iter_compress - like numpy compress\n\nArgs:\nitem_iter (list):\nflag_iter (list): of bools\n\nReturns:\nlist: true_items\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_iter import *  # NOQA\n>>> item_iter = [1, 2, 3, 4, 5]\n>>> flag_iter = [False, True, True, False, True]\n>>> true_items = iter_compress(item_iter, flag_iter)\n>>> result = list(true_items)\n>>> print(result)\n[2, 3, 5]", "source": "codesearchnet"}
{"code": "def CheckComment(line, filename, linenum, next_line_start, error):\n    commentpos = line.find('\n    if (commentpos != (- 1)):\n        if ((re.sub('\\\\\\\\.', '', line[0:commentpos]).count('\"') % 2) == 0):\n            if ((not (Match('^.*{ *\n                error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments')\n            comment = line[commentpos:]\n            match = _RE_PATTERN_TODO.match(comment)\n            if match:\n                leading_whitespace = match.group(1)\n                if (len(leading_whitespace) > 1):\n                    error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO')\n                username = match.group(2)\n                if (not username):\n                    error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like \"\n                middle_whitespace = match.group(3)\n                if ((middle_whitespace != ' ') and (middle_whitespace != '')):\n                    error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space')\n            if (Match('\n                error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between", "docstring": "Checks for common mistakes in comments.\n\nArgs:\nline: The line in question.\nfilename: The name of the current file.\nlinenum: The number of the line to check.\nnext_line_start: The first non-whitespace column of the next line.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def _parse_authors(authors):\n    link = authors.find('a')\n    link = (link[0].params.get('href') if link else None)\n    author_list = _strip_content(authors)\n    if ('(' in author_list):\n        author_list = author_list.split('(')[0]\n    if (not author_list.strip()):\n        return []\n    return map((lambda author: Author(author.strip(), link)), author_list.strip().split(','))", "docstring": "Parse informations about authors of the book.\n\nArgs:\ndom (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nlist: List of :class:`.Author` objects. Blank if no author \\\nfound.", "source": "codesearchnet"}
{"code": "def main(args):\n  \n  if not args:\n    raise Exception('Please specify at least one JSON config path')\n  inputs = []\n  program = []\n  outputs = []\n  for arg in args:\n    with open(arg) as fd:\n      config = json.load(fd)\n    inputs.extend(config.get('inputs', []))\n    program.extend(config.get('program', []))\n    outputs.extend(config.get('outputs', []))\n  if not program:\n    raise Exception('Please specify a program')\n  return run(inputs, program, outputs)", "docstring": "Invokes run function using a JSON file config.\n\nArgs:\nargs: CLI args, which can be a JSON file containing an object whose\nattributes are the parameters to the run function. If multiple JSON\nfiles are passed, their contents are concatenated.\nReturns:\n0 if succeeded or nonzero if failed.\nRaises:\nException: If input data is missing.", "source": "juraj-google-style"}
{"code": "def slice_hidden(x, hidden_size, num_blocks):\n  \n  batch_size, latent_dim, _ = common_layers.shape_list(x)\n  block_dim = hidden_size \n  x_sliced = tf.reshape(x,\n                        shape=[batch_size, latent_dim, num_blocks, block_dim])\n  return x_sliced", "docstring": "Slice encoder hidden state under num_blocks.\n\nArgs:\nx: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].\nhidden_size: Dimension of the latent space.\nnum_blocks: Number of blocks in DVQ.\n\nReturns:\nSliced states of shape [batch_size, latent_dim, num_blocks, block_dim].", "source": "juraj-google-style"}
{"code": "def http_exception(channel, title):\n    \n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"Too much help\",\n        \"{} is too helpful! Try trimming some of the help messages.\".format(title),\n        modulename=modulename\n    )\n\n    return gui", "docstring": "Creates an embed UI containing the 'too long' error message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\ntitle (str): The title of the embed\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def get_cohp(self, spin=None, integrated=False):\n    if (not integrated):\n        populations = self.cohp\n    else:\n        populations = self.icohp\n    if (populations is None):\n        return None\n    elif (spin is None):\n        return populations\n    else:\n        if isinstance(spin, int):\n            spin = Spin(spin)\n        elif isinstance(spin, str):\n            s = {'up': 1, 'down': (- 1)}[spin.lower()]\n            spin = Spin(s)\n        return {spin: populations[spin]}", "docstring": "Returns the COHP or ICOHP for a particular spin.\n\nArgs:\nspin: Spin. Can be parsed as spin object, integer (-1/1)\nor str (\"up\"/\"down\")\nintegrated: Return COHP (False) or ICOHP (True)\n\nReturns:\nReturns the CHOP or ICOHP for the input spin. If Spin is\nNone and both spins are present, both spins will be returned\nas a dictionary.", "source": "codesearchnet"}
{"code": "def _get_input_readers(self, state):\n    \n    serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %\n                                    state.key().id_or_name())\n    serialized_input_readers = model._HugeTaskPayload.get_by_key_name(\n        serialized_input_readers_key, parent=state)\n\n    \n    input_reader_class = state.mapreduce_spec.mapper.input_reader_class()\n    split_param = state.mapreduce_spec.mapper\n    if issubclass(input_reader_class, map_job.InputReader):\n      split_param = map_job.JobConfig._to_map_job_config(\n          state.mapreduce_spec,\n          os.environ.get(\"HTTP_X_APPENGINE_QUEUENAME\"))\n    if serialized_input_readers is None:\n      readers = input_reader_class.split_input(split_param)\n    else:\n      readers = [input_reader_class.from_json_str(_json) for _json in\n                 json.loads(zlib.decompress(\n                 serialized_input_readers.payload))]\n\n    if not readers:\n      return None, None\n\n    \n    state.mapreduce_spec.mapper.shard_count = len(readers)\n    state.active_shards = len(readers)\n\n    \n    if serialized_input_readers is None:\n      \n      serialized_input_readers = model._HugeTaskPayload(\n          key_name=serialized_input_readers_key, parent=state)\n      readers_json_str = [i.to_json_str() for i in readers]\n      serialized_input_readers.payload = zlib.compress(json.dumps(\n                                                       readers_json_str))\n    return readers, serialized_input_readers", "docstring": "Get input readers.\n\nArgs:\nstate: a MapreduceState model.\n\nReturns:\nA tuple: (a list of input readers, a model._HugeTaskPayload entity).\nThe payload entity contains the json serialized input readers.\n(None, None) when input reader inplitting returned no data to process.", "source": "juraj-google-style"}
{"code": "def _url_dirname(self, url_or_path):\n    scheme, path = self._split_scheme(url_or_path)\n    return self._combine_scheme(scheme, posixpath.dirname(path))", "docstring": "Like posixpath.dirname, but preserves scheme:// prefix.\n\nArgs:\nurl_or_path: A string in the form of scheme://some/path OR /some/path.", "source": "github-repos"}
{"code": "def phase_flip(p: Optional[float]=None) -> Union[(common_gates.ZPowGate, PhaseFlipChannel)]:\n    if (p is None):\n        return _phase_flip_Z()\n    return _phase_flip(p)", "docstring": "r\"\"\"\nReturns a PhaseFlipChannel that flips a qubit's phase with probability p\nif p is None, return a guaranteed phase flip in the form of a Z operation.\n\nThis channel evolves a density matrix via:\n\n$$\n\\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n$$\n\nWith:\n\n$$\n\\begin{aligned}\nM_0 =& \\sqrt{p} \\begin{bmatrix}\n1 & 0  \\\\\n0 & 1\n\\end{bmatrix}\n\\\\\nM_1 =& \\sqrt{1-p} \\begin{bmatrix}\n1 & 0 \\\\\n0 & -1\n\\end{bmatrix}\n\\end{aligned}\n$$\n\nArgs:\np: the probability of a phase flip.\n\nRaises:\nValueError: if p is not a valid probability.", "source": "codesearchnet"}
{"code": "def to_file(self, filename):\n        \n        d = {\"mass_info\": self.mass_info,\n             \"nonbond_coeffs\": self.nonbond_coeffs,\n             \"topo_coeffs\": self.topo_coeffs}\n        yaml = YAML(typ=\"safe\")\n        with open(filename, \"w\") as f:\n            yaml.dump(d, f)", "docstring": "Saves object to a file in YAML format.\n\nArgs:\nfilename (str): Filename.", "source": "juraj-google-style"}
{"code": "def do_load(self, design, init=False):\n        \n        \n        if design:\n            filename = self._validated_config_filename(design)\n            with open(filename, \"r\") as f:\n                text = f.read()\n            structure = json_decode(text)\n        else:\n            structure = {}\n        \n        attributes = structure.get(\"attributes\", structure)\n        children = structure.get(\"children\", structure)\n        \n        name, mri, x, y, visible = [], [], [], [], []\n        for part_name, d in attributes.get(\"layout\", {}).items():\n            name.append(part_name)\n            mri.append(\"\")\n            x.append(d[\"x\"])\n            y.append(d[\"y\"])\n            visible.append(d[\"visible\"])\n        self.set_layout(LayoutTable(name, mri, x, y, visible))\n        \n        source, export = [], []\n        for source_name, export_name in attributes.get(\"exports\", {}).items():\n            source.append(source_name)\n            export.append(export_name)\n        self.exports.set_value(ExportTable(source, export))\n        \n        our_values = {k: v for k, v in attributes.items()\n                      if k in self.our_config_attributes}\n        block = self.block_view()\n        block.put_attribute_values(our_values)\n        \n        self.run_hooks(\n            LoadHook(p, c, children.get(p.name, {}), init)\n            for p, c in self.create_part_contexts(only_visible=False).items())\n        self._mark_clean(design, init)", "docstring": "Load a design name, running the child LoadHooks.\n\nArgs:\ndesign: Name of the design json file, without extension\ninit: Passed to the LoadHook to tell the children if this is being\nrun at Init or not", "source": "juraj-google-style"}
{"code": "def __init__(self, key_spec: Union[KeySpec, str], value_spec: ValueSpec, description: Optional[str]=None, metadata: Optional[Dict[str, Any]]=None, origin: Optional[Type[Any]]=None) -> None:\n    if isinstance(key_spec, str):\n        key_spec = KeySpec.from_str(key_spec)\n    assert isinstance(key_spec, KeySpec), key_spec\n    self._key = key_spec\n    self._value = value_spec\n    self._description = description\n    self._origin = origin\n    if metadata and (not isinstance(metadata, dict)):\n        raise ValueError('metadata must be a dict.')\n    self._metadata = metadata or {}", "docstring": "Constructor.\n\nArgs:\nkey_spec: Key specification of the field. Can be a string or a KeySpec\ninstance.\nvalue_spec: Value specification of the field.\ndescription: Description of the field.\nmetadata: A dict of objects as metadata for the field.\norigin: The class that this field originates from.\n\nRaises:\nValueError: metadata is not a dict.", "source": "github-repos"}
{"code": "def _version_from_file(\n        path_to_version,\n        default_version=DEFAULT_VERSION,\n):\n    \n    version_filepath = os.path.join(path_to_version, 'version.txt')\n    if not os.path.isfile(version_filepath):\n        warnings.warn(\n            'Unable to resolve current version',\n            exceptions.ProsperDefaultVersionWarning)\n        return default_version\n\n    with open(version_filepath, 'r') as v_fh:\n        data = v_fh.read()\n\n    return data", "docstring": "for PyPI installed versions, just get data from file\n\nArgs:\npath_to_version (str): abspath to dir where version.txt exists\ndefault_version (str): fallback version in case of error\n\nReturns:\nstr: current working version", "source": "juraj-google-style"}
{"code": "def _CheckLine(self, line):\n    for rule in self._cur_state:\n        matched = self._CheckRule(rule, line)\n        if matched:\n            for value in matched.groupdict():\n                self._AssignVar(matched, value)\n            if self._Operations(rule):\n                if rule.new_state:\n                    if (rule.new_state not in ('End', 'EOF')):\n                        self._cur_state = self.states[rule.new_state]\n                    self._cur_state_name = rule.new_state\n                break", "docstring": "Passes the line through each rule until a match is made.\n\nArgs:\nline: A string, the current input line.", "source": "codesearchnet"}
{"code": "def __init__(self):\n        \n        super(JLinkTraceRegion, self).__init__()\n        self.SizeOfStruct = ctypes.sizeof(self)", "docstring": "Initializes the trace region.\n\nSets the size of the structure.\n\nArgs:\nself (JLinkTraceRegion): the ``JLinkTraceRegion`` instance.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def set_ylim(self, xlims, dx, xscale, reverse=False):\n        \n        self._set_axis_limits('y', xlims, dx, xscale, reverse)\n        return", "docstring": "Set y limits for plot.\n\nThis will set the limits for the y axis\nfor the specific plot.\n\nArgs:\nylims (len-2 list of floats): The limits for the axis.\ndy (float): Amount to increment by between the limits.\nyscale (str): Scale of the axis. Either `log` or `lin`.\nreverse (bool, optional): If True, reverse the axis tick marks. Default is False.", "source": "juraj-google-style"}
{"code": "def get_model(self):\n    model_cls = get_connected_model_for_table_name(self.table_name)\n    return model_cls._default_manager.filter(id=self.record_id).first()", "docstring": "Fetch the instance of the connected model referenced by this log record.\n\nReturns:\nThe connected instance, or ``None`` if it does not exists.", "source": "codesearchnet"}
{"code": "def _augment_observation(self, ob, reward, cumulative_reward):\n    \n    img = PIL_Image().new(\"RGB\",\n                          (ob.shape[1], self.HEADER_HEIGHT,))\n    draw = PIL_ImageDraw().Draw(img)\n    draw.text(\n        (1, 0), \"c:{:3}, r:{:3}\".format(int(cumulative_reward), int(reward)),\n        fill=(255, 0, 0)\n    )\n    draw.text(\n        (1, 15), \"fc:{:3}\".format(int(self._frame_counter)),\n        fill=(255, 0, 0)\n    )\n    header = np.asarray(img)\n    del img\n    header.setflags(write=1)\n    \n    if self._wait:\n      pixel_fill = (0, 255, 0)\n    else:\n      pixel_fill = (255, 0, 0)\n    header[0, :, :] = pixel_fill\n    return np.concatenate([header, ob], axis=0)", "docstring": "Expand observation array with additional information header (top rows).\n\nArgs:\nob: observation\nreward: reward to be included in header.\ncumulative_reward: total cumulated reward to be included in header.\n\nReturns:\nExpanded observation array.", "source": "juraj-google-style"}
{"code": "def clean_value(self):\n    result = []\n    for mdl in self:\n        result.append(super(ListNode, mdl).clean_value())\n    return result", "docstring": "Populates json serialization ready data.\nThis is the method used to serialize and store the object data in to DB\n\nReturns:\nList of dicts.", "source": "codesearchnet"}
{"code": "def execute(self, correlation_id, args):\n        \n        \n        if self._schema != None:\n            self.validate_and_throw_exception(correlation_id, args)\n        \n        \n        try:\n            return self._function(correlation_id, args)\n        \n        except Exception as ex:\n            raise InvocationException(\n                correlation_id,\n                \"EXEC_FAILED\",\n                \"Execution \" + self._name + \" failed: \" + str(ex)\n            ).with_details(\"command\", self._name).wrap(ex)", "docstring": "Executes the command given specific arguments as an input.\n\nArgs:\ncorrelation_id: a unique correlation/transaction id\nargs: command arguments\n\nReturns: an execution result.\n\nRaises:\nApplicationException: when execution fails for whatever reason.", "source": "juraj-google-style"}
{"code": "def request(self, method, params=None):\n        \n        msg_id = self._id_generator()\n        log.debug('Sending request with id %s: %s %s', msg_id, method, params)\n\n        message = {\n            'jsonrpc': JSONRPC_VERSION,\n            'id': msg_id,\n            'method': method,\n        }\n        if params is not None:\n            message['params'] = params\n\n        request_future = futures.Future()\n        request_future.add_done_callback(self._cancel_callback(msg_id))\n\n        self._server_request_futures[msg_id] = request_future\n        self._consumer(message)\n\n        return request_future", "docstring": "Send a JSON RPC request to the client.\n\nArgs:\nmethod (str): The method name of the message to send\nparams (any): The payload of the message\n\nReturns:\nFuture that will resolve once a response has been received", "source": "juraj-google-style"}
{"code": "def trace_format(self):\n        \n        cmd = enums.JLinkTraceCommand.GET_FORMAT\n        data = ctypes.c_uint32(0)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n        if (res == 1):\n            raise errors.JLinkException('Failed to get trace format.')\n        return data.value", "docstring": "Retrieves the current format the trace buffer is using.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe current format the trace buffer is using.  This is one of the\nattributes of ``JLinkTraceFormat``.", "source": "juraj-google-style"}
{"code": "def _read_and_batch_from_files(file_pattern, batch_size, max_length, num_cpu_cores, shuffle, repeat):\n    dataset = tf.data.Dataset.list_files(file_pattern)\n    if shuffle:\n        mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)\n        dataset = dataset.shuffle(buffer_size=_FILE_SHUFFLE_BUFFER)\n    dataset = dataset.apply(tf.contrib.data.parallel_interleave(_load_records, sloppy=shuffle, cycle_length=num_cpu_cores))\n    dataset = dataset.map(_parse_example, num_parallel_calls=num_cpu_cores)\n    dataset = dataset.filter((lambda x, y: _filter_max_length((x, y), max_length)))\n    mlperf_log.transformer_print(key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size)\n    mlperf_log.transformer_print(key=mlperf_log.INPUT_MAX_LENGTH, value=max_length)\n    dataset = _batch_examples(dataset, batch_size, max_length)\n    dataset = dataset.repeat(repeat)\n    dataset = dataset.prefetch(1)\n    return dataset", "docstring": "Create dataset where each item is a dict of \"inputs\" and \"targets\".\n\nArgs:\nfile_pattern: String used to match the input TFRecord files.\nbatch_size: Maximum number of tokens per batch of examples\nmax_length: Maximum number of tokens per example\nnum_cpu_cores: Number of cpu cores for parallel input processing.\nshuffle: If true, randomizes order of elements.\nrepeat: Number of times to repeat the dataset. If None, the dataset is\nrepeated forever.\n\nReturns:\ntf.data.Dataset object containing examples loaded from the files.", "source": "codesearchnet"}
{"code": "def finalise(self):\n    assert self._is_solved()\n    g = self._get_minimal_graph()\n    scopes = dict(((x.package_name, x) for x in self.scopes if (not x.package_request.conflict)))\n    fam_cycle = find_cycle(g)\n    if fam_cycle:\n        cycle = []\n        for fam in fam_cycle:\n            scope = scopes[fam]\n            variant = scope._get_solved_variant()\n            stmt = VersionedObject.construct(fam, variant.version)\n            cycle.append(stmt)\n        phase = copy.copy(self)\n        phase.scopes = scopes.values()\n        phase.failure_reason = Cycle(cycle)\n        phase.status = SolverStatus.cyclic\n        return phase\n    fams = [x.name for x in self.solver.request_list]\n    ordered_fams = _get_dependency_order(g, fams)\n    scopes_ = []\n    for fam in ordered_fams:\n        scope = scopes[fam]\n        if (not scope.package_request.conflict):\n            scopes_.append(scope)\n    phase = copy.copy(self)\n    phase.scopes = scopes_\n    return phase", "docstring": "Remove conflict requests, detect cyclic dependencies, and reorder\npackages wrt dependency and then request order.\n\nReturns:\nA new copy of the phase with conflict requests removed and packages\ncorrectly ordered; or, if cyclic dependencies were detected, a new\nphase marked as cyclic.", "source": "codesearchnet"}
{"code": "def label_count(self, label_list_ids=None):\n        \n        count = collections.defaultdict(int)\n\n        for utterance in self.utterances.values():\n            for label_value, utt_count in utterance.label_count(label_list_ids=label_list_ids).items():\n                count[label_value] += utt_count\n\n        return count", "docstring": "Return a dictionary containing the number of times, every label-value in this corpus is occurring.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists with an id contained in this list\nare considered.\n\nReturns:\ndict: A dictionary containing the number of occurrences with the label-value as key.", "source": "juraj-google-style"}
{"code": "def delete_devices(self, auth_body, devices):\n        \n        content = {\n            \"auth\": auth_body,\n            \"devices\": devices\n        }\n        return self._send(\"POST\", \"/delete_devices\", content=content)", "docstring": "Bulk deletion of devices.\n\nNOTE: This endpoint uses the User-Interactive Authentication API.\n\nArgs:\nauth_body (dict): Authentication params.\ndevices (list): List of device ID\"s to delete.", "source": "juraj-google-style"}
{"code": "def get_disk_usage(self, path=None):\n        \n        DiskUsage = namedtuple('usage', 'total, used, free')\n        if path is None:\n            mount_point = self.mount_points[self.root.name]\n        else:\n            mount_point = self._mount_point_for_path(path)\n        if mount_point and mount_point['total_size'] is not None:\n            return DiskUsage(mount_point['total_size'],\n                             mount_point['used_size'],\n                             mount_point['total_size'] -\n                             mount_point['used_size'])\n        return DiskUsage(\n            1024 * 1024 * 1024 * 1024, 0, 1024 * 1024 * 1024 * 1024)", "docstring": "Return the total, used and free disk space in bytes as named tuple,\nor placeholder values simulating unlimited space if not set.\n\n.. note:: This matches the return value of shutil.disk_usage().\n\nArgs:\npath: The disk space is returned for the file system device where\n`path` resides.\nDefaults to the root path (e.g. '/' on Unix systems).", "source": "juraj-google-style"}
{"code": "def __init__(self, version_string, first_matched_type, second_matched_type):\n        \n        super(TooManyTypesError, self).__init__(\n            'Release \"{}\" cannot match types \"{}\" and \"{}\"'.format(\n                version_string, first_matched_type, second_matched_type\n            )\n        )", "docstring": "Constructor.\n\nArgs:\nversion_string (str): The string that gave too many types.\nfirst_matched_type (str): The name of the first detected type.\nsecond_matched_type (str): The name of the second detected type", "source": "juraj-google-style"}
{"code": "def all_subnets_shorter_prefix(ip_net, cidr, include_default=False):\n    \n    subnets_list = list()\n    if include_default:\n        while int(cidr) >= 0:\n            try:\n                subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))\n            except Exception as e:\n                LOGGER.critical('Function all_subnets_shorter_prefix {item}'.format(item=e))\n            cidr = str(int(cidr) - 1)\n    else:\n        while int(cidr) > 0:\n            try:\n                subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))\n            except Exception as e:\n                LOGGER.critical('Function all_subnets_shorter_prefix {item}'.format(item=e))\n            cidr = str(int(cidr) - 1)\n    return subnets_list", "docstring": "Function to return every subnet a ip can belong to with a shorter prefix\nArgs:\nip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1\ncidr: CIDR value of 0 to 32\ninclude_default: If you want the list to inlclude the default route set to True\n\nReturns: returns a list of subnets", "source": "juraj-google-style"}
{"code": "def findall(self, title=None):\n    if (title is None):\n        return list(self)\n    files = backend.iterfiles(self._drive, name=title)\n    return [self[id] for (id, _) in files]", "docstring": "Fetch and return a list of spreadsheets with the given title.\n\nArgs:\ntitle(str): title/name of the spreadsheets to return, or ``None`` for all\nReturns:\nlist: list of new SpreadSheet instances (possibly empty)", "source": "codesearchnet"}
{"code": "def set_working_directory(working_directory):\n    \n    logger.debug(\"starting\")\n\n    logger.debug(f\"adding {working_directory} to sys.paths\")\n    sys.path.append(working_directory)\n\n    logger.debug(\"done\")", "docstring": "Add working_directory to sys.paths.\n\nThis allows dynamic loading of arbitrary python modules in cwd.\n\nArgs:\nworking_directory: string. path to add to sys.paths", "source": "juraj-google-style"}
{"code": "def pymmh3_hash64(key: Union[(bytes, bytearray)], seed: int=0, x64arch: bool=True) -> Tuple[(int, int)]:\n    hash_128 = pymmh3_hash128(key, seed, x64arch)\n    unsigned_val1 = (hash_128 & 18446744073709551615)\n    if ((unsigned_val1 & 9223372036854775808) == 0):\n        signed_val1 = unsigned_val1\n    else:\n        signed_val1 = (- ((unsigned_val1 ^ 18446744073709551615) + 1))\n    unsigned_val2 = ((hash_128 >> 64) & 18446744073709551615)\n    if ((unsigned_val2 & 9223372036854775808) == 0):\n        signed_val2 = unsigned_val2\n    else:\n        signed_val2 = (- ((unsigned_val2 ^ 18446744073709551615) + 1))\n    return (signed_val1, signed_val2)", "docstring": "Implements 64bit murmur3 hash, as per ``pymmh3``. Returns a tuple.\n\nArgs:\nkey: data to hash\nseed: seed\nx64arch: is a 64-bit architecture available?\n\nReturns:\ntuple: tuple of integers, ``(signed_val1, signed_val2)``", "source": "codesearchnet"}
{"code": "def augment(self, dct: NonAugmentedDict,\n                document: Optional[YamlDocument] = None) -> AugmentedDict:\n        \n        Validator.instance_of(dict, raise_ex=True, dct=dct)\n\n        \n        for instance in self._extensions:\n            nodes = list(dict_find_pattern(dct, **instance.config()))\n            for parent, k, val in nodes:\n                parent.pop(k)\n                fragment = instance.apply(\n                    ExtensionContext(\n                        mentor=self,\n                        document=document or dct,\n                        dct=dct,\n                        parent_node=parent,\n                        node=(k, val)\n                    )\n                )\n                if fragment is not None:\n                    parent.update(fragment)\n\n        return dct", "docstring": "Augments the given dictionary by using all the bound extensions.\n\nArgs:\ndct: Dictionary to augment.\ndocument: The document the dictionary was loaded from.\n\nReturns:\nThe augmented dictionary.", "source": "juraj-google-style"}
{"code": "def _custom_diag_normal_kl(lhs, rhs, name=None):\n    with tf.name_scope((name or 'kl_divergence')):\n        mean0 = lhs.mean()\n        mean1 = rhs.mean()\n        logstd0 = tf.log(lhs.stddev())\n        logstd1 = tf.log(rhs.stddev())\n        (logstd0_2, logstd1_2) = ((2 * logstd0), (2 * logstd1))\n        return (0.5 * ((((tf.reduce_sum(tf.exp((logstd0_2 - logstd1_2)), (- 1)) + tf.reduce_sum((((mean1 - mean0) ** 2) / tf.exp(logstd1_2)), (- 1))) + tf.reduce_sum(logstd1_2, (- 1))) - tf.reduce_sum(logstd0_2, (- 1))) - mean0.shape[(- 1)].value))", "docstring": "Empirical KL divergence of two normals with diagonal covariance.\n\nArgs:\nlhs: Diagonal Normal distribution.\nrhs: Diagonal Normal distribution.\nname: Name scope for the op.\n\nReturns:\nKL divergence from lhs to rhs.", "source": "codesearchnet"}
{"code": "def get_module_object_and_name(globals_dict):\n  \n  name = globals_dict.get('__name__', None)\n  module = sys.modules.get(name, None)\n  \n  return _ModuleObjectAndName(module,\n                              (sys.argv[0] if name == '__main__' else name))", "docstring": "Returns the module that defines a global environment, and its name.\n\nArgs:\nglobals_dict: A dictionary that should correspond to an environment\nproviding the values of the globals.\n\nReturns:\n_ModuleObjectAndName - pair of module object & module name.\nReturns (None, None) if the module could not be identified.", "source": "juraj-google-style"}
{"code": "def waitForEvent(self, event_name, predicate, timeout=DEFAULT_TIMEOUT):\n    deadline = time.perf_counter() + timeout\n    while time.perf_counter() <= deadline:\n        rpc_timeout = deadline - time.perf_counter()\n        if rpc_timeout < 0:\n            break\n        rpc_timeout = min(rpc_timeout, MAX_TIMEOUT)\n        try:\n            event = self.waitAndGet(event_name, rpc_timeout)\n        except TimeoutError:\n            break\n        if predicate(event):\n            return event\n    raise TimeoutError(self._ad, 'Timed out after %ss waiting for an \"%s\" event that satisfies the predicate \"%s\".' % (timeout, event_name, predicate.__name__))", "docstring": "Wait for an event of a specific name that satisfies the predicate.\n\nThis call will block until the expected event has been received or time\nout.\n\nThe predicate function defines the condition the event is expected to\nsatisfy. It takes an event and returns True if the condition is\nsatisfied, False otherwise.\n\nNote all events of the same name that are received but don't satisfy\nthe predicate will be discarded and not be available for further\nconsumption.\n\nArgs:\nevent_name: string, the name of the event to wait for.\npredicate: function, a function that takes an event (dictionary) and\nreturns a bool.\ntimeout: float, default is 120s.\n\nReturns:\ndictionary, the event that satisfies the predicate if received.\n\nRaises:\nTimeoutError: raised if no event that satisfies the predicate is\nreceived after timeout seconds.", "source": "github-repos"}
{"code": "def export_template(access_token, subscription_id, rgname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/exportTemplate', '?api-version=', RESOURCE_API])\n    rg_body = {'options': 'IncludeParameterDefaultValue', 'resources': ['*']}\n    body = json.dumps(rg_body)\n    return do_post(endpoint, body, access_token)", "docstring": "Capture the specified resource group as a template\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def test(cls, test_expr: bool = True) -> None:\n        \n        cls.test_dialect_specific_1()\n        cls.test_identifiers()\n        if test_expr:\n            cls.test_expr()\n        cls.test_sql_core()\n        cls.test_dialect_specific_2()", "docstring": "Runs self-tests.\n\nArgs:\ntest_expr: include tests of expressions (which can be slow).", "source": "juraj-google-style"}
{"code": "def __init__(self, room_id, api):\n        \n        self.room_id = room_id\n        self.api = api", "docstring": "Instantiates MatrixRoom object.\n\nArgs:\nroom_id(str): Matrix room id (e.g. !1234567:example.com)\napi(MatrixASHttpAPI): Api for calls to the server.", "source": "juraj-google-style"}
{"code": "def upcoming(self, **kwargs):\n        \n        path = self._get_path('upcoming')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the list of upcoming movies. This list refreshes every day.\nThe maximum number of items this list will include is 100.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def PyParseRangeCheck(lower_bound, upper_bound):\n\n    def CheckRange(string, location, tokens):\n        'Parse the arguments.\\n\\n    Args:\\n      string (str): original string.\\n      location (int): location in the string where the match was made\\n      tokens (list[str]): tokens.\\n    '\n        try:\n            check_number = tokens[0]\n        except IndexError:\n            check_number = (- 1)\n        if (check_number < lower_bound):\n            raise pyparsing.ParseException('Value: {0:d} precedes lower bound: {1:d}'.format(check_number, lower_bound))\n        if (check_number > upper_bound):\n            raise pyparsing.ParseException('Value: {0:d} exceeds upper bound: {1:d}'.format(check_number, upper_bound))\n    return CheckRange", "docstring": "Verify that a number is within a defined range.\n\nThis is a callback method for pyparsing setParseAction\nthat verifies that a read number is within a certain range.\n\nTo use this method it needs to be defined as a callback method\nin setParseAction with the upper and lower bound set as parameters.\n\nArgs:\nlower_bound (int): lower bound of the range.\nupper_bound (int): upper bound of the range.\n\nReturns:\nFunction: callback method that can be used by pyparsing setParseAction.", "source": "codesearchnet"}
{"code": "def __init__(self, lmda, theta, phi) -> None:\n        \n        self.lmda = lmda % 2\n        self.theta = theta % 2\n        self.phi = phi % 2", "docstring": "A QASM gate representing any single qubit unitary with a series of\nthree rotations, Z, Y, and Z.\n\nThe angles are normalized to the range [0, 2) half_turns.\n\nArgs:\nlmda: Half turns to rotate about Z (applied first).\ntheta: Half turns to rotate about Y.\nphi: Half turns to rotate about Z (applied last).", "source": "juraj-google-style"}
{"code": "def join_sources(source_module: DeploymentModule, contract_name: str):\n    joined_file = Path(__file__).parent.joinpath('joined.sol')\n    remapping = {module: str(path) for (module, path) in contracts_source_path().items()}\n    command = ['./utils/join-contracts.py', '--import-map', json.dumps(remapping), str(contracts_source_path_of_deployment_module(source_module).joinpath((contract_name + '.sol'))), str(joined_file)]\n    working_dir = Path(__file__).parent.parent\n    try:\n        subprocess.check_call(command, cwd=working_dir)\n    except subprocess.CalledProcessError as ex:\n        print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')\n        raise ex\n    return joined_file.read_text()", "docstring": "Use join-contracts.py to concatenate all imported Solidity files.\n\nArgs:\nsource_module: a module name to look up contracts_source_path()\ncontract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.", "source": "codesearchnet"}
{"code": "def block_start(self, previous_block):\n    previous_header_bytes = previous_block.header\n    previous_header = BlockHeader()\n    previous_header.ParseFromString(previous_header_bytes)\n    block_info = BlockInfo(block_num=previous_header.block_num, previous_block_id=previous_header.previous_block_id, signer_public_key=previous_header.signer_public_key, header_signature=previous_block.header_signature, timestamp=int(time.time()))\n    return [self.create_batch(block_info)]", "docstring": "Returns an ordered list of batches to inject at the beginning of the\nblock. Can also return None if no batches should be injected.\n\nArgs:\nprevious_block (Block): The previous block.\n\nReturns:\nA list of batches to inject.", "source": "codesearchnet"}
{"code": "def format_snippet(sensor_graph):\n    output = []\n    output.append('disable')\n    output.append('clear')\n    output.append('reset')\n    for node in sensor_graph.dump_nodes():\n        output.append('add_node \"{}\"'.format(node))\n    for streamer in sensor_graph.streamers:\n        line = \"add_streamer '{}' '{}' {} {} {}\".format(streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type)\n        if (streamer.with_other is not None):\n            line += ' --withother {}'.format(streamer.with_other)\n        output.append(line)\n    for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())):\n        output.append(\"set_constant '{}' {}\".format(stream, value))\n    output.append('persist')\n    output.append('back')\n    app_tag = sensor_graph.metadata_database.get('app_tag')\n    app_version = sensor_graph.metadata_database.get('app_version')\n    if (app_tag is not None):\n        if (app_version is None):\n            app_version = '0.0'\n        output.append('test_interface')\n        output.append((\"set_version app %d --version '%s'\" % (app_tag, app_version)))\n        output.append('back')\n    output.append('config_database')\n    output.append('clear_variables')\n    for (slot, conf_vars) in sensor_graph.config_database.items():\n        for (conf_var, conf_def) in conf_vars.items():\n            (conf_type, conf_val) = conf_def\n            if (conf_type == 'binary'):\n                conf_val = ('hex:' + hexlify(conf_val))\n            elif isinstance(conf_val, str):\n                conf_val = ('\"%s\"' % conf_val)\n            output.append(\"set_variable '{}' {} {} {}\".format(slot, conf_var, conf_type, conf_val))\n    output.append('back')\n    output.append('reset')\n    return ('\\n'.join(output) + '\\n')", "docstring": "Format this sensor graph as iotile command snippets.\n\nThis includes commands to reset and clear previously stored\nsensor graphs.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format", "source": "codesearchnet"}
{"code": "def validate_functions(ast: BELAst, bo):\n    \n\n    if isinstance(ast, Function):\n        log.debug(f\"Validating: {ast.name}, {ast.function_type}, {ast.args}\")\n        function_signatures = bo.spec[\"functions\"][\"signatures\"][ast.name][\"signatures\"]\n\n        function_name = ast.name\n        (valid_function, messages) = check_function_args(\n            ast.args, function_signatures, function_name\n        )\n        if not valid_function:\n            message = \", \".join(messages)\n            bo.validation_messages.append(\n                (\n                    \"ERROR\",\n                    \"Invalid BEL Statement function {} - problem with function signatures: {}\".format(\n                        ast.to_string(), message\n                    ),\n                )\n            )\n            bo.parse_valid = False\n\n    \n    if hasattr(ast, \"args\"):\n        for arg in ast.args:\n            validate_functions(arg, bo)\n\n    return bo", "docstring": "Recursively validate function signatures\n\nDetermine if function matches one of the available signatures. Also,\n\n1. Add entity types to AST NSArg, e.g. Abundance, ...\n2. Add optional to  AST Arg (optional means it is not a\nfixed, required argument and needs to be sorted for\ncanonicalization, e.g. reactants(A, B, C) )\n\nArgs:\nbo: bel object\n\nReturns:\nbel object", "source": "juraj-google-style"}
{"code": "def _addConfig(instance, config, parent_section):\n\t\t\n\t\ttry:\n\t\t\tsection_name = \"{p}/{n}\".format(p = parent_section, n=instance.NAME.lower())\n\t\t\tconfig.add_section(section_name)\n\t\t\tfor k in instance.CONFIG.keys():\n\t\t\t\tconfig.set(section_name, k, instance.CONFIG[k])\n\t\texcept Exception as e:\n\t\t\tprint \"[!] %s\" % e", "docstring": "Writes a section for a plugin.\n\nArgs:\ninstance (object): Class instance for plugin\nconfig (object): Object (ConfigParser) which the current config\nparent_section (str): Parent section for plugin. Usually 'checkers' or 'reports'", "source": "juraj-google-style"}
{"code": "def _find_docstring_line(self, start, end):\n    for i in range(start, (end + 1)):\n        if (i in self._tokenized_triple_quotes):\n            return i\n    return None", "docstring": "Find the row where a docstring starts in a function or class.\n\nThis will search for the first match of a triple quote token in\nrow sequence from the start of the class or function.\n\nArgs:\nstart: the row where the class / function starts.\nend: the row where the class / function ends.\n\nReturns:\nint: the row number where the docstring is found.", "source": "codesearchnet"}
{"code": "def __init__(cls, name, bases, dictionary):\n    \n    super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)\n    \n    \n    if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:\n      return\n    descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]\n    service_stub_builder = _ServiceStubBuilder(descriptor)\n    service_stub_builder.BuildServiceStub(cls)", "docstring": "Creates a message service stub class.\n\nArgs:\nname: Name of the class (ignored, here).\nbases: Base classes of the class being constructed.\ndictionary: The class dictionary of the class being constructed.\ndictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object\ndescribing this protocol service type.", "source": "juraj-google-style"}
{"code": "def _merge_field(self, json_value: Any, field: descriptor.FieldDescriptor, parent: message.Message) -> None:\n    if not annotation_utils.is_primitive_type(field.message_type) and proto_utils.field_is_set(parent, field):\n        raise ValueError(f'Target field {field.full_name} is already set.')\n    if field.containing_oneof is not None:\n        oneof_field = parent.DESCRIPTOR.oneofs_by_name[field.containing_oneof.name]\n        if annotation_utils.is_primitive_type(field.message_type) and oneof_field.full_name == field.full_name:\n            raise ValueError(f'Cannot set field {field.full_name} since oneof field {oneof_field.full_name} is already set.')\n    existing_field_size = proto_utils.field_content_length(parent, field)\n    if proto_utils.field_is_repeated(field):\n        if not isinstance(json_value, list):\n            raise ValueError(f'Attempted to merge a repeated field, {field.name}, a json_value with type {type(json_value)} instead of a list.')\n        if existing_field_size != 0 and existing_field_size != len(json_value):\n            raise ValueError('Repeated primitive list length does not match extension list for field: {field.full_name!r}.')\n    json_value = json_value if proto_utils.field_is_repeated(field) else [json_value]\n    for i, value in enumerate(json_value):\n        parsed_value = self._parse_field_value(field, value)\n        if existing_field_size > 0:\n            field_value = proto_utils.get_value_at_field_index(parent, field, i)\n            field_value.MergeFrom(parsed_value)\n            extensions.clear_fhir_extensions_with_url(field_value, extensions.PRIMITIVE_HAS_NO_VALUE_URL)\n        else:\n            field_value = proto_utils.set_in_parent_or_add(parent, field)\n            field_value.MergeFrom(parsed_value)", "docstring": "Merges the json_value into the provided field of the parent Message.\n\nArgs:\njson_value: The JSON value to set.\nfield: The FieldDescriptor of the field to set in parent.\nparent: The parent Message to set the value on.\n\nRaises:\nValueError: In the event that a non-primitive field has already been set.\nValueError: In the event that a oneof field has already been set.", "source": "github-repos"}
{"code": "def _get_scopes(state, names: Sequence[str], ctx) -> Sequence[abstract.InterpreterClass | abstract.InterpreterFunction]:\n    scopes = []\n    for name in names:\n        prev = scopes[-1] if scopes else None\n        if not prev:\n            try:\n                _, var = ctx.vm.load_global(state, name)\n            except KeyError:\n                break\n        elif isinstance(prev, abstract.InterpreterClass):\n            if name in prev.members:\n                var = prev.members[name]\n            else:\n                break\n        else:\n            assert isinstance(prev, abstract.InterpreterFunction)\n            if prev.last_frame and name in prev.last_frame.f_locals.pyval:\n                var = prev.last_frame.f_locals.pyval[name]\n            else:\n                break\n        try:\n            scopes.append(abstract_utils.get_atomic_value(var, (abstract.InterpreterClass, abstract.InterpreterFunction)))\n        except abstract_utils.ConversionError:\n            break\n    return scopes", "docstring": "Gets the class or function objects for a sequence of nested scope names.\n\nFor example, if the code under analysis is:\nclass Foo:\ndef f(self):\ndef g(): ...\nthen when called with ['Foo', 'f', 'g'], this method returns\n[InterpreterClass(Foo), InterpreterFunction(f), InterpreterFunction(g)].\n\nArguments:\nstate: The current state.\nnames: A sequence of names for consecutive nested scopes in the module under\nanalysis. Must start with a module-level name.\nctx: The current context.\n\nReturns:\nThe class or function object corresponding to each name in 'names'.", "source": "github-repos"}
{"code": "def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):\n    if loss_collection and (not context.executing_eagerly()):\n        ops.add_to_collection(loss_collection, loss)", "docstring": "Adds a externally defined loss to the collection of losses.\n\nArgs:\nloss: A loss `Tensor`.\nloss_collection: Optional collection to add the loss to.", "source": "github-repos"}
{"code": "def in_same_dir(as_file, target_file):\n    \n    return os.path.abspath(os.path.join(os.path.dirname(as_file), target_file))", "docstring": "Return an absolute path to a target file that is located in the same directory as as_file\n\nArgs:\nas_file: File name (including __file__)\nUse the directory path of this file\ntarget_file: Name of the target file", "source": "juraj-google-style"}
{"code": "def fail_steamid(channel):\n    \n\n    gui = ui_embed.UI(\n        channel,\n        \"That SteamID doesn't exist.\",\n        \"You can get your SteamID by going to your profile page and looking at the url, \"\n        \"or you can set a custom ID by going to edit profile on your profile page.\",\n        modulename=modulename,\n        colour=0x0088FF\n    )\n\n    return gui", "docstring": "Creates an embed UI for invalid SteamIDs\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def _build(self, one_hot_input_sequence):\n    \n\n    input_shape = one_hot_input_sequence.get_shape()\n    batch_size = input_shape[1]\n\n    batch_embed_module = snt.BatchApply(self._embed_module)\n    input_sequence = batch_embed_module(one_hot_input_sequence)\n    input_sequence = tf.nn.relu(input_sequence)\n\n    initial_state = self._core.initial_state(batch_size)\n\n    if self._use_dynamic_rnn:\n      output_sequence, final_state = tf.nn.dynamic_rnn(\n          cell=self._core,\n          inputs=input_sequence,\n          time_major=True,\n          initial_state=initial_state)\n    else:\n      rnn_input_sequence = tf.unstack(input_sequence)\n      output, final_state = tf.contrib.rnn.static_rnn(\n          cell=self._core,\n          inputs=rnn_input_sequence,\n          initial_state=initial_state)\n      output_sequence = tf.stack(output)\n\n    batch_output_module = snt.BatchApply(self._output_module)\n    output_sequence_logits = batch_output_module(output_sequence)\n\n    return output_sequence_logits, final_state", "docstring": "Builds the deep LSTM model sub-graph.\n\nArgs:\none_hot_input_sequence: A Tensor with the input sequence encoded as a\none-hot representation. Its dimensions should be `[truncation_length,\nbatch_size, output_size]`.\n\nReturns:\nTuple of the Tensor of output logits for the batch, with dimensions\n`[truncation_length, batch_size, output_size]`, and the\nfinal state of the unrolled core,.", "source": "juraj-google-style"}
{"code": "def xw_plus_b(x, weights, biases, name=None):\n    with ops.name_scope(name, 'xw_plus_b', [x, weights, biases]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        weights = ops.convert_to_tensor(weights, name='weights')\n        biases = ops.convert_to_tensor(biases, name='biases')\n        mm = math_ops.matmul(x, weights)\n        return bias_add(mm, biases, name=name)", "docstring": "Computes matmul(x, weights) + biases.\n\nArgs:\nx: a 2D tensor.  Dimensions typically: batch, in_units\nweights: a 2D tensor.  Dimensions typically: in_units, out_units\nbiases: a 1D tensor.  Dimensions: out_units\nname: A name for the operation (optional).  If not specified\n\"xw_plus_b\" is used.\n\nReturns:\nA 2-D Tensor computing matmul(x, weights) + biases.\nDimensions typically: batch, out_units.", "source": "github-repos"}
{"code": "def add_argument_to(self, parser):\n        \n        from devassistant.cli.devassistant_argparse import DefaultIffUsedActionFactory\n        if isinstance(self.kwargs.get('action', ''), list):\n            \n            if self.kwargs['action'][0] == 'default_iff_used':\n                self.kwargs['action'] = DefaultIffUsedActionFactory.generate_action(\n                    self.kwargs['action'][1])\n        \n        \n        self.kwargs.pop('preserved', None)\n        try:\n            parser.add_argument(*self.flags, **self.kwargs)\n        except Exception as ex:\n            problem = \"Error while adding argument '{name}': {error}\".\\\n                format(name=self.name, error=repr(ex))\n            raise exceptions.ExecutionException(problem)", "docstring": "Used by cli to add this as an argument to argparse parser.\n\nArgs:\nparser: parser to add this argument to", "source": "juraj-google-style"}
{"code": "def grow(self, times=1):\n    self.nodes.append([])\n    for (n, node) in enumerate(self.nodes[self.age]):\n        if (self.age == 0):\n            p_node = Node(self.pos[:2])\n        else:\n            p_node = self._get_node_parent((self.age - 1), n)\n        angle = node.get_node_angle(p_node)\n        for i in range(self.comp):\n            tot_angle = self.__get_total_angle(angle, i)\n            length = self.__get_total_length((self.age + 1), i)\n            self.nodes[(self.age + 1)].append(node.make_new_node(length, tot_angle))\n    self.age += 1\n    if (times > 1):\n        self.grow((times - 1))", "docstring": "Let the tree grow.\n\nArgs:\ntimes (integer): Indicate how many times the tree will grow.", "source": "codesearchnet"}
{"code": "def logged(level=logging.DEBUG):\n\n    def wrap(f):\n        _logger = logging.getLogger('{}.{}'.format(f.__module__, f.__name__))\n\n        def wrapped_f(*args, **kwargs):\n            _logger.log(level, 'Called at {} with args = {} and kwargs = {}'.format(datetime.datetime.now(), args, kwargs))\n            data = f(*args, **kwargs)\n            _logger.log(level, 'Done at {} with args = {} and kwargs = {}'.format(datetime.datetime.now(), args, kwargs))\n            return data\n        return wrapped_f\n    return wrap", "docstring": "Useful logging decorator. If a method is logged, the beginning and end of\nthe method call will be logged at a pre-specified level.\n\nArgs:\nlevel: Level to log method at. Defaults to DEBUG.", "source": "codesearchnet"}
{"code": "def _GetUnsortedNotifications(self, queue_shard, notifications_by_session_id=None):\n    if (notifications_by_session_id is None):\n        notifications_by_session_id = {}\n    end_time = (self.frozen_timestamp or rdfvalue.RDFDatetime.Now())\n    for notification in self.data_store.GetNotifications(queue_shard, end_time):\n        existing = notifications_by_session_id.get(notification.session_id)\n        if existing:\n            if (notification.first_queued > existing.first_queued):\n                notifications_by_session_id[notification.session_id] = notification\n            elif ((notification.first_queued == existing.first_queued) and (notification.last_status > existing.last_status)):\n                logging.warning('Notifications with equal first_queued fields detected: %s %s', notification, existing)\n                notifications_by_session_id[notification.session_id] = notification\n        else:\n            notifications_by_session_id[notification.session_id] = notification\n    return notifications_by_session_id", "docstring": "Returns all the available notifications for a queue_shard.\n\nArgs:\nqueue_shard: urn of queue shard\nnotifications_by_session_id: store notifications in this dict rather than\ncreating a new one\n\nReturns:\ndict of notifications. keys are session ids.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:\n    residual = hidden_states\n    hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):\n        clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def parse_flags_with_usage(args):\n  \n  try:\n    return FLAGS(args)\n  except flags.Error as error:\n    sys.stderr.write('FATAL Flags parsing error: %s\\n' % error)\n    sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\\n')\n    sys.exit(1)", "docstring": "Tries to parse the flags, print usage, and exit if unparseable.\n\nArgs:\nargs: [str], a non-empty list of the command line arguments including\nprogram name.\n\nReturns:\n[str], a non-empty list of remaining command line arguments after parsing\nflags, including program name.", "source": "juraj-google-style"}
{"code": "def __init__(self, campfire, data):\n        \n        dataType = type(data)\n        if dataType == types.StringType or dataType == types.UnicodeType:\n            messageType = self._TYPE_PASTE if data.find(\"\\n\") >= 0 else self._TYPE_TEXT\n            if messageType == self._TYPE_TEXT:\n                matches = re.match(\"^https?:\n                if matches:\n                    messageType = self._TYPE_TWEET\n            data = {\n                \"type\": messageType,\n                \"body\": data\n            }\n\n        super(Message, self).__init__(campfire)\n\n        self.set_data(data, [\"created_at\"])\n        \n        self.user = None\n        self.room = None\n\n        if \"user_id\" in data and data[\"user_id\"]:\n            self.user = self._campfire.get_user(data[\"user_id\"])\n        if \"room_id\" in data and data[\"room_id\"]:\n            self.room = self._campfire.get_room(data[\"room_id\"])\n            if self.is_upload():\n                self.upload = self._connection.get(\"room/%s/messages/%s/upload\" % (self.room.id, self.id), key=\"upload\")\n                if \"full_url\" in self.upload:\n                    self.upload[\"url\"] = self.upload[\"full_url\"]\n                    del self.upload[\"full_url\"]\n\n        if self.is_tweet():\n            \n            matches = re.match(\"(.+)\\s+--\\s+@([^,]+),\\s*(.+)$\", self.body)\n            if matches:\n                self.tweet = {\n                    \"tweet\": matches.group(1),\n                    \"user\": matches.group(2),\n                    \"url\": matches.group(3)\n                }\n            else:\n                tweet_data = {}\n                if re.match(\"^---\", self.body):\n                    for line in self.body.split(\"\\n\")[1:]:\n                        matches = re.match('^:([^:]+):\\s*\"?(.+)\"?$', line)\n                        if matches:\n                            tweet_data[matches.group(1)] = matches.group(2)\n    \n                if tweet_data and \"author_username\" in tweet_data and \"message\" in tweet_data and \"id\" in tweet_data:\n                    self.tweet = {\n                        \"tweet\": tweet_data[\"message\"],\n                        \"user\": tweet_data[\"author_username\"],\n                        \"url\": \"http:\n                    }\n                else:\n                    self.type = self._TYPE_TEXT", "docstring": "Initialize.\n\nArgs:\ncampfire (:class:`Campfire`): Campfire instance\ndata (dict or str): If string, message type will be set to either paste or text", "source": "juraj-google-style"}
{"code": "def select_embedding_from_source(cur, source_nodelist, source_edgelist, target_nodelist, target_edgelist):\n    encoded_data = {'target_num_nodes': len(target_nodelist), 'target_num_edges': len(target_edgelist), 'target_edges': json.dumps(target_edgelist, separators=(',', ':')), 'source_num_nodes': len(source_nodelist), 'source_num_edges': len(source_edgelist), 'source_edges': json.dumps(source_edgelist, separators=(',', ':'))}\n    select = '\\n        SELECT\\n            source_node,\\n            chain\\n        FROM\\n            embedding_component_view\\n        WHERE\\n            source_num_edges = :source_num_edges AND\\n            source_edges = :source_edges AND\\n            source_num_nodes = :source_num_nodes AND\\n\\n            target_num_edges = :target_num_edges AND\\n            target_edges = :target_edges AND\\n            target_num_nodes = :target_num_nodes\\n        '\n    embedding = {v: json.loads(chain) for (v, chain) in cur.execute(select, encoded_data)}\n    return embedding", "docstring": "Select an embedding from the source graph and target graph.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\ntarget_nodelist (list):\nThe nodes in the target graph. Should be integer valued.\n\ntarget_edgelist (list):\nThe edges in the target graph.\n\nembedding_tag (str):\nA string tag to associate with the embedding.\n\nReturns:\ndict: The mapping from the source graph to the target graph.\nIn the form {v: {s, ...}, ...} where v is a variable in the\nsource model and s is a variable in the target model.", "source": "codesearchnet"}
{"code": "def Substitute(self, pattern):\n    \n    if isinstance(pattern, bytes):\n      substs = [re.escape(subst.encode(\"ascii\")) for subst in self._substs]\n      regex = re.compile(b\"|\".join(substs))\n\n      def Replacement(match):\n        key = match.group(0).decode(\"ascii\")\n        return self._substs[key].encode(\"utf-8\")\n\n    elif isinstance(pattern, Text):\n      substs = [re.escape(subst) for subst in self._substs]\n      regex = re.compile(\"|\".join(substs))\n\n      def Replacement(match):\n        key = match.group(0)\n        return self._substs[key]\n\n    else:\n      raise TypeError(\"Unexpected pattern type '{}'\".format(type(pattern)))\n\n    if not substs:\n      return pattern\n    else:\n      return regex.sub(Replacement, pattern)", "docstring": "Formats given pattern with this substitution environment.\n\nA pattern can contain placeholders for variables (`%%foo%%`) and scopes\n(`%%bar.baz%%`) that are replaced with concrete values in this substiution\nenvironment (specified in the constructor).\n\nArgs:\npattern: A pattern with placeholders to substitute.\n\nReturns:\nA pattern with placeholders substituted with concrete values.", "source": "juraj-google-style"}
{"code": "def __init__(self, server_address, username, password):\n        \n        _wtflog.info(\n            \"connecting to %s, using %s:%s\", server_address, username, password)\n        self._mail = imaplib.IMAP4_SSL(server_address)\n        self._mail.login(username, password)\n        _wtflog.info(\"connected.\")", "docstring": "Constructor\n\nArgs:\nserver_address (str): Email Server address.\nusername (str): Username\npassword (str): Password", "source": "juraj-google-style"}
{"code": "def invoke(self, line):\n    finished = True\n    while (len(line) > 0):\n        (val, line, finished) = self.invoke_one(line)\n        if (val is not None):\n            iprint(val)\n    return finished", "docstring": "Invoke a one or more function given a list of arguments.\n\nThe functions are searched for using the current context on the context stack\nand its annotated type information is used to convert all of the string parameters\npassed in line to appropriate python types.\n\nArgs:\nline (list): The list of command line arguments.\n\nReturns:\nbool: A boolean specifying if the last function created a new context\n(False if a new context was created) and a list with the remainder of the\ncommand line if this function did not consume all arguments.)", "source": "codesearchnet"}
{"code": "def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None):\n    return _json.dumps(structured(struct, wrap=wrap, meta=meta, struct_key=struct_key, pre_render_callback=pre_render_callback), default=json_encoder)", "docstring": "Provides a json dump of the struct\n\nArgs:\nstruct: The data to dump\nwrap (bool, optional): Specify whether to wrap the\nstruct in an enclosing dict\nstruct_key (str, optional): The string key which will\ncontain the struct in the result dict\nmeta (dict, optional): An optional dictonary to merge\nwith the output dictionary.\n\nExamples:\n\n>>> jsoned([3,4,5])\n... '{\"status\": \"success\", \"result\": [3, 4, 5]}'\n\n>>> jsoned([3,4,5], wrap=False)\n... '[3, 4, 5]'", "source": "codesearchnet"}
{"code": "def tracker(obj):\n    \n    import types as typ\n    global oids, uuids\n    import six\n    from inspect import isclass\n    untracked = (six.string_types, six.integer_types, float,\n                 complex, six.text_type)\n\n    semitrack = (list, dict, set, tuple)\n    if six.PY3: \n        semitrack = semitrack + (range, filter, map)\n        \n    if (isinstance(obj, semitrack) and\n        all([isinstance(t, untracked) for t in obj])):\n        if len(obj) > 0:\n            semiform = \"{0} len={1:d} min={2} max={3}\"\n            return semiform.format(type(obj), len(obj), min(obj), max(obj))\n        else:\n            semiform = \"{0} len={1:d}\"\n            return semiform.format(type(obj), len(obj))\n    elif isinstance(obj, semitrack):\n        \n        \n        \n        result = []\n\n        \n        \n        \n        \n        for o in obj[0:min((len(obj), 5))]:\n            track = tracker(o)\n            if isinstance(track, Instance):\n                result.append(track.uuid)\n            else:\n                result.append(track)\n\n        if len(obj) > 5:\n            result.append(\"... ({0:d} items)\".format(len(obj)))\n            \n        return tuple(result)\n    elif isinstance(obj, slice):\n        return \"slice({}, {}, {})\".format(obj.start, obj.stop, obj.step)\n    elif type(obj) is type:\n        return obj.__name__\n    elif type(obj) is typ.LambdaType:\n        if hasattr(obj, \"__fqdn__\"):\n            \n            \n            return obj.__fqdn__\n        else:\n            if six.PY2:\n                _code = obj.func_code\n            else: \n                _code = obj.__code__\n            return \"lambda ({})\".format(', '.join(_code.co_varnames))\n    elif type(obj) in [typ.FunctionType, typ.MethodType]: \n        return obj.__name__\n    elif not isinstance(obj, untracked):\n        \n        \n        \n        oid = id(obj)\n        if oid in oids:\n            result = oids[oid]\n        else:\n            result = Instance(oid, obj)\n            oids[oid] = result\n            uuids[result.uuid] = result\n        return result\n    else:\n        return None", "docstring": "Returns the :class:`Instance` of the specified object if it is one that\nwe track by default.\n\nArgs:\nobj (object): any python object passed as an argument to a method.\n\nReturns:\nInstance: if the object is trackable, the Instance instance of\nthat object; else None.", "source": "juraj-google-style"}
{"code": "def with_rank(self, rank):\n    try:\n        return self.merge_with(unknown_shape(rank=rank))\n    except ValueError:\n        raise ValueError('Shape %s must have rank %d' % (self, rank))", "docstring": "Returns a shape based on `self` with the given rank.\n\nThis method promotes a completely unknown shape to one with a\nknown rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with the given rank.\n\nRaises:\nValueError: If `self` does not represent a shape with the given `rank`.", "source": "github-repos"}
{"code": "def sanity_check_actions(actions_spec):\n    \n    \n    actions = copy.deepcopy(actions_spec)\n\n    \n    is_unique = ('type' in actions)\n    if is_unique:\n        actions = dict(action=actions)\n\n    \n    for name, action in actions.items():\n        \n        if 'type' not in action:\n            action['type'] = 'int'\n\n        \n        if action['type'] == 'int':\n            if 'num_actions' not in action:\n                raise TensorForceError(\"Action requires value 'num_actions' set!\")\n        elif action['type'] == 'float':\n            if ('min_value' in action) != ('max_value' in action):\n                raise TensorForceError(\"Action requires both values 'min_value' and 'max_value' set!\")\n\n        \n        if 'shape' not in action:\n            action['shape'] = ()\n\n        \n        if isinstance(action['shape'], int):\n            action['shape'] = (action['shape'],)\n\n    return actions, is_unique", "docstring": "Sanity checks an actions dict, used to define the action space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\nactions_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the action space desc and 2) whether there is only one component in the action space.", "source": "juraj-google-style"}
{"code": "def CreateDataTypeMapByType(cls, data_type_definition):\n    data_type_map_class = cls._MAP_PER_DEFINITION.get(data_type_definition.TYPE_INDICATOR, None)\n    if (not data_type_map_class):\n        return None\n    return data_type_map_class(data_type_definition)", "docstring": "Creates a specific data type map by type indicator.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nDataTypeMap: data type map or None if the date type definition\nis not available.", "source": "codesearchnet"}
{"code": "def _TensorArrayConcatGrad(op: ops.Operation, grad, unused_lengths_grad):\n    handle = op.inputs[0]\n    flow = op.inputs[1]\n    lengths = op.outputs[1]\n    dtype = op.get_attr('dtype')\n    grad_source = _GetGradSource(grad)\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    u_g = g.split(grad, lengths=lengths)\n    return [None, u_g.flow]", "docstring": "Gradient for TensorArrayConcat.\n\nArgs:\nop: Forward TensorArrayConcat op.\ngrad: Gradient `Tensor` to TensorArrayConcat.\n\nReturns:\nA flow `Tensor`, which can be used in control dependencies to\nforce the write of `grad` to the gradient `TensorArray`.", "source": "github-repos"}
{"code": "def _ProcessFileEntry(self, mediator, file_entry):\n    \n    display_name = mediator.GetDisplayName()\n    logger.debug(\n        '[ProcessFileEntry] processing file entry: {0:s}'.format(display_name))\n\n    reference_count = mediator.resolver_context.GetFileObjectReferenceCount(\n        file_entry.path_spec)\n\n    try:\n      if self._IsMetadataFile(file_entry):\n        self._ProcessMetadataFile(mediator, file_entry)\n\n      else:\n        file_entry_processed = False\n        for data_stream in file_entry.data_streams:\n          if self._abort:\n            break\n\n          if self._CanSkipDataStream(file_entry, data_stream):\n            logger.debug((\n                '[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: '\n                '{2:s}').format(\n                    data_stream.name, file_entry.type_indicator, display_name))\n            continue\n\n          self._ProcessFileEntryDataStream(mediator, file_entry, data_stream)\n\n          file_entry_processed = True\n\n        if not file_entry_processed:\n          \n          self._ProcessFileEntryDataStream(mediator, file_entry, None)\n\n    finally:\n      new_reference_count = (\n          mediator.resolver_context.GetFileObjectReferenceCount(\n              file_entry.path_spec))\n      if reference_count != new_reference_count:\n        \n        if mediator.resolver_context.ForceRemoveFileObject(\n            file_entry.path_spec):\n          logger.warning(\n              'File-object not explicitly closed for file: {0:s}'.format(\n                  display_name))\n\n    logger.debug(\n        '[ProcessFileEntry] done processing file entry: {0:s}'.format(\n            display_name))", "docstring": "Processes a file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry.", "source": "juraj-google-style"}
{"code": "def wait_for_batches(self, batch_ids, timeout=None):\n        \n        self._batch_tracker.watch_statuses(self, batch_ids)\n        timeout = timeout or DEFAULT_TIMEOUT\n        start_time = time()\n\n        with self._wait_condition:\n            while True:\n                if self._statuses is not None:\n                    return _format_batch_statuses(\n                        self._statuses, batch_ids, self._batch_tracker)\n\n                if time() - start_time > timeout:\n                    statuses = self._batch_tracker.get_statuses(batch_ids)\n                    return _format_batch_statuses(\n                        statuses, batch_ids, self._batch_tracker)\n\n                self._wait_condition.wait(timeout - (time() - start_time))", "docstring": "Locks until a list of batch ids is committed to the block chain\nor a timeout is exceeded. Returns the statuses of those batches.\n\nArgs:\nbatch_ids (list of str): The ids of the batches to wait for\ntimeout(int): Maximum time in seconds to wait for\n\nReturns:\nlist of BatchStatus: BatchStatuses to send back to client", "source": "juraj-google-style"}
{"code": "def ParseCmd(self, cmd_input, attributes=None, templates=None):\n        \n        \n        self.raw = cmd_input\n\n        if not templates:\n            \n            row_idx = self.index.GetRowMatch(attributes)\n            if row_idx:\n                templates = self.index.index[row_idx][\"Template\"]\n            else:\n                raise CliTableError(\n                    'No template found for attributes: \"%s\"' % attributes\n                )\n\n        template_files = self._TemplateNamesToFiles(templates)\n\n        try:\n            \n            self.Reset()\n            self._keys = set()\n            self.table = self._ParseCmdItem(self.raw, template_file=template_files[0])\n\n            \n            for tmplt in template_files[1:]:\n                self.extend(\n                    self._ParseCmdItem(self.raw, template_file=tmplt), set(self._keys)\n                )\n        finally:\n            for f in template_files:\n                f.close()", "docstring": "Creates a TextTable table of values from cmd_input string.\nParses command output with template/s. If more than one template is found\nsubsequent tables are merged if keys match (dropped otherwise).\nArgs:\ncmd_input: String, Device/command response.\nattributes: Dict, attribute that further refine matching template.\ntemplates: String list of templates to parse with. If None, uses index\nRaises:\nCliTableError: A template was not found for the given command.", "source": "juraj-google-style"}
{"code": "def is_global(self):\n    return ((not ((self.network_address in IPv4Network('100.64.0.0/10')) and (self.broadcast_address in IPv4Network('100.64.0.0/10')))) and (not self.is_private))", "docstring": "Test if this address is allocated for public networks.\n\nReturns:\nA boolean, True if the address is not reserved per\niana-ipv4-special-registry.", "source": "codesearchnet"}
{"code": "def _query(self, query_type, query_str, verbose=False):\n        \n        \n        cached = self.query_cache.get(query_str)\n        if cached:\n            if verbose:\n                print('Returning Cached VT Query Results')\n            return cached\n\n        \n        if query_type == 'file':\n            response = requests.get('https:\n                                    params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1})\n        else:\n            response = requests.post('https:\n                                     params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1})\n\n        \n        try:\n            vt_output = response.json()\n        except ValueError:\n            error_msg = 'VirusTotal no valid response, throttling and trying again...'\n            if self.throttle:\n                if verbose:\n                    print(error_msg)\n                time.sleep(30)\n                return self._query(query_type, query_str)\n\n            return {'vt_error': error_msg}\n\n        \n        if not vt_output or vt_output['response_code'] == 0:\n            output = {'query': query_str, 'not_found': True}\n            self.query_cache.set(query_str, output)\n            return output\n\n        \n        output = {field: vt_output[field] for field in vt_output.keys() if field not in self.exclude}\n\n        \n        output['query'] = query_str\n\n        \n        scan_results = collections.Counter()\n        for scan in vt_output['scans'].values():\n            if 'result' in scan:\n                if scan['result']:\n                    scan_results[scan['result']] += 1\n        output['scan_results'] = scan_results.most_common(5)\n\n        \n        self.query_cache.set(query_str, output)\n\n        \n        return output", "docstring": "Internal query method for the VirusTotal Service\nArgs:\nquery_type(str): The type of query (either 'file' or 'url')\nquery_str (str): The file hash or domain/url to be queried", "source": "juraj-google-style"}
{"code": "def zeros(shape, dtype=None, **kwargs):\n    \n    data = np.zeros(shape, dtype)\n    return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, filled with zeros.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array filled with zeros.", "source": "juraj-google-style"}
{"code": "def GetCredential(self, path_spec, identifier):\n    credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})\n    return credentials.get(identifier, None)", "docstring": "Retrieves a specific credential from the key chain.\n\nArgs:\npath_spec (PathSpec): path specification.\nidentifier (str): credential identifier.\n\nReturns:\nobject: credential or None if the credential for the path specification\nis not set.", "source": "codesearchnet"}
{"code": "def init_cache(self, batch_size, max_length):\n    input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n    attention_mask = jnp.ones_like(input_ids, dtype='i4')\n    position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)\n    init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True)\n    return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.", "source": "github-repos"}
{"code": "def update(self, domain, type_name, search_command, body):\n        \n        return self._request(domain, type_name, search_command, 'PUT', body)", "docstring": "Update entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\nbody (str): JSON body", "source": "juraj-google-style"}
{"code": "def key_validation_check(tweet_keys_list, superset_keys, minset_keys):\n    tweet_keys = set(tweet_keys_list)\n    minset_overlap = (tweet_keys & minset_keys)\n    if (minset_overlap != minset_keys):\n        raise UnexpectedFormatError('keys ({}) missing from Tweet (Public API data is not supported)'.format((minset_keys - tweet_keys)))\n    unexpected_keys = (tweet_keys - superset_keys)\n    if (len(unexpected_keys) > 0):\n        raise UnexpectedFormatError('Unexpected keys ({}) are in this Tweet'.format(unexpected_keys))\n    return 0", "docstring": "Validates the keys present in a Tweet.\n\nArgs:\ntweet_keys_list (list): the keys present in a tweet\nsuperset_keys (set): the set of all possible keys for a tweet\nminset_keys (set): the set of minimal keys expected in a tweet.\n\nReturns:\n0 if no errors\n\nRaises:\nUnexpectedFormatError on any mismatch of keys.", "source": "codesearchnet"}
{"code": "def add(self, coro, *args, **kw):\n    if asyncio.iscoroutinefunction(coro):\n        coro = coro(*args, **kw)\n    if (not asyncio.iscoroutine(coro)):\n        raise TypeError('paco: coro must be a coroutine object')\n    index = max(len(self.pool), 0)\n    task = Task(index, coro)\n    self.pool.append(task)\n    return coro", "docstring": "Adds a new coroutine function with optional variadic argumetns.\n\nArguments:\ncoro (coroutine function): coroutine to execute.\n*args (mixed): optional variadic arguments\n\nRaises:\nTypeError: if the coro object is not a valid coroutine\n\nReturns:\nfuture: coroutine wrapped future", "source": "codesearchnet"}
{"code": "def charge_balance(model):\n    \n\n    compound_charge = {}\n    for compound in model.compounds:\n        if compound.charge is not None:\n            compound_charge[compound.id] = compound.charge\n\n    for reaction in model.reactions:\n        charge = reaction_charge(reaction.equation, compound_charge)\n        yield reaction, charge", "docstring": "Calculate the overall charge for all reactions in the model.\n\nYield (reaction, charge) pairs.\n\nArgs:\nmodel: :class:`psamm.datasource.native.NativeModel`.", "source": "juraj-google-style"}
{"code": "def clip_range(nodes1, nodes2):\n    (coeff_a, coeff_b, coeff_c, d_min, d_max) = compute_fat_line(nodes1)\n    (_, num_nodes2) = nodes2.shape\n    polynomial = np.empty((2, num_nodes2), order='F')\n    denominator = float((num_nodes2 - 1))\n    for index in six.moves.xrange(num_nodes2):\n        polynomial[(0, index)] = (index / denominator)\n        polynomial[(1, index)] = (((coeff_a * nodes2[(0, index)]) + (coeff_b * nodes2[(1, index)])) + coeff_c)\n    start_bottom = np.asfortranarray([0.0, d_min])\n    end_bottom = np.asfortranarray([1.0, d_min])\n    start_top = np.asfortranarray([0.0, d_max])\n    end_top = np.asfortranarray([1.0, d_max])\n    s_min = DEFAULT_S_MIN\n    s_max = DEFAULT_S_MAX\n    for start_index in six.moves.xrange((num_nodes2 - 1)):\n        for end_index in six.moves.xrange((start_index + 1), num_nodes2):\n            (s_min, s_max) = _update_parameters(s_min, s_max, start_bottom, end_bottom, polynomial[(:, start_index)], polynomial[(:, end_index)])\n            (s_min, s_max) = _update_parameters(s_min, s_max, start_top, end_top, polynomial[(:, start_index)], polynomial[(:, end_index)])\n    return _check_parameter_range(s_min, s_max)", "docstring": "r\"\"\"Reduce the parameter range where two curves can intersect.\n\nDoes so by using the \"fat line\" for ``nodes1`` and computing the\ndistance polynomial against ``nodes2``.\n\n.. note::\n\nThis assumes, but does not check that the curves being considered\nwill only have one intersection in the parameter ranges\n:math:`s \\in \\left[0, 1\\right]`, :math:`t \\in \\left[0, 1\\right]`.\nThis assumption is based on the fact that B |eacute| zier clipping\nis meant to be used to find tangent intersections for already\nsubdivided (i.e. sufficiently zoomed in) curve segments.\n\nArgs:\nnodes1 (numpy.ndarray): ``2 x N1`` array of nodes in a curve which\nwill define the clipping region.\nnodes2 (numpy.ndarray): ``2 x N2`` array of nodes in a curve which\nwill be clipped.\n\nReturns:\nTuple[float, float]: The pair of\n\n* The start parameter of the clipped range.\n* The end parameter of the clipped range.", "source": "codesearchnet"}
{"code": "def from_file(cls, filename, *, strict=True):\n    config = cls()\n    config.load_from_file(filename, strict=strict)\n    return config", "docstring": "Create a new Config object from a configuration file.\n\nArgs:\nfilename (str): The location and name of the configuration file.\nstrict (bool): If true raises a ConfigLoadError when the configuration\ncannot be found.\n\nReturns:\nAn instance of the Config class.\n\nRaises:\nConfigLoadError: If the configuration cannot be found.", "source": "codesearchnet"}
{"code": "def MakeSimpleProtoClass(fields, full_name=None, pool=None):\n  \n  factory = message_factory.MessageFactory(pool=pool)\n\n  if full_name is not None:\n    try:\n      proto_cls = _GetMessageFromFactory(factory, full_name)\n      return proto_cls\n    except KeyError:\n      \n      pass\n\n  \n  \n  \n  field_items = fields.items()\n  if not isinstance(fields, OrderedDict):\n    field_items = sorted(field_items)\n\n  \n  \n  fields_hash = hashlib.sha1()\n  for f_name, f_type in field_items:\n    fields_hash.update(f_name.encode('utf-8'))\n    fields_hash.update(str(f_type).encode('utf-8'))\n  proto_file_name = fields_hash.hexdigest() + '.proto'\n\n  \n  if full_name is None:\n    full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' +\n                 fields_hash.hexdigest())\n    try:\n      proto_cls = _GetMessageFromFactory(factory, full_name)\n      return proto_cls\n    except KeyError:\n      \n      pass\n\n  \n  factory.pool.Add(\n      _MakeFileDescriptorProto(proto_file_name, full_name, field_items))\n  return _GetMessageFromFactory(factory, full_name)", "docstring": "Create a Protobuf class whose fields are basic types.\n\nNote: this doesn't validate field names!\n\nArgs:\nfields: dict of {name: field_type} mappings for each field in the proto. If\nthis is an OrderedDict the order will be maintained, otherwise the\nfields will be sorted by name.\nfull_name: optional str, the fully-qualified name of the proto type.\npool: optional DescriptorPool instance.\nReturns:\na class, the new protobuf class with a FileDescriptor.", "source": "juraj-google-style"}
{"code": "def _get_example_from_properties(self, spec):\n    local_spec = deepcopy(spec)\n    additional_property = False\n    if ('additionalProperties' in local_spec):\n        additional_property = True\n        if ('properties' not in local_spec):\n            local_spec['properties'] = {}\n        local_spec['properties'].update({'any_prop1': local_spec['additionalProperties'], 'any_prop2': local_spec['additionalProperties']})\n        del local_spec['additionalProperties']\n        required = local_spec.get('required', [])\n        required += ['any_prop1', 'any_prop2']\n        local_spec['required'] = required\n    example = {}\n    properties = local_spec.get('properties')\n    if (properties is not None):\n        required = local_spec.get('required', properties.keys())\n        for (inner_name, inner_spec) in properties.items():\n            if (inner_name not in required):\n                continue\n            partial = self.get_example_from_prop_spec(inner_spec)\n            if isinstance(partial, list):\n                partial = partial[0]\n            example[inner_name] = partial\n    return (example, additional_property)", "docstring": "Get example from the properties of an object defined inline.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example for the given spec\nA boolean, whether we had additionalProperties in the spec, or not", "source": "codesearchnet"}
{"code": "def _get_suffix(path):\n    suffix = os.path.basename(path).split('.')[(- 1)]\n    if ('/' in suffix):\n        raise UserWarning((\"Filename can't contain '/' in suffix (%s)!\" % path))\n    return suffix", "docstring": "Return suffix from `path`.\n\n``/home/xex/somefile.txt`` --> ``txt``.\n\nArgs:\npath (str): Full file path.\n\nReturns:\nstr: Suffix.\n\nRaises:\nUserWarning: When ``/`` is detected in suffix.", "source": "codesearchnet"}
{"code": "def write_string_to_file(filename, file_content):\n    with FileIO(filename, mode='w') as f:\n        f.write(file_content)", "docstring": "Writes a string to a given file.\n\nArgs:\nfilename: string, path to a file\nfile_content: string, contents that need to be written to the file\n\nRaises:\nerrors.OpError: If there are errors during the operation.", "source": "github-repos"}
{"code": "def baredoc(obj):\n    \n    doc = getdoc(obj)\n    if not doc:\n        return ''\n    doc = doc.splitlines()[0]\n    return doc.rstrip(' .').lstrip()", "docstring": "Return the first line of the docstring of an object.\n\nTrailing periods and spaces as well as leading spaces are removed from the\noutput.\n\nArgs:\nobj: any Python object.\nReturns:\nstr: the first line of the docstring of obj.", "source": "juraj-google-style"}
{"code": "def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):\n    self._whoosh.add_field(fieldname, fieldspec)\n    return self._whoosh.schema", "docstring": "Add a field in the index of the model.\n\nArgs:\nfieldname (Text): This parameters register a new field in specified model.\nfieldspec (Name, optional): This option adds various options as were described before.\n\nReturns:\nTYPE: The new schema after deleted is returned.", "source": "codesearchnet"}
{"code": "def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0, aspect_ratio: float = None):\n        \n        return matrix44.create_perspective_projection_matrix(\n            fov,\n            aspect_ratio or self.window.aspect_ratio,\n            near,\n            far,\n            dtype='f4',\n        )", "docstring": "Create a projection matrix with the following parameters.\nWhen ``aspect_ratio`` is not provided the configured aspect\nratio for the window will be used.\n\nArgs:\nfov (float): Field of view (float)\nnear (float): Camera near value\nfar (float): Camrea far value\n\nKeyword Args:\naspect_ratio (float): Aspect ratio of the viewport\n\nReturns:\nThe projection matrix as a float32 :py:class:`numpy.array`", "source": "juraj-google-style"}
{"code": "def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):\n    cand1_f = tf.to_float(cand1)\n    cand2_f = tf.to_float(cand2)\n    step_size = (cand2_f - cand1_f)\n    fpart = ((x - cand1_f) / step_size)\n    ret = tf.where(tf.greater(fpart, noise), cand2, cand1)\n    return ret", "docstring": "Round-off x to cand1 or to cand2 in an unbiased way.\n\nCand1 and cand2 are the same shape as x.\nFor every element of x, the corresponding elements of cand1 and cand2 should\nbe the two closest bfloat16 values to x.  Order does not matter.\ncand1 and cand2 must differ from each other.\n\nArgs:\nx: A float32 Tensor.\nnoise: A Tensor broadcastable to the shape of x containing\nrandom uniform values in [0.0, 1.0].\ncand1: A bfloat16 Tensor the same shape as x.\ncand2: A bfloat16 Tensor the same shape as x.\n\nReturns:\nA bfloat16 Tensor.", "source": "codesearchnet"}
{"code": "def protorpc_to_endpoints_error(self, status, body):\n    try:\n        rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body)\n    except (ValueError, messages.ValidationError):\n        rpc_error = remote.RpcStatus()\n    if (rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR):\n        error_class = _ERROR_NAME_MAP.get(rpc_error.error_name)\n        if error_class:\n            (status, body) = self.__write_error(error_class.http_status, rpc_error.error_message)\n    return (status, body)", "docstring": "Convert a ProtoRPC error to the format expected by Google Endpoints.\n\nIf the body does not contain an ProtoRPC message in state APPLICATION_ERROR\nthe status and body will be returned unchanged.\n\nArgs:\nstatus: HTTP status of the response from the backend\nbody: JSON-encoded error in format expected by Endpoints frontend.\n\nReturns:\nTuple of (http status, body)", "source": "codesearchnet"}
{"code": "def optimize(objective_function, domain,\n             stopping_condition, parameters=None,\n             position_update=functions.std_position,\n             velocity_update=functions.std_velocity,\n             parameter_update=functions.std_parameter_update,\n             measurements=(),\n             measurer=dictionary_based_metrics):\n    \n    params = __init_parameters__(parameters)\n\n    rng = np.random.RandomState(params['seed'])\n\n    initial_swarm = [functions.initialize_particle(rng, domain,\n                                                   objective_function)\n                     for i in range(params['swarm_size'])]\n    state = types.PSOState(rng, params, iterations=0, swarm=initial_swarm)\n\n    topology_function = state.params['topology']\n    update_fitness = functions.update_fitness\n    update_particle = functions.update_particle\n\n    results, measure = measurer(measurements)\n    while not stopping_condition(state):\n        n_bests = topology_function(state)\n\n        state = state._replace(swarm=[update_particle(position_update,\n                                                      velocity_update,\n                                                      state, n_bests, ip)\n                                      for ip in enumerate(state.swarm)])\n\n        state = state._replace(swarm=[update_fitness(objective_function,\n                                                     particle)\n                                      for particle in state.swarm],\n                               iterations=state.iterations + 1)\n\n        state = parameter_update(state, objective_function)\n\n        results = measure(results, state)\n\n    return functions.solution(state.swarm), results", "docstring": "Perform particle swarm optimization of the given fitness function.\nArgs:\nobjective_function: the cost function to optimize.\nstopping_condition: function specifying the stopping condition.\nparameters: dictionary: parameter dictionary for the PSO.\n\nReturns:\ncipy.algorithms.pso.Particle: The global best particle.", "source": "juraj-google-style"}
{"code": "def get_metric_by_name(self, metric_name, **kwargs):\n    return self._get_object_by_name(self._METRIC_ENDPOINT_SUFFIX, metric_name, **kwargs)", "docstring": "get a metric by name\n\nArgs:\nmetric_name (string): name of metric\n\nReturns:\ndictionary of response", "source": "codesearchnet"}
{"code": "def load(self, response):\n    self._response = response\n    if self.next_location(raw=True):\n        self._num_redirects += 1", "docstring": "Load the response and increment the counter.\n\nArgs:\nresponse (:class:`.http.request.Response`): The response from\na previous request.", "source": "codesearchnet"}
{"code": "def load_delivery_report(adapter: MongoAdapter, report_path: str, case_id: str, update: bool=False):\n    case_obj = adapter.case(case_id=case_id)\n    if (case_obj is None):\n        raise DataNotFoundError('no case found')\n    if (not case_obj.get('delivery_report')):\n        _put_report_in_case_root(case_obj, report_path)\n    elif update:\n        _put_report_in_case_root(case_obj, report_path)\n    else:\n        raise IntegrityError('Existing delivery report found, use update = True to overwrite')\n    logger.info('Saving report for case {} in database'.format(case_obj['_id']))\n    return adapter.replace_case(case_obj)", "docstring": "Load a delivery report into a case in the database\n\nIf the report already exists the function will exit.\nIf the user want to load a report that is already in the database\n'update' has to be 'True'\n\nArgs:\nadapter     (MongoAdapter): Connection to the database\nreport_path (string):       Path to delivery report\ncase_id     (string):       Optional case identifier\nupdate      (bool):         If an existing report should be replaced\n\nReturns:\nupdated_case(dict)", "source": "codesearchnet"}
{"code": "def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):\n        \n        if type(forensic_reports) == dict:\n            forensic_reports = [forensic_reports]\n\n        if len(forensic_reports) < 1:\n            return\n\n        try:\n            logger.debug(\"Saving forensic reports to Kafka\")\n            self.producer.send(forensic_topic, forensic_reports)\n        except UnknownTopicOrPartitionError:\n            raise KafkaError(\n                \"Kafka error: Unknown topic or partition on broker\")\n        except Exception as e:\n            raise KafkaError(\n                \"Kafka error: {0}\".format(e.__str__()))\n        try:\n            self.producer.flush()\n        except Exception as e:\n            raise KafkaError(\n                \"Kafka error: {0}\".format(e.__str__()))", "docstring": "Saves forensic DMARC reports to Kafka, sends individual\nrecords (slices) since Kafka requires messages to be <= 1MB\nby default.\n\nArgs:\nforensic_reports (list):  A list of forensic report dicts\nto save to Kafka\nforensic_topic (str): The name of the Kafka topic", "source": "juraj-google-style"}
{"code": "def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None):\n    for stat_response in stat_entries:\n        if stat_response.pathspec.last.stream_name:\n            stat_response.st_mode &= (~ stat_type_mask)\n            stat_response.st_mode |= stat.S_IFREG\n    if data_store.AFF4Enabled():\n        for stat_entry in stat_entries:\n            CreateAFF4Object(stat_entry, client_id_urn=rdf_client.ClientURN(client_id), mutation_pool=mutation_pool, token=token)\n    if data_store.RelationalDBEnabled():\n        path_infos = [rdf_objects.PathInfo.FromStatEntry(s) for s in stat_entries]\n        data_store.REL_DB.WritePathInfos(client_id, _FilterOutPathInfoDuplicates(path_infos))", "docstring": "Persists information about stat entries.\n\nArgs:\nstat_entries: A list of `StatEntry` instances.\nclient_id: An id of a client the stat entries come from.\nmutation_pool: A mutation pool used for writing into the AFF4 data store.\ntoken: A token used for writing into the AFF4 data store.", "source": "codesearchnet"}
{"code": "def get_session(db_url):\n        \n        \n        engine = create_engine(db_url, poolclass=NullPool, echo=False)\n        Session = sessionmaker(bind=engine)\n        Base.metadata.create_all(engine)\n        return Session()", "docstring": "Gets SQLAlchemy session given url. Your tables must inherit\nfrom Base in hdx.utilities.database.\n\nArgs:\ndb_url (str): SQLAlchemy url\n\nReturns:\nsqlalchemy.orm.session.Session: SQLAlchemy session", "source": "juraj-google-style"}
{"code": "def get_ip_prefixes_from_config(config, services, ip_version):\n    ip_prefixes = set()\n    for service in services:\n        ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))\n        if (ip_prefix.version == ip_version):\n            ip_prefixes.add(ip_prefix.with_prefixlen)\n    return ip_prefixes", "docstring": "Build a set of IP prefixes found in service configuration files.\n\nArguments:\nconfig (obg): A configparser object which holds our configuration.\nservices (list): A list of section names which are the name of the\nservice checks.\nip_version (int): IP protocol version\n\nReturns:\nA set of IP prefixes.", "source": "codesearchnet"}
{"code": "def as_graph(self, depth=0):\n        \n        if depth in self._graph_cache:\n            return self._graph_cache[depth]\n        self._graph_cache[depth] = graph = Graph(self, depth=depth)\n        return graph", "docstring": "Create a graph with self as node, cache it, return it.\n\nArgs:\ndepth (int): depth of the graph.\n\nReturns:\nGraph: an instance of Graph.", "source": "juraj-google-style"}
{"code": "def _launch_cli(self):\n    self._register_this_run_info(self._run_cli)\n    response = self._run_cli.run_ui(init_command=self._init_command, title=self._title, title_color=self._title_color)\n    return response", "docstring": "Launch the interactive command-line interface.\n\nReturns:\nThe OnRunStartResponse specified by the user using the \"run\" command.", "source": "github-repos"}
{"code": "def _wait_after(provider, job_ids, poll_interval, stop_on_failure):\n    job_ids_to_check = {j for j in job_ids if (j != dsub_util.NO_JOB)}\n    error_messages = []\n    while (job_ids_to_check and ((not error_messages) or (not stop_on_failure))):\n        print(('Waiting for: %s.' % ', '.join(job_ids_to_check)))\n        jobs_left = _wait_for_any_job(provider, job_ids_to_check, poll_interval)\n        jobs_completed = job_ids_to_check.difference(jobs_left)\n        tasks_completed = provider.lookup_job_tasks({'*'}, job_ids=jobs_completed)\n        dominant_job_tasks = _dominant_task_for_jobs(tasks_completed)\n        if (len(dominant_job_tasks) != len(jobs_completed)):\n            jobs_found = dsub_util.tasks_to_job_ids(dominant_job_tasks)\n            jobs_not_found = jobs_completed.difference(jobs_found)\n            for j in jobs_not_found:\n                error = ('%s: not found' % j)\n                print_error(('  %s' % error))\n                error_messages += [error]\n        for t in dominant_job_tasks:\n            job_id = t.get_field('job-id')\n            status = t.get_field('task-status')\n            print(('  %s: %s' % (str(job_id), str(status))))\n            if (status in ['FAILURE', 'CANCELED']):\n                error_messages += [provider.get_tasks_completion_messages([t])]\n        job_ids_to_check = jobs_left\n    return error_messages", "docstring": "Print status info as we wait for those jobs.\n\nBlocks until either all of the listed jobs succeed,\nor one of them fails.\n\nArgs:\nprovider: job service provider\njob_ids: a set of job IDs (string) to wait for\npoll_interval: integer seconds to wait between iterations\nstop_on_failure: whether to stop waiting if one of the tasks fails.\n\nReturns:\nEmpty list if there was no error,\na list of error messages from the failed tasks otherwise.", "source": "codesearchnet"}
{"code": "def last_updated(self, path):\n    return self._gcsIO().last_updated(path)", "docstring": "Get UNIX Epoch time in seconds on the FileSystem.\n\nArgs:\npath: string path of file.\n\nReturns: float UNIX Epoch time\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def _HasDuplicateRegistryKeyPaths(\n      self, filename, artifact_definition, source):\n    \n    result = False\n    intersection = self._artifact_registry_key_paths.intersection(\n        set(source.keys))\n    if intersection:\n      duplicate_key_paths = '\\n'.join(intersection)\n      logging.warning((\n          'Artifact definition: {0:s} in file: {1:s} has duplicate '\n          'Registry key paths:\\n{2:s}').format(\n              artifact_definition.name, filename, duplicate_key_paths))\n      result = True\n\n    self._artifact_registry_key_paths.update(source.keys)\n    return result", "docstring": "Checks if Registry key paths are not already defined by other artifacts.\n\nNote that at the moment this function will only find exact duplicate\nRegistry key paths.\n\nArgs:\nfilename (str): name of the artifacts definition file.\nartifact_definition (ArtifactDefinition): artifact definition.\nsource (SourceType): source definition.\n\nReturns:\nbool: True if the Registry key paths defined by the source type\nare used in other artifacts.", "source": "juraj-google-style"}
{"code": "def scored_to_phenotype(self,phenotypes):\n        \n        def _apply_score(scored_calls,phenotypes):\n            present = sorted(list(set(phenotypes)&set(scored_calls.keys())))\n            total = sum([scored_calls[x] for x in present])\n            if total > 1: \n                raise ValueError(\"You cant extract phenotypes from scores if they are not mutually exclusive\")\n            if total == 0: return np.nan\n            for label in present:\n                if scored_calls[label] == 1: return label\n            raise ValueError(\"Should have hit an exit criteria already\")\n        output = self.copy()\n        output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'],phenotypes),1)\n        \n        output['phenotype_calls'] = output.apply(lambda x: \n            dict([(y,1 if x['phenotype_label']==y else 0) for y in phenotypes])\n        ,1)\n        return output", "docstring": "Convert binary pehnotypes to mutually exclusive phenotypes.\nIf none of the phenotypes are set, then phenotype_label becomes nan\nIf any of the phenotypes are multiply set then it throws a fatal error.\n\nArgs:\nphenotypes (list): a list of scored_names to convert to phenotypes\n\nReturns:\nCellDataFrame", "source": "juraj-google-style"}
{"code": "def export(self, name=None):\n    with tf.name_scope(name or '%s_lookup_table_export' % self._name):\n        keys, values = gen_simple_hash_table_op.examples_simple_hash_table_export(self.resource_handle, key_dtype=self._key_dtype, value_dtype=self._value_dtype)\n        return (keys, values)", "docstring": "Export all `key` and `value` pairs.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA tuple of two tensors, the first with the `keys` and the second with\nthe `values`.", "source": "github-repos"}
{"code": "def transform_content(tags, content_transformer):\n    \n    if type(tags) not in [tuple, list]:\n        tags = [tags]\n\n    for tag in tags:\n        new_child = dhtmlparser.HTMLElement(content_transformer(tag))\n\n        \n        if hasattr(tag, \"parent\"):\n            new_child.parent = tag\n\n        tag.childs = [new_child]", "docstring": "Transform content in all `tags` using result of `content_transformer(tag)`\ncall.\n\nArgs:\ntags (obj/list): HTMLElement instance, or list of HTMLElement\ninstances.\ncontent_transformer (function): Function which is called as\n``content_transformer(tag)``.", "source": "juraj-google-style"}
{"code": "def _parse_hextet(self, hextet_str):\n        \n        \n        if not self._HEX_DIGITS.issuperset(hextet_str):\n            raise ValueError\n        if len(hextet_str) > 4:\n          raise ValueError\n        hextet_int = int(hextet_str, 16)\n        if hextet_int > 0xFFFF:\n            raise ValueError\n        return hextet_int", "docstring": "Convert an IPv6 hextet string into an integer.\n\nArgs:\nhextet_str: A string, the number to parse.\n\nReturns:\nThe hextet as an integer.\n\nRaises:\nValueError: if the input isn't strictly a hex number from [0..FFFF].", "source": "juraj-google-style"}
{"code": "def AsDict(self, dt=True):\n        \n        data = {}\n\n        if self.sharekey:\n            data['sharekey'] = self.sharekey\n        if self.name:\n            data['name'] = self.name\n        if self.user:\n            data['user'] = self.user.AsDict()\n        if self.title:\n            data['title'] = self.title\n        if self.description:\n            data['description'] = self.description\n        if self.posted_at:\n            if dt:\n                data['posted_at'] = self.posted_at\n            else:\n                data['posted_at'] = self.posted_at_iso\n        if self.permalink:\n            data['permalink'] = self.permalink\n        if self.width:\n            data['width'] = self.width\n        if self.height:\n            data['height'] = self.height\n        if self.image_url:\n            data['image_url'] = self.image_url\n        if self.source_url:\n            data['source_url'] = self.source_url\n        data['views'] = self.views\n        data['likes'] = self.likes\n        data['saves'] = self.saves\n        data['comments'] = self.comments\n        data['nsfw'] = self.nsfw\n        data['saved'] = self.saved\n        data['liked'] = self.liked\n\n        return data", "docstring": "A dict representation of this Shake instance.\n\nThe return value uses the same key names as the JSON representation.\n\nArgs:\ndt (bool): If True, return dates as python datetime objects. If\nFalse, return dates as ISO strings.\n\nReturn:\nA dict representing this Shake instance", "source": "juraj-google-style"}
{"code": "def from_json_str(cls, json_str: str, primitive_cls: Type[message.Message], context: Context) -> 'PrimitiveWrapper':", "docstring": "Serializes json_str into an instance of primitive_cls and wraps.\n\nArgs:\njson_str: The string-representation of the raw json_value to serialize\ninto primitive_cls and wrap.\nprimitive_cls: The FHIR primitive class to serialize into and wrap.\ncontext: Related primitive information to use for printing/parsing a\nwrapped primitive.\n\nReturns:\nAn instance of PrimitiveWrapper.", "source": "github-repos"}
{"code": "def does_attribute_meet_condition(self, attribute, conditions):\n        \n        if conditions is None or len(conditions) == 0:\n            return True\n\n        for attribute_name, attribute_value in conditions.items():\n            value = getattr(attribute, attribute_name, False)\n            if value != attribute_value and bool(value) != attribute_value:\n                return False\n\n        return True", "docstring": "Check if the attribute meet all the given conditions\n\nArgs:\nattribute: the attribute information\nconditions: a dictionary of condition to match\n\nReturns:\nTrue if the attribute match all conditions. False otherwise", "source": "juraj-google-style"}
{"code": "def _repr_to_list(value: torch.Tensor):\n    torch.set_printoptions(sci_mode=True, linewidth=120)\n    with StringIO() as buf, redirect_stdout(buf):\n        print(value)\n        raw = buf.getvalue()\n    return _sanitize_repr_for_diff(raw).splitlines()", "docstring": "Converts a tensor into a sanitized multi-line string representation.\n\nArgs:\nvalue (`torch.Tensor`): The tensor to represent.\n\nReturns:\n`List[str]`: List of string lines representing the tensor.", "source": "github-repos"}
{"code": "def open_if_needed(self, mode=None):\n        \n        was_open = self.is_open()\n\n        if not was_open:\n            self.open(mode=mode)\n\n        try:\n            yield self\n        finally:\n            if not was_open:\n                self.close()", "docstring": "Convenience context-manager for the use with ``with``.\nOpens the container if not already done.\nOnly closes the container if it was opened within this context.\n\nArgs:\nmode (str): Either 'r' for read-only, 'w' for truncate and write or\n'a' for append. (default: 'a').\nIf ``None``, uses ``self.mode``.", "source": "juraj-google-style"}
{"code": "def get_field(proto: message.Message, fields: FieldTypes) -> tuple[Any, Optional[descriptor.FieldDescriptor]]:\n    field_proto = proto\n    field_desc = None\n    for field_proto, field_desc, _, _ in _walk_fields(proto, fields):\n        pass\n    return (field_proto, field_desc)", "docstring": "Returns the field and field descriptor from the proto.\n\nArgs:\nproto: Parent proto of any message type.\nfields: List of string/int/map key fields, e.g. [\"nodes\", \"attr\", \"value\"]\ncan represent `proto.nodes.attr[\"value\"]`.\n\nReturns:\nTuple of (\nField in the proto or `None` if none are found,\nField descriptor\n)", "source": "github-repos"}
{"code": "def get(cls, blob_key, **ctx_options):\n    \n    fut = cls.get_async(blob_key, **ctx_options)\n    return fut.get_result()", "docstring": "Retrieve a BlobInfo by key.\n\nArgs:\nblob_key: A blob key.  This may be a str, unicode or BlobKey instance.\n**ctx_options: Context options for Model().get_by_id().\n\nReturns:\nA BlobInfo entity associated with the provided key,  If there was\nno such entity, returns None.", "source": "juraj-google-style"}
{"code": "def mark_experimental(fn):\n\n    @wraps(fn)\n    def wrapper(*args, **kw):\n        from peltak.core import shell\n        if shell.is_tty:\n            warnings.warn('This command is has experimental status. The interface is not yet stable and might change without notice within with a patch version update. Use at your own risk')\n        return fn(*args, **kw)\n    return wrapper", "docstring": "Mark function as experimental.\n\nArgs:\nfn (FunctionType):\nThe command function to decorate.", "source": "codesearchnet"}
{"code": "def _create_record_internal(self, rtype, name, content, identifier=None):\n    name = (self._relative_name(name) if (name is not None) else name)\n    LOGGER.debug('Creating record with name %s', name)\n    if self._is_duplicate_record(rtype, name, content):\n        return True\n    data = self._get_post_data_to_create_dns_entry(rtype, name, content, identifier)\n    LOGGER.debug('Create DNS data: %s', data)\n    create_response = self.session.post(self.URLS['dns_create_entry'].format(self.domain_id), data=data)\n    self._invalidate_records_cache()\n    self._log('Create DNS entry', create_response)\n    was_success = (len(self._list_records(rtype, name, content)) > 0)\n    if was_success:\n        msg = 'Successfully added record %s'\n    else:\n        msg = 'Failed to add record %s'\n    LOGGER.info(msg, name)\n    return was_success", "docstring": "Create a new DNS entry in the domain zone if it does not already exist.\n\nArgs:\nrtype (str): The DNS type (e.g. A, TXT, MX, etc) of the new entry.\nname (str): The name of the new DNS entry, e.g the domain for which a\nMX entry shall be valid.\ncontent (str): The content of the new DNS entry, e.g. the mail server\nhostname for a MX entry.\n[identifier] (str): The easyname id of a DNS entry. Use to overwrite an\nexisting entry.\n\nReturns:\nbool: True if the record was created successfully, False otherwise.", "source": "codesearchnet"}
{"code": "def create_output(self, key, value, variable_type=None):\n        \n        results = None\n        if key is not None:\n            key = key.strip()\n            key_type = '{}-{}'.format(key, variable_type)\n            if self.out_variables_type.get(key_type) is not None:\n                \n                v = self.out_variables_type.get(key_type)\n                self.tcex.log.info(\n                    u'Variable {} was requested by downstream app.'.format(v.get('variable'))\n                )\n                if value is not None:\n                    results = self.create(v.get('variable'), value)\n                else:\n                    self.tcex.log.info(\n                        u'Variable {} has a none value and will not be written.'.format(key)\n                    )\n            elif self.out_variables.get(key) is not None and variable_type is None:\n                \n                v = self.out_variables.get(key)\n                self.tcex.log.info(\n                    u'Variable {} was requested by downstream app.'.format(v.get('variable'))\n                )\n                if value is not None:\n                    results = self.create(v.get('variable'), value)\n                else:\n                    self.tcex.log.info(\n                        u'Variable {} has a none value and will not be written.'.format(\n                            v.get('variable')\n                        )\n                    )\n            else:\n                var_value = key\n                if variable_type is not None:\n                    var_value = key_type\n                self.tcex.log.info(\n                    u'Variable {} was NOT requested by downstream app.'.format(var_value)\n                )\n        return results", "docstring": "Wrapper for Create method of CRUD operation for working with KeyValue DB.\n\nThis method will automatically check to see if provided variable was requested by\na downstream app and if so create the data in the KeyValue DB.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\nvariable_type (string): The variable type being written.\n\nReturns:\n(string): Result string of DB write.", "source": "juraj-google-style"}
{"code": "def port_tag_details(cls, tags):\n        \n        \n        for tag in tags:\n            match = port_tag_re.match(tag)\n            if match:\n                source_sink, port, extra = match.groups()\n                return source_sink == \"source\", cls(port), extra", "docstring": "Search tags for port info, returning it\n\nArgs:\ntags: A list of tags to check\n\nReturns:\nNone or (is_source, port, connected_value|disconnected_value)\nwhere port is one of the Enum entries of Port", "source": "juraj-google-style"}
{"code": "def topological_sort(data):\n    \n\n    def check_self_dependencies(input_data):\n        \n        for k, v in input_data.items():\n            if k in v:\n                raise ValueError('Self-dependency, {} depends on itself.'.format(k))\n\n    def prepare_input_data(input_data):\n        \n        return {k: set(v) for k, v in input_data.items()}\n\n    def find_items_without_dependencies(input_data):\n        \n        return list(reduce(set.union, input_data.values()) - set(input_data.keys()))\n\n    def add_empty_dependencies(data):\n        items_without_dependencies = find_items_without_dependencies(data)\n        data.update({item: set() for item in items_without_dependencies})\n\n    def get_sorted(input_data):\n        data = input_data\n        while True:\n            ordered = set(item for item, dep in data.items() if len(dep) == 0)\n            if not ordered:\n                break\n            yield ordered\n            data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered}\n\n        if len(data) != 0:\n            raise ValueError('Cyclic dependencies exist '\n                             'among these items: {}'.format(', '.join(repr(x) for x in data.items())))\n\n    check_self_dependencies(data)\n\n    if not len(data):\n        return []\n\n    data_copy = prepare_input_data(data)\n    add_empty_dependencies(data_copy)\n\n    result = []\n    for d in get_sorted(data_copy):\n        try:\n            d = sorted(d)\n        except TypeError:\n            d = list(d)\n\n        result.extend(d)\n    return result", "docstring": "Topological sort the given dictionary structure.\n\nArgs:\ndata (dict); dictionary structure where the value is a list of dependencies for that given key.\nFor example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``.\n\nReturns:\ntuple: the dependencies in constructor order", "source": "juraj-google-style"}
{"code": "async def get_tournaments(self, subdomain: str = None, force_update: bool = False) -> list:\n        \n        if self.tournaments is None:\n            force_update = True\n            self._subdomains_searched.append('' if subdomain is None else subdomain)\n        elif subdomain is None and '' not in self._subdomains_searched:\n            force_update = True\n            self._subdomains_searched.append('')\n        elif subdomain is not None and subdomain not in self._subdomains_searched:\n            force_update = True\n            self._subdomains_searched.append(subdomain)\n\n        if force_update:\n            params = {\n                'include_participants': 1 if AUTO_GET_PARTICIPANTS else 0,\n                'include_matches': 1 if AUTO_GET_MATCHES else 0\n            }\n            if subdomain is not None:\n                params['subdomain'] = subdomain\n\n            res = await self.connection('GET', 'tournaments', **params)\n            if len(res) == 0:\n                self.tournaments = []\n            else:\n                for t_data in res:\n                    self._refresh_tournament_from_json(t_data)\n\n        return self.tournaments", "docstring": "gets all user's tournaments\n\n|methcoro|\n\nArgs:\nsubdomain: *optional* subdomain needs to be given explicitely to get tournaments in a subdomain\nforce_update: *optional* set to True to force the data update from Challonge\n\nReturns:\nlist[Tournament]: list of all the user tournaments\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def set_hostname(self, value=None, default=False, disable=False):\n        \n        cmd = self.command_builder('hostname', value=value, default=default,\n                                   disable=disable)\n        return self.configure(cmd)", "docstring": "Configures the global system hostname setting\n\nEosVersion:\n4.13.7M\n\nArgs:\nvalue (str): The hostname value\ndefault (bool): Controls use of the default keyword\ndisable (bool): Controls the use of the no keyword\n\nReturns:\nbool: True if the commands are completed successfully", "source": "juraj-google-style"}
{"code": "def _insert(self, item, feed_item):\n    return self._api().insert(profileId=self.profile_id, body=item).execute()", "docstring": "Inserts a new item into CM.\n\nArgs:\nitem: The CM object to insert.\nfeed_item: The feed item from the Bulkdozer feed representing the item to\ninsert.\n\nReturns:\nThe CM object representing the item inserted.", "source": "github-repos"}
{"code": "def pull(self, project, run=None, entity=None):\n    (project, run) = self.parse_slug(project, run=run)\n    urls = self.download_urls(project, run, entity)\n    responses = []\n    for fileName in urls:\n        (_, response) = self.download_write_file(urls[fileName])\n        if response:\n            responses.append(response)\n    return responses", "docstring": "Download files from W&B\n\nArgs:\nproject (str): The project to download\nrun (str, optional): The run to upload to\nentity (str, optional): The entity to scope this project to.  Defaults to wandb models\n\nReturns:\nThe requests library response object", "source": "codesearchnet"}
{"code": "def _GetPathSegmentSeparator(self, path):\n    \n    if path.startswith('\\\\') or path[1:].startswith(':\\\\'):\n      return '\\\\'\n\n    if path.startswith('/'):\n      return '/'\n\n    if '/' and '\\\\' in path:\n      \n      forward_count = len(path.split('/'))\n      backward_count = len(path.split('\\\\'))\n\n      if forward_count > backward_count:\n        return '/'\n\n      return '\\\\'\n\n    \n    \n    if '/' in path:\n      return '/'\n\n    return '\\\\'", "docstring": "Given a path give back the path separator as a best guess.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: path segment separator.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:\n    if not isinstance(hidden_states, (tuple, list)):\n        raise TypeError('hidden_states should be a tuple or list of tensors')\n    if len(hidden_states) != len(self.config.neck_hidden_sizes):\n        raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n    if self.reassemble_stage is not None:\n        hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n    features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n    output = self.fusion_stage(features)\n    return (output, features[-1])", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def __init__(self, profile_id: str, profile_location: Optional[str]=None, log_results: bool=False, file_copy_fn: Optional[Callable[[str, str], None]]=None, time_prefix: str='%Y-%m-%d_%H_%M_%S-', enable_cpu_profiling: bool=False, enable_memory_profiling: bool=False):\n    self.profile_id = str(profile_id)\n    self.profile_location = profile_location\n    self.log_results = log_results\n    self.file_copy_fn = file_copy_fn or self.default_file_copy_fn\n    self.time_prefix = time_prefix\n    self.enable_cpu_profiling = enable_cpu_profiling\n    self.enable_memory_profiling = enable_memory_profiling", "docstring": "Creates a Profile object.\n\nArgs:\nprofile_id: Unique id of the profiling session.\nprofile_location: The file location where the profiling results will be\nstored.\nlog_results: Log the result to console if true.\nfile_copy_fn: Lambda function for copying files.\ntime_prefix: Format of the timestamp prefix in profiling result files.\nenable_cpu_profiling: CPU profiler will be enabled during the profiling\nsession.\nenable_memory_profiling: Memory profiler will be enabled during the\nprofiling session, the profiler only records the newly allocated objects\nin this session.", "source": "github-repos"}
{"code": "def materialize(self, ref, table_name=None, index_columns=None, logger=None):\n    from ambry.library import Library\n    assert isinstance(self._library, Library)\n    logger.debug('Materializing warehouse partition.\\n    partition: {}'.format(ref))\n    partition = self._library.partition(ref)\n    connection = self._backend._get_connection()\n    return self._backend.install(connection, partition, table_name=table_name, index_columns=index_columns, materialize=True, logger=logger)", "docstring": "Creates materialized table for given partition reference.\n\nArgs:\nref (str): id, vid, name or vname of the partition.\n\nReturns:\nstr: name of the partition table in the database.", "source": "codesearchnet"}
{"code": "def thermal_conductivity(self, temperature, volume):\n        \n        gamma = self.gruneisen_parameter(temperature, volume)\n        theta_d = self.debye_temperature(volume)  \n        theta_a = theta_d * self.natoms**(-1./3.)  \n        prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3)\n        \n        prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass\n        kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228)\n        \n        \n        kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10\n        return kappa", "docstring": "Eq(17) in 10.1103/PhysRevB.90.174107\n\nArgs:\ntemperature (float): temperature in K\nvolume (float): in Ang^3\n\nReturns:\nfloat: thermal conductivity in W/K/m", "source": "juraj-google-style"}
{"code": "def visit_invoke_reference(self, identifier: InvokeReferenceNode) -> Any:\n    return self.visit_invoke_expression(identifier)", "docstring": "Allows visitors to implement custom Reference logic.\n\nBy default, calls `visit_invoke_expression`. Subclasses may override this\nmethod to introduce custom logic for handling references.\n\nThis function is called when the 'reference' identifier is invoked against a\nFHIR Reference resource. The visit_invoke_expression function is called for\nall other invocations.\n\nArgs:\nidentifier: The identifier on the right hand side of an invocation.\n\nReturns:\nThe result of the reference invocation.", "source": "github-repos"}
{"code": "def verify_id_token(id_token, audience, http=None, cert_uri=ID_TOKEN_VERIFICATION_CERTS):\n    _require_crypto_or_die()\n    if (http is None):\n        http = transport.get_cached_http()\n    (resp, content) = transport.request(http, cert_uri)\n    if (resp.status == http_client.OK):\n        certs = json.loads(_helpers._from_bytes(content))\n        return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)\n    else:\n        raise VerifyJwtTokenError('Status code: {0}'.format(resp.status))", "docstring": "Verifies a signed JWT id_token.\n\nThis function requires PyOpenSSL and because of that it does not work on\nApp Engine.\n\nArgs:\nid_token: string, A Signed JWT.\naudience: string, The audience 'aud' that the token should be for.\nhttp: httplib2.Http, instance to use to make the HTTP request. Callers\nshould supply an instance that has caching enabled.\ncert_uri: string, URI of the certificates in JSON format to\nverify the JWT against.\n\nReturns:\nThe deserialized JSON in the JWT.\n\nRaises:\noauth2client.crypt.AppIdentityError: if the JWT fails to verify.\nCryptoUnavailableError: if no crypto library is available.", "source": "codesearchnet"}
{"code": "def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, collections=None, caching_device=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE):\n    initializing_from_value = False\n    if initializer is not None and (not callable(initializer)):\n        initializing_from_value = True\n    if shape is not None and initializing_from_value:\n        raise ValueError('If initializer is a constant, do not specify shape.')\n    dtype = dtypes.as_dtype(dtype)\n    if shape is not None:\n        shape = tensor_shape.as_shape(shape)\n    if name in self._vars:\n        if reuse is False:\n            var = self._vars[name]\n            err_msg = 'Variable %s already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?' % name\n            if isinstance(var, resource_variable_ops.ResourceVariable):\n                raise ValueError(err_msg)\n            tb = var.op.traceback[::-1]\n            tb = [x for x in tb if 'tensorflow/python' not in x[0]][:5]\n            raise ValueError('%s Originally defined at:\\n\\n%s' % (err_msg, ''.join(traceback.format_list(tb))))\n        found_var = self._vars[name]\n        if shape is not None and (not shape.is_compatible_with(found_var.get_shape())):\n            raise ValueError('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape()))\n        if not dtype.is_compatible_with(found_var.dtype):\n            dtype_str = dtype.name\n            found_type_str = found_var.dtype.name\n            raise ValueError('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str))\n        return found_var\n    if reuse is True:\n        raise ValueError('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=tf.AUTO_REUSE in VarScope?' % name)\n    if initializer is None:\n        if shape is None:\n            raise ValueError(f'Variable {name} did not get an initializer, so its `shape` argument must be specified.')\n        initializer, initializing_from_value = self._get_default_initializer(name=name, shape=shape, dtype=dtype)\n    with ops.init_scope():\n        if initializing_from_value:\n            init_val = initializer\n            variable_dtype = None\n        else:\n            if tf_inspect.isclass(initializer):\n                initializer = initializer()\n            if shape is not None and shape.is_fully_defined():\n                if 'partition_info' in tf_inspect.getargspec(initializer).args:\n                    init_val = functools.partial(initializer, shape.as_list(), dtype=dtype, partition_info=partition_info)\n                else:\n                    init_val = functools.partial(initializer, shape.as_list(), dtype=dtype)\n                variable_dtype = dtype.base_dtype\n            elif _needs_no_arguments(initializer):\n                init_val = initializer\n                variable_dtype = None\n            else:\n                raise ValueError(\"The initializer passed is not valid. 
It should be a callable with no arguments and the shape should not be provided or an instance of `tf.keras.initializers.*' and `shape` should be fully defined.\")\n    if use_resource is None:\n        use_resource = resource_variables_toggle.resource_variables_enabled()\n    v = _variable_v1(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, shape=shape)\n    if context.executing_eagerly() and self._store_eager_variables:\n        if collections:\n            ops.add_to_collections(collections, v)\n        else:\n            ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)\n        if trainable:\n            ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)\n    if not context.executing_eagerly() or self._store_eager_variables:\n        self._vars[name] = v\n    logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer)\n    if regularizer:\n\n        def make_regularizer_op():\n            with ops.colocate_with(v):\n                with ops.name_scope(name + '/Regularizer/'):\n                    return regularizer(v)\n        if regularizer(v) is not None:\n            lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op)\n            ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, lazy_eval_tensor)\n    return v", "docstring": "Get or create a single Variable (e.g.\n\na shard or entire variable).\n\nSee the documentation of get_variable above (ignore partitioning components)\nfor details.\n\nArgs:\nname: see get_variable.\nshape: see get_variable.\ndtype: see get_variable.\ninitializer: see get_variable.\nregularizer: see get_variable.\npartition_info: _PartitionInfo object.\nreuse: see get_variable.\ntrainable: see get_variable.\ncollections: see get_variable.\ncaching_device: see get_variable.\nvalidate_shape: see get_variable.\nuse_resource: see get_variable.\nconstraint: see get_variable.\nsynchronization: see get_variable.\naggregation: see get_variable.\n\nReturns:\nA Variable.  See documentation of get_variable above.\n\nRaises:\nValueError: See documentation of get_variable above.", "source": "github-repos"}
{"code": "def generate_lars_path(weighted_data, weighted_labels):\n    x_vector = weighted_data\n    (alphas, _, coefs) = lars_path(x_vector, weighted_labels, method='lasso', verbose=False)\n    return (alphas, coefs)", "docstring": "Generates the lars path for weighted data.\n\nArgs:\nweighted_data: data that has been weighted by kernel\nweighted_label: labels, weighted by kernel\n\nReturns:\n(alphas, coefs), both are arrays corresponding to the\nregularization parameter and coefficients, respectively", "source": "codesearchnet"}
{"code": "def snr(*args, **kwargs):\n    squeeze = False\n    max_length = 0\n    for arg in args:\n        try:\n            length = len(arg)\n            if (length > max_length):\n                max_length = length\n        except TypeError:\n            pass\n    if (max_length == 0):\n        squeeze = True\n    kwargs['length'] = max_length\n    snr_main = SNR(**kwargs)\n    if squeeze:\n        snr_out = snr_main(*args)\n        return {key: np.squeeze(snr_out[key]) for key in snr_out}\n    return snr_main(*args)", "docstring": "Compute the SNR of binaries.\n\nsnr is a function that takes binary parameters and sensitivity curves as inputs,\nand returns snr for chosen phases.\n\nWarning: All binary parameters must be either scalar, len-1 arrays,\nor arrays of the same length. All of these can be used at once. However,\nyou cannot input multiple arrays of different lengths.\n\nArguments:\n*args: Arguments for :meth:`gwsnrcalc.utils.pyphenomd.PhenomDWaveforms.__call__`\n**kwargs: Keyword arguments related to\nparallel generation (see :class:`gwsnrcalc.utils.parallel`),\nwaveforms (see :class:`gwsnrcalc.utils.pyphenomd`),\nor sensitivity information (see :class:`gwsnrcalc.utils.sensitivity`).\n\nReturns:\n(dict or list of dict): Signal-to-Noise Ratio dictionary for requested phases.", "source": "codesearchnet"}
{"code": "def _get_showcase_dataset_dict(self, dataset):\n        \n        \n        if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):\n            if 'id' not in dataset:\n                dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])\n            dataset = dataset['id']\n        elif not isinstance(dataset, str):\n            raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__)\n        if is_valid_uuid(dataset) is False:\n            raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset)\n        return {'showcase_id': self.data['id'], 'package_id': dataset}", "docstring": "Get showcase dataset dict\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary\n\nReturns:\nDict: showcase dataset dict", "source": "juraj-google-style"}
{"code": "def _CheckIsDevice(self, file_entry):\n    \n    if definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types:\n      return False\n    return file_entry.IsDevice()", "docstring": "Checks the is_device find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "juraj-google-style"}
{"code": "def clean_args(self, args, keys):\n    for k in keys:\n        if getattr(args, k) is None:\n            delattr(args, k)", "docstring": "Clean None values out of the arg namespace.\n\nThis lets us check for a config file arg based on whether the None default\nwas overwritten.\n\nArgs:\nargs: an argparse.Namespace.\nkeys: Keys to clean if None", "source": "github-repos"}
{"code": "def transform(self, col):\n        \n        out = pd.DataFrame(index=col.index)\n        out[self.col_name] = col.fillna(self.default_value)\n        out[self.new_name] = (pd.notnull(col) * 1).astype(int)\n        return out", "docstring": "Prepare the transformer to convert data and return the processed table.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def bdp(tickers, flds, **kwargs):\n    \n    logger = logs.get_logger(bdp, level=kwargs.pop('log', logs.LOG_LEVEL))\n    con, _ = create_connection()\n    ovrds = assist.proc_ovrds(**kwargs)\n\n    logger.info(\n        f'loading reference data from Bloomberg:\\n'\n        f'{assist.info_qry(tickers=tickers, flds=flds)}'\n    )\n    data = con.ref(tickers=tickers, flds=flds, ovrds=ovrds)\n    if not kwargs.get('cache', False): return [data]\n\n    qry_data = []\n    for r, snap in data.iterrows():\n        subset = [r]\n        data_file = storage.ref_file(\n            ticker=snap.ticker, fld=snap.field, ext='pkl', **kwargs\n        )\n        if data_file:\n            if not files.exists(data_file): qry_data.append(data.iloc[subset])\n            files.create_folder(data_file, is_file=True)\n            data.iloc[subset].to_pickle(data_file)\n\n    return qry_data", "docstring": "Bloomberg reference data\n\nArgs:\ntickers: tickers\nflds: fields to query\n**kwargs: bbg overrides\n\nReturns:\npd.DataFrame\n\nExamples:\n>>> bdp('IQ US Equity', 'Crncy', raw=True)\nticker  field value\n0  IQ US Equity  Crncy   USD\n>>> bdp('IQ US Equity', 'Crncy').reset_index()\nticker crncy\n0  IQ US Equity   USD", "source": "juraj-google-style"}
{"code": "def load_subclasses(klass, modules=None):\n    if modules:\n        if isinstance(modules, six.string_types):\n            modules = [modules]\n        loader = Loader()\n        loader.load(*modules)\n    return klass.__subclasses__()", "docstring": "Load recursively all all subclasses from a module.\n\nArgs:\nklass (str or list of str): Class whose subclasses we want to load.\nmodules: List of additional modules or module names that should be\nrecursively imported in order to find all the subclasses of the\ndesired class. Default: None\n\nFIXME: This function is kept only for backward compatibility reasons, it\nshould not be used. Deprecation warning should be raised and it should\nbe replaces by the ``Loader`` class.", "source": "codesearchnet"}
{"code": "def relative_humidity(self, value=999):\n        \n        if value is not None:\n            try:\n                value = int(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type int '\n                                 'for field `relative_humidity`'.format(value))\n            if value < 0:\n                raise ValueError('value need to be greater or equal 0 '\n                                 'for field `relative_humidity`')\n            if value > 110:\n                raise ValueError('value need to be smaller 110 '\n                                 'for field `relative_humidity`')\n\n        self._relative_humidity = value", "docstring": "Corresponds to IDD Field `relative_humidity`\n\nArgs:\nvalue (int): value for IDD Field `relative_humidity`\nvalue >= 0\nvalue <= 110\nMissing value: 999\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def to_tensor_list(element_spec, element):\n    return _to_tensor_list_helper(lambda state, spec, component: state + spec._to_tensor_list(component), element_spec, element)", "docstring": "Returns a tensor list representation of the element.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\nelement: The element to convert to tensor list representation.\n\nReturns:\nA tensor list representation of `element`.\n\nRaises:\nValueError: If `element_spec` and `element` do not have the same number of\nelements or if the two structures are not nested in the same way.\nTypeError: If `element_spec` and `element` differ in the type of sequence\nin any of their substructures.", "source": "github-repos"}
{"code": "def write_tarball(voevents, filepath):\n    \n    tuple_gen = ( (v.ivorn, v.xml) for v in voevents)\n    return write_tarball_from_ivorn_xml_tuples(tuple_gen,\n                                               filepath)", "docstring": "Iterate over voevent models / dbrows and write to bz'd tarball.\n\nArgs:\nvoevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows,\nwith access to the 'ivorn' and 'xml' attributes.\nfilepath (string): Path to the new tarball to create. Typically of form\n'/path/to/foo.tar.bz2'\nReturns\npacket_count (int): Number of packets written to tarball", "source": "juraj-google-style"}
{"code": "def AcceptableMimeType(accept_patterns, mime_type):\n    if ('/' not in mime_type):\n        raise exceptions.InvalidUserInputError(('Invalid MIME type: \"%s\"' % mime_type))\n    unsupported_patterns = [p for p in accept_patterns if (';' in p)]\n    if unsupported_patterns:\n        raise exceptions.GeneratedClientError(('MIME patterns with parameter unsupported: \"%s\"' % ', '.join(unsupported_patterns)))\n\n    def MimeTypeMatches(pattern, mime_type):\n        'Return True iff mime_type is acceptable for pattern.'\n        if (pattern == '*'):\n            pattern = '*/*'\n        return all(((accept in ('*', provided)) for (accept, provided) in zip(pattern.split('/'), mime_type.split('/'))))\n    return any((MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns))", "docstring": "Return True iff mime_type is acceptable for one of accept_patterns.\n\nNote that this function assumes that all patterns in accept_patterns\nwill be simple types of the form \"type/subtype\", where one or both\nof these can be \"*\". We do not support parameters (i.e. \"; q=\") in\npatterns.\n\nArgs:\naccept_patterns: list of acceptable MIME types.\nmime_type: the mime type we would like to match.\n\nReturns:\nWhether or not mime_type matches (at least) one of these patterns.", "source": "codesearchnet"}
{"code": "def get_item_concept_mapping(self, lang):\n        \n        concepts = self.filter(active=True, lang=lang)\n        return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))", "docstring": "Get mapping of items_ids to concepts containing these items\n\nArgs:\nlang (str): language of concepts\n\nReturns:\ndict: item (int) -> set of concepts (int)", "source": "juraj-google-style"}
{"code": "def get_lonlatalts(self):\n    band = self.filehandle\n    ((xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs)) = self.get_gcps()\n    longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)\n    latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)\n    altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)\n    longitudes.attrs['gcps'] = gcps\n    longitudes.attrs['crs'] = crs\n    latitudes.attrs['gcps'] = gcps\n    latitudes.attrs['crs'] = crs\n    altitudes.attrs['gcps'] = gcps\n    altitudes.attrs['crs'] = crs\n    return (longitudes, latitudes, altitudes)", "docstring": "Obtain GCPs and construct latitude and longitude arrays.\n\nArgs:\nband (gdal band): Measurement band which comes with GCP's\narray_shape (tuple) : The size of the data array\nReturns:\ncoordinates (tuple): A tuple with longitude and latitude arrays", "source": "codesearchnet"}
{"code": "def get(self):\n    chunk_size = self._smallest_buffer()\n    all_full = self._all_full()\n    if all_full:\n        right_context = 0\n        num_frames = (chunk_size - self.current_left_context)\n    else:\n        right_context = self.right_context\n        num_frames = self.min_frames\n    chunk_size_needed = ((num_frames + self.current_left_context) + right_context)\n    if (chunk_size >= chunk_size_needed):\n        data = []\n        keep_frames = (self.left_context + self.right_context)\n        keep_from = max(0, (chunk_size - keep_frames))\n        for index in range(self.num_buffers):\n            data.append(self.buffers[index][:chunk_size])\n            self.buffers[index] = self.buffers[index][keep_from:]\n        if (self.num_buffers == 1):\n            data = data[0]\n        chunk = Chunk(data, self.current_frame, all_full, self.current_left_context, right_context)\n        self.current_left_context = min(self.left_context, chunk_size)\n        self.current_frame = max(((self.current_frame + chunk_size) - keep_frames), 0)\n        return chunk", "docstring": "Get a new chunk if available.\n\nReturns:\nChunk or list: If enough frames are available a chunk is returned. Otherwise None.\nIf ``self.num_buffer >= 1`` a list instead of single chunk is returned.", "source": "codesearchnet"}
{"code": "def MapFile(self, key_path_prefix, registry_file):\n    \n    self._registry_files[key_path_prefix.upper()] = registry_file\n    registry_file.SetKeyPathPrefix(key_path_prefix)", "docstring": "Maps the Windows Registry file to a specific key path prefix.\n\nArgs:\nkey_path_prefix (str): key path prefix.\nregistry_file (WinRegistryFile): Windows Registry file.", "source": "juraj-google-style"}
{"code": "def milliseconds(value: Union[int, float]) -> Duration:\n    return float(value / 1000)", "docstring": "Converts input value from milliseconds to a `Duration` in seconds.\n\nExample:\n```python\n>>> duration = tp.duration.milliseconds(250)\n>>> duration\n0.25\n\n>>> # Usage in a window operation\n>>> a = tp.event_set(\n...     timestamps=[0.5, 1.0, 1.2],\n...     features={\"f1\": [1, 5, -5]}\n... )\n>>> a.moving_sum(window_length=duration)\nindexes: ...\ntimestamps: [0.5 1.  1.2]\n'f1': [1 5 0]\n...\n\n```\n\nArgs:\nvalue: Number of milliseconds.\n\nReturns:\nEquivalent number of seconds.", "source": "github-repos"}
{"code": "def assert_visible(self, selector, testid=None, **kwargs):\n    self.info_log(('Assert visible selector(%s) testid(%s)' % (selector, testid)))\n    highlight = kwargs.get('highlight', BROME_CONFIG['highlight']['highlight_on_assertion_success'])\n    self.debug_log(('effective highlight: %s' % highlight))\n    wait_until_visible = kwargs.get('wait_until_visible', BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible'])\n    self.debug_log(('effective wait_until_visible: %s' % wait_until_visible))\n    if wait_until_visible:\n        self.wait_until_visible(selector, raise_exception=False)\n    element = self.find(selector, raise_exception=False, wait_until_visible=False, wait_until_present=False)\n    if (element and element.is_displayed(raise_exception=False)):\n        if highlight:\n            element.highlight(style=BROME_CONFIG['highlight']['style_on_assertion_success'])\n        if (testid is not None):\n            self.create_test_result(testid, True)\n        return True\n    else:\n        if (testid is not None):\n            self.create_test_result(testid, False)\n        return False", "docstring": "Assert that the element is visible in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntestid (str): the test_id or a str\n\nKwargs:\nwait_until_visible (bool)\nhighlight (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "codesearchnet"}
{"code": "def price(self, valuation_date, market, model=None, pricing_context=None, name=None):\n    del model, pricing_context\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        discount_curve = market.discount_curve\n        discount_factors = discount_curve.get_discount_factor(self._payment_dates)\n        future_cashflows = tf.cast(self._payment_dates >= valuation_date, dtype=self._dtype)\n        cashflow_pvs = self._notional * (future_cashflows * self._daycount_fractions * self._coupon_rate * discount_factors)\n        return tf.math.reduce_sum(tf.reshape(cashflow_pvs, (self._batch_size, self._num_cashflows)), axis=1)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the cashflow stream.\nmodel: Reserved for future use.\npricing_context: Additional context relevant for pricing.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each stream\nbased on the input market data.", "source": "github-repos"}
{"code": "def find_equivalent_sites(self, site):\n        \n        for sites in self.equivalent_sites:\n            if site in sites:\n                return sites\n\n        raise ValueError(\"Site not in structure\")", "docstring": "Finds all symmetrically equivalent sites for a particular site\n\nArgs:\nsite (PeriodicSite): A site in the structure\n\nReturns:\n([PeriodicSite]): List of all symmetrically equivalent sites.", "source": "juraj-google-style"}
{"code": "def watch(self, enable=True, gpsd_protocol=PROTOCOL, devicepath=None):\n        \n        \n        command = '?WATCH={{\"enable\":true,\"{0}\":true}}'.format(gpsd_protocol)\n\n        if gpsd_protocol == 'rare':  \n            command = command.replace('\"rare\":true', '\"raw\":1')\n        if gpsd_protocol == 'raw':  \n            command = command.replace('\"raw\":true', '\"raw\",2')\n        if not enable:\n            command = command.replace('true', 'false')  \n        if devicepath:\n            command = command.replace('}', ',\"device\":\"') + devicepath + '\"}'\n\n        return self.send(command)", "docstring": "watch gpsd in various gpsd_protocols or devices.\nArguments:\nenable: (bool) stream data to socket\ngpsd_protocol: (str) 'json' | 'nmea' | 'rare' | 'raw' | 'scaled' | 'split24' | 'pps'\ndevicepath: (str) device path - '/dev/ttyUSBn' for some number n or '/dev/whatever_works'\nReturns:\ncommand: (str) e.g., '?WATCH={\"enable\":true,\"json\":true};'", "source": "juraj-google-style"}
{"code": "def _start_profiler(self, logdir):\n    if self._profiler_started:\n        return\n    try:\n        profiler.start(logdir=logdir)\n        self._profiler_started = True\n    except errors.AlreadyExistsError as e:\n        logging.error('Failed to start profiler: %s', e.message)", "docstring": "Starts the profiler if currently inactive.\n\nArgs:\nlogdir: Directory where profiler results will be saved.", "source": "github-repos"}
{"code": "def get_internal_urls(self):\n    internal_urls = self.get_subfields('856', 'u', i1='4', i2='0')\n    internal_urls.extend(self.get_subfields('998', 'a'))\n    internal_urls.extend(self.get_subfields('URL', 'u'))\n    return map((lambda x: x.replace('&amp;', '&')), internal_urls)", "docstring": "URL's, which may point to edeposit, aleph, kramerius and so on.\n\nFields ``856u40``, ``998a`` and ``URLu``.\n\nReturns:\nlist: List of internal URLs.", "source": "codesearchnet"}
{"code": "def kill_raylet(self, check_alive=True):\n    self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive)", "docstring": "Kill the raylet.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "codesearchnet"}
{"code": "def _extract_inner_match(self, candidate, offset):\n        \n        for possible_inner_match in _INNER_MATCHES:\n            group_match = possible_inner_match.search(candidate)\n            is_first_match = True\n            while group_match and self._max_tries > 0:\n                if is_first_match:\n                    \n                    group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,\n                                                         candidate[:group_match.start()])\n                    match = self._parse_and_verify(group, offset)\n                    if match is not None:\n                        return match\n                    self._max_tries -= 1\n                    is_first_match = False\n                group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,\n                                                     group_match.group(1))\n                match = self._parse_and_verify(group, offset + group_match.start(1))\n                if match is not None:\n                    return match\n                self._max_tries -= 1\n                group_match = possible_inner_match.search(candidate, group_match.start() + 1)\n        return None", "docstring": "Attempts to extract a match from candidate if the whole candidate\ndoes not qualify as a match.\n\nArguments:\ncandidate -- The candidate text that might contain a phone number\noffset -- The current offset of candidate within text\nReturns the match found, None if none can be found", "source": "juraj-google-style"}
{"code": "def _ImportAll(self, module):\n    aliases = []\n    getattrs = set()\n    ast = self._module_map[module]\n    type_param_names = set()\n    if module == 'http.client':\n        exports = None\n    else:\n        exports = [x for x in ast.constants if x.name.endswith('.__all__')]\n        if exports:\n            exports = exports[0].value\n    for member in sum((ast.constants, ast.type_params, ast.classes, ast.functions, ast.aliases), ()):\n        _, _, member_name = member.name.rpartition('.')\n        if exports and member_name not in exports:\n            continue\n        new_name = self._ModulePrefix() + member_name\n        if isinstance(member, pytd.Function) and member_name == '__getattr__':\n            getattrs.add(member.Replace(name=new_name))\n        else:\n            if isinstance(member, pytd.TypeParameter):\n                type_param_names.add(new_name)\n            elif new_name in type_param_names:\n                continue\n            if member_name.startswith('_'):\n                continue\n            t = pytd.ToType(member, allow_constants=True, allow_functions=True)\n            aliases.append(pytd.Alias(new_name, t))\n    return (aliases, getattrs)", "docstring": "Get the new members that would result from a star import of the module.\n\nArgs:\nmodule: The module name.\n\nReturns:\nA tuple of:\n- a list of new aliases,\n- a set of new __getattr__ functions.", "source": "github-repos"}
{"code": "def visit_and_get_function_nodes(self, definition, first_node):\n    len_before_visiting_func = len(self.nodes)\n    previous_node = self.nodes[(- 1)]\n    entry_node = self.append_node(EntryOrExitNode(('Function Entry ' + definition.name)))\n    if (not first_node):\n        first_node = entry_node\n    self.connect_if_allowed(previous_node, entry_node)\n    function_body_connect_statements = self.stmt_star_handler(definition.node.body)\n    entry_node.connect(function_body_connect_statements.first_statement)\n    exit_node = self.append_node(EntryOrExitNode(('Exit ' + definition.name)))\n    exit_node.connect_predecessors(function_body_connect_statements.last_statements)\n    the_new_nodes = self.nodes[len_before_visiting_func:]\n    return_connection_handler(the_new_nodes, exit_node)\n    return (the_new_nodes, first_node)", "docstring": "Visits the nodes of a user defined function.\n\nArgs:\ndefinition(LocalModuleDefinition): Definition of the function being added.\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.\n\nReturns:\nthe_new_nodes(list[Node]): The nodes added while visiting the function.\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.", "source": "codesearchnet"}
{"code": "def save_driver_logs(driver, prefix):\n    \n    browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox')\n    log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR')\n    if not log_dir:\n        LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs')\n        return\n    elif not os.path.exists(log_dir):\n        os.makedirs(log_dir)\n    if browser_name == 'firefox':\n        \n        \n        \n        log_path = os.path.join(os.getcwd(), 'geckodriver.log')\n        if os.path.exists(log_path):\n            dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix))\n            copyfile(log_path, dest_path)\n        return\n\n    log_types = driver.log_types\n    for log_type in log_types:\n        try:\n            log = driver.get_log(log_type)\n            file_name = os.path.join(\n                log_dir, '{}_{}.log'.format(prefix, log_type)\n            )\n            with open(file_name, 'w') as output_file:\n                for line in log:\n                    output_file.write(\"{}{}\".format(dumps(line), '\\n'))\n        except:  \n            msg = (\n                u\"Could not save browser log of type '{log_type}'. \"\n                u\"It may be that the browser does not support it.\"\n            ).format(log_type=log_type)\n\n            LOGGER.warning(msg, exc_info=True)", "docstring": "Save the selenium driver logs.\n\nThe location of the driver log files can be configured\nby the environment variable `SELENIUM_DRIVER_LOG_DIR`.  If not set,\nthis defaults to the current working directory.\n\nArgs:\ndriver (selenium.webdriver): The Selenium-controlled browser.\nprefix (str): A prefix which will be used in the output file names for the logs.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _scalar(tf_fn, x, promote_to_float=False):\n    x = np_array_ops.asarray(x)\n    if promote_to_float and (not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact)):\n        x = x.astype(np_utils.result_type(float))\n    return tf_fn(x)", "docstring": "Computes the tf_fn(x) for each element in `x`.\n\nArgs:\ntf_fn: function that takes a single Tensor argument.\nx: array_like. Could be an ndarray, a Tensor or any object that can be\nconverted to a Tensor using `ops.convert_to_tensor`.\npromote_to_float: whether to cast the argument to a float dtype if it is not\nalready.\n\nReturns:\nAn ndarray with the same shape as `x`. The default output dtype is\ndetermined by `np_utils.result_type(float)`, unless x is an ndarray with a\nfloating point type, in which case the output type is same as x.dtype.", "source": "github-repos"}
{"code": "def gates_to_idx(gates, qregs):\n    sizes = [qr.size for qr in qregs.values()]\n    reg_idx = np.cumsum(([0] + sizes))\n    regint = {}\n    for (ind, qreg) in enumerate(qregs.values()):\n        regint[qreg] = ind\n    out = np.zeros((2 * len(gates)), dtype=np.int32)\n    for (idx, gate) in enumerate(gates):\n        out[(2 * idx)] = (reg_idx[regint[gate[0][0]]] + gate[0][1])\n        out[((2 * idx) + 1)] = (reg_idx[regint[gate[1][0]]] + gate[1][1])\n    return out", "docstring": "Converts gate tuples into a nested list of integers.\n\nArgs:\ngates (list): List of (QuantumRegister, int) pairs\nrepresenting gates.\nqregs (dict): List of )QuantumRegister, int) tuples.\n\nReturns:\nlist: Nested list of integers for gates.", "source": "codesearchnet"}
{"code": "def to_sql(cls, qc, **kwargs):\n        \n        \n        \n        \n        \n        \n\n        empty_df = qc.head(1).to_pandas().head(0)\n        empty_df.to_sql(**kwargs)\n        \n        kwargs[\"if_exists\"] = \"append\"\n        columns = qc.columns\n\n        def func(df, **kwargs):\n            df.columns = columns\n            df.to_sql(**kwargs)\n            return pandas.DataFrame()\n\n        map_func = qc._prepare_method(func, **kwargs)\n        result = qc._map_across_full_axis(1, map_func)\n        \n        result.to_pandas()", "docstring": "Write records stored in a DataFrame to a SQL database.\nArgs:\nqc: the query compiler of the DF that we want to run to_sql on\nkwargs: parameters for pandas.to_sql(**kwargs)", "source": "juraj-google-style"}
{"code": "def from_keras_log(csv_path, output_dir_path, **kwargs):\n    \n    \n    data = pd.read_csv(csv_path, sep=None, engine='python')\n    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)", "docstring": "Plot accuracy and loss from a Keras CSV log.\n\nArgs:\ncsv_path: The path to the CSV log with the actual data.\noutput_dir_path: The path to the directory where the resultings plots\nshould end up.", "source": "juraj-google-style"}
{"code": "def series_with_slh(self, other):\n        \n        new_S = self.S * other.S\n        new_L = self.S * other.L + self.L\n\n        def ImAdjoint(m):\n            return (m.H - m) * (I / 2)\n\n        delta = ImAdjoint(self.L.adjoint() * self.S * other.L)\n\n        if isinstance(delta, Matrix):\n            new_H = self.H + other.H + delta[0, 0]\n        else:\n            assert delta == 0\n            new_H = self.H + other.H\n\n        return SLH(new_S, new_L, new_H)", "docstring": "Series product with another :class:`SLH` object\n\nArgs:\nother (SLH): An upstream SLH circuit.\n\nReturns:\nSLH: The combined system.", "source": "juraj-google-style"}
{"code": "def _sequence_search(self, start: GridQubit, current: List[GridQubit]) -> List[GridQubit]:\n    used = set(current)\n    seq = []\n    n = start\n    while (n is not None):\n        seq.append(n)\n        used.add(n)\n        n = self._choose_next_qubit(n, used)\n    return seq", "docstring": "Search for the continuous linear sequence from the given qubit.\n\nThis method is called twice for the same starting qubit, so that\nsequences that begin and end on this qubit are searched for.\n\nArgs:\nstart: The first qubit, where search should be trigerred from.\ncurrent: Previously found linear sequence, which qubits are\nforbidden to use during the search.\n\nReturns:\nContinuous linear sequence that begins with the starting qubit and\ndoes not contain any qubits from the current list.", "source": "codesearchnet"}
{"code": "def set_heat_pump_mode(self, device_label, mode):\n        \n        response = None\n        try:\n            response = requests.put(\n                urls.set_heatpump_state(self._giid, device_label),\n                headers={\n                    'Accept': 'application/json',\n                    'Content-Type': 'application/json',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                data=json.dumps({'mode': mode}))\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Set heatpump mode\nArgs:\nmode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'", "source": "juraj-google-style"}
{"code": "def __call__(self, fn):\n        \n\n        def completion(app, *args, **kwargs):\n            \n            app.exit_message = self.msg\n            return fn(app, *args, **kwargs)\n\n        return completion", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def _check_arg_equality(node_a, node_b, attr_name):\n    \n    return getattr(node_a, attr_name) == getattr(node_b, attr_name)", "docstring": "Check equality of nodes based on the comparison of their attributes named attr_name.\n\nArgs:\nnode_a (astroid.node): first node to compare.\nnode_b (astroid.node): second node to compare.\nattr_name (str): name of the nodes attribute to use for comparison.\n\nReturns:\nbool: True if node_a.attr_name == node_b.attr_name, False otherwise.", "source": "juraj-google-style"}
{"code": "def start_publishing(mysql_settings, **kwargs):\n    \n    _logger.info('Start publishing from %s with:\\n%s'\n                 % (mysql_settings, kwargs))\n\n    kwargs.setdefault('server_id', random.randint(1000000000, 4294967295))\n    kwargs.setdefault('freeze_schema', True)\n\n    \n    stream = pymysqlreplication.BinLogStreamReader(\n        mysql_settings,\n        only_events=[row_event.DeleteRowsEvent,\n                     row_event.UpdateRowsEvent,\n                     row_event.WriteRowsEvent],\n        **kwargs\n    )\n    \n\n    for event in stream:\n        \n        if not isinstance(event, row_event.RowsEvent):\n            continue\n\n        _logger.debug('Send binlog signal \"%s@%s.%s\"' % (\n            event.__class__.__name__,\n            event.schema,\n            event.table\n        ))\n        signals.binlog_signal.send(event, stream=stream)\n        signals.binlog_position_signal.send((stream.log_file, stream.log_pos))", "docstring": "Start publishing MySQL row-based binlog events to blinker signals\n\nArgs:\nmysql_settings (dict): information to connect to mysql via pymysql\n**kwargs: The additional kwargs will be passed to\n:py:class:`pymysqlreplication.BinLogStreamReader`.", "source": "juraj-google-style"}
{"code": "def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode:\n        \n        \n        logger.info('Representing {} of class {}'.format(\n            data, self.class_.__name__))\n\n        \n        represented = dumper.represent_str(data.name)\n\n        \n        snode = Node(represented)\n        if hasattr(self.class_, 'yatiml_sweeten'):\n            self.class_.yatiml_sweeten(snode)\n            represented = snode.yaml_node\n\n        logger.debug('End representing {}'.format(data))\n        return represented", "docstring": "Represents the class as a ScalarNode.\n\nArgs:\ndumper: The dumper to use.\ndata: The user-defined object to dump.\n\nReturns:\nA yaml.Node representing the object.", "source": "juraj-google-style"}
{"code": "def fit2d(samples, e_x, e_y, remove_zeros=False, p_est=None, **kw):\n    if (p_est is None):\n        height = (len(e_y) - 1)\n        width = (len(e_x) - 1)\n        (p_est, _) = np.histogramdd(samples, (e_x, e_y))\n    else:\n        p_est = p_est.T\n        (width, height) = p_est.shape\n    shape = p_est.shape\n    p_est = (p_est / sum(p_est.flat)).reshape(shape)\n    mx = p_est.sum(1)\n    my = p_est.sum(0)\n    p_est = p_est.T.flatten()\n    (basis, knots) = spline_base2d(width, height, marginal_x=mx, marginal_y=my, **kw)\n    model = linear_model.BayesianRidge()\n    if remove_zeros:\n        non_zero = (~ (p_est == 0))\n        model.fit(basis[(:, non_zero)].T, p_est[non_zero])\n    else:\n        non_zero = (p_est >= 0)\n        p_est[((~ non_zero), :)] = np.finfo(float).eps\n        model.fit(basis.T, p_est)\n    return (model.predict(basis.T).reshape((height, width)), p_est.reshape((height, width)), knots)", "docstring": "Fits a 2D distribution with splines.\n\nInput:\nsamples: Matrix or list of arrays\nIf matrix, it must be of size Nx2, where N is the number of\nobservations. If list, it must contain two arrays of length\nN.\ne_x: Array\nEdges that define the events in the probability\ndistribution along the x direction. For example,\ne_x[0] < samples[0] <= e_x[1] picks out all\nsamples that are associated with the first event.\ne_y: Array\nSee e_x, but for the y direction.\nremove_zeros: Bool\nIf True, events that are not observed will not\nbe part of the fitting process. If False, those\nevents will be modelled as finfo('float').eps\n**kw: Arguments that are passed on to spline_bse1d.\n\nReturns:\ndistribution: Array\nAn array that gives an estimate of probability for\nevents defined by e.\nknots: Tuple of arrays\nSequence of knots that were used for the spline basis (x,y)", "source": "codesearchnet"}
{"code": "def pad_to_best_fit(images: 'torch.Tensor', target_size: Tuple[int, int], background_color: Union[int, Tuple[int, int, int]]=0) -> 'torch.Tensor':\n    num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]\n    if isinstance(background_color, int):\n        background_color = [background_color] + [0] * (num_channels - 1)\n    elif len(background_color) != num_channels:\n        raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels')\n    height, width = images.shape[-2:]\n    target_height, target_width = target_size\n    paste_x_right = target_width - width\n    paste_y_right = target_height - height\n    padded_images = F.pad(images, padding=[0, 0, paste_x_right, paste_y_right], fill=background_color)\n    return padded_images", "docstring": "Pads an image to fit the target size.\n\nArgs:\nimages (`np.ndarray`):\nThe images to pad.\nbackground_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0):\nThe color to use for the padding. Can be an integer for single channel or a\ntuple of integers representing for multi-channel images. If passed as integer\nin mutli-channel mode, it will default to `0` in subsequent channels.\nReturns:\n`torch.Tensor`: The padded images.", "source": "github-repos"}
{"code": "def ismount(self, path):\n        \n        path = make_string_path(path)\n        if not path:\n            return False\n        normed_path = self.filesystem.absnormpath(path)\n        sep = self.filesystem._path_separator(path)\n        if self.filesystem.is_windows_fs:\n            if self.filesystem.alternative_path_separator is not None:\n                path_seps = (\n                    sep, self.filesystem._alternative_path_separator(path)\n                )\n            else:\n                path_seps = (sep, )\n            drive, rest = self.filesystem.splitdrive(normed_path)\n            if drive and drive[:1] in path_seps:\n                return (not rest) or (rest in path_seps)\n            if rest in path_seps:\n                return True\n        for mount_point in self.filesystem.mount_points:\n            if normed_path.rstrip(sep) == mount_point.rstrip(sep):\n                return True\n        return False", "docstring": "Return true if the given path is a mount point.\n\nArgs:\npath: Path to filesystem object to be checked\n\nReturns:\n`True` if path is a mount point added to the fake file system.\nUnder Windows also returns True for drive and UNC roots\n(independent of their existence).", "source": "juraj-google-style"}
{"code": "def Open(self, file_object, ascii_codepage='cp1252'):\n    \n    registry_file = dfwinreg_regf.REGFWinRegistryFile(\n        ascii_codepage=ascii_codepage)\n\n    \n    \n    registry_file.Open(file_object)\n\n    return registry_file", "docstring": "Opens a Windows Registry file-like object.\n\nArgs:\nfile_object (dfvfs.FileIO): Windows Registry file-like object.\nascii_codepage (Optional[str]): ASCII string codepage.\n\nReturns:\nWinRegistryFile: Windows Registry file or None.", "source": "juraj-google-style"}
{"code": "def test_encode_with_backbone_element_constraint_succeeds(self, fhir_path_expression: str, expected_sql_expression: str, expected_fhir_path_sql_expression: str, expected_fields_referenced: List[str]):\n    constraint = self.build_constraint(fhir_path_expression=fhir_path_expression)\n    self.assert_constraint_is_equal_to_expression(base_id='Patient', element_definition_id='Patient.contact.name', constraint=constraint, expected_sql_expression=expected_sql_expression, expected_fhir_path_sql_expression=expected_fhir_path_sql_expression, expected_fields_referenced=expected_fields_referenced)", "docstring": "Tests encoding of a \"transitive constraint\" defined on a BackboneElement.\n\nA \"transitive constraint\" is a constraint defined relative to a resource\nelsewhere in the FHIR resource graph than what we're querying against.\n\nArgs:\nfhir_path_expression: The FHIRPath expression to encode.\nexpected_sql_expression: The expected generated Standard SQL from v1.\nexpected_fhir_path_sql_expression: The expected generated Standard SQL\nwithout any contextual subqueries.\nexpected_fields_referenced: The expected fields_referenced_by_expression\nattribute on the resulting constraint.", "source": "github-repos"}
{"code": "def flownet2_fusion(self, x):\n    with argscope([tf.layers.conv2d], activation=(lambda x: tf.nn.leaky_relu(x, 0.1)), padding='valid', strides=2, kernel_size=3, data_format='channels_first'), argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity, data_format='channels_first', strides=2, kernel_size=4):\n        conv0 = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1)\n        x = tf.layers.conv2d(pad(conv0, 1), 64, name='conv1')\n        conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1)\n        x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2')\n        conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1)\n        flow2 = tf.layers.conv2d(pad(conv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)\n        flow2_up = tf.layers.conv2d_transpose(flow2, 2, name='upsampled_flow2_to_1')\n        x = tf.layers.conv2d_transpose(conv2, 32, name='deconv1', activation=(lambda x: tf.nn.leaky_relu(x, 0.1)))\n        concat1 = tf.concat([conv1, x, flow2_up], axis=1, name='concat1')\n        interconv1 = tf.layers.conv2d(pad(concat1, 1), 32, strides=1, name='inter_conv1', activation=tf.identity)\n        flow1 = tf.layers.conv2d(pad(interconv1, 1), 2, name='predict_flow1', strides=1, activation=tf.identity)\n        flow1_up = tf.layers.conv2d_transpose(flow1, 2, name='upsampled_flow1_to_0')\n        x = tf.layers.conv2d_transpose(concat1, 16, name='deconv0', activation=(lambda x: tf.nn.leaky_relu(x, 0.1)))\n        concat0 = tf.concat([conv0, x, flow1_up], axis=1, name='concat0')\n        interconv0 = tf.layers.conv2d(pad(concat0, 1), 16, strides=1, name='inter_conv0', activation=tf.identity)\n        flow0 = tf.layers.conv2d(pad(interconv0, 1), 2, name='predict_flow0', strides=1, activation=tf.identity)\n        return tf.identity(flow0, name='flow2')", "docstring": "Architecture in Table 4 of FlowNet 2.0.\n\nArgs:\nx: NCHW tensor, where C=11 is the concatenation of 7 items of [3, 2, 2, 1, 1, 1, 1] channels.", "source": "codesearchnet"}
{"code": "def str_of_constant(self, printer: 'Callable[[_base.BaseValue], str]') -> str:\n    del printer\n    return repr(self.pyval)", "docstring": "Get a string representation of this constant.\n\nArgs:\nprinter: A BaseValue -> str function that will be used to print abstract\nvalues.\n\nReturns:\nA string of self.pyval.", "source": "github-repos"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        num_image_tokens = [self.image_seq_length] * len(image_sizes)\n        num_image_patches = [1] * len(image_sizes)\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (List[List[str]], *optional*):\nThe input sizes formatted as (height, width) per each image.\nReturns:\nDict[str, List[int]]: A dictionary mapping each modality (\"image\", \"video\", \"audio\")\nto a list containing the number of placeholder tokens required. If the model doesn't accept\na certain modality or no input sizes are provided, the dict value is set to an empty list.", "source": "github-repos"}
{"code": "def CompleteBreakpoint(self, breakpoint_id):\n    \n    with self._lock:\n      self._completed.add(breakpoint_id)\n      if breakpoint_id in self._active:\n        self._active.pop(breakpoint_id).Clear()", "docstring": "Marks the specified breaking as completed.\n\nAppends the ID to set of completed breakpoints and clears it.\n\nArgs:\nbreakpoint_id: breakpoint ID to complete.", "source": "juraj-google-style"}
{"code": "def as_dict(self, voigt=False):\n        \n        input_array = self.voigt if voigt else self\n        d = {\"@module\": self.__class__.__module__,\n             \"@class\": self.__class__.__name__,\n             \"input_array\": input_array.tolist()}\n        if voigt:\n            d.update({\"voigt\": voigt})\n        return d", "docstring": "Serializes the tensor object\n\nArgs:\nvoigt (bool): flag for whether to store entries in\nvoigt-notation.  Defaults to false, as information\nmay be lost in conversion.\n\nReturns (Dict):\nserialized format tensor object", "source": "juraj-google-style"}
{"code": "def map_feeds(self, ad_feed, ad_creative_assignment, ad_placement_assignment, ad_event_tag_assignment, placement_feed, event_tag_profile_feed):\n    for ad in ad_feed:\n        ad['creative_assignment'] = [association for association in ad_creative_assignment if self._assignment_matches(ad, association)]\n        ad['placement_assignment'] = [association for association in ad_placement_assignment if self._assignment_matches(ad, association)]\n        if ad.get(FieldMap.PLACEMENT_ID, None) or ad.get(FieldMap.PLACEMENT_NAME, None):\n            ad['placement_assignment'].append(ad)\n        ad['event_tag_assignment'] = [association for association in ad_event_tag_assignment if self._assignment_matches(ad, association)]\n        if ad.get(FieldMap.EVENT_TAG_ID, None) or ad.get(FieldMap.EVENT_TAG_NAME, None):\n            ad['event_tag_assignment'].append(ad)\n        ad['placement_event_tag_profile'] = []\n        for placement_assignment in ad['placement_assignment']:\n            placement = self._placement_dao.get(placement_assignment, required=True)\n            if placement:\n                ad_placement = None\n                for item in placement_feed:\n                    if int(placement['id']) == item.get(FieldMap.PLACEMENT_ID, None):\n                        ad_placement = item\n                if ad_placement:\n                    event_tag_profile_name = ad_placement.get(FieldMap.EVENT_TAG_PROFILE_NAME, '')\n                    if event_tag_profile_name:\n                        ad['placement_event_tag_profile'] += [event_tag_profile for event_tag_profile in event_tag_profile_feed if event_tag_profile.get(FieldMap.EVENT_TAG_PROFILE_NAME, None) == event_tag_profile_name]", "docstring": "Maps subfeeds to the corresponding ad.\n\nThe Ad is an object that has several other dependent entities, they could be\nother entities like creative assignment, or complex sub objects in the ad\nentity like the placement assignment. This function maps those feeds by ID\nand injects the child feeds into the feed item representing the ad.\n\nAlso, the ad level is where placement event tag profiles are assigned, and\ntherefore this function is also responsible to determining if the placement\nevent tag profile should be used, or if the direct event tag assignment in\nthe ad should be used.\n\nArgs:\nad_feed: Ad feed.\nad_creative_assignment: Ad creative assignment feed.\nad_placement_assignment: Ad placement assignment feed.\nplacement_feed: Placement feed.\nevent_tag_profile_feed: Event tag profile feed.", "source": "github-repos"}
{"code": "def __init__(self, urns, service, managed_replacement=None):\n    self._urns = urns\n    self._service = service\n    self._schema_transforms = None\n    self._managed_replacement = managed_replacement", "docstring": "Initializes the ExternalProvider.\n\nArgs:\nurns: a set of URNs that uniquely identify the transforms supported.\nservice: the gradle target that identified the expansion service jar.\nmanaged_replacement (Optional): a map that defines the transform for\nwhich the SDK may replace the transform with an available managed\ntransform.", "source": "github-repos"}
{"code": "def _infer_all_output_dims(self, inputs):\n    \n    \n    \n    \n    batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)\n    out_channels = (self.output_channels,)\n\n    \n    \n    if self._n == 1:\n      out_shape = (1,) + self.output_shape\n    else:\n      out_shape = self.output_shape\n\n    if self._data_format.startswith(\"NC\"):\n      out_shape_tuple = out_channels + out_shape\n    elif self._data_format.startswith(\"N\") and self._data_format.endswith(\"C\"):\n      out_shape_tuple = out_shape + out_channels\n\n    output_shape = tf.concat([batch_size, out_shape_tuple], 0)\n    return output_shape", "docstring": "Calculate the output shape for `inputs` after a deconvolution.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\n\nReturns:\noutput_shape: A tensor of shape (`batch_size`, `conv_output_shape`).", "source": "juraj-google-style"}
{"code": "def _mel_to_hertz(mel_values, name=None):\n    with ops.name_scope(name, 'mel_to_hertz', [mel_values]):\n        mel_values = ops.convert_to_tensor(mel_values)\n        return _MEL_BREAK_FREQUENCY_HERTZ * (math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0)", "docstring": "Converts frequencies in `mel_values` from the mel scale to linear scale.\n\nArgs:\nmel_values: A `Tensor` of frequencies in the mel scale.\nname: An optional name for the operation.\n\nReturns:\nA `Tensor` of the same shape and type as `mel_values` containing linear\nscale frequencies in Hertz.", "source": "github-repos"}
{"code": "def bridge_delete(br, if_exists=True):\n    \n    param_if_exists = _param_if_exists(if_exists)\n    cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Deletes bridge and all of  its  ports.\n\nArgs:\nbr: A string - bridge name\nif_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_delete br0", "source": "juraj-google-style"}
{"code": "def calculate_energy(self, energies):\n        \n        return sum([amt * energies[c] for amt, c in zip(self._coeffs,\n                                                        self._all_comp)])", "docstring": "Calculates the energy of the reaction.\n\nArgs:\nenergies ({Composition: float}): Energy for each composition.\nE.g ., {comp1: energy1, comp2: energy2}.\n\nReturns:\nreaction energy as a float.", "source": "juraj-google-style"}
{"code": "def import_tsv(self, tsv_file):\n        \n        r = fapi.upload_entities_tsv(self.namespace, self.name,\n                                     self.tsv_file, self.api_url)\n        fapi._check_response_code(r, 201)", "docstring": "Upload entity data to workspace from tsv loadfile.\n\nArgs:\ntsv_file (file): Tab-delimited file of entity data", "source": "juraj-google-style"}
{"code": "def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):\n    \n\n\n\n    for tile in tiles:\n        tile.save(filename=tile.generate_filename(prefix=prefix,\n                                                  directory=directory,\n                                                  format=format), \n                                                  format=format)\n    return tuple(tiles)", "docstring": "Write image files to disk. Create specified folder(s) if they\ndon't exist. Return list of :class:`Tile` instance.\n\nArgs:\ntiles (list):  List, tuple or set of :class:`Tile` objects to save.\nprefix (str):  Filename prefix of saved tiles.\n\nKwargs:\ndirectory (str):  Directory to save tiles. Created if non-existant.\n\nReturns:\nTuple of :class:`Tile` instances.", "source": "juraj-google-style"}
{"code": "def pad(self, image: np.ndarray, size: Dict[str, int], aspect_ratio: Tuple[int, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    _validate_size(size)\n    image_height, image_width = get_image_size(image, channel_dim=input_data_format)\n    num_tiles_height, num_tiles_width = aspect_ratio\n    padded_height = num_tiles_height * size['height']\n    padded_width = num_tiles_width * size['width']\n    pad_size = ((0, padded_height - image_height), (0, padded_width - image_width))\n    image = pad(image, pad_size, mode=PaddingMode.CONSTANT, constant_values=0, data_format=data_format, input_data_format=input_data_format)\n    return image", "docstring": "Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is\n(1, 2), the image will be padded to 224x448.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\naspect_ratio (`Tuple[int, int]`):\nThe aspect ratio of the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.\n\nReturns:\n`np.ndarray`: The padded image.", "source": "github-repos"}
{"code": "def send_message(host, data, timeout=None, properties=None):\n    \n    channel = _get_channel(host, timeout)\n\n    if not properties:\n        properties = pika.BasicProperties(\n            content_type=\"application/json\",\n            delivery_mode=2,\n            headers={\"UUID\": str(uuid.uuid4())}\n        )\n\n    parameters = settings.get_amqp_settings()[host]\n\n    channel.basic_publish(\n        exchange=parameters[\"exchange\"],\n        routing_key=parameters[\"in_key\"],\n        properties=properties,\n        body=data\n    )", "docstring": "Send message to given `host`.\n\nArgs:\nhost (str): Specified host: aleph/ftp/whatever available host.\ndata (str): JSON data.\ntimeout (int, default None): How much time wait for connection.", "source": "juraj-google-style"}
{"code": "def VisitUnionType(self, union):\n    if not any((isinstance(t, pytd.GenericType) for t in union.type_list)):\n        return union\n    union = pytd_utils.JoinTypes(union.type_list)\n    if not isinstance(union, pytd.UnionType):\n        union = pytd.UnionType((union,))\n    merge_tuples = self._should_merge(pytd.TupleType, union)\n    merge_callables = self._should_merge(pytd.CallableType, union)\n    if merge_tuples or merge_callables:\n        type_list = []\n        for t in union.type_list:\n            if merge_tuples and isinstance(t, pytd.TupleType):\n                t = pytd.GenericType(base_type=t.base_type, parameters=(pytd_utils.JoinTypes(t.parameters),))\n            elif merge_callables and isinstance(t, pytd.CallableType):\n                t = pytd.GenericType(base_type=t.base_type, parameters=(pytd.AnythingType(), t.ret))\n            type_list.append(t)\n        union = union.Replace(type_list=tuple(type_list))\n    collect = {}\n    has_redundant_base_types = False\n    for t in union.type_list:\n        if isinstance(t, pytd.GenericType):\n            key = self._key(t)\n            if key in collect:\n                has_redundant_base_types = True\n                collect[key] = tuple((pytd_utils.JoinTypes([p1, p2]) for p1, p2 in zip(collect[key], t.parameters)))\n            else:\n                collect[key] = t.parameters\n    if not has_redundant_base_types:\n        return union\n    result = pytd.NothingType()\n    done = set()\n    for t in union.type_list:\n        if isinstance(t, pytd.GenericType):\n            key = self._key(t)\n            if key in done:\n                continue\n            parameters = collect[key]\n            add = t.Replace(parameters=tuple((p.Visit(CombineContainers()) for p in parameters)))\n            done.add(key)\n        else:\n            add = t\n        result = pytd_utils.JoinTypes([result, add])\n    return result", "docstring": "Push unions down into containers.\n\nThis collects similar container types in unions and merges them into\nsingle instances with the union type pushed down to the element_type level.\n\nArguments:\nunion: A pytd.Union instance. Might appear in a parameter, a return type,\na constant type, etc.\n\nReturns:\nA simplified pytd.Union.", "source": "github-repos"}
{"code": "def country_code_for_valid_region(region_code):\n    \n    metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)\n    if metadata is None:\n        raise Exception(\"Invalid region code %s\" % region_code)\n    return metadata.country_code", "docstring": "Returns the country calling code for a specific region.\n\nFor example, this would be 1 for the United States, and 64 for New\nZealand.  Assumes the region is already valid.\n\nArguments:\nregion_code -- The region that we want to get the country calling code for.\n\nReturns the country calling code for the region denoted by region_code.", "source": "juraj-google-style"}
{"code": "def unpack(self, buff, offset=0):\n        \n        super().unpack(buff, offset)\n        if self.tpid.value:\n            self._validate()\n            self.tpid = self.tpid.value\n            self.pcp = self._tci.value >> 13\n            self.cfi = (self._tci.value >> 12) & 1\n            self.vid = self._tci.value & 4095\n        else:\n            self.tpid = EtherType.VLAN\n            self.pcp = None\n            self.cfi = None\n            self.vid = None", "docstring": "Unpack a binary struct into this object's attributes.\n\nReturn the values instead of the lib's basic types.\n\nAfter unpacking, the abscence of a `tpid` value causes the assignment\nof None to the field values to indicate that there is no VLAN\ninformation.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def timeRange(start: datetime.time, end: datetime.time, step: float) -> Iterator[datetime.datetime]:\n    assert (step > 0)\n    start = _fillDate(start)\n    end = _fillDate(end)\n    delta = datetime.timedelta(seconds=step)\n    t = start\n    while (t < datetime.datetime.now()):\n        t += delta\n    while (t <= end):\n        waitUntil(t)\n        (yield t)\n        t += delta", "docstring": "Iterator that waits periodically until certain time points are\nreached while yielding those time points.\n\nArgs:\nstart: Start time, can be specified as datetime.datetime,\nor as datetime.time in which case today is used as the date\nend: End time, can be specified as datetime.datetime,\nor as datetime.time in which case today is used as the date\nstep (float): The number of seconds of each period", "source": "codesearchnet"}
{"code": "def _dominant_task_for_jobs(tasks):\n    per_job = _group_tasks_by_jobid(tasks)\n    ret = []\n    for job_id in per_job.keys():\n        tasks_in_salience_order = sorted(per_job[job_id], key=_importance_of_task)\n        ret.append(tasks_in_salience_order[0])\n    return ret", "docstring": "A list with, for each job, its dominant task.\n\nThe dominant task is the one that exemplifies its job's\nstatus. It is either:\n- the first (FAILURE or CANCELED) task, or if none\n- the first RUNNING task, or if none\n- the first SUCCESS task.\n\nArgs:\ntasks: a list of tasks to consider\n\nReturns:\nA list with, for each job, its dominant task.", "source": "codesearchnet"}
{"code": "def sys_check_for_event(\n    mask: int, k: Optional[Key], m: Optional[Mouse]\n) -> int:\n    \n    return int(\n        lib.TCOD_sys_check_for_event(\n            mask, k.key_p if k else ffi.NULL, m.mouse_p if m else ffi.NULL\n        )\n    )", "docstring": "Check for and return an event.\n\nArgs:\nmask (int): :any:`Event types` to wait for.\nk (Optional[Key]): A tcod.Key instance which might be updated with\nan event.  Can be None.\nm (Optional[Mouse]): A tcod.Mouse instance which might be updated\nwith an event.  Can be None.\n\n.. deprecated:: 9.3\nUse the :any:`tcod.event.get` function to check for events.", "source": "juraj-google-style"}
{"code": "def GetFormatterObject(cls, data_type):\n    \n    data_type = data_type.lower()\n    if data_type not in cls._formatter_objects:\n      formatter_object = None\n\n      if data_type in cls._formatter_classes:\n        formatter_class = cls._formatter_classes[data_type]\n        \n        \n        formatter_object = formatter_class()\n\n      if not formatter_object:\n        logger.warning(\n            'Using default formatter for data type: {0:s}'.format(data_type))\n        formatter_object = default.DefaultFormatter()\n\n      cls._formatter_objects[data_type] = formatter_object\n\n    return cls._formatter_objects[data_type]", "docstring": "Retrieves the formatter object for a specific data type.\n\nArgs:\ndata_type (str): data type.\n\nReturns:\nEventFormatter: corresponding formatter or the default formatter if\nnot available.", "source": "juraj-google-style"}
{"code": "def _load_saved_model(self, saved_model_dir, saved_model_tags):\n    graph = _ops.Graph()\n    saved_model = _loader_impl.SavedModelLoader(saved_model_dir)\n    saved_model.load_graph(graph, tags=saved_model_tags)\n    meta_graph = saved_model.get_meta_graph_def_from_tags(saved_model_tags)\n    graph_def = meta_graph.graph_def\n    signature_def = meta_graph.signature_def[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n    input_tensors = [graph.get_tensor_by_name(signature_def.inputs[key].name) for key in signature_def.inputs]\n    output_tensors = [graph.get_tensor_by_name(signature_def.outputs[key].name) for key in signature_def.outputs]\n    return (graph_def, input_tensors, output_tensors)", "docstring": "Load graph_def from saved model with the default serving signature key.\n\nArgs:\nsaved_model_dir: Directory of the SavedModel.\nsaved_model_tags: Set of tags identifying the MetaGraphDef within the\nSavedModel to analyze.\n\nReturns:\ngraph_def: The loaded GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.", "source": "github-repos"}
{"code": "def equal(x, y):\n    return math_ops.equal(x, y)", "docstring": "Element-wise equality between two tensors.\n\nArgs:\nx: Tensor or variable.\ny: Tensor or variable.\n\nReturns:\nA bool tensor.", "source": "github-repos"}
{"code": "def alloc(self):\n    if (not self._free):\n        self._expand()\n    id = self._free.pop()\n    self._used.add(id)\n    return id", "docstring": "Allocate an ID value and return it.\n\nRaises:\nValueError: Out of capacity in ID pool.", "source": "codesearchnet"}
{"code": "def to_csv(self, filename: str, latexify_names: bool = False):\n        \n\n        elements = set()\n        for entry in self.entries:\n            elements.update(entry.composition.elements)\n        elements = sorted(list(elements), key=lambda a: a.X)\n        writer = csv.writer(open(filename, \"w\"), delimiter=unicode2str(\",\"),\n                            quotechar=unicode2str(\"\\\"\"),\n                            quoting=csv.QUOTE_MINIMAL)\n        writer.writerow([\"Name\"] + elements + [\"Energy\"])\n        for entry in self.entries:\n            row = [entry.name if not latexify_names\n                   else re.sub(r\"([0-9]+)\", r\"_{\\1}\", entry.name)]\n            row.extend([entry.composition[el] for el in elements])\n            row.append(entry.energy)\n            writer.writerow(row)", "docstring": "Exports PDEntries to a csv\n\nArgs:\nfilename: Filename to write to.\nentries: PDEntries to export.\nlatexify_names: Format entry names to be LaTex compatible,\ne.g., Li_{2}O", "source": "juraj-google-style"}
{"code": "def get_path(self, temp_ver):\n    if (temp_ver not in self):\n        raise RuntimeError('Template: {} not present'.format(temp_ver.name))\n    return self._prefixed(temp_ver.name)", "docstring": "Get the path of the given version in this store\n\nArgs:\ntemp_ver TemplateVersion: version to look for\n\nReturns:\nstr: The path to the template version inside the store\n\nRaises:\nRuntimeError: if the template is not in the store", "source": "codesearchnet"}
{"code": "def convertDateStrToDateTimeStr(date, time='00:00:00'):\n    if not date == None:\n        date = '%sT%sZ' % (date, time)\n    return date", "docstring": "Convert Date string (YYYY-MM-DD) to a datetime string by adding the desired time (YYYY-MM-DDTHH:mm:SSZ)\n\nArgs:\ndate: the date as a string to be converted\ntime: the time as a string to be added to the date\n\nReturns:\nA string representation of a datetime in the following\nformat YYYY-MM-DDTHH:mm:SSZ", "source": "github-repos"}
{"code": "def GetValidHostsForCert(cert):\n    if ('subjectAltName' in cert):\n        return [x[1] for x in cert['subjectAltName'] if (x[0].lower() == 'dns')]\n    else:\n        return [x[0][1] for x in cert['subject'] if (x[0][0].lower() == 'commonname')]", "docstring": "Returns a list of valid host globs for an SSL certificate.\n\nArgs:\ncert: A dictionary representing an SSL certificate.\nReturns:\nlist: A list of valid host globs.", "source": "codesearchnet"}
{"code": "def open_streaming_interface(self):\n    super(ReferenceDevice, self).open_streaming_interface()\n    self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_OPEN)\n    return []", "docstring": "Called when someone opens a streaming interface to the device.\n\nThis method will automatically notify sensor_graph that there is a\nstreaming interface opened.\n\nReturns:\nlist: A list of IOTileReport objects that should be sent out\nthe streaming interface.", "source": "codesearchnet"}
{"code": "def ParseLocalEntryRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):\n    query_hash = hash(query)\n    inode_number = self._GetRowValue(query_hash, row, 'inode_number')\n    local_path = self.GetLocalPath(inode_number, cache, database)\n    event_data = GoogleDriveSnapshotLocalEntryEventData()\n    event_data.path = local_path\n    event_data.query = query\n    event_data.size = self._GetRowValue(query_hash, row, 'size')\n    timestamp = self._GetRowValue(query_hash, row, 'modified')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a local entry row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (Optional[SQLiteCache]): cache.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "codesearchnet"}
{"code": "def optionally_with_args(phase, **kwargs):\n    if isinstance(phase, PhaseGroup):\n        return phase.with_args(**kwargs)\n    if isinstance(phase, collections.Iterable):\n        return [optionally_with_args(p, **kwargs) for p in phase]\n    if (not isinstance(phase, phase_descriptor.PhaseDescriptor)):\n        phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase)\n    return phase.with_known_args(**kwargs)", "docstring": "Apply only the args that the phase knows.\n\nIf the phase has a **kwargs-style argument, it counts as knowing all args.\n\nArgs:\nphase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or\niterable of those, the phase or phase group (or iterable) to apply\nwith_args to.\n**kwargs: arguments to apply to the phase.\n\nReturns:\nphase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated\nargs.", "source": "codesearchnet"}
{"code": "def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):\n    \n    diff_dict = LIVVDict()\n    model_data = model_bundle.parse_config(model_config)\n    bench_data = bench_bundle.parse_config(bench_config)\n    if model_data == {} and bench_data == {}:\n        return elements.error(\"Configuration Comparison\",\n                              \"Could not open file: \" + model_config.split(os.path.sep)[-1])\n\n    model_sections = set(six.iterkeys(model_data))\n    bench_sections = set(six.iterkeys(bench_data))\n    all_sections = set(model_sections.union(bench_sections))\n\n    for s in all_sections:\n        model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()\n        bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()\n        all_vars = set(model_vars.union(bench_vars))\n        for v in all_vars:\n            model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'\n            bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'\n            same = True if model_val == bench_val and model_val != 'NA' else False\n            diff_dict[s][v] = (same, model_val, bench_val)\n    return elements.file_diff(\"Configuration Comparison\", diff_dict)", "docstring": "Description\n\nArgs:\nmodel_config: a dictionary with the model configuration data\nbench_config: a dictionary with the benchmark configuration data\nmodel_bundle: a LIVVkit model bundle object\nbench_bundle: a LIVVkit model bundle object\n\nReturns:\nA dictionary created by the elements object corresponding to\nthe results of the bit for bit testing", "source": "juraj-google-style"}
{"code": "def prettify(unicode_text):\n    \n    import xml.dom.minidom\n    reparsed = xml.dom.minidom.parseString(unicode_text.encode('utf-8'))\n    return reparsed.toprettyxml(indent=\"  \", newl=\"\\n\")", "docstring": "Return a pretty-printed version of a unicode XML string.\n\nUseful for debugging.\n\nArgs:\nunicode_text (str): A text representation of XML (unicode,\n*not* utf-8).\n\nReturns:\nstr: A pretty-printed version of the input.", "source": "juraj-google-style"}
{"code": "def json_to_url(json, symbol):\n    \n    start = json[0]['date']\n    end = json[-1]['date']\n    diff = end - start\n\n    \n    \n    \n    periods = [300, 900, 1800, 7200, 14400, 86400]\n\n    diffs = {}\n    for p in periods:\n        diffs[p] = abs(1 - (p / (diff / len(json)))) \n\n    period = min(diffs, key=diffs.get) \n    \n    url = ('https:\n           '=returnChartData&currencyPair={0}&start={1}'\n           '&end={2}&period={3}').format(symbol, start, end, period) \n    return url", "docstring": "Converts a JSON to a URL by the Poloniex API\n\nArgs:\njson: JSON data as a list of dict dates, where the keys are\nthe raw market statistics.\nsymbol: String of currency pair, like a ticker symbol.\n\nReturns:\nString URL to Poloniex API representing the given JSON.", "source": "juraj-google-style"}
{"code": "def concat(cartesians, ignore_index=False, keys=None):\n    frames = [molecule._frame for molecule in cartesians]\n    new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True)\n    if (type(ignore_index) is bool):\n        new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True)\n    else:\n        new = pd.concat(frames, ignore_index=True, keys=keys, verify_integrity=True)\n        if (type(ignore_index) is int):\n            new.index = range(ignore_index, (ignore_index + len(new)))\n        else:\n            new.index = ignore_index\n    return cartesians[0].__class__(new)", "docstring": "Join list of cartesians into one molecule.\n\nWrapper around the :func:`pandas.concat` function.\nDefault values are the same as in the pandas function except for\n``verify_integrity`` which is set to true in case of this library.\n\nArgs:\nignore_index (sequence, bool, int): If it is a boolean, it\nbehaves like in the description of\n:meth:`pandas.DataFrame.append`.\nIf it is a sequence, it becomes the new index.\nIf it is an integer,\n``range(ignore_index, ignore_index + len(new))``\nbecomes the new index.\nkeys (sequence): If multiple levels passed, should contain tuples.\nConstruct hierarchical index using the passed keys as\nthe outermost level\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def get_class_in_module(class_name: str, module_path: Union[str, os.PathLike], *, force_reload: bool=False) -> type:\n    name = os.path.normpath(module_path)\n    if name.endswith('.py'):\n        name = name[:-3]\n    name = name.replace(os.path.sep, '.')\n    module_file: Path = Path(HF_MODULES_CACHE) / module_path\n    with _HF_REMOTE_CODE_LOCK:\n        if force_reload:\n            sys.modules.pop(name, None)\n            importlib.invalidate_caches()\n        cached_module: Optional[ModuleType] = sys.modules.get(name)\n        module_spec = importlib.util.spec_from_file_location(name, location=module_file)\n        module_files: list[Path] = [module_file] + sorted(map(Path, get_relative_import_files(module_file)))\n        module_hash: str = hashlib.sha256(b''.join((bytes(f) + f.read_bytes() for f in module_files))).hexdigest()\n        module: ModuleType\n        if cached_module is None:\n            module = importlib.util.module_from_spec(module_spec)\n            sys.modules[name] = module\n        else:\n            module = cached_module\n        if getattr(module, '__transformers_module_hash__', '') != module_hash:\n            module_spec.loader.exec_module(module)\n            module.__transformers_module_hash__ = module_hash\n        return getattr(module, class_name)", "docstring": "Import a module on the cache directory for modules and extract a class from it.\n\nArgs:\nclass_name (`str`): The name of the class to import.\nmodule_path (`str` or `os.PathLike`): The path to the module to import.\nforce_reload (`bool`, *optional*, defaults to `False`):\nWhether to reload the dynamic module from file if it already exists in `sys.modules`.\nOtherwise, the module is only reloaded if the file has changed.\n\nReturns:\n`typing.Type`: The class looked for.", "source": "github-repos"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    super(APFSTime, self)._CopyFromDateTimeString(time_string)\n    if ((self._timestamp is None) or (self._timestamp < self._INT64_MIN) or (self._timestamp > self._INT64_MAX)):\n        raise ValueError('Date time value not supported.')", "docstring": "Copies a APFS timestamp from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the date and time value is not supported.", "source": "codesearchnet"}
{"code": "def _ReadLine(self, file_object):\n    if (len(self._buffer) < self._buffer_size):\n        content = file_object.read(self._buffer_size)\n        content = content.decode(self._encoding)\n        self._buffer = ''.join([self._buffer, content])\n    (line, new_line, self._buffer) = self._buffer.partition('\\n')\n    if ((not line) and (not new_line)):\n        line = self._buffer\n        self._buffer = ''\n    self._current_offset += len(line)\n    if line.endswith('\\r'):\n        line = line[:(- len('\\r'))]\n    if new_line:\n        line = ''.join([line, '\\n'])\n        self._current_offset += len('\\n')\n    return line", "docstring": "Reads a line from the file object.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nstr: line read from the file-like object.", "source": "codesearchnet"}
{"code": "def astype(array, y):\n    if isinstance(y, autograd.core.Node):\n        return array.astype(numpy.array(y.value).dtype)\n    return array.astype(numpy.array(y).dtype)", "docstring": "A functional form of the `astype` method.\n\nArgs:\narray: The array or number to cast.\ny: An array or number, as the input, whose type should be that of array.\n\nReturns:\nAn array or number with the same dtype as `y`.", "source": "codesearchnet"}
{"code": "def _convert_path(path, name):\n    \n    table = os.path.splitext(path)[0]\n    table = table.replace(os.path.sep, '__')\n    if name is not None:\n        table = '___'.join([table, name])\n    table = re.sub('[^0-9a-zA-Z_]+', '_', table)\n    table = table.lower()\n    return table", "docstring": "Convert resource's path and name to storage's table name.\n\nArgs:\npath (str): resource path\nname (str): resource name\n\nReturns:\nstr: table name", "source": "juraj-google-style"}
{"code": "def add_lambda_permissions(function='',\n                           statement_id='',\n                           action='lambda:InvokeFunction',\n                           principal='',\n                           source_arn='',\n                           env='',\n                           region='us-east-1'):\n    \n    session = boto3.Session(profile_name=env, region_name=region)\n    lambda_client = session.client('lambda')\n    response_action = None\n    prefixed_sid = FOREMAST_PREFIX + statement_id\n\n    add_permissions_kwargs = {\n        'FunctionName': function,\n        'StatementId': prefixed_sid,\n        'Action': action,\n        'Principal': principal,\n    }\n\n    if source_arn:\n        add_permissions_kwargs['SourceArn'] = source_arn\n\n    try:\n        lambda_client.add_permission(**add_permissions_kwargs)\n        response_action = 'Add permission with Sid: {}'.format(prefixed_sid)\n    except boto3.exceptions.botocore.exceptions.ClientError as error:\n        LOG.debug('Add permission error: %s', error)\n        response_action = \"Did not add permissions\"\n\n    LOG.debug('Related StatementId (SID): %s', prefixed_sid)\n    LOG.info(response_action)", "docstring": "Add permission to Lambda for the event trigger.\n\nArgs:\nfunction (str): Lambda function name\nstatement_id (str): IAM policy statement (principal) id\naction (str): Lambda action to allow\nprincipal (str): AWS principal to add permissions\nsource_arn (str): ARN of the source of the event. Only needed for S3\nenv (str): Environment/account of function\nregion (str): AWS region of function", "source": "juraj-google-style"}
{"code": "def _static_cache_update(k_cache: torch.Tensor, v_cache: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, cache_position: Optional[torch.LongTensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n    if cache_position is None:\n        k_cache.copy_(key_states)\n        v_cache.copy_(value_states)\n    else:\n        try:\n            k_cache.index_copy_(2, cache_position, key_states)\n            v_cache.index_copy_(2, cache_position, value_states)\n        except NotImplementedError:\n            k_cache[:, :, cache_position] = key_states\n            v_cache[:, :, cache_position] = value_states\n    return (k_cache, v_cache)", "docstring": "Updates the static cache tensors in place.\n\nArgs:\nk_cache (`torch.Tensor`): The key cache tensor to update.\nv_cache (`torch.Tensor`): The value cache tensor to update.\nkey_states (`torch.Tensor`): The new key states to add.\nvalue_states (`torch.Tensor`): The new value states to add.\ncache_position (`Optional[torch.LongTensor]`): The position indices where the new states should be inserted.\nIf None, the entire cache is overwritten (prefill).\n\nReturns:\nTuple[`torch.Tensor`, `torch.Tensor`]: The updated key and value cache tensors (modified in-place).", "source": "github-repos"}
{"code": "def join(input_files, output_file):\n    final_features = []\n    for file in input_files:\n        with open(file) as f:\n            feat_collection = geojson.load(f)\n            final_features += feat_collection['features']\n    feat_collection['features'] = final_features\n    with open(output_file, 'w') as f:\n        geojson.dump(feat_collection, f)", "docstring": "Join geojsons into one. The spatial reference system of the output file is the same\nas the one of the last file in the list.\n\nArgs:\ninput_files (list): List of file name strings.\noutput_file (str): Output file name.", "source": "codesearchnet"}
{"code": "def findLocalOptima(self,fast=False,verbose=True,n_times=10,lambd=None):\n        \n        if not self.init:       self.initGP(fast)\n        \n        opt_list = []\n\n        fixed0 = SP.zeros_like(self.gp.getParams()['dataTerm'])    \n\n        \n        for i in range(n_times):\n            \n            scales1 = self._getScalesRand()\n            fixed1  = 1e-1*SP.randn(fixed0.shape[0],fixed0.shape[1])\n            conv = self.trainGP(fast=fast,scales0=scales1,fixed0=fixed1,lambd=lambd)\n\n            if conv:\n                \n                temp=1\n                for j in range(len(opt_list)):\n                    if SP.allclose(abs(self.getScales()),abs(opt_list[j]['scales'])):\n                        temp=0\n                        opt_list[j]['counter']+=1\n                        break\n                if temp==1:\n                    opt = {}\n                    opt['counter'] = 1\n                    opt['LML'] = self.getLML()\n                    opt['scales'] = self.getScales()\n                    opt_list.append(opt)\n        \n        \n        \n        LML = SP.array([opt_list[i]['LML'] for i in range(len(opt_list))])\n        index   = LML.argsort()[::-1]\n        out = []\n        if verbose:\n            print(\"\\nLocal mimima\\n\")\n            print(\"n_times\\t\\tLML\")\n            print(\"------------------------------------\")\n            for i in range(len(opt_list)):\n                out.append(opt_list[index[i]])\n                if verbose:\n                    print((\"%d\\t\\t%f\" % (opt_list[index[i]]['counter'], opt_list[index[i]]['LML'])))\n                print(\"\")\n\n        return out", "docstring": "Train the model repeadly up to a number specified by the users with random restarts and\nreturn a list of all relative minima that have been found\n\nArgs:\nfast:       Boolean. if set to True initalize kronSumGP\nverbose:    Boolean. If set to True, verbose output is produced. (default True)\nn_times:    number of re-starts of the optimization. (default 10)", "source": "juraj-google-style"}
{"code": "def get_extension_by_name(cert_obj, extension_name):\n    \n    try:\n        return cert_obj.extensions.get_extension_for_oid(\n            getattr(cryptography.x509.oid.ExtensionOID, extension_name)\n        )\n    except cryptography.x509.ExtensionNotFound:\n        pass", "docstring": "Get a standard certificate extension by attribute name.\n\nArgs:\ncert_obj: cryptography.Certificate\nCertificate containing a standard extension.\n\nextension_name : str\nExtension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'.\n\nReturns:\nCryptography.Extension", "source": "juraj-google-style"}
{"code": "async def _async_supervisor(func, animation_, step, *args, **kwargs):\n    \n    with ThreadPoolExecutor(max_workers=2) as pool:\n        with _terminating_event() as event:\n            pool.submit(animate_cli, animation_, step, event)\n            result = await func(*args, **kwargs)\n    return result", "docstring": "Supervisor for running an animation with an asynchronous function.\n\nArgs:\nfunc: A function to be run alongside an animation.\nanimation_: An infinite generator that produces\nstrings for the animation.\nstep: Seconds between each animation frame.\n*args: Arguments for func.\n**kwargs: Keyword arguments for func.\nReturns:\nThe result of func(*args, **kwargs)\nRaises:\nAny exception that is thrown when executing func.", "source": "juraj-google-style"}
{"code": "def loads(serialized_messages):\n    try:\n        messages_dicts = json.loads(serialized_messages)\n    except ValueError:\n        _log.error('Loading serialized messages failed.')\n        raise\n    messages = []\n    for message_dict in messages_dicts:\n        try:\n            headers = message_dict['headers']\n        except KeyError:\n            _log.error('Message saved without headers.')\n            raise\n        try:\n            MessageClass = get_class(headers['fedora_messaging_schema'])\n        except KeyError:\n            _log.error('Message (headers=%r) saved without a schema header.', headers)\n            raise\n        try:\n            body = message_dict['body']\n        except KeyError:\n            _log.error('Message saved without body.')\n            raise\n        try:\n            id = message_dict['id']\n        except KeyError:\n            _log.error('Message saved without id.')\n            raise\n        try:\n            queue = message_dict['queue']\n        except KeyError:\n            _log.warning('Message saved without queue.')\n            queue = None\n        try:\n            topic = message_dict['topic']\n        except KeyError:\n            _log.error('Message saved without topic.')\n            raise\n        try:\n            severity = headers['fedora_messaging_severity']\n        except KeyError:\n            _log.error('Message saved without a severity.')\n            raise\n        message = MessageClass(body=body, topic=topic, headers=headers, severity=severity)\n        try:\n            message.validate()\n            _log.debug('Successfully validated message %r', message)\n        except jsonschema.exceptions.ValidationError as e:\n            _log.error('Message validation of %r failed: %r', message, e)\n            raise ValidationError(e)\n        message.queue = queue\n        message.id = id\n        messages.append(message)\n    return messages", "docstring": "Deserialize messages from a JSON formatted str\n\nArgs:\nserialized_messages (JSON str):\n\nReturns:\nlist: Deserialized message objects.\n\nRaises:\nValidationError: If deserialized message validation failed.\nKeyError: If serialized_messages aren't properly serialized.\nValueError: If serialized_messages is not valid JSON", "source": "codesearchnet"}
{"code": "def single_conv_dist(name, x, output_channels=None):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        x_shape = common_layers.shape_list(x)\n        if (output_channels is None):\n            output_channels = x_shape[(- 1)]\n        mean_log_scale = conv('conv2d', x, output_channels=(2 * output_channels), conv_init='zeros', apply_actnorm=False)\n        mean = mean_log_scale[(:, :, :, 0::2)]\n        log_scale = mean_log_scale[(:, :, :, 1::2)]\n        return tf.distributions.Normal(mean, tf.exp(log_scale))", "docstring": "A 3x3 convolution mapping x to a standard normal distribution at init.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor.\noutput_channels: number of channels of the mean and std.", "source": "codesearchnet"}
{"code": "def in_train_phase(x, alt, training=None):\n    from tensorflow.python.keras.engine import base_layer_utils\n    if training is None:\n        training = base_layer_utils.call_context().training\n    if training is None:\n        training = learning_phase()\n    if not tensor_util.is_tf_type(training):\n        if training == 1 or training is True:\n            if callable(x):\n                return x()\n            else:\n                return x\n        elif training == 0 or training is False:\n            if callable(alt):\n                return alt()\n            else:\n                return alt\n    x = switch(training, x, alt)\n    return x", "docstring": "Selects `x` in train phase, and `alt` otherwise.\n\nNote that `alt` should have the *same shape* as `x`.\n\nArgs:\nx: What to return in train phase\n(tensor or callable that returns a tensor).\nalt: What to return otherwise\n(tensor or callable that returns a tensor).\ntraining: Optional scalar tensor\n(or Python boolean, or Python integer)\nspecifying the learning phase.\n\nReturns:\nEither `x` or `alt` based on the `training` flag.\nthe `training` flag defaults to `K.learning_phase()`.", "source": "github-repos"}
{"code": "def random_shift(image, wsr=0.1, hsr=0.1):\n    (height, width, _) = common_layers.shape_list(image)\n    (width_range, height_range) = ((wsr * width), (hsr * height))\n    height_translations = tf.random_uniform((1,), (- height_range), height_range)\n    width_translations = tf.random_uniform((1,), (- width_range), width_range)\n    translations = tf.concat((height_translations, width_translations), axis=0)\n    return tf.contrib.image.translate(image, translations=translations)", "docstring": "Apply random horizontal and vertical shift to images.\n\nThis is the default data-augmentation strategy used on CIFAR in Glow.\n\nArgs:\nimage: a 3-D Tensor\nwsr: Width shift range, as a float fraction of the width.\nhsr: Height shift range, as a float fraction of the width.\nReturns:\nimages: images translated by the provided wsr and hsr.", "source": "codesearchnet"}
{"code": "def _FormatUsername(self, event):\n    \n    username = self._output_mediator.GetUsername(event)\n    return self._SanitizeField(username)", "docstring": "Formats the username.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted username field.", "source": "juraj-google-style"}
{"code": "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n        \n        X = df_features.values\n        y = df_target.values[:, 0]\n        rr = ReliefF()\n        rr.fit(X, y)\n\n        return rr.feature_importances_", "docstring": "For one variable, predict its neighbouring nodes.\n\nArgs:\ndf_features (pandas.DataFrame):\ndf_target (pandas.Series):\nidx (int): (optional) for printing purposes\nkwargs (dict): additional options for algorithms\n\nReturns:\nlist: scores of each feature relatively to the target", "source": "juraj-google-style"}
{"code": "def dump_connection_info(engine: Engine, fileobj: TextIO=sys.stdout) -> None:\n    meta = MetaData(bind=engine)\n    writeline_nl(fileobj, sql_comment('Database info: {}'.format(meta)))", "docstring": "Dumps some connection info, as an SQL comment. Obscures passwords.\n\nArgs:\nengine: the SQLAlchemy :class:`Engine` to dump metadata information\nfrom\nfileobj: the file-like object (default ``sys.stdout``) to write\ninformation to", "source": "codesearchnet"}
{"code": "def write_xml_root(xml_root, output_loc=None, filename=None):\n    \n    if xml_root is None:\n        raise Dump2PolarionException(\"No data to write.\")\n    filename_fin = _get_filename(output_loc=output_loc, filename=filename)\n\n    et = etree.ElementTree(xml_root)\n    et.write(filename_fin, xml_declaration=True, pretty_print=True, encoding=\"utf-8\")\n    logger.info(\"Data written to '%s'\", filename_fin)", "docstring": "Outputs the XML content (from XML element) into a file.\n\nIf `output_loc` is supplied and it's a file (not directory), the output\nwill be saved there and the `filename` is ignored.\n\nArgs:\nxml_root: root element ot the XML document\noutput_loc: file or directory for saving the file\nfilename: file name that will be used if `output_loc` is directory\nIf it is needed and is not supplied, it will be generated", "source": "juraj-google-style"}
{"code": "def detect_deprecated_references_in_contract(self, contract):\n    results = []\n    for state_variable in contract.variables:\n        if (state_variable.contract != contract):\n            continue\n        if state_variable.expression:\n            deprecated_results = self.detect_deprecation_in_expression(state_variable.expression)\n            if deprecated_results:\n                results.append((state_variable, deprecated_results))\n    for function in (contract.functions + contract.modifiers):\n        if (function.contract != contract):\n            continue\n        for node in function.nodes:\n            deprecated_results = self.detect_deprecated_references_in_node(node)\n            for ir in node.irs:\n                if isinstance(ir, LowLevelCall):\n                    for dep_llc in self.DEPRECATED_LOW_LEVEL_CALLS:\n                        if (ir.function_name == dep_llc[0]):\n                            deprecated_results.append(dep_llc)\n            if deprecated_results:\n                results.append((node, deprecated_results))\n    return results", "docstring": "Detects the usage of any deprecated built-in symbols.\n\nReturns:\nlist of tuple: (state_variable | node, (detecting_signature, original_text, recommended_text))", "source": "codesearchnet"}
{"code": "def get_dict_table_schema(schema):\n    if isinstance(schema, (dict, value_provider.ValueProvider)) or callable(schema) or schema is None:\n        return schema\n    elif isinstance(schema, str):\n        table_schema = get_table_schema_from_string(schema)\n        return table_schema_to_dict(table_schema)\n    elif isinstance(schema, bigquery.TableSchema):\n        return table_schema_to_dict(schema)\n    else:\n        raise TypeError('Unexpected schema argument: %s.' % schema)", "docstring": "Transform the table schema into a dictionary instance.\n\nArgs:\nschema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):\nThe schema to be used if the BigQuery table to write has to be created.\nThis can either be a dict or string or in the TableSchema format.\n\nReturns:\nDict[str, Any]: The schema to be used if the BigQuery table to write has\nto be created but in the dictionary format.", "source": "github-repos"}
{"code": "def resolve_path(path, config_file):\n        \n        if os.path.isabs(path):\n            return path\n        return os.path.relpath(path, os.path.dirname(config_file))", "docstring": "Resolve path relative to config file location.\n\nArgs:\npath: Path to be resolved.\nconfig_file: Path to config file, which `path` is specified\nrelative to.\n\nReturns:\nPath relative to the `config_file` location. If `path` is an\nabsolute path then it will be returned without change.", "source": "juraj-google-style"}
{"code": "def set_source_interface(self, name):\n        \n        cmd = self.command_builder('ntp source', value=name)\n        return self.configure(cmd)", "docstring": "Assign the NTP source on the node\n\nArgs:\nname (string): The interface port that specifies the NTP source.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "juraj-google-style"}
{"code": "def run(self, *args, **kwargs):\n        \n        accounts = list(AWSAccount.get_all(include_disabled=False).values())\n\n        \n        s3_acl = get_template('cloudtrail_s3_bucket_policy.json')\n        s3_bucket_name = self.dbconfig.get('bucket_name', self.ns)\n        s3_bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2')\n        s3_bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns))\n        CloudTrail.create_s3_bucket(s3_bucket_name, s3_bucket_region, s3_bucket_account, s3_acl)\n\n        self.validate_sqs_policy(accounts)\n\n        for account in accounts:\n            ct = CloudTrail(account, s3_bucket_name, s3_bucket_region, self.log)\n            ct.run()", "docstring": "Entry point for the scheduler\n\nArgs:\n*args: Optional arguments\n**kwargs: Optional keyword arguments\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def convert_softmax(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting softmax ...')\n    if (names == 'short'):\n        tf_name = ('SMAX' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n\n    def target_layer(x, dim=params['dim']):\n        import keras\n        return keras.activations.softmax(x, axis=dim)\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert softmax layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def __make_request_headers(self, teststep_dict, entry_json):\n    teststep_headers = {}\n    for header in entry_json['request'].get('headers', []):\n        if (header['name'].lower() in IGNORE_REQUEST_HEADERS):\n            continue\n        teststep_headers[header['name']] = header['value']\n    if teststep_headers:\n        teststep_dict['request']['headers'] = teststep_headers", "docstring": "parse HAR entry request headers, and make teststep headers.\nheader in IGNORE_REQUEST_HEADERS will be ignored.\n\nArgs:\nentry_json (dict):\n{\n\"request\": {\n\"headers\": [\n{\"name\": \"Host\", \"value\": \"httprunner.top\"},\n{\"name\": \"Content-Type\", \"value\": \"application/json\"},\n{\"name\": \"User-Agent\", \"value\": \"iOS/10.3\"}\n],\n},\n\"response\": {}\n}\n\nReturns:\n{\n\"request\": {\nheaders: {\"Content-Type\": \"application/json\"}\n}", "source": "codesearchnet"}
{"code": "def add_chain(self, name, order):\n    if (name not in self.chains):\n        setattr(self.chains, name, MarkovChain(order=order))\n    else:\n        raise ValueError('Chain with this name already exists')", "docstring": "Add chain to current shelve file\n\nArgs:\nname: chain name\norder: markov chain order", "source": "codesearchnet"}
{"code": "def as_matrix(self, depth=0):\n        \n        if depth in self._matrix_cache:\n            return self._matrix_cache[depth]\n        self._matrix_cache[depth] = matrix = Matrix(self, depth=depth)\n        return matrix", "docstring": "Create a matrix with self as node, cache it, return it.\n\nArgs:\ndepth (int): depth of the matrix.\n\nReturns:\nMatrix: an instance of Matrix.", "source": "juraj-google-style"}
{"code": "def inspect(self, **kwargs):\n    scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)\n    if (scf_cycle is not None):\n        if ('title' not in kwargs):\n            kwargs['title'] = str(self)\n        return scf_cycle.plot(**kwargs)", "docstring": "Plot the Phonon SCF cycle results with matplotlib.\n\nReturns:\n`matplotlib` figure, None if some error occurred.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    _cls = [self.cls_token_id]\n    _sep = [self.sep_token_id]\n    return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An ErnieM sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nReturns:\n`List[int]`: List of input_id with the appropriate special tokens.", "source": "github-repos"}
{"code": "def register_agent(self, short_name):\n        \n\n        self._loop.run_coroutine(self._client.register_agent(short_name))", "docstring": "Register to act as the RPC agent for this service.\n\nAfter this cal succeeds, all requests to send RPCs to this service will be routed\nthrough this agent.\n\nArgs:\nshort_name (str): A unique short name for this service that functions\nas an id", "source": "juraj-google-style"}
{"code": "def compute_matmul_output_shape(shape1, shape2):\n    if len(shape1) == 1:\n        shape1 = (1, shape1[0])\n    if len(shape2) == 1:\n        shape2 = (shape2[0], 1)\n    if shape1[-1] is not None and shape2[-2] is not None and (shape1[-1] != shape2[-2]):\n        raise ValueError(f'Inner dimensions (`x1.shape[-1]` and `x2.shape[-2]`) must be equal, but received `x1.shape={shape1}` and `x2.shape={shape2}`.')\n    leading_shape = broadcast_shapes(shape1[:-2], shape2[:-2])\n    last_2_dims_shape = [shape1[-2], shape2[-1]]\n    output_shape = leading_shape + last_2_dims_shape\n    if len(shape1) == 1:\n        del output_shape[-2]\n    if len(shape2) == 1:\n        del output_shape[-1]\n    return tuple(output_shape)", "docstring": "Compute the output shape of a `matmul` operation.\n\nArgs:\nshape1: Shape of the left operand.\nshape2: Shape of the right operand.\n\nReturns:\nTuple of ints: The output shape for the `matmul` operation.", "source": "github-repos"}
{"code": "def _new_from_rft(self, base_template, rft_file):\n        \n        self._add_entry(base_template)\n        self._add_entry(templates.NEW_FROM_RFT\n                                 .format(rft_file_path=rft_file,\n                                         rft_file_name=op.basename(rft_file)))", "docstring": "Append a new file from .rft entry to the journal.\n\nThis instructs Revit to create a new model based on\nthe provided .rft template.\n\nArgs:\nbase_template (str): new file journal template from rmj.templates\nrft_file (str): full path to .rft template to be used", "source": "juraj-google-style"}
{"code": "def run_tasks(header, tasks):\n    \n    tasks = list(tasks)\n    with timed_display(header) as print_message:\n        with tqdm(tasks, position=1, desc='Progress', disable=None,\n                  bar_format='{desc}{percentage:3.0f}% |{bar}|',\n                  total=sum(t[2] if len(t) > 2 else 1 for t in tasks),\n                  dynamic_ncols=True) as pbar:\n            for task in tasks:\n                print_message(task[0])\n                with display_status():\n                    try:\n                        task[1]()\n                    finally:\n                        pbar.update(task[2] if len(task) > 2 else 1)", "docstring": "Run a group of tasks with a header, footer and success/failure messages.\n\nArgs:\nheader: A message to print in the header bar before the tasks are run.\ntasks: A list of tuples containing a task title, a task, and a weight.\nIf the tuple only contains two values, the weight is assumed to be\none.", "source": "juraj-google-style"}
{"code": "def get_backbone_element_fields(structdef: StructureDefinition, path: str) -> List[str]:\n    results = []\n    struct_id = cast(Any, structdef).id.value\n    qualified_path = struct_id + '.' + path if path else struct_id\n    for elem in cast(Any, structdef).snapshot.element:\n        if elem.id.value.startswith(qualified_path):\n            relative_path = elem.id.value[len(qualified_path) + 1:]\n            if relative_path and '.' not in relative_path:\n                if relative_path.endswith('[x]'):\n                    relative_path = relative_path[:-3]\n                results.append(relative_path)\n    return results", "docstring": "Returns the field under the path to the given FHIR backbone element.\n\nArgs:\nstructdef: a FHIR StructureDefinition proto.\npath: a path to a backbone element within the structure definition.\n\nReturns:\nA list of nested field names.", "source": "github-repos"}
{"code": "def lookup_package(self, definition_name):\n        \n        while True:\n            descriptor = self.lookup_descriptor(definition_name)\n            if isinstance(descriptor, FileDescriptor):\n                return descriptor.package\n            else:\n                index = definition_name.rfind('.')\n                if index < 0:\n                    return None\n                definition_name = definition_name[:index]", "docstring": "Determines the package name for any definition.\n\nDetermine the package that any definition name belongs to. May\ncheck parent for package name and will resolve missing\ndescriptors if provided descriptor loader.\n\nArgs:\ndefinition_name: Definition name to find package for.", "source": "juraj-google-style"}
{"code": "def write_file(path, data, mode='w'):  \n    \n    with open(path, mode) as f:\n        f.write(data)", "docstring": "Write data to a file.\n\nArgs:\npath (str): path to the file.\ndata (str): data to be written to the file.\nmode (str): mode which the file will be open.", "source": "juraj-google-style"}
{"code": "def write_sample(binary, payload, path, filename):\n    if (not os.path.exists(path)):\n        os.makedirs(path)\n    sample = os.path.join(path, filename)\n    if binary:\n        with open(sample, 'wb') as f:\n            f.write(base64.b64decode(payload))\n    else:\n        with open(sample, 'w') as f:\n            f.write(payload)", "docstring": "This function writes a sample on file system.\n\nArgs:\nbinary (bool): True if it's a binary file\npayload: payload of sample, in base64 if it's a binary\npath (string): path of file\nfilename (string): name of file\nhash_ (string): file hash", "source": "codesearchnet"}
{"code": "def build_evaluation(variant_specific, variant_id, user_id, user_name, institute_id, case_id, classification, criteria):\n    criteria = (criteria or [])\n    evaluation_obj = dict(variant_specific=variant_specific, variant_id=variant_id, institute_id=institute_id, case_id=case_id, classification=classification, user_id=user_id, user_name=user_name, created_at=datetime.datetime.now())\n    criteria_objs = []\n    for info in criteria:\n        criteria_obj = {}\n        criteria_obj['term'] = info['term']\n        if ('comment' in info):\n            criteria_obj['comment'] = info['comment']\n        if ('links' in info):\n            criteria_obj['links'] = info['links']\n        criteria_objs.append(criteria_obj)\n    evaluation_obj['criteria'] = criteria_objs\n    return evaluation_obj", "docstring": "Build a evaluation object ready to be inserted to database\n\nArgs:\nvariant_specific(str): md5 string for the specific variant\nvariant_id(str): md5 string for the common variant\nuser_id(str)\nuser_name(str)\ninstitute_id(str)\ncase_id(str)\nclassification(str): The ACMG classification\ncriteria(list(dict)): A list of dictionaries with ACMG criterias\n\nReturns:\nevaluation_obj(dict): Correctly formatted evaluation object", "source": "codesearchnet"}
{"code": "def notify_batches_finished(self, statuses):\n    with self._wait_condition:\n        self._statuses = statuses\n        self._wait_condition.notify()", "docstring": "Called by the BatchTracker the _BatchWaiter is observing. Should not\nbe called by handlers.\n\nArgs:\nstatuses (dict of int): A dict with keys of batch ids, and values\nof status enums", "source": "codesearchnet"}
{"code": "def sanger_ordered(self, institute_id=None, user_id=None):\n    query = {'$match': {'$and': [{'verb': 'sanger'}]}}\n    if institute_id:\n        query['$match']['$and'].append({'institute': institute_id})\n    if user_id:\n        query['$match']['$and'].append({'user_id': user_id})\n    results = self.event_collection.aggregate([query, {'$group': {'_id': '$case', 'vars': {'$addToSet': '$variant_id'}}}])\n    sanger_ordered = [item for item in results]\n    return sanger_ordered", "docstring": "Get all variants with validations ever ordered.\n\nArgs:\ninstitute_id(str) : The id of an institute\nuser_id(str) : The id of an user\n\nReturns:\nsanger_ordered(list) : a list of dictionaries, each with \"case_id\" as keys and list of variant ids as values", "source": "codesearchnet"}
{"code": "def compile(self, model):\n        \n\n        log = SensorLog(InMemoryStorageEngine(model), model)\n        self.sensor_graph = SensorGraph(log, model)\n\n        allocator = StreamAllocator(self.sensor_graph, model)\n\n        self._scope_stack = []\n\n        \n        root = RootScope(self.sensor_graph, allocator)\n        self._scope_stack.append(root)\n\n        for statement in self.statements:\n            statement.execute(self.sensor_graph, self._scope_stack)\n\n        self.sensor_graph.initialize_remaining_constants()\n        self.sensor_graph.sort_nodes()", "docstring": "Compile this file into a SensorGraph.\n\nYou must have preivously called parse_file to parse a\nsensor graph file into statements that are then executed\nby this command to build a sensor graph.\n\nThe results are stored in self.sensor_graph and can be\ninspected before running optimization passes.\n\nArgs:\nmodel (DeviceModel): The device model that we should compile\nthis sensor graph for.", "source": "juraj-google-style"}
{"code": "def get_auth(credentials_prompt, refresh_token_cache, manual_login=False):\n    with requests.Session() as session:\n        session.headers = {'user-agent': USER_AGENT}\n        try:\n            logger.info('Authenticating with refresh token')\n            refresh_token = refresh_token_cache.get()\n            if (refresh_token is None):\n                raise GoogleAuthError('Refresh token not found')\n            access_token = _auth_with_refresh_token(session, refresh_token)\n        except GoogleAuthError as e:\n            logger.info('Failed to authenticate using refresh token: %s', e)\n            logger.info('Authenticating with credentials')\n            if manual_login:\n                authorization_code = credentials_prompt.get_authorization_code()\n            else:\n                authorization_code = _get_authorization_code(session, credentials_prompt)\n            (access_token, refresh_token) = _auth_with_code(session, authorization_code)\n            refresh_token_cache.set(refresh_token)\n        logger.info('Authentication successful')\n        return _get_session_cookies(session, access_token)", "docstring": "Authenticate with Google.\n\nArgs:\nrefresh_token_cache (RefreshTokenCache): Cache to use so subsequent\nlogins may not require credentials.\ncredentials_prompt (CredentialsPrompt): Prompt to use if credentials\nare required to log in.\nmanual_login (bool): If true, prompt user to log in through a browser\nand enter authorization code manually. Defaults to false.\n\nReturns:\ndict: Google session cookies.\n\nRaises:\nGoogleAuthError: If authentication with Google fails.", "source": "codesearchnet"}
{"code": "def write_edges(\n    edges: Mapping[str, Any],\n    filename: str,\n    jsonlines: bool = False,\n    gzipflag: bool = False,\n    yaml: bool = False,\n):\n    \n    pass", "docstring": "Write edges to file\n\nArgs:\nedges (Mapping[str, Any]): in edges JSON Schema format\nfilename (str): filename to write\njsonlines (bool): output in JSONLines format?\ngzipflag (bool): create gzipped file?\nyaml (bool): create yaml file?", "source": "juraj-google-style"}
{"code": "def process_results(qry_results):\n    i_info = {}\n    for (i, j) in enumerate(qry_results['Reservations']):\n        i_info[i] = {'id': j['Instances'][0]['InstanceId']}\n        i_info[i]['state'] = j['Instances'][0]['State']['Name']\n        i_info[i]['ami'] = j['Instances'][0]['ImageId']\n        i_info[i]['ssh_key'] = j['Instances'][0]['KeyName']\n        i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName']\n        try:\n            i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags'])\n        except KeyError:\n            i_info[i]['tag'] = {'Name': ''}\n    debg.dprint('numInstances: ', len(i_info))\n    debg.dprintx('Details except AMI-name')\n    debg.dprintx(i_info, True)\n    return i_info", "docstring": "Generate dictionary of results from query.\n\nDecodes the large dict recturned from the AWS query.\n\nArgs:\nqry_results (dict): results from awsc.get_inst_info\nReturns:\ni_info (dict): information on instances and details.", "source": "codesearchnet"}
{"code": "def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):\n    new_lm_head_bias = {}\n    for attr, weight in old_lm_head_bias.items():\n        first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)\n        size_diff = new_num_tokens - old_num_tokens\n        final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]\n        if tf.math.greater(size_diff, 0):\n            padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]\n            current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)\n            num_tokens_to_copy = min(old_num_tokens, new_num_tokens)\n            mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]\n            bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)\n            bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)\n        else:\n            slice_from = [0] if first_dim is None else [0, 0]\n            current_bias = tf.slice(weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape))\n            bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)\n        new_bias = self.add_weight(shape=final_shape, initializer='zeros', trainable=True, name=weight.name.split(':')[0])\n        init_bias = tf.where(bias_mask, current_bias, new_bias.value())\n        new_bias.assign(init_bias)\n        new_lm_head_bias[attr] = new_bias\n    return new_lm_head_bias", "docstring": "Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.\nReducing the size will remove vectors from the end\n\nArgs:\nold_lm_head_bias (`tf.Variable`):\nOld lm head bias to be resized.\nnew_num_tokens (`int`, *optional*):\nNew number of tokens in the linear matrix.\n\nIncreasing the size will add newly initialized vectors at the end. Reducing the size will remove\nvectors from the end. If not provided or `None`, just returns None\n\nReturn:\n`tf.Variable`: Pointer to the resized bias.", "source": "github-repos"}
{"code": "def load(self, context):\n    \n    try:\n      \n      import tensorflow\n      \n      \n      from tensorflow.python.eager import profiler_client\n    except ImportError:\n      return\n    \n    from tensorboard.plugins.profile.profile_plugin import ProfilePlugin\n    return ProfilePlugin(context)", "docstring": "Returns the plugin, if possible.\n\nArgs:\ncontext: The TBContext flags.\n\nReturns:\nA ProfilePlugin instance or None if it couldn't be loaded.", "source": "juraj-google-style"}
{"code": "def __recognize_dict(self, node: yaml.Node,\n                         expected_type: Type) -> RecResult:\n        \n        logger.debug('Recognizing as a dict')\n        if not issubclass(generic_type_args(expected_type)[0], str):\n            raise RuntimeError(\n                'YAtiML only supports dicts with strings as keys')\n        if not isinstance(node, yaml.MappingNode):\n            message = '{}{}Expected a dict/mapping here'.format(\n                node.start_mark, os.linesep)\n            return [], message\n        value_type = generic_type_args(expected_type)[1]\n        for _, value in node.value:\n            recognized_value_types, message = self.recognize(value, value_type)\n            if len(recognized_value_types) == 0:\n                return [], message\n            if len(recognized_value_types) > 1:\n                return [\n                    Dict[str, t]  \n                    for t in recognized_value_types\n                ], message  \n\n        return [expected_type], ''", "docstring": "Recognize a node that we expect to be a dict of some kind.\n\nArgs:\nnode: The node to recognize.\nexpected_type: Dict[str, ...something...]\n\nReturns:\nexpected_type if it was recognized, [] otherwise.", "source": "juraj-google-style"}
{"code": "def move_added_token(self, token: str, target_idx: int):\n    assert token in self.added_tokens_encoder, 'Token which should be moved has to be an added token'\n    assert token not in self.idx2sym, 'Token which should be moved is already in vocab'\n    self.idx2sym.insert(target_idx, token)\n    self.sym2idx[token] = target_idx\n    for idx in range(target_idx + 1, len(self.idx2sym)):\n        current_sym = self.idx2sym[idx]\n        self.sym2idx[current_sym] = idx\n    old_index = self._added_tokens_encoder.pop(token)\n    self._added_tokens_decoder.pop(old_index)", "docstring": "Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding\nlayer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the\ndefault position (at the very end) to the desired one.\n\nArgs:\ntoken: The token to move to a specific position in the vocab.\ntarget_idx: The position where the token should be moved to.", "source": "github-repos"}
{"code": "def genome_name_from_fasta_path(fasta_path):\n    filename = os.path.basename(fasta_path)\n    return re.sub('(\\\\.fa$)|(\\\\.fas$)|(\\\\.fasta$)|(\\\\.fna$)|(\\\\.\\\\w{1,}$)', '', filename)", "docstring": "Extract genome name from fasta filename\n\nGet the filename without directory and remove the file extension.\n\nExample:\nWith fasta file path ``/path/to/genome_1.fasta``::\n\nfasta_path = '/path/to/genome_1.fasta'\ngenome_name = genome_name_from_fasta_path(fasta_path)\nprint(genome_name)\n# => \"genome_1\"\n\nArgs:\nfasta_path (str): fasta file path\n\nReturns:\nstr: genome name", "source": "codesearchnet"}
{"code": "def _preprocess_padding(padding):\n    if padding == 'same':\n        padding = 'SAME'\n    elif padding == 'valid':\n        padding = 'VALID'\n    else:\n        raise ValueError('Invalid padding: ' + str(padding))\n    return padding", "docstring": "Convert keras' padding to TensorFlow's padding.\n\nArgs:\npadding: string, one of 'same' , 'valid'\n\nReturns:\na string, one of 'SAME', 'VALID'.\n\nRaises:\nValueError: if invalid `padding'`", "source": "github-repos"}
{"code": "def add(self, handler):\n    self._handlers.append(handler)\n    static_paths = set((h.static_path() for h in self.handlers))\n    static_paths.discard(None)\n    if (len(static_paths) > 1):\n        raise RuntimeError(('More than one static path requested for app: %r' % list(static_paths)))\n    elif (len(static_paths) == 1):\n        self._static_path = static_paths.pop()\n    else:\n        self._static_path = None", "docstring": "Add a handler to the pipeline used to initialize new documents.\n\nArgs:\nhandler (Handler) : a handler for this Application to use to\nprocess Documents", "source": "codesearchnet"}
{"code": "def avg_pool(x, pool_size, strides, padding):\n    x = tf_np.asarray(x)\n    return tf_np.asarray(nn_ops.pool(input=x, window_shape=pool_size, pooling_type='AVG', strides=strides, padding=padding))", "docstring": "Performs an N-D average pooling.\n\nArgs:\nx: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +\n[num_channels]`. Pooling happens over the spatial dimensions only.\npool_size: sequence of N ints.\nstrides: sequence of N ints.\npadding: a string, the padding algorithm. Must be \"SAME\" or \"VALID\".\n\nReturns:\nAn (N+2)-D array,  of shape\n[batch_size] + output_spatial_shape + [num_channels],\nwhere `output_spatial_shape` depends on the value of padding:\nIf padding = \"SAME\":\noutput_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])\nIf padding = \"VALID\":\noutput_spatial_shape[i] =\nceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).", "source": "github-repos"}
{"code": "def values(self):\n    return {n: getattr(self, n) for n in self._hparam_types.keys()}", "docstring": "Return the hyperparameter values as a Python dictionary.\n\nReturns:\nA dictionary with hyperparameter names as keys.  The values are the\nhyperparameter values.", "source": "codesearchnet"}
{"code": "def bot(self, id):\n        \n        json = self.skype.conn(\"GET\", \"{0}/agents\".format(SkypeConnection.API_BOT), params={\"agentId\": id},\n                               auth=SkypeConnection.Auth.SkypeToken).json().get(\"agentDescriptions\", [])\n        return self.merge(SkypeBotUser.fromRaw(self.skype, json[0])) if json else None", "docstring": "Retrieve a single bot.\n\nArgs:\nid (str): UUID or username of the bot\n\nReturns:\nSkypeBotUser: resulting bot user object", "source": "juraj-google-style"}
{"code": "def purview(repertoire):\n    \n    if repertoire is None:\n        return None\n\n    return tuple(i for i, dim in enumerate(repertoire.shape) if dim == 2)", "docstring": "The purview of the repertoire.\n\nArgs:\nrepertoire (np.ndarray): A repertoire\n\nReturns:\ntuple[int]: The purview that the repertoire was computed over.", "source": "juraj-google-style"}
{"code": "def eigvalsh(a, eigvec=False):\n    if (eigvec == True):\n        (val, vec) = eigh(a, eigvec=True)\n        return (val, gvar.mean(vec))\n    else:\n        return eigh(a, eigvec=False)", "docstring": "Eigenvalues of Hermitian matrix ``a``.\n\nArgs:\na: Two-dimensional, square Hermitian matrix/array of numbers\nand/or :class:`gvar.GVar`\\s. Array elements must be\nreal-valued if `gvar.GVar`\\s are involved (i.e., symmetric\nmatrix).\neigvec (bool): If ``True``, method returns a tuple of arrays\n``(val, vec)`` where ``val[i]`` are the\neigenvalues of ``a``, and ``vec[:, i]`` are the mean\nvalues of the corresponding eigenvectors. Only ``val`` is\nreturned if ``eigvec=False`` (default).\n\nReturns:\nArray ``val`` of eigenvalues of matrix ``a`` if parameter\n``eigvec==False`` (default); otherwise a tuple of\narrays ``(val, vec)`` where ``val[i]`` are the eigenvalues\n(in ascending order) and ``vec[:, i]`` are the mean values\nof the corresponding eigenvectors.\n\nRaises:\nValueError: If matrix is not square and two-dimensional.", "source": "codesearchnet"}
{"code": "def get_status(self):\n    return (self._initialized, self._error_message)", "docstring": "Get status of `_Reqs` initialization.\n\nReturns:\nTuple\n(Boolean indicating initialization status,\nList of error messages, if any)", "source": "github-repos"}
{"code": "def OpenFileSystem(cls, path_spec_object, resolver_context=None):\n    if (not isinstance(path_spec_object, path_spec.PathSpec)):\n        raise TypeError('Unsupported path specification type.')\n    if (resolver_context is None):\n        resolver_context = cls._resolver_context\n    if (path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT):\n        if path_spec_object.HasParent():\n            raise errors.PathSpecError('Unsupported mount path specification with parent.')\n        mount_point = getattr(path_spec_object, 'identifier', None)\n        if (not mount_point):\n            raise errors.PathSpecError('Unsupported path specification without mount point identifier.')\n        path_spec_object = mount_manager.MountPointManager.GetMountPoint(mount_point)\n        if (not path_spec_object):\n            raise errors.MountPointError('No such mount point: {0:s}'.format(mount_point))\n    file_system = resolver_context.GetFileSystem(path_spec_object)\n    if (not file_system):\n        resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)\n        file_system = resolver_helper.NewFileSystem(resolver_context)\n    try:\n        file_system.Open(path_spec_object)\n    except (IOError, ValueError) as exception:\n        raise errors.BackEndError('Unable to open file system with error: {0!s}'.format(exception))\n    return file_system", "docstring": "Opens a file system object defined by path specification.\n\nArgs:\npath_spec_object (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built in context which is not multi process safe.\n\nReturns:\nFileSystem: file system or None if the path specification could not\nbe resolved or has no file system object.\n\nRaises:\nAccessError: if the access to open the file system was denied.\nBackEndError: if the file system cannot be opened.\nMountPointError: if the mount point specified in the path specification\ndoes not exist.\nPathSpecError: if the path specification is incorrect.\nTypeError: if the path specification type is unsupported.", "source": "codesearchnet"}
{"code": "def __init__(self,\n               g=None,\n               default_device=None,\n               global_step=None):  \n    \n    if g is None:\n      self._g = tf.get_default_graph()\n    else:\n      self._g = g\n    self._train_op = None\n    \n    self._summary_tags = set()\n    if global_step and global_step.dtype.base_dtype not in (tf.int32, tf.int64):\n      raise ValueError('Global step must be an int32 or int64 variable: %s' %\n                       global_step.dtype)\n    self._global_step = global_step\n\n    if default_device:\n      \n      self.g._device_function_stack.append(default_device)\n\n    self._recurrent_state = None\n    self.reset_summary_collections()", "docstring": "Creates a Bookkeeper.\n\nArgs:\ng: A graph, if not specified then the default graph is used.\ndefault_device: A default device or function.\nglobal_step: A variable to use as a global step.\nRaises:\nValueError: If global_step is not an integer variable.", "source": "juraj-google-style"}
{"code": "def cumsum(x, axis=None, dtype=None):\n    return Cumsum(axis=axis, dtype=dtype)(x)", "docstring": "Returns the cumulative sum of elements along a given axis.\n\nArgs:\nx: Input tensor.\naxis: Axis along which the cumulative sum is computed.\nBy default the input is flattened.\ndtype: dtype of returned tensor. Defaults to x.dtype.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def MICECache(subsystem, parent_cache=None):\n    if config.REDIS_CACHE:\n        cls = RedisMICECache\n    else:\n        cls = DictMICECache\n    return cls(subsystem, parent_cache=parent_cache)", "docstring": "Construct a |MICE| cache.\n\nUses either a Redis-backed cache or a local dict cache on the object.\n\nArgs:\nsubsystem (Subsystem): The subsystem that this is a cache for.\n\nKwargs:\nparent_cache (MICECache): The cache generated by the uncut\nversion of ``subsystem``. Any cached |MICE| which are\nunaffected by the cut are reused in this cache. If None,\nthe cache is initialized empty.", "source": "codesearchnet"}
{"code": "def download_aspera(self, user, host, silent=False):\n        \n        aspera_home = os.environ.get(\"ASPERA_HOME\", None)\n        if not aspera_home:\n            raise ValueError(\"environment variable $ASPERA_HOME not set\")\n        if not os.path.exists(aspera_home):\n            raise ValueError(\n                \"$ASPERA_HOME directory {} does not exist\".format(aspera_home))\n        ascp = os.path.join(aspera_home, \"connect/bin/ascp\")\n        key = os.path.join(aspera_home, \"connect/etc/asperaweb_id_dsa.openssh\")\n        if not os.path.exists(ascp):\n            raise ValueError(\"could not find ascp binary\")\n        if not os.path.exists(key):\n            raise ValueError(\"could not find openssh key\")\n\n        parsed_url = urlparse(self.url)\n\n        cmd = \"{} -i {} -k1 -T -l400m {}@{}:{} {}\".format(\n            ascp, key, user, host, parsed_url.path, self._temp_file_name)\n        logger.debug(cmd)\n        try:\n            pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n            stdout, stderr = pr.communicate()\n            if not silent:\n                logger.debug(\"Aspera stdout: \" + str(stdout))\n                logger.debug(\"Aspera stderr: \" + str(stderr))\n            if pr.returncode == 0:\n                logger.debug(\"Moving %s to %s\" % (\n                    self._temp_file_name,\n                    self.destination))\n                shutil.move(self._temp_file_name, self.destination)\n                logger.debug(\"Successfully downloaded %s\" % self.url)\n            else:\n                logger.error(\n                    \"Failed to download %s using Aspera Connect\" % self.url)\n        finally:\n            try:\n                os.remove(self._temp_file_name)\n            except OSError:\n                pass", "docstring": "Download file with Aspera Connect.\n\nFor details see the documentation ov Aspera Connect\n\nArgs:\nuser (:obj:`str`): FTP user.\nhost (:obj:`str`): FTP host. Defaults to \"ftp-trace.ncbi.nlm.nih.gov\".", "source": "juraj-google-style"}
{"code": "def build_worker_instruction(*args):\n    tuple_class = collections.namedtuple(*args)\n    tuple_class.__str__ = worker_object_to_string\n    tuple_class.__repr__ = worker_object_to_string\n    return tuple_class", "docstring": "Create an object representing a ParallelInstruction protobuf.\n\nThis will be a collections.namedtuple with a custom __str__ method.\n\nAlas, this wrapper is not known to pylint, which thinks it creates\nconstants.  You may have to put a disable=invalid-name pylint\nannotation on any use of this, depending on your names.\n\nArgs:\n*args: first argument is the name of the type to create.  Should\nstart with \"Worker\".  Second arguments is alist of the\nattributes of this object.\nReturns:\nA new class, a subclass of tuple, that represents the protobuf.", "source": "github-repos"}
{"code": "def submit_file_content(self, method, url, data, headers, params, halt_on_error=True):\n    r = None\n    try:\n        r = self.tcex.session.request(method, url, data=data, headers=headers, params=params)\n    except Exception as e:\n        self.tcex.handle_error(580, [e], halt_on_error)\n    return r", "docstring": "Submit File Content for Documents and Reports to ThreatConnect API.\n\nArgs:\nmethod (str): The HTTP method for the request (POST, PUT).\nurl (str): The URL for the request.\ndata (str;bytes;file): The body (data) for the request.\nheaders (dict): The headers for the request.\nparams (dict): The query string parameters for the request.\nhalt_on_error (bool, default:True): If True any exception will raise an error.\n\nReturns:\nrequests.models.Response: The response from the request.", "source": "codesearchnet"}
{"code": "def __init__(self, maximum_number_of_queued_items=0, timeout=None):\n    \n    super(MultiProcessingQueue, self).__init__()\n    self._timeout = timeout\n\n    \n    \n\n    \n    \n    \n    \n    queue_max_length = _multiprocessing.SemLock.SEM_VALUE_MAX\n    \n\n    if maximum_number_of_queued_items > queue_max_length:\n      logger.warning((\n          'Requested maximum queue size: {0:d} is larger than the maximum '\n          'size supported by the system. Defaulting to: {1:d}').format(\n              maximum_number_of_queued_items, queue_max_length))\n      maximum_number_of_queued_items = queue_max_length\n\n    \n    self._queue = multiprocessing.Queue(maxsize=maximum_number_of_queued_items)", "docstring": "Initializes a multi-processing queue.\n\nArgs:\nmaximum_number_of_queued_items (Optional[int]): maximum number of queued\nitems, where 0 represents no limit.\ntimeout (Optional[float]): number of seconds for the get to time out,\nwhere None will block until a new item is put onto the queue.", "source": "juraj-google-style"}
{"code": "def get_primitives_paths():\n    primitives_paths = list()\n    entry_points = pkg_resources.iter_entry_points('mlprimitives')\n    for entry_point in entry_points:\n        if (entry_point.name == 'jsons_path'):\n            path = entry_point.load()\n            primitives_paths.append(path)\n    return (_PRIMITIVES_PATHS + primitives_paths)", "docstring": "Get the list of folders where the primitives will be looked for.\n\nThis list will include the value of any `entry_point` named `jsons_path` published under\nthe name `mlprimitives`.\n\nAn example of such an entry point would be::\n\nentry_points = {\n'mlprimitives': [\n'jsons_path=some_module:SOME_VARIABLE'\n]\n}\n\nwhere the module `some_module` contains a variable such as::\n\nSOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')\n\nReturns:\nlist:\nThe list of folders.", "source": "codesearchnet"}
{"code": "def __ge__(self, other):\n    \n    if not isinstance(other, interface.DateTimeValues):\n      raise ValueError('Other not an instance of DateTimeValues')\n\n    if not isinstance(other, SemanticTime):\n      return False\n\n    return self._SORT_ORDER >= other._SORT_ORDER", "docstring": "Determines if the date time values are greater than or equal to other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are greater than or equal to other.\n\nRaises:\nValueError: if other is not an instance of DateTimeValues.", "source": "juraj-google-style"}
{"code": "def _start_process(self, classpath):\n    cache_dir = self.config['cache-dir']\n    java_flags = self.config['java-flags']\n    iswindows = (os.name == 'nt')\n    Util.mkdir_p(cache_dir)\n    log_path = os.path.join(cache_dir, 'server.log')\n    log = open(log_path, 'w')\n    null = open(os.devnull, 'r')\n    java = os.path.join(self.config['java-home'], 'bin', ('java.exe' if iswindows else 'java'))\n    if (not os.path.exists(java)):\n        raise InvalidJavaPathError(errno.ENOENT, 'No such file or directory', java)\n    elif (not os.access(java, os.X_OK)):\n        raise InvalidJavaPathError(errno.EACCES, 'Permission denied', java)\n    args = (([java, '-cp', (';' if iswindows else ':').join(classpath)] + [a for a in java_flags if a]) + ['-Densime.config={}'.format(self.config.filepath), 'org.ensime.server.Server'])\n    process = subprocess.Popen(args, stdin=null, stdout=log, stderr=subprocess.STDOUT)\n    pid_path = os.path.join(cache_dir, 'server.pid')\n    Util.write_file(pid_path, str(process.pid))\n\n    def on_stop():\n        log.close()\n        null.close()\n        with catch(Exception):\n            os.remove(pid_path)\n    return EnsimeProcess(cache_dir, process, log_path, on_stop)", "docstring": "Given a classpath prepared for running ENSIME, spawns a server process\nin a way that is otherwise agnostic to how the strategy installs ENSIME.\n\nArgs:\nclasspath (list of str): list of paths to jars or directories\n(Within this function the list is joined with a system dependent\npath separator to create a single string argument suitable to\npass to ``java -cp`` as a classpath)\n\nReturns:\nEnsimeProcess: A process handle for the launched server.", "source": "codesearchnet"}
{"code": "def stage_job_resources(self, resources: List[Tuple[str, str, str]], staging_location: Optional[str]=None):\n    if staging_location is None:\n        raise RuntimeError('The staging_location must be specified.')\n    staged_resources = []\n    for file_path, staged_path, sha256 in resources:\n        self.stage_artifact(file_path, FileSystems.join(staging_location, staged_path), sha256)\n        staged_resources.append(staged_path)\n    return staged_resources", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nStages job resources to staging_location.\n\nArgs:\nresources: A list of tuples of local file paths and file names (no\npaths) to be used for staging resources.\nstaging_location: Location to stage the file.\n\nReturns:\nA list of file names (no paths) for the resources staged. All the\nfiles are assumed to be staged at staging_location.\n\nRaises:\nRuntimeError: If files specified are not found or error encountered\nwhile trying to create the resources (e.g., build a setup package).", "source": "github-repos"}
{"code": "def write(self, data, echo=None):\n    if (echo or ((echo is None) and self.echo)):\n        sys.stdout.write(data.decode('latin1'))\n        sys.stdout.flush()\n    self.channel.write(data)", "docstring": "Write data to channel.\n\nArgs:\ndata(bytes): The data to write to the channel.\necho(bool): Whether to echo the written data to stdout.\n\nRaises:\nEOFError: If the channel was closed before all data was sent.", "source": "codesearchnet"}
{"code": "def _slice_params_to_dict(dist, params_event_ndims, slices):\n    override_dict = {}\n    for (param_name, param_event_ndims) in six.iteritems(params_event_ndims):\n        if (param_name not in dist.parameters):\n            raise ValueError('Distribution {} is missing advertised parameter {}'.format(dist, param_name))\n        param = dist.parameters[param_name]\n        if (param is None):\n            continue\n        dtype = None\n        if hasattr(dist, param_name):\n            attr = getattr(dist, param_name)\n            dtype = getattr(attr, 'dtype', None)\n        if (dtype is None):\n            dtype = dist.dtype\n            warnings.warn('Unable to find property getter for parameter Tensor {} on {}, falling back to Distribution.dtype {}'.format(param_name, dist, dtype))\n        param = tf.convert_to_tensor(value=param, dtype=dtype)\n        override_dict[param_name] = _slice_single_param(param, param_event_ndims, slices, dist.batch_shape_tensor())\n    return override_dict", "docstring": "Computes the override dictionary of sliced parameters.\n\nArgs:\ndist: The tfd.Distribution being batch-sliced.\nparams_event_ndims: Per-event parameter ranks, a `str->int` `dict`.\nslices: Slices as received by __getitem__.\n\nReturns:\noverrides: `str->Tensor` `dict` of batch-sliced parameter overrides.", "source": "codesearchnet"}
{"code": "def test_encode(self, base_id: str, expected_context_element: str, expected_sql_expression: str, expected_fhir_path_expression: str, expected_fields_referenced_by_expression: List[str]):\n    error_reporter = fhir_errors.ListErrorReporter()\n    all_resources = list(self.resources.values())\n    encoder = fhir_path_validator.FhirProfileStandardSqlEncoder(unittest.mock.Mock(iter_structure_definitions=lambda: all_resources), primitive_handler.PrimitiveHandler(), error_reporter, options=fhir_path_validator.SqlGenerationOptions(verbose_error_reporting=True))\n    resource = self.resources[f'http:\n    actual_bindings = encoder.encode(resource)\n    self.assertEmpty(error_reporter.warnings)\n    self.assertEmpty(error_reporter.errors)\n    self.assertEqual(actual_bindings[0].element_path, expected_context_element)\n    self.assertEqual(actual_bindings[0].fhir_path_expression, expected_fhir_path_expression)\n    self.assertEqual(actual_bindings[0].fields_referenced_by_expression, expected_fields_referenced_by_expression)\n    self.assertEqual(actual_bindings[0].sql_expression, expected_sql_expression)", "docstring": "Ensures we build the expected constraints to validate a structure definition.\n\nGiven the `base_id` to a structure definition, ensure we generate the\nexpected constraints to validate that structure definition.\n\nArgs:\nbase_id: The structure definition to use in the test.\nexpected_context_element: The expected element_path for the resulting\nconstraint.\nexpected_sql_expression: The expected SQL expression for the resulting\nconstraint.\nexpected_fhir_path_expression: The expected FHIRPath expression for the\nresulting constraint.\nexpected_fields_referenced_by_expression: The expected\nfields_referenced_by_expression for the resulting constraint.", "source": "github-repos"}
{"code": "def append(self, item):\n        \n        if isinstance(item, list):\n            self.extend(item)\n        elif not self:\n            list.append(self, item)\n        elif item.__class__ == self[0].__class__:\n            list.append(self, item)\n        else:\n            raise exceptions.WrongListItemType(item.__class__.__name__,\n                                               self[0].__class__.__name__)", "docstring": "Append one item to the list.\n\nArgs:\nitem: Item to be appended.\n\nRaises:\n:exc:`~.exceptions.WrongListItemType`: If an item has a different\ntype than the first item to be stored.", "source": "juraj-google-style"}
{"code": "def set_value(x, value):\n    value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))\n    if ops.executing_eagerly_outside_functions():\n        x.assign(value)\n    else:\n        with get_graph().as_default():\n            tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])\n            if hasattr(x, '_assign_placeholder'):\n                assign_placeholder = x._assign_placeholder\n                assign_op = x._assign_op\n            else:\n                placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)\n                assign_placeholder = array_ops.placeholder(tf_dtype, shape=placeholder_shape)\n                assign_op = x.assign(assign_placeholder)\n                x._assign_placeholder = assign_placeholder\n                x._assign_op = assign_op\n            get_session().run(assign_op, feed_dict={assign_placeholder: value})", "docstring": "Sets the value of a variable, from a Numpy array.\n\n`backend.set_value` is the complement of `backend.get_value`, and provides\na generic interface for assigning to variables while abstracting away the\ndifferences between TensorFlow 1.x and 2.x semantics.\n\n{snippet}\n\nArgs:\nx: Variable to set to a new value.\nvalue: Value to set the tensor to, as a Numpy array\n(of the same shape).", "source": "github-repos"}
{"code": "def __call__(self, stats):\n        \n        if 'elapsed_time' not in stats:\n            stats['elapsed_time'] = _get_time() - self._start_at\n        self._log.append(stats)\n\n        with tempdir(prefix=self._log_name, dir=self._out_path) as tempd:\n            path = os.path.join(tempd, 'log.json')\n            with open(path, 'w') as f:\n                json.dump(self._log, f, indent=4)\n\n            new_path = os.path.join(self._out_path, self._log_name)\n            shutil.move(path, new_path)", "docstring": "Add training log.\n\nArgs:\nstats (dict): Training log values. The object must be key-value\nstyle and values type must be `float` or `int`. When the object\ndoes not have 'elapsed_time' key, the function set the time\nautomatically. The measurement starts when create new instance.", "source": "juraj-google-style"}
{"code": "def load_pyfile(self, path):\n        \n        with open(path) as config_file:\n            contents = config_file.read()\n            try:\n                exec(compile(contents, path, 'exec'), self)\n            except Exception as e:\n                raise MalformedConfig(path, six.text_type(e))", "docstring": "Load python file as config.\n\nArgs:\npath (string): path to the python file", "source": "juraj-google-style"}
{"code": "def swo_set_emu_buffer_size(self, buf_size):\n    buf = ctypes.c_uint32(buf_size)\n    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_EMU, ctypes.byref(buf))\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Sets the size of the buffer used by the J-Link to collect SWO data.\n\nArgs:\nself (JLink): the ``JLink`` instance\nbuf_size (int): the new size of the emulator buffer\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def __init__(self, y_tensor=None):\n    self._uuid = uuid.uuid4().hex\n    _gradient_debuggers[self._uuid] = self\n    self._gradient_tensors = {}\n    self._y_tensor = y_tensor\n    self._graph = None\n    if y_tensor:\n        self._graph = y_tensor.graph\n    self._is_active_context = False", "docstring": "Constructor of GradientsDebugger.\n\nArgs:\ny_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor\non the numerator of the differentiation.", "source": "github-repos"}
{"code": "def _resize_image_if_necessary(image_fobj, target_pixels=None):\n  \n  if target_pixels is None:\n    return image_fobj\n\n  cv2 = tfds.core.lazy_imports.cv2\n  \n  image = cv2.imdecode(\n      np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)\n  \n  height, width, _ = image.shape\n  actual_pixels = height * width\n  if actual_pixels > target_pixels:\n    factor = np.sqrt(target_pixels / actual_pixels)\n    image = cv2.resize(image, dsize=None, fx=factor, fy=factor)\n  \n  _, buff = cv2.imencode(\".jpg\", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])\n  return io.BytesIO(buff.tostring())", "docstring": "Resize an image to have (roughly) the given number of target pixels.\n\nArgs:\nimage_fobj: File object containing the original image.\ntarget_pixels: If given, number of pixels that the image must have.\n\nReturns:\nA file object.", "source": "juraj-google-style"}
{"code": "def length_squared(x, keep_dims=False, name=None, reduction_dim=None):\n    with tf.name_scope(name, 'length_squared', [x]) as scope:\n        x = tf.convert_to_tensor(x, name='x')\n        if (not reduction_dim):\n            reduction_dim = _last_index(x, 1)\n        return tf.reduce_sum(tf.square(x), reduction_dim, keep_dims=keep_dims, name=scope)", "docstring": "Computes the squared length of x.\n\nArgs:\nx: A tensor.\nkeep_dims: If true, reduction does not change the rank of the input.\nname: Optional name for this op.\nreduction_dim: The dimension to reduce, by default choose the last one\nand if no shape is specified guess 1.\nReturns:\nThe squared length of x.", "source": "codesearchnet"}
{"code": "def download_to_tempfile(url, file_name=None, extension=None):\n    if (not file_name):\n        file_name = generate_timestamped_string('wtf_temp_file')\n    if extension:\n        file_path = temp_path((file_name + extension))\n    else:\n        ext = ''\n        try:\n            ext = re.search(u'\\\\.\\\\w+$', file_name).group(0)\n        except:\n            pass\n        file_path = temp_path((file_name + ext))\n    webFile = urllib.urlopen(url)\n    localFile = open(file_path, 'w')\n    localFile.write(webFile.read())\n    webFile.close()\n    localFile.close()\n    return file_path", "docstring": "Downloads a URL contents to a tempfile.  This is useful for testing downloads.\nIt will download the contents of a URL to a tempfile, which you then can\nopen and use to validate the downloaded contents.\n\nArgs:\nurl (str) : URL of the contents to download.\n\nKwargs:\nfile_name (str): Name of file.\nextension (str): Extension to use.\n\nReturn:\nstr - Returns path to the temp file.", "source": "codesearchnet"}
{"code": "def _list(self, dir_or_prefix):\n    try:\n        for path, (size, updated) in self._gcsIO().list_files(dir_or_prefix, with_metadata=True):\n            yield FileMetadata(path, size, updated)\n    except Exception as e:\n        raise BeamIOError('List operation failed', {dir_or_prefix: e})", "docstring": "List files in a location.\n\nListing is non-recursive, for filesystems that support directories.\n\nArgs:\ndir_or_prefix: (string) A directory or location prefix (for filesystems\nthat don't have directories).\n\nReturns:\nGenerator of ``FileMetadata`` objects.\n\nRaises:\n``BeamIOError``: if listing fails, but not if no files were found.", "source": "github-repos"}
{"code": "def verify(self, obj):\n        \n\n        if not isinstance(obj, str):\n            raise ValidationError(\"Object is not a string\", reason='object is not a string',\n                                  object=obj, type=type(obj), str_type=str)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def add(self, arg, options=None):\n    \n    fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options))\n    todo = self._queues.get(options)\n    if todo is None:\n      utils.logging_debug('AutoBatcher(%s): creating new queue for %r',\n                          self._todo_tasklet.__name__, options)\n      if not self._queues:\n        eventloop.add_idle(self._on_idle)\n      todo = self._queues[options] = []\n    todo.append((fut, arg))\n    if len(todo) >= self._limit:\n      del self._queues[options]\n      self.run_queue(options, todo)\n    return fut", "docstring": "Adds an arg and gets back a future.\n\nArgs:\narg: one argument for _todo_tasklet.\noptions: rpc options.\n\nReturn:\nAn instance of future, representing the result of running\n_todo_tasklet without batching.", "source": "juraj-google-style"}
{"code": "def filter_publication(publication, cmp_authors=True):\n    query = None\n    isbn_query = False\n    if (publication.optionals and publication.optionals.ISBN):\n        query = aleph.ISBNQuery(publication.optionals.ISBN)\n        isbn_query = True\n    else:\n        query = aleph.TitleQuery(publication.title)\n    result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), '')\n    if (not result.records):\n        return publication\n    if isbn_query:\n        for record in result.records:\n            epub = record.epublication\n            if (compare_names(epub.nazev, publication.title) >= 80):\n                return None\n        return publication\n    for record in result.records:\n        epub = record.epublication\n        if (not (compare_names(epub.nazev, publication.title) >= 80)):\n            continue\n        if (not cmp_authors):\n            return None\n        for author in epub.autori:\n            author_str = ('%s %s %s' % (author.firstName, author.lastName, author.title))\n            pub_authors = map((lambda x: x.name), publication.authors)\n            if (type(pub_authors) not in [list, tuple, set]):\n                pub_authors = [pub_authors]\n            for pub_author in pub_authors:\n                if (compare_names(author_str, pub_author) >= 50):\n                    return None\n    return publication", "docstring": "Filter publications based at data from Aleph.\n\nArgs:\npublication (obj): :class:`.Publication` instance.\n\nReturns:\nobj/None: None if the publication was found in Aleph or `publication` \\\nif not.", "source": "codesearchnet"}
{"code": "def indicator_body(indicators):\n    hash_patterns = {'md5': re.compile('^([a-fA-F\\\\d]{32})$'), 'sha1': re.compile('^([a-fA-F\\\\d]{40})$'), 'sha256': re.compile('^([a-fA-F\\\\d]{64})$')}\n    body = {}\n    for indicator in indicators:\n        if (indicator is None):\n            continue\n        if hash_patterns['md5'].match(indicator):\n            body['md5'] = indicator\n        elif hash_patterns['sha1'].match(indicator):\n            body['sha1'] = indicator\n        elif hash_patterns['sha256'].match(indicator):\n            body['sha256'] = indicator\n    return body", "docstring": "Generate the appropriate dictionary content for POST of an File indicator\n\nArgs:\nindicators (list): A list of one or more hash value(s).", "source": "codesearchnet"}
{"code": "def horizontal_infrared_radiation_intensity(self, value=9999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `horizontal_infrared_radiation_intensity`'.format(value))\n            if value < 0.0:\n                raise ValueError(\n                    'value need to be greater or equal 0.0 '\n                    'for field `horizontal_infrared_radiation_intensity`')\n\n        self._horizontal_infrared_radiation_intensity = value", "docstring": "Corresponds to IDD Field `horizontal_infrared_radiation_intensity`\n\nArgs:\nvalue (float): value for IDD Field `horizontal_infrared_radiation_intensity`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def noisy_wrap(__func: Callable) -> Callable:\n\n    def wrapper(*args, **kwargs):\n        DebugPrint.enable()\n        try:\n            __func(*args, **kwargs)\n        finally:\n            DebugPrint.disable()\n    return wrapper", "docstring": "Decorator to enable DebugPrint for a given function.\n\nArgs:\n__func: Function to wrap\nReturns:\nWrapped function", "source": "codesearchnet"}
{"code": "def _get_type_name(type_):\n    \n    \n    name = repr(type_)\n    if name.startswith(\"<\"):\n        name = getattr(type_, \"__qualname__\", getattr(type_, \"__name__\", \"\"))\n    return name.rsplit(\".\", 1)[-1] or repr(type_)", "docstring": "Return a displayable name for the type.\n\nArgs:\ntype_: A class object.\n\nReturns:\nA string value describing the class name that can be used in a natural\nlanguage sentence.", "source": "juraj-google-style"}
{"code": "def sam2rnf(args):\n    rnftools.mishmash.Source.recode_sam_reads(sam_fn=args.sam_fn, fastq_rnf_fo=args.fq_fo, fai_fo=args.fai_fo, genome_id=args.genome_id, number_of_read_tuples=(10 ** 9), simulator_name=args.simulator_name, allow_unmapped=args.allow_unmapped)", "docstring": "Convert SAM to RNF-based FASTQ with respect to argparse parameters.\n\nArgs:\nargs (...): Arguments parsed by argparse", "source": "codesearchnet"}
{"code": "def _get_context():\n    try:\n        import google.colab\n        import IPython\n    except ImportError:\n        pass\n    else:\n        if (IPython.get_ipython() is not None):\n            return _CONTEXT_COLAB\n    try:\n        import IPython\n    except ImportError:\n        pass\n    else:\n        ipython = IPython.get_ipython()\n        if ((ipython is not None) and ipython.has_trait('kernel')):\n            return _CONTEXT_IPYTHON\n    return _CONTEXT_NONE", "docstring": "Determine the most specific context that we're in.\n\nReturns:\n_CONTEXT_COLAB: If in Colab with an IPython notebook context.\n_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook\ncontext (e.g., from running `jupyter notebook` at the command\nline).\n_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the\ncommand-line or using the `ipython` interactive shell).", "source": "codesearchnet"}
{"code": "def __init__(self, max_age, client_chunksize=25, **kwargs):\n    \n    super(IterateAllClients, self).__init__(**kwargs)\n    self.client_chunksize = client_chunksize\n    self.max_age = max_age", "docstring": "Iterate over all clients in a threadpool.\n\nArgs:\nmax_age: Maximum age in seconds of clients to check.\nclient_chunksize: A function to call with each client urn.\n**kwargs: Arguments passed to init.", "source": "juraj-google-style"}
{"code": "def plot_accuracy(data, output_dir_path='.', output_filename='accuracy.png',\n                  width=10, height=8):\n    \n    output_path = os.path.join(output_dir_path, output_filename)\n\n    max_val_data = get_epoch_max_val_acc(data)\n    max_val_label = round(max_val_data['acc'].values[0], 4)\n\n    \n    max_epoch_data = data[data['epoch'] == data['epoch'].max()]\n\n    plot = ggplot(data, aes('epoch', 'acc', color='factor(data)')) + \\\n        geom_line(size=1, show_legend=False) + \\\n        geom_vline(aes(xintercept='epoch', color='data'),\n                   data=max_val_data, alpha=0.5, show_legend=False) + \\\n        geom_label(aes('epoch', 'acc'), data=max_val_data,\n                   label=max_val_label, nudge_y=-0.02, va='top', label_size=0,\n                   show_legend=False) + \\\n        geom_text(aes('epoch', 'acc', label='data'), data=max_epoch_data,\n                  nudge_x=2, ha='center', show_legend=False) + \\\n        geom_point(aes('epoch', 'acc'), data=max_val_data,\n                   show_legend=False) + \\\n        labs(y='Accuracy', x='Epochs') + \\\n        theme_bw(base_family='Arial', base_size=15) + \\\n        scale_color_manual(['\n\n    plot.save(output_path, width=width, height=height)", "docstring": "Plot accuracy.\nArgs:\ndata: Panda dataframe in *the* format.", "source": "juraj-google-style"}
{"code": "def on_message(self, fragment):\n        \n\n        \n        \n        \n\n        try:\n            message = yield self._receive(fragment)\n        except Exception as e:\n            \n            \n            log.error(\"Unhandled exception receiving a message: %r: %r\", e, fragment, exc_info=True)\n            self._internal_error(\"server failed to parse a message\")\n\n        try:\n            if message:\n                if _message_test_port is not None:\n                    _message_test_port.received.append(message)\n                work = yield self._handle(message)\n                if work:\n                    yield self._schedule(work)\n        except Exception as e:\n            log.error(\"Handler or its work threw an exception: %r: %r\", e, message, exc_info=True)\n            self._internal_error(\"server failed to handle a message\")\n\n        raise gen.Return(None)", "docstring": "Process an individual wire protocol fragment.\n\nThe websocket RFC specifies opcodes for distinguishing text frames\nfrom binary frames. Tornado passes us either a text or binary string\ndepending on that opcode, we have to look at the type of the fragment\nto see what we got.\n\nArgs:\nfragment (unicode or bytes) : wire fragment to process", "source": "juraj-google-style"}
{"code": "def union(self, other):\n        \n        if not hasattr(other, \"__iter__\"):\n            other = [other]\n        bounds = self.bounds[:]\n        for range in other:\n            bounds += range.bounds\n\n        bounds = self._union(bounds)\n        range = VersionRange(None)\n        range.bounds = bounds\n        return range", "docstring": "OR together version ranges.\n\nCalculates the union of this range with one or more other ranges.\n\nArgs:\nother: VersionRange object (or list of) to OR with.\n\nReturns:\nNew VersionRange object representing the union.", "source": "juraj-google-style"}
{"code": "def write8(self, offset, value):\n    if (not isinstance(offset, (int, long))):\n        raise TypeError('Invalid offset type, should be integer.')\n    if (not isinstance(value, (int, long))):\n        raise TypeError('Invalid value type, should be integer.')\n    if ((value < 0) or (value > 255)):\n        raise ValueError('Value out of bounds.')\n    offset = self._adjust_offset(offset)\n    self._validate_offset(offset, 1)\n    self.mapping[offset:(offset + 1)] = struct.pack('B', value)", "docstring": "Write 8-bits to the specified `offset` in bytes, relative to the\nbase physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\nvalue (int, long): 8-bit value to write.\n\nRaises:\nTypeError: if `offset` or `value` type are invalid.\nValueError: if `offset` or `value` are out of bounds.", "source": "codesearchnet"}
{"code": "def file_create(filename, settings):\n        \n        if len(settings) != 1:\n            raise ValueError(\"Settings must only contain one item with key \"\n                             \"'content'.\")\n        for k, v in settings.items():\n            if k == \"content\":\n                with open(filename, 'w') as f:\n                    f.write(v)", "docstring": "Creates a file.\n\nArgs:\nfilename (str): Filename.\nsettings (dict): Must be {\"content\": actual_content}", "source": "juraj-google-style"}
{"code": "def merge(cls, schema_list: Sequence['Schema'], name: Optional[str]=None, description: Optional[str]=None) -> 'Schema':\n    fields = {}\n    kw_field = None\n    for schema in schema_list:\n        for key, field in schema.fields.items():\n            if key.is_const:\n                if key not in fields or (field.origin is not None and fields[key].origin is not None and issubclass(field.origin, fields[key].origin)):\n                    fields[key] = field\n            elif kw_field is None:\n                kw_field = field\n    if kw_field is not None:\n        fields[kw_field.key] = kw_field\n    return Schema(list(fields.values()), name=name, description=description, allow_nonconst_keys=True)", "docstring": "Merge multiple schemas into one.\n\nFor fields shared by multiple schemas, the first appeared onces will be\nused in the merged schema.\n\nArgs:\nschema_list: A list of schemas to merge.\nname: (Optional) name of the merged schema.\ndescription: (Optinoal) description of the schema.\n\nReturns:\nThe merged schema.", "source": "github-repos"}
{"code": "def run_independently(self, op):\n    self._independent_ops.append(op)\n    op._set_attr('_independent_side_effects', attr_value_pb2.AttrValue(b=True))", "docstring": "Marks the given op as independent.\n\nOverrides any other rule for the op.\n\nIndependent ops are guaranteed to execute before the return values, but\nare allowed to run in parallel with everything else. Use in programs which\ncan guarantee that an op has side effects that don't affect any other op.\n\nArgs:\nop: An operation", "source": "github-repos"}
{"code": "def set_function_defaults(self, node: 'cfg.CFGNode', defaults_var: 'cfg.Variable') -> None:\n    defaults = self._extract_defaults(defaults_var)\n    if defaults is None:\n        defaults = [self.ctx.new_unsolvable(node) for _ in self.signature.param_names]\n    defaults = dict(zip(self.signature.param_names[-len(defaults):], defaults))\n    self.signature.defaults = defaults", "docstring": "Attempts to set default arguments of a function.\n\nIf defaults_var is not an unambiguous tuple (i.e. one that can be processed\nby abstract_utils.get_atomic_python_constant), every argument is made\noptional and a warning is issued. This function emulates __defaults__.\n\nArgs:\nnode: The node where default arguments are being set. Needed if we cannot\nget a useful value from defaults_var.\ndefaults_var: a Variable with a single binding to a tuple of default\nvalues.", "source": "github-repos"}
{"code": "def getFileKeys(self):\n    files = self.getFileObjects()\n    files_list = []\n    for (key, value) in files.iteritems():\n        if value:\n            files_list.append(key)\n    return files_list", "docstring": "Retrieve a list of file keys that have been read into the database.\n\nThis is a utility method that can be used to programmatically access the GsshaPy file objects. Use these keys\nin conjunction with the dictionary returned by the getFileObjects method.\n\nReturns:\nlist: List of keys representing file objects that have been read into the database.", "source": "codesearchnet"}
{"code": "def delete_by_path(self, path):\n        \n        if not os.path.exists(path):\n            raise IOError(\"Unknown path '%s'!\" % path)\n\n        if not path.startswith(self.path):\n            raise IOError(\n                \"Path '%s' is not in the root of the storage ('%s')!\" % (\n                    path,\n                    self.path\n                )\n            )\n\n        if os.path.isfile(path):\n            os.unlink(path)\n            return self._recursive_remove_blank_dirs(path)\n\n        shutil.rmtree(path)\n        self._recursive_remove_blank_dirs(path)", "docstring": "Delete file/directory identified by `path` argument.\n\nWarning:\n`path` have to be in :attr:`path`.\n\nArgs:\npath (str): Path of the file / directory you want to remove.\n\nRaises:\nIOError: If the file / directory doesn't exists, or is not in \\\n:attr:`path`.", "source": "juraj-google-style"}
{"code": "def mean(data, n=3, **kwargs):\n    if (len(data[(- n):]) < n):\n        forecast = np.nan\n    else:\n        forecast = np.mean(data[(- n):])\n    return forecast", "docstring": "The mean forecast for the next point is the mean value of the previous ``n`` points in\nthe series.\n\nArgs:\ndata (np.array): Observed data, presumed to be ordered in time.\nn (int): period over which to calculate the mean\n\nReturns:\nfloat: a single-valued forecast for the next value in the series.", "source": "codesearchnet"}
{"code": "def Add(self, path, age=None):\n    \n    if not isinstance(path, string_types):\n      raise ValueError(\"Only strings should be added to a URN.\")\n\n    result = rdfvalue.RDFURN(self.Copy(age))\n    result.Update(path=utils.JoinPath(self._string_urn, path))\n\n    return result", "docstring": "Add a relative stem to the current value and return a new RDFURN.\n\nNote that this returns an RDFURN, not a ClientURN since the resulting object\nwould not pass validation.\n\nArgs:\npath: A string containing a relative path.\nage: The age of the object. If None set to current time.\n\nReturns:\nA new RDFURN that can be chained.\n\nRaises:\nValueError: if the path component is not a string.", "source": "juraj-google-style"}
{"code": "def import_extension_module(ext_name):\n    import importlib\n    try:\n        return importlib.import_module(('.' + ext_name), 'nnabla_ext')\n    except ImportError as e:\n        from nnabla import logger\n        logger.error('Extension `{}` does not exist.'.format(ext_name))\n        raise e", "docstring": "Import an extension module by name.\n\nThe extension modules are installed under the `nnabla_ext` package as\nnamespace packages. All extension modules provide a unified set of APIs.\n\nArgs:\next_name(str): Extension name. e.g. 'cpu', 'cuda', 'cudnn' etc.\n\nReturns: module\nAn Python module of a particular NNabla extension.\n\nExample:\n\n.. code-block:: python\n\next = import_extension_module('cudnn')\navailable_devices = ext.get_devices()\nprint(available_devices)\next.device_synchronize(available_devices[0])\next.clear_memory_cache()", "source": "codesearchnet"}
{"code": "def decode_event(abi: ABI, log_: Dict) -> Dict:\n    \n    if isinstance(log_['topics'][0], str):\n        log_['topics'][0] = decode_hex(log_['topics'][0])\n    elif isinstance(log_['topics'][0], int):\n        log_['topics'][0] = decode_hex(hex(log_['topics'][0]))\n    event_id = log_['topics'][0]\n    events = filter_by_type('event', abi)\n    topic_to_event_abi = {\n        event_abi_to_log_topic(event_abi): event_abi\n        for event_abi in events\n    }\n    event_abi = topic_to_event_abi[event_id]\n    return get_event_data(event_abi, log_)", "docstring": "Helper function to unpack event data using a provided ABI\n\nArgs:\nabi: The ABI of the contract, not the ABI of the event\nlog_: The raw event data\n\nReturns:\nThe decoded event", "source": "juraj-google-style"}
{"code": "def _serialize_quadratic_biases(quadratic, edgelist):\n    quadratic_list = [(quadratic[(u, v)] if ((u, v) in quadratic) else quadratic[(v, u)]) for (u, v) in edgelist]\n    quadratic_bytes = struct.pack(('<' + ('d' * len(quadratic))), *quadratic_list)\n    return base64.b64encode(quadratic_bytes).decode('utf-8')", "docstring": "Serializes the quadratic biases.\n\nArgs:\nquadratic (dict): a dict of the form {edge1: bias1, ...} where\neach edge is of the form (node1, node2).\nedgelist (list): a list of the form [(node1, node2), ...].\n\nReturns:\nstr: base 64 encoded string of little endian 8 byte floats,\none for each of the edges in quadratic. Ordered by edgelist.\n\nExample:\n>>> _serialize_quadratic_biases({(0, 1): -1, (1, 2): 1, (0, 2): .4},\n...                             [(0, 1), (1, 2), (0, 2)])\n'AAAAAAAA8L8AAAAAAADwP5qZmZmZmdk/'", "source": "codesearchnet"}
{"code": "def __init__(self, hooks=None, scaffold=None, master='', config=None, checkpoint_dir=None, stop_grace_period_secs=120, checkpoint_filename_with_path=None):\n    session_creator = ChiefSessionCreator(scaffold=scaffold, master=master, config=config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n    super(SingularMonitoredSession, self).__init__(session_creator, hooks, should_recover=False, stop_grace_period_secs=stop_grace_period_secs)", "docstring": "Creates a SingularMonitoredSession.\n\nArgs:\nhooks: An iterable of `SessionRunHook' objects.\nscaffold: A `Scaffold` used for gathering or building supportive ops. If\nnot specified a default one is created. It's used to finalize the graph.\nmaster: `String` representation of the TensorFlow master to use.\nconfig: `ConfigProto` proto used to configure the session.\ncheckpoint_dir: A string.  Optional path to a directory where to restore\nvariables.\nstop_grace_period_secs: Number of seconds given to threads to stop after\n`close()` has been called.\ncheckpoint_filename_with_path: A string. Optional path to a checkpoint\nfile from which to restore variables.", "source": "github-repos"}
{"code": "def server(self, value):\n        \n        self._server = value\n        self._connectionXML.set('server', value)", "docstring": "Set the connection's server property.\n\nArgs:\nvalue:  New server. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def SetServerInformation(self, server, port):\n    \n    self._host = server\n    self._port = port", "docstring": "Sets the server information.\n\nArgs:\nserver (str): hostname or IP address of the database server.\nport (int): port number of the database server.", "source": "juraj-google-style"}
{"code": "def _get_longest_diag_dict(input_matrix, nonzero_idx):\n    visited = set()\n    diags = {}\n    for idx in nonzero_idx:\n        start_idx = torch.clone(idx)\n        tuple_start_idx = tuple(start_idx.tolist())\n        if tuple_start_idx in visited:\n            continue\n        visited.add(tuple_start_idx)\n        cur_diag_len = 1\n        start_idx += 1\n        while start_idx[0] < input_matrix.shape[0] and start_idx[1] < input_matrix.shape[1]:\n            tuple_start_idx = tuple(start_idx.tolist())\n            visited.add(tuple_start_idx)\n            if input_matrix[start_idx[0], start_idx[1]] == 1:\n                cur_diag_len += 1\n                start_idx += 1\n            else:\n                break\n        diags[idx] = cur_diag_len\n    return diags", "docstring": "Calculates the length of the longest diagonal sequence in a given matrix.\nArgs:\ninput_matrix (torch.Tensor): The input matrix.\nnonzero_idx (torch.Tensor): The indices of the non-zero elements in the matrix.\nReturns:\ndict: A dictionary where the keys are the indices of the non-zero elements and the values are the lengths of the longest diagonal sequences starting from those indices.", "source": "github-repos"}
{"code": "def _get_scalars_plugin(self):\n    if (scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance):\n        return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME]\n    return None", "docstring": "Tries to get the scalars plugin.\n\nReturns:\nThe scalars plugin. Or None if it is not yet registered.", "source": "codesearchnet"}
{"code": "def get_key_pair(self, alias_name):\n    uri = ((self.URI + '/keypair/') + alias_name)\n    return self._client.get(uri)", "docstring": "Retrieves the public and private key pair associated with the specified alias name.\n\nArgs:\nalias_name: Key pair associated with the RabbitMQ\n\nReturns:\ndict: RabbitMQ certificate", "source": "codesearchnet"}
{"code": "def get_config(self, config='running-config', params=None, as_string=False):\n    if (config not in ['startup-config', 'running-config']):\n        raise TypeError('invalid config name specified')\n    command = ('show %s' % config)\n    if params:\n        command += (' %s' % params)\n    result = self.run_commands(command, 'text')\n    if as_string:\n        return str(result[0]['output']).strip()\n    return str(result[0]['output']).split('\\n')", "docstring": "Retreives the config from the node\n\nThis method will retrieve the config from the node as either a string\nor a list object.  The config to retrieve can be specified as either\nthe startup-config or the running-config.\n\nArgs:\nconfig (str): Specifies to return either the nodes startup-config\nor running-config.  The default value is the running-config\nparams (str): A string of keywords to append to the command for\nretrieving the config.\nas_string (boo): Flag that determines the response.  If True, then\nthe configuration is returned as a raw string.  If False, then\nthe configuration is returned as a list.  The default value is\nFalse\n\nReturns:\nThis method will return either a string or a list depending on the\nstates of the as_string keyword argument.\n\nRaises:\nTypeError: If the specified config is not one of either\n'running-config' or 'startup-config'", "source": "codesearchnet"}
{"code": "def call(self, hidden_states, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    cross_attn_present_key_value = None\n    cross_attn_weights = None\n    if encoder_hidden_states is not None:\n        residual = hidden_states\n        hidden_states = self.encoder_attn_layer_norm(hidden_states)\n        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n        hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, training=training)\n        hidden_states = self.dropout(hidden_states, training=training)\n        hidden_states = residual + hidden_states\n        present_key_value = present_key_value + cross_attn_present_key_value\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nencoder_hidden_states (`tf.Tensor`):\ncross attention input to the layer of shape `(batch, seq_len, embed_dim)`\nencoder_attention_mask (`tf.Tensor`): encoder attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n`(decoder_attention_heads,)`\ncross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.\n`(decoder_attention_heads,)`\npast_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states", "source": "github-repos"}
{"code": "def delete(self, resource, timeout=-1):\n        \n        self._client.delete(resource=resource, timeout=timeout)", "docstring": "Delete all the labels for a resource.\n\nArgs:\nresource (dict): Object to delete.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.", "source": "juraj-google-style"}
{"code": "def mutual_information(state, d0, d1=None):\n    \n    if d1 is None:\n        d1 = int(len(state) / d0)\n    mi = entropy(partial_trace(state, [0], dimensions=[d0, d1]))\n    mi += entropy(partial_trace(state, [1], dimensions=[d0, d1]))\n    mi -= entropy(state)\n    return mi", "docstring": "Compute the mutual information of a bipartite state.\n\nArgs:\nstate (array_like): a bipartite state-vector or density-matrix.\nd0 (int): dimension of the first subsystem.\nd1 (int or None): dimension of the second subsystem.\n\nReturns:\nfloat: The mutual information S(rho_A) + S(rho_B) - S(rho_AB).", "source": "juraj-google-style"}
{"code": "def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim, device='cpu', dtype=torch.float32):\n    position = torch.arange(0, sequence_length, dtype=dtype, device=device).unsqueeze(1)\n    index = torch.arange(0, embedding_dim, 2, dtype=dtype, device=device).unsqueeze(0)\n    div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))\n    pos_encoding = position * div_term\n    pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)\n    pos_encoding = pos_encoding.unsqueeze(dim=0).repeat(batch_size, 1, 1)\n    return pos_encoding", "docstring": "Generate positional encodings\n\nArgs:\nsequence_length (int): Sequence length\nembedding_dim (int): Embedding dimension\n\nReturns:\ntorch.Tensor: Positional encodings.", "source": "github-repos"}
{"code": "def clown_strike_ioc(self, ioc):\n    r = requests.get('http:\n    self._output(r.text)", "docstring": "Performs Clown Strike lookup on an IoC.\n\nArgs:\nioc - An IoC.", "source": "codesearchnet"}
{"code": "def cos(x):\n    if any_symbolic_tensors((x,)):\n        return Cos().symbolic_call(x)\n    return backend.numpy.cos(x)", "docstring": "Cosine, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe corresponding cosine values.", "source": "github-repos"}
{"code": "def reply_code_tuple(code: int) -> Tuple[int, int, int]:\n    \n    return code", "docstring": "Return the reply code as a tuple.\n\nArgs:\ncode: The reply code.\n\nReturns:\nEach item in the tuple is the digit.", "source": "juraj-google-style"}
{"code": "def _parse_unknown_block_line(self, instrumentation_block, line):\n    if line.startswith(_InstrumentationStructurePrefixes.STATUS):\n        return self._parse_method_block_line(self._transition_instrumentation_block(instrumentation_block, new_state=_InstrumentationBlockStates.METHOD), line)\n    elif line.startswith(_InstrumentationStructurePrefixes.RESULT) or _InstrumentationStructurePrefixes.FAILED in line:\n        return self._parse_result_block_line(self._transition_instrumentation_block(instrumentation_block, new_state=_InstrumentationBlockStates.RESULT), line)\n    else:\n        instrumentation_block.add_value(line)\n        return instrumentation_block", "docstring": "Parses a line from the instrumentation output from the UNKNOWN\nparser state.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, the current\ninstrumenation block, where the correct categorization it noti\nyet known.\nline: string, the raw instrumenation output line to be used to\ndeteremine the correct categorization.\n\nReturns:\nThe next instrumentation block to continue parsing with. Usually,\nthis is the same instrumentation block but with the state\ntransitioned appropriately.", "source": "github-repos"}
{"code": "def create(cls, session, attributes=None, relationships=None):\n        \n        resource_type = cls._resource_type()\n        resource_path = cls._resource_path()\n        url = session._build_url(resource_path)\n        json = build_request_body(resource_type, None,\n                                  attributes=attributes,\n                                  relationships=relationships)\n        process = cls._mk_one(session)\n        return session.post(url, CB.json(201, process), json=json)", "docstring": "Create a resource of the resource.\n\nThis should only be called from sub-classes\n\nArgs:\n\nsession(Session): The session to create the resource in.\n\nattributes(dict): Any attributes that are valid for the\ngiven resource type.\n\nrelationships(dict): Any relationships that are valid for the\ngiven resource type.\n\nReturns:\n\nResource: An instance of a resource.", "source": "juraj-google-style"}
{"code": "def cross_product(p1, p2, o=(0, 0)):\n    \n    v1 = vector(o, p1)\n    v2 = vector(o, p2)\n    return v1[0] * v2[1] - v1[1] * v2[0]", "docstring": "Returns cross product\nArgs:\np1, p2: point (x, y)\no: origin", "source": "juraj-google-style"}
{"code": "def interpolate(hidden_states, ratio):\n    batch_size, time_length, classes_num = hidden_states.shape\n    upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1)\n    upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num)\n    return upsampled", "docstring": "Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN.\n\nArgs:\nhidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)):\nInput hidden states\nratio (`int`):\nThe ratio of the length of the output to the length of the input.", "source": "github-repos"}
{"code": "def get_source_url(obj):\n    source_env_prefix = obj.context.config['source_env_prefix']\n    task = obj.task\n    log.debug('Getting source url for {} {}...'.format(obj.name, obj.task_id))\n    repo = get_repo(obj.task, source_env_prefix=source_env_prefix)\n    source = task['metadata']['source']\n    if (repo and (not verify_repo_matches_url(repo, source))):\n        raise CoTError(\"{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!\".format(name=obj.name, task_id=obj.task_id, source_env_prefix=source_env_prefix, repo=repo, source=source))\n    log.info('{} {}: found {}'.format(obj.name, obj.task_id, source))\n    return source", "docstring": "Get the source url for a Trust object.\n\nArgs:\nobj (ChainOfTrust or LinkOfTrust): the trust object to inspect\n\nRaises:\nCoTError: if repo and source are defined and don't match\n\nReturns:\nstr: the source url.", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration\n\n>>> model = FlaxBartForConditionalGeneration.from_pretrained(\"facebook/bart-large-cnn\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/bart-large-cnn\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = 
jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def _add_data_types_and_routes_to_api(self, namespace, desc):\n        \n\n        env = self._get_or_create_env(namespace.name)\n\n        for item in desc:\n            if isinstance(item, AstTypeDef):\n                api_type = self._create_type(env, item)\n                namespace.add_data_type(api_type)\n                self._check_canonical_name_available(item, namespace.name)\n            elif isinstance(item, AstStructPatch) or isinstance(item, AstUnionPatch):\n                \n                base_name = self._get_base_name(item.name, namespace.name)\n                self._patch_data_by_canonical_name[base_name] = (item, namespace)\n            elif isinstance(item, AstRouteDef):\n                route = self._create_route(env, item)\n                namespace.add_route(route)\n                self._check_canonical_name_available(item, namespace.name, allow_duplicate=True)\n            elif isinstance(item, AstImport):\n                \n                pass\n            elif isinstance(item, AstAlias):\n                alias = self._create_alias(env, item)\n                namespace.add_alias(alias)\n                self._check_canonical_name_available(item, namespace.name)\n            elif isinstance(item, AstAnnotationDef):\n                annotation = self._create_annotation(env, item)\n                namespace.add_annotation(annotation)\n                self._check_canonical_name_available(item, namespace.name)\n            elif isinstance(item, AstAnnotationTypeDef):\n                annotation_type = self._create_annotation_type(env, item)\n                namespace.add_annotation_type(annotation_type)\n                self._check_canonical_name_available(item, namespace.name)\n            else:\n                raise AssertionError('Unknown AST node type %r' %\n                                     item.__class__.__name__)", "docstring": "From the raw output of the parser, create forward references for each\nuser-defined type (struct, union, route, and alias).\n\nArgs:\nnamespace (stone.api.Namespace): Namespace for definitions.\ndesc (List[stone.stone.parser._Element]): All AST nodes in a spec\nfile in the order they were defined. Should not include a\nnamespace declaration.", "source": "juraj-google-style"}
{"code": "def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None):\n    pixel_values_videos = pixel_values_videos.type(self.visual.dtype)\n    video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)\n    return video_embeds", "docstring": "Encodes videos into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input videos.\nvideo_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):\nThe temporal, height and width of feature shape of each video in LLM.", "source": "github-repos"}
{"code": "def _get_numeric_sort_key_fn(self, table_numeric_values, value):\n    if not table_numeric_values:\n        return None\n    all_values = list(table_numeric_values.values())\n    all_values.append(value)\n    try:\n        return get_numeric_sort_key_fn(all_values)\n    except ValueError:\n        return None", "docstring": "Returns the sort key function for comparing value to table values. The function returned will be a suitable\ninput for the key param of the sort(). See number_annotation_utils._get_numeric_sort_key_fn for details\n\nArgs:\ntable_numeric_values: Numeric values of a column\nvalue: Numeric value in the question\n\nReturns:\nA function key function to compare column and question values.", "source": "github-repos"}
{"code": "def render(self, link_url, image_url, **kwargs):\n        \n        path = '%s/render' % self.path\n        data = {'link_url': link_url, 'image_url': image_url}\n        return self.gitlab.http_get(path, data, **kwargs)", "docstring": "Preview link_url and image_url after interpolation.\n\nArgs:\nlink_url (str): URL of the badge link\nimage_url (str): URL of the badge image\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabRenderError: If the rendering failed\n\nReturns:\ndict: The rendering properties", "source": "juraj-google-style"}
{"code": "def to_json_string(self) -> str:\n    dictionary = self.to_dict()\n    return json.dumps(dictionary, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nReturns:\n`str`: String containing all the attributes that make up this feature_extractor instance in JSON format.", "source": "github-repos"}
{"code": "def generate(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/generate\"\n        return self._client.get(uri)", "docstring": "Generates and returns a random range.\n\nArgs:\nid_or_uri:\nID or URI of range.\n\nReturns:\ndict: A dict containing a list with IDs.", "source": "juraj-google-style"}
{"code": "def decode(self, probs, sizes=None):\n    (_, max_probs) = torch.max(probs.transpose(0, 1), 2)\n    strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes)\n    return self.process_strings(strings, remove_repetitions=True)", "docstring": "Returns the argmax decoding given the probability matrix. Removes\nrepeated elements in the sequence, as well as blanks.\n\nArguments:\nprobs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim\nsizes(optional): Size of each sequence in the mini-batch\nReturns:\nstrings: sequences of the model's best guess for the transcription on inputs", "source": "codesearchnet"}
{"code": "def avro_union_type_to_beam_type(union_type: List) -> schema_pb2.FieldType:\n    if len(union_type) == 2 and 'null' in union_type:\n        for avro_type in union_type:\n            if avro_type in AVRO_PRIMITIVES_TO_BEAM_PRIMITIVES:\n                return schema_pb2.FieldType(atomic_type=AVRO_PRIMITIVES_TO_BEAM_PRIMITIVES[avro_type], nullable=True)\n        return schemas.typing_to_runner_api(Any)\n    return schemas.typing_to_runner_api(Any)", "docstring": "convert an avro union type to a beam type\n\nif the union type is a nullable, and it is a nullable union of an avro\nprimitive with a corresponding beam primitive then create a nullable beam\nfield of the corresponding beam type, otherwise return an Any type.\n\nArgs:\nunion_type: the avro union type to convert.\n\nReturns:\nthe beam type of the avro union.", "source": "github-repos"}
{"code": "def pre_fetch(self, feed):\n    if hasattr(self, '_list_name') and self._list_name and self._id_field:\n        print('pre fetching %s' % self._list_name)\n        ids = [feed_item[self._id_field] for feed_item in feed if isinstance(feed_item[self._id_field], int)]\n        if ids:\n            for i in range(0, len(ids), 500):\n                results = self._api(iterate=True).list(profileId=self.profile_id, ids=ids[i:i + 500]).execute()\n                for item in results:\n                    store.set(self._entity, [item['id']], item)", "docstring": "Pre-fetches all required items to be update into the cache.\n\nThis increases performance for update operations.\n\nArgs:\nfeed: List of feed items to retrieve", "source": "github-repos"}
{"code": "def loopUntil(\n            self, condition=None, timeout: float = 0) -> Iterator[object]:\n        \n        endTime = time.time() + timeout\n        while True:\n            test = condition and condition()\n            if test:\n                yield test\n                return\n            elif timeout and time.time() > endTime:\n                yield False\n                return\n            else:\n                yield test\n            self.waitOnUpdate(endTime - time.time() if timeout else 0)", "docstring": "Iterate until condition is met, with optional timeout in seconds.\nThe yielded value is that of the condition or False when timed out.\n\nArgs:\ncondition: Predicate function that is tested after every network\nupdate.\ntimeout: Maximum time in seconds to wait.\nIf 0 then no timeout is used.", "source": "juraj-google-style"}
{"code": "def get_lonlatalts(self):\n        \n        band = self.filehandle\n\n        (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps()\n\n        \n        \n\n        longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)\n        latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)\n        altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)\n\n        longitudes.attrs['gcps'] = gcps\n        longitudes.attrs['crs'] = crs\n        latitudes.attrs['gcps'] = gcps\n        latitudes.attrs['crs'] = crs\n        altitudes.attrs['gcps'] = gcps\n        altitudes.attrs['crs'] = crs\n\n        return longitudes, latitudes, altitudes", "docstring": "Obtain GCPs and construct latitude and longitude arrays.\n\nArgs:\nband (gdal band): Measurement band which comes with GCP's\narray_shape (tuple) : The size of the data array\nReturns:\ncoordinates (tuple): A tuple with longitude and latitude arrays", "source": "juraj-google-style"}
{"code": "def distinct_values_of(self, field, count_deleted=False):\n    solr_params = ('facet=true&facet.field=%s&rows=0' % field)\n    result = self.riak_http_search_query(self.index_name, solr_params, count_deleted)\n    facet_fields = result['facet_counts']['facet_fields'][field]\n    keys = facet_fields[0::2]\n    vals = facet_fields[1::2]\n    return dict(zip(keys, vals))", "docstring": "Uses riak http search query endpoint for advanced SOLR queries.\n\nArgs:\nfield (str): facet field\ncount_deleted (bool): ignore deleted or not\n\nReturns:\n(dict): pairs of field values and number of counts", "source": "codesearchnet"}
{"code": "def restore_initializer(filename, name_fn=None, collection=tf.GraphKeys.GLOBAL_VARIABLES):\n\n    def _restore_initializer(getter, name, *args, **kwargs):\n        'Gets variable with restore initializer.'\n        collections = kwargs['collections']\n        if (collections is None):\n            collections = [tf.GraphKeys.GLOBAL_VARIABLES]\n        if (kwargs['trainable'] and (tf.GraphKeys.TRAINABLE_VARIABLES not in collections)):\n            collections += [tf.GraphKeys.TRAINABLE_VARIABLES]\n        if ((collection is None) or (collection in collections)):\n            if (name_fn is not None):\n                var_name_in_checkpoint = name_fn(name)\n            else:\n                var_name_in_checkpoint = name\n            tf.logging.info(\"Restoring '%s' from '%s' into variable '%s'\", var_name_in_checkpoint, filename, name)\n            kwargs['initializer'] = snt.restore_initializer(filename, var_name_in_checkpoint, scope='')\n        return getter(name, *args, **kwargs)\n    return _restore_initializer", "docstring": "Custom getter to restore all variables with `snt.restore_initializer`.\n\nArgs:\nfilename: The filename of the checkpoint.\nname_fn: A function which can map the name of the variable requested. This\nallows restoring variables with values having different names in the\ncheckpoint.\ncollection: Only set the restore initializer for variables in this\ncollection. If `None`, it will attempt to restore all variables. By\ndefault `tf.GraphKeys.GLOBAL_VARIABLES`.\n\nReturns:\nA restore_initializer custom getter, which is a function taking arguments\n(getter, name, *args, **kwargs).", "source": "codesearchnet"}
{"code": "def cysparse_type_to_real_sum_cysparse_type(cysparse_type):\n    \n\n    r_type = None\n\n    if cysparse_type in ['INT32_t', 'UINT32_t', 'INT64_t', 'UINT64_t']:\n        r_type = 'FLOAT64_t'\n    elif cysparse_type in ['FLOAT32_t', 'FLOAT64_t']:\n        r_type = 'FLOAT64_t'\n    elif cysparse_type in ['FLOAT128_t']:\n        r_type = 'FLOAT128_t'\n    elif cysparse_type in ['COMPLEX64_t', 'COMPLEX128_t']:\n        r_type = 'FLOAT64_t'\n    elif cysparse_type in ['COMPLEX256_t']:\n        r_type = 'FLOAT128_t'\n    else:\n        raise TypeError(\"Not a recognized type\")\n\n    assert r_type in ['FLOAT64_t', 'FLOAT128_t']\n\n    return r_type", "docstring": "Returns the best **real** type for a **real** sum for a given type.\n\nFor instance:\n\nINT32_t -> FLOAT64_t\n\nArgs:\ncysparse_type:", "source": "juraj-google-style"}
{"code": "def report_delete(config, auth, report_id=None, name=None):\n    if config.verbose:\n        print('DBM DELETE:', report_id or name)\n    report = report_get(config, auth, report_id, name)\n    if report:\n        API_DBM(config, auth).queries().delete(queryId=report['queryId']).execute()\n    elif config.verbose:\n        print('DBM DELETE: No Report')", "docstring": "Deletes a DBM report based on name or ID.\n\nArgs:\n* auth: (string) Either user or service.\n* report_id: (int) ID of DCm report to fetch ( either or name ).\n* name: (string) Name of report to fetch ( either or report_id ).\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def save(self, filepath):\n    filepath = str(filepath)\n    if not filepath.endswith('.weights.h5'):\n        raise ValueError(f'Invalid `filepath` argument: expected a `.weights.h5` extension. Received: filepath={filepath}')\n    weights_store = H5IOStore(filepath, mode='w')\n\n    def _save(weights_dict, weights_store, inner_path):\n        vars_to_create = {}\n        for name, value in weights_dict.items():\n            if isinstance(value, dict):\n                if value:\n                    _save(weights_dict[name], weights_store, inner_path=inner_path + '/' + name)\n            else:\n                vars_to_create[name] = value\n        if vars_to_create:\n            var_store = weights_store.make(inner_path)\n            for name, value in vars_to_create.items():\n                var_store[name] = value\n    _save(self.weights_dict, weights_store, inner_path='')\n    weights_store.close()", "docstring": "Save the edited weights file.\n\nArgs:\nfilepath: Path to save the file to.\nMust be a `.weights.h5` file.", "source": "github-repos"}
{"code": "def get_rel_pos(q_size, k_size, rel_pos):\n    max_rel_dist = int(2 * max(q_size, k_size) - 1)\n    if rel_pos.shape[0] != max_rel_dist:\n        rel_pos_resized = nn.functional.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')\n        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n    else:\n        rel_pos_resized = rel_pos\n    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n    relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n    return rel_pos_resized[relative_coords.long()]", "docstring": "Get relative positional embeddings according to the relative positions of query and key sizes.\n\nArgs:\nq_size (`int`):\nSize of query q.\nk_size (`int`):\nSize of key k.\nrel_pos (`torch.Tensor`):\nRelative position embeddings (num_embeddings, num_channels).\n\nReturns:\nExtracted positional embeddings according to relative positions.", "source": "github-repos"}
{"code": "def distances_from_parent(self, leaves=True, internal=True, unlabeled=False):\n        \n        if not isinstance(leaves, bool):\n            raise TypeError(\"leaves must be a bool\")\n        if not isinstance(internal, bool):\n            raise TypeError(\"internal must be a bool\")\n        if not isinstance(unlabeled, bool):\n            raise TypeError(\"unlabeled must be a bool\")\n        if leaves or internal:\n            for node in self.traverse_preorder():\n                if ((leaves and node.is_leaf()) or (internal and not node.is_leaf())) and (unlabeled or node.label is not None):\n                    if node.edge_length is None:\n                        yield (node,0)\n                    else:\n                        yield (node,node.edge_length)", "docstring": "Generator over the node-to-parent distances of this ``Tree``; (node,distance) tuples\n\nArgs:\n``terminal`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``\n\n``unlabeled`` (``bool``): ``True`` to include unlabeled nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def r_edges(step):\n    (rbot, rtop) = misc.get_rbounds(step)\n    centers = (step.rprof.loc[(:, 'r')].values + rbot)\n    edges = ((centers[:(- 1)] + centers[1:]) / 2)\n    edges = np.insert(edges, 0, rbot)\n    edges = np.append(edges, rtop)\n    return (edges, edges)", "docstring": "Cell border.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the position of the bottom and top walls\nof the cells. The two elements of the tuple are identical.", "source": "codesearchnet"}
{"code": "def pybel_to_json(molecule, name=None):\n    atoms = [{'element': table.GetSymbol(atom.atomicnum), 'location': list(atom.coords)} for atom in molecule.atoms]\n    for (json_atom, pybel_atom) in zip(atoms, molecule.atoms):\n        if (pybel_atom.partialcharge != 0):\n            json_atom['charge'] = pybel_atom.partialcharge\n        if pybel_atom.OBAtom.HasData('_atom_site_label'):\n            obatom = pybel_atom.OBAtom\n            json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()\n        if pybel_atom.OBAtom.HasData('color'):\n            obatom = pybel_atom.OBAtom\n            json_atom['color'] = obatom.GetData('color').GetValue()\n    bonds = [{'atoms': [b.GetBeginAtom().GetIndex(), b.GetEndAtom().GetIndex()], 'order': b.GetBondOrder()} for b in ob.OBMolBondIter(molecule.OBMol)]\n    output = {'atoms': atoms, 'bonds': bonds, 'units': {}}\n    if hasattr(molecule, 'unitcell'):\n        uc = molecule.unitcell\n        output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()] for v in uc.GetCellVectors()]\n        density = (sum((atom.atomicmass for atom in molecule.atoms)) / (uc.GetCellVolume() * 0.6022))\n        output['density'] = density\n        output['units']['density'] = 'kg / L'\n    element_count = Counter((table.GetSymbol(a.atomicnum) for a in molecule))\n    hill_count = []\n    for element in ['C', 'H']:\n        if (element in element_count):\n            hill_count += [(element, element_count[element])]\n            del element_count[element]\n    hill_count += sorted(element_count.items())\n    div = (reduce(gcd, (c[1] for c in hill_count)) if hasattr(molecule, 'unitcell') else 1)\n    output['formula'] = ''.join(((n if ((c / div) == 1) else ('%s%d' % (n, (c / div)))) for (n, c) in hill_count))\n    output['molecular_weight'] = (molecule.molwt / div)\n    output['units']['molecular_weight'] = 'g / mol'\n    if name:\n        output['name'] = name\n    return output", "docstring": "Converts a pybel molecule to json.\n\nArgs:\nmolecule: An instance of `pybel.Molecule`\nname: (Optional) If specified, will save a \"name\" property\nReturns:\nA Python dictionary containing atom and bond data", "source": "codesearchnet"}
{"code": "def get_region(b):\n    remap = {None: 'us-east-1', 'EU': 'eu-west-1'}\n    region = b.get('Location', {}).get('LocationConstraint')\n    return remap.get(region, region)", "docstring": "Tries to get the bucket region from Location.LocationConstraint\n\nSpecial cases:\nLocationConstraint EU defaults to eu-west-1\nLocationConstraint null defaults to us-east-1\n\nArgs:\nb (object): A bucket object\n\nReturns:\nstring: an aws region string", "source": "codesearchnet"}
{"code": "def get_vocab(self) -> Dict[str, int]:\n    raise NotImplementedError()", "docstring": "Returns the vocabulary as a dictionary of token to index.\n\n`tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the\nvocab.\n\nReturns:\n`Dict[str, int]`: The vocabulary.", "source": "github-repos"}
{"code": "def _get_expiration(self, headers: dict) -> int:\n    expiration_str = headers.get('expires')\n    if (not expiration_str):\n        return 0\n    expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')\n    delta = (expiration - datetime.utcnow()).total_seconds()\n    return math.ceil(abs(delta))", "docstring": "Gets the expiration time of the data from the response headers.\n\nArgs:\nheaders: dictionary of headers from ESI\n\nReturns:\nvalue of seconds from now the data expires", "source": "codesearchnet"}
{"code": "def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"name\": name})\n        return self.api_call(\"reactions.add\", json=kwargs)", "docstring": "Adds a reaction to an item.\n\nArgs:\nname (str): Reaction (emoji) name. e.g. 'thumbsup'\nchannel (str): Channel where the message to add reaction to was posted.\ne.g. 'C1234567890'\ntimestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'", "source": "juraj-google-style"}
{"code": "class AriaCrossAttention(nn.Module):\n\n    def __init__(self, config: AriaConfig, dropout_rate: float=0):\n        super().__init__()\n        hidden_size = config.vision_config.hidden_size\n        num_heads = config.vision_config.num_attention_heads\n        self.num_heads = num_heads\n        self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False)\n        self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False)\n        self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False)\n        self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)\n        self.linear = nn.Linear(hidden_size, hidden_size)\n        self.dropout = nn.Dropout(dropout_rate)\n        self.layer_norm = nn.LayerNorm(hidden_size)\n        self.layer_norm_kv = nn.LayerNorm(hidden_size)\n\n    def forward(self, key_value_states, hidden_states, attn_mask=None):\n        \n        query = self.q_proj(self.layer_norm(hidden_states))\n        key_value_states = self.layer_norm_kv(key_value_states)\n        key = self.k_proj(key_value_states)\n        value = self.v_proj(key_value_states)\n        attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)\n        attn_output = self.dropout(self.linear(attn_output))\n        return attn_output", "docstring": "Aria Cross-Attention module.\n\nArgs:\nconfig (`AriaConfig`):\nThe configuration to use.", "source": "github-repos"}
{"code": "def __delete__(self, obj):\n        \n\n        if self.name in obj._property_values:\n            old_value = obj._property_values[self.name]\n            del obj._property_values[self.name]\n            self.trigger_if_changed(obj, old_value)\n\n        if self.name in obj._unstable_default_values:\n            del obj._unstable_default_values[self.name]", "docstring": "Implement the deleter for the Python `descriptor protocol`_.\n\nArgs:\nobj (HasProps) : An instance to delete this property from", "source": "juraj-google-style"}
{"code": "def _validate_schema_and_ast(schema, ast):\n    core_graphql_errors = validate(schema, ast)\n    unsupported_default_directives = frozenset([frozenset(['include', frozenset(['FIELD', 'FRAGMENT_SPREAD', 'INLINE_FRAGMENT']), frozenset(['if'])]), frozenset(['skip', frozenset(['FIELD', 'FRAGMENT_SPREAD', 'INLINE_FRAGMENT']), frozenset(['if'])]), frozenset(['deprecated', frozenset(['ENUM_VALUE', 'FIELD_DEFINITION']), frozenset(['reason'])])])\n    expected_directives = {frozenset([directive.name, frozenset(directive.locations), frozenset(six.viewkeys(directive.args))]) for directive in DIRECTIVES}\n    actual_directives = {frozenset([directive.name, frozenset(directive.locations), frozenset(six.viewkeys(directive.args))]) for directive in schema.get_directives()}\n    missing_directives = (expected_directives - actual_directives)\n    if missing_directives:\n        missing_message = u'The following directives were missing from the provided schema: {}'.format(missing_directives)\n        core_graphql_errors.append(missing_message)\n    extra_directives = ((actual_directives - expected_directives) - unsupported_default_directives)\n    if extra_directives:\n        extra_message = u'The following directives were supplied in the given schema, but are not not supported by the GraphQL compiler: {}'.format(extra_directives)\n        core_graphql_errors.append(extra_message)\n    return core_graphql_errors", "docstring": "Validate the supplied graphql schema and ast.\n\nThis method wraps around graphql-core's validation to enforce a stricter requirement of the\nschema -- all directives supported by the compiler must be declared by the schema, regardless of\nwhether each directive is used in the query or not.\n\nArgs:\nschema: GraphQL schema object, created using the GraphQL library\nast: abstract syntax tree representation of a graphql query\n\nReturns:\nlist containing schema and/or query validation errors", "source": "codesearchnet"}
{"code": "async def evaluate_model(eval_model_path, target_model_path, sgf_dir, seed):\n    lines = (await run('bazel-bin/cc/eval', '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'eval.flags')), '--model={}'.format(eval_model_path), '--model_two={}'.format(target_model_path), '--sgf_dir={}'.format(sgf_dir), '--seed={}'.format(seed)))\n    result = '\\n'.join(lines[(- 7):])\n    logging.info(result)\n    (eval_stats, target_stats) = parse_win_stats_table(result, 2)\n    num_games = (eval_stats.total_wins + target_stats.total_wins)\n    win_rate = (eval_stats.total_wins / num_games)\n    logging.info('Win rate %s vs %s: %.3f', eval_stats.model_name, target_stats.model_name, win_rate)\n    return win_rate", "docstring": "Evaluate one model against a target.\n\nArgs:\neval_model_path: the path to the model to evaluate.\ntarget_model_path: the path to the model to compare to.\nsgf_dif: directory path to write SGF output to.\nseed: random seed to use when running eval.\n\nReturns:\nThe win-rate of eval_model against target_model in the range [0, 1].", "source": "codesearchnet"}
{"code": "def shape4d(a, data_format='NHWC'):\n    s2d = shape2d(a)\n    if (get_data_format(data_format, False) == 'NHWC'):\n        return (([1] + s2d) + [1])\n    else:\n        return ([1, 1] + s2d)", "docstring": "Ensuer a 4D shape, to use with 4D symbolic functions.\n\nArgs:\na: a int or tuple/list of length 2\n\nReturns:\nlist: of length 4. if ``a`` is a int, return ``[1, a, a, 1]``\nor ``[1, 1, a, a]`` depending on data_format.", "source": "codesearchnet"}
{"code": "def _make_train_step_fn(model, mode, strategy, output_labels):\n\n    def _step_fn(ctx, inputs):\n        \n        if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n            inputs, targets = inputs\n        else:\n            targets = None\n        if isinstance(inputs, dict):\n            inputs = [inputs[input_name] for input_name in model._feed_input_names]\n        _build_model(strategy, model, mode, inputs, targets)\n        grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = strategy.extended.call_for_each_replica(_per_replica_execution_function, args=(dist_utils.get_distributed_model(model, mode), mode))\n        all_inputs, all_outputs, all_updates, all_session_args = dist_utils.unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args)\n        combined_fn = backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_' + str(mode) + '_function', **all_session_args)\n        for label, output in zip(output_labels, combined_fn.outputs):\n            if label == 'loss':\n                reduce_op = ds_reduce_util.ReduceOp.SUM\n            else:\n                reduce_op = ds_reduce_util.ReduceOp.MEAN\n            ctx.set_last_step_output(label, output, reduce_op)\n        return combined_fn.updates_op\n    return _step_fn", "docstring": "Create step fn.\n\nArgs:\nmodel: a Keras Model instance.\nmode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.\nstrategy: a `tf.distribute.Strategy` instance.\noutput_labels: the output labels for the step function.\n\nReturns:\nA step function to run by `tf.distribute.Strategy`.", "source": "github-repos"}
{"code": "def setup_spline(self, spline_options=None):\n    self.spline_options = spline_options\n    relative_energies = (self.energies - self.energies[0])\n    if scipy_old_piecewisepolynomial:\n        if self.spline_options:\n            raise RuntimeError('Option for saddle point not available withold scipy implementation')\n        self.spline = PiecewisePolynomial(self.r, np.array([relative_energies, (- self.forces)]).T, orders=3)\n    elif (self.spline_options.get('saddle_point', '') == 'zero_slope'):\n        imax = np.argmax(relative_energies)\n        self.spline = CubicSpline(x=self.r[:(imax + 1)], y=relative_energies[:(imax + 1)], bc_type=((1, 0.0), (1, 0.0)))\n        cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:], bc_type=((1, 0.0), (1, 0.0)))\n        self.spline.extend(c=cspline2.c, x=cspline2.x[1:])\n    else:\n        self.spline = CubicSpline(x=self.r, y=relative_energies, bc_type=((1, 0.0), (1, 0.0)))", "docstring": "Setup of the options for the spline interpolation\n\nArgs:\nspline_options (dict): Options for cubic spline. For example,\n{\"saddle_point\": \"zero_slope\"} forces the slope at the saddle to\nbe zero.", "source": "codesearchnet"}
{"code": "async def open_interface(self, client_id, conn_string, interface):\n    conn_id = self._client_connection(client_id, conn_string)\n    self._hook_open_interface(conn_string, interface, client_id)\n    (await self.adapter.open_interface(conn_id, interface))", "docstring": "Open a device interface on behalf of a client.\n\nSee :meth:`AbstractDeviceAdapter.open_interface`.\n\nArgs:\nclient_id (str): The client we are working for.\nconn_string (str): A connection string that will be\npassed to the underlying device adapter.\ninterface (str): The name of the interface to open.\n\nRaises:\nDeviceServerError: There is an issue with your client_id such\nas not being connected to the device.\nDeviceAdapterError: The adapter had an issue opening the interface.", "source": "codesearchnet"}
{"code": "def __process_node(self, node: yaml.Node, expected_type: Type) -> yaml.Node:\n    logger.info('Processing node {} expecting type {}'.format(node, expected_type))\n    (recognized_types, message) = self.__recognizer.recognize(node, expected_type)\n    if (len(recognized_types) != 1):\n        raise RecognitionError(message)\n    recognized_type = recognized_types[0]\n    logger.debug('Savorizing node {}'.format(node))\n    if (recognized_type in self._registered_classes.values()):\n        node = self.__savorize(node, recognized_type)\n    logger.debug('Savorized, now {}'.format(node))\n    logger.debug('Recursing into subnodes')\n    if is_generic_list(recognized_type):\n        if (node.tag != 'tag:yaml.org,2002:seq'):\n            raise RecognitionError('{}{}Expected a {} here'.format(node.start_mark, os.linesep, type_to_desc(expected_type)))\n        for item in node.value:\n            self.__process_node(item, generic_type_args(recognized_type)[0])\n    elif is_generic_dict(recognized_type):\n        if (node.tag != 'tag:yaml.org,2002:map'):\n            raise RecognitionError('{}{}Expected a {} here'.format(node.start_mark, os.linesep, type_to_desc(expected_type)))\n        for (_, value_node) in node.value:\n            self.__process_node(value_node, generic_type_args(recognized_type)[1])\n    elif (recognized_type in self._registered_classes.values()):\n        if ((not issubclass(recognized_type, enum.Enum)) and (not issubclass(recognized_type, str)) and (not issubclass(recognized_type, UserString))):\n            for (attr_name, type_, _) in class_subobjects(recognized_type):\n                cnode = Node(node)\n                if cnode.has_attribute(attr_name):\n                    subnode = cnode.get_attribute(attr_name)\n                    new_subnode = self.__process_node(subnode.yaml_node, type_)\n                    cnode.set_attribute(attr_name, new_subnode)\n    else:\n        logger.debug('Not a generic class or a user-defined class, not recursing')\n    node.tag = self.__type_to_tag(recognized_type)\n    logger.debug('Finished processing node {}'.format(node))\n    return node", "docstring": "Processes a node.\n\nThis is the main function that implements yatiml's \\\nfunctionality. It figures out how to interpret this node \\\n(recognition), then applies syntactic sugar, and finally \\\nrecurses to the subnodes, if any.\n\nArgs:\nnode: The node to process.\nexpected_type: The type we expect this node to be.\n\nReturns:\nThe transformed node, or a transformed copy.", "source": "codesearchnet"}
{"code": "def dframe(self, dimensions=None, multi_index=False):\n        \n        import pandas as pd\n        if dimensions is None:\n            dimensions = [d.name for d in self.dimensions()]\n        else:\n            dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]\n        column_names = dimensions\n        dim_vals = OrderedDict([(dim, self.dimension_values(dim)) for dim in column_names])\n        df = pd.DataFrame(dim_vals)\n        if multi_index:\n            df = df.set_index([d for d in dimensions if d in self.kdims])\n        return df", "docstring": "Convert dimension values to DataFrame.\n\nReturns a pandas dataframe of columns along each dimension,\neither completely flat or indexed by key dimensions.\n\nArgs:\ndimensions: Dimensions to return as columns\nmulti_index: Convert key dimensions to (multi-)index\n\nReturns:\nDataFrame of columns corresponding to each dimension", "source": "juraj-google-style"}
{"code": "def get_header(graphs, proto_fileformat='rawproto', default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'):\n    ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops)\n    if not ops_and_kernels:\n        print('Error reading graph!')\n        return 1\n    return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')", "docstring": "Computes a header for use with tensorflow SELECTIVE_REGISTRATION.\n\nArgs:\ngraphs: a list of paths to GraphDef files to include.\nproto_fileformat: optional format of proto file, either 'textproto',\n'rawproto' (default) or ops_list. The ops_list is the file contain the\nlist of ops in JSON format, Ex: \"[[\"Transpose\", \"TransposeCpuOp\"]]\".\ndefault_ops: optional comma-separated string of operator:kernel pairs to\nalways include implementation for. Pass 'all' to have all operators and\nkernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'.\n\nReturns:\nthe string of the header that should be written as ops_to_register.h.", "source": "github-repos"}
{"code": "def import_object_from_string_code(code, object):\n    sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()\n    module = imp.new_module(sha256)\n    try:\n        exec_(code, module.__dict__)\n    except Exception as e:\n        raise exceptions.UserError('User code exception', exception_message=str(e))\n    sys.modules[sha256] = module\n    try:\n        return getattr(module, object)\n    except AttributeError:\n        raise exceptions.UserError('{} not found in code'.format(object))", "docstring": "Used to import an object from arbitrary passed code.\n\nPassed in code is treated as a module and is imported and added\nto `sys.modules` with its SHA256 hash as key.\n\nArgs:\ncode (string): Python code to import as module\n\nobject (string): Name of object to extract from imported module", "source": "codesearchnet"}
{"code": "def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001):\n    if ndim(x) == 4:\n        if axis == 1 or axis == -3:\n            tf_data_format = 'NCHW'\n        elif axis == 3 or axis == -1:\n            tf_data_format = 'NHWC'\n        else:\n            tf_data_format = None\n        if tf_data_format == 'NHWC' or (tf_data_format == 'NCHW' and _has_nchw_support()):\n            if ndim(mean) > 1:\n                mean = array_ops.reshape(mean, [-1])\n            if ndim(var) > 1:\n                var = array_ops.reshape(var, [-1])\n            if beta is None:\n                beta = zeros_like(mean)\n            elif ndim(beta) > 1:\n                beta = array_ops.reshape(beta, [-1])\n            if gamma is None:\n                gamma = ones_like(mean)\n            elif ndim(gamma) > 1:\n                gamma = array_ops.reshape(gamma, [-1])\n        y, _, _ = nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, mean=mean, variance=var, data_format=tf_data_format, is_training=False)\n        return y\n    return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)", "docstring": "Applies batch normalization on x given mean, var, beta and gamma.\n\nI.e. returns:\n`output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`\n\nArgs:\nx: Input tensor or variable.\nmean: Mean of batch.\nvar: Variance of batch.\nbeta: Tensor with which to center the input.\ngamma: Tensor by which to scale the input.\naxis: Integer, the axis that should be normalized.\n(typically the features axis).\nepsilon: Fuzz factor.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def limit(self, count):\n        \n        return self.__class__(\n            self._parent,\n            projection=self._projection,\n            field_filters=self._field_filters,\n            orders=self._orders,\n            limit=count,\n            offset=self._offset,\n            start_at=self._start_at,\n            end_at=self._end_at,\n        )", "docstring": "Limit a query to return a fixed number of results.\n\nIf the current query already has a limit set, this will overwrite it.\n\nArgs:\ncount (int): Maximum number of documents to return that match\nthe query.\n\nReturns:\n~.firestore_v1beta1.query.Query: A limited query. Acts as a\ncopy of the current query, modified with the newly added\n\"limit\" filter.", "source": "juraj-google-style"}
{"code": "def update_compliance(self, timeout=(- 1)):\n    uri = '{}/compliance'.format(self.data['uri'])\n    return self._helper.update(None, uri, timeout=timeout)", "docstring": "Returns logical interconnects to a consistent state. The current logical interconnect state is\ncompared to the associated logical interconnect group.\n\nAny differences identified are corrected, bringing the logical interconnect back to a consistent\nstate. Changes are asynchronously applied to all managed interconnects. Note that if the changes detected\ninvolve differences in the interconnect map between the logical interconnect group and the logical interconnect,\nthe process of bringing the logical interconnect back to a consistent state might involve automatically removing\nexisting interconnects from management and/or adding new interconnects for management.\n\nArgs:\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Logical Interconnect.", "source": "codesearchnet"}
{"code": "def dtype(self, value):\n        \n        for (dtype, string) in self._all:\n            if string == value:\n                return dtype\n\n        return None", "docstring": "Gets the datatype for the given `value` (description).\n\nArgs:\nvalue (str): A text description for any datatype.\n\nReturns:\nnumpy.dtype: The matching datatype for the given text.\nNone: If no match can be found, `None` will be returned.", "source": "juraj-google-style"}
{"code": "def transform_coords(self, width, height):\n\t\t\n\n\t\tif self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}:\n\t\t\traise AttributeError(_wrong_meth.format(self.type))\n\t\tx = self._libinput.libinput_event_touch_get_x_transformed(\n\t\t\tself._handle, width)\n\t\ty = self._libinput.libinput_event_touch_get_y_transformed(\n\t\t\tself._handle, height)\n\t\treturn x, y", "docstring": "Return the current absolute coordinates of the touch event,\ntransformed to screen coordinates.\n\nFor events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,\n:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this method\nraises :exc:`AttributeError`.\n\nArgs:\nwidth (int): The current output screen width.\nheight (int): The current output screen height.\nReturns:\n(float, float): The current absolute (x, y) coordinates transformed\nto screen coordinates.", "source": "juraj-google-style"}
{"code": "def _CreateLineStringForShape(self, parent, shape):\n    \n    coordinate_list = [(longitude, latitude) for\n                       (latitude, longitude, distance) in shape.points]\n    return self._CreateLineString(parent, coordinate_list)", "docstring": "Create a KML LineString using coordinates from a shape.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nshape: The transitfeed.Shape instance.\n\nReturns:\nThe LineString ElementTree.Element instance or None if coordinate_list is\nempty.", "source": "juraj-google-style"}
{"code": "def pull_screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180, remote: _PATH = '/sdcard/demo.mp4', local: _PATH = 'demo.mp4') -> None:\n        \n        self.screenrecord(bit_rate, time_limit, filename=remote)\n        self.pull(remote, local)", "docstring": "Recording the display of devices running Android 4.4 (API level 19) and higher. Then copy it to your computer.\n\nArgs:\nbit_rate:You can increase the bit rate to improve video quality, but doing so results in larger movie files.\ntime_limit: Sets the maximum recording time, in seconds, and the maximum value is 180 (3 minutes).", "source": "juraj-google-style"}
{"code": "def add_role(user, roles):\n\n    def _add_role(role):\n        user_role = UserRole()\n        user_role.user_id = user.user_id\n        user_role.role_id = role.role_id\n        db.session.add(user_role)\n        db.session.commit()\n    [_add_role(role) for role in roles]", "docstring": "Map roles for user in database\n\nArgs:\nuser (User): User to add roles to\nroles ([Role]): List of roles to add\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_workflow_definitions(sbi_config: dict):\n    \n    registered_workflows = []\n    for i in range(len(sbi_config['processing_blocks'])):\n        workflow_config = sbi_config['processing_blocks'][i]['workflow']\n        workflow_name = '{}:{}'.format(workflow_config['id'],\n                                       workflow_config['version'])\n        if workflow_name in registered_workflows:\n            continue\n        workflow_definition = dict(\n            id=workflow_config['id'],\n            version=workflow_config['version'],\n            stages=[]\n        )\n        key = \"workflow_definitions:{}:{}\".format(workflow_config['id'],\n                                                  workflow_config['version'])\n        DB.save_dict(key, workflow_definition, hierarchical=False)\n        registered_workflows.append(workflow_name)", "docstring": "Add any missing SBI workflow definitions as placeholders.\n\nThis is a utility function used in testing and adds mock / test workflow\ndefinitions to the database for workflows defined in the specified\nSBI config.\n\nArgs:\nsbi_config (dict): SBI configuration dictionary.", "source": "juraj-google-style"}
{"code": "def indices2nodes(self, indices):\n    if (set(indices) - set(self.node_indices)):\n        raise ValueError(\"`indices` must be a subset of the Subsystem's indices.\")\n    return tuple((self._index2node[n] for n in indices))", "docstring": "Return |Nodes| for these indices.\n\nArgs:\nindices (tuple[int]): The indices in question.\n\nReturns:\ntuple[Node]: The |Node| objects corresponding to these indices.\n\nRaises:\nValueError: If requested indices are not in the subsystem.", "source": "codesearchnet"}
{"code": "def create_or_update(cls, course_video, file_name=None, image_data=None, generated_images=None):\n    (video_image, created) = cls.objects.get_or_create(course_video=course_video)\n    if image_data:\n        if ((not created) and (VideoImage.objects.filter(image=video_image.image).count() == 1)):\n            video_image.image.delete()\n        with closing(image_data) as image_file:\n            file_name = '{uuid}{ext}'.format(uuid=uuid4().hex, ext=os.path.splitext(file_name)[1])\n            try:\n                video_image.image.save(file_name, image_file)\n            except Exception:\n                logger.exception('VAL: Video Image save failed to storage for course_id [%s] and video_id [%s]', course_video.course_id, course_video.video.edx_video_id)\n                raise\n    else:\n        if generated_images:\n            video_image.generated_images = generated_images\n            if (not video_image.image.name):\n                file_name = generated_images[0]\n        video_image.image.name = file_name\n    video_image.save()\n    return (video_image, created)", "docstring": "Create a VideoImage object for a CourseVideo.\n\nNOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise\na new file name is constructed based on uuid and extension from `file_name` value.\n`image_data` will be None in case of course re-run and export. `generated_images` list\ncontains names of images auto generated by VEDA. If an image is not already set then first\nimage name from `generated_images` list will be used.\n\nArguments:\ncourse_video (CourseVideo): CourseVideo instance\nfile_name (str): File name of the image\nimage_data (InMemoryUploadedFile): Image data to be saved.\ngenerated_images (list): auto generated image names\n\nReturns:\nReturns a tuple of (video_image, created).", "source": "codesearchnet"}
{"code": "def leave_module(self, node):\n        \n        for triple_quote in self._tokenized_triple_quotes.values():\n            self._check_triple_quotes(triple_quote)\n\n        \n        \n        self._tokenized_triple_quotes = {}", "docstring": "Leave module and check remaining triple quotes.\n\nArgs:\nnode: the module node we are leaving.", "source": "juraj-google-style"}
{"code": "def commit_channel(self, channel_id):\n        \n        payload = {\n            \"channel_id\":channel_id,\n            \"stage\": config.STAGE,\n        }\n        response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload))\n        if response.status_code != 200:\n            config.LOGGER.error(\"\\n\\nCould not activate channel: {}\\n\".format(response._content.decode('utf-8')))\n            if response.status_code == 403:\n                config.LOGGER.error(\"Channel can be viewed at {}\\n\\n\".format(config.open_channel_url(channel_id, staging=True)))\n                sys.exit()\n        response.raise_for_status()\n        new_channel = json.loads(response._content.decode(\"utf-8\"))\n        channel_link = config.open_channel_url(new_channel['new_channel'])\n        return channel_id, channel_link", "docstring": "commit_channel: commits channel to Kolibri Studio\nArgs:\nchannel_id (str): channel's id on Kolibri Studio\nReturns: channel id and link to uploadedchannel", "source": "juraj-google-style"}
{"code": "def default_value(fieldname, datatype):\n    \n    if fieldname in tsdb_coded_attributes:\n        return str(tsdb_coded_attributes[fieldname])\n    else:\n        return _default_datatype_values.get(datatype, '')", "docstring": "Return the default value for a column.\n\nIf the column name (e.g. *i-wf*) is defined to have an idiosyncratic\nvalue, that value is returned. Otherwise the default value for the\ncolumn's datatype is returned.\n\nArgs:\nfieldname: the column name (e.g. `i-wf`)\ndatatype: the datatype of the column (e.g. `:integer`)\nReturns:\nThe default value for the column.\n\n.. deprecated:: v0.7.0", "source": "juraj-google-style"}
{"code": "def _get_cached_certs(cert_uri, cache):\n    certs = cache.get(cert_uri, namespace=_CERT_NAMESPACE)\n    if (certs is None):\n        _logger.debug('Cert cache miss for %s', cert_uri)\n        try:\n            result = urlfetch.fetch(cert_uri)\n        except AssertionError:\n            return None\n        if (result.status_code == 200):\n            certs = json.loads(result.content)\n            expiration_time_seconds = _get_cert_expiration_time(result.headers)\n            if expiration_time_seconds:\n                cache.set(cert_uri, certs, time=expiration_time_seconds, namespace=_CERT_NAMESPACE)\n        else:\n            _logger.error('Certs not available, HTTP request returned %d', result.status_code)\n    return certs", "docstring": "Get certs from cache if present; otherwise, gets from URI and caches them.\n\nArgs:\ncert_uri: URI from which to retrieve certs if cache is stale or empty.\ncache: Cache of pre-fetched certs.\n\nReturns:\nThe retrieved certs.", "source": "codesearchnet"}
{"code": "def _and_join(self, terms):\n    if (len(terms) > 1):\n        return ' AND '.join([self._or_join(t) for t in terms])\n    else:\n        return self._or_join(terms[0])", "docstring": "Joins terms using AND operator.\n\nArgs:\nterms (list): terms to join\n\nExamples:\nself._and_join(['term1']) -> 'term1'\nself._and_join(['term1', 'term2']) -> 'term1 AND term2'\nself._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3'\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def __init__(self, missing_modules: Collection[str]=()):\n    if os.getenv('TYPESHED_HOME'):\n        self._store = ExternalTypeshedFs(missing_file=self.MISSING_FILE)\n    else:\n        self._store = InternalTypeshedFs(missing_file=self.MISSING_FILE)\n    self._missing = self._load_missing().union(missing_modules)\n    self._stdlib_versions = self._load_stdlib_versions()\n    self._third_party_packages = self._load_third_party_packages()", "docstring": "Initializer.\n\nArgs:\nmissing_modules: A collection of modules in the format\n'stdlib/module_name', which will be combined with the contents of\nMISSING_FILE to form a set of missing modules for which pytype will not\nreport errors.", "source": "github-repos"}
{"code": "def NetworkFee(self):\n    if (self._network_fee is None):\n        input = Fixed8(0)\n        for coin_ref in self.References.values():\n            if (coin_ref.AssetId == GetBlockchain().SystemCoin().Hash):\n                input = (input + coin_ref.Value)\n        output = Fixed8(0)\n        for tx_output in self.outputs:\n            if (tx_output.AssetId == GetBlockchain().SystemCoin().Hash):\n                output = (output + tx_output.Value)\n        self._network_fee = ((input - output) - self.SystemFee())\n    return self._network_fee", "docstring": "Get the network fee.\n\nReturns:\nFixed8:", "source": "codesearchnet"}
{"code": "def populate(projects_to_filter=None, group=None):\n    if (projects_to_filter is None):\n        projects_to_filter = []\n    import benchbuild.projects as all_projects\n    all_projects.discover()\n    prjs = ProjectRegistry.projects\n    if projects_to_filter:\n        prjs = {}\n        for filter_project in set(projects_to_filter):\n            try:\n                prjs.update({x: y for (x, y) in ProjectRegistry.projects.items(prefix=filter_project)})\n            except KeyError:\n                pass\n    if group:\n        groupkeys = set(group)\n        prjs = {name: cls for (name, cls) in prjs.items() if (cls.GROUP in groupkeys)}\n    return {x: prjs[x] for x in prjs if ((prjs[x].DOMAIN != 'debug') or (x in projects_to_filter))}", "docstring": "Populate the list of projects that belong to this experiment.\n\nArgs:\nprojects_to_filter (list(Project)):\nList of projects we want to assign to this experiment.\nWe intersect the list of projects with the list of supported\nprojects to get the list of projects that belong to this\nexperiment.\ngroup (list(str)):\nIn addition to the project filter, we provide a way to filter\nwhole groups.", "source": "codesearchnet"}
{"code": "def mesh_axis_to_cumprod(self, tensor_shape):\n    tensor_layout = self.tensor_layout(tensor_shape)\n    ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)\n    ta2cumprod = tensor_shape.cumprod\n    return [(None if (ta is None) else ta2cumprod[ta]) for ta in ma2ta]", "docstring": "For each mesh axis, give the product of previous tensor axes.\n\nArgs:\ntensor_shape: Shape.\n\nReturns:\nlist with length self.ndims where each element is an integer or None.", "source": "codesearchnet"}
{"code": "def expand_abbreviations(txt, fields):\n\n    def _expand(matchobj):\n        s = matchobj.group('var')\n        if (s not in fields):\n            matches = [x for x in fields if x.startswith(s)]\n            if (len(matches) == 1):\n                s = matches[0]\n        return ('{%s}' % s)\n    return re.sub(FORMAT_VAR_REGEX, _expand, txt)", "docstring": "Expand abbreviations in a format string.\n\nIf an abbreviation does not match a field, or matches multiple fields, it\nis left unchanged.\n\nExample:\n\n>>> fields = (\"hey\", \"there\", \"dude\")\n>>> expand_abbreviations(\"hello {d}\", fields)\n'hello dude'\n\nArgs:\ntxt (str): Format string.\nfields (list of str): Fields to expand to.\n\nReturns:\nExpanded string.", "source": "codesearchnet"}
{"code": "def np_dtype(dtype):\n    \n    if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:\n        return np.float32\n    elif dtype == np.float64 or dtype == tf.float64:\n        return np.float64\n    elif dtype == np.float16 or dtype == tf.float16:\n        return np.float16\n    elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:\n        return np.int32\n    elif dtype == np.int64 or dtype == tf.int64:\n        return np.int64\n    elif dtype == np.int16 or dtype == tf.int16:\n        return np.int16\n    elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:\n        return np.bool_\n    else:\n        raise TensorForceError(\"Error: Type conversion from type {} not supported.\".format(str(dtype)))", "docstring": "Translates dtype specifications in configurations to numpy data types.\nArgs:\ndtype: String describing a numerical type (e.g. 'float') or numerical type primitive.\n\nReturns: Numpy data type", "source": "juraj-google-style"}
{"code": "def Matches(self, file_entry, search_depth):\n    if (self._location_segments is None):\n        location_match = None\n    else:\n        location_match = self._CheckLocation(file_entry, search_depth)\n        if (not location_match):\n            return (False, location_match)\n        if (search_depth != self._number_of_location_segments):\n            return (False, location_match)\n    match = self._CheckFileEntryType(file_entry)\n    if ((match is not None) and (not match)):\n        return (False, location_match)\n    match = self._CheckIsAllocated(file_entry)\n    if ((match is not None) and (not match)):\n        return (False, location_match)\n    return (True, location_match)", "docstring": "Determines if the file entry matches the find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\nsearch_depth (int): number of location path segments to compare.\n\nReturns:\ntuple: contains:\n\nbool: True if the file entry matches the find specification, False\notherwise.\nbool: True if the location matches, False if not or None if no location\nspecified.", "source": "codesearchnet"}
{"code": "def draw_arc(self, x, y, r, start, end, color):\n        \n        check_int_err(lib.arcRGBA(self._ptr, x, y, r, start, end, color[0], color[1], color[2], color[3]))", "docstring": "Draw an arc.\n\nArgs:\nx (int): The x coordinate of the center of the arc.\ny (int): The y coordinate of the center of the arc.\nr (int): The radius of the arc.\nstart (int): The start of the arc.\nend (int): The end of the arc.\ncolor (Tuple[int, int, int, int]): The color of the circle.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def update_with_token(self, token_id: int) -> bool:\n    if self.status != RequestStatus.DECODING:\n        return False\n    is_eos = token_id == self.eos_token_id and self.eos_token_id != -1\n    is_max_len = self.generated_len() >= self.max_new_tokens\n    if is_eos or is_max_len:\n        self.status = RequestStatus.FINISHED\n        return True\n    return False", "docstring": "Update the request with a newly generated token and check for completion.\n\nArgs:\ntoken_id: The token ID to add to the output sequence\n\nReturns:\nbool: True if the request is now complete, False otherwise", "source": "github-repos"}
{"code": "def get_proj_info(self, token):\n    r = self.remote_utils.get_url((self.url() + '{}/info/'.format(token)))\n    return r.json()", "docstring": "Return the project info for a given token.\n\nArguments:\ntoken (str): Token to return information for\n\nReturns:\nJSON: representation of proj_info", "source": "codesearchnet"}
{"code": "def from_file(cls, filename=\"CTRL\", **kwargs):\n        \n        with zopen(filename, \"rt\") as f:\n            contents = f.read()\n        return LMTOCtrl.from_string(contents, **kwargs)", "docstring": "Creates a CTRL file object from an existing file.\n\nArgs:\nfilename: The name of the CTRL file. Defaults to 'CTRL'.\n\nReturns:\nAn LMTOCtrl object.", "source": "juraj-google-style"}
{"code": "def diagonalize_real_symmetric_and_sorted_diagonal_matrices(symmetric_matrix: np.ndarray, diagonal_matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08, check_preconditions: bool=True) -> np.ndarray:\n    if check_preconditions:\n        if (np.any(np.imag(symmetric_matrix)) or (not predicates.is_hermitian(symmetric_matrix, rtol=rtol, atol=atol))):\n            raise ValueError('symmetric_matrix must be real symmetric.')\n        if ((not predicates.is_diagonal(diagonal_matrix, atol=atol)) or np.any(np.imag(diagonal_matrix)) or np.any((diagonal_matrix[(:(- 1), :(- 1))] < diagonal_matrix[(1:, 1:)]))):\n            raise ValueError('diagonal_matrix must be real diagonal descending.')\n        if (not predicates.commutes(diagonal_matrix, symmetric_matrix, rtol=rtol, atol=atol)):\n            raise ValueError('Given matrices must commute.')\n\n    def similar_singular(i, j):\n        return np.allclose(diagonal_matrix[(i, i)], diagonal_matrix[(j, j)], rtol=rtol)\n    ranges = _contiguous_groups(diagonal_matrix.shape[0], similar_singular)\n    p = np.zeros(symmetric_matrix.shape, dtype=np.float64)\n    for (start, end) in ranges:\n        block = symmetric_matrix[(start:end, start:end)]\n        p[(start:end, start:end)] = diagonalize_real_symmetric_matrix(block, rtol=rtol, atol=atol)\n    return p", "docstring": "Returns an orthogonal matrix that diagonalizes both given matrices.\n\nThe given matrices must commute.\nGuarantees that the sorted diagonal matrix is not permuted by the\ndiagonalization (except for nearly-equal values).\n\nArgs:\nsymmetric_matrix: A real symmetric matrix.\ndiagonal_matrix: A real diagonal matrix with entries along the diagonal\nsorted into descending order.\nrtol: Relative numeric error threshold.\natol: Absolute numeric error threshold.\ncheck_preconditions: If set, verifies that the input matrices commute\nand are respectively symmetric and diagonal descending.\n\nReturns:\nAn orthogonal matrix P such that P.T @ symmetric_matrix @ P is diagonal\nand P.T @ diagonal_matrix @ P = diagonal_matrix (up to tolerance).\n\nRaises:\nValueError: Matrices don't meet preconditions (e.g. not symmetric).", "source": "codesearchnet"}
{"code": "def asin(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.asin, tf.float32)", "docstring": "Returns a TensorFluent for the arcsin function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the arcsin function.", "source": "codesearchnet"}
{"code": "def _all_correct_list(array):\n    \n    if type(array) not in _ITERABLE_TYPES:\n        return False\n\n    for item in array:\n        if not type(item) in _ITERABLE_TYPES:\n            return False\n\n        if len(item) != 2:\n            return False\n\n    return True", "docstring": "Make sure, that all items in `array` has good type and size.\n\nArgs:\narray (list): Array of python types.\n\nReturns:\nTrue/False", "source": "juraj-google-style"}
{"code": "def print_info(info_mapping):\n    if (not info_mapping):\n        return\n    content_format = '{:<16} : {:<}\\n'\n    content = '\\n==================== Output ====================\\n'\n    content += content_format.format('Variable', 'Value')\n    content += content_format.format(('-' * 16), ('-' * 29))\n    for (key, value) in info_mapping.items():\n        if isinstance(value, (tuple, collections.deque)):\n            continue\n        elif isinstance(value, (dict, list)):\n            value = json.dumps(value)\n        elif (value is None):\n            value = 'None'\n        if is_py2:\n            if isinstance(key, unicode):\n                key = key.encode('utf-8')\n            if isinstance(value, unicode):\n                value = value.encode('utf-8')\n        content += content_format.format(key, value)\n    content += (('-' * 48) + '\\n')\n    logger.log_info(content)", "docstring": "print info in mapping.\n\nArgs:\ninfo_mapping (dict): input(variables) or output mapping.\n\nExamples:\n>>> info_mapping = {\n\"var_a\": \"hello\",\n\"var_b\": \"world\"\n}\n>>> info_mapping = {\n\"status_code\": 500\n}\n>>> print_info(info_mapping)\n==================== Output ====================\nKey              :  Value\n---------------- :  ----------------------------\nvar_a            :  hello\nvar_b            :  world\n------------------------------------------------", "source": "codesearchnet"}
{"code": "def assertNumpyObjectTensorsRecursivelyEqual(self, a, b, msg):\n    if isinstance(a, np.ndarray) and a.dtype == object:\n        self.assertEqual(a.dtype, b.dtype, msg)\n        self.assertEqual(a.shape, b.shape, msg)\n        self.assertLen(a, len(b), msg)\n        for a_val, b_val in zip(a, b):\n            self.assertNumpyObjectTensorsRecursivelyEqual(a_val, b_val, msg)\n    else:\n        self.assertAllEqual(a, b, msg)", "docstring": "Check that two numpy arrays are equal.\n\nFor arrays with dtype=object, check values recursively to see if a and b\nare equal.  (c.f. `np.array_equal`, which checks dtype=object values using\nobject identity.)\n\nArgs:\na: A numpy array.\nb: A numpy array.\nmsg: Message to display if a != b.", "source": "github-repos"}
{"code": "def load_schema(schema_path):\n    try:\n        with open(schema_path) as schema_file:\n            schema = json.load(schema_file)\n    except ValueError as e:\n        raise SchemaInvalidError(('Invalid JSON in schema or included schema: %s\\n%s' % (schema_file.name, str(e))))\n    return schema", "docstring": "Load the JSON schema at the given path as a Python object.\n\nArgs:\nschema_path: A filename for a JSON schema.\n\nReturns:\nA Python object representation of the schema.", "source": "codesearchnet"}
{"code": "def create_profile(profile_name):\n    \n    try:\n        profile = Profile(profile_name=profile_name)\n        profile.full_clean()\n        profile.save()\n    except ValidationError as err:\n        raise ValCannotCreateError(err.message_dict)", "docstring": "Used to create Profile objects in the database\n\nA profile needs to exists before an EncodedVideo object can be created.\n\nArgs:\nprofile_name (str): ID of the profile\n\nRaises:\nValCannotCreateError: Raised if the profile name is invalid or exists", "source": "juraj-google-style"}
{"code": "def from_http(cls, raw_body: MutableMapping, verification_token: Optional[str]=None, team_id: Optional[str]=None) -> 'Event':\n    if (verification_token and (raw_body['token'] != verification_token)):\n        raise exceptions.FailedVerification(raw_body['token'], raw_body['team_id'])\n    if (team_id and (raw_body['team_id'] != team_id)):\n        raise exceptions.FailedVerification(raw_body['token'], raw_body['team_id'])\n    if raw_body['event']['type'].startswith('message'):\n        return Message(raw_body['event'], metadata=raw_body)\n    else:\n        return Event(raw_body['event'], metadata=raw_body)", "docstring": "Create an event with data coming from the HTTP Event API.\n\nIf the event type is a message a :class:`slack.events.Message` is returned.\n\nArgs:\nraw_body: Decoded body of the Event API request\nverification_token: Slack verification token used to verify the request came from slack\nteam_id: Verify the event is for the correct team\n\nReturns:\n:class:`slack.events.Event` or :class:`slack.events.Message`\n\nRaises:\n:class:`slack.exceptions.FailedVerification`: when `verification_token` or `team_id` does not match the\nincoming event's.", "source": "codesearchnet"}
{"code": "def decrypt(key, ciphertext):\n    \n    \n    key = ''.join(key)\n    alphabet = string.ascii_letters\n    cipher_alphabet = key.lower() + key.upper()\n    return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))", "docstring": "Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.\n\nExample:\n>>> decrypt(\"PQSTUVWXYZCODEBRAKINGFHJLM\", \"XUOOB\")\nHELLO\n\nArgs:\nkey (iterable): The key to use\nciphertext (str): The text to decrypt\n\nReturns:\nDecrypted ciphertext", "source": "juraj-google-style"}
{"code": "def shape_rb_data(raw_rb):\n    rb_data = []\n    rb_data.append(np.mean(raw_rb, 0))\n    rb_data.append(np.std(raw_rb, 0))\n    return rb_data", "docstring": "Take the raw rb data and convert it into averages and std dev\n\nArgs:\nraw_rb (numpy.array): m x n x l list where m is the number of seeds, n\nis the number of Clifford sequences and l is the number of qubits\n\nReturn:\nnumpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is\nthe std dev overseeds", "source": "codesearchnet"}
{"code": "def is_installed(name):\n    out = __salt__['cmd.run_all'](((FLATPAK_BINARY_NAME + ' info ') + name))\n    if (out['retcode'] and out['stderr']):\n        return False\n    else:\n        return True", "docstring": "Determine if a package or runtime is installed.\n\nArgs:\nname (str): The name of the package or the runtime.\n\nReturns:\nbool: True if the specified package or runtime is installed.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' flatpak.is_installed org.gimp.GIMP", "source": "codesearchnet"}
{"code": "def bonds(lines, atoms):\n    conv_stereo_table = {0: 0, 1: 1, 3: 3, 4: 3, 6: 2}\n    results = {a: {} for a in atoms}\n    for line in lines:\n        bond = Bond()\n        first = int(line[0:3])\n        second = int(line[3:6])\n        if (first > second):\n            bond.is_lower_first = 0\n        order = int(line[6:9])\n        if (order < 4):\n            bond.order = order\n        bond.type = conv_stereo_table[int(line[9:12])]\n        results[first][second] = {'bond': bond}\n        results[second][first] = {'bond': bond}\n    return results", "docstring": "Parse bond block into bond objects\n\nReturns:\ndict: networkx adjacency dict", "source": "codesearchnet"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    date_time_values = self._CopyDateTimeFromString(time_string)\n\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n\n    self._normalized_timestamp = None\n    self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n    self._microseconds = date_time_values.get('microseconds', None)\n\n    self.is_local_time = False", "docstring": "Copies a fake timestamp from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.", "source": "juraj-google-style"}
{"code": "def config_stdio(self, log_configurations: Optional[List[LogConfiguration]] = None, default_level=logging.INFO) -> None:\n        \n        \n        if not log_configurations:\n            for logger in self.loggers.values():\n                self._restrict_output(logger, default_level)\n        \n        else:\n            for component, level in log_configurations:\n                try:\n                    logger = self.loggers[self.root + component]\n                except KeyError:\n                    raise ValueError(\"Failed to configure component. Invalid name: {}\".format(component))\n                self._restrict_output(logger, level)", "docstring": "Configure the stdio `StreamHandler` levels on the specified loggers.\nIf no log configurations are specified then the `default_level` will be applied to all handlers.\n\nArgs:\nlog_configurations: a list of (component name, log level) tuples\ndefault_level: logging level to apply when no log_configurations are specified", "source": "juraj-google-style"}
{"code": "def add_behaviour(self, behaviour, template=None):\n    behaviour.set_agent(self)\n    if issubclass(type(behaviour), FSMBehaviour):\n        for (_, state) in behaviour.get_states().items():\n            state.set_agent(self)\n    behaviour.set_template(template)\n    self.behaviours.append(behaviour)\n    if self.is_alive():\n        behaviour.start()", "docstring": "Adds and starts a behaviour to the agent.\nIf template is not None it is used to match\nnew messages and deliver them to the behaviour.\n\nArgs:\nbehaviour (spade.behaviour.CyclicBehaviour): the behaviour to be started\ntemplate (spade.template.Template, optional): the template to match messages with (Default value = None)", "source": "codesearchnet"}
{"code": "def wait(self, timeout_ms=None):\n    closed = timeouts.loop_until_timeout_or_true(timeouts.PolledTimeout.from_millis(timeout_ms), self.stream.is_closed, 0.1)\n    if closed:\n        if hasattr(self.stdout, 'getvalue'):\n            return self.stdout.getvalue()\n        return True\n    return None", "docstring": "Block until this command has completed.\n\nArgs:\ntimeout_ms: Timeout, in milliseconds, to wait.\n\nReturns:\nOutput of the command if it complete and self.stdout is a StringIO\nobject or was passed in as None.  Returns True if the command completed but\nstdout was provided (and was not a StringIO object).  Returns None if the\ntimeout expired before the command completed.  Be careful to check the\nreturn value explicitly for None, as the output may be ''.", "source": "codesearchnet"}
{"code": "def _backspaced_single_line_animation(animation_, *args, **kwargs):\n    animation_gen = animation_(*args, **kwargs)\n    (yield next(animation_gen))\n    (yield from util.concatechain(util.BACKSPACE_GEN(kwargs['width']), animation_gen))", "docstring": "Turn an animation into an automatically backspaced animation.\n\nArgs:\nanimation: A function that returns a generator that yields\nstrings for animation frames.\nargs: Arguments for the animation function.\nkwargs: Keyword arguments for the animation function.\nReturns:\nthe animation generator, with backspaces applied to each but the first\nframe.", "source": "codesearchnet"}
{"code": "def rms_forward(hidden_states, variance_epsilon=1e-06):\n    input_dtype = hidden_states.dtype\n    hidden_states = hidden_states.to(torch.float32)\n    variance = hidden_states.pow(2).mean(-1, keepdim=True)\n    hidden_states = hidden_states * torch.rsqrt(variance + variance_epsilon)\n    return hidden_states.to(input_dtype)", "docstring": "Calculates simple RMSNorm with no learnable weights. `MambaRMSNorm` will\nleverage this in order to multiply the final result with the RMSNorm weight\n\nArgs:\nhidden_states (`torch.Tensor`):\nHidden states to normalize\nvariance_epsilon (`float`):\nThe eps value to add in the square root scaling factor", "source": "github-repos"}
{"code": "def _find_max_under_constraint(self, constrained, dependent, predicate):\n    feasible = ops.nonzero(predicate(constrained, self.value))\n    feasible_exists = ops.greater(ops.size(feasible), 0)\n    max_dependent = ops.max(ops.take(dependent, feasible), initial=0)\n    return ops.where(feasible_exists, max_dependent, 0.0)", "docstring": "Returns the maximum of dependent_statistic that satisfies the\nconstraint.\n\nArgs:\nconstrained: Over these values the constraint is specified. A rank-1\ntensor.\ndependent: From these values the maximum that satiesfies the\nconstraint is selected. Values in this tensor and in\n`constrained` are linked by having the same threshold at each\nposition, hence this tensor must have the same shape.\npredicate: A binary boolean functor to be applied to arguments\n`constrained` and `self.value`, e.g. `ops.greater`.\n\nReturns:\nmaximal dependent value, if no value satisfies the constraint 0.0.", "source": "github-repos"}
{"code": "def __init__(self, graph, run_metadata):\n    self._graph = graph\n    self._run_metadata = run_metadata\n    self._string_table = StringTable()\n    self._functions = Functions(self._string_table)\n    self._locations = Locations(self._functions)", "docstring": "Constructor.\n\nArgs:\ngraph: A `Graph` instance.\nrun_metadata: A list of `RunMetadata` objects.", "source": "github-repos"}
{"code": "def parse_yaml_file(self, yaml_file: Union[str, os.PathLike], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:\n    outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)\n    return tuple(outputs)", "docstring": "Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the\ndataclass types.\n\nArgs:\nyaml_file (`str` or `os.PathLike`):\nFile name of the yaml file to parse\nallow_extra_keys (`bool`, *optional*, defaults to `False`):\nDefaults to False. If False, will raise an exception if the json file contains keys that are not\nparsed.\n\nReturns:\nTuple consisting of:\n\n- the dataclass instances in the same order as they were passed to the initializer.", "source": "github-repos"}
{"code": "def strip_strings(model):\n    model.description = None\n    for subgraph in model.subgraphs:\n        subgraph.name = None\n        for tensor in subgraph.tensors:\n            tensor.name = None\n    model.signatureDefs = None", "docstring": "Strips all nonessential strings from the model to reduce model size.\n\nWe remove the following strings:\n(find strings by searching \":string\" in the tensorflow lite flatbuffer schema)\n1. Model description\n2. SubGraph name\n3. Tensor names\nWe retain OperatorCode custom_code and Metadata name.\n\nArgs:\nmodel: The model from which to remove nonessential strings.", "source": "github-repos"}
{"code": "def __item_descriptor(self, config):\n    \n    descriptor = {\n        'kind': 'discovery\n        'icons': {\n            'x16': 'https:\n                   'googleg_16dp.png',\n            'x32': 'https:\n                   'googleg_32dp.png',\n        },\n        'preferred': True,\n    }\n\n    description = config.get('description')\n    root_url = config.get('root')\n    name = config.get('name')\n    version = config.get('api_version')\n    relative_path = '/apis/{0}/{1}/rest'.format(name, version)\n\n    if description:\n      descriptor['description'] = description\n\n    descriptor['name'] = name\n    descriptor['version'] = version\n    descriptor['discoveryLink'] = '.{0}'.format(relative_path)\n\n    root_url_port = urlparse.urlparse(root_url).port\n\n    original_path = self.__request.reconstruct_full_url(\n        port_override=root_url_port)\n    descriptor['discoveryRestUrl'] = '{0}/{1}/{2}/rest'.format(\n        original_path, name, version)\n\n    if name and version:\n      descriptor['id'] = '{0}:{1}'.format(name, version)\n\n    return descriptor", "docstring": "Builds an item descriptor for a service configuration.\n\nArgs:\nconfig: A dictionary containing the service configuration to describe.\n\nReturns:\nA dictionary that describes the service configuration.", "source": "juraj-google-style"}
{"code": "def circuit_to_image(circ: Circuit, qubits: Qubits=None) -> PIL.Image:\n    latex = circuit_to_latex(circ, qubits)\n    img = render_latex(latex)\n    return img", "docstring": "Create an image of a quantum circuit.\n\nA convenience function that calls circuit_to_latex() and render_latex().\n\nArgs:\ncirc:       A quantum Circuit\nqubits:     Optional qubit list to specify qubit order\n\nReturns:\nReturns: A PIL Image (Use img.show() to display)\n\nRaises:\nNotImplementedError: For unsupported gates.\nOSError: If an external dependency is not installed.", "source": "codesearchnet"}
{"code": "def mach60(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[23] == '0':\n        return None\n\n    mach = bin2int(d[24:34]) * 2.048 / 512.0\n    return round(mach, 3)", "docstring": "Aircraft MACH number\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nfloat: MACH number", "source": "juraj-google-style"}
{"code": "def output_file_for(window, shard, pane):\n    filename = '%s/LOG-%s-%s-%03d-%s' % (output_path, window.max_timestamp(), shard, pane.index, pane.timing) if output_path else None\n    return OutputFile(window.max_timestamp(), shard, pane.index, pane.timing, filename)", "docstring": "Returns:\nan OutputFile object constructed with pane, window and shard.", "source": "github-repos"}
{"code": "def on_modified(self, event):\n    self._logger.debug('Detected modify event on watched path: %s', event.src_path)\n    self._process_event(event)", "docstring": "Function called everytime a new file is modified.\n\nArgs:\nevent: Event to process.", "source": "codesearchnet"}
{"code": "def __init__(self, default: typing.Optional[int]=MISSING_VALUE, min_value: typing.Optional[int]=None, max_value: typing.Optional[int]=None, is_noneable: bool=False, frozen: bool=False):\n    super().__init__(int, default, min_value, max_value, is_noneable, frozen)", "docstring": "Constructor.\n\nArgs:\ndefault: (Optional) default value for this spec.\nmin_value: (Optional) minimum value of acceptable values.\nmax_value: (Optional) maximum value of acceptable values.\nis_noneable: If True, None is acceptable.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def recipe_dt(config, auth_read, auth_write, bucket, paths, days, hours, dataset):\n    dt(config, {'auth': auth_read, 'from': {'bucket': bucket, 'paths': paths, 'days': days, 'hours': hours}, 'to': {'auth': auth_write, 'dataset': dataset}})", "docstring": "Move data from a DT bucket into a BigQuery table.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\nbucket (string) - Name of bucket where DT files are stored.\npaths (string_list) - List of prefixes to pull specific DT files.\ndays (integer) - Number of days back to synchronize.\nhours (integer) - Number of hours back to synchronize.\ndataset (string) - Existing dataset in BigQuery.", "source": "github-repos"}
{"code": "def _FormatValue(self, value, level=0):\n    \n\n    def FormatDictItem(key_value):\n      \n      key, value = key_value\n      return (self._FormatValue(key, level + 1) +\n              ': ' +\n              self._FormatValue(value, level + 1))\n\n    def LimitedEnumerate(items, formatter, level=0):\n      \n      count = 0\n      limit = self.max_sublist_items if level > 0 else self.max_list_items\n      for item in items:\n        if count == limit:\n          yield '...'\n          break\n\n        yield formatter(item)\n        count += 1\n\n    def FormatList(items, formatter, level=0):\n      \n      return ', '.join(LimitedEnumerate(items, formatter, level=level))\n\n    if isinstance(value, _PRIMITIVE_TYPES):\n      return _TrimString(repr(value),  \n                         self.max_value_len)\n\n    if isinstance(value, _DATE_TYPES):\n      return str(value)\n\n    if level > self.max_depth:\n      return str(type(value))\n\n    if isinstance(value, dict):\n      return '{' + FormatList(six.iteritems(value), FormatDictItem) + '}'\n\n    if isinstance(value, _VECTOR_TYPES):\n      return _ListTypeFormatString(value).format(FormatList(\n          value, lambda item: self._FormatValue(item, level + 1), level=level))\n\n    if isinstance(value, types.FunctionType):\n      return 'function ' + value.__name__\n\n    if hasattr(value, '__dict__') and value.__dict__:\n      return self._FormatValue(value.__dict__, level)\n\n    return str(type(value))", "docstring": "Pretty-prints an object for a logger.\n\nThis function is very similar to the standard pprint. The main difference\nis that it enforces limits to make sure we never produce an extremely long\nstring or take too much time.\n\nArgs:\nvalue: Python object to print.\nlevel: current recursion level.\n\nReturns:\nFormatted string.", "source": "juraj-google-style"}
{"code": "def softplus(x, scale=1.0, name=None):\n    if (scale == 1):\n        return tf.nn.softplus(x)\n    else:\n        with tf.name_scope(name, 'softplus', [x]):\n            scale = tf.convert_to_tensor(scale, dtype=x.dtype.base_dtype)\n            return (tf.nn.softplus((x * scale)) / scale)", "docstring": "Computes softplus with a scale factor to sharpen of the hinge.\n\nThis is an alternate non-linearity to relu. It has a similar shape, but\nit has a smooth transition from the linear part to 0.\n\nArgs:\nx: A tensor.\nscale: A float that sharpens the curve.\nname: Optional name.\nReturns:\ny = log(1 + exp(scale * x)) / scale", "source": "codesearchnet"}
{"code": "def read_binary(self, key, b64decode=True, decode=False):\n    data = None\n    if (key is not None):\n        data = self.db.read(key.strip())\n        if (data is not None):\n            data = json.loads(data)\n            if b64decode:\n                data = base64.b64decode(data)\n                if decode:\n                    try:\n                        data = data.decode('utf-8')\n                    except UnicodeDecodeError:\n                        data = data.decode('latin-1')\n    else:\n        self.tcex.log.warning(u'The key field was None.')\n    return data", "docstring": "Read method of CRUD operation for binary data.\n\nArgs:\nkey (string): The variable to read from the DB.\nb64decode (bool): If true the data will be base64 decoded.\ndecode (bool): If true the data will be decoded to a String.\n\nReturns:\n(bytes|string): Results retrieved from DB.", "source": "codesearchnet"}
{"code": "def initialize_tpu_system_impl(cluster_resolver, tpu_cluster_resolver_cls):\n    if tpu_cluster_resolver_cls is None or not issubclass(tpu_cluster_resolver_cls, cluster_resolver_lib.ClusterResolver) or (not hasattr(tpu_cluster_resolver_cls, 'tpu_hardware_feature')):\n        raise TypeError('tpu_cluster_resolver_cls is not tf.distribute.cluster_resolver.TPUClusterResolver.')\n    logging.info('Deallocate tpu buffers before initializing tpu system.')\n    context.context()._clear_caches()\n    context.context().clear_kernel_cache()\n    gc.collect()\n    job = None\n    if cluster_resolver is None:\n        if context.executing_eagerly():\n            curr_device = device.DeviceSpec.from_string(context.context().device_name)\n            if curr_device.job is not None:\n                job = '{}/replica:0/task:0'.format(curr_device.job)\n        cluster_resolver = tpu_cluster_resolver_cls('')\n    assert isinstance(cluster_resolver, tpu_cluster_resolver_cls)\n    tpu_name = compat.as_text(cluster_resolver._tpu)\n    if tpu_name in _INITIALIZED_TPU_SYSTEMS:\n        logging.warning('TPU system %s has already been initialized. Reinitializing the TPU can cause previously created variables on TPU to be lost.', tpu_name)\n    logging.info('Initializing the TPU system: %s', tpu_name)\n    if tpu_name not in _LOCAL_MASTERS:\n        job = '{}/replica:0/task:0'.format(cluster_resolver.get_job_name())\n    if context.executing_eagerly():\n\n        @def_function.function(autograph=False)\n        def _tpu_init_fn():\n            return tpu.initialize_system(job=job, compilation_failure_closes_chips=False, tpu_cancellation_closes_chips=False)\n        run_eagerly = def_function.functions_run_eagerly()\n        if run_eagerly:\n            logging.warning('It looks like tf.function behavior was disabled, perhaps using tf.config.run_functions_eagerly. tf.tpu.experimental.initialize_tpu_system requires tf.function to work. This primitive will override the disable.')\n            def_function.run_functions_eagerly(False)\n        try:\n            with ops.device(tpu._tpu_system_device_name(job)):\n                output = _tpu_init_fn()\n            context.async_wait()\n        except errors.InvalidArgumentError as e:\n            raise errors.NotFoundError(None, None, 'TPUs not found in the cluster. 
Failed in initialization: ' + str(e))\n        finally:\n            if run_eagerly is not None:\n                def_function.run_functions_eagerly(run_eagerly)\n        context.context()._initialize_logical_devices()\n        serialized_topology = output.numpy()\n    elif not ops.executing_eagerly_outside_functions():\n        master = cluster_resolver.master()\n        cluster_spec = cluster_resolver.cluster_spec()\n        session_config = config_pb2.ConfigProto(allow_soft_placement=True)\n        if cluster_spec:\n            session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())\n        with ops.Graph().as_default():\n            with session_lib.Session(config=session_config, target=master) as sess:\n                serialized_topology = sess.run(tpu.initialize_system())\n    else:\n        with ops.device(tpu._tpu_system_device_name(job)):\n            serialized_topology = tpu.initialize_system(job=job, compilation_failure_closes_chips=False)\n            return serialized_topology\n    logging.info('Finished initializing TPU system.')\n    tpu_topology = topology.Topology(serialized=serialized_topology)\n    cluster_resolver.set_tpu_topology(serialized_topology)\n    _INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology\n    _tpu_worker_address.get_cell('address').set(cluster_resolver.get_master())\n    return tpu_topology", "docstring": "Implementation for tpu.experimental.initialize_tpu_system.\n\nKept separate to avoid tpu_oss code duplication.\n\nInitialize the TPU devices.\n\nArgs:\ncluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\nwhich provides information about the TPU cluster.\ntpu_cluster_resolver_cls: a reference to\ntf.distribute.cluster_resolver.TPUClusterResolver so that an instance\nof it can be initialized if cluster_resolver is None.\nReturns:\nThe tf.tpu.Topology object for the topology of the TPU cluster. If called\ninside tf.function, it returns the serialized topology object instead.\n\nRaises:\nRuntimeError: If running inside a tf.function.\nNotFoundError: If no TPU devices found in eager mode.\nTypeError: If tpu_cluster_resolver_cls is\nnot tf.distribute.cluster_resolver.TPUClusterResolver.", "source": "github-repos"}
{"code": "def _flatten_subsection(subsection, _type, offset, parent):\n    for row in subsection:\n        if (row in ('Low', 'Generated', 'High')):\n            continue\n        elif isinstance(row[0], StringType):\n            if (len(row) in (4, 5)):\n                if (len(row) == 5):\n                    assert (row[4][0] == 'S'), ('Only known usage of a fifth member is Sn, found: %s' % row[4][0])\n                (yield (float(row[0]), float(row[1]), float(row[2]), (float(row[3]) / 2.0), _type, offset, parent))\n                parent = offset\n                offset += 1\n        elif isinstance(row[0], list):\n            split_parent = (offset - 1)\n            start_offset = 0\n            slices = []\n            start = 0\n            for (i, value) in enumerate(row):\n                if (value == '|'):\n                    slices.append(slice((start + start_offset), i))\n                    start = (i + 1)\n            slices.append(slice((start + start_offset), len(row)))\n            for split_slice in slices:\n                for _row in _flatten_subsection(row[split_slice], _type, offset, split_parent):\n                    offset += 1\n                    (yield _row)", "docstring": "Flatten a subsection from its nested version\n\nArgs:\nsubsection: Nested subsection as produced by _parse_section, except one level in\n_type: type of section, ie: AXON, etc\nparent: first element has this as it's parent\noffset: position in the final array of the first element\n\nReturns:\nGenerator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]", "source": "codesearchnet"}
{"code": "def plot(self, figsize=None, rotation=45):\n    (fig, ax) = plt.subplots(figsize=figsize)\n    plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto')\n    plt.title('Confusion matrix')\n    plt.colorbar()\n    tick_marks = np.arange(len(self._labels))\n    plt.xticks(tick_marks, self._labels, rotation=rotation)\n    plt.yticks(tick_marks, self._labels)\n    if isinstance(self._cm, list):\n        thresh = (max(max(self._cm)) / 2.0)\n        for (i, j) in itertools.product(range(len(self._labels)), range(len(self._labels))):\n            plt.text(j, i, self._cm[i][j], horizontalalignment='center', color=('white' if (self._cm[i][j] > thresh) else 'black'))\n    else:\n        thresh = (self._cm.max() / 2.0)\n        for (i, j) in itertools.product(range(len(self._labels)), range(len(self._labels))):\n            plt.text(j, i, self._cm[(i, j)], horizontalalignment='center', color=('white' if (self._cm[(i, j)] > thresh) else 'black'))\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')", "docstring": "Plot the confusion matrix.\n\nArgs:\nfigsize: tuple (x, y) of ints. Sets the size of the figure\nrotation: the rotation angle of the labels on the x-axis.", "source": "codesearchnet"}
{"code": "def authenticate(self, auth_token, auth_info, service_name):\n    try:\n        jwt_claims = self.get_jwt_claims(auth_token)\n    except Exception as error:\n        raise suppliers.UnauthenticatedException(u'Cannot decode the auth token', error)\n    _check_jwt_claims(jwt_claims)\n    user_info = UserInfo(jwt_claims)\n    issuer = user_info.issuer\n    if (issuer not in self._issuers_to_provider_ids):\n        raise suppliers.UnauthenticatedException((u'Unknown issuer: ' + issuer))\n    provider_id = self._issuers_to_provider_ids[issuer]\n    if (not auth_info.is_provider_allowed(provider_id)):\n        raise suppliers.UnauthenticatedException((u'The requested method does not allow provider id: ' + provider_id))\n    audiences = user_info.audiences\n    has_service_name = (service_name in audiences)\n    allowed_audiences = auth_info.get_allowed_audiences(provider_id)\n    intersected_audiences = set(allowed_audiences).intersection(audiences)\n    if ((not has_service_name) and (not intersected_audiences)):\n        raise suppliers.UnauthenticatedException(u'Audiences not allowed')\n    return user_info", "docstring": "Authenticates the current auth token.\n\nArgs:\nauth_token: the auth token.\nauth_info: the auth configurations of the API method being called.\nservice_name: the name of this service.\n\nReturns:\nA constructed UserInfo object representing the identity of the caller.\n\nRaises:\nUnauthenticatedException: When\n* the issuer is not allowed;\n* the audiences are not allowed;\n* the auth token has already expired.", "source": "codesearchnet"}
{"code": "def insert(self, fields, typecast=False):\n        \n        return self._post(self.url_table, json_data={\"fields\": fields, \"typecast\": typecast})", "docstring": "Inserts a record\n\n>>> record = {'Name': 'John'}\n>>> airtable.insert(record)\n\nArgs:\nfields(``dict``): Fields to insert.\nMust be dictionary with Column names as Key.\ntypecast(``boolean``): Automatic data conversion from string values.\n\nReturns:\nrecord (``dict``): Inserted record", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None):\n    pixel_values = pixel_values.type(self.visual.dtype)\n    image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)\n    split_sizes = (image_grid_thw.prod(-1) \n    image_embeds = torch.split(image_embeds, split_sizes)\n    return image_embeds", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\nimage_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):\nThe temporal, height and width of feature shape of each image in LLM.", "source": "github-repos"}
{"code": "def _AddExtractionProcessStatusTableRow(self, process_status, table_view):\n    used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)\n    sources = ''\n    if ((process_status.number_of_produced_sources is not None) and (process_status.number_of_produced_sources_delta is not None)):\n        sources = '{0:d} ({1:d})'.format(process_status.number_of_produced_sources, process_status.number_of_produced_sources_delta)\n    events = ''\n    if ((process_status.number_of_produced_events is not None) and (process_status.number_of_produced_events_delta is not None)):\n        events = '{0:d} ({1:d})'.format(process_status.number_of_produced_events, process_status.number_of_produced_events_delta)\n    table_view.AddRow([process_status.identifier, process_status.pid, process_status.status, used_memory, sources, events, process_status.display_name])", "docstring": "Adds an extraction process status table row.\n\nArgs:\nprocess_status (ProcessStatus): processing status.\ntable_view (CLITabularTableView): table view.", "source": "codesearchnet"}
{"code": "def __init__(self, points, joggle=False):\n        \n        self.points = points\n        dim = [len(i) for i in self.points]\n        if max(dim) != min(dim):\n            raise ValueError(\"Input points must all have the same dimension!\")\n        self.dim = dim[0]\n        if joggle:\n            options = \"i QJ\"\n        else:\n            options = \"i Qt\"\n        output = qdelaunay(options, points)\n        output.pop(0)\n        self.vertices = [[int(i)for i in row.strip().split()] for row in output]", "docstring": "Initializes a DelaunayTri from points.\n\nArgs:\npoints ([[float]]): All the points as a sequence of sequences.\ne.g., [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]]\njoggle (bool): Use qhull option to joggle inputs until simplical\nresult is obtained instead of merging facets.", "source": "juraj-google-style"}
{"code": "def render_trees(trees, path_composer):\n    trees = list(trees)\n\n    def create_pub_cache(trees):\n        \"\\n        Create uuid -> DBPublication cache from all uuid's linked from `trees`.\\n\\n        Args:\\n            trees (list): List of :class:`.Tree`.\\n\\n        Returns:\\n            dict: {uuid: DBPublication}\\n        \"\n        sub_pubs_uuids = sum((x.collect_publications() for x in trees), [])\n        uuid_mapping = {uuid: search_pubs_by_uuid(uuid) for uuid in set(sub_pubs_uuids)}\n        return {uuid: pub[0] for (uuid, pub) in uuid_mapping.iteritems() if pub}\n    pub_cache = create_pub_cache(trees)\n\n    def render_tree(tree, ind=1):\n        '\\n        Render the tree into HTML using :attr:`TREE_TEMPLATE`. Private trees\\n        are ignored.\\n\\n        Args:\\n            tree (obj): :class:`.Tree` instance.\\n            ind (int, default 1): Indentation. This function is called\\n                recursively.\\n\\n        Returns:\\n            str: Rendered string.\\n        '\n        if (not tree.is_public):\n            return ''\n        rendered_tree = SimpleTemplate(TREE_TEMPLATE).render(tree=tree, render_tree=render_tree, ind=ind, path_composer=path_composer, pub_cache=pub_cache)\n        ind_txt = (ind * '  ')\n        return (ind_txt + ('\\n' + ind_txt).join(rendered_tree.splitlines()))\n    parent = tree_handler().get_parent(trees[0])\n    link_up = (path_composer(parent) if parent else None)\n    return SimpleTemplate(TREES_TEMPLATE).render(trees=trees, render_tree=render_tree, link_up=link_up)", "docstring": "Render list of `trees` to HTML.\n\nArgs:\ntrees (list): List of :class:`.Tree`.\npath_composer (fn reference): Function used to compose paths from UUID.\nLook at :func:`.compose_tree_path` from :mod:`.web_tools`.\n\nReturns:\nstr: HTML representation of trees.", "source": "codesearchnet"}
{"code": "def _AddSerializedEvent(self, event):\n    \n    identifier = identifiers.SQLTableIdentifier(\n        self._CONTAINER_TYPE_EVENT,\n        self._serialized_event_heap.number_of_events + 1)\n    event.SetIdentifier(identifier)\n\n    serialized_data = self._SerializeAttributeContainer(event)\n\n    self._serialized_event_heap.PushEvent(event.timestamp, serialized_data)\n\n    if self._serialized_event_heap.data_size > self._maximum_buffer_size:\n      self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)", "docstring": "Adds an serialized event.\n\nArgs:\nevent (EventObject): event.\n\nRaises:\nIOError: if the event cannot be serialized.\nOSError: if the event cannot be serialized.", "source": "juraj-google-style"}
{"code": "def add_comment(self, line):\n        \n        if not isinstance(self.last_item, Comment):\n            comment = Comment(self._structure)\n            self._structure.append(comment)\n        self.last_item.add_line(line)\n        return self", "docstring": "Add a Comment object to the section\n\nUsed during initial parsing mainly\n\nArgs:\nline (str): one line in the comment", "source": "juraj-google-style"}
{"code": "def start(self) -> asyncio.Future:\n    if (os.name != 'nt'):\n        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)\n        for s in signals:\n            self._event_loop.add_signal_handler(s, self.stop)\n    future = asyncio.ensure_future(self._connect_and_read(), loop=self._event_loop)\n    if (self.run_async or self._event_loop.is_running()):\n        return future\n    return self._event_loop.run_until_complete(future)", "docstring": "Starts an RTM Session with Slack.\n\nMakes an authenticated call to Slack's RTM API to retrieve\na websocket URL and then connects to the message server.\nAs events stream-in we run any associated callbacks stored\non the client.\n\nIf 'auto_reconnect' is specified we\nretrieve a new url and reconnect any time the connection\nis lost unintentionally or an exception is thrown.\n\nRaises:\nSlackApiError: Unable to retreive RTM URL from Slack.", "source": "codesearchnet"}
{"code": "def bottom(self, features):\n    if (not self._problem_hparams):\n        log_warn('Without a Problem, T2TModel.bottom is a passthrough.')\n        return features\n    transformed_features = collections.OrderedDict()\n    all_previous_modalities = []\n    target_modality = _create_target_modality(self._problem_hparams.modality)\n    for (feature_name, modality) in sorted(six.iteritems(self._problem_hparams.modality)):\n        if (feature_name not in features):\n            tf.logging.warning(('Missing feature %s - ignoring.' % feature_name))\n            continue\n        vocab_size = self._problem_hparams.vocab_size[feature_name]\n        if ((vocab_size is not None) and hasattr(self._hparams, 'vocab_divisor')):\n            vocab_size += ((- vocab_size) % self._hparams.vocab_divisor)\n        modality_name = self._hparams.name.get(feature_name, modalities.get_name(modality))(self._hparams, vocab_size)\n        if (feature_name in target_modality):\n            if (len(target_modality) > 1):\n                variable_scope_name = ('%s/%s' % (modality_name, feature_name))\n            else:\n                variable_scope_name = modality_name\n            bottom = self._hparams.bottom.get(feature_name, modalities.get_targets_bottom(modality))\n            with tf.variable_scope(variable_scope_name) as vs:\n                self._add_variable_scope(variable_scope_name, vs)\n                log_info(\"Transforming feature '%s' with %s.targets_bottom\", feature_name, modality_name)\n                transformed_features[feature_name] = bottom(features[feature_name], self._hparams, vocab_size)\n        else:\n            bottom = self._hparams.bottom.get(feature_name, modalities.get_bottom(modality))\n            do_reuse = (modality_name in all_previous_modalities)\n            with tf.variable_scope(modality_name, reuse=do_reuse) as vs:\n                self._add_variable_scope(modality_name, vs)\n                log_info(\"Transforming feature '%s' with %s.bottom\", feature_name, modality_name)\n                transformed_features[feature_name] = bottom(features[feature_name], self._hparams, vocab_size)\n            all_previous_modalities.append(modality_name)\n    for key in features:\n        if (key not in transformed_features):\n            transformed_features[key] = features[key]\n        else:\n            transformed_features[(key + '_raw')] = features[key]\n    return transformed_features", "docstring": "Transforms features to feed into body.\n\nArgs:\nfeatures: dict of str to Tensor. Typically it is the preprocessed data\nbatch after Problem's preprocess_example().\n\nReturns:\ntransformed_features: dict of same key-value pairs as features. The value\nTensors are newly transformed.", "source": "codesearchnet"}
{"code": "def to_event(self, event_type, field_name=None, depth=None):\n        \n        if self.ion_event is None:\n            value = self\n            if isinstance(self, IonPyNull):\n                value = None\n            self.ion_event = IonEvent(event_type, ion_type=self.ion_type, value=value, field_name=field_name,\n                                      annotations=self.ion_annotations, depth=depth)\n        return self.ion_event", "docstring": "Constructs an IonEvent from this _IonNature value.\n\nArgs:\nevent_type (IonEventType): The type of the resulting event.\nfield_name (Optional[text]): The field name associated with this value, if any.\ndepth (Optional[int]): The depth of this value.\n\nReturns:\nAn IonEvent with the properties from this value.", "source": "juraj-google-style"}
{"code": "def convert2wavenumber(rsr):\n    retv = {}\n    for chname in rsr.keys():\n        retv[chname] = {}\n        for det in rsr[chname].keys():\n            retv[chname][det] = {}\n            if ('wavenumber' in rsr[chname][det].keys()):\n                retv[chname][det] = rsr[chname][det].copy()\n                LOG.debug('RSR data already in wavenumber space. No conversion needed.')\n                continue\n            for sat in rsr[chname][det].keys():\n                if (sat == 'wavelength'):\n                    wnum = (1.0 / (0.0001 * rsr[chname][det][sat]))\n                    retv[chname][det]['wavenumber'] = wnum[::(- 1)]\n                elif (sat == 'response'):\n                    if (type(rsr[chname][det][sat]) is dict):\n                        retv[chname][det][sat] = {}\n                        for name in rsr[chname][det][sat].keys():\n                            resp = rsr[chname][det][sat][name]\n                            retv[chname][det][sat][name] = resp[::(- 1)]\n                    else:\n                        resp = rsr[chname][det][sat]\n                        retv[chname][det][sat] = resp[::(- 1)]\n    unit = 'cm-1'\n    si_scale = 100.0\n    return (retv, {'unit': unit, 'si_scale': si_scale})", "docstring": "Take rsr data set with all channels and detectors for an instrument\neach with a set of wavelengths and normalised responses and\nconvert to wavenumbers and responses\n\n:rsr: Relative Spectral Response function (all bands)\nReturns:\n:retv: Relative Spectral Responses in wave number space\n:info: Dictionary with scale (to go convert to SI units) and unit", "source": "codesearchnet"}
{"code": "def copy_modified_gene(self, modified_gene, ignore_model_attributes=True):\n    ignore = ['_model', '_reaction', '_functional', 'model', 'reaction', 'functional']\n    for attr in filter((lambda a: ((not a.startswith('__')) and (not isinstance(getattr(type(self), a, None), property)) and (not callable(getattr(self, a))))), dir(modified_gene)):\n        if ((attr not in ignore) and ignore_model_attributes):\n            setattr(self, attr, getattr(modified_gene, attr))", "docstring": "Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID.\n\nArgs:\nmodified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over.\nignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models.", "source": "codesearchnet"}
{"code": "def get_month_description(self):\n    return self.get_segment_description(self._expression_parts[4], '', (lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime('%B')), (lambda s: _(', every {0} months').format(s)), (lambda s: _(', {0} through {1}')), (lambda s: _(', only in {0}')))", "docstring": "Generates a description for only the MONTH portion of the expression\n\nReturns:\nThe MONTH description", "source": "codesearchnet"}
{"code": "def model_loader(gem_file_path, gem_file_type):\n    if ((gem_file_type.lower() == 'xml') or (gem_file_type.lower() == 'sbml')):\n        model = read_sbml_model(gem_file_path)\n    elif (gem_file_type.lower() == 'mat'):\n        model = load_matlab_model(gem_file_path)\n    elif (gem_file_type.lower() == 'json'):\n        model = load_json_model(gem_file_path)\n    else:\n        raise ValueError('File type must be \"sbml\", \"xml\", \"mat\", or \"json\".')\n    return model", "docstring": "Consolidated function to load a GEM using COBRApy. Specify the file type being loaded.\n\nArgs:\ngem_file_path (str): Path to model file\ngem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format\n\nReturns:\nCOBRApy Model object.", "source": "codesearchnet"}
{"code": "def convert_to_dimension(d):\n  \n  if d is None:\n    return None\n  if isinstance(d, Dimension):\n    if not isinstance(d.name, str) or not isinstance(d.size, int):\n      raise ValueError(\"Bad dimension %s\" % (d,))\n    return d\n  name, size = d\n  if isinstance(name, str) and isinstance(size, int):\n    return Dimension(name, size)\n  else:\n    raise ValueError(\"could not convert %s to Dimension\" % (d,))", "docstring": "Converts input to a Dimension.\n\nArgs:\nd: Dimension, tuple (string, int), or None.\n\nReturns:\nDimension or None.\n\nRaises:\nValueError: If d cannot be converted to a Dimension.", "source": "juraj-google-style"}
{"code": "async def updateCronJob(self, iden, query):\n    cron = self.cell.agenda.appts.get(iden)\n    if (cron is None):\n        raise s_exc.NoSuchIden()\n    self._trig_auth_check(cron.useriden)\n    (await self.cell.agenda.mod(iden, query))", "docstring": "Change an existing cron job's query\n\nArgs:\niden (bytes):  The iden of the cron job to be changed", "source": "codesearchnet"}
{"code": "def WriteSessionCompletion(self, aborted=False):\n    self._RaiseIfNotWritable()\n    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n        raise IOError('Unsupported storage type.')\n    self._session.aborted = aborted\n    session_completion = self._session.CreateSessionCompletion()\n    self._storage_file.WriteSessionCompletion(session_completion)", "docstring": "Writes session completion information.\n\nArgs:\naborted (Optional[bool]): True if the session was aborted.\n\nRaises:\nIOError: if the storage type is not supported or\nwhen the storage writer is closed.\nOSError: if the storage type is not supported or\nwhen the storage writer is closed.", "source": "codesearchnet"}
{"code": "def delete(self):\n    self.manager.session.delete(self._uri)\n    self.manager._name_uri_cache.delete(self.properties.get(self.manager._name_prop, None))", "docstring": "Delete this NIC.\n\nAuthorization requirements:\n\n* Object-access permission to the Partition containing this HBA.\n* Task permission to the \"Partition Details\" task.\n\nRaises:\n\n:exc:`~zhmcclient.HTTPError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.AuthError`\n:exc:`~zhmcclient.ConnectionError`", "source": "codesearchnet"}
{"code": "def MakeSelfExtractingZip(self, payload_data, output_path):\n    \n    context = self.context + [\"Client Context\"]\n\n    src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode=\"r\")\n    zip_data = io.BytesIO()\n    output_zip = zipfile.ZipFile(\n        zip_data, mode=\"w\", compression=zipfile.ZIP_DEFLATED)\n\n    config_file_name = config.CONFIG.Get(\n        \"ClientBuilder.config_filename\", context=context)\n    \n    for template_file in src_zip.namelist():\n      if template_file != config_file_name:\n        \n        \n        \n        CopyFileInZip(src_zip, template_file, output_zip)\n\n    client_config_content = self.GetClientConfig(context)\n\n    output_zip.writestr(\n        config_file_name,\n        client_config_content.encode(\"utf-8\"),\n        compress_type=zipfile.ZIP_STORED)\n\n    \n    \n    \n    \n    \n    output_zip.comment = b\"$AUTORUN$>%s\" % config.CONFIG.Get(\n        \"ClientBuilder.autorun_command_line\", context=context).encode(\"utf-8\")\n\n    output_zip.close()\n\n    utils.EnsureDirExists(os.path.dirname(output_path))\n    with open(output_path, \"wb\") as fd:\n      \n      stub_data = io.BytesIO()\n      unzipsfx_stub = config.CONFIG.Get(\n          \"ClientBuilder.unzipsfx_stub\", context=context)\n      stub_raw = open(unzipsfx_stub, \"rb\").read()\n\n      \n      if b\"level=\\\"requireAdministrator\" not in stub_raw:\n        raise RuntimeError(\"Bad unzip binary in use. Not compiled with the\"\n                           \"requireAdministrator manifest option.\")\n\n      stub_data.write(stub_raw)\n\n      \n      \n      SetPeSubsystem(\n          stub_data,\n          console=config.CONFIG.Get(\"ClientBuilder.console\", context=context))\n\n      \n      end_of_file = zip_data.tell() + stub_data.tell()\n\n      \n      \n      offset_to_rsrc = stub_data.getvalue().find(b\".rsrc\")\n\n      \n      stub_data.seek(offset_to_rsrc + 20)\n      start_of_rsrc_section = struct.unpack(\"<I\", stub_data.read(4))[0]\n\n      \n      \n      stub_data.seek(offset_to_rsrc + 16)\n      stub_data.write(struct.pack(\"<I\", end_of_file - start_of_rsrc_section))\n\n      \n      out_data = io.BytesIO()\n      out_data.write(stub_data.getvalue())\n      out_data.write(zip_data.getvalue())\n\n      \n      fd.write(out_data.getvalue())\n\n    if self.signer:\n      self.signer.SignFile(output_path)\n\n    logging.info(\"Deployable binary generated at %s\", output_path)\n\n    return output_path", "docstring": "Repack the installer into the payload.\n\nArgs:\npayload_data: data payload for zip file\noutput_path: filename for the zip output\n\nRaises:\nRuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.\nReturns:\noutput_path: filename string of zip output file", "source": "juraj-google-style"}
{"code": "def do_get(self, uri):\n        \n        self.validate_resource_uri(uri)\n        return self._connection.get(uri)", "docstring": "Helps to make get requests\n\nArgs:\nuri: URI of the resource\n\nReturns:\nReturns: Returns the resource data", "source": "juraj-google-style"}
{"code": "def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):\n        \n        if df.empty:\n            return {\n                \"x\": [],\n                \"y\": [],\n                \"mode\": mode\n            }\n\n        _data = []\n        for x, y in xypairs:\n            if (x in df.columns) and (y in df.columns):\n                _data.append(\n                    {\n                        \"x\": df[x].values.tolist(),\n                        \"y\": df[y].values.tolist(),\n                        \"mode\": mode\n                    }\n                )\n\n        return {\n            \"data\": _data,\n            \"layout\": layout,\n            \"config\": config\n            }", "docstring": "basic line plot\n\ndataframe to json for a line plot\n\nArgs:\ndf (pandas.DataFrame): input dataframe\nxypairs (list): list of tuples containing column names\nmode (str): plotly.js mode (e.g. lines)\nlayout (dict): layout parameters\nconfig (dict): config parameters", "source": "juraj-google-style"}
{"code": "def use(self, middleware, path=None):\n    self.log.info(' Using middleware {}', middleware)\n    if (path is None):\n        path = MiddlewareChain.ROOT_PATTERN\n    self.add(HTTPMethod.ALL, path, middleware)\n    return self", "docstring": "Call the provided middleware upon requests matching the path.\nIf path is not provided or None, all requests will match.\n\nArgs:\nmiddleware (callable): Callable with the signature\n``(res, req) -> None``\npath (Optional[str or regex]): a specific path the\nrequest must match for the middleware to be called.\nReturns:\nThis router", "source": "codesearchnet"}
{"code": "def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):\n    import tempfile\n    workdir = (tempfile.mkdtemp() if (workdir is None) else workdir)\n    if (manager is None):\n        manager = TaskManager.from_user_config()\n    task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))\n    task.set_name('temp_shell_task')\n    return task", "docstring": "Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.\nMainly used for invoking Abinit to get important parameters needed to prepare the real task.\n\nArgs:\nmpi_procs: Number of MPI processes to use.", "source": "codesearchnet"}
{"code": "def _prompt_split_image(self, num_patches):\n    img_patches_per_tile = (self.img_size \n    img_string = f'{self.start_of_img_token}'\n    if num_patches > 1:\n        for idx in range(1, num_patches):\n            img_string += f'{self.tile_token}_{idx}' + f'{self.img_patch_token}' * img_patches_per_tile\n    img_string += f'{self.tile_global_token}' + f'{self.img_patch_token}' * img_patches_per_tile\n    img_string += f'{self.end_of_img_token}'\n    return img_string", "docstring": "Create a structured string representation of image tokens\n\nArgs:\nnum_patches: Number of patches in the image\n\nReturns:\nString with appropriate image tokens", "source": "github-repos"}
{"code": "def flag_last(o):\n    it = o.__iter__()\n    try:\n        e = next(it)\n    except StopIteration:\n        return\n    while True:\n        try:\n            nxt = next(it)\n            yield (False, e)\n            e = nxt\n        except StopIteration:\n            yield (True, e)\n            break", "docstring": "Flags the last loop of an iterator.\n\nConsumes an iterator, buffers one instance so it can look ahead.\nReturns True on last iteration.\n\nArgs:\n* o: An iterator instance.\n\nReturns:\n* A tuple of (True/False, iteration). Returns True, next on StopIteration.", "source": "github-repos"}
{"code": "def get_vmss_vm(access_token, subscription_id, resource_group, vmss_name, instance_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/virtualMachines/', str(instance_id), '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get individual VMSS VM details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\ninstance_id (int): VM ID of the scale set VM.\n\nReturns:\nHTTP response. JSON body of VMSS VM model view.", "source": "codesearchnet"}
{"code": "def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'text': text})\n    return self.api_call('chat.meMessage', json=kwargs)", "docstring": "Share a me message into a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\ntext (str): The message you'd like to share. e.g. 'Hello world'", "source": "codesearchnet"}
{"code": "def writeCmdMsg(self, msg):\n        \n        ekm_log(\"(writeCmdMsg | \" + self.getContext() + \") \" + msg)\n        self.m_command_msg = msg", "docstring": "Internal method to set the command result string.\n\nArgs:\nmsg (str): Message built during command.", "source": "juraj-google-style"}
{"code": "def generate(cache_fn):\n    \n    if not os.path.exists(cache_fn):\n        print >> sys.stderr, \"Can't access `%s`!\" % cache_fn\n        sys.exit(1)\n\n    with SqliteDict(cache_fn) as db:\n        for item in _pick_keywords(db):\n            yield item", "docstring": "Go thru `cache_fn` and filter keywords. Store them in `keyword_list.json`.\n\nArgs:\ncache_fn (str): Path to the file with cache.\n\nReturns:\nlist: List of :class:`KeywordInfo` objects.", "source": "juraj-google-style"}
{"code": "def labels(self):\n    if (not self.__labels):\n        self.__labels = Labels(self.__connection)\n    return self.__labels", "docstring": "Gets the Labels API client.\n\nReturns:\nLabels:", "source": "codesearchnet"}
{"code": "def predict(fqdn, result, *argl, **argd):\n    \n    \n    \n    out = None\n    if len(argl) > 0:\n        machine = argl[0]\n        if isclassifier(machine):\n            out = classify_predict(fqdn, result, None, *argl, **argd)\n        elif isregressor(machine):\n            out = regress_predict(fqdn, result, None, *argl, **argd)\n    return out", "docstring": "Analyzes the result of a generic predict operation performed by\n`sklearn`.\n\nArgs:\nfqdn (str): full-qualified name of the method that was called.\nresult: result of calling the method with `fqdn`.\nargl (tuple): positional arguments passed to the method call.\nargd (dict): keyword arguments passed to the method call.", "source": "juraj-google-style"}
{"code": "def reqHeadTimeStamp(\n            self, contract: Contract, whatToShow: str,\n            useRTH: bool, formatDate: int = 1) -> datetime.datetime:\n        \n        return self._run(\n            self.reqHeadTimeStampAsync(\n                contract, whatToShow, useRTH, formatDate))", "docstring": "Get the datetime of earliest available historical data\nfor the contract.\n\nArgs:\ncontract: Contract of interest.\nuseRTH: If True then only show data from within Regular\nTrading Hours, if False then show all data.\nformatDate: If set to 2 then the result is returned as a\ntimezone-aware datetime.datetime with UTC timezone.", "source": "juraj-google-style"}
{"code": "def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True, studies=None, features=None, regularization='scale'):\n    import nibabel as nib\n    import os\n    try:\n        loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]\n    except OSError:\n        print('Error loading masks. Check the path')\n    grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold) for m in loaded_masks]\n    flat_ids = reduce((lambda a, b: (a + b)), grouped_ids)\n    if remove_overlap:\n        import collections\n        flat_ids = [id for (id, count) in collections.Counter(flat_ids).items() if (count == 1)]\n        grouped_ids = [[x for x in m if (x in flat_ids)] for m in grouped_ids]\n    y = [([idx] * len(ids)) for (idx, ids) in enumerate(grouped_ids)]\n    y = reduce((lambda a, b: (a + b)), y)\n    y = np.array(y)\n    X = [dataset.get_feature_data(ids=group_ids, features=features) for group_ids in grouped_ids]\n    X = np.vstack(tuple(X))\n    if regularization:\n        X = regularize(X, method=regularization)\n    return (X, y)", "docstring": "Set up data for a classification task given a set of masks\n\nGiven a set of masks, this function retrieves studies associated with\neach mask at the specified threshold, optionally removes overlap and\nfilters by studies and features, and returns studies by feature matrix\n(X) and class labels (y)\n\nArgs:\ndataset: a Neurosynth dataset\nmaks: a list of paths to Nifti masks\nthreshold: percentage of voxels active within the mask for study\nto be included\nremove_overlap: A boolean indicating if studies studies that\nappear in more than one mask should be excluded\nstudies: An optional list of study names used to constrain the set\nused in classification. If None, will use all features in the\ndataset.\nfeatures: An optional list of feature names used to constrain the\nset used in classification. If None, will use all features in\nthe dataset.\nregularize: Optional boolean indicating if X should be regularized\n\nReturns:\nA tuple (X, y) of np arrays.\nX is a feature by studies matrix and y is a vector of class labels", "source": "codesearchnet"}
{"code": "def _create_dummy_input(func_graph, template_tensor):\n    with func_graph.as_default():\n        return array_ops.placeholder(template_tensor.dtype, shape=template_tensor.shape)", "docstring": "Creates tensors in func_graph to represent template_tensors.\n\nArgs:\nfunc_graph: FuncGraph.\ntemplate_tensor: a tensor in the outer graph.\n\nReturns:\nA tensor in func_graph.", "source": "github-repos"}
{"code": "def transform(self, input_df):\n        \n\n        \n        _df = input_df.copy(deep=False)\n\n        \n        for column in self.cat_columns:\n\n            \n            if column not in _df:\n                raise RuntimeError('Required column {:s} not found'.format(column))\n\n            \n            if _df[column].dtype == 'object':\n                print('Changing column {:s} to category'.format(column))\n                _df[column] = pd.Categorical(_df[column])\n\n        \n        _df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])\n\n        \n        if self.normalize:\n            for column in list(_df.select_dtypes(include=[np.number]).columns.values):\n                print('Normalizing column {:s}...'.format(column))\n                smin, smax = self.norm_map[column]\n                _df[column] = (_df[column] - smin) / (smax - smin)\n\n        \n        return self.dummy_encoder.transform(_df)", "docstring": "Convert the dataframe to a matrix (numpy ndarray)\nArgs:\ninput_df (dataframe): The dataframe to convert", "source": "juraj-google-style"}
{"code": "def get_sanger_unevaluated(store, institute_id, user_id):\n    sanger_ordered_by_case = store.sanger_ordered(institute_id, user_id)\n    unevaluated = []\n    for item in sanger_ordered_by_case:\n        case_id = item['_id']\n        case_obj = store.case(case_id=case_id)\n        if (not case_obj):\n            continue\n        case_display_name = case_obj.get('display_name')\n        varid_list = item['vars']\n        unevaluated_by_case = {}\n        unevaluated_by_case[case_display_name] = []\n        for var_id in varid_list:\n            variant_obj = store.variant(document_id=var_id, case_id=case_id)\n            if ((variant_obj is None) or (variant_obj.get('sanger_ordered') is None) or (variant_obj.get('sanger_ordered') is False)):\n                continue\n            validation = variant_obj.get('validation', 'not_evaluated')\n            if (validation in ['True positive', 'False positive']):\n                continue\n            unevaluated_by_case[case_display_name].append(variant_obj['_id'])\n        if (len(unevaluated_by_case[case_display_name]) > 0):\n            unevaluated.append(unevaluated_by_case)\n    return unevaluated", "docstring": "Get all variants for an institute having Sanger validations ordered but still not evaluated\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str)\n\nReturns:\nunevaluated: a list that looks like this: [ {'case1': [varID_1, varID_2, .., varID_n]}, {'case2' : [varID_1, varID_2, .., varID_n]} ],\nwhere the keys are case_ids and the values are lists of variants with Sanger ordered but not yet validated", "source": "codesearchnet"}
{"code": "def GetSecurityDescriptor(self):\n    fwnt_security_descriptor = pyfwnt.security_descriptor()\n    fwnt_security_descriptor.copy_from_byte_stream(self._fsntfs_file_entry.security_descriptor_data)\n    return fwnt_security_descriptor", "docstring": "Retrieves the security descriptor.\n\nReturns:\npyfwnt.security_descriptor: security descriptor.", "source": "codesearchnet"}
{"code": "def get_request_data(self, path, action, body=None):\n    body = (body or '')\n    (path_name, path_spec) = self.get_path_spec(path)\n    response = {}\n    if ((path_spec is not None) and (action in path_spec.keys())):\n        for status_code in path_spec[action]['responses'].keys():\n            resp = path_spec[action]['responses'][status_code]\n            try:\n                response[int(status_code)] = self.get_response_example(resp)\n            except ValueError:\n                response[status_code] = self.get_response_example(resp)\n    if (response == {}):\n        response[400] = ''\n    return response", "docstring": "Get the default data and status code of the given path + action request.\n\nArgs:\npath: path of the request.\naction: action of the request(get, post, delete...)\nbody: body sent, used to sent it back for post request.\n\nReturns:\nA tuple with the default response data and status code\nIn case of default status_code, use 0", "source": "codesearchnet"}
{"code": "def list_vmss_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VM Scale Sets in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of VM scale sets.", "source": "codesearchnet"}
{"code": "def add_applicator(self, table, cols, function):\n        \n\n        if table not in self.relations:\n            raise ItsdbError('Cannot add applicator; table \"{}\" is not '\n                             'defined by the relations file.'\n                             .format(table))\n        if cols is None:\n            raise ItsdbError('Cannot add applicator; columns not specified.')\n        fields = set(f.name for f in self.relations[table])\n        for col in cols:\n            if col not in fields:\n                raise ItsdbError('Cannot add applicator; column \"{}\" not '\n                                 'defined by the relations file.'\n                                 .format(col))\n        self.applicators[table].append((cols, function))", "docstring": "Add an applicator. When reading *table*, rows in *table* will be\nmodified by apply_rows().\n\nArgs:\ntable: The table to apply the function to.\ncols: The columns in *table* to apply the function on.\nfunction: The applicator function.", "source": "juraj-google-style"}
{"code": "def _get_help_for_modules(self, modules, prefix, include_special_flags):\n    output_lines = []\n    for module in modules:\n        self._render_our_module_flags(module, output_lines, prefix)\n    if include_special_flags:\n        self._render_module_flags('absl.flags', six.itervalues(_helpers.SPECIAL_FLAGS._flags()), output_lines, prefix)\n    return '\\n'.join(output_lines)", "docstring": "Returns the help string for a list of modules.\n\nPrivate to absl.flags package.\n\nArgs:\nmodules: List[str], a list of modules to get the help string for.\nprefix: str, a string that is prepended to each generated help line.\ninclude_special_flags: bool, whether to include description of\nSPECIAL_FLAGS, i.e. --flagfile and --undefok.", "source": "codesearchnet"}
{"code": "def time_estimate(self, duration, **kwargs):\n    path = ('%s/%s/time_estimate' % (self.manager.path, self.get_id()))\n    data = {'duration': duration}\n    return self.manager.gitlab.http_post(path, post_data=data, **kwargs)", "docstring": "Set an estimated time of work for the object.\n\nArgs:\nduration (str): Duration in human format (e.g. 3h30)\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "codesearchnet"}
{"code": "def load_metascenario(self, scenario_list):\n    for scenario in scenario_list:\n        name = scenario.get('name')\n        if (name is None):\n            raise DataError('Scenario in scenario list is missing a name parameter', scenario=scenario)\n        tile_address = scenario.get('tile')\n        args = scenario.get('args', {})\n        dest = self\n        if (tile_address is not None):\n            dest = self._tiles.get(tile_address)\n            if (dest is None):\n                raise DataError('Attempted to load a scenario into a tile address that does not exist', address=tile_address, valid_addresses=list(self._tiles))\n        dest.load_scenario(name, **args)", "docstring": "Load one or more scenarios from a list.\n\nEach entry in scenario_list should be a dict containing at least a\nname key and an optional tile key and args key.  If tile is present\nand its value is not None, the scenario specified will be loaded into\nthe given tile only.  Otherwise it will be loaded into the entire\ndevice.\n\nIf the args key is specified is will be passed as keyword arguments\nto load_scenario.\n\nArgs:\nscenario_list (list): A list of dicts for each scenario that should\nbe loaded.", "source": "codesearchnet"}
{"code": "def save(self, savefile):\n    with open(str(savefile), 'wb') as f:\n        self.write_to_fp(f)\n        log.debug('Saved to %s', savefile)", "docstring": "Do the TTS API request and write result to file.\n\nArgs:\nsavefile (string): The path and file name to save the ``mp3`` to.\n\nRaises:\n:class:`gTTSError`: When there's an error with the API request.", "source": "codesearchnet"}
{"code": "def is_subset(self, other):\n\t\t\n\t\tif isinstance(other, _basebag):\n\t\t\tfor elem, count in self.counts():\n\t\t\t\tif not count <= other.count(elem):\n\t\t\t\t\treturn False\n\t\telse:\n\t\t\tfor elem in self:\n\t\t\t\tif self.count(elem) > 1 or elem not in other:\n\t\t\t\t\treturn False\n\t\treturn True", "docstring": "Check that every element in self has a count <= in other.\n\nArgs:\nother (Set)", "source": "juraj-google-style"}
{"code": "def setCTRatio(self, new_ct, password=\"00000000\"):\n        \n        ret = False\n        self.setContext(\"setCTRatio\")\n        try:\n            self.clearCmdMsg()\n            if ((new_ct != CTRatio.Amps_100) and (new_ct != CTRatio.Amps_200) and\n                    (new_ct != CTRatio.Amps_400) and (new_ct != CTRatio.Amps_600) and\n                    (new_ct != CTRatio.Amps_800) and (new_ct != CTRatio.Amps_1000) and\n                    (new_ct != CTRatio.Amps_1200) and (new_ct != CTRatio.Amps_1500) and\n                    (new_ct != CTRatio.Amps_2000) and (new_ct != CTRatio.Amps_3000) and\n                    (new_ct != CTRatio.Amps_4000) and (new_ct != CTRatio.Amps_5000)):\n                self.writeCmdMsg(\"Legal CT Ratios: 100, 200, 400, 600, \" +\n                                 \"800, 1000, 1200, 1500, 2000, 3000, 4000 and 5000\")\n                self.setContext(\"\")\n                return ret\n\n            if len(password) != 8:\n                self.writeCmdMsg(\"Invalid password length.\")\n                self.setContext(\"\")\n                return ret\n\n            if not self.request(False):\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_str = \"015731023030443028\" + binascii.hexlify(str(new_ct).zfill(4)) + \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success(setCTRatio): 06 returned.\")\n                        ret = True\n            self.serialPostEnd()\n\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return ret", "docstring": "Serial call to set CT ratio for attached inductive pickup.\n\nArgs:\nnew_ct (int): A :class:`~ekmmeters.CTRatio` value, a legal amperage setting.\npassword (str): Optional password.\n\nReturns:\nbool: True on completion with ACK.", "source": "juraj-google-style"}
{"code": "def _force_edge_active_move(self, state: _STATE) -> _STATE:\n        \n        seqs, edges = state\n        unused_edges = edges.copy()\n\n        \n        for seq in seqs:\n            for i in range(1, len(seq)):\n                unused_edges.remove(self._normalize_edge((seq[i - 1], seq[i])))\n\n        edge = self._choose_random_edge(unused_edges)\n        if not edge:\n            return seqs, edges\n\n        return (\n            self._force_edge_active(seqs,\n                                    edge,\n                                    lambda: bool(self._rand.randint(2))),\n            edges)", "docstring": "Move which forces a random edge to appear on some sequence.\n\nThis move chooses random edge from the edges which do not belong to any\nsequence and modifies state in such a way, that this chosen edge\nappears on some sequence of the search state.\n\nArgs:\nstate: Search state, not mutated.\n\nReturns:\nNew search state with one of the unused edges appearing in some\nsequence.", "source": "juraj-google-style"}
{"code": "def disconnect_sync(self, connection_handle):\n    self.bable.disconnect(connection_handle=connection_handle, sync=True)", "docstring": "Synchronously disconnect from whoever has connected to us\n\nArgs:\nconnection_handle (int): The handle of the connection we wish to disconnect.", "source": "codesearchnet"}
{"code": "def update_work_as_completed(self, worker_id, work_id, other_values=None, error=None):\n    client = self._datastore_client\n    try:\n        with client.transaction() as transaction:\n            work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id, KIND_WORK, work_id)\n            work_entity = client.get(work_key, transaction=transaction)\n            if (work_entity['claimed_worker_id'] != worker_id):\n                return False\n            work_entity['is_completed'] = True\n            if other_values:\n                work_entity.update(other_values)\n            if error:\n                work_entity['error'] = text_type(error)\n            transaction.put(work_entity)\n    except Exception:\n        return False\n    return True", "docstring": "Updates work piece in datastore as completed.\n\nArgs:\nworker_id: ID of the worker which did the work\nwork_id: ID of the work which was done\nother_values: dictionary with additonal values which should be saved\nwith the work piece\nerror: if not None then error occurred during computation of the work\npiece. In such case work will be marked as completed with error.\n\nReturns:\nwhether work was successfully updated", "source": "codesearchnet"}
{"code": "def check_required_tags_compliance(self, resource):\n    missing_tags = []\n    notes = []\n    resource_tags = {tag.key.lower(): tag.value for tag in resource.tags}\n    if (resource.resource_type in self.alert_schedule):\n        target_accounts = self.alert_schedule[resource.resource_type]['scope']\n    else:\n        target_accounts = self.alert_schedule['*']['scope']\n    if (not ((resource.account.account_name in target_accounts) or ('*' in target_accounts))):\n        return (missing_tags, notes)\n    if (self.audit_ignore_tag.lower() in resource_tags):\n        return (missing_tags, notes)\n    required_tags = list(self.required_tags)\n    if (self.gdpr_enabled and (resource.account.account_name in self.gdpr_accounts)):\n        required_tags.append(self.gdpr_tag)\n    '\\n        \n    for key in [tag.lower() for tag in required_tags]:\n        if (key not in resource_tags):\n            missing_tags.append(key)\n        elif (not self.validate_tag(key, resource_tags[key])):\n            missing_tags.append(key)\n            notes.append('{} tag is not valid'.format(key))\n    return (missing_tags, notes)", "docstring": "Check whether a resource is compliance\n\nArgs:\nresource: A single resource\n\nReturns:\n`(list, list)`\nA tuple contains missing tags (if there were any) and notes", "source": "codesearchnet"}
{"code": "def call_replica_local_fn(fn, *args, **kwargs):\n    strategy = None\n    if 'strategy' in kwargs:\n        strategy = kwargs.pop('strategy')\n    elif distribute_lib.has_strategy():\n        strategy = distribute_lib.get_strategy()\n    is_tpu = backend.is_tpu_strategy(strategy)\n    if not is_tpu and strategy and distribute_lib.in_cross_replica_context():\n        with strategy.scope():\n            return strategy.extended.call_for_each_replica(fn, args, kwargs)\n    return fn(*args, **kwargs)", "docstring": "Call a function that uses replica-local variables.\n\nThis function correctly handles calling `fn` in a cross-replica\ncontext.\n\nArgs:\nfn: The function to call.\n*args: Positional arguments to the `fn`.\n**kwargs: Keyword argument to `fn`.\n\nReturns:\nThe result of calling `fn`.", "source": "github-repos"}
{"code": "def query(self, coords, order=1):\n        \n        out = np.full(len(coords.l.deg), np.nan, dtype='f4')\n\n        for pole in self.poles:\n            m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)\n\n            if np.any(m):\n                data, w = self._data[pole]\n                x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)\n                out[m] = map_coordinates(data, [y, x], order=order, mode='nearest')\n\n        return out", "docstring": "Returns the map value at the specified location(s) on the sky.\n\nArgs:\ncoords (`astropy.coordinates.SkyCoord`): The coordinates to query.\norder (Optional[int]): Interpolation order to use. Defaults to `1`,\nfor linear interpolation.\n\nReturns:\nA float array containing the map value at every input coordinate.\nThe shape of the output will be the same as the shape of the\ncoordinates stored by `coords`.", "source": "juraj-google-style"}
{"code": "def put(self, key, value):\n    \n\n    value = self.serializedValue(value)\n    self.child_datastore.put(key, value)", "docstring": "Stores the object `value` named by `key`.\nSerializes values on the way in, and stores the serialized data into the\n``child_datastore``.\n\nArgs:\nkey: Key naming `value`\nvalue: the object to store.", "source": "juraj-google-style"}
{"code": "async def send(self, metric):\n    message = json.dumps(metric).encode('utf-8')\n    (await self.loop.create_datagram_endpoint((lambda : UDPClientProtocol(message)), remote_addr=(self.ip, self.port)))", "docstring": "Transform metric to JSON bytestring and send to server.\n\nArgs:\nmetric (dict): Complete metric to send as JSON.", "source": "codesearchnet"}
{"code": "def execute(api):\n    \n    try:\n      return api.execute()\n    except Exception as exception:\n      now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n      _print_error('%s: Exception %s: %s' % (now, type(exception).__name__,\n                                             str(exception)))\n      \n      raise exception", "docstring": "Executes operation.\n\nArgs:\napi: The base API object\n\nReturns:\nA response body object", "source": "juraj-google-style"}
{"code": "def lf_summary(L, Y=None, lf_names=None, est_accs=None):\n    \n    n, m = L.shape\n    if lf_names is not None:\n        col_names = [\"j\"]\n        d = {\"j\": list(range(m))}\n    else:\n        lf_names = list(range(m))\n        col_names = []\n        d = {}\n\n    \n    col_names.extend([\"Polarity\", \"Coverage\", \"Overlaps\", \"Conflicts\"])\n    d[\"Polarity\"] = Series(data=lf_polarities(L), index=lf_names)\n    d[\"Coverage\"] = Series(data=lf_coverages(L), index=lf_names)\n    d[\"Overlaps\"] = Series(data=lf_overlaps(L), index=lf_names)\n    d[\"Conflicts\"] = Series(data=lf_conflicts(L), index=lf_names)\n\n    if Y is not None:\n        col_names.extend([\"Correct\", \"Incorrect\", \"Emp. Acc.\"])\n        confusions = [\n            confusion_matrix(Y, L[:, i], pretty_print=False) for i in range(m)\n        ]\n        corrects = [np.diagonal(conf).sum() for conf in confusions]\n        incorrects = [\n            conf.sum() - correct for conf, correct in zip(confusions, corrects)\n        ]\n        accs = lf_empirical_accuracies(L, Y)\n        d[\"Correct\"] = Series(data=corrects, index=lf_names)\n        d[\"Incorrect\"] = Series(data=incorrects, index=lf_names)\n        d[\"Emp. Acc.\"] = Series(data=accs, index=lf_names)\n\n    if est_accs is not None:\n        col_names.append(\"Learned Acc.\")\n        d[\"Learned Acc.\"] = Series(est_accs, index=lf_names)\n\n    return DataFrame(data=d, index=lf_names)[col_names]", "docstring": "Returns a pandas DataFrame with the various per-LF statistics.\n\nArgs:\nL: an n x m scipy.sparse matrix where L_{i,j} is the label given by the\njth LF to the ith candidate\nY: an [n] or [n, 1] np.ndarray of gold labels.\nIf provided, the empirical accuracy for each LF will be calculated", "source": "juraj-google-style"}
{"code": "def operator(name=None, operators=None, aliases=None, kind=None):\n    \n    def delegator(assertion, subject, expected, *args, **kw):\n        return assertion.test(subject, expected, *args, **kw)\n\n    def decorator(fn):\n        operator = Operator(fn=fn, aliases=aliases, kind=kind)\n        _name = name if isinstance(name, six.string_types) else fn.__name__\n        operator.operators = (_name,)\n\n        _operators = operators\n        if isinstance(_operators, list):\n            _operators = tuple(_operators)\n\n        if isinstance(_operators, tuple):\n            operator.operators += _operators\n\n        \n        Engine.register(operator)\n        return functools.partial(delegator, operator)\n\n    return decorator(name) if inspect.isfunction(name) else decorator", "docstring": "Registers a new operator function in the test engine.\n\nArguments:\n*args: variadic arguments.\n**kw: variadic keyword arguments.\n\nReturns:\nfunction", "source": "juraj-google-style"}
{"code": "def _copy_fn(fn):\n    if not callable(fn):\n        raise TypeError('fn is not callable: %s' % fn)\n    return types.FunctionType(code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__)", "docstring": "Create a deep copy of fn.\n\nArgs:\nfn: a callable\n\nReturns:\nA `FunctionType`: a deep copy of fn.\n\nRaises:\nTypeError: if `fn` is not a callable.", "source": "github-repos"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if (self._device_serial_number is not None):\n        self._device_serial_number.write(local_stream, kmip_version=kmip_version)\n    if (self._password is not None):\n        self._password.write(local_stream, kmip_version=kmip_version)\n    if (self._device_identifier is not None):\n        self._device_identifier.write(local_stream, kmip_version=kmip_version)\n    if (self._network_identifier is not None):\n        self._network_identifier.write(local_stream, kmip_version=kmip_version)\n    if (self._machine_identifier is not None):\n        self._machine_identifier.write(local_stream, kmip_version=kmip_version)\n    if (self._media_identifier is not None):\n        self._media_identifier.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(DeviceCredential, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the DeviceCredential struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def convert(isbn, code='978'):\n    isbn = _isbn_cleanse(isbn)\n    if (len(isbn) == 10):\n        isbn = (code + isbn[:(- 1)])\n        return (isbn + calculate_checksum(isbn))\n    elif isbn.startswith('978'):\n        return (isbn[3:(- 1)] + calculate_checksum(isbn[3:(- 1)]))\n    else:\n        raise IsbnError('Only ISBN-13s with 978 Bookland code can be converted to ISBN-10.')", "docstring": "Convert ISBNs between ISBN-10 and ISBN-13.\n\nNote:\nNo attempt to hyphenate converted ISBNs is made, because the\nspecification requires that *any* hyphenation must be correct but\nallows ISBNs without hyphenation.\n\nArgs:\nisbn (str): SBN, ISBN-10 or ISBN-13\ncode (str): EAN Bookland code\n\nReturns:\n``str``: Converted ISBN-10 or ISBN-13\n\nRaise:\nIsbnError: When ISBN-13 isn't convertible to an ISBN-10", "source": "codesearchnet"}
{"code": "def _GetFieldByName(message_descriptor, field_name):\n  \n  try:\n    return message_descriptor.fields_by_name[field_name]\n  except KeyError:\n    raise ValueError('Protocol message %s has no \"%s\" field.' %\n                     (message_descriptor.name, field_name))", "docstring": "Returns a field descriptor by field name.\n\nArgs:\nmessage_descriptor: A Descriptor describing all fields in message.\nfield_name: The name of the field to retrieve.\nReturns:\nThe field descriptor associated with the field name.", "source": "juraj-google-style"}
{"code": "def write_input(self, output_dir=\".\", make_dir_if_not_present=True):\n        \n        if make_dir_if_not_present and not os.path.exists(output_dir):\n            os.makedirs(output_dir)\n\n        feff = self.all_input()\n\n        feff_input = \"\\n\\n\".join(str(feff[k]) for k in\n                                 [\"HEADER\", \"PARAMETERS\", \"POTENTIALS\", \"ATOMS\"]\n                                 if k in feff)\n\n        for k, v in feff.items():\n            with open(os.path.join(output_dir, k), \"w\") as f:\n                f.write(str(v))\n\n        with open(os.path.join(output_dir, \"feff.inp\"), \"w\") as f:\n            f.write(feff_input)\n\n        \n        if \"ATOMS\" not in feff:\n            self.atoms.struct.to(fmt=\"cif\",\n                                 filename=os.path.join(\n                                     output_dir, feff[\"PARAMETERS\"][\"CIF\"]))", "docstring": "Writes a set of FEFF input to a directory.\n\nArgs:\noutput_dir: Directory to output the FEFF input files\nmake_dir_if_not_present: Set to True if you want the directory (\nand the whole path) to be created if it is not present.", "source": "juraj-google-style"}
{"code": "def propose(self):\n    candidates = self._get_candidates()\n    if (candidates is None):\n        return None\n    predictions = self.predict(candidates)\n    idx = self._acquire(predictions)\n    return candidates[idx]", "docstring": "Use the trained model to propose a new pipeline.\n\nReturns:\nint: Index corresponding to pipeline to try in ``dpp_matrix``.", "source": "codesearchnet"}
{"code": "def stack_and_pad_tensors(batch, padding_index=DEFAULT_PADDING_INDEX, dim=0):\n    \n    lengths = [tensor.shape[0] for tensor in batch]\n    max_len = max(lengths)\n    padded = [pad_tensor(tensor, max_len, padding_index) for tensor in batch]\n    lengths = torch.tensor(lengths)\n    padded = torch.stack(padded, dim=dim).contiguous()\n    for _ in range(dim):\n        lengths = lengths.unsqueeze(0)\n    return padded, lengths", "docstring": "Pad a :class:`list` of ``tensors`` (``batch``) with ``padding_index``.\n\nArgs:\nbatch (:class:`list` of :class:`torch.Tensor`): Batch of tensors to pad.\npadding_index (int, optional): Index to pad tensors with.\ndim (int, optional): Dimension on to which to concatenate the batch of tensors.\n\nReturns\ntorch.Tensor, torch.Tensor: Padded tensors and original lengths of tensors.", "source": "juraj-google-style"}
{"code": "def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n    mask = np.zeros(output_size, dtype=np.int64)\n    mask[:input_height, :input_width] = 1\n    return mask", "docstring": "Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.\nArgs:\nimage (`np.ndarray`):\nImage to make the pixel mask for.\noutput_size (`Tuple[int, int]`):\nOutput size of the mask.", "source": "github-repos"}
{"code": "def get_all(cls, include_disabled=True):\n    if (cls == BaseAccount):\n        raise InquisitorError('get_all on BaseAccount is not supported')\n    account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id\n    qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)\n    if (not include_disabled):\n        qry = qry.filter((Account.enabled == 1))\n    accounts = qry.find((Account.account_type_id == account_type_id))\n    return {res.account_id: cls(res) for res in accounts}", "docstring": "Returns a list of all accounts of a given type\n\nArgs:\ninclude_disabled (`bool`): Include disabled accounts. Default: `True`\n\nReturns:\nlist of account objects", "source": "codesearchnet"}
{"code": "def get_mealy_conjecture(self):\n        \n        mma = MealyMachine()\n        for s in self.observation_table.sm_vector:\n            for i in self.alphabet:\n                dst = self.observation_table.equiv_classes[s + i]\n                \n                if dst is None:\n                    logging.debug('Conjecture attempt on non closed table.')\n                    return None\n                o = self.observation_table[s, i]\n                src_id = self.observation_table.sm_vector.index(s)\n                dst_id = self.observation_table.sm_vector.index(dst)\n                mma.add_arc(src_id, dst_id, i, o)\n\n        \n        for s in mma.states:\n            s.final = True\n        return mma", "docstring": "Utilize the observation table to construct a Mealy Machine.\nThe library used for representing the Mealy Machine is the python\nbindings of the openFST library (pyFST).\nArgs:\nNone\nReturns:\nMealyMachine: A mealy machine build based on a closed and consistent\nobservation table.", "source": "juraj-google-style"}
{"code": "def _central_crop(image, crop_height, crop_width):\n  \n  shape = tf.shape(image)\n  height, width = shape[0], shape[1]\n\n  mlperf_log.resnet_print(key=mlperf_log.INPUT_CENTRAL_CROP,\n                          value=[crop_height, crop_width])\n\n  amount_to_be_cropped_h = (height - crop_height)\n  crop_top = amount_to_be_cropped_h \n  amount_to_be_cropped_w = (width - crop_width)\n  crop_left = amount_to_be_cropped_w \n  return tf.slice(\n      image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])", "docstring": "Performs central crops of the given image list.\n\nArgs:\nimage: a 3-D image tensor\ncrop_height: the height of the image following the crop.\ncrop_width: the width of the image following the crop.\n\nReturns:\n3-D tensor with cropped image.", "source": "juraj-google-style"}
{"code": "def _request(self, path, key, data, method, key_is_cik, extra_headers={}):\n    if (method == 'GET'):\n        if (len(data) > 0):\n            url = ((path + '?') + data)\n        else:\n            url = path\n        body = None\n    else:\n        url = path\n        body = data\n    headers = {}\n    if key_is_cik:\n        headers['X-Exosite-CIK'] = key\n    else:\n        headers['X-Exosite-Token'] = key\n    if (method == 'POST'):\n        headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'\n    headers['Accept'] = 'text/plain, text/csv, application/x-www-form-urlencoded'\n    headers.update(extra_headers)\n    (body, response) = self._onephttp.request(method, url, body, headers)\n    pr = ProvisionResponse(body, response)\n    if (self._raise_api_exceptions and (not pr.isok)):\n        raise ProvisionException(pr)\n    return pr", "docstring": "Generically shared HTTP request method.\n\nArgs:\npath: The API endpoint to interact with.\nkey: A string for the key used by the device for the API.  Either a CIK or token.\ndata: A string for the pre-encoded data to be sent with this request.\nmethod: A string denoting the HTTP verb to use for the request (e.g. 'GET', 'POST')\nkey_is_cik: Whether or not the device key used is a CIK or token.\nextra_headers: A dictionary of extra headers to include with the request.\n\nReturns:\nA ProvisionResponse containing the result of the HTTP request.", "source": "codesearchnet"}
{"code": "def to_representation(self, instance):\n        \n        if self.id_only():\n            return instance.pk\n\n        pk = getattr(instance, 'pk', None)\n        if not settings.ENABLE_SERIALIZER_OBJECT_CACHE or pk is None:\n            return self._to_representation(instance)\n        else:\n            if pk not in self.obj_cache:\n                self.obj_cache[pk] = self._to_representation(instance)\n            return self.obj_cache[pk]", "docstring": "Modified to_representation method. Optionally may cache objects.\n\nArguments:\ninstance: A model instance or data object.\nReturns:\nInstance ID if the serializer is meant to represent its ID.\nOtherwise, a tagged data dict representation.", "source": "juraj-google-style"}
{"code": "def ssh_reachable(self, tries=None, propagate_fail=True):\n        \n        if not self.running():\n            return False\n\n        try:\n            ssh.get_ssh_client(\n                ip_addr=self.ip(),\n                host_name=self.name(),\n                ssh_tries=tries,\n                propagate_fail=propagate_fail,\n                ssh_key=self.virt_env.prefix.paths.ssh_id_rsa(),\n                username=self._spec.get('ssh-user'),\n                password=self._spec.get('ssh-password'),\n            )\n        except ssh.LagoSSHTimeoutException:\n            return False\n\n        return True", "docstring": "Check if the VM is reachable with ssh\nArgs:\ntries(int): Number of tries to try connecting to the host\npropagate_fail(bool): If set to true, this event will appear\nin the log and fail the outter stage. Otherwise, it will be\ndiscarded.\nReturns:\nbool: True if the VM is reachable.", "source": "juraj-google-style"}
{"code": "def from_celery(cls, name, worker_dict, queues):\n        \n        return WorkerStats(\n            name=name,\n            broker=BrokerStats.from_celery(worker_dict['broker']),\n            pid=worker_dict['pid'],\n            process_pids=worker_dict['pool']['processes'],\n            concurrency=worker_dict['pool']['max-concurrency'],\n            job_count=worker_dict['pool']['writes']['total'],\n            queues=queues\n        )", "docstring": "Create a WorkerStats object from the dictionary returned by celery.\n\nArgs:\nname (str): The name of the worker.\nworker_dict (dict): The dictionary as returned by celery.\nqueues (list): A list of QueueStats objects that represent the queues this\nworker is listening on.\n\nReturns:\nWorkerStats: A fully initialized WorkerStats object.", "source": "juraj-google-style"}
{"code": "def nodes_on_wire(self, wire, only_ops=False):\n        \n        current_node = self.input_map.get(wire, None)\n\n        if not current_node:\n            raise DAGCircuitError('The given wire %s is not present in the circuit'\n                                  % str(wire))\n\n        more_nodes = True\n        while more_nodes:\n            more_nodes = False\n            \n            if current_node.type == 'op' or not only_ops:\n                yield current_node\n\n            \n            for node, edges in self._multi_graph.adj[current_node].items():\n                if any(wire == edge['wire'] for edge in edges.values()):\n                    current_node = node\n                    more_nodes = True\n                    break", "docstring": "Iterator for nodes that affect a given wire\n\nArgs:\nwire (tuple(Register, index)): the wire to be looked at.\nonly_ops (bool): True if only the ops nodes are wanted\notherwise all nodes are returned.\nYield:\nDAGNode: the successive ops on the given wire\n\nRaises:\nDAGCircuitError: if the given wire doesn't exist in the DAG", "source": "juraj-google-style"}
{"code": "def update_batch(self, loss_per_instance):\n        \n        if self.batch_indices is None:\n            raise TensorForceError(\"Need to call get_batch before each update_batch call.\")\n        \n        \n\n        for index, loss in zip(self.batch_indices, loss_per_instance):\n            \n            new_priority = (np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight\n            self.observations._move(index, new_priority)\n            self.none_priority_index += 1", "docstring": "Computes priorities according to loss.\n\nArgs:\nloss_per_instance:", "source": "juraj-google-style"}
{"code": "def MakePartialStat(self, fd):\n    \n\n    is_dir = \"Container\" in fd.behaviours\n\n    return {\n        \"pathspec\": fd.Get(fd.Schema.PATHSPEC, \"\"),\n        \"st_atime\": fd.Get(fd.Schema.LAST, 0),\n        \"st_blksize\": 0,\n        \"st_blocks\": 0,\n        \"st_ctime\": 0,\n        \"st_dev\": 0,\n        \"st_gid\": 0,\n        \"st_ino\": 0,\n        \"st_mode\": self.default_dir_mode if is_dir else self.default_file_mode,\n        \"st_mtime\": 0,\n        \"st_nlink\": 0,\n        \"st_rdev\": 0,\n        \"st_size\": fd.Get(fd.Schema.SIZE, 0),\n        \"st_uid\": 0\n    }", "docstring": "Try and give a 'stat' for something not in the data store.\n\nArgs:\nfd: The object with no stat.\n\nReturns:\nA dictionary corresponding to what we'll say the 'stat' is\nfor objects which are not actually files, so have no OS level stat.", "source": "juraj-google-style"}
{"code": "def _compute_linear_scaling_rope_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> tuple['torch.Tensor', float]:\n    if config is not None and len(rope_kwargs) > 0:\n        raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}')\n    if len(rope_kwargs) > 0:\n        factor = rope_kwargs['factor']\n    elif config is not None:\n        factor = config.rope_scaling['factor']\n    inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n    inv_freq /= factor\n    return (inv_freq, attention_factor)", "docstring": "Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev\nArgs:\nconfig ([`~transformers.PretrainedConfig`]):\nThe model configuration.\ndevice (`torch.device`):\nThe device to use for initialization of the inverse frequencies.\nseq_len (`int`, *optional*):\nThe current sequence length. Unused for this type of RoPE.\nrope_kwargs (`Dict`, *optional*):\nBC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\nReturns:\nTuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\npost-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).", "source": "github-repos"}
{"code": "def _wait_all_creative_activation(self, feed_item, timeout=128):\n    for association in feed_item['creative_assignment']:\n        creative = self._creative_dao.get(association, required=True)\n        self._wait_creative_activation(creative['id'], timeout)", "docstring": "Waits for activation of all creatives that should be associated to the feed item that represents an ad.\n\nArgs:\nfeed_item: Feed item representing an Ad from the Bulkdozer feed.\ntimeout: Optional parameter identifying how long to wait for all creatives\nto be activated in seconds.\n\nRaises:\nException: In case one or more creatives do not get activated within the\nspecified timeout.", "source": "github-repos"}
{"code": "def __init__(self, vfs_object):\n    \n    super(ObjectsCacheValue, self).__init__()\n    self._reference_count = 0\n    self.vfs_object = vfs_object", "docstring": "Initializes the resolver objects cache value object.\n\nArgs:\nvfs_object (object): VFS object to cache.", "source": "juraj-google-style"}
{"code": "def supported_language(lang):\n    \n    try:\n      self.get_collection(lang=lang)\n      return True\n    except LanguageNotSupported as e:\n      return False", "docstring": "Return True if polyglot supports the language.\n\nArgs:\nlang (string): Language code.", "source": "juraj-google-style"}
{"code": "def by_phone(self, phone, cc=None):\n        \n\n        header, content = self._http_request(self.BASE_URL, phone=phone, cc=cc)\n        return json.loads(content)", "docstring": "Perform a Yelp Phone API Search based on phone number given.\n\nArgs:\nphone    - Phone number to search by\ncc       - ISO 3166-1 alpha-2 country code. (Optional)", "source": "juraj-google-style"}
{"code": "def get_dump_sizes_bytes(self, node_name, output_slot, debug_op, device_name=None):\n    device_name = self._infer_device_name(device_name, node_name)\n    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n    if watch_key not in self._watch_key_to_datum[device_name]:\n        raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump of device %s' % (watch_key, device_name))\n    return self._watch_key_to_dump_size_bytes[device_name][watch_key]", "docstring": "Get the sizes of the dump files for a debug-dumped tensor.\n\nUnit of the file size: byte.\n\nArgs:\nnode_name: (`str`) name of the node that the tensor is produced by.\noutput_slot: (`int`) output slot index of tensor.\ndebug_op: (`str`) name of the debug op.\ndevice_name: (`str`) name of the device. If there is only one device or if\nthe specified debug_watch_key exists on only one device, this argument\nis optional.\n\nReturns:\n(`list` of `int`): list of dump file sizes in bytes.\n\nRaises:\nWatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not\nexist in the debug dump data.", "source": "github-repos"}
{"code": "def lines_from_file(path, as_interned=False, encoding=None):\n    lines = None\n    with io.open(path, encoding=encoding) as f:\n        if as_interned:\n            lines = [sys.intern(line) for line in f.read().splitlines()]\n        else:\n            lines = f.read().splitlines()\n    return lines", "docstring": "Create a list of file lines from a given filepath.\n\nArgs:\npath (str): File path\nas_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\nstrings (list): File line list", "source": "codesearchnet"}
{"code": "def traverse(self, index=0):\n    if (index < len(self.nodes)):\n        for entity in self.nodes[index]:\n            for next_result in self.traverse(index=(index + 1)):\n                if isinstance(entity, list):\n                    (yield (entity + next_result))\n                else:\n                    (yield ([entity] + next_result))\n    else:\n        (yield [])", "docstring": "This is used to produce a list of lists where each each item\nin that list is a diffrent combination of items from the lists\nwithin with every combination of such values.\n\nArgs:\nindex (int) : the index at witch to start the list.\nNote this is used only in the function as a processing\n\nReturns:\nlist : is every combination.", "source": "codesearchnet"}
{"code": "def molecule(lines):\n    \n    count_line = lines[3]\n    num_atoms = int(count_line[0:3])\n    num_bonds = int(count_line[3:6])\n    \n    \n    compound = Compound()\n    compound.graph._node = atoms(lines[4: num_atoms+4])\n    compound.graph._adj = bonds(lines[num_atoms+4: num_atoms+num_bonds+4],\n                                compound.graph._node.keys())\n    props = properties(lines[num_atoms+num_bonds+4:])\n    add_properties(props, compound)\n    return compound", "docstring": "Parse molfile part into molecule object\n\nArgs:\nlines (list): lines of molfile part\n\nRaises:\nValueError: Symbol not defined in periodictable.yaml\n(Polymer expression not supported yet)", "source": "juraj-google-style"}
{"code": "def depth_december_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_december_average_ground_temperature`'.format(value))\n\n        self._depth_december_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_december_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_december_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _checkSetpointValue( setpointvalue, maxvalue ):   \n    \n    if maxvalue is None:\n        raise TypeError('The maxvalue (for the setpoint) must not be None!')\n    minimalmodbus._checkNumerical(setpointvalue, minvalue=0, maxvalue=maxvalue, description='setpoint value')", "docstring": "Check that the given setpointvalue is valid.\n\nArgs:\n* setpointvalue (numerical): The setpoint value to be checked. Must be positive.\n* maxvalue (numerical): Upper limit for setpoint value. Must be positive.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def _extract_all_responses(self, resources, api_endpoint, api_name):\n    (all_responses, resources) = self._bulk_cache_lookup(api_name, resources)\n    resource_chunks = self._prepare_resource_chunks(resources)\n    response_chunks = self._request_reports('resource', resource_chunks, api_endpoint)\n    self._extract_response_chunks(all_responses, response_chunks, api_name)\n    return all_responses", "docstring": "Aux function to extract all the API endpoint responses.\n\nArgs:\nresources: list of string hashes.\napi_endpoint: endpoint path\napi_name: endpoint name\nReturns:\nA dict with the hash as key and the VT report as value.", "source": "codesearchnet"}
{"code": "def add_individual(self, genotype):\n    logger.debug('Adding genotype {0} to variant {1}'.format(genotype, self['variant_id']))\n    self['individuals'].append(genotype)", "docstring": "Add the information for a individual\n\nThis adds a genotype dict to variant['individuals']\n\nArgs:\ngenotype (dict): A genotype dictionary", "source": "codesearchnet"}
{"code": "def conditionally_inline_policies(role_name, sr_entry):\n  \n  service_type = sr_entry['type']\n  if not (service_type in SERVICE_TYPE_ROLE and \"policies\" in sr_entry):\n    print_if_verbose(\"not eligible for policies; service_type: {} is not valid for policies \"\n                     \"or no 'policies' key in service registry for this role\".format(service_type))\n    return\n\n  for policy_name in sr_entry['policies']:\n    print_if_verbose(\"loading policy: {} for role: {}\".format(policy_name, role_name))\n    try:\n      policy_document = resolve_policy_document(policy_name)\n    except:\n      fail(\"Exception loading policy: {} for role: {}\".format(policy_name, role_name), sys.exc_info())\n\n    \n    if CONTEXT.commit:\n      try:\n        CLIENTS[\"iam\"].put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document)\n      except:\n        fail(\"Exception putting policy: {} onto role: {}\".format(policy_name, role_name), sys.exc_info())", "docstring": "If 'policies' key lists the filename prefixes of policies to bind to the role,\nload them from the expected path and inline them onto the role\nArgs:\nrole_name: name of the role to attach the policies to\nsr_entry: service registry entry", "source": "juraj-google-style"}
{"code": "def diff_levenshtein(self, diffs):\n    \n    levenshtein = 0\n    insertions = 0\n    deletions = 0\n    for (op, data) in diffs:\n      if op == self.DIFF_INSERT:\n        insertions += len(data)\n      elif op == self.DIFF_DELETE:\n        deletions += len(data)\n      elif op == self.DIFF_EQUAL:\n        \n        levenshtein += max(insertions, deletions)\n        insertions = 0\n        deletions = 0\n    levenshtein += max(insertions, deletions)\n    return levenshtein", "docstring": "Compute the Levenshtein distance; the number of inserted, deleted or\nsubstituted characters.\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nNumber of changes.", "source": "juraj-google-style"}
{"code": "def ne(left: Any, right: Any) -> bool:\n    return not eq(left, right)", "docstring": "Compares if two values are not equal. Use symbolic equality if possible.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Any())\n])\nclass A(pg.Object):\ndef sym_eq(self, right):\nif super().sym_eq(right):\nreturn True\nreturn pg.eq(self.x, right)\n\nclass B:\npass\n\nassert pg.ne(1, 2)\nassert pg.ne(A(1), A(2))\n# A has override `sym_eq`.\nassert not pg.ne(A(1), 1)\n# Objects of B are compared by references.\nassert pg.ne(A(B()), A(B()))\n\nArgs:\nleft: The left-hand value to compare.\nright: The right-hand value to compare.\n\nReturns:\nTrue if left and right is not equal or symbolically equal. Otherwise False.", "source": "github-repos"}
{"code": "def add_module(self, module_name, module_ui):\n    m_button = tk.Label(self.module_selection, text=module_name, bg='white', anchor='w')\n    m_button.grid(column=0, row=len(self.module_selection.winfo_children()), padx=0, pady=0, sticky='W E N S')\n    self.module_buttons[module_name] = m_button\n    m_button.bind('<Button-1>', (lambda e: self.module_selected(module_name, module_ui)))", "docstring": "Adds a module to the list\n\nArgs:\nmodule_name (str): The name of the module\nmodule_ui: The function to call to create the module's UI", "source": "codesearchnet"}
{"code": "def export(self, top=True):\n        \n        out = []\n        if top:\n            out.append(self._internal_name)\n        out.append(self._to_str(self.comments_2))\n        return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\ntop (bool):  if True appends `internal_name` before values.\nAll non list objects should be exported with value top=True,\nall list objects, that are embedded in as fields inlist objects\nshould be exported with `top`=False\n\nReturns:\nstr: The objects string representation", "source": "juraj-google-style"}
{"code": "def convert_placeholder_to_const(input_graph_def, nodes_to_convert=None):\n    input_node_map = {}\n    for node in input_graph_def.node:\n        if node.name not in input_node_map:\n            input_node_map[node.name] = node\n        else:\n            raise ValueError('Duplicate node names detected for ', node.name)\n    dict_to_change = {}\n    for key in PLACEHOLDER_WITH_DEFAULT_LIST:\n        dict_to_change[key] = PLACEHOLDER_WITH_DEFAULT_LIST[key]\n    if nodes_to_convert is not None and len(nodes_to_convert) > 0:\n        dict_list = parse_nodes_dict(nodes_to_convert)\n        dict_to_change.update(dict_list)\n    ph_node_list = []\n    for ph_node in dict_to_change:\n        if not ph_node and ph_node not in input_node_map:\n            continue\n        ph_node_list.append(ph_node)\n    if not ph_node_list:\n        tf_logging.warning('No PlaceholderWithDefault nodes found to convert to Constant. Maybe check the spellings')\n        return input_graph_def\n    result_graph_def = graph_pb2.GraphDef()\n    for node in input_graph_def.node:\n        is_replaced = False\n        new_node = node_def_pb2.NodeDef()\n        if node.op == 'PlaceholderWithDefault' or node.op == 'Placeholder':\n            match_key = [find_key for find_key in dict_to_change.keys() if find_key in node.name]\n            if len(match_key) > 0:\n                if dtypes.bool.as_datatype_enum == node.attr['dtype'].type:\n                    new_val_str = dict_to_change[match_key[0]]\n                    new_node.op = 'Const'\n                    new_node.name = node.name\n                    new_node.attr['dtype'].CopyFrom(node.attr['dtype'])\n                    new_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(strtobool(new_val_str), dtype=dtypes.bool, shape=[])))\n                    is_replaced = True\n                else:\n                    tf_logging.warning('Not converting to Const. Currently only bool             PlaceholderWithDefault or Placeholder can be converted to const.             current dtype = ', node.attr['dtype'])\n        if not is_replaced:\n            new_node.CopyFrom(node)\n        result_graph_def.node.extend([new_node])\n    return result_graph_def", "docstring": "Rename the PlaceHolderWithDefault node to constant\n\nIn a frozen graph, PlaceholderWithDefault nodes can be converted to\nConstant op nodes with same value. This will help simplify the graph.\n\nArgs:\ninput_graph_def: A GraphDef containing a model.\nnodes_to_convert: A list of PlaceholderWithDefault or Placeholder nodes to\nbe converted to Constants with their new value.\n\nReturns:\nmodified graph with PlaceholderWithDefault node converted to Constant node", "source": "github-repos"}
{"code": "def _ParseRecordExtraField(self, byte_stream, file_offset):\n    extra_field_map = self._GetDataTypeMap('asl_record_extra_field')\n    try:\n        record_extra_field = self._ReadStructureFromByteStream(byte_stream, file_offset, extra_field_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse record extra field at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    return record_extra_field", "docstring": "Parses a record extra field.\n\nArgs:\nbyte_stream (bytes): byte stream.\nfile_offset (int): offset of the record extra field relative to\nthe start of the file.\n\nReturns:\nasl_record_extra_field: record extra field.\n\nRaises:\nParseError: if the record extra field cannot be parsed.", "source": "codesearchnet"}
{"code": "def __init__(\n            self,\n            path: str,\n            query_string: bytes,\n            scheme: str,\n            headers: CIMultiDict,\n            subprotocols: List[str],\n            receive: Callable,\n            send: Callable,\n            accept: Callable,\n    ) -> None:\n        \n        super().__init__('GET', scheme, path, query_string, headers)\n        self._accept = accept\n        self._receive = receive\n        self._send = send\n        self._subprotocols = subprotocols", "docstring": "Create a request object.\n\nArguments:\npath: The full unquoted path of the request.\nquery_string: The raw bytes for the query string part.\nscheme: The scheme used for the request.\nheaders: The request headers.\nsubprotocols: The subprotocols requested.\nreceive: Returns an awaitable of the current data\n\naccept: Idempotent callable to accept the websocket connection.", "source": "juraj-google-style"}
{"code": "def popn(self, buffer_type, count):\n        \n\n        buffer_type = str(buffer_type)\n\n        if buffer_type == u'streaming':\n            chosen_buffer = self.streaming_data\n        else:\n            chosen_buffer = self.storage_data\n\n        if count > len(chosen_buffer):\n            raise StreamEmptyError(\"Not enough data in buffer for popn command\", requested=count, stored=len(chosen_buffer), buffer=buffer_type)\n\n        popped = chosen_buffer[:count]\n        remaining = chosen_buffer[count:]\n\n        if buffer_type == u'streaming':\n            self.streaming_data = remaining\n        else:\n            self.storage_data = remaining\n\n        return popped", "docstring": "Remove and return the oldest count values from the named buffer\n\nArgs:\nbuffer_type (str): The buffer to pop from (either u\"storage\" or u\"streaming\")\ncount (int): The number of readings to pop\n\nReturns:\nlist(IOTileReading): The values popped from the buffer", "source": "juraj-google-style"}
{"code": "def ValidateCertificateHostname(cert, hostname):\n    hosts = GetValidHostsForCert(cert)\n    boto.log.debug('validating server certificate: hostname=%s, certificate hosts=%s', hostname, hosts)\n    for host in hosts:\n        host_re = host.replace('.', '\\\\.').replace('*', '[^.]*')\n        if re.search(('^%s$' % (host_re,)), hostname, re.I):\n            return True\n    return False", "docstring": "Validates that a given hostname is valid for an SSL certificate.\n\nArgs:\ncert: A dictionary representing an SSL certificate.\nhostname: The hostname to test.\nReturns:\nbool: Whether or not the hostname is valid for this certificate.", "source": "codesearchnet"}
{"code": "def incr(self, key, value, noreply=False):\n    key = self.check_key(key)\n    cmd = (((b'incr ' + key) + b' ') + six.text_type(value).encode('ascii'))\n    if noreply:\n        cmd += b' noreply'\n    cmd += b'\\r\\n'\n    results = self._misc_cmd([cmd], b'incr', noreply)\n    if noreply:\n        return None\n    if (results[0] == b'NOT_FOUND'):\n        return None\n    return int(results[0])", "docstring": "The memcached \"incr\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: int, the amount by which to increment the value.\nnoreply: optional bool, False to wait for the reply (the default).\n\nReturns:\nIf noreply is True, always returns None. Otherwise returns the new\nvalue of the key, or None if the key wasn't found.", "source": "codesearchnet"}
{"code": "def index_all(self, index_name):\n        \n        oks = 0\n        notoks = 0\n        for ok, item in streaming_bulk(\n            self.es_client,\n            self._iter_documents(index_name)\n        ):\n            if ok:\n                oks += 1\n            else:\n                notoks += 1\n        logging.info(\n            \"Import results: %d ok, %d not ok\",\n            oks,\n            notoks\n        )", "docstring": "Index all available documents, using streaming_bulk for speed\nArgs:\n\nindex_name (string): The index", "source": "juraj-google-style"}
{"code": "def pad_image(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    output_height, output_width = (size['height'], size['width'])\n    input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n    delta_width = output_width - input_width\n    delta_height = output_height - input_height\n    pad_top = delta_height \n    pad_left = delta_width \n    pad_bottom = delta_height - pad_top\n    pad_right = delta_width - pad_left\n    padding = ((pad_top, pad_bottom), (pad_left, pad_right))\n    return pad(image, padding, data_format=data_format, input_data_format=input_data_format)", "docstring": "Pad the image to the specified size at the top, bottom, left and right.\n\nArgs:\nimage (`np.ndarray`):\nThe image to be padded.\nsize (`Dict[str, int]`):\nThe size `{\"height\": h, \"width\": w}` to pad the image to.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe data format of the output image. If unset, the same format as the input image is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def AsDict(self, dt=True):\n        \n        data = {}\n\n        if self.body:\n            data['body'] = self.body\n        if self.posted_at:\n            data['posted_at'] = self.posted_at\n        if self.user:\n            data['user'] = self.user.AsDict()\n\n        return data", "docstring": "A dict representation of this Comment instance.\n\nThe return value uses the same key names as the JSON representation.\n\nArgs:\ndt (bool): If True, return dates as python datetime objects. If\nFalse, return dates as ISO strings.\n\nReturn:\nA dict representing this Comment instance", "source": "juraj-google-style"}
{"code": "def sanitize(vpc_config):\n    if (vpc_config is None):\n        return vpc_config\n    elif (type(vpc_config) is not dict):\n        raise ValueError('vpc_config is not a dict: {}'.format(vpc_config))\n    elif (not vpc_config):\n        raise ValueError('vpc_config is empty')\n    subnets = vpc_config.get(SUBNETS_KEY)\n    if (subnets is None):\n        raise ValueError('vpc_config is missing key: {}'.format(SUBNETS_KEY))\n    if (type(subnets) is not list):\n        raise ValueError('vpc_config value for {} is not a list: {}'.format(SUBNETS_KEY, subnets))\n    elif (not subnets):\n        raise ValueError('vpc_config value for {} is empty'.format(SUBNETS_KEY))\n    security_group_ids = vpc_config.get(SECURITY_GROUP_IDS_KEY)\n    if (security_group_ids is None):\n        raise ValueError('vpc_config is missing key: {}'.format(SECURITY_GROUP_IDS_KEY))\n    if (type(security_group_ids) is not list):\n        raise ValueError('vpc_config value for {} is not a list: {}'.format(SECURITY_GROUP_IDS_KEY, security_group_ids))\n    elif (not security_group_ids):\n        raise ValueError('vpc_config value for {} is empty'.format(SECURITY_GROUP_IDS_KEY))\n    return to_dict(subnets, security_group_ids)", "docstring": "Checks that an instance of VpcConfig has the expected keys and values, removes unexpected keys,\nand raises ValueErrors if any expectations are violated\n\nArgs:\nvpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'\n\nReturns:\nA valid VpcConfig dict containing only 'Subnets' and 'SecurityGroupIds' from the vpc_config parameter\nIf vpc_config parameter is None, returns None\n\nRaises:\nValueError if any expectations are violated:\n* vpc_config must be a non-empty dict\n* vpc_config must have key `Subnets` and the value must be a non-empty list\n* vpc_config must have key `SecurityGroupIds` and the value must be a non-empty list", "source": "codesearchnet"}
{"code": "def _get_mutation_to_unknown(self, node: cfg.CFGNode, values: list[_base.BaseValue]) -> list[function.Mutation]:\n    mutations = []\n    for v in values:\n        if isinstance(v, _instance_base.SimpleValue):\n            for name in v.instance_type_parameters:\n                if name in self._mutated_type_parameters:\n                    mutations.append(function.Mutation(v, name, self.ctx.convert.create_new_unknown(node, action='type_param_' + name)))\n    return mutations", "docstring": "Mutation for making all type parameters in a list of instances \"unknown\".\n\nThis is used if we call a function that has mutable parameters and\nmultiple signatures with unknown parameters.\n\nArgs:\nnode: The current CFG node.\nvalues: A list of instances of BaseValue.\n\nReturns:\nA list of function.Mutation instances.", "source": "github-repos"}
{"code": "def get_general_case_info(adapter, institute_id=None, slice_query=None):\n    \n    general = {}\n\n    \n    name_query = slice_query\n\n    cases = adapter.cases(owner=institute_id, name_query=name_query)\n\n    phenotype_cases = 0\n    causative_cases = 0\n    pinned_cases = 0\n    cohort_cases = 0\n\n    pedigree = {\n        1: {\n            'title': 'Single',\n            'count': 0\n        },\n        2: {\n            'title': 'Duo',\n            'count': 0\n        },\n        3: {\n            'title': 'Trio',\n            'count': 0\n        },\n        'many': {\n            'title': 'Many',\n            'count': 0\n        },\n    }\n\n    case_ids = set()\n\n    total_cases = 0\n    for total_cases,case in enumerate(cases,1):\n        \n        if institute_id:\n            case_ids.add(case['_id'])\n        if case.get('phenotype_terms'):\n            phenotype_cases += 1\n        if case.get('causatives'):\n            causative_cases += 1\n        if case.get('suspects'):\n            pinned_cases += 1\n        if case.get('cohorts'):\n            cohort_cases += 1\n\n        nr_individuals = len(case.get('individuals',[]))\n        if nr_individuals == 0:\n            continue\n        if nr_individuals > 3:\n            pedigree['many']['count'] += 1\n        else:\n            pedigree[nr_individuals]['count'] += 1\n\n    general['total_cases'] = total_cases\n    general['phenotype_cases'] = phenotype_cases\n    general['causative_cases'] = causative_cases\n    general['pinned_cases'] = pinned_cases\n    general['cohort_cases'] = cohort_cases\n    general['pedigree'] = pedigree\n    general['case_ids'] = case_ids\n\n    return general", "docstring": "Return general information about cases\n\nArgs:\nadapter(adapter.MongoAdapter)\ninstitute_id(str)\nslice_query(str):   Query to filter cases to obtain statistics for.\n\n\nReturns:\ngeneral(dict)", "source": "juraj-google-style"}
{"code": "def vcf_records(self, format_tags=None, qualified=False):\n        \n        if qualified:\n            sample_names = self.qualified_sample_names\n        else:\n            sample_names = self.sample_names\n\n        for line in self._file_reader.read_lines():\n            if line.startswith(\"\n                continue\n            vcf_record = vcf.VcfRecord.parse_record(line, sample_names)\n            if format_tags:\n                vcf_record = self.modify_format_tag(vcf_record, format_tags)\n            yield vcf_record", "docstring": "Generates parsed VcfRecord objects.\n\nTypically called in a for loop to process each vcf record in a\nVcfReader. VcfReader must be opened in advanced and closed when\ncomplete. Skips all headers.\n\nArgs:\nqualified: When True, sample names are prefixed with file name\n\nReturns:\nParsed VcfRecord\n\nRaises:\nStopIteration: when reader is exhausted.\nTypeError: if reader is closed.", "source": "juraj-google-style"}
{"code": "def export_node(self, n) -> Dict[(str, Union[(str, List[str])])]:\n    node_dict = {'name': n[0], 'units': _get_units(n[0]), 'dtype': _get_dtype(n[0]), 'arguments': list(self.predecessors(n[0]))}\n    if (not (n[1].get('indicators') is None)):\n        for indicator in n[1]['indicators'].values():\n            if ('dataset' in indicator.__dict__):\n                del indicator.__dict__['dataset']\n        node_dict['indicators'] = [_process_datetime(indicator.__dict__) for indicator in n[1]['indicators'].values()]\n    else:\n        node_dict['indicators'] = None\n    return node_dict", "docstring": "Return dict suitable for exporting to JSON.\n\nArgs:\nn: A dict representing the data in a networkx AnalysisGraph node.\n\nReturns:\nThe node dict with additional fields for name, units, dtype, and\narguments.", "source": "codesearchnet"}
{"code": "def launch_simulation(self, parameter):\n        \n        return next(SimulationRunner.run_simulations(self, [parameter],\n                                                     self.data_folder))", "docstring": "Launch a single simulation, using SimulationRunner's facilities.\n\nThis function is used by ParallelRunner's run_simulations to map\nsimulation running over the parameter list.\n\nArgs:\nparameter (dict): the parameter combination to simulate.", "source": "juraj-google-style"}
{"code": "def is_frozen_graph(sess):\n    for op in sess.graph.get_operations():\n        if op.type.startswith('Variable') or op.type.endswith('VariableOp'):\n            return False\n    return True", "docstring": "Determines if the graph is frozen.\n\nDetermines if a graph has previously been frozen by checking for any\noperations of type Variable*. If variables are found, the graph is not frozen.\n\nArgs:\nsess: TensorFlow Session.\n\nReturns:\nBool.", "source": "github-repos"}
{"code": "def _AssertAtLeast3DImage(image):\n    return control_flow_ops.with_dependencies(_CheckAtLeast3DImage(image, require_static=False), image)", "docstring": "Assert that we are working with a properly shaped image.\n\nPerforms the check statically if possible (i.e. if the shape\nis statically known). Otherwise adds a control dependency\nto an assert op that checks the dynamic shape.\n\nArgs:\nimage: >= 3-D Tensor of size [*, height, width, depth]\n\nRaises:\nValueError: if image.shape is not a [>= 3] vector.\n\nReturns:\nIf the shape of `image` could be verified statically, `image` is\nreturned unchanged, otherwise there will be a control dependency\nadded that asserts the correct dynamic shape.", "source": "github-repos"}
{"code": "def compute_bleu(reference_corpus, translation_corpus, max_order=4,\n                 use_bp=True):\n  \n  reference_length = 0\n  translation_length = 0\n  bp = 1.0\n  geo_mean = 0\n\n  matches_by_order = [0] * max_order\n  possible_matches_by_order = [0] * max_order\n  precisions = []\n\n  for (references, translations) in zip(reference_corpus, translation_corpus):\n    reference_length += len(references)\n    translation_length += len(translations)\n    ref_ngram_counts = _get_ngrams_with_counter(references, max_order)\n    translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)\n\n    overlap = dict((ngram,\n                    min(count, translation_ngram_counts[ngram]))\n                   for ngram, count in ref_ngram_counts.items())\n\n    for ngram in overlap:\n      matches_by_order[len(ngram) - 1] += overlap[ngram]\n    for ngram in translation_ngram_counts:\n      possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[\n          ngram]\n\n  precisions = [0] * max_order\n  smooth = 1.0\n\n  for i in xrange(0, max_order):\n    if possible_matches_by_order[i] > 0:\n      precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]\n      if matches_by_order[i] > 0:\n        precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[\n            i]\n      else:\n        smooth *= 2\n        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])\n    else:\n      precisions[i] = 0.0\n\n  if max(precisions) > 0:\n    p_log_sum = sum(math.log(p) for p in precisions if p)\n    geo_mean = math.exp(p_log_sum / max_order)\n\n  if use_bp:\n    ratio = translation_length / reference_length\n    bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0\n  bleu = geo_mean * bp\n  return np.float32(bleu)", "docstring": "Computes BLEU score of translated segments against one or more references.\n\nArgs:\nreference_corpus: list of references for each translation. Each\nreference should be tokenized into a list of tokens.\ntranslation_corpus: list of translations to score. Each translation\nshould be tokenized into a list of tokens.\nmax_order: Maximum n-gram order to use when computing BLEU score.\nuse_bp: boolean, whether to apply brevity penalty.\n\nReturns:\nBLEU score.", "source": "juraj-google-style"}
{"code": "def _update_service_current_state(service: ServiceState):\n    \n    LOG.debug(\"Setting current state from target state for %s\", service.id)\n    service.update_current_state(service.target_state)", "docstring": "Update the current state of a service.\n\nUpdates the current state of services after their target state has changed.\n\nArgs:\nservice (ServiceState): Service state object to update", "source": "juraj-google-style"}
{"code": "def save(self, data: dict):\n    with open(self.output_path, 'w') as f:\n        json.dump(data, f)", "docstring": "Save the provided data object in a json file.\n\nArgs:\ndata (`dict`): The data to store.", "source": "github-repos"}
{"code": "def init(self, basedir, config, sourcedir, targetdir, cwd='', commit=True):\n    if (not basedir):\n        basedir = '.'\n    (abs_basedir, abs_config, abs_sourcedir, abs_targetdir) = self.expand(basedir, config, sourcedir, targetdir, cwd)\n    self.valid_paths(abs_config, abs_sourcedir, abs_targetdir)\n    if commit:\n        self.commit(sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir)\n    return {'basedir': abs_basedir, 'config': abs_config, 'sourcedir': abs_sourcedir, 'targetdir': abs_targetdir}", "docstring": "Init project structure and configuration from given arguments\n\nArgs:\nbasedir (string): Project base directory used to prepend relative\npaths. If empty or equal to '.', it will be filled with current\ndirectory path.\nconfig (string): Settings file path.\nsourcedir (string): Source directory path.\ntargetdir (string): Compiled files target directory path.\n\nKeyword Arguments:\ncwd (string): Current directory path to prepend base dir if empty.\ncommit (bool): If ``False``, directory structure and settings file\nwon't be created.\n\nReturns:\ndict: A dict containing expanded given paths.", "source": "codesearchnet"}
{"code": "def _ParseFieldsMetadata(self, structure):\n    \n    fields = structure.fields.split(' ')\n\n    log_line_structure = pyparsing.Empty()\n    if fields[0] == 'date' and fields[1] == 'time':\n      log_line_structure += self.DATE_TIME.setResultsName('date_time')\n      fields = fields[2:]\n\n    for member in fields:\n      log_line_structure += self._LOG_LINE_STRUCTURES.get(member, self.URI)\n\n    updated_structures = []\n    for line_structure in self._line_structures:\n      if line_structure[0] != 'logline':\n        updated_structures.append(line_structure)\n    updated_structures.append(('logline', log_line_structure))\n    \n    \n    self._line_structures = updated_structures", "docstring": "Parses the fields metadata and updates the log line definition to match.\n\nArgs:\nstructure (pyparsing.ParseResults): structure parsed from the log file.", "source": "juraj-google-style"}
{"code": "def _project_dict(self, **kwargs: Dict[str, Any]) -> Dict[str, Hist]:\n        \n        \n        get_hist_args = copy.deepcopy(kwargs)\n        projection_name_args = copy.deepcopy(kwargs)\n        for key, input_observable in self.observable_to_project_from.items():\n            output_hist, projection_name, projection_name_args, = self._project_observable(\n                input_key = key,\n                input_observable = input_observable,\n                get_hist_args = get_hist_args,\n                projection_name_args = projection_name_args,\n                **kwargs,\n            )\n\n            \n            output_hist_args = projection_name_args\n            output_hist_args.update({  \n                \"output_hist\": output_hist,\n                \"projection_name\": projection_name\n            })\n            output_key_name = self.output_key_name(**output_hist_args)  \n            self.output_observable[output_key_name] = self.output_hist(**output_hist_args)  \n\n        return self.output_observable", "docstring": "Driver function for projecting and storing a dictionary of observables.\n\nArgs:\nkwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)\nReturns:\nThe projected histograms. The projected histograms are also stored in ``output_observable``.", "source": "juraj-google-style"}
{"code": "def LateBind(self, target=None):\n    if (not issubclass(target, RDFProtoStruct)):\n        raise TypeError(('Field %s expects a protobuf, but target is %s' % (self, target)))\n    self.late_bound = False\n    self.type = target\n    self.owner.AddDescriptor(self)", "docstring": "Late binding callback.\n\nThis method is called on this field descriptor when the target RDFValue\nclass is finally defined. It gives the field descriptor an opportunity to\ninitialize after the point of definition.\n\nArgs:\ntarget: The target nested class.\n\nRaises:\nTypeError: If the target class is not of the expected type.", "source": "codesearchnet"}
{"code": "def get_full_alias(self, query):\n    if (query in self.alias_table.sections()):\n        return query\n    return next((section for section in self.alias_table.sections() if (section.split()[0] == query)), '')", "docstring": "Get the full alias given a search query.\n\nArgs:\nquery: The query this function performs searching on.\n\nReturns:\nThe full alias (with the placeholders, if any).", "source": "codesearchnet"}
{"code": "def delete_branch(profile, name):\n    ref = ('heads/' + name)\n    data = refs.delete_ref(profile, ref)\n    return data", "docstring": "Delete a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the branch to delete.\n\nReturns:\nThe response of the DELETE request.", "source": "codesearchnet"}
{"code": "def run(self, text):\n        \n        for regex in self.regexes:\n            text = regex.sub(self.repl, text)\n        return text", "docstring": "Run each regex substitution on ``text``.\n\nArgs:\ntext (string): the input text.\n\nReturns:\nstring: text after all substitutions have been sequentially\napplied.", "source": "juraj-google-style"}
{"code": "def get_all_artifacts_per_task_id(chain, upstream_artifacts):\n    all_artifacts_per_task_id = {}\n    for link in chain.links:\n        if (link.task_type in PARENT_TASK_TYPES):\n            add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=link.task_id, item='public/task-graph.json')\n        if (link.task_type in DECISION_TASK_TYPES):\n            add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=link.task_id, item='public/actions.json')\n            add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=link.task_id, item='public/parameters.yml')\n    if upstream_artifacts:\n        for upstream_dict in upstream_artifacts:\n            add_enumerable_item_to_dict(dict_=all_artifacts_per_task_id, key=upstream_dict['taskId'], item=upstream_dict['paths'])\n    for (task_id, paths) in all_artifacts_per_task_id.items():\n        all_artifacts_per_task_id[task_id] = sorted(set(paths))\n    return all_artifacts_per_task_id", "docstring": "Return every artifact to download, including the Chain Of Trust Artifacts.\n\nArgs:\nchain (ChainOfTrust): the chain of trust object\nupstream_artifacts: the list of upstream artifact definitions\n\nReturns:\ndict: sorted list of paths to downloaded artifacts ordered by taskId", "source": "codesearchnet"}
{"code": "def serialize_ndarray_b64(o):\n    \n    if o.flags['C_CONTIGUOUS']:\n        o_data = o.data\n    else:\n        o_data = np.ascontiguousarray(o).data\n    data_b64 = base64.b64encode(o_data)\n    return dict(\n        _type='np.ndarray',\n        data=data_b64.decode('utf-8'),\n        dtype=o.dtype,\n        shape=o.shape)", "docstring": "Serializes a :obj:`numpy.ndarray` in a format where the datatype and shape are\nhuman-readable, but the array data itself is binary64 encoded.\n\nArgs:\no (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.\n\nReturns:\nA dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj-google-style"}
{"code": "def rsub(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"rsub\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Subtract a DataFrame/Series/scalar from this DataFrame.\n\nArgs:\nother: The object to use to apply the subtraction to this.\naxis: The axis to apply the subtraction over.\nlevel: Mutlilevel index level to subtract over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the subtraciont applied.", "source": "juraj-google-style"}
{"code": "def get_tensor_mtf_dimension_names(self, tensor_name):\n    \n    tensor = self._name_to_tensor(tensor_name)\n    if isinstance(tensor, mtf.Tensor):\n      return tensor.shape.dimension_names\n    else:  \n      return []", "docstring": "The Mesh TensorFlow dimensions associated with a tensor.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na [string], the names of Mesh TensorFlow dimensions.", "source": "juraj-google-style"}
{"code": "def prune_unused_nodes(meta_graph, signature_def):\n  \n  \n  \n  graph = tf_v1.Graph()\n  with graph.as_default():\n    tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope=\"\")\n    \n    used_node_names = set()\n    for _, tensor_def in signature_def.outputs.items():\n      output_tensor = graph.get_tensor_by_name(tensor_def.name)\n      mark_backward(output_tensor, used_node_names)\n    \n    node_filter_in_list = []\n    for node in meta_graph.graph_def.node:\n      \n      \n      \n      \n      if node.name in used_node_names or node.op == \"VarHandleOp\":\n        node_filter_in_list.append(node)\n    del meta_graph.graph_def.node[:]\n    meta_graph.graph_def.node.extend(node_filter_in_list)\n  del graph", "docstring": "Function to prune unused ops given a signature def.\n\nThis function does a graph traversal through from all outputs as\ndefined in the signature_def to collect all used nodes. Then, any\nnodes which are unused can be discarded. This is useful for graph which are\nexecuting eagerly or on TPUs.\n\nArgs:\nmeta_graph: The input/output MetaGraphDef for which we wish to prune.\nsignature_def: A SignatureDef which specifies the outputs from which we wish\nto start graph traversal.", "source": "juraj-google-style"}
{"code": "def __init__(self, dev_id, address, local_key=None, dev_type=None, connection_timeout=10):\n        \n        self.id = dev_id\n        self.address = address\n        self.local_key = local_key\n        self.local_key = local_key.encode('latin1')\n        self.dev_type = dev_type\n        self.connection_timeout = connection_timeout\n\n        self.port = 6668", "docstring": "Represents a Tuya device.\n\nArgs:\ndev_id (str): The device id.\naddress (str): The network address.\nlocal_key (str, optional): The encryption key. Defaults to None.\ndev_type (str, optional): The device type.\nIt will be used as key for lookups in payload_dict.\nDefaults to None.\n\nAttributes:\nport (int): The port to connect to.", "source": "juraj-google-style"}
{"code": "def NormalizePath(path, sep='/'):\n    if (not path):\n        return sep\n    path = SmartUnicode(path)\n    path_list = path.split(sep)\n    if (path_list[0] in ['.', '..', '']):\n        path_list.pop(0)\n    i = 0\n    while True:\n        list_len = len(path_list)\n        for i in range(i, len(path_list)):\n            if ((path_list[i] == '.') or (not path_list[i])):\n                path_list.pop(i)\n                break\n            elif (path_list[i] == '..'):\n                path_list.pop(i)\n                if (((i == 1) and path_list[0]) or (i > 1)):\n                    i -= 1\n                    path_list.pop(i)\n                break\n        if (len(path_list) == list_len):\n            return (sep + sep.join(path_list))", "docstring": "A sane implementation of os.path.normpath.\n\nThe standard implementation treats leading / and // as different leading to\nincorrect normal forms.\n\nNOTE: Its ok to use a relative path here (without leading /) but any /../ will\nstill be removed anchoring the path at the top level (e.g. foo/../../../../bar\n=> bar).\n\nArgs:\npath: The path to normalize.\nsep: Separator used.\n\nReturns:\nA normalized path. In this context normalized means that all input paths\nthat would result in the system opening the same physical file will produce\nthe same normalized path.", "source": "codesearchnet"}
{"code": "def _get_structured_grad_output(outputs, grads, body_grad_graph):\n    result = []\n    outputs_idx = 3\n    structured_outputs_idx = 3\n    for g in grads:\n        if g is None:\n            result.append(None)\n            continue\n        output = body_grad_graph.structured_outputs[structured_outputs_idx]\n        structured_outputs_idx += 1\n        if isinstance(output, indexed_slices.IndexedSlices):\n            result.append(indexed_slices.IndexedSlices(values=outputs[outputs_idx], indices=outputs[outputs_idx + 1], dense_shape=outputs[outputs_idx + 2]))\n            outputs_idx += 3\n        else:\n            assert isinstance(output, tensor_lib.Tensor)\n            result.append(outputs[outputs_idx])\n            outputs_idx += 1\n    return result", "docstring": "Returns the values that should be returned from the while grad function.\n\nArgs:\noutputs: the raw Tensor outputs of the grad While op.\ngrads: the input gradients to the gradient function.\nbody_grad_graph: _WhileBodyGradFuncGraph.\n\nReturns:\nA list of gradient values. May include Nones.", "source": "github-repos"}
{"code": "def is_compatible_with(self, spec_or_tensor):\n    \n    return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and\n            self._shape.is_compatible_with(spec_or_tensor.shape))", "docstring": "Returns True if spec_or_tensor is compatible with this TensorSpec.\n\nTwo tensors are considered compatible if they have the same dtype\nand their shapes are compatible (see `tf.TensorShape.is_compatible_with`).\n\nArgs:\nspec_or_tensor: A tf.TensorSpec or a tf.Tensor\n\nReturns:\nTrue if spec_or_tensor is compatible with self.", "source": "juraj-google-style"}
{"code": "def send(query,\n         address=DEFAULT_ADDRESS,\n         port=DEFAULT_PORT,\n         ttl=DEFAULT_TTL,\n         local_only=False,\n         timeout_s=2):\n  \n  \n  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n  sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)\n  if local_only:\n    \n    sock.setsockopt(\n        socket.IPPROTO_IP,\n        socket.IP_MULTICAST_IF,\n        struct.pack('!L', LOCALHOST_ADDRESS))\n  sock.settimeout(timeout_s)\n  sock.sendto(query.encode('utf-8'), (address, port))\n\n  \n  recv_queue = queue.Queue()\n  def _handle_responses():\n    while True:\n      try:\n        data, address = sock.recvfrom(MAX_MESSAGE_BYTES)\n        data = data.decode('utf-8')\n      except socket.timeout:\n        recv_queue.put(None)\n        break\n      else:\n        _LOG.debug('Multicast response to query \"%s\": %s:%s',\n                   query, address[0], data)\n        recv_queue.put((address[0], str(data)))\n\n  \n  response_thread = threading.Thread(target=_handle_responses)\n  response_thread.start()\n  while response_thread.is_alive():\n    recv_tuple = recv_queue.get()\n    if not recv_tuple:\n      break\n    yield recv_tuple\n  response_thread.join()", "docstring": "Sends a query to the given multicast socket and returns responses.\n\nArgs:\nquery: The string query to send.\naddress: Multicast IP address component of the socket to send to.\nport: Multicast UDP port component of the socket to send to.\nttl: TTL for multicast messages. 1 to keep traffic in-network.\ntimeout_s: Seconds to wait for responses.\n\nReturns: A set of all responses that arrived before the timeout expired.\nResponses are tuples of (sender_address, message).", "source": "juraj-google-style"}
{"code": "def get_system_time():\n    now = win32api.GetLocalTime()\n    meridian = 'AM'\n    hours = int(now[4])\n    if (hours == 12):\n        meridian = 'PM'\n    elif (hours == 0):\n        hours = 12\n    elif (hours > 12):\n        hours = (hours - 12)\n        meridian = 'PM'\n    return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)", "docstring": "Get the system time.\n\nReturns:\nstr: Returns the system time in HH:MM:SS AM/PM format.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.get_system_time", "source": "codesearchnet"}
{"code": "def base256_encode(n, minwidth=0):\n    if (n > 0):\n        arr = []\n        while n:\n            (n, rem) = divmod(n, 256)\n            arr.append(rem)\n        b = bytearray(reversed(arr))\n    elif (n == 0):\n        b = bytearray(b'\\x00')\n    else:\n        raise ValueError('Negative numbers not supported')\n    if ((minwidth > 0) and (len(b) < minwidth)):\n        padding = ((minwidth - len(b)) * b'\\x00')\n        b = (bytearray(padding) + b)\n    b.reverse()\n    return b", "docstring": "Encode the input with base256.\n\nArgs:\nn (int): input value.\nminwidth: minimum return value length.\n\nRaises:\nValueError: if a negative number is provided.\n\nReturns:\nbytearray:", "source": "codesearchnet"}
{"code": "async def verify_chain_of_trust(chain):\n    log_path = os.path.join(chain.context.config['task_log_dir'], 'chain_of_trust.log')\n    scriptworker_log = logging.getLogger('scriptworker')\n    with contextual_log_handler(chain.context, path=log_path, log_obj=scriptworker_log, formatter=AuditLogFormatter(fmt=chain.context.config['log_fmt'], datefmt=chain.context.config['log_datefmt'])):\n        try:\n            (await build_task_dependencies(chain, chain.task, chain.name, chain.task_id))\n            (await download_cot(chain))\n            verify_cot_signatures(chain)\n            (await download_cot_artifacts(chain))\n            task_count = (await verify_task_types(chain))\n            check_num_tasks(chain, task_count)\n            (await verify_worker_impls(chain))\n            (await trace_back_to_tree(chain))\n        except (BaseDownloadError, KeyError, AttributeError) as exc:\n            log.critical('Chain of Trust verification error!', exc_info=True)\n            if isinstance(exc, CoTError):\n                raise\n            else:\n                raise CoTError(str(exc))\n        log.info('Good.')", "docstring": "Build and verify the chain of trust.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\n\nRaises:\nCoTError: on failure", "source": "codesearchnet"}
{"code": "def call(self, inputs):\n    del inputs\n    latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size), sample_shape=1, name='latent_code')\n    state = self.lstm.zero_state(1, dtype=tf.float32)\n    t = 0\n    productions = []\n    stack = [self.grammar.start_symbol]\n    while stack:\n        symbol = stack.pop()\n        (net, state) = self.lstm(latent_code, state)\n        logits = (self.output_layer(net) + self.grammar.mask(symbol, on_value=0.0, off_value=(- 1000000000.0)))\n        production = ed.OneHotCategorical(logits=logits, name=('production_' + str(t)))\n        (_, rhs) = self.grammar.production_rules[tf.argmax(input=production, axis=(- 1))]\n        for symbol in rhs:\n            if (symbol in self.grammar.nonterminal_symbols):\n                stack.append(symbol)\n        productions.append(production)\n        t += 1\n    return tf.stack(productions, axis=1)", "docstring": "Runs the model forward to generate a sequence of productions.\n\nArgs:\ninputs: Unused.\n\nReturns:\nproductions: Tensor of shape [1, num_productions, num_production_rules].\nSlices along the `num_productions` dimension represent one-hot vectors.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    if ((not self._file_object_set_in_init) and (not path_spec)):\n        raise ValueError('Missing path specification.')\n    if (not self._file_object_set_in_init):\n        if (not path_spec.HasParent()):\n            raise errors.PathSpecError('Unsupported path specification without parent.')\n        self._encryption_method = getattr(path_spec, 'encryption_method', None)\n        if (self._encryption_method is None):\n            raise errors.PathSpecError('Path specification missing encryption method.')\n        self._file_object = resolver.Resolver.OpenFileObject(path_spec.parent, resolver_context=self._resolver_context)\n    self._path_spec = path_spec", "docstring": "Opens the file-like object.\n\nArgs:\npath_spec (Optional[PathSpec]): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "codesearchnet"}
{"code": "def RestrictFeedItemToGeoTarget(client, feed_item, location_id):\n  \n  \n  feed_item_target_service = client.GetService(\n      'FeedItemTargetService', version='v201809')\n\n  \n  \n  criterion_target = {\n      'xsi_type': 'FeedItemCriterionTarget',\n      'feedId': feed_item['feedId'],\n      'feedItemId': feed_item['feedItemId'],\n      \n      \n      'criterion': {\n          'xsi_type': 'Location',\n          'id': location_id\n      }\n  }\n\n  operation = {'operator': 'ADD', 'operand': criterion_target}\n\n  response = feed_item_target_service.mutate([operation])\n  new_location_target = response['value'][0]\n  print('Feed item target for feed ID %d and feed item ID %d was created to '\n        'restrict serving to location ID %d.' %\n        (new_location_target['feedId'],\n         new_location_target['feedItemId'],\n         new_location_target['criterion']['id']))", "docstring": "Restrict a feed item to a geo target location.\n\nArgs:\nclient: An AdWordsClient instance.\nfeed_item: A FeedItem.\nlocation_id: The Id of the location to restrict to.", "source": "juraj-google-style"}
{"code": "def SetHeaders(self, soap_headers, http_headers):\n    self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers)", "docstring": "Set the headers for the underlying client.\n\nArgs:\nsoap_headers: A SOAP element for the SOAP headers.\nhttp_headers: A dictionary for the http headers.", "source": "codesearchnet"}
{"code": "def get_or_generate_vocabulary(data_dir,\n                               tmp_dir,\n                               data_prefix,\n                               max_page_size_exp,\n                               approx_vocab_size=32768,\n                               strip=True):\n  \n  num_pages_for_vocab_generation = approx_vocab_size \n  vocab_file = vocab_filename(approx_vocab_size, strip)\n\n  def my_generator(data_prefix):\n    \n    count = 0\n    for page in corpus_page_generator(\n        all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp):\n      revisions = page[\"revisions\"]\n      if revisions:\n        text = get_text(revisions[-1], strip=strip)\n        yield text\n        count += 1\n        if count % 100 == 0:\n          tf.logging.info(\"reading pages for vocab %d\" % count)\n        if count > num_pages_for_vocab_generation:\n          break\n\n  return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file,\n                                                     approx_vocab_size,\n                                                     my_generator(data_prefix))", "docstring": "Get or generate the vocabulary.\n\nArgs:\ndata_dir: a string\ntmp_dir: a string\ndata_prefix: a string\nmax_page_size_exp: an integer\napprox_vocab_size: an integer\nstrip: a boolean\n\nReturns:\na TextEncoder", "source": "juraj-google-style"}
{"code": "def _PrintProcessingTime(self, processing_status):\n    if (not processing_status):\n        processing_time = '00:00:00'\n    else:\n        processing_time = (time.time() - processing_status.start_time)\n        time_struct = time.gmtime(processing_time)\n        processing_time = time.strftime('%H:%M:%S', time_struct)\n    self._output_writer.Write('Processing time\\t\\t: {0:s}\\n'.format(processing_time))", "docstring": "Prints the processing time.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "def get_config(self):\n    data = self.data\n    if type(self.data).__module__ == np.__name__:\n        data = self.data.tolist()\n    try:\n        json_data = json.dumps(data)\n    except TypeError as e:\n        raise TypeError(f'Data not JSON Serializable: {data}') from e\n    targets = self.targets\n    if type(self.targets).__module__ == np.__name__:\n        targets = self.targets.tolist()\n    try:\n        json_targets = json.dumps(targets)\n    except TypeError as e:\n        raise TypeError(f'Targets not JSON Serializable: {targets}') from e\n    return {'data': json_data, 'targets': json_targets, 'length': self.length, 'sampling_rate': self.sampling_rate, 'stride': self.stride, 'start_index': self.start_index, 'end_index': self.end_index, 'shuffle': self.shuffle, 'reverse': self.reverse, 'batch_size': self.batch_size}", "docstring": "Returns the TimeseriesGenerator configuration as Python dictionary.\n\nReturns:\nA Python dictionary with the TimeseriesGenerator configuration.", "source": "github-repos"}
{"code": "def sort_imports(file: str, check_only: bool=True):\n    with open(file, encoding='utf-8') as f:\n        code = f.read()\n    if '_import_structure' not in code or 'define_import_structure' in code:\n        return\n    main_blocks = split_code_in_indented_blocks(code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')\n    for block_idx in range(1, len(main_blocks) - 1):\n        block = main_blocks[block_idx]\n        block_lines = block.split('\\n')\n        line_idx = 0\n        while line_idx < len(block_lines) and '_import_structure' not in block_lines[line_idx]:\n            if 'import dummy' in block_lines[line_idx]:\n                line_idx = len(block_lines)\n            else:\n                line_idx += 1\n        if line_idx >= len(block_lines):\n            continue\n        internal_block_code = '\\n'.join(block_lines[line_idx:-1])\n        indent = get_indent(block_lines[1])\n        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)\n        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key\n        keys = [pattern.search(b).groups()[0] if pattern.search(b) is not None else None for b in internal_blocks]\n        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]\n        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]\n        count = 0\n        reorderded_blocks = []\n        for i in range(len(internal_blocks)):\n            if keys[i] is None:\n                reorderded_blocks.append(internal_blocks[i])\n            else:\n                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])\n                reorderded_blocks.append(block)\n                count += 1\n        main_blocks[block_idx] = '\\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])\n    if code != '\\n'.join(main_blocks):\n        if check_only:\n            return True\n        else:\n            print(f'Overwriting {file}.')\n            with open(file, 'w', encoding='utf-8') as f:\n                f.write('\\n'.join(main_blocks))", "docstring": "Sort the imports defined in the `_import_structure` of a given init.\n\nArgs:\nfile (`str`): The path to the init to check/fix.\ncheck_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.", "source": "github-repos"}
{"code": "def cast_to_seq(obj, alphabet=IUPAC.extended_protein):\n    if isinstance(obj, Seq):\n        return obj\n    if isinstance(obj, SeqRecord):\n        return obj.seq\n    if isinstance(obj, str):\n        obj = obj.upper()\n        return Seq(obj, alphabet)\n    else:\n        raise ValueError('Must provide a string, Seq, or SeqRecord object.')", "docstring": "Return a Seq representation of a string or SeqRecord object.\n\nArgs:\nobj (str, Seq, SeqRecord): Sequence string or Biopython SeqRecord object\nalphabet: See Biopython SeqRecord docs\n\nReturns:\nSeq: Seq representation of the sequence", "source": "codesearchnet"}
{"code": "def get_files(self, retrieve=False):\n\n\t\t\n\n\t\tif self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasFile'):\n\t\t\tfiles = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasFile ]\n\n\t\t\t\n\t\t\treturn files\n\n\t\telse:\n\t\t\treturn []", "docstring": "get pcdm:hasFile for this resource\n\nArgs:\nretrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload", "source": "juraj-google-style"}
{"code": "def add(self, email):\n    if (email not in self._collaborators):\n        self._collaborators[email] = ShareRequestValue.Add\n    self._dirty = True", "docstring": "Add a collaborator.\n\nArgs:\nstr : Collaborator email address.", "source": "codesearchnet"}
{"code": "def get_cases(variant_source, case_lines=None, case_type='ped',\n              variant_type='snv', variant_mode='vcf'):\n        \n        individuals = get_individuals(\n            variant_source=variant_source,\n            case_lines=case_lines,\n            case_type=case_type,\n            variant_mode=variant_mode\n        )\n        case_objs = []\n        case_ids = set()\n\n        compressed = False\n        tabix_index = False\n        \n        if variant_source.endswith('.gz'):\n            logger.debug(\"Found compressed variant source\")\n            compressed = True\n            tabix_file = '.'.join([variant_source, 'tbi'])\n            if os.path.exists(tabix_file):\n                logger.debug(\"Found index file\")\n                tabix_index = True\n\n        if len(individuals) > 0:\n            for individual in individuals:\n                case_ids.add(individual.case_id)\n        else:\n            case_ids = [os.path.basename(variant_source)]\n\n        for case_id in case_ids:\n            logger.info(\"Found case {0}\".format(case_id))\n            case = Case(\n                case_id=case_id,\n                name=case_id,\n                variant_source=variant_source,\n                variant_type=variant_type,\n                variant_mode=variant_mode,\n                compressed=compressed,\n                tabix_index=tabix_index\n                )\n\n            \n            for individual in individuals:\n                if individual.case_id == case_id:\n                    logger.info(\"Adding ind {0} to case {1}\".format(\n                        individual.name, individual.case_id\n                    ))\n                    case.add_individual(individual)\n\n            case_objs.append(case)\n\n        return case_objs", "docstring": "Create a cases and populate it with individuals\n\nArgs:\nvariant_source (str): Path to vcf files\ncase_lines (Iterable): Ped like lines\ncase_type (str): Format of case lines\n\nReturns:\ncase_objs (list(puzzle.models.Case))", "source": "juraj-google-style"}
{"code": "def slot(self):\n    if (self.type == EventType.TOUCH_FRAME):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_touch_get_slot(self._handle)", "docstring": "The slot of this touch event.\n\nSee the kernel's multitouch protocol B documentation for more\ninformation.\n\nIf the touch event has no assigned slot, for example if it is from\na single touch device, this property returns -1.\n\nFor events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,\n:attr:`~libinput.constant.EventType.TOUCH_UP`,\n:attr:`~libinput.constant.EventType.TOUCH_MOTION` or\n:attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property\nraises :exc:`AttributeError`.\n\nReturns:\nint: The slot of this touch event.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def check_imports(filename: Union[str, os.PathLike]) -> list[str]:\n    imports = get_imports(filename)\n    missing_packages = []\n    for imp in imports:\n        try:\n            importlib.import_module(imp)\n        except ImportError as exception:\n            logger.warning(f'Encountered exception while importing {imp}: {exception}')\n            if 'No module named' in str(exception):\n                missing_packages.append(imp)\n            else:\n                raise\n    if len(missing_packages) > 0:\n        raise ImportError(f'This modeling file requires the following packages that were not found in your environment: {', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`')\n    return get_relative_imports(filename)", "docstring": "Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a\nlibrary is missing.\n\nArgs:\nfilename (`str` or `os.PathLike`): The module file to check.\n\nReturns:\n`list[str]`: The list of relative imports in the file.", "source": "github-repos"}
{"code": "def persist_project(project):\n    \n    from benchbuild.utils.schema import Project, Session\n    session = Session()\n    projects = session.query(Project) \\\n        .filter(Project.name == project.name) \\\n        .filter(Project.group_name == project.group)\n\n    name = project.name\n    desc = project.__doc__\n    domain = project.domain\n    group_name = project.group\n    version = project.version() \\\n        if callable(project.version) else project.version\n    try:\n        src_url = project.src_uri\n    except AttributeError:\n        src_url = 'unknown'\n\n    if projects.count() == 0:\n        newp = Project()\n        newp.name = name\n        newp.description = desc\n        newp.src_url = src_url\n        newp.domain = domain\n        newp.group_name = group_name\n        newp.version = version\n        session.add(newp)\n    else:\n        newp_value = {\n            \"name\": name,\n            \"description\": desc,\n            \"src_url\": src_url,\n            \"domain\": domain,\n            \"group_name\": group_name,\n            \"version\": version\n        }\n        projects.update(newp_value)\n\n    session.commit()\n    return (projects, session)", "docstring": "Persist this project in the benchbuild database.\n\nArgs:\nproject: The project we want to persist.", "source": "juraj-google-style"}
{"code": "def cancel(self, identifier: typing.Any, exc_type: typing.Optional[type]=None) -> bool:\n    raise NotImplementedError()", "docstring": "Cancel an active coroutine and remove it from the schedule.\n\nArgs:\nidentifier (typing.Any): The identifier returned from add.\nexc_type (typing.Optional[type]): The exception type to throw into\nthe coroutine on cancel. No exception is thrown if nothing is\ngiven. Instead the coroutine is no longer processed.\n\nReturns:\nbool: True if the coroutine is cancelled. False if the identifier\nis invalid or if the coroutine is complete.", "source": "codesearchnet"}
{"code": "def _get_recursive_dependancies(self, dependencies_map, sourcepath, recursive=True):\n    collected = set([])\n    collected.update(dependencies_map.get(sourcepath, []))\n    sequence = collected.copy()\n    walkthrough = []\n    if recursive:\n        while True:\n            if (not sequence):\n                break\n            item = sequence.pop()\n            walkthrough.append(item)\n            current_item_dependancies = dependencies_map.get(item, [])\n            for dependency in current_item_dependancies:\n                if (dependency in walkthrough):\n                    continue\n                else:\n                    collected.add(dependency)\n                    sequence.add(dependency)\n            if (sourcepath in walkthrough):\n                msg = \"A circular import has occured by '{}'\"\n                raise CircularImport(msg.format(current_item_dependancies))\n            if (not sequence):\n                break\n    return collected", "docstring": "Return all dependencies of a source, recursively searching through its\ndependencies.\n\nThis is a common method used by ``children`` and ``parents`` methods.\n\nArgs:\ndependencies_map (dict): Internal buffer (internal buffers\n``_CHILDREN_MAP`` or ``_PARENTS_MAP``) to use for searching.\nsourcepath (str): Source file path to start searching for\ndependencies.\n\nKeyword Arguments:\nrecursive (bool): Switch to enable recursive finding (if True).\nDefault to True.\n\nRaises:\nCircularImport: If circular error is detected from a source.\n\nReturns:\nset: List of dependencies paths.", "source": "codesearchnet"}
{"code": "def _find_relation(self, span_doc: doc, r: List) -> Dict:\n        \n\n        rule = r[1][0]\n        span_pivot = 0\n        relation = {}\n        for e_id, element in enumerate(rule):\n            if not span_doc[span_pivot:]:\n                for extra_id, _, in enumerate(rule[e_id:]):\n                    relation[e_id+extra_id] = None\n                break\n            new_doc = self._tokenizer.tokenize_to_spacy_doc(span_doc[span_pivot:].text)\n            if \"OP\" not in element:\n                relation[e_id] = (span_pivot, span_pivot+1)\n                span_pivot += 1\n            else:\n                if e_id < len(rule)-1:\n                    tmp_rule_1 = [rule[e_id]]\n                    tmp_rule_2 = [rule[e_id+1]]\n                    tmp_matcher = Matcher(self._nlp.vocab)\n                    tmp_matcher.add(0, None, tmp_rule_1)\n                    tmp_matcher.add(1, None, tmp_rule_2)\n                    tmp_matches = sorted([x for x in tmp_matcher(new_doc) if x[1] != x[2]], key=lambda a: a[1])\n\n                    if not tmp_matches:\n                        relation[e_id] = None\n                    else:\n                        matches_1 = [x for x in tmp_matches if x[0] == 0 and x[1] == 0]\n                        if not matches_1:\n                            relation[e_id] = None\n                        else:\n                            _, s1, e1 = matches_1[0]\n                            matches_2 = [x for x in tmp_matches if x[0] == 1]\n                            if not matches_2:\n                                relation[e_id] = (span_pivot, span_pivot + e1)\n                                span_pivot += e1\n                            else:\n                                _, s2, e2 = matches_2[0]\n                                if e1 <= s2:\n                                    relation[e_id] = (span_pivot, span_pivot + e1)\n                                    span_pivot += e1\n                                else:\n                                    relation[e_id] = (span_pivot, span_pivot + s2)\n                                    span_pivot += s2\n                else:\n                    relation[e_id] = (span_pivot, len(span_doc))\n\n        return relation", "docstring": "Get the relations between the each pattern in the spacy rule and the matches\nArgs:\nspan_doc: doc\nr: List\n\nReturns: Dict", "source": "juraj-google-style"}
{"code": "def eig(tensor, name=None):\n    if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:\n        out_dtype = dtypes.complex64\n    elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:\n        out_dtype = dtypes.complex128\n    e, v = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=True, name=name)\n    return (e, v)", "docstring": "Computes the eigen decomposition of a batch of matrices.\n\nThe eigenvalues\nand eigenvectors for a non-Hermitian matrix in general are complex. The\neigenvectors are not guaranteed to be linearly independent.\n\nComputes the eigenvalues and right eigenvectors of the innermost\nN-by-N matrices in `tensor` such that\n`tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.\n\nArgs:\ntensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of\neach inner inner matrix is referenced.\nname: string, optional name of the operation.\n\nReturns:\ne: Eigenvalues. Shape is `[..., N]`. The eigenvalues are not necessarily\nordered.\nv: Eigenvectors. Shape is `[..., N, N]`. The columns of the inner most\nmatrices contain eigenvectors of the corresponding matrices in `tensor`", "source": "github-repos"}
{"code": "def has_datastore(self):\n    (success, result) = self._read_from_hdx('datastore', self.data['id'], 'resource_id', self.actions()['datastore_search'])\n    if (not success):\n        logger.debug(result)\n    elif result:\n        return True\n    return False", "docstring": "Check if the resource has a datastore.\n\nReturns:\nbool: Whether the resource has a datastore or not", "source": "codesearchnet"}
{"code": "def _find_uninitialized(self):\n    return set((name for (name, prop) in self._properties.iteritems() if (not prop._is_initialized(self))))", "docstring": "Internal helper to find uninitialized properties.\n\nReturns:\nA set of property names.", "source": "codesearchnet"}
{"code": "def _genBgTerm_fromXX(self, vTot, vCommon, XX, a=None, c=None):\n    vSpecific = (vTot - vCommon)\n    SP.random.seed(0)\n    if (c == None):\n        c = SP.randn(self.P)\n    XX += (0.001 * SP.eye(XX.shape[0]))\n    L = LA.cholesky(XX, lower=True)\n    R = self.genWeights(self.N, self.P)\n    A = self.genTraitEffect()\n    if (a is not None):\n        A[(0, :)] = a\n    Yc = SP.dot(L, SP.dot(R, A))\n    Yc *= (SP.sqrt(vCommon) / SP.sqrt(Yc.var(0).mean()))\n    R = SP.randn(self.N, self.P)\n    Yi = SP.dot(L, SP.dot(R, SP.diag(c)))\n    Yi *= (SP.sqrt(vSpecific) / SP.sqrt(Yi.var(0).mean()))\n    return (Yc, Yi)", "docstring": "generate background term from SNPs\n\nArgs:\nvTot: variance of Yc+Yi\nvCommon: variance of Yc\nXX: kinship matrix\na: common scales, it can be set for debugging purposes\nc: indipendent scales, it can be set for debugging purposes", "source": "codesearchnet"}
{"code": "def get_data_xlsx(file_name, file_contents=None, on_demand=False):\n    return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)", "docstring": "Loads the new excel format files. Old format files will automatically get loaded as well.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy.", "source": "codesearchnet"}
{"code": "def age(self):\n    date = datetime.today().date()\n    b = self.birthday\n    if b:\n        return int(((date - b).days / 365))\n    return None", "docstring": "Returns a user's age, based on their birthday.\n\nReturns:\ninteger", "source": "codesearchnet"}
{"code": "def send_location(self, room_id, geo_uri, name, thumb_url=None, thumb_info=None, timestamp=None):\n    content_pack = {'geo_uri': geo_uri, 'msgtype': 'm.location', 'body': name}\n    if thumb_url:\n        content_pack['thumbnail_url'] = thumb_url\n    if thumb_info:\n        content_pack['thumbnail_info'] = thumb_info\n    return self.send_message_event(room_id, 'm.room.message', content_pack, timestamp=timestamp)", "docstring": "Send m.location message event\n\nArgs:\nroom_id (str): The room ID to send the event in.\ngeo_uri (str): The geo uri representing the location.\nname (str): Description for the location.\nthumb_url (str): URL to the thumbnail of the location.\nthumb_info (dict): Metadata about the thumbnail, type ImageInfo.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "codesearchnet"}
{"code": "def find_and_replace_channel_refs(self, text):\n    match = True\n    pattern = re.compile('<\n    while match:\n        match = pattern.search(text)\n        if match:\n            text = text.replace(match.group(0), ('\n    return text", "docstring": "Find occurrences of Slack channel referenfces and attempts to\nreplace them with just channel names.\n\nArgs:\ntext (string): The message text\nReturns:\nstring: The message text with channel references replaced.", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n        \n        data = self.rfile.read(size)\n        self.bytes_read += len(data)\n        self._check_length()\n        return data", "docstring": "Read a chunk from rfile buffer and return it.\n\nArgs:\nsize (int): amount of data to read\n\nReturns:\nbytes: Chunk from rfile, limited by size if specified.", "source": "juraj-google-style"}
{"code": "def _load_schema(file_path, name=None):\n    \n    if name is None:\n        \n        name = os.path.splitext(os.path.basename(file_path))[0]\n    if name not in _SCHEMAS:\n        with open(file_path, 'r') as schema_file:\n            _SCHEMAS[name] = json.load(schema_file)\n\n    return _SCHEMAS[name]", "docstring": "Loads the QObj schema for use in future validations.\n\nCaches schema in _SCHEMAS module attribute.\n\nArgs:\nfile_path(str): Path to schema.\nname(str): Given name for schema. Defaults to file_path filename\nwithout schema.\nReturn:\nschema(dict): Loaded schema.", "source": "juraj-google-style"}
{"code": "def __call__(self, utterances_batch: List[str], history_batch: List[List[str]],\n                 states_batch: Optional[list] = None) -> Tuple[List[str], List[float]]:\n        \n        responses, confidences = self.model(utterances_batch)\n\n        \n        if isinstance(confidences[0], list):\n            confidences = [max(c) for c in confidences]\n\n        return responses, confidences", "docstring": "It returns the skill inference result.\n\nOutput is batches of the skill inference results and estimated confidences.\n\nArgs:\nutterances_batch: A batch of utterances.\nhistory_batch: A batch of list typed histories for each utterance.\nstates_batch: Optional. A batch of arbitrary typed states for\neach utterance.\n\nReturns:\nBatches of the skill inference results and estimated confidences.", "source": "juraj-google-style"}
{"code": "def write(self, data):\n    \n    ctx = context.get()\n    if len(data) != 2:\n      logging.error(\"Got bad tuple of length %d (2-tuple expected): %s\",\n                    len(data), data)\n\n    try:\n      key = str(data[0])\n      value = str(data[1])\n    except TypeError:\n      logging.error(\"Expecting a tuple, but got %s: %s\",\n                    data.__class__.__name__, data)\n\n    file_index = key.__hash__() % len(self._filehandles)\n\n    \n    \n    \n    \n    pool = self._pools[file_index]\n    if pool is None:\n      filehandle = self._filehandles[file_index]\n      pool = output_writers.GCSRecordsPool(filehandle=filehandle, ctx=ctx)\n      self._pools[file_index] = pool\n\n    proto = kv_pb.KeyValue()\n    proto.set_key(key)\n    proto.set_value(value)\n    pool.append(proto.Encode())", "docstring": "Write data.\n\nArgs:\ndata: actual data yielded from handler. Type is writer-specific.", "source": "juraj-google-style"}
{"code": "def Send(self, message):\n    \n    if not isinstance(message, common_pb2.Message):\n      raise ValueError(\"Send requires a fleetspeak.Message\")\n\n    if message.destination.service_name == \"system\":\n      raise ValueError(\n          \"Only predefined messages can have destination.service_name == \\\"system\\\"\")\n\n    return self._SendImpl(message)", "docstring": "Send a message through Fleetspeak.\n\nArgs:\nmessage: A message protocol buffer.\nReturns:\nSize of the message in bytes.\nRaises:\nValueError: If message is not a common_pb2.Message.", "source": "juraj-google-style"}
{"code": "def _safe_setattr(obj, name, value):\n    \n    okey = id(obj)\n    if okey in _set_failures or okey in _final_objs:\n        return False\n    \n    import inspect\n    try:\n        if inspect.ismethod(obj):\n            setattr(obj.__func__, name, value)\n            return True\n        else:\n            if isinstance(obj, dict): \n                obj[name] = value\n            else:\n                setattr(obj, name, value)\n            return True\n    except (TypeError, AttributeError):\n        _set_failures.append(okey)\n        msg.warn(\"Failed {}:{} attribute set on {}.\".format(name, value, obj))\n        return False", "docstring": "Safely sets the attribute of the specified object. This includes not\nsetting attributes for final objects and setting __func__ for instancemethod\ntyped objects.\n\nArgs:\nobj: object to set an attribute for.\nname (str): new attribute name.\nvalue: new attribute value.\n\nReturns:\nbool: True if the set attribute was successful.", "source": "juraj-google-style"}
{"code": "def retrieve_token(self, token):\n    headers = self.client._get_private_headers()\n    endpoint = '/tokens/{}'.format(token)\n    return self.client._get((self.client.URL_BASE + endpoint), headers=headers)", "docstring": "Retrieve Token details for a specific Token.\n\nArgs:\ntoken: The identifier of the token.\n\n\nReturns:", "source": "codesearchnet"}
{"code": "def iter_packages(self, name, range_=None, paths=None):\n    for package in iter_packages(name, range_, paths):\n        if (not self.excludes(package)):\n            (yield package)", "docstring": "Same as iter_packages in packages.py, but also applies this filter.\n\nArgs:\nname (str): Name of the package, eg 'maya'.\nrange_ (VersionRange or str): If provided, limits the versions returned\nto those in `range_`.\npaths (list of str, optional): paths to search for packages, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` iterator.", "source": "codesearchnet"}
{"code": "def __init__(self, iterable=None, modify_time=None, update_time=None):\n    if self.__class__ is Map:\n        raise TypeError('Map is an abstract class.')\n    self._data = {}\n    self._index = []\n    self._last_modification_timestamp = modify_time\n    self._last_update_timestamp = update_time\n    self.log = logging.getLogger(__name__)\n    if iterable is not None:\n        for item in iterable:\n            self.Add(item)", "docstring": "Construct a Map object.\n\nArgs:\niterable: A tuple or list that can be iterated over and added to the Map,\ndefaults to None.\nmodify_time: An optional modify time for this Map, defaults to None.\ndefaults to None.\nupdate_time: An optional update time for this Map, defaults to None.\ndefaults to None.\n\nRaises:\nTypeError: If the objects in the iterable are of the wrong type.", "source": "github-repos"}
{"code": "def _process_datum(self, data, input_reader, ctx, transient_shard_state):\n    if (data is not input_readers.ALLOW_CHECKPOINT):\n        self.slice_context.incr(context.COUNTER_MAPPER_CALLS)\n        handler = transient_shard_state.handler\n        if isinstance(handler, map_job.Mapper):\n            handler(self.slice_context, data)\n        else:\n            if input_reader.expand_parameters:\n                result = handler(*data)\n            else:\n                result = handler(data)\n            if util.is_generator(result):\n                for output in result:\n                    if isinstance(output, operation.Operation):\n                        output(ctx)\n                    else:\n                        output_writer = transient_shard_state.output_writer\n                        if (not output_writer):\n                            logging.warning('Handler yielded %s, but no output writer is set.', output)\n                        else:\n                            output_writer.write(output)\n    if ((self._time() - self._start_time) >= parameters.config._SLICE_DURATION_SEC):\n        return False\n    return True", "docstring": "Process a single data piece.\n\nCall mapper handler on the data.\n\nArgs:\ndata: a datum to process.\ninput_reader: input reader.\nctx: mapreduce context\ntransient_shard_state: transient shard state.\n\nReturns:\nTrue if scan should be continued, False if scan should be stopped.", "source": "codesearchnet"}
{"code": "def check(self, solution):\n    return self.func(*(solution[v] for v in self.variables))", "docstring": "Check that a solution satisfies the constraint.\n\nArgs:\nsolution (container):\nAn assignment for the variables in the constraint.\n\nReturns:\nbool: True if the solution satisfies the constraint; otherwise False.\n\nExamples:\nThis example creates a constraint that :math:`a \\\\ne b` on binary variables\nand tests it for two candidate solutions, with additional unconstrained\nvariable c.\n\n>>> import dwavebinarycsp\n>>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],\n...             ['a', 'b'], dwavebinarycsp.BINARY)\n>>> solution = {'a': 1, 'b': 1, 'c': 0}\n>>> const.check(solution)\nFalse\n>>> solution = {'a': 1, 'b': 0, 'c': 0}\n>>> const.check(solution)\nTrue", "source": "codesearchnet"}
{"code": "def _examples_from_path_handler(self, request):\n    \n    examples_count = int(request.args.get('max_examples'))\n    examples_path = request.args.get('examples_path')\n    sampling_odds = float(request.args.get('sampling_odds'))\n    self.example_class = (tf.train.SequenceExample\n        if request.args.get('sequence_examples') == 'true'\n        else tf.train.Example)\n    try:\n      platform_utils.throw_if_file_access_not_allowed(examples_path,\n                                                      self._logdir,\n                                                      self._has_auth_group)\n      example_strings = platform_utils.example_protos_from_path(\n          examples_path, examples_count, parse_examples=False,\n          sampling_odds=sampling_odds, example_class=self.example_class)\n      self.examples = [\n          self.example_class.FromString(ex) for ex in example_strings]\n      self.generate_sprite(example_strings)\n      json_examples = [\n          json_format.MessageToJson(example) for example in self.examples\n      ]\n      self.updated_example_indices = set(range(len(json_examples)))\n      return http_util.Respond(\n          request,\n          {'examples': json_examples,\n           'sprite': True if self.sprite else False}, 'application/json')\n    except common_utils.InvalidUserInputError as e:\n      return http_util.Respond(request, {'error': e.message},\n                               'application/json', code=400)", "docstring": "Returns JSON of the specified examples.\n\nArgs:\nrequest: A request that should contain 'examples_path' and 'max_examples'.\n\nReturns:\nJSON of up to max_examlpes of the examples in the path.", "source": "juraj-google-style"}
{"code": "def __resource_descriptor(self, resource_path, methods):\n    descriptor = {}\n    method_map = {}\n    sub_resource_index = collections.defaultdict(list)\n    sub_resource_map = {}\n    resource_path_tokens = resource_path.split('.')\n    for (service, protorpc_meth_info) in methods:\n        method_info = getattr(protorpc_meth_info, 'method_info', None)\n        path = method_info.get_path(service.api_info)\n        method_id = method_info.method_id(service.api_info)\n        canonical_method_id = self._get_canonical_method_id(method_id)\n        current_resource_path = self._get_resource_path(method_id)\n        if (current_resource_path[:len(resource_path_tokens)] != resource_path_tokens):\n            raise api_exceptions.ToolError('Internal consistency error in resource path {0}'.format(current_resource_path))\n        effective_resource_path = current_resource_path[len(resource_path_tokens):]\n        if effective_resource_path:\n            sub_resource_name = effective_resource_path[0]\n            new_resource_path = '.'.join([resource_path, sub_resource_name])\n            sub_resource_index[new_resource_path].append((service, protorpc_meth_info))\n        else:\n            method_map[canonical_method_id] = self.__method_descriptor(service, method_info, protorpc_meth_info)\n    for (sub_resource, sub_resource_methods) in sub_resource_index.items():\n        sub_resource_name = sub_resource.split('.')[(- 1)]\n        sub_resource_map[sub_resource_name] = self.__resource_descriptor(sub_resource, sub_resource_methods)\n    if method_map:\n        descriptor['methods'] = method_map\n    if sub_resource_map:\n        descriptor['resources'] = sub_resource_map\n    return descriptor", "docstring": "Describes a resource.\n\nArgs:\nresource_path: string, the path of the resource (e.g., 'entries.items')\nmethods: list of tuples of type\n(endpoints.Service, protorpc.remote._RemoteMethodInfo), the methods\nthat serve this resource.\n\nReturns:\nDictionary describing the resource.", "source": "codesearchnet"}
{"code": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n    if attention_mask is not None and attention_mask.dim() == 4:\n        causal_mask = attention_mask\n    else:\n        min_dtype = torch.finfo(dtype).min\n        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n        if sequence_length != 1:\n            causal_mask = torch.triu(causal_mask, diagonal=1)\n        causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n        if attention_mask is not None:\n            causal_mask = causal_mask.clone()\n            mask_length = attention_mask.shape[-1]\n            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n            padding_mask = padding_mask == 0\n            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n    return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\nattention_mask (`torch.Tensor`):\nA 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n`(batch_size, 1, query_length, key_value_length)`.\nsequence_length (`int`):\nThe sequence length being processed.\ntarget_length (`int`):\nThe target length: when generating with static cache, the mask should be as long as the static cache,\nto account for the 0 padding, the part of the cache that is not filled yet.\ndtype (`torch.dtype`):\nThe dtype to use for the 4D attention mask.\ncache_position (`torch.Tensor`):\nIndices depicting the position of the input sequence tokens in the sequence.\nbatch_size (`torch.Tensor`):\nBatch size.", "source": "github-repos"}
{"code": "def load_validator(schema_path, schema):\n    if (os.name == 'nt'):\n        file_prefix = 'file:\n    else:\n        file_prefix = 'file:'\n    resolver = RefResolver((file_prefix + schema_path.replace('\\\\', '/')), schema)\n    validator = Draft4Validator(schema, resolver=resolver)\n    return validator", "docstring": "Create a JSON schema validator for the given schema.\n\nArgs:\nschema_path: The filename of the JSON schema.\nschema: A Python object representation of the same schema.\n\nReturns:\nAn instance of Draft4Validator.", "source": "codesearchnet"}
{"code": "def unset(entity, *types):\n    \n    if not types:\n        types = (TypedField,)\n\n    fields = list(entity._fields.keys())\n    remove = (x for x in fields if isinstance(x, types))\n\n    for field in remove:\n        del entity._fields[field]", "docstring": "Unset the TypedFields on the input `entity`.\n\nArgs:\nentity: A mixbox.Entity object.\n*types: A variable-length list of TypedField subclasses. If not\nprovided, defaults to TypedField.", "source": "juraj-google-style"}
{"code": "def _os_release_info(self):\n    if os.path.isfile(self.os_release_file):\n        with open(self.os_release_file) as release_file:\n            return self._parse_os_release_content(release_file)\n    return {}", "docstring": "Get the information items from the specified os-release file.\n\nReturns:\nA dictionary containing all information items.", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    return {'name': self.name, 'id': self.id, 'type': self.type, 'workflow_id': self.workflow_id, 'queue': self.queue, 'start_time': self.start_time, 'arguments': self.arguments, 'acknowledged': self.acknowledged, 'func_name': self.func_name, 'hostname': self.hostname, 'worker_name': self.worker_name, 'worker_pid': self.worker_pid, 'routing_key': self.routing_key}", "docstring": "Return a dictionary of the job stats.\n\nReturns:\ndict: Dictionary of the stats.", "source": "codesearchnet"}
{"code": "def op_list_to_dict(op_list, convert_variable_to_tensor=True):\n    if not isinstance(op_list, (list, tuple, set)):\n        raise TypeError(f'Variables to save should be passed in a dict or a list. Got {op_list}')\n    op_list = nest.flatten(list(op_list))\n    op_list = sorted(op_list, key=lambda x: x.name)\n    names_to_saveables = {}\n    for var in op_list:\n        resource_or_ref_variable = isinstance(var, resource_variable_ops.BaseResourceVariable) or isinstance(var, ref_variable.RefVariable)\n        if isinstance(var, saveable_object.SaveableObject):\n            names_to_saveables[var.name] = var\n        elif isinstance(var, variables.PartitionedVariable):\n            if var.name in names_to_saveables:\n                raise ValueError(f'At least two variables have the same name: {var.name}')\n            names_to_saveables[var.name] = var\n        elif isinstance(var, variables.Variable) and var._save_slice_info:\n            name = var._save_slice_info.full_name\n            if name in names_to_saveables:\n                if not isinstance(names_to_saveables[name], list):\n                    raise ValueError(f'Mixing slices and non-slices with the same name: {name}')\n                names_to_saveables[name].append(var)\n            else:\n                names_to_saveables[name] = [var]\n        elif isinstance(var, trackable.Trackable) and (not resource_or_ref_variable):\n            trackable_saveables = [factory() if callable(factory) else factory for factory in saveable_objects_from_trackable(var, tf1_saver=True).values()]\n            names_to_saveables.update(op_list_to_dict(trackable_saveables))\n        elif not getattr(var, '_in_graph_mode', True):\n            if not isinstance(var, resource_variable_ops.BaseResourceVariable):\n                raise ValueError(f'Can only save/restore ResourceVariables when eager execution is enabled. Got type: {type(var)}.')\n            set_var = names_to_saveables.setdefault(var._shared_name, var)\n            if set_var is not var:\n                raise ValueError(f\"Two different ResourceVariable objects with the same shared_name '{var._shared_name}' were passed to the Saver. This likely means that they were created in different Graphs or isolated contexts, and may not be checkpointed together.\")\n        else:\n            if convert_variable_to_tensor:\n                if isinstance(var, resource_variable_ops.BaseResourceVariable):\n                    var = var._graph_element\n                else:\n                    var = ops.convert_to_tensor(var, as_ref=True)\n                if not _tensor_comes_from_variable(var):\n                    raise TypeError(f'Variable to save is not a Variable: {var}')\n            if var.op.type == 'ReadVariableOp':\n                name = var.op.inputs[0].op.name\n            else:\n                name = var.op.name\n            if name in names_to_saveables:\n                raise ValueError(f'At least two variables have the same name: {name}')\n            names_to_saveables[name] = var\n    return names_to_saveables", "docstring": "Create a dictionary of names to operation lists.\n\nThis method is only used when the variable name matters (e.g. when saving\nor restoring from a TF1 name-based checkpoint). 
In TF2, this can be called\nfrom `tf.train.Checkpoint.restore` when loading from a name-based checkpoint.\n\nArgs:\nop_list: A (nested) list, tuple, or set of Variables or SaveableObjects.\nconvert_variable_to_tensor: Whether or not to convert single Variables\nwith no slice info into Tensors.\n\nReturns:\nA dictionary of names to the operations that must be saved under\nthat name.  Variables with save_slice_info are grouped together under the\nsame key in no particular order.\n\nRaises:\nTypeError: If the type of op_list or its elements is not supported.\nValueError: If at least two saveables share the same name.", "source": "github-repos"}
{"code": "def filter_embeddings(embeddings, vocab, dim):\n    \n    if not isinstance(embeddings, dict):\n        return\n    _embeddings = np.zeros([len(vocab), dim])\n    for word in vocab:\n        if word in embeddings:\n            word_idx = vocab[word]\n            _embeddings[word_idx] = embeddings[word]\n\n    return _embeddings", "docstring": "Loads word vectors in numpy array.\n\nArgs:\nembeddings (dict): a dictionary of numpy array.\nvocab (dict): word_index lookup table.\n\nReturns:\nnumpy array: an array of word embeddings.", "source": "juraj-google-style"}
{"code": "def astype(array, y):\n  \n  if isinstance(y, autograd.core.Node):\n    return array.astype(numpy.array(y.value).dtype)\n  return array.astype(numpy.array(y).dtype)", "docstring": "A functional form of the `astype` method.\n\nArgs:\narray: The array or number to cast.\ny: An array or number, as the input, whose type should be that of array.\n\nReturns:\nAn array or number with the same dtype as `y`.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does\nnot make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def log_flush_for_interval(self, log_type, interval):\n    if (not log_type):\n        log_type = 'policies'\n    interval = interval.replace(' ', '+')\n    flush_url = '{}/{}/interval/{}'.format(self.url, log_type, interval)\n    self.jss.delete(flush_url)", "docstring": "Flush logs for an interval of time.\n\nArgs:\nlog_type (str): Only documented type is \"policies\". This\nwill be applied by default if nothing is passed.\ninterval (str): Combination of \"Zero\", \"One\", \"Two\",\n\"Three\", \"Six\", and \"Day\", \"Week\", \"Month\", \"Year\". e.g.\n(\"Three+Months\") Please note: The documentation for this\nspecifies the singular form (e.g. \"Month\"), and plural\n(\"Months\") at different times, and further the\nconstruction is listed as \"THREE MONTHS\" elsewhere.\nLimited testing indicates that pluralization does not\nmatter, nor does capitalization.\nPlease test!\n\nNo validation is performed on this prior to the request\nbeing made.\n\nRaises:\nJSSDeleteError if provided url_path has a >= 400 response.", "source": "codesearchnet"}
{"code": "def set_config(self, key, value):\n    keyname = ('config:' + key)\n    self.kvstore.set(keyname, value)", "docstring": "Set a persistent config key to a value, stored in the registry\n\nArgs:\nkey (string): The key name\nvalue (string): The key value", "source": "codesearchnet"}
{"code": "def __init__(self, ascii_codepage='cp1252', registry_file_reader=None):\n    \n    super(WinRegistry, self).__init__()\n    self._ascii_codepage = ascii_codepage\n    self._registry_file_reader = registry_file_reader\n    self._registry_files = {}\n    self._user_registry_files = {}", "docstring": "Initializes the Windows Registry.\n\nArgs:\nascii_codepage (Optional[str]): ASCII string codepage.\nregistry_file_reader (Optional[WinRegistryFileReader]): Windows Registry\nfile reader.", "source": "juraj-google-style"}
{"code": "def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]:\n    dynamic = False\n    if padding is None:\n        padding = (stride - 1 + dilation * (kernel_size - 1)) \n        return (padding, dynamic)\n    if isinstance(padding, str):\n        padding = padding.lower()\n        if padding == 'same':\n            if stride == 1 and dilation * (kernel_size - 1) % 2 == 0:\n                padding = (stride - 1 + dilation * (kernel_size - 1)) \n            else:\n                padding = 0\n                dynamic = True\n        elif padding == 'valid':\n            padding = 0\n        else:\n            padding = (stride - 1 + dilation * (kernel_size - 1)) \n    return (padding, dynamic)", "docstring": "Utility function to get the tuple padding value given the kernel_size and padding.\n\nArgs:\npadding (Union[`str`, `int`], *optional*):\nPadding value, can be either `\"same\"`, `\"valid\"`. If a different value is provided the default padding from\nPyTorch is used.\nkernel_size (`int`, *optional*, defaults to 7):\nKernel size of the convolution layers.\nstride (`int`, *optional*, defaults to 1):\nStride value of the convolution layers.\ndilation (`int`, *optional*, defaults to 1):\nDilation value of the convolution layers.", "source": "github-repos"}
{"code": "def imshow_bboxes(img, bboxes, colors='green', top_k=(- 1), thickness=1, show=True, win_name='', wait_time=0, out_file=None):\n    img = imread(img)\n    if isinstance(bboxes, np.ndarray):\n        bboxes = [bboxes]\n    if (not isinstance(colors, list)):\n        colors = [colors for _ in range(len(bboxes))]\n    colors = [color_val(c) for c in colors]\n    assert (len(bboxes) == len(colors))\n    for (i, _bboxes) in enumerate(bboxes):\n        _bboxes = _bboxes.astype(np.int32)\n        if (top_k <= 0):\n            _top_k = _bboxes.shape[0]\n        else:\n            _top_k = min(top_k, _bboxes.shape[0])\n        for j in range(_top_k):\n            left_top = (_bboxes[(j, 0)], _bboxes[(j, 1)])\n            right_bottom = (_bboxes[(j, 2)], _bboxes[(j, 3)])\n            cv2.rectangle(img, left_top, right_bottom, colors[i], thickness=thickness)\n    if show:\n        imshow(img, win_name, wait_time)\n    if (out_file is not None):\n        imwrite(img, out_file)", "docstring": "Draw bboxes on an image.\n\nArgs:\nimg (str or ndarray): The image to be displayed.\nbboxes (list or ndarray): A list of ndarray of shape (k, 4).\ncolors (list[str or tuple or Color]): A list of colors.\ntop_k (int): Plot the first k bboxes only if set positive.\nthickness (int): Thickness of lines.\nshow (bool): Whether to show the image.\nwin_name (str): The window name.\nwait_time (int): Value of waitKey param.\nout_file (str, optional): The filename to write the image.", "source": "codesearchnet"}
{"code": "def _copy(src, dst, src_is_storage, dst_is_storage):\n    if (src_is_storage and dst_is_storage):\n        system_src = get_instance(src)\n        system_dst = get_instance(dst)\n        if (system_src is system_dst):\n            if (system_src.relpath(src) == system_dst.relpath(dst)):\n                raise same_file_error((\"'%s' and '%s' are the same file\" % (src, dst)))\n            try:\n                return system_dst.copy(src, dst)\n            except (UnsupportedOperation, ObjectException):\n                pass\n        for (caller, called, method) in ((system_dst, system_src, 'copy_from_%s'), (system_src, system_dst, 'copy_to_%s')):\n            if hasattr(caller, (method % called.storage)):\n                try:\n                    return getattr(caller, (method % called.storage))(src, dst, called)\n                except (UnsupportedOperation, ObjectException):\n                    continue\n    with cos_open(src, 'rb') as fsrc:\n        with cos_open(dst, 'wb') as fdst:\n            for stream in (fsrc, fdst):\n                try:\n                    buffer_size = getattr(stream, '_buffer_size')\n                    break\n                except AttributeError:\n                    continue\n            else:\n                buffer_size = COPY_BUFSIZE\n            copyfileobj(fsrc, fdst, buffer_size)", "docstring": "Copies file from source to destination\n\nArgs:\nsrc (str or file-like object): Source file.\ndst (str or file-like object): Destination file.\nsrc_is_storage (bool): Source is storage.\ndst_is_storage (bool): Destination is storage.", "source": "codesearchnet"}
{"code": "def _probe_services(self, handle):\n    code = 10240\n\n    def event_filter_func(event):\n        if ((event.command_class == 4) and (event.command == 2)):\n            (event_handle,) = unpack('B', event.payload[0:1])\n            return (event_handle == handle)\n        return False\n\n    def end_filter_func(event):\n        if ((event.command_class == 4) and (event.command == 1)):\n            (event_handle,) = unpack('B', event.payload[0:1])\n            return (event_handle == handle)\n        return False\n    payload = struct.pack('<BHHBH', handle, 1, 65535, 2, code)\n    try:\n        response = self._send_command(4, 1, payload)\n    except InternalTimeoutError:\n        return (False, {'reason': 'Timeout waiting for command response'})\n    (handle, result) = unpack('<BH', response.payload)\n    if (result != 0):\n        return (False, None)\n    events = self._wait_process_events(0.5, event_filter_func, end_filter_func)\n    gatt_events = [x for x in events if event_filter_func(x)]\n    end_events = [x for x in events if end_filter_func(x)]\n    if (len(end_events) == 0):\n        return (False, None)\n    end_event = end_events[0]\n    (_, result, _) = unpack('<BHH', end_event.payload)\n    if (result != 0):\n        self._logger.warn(('Error enumerating GATT table, protocol error code = %d (0x%X)' % (result, result)))\n        return (False, None)\n    services = {}\n    for event in gatt_events:\n        process_gatt_service(services, event)\n    return (True, {'services': services})", "docstring": "Probe for all primary services and characteristics in those services\n\nArgs:\nhandle (int): the connection handle to probe", "source": "codesearchnet"}
{"code": "def is_valid_callsign(self, callsign, timestamp=timestamp_now):\n    try:\n        if self.get_all(callsign, timestamp):\n            return True\n    except KeyError:\n        return False", "docstring": "Checks if a callsign is valid\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nbool: True / False\n\nExample:\nThe following checks if \"DH1TW\" is a valid callsign\n\n>>> from pyhamtools import LookupLib, Callinfo\n>>> my_lookuplib = LookupLib(lookuptype=\"countryfile\")\n>>> cic = Callinfo(my_lookuplib)\n>>> cic.is_valid_callsign(\"DH1TW\")\nTrue", "source": "codesearchnet"}
{"code": "def __init__(self, port=None,\n                 max_length=ControllerMaxLen.OFPCML_NO_BUFFER):\n        \n        super().__init__(action_type=ActionType.OFPAT_OUTPUT, length=16)\n        self.port = port\n        self.max_length = max_length", "docstring": "Create a ActionOutput with the optional parameters below.\n\nArgs:\nport (:class:`Port` or :class:`int`): Output port.\nmax_length (int): Max length to send to controller.", "source": "juraj-google-style"}
{"code": "def stack(self, trees: Iterable[Tree[Array['*s']]]) -> Tree[Array['n_trees *s']]:\n    return self.backend.map(_stack, *trees)", "docstring": "Stack a tree of `Iterable[Array]`.\n\nSupports `jax`, `tf`, `np`.\n\nExample:\n\n```python\netree.stack([\n{'a': np.array([1])},\n{'a': np.array([2])},\n{'a': np.array([3])},\n]) == {\n'a': np.array([[1], [2], [3]])\n}\n```\n\nArgs:\ntrees: The list of tree to stack\n\nReturns:\nTree of arrays.", "source": "github-repos"}
{"code": "def insert_top(self, node):\n    if (not isinstance(node, grammar.STATEMENTS)):\n        raise ValueError\n    self.to_insert_top.append(node)", "docstring": "Insert statements at the top of the function body.\n\nNote that multiple calls to `insert_top` will result in the statements\nbeing prepended in that order; this is different behavior from `prepend`.\n\nArgs:\nnode: The statement to prepend.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "codesearchnet"}
{"code": "def get_dirty_items(item_list, flag_list):\n    assert (len(item_list) == len(flag_list))\n    dirty_items = [item for (item, flag) in zip(item_list, flag_list) if (not flag)]\n    return dirty_items", "docstring": "Returns each item in item_list where not flag in flag_list\n\nArgs:\nitem_list (list):\nflag_list (list):\n\nReturns:\ndirty_items", "source": "codesearchnet"}
{"code": "def complain(distribution_name):\n    try:\n        pkg_resources.get_distribution(distribution_name)\n        warnings.warn('The {pkg} distribution is now obsolete. Please `pip uninstall {pkg}`. In the future, this warning will become an ImportError.'.format(pkg=distribution_name), DeprecationWarning)\n    except pkg_resources.DistributionNotFound:\n        pass", "docstring": "Issue a warning if `distribution_name` is installed.\n\nIn a future release, this method will be updated to raise ImportError\nrather than just send a warning.\n\nArgs:\ndistribution_name (str): The name of the obsolete distribution.", "source": "codesearchnet"}
{"code": "def load_panel_app(adapter, panel_id=None, institute='cust000'):\n    \n    base_url = 'https:\n    \n    hgnc_map = adapter.genes_by_alias()\n    \n    if panel_id:\n        panel_ids = [panel_id]\n\n    if not panel_id:\n        \n        LOG.info(\"Fetching all panel app panels\")\n        data = get_request(base_url.format('list_panels'))\n    \n        json_lines = json.loads(data)\n        \n        panel_ids = [panel_info['Panel_Id'] for panel_info in json_lines['result']]\n    \n    for panel_id in panel_ids:\n        panel_data = get_request(base_url.format('get_panel') + panel_id)\n        \n        parsed_panel = parse_panel_app_panel(\n            panel_info = json.loads(panel_data)['result'], \n            hgnc_map=hgnc_map,\n            institute=institute\n        )\n        parsed_panel['panel_id'] = panel_id\n        \n        if len(parsed_panel['genes']) == 0:\n            LOG.warning(\"Panel {} is missing genes. Skipping.\".format(parsed_panel['display_name']))\n            continue\n        \n        try:\n            adapter.load_panel(parsed_panel=parsed_panel)\n        except Exception as err:\n            raise err", "docstring": "Load PanelApp panels into scout database\n\nIf no panel_id load all PanelApp panels\n\nArgs:\nadapter(scout.adapter.MongoAdapter)\npanel_id(str): The panel app panel id", "source": "juraj-google-style"}
{"code": "def create_redis_client(redis_address, password=None):\n    \n    redis_ip_address, redis_port = redis_address.split(\":\")\n    \n    \n    return redis.StrictRedis(\n        host=redis_ip_address, port=int(redis_port), password=password)", "docstring": "Create a Redis client.\n\nArgs:\nThe IP address, port, and password of the Redis server.\n\nReturns:\nA Redis client.", "source": "juraj-google-style"}
{"code": "def gaussian_pdf(std=10.0, mean=0.0):\n    \n    norm_const = 1.0\n    def pdf(x):\n        return norm_const*np.exp(-0.5 * ((x-mean)/std)**2) * \\\n            np.sin(np.pi/180.0 * x)\n    norm_dev = quad(pdf, 0.0, 180.0)[0]\n    \n    norm_const /= norm_dev \n    return pdf", "docstring": "Gaussian PDF for orientation averaging.\n\nArgs:\nstd: The standard deviation in degrees of the Gaussian PDF\nmean: The mean in degrees of the Gaussian PDF.  This should be a number\nin the interval [0, 180)\n\nReturns:\npdf(x), a function that returns the value of the spherical Jacobian-\nnormalized Gaussian PDF with the given STD at x (degrees). It is\nnormalized for the interval [0, 180].", "source": "juraj-google-style"}
{"code": "def setup_service(api_name, api_version, credentials=None):\n    if (not credentials):\n        credentials = oauth2client.client.GoogleCredentials.get_application_default()\n    return apiclient.discovery.build(api_name, api_version, credentials=credentials)", "docstring": "Configures genomics API client.\n\nArgs:\napi_name: Name of the Google API (for example: \"genomics\")\napi_version: Version of the API (for example: \"v2alpha1\")\ncredentials: Credentials to be used for the gcloud API calls.\n\nReturns:\nA configured Google Genomics API client with appropriate credentials.", "source": "codesearchnet"}
{"code": "def add_institute(self, institute_obj):\n    internal_id = institute_obj['internal_id']\n    display_name = institute_obj['internal_id']\n    if self.institute(institute_id=internal_id):\n        raise IntegrityError('Institute {0} already exists in database'.format(display_name))\n    LOG.info('Adding institute with internal_id: {0} and display_name: {1}'.format(internal_id, display_name))\n    insert_info = self.institute_collection.insert_one(institute_obj)\n    LOG.info('Institute saved')", "docstring": "Add a institute to the database\n\nArgs:\ninstitute_obj(Institute)", "source": "codesearchnet"}
{"code": "def reset(self, *args):\n    self.resource = self.resource.reset(list(args))\n    return self", "docstring": "Resets any of the tokens for this Application.\nNote that you may have to reauthenticate afterwards.\n\nUsage:\napplication.reset('api_token')\napplication.reset('api_token', 'totp_secret')\n\nArgs:\n*args (list of str): one or more of\n['api_token', 'subscription_token', 'totp_secret']\n\nReturns:\nThe Application.", "source": "codesearchnet"}
{"code": "def sg_log(tensor, opt):\n    r\n    return tf.log(tensor + tf.sg_eps, name=opt.name)", "docstring": "r\"\"\"Log transform a dense tensor\n\nSee `tf.log()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` ( automatically given by chain )\nopt:\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def convert_to_experiment_list(experiments):\n    exp_list = experiments\n    if (experiments is None):\n        exp_list = []\n    elif isinstance(experiments, Experiment):\n        exp_list = [experiments]\n    elif (type(experiments) is dict):\n        exp_list = [Experiment.from_json(name, spec) for (name, spec) in experiments.items()]\n    if ((type(exp_list) is list) and all((isinstance(exp, Experiment) for exp in exp_list))):\n        if (len(exp_list) > 1):\n            logger.warning('All experiments will be using the same SearchAlgorithm.')\n    else:\n        raise TuneError('Invalid argument: {}'.format(experiments))\n    return exp_list", "docstring": "Produces a list of Experiment objects.\n\nConverts input from dict, single experiment, or list of\nexperiments to list of experiments. If input is None,\nwill return an empty list.\n\nArguments:\nexperiments (Experiment | list | dict): Experiments to run.\n\nReturns:\nList of experiments.", "source": "codesearchnet"}
{"code": "def translate_file(estimator, subtokenizer, input_file, output_file=None, print_all_translations=True):\n    batch_size = _DECODE_BATCH_SIZE\n    (sorted_inputs, sorted_keys) = _get_sorted_inputs(input_file)\n    num_decode_batches = (((len(sorted_inputs) - 1) \n\n    def input_generator():\n        'Yield encoded strings from sorted_inputs.'\n        for (i, line) in enumerate(sorted_inputs):\n            if ((i % batch_size) == 0):\n                batch_num = ((i \n                print(('Decoding batch %d out of %d.' % (batch_num, num_decode_batches)))\n            (yield _encode_and_add_eos(line, subtokenizer))\n\n    def input_fn():\n        'Created batched dataset of encoded inputs.'\n        ds = tf.data.Dataset.from_generator(input_generator, tf.int64, tf.TensorShape([None]))\n        ds = ds.padded_batch(batch_size, [None])\n        return ds\n    translations = []\n    for (i, prediction) in enumerate(estimator.predict(input_fn)):\n        translation = _trim_and_decode(prediction['outputs'], subtokenizer)\n        translations.append(translation)\n        if print_all_translations:\n            print('Translating:')\n            print(('\\tInput: %s' % sorted_inputs[i]))\n            print(('\\tOutput: %s\\n' % translation))\n            print(('=' * 100))\n    if (output_file is not None):\n        if tf.gfile.IsDirectory(output_file):\n            raise ValueError('File output is a directory, will not save outputs to file.')\n        tf.logging.info(('Writing to file %s' % output_file))\n        with tf.gfile.Open(output_file, 'w') as f:\n            for index in xrange(len(sorted_keys)):\n                f.write(('%s\\n' % translations[sorted_keys[index]]))", "docstring": "Translate lines in file, and save to output file if specified.\n\nArgs:\nestimator: tf.Estimator used to generate the translations.\nsubtokenizer: Subtokenizer object for encoding and decoding source and\ntranslated lines.\ninput_file: file containing lines to translate\noutput_file: file that stores the generated translations.\nprint_all_translations: If true, all translations are printed to stdout.\n\nRaises:\nValueError: if output file is invalid.", "source": "codesearchnet"}
{"code": "def AddNEP5Token(self, token):\n    if (token.ScriptHash.ToBytes() in self._tokens.keys()):\n        logger.error('Token already in wallet')\n        return\n    self._tokens[token.ScriptHash.ToBytes()] = token", "docstring": "Add a NEP-5 compliant token to the wallet.\n\nArgs:\ntoken (NEP5Token): an instance of type neo.Wallets.NEP5Token.\n\nNote:\nPrints a warning to the console if the token already exists in the wallet.", "source": "codesearchnet"}
{"code": "def list_pop(list_, i, opts):\n    assert isinstance(opts, ListPopOpts)\n    if isinstance(list_, tensor_array_ops.TensorArray):\n        raise ValueError('TensorArray does not support item removal')\n    elif tensor_util.is_tf_type(list_):\n        if list_.dtype == dtypes.variant:\n            return _tf_tensor_list_pop(list_, i, opts)\n        else:\n            raise ValueError('tensor lists are expected to be Tensors with dtype=tf.variant, instead found %s' % list_)\n    else:\n        return _py_list_pop(list_, i)", "docstring": "The list pop function.\n\nNote: it is unspecified where list_ will be mutated or not. If list_ is\na TensorFlow entity, it will not be typically mutated. If list_ is a plain\nlist, it will be. In general, if the list is mutated then the return value\nshould point to the original entity.\n\nArgs:\nlist_: An entity that supports pop semantics.\ni: Optional index to pop from. May be None.\nopts: A ListPopOpts.\n\nReturns:\nTuple (x, out_list_):\nout_list_: same as list_, after the removal was performed.\nx: the removed element value.\n\nRaises:\nValueError: if list_ is not of a known list-like type or the operation is\nnot supported for that type.", "source": "github-repos"}
{"code": "def device_type_from_string(cl_device_type_str):\n    \n    cl_device_type_str = cl_device_type_str.upper()\n    if hasattr(cl.device_type, cl_device_type_str):\n        return getattr(cl.device_type, cl_device_type_str)\n    return None", "docstring": "Converts values like ``gpu`` to a pyopencl device type string.\n\nSupported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned.\n\nArgs:\ncl_device_type_str (str): The string we want to convert to a device type.\n\nReturns:\ncl.device_type: the pyopencl device type.", "source": "juraj-google-style"}
{"code": "def list_experiments(self, collection_name):\n    exp = ExperimentResource(name='', collection_name=collection_name, coord_frame='foo')\n    return self._list_resource(exp)", "docstring": "List all experiments that belong to a collection.\n\nArgs:\ncollection_name (string): Name of the parent collection.\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def bofh_excuse(how_many=1):\n    excuse_path = os.path.join(os.path.dirname(__file__), 'bofh_excuses.json')\n    with open(excuse_path, 'r') as _f:\n        excuse_dict = json.load(_f)\n    return [generate_random_string(excuse_dict) for _ in range(int(how_many))]", "docstring": "Generate random BOFH themed technical excuses!\n\nArgs:\nhow_many: Number of excuses to generate. (Default: 1)\n\nReturns:\nA list of BOFH excuses.", "source": "codesearchnet"}
{"code": "def get_rng(obj=None):\n    seed = (((id(obj) + os.getpid()) + int(datetime.now().strftime('%Y%m%d%H%M%S%f'))) % 4294967295)\n    if (_RNG_SEED is not None):\n        seed = _RNG_SEED\n    return np.random.RandomState(seed)", "docstring": "Get a good RNG seeded with time, pid and the object.\n\nArgs:\nobj: some object to use to generate random seed.\nReturns:\nnp.random.RandomState: the RNG.", "source": "codesearchnet"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Create an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\nrequest: (CloudbuildProjectsGithubEnterpriseConfigsCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def FindClonedClients(token=None):\n    index = client_index.CreateClientIndex(token=token)\n    clients = index.LookupClients(['.'])\n    hw_infos = _GetHWInfos(clients, token=token)\n    clients_with_multiple_serials = [client_id for (client_id, serials) in iteritems(hw_infos) if (len(serials) > 1)]\n    client_list = aff4.FACTORY.MultiOpen(clients_with_multiple_serials, age=aff4.ALL_TIMES, token=token)\n    cloned_clients = []\n    for c in client_list:\n        hwis = c.GetValuesForAttribute(c.Schema.HARDWARE_INFO)\n        max_index = {}\n        min_index = {}\n        ids = set()\n        for (i, hwi) in enumerate(hwis):\n            s = hwi.serial_number\n            max_index[s] = i\n            if (s not in min_index):\n                min_index[s] = i\n            ids.add(s)\n        ranges = []\n        for hwid in ids:\n            ranges.append((min_index[hwid], max_index[hwid]))\n        ranges.sort()\n        for i in range((len(ranges) - 1)):\n            if (ranges[i][1] > ranges[(i + 1)][0]):\n                cloned_clients.append(c)\n                msg = 'Found client with multiple, overlapping serial numbers: %s'\n                logging.info(msg, c.urn)\n                for hwi in c.GetValuesForAttribute(c.Schema.HARDWARE_INFO):\n                    logging.info('%s %s', hwi.age, hwi.serial_number)\n                break\n    return cloned_clients", "docstring": "A script to find multiple machines reporting the same client_id.\n\nThis script looks at the hardware serial numbers that a client reported in\nover time (they get collected with each regular interrogate). We have seen\nthat sometimes those serial numbers change - for example when a disk is put\nin a new machine - so reporting multiple serial numbers does not flag a client\nimmediately as a cloned machine. In order to be shown here by this script, the\nserial number has to be alternating between two values.\n\nArgs:\ntoken: datastore token.\n\nReturns:\nA list of clients that report alternating hardware ids.", "source": "codesearchnet"}
{"code": "def build_from_token_counts(self, token_counts, min_count, num_iterations=4, reserved_tokens=None, max_subtoken_length=None):\n    if (reserved_tokens is None):\n        reserved_tokens = RESERVED_TOKENS\n    else:\n        for (default, proposed) in zip(RESERVED_TOKENS, reserved_tokens):\n            if (default != proposed):\n                raise ValueError('RESERVED_TOKENS must be a prefix of reserved_tokens.')\n    alphabet_tokens = chain(six.iterkeys(token_counts), [native_to_unicode(t) for t in reserved_tokens])\n    self._init_alphabet_from_tokens(alphabet_tokens)\n    self._init_subtokens_from_list(list(self._alphabet), reserved_tokens=reserved_tokens)\n    if (min_count < 1):\n        min_count = 1\n    for i in range(num_iterations):\n        tf.logging.info('Iteration {0}'.format(i))\n        subtoken_counts = collections.defaultdict(int)\n        for (token, count) in six.iteritems(token_counts):\n            iter_start_time = time.time()\n            escaped_token = _escape_token(token, self._alphabet)\n            subtokens = self._escaped_token_to_subtoken_strings(escaped_token)\n            start = 0\n            for subtoken in subtokens:\n                last_position = (len(escaped_token) + 1)\n                if (max_subtoken_length is not None):\n                    last_position = min(last_position, (start + max_subtoken_length))\n                for end in range((start + 1), last_position):\n                    new_subtoken = escaped_token[start:end]\n                    subtoken_counts[new_subtoken] += count\n                start += len(subtoken)\n            iter_time_secs = (time.time() - iter_start_time)\n            if (iter_time_secs > 0.1):\n                tf.logging.info(u'Processing token [{0}] took {1} seconds, consider setting Text2TextProblem.max_subtoken_length to a smaller value.'.format(token, iter_time_secs))\n        len_to_subtoken_strings = []\n        for (subtoken_string, count) in six.iteritems(subtoken_counts):\n            lsub = len(subtoken_string)\n            if (count >= min_count):\n                while (len(len_to_subtoken_strings) <= lsub):\n                    len_to_subtoken_strings.append(set())\n                len_to_subtoken_strings[lsub].add(subtoken_string)\n        new_subtoken_strings = []\n        for lsub in range((len(len_to_subtoken_strings) - 1), 0, (- 1)):\n            subtoken_strings = len_to_subtoken_strings[lsub]\n            for subtoken_string in subtoken_strings:\n                count = subtoken_counts[subtoken_string]\n                if (count >= min_count):\n                    if (subtoken_string not in self._alphabet):\n                        new_subtoken_strings.append((count, subtoken_string))\n                    for l in range(1, lsub):\n                        subtoken_counts[subtoken_string[:l]] -= count\n        new_subtoken_strings.extend(((subtoken_counts.get(a, 0), a) for a in self._alphabet))\n        new_subtoken_strings.sort(reverse=True)\n        new_subtoken_strings = [subtoken for (_, subtoken) in new_subtoken_strings]\n        if reserved_tokens:\n            escaped_reserved_tokens = [_escape_token(native_to_unicode(t), self._alphabet) for t in reserved_tokens]\n            new_subtoken_strings = (escaped_reserved_tokens + new_subtoken_strings)\n        self._init_subtokens_from_list(new_subtoken_strings)\n        tf.logging.info(('vocab_size = %d' % self.vocab_size))", "docstring": "Train a SubwordTextEncoder based on a dictionary of word counts.\n\nArgs:\ntoken_counts: a 
dictionary of Unicode strings to int.\nmin_count: an integer - discard subtokens with lower counts.\nnum_iterations: an integer.  how many iterations of refinement.\nreserved_tokens: List of reserved tokens. The global variable\n`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this\nargument is `None`, it will use `RESERVED_TOKENS`.\nmax_subtoken_length: Maximum length of a subtoken. If this is not set,\nthen the runtime and memory use of creating the vocab is quadratic in\nthe length of the longest token. If this is set, then it is instead\nO(max_subtoken_length * length of longest token).\n\nRaises:\nValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it\nis not clear what the space is being reserved for, or when it will be\nfilled in.", "source": "codesearchnet"}
{"code": "def read_from_file(path, file_type='text', exception=ScriptWorkerException):\n    FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'}\n    if (file_type not in FILE_TYPE_MAP):\n        raise exception('Unknown file_type {} not in {}!'.format(file_type, FILE_TYPE_MAP))\n    try:\n        with open(path, FILE_TYPE_MAP[file_type]) as fh:\n            return fh.read()\n    except (OSError, FileNotFoundError) as exc:\n        raise exception(\"Can't read_from_file {}: {}\".format(path, str(exc)))", "docstring": "Read from ``path``.\n\nSmall helper function to read from ``file``.\n\nArgs:\npath (str): the path to read from.\nfile_type (str, optional): the type of file. Currently accepts\n``text`` or ``binary``. Defaults to ``text``.\nexception (Exception, optional): the exception to raise\nif unable to read from the file.  Defaults to ``ScriptWorkerException``.\n\nReturns:\nNone: if unable to read from ``path`` and ``exception`` is ``None``\nstr or bytes: the contents of ``path``\n\nRaises:\nException: if ``exception`` is set.", "source": "codesearchnet"}
{"code": "def write_compacted(g):\n    d_nodes = {}\n    d_edges = {}\n\n    def conv(value):\n        if isinstance(value, basestring):\n            return value.strip('\"')\n        else:\n            return value\n    for node in g.nodes():\n        label = None\n        attrs = []\n        for (k, v) in sorted(g.node_attributes(node)):\n            v_ = conv(v)\n            if (k == 'label'):\n                label = v_\n            else:\n                attrs.append((k, v_))\n        value = ((node, label) if label else node)\n        d_nodes.setdefault(tuple(attrs), []).append(value)\n    for edge in g.edges():\n        attrs = [(k, conv(v)) for (k, v) in sorted(g.edge_attributes(edge))]\n        label = str(g.edge_label(edge))\n        value = (tuple((list(edge) + [label])) if label else edge)\n        d_edges.setdefault(tuple(attrs), []).append(tuple(value))\n    doc = dict(nodes=d_nodes.items(), edges=d_edges.items())\n    contents = str(doc)\n    return contents", "docstring": "Write a graph in our own compacted format.\n\nReturns:\nstr.", "source": "codesearchnet"}
{"code": "def validate_with_tags(self, tags, confidence):\n    result = {'intent_type': self.name}\n    intent_confidence = 0.0\n    local_tags = tags[:]\n    used_tags = []\n    for (require_type, attribute_name) in self.requires:\n        (required_tag, canonical_form, confidence) = find_first_tag(local_tags, require_type)\n        if (not required_tag):\n            result['confidence'] = 0.0\n            return (result, [])\n        result[attribute_name] = canonical_form\n        if (required_tag in local_tags):\n            local_tags.remove(required_tag)\n        used_tags.append(required_tag)\n        intent_confidence += confidence\n    if (len(self.at_least_one) > 0):\n        best_resolution = resolve_one_of(tags, self.at_least_one)\n        if (not best_resolution):\n            result['confidence'] = 0.0\n            return (result, [])\n        else:\n            for key in best_resolution:\n                result[key] = best_resolution[key][0].get('key')\n                intent_confidence += 1.0\n            used_tags.append(best_resolution)\n            if (best_resolution in local_tags):\n                local_tags.remove(best_resolution)\n    for (optional_type, attribute_name) in self.optional:\n        (optional_tag, canonical_form, conf) = find_first_tag(local_tags, optional_type)\n        if ((not optional_tag) or (attribute_name in result)):\n            continue\n        result[attribute_name] = canonical_form\n        if (optional_tag in local_tags):\n            local_tags.remove(optional_tag)\n        used_tags.append(optional_tag)\n        intent_confidence += 1.0\n    total_confidence = ((intent_confidence / len(tags)) * confidence)\n    (target_client, canonical_form, confidence) = find_first_tag(local_tags, CLIENT_ENTITY_NAME)\n    result['target'] = (target_client.get('key') if target_client else None)\n    result['confidence'] = total_confidence\n    return (result, used_tags)", "docstring": "Validate weather tags has required entites for this intent to fire\n\nArgs:\ntags(list): Tags and Entities used for validation\nconfidence(float): ?\n\nReturns:\nintent, tags: Returns intent and tags used by the intent on\nfalure to meat required entities then returns intent with confidence\nof 0.0 and an empty list for tags.", "source": "codesearchnet"}
{"code": "def _ParseFileData(self, knowledge_base, file_object):\n    \n    line_reader = line_reader_file.BinaryLineReader(file_object)\n\n    try:\n      reader = line_reader_file.BinaryDSVReader(line_reader, b':')\n    except csv.Error as exception:\n      raise errors.PreProcessFail(\n          'Unable to read: {0:s} with error: {1!s}'.format(\n              self.ARTIFACT_DEFINITION_NAME, exception))\n\n    for row in reader:\n      if len(row) < 7 or not row[0] or not row[2]:\n        \n        continue\n\n      try:\n        username = row[0].decode('utf-8')\n      except UnicodeDecodeError:\n        \n        logger.error('Unable to decode username.')\n        continue\n\n      try:\n        identifier = row[2].decode('utf-8')\n      except UnicodeDecodeError:\n        \n        logger.error('Unable to decode identifier.')\n        continue\n\n      group_identifier = None\n      if row[3]:\n        try:\n          group_identifier = row[3].decode('utf-8')\n        except UnicodeDecodeError:\n          \n          logger.error('Unable to decode group identifier.')\n\n      full_name = None\n      if row[4]:\n        try:\n          full_name = row[4].decode('utf-8')\n        except UnicodeDecodeError:\n          \n          logger.error('Unable to decode full name.')\n\n      user_directory = None\n      if row[5]:\n        try:\n          user_directory = row[5].decode('utf-8')\n        except UnicodeDecodeError:\n          \n          logger.error('Unable to decode user directory.')\n\n      shell = None\n      if row[6]:\n        try:\n          shell = row[6].decode('utf-8')\n        except UnicodeDecodeError:\n          \n          logger.error('Unable to decode shell.')\n\n      user_account = artifacts.UserAccountArtifact(\n          identifier=identifier, username=username)\n      user_account.group_identifier = group_identifier\n      user_account.full_name = full_name\n      user_account.user_directory = user_directory\n      user_account.shell = shell\n\n      try:\n        knowledge_base.AddUserAccount(user_account)\n      except KeyError:\n        \n        pass", "docstring": "Parses file content (data) for user account preprocessing attributes.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_object (dfvfs.FileIO): file-like object that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def expandvars(text, environ=None):\n    \n    if '$' not in text:\n        return text\n\n    i = 0\n    if environ is None:\n        environ = os.environ\n\n    while True:\n        m = ENV_VAR_REGEX.search(text, i)\n        if not m:\n            break\n        i, j = m.span(0)\n        name = m.group(1)\n        if name.startswith('{') and name.endswith('}'):\n            name = name[1:-1]\n        if name in environ:\n            tail = text[j:]\n            text = text[:i] + environ[name]\n            i = len(text)\n            text += tail\n        else:\n            i = j\n    return text", "docstring": "Expand shell variables of form $var and ${var}.\n\nUnknown variables are left unchanged.\n\nArgs:\ntext (str): String to expand.\nenviron (dict): Environ dict to use for expansions, defaults to\nos.environ.\n\nReturns:\nThe expanded string.", "source": "juraj-google-style"}
{"code": "def __init__(self, logger, script_type, default_shell=None):\n    \n    self.logger = logger\n    self.script_type = script_type\n    self.default_shell = default_shell or '/bin/bash'", "docstring": "Constructor.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.\nscript_type: string, the type of the script we are running.\ndefault_shell: string, the default shell to execute the script.", "source": "juraj-google-style"}
{"code": "def addgroup(name, group):\n    \n    if six.PY2:\n        name = _to_unicode(name)\n        group = _to_unicode(group)\n\n    name = _cmd_quote(name)\n    group = _cmd_quote(group).lstrip('\\'').rstrip('\\'')\n\n    user = info(name)\n    if not user:\n        return False\n    if group in user['groups']:\n        return True\n\n    cmd = 'net localgroup \"{0}\" {1} /add'.format(group, name)\n    ret = __salt__['cmd.run_all'](cmd, python_shell=True)\n\n    return ret['retcode'] == 0", "docstring": "Add user to a group\n\nArgs:\nname (str): The user name to add to the group\n\ngroup (str): The name of the group to which to add the user\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.addgroup jsnuffy 'Power Users'", "source": "juraj-google-style"}
{"code": "def ListClients(self, request, timeout=None):\n    return self._RetryLoop((lambda t: self._stub.ListClients(request, timeout=t)))", "docstring": "Provides basic information about Fleetspeak clients.\n\nArgs:\nrequest: fleetspeak.admin.ListClientsRequest\n\ntimeout: How many seconds to try for.\n\nReturns: fleetspeak.admin.ListClientsResponse", "source": "codesearchnet"}
{"code": "def _CalculateHashesFileEntry(\n      self, file_system, file_entry, parent_full_path, output_writer):\n    \n    \n    \n    \n    full_path = file_system.JoinPath([parent_full_path, file_entry.name])\n    for data_stream in file_entry.data_streams:\n      hash_value = self._CalculateHashDataStream(file_entry, data_stream.name)\n      display_path = self._GetDisplayPath(\n          file_entry.path_spec, full_path, data_stream.name)\n      output_writer.WriteFileHash(display_path, hash_value or 'N/A')\n\n    for sub_file_entry in file_entry.sub_file_entries:\n      self._CalculateHashesFileEntry(\n          file_system, sub_file_entry, full_path, output_writer)", "docstring": "Recursive calculates hashes starting with the file entry.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system.\nfile_entry (dfvfs.FileEntry): file entry.\nparent_full_path (str): full path of the parent file entry.\noutput_writer (StdoutWriter): output writer.", "source": "juraj-google-style"}
{"code": "def return_handler(\n        self,\n        call_node,\n        function_nodes,\n        saved_function_call_index,\n        first_node\n    ):\n        \n        if any(isinstance(node, YieldNode) for node in function_nodes):\n            \n            rhs_prefix = 'yld_'\n        elif any(isinstance(node, ConnectToExitNode) for node in function_nodes):\n            \n            rhs_prefix = 'ret_'\n        else:\n            return  \n\n        \n        LHS = CALL_IDENTIFIER + 'call_' + str(saved_function_call_index)\n        RHS = rhs_prefix + get_call_names_as_string(call_node.func)\n        return_node = RestoreNode(\n            LHS + ' = ' + RHS,\n            LHS,\n            [RHS],\n            line_number=call_node.lineno,\n            path=self.filenames[-1]\n        )\n        return_node.first_node = first_node\n        self.nodes[-1].connect(return_node)\n        self.nodes.append(return_node)", "docstring": "Handle the return from a function during a function call.\n\nArgs:\ncall_node(ast.Call) : The node that calls the definition.\nfunction_nodes(list[Node]): List of nodes of the function being called.\nsaved_function_call_index(int): Unique number for each call.\nfirst_node(EntryOrExitNode or RestoreNode): Used to connect previous statements to this function.", "source": "juraj-google-style"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    self._EnsureRequesterStarted()\n\n    path_spec = event.pathspec\n    event_identifiers = self._event_identifiers_by_pathspec[path_spec]\n\n    event_identifier = event.GetIdentifier()\n    event_identifiers.append(event_identifier)\n\n    if event.data_type not in self.DATA_TYPES or not self._analyzer.lookup_hash:\n      return\n\n    lookup_hash = '{0:s}_hash'.format(self._analyzer.lookup_hash)\n    lookup_hash = getattr(event, lookup_hash, None)\n    if not lookup_hash:\n      display_name = mediator.GetDisplayNameForPathSpec(path_spec)\n      logger.warning((\n          'Lookup hash attribute: {0:s}_hash missing from event that '\n          'originated from: {1:s}.').format(\n              self._analyzer.lookup_hash, display_name))\n      return\n\n    path_specs = self._hash_pathspecs[lookup_hash]\n    path_specs.append(path_spec)\n    \n    \n    if len(path_specs) == 1:\n      self.hash_queue.put(lookup_hash)", "docstring": "Evaluates whether an event contains the right data for a hash lookup.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def _Fail(self, msg):\n    raise TruthAssertionError(msg)", "docstring": "Fail unconditionally.\n\nArgs:\nmsg: string to include in the exception.\n\nRaises:\nTruthAssertionError: always, by design.", "source": "github-repos"}
{"code": "def serialize_to_transport(self, encoding='utf-8', xslt_url=None):\n    assert (encoding in ('utf-8', 'UTF-8'))\n    dataone_exception_pyxb = self.get_pyxb()\n    return d1_common.xml.serialize_for_transport(dataone_exception_pyxb, xslt_url=xslt_url)", "docstring": "Serialize to XML ``bytes`` with prolog.\n\nArgs:\nencoding: str\nEncoding to use for XML doc bytes\nxslt_url: str\nIf specified, add a processing instruction to the XML doc that specifies the\ndownload location for an XSLT stylesheet.\n\nReturns:\nbytes: XML holding a DataONEError based type.", "source": "codesearchnet"}
{"code": "def close(self):\n        \n        if self.reuse:\n            logger.debug(\"Ipcontroller not shutting down: reuse enabled\")\n            return\n\n        if self.mode == \"manual\":\n            logger.debug(\"Ipcontroller not shutting down: Manual mode\")\n            return\n\n        try:\n            pgid = os.getpgid(self.proc.pid)\n            os.killpg(pgid, signal.SIGTERM)\n            time.sleep(0.2)\n            os.killpg(pgid, signal.SIGKILL)\n            try:\n                self.proc.wait(timeout=1)\n                x = self.proc.returncode\n                if x == 0:\n                    logger.debug(\"Controller exited with {0}\".format(x))\n                else:\n                    logger.error(\"Controller exited with {0}. May require manual cleanup\".format(x))\n            except subprocess.TimeoutExpired:\n                logger.warn(\"Ipcontroller process:{0} cleanup failed. May require manual cleanup\".format(self.proc.pid))\n\n        except Exception as e:\n            logger.warn(\"Failed to kill the ipcontroller process[{0}]: {1}\".format(self.proc.pid, e))", "docstring": "Terminate the controller process and its child processes.\n\nArgs:\n- None", "source": "juraj-google-style"}
{"code": "def call(self, inputs: List[Any], global_state: pg.geno.AttributeDict, step: int=0) -> List[Any]:\n    raise NotImplementedError()", "docstring": "Subclasses should override this method.\n\nThe `global_state` and `step` are optional for the subclasses' call\nsignature.\n\nArgs:\ninputs: A list of values as inputs.\nglobal_state: An `AttributeDict` object as the global state container,\nwhich is readable/writable during the operation.\nstep: Number of examples historically proposed, which can be used for\ndetermining a cross over schedule.\n\nReturns:\nA list of values as output of current operation.", "source": "github-repos"}
{"code": "def play_human(env):\n    \n    \n    try:\n        play(env, fps=env.metadata['video.frames_per_second'])\n    except KeyboardInterrupt:\n        pass\n    \n    env.close()", "docstring": "Play the environment using keyboard as a human.\n\nArgs:\nenv (gym.Env): the initialized gym environment to play\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def delete(filething):\n    \n\n    t = OggSpeex(filething)\n    filething.fileobj.seek(0)\n    t.delete(filething)", "docstring": "delete(filething)\n\nArguments:\nfilething (filething)\nRaises:\nmutagen.MutagenError\n\nRemove tags from a file.", "source": "juraj-google-style"}
{"code": "def find_file_in_load_dirs(relpath):\n    \n    if relpath.startswith(os.path.sep):\n        relpath = relpath.lstrip(os.path.sep)\n\n    for ld in settings.DATA_DIRECTORIES:\n        possible_path = os.path.join(ld, relpath)\n        if os.path.exists(possible_path):\n            return possible_path", "docstring": "If given relative path exists in one of DevAssistant load paths,\nreturn its full path.\n\nArgs:\nrelpath: a relative path, e.g. \"assitants/crt/test.yaml\"\n\nReturns:\nabsolute path of the file, e.g. \"/home/x/.devassistant/assistanta/crt/test.yaml\nor None if file is not found", "source": "juraj-google-style"}
{"code": "def _start_profiler(self, logdir):\n    if self._profiler_started:\n        return\n    try:\n        backend.tensorboard.start_trace(logdir)\n        self._profiler_started = True\n    except Exception as e:\n        logging.error('Failed to start profiler: %s', e)", "docstring": "Starts the profiler if currently inactive.\n\nArgs:\nlogdir: Directory where profiler results will be saved.", "source": "github-repos"}
{"code": "def is_remote_file_modified(web_file, destination):\n    \n    try:\n        \n        last_mod = web_file.headers.get('last-modified')\n        if last_mod:\n            web_file_time = time.strptime(\n                web_file.headers.get(\n                    'last-modified'), '%a, %d %b %Y %H:%M:%S %Z')\n        else:\n            web_file_time = time.gmtime()\n\n        web_file_size = int(web_file.headers.get('content-length', -1))\n        if os.path.exists(destination):\n            file_time = time.gmtime(os.path.getmtime(destination))\n            file_size = os.path.getsize(destination)\n            if file_time >= web_file_time and file_size == web_file_size:\n                return False\n\n    except Exception as ex:\n        msg = ('Fail checking if remote file is modified default returns TRUE'\n               ' - {}'.format(ex))\n        logger.debug(msg)\n\n    return True", "docstring": "Check if online file has been modified.\nArgs:\n:web_file: online file to check.\n:destination: path of the offline file to compare.", "source": "juraj-google-style"}
{"code": "def add_where_when(voevent, coords, obs_time, observatory_location, allow_tz_naive_datetime=False):\n    if (obs_time.tzinfo is not None):\n        utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None)\n    elif (not allow_tz_naive_datetime):\n        raise ValueError(\"Datetime passed without tzinfo, cannot be sure if it is really a UTC timestamp. Please verify function call and either add tzinfo or pass parameter 'allow_tz_naive_obstime=True', as appropriate\")\n    else:\n        utc_naive_obs_time = obs_time\n    obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation')\n    etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location)\n    ol = etree.SubElement(obs_data, 'ObservationLocation')\n    etree.SubElement(ol, 'AstroCoordSystem', id=coords.system)\n    ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system)\n    time = etree.SubElement(ac, 'Time', unit='s')\n    instant = etree.SubElement(time, 'TimeInstant')\n    instant.ISOTime = utc_naive_obs_time.isoformat()\n    pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units)\n    pos2d.Name1 = 'RA'\n    pos2d.Name2 = 'Dec'\n    pos2d_val = etree.SubElement(pos2d, 'Value2')\n    pos2d_val.C1 = coords.ra\n    pos2d_val.C2 = coords.dec\n    pos2d.Error2Radius = coords.err", "docstring": "Add details of an observation to the WhereWhen section.\n\nWe\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\ncoords(:class:`.Position2D`): Sky co-ordinates of event.\nobs_time(datetime.datetime): Nominal DateTime of the observation. Must\neither be timezone-aware, or should be carefully verified as\nrepresenting UTC and then set parameter\n``allow_tz_naive_datetime=True``.\nobservatory_location(str): Telescope locale, e.g. 'La Palma'.\nMay be a generic location as listed under\n:class:`voeventparse.definitions.observatory_location`.\nallow_tz_naive_datetime (bool): (Default False). Accept timezone-naive\ndatetime-timestamps. See comments for ``obs_time``.", "source": "codesearchnet"}
{"code": "def __request_message_descriptor(self, request_kind, message_type, method_id,\n                                   path):\n    \n    descriptor = {}\n\n    params, param_order = self.__params_descriptor(message_type, request_kind,\n                                                   path, method_id)\n\n    if isinstance(message_type, resource_container.ResourceContainer):\n      message_type = message_type.body_message_class()\n\n    if (request_kind == self.__NO_BODY or\n        message_type == message_types.VoidMessage()):\n      descriptor['body'] = 'empty'\n    else:\n      descriptor['body'] = 'autoTemplate(backendRequest)'\n      descriptor['bodyName'] = 'resource'\n      self.__request_schema[method_id] = self.__parser.add_message(\n          message_type.__class__)\n\n    if params:\n      descriptor['parameters'] = params\n\n    if param_order:\n      descriptor['parameterOrder'] = param_order\n\n    return descriptor", "docstring": "Describes the parameters and body of the request.\n\nArgs:\nrequest_kind: The type of request being made.\nmessage_type: messages.Message or ResourceContainer class. The message to\ndescribe.\nmethod_id: string, Unique method identifier (e.g. 'myapi.items.method')\npath: string, HTTP path to method.\n\nReturns:\nDictionary describing the request.\n\nRaises:\nValueError: if the method path and request required fields do not match", "source": "juraj-google-style"}
{"code": "def mean(values: Sequence[Union[int, float, None]]) -> Optional[float]:\n    \n    total = 0.0  \n    n = 0\n    for x in values:\n        if x is not None:\n            total += x\n            n += 1\n    return total / n if n > 0 else None", "docstring": "Returns the mean of a list of numbers.\n\nArgs:\nvalues: values to mean, ignoring any values that are ``None``\n\nReturns:\nthe mean, or ``None`` if :math:`n = 0`", "source": "juraj-google-style"}
{"code": "def forward(self, X):\n    return (self.W(X).sum(dim=1) + self.b)", "docstring": "Execute sparse linear layer\n\nArgs:\nX: an [n, h] torch.LongTensor containing up to h indices of features\nwhose weights should be looked up and used in a sparse linear\nmultiplication.", "source": "codesearchnet"}
{"code": "def __init__(self, parameter_name, value, allowed_values):\n    \n    super(EnumRejectionError, self).__init__(parameter_name, value)\n    self.allowed_values = allowed_values", "docstring": "Constructor for EnumRejectionError.\n\nArgs:\nparameter_name: String; the name of the enum parameter which had a value\nrejected.\nvalue: The actual value passed in for the enum. Usually string.\nallowed_values: List of strings allowed for the enum.", "source": "juraj-google-style"}
{"code": "def x_www_form_urlencoded(post_data):\n    \n    if isinstance(post_data, dict):\n        return \"&\".join([\n            u\"{}={}\".format(key, value)\n            for key, value in post_data.items()\n        ])\n    else:\n        return post_data", "docstring": "convert origin dict to x-www-form-urlencoded\n\nArgs:\npost_data (dict):\n{\"a\": 1, \"b\":2}\n\nReturns:\nstr:\na=1&b=2", "source": "juraj-google-style"}
{"code": "def get_build_output(self, process):\n        \n\n        while True:\n            output = process.stdout.readline()\n            if output == b'' and process.poll() is not None:\n                if process.returncode > 0:\n                    raise Exception(\"Compilation ended with an error\"\n                                    \".\\nSTDERR\\n%s\\nSTDOUT\\n%s\" %\n                                    (process.stderr.read(),\n                                     process.stdout.read()))\n                return\n            if output:\n                \n                \n                \n                matches = re.search(r'\\[\\s*(\\d+?)/(\\d+)\\].*',\n                                    output.strip().decode('utf-8'))\n                if matches is not None:\n                    yield [int(matches.group(1)), int(matches.group(2))]", "docstring": "Parse the output of the ns-3 build process to extract the information\nthat is needed to draw the progress bar.\n\nArgs:\nprocess: the subprocess instance to listen to.", "source": "juraj-google-style"}
{"code": "def add_gene_ids(self, genes_list):\n    orig_num_genes = len(self.genes)\n    for g in list(set(genes_list)):\n        if (not self.genes.has_id(g)):\n            new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)\n            if self.model:\n                self.model.genes.append(new_gene)\n            else:\n                self.genes.append(new_gene)\n    log.info('Added {} genes to GEM-PRO project'.format((len(self.genes) - orig_num_genes)))", "docstring": "Add gene IDs manually into the GEM-PRO project.\n\nArgs:\ngenes_list (list): List of gene IDs as strings.", "source": "codesearchnet"}
{"code": "def GetVolumeIdentifiers(self, volume_system):\n    \n    volume_identifiers = []\n    for volume in volume_system.volumes:\n      volume_identifier = getattr(volume, 'identifier', None)\n      if volume_identifier:\n        volume_identifiers.append(volume_identifier)\n\n    return sorted(volume_identifiers)", "docstring": "Retrieves the volume identifiers.\n\nArgs:\nvolume_system (VolumeSystem): volume system.\n\nReturns:\nlist[str]: sorted volume identifiers.", "source": "juraj-google-style"}
{"code": "def to_dict(self, drop_null=True, camel=False):\n        \n        \n        def to_dict(obj, drop_null, camel):\n            \n            if isinstance(obj, (Body, BodyChild)):\n                obj = obj.__dict__\n            if isinstance(obj, dict):\n                data = {}\n                for attr, val in six.iteritems(obj):\n                    if camel:\n                        attr = _snake_to_camel(attr)\n                    valid_null = (isinstance(val, bool) or val == 0 or\n                                  (val and to_dict(val, drop_null, camel)))\n                    if not drop_null or (drop_null and valid_null):\n                        data[attr] = to_dict(val, drop_null, camel)\n                return data\n            elif isinstance(obj, list):\n                data = []\n                for val in obj:\n                    valid_null = (isinstance(val, bool) or val == 0 or\n                                  (val and to_dict(val, drop_null, camel)))\n                    if not drop_null or (drop_null and valid_null):\n                        data.append(to_dict(val, drop_null, camel))\n                return data\n            else:\n                return obj\n        return to_dict(self, drop_null, camel)", "docstring": "Serialize self as dict.\n\nArgs:\ndrop_null: bool, default True. Remove 'empty' attributes.\ncamel: bool, default True. Convert keys to camelCase.\n\nReturn:\ndict: object params.", "source": "juraj-google-style"}
{"code": "def bounds(self, thr=0, lower_index=0, upper_index=-1):\n        \n        points = self.points[lower_index:upper_index]\n\n        min_lat = float(\"inf\")\n        min_lon = float(\"inf\")\n        max_lat = -float(\"inf\")\n        max_lon = -float(\"inf\")\n\n        for point in points:\n            min_lat = min(min_lat, point.lat)\n            min_lon = min(min_lon, point.lon)\n            max_lat = max(max_lat, point.lat)\n            max_lon = max(max_lon, point.lon)\n\n        return (min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr)", "docstring": "Computes the bounds of the segment, or part of it\n\nArgs:\nlower_index (int, optional): Start index. Defaults to 0\nupper_index (int, optional): End index. Defaults to 0\nReturns:\n:obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that\n(min_lat, min_lon, max_lat, max_lon)", "source": "juraj-google-style"}
{"code": "def build_prefixes(namespaces=None):\n    \n    if namespaces is None:\n        namespaces = [\n            ('bf', str(BIBFRAME)),\n            ('schema', str(SCHEMA_ORG))\n        ]\n    output = \"PREFIX {}: <{}>\\n\".format(\n        namespaces[0][0],\n        namespaces[0][1])\n    if len(namespaces) == 1:\n        return output\n    else:\n        for namespace in namespaces[1:]:\n            output += \"PREFIX  {}: <{}>\\n\".format(namespace[0], namespace[1])\n    return output", "docstring": "Internal function takes a list of prefix, namespace uri tuples and\ngenerates a SPARQL PREFIX string.\n\nArgs:\nnamespaces(list): List of tuples, defaults to BIBFRAME and\nSchema.org\n\nReturns:\nstring", "source": "juraj-google-style"}
{"code": "def NgramScorer(frequency_map):\n    length = len(next(iter(frequency_map)))\n    floor = math.log10((0.01 / sum(frequency_map.values())))\n    ngrams = frequency.frequency_to_probability(frequency_map, decorator=math.log10)\n\n    def inner(text):\n        text = ''.join(text)\n        text = remove(text.upper(), (string.whitespace + string.punctuation))\n        return sum((ngrams.get(ngram, floor) for ngram in iterate_ngrams(text, length)))\n    return inner", "docstring": "Compute the score of a text by using the frequencies of ngrams.\n\nExample:\n>>> fitness = NgramScorer(english.unigrams)\n>>> fitness(\"ABC\")\n-4.3622319742618245\n\nArgs:\nfrequency_map (dict): ngram to frequency mapping", "source": "codesearchnet"}
{"code": "def union(self, other):\n    union = Rect()\n    lib.SDL_UnionRect(self._ptr, other._ptr, union._ptr)\n    return union", "docstring": "Calculate the union of this rectangle and another rectangle.\n\nArgs:\nother (Rect): The other rectangle.\n\nReturns:\nRect: The union of this rectangle and the given other rectangle.", "source": "codesearchnet"}
{"code": "def interpolate(self, date, method=None, order=None):\n        \n\n        if not self.start <= date <= self.stop:\n            raise ValueError(\"Date '%s' not in range\" % date)\n\n        prev_idx = 0\n        ephem = self\n\n        \n        while True:\n            idx = len(ephem)\n            if idx == 1:\n                break\n            k = idx \n\n            if date > ephem[k].date:\n                prev_idx += k\n                ephem = ephem[k:]\n            else:\n                ephem = ephem[:k]\n\n        method = method if method is not None else self.method\n        order = order if order is not None else self.order\n\n        if method == self.LINEAR:\n\n            y0 = self[prev_idx]\n            y1 = self[prev_idx + 1]\n\n            result = y0[:] + (y1[:] - y0[:]) * (date.mjd - y0.date.mjd) / (y1.date.mjd - y0.date.mjd)\n\n        elif method == self.LAGRANGE:\n\n            stop = prev_idx + 1 + order \n            start = prev_idx - order \n            if stop >= len(self):\n                start -= stop - len(self)\n            elif start < 0:\n                stop -= start\n                start = 0\n\n            \n            subset = self[start:stop]\n            date_subset = np.array([x.date.mjd for x in subset])\n\n            result = np.zeros(6)\n\n            \n            \n            \n            \n            \n            \n            \n            \n            for j in range(order):\n                \n                mask = date_subset != date_subset[j]\n                l_j = (date.mjd - date_subset[mask]) / (date_subset[j] - date_subset[mask])\n                result = result + l_j.prod() * subset[j]\n\n        else:\n            raise ValueError(\"Unkown interpolation method\", method)\n\n        orb = ephem[0]\n\n        return orb.__class__(date, result, orb.form, orb.frame, orb.propagator)", "docstring": "Interpolate data at a given date\n\nArgs:\ndate (Date):\nmethod (str): Method of interpolation to use\norder (int): In case of ``LAGRANGE`` method is used\nReturn:\nOrbit:", "source": "juraj-google-style"}
{"code": "def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):\n    if isinstance(sample_weight_mode, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('sample_weight_mode', sample_weight_mode, [e.output_name for e in training_endpoints])\n        for end_point in training_endpoints:\n            if not end_point.should_skip_target_weights():\n                if end_point.output_name not in sample_weight_mode:\n                    raise ValueError('Output ' + end_point.output_name + 'missing from `_sample_weight_modes` dictionary')\n                else:\n                    end_point.sample_weight_mode = sample_weight_mode.get(end_point.output_name)\n    elif isinstance(sample_weight_mode, (list, tuple)):\n        if len(sample_weight_mode) != len(training_endpoints):\n            raise ValueError('When passing a list as sample_weight_mode, it should have one entry per model output. The model has ' + str(len(training_endpoints)) + ' outputs, but you passed ' + str(len(sample_weight_mode)) + '_sample_weight_modes.')\n        for mode, endpoint in zip(sample_weight_mode, training_endpoints):\n            if not endpoint.should_skip_target_weights():\n                endpoint.sample_weight_mode = mode\n    else:\n        for endpoint in training_endpoints:\n            if not endpoint.should_skip_target_weights():\n                endpoint.sample_weight_mode = sample_weight_mode", "docstring": "Prepares sample weight modes for the model.\n\nArgs:\ntraining_endpoints: List of model _TrainingEndpoints.\nsample_weight_mode: sample weight mode user input passed from compile API.\n\nRaises:\nValueError: In case of invalid `sample_weight_mode` input.", "source": "github-repos"}
{"code": "def equal(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Equal().symbolic_call(x1, x2)\n    return backend.numpy.equal(x1, x2)", "docstring": "Returns `(x1 == x2)` element-wise.\n\nArgs:\nx1: Tensor to compare.\nx2: Tensor to compare.\n\nReturns:\nOutput tensor, element-wise comparison of `x1` and `x2`.", "source": "github-repos"}
{"code": "def exists(path):\n    path = _normalize_dir(path)\n    sysPath = get_path()\n    return (path.lower() in (x.lower() for x in sysPath))", "docstring": "Check if the directory is configured in the SYSTEM path\nCase-insensitive and ignores trailing backslash\n\nReturns:\nboolean True if path exists, False if not\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_path.exists 'c:\\\\python27'\nsalt '*' win_path.exists 'c:\\\\python27\\\\'\nsalt '*' win_path.exists 'C:\\\\pyThon27'", "source": "codesearchnet"}
{"code": "def consume_socket_output(frames, demux=False):\n    \n    if demux is False:\n        \n        \n        return six.binary_type().join(frames)\n\n    \n    \n    out = [None, None]\n    for frame in frames:\n        \n        \n        assert frame != (None, None)\n        if frame[0] is not None:\n            if out[0] is None:\n                out[0] = frame[0]\n            else:\n                out[0] += frame[0]\n        else:\n            if out[1] is None:\n                out[1] = frame[1]\n            else:\n                out[1] += frame[1]\n    return tuple(out)", "docstring": "Iterate through frames read from the socket and return the result.\n\nArgs:\n\ndemux (bool):\nIf False, stdout and stderr are multiplexed, and the result is the\nconcatenation of all the frames. If True, the streams are\ndemultiplexed, and the result is a 2-tuple where each item is the\nconcatenation of frames belonging to the same stream.", "source": "juraj-google-style"}
{"code": "def getfullargspec(obj):\n    decorators, target = tf_decorator.unwrap(obj)\n    for d in decorators:\n        if d.decorator_argspec is not None:\n            return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)\n    return _getfullargspec(target)", "docstring": "TFDecorator-aware replacement for `inspect.getfullargspec`.\n\nThis wrapper emulates `inspect.getfullargspec` in[^)]* Python2.\n\nArgs:\nobj: A callable, possibly decorated.\n\nReturns:\nThe `FullArgSpec` that describes the signature of\nthe outermost decorator that changes the callable's signature. If the\ncallable is not decorated, `inspect.getfullargspec()` will be called\ndirectly on the callable.", "source": "github-repos"}
{"code": "def wait(self, timeout_s: float=None) -> int:\n    if (not self.running):\n        return 0\n    retcode = self.process.wait(timeout=timeout_s)\n    if (retcode is None):\n        self.error('Subprocess finished, but return code was None')\n        retcode = 1\n    elif (retcode == 0):\n        self.info('Subprocess finished cleanly (return code 0).')\n    else:\n        self.error('Subprocess finished, but FAILED (return code {}). Logs were: {} (stdout), {} (stderr)'.format(retcode, self.details.logfile_out, self.details.logfile_err))\n    self.running = False\n    return retcode", "docstring": "Wait for up to ``timeout_s`` for the child process to finish.\n\nArgs:\ntimeout_s: maximum time to wait or ``None`` to wait forever\n\nReturns:\nprocess return code; or ``0`` if it wasn't running, or ``1`` if\nit managed to exit without a return code\n\nRaises:\nsubprocess.TimeoutExpired: if the process continues to run", "source": "codesearchnet"}
{"code": "def read(keypath, configfile=None):\n    if (configfile in _configs):\n        appconfig = _configs[configfile]\n    else:\n        appconfig = AppConfig(configfile=configfile)\n        _configs[configfile] = appconfig\n    return appconfig.read(keypath)", "docstring": "Reads a value from the configuration file.\n\nArgs:\nkeypath: str\nSpecifies the key for which the value is desired.  It can be a\nhierarchical path.  Example: \"section1.subsection.key1\"\nconfigfile: str\nPath to the config file to read.  Defaults to None, in which case\nthe application's default config file is used.\n\nReturns:\nvalue from configuration file", "source": "codesearchnet"}
{"code": "def create_config(config_path='scriptworker.yaml'):\n    if (not os.path.exists(config_path)):\n        print(\"{} doesn't exist! Exiting...\".format(config_path), file=sys.stderr)\n        sys.exit(1)\n    with open(config_path, 'r', encoding='utf-8') as fh:\n        secrets = safe_load(fh)\n    config = dict(deepcopy(DEFAULT_CONFIG))\n    if (not secrets.get('credentials')):\n        secrets['credentials'] = read_worker_creds()\n    config.update(secrets)\n    apply_product_config(config)\n    messages = check_config(config, config_path)\n    if messages:\n        print('\\n'.join(messages), file=sys.stderr)\n        print('Exiting...', file=sys.stderr)\n        sys.exit(1)\n    credentials = get_frozen_copy(secrets['credentials'])\n    del config['credentials']\n    config = get_frozen_copy(config)\n    return (config, credentials)", "docstring": "Create a config from DEFAULT_CONFIG, arguments, and config file.\n\nThen validate it and freeze it.\n\nArgs:\nconfig_path (str, optional): the path to the config file.  Defaults to\n\"scriptworker.yaml\"\n\nReturns:\ntuple: (config frozendict, credentials dict)\n\nRaises:\nSystemExit: on failure", "source": "codesearchnet"}
{"code": "def _create_array(self, arr: np.ndarray) -> int:\n    if (not isinstance(arr, np.ndarray)):\n        raise ValueError('Array is not a numpy ndarray.')\n    try:\n        c_arr = np.ctypeslib.as_ctypes(arr)\n    except (KeyError, NotImplementedError):\n        raise ValueError('Array has unsupported dtype {}.'.format(arr.dtype))\n    raw_arr = RawArray(c_arr._type_, c_arr)\n    with self._lock:\n        if (self._count >= len(self._arrays)):\n            self._arrays += (len(self._arrays) * [None])\n        self._get_next_free()\n        self._arrays[self._current] = (raw_arr, arr.shape)\n        self._count += 1\n    return self._current", "docstring": "Returns the handle of a RawArray created from the given numpy array.\n\nArgs:\narr: A numpy ndarray.\n\nReturns:\nThe handle (int) of the array.\n\nRaises:\nValueError: if arr is not a ndarray or of an unsupported dtype. If\nthe array is of an unsupported type, using a view of the array to\nanother dtype and then converting on get is often a work around.", "source": "codesearchnet"}
{"code": "def save_as(self, new_filename):\n        \n\n        xfile._save_file(self._filename, self._datasourceTree, new_filename)", "docstring": "Save our file with the name provided.\n\nArgs:\nnew_filename:  New name for the workbook file. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def add_index_argument(cls, group):\n        \n        prefix = cls.argument_prefix\n\n        group.add_argument(\n            '--%s-index' % prefix, action=\"store\",\n            dest=\"%s_index\" % prefix,\n            help=(\"Name of the %s root markdown file, can be None\" % (\n                cls.extension_name)))", "docstring": "Subclasses may call this to add an index argument.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nprefix: str, arguments have to be namespaced", "source": "juraj-google-style"}
{"code": "def __init__(self, transport, maxdata, remote_banner):\n    \n    try:\n      self.systemtype, self.serial, self.banner = remote_banner.split(':', 2)\n    except ValueError:\n      raise usb_exceptions.AdbProtocolError('Received malformed banner %s',\n                                            remote_banner)\n    self.transport = transport\n    self.maxdata = maxdata\n    self._last_id_used = 0\n    self._reader_lock = threading.Lock()\n    self._open_lock = threading.Lock()\n    \n    self._stream_transport_map = {}\n    self._stream_transport_map_lock = threading.RLock()", "docstring": "Create an ADB connection to a device.\n\nArgs:\ntransport: AdbTransportAdapter to use for reading/writing AdbMessages\nmaxdata: Max data size the remote endpoint will accept.\nremote_banner: Banner received from the remote endpoint.", "source": "juraj-google-style"}
{"code": "def supports_ansi_escape_codes(fd):\n    \n\n    if os.isatty(fd):\n        return True\n\n    if not is_win:\n        return False\n\n    \n    handle = winapi._get_osfhandle(fd)\n    if handle == winapi.INVALID_HANDLE_VALUE:\n        return False\n\n    if winapi.GetFileType(handle) != winapi.FILE_TYPE_PIPE:\n        return False\n\n    file_name = _get_file_name_for_handle(handle)\n    match = re.match(\n        \"^\\\\\\\\(cygwin|msys)-[a-z0-9]+-pty[0-9]+-(from|to)-master$\", file_name)\n    return match is not None", "docstring": "Returns whether the output device is capable of interpreting ANSI escape\ncodes when :func:`print_` is used.\n\nArgs:\nfd (int): file descriptor (e.g. ``sys.stdout.fileno()``)\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def make_new(self, rev):\n    return self.vcs.make_rev_options(rev, extra_args=self.extra_args)", "docstring": "Make a copy of the current instance, but with a new rev.\n\nArgs:\nrev: the name of the revision for the new object.", "source": "codesearchnet"}
{"code": "def remove_words(self, words):\n        \n        for word in words:\n            self._dictionary.pop(word.lower())\n        self._update_dictionary()", "docstring": "Remove a list of words from the word frequency list\n\nArgs:\nwords (list): The list of words to remove", "source": "juraj-google-style"}
{"code": "def summarize_mean_in_nats_and_bits(inputs, units, name, nats_name_scope='nats', bits_name_scope='bits_per_dim'):\n    mean = tf.reduce_mean(input_tensor=inputs)\n    with tf.compat.v1.name_scope(nats_name_scope):\n        tf.compat.v2.summary.scalar(name, mean, step=tf.compat.v1.train.get_or_create_global_step())\n    with tf.compat.v1.name_scope(bits_name_scope):\n        tf.compat.v2.summary.scalar(name, ((mean / units) / tf.math.log(2.0)), step=tf.compat.v1.train.get_or_create_global_step())", "docstring": "Summarize the mean of a tensor in nats and bits per unit.\n\nArgs:\ninputs: A tensor of values measured in nats.\nunits: The units of the tensor with which to compute the mean bits\nper unit.\nname: The name of the tensor.\nnats_name_scope: The name scope of the nats summary.\nbits_name_scope: The name scope of the bits summary.", "source": "codesearchnet"}
{"code": "def tscore(sample1, sample2):\n    if (len(sample1) != len(sample2)):\n        raise ValueError('different number of values')\n    error = (pooled_sample_variance(sample1, sample2) / len(sample1))\n    diff = (statistics.mean(sample1) - statistics.mean(sample2))\n    return (diff / math.sqrt((error * 2)))", "docstring": "Calculate a t-test score for the difference between two samples.\n\nArgs:\nsample1: one sample.\nsample2: the other sample.\n\nReturns:\nThe t-test score, as a float.", "source": "codesearchnet"}
{"code": "def GetMerger(self, cls):\n    \n    for merger in self._mergers:\n      if isinstance(merger, cls):\n        return merger\n    raise LookupError('No matching DataSetMerger found')", "docstring": "Looks for an added DataSetMerger derived from the given class.\n\nArgs:\ncls: A class derived from DataSetMerger.\n\nReturns:\nThe matching DataSetMerger instance.\n\nRaises:\nLookupError: No matching DataSetMerger has been added.", "source": "juraj-google-style"}
{"code": "def quote(self, data):\n    if (self.lang == 'python'):\n        quote_char = \"'\"\n    elif (self.lang == 'java'):\n        quote_char = \"'\"\n    if re.findall('[!\\\\-\\\\=\\\\s\\\\$\\\\&]{1,}', str(data)):\n        data = '{0}{1}{0}'.format(quote_char, data)\n    return data", "docstring": "Quote any parameters that contain spaces or special character.\n\nReturns:\n(string): String containing parameters wrapped in double quotes", "source": "codesearchnet"}
{"code": "def set_label_list(self, label_lists):\n    if isinstance(label_lists, annotations.LabelList):\n        label_lists = [label_lists]\n    for label_list in label_lists:\n        if (label_list.idx is None):\n            label_list.idx = 'default'\n        label_list.utterance = self\n        self.label_lists[label_list.idx] = label_list", "docstring": "Set the given label-list for this utterance.\nIf the label-list-idx is not set, ``default`` is used.\nIf there is already a label-list with the given idx,\nit will be overriden.\n\nArgs:\nlabel_list (LabelList, list): A single or multi. label-lists to add.", "source": "codesearchnet"}
{"code": "async def anext(*args):\n    if (not args):\n        raise TypeError('anext() expected at least 1 arguments, got 0')\n    if (len(args) > 2):\n        raise TypeError('anext() expected at most 2 arguments, got {}'.format(len(args)))\n    (iterable, default, has_default) = (args[0], None, False)\n    if (len(args) == 2):\n        (iterable, default) = args\n        has_default = True\n    try:\n        return (await iterable.__anext__())\n    except StopAsyncIteration as exc:\n        if has_default:\n            return default\n        raise StopAsyncIteration() from exc", "docstring": "Return the next item from an async iterator.\n\nArgs:\niterable: An async iterable.\ndefault: An optional default value to return if the iterable is empty.\n\nReturn:\nThe next value of the iterable.\n\nRaises:\nTypeError: The iterable given is not async.\n\nThis function will return the next value form an async iterable. If the\niterable is empty the StopAsyncIteration will be propogated. However, if\na default value is given as a second argument the exception is silenced and\nthe default value is returned instead.", "source": "codesearchnet"}
{"code": "def list_attributes(self):\n\n    def _row_gen(attributes):\n        for attr in attributes.values():\n            (yield (attr.name, attr.display_name, attr.description))\n    return pd.DataFrame.from_records(_row_gen(self.attributes), columns=['name', 'display_name', 'description'])", "docstring": "Lists available attributes in a readable DataFrame format.\n\nReturns:\npd.DataFrame: Frame listing available attributes.", "source": "codesearchnet"}
{"code": "def get_stacks(self):\n    if (not hasattr(self, '_stacks')):\n        stacks = []\n        definitions = self._get_stack_definitions()\n        for stack_def in definitions:\n            stack = Stack(definition=stack_def, context=self, mappings=self.mappings, force=(stack_def.name in self.force_stacks), locked=stack_def.locked, enabled=stack_def.enabled, protected=stack_def.protected)\n            stacks.append(stack)\n        self._stacks = stacks\n    return self._stacks", "docstring": "Get the stacks for the current action.\n\nHandles configuring the :class:`stacker.stack.Stack` objects that will\nbe used in the current action.\n\nReturns:\nlist: a list of :class:`stacker.stack.Stack` objects", "source": "codesearchnet"}
{"code": "def analyze_one_classification_result(storage_client, file_path, adv_batch, dataset_batches, dataset_meta):\n    class_result = read_classification_results(storage_client, file_path)\n    if (class_result is None):\n        return (0, 0, 0, 0)\n    adv_images = adv_batch['images']\n    dataset_batch_images = dataset_batches.data[adv_batch['dataset_batch_id']]['images']\n    count_correctly_classified = 0\n    count_errors = 0\n    count_hit_target_class = 0\n    num_images = 0\n    for (adv_img_id, label) in iteritems(class_result):\n        if (adv_img_id not in adv_images):\n            continue\n        num_images += 1\n        clean_image_id = adv_images[adv_img_id]['clean_image_id']\n        dataset_image_id = dataset_batch_images[clean_image_id]['dataset_image_id']\n        if (label == dataset_meta.get_true_label(dataset_image_id)):\n            count_correctly_classified += 1\n        else:\n            count_errors += 1\n        if (label == dataset_meta.get_target_class(dataset_image_id)):\n            count_hit_target_class += 1\n    return (count_correctly_classified, count_errors, count_hit_target_class, num_images)", "docstring": "Reads and analyzes one classification result.\n\nThis method reads file with classification result and counts\nhow many images were classified correctly and incorrectly,\nhow many times target class was hit and total number of images.\n\nArgs:\nstorage_client: instance of CompetitionStorageClient\nfile_path: result file path\nadv_batch: AversarialBatches.data[adv_batch_id]\nadv_batch_id is stored in each ClassificationBatch entity\ndataset_batches: instance of DatasetBatches\ndataset_meta: instance of DatasetMetadata\n\nReturns:\nTuple of (count_correctly_classified, count_errors, count_hit_target_class,\nnum_images)", "source": "codesearchnet"}
{"code": "def unzip_columns(expr, column_types):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n    column_appenders = []\n    struct_fields = []\n    result_fields = []\n    for i, column_type in enumerate(column_types):\n        column_appenders.append(\"appender[%s]\" % column_type)\n        struct_fields.append(\"merge(b.$%s, e.$%s)\" % (i, i))\n        result_fields.append(\"result(unzip_builder.$%s)\" % i)\n    appender_string = \"{%s}\" % \", \".join(column_appenders)\n    struct_string = \"{%s}\" % \", \".join(struct_fields)\n    result_string = \"{%s}\" % \", \".join(result_fields)\n    expr_var = weld_obj.update(expr)\n    if isinstance(expr, WeldObject):\n        expr_var = expr.obj_id\n        weld_obj.dependencies[expr_var] = expr\n\n    weld_template = \n\n    weld_obj.weld_code = weld_template % {\"expr\": expr_var,\n                                          \"appenders\": appender_string,\n                                          \"struct_builder\": struct_string,\n                                          \"result\": result_string}\n    return weld_obj", "docstring": "Zip together multiple columns.\n\nArgs:\ncolumns (WeldObject / Numpy.ndarray): lust of columns\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def save_plot(code, elem):\n    if ('plt' in elem.attributes):\n        (figurewidth, figureheight) = elem.attributes['plt'].split(',')\n    else:\n        try:\n            figureheight = elem.attributes['height']\n        except KeyError:\n            figureheight = '4cm'\n        try:\n            figurewidth = elem.attributes['width']\n        except KeyError:\n            figurewidth = '6cm'\n    return f", "docstring": "Converts matplotlib plots to tikz code.\n\nIf elem has either the plt attribute (format: plt=width,height) or the\nattributes width=width and/or height=height, the figurewidth and -height\nare set accordingly. If none are given, a height of 4cm and a width of 6cm\nis used as default.\n\nArgs:\ncode: The matplotlib code.\nelem: The element.\n\nReturns:\nThe code and some code to invoke matplotlib2tikz.", "source": "codesearchnet"}
{"code": "def _ParseDateTimeValue(self, parser_mediator, date_time_value):\n    \n    if date_time_value[14] != 'Z':\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date and time value: {0!s}'.format(date_time_value))\n      return None\n\n    try:\n      year = int(date_time_value[0:4], 10)\n      month = int(date_time_value[4:6], 10)\n      day_of_month = int(date_time_value[6:8], 10)\n      hours = int(date_time_value[8:10], 10)\n      minutes = int(date_time_value[10:12], 10)\n      seconds = int(date_time_value[12:14], 10)\n    except (TypeError, ValueError):\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date and time value: {0!s}'.format(date_time_value))\n      return None\n\n    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n\n    try:\n      return dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date and time value: {0!s}'.format(date_time_value))\n      return None", "docstring": "Parses a date time value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndate_time_value (str): date time value\n(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: \"YYYYMMDDhhmmssZ\".\n\nReturns:\ndfdatetime.TimeElements: date and time extracted from the value or None\nif the value does not represent a valid string.", "source": "juraj-google-style"}
{"code": "def get(self, ldap_dn):\n    self.base_dn = ldap_dn\n    self.sub_tree = BASE\n    return self.first()", "docstring": "Return an LDAP entry by DN\n\nArgs:\nldap_dn (str): LDAP DN", "source": "codesearchnet"}
{"code": "def get_both_blocks_sibling(self):\n    if (not self.is_both_blocks()):\n        return None\n    if (self.block.block_letter and (self.block.block_letter.upper() not in ['A', 'B'])):\n        return None\n    other_instances = EighthScheduledActivity.objects.filter(activity=self.activity, block__date=self.block.date)\n    for inst in other_instances:\n        if (inst == self):\n            continue\n        if (inst.block.block_letter in ['A', 'B']):\n            return inst\n    return None", "docstring": "If this is a both-blocks activity, get the other EighthScheduledActivity\nobject that occurs on the other block.\n\nboth_blocks means A and B block, NOT all of the blocks on that day.\n\nReturns:\nEighthScheduledActivity object if found\nNone if the activity cannot have a sibling\nFalse if not found", "source": "codesearchnet"}
{"code": "def on_train_end(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_train_end(logs)", "docstring": "Calls the `on_train_end` methods of its callbacks.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def _distributed_apply(self, distribution, grads_and_vars, global_step=None, name=None):\n    name = name if name is not None else self.get_name()\n    grads = [g for g, _ in grads_and_vars]\n    loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads)\n\n    def apply_fn():\n        return self._apply_gradients(distribution, grads_and_vars, global_step, name + '-wrapped')\n    maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn, control_flow_ops.no_op)\n    return control_flow_ops.group(maybe_apply_op, loss_scale_update_op, name=name)", "docstring": "A version of `apply_gradients` for cross replica context.\n\nWhen users are in a cross replica strategy, they must call this rather than\n`apply_gradients()`.\n\nArgs:\ndistribution: a `DistributionStrategy` object.\ngrads_and_vars: List of (gradient, variable) pairs as returned by\n`compute_gradients()` and then aggregated across replicas.\nglobal_step: Optional (mirrored) `Variable` to increment by one after the\nvariables have been updated.\nname: Optional name for the returned operation. Default to the name passed\nto the `Optimizer` constructor.\n\nReturns:\nAn `Operation` that applies the specified gradients across all\nreplicas. If `global_step` was not None, that operation also\nincrements `global_step`", "source": "github-repos"}
{"code": "def _add_transitions(mcs, field_name, workflow, attrs, implems=None):\n    new_implems = ImplementationList(field_name, workflow)\n    if implems:\n        new_implems.load_parent_implems(implems)\n    new_implems.transform(attrs)\n    return new_implems", "docstring": "Collect and enhance transition definitions to a workflow.\n\nModifies the 'attrs' dict in-place.\n\nArgs:\nfield_name (str): name of the field transitions should update\nworkflow (Workflow): workflow we're working on\nattrs (dict): dictionary of attributes to be updated.\nimplems (ImplementationList): Implementation list from parent\nclasses (optional)\n\nReturns:\nImplementationList: The new implementation list for this field.", "source": "codesearchnet"}
{"code": "def get_without_ethernet(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri) + '/withoutEthernet')\n    return self._client.get(uri)", "docstring": "Gets the logical downlink with the specified ID without ethernet.\n\nArgs:\nid_or_uri: Can be either the logical downlink id or the logical downlink uri.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def recipe_email_cm_to_bigquery(config, auth_read, email, subject, dataset, table, is_incremental_load):\n    email(config, {'auth': auth_read, 'read': {'from': 'noreply-cm@google.com', 'to': email, 'subject': subject, 'attachment': '.*'}, 'write': {'bigquery': {'dataset': dataset, 'table': table, 'header': True, 'is_incremental_load': is_incremental_load}}})", "docstring": "Pulls a CM Report from a gMail powered email account into BigQuery.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nemail (string) - Email address report was sent to.\nsubject (string) - Regular expression to match subject. Double escape backslashes.\ndataset (string) - Existing dataset in BigQuery.\ntable (string) - Name of table to be written to.\nis_incremental_load (boolean) - Append report data to table based on date column, de-duplicates.", "source": "github-repos"}
{"code": "def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:\n    doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)\n    return (retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids))", "docstring": "Retrieves documents for specified `question_hidden_states`.\n\nArgs:\nquestion_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):\nA batch of query vectors to retrieve with.\nn_docs (`int`):\nThe number of docs retrieved per query.\n\nReturn:\n`Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects:\n\n- **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings\nof the retrieved docs per query.\n- **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index\n- **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query.", "source": "github-repos"}
{"code": "def get_children_of_type(typ, root):\n    \n\n    if type(typ) is not text:\n        typ = typ.__name__\n\n    return get_children(lambda x: x.__class__.__name__ == typ, root)", "docstring": "Returns a list of all model elements of type 'typ' starting from model\nelement 'root'. The search process will follow containment links only.\nNon-containing references shall not be followed.\n\nArgs:\ntyp(str or python class): The type of the model object we are\nlooking for.\nroot (model object): Python model object which is the start of the\nsearch process.", "source": "juraj-google-style"}
{"code": "def CopyToDict(self):\n    path_spec_dict = {}\n    for (attribute_name, attribute_value) in iter(self.__dict__.items()):\n        if (attribute_value is None):\n            continue\n        if (attribute_name == 'parent'):\n            attribute_value = attribute_value.CopyToDict()\n        path_spec_dict[attribute_name] = attribute_value\n    return path_spec_dict", "docstring": "Copies the path specification to a dictionary.\n\nReturns:\ndict[str, object]: path specification attributes.", "source": "codesearchnet"}
{"code": "def _guessEncoding(self, path):\n    if (os.path.exists(path) and path.lower().endswith('csv')):\n        encoding = None\n        if (encoding is not None):\n            if encoding.startswith('utf'):\n                encoding = encoding.replace('-', '')\n            encoding = encoding.replace('-', '_')\n            viewValue = _encodings.get(encoding)\n            self._encodingKey = encoding\n            index = self._encodingComboBox.findText(viewValue.upper())\n            self._encodingComboBox.setCurrentIndex(index)", "docstring": "Opens a file from the given `path` and checks the file encoding.\n\nThe file must exists on the file system and end with the extension\n`.csv`. The file is read line by line until the encoding could be\nguessed.\nOn a successfull identification, the widgets of this dialog will be\nupdated.\n\nArgs:\npath (string): Path to a csv file on the file system.", "source": "codesearchnet"}
{"code": "def get_plugin_apps(self):\n    return {_ACK_ROUTE: self._serve_ack, _COMM_ROUTE: self._serve_comm, _DEBUGGER_GRPC_HOST_PORT_ROUTE: self._serve_debugger_grpc_host_port, _DEBUGGER_GRAPH_ROUTE: self._serve_debugger_graph, _GATED_GRPC_ROUTE: self._serve_gated_grpc, _TENSOR_DATA_ROUTE: self._serve_tensor_data, _SOURCE_CODE_ROUTE: self._serve_source_code}", "docstring": "Obtains a mapping between routes and handlers.\n\nThis function also starts a debugger data server on separate thread if the\nplugin has not started one yet.\n\nReturns:\nA mapping between routes and handlers (functions that respond to\nrequests).", "source": "codesearchnet"}
{"code": "def move_test_classes_into_scope(wrapped_test_module):\n    for name, obj in wrapped_test_module.__dict__.items():\n        if _is_test_class(obj):\n            module_variables['tpu_test_imported_%s' % name] = obj", "docstring": "Add all test classes defined in wrapped module to our module.\n\nThe test runner works by inspecting the main module for TestCase classes, so\nby adding a module-level reference to the TestCase we cause it to execute the\nwrapped TestCase.\n\nArgs:\nwrapped_test_module: The user-provided test code to run.", "source": "github-repos"}
{"code": "def WMITimeStrToRDFDatetime(self, timestr):\n    \n    \n    \n    offset_minutes = timestr[21:]\n    year = timestr[:4]\n    month = timestr[4:6]\n    day = timestr[6:8]\n    hours = timestr[8:10]\n    minutes = timestr[10:12]\n    seconds = timestr[12:14]\n    microseconds = timestr[15:21]\n\n    unix_seconds = calendar.timegm(\n        tuple(map(int, [year, month, day, hours, minutes, seconds])))\n    unix_seconds -= int(offset_minutes) * 60\n    return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))", "docstring": "Return RDFDatetime from string like 20140825162259.000000-420.\n\nArgs:\ntimestr: WMI time string\n\nReturns:\nrdfvalue.RDFDatetime\n\nWe have some timezone manipulation work to do here because the UTC offset is\nin minutes rather than +-HHMM", "source": "juraj-google-style"}
{"code": "def CreateTask(self, session_identifier):\n    task = tasks.Task(session_identifier)\n    logger.debug('Created task: {0:s}.'.format(task.identifier))\n    with self._lock:\n        self._tasks_queued[task.identifier] = task\n        self._total_number_of_tasks += 1\n        self.SampleTaskStatus(task, 'created')\n    return task", "docstring": "Creates a task.\n\nArgs:\nsession_identifier (str): the identifier of the session the task is\npart of.\n\nReturns:\nTask: task attribute container.", "source": "codesearchnet"}
{"code": "def _get_min_max_value_by_expanding_range(self, start_idx: int) -> tuple[float, float]:\n    mse_min = (float('inf'), float('inf'), float('inf'))\n    left, right = (start_idx, start_idx)\n    move_left = True\n    while not (left == 0 and right == self._num_bins - 1):\n        if move_left and left > 0 or right == self._num_bins - 1:\n            left = max(left - 1, 0)\n        else:\n            right = min(right + 1, self._num_bins - 1)\n        move_left = not move_left\n        quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right])\n        mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max)\n        mse_min = min(mse_tuple, mse_min)\n    min_value, max_value = (mse_min[1], mse_min[2])\n    return (min_value, max_value)", "docstring": "Starting from start_idx, expand left and right alternately to find the min value of mse loss.\n\nArgs:\nstart_idx: Index to start quantization.\n\nReturns:\n(min_value, max_value): Min and max calculated.", "source": "github-repos"}
{"code": "def log_combinations(n, counts, name='log_combinations'):\n    with tf.name_scope(name):\n        n = tf.convert_to_tensor(value=n, name='n')\n        counts = tf.convert_to_tensor(value=counts, name='counts')\n        total_permutations = tf.math.lgamma((n + 1))\n        counts_factorial = tf.math.lgamma((counts + 1))\n        redundant_permutations = tf.reduce_sum(input_tensor=counts_factorial, axis=[(- 1)])\n        return (total_permutations - redundant_permutations)", "docstring": "Multinomial coefficient.\n\nGiven `n` and `counts`, where `counts` has last dimension `k`, we compute\nthe multinomial coefficient as:\n\n```n! / sum_i n_i!```\n\nwhere `i` runs over all `k` classes.\n\nArgs:\nn: Floating-point `Tensor` broadcastable with `counts`. This represents `n`\noutcomes.\ncounts: Floating-point `Tensor` broadcastable with `n`. This represents\ncounts in `k` classes, where `k` is the last dimension of the tensor.\nname: A name for this operation (optional).\n\nReturns:\n`Tensor` representing the multinomial coefficient between `n` and `counts`.", "source": "codesearchnet"}
{"code": "def derive_field_name(self, field_name):\n    cls = type(self)\n    return cls(self[0], self[1], self[2], field_name, self[4], self[5])", "docstring": "Derives a new event from this one setting the ``field_name`` attribute.\n\nArgs:\nfield_name (Union[amazon.ion.symbols.SymbolToken, unicode]): The field name to set.\nReturns:\nIonEvent: The newly generated event.", "source": "codesearchnet"}
{"code": "def where_node_as_ldap(where, compiler, connection):\n    (bits, params) = ([], [])\n    for item in where.children:\n        if isinstance(item, WhereNode):\n            (clause, clause_params) = compiler.compile(item)\n        else:\n            (clause, clause_params) = item.as_sql(compiler, connection)\n        bits.append(clause)\n        params.extend(clause_params)\n    if (not bits):\n        return ('', [])\n    if (len(bits) == 1):\n        clause = bits[0]\n    elif (where.connector == AND):\n        clause = ('&' + ''.join((('(%s)' % bit) for bit in bits)))\n    elif (where.connector == OR):\n        clause = ('|' + ''.join((('(%s)' % bit) for bit in bits)))\n    else:\n        raise LdapDBError(('Unhandled WHERE connector: %s' % where.connector))\n    if where.negated:\n        clause = ('!(%s)' % clause)\n    return (clause, params)", "docstring": "Parse a django.db.models.sql.where.WhereNode.\n\nReturns:\n(clause, [params]): the filter clause, with a list of unescaped parameters.", "source": "codesearchnet"}
{"code": "def detect(self, filename, offset, standalone=False):\n    r = RawStruct(filename=filename, offset=(offset + SIG_OFFSET), length=SIG_SIZE)\n    oem_id = r.data\n    if (oem_id == b'NTFS    '):\n        return True\n    return False", "docstring": "Verifies NTFS filesystem signature.\n\nReturns:\nbool: True if filesystem signature at offset 0x03 \\\nmatches 'NTFS    ', False otherwise.", "source": "codesearchnet"}
{"code": "def print_res(data):\n    \n    print('===================================')\n    main_part = data['data']\n    print(main_part['word_name'])\n    symbols = main_part['symbols'][0]\n    print(\"美式音标：[\" + symbols['ph_am'] + \"]\")\n    print(\"英式音标：[\" + symbols['ph_en'] + \"]\")\n    print('-----------------------------------')\n    parts = symbols['parts']\n    for part in parts:\n        print(part['part'])\n        for mean in part['means']:\n            print(\"    \", mean)\n    print('===================================')", "docstring": "Print translate result in a better format\nArgs:\ndata(str): result", "source": "juraj-google-style"}
{"code": "def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):\n    (genes, cells) = data.shape\n    if (P_init is None):\n        P_init = np.random.random((genes, k))\n    if (R_init is None):\n        R_init = np.random.randint(1, data.max(), (genes, k))\n        R_init = R_init.astype(float)\n    if (assignments is None):\n        (_, assignments) = kmeans_pp(data, k, means)\n    means = np.zeros((genes, k))\n    old_assignments = np.copy(assignments)\n    for i in range(max_iters):\n        nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)\n        lls = nb_ll(data[(nb_gene_indices, :)], P_init[(nb_gene_indices, :)], R_init[(nb_gene_indices, :)])\n        lls += pois_ll.poisson_ll(data[((~ nb_gene_indices), :)], means[((~ nb_gene_indices), :)])\n        P_init[((~ nb_gene_indices), :)] = 0\n        R_init[((~ nb_gene_indices), :)] = np.inf\n        for c in range(cells):\n            assignments[c] = np.argmax(lls[(c, :)])\n        if np.equal(assignments, old_assignments).all():\n            break\n        old_assignments = np.copy(assignments)\n    return (assignments, P_init, R_init)", "docstring": "Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.\n\nArgs:\ndata (array): genes x cells\nk (int): number of clusters\nP_init (array): NB success prob param - genes x k. Default: random\nR_init (array): NB stopping param - genes x k. Default: random\nassignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)\nmeans (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None\nmax_iters (int): default: 100\n\nReturns:\nassignments (array): 1d array of length cells, containing integers 0...k-1\nP (array): genes x k - value is 0 for genes with mean > var\nR (array): genes x k - value is inf for genes with mean > var", "source": "codesearchnet"}
{"code": "def Encode(string, encoding=None):\n    del encoding\n    return string", "docstring": "Encode the text string to a byte string.\n\nArgs:\nstring: str, The text string to encode.\nencoding: The suggested encoding if known.\n\nReturns:\nstr, The binary string.", "source": "github-repos"}
{"code": "def get_mac_dot_app_dir(directory):\n    return os.path.dirname(os.path.dirname(os.path.dirname(directory)))", "docstring": "Returns parent directory of mac .app\n\nArgs:\n\ndirectory (str): Current directory\n\nReturns:\n\n(str): Parent directory of mac .app", "source": "codesearchnet"}
{"code": "def create_schema(host):\n    \n    connection = create_blocking_connection(host)\n    channel = connection.channel()\n\n    exchange = settings.get_amqp_settings()[host][\"exchange\"]\n    channel.exchange_declare(\n        exchange=exchange,\n        exchange_type=\"topic\",\n        durable=True\n    )\n    print \"Created exchange '%s'.\" % exchange\n    print \"Creating queues:\"\n\n    queues = settings.get_amqp_settings()[host][\"queues\"]\n    for queue in queues.keys():\n        channel.queue_declare(\n            queue=queue,\n            durable=True,\n            \n        )\n        print \"\\tCreated durable queue '%s'.\" % queue\n\n    print\n    print \"Routing exchanges using routing key to queues:\"\n\n    for queue in queues.keys():\n        channel.queue_bind(\n            queue=queue,\n            exchange=exchange,\n            routing_key=queues[queue]\n        )\n\n        print \"\\tRouting exchange %s['%s'] -> '%s'.\" % (\n            exchange,\n            queues[queue],\n            queue\n        )", "docstring": "Create exchanges, queues and route them.\n\nArgs:\nhost (str): One of the possible hosts.", "source": "juraj-google-style"}
{"code": "def Start(self, seed_list: List[str]=None, skip_seeds: bool=False) -> None:\n    if (not seed_list):\n        seed_list = settings.SEED_LIST\n    logger.debug('Starting up nodeleader')\n    if (not skip_seeds):\n        logger.debug('Attempting to connect to seed list...')\n        for bootstrap in seed_list:\n            if (not is_ip_address(bootstrap)):\n                (host, port) = bootstrap.split(':')\n                bootstrap = f'{hostname_to_ip(host)}:{port}'\n            addr = Address(bootstrap)\n            self.KNOWN_ADDRS.append(addr)\n            self.SetupConnection(addr)\n    logger.debug('Starting up nodeleader: starting peer, mempool, and blockheight check loops')\n    self.start_peer_check_loop()\n    self.start_memcheck_loop()\n    self.start_blockheight_loop()\n    if (settings.ACCEPT_INCOMING_PEERS and (not self.incoming_server_running)):\n\n        class OneShotFactory(Factory):\n\n            def __init__(self, leader):\n                self.leader = leader\n\n            def buildProtocol(self, addr):\n                print(f'building new protocol for addr: {addr}')\n                self.leader.AddKnownAddress(Address(f'{addr.host}:{addr.port}'))\n                p = NeoNode(incoming_client=True)\n                p.factory = self\n                return p\n\n        def listen_err(err):\n            print(f'Failed start listening server for reason: {err.value}')\n\n        def listen_ok(value):\n            self.incoming_server_running = True\n        logger.debug(f'Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}')\n        server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)\n        listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))\n        listenport_deferred.addCallback(listen_ok)\n        listenport_deferred.addErrback(listen_err)", "docstring": "Start connecting to the seed list.\n\nArgs:\nseed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json`\nskip_seeds: skip connecting to seed list", "source": "codesearchnet"}
{"code": "def mode(self, axis=0, numeric_only=False, dropna=True):\n        \n        axis = self._get_axis_number(axis)\n        return self.__constructor__(\n            query_compiler=self._query_compiler.mode(\n                axis=axis, numeric_only=numeric_only, dropna=dropna\n            )\n        )", "docstring": "Perform mode across the DataFrame.\n\nArgs:\naxis (int): The axis to take the mode on.\nnumeric_only (bool): if True, only apply to numeric columns.\n\nReturns:\nDataFrame: The mode of the DataFrame.", "source": "juraj-google-style"}
{"code": "def get_signature_params(func):\n    if is_cython(func):\n        attrs = ['__code__', '__annotations__', '__defaults__', '__kwdefaults__']\n        if all((hasattr(func, attr) for attr in attrs)):\n            original_func = func\n\n            def func():\n                return\n            for attr in attrs:\n                setattr(func, attr, getattr(original_func, attr))\n        else:\n            raise TypeError('{!r} is not a Python function we can process'.format(func))\n    return list(funcsigs.signature(func).parameters.items())", "docstring": "Get signature parameters\n\nSupport Cython functions by grabbing relevant attributes from the Cython\nfunction and attaching to a no-op function. This is somewhat brittle, since\nfuncsigs may change, but given that funcsigs is written to a PEP, we hope\nit is relatively stable. Future versions of Python may allow overloading\nthe inspect 'isfunction' and 'ismethod' functions / create ABC for Python\nfunctions. Until then, it appears that Cython won't do anything about\ncompatability with the inspect module.\n\nArgs:\nfunc: The function whose signature should be checked.\n\nRaises:\nTypeError: A type error if the signature is not supported", "source": "codesearchnet"}
{"code": "def _make_source_table(self, source_list, is_tf_py_library):\n    path_head = 'Source file path'\n    num_nodes_head = '\n    num_tensors_head = '\n    num_dumps_head = '\n    if is_tf_py_library:\n        color = cli_shared.COLOR_GRAY\n        lines = [RL('TensorFlow Python library file(s):', color)]\n    else:\n        color = cli_shared.COLOR_WHITE\n        lines = [RL('File(s) outside TensorFlow Python library:', color)]\n    if not source_list:\n        lines.append(RL('[No files.]'))\n        lines.append(RL())\n        return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n    path_column_width = max(max((len(item[0]) for item in source_list)), len(path_head)) + 1\n    num_nodes_column_width = max(max((len(str(item[2])) for item in source_list)), len(num_nodes_head)) + 1\n    num_tensors_column_width = max(max((len(str(item[3])) for item in source_list)), len(num_tensors_head)) + 1\n    head = RL(path_head + ' ' * (path_column_width - len(path_head)), color)\n    head += RL(num_nodes_head + ' ' * (num_nodes_column_width - len(num_nodes_head)), color)\n    head += RL(num_tensors_head + ' ' * (num_tensors_column_width - len(num_tensors_head)), color)\n    head += RL(num_dumps_head, color)\n    lines.append(head)\n    for file_path, _, num_nodes, num_tensors, num_dumps, first_line_num in source_list:\n        path_attributes = [color]\n        if source_utils.is_extension_uncompiled_python_source(file_path):\n            path_attributes.append(debugger_cli_common.MenuItem(None, 'ps %s -b %d' % (file_path, first_line_num)))\n        line = RL(file_path, path_attributes)\n        line += ' ' * (path_column_width - len(line))\n        line += RL(str(num_nodes) + ' ' * (num_nodes_column_width - len(str(num_nodes))), color)\n        line += RL(str(num_tensors) + ' ' * (num_tensors_column_width - len(str(num_tensors))), color)\n        line += RL(str(num_dumps), color)\n        lines.append(line)\n    lines.append(RL())\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)", "docstring": "Make a table summarizing the source files that create nodes and tensors.\n\nArgs:\nsource_list: List of source files and related information as a list of\ntuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,\nfirst_line).\nis_tf_py_library: (`bool`) whether this table is for files that belong\nto the TensorFlow Python library.\n\nReturns:\nThe table as a `debugger_cli_common.RichTextLines` object.", "source": "github-repos"}
{"code": "async def forget_ticket(self, request):\n        \n        session = await get_session(request)\n        session.pop(self.cookie_name, '')", "docstring": "Called to forget the ticket data a request\n\nArgs:\nrequest: aiohttp Request object.", "source": "juraj-google-style"}
{"code": "def match_datetime(file_name: str, regex_expression: str) -> datetime.datetime:\n\n    def rearrange_time_list(order_list: t.List, time_list: t.List) -> t.List:\n        if order_list == DEFAULT_TIME_ORDER_LIST:\n            return time_list\n        new_time_list = []\n        for i, j in zip(order_list, time_list):\n            dst = DEFAULT_TIME_ORDER_LIST.index(i)\n            new_time_list.insert(dst, j)\n        return new_time_list\n    char_to_replace = {'%Y': ['([0-9]{4})', [0, 1978]], '%m': ['([0-9]{2})', [1, 1]], '%d': ['([0-9]{2})', [2, 1]], '%H': ['([0-9]{2})', [3, 0]], '%M': ['([0-9]{2})', [4, 0]], '%S': ['([0-9]{2})', [5, 0]], '*': ['.*']}\n    missing_idx_list = []\n    temp_expression = regex_expression\n    for key, value in char_to_replace.items():\n        if key != '*' and regex_expression.find(key) == -1:\n            missing_idx_list.append(value[1])\n        else:\n            temp_expression = temp_expression.replace(key, value[0])\n    regex_matches = re.findall(temp_expression, file_name)[0]\n    order_list = [f'%{char}' for char in re.findall('%(\\\\w{1})', regex_expression)]\n    time_list = list(map(int, regex_matches))\n    time_list = rearrange_time_list(order_list, time_list)\n    if missing_idx_list:\n        for [idx, val] in missing_idx_list:\n            time_list.insert(idx, val)\n    return datetime.datetime(*time_list)", "docstring": "Matches the regex string given and extracts the datetime object.\n\nArgs:\nfile_name: File name from which you want to extract datetime.\nregex_expression: Regex expression for extracting datetime from the filename.\n\nReturns:\nA datetime object after extracting from the filename.", "source": "github-repos"}
{"code": "def _init_saver(self, saver=USE_DEFAULT):\n    if saver is Supervisor.USE_DEFAULT:\n        saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)\n        if saver is None and variables.global_variables():\n            saver = saver_mod.Saver()\n            ops.add_to_collection(ops.GraphKeys.SAVERS, saver)\n    self._saver = saver", "docstring": "Initializes saver.\n\nArgs:\nsaver: A `Saver` object. If set to USE_DEFAULT, create one that saves all\nthe variables.", "source": "github-repos"}
{"code": "def search_track(self, artist, album=None, track=None, full_album_art_uri=False):\n    subcategories = [artist]\n    subcategories.append((album or ''))\n    result = self.get_album_artists(full_album_art_uri=full_album_art_uri, subcategories=subcategories, search_term=track, complete_result=True)\n    result._metadata['search_type'] = 'search_track'\n    return result", "docstring": "Search for an artist, an artist's albums, or specific track.\n\nArgs:\nartist (str): an artist's name.\nalbum (str, optional): an album name. Default `None`.\ntrack (str, optional): a track name. Default `None`.\nfull_album_art_uri (bool): whether the album art URI should be\nabsolute (i.e. including the IP address). Default `False`.\n\nReturns:\nA `SearchResult` instance.", "source": "codesearchnet"}
{"code": "def to_dms(angle, style='dms'):\n    \n    sign = 1 if angle >= 0 else -1\n    angle = abs(angle) * 3600\n    minutes, seconds = divmod(angle, 60)\n    degrees, minutes = divmod(minutes, 60)\n    if style == 'dms':\n        return tuple(sign * abs(i) for i in (int(degrees), int(minutes),\n                                             seconds))\n    elif style == 'dm':\n        return tuple(sign * abs(i) for i in (int(degrees),\n                                             (minutes + seconds / 60)))\n    else:\n        raise ValueError('Unknown style type %r' % style)", "docstring": "Convert decimal angle to degrees, minutes and possibly seconds.\n\nArgs:\nangle (float): Angle to convert\nstyle (str): Return fractional or whole minutes values\n\nReturns:\ntuple of int: Angle converted to degrees, minutes and possibly seconds\n\nRaises:\nValueError: Unknown value for ``style``", "source": "juraj-google-style"}
{"code": "def get_enum_from_name(self, enum_name):\n        \n        return next((e for e in self.enums if e.name == enum_name), None)", "docstring": "Return an enum from a name\nArgs:\nenum_name (str): name of the enum\nReturns:\nEnum", "source": "juraj-google-style"}
{"code": "def first(self) -> 'Builder':\n    return self._to_builder(_evaluation.FirstFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath first() function.\n\nReturns:\nAn expression that evaluates to the first element of the parent, or\nempty if the parent has no results.", "source": "github-repos"}
{"code": "def get_descriptor(self):\n    raise NotImplementedError('Base class should not be called directly!')", "docstring": "This function returns a string describing the sniffer. The specific\nstring (and its format) is up to each derived sniffer type.\n\nReturns:\nA string describing the sniffer.", "source": "github-repos"}
{"code": "def add_molecule(self, mol, bond=None, base=None, target=None):\n        \n        ai = self.available_idx()\n        mapping = {n: n + ai - 1 for n, _ in mol.atoms_iter()}\n        relabeled = nx.relabel_nodes(mol.graph, mapping)  \n        self.graph.add_nodes_from(relabeled.nodes(data=True))\n        self.graph.add_edges_from(relabeled.edges(data=True))\n        if bond:\n            self.add_bond(base, mapping[target], bond)", "docstring": "connect atom group (for SMILES parser)\n\nMay requires recalculation of 2D coordinate for drawing\n\nArgs:\nmol: graphmol.Compound()\nthe original object will be copied.\nbond: Bond object to be connected.\nthe original will not be copied so be careful.\nbase: index of atom in self to connect\ntarget: index of atom in group to be connected\nRaises:\nTypeError", "source": "juraj-google-style"}
{"code": "def __frontend_limit_rules_descriptor(self, api_info):\n    \n    if not api_info.frontend_limits.rules:\n      return None\n\n    rules = []\n    for rule in api_info.frontend_limits.rules:\n      descriptor = {}\n      for propname, descname in (('match', 'match'),\n                                 ('qps', 'qps'),\n                                 ('user_qps', 'userQps'),\n                                 ('daily', 'daily'),\n                                 ('analytics_id', 'analyticsId')):\n        if getattr(rule, propname) is not None:\n          descriptor[descname] = getattr(rule, propname)\n      if descriptor:\n        rules.append(descriptor)\n\n    return rules", "docstring": "Builds a frontend limit rules descriptor from API info.\n\nArgs:\napi_info: An _ApiInfo object.\n\nReturns:\nA list of dictionaries with frontend limit rules information.", "source": "juraj-google-style"}
{"code": "def reindex(self, axis, labels, **kwargs):\n        \n\n        \n        \n        def reindex_builer(df, axis, old_labels, new_labels, **kwargs):\n            if axis:\n                while len(df.columns) < len(old_labels):\n                    df[len(df.columns)] = np.nan\n                df.columns = old_labels\n                new_df = df.reindex(columns=new_labels, **kwargs)\n                \n                new_df.columns = pandas.RangeIndex(len(new_df.columns))\n                return new_df\n            else:\n                while len(df.index) < len(old_labels):\n                    df.loc[len(df.index)] = np.nan\n                df.index = old_labels\n                new_df = df.reindex(index=new_labels, **kwargs)\n                \n                new_df.reset_index(inplace=True, drop=True)\n                return new_df\n\n        old_labels = self.columns if axis else self.index\n        new_index = self.index if axis else labels\n        new_columns = labels if axis else self.columns\n        func = self._prepare_method(\n            lambda df: reindex_builer(df, axis, old_labels, labels, **kwargs)\n        )\n        \n        \n        \n        \n        \n        \n        new_data = self._map_across_full_axis(axis, func)\n        return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Fits a new index for this Manger.\n\nArgs:\naxis: The axis index object to target the reindex on.\nlabels: New labels to conform 'axis' on to.\n\nReturns:\nA new QueryCompiler with updated data and new index.", "source": "juraj-google-style"}
{"code": "def exportData(self, datfile):\n        \n\n        def ampl_set(name, values):\n            def format_entry(e):\n                return repr(e).replace(' ', '')\n\n            return 'set {0} := {1};'.format(\n                name, ','.join(format_entry(e) for e in values)\n            )\n\n        def ampl_param(name, values):\n            def format_entry(k, v):\n                k = repr(k).strip('()').replace(' ', '')\n                if v == inf:\n                    v = \"Infinity\"\n                elif v == -inf:\n                    v = \"-Infinity\"\n                else:\n                    v = repr(v).strip('()').replace(' ', '')\n                return '[{0}]{1}'.format(k, v)\n\n            return 'param {0} := {1};'.format(\n                name, ''.join(format_entry(k, v) for k, v in values.items())\n            )\n\n        with open(datfile, 'w') as f:\n            for name, entity in self.getSets():\n                values = entity.getValues().toList()\n                print(ampl_set(name, values), file=f)\n            for name, entity in self.getParameters():\n                if entity.isScalar():\n                    print(\n                        'param {} := {};'.format(name, entity.value()),\n                        file=f\n                    )\n                else:\n                    values = entity.getValues().toDict()\n                    print(ampl_param(name, values), file=f)", "docstring": "Create a .dat file with the data that has been loaded.\n\nArgs:\ndatfile: Path to the file (Relative to the current working\ndirectory or absolute).", "source": "juraj-google-style"}
{"code": "def random(self, shape, tf_fn, kwargs):\n    \n    slice_shape = self.slice_shape(shape)\n    op_seed = random.random()\n    def my_fn(pnum):\n      \n      \n      seed = hash(\"%s,%s\" % (op_seed, self.slice_begin(shape, pnum)))\n      return tf_fn(slice_shape, seed=seed, **kwargs)\n    return self.slicewise(my_fn, self.laid_out_pnum())", "docstring": "Call a random tf operation (e.g. tf.random.uniform).\n\nArgs:\nshape: a Shape\ntf_fn: a function such as tf.random.uniform\nkwargs: kwargs to pass to tf_fn, except for seed\n\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def generate_selected_rules(rule_configs: List[RuleConfig], rules: RulesMap) -> List[RuleChecker]:\n    selected_rules: List[RuleChecker] = []\n    for rule_config in rule_configs:\n        rule_name = rule_config['rule']\n        if rule_name not in rules:\n            raise ValueError('Invalid rule specified.')\n        else:\n            args = rule_config.get('args', {})\n            rule = rules[rule_name](**args)\n            rule.__name__ = rule_name\n            rule.__kwdefaults__ = args\n            selected_rules.append(rule)\n    if len(selected_rules) == 0:\n        raise ValueError('No rules specified.')\n    return selected_rules", "docstring": "Generates rule checkers from the provided rule configs and\nmappable rules.\n\nArgs:\n* rule_configs: List of RuleConfigs, with potential args\n* rules: Typed RulesMap\n\nReturns:\n* List of RuleCheckers with args applied\n\nRaises:\n* ValueError: if non-existent rule name provided", "source": "github-repos"}
{"code": "def _GetMergeTaskStorageFilePath(self, task):\n    \n    filename = '{0:s}.plaso'.format(task.identifier)\n    return os.path.join(self._merge_task_storage_path, filename)", "docstring": "Retrieves the path of a task storage file in the merge directory.\n\nArgs:\ntask (Task): task.\n\nReturns:\nstr: path of a task storage file file in the merge directory.", "source": "juraj-google-style"}
{"code": "def FromString(cls, indata):\n    lines = [x.strip() for x in indata.split('\\n') if ((not x.startswith('\n    if (len(lines) < 3):\n        raise DataError('Invalid CommandFile string that did not contain 3 header lines', lines=lines)\n    (fmt_line, version_line, ascii_line) = lines[:3]\n    if (not version_line.startswith('Format: ')):\n        raise DataError(\"Invalid format version that did not start with 'Format: '\", line=version_line)\n    version = version_line[8:]\n    if (ascii_line != 'Type: ASCII'):\n        raise DataError('Unknown file type line (expected Type: ASCII)', line=ascii_line)\n    cmds = [cls.decode(x) for x in lines[3:]]\n    return CommandFile(fmt_line, version, cmds)", "docstring": "Load a CommandFile from a string.\n\nThe string should be produced from a previous call to\nencode.\n\nArgs:\nindata (str): The encoded input data.\n\nReturns:\nCommandFile: The decoded CommandFile object.", "source": "codesearchnet"}
{"code": "def update_user_attributes(self, user, claims):\n    required_fields = [field.name for field in user._meta.fields if (field.blank is False)]\n    for (field, claim) in settings.CLAIM_MAPPING.items():\n        if hasattr(user, field):\n            if (claim in claims):\n                setattr(user, field, claims[claim])\n                logger.debug(\"Attribute '{}' for user '{}' was set to '{}'.\".format(field, user, claims[claim]))\n            elif (field in required_fields):\n                msg = \"Claim not found in access token: '{}'. Check ADFS claims mapping.\"\n                raise ImproperlyConfigured(msg.format(claim))\n            else:\n                msg = \"Claim '{}' for user field '{}' was not found in the access token for user '{}'. Field is not required and will be left empty\".format(claim, field, user)\n                logger.warning(msg)\n        else:\n            msg = \"User model has no field named '{}'. Check ADFS claims mapping.\"\n            raise ImproperlyConfigured(msg.format(field))", "docstring": "Updates user attributes based on the CLAIM_MAPPING setting.\n\nArgs:\nuser (django.contrib.auth.models.User): User model instance\nclaims (dict): claims from the access token", "source": "codesearchnet"}
{"code": "def from_dict(d):\n    i = Tags()\n    for (k, v) in d.items():\n        if (k not in ('@module', '@class')):\n            i[k] = v\n    return i", "docstring": "Creates Tags object from a dictionary.\n\nArgs:\nd: Dict of feff parameters and values.\n\nReturns:\nTags object", "source": "codesearchnet"}
{"code": "def occurrence(self, indicator=None):\n        \n        self._request_entity = 'fileOccurrence'\n        self._request_uri = '{}/fileOccurrences'.format(self._request_uri)\n        if indicator is not None:\n            self._request_uri = '{}/{}/fileOccurrences'.format(self._api_uri, indicator)", "docstring": "Update the URI to retrieve file occurrences for the provided indicator.\n\nArgs:\nindicator (string): The indicator to retrieve file occurrences.", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if len(shape) < 2:\n        raise ValueError('The tensor to initialize must be at least two-dimensional')\n    num_rows = 1\n    for dim in shape[:-1]:\n        num_rows *= dim\n    num_cols = shape[-1]\n    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))\n    a = self._random_generator.random_normal(flat_shape, dtype=dtype)\n    q, r = gen_linalg_ops.qr(a, full_matrices=False)\n    d = array_ops.tensor_diag_part(r)\n    q *= math_ops.sign(d)\n    if num_rows < num_cols:\n        q = array_ops.matrix_transpose(q)\n    return self.gain * array_ops.reshape(q, shape)", "docstring": "Returns a tensor object initialized to an orthogonal matrix.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point types are\nsupported. If not specified, `tf.keras.backend.floatx()` is used,\nwhich default to `float32` unless you configured it otherwise\n(via `tf.keras.backend.set_floatx(float_dtype)`)\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def _on_action(self, sequence, topic, message):\n        \n\n        try:\n            slug = None\n            parts = topic.split('/')\n            slug = parts[-3]\n            uuid = self._extract_device_uuid(slug)\n        except Exception as exc:\n            self._logger.warn(\"Error parsing slug in action handler (slug=%s, topic=%s)\", slug, topic)\n            return\n\n        if messages.DisconnectCommand.matches(message):\n            self._logger.debug(\"Received disconnect command for device 0x%X\", uuid)\n            key = message['key']\n            client = message['client']\n            self._loop.add_callback(self._disconnect_from_device, uuid, key, client)\n        elif messages.OpenInterfaceCommand.matches(message) or messages.CloseInterfaceCommand.matches(message):\n            self._logger.debug(\"Received %s command for device 0x%X\", message['operation'], uuid)\n            key = message['key']\n            client = message['client']\n            oper = message['operation']\n\n            if oper == 'open_interface':\n                self._loop.add_callback(self._open_interface, client, uuid, message['interface'], key)\n            else:\n                self._loop.add_callback(self._close_interface, client, uuid, message['interface'], key)\n        elif messages.RPCCommand.matches(message):\n            rpc_msg = messages.RPCCommand.verify(message)\n\n            client = rpc_msg['client']\n            address = rpc_msg['address']\n            rpc = rpc_msg['rpc_id']\n            payload = rpc_msg['payload']\n            key = rpc_msg['key']\n            timeout = rpc_msg['timeout']\n\n            self._loop.add_callback(self._send_rpc, client, uuid, address, rpc, payload, timeout, key)\n        elif messages.ScriptCommand.matches(message):\n            script_msg = messages.ScriptCommand.verify(message)\n\n            key = script_msg['key']\n            client = script_msg['client']\n            script = script_msg['script']\n\n            self._loop.add_callback(self._send_script, client, uuid, script, key, (script_msg['fragment_index'], script_msg['fragment_count']))\n        else:\n            self._logger.error(\"Unsupported message received (topic=%s) (message=%s)\", topic, str(message))", "docstring": "Process a command action that we received on behalf of a device.\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "juraj-google-style"}
{"code": "def __init__(self, element_value: ValueSpecOrAnnotation, default: typing.Optional[typing.List[typing.Any]]=MISSING_VALUE, min_size: typing.Optional[int]=None, max_size: typing.Optional[int]=None, size: typing.Optional[int]=None, transform: typing.Optional[typing.Callable[[typing.Any], typing.List[typing.Any]]]=None, is_noneable: bool=False, frozen: bool=False):\n    element_value = ValueSpec.from_annotation(element_value, auto_typing=True)\n    if size is not None and (min_size is not None or max_size is not None):\n        raise ValueError(f'Either \"size\" or \"min_size\"/\"max_size\" pair can be specified. Encountered: size={size}, min_size={min_size}, max_size={max_size}.')\n    if size is not None:\n        min_size = size\n        max_size = size\n    if min_size is None:\n        min_size = 0\n    if min_size < 0:\n        raise ValueError(f'\"min_size\" of List must be no less than 0. Encountered: {min_size}.')\n    if max_size is not None:\n        if max_size < min_size:\n            raise ValueError(f'\"max_size\" of List must be no less than \"min_size\". Encountered: min_size={min_size}, max_size={max_size}')\n    self._element = Field(key_specs.ListKey(min_size, max_size), element_value, 'Field of list element')\n    super().__init__(list, default, transform, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\nelement_value: A ``ValueSpec`` object or an equivalent annotation as the\nspec for the list element.\ndefault: (Optional) default value for this spec.\nmin_size: (Optional) min size of list. If None, 0 will be used.\nmax_size: (Optional) max size of list.\nsize: (Optional) size of List. A shortcut to specify min_size and max_size\nat the same time. `size` and `min_size`/`max_size` are mutual exclusive.\ntransform: (Optional) user-defined function to be called on the input\nof `apply`. It could be used as a type converter or a custom\nvalidator which may raise errors.\nis_noneable: If True, None is acceptable.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def _sorted_results(self, results_dicts):\n    \n    print('results dicts:', results_dicts)\n    sorted_dict = sorted(results_dicts, key=lambda k: k['start_time'])\n    results = []\n    for entry in sorted_dict:\n      results.append(entry['dt'])\n    return results", "docstring": "Sorts dict of results based on log start_time.\n\nSorts the results and returns an array with only the values but sorted\nby oldest value first.value\n\nArgs:\nresults_dicts: List of result dicts\n\nReturns:\nList of only the time but sorted oldest first.", "source": "juraj-google-style"}
{"code": "def _create_filter(col_param, extractor):\n    include_missing_values = (not col_param.exclude_missing_values)\n    if col_param.HasField('filter_regexp'):\n        value_filter_fn = _create_regexp_filter(col_param.filter_regexp)\n    elif col_param.HasField('filter_interval'):\n        value_filter_fn = _create_interval_filter(col_param.filter_interval)\n    elif col_param.HasField('filter_discrete'):\n        value_filter_fn = _create_discrete_set_filter(col_param.filter_discrete)\n    elif include_missing_values:\n        return None\n    else:\n        value_filter_fn = (lambda _: True)\n\n    def filter_fn(session_group):\n        value = extractor(session_group)\n        if (value is None):\n            return include_missing_values\n        return value_filter_fn(value)\n    return filter_fn", "docstring": "Creates a filter for the given col_param and extractor.\n\nArgs:\ncol_param: A tensorboard.hparams.ColParams object identifying the column\nand describing the filter to apply.\nextractor: A function that extract the column value identified by\n'col_param' from a tensorboard.hparams.SessionGroup protobuffer.\nReturns:\nA boolean function taking a tensorboard.hparams.SessionGroup protobuffer\nreturning True if the session group passes the filter described by\n'col_param'. If col_param does not specify a filter (i.e. any session\ngroup passes) returns None.", "source": "codesearchnet"}
{"code": "def make_unique_script_attr(attributes):\n    \n    filtered_attr = []\n    script_list = []\n    for attr in attributes:\n        if attr.Usage != TransactionAttributeUsage.Script:\n            filtered_attr.append(attr)\n        else:\n            data = attr.Data\n            if isinstance(data, UInt160):\n                \n                data = attr.Data.ToArray()\n\n            \n            if data not in script_list:\n                script_list.append(data)\n                filtered_attr.append(attr)\n\n    return filtered_attr", "docstring": "Filter out duplicate `Script` TransactionAttributeUsage types.\nArgs:\nattributes: a list of TransactionAttribute's\n\nReturns:\nlist:", "source": "juraj-google-style"}
{"code": "def generate_poisson_data(centers, n_cells, cluster_probs=None):\n    (genes, clusters) = centers.shape\n    output = np.zeros((genes, n_cells))\n    if (cluster_probs is None):\n        cluster_probs = (np.ones(clusters) / clusters)\n    labels = []\n    for i in range(n_cells):\n        c = np.random.choice(range(clusters), p=cluster_probs)\n        labels.append(c)\n        output[(:, i)] = np.random.poisson(centers[(:, c)])\n    return (output, np.array(labels))", "docstring": "Generates poisson-distributed data, given a set of means for each cluster.\n\nArgs:\ncenters (array): genes x clusters matrix\nn_cells (int): number of output cells\ncluster_probs (array): prior probability for each cluster.\nDefault: uniform.\n\nReturns:\noutput - array with shape genes x n_cells\nlabels - array of cluster labels", "source": "codesearchnet"}
{"code": "def dict_isect(*args):\n    if (not args):\n        return {}\n    else:\n        dictclass = (OrderedDict if isinstance(args[0], OrderedDict) else dict)\n        common_keys = set.intersection(*map(set, args))\n        first_dict = args[0]\n        return dictclass(((k, first_dict[k]) for k in common_keys))", "docstring": "Constructs a dictionary that contains keys common between all inputs.\nThe returned values will only belong to the first dictionary.\n\nArgs:\n*args : a sequence of dictionaries (or sets of keys)\n\nReturns:\nDict | OrderedDict :\nOrderedDict if the first argument is an OrderedDict, otherwise dict\n\nNotes:\nThis function can be used as an alternative to `dict_subset` where any\nkey not in the dictionary is ignored. See the following example:\n\n>>> dict_isect({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd'])\n{'a': 1, 'c': 3}\n\nExample:\n>>> dict_isect({'a': 1, 'b': 1}, {'b': 2, 'c': 2})\n{'b': 1}\n>>> dict_isect(odict([('a', 1), ('b', 2)]), odict([('c', 3)]))\nOrderedDict()\n>>> dict_isect()\n{}", "source": "codesearchnet"}
{"code": "def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response:\n    \n    with handle_exceptions(request, debug) as handler:\n        result = call(methods.items[request.method], *request.args, **request.kwargs)\n        handler.response = SuccessResponse(result=result, id=request.id)\n    return handler.response", "docstring": "Call a Request, catching exceptions to ensure we always return a Response.\n\nArgs:\nrequest: The Request object.\nmethods: The list of methods that can be called.\ndebug: Include more information in error responses.\n\nReturns:\nA Response object.", "source": "juraj-google-style"}
{"code": "def GetTermIdentifier(self):\n    return self._term", "docstring": "Returns the TERM environment variable for the console.\n\nReturns:\nstr: A str that describes the console's text capabilities", "source": "github-repos"}
{"code": "def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':\n    t = tf.reshape(self.tensor, shape)\n    scope = self.scope.as_list()\n    batch = self.batch\n    return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the reshape operation with given `shape`.\n\nArgs:\nshape: The output's shape.\n\nReturns:\nA TensorFluent wrapping the reshape operation.", "source": "codesearchnet"}
{"code": "def resolve_aliases(data_type):\n    \n    if not is_alias(data_type):\n        return data_type\n\n    resolved = resolve_aliases(data_type.data_type)\n    data_type.data_type = resolved\n\n    return resolved", "docstring": "Resolve all chained / nested aliases. This will recursively point\nnested aliases to their resolved data type (first non-alias in the chain).\n\nNote: This differs from unwrap_alias which simply identifies/returns\nthe resolved data type.\n\nArgs:\ndata_type (DataType): The target DataType/Alias to resolve.\nReturn:\nDataType: The resolved type.", "source": "juraj-google-style"}
{"code": "def run(self, data_loaders, workflow, max_epochs, **kwargs):\n        \n        assert isinstance(data_loaders, list)\n        assert mmcv.is_list_of(workflow, tuple)\n        assert len(data_loaders) == len(workflow)\n\n        self._max_epochs = max_epochs\n        work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n        self.logger.info('Start running, host: %s, work_dir: %s',\n                         get_host_info(), work_dir)\n        self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)\n        self.call_hook('before_run')\n\n        while self.epoch < max_epochs:\n            for i, flow in enumerate(workflow):\n                mode, epochs = flow\n                if isinstance(mode, str):  \n                    if not hasattr(self, mode):\n                        raise ValueError(\n                            'runner has no method named \"{}\" to run an epoch'.\n                            format(mode))\n                    epoch_runner = getattr(self, mode)\n                elif callable(mode):  \n                    epoch_runner = mode\n                else:\n                    raise TypeError('mode in workflow must be a str or '\n                                    'callable function, not {}'.format(\n                                        type(mode)))\n                for _ in range(epochs):\n                    if mode == 'train' and self.epoch >= max_epochs:\n                        return\n                    epoch_runner(data_loaders[i], **kwargs)\n\n        time.sleep(1)  \n        self.call_hook('after_run')", "docstring": "Start running.\n\nArgs:\ndata_loaders (list[:obj:`DataLoader`]): Dataloaders for training\nand validation.\nworkflow (list[tuple]): A list of (phase, epochs) to specify the\nrunning order and epochs. E.g, [('train', 2), ('val', 1)] means\nrunning 2 epochs for training and 1 epoch for validation,\niteratively.\nmax_epochs (int): Total training epochs.", "source": "juraj-google-style"}
{"code": "def get(self, value):\n        \n        config = self.get_block('vrf definition %s' % value)\n        if not config:\n            return None\n        response = dict(vrf_name=value)\n        response.update(self._parse_rd(config))\n        response.update(self._parse_description(config))\n        config = self.get_block('no ip routing vrf %s' % value)\n        if config:\n            response['ipv4_routing'] = False\n        else:\n            response['ipv4_routing'] = True\n        config = self.get_block('no ipv6 unicast-routing vrf %s' % value)\n        if config:\n            response['ipv6_routing'] = False\n        else:\n            response['ipv6_routing'] = True\n\n        return response", "docstring": "Returns the VRF configuration as a resource dict.\n\nArgs:\nvalue (string): The vrf name to retrieve from the\nrunning configuration.\n\nReturns:\nA Python dict object containing the VRF attributes as\nkey/value pairs.", "source": "juraj-google-style"}
{"code": "def with_bloomberg(func):\n    \n    @wraps(func)\n    def wrapper(*args, **kwargs):\n\n        scope = utils.func_scope(func=func)\n        param = inspect.signature(func).parameters\n        port = kwargs.pop('port', _PORT_)\n        timeout = kwargs.pop('timeout', _TIMEOUT_)\n        restart = kwargs.pop('restart', False)\n        all_kw = {\n            k: args[n] if n < len(args) else v.default\n            for n, (k, v) in enumerate(param.items()) if k != 'kwargs'\n        }\n        all_kw.update(kwargs)\n        log_level = kwargs.get('log', logs.LOG_LEVEL)\n\n        for to_list in ['tickers', 'flds']:\n            conv = all_kw.get(to_list, None)\n            if hasattr(conv, 'tolist'):\n                all_kw[to_list] = getattr(conv, 'tolist')()\n            if isinstance(conv, str):\n                all_kw[to_list] = [conv]\n\n        cached_data = []\n        if scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']:\n            to_qry = cached.bdp_bds_cache(func=func.__name__, **all_kw)\n            cached_data += to_qry.cached_data\n\n            if not (to_qry.tickers and to_qry.flds):\n                if not cached_data: return pd.DataFrame()\n                res = pd.concat(cached_data, sort=False).reset_index(drop=True)\n                if not all_kw.get('raw', False):\n                    res = assist.format_output(\n                        data=res, source=func.__name__,\n                        col_maps=all_kw.get('col_maps', dict())\n                    )\n                return res\n\n            all_kw['tickers'] = to_qry.tickers\n            all_kw['flds'] = to_qry.flds\n\n        if scope in ['xbbg.blp.bdib']:\n            data_file = storage.hist_file(\n                ticker=all_kw['ticker'], dt=all_kw['dt'], typ=all_kw['typ'],\n            )\n            if files.exists(data_file):\n                logger = logs.get_logger(func, level=log_level)\n                if all_kw.get('batch', False): return\n                logger.debug(f'reading from {data_file} ...')\n                return assist.format_intraday(data=pd.read_parquet(data_file), **all_kw)\n\n        _, new = create_connection(port=port, timeout=timeout, restart=restart)\n        res = func(**{\n            k: v for k, v in all_kw.items() if k not in ['raw', 'col_maps']\n        })\n        if new: delete_connection()\n\n        if scope.startswith('xbbg.blp.') and isinstance(res, list):\n            final = cached_data + res\n            if not final: return pd.DataFrame()\n            res = pd.DataFrame(pd.concat(final, sort=False))\n\n        if (scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']) \\\n                and (not all_kw.get('raw', False)):\n            res = assist.format_output(\n                data=res.reset_index(drop=True), source=func.__name__,\n                col_maps=all_kw.get('col_maps', dict()),\n            )\n\n        return res\n    return wrapper", "docstring": "Wrapper function for Bloomberg connection\n\nArgs:\nfunc: function to wrap", "source": "juraj-google-style"}
{"code": "def matchmaker_request(url, token, method, content_type=None, accept=None, data=None):\n    \n    headers = Headers()\n    headers = { 'X-Auth-Token': token}\n    if content_type:\n        headers['Content-Type'] = content_type\n    if accept:\n        headers['Accept'] = accept\n\n    \n    req_data = data or {'timestamp' : datetime.datetime.now().timestamp()}\n    json_response = None\n    try:\n        LOG.info('Sending {} request to MME url {}. Data sent: {}'.format(\n            method, url, req_data))\n        resp = requests.request(\n            method = method,\n            url = url,\n            headers = headers,\n            data = json.dumps(req_data)\n        )\n        json_response = resp.json()\n        LOG.info('MME server response was:{}'.format(json_response))\n\n        if isinstance(json_response, str):\n            json_response = {\n                'message' : json_response,\n            }\n        elif isinstance(json_response, list): \n            return json_response\n        json_response['status_code'] = resp.status_code\n    except Exception as err:\n        LOG.info('An error occurred while sending HTTP request to server ({})'.format(err))\n        json_response = {\n            'message' : str(err)\n        }\n    return json_response", "docstring": "Send a request to MatchMaker and return its response\n\nArgs:\nurl(str): url to send request to\ntoken(str): MME server authorization token\nmethod(str): 'GET', 'POST' or 'DELETE'\ncontent_type(str): MME request Content-Type\naccept(str): accepted response\ndata(dict): eventual data to send in request\n\nReturns:\njson_response(dict): server response", "source": "juraj-google-style"}
{"code": "def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):\n        \n        \n        if keyfunc is not cls._default:\n            return \"{}.{}[{}, {}, {}]\".format(\n                cls.__module__,\n                cls.__name__,\n                cls._get_fullname(type_),\n                cls._get_bound_repr(bound),\n                keyfunc_name,\n            )\n        return \"{}.{}[{}, {}]\".format(\n            cls.__module__,\n            cls.__name__,\n            cls._get_fullname(type_),\n            cls._get_bound_repr(bound),\n        )", "docstring": "Return a class representation using the slice parameters.\n\nArgs:\ntype_: The type the class was sliced with.\nbound: The boundaries specified for the values of type_.\nkeyfunc: The comparison function used to check the value\nboundaries.\nkeyfunc_name: The name of keyfunc.\n\nReturns:\nA string representing the class.", "source": "juraj-google-style"}
{"code": "def _get_truncated_table_rows(self, query_tokens: List[str], tokenized_table: TokenizedTable, num_rows: int, num_columns: int, max_length: int, truncation_strategy: Union[str, TapasTruncationStrategy]) -> Tuple[int, int]:\n    if not isinstance(truncation_strategy, TapasTruncationStrategy):\n        truncation_strategy = TapasTruncationStrategy(truncation_strategy)\n    if max_length is None:\n        max_length = self.model_max_length\n    if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT:\n        while True:\n            num_tokens = self._get_max_num_tokens(query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length)\n            if num_tokens is not None:\n                break\n            num_rows -= 1\n            if num_rows < 1:\n                break\n    elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE:\n        raise ValueError(f'Unknown truncation strategy {truncation_strategy}.')\n    return (num_rows, num_tokens or 1)", "docstring": "Truncates a sequence pair in-place following the strategy.\n\nArgs:\nquery_tokens (`List[str]`):\nList of strings corresponding to the tokenized query.\ntokenized_table (`TokenizedTable`):\nTokenized table\nnum_rows (`int`):\nTotal number of table rows\nnum_columns (`int`):\nTotal number of table columns\nmax_length (`int`):\nTotal maximum length.\ntruncation_strategy (`str` or [`TapasTruncationStrategy]`):\nTruncation strategy to use. Seeing as this method should only be called when truncating, the only\navailable strategy is the `\"drop_rows_to_fit\"` strategy.\n\nReturns:\n`Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available\nfor each table element.", "source": "github-repos"}
{"code": "def banner(text, border='=', width=80):\n    text_padding = ('{0:^%d}' % width)\n    LOG.info((border * width))\n    LOG.info(text_padding.format(text))\n    LOG.info((border * width))", "docstring": "Center _text_ in a banner _width_ wide with _border_ characters.\n\nArgs:\ntext (str): What to write in the banner\nborder (str): Border character\nwidth (int): How long the border should be", "source": "codesearchnet"}
{"code": "def create_application_configuration(self, name, properties, description=None):\n    if (not hasattr(self, 'applicationConfigurations')):\n        raise NotImplementedError()\n    cv = ApplicationConfiguration._props(name, properties, description)\n    res = self.rest_client.session.post(self.applicationConfigurations, headers={'Accept': 'application/json'}, json=cv)\n    _handle_http_errors(res)\n    return ApplicationConfiguration(res.json(), self.rest_client)", "docstring": "Create an application configuration.\n\nArgs:\nname (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a\n.. versionadded 1.12", "source": "codesearchnet"}
{"code": "def make_conv_bias_activation_tests(activation_op):\n\n    def create_test(options):\n        \n        test_parameters = [{'input_shape': [[1, 3, 4, 3]], 'filter_shape': [[2, 3], [3, 3]], 'filter_2_shape': [[2, 1, 1, 3]], 'strides': [[1, 1, 1, 1]], 'dilations': [[1, 1, 1, 1]], 'data_format': ['NCHW'], 'channel_multiplier': [1, 2], 'fully_quantize': [False], 'dynamic_range_quantize': [False]}]\n\n        def get_tensor_shapes(parameters):\n            input_shape = parameters['input_shape']\n            filter_size = parameters['filter_shape']\n            filter_shape = filter_size + [input_shape[3], parameters['channel_multiplier']]\n            return [input_shape, filter_shape]\n\n        @tf.function(jit_compile=True)\n        def add_conv(input_tensor, filter_input, parameters):\n            out = tf.nn.conv2d(input=input_tensor, filters=filter_input, strides=parameters['strides'], dilations=parameters['dilations'], padding='VALID', data_format=parameters['data_format'])\n            return out\n\n        def add_bias_add(data_input, filter_shape):\n            bias_input = create_tensor_data(np.float32, (filter_shape[-1],))\n            out = tf.nn.bias_add(data_input, bias_input, data_format='NHWC')\n            return out\n\n        def build_graph(parameters):\n            \n            input_shape, filter_shape = get_tensor_shapes(parameters)\n            input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=input_shape)\n            filter_input = create_tensor_data(np.float32, filter_shape, min_value=-10, max_value=10)\n            input_tensors = [input_tensor]\n            if parameters['data_format'] == 'NCHW':\n                out = add_conv(input_tensor, filter_input, parameters)\n            else:\n                out = tf.nn.conv2d(input=input_tensor, filters=filter_input, strides=parameters['strides'], dilations=parameters['dilations'], padding='VALID', data_format=parameters['data_format'])\n            out = add_bias_add(out, filter_shape)\n            out = activation_op(out)\n            filter_input_2 = create_tensor_data(np.float32, parameters['filter_2_shape'], min_value=-10, max_value=10)\n            if parameters['data_format'] == 'NCHW':\n                out = add_conv(out, filter_input_2, parameters)\n            else:\n                out = tf.nn.conv2d(input=out, filters=filter_input_2, strides=parameters['strides'], dilations=parameters['dilations'], padding='VALID', data_format=parameters['data_format'])\n            out = add_bias_add(out, filter_shape)\n            out = activation_op(out)\n            return (input_tensors, [out])\n\n        def build_inputs(parameters, sess, inputs, outputs):\n            \n            input_shape, _ = get_tensor_shapes(parameters)\n            values = [create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)]\n            return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values))))\n        make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=2)\n    return create_test", "docstring": "Make a set of tests to do convolution with activation and bias.\n\nThis test will create multiple consecutive convolutions with NCHW layout to\nmake sure that the tranformations to NHWC works as expected. 
Note this\ndoesn't check any performance so manual checking of the generated model is\nadvised.\n\nArgs:\nactivation_op: The activation op to be used in the test.\n\nReturns:\nThe function that creates the test.", "source": "github-repos"}
{"code": "def __init__(self, dataset_merger, problem_type=transitfeed.TYPE_WARNING,\n               **kwargs):\n    \n    kwargs['type'] = problem_type\n    kwargs['entity_type_name'] = dataset_merger.ENTITY_TYPE_NAME\n    transitfeed.ExceptionWithContext.__init__(self, None, None, **kwargs)\n    self.dataset_merger = dataset_merger", "docstring": "Initialise the exception object.\n\nArgs:\ndataset_merger: The DataSetMerger instance that generated this problem.\nproblem_type: The problem severity. This should be set to one of the\ncorresponding constants in transitfeed.\nkwargs: Keyword arguments to be saved as instance attributes.", "source": "juraj-google-style"}
{"code": "def load(self, validate=True):\n        \n        self._load()\n        try:\n            self.config = self._load_config(self.system_config_file)\n            user = self._load_config(self.global_config_file)\n            config = self._load_config(self.config_file)\n            local = self._load_config(self.config_local_file)\n\n            \n            \n            for conf in [user, config, local]:\n                self.config = self._merge(self.config, conf)\n\n            if validate:\n                self.config = Schema(self.SCHEMA).validate(self.config)\n\n            \n            self.config = configobj.ConfigObj(\n                self.config, write_empty_values=True\n            )\n            self.config.filename = self.config_file\n            self._resolve_paths(self.config, self.config_file)\n        except Exception as ex:\n            raise ConfigError(ex)", "docstring": "Loads config from all the config files.\n\nArgs:\nvalidate (bool): optional flag to tell dvc if it should validate\nthe config or just load it as is. 'True' by default.\n\n\nRaises:\ndvc.config.ConfigError: thrown if config has invalid format.", "source": "juraj-google-style"}
{"code": "def _create_w_objective(m, X, Z=None):\n    \n    genes, clusters = m.shape\n    cells = X.shape[1]\n    nonzeros = (X!=0)\n    def objective(w):\n        \n        \n        w = w.reshape((m.shape[1], X.shape[1]))\n        d = m.dot(w)+eps\n        \n        \n        \n        temp = X/d\n        m_sum = m.T.dot(nonzeros)\n        m2 = m.T.dot(temp)\n        deriv = m_sum - m2\n        return np.sum(nonzeros*(d - X*np.log(d)))/genes, deriv.flatten()/genes\n    return objective", "docstring": "Creates an objective function and its derivative for W, given M and X (data)\n\nArgs:\nm (array): genes x clusters\nX (array): genes x cells\nZ (array): zero-inflation parameters - genes x 1", "source": "juraj-google-style"}
{"code": "def binary_arguments_to_tensors(x1, x2):\n    if ((not isinstance(x1, Tensor)) and (not isinstance(x2, Tensor))):\n        raise ValueError('at least one of x1 and x2 must be an mtf Tensor')\n    elif (isinstance(x1, Tensor) and isinstance(x2, Tensor)):\n        return (x1, x2)\n    elif isinstance(x1, Tensor):\n        return (x1, import_tf_tensor(x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([])))\n    else:\n        return (import_tf_tensor(x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype), Shape([])), x2)", "docstring": "Convert argument of a binary operation to Tensors.\n\nArgs:\nx1: a Tensor or something convertible to a tf Scalar\nx2: a Tensor or something convertible to a tf Scalar\n\nReturns:\nnew_x1: a Tensor\nnew_x2: a Tensor\n\nRaises:\nValueError: on failure", "source": "codesearchnet"}
{"code": "def AddProcessingOptions(self, argument_group):\n    argument_helper_names = ['temporary_directory', 'zeromq']\n    if self._CanEnforceProcessMemoryLimit():\n        argument_helper_names.append('process_resources')\n    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(argument_group, names=argument_helper_names)\n    argument_group.add_argument('--worker-memory-limit', '--worker_memory_limit', dest='worker_memory_limit', action='store', type=int, metavar='SIZE', help='Maximum amount of memory (data segment and shared memory) a worker process is allowed to consume in bytes, where 0 represents no limit. The default limit is 2147483648 (2 GiB). If a worker process exceeds this limit is is killed by the main (foreman) process.')", "docstring": "Adds processing options to the argument group\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def IsErrorSuppressedByNolint(category, linenum):\n    return ((linenum in _error_suppressions.get(category, set())) or (linenum in _error_suppressions.get(None, set())))", "docstring": "Returns true if the specified error category is suppressed on this line.\n\nConsults the global error_suppressions map populated by\nParseNolintSuppressions/ResetNolintSuppressions.\n\nArgs:\ncategory: str, the category of the error.\nlinenum: int, the current line number.\nReturns:\nbool, True iff the error should be suppressed due to a NOLINT comment.", "source": "codesearchnet"}
{"code": "def _group(self, group_data):\n        \n        if isinstance(group_data, dict):\n            \n            xid = group_data.get('xid')\n        else:\n            \n            xid = group_data.xid\n\n        if self.groups.get(xid) is not None:\n            \n            group_data = self.groups.get(xid)\n        elif self.groups_shelf.get(xid) is not None:\n            \n            group_data = self.groups_shelf.get(xid)\n        else:\n            \n            self.groups[xid] = group_data\n        return group_data", "docstring": "Return previously stored group or new group.\n\nArgs:\ngroup_data (dict|obj): An Group dict or instance of Group object.\n\nReturns:\ndict|obj: The new Group dict/object or the previously stored dict/object.", "source": "juraj-google-style"}
{"code": "def publish(self, event_type: str, event_data: dict=None):\n    import inspect\n    import os.path\n    _stack = inspect.stack()\n    _origin = (((os.path.basename(_stack[3][1]) + '::') + _stack[3][3]) + '::L{}'.format(_stack[3][2]))\n    publish(event_type=event_type, event_data=event_data, object_type=self._type, object_id=self._id, object_key=self._key, origin=_origin)", "docstring": "Publish an event associated with the scheduling object.\n\nNote:\nIdeally publish should not be used directly but by other methods\nwhich perform actions on the object.\n\nArgs:\nevent_type (str): Type of event.\nevent_data (dict, optional): Event data.", "source": "codesearchnet"}
{"code": "def SerializeUnsigned(self, writer):\n        \n        writer.WriteUInt32(self.Version)\n        writer.WriteUInt256(self.PrevHash)\n        writer.WriteUInt256(self.MerkleRoot)\n        writer.WriteUInt32(self.Timestamp)\n        writer.WriteUInt32(self.Index)\n        writer.WriteUInt64(self.ConsensusData)\n        writer.WriteUInt160(self.NextConsensus)", "docstring": "Serialize unsigned data only.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def __init__(self, url=None):\n        \n        self._url = None\n        self.users_url = None\n        self.groups_url = None\n\n        self.url = url", "docstring": "Construct a SLUGSConnector.\n\nArgs:\nurl (string): The base URL for the remote SLUGS instance. Optional,\ndefaults to None. Required for authentication.", "source": "juraj-google-style"}
{"code": "def Detect(self, baseline, host_data):\n    \n    result = CheckResult()\n    for detector in self.detectors:\n      finding = detector(baseline, host_data)\n      if finding:\n        result.ExtendAnomalies([finding])\n    if result:\n      return result", "docstring": "Run host_data through detectors and return them if a detector triggers.\n\nArgs:\nbaseline: The base set of rdf values used to evaluate whether an issue\nexists.\nhost_data: The rdf values passed back by the filters.\n\nReturns:\nA CheckResult message containing anomalies if any detectors identified an\nissue, None otherwise.", "source": "juraj-google-style"}
{"code": "def read_full(fileobj, size):\n    if (size < 0):\n        raise ValueError('size must not be negative')\n    data = fileobj.read(size)\n    if (len(data) != size):\n        raise IOError\n    return data", "docstring": "Like fileobj.read but raises IOError if not all requested data is\nreturned.\n\nIf you want to distinguish IOError and the EOS case, better handle\nthe error yourself instead of using this.\n\nArgs:\nfileobj (fileobj)\nsize (int): amount of bytes to read\nRaises:\nIOError: In case read fails or not enough data is read", "source": "codesearchnet"}
{"code": "def get_variant_slice(self, package_name, range_):\n    variant_list = self.variant_lists.get(package_name)\n    if (variant_list is None):\n        variant_list = _PackageVariantList(package_name, self.solver)\n        self.variant_lists[package_name] = variant_list\n    entries = variant_list.get_intersection(range_)\n    if (not entries):\n        return None\n    slice_ = _PackageVariantSlice(package_name, entries=entries, solver=self.solver)\n    return slice_", "docstring": "Get a list of variants from the cache.\n\nArgs:\npackage_name (str): Name of package.\nrange_ (`VersionRange`): Package version range.\n\nReturns:\n`_PackageVariantSlice` object.", "source": "codesearchnet"}
{"code": "def dispatch_command(self, prefix, argv, screen_info=None):\n    if not prefix:\n        raise ValueError('Prefix is empty')\n    resolved_prefix = self._resolve_prefix(prefix)\n    if not resolved_prefix:\n        raise ValueError('No handler is registered for command prefix \"%s\"' % prefix)\n    handler = self._handlers[resolved_prefix]\n    try:\n        output = handler(argv, screen_info=screen_info)\n    except CommandLineExit as e:\n        raise e\n    except SystemExit as e:\n        lines = ['Syntax error for command: %s' % prefix, 'For help, do \"help %s\"' % prefix]\n        output = RichTextLines(lines)\n    except BaseException as e:\n        lines = ['Error occurred during handling of command: %s %s:' % (resolved_prefix, ' '.join(argv)), '%s: %s' % (type(e), str(e))]\n        lines.append('')\n        lines.extend(traceback.format_exc().split('\\n'))\n        output = RichTextLines(lines)\n    if not isinstance(output, RichTextLines) and output is not None:\n        raise ValueError('Return value from command handler %s is not None or a RichTextLines instance' % str(handler))\n    return output", "docstring": "Handles a command by dispatching it to a registered command handler.\n\nArgs:\nprefix: Command prefix, as a str, e.g., \"print\".\nargv: Command argument vector, excluding the command prefix, represented\nas a list of str, e.g.,\n[\"tensor_1\"]\nscreen_info: A dictionary containing screen info, e.g., {\"cols\": 100}.\n\nReturns:\nAn instance of RichTextLines or None. If any exception is caught during\nthe invocation of the command handler, the RichTextLines will wrap the\nerror type and message.\n\nRaises:\nValueError: If\n1) prefix is empty, or\n2) no command handler is registered for the command prefix, or\n3) the handler is found for the prefix, but it fails to return a\nRichTextLines or raise any exception.\nCommandLineExit:\nIf the command handler raises this type of exception, this method will\nsimply pass it along.", "source": "github-repos"}
{"code": "def convert_to_tensor_or_ragged_tensor(value, dtype=None, preferred_dtype=None, name=None):\n    if isinstance(value, RaggedTensor):\n        if dtype and (not dtype.is_compatible_with(value.dtype)):\n            raise ValueError(f'Tensor conversion requested dtype {dtype.name} for RaggedTensor with dtype {value.dtype.name}: {value}.')\n        return value\n    elif isinstance(value, ragged_tensor_value.RaggedTensorValue):\n        with ops.name_scope(name, 'ConvertToTensorOrRaggedTensor', []):\n            flat_values = ops.convert_to_tensor(value=value.flat_values, dtype=dtype, dtype_hint=preferred_dtype, name='flat_values')\n            return RaggedTensor.from_nested_row_splits(flat_values, value.nested_row_splits, validate=False)\n    else:\n        return tensor_conversion.convert_to_tensor_v2_with_dispatch(value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)", "docstring": "Converts value to a `RaggedTensor` or `Tensor`.\n\n* If `value` is a `RaggedTensor`, then return it as-is.\n* If `value` is a `RaggedTensorValue`, return a corresponding constant\n`RaggedTensor`.\n* Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.\n\nArgs:\nvalue: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has\na registered `Tensor` conversion function.\ndtype: Optional element type for the returned tensor.  If missing the type\nis inferred from the type of `value`.\npreferred_dtype: Optional element type for the returned tensor, used when\ndtype is None.  This argument has no effect if `value` is already a\ntensor, or when conversion is not possible.\nname: Optional name to use if a new `Tensor` is created.\n\nReturns:\nA `Tensor` or `RaggedTensor`.", "source": "github-repos"}
{"code": "def _slice_params_to_dict(dist, params_event_ndims, slices):\n  \n  override_dict = {}\n  for param_name, param_event_ndims in six.iteritems(params_event_ndims):\n    \n    if param_name not in dist.parameters:\n      raise ValueError('Distribution {} is missing advertised '\n                       'parameter {}'.format(dist, param_name))\n    param = dist.parameters[param_name]\n    if param is None:\n      \n      \n      continue\n    dtype = None\n    if hasattr(dist, param_name):\n      attr = getattr(dist, param_name)\n      dtype = getattr(attr, 'dtype', None)\n    if dtype is None:\n      dtype = dist.dtype\n      warnings.warn('Unable to find property getter for parameter Tensor {} '\n                    'on {}, falling back to Distribution.dtype {}'.format(\n                        param_name, dist, dtype))\n    param = tf.convert_to_tensor(value=param, dtype=dtype)\n    override_dict[param_name] = _slice_single_param(param, param_event_ndims,\n                                                    slices,\n                                                    dist.batch_shape_tensor())\n  return override_dict", "docstring": "Computes the override dictionary of sliced parameters.\n\nArgs:\ndist: The tfd.Distribution being batch-sliced.\nparams_event_ndims: Per-event parameter ranks, a `str->int` `dict`.\nslices: Slices as received by __getitem__.\n\nReturns:\noverrides: `str->Tensor` `dict` of batch-sliced parameter overrides.", "source": "juraj-google-style"}
{"code": "def _contains_internal_dynamic_call(contract):\n    for func in contract.all_functions_called:\n        for node in func.nodes:\n            for ir in node.irs:\n                if isinstance(ir, InternalDynamicCall):\n                    return True\n    return False", "docstring": "Checks if a contract contains a dynamic call either in a direct definition, or through inheritance.\n\nReturns:\n(boolean): True if this contract contains a dynamic call (including through inheritance).", "source": "codesearchnet"}
{"code": "def __init__(self, **sections):\n        \n        self._sections = []\n        for sct_name, sct_meta in sections.items():\n            if _is_valid(sct_name):\n                setattr(self, sct_name, Section(**sct_meta.def_))\n                self._sections.append(sct_name)\n            else:\n                raise error.SectionError(sct_name)\n        self._parser = None\n        self._nosub_valid = False\n        self._subcmds = {}\n        self._config_files = ()", "docstring": "Initialization of instances.\n\nArgs:\nsections (:class:`~loam.manager.Section`): section metadata. The\nname of each *section* is the name of the keyword argument\npassed on to this function. Section names should be valid\nidentifiers, otherwise a :class:`~loam.error.SectionError` is\nraised.", "source": "juraj-google-style"}
{"code": "def build_avatar_url(jid):\n    digest = md5(str(jid).encode('utf-8')).hexdigest()\n    return 'http:", "docstring": "Static method to build a gravatar url with the agent's JID\n\nArgs:\njid (aioxmpp.JID): an XMPP identifier\n\nReturns:\nstr: an URL for the gravatar", "source": "codesearchnet"}
{"code": "def _insert(self, new_item, feed_item):\n    filename = feed_item.get(FieldMap.CREATIVE_ASSET_FILE_NAME, None)\n    file_buffer = object_get(self.config, self.auth, '%s:%s' % (feed_item.get(FieldMap.CREATIVE_ASSET_BUCKET_NAME, None), filename))\n    file_mime = mimetypes.guess_type(filename, strict=False)[0]\n    media = MediaIoBaseUpload(BytesIO(file_buffer), mimetype=file_mime, chunksize=CHUNKSIZE, resumable=True)\n    result = self._api().insert(profileId=self.profile_id, advertiserId=str(feed_item.get(FieldMap.ADVERTISER_ID, None)), media_body=media, body=new_item).execute()\n    return result", "docstring": "Handles the upload of creative assets to DCM and the creation of the associated entity.\n\nThis method makes a call to the DCM API to create a new entity.\n\nArgs:\nnew_item: The item to insert into DCM.\nfeed_item: The feed item representing the creative asset from the\nBulkdozer feed.\n\nReturns:\nThe newly created item in DCM.", "source": "github-repos"}
{"code": "def get_query_parameters(args, cell_body, date_time=datetime.datetime.now()):\n    env = google.datalab.utils.commands.notebook_environment()\n    config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False)\n    sql = args['query']\n    if (sql is None):\n        raise Exception('Cannot extract query parameters in non-query cell')\n    if config:\n        jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA)\n    config = (config or {})\n    config_parameters = config.get('parameters', [])\n    return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time)", "docstring": "Extract query parameters from cell body if provided\nAlso validates the cell body schema using jsonschema to catch errors before sending the http\nrequest. This validation isn't complete, however; it does not validate recursive schemas,\nbut it acts as a good filter against most simple schemas\n\nArgs:\nargs: arguments passed to the magic cell\ncell_body: body of the magic cell\ndate_time: The timestamp at which the date-time related parameters need to be resolved.\n\nReturns:\nValidated object containing query parameters", "source": "codesearchnet"}
{"code": "def get_ogr_driver(filepath):\n    \n    filename, file_extension = os.path.splitext(filepath)\n    EXTENSION = file_extension[1:]\n\n    ogr_driver_count = ogr.GetDriverCount()\n    for idx in range(ogr_driver_count):\n        driver = ogr.GetDriver(idx)\n        driver_extension = driver.GetMetadataItem(str('DMD_EXTENSION')) or ''\n        driver_extensions = driver.GetMetadataItem(str('DMD_EXTENSIONS')) or ''\n\n        if EXTENSION == driver_extension or EXTENSION in driver_extensions:\n            return driver\n\n    else:\n        msg = 'No driver found for the following file extension: {}'.format(\n            EXTENSION)\n        raise ValueError(msg)", "docstring": "Get the OGR driver from the provided file extension.\n\nArgs:\nfile_extension (str): file extension\n\nReturns:\nosgeo.ogr.Driver\n\nRaises:\nValueError: no driver is found", "source": "juraj-google-style"}
{"code": "class TimeSeriesFeatureEmbedder(nn.Module):\n\n    def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:\n        super().__init__()\n        self.num_features = len(cardinalities)\n        self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])\n\n    def forward(self, features: torch.Tensor) -> torch.Tensor:\n        if self.num_features > 1:\n            cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)\n        else:\n            cat_feature_slices = [features]\n        return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)", "docstring": "Embed a sequence of categorical features.\n\nArgs:\ncardinalities (`list[int]`):\nList of cardinalities of the categorical features.\nembedding_dims (`list[int]`):\nList of embedding dimensions of the categorical features.", "source": "github-repos"}
{"code": "def put(self, key, value):\n    \n    path = self.object_path(key)\n    self._write_object(path, value)", "docstring": "Stores the object `value` named by `key`.\n\nArgs:\nkey: Key naming `value`\nvalue: the object to store.", "source": "juraj-google-style"}
{"code": "def setdefault(pb_or_dict, key, value):\n    if (not get(pb_or_dict, key, default=None)):\n        set(pb_or_dict, key, value)", "docstring": "Set the key on the object to the value if the current value is falsy.\n\nBecause protobuf Messages do not distinguish between unset values and\nfalsy ones particularly well, this method treats any falsy value\n(e.g. 0, empty list) as a target to be overwritten, on both Messages\nand dictionaries.\n\nArgs:\npb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the\nobject.\nkey (str): The key on the object in question.\nvalue (Any): The value to set.\n\nRaises:\nTypeError: If pb_or_dict is not a Message or Mapping.", "source": "codesearchnet"}
{"code": "def _expansion_request_url_for_value_set_url(value_set_url: str) -> Tuple[str, str]:\n    value_set_domain = urllib.parse.urlparse(value_set_url).netloc\n    root_url = TERMINOLOGY_BASE_URL_PER_DOMAIN.get(value_set_domain)\n    if root_url is None:\n        raise ValueError('Unknown domain %s. Can not find appropriate terminology server.' % value_set_domain)\n    return (root_url, urllib.parse.urljoin(root_url, 'ValueSet/$expand'))", "docstring": "Builds a URL for querying a terminology service to expand `value_set_url`.\n\nArgs:\nvalue_set_url: The URL being expanded.\n\nRaises:\nValueError: If a terminology service can not be found for `value_set_url`.\n\nReturns:\nA tuple of (root_url, expansion_url) where root_url is the root URL of the\nterminology service and expansion_url is the URL to use when performing\nvalue set expansion against that terminology service.", "source": "github-repos"}
{"code": "def translate_index(index_name):\n    \n    uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())\n    if not uuid:\n        try:\n            index_info = globus_sdk.SearchClient().get_index(index_name).data\n            if not isinstance(index_info, dict):\n                raise ValueError(\"Multiple UUIDs possible\")\n            uuid = index_info.get(\"id\", index_name)\n        except Exception:\n            uuid = index_name\n    return uuid", "docstring": "Translate a known Globus Search index into the index UUID.\nThe UUID is the proper way to access indices, and will eventually be the only way.\nThis method will return names it cannot disambiguate.\n\nArguments:\nindex_name (str): The name of the index.\n\nReturns:\nstr: The UUID of the index. If the index is not known and is not unambiguous,\nthis will be the ``index_name`` unchanged instead.", "source": "juraj-google-style"}
{"code": "def timestampFormat(self, timestampFormat):\n    if (not isinstance(timestampFormat, str)):\n        raise TypeError('not of type unicode')\n    self._timestampFormat = timestampFormat", "docstring": "Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime\n\nRaises:\nAssertionError: if timestampFormat is not of type unicode.\n\nArgs:\ntimestampFormat (unicode): assign timestampFormat to _timestampFormat.\nFormatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.", "source": "codesearchnet"}
{"code": "def send_state_event(self, room_id, event_type, content, state_key=\"\",\n                         timestamp=None):\n        \n        path = \"/rooms/%s/state/%s\" % (\n            quote(room_id), quote(event_type),\n        )\n        if state_key:\n            path += \"/%s\" % (quote(state_key))\n        params = {}\n        if timestamp:\n            params[\"ts\"] = timestamp\n        return self._send(\"PUT\", path, content, query_params=params)", "docstring": "Perform PUT /rooms/$room_id/state/$event_type\n\nArgs:\nroom_id(str): The room ID to send the state event in.\nevent_type(str): The state event type to send.\ncontent(dict): The JSON content to send.\nstate_key(str): Optional. The state key for the event.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def subprogram_signature(vo, fullname=None):\n  \n\n  if fullname is None:\n    fullname = vo.name\n\n  if isinstance(vo, VhdlFunction):\n    plist = ','.join(p.data_type for p in vo.parameters)\n    sig = '{}[{} return {}]'.format(fullname, plist, vo.return_type)\n  else: \n    plist = ','.join(p.data_type for p in vo.parameters)\n    sig = '{}[{}]'.format(fullname, plist)\n\n  return sig", "docstring": "Generate a signature string\n\nArgs:\nvo (VhdlFunction, VhdlProcedure): Subprogram object\nReturns:\nSignature string.", "source": "juraj-google-style"}
{"code": "def pprint(sequence, keys=None):\n    if (len(sequence) > 0):\n        columns = calculate_columns(sequence)\n        row_format = calculate_row_format(columns, keys)\n        header = (row_format % dict([(key, key.title()) for key in columns]))\n        separator = (row_format % dict([(key, ('-' * columns[key])) for key in columns]))\n        print(separator)\n        print(header)\n        print(separator)\n        for row in sequence:\n            print((row_format % row))\n        print(separator)", "docstring": "Print sequence as ascii table to stdout.\n\nArgs:\nsequence (list or tuple): a sequence with a dictionary each entry.\nkeys (list): optional list of keys to order columns as well as to filter for them.", "source": "codesearchnet"}
{"code": "def combine_graph_defs(to_proto, from_proto):\n    if (from_proto.version != to_proto.version):\n        raise ValueError('Cannot combine GraphDefs of different versions.')\n    try:\n        _safe_copy_proto_list_values(to_proto.node, from_proto.node, (lambda n: n.name))\n    except _ProtoListDuplicateKeyError as exc:\n        raise ValueError(('A GraphDef contains non-unique node names: %s' % exc))\n    except _SameKeyDiffContentError as exc:\n        raise ValueError(('Cannot combine GraphDefs because nodes share a name but contents are different: %s' % exc))\n    try:\n        _safe_copy_proto_list_values(to_proto.library.function, from_proto.library.function, (lambda n: n.signature.name))\n    except _ProtoListDuplicateKeyError as exc:\n        raise ValueError(('A GraphDef contains non-unique function names: %s' % exc))\n    except _SameKeyDiffContentError as exc:\n        raise ValueError(('Cannot combine GraphDefs because functions share a name but are different: %s' % exc))\n    try:\n        _safe_copy_proto_list_values(to_proto.library.gradient, from_proto.library.gradient, (lambda g: g.gradient_func))\n    except _ProtoListDuplicateKeyError as exc:\n        raise ValueError(('A GraphDef contains non-unique gradient function names: %s' % exc))\n    except _SameKeyDiffContentError as exc:\n        raise ValueError(('Cannot combine GraphDefs because gradients share a gradient_func name but map to different functions: %s' % exc))\n    return to_proto", "docstring": "Combines two GraphDefs by adding nodes from from_proto into to_proto.\n\nAll GraphDefs are expected to be of TensorBoard's.\nIt assumes node names are unique across GraphDefs if contents differ. The\nnames can be the same if the NodeDef content are exactly the same.\n\nArgs:\nto_proto: A destination TensorBoard GraphDef.\nfrom_proto: A TensorBoard GraphDef to copy contents from.\n\nReturns:\nto_proto\n\nRaises:\nValueError in case any assumption about GraphDef is violated: A\nGraphDef should have unique node, function, and gradient function\nnames. Also, when merging GraphDefs, they should have not have nodes,\nfunctions, or gradient function mappings that share the name but details\ndo not match.", "source": "codesearchnet"}
{"code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n    sp_tensor = transformation_cache.get(self, state_manager)\n    dense_tensor = sparse_ops.sparse_tensor_to_dense(sp_tensor, default_value=self.default_value)\n    dense_shape = array_ops.concat([array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape], axis=0)\n    dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)\n    if sp_tensor.shape.ndims == 2:\n        num_elements = self.variable_shape.num_elements()\n    else:\n        num_elements = 1\n    seq_length = fc_utils.sequence_length_from_sparse_tensor(sp_tensor, num_elements=num_elements)\n    return fc.SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=seq_length)", "docstring": "Returns a `TensorSequenceLengthPair`.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.", "source": "github-repos"}
{"code": "def is_debug_node(node_name):\n    return node_name.startswith('__dbg_')", "docstring": "Determine whether a node name is that of a debug node.\n\nSuch nodes are inserted by TensorFlow core upon request in\nRunOptions.debug_options.debug_tensor_watch_opts.\n\nArgs:\nnode_name: Name of the node.\n\nReturns:\nA bool indicating whether the input argument is the name of a debug node.", "source": "github-repos"}
{"code": "def put(self, json=None):\n    return self._call('put', url=self.endpoint, json=json)", "docstring": "Send a PUT request and return the JSON decoded result.\n\nArgs:\njson (dict, optional): Object to encode and send in request.\n\nReturns:\nmixed: JSON decoded response data.", "source": "codesearchnet"}
{"code": "def setAvatar(self, image):\n        \n        self.conn(\"PUT\", \"{0}/users/{1}/profile/avatar\".format(SkypeConnection.API_USER, self.userId),\n                  auth=SkypeConnection.Auth.SkypeToken, data=image.read())", "docstring": "Update the profile picture for the current user.\n\nArgs:\nimage (file): a file-like object to read the image from", "source": "juraj-google-style"}
{"code": "def __init__(self, **kwargs):\n        \n        self.address = self.name = address.new(kwargs['name'])\n        \n        self.subgraph = None\n        self.params = {}\n        log.debug('New target: %s', self.address)\n\n        try:\n            for param_name, param_type in self.required_params:\n                self.params[param_name] = kwargs.pop(param_name)\n                assert isinstance(self.params[param_name], param_type)\n        except AssertionError as err:\n            if isinstance(param_type, tuple) and len(param_type) > 1:\n                msg = 'one of: %s' % ', '.join(param_type.__name__)\n            else:\n                msg = str(param_type.__name__)\n            raise error.InvalidRule(\n                'While loading %s: Invalid type for %s. '\n                'Expected: %s. Actual: %s.' % (\n                    self.address, param_name, msg,\n                    repr(self.params[param_name])))\n        except KeyError as err:\n            log.error(err)\n            raise error.InvalidRule(\n                'While loading %s: Required parameter %s not given.' % repr(\n                    self.address, param_name))\n\n        for (param_name, param_type, param_default) in self.optional_params:\n            if param_name not in kwargs:\n                self.params[param_name] = param_default\n            else:\n                self.params[param_name] = kwargs.pop(param_name)\n                if not isinstance(self.params[param_name], param_type):\n                    msg = str(param_type.__name__)\n                    if isinstance(param_type, tuple) and len(param_type) > 1:\n                        msg = 'one of: %s' % ', '.join(param_type.__name__)\n                    raise error.InvalidRule(\n                        'While loading %s: Invalid type for %s. '\n                        'Expected: %s. Actual: %s.' % (\n                            self.address, param_name, msg,\n                            repr(self.params[param_name])))\n\n        if kwargs:  \n            raise error.InvalidRule(\n                '[%s]: Unknown argument(s): %s' % (\n                    self.address, ', '.join(kwargs.keys())))\n\n        if self.graphcontext is not None:\n            self.graphcontext.add_node(self.address, target_obj=self)\n            \n\n        try:\n            self.validate_args()\n        except AssertionError as err:\n            raise error.InvalidRule('Error in %s: %s' % (self.address, err))", "docstring": "Initialize the build rule.\n\nArgs:\n**kwargs: Assorted parameters; see subclass implementations for\ndetails.", "source": "juraj-google-style"}
{"code": "def serialize_array(array, domain=(0, 1), fmt='png', quality=70):\n  \n  normalized = _normalize_array(array, domain=domain)\n  return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)", "docstring": "Given an arbitrary rank-3 NumPy array,\nreturns the byte representation of the encoded image.\n\nArgs:\narray: NumPy array of dtype uint8 and range 0 to 255\ndomain: expected range of values in array, see `_normalize_array()`\nfmt: string describing desired file format, defaults to 'png'\nquality: specifies compression quality from 0 to 100 for lossy formats\n\nReturns:\nimage data as BytesIO buffer", "source": "juraj-google-style"}
{"code": "def list_alias():\n    alias_table = get_alias_table()\n    output = []\n    for alias in alias_table.sections():\n        if alias_table.has_option(alias, 'command'):\n            output.append({'alias': alias, 'command': ' '.join(alias_table.get(alias, 'command').split())})\n    return output", "docstring": "List all registered aliases.\n\nReturns:\nAn array of  dictionary containing the alias and the command that it points to.", "source": "codesearchnet"}
{"code": "def init(self, address, hard_reset=False):\n    self.address = address\n    if hard_reset:\n        pass\n    for i in range(Dongle.PORT_RETRIES):\n        try:\n            logger.debug('Setting up BGAPI, attempt {}/{}'.format((i + 1), Dongle.PORT_RETRIES))\n            self.api = BlueGigaAPI(port=self.address, callbacks=self, baud=Dongle.BAUDRATE, timeout=DEF_TIMEOUT)\n            self.api.start_daemon()\n            break\n        except serial.serialutil.SerialException as e:\n            logger.debug('Failed to init BlueGigaAPI: {}, attempt {}/{}'.format(e, (i + 1), Dongle.PORT_RETRIES))\n            time.sleep(0.1)\n    if (self.api is None):\n        return False\n    time.sleep(0.5)\n    self.get_supported_connections()\n    logger.info('Dongle supports {} connections'.format(self.supported_connections))\n    if (self.supported_connections == (- 1)):\n        logger.error('Failed to retrieve number of supported connections from the dongle! (try reinserting it)')\n        return False\n    self.conn_state = {x: self._STATE_IDLE for x in range(self.supported_connections)}\n    self.reset()\n    self._cbthread = threading.Thread(target=self._cbthreadfunc)\n    self._cbthread.setDaemon(True)\n    self._cbthread_q = Queue()\n    self._cbthread.start()\n    return True", "docstring": "Open the serial connection to a dongle at the supplied address.\n\nArgs:\naddress (str): the serial port address of the BLED112 dongle, e.g. 'COM5'\nhard_reset (bool): not currently used\n\nReturns:\nTrue if a connection with the dongle was established, False otherwise.", "source": "codesearchnet"}
{"code": "def serialize(metric):\n    return serialization_lib.serialize_keras_object(metric)", "docstring": "Serializes metric function or `Metric` instance.\n\nArgs:\nmetric: A Keras `Metric` instance or a metric function.\n\nReturns:\nMetric configuration dictionary.", "source": "github-repos"}
{"code": "def max_range(ranges, combined=True):\n    try:\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', 'All-NaN (slice|axis) encountered')\n            values = [tuple(((np.NaN if (v is None) else v) for v in r)) for r in ranges]\n            if (pd and any(((isinstance(v, datetime_types) and (not isinstance(v, cftime_types))) for r in values for v in r))):\n                converted = []\n                for (l, h) in values:\n                    if (isinstance(l, datetime_types) and isinstance(h, datetime_types)):\n                        (l, h) = (pd.Timestamp(l).to_datetime64(), pd.Timestamp(h).to_datetime64())\n                    converted.append((l, h))\n                values = converted\n            arr = np.array(values)\n            if (not len(arr)):\n                return (np.NaN, np.NaN)\n            elif (arr.dtype.kind in 'OSU'):\n                arr = list(python2sort([v for r in values for v in r if ((not is_nan(v)) and (v is not None))]))\n                return (arr[0], arr[(- 1)])\n            elif (arr.dtype.kind in 'M'):\n                return ((arr.min(), arr.max()) if combined else (arr[(:, 0)].min(), arr[(:, 1)].min()))\n            if combined:\n                return (np.nanmin(arr), np.nanmax(arr))\n            else:\n                return (np.nanmin(arr[(:, 0)]), np.nanmax(arr[(:, 1)]))\n    except:\n        return (np.NaN, np.NaN)", "docstring": "Computes the maximal lower and upper bounds from a list bounds.\n\nArgs:\nranges (list of tuples): A list of range tuples\ncombined (boolean, optional): Whether to combine bounds\nWhether range should be computed on lower and upper bound\nindependently or both at once\n\nReturns:\nThe maximum range as a single tuple", "source": "codesearchnet"}
{"code": "def angle_to_distance(angle, units='metric'):\n    distance = (math.radians(angle) * BODY_RADIUS)\n    if (units in ('km', 'metric')):\n        return distance\n    elif (units in ('sm', 'imperial', 'US customary')):\n        return (distance / STATUTE_MILE)\n    elif (units in ('nm', 'nautical')):\n        return (distance / NAUTICAL_MILE)\n    else:\n        raise ValueError(('Unknown units type %r' % units))", "docstring": "Convert angle in to distance along a great circle.\n\nArgs:\nangle (float): Angle in degrees to convert to distance\nunits (str): Unit type to be used for distances\n\nReturns:\nfloat: Distance in ``units``\n\nRaises:\nValueError: Unknown value for ``units``", "source": "codesearchnet"}
{"code": "def convert_data_to_dtype(data, data_type, mot_float_type='float'):\n    scalar_dtype = ctype_to_dtype(data_type, mot_float_type)\n    if isinstance(data, numbers.Number):\n        data = scalar_dtype(data)\n    if is_vector_ctype(data_type):\n        shape = data.shape\n        dtype = ctype_to_dtype(data_type, mot_float_type)\n        ve = np.zeros(shape[:(- 1)], dtype=dtype)\n        if (len(shape) == 1):\n            for vector_ind in range(shape[0]):\n                ve[0][vector_ind] = data[vector_ind]\n        elif (len(shape) == 2):\n            for i in range(data.shape[0]):\n                for vector_ind in range(data.shape[1]):\n                    ve[i][vector_ind] = data[(i, vector_ind)]\n        elif (len(shape) == 3):\n            for i in range(data.shape[0]):\n                for j in range(data.shape[1]):\n                    for vector_ind in range(data.shape[2]):\n                        ve[(i, j)][vector_ind] = data[(i, j, vector_ind)]\n        return np.require(ve, requirements=['C', 'A', 'O'])\n    return np.require(data, scalar_dtype, ['C', 'A', 'O'])", "docstring": "Convert the given input data to the correct numpy type.\n\nArgs:\ndata (ndarray): The value to convert to the correct numpy type\ndata_type (str): the data type we need to convert the data to\nmot_float_type (str): the data type of the current ``mot_float_type``\n\nReturns:\nndarray: the input data but then converted to the desired numpy data type", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    output = asdict(self)\n    output['structure_module'] = self.structure_module.to_dict()\n    return output", "docstring": "Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\nReturns:\n`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def spawn_agent(self, agent_definition, location):\n    self._should_write_to_command_buffer = True\n    self._add_agents(agent_definition)\n    command_to_send = SpawnAgentCommand(location, agent_definition.name, agent_definition.type)\n    self._commands.add_command(command_to_send)", "docstring": "Queues a spawn agent command. It will be applied when `tick` or `step` is called next.\nThe agent won't be able to be used until the next frame.\n\nArgs:\nagent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn.\nlocation (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters).", "source": "codesearchnet"}
{"code": "def fetch_layout(tensor: tensor_lib.Tensor) -> layout_lib.Layout:\n    return _dtensor_device().fetch_layout(tensor)", "docstring": "Fetches the layout of a DTensor.\n\nArgs:\ntensor: The DTensor whose layout is to be fetched.\n\nReturns:\nThe `Layout` of this DTensor.\n\nRaises:\nRuntimeError: When not called eagerly.", "source": "github-repos"}
{"code": "def get_func_and_args_from_str(call_str):\n    open_paren_index = call_str.find('(')\n    close_paren_index = call_str.rfind(')')\n    function_name = call_str[:call_str.find('(')]\n    args = call_str[open_paren_index + 1:close_paren_index].split(',')\n    args = [arg.split('=')[0].strip() for arg in args]\n    args = [arg for arg in args if arg]\n    return (function_name, args)", "docstring": "Parse call string to get function and argument names.\n\nArgs:\ncall_str: Call string must be in the form:\n`tf.foo(arg1=val1, arg2=val2, ...)`.\n\nReturns:\n(function_name, list of arg names) tuple.", "source": "github-repos"}
{"code": "def __init__(\n        self, \n        bar_gram,\n        midi_df_list, \n        batch_size=20, \n        seq_len=10, \n        time_fraction=0.1,\n        conditional_flag=True\n    ):\n        \n        if isinstance(bar_gram, BarGram) is False:\n            raise TypeError()\n\n        self.__bar_gram = bar_gram\n\n        program_list = []\n        self.__midi_df_list = midi_df_list\n        for i in range(len(self.__midi_df_list)):\n            program_list.extend(\n                self.__midi_df_list[i][\"program\"].drop_duplicates().values.tolist()\n            )\n        program_list = list(set(program_list))\n\n        self.__batch_size = batch_size\n        self.__seq_len = seq_len\n        self.__channel = len(program_list)\n        self.__program_list = program_list\n        self.__time_fraction = time_fraction\n        self.__dim = self.__bar_gram.dim\n        self.__conditional_flag = conditional_flag", "docstring": "Init.\n\nArgs:\nbar_gram:           is-a `BarGram`.\nmidi_df_list:      `list` of paths to MIDI data extracted by `MidiController`.\nbatch_size:         Batch size.\nseq_len:            The length of sequneces.\nThe length corresponds to the number of `time` splited by `time_fraction`.\n\ntime_fraction:      Time fraction which means the length of bars.", "source": "juraj-google-style"}
{"code": "def add_transition(self, source: str, dest: str):\n    self._transitions[source].append(dest)", "docstring": "Adds a transition from one state to another.\n\nArgs:\nsource (str): the name of the state from where the transition starts\ndest (str): the name of the state where the transition ends", "source": "codesearchnet"}
{"code": "def make_encoder(activation, num_topics, layer_sizes):\n    encoder_net = tf.keras.Sequential()\n    for num_hidden_units in layer_sizes:\n        encoder_net.add(tf.keras.layers.Dense(num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n    encoder_net.add(tf.keras.layers.Dense(num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n\n    def encoder(bag_of_words):\n        net = _clip_dirichlet_parameters(encoder_net(bag_of_words))\n        return tfd.Dirichlet(concentration=net, name='topics_posterior')\n    return encoder", "docstring": "Create the encoder function.\n\nArgs:\nactivation: Activation function to use.\nnum_topics: The number of topics.\nlayer_sizes: The number of hidden units per layer in the encoder.\n\nReturns:\nencoder: A `callable` mapping a bag-of-words `Tensor` to a\n`tfd.Distribution` instance over topics.", "source": "codesearchnet"}
{"code": "def __init__(self, element=None):\n        \n        super(TreeMapNode, self).__init__()\n        self._element = element\n        self._nodes = dict()\n        self._parent = None\n\n        self._depth = -1", "docstring": "Constructor.\n\nArgs:\nelement: Object to add into the node.", "source": "juraj-google-style"}
{"code": "def remove_overlap(self, also_remove_contiguous: bool = False) -> None:\n        \n        overlap = True\n        while overlap:\n            overlap = self._remove_overlap_sub(also_remove_contiguous)\n        self._sort()", "docstring": "Merges any overlapping intervals.\n\nArgs:\nalso_remove_contiguous: treat contiguous (as well as overlapping)\nintervals as worthy of merging?", "source": "juraj-google-style"}
{"code": "def convert_dict_to_params(src_dict):\n    \n    return \"&\".join([\n        \"{}={}\".format(key, value)\n        for key, value in src_dict.items()\n    ])", "docstring": "convert dict to params string\n\nArgs:\nsrc_dict (dict): source mapping data structure\n\nReturns:\nstr: string params data\n\nExamples:\n>>> src_dict = {\n\"a\": 1,\n\"b\": 2\n}\n>>> convert_dict_to_params(src_dict)\n>>> \"a=1&b=2\"", "source": "juraj-google-style"}
{"code": "class Permute(Layer):\n\n    def __init__(self, dims, **kwargs):\n        super().__init__(**kwargs)\n        self.dims = tuple(dims)\n        if sorted(dims) != list(range(1, len(dims) + 1)):\n            raise ValueError(f'Invalid permutation argument `dims` for Permute Layer. The set of indices in `dims` must be consecutive and start from 1. Received dims={dims}')\n        self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n    def compute_output_shape(self, input_shape):\n        output_shape = [input_shape[0]]\n        for dim in self.dims:\n            output_shape.append(input_shape[dim])\n        return tuple(output_shape)\n\n    def compute_output_spec(self, inputs):\n        output_shape = self.compute_output_shape(inputs.shape)\n        return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse)\n\n    def call(self, inputs):\n        return ops.transpose(inputs, axes=(0,) + self.dims)\n\n    def get_config(self):\n        config = {'dims': self.dims}\n        base_config = super().get_config()\n        return {**base_config, **config}", "docstring": "Permutes the dimensions of the input according to a given pattern.\n\nUseful e.g. connecting RNNs and convnets.\n\nArgs:\ndims: Tuple of integers. Permutation pattern does not include the\nbatch dimension. Indexing starts at 1.\nFor instance, `(1, 3, 2)` permutes the second and third dimensions\nof the input.\n\nInput shape:\nArbitrary.\n\nOutput shape:\nSame as the input shape, but with the dimensions re-ordered according\nto the specified pattern.\n\nExample:\n\n>>> x = keras.Input(shape=(10, 64))\n>>> y = keras.layers.Permute((2, 1))(x)\n>>> y.shape\n(None, 64, 10)", "source": "github-repos"}
{"code": "def _assertOpOutputMatchesExpected(self, params, solution, high_level=True, rtol=0.001, atol=1e-05):\n    input = params['input']\n    with self.session() as session:\n        for dtype in self.numeric_types - {np.int8, np.uint8}:\n            expected = solution.astype(dtype)\n            with self.test_scope():\n                params['input'] = array_ops.placeholder(dtype, input.shape, name='input')\n                if high_level:\n                    output = array_ops.matrix_diag_part(**params)\n                else:\n                    output = gen_array_ops.matrix_diag_part(**params)\n                output = array_ops.matrix_diag_part(**params)\n            result = session.run(output, {params['input']: input.astype(dtype)})\n            self.assertEqual(output.dtype, expected.dtype)\n            self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)", "docstring": "Verifies that matrix_diag_part produces `solution` when fed `params`.\n\nArgs:\nparams: dictionary containing input parameters to matrix_diag_part.\nsolution: numpy array representing the expected output.\nhigh_level: call high_level matrix_set_diag\nrtol: relative tolerance for equality test.\natol: absolute tolerance for equality test.", "source": "github-repos"}
{"code": "def quantile(self, value, name='quantile'):\n    return self._call_quantile(value, name)", "docstring": "Quantile function. Aka \"inverse cdf\" or \"percent point function\".\n\nGiven random variable `X` and `p in [0, 1]`, the `quantile` is:\n\n```none\nquantile(p) := x such that P[X <= x] == p\n```\n\nArgs:\nvalue: `float` or `double` `Tensor`.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nquantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with\nvalues of type `self.dtype`.", "source": "github-repos"}
{"code": "def item_coords(self, table_item):\n        \n        for row_key in self.children.keys():\n            for item_key in self.children[row_key].children.keys():\n                if self.children[row_key].children[item_key] == table_item:\n                    return (int(row_key), int(item_key))\n        return None", "docstring": "Returns table_item's (row, column) cordinates.\nReturns None in case of item not found.\n\nArgs:\ntable_item (TableItem): an item instance", "source": "juraj-google-style"}
{"code": "def __init__(self, subdomain, username, password, ssl=False, currentUser=None):\n        \n        self.base_url = \"http%s:\n        self._settings = {\n            \"subdomain\": subdomain,\n            \"username\": username,\n            \"password\": password,\n            \"ssl\": ssl\n        }\n        self._user = currentUser\n        self._users = {}\n        self._rooms = {}\n\n        if not self._user:\n            _connection = Connection(url=\"%s/users/me\" % self.base_url, user=username, password=password)\n            user = _connection.get(key=\"user\")\n            \n        self._connection = Connection(\n            base_url=self.base_url, \n            user=self._user.token if self._user else user[\"api_auth_token\"], \n            password=\"x\"\n        )\n\n        if self._user:\n            self._user.set_connection(self._connection)\n        else:\n            self._user = User(self, user[\"id\"], current=True)\n            self._user.token = user[\"api_auth_token\"]", "docstring": "Initialize.\n\nArgs:\nsubdomain (str): Campfire subdomain\nusername (str): User\npassword (str): pasword\n\nKwargs:\nssl (bool): enabled status of SSL\ncurrentUser (:class:`User`): If specified, don't auto load current user, use this one instead", "source": "juraj-google-style"}
{"code": "def write_registers(self, registeraddress, values):\n    if (not isinstance(values, list)):\n        raise TypeError('The \"values parameter\" must be a list. Given: {0!r}'.format(values))\n    _checkInt(len(values), minvalue=1, description='length of input list')\n    self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')", "docstring": "Write integers to 16-bit registers in the slave.\n\nThe slave register can hold integer values in the range 0 to 65535 (\"Unsigned INT16\").\n\nUses Modbus function code 16.\n\nThe number of registers that will be written is defined by the length of the ``values`` list.\n\nArgs:\n* registeraddress (int): The slave register start address (use decimal numbers, not hex).\n* values (list of int): The values to store in the slave registers.\n\nAny scaling of the register data, or converting it to negative number (two's complement)\nmust be done manually.\n\nReturns:\nNone\n\nRaises:\nValueError, TypeError, IOError", "source": "codesearchnet"}
{"code": "def universal_transformer_layer(x,\n                                hparams,\n                                ffn_unit,\n                                attention_unit,\n                                pad_remover=None):\n  \n\n  def add_vanilla_transformer_layer(x, num_layers, name):\n    \n    if hparams.add_position_timing_signal:\n      \n      \n      \n      x = common_attention.add_timing_signal_1d(x)\n    for layer in range(num_layers):\n      with tf.variable_scope(name + \"layer_%d\" % layer):\n        x = ffn_unit(attention_unit(x))\n    return x\n\n  with tf.variable_scope(\"universal_transformer_%s\" % hparams.recurrence_type):\n    if (hparams.mix_with_transformer and\n        \"before_ut\" in hparams.mix_with_transformer):\n      x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers,\n                                        \"before_ut_\")\n\n    if hparams.recurrence_type == \"act\":\n      output, extra_output = universal_transformer_act(\n          x, hparams, ffn_unit, attention_unit)\n\n    else:  \n\n      ut_function, initializer = get_ut_layer(x, hparams, ffn_unit,\n                                              attention_unit, pad_remover)\n\n      output, _, extra_output = tf.foldl(\n          ut_function, tf.range(hparams.num_rec_steps),\n          initializer=initializer)\n\n      \n      if (hparams.recurrence_type == \"lstm\" and\n          hparams.get(\"use_memory_as_final_state\", False)):\n        output = extra_output\n\n    if (hparams.mix_with_transformer and\n        \"after_ut\" in hparams.mix_with_transformer):\n      output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers,\n                                             \"after_ut_\")\n\n    return output, extra_output", "docstring": "Core function applying the universal transformer layer.\n\nArgs:\nx: input\nhparams: model hyper-parameters\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\n\nReturns:\nthe output tensor,  extra output (can be memory, ponder time, etc.)\n\nRaises:\nValueError: Unknown recurrence type", "source": "juraj-google-style"}
{"code": "def write_csv(data, file_name, encoding='utf-8'):\n    \n    name_extension = len(data) > 1\n    root, ext = os.path.splitext(file_name)\n\n    for i, sheet in enumerate(data):\n        fname = file_name if not name_extension else root+\"_\"+str(i)+ext\n        with open(fname, 'wb') as date_file:\n            csv_file = csv.writer(date_file, encoding=encoding)\n            for line in sheet:\n                csv_file.writerow(line)", "docstring": "Writes out to csv format.\n\nArgs:\ndata: 2D list of tables/worksheets.\nfile_name: Name of the output file.", "source": "juraj-google-style"}
{"code": "def GetLoggingLocation():\n    frame = inspect.currentframe()\n    this_file = frame.f_code.co_filename\n    frame = frame.f_back\n    while frame:\n        if (this_file == frame.f_code.co_filename):\n            if ('cdbg_logging_location' in frame.f_locals):\n                ret = frame.f_locals['cdbg_logging_location']\n                if (len(ret) != 3):\n                    return (None, None, None)\n                return ret\n        frame = frame.f_back\n    return (None, None, None)", "docstring": "Search for and return the file and line number from the log collector.\n\nReturns:\n(pathname, lineno, func_name) The full path, line number, and function name\nfor the logpoint location.", "source": "codesearchnet"}
{"code": "def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY):\n    \n    valid_flags = set()\n    for _, no_prefix, flag_name in regex.findall(help_output):\n        valid_flags.add('--' + flag_name)\n        if no_prefix:\n            valid_flags.add('--no' + flag_name)\n    return valid_flags", "docstring": "Parses the output of --helpfull.\n\nArgs:\nhelp_output: str, the full output of --helpfull.\n\nReturns:\nA set of flags that are valid flags.", "source": "juraj-google-style"}
{"code": "def get(url, params={}):\n        \n        request_url = url\n\n        if len(params):\n            request_url = \"{}?{}\".format(url, urlencode(params))\n\n        try:\n            req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})\n            response = json.loads(urlopen(req).read().decode(\"utf-8\"))\n\n            return response\n        except HTTPError as err:\n            raise MtgException(err.read())", "docstring": "Invoke an HTTP GET request on a url\n\nArgs:\nurl (string): URL endpoint to request\nparams (dict): Dictionary of url parameters\nReturns:\ndict: JSON response as a dictionary", "source": "juraj-google-style"}
{"code": "def publish_values(self, labeled_values):\n    metric_dicts = [Metric(time.time(), uuid.uuid4().hex, value, label=label).as_dict() for label, value in labeled_values]\n    for publisher in self.publishers:\n        publisher.publish(metric_dicts)", "docstring": "The method to publish simple labeled values.\n\nArgs:\nlabeled_values (List[Tuple(str, int)]): list of (label, value)", "source": "github-repos"}
{"code": "def wbmax(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `wbmax`'.format(value))\n    self._wbmax = value", "docstring": "Corresponds to IDD Field `wbmax`\nExtreme maximum wet-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `wbmax`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _build_endpoint(self, endpoint_name):\n    endpoint_relative = settings.get('asmaster_endpoints', endpoint_name)\n    return ('%s%s' % (self.host, endpoint_relative))", "docstring": "Generate an enpoint url from a setting name.\n\nArgs:\nendpoint_name(str): setting name for the enpoint to build\n\nReturns:\n(str) url enpoint", "source": "codesearchnet"}
{"code": "def update_media_assetfile(access_token, parent_asset_id, asset_id, content_length, name):\n    \n    path = '/Files'\n    full_path = ''.join([path, \"('\", asset_id, \"')\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    body = '{ \\\n\t\t\"ContentFileSize\": \"' + str(content_length) + '\", \\\n\t\t\"Id\": \"' + asset_id + '\", \\\n\t\t\"MimeType\": \"video/mp4\", \\\n\t\t\"Name\": \"' + name + '\", \\\n\t\t\"ParentAssetId\": \"' + parent_asset_id + '\" \\\n\t}'\n    return do_ams_patch(endpoint, full_path_encoded, body, access_token)", "docstring": "Update Media Service Asset File.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nparent_asset_id (str): A Media Service Asset Parent Asset ID.\nasset_id (str): A Media Service Asset Asset ID.\ncontent_length (str): A Media Service Asset Content Length.\nname (str): A Media Service Asset name.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def derive_depth(self, depth):\n        \n        cls = type(self)\n        \n        return cls(\n            self[0],\n            self[1],\n            self[2],\n            self[3],\n            self[4],\n            depth\n        )", "docstring": "Derives a new event from this one setting the ``depth`` attribute.\n\nArgs:\ndepth: (int):\nThe annotations associated with the derived event.\n\nReturns:\nIonEvent: The newly generated event.", "source": "juraj-google-style"}
{"code": "def register_intent_parser(self, intent_parser, domain=0):\n    if (domain not in self.domains):\n        self.register_domain(domain=domain)\n    self.domains[domain].register_intent_parser(intent_parser=intent_parser)", "docstring": "Register a intent parser with a domain.\n\nArgs:\nintent_parser(intent): The intent parser you wish to register.\ndomain(str): a string representing the domain you wish register the intent\nparser to.", "source": "codesearchnet"}
{"code": "def escape_for_cmd_exe(arg):\n    meta_chars = '()%!^\"<>&|'\n    meta_re = re.compile((('(' + '|'.join((re.escape(char) for char in list(meta_chars)))) + ')'))\n    meta_map = {char: '^{0}'.format(char) for char in meta_chars}\n\n    def escape_meta_chars(m):\n        char = m.group(1)\n        return meta_map[char]\n    return meta_re.sub(escape_meta_chars, arg)", "docstring": "Escape an argument string to be suitable to be passed to\ncmd.exe on Windows\n\nThis method takes an argument that is expected to already be properly\nescaped for the receiving program to be properly parsed. This argument\nwill be further escaped to pass the interpolation performed by cmd.exe\nunchanged.\n\nAny meta-characters will be escaped, removing the ability to e.g. use\nredirects or variables.\n\nArgs:\narg (str): a single command line argument to escape for cmd.exe\n\nReturns:\nstr: an escaped string suitable to be passed as a program argument to cmd.exe", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        if isinstance(kwargs.get('record'), dict):\n            prefix, _ = kwargs['event_type'].split('.', 1)\n            model = self.EVENT_PREFIX_TO_MODEL[prefix]\n            kwargs['record'] = model.from_api(**kwargs['record'])\n        super(WebHookEvent, self).__init__(*args, **kwargs)", "docstring": "Parse raw record data if required.\n\nArgs:\nrecord (dict or BaseModel): The record data that was received for\nthe request. If it is a ``dict``, the data will be parsed\nusing the proper model's ``from_api`` method.", "source": "juraj-google-style"}
{"code": "def _verify_iat_and_exp(payload):\n    \n    now = _helpers.datetime_to_secs(_helpers.utcnow())\n\n    \n    for key in ('iat', 'exp'):\n        if key not in payload:\n            raise ValueError(\n                'Token does not contain required claim {}'.format(key))\n\n    \n    iat = payload['iat']\n    \n    \n    earliest = iat - _helpers.CLOCK_SKEW_SECS\n    if now < earliest:\n        raise ValueError('Token used too early, {} < {}'.format(now, iat))\n\n    \n    exp = payload['exp']\n    \n    \n    latest = exp + _helpers.CLOCK_SKEW_SECS\n    if latest < now:\n        raise ValueError('Token expired, {} < {}'.format(latest, now))", "docstring": "Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token\npayload.\n\nArgs:\npayload (Mapping[str, str]): The JWT payload.\n\nRaises:\nValueError: if any checks failed.", "source": "juraj-google-style"}
{"code": "def emit_completion(self, completion_percent):\n    completion_mode = XBlockCompletionMode.get_mode(self)\n    if ((not self.has_custom_completion) or (completion_mode != XBlockCompletionMode.COMPLETABLE)):\n        raise AttributeError(\"Using `emit_completion` requires `has_custom_completion == True` (was {}) and `completion_mode == 'completable'` (was {})\".format(self.has_custom_completion, completion_mode))\n    if ((completion_percent is None) or (not (0.0 <= completion_percent <= 1.0))):\n        raise ValueError('Completion percent must be in [0.0; 1.0] interval, {} given'.format(completion_percent))\n    self.runtime.publish(self, 'completion', {'completion': completion_percent})", "docstring": "Emits completion event through Completion API.\n\nUnlike grading API, calling this method allows completion to go down - i.e. emitting a value of 0.0 on\na previously completed block indicates that it is no longer considered complete.\n\nArguments:\ncompletion_percent (float): Completion in range [0.0; 1.0] (inclusive), where 0.0 means the block\nis not completed, 1.0 means the block is fully completed.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Log(self, format_str, *args):\n    log_entry = rdf_flow_objects.FlowLogEntry(client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, message=(format_str % args))\n    data_store.REL_DB.WriteFlowLogEntries([log_entry])\n    if self.rdf_flow.parent_hunt_id:\n        db_compat.ProcessHuntFlowLog(self.rdf_flow, (format_str % args))", "docstring": "Logs the message using the flow's standard logging.\n\nArgs:\nformat_str: Format string\n*args: arguments to the format string", "source": "codesearchnet"}
{"code": "def __init__(self, fixer_names, options=None, explicit=None):\n        \n        self.fixers = fixer_names\n        self.explicit = explicit or []\n        self.options = self._default_options.copy()\n        if options is not None:\n            self.options.update(options)\n        if self.options[\"print_function\"]:\n            self.grammar = pygram.python_grammar_no_print_statement\n        else:\n            self.grammar = pygram.python_grammar\n        \n        \n        \n        self.write_unchanged_files = self.options.get(\"write_unchanged_files\")\n        self.errors = []\n        self.logger = logging.getLogger(\"RefactoringTool\")\n        self.fixer_log = []\n        self.wrote = False\n        self.driver = driver.Driver(self.grammar,\n                                    convert=pytree.convert,\n                                    logger=self.logger)\n        self.pre_order, self.post_order = self.get_fixers()\n\n\n        self.files = []  \n\n        self.BM = bm.BottomMatcher()\n        self.bmi_pre_order = [] \n        self.bmi_post_order = []\n\n        for fixer in chain(self.post_order, self.pre_order):\n            if fixer.BM_compatible:\n                self.BM.add_fixer(fixer)\n                \n                \n            elif fixer in self.pre_order:\n                self.bmi_pre_order.append(fixer)\n            elif fixer in self.post_order:\n                self.bmi_post_order.append(fixer)\n\n        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)\n        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)", "docstring": "Initializer.\n\nArgs:\nfixer_names: a list of fixers to import\noptions: an dict with configuration.\nexplicit: a list of fixers to run even if they are explicit.", "source": "juraj-google-style"}
{"code": "def format_level_0_memory(memory):\n    formatted_memory = _list_to_complex_array(memory)\n    if (not (2 <= len(formatted_memory.shape) <= 3)):\n        raise QiskitError('Level zero memory is not of correct shape.')\n    return formatted_memory", "docstring": "Format an experiment result memory object for measurement level 0.\n\nArgs:\nmemory (list): Memory from experiment with `meas_level==1`. `avg` or\n`single` will be inferred from shape of result memory.\n\nReturns:\nnp.ndarray: Measurement level 0 complex numpy array\n\nRaises:\nQiskitError: If the returned numpy array does not have 2 (avg) or 3 (single)\nindicies.", "source": "codesearchnet"}
{"code": "def _on_connection_error(self, connection, error_message):\n    self._channel = None\n    if isinstance(error_message, pika_errs.AMQPConnectionError):\n        error_message = repr(error_message.args[0])\n    _log.error(error_message)\n    self.call_later(1, self.reconnect)", "docstring": "Callback invoked when the connection failed to be established.\n\nArgs:\nconnection (pika.connection.SelectConnection): The connection that\nfailed to open.\nerror_message (str): The reason the connection couldn't be opened.", "source": "codesearchnet"}
{"code": "def min_validator(min_value):\n\n    def validator(value):\n        if (value < min_value):\n            raise ValidationError('{} is not >= {}'.format(value, min_value))\n    return validator", "docstring": "Return validator function that ensures lower bound of a number.\n\nResult validation function will validate the internal value of resource\ninstance field with the ``value >= min_value`` check\n\nArgs:\nmin_value: minimal value for new validator", "source": "codesearchnet"}
{"code": "def ReadIndex(self, index_file=None):\n        \n\n        self.index_file = index_file or self.index_file\n        fullpath = os.path.join(self.template_dir, self.index_file)\n        if self.index_file and fullpath not in self.INDEX:\n            self.index = IndexTable(self._PreParse, self._PreCompile, fullpath)\n            self.INDEX[fullpath] = self.index\n        else:\n            self.index = self.INDEX[fullpath]\n\n        \n        if \"Template\" not in self.index.index.header:  \n            raise CliTableError(\"Index file does not have 'Template' column.\")", "docstring": "Reads the IndexTable index file of commands and templates.\nArgs:\nindex_file: String, file where template/command mappings reside.\nRaises:\nCliTableError: A template column was not found in the table.", "source": "juraj-google-style"}
{"code": "def _initialize_operation_name_to_id(self):\n    operation_name_to_id = {}\n    for (i, operation) in enumerate(self._operations):\n        operation_name_to_id[operation.name] = i\n    return operation_name_to_id", "docstring": "Initializer for _operation_name_to_id.\n\nReturns:\na {string: int}, mapping operation names to their index in _operations.", "source": "codesearchnet"}
{"code": "def stop_capture_handler(self, name):\n        \n        empty_capturers_indeces = []\n        for k, sc in self._stream_capturers.iteritems():\n            stream_capturer = sc[0]\n            stream_capturer.remove_handler(name)\n\n            if stream_capturer.handler_count == 0:\n                self._pool.killone(sc[1])\n                empty_capturers_indeces.append(k)\n\n        for i in empty_capturers_indeces:\n            del self._stream_capturers[i]", "docstring": "Remove all handlers with a given name\n\nArgs:\nname:\nThe name of the handler(s) to remove.", "source": "juraj-google-style"}
{"code": "def remove_delegate(self, callback):\n        \n\n        if callback not in self._delegate_methods:\n            return\n\n        self._delegate_methods.remove(callback)", "docstring": "Unregisters a registered delegate function or a method.\n\nArgs:\ncallback(function): method to trigger when push center receives events", "source": "juraj-google-style"}
{"code": "def reset(self, *args):\n        \n        self.resource = self.resource.reset(list(args))\n        return self", "docstring": "Resets any of the tokens for this Application.\nNote that you may have to reauthenticate afterwards.\n\nUsage:\napplication.reset('api_token')\napplication.reset('api_token', 'totp_secret')\n\nArgs:\n*args (list of str): one or more of\n['api_token', 'subscription_token', 'totp_secret']\n\nReturns:\nThe Application.", "source": "juraj-google-style"}
{"code": "def scalar(name, tensor, family=None, step=None):\n\n    def function(tag, scope):\n        return gen_summary_ops.write_scalar_summary(_summary_state.writer._resource, _choose_step(step), tag, array_ops.identity(tensor), name=scope)\n    return summary_writer_function(name, tensor, function, family=family)", "docstring": "Writes a scalar summary if possible.\n\nUnlike `tf.contrib.summary.generic` this op may change the dtype\ndepending on the writer, for both practical and efficiency concerns.\n\nArgs:\nname: An arbitrary name for this summary.\ntensor: A `tf.Tensor` Must be one of the following types:\n`float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,\n`int8`, `uint16`, `half`, `uint32`, `uint64`.\nfamily: Optional, the summary's family.\nstep: The `int64` monotonic step variable, which defaults\nto `tf.compat.v1.train.get_global_step`.\n\nReturns:\nThe created `tf.Operation` or a `tf.no_op` if summary writing has\nnot been enabled for this context.", "source": "github-repos"}
{"code": "def exclude(self, **filters):\n        \n        exclude = {'-%s' % key: value for key, value in filters.items()}\n        return self.filter(**exclude)", "docstring": "Applies query filters for excluding matching records from result set.\n\nArgs:\n**filters: Query filters as keyword arguments.\n\nReturns:\nSelf. Queryset object.\n\nExamples:\n>>> Person.objects.exclude(age=None)\n>>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)", "source": "juraj-google-style"}
{"code": "def to_file(self, destination, format='csv', csv_delimiter=',', csv_header=True):\n    \n    f = codecs.open(destination, 'w', 'utf-8')\n    fieldnames = []\n    for column in self.schema:\n      fieldnames.append(column.name)\n    if sys.version_info[0] == 2:\n      csv_delimiter = csv_delimiter.encode('unicode_escape')\n    writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=csv_delimiter)\n    if csv_header:\n      writer.writeheader()\n    for row in self:\n      writer.writerow(row)\n    f.close()", "docstring": "Save the results to a local file in CSV format.\n\nArgs:\ndestination: path on the local filesystem for the saved results.\nformat: the format to use for the exported data; currently only 'csv' is supported.\ncsv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','\ncsv_header: for CSV exports, whether to include an initial header line. Default true.\nRaises:\nAn Exception if the operation failed.", "source": "juraj-google-style"}
{"code": "def _parse_args():\n    parser = argparse.ArgumentParser(description='preprocess_coco_minival: Preprocess COCO minival dataset')\n    parser.add_argument('--images_folder', type=str, help='Full path of the validation images folder.', required=True)\n    parser.add_argument('--instances_file', type=str, help='Full path of the input JSON file, like instances_val20xx.json.', required=True)\n    parser.add_argument('--allowlist_file', type=str, help='File with COCO image ids to preprocess, one on each line.', required=False)\n    parser.add_argument('--num_images', type=int, help='Number of allowlisted images to preprocess into the output folder.', required=False)\n    parser.add_argument('--output_folder', type=str, help='Full path to output images & text proto files into.', required=True)\n    return parser.parse_known_args(args=sys.argv[1:])[0]", "docstring": "Creates a parser that parse the command line arguments.\n\nReturns:\nA namespace parsed from command line arguments.", "source": "github-repos"}
{"code": "def sample(self, num_rows=1):\n    self.check_fit()\n    res = {}\n    means = np.zeros(self.covariance.shape[0])\n    size = (num_rows,)\n    clean_cov = np.nan_to_num(self.covariance)\n    samples = np.random.multivariate_normal(means, clean_cov, size=size)\n    for (i, (label, distrib)) in enumerate(self.distribs.items()):\n        cdf = stats.norm.cdf(samples[(:, i)])\n        res[label] = distrib.percent_point(cdf)\n    return pd.DataFrame(data=res)", "docstring": "Creates sintentic values stadistically similar to the original dataset.\n\nArgs:\nnum_rows: `int` amount of samples to generate.\n\nReturns:\nnp.ndarray: Sampled data.", "source": "codesearchnet"}
{"code": "def max(x, axis=None, keepdims=False, initial=None):\n    if any_symbolic_tensors((x,)):\n        return Max(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x)\n    return backend.numpy.max(x, axis=axis, keepdims=keepdims, initial=initial)", "docstring": "Return the maximum of a tensor or maximum along an axis.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which to operate. By default, flattened input\nis used.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one. Defaults to `False`.\ninitial: The minimum value of an output element. Defaults to `None`.\n\nReturns:\nMaximum of `x`.", "source": "github-repos"}
{"code": "def fix_variable(self, v, value):\n    adj = self.adj\n    linear = self.linear\n    if (value not in self.vartype.value):\n        raise ValueError('expected value to be in {}, received {} instead'.format(self.vartype.value, value))\n    removed_interactions = []\n    for u in adj[v]:\n        self.add_variable(u, (value * adj[v][u]))\n        removed_interactions.append((u, v))\n    self.remove_interactions_from(removed_interactions)\n    self.add_offset((value * linear[v]))\n    self.remove_variable(v)", "docstring": "Fix the value of a variable and remove it from a binary quadratic model.\n\nArgs:\nv (variable):\nVariable in the binary quadratic model to be fixed.\n\nvalue (int):\nValue assigned to the variable. Values must match the :class:`.Vartype` of the binary\nquadratic model.\n\nExamples:\n\nThis example creates a binary quadratic model with one variable and fixes\nits value.\n\n>>> import dimod\n...\n>>> bqm = dimod.BinaryQuadraticModel({'a': -.5, 'b': 0.}, {('a', 'b'): -1}, 0.0, dimod.SPIN)\n>>> bqm.fix_variable('a', -1)\n>>> bqm.offset\n0.5\n>>> bqm.linear['b']\n1.0\n>>> 'a' in bqm\nFalse", "source": "codesearchnet"}
{"code": "def session_manager(self):\n    return self._session_manager", "docstring": "Return the SessionManager used by the Supervisor.\n\nReturns:\nA SessionManager object.", "source": "github-repos"}
{"code": "def normalize(x, axis=-1, order=2):\n    from keras.src import ops\n    if isinstance(x, np.ndarray):\n        norm = np.atleast_1d(np.linalg.norm(x, order, axis))\n        norm[norm == 0] = 1\n        axis = axis or -1\n        return x / np.expand_dims(norm, axis)\n    return ops.nn.normalize(x, axis=axis, order=order)", "docstring": "Normalizes an array.\n\nIf the input is a NumPy array, a NumPy array will be returned.\nIf it's a backend tensor, a backend tensor will be returned.\n\nArgs:\nx: Array to normalize.\naxis: axis along which to normalize.\norder: Normalization order (e.g. `order=2` for L2 norm).\n\nReturns:\nA normalized copy of the array.", "source": "github-repos"}
{"code": "def development_verify():\n    with open(DEVELOPMENT_TEMPLATE, 'r') as file_obj:\n        template = file_obj.read()\n    expected = template.format(revision=REVISION, rtd_version=RTD_VERSION)\n    with open(DEVELOPMENT_FILE, 'r') as file_obj:\n        contents = file_obj.read()\n    if (contents != expected):\n        err_msg = ('\\n' + get_diff(contents, expected, 'DEVELOPMENT.rst.actual', 'DEVELOPMENT.rst.expected'))\n        raise ValueError(err_msg)\n    else:\n        print('DEVELOPMENT.rst contents are as expected.')", "docstring": "Populate template and compare to ``DEVELOPMENT.rst``\n\nRaises:\nValueError: If the current ``DEVELOPMENT.rst`` doesn't\nagree with the expected value computed from the template.", "source": "codesearchnet"}
{"code": "def Launch(self, request, global_params=None):\n    config = self.GetMethodConfig('Launch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Launch a job with a FlexTemplate.\n\nArgs:\nrequest: (DataflowProjectsLocationsFlexTemplatesLaunchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(LaunchFlexTemplateResponse) The response message.", "source": "github-repos"}
{"code": "def ParseOptions(cls, options, output_module):  \n    \n    if not isinstance(output_module, dynamic.DynamicOutputModule):\n      raise errors.BadConfigObject(\n          'Output module is not an instance of DynamicOutputModule')\n\n    default_fields = ','.join(cls._DEFAULT_FIELDS)\n    fields = cls._ParseStringOption(\n        options, 'fields', default_value=default_fields)\n\n    additional_fields = cls._ParseStringOption(\n        options, 'additional_fields')\n\n    if additional_fields:\n      fields = '{0:s},{1:s}'.format(fields, additional_fields)\n\n    output_module.SetFields([\n        field_name.strip() for field_name in fields.split(',')])", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (OutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.\nBadConfigOption: when the output filename was not provided.", "source": "juraj-google-style"}
{"code": "async def getProvStack(self, iden: str):\n        \n        return self.cell.provstor.getProvStack(s_common.uhex(iden))", "docstring": "Return the providence stack associated with the given iden.\n\nArgs:\niden (str):  the iden from splice\n\nNote: the iden appears on each splice entry as the 'prov' property", "source": "juraj-google-style"}
{"code": "def in_cache(self, objpath, metahash):\n        \n        try:\n            self.path_in_cache(objpath, metahash)\n            return True\n        except CacheMiss:\n            return False", "docstring": "Returns true if object is cached.\n\nArgs:\nobjpath: Filename relative to buildroot.\nmetahash: hash object", "source": "juraj-google-style"}
{"code": "def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)", "docstring": "Rescale the image by the given factor. image = image * rescale_factor.\n\nArgs:\nimage (`np.ndarray`):\nImage to rescale.\nrescale_factor (`float`):\nThe value to use for rescaling.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used. Can be one of:\n- `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format for the input image. If unset, is inferred from the input image. Can be\none of:\n- `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.", "source": "github-repos"}
{"code": "def list_knowledge_bases(project_id):\n    \n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.KnowledgeBasesClient()\n    project_path = client.project_path(project_id)\n\n    print('Knowledge Bases for: {}'.format(project_id))\n    for knowledge_base in client.list_knowledge_bases(project_path):\n        print(' - Display Name: {}'.format(knowledge_base.display_name))\n        print(' - Knowledge ID: {}\\n'.format(knowledge_base.name))", "docstring": "Lists the Knowledge bases belonging to a project.\n\nArgs:\nproject_id: The GCP project linked with the agent.", "source": "juraj-google-style"}
{"code": "def price(self, valuation_date, market, model=None):\n    del model, valuation_date\n    reference_curve = market.reference_curve\n    fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\n    return 100.0 * self._contract_notional * (1.0 - fwd_rate)", "docstring": "Returns the price of the contract on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the FRA instrument.\nmodel: Reserved for future use.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each\nfutures contract based on the input market data.", "source": "github-repos"}
{"code": "def populate_readme(version, circleci_build, appveyor_build, coveralls_build, travis_build):\n    with open(RELEASE_README_FILE, 'r') as file_obj:\n        template = file_obj.read()\n    contents = template.format(version=version, circleci_build=circleci_build, appveyor_build=appveyor_build, coveralls_build=coveralls_build, travis_build=travis_build)\n    with open(README_FILE, 'w') as file_obj:\n        file_obj.write(contents)", "docstring": "Populates ``README.rst`` with release-specific data.\n\nThis is because ``README.rst`` is used on PyPI.\n\nArgs:\nversion (str): The current version.\ncircleci_build (Union[str, int]): The CircleCI build ID corresponding\nto the release.\nappveyor_build (str): The AppVeyor build ID corresponding to the\nrelease.\ncoveralls_build (Union[str, int]): The Coveralls.io build ID\ncorresponding to the release.\ntravis_build (int): The Travis CI build ID corresponding to\nthe release.", "source": "codesearchnet"}
{"code": "def dequantize_flow(dx, dy, max_val=0.02, denorm=True):\n    assert (dx.shape == dy.shape)\n    assert ((dx.ndim == 2) or ((dx.ndim == 3) and (dx.shape[(- 1)] == 1)))\n    (dx, dy) = [dequantize(d, (- max_val), max_val, 255) for d in [dx, dy]]\n    if denorm:\n        dx *= dx.shape[1]\n        dy *= dx.shape[0]\n    flow = np.dstack((dx, dy))\n    return flow", "docstring": "Recover from quantized flow.\n\nArgs:\ndx (ndarray): Quantized dx.\ndy (ndarray): Quantized dy.\nmax_val (float): Maximum value used when quantizing.\ndenorm (bool): Whether to multiply flow values with width/height.\n\nReturns:\nndarray: Dequantized flow.", "source": "codesearchnet"}
{"code": "def to_hgnc(self, hgnc_alias, build='37'):\n    result = self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build)\n    if result:\n        for gene in result:\n            return gene['hgnc_symbol']\n    else:\n        return None", "docstring": "Check if a hgnc symbol is an alias\n\nReturn the correct hgnc symbol, if not existing return None\n\nArgs:\nhgnc_alias(str)\n\nReturns:\nhgnc_symbol(str)", "source": "codesearchnet"}
{"code": "def get_reserved_vlan_range(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri) + '/reserved-vlan-range')\n    return self._client.get(uri)", "docstring": "Gets the reserved vlan ID range for the fabric.\n\nNote:\nThis method is only available on HPE Synergy.\n\nArgs:\nid_or_uri: ID or URI of fabric.\n\nReturns:\ndict: vlan-pool", "source": "codesearchnet"}
{"code": "def get_changes_since(self, timestamp: str) -> Dict[(str, List)]:\n    rg = []\n    cg = []\n    ra = []\n    ca = []\n    layers = []\n    if (self.last_modified() > timestamp):\n        if (self.row_graphs.last_modified() > timestamp):\n            for name in self.row_graphs.keys():\n                if (self.row_graphs.last_modified(name) > timestamp):\n                    rg.append(name)\n        if (self.col_graphs.last_modified() > timestamp):\n            for name in self.col_graphs.keys():\n                if (self.col_graphs.last_modified(name) > timestamp):\n                    cg.append(name)\n        if (self.ra.last_modified() > timestamp):\n            for name in self.ra.keys():\n                if (self.ra.last_modified(name) > timestamp):\n                    ra.append(name)\n        if (self.ca.last_modified() > timestamp):\n            for name in self.ca.keys():\n                if (self.ca.last_modified(name) > timestamp):\n                    ca.append(name)\n        if (self.layers.last_modified() > timestamp):\n            for name in self.layers.keys():\n                if (self.layers.last_modified(name) > timestamp):\n                    layers.append(name)\n    return {'row_graphs': rg, 'col_graphs': cg, 'row_attrs': ra, 'col_attrs': ca, 'layers': layers}", "docstring": "Get a summary of the parts of the file that changed since the given time\n\nArgs:\ntimestamp:\tISO8601 timestamp\n\nReturn:\ndict:\tDictionary like ``{\"row_graphs\": rg, \"col_graphs\": cg, \"row_attrs\": ra, \"col_attrs\": ca, \"layers\": layers}`` listing the names of objects that were modified since the given time", "source": "codesearchnet"}
{"code": "def __setitem__(self, key, value):\n    \n    if not isinstance(key, basestring):\n      raise Exception(\"LRU cache can only be indexed by strings\")\n\n    if key in self._cache:\n      entry = self._cache[key]\n    elif len(self._cache) < self._cache_size:\n      \n      self._cache[key] = entry = {}\n    else:\n      \n      entry = min(list(self._cache.values()), key=lambda x: x['last_used'])\n      self._cache.pop(entry['key'])\n      self._cache[key] = entry\n\n    entry['value'] = value\n    entry['key'] = key\n    entry['last_used'] = datetime.datetime.now()", "docstring": "Put an item in the cache.\n\nArgs:\nkey: a string key for retrieving the item.\nvalue: the item to cache.\nRaises:\nException if the key is not a string.", "source": "juraj-google-style"}
{"code": "def from_file(cls, filename, constant_lattice=True, **kwargs):\n        \n        \n\n        fname = os.path.basename(filename)\n        if fnmatch(fname, \"*XDATCAR*\"):\n            structures = Xdatcar(filename).structures\n        elif fnmatch(fname, \"vasprun*.xml*\"):\n            structures = Vasprun(filename).structures\n        else:\n            raise ValueError(\"Unsupported file\")\n\n        return cls.from_structures(structures, constant_lattice=constant_lattice, **kwargs)", "docstring": "Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file\n\nArgs:\nfilename (str): The filename to read from.\nconstant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD\nsimulation. True results in\n\nReturns:\n(Trajectory)", "source": "juraj-google-style"}
{"code": "def RunMetadata(self, run, tag):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.RunMetadata(tag)", "docstring": "Get the session.run() metadata associated with a TensorFlow run and tag.\n\nArgs:\nrun: A string name of a TensorFlow run.\ntag: A string name of the tag associated with a particular session.run().\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for the\ngiven run.\n\nReturns:\nThe metadata in the form of `RunMetadata` protobuf data structure.", "source": "juraj-google-style"}
{"code": "def _CreateDynamicDisplayAdSettings(media_service, opener):\n    image = _CreateImage(media_service, opener, 'https:\n    logo = {'type': 'IMAGE', 'mediaId': image['mediaId'], 'xsi_type': 'Image'}\n    dynamic_settings = {'landscapeLogoImage': logo, 'pricePrefix': 'as low as', 'promoText': 'Free shipping!', 'xsi_type': 'DynamicSettings'}\n    return dynamic_settings", "docstring": "Creates settings for dynamic display ad.\n\nArgs:\nmedia_service: a SudsServiceProxy instance for AdWords's MediaService.\nopener: an OpenerDirector instance.\n\nReturns:\nThe dynamic display ad settings.", "source": "codesearchnet"}
{"code": "def add_graph_building_optimization_tests(cls: _TC) -> _TC:\n    if flags.config().graph_building_optimization.value():\n        return cls\n    for name, value in cls.__dict__.copy().items():\n        if callable(value) and (name.startswith(unittest.TestLoader.testMethodPrefix) or name.startswith('benchmark')):\n            setattr(cls, name + 'WithGraphBuildingOptimization', enable_graph_building_optimization(value))\n    return cls", "docstring": "Adds methods with graph_building_optimization enabled to the test suite.\n\nExample:\n\n@test_util.add_graph_building_optimization_tests\nclass FooTest(test.TestCase):\n\ndef testBar(self):\n...\n\nGenerated class:\nclass FooTest(test.TestCase):\n\ndef testBar(self):\n...\n\ndef testBarWithGraphBuildingOptimization(self):\n// Enable graph_building_optimization\ntestBar(self)\n// Disable graph_building_optimization\n\nArgs:\ncls: class to decorate.\n\nReturns:\ncls with new test methods added.", "source": "github-repos"}
{"code": "def percentile(self, percent):\n    if (percent >= 100):\n        percent = 100\n    target = (len(self) - (len(self) * (percent / 100)))\n    for k in reversed(sorted(self._data.keys())):\n        target -= self._data[k]\n        if (target < 0):\n            return k\n    return 10", "docstring": "Return the value that is the Nth precentile in the histogram.\n\nArgs:\npercent (Union[int, float]): The precentile being sought. The\ndefault consumer implementations use consistently use ``99``.\n\nReturns:\nint: The value corresponding to the requested percentile.", "source": "codesearchnet"}
{"code": "def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':\n    return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))", "docstring": "Concatenate the given flatterms to a single flatterm.\n\nArgs:\n*flatterms:\nThe flatterms which are concatenated.\n\nReturns:\nThe concatenated flatterms.", "source": "codesearchnet"}
{"code": "def to_cache_timer(datetime_func):\n    if (datetime_func is None):\n        datetime_func = datetime.utcnow\n\n    def _timer():\n        'Return the timestamp since the epoch.'\n        return (datetime_func() - datetime(1970, 1, 1)).total_seconds()\n    return _timer", "docstring": "Converts a datetime_func to a timestamp_func.\n\nArgs:\ndatetime_func (callable[[datatime]]): a func that returns the current\ntime\n\nReturns:\ntime_func (callable[[timestamp]): a func that returns the timestamp\nfrom the epoch", "source": "codesearchnet"}
{"code": "def __init__(self,\n               validate_args=False,\n               name=\"normal\"):\n    \n    self._graph_parents = []\n    self._name = name\n    self._validate_args = validate_args\n\n    super(NormalCDF, self).__init__(\n        validate_args=validate_args,\n        forward_min_event_ndims=0,\n        name=name)", "docstring": "Instantiates the `NormalCDF` bijector.\n\nArgs:\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"}
{"code": "def restore_state(self, state):\n        \n\n        state_name = state.get('state_name')\n        state_version = state.get('state_version')\n\n        if state_name != self.STATE_NAME or state_version != self.STATE_VERSION:\n            raise ArgumentError(\"Invalid emulated device state name or version\", found=(state_name, state_version),\n                                expected=(self.STATE_NAME, self.STATE_VERSION))\n\n        def _background_restore():\n            \n            super(ReferenceDevice, self).restore_state(state)\n\n            self.reset_count = state.get('reset_count', 0)\n            self.script = base64.b64decode(state.get('received_script'))\n\n        self.synchronize_task(_background_restore)", "docstring": "Restore the current state of this emulated device.\n\nNote that restore_state happens synchronously in the emulation thread\nto avoid any race conditions with accessing data members and ensure a\nconsistent atomic restoration process.\n\nThis method will block while the background restore happens.\n\nArgs:\nstate (dict): A previously dumped state produced by dump_state.", "source": "juraj-google-style"}
{"code": "def _verify_params(self):\n    reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params)\n    if reserved_in_use:\n        raise ValueError('Using a reserved parameter', reserved_in_use)", "docstring": "Verifies the parameters don't use any reserved parameter.\n\nRaises:\nValueError: If a reserved parameter is used.", "source": "codesearchnet"}
{"code": "def manage_all_configs(save_results, filename):\n    all_configs = get_all_configs()\n    print_all_configs(all_configs[0], all_configs[1], all_configs[2])\n    if save_results:\n        save_to_file(all_configs[3], filename)", "docstring": "Manages configuration detection and retrieval based on user input.\n\nArgs:\nsave_results: Boolean indicating whether to save the results to a file.\nfilename: String that is the name of the output JSON file.", "source": "github-repos"}
{"code": "def energy_at_conditions(self, pH, V):\n    return ((self.energy + ((self.npH * PREFAC) * pH)) + (self.nPhi * V))", "docstring": "Get free energy for a given pH and V\n\nArgs:\npH (float): pH at which to evaluate free energy\nV (float): voltage at which to evaluate free energy\n\nReturns:\nfree energy at conditions", "source": "codesearchnet"}
{"code": "def _get_split_key(client_keys, num_splits):\n    if not client_keys or len(client_keys) < num_splits - 1:\n        return client_keys\n    num_keys_per_split = max(1.0, float(len(client_keys)) / (num_splits - 1))\n    split_client_keys = []\n    for i in range(1, num_splits):\n        split_index = int(round(i * num_keys_per_split) - 1)\n        split_client_keys.append(client_keys[split_index])\n    return split_client_keys", "docstring": "Given a list of keys and a number of splits find the keys to split on.\n\nArgs:\nclient_keys: the list of keys.\nnum_splits: the number of splits.\n\nReturns:\nA list of keys to split on.", "source": "github-repos"}
{"code": "def process_multientry(entry_list, prod_comp, coeff_threshold=0.0001):\n    dummy_oh = [Composition('H'), Composition('O')]\n    try:\n        entry_comps = [e.composition for e in entry_list]\n        rxn = Reaction((entry_comps + dummy_oh), [prod_comp])\n        coeffs = (- np.array([rxn.get_coeff(comp) for comp in entry_comps]))\n        if (coeffs > coeff_threshold).all():\n            return MultiEntry(entry_list, weights=coeffs.tolist())\n        else:\n            return None\n    except ReactionError:\n        return None", "docstring": "Static method for finding a multientry based on\na list of entries and a product composition.\nEssentially checks to see if a valid aqueous\nreaction exists between the entries and the\nproduct composition and returns a MultiEntry\nwith weights according to the coefficients if so.\n\nArgs:\nentry_list ([Entry]): list of entries from which to\ncreate a MultiEntry\nprod_comp (Composition): composition constraint for setting\nweights of MultiEntry\ncoeff_threshold (float): threshold of stoichiometric\ncoefficients to filter, if weights are lower than\nthis value, the entry is not returned", "source": "codesearchnet"}
{"code": "def __init__(self, certificate=None, private_key=None):\n    \n    self.private_key = private_key\n    self.certificate = certificate\n    self._ClearServerCipherCache()\n\n    \n    self.encrypted_cipher_cache = utils.FastStore(max_size=50000)", "docstring": "Creates a communicator.\n\nArgs:\ncertificate: Our own certificate.\nprivate_key: Our own private key.", "source": "juraj-google-style"}
{"code": "def case(self, case_id=None):\n        \n        if case_id:\n            for case in self.case_objs:\n                if case.case_id == case_id:\n                    return case\n        else:\n            if self.cases:\n                return list(self.case_objs)[0]\n\n        return Case(case_id='unknown')", "docstring": "Return a Case object\n\nIf no case_id is given return one case\n\nArgs:\ncase_id (str): A case id\n\nReturns:\nA Case object", "source": "juraj-google-style"}
{"code": "def add_permissions(self, grp_name, resource, permissions):\n        \n        self.service.add_permissions(\n            grp_name, resource, permissions,\n            self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Add additional permissions for the group associated with the given resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.BossResource): Identifies which data model object to operate on.\npermissions (list): List of permissions to add to the given resource.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def get_javascript_error(self, return_type='string'):\n        \n\n        if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:\n            js_errors = self._driver.execute_script(\n                'return window.jsErrors; window.jsErrors = [];'\n            )\n\n            if not js_errors:\n                js_errors = []\n\n            if return_type == 'list':\n                if len(js_errors):\n                    return js_errors\n                else:\n                    return []\n            else:\n                if len(js_errors):\n                    return os.linesep.join(js_errors)\n                else:\n                    return self.no_javascript_error_string\n        else:\n            if return_type == 'list':\n                return []\n            else:\n                return self.no_javascript_error_string", "docstring": "Return the gathered javascript error\n\nArgs:\nreturn_type: 'string' | 'list'; default: 'string'", "source": "juraj-google-style"}
{"code": "def sub(x1, x2, output_shape=None, name=None):\n  \n  output_shape = convert_to_shape(output_shape)\n  if not isinstance(x2, Tensor):\n    return ScalarAddOperation(x1, -x2).outputs[0]\n  with tf.name_scope(name, default_name=\"sub\"):\n    x1, x2 = binary_arguments_to_tensors(x1, x2)\n    return add(x1, negative(x2), output_shape=output_shape)", "docstring": "Binary subtraction with broadcsting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def pyside_load_ui(uifile, base_instance=None):\n    \n    form_class, base_class = load_ui_type(uifile)\n    if not base_instance:\n        typeName = form_class.__name__\n        finalType = type(typeName,\n                         (form_class, base_class),\n                         {})\n        base_instance = finalType()\n    else:\n        if not isinstance(base_instance, base_class):\n            raise RuntimeError(\n                'The base_instance passed to loadUi does not inherit from'\n                ' needed base type (%s)' % type(base_class))\n        typeName = type(base_instance).__name__\n        base_instance.__class__ = type(typeName,\n                                       (form_class, type(base_instance)),\n                                       {})\n    base_instance.setupUi(base_instance)\n    return base_instance", "docstring": "Provide PyQt4.uic.loadUi functionality to PySide\n\nArgs:\nuifile (str): Absolute path to .ui file\nbase_instance (QWidget): The widget into which UI widgets are loaded\n\n\nNote:\npysideuic is required for this to work with PySide.\n\nThis seems to work correctly in Maya as well as outside of it as\nopposed to other implementations which involve overriding QUiLoader.\n\nReturns:\nQWidget: the base instance", "source": "juraj-google-style"}
{"code": "def nhapDaiHan(self, cucSo, gioiTinh):\n        \n        for cung in self.thapNhiCung:\n            khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)\n            cung.daiHan(cucSo + khoangCach * 10)\n        return self", "docstring": "Nhap dai han\n\nArgs:\ncucSo (TYPE): Description\ngioiTinh (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def output_csv(filehandle: TextIO, values: Iterable[str]) -> None:\n    \n    line = \",\".join(values)\n    filehandle.write(line + \"\\n\")", "docstring": "Write a line of CSV. POOR; does not escape things properly. DEPRECATED.\n\nArgs:\nfilehandle: file to write to\nvalues: values", "source": "juraj-google-style"}
{"code": "def BuildChecks(self, request):\n    \n    result = []\n    if request.HasField(\"start_time\") or request.HasField(\"end_time\"):\n\n      def FilterTimestamp(file_stat, request=request):\n        return file_stat.HasField(\"st_mtime\") and (\n            file_stat.st_mtime < request.start_time or\n            file_stat.st_mtime > request.end_time)\n\n      result.append(FilterTimestamp)\n\n    if request.HasField(\"min_file_size\") or request.HasField(\"max_file_size\"):\n\n      def FilterSize(file_stat, request=request):\n        return file_stat.HasField(\"st_size\") and (\n            file_stat.st_size < request.min_file_size or\n            file_stat.st_size > request.max_file_size)\n\n      result.append(FilterSize)\n\n    if request.HasField(\"perm_mode\"):\n\n      def FilterPerms(file_stat, request=request):\n        return (file_stat.st_mode & request.perm_mask) != request.perm_mode\n\n      result.append(FilterPerms)\n\n    if request.HasField(\"uid\"):\n\n      def FilterUID(file_stat, request=request):\n        return file_stat.st_uid != request.uid\n\n      result.append(FilterUID)\n\n    if request.HasField(\"gid\"):\n\n      def FilterGID(file_stat, request=request):\n        return file_stat.st_gid != request.gid\n\n      result.append(FilterGID)\n\n    if request.HasField(\"path_regex\"):\n      regex = request.path_regex\n\n      def FilterPath(file_stat, regex=regex):\n        \n        return not regex.Search(file_stat.pathspec.Basename())\n\n      result.append(FilterPath)\n\n    if request.HasField(\"data_regex\"):\n\n      def FilterData(file_stat, **_):\n        \n        return not self.TestFileContent(file_stat)\n\n      result.append(FilterData)\n\n    return result", "docstring": "Parses request and returns a list of filter callables.\n\nEach callable will be called with the StatEntry and returns True if the\nentry should be suppressed.\n\nArgs:\nrequest: A FindSpec that describes the search.\n\nReturns:\na list of callables which return True if the file is to be suppressed.", "source": "juraj-google-style"}
{"code": "def get_rows_fieldnames_from_raw_sql(\n        session: Union[Session, Engine, Connection],\n        sql: str) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:\n    \n    result = session.execute(sql)  \n    fieldnames = result.keys()\n    rows = result.fetchall()\n    return rows, fieldnames", "docstring": "Returns results and column names from a query.\n\nArgs:\nsession: SQLAlchemy :class:`Session`, :class:`Engine`, or\n:class:`Connection` object\nsql: raw SQL to execure\n\nReturns:\n``(rows, fieldnames)`` where ``rows`` is the usual set of results and\n``fieldnames`` are the name of the result columns/fields.", "source": "juraj-google-style"}
{"code": "def _get_computer_object():\n    with salt.utils.winapi.Com():\n        nt = win32com.client.Dispatch('AdsNameSpaces')\n    return nt.GetObject('', 'WinNT:", "docstring": "A helper function to get the object for the local machine\n\nReturns:\nobject: Returns the computer object for the local machine", "source": "codesearchnet"}
{"code": "def hasValue(self) -> 'Builder':\n    return self._to_builder(_evaluation.HasValueFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath hasValue() function.\n\nReturns:\nAn expression that evaluates to True if the parent has a single value\nthat is a primitive.", "source": "github-repos"}
{"code": "def are_equivalent_xml(a_xml, b_xml, ignore_timestamps=False):\n    'Normalizes then compares SystemMetadata XML docs for equivalency.\\n  ``a_xml`` and ``b_xml`` should be utf-8 encoded DataONE System Metadata XML\\n  documents.\\n  '\n    return are_equivalent_pyxb(d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml), ignore_timestamps)", "docstring": "Determine if two SystemMetadata XML docs are semantically equivalent.\n\nNormalize then compare SystemMetadata XML docs for equivalency.\n\nArgs:\na_xml, b_xml: bytes\nUTF-8 encoded SystemMetadata XML docs to compare\n\nignore_timestamps: bool\n``True``: Timestamps in the SystemMetadata are ignored so that objects that are\ncompared register as equivalent if only their timestamps differ.\n\nReturns:\nbool: **True** if SystemMetadata XML docs are semantically equivalent.\n\nNotes:\nThe SystemMetadata is normalized by removing any redundant information and\nordering all sections where there are no semantics associated with the order. The\nnormalized SystemMetadata is intended to be semantically equivalent to the\nun-normalized one.", "source": "codesearchnet"}
{"code": "def sca_xsect(scatterer, h_pol=True):\n    if (scatterer.psd_integrator is not None):\n        return scatterer.psd_integrator.get_angular_integrated(scatterer.psd, scatterer.get_geometry(), 'sca_xsect')\n    old_geom = scatterer.get_geometry()\n\n    def d_xsect(thet, phi):\n        (scatterer.phi, scatterer.thet) = ((phi * rad_to_deg), (thet * rad_to_deg))\n        Z = scatterer.get_Z()\n        I = sca_intensity(scatterer, h_pol)\n        return (I * np.sin(thet))\n    try:\n        xsect = dblquad(d_xsect, 0.0, (2 * np.pi), (lambda x: 0.0), (lambda x: np.pi))[0]\n    finally:\n        scatterer.set_geometry(old_geom)\n    return xsect", "docstring": "Scattering cross section for the current setup, with polarization.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe scattering cross section.", "source": "codesearchnet"}
{"code": "def _pack3(obj, fp, **options):\n    global compatibility\n    ext_handlers = options.get('ext_handlers')\n    if (obj is None):\n        _pack_nil(obj, fp, options)\n    elif (ext_handlers and (obj.__class__ in ext_handlers)):\n        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)\n    elif isinstance(obj, bool):\n        _pack_boolean(obj, fp, options)\n    elif isinstance(obj, int):\n        _pack_integer(obj, fp, options)\n    elif isinstance(obj, float):\n        _pack_float(obj, fp, options)\n    elif (compatibility and isinstance(obj, str)):\n        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)\n    elif (compatibility and isinstance(obj, bytes)):\n        _pack_oldspec_raw(obj, fp, options)\n    elif isinstance(obj, str):\n        _pack_string(obj, fp, options)\n    elif isinstance(obj, bytes):\n        _pack_binary(obj, fp, options)\n    elif isinstance(obj, (list, tuple)):\n        _pack_array(obj, fp, options)\n    elif isinstance(obj, dict):\n        _pack_map(obj, fp, options)\n    elif isinstance(obj, datetime.datetime):\n        _pack_ext_timestamp(obj, fp, options)\n    elif isinstance(obj, Ext):\n        _pack_ext(obj, fp, options)\n    elif ext_handlers:\n        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)\n        if t:\n            _pack_ext(ext_handlers[t](obj), fp, options)\n        else:\n            raise UnsupportedTypeException(('unsupported type: %s' % str(type(obj))))\n    else:\n        raise UnsupportedTypeException(('unsupported type: %s' % str(type(obj))))", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\nobj: a Python object\nfp: a .write()-supporting file-like object\n\nKwargs:\next_handlers (dict): dictionary of Ext handlers, mapping a custom type\nto a callable that packs an instance of the type\ninto an Ext object\nforce_float_precision (str): \"single\" to force packing floats as\nIEEE-754 single-precision floats,\n\"double\" to force packing floats as\nIEEE-754 double-precision floats.\n\nReturns:\nNone.\n\nRaises:\nUnsupportedType(PackException):\nObject type not supported for packing.\n\nExample:\n>>> f = open('test.bin', 'wb')\n>>> umsgpack.pack({u\"compact\": True, u\"schema\": 0}, f)\n>>>", "source": "codesearchnet"}
{"code": "def set(self, key, value, *, section=DataStoreDocumentSection.Data):\n    key_notation = '.'.join([section, key])\n    try:\n        self._delete_gridfs_data(self._data_from_dotnotation(key_notation, default=None))\n    except KeyError:\n        logger.info('Adding new field {} to the data store'.format(key_notation))\n    result = self._collection.update_one({'_id': ObjectId(self._workflow_id)}, {'$set': {key_notation: self._encode_value(value)}, '$currentDate': {'lastModified': True}})\n    return (result.modified_count == 1)", "docstring": "Store a value under the specified key in the given section of the document.\n\nThis method stores a value into the specified section of the workflow data store\ndocument. Any existing value is overridden. Before storing a value, any linked\nGridFS document under the specified key is deleted.\n\nArgs:\nkey (str): The key pointing to the value that should be stored/updated.\nIt supports MongoDB's dot notation for nested fields.\nvalue: The value that should be stored/updated.\nsection (DataStoreDocumentSection): The section from which the data should\nbe retrieved.\n\nReturns:\nbool: ``True`` if the value could be set/updated, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def open_window(self, private=False):\n    handles_before = self.selenium.window_handles\n    self.switch_to()\n    with self.selenium.context(self.selenium.CONTEXT_CHROME):\n        self.selenium.find_element(*self._file_menu_button_locator).click()\n        if private:\n            self.selenium.find_element(*self._file_menu_private_window_locator).click()\n        else:\n            self.selenium.find_element(*self._file_menu_new_window_button_locator).click()\n    return self.wait.until(expected.new_browser_window_is_opened(self.selenium, handles_before), message='No new browser window opened')", "docstring": "Open a new browser window.\n\nArgs:\nprivate (bool): Optional parameter to open a private browsing\nwindow. Defaults to False.\n\nReturns:\n:py:class:`BrowserWindow`: Opened window.", "source": "codesearchnet"}
{"code": "def __init__(self, function_meta, functions_mapping=None, check_variables_set=None):\n        \n        self.functions_mapping = functions_mapping or {}\n        self.check_variables_set = check_variables_set or set()\n        self.cache_key = None\n        self.__parse(function_meta)", "docstring": "init LazyFunction object with function_meta\n\nArgs:\nfunction_meta (dict): function name, args and kwargs.\n{\n\"func_name\": \"func\",\n\"args\": [1, 2]\n\"kwargs\": {\"a\": 3, \"b\": 4}\n}", "source": "juraj-google-style"}
{"code": "def read_table(fstream):\n    \n    pos = fstream.tell()\n    line = fstream.readline().strip()\n    fragments = line.split(\",\")\n    fragments = [x for x in fragments if x is not None]\n    partition = dict()\n    if not len(fragments) >= 4:\n        return None\n\n    partition[\"table\"] = fragments[0]\n    partition[\"group\"] = fragments[1]\n    partition[\"set\"] = fragments[2]\n    partition[\"num_lines\"] = fragments[3]\n\n    struct = None\n    if partition is not None and partition[\"table\"] == \"TABLE\":\n        num_lines = int(partition[\"num_lines\"].strip())\n        struct = {}\n        header = fetch_cols(fstream)\n\n        struct.update({header[0]: header[1:]})\n        for _ in range(num_lines):\n            cols = fetch_cols(fstream)\n            struct.update({cols[0]: cols[1:]})\n    else:\n        fstream.seek(pos)\n\n    return struct", "docstring": "Read a likwid table info from the text stream.\n\nArgs:\nfstream: Likwid's filestream.\n\nReturns (dict(str: str)):\nA dict containing likwid's table info as key/value pairs.", "source": "juraj-google-style"}
{"code": "def run_numerical_categorical_analysis(args, schema_list):\n    header = [column['name'] for column in schema_list]\n    input_files = file_io.get_matching_files(args.input_file_pattern)\n    for col_schema in schema_list:\n        col_type = col_schema['type'].lower()\n        if ((col_type != 'string') and (col_type != 'integer') and (col_type != 'float')):\n            raise ValueError(('Schema contains an unsupported type %s.' % col_type))\n\n    def _init_numerical_results():\n        return {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0}\n    numerical_results = collections.defaultdict(_init_numerical_results)\n    categorical_results = collections.defaultdict(set)\n    for input_file in input_files:\n        with file_io.FileIO(input_file, 'r') as f:\n            for line in f:\n                parsed_line = dict(zip(header, line.strip().split(',')))\n                for col_schema in schema_list:\n                    col_name = col_schema['name']\n                    col_type = col_schema['type']\n                    if (col_type.lower() == 'string'):\n                        categorical_results[col_name].update([parsed_line[col_name]])\n                    else:\n                        if (not parsed_line[col_name].strip()):\n                            continue\n                        numerical_results[col_name]['min'] = min(numerical_results[col_name]['min'], float(parsed_line[col_name]))\n                        numerical_results[col_name]['max'] = max(numerical_results[col_name]['max'], float(parsed_line[col_name]))\n                        numerical_results[col_name]['count'] += 1\n                        numerical_results[col_name]['sum'] += float(parsed_line[col_name])\n    for col_schema in schema_list:\n        if (col_schema['type'].lower() != 'string'):\n            col_name = col_schema['name']\n            mean = (numerical_results[col_name]['sum'] / numerical_results[col_name]['count'])\n            del numerical_results[col_name]['sum']\n            del numerical_results[col_name]['count']\n            numerical_results[col_name]['mean'] = mean\n    file_io.write_string_to_file(os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE), json.dumps(numerical_results, indent=2, separators=(',', ': ')))\n    for (name, unique_labels) in six.iteritems(categorical_results):\n        labels = '\\n'.join(list(unique_labels))\n        file_io.write_string_to_file(os.path.join(args.output_dir, (CATEGORICAL_ANALYSIS_FILE % name)), labels)", "docstring": "Makes the numerical and categorical analysis files.\n\nArgs:\nargs: the command line args\nschema_list: python object of the schema json file.\n\nRaises:\nValueError: if schema contains unknown column types.", "source": "codesearchnet"}
{"code": "def gather_gpu_devices():\n    try:\n        dev_info = _gather_gpu_devices_proc()\n        if not dev_info:\n            raise ValueError('No devices found')\n        return dev_info\n    except (IOError, ValueError, errors.OpError):\n        pass\n    try:\n        return _gather_gpu_devices_cudart()\n    except (OSError, ValueError, NotImplementedError, errors.OpError):\n        return []", "docstring": "Gather gpu device info.\n\nReturns:\nA list of test_log_pb2.GPUInfo messages.", "source": "github-repos"}
{"code": "def alloc_data(self, value):\n        \n\n        if isinstance(value, six.binary_type):\n            return self._alloc_data(value)\n        elif isinstance(value, six.text_type):\n            return self._alloc_data(value.encode('utf-8') + b'\\0')\n        else:\n            raise TypeError('No idea how to encode %s' % repr(value))", "docstring": "Allocate a piece of data that will be included in the shellcode body.\n\nArguments:\nvalue(...): The value to add to the shellcode. Can be bytes or\nstring type.\n\nReturns:\n~pwnypack.types.Offset: The offset used to address the data.", "source": "juraj-google-style"}
{"code": "def __init__(self, use_zeromq=True):\n    \n    super(PsortMultiProcessEngine, self).__init__()\n    self._analysis_plugins = {}\n    self._completed_analysis_processes = set()\n    self._data_location = None\n    self._event_filter_expression = None\n    self._event_queues = {}\n    self._event_tag_index = event_tag_index.EventTagIndex()\n    self._events_status = processing_status.EventsStatus()\n    \n    \n    self._export_event_heap = PsortEventHeap()\n    self._export_event_timestamp = 0\n    self._guppy_memory_profiler = None\n    self._knowledge_base = None\n    self._memory_profiler = None\n    self._merge_task = None\n    self._number_of_consumed_event_tags = 0\n    self._number_of_consumed_events = 0\n    self._number_of_consumed_reports = 0\n    self._number_of_consumed_sources = 0\n    self._number_of_consumed_warnings = 0\n    self._number_of_produced_event_tags = 0\n    self._number_of_produced_events = 0\n    self._number_of_produced_reports = 0\n    self._number_of_produced_sources = 0\n    self._number_of_produced_warnings = 0\n    self._processing_configuration = None\n    self._processing_profiler = None\n    self._serializers_profiler = None\n    self._status = definitions.STATUS_INDICATOR_IDLE\n    self._status_update_callback = None\n    self._use_zeromq = use_zeromq\n    self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT", "docstring": "Initializes an engine object.\n\nArgs:\nuse_zeromq (Optional[bool]): True if ZeroMQ should be used for queuing\ninstead of Python's multiprocessing queue.", "source": "juraj-google-style"}
{"code": "def get_attribute(json, attr):\n    \n    res = [json[entry][attr] for entry, _ in enumerate(json)]\n    logger.debug('{0}s (from JSON):\\n{1}'.format(attr, res))\n    return res", "docstring": "Gets the values of an attribute from JSON\n\nArgs:\njson: JSON data as a list of dict dates, where the keys are\nthe raw market statistics.\nattr: String of attribute in JSON file to collect.\n\nReturns:\nList of values of specified attribute from JSON", "source": "juraj-google-style"}
{"code": "def console_map_ascii_code_to_font(asciiCode: int, fontCharX: int, fontCharY: int) -> None:\n    lib.TCOD_console_map_ascii_code_to_font(_int(asciiCode), fontCharX, fontCharY)", "docstring": "Set a character code to new coordinates on the tile-set.\n\n`asciiCode` must be within the bounds created during the initialization of\nthe loaded tile-set.  For example, you can't use 255 here unless you have a\n256 tile tile-set loaded.  This applies to all functions in this group.\n\nArgs:\nasciiCode (int): The character code to change.\nfontCharX (int): The X tile coordinate on the loaded tileset.\n0 is the leftmost tile.\nfontCharY (int): The Y tile coordinate on the loaded tileset.\n0 is the topmost tile.", "source": "codesearchnet"}
{"code": "def noninteractive_changeset_update(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs):\n    logger.debug('Using noninterative changeset provider mode for %s.', fqn)\n    (_changes, change_set_id) = create_change_set(self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=self.service_role, **kwargs)\n    self.deal_with_changeset_stack_policy(fqn, stack_policy)\n    self.cloudformation.execute_change_set(ChangeSetName=change_set_id)", "docstring": "Update a Cloudformation stack using a change set.\n\nThis is required for stacks with a defined Transform (i.e. SAM), as the\ndefault update_stack API cannot be used with them.\n\nArgs:\nfqn (str): The fully qualified name of the Cloudformation stack.\ntemplate (:class:`stacker.providers.base.Template`): A Template\nobject to use when updating the stack.\nold_parameters (list): A list of dictionaries that defines the\nparameter list on the existing Cloudformation stack.\nparameters (list): A list of dictionaries that defines the\nparameter list to be applied to the Cloudformation stack.\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.\ntags (list): A list of dictionaries that defines the tags\nthat should be applied to the Cloudformation stack.", "source": "codesearchnet"}
{"code": "def classification_signature_def(examples, classes, scores):\n    if examples is None:\n        raise ValueError('Classification `examples` cannot be None.')\n    if not isinstance(examples, tensor_lib.Tensor):\n        raise ValueError(f'Classification `examples` must be a string Tensor. Found `examples` of type {type(examples)}.')\n    if classes is None and scores is None:\n        raise ValueError('Classification `classes` and `scores` cannot both be None.')\n    input_tensor_info = utils.build_tensor_info(examples)\n    if input_tensor_info.dtype != types_pb2.DT_STRING:\n        raise ValueError(f'Classification input tensors must be of type string. Found tensors of type {input_tensor_info.dtype}')\n    signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info}\n    signature_outputs = {}\n    if classes is not None:\n        classes_tensor_info = utils.build_tensor_info(classes)\n        if classes_tensor_info.dtype != types_pb2.DT_STRING:\n            raise ValueError(f'Classification classes must be of type string Tensor. Found tensors of type {classes_tensor_info.dtype}.`')\n        signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = classes_tensor_info\n    if scores is not None:\n        scores_tensor_info = utils.build_tensor_info(scores)\n        if scores_tensor_info.dtype != types_pb2.DT_FLOAT:\n            raise ValueError('Classification scores must be a float Tensor.')\n        signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = scores_tensor_info\n    signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.CLASSIFY_METHOD_NAME)\n    return signature_def", "docstring": "Creates classification signature from given examples and predictions.\n\nThis function produces signatures intended for use with the TensorFlow Serving\nClassify API (tensorflow_serving/apis/prediction_service.proto), and so\nconstrains the input and output types to those allowed by TensorFlow Serving.\n\nArgs:\nexamples: A string `Tensor`, expected to accept serialized tf.Examples.\nclasses: A string `Tensor`.  Note that the ClassificationResponse message\nrequires that class labels are strings, not integers or anything else.\nscores: a float `Tensor`.\n\nReturns:\nA classification-flavored signature_def.\n\nRaises:\nValueError: If examples is `None`.", "source": "github-repos"}
{"code": "def dumps(o, encoder=None):\n    \n\n    retval = \"\"\n    if encoder is None:\n        encoder = TomlEncoder(o.__class__)\n    addtoretval, sections = encoder.dump_sections(o, \"\")\n    retval += addtoretval\n    outer_objs = [id(o)]\n    while sections:\n        section_ids = [id(section) for section in sections]\n        for outer_obj in outer_objs:\n            if outer_obj in section_ids:\n                raise ValueError(\"Circular reference detected\")\n        outer_objs += section_ids\n        newsections = encoder.get_empty_table()\n        for section in sections:\n            addtoretval, addtosections = encoder.dump_sections(\n                sections[section], section)\n\n            if addtoretval or (not addtoretval and not addtosections):\n                if retval and retval[-2:] != \"\\n\\n\":\n                    retval += \"\\n\"\n                retval += \"[\" + section + \"]\\n\"\n                if addtoretval:\n                    retval += addtoretval\n            for s in addtosections:\n                newsections[section + \".\" + s] = addtosections[s]\n        sections = newsections\n    return retval", "docstring": "Stringifies input dict as toml\n\nArgs:\no: Object to dump into toml\n\npreserve: Boolean parameter. If true, preserve inline tables.\n\nReturns:\nString containing the toml corresponding to dict", "source": "juraj-google-style"}
{"code": "def handle_incoming_message(self, msg):\n        \n        if msg.type == MessageType.START_JOB:\n            job = msg.message['job']\n            self.schedule_job(job)\n        elif msg.type == MessageType.CANCEL_JOB:\n            job_id = msg.message['job_id']\n            self.cancel(job_id)", "docstring": "Start or cancel a job, based on the msg.\n\nIf msg.type == MessageType.START_JOB, then start the job given by msg.job.\n\nIf msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id.\n\nArgs:\nmsg (barbequeue.messaging.classes.Message):\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def _check_parameter_range(s_min, s_max):\n    if (s_min == DEFAULT_S_MIN):\n        return (0.0, 1.0)\n    if (s_max == DEFAULT_S_MAX):\n        return (s_min, s_min)\n    return (s_min, s_max)", "docstring": "r\"\"\"Performs a final check on a clipped parameter range.\n\n.. note::\n\nThis is a helper for :func:`clip_range`.\n\nIf both values are unchanged from the \"unset\" default, this returns\nthe whole interval :math:`\\left[0.0, 1.0\\right]`.\n\nIf only one of the values is set to some parameter :math:`s`, this\nreturns the \"degenerate\" interval :math:`\\left[s, s\\right]`. (We rely\non the fact that ``s_min`` must be the only set value, based on how\n:func:`_update_parameters` works.)\n\nOtherwise, this simply returns ``[s_min, s_max]``.\n\nArgs:\ns_min (float): Current start of clipped interval. If \"unset\", this\nvalue will be ``DEFAULT_S_MIN``.\ns_max (float): Current end of clipped interval. If \"unset\", this\nvalue will be ``DEFAULT_S_MAX``.\n\nReturns:\nTuple[float, float]: The (possibly updated) start and end\nof the clipped parameter range.", "source": "codesearchnet"}
{"code": "def add_handler(self, handler: Handler, group: int=0):\n    if isinstance(handler, DisconnectHandler):\n        self.disconnect_handler = handler.callback\n    else:\n        self.dispatcher.add_handler(handler, group)\n    return (handler, group)", "docstring": "Use this method to register an update handler.\n\nYou can register multiple handlers, but at most one handler within a group\nwill be used for a single update. To handle the same update more than once, register\nyour handler using a different group id (lower group id == higher priority).\n\nArgs:\nhandler (``Handler``):\nThe handler to be registered.\n\ngroup (``int``, *optional*):\nThe group identifier, defaults to 0.\n\nReturns:\nA tuple of (handler, group)", "source": "codesearchnet"}
{"code": "def authentication_required(req, resp, resource, uri_kwargs):\n    \n    if 'user' not in req.context:\n        args = [\"Unauthorized\", \"This resource requires authentication\"]\n\n        \n        if FALCON_VERSION >= (1, 0, 0):\n            args.append(req.context.get('challenges', []))\n\n        raise HTTPUnauthorized(*args)", "docstring": "Ensure that user is authenticated otherwise return ``401 Unauthorized``.\n\nIf request fails to authenticate this authorization hook will also\ninclude list of ``WWW-Athenticate`` challenges.\n\nArgs:\nreq (falcon.Request): the request object.\nresp (falcon.Response): the response object.\nresource (object): the resource object.\nuri_kwargs (dict): keyword arguments from the URI template.\n\n.. versionadded:: 0.4.0", "source": "juraj-google-style"}
{"code": "def oem_name(self, value):\n        \n        if value == self._defaults['ai.device.oemName'] and 'ai.device.oemName' in self._values:\n            del self._values['ai.device.oemName']\n        else:\n            self._values['ai.device.oemName'] = value", "docstring": "The oem_name property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def create_method_arguments(self, node, method, use_defaults=False):\n    args = []\n    num_posargs = method.argcount(node)\n    num_posargs_no_default = num_posargs - len(method.defaults)\n    for i in range(num_posargs):\n        default_idx = i - num_posargs_no_default\n        if use_defaults and default_idx >= 0:\n            arg = method.defaults[default_idx]\n        else:\n            arg = self.ctx.convert.create_new_unknown(node, force=not use_defaults)\n        args.append(arg)\n    kws = {}\n    for key in method.signature.kwonly_params:\n        if use_defaults and key in method.kw_defaults:\n            kws[key] = method.kw_defaults[key]\n        else:\n            kws[key] = self.ctx.convert.create_new_unknown(node, force=not use_defaults)\n    starargs = self.create_varargs(node) if method.has_varargs() else None\n    starstarargs = self.create_kwargs(node) if method.has_kwargs() else None\n    return (node, function.Args(posargs=tuple(args), namedargs=kws, starargs=starargs, starstarargs=starstarargs))", "docstring": "Create arguments for the given method.\n\nCreates Unknown objects as arguments for the given method. Note that we\ndon't need to take parameter annotations into account as\nInterpreterFunction.call() will take care of that.\n\nArgs:\nnode: The current node.\nmethod: An abstract.InterpreterFunction.\nuse_defaults: Whether to use parameter defaults for arguments. When True,\nunknown arguments are created with force=False, as it is fine to use\nUnsolvable rather than Unknown objects for type-checking defaults.\n\nReturns:\nA tuple of a node and a function.Args object.", "source": "github-repos"}
{"code": "def _method_url(self, method_name):\n    return '{base_url}/api/{api}/{method}'.format(base_url=self._base_url(), api=self.api_version, method=method_name)", "docstring": "Generate the URL for the requested method\n\nArgs:\nmethod_name (str): Name of the method\n\nReturns:\nA string containing the URL of the method", "source": "codesearchnet"}
{"code": "def search(nats_api, search_model, algo, dataset='cifar10', reporting_epoch=12, max_train_hours=20000.0):\n    nats_api.reset_time()\n    times, best_valids, best_tests = ([0.0], [0.0], [0.0])\n    valid_models = 0\n    time_spent = 0\n    start_time = time.time()\n    last_report_time = start_time\n    for model, feedback in pg.sample(search_model, algo):\n        spec = model()\n        validation_accuracy, _, _, _ = nats_api.simulate_train_eval(spec, dataset=dataset, hp=VALIDATION_SET_REPORTING_EPOCH)\n        time_spent = nats_api.used_time\n        more_info = nats_api.get_more_info(spec, dataset, hp=reporting_epoch)\n        valid_models += 1\n        feedback(validation_accuracy)\n        if validation_accuracy > best_valids[-1]:\n            best_valids.append(validation_accuracy)\n            best_tests.append(more_info['test-accuracy'])\n        else:\n            best_valids.append(best_valids[-1])\n            best_tests.append(best_tests[-1])\n        times.append(time_spent)\n        time_spent_in_hours = time_spent / (60 * 60)\n        if time_spent_in_hours > max_train_hours:\n            break\n        if feedback.id % 100 == 0:\n            now = time.time()\n            print(f'Tried {feedback.id} models, valid {valid_models}, time_spent {time_spent}, elapse since last report: {now - last_report_time} seconds.')\n            last_report_time = now\n    print(f'Total time elapse: {time.time() - start_time} seconds.')\n    return (times[1:], best_valids[1:], best_tests[1:])", "docstring": "Define the search procedure.\n\nArgs:\nnats_api: the NATS-Bench object.\nsearch_model: which is a `model` object annotated with `one_of`.\nalgo: algorithm for search.\ndataset: the target dataset\nreporting_epoch: Use test set results for models trained for this\nmany epochs.\nmax_train_hours: max time budget to train the models, which is the sum\nof training time queried from NAS-Bench.\n\nReturns:\nA tuple of (total time spent at step i for all steps,\nbest validation accuracy at step i for all steps,\nbest test accuracy at step i for all steps)", "source": "github-repos"}
{"code": "def hook(self, function, dependencies=None):\n    if (not isinstance(dependencies, (Iterable, type(None), str))):\n        raise TypeError('Invalid list of dependencies provided!')\n    if (not hasattr(function, '__deps__')):\n        function.__deps__ = dependencies\n    if self.isloaded(function.__deps__):\n        self.append(function)\n    else:\n        self._later.append(function)\n    for ext in self._later:\n        if self.isloaded(ext.__deps__):\n            self._later.remove(ext)\n            self.hook(ext)", "docstring": "Tries to load a hook\n\nArgs:\nfunction (func): Function that will be called when the event is called\n\nKwargs:\ndependencies (str): String or Iterable with modules whose hooks should be called before this one\n\nRaises:\n:class:TypeError\n\nNote that the dependencies are module-wide, that means that if\n`parent.foo` and `parent.bar` are both subscribed to `example` event\nand `child` enumerates `parent` as dependcy, **both** `foo` and `bar`\nmust be called in order for the dependcy to get resolved.", "source": "codesearchnet"}
{"code": "def _apply_threshold_to_predictions(self, result: AnomalyResult) -> AnomalyResult:\n    predictions = [dataclasses.replace(p, label=self._threshold_fn.apply(p.score), threshold=self._threshold_fn.threshold) for p in result.predictions]\n    return dataclasses.replace(result, predictions=predictions)", "docstring": "Updates the prediction labels in an AnomalyResult using the ThresholdFn.\n\nArgs:\nresult (AnomalyResult): The input `AnomalyResult` containing anomaly\nscores.\n\nReturns:\nAnomalyResult: A new `AnomalyResult` with updated prediction labels\nand threshold values.", "source": "github-repos"}
{"code": "def serialize_streamnet(streamnet_file, output_reach_file):\n        \n        FileClass.copy_files(streamnet_file, output_reach_file)\n        ds_reach = ogr_Open(output_reach_file, update=True)\n        layer_reach = ds_reach.GetLayer(0)\n        layer_def = layer_reach.GetLayerDefn()\n        i_link = layer_def.GetFieldIndex(FLD_LINKNO)\n        i_link_downslope = layer_def.GetFieldIndex(FLD_DSLINKNO)\n        i_len = layer_def.GetFieldIndex(REACH_LENGTH)\n\n        old_id_list = []\n        \n        \n        \n        \n        output_dic = {}\n        ft = layer_reach.GetNextFeature()\n        while ft is not None:\n            link_id = ft.GetFieldAsInteger(i_link)\n            reach_len = ft.GetFieldAsDouble(i_len)\n            if link_id not in old_id_list:\n                if reach_len < DELTA:\n                    downstream_id = ft.GetFieldAsInteger(i_link_downslope)\n                    output_dic[link_id] = downstream_id\n                else:\n                    old_id_list.append(link_id)\n\n            ft = layer_reach.GetNextFeature()\n        old_id_list.sort()\n\n        id_map = {}\n        for i, old_id in enumerate(old_id_list):\n            id_map[old_id] = i + 1\n        \n        \n        layer_reach.ResetReading()\n        ft = layer_reach.GetNextFeature()\n        while ft is not None:\n            link_id = ft.GetFieldAsInteger(i_link)\n            if link_id not in id_map:\n                layer_reach.DeleteFeature(ft.GetFID())\n                ft = layer_reach.GetNextFeature()\n                continue\n\n            ds_id = ft.GetFieldAsInteger(i_link_downslope)\n            ds_id = output_dic.get(ds_id, ds_id)\n            ds_id = output_dic.get(ds_id, ds_id)\n\n            ft.SetField(FLD_LINKNO, id_map[link_id])\n            if ds_id in id_map:\n                ft.SetField(FLD_DSLINKNO, id_map[ds_id])\n            else:\n                \n                ft.SetField(FLD_DSLINKNO, -1)\n            layer_reach.SetFeature(ft)\n            ft = layer_reach.GetNextFeature()\n        ds_reach.ExecuteSQL(str('REPACK reach'))\n        layer_reach.SyncToDisk()\n        ds_reach.Destroy()\n        del ds_reach\n        return id_map", "docstring": "Eliminate reach with zero length and return the reach ID map.\nArgs:\nstreamnet_file: original stream net ESRI shapefile\noutput_reach_file: serialized stream net, ESRI shapefile\n\nReturns:\nid pairs {origin: newly assigned}", "source": "juraj-google-style"}
{"code": "def get_tensor_size(self, tensor_name, partial_layout=None, mesh_dimension_to_size=None):\n    return (self.get_tensor_dtype(tensor_name).size * self.get_tensor_num_entries(tensor_name, partial_layout, mesh_dimension_to_size))", "docstring": "The size of a tensor in bytes.\n\nIf partial_layout is specified, then mesh_dimension_to_size must also be. In\nthis case, the size on a single device is returned.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\npartial_layout: an optional {string: string}, from MTF dimension name to\nmesh dimension name.\nmesh_dimension_to_size: an optional {string: int}, from mesh dimension\nname to size.\n\nReturns:\nan integer", "source": "codesearchnet"}
{"code": "def get_template(template):\n    from cloud_inquisitor.database import db\n    tmpl = db.Template.find_one(template_name=template)\n    if (not tmpl):\n        raise InquisitorError('No such template found: {}'.format(template))\n    tmplenv = Environment(loader=BaseLoader, autoescape=True)\n    tmplenv.filters['json_loads'] = json.loads\n    tmplenv.filters['slack_quote_join'] = (lambda data: ', '.join(('`{}`'.format(x) for x in data)))\n    return tmplenv.from_string(tmpl.template)", "docstring": "Return a Jinja2 template by filename\n\nArgs:\ntemplate (str): Name of the template to return\n\nReturns:\nA Jinja2 Template object", "source": "codesearchnet"}
{"code": "def call_next(self, *args, **kwargs) -> t.List[run.RunInfo]:\n    all_results = []\n    for ext in self.next_extensions:\n        LOG.debug('  %s ', ext)\n        results = ext(*args, **kwargs)\n        LOG.debug('  %s => %s', ext, results)\n        if (results is None):\n            LOG.warning('No result from: %s', ext)\n            continue\n        result_list = []\n        if isinstance(results, c.Iterable):\n            result_list.extend(results)\n        else:\n            result_list.append(results)\n        all_results.extend(result_list)\n    return all_results", "docstring": "Call all child extensions with the given arguments.\n\nThis calls all child extensions and collects the results for\nour own parent. Use this to control the execution of your\nnested extensions from your own extension.\n\nReturns:\n:obj:`list` of :obj:`RunInfo`: A list of collected\nresults of our child extensions.", "source": "codesearchnet"}
{"code": "def __init__(self, files=None, misspelling_file=None):\n    \n    if misspelling_file:\n      self._misspelling_dict = defaultdict(list)\n      with open(misspelling_file, 'r') as f:\n        for line in f:\n          bad_word, correction = line.strip().split(' ', 1)\n          self._misspelling_dict[bad_word].append(correction)\n\n    self._files = []\n    if files:\n      self.add(files)", "docstring": "Initialises an Misspellings instance.\n\nArgs:\nfiles: List of files to check.  More can be added with add().\nmisspelling_file: Filename with a list of misspelled words\nand their alternatives.\n\nRaises:\nIOError: Raised if misspelling_file can't be found.\nValueError: Raised if misspelling_file isn't correctly formatted.", "source": "juraj-google-style"}
{"code": "def plot_loss_history(history, figsize=(15, 8)):\n    \n\n    plt.figure(figsize=figsize)\n\n    plt.plot(history.history[\"loss\"])\n    plt.plot(history.history[\"val_loss\"])\n\n    plt.xlabel(\"\n    plt.ylabel(\"Loss\")\n    plt.legend([\"Training\", \"Validation\"])\n    plt.title(\"Loss over time\")\n\n    plt.show()", "docstring": "Plots the learning history for a Keras model,\nassuming the validation data was provided to the 'fit' function.\n\nArgs:\nhistory: The return value from the 'fit' function.\nfigsize: The size of the plot.", "source": "juraj-google-style"}
{"code": "def parseEquation(self, inp):\n        \n        inp = MathService._preprocess(inp)\n        split = inp.split(' ')\n\n        \n        for i, w in enumerate(split):\n            if w in self.__unaryOperators__:\n                op = self.__unaryOperators__[w]\n\n                \n                eq1 = ' '.join(split[:i])\n                eq2 = ' '.join(split[i + 1:])\n\n                \n                result = MathService._applyUnary(self.parseEquation(eq2), op)\n\n                return self.parseEquation(eq1 + \" \" + str(result))\n\n        def extractNumbersAndSymbols(inp):\n            numbers = []\n            symbols = []\n\n            \n            next_number = \"\"\n            for w in inp.split(' '):\n                if w in self.__binaryOperators__:\n                    symbols.append(self.__binaryOperators__[w])\n\n                    if next_number:\n                        numbers.append(next_number)\n                        next_number = \"\"\n\n                else:\n                    if next_number:\n                        next_number += \" \"\n                    next_number += w\n\n            if next_number:\n                numbers.append(next_number)\n\n            \n            def convert(n):\n                if n in self.__constants__:\n                    return self.__constants__[n]\n\n                converter = NumberService()\n                return converter.parse(n)\n\n            numbers = [convert(n) for n in numbers]\n\n            return numbers, symbols\n\n        numbers, symbols = extractNumbersAndSymbols(inp)\n\n        return MathService._calculate(numbers, symbols)", "docstring": "Solves the equation specified by the input string.\n\nArgs:\ninp (str): An equation, specified in words, containing some\ncombination of numbers, binary, and unary operations.\n\nReturns:\nThe floating-point result of carrying out the computation.", "source": "juraj-google-style"}
{"code": "def _fits_surface(self, width, height):\n        \n        assert(width > 0 and height > 0)\n        if self.rot and (width > self.width or height > self.height):\n            width, height = height, width\n\n        if width > self.width or height > self.height:\n            return False\n        else:\n            return True", "docstring": "Test surface is big enough to place a rectangle\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\n\nReturns:\nboolean: True if it could be placed, False otherwise", "source": "juraj-google-style"}
{"code": "async def remove_participant(self, p: Participant):\n        \n        await self.connection('DELETE', 'tournaments/{}/participants/{}'.format(self._id, p._id))\n        if p in self.participants:\n            self.participants.remove(p)", "docstring": "remove a participant from the tournament\n\n|methcoro|\n\nArgs:\np: the participant to remove\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def CompleteTask(self, task):\n    \n    with self._lock:\n      if task.identifier not in self._tasks_merging:\n        raise KeyError('Task {0:s} was not merging.'.format(task.identifier))\n\n      self.SampleTaskStatus(task, 'completed')\n\n      del self._tasks_merging[task.identifier]\n\n      logger.debug('Completed task {0:s}.'.format(task.identifier))", "docstring": "Completes a task.\n\nThe task is complete and can be removed from the task manager.\n\nArgs:\ntask (Task): task.\n\nRaises:\nKeyError: if the task was not merging.", "source": "juraj-google-style"}
{"code": "def set_max_steps_per_epoch(max_steps_per_epoch):\n    global _MAX_STEPS_PER_EPOCH\n    _MAX_STEPS_PER_EPOCH = max_steps_per_epoch", "docstring": "Limit the maximum number of steps for any call to fit/evaluate/predict.\n\nThis will cap the number of steps for single epoch of a call to `fit()`,\n`evaluate()`, or `predict()`. This is purely for debugging, and can also be\nset via the `KERAS_MAX_STEPS_PER_EPOCH` environment variable to quickly run\na scrip without modifying its source.\n\nArgs:\nmax_epochs: The integer limit on the number of epochs or `None`. If\n`None`, no limit is applied.", "source": "github-repos"}
{"code": "def __init__(self, data=None, _KEY=None, _ATTRS=None):\n    if self.__class__ is MapEntry:\n        raise TypeError('MapEntry is an abstract class.')\n    if data is None:\n        return\n    else:\n        for key in data:\n            setattr(self, key, data[key])\n    self.log = logging.getLogger(__name__)", "docstring": "This is an abstract class.\n\nArgs:\ndata:  An optional dict of attribute, value pairs to populate with.\n\nRaises:\nTypeError:  Bad argument, or attempt to instantiate abstract class.", "source": "github-repos"}
{"code": "def load(self, source, as_defaults=False):\n        \n        if isinstance(source, six.string_types):\n            source = os.path.expanduser(source)\n            with open(source, encoding='utf-8') as f:\n                self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)\n\n        elif isinstance(source, (list, tuple)):\n            for s in source:\n                with open(s, encoding='utf-8') as f:\n                    self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)\n\n        else:\n            self._rw.load_config_from_file(self._config, source, as_defaults=as_defaults)", "docstring": "Load configuration values from the specified source.\n\nArgs:\nsource:\nas_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.", "source": "juraj-google-style"}
{"code": "def get_commit(profile, sha):\n    resource = ('/commits/' + sha)\n    data = api.get_request(profile, resource)\n    return prepare(data)", "docstring": "Fetch a commit.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of the commit to fetch.\n\nReturns:\nA dict with data about the commit.", "source": "codesearchnet"}
{"code": "def _set_bearer_user_vars(allowed_client_ids, scopes):\n    (all_scopes, sufficient_scopes) = _process_scopes(scopes)\n    try:\n        authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes))\n    except oauth.Error:\n        _logger.debug('Unable to get authorized scopes.', exc_info=True)\n        return\n    if (not _are_scopes_sufficient(authorized_scopes, sufficient_scopes)):\n        _logger.warning('Authorized scopes did not satisfy scope requirements.')\n        return\n    client_id = oauth.get_client_id(authorized_scopes)\n    if ((list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK) and (client_id not in allowed_client_ids)):\n        _logger.warning('Client ID is not allowed: %s', client_id)\n        return\n    os.environ[_ENV_USE_OAUTH_SCOPE] = ' '.join(authorized_scopes)\n    _logger.debug('get_current_user() will return user from matched oauth_user.')", "docstring": "Validate the oauth bearer token and set endpoints auth user variables.\n\nIf the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE.  This\nprovides enough information that our endpoints.get_current_user() function\ncan get the user.\n\nArgs:\nallowed_client_ids: List of client IDs that are acceptable.\nscopes: List of acceptable scopes.", "source": "codesearchnet"}
{"code": "def get_ax3d_fig_plt(ax=None, **kwargs):\n    \n    import matplotlib.pyplot as plt\n    from mpl_toolkits.mplot3d import axes3d\n    if ax is None:\n        fig = plt.figure(**kwargs)\n        ax = axes3d.Axes3D(fig)\n    else:\n        fig = plt.gcf()\n\n    return ax, fig, plt", "docstring": "Helper function used in plot functions supporting an optional Axes3D\nargument. If ax is None, we build the `matplotlib` figure and create the\nAxes3D else we return the current active figure.\n\nArgs:\nkwargs: keyword arguments are passed to plt.figure if ax is not None.\n\nReturns:\nax: :class:`Axes` object\nfigure: matplotlib figure\nplt: matplotlib pyplot module.", "source": "juraj-google-style"}
{"code": "def AddTransaction(self, tx):\n    if (BC.Default() is None):\n        return False\n    if (tx.Hash.ToBytes() in self.MemPool.keys()):\n        return False\n    if BC.Default().ContainsTransaction(tx.Hash):\n        return False\n    if (not tx.Verify(self.MemPool.values())):\n        logger.error('Verifying tx result... failed')\n        return False\n    self.MemPool[tx.Hash.ToBytes()] = tx\n    return True", "docstring": "Add a transaction to the memory pool.\n\nArgs:\ntx (neo.Core.TX.Transaction): instance.\n\nReturns:\nbool: True if successfully added. False otherwise.", "source": "codesearchnet"}
{"code": "def create_from_snapshot(self, data, timeout=(- 1)):\n    uri = (self.URI + '/from-snapshot')\n    return self._client.create(data, uri=uri, timeout=timeout)", "docstring": "Creates a new volume on the storage system from a snapshot of a volume.\nA volume template must also be specified when creating a volume from a snapshot.\n\nThe global setting \"StorageVolumeTemplateRequired\" controls whether or\nnot root volume templates can be used to provision volumes.\nThe value of this setting defaults to \"false\".\nIf the value is set to \"true\", then only templates with an \"isRoot\" value of \"false\"\ncan be used to provision a volume.\n\nArgs:\ndata (dict):\nObject to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Created data.", "source": "codesearchnet"}
{"code": "def summarize_variables(var_list=None, tag=None):\n    if (var_list is None):\n        var_list = tf.trainable_variables()\n    if (tag is None):\n        tag = 'training_variables/'\n    name_to_var = {v.name: v for v in var_list}\n    for v_name in list(name_to_var):\n        v = name_to_var[v_name]\n        tf.summary.histogram((tag + v_name), v)", "docstring": "Summarize the variables.\n\nArgs:\nvar_list: a list of variables; defaults to trainable_variables.\ntag: name scope of the summary; defaults to training_variables/.", "source": "codesearchnet"}
{"code": "def Run(self):\n    for e in self.events:\n        if e.Run() is False:\n            return False\n    return True", "docstring": "Execute this state transition.\n\nReturns:\nWhether or not all event functions returned True.", "source": "github-repos"}
{"code": "def poll():\n    event_ptr = ffi.new('SDL_Event *')\n    while lib.SDL_PollEvent(event_ptr):\n        (yield Event._from_ptr(event_ptr))\n        event_ptr = ffi.new('SDL_Event *')", "docstring": "Polls for currently pending events.\n\nReturns:\nIterable[Event]: Events from the event queue.", "source": "codesearchnet"}
{"code": "def add_bboxes_to_image(image, bboxes, color='red', width=1):\n    \n    def expanded_bbox(bbox, n):\n        \n        l = min(bbox[0][0], bbox[1][0])\n        r = max(bbox[0][0], bbox[1][0])\n        t = min(bbox[0][1], bbox[1][1])\n        b = max(bbox[0][1], bbox[1][1])\n        return ((l - n, t - n), (r + n, b + n))\n\n    from PIL import Image, ImageDraw\n    draw = ImageDraw.Draw(image)\n    for bbox in bboxes:\n        for n in range(width):\n            draw.rectangle(expanded_bbox(bbox, n), outline=color)\n\n    return image", "docstring": "Draw rectangles on the image for the bounding boxes\nReturns a PIL.Image\nArguments:\nimage -- input image\nbboxes -- bounding boxes in the [((l, t), (r, b)), ...] format\nKeyword arguments:\ncolor -- color to draw the rectangles\nwidth -- line width of the rectangles\nExample:\nimage = Image.open(filename)\nadd_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700')\nimage.show()", "source": "juraj-google-style"}
{"code": "def __init__(self, url):\n        \n        self.url = url\n        self.domain = urlparse(url).netloc\n        self.index = None\n\n        self.creation_ts = time.time()\n        self.downloaded_ts = None\n        self.processing_started_ts = None\n        self.processing_ended_ts = None\n\n        for key in worker_mapping().keys():\n            setattr(self, key, None)", "docstring": "Constructor.\n\nArgs:\nurl (str): URL to which this request is related.", "source": "juraj-google-style"}
{"code": "def ws_db996(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `ws_db996`'.format(value))\n\n        self._ws_db996 = value", "docstring": "Corresponds to IDD Field `ws_db996`\nMean wind speed coincident with 99.6% dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `ws_db996`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _resolve_grad_inputs(cond_graph, grad_graph):\n    new_inputs = []\n    for t in grad_graph.external_captures:\n        if t.graph != grad_graph.outer_graph:\n            assert t.graph == cond_graph\n            for i, output in enumerate(t.graph.outputs):\n                if output is t:\n                    t = t.graph._forward_cond.outputs[i]\n                    break\n            else:\n                for i, output in enumerate(t.graph.internal_captures):\n                    if output is t:\n                        t = t.graph.external_captures[i]\n                        break\n                else:\n                    raise ValueError('Could not find external tensor capture {tensor} in captures or outputs'.format(tensor=t))\n            assert t.graph == cond_graph.outer_graph\n        new_inputs.append(t)\n    return new_inputs", "docstring": "Returns the tensors to pass as inputs to `grad_graph`.\n\nThe `grad_graph` may have external references to\n1. Its outer graph containing the input gradients. These references are kept\nas is.\n2. Tensors in the forward pass graph. These tensors may not be \"live\"\nwhen the gradient is being computed. We replace such references by their\ncorresponding tensor in `cond_graph.outer_graph`. In the case of nested\ncontrol flow or functions, the gradient logic handling\n`grad_graph.outer_graph` will make sure the tensor from\n`cond_graph.outer_graph` is also correctly captured.\n\nArgs:\ncond_graph: FuncGraph. The forward-pass function.\ngrad_graph: FuncGraph. The gradients function.\n\nReturns:\nA list of inputs tensors to be passed to grad_graph.", "source": "github-repos"}
{"code": "def fetch(self, addon_id, data={}, **kwargs):\n    return super(Addon, self).fetch(addon_id, data, **kwargs)", "docstring": "Fetch addon for given Id\n\nArgs:\naddon_id : Id for which addon object has to be retrieved\n\nReturns:\naddon dict for given subscription Id", "source": "codesearchnet"}
{"code": "def sync_model(self, comment='', compact_central=False,\n                   release_borrowed=True, release_workset=True,\n                   save_local=False):\n        \n        self._add_entry(templates.FILE_SYNC_START)\n\n        if compact_central:\n            self._add_entry(templates.FILE_SYNC_COMPACT)\n        if release_borrowed:\n            self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)\n        if release_workset:\n            self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)\n        if save_local:\n            self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)\n\n        self._add_entry(templates.FILE_SYNC_COMMENT_OK\n                                 .format(sync_comment=comment))", "docstring": "Append a sync model entry to the journal.\n\nThis instructs Revit to sync the currently open workshared model.\n\nArgs:\ncomment (str): comment to be provided for the sync step\ncompact_central (bool): if True compacts the central file\nrelease_borrowed (bool): if True releases the borrowed elements\nrelease_workset (bool): if True releases the borrowed worksets\nsave_local (bool): if True saves the local file as well", "source": "juraj-google-style"}
{"code": "def add_node(self, node_name):\n        \n        graph = self.graph\n        if node_name in graph:\n            raise KeyError('node %s already exists' % node_name)\n        graph[node_name] = set()", "docstring": "Add a node if it does not exist yet, or error out.\n\nArgs:\nnode_name (str): The unique name of the node to add.\n\nRaises:\nKeyError: Raised if a node with the same name already exist in the\ngraph", "source": "juraj-google-style"}
{"code": "def get_by_alias(self, alias):\n    if (alias not in self._aliases):\n        raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n    return self.get_by_index(self._aliases[alias])", "docstring": "Return a dataset by its alias.\n\nArgs:\nalias (str): The alias of the dataset that should be returned.\n\nRaises:\nDataInvalidAlias: If the alias does not represent a valid dataset.", "source": "codesearchnet"}
{"code": "def allreduce_ring_single_shard(xs, devices, reduction_fn_string=\"SUM\"):\n  \n  n = len(xs)\n  binary_reduction = mtf.binary_reduction_fn(reduction_fn_string)\n  assert len(devices) == n, \"devices must be a list of length len(xs)\"\n  if n == 1:\n    return xs\n  result = [None] * n\n  if n % 2 == 0:\n    left_center = n \n    right_center = left_center + 1\n  else:\n    left_center = n \n    right_center = left_center\n  left_sum = xs[0]\n  for i in xrange(1, left_center + 1):\n    with tf.device(devices[i]):\n      left_sum = binary_reduction(left_sum, xs[i])\n  right_sum = xs[n-1]\n  for i in reversed(xrange(left_center + 1, n - 1)):\n    with tf.device(devices[i]):\n      right_sum = binary_reduction(xs[i], right_sum)\n  with tf.device(devices[left_center]):\n    result[left_center] = binary_reduction(left_sum, right_sum)\n  if n % 2 == 0:\n    with tf.device(devices[right_center]):\n      result[right_center] = binary_reduction(left_sum, right_sum)\n  for i in reversed(xrange(left_center)):\n    with tf.device(devices[i]):\n      result[i] = tf.identity(result[i + 1])\n  for i in xrange(right_center + 1, n):\n    with tf.device(devices[i]):\n      result[i] = tf.identity(result[i - 1])\n  return result", "docstring": "Compute the reduction of all Tensors and put the result everywhere.\n\nPerformance-optimized for a ring of devices.\n\nArgs:\nxs: a list of n tf.Tensors\ndevices: a list of strings\nreduction_fn_string: \"SUM\" or \"MAX\"\n\nReturns:\na list of n Tensors\nRaises:\nValueError: if devices is not a list of n strings", "source": "juraj-google-style"}
{"code": "def format_auth_params(params):\n    parts = []\n    for (key, value) in params.items():\n        if value:\n            parts.append('{}=\"{}\"'.format(key, value))\n    return ', '.join(parts)", "docstring": "Generate the format expected by HTTP Headers from parameters.\n\nArgs:\nparams (dict): {key: value} to convert to key=value\n\nReturns:\nA formatted header string.", "source": "codesearchnet"}
{"code": "def calculate_columns(sequence):\n    \n    columns = {}\n\n    for row in sequence:\n        for key in row.keys():\n            if key not in columns:\n                columns[key] = len(key)\n\n            value_length = len(str(row[key]))\n            if value_length > columns[key]:\n                columns[key] = value_length\n\n    return columns", "docstring": "Find all row names and the maximum column widths.\n\nArgs:\ncolumns (dict): the keys are the column name and the value the max length.\n\nReturns:\ndict: column names (key) and widths (value).", "source": "juraj-google-style"}
{"code": "def intersection(self, other, recursive=True):\n        \n        if not isinstance(other, composite):\n            raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))\n        \n        if self.meta_type != other.meta_type:\n            return composite({})\n\n        if self.meta_type == 'list':\n            keep = []\n            for item in self._list:\n                if item in other._list:\n                    if recursive and isinstance(item, composite):\n                        keep.extend(item.intersection(other.index(item), recursive=True))\n                    else:\n                        keep.append(item)\n            return composite(keep)\n        elif self.meta_type == 'dict':\n            keep = {}\n            for key in self._dict:\n                item = self._dict[key]\n                if key in other._dict:\n                    if recursive and \\\n                       isinstance(item, composite) and \\\n                       isinstance(other.get(key), composite):\n                       keep[key] = item.intersection(other.get(key), recursive=True)\n                    elif item == other[key]:\n                        keep[key] = item\n            return composite(keep)\n        return", "docstring": "Recursively compute intersection of data. For dictionaries, items\nfor specific keys will be reduced to unique items. For lists, items\nwill be reduced to unique items. This method is meant to be analogous\nto set.intersection for composite objects.\n\nArgs:\nother (composite): Other composite object to intersect with.\nrecursive (bool): Whether or not to perform the operation recursively,\nfor all nested composite objects.", "source": "juraj-google-style"}
{"code": "def image_transform(X, function, reshape_before=False, reshape_after=False,\n                    width=None, height=None, **kwargs):\n    \n\n    if not callable(function):\n        function = import_object(function)\n\n    elif not callable(function):\n        raise ValueError(\"function must be a str or a callable\")\n\n    flat_image = len(X[0].shape) == 1\n\n    if reshape_before and flat_image:\n        if not (width and height):\n            side_length = math.sqrt(X.shape[1])\n            if side_length.is_integer():\n                side_length = int(side_length)\n                width = side_length\n                height = side_length\n\n            else:\n                raise ValueError(\"Image sizes must be given for non-square images\")\n    else:\n        reshape_before = False\n\n    new_X = []\n    for image in X:\n        if reshape_before:\n            image = image.reshape((width, height))\n\n        features = function(\n            image,\n            **kwargs\n        )\n\n        if reshape_after:\n            features = np.reshape(features, X.shape[1])\n\n        new_X.append(features)\n\n    return np.array(new_X)", "docstring": "Apply a function image by image.\n\nArgs:\nreshape_before: whether 1d array needs to be reshaped to a 2d image\nreshape_after: whether the returned values need to be reshaped back to a 1d array\nwidth: image width used to rebuild the 2d images. Required if the image is not square.\nheight: image height used to rebuild the 2d images. Required if the image is not square.", "source": "juraj-google-style"}
{"code": "def adapt(self, d, x):\n        \n        self.update_memory_x(x)\n        m_d, m_x = self.read_memory()\n        \n        y = np.dot(self.w, x-m_x) + m_d\n        e = d - y\n        nu = self.mu / (self.eps + np.dot(x-m_x, x-m_x))\n        dw = nu * e * (x-m_x)\n        self.w += dw\n        self.update_memory_d(d)", "docstring": "Adapt weights according one desired value and its input.\n\nArgs:\n\n* `d` : desired value (float)\n\n* `x` : input array (1-dimensional array)", "source": "juraj-google-style"}
{"code": "def dot_product(t1, t2, keep_dims=False, name=None, reduction_dim=None):\n  \n  with tf.name_scope(name, 'dot', [t1, t2]) as scope:\n    t1 = tf.convert_to_tensor(t1, name='t1')\n    t2 = tf.convert_to_tensor(t2, name='t2')\n    mul = tf.multiply(t1, t2)\n    if not reduction_dim:\n      reduction_dim = _last_index(mul, 1)\n    return tf.reduce_sum(mul, reduction_dim, name=scope, keep_dims=keep_dims)", "docstring": "Computes the dot product of t1 and t2.\n\nArgs:\nt1: A rank 2 tensor.\nt2: A tensor that is the same size as t1.\nkeep_dims: If true, reduction does not change the rank of the input.\nname: Optional name for this op.\nreduction_dim: The dimension to reduce, by default choose the last one\nand if no shape is specified guess 1.\nReturns:\nThe dot product.", "source": "juraj-google-style"}
{"code": "def finalize_options(self):\n        \n        self.cwd = os.path.abspath(os.path.dirname(__file__))\n        self.build_dirs = [\n            os.path.join(self.cwd, 'build'),\n            os.path.join(self.cwd, 'htmlcov'),\n            os.path.join(self.cwd, 'dist'),\n            os.path.join(self.cwd, 'pylink_square.egg-info')\n        ]\n        self.build_artifacts = ['.pyc', '.o', '.elf', '.bin']", "docstring": "Populate the attributes.\n\nArgs:\nself (CleanCommand): the ``CleanCommand`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def ParseByteStream(\n      self, parser_mediator, byte_stream, parent_path_segments=None,\n      codepage='cp1252'):\n    \n    if parent_path_segments and isinstance(parent_path_segments, list):\n      self._path_segments = list(parent_path_segments)\n    else:\n      self._path_segments = []\n\n    shell_item_list = pyfwsi.item_list()\n\n    parser_mediator.AppendToParserChain(self)\n    try:\n      shell_item_list.copy_from_byte_stream(\n          byte_stream, ascii_codepage=codepage)\n\n      for shell_item in iter(shell_item_list.items):\n        self._ParseShellItem(parser_mediator, shell_item)\n    finally:\n      parser_mediator.PopFromParserChain()", "docstring": "Parses the shell items from the byte stream.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nbyte_stream (bytes): shell items data.\nparent_path_segments (Optional[list[str]]): parent shell item path\nsegments.\ncodepage (Optional[str]): byte stream codepage.", "source": "juraj-google-style"}
{"code": "def _get_num_audio_features(self, audio_lengths: Sequence[int]) -> Sequence[int]:\n    hop_length = self.melspec_kwargs['hop_length']\n    effective_window_size = self.projector_window_size \n    projector_lengths = []\n    for raw_length in audio_lengths:\n        mel_length = raw_length \n        encoder_length = mel_length \n        nblocks = math.ceil(encoder_length / self.projector_window_size)\n        projector_length = nblocks * effective_window_size\n        projector_lengths.append(projector_length)\n    return projector_lengths", "docstring": "Gets the (variable length) number of features (i.e., projector output) for the sequences\nbeing considered.\n\nArgs:\naudio_lengths (`Sequence[int]`):\nSequence of one or more raw audio lengths.", "source": "github-repos"}
{"code": "def __init__(self, num_packs=1):\n    if num_packs < 0:\n        raise ValueError('HierarchicalCopy requires num_packs >= 0, but {} is specified'.format(num_packs))\n    super(HierarchicalCopyAllReduce, self).__init__(all_reduce_alg='hierarchical_copy', num_packs=num_packs)", "docstring": "Initializes the object.\n\nArgs:\nnum_packs: a non-negative integer. The number of packs to split values\ninto. If zero, no packing will be done.\n\nRaises:\nValueError if `num_packs` is negative.", "source": "github-repos"}
{"code": "def RunOnce(self):\n    start_time = time.time()\n    processed = 0\n    queue_manager = queue_manager_lib.QueueManager(token=self.token)\n    for queue in self.queues:\n        queue_manager.FreezeTimestamp()\n        fetch_messages_start = time.time()\n        notifications = queue_manager.GetNotifications(queue)\n        stats_collector_instance.Get().RecordEvent('worker_time_to_retrieve_notifications', (time.time() - fetch_messages_start))\n        stuck_flows = []\n        for n in notifications:\n            if n.in_progress:\n                stuck_flows.append(n)\n        if stuck_flows:\n            self.ProcessStuckFlows(stuck_flows, queue_manager)\n        notifications_available = []\n        for notification in notifications:\n            if (notification.session_id not in self.queued_flows):\n                notifications_available.append(notification)\n        try:\n            processed += self.ProcessMessages(notifications_available, queue_manager, (self.RUN_ONCE_MAX_SECONDS - (time.time() - start_time)))\n        except Exception as e:\n            logging.error('Error processing message %s. %s.', e, traceback.format_exc())\n            stats_collector_instance.Get().IncrementCounter('grr_worker_exceptions')\n            if flags.FLAGS.pdb_post_mortem:\n                pdb.post_mortem()\n        queue_manager.UnfreezeTimestamp()\n        if ((time.time() - start_time) > self.RUN_ONCE_MAX_SECONDS):\n            return processed\n    return processed", "docstring": "Processes one set of messages from Task Scheduler.\n\nThe worker processes new jobs from the task master. For each job\nwe retrieve the session from the Task Scheduler.\n\nReturns:\nTotal number of messages processed by this call.", "source": "codesearchnet"}
{"code": "def get_supervisor(func: types.AnyFunction) -> types.Supervisor:\n    if (not callable(func)):\n        raise TypeError('func is not callable')\n    if asyncio.iscoroutinefunction(func):\n        supervisor = _async_supervisor\n    else:\n        supervisor = _sync_supervisor\n    return functools.partial(supervisor, func)", "docstring": "Get the appropriate supervisor to use and pre-apply the function.\n\nArgs:\nfunc: A function.", "source": "codesearchnet"}
{"code": "def ch_stop_time(self, *channels: List[Channel]) -> int:\n    return self.timeslots.ch_stop_time(*channels)", "docstring": "Return maximum start time for supplied channels.\n\nArgs:\n*channels: Supplied channels", "source": "codesearchnet"}
{"code": "def send_post(self, mri, method_name, **params):\n        \n        q = Queue()\n        request = Post(\n            path=[mri, method_name],\n            parameters=params)\n        request.set_callback(q.put)\n        IOLoopHelper.call(self._send_request, request)\n        response = q.get()\n        if isinstance(response, Error):\n            raise response.message\n        else:\n            return response.value", "docstring": "Abstract method to dispatch a Post to the server\n\nArgs:\nmri (str): The mri of the Block\nmethod_name (str): The name of the Method within the Block\nparams: The parameters to send\n\nReturns:\nThe return results from the server", "source": "juraj-google-style"}
{"code": "def __init__(self, **kwargs):\n        \n        try:\n            arguments = Adapter(CollectorUpdate.schema_complete().validate(kwargs))\n            self.matrix = arguments.matrix\n            self.stage = arguments.stage\n            self.timestamp = arguments.timestamp\n            self.status = arguments.status\n            self.information = arguments.information.data\n        except SchemaError as exception:\n            Logger.get_logger(__name__).error(exception)\n            raise RuntimeError(str(exception))", "docstring": "Initializing and validating fields.\n\nArgs:\nkwargs (dict): application command line options.\n\nRaises:\nRuntimeError: when validation of parameters has failed.", "source": "juraj-google-style"}
{"code": "def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):\n    use_auth_token = kwargs.pop('use_auth_token', None)\n    if use_auth_token is not None:\n        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n        if kwargs.get('token', None) is not None:\n            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n        kwargs['token'] = use_auth_token\n    if os.path.isfile(save_directory):\n        raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n    os.makedirs(save_directory, exist_ok=True)\n    if push_to_hub:\n        commit_message = kwargs.pop('commit_message', None)\n        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n        repo_id = self._create_repo(repo_id, **kwargs)\n        files_timestamps = self._get_files_timestamps(save_directory)\n    if self._auto_class is not None:\n        custom_object_save(self, save_directory, config=self)\n    output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)\n    self.to_json_file(output_feature_extractor_file)\n    logger.info(f'Feature extractor saved in {output_feature_extractor_file}')\n    if push_to_hub:\n        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))\n    return [output_feature_extractor_file]", "docstring": "Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the\n[`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.\n\nArgs:\nsave_directory (`str` or `os.PathLike`):\nDirectory where the feature extractor JSON file will be saved (will be created if it does not exist).\npush_to_hub (`bool`, *optional*, defaults to `False`):\nWhether or not to push your model to the Hugging Face model hub after saving it. You can specify the\nrepository you want to push to with `repo_id` (will default to the name of `save_directory` in your\nnamespace).\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_offset = 0\n    file_size = file_object.get_size()\n    record_map = self._GetDataTypeMap('pls_recall_record')\n\n    while file_offset < file_size:\n      try:\n        pls_record, record_data_size = self._ReadStructureFromFileObject(\n            file_object, file_offset, record_map)\n      except (ValueError, errors.ParseError) as exception:\n        if file_offset == 0:\n          raise errors.UnableToParseFile('Unable to parse first record.')\n\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse record at offset: 0x{0:08x} with error: '\n            '{1!s}').format(file_offset, exception))\n        break\n\n      if file_offset == 0 and not self._VerifyRecord(pls_record):\n        raise errors.UnableToParseFile('Verification of first record failed.')\n\n      event_data = PlsRecallEventData()\n      event_data.database_name = pls_record.database_name.rstrip('\\x00')\n      event_data.sequence_number = pls_record.sequence_number\n      event_data.offset = file_offset\n      event_data.query = pls_record.query.rstrip('\\x00')\n      event_data.username = pls_record.username.rstrip('\\x00')\n\n      date_time = dfdatetime_delphi_date_time.DelphiDateTime(\n          timestamp=pls_record.last_written_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      file_offset += record_data_size", "docstring": "Parses a PLSRecall.dat file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def install(self, path, dry_run=False, overrides=None):\n    repo = package_repository_manager.get_repository(path)\n    resource = repo.install_variant(self.resource, dry_run=dry_run, overrides=overrides)\n    if (resource is None):\n        return None\n    elif (resource is self.resource):\n        return self\n    else:\n        return Variant(resource)", "docstring": "Install this variant into another package repository.\n\nIf the package already exists, this variant will be correctly merged\ninto the package. If the variant already exists in this package, the\nexisting variant is returned.\n\nArgs:\npath (str): Path to destination package repository.\ndry_run (bool): If True, do not actually install the variant. In this\nmode, a `Variant` instance is only returned if the equivalent\nvariant already exists in this repository; otherwise, None is\nreturned.\noverrides (dict): Use this to change or add attributes to the\ninstalled variant.\n\nReturns:\n`Variant` object - the (existing or newly created) variant in the\nspecified repository. If `dry_run` is True, None may be returned.", "source": "codesearchnet"}
{"code": "def enumerate_dataset(start=0):\n\n    def _apply_fn(dataset):\n        return dataset.enumerate(start)\n    return _apply_fn", "docstring": "A transformation that enumerates the elements of a dataset.\n\nIt is similar to python's `enumerate`.\nFor example:\n\n```python\n# NOTE: The following examples use `{ ... }` to represent the\n# contents of a dataset.\na = { 1, 2, 3 }\nb = { (7, 8), (9, 10) }\n\n# The nested structure of the `datasets` argument determines the\n# structure of elements in the resulting dataset.\na.apply(tf.data.experimental.enumerate_dataset(start=5))\n=> { (5, 1), (6, 2), (7, 3) }\nb.apply(tf.data.experimental.enumerate_dataset())\n=> { (0, (7, 8)), (1, (9, 10)) }\n```\n\nArgs:\nstart: A `tf.int64` scalar `tf.Tensor`, representing the start value for\nenumeration.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def compute_token_logits(sequence_output, temperature, output_weights, output_bias):\n    logits = (torch.einsum('bsj,j->bs', sequence_output, output_weights) + output_bias) / temperature\n    return logits", "docstring": "Computes logits per token\n\nArgs:\nsequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nAlso known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.\ntemperature (`float`):\nTemperature for the Bernoulli distribution.\noutput_weights (`torch.FloatTensor` of shape `(hidden_size,)`):\nWeights of the linear layer for cell selection.\noutput_bias (`torch.FloatTensor` of shape `()`):\nBias of the linear layer for cell selection\n\nReturns:\nlogits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token.", "source": "github-repos"}
{"code": "def construct_analogy_test_set(test_examples, dictionary, ignore_missing=False):\n    \n\n    test = []\n    \n    for example in test_examples:\n        try:\n            test.append([dictionary[word] for word in example])\n        except KeyError:\n            if ignore_missing:\n                pass\n            else:\n                raise\n\n    try:\n        test = np.array(test, dtype=np.int32)\n    except ValueError as e:\n        \n        raise ValueError('Each row of the test set should contain '\n                        '4 integer word ids', e)\n\n    return test", "docstring": "Construct the analogy test set by mapping the words to their\nword vector ids.\n\nArguments:\n- test_examples: iterable of 4-word iterables\n- dictionay: a mapping from words to ids\n- boolean ignore_missing: if True, words in the test set\nthat are not in the dictionary\nwill be dropeed.\n\nReturns:\n- a N by 4 numpy matrix.", "source": "juraj-google-style"}
{"code": "def is17(msg):\n    \n\n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    if bin2int(d[28:56]) != 0:\n        return False\n\n    caps = cap17(msg)\n\n    \n    \n    \n    \n\n    \n    if 'BDS20' not in caps:\n        return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 1,7\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def __init__(self, layer, named_tensors=None, scope='tf-layer', summary_labels=(), **kwargs):\n        \n        self.layer_spec = layer\n        self.layer = util.get_object(obj=layer, predefined_objects=TFLayer.tf_layers, kwargs=kwargs)\n        self.first_scope = None\n\n        super(TFLayer, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new layer instance of a TensorFlow layer.\n\nArgs:\nname: The name of the layer, one of 'dense'.\n**kwargs: Additional arguments passed on to the TensorFlow layer constructor.", "source": "juraj-google-style"}
{"code": "def parse(cls, op):\n        \n        for event in cls:\n            if event.value == int(op):\n                return event\n        return None", "docstring": "Gets the enum for the op code\n\nArgs:\nop: value of the op code (will be casted to int)\n\nReturns:\nThe enum that matches the op code", "source": "juraj-google-style"}
{"code": "def get_external_command_output(command: str) -> bytes:\n    args = shlex.split(command)\n    ret = subprocess.check_output(args)\n    return ret", "docstring": "Takes a command-line command, executes it, and returns its ``stdout``\noutput.\n\nArgs:\ncommand: command string\n\nReturns:\noutput from the command as ``bytes``", "source": "codesearchnet"}
{"code": "def reply(self, reply_comment):\n    payload = (('{ \"Comment\": \"' + reply_comment) + '\"}')\n    endpoint = (('https:\n    self._make_api_call('post', endpoint, data=payload)", "docstring": "Reply to the Message.\n\nNotes:\nHTML can be inserted in the string and will be interpreted properly by Outlook.\n\nArgs:\nreply_comment: String message to send with email.", "source": "codesearchnet"}
{"code": "def create(self, data=None, uri=None, timeout=-1, custom_headers=None, force=False):\n        \n        if not data:\n            data = {}\n\n        default_values = self._get_default_values()\n        data = self._helper.update_resource_fields(data, default_values)\n\n        logger.debug('Create (uri = %s, resource = %s)' % (uri, str(data)))\n\n        resource_data = self._helper.create(data, uri, timeout, custom_headers, force)\n        new_resource = self.new(self._connection, resource_data)\n\n        return new_resource", "docstring": "Makes a POST request to create a resource when a request body is required.\n\nArgs:\ndata: Additional fields can be passed to create the resource.\nuri: Resouce uri\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\ncustom_headers: Allows set specific HTTP headers.\nReturns:\nCreated resource.", "source": "juraj-google-style"}
{"code": "def deserialize(doc_xml, pyxb_binding=None):\n    pyxb_binding = (pyxb_binding or d1_common.types.dataoneTypes)\n    try:\n        return pyxb_binding.CreateFromDocument(doc_xml)\n    except pyxb.ValidationError as e:\n        raise ValueError('Unable to deserialize XML to PyXB. error=\"{}\" xml=\"{}\"'.format(e.details(), doc_xml))\n    except (pyxb.PyXBException, xml.sax.SAXParseException, Exception) as e:\n        raise ValueError('Unable to deserialize XML to PyXB. error=\"{}\" xml=\"{}\"'.format(str(e), doc_xml))", "docstring": "Deserialize DataONE XML types to PyXB.\n\nArgs:\ndoc_xml: UTF-8 encoded ``bytes``\n\npyxb_binding: PyXB binding object. If not specified, the correct one should be\nselected automatically.\n\nReturns:\nPyXB object\n\nSee Also:\n``deserialize_d1_exception()`` for deserializing DataONE Exception types.", "source": "codesearchnet"}
{"code": "def write_object_proto(var, proto, options):\n    if options.experimental_variable_policy._expand_distributed_variables():\n        for var in var.values:\n            var_proto = proto.variable.experimental_distributed_variable_components.add()\n            var_proto.name = var.name.split(':')[0]\n            var_proto.device = var.device", "docstring": "Update a SavedObject proto for the caller.\n\nIf a DistributedVariable object supports this method, it will be called when\nsaving with a pre-built `SavedObject` proto representing the object, plus an\ninstance of `SaveOptions`. This method is then free to modify that proto\ninstance.\n\n`DistributedVariable` with `AUTO` or `ON_WRITE` synchronization optionally\nwrite out information about their components to the\n`experimental_distributed_variable_components` field of a\n`SavedVariable` (depending on the `SaveOptions` variable policy).\n\nArgs:\nvar: The DistributedVariable object.\nproto: A pre-built `SavedObject` proto for this object. It is assumed this\nwill be a `SavedVariable` instance.\noptions: A `SaveOptions` instance.", "source": "github-repos"}
{"code": "def create_new_username(ip, devicetype=None, timeout=_DEFAULT_TIMEOUT):\n    \n    res = Resource(_api_url(ip), timeout)\n    prompt = \"Press the Bridge button, then press Return: \"\n    \n    if sys.version_info.major == 2:\n        _ = raw_input(prompt)\n    else:\n        _ = input(prompt)\n\n    if devicetype is None:\n        devicetype = \"qhue\n\n    \n    response = res(devicetype=devicetype, http_method=\"post\")\n\n    return response[0][\"success\"][\"username\"]", "docstring": "Interactive helper function to generate a new anonymous username.\n\nArgs:\nip: ip address of the bridge\ndevicetype (optional): devicetype to register with the bridge. If\nunprovided, generates a device type based on the local hostname.\ntimeout (optional, default=5): request timeout in seconds\nRaises:\nQhueException if something went wrong with username generation (for\nexample, if the bridge button wasn't pressed).", "source": "juraj-google-style"}
{"code": "def remove_vcf_info(keyword, variant_line=None, variant_dict=None):\n    \n    logger.debug(\"Removing variant information {0}\".format(keyword))\n    \n    fixed_variant = None\n    \n    def get_new_info_string(info_string, keyword):\n        \n        new_info_list = []\n        splitted_info_string = info_string.split(';')\n        for info in splitted_info_string:\n            splitted_info_entry = info.split('=')\n            if splitted_info_entry[0] != keyword:\n                new_info_list.append(info)\n        \n        new_info_string = ';'.join(new_info_list)\n        \n        return new_info_string\n        \n    \n    if variant_line:\n        logger.debug(\"Removing information from a variant line\")\n        splitted_variant = variant_line.rstrip('\\n').split('\\t')\n        \n        old_info = splitted_variant[7]\n        if old_info == '.':\n            new_info_string = '.'\n        else:\n            new_info_string = get_new_info_string(old_info, keyword)\n        \n        splitted_variant[7] = new_info_string\n        \n        fixed_variant = '\\t'.join(splitted_variant)\n    \n    elif variant_dict:\n        logger.debug(\"Removing information to a variant dict\")\n        old_info = variant_dict['INFO']\n        \n        if old_info == '.':\n            variant_dict['INFO'] = old_info\n        else:\n            new_info_string = get_new_info_string(old_info, keyword)\n        \n        variant_dict['INFO'] = new_info_string\n        fixed_variant = variant_dict\n    \n    return fixed_variant", "docstring": "Remove the information of a info field of a vcf variant line or a\nvariant dict.\n\nArguments:\nvariant_line (str): A vcf formatted variant line\nvariant_dict (dict): A variant dictionary\nkeyword (str): The info field key\n\nReturns:\nvariant_line (str): A annotated variant line", "source": "juraj-google-style"}
{"code": "def FromTimestampToHttp(self, ts):\n    ts = time.gmtime(ts)\n    return time.strftime('%a, %d %b %Y %H:%M:%S GMT', ts)", "docstring": "Converts internal nss_cache timestamp to HTTP timestamp.\n\nArgs:\nts: number of seconds since epoch\nReturns:\nHTTP format timestamp string", "source": "github-repos"}
{"code": "def make_simulated_env_fn(**env_kwargs):\n  \n  def env_fn(in_graph):\n    class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv\n    return class_(**env_kwargs)\n  return env_fn", "docstring": "Returns a function creating a simulated env, in or out of graph.\n\nArgs:\n**env_kwargs: kwargs to pass to the simulated env constructor.\n\nReturns:\nFunction in_graph -> env.", "source": "juraj-google-style"}
{"code": "def sudo_remove_dirtree(dir_name):\n    try:\n        subprocess.check_output(['sudo', 'rm', '-rf', dir_name])\n    except subprocess.CalledProcessError as e:\n        raise WorkerError('Cant remove directory {0}'.format(dir_name), e)", "docstring": "Removes directory tree as a superuser.\n\nArgs:\ndir_name: name of the directory to remove.\n\nThis function is necessary to cleanup directories created from inside a\nDocker, since they usually written as a root, thus have to be removed as a\nroot.", "source": "codesearchnet"}
{"code": "def lookup_descriptor(self, definition_name):\n    try:\n        return self.__descriptors[definition_name]\n    except KeyError:\n        pass\n    if self.__descriptor_loader:\n        definition = self.__descriptor_loader(definition_name)\n        self.__descriptors[definition_name] = definition\n        return definition\n    else:\n        raise messages.DefinitionNotFoundError(('Could not find definition for %s' % definition_name))", "docstring": "Lookup descriptor by name.\n\nGet descriptor from library by name.  If descriptor is not found will\nattempt to find via descriptor loader if provided.\n\nArgs:\ndefinition_name: Definition name to find.\n\nReturns:\nDescriptor that describes definition name.\n\nRaises:\nDefinitionNotFoundError if not descriptor exists for definition name.", "source": "codesearchnet"}
{"code": "def filter(self, limit=None, to=None, category=None):\n    if (category and (not to)):\n        msg_slice = itertools.islice((x for x in self.store if (x[2] == category)), limit)\n    elif (to and (not category)):\n        to = JID.fromstr(to)\n        msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)\n    elif (to and category):\n        to = JID.fromstr(to)\n        msg_slice = itertools.islice((x for x in self.store if (_agent_in_msg(to, x[1]) and (x[2] == category))), limit)\n    else:\n        msg_slice = self.all(limit=limit)\n        return msg_slice\n    return list(msg_slice)[::(- 1)]", "docstring": "Returns the events that match the filters\n\nArgs:\nlimit (int, optional): the max length of the events to return (Default value = None)\nto (str, optional): only events that have been sent or received by 'to' (Default value = None)\ncategory (str, optional): only events belonging to the category (Default value = None)\n\nReturns:\nlist: a list of filtered events", "source": "codesearchnet"}
{"code": "def _add_input_deps(self, executor, args, kwargs):\n    if (executor == 'data_manager'):\n        return (args, kwargs)\n    inputs = kwargs.get('inputs', [])\n    for (idx, f) in enumerate(inputs):\n        if (isinstance(f, File) and f.is_remote()):\n            inputs[idx] = self.data_manager.stage_in(f, executor)\n    for (kwarg, f) in kwargs.items():\n        if (isinstance(f, File) and f.is_remote()):\n            kwargs[kwarg] = self.data_manager.stage_in(f, executor)\n    newargs = list(args)\n    for (idx, f) in enumerate(newargs):\n        if (isinstance(f, File) and f.is_remote()):\n            newargs[idx] = self.data_manager.stage_in(f, executor)\n    return (tuple(newargs), kwargs)", "docstring": "Look for inputs of the app that are remote files. Submit stage_in\napps for such files and replace the file objects in the inputs list with\ncorresponding DataFuture objects.\n\nArgs:\n- executor (str) : executor where the app is going to be launched\n- args (List) : Positional args to app function\n- kwargs (Dict) : Kwargs to app function", "source": "codesearchnet"}
{"code": "def __init__(self, config, channel=None):\n        \n\n        self.channel = channel\n\n        if not _kubernetes_enabled:\n            raise OptionalModuleMissing(['kubernetes'],\n                                        \"Kubernetes provider requires kubernetes module and config.\")\n\n        self.kube_client = client.ExtensionsV1beta1Api()\n\n        self.config = config\n        self.sitename = self.config['site']\n        self.namespace = self.config['execution']['namespace']\n        self.image = self.config['execution']['image']\n\n        self.init_blocks = self.config[\"execution\"][\"block\"][\"initBlocks\"]\n        self.min_blocks = self.config[\"execution\"][\"block\"][\"minBlocks\"]\n        self.max_blocks = self.config[\"execution\"][\"block\"][\"maxBlocks\"]\n\n        self.user_id = None\n        self.group_id = None\n        self.run_as_non_root = None\n        if 'security' in self.config['execution']:\n            self.user_id = self.config[\"execution\"]['security'][\"user_id\"]\n            self.group_id = self.config[\"execution\"]['security'][\"group_id\"]\n            self.run_as_non_root = self.config[\"execution\"]['security'][\"run_as_non_root\"]\n\n        self.secret = None\n        if 'secret' in self.config['execution']:\n            self.secret = self.config['execution']['secret']\n\n        \n        self.resources = {}", "docstring": "Initialize the Kubernetes execution provider class\n\nArgs:\n- Config (dict): Dictionary with all the config options.\n\nKWargs :\n- channel (channel object) : default=None A channel object", "source": "juraj-google-style"}
{"code": "def convert_tokens_to_string(self, tokens: List[str]) -> str:\n    raise NotImplementedError", "docstring": "Converts a sequence of tokens in a single string. The most simple way to do it is `\" \".join(tokens)` but we\noften want to remove sub-word tokenization artifacts at the same time.\n\nArgs:\ntokens (`List[str]`): The token to join in a string.\n\nReturns:\n`str`: The joined tokens.", "source": "github-repos"}
{"code": "def relu(x, alpha=0.0, max_value=None, threshold=0):\n    dtype = getattr(x, 'dtype', floatx())\n    if alpha != 0.0:\n        if max_value is None and threshold == 0:\n            return nn.leaky_relu(x, alpha=alpha)\n        if threshold != 0:\n            negative_part = nn.relu(-x + threshold)\n        else:\n            negative_part = nn.relu(-x)\n    clip_max = max_value is not None\n    if threshold != 0:\n        x = x * math_ops.cast(math_ops.greater(x, threshold), dtype=dtype)\n    elif max_value == 6:\n        x = nn.relu6(x)\n        clip_max = False\n    else:\n        x = nn.relu(x)\n    if clip_max:\n        max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)\n        zero = _constant_to_tensor(0, x.dtype.base_dtype)\n        x = clip_ops.clip_by_value(x, zero, max_value)\n    if alpha != 0.0:\n        alpha = _to_tensor(alpha, x.dtype.base_dtype)\n        x -= alpha * negative_part\n    return x", "docstring": "Rectified linear unit.\n\nWith default values, it returns element-wise `max(x, 0)`.\n\nOtherwise, it follows:\n`f(x) = max_value` for `x >= max_value`,\n`f(x) = x` for `threshold <= x < max_value`,\n`f(x) = alpha * (x - threshold)` otherwise.\n\nArgs:\nx: A tensor or variable.\nalpha: A scalar, slope of negative section (default=`0.`).\nmax_value: float. Saturation threshold.\nthreshold: float. Threshold value for thresholded activation.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, functions):\n    var_index = random.randrange(len(vlist))\n    var = vlist[var_index]\n    consts = (vlist[:var_index] + vlist[(var_index + 1):])\n    depth = random.randrange(min_depth, (max_depth + 1))\n    expr = random_expr_with_required_var(depth, var, consts, ops)\n    expr_str = str(expr)\n    sample = ((var + ':') + expr_str)\n    target = format_sympy_expr(sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions)\n    return (sample, target)", "docstring": "Randomly generate a symbolic integral dataset sample.\n\nGiven an input expression, produce the indefinite integral.\n\nArgs:\nvlist: Variable list. List of chars that can be used in the expression.\nops: List of ExprOp instances. The allowed operators for the expression.\nmin_depth: Expression trees will not have a smaller depth than this. 0 means\nthere is just a variable. 1 means there is one operation.\nmax_depth: Expression trees will not have a larger depth than this. To make\nall trees have the same depth, set this equal to `min_depth`.\nfunctions: Defines special functions. A dict mapping human readable string\nnames, like \"log\", \"exp\", \"sin\", \"cos\", etc., to single chars. Each\nfunction gets a unique token, like \"L\" for \"log\".\n\nReturns:\nsample: String representation of the input. Will be of the form\n'var:expression'.\ntarget: String representation of the solution.", "source": "codesearchnet"}
{"code": "def WaitUntilNoFlowsToProcess(self, timeout=None):\n    t = self.flow_handler_thread\n    if (not t):\n        return\n    start_time = time.time()\n    while True:\n        with self.lock:\n            if ((not t.isAlive()) or ((not self._GetFlowRequestsReadyForProcessing()) and (not self.flow_handler_num_being_processed))):\n                return\n        time.sleep(0.2)\n        if (timeout and ((time.time() - start_time) > timeout)):\n            raise TimeOutWhileWaitingForFlowsToBeProcessedError(\"Flow processing didn't finish in time.\")", "docstring": "Waits until flow processing thread is done processing flows.\n\nArgs:\ntimeout: If specified, is a max number of seconds to spend waiting.\n\nRaises:\nTimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.", "source": "codesearchnet"}
{"code": "def handle_duplications(file_path):\n    \n    logging.info('Handling duplications for \"%s\"', file_path)\n    f = open_strings_file(file_path, \"r+\")\n    header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)\n    file_elements = []\n    section_file_elements = []\n    keys_to_objects = {}\n    duplicates_found = []\n    for header_comment, comments, key, value in header_comment_key_value_tuples:\n        if len(header_comment) > 0:\n            \n            for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):\n                file_elements.append(elem)\n            section_file_elements = []\n            file_elements.append(Comment(header_comment))\n\n        if key in keys_to_objects:\n            keys_to_objects[key].add_comments(comments)\n            duplicates_found.append(key)\n        else:\n            loc_obj = LocalizationEntry(comments, key, value)\n            keys_to_objects[key] = loc_obj\n            section_file_elements.append(loc_obj)\n\n    \n    for elem in sorted(section_file_elements, key=lambda x: x.comments[0]):\n        file_elements.append(elem)\n\n    f.seek(0)\n\n    for element in file_elements:\n        f.write(unicode(element))\n        f.write(u\"\\n\")\n\n    f.truncate()\n    f.close()\n\n    logging.info(\"Omitted %d duplicates (%s)\" % (len(duplicates_found), \",\".join(duplicates_found)))\n    logging.info('Finished handling duplications for \"%s\"', file_path)", "docstring": "Omits the duplications in the strings files.\nKeys that appear more than once, will be joined to one appearance and the omit will be documented.\n\nArgs:\nfile_path (str): The path to the strings file.", "source": "juraj-google-style"}
{"code": "def hexstr(text):\n    text = text.strip().lower()\n    if text.startswith(('0x', '0X')):\n        text = text[2:]\n    if (not text):\n        raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg='No string left after stripping')\n    try:\n        s_common.uhex(text)\n    except (binascii.Error, ValueError) as e:\n        raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg=str(e))\n    return text", "docstring": "Ensure a string is valid hex.\n\nArgs:\ntext (str): String to normalize.\n\nExamples:\nNorm a few strings:\n\nhexstr('0xff00')\nhexstr('ff00')\n\nNotes:\nWill accept strings prefixed by '0x' or '0X' and remove them.\n\nReturns:\nstr: Normalized hex string.", "source": "codesearchnet"}
{"code": "def __gt__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.greater, tf.float32)", "docstring": "Returns a TensorFluent for the greater-than relational operator.\n\nArgs:\nself: The first operand.\nother: The second operand.", "source": "juraj-google-style"}
{"code": "def sg_arg_def(**kwargs):\n    for (k, v) in kwargs.items():\n        if ((type(v) is tuple) or (type(v) is list)):\n            (v, c) = (v[0], v[1])\n        else:\n            c = k\n        if (type(v) is str):\n            tf.app.flags.DEFINE_string(k, v, c)\n        elif (type(v) is int):\n            tf.app.flags.DEFINE_integer(k, v, c)\n        elif (type(v) is float):\n            tf.app.flags.DEFINE_float(k, v, c)\n        elif (type(v) is bool):\n            tf.app.flags.DEFINE_bool(k, v, c)", "docstring": "r\"\"\"Defines command line options\n\nArgs:\n**kwargs:\nkey: A name for the option.\nvalue : Default value or a tuple of (default value, description).\n\nReturns:\nNone\n\nFor example,\n\n```\n# Either of the following two lines will define `--n_epoch` command line argument and set its default value as 1.\n\ntf.sg_arg_def(n_epoch=1)\ntf.sg_arg_def(n_epoch=(1, 'total number of epochs'))\n```", "source": "codesearchnet"}
{"code": "def _GetStatus(self, two_factor=False):\n    params = ['status']\n    if two_factor:\n        params += ['--twofactor']\n    retcode = self._RunOsLoginControl(params)\n    if (retcode is None):\n        if self.oslogin_installed:\n            self.logger.warning('OS Login not installed.')\n            self.oslogin_installed = False\n        return None\n    self.oslogin_installed = True\n    if (not os.path.exists(constants.OSLOGIN_NSS_CACHE)):\n        return False\n    return (not retcode)", "docstring": "Check whether OS Login is installed.\n\nArgs:\ntwo_factor: bool, True if two factor should be enabled.\n\nReturns:\nbool, True if OS Login is installed.", "source": "codesearchnet"}
{"code": "def get_avatar(from_header, size=64, default='retro'):\n    params = OrderedDict([('s', size), ('d', default)])\n    query = parse.urlencode(params)\n    address = email.utils.parseaddr(from_header)[1]\n    value_hash = sha256(address.encode('utf-8')).hexdigest()\n    return 'https:", "docstring": "Get the avatar URL from the email's From header.\n\nArgs:\nfrom_header (str): The email's From header. May contain the sender's full name.\n\nReturns:\nstr: The URL to that sender's avatar.", "source": "codesearchnet"}
{"code": "def __init__(self, file_system, path_spec):\n    \n    super(Directory, self).__init__()\n    self._entries = None\n    self._file_system = file_system\n    self.path_spec = path_spec", "docstring": "Initializes a directory.\n\nArgs:\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.", "source": "juraj-google-style"}
{"code": "def _get_current_ids(self, source=True, meta=True, spectra=True, spectra_annotation=True):\n    c = self.c\n    if source:\n        c.execute('SELECT max(id) FROM library_spectra_source')\n        last_id_origin = c.fetchone()[0]\n        if last_id_origin:\n            self.current_id_origin = (last_id_origin + 1)\n        else:\n            self.current_id_origin = 1\n    if meta:\n        c.execute('SELECT max(id) FROM library_spectra_meta')\n        last_id_meta = c.fetchone()[0]\n        if last_id_meta:\n            self.current_id_meta = (last_id_meta + 1)\n        else:\n            self.current_id_meta = 1\n    if spectra:\n        c.execute('SELECT max(id) FROM library_spectra')\n        last_id_spectra = c.fetchone()[0]\n        if last_id_spectra:\n            self.current_id_spectra = (last_id_spectra + 1)\n        else:\n            self.current_id_spectra = 1\n    if spectra_annotation:\n        c.execute('SELECT max(id) FROM library_spectra_annotation')\n        last_id_spectra_annotation = c.fetchone()[0]\n        if last_id_spectra_annotation:\n            self.current_id_spectra_annotation = (last_id_spectra_annotation + 1)\n        else:\n            self.current_id_spectra_annotation = 1", "docstring": "Get the current id for each table in the database\n\nArgs:\nsource (boolean): get the id for the table \"library_spectra_source\" will update self.current_id_origin\nmeta (boolean): get the id for the table \"library_spectra_meta\" will update self.current_id_meta\nspectra (boolean): get the id for the table \"library_spectra\" will update self.current_id_spectra\nspectra_annotation (boolean): get the id for the table \"library_spectra_annotation\" will update\nself.current_id_spectra_annotation", "source": "codesearchnet"}
{"code": "def _inter_df_op_handler(self, func, other, **kwargs):\n        \n        axis = kwargs.get(\"axis\", 0)\n        axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0\n        if isinstance(other, type(self)):\n            return self._inter_manager_operations(\n                other, \"outer\", lambda x, y: func(x, y, **kwargs)\n            )\n        else:\n            return self._scalar_operations(\n                axis, other, lambda df: func(df, other, **kwargs)\n            )", "docstring": "Helper method for inter-manager and scalar operations.\n\nArgs:\nfunc: The function to use on the Manager/scalar.\nother: The other Manager/scalar.\n\nReturns:\nNew DataManager with new data and index.", "source": "juraj-google-style"}
{"code": "def process_user_input(self):\n    user_input = self.screen.input()\n    try:\n        indexes = self.__parse_range_list(user_input)\n        indexes[:] = [(x - 1) for x in indexes if (0 < x < (len(self.items) + 1))]\n        for index in indexes:\n            self.current_option = index\n            self.select()\n    except Exception as e:\n        return", "docstring": "This overrides the method in ConsoleMenu to allow for comma-delimited and range inputs.\n\nExamples:\nAll of the following inputs would have the same result:\n* 1,2,3,4\n* 1-4\n* 1-2,3-4\n* 1 - 4\n* 1, 2, 3, 4\nRaises:\nValueError: If the input cannot be correctly parsed.", "source": "codesearchnet"}
{"code": "def StoreRequestsAndResponses(self, new_requests=None, new_responses=None, requests_to_delete=None):\n    to_write = {}\n    if (new_requests is not None):\n        for (request, timestamp) in new_requests:\n            subject = request.session_id.Add('state')\n            queue = to_write.setdefault(subject, {})\n            queue.setdefault((self.FLOW_REQUEST_TEMPLATE % request.id), []).append((request.SerializeToString(), timestamp))\n    if (new_responses is not None):\n        for (response, timestamp) in new_responses:\n            if (response.type == rdf_flows.GrrMessage.Type.STATUS):\n                subject = response.session_id.Add('state')\n                attribute = (self.FLOW_STATUS_TEMPLATE % response.request_id)\n                to_write.setdefault(subject, {}).setdefault(attribute, []).append((response.SerializeToString(), timestamp))\n            subject = self.GetFlowResponseSubject(response.session_id, response.request_id)\n            attribute = (self.FLOW_RESPONSE_TEMPLATE % (response.request_id, response.response_id))\n            to_write.setdefault(subject, {}).setdefault(attribute, []).append((response.SerializeToString(), timestamp))\n    to_delete = {}\n    if (requests_to_delete is not None):\n        for request in requests_to_delete:\n            queue = to_delete.setdefault(request.session_id.Add('state'), [])\n            queue.append((self.FLOW_REQUEST_TEMPLATE % request.id))\n            queue.append((self.FLOW_STATUS_TEMPLATE % request.id))\n    for subject in (set(to_write) | set(to_delete)):\n        self.MultiSet(subject, to_write.get(subject, {}), to_delete=to_delete.get(subject, []), sync=True)", "docstring": "Stores new flow requests and responses to the data store.\n\nArgs:\nnew_requests: A list of tuples (request, timestamp) to store in the data\nstore.\nnew_responses: A list of tuples (response, timestamp) to store in the data\nstore.\nrequests_to_delete: A list of requests that should be deleted from the\ndata store.", "source": "codesearchnet"}
{"code": "def report_status_to_github(self, state: str, description: str, context: str, target_url: Optional[str]=None):\n    if (state not in ['error', 'failure', 'pending', 'success']):\n        raise ValueError('Unrecognized state: {!r}'.format(state))\n    if ((self.repository is None) or (self.repository.access_token is None)):\n        return\n    print(repr(('report_status', context, state, description, target_url)), file=sys.stderr)\n    payload = {'state': state, 'description': description, 'context': context}\n    if (target_url is not None):\n        payload['target_url'] = target_url\n    url = 'https:\n    response = requests.post(url, json=payload)\n    if (response.status_code != 201):\n        raise IOError('Request failed. Code: {}. Content: {}.'.format(response.status_code, response.content))", "docstring": "Sets a commit status indicator on github.\n\nIf not running from a pull request (i.e. repository is None), then this\njust prints to stderr.\n\nArgs:\nstate: The state of the status indicator.\nMust be 'error', 'failure', 'pending', or 'success'.\ndescription: A summary of why the state is what it is,\ne.g. '5 lint errors' or 'tests passed!'.\ncontext: The name of the status indicator, e.g. 'pytest' or 'lint'.\ntarget_url: Optional location where additional details about the\nstatus can be found, e.g. an online test results page.\n\nRaises:\nValueError: Not one of the allowed states.\nIOError: The HTTP post request failed, or the response didn't have\na 201 code indicating success in the expected way.", "source": "codesearchnet"}
{"code": "def detect_extracellular_compartment(model):\n    \n    extracellular_key = Counter()\n\n    for reaction in model.reactions:\n        equation = reaction.equation\n        if equation is None:\n            continue\n\n        if len(equation.compounds) == 1:\n            compound, _ = equation.compounds[0]\n            compartment = compound.compartment\n            extracellular_key[compartment] += 1\n    if len(extracellular_key) == 0:\n        return None\n    else:\n        best_key, _ = extracellular_key.most_common(1)[0]\n\n    logger.info('{} is extracellular compartment'.format(best_key))\n\n    return best_key", "docstring": "Detect the identifier for equations with extracellular compartments.\n\nArgs:\nmodel: :class:`NativeModel`.", "source": "juraj-google-style"}
{"code": "def swd_read32(self, offset):\n    value = self._dll.JLINK_SWD_GetU32(offset)\n    return ctypes.c_uint32(value).value", "docstring": "Gets a unit of ``32`` bits from the input buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): the offset (in bits) from which to start reading\n\nReturns:\nThe integer read from the input buffer.", "source": "codesearchnet"}
{"code": "def from_non_deterministic_state(cls, alg=None):\n    if config.is_op_determinism_enabled():\n        raise RuntimeError('\"from_non_deterministic_state\" cannot be called when determinism is enabled.')\n    if alg is None:\n        alg = DEFAULT_ALGORITHM\n    alg = random_ops_util.convert_alg_to_int(alg)\n    state = non_deterministic_ints(shape=[_get_state_size(alg)], dtype=SEED_TYPE)\n    return cls(state=state, alg=alg)", "docstring": "Creates a generator by non-deterministically initializing its state.\n\nThe source of the non-determinism will be platform- and time-dependent.\n\nArgs:\nalg: (optional) the RNG algorithm. If None, it will be auto-selected. See\n`__init__` for its possible values.\n\nReturns:\nThe new generator.", "source": "github-repos"}
{"code": "def reqExecutions(\n            self, execFilter: ExecutionFilter = None) -> List[Fill]:\n        \n        return self._run(self.reqExecutionsAsync(execFilter))", "docstring": "It is recommended to use :meth:`.fills`  or\n:meth:`.executions` instead.\n\nRequest and return a list a list of fills.\n\nThis method is blocking.\n\nArgs:\nexecFilter: If specified, return executions that match the filter.", "source": "juraj-google-style"}
{"code": "def evaluate(self, verbose=True, decode=True, passes=None, num_threads=1,\n                 apply_experimental_transforms=False):\n        \n        if isinstance(self.expr, WeldObject):\n            return self.expr.evaluate(\n                to_weld_type(\n                    self.weld_type,\n                    self.dim),\n                verbose,\n                decode,\n                passes=passes,\n                num_threads=num_threads,\n                apply_experimental_transforms=apply_experimental_transforms)\n        return self.expr", "docstring": "Summary\n\nArgs:\nverbose (bool, optional): Description\ndecode (bool, optional): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def get_hosted_zone_by_name(client, zone_name):\n    \n    p = client.get_paginator(\"list_hosted_zones\")\n\n    for i in p.paginate():\n        for zone in i[\"HostedZones\"]:\n            if zone[\"Name\"] == zone_name:\n                return parse_zone_id(zone[\"Id\"])\n    return None", "docstring": "Get the zone id of an existing zone by name.\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\nstring: The Id of the Hosted Zone.", "source": "juraj-google-style"}
{"code": "def mtf_range(mesh, dim, dtype, name=None):\n  \n  dim = convert_to_dimension(dim)\n  with tf.variable_scope(name, default_name=\"range\"):\n    if dtype == tf.bfloat16:\n      \n      \n      tf_range = tf.cast(tf.range(dim.size), tf.bfloat16)\n    else:\n      tf_range = tf.range(dim.size, dtype=dtype)\n    return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))", "docstring": "Create a 1d mesh tensor with a range from [0, dim.size).\n\nCall externally as mtf.range()\n\nArgs:\nmesh: a Mesh\ndim: a Dimension\ndtype: a tf.DType\nname: an optional string\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def create_xml_dom_element(doc, name, value):\n  \n  s = str_or_unicode(value)\n  if six.PY2 and not isinstance(s, unicode):\n    \n    s = s.decode('utf-8', 'ignore')\n  if isinstance(value, bool):\n    \n    s = s.lower()\n  \n  s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s)\n\n  e = doc.createElement(name)\n  e.appendChild(doc.createTextNode(s))\n  return e", "docstring": "Returns an XML DOM element with name and text value.\n\nArgs:\ndoc: minidom.Document, the DOM document it should create nodes from.\nname: str, the tag of XML element.\nvalue: object, whose string representation will be used\nas the value of the XML element. Illegal or highly discouraged xml 1.0\ncharacters are stripped.\n\nReturns:\nAn instance of minidom.Element.", "source": "juraj-google-style"}
{"code": "def _do_pass(self, pass_, dag, options):\n        \n\n        \n        if not options[\"ignore_requires\"]:\n            for required_pass in pass_.requires:\n                dag = self._do_pass(required_pass, dag, options)\n\n        \n        if pass_ not in self.valid_passes:\n            if pass_.is_transformation_pass:\n                pass_.property_set = self.fenced_property_set\n                new_dag = pass_.run(dag)\n                if not isinstance(new_dag, DAGCircuit):\n                    raise TranspilerError(\"Transformation passes should return a transformed dag.\"\n                                          \"The pass %s is returning a %s\" % (type(pass_).__name__,\n                                                                             type(new_dag)))\n                dag = new_dag\n            elif pass_.is_analysis_pass:\n                pass_.property_set = self.property_set\n                pass_.run(FencedDAGCircuit(dag))\n            else:\n                raise TranspilerError(\"I dont know how to handle this type of pass\")\n\n            \n            self._update_valid_passes(pass_, options['ignore_preserves'])\n\n        return dag", "docstring": "Do a pass and its \"requires\".\n\nArgs:\npass_ (BasePass): Pass to do.\ndag (DAGCircuit): The dag on which the pass is ran.\noptions (dict): PassManager options.\nReturns:\nDAGCircuit: The transformed dag in case of a transformation pass.\nThe same input dag in case of an analysis pass.\nRaises:\nTranspilerError: If the pass is not a proper pass instance.", "source": "juraj-google-style"}
{"code": "def get_request_feature(self, name):\n    if ('[]' in name):\n        return (self.request.query_params.getlist(name) if (name in self.features) else None)\n    elif ('{}' in name):\n        return (self._extract_object_params(name) if (name in self.features) else {})\n    else:\n        return (self.request.query_params.get(name) if (name in self.features) else None)", "docstring": "Parses the request for a particular feature.\n\nArguments:\nname: A feature name.\n\nReturns:\nA feature parsed from the URL if the feature is supported, or None.", "source": "codesearchnet"}
{"code": "def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):\n    ratios = tuple((float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)))\n    ratio_height, ratio_width = ratios\n    new_annotation = {}\n    new_annotation['size'] = target_size\n    for key, value in annotation.items():\n        if key == 'boxes':\n            boxes = value\n            scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)\n            new_annotation['boxes'] = scaled_boxes\n        elif key == 'area':\n            area = value\n            scaled_area = area * (ratio_width * ratio_height)\n            new_annotation['area'] = scaled_area\n        elif key == 'masks':\n            masks = value[:, None]\n            masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])\n            masks = masks.astype(np.float32)\n            masks = masks[:, 0] > threshold\n            new_annotation['masks'] = masks\n        elif key == 'size':\n            new_annotation['size'] = target_size\n        else:\n            new_annotation[key] = value\n    return new_annotation", "docstring": "Resizes an annotation to a target size.\n\nArgs:\nannotation (`Dict[str, Any]`):\nThe annotation dictionary.\norig_size (`Tuple[int, int]`):\nThe original size of the input image.\ntarget_size (`Tuple[int, int]`):\nThe target size of the image, as returned by the preprocessing `resize` step.\nthreshold (`float`, *optional*, defaults to 0.5):\nThe threshold used to binarize the segmentation masks.\nresample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):\nThe resampling filter to use when resizing the masks.", "source": "github-repos"}
{"code": "def select_with_condition(self, condition, key=None):\n    condition = Condition.as_condition(condition)\n    new_confs = []\n    for conf in self:\n        obj = (conf if (key is None) else AttrDict(conf[key]))\n        add_it = condition(obj=obj)\n        if add_it:\n            new_confs.append(conf)\n    self._confs = new_confs", "docstring": "Remove all the configurations that do not satisfy the given condition.\n\nArgs:\ncondition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax\nkey: Selects the sub-dictionary on which condition is applied, e.g. key=\"vars\"\nif we have to filter the configurations depending on the values in vars", "source": "codesearchnet"}
{"code": "def Decrypt(self, encrypted_data):\n    \n    index_split = -(len(encrypted_data) % AES.block_size)\n    if index_split:\n      remaining_encrypted_data = encrypted_data[index_split:]\n      encrypted_data = encrypted_data[:index_split]\n    else:\n      remaining_encrypted_data = b''\n\n    decrypted_data = self._aes_cipher.decrypt(encrypted_data)\n\n    return decrypted_data, remaining_encrypted_data", "docstring": "Decrypts the encrypted data.\n\nArgs:\nencrypted_data (bytes): encrypted data.\n\nReturns:\ntuple[bytes, bytes]: decrypted data and remaining encrypted data.", "source": "juraj-google-style"}
{"code": "def get_trial(self) -> Trial:", "docstring": "Gets current Trial.\n\nReturns:\nAn up-to-date `Trial` object. A distributed tuning backend should make\nsure the return value is up-to-date not only locally, but among different\nworkers.", "source": "github-repos"}
{"code": "def get_tick(self, name):\n    name_map = {'fast': config_fast_tick_secs, 'user1': config_tick1_secs, 'user2': config_tick2_secs}\n    config = name_map.get(name)\n    if (config is None):\n        raise ArgumentError('Unknown tick requested', name=name)\n    slot = SlotIdentifier.FromString('controller')\n    try:\n        var = self.get_config(slot, config)\n        return var[1]\n    except ArgumentError:\n        return 0", "docstring": "Check the config variables to see if there is a configurable tick.\n\nSensor Graph has a built-in 10 second tick that is sent every 10\nseconds to allow for triggering timed events.  There is a second\n'user' tick that is generated internally by the sensorgraph compiler\nand used for fast operations and finally there are several field\nconfigurable ticks that can be used for setting up configurable\ntimers.\n\nThis is done by setting a config variable on the controller with the\ndesired tick interval, which is then interpreted by this function.\n\nThe appropriate config_id to use is listed in `known_constants.py`\n\nReturns:\nint: 0 if the tick is disabled, otherwise the number of seconds\nbetween each tick", "source": "codesearchnet"}
{"code": "def _count_eventually_passing_retries(self):\n    count = 0\n    for record in self.passed:\n        r = record\n        while r.parent is not None and r.parent[1] == TestParentType.RETRY:\n            count += 1\n            r = r.parent[0]\n    return count", "docstring": "Counts the number of retry iterations that eventually passed.\n\nIf a test is retried and eventually passed, all the associated non-passing\niterations should not be considered when devising the final state of the\ntest run.\n\nReturns:\nInt, the number that should be subtracted from the result altering error\ncounts.", "source": "github-repos"}
{"code": "def split_vector_ctype(ctype):\n    \n    if not is_vector_ctype(ctype):\n        raise ValueError('The given ctype is not a vector type.')\n    for vector_length in [2, 3, 4, 8, 16]:\n        if ctype.endswith(str(vector_length)):\n            vector_str_len = len(str(vector_length))\n            return ctype[:-vector_str_len], int(ctype[-vector_str_len:])", "docstring": "Split a vector ctype into a raw ctype and the vector length.\n\nIf the given ctype is not a vector type, we raise an error. I\n\nArgs:\nctype (str): the ctype to possibly split into a raw ctype and the vector length\n\nReturns:\ntuple: the raw ctype and the vector length", "source": "juraj-google-style"}
{"code": "def add_region_feature(self, start_resnum, end_resnum, feat_type=None, feat_id=None, qualifiers=None):\n    if self.feature_file:\n        raise ValueError('Feature file associated with sequence, please remove file association to append additional features.')\n    if (not feat_type):\n        feat_type = 'Manually added protein sequence region feature'\n    newfeat = SeqFeature(location=FeatureLocation((start_resnum - 1), end_resnum), type=feat_type, id=feat_id, qualifiers=qualifiers)\n    self.features.append(newfeat)", "docstring": "Add a feature to the features list describing a region of the protein sequence.\n\nArgs:\nstart_resnum (int): Start residue number of the protein sequence feature\nend_resnum (int): End residue number of the protein sequence feature\nfeat_type (str, optional): Optional description of the feature type (ie. 'binding domain')\nfeat_id (str, optional): Optional ID of the feature type (ie. 'TM1')", "source": "codesearchnet"}
{"code": "def update(self, **kwargs):\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(self, key):\n            setattr(self, key, value)\n            to_remove.append(key)\n    self.validate()\n    unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}\n    return unused_kwargs", "docstring": "Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,\nreturning all the unused kwargs.\n\nArgs:\nkwargs (`Dict[str, Any]`):\nDictionary of attributes to tentatively update this class.\n\nReturns:\n`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.", "source": "github-repos"}
{"code": "def load_imgs(filenames, masker, nan_to_num=True):\n    if isinstance(filenames, string_types):\n        filenames = [filenames]\n    data = np.zeros((masker.n_vox_in_mask, len(filenames)))\n    for (i, f) in enumerate(filenames):\n        data[(:, i)] = masker.mask(f, nan_to_num)\n    return data", "docstring": "Load multiple images from file into an ndarray.\n\nArgs:\nfilenames: A single filename or list of filenames pointing to valid\nimages.\nmasker: A Masker instance.\nnan_to_num: Optional boolean indicating whether to convert NaNs to zero.\n\nReturns:\nAn m x n 2D numpy array, where m = number of voxels in mask and\nn = number of images passed.", "source": "codesearchnet"}
{"code": "def is_coord_subset_pbc(subset, superset, atol=1e-8, mask=None):\n    \n    c1 = np.array(subset, dtype=np.float64)\n    c2 = np.array(superset, dtype=np.float64)\n    if mask is not None:\n        m = np.array(mask, dtype=np.int)\n    else:\n        m = np.zeros((len(subset), len(superset)), dtype=np.int)\n    atol = np.zeros(3, dtype=np.float64) + atol\n    return cuc.is_coord_subset_pbc(c1, c2, atol, m)", "docstring": "Tests if all fractional coords in subset are contained in superset.\n\nArgs:\nsubset, superset: List of fractional coords\natol (float or size 3 array): Tolerance for matching\nmask (boolean array): Mask of matches that are not allowed.\ni.e. if mask[1,2] == True, then subset[1] cannot be matched\nto superset[2]\n\nReturns:\nTrue if all of subset is in superset.", "source": "juraj-google-style"}
{"code": "def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):\n    node_def = copy.deepcopy(from_node_def)\n    for i, v in enumerate(node_def.input):\n        if export_scope and (not node_def.input[i].lstrip('^').startswith(export_scope)):\n            node_def.input[i] = re.sub('([\\\\^]|^)(.*)', '\\\\1' + _UNBOUND_INPUT_PREFIX + '\\\\2', compat.as_str(v))\n            unbound_inputs.append(node_def.input[i])\n        else:\n            node_def.input[i] = ops.strip_name_scope(v, export_scope)\n    node_def.name = compat.as_bytes(ops.strip_name_scope(from_node_def.name, export_scope))\n    for k, v in from_node_def.attr.items():\n        if k == '_class':\n            new_s = [compat.as_bytes(ops.strip_name_scope(s, export_scope)) for s in v.list.s if not export_scope or compat.as_str(s).split('@')[1].startswith(export_scope)]\n            node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(s=new_s)))\n        elif node_def.op in ('Enter', 'RefEnter') and k == 'frame_name':\n            if not export_scope or compat.as_str(v.s).startswith(export_scope):\n                new_s = compat.as_bytes(ops.strip_name_scope(v.s, export_scope))\n            node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(s=new_s))\n        else:\n            node_def.attr[k].CopyFrom(v)\n    if clear_devices:\n        node_def.device = ''\n    return node_def", "docstring": "Create a `NodeDef` proto with export_scope stripped.\n\nArgs:\nfrom_node_def: A `node_def_pb2.NodeDef` protocol buffer.\nexport_scope: A `string` representing the name scope to remove.\nunbound_inputs: An array of unbound input names if they exist.\nclear_devices: Boolean which controls whether to clear device information\nfrom node_def. Default false.\n\nReturns:\nA `node_def_pb2.NodeDef` protocol buffer.", "source": "github-repos"}
{"code": "async def async_fetch(url: str, **kwargs) -> Selector:\n    \n    kwargs.setdefault('headers', DEFAULT_HEADERS)\n    async with aiohttp.ClientSession(**kwargs) as ses:\n        async with ses.get(url, **kwargs) as res:\n            html = await res.text()\n            tree = Selector(text=html)\n            return tree", "docstring": "Do the fetch in an async style.\n\nArgs:\nurl (str): The url of the site.\n\nReturns:\nSelector: allows you to select parts of HTML text using CSS or XPath expressions.", "source": "juraj-google-style"}
{"code": "def file_name_increase(file_name, file_location):\n    \n    add_one = 1\n    file_name_temp = file_name\n    while verify_file_exists(file_name_temp, file_location):\n        try:\n            name, file_extension = file_name.split('.')\n            file_name_temp = '%s-%i.%s' % (name, add_one, file_extension)\n        except Exception as e:\n            LOGGER.critical('Function file_name_increase Error {error} ignoring any errors'.format(error=e))\n            name = file_name\n            file_name_temp = '%s-%i' % (name, add_one)\n        add_one += 1\n    file_name = file_name_temp\n    return file_name", "docstring": "Function to increase a filename by a number 1\nArgs:\nfile_name: The name of file to check\nfile_location: The location of the file, derive from the os module\n\nReturns: returns a good filename.", "source": "juraj-google-style"}
{"code": "def add_outgoing_edge(self, edge):\n    self._outgoing_edges.append(edge)", "docstring": "Adds an outgoing edge to the Convertible's list of edges.\n\nArgs:\nedge: The outgoing edge (its source should be 'self').", "source": "github-repos"}
{"code": "def copy_foreign_keys(self, event):\n    event_keys = set(event._meta.fields.keys())\n    obj_keys = self._meta.fields.keys()\n    matching_keys = event_keys.intersection(obj_keys)\n    for key in matching_keys:\n        if (key == 'created_by'):\n            continue\n        if (not isinstance(self._meta.fields[key], peewee.ForeignKeyField)):\n            continue\n        setattr(event, key, getattr(self, key))\n    possible_key = self.__class__.__name__.lower()\n    if ((possible_key in event_keys) and (event.code != 'AUDIT_DELETE')):\n        setattr(event, possible_key, self)", "docstring": "Copies possible foreign key values from the object into the Event,\nskipping common keys like modified and created.\n\nArgs:\nevent (Event): The Event instance to copy the FKs into\nobj (fleaker.db.Model): The object to pull the values from", "source": "codesearchnet"}
{"code": "def validate_dataset_input(x, y, sample_weight, validation_split=None):\n    if y is not None:\n        raise ValueError('You passed a dataset or dataset iterator (%s) as input `x` to your model. In that case, you should not specify a target (`y`) argument, since the dataset or dataset iterator generates both input data and target data. Received: %s' % (x, y))\n    if sample_weight is not None:\n        raise ValueError('`sample_weight` argument is not supported when input `x` is a dataset or a dataset iterator. Instead, youcan provide sample_weight as the third element  of yourdataset, i.e. (inputs, targets, sample_weight). Received: x=%s, sample_weight=%s' % (x, sample_weight))\n    if validation_split is not None and validation_split != 0.0:\n        raise ValueError('`validation_split` argument is not supported when input `x` is a dataset or a dataset iterator. Received: x=%s, validation_split=%f' % (x, validation_split))", "docstring": "Validates user input arguments when a dataset iterator is passed.\n\nArgs:\nx: Input data. A `tf.data` dataset or iterator.\ny: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).\nExpected to be `None` when `x` is a dataset iterator.\nsample_weight: An optional sample-weight array passed by the user to weight\nthe importance of each sample in `x`. Expected to be `None` when `x` is a\ndataset iterator\nvalidation_split: Float between 0 and 1. Fraction of the training data to be\nused as validation data. Expected to be `None` when `x` is a dataset\niterator.\n\nRaises:\nValueError: if argument `y` or `sample_weight` or `validation_split` are\nprovided by user.", "source": "github-repos"}
{"code": "def get_utxoset_merkle_root(self):\n    utxoset = backend.query.get_unspent_outputs(self.connection)\n    hashes = [sha3_256('{}{}'.format(utxo['transaction_id'], utxo['output_index']).encode()).digest() for utxo in utxoset]\n    return merkleroot(sorted(hashes))", "docstring": "Returns the merkle root of the utxoset. This implies that\nthe utxoset is first put into a merkle tree.\n\nFor now, the merkle tree and its root will be computed each\ntime. This obviously is not efficient and a better approach\nthat limits the repetition of the same computation when\nunnecesary should be sought. For instance, future optimizations\ncould simply re-compute the branches of the tree that were\naffected by a change.\n\nThe transaction hash (id) and output index should be sufficient\nto uniquely identify a utxo, and consequently only that\ninformation from a utxo record is needed to compute the merkle\nroot. Hence, each node of the merkle tree should contain the\ntuple (txid, output_index).\n\n.. important:: The leaves of the tree will need to be sorted in\nsome kind of lexicographical order.\n\nReturns:\nstr: Merkle root in hexadecimal form.", "source": "codesearchnet"}
{"code": "def encode(self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, freeze_feature_encoder: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(inputs, dtype='i4')\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, inputs, attention_mask, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(inputs, attention_mask, **kwargs)\n    outputs = self.module.apply({'params': params or self.params}, inputs=jnp.array(inputs, dtype='f4'), attention_mask=jnp.array(attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs, method=_encoder_forward)\n    if return_dict:\n        outputs = FlaxBaseModelOutput(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import FlaxSpeechEncoderDecoderModel\n\n>>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized\n>>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(\n...     \"facebook/wav2vec2-large-lv60\", \"facebook/bart-large\"\n... )\n\n>>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)\n>>> encoder_outputs = model.encode(inputs)\n```", "source": "github-repos"}
{"code": "def to_event_set(pipe: beam.PCollection[Dict[str, Any]], schema: Schema, timestamp_key: str='timestamp', format: DictEventSetFormatChoices=DictEventSetFormat.GROUPED_BY_INDEX) -> BeamEventSet:\n    num_features = len(schema.features)\n    if format == DictEventSetFormat.GROUPED_BY_INDEX:\n        if num_features != 0:\n            return partition_by_feature_idx(pipe | 'Parse dict' >> beam.FlatMap(_event_set_dict_to_event_set, schema, timestamp_key), num_features=num_features, reshuffle=True)\n        else:\n            return _reshuffle_item_in_tuples((pipe | 'Parse dict' >> beam.Map(_event_set_dict_to_event_set_no_features, schema, timestamp_key),))\n    elif format == DictEventSetFormat.SINGLE_EVENTS:\n        indexed = pipe | 'Parse and index' >> beam.Map(_parse_and_index, schema, timestamp_key) | 'Aggregate' >> beam.GroupByKey()\n        if num_features != 0:\n            return partition_by_feature_idx(indexed | 'Merge by timestamps' >> beam.ParDo(_MergeTimestamps(schema.features)), num_features=num_features, reshuffle=True)\n        else:\n            return _reshuffle_item_in_tuples((indexed | 'Merge by timestamps' >> beam.Map(_merge_timestamps_no_features),))\n    else:\n        raise ValueError(f'Unknown format {format}')", "docstring": "Converts a PCollection of key:value to a Beam EventSet.\n\nThis method is compatible with the output of `from_csv_raw` and the\nOfficial Beam IO connectors.\n\nWhen importing data from csv files, use `from_csv` to convert csv files\ndirectly into EventSets.\n\nUnlike Temporian in-process EventSet import method (\n[tp.event_set][temporian.event_set])), this method (`tpb.to_event_set`)\nrequires for timestamps to be numerical values.\n\nArgs:\npipe: Beam pipe of key values.\nschema: Schema of the data. Note: The schema of a Temporian node is\navailable with `node.schema`.\ntimestamp_key: Key containing the timestamps.\nformat: Format of the events inside the received dictionary. See\n[DictEventSetFormat][temporian.io.format.DictEventSetFormat] for\nmore.\n\nReturns:\nBeam EventSet.", "source": "github-repos"}
{"code": "def assert_style(self, styles, **kwargs):\n        \n\n        query = StyleQuery(styles, **kwargs)\n\n        @self.synchronize(wait=query.wait)\n        def assert_style():\n            if not query.resolves_for(self):\n                raise ExpectationNotMet(query.failure_message)\n\n            return True\n\n        return assert_style()", "docstring": "Asserts that an element has the specified CSS styles. ::\n\nelement.assert_style({\"color\": \"rgb(0,0,255)\", \"font-size\": re.compile(r\"px\")})\n\nArgs:\nstyles (Dict[str, str | RegexObject]): The expected styles.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: The element doesn't have the specified styles.", "source": "juraj-google-style"}
{"code": "def Insert(self, request, global_params=None):\n    config = self.GetMethodConfig('Insert')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new routine in the dataset.\n\nArgs:\nrequest: (BigqueryRoutinesInsertRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Routine) The response message.", "source": "github-repos"}
{"code": "def __init__(self, shape_id=None, lat=None, lon=None,seq=None, dist=None,\n               field_dict=None):\n    \n    self._schedule = None\n    if field_dict:\n      if isinstance(field_dict, self.__class__):\n        for k, v in field_dict.iteritems():\n          self.__dict__[k] = v\n      else:\n        self.__dict__.update(field_dict)\n    else:\n      self.shape_id = shape_id\n      self.shape_pt_lat = lat\n      self.shape_pt_lon = lon\n      self.shape_pt_sequence = seq\n      self.shape_dist_traveled = dist", "docstring": "Initialize a new ShapePoint object.\n\nArgs:\nfield_dict: A dictionary mapping attribute name to unicode string", "source": "juraj-google-style"}
{"code": "def AddAdGroup(self, client_customer_id, campaign_id, name, status):\n    self.client.SetClientCustomerId(client_customer_id)\n    ad_group_service = self.client.GetService('AdGroupService')\n    operations = [{'operator': 'ADD', 'operand': {'campaignId': campaign_id, 'name': name, 'status': status}}]\n    ad_group_service.mutate(operations)", "docstring": "Create a new ad group.\n\nArgs:\nclient_customer_id: str Client Customer Id used to create the AdGroup.\ncampaign_id: str Id of the campaign to use.\nname: str Name to assign to the AdGroup.\nstatus: str Status to assign to the AdGroup when it is created.", "source": "codesearchnet"}
{"code": "def learn_q(self, predicted_q_arr, real_q_arr):\n        \n        self.__predicted_q_arr_list.append(predicted_q_arr)\n        while len(self.__predicted_q_arr_list) > self.__seq_len:\n            self.__predicted_q_arr_list = self.__predicted_q_arr_list[1:]\n        while len(self.__predicted_q_arr_list) < self.__seq_len:\n            self.__predicted_q_arr_list.append(self.__predicted_q_arr_list[-1])\n        predicted_q_arr = np.array(self.__predicted_q_arr_list)\n        predicted_q_arr = predicted_q_arr.transpose((1, 0, 2))\n\n        self.__real_q_arr_list.append(real_q_arr)\n        while len(self.__real_q_arr_list) > self.__seq_len:\n            self.__real_q_arr_list = self.__real_q_arr_list[1:]\n        while len(self.__real_q_arr_list) < self.__seq_len:\n            self.__real_q_arr_list.append(self.__real_q_arr_list[-1])\n        real_q_arr = np.array(self.__real_q_arr_list)\n        real_q_arr = real_q_arr.transpose((1, 0, 2))\n\n        loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)\n        delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)\n\n        delta_arr, lstm_output_grads_list = self.__lstm_model.output_back_propagate(\n            predicted_q_arr,\n            delta_arr\n        )\n        delta_arr, _, lstm_hidden_grads_list = self.__lstm_model.hidden_back_propagate(\n            delta_arr[:, -1]\n        )\n        lstm_grads_list = lstm_output_grads_list\n        lstm_grads_list.extend(lstm_hidden_grads_list)\n        self.__lstm_model.optimize(lstm_grads_list, self.__learning_rate, 1)\n        self.__loss_list.append(loss)", "docstring": "Infernce Q-Value.\n\nArgs:\npredicted_q_arr:    `np.ndarray` of predicted Q-Values.\nreal_q_arr:         `np.ndarray` of real Q-Values.", "source": "juraj-google-style"}
{"code": "def parse_conservations(variant):\n    \n    conservations = {}\n\n    conservations['gerp'] = parse_conservation(\n                                            variant,\n                                            'dbNSFP_GERP___RS'\n                                        )\n    conservations['phast'] = parse_conservation(\n                                            variant,\n                                            'dbNSFP_phastCons100way_vertebrate'\n                                        )\n    conservations['phylop'] = parse_conservation(\n                                            variant,\n                                            'dbNSFP_phyloP100way_vertebrate'\n                                        )\n    return conservations", "docstring": "Parse the conservation predictors\n\nArgs:\nvariant(dict): A variant dictionary\n\nReturns:\nconservations(dict): A dictionary with the conservations", "source": "juraj-google-style"}
{"code": "def __init__(self, tensors=None, values=None, tol=1e-5):\n        \n        self._tensor_list = tensors or []\n        self._value_list = values or []\n        if not len(self._tensor_list) == len(self._value_list):\n            raise ValueError(\"TensorMapping must be initialized with tensors\"\n                             \"and values of equivalent length\")\n        self.tol = tol", "docstring": "Initialize a TensorMapping\n\nArgs:\ntensor_list ([Tensor]): list of tensors\nvalue_list ([]): list of values to be associated with tensors\ntol (float): an absolute tolerance for getting and setting\nitems in the mapping", "source": "juraj-google-style"}
{"code": "def login(self, broker_name, account_cookie, account=None):\n        \n        res = False\n        if account is None:\n            if account_cookie not in self.session.keys():\n                self.session[account_cookie] = QA_Account(\n                    account_cookie=account_cookie,\n                    broker=broker_name\n                )\n                if self.sync_account(broker_name, account_cookie):\n                    res = True\n\n                if self.if_start_orderthreading and res:\n                    \n                    self.order_handler.subscribe(\n                        self.session[account_cookie],\n                        self.broker[broker_name]\n                    )\n\n        else:\n            if account_cookie not in self.session.keys():\n                account.broker = broker_name\n                self.session[account_cookie] = account\n                if self.sync_account(broker_name, account_cookie):\n                    res = True\n                if self.if_start_orderthreading and res:\n                    \n                    self.order_handler.subscribe(\n                        account,\n                        self.broker[broker_name]\n                    )\n\n        if res:\n            return res\n        else:\n            try:\n                self.session.pop(account_cookie)\n            except:\n                pass\n            return False", "docstring": "login 登录到交易前置\n\n2018-07-02 在实盘中,登录到交易前置后,需要同步资产状态\n\nArguments:\nbroker_name {[type]} -- [description]\naccount_cookie {[type]} -- [description]\n\nKeyword Arguments:\naccount {[type]} -- [description] (default: {None})\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def save(self, **kwargs):\n    updated_data = self._get_updated_data()\n    if (not updated_data):\n        return\n    obj_id = self.get_id()\n    server_data = self.manager.update(obj_id, updated_data, **kwargs)\n    if (server_data is not None):\n        self._update_attrs(server_data)", "docstring": "Save the changes made to the object to the server.\n\nThe object is updated to match what the server returns.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaise:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def insert_taxon_in_new_fasta_file(self, aln):\n    new_seq_records = []\n    for seq_record in SeqIO.parse(aln, 'fasta'):\n        new_seq_record_id = '[{0}] {1}'.format(self.taxon_for_codon_usage, seq_record.id)\n        new_seq_record = SeqRecord(seq_record.seq, id=new_seq_record_id)\n        new_seq_records.append(new_seq_record)\n    base_filename = os.path.splitext(aln)\n    new_filename = '{0}_modified{1}'.format(base_filename[0], base_filename[1])\n    SeqIO.write(new_seq_records, new_filename, 'fasta')\n    return new_filename", "docstring": "primer4clades infers the codon usage table from the taxon names in the\nsequences.\n\nThese names need to be enclosed by square brackets and be\npresent in the description of the FASTA sequence. The position is not\nimportant. I will insert the names in the description in a new FASTA\nfile.\n\nReturns:\nFilename of modified FASTA file that includes the name of the taxon.", "source": "codesearchnet"}
{"code": "def check_oneof(**kwargs):\n    if (not kwargs):\n        return None\n    not_nones = [val for val in kwargs.values() if (val is not None)]\n    if (len(not_nones) > 1):\n        raise ValueError('Only one of {fields} should be set.'.format(fields=', '.join(sorted(kwargs.keys()))))", "docstring": "Raise ValueError if more than one keyword argument is not none.\n\nArgs:\nkwargs (dict): The keyword arguments sent to the function.\n\nReturns: None\n\nRaises:\nValueError: If more than one entry in kwargs is not none.", "source": "codesearchnet"}
{"code": "def get_additional_charge_by_identifier(self, recurring_billing_id):\n        \n        fmt = 'recurringBillItems/{}'.format(recurring_billing_id)\n        return self.client._get(self.url + fmt, headers=self.get_headers())", "docstring": "Query extra charge information of an invoice from its identifier.\n\nArgs:\nrecurring_billing_id: Identifier of the additional charge.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def update_target_state(self, value: str, force: bool=True) -> datetime:\n    value = value.lower()\n    if (not force):\n        current_state = self.current_state\n        if (current_state == 'unknown'):\n            raise RuntimeError(\"Unable to set target state when current state is 'unknown'\")\n        allowed_target_states = self._allowed_target_states[current_state]\n        LOG.debug('Updating target state of %s to %s', self._id, value)\n        if (value not in allowed_target_states):\n            raise ValueError(\"Invalid target state: '{}'. {} can be commanded to states: {}\".format(value, current_state, allowed_target_states))\n    return self._update_state('target', value)", "docstring": "Set the target state.\n\nArgs:\nvalue (str): New value for target state\nforce (bool): If true, ignore allowed transitions\n\nReturns:\ndatetime, update timestamp\n\nRaises:\nRuntimeError, if it is not possible to currently set the target\nstate.\nValueError, if the specified target stat is not allowed.", "source": "codesearchnet"}
{"code": "def add_object_to_scope(self, obj):\n    if isinstance(obj, Computer):\n        self.add_object_to_path(obj, 'scope/computers')\n    elif isinstance(obj, ComputerGroup):\n        self.add_object_to_path(obj, 'scope/computer_groups')\n    elif isinstance(obj, Building):\n        self.add_object_to_path(obj, 'scope/buildings')\n    elif isinstance(obj, Department):\n        self.add_object_to_path(obj, 'scope/departments')\n    else:\n        raise TypeError", "docstring": "Add an object to the appropriate scope block.\n\nArgs:\nobj: JSSObject to add to scope. Accepted subclasses are:\nComputer\nComputerGroup\nBuilding\nDepartment\n\nRaises:\nTypeError if invalid obj type is provided.", "source": "codesearchnet"}
{"code": "def _format_src_url(self, path, caller_system):\n        \n        path = '%s/%s' % (self._endpoint, self.relpath(path))\n\n        \n        if caller_system is not self:\n            try:\n                path = '%s?%s' % (path, self._storage_parameters['sas_token'])\n            except KeyError:\n                pass\n\n        return path", "docstring": "Ensure path is absolute and use the correct URL format for use with\ncross Azure storage account copy function.\n\nArgs:\npath (str): Path or URL.\ncaller_system (pycosio.storage.azure._AzureBaseSystem subclass):\nSystem calling this method (Can be another Azure system).\n\nReturns:\nstr: URL.", "source": "juraj-google-style"}
{"code": "def get_temporary_scripts_path(self):\n    result = None\n    if (len(self.config.temporary_scripts_path) > 0):\n        if os.path.isdir(self.config.temporary_scripts_path):\n            result = self.config.temporary_scripts_path\n    return result", "docstring": "Get path for temporary scripts.\n\nReturns:\nstr: path for temporary scripts or None if not set", "source": "codesearchnet"}
{"code": "def _execute(self, command, data=None, unpack=True):\n    if (not data):\n        data = {}\n    if (self.session_id is not None):\n        data.setdefault('session_id', self.session_id)\n    data = self._wrap_el(data)\n    res = self.remote_invoker.execute(command, data)\n    ret = WebDriverResult.from_object(res)\n    ret.raise_for_status()\n    ret.value = self._unwrap_el(ret.value)\n    if (not unpack):\n        return ret\n    return ret.value", "docstring": "Private method to execute command.\n\nArgs:\ncommand(Command): The defined command.\ndata(dict): The uri variable and body.\nuppack(bool): If unpack value from result.\n\nReturns:\nThe unwrapped value field in the json response.", "source": "codesearchnet"}
{"code": "def add_functions(spec_dict: Mapping[(str, Any)]) -> Mapping[(str, Any)]:\n    spec_dict['functions']['list'] = []\n    spec_dict['functions']['list_long'] = []\n    spec_dict['functions']['list_short'] = []\n    spec_dict['functions']['primary'] = {}\n    spec_dict['functions']['primary']['list_long'] = []\n    spec_dict['functions']['primary']['list_short'] = []\n    spec_dict['functions']['modifier'] = {}\n    spec_dict['functions']['modifier']['list_long'] = []\n    spec_dict['functions']['modifier']['list_short'] = []\n    spec_dict['functions']['to_short'] = {}\n    spec_dict['functions']['to_long'] = {}\n    for func_name in spec_dict['functions']['info']:\n        abbreviated_name = spec_dict['functions']['info'][func_name]['abbreviation']\n        spec_dict['functions']['list'].extend((func_name, abbreviated_name))\n        spec_dict['functions']['list_long'].append(func_name)\n        spec_dict['functions']['list_short'].append(abbreviated_name)\n        if (spec_dict['functions']['info'][func_name]['type'] == 'primary'):\n            spec_dict['functions']['primary']['list_long'].append(func_name)\n            spec_dict['functions']['primary']['list_short'].append(abbreviated_name)\n        else:\n            spec_dict['functions']['modifier']['list_long'].append(func_name)\n            spec_dict['functions']['modifier']['list_short'].append(abbreviated_name)\n        spec_dict['functions']['to_short'][abbreviated_name] = abbreviated_name\n        spec_dict['functions']['to_short'][func_name] = abbreviated_name\n        spec_dict['functions']['to_long'][abbreviated_name] = func_name\n        spec_dict['functions']['to_long'][func_name] = func_name\n    return spec_dict", "docstring": "Add function keys to spec_dict\n\nArgs:\nspec_dict (Mapping[str, Any]): bel specification dictionary\n\nReturns:\nMapping[str, Any]: bel specification dictionary with added function keys", "source": "codesearchnet"}
{"code": "def combine_first_two_dimensions(x):\n  \n  ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))\n  old_shape = x.get_shape().dims\n  a, b = old_shape[:2]\n  new_shape = [a * b if a and b else None] + old_shape[2:]\n  ret.set_shape(new_shape)\n  return ret", "docstring": "Reshape x so that the first two dimension become one.\n\nArgs:\nx: a Tensor with shape [a, b, ...]\n\nReturns:\na Tensor with shape [ab, ...]", "source": "juraj-google-style"}
{"code": "def _head(self, client_kwargs):\n        \n        with _handle_oss_error():\n            bucket = self._get_bucket(client_kwargs)\n\n            \n            if 'key' in client_kwargs:\n                return bucket.head_object(\n                    key=client_kwargs['key']).headers\n\n            \n            return bucket.get_bucket_info().headers", "docstring": "Returns object HTTP header.\n\nArgs:\nclient_kwargs (dict): Client arguments.\n\nReturns:\ndict: HTTP header.", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, cache=None, database=None, **unused_kwargs):\n    if (cache is None):\n        raise ValueError('Missing cache value.')\n    if (database is None):\n        raise ValueError('Missing database value.')\n    super(SQLitePlugin, self).Process(parser_mediator)\n    for (query, callback_method) in self.QUERIES:\n        if parser_mediator.abort:\n            break\n        callback = getattr(self, callback_method, None)\n        if (callback is None):\n            logger.warning('[{0:s}] missing callback method: {1:s} for query: {2:s}'.format(self.NAME, callback_method, query))\n            continue\n        self._ParseQuery(parser_mediator, database, query, callback, cache)", "docstring": "Determine if this is the right plugin for this database.\n\nThis function takes a SQLiteDatabase object and compares the list\nof required tables against the available tables in the database.\nIf all the tables defined in REQUIRED_TABLES are present in the\ndatabase then this plugin is considered to be the correct plugin\nand the function will return back a generator that yields event\nobjects.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ncache (Optional[SQLiteCache]): cache.\ndatabase (Optional[SQLiteDatabase]): database.\n\nRaises:\nValueError: If the database or cache value are missing.", "source": "codesearchnet"}
{"code": "def solve(self, print_solution=False):\n    \n    \n    self._cp_solver = cp_model.CpSolver()\n    status = self._cp_solver.Solve(self._model)\n    if status != cp_model.OPTIMAL:\n      if status == cp_model.FEASIBLE:\n        logging.warning(\"A potentially suboptimal solution was found.\")\n      else:\n        logging.error(\"Solver returned status %d.\", status)\n        raise SolverError(\"The solver could not solve the problem and returned \"\n                          \"status {}.\".format(status))\n\n    \n    if print_solution:\n      print_cp_model_solution.print_solution(self._model, self._cp_solver)\n\n    \n    layout = []\n    for mtf_dimension_name in (\n        self._layout_validator.splittable_mtf_dimension_names):\n      for mesh_dimension_name in (\n          self._layout_validator.mesh_dimension_name_to_size):\n        value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name,\n                                                         mesh_dimension_name)])\n        if value:  \n          layout.append(mtf_dimension_name + \":\" + mesh_dimension_name)\n\n    layout.sort()\n    return \";\".join(layout)", "docstring": "Solves the current integer program and returns the computed layout.\n\nArgs:\nprint_solution: An optional boolean indicating whether to print the full\nsolution in human-readable format.\n\nReturns:\nThe computed layout (as a string).\n\nRaises:\nSolverError: the internal solver could not find a solution, or the\nsolution found is infeasible.", "source": "juraj-google-style"}
{"code": "def __init__(self, cl_environments=None, compile_flags=None, double_precision=None):\n        \n        super().__init__()\n        self._cl_environments = cl_environments\n        self._compile_flags = compile_flags\n        self._double_precision = double_precision", "docstring": "Updates the runtime settings.\n\nArgs:\ncl_environments (list of CLEnvironment): the new CL environments we wish to use for future computations\ncompile_flags (list): the list of compile flags to use during analysis.\ndouble_precision (boolean): if we compute in double precision or not", "source": "juraj-google-style"}
{"code": "def __init__(self, client_path, data, chunk_index, total_chunks, offset,\n               total_size):\n    \n    self.client_path = client_path\n    self.data = data\n    self.offset = offset\n    self.total_size = total_size\n    self.chunk_index = chunk_index\n    self.total_chunks = total_chunks", "docstring": "Initializes StreamedFileChunk object.\n\nArgs:\nclient_path: db.ClientPath identifying the file.\ndata: bytes with chunk's contents.\nchunk_index: Index of this chunk (relative to the sequence of chunks\ncorresponding to the file).\ntotal_chunks: Total number of chunks corresponding to a given file.\noffset: Offset of this chunk in bytes from the beginning of the file.\ntotal_size: Total size of the file in bytes.", "source": "juraj-google-style"}
{"code": "def tcp_ping(task: Task, ports: List[int], timeout: int=2, host: Optional[str]=None) -> Result:\n    if isinstance(ports, int):\n        ports = [ports]\n    if isinstance(ports, list):\n        if (not all((isinstance(port, int) for port in ports))):\n            raise ValueError(\"Invalid value for 'ports'\")\n    else:\n        raise ValueError(\"Invalid value for 'ports'\")\n    host = (host or task.host.hostname)\n    result = {}\n    for port in ports:\n        s = socket.socket()\n        s.settimeout(timeout)\n        try:\n            status = s.connect_ex((host, port))\n            if (status == 0):\n                connection = True\n            else:\n                connection = False\n        except (socket.gaierror, socket.timeout, socket.error):\n            connection = False\n        finally:\n            s.close()\n        result[port] = connection\n    return Result(host=task.host, result=result)", "docstring": "Tests connection to a tcp port and tries to establish a three way\nhandshake. To be used for network discovery or testing.\n\nArguments:\nports (list of int): tcp ports to ping\ntimeout (int, optional): defaults to 2\nhost (string, optional): defaults to ``hostname``\n\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): Contains port numbers as keys with True/False as values", "source": "codesearchnet"}
{"code": "def publishCombinedWebMap(self, maps_info, webmaps):\n        \n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        admin = None\n        map_results = None\n        map_info = None\n        operationalLayers = None\n        tableLayers = None\n        item = None\n        response = None\n        opLays = None\n        operationalLayers = None\n        tblLays = None\n        tblLayer = None\n        itemInfo = None\n        try:\n            admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n\n            map_results = []\n            for map_info in maps_info:\n\n                operationalLayers = []\n                tableLayers = []\n                for webmap in webmaps:\n                    item = admin.content.getItem(itemId=webmap)\n                    response = item.itemData()\n                    if 'operationalLayers' in response:\n\n                        opLays = []\n                        for opLayer in response['operationalLayers']:\n                            opLays.append(opLayer)\n                        opLays.extend(operationalLayers)\n                        operationalLayers = opLays\n                    if 'tables' in response:\n\n                        tblLays = []\n                        for tblLayer in response['tables']:\n                            tblLays.append(tblLayer)\n                        tblLays.extend(tableLayers)\n                        tableLayers = tblLays\n\n                if 'ReplaceTag' in map_info:\n\n                    itemInfo = {\"ReplaceTag\":map_info['ReplaceTag'] }\n                else:\n                    itemInfo = {\"ReplaceTag\":\"{WebMap}\" }\n\n                itemInfo['MapInfo'] = self._publishMap(config=map_info,\n                                                        replaceInfo=None,\n                                                        operationalLayers=operationalLayers,\n                                                        tableLayers=tableLayers)\n\n\n                map_results.append(itemInfo)\n                if not itemInfo is None:\n                    if not 'error' in itemInfo['MapInfo']['Results']:\n                        print (\"%s webmap created\" % itemInfo['MapInfo']['Name'])\n                    else:\n                        print (str(itemInfo['MapInfo']['Results']))\n                else:\n                    print (\"Map not created\")\n\n                return map_results\n        except Exception as e:\n\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                \"function\": \"publishedCombinedWebMap\",\n                \"line\": line,\n                \"filename\":  filename,\n                \"synerror\": synerror,\n            })\n        finally:\n            admin = None\n\n            map_info = None\n\n            tableLayers = None\n            item = None\n            response = None\n            opLays = None\n            operationalLayers = None\n            tblLays = None\n            tblLayer = None\n            itemInfo = None\n\n            del admin\n            del map_info\n\n            del tableLayers\n            del item\n            del response\n            del opLays\n            del operationalLayers\n            del tblLays\n            del tblLayer\n            del itemInfo\n\n            gc.collect()", "docstring": "Publishes a combination of web maps.\n\nArgs:\nmaps_info (list): A list of JSON 
configuration combined web maps to publish.\n\nReturns:\nlist: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.", "source": "juraj-google-style"}
{"code": "def get_message(routing_key, properties, body):\n    \n    if properties.headers is None:\n        _log.error(\n            \"Message (body=%r) arrived without headers. \" \"A publisher is misbehaving!\",\n            body,\n        )\n        properties.headers = {}\n\n    try:\n        MessageClass = get_class(properties.headers[\"fedora_messaging_schema\"])\n    except KeyError:\n        _log.error(\n            \"Message (headers=%r, body=%r) arrived without a schema header.\"\n            \" A publisher is misbehaving!\",\n            properties.headers,\n            body,\n        )\n        MessageClass = Message\n\n    try:\n        severity = properties.headers[\"fedora_messaging_severity\"]\n    except KeyError:\n        _log.error(\n            \"Message (headers=%r, body=%r) arrived without a severity.\"\n            \" A publisher is misbehaving! Defaulting to INFO.\",\n            properties.headers,\n            body,\n        )\n        severity = INFO\n\n    if properties.content_encoding is None:\n        _log.error(\"Message arrived without a content encoding\")\n        properties.content_encoding = \"utf-8\"\n    try:\n        body = body.decode(properties.content_encoding)\n    except UnicodeDecodeError as e:\n        _log.error(\n            \"Unable to decode message body %r with %s content encoding\",\n            body,\n            properties.content_encoding,\n        )\n        raise ValidationError(e)\n\n    try:\n        body = json.loads(body)\n    except ValueError as e:\n        _log.error(\"Failed to load message body %r, %r\", body, e)\n        raise ValidationError(e)\n\n    message = MessageClass(\n        body=body, topic=routing_key, properties=properties, severity=severity\n    )\n    try:\n        message.validate()\n        _log.debug(\"Successfully validated message %r\", message)\n    except jsonschema.exceptions.ValidationError as e:\n        _log.error(\"Message validation of %r failed: %r\", message, e)\n        raise ValidationError(e)\n    return message", "docstring": "Construct a Message instance given the routing key, the properties and the\nbody received from the AMQP broker.\n\nArgs:\nrouting_key (str): The AMQP routing key (will become the message topic)\nproperties (pika.BasicProperties): the AMQP properties\nbody (bytes): The encoded message body\n\nRaises:\nValidationError: If Message validation failed or message body\ndocoding/loading is impossible.", "source": "juraj-google-style"}
{"code": "def get_random_distorted_bottlenecks(sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor):\n    class_count = len(image_lists.keys())\n    bottlenecks = []\n    ground_truths = []\n    for unused_i in range(how_many):\n        label_index = random.randrange(class_count)\n        label_name = list(image_lists.keys())[label_index]\n        image_index = random.randrange((MAX_NUM_IMAGES_PER_CLASS + 1))\n        image_path = get_image_path(image_lists, label_name, image_index, image_dir, category)\n        if (not tf.gfile.Exists(image_path)):\n            tf.logging.fatal('File does not exist %s', image_path)\n        jpeg_data = tf.gfile.GFile(image_path, 'rb').read()\n        distorted_image_data = sess.run(distorted_image, {input_jpeg_tensor: jpeg_data})\n        bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: distorted_image_data})\n        bottleneck_values = np.squeeze(bottleneck_values)\n        bottlenecks.append(bottleneck_values)\n        ground_truths.append(label_index)\n    return (bottlenecks, ground_truths)", "docstring": "Retrieves bottleneck values for training images, after distortions.\n\nIf we're training with distortions like crops, scales, or flips, we have to\nrecalculate the full model for every image, and so we can't use cached\nbottleneck values. Instead we find random images for the requested category,\nrun them through the distortion graph, and then the full graph to get the\nbottleneck results for each.\n\nArgs:\nsess: Current TensorFlow Session.\nimage_lists: OrderedDict of training images for each label.\nhow_many: The integer number of bottleneck values to return.\ncategory: Name string of which set of images to fetch - training, testing,\nor validation.\nimage_dir: Root folder string of the subfolders containing the training\nimages.\ninput_jpeg_tensor: The input layer we feed the image data to.\ndistorted_image: The output node of the distortion graph.\nresized_input_tensor: The input node of the recognition graph.\nbottleneck_tensor: The bottleneck output layer of the CNN graph.\n\nReturns:\nList of bottleneck arrays and their corresponding ground truths.", "source": "codesearchnet"}
{"code": "def _compress_hextets(cls, hextets):\n        \n        best_doublecolon_start = -1\n        best_doublecolon_len = 0\n        doublecolon_start = -1\n        doublecolon_len = 0\n        for index, hextet in enumerate(hextets):\n            if hextet == '0':\n                doublecolon_len += 1\n                if doublecolon_start == -1:\n                    \n                    doublecolon_start = index\n                if doublecolon_len > best_doublecolon_len:\n                    \n                    best_doublecolon_len = doublecolon_len\n                    best_doublecolon_start = doublecolon_start\n            else:\n                doublecolon_len = 0\n                doublecolon_start = -1\n\n        if best_doublecolon_len > 1:\n            best_doublecolon_end = (best_doublecolon_start +\n                                    best_doublecolon_len)\n            \n            if best_doublecolon_end == len(hextets):\n                hextets += ['']\n            hextets[best_doublecolon_start:best_doublecolon_end] = ['']\n            \n            if best_doublecolon_start == 0:\n                hextets = [''] + hextets\n\n        return hextets", "docstring": "Compresses a list of hextets.\n\nCompresses a list of strings, replacing the longest continuous\nsequence of \"0\" in the list with \"\" and adding empty strings at\nthe beginning or at the end of the string such that subsequently\ncalling \":\".join(hextets) will produce the compressed version of\nthe IPv6 address.\n\nArgs:\nhextets: A list of strings, the hextets to compress.\n\nReturns:\nA list of strings.", "source": "juraj-google-style"}
{"code": "def oem(self):\n    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n    res = self._dll.JLINKARM_GetOEMString(ctypes.byref(buf))\n    if (res != 0):\n        raise errors.JLinkException('Failed to grab OEM string.')\n    oem = ctypes.string_at(buf).decode()\n    if (len(oem) == 0):\n        return None\n    return oem", "docstring": "Retrieves and returns the OEM string of the connected J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe string of the OEM.  If this is an original SEGGER product, then\n``None`` is returned instead.\n\nRaises:\nJLinkException: on hardware error.", "source": "codesearchnet"}
{"code": "def _kl_bernoulli_bernoulli(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_bernoulli_bernoulli\"):\n    delta_probs0 = tf.nn.softplus(-b.logits) - tf.nn.softplus(-a.logits)\n    delta_probs1 = tf.nn.softplus(b.logits) - tf.nn.softplus(a.logits)\n    return (tf.sigmoid(a.logits) * delta_probs0\n            + tf.sigmoid(-a.logits) * delta_probs1)", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.\n\nArgs:\na: instance of a Bernoulli distribution object.\nb: instance of a Bernoulli distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_bernoulli_bernoulli\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "async def remember_ticket(self, request, ticket):\n    session = (await get_session(request))\n    session[self.cookie_name] = ticket", "docstring": "Called to store the ticket data for a request.\n\nTicket data is stored in the aiohttp_session object\n\nArgs:\nrequest: aiohttp Request object.\nticket: String like object representing the ticket to be stored.", "source": "codesearchnet"}
{"code": "def get_contacts(self, issue):\n    if (not issue.resource):\n        return []\n    account_contacts = issue.resource.account.contacts\n    try:\n        resource_owners = issue.resource.get_owner_emails()\n        if (type(resource_owners) is list):\n            for resource_owner in resource_owners:\n                account_contacts.append({'type': 'email', 'value': resource_owner})\n    except AttributeError:\n        pass\n    return account_contacts", "docstring": "Returns a list of contacts for an issue\n\nArgs:\nissue (:obj:`RequiredTagsIssue`): Issue record\n\nReturns:\n`list` of `dict`", "source": "codesearchnet"}
{"code": "def get_files_re(self, file_re, full_path=False, ignorecase=False):\n    try:\n        if ignorecase:\n            compiled_re = re.compile(file_re, re.I)\n        else:\n            compiled_re = re.compile(file_re)\n    except sre_constants.error:\n        logger.error('Failed to compile regex: {}.'.format(file_re))\n        return []\n    found = []\n    if self.handle:\n        for member in self.handle.getmembers():\n            if (isinstance(member, TarInfo) and member.isdir()):\n                pass\n            elif ((full_path and compiled_re.search(member.name)) or ((not full_path) and compiled_re.search(os.path.basename(member.name)))):\n                found.append(member.name)\n    return found", "docstring": "Finds all files that match file_re and returns their list.\nDoesn't return directories, only files.\n\nArgs:\nfile_re: raw string to match files against (gets compiled into re)\nfull_path: whether to match against full path inside the archive\nor just the filenames\nignorecase: whether to ignore case when using the given re\nReturns:\nList of full paths of files inside the archive that match the given\nfile_re.", "source": "codesearchnet"}
{"code": "def GetScriptHashesForVerifying(self):\n    if (self.PrevHash.Data == bytearray(32)):\n        if (type(self.Script.VerificationScript) is bytes):\n            return [bytearray(self.Script.VerificationScript)]\n        elif (type(self.Script.VerificationScript) is bytearray):\n            return [self.Script.VerificationScript]\n        else:\n            raise Exception('Invalid Verification script')\n    prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())\n    if (prev_header is None):\n        raise Exception('Invalid operation')\n    return [prev_header.NextConsensus]", "docstring": "Get the script hash used for verification.\n\nRaises:\nException: if the verification script is invalid, or no header could be retrieved from the Blockchain.\n\nReturns:\nlist: with a single UInt160 representing the next consensus node.", "source": "codesearchnet"}
{"code": "def list_from_file(filename, prefix='', offset=0, max_num=0):\n    \n    cnt = 0\n    item_list = []\n    with open(filename, 'r') as f:\n        for _ in range(offset):\n            f.readline()\n        for line in f:\n            if max_num > 0 and cnt >= max_num:\n                break\n            item_list.append(prefix + line.rstrip('\\n'))\n            cnt += 1\n    return item_list", "docstring": "Load a text file and parse the content as a list of strings.\n\nArgs:\nfilename (str): Filename.\nprefix (str): The prefix to be inserted to the begining of each item.\noffset (int): The offset of lines.\nmax_num (int): The maximum number of lines to be read,\nzeros and negatives mean no limitation.\n\nReturns:\nlist[str]: A list of strings.", "source": "juraj-google-style"}
{"code": "def tf_next_step(self, x, iteration, conjugate, residual, squared_residual):\n        \n        next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual)\n        return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon))", "docstring": "Termination condition: max number of iterations, or residual sufficiently small.\n\nArgs:\nx: Current solution estimate $x_t$.\niteration: Current iteration counter $t$.\nconjugate: Current conjugate $c_t$.\nresidual: Current residual $r_t$.\nsquared_residual: Current squared residual $r_t^2$.\n\nReturns:\nTrue if another iteration should be performed.", "source": "juraj-google-style"}
{"code": "def partial_derivative_sigma(mu, sigma, low, high, data):\n    pd_sigma = np.sum(((- (1 / sigma)) + (((data - mu) ** 2) / (sigma ** 3))))\n    pd_sigma -= (len(data) * ((((low - mu) * norm.pdf(low, mu, sigma)) - ((high - mu) * norm.pdf(high, mu, sigma))) / (sigma * (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))))\n    return (- pd_sigma)", "docstring": "The partial derivative with respect to the standard deviation.\n\nArgs:\nmu (float): the mean of the truncated normal\nsigma (float): the std of the truncated normal\nlow (float): the lower truncation bound\nhigh (float): the upper truncation bound\ndata (ndarray): the one dimension list of data points for which we want to calculate the likelihood\n\nReturns:\nfloat: the partial derivative evaluated at the given point", "source": "codesearchnet"}
{"code": "def assemble(self, ops):\n        \n\n        return pwnypack.asm.asm(self.compile(ops), target=self.target)", "docstring": "Assemble a list of operations into executable code.\n\nArguments:\nops(list): A list of shellcode operations.\n\nReturns:\nbytes: The executable code that implements the shellcode.", "source": "juraj-google-style"}
{"code": "def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):\n    \n    egg_project_name = pkg_resources.to_filename(project_name)\n    req = '{}@{}\n    if subdir:\n        req += '&subdirectory={}'.format(subdir)\n\n    return req", "docstring": "Return the URL for a VCS requirement.\n\nArgs:\nrepo_url: the remote VCS url, with any needed VCS prefix (e.g. \"git+\").\nproject_name: the (unescaped) project name.", "source": "juraj-google-style"}
{"code": "def get_max_atten(self):\n    return self.attenuation_device.max_atten", "docstring": "Gets the max attenuation supported by the Attenuator.\n\nReturns:\nA float that is the max attenuation value.", "source": "github-repos"}
{"code": "def UpdateClass(self, class_name, gtfs_class):\n    \n    if class_name not in self._class_mapping:\n      raise problems.NonexistentMapping(class_name)\n    self._class_mapping[class_name] = gtfs_class", "docstring": "Updates an entry in the list of known classes.\n\nArgs:\nclass_name: A string with the class name that is to be updated.\ngtfs_class: The new class\nRaises:\nNonexistentMapping if there is no class with the specified class_name.", "source": "juraj-google-style"}
{"code": "def feedforward(inputs,\n                num_units,\n                scope=\"multihead_attention\"):\n    \n    with tf.variable_scope(scope):\n        \n        params = {\"inputs\": inputs, \"filters\": num_units[0], \"kernel_size\": 1,\n                  \"activation\": tf.nn.relu, \"use_bias\": True}\n        outputs = tf.layers.conv1d(**params)\n\n        \n        params = {\"inputs\": outputs, \"filters\": num_units[1], \"kernel_size\": 1,\n                  \"activation\": None, \"use_bias\": True}\n        outputs = tf.layers.conv1d(**params)\n\n        \n        outputs += inputs\n\n        \n        outputs = normalize(outputs)\n\n    return outputs", "docstring": "Point-wise feed forward net.\n\nArgs:\ninputs: A 3d tensor with shape of [N, T, C].\nnum_units: A list of two integers.\nscope: Optional scope for `variable_scope`.\nreuse: Boolean, whether to reuse the weights of a previous layer\nby the same name.\n\nReturns:\nA 3d tensor with the same shape and dtype as inputs", "source": "juraj-google-style"}
{"code": "def _scale_boxes(boxes, target_sizes):\n    if isinstance(target_sizes, (list, tuple)):\n        image_height = torch.tensor([i[0] for i in target_sizes])\n        image_width = torch.tensor([i[1] for i in target_sizes])\n    elif isinstance(target_sizes, torch.Tensor):\n        image_height, image_width = target_sizes.unbind(1)\n    else:\n        raise ValueError('`target_sizes` must be a list, tuple or torch.Tensor')\n    scale_factor = torch.stack([image_width, image_height, image_width, image_height], dim=1)\n    scale_factor = scale_factor.unsqueeze(1).to(boxes.device)\n    boxes = boxes * scale_factor\n    return boxes", "docstring": "Scale batch of bounding boxes to the target sizes.\n\nArgs:\nboxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):\nBounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.\ntarget_sizes (`List[Tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):\nTarget sizes to scale the boxes to. Each target size is expected to be in (height, width) format.\n\nReturns:\n`torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes.", "source": "github-repos"}
{"code": "def Lookup(self, keywords, start_time=FIRST_TIMESTAMP, end_time=LAST_TIMESTAMP, last_seen_map=None):\n    posting_lists = self.ReadPostingLists(keywords, start_time=start_time, end_time=end_time, last_seen_map=last_seen_map)\n    results = list(itervalues(posting_lists))\n    relevant_set = results[0]\n    for hits in results:\n        relevant_set &= hits\n        if (not relevant_set):\n            return relevant_set\n    return relevant_set", "docstring": "Finds objects associated with keywords.\n\nFind the names related to all keywords.\n\nArgs:\nkeywords: A collection of keywords that we are interested in.\nstart_time: Only considers keywords added at or after this point in time.\nend_time: Only considers keywords at or before this point in time.\nlast_seen_map: If present, is treated as a dict and populated to map pairs\n(keyword, name) to the timestamp of the latest connection found.\nReturns:\nA set of potentially relevant names.", "source": "codesearchnet"}
{"code": "def get_attribute(self, node, obj, name, valself=None):\n    obj = abstract_utils.unwrap_final(obj)\n    special_attribute = obj.get_special_attribute(node, name, valself)\n    if special_attribute is not None:\n        return (node, special_attribute)\n    if isinstance(obj, abstract.Function):\n        if name == '__get__':\n            return (node, None)\n        else:\n            return self._get_instance_attribute(node, obj, name, valself)\n    elif isinstance(obj, abstract.ParameterizedClass):\n        return self.get_attribute(node, obj.base_cls, name, valself)\n    elif isinstance(obj, abstract.Class):\n        return self._get_class_attribute(node, obj, name, valself)\n    elif isinstance(obj, overlay.Overlay):\n        return self._get_module_attribute(node, obj.get_module(name), name, valself)\n    elif isinstance(obj, abstract.Module):\n        return self._get_module_attribute(node, obj, name, valself)\n    elif isinstance(obj, abstract.SimpleValue):\n        return self._get_instance_attribute(node, obj, name, valself)\n    elif isinstance(obj, abstract.Union):\n        if name == '__getitem__':\n            return (node, self.ctx.new_unsolvable(node))\n        nodes = []\n        ret = self.ctx.program.NewVariable()\n        for o in obj.options:\n            node2, attr = self.get_attribute(node, o, name, valself)\n            if attr is not None:\n                ret.PasteVariable(attr, node2)\n                nodes.append(node2)\n        if ret.bindings:\n            return (self.ctx.join_cfg_nodes(nodes), ret)\n        else:\n            return (node, None)\n    elif isinstance(obj, special_builtins.SuperInstance):\n        return self._get_attribute_from_super_instance(node, obj, name, valself)\n    elif isinstance(obj, special_builtins.Super):\n        return self.get_attribute(node, self.ctx.convert.super_type, name, valself)\n    elif isinstance(obj, (abstract.StaticMethod, abstract.ClassMethod)):\n        return self.get_attribute(node, obj.method, name, valself)\n    elif isinstance(obj, abstract.BoundFunction):\n        return self.get_attribute(node, obj.underlying, name, valself)\n    elif isinstance(obj, abstract.TypeParameterInstance):\n        param_var = obj.instance.get_instance_type_parameter(obj.name)\n        if not param_var.bindings:\n            param_var = obj.param.instantiate(self.ctx.root_node)\n        results = []\n        nodes = []\n        for b in param_var.bindings:\n            if b.data == obj:\n                continue\n            node2, ret = self.get_attribute(node, b.data, name, valself)\n            if ret is None:\n                if b.IsVisible(node):\n                    return (node, None)\n            else:\n                results.append(ret)\n                nodes.append(node2)\n        if nodes:\n            node = self.ctx.join_cfg_nodes(nodes)\n            return (node, self.ctx.join_variables(node, results))\n        else:\n            return (node, self.ctx.new_unsolvable(node))\n    elif isinstance(obj, abstract.Empty):\n        return (node, None)\n    elif isinstance(obj, abstract.ParamSpec):\n        if name == 'args':\n            return (node, abstract.ParamSpecArgs(obj, self.ctx).to_variable(node))\n        elif name == 'kwargs':\n            return (node, abstract.ParamSpecKwargs(obj, self.ctx).to_variable(node))\n        else:\n            return (node, None)\n    else:\n        return (node, None)", "docstring": "Get the named attribute from the given object.\n\nArgs:\nnode: The current CFG 
node.\nobj: The object.\nname: The name of the attribute to retrieve.\nvalself: A cfg.Binding to a self reference to include in the attribute's\norigins. If obj is an abstract.Class, valself can be a binding to:\n* an instance of obj - obj will be treated strictly as a class.\n* obj itself - obj will be treated as an instance of its metaclass.\n* None - if name == \"__getitem__\", obj is a type annotation; else, obj\nis strictly a class, but the attribute is left unbound.\nElse, valself is optional and should be a binding to obj when given.\n\nReturns:\nA tuple (CFGNode, cfg.Variable). If this attribute doesn't exist,\nthe Variable will be None.", "source": "github-repos"}
{"code": "def construct_gene_object(ensembl, transcript_id):\n    \n    \n    \n    (chrom, start, end, strand, genomic_sequence) = ensembl.get_genomic_seq_for_transcript(transcript_id, expand=10)\n    cds_sequence = ensembl.get_cds_seq_for_transcript(transcript_id)\n    \n    \n    cds_ranges = ensembl.get_cds_ranges_for_transcript(transcript_id)\n    exon_ranges = ensembl.get_exon_ranges_for_transcript(transcript_id)\n    \n    \n    transcript = Transcript(transcript_id, chrom, start, end, strand)\n    transcript.set_exons(exon_ranges, cds_ranges)\n    transcript.set_cds(cds_ranges)\n    \n    transcript.add_cds_sequence(cds_sequence)\n    transcript.add_genomic_sequence(genomic_sequence, offset=10)\n    \n    return transcript", "docstring": "creates an Transcript object for a gene from ensembl databases\n\nArgs:\nensembl: EnsemblRequest object to request data from ensembl\ntranscript_id: string for an Ensembl transcript ID\n\nReturns:\na Transcript object, containing transcript coordinates and gene and\ntranscript sequence.\n\nRaises:\nValueError if CDS from genomic sequence given gene coordinates and CDS\nretrieved from Ensembl do not match.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_system, tsk_attribute):\n    \n    super(TSKDataStream, self).__init__()\n    self._file_system = file_system\n    self._tsk_attribute = tsk_attribute", "docstring": "Initializes a data stream.\n\nArgs:\nfile_system (TSKFileSystem): file system.\ntsk_attribute (pytsk3.Attribute): TSK attribute.", "source": "juraj-google-style"}
{"code": "def _call_method_from_namespace(obj, method_name, namespace):\n    method = getattr(obj, method_name)\n    method_parser = method.parser\n    arg_names = _get_args_name_from_parser(method_parser)\n    if (method_name == '__init__'):\n        return _call(obj, arg_names, namespace)\n    return _call(method, arg_names, namespace)", "docstring": "Call the method, retrieved from obj, with the correct arguments via\nthe namespace\n\nArgs:\nobj: any kind of object\nmethod_name: method to be called\nnamespace: an argparse.Namespace object containing parsed command\nline arguments", "source": "codesearchnet"}
{"code": "def append_to_history(self, filename, command, go_to_eof):\n        \n        if not is_text_string(filename): \n            filename = to_text_string(filename.toUtf8(), 'utf-8')\n        command = to_text_string(command)\n        index = self.filenames.index(filename)\n        self.editors[index].append(command)\n        if go_to_eof:\n            self.editors[index].set_cursor_position('eof')\n        self.tabwidget.setCurrentIndex(index)", "docstring": "Append an entry to history filename.\n\nArgs:\nfilename (str): file to be updated in a new tab.\ncommand (str): line to be added.\ngo_to_eof (bool): scroll to the end of file.", "source": "juraj-google-style"}
{"code": "def _CompressionSizeDelta(self, records, options_a, options_b):\n    fn_a = self._WriteRecordsToFile(records, 'tfrecord_a', options=options_a)\n    test_a = list(tf_record.tf_record_iterator(fn_a, options=options_a))\n    self.assertEqual(records, test_a, options_a)\n    fn_b = self._WriteRecordsToFile(records, 'tfrecord_b', options=options_b)\n    test_b = list(tf_record.tf_record_iterator(fn_b, options=options_b))\n    self.assertEqual(records, test_b, options_b)\n    return os.path.getsize(fn_a) - os.path.getsize(fn_b)", "docstring": "Validate compression with options_a and options_b and return size delta.\n\nCompress records with options_a and options_b. Uncompress both compressed\nfiles and assert that the contents match the original records. Finally\ncalculate how much smaller the file compressed with options_a was than the\nfile compressed with options_b.\n\nArgs:\nrecords: The records to compress\noptions_a: First set of options to compress with, the baseline for size.\noptions_b: Second set of options to compress with.\n\nReturns:\nThe difference in file size when using options_a vs options_b. A positive\nvalue means options_a was a better compression than options_b. A negative\nvalue means options_b had better compression than options_a.", "source": "github-repos"}
{"code": "def init_datapackage(resource_paths):\n    dp = datapackage.Package({'name': 'change-me', 'schema': 'tabular-data-package'})\n    for path in resource_paths:\n        dp.infer(path)\n    return dp", "docstring": "Create tabular data package with resources.\n\nIt will also infer the tabular resources' schemas.\n\nArgs:\nresource_paths (List[str]): Paths to the data package resources.\n\nReturns:\ndatapackage.Package: The data package.", "source": "codesearchnet"}
{"code": "def process_test_logs(name, test_name, test_args, benchmark_type, start_time, run_time, log_files):\n    results = test_log_pb2.TestResults()\n    results.name = name\n    results.target = test_name\n    results.start_time = start_time\n    results.run_time = run_time\n    results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(benchmark_type.upper())\n    git_sha = get_git_commit_sha()\n    if git_sha:\n        results.commit_id.hash = git_sha\n    results.entries.CopyFrom(process_benchmarks(log_files))\n    results.run_configuration.argument.extend(test_args)\n    results.machine_configuration.CopyFrom(system_info_lib.gather_machine_configuration())\n    return results", "docstring": "Gather test information and put it in a TestResults proto.\n\nArgs:\nname: Benchmark target identifier.\ntest_name: A unique bazel target, e.g. \"//path/to:test\"\ntest_args: A string containing all arguments to run the target with.\nbenchmark_type: A string representing the BenchmarkType enum; the\nbenchmark type for this target.\nstart_time: Test starting time (epoch)\nrun_time:   Wall time that the test ran for\nlog_files:  Paths to the log files\n\nReturns:\nA TestResults proto", "source": "github-repos"}
{"code": "def serialize_data(data, compression=False, encryption=False, public_key=None):\n    message = json.dumps(data)\n    if compression:\n        message = zlib.compress(message)\n        message = binascii.b2a_base64(message)\n    if (encryption and public_key):\n        message = encryption.encrypt(message, public_key)\n    encoded_message = str.encode(message)\n    return encoded_message", "docstring": "Serializes normal Python datatypes into plaintext using json.\n\nYou may also choose to enable compression and encryption when serializing\ndata to send over the network. Enabling one or both of these options will\nincur additional overhead.\n\nArgs:\ndata (dict): The data to convert into plain text using json.\ncompression (boolean): True or False value on whether or not to compress\nthe serialized data.\nencryption (rsa.encryption): An encryption instance used to encrypt the\nmessage if encryption is desired.\npublic_key (str): The public key to use to encrypt if encryption is\nenabled.\n\nReturns:\nThe string message serialized using json.", "source": "codesearchnet"}
{"code": "def __init__(self, raw_string, bow=True):\n        \n        self.raw = raw_string\n        self.as_list = list(self.raw)\n        self.as_np = np.array(self.as_list)\n        self.string_start = np.arange(len(self.raw))\n        vocab = {}\n        self.inverse_vocab = []\n        self.positions = []\n        self.bow = bow\n        non_vocab = set()\n        for i, char in enumerate(self.as_np):\n            if char in non_vocab:\n                continue\n            if bow:\n                if char not in vocab:\n                    vocab[char] = len(vocab)\n                    self.inverse_vocab.append(char)\n                    self.positions.append([])\n                idx_char = vocab[char]\n                self.positions[idx_char].append(i)\n            else:\n                self.inverse_vocab.append(char)\n                self.positions.append(i)\n        if not bow:\n            self.positions = np.array(self.positions)", "docstring": "Initializer.\n\nArgs:\nraw_string: string with raw text in it\nbow: if True, a char is the same everywhere in the text - i.e. we\nwill index multiple occurrences of the same character. If False,\norder matters, so that the same word will have different ids\naccording to position.", "source": "juraj-google-style"}
{"code": "def compile_intermediate_cpfs(self, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[Noise]=None) -> List[CPFPair]:\n    interm_fluents = []\n    with self.graph.as_default():\n        with tf.name_scope('intermediate_cpfs'):\n            for cpf in self.rddl.domain.intermediate_cpfs:\n                cpf_noise = (noise.get(cpf.name, None) if (noise is not None) else None)\n                name_scope = utils.identifier(cpf.name)\n                with tf.name_scope(name_scope):\n                    t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)\n                interm_fluents.append((cpf.name, t))\n                scope[cpf.name] = t\n    return interm_fluents", "docstring": "Compiles the intermediate fluent CPFs given the current `state` and `action` scope.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nA list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def rename_document(self, did, name):\n        \n\n        payload = {\n            'name': name\n        }\n\n        return self._api.request('post', '/api/documents/' + did, body=payload)", "docstring": "Renames the specified document.\n\nArgs:\n- did (str): Document ID\n- name (str): New document name\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def get_page_artid_for_publication_info(publication_info, separator):\n        \n        if 'artid' in publication_info:\n            return publication_info['artid']\n\n        elif 'page_start' in publication_info and 'page_end' in publication_info:\n            page_start = publication_info['page_start']\n            page_end = publication_info['page_end']\n            return text_type('{}{}{}').format(\n                page_start, text_type(separator), page_end\n            )\n\n        return ''", "docstring": "Return the page range or the article id of a publication_info entry.\n\nArgs:\npublication_info(dict): a publication_info field entry of a record\nseparator(basestring): optional page range symbol, defaults to a single dash\n\nReturns:\nstring: the page range or the article id of the record.\n\nExamples:\n>>> publication_info = {'artid': '054021'}\n>>> get_page_artid(publication_info)\n'054021'", "source": "juraj-google-style"}
{"code": "def generate_examples(options):\n    _prepare_dir(options)\n    out = options.zip_to_output\n    if options.multi_gen_state:\n        test_name = options.multi_gen_state.test_name\n    else:\n        test_name = re.sub('(_(|with-flex|forward-compat|edgetpu|mlir-quant))?(_xnnpack)?\\\\.zip$', '', out, count=1)\n    test_function_name = 'make_%s_tests' % test_name\n    test_function = get_test_function(test_function_name)\n    if test_function is None:\n        raise RuntimeError(\"Can't find a test function to create %r. Tried %r\" % (out, test_function_name))\n    if options.make_forward_compat_test:\n        future_date = datetime.date.today() + datetime.timedelta(days=30)\n        with tf.compat.forward_compatibility_horizon(future_date.year, future_date.month, future_date.day):\n            test_function(options)\n    else:\n        test_function(options)", "docstring": "Generate examples for a test set.\n\nArgs:\noptions: Options containing information to generate examples.\n\nRaises:\nRuntimeError: if the test function cannot be found.", "source": "github-repos"}
{"code": "def __contains__(self, item):\n        \n        try:\n            _libexec('merkle_db_contains', self.pointer,\n                     item.encode())\n            \n            return True\n        except KeyError:\n            return False", "docstring": "Does the tree contain an address.\n\nArgs:\nitem (str): An address.\n\nReturns:\n(bool): True if it does contain, False otherwise.", "source": "juraj-google-style"}
{"code": "def from_row_starts(cls, row_starts, nvals, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    with ops.name_scope(None, 'RowPartitionFromRowStarts', [row_starts]):\n        row_starts = cls._convert_row_partition(row_starts, 'row_starts', dtype_hint=dtype_hint, dtype=dtype)\n        row_starts.shape.assert_has_rank(1)\n        nvals = math_ops.cast(nvals, row_starts.dtype)\n        if validate:\n            msg = 'Arguments to from_row_starts do not form a valid RaggedTensor'\n            checks = [check_ops.assert_rank(row_starts, 1, message=msg), _assert_zero(row_starts[:1], message=msg), _assert_monotonic_increasing(row_starts, message=msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg)]\n            row_starts = control_flow_ops.with_dependencies(checks, row_starts)\n        row_splits = array_ops.concat([row_starts, [nvals]], axis=0)\n        return cls(row_splits=row_splits, nvals=nvals, internal=_row_partition_factory_key)", "docstring": "Creates a `RowPartition` with rows partitioned by `row_starts`.\n\nEquivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`.\n\nArgs:\nrow_starts: A 1-D integer tensor with shape `[nrows]`.  Must be\nnonnegative and sorted in ascending order.  If `nrows>0`, then\n`row_starts[0]` must be zero.\nnvals: A scalar tensor indicating the number of values.\nvalidate: If true, then use assertions to check that the arguments form a\nvalid `RowPartition`.\ndtype: Optional dtype for the RowPartition. If missing, the type\nis inferred from the type of `row_starts`, dtype_hint, or tf.int64.\ndtype_hint: Optional dtype for the RowPartition, used when dtype\nis None. In some cases, a caller may not have a dtype in mind when\nconverting to a tensor, so dtype_hint can be used as a soft preference.\nIf the conversion to `dtype_hint` is not possible, this argument has no\neffect.\n\nReturns:\nA `RowPartition`.", "source": "github-repos"}
{"code": "def impersonate(self, name=None, lifetime=None, mechs=None, usage='initiate'):\n    if (rcred_s4u is None):\n        raise NotImplementedError('Your GSSAPI implementation does not have support for S4U')\n    res = rcred_s4u.acquire_cred_impersonate_name(self, name, lifetime, mechs, usage)\n    return type(self)(base=res.creds)", "docstring": "Impersonate a name using the current credentials\n\nThis method acquires credentials by impersonating another\nname using the current credentials.\n\n:requires-ext:`s4u`\n\nArgs:\nname (Name): the name to impersonate\nlifetime (int): the desired lifetime of the new credentials,\nor None for indefinite\nmechs (list): the desired :class:`MechType` OIDs for the new\ncredentials\nusage (str): the desired usage for the new credentials -- either\n'both', 'initiate', or 'accept'.  Note that some mechanisms\nmay only support 'initiate'.\n\nReturns:\nCredentials: the new credentials impersonating the given name", "source": "codesearchnet"}
{"code": "def update_state(world):\n    \n\n    world_size = len(world)\n\n    def wrap(index):\n        \n        return index % world_size\n\n    for x in range(world_size):\n        for y in range(world_size):\n            \n            if not world[x][y].allow_change.get():\n                continue\n            live_neighbor_count = sum([\n                world[wrap(x)][wrap(y + 1)].value,\n                world[wrap(x + 1)][wrap(y + 1)].value,\n                world[wrap(x + 1)][wrap(y)].value,\n                world[wrap(x + 1)][wrap(y - 1)].value,\n                world[wrap(x)][wrap(y-1)].value,\n                world[wrap(x - 1)][wrap(y - 1)].value,\n                world[wrap(x - 1)][wrap(y)].value,\n                world[wrap(x - 1)][wrap(y + 1)].value\n            ])\n            if world[x][y].value:\n                \n                \n                \n                if not (live_neighbor_count == 2 or live_neighbor_count == 3):\n                    world[x][y].value = False\n            else:\n                \n                if live_neighbor_count == 3:\n                    world[x][y].value = True", "docstring": "Increment the world state, determining which cells live, die, or appear.\n\nArgs:\nworld (list[list]): A square matrix of cells\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):\n    if block_type == 'block35':\n        branch_0 = conv2d_bn(x, 32, 1)\n        branch_1 = conv2d_bn(x, 32, 1)\n        branch_1 = conv2d_bn(branch_1, 32, 3)\n        branch_2 = conv2d_bn(x, 32, 1)\n        branch_2 = conv2d_bn(branch_2, 48, 3)\n        branch_2 = conv2d_bn(branch_2, 64, 3)\n        branches = [branch_0, branch_1, branch_2]\n    elif block_type == 'block17':\n        branch_0 = conv2d_bn(x, 192, 1)\n        branch_1 = conv2d_bn(x, 128, 1)\n        branch_1 = conv2d_bn(branch_1, 160, [1, 7])\n        branch_1 = conv2d_bn(branch_1, 192, [7, 1])\n        branches = [branch_0, branch_1]\n    elif block_type == 'block8':\n        branch_0 = conv2d_bn(x, 192, 1)\n        branch_1 = conv2d_bn(x, 192, 1)\n        branch_1 = conv2d_bn(branch_1, 224, [1, 3])\n        branch_1 = conv2d_bn(branch_1, 256, [3, 1])\n        branches = [branch_0, branch_1]\n    else:\n        raise ValueError('Unknown Inception-ResNet block type. Expects \"block35\", \"block17\" or \"block8\", but got: ' + str(block_type))\n    block_name = block_type + '_' + str(block_idx)\n    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3\n    mixed = layers.Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)\n    up = conv2d_bn(mixed, x.shape[channel_axis], 1, activation=None, use_bias=True, name=block_name + '_conv')\n    x = CustomScaleLayer(scale)([x, up])\n    if activation is not None:\n        x = layers.Activation(activation, name=block_name + '_ac')(x)\n    return x", "docstring": "Adds an Inception-ResNet block.\n\nArgs:\nx: input tensor.\nscale: scaling factor to scale the residuals\n(i.e., the output of passing `x` through an inception module)\nbefore adding them to the shortcut\nbranch. Let `r` be the output from the residual branch,\nthe output of this block will be `x + scale * r`.\nblock_type: `'block35'`, `'block17'` or `'block8'`,\ndetermines the network structure in the residual branch.\nblock_idx: an `int` used for generating layer names.\nThe Inception-ResNet blocks are repeated many times\nin this network. We use `block_idx` to identify each\nof the repetitions. For example, the first\nInception-ResNet-A block will have\n`block_type='block35', block_idx=0`, and the layer names\nwill have a common prefix `'block35_0'`.\nactivation: activation function to use at the end of the block.\n\nReturns:\nOutput tensor for the block.", "source": "github-repos"}
{"code": "def diffusion_mds(means, weights, d, diffusion_rounds=10):\n    \n    for i in range(diffusion_rounds):\n        weights = weights*weights\n        weights = weights/weights.sum(0)\n    X = dim_reduce(means, weights, d)\n    if X.shape[0]==2:\n        return X.dot(weights)\n    else:\n        return X.T.dot(weights)", "docstring": "Dimensionality reduction using MDS, while running diffusion on W.\n\nArgs:\nmeans (array): genes x clusters\nweights (array): clusters x cells\nd (int): desired dimensionality\n\nReturns:\nW_reduced (array): array of shape (d, cells)", "source": "juraj-google-style"}
{"code": "def ParseInteger(text, is_signed=False, is_long=False):\n    try:\n        if is_long:\n            result = long(text, 0)\n        else:\n            result = int(text, 0)\n    except ValueError:\n        raise ValueError((\"Couldn't parse integer: %s\" % text))\n    checker = _INTEGER_CHECKERS[((2 * int(is_long)) + int(is_signed))]\n    checker.CheckValue(result)\n    return result", "docstring": "Parses an integer.\n\nArgs:\ntext: The text to parse.\nis_signed: True if a signed integer must be parsed.\nis_long: True if a long integer must be parsed.\n\nReturns:\nThe integer value.\n\nRaises:\nValueError: Thrown Iff the text is not a valid integer.", "source": "codesearchnet"}
{"code": "def get_dict(self, name, default=None):\n    if (name not in self):\n        if (default is not None):\n            return default\n        raise EnvironmentError.not_found(self._prefix, name)\n    return dict(**self.get(name))", "docstring": "Retrieves an environment variable value as a dictionary.\n\nArgs:\nname (str): The case-insensitive, unprefixed variable name.\ndefault: If provided, a default value will be returned\ninstead of throwing ``EnvironmentError``.\n\nReturns:\ndict: The environment variable's value as a ``dict``.\n\nRaises:\nEnvironmentError: If the environment variable does not\nexist, and ``default`` was not provided.", "source": "codesearchnet"}
{"code": "def __init__(self, flow, **kwargs):\n        \n        self.flow = flow\n        self.max_njobs_inqueue = kwargs.get(\"max_njobs_inqueue\", 200)", "docstring": "Initialize the object\n\nArgs:\nflow: :class:`Flow` object\nmax_njobs_inqueue: The launcher will stop submitting jobs when the\nnumber of jobs in the queue is >= Max number of jobs", "source": "juraj-google-style"}
{"code": "def translate_array(self, string, language, level=3, retdata=False):\n    language = language.lower()\n    assert (self.is_built_in(language) or (language in self.outer_templates)), (('Sorry, ' + language) + ' is not a supported language.')\n    data = phpserialize.loads(bytes(string, 'utf-8'), array_hook=list, decode_strings=True)\n    if self.is_built_in(language):\n        self.get_built_in(language, level, data)\n        print(self)\n        return (self.data_structure if retdata else None)\n\n    def loop_print(iterable, level=3):\n        '\\n            Loops over a python representation of a php array \\n            (list of tuples) and constructs a representation in another language.\\n            Translates a php array into another structure.\\n\\n            Args:\\n                iterable: list or tuple to unpack.\\n\\n                level: integer, number of spaces to use for indentation\\n            '\n        retval = ''\n        indentation = (' ' * level)\n        if ((not self.is_iterable(iterable)) or isinstance(iterable, str)):\n            non_iterable = str(iterable)\n            return str(non_iterable)\n        for item in iterable:\n            if (isinstance(item, tuple) and (len(item) == 2)):\n                key = item[0]\n                val = loop_print(item[1], level=(level + 3))\n                val = (self.translate_val(language, val) if ((language in self.lang_specific_values) and (val in self.lang_specific_values[language])) else val)\n                key = (str(key) if isinstance(key, int) else ((\"'\" + str(key)) + \"'\"))\n                needs_unpacking = ((hasattr(item[0], '__iter__') == False) and (hasattr(item[1], '__iter__') == True))\n                if needs_unpacking:\n                    retval += self.get_inner_template(language, 'iterable', indentation, key, val)\n                else:\n                    val = (str(val) if (val.isdigit() or (val in self.lang_specific_values[language].values())) else ((\"'\" + str(val)) + \"'\"))\n                    retval += self.get_inner_template(language, 'singular', indentation, key, val)\n        return retval\n    self.data_structure = (self.outer_templates[language] % loop_print(data))\n    print(self)\n    return (self.data_structure if retdata else None)", "docstring": "Unserializes a serialized php array and prints it to\nthe console as a data structure in the specified language.\nUsed to translate or convert a php array into a data structure\nin another language. Currently supports, PHP, Python, Javascript,\nand JSON.\n\nArgs:\nstring: a string of serialized php\n\nlanguage: a string representing the desired output\nformat for the array.\n\nlevel: integer, indentation level in spaces.\nDefaults to 3.\n\nretdata: boolean, the method will return the string\nin addition to printing it if set to True. Defaults\nto false.\n\nReturns:\nNone but prints a string to the console if retdata is\nFalse, otherwise returns a string.", "source": "codesearchnet"}
{"code": "def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO:\n    pos = numpy.array([(xpos - (width / 2.0)), (ypos + (height / 2.0)), 0.0, (xpos - (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos - (width / 2.0)), (ypos + (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos + (height / 2.0)), 0.0], dtype=numpy.float32)\n    normals = numpy.array([0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0], dtype=numpy.float32)\n    uvs = numpy.array([0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], dtype=numpy.float32)\n    vao = VAO('geometry:quad', mode=moderngl.TRIANGLES)\n    vao.buffer(pos, '3f', ['in_position'])\n    vao.buffer(normals, '3f', ['in_normal'])\n    vao.buffer(uvs, '2f', ['in_uv'])\n    return vao", "docstring": "Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.\n\nArgs:\nwidth (float): Width of the quad\nheight (float): Height of the quad\n\nKeyword Args:\nxpos (float): Center position x\nypos (float): Center position y\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance.", "source": "codesearchnet"}
{"code": "def _rot90_4D(images, k, name_scope):\n\n    def _rot90():\n        return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3])\n\n    def _rot180():\n        return array_ops.reverse_v2(images, [1, 2])\n\n    def _rot270():\n        return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2])\n    cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)]\n    result = control_flow_case.case(cases, default=lambda: images, exclusive=True, name=name_scope)\n    shape = result.get_shape()\n    result.set_shape([shape[0], None, None, shape[3]])\n    return result", "docstring": "Rotate batch of images counter-clockwise by 90 degrees `k` times.\n\nArgs:\nimages: 4-D Tensor of shape `[batch, height, width, channels]`.\nk: A scalar integer. The number of times the images are rotated by 90\ndegrees.\nname_scope: A valid TensorFlow name scope.\n\nReturns:\nA 4-D `Tensor` of the same type and shape as `images`.", "source": "github-repos"}
{"code": "def ParseInteger(text, is_signed=False, is_long=False):\n  \n  \n  try:\n    \n    \n    \n    if is_long:\n      result = long(text, 0)\n    else:\n      result = int(text, 0)\n  except ValueError:\n    raise ValueError('Couldn\\'t parse integer: %s' % text)\n\n  \n  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]\n  checker.CheckValue(result)\n  return result", "docstring": "Parses an integer.\n\nArgs:\ntext: The text to parse.\nis_signed: True if a signed integer must be parsed.\nis_long: True if a long integer must be parsed.\n\nReturns:\nThe integer value.\n\nRaises:\nValueError: Thrown if the text is not a valid integer.", "source": "juraj-google-style"}
{"code": "def reset(target, containers=None, config=None):\n    if target is not None:\n        target = compat.as_bytes(target)\n    if containers is not None:\n        containers = [compat.as_bytes(c) for c in containers]\n    else:\n        containers = []\n    tf_session.TF_Reset(target, containers, config)", "docstring": "Resets resource containers on `target`, and closes all connected sessions.\n\nA resource container is distributed across all workers in the\nsame cluster as `target`.  When a resource container on `target`\nis reset, resources associated with that container will be cleared.\nIn particular, all Variables in the container will become undefined:\nthey lose their values and shapes.\n\nNOTE:\n(i) reset() is currently only implemented for distributed sessions.\n(ii) Any sessions on the master named by `target` will be closed.\n\nIf no resource containers are provided, all containers are reset.\n\nArgs:\ntarget: The execution engine to connect to.\ncontainers: A list of resource container name strings, or `None` if all\nthe containers are to be reset.\nconfig: (Optional.) Protocol buffer with configuration options.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\nresetting containers.", "source": "github-repos"}
{"code": "def get_contract_data(self, contract_name):\n    contract_data_path = (self.output_dir + '/{0}.json'.format(contract_name))\n    with open(contract_data_path, 'r') as contract_data_file:\n        contract_data = json.load(contract_data_file)\n    abi = contract_data['abi']\n    bytecode = contract_data['evm']['bytecode']['object']\n    return (abi, bytecode)", "docstring": "Returns the contract data for a given contract\n\nArgs:\ncontract_name (str): Name of the contract to return.\n\nReturns:\nstr, str: ABI and bytecode of the contract", "source": "codesearchnet"}
{"code": "def trainable_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)", "docstring": "Returns all variables created with `trainable=True`.\n\nWhen passed `trainable=True`, the `Variable()` constructor automatically\nadds new variables to the graph collection\n`GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the\ncontents of that collection.\n\n@compatibility(TF2)\nNot compatible with eager execution and `tf.function`. In particular, Graph\ncollections are deprecated in TF2. Instead please create a `tf.Module`\ncontainer for all your model state, including variables.\nYou can then list all the trainable variables in your `tf.Module` through the\n`trainable_variables` attribute.\n@end_compatibility\n\nArgs:\nscope: (Optional.) A string. If supplied, the resulting list is filtered to\ninclude only items whose `name` attribute matches `scope` using\n`re.match`. Items without a `name` attribute are never returned if a scope\nis supplied. The choice of `re.match` means that a `scope` without special\ntokens filters by prefix.\n\nReturns:\nA list of Variable objects.", "source": "github-repos"}
{"code": "def __init__(self, dependency_name, is_upstream=False, optional=False):\n        \n        self.dependency_name = dependency_name\n        self.is_upstream = is_upstream\n        self.optional = optional", "docstring": "Constructor for `Extension`.\n\nArgs:\ndependency_name: str, see `ExtDependency.dependency_name`\nis_upstream: bool, see `ExtDependency.is_upstream`\noptional: bool, whether this dependency is optional", "source": "juraj-google-style"}
{"code": "def to_json(self, variables=None):\n    variables_to_resolve = []\n    if variables:\n        for (key, value) in variables.items():\n            variables_to_resolve.append(Variable(key, value))\n    for k in self.get_parameter_definitions():\n        if ((not variables) or (k not in variables)):\n            variables_to_resolve.append(Variable(k, 'unused_value'))\n    self.resolve_variables(variables_to_resolve)\n    return self.render_template()[1]", "docstring": "Render the blueprint and return the template in json form.\n\nArgs:\nvariables (dict):\nOptional dictionary providing/overriding variable values.\n\nReturns:\nstr: the rendered CFN JSON template", "source": "codesearchnet"}
{"code": "def _seconds_have_elapsed(token, num_seconds):\n    now = timeit.default_timer()\n    then = _log_timer_per_token.get(token, None)\n    if ((then is None) or ((now - then) >= num_seconds)):\n        _log_timer_per_token[token] = now\n        return True\n    else:\n        return False", "docstring": "Tests if 'num_seconds' have passed since 'token' was requested.\n\nNot strictly thread-safe - may log with the wrong frequency if called\nconcurrently from multiple threads. Accuracy depends on resolution of\n'timeit.default_timer()'.\n\nAlways returns True on the first call for a given 'token'.\n\nArgs:\ntoken: The token for which to look up the count.\nnum_seconds: The number of seconds to test for.\n\nReturns:\nWhether it has been >= 'num_seconds' since 'token' was last requested.", "source": "codesearchnet"}
{"code": "def log_every_n(level, msg, n, *args):\n  \n  count = _get_next_log_count_per_token(get_absl_logger().findCaller())\n  log_if(level, msg, not (count % n), *args)", "docstring": "Logs 'msg % args' at level 'level' once per 'n' times.\n\nLogs the 1st call, (N+1)st call, (2N+1)st call, etc.\nNot threadsafe.\n\nArgs:\nlevel: int, the absl logging level at which to log.\nmsg: str, the message to be logged.\nn: int, the number of times this should be called before it is logged.\n*args: The args to be substituted into the msg.", "source": "juraj-google-style"}
{"code": "def parse_outputtrans(path_dir):\n        \n        run_type = None\n        warning = None\n        efermi = None\n        gap = None\n        doping_levels = []\n\n        with open(os.path.join(path_dir, \"boltztrap.outputtrans\"), 'r') \\\n                as f:\n            for line in f:\n                if \"WARNING\" in line:\n                    warning = line\n                elif \"Calc type:\" in line:\n                    run_type = line.split()[-1]\n                elif line.startswith(\"VBM\"):\n                    efermi = Energy(line.split()[1], \"Ry\").to(\"eV\")\n                elif line.startswith(\"Egap:\"):\n                    gap = Energy(float(line.split()[1]), \"Ry\").to(\"eV\")\n                elif line.startswith(\"Doping level number\"):\n                    doping_levels.append(float(line.split()[6]))\n\n        return run_type, warning, efermi, gap, doping_levels", "docstring": "Parses .outputtrans file\n\nArgs:\npath_dir: dir containing boltztrap.outputtrans\n\nReturns:\ntuple - (run_type, warning, efermi, gap, doping_levels)", "source": "juraj-google-style"}
{"code": "def ws010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws010`'.format(value))\n    self._ws010 = value", "docstring": "Corresponds to IDD Field `ws010`\nWind speed corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `ws010`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def patch_addContext(self, patch, text):\n    if (len(text) == 0):\n        return\n    pattern = text[patch.start2:(patch.start2 + patch.length1)]\n    padding = 0\n    while ((text.find(pattern) != text.rfind(pattern)) and ((self.Match_MaxBits == 0) or (len(pattern) < ((self.Match_MaxBits - self.Patch_Margin) - self.Patch_Margin)))):\n        padding += self.Patch_Margin\n        pattern = text[max(0, (patch.start2 - padding)):((patch.start2 + patch.length1) + padding)]\n    padding += self.Patch_Margin\n    prefix = text[max(0, (patch.start2 - padding)):patch.start2]\n    if prefix:\n        patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]\n    suffix = text[(patch.start2 + patch.length1):((patch.start2 + patch.length1) + padding)]\n    if suffix:\n        patch.diffs.append((self.DIFF_EQUAL, suffix))\n    patch.start1 -= len(prefix)\n    patch.start2 -= len(prefix)\n    patch.length1 += (len(prefix) + len(suffix))\n    patch.length2 += (len(prefix) + len(suffix))", "docstring": "Increase the context until it is unique,\nbut don't let the pattern expand beyond Match_MaxBits.\n\nArgs:\npatch: The patch to grow.\ntext: Source text.", "source": "codesearchnet"}
{"code": "def variant(self, case_id, variant_id):\n        \n        \n        variant_id = int(variant_id)\n        gemini_query = \"SELECT * from variants WHERE variant_id = {0}\".format(\n            variant_id\n        )\n\n        individuals = []\n        \n        case_obj = self.case(case_id)\n        for individual in case_obj.individuals:\n            individuals.append(individual)\n\n        self.db = case_obj.variant_source\n        self.variant_type = case_obj.variant_type\n\n        gq = GeminiQuery(self.db)\n        gq.run(gemini_query)\n\n        for gemini_variant in gq:\n            variant = self._format_variant(\n                case_id=case_id,\n                gemini_variant=gemini_variant,\n                individual_objs=individuals,\n                index=gemini_variant['variant_id'],\n                add_all_info = True\n            )\n            return variant\n\n        return None", "docstring": "Return a specific variant.\n\nWe solve this by building a gemini query and send it to _variants\n\nArgs:\ncase_id (str): Path to a gemini database\nvariant_id (int): A gemini variant id\n\nReturns:\nvariant_obj (dict): A puzzle variant", "source": "juraj-google-style"}
{"code": "def get_fixture(self, fixture_id, head2head=None):\n    filters = []\n    if ((head2head is not None) and (int(head2head) > 0)):\n        self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')\n        filters.append(self.__createFilter('head2head', head2head))\n    else:\n        self.logger.debug(f'Getting fixture {fixture_id}.')\n    return self._request('fixtures', fixture_id, filters=filters)", "docstring": "Loads a single fixture.\n\nArgs:\n* fixture_id (str): the id of the fixture\n* head2head (int, optional): load the previous n fixtures of the two teams\n\nReturns:\n* :obj: json: the fixture-json", "source": "codesearchnet"}
{"code": "def calc_limits(data, dist=None, padding=0.25):\n    dmin = (sys.float_info.max if (dist is None) else dist.get('min', sys.float_info.max))\n    dmax = (sys.float_info.min if (dist is None) else dist.get('max', sys.float_info.min))\n    _min = min(min(data), dmin)\n    _max = max(max(data), dmax)\n    padding = (padding * (_max - _min))\n    return ((_min - padding), (_max + padding))", "docstring": "Calculate a suitable range for a histogram\n\nReturns:\ntuple of (min, max)", "source": "codesearchnet"}
{"code": "def GetUsernameForPath(self, path):\n    path = path.lower()\n    user_accounts = self._user_accounts.get(self.CURRENT_SESSION, {})\n    for user_account in iter(user_accounts.values()):\n        if (not user_account.user_directory):\n            continue\n        user_directory = user_account.user_directory.lower()\n        if path.startswith(user_directory):\n            return user_account.username\n    return None", "docstring": "Retrieves a username for a specific path.\n\nThis is determining if a specific path is within a user's directory and\nreturning the username of the user if so.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: username or None if the path does not appear to be within a user's\ndirectory.", "source": "codesearchnet"}
{"code": "def _PrintAnalysisStatusUpdateLinear(self, processing_status):\n    \n    for worker_status in processing_status.workers_status:\n      status_line = (\n          '{0:s} (PID: {1:d}) - events consumed: {2:d} - running: '\n          '{3!s}\\n').format(\n              worker_status.identifier, worker_status.pid,\n              worker_status.number_of_consumed_events,\n              worker_status.status not in definitions.ERROR_STATUS_INDICATORS)\n      self._output_writer.Write(status_line)", "docstring": "Prints an analysis status update in linear mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"}
{"code": "def get_catalog_courses(self, catalog_id):\n        \n        return self._load_data(\n            self.CATALOGS_COURSES_ENDPOINT.format(catalog_id),\n            default=[]\n        )", "docstring": "Return the courses included in a single course catalog by ID.\n\nArgs:\ncatalog_id (int): The catalog ID we want to retrieve.\n\nReturns:\nlist: Courses of the catalog in question", "source": "juraj-google-style"}
{"code": "def load(self, email, master_token, android_id):\n        \n        self._email = email\n        self._android_id = android_id\n        self._master_token = master_token\n\n        self.refresh()\n        return True", "docstring": "Authenticate to Google with the provided master token.\n\nArgs:\nemail (str): The account to use.\nmaster_token (str): The master token.\nandroid_id (str): An identifier for this client.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data, **kwargs):\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    results = self._run_ccdr(data, verbose=self.verbose)\n    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Apply causal discovery on observational data using CCDr.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the CCDR algorithm.", "source": "codesearchnet"}
{"code": "def set_nodes_vlan(site, nodes, interface, vlan_id):\n\n    def _to_network_address(host):\n        'Translate a host to a network address\\n        e.g:\\n        paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr\\n        '\n        splitted = host.split('.')\n        splitted[0] = ((splitted[0] + '-') + interface)\n        return '.'.join(splitted)\n    gk = get_api_client()\n    network_addresses = [_to_network_address(n) for n in nodes]\n    gk.sites[site].vlans[str(vlan_id)].submit({'nodes': network_addresses})", "docstring": "Set the interface of the nodes in a specific vlan.\n\nIt is assumed that the same interface name is available on the node.\n\nArgs:\nsite(str): site to consider\nnodes(list): nodes to consider\ninterface(str): the network interface to put in the vlan\nvlan_id(str): the id of the vlan", "source": "codesearchnet"}
{"code": "def get_is_group_member(self, grp_name, user):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_is_group_member(grp_name, user)", "docstring": "Check if the given user is a member of the named group.\n\nNote that a group maintainer is not considered a member unless the\nuser is also explicitly added as a member.\n\nArgs:\ngrp_name (string): Name of group.\nuser (string): User of interest.\n\nReturns:\n(bool): False if user not a member.", "source": "juraj-google-style"}
{"code": "def __init__(self, latitude, longitude, time, status, mode=None):\n        \n        super(LoranPosition, self).__init__(latitude, longitude)\n        self.time = time\n        self.status = status\n        self.mode = mode", "docstring": "Initialise a new ``LoranPosition`` object.\n\nArgs:\nlatitude (float): Fix's latitude\nlongitude (float): Fix's longitude\ntime (datetime.time): Time the fix was taken\nstatus (bool): Whether the data is active\nmode (str): Type of reading", "source": "juraj-google-style"}
{"code": "def learn(self, state_key, limit=1000):\n        \n        self.t = 1\n        while self.t <= limit:\n            next_action_list = self.extract_possible_actions(state_key)\n            if len(next_action_list):\n                action_key = self.select_action(\n                    state_key=state_key,\n                    next_action_list=next_action_list\n                )\n                reward_value = self.observe_reward_value(state_key, action_key)\n\n            if len(next_action_list):\n                \n                next_state_key = self.update_state(\n                    state_key=state_key,\n                    action_key=action_key\n                )\n\n                next_next_action_list = self.extract_possible_actions(next_state_key)\n                next_action_key = self.predict_next_action(next_state_key, next_next_action_list)\n                next_max_q = self.extract_q_df(next_state_key, next_action_key)\n\n                \n                self.update_q(\n                    state_key=state_key,\n                    action_key=action_key,\n                    reward_value=reward_value,\n                    next_max_q=next_max_q\n                )\n                \n                state_key = next_state_key\n\n            \n            self.normalize_q_value()\n            self.normalize_r_value()\n\n            \n            self.visualize_learning_result(state_key)\n            \n            if self.check_the_end_flag(state_key) is True:\n                break\n\n            \n            self.t += 1", "docstring": "Learning and searching the optimal solution.\n\nArgs:\nstate_key:      Initial state.\nlimit:          The maximum number of iterative updates based on value iteration algorithms.", "source": "juraj-google-style"}
{"code": "def fn(x: int, y: str):\n    return x", "docstring": "Test function\n\nArgs:\nx: The input\ny: Also the input", "source": "github-repos"}
{"code": "def _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight):\n    per_example_aggregation_loss = _calculate_aggregation_loss_known(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels)\n    if use_answer_as_supervision:\n        per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)\n    return aggregation_loss_weight * per_example_aggregation_loss", "docstring": "Calculates the aggregation loss per example.\n\nArgs:\nlogits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):\nLogits per aggregation operation.\naggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):\nA mask set to 1 for examples that should use aggregation functions.\naggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):\nAggregation function id for every example in the batch.\nuse_answer_as_supervision (`bool`, *optional*):\nWhether to use the answer as the only supervision for aggregation examples.\nnum_aggregation_labels (`int`, *optional*, defaults to 0):\nThe number of aggregation operators to predict.\naggregation_loss_weight (`float`, *optional*, defaults to 1.0):\nImportance weight for the aggregation loss.\n\nReturns:\naggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example.", "source": "github-repos"}
{"code": "def eigenvalues(df):\n    corr = np.corrcoef(df, rowvar=0)\n    eigvals = np.linalg.eigvals(corr)\n    return pd.Series(eigvals, df.columns, name='Eigenvalue')", "docstring": "Returns a pandas Series with eigenvalues of the correlation matrix.\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "codesearchnet"}
{"code": "class Wrapper(Layer):\n\n    def __init__(self, layer, **kwargs):\n        try:\n            assert isinstance(layer, Layer)\n        except Exception:\n            raise ValueError(f\"Layer {layer} supplied to Wrapper isn't a supported layer type. Please ensure wrapped layer is a valid Keras layer.\")\n        super().__init__(**kwargs)\n        self.layer = layer\n\n    def build(self, input_shape=None):\n        if not self.layer.built:\n            self.layer.build(input_shape)\n            self.layer.built = True\n\n    def get_config(self):\n        config = {'layer': serialization_lib.serialize_keras_object(self.layer)}\n        base_config = super().get_config()\n        return {**base_config, **config}\n\n    @classmethod\n    def from_config(cls, config, custom_objects=None):\n        layer = serialization_lib.deserialize_keras_object(config.pop('layer'), custom_objects=custom_objects)\n        return cls(layer, **config)", "docstring": "Abstract wrapper base class.\n\nWrappers take another layer and augment it in various ways.\nDo not use this class as a layer, it is only an abstract base class.\nTwo usable wrappers are the `TimeDistributed` and `Bidirectional` layers.\n\nArgs:\nlayer: The layer to be wrapped.", "source": "github-repos"}
{"code": "def eval(self, expr):\n    if (self.depth >= self.max_depth):\n        raise LimitationError('too much nesting')\n    if (self.steps >= self.max_steps):\n        raise LimitationError('too many steps')\n    self.depth += 1\n    self.steps += 1\n    res = expr.eval(self)\n    self.depth -= 1\n    return res", "docstring": "Evaluate an expression.\n\nThis does **not** add its argument (or its result) as an element of me!\nThat is the responsibility of the code that created the object. This\nmeans that you need to :meth:`Environment.rec_new` any expression you\nget from user input before evaluating it.\n\nThis, and any wrappers around it, are the **only** entry points to\nexpression evaluation you should call from ordinary code (i.e., code\nthat isn't part of an extension).\n\nArgs:\nexpr (LispVal): The expression to evaluate.\n\nReturns:\nLispVal: The result of evaluating the expression.\n\nRaises:\n~parthial.errs.LimitationError: If evaluating the expression would\nrequire more nesting, more time, or the allocation of more\nvalues than is permissible.", "source": "codesearchnet"}
{"code": "def enroll_users_in_course(cls, enterprise_customer, course_id, course_mode, emails):\n    (existing_users, unregistered_emails) = cls.get_users_by_email(emails)\n    successes = []\n    pending = []\n    failures = []\n    for user in existing_users:\n        succeeded = cls.enroll_user(enterprise_customer, user, course_mode, course_id)\n        if succeeded:\n            successes.append(user)\n        else:\n            failures.append(user)\n    for email in unregistered_emails:\n        pending_user = enterprise_customer.enroll_user_pending_registration(email, course_mode, course_id)\n        pending.append(pending_user)\n    return (successes, pending, failures)", "docstring": "Enroll existing users in a course, and create a pending enrollment for nonexisting users.\n\nArgs:\nenterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\ncourse_id (str): The unique identifier of the course in which we're enrolling\ncourse_mode (str): The mode with which we're enrolling in the course\nemails: An iterable of email addresses which need to be enrolled\n\nReturns:\nsuccesses: A list of users who were successfully enrolled in the course\npending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had\npending enrollments created for them in the database\nfailures: A list of users who could not be enrolled in the course", "source": "codesearchnet"}
{"code": "def get_all_publications(return_namedtuples=True):\n    sources = [ben_cz.get_publications, grada_cz.get_publications, cpress_cz.get_publications, zonerpress_cz.get_publications]\n    publications = []\n    for source in sources:\n        publications.extend(filters.filter_publications(source()))\n    if return_namedtuples:\n        publications = map((lambda x: x.to_namedtuple()), publications)\n    return publications", "docstring": "Get list of publications from all available sources.\n\nArgs:\nreturn_namedtuples (bool, default True): Convert :class:`.Publication`\nstructures to namedtuples (used in AMQP\ncommunication).\n\nReturns:\nlist: List of :class:`.Publication` structures converted to namedtuples.", "source": "codesearchnet"}
{"code": "def FoldValue(self, value):\n    \n    if value is False and self._data_type_definition.false_value is not None:\n      return self._data_type_definition.false_value\n\n    if value is True and self._data_type_definition.true_value is not None:\n      return self._data_type_definition.true_value\n\n    raise ValueError('No matching True and False values')", "docstring": "Folds the data type into a value.\n\nArgs:\nvalue (object): value.\n\nReturns:\nobject: folded value.\n\nRaises:\nValueError: if the data type definition cannot be folded into the value.", "source": "juraj-google-style"}
{"code": "def __init__(self, email, password):\n        \n        self.email = email\n        self.password = password\n        self.token = None\n        self.last_api_call = None\n        self.state = []\n        \n        self.authenticate()\n        \n        self.update_state_from_api()", "docstring": "Create the Trackr API interface object.\nArgs:\nemail (str): Trackr account email address.\npassword (str): Trackr account password.", "source": "juraj-google-style"}
{"code": "def update(self, *args, **kwargs):\n        \n        for k, v in args:\n            self[k] = v\n        for k, v in kwargs.items():\n            self[k] = v", "docstring": "Update ConfigMap from mapping/iterable.\n\nIf the key exists the entry is updated else it is added.\n\nArgs:\n*args: variable length argument list.  A valid argument is a two item\ntuple/list.  The first item is the key and the second is the value.\n**kwargs: Arbitrary keyword arguments representing the config.", "source": "juraj-google-style"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is not None:\n        return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1, 1]\n    return [0] * len(token_ids_0) + [1, 1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def AddTrip(self, schedule=None, headsign=None, service_period=None, trip_id=None):\n    if (schedule is None):\n        assert (self._schedule is not None)\n        schedule = self._schedule\n    if (trip_id is None):\n        trip_id = util.FindUniqueId(schedule.trips)\n    if (service_period is None):\n        service_period = schedule.GetDefaultServicePeriod()\n    trip_class = self.GetGtfsFactory().Trip\n    trip_obj = trip_class(route=self, headsign=headsign, service_period=service_period, trip_id=trip_id)\n    schedule.AddTripObject(trip_obj)\n    return trip_obj", "docstring": "Add a trip to this route.\n\nArgs:\nschedule: a Schedule object which will hold the new trip or None to use\nthe schedule of this route.\nheadsign: headsign of the trip as a string\nservice_period: a ServicePeriod object or None to use\nschedule.GetDefaultServicePeriod()\ntrip_id: optional trip_id for the new trip\n\nReturns:\na new Trip object", "source": "codesearchnet"}
{"code": "def get_associated_resource(self, task):\n        \n\n        if not task:\n            raise HPOneViewUnknownType(MSG_INVALID_TASK)\n\n        if task['category'] != 'tasks' and task['category'] != 'backups':\n            \n            raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE)\n\n        if task['type'] == 'TaskResourceV2':\n            resource_uri = task['associatedResource']['resourceUri']\n\n            if resource_uri and resource_uri.startswith(\"/rest/appliance/support-dumps/\"):\n                \n                return task, resource_uri\n\n        elif task['type'] == 'BACKUP':\n            task = self._connection.get(task['taskUri'])\n            resource_uri = task['uri']\n        else:\n            raise HPOneViewInvalidResource(MSG_TASK_TYPE_UNRECONIZED % task['type'])\n\n        entity = {}\n\n        if resource_uri:\n            entity = self._connection.get(resource_uri)\n\n        return task, entity", "docstring": "Retrieve a resource associated with a task.\n\nArgs:\ntask: task dict\n\nReturns:\ntuple: task (updated), the entity found (dict)", "source": "juraj-google-style"}
{"code": "def compute_order(bytecode: list[opcodes.Opcode], python_version) -> list[Block]:\n    processed_blocks = set()\n    blocks = _split_bytecode(bytecode, processed_blocks, python_version)\n    if python_version >= (3, 12):\n        blocks = _remove_jump_back_block(blocks)\n        blocks = _remove_jmp_to_get_anext_and_merge(blocks, processed_blocks)\n    first_op_to_block = {block.code[0]: block for block in blocks}\n    for i, block in enumerate(blocks):\n        next_block = blocks[i + 1] if i < len(blocks) - 1 else None\n        if block in processed_blocks:\n            continue\n        first_op, last_op = (block.code[0], block.code[-1])\n        if next_block and (not last_op.no_next()):\n            block.connect_outgoing(next_block)\n        if first_op.target:\n            block.connect_outgoing(first_op_to_block[first_op.target])\n        if last_op.target:\n            block.connect_outgoing(first_op_to_block[last_op.target])\n        if last_op.block_target:\n            block.connect_outgoing(first_op_to_block[last_op.block_target])\n    return cfg_utils.order_nodes(blocks)", "docstring": "Split bytecode into blocks and order the blocks.\n\nThis builds an \"ancestor first\" ordering of the basic blocks of the bytecode.\n\nArgs:\nbytecode: A list of instances of opcodes.Opcode. (E.g. returned from\nopcodes.dis())\npython_version: The target Python version, as a (major, minor) tuple.\n\nReturns:\nA list of Block instances.", "source": "github-repos"}
{"code": "def calculate_subscription_lifecycle(subscription_id):\n    subscription = Subscription.objects.select_related('messageset', 'schedule').get(id=subscription_id)\n    behind = subscription.messages_behind()\n    if (behind == 0):\n        return\n    current_messageset = subscription.messageset\n    current_sequence_number = subscription.next_sequence_number\n    end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[(- 1)]\n    BehindSubscription.objects.create(subscription=subscription, messages_behind=behind, current_messageset=current_messageset, current_sequence_number=current_sequence_number, expected_messageset=end_subscription.messageset, expected_sequence_number=end_subscription.next_sequence_number)", "docstring": "Calculates the expected lifecycle position of the subscription identified by\nsubscription_id, and creates a BehindSubscription entry for it.\n\nArgs:\nsubscription_id (str): ID of subscription to calculate lifecycle for", "source": "codesearchnet"}
{"code": "def insert(self, loc, column, value):\n    if is_list_like(value):\n        if isinstance(value, pandas.Series):\n            value = value.reindex(self.index)\n        value = list(value)\n\n    def insert(df, internal_indices=[]):\n        internal_idx = int(internal_indices[0])\n        old_index = df.index\n        df.index = pandas.RangeIndex(len(df.index))\n        df.insert(internal_idx, internal_idx, value, allow_duplicates=True)\n        df.columns = pandas.RangeIndex(len(df.columns))\n        df.index = old_index\n        return df\n    new_data = self.data.apply_func_to_select_indices_along_full_axis(0, insert, loc, keep_remaining=True)\n    new_columns = self.columns.insert(loc, column)\n    return self.__constructor__(new_data, self.index, new_columns)", "docstring": "Insert new column data.\n\nArgs:\nloc: Insertion index.\ncolumn: Column labels to insert.\nvalue: Dtype object values to insert.\n\nReturns:\nA new PandasQueryCompiler with new data inserted.", "source": "codesearchnet"}
{"code": "def save_screenshot(driver, name):\n    \n    if hasattr(driver, 'save_screenshot'):\n        screenshot_dir = os.environ.get('SCREENSHOT_DIR')\n        if not screenshot_dir:\n            LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')\n            return\n        elif not os.path.exists(screenshot_dir):\n            os.makedirs(screenshot_dir)\n        image_name = os.path.join(screenshot_dir, name + '.png')\n        driver.save_screenshot(image_name)\n\n    else:\n        msg = (\n            u\"Browser does not support screenshots. \"\n            u\"Could not save screenshot '{name}'\"\n        ).format(name=name)\n\n        LOGGER.warning(msg)", "docstring": "Save a screenshot of the browser.\n\nThe location of the screenshot can be configured\nby the environment variable `SCREENSHOT_DIR`.  If not set,\na warning is logged and no screenshot is saved.\n\nArgs:\ndriver (selenium.webdriver): The Selenium-controlled browser.\nname (str): A name for the screenshot, which will be used in the output file name.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def release_client(self, client):\n    if isinstance(client, Client):\n        if (not self._is_expired_client(client)):\n            LOG.debug('Client is not expired. Adding back to pool')\n            self.__pool.append(client)\n        elif client.is_connected():\n            LOG.debug('Client is expired and connected. Disconnecting')\n            client.disconnect()\n    if (self.__sem is not None):\n        self.__sem.release()", "docstring": "Releases a client object to the pool.\n\nArgs:\nclient: Client object.", "source": "codesearchnet"}
{"code": "def _pre_suf_fix_filter(t: List, prefix: str, suffix: str) -> bool:\n        \n\n        if prefix:\n            for a_token in t:\n                if a_token._.n_prefix(len(prefix)) != prefix:\n                    return False\n        if suffix:\n            for a_token in t:\n                if a_token._.n_suffix(len(suffix)) != suffix:\n                    return False\n\n        return True", "docstring": "Prefix and Suffix filter\nArgs:\nt: List, list of tokens\nprefix: str\nsuffix: str\n\nReturns: bool", "source": "juraj-google-style"}
{"code": "def get_grouped_indices(self, voigt=False, **kwargs):\n    if voigt:\n        array = self.voigt\n    else:\n        array = self\n    indices = list(itertools.product(*[range(n) for n in array.shape]))\n    remaining = indices.copy()\n    grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs))))]\n    remaining = [i for i in remaining if (i not in grouped[0])]\n    while remaining:\n        new = list(zip(*np.where(np.isclose(array, array[remaining[0]], **kwargs))))\n        grouped.append(new)\n        remaining = [i for i in remaining if (i not in new)]\n    return [g for g in grouped if g]", "docstring": "Gets index sets for equivalent tensor values\n\nArgs:\nvoigt (bool): whether to get grouped indices\nof voigt or full notation tensor, defaults\nto false\n**kwargs: keyword args for np.isclose.  Can take atol\nand rtol for absolute and relative tolerance, e. g.\n\n>>> tensor.group_array_indices(atol=1e-8)\n\nor\n\n>>> tensor.group_array_indices(rtol=1e-5)\n\nReturns:\nlist of index groups where tensor values are equivalent to\nwithin tolerances", "source": "codesearchnet"}
{"code": "def integer_value_convert(dictin, dropfailedvalues=False):\n    \n    \n    return key_value_convert(dictin, valuefn=int, dropfailedvalues=dropfailedvalues)", "docstring": "Convert values of dictionary to integers\n\nArgs:\ndictin (DictUpperBound): Input dictionary\ndropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.\n\nReturns:\nDict: Dictionary with values converted to integers", "source": "juraj-google-style"}
{"code": "def hash_file(path, block_size=65536):\n    \n    sha256 = hashlib.sha256()\n    with open(path, 'rb') as f:\n        for block in iter(lambda: f.read(block_size), b''):\n            sha256.update(block)\n    return sha256.hexdigest()", "docstring": "Returns SHA256 checksum of a file\n\nArgs:\npath (string): Absolute file path of file to hash\n\nblock_size (int, optional): Number of bytes to read per block", "source": "juraj-google-style"}
{"code": "def from_intlist(int_list, *qregs):\n        \n        if not all((isinstance(i, int) for i in int_list)):\n            raise LayoutError('Expected a list of ints')\n        if len(int_list) != len(set(int_list)):\n            raise LayoutError('Duplicate values not permitted; Layout is bijective.')\n        n_qubits = sum(reg.size for reg in qregs)\n        \n        if len(int_list) < n_qubits:\n            err_msg = 'Integer list length must equal number of qubits in circuit.'\n            raise LayoutError(err_msg)\n        out = Layout()\n        main_idx = 0\n        for qreg in qregs:\n            for idx in range(qreg.size):\n                out[(qreg, idx)] = int_list[main_idx]\n                main_idx += 1\n        if main_idx != len(int_list):\n            for int_item in int_list[main_idx:]:\n                out[int_item] = None\n        return out", "docstring": "Converts a list of integers to a Layout\nmapping virtual qubits (index of the list) to\nphysical qubits (the list values).\n\nArgs:\nint_list (list): A list of integers.\n*qregs (QuantumRegisters): The quantum registers to apply\nthe layout to.\nReturns:\nLayout: The corresponding Layout object.\nRaises:\nLayoutError: Invalid input layout.", "source": "juraj-google-style"}
{"code": "def create_impression_event(self, experiment, variation_id, user_id, attributes):\n    \n\n    params = self._get_common_params(user_id, attributes)\n    impression_params = self._get_required_params_for_impression(experiment, variation_id)\n\n    params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(impression_params)\n\n    return Event(self.EVENTS_URL,\n                 params,\n                 http_verb=self.HTTP_VERB,\n                 headers=self.HTTP_HEADERS)", "docstring": "Create impression Event to be sent to the logging endpoint.\n\nArgs:\nexperiment: Experiment for which impression needs to be recorded.\nvariation_id: ID for variation which would be presented to user.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nEvent object encapsulating the impression event.", "source": "juraj-google-style"}
{"code": "def are_all_matches_terminal(self,\n            predicate: Callable[[ops.Operation], bool]):\n        \n        return all(\n            self.next_moment_operating_on(op.qubits, i + 1) is None for\n            (i, op) in self.findall_operations(predicate)\n        )", "docstring": "Check whether all of the ops that satisfy a predicate are terminal.\n\nArgs:\npredicate: A predicate on ops.Operations which is being checked.\n\nReturns:\nWhether or not all `Operation` s in a circuit that satisfy the\ngiven predicate are terminal.", "source": "juraj-google-style"}
{"code": "def Run(script, container=None, exit_on_error=False, gas=Fixed8.Zero(), test_mode=True):\n    from neo.Core.Blockchain import Blockchain\n    from neo.SmartContract.StateMachine import StateMachine\n    from neo.EventHub import events\n    bc = Blockchain.Default()\n    accounts = DBCollection(bc._db, DBPrefix.ST_Account, AccountState)\n    assets = DBCollection(bc._db, DBPrefix.ST_Asset, AssetState)\n    validators = DBCollection(bc._db, DBPrefix.ST_Validator, ValidatorState)\n    contracts = DBCollection(bc._db, DBPrefix.ST_Contract, ContractState)\n    storages = DBCollection(bc._db, DBPrefix.ST_Storage, StorageItem)\n    script_table = CachedScriptTable(contracts)\n    service = StateMachine(accounts, validators, assets, contracts, storages, None)\n    engine = ApplicationEngine(trigger_type=TriggerType.Application, container=container, table=script_table, service=service, gas=gas, testMode=test_mode, exit_on_error=exit_on_error)\n    script = binascii.unhexlify(script)\n    engine.LoadScript(script)\n    try:\n        success = engine.Execute()\n        engine.testMode = True\n        service.ExecutionCompleted(engine, success)\n    except Exception as e:\n        engine.testMode = True\n        service.ExecutionCompleted(engine, False, e)\n    for event in service.events_to_dispatch:\n        events.emit(event.event_type, event)\n    return engine", "docstring": "Runs a script in a test invoke environment\n\nArgs:\nscript (bytes): The script to run\ncontainer (neo.Core.TX.Transaction): [optional] the transaction to use as the script container\n\nReturns:\nApplicationEngine", "source": "codesearchnet"}
{"code": "def execute(self, data_dict, callback, group=None, trace=None):\n    group = (group or self.group)\n    context = _ScopedContext(data_dict, self.undefined_str, group=group)\n    _Execute(self._program.Statements(), context, callback, trace)", "docstring": "Low level method to expand the template piece by piece.\n\nArgs:\ndata_dict: The JSON data dictionary.\ncallback: A callback which should be called with each expanded token.\ngroup: Dictionary of name -> Template instance (for styles)\n\nExample: You can pass 'f.write' as the callback to write directly to a file\nhandle.", "source": "codesearchnet"}
{"code": "def handle(self, args, kwargs):\n    return self.NOT_SUPPORTED", "docstring": "Handle this dispatcher's operation with the specified arguments.\n\nIf this operation dispatcher can handle the given arguments, then\nreturn an appropriate value (or raise an appropriate exception).\n\nArgs:\nargs: The arguments to the operation.\nkwargs: They keyword arguments to the operation.\n\nReturns:\nThe result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this\ndispatcher can not handle the given arguments.", "source": "github-repos"}
{"code": "def _follow_leafref(self, xpath: 'Expr', init: 'TerminalNode') -> Optional['DataNode']:\n    if isinstance(xpath, LocationPath):\n        lft = self._follow_leafref(xpath.left, init)\n        if (lft is None):\n            return None\n        return lft._follow_leafref(xpath.right, init)\n    elif isinstance(xpath, Step):\n        if (xpath.axis == Axis.parent):\n            return self.data_parent()\n        elif (xpath.axis == Axis.child):\n            if (isinstance(self, InternalNode) and xpath.qname):\n                qname = (xpath.qname if xpath.qname[1] else (xpath.qname[0], init.ns))\n                return self.get_data_child(*qname)\n    elif isinstance(xpath, Root):\n        return self.schema_root()\n    return None", "docstring": "Return the data node referred to by a leafref path.\n\nArgs:\nxpath: XPath expression compiled from a leafref path.\ninit: initial context node", "source": "codesearchnet"}
{"code": "def _SerializeAttributeContainer(self, attribute_container):\n    if self._serializers_profiler:\n        self._serializers_profiler.StartTiming(attribute_container.CONTAINER_TYPE)\n    try:\n        attribute_container_data = self._serializer.WriteSerialized(attribute_container)\n        if (not attribute_container_data):\n            raise IOError('Unable to serialize attribute container: {0:s}.'.format(attribute_container.CONTAINER_TYPE))\n        attribute_container_data = attribute_container_data.encode('utf-8')\n    finally:\n        if self._serializers_profiler:\n            self._serializers_profiler.StopTiming(attribute_container.CONTAINER_TYPE)\n    return attribute_container_data", "docstring": "Serializes an attribute container.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.\n\nReturns:\nbytes: serialized attribute container.\n\nRaises:\nIOError: if the attribute container cannot be serialized.\nOSError: if the attribute container cannot be serialized.", "source": "codesearchnet"}
{"code": "def xxd_output_to_object(input_cc_file):\n    model_bytes = xxd_output_to_bytes(input_cc_file)\n    return convert_bytearray_to_object(model_bytes)", "docstring": "Converts xxd output C++ source file to object.\n\nArgs:\ninput_cc_file: Full path name to the C++ source file dumped by xxd\n\nRaises:\nRuntimeError: If input_cc_file path is invalid.\nIOError: If input_cc_file cannot be opened.\n\nReturns:\nA python object corresponding to the input tflite file.", "source": "github-repos"}
{"code": "def parse_object_like_triples(self):\n\n\t\t\n\n\t\t\n\t\tself.rdf.triples = SimpleNamespace() \n\t\tfor s,p,o in self.rdf.graph:\n\n\t\t\t\n\t\t\tns_prefix, ns_uri, predicate = self.rdf.graph.compute_qname(p)\n\n\t\t\t\n\t\t\tif not hasattr(self.rdf.triples, ns_prefix):\n\t\t\t\tsetattr(self.rdf.triples, ns_prefix, SimpleNamespace())\n\n\t\t\t\n\t\t\tif not hasattr(getattr(self.rdf.triples, ns_prefix), predicate):\n\t\t\t\tsetattr(getattr(self.rdf.triples, ns_prefix), predicate, [])\n\n\t\t\t\n\t\t\tgetattr(getattr(self.rdf.triples, ns_prefix), predicate).append(o)", "docstring": "method to parse triples from self.rdf.graph for object-like\naccess\n\nArgs:\nNone\n\nReturns:\nNone: sets self.rdf.triples", "source": "juraj-google-style"}
{"code": "def get_steps_branch_len(self, length):\n    return log((length / self.length), min(self.branches[0][0]))", "docstring": "Get how many steps are needed to reach a given branch length.\n\nReturns:\nfloat: The age the tree must achieve to reach the given branch length.", "source": "codesearchnet"}
{"code": "def Remove(self, row):\n        \n        if row == 0 or row > self.size:\n            raise TableError(\"Attempt to remove header row\")\n        new_table = []\n        \n        for t_row in self._table:\n            if t_row.row != row:\n                new_table.append(t_row)\n                if t_row.row > row:\n                    t_row.row -= 1\n        self._table = new_table", "docstring": "Removes a row from the table.\n\nArgs:\nrow: int, the row number to delete. Must be >= 1, as the header\ncannot be removed.\n\nRaises:\nTableError: Attempt to remove nonexistent or header row.", "source": "juraj-google-style"}
{"code": "def filter(self, *query_filter):\n        \n        for query in query_filter:\n            self.query.append(query)\n        return self", "docstring": "Set the query filter to perform the query with\n\nArgs:\n*query_filter: Simplified Query Language filter", "source": "juraj-google-style"}
{"code": "def upload(self, *args, **kwargs):\n    self.prepare()\n    metadata = self.create_metadata(*args, **kwargs)\n    package = self.build_napp_package(metadata.get('name'))\n    NAppsClient().upload_napp(metadata, package)", "docstring": "Create package and upload it to NApps Server.\n\nRaises:\nFileNotFoundError: If kytos.json is not found.", "source": "codesearchnet"}
{"code": "def replaceFA(self, faDataType: int, xml: str):\n        \n        self.client.replaceFA(faDataType, xml)", "docstring": "Replaces Financial Advisor's settings.\n\nArgs:\nfaDataType: See :meth:`.requestFA`.\nxml: The XML-formatted configuration string.", "source": "juraj-google-style"}
{"code": "def read(self, fileName, **kwargs):\n        \n        if self._langext is not None:\n            with open(fileName, 'r') as fin:\n                newmodel = self._langext.translate(fin.read(), **kwargs)\n                with open(fileName+'.translated', 'w') as fout:\n                    fout.write(newmodel)\n                    fileName += '.translated'\n        lock_and_call(\n            lambda: self._impl.read(fileName),\n            self._lock\n        )\n        self._errorhandler_wrapper.check()", "docstring": "Interprets the specified file (script or model or mixed). As a side\neffect, it invalidates all entities (as the passed file can contain any\narbitrary command); the lists of entities will be re-populated lazily\n(at first access).\n\nArgs:\nfileName: Full path to the file.\n\nRaises:\nRuntimeError: in case the file does not exist.", "source": "juraj-google-style"}
{"code": "def _set_least_batch_id(self, txn_signature):\n        \n\n        batch = self._batches_by_txn_id[txn_signature]\n\n        least_index = self._index_of_batch(\n            self._batches_by_id[self._least_batch_id_wo_results].batch)\n\n        current_index = self._index_of_batch(batch)\n        all_prior = False\n\n        if current_index <= least_index:\n            return\n            \n            \n        if all(\n                all(t.header_signature in self._txn_results\n                    for t in b.transactions)\n                for b in self._batches[least_index:current_index]):\n            all_prior = True\n        if not all_prior:\n            return\n        possible_least = self._batches[current_index].header_signature\n        \n        \n        for b in self._batches[current_index:]:\n            if not all(t.header_signature in self._txn_results\n                       for t in b.transactions):\n                possible_least = b.header_signature\n                break\n        self._least_batch_id_wo_results = possible_least", "docstring": "Set the first batch id that doesn't have all results.\n\nArgs:\ntxn_signature (str): The txn identifier of the transaction with\nresults being set.", "source": "juraj-google-style"}
{"code": "def mpim_close(self, *, channel: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel})\n    return self.api_call('mpim.close', json=kwargs)", "docstring": "Closes a multiparty direct message channel.\n\nArgs:\nchannel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'", "source": "codesearchnet"}
{"code": "def address(self, compressed=True, testnet=False):\n        \n        version = '0x'\n        return version + binascii.hexlify(self.keccak[12:]).decode('ascii')", "docstring": "Address property that returns the '0x'-prefixed,\nhex-encoded address derived from the last 20 bytes of the Keccak hash.\n\nArgs:\ncompressed (bool): Whether or not the compressed key should\nbe used.\ntestnet (bool): Whether or not the key is intended for testnet\nusage. False indicates mainnet usage.\n\nReturns:\nstr: '0x'-prefixed hex-encoded address string", "source": "juraj-google-style"}
{"code": "def save_as(self, filename=None):\n    if (filename is None):\n        filename = self.filename\n    if (filename is None):\n        filename = self.default_filename\n    if (filename is None):\n        raise RuntimeError(\"Class '{}' has no default filename\".format(self.__class__.__name__))\n    self._do_save_as(filename)\n    self.filename = filename", "docstring": "Dumps object contents into file on disk.\n\nArgs:\nfilename (optional): defaults to self.filename. If passed, self.filename\nwill be updated to filename.", "source": "codesearchnet"}
{"code": "def days(start, end=None):\n    return iterate.between(start, datetime.timedelta(days=1), end)", "docstring": "Iterate over the days between the given datetime_tzs.\n\nArgs:\nstart: datetime_tz to start from.\nend: (Optional) Date to end at, if not given the iterator will never\nterminate.\n\nReturns:\nAn iterator which generates datetime_tz objects a day apart.", "source": "codesearchnet"}
{"code": "def unicode_convert(obj):\n    try:\n        if isinstance(obj, dict):\n            return {unicode_convert(key): unicode_convert(value) for (key, value) in obj.items()}\n        elif isinstance(obj, list):\n            return [unicode_convert(element) for element in obj]\n        elif isinstance(obj, str):\n            return obj\n        elif isinstance(obj, six.text_type):\n            return obj.encode('utf-8')\n        elif isinstance(obj, six.integer_types):\n            return obj\n        else:\n            return obj\n    except:\n        return obj", "docstring": "Converts unicode objects to ASCII.\n\nArgs:\nobj (object): The object to convert.\nReturns:\nThe object converted to ASCII, if possible. For ``dict`` and ``list``, the object type is maintained.", "source": "codesearchnet"}
{"code": "def bind_extensions(app):\n    \n    \n    app.db = app.config['PUZZLE_BACKEND']\n    app.db.init_app(app)\n\n    \n    bootstrap.init_app(app)\n    markdown(app)\n\n    @app.template_filter('islist')\n    def islist(object):\n        return isinstance(object, (tuple, list))", "docstring": "Configure extensions.\n\nArgs:\napp (Flask): initialized Flask app instance", "source": "juraj-google-style"}
{"code": "def WritePathStatHistory(self, client_path, stat_entries):\n    client_path_history = ClientPathHistory()\n    for (timestamp, stat_entry) in iteritems(stat_entries):\n        client_path_history.AddStatEntry(timestamp, stat_entry)\n    self.MultiWritePathHistory({client_path: client_path_history})", "docstring": "Writes a collection of `StatEntry` observed for particular path.\n\nArgs:\nclient_path: A `ClientPath` instance.\nstat_entries: A dictionary with timestamps as keys and `StatEntry`\ninstances as values.", "source": "codesearchnet"}
{"code": "def all_tokens(self, delimiter=' ', label_list_ids=None):\n        \n        tokens = set()\n\n        for utterance in self.utterances.values():\n            tokens = tokens.union(utterance.all_tokens(delimiter=delimiter, label_list_ids=label_list_ids))\n\n        return tokens", "docstring": "Return a list of all tokens occurring in one of the labels in the corpus.\n\nArgs:\ndelimiter (str): The delimiter used to split labels into tokens\n(see :meth:`audiomate.annotations.Label.tokenized`).\nlabel_list_ids (list): If not None, only labels from label-lists with an idx contained in this list\nare considered.\n\nReturns:\n:class:`set`: A set of distinct tokens.", "source": "juraj-google-style"}
{"code": "def import_image_from_file(self, filename, repository=None, tag=None,\n                               changes=None):\n        \n\n        return self.import_image(\n            src=filename, repository=repository, tag=tag, changes=changes\n        )", "docstring": "Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only\nsupports importing from a tar file on disk.\n\nArgs:\nfilename (str): Full path to a tar file.\nrepository (str): The repository to create\ntag (str): The tag to apply\n\nRaises:\nIOError: File does not exist.", "source": "juraj-google-style"}
{"code": "def _GetElementDataTypeDefinition(self, data_type_definition):\n    if (not data_type_definition):\n        raise errors.FormatError('Missing data type definition')\n    element_data_type_definition = getattr(data_type_definition, 'element_data_type_definition', None)\n    if (not element_data_type_definition):\n        raise errors.FormatError('Invalid data type definition missing element')\n    return element_data_type_definition", "docstring": "Retrieves the element data type definition.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nDataTypeDefinition: element data type definition.\n\nRaises:\nFormatError: if the element data type cannot be determined from the data\ntype definition.", "source": "codesearchnet"}
{"code": "def GetParserObjects(cls, parser_filter_expression=None):\n    \n    includes, excludes = cls._GetParserFilters(parser_filter_expression)\n\n    parser_objects = {}\n    for parser_name, parser_class in iter(cls._parser_classes.items()):\n      \n      if not includes and parser_name in excludes:\n        continue\n\n      if includes and parser_name not in includes:\n        continue\n\n      parser_object = parser_class()\n      if parser_class.SupportsPlugins():\n        plugin_includes = None\n        if parser_name in includes:\n          plugin_includes = includes[parser_name]\n\n        parser_object.EnablePlugins(plugin_includes)\n\n      parser_objects[parser_name] = parser_object\n\n    return parser_objects", "docstring": "Retrieves the parser objects.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\ndict[str, BaseParser]: parsers per name.", "source": "juraj-google-style"}
{"code": "def load_config(paths=DEFAULT_CONFIG_PATHS):\n    \n    config = Config()\n    for path in paths:\n        if os.path.isfile(path):\n            config.load_pyfile(path)\n\n    return config", "docstring": "Attempt to load config from paths, in order.\n\nArgs:\npaths (List[string]): list of paths to python files\n\nReturn:\nConfig: loaded config", "source": "juraj-google-style"}
{"code": "def read(self, n):\n        \n\n        d = b''\n        while n:\n            try:\n                block = self._socket.recv(n)\n            except socket.error:\n                block = None\n            if not block:\n                raise EOFError('Socket closed')\n            d += block\n            n -= len(block)\n        return d", "docstring": "Receive *n* bytes from the socket.\n\nArgs:\nn(int): The number of bytes to read.\n\nReturns:\nbytes: *n* bytes read from the socket.\n\nRaises:\nEOFError: If the socket was closed.", "source": "juraj-google-style"}
{"code": "def malloc(self, key, shape, dtype):\n        \n        if key not in self._memory or self._memory[key].shape != shape or self._memory[key].dtype != dtype:\n            self._memory[key] = Shmem(key, shape, dtype, self._uuid)\n\n        return self._memory[key].np_array", "docstring": "Allocates a block of shared memory, and returns a numpy array whose data corresponds with that block.\n\nArgs:\nkey (str): The key to identify the block.\nshape (list of int): The shape of the numpy array to allocate.\ndtype (type): The numpy data type (e.g. np.float32).\n\nReturns:\nnp.ndarray: The numpy array that is positioned on the shared memory.", "source": "juraj-google-style"}
{"code": "def _on_scan(self, info):\n        \n\n        device_id = info['uuid']\n        expiration_time = info.get('validity_period', 60)\n        infocopy = deepcopy(info)\n\n        infocopy['expiration_time'] = monotonic() + expiration_time\n\n        with self._scan_lock:\n            self._scanned_devices[device_id] = infocopy", "docstring": "Callback called when a new device is discovered on this CMDStream\n\nArgs:\ninfo (dict): Information about the scanned device", "source": "juraj-google-style"}
{"code": "def from_rfc3339(value):\n    return datetime.datetime.strptime(value, _RFC3339_MICROS).replace(tzinfo=pytz.utc)", "docstring": "Convert a microsecond-precision timestamp to datetime.\n\nArgs:\nvalue (str): The RFC3339 string to convert.\n\nReturns:\ndatetime.datetime: The datetime object equivalent to the timestamp in\nUTC.", "source": "codesearchnet"}
{"code": "def Read(self, file_object):\n    \n    try:\n      self.root_key = biplist.readPlist(file_object)\n\n    except (\n        biplist.NotBinaryPlistException,\n        biplist.InvalidPlistException) as exception:\n      raise IOError(exception)", "docstring": "Reads a plist from a file-like object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object containing plist data.\n\nRaises:\nIOError: if the plist file-like object cannot be read.\nOSError: if the plist file-like object cannot be read.", "source": "juraj-google-style"}
{"code": "def set_static_dns(iface, *addrs):\n    if ((addrs is ()) or (str(addrs[0]).lower() == 'none')):\n        return {'Interface': iface, 'DNS Server': 'No Changes'}\n    if (str(addrs[0]).lower() == '[]'):\n        log.debug('Clearing list of DNS servers')\n        cmd = ['netsh', 'interface', 'ip', 'set', 'dns', 'name={0}'.format(iface), 'source=static', 'address=none']\n        __salt__['cmd.run'](cmd, python_shell=False)\n        return {'Interface': iface, 'DNS Server': []}\n    addr_index = 1\n    for addr in addrs:\n        if (addr_index == 1):\n            cmd = ['netsh', 'interface', 'ip', 'set', 'dns', 'name={0}'.format(iface), 'source=static', 'address={0}'.format(addr), 'register=primary']\n            __salt__['cmd.run'](cmd, python_shell=False)\n            addr_index = (addr_index + 1)\n        else:\n            cmd = ['netsh', 'interface', 'ip', 'add', 'dns', 'name={0}'.format(iface), 'address={0}'.format(addr), 'index={0}'.format(addr_index)]\n            __salt__['cmd.run'](cmd, python_shell=False)\n            addr_index = (addr_index + 1)\n    return {'Interface': iface, 'DNS Server': addrs}", "docstring": "Set static DNS configuration on a Windows NIC\n\nArgs:\n\niface (str): The name of the interface to set\n\naddrs (*):\nOne or more DNS servers to be added. To clear the list of DNS\nservers pass an empty list (``[]``). If undefined or ``None`` no\nchanges will be made.\n\nReturns:\ndict: A dictionary containing the new DNS settings\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt -G 'os_family:Windows' ip.set_static_dns 'Local Area Connection' '192.168.1.1'\nsalt -G 'os_family:Windows' ip.set_static_dns 'Local Area Connection' '192.168.1.252' '192.168.1.253'", "source": "codesearchnet"}
{"code": "def inside_cell( self, r ):\n        \n        centre = np.array( [ 0.5, 0.5, 0.5 ] )\n        new_r = self.nearest_image( centre, r )\n        return new_r", "docstring": "Given a fractional-coordinate, if this lies outside the cell return the equivalent point inside the cell.\n\nArgs:\nr (np.array): Fractional coordinates of a point (this may be outside the cell boundaries).\n\nReturns:\n(np.array): Fractional coordinates of an equivalent point, inside the cell boundaries.", "source": "juraj-google-style"}
{"code": "def launch_subshell(self, shell_cls, cmd, args, *, prompt=None, context={}):\n    readline.write_history_file(self.history_fname)\n    prompt = (prompt if prompt else shell_cls.__name__)\n    mode = _ShellBase._Mode(shell=self, cmd=cmd, args=args, prompt=prompt, context=context)\n    shell = shell_cls(batch_mode=self.batch_mode, debug=self.debug, mode_stack=(self._mode_stack + [mode]), pipe_end=self._pipe_end, root_prompt=self.root_prompt, stdout=self.stdout, stderr=self.stderr, temp_dir=self._temp_dir)\n    self.print_debug(\"Leave parent shell '{}'\".format(self.prompt))\n    exit_directive = shell.cmdloop()\n    self.print_debug(\"Enter parent shell '{}': {}\".format(self.prompt, exit_directive))\n    readline.clear_history()\n    if os.path.isfile(self.history_fname):\n        readline.read_history_file(self.history_fname)\n    if (not (exit_directive is True)):\n        return exit_directive", "docstring": "Launch a subshell.\n\nThe doc string of the cmdloop() method explains how shell histories and\nhistory files are saved and restored.\n\nThe design of the _ShellBase class encourage launching of subshells through\nthe subshell() decorator function. Nonetheless, the user has the option\nof directly launching subshells via this method.\n\nArguments:\nshell_cls: The _ShellBase class object to instantiate and launch.\nargs: Arguments used to launch this subshell.\nprompt: The name of the subshell. The default, None, means\nto use the shell_cls.__name__.\ncontext: A dictionary to pass to the subshell as its context.\n\nReturns:\n'root': Inform the parent shell to keep exiting until the root shell\nis reached.\n'all': Exit the the command line.\nFalse, None, or anything that are evaluated as False: Inform the\nparent shell to stay in that parent shell.\nAn integer indicating the depth of shell to exit to. 0 = root shell.", "source": "codesearchnet"}
{"code": "def data_string_compare(db_data, user_data):\n        \n        db_data = ''.join(db_data.split())\n        user_data = ''.join(user_data.split())\n        if operator.eq(db_data, user_data):\n            return True\n        return False", "docstring": "Validate string removing all white space before comparison.\n\nArgs:\ndb_data (str): The data store in Redis.\nuser_data (str): The user provided data.\n\nReturns:\nbool: True if the data passed validation.", "source": "juraj-google-style"}
{"code": "def _ReadEncodedData(self, read_size):\n    encoded_data = self._file_object.read(read_size)\n    read_count = len(encoded_data)\n    self._encoded_data = b''.join([self._encoded_data, encoded_data])\n    (self._decoded_data, self._encoded_data) = self._decoder.Decode(self._encoded_data)\n    self._decoded_data_size = len(self._decoded_data)\n    return read_count", "docstring": "Reads encoded data from the file-like object.\n\nArgs:\nread_size (int): number of bytes of encoded data to read.\n\nReturns:\nint: number of bytes of encoded data read.", "source": "codesearchnet"}
{"code": "def from_string(cls, string, format_=None, fps=None, **kwargs):\n        \n        fp = io.StringIO(string)\n        return cls.from_file(fp, format_, fps=fps, **kwargs)", "docstring": "Load subtitle file from string.\n\nSee :meth:`SSAFile.load()` for full description.\n\nArguments:\nstring (str): Subtitle file in a string. Note that the string\nmust be Unicode (in Python 2).\n\nReturns:\nSSAFile\n\nExample:\n>>> text = '''\n... 1\n... 00:00:00,000 --> 00:00:05,000\n... An example SubRip file.\n... '''\n>>> subs = SSAFile.from_string(text)", "source": "juraj-google-style"}
{"code": "def _get_env(key, default=None, coerce=(lambda x: x), required=False):\n    try:\n        value = os.environ[key]\n    except KeyError:\n        if (required is True):\n            raise RequiredSettingMissing(key)\n        else:\n            return default\n    try:\n        return coerce(value)\n    except Exception:\n        raise CoercianError(key, value, coerce)", "docstring": "Return env var coerced into a type other than string.\n\nThis function extends the standard os.getenv function to enable\nthe coercion of values into data types other than string (all env\nvars are strings by default).\n\nArgs:\nkey: string, the name of the env var to look up\n\nKwargs:\ndefault: the default value to return if the env var does not exist. NB the\ndefault value is **not** coerced, and is assumed to be of the correct type.\ncoerce: a function that is used to coerce the value returned into\nanother type\nrequired: bool, if True, then a RequiredSettingMissing error is raised\nif the env var does not exist.\n\nReturns the env var, passed through the coerce function", "source": "codesearchnet"}
{"code": "def run(self, qobj, backend_options=None):\n    self._set_options(qobj_config=qobj.config, backend_options=backend_options)\n    job_id = str(uuid.uuid4())\n    job = BasicAerJob(self, job_id, self._run_job, qobj)\n    job.submit()\n    return job", "docstring": "Run qobj asynchronously.\n\nArgs:\nqobj (Qobj): payload of the experiment\nbackend_options (dict): backend options\n\nReturns:\nBasicAerJob: derived from BaseJob\n\nAdditional Information:\nbackend_options: Is a dict of options for the backend. It may contain\n* \"initial_statevector\": vector_like\n\nThe \"initial_statevector\" option specifies a custom initial\ninitial statevector for the simulator to be used instead of the all\nzero state. This size of this vector must be correct for the number\nof qubits in all experiments in the qobj.\n\nExample::\n\nbackend_options = {\n\"initial_statevector\": np.array([1, 0, 0, 1j]) / np.sqrt(2),\n}", "source": "codesearchnet"}
{"code": "def draw_point(self, x, y):\n        \n        check_int_err(lib.SDL_RenderDrawPoint(self._ptr, x, y))", "docstring": "Draw a point on the current rendering target.\n\nArgs:\nx (int): The x coordinate of the point.\ny (int): The y coordinate of the point.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def source_required(src_file):\n    \n    if not src_file.exists():\n        return True\n\n    required = True\n    hash_file = src_file.with_suffix(\".hash\", depth=0)\n    LOG.debug(\"Hash file location: %s\", hash_file)\n    if hash_file.exists():\n        new_hash = get_hash_of_dirs(src_file)\n        with open(hash_file, 'r') as h_file:\n            old_hash = h_file.readline()\n        required = not new_hash == old_hash\n        if required:\n            from benchbuild.utils.cmd import rm\n            rm(\"-r\", src_file)\n            rm(hash_file)\n    if required:\n        LOG.info(\"Source required for: %s\", src_file)\n        LOG.debug(\"Reason: src-exists: %s hash-exists: %s\", src_file.exists(),\n                  hash_file.exists())\n    return required", "docstring": "Check, if a download is required.\n\nArgs:\nsrc_file: The filename to check for.\nsrc_root: The path we find the file in.\n\nReturns:\nTrue, if we need to download something, False otherwise.", "source": "juraj-google-style"}
{"code": "def update_thread(cls, session, conversation, thread):\n    data = thread.to_api()\n    data['reload'] = True\n    return cls(('/conversations/%s/threads/%d.json' % (conversation.id, thread.id)), data=data, request_type=RequestPaginator.PUT, singleton=True, session=session)", "docstring": "Update a thread.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nconversation (helpscout.models.Conversation): The conversation\nthat the thread belongs to.\nthread (helpscout.models.Thread): The thread to be updated.\n\nReturns:\nhelpscout.models.Conversation: Conversation including freshly\nupdated thread.", "source": "codesearchnet"}
{"code": "def FromFile(cls, in_path):\n    with open(in_path, 'rb') as infile:\n        in_data = json.load(infile)\n    if (not (('trace', 'selectors') in in_data)):\n        raise ArgumentError('Invalid trace file format', keys=in_data.keys(), expected=('trace', 'selectors'))\n    selectors = [DataStreamSelector.FromString(x) for x in in_data['selectors']]\n    readings = [IOTileReading(x['time'], DataStream.FromString(x['stream']).encode(), x['value'], reading_id=x['reading_id']) for x in in_data['trace']]\n    return SimulationTrace(readings, selectors=selectors)", "docstring": "Load a previously saved ascii representation of this simulation trace.\n\nArgs:\nin_path (str): The path of the input file that we should load.\n\nReturns:\nSimulationTrace: The loaded trace object.", "source": "codesearchnet"}
{"code": "def __init__(self, input_lists, skip_node_names=None, destination_node_name=None):\n    self._input_lists = input_lists\n    self._skip_node_names = skip_node_names\n    self._inputs = []\n    self._visited_nodes = []\n    self._depth_count = 0\n    self._depth_list = []\n    self._destination_node_name = destination_node_name", "docstring": "Constructor of _DFSGraphTracer.\n\nArgs:\ninput_lists: A list of dicts. Each dict is an adjacency (input) map from\nthe recipient node name as the key and the list of input node names\nas the value.\nskip_node_names: Optional: a list of node names to skip tracing.\ndestination_node_name: Optional: destination node name. If not `None`, it\nshould be the name of a destination not as a str and the graph tracing\nwill raise GraphTracingReachedDestination as soon as the node has been\nreached.\n\nRaises:\nGraphTracingReachedDestination: if stop_at_node_name is not None and\nthe specified node is reached.", "source": "github-repos"}
{"code": "def traverse_inorder(self, leaves=True, internal=True):\n        \n        for node in self.root.traverse_inorder(leaves=leaves, internal=internal):\n            yield node", "docstring": "Perform an inorder traversal of the ``Node`` objects in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def wb020(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `wb020`'.format(value))\n\n        self._wb020 = value", "docstring": "Corresponds to IDD Field `wb020`\nWet-bulb temperature corresponding to 02.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `wb020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, message: str, p: processor.Processor):\n    self._message = message\n    self._p = p\n    self._start = None\n    self._ttft = None\n    self._model_call_event = asyncio.Event()\n    self._model_call_event.clear()", "docstring": "Wraps a processor to provide performance messaging.\n\nShould only be used for processors that consume their entire input before\nproducing output (such as non-streaming or unidirectional/single streaming\nmodel calls). The TTFT is estimated by waiting first that the inputs\nstream is\ncompletely sent to the processor (`start` time is then set). When the\nprocessor outputs its first token, the duration from `start` is then\nreported.\n\nIn a bidirectional streaming setup, the TTFT will not be reported at all.\n\nArgs:\nmessage: header of the status chunk that will be returned. It is used to\nidentify different calls to this function.\np: processor for which we need to compute ttft. self._message = message\nself._p = p self._start = None self._ttft = None self._model_call_event\n= asyncio.Event() self._model_call_event.clear()", "source": "github-repos"}
{"code": "def _ParseFSMState(self, template):\n    if (not template):\n        return\n    state_name = ''\n    for line in template:\n        self._line_num += 1\n        line = line.rstrip()\n        if (line and (not self.comment_regex.match(line))):\n            if ((not self.state_name_re.match(line)) or (len(line) > self.MAX_NAME_LEN) or (line in TextFSMRule.LINE_OP) or (line in TextFSMRule.RECORD_OP)):\n                raise TextFSMTemplateError((\"Invalid state name: '%s'. Line: %s\" % (line, self._line_num)))\n            state_name = line\n            if (state_name in self.states):\n                raise TextFSMTemplateError((\"Duplicate state name: '%s'. Line: %s\" % (line, self._line_num)))\n            self.states[state_name] = []\n            self.state_list.append(state_name)\n            break\n    for line in template:\n        self._line_num += 1\n        line = line.rstrip()\n        if (not line):\n            break\n        if self.comment_regex.match(line):\n            continue\n        if (not (line.startswith('  ^') or line.startswith('\\t^'))):\n            raise TextFSMTemplateError((\"Missing white space or carat ('^') before rule. Line: %s\" % self._line_num))\n        self.states[state_name].append(TextFSMRule(line, self._line_num, self.value_map))\n    return state_name", "docstring": "Extracts State and associated Rules from body of template file.\n\nAfter the Value definitions the remainder of the template is\nstate definitions. The routine is expected to be called iteratively\nuntil no more states remain - indicated by returning None.\n\nThe routine checks that the state names are a well formed string, do\nnot clash with reserved names and are unique.\n\nArgs:\ntemplate: Valid template file after Value definitions\nhave already been read.\n\nReturns:\nName of the state parsed from file. None otherwise.\n\nRaises:\nTextFSMTemplateError: If any state definitions are invalid.", "source": "codesearchnet"}
{"code": "def _embedPayload(slaveaddress, mode, functioncode, payloaddata):\n    _checkSlaveaddress(slaveaddress)\n    _checkMode(mode)\n    _checkFunctioncode(functioncode, None)\n    _checkString(payloaddata, description='payload')\n    firstPart = ((_numToOneByteString(slaveaddress) + _numToOneByteString(functioncode)) + payloaddata)\n    if (mode == MODE_ASCII):\n        request = (((_ASCII_HEADER + _hexencode(firstPart)) + _hexencode(_calculateLrcString(firstPart))) + _ASCII_FOOTER)\n    else:\n        request = (firstPart + _calculateCrcString(firstPart))\n    return request", "docstring": "Build a request from the slaveaddress, the function code and the payload data.\n\nArgs:\n* slaveaddress (int): The address of the slave.\n* mode (str): The modbus protcol mode (MODE_RTU or MODE_ASCII)\n* functioncode (int): The function code for the command to be performed. Can for example be 16 (Write register).\n* payloaddata (str): The byte string to be sent to the slave.\n\nReturns:\nThe built (raw) request string for sending to the slave (including CRC etc).\n\nRaises:\nValueError, TypeError.\n\nThe resulting request has the format:\n* RTU Mode: slaveaddress byte + functioncode byte + payloaddata + CRC (which is two bytes).\n* ASCII Mode: header (:) + slaveaddress (2 characters) + functioncode (2 characters) + payloaddata + LRC (which is two characters) + footer (CRLF)\n\nThe LRC or CRC is calculated from the byte string made up of slaveaddress + functioncode + payloaddata.\nThe header, LRC/CRC, and footer are excluded from the calculation.", "source": "codesearchnet"}
{"code": "def __new__(mcs, classname, bases, class_dict):\n    \n    options = {}\n    required = set()\n    for name, option in class_dict.iteritems():\n      if isinstance(option, _Option):\n        options[name] = option\n        if option.required:\n          required.add(name)\n\n    for name in options:\n      class_dict.pop(name)\n    class_dict[mcs._OPTIONS] = options\n    class_dict[mcs._REQUIRED] = required\n    cls = type.__new__(mcs, classname, bases, class_dict)\n\n    \n    if object not in bases:\n      parent_options = {}\n      \n      for c in reversed(cls.__mro__):\n        if mcs._OPTIONS in c.__dict__:\n          \n          parent_options.update(c.__dict__[mcs._OPTIONS])\n        if mcs._REQUIRED in c.__dict__:\n          required.update(c.__dict__[mcs._REQUIRED])\n      for k, v in parent_options.iteritems():\n        if k not in options:\n          options[k] = v\n    return cls", "docstring": "Creates a _Config class and modifies its class dict.\n\nArgs:\nclassname: name of the class.\nbases: a list of base classes.\nclass_dict: original class dict.\n\nReturns:\nA new _Config class. The modified class will have two fields.\n_options field is a dict from option name to _Option objects.\n_required field is a set of required option names.", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[str], model: _VLLMModelServer, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    return asyncio.run(self._async_run_inference(batch, model, inference_args))", "docstring": "Runs inferences on a batch of text strings.\n\nArgs:\nbatch: A sequence of examples as text strings.\nmodel: A _VLLMModelServer containing info for connecting to the server.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def __init__(self, scope, parent):\n        \n        CodeEntity.__init__(self, scope, parent)\n        self._si = -1", "docstring": "Constructor for statements.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.", "source": "juraj-google-style"}
{"code": "def showAddColumnDialog(self, triggered):\n    if triggered:\n        dialog = AddAttributesDialog(self)\n        dialog.accepted.connect(self.addColumn)\n        dialog.rejected.connect(self.uncheckButton)\n        dialog.show()", "docstring": "Display the dialog to add a column to the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the dialog will be created and shown.", "source": "codesearchnet"}
{"code": "def overlapping(self, variant_obj):\n        \n        \n        category = 'snv' if variant_obj['category'] == 'sv' else 'sv'\n\n        query = {\n            '$and': [\n                {'case_id': variant_obj['case_id']},\n                {'category': category},\n                {'hgnc_ids' : { '$in' : variant_obj['hgnc_ids']}}\n            ]\n        }\n\n        sort_key = [('rank_score', pymongo.DESCENDING)]\n        \n        variants = self.variant_collection.find(query).sort(sort_key).limit(30)\n\n        return variants", "docstring": "Return overlapping variants.\n\nLook at the genes that a variant overlaps to.\nThen return all variants that overlap these genes.\n\nIf variant_obj is sv it will return the overlapping snvs and oposite\nThere is a problem when SVs are huge since there are to many overlapping variants.\n\nArgs:\nvariant_obj(dict)\n\nReturns:\nvariants(iterable(dict))", "source": "juraj-google-style"}
{"code": "def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE):\n    if (source_rect == None):\n        source_rect_ptr = ffi.NULL\n    else:\n        source_rect_ptr = source_rect._ptr\n    if (dest_rect == None):\n        dest_rect_ptr = ffi.NULL\n    else:\n        dest_rect_ptr = dest_rect._ptr\n    if (center == None):\n        center_ptr = ffi.NULL\n    else:\n        center_ptr = center._ptr\n    check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip))", "docstring": "Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.\n\nArgs:\ntexture (Texture): The source texture.\nsource_rect (Rect): The source rectangle, or None for the entire texture.\ndest_rect (Rect): The destination rectangle, or None for the entire rendering target.\nrotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.\ncenter (Point): The point around which dest_rect will be rotated (if None, rotation will be done around\ndest_rect.w/2, dest_rect.h/2).\nflip (int): A value stating which flipping actions should be performed on the texture.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):\n    \n    self._whoosh.add_field(fieldname, fieldspec)\n    return self._whoosh.schema", "docstring": "Add a field in the index of the model.\n\nArgs:\nfieldname (Text): This parameters register a new field in specified model.\nfieldspec (Name, optional): This option adds various options as were described before.\n\nReturns:\nTYPE: The new schema after deleted is returned.", "source": "juraj-google-style"}
{"code": "def memory_write64(self, addr, data, zone=None):\n    words = []\n    bitmask = 4294967295\n    for long_word in data:\n        words.append((long_word & bitmask))\n        words.append(((long_word >> 32) & bitmask))\n    return self.memory_write32(addr, words, zone=zone)", "docstring": "Writes long words to memory of a target system.\n\nNote:\nThis is little-endian.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of long words to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of long words written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "codesearchnet"}
{"code": "def parse_files(self):\n    log_re = self.log_format_regex\n    log_lines = []\n    for log_file in self.matching_files():\n        with open(log_file) as f:\n            matches = re.finditer(log_re, f.read())\n            for match in matches:\n                log_lines.append(match.groupdict())\n    return log_lines", "docstring": "Find the files and parse them.\n\nReturns:\nlist: list of dictionaries (one for each parsed line).", "source": "codesearchnet"}
{"code": "def _validate_dump_with_graphs(self, device_name):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs loaded for device %s' % device_name)\n    debug_graph = self._debug_graphs[device_name]\n    for datum in self._dump_tensor_data[device_name]:\n        if datum.node_name not in debug_graph.node_inputs:\n            raise ValueError(\"Node name '%s' is not found in partition graphs of device %s.\" % (datum.node_name, device_name))\n    pending_inputs = {}\n    for node in debug_graph.node_inputs:\n        pending_inputs[node] = []\n        inputs = debug_graph.node_inputs[node]\n        for inp in inputs:\n            inp_node = debug_graphs.get_node_name(inp)\n            inp_output_slot = debug_graphs.get_output_slot(inp)\n            if inp_node in self._debug_watches[device_name] and inp_output_slot in self._debug_watches[device_name][inp_node] and (debug_graph.node_op_types.get(inp) not in ('Enter', 'NextIteration')) and ((inp_node, inp_output_slot) not in pending_inputs[node]):\n                pending_inputs[node].append((inp_node, inp_output_slot))\n    for i, datum in enumerate(self._dump_tensor_data[device_name]):\n        node = datum.node_name\n        slot = datum.output_slot\n        if not self._satisfied_at_timestamp(device_name, pending_inputs[node], datum.timestamp, start_i=i + 1):\n            raise ValueError('Causality violated in timing relations of debug dumps: %s (%d): these input(s) are not satisfied: %s' % (node, datum.timestamp, repr(pending_inputs[node])))\n        recipients = debug_graph.node_recipients[node]\n        for recipient in recipients:\n            recipient_pending_inputs = pending_inputs[recipient]\n            if (node, slot) in recipient_pending_inputs:\n                if self.node_op_type(recipient) == 'Merge':\n                    del recipient_pending_inputs[:]\n                else:\n                    del recipient_pending_inputs[recipient_pending_inputs.index((node, slot))]", "docstring": "Validate the dumped tensor data against the partition graphs.\n\nOnly the watched nodes are validated by this method, because tfdbg allows\nclients to watch only a subset of the nodes.\n\nArgs:\ndevice_name: (`str`) device name.\n\nRaises:\nLookupError: If the partition graphs have not been loaded yet.\nValueError: If dumps contain node names not found in partition graph.\nOr if the temporal order of the dump's timestamps violate the\ninput relations on the partition graphs.", "source": "github-repos"}
{"code": "def load_folder_files(folder_path, recursive=True):\n    \n    if isinstance(folder_path, (list, set)):\n        files = []\n        for path in set(folder_path):\n            files.extend(load_folder_files(path, recursive))\n\n        return files\n\n    if not os.path.exists(folder_path):\n        return []\n\n    file_list = []\n\n    for dirpath, dirnames, filenames in os.walk(folder_path):\n        filenames_list = []\n\n        for filename in filenames:\n            if not filename.endswith(('.yml', '.yaml', '.json')):\n                continue\n\n            filenames_list.append(filename)\n\n        for filename in filenames_list:\n            file_path = os.path.join(dirpath, filename)\n            file_list.append(file_path)\n\n        if not recursive:\n            break\n\n    return file_list", "docstring": "load folder path, return all files endswith yml/yaml/json in list.\n\nArgs:\nfolder_path (str): specified folder path to load\nrecursive (bool): load files recursively if True\n\nReturns:\nlist: files endswith yml/yaml/json", "source": "juraj-google-style"}
{"code": "def assert_equal_flattened(self, expected_results, actual_results):\n    self.assertEqual(len(expected_results), len(actual_results))\n    for i, expected_result in enumerate(expected_results):\n        final_result = []\n        actual_result = actual_results[i]\n        for val in actual_result:\n            final_result.extend(val.numpy())\n        self.assertAllEqual(expected_result, final_result)", "docstring": "Asserts that flattened results are equal.\n\nDue to the number of replicas in the strategy, the output may have a\ndifferent structure and needs to be flattened for comparison.\n\nArgs:\nexpected_results: The results expected as a result of a computation.\nactual_results: The actual results of a computation.", "source": "github-repos"}
{"code": "def _compute_nfps_uniform(cum_counts, sizes):\n    nfps = np.zeros((len(sizes), len(sizes)))\n    for l in range(len(sizes)):\n        for u in range(l, len(sizes)):\n            nfps[(l, u)] = _compute_nfp_uniform(l, u, cum_counts, sizes)\n    return nfps", "docstring": "Computes the matrix of expected false positives for all possible\nsub-intervals of the complete domain of set sizes, assuming uniform\ndistribution of set_sizes within each sub-intervals.\n\nArgs:\ncum_counts: the complete cummulative distribution of set sizes.\nsizes: the complete domain of set sizes.\n\nReturn (np.array): the 2-D array of expected number of false positives\nfor every pair of [l, u] interval, where l is axis-0 and u is\naxis-1.", "source": "codesearchnet"}
{"code": "def AsyncPopenArgs(self):\n    args = {}\n    if self.operating_system == OperatingSystem.WINDOWS:\n        args['close_fds'] = True\n        detached_process = 8\n        create_new_process_group = 512\n        args['creationflags'] = detached_process | create_new_process_group\n    else:\n        args['preexec_fn'] = os.setsid\n        args['close_fds'] = True\n        args['stdin'] = subprocess.PIPE\n        args['stdout'] = subprocess.PIPE\n        args['stderr'] = subprocess.PIPE\n    return args", "docstring": "Returns the args for spawning an async process using Popen on this OS.\n\nMake sure the main process does not wait for the new process. On windows\nthis means setting the 0x8 creation flag to detach the process.\n\nKilling a group leader kills the whole group. Setting creation flag 0x200 on\nWindows or running setsid on *nix makes sure the new process is in a new\nsession with the new process the group leader. This means it can't be killed\nif the parent is killed.\n\nFinally, all file descriptors (FD) need to be closed so that waiting for the\noutput of the main process does not inadvertently wait for the output of the\nnew process, which means waiting for the termination of the new process.\nIf the new process wants to write to a file, it can open new FDs.\n\nReturns:\n{str:}, The args for spawning an async process using Popen on this OS.", "source": "github-repos"}
{"code": "def get_metadata(feature_name, etextno):\n    metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)\n    return frozenset(metadata_values)", "docstring": "Looks up the value of a meta-data feature for a given text.\n\nArguments:\nfeature_name (str): The name of the meta-data to look up.\netextno (int): The identifier of the Gutenberg text for which to look\nup the meta-data.\n\nReturns:\nfrozenset: The values of the meta-data for the text or an empty set if\nthe text does not have meta-data associated with the feature.\n\nRaises:\nUnsupportedFeature: If there is no MetadataExtractor registered that\ncan extract meta-data for the given feature name.", "source": "codesearchnet"}
{"code": "def __init__(self, data, entities=None, categories=None):\n        \n        self.data = data\n        if entities is None:\n            entities = self.default_entities()\n        self.entities = entities\n        if categories is None:\n            categories = []\n        self.categories = categories\n\n        self.validate()", "docstring": "Initialization method.\n\nArgs:\ndata (list of list of int/float): 2-dim array.\nentities (list): list of entities.\ncategories (list): list of the categories (one per entity).", "source": "juraj-google-style"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--preferred_year', '--preferred-year', dest='preferred_year',\n        type=int, action='store', default=None, metavar='YEAR', help=(\n            'When a format\\'s timestamp does not include a year, e.g. '\n            'syslog, use this as the initial year instead of attempting '\n            'auto-detection.'))\n\n    argument_group.add_argument(\n        '--process_archives', '--process-archives', dest='process_archives',\n        action='store_true', default=False, help=(\n            'Process file entries embedded within archive files, such as '\n            'archive.tar and archive.zip. This can make processing '\n            'significantly slower.'))\n\n    argument_group.add_argument(\n        '--skip_compressed_streams', '--skip-compressed-streams',\n        dest='process_compressed_streams', action='store_false', default=True,\n        help=(\n            'Skip processing file content within compressed streams, such as '\n            'syslog.gz and syslog.bz2.'))", "docstring": "Adds command line arguments to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def add_to_collection(name, value) -> None:\n    get_default_graph().add_to_collection(name, value)", "docstring": "Wrapper for `Graph.add_to_collection()` using the default graph.\n\nSee `tf.Graph.add_to_collection`\nfor more details.\n\nArgs:\nname: The key for the collection. For example, the `GraphKeys` class\ncontains many standard names for collections.\nvalue: The value to add to the collection.\n\n@compatibility(eager)\nCollections are only supported in eager when variables are created inside\nan EagerVariableStore (e.g. as part of a layer or template).\n@end_compatibility", "source": "github-repos"}
{"code": "def get_resize_output_image_size(input_image: ImageInput, size: Union[int, Tuple[int, int], List[int], Tuple[int]], patch_size: Union[int, Tuple[int, int], List[int], Tuple[int]], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> tuple:\n    max_height, max_width = size if isinstance(size, (tuple, list)) else (size, size)\n    patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)\n    height, width = get_image_size(input_image, input_data_format)\n    ratio = max(height / max_height, width / max_width)\n    if ratio > 1:\n        height = int(math.floor(height / ratio))\n        width = int(math.floor(width / ratio))\n    num_height_tokens, num_width_tokens = _num_image_tokens((height, width), (patch_height, patch_width))\n    return (num_height_tokens * patch_height, num_width_tokens * patch_width)", "docstring": "Find the target (height, width) dimension of the output image after resizing given the input image and the desired\nsize.\n\nArgs:\ninput_image (`ImageInput`):\nThe image to resize.\nsize (`int` or `Tuple[int, int]`):\nMax image size an input image can be. Must be a dictionary with the key \"longest_edge\".\npatch_size (`int` or `Tuple[int, int]`):\nThe patch_size as `(height, width)` to use for resizing the image. If patch_size is an integer, `(patch_size, patch_size)`\nwill be used\ninput_data_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If unset, will use the inferred format from the input.\n\nReturns:\n`tuple`: The target (height, width) dimension of the output image after resizing.", "source": "github-repos"}
{"code": "def remove_user(self, group, username):\n    try:\n        self.lookup_id(group)\n    except ldap_tools.exceptions.InvalidResult as err:\n        raise err from None\n    operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}\n    self.client.modify(self.__distinguished_name(group), operation)", "docstring": "Remove a user from the specified LDAP group.\n\nArgs:\ngroup: Name of group to update\nusername: Username of user to remove\n\nRaises:\nldap_tools.exceptions.InvalidResult:\nResults of the query were invalid.  The actual exception raised\ninherits from InvalidResult.  See #lookup_id for more info.", "source": "codesearchnet"}
{"code": "def get_legacy_output_shapes(dataset_or_iterator):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), get_structure(dataset_or_iterator))", "docstring": "Returns the output shapes for elements of the input dataset / iterator.\n\nArgs:\ndataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\nReturns:\nA (nested) structure of `tf.TensorShape` objects matching the structure of\nthe dataset / iterator elements and specifying the shape of the individual\ncomponents.\n\n@compatibility(TF2)\nThis is a legacy API for inspecting the type signature of dataset elements. In\nTF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.\n@end_compatibility", "source": "github-repos"}
{"code": "def get_config(self, obj):\n    try:\n        shared_object_config = self._shared_objects_config[obj]\n    except (TypeError, KeyError):\n        return None\n    shared_object_config.increment_ref_count()\n    return shared_object_config", "docstring": "Gets a `SharedObjectConfig` if one has already been seen for `obj`.\n\nArgs:\nobj: The object for which to retrieve the `SharedObjectConfig`.\n\nReturns:\nThe SharedObjectConfig for a given object, if already seen. Else,\n`None`.", "source": "github-repos"}
{"code": "def update_power_state(self, id_or_uri, power_state):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/powerState\"\n        return self._client.update(power_state, uri)", "docstring": "Sets the power state of the specified power delivery device. The device must be an HP Intelligent Outlet.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\npower_state:\n{\"powerState\":\"On|Off\"}\n\nReturns:\nstr: The power state", "source": "juraj-google-style"}
{"code": "def copy_entities(self, from_namespace, from_workspace, etype, enames):\n    r = fapi.copy_entities(from_namespace, from_workspace, self.namespace, self.name, etype, enames, self.api_url)\n    fapi._check_response_code(r, 201)", "docstring": "Copy entities from another workspace.\n\nArgs:\nfrom_namespace (str): Source workspace namespace\nfrom_workspace (str): Source workspace name\netype (str): Entity type\nenames (list(str)): List of entity names to copy", "source": "codesearchnet"}
{"code": "def sum_rightmost_ndims_preserving_shape(x, ndims):\n    x = tf.convert_to_tensor(value=x)\n    if (x.shape.ndims is not None):\n        axes = tf.range((x.shape.ndims - ndims), x.shape.ndims)\n    else:\n        axes = tf.range((tf.rank(x) - ndims), tf.rank(x))\n    return tf.reduce_sum(input_tensor=x, axis=axes)", "docstring": "Return `Tensor` with right-most ndims summed.\n\nArgs:\nx: the `Tensor` whose right-most `ndims` dimensions to sum\nndims: number of right-most dimensions to sum.\n\nReturns:\nA `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most\ndimensions. If the shape of `x` is statically known, the result will also\nhave statically known shape. Otherwise, the resulting shape will only be\nknown at runtime.", "source": "codesearchnet"}
{"code": "def bloch_vector_of(self, qubit: ops.Qid) -> np.ndarray:\n    return bloch_vector_from_state_vector(self.state_vector(), self.qubit_map[qubit])", "docstring": "Returns the bloch vector of a qubit in the state.\n\nCalculates the bloch vector of the given qubit\nin the state given by self.state_vector(), given that\nself.state_vector() follows the standard Kronecker convention of\nnumpy.kron.\n\nArgs:\nqubit: qubit who's bloch vector we want to find.\n\nReturns:\nA length 3 numpy array representing the qubit's bloch vector.\n\nRaises:\nValueError: if the size of the state represents more than 25 qubits.\nIndexError: if index is out of range for the number of qubits\ncorresponding to the state.", "source": "codesearchnet"}
{"code": "class ViltProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = 'ViltImageProcessor'\n    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')\n\n    def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n        feature_extractor = None\n        if 'feature_extractor' in kwargs:\n            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)\n            feature_extractor = kwargs.pop('feature_extractor')\n        image_processor = image_processor if image_processor is not None else feature_extractor\n        if image_processor is None:\n            raise ValueError('You need to specify an `image_processor`.')\n        if tokenizer is None:\n            raise ValueError('You need to specify a `tokenizer`.')\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n\n    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:\n        \n        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)\n        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)\n        encoding.update(encoding_image_processor)\n        return encoding\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        image_processor_input_names = self.image_processor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))\n\n    @property\n    def feature_extractor_class(self):\n        warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)\n        return self.image_processor_class\n\n    @property\n    def feature_extractor(self):\n        warnings.warn('`feature_extractor` is deprecated and will be removed in v5. 
Use `image_processor` instead.', FutureWarning)\n        return self.image_processor", "docstring": "Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor.\n\n[`ViltProcessor`] offers all the functionalities of [`ViltImageProcessor`] and [`BertTokenizerFast`]. See the\ndocstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.\n\nArgs:\nimage_processor (`ViltImageProcessor`, *optional*):\nAn instance of [`ViltImageProcessor`]. The image processor is a required input.\ntokenizer (`BertTokenizerFast`, *optional*):\nAn instance of ['BertTokenizerFast`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "async def get_user_groups(request):\n    acl_callback = request.get(GROUPS_KEY)\n    if (acl_callback is None):\n        raise RuntimeError('acl_middleware not installed')\n    user_id = (await get_auth(request))\n    groups = (await acl_callback(user_id))\n    if (groups is None):\n        return None\n    user_groups = ((Group.AuthenticatedUser, user_id) if (user_id is not None) else ())\n    return set(itertools.chain(groups, (Group.Everyone,), user_groups))", "docstring": "Returns the groups that the user in this request has access to.\n\nThis function gets the user id from the auth.get_auth function, and passes\nit to the ACL callback function to get the groups.\n\nArgs:\nrequest: aiohttp Request object\n\nReturns:\nIf the ACL callback function returns None, this function returns None.\nOtherwise this function returns the sequence of group permissions\nprovided by the callback, plus the Everyone group. If user_id is not\nNone, the AuthnticatedUser group and the user_id are added to the\ngroups returned by the function\n\nRaises:\nRuntimeError: If the ACL middleware is not installed", "source": "codesearchnet"}
{"code": "def FVDEVolumeOpen(fvde_volume, path_spec, file_object, key_chain):\n    encrypted_root_plist = key_chain.GetCredential(path_spec, 'encrypted_root_plist')\n    if encrypted_root_plist:\n        fvde_volume.read_encrypted_root_plist(encrypted_root_plist)\n    password = key_chain.GetCredential(path_spec, 'password')\n    if password:\n        fvde_volume.set_password(password)\n    recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n    if recovery_password:\n        fvde_volume.set_recovery_password(recovery_password)\n    fvde_volume.open_file_object(file_object)", "docstring": "Opens the FVDE volume using the path specification.\n\nArgs:\nfvde_volume (pyfvde.volume): FVDE volume.\npath_spec (PathSpec): path specification.\nfile_object (FileIO): file-like object.\nkey_chain (KeyChain): key chain.", "source": "codesearchnet"}
{"code": "def delete_entity(self, etype, entity_id):\n    r = fapi.delete_entity(self.namespace, self.name, etype, entity_id, self.api_url)\n    fapi._check_response_code(r, 202)", "docstring": "Delete an entity in this workspace.\n\nArgs:\netype (str): Entity type\nentity_id (str): Entity name/unique id", "source": "codesearchnet"}
{"code": "def get_best_electronegativity_anonymous_mapping(self, struct1, struct2):\n    (struct1, struct2) = self._process_species([struct1, struct2])\n    (struct1, struct2, fu, s1_supercell) = self._preprocess(struct1, struct2)\n    matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, use_rms=True, break_on_match=True)\n    if matches:\n        min_X_diff = np.inf\n        for m in matches:\n            X_diff = 0\n            for (k, v) in m[0].items():\n                X_diff += (struct1.composition[k] * ((k.X - v.X) ** 2))\n            if (X_diff < min_X_diff):\n                min_X_diff = X_diff\n                best = m[0]\n        return best", "docstring": "Performs an anonymous fitting, which allows distinct species in one\nstructure to map to another. E.g., to compare if the Li2O and Na2O\nstructures are similar. If multiple substitutions are within tolerance\nthis will return the one which minimizes the difference in\nelectronegativity between the matches species.\n\nArgs:\nstruct1 (Structure): 1st structure\nstruct2 (Structure): 2nd structure\n\nReturns:\nmin_mapping (Dict): Mapping of struct1 species to struct2 species", "source": "codesearchnet"}
{"code": "def get_file_list(wildcard):\n    \n    files = glob.glob(os.path.expanduser(wildcard))\n    return files", "docstring": "Search for files to be concatenated. Currently very basic, but could\nexpand to be more sophisticated.\n\nArgs:\nwildcard (regular expression string)\n\nReturns:\nfiles (list of full file paths)", "source": "juraj-google-style"}
{"code": "def constant(value, delay=None):\n\n    @asyncio.coroutine\n    def coro():\n        if delay:\n            (yield from asyncio.sleep(delay))\n        return value\n    return coro", "docstring": "Returns a coroutine function that when called, always returns\nthe provided value.\n\nThis function has an alias: `paco.identity`.\n\nArguments:\nvalue (mixed): value to constantly return when coroutine is called.\ndelay (int/float): optional return value delay in seconds.\n\nReturns:\ncoroutinefunction\n\nUsage::\n\ncoro = paco.constant('foo')\n\nawait coro()\n# => 'foo'\nawait coro()\n# => 'foo'", "source": "codesearchnet"}
{"code": "def hflip(img):\n    \n    if not _is_pil_image(img):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n    return img.transpose(Image.FLIP_LEFT_RIGHT)", "docstring": "Horizontally flip the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be flipped.\n\nReturns:\nPIL Image:  Horizontall flipped image.", "source": "juraj-google-style"}
{"code": "def _CreateIndexIfNotExists(self, index_name, mappings):\n    try:\n        if (not self._client.indices.exists(index_name)):\n            self._client.indices.create(body={'mappings': mappings}, index=index_name)\n    except elasticsearch.exceptions.ConnectionError as exception:\n        raise RuntimeError('Unable to create Elasticsearch index with error: {0!s}'.format(exception))", "docstring": "Creates an Elasticsearch index if it does not exist.\n\nArgs:\nindex_name (str): mame of the index.\nmappings (dict[str, object]): mappings of the index.\n\nRaises:\nRuntimeError: if the Elasticsearch index cannot be created.", "source": "codesearchnet"}
{"code": "def format_comment(comment_data):\n    \n    format_pieces = []\n    \n    if 'line' in comment_data:\n        format_pieces.append('line {line}')\n    if 'column' in comment_data:\n        if format_pieces:\n            format_pieces.append(', ')\n        format_pieces.append('col {column}')\n    if format_pieces:\n        format_pieces.append(': ')\n\n    \n    if 'severity' in comment_data:\n        format_pieces.append('{severity}: ')\n\n    if 'message_id' in comment_data:\n        format_pieces.append('[{message_id}]: ')\n\n    \n    if 'message' in comment_data:\n        format_pieces.append('{message}')\n\n    return ''.join(format_pieces).format(**comment_data)", "docstring": "Formats the data returned by the linters.\n\nGiven a dictionary with the fields: line, column, severity, message_id,\nmessage, will generate a message like:\n\n'line {line}, col {column}: {severity}: [{message_id}]: {message}'\n\nAny of the fields may nbe absent.\n\nArgs:\ncomment_data: dictionary with the linter data.\n\nReturns:\na string with the formatted message.", "source": "juraj-google-style"}
{"code": "def tags_all(self):\n        \n        if 'tags' not in self.database.collection_names():\n            print 'Warning: Searching on non-existance tags collection'\n            return None\n\n        cursor = self.database['tags'].find({}, {'_id':0, 'md5':1, 'tags':1})\n        return [item for item in cursor]", "docstring": "List of the tags and md5s for all samples\nArgs:\nNone\n\nReturns:\nList of the tags and md5s for all samples", "source": "juraj-google-style"}
{"code": "def AddTableColumns(self, table, columns):\n    table_columns = self._table_columns.setdefault(table, [])\n    for attr in columns:\n        if (attr not in table_columns):\n            table_columns.append(attr)", "docstring": "Add columns to table if they are not already there.\n\nArgs:\ntable: table name as a string\ncolumns: an iterable of column names", "source": "codesearchnet"}
{"code": "def collect_previous_mask(input_tensors):\n\n    def _collect_previous_mask(x):\n        return getattr(x, '_keras_mask', None)\n    return nest.map_structure(_collect_previous_mask, input_tensors)", "docstring": "Retrieves the output mask(s) of the previous node.\n\nArgs:\ninput_tensors: An arbitrary structure of Tensors.\n\nReturns:\nA mask tensor or list of mask tensors.", "source": "github-repos"}
{"code": "def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth):\n    depth = random.randrange(min_depth, (max_depth + 1))\n    expr = random_expr(depth, vlist, ops)\n    sample = str(expr)\n    target = format_sympy_expr(sympy.simplify(sample))\n    return (sample, target)", "docstring": "Randomly generate an algebra simplify dataset sample.\n\nGiven an input expression, produce the simplified expression.\n\nArgs:\nvlist: Variable list. List of chars that can be used in the expression.\nops: List of ExprOp instances. The allowed operators for the expression.\nmin_depth: Expression trees will not have a smaller depth than this. 0 means\nthere is just a variable. 1 means there is one operation.\nmax_depth: Expression trees will not have a larger depth than this. To make\nall trees have the same depth, set this equal to `min_depth`.\n\nReturns:\nsample: String representation of the input.\ntarget: String representation of the solution.", "source": "codesearchnet"}
{"code": "def has_all_nonzero_segment_lengths(neuron, threshold=0.0):\n    \n    bad_ids = []\n    for sec in _nf.iter_sections(neuron):\n        p = sec.points\n        for i, s in enumerate(zip(p[:-1], p[1:])):\n            if segment_length(s) <= threshold:\n                bad_ids.append((sec.id, i))\n\n    return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check presence of neuron segments with length not above threshold\n\nArguments:\nneuron(Neuron): The neuron object to test\nthreshold(float): value above which a segment length is considered to\nbe non-zero\n\nReturns:\nCheckResult with result including list of (section_id, segment_id)\nof zero length segments", "source": "juraj-google-style"}
{"code": "def scale(self):\n    if (self.type not in {EventType.GESTURE_PINCH_BEGIN, EventType.GESTURE_PINCH_UPDATE, EventType.GESTURE_PINCH_END}):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_gesture_get_scale(self._handle)", "docstring": "The absolute scale of a pinch gesture, the scale is\nthe division of the current distance between the fingers and\nthe distance at the start of the gesture.\n\nThe scale begins at 1.0, and if e.g. the fingers moved together by\n50% then the scale will become 0.5, if they move twice as far apart\nas initially the scale becomes 2.0, etc.\n\nFor gesture events that are of type\n:attr:`~libinput.constant.EventType.GESTURE_PINCH_BEGIN`, this property\nreturns 1.0.\n\nFor gesture events that are of type\n:attr:`~libinput.constant.EventType.GESTURE_PINCH_END`, this property\nreturns the scale value of the most recent\n:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE` event\n(if any) or 1.0 otherwise.\n\nFor all other events this property raises :exc:`AttributeError`.\n\nReturns:\nfloat: The absolute scale of a pinch gesture.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def CheckCondition(condition, check_object):\n    try:\n        of = objectfilter.Parser(condition).Parse()\n        compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)\n        return compiled_filter.Matches(check_object)\n    except objectfilter.Error as e:\n        raise ConditionError(e)", "docstring": "Check if a condition matches an object.\n\nArgs:\ncondition: A string condition e.g. \"os == 'Windows'\"\ncheck_object: Object to validate, e.g. an rdf_client.KnowledgeBase()\n\nReturns:\nTrue or False depending on whether the condition matches.\n\nRaises:\nConditionError: If condition is bad.", "source": "codesearchnet"}
{"code": "def scale_streaming_endpoint(access_token, streaming_endpoint_id, scale_units):\n    path = '/StreamingEndpoints'\n    full_path = ''.join([path, \"('\", streaming_endpoint_id, \"')\", '/Scale'])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    body = (('{\"scaleUnits\": \"' + str(scale_units)) + '\"}')\n    return do_ams_post(endpoint, full_path_encoded, body, access_token)", "docstring": "Scale Media Service Streaming Endpoint.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nstreaming_endpoint_id (str): A Media Service Streaming Endpoint ID.\nscale_units (str): A Media Service Scale Units Number.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def movies_in_theaters(self, **kwargs):\n        \n        path = self._get_path('movies_in_theaters')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the movies currently in theaters from the API.\n\nArgs:\npage_limit (optional): number of movies to show per page, default=16\npage (optional): results page number, default=1\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _show_defined_functions(saved_model_dir, meta_graphs):\n    has_object_graph_def = False\n    for meta_graph_def in meta_graphs:\n        has_object_graph_def |= meta_graph_def.HasField('object_graph_def')\n    if not has_object_graph_def:\n        return\n    print('\\nConcrete Functions:', end='')\n    try:\n        with ops_lib.Graph().as_default():\n            trackable_object = load.load(saved_model_dir, options=load_options.LoadOptions(experimental_skip_checkpoint=True))\n    except Exception as e:\n        if 'Op type not registered' in str(e):\n            error = 'the existence of custom ops in the SavedModel'\n        else:\n            error = 'unknown reasons'\n        print(f' N/A (could not be listed due to {error})')\n        return\n    children = list(save._AugmentedGraphView(trackable_object).list_children(trackable_object))\n    children = sorted(children, key=lambda x: x.name)\n    for name, child in children:\n        concrete_functions = []\n        if isinstance(child, defun.ConcreteFunction):\n            concrete_functions.append(child)\n        elif isinstance(child, def_function.Function):\n            concrete_functions.extend(child._list_all_concrete_functions_for_serialization())\n        else:\n            continue\n        print(\"\\n  Function Name: '%s'\" % name)\n        concrete_functions = sorted(concrete_functions, key=lambda x: x.name)\n        for index, concrete_function in enumerate(concrete_functions, 1):\n            args, kwargs = (None, None)\n            if concrete_function.structured_input_signature:\n                args, kwargs = concrete_function.structured_input_signature\n            elif concrete_function._arg_keywords:\n                args = concrete_function._arg_keywords\n            if args:\n                print('    Option \n                print('      Callable with:')\n                _print_args(args, indent=4)\n            if kwargs:\n                _print_args(kwargs, 'Named Argument', indent=4)", "docstring": "Prints the callable concrete and polymorphic functions of the Saved Model.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect.\nmeta_graphs: Already-extracted MetaGraphDef of the SavedModel.", "source": "github-repos"}
{"code": "def downsampled_mesh(self, step):\n        \n        from lace.mesh import Mesh\n\n        if self.f is not None:\n            raise ValueError(\n                'Function `downsampled_mesh` does not support faces.')\n\n        low = Mesh()\n        if self.v is not None:\n            low.v = self.v[::step]\n        if self.vc is not None:\n            low.vc = self.vc[::step]\n        return low", "docstring": "Returns a downsampled copy of this mesh.\n\nArgs:\nstep: the step size for the sampling\n\nReturns:\na new, downsampled Mesh object.\n\nRaises:\nValueError if this Mesh has faces.", "source": "juraj-google-style"}
{"code": "def embedded_tweet(self):\n    embedded_tweet = tweet_embeds.get_embedded_tweet(self)\n    if (embedded_tweet is not None):\n        try:\n            return Tweet(embedded_tweet)\n        except NotATweetError as nate:\n            raise NotATweetError(('The embedded tweet payload {} appears malformed.' + \" Failed with '{}'\".format(embedded_tweet, nate)))\n    else:\n        return None", "docstring": "Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object\n\nReturns:\nTweet (or None, if the Tweet is neither a quote tweet or a Retweet):\na Tweet representing the quote Tweet or the Retweet\n(see tweet_embeds.get_embedded_tweet, this is that value as a Tweet)\n\nRaises:\nNotATweetError: if embedded tweet is malformed", "source": "codesearchnet"}
{"code": "def split_last_dimension(x, n):\n    x_shape = common_layers.shape_list(x)\n    m = x_shape[(- 1)]\n    if (isinstance(m, int) and isinstance(n, int)):\n        assert ((m % n) == 0)\n    return tf.reshape(x, (x_shape[:(- 1)] + [n, (m", "docstring": "Reshape x so that the last dimension becomes two dimensions.\n\nThe first of these two dimensions is n.\n\nArgs:\nx: a Tensor with shape [..., m]\nn: an integer.\n\nReturns:\na Tensor with shape [..., n, m/n]", "source": "codesearchnet"}
{"code": "def handle(*codes, **kwargs):\n        \n        regToken = kwargs.get(\"regToken\", False)\n        subscribe = kwargs.get(\"subscribe\")\n\n        def decorator(fn):\n            @functools.wraps(fn)\n            def wrapper(self, *args, **kwargs):\n                try:\n                    return fn(self, *args, **kwargs)\n                except SkypeApiException as e:\n                    if isinstance(e.args[1], requests.Response) and e.args[1].status_code in codes:\n                        conn = self if isinstance(self, SkypeConnection) else self.conn\n                        if regToken:\n                            conn.getRegToken()\n                        if subscribe:\n                            conn.endpoints[subscribe].subscribe()\n                        return fn(self, *args, **kwargs)\n                    raise\n            return wrapper\n\n        return decorator", "docstring": "Method decorator: if a given status code is received, re-authenticate and try again.\n\nArgs:\ncodes (int list): status codes to respond to\nregToken (bool): whether to try retrieving a new token on error\n\nReturns:\nmethod: decorator function, ready to apply to other methods", "source": "juraj-google-style"}
{"code": "def get_stops_line(self, **kwargs):\n    params = {'line': util.ints_to_string(kwargs.get('lines', [])), 'direction': util.direction_code(kwargs.get('direction', '')), 'cultureInfo': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('geo', 'get_stops_line', **params)\n    if (not util.check_result(result, 'stop')):\n        return (False, 'UNKNOWN ERROR')\n    values = util.response_list(result, 'stop')\n    return (True, [emtype.Stop(**a) for a in values])", "docstring": "Obtain information on the stops of the given lines.\n\nArguments:\nlines (list[int] | int): Lines to query, may be empty to get\nall the lines.\ndirection (str): Optional, either *forward* or *backward*.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Stop]), or message string\nin case of error.", "source": "codesearchnet"}
{"code": "def distance_to_line(a, b, p):\n    return distance(closest_point(a, b, p), p)", "docstring": "Closest distance between a line segment and a point\n\nArgs:\na ([float, float]): x and y coordinates. Line start\nb ([float, float]): x and y coordinates. Line end\np ([float, float]): x and y coordinates. Point to compute the distance\nReturns:\nfloat", "source": "codesearchnet"}
{"code": "def add_scalar_value(self, value_buf):\n        \n        self.__container_node.add_child(_Node(value_buf))\n        self.current_container_length += len(value_buf)", "docstring": "Add a node to the tree containing a scalar value.\n\nArgs:\nvalue_buf (bytearray): bytearray containing the scalar value.", "source": "juraj-google-style"}
{"code": "def __init__(self, fetches):\n    values = _get_attrs_values(fetches)\n    self._fetch_type = type(fetches)\n    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]\n    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)", "docstring": "Creates a _AttrsFetchMapper.\n\nArgs:\nfetches: An instance of an attrs decorated class.", "source": "github-repos"}
{"code": "def configure_stream(level='WARNING'):\n    \n    \n    root_logger = logging.getLogger()\n    \n    root_logger.setLevel(level)\n\n    \n    template = \"[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s\"\n    formatter = logging.Formatter(template)\n\n    \n    console = logging.StreamHandler()\n    console.setLevel(level)\n    console.setFormatter(formatter)\n\n    root_logger.addHandler(console)\n    return root_logger", "docstring": "Configure root logger using a standard stream handler.\n\nArgs:\nlevel (string, optional): lowest level to log to the console\n\nReturns:\nlogging.RootLogger: root logger instance with attached handler", "source": "juraj-google-style"}
{"code": "def __init__(self, type=None, hashes=None):\n        \n        self.Type = type\n        self.Hashes = hashes if hashes else []", "docstring": "Create an instance.\n\nArgs:\ntype (neo.Network.InventoryType):\nhashes (list): of bytearray items.", "source": "juraj-google-style"}
{"code": "def rest_action(self, func, url, **kwargs):\n        \n        try:\n            response = func(url, timeout=self.TIMEOUT, **kwargs)\n        except requests.RequestException, err:\n            log.exception(\n                \"[PyLmod] Error - connection error in \"\n                \"rest_action, err=%s\", err\n            )\n            raise err\n        try:\n            return response.json()\n        except ValueError, err:\n            log.exception('Unable to decode %s', response.content)\n            raise err", "docstring": "Routine to do low-level REST operation, with retry.\n\nArgs:\nfunc (callable): API function to call\nurl (str): service URL endpoint\nkwargs (dict): addition parameters\n\nRaises:\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\nlist: the json-encoded content of the response", "source": "juraj-google-style"}
{"code": "def create_explicit(bounds):\n    safe_bounds = sorted((float(x) for x in bounds))\n    if (len(safe_bounds) != len(set(safe_bounds))):\n        raise ValueError(u'Detected two elements of bounds that are the same')\n    return sc_messages.Distribution(bucketCounts=([0] * (len(safe_bounds) + 1)), explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))", "docstring": "Creates a new instance of distribution with explicit buckets.\n\nbounds is an iterable of ordered floats that define the explicit buckets\n\nArgs:\nbounds (iterable[float]): initializes the bounds\n\nReturn:\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`\n\nRaises:\nValueError: if the args are invalid for creating an instance", "source": "codesearchnet"}
{"code": "def get_signatures_with_results(vcs):\n    \n    results_dir = os.path.join(vcs.private_dir(), 'results')\n    if not os.path.exists(results_dir):\n        return []\n    rel_paths = os.listdir(results_dir)\n    return [p for p in rel_paths if os.path.isdir(os.path.join(results_dir, p))]", "docstring": "Returns the list of signatures for which test results are saved.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nList[str]", "source": "juraj-google-style"}
{"code": "def set_from_tree(self, address_value_dict):\n        \n\n        for address, value in address_value_dict.items():\n            if address in self._state:\n                self._state[address].set_result(result=value,\n                                                from_tree=True)", "docstring": "Set the result for each future at the given addresses with the value\nstored in the merkle database.\n\nArgs:\naddress_value_dict (dict of str: bytes): The unique\nfull addresses that the bytes values should be set with.", "source": "juraj-google-style"}
{"code": "def purge_unused(self, pass_count=3):\n    for purge_count in range(0, pass_count):\n        self._add_entry(templates.PROJECT_PURGE)", "docstring": "Append an purge model entry to the journal.\n\nThis instructs Revit to purge the open model.\n\nArgs:\npass_count (int): number of times to execute the purge.\ndefault is 3", "source": "codesearchnet"}
{"code": "def _GetTimeValue(self, name):\n    timestamp = getattr(self._tsk_file.info.meta, name, None)\n    if (self._file_system_type in self._TSK_HAS_NANO_FS_TYPES):\n        name_fragment = '{0:s}_nano'.format(name)\n        fraction_of_second = getattr(self._tsk_file.info.meta, name_fragment, None)\n    else:\n        fraction_of_second = None\n    return TSKTime(timestamp=timestamp, fraction_of_second=fraction_of_second)", "docstring": "Retrieves a date and time value.\n\nArgs:\nname (str): name of the date and time value, for example \"atime\" or\n\"mtime\".\n\nReturns:\ndfdatetime.DateTimeValues: date and time value or None if not available.", "source": "codesearchnet"}
{"code": "def __try_read_record(self):\n    block_remaining = (_BLOCK_SIZE - (self.__reader.tell() % _BLOCK_SIZE))\n    if (block_remaining < _HEADER_LENGTH):\n        return ('', _RECORD_TYPE_NONE)\n    header = self.__reader.read(_HEADER_LENGTH)\n    if (len(header) != _HEADER_LENGTH):\n        raise EOFError(('Read %s bytes instead of %s' % (len(header), _HEADER_LENGTH)))\n    (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header)\n    crc = _unmask_crc(masked_crc)\n    if ((length + _HEADER_LENGTH) > block_remaining):\n        raise errors.InvalidRecordError('Length is too big')\n    data = self.__reader.read(length)\n    if (len(data) != length):\n        raise EOFError(('Not enough data read. Expected: %s but got %s' % (length, len(data))))\n    if (record_type == _RECORD_TYPE_NONE):\n        return ('', record_type)\n    actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])\n    actual_crc = crc32c.crc_update(actual_crc, data)\n    actual_crc = crc32c.crc_finalize(actual_crc)\n    if (actual_crc != crc):\n        raise errors.InvalidRecordError('Data crc does not match')\n    return (data, record_type)", "docstring": "Try reading a record.\n\nReturns:\n(data, record_type) tuple.\nRaises:\nEOFError: when end of file was reached.\nInvalidRecordError: when valid record could not be read.", "source": "codesearchnet"}
{"code": "def _preprocess_conv2d_input(x, data_format, force_transpose=False):\n    tf_data_format = 'NHWC'\n    if data_format == 'channels_first':\n        if not _has_nchw_support() or force_transpose:\n            x = array_ops.transpose(x, (0, 2, 3, 1))\n        else:\n            tf_data_format = 'NCHW'\n    return (x, tf_data_format)", "docstring": "Transpose and cast the input before the conv2d.\n\nArgs:\nx: input tensor.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\nforce_transpose: Boolean. If True, the input will always be transposed\nfrom NCHW to NHWC if `data_format` is `\"channels_first\"`.\nIf False, the transposition only occurs on CPU (GPU ops are\nassumed to support NCHW).\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def __call__(self, shape, dtype=None):\n    if len(shape) != 2:\n        raise ValueError(f'Identity matrix initializer can only be used for 2D matrices. Received: shape={shape} of rank {len(shape)}.')\n    dtype = standardize_dtype(dtype)\n    return self.gain * ops.eye(*shape, dtype=dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes\nare supported. If not specified, `keras.backend.floatx()`\nis used, which default to `float32` unless you configured it\notherwise (via `keras.backend.set_floatx(float_dtype)`).", "source": "github-repos"}
{"code": "def _prefix_output_keys(self, output_dict, output_name):\n    new_outputs = {}\n    for key, val in output_dict.items():\n        key = self._prefix_key(key, output_name)\n        new_outputs[key] = val\n    return new_outputs", "docstring": "Prepend output_name to the output_dict keys if it doesn't exist.\n\nThis produces predictable prefixes for the pre-determined outputs\nof SupervisedOutput.\n\nArgs:\noutput_dict: dict of string to Tensor, assumed valid.\noutput_name: prefix string to prepend to existing keys.\n\nReturns:\ndict with updated keys and existing values.", "source": "github-repos"}
{"code": "def get_value(x):\n    if not tensor_util.is_tf_type(x):\n        return x\n    if context.executing_eagerly() or isinstance(x, ops.EagerTensor):\n        return x.numpy()\n    if not getattr(x, '_in_graph_mode', True):\n        with context.eager_mode():\n            return x.numpy()\n    if ops.executing_eagerly_outside_functions():\n        with ops.init_scope():\n            return x.numpy()\n    with x.graph.as_default():\n        return x.eval(session=get_session((x,)))", "docstring": "Returns the value of a variable.\n\n`backend.get_value` is the complement of `backend.set_value`, and provides\na generic interface for reading from variables while abstracting away the\ndifferences between TensorFlow 1.x and 2.x semantics.\n\n{snippet}\n\nArgs:\nx: input variable.\n\nReturns:\nA Numpy array.", "source": "github-repos"}
{"code": "async def get(self, cid, coinid):\n    if settings.SIGNATURE_VERIFICATION:\n        super().verify()\n    message = json.loads(self.get_argument('message', '{}'))\n    public_key = message.get('public_key')\n    if (coinid in settings.bridges.keys()):\n        self.account.blockchain.setendpoint(settings.bridges[coinid])\n    content = (await self.account.blockchain.getsinglecontent(cid=cid))\n    if ('error' in content.keys()):\n        self.set_status(content['error'])\n        self.write(content)\n        raise tornado.web.Finish\n    account = (await self.account.getaccountbywallet(wallet=content['owneraddr']))\n    if ('error' in account.keys()):\n        self.set_status(account['error'])\n        self.write(account)\n        raise tornado.web.Finish\n    cids = (await self.account.getuserscontent(public_key=public_key))\n    deals = (await self.account.getdeals(buyer=public_key))\n    if (int(content['cid']) in [i[0] for i in cids.get(coinid, [])]):\n        content['access_type'] = 'write_access'\n    elif (int(content['cid']) in [i[0] for i in deals.get(coinid, [])]):\n        content['access_type'] = 'read_access'\n    try:\n        offer = (await self.account.blockchain.getoffer(cid=cid, buyer_address=self.account.validator[coinid](public_key)))\n        content['owner'] = account.get('public_key')\n        content['seller_access_string'] = offer.get('seller_access_string')\n        content['seller_pubkey'] = offer.get('seller_public_key')\n    except:\n        pass\n    self.write(content)", "docstring": "Receives content by content id and coin id\n\nAccepts:\nQuery string arguments:\n- \"cid\" - int\n- \"coinid\" - str\n\nReturns:\nreturn dict with following fields:\n- \"description\" - str\n- \"read_access\" - int\n- \"write_access\" - int\n- \"content\" - str\n- \"cid\" - int\n- \"owneraddr\" - str\n- \"owner\" - str\n- \"coinid\" - str\n\nVerified: True", "source": "codesearchnet"}
{"code": "def reduce_to_2d(arr):\n  \n  if not isinstance(arr, np.ndarray):\n    raise ValueError('reduce_to_2d requires a numpy.ndarray')\n\n  ndims = len(arr.shape)\n  if ndims < 2:\n    raise ValueError('reduce_to_2d requires an array of dimensionality >=2')\n  \n  slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]\n  return arr[slices]", "docstring": "Given a np.npdarray with nDims > 2, reduce it to 2d.\n\nIt does this by selecting the zeroth coordinate for every dimension greater\nthan two.\n\nArgs:\narr: a numpy ndarray of dimension at least 2.\n\nReturns:\nA two-dimensional subarray from the input array.\n\nRaises:\nValueError: If the argument is not a numpy ndarray, or the dimensionality\nis too low.", "source": "juraj-google-style"}
{"code": "def image_feature_engineering(features, feature_tensors_dict):\n    engineered_features = {}\n    for (name, feature_tensor) in six.iteritems(feature_tensors_dict):\n        if ((name in features) and (features[name]['transform'] == IMAGE_TRANSFORM)):\n            with tf.name_scope(name, 'Wx_plus_b'):\n                hidden = tf.contrib.layers.fully_connected(feature_tensor, IMAGE_HIDDEN_TENSOR_SIZE)\n                engineered_features[name] = hidden\n        else:\n            engineered_features[name] = feature_tensor\n    return engineered_features", "docstring": "Add a hidden layer on image features.\n\nArgs:\nfeatures: features dict\nfeature_tensors_dict: dict of feature-name: tensor", "source": "codesearchnet"}
{"code": "def recover(self, history: Iterable[Trial]) -> None:\n    for trial in history:\n        if trial.status in ['COMPLETED', 'PENDING', 'STOPPING']:\n            self.should_stop_early(trial)", "docstring": "Recover states by replaying the trial history.\n\nSubclass can override.\n\nNOTE: `recover` will always be called before the first `should_stop_early`\nis called. It could be called multiple times if there are multiple source\nof history, e.g: trials from a previous study and existing trials from\ncurrent study.\n\nThe default behavior is to replay `should_stop_early` on all trials that\ncontain all intermediate measurements.\n\nArgs:\nhistory: An iterable object of trials.", "source": "github-repos"}
{"code": "def _list_to_string(l, s):\n    return s.join(l)", "docstring": "Concatenates list items into a single string separated by `s`.\n\nArgs:\nl: List with items to be concatenated into a single string.\ns: String or char that will be concatenated in between each item.\n\nReturns:\nString that has all items in list `l` concatenated with `s` separator.", "source": "github-repos"}
{"code": "def get_keys(keyfiles, signature_type):\n    builtin_keys = {('release', 'sha1'): [mardor.mozilla.release1_sha1, mardor.mozilla.release2_sha1], ('release', 'sha384'): [mardor.mozilla.release1_sha384, mardor.mozilla.release2_sha384], ('nightly', 'sha1'): [mardor.mozilla.nightly1_sha1, mardor.mozilla.nightly2_sha1], ('nightly', 'sha384'): [mardor.mozilla.nightly1_sha384, mardor.mozilla.nightly2_sha384], ('dep', 'sha1'): [mardor.mozilla.dep1_sha1, mardor.mozilla.dep2_sha1], ('dep', 'sha384'): [mardor.mozilla.dep1_sha384, mardor.mozilla.dep2_sha384], ('autograph-stage', 'sha384'): [mardor.mozilla.autograph_stage_sha384]}\n    keys = []\n    for keyfile in keyfiles:\n        if keyfile.startswith(':mozilla-'):\n            name = keyfile.split(':mozilla-')[1]\n            try:\n                keys.extend(builtin_keys[(name, signature_type)])\n            except KeyError:\n                raise ValueError('Invalid internal key name: {}'.format(keyfile))\n        else:\n            key = open(keyfile, 'rb').read()\n            keys.append(key)\n    return keys", "docstring": "Get public keys for the given keyfiles.\n\nArgs:\nkeyfiles: List of filenames with public keys, or :mozilla- prefixed key\nnames\nsignature_type: one of 'sha1' or 'sha384'\n\nReturns:\nList of public keys as strings", "source": "codesearchnet"}
{"code": "def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    declarator_end = line.rfind(')')\n    if (declarator_end >= 0):\n        fragment = line[declarator_end:]\n    elif ((linenum > 1) and (clean_lines.elided[(linenum - 1)].rfind(')') >= 0)):\n        fragment = line\n    else:\n        return\n    if (Search('\\\\boverride\\\\b', fragment) and Search('\\\\bfinal\\\\b', fragment)):\n        error(filename, linenum, 'readability/inheritance', 4, '\"override\" is redundant since function is already declared as \"final\"')", "docstring": "Check if line contains a redundant \"override\" or \"final\" virt-specifier.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    shutdown_value = registry_key.GetValueByName('ShutdownTime')\n    if not shutdown_value:\n      return\n\n    try:\n      date_time = self._ParseFiletime(shutdown_value.data)\n    except errors.ParseError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to determine shutdown timestamp with error: {0!s}'.format(\n              exception))\n      return\n\n    if not date_time:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n\n    event_data = ShutdownWindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = shutdown_value.offset\n    event_data.value_name = shutdown_value.name\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a ShutdownTime Windows Registry value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def GetPathInfo(self, timestamp=None):\n    path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)\n    try:\n        result = self._path_infos[path_info_timestamp].Copy()\n    except KeyError:\n        result = rdf_objects.PathInfo(path_type=self._path_type, components=self._components)\n    stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries, timestamp)\n    result.last_stat_entry_timestamp = stat_entry_timestamp\n    result.stat_entry = self._stat_entries.get(stat_entry_timestamp)\n    hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries, timestamp)\n    result.last_hash_entry_timestamp = hash_entry_timestamp\n    result.hash_entry = self._hash_entries.get(hash_entry_timestamp)\n    return result", "docstring": "Generates a summary about the path record.\n\nArgs:\ntimestamp: A point in time from which the data should be retrieved.\n\nReturns:\nA `rdf_objects.PathInfo` instance.", "source": "codesearchnet"}
{"code": "class ProgbarLogger(Callback):\n\n    def __init__(self, count_mode='samples', stateful_metrics=None):\n        super(ProgbarLogger, self).__init__()\n        self._supports_tf_logs = True\n        if count_mode == 'samples':\n            self.use_steps = False\n        elif count_mode == 'steps':\n            self.use_steps = True\n        else:\n            raise ValueError('Unknown `count_mode`: ' + str(count_mode))\n        self.stateful_metrics = set(stateful_metrics) if stateful_metrics else set()\n        self.seen = 0\n        self.progbar = None\n        self.target = None\n        self.verbose = 1\n        self.epochs = 1\n        self._train_step, self._test_step, self._predict_step = (None, None, None)\n        self._call_batch_hooks = True\n        self._called_in_fit = False\n\n    def set_params(self, params):\n        self.verbose = params['verbose']\n        self.epochs = params['epochs']\n        if self.use_steps and 'steps' in params:\n            self.target = params['steps']\n        elif not self.use_steps and 'samples' in params:\n            self.target = params['samples']\n        else:\n            self.target = None\n        self._call_batch_hooks = self.verbose == 1\n        if self.target is None:\n            try:\n                self._train_step = self.model._train_counter\n                self._test_step = self.model._test_counter\n                self._predict_step = self.model._predict_counter\n            except AttributeError:\n                self._call_batch_hooks = True\n\n    def on_train_begin(self, logs=None):\n        self._called_in_fit = True\n\n    def on_test_begin(self, logs=None):\n        if not self._called_in_fit:\n            self._reset_progbar()\n            self._maybe_init_progbar()\n\n    def on_predict_begin(self, logs=None):\n        self._reset_progbar()\n        self._maybe_init_progbar()\n\n    def on_epoch_begin(self, epoch, logs=None):\n        self._reset_progbar()\n        self._maybe_init_progbar()\n        if self.verbose and self.epochs > 1:\n            print('Epoch %d/%d' % (epoch + 1, self.epochs))\n\n    def on_train_batch_end(self, batch, logs=None):\n        self._batch_update_progbar(batch, logs)\n\n    def on_test_batch_end(self, batch, logs=None):\n        if not self._called_in_fit:\n            self._batch_update_progbar(batch, logs)\n\n    def on_predict_batch_end(self, batch, logs=None):\n        self._batch_update_progbar(batch, None)\n\n    def on_epoch_end(self, epoch, logs=None):\n        self._finalize_progbar(logs, self._train_step)\n\n    def on_test_end(self, logs=None):\n        if not self._called_in_fit:\n            self._finalize_progbar(logs, self._test_step)\n\n    def on_predict_end(self, logs=None):\n        self._finalize_progbar(logs, self._predict_step)\n\n    def _reset_progbar(self):\n        self.seen = 0\n        self.progbar = None\n\n    def _maybe_init_progbar(self):\n        \n        self.stateful_metrics = set(self.stateful_metrics)\n        if self.model:\n            self.stateful_metrics = self.stateful_metrics.union(set((m.name for m in self.model.metrics)))\n        if self.progbar is None:\n            self.progbar = Progbar(target=self.target, verbose=self.verbose, stateful_metrics=self.stateful_metrics, unit_name='step' if self.use_steps else 'sample')\n        self.progbar._update_stateful_metrics(self.stateful_metrics)\n\n    def _implements_train_batch_hooks(self):\n        return self._call_batch_hooks\n\n    def _implements_test_batch_hooks(self):\n      
  return self._call_batch_hooks\n\n    def _implements_predict_batch_hooks(self):\n        return self._call_batch_hooks\n\n    def _batch_update_progbar(self, batch, logs=None):\n        \n        logs = logs or {}\n        self._maybe_init_progbar()\n        if self.use_steps:\n            self.seen = batch + 1\n        else:\n            logs = copy.copy(logs)\n            batch_size = logs.pop('size', 0)\n            num_steps = logs.pop('num_steps', 1)\n            logs.pop('batch', None)\n            add_seen = num_steps * batch_size\n            self.seen += add_seen\n        if self.verbose == 1:\n            logs = tf_utils.sync_to_numpy_or_python_type(logs)\n            self.progbar.update(self.seen, list(logs.items()), finalize=False)\n\n    def _finalize_progbar(self, logs, counter):\n        logs = tf_utils.sync_to_numpy_or_python_type(logs or {})\n        if self.target is None:\n            if counter is not None:\n                counter = counter.numpy()\n                if not self.use_steps:\n                    counter *= logs.get('size', 1)\n            self.target = counter or self.seen\n            self.progbar.target = self.target\n        self.progbar.update(self.target, list(logs.items()), finalize=True)", "docstring": "Callback that prints metrics to stdout.\n\nArgs:\ncount_mode: One of `\"steps\"` or `\"samples\"`.\nWhether the progress bar should\ncount samples seen or steps (batches) seen.\nstateful_metrics: Iterable of string names of metrics that\nshould *not* be averaged over an epoch.\nMetrics in this list will be logged as-is.\nAll others will be averaged over time (e.g. loss, etc).\nIf not provided, defaults to the `Model`'s metrics.\n\nRaises:\nValueError: In case of invalid `count_mode`.", "source": "github-repos"}
{"code": "def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):\n    with ops.name_scope(name, 'multinomial', [logits]):\n        return multinomial_categorical_impl(logits, num_samples, output_dtype, seed)", "docstring": "Draws samples from a multinomial distribution.\n\nExample:\n\n```python\n# samples has shape [1, 5], where each value is either 0 or 1 with equal\n# probability.\nsamples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5)\n```\n\nArgs:\nlogits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice\n`[i, :]` represents the unnormalized log-probabilities for all classes.\nnum_samples: 0-D.  Number of independent samples to draw for each row slice.\nseed: A Python integer. Used to create a random seed for the distribution.\nSee `tf.random.set_seed` for behavior.\nname: Optional name for the operation.\noutput_dtype: The integer type of the output: `int32` or `int64`. Defaults\nto `int64`.\n\nReturns:\nThe drawn samples of shape `[batch_size, num_samples]`.", "source": "github-repos"}
{"code": "def normalise(self, to_currency):\n        \n        out = Money(currency=to_currency)\n        for money in self._money_obs:\n            out += converter.convert(money, to_currency)\n        return Balance([out])", "docstring": "Normalise this balance into a single currency\n\nArgs:\nto_currency (str): Destination currency\n\nReturns:\n(Balance): A new balance object containing a single Money value in the specified currency", "source": "juraj-google-style"}
{"code": "def should_close(http_version, connection_field):\n    \n    connection_field = (connection_field or '').lower()\n\n    if http_version == 'HTTP/1.0':\n        return connection_field.replace('-', '') != 'keepalive'\n    else:\n        return connection_field == 'close'", "docstring": "Return whether the connection should be closed.\n\nArgs:\nhttp_version (str): The HTTP version string like ``HTTP/1.0``.\nconnection_field (str): The value for the ``Connection`` header.", "source": "juraj-google-style"}
{"code": "def download_file(self, url):\n        \n        response = requests.get(url, stream=True)\n        response.raise_for_status()\n        return (int(response.headers.get('content-length', 0)), response)", "docstring": "Initiate a streaming download\n\nArgs:\nurl (str): The url to download\n\nReturns:\nA tuple of the content length and the streaming response", "source": "juraj-google-style"}
{"code": "def latlong(text):\n    \n    nlat, nlon = text.split(',')\n    return (float(nlat), float(nlon))", "docstring": "Chop a latlong string and return (float,float).\nDoes not perform validation on the coordinates.\n\nArgs:\ntext (str):  A longitude,latitude string.\n\nReturns:\n(float,float): A longitude, latitude float tuple.", "source": "juraj-google-style"}
{"code": "def _wrap_callback_errors(callback, message):\n    try:\n        callback(message)\n    except Exception:\n        _LOGGER.exception('Top-level exception occurred in callback while processing a message')\n        message.nack()", "docstring": "Wraps a user callback so that if an exception occurs the message is\nnacked.\n\nArgs:\ncallback (Callable[None, Message]): The user callback.\nmessage (~Message): The Pub/Sub message.", "source": "codesearchnet"}
{"code": "def authentication(self, username, password):\n    _auth_text = '{}:{}'.format(username, password)\n    if (int(sys.version[0]) > 2):\n        _auth_bin = base64.encodebytes(_auth_text.encode())\n        _auth = _auth_bin.decode()\n        _auth = _auth.replace('\\n', '')\n        self._auth = _auth\n    else:\n        _auth = base64.encodestring(_auth_text)\n        self._auth = str(_auth).replace('\\n', '')\n    _LOGGER.debug('Autentication string is: {}:***'.format(username))", "docstring": "Configures the user authentication for eAPI\n\nThis method configures the username and password combination to use\nfor authenticating to eAPI.\n\nArgs:\nusername (str): The username to use to authenticate the eAPI\nconnection with\npassword (str): The password in clear text to use to authenticate\nthe eAPI connection with", "source": "codesearchnet"}
{"code": "def open_window(self, private=False):\n        \n        handles_before = self.selenium.window_handles\n        self.switch_to()\n\n        with self.selenium.context(self.selenium.CONTEXT_CHROME):\n            \n            self.selenium.find_element(*self._file_menu_button_locator).click()\n            if private:\n                self.selenium.find_element(\n                    *self._file_menu_private_window_locator\n                ).click()\n            else:\n                self.selenium.find_element(\n                    *self._file_menu_new_window_button_locator\n                ).click()\n\n        return self.wait.until(\n            expected.new_browser_window_is_opened(self.selenium, handles_before),\n            message=\"No new browser window opened\",\n        )", "docstring": "Open a new browser window.\n\nArgs:\nprivate (bool): Optional parameter to open a private browsing\nwindow. Defaults to False.\n\nReturns:\n:py:class:`BrowserWindow`: Opened window.", "source": "juraj-google-style"}
{"code": "class FlaubertPoolerAnswerClass(nn.Module):\n\n    def __init__(self, config: FlaubertConfig):\n        super().__init__()\n        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n        self.activation = nn.Tanh()\n        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)\n\n    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n        \n        hsz = hidden_states.shape[-1]\n        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n        if start_positions is not None:\n            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)\n        if cls_index is not None:\n            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)\n            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)\n        else:\n            cls_token_state = hidden_states[:, -1, :]\n        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))\n        x = self.activation(x)\n        x = self.dense_1(x).squeeze(-1)\n        return x", "docstring": "Compute SQuAD 2.0 answer class from classification and start tokens hidden states.\n\nArgs:\nconfig ([`FlaubertConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model.", "source": "github-repos"}
{"code": "def run_tpm(system, steps, blackbox):\n    node_tpms = []\n    for node in system.nodes:\n        node_tpm = node.tpm_on\n        for input_node in node.inputs:\n            if (not blackbox.in_same_box(node.index, input_node)):\n                if (input_node in blackbox.output_indices):\n                    node_tpm = marginalize_out([input_node], node_tpm)\n        node_tpms.append(node_tpm)\n    noised_tpm = rebuild_system_tpm(node_tpms)\n    noised_tpm = convert.state_by_node2state_by_state(noised_tpm)\n    tpm = convert.state_by_node2state_by_state(system.tpm)\n    tpm = np.dot(tpm, np.linalg.matrix_power(noised_tpm, (steps - 1)))\n    return convert.state_by_state2state_by_node(tpm)", "docstring": "Iterate the TPM for the given number of timesteps.\n\nReturns:\nnp.ndarray: tpm * (noise_tpm^(t-1))", "source": "codesearchnet"}
{"code": "def UpdateCacheFromSource(self, cache, source, incremental=False, force_write=False, location=None):\n    return_val = 0\n    cache_filename = cache.GetCacheFilename()\n    if cache_filename is not None:\n        new_file_fd, new_file = tempfile.mkstemp(dir=os.path.dirname(cache_filename), prefix=os.path.basename(cache_filename), suffix='.nsscache.tmp')\n    else:\n        raise error.CacheInvalid('Cache has no filename.')\n    self.log.debug('temp source filename: %s', new_file)\n    try:\n        source.GetFile(self.map_name, new_file, current_file=cache.GetCacheFilename(), location=location)\n        os.lseek(new_file_fd, 0, os.SEEK_SET)\n        source_cache = cache_factory.Create(self.cache_options, self.map_name)\n        source_map = source_cache.GetMap(new_file)\n        return_val += self._FullUpdateFromFile(cache, source_map, force_write)\n    finally:\n        try:\n            os.unlink(new_file)\n        except OSError as e:\n            if e.errno != errno.ENOENT:\n                raise\n    return return_val", "docstring": "Update a single cache file, from a given source.\n\nArgs:\ncache: A nss_cache.caches.Cache object.\nsource: A nss_cache.sources.Source object.\nincremental: We ignore this.\nforce_write: A boolean flag forcing empty map updates when False,\ndefaults to False.\nlocation: The optional location in the source of this map used by\nautomount to specify which automount map to get, defaults to None.\n\nReturns:\nAn int indicating the success of an update (0 == good, fail otherwise).", "source": "github-repos"}
{"code": "def _load_info(self):\n        \n\n        url = '%s/prefix?duration=36000' % self.base_url\n        r = self.gbdx_connection.get(url)\n        r.raise_for_status()\n        return r.json()", "docstring": "Get user info for GBDX S3, put into instance vars for convenience.\n\nArgs:\nNone.\n\nReturns:\nDictionary with S3 access key, S3 secret key, S3 session token,\nuser bucket and user prefix (dict).", "source": "juraj-google-style"}
{"code": "def detail_poi(self, **kwargs):\n    params = {'language': util.language_code(kwargs.get('lang')), 'family': kwargs.get('family')}\n    if kwargs.get('id'):\n        params['id'] = kwargs['id']\n    result = self.make_request('detail_poi', {}, **params)\n    if (not util.check_result(result)):\n        return (False, result.get('message', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'Data')\n    return (True, [emtype.PoiDetails(**a) for a in values])", "docstring": "Obtain detailed info of a given POI.\n\nArgs:\nfamily (str): Family code of the POI (3 chars).\nlang (str): Language code (*es* or *en*).\nid (int): Optional, ID of the POI to query. Passing value -1 will\nresult in information from all POIs.\n\nReturns:\nStatus boolean and parsed response (list[PoiDetails]), or\nmessage string in case of error.", "source": "codesearchnet"}
{"code": "def list_dir(self, context):\n    doc = inspect.getdoc(context)\n    listing = ''\n    listing += '\\n'\n    listing += (annotate.context_name(context) + '\\n')\n    if (doc is not None):\n        doc = inspect.cleandoc(doc)\n        listing += (doc + '\\n')\n    listing += '\\nDefined Functions:\\n'\n    is_dict = False\n    if isinstance(context, dict):\n        funs = context.keys()\n        is_dict = True\n    else:\n        funs = utils.find_all(context)\n    for fun in sorted(funs):\n        override_name = None\n        if is_dict:\n            override_name = fun\n        fun = self.find_function(context, fun)\n        if isinstance(fun, dict):\n            if is_dict:\n                listing += ((' - ' + override_name) + '\\n')\n            else:\n                listing += ((' - ' + fun.metadata.name) + '\\n')\n        else:\n            listing += ((' - ' + fun.metadata.signature(name=override_name)) + '\\n')\n        if (annotate.short_description(fun) != ''):\n            listing += (('   ' + annotate.short_description(fun)) + '\\n')\n    listing += '\\nBuiltin Functions\\n'\n    for bif in sorted(self.builtins.keys()):\n        listing += ((' - ' + bif) + '\\n')\n    listing += '\\n'\n    return listing", "docstring": "Return a listing of all of the functions in this context including builtins.\n\nArgs:\ncontext (object): The context to print a directory for.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def update_ip_info(self, since_days=10, save=False, force=False):\n    try:\n        last_check = IPInfoCheck.objects.get(ip_address=self.client_ip_address)\n        since_last = (datetime.date.today() - last_check.date)\n        if (since_last <= datetime.timedelta(days=since_days)):\n            if ((not self.ip_info) or ((self.ip_info != last_check.ip_info) and force)):\n                self.ip_info = last_check.ip_info\n                self.save()\n                return True\n            elif save:\n                self.save()\n            return False\n        (ip_info, created) = IPInfo.get_or_create_from_ip(self.client_ip_address)\n        last_check.date = datetime.date.today()\n        last_check.save()\n        if created:\n            last_check.ip_info = ip_info\n            self.ip_info = ip_info\n            self.save()\n            return True\n        elif save:\n            self.save()\n        return False\n    except IPInfoCheck.DoesNotExist:\n        self.ip_info = IPInfoCheck.check_ip(self.client_ip_address)\n        self.save()\n        return True", "docstring": "Update the IP info.\n\nArgs:\nsince_days (int): if checked less than this number of days ago,\ndon't check again (default to 10 days).\nsave (bool): whether to save anyway or not.\nforce (bool): whether to update ip_info to last checked one.\n\nReturns:\nbool: check was run. IPInfo might not have been updated.", "source": "codesearchnet"}
{"code": "def _endpoint_to_target(self, endpoint):\n        \n        parsed = urlparse.urlparse(endpoint)\n        scheme = parsed[0]\n        hostport = parsed[1]\n\n        if 'unix' in scheme:\n            return (None, None, unquote(hostport))\n\n        if scheme == 'https':\n            target_port = 443\n        else:\n            target_port = 80\n\n        (target_host, target_port) = self._split_hostport(hostport, default_port=target_port)\n        return (target_host, target_port, None)", "docstring": "Convert a URL into a host / port, or into a path to a unix domain socket\n\nArgs:\nendpoint (str): A URL parsable by urlparse\n\nReturns:\n3 item tuple: (host, port, path).\nhost and port will None, and path will be not None if a a unix domain socket URL is passed\npath will be None if a normal TCP based URL is passed", "source": "juraj-google-style"}
{"code": "def get_all_text(tweet):\n    \n    if is_original_format(tweet):\n        return \"\\n\".join(filter(None, [tweet.user_entered_text,\n                                       tweet.quote_or_rt_text,\n                                       \"\\n\".join(tweet.poll_options)]))\n    else:\n        return \"\\n\".join(filter(None, [tweet.user_entered_text,\n                                       tweet.quote_or_rt_text]))", "docstring": "Get all of the text of the tweet. This includes @ mentions, long links,\nquote-tweet contents (separated by a newline), RT contents & poll options\n\nArgs:\ntweet (Tweet): A Tweet object (must be a Tweet object)\n\nReturns:\nstr: text from tweet.user_entered_text, tweet.quote_or_rt_text and\ntweet.poll_options (if in original format), separated by newlines", "source": "juraj-google-style"}
{"code": "def GetSeverityString(self, severity):\n    if (0 <= severity < len(self._SEVERITY)):\n        return self._SEVERITY[severity]\n    return 'Unknown {0:d}'.format(severity)", "docstring": "Retrieves a string representation of the severity.\n\nArgs:\nseverity (int): severity.\n\nReturns:\nstr: description of the event severity.", "source": "codesearchnet"}
{"code": "def from_string(species_string: str):\n    m = re.search('([A-Z][a-z]*)([0-9.]*)([+\\\\-])(.*)', species_string)\n    if m:\n        sym = m.group(1)\n        oxi = (1 if (m.group(2) == '') else float(m.group(2)))\n        oxi = ((- oxi) if (m.group(3) == '-') else oxi)\n        properties = None\n        if m.group(4):\n            toks = m.group(4).replace(',', '').split('=')\n            properties = {toks[0]: float(toks[1])}\n        return Specie(sym, oxi, properties)\n    else:\n        raise ValueError('Invalid Species String')", "docstring": "Returns a Specie from a string representation.\n\nArgs:\nspecies_string (str): A typical string representation of a\nspecies, e.g., \"Mn2+\", \"Fe3+\", \"O2-\".\n\nReturns:\nA Specie object.\n\nRaises:\nValueError if species_string cannot be intepreted.", "source": "codesearchnet"}
{"code": "def Graph(self):\n    graph = graph_pb2.GraphDef()\n    if (self._graph is not None):\n        graph.ParseFromString(self._graph)\n        return graph\n    raise ValueError('There is no graph in this EventAccumulator')", "docstring": "Return the graph definition, if there is one.\n\nIf the graph is stored directly, return that.  If no graph is stored\ndirectly but a metagraph is stored containing a graph, return that.\n\nRaises:\nValueError: If there is no graph for this run.\n\nReturns:\nThe `graph_def` proto.", "source": "codesearchnet"}
{"code": "def write_file_elements_to_strings_file(file_path, file_elements):\n    \n    f = open_strings_file(file_path, \"w\")\n    for element in file_elements:\n        f.write(unicode(element))\n        f.write(u\"\\n\")\n\n    f.close()", "docstring": "Write elements to the string file\n\nArgs:\nfile_path (str): The path to the strings file\nfile_elements (list) : List of elements to write to the file.", "source": "juraj-google-style"}
{"code": "def create(self, resource, timeout=(- 1)):\n    return self._client.create(resource, timeout=timeout, default_values=self.DEFAULT_VALUES)", "docstring": "Creates a scope.\n\nArgs:\nresource (dict): Object to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Created scope.", "source": "codesearchnet"}
{"code": "def _CheckpointFilename(self, p):\n    name, _ = p\n    return name", "docstring": "Returns the checkpoint filename given a `(filename, time)` pair.\n\nArgs:\np: (filename, time) pair.\n\nReturns:\nCheckpoint file name.", "source": "github-repos"}
{"code": "async def subscriptions(self, request):\n        \n\n        if not self._accepting:\n            return web.Response(status=503)\n\n        web_sock = web.WebSocketResponse()\n        await web_sock.prepare(request)\n\n        async for msg in web_sock:\n            if msg.type == aiohttp.WSMsgType.TEXT:\n                await self._handle_message(web_sock, msg.data)\n            elif msg.type == aiohttp.WSMsgType.ERROR:\n                LOGGER.warning(\n                    'Web socket connection closed with exception %s',\n                    web_sock.exception())\n                await web_sock.close()\n\n        await self._handle_unsubscribe(web_sock)\n\n        return web_sock", "docstring": "Handles requests for new subscription websockets.\n\nArgs:\nrequest (aiohttp.Request): the incoming request\n\nReturns:\naiohttp.web.WebSocketResponse: the websocket response, when the\nresulting websocket is closed", "source": "juraj-google-style"}
{"code": "def invoke_step(self, context):\n    logger.debug('starting')\n    logger.debug(f'running step {self.module}')\n    self.run_step_function(context)\n    logger.debug(f'step {self.module} done')", "docstring": "Invoke 'run_step' in the dynamically loaded step module.\n\nDon't invoke this from outside the Step class. Use\npypyr.dsl.Step.run_step instead.\ninvoke_step just does the bare module step invocation, it does not\nevaluate any of the decorator logic surrounding the step. So unless\nyou really know what you're doing, use run_step if you intend on\nexecuting the step the same way pypyr does.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "codesearchnet"}
{"code": "def get_metar_from_mission(mission_file: str, icao: str='XXXX', time: str=None) -> str:\n    return _MetarFromMission(mission_file=mission_file, icao=icao, time=time).metar", "docstring": "Builds a dummy METAR string from a mission file\n\nArgs:\nmission_file: input mission file\nicao: dummy ICAO (defaults to XXXX)\ntime: dummy time (defaults to now())\n\nReturns: METAR str", "source": "codesearchnet"}
{"code": "def _chunk_query(l, n, cn, conn, table, db_type):\n    [insert_query_m(l[i:(i + n)], table, conn, cn, db_type) for i in range(0, len(l), n)]", "docstring": "Call for inserting SQL query in chunks based on n rows\n\nArgs:\nl (list): List of tuples\nn (int): Number of rows\ncn (str): Column names\nconn (connection object): Database connection object\ntable (str): Table name\ndb_type (str): If \"sqlite\" or \"mysql\"", "source": "codesearchnet"}
{"code": "def _CreateConfig(self, project_id):\n    project_id = (project_id or self._GetNumericProjectId())\n    if (not project_id):\n        return\n    self.boto_config_header %= (self.boto_config_script, self.boto_config_template)\n    config = config_manager.ConfigManager(config_file=self.boto_config_template, config_header=self.boto_config_header)\n    boto_dir = os.path.dirname(self.boto_config_script)\n    config.SetOption('GSUtil', 'default_project_id', project_id)\n    config.SetOption('GSUtil', 'default_api_version', '2')\n    config.SetOption('GoogleCompute', 'service_account', 'default')\n    config.SetOption('Plugin', 'plugin_directory', boto_dir)\n    config.WriteConfig(config_file=self.boto_config)", "docstring": "Create the boto config to support standalone GSUtil.\n\nArgs:\nproject_id: string, the project ID to use in the config file.", "source": "codesearchnet"}
{"code": "def _resource_apply_sparse(self, grad, handle, indices):\n    raise NotImplementedError()", "docstring": "Add ops to apply sparse gradients to the variable `handle`.\n\nSimilar to `_apply_sparse`, the `indices` argument to this method has been\nde-duplicated. Optimizers which deal correctly with non-unique indices may\ninstead override `_resource_apply_sparse_duplicate_indices` to avoid this\noverhead.\n\nArgs:\ngrad: a `Tensor` representing the gradient for the affected indices.\nhandle: a `Tensor` of dtype `resource` which points to the variable\nto be updated.\nindices: a `Tensor` of integral type representing the indices for\nwhich the gradient is nonzero. Indices are unique.\n\nReturns:\nAn `Operation` which updates the value of the variable.", "source": "github-repos"}
{"code": "def _parse_resources(resource_values: dict, resource_name: str) -> dict:\n    resources = {}\n    for r_values in resource_values[resource_name]:\n        if ('limits' in r_values):\n            for (r_key, r_value) in resource_values[resource_name][r_values].items():\n                if ('cpu' in r_key):\n                    cpu_value = (float(r_value) * (10 ** 9))\n                    cpu_key = (r_key[:3] + '_limit')\n                    resources[cpu_key] = int(cpu_value)\n                if ('mem' in r_key):\n                    mem_value = re.sub('M', '', r_value)\n                    mem_key = (r_key[:3] + '_limit')\n                    resources[mem_key] = (int(mem_value) * 1048576)\n    resources_spec = docker.types.Resources(**resources)\n    return resources_spec", "docstring": "Parse resources key.\n\nArgs:\nresource_values (dict): resource configurations values\nresource_name (string): Resource name\n\nReturns:\ndict, resources specification", "source": "codesearchnet"}
{"code": "def _add_resource_to_collection(parent_resource: Dict[str, Any], resource_json: Dict[str, Any], collections_per_resource_type: Dict[str, ResourceCollection]) -> None:\n    resource_type = resource_json.get('resourceType')\n    if resource_type in collections_per_resource_type:\n        collections_per_resource_type[resource_type].put(resource_json, parent_resource)\n    elif resource_type == 'Bundle':\n        for entry in resource_json.get('entry', ()):\n            bundle_resource = entry.get('resource')\n            if bundle_resource:\n                _add_resource_to_collection(parent_resource, bundle_resource, collections_per_resource_type)", "docstring": "Adds an entry for the given resource to the appropriate collection.\n\nAdds the resource described by `resource_json` found within `parent_resource`\nto the appropriate ResourceCollection of the given `fhir_package`. Allows the\nresource to subsequently be retrieved by its URL from the FhirPackage. In the\ncase where `resource_json` is located inside a bundle, `parent_resource` will\nbe the bundle containing the resource. Otherwise, `resource_json` and\n`parent_resource` will be the same JSON object. If the JSON is not a FHIR\nresource, or not a resource type tracked by the PackageManager, does nothing.\n\nArgs:\nparent_resource: The bundle `resource_json` can be found inside, or the\nresource itself if it is not part of a bundle.\nresource_json: The parsed JSON representation of the resource to add.\ncollections_per_resource_type: The set of `ResourceCollection`s to add the\nresource to.", "source": "github-repos"}
{"code": "def __setstate__(self, state):\n        \n        if isinstance(state, tuple):\n            \n            \n            self.__init__(state[0])\n        elif isinstance(state, basestring):\n            \n            \n            self.__init__(state)\n        elif isinstance(state, dict):\n            \n            \n            if '__frange' in state and '__set' in state and '__list' in state:\n                self._frange = state['__frange']\n                self._items = frozenset(state['__set'])\n                self._order = tuple(state['__list'])\n            else:\n                for k in self.__slots__:\n                    setattr(self, k, state[k])\n        else:\n            msg = \"Unrecognized state data from which to deserialize FrameSet\"\n            raise ValueError(msg)", "docstring": "Allows for de-serialization from a pickled :class:`FrameSet`.\n\nArgs:\nstate (tuple or str or dict): A string/dict can be used for\nbackwards compatibility\n\nRaises:\nValueError: if state is not an appropriate type", "source": "juraj-google-style"}
{"code": "def _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver):\n    result = {}\n    cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_resolver.cluster_spec())\n    dtensor_jobs = []\n    if 'chief' in cluster_spec.jobs:\n        dtensor_jobs.extend(cluster_spec.job_tasks('chief'))\n    if 'worker' in cluster_spec.jobs:\n        dtensor_jobs.extend(cluster_spec.job_tasks('worker'))\n    if None in dtensor_jobs:\n        raise ValueError(f'Unexpected dtensor job address from cluster spec: {cluster_spec}')\n    result['DTENSOR_JOBS'] = ','.join(dtensor_jobs)\n    result['DTENSOR_NUM_CLIENTS'] = str(len(dtensor_jobs))\n    if cluster_resolver.task_type == 'chief':\n        dtensor_client_id = 0\n    elif cluster_resolver.task_type == 'worker':\n        dtensor_client_id = cluster_resolver.task_id\n        if 'chief' in cluster_spec.jobs:\n            dtensor_client_id += 1\n    result['DTENSOR_CLIENT_ID'] = str(dtensor_client_id)\n    result['DTENSOR_JOB_NAME'] = 'worker'\n    return result", "docstring": "Parse the env vars for Dtensor based on the cluster resolver.\n\nIn the multi-client setting, each of the DTensor jobs need to aware of each\nother, and the interface to setup those values are via the envvars. The\nvalue used by dtensor are different from the existing\n`MultiWorkerMirroredStrategy`. This function will parse the value from\ncluster resolver, and populate the corresponding value for DTensor jobs in the\n`os.environ`.\n\nArgs:\ncluster_resolver: A `tf.distribute.cluster_resolver.ClusterResolver`\ninstance.\n\nReturns:\nA dict of {Str:Str} which contains all the env vars needed by DTensor jobs.\nThe value is for verification purpose.\n\nRaises:\nThe value parsed from existing cluster spec is not valid.", "source": "github-repos"}
{"code": "def _set_current_subscript(self, active):\n    current_subscript = self.sender()\n    if active:\n        for subscript_name in list(self._current_subscript_stage['subscript_exec_count'].keys()):\n            if (subscript_name == current_subscript.name):\n                self._current_subscript_stage['subscript_exec_count'][subscript_name] += 1\n        self._current_subscript_stage['current_subscript'] = current_subscript\n    else:\n        self._current_subscript_stage['current_subscript'] = current_subscript\n        for subscript_name in list(self._current_subscript_stage['subscript_exec_count'].keys()):\n            if (subscript_name == current_subscript.name):\n                duration = (current_subscript.end_time - current_subscript.start_time)\n                if (subscript_name in self._current_subscript_stage['subscript_exec_duration']):\n                    duration_old = self._current_subscript_stage['subscript_exec_duration'][subscript_name]\n                else:\n                    duration_old = datetime.timedelta(0)\n                exec_count = self._current_subscript_stage['subscript_exec_count'][subscript_name]\n                duration_new = ((duration_old * (exec_count - 1)) + duration)\n                self._current_subscript_stage['subscript_exec_duration'][subscript_name] = (((duration_old * (exec_count - 1)) + duration) / exec_count)", "docstring": "sets the current subscript and keeps a counter of how ofter a particular subscript has been executed\nthis information is usefull when implementing a status update or plotting functions that depend on which subscript is being executed\n\nkeeps track of the following dictionary:\nself._current_subscript_stage = {\n'current_subscript' : reference to the current subscrit\n'subscript_exec_count' : dictionary where key is the subscript name and value how often is has been executed\n'subscript_exec_duration' : dictionary where key is the subscript name and value the average duration of executing the subscript\n}\n\nArgs:\nactive: True if the current subscript is just started, False if it just finished", "source": "codesearchnet"}
{"code": "def get_range(self, request, start, end):\n    for i in range(2):\n        try:\n            stream = self.get_stream(request, start)\n            data = stream.read(end - start)\n            self._download_pos += len(data)\n            return data\n        except Exception as e:\n            self._download_stream = None\n            self._download_request = None\n            if i == 0:\n                continue\n            if isinstance(e, messages.S3ClientError):\n                raise e\n            raise messages.S3ClientError(str(e), get_http_error_code(e))", "docstring": "Retrieves an object's contents.\n\nArgs:\nrequest: (GetRequest) request\nstart: (int) start offset\nend: (int) end offset (exclusive)\nReturns:\n(bytes) The response message.", "source": "github-repos"}
{"code": "def check_result(data, key=''):\n    \n    if not isinstance(data, dict):\n        return False\n\n    if key:\n        if key in data:\n            return True\n\n        return False\n\n    if 'resultCode' in data.keys():\n        \n        return True if data.get('resultCode', -1) == 0 else False\n\n    elif 'code' in data.keys():\n        \n        return True if data.get('code', -1) == 0 else False\n\n    return False", "docstring": "Check the result of an API response.\n\nIdeally, this should be done by checking that the value of the ``resultCode``\nattribute is 0, but there are endpoints that simply do not follow this rule.\n\nArgs:\ndata (dict): Response obtained from the API endpoint.\nkey (string): Key to check for existence in the dict.\n\nReturns:\nbool: True if result was correct, False otherwise.", "source": "juraj-google-style"}
{"code": "def _process_book(link):\n    data = DOWNER.download(link)\n    dom = dhtmlparser.parseString(utils.handle_encodnig(data))\n    dhtmlparser.makeDoubleLinked(dom)\n    price = None\n    try:\n        price = _strip_content(zapi.get_price(dom))\n    except UserWarning:\n        price = dom.find('p', {'class': 'vaseCena'})\n        if price:\n            price = price[0].getContent().replace('&nbsp;', ' ')\n            price = filter((lambda x: x.isdigit()), price.strip())\n            if price:\n                price = (price[0] + 'kč')\n            else:\n                price = '-1'\n        else:\n            price = '-1'\n    pub = Publication(title=_strip_content(zapi.get_title(dom)), authors=_parse_authors(zapi.get_author(dom)), price=price, publisher=_strip_content(zapi.get_publisher(dom)))\n    pub.optionals.URL = link\n    pub.optionals.pages = _strip_content(zapi.get_pages(dom))\n    pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))\n    pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))\n    pub.optionals.binding = _strip_content(zapi.get_binding(dom))\n    if pub.title.startswith('E-kniha:'):\n        pub.title = pub.title.replace('E-kniha:', '', 1).strip()\n        pub.optionals.is_ebook = True\n    if pub.optionals.ISBN:\n        if (' ' in pub.optionals.ISBN):\n            pub.optionals.ISBN = pub.optionals.ISBN.split(' ')[0]\n        if ('(' in pub.optionals.ISBN):\n            pub.optionals.ISBN = pub.optionals.ISBN.split('(')[0]\n    return pub", "docstring": "Download and parse available informations about book from the publishers\nwebpages.\n\nArgs:\nlink (str): URL of the book at the publishers webpages.\n\nReturns:\nobj: :class:`.Publication` instance with book details.", "source": "codesearchnet"}
{"code": "def df(self):\n    url = self._url('/system/df')\n    return self._result(self._get(url), True)", "docstring": "Get data usage information.\n\nReturns:\n(dict): A dictionary representing different resource categories\nand their respective data usage.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def cases(store, case_query, limit=100):\n    \n\n    case_groups = {status: [] for status in CASE_STATUSES}\n    for case_obj in case_query.limit(limit):\n        analysis_types = set(ind['analysis_type'] for ind in case_obj['individuals'])\n\n        case_obj['analysis_types'] = list(analysis_types)\n        case_obj['assignees'] = [store.user(user_email) for user_email in\n                                 case_obj.get('assignees', [])]\n        case_groups[case_obj['status']].append(case_obj)\n        case_obj['is_rerun'] = len(case_obj.get('analyses', [])) > 0\n        case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id'])\n        case_obj['display_track'] = TRACKS[case_obj.get('track', 'rare')]\n\n    data = {\n        'cases': [(status, case_groups[status]) for status in CASE_STATUSES],\n        'found_cases': case_query.count(),\n        'limit': limit,\n    }\n    return data", "docstring": "Preprocess case objects.\n\nAdd the necessary information to display the 'cases' view\n\nArgs:\nstore(adapter.MongoAdapter)\ncase_query(pymongo.Cursor)\nlimit(int): Maximum number of cases to display\n\nReturns:\ndata(dict): includes the cases, how many there are and the limit.", "source": "juraj-google-style"}
{"code": "class SiglipEncoder(nn.Module):\n\n    def __init__(self, config: SiglipConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    @can_return_tuple\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`SiglipEncoderLayer`].\n\nArgs:\nconfig: SiglipConfig", "source": "github-repos"}
{"code": "def deconv_output_length(input_length, filter_size, padding, stride):\n    if input_length is None:\n        return None\n    input_length *= stride\n    if padding == 'valid':\n        input_length += max(filter_size - stride, 0)\n    elif padding == 'full':\n        input_length -= stride + filter_size - 2\n    return input_length", "docstring": "Determines output length of a transposed convolution given input length.\n\nArgs:\ninput_length: integer.\nfilter_size: integer.\npadding: one of \"same\", \"valid\", \"full\".\nstride: integer.\n\nReturns:\nThe output length (integer).", "source": "github-repos"}
{"code": "def sysctl(command):\n    out = subprocess.check_output(command)\n    result = out.split(b' ')[1]\n    try:\n        return int(result)\n    except ValueError:\n        return result", "docstring": "Run a sysctl command and parse the output.\n\nArgs:\ncommand: A sysctl command with an argument, for example,\n[\"sysctl\", \"hw.memsize\"].\n\nReturns:\nThe parsed output.", "source": "codesearchnet"}
{"code": "def get_neighbors_in_shell(self, origin, r, dr):\n        \n        outer = self.get_sites_in_sphere(origin, r + dr)\n        inner = r - dr\n        return [(site, dist) for (site, dist) in outer if dist > inner]", "docstring": "Returns all sites in a shell centered on origin (coords) between radii\nr-dr and r+dr.\n\nArgs:\norigin (3x1 array): Cartesian coordinates of center of sphere.\nr (float): Inner radius of shell.\ndr (float): Width of shell.\n\nReturns:\n[(site, dist) ...] since most of the time, subsequent processing\nrequires the distance.", "source": "juraj-google-style"}
{"code": "def __mul__(self, other):\n        \n        return self.__class__(self.x, other * self.y, *self._args,\n                              **self._kwargs)", "docstring": "Scale the Spectrum's y values\n\nArgs:\nother: scalar, The scale amount\nReturns:\nSpectrum object with y values scaled", "source": "juraj-google-style"}
{"code": "def record2marcxml(record):\n    \n    schema_name = _get_schema_name(record)\n\n    if schema_name == 'hep':\n        marcjson = hep2marc.do(record)\n    elif schema_name == 'authors':\n        marcjson = hepnames2marc.do(record)\n    else:\n        raise NotImplementedError(u'JSON -> MARC rules missing for \"{}\"'.format(schema_name))\n\n    record = RECORD()\n\n    for key, values in sorted(iteritems(marcjson)):\n        tag, ind1, ind2 = _parse_key(key)\n        if _is_controlfield(tag, ind1, ind2):\n            value = force_single_element(values)\n            if not isinstance(value, text_type):\n                value = text_type(value)\n            record.append(CONTROLFIELD(_strip_invalid_chars_for_xml(value), {'tag': tag}))\n        else:\n            for value in force_list(values):\n                datafield = DATAFIELD({'tag': tag, 'ind1': ind1, 'ind2': ind2})\n                for code, els in sorted(iteritems(value)):\n                    for el in force_list(els):\n                        if not isinstance(el, text_type):\n                            el = text_type(el)\n                        datafield.append(SUBFIELD(_strip_invalid_chars_for_xml(el), {'code': code}))\n                record.append(datafield)\n\n    return tostring(record, encoding='utf8', pretty_print=True)", "docstring": "Convert a JSON record to a MARCXML string.\n\nDeduces which set of rules to use by parsing the ``$schema`` key, as\nit unequivocally determines which kind of record we have.\n\nArgs:\nrecord(dict): a JSON record.\n\nReturns:\nstr: a MARCXML string converted from the record.", "source": "juraj-google-style"}
{"code": "def colorize(text, messageType=None):\n    \n    formattedText = str(text)\n    \n    if \"ERROR\" in messageType:\n        formattedText = colorama.Fore.RED + formattedText\n    elif \"WARNING\" in messageType:\n        formattedText = colorama.Fore.YELLOW + formattedText\n    elif \"SUCCESS\" in messageType:\n        formattedText = colorama.Fore.GREEN + formattedText\n    elif \"INFO\" in messageType:\n        formattedText = colorama.Fore.BLUE + formattedText\n\n    \n    if \"BOLD\" in messageType:\n        formattedText = colorama.Style.BRIGHT + formattedText\n\n    return formattedText + colorama.Style.RESET_ALL", "docstring": "Function that colorizes a message.\n\nArgs:\n-----\ntext: The string to be colorized.\nmessageType: Possible options include \"ERROR\", \"WARNING\", \"SUCCESS\",\n\"INFO\" or \"BOLD\".\n\nReturns:\n--------\nstring: Colorized if the option is correct, including a tag at the end\nto reset the formatting.", "source": "juraj-google-style"}
{"code": "def get_eligible_features(examples, num_mutants):\n    features_dict = get_numeric_features_to_observed_range(examples)\n    features_dict.update(get_categorical_features_to_sampling(examples, num_mutants))\n    features_list = []\n    for (k, v) in sorted(features_dict.items()):\n        v['name'] = k\n        features_list.append(v)\n    return features_list", "docstring": "Returns a list of JSON objects for each feature in the examples.\n\nThis list is used to drive partial dependence plots in the plugin.\n\nArgs:\nexamples: Examples to examine to determine the eligible features.\nnum_mutants: The number of mutations to make over each feature.\n\nReturns:\nA list with a JSON object for each feature.\nNumeric features are represented as {name: observedMin: observedMax:}.\nCategorical features are repesented as {name: samples:[]}.", "source": "codesearchnet"}
{"code": "def mean(series):\n    if np.issubdtype(series.dtype, np.number):\n        return series.mean()\n    else:\n        return np.nan", "docstring": "Returns the mean of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.", "source": "codesearchnet"}
{"code": "def add_from_existing(self, resource, timeout=(- 1)):\n    uri = (self.URI + '/from-existing')\n    return self._client.create(resource, uri=uri, timeout=timeout)", "docstring": "Adds a volume that already exists in the Storage system\n\nArgs:\nresource (dict):\nObject to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Added resource.", "source": "codesearchnet"}
{"code": "def get_user_info(self, dn, _connection=None):\n        \n        return self.get_object(\n            dn=dn,\n            filter=self.config.get('LDAP_USER_OBJECT_FILTER'),\n            attributes=self.config.get(\"LDAP_GET_USER_ATTRIBUTES\"),\n            _connection=_connection,\n        )", "docstring": "Gets info about a user specified at dn.\n\nArgs:\ndn (str): The dn of the user to find\n_connection (ldap3.Connection): A connection object to use when\nsearching. If not given, a temporary connection will be\ncreated, and destroyed after use.\n\nReturns:\ndict: A dictionary of the user info from LDAP", "source": "juraj-google-style"}
{"code": "def _add_encrypted(self, other):\n        \n        if self.public_key != other.public_key:\n            raise ValueError(\"Attempted to add numbers encrypted against \"\n                             \"different public keys!\")\n\n        \n        a, b = self, other\n        if a.exponent > b.exponent:\n            a = self.decrease_exponent_to(b.exponent)\n        elif a.exponent < b.exponent:\n            b = b.decrease_exponent_to(a.exponent)\n\n        sum_ciphertext = a._raw_add(a.ciphertext(False), b.ciphertext(False))\n        return EncryptedNumber(a.public_key, sum_ciphertext, a.exponent)", "docstring": "Returns E(a + b) given E(a) and E(b).\n\nArgs:\nother (EncryptedNumber): an `EncryptedNumber` to add to self.\n\nReturns:\nEncryptedNumber: E(a + b), calculated by taking the product\nof E(a) and E(b) modulo :attr:`~PaillierPublicKey.n` ** 2.\n\nRaises:\nValueError: if numbers were encrypted against different keys.", "source": "juraj-google-style"}
{"code": "def _find_image_bounding_boxes(filenames, image_to_bboxes):\n  \n  num_image_bbox = 0\n  bboxes = []\n  for f in filenames:\n    basename = os.path.basename(f)\n    if basename in image_to_bboxes:\n      bboxes.append(image_to_bboxes[basename])\n      num_image_bbox += 1\n    else:\n      bboxes.append([])\n  print('Found %d images with bboxes out of %d images' % (\n      num_image_bbox, len(filenames)))\n  return bboxes", "docstring": "Find the bounding boxes for a given image file.\n\nArgs:\nfilenames: list of strings; each string is a path to an image file.\nimage_to_bboxes: dictionary mapping image file names to a list of\nbounding boxes. This list contains 0+ bounding boxes.\nReturns:\nList of bounding boxes for each image. Note that each entry in this\nlist might contain from 0+ entries corresponding to the number of bounding\nbox annotations for the image.", "source": "juraj-google-style"}
{"code": "def _read_mode_mptcp(self, size, kind):\n    bins = self._read_binary(1)\n    subt = int(bins[:4], base=2)\n    bits = bins[4:]\n    dlen = (size - 1)\n    func = mptcp_opt.get(subt)\n    if (func is None):\n        temp = self._read_fileng(dlen)\n        data = dict(kind=kind, length=size, subtype='Unknown', data=(bytes(chr(int(bits[:4], base=2)), encoding='utf-8') + temp))\n    else:\n        data = func(self, bits, dlen, kind)\n    return data", "docstring": "Read Multipath TCP option.\n\nPositional arguments:\n* size - int, length of option\n* kind - int, 30 (Multipath TCP)\n\nReturns:\n* dict -- extracted Multipath TCP (MP-TCP) option\n\nStructure of MP-TCP [RFC 6824]:\n1                   2                   3\n0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+---------------+---------------+-------+-----------------------+\n|     Kind      |    Length     |Subtype|                       |\n+---------------+---------------+-------+                       |\n|                     Subtype-specific data                     |\n|                       (variable length)                       |\n+---------------------------------------------------------------+\n\nOctets      Bits        Name                    Description\n0           0     tcp.mp.kind             Kind (30)\n1           8     tcp.mp.length           Length\n2          16     tcp.mp.subtype          Subtype\n2          20     tcp.mp.data             Subtype-specific Data", "source": "codesearchnet"}
{"code": "def read_config(contents):\n    \n    file_obj = io.StringIO(contents)\n    config = six.moves.configparser.ConfigParser()\n    config.readfp(file_obj)\n    return config", "docstring": "Reads pylintrc config into native ConfigParser object.\n\nArgs:\ncontents (str): The contents of the file containing the INI config.\n\nReturns:\nConfigParser.ConfigParser: The parsed configuration.", "source": "juraj-google-style"}
{"code": "def log_error(self, msg):\n        \n        if self.__logger:\n            self.__logger.error(msg)\n\n        raise RuntimeError(msg)", "docstring": "Log an error and raise an exception.\n\nArgs:\nmsg: Error message to log.\n\nRaises:\nRuntimeError: With the message.", "source": "juraj-google-style"}
{"code": "def ResultCollectionForFID(cls, flow_id):\n    if (not isinstance(flow_id, rdfvalue.RDFURN)):\n        flow_id = rdfvalue.RDFURN(flow_id)\n    return sequential_collection.GeneralIndexedCollection(flow_id.Add(RESULTS_SUFFIX))", "docstring": "Returns the ResultCollection for the flow with a given flow_id.\n\nArgs:\nflow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456.\n\nReturns:\nThe collection containing the results for the flow identified by the id.", "source": "codesearchnet"}
{"code": "def parse(self, data, lexer=None, *args, **kwargs):\n        \n        if lexer is None:\n            lexer = self.lexer\n        return self.parser.parse(data, lexer=lexer, *args, **kwargs)", "docstring": "Parse the input JSON data string into a python data structure.\nArgs:\ndata: An input data string\nlexer:  An optional ply.lex instance that overrides the default lexer.\nReturns:\nA python dict or list representing the input JSON data.", "source": "juraj-google-style"}
{"code": "def __init__(self, metric_name, kind, value_type, update_op_func,\n                 mark=Mark.PRODUCER):\n        \n        self.kind = kind\n        self.metric_name = metric_name\n        if mark is Mark.CONSUMER:\n            self.update_op_func = self._consumer_metric(update_op_func)\n        elif mark is Mark.PRODUCER_BY_CONSUMER:\n            self.update_op_func = self._by_consumer_metric(update_op_func)\n        else:\n            self.update_op_func = update_op_func\n        self.value_type = value_type\n        self.mark = mark", "docstring": "Constructor.\n\nupdate_op_func is used to when updating an `Operation` from a\n`ReportRequestInfo`.\n\nArgs:\nmetric_name (str): the name of the metric descriptor\nkind (:class:`MetricKind`): the ``kind`` of the described metric\nvalue_type (:class:`ValueType`): the `value type` of the described metric\nupdate_op_func (function): the func to update an operation", "source": "juraj-google-style"}
{"code": "def get_es_label(obj, def_obj):\n    \n    label_flds = LABEL_FIELDS\n    if def_obj.es_defs.get('kds_esLabel'):\n        label_flds = def_obj.es_defs['kds_esLabel'] + LABEL_FIELDS\n    try:\n        for label in label_flds:\n            if def_obj.cls_defs.get(label):\n                 obj['label'] = def_obj.cls_defs[label][0]\n                 break\n        if not obj.get('label'):\n            obj['label'] = def_obj.__class__.__name__.split(\"_\")[-1]\n    except AttributeError:\n        \n        \n        \n        if def_obj.get('rdf_type'):\n            obj['label'] = def_obj['rdf_type'][-1].value[-1]\n        else:\n            obj['label'] = \"no_label\"\n    return obj", "docstring": "Returns object with label for an object that goes into the elacticsearch\n'label' field\n\nargs:\nobj: data object to update\ndef_obj: the class instance that has defintion values", "source": "juraj-google-style"}
{"code": "def _stop(self) -> None:\n    self._server.stop()", "docstring": "Stops the server.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\nstopping the server.", "source": "github-repos"}
{"code": "def pretokenized_tfds_dataset(dataset_name=gin.REQUIRED,\n                              text2self=gin.REQUIRED,\n                              tfds_data_dir=gin.REQUIRED,\n                              dataset_split=gin.REQUIRED,\n                              batch_size=gin.REQUIRED,\n                              sequence_length=gin.REQUIRED,\n                              vocabulary=None):\n  \n  del vocabulary\n  dataset = tfds.load(\n      dataset_name,\n      split=dataset_split,\n      as_supervised=True,\n      data_dir=tfds_data_dir)\n  if dataset_split == \"train\":\n    dataset = dataset.repeat()\n    dataset = dataset.shuffle(1000)\n  def shift_and_append_eos(t):\n    \n    \n    return tf.concat([t + 1, [1]], 0)\n  def feature_map(inputs, targets):\n    if text2self:\n      return {\"targets\": shift_and_append_eos(targets)}\n    else:\n      return {\"inputs\": shift_and_append_eos(inputs),\n              \"targets\": shift_and_append_eos(targets)}\n  dataset = dataset.map(feature_map,\n                        num_parallel_calls=tf.data.experimental.AUTOTUNE)\n  return pack_and_batch(dataset, batch_size, sequence_length)", "docstring": "Reads a tensorflow_datasets dataset.\n\nArgs:\ndataset_name: a string\ntext2self: a boolean\ntfds_data_dir: a boolean\ndataset_split: a string\nbatch_size: an integer\nsequence_length: an integer\nvocabulary: ignored\nReturns:\na tf.data.Dataset of batches", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n    cx_runs = dag.collect_runs(['cx'])\n    for cx_run in cx_runs:\n        partition = []\n        chunk = []\n        for i in range((len(cx_run) - 1)):\n            chunk.append(cx_run[i])\n            qargs0 = cx_run[i].qargs\n            qargs1 = cx_run[(i + 1)].qargs\n            if (qargs0 != qargs1):\n                partition.append(chunk)\n                chunk = []\n        chunk.append(cx_run[(- 1)])\n        partition.append(chunk)\n        for chunk in partition:\n            if ((len(chunk) % 2) == 0):\n                for n in chunk:\n                    dag.remove_op_node(n)\n            else:\n                for n in chunk[1:]:\n                    dag.remove_op_node(n)\n    return dag", "docstring": "Run one pass of cx cancellation on the circuit\n\nArgs:\ndag (DAGCircuit): the directed acyclic graph to run on.\nReturns:\nDAGCircuit: Transformed DAG.", "source": "codesearchnet"}
{"code": "def __init__(self, fn, args, kwargs, side_inputs, windowing, tagged_receivers, step_name=None, logging_context=None, state=None, scoped_metrics_container=None, operation_name=None, transform_id=None, user_state_context=None):\n    side_inputs = list(side_inputs)\n    self.step_name = step_name\n    self.transform_id = transform_id\n    self.context = DoFnContext(step_name, state=state)\n    self.bundle_finalizer_param = DoFn.BundleFinalizerParam()\n    self.execution_context = None\n    do_fn_signature = DoFnSignature(fn)\n    main_receivers = tagged_receivers[None]\n    if 'outputs_per_element_counter' in RuntimeValueProvider.experiments:\n        output_counter_name = CounterName('per-element-output-count', step_name=operation_name)\n        per_element_output_counter = state._counter_factory.get_counter(output_counter_name, Counter.DATAFLOW_DISTRIBUTION).accumulator\n    else:\n        per_element_output_counter = None\n    output_handler = _OutputHandler(windowing.windowfn, main_receivers, tagged_receivers, per_element_output_counter, getattr(fn, 'output_batch_converter', None), getattr(do_fn_signature.process_method.method_value, '_beam_yields_batches', False), getattr(do_fn_signature.process_batch_method.method_value, '_beam_yields_elements', False))\n    if do_fn_signature.is_stateful_dofn() and (not user_state_context):\n        raise Exception('Requested execution of a stateful DoFn, but no user state context is available. This likely means that the current runner does not support the execution of stateful DoFns.')\n    self.do_fn_invoker = DoFnInvoker.create_invoker(do_fn_signature, output_handler, self.context, side_inputs, args, kwargs, user_state_context=user_state_context, bundle_finalizer_param=self.bundle_finalizer_param)", "docstring": "Initializes a DoFnRunner.\n\nArgs:\nfn: user DoFn to invoke\nargs: positional side input arguments (static and placeholder), if any\nkwargs: keyword side input arguments (static and placeholder), if any\nside_inputs: list of sideinput.SideInputMaps for deferred side inputs\nwindowing: windowing properties of the output PCollection(s)\ntagged_receivers: a dict of tag name to Receiver objects\nstep_name: the name of this step\nlogging_context: DEPRECATED [BEAM-4728]\nstate: handle for accessing DoFn state\nscoped_metrics_container: DEPRECATED\noperation_name: The system name assigned by the runner for this operation.\ntransform_id: The PTransform Id in the pipeline proto for this DoFn.\nuser_state_context: The UserStateContext instance for the current\nStateful DoFn.", "source": "github-repos"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._operations:\n        for operation in self._operations:\n            operation.write(local_buffer, kmip_version=kmip_version)\n    if self._object_types:\n        for object_type in self._object_types:\n            object_type.write(local_buffer, kmip_version=kmip_version)\n    if self._vendor_identification:\n        self._vendor_identification.write(local_buffer, kmip_version=kmip_version)\n    if self._server_information:\n        self._server_information.write(local_buffer, kmip_version=kmip_version)\n    if self._application_namespaces:\n        for application_namespace in self._application_namespaces:\n            application_namespace.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version >= enums.KMIPVersion.KMIP_1_1):\n        if self._extension_information:\n            for extension_information in self._extension_information:\n                extension_information.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version >= enums.KMIPVersion.KMIP_1_2):\n        if self._attestation_types:\n            for attestation_type in self._attestation_types:\n                attestation_type.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version >= enums.KMIPVersion.KMIP_1_3):\n        if self._rng_parameters:\n            for rng_parameters in self._rng_parameters:\n                rng_parameters.write(local_buffer, kmip_version=kmip_version)\n        if self._profile_information:\n            for profile_information in self._profile_information:\n                profile_information.write(local_buffer, kmip_version=kmip_version)\n        if self._validation_information:\n            for validation_information in self._validation_information:\n                validation_information.write(local_buffer, kmip_version=kmip_version)\n        if self._capability_information:\n            for capability_information in self._capability_information:\n                capability_information.write(local_buffer, kmip_version=kmip_version)\n        if self._client_registration_methods:\n            for client_reg_method in self._client_registration_methods:\n                client_reg_method.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version >= enums.KMIPVersion.KMIP_2_0):\n        if self._defaults_information:\n            self._defaults_information.write(local_buffer, kmip_version=kmip_version)\n        if self._storage_protection_masks:\n            for storage_protection_mask in self._storage_protection_masks:\n                storage_protection_mask.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(QueryResponsePayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the QueryResponsePayload object to a stream.\n\nArgs:\noutput_buffer (Stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def mean_squared_error(true, pred):\n    result = (tf.reduce_sum(tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred)))\n    return result", "docstring": "L2 distance between tensors true and pred.\n\nArgs:\ntrue: the ground truth image.\npred: the predicted image.\nReturns:\nmean squared error between ground truth and predicted image.", "source": "codesearchnet"}
{"code": "def process_file(vcs, commit, force, gitlint_config, file_data):\n    (filename, extra_data) = file_data\n    if force:\n        modified_lines = None\n    else:\n        modified_lines = vcs.modified_lines(filename, extra_data, commit=commit)\n    result = linters.lint(filename, modified_lines, gitlint_config)\n    result = result[filename]\n    return (filename, result)", "docstring": "Lint the file\n\nReturns:\nThe results from the linter.", "source": "codesearchnet"}
{"code": "def __call__(self, current_obj, attr, obj_ref):\n        \n\n        def _find_obj_fqn(p, fqn_name, cls):\n            \n\n            def find_obj(parent, name):\n                if parent is not current_obj and \\\n                        self.scope_redirection_logic is not None:\n                    from textx.scoping import Postponed\n                    res = self.scope_redirection_logic(parent)\n                    assert res is not None, \\\n                        \"scope_redirection_logic must not return None\"\n                    if type(res) is Postponed:\n                        return res\n                    for m in res:\n                        return_value = find_obj(m, name)\n                        if return_value is not None:\n                            return return_value\n                for attr in [a for a in parent.__dict__ if\n                             not a.startswith('__') and not\n                             a.startswith('_tx_') and not\n                             callable(getattr(parent, a))]:\n                    obj = getattr(parent, attr)\n                    if isinstance(obj, (list, tuple)):\n                        for innerobj in obj:\n                            if hasattr(innerobj, \"name\") \\\n                                    and innerobj.name == name:\n                                return innerobj\n                    else:\n                        if hasattr(obj, \"name\") and obj.name == name:\n                            return obj\n                return None\n\n            for n in fqn_name.split('.'):\n                obj = find_obj(p, n)\n                if obj:\n                    if type(obj) is Postponed:\n                        return obj\n                    p = obj\n                else:\n                    return None\n\n            from textx import textx_isinstance\n            if textx_isinstance(obj, cls):\n                return p\n            else:\n                return None\n\n        def _find_referenced_obj(p, name, cls):\n            \n            ret = _find_obj_fqn(p, name, cls)\n            if ret:\n                return ret\n            while hasattr(p, \"parent\"):\n                p = p.parent\n                ret = _find_obj_fqn(p, name, cls)\n                if ret:\n                    return ret\n                \n\n        from textx.model import ObjCrossRef\n        assert type(obj_ref) is ObjCrossRef, type(obj_ref)\n        obj_cls, obj_name = obj_ref.cls, obj_ref.obj_name\n        return _find_referenced_obj(current_obj, obj_name, obj_cls)", "docstring": "find a fully qualified name.\nUse this callable as scope_provider in a meta-model:\nmy_metamodel.register_scope_provider(\n{\"*.*\":textx.scoping.providers.FQN})\n\nArgs:\ncurrent_obj: object corresponding a instance of an\nobject (rule instance)\nattr: the referencing attribute (unused)\nobj_ref: ObjCrossRef to be resolved\n\nReturns: None or the referenced object", "source": "juraj-google-style"}
{"code": "def round(self, decimals=0, *args, **kwargs):\n        \n        return self.__constructor__(\n            query_compiler=self._query_compiler.round(decimals=decimals, **kwargs)\n        )", "docstring": "Round each element in the DataFrame.\n\nArgs:\ndecimals: The number of decimals to round to.\n\nReturns:\nA new DataFrame.", "source": "juraj-google-style"}
{"code": "def is_type_or_profile_of(url: str, message_or_descriptor: annotation_utils.MessageOrDescriptorBase) -> bool:\n    return is_type(url, message_or_descriptor) or is_profile_of(url, message_or_descriptor)", "docstring": "Whether message_or_descriptor is of type url *or* is a profile of url.\n\nArgs:\nurl: The FHIR structure definition URL to compare against.\nmessage_or_descriptor: The Message or Descriptor to examine.\n\nReturns:\nTrue if message_or_descriptor has a structure definition URL of url, or if\nit is a profile with a base structure definition URL of url.", "source": "github-repos"}
{"code": "def GetUpdates(self, source, url, since):\n    proto = url.split(':')[0]\n    if proto not in ('http', 'https'):\n        raise error.ConfigurationError('Unsupported protocol %s' % proto)\n    conn = source.conn\n    conn.setopt(pycurl.OPT_FILETIME, 1)\n    conn.setopt(pycurl.ENCODING, 'bzip2, gzip')\n    if since is not None:\n        conn.setopt(pycurl.TIMEVALUE, int(since))\n        conn.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_IFMODSINCE)\n    retry_count = 0\n    resp_code = 500\n    while retry_count < source.conf['retry_max']:\n        try:\n            source.log.debug('fetching %s', url)\n            resp_code, headers, body_bytes = curl.CurlFetch(url, conn, self.log)\n            self.log.debug('response code: %s', resp_code)\n        finally:\n            if resp_code < 400:\n                if resp_code == 304:\n                    return []\n                if resp_code == 200:\n                    break\n            retry_count += 1\n            self.log.warning('Failed connection: attempt \n            if retry_count == source.conf['retry_max']:\n                self.log.debug('max retries hit')\n                raise error.SourceUnavailable('Max retries exceeded.')\n            time.sleep(source.conf['retry_delay'])\n    headers = headers.split('\\r\\n')\n    last_modified = conn.getinfo(pycurl.INFO_FILETIME)\n    self.log.debug('last modified: %s', last_modified)\n    if last_modified == -1:\n        for header in headers:\n            if header.lower().startswith('last-modified'):\n                self.log.debug('%s', header)\n                http_ts_string = header[header.find(':') + 1:].strip()\n                last_modified = self.FromHttpToTimestamp(http_ts_string)\n                break\n        else:\n            http_ts_string = ''\n    else:\n        http_ts_string = self.FromTimestampToHttp(last_modified)\n    self.log.debug('Last-modified is: %s', http_ts_string)\n    try:\n        body_bytes = bz2.decompress(body_bytes)\n        self.log.debug('bzip encoding found')\n    except IOError:\n        self.log.debug('bzip encoding not found')\n    response = StringIO(body_bytes.decode('utf-8'))\n    data_map = self.GetMap(cache_info=response)\n    if http_ts_string:\n        http_ts = self.FromHttpToTimestamp(http_ts_string)\n        self.log.debug('setting last modified to: %s', http_ts)\n        data_map.SetModifyTimestamp(http_ts)\n    return data_map", "docstring": "Get updates from a source.\n\nArgs:\nsource: A data source\nurl: url to the data we want\nsince: a timestamp representing the last change (None to force-get)\n\nReturns:\nA tuple containing the map of updates and a maximum timestamp\n\nRaises:\nValueError: an object in the source map is malformed\nConfigurationError:", "source": "github-repos"}
{"code": "def GetCampaignFeeds(client, feed, placeholder_type):\n    campaign_feed_service = client.GetService('CampaignFeedService', 'v201809')\n    campaign_feeds = []\n    more_pages = True\n    selector = {'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'], 'predicates': [{'field': 'Status', 'operator': 'EQUALS', 'values': ['ENABLED']}, {'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]}, {'field': 'PlaceholderTypes', 'operator': 'CONTAINS_ANY', 'values': [placeholder_type]}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}\n    while more_pages:\n        page = campaign_feed_service.get(selector)\n        if ('entries' in page):\n            campaign_feeds.extend(page['entries'])\n        selector['paging']['startIndex'] += PAGE_SIZE\n        more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))\n    return campaign_feeds", "docstring": "Get a list of Feed Item Ids used by a campaign via a given Campaign Feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed: a Campaign Feed.\nplaceholder_type: the Placeholder Type.\n\nReturns:\nA list of Feed Item Ids.", "source": "codesearchnet"}
{"code": "def get_members(self, retrieve=False):\n\n\t\t\n\n\t\tif self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasMember'):\n\t\t\tmembers = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasMember ]\n\n\t\t\t\n\t\t\treturn members\n\n\t\telse:\n\t\t\treturn []", "docstring": "get pcdm:hasMember for this resource\n\nArgs:\nretrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload", "source": "juraj-google-style"}
{"code": "def __init__(self, parent=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(QCOWPathSpec, self).__init__(parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the QCOW path specification must have a parent.\n\nArgs:\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def _ScheduleTasks(self, storage_writer):\n    \n    logger.debug('Task scheduler started')\n\n    self._status = definitions.STATUS_INDICATOR_RUNNING\n\n    \n\n    \n    \n\n    event_source_heap = _EventSourceHeap()\n\n    self._FillEventSourceHeap(\n        storage_writer, event_source_heap, start_with_first=True)\n\n    event_source = event_source_heap.PopEventSource()\n\n    task = None\n    while event_source or self._task_manager.HasPendingTasks():\n      if self._abort:\n        break\n\n      try:\n        if not task:\n          task = self._task_manager.CreateRetryTask()\n\n        if not task and event_source:\n          task = self._task_manager.CreateTask(self._session_identifier)\n          task.file_entry_type = event_source.file_entry_type\n          task.path_spec = event_source.path_spec\n          event_source = None\n\n          self._number_of_consumed_sources += 1\n\n          if self._guppy_memory_profiler:\n            self._guppy_memory_profiler.Sample()\n\n        if task:\n          if self._ScheduleTask(task):\n            logger.debug(\n                'Scheduled task {0:s} for path specification {1:s}'.format(\n                    task.identifier, task.path_spec.comparable))\n\n            self._task_manager.SampleTaskStatus(task, 'scheduled')\n\n            task = None\n\n          else:\n            self._task_manager.SampleTaskStatus(task, 'schedule_attempted')\n\n        self._MergeTaskStorage(storage_writer)\n\n        if not event_source_heap.IsFull():\n          self._FillEventSourceHeap(storage_writer, event_source_heap)\n\n        if not task and not event_source:\n          event_source = event_source_heap.PopEventSource()\n\n      except KeyboardInterrupt:\n        self._abort = True\n\n        self._processing_status.aborted = True\n        if self._status_update_callback:\n          self._status_update_callback(self._processing_status)\n\n    for task in self._task_manager.GetFailedTasks():\n      warning = warnings.ExtractionWarning(\n          message='Worker failed to process path specification',\n          path_spec=task.path_spec)\n      self._storage_writer.AddWarning(warning)\n      self._processing_status.error_path_specs.append(task.path_spec)\n\n    self._status = definitions.STATUS_INDICATOR_IDLE\n\n    if self._abort:\n      logger.debug('Task scheduler aborted')\n    else:\n      logger.debug('Task scheduler stopped')", "docstring": "Schedules tasks.\n\nArgs:\nstorage_writer (StorageWriter): storage writer for a session storage.", "source": "juraj-google-style"}
{"code": "def grepPDF(self, path):\n    \n    with open(path, 'rb') as pdf_file_obj:\n      match = set()\n      text = ''\n      pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)\n      pages = pdf_reader.numPages\n      for page in range(pages):\n        page_obj = pdf_reader.getPage(page)\n        text += '\\n' + page_obj.extractText()\n      match.update(set(x.lower() for x in re.findall(\n          self._keywords, text, re.IGNORECASE)))\n    return match", "docstring": "Parse PDF files text content for keywords.\n\nArgs:\npath: PDF file path.\n\nReturns:\nmatch: set of unique occurrences of every match.", "source": "juraj-google-style"}
{"code": "def _get_vcap_services(vcap_services=None):\n    vcap_services = (vcap_services or os.environ.get('VCAP_SERVICES'))\n    if (not vcap_services):\n        raise ValueError(\"VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'\")\n    if isinstance(vcap_services, dict):\n        return vcap_services\n    try:\n        vcap_services = json.loads(vcap_services)\n    except json.JSONDecodeError:\n        try:\n            with open(vcap_services) as vcap_json_data:\n                vcap_services = json.load(vcap_json_data)\n        except:\n            raise ValueError('VCAP_SERVICES information is not JSON or a file containing JSON:', vcap_services)\n    return vcap_services", "docstring": "Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If\n`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.\n\nArgs:\nvcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file.\nvcap_services (dict): Return the dict as is.\n\nReturns:\ndict: A dict representation of the VCAP Services information.\n\nRaises:\nValueError:\n* if `vcap_services` nor VCAP_SERVICES environment variable are specified.\n* cannot parse `vcap_services` as a JSON string nor as a filename.", "source": "codesearchnet"}
{"code": "def recv_task_request_from_workers(self):\n    info = MPI.Status()\n    comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)\n    worker_rank = info.Get_source()\n    logger.info('Received task request from worker:{}'.format(worker_rank))\n    return worker_rank", "docstring": "Receives 1 task request from MPI comm\n\nReturns:\n--------\nworker_rank: worker_rank id", "source": "codesearchnet"}
{"code": "def __init__(self, encoding='utf-8'):\n    \n    super(OutputWriter, self).__init__()\n    self._encoding = encoding\n    self._errors = 'strict'", "docstring": "Initializes an output writer.\n\nArgs:\nencoding (Optional[str]): input encoding.", "source": "juraj-google-style"}
{"code": "def assert_rank_at_least(x, rank, data=None, summarize=None, message=None, name=None):\n    with ops.name_scope(name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):\n        x = ops.convert_to_tensor(x, name='x')\n        rank = ops.convert_to_tensor(rank, name='rank')\n        message = _message_prefix(message)\n        static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank\n        dynamic_condition = math_ops.greater_equal\n        if context.executing_eagerly():\n            name = ''\n        else:\n            name = x.name\n        if data is None:\n            data = [message, 'Tensor %s must have rank at least' % name, rank, 'Received shape: ', array_ops.shape(x)]\n        try:\n            assert_op = _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize)\n        except ValueError as e:\n            if e.args[0] == 'Static rank condition failed':\n                raise ValueError('%sTensor %s must have rank at least %d.  Received rank %d, shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))\n            else:\n                raise\n    return assert_op", "docstring": "Assert `x` has rank equal to `rank` or higher.\n\nExample of adding a dependency to an operation:\n\n```python\nwith tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]):\noutput = tf.reduce_sum(x)\n```\n\nArgs:\nx:  Numeric `Tensor`.\nrank:  Scalar `Tensor`.\ndata:  The tensors to print out if the condition is False.  Defaults to\nerror message and first few entries of `x`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).\nDefaults to \"assert_rank_at_least\".\n\nReturns:\nOp raising `InvalidArgumentError` unless `x` has specified rank or higher.\nIf static checks determine `x` has correct rank, a `no_op` is returned.\n\nRaises:\nValueError:  If static checks determine `x` has wrong rank.", "source": "github-repos"}
{"code": "def _info_to_string(info):\n  \n  for key in _TENSORBOARD_INFO_FIELDS:\n    field_type = _TENSORBOARD_INFO_FIELDS[key]\n    if not isinstance(getattr(info, key), field_type.runtime_type):\n      raise ValueError(\n          \"expected %r of type %s, but found: %r\" %\n          (key, field_type.runtime_type, getattr(info, key))\n      )\n  if info.version != version.VERSION:\n    raise ValueError(\n        \"expected 'version' to be %r, but found: %r\" %\n        (version.VERSION, info.version)\n    )\n  json_value = {\n      k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k))\n      for k in _TENSORBOARD_INFO_FIELDS\n  }\n  return json.dumps(json_value, sort_keys=True, indent=4)", "docstring": "Convert a `TensorBoardInfo` to string form to be stored on disk.\n\nThe format returned by this function is opaque and should only be\ninterpreted by `_info_from_string`.\n\nArgs:\ninfo: A valid `TensorBoardInfo` object.\n\nRaises:\nValueError: If any field on `info` is not of the correct type.\n\nReturns:\nA string representation of the provided `TensorBoardInfo`.", "source": "juraj-google-style"}
{"code": "def categorize(values, categories, default=None):\n    uniq_cats = list(unique_iterator(values))\n    cats = []\n    for c in values:\n        if isinstance(categories, list):\n            cat_ind = uniq_cats.index(c)\n            if (cat_ind < len(categories)):\n                cat = categories[cat_ind]\n            else:\n                cat = default\n        else:\n            cat = categories.get(c, default)\n        cats.append(cat)\n    return np.asarray(cats)", "docstring": "Maps discrete values to supplied categories.\n\nReplaces discrete values in input array with a fixed set of\ncategories defined either as a list or dictionary.\n\nArgs:\nvalues: Array of values to be categorized\ncategories: List or dict of categories to map inputs to\ndefault: Default value to assign if value not in categories\n\nReturns:\nArray of categorized values", "source": "codesearchnet"}
{"code": "def AddFilterOptions(self, argument_group):\n    names = ['artifact_filters', 'date_filters', 'filter_file']\n    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(argument_group, names=names)\n    argument_group.add_argument('-x', '--extensions', dest='extensions_string', action='store', type=str, metavar='EXTENSIONS', help='Filter on file name extensions. This option accepts multiple multiple comma separated values e.g. \"csv,docx,pst\".')\n    argument_group.add_argument('--names', dest='names_string', action='store', type=str, metavar='NAMES', help='Filter on file names.  This option accepts a comma separated string denoting all file names, e.g. -x \"NTUSER.DAT,UsrClass.dat\".')\n    argument_group.add_argument('--signatures', dest='signature_identifiers', action='store', type=str, metavar='IDENTIFIERS', help='Filter on file format signature identifiers. This option accepts multiple comma separated values e.g. \"esedb,lnk\". Use \"list\" to show an overview of the supported file format signatures.')", "docstring": "Adds the filter options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def load_from_dict(self, dictionary, _override=True, _allow_undeclared=False):\n    undeclared_keys = []\n    for (key, value) in self._modules['six'].iteritems(dictionary):\n        if ((key not in self._declarations) and (not _allow_undeclared)):\n            undeclared_keys.append(key)\n            continue\n        if (key in self._loaded_values):\n            if _override:\n                self._logger.info('Overriding previously loaded value for %s (%s) with value: %s', key, self._loaded_values[key], value)\n            else:\n                self._logger.info('Ignoring new value (%s), keeping previous value for %s: %s', value, key, self._loaded_values[key])\n                continue\n        key = (key.decode() if isinstance(key, bytes) else key)\n        value = (value.decode() if isinstance(value, bytes) else value)\n        self._loaded_values[key] = value\n    if undeclared_keys:\n        self._logger.warning('Ignoring undeclared configuration keys: %s', undeclared_keys)", "docstring": "Loads the config with values from a dictionary instead of a file.\n\nThis is meant for testing and bin purposes and shouldn't be used in most\napplications.\n\nArgs:\ndictionary: The dictionary containing config keys/values to update.\n_override: If True, new values will override previous values.\n_allow_undeclared: If True, silently load undeclared keys, otherwise\nwarn and ignore the value.  Typically used for loading config\nfiles before declarations have been evaluated.", "source": "codesearchnet"}
{"code": "def sni2route(self, sni: SchemaNodeId, sctx: SchemaContext) -> SchemaRoute:\n    nlist = sni.split('/')\n    res = []\n    for qn in (nlist[1:] if (sni[0] == '/') else nlist):\n        res.append(self.translate_node_id(qn, sctx))\n    return res", "docstring": "Translate schema node identifier to a schema route.\n\nArgs:\nsni: Schema node identifier (absolute or relative).\nsctx: Schema context.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If a prefix specified in `sni` is not declared.", "source": "codesearchnet"}
{"code": "def fold(*vals):\n    \n    vals = [v for v in vals if v is not None]\n    if not vals:\n        return None\n    return min(vals), max(vals)", "docstring": "Initialize a new (min,max) tuple interval from values.\n\nArgs:\n*vals ([int,...]):  A list of values (or Nones)\n\nReturns:\n((int,int)):    A (min,max) interval tuple or None", "source": "juraj-google-style"}
{"code": "def na_if(series, *values):\n    \n\n    series = pd.Series(series)\n    series[series.isin(values)] = np.nan\n    return series", "docstring": "If values in a series match a specified value, change them to `np.nan`.\n\nArgs:\nseries: Series or vector, often symbolic.\n*values: Value(s) to convert to `np.nan` in the series.", "source": "juraj-google-style"}
{"code": "def register_task(self, input, deps=None, manager=None, task_class=None, append=False):\n    if (not append):\n        work = Work(manager=manager)\n    elif (not self.works):\n        work = Work(manager=manager)\n        append = False\n    else:\n        work = self.works[(- 1)]\n    task = work.register(input, deps=deps, task_class=task_class)\n    if (not append):\n        self.register_work(work)\n    return work", "docstring": "Utility function that generates a `Work` made of a single task\n\nArgs:\ninput: :class:`AbinitInput`\ndeps: List of :class:`Dependency` objects specifying the dependency of this node.\nAn empy list of deps implies that this node has no dependencies.\nmanager: The :class:`TaskManager` responsible for the submission of the task.\nIf manager is None, we use the :class:`TaskManager` specified during the creation of the work.\ntask_class: Task subclass to instantiate. Default: :class:`AbinitTask`\nappend: If true, the task is added to the last work (a new Work is created if flow is empty)\n\nReturns:\nThe generated :class:`Work` for the task, work[0] is the actual task.", "source": "codesearchnet"}
{"code": "def PushEventSource(self, event_source):\n    \n    if event_source.file_entry_type == (\n        dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY):\n      weight = 1\n    else:\n      weight = 100\n\n    heap_values = (weight, time.time(), event_source)\n    heapq.heappush(self._heap, heap_values)", "docstring": "Pushes an event source onto the heap.\n\nArgs:\nevent_source (EventSource): event source.", "source": "juraj-google-style"}
{"code": "def _exception_for(self, code):\n    if (code in self.errors):\n        return self.errors[code]\n    elif (500 <= code < 599):\n        return exceptions.RemoteServerError\n    else:\n        return exceptions.UnknownError", "docstring": "Return the exception class suitable for the specified HTTP\nstatus code.\n\nRaises:\nUnknownError: The HTTP status code is not one of the knowns.", "source": "codesearchnet"}
{"code": "def _alter_code(code, **attrs):\n    \n\n    PyCode_New = ctypes.pythonapi.PyCode_New\n\n    PyCode_New.argtypes = (\n        ctypes.c_int,\n        ctypes.c_int,\n        ctypes.c_int,\n        ctypes.c_int,\n        ctypes.c_int,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.py_object,\n        ctypes.c_int,\n        ctypes.py_object)\n\n    PyCode_New.restype = ctypes.py_object\n\n    args = [\n        [code.co_argcount, 'co_argcount'],\n        [code.co_kwonlyargcount, 'co_kwonlyargcount'],\n        [code.co_nlocals, 'co_nlocals'],\n        [code.co_stacksize, 'co_stacksize'],\n        [code.co_flags, 'co_flags'],\n        [code.co_code, 'co_code'],\n        [code.co_consts, 'co_consts'],\n        [code.co_names, 'co_names'],\n        [code.co_varnames, 'co_varnames'],\n        [code.co_freevars, 'co_freevars'],\n        [code.co_cellvars, 'co_cellvars'],\n        [code.co_filename, 'co_filename'],\n        [code.co_name, 'co_name'],\n        [code.co_firstlineno, 'co_firstlineno'],\n        [code.co_lnotab, 'co_lnotab']]\n\n    for arg in args:\n        if arg[1] in attrs:\n            arg[0] = attrs[arg[1]]\n\n    return PyCode_New(\n        args[0][0],  \n        args[1][0],  \n        args[2][0],  \n        args[3][0],  \n        args[4][0],  \n        args[5][0],  \n        args[6][0],  \n        args[7][0],  \n        args[8][0],  \n        args[9][0],  \n        args[10][0],  \n        args[11][0],  \n        args[12][0],  \n        args[13][0],  \n        args[14][0])", "docstring": "Create a new code object by altering some of ``code`` attributes\n\nArgs:\ncode: code objcect\nattrs: a mapping of names of code object attrs to their values", "source": "juraj-google-style"}
{"code": "def save(self, path_info, checksum):\n    assert (path_info['scheme'] == 'local')\n    assert (checksum is not None)\n    path = path_info['path']\n    assert os.path.exists(path)\n    (actual_mtime, actual_size) = get_mtime_and_size(path)\n    actual_inode = get_inode(path)\n    existing_record = self.get_state_record_for_inode(actual_inode)\n    if (not existing_record):\n        self._insert_new_state_record(path, actual_inode, actual_mtime, actual_size, checksum)\n        return\n    self._update_state_for_path_changed(path, actual_inode, actual_mtime, actual_size, checksum)", "docstring": "Save checksum for the specified path info.\n\nArgs:\npath_info (dict): path_info to save checksum for.\nchecksum (str): checksum to save.", "source": "codesearchnet"}
{"code": "def gaussian_deriv(times: np.ndarray, amp: complex, center: float, sigma: float, ret_gaussian: bool=False) -> np.ndarray:\n    (gauss, x) = gaussian(times, amp=amp, center=center, sigma=sigma, ret_x=True)\n    gauss_deriv = (((- x) / sigma) * gauss)\n    if ret_gaussian:\n        return (gauss_deriv, gauss)\n    return gauss_deriv", "docstring": "Continuous unnormalized gaussian derivative pulse.\n\nArgs:\ntimes: Times to output pulse for.\namp: Pulse amplitude at `center`.\ncenter: Center (mean) of pulse.\nsigma: Width (standard deviation) of pulse.\nret_gaussian: Return gaussian with which derivative was taken with.", "source": "codesearchnet"}
{"code": "def all(self, data={}, **kwargs):\n    return super(Order, self).all(data, **kwargs)", "docstring": "Fetch all Order entities\n\nReturns:\nDictionary of Order data", "source": "codesearchnet"}
{"code": "def find_documents(self, sentence, limit=None, must_sort=True, search_type='fuzzy'):\n    sentence = sentence.strip()\n    sentence = strip_accents(sentence)\n    if (sentence == u''):\n        return self.get_all_docs()\n    result_list_list = []\n    total_results = 0\n    for query_parser in self.search_param_list[search_type]:\n        query = query_parser['query_parser'].parse(sentence)\n        sortedby = None\n        if (must_sort and ('sortedby' in query_parser)):\n            sortedby = query_parser['sortedby']\n        if sortedby:\n            results = self.__searcher.search(query, limit=limit, sortedby=sortedby)\n        else:\n            results = self.__searcher.search(query, limit=limit)\n        results = [(result['docid'], result['doctype']) for result in results]\n        result_list_list.append(results)\n        total_results += len(results)\n        if ((not must_sort) and (total_results >= limit)):\n            break\n    docs = set()\n    for result_intermediate in result_list_list:\n        for result in result_intermediate:\n            doc = self._docs_by_id.get(result[0])\n            if (doc is None):\n                continue\n            docs.add(doc)\n    docs = [d for d in docs]\n    if ((not must_sort) and (limit is not None)):\n        docs = docs[:limit]\n    return docs", "docstring": "Returns all the documents matching the given keywords\n\nArguments:\nsentence --- a sentenced query\nReturns:\nAn array of document (doc objects)", "source": "codesearchnet"}
{"code": "def contains_peroxide(structure, relative_cutoff=1.1):\n    ox_type = oxide_type(structure, relative_cutoff)\n    if (ox_type == 'peroxide'):\n        return True\n    else:\n        return False", "docstring": "Determines if a structure contains peroxide anions.\n\nArgs:\nstructure (Structure): Input structure.\nrelative_cutoff: The peroxide bond distance is 1.49 Angstrom.\nRelative_cutoff * 1.49 stipulates the maximum distance two O\natoms must be to each other to be considered a peroxide.\n\nReturns:\nBoolean indicating if structure contains a peroxide anion.", "source": "codesearchnet"}
{"code": "def safe_group_name(group_name, group_max_length=100, ellipsis=True):\n        \n        ellipsis_value = ''\n        if ellipsis:\n            ellipsis_value = ' ...'\n\n        if group_name is not None and len(group_name) > group_max_length:\n            \n            group_name_array = group_name.split(' ')\n            group_name = ''\n            for word in group_name_array:\n                word = u'{}'.format(word)\n                if (len(group_name) + len(word) + len(ellipsis_value)) >= group_max_length:\n                    group_name = '{}{}'.format(group_name, ellipsis_value)\n                    group_name = group_name.lstrip(' ')\n                    break\n                group_name += ' {}'.format(word)\n        return group_name", "docstring": "Truncate group name to match limit breaking on space and optionally add an ellipsis.\n\n.. note:: Currently the ThreatConnect group name limit is 100 characters.\n\nArgs:\ngroup_name (string): The raw group name to be truncated.\ngroup_max_length (int): The max length of the group name.\nellipsis (boolean): If true the truncated name will have '...' appended.\n\nReturns:\n(string): The truncated group name with optional ellipsis.", "source": "juraj-google-style"}
{"code": "def list_group_members(self, name):\n        \n        return self.service.list_group_members(\n            name, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Get the members of a group (does not include maintainers).\n\nArgs:\nname (string): Name of group to query.\n\nReturns:\n(list[string]): List of member names.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def transformer_encode(encoder_function, inputs, target_space, hparams, attention_weights=None, features=None, losses=None, **kwargs):\n    inputs = common_layers.flatten4d3d(inputs)\n    (encoder_input, self_attention_bias, encoder_decoder_attention_bias) = transformer_prepare_encoder(inputs, target_space, hparams, features=features)\n    mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=hparams.layer_prepostprocess_dropout, hparams=hparams)\n    encoder_input = tf.nn.dropout(encoder_input, (1.0 - hparams.layer_prepostprocess_dropout))\n    attn_bias_for_padding = None\n    if hparams.unidirectional_encoder:\n        attn_bias_for_padding = encoder_decoder_attention_bias\n    encoder_output = encoder_function(encoder_input, self_attention_bias, hparams, nonpadding=features_to_nonpadding(features, 'inputs'), save_weights_to=attention_weights, make_image_summary=(not common_layers.is_xla_compiled()), losses=losses, attn_bias_for_padding=attn_bias_for_padding, **kwargs)\n    return (encoder_output, encoder_decoder_attention_bias)", "docstring": "Encode transformer inputs.\n\nArgs:\nencoder_function: the encoder function\ninputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which\nwill be flattened along the two spatial dimensions.\ntarget_space: scalar, target space ID.\nhparams: hyperparameters for model.\nattention_weights: weight to store attention to.\nfeatures: optionally pass the entire features dictionary as well. This is\nneeded now for \"packed\" datasets.\nlosses: optional list onto which to append extra training losses\n**kwargs: additional arguments to pass to encoder_function\n\nReturns:\nTuple of:\nencoder_output: Encoder representation.\n[batch_size, input_length, hidden_dim]\nencoder_decoder_attention_bias: Bias and mask weights for\nencoder-decoder attention. [batch_size, input_length]", "source": "codesearchnet"}
{"code": "def canonicalize(d, default=None):\n    if isinstance(d, context.LogicalDevice):\n        d = tf_device.DeviceSpec.from_string(d.name)\n    else:\n        d = tf_device.DeviceSpec.from_string(d)\n    assert d.device_type is None or d.device_type == d.device_type.upper(), \"Device type '%s' must be all-caps.\" % (d.device_type,)\n    result = tf_device.DeviceSpec(replica=0, task=0, device_type='CPU', device_index=0)\n    if ops.executing_eagerly_outside_functions():\n        host_cpu = tf_device.DeviceSpec.from_string(config.list_logical_devices('CPU')[0].name)\n        if host_cpu.job:\n            result = result.make_merged_spec(host_cpu)\n        else:\n            result = result.replace(job='localhost')\n    if default:\n        result = result.make_merged_spec(tf_device.DeviceSpec.from_string(default))\n    result = result.make_merged_spec(d)\n    return result.to_string()", "docstring": "Canonicalize device string.\n\nIf d has missing components, the rest would be deduced from the `default`\nargument or from '/replica:0/task:0/device:CPU:0'. For example:\nIf d = '/cpu:0', default='/job:worker/task:1', it returns\n'/job:worker/replica:0/task:1/device:CPU:0'.\nIf d = '/cpu:0', default='/job:worker', it returns\n'/job:worker/replica:0/task:0/device:CPU:0'.\nIf d = '/gpu:0', default=None, it returns\n'/replica:0/task:0/device:GPU:0'.\n\nNote: This uses \"job:localhost\" as the default if executing eagerly.\n\nArgs:\nd: a device string or tf.config.LogicalDevice\ndefault: a string for default device if d doesn't have all components.\n\nReturns:\na canonicalized device string.", "source": "github-repos"}
{"code": "def directed_tripartition_indices(N):\n    \n    result = []\n    if N <= 0:\n        return result\n\n    base = [0, 1, 2]\n    for key in product(base, repeat=N):\n        part = [[], [], []]\n        for i, location in enumerate(key):\n            part[location].append(i)\n\n        result.append(tuple(tuple(p) for p in part))\n\n    return result", "docstring": "Return indices for directed tripartitions of a sequence.\n\nArgs:\nN (int): The length of the sequence.\n\nReturns:\nlist[tuple]: A list of tuples containing the indices for each\npartition.\n\nExample:\n>>> N = 1\n>>> directed_tripartition_indices(N)\n[((0,), (), ()), ((), (0,), ()), ((), (), (0,))]", "source": "juraj-google-style"}
{"code": "def add_offset(self, offset):\n    self.offset += offset\n    try:\n        self._counterpart.add_offset(offset)\n    except AttributeError:\n        pass", "docstring": "Add specified value to the offset of a binary quadratic model.\n\nArgs:\noffset (number):\nValue to be added to the constant energy offset of the binary quadratic model.\n\nExamples:\n\nThis example creates an Ising model with an offset of -0.5 and then\nadds to it.\n\n>>> import dimod\n...\n>>> bqm = dimod.BinaryQuadraticModel({0: 0.0, 1: 0.0}, {(0, 1): 0.5}, -0.5, dimod.SPIN)\n>>> bqm.add_offset(1.0)\n>>> bqm.offset\n0.5", "source": "codesearchnet"}
{"code": "def get_dataset(categories: list, split: str='train'):\n    labels = ['sadness', 'joy', 'love', 'anger', 'fear', 'surprise']\n    label_map = {class_name: class_id for class_id, class_name in enumerate(labels)}\n    labels_subset = np.array([label_map[class_name] for class_name in categories])\n    emotion_dataset = load_dataset('emotion', download_mode='force_redownload')\n    X, y = (np.array(emotion_dataset[split]['text']), np.array(emotion_dataset[split]['label']))\n    subclass_idxs = [idx for idx, label in enumerate(y) if label in labels_subset]\n    X_subset, y_subset = (X[subclass_idxs], y[subclass_idxs])\n    return (X_subset.tolist(), y_subset.tolist())", "docstring": "Takes a list of categories and a split (train/test/dev) and returns the\ncorresponding subset of the dataset\n\nArgs:\ncategories (list): list of emotion categories to use\nsplit (str): The split of the dataset to use. Can be either \"train\", \"dev\", or \"test\".\nDefaults to train\n\nReturns:\nA list of text and a list of labels", "source": "github-repos"}
{"code": "def sanger_ordered(self, institute_id=None, user_id=None):\n        \n        query = {'$match': {\n                '$and': [\n                    {'verb': 'sanger'},\n                ],\n            }}\n\n        if institute_id:\n            query['$match']['$and'].append({'institute': institute_id})\n        if user_id:\n            query['$match']['$and'].append({'user_id': user_id})\n\n        \n        results = self.event_collection.aggregate([\n            query,\n            {'$group': {\n                '_id': \"$case\",\n                'vars': {'$addToSet' : '$variant_id'}\n            }}\n        ])\n\n        sanger_ordered =  [item for item in results]\n        return sanger_ordered", "docstring": "Get all variants with validations ever ordered.\n\nArgs:\ninstitute_id(str) : The id of an institute\nuser_id(str) : The id of an user\n\nReturns:\nsanger_ordered(list) : a list of dictionaries, each with \"case_id\" as keys and list of variant ids as values", "source": "juraj-google-style"}
{"code": "def _split_cell(cell, module):\n    lines = cell.split('\\n')\n    code = None\n    last_def = (- 1)\n    name = None\n    define_wild_re = re.compile('^DEFINE\\\\s+.*$', re.IGNORECASE)\n    define_re = re.compile('^DEFINE\\\\s+QUERY\\\\s+([A-Z]\\\\w*)\\\\s*?(.*)$', re.IGNORECASE)\n    select_re = re.compile('^SELECT\\\\s*.*$', re.IGNORECASE)\n    standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\\\\s*.*$', re.IGNORECASE)\n    for (i, line) in enumerate(lines):\n        define_match = define_re.match(line)\n        select_match = select_re.match(line)\n        standard_sql_match = standard_sql_re.match(line)\n        if i:\n            prior_content = ''.join(lines[:i]).strip()\n            if select_match:\n                select_match = ((len(prior_content) == 0) or ((prior_content[(- 1)] != '(') and (not standard_sql_re.match(prior_content))))\n            if standard_sql_match:\n                standard_sql_match = ((len(prior_content) == 0) or (not standard_sql_re.match(prior_content)))\n        if (define_match or select_match or standard_sql_match):\n            if (code is None):\n                code = '\\n'.join(lines[:i]).strip()\n                if len(code):\n                    code += '\\n'\n            elif (last_def >= 0):\n                query = '\\n'.join([line for line in lines[last_def:i] if len(line)]).strip()\n                if (select_match and (name != datalab.data._utils._SQL_MODULE_MAIN) and (len(query) == 0)):\n                    continue\n                statement = datalab.data.SqlStatement(query, module)\n                module.__dict__[name] = statement\n                module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement\n            if define_match:\n                name = define_match.group(1)\n                lines[i] = define_match.group(2)\n            else:\n                name = datalab.data._utils._SQL_MODULE_MAIN\n            last_def = i\n        else:\n            define_wild_match = define_wild_re.match(line)\n            if define_wild_match:\n                raise Exception('Expected \"DEFINE QUERY <name>\"')\n    if (last_def >= 0):\n        query = '\\n'.join([line for line in lines[last_def:] if len(line)]).strip()\n        statement = datalab.data.SqlStatement(query, module)\n        module.__dict__[name] = statement\n        module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement\n    if (code is None):\n        code = ''\n    module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)\n    return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None)", "docstring": "Split a hybrid %%sql cell into the Python code and the queries.\n\nPopulates a module with the queries.\n\nArgs:\ncell: the contents of the %%sql cell.\nmodule: the module that the contents will populate.\n\nReturns:\nThe default (last) query for the module.", "source": "codesearchnet"}
{"code": "def email_results(results, host, mail_from, mail_to, port=0, ssl=False, user=None, password=None, subject=None, attachment_filename=None, message=None, ssl_context=None):\n    logging.debug('Emailing report to: {0}'.format(','.join(mail_to)))\n    date_string = datetime.now().strftime('%Y-%m-%d')\n    if attachment_filename:\n        if (not attachment_filename.lower().endswith('.zip')):\n            attachment_filename += '.zip'\n        filename = attachment_filename\n    else:\n        filename = 'DMARC-{0}.zip'.format(date_string)\n    assert isinstance(mail_to, list)\n    msg = MIMEMultipart()\n    msg['From'] = mail_from\n    msg['To'] = ', '.join(mail_to)\n    msg['Date'] = email.utils.formatdate(localtime=True)\n    msg['Subject'] = (subject or 'DMARC results for {0}'.format(date_string))\n    text = (message or 'Please see the attached zip file\\n')\n    msg.attach(MIMEText(text))\n    zip_bytes = get_report_zip(results)\n    part = MIMEApplication(zip_bytes, Name=filename)\n    part['Content-Disposition'] = 'attachment; filename=\"{0}\"'.format(filename)\n    msg.attach(part)\n    try:\n        if (ssl_context is None):\n            ssl_context = create_default_context()\n        if ssl:\n            server = smtplib.SMTP_SSL(host, port=port, context=ssl_context)\n            server.connect(host, port)\n            server.ehlo_or_helo_if_needed()\n        else:\n            server = smtplib.SMTP(host, port=port)\n            server.connect(host, port)\n            server.ehlo_or_helo_if_needed()\n            if server.has_extn('starttls'):\n                server.starttls(context=ssl_context)\n                server.ehlo()\n            else:\n                logger.warning('SMTP server does not support STARTTLS. Proceeding in plain text!')\n        if (user and password):\n            server.login(user, password)\n        server.sendmail(mail_from, mail_to, msg.as_string())\n    except smtplib.SMTPException as error:\n        error = error.__str__().lstrip(\"b'\").rstrip(\"'\").rstrip('.')\n        raise SMTPError(error)\n    except socket.gaierror:\n        raise SMTPError('DNS resolution failed')\n    except ConnectionRefusedError:\n        raise SMTPError('Connection refused')\n    except ConnectionResetError:\n        raise SMTPError('Connection reset')\n    except ConnectionAbortedError:\n        raise SMTPError('Connection aborted')\n    except TimeoutError:\n        raise SMTPError('Connection timed out')\n    except SSLError as error:\n        raise SMTPError('SSL error: {0}'.format(error.__str__()))\n    except CertificateError as error:\n        raise SMTPError('Certificate error: {0}'.format(error.__str__()))", "docstring": "Emails parsing results as a zip file\n\nArgs:\nresults (OrderedDict): Parsing results\nhost: Mail server hostname or IP address\nmail_from: The value of the message from header\nmail_to : A list of addresses to mail to\nport (int): Port to use\nssl (bool): Require a SSL connection from the start\nuser: An optional username\npassword: An optional password\nsubject: Overrides the default message subject\nattachment_filename: Override the default attachment filename\nmessage: Override the default plain text body\nssl_context: SSL context options", "source": "codesearchnet"}
{"code": "def _to_qasm_output(self, header: Optional[str]=None, precision: int=10, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> QasmOutput:\n    if (header is None):\n        header = 'Generated from Cirq v{}'.format(cirq._version.__version__)\n    qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(self.all_qubits())\n    return QasmOutput(operations=self.all_operations(), qubits=qubits, header=header, precision=precision, version='2.0')", "docstring": "Returns a QASM object equivalent to the circuit.\n\nArgs:\nheader: A multi-line string that is placed in a comment at the top\nof the QASM. Defaults to a cirq version specifier.\nprecision: Number of digits to use when representing numbers.\nqubit_order: Determines how qubits are ordered in the QASM\nregister.", "source": "codesearchnet"}
{"code": "def show_history(self, status=None, nids=None, full_history=False, metadata=False):\n        \n        nrows, ncols = get_terminal_size()\n\n        works_done = []\n        \n        for task in self.iflat_tasks(status=status, nids=nids):\n            work = task.work\n\n            if work not in works_done:\n                works_done.append(work)\n                if work.history or full_history:\n                    cprint(make_banner(str(work), width=ncols, mark=\"=\"), **work.status.color_opts)\n                    print(work.history.to_string(metadata=metadata))\n\n            if task.history or full_history:\n                cprint(make_banner(str(task), width=ncols, mark=\"=\"), **task.status.color_opts)\n                print(task.history.to_string(metadata=metadata))\n\n        \n        if self.history or full_history:\n            cprint(make_banner(str(self), width=ncols, mark=\"=\"), **self.status.color_opts)\n            print(self.history.to_string(metadata=metadata))", "docstring": "Print the history of the flow to stdout.\n\nArgs:\nstatus: if not None, only the tasks with this status are select\nfull_history: Print full info set, including nodes with an empty history.\nnids: optional list of node identifiers used to filter the tasks.\nmetadata: print history metadata (experimental)", "source": "juraj-google-style"}
{"code": "def class_from_typename(cls, type_name: str) -> Optional[Type['JSONConvertible']]:\n    return cls._TYPE_REGISTRY.class_from_typename(type_name)", "docstring": "Gets the class for a registered type name.\n\nArgs:\ntype_name: A string as the global unique type identifier for requested\nclass.\n\nReturns:\nA type object if registered, otherwise None.", "source": "github-repos"}
{"code": "def __call__(self, images: Union[str, List[str], 'Image.Image', List['Image.Image']], **kwargs: Any) -> Union['Image.Image', List['Image.Image']]:\n    return super().__call__(images, **kwargs)", "docstring": "Transform the image(s) passed as inputs.\n\nArgs:\nimages (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):\nThe pipeline handles three types of images:\n\n- A string containing a http link pointing to an image\n- A string containing a local path to an image\n- An image loaded in PIL directly\n\nThe pipeline accepts either a single image or a batch of images, which must then be passed as a string.\nImages in a batch must all be in the same format: all as http links, all as local paths, or all as PIL\nimages.\ntimeout (`float`, *optional*, defaults to None):\nThe maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and\nthe call may block forever.\n\nReturn:\nAn image (Image.Image) or a list of images (List[\"Image.Image\"]) containing result(s). If the input is a\nsingle image, the return will be also a single image, if the input is a list of several images, it will\nreturn a list of transformed images.", "source": "github-repos"}
{"code": "def timed_operation(msg, log_start=False):\n    assert len(msg)\n    if log_start:\n        logger.info('Start {} ...'.format(msg))\n    start = timer()\n    (yield)\n    msg = (msg[0].upper() + msg[1:])\n    logger.info('{} finished, time:{:.4f} sec.'.format(msg, (timer() - start)))", "docstring": "Surround a context with a timer.\n\nArgs:\nmsg(str): the log to print.\nlog_start(bool): whether to print also at the beginning.\n\nExample:\n.. code-block:: python\n\nwith timed_operation('Good Stuff'):\ntime.sleep(1)\n\nWill print:\n\n.. code-block:: python\n\nGood stuff finished, time:1sec.", "source": "codesearchnet"}
{"code": "def send_log_message(self, message: LogMessage) -> None:\n    self.send_log_messages([message])", "docstring": "Sends the log message to BigQuery.\n\nArgs:\n* message: LogMessage dictionary\n\nReturns:\n* None\n\nRaises:\n* RuntimeError: if BigQuery insert fails", "source": "github-repos"}
{"code": "def add_mapped_chain_ids(self, mapped_chains):\n        \n        mapped_chains = ssbio.utils.force_list(mapped_chains)\n\n        for c in mapped_chains:\n            if c not in self.mapped_chains:\n                self.mapped_chains.append(c)\n                log.debug('{}: added to list of mapped chains'.format(c))\n            else:\n                log.debug('{}: chain already in list of mapped chains, not adding'.format(c))", "docstring": "Add chains by ID into the mapped_chains attribute\n\nArgs:\nmapped_chains (str, list): Chain ID or list of IDs", "source": "juraj-google-style"}
{"code": "def get_all(cls, account=None, location=None, include_disabled=False):\n    qry = db.Resource.filter((Resource.resource_type_id == ResourceType.get(cls.resource_type).resource_type_id))\n    if account:\n        qry = qry.filter((Resource.account_id == account.account_id))\n    if (not include_disabled):\n        qry = qry.join(Account, (Resource.account_id == Account.account_id)).filter((Account.enabled == 1))\n    if location:\n        qry = qry.filter((Resource.location == location))\n    return {res.resource_id: cls(res) for res in qry.all()}", "docstring": "Returns a list of all resources for a given account, location and resource type.\n\nAttributes:\naccount (:obj:`Account`): Account owning the resources\nlocation (`str`): Location of the resources to return (region)\ninclude_disabled (`bool`): Include resources from disabled accounts (default: False)\n\nReturns:\nlist of resource objects", "source": "codesearchnet"}
{"code": "def from_ase_atoms(cls, atoms):\n    return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions)", "docstring": "Create an instance of the own class from an ase molecule\n\nArgs:\nmolecule (:class:`ase.atoms.Atoms`):\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def unpackStruct(self, data, def_buf):\n    struct_str = '='\n    for fld in def_buf:\n        if (not def_buf[fld][MeterData.CalculatedFlag]):\n            struct_str = ((struct_str + str(def_buf[fld][MeterData.SizeValue])) + 's')\n    if (len(data) == 255):\n        contents = struct.unpack(struct_str, str(data))\n    else:\n        self.writeCmdMsg(('Length error.  Len() size = ' + str(len(data))))\n        contents = ()\n    return contents", "docstring": "Wrapper for struct.unpack with SerialBlock buffer definitionns.\n\nArgs:\ndata (str): Implicit cast bytes to str, serial port return.\ndef_buf (SerialBlock): Block object holding field lengths.\n\nReturns:\ntuple: parsed result of struct.unpack() with field definitions.", "source": "codesearchnet"}
{"code": "def users(self, institute=None):\n        \n        query = {}\n        if institute:\n            LOG.info(\"Fetching all users from institute %s\", institute)\n            query = {'institutes': {'$in': [institute]}}\n        else:\n            LOG.info(\"Fetching all users\")\n            \n        res = self.user_collection.find(query)\n        return res", "docstring": "Return all users from the database\n\nArgs:\ninstitute(str): A institute_id\n\nReturns:\nres(pymongo.Cursor): A cursor with users", "source": "juraj-google-style"}
{"code": "def create(self, name):\n    return Bucket(name, context=self._context).create(self._project_id)", "docstring": "Creates a new bucket.\n\nArgs:\nname: a unique name for the new bucket.\nReturns:\nThe newly created bucket.\nRaises:\nException if there was an error creating the bucket.", "source": "codesearchnet"}
{"code": "def _parse_session_run_index(self, event):\n    metadata_string = event.log_message.message\n    try:\n        metadata = json.loads(metadata_string)\n    except ValueError as e:\n        logger.error(\"Could not decode metadata string '%s' for step value: %s\", metadata_string, e)\n        return constants.SENTINEL_FOR_UNDETERMINED_STEP\n    try:\n        return metadata['session_run_index']\n    except KeyError:\n        logger.error('The session_run_index is missing from the metadata: %s', metadata_string)\n        return constants.SENTINEL_FOR_UNDETERMINED_STEP", "docstring": "Parses the session_run_index value from the event proto.\n\nArgs:\nevent: The event with metadata that contains the session_run_index.\n\nReturns:\nThe int session_run_index value. Or\nconstants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined.", "source": "codesearchnet"}
{"code": "def copy(self, source_file_names, destination_file_names):\n    raise NotImplementedError", "docstring": "Recursively copy the file tree from the source to the destination\n\nArgs:\nsource_file_names: list of source file objects that needs to be copied\ndestination_file_names: list of destination of the new object\n\nRaises:\n``BeamIOError``: if any of the copy operations fail", "source": "github-repos"}
{"code": "def add_virtual_loss(self, up_to):\n        \n        self.losses_applied += 1\n        \n        \n        loss = self.position.to_play\n        self.W += loss\n        if self.parent is None or self is up_to:\n            return\n        self.parent.add_virtual_loss(up_to)", "docstring": "Propagate a virtual loss up to the root node.\n\nArgs:\nup_to: The node to propagate until. (Keep track of this! You'll\nneed it to reverse the virtual loss later.)", "source": "juraj-google-style"}
{"code": "def AddArg(self, arg):\n    \n    self.args.append(arg)\n    if len(self.args) > self.number_of_args:\n      raise ParseError(\"Too many args for this expression.\")\n\n    elif len(self.args) == self.number_of_args:\n      return True\n\n    return False", "docstring": "Adds a new arg to this expression.\n\nArgs:\narg: The argument to add (string).\n\nReturns:\nTrue if this arg is the last arg, False otherwise.\n\nRaises:\nParseError: If there are too many args.", "source": "juraj-google-style"}
{"code": "def output(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n    return self._get_node_attribute_at_index(0, 'output_tensors', 'output')", "docstring": "Retrieves the output tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one output,\ni.e. if it is connected to one incoming layer.\n\nReturns:\nOutput tensor or list of output tensors.\n\nRaises:\nAttributeError: if the layer is connected to more than one incoming\nlayers.\nRuntimeError: if called in Eager mode.", "source": "github-repos"}
{"code": "def set_s3_prefix(self, region, name):\n        \n        ct = self.session.client('cloudtrail', region_name=region)\n        ct.update_trail(Name=name, S3KeyPrefix=self.account.account_name)\n\n        auditlog(\n            event='cloudtrail.set_s3_prefix',\n            actor=self.ns,\n            data={\n                'account': self.account.account_name,\n                'region': region\n            }\n        )\n        self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format(\n            self.account.account_name,\n            region\n        ))", "docstring": "Sets the S3 prefix for a CloudTrail Trail\n\nArgs:\nregion (`str`): Name of the AWS region\nname (`str`): Name of the CloudTrail Trail\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def webhook(self, webhook_url):\n    if (not webhook_url):\n        raise Exception('Url can not be None')\n    matcher = re.match(self.__webhook_url_format, webhook_url)\n    if (not matcher):\n        raise Exception(('Invalid url format, looking for: ' + self.__webhook_url_format))\n    self.api_keys(int(matcher.group(1)), matcher.group(2))", "docstring": "Load object with webhook_url\n\nArgs:\nwebhook_url (str): full webhook url given by Discord 'create webhook' func", "source": "codesearchnet"}
{"code": "def map_exp_ids(self, exp):\n    names = self.exp_feature_names\n    if (self.discretized_feature_names is not None):\n        names = self.discretized_feature_names\n    return [(names[x[0]], x[1]) for x in exp]", "docstring": "Maps ids to feature names.\n\nArgs:\nexp: list of tuples [(id, weight), (id,weight)]\n\nReturns:\nlist of tuples (feature_name, weight)", "source": "codesearchnet"}
{"code": "def get_oligomeric_state(swiss_model_path):\n    oligo_info = {}\n    with open(swiss_model_path, 'r') as f:\n        for line in f:\n            if line.startswith('REMARK   3 MODEL INFORMATION'):\n                break\n        for i in range(10):\n            line = f.readline()\n            if ('ENGIN' in line):\n                oligo_info['ENGIN'] = line.rstrip().split(' ')[(- 1)]\n            elif ('OSTAT' in line):\n                oligo_info['OSTAT'] = line.rstrip().split(' ')[(- 1)]\n            elif ('OSRSN' in line):\n                oligo_info['OSRSN'] = line.rstrip().split(' ')[(- 1)]\n            elif ('QSPRD' in line):\n                oligo_info['QSPRD'] = line.rstrip().split(' ')[(- 1)]\n            elif ('GMQE' in line):\n                oligo_info['GMQE'] = line.rstrip().split(' ')[(- 1)]\n            elif ('QMN4' in line):\n                oligo_info['QMN4'] = line.rstrip().split(' ')[(- 1)]\n            elif ('MODT' in line):\n                oligo_info['MODT'] = line.rstrip().split(' ')[(- 1)]\n    return oligo_info", "docstring": "Parse the oligomeric prediction in a SWISS-MODEL repository file\n\nAs of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.\n\nArgs:\nswiss_model_path (str): Path to SWISS-MODEL PDB file\n\nReturns:\ndict: Information parsed about the oligomeric state", "source": "codesearchnet"}
{"code": "def daemon(args):\n    \n    if os.environ.get(DVC_DAEMON):\n        logger.debug(\"skipping launching a new daemon.\")\n        return\n\n    cmd = [sys.executable]\n    if not is_binary():\n        cmd += [\"-m\", \"dvc\"]\n    cmd += [\"daemon\", \"-q\"] + args\n\n    env = fix_env()\n    file_path = os.path.abspath(inspect.stack()[0][1])\n    env[cast_bytes_py2(\"PYTHONPATH\")] = cast_bytes_py2(\n        os.path.dirname(os.path.dirname(file_path))\n    )\n    env[cast_bytes_py2(DVC_DAEMON)] = cast_bytes_py2(\"1\")\n\n    _spawn(cmd, env)", "docstring": "Launch a `dvc daemon` command in a detached process.\n\nArgs:\nargs (list): list of arguments to append to `dvc daemon` command.", "source": "juraj-google-style"}
{"code": "def __init__(self, tcex):\n        \n        self.tcex = tcex\n        self._db = None\n        self._out_variables = None\n        self._out_variables_type = None\n        self.output_data = {}\n\n        \n        self._variable_match = re.compile(r'^{}$'.format(self._variable_pattern))\n        \n        self._variable_parse = re.compile(self._variable_pattern)\n        \n        self._vars_keyvalue_embedded = re.compile(\n            r'(?:\\\"\\:\\s?)[^\\\"]?{}'.format(self._variable_pattern)\n        )", "docstring": "Initialize the Class properties.\n\nArgs:\ntcex (object): Instance of TcEx.", "source": "juraj-google-style"}
{"code": "def get_models(module: types.ModuleType, include_pretrained: bool=False) -> List[Tuple[str, type]]:\n    models = []\n    model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)\n    for attr_name in dir(module):\n        if not include_pretrained and ('Pretrained' in attr_name or 'PreTrained' in attr_name):\n            continue\n        attr = getattr(module, attr_name)\n        if isinstance(attr, type) and issubclass(attr, model_classes) and (attr.__module__ == module.__name__):\n            models.append((attr_name, attr))\n    return models", "docstring": "Get the objects in a module that are models.\n\nArgs:\nmodule (`types.ModuleType`):\nThe module from which we are extracting models.\ninclude_pretrained (`bool`, *optional*, defaults to `False`):\nWhether or not to include the `PreTrainedModel` subclass (like `BertPreTrainedModel`) or not.\n\nReturns:\nList[Tuple[str, type]]: List of models as tuples (class name, actual class).", "source": "github-repos"}
{"code": "def remove_root(self, model, setter=None):\n    if (model not in self._roots):\n        return\n    self._push_all_models_freeze()\n    try:\n        self._roots.remove(model)\n    finally:\n        self._pop_all_models_freeze()\n    self._trigger_on_change(RootRemovedEvent(self, model, setter))", "docstring": "Remove a model as root model from this Document.\n\nChanges to this model may still trigger ``on_change`` callbacks\non this document, if the model is still referred to by other\nroot models.\n\nArgs:\nmodel (Model) :\nThe model to add as a root of this document.\n\nsetter (ClientSession or ServerSession or None, optional) :\nThis is used to prevent \"boomerang\" updates to Bokeh apps.\n(default: None)\n\nIn the context of a Bokeh server application, incoming updates\nto properties will be annotated with the session that is\ndoing the updating. This value is propagated through any\nsubsequent change notifications that the update triggers.\nThe session can compare the event setter to itself, and\nsuppress any updates that originate from itself.", "source": "codesearchnet"}
{"code": "def wrap_deepmind(env, dim=84, framestack=True):\n    \n    env = MonitorEnv(env)\n    env = NoopResetEnv(env, noop_max=30)\n    if \"NoFrameskip\" in env.spec.id:\n        env = MaxAndSkipEnv(env, skip=4)\n    env = EpisodicLifeEnv(env)\n    if \"FIRE\" in env.unwrapped.get_action_meanings():\n        env = FireResetEnv(env)\n    env = WarpFrame(env, dim)\n    \n    \n    if framestack:\n        env = FrameStack(env, 4)\n    return env", "docstring": "Configure environment for DeepMind-style Atari.\n\nNote that we assume reward clipping is done outside the wrapper.\n\nArgs:\ndim (int): Dimension to resize observations to (dim x dim).\nframestack (bool): Whether to framestack observations.", "source": "juraj-google-style"}
{"code": "def parse_s3_url(url):\n    \n    parsed_url = urlparse(url)\n    if parsed_url.scheme != \"s3\":\n        raise ValueError(\"Expecting 's3' scheme, got: {} in {}\".format(parsed_url.scheme, url))\n    return parsed_url.netloc, parsed_url.path.lstrip('/')", "docstring": "Returns an (s3 bucket, key name/prefix) tuple from a url with an s3 scheme\n\nArgs:\nurl (str):\n\nReturns:\ntuple: A tuple containing:\nstr: S3 bucket name\nstr: S3 key", "source": "juraj-google-style"}
{"code": "def _parse_command(self, command):\n    command = command.strip()\n    if not command:\n        return ('', [], None)\n    command_items = command_parser.parse_command(command)\n    command_items, output_file_path = command_parser.extract_output_file_path(command_items)\n    return (command_items[0], command_items[1:], output_file_path)", "docstring": "Parse a command string into prefix and arguments.\n\nArgs:\ncommand: (str) Command string to be parsed.\n\nReturns:\nprefix: (str) The command prefix.\nargs: (list of str) The command arguments (i.e., not including the\nprefix).\noutput_file_path: (str or None) The path to save the screen output\nto (if any).", "source": "github-repos"}
{"code": "def _freezeModel(self, func):\n    root = autotrackable.AutoTrackable()\n    root.f = func\n    input_func = root.f.get_concrete_function()\n    output_func = convert_to_constants.convert_var_to_const_function_in_v1(input_func, lower_control_flow=False)\n    return (root, output_func)", "docstring": "Freezes the function.\n\nArgs:\nfunc: Function.\n\nReturns:\nroot: AutoTrackable object with original ConcreteFunction.\noutput_func: frozen ConcreteFunction.", "source": "github-repos"}
{"code": "def AddLabels(self, labels):\n    \n    for label in labels:\n      if not self._VALID_LABEL_REGEX.match(label):\n        raise ValueError((\n            'Unsupported label: \"{0:s}\". A label must only consist of '\n            'alphanumeric characters or underscores.').format(label))\n\n    for label in labels:\n      if label not in self.labels:\n        self.labels.append(label)", "docstring": "Adds labels to the event tag.\n\nArgs:\nlabels (list[str]): labels.\n\nRaises:\nValueError: if a label is malformed.", "source": "juraj-google-style"}
{"code": "def heightmap_multiply_hm(hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray) -> None:\n    hm3[:] = (hm1[:] * hm2[:])", "docstring": "Multiplies two heightmap's together and stores the result in ``hm3``.\n\nArgs:\nhm1 (numpy.ndarray): The first heightmap.\nhm2 (numpy.ndarray): The second heightmap to multiply with the first.\nhm3 (numpy.ndarray): A destination heightmap to store the result.\n\n.. deprecated:: 2.0\nDo ``hm3[:] = hm1[:] * hm2[:]`` instead.\nAlternatively you can do ``HeightMap(hm1.array[:] * hm2.array[:])``.", "source": "codesearchnet"}
{"code": "def integer_based_slice(self, ts):\n        \n\n        if isinstance(ts, slice):\n            try:\n                start = Seconds(0) if ts.start is None else ts.start\n                if start < Seconds(0):\n                    start = self.end + start\n                stop = self.end if ts.stop is None else ts.stop\n                if stop < Seconds(0):\n                    stop = self.end + stop\n                duration = stop - start\n                ts = TimeSlice(start=start, duration=duration)\n            except (ValueError, TypeError):\n                pass\n\n        if not isinstance(ts, TimeSlice):\n            return ts\n\n        diff = self.duration - self.frequency\n        start_index = \\\n            max(0, np.floor((ts.start - diff) / self.frequency))\n        end = self.end if ts.duration is None else ts.end\n\n\n        \n        \n        \n        \n        \n        ratio = np.round(end / self.frequency, 2)\n\n\n        stop_index = np.ceil(ratio)\n        return slice(int(start_index), int(stop_index))", "docstring": "Transform a :class:`TimeSlice` into integer indices that numpy can work\nwith\n\nArgs:\nts (slice, TimeSlice): the time slice to translate into integer\nindices", "source": "juraj-google-style"}
{"code": "def add_embedded_campaign(self, id, collection, campaign, confidence,\n                              analyst, date, description):\n        \n        if type(id) is not ObjectId:\n            id = ObjectId(id)\n        \n        \n        obj = getattr(self.db, collection)\n        result = obj.find({'_id': id, 'campaign.name': campaign})\n        if result.count() > 0:\n            return\n        else:\n            log.debug('Adding campaign to set: {}'.format(campaign))\n            campaign_obj = {\n                'analyst': analyst,\n                'confidence': confidence,\n                'date': date,\n                'description': description,\n                'name': campaign\n            }\n            result = obj.update(\n                {'_id': id},\n                {'$push': {'campaign': campaign_obj}}\n            )\n            return result", "docstring": "Adds an embedded campaign to the TLO.\n\nArgs:\nid: the CRITs object id of the TLO\ncollection: The db collection. See main class documentation.\ncampaign: The campaign to assign.\nconfidence: The campaign confidence\nanalyst: The analyst making the assignment\ndate: The date of the assignment\ndescription: A description\nReturns:\nThe resulting mongo object", "source": "juraj-google-style"}
{"code": "def m_to_inches(value):\n    if value is None:\n        return None\n    return value / 39.37", "docstring": "Converts distance in meters to inches\n\nArgs:\nvalue: floating point representing the distance in meters\nReturns: distance in inches", "source": "github-repos"}
{"code": "def lat_id(self, line):\n        \n        if self.grid == 'WAC':\n            lat = ((1 + self.LINE_PROJECTION_OFFSET - line) *\n                   self.MAP_SCALE * 1e-3 / self.A_AXIS_RADIUS)\n            return lat * 180 / np.pi\n        else:\n            lat = float(self.CENTER_LATITUDE) - \\\n                (line - float(self.LINE_PROJECTION_OFFSET) - 1)\\\n                / float(self.MAP_RESOLUTION)\n            return lat", "docstring": "Return the corresponding latitude\n\nArgs:\nline (int): Line number\n\nReturns:\nCorreponding latitude in degree", "source": "juraj-google-style"}
{"code": "def _ProcessGRRMessages(self, fs_client_id, grr_messages):\n    grr_client_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_client_id)\n    for grr_message in grr_messages:\n        grr_message.source = grr_client_id\n        grr_message.auth_state = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED\n    client_is_new = self.frontend.EnrolFleetspeakClient(client_id=grr_client_id)\n    if ((not client_is_new) and data_store.RelationalDBEnabled()):\n        data_store.REL_DB.WriteClientMetadata(grr_client_id, last_ping=rdfvalue.RDFDatetime.Now())\n    self.frontend.ReceiveMessages(client_id=grr_client_id, messages=grr_messages)", "docstring": "Handles messages from GRR clients received via Fleetspeak.\n\nThis method updates the last-ping timestamp of the client before beginning\nprocessing.\n\nArgs:\nfs_client_id: The Fleetspeak client-id for the client.\ngrr_messages: An Iterable of GrrMessages.", "source": "codesearchnet"}
{"code": "def add_arguments(self, parser):\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('-d', '--downgrade', action='store_true', help='downgrade the J-Link firmware')\n    group.add_argument('-u', '--upgrade', action='store_true', help='upgrade the J-Link firmware')\n    return self.add_common_arguments(parser, False)", "docstring": "Adds the arguments for the firmware command.\n\nArgs:\nself (FirmwareCommand): the ``FirmwareCommand`` instance\nparser (argparse.ArgumentParser): parser to add the commands to\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def __init__( self, sites ):\n        \n        self.sites = set( sites )\n        self.neighbours = set()\n        for s in self.sites:\n            self.neighbours.update( s.p_neighbours ) \n        self.neighbours = self.neighbours.difference( self.sites )", "docstring": "Initialise an Cluster instance.\n\nArgs:\nsites (List(Site): The list of sites that make up the cluster.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def is_allowlisted(o, check_call_override=True, allow_namedtuple_subclass=False):\n    if isinstance(o, functools.partial):\n        m = functools\n    else:\n        m = tf_inspect.getmodule(o)\n    if hasattr(m, '__name__'):\n        for rule in config.CONVERSION_RULES:\n            action = rule.get_action(m)\n            if action == config.Action.CONVERT:\n                logging.log(2, 'Not allowed: %s: %s', o, rule)\n                return False\n            elif action == config.Action.DO_NOT_CONVERT:\n                logging.log(2, 'Allowlisted: %s: %s', o, rule)\n                return True\n    if hasattr(o, '__code__') and tf_inspect.isgeneratorfunction(o):\n        logging.log(2, 'Allowlisted: %s: generator functions are not converted', o)\n        return True\n    if check_call_override and (not tf_inspect.isclass(o)) and hasattr(o, '__call__'):\n        if type(o) != type(o.__call__) and is_allowlisted(o.__call__):\n            logging.log(2, 'Allowlisted: %s: object __call__ allowed', o)\n            return True\n    owner_class = None\n    if tf_inspect.ismethod(o):\n        owner_class = inspect_utils.getmethodclass(o)\n        if owner_class is tf_method_target.TfMethodTarget:\n            owner_class = o.__self__.target_class\n        if owner_class is not None:\n            if issubclass(owner_class, unittest.TestCase):\n                logging.log(2, 'Allowlisted: %s: method of TestCase subclass', o)\n                return True\n            owner_class = inspect_utils.getdefiningclass(o, owner_class)\n            if is_allowlisted(owner_class, check_call_override=False, allow_namedtuple_subclass=True):\n                logging.log(2, 'Allowlisted: %s: owner is allowed %s', o, owner_class)\n                return True\n    if inspect_utils.isnamedtuple(o):\n        if allow_namedtuple_subclass:\n            if not any((inspect_utils.isnamedtuple(base) for base in o.__bases__)):\n                logging.log(2, 'Allowlisted: %s: named tuple', o)\n                return True\n        else:\n            logging.log(2, 'Allowlisted: %s: named tuple or subclass', o)\n            return True\n    logging.log(2, 'Not allowed: %s: default rule', o)\n    return False", "docstring": "Checks whether an entity is allowed for use in graph mode.\n\nExamples of allowed entities include all members of the tensorflow\npackage.\n\nArgs:\no: A Python entity.\ncheck_call_override: Reserved for internal use. When set to `False`, it\ndisables the rule according to which classes are allowed if their\n__call__ method is allowed.\nallow_namedtuple_subclass: Reserved for internal use. When `True`,\nnamedtuple subclasses are not allowed.\n\nReturns:\nBoolean", "source": "github-repos"}
{"code": "def equals(self, other):\n    if (not isinstance(other, self.__class__)):\n        return False\n    else:\n        return (self.properties_with_values() == other.properties_with_values())", "docstring": "Structural equality of models.\n\nArgs:\nother (HasProps) : the other instance to compare to\n\nReturns:\nTrue, if properties are structurally equal, otherwise False", "source": "codesearchnet"}
{"code": "def set_options(cls, pipeline_options):\n    cls._pipeline_options = pipeline_options", "docstring": "Set filesystem options.\n\nArgs:\npipeline_options: Instance of ``PipelineOptions``.", "source": "github-repos"}
{"code": "def __init__(self, xid=None, port=None):\n        \n        super().__init__(xid)\n        self.port = port", "docstring": "Create a QueueGetConfigRequest with the optional parameters below.\n\nArgs:\nxid (int): xid of OpenFlow header\nport (:class:`~.common.port.PortNo`): Target port for the query.", "source": "juraj-google-style"}
{"code": "def _project_TH3(self, hist: Hist) -> Any:\n        \n        \n        if len(self.projection_axes) < 1 or len(self.projection_axes) > 2:\n            raise ValueError(len(self.projection_axes), \"Invalid number of axes\")\n\n        \n        projection_axis_name = \"\"\n        for axis in self.projection_axes:\n            \n            \n            \n            \n            \n            \n            proj_axis_name = axis.axis_type.name[:1]\n            if proj_axis_name not in [\"x\", \"y\", \"z\"]:\n                raise ValueError(f\"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration.\")\n            projection_axis_name += proj_axis_name\n\n        \n        \n        \n        \n        if len(self.projection_axes) == 2:\n            \n            projection_axis_name = projection_axis_name[::-1]\n\n        \n        logger.info(f\"Projecting onto axes \\\"{projection_axis_name}\\\" from hist {hist.GetName()}\")\n        projected_hist = hist.Project3D(projection_axis_name)\n\n        return projected_hist", "docstring": "Perform the actual TH3 -> TH1 projection.\n\nThis projection could be to 1D or 2D.\n\nArgs:\nhist (ROOT.TH3): Histogram from which the projections should be performed.\nReturns:\nROOT.TH1: The projected histogram.", "source": "juraj-google-style"}
{"code": "def energy_at_conditions(self, pH, V):\n        \n        return self.energy + self.npH * PREFAC * pH + self.nPhi * V", "docstring": "Get free energy for a given pH and V\n\nArgs:\npH (float): pH at which to evaluate free energy\nV (float): voltage at which to evaluate free energy\n\nReturns:\nfree energy at conditions", "source": "juraj-google-style"}
{"code": "def take_parenting(self, inst):\n        \n\n        if self is inst:\n            return\n        for decl in inst.declarations:\n            decl.parent = self\n            self.declarations.append(decl)\n        inst.declarations = []", "docstring": "Takes parenting from inst and transfers it to self.\n\nArgs:\ninst (namespace_t): a namespace declaration", "source": "juraj-google-style"}
{"code": "def expression(self, previous_precedence=0):\n    lhs = self.atom()\n    return self.operator(lhs, previous_precedence)", "docstring": "An expression is an atom or an infix expression.\n\nGrammar (sort of, actually a precedence-climbing parser):\nexpression = atom [ binary_operator expression ] .\n\nArgs:\nprevious_precedence: What operator precedence should we start with?", "source": "codesearchnet"}
{"code": "def dict_to_attributes_code(dict_):\n    lines = []\n    for (key, value) in dict_.iteritems():\n        if isinstance(value, dict):\n            txt = dict_to_attributes_code(value)\n            lines_ = txt.split('\\n')\n            for line in lines_:\n                if (not line.startswith(' ')):\n                    line = ('%s.%s' % (key, line))\n                lines.append(line)\n        else:\n            value_txt = pformat(value)\n            if ('\\n' in value_txt):\n                lines.append(('%s = \\\\' % key))\n                value_txt = indent(value_txt)\n                lines.extend(value_txt.split('\\n'))\n            else:\n                line = ('%s = %s' % (key, value_txt))\n                lines.append(line)\n    return '\\n'.join(lines)", "docstring": "Given a nested dict, generate a python code equivalent.\n\nExample:\n>>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}}\n>>> print dict_to_attributes_code(d)\nfoo = 'bah'\ncolors.red = 1\ncolors.blue = 2\n\nReturns:\nstr.", "source": "codesearchnet"}
{"code": "def get_asides(self, block):\n        \n        aside_instances = [\n            self.get_aside_of_type(block, aside_type)\n            for aside_type in self.applicable_aside_types(block)\n        ]\n        return [\n            aside_instance for aside_instance in aside_instances\n            if aside_instance.should_apply_to_block(block)\n        ]", "docstring": "Return instances for all of the asides that will decorate this `block`.\n\nArguments:\nblock (:class:`.XBlock`): The block to render retrieve asides for.\n\nReturns:\nList of XBlockAside instances", "source": "juraj-google-style"}
{"code": "def _get_op_control_flow_context(self, op):\n    op_control_flow_context = op._control_flow_context\n    if control_flow_util.IsLoopExit(op):\n        op_control_flow_context = op_control_flow_context.outer_context\n    return op_control_flow_context", "docstring": "Returns the control flow of the given op.\n\nArgs:\nop: tf.Operation for which the control flow context is requested.\nReturns:\nop_control_flow_context: which the is control flow context of the given\nop. If the operation type is LoopExit, returns the outer control flow\ncontext.", "source": "github-repos"}
{"code": "def xml(self, xml):\n        \n        self._request.xml = xml\n        self.add_matcher(matcher('XMLMatcher', xml))", "docstring": "Defines a XML body value to match.\n\nArguments:\nxml (str|regex): body XML to match.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def __SetDefaultUploadStrategy(self, upload_config, http_request):\n    if (upload_config.resumable_path is None):\n        self.strategy = SIMPLE_UPLOAD\n    if (self.strategy is not None):\n        return\n    strategy = SIMPLE_UPLOAD\n    if ((self.total_size is not None) and (self.total_size > _RESUMABLE_UPLOAD_THRESHOLD)):\n        strategy = RESUMABLE_UPLOAD\n    if (http_request.body and (not upload_config.simple_multipart)):\n        strategy = RESUMABLE_UPLOAD\n    if (not upload_config.simple_path):\n        strategy = RESUMABLE_UPLOAD\n    self.strategy = strategy", "docstring": "Determine and set the default upload strategy for this upload.\n\nWe generally prefer simple or multipart, unless we're forced to\nuse resumable. This happens when any of (1) the upload is too\nlarge, (2) the simple endpoint doesn't support multipart requests\nand we have metadata, or (3) there is no simple upload endpoint.\n\nArgs:\nupload_config: Configuration for the upload endpoint.\nhttp_request: The associated http request.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def jump(self):\n    potential_jumps = self.potential_jumps()\n    if (not potential_jumps):\n        raise BlockedLatticeError('No moves are possible in this lattice')\n    all_transitions = transitions.Transitions(self.potential_jumps())\n    random_jump = all_transitions.random()\n    delta_t = all_transitions.time_to_jump()\n    self.time += delta_t\n    self.update_site_occupation_times(delta_t)\n    self.update(random_jump)\n    return all_transitions.time_to_jump()", "docstring": "Select a jump at random from all potential jumps, then update the lattice state.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _find_and_replace(text, start_string, end_string, replace_fn):\n    ret = u''\n    current_pos = 0\n    while True:\n        start_pos = text.find(start_string, current_pos)\n        if (start_pos == (- 1)):\n            ret += text[current_pos:]\n            break\n        ret += text[current_pos:start_pos]\n        end_pos = text.find(end_string, (start_pos + len(start_string)))\n        if (end_pos == (- 1)):\n            break\n        ret += replace_fn(text[(start_pos + len(start_string)):end_pos])\n        current_pos = (end_pos + len(end_string))\n    return ret", "docstring": "Remove everything found between instances of start_string and end_string.\n\nReplace each such instance with replace_fn(removed_text)\n\ne.g. _find_and_replace(u\"the [[fat]] cat [[sat]]\", u\"[[\", u\"]]\", lambda x: x)\n= u\"the fat cat sat\"\n\nArgs:\ntext: a unicode string\nstart_string: a unicode string\nend_string: a unicode string\nreplace_fn: a unary function from unicode string to unicode string\n\nReturns:\na string", "source": "codesearchnet"}
{"code": "def __init__(self, call_collection, call_fn, name, input_signature):\n    self.call_collection = call_collection\n    self.input_signature = input_signature\n    self.wrapped_call = def_function.function(layer_call_wrapper(call_collection, call_fn, name), input_signature=input_signature)\n    self.original_layer_call = call_collection.layer_call_method", "docstring": "Initializes a LayerCall object.\n\nArgs:\ncall_collection: a LayerCallCollection, which contains the other layer\ncall functions (e.g. call_with_conditional_losses, call). These\nfunctions should be traced with the same arguments.\ncall_fn: A call function.\nname: Name of the call function.\ninput_signature: Input signature of call_fn (can be None).", "source": "github-repos"}
{"code": "def write_updates_to_csv(self, updates):\n        \n        with open(self._csv_file_name, 'w') as csvfile:\n            csvwriter = self.csv_writer(csvfile)\n            csvwriter.writerow(CSV_COLUMN_HEADERS)\n\n            for update in updates:\n                row = [\n                    update.name,\n                    update.current_version,\n                    update.new_version,\n                    update.prelease,\n                ]\n                csvwriter.writerow(row)", "docstring": "Given a list of updates, write the updates out to the provided CSV\nfile.\n\nArgs:\nupdates (list): List of Update objects.", "source": "juraj-google-style"}
{"code": "def list(self, **kwargs):\n    path = self._get_path('movie_list')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of supported certifications for movies.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def release_dates(self, **kwargs):\n        \n        path = self._get_id_path('release_dates')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the release dates and certification for a specific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def Update(self, request, global_params=None):\n    config = self.GetMethodConfig('Update')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.\n\nArgs:\nrequest: (BigqueryDatasetsUpdateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Dataset) The response message.", "source": "github-repos"}
{"code": "def _ParseDocstringArgSpec(doc):\n    match = re.search('^\\\\w+\\\\(.*\\\\)', doc)\n    args_spec = _GenerateArgsSpec(doc)\n    if not match or args_spec is None:\n        raise ValueError(f'Failed to parse argspec from docstring: {doc}')\n    output_string = f'args=[{args_spec}], varargs=None, keywords=None, defaults=None'\n    return output_string", "docstring": "Get an ArgSpec string from a method docstring.\n\nThis method is used to generate argspec for C extension functions that follow\npybind11 DocString format function signature. For example:\n`foo_function(a: int, b: string) -> None...`\n\nArgs:\ndoc: A python string which starts with function signature.\n\nReturns:\nstring: a argspec string representation if successful. If not, return None.\n\nRaises:\nValueError: Raised when failed to parse the input docstring.", "source": "github-repos"}
{"code": "def combine_regions(self,input_region_labels,output_region_label,verbose=True):\n        \n        if isinstance(input_region_labels,str): input_region_labels = [input_region_labels]\n        bad_regions = set(input_region_labels)-set(self.regions)\n        if len(bad_regions) > 0: raise ValueError(\"Error regions(s) \"+str(bad_regions)+\" are not in the data.\")\n        data = self.copy()\n        if len(input_region_labels) == 0: return data\n        def _swap_in(d,inputs,output):\n            \n            overlap = set(d.keys()).intersection(inputs)\n            \n            if len(overlap) == 0: return d\n            keepers = [(k,v) for k,v in d.items() if k not in inputs]\n            \n            return dict(keepers+\\\n                        [(output_region_label,sum([d[x] for x in overlap]))])\n        data['regions'] = data.apply(lambda x:\n            _swap_in(x['regions'],input_region_labels,output_region_label)\n            ,1)\n        data.loc[data['region_label'].isin(input_region_labels),'region_label'] = output_region_label\n        return data", "docstring": "Combine/rename one or more input regions to a single output region\n\nArgs:\ninput_region_labels (list): A str name or list of names to combine\noutput_region_label (list): A str name to change the phenotype names to\nverbose (bool): output more details\n\nReturns:\nCellDataFrame: The CellDataFrame modified.", "source": "juraj-google-style"}
{"code": "def _CheckSQLite3(verbose_output=True):\n  \n  \n  \n  \n  \n  \n  module_name = 'pysqlite2.dbapi2'\n  minimum_version = '3.7.8'\n\n  module_object = _ImportPythonModule(module_name)\n  if not module_object:\n    module_name = 'sqlite3'\n\n  module_object = _ImportPythonModule(module_name)\n  if not module_object:\n    print('[FAILURE]\\tmissing: {0:s}.'.format(module_name))\n    return False\n\n  module_version = getattr(module_object, 'sqlite_version', None)\n  if not module_version:\n    return False\n\n  \n  \n  module_version_map = list(\n      map(int, _VERSION_SPLIT_REGEX.split(module_version)))\n  minimum_version_map = list(\n      map(int, _VERSION_SPLIT_REGEX.split(minimum_version)))\n\n  if module_version_map < minimum_version_map:\n    print((\n        '[FAILURE]\\t{0:s} version: {1!s} is too old, {2!s} or later '\n        'required.').format(module_name, module_version, minimum_version))\n    return False\n\n  if verbose_output:\n    print('[OK]\\t\\t{0:s} version: {1!s}'.format(module_name, module_version))\n\n  return True", "docstring": "Checks the availability of sqlite3.\n\nArgs:\nverbose_output (Optional[bool]): True if output should be verbose.\n\nReturns:\nbool: True if the sqlite3 Python module is available, False otherwise.", "source": "juraj-google-style"}
{"code": "def wait_for_model_package(self, model_package_name, poll=5):\n        \n        desc = _wait_until(lambda: _create_model_package_status(self.sagemaker_client, model_package_name),\n                           poll)\n        status = desc['ModelPackageStatus']\n\n        if status != 'Completed':\n            reason = desc.get('FailureReason', None)\n            raise ValueError('Error creating model package {}: {} Reason: {}'.format(\n                model_package_name, status, reason))\n        return desc", "docstring": "Wait for an Amazon SageMaker endpoint deployment to complete.\n\nArgs:\nendpoint (str): Name of the ``Endpoint`` to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\ndict: Return value from the ``DescribeEndpoint`` API.", "source": "juraj-google-style"}
{"code": "def create_box_field(self, box_key, name, field_type, **kwargs):\n\t\t\n\t\t\n\t\tself._raise_unimplemented_error()\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.boxes_suffix, \n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.fields_suffix\n\t\t\t\t\t\t])\n\t\t\n\t\tcode, data = self._create_field(uri, name, field_type, **kwargs)\n\t\t\n\t\treturn code, data", "docstring": "Creates a box field with the provided attributes.\nArgs:\nbox_key\t\t\tspecifying the box to add the field to\nname\t\t\trequired name string\nfield_type\t\trequired type string [TEXT_INPUT, DATE or PERSON]\nkwargs\t\t\t{}\nreturn\t\t\t(status code, field dict)", "source": "juraj-google-style"}
{"code": "def InsertNodesAfter(new_nodes, target):\n    for node in reversed(new_nodes):\n        _InsertNodeAt(node, target, after=True)", "docstring": "Insert new_nodes after the given target location in the tree.\n\nArguments:\nnew_nodes: a sequence of new nodes to insert (the nodes should not be in the\ntree).\ntarget: the target node after which the new node node will be inserted.\n\nRaises:\nRuntimeError: if the tree is corrupted, or the insertion would corrupt it.", "source": "github-repos"}
{"code": "def get(self, key, index=None):\n        \n        records = self.get_multi([key], index=index)\n\n        try:\n            return records[0][1]  \n        except IndexError:\n            return None", "docstring": "Retrieves a value associated with a key from the database\n\nArgs:\nkey (str): The key to retrieve", "source": "juraj-google-style"}
{"code": "def FillDeviceCapabilities(device, descriptor):\n    preparsed_data = PHIDP_PREPARSED_DATA(0)\n    ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))\n    if (not ret):\n        raise ctypes.WinError()\n    try:\n        caps = HidCapabilities()\n        ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))\n        if (ret != HIDP_STATUS_SUCCESS):\n            raise ctypes.WinError()\n        descriptor.usage = caps.Usage\n        descriptor.usage_page = caps.UsagePage\n        descriptor.internal_max_in_report_len = caps.InputReportByteLength\n        descriptor.internal_max_out_report_len = caps.OutputReportByteLength\n    finally:\n        hid.HidD_FreePreparsedData(preparsed_data)", "docstring": "Fill out device capabilities.\n\nFills the HidCapabilitites of the device into descriptor.\n\nArgs:\ndevice: A handle to the open device\ndescriptor: DeviceDescriptor to populate with the\ncapabilities\n\nReturns:\nnone\n\nRaises:\nWindowsError when unable to obtain capabilitites.", "source": "codesearchnet"}
{"code": "def wont_implement_method(base_type, name, reason=None, explanation=None):\n    if reason is not None:\n        if reason not in _WONT_IMPLEMENT_REASONS:\n            raise AssertionError(f'reason must be one of {list(_WONT_IMPLEMENT_REASONS.keys())}, got {reason!r}')\n        reason_data = _WONT_IMPLEMENT_REASONS[reason]\n    elif explanation is not None:\n        reason_data = {'explanation': explanation}\n    else:\n        raise ValueError('One of (reason, explanation) must be specified')\n\n    def wrapper(*args, **kwargs):\n        raise WontImplementError(f\"'{name}' is not yet supported {reason_data['explanation']}\", reason=reason)\n    wrapper.__name__ = name\n    wrapper.__doc__ = f':meth:`{_prettify_pandas_type(base_type)}.{name}` is not yet supported in the Beam DataFrame API {reason_data['explanation']}'\n    if 'url' in reason_data:\n        wrapper.__doc__ += f'\\n\\n For more information see {reason_data['url']}.'\n    return wrapper", "docstring": "Generate a stub method that raises WontImplementError.\n\nNote either reason or explanation must be specified. If both are specified,\nexplanation is ignored.\n\nArgs:\nbase_type: The pandas type of the method that this is trying to replicate.\nname: The name of the method that this is aiming to replicate.\nreason: If specified, use data from the corresponding entry in\n``_WONT_IMPLEMENT_REASONS`` to generate a helpful exception message\nand docstring for the method.\nexplanation: If specified, use this string as an explanation for why\nthis operation is not supported when generating an exception message\nand docstring.", "source": "github-repos"}
{"code": "def path(self, path):\n        \n        url = furl(self._request.rawurl)\n        url.path = path\n        self._request.url = url.url\n        self.add_matcher(matcher('PathMatcher', path))", "docstring": "Defines a URL path to match.\n\nOnly call this method if the URL has no path already defined.\n\nArguments:\npath (str): URL path value to match. E.g: ``/api/users``.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def _ParseAccountsData(self, account_data):\n    if (not account_data):\n        return {}\n    lines = [line for line in account_data.splitlines() if line]\n    user_map = {}\n    for line in lines:\n        if (not all(((ord(c) < 128) for c in line))):\n            self.logger.info('SSH key contains non-ascii character: %s.', line)\n            continue\n        split_line = line.split(':', 1)\n        if (len(split_line) != 2):\n            self.logger.info('SSH key is not a complete entry: %s.', split_line)\n            continue\n        (user, key) = split_line\n        if self._HasExpired(key):\n            self.logger.debug('Expired SSH key for user %s: %s.', user, key)\n            continue\n        if (user not in user_map):\n            user_map[user] = []\n        user_map[user].append(key)\n    logging.debug('User accounts: %s.', user_map)\n    return user_map", "docstring": "Parse the SSH key data into a user map.\n\nArgs:\naccount_data: string, the metadata server SSH key attributes data.\n\nReturns:\ndict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.", "source": "codesearchnet"}
{"code": "def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:\n    return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))", "docstring": "Returns a partial scope with current action-fluents.\n\nArgs:\naction_fluents (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):\n    \n    if replay_strategy == 'future':\n        future_p = 1 - (1. / (1 + replay_k))\n    else:  \n        future_p = 0\n\n    def _sample_her_transitions(episode_batch, batch_size_in_transitions):\n        \n        T = episode_batch['u'].shape[1]\n        rollout_batch_size = episode_batch['u'].shape[0]\n        batch_size = batch_size_in_transitions\n\n        \n        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)\n        t_samples = np.random.randint(T, size=batch_size)\n        transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()\n                       for key in episode_batch.keys()}\n\n        \n        \n        her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)\n        future_offset = np.random.uniform(size=batch_size) * (T - t_samples)\n        future_offset = future_offset.astype(int)\n        future_t = (t_samples + 1 + future_offset)[her_indexes]\n\n        \n        \n        \n        future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]\n        transitions['g'][her_indexes] = future_ag\n\n        \n        info = {}\n        for key, value in transitions.items():\n            if key.startswith('info_'):\n                info[key.replace('info_', '')] = value\n\n        \n        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}\n        reward_params['info'] = info\n        transitions['r'] = reward_fun(**reward_params)\n\n        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])\n                       for k in transitions.keys()}\n\n        assert(transitions['u'].shape[0] == batch_size_in_transitions)\n\n        return transitions\n\n    return _sample_her_transitions", "docstring": "Creates a sample function that can be used for HER experience replay.\n\nArgs:\nreplay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',\nregular DDPG experience replay is used\nreplay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times\nas many HER replays as regular replays are used)\nreward_fun (function): function to re-compute the reward with substituted goals", "source": "juraj-google-style"}
{"code": "def _UpdateAuthorizedKeys(self, user, ssh_keys):\n    pw_entry = self._GetUser(user)\n    if (not pw_entry):\n        return\n    uid = pw_entry.pw_uid\n    gid = pw_entry.pw_gid\n    home_dir = pw_entry.pw_dir\n    ssh_dir = os.path.join(home_dir, '.ssh')\n    authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys')\n    if (os.path.islink(ssh_dir) or os.path.islink(authorized_keys_file)):\n        self.logger.warning('Not updating authorized keys for user %s. File is a symlink.', user)\n        return\n    if (not os.path.exists(home_dir)):\n        file_utils.SetPermissions(home_dir, mode=493, uid=uid, gid=gid, mkdir=True)\n    file_utils.SetPermissions(ssh_dir, mode=448, uid=uid, gid=gid, mkdir=True)\n    prefix = (self.logger.name + '-')\n    with tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=True) as updated_keys:\n        updated_keys_file = updated_keys.name\n        if os.path.exists(authorized_keys_file):\n            lines = open(authorized_keys_file).readlines()\n        else:\n            lines = []\n        google_lines = set()\n        for (i, line) in enumerate(lines):\n            if line.startswith(self.google_comment):\n                google_lines.update([i, (i + 1)])\n        for (i, line) in enumerate(lines):\n            if ((i not in google_lines) and line):\n                line += ('\\n' if (not line.endswith('\\n')) else '')\n                updated_keys.write(line)\n        for ssh_key in ssh_keys:\n            ssh_key += ('\\n' if (not ssh_key.endswith('\\n')) else '')\n            updated_keys.write(('%s\\n' % self.google_comment))\n            updated_keys.write(ssh_key)\n        updated_keys.flush()\n        shutil.copy(updated_keys_file, authorized_keys_file)\n    file_utils.SetPermissions(authorized_keys_file, mode=384, uid=uid, gid=gid)", "docstring": "Update the authorized keys file for a Linux user with a list of SSH keys.\n\nArgs:\nuser: string, the name of the Linux user account.\nssh_keys: list, the SSH key strings associated with the user.\n\nRaises:\nIOError, raised when there is an exception updating a file.\nOSError, raised when setting permissions or writing to a read-only\nfile system.", "source": "codesearchnet"}
{"code": "def baseline_optimizer_arguments(self, states, internals, reward):\n    arguments = dict(time=self.global_timestep, variables=self.baseline.get_variables(), arguments=dict(states=states, internals=internals, reward=reward, update=tf.constant(value=True)), fn_reference=self.baseline.reference, fn_loss=self.fn_baseline_loss)\n    if (self.global_model is not None):\n        arguments['global_variables'] = self.global_model.baseline.get_variables()\n    return arguments", "docstring": "Returns the baseline optimizer arguments including the time, the list of variables to\noptimize, and various functions which the optimizer might require to perform an update\nstep.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nreward: Reward tensor.\n\nReturns:\nBaseline optimizer arguments as dict.", "source": "codesearchnet"}
{"code": "def error_buckets(gold, pred, X=None):\n    buckets = defaultdict(list)\n    gold = arraylike_to_numpy(gold)\n    pred = arraylike_to_numpy(pred)\n    for (i, (y, l)) in enumerate(zip(pred, gold)):\n        buckets[(y, l)].append((X[i] if (X is not None) else i))\n    return buckets", "docstring": "Group items by error buckets\n\nArgs:\ngold: an array-like of gold labels (ints)\npred: an array-like of predictions (ints)\nX: an iterable of items\nReturns:\nbuckets: A dict of items where buckets[i,j] is a list of items with\npredicted label i and true label j. If X is None, return indices\ninstead.\n\nFor a binary problem with (1=positive, 2=negative):\nbuckets[1,1] = true positives\nbuckets[1,2] = false positives\nbuckets[2,1] = false negatives\nbuckets[2,2] = true negatives", "source": "codesearchnet"}
{"code": "def __call__(self, *args, **kwargs):\n\n    def replica_local_fn(*args, **kwargs):\n        \n        if any((isinstance(arg, keras_tensor.KerasTensor) for arg in nest.flatten((args, kwargs)))):\n            update_op = None\n        else:\n            update_op = self.update_state(*args, **kwargs)\n        update_ops = []\n        if update_op is not None:\n            update_ops.append(update_op)\n        with ops.control_dependencies(update_ops):\n            result_t = self.result()\n            result_t._metric_obj = self\n            return result_t\n    from tensorflow.python.keras.distribute import distributed_training_utils\n    return distributed_training_utils.call_replica_local_fn(replica_local_fn, *args, **kwargs)", "docstring": "Accumulates statistics and then computes metric result value.\n\nArgs:\n*args:\n**kwargs: A mini-batch of inputs to the Metric,\npassed on to `update_state()`.\n\nReturns:\nThe metric value tensor.", "source": "github-repos"}
{"code": "def ReleaseFileSystem(self, file_system):\n    (identifier, cache_value) = self._file_system_cache.GetCacheValueByObject(file_system)\n    if (not identifier):\n        raise RuntimeError('Object not cached.')\n    if (not cache_value):\n        raise RuntimeError('Invalid cache value.')\n    self._file_system_cache.ReleaseObject(identifier)\n    result = cache_value.IsDereferenced()\n    if result:\n        self._file_system_cache.RemoveObject(identifier)\n    return result", "docstring": "Releases a cached file system object.\n\nArgs:\nfile_system (FileSystem): file system object.\n\nReturns:\nbool: True if the file system object can be closed.\n\nRaises:\nPathSpecError: if the path specification is incorrect.\nRuntimeError: if the file system object is not cached or an inconsistency\nis detected in the cache.", "source": "codesearchnet"}
{"code": "def _get_required_container_version():\n    if 'dev' in beam_version.__version__:\n        return names.BEAM_DEV_SDK_CONTAINER_TAG\n    else:\n        return _get_container_image_tag()", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nReturns:\nstr: The tag of worker container images in GCR that corresponds to\ncurrent version of the SDK.", "source": "github-repos"}
{"code": "def _rename_if_any_arg_found_transformer(parent, node, full_name, name, logs, arg_names=None, arg_ok_predicate=None, remove_if_ok=False, message=None):\n    for arg_name in arg_names:\n        rename_node = _rename_if_arg_found_transformer(parent, node, full_name, name, logs, arg_name, arg_ok_predicate, remove_if_ok, message)\n        node = rename_node if rename_node else node\n    return node", "docstring": "Replaces the given call with tf.compat.v1 if any of the arg_names is found.\n\nArgs:\nparent: Parent of node.\nnode: ast.Call node to modify.\nfull_name: full name of function to modify.\nname: name of function to modify.\nlogs: list of logs to append to.\narg_names: list of names of the argument to look for.\narg_ok_predicate: predicate callable with the ast of the argument value,\nreturns whether the argument value is allowed.\nremove_if_ok: remove the argument if present and ok as determined by\narg_ok_predicate.\nmessage: message to print if a non-ok arg is found (and hence, the function\nis renamed to its compat.v1 version).\n\nReturns:\nnode, if it was modified, else None.", "source": "github-repos"}
{"code": "def __call__(cls, *args, **kwargs):\n        \n        if cls.instance is None:\n            with threading.Lock():\n                if cls.instance is None:\n                    cls.instance = super(Singleton, cls).__call__(*args, **kwargs)\n\n        return cls.instance", "docstring": "Return singleton instance.\n\nArgs:\ncls (type): the class.\nargs (tuple/list): initializer function arguments.\nkwargs (dict): initializer function keyword arguments.", "source": "juraj-google-style"}
{"code": "def multi_post(self, urls, query_params=None, data=None, to_json=True, send_as_file=False):\n    return self._multi_request(MultiRequest._VERB_POST, urls, query_params, data, to_json=to_json, send_as_file=send_as_file)", "docstring": "Issue multiple POST requests.\n\nArgs:\nurls - A string URL or list of string URLs\nquery_params - None, a dict, or a list of dicts representing the query params\ndata - None, a dict or string, or a list of dicts and strings representing the data body.\nto_json - A boolean, should the responses be returned as JSON blobs\nsend_as_file - A boolean, should the data be sent as a file.\nReturns:\na list of dicts if to_json is set of requests.response otherwise.\nRaises:\nInvalidRequestError - Can not decide how many requests to issue.", "source": "codesearchnet"}
{"code": "def external_ids(self, **kwargs):\n    path = self._get_series_id_season_number_episode_number_path('external_ids')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the external ids for a TV episode by combination of a season and\nepisode number.\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def draw(self, current_time, frame_time):\n    self.set_default_viewport()\n    self.timeline.draw(current_time, frame_time, self.fbo)", "docstring": "Draws a frame. Internally it calls the\nconfigured timeline's draw method.\n\nArgs:\ncurrent_time (float): The current time (preferrably always from the configured timer class)\nframe_time (float): The duration of the previous frame in seconds", "source": "codesearchnet"}
{"code": "def ensure_dir(path):\n    dirpath = os.path.dirname(path)\n    if (dirpath and (not os.path.exists(dirpath))):\n        os.makedirs(dirpath)", "docstring": "Ensure directory exists.\n\nArgs:\npath(str): dir path", "source": "codesearchnet"}
{"code": "def disconnect_async(self, connection_id, callback):\n        \n\n        try:\n            context = self.connections.get_context(connection_id)\n        except ArgumentError:\n            callback(connection_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self.connections.begin_disconnection(connection_id, callback, self.get_config('default_timeout'))\n\n        self.bable.disconnect(\n            connection_handle=context['connection_handle'],\n            on_disconnected=[self._on_disconnection_finished, context]\n        )", "docstring": "Asynchronously disconnect from a device that has previously been connected\n\nArgs:\nconnection_id (int): A unique identifier for this connection on the DeviceManager that owns this adapter.\ncallback (callable): A function called as callback(connection_id, adapter_id, success, failure_reason)\nwhen the disconnection finishes. Disconnection can only either succeed or timeout.", "source": "juraj-google-style"}
{"code": "def _build_param_string(params):\n        \n        pairs = []\n        for key, value in params.iteritems():\n            if value is None:\n                value = ''\n            pairs.append('{0}={1}'.format(key, value))\n        if len(pairs) > 0:\n            return '?{0}'.format('&'.join(pairs))\n        return ''", "docstring": "Build query params string from a dictionary.\n\nArgs:\nparams (dict): A dictionary of params\n\nReturns:\nstring: A valid url query params string.", "source": "juraj-google-style"}
{"code": "def delete_credit_card(self, *, customer_id, credit_card_id):\n        \n        fmt = 'customers/{}/creditCards/{}'.format(customer_id, credit_card_id)\n        return self.client._delete(self.url + fmt, headers=self.get_headers())", "docstring": "Delete a credit card (Token) associated with a user.\n\nArgs:\ncustomer_id: Identifier of the client of whom you are going to delete the token.\ncredit_card_id: Identifier of the token to be deleted.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def find_slot(self, wanted, slots=None):\n        \n        for slot in self.find_slots(wanted, slots):\n            return slot\n        return None", "docstring": "Searches the given slots or, if not given,\nactive hotbar slot, hotbar, inventory, open window in this order.\n\nArgs:\nwanted: function(Slot) or Slot or itemID or (itemID, metadata)\n\nReturns:\nOptional[Slot]: The first slot containing the item\nor None if not found.", "source": "juraj-google-style"}
{"code": "def __x_google_quota_descriptor(self, metric_costs):\n    \n    return {\n        'metricCosts': {\n            metric: cost for (metric, cost) in metric_costs.items()\n        }\n    } if metric_costs else None", "docstring": "Describes the metric costs for a call.\n\nArgs:\nmetric_costs: Dict of metric definitions to the integer cost value against\nthat metric.\n\nReturns:\nA dict descriptor describing the Quota limits for the endpoint.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def run_as_function_for_tape_gradients(make_op, inputs):\n    if gradients_util.PossibleTapeGradientTypes(inputs) == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER and (not (ops.get_default_graph().building_function and 'cflow_gradient_wrapper' in ops.get_default_graph().name)):\n        results = tracing_compilation.call_function((inputs,), tracing_options=tracing_compilation.TracingOptions(make_op, 'cflow_gradient_wrapper', autograph=False))\n        return results\n    else:\n        return make_op(inputs)", "docstring": "Fix higher-order tape gradients by wrapping `make_op` in a function.\n\nArgs:\nmake_op: A function that takes a list of inputs and returns a list of output\ntensors. This function should set any handle data relevant to its outputs\nbefore returning.\ninputs: A list of tensors to check for tape gradients and pass to\n`make_op`. These should include all tensors used in `make_op`.\n\nReturns:\nTensors corresponding to `make_op`'s output.", "source": "github-repos"}
{"code": "def array(self, size_chunk, start, bytesize):\n    with open(self.img, 'rb') as f1:\n        f1.seek((self.start_byte + (start * self.bytesize)))\n        data = f1.read((size_chunk * self.bytesize))\n        Z = np.fromstring(data, dtype=self.dtype, count=size_chunk)\n        if (self.grid == 'LOLA'):\n            return (Z * float(self.SCALING_FACTOR))\n        else:\n            return Z", "docstring": "Read part of the binary file\n\nArgs:\nsize_chunk (int) : Size of the chunk to read\nstart (int): Starting byte\nbytesize (int): Ending byte\n\nReturns:\n(np.array): array of the corresponding values", "source": "codesearchnet"}
{"code": "def _to_enos_roles(roles):\n\n    def to_host(h):\n        extra = {}\n        for (nic, roles) in h['nics']:\n            for role in roles:\n                extra[role] = nic\n        return Host(h['host'], user='root', extra=extra)\n    enos_roles = {}\n    for (role, hosts) in roles.items():\n        enos_roles[role] = [to_host(h) for h in hosts]\n    logger.debug(enos_roles)\n    return enos_roles", "docstring": "Transform the roles to use enoslib.host.Host hosts.\n\nArgs:\nroles (dict): roles returned by\n:py:func:`enoslib.infra.provider.Provider.init`", "source": "codesearchnet"}
{"code": "def VerifyGitkitToken(self, jwt):\n    certs = self.rpc_helper.GetPublicCert()\n    crypt.MAX_TOKEN_LIFETIME_SECS = (30 * 86400)\n    parsed = None\n    for aud in filter((lambda x: (x is not None)), [self.project_id, self.client_id]):\n        try:\n            parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)\n        except crypt.AppIdentityError as e:\n            if ('Wrong recipient' not in e.message):\n                return None\n        if parsed:\n            return GitkitUser.FromToken(parsed)\n    return None", "docstring": "Verifies a Gitkit token string.\n\nArgs:\njwt: string, the token to be checked\n\nReturns:\nGitkitUser, if the token is valid. None otherwise.", "source": "codesearchnet"}
{"code": "def _dict_func(self, func, axis, *args, **kwargs):\n    if ('axis' not in kwargs):\n        kwargs['axis'] = axis\n    if (axis == 0):\n        index = self.columns\n    else:\n        index = self.index\n    func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}\n\n    def dict_apply_builder(df, func_dict={}):\n        return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))\n    result_data = self.data.apply_func_to_select_indices_along_full_axis(axis, dict_apply_builder, func, keep_remaining=False)\n    full_result = self._post_process_apply(result_data, axis)\n    return full_result", "docstring": "Apply function to certain indices across given axis.\n\nArgs:\nfunc: The function to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "codesearchnet"}
{"code": "def op_or(self, *elements):\n    expression = self.add_operator(Operator(','))\n    for element in elements:\n        expression.add_element(element)\n    return expression", "docstring": "Update the ``Expression`` by joining the specified additional\n``elements`` using an \"OR\" ``Operator``\n\nArgs:\n*elements (BaseExpression): The ``Expression`` and/or\n``Constraint`` elements which the \"OR\" ``Operator`` applies\nto.\n\nReturns:\nExpression: ``self`` or related ``Expression``.", "source": "codesearchnet"}
{"code": "def fbresnet152(num_classes=1000, pretrained='imagenet'):\n    \n    model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)\n    if pretrained is not None:\n        settings = pretrained_settings['fbresnet152'][pretrained]\n        assert num_classes == settings['num_classes'], \\\n            \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n        model.load_state_dict(model_zoo.load_url(settings['url']))\n        model.input_space = settings['input_space']\n        model.input_size = settings['input_size']\n        model.input_range = settings['input_range']\n        model.mean = settings['mean']\n        model.std = settings['std']\n    return model", "docstring": "Constructs a ResNet-152 model.\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"}
{"code": "def _ImportPythonModule(module_name):\n  \n  try:\n    module_object = list(map(__import__, [module_name]))[0]\n  except ImportError:\n    return None\n\n  \n  if '.' in module_name:\n    for submodule_name in module_name.split('.')[1:]:\n      module_object = getattr(module_object, submodule_name, None)\n\n  return module_object", "docstring": "Imports a Python module.\n\nArgs:\nmodule_name (str): name of the module.\n\nReturns:\nmodule: Python module or None if the module cannot be imported.", "source": "juraj-google-style"}
{"code": "def _force_edges_active_move(self, state: _STATE) -> _STATE:\n    for _ in range(self._rand.randint(1, 4)):\n        state = self._force_edge_active_move(state)\n    return state", "docstring": "Move function which repeats _force_edge_active_move a few times.\n\nArgs:\nstate: Search state, not mutated.\n\nReturns:\nNew search state which consists of incremental changes of the\noriginal state.", "source": "codesearchnet"}
{"code": "class PatchTSMixerForTimeSeriesClassificationOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    prediction_outputs: Optional[torch.FloatTensor] = None\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`].\n\nArgs:\nprediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`):\nPrediction output from the classification head.\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):\nBackbone embeddings before passing through the head.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*):\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.\nloss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):\nTotal loss.", "source": "github-repos"}
{"code": "def table_field_to_avro_field(table_field: Dict[str, Any], namespace: str) -> Dict[str, Any]:\n    assert 'type' in table_field, 'Unable to get type for table field {}'.format(table_field)\n    assert table_field['type'] in BIG_QUERY_TO_AVRO_TYPES, 'Unable to map BigQuery field type {} to avro type'.format(table_field['type'])\n    avro_type = BIG_QUERY_TO_AVRO_TYPES[table_field['type']]\n    if avro_type == 'record':\n        element_type = get_record_schema_from_dict_table_schema(table_field['name'], table_field, namespace='.'.join((namespace, table_field['name'])))\n    else:\n        element_type = avro_type\n    field_mode = table_field.get('mode', 'NULLABLE')\n    if field_mode in (None, 'NULLABLE'):\n        field_type = ['null', element_type]\n    elif field_mode == 'REQUIRED':\n        field_type = element_type\n    elif field_mode == 'REPEATED':\n        field_type = {'type': 'array', 'items': element_type}\n    else:\n        raise ValueError('Unknown BigQuery field mode: {}'.format(field_mode))\n    avro_field = {'type': field_type, 'name': table_field['name']}\n    doc = table_field.get('description')\n    if doc:\n        avro_field['doc'] = doc\n    return avro_field", "docstring": "Convert a BigQuery field to an avro field.\n\nArgs:\ntable_field (Dict[str, Any]): A BigQuery field in dict form.\n\nReturns:\nDict[str, Any]: An equivalent Avro field in dict form.", "source": "github-repos"}
{"code": "def get_application_configuration(name):\n    _check()\n    rc = _ec.get_application_configuration(name)\n    if (rc is False):\n        raise ValueError('Application configuration {0} not found.'.format(name))\n    return rc", "docstring": "Get a named application configuration.\n\nAn application configuration is a named set of securely stored properties\nwhere each key and its value in the property set is a string.\n\nAn application configuration object is used to store information that\nIBM Streams applications require, such as:\n\n* Database connection data\n* Credentials that your applications need to use to access external systems\n* Other data, such as the port numbers or URLs of external systems\n\nArguments:\nname(str): Name of the application configuration.\n\nReturns:\ndict: Dictionary containing the property names and values for the application configuration.\n\nRaises:\nValueError: Application configuration does not exist.", "source": "codesearchnet"}
{"code": "def update_parameters(parameters, grads, learning_rate=1.2):\n    \n    \n    W1 = parameters[\"W1\"]\n    b1 = parameters[\"b1\"]\n    W2 = parameters[\"W2\"]\n    b2 = parameters[\"b2\"]\n\n    \n    dW1 = grads[\"dW1\"]\n    db1 = grads[\"db1\"]\n    dW2 = grads[\"dW2\"]\n    db2 = grads[\"db2\"]\n\n    \n    W1 -= learning_rate * dW1\n    b1 -= learning_rate * db1\n    W2 -= learning_rate * dW2\n    b2 -= learning_rate * db2\n\n    parameters = {\"W1\": W1,\n                  \"b1\": b1,\n                  \"W2\": W2,\n                  \"b2\": b2}\n\n    return parameters", "docstring": "Updates parameters using the gradient descent update rule given above\n\nArguments:\nparameters -- python dictionary containing your parameters\ngrads -- python dictionary containing your gradients\n\nReturns:\nparameters -- python dictionary containing your updated parameters", "source": "juraj-google-style"}
{"code": "def _ParseLogLine(self, parser_mediator, structure):\n    \n    if not self._xchat_year:\n      return\n\n    time_elements_tuple = self._GetTimeElementsTuple(structure)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n      date_time.is_local_time = True\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    self._last_month = time_elements_tuple[1]\n\n    event_data = XChatLogEventData()\n    event_data.nickname = structure.nickname\n    \n    \n    event_data.text = ' '.join(structure.text.split())\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED,\n        time_zone=parser_mediator.timezone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log line.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def _find_metric_value(session_or_group, metric_name):\n    for metric_value in session_or_group.metric_values:\n        if ((metric_value.name.tag == metric_name.tag) and (metric_value.name.group == metric_name.group)):\n            return metric_value", "docstring": "Returns the metric_value for a given metric in a session or session group.\n\nArgs:\nsession_or_group: A Session protobuffer or SessionGroup protobuffer.\nmetric_name: A MetricName protobuffer. The metric to search for.\nReturns:\nA MetricValue protobuffer representing the value of the given metric or\nNone if no such metric was found in session_or_group.", "source": "codesearchnet"}
{"code": "def __init__(self, callback, callback_lock, options, queue_item):\n        \n\n        threading.Thread.__init__(self)\n\n        self.__callback = callback\n        self.__callback_lock = callback_lock\n        self.__options = options\n        self.__queue_item = queue_item", "docstring": "Constructs a crawler thread instance\n\nArgs:\ncallback (obj): The method to call when finished\ncallback_lock (bool): The callback lock that prevents race conditions.\noptions (:class:`nyawc.Options`): The settins/options object.\nqueue_item (:class:`nyawc.QueueItem`): The queue item containing a request to execute.", "source": "juraj-google-style"}
{"code": "def _make_flow(request, scopes, return_url=None):\n    \n    \n    csrf_token = hashlib.sha256(os.urandom(1024)).hexdigest()\n\n    request.session[_CSRF_KEY] = csrf_token\n\n    state = json.dumps({\n        'csrf_token': csrf_token,\n        'return_url': return_url,\n    })\n\n    flow = client.OAuth2WebServerFlow(\n        client_id=django_util.oauth2_settings.client_id,\n        client_secret=django_util.oauth2_settings.client_secret,\n        scope=scopes,\n        state=state,\n        redirect_uri=request.build_absolute_uri(\n            urlresolvers.reverse(\"google_oauth:callback\")))\n\n    flow_key = _FLOW_KEY.format(csrf_token)\n    request.session[flow_key] = jsonpickle.encode(flow)\n    return flow", "docstring": "Creates a Web Server Flow\n\nArgs:\nrequest: A Django request object.\nscopes: the request oauth2 scopes.\nreturn_url: The URL to return to after the flow is complete. Defaults\nto the path of the current request.\n\nReturns:\nAn OAuth2 flow object that has been stored in the session.", "source": "juraj-google-style"}
{"code": "def add_tile(self, tile_source, **kw):\n    tile_renderer = TileRenderer(tile_source=tile_source, **kw)\n    self.renderers.append(tile_renderer)\n    return tile_renderer", "docstring": "Adds new ``TileRenderer`` into ``Plot.renderers``\n\nArgs:\ntile_source (TileSource) : a tile source instance which contain tileset configuration\n\nKeyword Arguments:\nAdditional keyword arguments are passed on as-is to the tile renderer\n\nReturns:\nTileRenderer : TileRenderer", "source": "codesearchnet"}
{"code": "def _prep_binary_content(self):\n    if ((not self.data) and (not self.location) and ('Content-Location' not in self.resource.headers.keys())):\n        raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')\n    elif ('Content-Location' in self.resource.headers.keys()):\n        logger.debug('Content-Location header found, using')\n        self.delivery = 'header'\n    elif ('Content-Location' not in self.resource.headers.keys()):\n        if self.location:\n            self.resource.headers['Content-Location'] = self.location\n            self.delivery = 'header'\n        elif self.data:\n            if isinstance(self.data, io.BufferedIOBase):\n                logger.debug('detected file-like object')\n                self.delivery = 'payload'\n            else:\n                logger.debug('detected bytes')\n                self.delivery = 'payload'", "docstring": "Sets delivery method of either payload or header\nFavors Content-Location header if set\n\nArgs:\nNone\n\nReturns:\nNone: sets attributes in self.binary and headers", "source": "codesearchnet"}
{"code": "def framesToFrameRange(frames, sort=True, zfill=0, compress=False):\n        \n        if compress:\n            frames = unique(set(), frames)\n        frames = list(frames)\n        if not frames:\n            return ''\n        if len(frames) == 1:\n            return pad(frames[0], zfill)\n        if sort:\n            frames.sort()\n        return ','.join(FrameSet.framesToFrameRanges(frames, zfill))", "docstring": "Converts an iterator of frames into a\nframe range string.\n\nArgs:\nframes (collections.Iterable): sequence of frames to process\nsort (bool): sort the sequence before processing\nzfill (int): width for zero padding\ncompress (bool): remove any duplicates before processing\n\nReturns:\nstr:", "source": "juraj-google-style"}
{"code": "def marquee(text='', width=78, mark='*'):\n    if (not text):\n        return (mark * width)[:width]\n    nmark = ((((width - len(text)) - 2) \n    if (nmark < 0):\n        nmark = 0\n    marks = (mark * nmark)\n    return ('%s %s %s' % (marks, text, marks))", "docstring": "Return the input string centered in a 'marquee'.\n\nArgs:\ntext (str): Input string\nwidth (int): Width of final output string.\nmark (str): Character used to fill string.\n\n:Examples:\n\n>>> marquee('A test', width=40)\n'**************** A test ****************'\n\n>>> marquee('A test', width=40, mark='-')\n'---------------- A test ----------------'\n\nmarquee('A test',40, ' ')\n'                 A test                 '", "source": "codesearchnet"}
{"code": "def _get_connection(self):\n    if (not getattr(self, '_connection', None)):\n        logger.debug('Creating new connection.\\n   dsn: {}'.format(self._dsn))\n        d = parse_url_to_dict(self._dsn)\n        self._connection = psycopg2.connect(database=d['path'].strip('/'), user=d['username'], password=d['password'], port=d['port'], host=d['hostname'])\n    return self._connection", "docstring": "Returns connection to the postgres database.\n\nReturns:\nconnection to postgres database who stores mpr data.", "source": "codesearchnet"}
{"code": "def optimize(node):\n    node = dead_code_elimination(node)\n    node = constant_folding(node)\n    node = assignment_propagation(node)\n    return node", "docstring": "Perform a series of optimization passes.\n\nThis function performs a series of optimizations (dead code elimination,\nconstant folding, variable folding) on the given AST.\nIt optimizes the code repeatedly until reaching a fixed point. The fixed\npoint is determine roughly by checking whether the number of lines of\ngenerated source code changed after the latest pass.\n\nArgs:\nnode: The AST to optimize.\nReturns:\nThe optimized AST.", "source": "codesearchnet"}
{"code": "def _ReceiveItemOnActivity(self, zmq_socket):\n    \n    events = zmq_socket.poll(\n        self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)\n    if events:\n      try:\n        received_object = self._zmq_socket.recv_pyobj()\n        return received_object\n\n      except zmq.error.Again:\n        logger.error(\n            '{0:s}. Failed to receive item in time.'.format(\n                self.name))\n        raise\n\n      except zmq.error.ZMQError as exception:\n        if exception.errno == errno.EINTR:\n          logger.error(\n              'ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(\n                  self.name))\n        raise\n\n    raise errors.QueueEmpty", "docstring": "Attempts to receive an item from a ZeroMQ socket.\n\nArgs:\nzmq_socket (zmq.Socket): used to the receive the item.\n\nReturns:\nobject: item from the socket.\n\nRaises:\nQueueEmpty: if no item could be received within the timeout.\nzmq.error.ZMQError: if an error occurs in ZeroMQ", "source": "juraj-google-style"}
{"code": "def verify_docker_image_sha(chain, link):\n    \n    cot = link.cot\n    task = link.task\n    errors = []\n\n    if isinstance(task['payload'].get('image'), dict):\n        \n        docker_image_task_id = task['extra']['chainOfTrust']['inputs']['docker-image']\n        log.debug(\"Verifying {} {} against docker-image {}\".format(\n            link.name, link.task_id, docker_image_task_id\n        ))\n        if docker_image_task_id != task['payload']['image']['taskId']:\n            errors.append(\"{} {} docker-image taskId isn't consistent!: {} vs {}\".format(\n                link.name, link.task_id, docker_image_task_id,\n                task['payload']['image']['taskId']\n            ))\n        else:\n            path = task['payload']['image']['path']\n            \n            \n            image_hash = cot['environment']['imageArtifactHash']\n            alg, sha = image_hash.split(':')\n            docker_image_link = chain.get_link(docker_image_task_id)\n            upstream_sha = docker_image_link.cot['artifacts'].get(path, {}).get(alg)\n            if upstream_sha is None:\n                errors.append(\"{} {} docker-image docker sha {} is missing! {}\".format(\n                    link.name, link.task_id, alg,\n                    docker_image_link.cot['artifacts'][path]\n                ))\n            elif upstream_sha != sha:\n                errors.append(\"{} {} docker-image docker sha doesn't match! {} {} vs {}\".format(\n                    link.name, link.task_id, alg, sha, upstream_sha\n                ))\n            else:\n                log.debug(\"Found matching docker-image sha {}\".format(upstream_sha))\n    else:\n        prebuilt_task_types = chain.context.config['prebuilt_docker_image_task_types']\n        if prebuilt_task_types != \"any\" and link.task_type not in prebuilt_task_types:\n            errors.append(\n                \"Task type {} not allowed to use a prebuilt docker image!\".format(\n                    link.task_type\n                )\n            )\n    raise_on_errors(errors)", "docstring": "Verify that built docker shas match the artifact.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\nlink (LinkOfTrust): the task link we're checking.\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def pop_stack(stack, op_id):\n    if __debug__:\n        (pushed_stack, pushed_op_id) = stack.pop()\n        assert (pushed_op_id == op_id), ('Wanted %s, got %s' % (op_id, pushed_op_id))\n    else:\n        pushed_stack = stack.pop()\n    return pushed_stack", "docstring": "Proxy of pop, where we know we're popping a stack off of a stack.\n\nWe know that we don't need to differentiate through this.\nSee pop() for more.\n\nArgs:\nstack: The stack to pop from.\nop_id: A unique variable that is also passed into the matching push.\nAllows optimization passes to track pairs of pushes and pops.\n\nReturns:\nThe last value.", "source": "codesearchnet"}
{"code": "def resize_positional_embeddings(positional_embeddings: torch.Tensor, spatial_shapes: torch.LongTensor, max_length: int) -> torch.Tensor:\n    batch_size = spatial_shapes.shape[0]\n    embed_dim = positional_embeddings.shape[-1]\n    source_dtype = positional_embeddings.dtype\n    resulted_positional_embeddings = torch.empty((batch_size, max_length, embed_dim), device=positional_embeddings.device, dtype=source_dtype)\n    positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)\n    if positional_embeddings.device.type == 'cpu':\n        positional_embeddings = positional_embeddings.to(torch.float32)\n    for i in range(batch_size):\n        height, width = spatial_shapes[i]\n        resized_embeddings = F.interpolate(positional_embeddings, size=(height, width), mode='bilinear', align_corners=False, antialias=True)\n        resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)\n        resized_embeddings = resized_embeddings.to(source_dtype)\n        resulted_positional_embeddings[i, :height * width] = resized_embeddings\n        resulted_positional_embeddings[i, height * width:] = resized_embeddings[0]\n    return resulted_positional_embeddings", "docstring": "Resize positional embeddings to image-specific size and pad to a fixed size.\n\nArgs:\npositional_embeddings (`torch.Tensor`):\nPosition embeddings of shape (height, width, embed_dim)\nspatial_shapes (`torch.LongTensor`):\nSpatial shapes of shape (batch_size, 2) to resize the positional embeddings to\nmax_length (`int`):\nMaximum length of the positional embeddings to pad resized positional embeddings to\n\nReturns:\n`torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)", "source": "github-repos"}
{"code": "def transfer(self, payment_id, data={}, **kwargs):\n        \n        url = \"{}/{}/transfers\".format(self.base_url, payment_id)\n        return self.post_url(url, data, **kwargs)", "docstring": "Create Transfer for given Payment Id\n\nArgs:\npayment_id : Id for which payment object has to be transfered\n\nReturns:\nPayment dict after getting transfered", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n    x = self.dense(hidden_states).squeeze(-1)\n    if p_mask is not None:\n        if p_mask.dtype == torch.float16:\n            x = x * (1 - p_mask) - 65500 * p_mask\n        else:\n            x = x * (1 - p_mask) - 1e+30 * p_mask\n    return x", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):\nThe final hidden states of the model.\np_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):\nMask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token\nshould be masked.\n\nReturns:\n`torch.FloatTensor`: The start logits for SQuAD.", "source": "github-repos"}
{"code": "def graph_distances(start, edges, distances):\n    \n    \n    adj = {x: [] for x in range(len(distances))}\n    for n1, n2 in edges:\n        adj[n1].append(n2)\n        adj[n2].append(n1)\n    \n    to_visit = []\n    new_dist = {}\n    for n in adj[start]:\n        heapq.heappush(to_visit, (distances[start, n], n))\n    while to_visit:\n        d, next_node = heapq.heappop(to_visit)\n        if next_node not in new_dist:\n            new_dist[next_node] = d\n        for n in adj[next_node]:\n            if n not in new_dist:\n                heapq.heappush(to_visit, (d + distances[next_node, n], n))\n    return new_dist", "docstring": "Given an undirected adjacency list and a pairwise distance matrix between\nall nodes: calculates distances along graph from start node.\n\nArgs:\nstart (int): start node\nedges (list): adjacency list of tuples\ndistances (array): 2d array of distances between nodes\n\nReturns:\ndict of node to distance from start", "source": "juraj-google-style"}
{"code": "def extract_grid(self, longmin, longmax, latmin, latmax):\n    (sample_min, sample_max) = map(int, map(self.sample_id, [longmin, longmax]))\n    (line_min, line_max) = map(int, map(self.line_id, [latmax, latmin]))\n    X = np.array(map(self.long_id, range(sample_min, sample_max, 1)))\n    Y = np.array(map(self.lat_id, range(line_min, (line_max + 1), 1)))\n    for (i, line) in enumerate(range(int(line_min), (int(line_max) + 1))):\n        start = (((line - 1) * int(self.SAMPLE_LAST_PIXEL)) + sample_min)\n        chunk_size = int((sample_max - sample_min))\n        Za = self.array(chunk_size, start, self.bytesize)\n        if (i == 0):\n            Z = Za\n        else:\n            Z = np.vstack((Z, Za))\n    (X, Y) = np.meshgrid(X, Y)\n    return (X, Y, Z)", "docstring": "Extract part of the image ``img``\n\nArgs:\nlongmin (float): Minimum longitude of the window\nlongmax (float): Maximum longitude of the window\nlatmin (float): Minimum latitude of the window\nlatmax (float): Maximum latitude of the window\n\nReturns:\nA tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\nlongitudes, ``Y`` contains the latitude and ``Z`` the values\nextracted from the window.\n\nNote:\nAll return arrays have the same size.\n\nAll coordinate are in degree.", "source": "codesearchnet"}
{"code": "def _install_signal_handler(self, signal_number, signal_name):\n    old_signal_handler = None\n\n    def handler(handled_signal_number, frame):\n        signal.signal(signal_number, signal.SIG_DFL)\n        sys.stderr.write(('TensorBoard caught %s; exiting...\\n' % signal_name))\n        if (old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL)):\n            old_signal_handler(handled_signal_number, frame)\n        sys.exit(0)\n    old_signal_handler = signal.signal(signal_number, handler)", "docstring": "Set a signal handler to gracefully exit on the given signal.\n\nWhen this process receives the given signal, it will run `atexit`\nhandlers and then exit with `0`.\n\nArgs:\nsignal_number: The numeric code for the signal to handle, like\n`signal.SIGTERM`.\nsignal_name: The human-readable signal name.", "source": "codesearchnet"}
{"code": "def print_terminal_table(headers, data_list, parse_row_fn):\n    data_iter = iter(data_list)\n    try:\n        example = next(data_iter)\n        example_row = parse_row_fn(example)\n        data_iter = itertools.chain([example], data_iter)\n    except StopIteration:\n        example_row = ([''] * len(headers))\n    format_string = format_terminal_row(headers, example_row)\n    top_row = format_string.format(*headers)\n    print((top_row[0:(- 3)] if top_row.endswith('...') else top_row))\n    for data in data_iter:\n        print(format_string.format(*parse_row_fn(data)))", "docstring": "Uses a set of headers, raw data, and a row parsing function, to print\ndata to the terminal in a table of rows and columns.\n\nArgs:\nheaders (tuple of strings): The headers for each column of data\ndata_list (list of dicts): Raw response data from the validator\nparse_row_fn (function): Parses a dict of data into a tuple of columns\nExpected args:\ndata (dict): A single response object from the validator\nExpected return:\ncols (tuple): The properties to display in each column", "source": "codesearchnet"}
{"code": "def send_raw_tx(self, serialized_tx, id=None, endpoint=None):\n        \n        return self._call_endpoint(SEND_TX, params=[serialized_tx], id=id, endpoint=endpoint)", "docstring": "Submits a serialized tx to the network\nArgs:\nserialized_tx: (str) a hexlified string of a transaction\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\nbool: whether the tx was accepted or not", "source": "juraj-google-style"}
{"code": "def GetStatusInformation(self):\n    status = processing_status.TasksStatus()\n    with self._lock:\n        status.number_of_abandoned_tasks = len(self._tasks_abandoned)\n        status.number_of_queued_tasks = len(self._tasks_queued)\n        status.number_of_tasks_pending_merge = (len(self._tasks_pending_merge) + len(self._tasks_merging))\n        status.number_of_tasks_processing = len(self._tasks_processing)\n        status.total_number_of_tasks = self._total_number_of_tasks\n    return status", "docstring": "Retrieves status information about the tasks.\n\nReturns:\nTasksStatus: tasks status information.", "source": "codesearchnet"}
{"code": "def round_f1_macro(y_true, y_predicted):\n    try:\n        predictions = [np.round(x) for x in y_predicted]\n    except TypeError:\n        predictions = y_predicted\n    return f1_score(np.array(y_true), np.array(predictions), average='macro')", "docstring": "Calculates F1 macro measure.\n\nArgs:\ny_true: list of true values\ny_predicted: list of predicted values\n\nReturns:\nF1 score", "source": "codesearchnet"}
{"code": "def sheets_write(config, auth, sheet_url_or_name, sheet_tab, sheet_range, data, append=False, valueInputOption='RAW'):\n    if config.verbose:\n        print('SHEETS WRITE', sheet_url_or_name, sheet_tab, sheet_range)\n    sheet_id = sheets_id(config, auth, sheet_url_or_name)\n    range = sheets_tab_range(sheet_tab, sheet_range)\n    body = {'values': list(data)}\n    if append:\n        API_Sheets(config, auth).spreadsheets().values().append(spreadsheetId=sheet_id, range=range, body=body, valueInputOption=valueInputOption, insertDataOption='OVERWRITE').execute()\n    else:\n        API_Sheets(config, auth).spreadsheets().values().update(spreadsheetId=sheet_id, range=range, body=body, valueInputOption=valueInputOption).execute()", "docstring": "Write to sheets for specified range.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nsheet_url_or_name - one of: URL, document title, or id\nsheet_tab - name of tab to get id for\nsheet_range - A1 notation or blank if whole sheet\ndata - list of lists representing rows.\nappend - if true, data will be added after last row with data.\nvalueInputOption - see APi docs.\n\nNo Return", "source": "github-repos"}
{"code": "def SyncSleep(delay, name=None):\n    return examples_sync_sleep(delay=delay, name=name)", "docstring": "Pause for `delay` seconds (which need not be an integer).\n\nThis is a synchronous (blocking) version of a sleep op. It's purpose is\nto be contrasted with Examples>AsyncSleep.\n\nArgs:\ndelay: tf.Tensor which is a scalar of type float.\nname: An optional name for the op.\n\nReturns:\nThe `delay` value.", "source": "github-repos"}
{"code": "def remove_all(self, filter, force=False, timeout=(- 1)):\n    return self._client.delete_all(filter=filter, force=force, timeout=timeout)", "docstring": "Deletes the set of datacenters according to the specified parameters. A filter is required to identify the set\nof resources to be deleted.\n\nArgs:\nfilter:\nA general filter/query string to narrow the list of items that will be removed.\nforce:\nIf set to true, the operation completes despite any problems with\nnetwork connectivity or errors on the resource itself. The default is false.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nbool: operation success", "source": "codesearchnet"}
{"code": "def get_pixel_coordinates(self, point, ccdnum):\n        \n        hdulist_index = self.get_hdulist_idx(ccdnum)\n        if isinstance(point[0], Quantity) and isinstance(point[1], Quantity):\n            pix_point = point[0].value, point[1].value\n        else:\n            pix_point = point\n        if self.reading.inverted:\n            pix_point = self.reading.obs.naxis1 - pix_point[0] +1 , self.reading.obs.naxis2 - pix_point[1] + 1\n\n        (x, y) = self.hdulist[hdulist_index].converter.convert(pix_point)\n        return x, y, hdulist_index", "docstring": "Retrieves the pixel location of a point within the current HDUList given the\nlocation in the original FITS image.  This takes into account that\nthe image may be a cutout of a larger original.\n\nArgs:\npoint: tuple(float, float)\n(x, y) in original.\n\nReturns:\n(x, y) pixel in this image.\n@param extno: the extno from the original Mosaic that the x/y coordinates are from.", "source": "juraj-google-style"}
{"code": "def get_folder_items(self, folder_id, limit=100, offset=0, fields_list=None):\n    qs = {'limit': limit, 'offset': offset}\n    if fields_list:\n        qs['fields'] = ','.join(fields_list)\n    return self.__request('GET', ('folders/%s/items' % (folder_id,)), querystring=qs)", "docstring": "Get files and folders inside a given folder\n\nArgs:\nfolder_id (int): Where to get files and folders info.\n\nlimit (int): The number of items to return.\n\noffset (int): The item at which to begin the response.\n\nfields_list (list): List of attributes to get. All attributes if None.\n\nReturns:\ndict. Response from Box.\n\nRaises:\nBoxError: An error response is returned from Box (status_code >= 400).\n\nBoxHttpResponseError: Response from Box is malformed.\n\nrequests.exceptions.*: Any connection related problem.", "source": "codesearchnet"}
{"code": "def loadfn(fname):\n    \n    if (fnmatch(fname, \"*POSCAR*\") or fnmatch(fname, \"*CONTCAR*\") or\n            \".cif\" in fname.lower()) or fnmatch(fname, \"*.vasp\"):\n        return Structure.from_file(fname)\n    elif fnmatch(fname, \"*vasprun*\"):\n        from pymatgen.io.vasp import Vasprun\n        return Vasprun(fname)\n    elif fnmatch(fname, \"*.json*\"):\n        from monty.serialization import loadfn\n        return loadfn(fname)", "docstring": "Convenience method to perform quick loading of data from a filename. The\ntype of object returned depends the file type.\n\nArgs:\nfname (string): A filename.\n\nReturns:\nNote that fname is matched using unix-style, i.e., fnmatch.\n(Structure) if *POSCAR*/*CONTCAR*/*.cif\n(Vasprun) *vasprun*\n(obj) if *json* (passthrough to monty.serialization.loadfn)", "source": "juraj-google-style"}
{"code": "def __init__(self, scope, parent, name):\n        \n        CodeControlFlow.__init__(self, scope, parent, name)\n        self.declarations = None\n        self.increment = None", "docstring": "Constructor for loops.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the loop statement in the program.", "source": "juraj-google-style"}
{"code": "def get_unique_families(hkls):\n\n    def is_perm(hkl1, hkl2):\n        h1 = np.abs(hkl1)\n        h2 = np.abs(hkl2)\n        return all([(i == j) for (i, j) in zip(sorted(h1), sorted(h2))])\n    unique = collections.defaultdict(list)\n    for hkl1 in hkls:\n        found = False\n        for hkl2 in unique.keys():\n            if is_perm(hkl1, hkl2):\n                found = True\n                unique[hkl2].append(hkl1)\n                break\n        if (not found):\n            unique[hkl1].append(hkl1)\n    pretty_unique = {}\n    for (k, v) in unique.items():\n        pretty_unique[sorted(v)[(- 1)]] = len(v)\n    return pretty_unique", "docstring": "Returns unique families of Miller indices. Families must be permutations\nof each other.\n\nArgs:\nhkls ([h, k, l]): List of Miller indices.\n\nReturns:\n{hkl: multiplicity}: A dict with unique hkl and multiplicity.", "source": "codesearchnet"}
{"code": "def convert(self, y):\n    if y is None:\n        return None\n    if isinstance(y, sparse_tensor.SparseTensor):\n        return self._convert_sparse(y)\n    assert isinstance(y, (tensor_lib.Tensor, ops.Operation)), y\n    output = self._convert_helper(y)\n    if isinstance(output, WrappedTensor):\n        assert isinstance(y, tensor_lib.Tensor)\n        return self._unwrap_or_tile(output)\n    else:\n        assert isinstance(y, ops.Operation)\n        assert not y.outputs\n        assert isinstance(output, ops.Operation)\n    return output", "docstring": "Returns the converted value corresponding to y.\n\nArgs:\ny: A Tensor or a ops.Operation object. If latter, y should not have\nany outputs.\n\nReturns:\nIf y does not need to be converted, it returns y as is. Else it returns\nthe \"converted value\" corresponding to y.", "source": "github-repos"}
{"code": "def table(text):\n    \n\n    def table_bar(col_lengths):\n        return \"+-%s-+%s\" % (\n            \"-+-\".join([\"-\" * length for length in col_lengths]),\n            os.linesep,\n        )\n\n    rows = []\n    for line in text.splitlines():\n        rows.append([part.strip() for part in line.split(\"|\")])\n    max_cols = max(map(len, rows))\n    col_lengths = [0] * max_cols\n    for row in rows:\n        cols = len(row)\n        if cols < max_cols:\n            row.extend([\"\"] * (max_cols - cols))\n        for i, col in enumerate(row):\n            col_length = len(col)\n            if col_length > col_lengths[i]:\n                col_lengths[i] = col_length\n    text = table_bar(col_lengths)\n    for i, row in enumerate(rows):\n        cols = []\n        for i, col in enumerate(row):\n            cols.append(col.ljust(col_lengths[i]))\n        text += \"| %s |%s\" % (\" | \".join(cols), os.linesep)\n        text += table_bar(col_lengths)\n    return text", "docstring": "Format the text as a table.\n\nText in format:\n\nfirst | second\nrow 2 col 1 | 4\n\nWill be formatted as::\n\n+-------------+--------+\n| first       | second |\n+-------------+--------+\n| row 2 col 1 | 4      |\n+-------------+--------+\n\nArgs:\ntext (str): Text that needs to be formatted.\n\nReturns:\nstr: Formatted string.", "source": "juraj-google-style"}
{"code": "def ParseRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = AndroidWebViewCacheEventData()\n    event_data.content_length = self._GetRowValue(\n        query_hash, row, 'contentlength')\n    event_data.query = query\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n\n    timestamp = self._GetRowValue(query_hash, row, 'expires')\n    if timestamp is not None:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastmodify')\n    if timestamp is not None:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def alternatives(self, Class=None, set=None):\n    for e in self.select(AlternativeLayers, None, True, ['Original', 'Suggestion']):\n        if (Class is None):\n            (yield e)\n        elif (len(e) >= 1):\n            for e2 in e:\n                try:\n                    if isinstance(e2, Class):\n                        try:\n                            if ((set is None) or (e2.set == set)):\n                                (yield e)\n                                break\n                        except AttributeError:\n                            continue\n                except AttributeError:\n                    continue", "docstring": "Generator over alternatives, either all or only of a specific annotation type, and possibly restrained also by set.\n\nArguments:\n* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.\n* ``set``   - The set you want to retrieve (defaults to None, which selects irregardless of set)\n\nReturns:\nGenerator over Alternative elements", "source": "codesearchnet"}
{"code": "def entropy(rho: Density, base: float = None) -> float:\n    \n    op = asarray(rho.asoperator())\n    probs = np.linalg.eigvalsh(op)\n    probs = np.maximum(probs, 0.0)  \n    return scipy.stats.entropy(probs, base=base)", "docstring": "Returns the von-Neumann entropy of a mixed quantum state.\n\nArgs:\nrho:    A density matrix\nbase:   Optional logarithm base. Default is base e, and entropy is\nmeasures in nats. For bits set base to 2.\n\nReturns:\nThe von-Neumann entropy of rho", "source": "juraj-google-style"}
{"code": "def _trackable_needs_to_be_saved(obj):\n    if hasattr(obj, '__dict__'):\n        if '_serialize_to_tensors' in obj.__dict__ or '_gather_saveables_for_checkpoint' in obj.__dict__ or '_copy_trackable_to_cpu' in obj.__dict__:\n            return True\n    for t in type(obj).mro():\n        if t is base.Trackable:\n            continue\n        elif '_serialize_to_tensors' in t.__dict__ or '_gather_saveables_for_checkpoint' in t.__dict__ or '_copy_trackable_to_cpu' in t.__dict__:\n            return True\n    return False", "docstring": "Returns whether a trackable needs to be saved.\n\nReturns a bool to indicate whether obj's class has `_serialize_to_tensors`,\n`gather_saveables_for_checkpoint`, or `_copy_trackable_to_cpu` defined.\n\nArgs:\nobj: A Trackable object.", "source": "github-repos"}
{"code": "def verify_manylinux_compliance(auditwheel_log: str, compliance_tag: str) -> None:\n    regex = 'following platform tag:\\\\s+\"{}\"'.format(compliance_tag)\n    alt_regex = regex.replace('2014', '_2_17')\n    if not (re.search(regex, auditwheel_log) or re.search(alt_regex, auditwheel_log)):\n        raise RuntimeError('The wheel is not compliant with the tag {tag}.\\n{result}'.format(tag=compliance_tag, result=auditwheel_log))", "docstring": "Verify manylinux compliance.\n\nArgs:\nauditwheel_log: \"auditwheel show\" execution results\ncompliance_tag: manyLinux compliance tag\n\nRaises:\nRuntimeError: if the wheel is not manyLinux compliant.", "source": "github-repos"}
{"code": "def add_asset(self, asset, asset_name, asset_type):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if asset == 'PHONE':\n            return self.tc_requests.add_victim_phone_asset(self.unique_id, asset_name)\n        if asset == 'EMAIL':\n            return self.tc_requests.add_victim_email_asset(self.unique_id, asset_name, asset_type)\n        if asset == 'NETWORK':\n            return self.tc_requests.add_victim_network_asset(self.unique_id, asset_name, asset_type)\n        if asset == 'SOCIAL':\n            return self.tc_requests.add_victim_social_asset(self.unique_id, asset_name, asset_type)\n        if asset == 'WEB':\n            return self.tc_requests.add_victim_web_asset(self.unique_id, asset_name)\n        self._tcex.handle_error(\n            925, ['asset_type', 'add_asset', 'asset_type', 'asset_type', asset_type]\n        )\n        return None", "docstring": "Adds a asset to the Victim\n\nValid asset_type:\n+ PHONE\n+ EMAIL\n+ NETWORK\n+ SOCIAL\n+ WEB\n\nArgs:\nasset:\nasset_name:\nasset_type: PHONE, EMAIL, NETWORK, SOCIAL, or WEB\n\nReturns:", "source": "juraj-google-style"}
{"code": "def compatible_with(value, logical_value):\n    if isinstance(value, abstract.List) and (not value.is_concrete):\n        return True\n    elif isinstance(value, abstract.Dict) and (not value.is_concrete):\n        return not logical_value or bool(value.get_instance_type_parameter(abstract_utils.K).bindings)\n    elif isinstance(value, abstract.LazyConcreteDict):\n        return value.is_empty() != logical_value\n    elif isinstance(value, abstract.PythonConstant):\n        return bool(value.pyval) == logical_value\n    elif isinstance(value, abstract.Instance):\n        name = value.full_name\n        if logical_value and name in _CONTAINER_NAMES:\n            ret = value.has_instance_type_parameter(abstract_utils.T) and bool(value.get_instance_type_parameter(abstract_utils.T).bindings)\n            return ret\n        elif name == 'builtins.NoneType':\n            return not logical_value\n        elif name in NUMERIC:\n            return True\n        elif isinstance(value.cls, abstract.Class) and (not value.cls.overrides_bool):\n            if getattr(value.cls, 'template', None):\n                return True\n            return logical_value\n        return True\n    elif isinstance(value, (abstract.Function, abstract.Class)):\n        return logical_value\n    else:\n        return True", "docstring": "Returns the conditions under which the value could be True or False.\n\nArgs:\nvalue: An abstract value.\nlogical_value: Either True or False.\n\nReturns:\nFalse: If the value could not evaluate to logical_value under any\ncircumstance (e.g. value is the empty list and logical_value is True).\nTrue: If it is possible for the value to evaluate to the logical_value,\nand any ambiguity cannot be resolved by additional bindings.", "source": "github-repos"}
{"code": "def confirm(statement):\n    prompt = '{statement} [y/n]'.format(statement=statement)\n    answer = _ask(prompt, limited_to=['yes', 'no', 'y', 'n'])\n    return (answer and answer.startswith('y'))", "docstring": "Ask the user for confirmation about the specified statement.\n\nArgs:\nstatement (unicode): statement to ask the user confirmation about.\n\nReturns:\nbool: whether or not specified statement was confirmed.", "source": "codesearchnet"}
{"code": "def normalize_cluster_spec(cluster_spec):\n    if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)):\n        return server_lib.ClusterSpec(cluster_spec)\n    elif not isinstance(cluster_spec, server_lib.ClusterSpec):\n        raise ValueError(\"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a `tf.train.ClusterDef` object\")\n    return cluster_spec", "docstring": "Makes `cluster_spec` into a `ClusterSpec` object.\n\nArgs:\ncluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\ncluster configurations.\n\nReturns:\na `ClusterSpec` object.\n\nRaises:\nValueError: if `cluster_spec` is not a dict or a `ClusterSpec` or a\n`ClusterDef`.", "source": "github-repos"}
{"code": "def patch_addContext(self, patch, text):\n    \n    if len(text) == 0:\n      return\n    pattern = text[patch.start2 : patch.start2 + patch.length1]\n    padding = 0\n\n    \n    \n    while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==\n        0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -\n        self.Patch_Margin)):\n      padding += self.Patch_Margin\n      pattern = text[max(0, patch.start2 - padding) :\n                     patch.start2 + patch.length1 + padding]\n    \n    padding += self.Patch_Margin\n\n    \n    prefix = text[max(0, patch.start2 - padding) : patch.start2]\n    if prefix:\n      patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]\n    \n    suffix = text[patch.start2 + patch.length1 :\n                  patch.start2 + patch.length1 + padding]\n    if suffix:\n      patch.diffs.append((self.DIFF_EQUAL, suffix))\n\n    \n    patch.start1 -= len(prefix)\n    patch.start2 -= len(prefix)\n    \n    patch.length1 += len(prefix) + len(suffix)\n    patch.length2 += len(prefix) + len(suffix)", "docstring": "Increase the context until it is unique,\nbut don't let the pattern expand beyond Match_MaxBits.\n\nArgs:\npatch: The patch to grow.\ntext: Source text.", "source": "juraj-google-style"}
{"code": "def _ParseFile(self, file_obj, line_parser):\n    \n    lines = [\n        l.strip() for l in utils.ReadFileBytesAsUnicode(file_obj).splitlines()\n    ]\n    try:\n      for index, line in enumerate(lines):\n        if line:\n          line_parser(line)\n    except (IndexError, KeyError) as e:\n      raise parser.ParseError(\"Invalid file at line %d: %s\" % (index + 1, e))", "docstring": "Process a file line by line.\n\nArgs:\nfile_obj: The file to parse.\nline_parser: The parser method used to process and store line content.\n\nRaises:\nparser.ParseError if the parser is unable to process the line.", "source": "juraj-google-style"}
{"code": "def parse_json_path(self, jsonpath):\n    if (jsonpath not in self.parsed):\n        try:\n            self.parsed[jsonpath] = self.parser(jsonpath)\n        except Exception:\n            self.log(('Invalid Json Path: ' + jsonpath), 'error')\n            raise InvalidJsonPathError('Invalid Json Path')\n    return self.parsed[jsonpath]", "docstring": "Parse a jsonpath\n\nArgs:\njsonpath: str\n\nReturns: a parsed json path", "source": "codesearchnet"}
{"code": "def GetContainingXLAContext(ctxt):\n    while ctxt:\n        if ctxt.IsXLAContext():\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None", "docstring": "Returns the first ancestor XLAContext of `ctxt`.\n\nReturns `ctxt` if `ctxt` is a XLAContext, or None if `ctxt` is not in a\nwhile loop.\n\nArgs:\nctxt: ControlFlowContext\n\nReturns:\n`ctxt` if `ctxt` is a XLAContext, the most nested XLAContext containing\n`ctxt`, or None if `ctxt` is not in a while loop.", "source": "github-repos"}
{"code": "def format(obj, options):\n    formatters = {float_types: (lambda x: '{:.{}g}'.format(x, options.digits))}\n    for (_types, fmtr) in formatters.items():\n        if isinstance(obj, _types):\n            return fmtr(obj)\n    try:\n        if (six.PY2 and isinstance(obj, six.string_types)):\n            return str(obj.encode('utf-8'))\n        return str(obj)\n    except:\n        return 'OBJECT'", "docstring": "Return a string representation of the Python object\n\nArgs:\nobj: The Python object\noptions: Format options", "source": "codesearchnet"}
{"code": "def _collect_process_tree(starting_pid):\n    ret = []\n    stack = [starting_pid]\n    while stack:\n        pid = stack.pop()\n        if platform.system() == 'Darwin':\n            command = ['pgrep', '-P', str(pid)]\n        else:\n            command = ['ps', '-o', 'pid', '--ppid', str(pid), '--noheaders']\n        try:\n            ps_results = subprocess.check_output(command).decode().strip()\n        except subprocess.CalledProcessError:\n            continue\n        children_pid_list = [int(p.strip()) for p in ps_results.split('\\n')]\n        stack.extend(children_pid_list)\n        ret.extend(children_pid_list)\n    return ret", "docstring": "Collects PID list of the descendant processes from the given PID.\n\nThis function only available on Unix like system.\n\nArgs:\nstarting_pid: The PID to start recursively traverse.\n\nReturns:\nA list of pid of the descendant processes.", "source": "github-repos"}
{"code": "class DepthAnythingReassembleStage(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList()\n        for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):\n            self.layers.append(DepthAnythingReassembleLayer(config, channels=channels, factor=factor))\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n        \n        out = []\n        for i, hidden_state in enumerate(hidden_states):\n            hidden_state = hidden_state[:, 1:]\n            batch_size, _, num_channels = hidden_state.shape\n            hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)\n            hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()\n            hidden_state = self.layers[i](hidden_state)\n            out.append(hidden_state)\n        return out", "docstring": "This class reassembles the hidden states of the backbone into image-like feature representations at various\nresolutions.\n\nThis happens in 3 stages:\n1. Take the patch embeddings and reshape them to image-like feature representations.\n2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.\n3. Resizing the spatial dimensions (height, width).\n\nArgs:\nconfig (`[DepthAnythingConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def client_credentials(self, client_id, client_secret, audience, grant_type='client_credentials'):\n    return self.post('https:", "docstring": "Client credentials grant\n\nThis is the OAuth 2.0 grant that server processes utilize in\norder to access an API. Use this endpoint to directly request\nan access_token by using the Application Credentials (a Client Id and\na Client Secret).\n\nArgs:\ngrant_type (str): Denotes the flow you're using. For client credentials\nuse client_credentials\n\nclient_id (str): your application's client Id\n\nclient_secret (str): your application's client Secret\n\naudience (str): The unique identifier of the target API you want to access.\n\nReturns:\naccess_token", "source": "codesearchnet"}
{"code": "def user_bounded_trie(namespace, name, metric, ptransform=None):\n    labels = create_labels(ptransform=ptransform, namespace=namespace, name=name)\n    return create_monitoring_info(USER_BOUNDED_TRIE_URN, BOUNDED_TRIE_TYPE, metric.to_proto().SerializeToString(), labels)", "docstring": "Return the string set monitoring info for the URN, metric and labels.\n\nArgs:\nnamespace: User-defined namespace of BoundedTrie.\nname: Name of BoundedTrie.\nmetric: The BoundedTrieData representing the metrics.\nptransform: The ptransform id used as a label.", "source": "github-repos"}
{"code": "def _ParseFSMVariables(self, template):\n    self.values = []\n    for line in template:\n        self._line_num += 1\n        line = line.rstrip()\n        if (not line):\n            return\n        if self.comment_regex.match(line):\n            continue\n        if line.startswith('Value '):\n            try:\n                value = TextFSMValue(fsm=self, max_name_len=self.MAX_NAME_LEN, options_class=self._options_cls)\n                value.Parse(line)\n            except TextFSMTemplateError as error:\n                raise TextFSMTemplateError(('%s Line %s.' % (error, self._line_num)))\n            if (value.name in self.header):\n                raise TextFSMTemplateError((\"Duplicate declarations for Value '%s'. Line: %s.\" % (value.name, self._line_num)))\n            try:\n                self._ValidateOptions(value)\n            except TextFSMTemplateError as error:\n                raise TextFSMTemplateError(('%s Line %s.' % (error, self._line_num)))\n            self.values.append(value)\n            self.value_map[value.name] = value.template\n        elif (not self.values):\n            raise TextFSMTemplateError('No Value definitions found.')\n        else:\n            raise TextFSMTemplateError(('Expected blank line after last Value entry. Line: %s.' % self._line_num))", "docstring": "Extracts Variables from start of template file.\n\nValues are expected as a contiguous block at the head of the file.\nThese will be line separated from the State definitions that follow.\n\nArgs:\ntemplate: Valid template file, with Value definitions at the top.\n\nRaises:\nTextFSMTemplateError: If syntax or semantic errors are found.", "source": "codesearchnet"}
{"code": "def assert_no_title(self, title, **kwargs):\n        \n\n        query = TitleQuery(title, **kwargs)\n\n        @self.synchronize(wait=query.wait)\n        def assert_no_title():\n            if query.resolves_for(self):\n                raise ExpectationNotMet(query.negative_failure_message)\n\n            return True\n\n        return assert_no_title()", "docstring": "Asserts that the page doesn't have the given title.\n\nArgs:\ntitle (str | RegexObject): The string that the title should include.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj-google-style"}
{"code": "def get_nc_attrs(nc):\n    \n\n    meta = {\n        'experiment': nc.experiment_id,\n        'frequency': nc.frequency,\n        'institute': nc.institute_id,\n        'model': nc.model_id,\n        'modeling_realm': nc.modeling_realm,\n        'ensemble_member': 'r{}i{}p{}'.format(nc.realization, nc.initialization_method, nc.physics_version),\n    }\n\n    variable_name = get_var_name(nc)\n    if variable_name:\n        meta.update({'variable_name': variable_name})\n\n    return meta", "docstring": "Gets netCDF file metadata attributes.\n\nArguments:\nnc (netCDF4.Dataset): an open NetCDF4 Dataset to pull attributes from.\n\nReturns:\ndict: Metadata as extracted from the netCDF file.", "source": "juraj-google-style"}
{"code": "def allconcat(self, x, mesh_axis, concat_axis, stack=False):\n    \n    x = x.to_laid_out_tensor()\n    coord = self.laid_out_pcoord(mesh_axis)\n    t = x.one_slice\n    old_shape = t.shape.as_list()\n    num_parts = self.shape[mesh_axis].size\n    t = tf.expand_dims(t, concat_axis)\n    t *= tf.reshape(\n        tf.one_hot(coord.one_slice, num_parts, dtype=t.dtype),\n        [num_parts if i == concat_axis else 1\n         for i in xrange(len(old_shape) + 1)])\n    if not stack:\n      new_shape = old_shape[:]\n      new_shape[concat_axis] *= num_parts\n      t = tf.reshape(t, new_shape)\n    return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], \"SUM\")", "docstring": "Grouped allconcat (like MPI allgather followed by concat).\n\nTODO(noam): inefficient - replace with a XLA allconcat when available\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer - the mesh axis along which to group\nconcat_axis: an integer (the Tensor axis along which to concatenate)\nstack: a boolean - whether to stack instead of concat\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def processPhoneList(platformNames=[], numbers=[], excludePlatformNames=[]):\n    \n    \n    platforms = platform_selection.getPlatformsByName(platformNames, mode=\"phonefy\", excludePlatformNames=excludePlatformNames)\n\n    results = []\n    for num in numbers:\n        for pla in platforms:\n            \n            entities = pla.getInfo(query=num, process=True, mode=\"phonefy\")\n            if entities != {}:\n                results+=json.loads(entities)\n    return results", "docstring": "Method to perform searchs on a series of numbers.\n\nArgs:\n-----\nplatformNames: List of names of the platforms.\nnumbers: List of numbers to be queried.\nexcludePlatformNames: A list of platforms not to be searched.\n\nReturn:\n-------\nA list of verified emails.", "source": "juraj-google-style"}
{"code": "def prune_volumes(self, filters=None):\n        \n        params = {}\n        if filters:\n            params['filters'] = utils.convert_filters(filters)\n        url = self._url('/volumes/prune')\n        return self._result(self._post(url, params=params), True)", "docstring": "Delete unused volumes\n\nArgs:\nfilters (dict): Filters to process on the prune list.\n\nReturns:\n(dict): A dict containing a list of deleted volume names and\nthe amount of disk space reclaimed in bytes.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def upload(self, content, content_type, filename=None):\n        \n        try:\n            response = self.api.media_upload(content, content_type, filename)\n            if \"content_uri\" in response:\n                return response[\"content_uri\"]\n            else:\n                raise MatrixUnexpectedResponse(\n                    \"The upload was successful, but content_uri wasn't found.\"\n                )\n        except MatrixRequestError as e:\n            raise MatrixRequestError(\n                code=e.code,\n                content=\"Upload failed: %s\" % e\n            )", "docstring": "Upload content to the home server and recieve a MXC url.\n\nArgs:\ncontent (bytes): The data of the content.\ncontent_type (str): The mimetype of the content.\nfilename (str): Optional. Filename of the content.\n\nRaises:\nMatrixUnexpectedResponse: If the homeserver gave a strange response\nMatrixRequestError: If the upload failed for some reason.", "source": "juraj-google-style"}
{"code": "def output_classes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)", "docstring": "Returns the class of each component of an element of this iterator.\n\nThe expected values are `tf.Tensor` and `tf.SparseTensor`.\n\nReturns:\nA nested structure of Python `type` objects corresponding to each\ncomponent of an element of this dataset.", "source": "github-repos"}
{"code": "def _GetTfRecordEntries(self, path, max_entries, is_sequence, iterator_options):\n    return self._GetEntries([path], max_entries, partial(tf.python_io.tf_record_iterator, options=iterator_options), is_sequence)", "docstring": "Extracts TFRecord examples into a dictionary of feature values.\n\nArgs:\npath: The path to the TFRecord file(s).\nmax_entries: The maximum number of examples to load.\nis_sequence: True if the input data from 'path' are tf.SequenceExamples,\nFalse if tf.Examples. Defaults to false.\niterator_options: Options to pass to the iterator that reads the examples.\nDefaults to None.\n\nReturns:\nA tuple with two elements:\n- A dictionary of all features parsed thus far and arrays of their\nvalues.\n- The number of examples parsed.", "source": "codesearchnet"}
{"code": "def batch_shape(self):\n    return self.shape[:-2]", "docstring": "`TensorShape` of batch dimensions of this `LinearOperator`.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns\n`TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]`\n\nReturns:\n`TensorShape`, statically determined, may be undefined.", "source": "github-repos"}
{"code": "def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response:\n    with handle_exceptions(request, debug) as handler:\n        result = call(methods.items[request.method], *request.args, **request.kwargs)\n        handler.response = SuccessResponse(result=result, id=request.id)\n    return handler.response", "docstring": "Call a Request, catching exceptions to ensure we always return a Response.\n\nArgs:\nrequest: The Request object.\nmethods: The list of methods that can be called.\ndebug: Include more information in error responses.\n\nReturns:\nA Response object.", "source": "codesearchnet"}
{"code": "def angular_templates(context):\n    template_paths = context['HORIZON_CONFIG']['external_templates']\n    all_theme_static_files = context['HORIZON_CONFIG']['theme_static_files']\n    this_theme_static_files = all_theme_static_files[context['THEME']]\n    template_overrides = this_theme_static_files['template_overrides']\n    angular_templates = {}\n    for relative_path in template_paths:\n        template_static_path = (context['STATIC_URL'] + relative_path)\n        if (relative_path in template_overrides):\n            relative_path = template_overrides[relative_path]\n        result = []\n        for finder in finders.get_finders():\n            result.extend(finder.find(relative_path, True))\n        path = result[(- 1)]\n        try:\n            if six.PY3:\n                with open(path, encoding='utf-8') as template_file:\n                    angular_templates[template_static_path] = template_file.read()\n            else:\n                with open(path) as template_file:\n                    angular_templates[template_static_path] = template_file.read()\n        except (OSError, IOError):\n            pass\n    templates = [(key, value) for (key, value) in angular_templates.items()]\n    templates.sort(key=(lambda item: item[0]))\n    return {'angular_templates': templates}", "docstring": "Generate a dictionary of template contents for all static HTML templates.\n\nIf the template has been overridden by a theme, load the\noverride contents instead of the original HTML file.\nOne use for this is to pre-populate the angular template cache.\n\nArgs:\ncontext: the context of the current Django template\n\nReturns: an object containing\nangular_templates: dictionary of angular template contents\n- key is the template's static path,\n- value is a string of HTML template contents", "source": "codesearchnet"}
{"code": "def from_shape(cls, ragged_shape: dynamic_ragged_shape.DynamicRaggedShape) -> 'StructuredTensor':\n    return StructuredTensor(fields={}, ragged_shape=ragged_shape)", "docstring": "Creates a `StructuredTensor` with no fields and ragged_shape.\n\nArgs:\nragged_shape: the shape of the structured tensor.\n\nReturns:\na StructuredTensor with no fields and ragged_shape.", "source": "github-repos"}
{"code": "def to_b58check(self, testnet=False):\n        \n        b = self.testnet_bytes if testnet else bytes(self)\n        return base58.b58encode_check(b)", "docstring": "Generates a Base58Check encoding of this key.\n\nArgs:\ntestnet (bool): True if the key is to be used with\ntestnet, False otherwise.\nReturns:\nstr: A Base58Check encoded string representing the key.", "source": "juraj-google-style"}
{"code": "def expected_mean_g_value(self, vocab_size: int, coinflip_prob: float=0.5) -> float:\n    return coinflip_prob + coinflip_prob * (1 - coinflip_prob) * (1 - 1 / vocab_size)", "docstring": "Compute expected mean g-value after watermarking, assuming uniform LM dist.\n\nThis is the theoretical expected value for single-layer watermarking.\n\nArgs:\nvocab_size (`int`):\nThe size of the vocabulary.\ncoinflip_prob arg_name (`float`, *optional*, defaults to 0.5):\nProbability of 1 in boolean prf.\n\nReturns:\nThe expected mean g-value for watermarked text.", "source": "github-repos"}
{"code": "def rating(self, value):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n        request_data = {'rating': value}\n        return self.tc_requests.update(\n            self.api_type, self.api_sub_type, self.unique_id, request_data, owner=self.owner\n        )", "docstring": "Updates the Indicators rating\n\nArgs:\nvalue:", "source": "juraj-google-style"}
{"code": "def create_ondemand_streaming_locator(access_token, encoded_asset_id, pid, starttime=None):\n    path = '/Locators'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    if (starttime is None):\n        body = (((('{ \\t\\t\\t\"AccessPolicyId\":\"' + pid) + '\", \\t\\t\\t\"AssetId\":\"') + encoded_asset_id) + '\", \\t\\t\\t\"Type\": \"2\"     }')\n    else:\n        body = (((((('{ \\t\\t\\t\"AccessPolicyId\":\"' + pid) + '\", \\t\\t\\t\"AssetId\":\"') + encoded_asset_id) + '\", \\t\\t\\t\"StartTime\":\"') + str(starttime)) + '\", \\t\\t\\t\"Type\": \"2\" \\t\\t}')\n    return do_ams_post(endpoint, path, body, access_token, 'json_only')", "docstring": "Create Media Service OnDemand Streaming Locator.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nencoded_asset_id (str): A Media Service Encoded Asset ID.\npid (str): A Media Service Encoded PID.\nstarttime (str): A Media Service Starttime.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def _keyDown(key):\n    \n    if key not in keyboardMapping or keyboardMapping[key] is None:\n        return\n\n    needsShift = pyautogui.isShiftCharacter(key)\n\n    \n    mods, vkCode = divmod(keyboardMapping[key], 0x100)\n\n    for apply_mod, vk_mod in [(mods & 4, 0x12), (mods & 2, 0x11),\n        (mods & 1 or needsShift, 0x10)]: \n        if apply_mod:\n            ctypes.windll.user32.keybd_event(vk_mod, 0, 0, 0) \n    ctypes.windll.user32.keybd_event(vkCode, 0, 0, 0)\n    for apply_mod, vk_mod in [(mods & 1 or needsShift, 0x10), (mods & 2, 0x11),\n        (mods & 4, 0x12)]: \n        if apply_mod:\n            ctypes.windll.user32.keybd_event(vk_mod, 0, KEYEVENTF_KEYUP, 0)", "docstring": "Performs a keyboard key press without the release. This will put that\nkey in a held down state.\n\nNOTE: For some reason, this does not seem to cause key repeats like would\nhappen if a keyboard key was held down on a text field.\n\nArgs:\nkey (str): The key to be pressed down. The valid names are listed in\npyautogui.KEY_NAMES.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def extension_method(cls, method_name: str) -> Any:\n\n    def decorator(func):\n        sig = pg_typing.signature(func, auto_typing=False, auto_doc=False)\n        try:\n            extension_arg_index = sig.arg_names.index('value') - 1\n        except ValueError as e:\n            raise TypeError(f'View method {func.__name__!r} must have a `value` argument, which represents the target object to render.') from e\n        if sig.varargs is not None:\n            raise TypeError(f'View method must not have variable positional argument. Found `*{sig.varargs.name}` in {func.__name__!r}')\n\n        def get_extension(args: Sequence[Any], kwargs: Dict[str, Any]) -> Any:\n            if 'value' in kwargs:\n                return kwargs['value']\n            if extension_arg_index < len(args):\n                return args[extension_arg_index]\n            raise ValueError(f'No value is provided for the `value` argument for {func.__name__!r}.')\n\n        def map_args(args: Sequence[Any], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n            assert len(args) < len(sig.args), (args, sig.args)\n            kwargs.update({sig.args[i].name: arg for i, arg in enumerate(args) if i != extension_arg_index})\n            kwargs.pop('value', None)\n            return kwargs\n\n        @functools.wraps(func)\n        def _generated_view_fn(self, *args, **kwargs):\n            return self._maybe_dispatch(*args, **kwargs, extension=get_extension(args, kwargs), view_method=func, extension_method_name=method_name, arg_map_fn=map_args)\n        return _generated_view_fn\n    return decorator", "docstring": "Decorator that dispatches a View method to a View.Extension method.\n\nA few things to note:\n1) The View method being decorated must have a `value` argument, based on\nwhich the Extension method will be dispatched.\n2) The View method's `value` argument will map to the Extension method's\n`self` argument.\n3) The Extension method can optionally have a `view` argument, which will\nbe set to the current View class.\n\nArgs:\nmethod_name: The name of the method in the Extension class to dispatch\nfrom current View method.\n\nReturns:\nA decorator that dispatches a View method to a View.Extension method.", "source": "github-repos"}
{"code": "def _wrap_section(source, width):\n    if _get_section('usage', source):\n        return _wrap_usage_section(source, width)\n    if _is_definition_section(source):\n        return _wrap_definition_section(source, width)\n    lines = inspect.cleandoc(source).splitlines()\n    paragraphs = (textwrap.wrap(line, width, replace_whitespace=False) for line in lines)\n    return '\\n'.join((line for paragraph in paragraphs for line in paragraph))", "docstring": "Wrap the given section string to the current terminal size.\n\nIntelligently wraps the section string to the given width. When wrapping\nsection lines, it auto-adjusts the spacing between terms and definitions.\nIt also adjusts commands the fit the correct length for the arguments.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "codesearchnet"}
{"code": "class Blip2ForConditionalGenerationModelOutput(ModelOutput):\n    loss: Optional[Tuple[torch.FloatTensor]] = None\n    logits: Optional[Tuple[torch.FloatTensor]] = None\n    vision_outputs: Optional[torch.FloatTensor] = None\n    qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None\n    language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None\n\n    def to_tuple(self) -> Tuple[Any]:\n        return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Class defining the outputs of [`Blip2ForConditionalGeneration`].\n\nArgs:\nloss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):\nLanguage modeling loss from the language model.\nlogits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\nPrediction scores of the language modeling head of the language model.\nvision_outputs (`BaseModelOutputWithPooling`):\nOutputs of the vision encoder.\nqformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):\nOutputs of the Q-Former (Querying Transformer).\nlanguage_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):\nOutputs of the language model.", "source": "github-repos"}
{"code": "def _VerifyOneTest(self, pool_func, pool_grad_func, input_sizes, ksize, strides, padding, data_format, pool_grad_grad_func=None):\n    total_size = np.prod(input_sizes)\n    x = np.arange(1, total_size + 1, dtype=np.float32)\n    x *= np.random.randint(2, size=total_size) * 2 - 1\n    x[np.random.choice(total_size)] = np.inf\n    x[np.random.choice(total_size)] = -np.inf\n    x = x.reshape(input_sizes)\n    with self.session() as sess:\n        with ops.device(self.CPU_DEVICE):\n            inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n            outputs = pool_func(inputs, ksize=ksize, strides=strides, padding=padding, data_format='NHWC')\n        output_vals = np.array(sess.run(outputs, {inputs: x}))\n        output_gradient_vals = np.arange(1, output_vals.size + 1, dtype=np.float32)\n        output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)\n        output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)\n        output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)\n        with ops.device(self.CPU_DEVICE):\n            output_gradients = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)\n            expected_input_gradients = pool_grad_func(inputs, outputs, output_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NHWC')\n            expected_input_gradient_vals = sess.run(expected_input_gradients, {inputs: x, output_gradients: output_gradient_vals})\n            output_grad_gradients = array_ops.placeholder(dtypes.float32, shape=expected_input_gradient_vals.shape)\n            if pool_grad_grad_func is not None:\n                expected_grad_gradients = pool_grad_grad_func(inputs, outputs, output_grad_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NHWC')\n                expected_grad_gradients_vals = sess.run(expected_grad_gradients, {inputs: x, output_grad_gradients: output_grad_grad_vals})\n        with self.test_scope():\n            outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)\n            xla_inputs = inputs\n            xla_outputs = outputs\n            xla_output_gradients = output_gradients\n            xla_output_grad_gradients = output_grad_gradients\n            xla_ksize = ksize\n            xla_strides = strides\n            if data_format == 'NCHW':\n                xla_inputs = NHWCToNCHW(inputs)\n                xla_outputs = NHWCToNCHW(outputs)\n                xla_output_gradients = NHWCToNCHW(output_gradients)\n                xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)\n                xla_ksize = NHWCToNCHW(ksize)\n                xla_strides = NHWCToNCHW(strides)\n            actual_input_gradients = pool_grad_func(xla_inputs, xla_outputs, xla_output_gradients, ksize=xla_ksize, strides=xla_strides, padding=padding, data_format=data_format)\n            if data_format == 'NCHW':\n                actual_input_gradients = NCHWToNHWC(actual_input_gradients)\n            if pool_grad_grad_func is not None:\n                actual_grad_gradients = pool_grad_grad_func(xla_inputs, xla_outputs, xla_output_grad_gradients, ksize=xla_ksize, strides=xla_strides, padding=padding, data_format=data_format)\n                if data_format == 'NCHW':\n                    actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)\n        actual_input_gradients_vals = sess.run(actual_input_gradients, {inputs: x, outputs: output_vals, output_gradients: output_gradient_vals})\n        
self.assertAllClose(expected_input_gradient_vals, actual_input_gradients_vals, rtol=0.0001, atol=1e-06)\n        self.assertShapeEqual(actual_input_gradients_vals, inputs)\n        if pool_grad_grad_func is not None:\n            actual_grad_gradients_vals = sess.run(actual_grad_gradients, {inputs: x, outputs: output_vals, output_grad_gradients: output_grad_grad_vals})\n            self.assertAllClose(expected_grad_gradients_vals, actual_grad_gradients_vals, rtol=0.0001, atol=1e-06)\n            self.assertShapeEqual(actual_grad_gradients_vals, outputs)", "docstring": "Verifies the output values of the pooling gradient function.\n\nArgs:\npool_func: Forward pooling function\npool_grad_func: Pooling gradient function for pool_grad_func\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\ndata_format: The data format we use to run the pooling operation.\npool_grad_grad_func: Second-order gradient function, if available.", "source": "github-repos"}
{"code": "def get_filename(self, task, default_ext):\n    url_path = urlparse(task['file_url'])[2]\n    extension = (url_path.split('.')[(- 1)] if ('.' in url_path) else default_ext)\n    file_idx = (self.fetched_num + self.file_idx_offset)\n    return '{:06d}.{}'.format(file_idx, extension)", "docstring": "Set the path where the image will be saved.\n\nThe default strategy is to use an increasing 6-digit number as\nthe filename. You can override this method if you want to set custom\nnaming rules. The file extension is kept if it can be obtained from\nthe url, otherwise ``default_ext`` is used as extension.\n\nArgs:\ntask (dict): The task dict got from ``task_queue``.\n\nOutput:\nFilename with extension.", "source": "codesearchnet"}
{"code": "def _validate_at_hash(claims, access_token, algorithm):\n    \n    if 'at_hash' not in claims and not access_token:\n        return\n    elif 'at_hash' in claims and not access_token:\n        msg = 'No access_token provided to compare against at_hash claim.'\n        raise JWTClaimsError(msg)\n    elif access_token and 'at_hash' not in claims:\n        msg = 'at_hash claim missing from token.'\n        raise JWTClaimsError(msg)\n\n    try:\n        expected_hash = calculate_at_hash(access_token,\n                                          ALGORITHMS.HASHES[algorithm])\n    except (TypeError, ValueError):\n        msg = 'Unable to calculate at_hash to verify against token claims.'\n        raise JWTClaimsError(msg)\n        \n    if claims['at_hash'] != expected_hash:\n        raise JWTClaimsError('at_hash claim does not match access_token.')", "docstring": "Validates that the 'at_hash' parameter included in the claims matches\nwith the access_token returned alongside the id token as part of\nthe authorization_code flow.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\naccess_token (str): The access token returned by the OpenID Provider.\nalgorithm (str): The algorithm used to sign the JWT, as specified by\nthe token headers.", "source": "juraj-google-style"}
{"code": "def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape, x_dtype):\n    x_init_val = np.random.random_sample(x_shape).astype(x_dtype)\n    x32_init_val = x_init_val.astype(np.float32)\n    theoretical_grad, _ = gradient_checker.compute_gradient(x, x_shape, y, y_shape, delta=0.001, x_init_value=x_init_val)\n    _, numerical_grad = gradient_checker.compute_gradient(x32, x_shape, y32, y_shape, delta=0.001, x_init_value=x32_init_val)\n    if theoretical_grad.size == 0 and numerical_grad.size == 0:\n        return 0\n    return np.fabs(theoretical_grad - numerical_grad).max()", "docstring": "Computes the gradient error for float16 inputs and/or outputs.\n\nThis returns the same value as gradient_checker.compute_gradient_error. The\ndifference is that gradient_checker.compute_gradient_error does not\nnumerically compute the gradients in a numerically stable way for float16\ntensors. To fix this, this function requires float32 versions of x and y to\nnumerically compute the gradients, to compare with the float16 symbolically\ncomputed gradients.\n\nArgs:\nx: The input tensor.\nx32: A float32 version of x.\nx_shape: The shape of x.\ny: The output tensor.\ny32: A float32 version of y. Must be calculated based on x32, not x.\ny_shape: The shape of y.\nx_dtype: The type of x, float16 or bfloat16.\n\nReturns:\nThe maximum error in between the two Jacobians, as in\ngradient_checker.compute_gradient_error.", "source": "github-repos"}
{"code": "def get_current_round(self, tournament=1):\n    query = '\\n            query($tournament: Int!) {\\n              rounds(tournament: $tournament\\n                     number: 0) {\\n                number\\n              }\\n            }\\n        '\n    arguments = {'tournament': tournament}\n    data = self.raw_query(query, arguments)['data']['rounds'][0]\n    if (data is None):\n        return None\n    round_num = data['number']\n    return round_num", "docstring": "Get number of the current active round.\n\nArgs:\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\nint: number of the current active round\n\nExample:\n>>> NumerAPI().get_current_round()\n104", "source": "codesearchnet"}
{"code": "def prod(\n        self,\n        axis=None,\n        skipna=None,\n        level=None,\n        numeric_only=None,\n        min_count=0,\n        **kwargs\n    ):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)\n        return data._reduce_dimension(\n            data._query_compiler.prod(\n                axis=axis,\n                skipna=skipna,\n                level=level,\n                numeric_only=numeric_only,\n                min_count=min_count,\n                **kwargs\n            )\n        )", "docstring": "Return the product of the values for the requested axis\n\nArgs:\naxis : {index (0), columns (1)}\nskipna : boolean, default True\nlevel : int or level name, default None\nnumeric_only : boolean, default None\nmin_count : int, default 0\n\nReturns:\nprod : Series or DataFrame (if level specified)", "source": "juraj-google-style"}
{"code": "def HandleNetworkInterfaces(self, result):\n    \n    network_interfaces = self._ExtractInterfaceMetadata(result)\n\n    if self.network_setup_enabled:\n      self.network_setup.EnableNetworkInterfaces(\n          [interface.name for interface in network_interfaces[1:]])\n\n    for interface in network_interfaces:\n      if self.ip_forwarding_enabled:\n        self.ip_forwarding.HandleForwardedIps(\n            interface.name, interface.forwarded_ips, interface.ip)", "docstring": "Called when network interface metadata changes.\n\nArgs:\nresult: dict, the metadata response with the network interfaces.", "source": "juraj-google-style"}
{"code": "class GraniteMoeHybridMoE(nn.Module):\n\n    def __init__(self, config: GraniteMoeHybridConfig):\n        super(GraniteMoeHybridMoE, self).__init__()\n        self.input_size = config.hidden_size\n        self.hidden_size = config.intermediate_size\n        self.activation = ACT2FN[config.hidden_act]\n        self.input_linear = GraniteMoeHybridParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)\n        self.output_linear = GraniteMoeHybridParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)\n        self.router = GraniteMoeHybridTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)\n\n    def forward(self, layer_input):\n        \n        bsz, length, emb_size = layer_input.size()\n        layer_input = layer_input.reshape(-1, emb_size)\n        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n        expert_inputs = layer_input[batch_index]\n        hidden_states = self.input_linear(expert_inputs, expert_size)\n        chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n        expert_outputs = self.output_linear(hidden_states, expert_size)\n        expert_outputs = expert_outputs * batch_gates[:, None]\n        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n        layer_output = zeros.index_add(0, batch_index, expert_outputs)\n        layer_output = layer_output.view(bsz, length, self.input_size)\n        return (layer_output, router_logits)", "docstring": "A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def copen(fileobj, mode='rb', **kwargs):\n    algo = io.open\n    mode = mode.lower().strip()\n    modules = {}\n    write_mode = (False if (mode.lstrip('U')[0] == 'r') else True)\n    kwargs['mode'] = mode\n    modules_to_import = {'bz2': 'BZ2File', 'gzip': 'GzipFile', 'lzma': 'LZMAFile'}\n    for (mod, _class) in modules_to_import.items():\n        try:\n            modules[_class] = getattr(import_module(mod), _class)\n        except (ImportError, AttributeError) as e:\n            modules[_class] = open\n            warn('Cannot process {0} files due to following error:{1}{2}{1}You will need to install the {0} library to properly use these files. Currently, such files will open in \"text\" mode.'.format(mod, linesep, e))\n    if (write_mode is True):\n        algo_map = {'bz2': modules['BZ2File'], 'gz': modules['GzipFile'], 'xz': modules['LZMAFile']}\n        ext = fileobj.split('.')[(- 1)]\n        try:\n            algo = algo_map[ext]\n        except KeyError:\n            pass\n    else:\n        algo = io.TextIOWrapper\n        file_sigs = {b'BZh': modules['BZ2File'], b'\\x1f\\x8b\\x08': modules['GzipFile'], b'\\xfd7zXZ\\x00': modules['LZMAFile']}\n        fileobj = io.BufferedReader(io.open(fileobj, 'rb'))\n        max_len = max((len(x) for x in file_sigs.keys()))\n        start = fileobj.peek(max_len)\n        for sig in file_sigs.keys():\n            if start.startswith(sig):\n                algo = file_sigs[sig]\n                break\n    algo_args = set(getfullargspec(algo).args)\n    good_args = set(kwargs.keys()).intersection(algo_args)\n    _kwargs = {arg: kwargs[arg] for arg in good_args}\n    if (write_mode is True):\n        handle = algo(fileobj, **_kwargs)\n    else:\n        try:\n            handle = algo(fileobj=fileobj, **_kwargs)\n        except TypeError:\n            handle = algo(fileobj, **_kwargs)\n    return handle", "docstring": "Detects and opens compressed file for reading and writing.\n\nArgs:\nfileobj (File): any File-like object supported by an underlying\ncompression algorithm\n\nmode (unicode): mode to open fileobj with\n\n**kwargs: keyword-arguments to pass to the compression algorithm\n\nReturns:\nFile: TextWrapper if no compression, else returns appropriate\nwrapper for the compression type\n\nExample:\n.. code-block:: Python\n\n>>> from tempfile import NamedTemporaryFile\n>>> # Write compressed file\n>>> temp = NamedTemporaryFile(delete=False, suffix='.bz2')\n>>> test_bz2 = copen(temp.name, 'wb')\n>>> test_bz2.write(b'bzip2')\n>>> test_bz2.close()\n>>> # Read compressed bzip file\n>>> test_bz2 = copen(temp.name, 'rb')\n>>> test_bz2.read()\nb'bzip2'", "source": "codesearchnet"}
{"code": "def recipe_dataset(config, auth_write, dataset_dataset, dataset_emails, dataset_groups):\n    dataset(config, {'auth': auth_write, 'dataset': dataset_dataset, 'emails': dataset_emails, 'groups': dataset_groups})", "docstring": "Create and permission a dataset in BigQuery.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\ndataset_dataset (string) - Name of Google BigQuery dataset to create.\ndataset_emails (string_list) - Comma separated emails.\ndataset_groups (string_list) - Comma separated groups.", "source": "github-repos"}
{"code": "def inv(x):\n    if any_symbolic_tensors((x,)):\n        return Inv().symbolic_call(x)\n    return _inv(x)", "docstring": "Computes the inverse of a square tensor.\n\nArgs:\nx: Input tensor of shape `(..., M, M)`.\n\nReturns:\nA tensor of shape `(..., M, M)` representing the inverse of `x`.", "source": "github-repos"}
{"code": "def _GetAttributeScripts(self, attribute_data, dest_dir):\n    \n    script_dict = {}\n    attribute_data = attribute_data or {}\n    metadata_key = '%s-script' % self.script_type\n    metadata_value = attribute_data.get(metadata_key)\n    if metadata_value:\n      self.logger.info('Found %s in metadata.', metadata_key)\n      with tempfile.NamedTemporaryFile(\n          mode='w', dir=dest_dir, delete=False) as dest:\n        dest.write(metadata_value.lstrip())\n        script_dict[metadata_key] = dest.name\n\n    metadata_key = '%s-script-url' % self.script_type\n    metadata_value = attribute_data.get(metadata_key)\n    if metadata_value:\n      self.logger.info('Found %s in metadata.', metadata_key)\n      script_dict[metadata_key] = self._DownloadScript(\n          metadata_value, dest_dir)\n\n    return script_dict", "docstring": "Retrieve the scripts from attribute metadata.\n\nArgs:\nattribute_data: dict, the contents of the attributes metadata.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\ndict, a dictionary mapping metadata keys to files storing scripts.", "source": "juraj-google-style"}
{"code": "def extract_archive(file_path, path='.', archive_format='auto'):\n    if archive_format is None:\n        return False\n    if archive_format == 'auto':\n        archive_format = ['tar', 'zip']\n    if isinstance(archive_format, str):\n        archive_format = [archive_format]\n    file_path = path_to_string(file_path)\n    path = path_to_string(path)\n    for archive_type in archive_format:\n        if archive_type == 'tar':\n            open_fn = tarfile.open\n            is_match_fn = tarfile.is_tarfile\n        elif archive_type == 'zip':\n            open_fn = zipfile.ZipFile\n            is_match_fn = zipfile.is_zipfile\n        else:\n            raise NotImplementedError(archive_type)\n        if is_match_fn(file_path):\n            with open_fn(file_path) as archive:\n                try:\n                    if zipfile.is_zipfile(file_path):\n                        archive.extractall(path)\n                    else:\n                        archive.extractall(path, members=filter_safe_paths(archive))\n                except (tarfile.TarError, RuntimeError, KeyboardInterrupt):\n                    if os.path.exists(path):\n                        if os.path.isfile(path):\n                            os.remove(path)\n                        else:\n                            shutil.rmtree(path)\n                    raise\n            return True\n    return False", "docstring": "Extracts an archive if it matches a support format.\n\nSupports `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats.\n\nArgs:\nfile_path: Path to the archive file.\npath: Where to extract the archive file.\narchive_format: Archive format to try for extracting the file.\nOptions are `\"auto\"`, `\"tar\"`, `\"zip\"`, and `None`.\n`\"tar\"` includes `.tar`, `.tar.gz`, and `.tar.bz` files.\nThe default `\"auto\"` uses `[\"tar\", \"zip\"]`.\n`None` or an empty list will return no matches found.\n\nReturns:\n`True` if a match was found and an archive extraction was completed,\n`False` otherwise.", "source": "github-repos"}
{"code": "def decode_list_offset_response(cls, response):\n        \n        return [\n            kafka.structs.ListOffsetResponsePayload(topic, partition, error, timestamp, offset)\n            for topic, partitions in response.topics\n            for partition, error, timestamp, offset in partitions\n        ]", "docstring": "Decode OffsetResponse_v2 into ListOffsetResponsePayloads\n\nArguments:\nresponse: OffsetResponse_v2\n\nReturns: list of ListOffsetResponsePayloads", "source": "juraj-google-style"}
{"code": "def FromFile(cls, inpath):\n    with open(inpath, 'r') as infile:\n        indata = infile.read()\n    return cls.FromString(indata)", "docstring": "Load a CommandFile from a path.\n\nArgs:\ninpath (str): The path to the file to load\n\nReturns:\nCommandFile: The decoded CommandFile object.", "source": "codesearchnet"}
{"code": "def run_step(context):\n    \n    logger.debug(\"started\")\n\n    assert context, (\"context must be set for echo. Did you set \"\n                     \"'echoMe=text here'?\")\n\n    context.assert_key_exists('echoMe', __name__)\n\n    if isinstance(context['echoMe'], str):\n        val = context.get_formatted('echoMe')\n    else:\n        val = context['echoMe']\n\n    logger.info(val)\n\n    logger.debug(\"done\")", "docstring": "Simple echo. Outputs context['echoMe'].\n\nArgs:\ncontext: dictionary-like. context is mandatory.\ncontext must contain key 'echoMe'\ncontext['echoMe'] will echo the value to logger.\nThis logger could well be stdout.\n\nWhen you execute the pipeline, it should look something like this:\npypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.", "source": "juraj-google-style"}
{"code": "def __batch_evaluate(self, test_events):\n        \n        percentiles = np.zeros(len(test_events))\n\n        all_items = set(self.item_buffer)\n        for i, e in enumerate(test_events):\n\n            \n            unobserved = all_items\n            if not self.repeat:\n                \n                unobserved -= self.rec.users[e.user.index]['known_items']\n                \n                unobserved.add(e.item.index)\n\n            candidates = np.asarray(list(unobserved))\n            recos, scores = self.__recommend(e, candidates)\n\n            pos = np.where(recos == e.item.index)[0][0]\n            percentiles[i] = pos / (len(recos) - 1) * 100\n\n        return np.mean(percentiles)", "docstring": "Evaluate the current model by using the given test events.\n\nArgs:\ntest_events (list of Event): Current model is evaluated by these events.\n\nReturns:\nfloat: Mean Percentile Rank for the test set.", "source": "juraj-google-style"}
{"code": "def unset(config, section, opt=None):\n        \n        if section not in config.keys():\n            raise ConfigError(\"section '{}' doesn't exist\".format(section))\n\n        if opt is None:\n            del config[section]\n            return\n\n        if opt not in config[section].keys():\n            raise ConfigError(\n                \"option '{}.{}' doesn't exist\".format(section, opt)\n            )\n        del config[section][opt]\n\n        if not config[section]:\n            del config[section]", "docstring": "Unsets specified option and/or section in the config.\n\nArgs:\nconfig (configobj.ConfigObj): config to work on.\nsection (str): section name.\nopt (str): optional option name.", "source": "juraj-google-style"}
{"code": "def get_module_file(self, namespace, module, version):\n    module_parts = module.split('.')\n    module_path = path_utils.join(*module_parts)\n    paths = []\n    if namespace == 'stdlib':\n        path = path_utils.join(namespace, module_path)\n        if self._is_module_in_typeshed(module_parts, version) or path in self.missing:\n            paths.append(path)\n    elif namespace == 'third_party':\n        for package in sorted(self._third_party_packages[module_parts[0]]):\n            paths.append(path_utils.join('stubs', package, module_path))\n    for path_rel in paths:\n        if path_rel in self.missing:\n            relpath = path_utils.join('nonexistent', path_rel + '.pyi')\n            return (relpath, builtin_stubs.DEFAULT_SRC)\n        for path in [path_utils.join(path_rel, '__init__.pyi'), path_rel + '.pyi']:\n            try:\n                name, src = self._store.load_file(path)\n                return (name, src)\n            except OSError:\n                pass\n    raise OSError(f\"Couldn't find {module}\")", "docstring": "Get the contents of a typeshed .pyi file.\n\nArguments:\nnamespace: selects a top-level directory within typeshed/ Allowed values\nare \"stdlib\" and \"third_party\". \"third_party\" corresponds to the the\ntypeshed/stubs/ directory.\nmodule: module name (e.g., \"sys\" or \"__builtins__\"). Can contain dots, if\nit's a submodule. Package names should omit the \"__init__\" suffix (e.g.,\npass in \"os\", not \"os.__init__\").\nversion: The Python version. (major, minor)\n\nReturns:\nA tuple with the filename and contents of the file\nRaises:\nIOError: if file not found", "source": "github-repos"}
{"code": "def compile_file_into_spirv(filepath, stage, optimization='size', warnings_as_errors=False):\n    with open(filepath, 'rb') as f:\n        content = f.read()\n    return compile_into_spirv(content, stage, filepath, optimization=optimization, warnings_as_errors=warnings_as_errors)", "docstring": "Compile shader file into Spir-V binary.\n\nThis function uses shaderc to compile your glsl file code into Spir-V\ncode.\n\nArgs:\nfilepath (strs): Absolute path to your shader file\nstage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',\n'frag', 'comp']\noptimization (str): 'zero' (no optimization) or 'size' (reduce size)\nwarnings_as_errors (bool): Turn warnings into errors\n\nReturns:\nbytes: Compiled Spir-V binary.\n\nRaises:\nCompilationError: If compilation fails.", "source": "codesearchnet"}
{"code": "def __init__(self, options=None):\n    if options is not None:\n        self._options = copy.deepcopy(options)\n    else:\n        self._options = {'max_depth': 100, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'name', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': False, 'select': ['micros'], 'step': -1, 'output': 'stdout'}", "docstring": "Constructor.\n\nArgs:\noptions: Optional initial option dict to start with.", "source": "github-repos"}
{"code": "def pymmh3_hash64(key: Union[bytes, bytearray],\n                  seed: int = 0,\n                  x64arch: bool = True) -> Tuple[int, int]:\n    \n\n    hash_128 = pymmh3_hash128(key, seed, x64arch)\n\n    unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF  \n    if unsigned_val1 & 0x8000000000000000 == 0:\n        signed_val1 = unsigned_val1\n    else:\n        signed_val1 = -((unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1)\n\n    unsigned_val2 = (hash_128 >> 64) & 0xFFFFFFFFFFFFFFFF  \n    if unsigned_val2 & 0x8000000000000000 == 0:\n        signed_val2 = unsigned_val2\n    else:\n        signed_val2 = -((unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1)\n\n    return signed_val1, signed_val2", "docstring": "Implements 64bit murmur3 hash, as per ``pymmh3``. Returns a tuple.\n\nArgs:\nkey: data to hash\nseed: seed\nx64arch: is a 64-bit architecture available?\n\nReturns:\ntuple: tuple of integers, ``(signed_val1, signed_val2)``", "source": "juraj-google-style"}
{"code": "def push_obj(self, obj: T, offset: int=0):\n    traceable_obj = TraceableObject(obj)\n    self._stack.append(traceable_obj)\n    return traceable_obj.set_filename_and_line_from_caller(offset + 1)", "docstring": "Add object to the stack and record its filename and line information.\n\nArgs:\nobj: An object to store on the stack.\noffset: Integer.  If 0, the caller's stack frame is used.  If 1,\nthe caller's caller's stack frame is used.\n\nReturns:\nTraceableObject.SUCCESS if appropriate stack information was found,\nTraceableObject.HEURISTIC_USED if the stack was smaller than expected,\nand TraceableObject.FAILURE if the stack was empty.", "source": "github-repos"}
{"code": "def get_markdown_files(self, dir_):\n        \n        md_files = OrderedSet()\n        for root, _, files in os.walk(dir_):\n            for name in files:\n                split = os.path.splitext(name)\n                if len(split) == 1:\n                    continue\n                if split[1] in ('.markdown', '.md', '.yaml'):\n                    md_files.add(os.path.join(root, name))\n        return md_files", "docstring": "Get all the markdown files in a folder, recursively\n\nArgs:\ndir_: str, a toplevel folder to walk.", "source": "juraj-google-style"}
{"code": "def sort_dict(d, key=None, reverse=False):\n    \n    kv_items = [kv for kv in d.items()]\n\n    \n    if key is None:\n        kv_items.sort(key=lambda t: t[1], reverse=reverse)\n    else:\n        kv_items.sort(key=key, reverse=reverse)\n\n    \n    return collections.OrderedDict(kv_items)", "docstring": "Sorts a dict by value.\n\nArgs:\nd: Input dictionary\nkey: Function which takes an tuple (key, object) and returns a value to\ncompare and sort by. By default, the function compares the values\nof the dict i.e. key = lambda t : t[1]\nreverse: Allows to reverse sort order.\n\nReturns:\nOrderedDict object whose keys are ordered according to their value.", "source": "juraj-google-style"}
{"code": "def _cleanup_unregistered_flag_from_module_dicts(self, flag_obj):\n    \n    if self._flag_is_registered(flag_obj):\n      return\n    for flags_by_module_dict in (self.flags_by_module_dict(),\n                                 self.flags_by_module_id_dict(),\n                                 self.key_flags_by_module_dict()):\n      for flags_in_module in six.itervalues(flags_by_module_dict):\n        \n        \n        while flag_obj in flags_in_module:\n          flags_in_module.remove(flag_obj)", "docstring": "Cleans up unregistered flags from all module -> [flags] dictionaries.\n\nIf flag_obj is registered under either its long name or short name, it\nwon't be removed from the dictionaries.\n\nArgs:\nflag_obj: Flag, the Flag instance to clean up for.", "source": "juraj-google-style"}
{"code": "class OneFormerPixelLevelModuleOutput(ModelOutput):\n    encoder_features: List[torch.FloatTensor] = None\n    decoder_features: List[torch.FloatTensor] = None\n    decoder_last_feature: Optional[torch.FloatTensor] = None", "docstring": "OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the\n`encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale\nDeformable Attention based decoder.\n\nArgs:\nencoder_features (List of `(torch.FloatTensor)`):\nList of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also\ncalled feature maps) of the model at the output of each stage.\ndecoder_features (List of `(torch.FloatTensor)`):\nList of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also\ncalled feature maps) of the model at the output of each stage.\ndecoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)):\n1/4 scale features from the last Pixel Decoder Layer.", "source": "github-repos"}
{"code": "def unq_argument(self) -> str:\n    start = self.offset\n    self.dfa([{'': (lambda : 0), ';': (lambda : (- 1)), ' ': (lambda : (- 1)), '\\t': (lambda : (- 1)), '\\r': (lambda : (- 1)), '\\n': (lambda : (- 1)), '{': (lambda : (- 1)), '/': (lambda : 1)}, {'': (lambda : 0), '/': self._back_break, '*': self._back_break}])\n    self._arg = self.input[start:self.offset]", "docstring": "Parse unquoted argument.\n\nRaises:\nEndOfInput: If past the end of input.", "source": "codesearchnet"}
{"code": "def getValue(self, unit=None):\n        \n\n        if unit or self.unit:\n            r = float(self.value * UnitToValue(self.unit)) / UnitToValue(unit)\n            return int(round(r)) if isinstance(self.value, int) else r\n        return self.value", "docstring": "Return the value of the feature.\n\nIf the unit is specified and the feature has a unit, the value is converted\n\nArgs:\n- unit(str,optional): A unit to convert the current feature value ('B','K','M','G')", "source": "juraj-google-style"}
{"code": "def store_object(file_name, save_key, file_location, object_to_store=None):\n    \n    file = __os.path.join(file_location, file_name)\n    try:\n        shelve_store = __shelve.open(file)\n    except Exception as e:\n        LOGGER.critical('Function store_object Error {error} ignoring any errors'.format(error=e))\n        print('Bad storage dB, rebuilding!!')\n        __os.remove(file)\n        shelve_store = __shelve.open(file)\n    shelve_store[save_key] = object_to_store\n    shelve_store.close()", "docstring": "Function to store objects in a shelve\nArgs:\nfile_name: Shelve storage file name\nsave_key: The name of the key to store the item to\nfile_location: The location of the file, derive from the os module\nobject_to_store: The object you want to store\n\nReturns:", "source": "juraj-google-style"}
{"code": "def test_step(self, data):\n    data = data_adapter.expand_1d(data)\n    x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)\n    y_pred = self(x, training=False)\n    self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)\n    self.compiled_metrics.update_state(y, y_pred, sample_weight)\n    return_metrics = {}\n    for metric in self.metrics:\n        result = metric.result()\n        if isinstance(result, dict):\n            return_metrics.update(result)\n        else:\n            return_metrics[metric.name] = result\n    return return_metrics", "docstring": "The logic for one evaluation step.\n\nThis method can be overridden to support custom evaluation logic.\nThis method is called by `Model.make_test_function`.\n\nThis function should contain the mathematical logic for one step of\nevaluation.\nThis typically includes the forward pass, loss calculation, and metrics\nupdates.\n\nConfiguration details for *how* this logic is run (e.g. `tf.function` and\n`tf.distribute.Strategy` settings), should be left to\n`Model.make_test_function`, which can also be overridden.\n\nArgs:\ndata: A nested structure of `Tensor`s.\n\nReturns:\nA `dict` containing values that will be passed to\n`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the\nvalues of the `Model`'s metrics are returned.", "source": "github-repos"}
{"code": "def __method_descriptor(self, service, method_info,\n                          protorpc_method_info):\n    \n    descriptor = {}\n\n    request_message_type = (resource_container.ResourceContainer.\n                            get_request_message(protorpc_method_info.remote))\n    request_kind = self.__get_request_kind(method_info)\n    remote_method = protorpc_method_info.remote\n\n    method_id = method_info.method_id(service.api_info)\n\n    path = method_info.get_path(service.api_info)\n\n    description = protorpc_method_info.remote.method.__doc__\n\n    descriptor['id'] = method_id\n    descriptor['path'] = path\n    descriptor['httpMethod'] = method_info.http_method\n\n    if description:\n      descriptor['description'] = description\n\n    descriptor['scopes'] = [\n        'https:\n    ]\n\n    parameters = self.__params_descriptor(\n        request_message_type, request_kind, path, method_id,\n        method_info.request_params_class)\n    if parameters:\n      descriptor['parameters'] = parameters\n\n    if method_info.request_params_class:\n      parameter_order = self.__params_order_descriptor(\n        method_info.request_params_class, path, is_params_class=True)\n    else:\n      parameter_order = self.__params_order_descriptor(\n        request_message_type, path, is_params_class=False)\n    if parameter_order:\n      descriptor['parameterOrder'] = parameter_order\n\n    request_descriptor = self.__request_message_descriptor(\n        request_kind, request_message_type, method_id,\n        method_info.request_body_class)\n    if request_descriptor is not None:\n      descriptor['request'] = request_descriptor\n\n    response_descriptor = self.__response_message_descriptor(\n        remote_method.response_type(), method_info.method_id(service.api_info))\n    if response_descriptor is not None:\n      descriptor['response'] = response_descriptor\n\n    return descriptor", "docstring": "Describes a method.\n\nArgs:\nservice: endpoints.Service, Implementation of the API as a service.\nmethod_info: _MethodInfo, Configuration for the method.\nprotorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC\ndescription of the method.\n\nReturns:\nDictionary describing the method.", "source": "juraj-google-style"}
{"code": "def get_self_attention_bias(x):\n    x_shape = common_layers.shape_list(x)\n    self_attention_bias = common_attention.attention_bias_lower_triangle(x_shape[1])\n    return self_attention_bias", "docstring": "Creates masked self attention bias.\n\nArgs:\nx: A tensor of shape [batch, length, depth]\n\nReturns:\nself_attention_bias: A tensor of shape [length, length, 1]", "source": "codesearchnet"}
{"code": "def sample(self, num_samples=1):\n        \n        self.check_fit()\n        return np.random.normal(self.mean, self.std, num_samples)", "docstring": "Returns new data point based on model.\n\nArguments:\nn_samples: `int`\n\nReturns:\nnp.ndarray: Generated samples", "source": "juraj-google-style"}
{"code": "def discard_event(event: events.Event, bot_id: str=None) -> bool:\n    if (event['type'] in SKIP_EVENTS):\n        return True\n    elif (bot_id and isinstance(event, events.Message)):\n        if (event.get('bot_id') == bot_id):\n            LOG.debug('Ignoring event: %s', event)\n            return True\n        elif (('message' in event) and (event['message'].get('bot_id') == bot_id)):\n            LOG.debug('Ignoring event: %s', event)\n            return True\n    return False", "docstring": "Check if the incoming event needs to be discarded\n\nArgs:\nevent: Incoming :class:`slack.events.Event`\nbot_id: Id of connected bot\n\nReturns:\nboolean", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.ListUptimeCheckConfigs = channel.unary_unary(\n            \"/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckConfigsResponse.FromString,\n        )\n        self.GetUptimeCheckConfig = channel.unary_unary(\n            \"/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.GetUptimeCheckConfigRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString,\n        )\n        self.CreateUptimeCheckConfig = channel.unary_unary(\n            \"/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.CreateUptimeCheckConfigRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString,\n        )\n        self.UpdateUptimeCheckConfig = channel.unary_unary(\n            \"/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.UpdateUptimeCheckConfigRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.UptimeCheckConfig.FromString,\n        )\n        self.DeleteUptimeCheckConfig = channel.unary_unary(\n            \"/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.DeleteUptimeCheckConfigRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.ListUptimeCheckIps = channel.unary_unary(\n            \"/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__service__pb2.ListUptimeCheckIpsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _append_defects(self, part, part_content_type):\n    part_defects = {}\n    for e in part.defects:\n        defects = '{}: {}'.format(e.__class__.__name__, e.__doc__)\n        self._defects_categories.add(e.__class__.__name__)\n        part_defects.setdefault(part_content_type, []).append(defects)\n        log.debug('Added defect {!r}'.format(defects))\n    if part_defects:\n        self._has_defects = True\n        self._defects.append(part_defects)", "docstring": "Add new defects and defects categories to object attributes.\n\nThe defects are a list of all the problems found\nwhen parsing this message.\n\nArgs:\npart (string): mail part\npart_content_type (string): content type of part", "source": "codesearchnet"}
{"code": "def unsubscribe(self, future):\n    assert (future not in self._pending_unsubscribes), ('%r has already been unsubscribed from' % self._pending_unsubscribes[future])\n    subscribe = self._requests[future]\n    self._pending_unsubscribes[future] = subscribe\n    self._subscriptions.pop(subscribe.id)\n    request = Unsubscribe(subscribe.id)\n    request.set_callback(self._q.put)\n    try:\n        controller = self.get_controller(subscribe.path[0])\n    except ValueError:\n        pass\n    else:\n        self.handle_request(controller, request)", "docstring": "Terminates the subscription given by a future\n\nArgs:\nfuture (Future): The future of the original subscription", "source": "codesearchnet"}
{"code": "def _ip_int_from_string(self, ip_str):\n        \n        if not ip_str:\n            raise AddressValueError('Address cannot be empty')\n\n        octets = ip_str.split('.')\n        if len(octets) != 4:\n            raise AddressValueError(\"Expected 4 octets in %r\" % ip_str)\n\n        try:\n            bvs = map(self._parse_octet, octets)\n            return _compat_int_from_byte_vals(bvs, 'big')\n        except ValueError as exc:\n            raise AddressValueError(\"%s in %r\" % (exc, ip_str))", "docstring": "Turn the given IP string into an integer for comparison.\n\nArgs:\nip_str: A string, the IP ip_str.\n\nReturns:\nThe IP ip_str as an integer.\n\nRaises:\nAddressValueError: if ip_str isn't a valid IPv4 Address.", "source": "juraj-google-style"}
{"code": "def all(self, scope=None, **kwargs):\n    path = '/runners/all'\n    query_data = {}\n    if (scope is not None):\n        query_data['scope'] = scope\n    return self.gitlab.http_list(path, query_data, **kwargs)", "docstring": "List all the runners.\n\nArgs:\nscope (str): The scope of runners to show, one of: specific,\nshared, active, paused, online\nall (bool): If True, return all the items, without pagination\nper_page (int): Number of items to retrieve per request\npage (int): ID of the page to return (starts with page 1)\nas_list (bool): If set to False and no pagination option is\ndefined, return a generator instead of a list\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the server failed to perform the request\n\nReturns:\nlist(Runner): a list of runners matching the scope.", "source": "codesearchnet"}
{"code": "def _get_node_parent(self, age, pos):\n    return self.nodes[age][int((pos / self.comp))]", "docstring": "Get the parent node of node, whch is located in tree's node list.\n\nReturns:\nobject: The parent node.", "source": "codesearchnet"}
{"code": "def verifyToken(self, auth):\n    if (auth in (self.Auth.SkypeToken, self.Auth.Authorize)):\n        if (('skype' not in self.tokenExpiry) or (datetime.now() >= self.tokenExpiry['skype'])):\n            if (not hasattr(self, 'getSkypeToken')):\n                raise SkypeAuthException('Skype token expired, and no password specified')\n            self.getSkypeToken()\n    elif (auth == self.Auth.RegToken):\n        if (('reg' not in self.tokenExpiry) or (datetime.now() >= self.tokenExpiry['reg'])):\n            self.getRegToken()", "docstring": "Ensure the authentication token for the given auth method is still valid.\n\nArgs:\nauth (Auth): authentication type to check\n\nRaises:\n.SkypeAuthException: if Skype auth is required, and the current token has expired and can't be renewed", "source": "codesearchnet"}
{"code": "def list_distribute_contents_simple(input_list, function=lambda x: x):\n    \n    \n    dictionary = dict()\n    for obj in input_list:\n        dict_of_lists_add(dictionary, function(obj), obj)\n    output_list = list()\n    i = 0\n    done = False\n    while not done:\n        found = False\n        for key in sorted(dictionary):\n            if i < len(dictionary[key]):\n                output_list.append(dictionary[key][i])\n                found = True\n        if found:\n            i += 1\n        else:\n            done = True\n    return output_list", "docstring": "Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1]. List can contain complex types\nlike dictionaries in which case the function can return the appropriate value eg.  lambda x: x[KEY]\n\nArgs:\ninput_list (List): List to distribute values\nfunction (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x.\n\nReturns:\nList: Distributed list", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, output_module):\n    \n    if not isinstance(output_module, xlsx.XLSXOutputModule):\n      raise errors.BadConfigObject(\n          'Output module is not an instance of XLSXOutputModule')\n\n    fields = cls._ParseStringOption(\n        options, 'fields', default_value=cls._DEFAULT_FIELDS)\n\n    additional_fields = cls._ParseStringOption(options, 'additional_fields')\n\n    if additional_fields:\n      fields = '{0:s},{1:s}'.format(fields, additional_fields)\n\n    filename = getattr(options, 'write', None)\n    if not filename:\n      raise errors.BadConfigOption(\n          'Output filename was not provided use \"-w filename\" to specify.')\n\n    timestamp_format = cls._ParseStringOption(\n        options, 'timestamp_format',\n        default_value=cls._DEFAULT_TIMESTAMP_FORMAT)\n\n    output_module.SetFields([\n        field_name.strip() for field_name in fields.split(',')])\n    output_module.SetFilename(filename)\n    output_module.SetTimestampFormat(timestamp_format)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (XLSXOutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.\nBadConfigOption: when the output filename was not provided.", "source": "juraj-google-style"}
{"code": "def AppendContent(self, src_fd):\n    \n    while 1:\n      blob = src_fd.read(self.chunksize)\n      if not blob:\n        break\n\n      blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)\n      self.AddBlob(blob_id, len(blob))\n\n    self.Flush()", "docstring": "Create new blob hashes and append to BlobImage.\n\nWe don't support writing at arbitrary file offsets, but this method provides\na convenient way to add blobs for a new file, or append content to an\nexisting one.\n\nArgs:\nsrc_fd: source file handle open for read\n\nRaises:\nIOError: if blob has already been finalized.", "source": "juraj-google-style"}
{"code": "def _make_output_dense(self, query_shape, common_kwargs, name=None):\n    query_rank = len(query_shape)\n    if self._output_shape:\n        output_shape = self._output_shape\n    else:\n        output_shape = [query_shape[-1]]\n    einsum_equation, bias_axes, output_rank = _build_proj_equation(query_rank - 1, bound_dims=2, output_dims=len(output_shape))\n    return EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, output_shape), bias_axes=bias_axes if self._use_bias else None, name=name, **common_kwargs)", "docstring": "Builds the output projection matrix.\n\nArgs:\nfree_dims: Number of free dimensions for einsum equation building.\ncommon_kwargs: Common keyword arguments for einsum layer.\nname: Name for the projection layer.\n\nReturns:\nProjection layer.", "source": "github-repos"}
{"code": "class custom_gradient:\n\n    def __init__(self, fun):\n        warnings.warn('`custom_gradient` for the numpy backend acts as a pass-through to support the forward pass. No gradient computation or modification takes place.')\n        self.fun = fun\n\n    def __call__(self, *args, **kwargs):\n        outputs, _ = self.fun(*args, **kwargs)\n        return outputs", "docstring": "Decorator for custom gradients.\n\nArgs:\nfun: Forward pass function.", "source": "github-repos"}
{"code": "def process(self, element):\n    import apache_beam as beam\n    import six\n    import tensorflow as tf\n    tf.logging.set_verbosity(tf.logging.ERROR)\n    try:\n        clean_element = []\n        for line in element:\n            clean_element.append(line.rstrip())\n        batch_result = self._session.run(fetches=self._transformed_features, feed_dict={self._input_placeholder_tensor: clean_element})\n        for i in range(len(clean_element)):\n            transformed_features = {}\n            for (name, value) in six.iteritems(batch_result):\n                if isinstance(value, tf.SparseTensorValue):\n                    batch_i_indices = (value.indices[(:, 0)] == i)\n                    batch_i_values = value.values[batch_i_indices]\n                    transformed_features[name] = batch_i_values.tolist()\n                else:\n                    transformed_features[name] = value[i].tolist()\n            (yield transformed_features)\n    except Exception as e:\n        (yield beam.pvalue.TaggedOutput('errors', (str(e), element)))", "docstring": "Run the transformation graph on batched input data\n\nArgs:\nelement: list of csv strings, representing one batch input to the TF graph.\n\nReturns:\ndict containing the transformed data. Results are un-batched. Sparse\ntensors are converted to lists.", "source": "codesearchnet"}
{"code": "def get_current_semver_version():\n    bazel_rc_file = open(BAZEL_RC, 'r')\n    wheel_type = ''\n    wheel_build_date = ''\n    wheel_version_suffix = ''\n    for line in bazel_rc_file:\n        wheel_type = _get_regex_match(line, '^build --repo_env=ML_WHEEL_TYPE=\"(.+)\"')[0] or wheel_type\n        wheel_build_date = _get_regex_match(line, '^build --repo_env=ML_WHEEL_BUILD_DATE=\"([0-9]*)\"')[0] or wheel_build_date\n        wheel_version_suffix, is_matched = _get_regex_match(line, '^build --repo_env=ML_WHEEL_VERSION_SUFFIX=\"(.*)\"', is_last_match=True)\n        if is_matched:\n            break\n    tf_version_bzl_file = open(TF_VERSION_BZL, 'r')\n    wheel_version = ''\n    for line in tf_version_bzl_file:\n        wheel_version, is_matched = _get_regex_match(line, '^TF_VERSION = \"([0-9.]+)\"', is_last_match=True)\n        if is_matched:\n            break\n    old_major, old_minor, old_patch_num = wheel_version.split('.')\n    if wheel_type == 'nightly':\n        version_type = NIGHTLY_VERSION\n    else:\n        version_type = SNAPSHOT_VERSION\n    old_extension = ''\n    if wheel_type == 'nightly':\n        old_extension = '-dev{}'.format(wheel_build_date)\n    else:\n        if wheel_build_date:\n            old_extension += '-dev{}'.format(wheel_build_date)\n        if wheel_version_suffix:\n            old_extension += wheel_version_suffix\n    return Version(old_major, old_minor, old_patch_num, old_extension, version_type)", "docstring": "Returns a Version object of current version.\n\nReturns:\nversion: Version object of current SemVer string based on information from\n.bazelrc and tf_version.bzl files.", "source": "github-repos"}
{"code": "def dry_bulb_temperature(self, value=99.9):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dry_bulb_temperature`'.format(value))\n        if (value <= (- 70.0)):\n            raise ValueError('value need to be greater -70.0 for field `dry_bulb_temperature`')\n        if (value >= 70.0):\n            raise ValueError('value need to be smaller 70.0 for field `dry_bulb_temperature`')\n    self._dry_bulb_temperature = value", "docstring": "Corresponds to IDD Field `dry_bulb_temperature`\n\nArgs:\nvalue (float): value for IDD Field `dry_bulb_temperature`\nUnit: C\nvalue > -70.0\nvalue < 70.0\nMissing value: 99.9\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def check_causatives(self, case_obj=None, institute_obj=None):\n    institute_id = (case_obj['owner'] if case_obj else institute_obj['_id'])\n    institute_causative_variant_ids = self.get_causatives(institute_id)\n    if (len(institute_causative_variant_ids) == 0):\n        return []\n    if case_obj:\n        case_causative_ids = set(case_obj.get('causatives', []))\n        institute_causative_variant_ids = list(set(institute_causative_variant_ids).difference(case_causative_ids))\n    query = self.variant_collection.find({'_id': {'$in': institute_causative_variant_ids}}, {'variant_id': 1})\n    positional_variant_ids = [item['variant_id'] for item in query]\n    filters = {'variant_id': {'$in': positional_variant_ids}}\n    if case_obj:\n        filters['case_id'] = case_obj['_id']\n    else:\n        filters['institute'] = institute_obj['_id']\n    return self.variant_collection.find(filters)", "docstring": "Check if there are any variants that are previously marked causative\n\nLoop through all variants that are marked 'causative' for an\ninstitute and check if any of the variants are present in the\ncurrent case.\n\nArgs:\ncase_obj (dict): A Case object\ninstitute_obj (dict): check across the whole institute\n\nReturns:\ncausatives(iterable(Variant))", "source": "codesearchnet"}
{"code": "def dimension_value(dimension: Union['Dimension', int, None]) -> Union[int, None]:\n    if isinstance(dimension, Dimension):\n        return dimension.value\n    return dimension", "docstring": "Compatibility utility required to allow for both V1 and V2 behavior in TF.\n\nUntil the release of TF 2.0, we need the legacy behavior of `TensorShape` to\ncoexist with the new behavior. This utility is a bridge between the two.\n\nWhen accessing the value of a TensorShape dimension,\nuse this utility, like this:\n\n```\n# If you had this in your V1 code:\nvalue = tensor_shape[i].value\n\n# Use `dimension_value` as direct replacement compatible with both V1 & V2:\nvalue = dimension_value(tensor_shape[i])\n\n# This would be the V2 equivalent:\nvalue = tensor_shape[i]  # Warning: this will return the dim value in V2!\n```\n\nArgs:\ndimension: Either a `Dimension` instance, an integer, or None.\n\nReturns:\nA plain value, i.e. an integer or None.", "source": "github-repos"}
{"code": "def purity(labels, true_labels):\n    \n    purity = 0.0\n    for i in set(labels):\n        indices = (labels==i)\n        true_clusters = true_labels[indices]\n        if len(true_clusters)==0:\n            continue\n        counts = Counter(true_clusters)\n        lab, count = counts.most_common()[0]\n        purity += count\n    return float(purity)/len(labels)", "docstring": "Calculates the purity score for the given labels.\n\nArgs:\nlabels (array): 1D array of integers\ntrue_labels (array): 1D array of integers - true labels\n\nReturns:\npurity score - a float bewteen 0 and 1. Closer to 1 is better.", "source": "juraj-google-style"}
{"code": "def MakeSuiteFromHist(hist, name=None):\n    if (name is None):\n        name = hist.name\n    d = dict(hist.GetDict())\n    return MakeSuiteFromDict(d, name)", "docstring": "Makes a normalized suite from a Hist object.\n\nArgs:\nhist: Hist object\nname: string name\n\nReturns:\nSuite object", "source": "codesearchnet"}
{"code": "def parse(inp, format=None, encoding='utf-8', force_types=True):\n    proper_inp = inp\n    if hasattr(inp, 'read'):\n        proper_inp = inp.read()\n    if isinstance(proper_inp, six.text_type):\n        proper_inp = proper_inp.encode(encoding)\n    fname = None\n    if hasattr(inp, 'name'):\n        fname = inp.name\n    fmt = _get_format(format, fname, proper_inp)\n    proper_inp = six.BytesIO(proper_inp)\n    try:\n        res = _do_parse(proper_inp, fmt, encoding, force_types)\n    except Exception as e:\n        raise AnyMarkupError(e, traceback.format_exc())\n    if (res is None):\n        res = {}\n    return res", "docstring": "Parse input from file-like object, unicode string or byte string.\n\nArgs:\ninp: file-like object, unicode string or byte string with the markup\nformat: explicitly override the guessed `inp` markup format\nencoding: `inp` encoding, defaults to utf-8\nforce_types:\nif `True`, integers, floats, booleans and none/null\nare recognized and returned as proper types instead of strings;\nif `False`, everything is converted to strings\nif `None`, backend return value is used\nReturns:\nparsed input (dict or list) containing unicode values\nRaises:\nAnyMarkupError if a problem occurs while parsing or inp", "source": "codesearchnet"}
{"code": "def upload(self, local_fn: str, remote_fn: str = '',\n             dont_overwrite: bool = False):\n    \n    raise NotImplementedError()", "docstring": "Uploads given file to the task. If remote_fn is not specified, dumps it\ninto task current directory with the same name.\n\nArgs:\nlocal_fn: location of file locally\nremote_fn: location of file on task\ndont_overwrite: if True, will be no-op if target file exists", "source": "juraj-google-style"}
{"code": "def replace_with_higgs_linear(model, quantization_config=None, current_key_name=None, has_been_replaced=False):\n    from accelerate import init_empty_weights\n    for name, module in model.named_children():\n        if current_key_name is None:\n            current_key_name = []\n        current_key_name.append(name)\n        if isinstance(module, nn.Linear):\n            current_key_name_str = '.'.join(current_key_name)\n            if not any((current_key_name_str.endswith(key) for key in quantization_config.modules_to_not_convert)):\n                with init_empty_weights():\n                    in_features = module.in_features\n                    out_features = module.out_features\n                    model._modules[name] = HiggsLinear(in_features, out_features, bias=module.bias is not None, num_bits=quantization_config.bits, hadamard_size=quantization_config.hadamard_size, group_size=quantization_config.group_size)\n                    has_been_replaced = True\n                    model._modules[name].source_cls = type(module)\n                    model._modules[name].requires_grad_(False)\n        if len(list(module.children())) > 0:\n            _, has_been_replaced = replace_with_higgs_linear(module, quantization_config=quantization_config, current_key_name=current_key_name, has_been_replaced=has_been_replaced)\n        current_key_name.pop(-1)\n    return (model, has_been_replaced)", "docstring": "Public method that recursively replaces the Linear layers of the given model with HIGGS quantized layers.\n`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the\nconversion has been successful or not.\n\nArgs:\nmodel (`torch.nn.Module`):\nThe model to convert, can be any `torch.nn.Module` instance.\nquantization_config (`HiggsConfig`):\nThe quantization config object that contains the quantization parameters.\ncurrent_key_name (`list`, *optional*):\nA list that contains the current key name. This is used for recursion and should not be passed by the user.\nhas_been_replaced (`bool`, *optional*):\nA boolean that indicates if the conversion has been successful or not. This is used for recursion and\nshould not be passed by the user.", "source": "github-repos"}
{"code": "def _get_decoratables(self, atype):\n    result = []\n    defmsg = 'Skipping {}; not decoratable or already decorated.'\n    for varname in self.shell.run_line_magic('who_ls', atype):\n        varobj = self.shell.user_ns.get(varname, None)\n        decorate = False\n        if (varobj is None):\n            continue\n        if (atype in ['classobj', 'type']):\n            if ((not hasattr(varobj, '__acorn__')) and hasattr(varobj, '__module__') and (varobj.__module__ == '__main__') and (not hasattr(varobj, '__file__'))):\n                decorate = True\n            else:\n                msg.std(defmsg.format(varname), 3)\n        elif (atype in ['function', 'staticmethod']):\n            func = None\n            if ((atype == 'staticmethod') and hasattr(varobj, '__func__')):\n                func = varobj.__func__\n            elif (atype == 'function'):\n                func = varobj\n            if ((func is not None) and (not hasattr(func, '__acorn__')) and hasattr(func, '__code__') and ('<ipython-input' in func.__code__.co_filename)):\n                decorate = True\n            else:\n                msg.std(defmsg.format(varname), 3)\n        if decorate:\n            self.entities[atype][varname] = varobj\n            result.append((varname, varobj))\n    return result", "docstring": "Returns a list of the objects that need to be decorated in the\ncurrent user namespace based on their type.\n\nArgs:\natype (str): one of the values in :attr:`atypes`. Specifies the type of\nobject to search.", "source": "codesearchnet"}
{"code": "def __init__(self, x: int, *, y: str, **kwargs):", "docstring": "Constructor.\n\nArgs:\nx: An int.\ny: A str.\n**kwargs: Kwargs.", "source": "github-repos"}
{"code": "def rolldim(P, n=1):\n    \n    dim = P.dim\n    shape = P.shape\n    dtype = P.dtype\n    A = dict(((key[n:]+key[:n],P.A[key]) for key in P.keys))\n    return Poly(A, dim, shape, dtype)", "docstring": "Roll the axes.\n\nArgs:\nP (Poly) : Input polynomial.\nn (int) : The axis that after rolling becomes the 0th axis.\n\nReturns:\n(Poly) : Polynomial with new axis configuration.\n\nExamples:\n>>> x,y,z = variable(3)\n>>> P = x*x*x + y*y + z\n>>> print(P)\nq0^3+q1^2+q2\n>>> print(rolldim(P))\nq0^2+q2^3+q1", "source": "juraj-google-style"}
{"code": "def _stop_server(self):\n    if self._proc:\n        utils.stop_standing_subprocess(self._proc)\n        self._proc = None\n    out = self._adb.shell(_STOP_CMD.format(snippet_package=self.package, user=self._get_user_command_string()), timeout=_STOP_CMD_TIMEOUT_SEC).decode('utf-8')\n    if 'OK (0 tests)' not in out:\n        raise android_device_lib_errors.DeviceError(self._device, f'Failed to stop existing apk. Unexpected output: {out}.')", "docstring": "Releases all the resources acquired in `start_server`.\n\nRaises:\nandroid_device_lib_errors.DeviceError: if the server exited with errors on\nthe device side.", "source": "github-repos"}
{"code": "def load_config(self, file_name):\n        \n\n        \n\n        def load_settings(file_name):\n            \n\n            instruments_loaded = {}\n            probes_loaded = {}\n            scripts_loaded = {}\n\n            if os.path.isfile(file_name):\n                in_data = load_b26_file(file_name)\n\n                instruments = in_data['instruments'] if 'instruments' in in_data else {}\n                scripts = in_data['scripts'] if 'scripts' in in_data else {}\n                probes = in_data['probes'] if 'probes' in in_data else {}\n\n                instruments_loaded, failed = Instrument.load_and_append(instruments)\n                if len(failed) > 0:\n                    print(('WARNING! Following instruments could not be loaded: ', failed))\n\n                scripts_loaded, failed, instruments_loaded = Script.load_and_append(\n                    script_dict=scripts,\n                    instruments=instruments_loaded,\n                    log_function=self.log,\n                    data_path=self.gui_settings['data_folder'])\n\n                if len(failed) > 0:\n                    print(('WARNING! Following scripts could not be loaded: ', failed))\n\n                probes_loaded, failed, instruments_loadeds = Probe.load_and_append(\n                    probe_dict=probes,\n                    probes=probes_loaded,\n                    instruments=instruments_loaded)\n            return instruments_loaded, scripts_loaded, probes_loaded\n\n        print(('loading script/instrument/probes config from {:s}'.format(file_name)))\n        try:\n            config = load_b26_file(file_name)['gui_settings']\n            if config['settings_file'] != file_name:\n                print((\n                'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format(\n                    config['settings_file'], file_name)))\n            config['settings_file'] = file_name\n            print(('loading of {:s} successful'.format(file_name)))\n        except Exception:\n            print(('WARNING path to settings file ({:s}) invalid use default settings'.format(file_name)))\n            config = self._DEFAULT_CONFIG\n\n\n            for x in list(self._DEFAULT_CONFIG.keys()):\n                if x in config:\n                    if not os.path.exists(config[x]):\n                        try:\n                            os.makedirs(config[x])\n                        except Exception:\n                            config[x] = self._DEFAULT_CONFIG[x]\n                            os.makedirs(config[x])\n                            print(('WARNING: failed validating or creating path: set to default path'.format(config[x])))\n                else:\n                    config[x] = self._DEFAULT_CONFIG[x]\n                    os.makedirs(config[x])\n                    print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config[x])))\n\n        \n        if os.path.exists(os.path.dirname(file_name)):\n            config['settings_file'] = file_name\n\n        self.gui_settings = config\n\n        self.instruments, self.scripts, self.probes = load_settings(file_name)\n\n\n        self.refresh_tree(self.tree_gui_settings, self.gui_settings)\n        self.refresh_tree(self.tree_scripts, self.scripts)\n        self.refresh_tree(self.tree_settings, self.instruments)\n\n        self._hide_parameters(file_name)", "docstring": "checks if the file is a valid config file\nArgs:\nfile_name:", "source": 
"juraj-google-style"}
{"code": "def from_spec(cls, spec: str) -> Self:\n    if not spec:\n        return cls()\n    try:\n        full_shape_str, slice_str = spec.rsplit(' ', 1)\n    except ValueError as e:\n        raise ValueError('Spec string must contain space-separated full_shape info.') from e\n    full_shape = []\n    for dim in full_shape_str.split():\n        try:\n            full_shape.append(int(dim))\n        except ValueError as e:\n            raise ValueError(f\"Spec string full_shape must be a sequence of integers. Found '{dim}', which is not an integer.\") from e\n    var_offset = []\n    var_shape = []\n    for dim_spec in slice_str.split(':'):\n        try:\n            offset, shape = dim_spec.split(',')\n        except ValueError as e:\n            raise ValueError('Spec string must contain comma-separated pairs of offsets and shapes.') from e\n        try:\n            var_offset.append(int(offset))\n        except ValueError as e:\n            raise ValueError(f\"Spec string var_offset must be an integer. Found '{offset}', which is not an integer.\") from e\n        try:\n            var_shape.append(int(shape))\n        except ValueError as e:\n            raise ValueError(f\"Spec string var_shape must be an integer. Found '{shape}', which is not an integer.\") from e\n    return cls(full_shape=full_shape, var_offset=var_offset, var_shape=var_shape)", "docstring": "Parses a SaveSliceInfo spec string and returns a SaveSliceInfo object.\n\nArgs:\nspec: The tensor slice spec string according to the SaveSliceInfo.spec\nproperty. The spec contains the space-separated shape of the full\nvariable, followed by colon-separated pairs of the variable's offset\nand shape, where each pair is comma-separated. For example, consider a\nvariable whose full shape is [4 3 5], offset is [0 1 3], and shape is\n[4 1 2]. This variable's SaveSliceInfo.spec would be\n\"4 3 5 0,4:1,1:3,2\".\n\nReturns:\nA SaveSliceInfo object containing the extracted information.\n\nRaises:\nValueError: If the input string is not in the expected format.", "source": "github-repos"}
{"code": "def check_partition_column(partition_column, cols):\n    for (k, v) in cols.items():\n        if (k == partition_column):\n            if (v == 'int'):\n                return\n            else:\n                raise InvalidPartitionColumn('partition_column must be int, and not {0}'.format(v))\n    raise InvalidPartitionColumn('partition_column {0} not found in the query'.format(partition_column))", "docstring": "Check partition_column existence and type\n\nArgs:\npartition_column: partition_column name\ncols: dict with columns names and python types\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def derivative_extraction(feat, DeltaWindows):\n    (rows, cols) = feat.shape\n    DIF = np.zeros(feat.shape, dtype=feat.dtype)\n    Scale = 0\n    FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')\n    for i in range(DeltaWindows):\n        offset = DeltaWindows\n        Range = (i + 1)\n        dif = (Range * FEAT[(:, (offset + Range):((offset + Range) + cols))])\n        (- FEAT[(:, (offset - Range):((offset - Range) + cols))])\n        Scale += (2 * np.power(Range, 2))\n        DIF += dif\n    return (DIF / Scale)", "docstring": "This function the derivative features.\n\nArgs:\nfeat (array): The main feature vector(For returning the second\norder derivative it can be first-order derivative).\nDeltaWindows (int): The value of  DeltaWindows is set using\nthe configuration parameter DELTAWINDOW.\n\nReturns:\narray: Derivative feature vector - A NUMFRAMESxNUMFEATURES numpy\narray which is the derivative features along the features.", "source": "codesearchnet"}
{"code": "def last_updated(self, url):\n    return self.metadata(url).last_updated_in_seconds", "docstring": "Fetches last updated time for a URL.\n\nArgs:\nurl: string url of file.\n\nReturns: float UNIX Epoch time\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def dump_stats(filename):\n    res = _dump_impl()\n    f = open(filename, 'w')\n    json.dump(res, f, indent=4)\n    f.close()", "docstring": "Write collected information to file.\n\nArgs:\nfilename: absolute filename", "source": "codesearchnet"}
{"code": "def add_edge_end_unused(intersection, duplicates, intersections):\n    found = None\n    for other in intersections:\n        if ((intersection.index_first == other.index_first) and (intersection.index_second == other.index_second)):\n            if ((intersection.s == 0.0) and (other.s == 0.0)):\n                found = other\n                break\n            if ((intersection.t == 0.0) and (other.t == 0.0)):\n                found = other\n                break\n    if (found is not None):\n        intersections.remove(found)\n        duplicates.append(found)\n    intersections.append(intersection)", "docstring": "Add intersection that is ``COINCIDENT_UNUSED`` but on an edge end.\n\nThis is a helper for :func:`~._surface_intersection.add_intersection`.\nIt assumes that\n\n* ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``\n* A \"misclassified\" intersection in ``intersections`` that matches\n``intersection`` will be the \"same\" if it matches both ``index_first``\nand ``index_second`` and if it matches the start index exactly\n\nArgs:\nintersection (.Intersection): An intersection to be added.\nduplicates (List[.Intersection]): List of duplicate intersections.\nintersections (List[.Intersection]): List of \"accepted\" (i.e.\nnon-duplicate) intersections.", "source": "codesearchnet"}
{"code": "def setModelData(self, editor, model, index):\n    model.setData(index, editor.itemText(editor.currentIndex()))", "docstring": "Updates the model after changing data in the editor.\n\nArgs:\neditor (QtGui.QComboBox): The current editor for the item. Should be\na `QtGui.QComboBox` as defined in `createEditor`.\nmodel (ColumnDtypeModel): The model which holds the displayed data.\nindex (QtCore.QModelIndex): The index of the current item of the model.", "source": "codesearchnet"}
{"code": "def poll_output(self):\n    if self.block:\n        return self.output\n    new_list = self.output[self.old_output_size:]\n    self.old_output_size += len(new_list)\n    return new_list", "docstring": "Append lines from stdout to self.output.\n\nReturns:\nlist: The lines added since last call", "source": "codesearchnet"}
{"code": "def destroy(ads):\n    for ad in ads:\n        try:\n            ad.services.stop_all()\n        except:\n            ad.log.exception('Failed to clean up properly.')", "docstring": "Cleans up AndroidDevice objects.\n\nArgs:\nads: A list of AndroidDevice objects.", "source": "codesearchnet"}
{"code": "def _netsh_file(content):\n    with tempfile.NamedTemporaryFile(mode='w', prefix='salt-', suffix='.netsh', delete=False) as fp:\n        fp.write(content)\n    try:\n        log.debug('%s:\\n%s', fp.name, content)\n        return salt.modules.cmdmod.run('netsh -f {0}'.format(fp.name), python_shell=True)\n    finally:\n        os.remove(fp.name)", "docstring": "helper function to get the results of ``netsh -f content.txt``\n\nRunning ``netsh`` will drop you into a ``netsh`` prompt where you can issue\n``netsh`` commands. You can put a series of commands in an external file and\nrun them as if from a ``netsh`` prompt using the ``-f`` switch. That's what\nthis function does.\n\nArgs:\n\ncontent (str):\nThe contents of the file that will be run by the ``netsh -f``\ncommand\n\nReturns:\nstr: The text returned by the netsh command", "source": "codesearchnet"}
{"code": "def _add_imports_to_env(self, raw_api):\n    for (namespace, desc) in raw_api:\n        for item in desc:\n            if isinstance(item, AstImport):\n                if (namespace.name == item.target):\n                    raise InvalidSpec('Cannot import current namespace.', item.lineno, item.path)\n                if (item.target not in self.api.namespaces):\n                    raise InvalidSpec(('Namespace %s is not defined in any spec.' % quote(item.target)), item.lineno, item.path)\n                env = self._get_or_create_env(namespace.name)\n                imported_env = self._get_or_create_env(item.target)\n                if (namespace.name in imported_env):\n                    raise InvalidSpec(('Circular import of namespaces %s and %s detected.' % (quote(namespace.name), quote(item.target))), item.lineno, item.path)\n                env[item.target] = imported_env", "docstring": "Scans raw parser output for import declarations. Checks if the imports\nare valid, and then creates a reference to the namespace in the\nenvironment.\n\nArgs:\nraw_api (Tuple[Namespace, List[stone.stone.parser._Element]]):\nNamespace paired with raw parser output.", "source": "codesearchnet"}
{"code": "def register(self, name):\n\n    def register_func(func):\n        self.store[name] = func\n        return func\n    return register_func", "docstring": "Decorator for registering a function with PyPhi.\n\nArgs:\nname (string): The name of the function", "source": "codesearchnet"}
{"code": "def __init__(self, type_enum):\n        \n        \n        \n        type_enum = int(type_enum)\n        if (\n            type_enum not in types_pb2.DataType.values()\n            or type_enum == types_pb2.DT_INVALID\n        ):\n            raise TypeError(\n                \"type_enum is not a valid types_pb2.DataType: %s\" % type_enum\n            )\n        self._type_enum = type_enum", "docstring": "Creates a new `DataType`.\n\nNOTE(mrry): In normal circumstances, you should not need to\nconstruct a `DataType` object directly. Instead, use the\n`tf.as_dtype()` function.\n\nArgs:\ntype_enum: A `types_pb2.DataType` enum value.\n\nRaises:\nTypeError: If `type_enum` is not a value `types_pb2.DataType`.", "source": "juraj-google-style"}
{"code": "def config_cmd_handler(conf, config='config'):\n    \n    if conf[config].create or conf[config].update:\n        conf.create_config_(update=conf[config].update)\n    if conf[config].create_local:\n        conf.create_config_(index=-1, update=conf[config].update)\n    if conf[config].edit:\n        if not conf.config_files_[0].is_file():\n            conf.create_config_(update=conf[config].update)\n        subprocess.call(shlex.split('{} {}'.format(conf[config].editor,\n                                                   conf.config_files_[0])))", "docstring": "Implement the behavior of a subcmd using config_conf_section\n\nArgs:\nconf (:class:`~loam.manager.ConfigurationManager`): it should contain a\nsection created with :func:`config_conf_section` function.\nconfig (str): name of the configuration section created with\n:func:`config_conf_section` function.", "source": "juraj-google-style"}
{"code": "def __contains__(self, id):\n        \n        try:\n            backend.spreadsheet(self._sheets, id)\n        except KeyError:\n            return False\n        else:\n            return True", "docstring": "Return if there is a spreadsheet with the given id.\n\nArgs:\nid (str): unique alphanumeric id of the spreadsheet\nReturns:\nbool: ``True`` if it can be fetched else ``False``", "source": "juraj-google-style"}
{"code": "def make_absolute(base, relative):\n        \n\n        \n        \n        while relative.startswith('/../') or relative.startswith('../'):\n            relative = relative[3:]\n\n            base_parsed = urlparse(base)\n            new_path = base_parsed.path.rsplit('/', 1)[0]\n            base_parsed = base_parsed._replace(path=new_path)\n            base = base_parsed.geturl()\n\n        return urljoin(base, relative)", "docstring": "Make the given (relative) URL absolute.\n\nArgs:\nbase (str): The absolute URL the relative url was found on.\nrelative (str): The (possibly relative) url to make absolute.\n\nReturns:\nstr: The absolute URL.", "source": "juraj-google-style"}
{"code": "def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):\n    d = line_distance_similarity(p1a, p1b, p2a, p2b, T=T)\n    a = abs(angle_similarity(normalize(line(p1a, p1b)), normalize(line(p2a, p2b))))\n    return (d * a)", "docstring": "Similarity between two lines\n\nArgs:\np1a ([float, float]): x and y coordinates. Line A start\np1b ([float, float]): x and y coordinates. Line A end\np2a ([float, float]): x and y coordinates. Line B start\np2b ([float, float]): x and y coordinates. Line B end\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "codesearchnet"}
{"code": "def account_displayed_op_only(self, is_true):\n    self._options['account_displayed_op_only'] = is_true\n    return self", "docstring": "Whether only account the statistics of displayed profiler nodes.\n\nArgs:\nis_true: If true, only account statistics of nodes eventually\ndisplayed by the outputs.\nOtherwise, a node's statistics are accounted by its parents\nas long as it's types match 'account_type_regexes', even if\nit is hidden from the output, say, by hide_name_regexes.\nReturns:\nself", "source": "github-repos"}
{"code": "def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):\n    \n    color = _get_color(color, tree_type=NeuriteType.soma)\n\n    if isinstance(soma, SomaCylinders):\n        for start, end in zip(soma.points, soma.points[1:]):\n            common.plot_cylinder(ax,\n                                 start=start[COLS.XYZ], end=end[COLS.XYZ],\n                                 start_radius=start[COLS.R], end_radius=end[COLS.R],\n                                 color=color, alpha=alpha)\n    else:\n        common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,\n                           color=color, alpha=alpha)\n\n    \n    _update_3d_datalim(ax, soma)", "docstring": "Generates a 3d figure of the soma.\n\nArgs:\nax(matplotlib axes): on what to plot\nsoma(neurom.core.Soma): plotted soma\ncolor(str or None): Color of plotted values, None corresponds to default choice\nalpha(float): Transparency of plotted values", "source": "juraj-google-style"}
{"code": "def handle_length(schema, field, validator, parent_schema):\n    if isinstance(field, fields.String):\n        minKey = 'minLength'\n        maxKey = 'maxLength'\n    elif isinstance(field, (fields.List, fields.Nested)):\n        minKey = 'minItems'\n        maxKey = 'maxItems'\n    else:\n        raise ValueError('In order to set the Length validator for JSON schema, the field must be either a List or a String')\n    if validator.min:\n        schema[minKey] = validator.min\n    if validator.max:\n        schema[maxKey] = validator.max\n    if validator.equal:\n        schema[minKey] = validator.equal\n        schema[maxKey] = validator.equal\n    return schema", "docstring": "Adds validation logic for ``marshmallow.validate.Length``, setting the\nvalues appropriately for ``fields.List``, ``fields.Nested``, and\n``fields.String``.\n\nArgs:\nschema (dict): The original JSON schema we generated. This is what we\nwant to post-process.\nfield (fields.Field): The field that generated the original schema and\nwho this post-processor belongs to.\nvalidator (marshmallow.validate.Length): The validator attached to the\npassed in field.\nparent_schema (marshmallow.Schema): The Schema instance that the field\nbelongs to.\n\nReturns:\ndict: A, possibly, new JSON Schema that has been post processed and\naltered.\n\nRaises:\nValueError: Raised if the `field` is something other than\n`fields.List`, `fields.Nested`, or `fields.String`", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n    super().unpack(buff, offset)\n    try:\n        self.oxm_field = self._unpack_oxm_field()\n    except ValueError as exception:\n        raise UnpackException(exception)\n    self.oxm_hasmask = ((self.oxm_field_and_mask & 1) == 1)\n    start = (offset + 4)\n    end = (start + self.oxm_length)\n    self.oxm_value = buff[start:end]", "docstring": "Unpack the buffer into a OxmTLV.\n\nArgs:\nbuff (bytes): The binary data to be unpacked.\noffset (int): If we need to shift the beginning of the data.", "source": "codesearchnet"}
{"code": "def enumerate(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.enumerate import enumerate\n    return enumerate(self)", "docstring": "Create an `int64` feature with the ordinal position of each event in an\n[`EventSet`][temporian.EventSet].\n\nEach index group is enumerated independently.\n\nUsage:\n```python\n>>> a = tp.event_set(\n...    timestamps=[-1, 2, 3, 5, 0],\n...    features={\"cat\": [\"A\", \"A\", \"A\", \"A\", \"B\"]},\n...    indexes=[\"cat\"],\n... )\n>>> b = a.enumerate()\n>>> b\nindexes: [('cat', str_)]\nfeatures: [('enumerate', int64)]\nevents:\ncat=b'A' (4 events):\ntimestamps: [-1.  2.  3.  5.]\n'enumerate': [0 1 2 3]\ncat=b'B' (1 events):\ntimestamps: [0.]\n'enumerate': [0]\n...\n\n```\n\nReturns:\nEventSet with a single feature with each event's ordinal position in\nits index group.", "source": "github-repos"}
{"code": "def as_treemap(self):\n    if self._treemap_cache:\n        return self._treemap_cache\n    self._treemap_cache = treemap = TreeMap(self)\n    return treemap", "docstring": "Return the dependencies as a TreeMap.\n\nReturns:\nTreeMap: instance of TreeMap.", "source": "codesearchnet"}
{"code": "def AddUserAccount(self, user_account, session_identifier=CURRENT_SESSION):\n    \n    if session_identifier not in self._user_accounts:\n      self._user_accounts[session_identifier] = {}\n\n    user_accounts = self._user_accounts[session_identifier]\n    if user_account.identifier in user_accounts:\n      raise KeyError('User account: {0:s} already exists.'.format(\n          user_account.identifier))\n\n    user_accounts[user_account.identifier] = user_account", "docstring": "Adds an user account.\n\nArgs:\nuser_account (UserAccountArtifact): user account artifact.\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nRaises:\nKeyError: if the user account already exists.", "source": "juraj-google-style"}
{"code": "def merge_corpora(cls, corpora):\n    ds = Corpus()\n    for merging_corpus in corpora:\n        ds.merge_corpus(merging_corpus)\n    return ds", "docstring": "Merge a list of corpora into one.\n\nArgs:\ncorpora (Iterable): An iterable of :py:class:`audiomate.corpus.CorpusView`.\n\nReturns:\nCorpus: A corpus with the data from all given corpora merged into one.", "source": "codesearchnet"}
{"code": "def unpack_message(buffer):\n    hdr_size = Header().get_size()\n    (hdr_buff, msg_buff) = (buffer[:hdr_size], buffer[hdr_size:])\n    header = Header()\n    header.unpack(hdr_buff)\n    message = new_message_from_header(header)\n    message.unpack(msg_buff)\n    return message", "docstring": "Unpack the whole buffer, including header pack.\n\nArgs:\nbuffer (bytes): Bytes representation of a openflow message.\n\nReturns:\nobject: Instance of openflow message.", "source": "codesearchnet"}
{"code": "def __init__(self, dfk, memoize=True, checkpoint={}):\n        \n        self.dfk = dfk\n        self.memoize = memoize\n\n        if self.memoize:\n            logger.info(\"App caching initialized\")\n            self.memo_lookup_table = checkpoint\n        else:\n            logger.info(\"App caching disabled for all apps\")\n            self.memo_lookup_table = {}", "docstring": "Initialize the memoizer.\n\nArgs:\n- dfk (DFK obj): The DFK object\n\nKWargs:\n- memoize (Bool): enable memoization or not.\n- checkpoint (Dict): A checkpoint loaded as a dict.", "source": "juraj-google-style"}
{"code": "def fields_equal(self, instance, fields_to_ignore=('id', 'change_date', 'changed_by')):\n    for field in self._meta.get_fields():\n        if ((not field.many_to_many) and (field.name not in fields_to_ignore)):\n            if (getattr(instance, field.name) != getattr(self, field.name)):\n                return False\n    return True", "docstring": "Compares this instance's fields to the supplied instance to test for equality.\nThis will ignore any fields in `fields_to_ignore`.\n\nNote that this method ignores many-to-many fields.\n\nArgs:\ninstance: the model instance to compare\nfields_to_ignore: List of fields that should not be compared for equality. By default\nincludes `id`, `change_date`, and `changed_by`.\n\nReturns: True if the checked fields are all equivalent, else False", "source": "codesearchnet"}
{"code": "def compare_forks(self, cur_fork_head, new_fork_head):\n        \n\n        \n        \n        if new_fork_head.consensus != b\"Devmode\":\n            raise \\\n                TypeError(\n                    'New fork head {} is not a DevMode block'.format(\n                        new_fork_head.identifier[:8]))\n\n        \n        \n        \n        \n        \n        if cur_fork_head.consensus != b\"Devmode\":\n            if new_fork_head.previous_block_id == cur_fork_head.identifier:\n                LOGGER.info(\n                    'Choose new fork %s: New fork head switches consensus to '\n                    'DevMode',\n                    new_fork_head.identifier[:8])\n                return True\n\n            raise \\\n                TypeError(\n                    'Trying to compare a DevMode block {} to a non-DevMode '\n                    'block {} that is not the direct predecessor'.format(\n                        new_fork_head.identifier[:8],\n                        cur_fork_head.identifier[:8]))\n\n        if new_fork_head.block_num == cur_fork_head.block_num:\n            cur_fork_hash = self.hash_signer_public_key(\n                cur_fork_head.header.signer_public_key,\n                cur_fork_head.header.previous_block_id)\n            new_fork_hash = self.hash_signer_public_key(\n                new_fork_head.header.signer_public_key,\n                new_fork_head.header.previous_block_id)\n\n            result = new_fork_hash < cur_fork_hash\n\n        else:\n            result = new_fork_head.block_num > cur_fork_head.block_num\n\n        return result", "docstring": "The longest chain is selected. If they are equal, then the hash\nvalue of the previous block id and publisher signature is computed.\nThe lowest result value is the winning block.\nArgs:\ncur_fork_head: The current head of the block chain.\nnew_fork_head: The head of the fork that is being evaluated.\nReturns:\nbool: True if choosing the new chain head, False if choosing\nthe current chain head.", "source": "juraj-google-style"}
{"code": "def from_node(cls, node):\n        \n        if not isinstance(node, aioxmpp.stanza.Message):\n            raise AttributeError(\"node must be a aioxmpp.stanza.Message instance\")\n        msg = cls()\n        msg._to = node.to\n        msg._sender = node.from_\n        if None in node.body:\n            msg.body = node.body[None]\n        else:\n            for key in node.body.keys():\n                msg.body = node.body[key]\n                break\n\n        for data in node.xep0004_data:\n            if data.title == SPADE_X_METADATA:\n                for field in data.fields:\n                    if field.var != \"_thread_node\":\n                        msg.set_metadata(field.var, field.values[0])\n                    else:\n                        msg.thread = field.values[0]\n\n        return msg", "docstring": "Creates a new spade.message.Message from an aixoxmpp.stanza.Message\n\nArgs:\nnode (aioxmpp.stanza.Message): an aioxmpp Message\n\nReturns:\nspade.message.Message: a new spade Message", "source": "juraj-google-style"}
{"code": "def backward(ctx, grad_output):\n    args = ctx.saved_tensors\n    grad_fn = ctx.grad_fn\n    if grad_fn is None:\n        raise ValueError('grad_fn must be provided for custom gradient')\n    grads = grad_fn(*args, upstream=grad_output)\n    if not isinstance(grads, tuple):\n        grads = (grads,)\n    return (None,) + grads", "docstring": "Backward pass computation specification.\n\nArgs:\nctx: Context object.\ngrad_output: Gradient with respect to the output.", "source": "github-repos"}
{"code": "def _tensor_product(self, other, reverse=False):\n        \n        \n        if not isinstance(other, Stinespring):\n            other = Stinespring(other)\n\n        \n        sa_l, sa_r = self._data\n        sb_l, sb_r = other._data\n\n        \n        din_a, dout_a = self.dim\n        din_b, dout_b = other.dim\n        dtr_a = sa_l.shape[0] \n        dtr_b = sb_l.shape[0] \n        if reverse:\n            shape_in = (dout_b, dtr_b, dout_a, dtr_a, din_b * din_a)\n            shape_out = (dout_b * dtr_b * dout_a * dtr_a, din_b * din_a)\n        else:\n            shape_in = (dout_a, dtr_a, dout_b, dtr_b, din_a * din_b)\n            shape_out = (dout_a * dtr_a * dout_b * dtr_b, din_a * din_b)\n\n        \n        if reverse:\n            input_dims = self.input_dims() + other.input_dims()\n            output_dims = self.output_dims() + other.output_dims()\n            sab_l = np.kron(sb_l, sa_l)\n        else:\n            input_dims = other.input_dims() + self.input_dims()\n            output_dims = other.output_dims() + self.output_dims()\n            sab_l = np.kron(sa_l, sb_l)\n        \n        sab_l = np.reshape(\n            np.transpose(np.reshape(sab_l, shape_in), (0, 2, 1, 3, 4)),\n            shape_out)\n\n        \n        if sa_r is None and sb_r is None:\n            sab_r = None\n        else:\n            if sa_r is None:\n                sa_r = sa_l\n            elif sb_r is None:\n                sb_r = sb_l\n            if reverse:\n                sab_r = np.kron(sb_r, sa_r)\n            else:\n                sab_r = np.kron(sa_r, sb_r)\n            \n            sab_r = np.reshape(\n                np.transpose(np.reshape(sab_r, shape_in), (0, 2, 1, 3, 4)),\n                shape_out)\n        return Stinespring((sab_l, sab_r), input_dims, output_dims)", "docstring": "Return the tensor product channel.\n\nArgs:\nother (QuantumChannel): a quantum channel subclass.\nreverse (bool): If False return self ⊗ other, if True return\nif True return (other ⊗ self) [Default: False]\nReturns:\nStinespring: the tensor product channel as a Stinespring object.\n\nRaises:\nQiskitError: if other cannot be converted to a channel.", "source": "juraj-google-style"}
{"code": "def _controller_name(self, objtype):\n    if objtype.endswith('y'):\n        return (objtype[:(- 1)] + 'ies')\n    if ((objtype[(- 1)] in 'sx') or (objtype[(- 2):] in ['sh', 'ch'])):\n        return (objtype + 'es')\n    if objtype.endswith('an'):\n        return (objtype[:(- 2)] + 'en')\n    return (objtype + 's')", "docstring": "Determines the controller name for the object's type\n\nArgs:\nobjtype (str): The object type\n\nReturns:\nA string with the controller name", "source": "codesearchnet"}
{"code": "def select_cross_device_ops(devices, session_config=None):\n    requested_devices = set((device_util.canonicalize(d) for d in devices))\n    if ops.executing_eagerly_outside_functions():\n        logical_gpus = context.context().list_logical_devices(device_type='GPU')\n        physical_gpus = context.context().list_physical_devices(device_type='GPU')\n        if len(logical_gpus) != len(physical_gpus):\n            logging.warning('NCCL is not supported when using virtual GPUs, fallingback to reduction to one device')\n            return ReductionToOneDevice()\n        machine_devices = context.context().list_logical_devices()\n    else:\n        machine_devices = device_lib.list_local_devices(session_config=session_config)\n    using_devices = set()\n    for d in machine_devices:\n        if device_util.canonicalize(d.name) in requested_devices:\n            using_devices.add(d.name)\n    if len(using_devices) != len(requested_devices):\n        logging.warning('Some requested devices in `tf.distribute.Strategy` are not visible to TensorFlow: %s', ','.join(list(requested_devices - using_devices)))\n    if any(('gpu' not in d.lower() for d in requested_devices)):\n        logging.warning('There are non-GPU devices in `tf.distribute.Strategy`, not using nccl allreduce.')\n        return ReductionToOneDevice()\n    if kernels.get_registered_kernels_for_op('NcclAllReduce'):\n        return NcclAllReduce(num_packs=1)\n    else:\n        logging.warning('Nccl kernel is not found, not using nccl allreduce.')\n        return ReductionToOneDevice()", "docstring": "Find the best `CrossDeviceOps` locally given a `tf.compat.v1.ConfigProto`.\n\nArgs:\ndevices: a list of devices passed to `tf.distribute.Strategy`.\nsession_config: a `tf.compat.v1.ConfigProto` or `None`. If `None`, it will\nmake decision based on all logical devices.\n\nReturns:\nA subclass of `CrossDeviceOps`.", "source": "github-repos"}
{"code": "def trace_save_and_restore(obj):\n    legacy_name = saveable_compat.get_saveable_name(obj)\n    obj_save_fn = obj._serialize_to_tensors\n    obj_restore_fn = obj._restore_from_tensors\n    if isinstance(obj_save_fn, defun.ConcreteFunction):\n        concrete_save = obj_save_fn\n    else:\n\n        @def_function.function\n        def save_fn():\n            tensor_dict = obj_save_fn()\n            if any((isinstance(v, tensor_callable.Callable) for v in tensor_dict.values())):\n                raise NotImplementedError(f'Unable to export SavedModel with object of type {type(obj)} because it returns a Callable in `_serialize_to_tensors`. If you need this functionality please file a feature request.')\n            if legacy_name:\n                return {f'{legacy_name}{key}': value for key, value in tensor_dict.items()}\n            return tensor_dict\n        concrete_save = save_fn.get_concrete_function()\n    if isinstance(obj_restore_fn, defun.ConcreteFunction):\n        concrete_restore = obj_restore_fn\n    else:\n\n        @def_function.function\n        def restore_fn(restored_tensors):\n            if legacy_name:\n                restored_tensors = {key[len(legacy_name):]: value for key, value in restored_tensors.items()}\n            obj_restore_fn(restored_tensors)\n        concrete_restore = restore_fn.get_concrete_function(concrete_save.structured_outputs)\n    return (concrete_save, concrete_restore)", "docstring": "Traces `Trackable` serialize- and restore-from-tensors functions.\n\nArgs:\nobj: A `Trackable` object.\n\nReturns:\nA concrete Function.", "source": "github-repos"}
{"code": "def get_exception_handlers(node: astroid.node_classes.NodeNG, exception=Exception) -> List[astroid.ExceptHandler]:\n    context = find_try_except_wrapper_node(node)\n    if isinstance(context, astroid.TryExcept):\n        return [handler for handler in context.handlers if error_of_type(handler, exception)]\n    return None", "docstring": "Return the collections of handlers handling the exception in arguments.\n\nArgs:\nnode (astroid.NodeNG): A node that is potentially wrapped in a try except.\nexception (builtin.Exception or str): exception or name of the exception.\n\nReturns:\nlist: the collection of handlers that are handling the exception or None.", "source": "codesearchnet"}
{"code": "def _validate_alias_file_path(alias_file_path):\n    if (not os.path.exists(alias_file_path)):\n        raise CLIError(ALIAS_FILE_NOT_FOUND_ERROR)\n    if os.path.isdir(alias_file_path):\n        raise CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))", "docstring": "Make sure the alias file path is neither non-existant nor a directory\n\nArgs:\nThe alias file path to import aliases from.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n        \n        super().unpack(buff, offset)\n        self.wildcards = UBInt32(value=FlowWildCards.OFPFW_ALL,\n                                 enum_ref=FlowWildCards)\n        self.wildcards.unpack(buff, offset)", "docstring": "Unpack *buff* into this object.\n\nDo nothing, since the _length is already defined and it is just a Pad.\nKeep buff and offset just for compability with other unpack methods.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def print_solution(model, solver):\n  \n  model_proto = model.Proto()\n  response_proto = solver.ResponseProto()\n  variables_in_objective_map = {}\n  maximization = False\n  if model_proto.HasField('objective'):\n    objective = model_proto.objective\n    for i in range(len(objective.vars)):\n      variables_in_objective_map[objective.vars[i]] = objective.coeffs[i]\n    if objective.scaling_factor < 0.0:\n      maximization = True\n  variable_assignments = []\n  variables_in_objective = []\n  num_vars = len(model_proto.variables)\n  for var_index in range(num_vars):\n    if not model_proto.variables[var_index].name:\n      continue\n    variable_name = model_proto.variables[var_index].name\n    if var_index in variables_in_objective_map:\n      coefficient = variables_in_objective_map[var_index]\n      if coefficient:\n        if maximization:\n          coefficient *= -1\n        if coefficient < 0:\n          variables_in_objective.append(' - {} * {}'.format(\n              -coefficient, variable_name))\n        elif coefficient > 0:\n          variables_in_objective.append(' + {} * {}'.format(\n              coefficient, variable_name))\n    variable_assignments.append('  {} = {}\\n'.format(\n        variable_name, response_proto.solution[var_index]))\n  print(''.join(variable_assignments), end='')\n  \n  if variables_in_objective and variables_in_objective[0][1] == '+':\n    variables_in_objective[0] = variables_in_objective[0][2:]\n  print('{}:{}'.format('Maximize' if maximization else 'Minimize',\n                       ''.join(variables_in_objective)))\n  print('Objective value: {}\\n'.format(solver.ObjectiveValue()))", "docstring": "Prints the solution associated with solver.\n\nIf solver has already had Solve() called on it, prints the solution. This\nincludes each variable and its assignment, along with the objective function\nand its optimal value.\nIf solver has not had Solve() called on it, or there is no feasible solution,\nthis will probably crash.\n\nArgs:\nmodel: A pywrapcp.CpModel object.\nsolver: A pywrapcp.CpSolver object.\n\nReturns:\nNothing, but prints the solution associated with solver.", "source": "juraj-google-style"}
{"code": "def get_field_to_observations_map(generator, query_for_tag=''):\n  \n\n  def increment(stat, event, tag=''):\n    assert stat in TRACKED_FIELDS\n    field_to_obs[stat].append(Observation(step=event.step,\n                                          wall_time=event.wall_time,\n                                          tag=tag)._asdict())\n\n  field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])\n\n  for event in generator:\n    \n    if event.HasField('graph_def') and (not query_for_tag):\n      increment('graph', event)\n    if event.HasField('session_log') and (not query_for_tag):\n      status = event.session_log.status\n      if status == event_pb2.SessionLog.START:\n        increment('sessionlog:start', event)\n      elif status == event_pb2.SessionLog.STOP:\n        increment('sessionlog:stop', event)\n      elif status == event_pb2.SessionLog.CHECKPOINT:\n        increment('sessionlog:checkpoint', event)\n    elif event.HasField('summary'):\n      for value in event.summary.value:\n        if query_for_tag and value.tag != query_for_tag:\n          continue\n\n        for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():\n          if value.HasField(proto_name):\n            increment(display_name, event, value.tag)\n  return field_to_obs", "docstring": "Return a field to `Observations` dict for the event generator.\n\nArgs:\ngenerator: A generator over event protos.\nquery_for_tag: A string that if specified, only create observations for\nevents with this tag name.\n\nReturns:\nA dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.", "source": "juraj-google-style"}
{"code": "def _on_report(self, report, connection_id):\n        \n        self._logger.info('Received report: %s', str(report))\n        self._trigger_callback('on_report', connection_id, report)\n\n        return False", "docstring": "Callback function called when a report has been processed.\n\nArgs:\nreport (IOTileReport): The report object\nconnection_id (int): The connection id related to this report\n\nReturns:\n- True to indicate that IOTileReportParser should also keep a copy of the report\nor False to indicate it should delete it.", "source": "juraj-google-style"}
{"code": "def _MakeTimestamp(self, start=None, end=None):\n    mysql_unsigned_bigint_max = 18446744073709551615\n    ts_start = int((start or 0))\n    if (end is None):\n        ts_end = mysql_unsigned_bigint_max\n    else:\n        ts_end = int(end)\n    if ((ts_start == 0) and (ts_end == mysql_unsigned_bigint_max)):\n        return None\n    else:\n        return (ts_start, ts_end)", "docstring": "Create a timestamp using a start and end time.\n\nArgs:\nstart: Start timestamp.\nend: End timestamp.\n\nReturns:\nA tuple (start, end) of converted timestamps or None for all time.", "source": "codesearchnet"}
{"code": "def filter_publication(publication):\n    \n    if settings.USE_DUP_FILTER:\n        publication = dup_filter.filter_publication(publication)\n\n    if publication and settings.USE_ALEPH_FILTER:\n        publication = aleph_filter.filter_publication(\n            publication,\n            cmp_authors=settings.ALEPH_FILTER_BY_AUTHOR\n        )\n\n    return publication", "docstring": "Filter :class:`.Publication` objects using settings declared in\n:mod:`~harvester.settings` submodule.\n\nArgs:\npublication (obj): :class:`.Publication` instance.\n\nReturns:\nobj/None: None if the publication was found in Aleph or `publication` \\\nif not.", "source": "juraj-google-style"}
{"code": "def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):\n  \n  \n  adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')\n\n  expanded_text_ad = {\n      'xsi_type': 'ExpandedTextAd',\n      'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,\n      'headlinePart2': 'Only {=%s.Price}' % feed_name,\n      'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,\n      'finalUrls': ['http:\n  }\n\n  \n  \n  operations = [{\n      'operator': 'ADD',\n      'operand': {\n          'adGroupId': adgroup,\n          'ad': expanded_text_ad\n      }\n  } for adgroup in adgroup_ids]\n\n  response = adgroup_ad_service.mutate(operations)\n\n  if response and 'value' in response:\n    for ad in response['value']:\n      print ('Created an ad with ID \"%s\", type \"%s\", and status \"%s\".'\n             % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))\n  else:\n    raise errors.GoogleAdsError('No ads were added.')", "docstring": "Creates ExpandedTextAds that use ad customizations for specified AdGroups.\n\nArgs:\nclient: an AdWordsClient instance.\nadgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.\nfeed_name: the name of the feed used to apply customizations.\n\nRaises:\nGoogleAdsError: if no ExpandedTextAds were added.", "source": "juraj-google-style"}
{"code": "def ClientCertFromCSR(cls, csr):\n    \n    builder = x509.CertificateBuilder()\n    \n    \n    common_name = csr.GetCN()\n    serial = int(common_name.split(\".\")[1], 16)\n    builder = builder.serial_number(serial)\n    builder = builder.subject_name(\n        x509.Name(\n            [x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))\n\n    now = rdfvalue.RDFDatetime.Now()\n    now_plus_year = now + rdfvalue.Duration(\"52w\")\n    builder = builder.not_valid_after(now_plus_year.AsDatetime())\n    now_minus_ten = now - rdfvalue.Duration(\"10s\")\n    builder = builder.not_valid_before(now_minus_ten.AsDatetime())\n    \n    \n    \n    ca_cert = config_lib._CONFIG[\"CA.certificate\"]\n    \n    builder = builder.issuer_name(ca_cert.GetIssuer())\n    builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())\n\n    \n    \n    \n    ca_key = config_lib._CONFIG[\"PrivateKeys.ca_key\"]\n    \n\n    return RDFX509Cert(\n        builder.sign(\n            private_key=ca_key.GetRawPrivateKey(),\n            algorithm=hashes.SHA256(),\n            backend=openssl.backend))", "docstring": "Creates a new cert for the given common name.\n\nArgs:\ncsr: A CertificateSigningRequest.\n\nReturns:\nThe signed cert.", "source": "juraj-google-style"}
{"code": "def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance):\n    path = instance_to_path_map[config_instance]\n    group = prop_tree\n    for elem in path[:(- 1)]:\n        group = getattr(group, elem)\n    assert (group._key == config_instance.parent.key)\n    setattr(group, config_instance.key, config_instance.value)\n    term = getattr(group, config_instance.key)\n    try:\n        if hasattr(term, '_term'):\n            term._term._config = config_instance\n            return\n    except KeyError:\n        pass\n    try:\n        if hasattr(term, '_config'):\n            term._config = config_instance\n            return\n    except KeyError:\n        pass\n    else:\n        pass", "docstring": "Finds appropriate term in the prop_tree and sets its value from config_instance.\n\nArgs:\nconfigs_map (dict): key is id of the config, value is Config instance (AKA cache of the configs)\nprop_tree (PropertyDictTree): poperty tree to populate.\nconfig_instance (Config):", "source": "codesearchnet"}
{"code": "def get_day_of_month(datestring):\n    get_day = re.compile('\\\\d{1,2}(st|nd|rd|th)?', re.IGNORECASE)\n    day = get_day.search(datestring)\n    the_day = None\n    if day:\n        if bool(re.search('[st|nd|rd|th]', day.group().lower())):\n            the_day = day.group()[:(- 2)]\n        else:\n            the_day = day.group()\n        if (int(the_day) < 10):\n            the_day = add_zero(the_day)\n    return str(the_day)", "docstring": "Transforms an ordinal number into plain number with padding zero.\n\nE.g. 3rd -> 03, or 12th -> 12\n\nKeyword arguments:\ndatestring -- a string\n\nReturns:\nString, or None if the transformation fails", "source": "codesearchnet"}
{"code": "def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):\n    if ((self._last_header > header_path) and Match('^\\\\s*\n        return False\n    return True", "docstring": "Check if a header is in alphabetical order with the previous header.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nheader_path: Canonicalized header to be checked.\n\nReturns:\nReturns true if the header is in alphabetical order.", "source": "codesearchnet"}
{"code": "def restore_server_connection(self, port=None):\n    try:\n        self.host_port = port\n        self._make_connection()\n    except Exception as e:\n        self.log.error('Failed to re-connect to the server.')\n        raise errors.ServerRestoreConnectionError(self._device, f'Failed to restore server connection for {self.package} at host port {self.host_port}, device port {self.device_port}.') from e\n    self._proc = None\n    self._restore_event_client()", "docstring": "Restores the server after the device got reconnected.\n\nInstead of creating a new instance of the client:\n- Uses the given port (or find a new available host port if none is\ngiven).\n- Tries to connect to the remote server with the selected port.\n\nArgs:\nport: int, if given, this is the host port from which to connect to the\nremote device port. If not provided, find a new available port as host\nport.\n\nRaises:\nerrors.ServerRestoreConnectionError: when failed to restore the connection\nto the snippet server.", "source": "github-repos"}
{"code": "def get_attachment_data(cls, session, attachment_id):\n        \n        return cls(\n            '/attachments/%d/data.json' % attachment_id,\n            singleton=True,\n            session=session,\n            out_type=AttachmentData,\n        )", "docstring": "Return a specific attachment's data.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nattachment_id (int): The ID of the attachment from which to get\ndata.\n\nReturns:\nhelpscout.models.AttachmentData: An attachment data singleton, if\nexisting. Otherwise ``None``.", "source": "juraj-google-style"}
{"code": "def recent(self, username, project, limit=1, offset=0, branch=None, status_filter=''):\n    method = 'GET'\n    if (branch is not None):\n        url = '/project/{username}/{project}/tree/{branch}?circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'.format(username=username, project=project, branch=branch, token=self.client.api_token, limit=limit, offset=offset, status_filter=status_filter)\n    else:\n        url = '/project/{username}/{project}?circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'.format(username=username, project=project, token=self.client.api_token, limit=limit, offset=offset, status_filter=status_filter)\n    json_data = self.client.request(method, url)\n    return json_data", "docstring": "Return status of recent builds for given project.\n\nRetrieves build statuses for given project and branch. If branch is\nNone it retrieves most recent build.\n\nArgs:\nusername (str): Name of the user.\nproject (str): Name of the project.\nlimit (int): Number of builds to return, default=1, max=100.\noffset (int): Returns builds starting from given offset.\nbranch (str): Optional branch name as string. If specified only\nbuilds from given branch are returned.\nstatus_filter (str): Restricts which builds are returned. Set to\n\"completed\", \"successful\", \"failed\", \"running\", or defaults\nto no filter.\n\nReturns:\nA list of dictionaries with information about each build.", "source": "codesearchnet"}
{"code": "def get_module(dir_path, relative_to_dir):\n    dir_path = dir_path[len(relative_to_dir):]\n    dir_path = dir_path.replace(os.sep, '/')\n    return dir_path.replace('/', '.').strip('.')", "docstring": "Get module that corresponds to path relative to relative_to_dir.\n\nArgs:\ndir_path: Path to directory.\nrelative_to_dir: Get module relative to this directory.\n\nReturns:\nName of module that corresponds to the given directory.", "source": "github-repos"}
{"code": "def extract_tree_with(self, labels, suppress_unifurcations=True):\n        \n        return self.extract_tree(labels, False, suppress_unifurcations)", "docstring": "Extract a copy of this ``Tree`` with only the leaves labeled by the strings in ``labels``\n\nArgs:\n``leaves`` (``set``): Set of leaf labels to include.\n\n``suppress_unifurcations`` (``bool``): ``True`` to suppress unifurcations, otherwise ``False``\n\nReturns:\nTree: Copy of this Tree, including only the leaves labeled by the strings in ``labels``", "source": "juraj-google-style"}
{"code": "def _fuse_awq_layernorm(fuse_module_names, module, target_cls):\n    for module_name in fuse_module_names:\n        if hasattr(module, module_name):\n            old_module = getattr(module, module_name)\n            module._modules[module_name] = target_cls(old_module.weight, old_module.variance_epsilon).to(old_module.weight.device)\n            del old_module", "docstring": "Fuse the LayerNorm layers into a target class using autoawq\n\nArgs:\nfuse_module_names (`List[str]`):\nThe list of module names to fuse\nmodule (`nn.Module`):\nThe pytorch parent module that has layernorm modules to fuse\ntarget_cls (`~autoawq.FasterTransformerRMSNorm`):\nThe `FasterTransformerRMSNorm` class as it only supports that class\nfor now.", "source": "github-repos"}
{"code": "def process_dimensions(kdims, vdims):\n    dimensions = {}\n    for (group, dims) in [('kdims', kdims), ('vdims', vdims)]:\n        if (dims is None):\n            continue\n        elif isinstance(dims, (tuple, basestring, Dimension, dict)):\n            dims = [dims]\n        elif (not isinstance(dims, list)):\n            raise ValueError(('%s argument expects a Dimension or list of dimensions, specified as tuples, strings, dictionaries or Dimension instances, not a %s type. Ensure you passed the data as the first argument.' % (group, type(dims).__name__)))\n        for dim in dims:\n            if (not isinstance(dim, (tuple, basestring, Dimension, dict))):\n                raise ValueError(('Dimensions must be defined as a tuple, string, dictionary or Dimension instance, found a %s type.' % type(dim).__name__))\n        dimensions[group] = [asdim(d) for d in dims]\n    return dimensions", "docstring": "Converts kdims and vdims to Dimension objects.\n\nArgs:\nkdims: List or single key dimension(s) specified as strings,\ntuples dicts or Dimension objects.\nvdims: List or single value dimension(s) specified as strings,\ntuples dicts or Dimension objects.\n\nReturns:\nDictionary containing kdims and vdims converted to Dimension\nobjects:\n\n{'kdims': [Dimension('x')], 'vdims': [Dimension('y')]", "source": "codesearchnet"}
{"code": "def l2_regularizer(weight=1.0, scope=None):\n\n    def regularizer(tensor):\n        with tf.name_scope(scope, 'L2Regularizer', [tensor]):\n            l2_weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='weight')\n            return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')\n    return regularizer", "docstring": "Define a L2 regularizer.\n\nArgs:\nweight: scale the loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\na regularizer function.", "source": "codesearchnet"}
{"code": "def set_commissions(self, fn):\n        \n        self.commission_fn = fn\n\n        for c in self._childrenv:\n            if isinstance(c, StrategyBase):\n                c.set_commissions(fn)", "docstring": "Set commission (transaction fee) function.\n\nArgs:\nfn (fn(quantity, price)): Function used to determine commission\namount.", "source": "juraj-google-style"}
{"code": "def grow(self, times=1):\n        \n        self.nodes.append([])\n\n        for n, node in enumerate(self.nodes[self.age]):\n            if self.age == 0:\n                p_node = Node(self.pos[:2])\n            else:\n                p_node = self._get_node_parent(self.age-1, n)\n            angle = node.get_node_angle(p_node)\n            for i in range(self.comp):\n                tot_angle = self.__get_total_angle(angle, i)\n                length = self.__get_total_length(self.age+1, i)\n                self.nodes[self.age+1].append(node.make_new_node(length, tot_angle))\n\n        self.age += 1\n\n        if times > 1:\n            self.grow(times-1)", "docstring": "Let the tree grow.\n\nArgs:\ntimes (integer): Indicate how many times the tree will grow.", "source": "juraj-google-style"}
{"code": "def validate_txn_obj(obj_name, obj, key, validation_fun):\n    \n    backend = bigchaindb.config['database']['backend']\n\n    if backend == 'localmongodb':\n        data = obj.get(key, {})\n        if isinstance(data, dict):\n            validate_all_keys_in_obj(obj_name, data, validation_fun)\n        elif isinstance(data, list):\n            validate_all_items_in_list(obj_name, data, validation_fun)", "docstring": "Validate value of `key` in `obj` using `validation_fun`.\n\nArgs:\nobj_name (str): name for `obj` being validated.\nobj (dict): dictionary object.\nkey (str): key to be validated in `obj`.\nvalidation_fun (function): function used to validate the value\nof `key`.\n\nReturns:\nNone: indicates validation successful\n\nRaises:\nValidationError: `validation_fun` will raise exception on failure", "source": "juraj-google-style"}
{"code": "def __init__(self, password, testnet=False):\n        \n        netcode = 'XTN' if testnet else 'BTC'\n        if isinstance(password, str):\n            password = password.encode()\n        self.wallet = BIP32Node.from_master_secret(password, netcode=netcode)\n        self.root_address = ('', self.wallet.address())", "docstring": "Initializes a BIP32 wallet.\n\nAddresses returned by the wallet are of the form ``(path, address)``.\n\nArgs:\npassword (bytes): Master secret for the wallet. The password can\nalso be passed as a string (``str``).\ntestnet (bool): Wwether to use the bitcoin testnet or mainnet.\nDefaults to ``False``.", "source": "juraj-google-style"}
{"code": "def get_default_session():\n    return _default_session_stack.get_default()", "docstring": "Returns the default session for the current thread.\n\nThe returned `Session` will be the innermost session on which a\n`Session` or `Session.as_default()` context has been entered.\n\nNOTE: The default session is a property of the current thread. If you\ncreate a new thread, and wish to use the default session in that\nthread, you must explicitly add a `with sess.as_default():` in that\nthread's function.\n\nReturns:\nThe default `Session` being used in the current thread.", "source": "github-repos"}
{"code": "def from_json(raw):\n    \n    ncls = None\n    _type = raw.get('type')\n    try:\n        ncls = _type_map[NodeType(_type)]\n    except (KeyError, ValueError) as e:\n        logger.warning('Unknown node type: %s', _type)\n        if DEBUG:\n            raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e)\n        return None\n    node = ncls()\n    node.load(raw)\n\n    return node", "docstring": "Helper to construct a node from a dict.\n\nArgs:\nraw (dict): Raw node representation.\n\nReturns:\nNode: A Node object or None.", "source": "juraj-google-style"}
{"code": "def __init__(self, app_name, ad):\n    self.host_port = None\n    self.device_port = None\n    self.app_name = app_name\n    self._ad = ad\n    self.log = self._ad.log\n    self.uid = None\n    self._client = None\n    self._conn = None\n    self._counter = None\n    self._lock = threading.Lock()\n    self._event_client = None\n    self.verbose_logging = True", "docstring": "Args:\napp_name: (str) The user-visible name of the app being communicated\nwith.\nad: (AndroidDevice) The device object associated with a client.", "source": "github-repos"}
{"code": "async def get_movie(self, id_):\n    url = self.url_builder('movie/{movie_id}', dict(movie_id=id_), url_params=OrderedDict(append_to_response='credits'))\n    data = (await self.get_data(url))\n    if (data is None):\n        return\n    return Movie.from_json(data, self.config['data'].get('images'))", "docstring": "Retrieve movie data by ID.\n\nArguments:\nid_ (:py:class:`int`): The movie's TMDb ID.\n\nReturns:\n:py:class:`~.Movie`: The requested movie.", "source": "codesearchnet"}
{"code": "def queue_log_message(self, message: LogMessage) -> bool | Any:\n    return self._messages.push(message)", "docstring": "Add a log message to the log queue and attempt a flush.\n\nArgs:\n* message: LogMessage dictionary\n\nReturns:\n* True, if flushed with no errors\n* False, if not flushed\n* Error value from logger, if flushed with errors", "source": "github-repos"}
{"code": "def get_random_email(ltd=\"com\"):\n        \n\n        email = [\n            RandomInputHelper.get_random_value(6, [string.ascii_lowercase]),\n            \"@\",\n            RandomInputHelper.get_random_value(6, [string.ascii_lowercase]),\n            \".\",\n            ltd\n        ]\n\n        return \"\".join(email)", "docstring": "Get a random email address with the given ltd.\n\nArgs:\nltd (str): The ltd to use (e.g. com).\n\nReturns:\nstr: The random email.", "source": "juraj-google-style"}
{"code": "def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup):\n    \n\n    draw_spectra_stacked(ss, title, num_rows, setup)\n    plt.show()", "docstring": "Plots one or more stacked in subplots sharing same x-axis.\n\nArgs:\nss: list of Spectrum objects\ntitle=None: window title\nnum_rows=None: (optional) number of rows for subplot grid. If not passed,\nnum_rows will be the number of plots, and the number of columns will be 1.\nIf passed, number of columns is calculated automatically.\nsetup: PlotSpectrumSetup object", "source": "juraj-google-style"}
{"code": "def __init__(self, vertical, cont, end):\n        \n        super(AbstractStyle, self).__init__()\n        self.vertical = vertical\n        self.cont = cont\n        self.end = end\n        assert (len(cont) == len(vertical) and len(cont) == len(end)), (\n            \"'%s', '%s' and '%s' need to have equal length\" % (vertical, cont,\n                                                               end))", "docstring": "Tree Render Style.\n\nArgs:\n\nvertical: Sign for vertical line.\n\ncont: Chars for a continued branch.\n\nend: Chars for the last branch.", "source": "juraj-google-style"}
{"code": "def eere_station(station_code):\n    with open((env.SRC_PATH + '/eere_meta.csv')) as eere_meta:\n        stations = csv.DictReader(eere_meta)\n        for station in stations:\n            if (station['station_code'] == station_code):\n                return station\n    raise KeyError('station not found')", "docstring": "Station information.\n\nArgs:\nstation_code (str): station code.\n\nReturns (dict): station information", "source": "codesearchnet"}
{"code": "def extract_cluster(self, target_sites, **kwargs):\n        \n        cluster = list(target_sites)\n        others = [site for site in self if site not in cluster]\n        size = 0\n        while len(cluster) > size:\n            size = len(cluster)\n            new_others = []\n            for site in others:\n                for site2 in cluster:\n                    if CovalentBond.is_bonded(site, site2, **kwargs):\n                        cluster.append(site)\n                        break\n                else:\n                    new_others.append(site)\n            others = new_others\n        return cluster", "docstring": "Extracts a cluster of atoms based on bond lengths\n\nArgs:\ntarget_sites ([Site]): List of initial sites to nucleate cluster.\n\\\\*\\\\*kwargs: kwargs passed through to CovalentBond.is_bonded.\n\nReturns:\n[Site/PeriodicSite] Cluster of atoms.", "source": "juraj-google-style"}
{"code": "def minutes(value: Union[int, float]) -> Duration:\n    return float(value * 60)", "docstring": "Converts input value from minutes to a `Duration` in seconds.\n\nExample:\n```python\n>>> timestamps = [tp.duration.minutes(i) for i in [5, 10, 30]]\n>>> timestamps\n[300.0, 600.0, 1800.0]\n\n>>> # Usage in a window operation\n>>> a = tp.event_set(timestamps=timestamps, features={\"f1\": [1, 5, -5]})\n>>> a.moving_sum(window_length=tp.duration.minutes(6))\nindexes: ...\ntimestamps: [ 300. 600. 1800.]\n'f1': [ 1 6 -5]\n...\n\n```\n\nArgs:\nvalue: Number of minutes.\n\nReturns:\nEquivalent number of seconds.", "source": "github-repos"}
{"code": "def _string_from_ip_int(self, ip_int=None):\n    if ((not ip_int) and (ip_int != 0)):\n        ip_int = int(self._ip)\n    if (ip_int > self._ALL_ONES):\n        raise ValueError('IPv6 address is too large')\n    hex_str = ('%032x' % ip_int)\n    hextets = []\n    for x in range(0, 32, 4):\n        hextets.append(('%x' % int(hex_str[x:(x + 4)], 16)))\n    hextets = self._compress_hextets(hextets)\n    return ':'.join(hextets)", "docstring": "Turns a 128-bit integer into hexadecimal notation.\n\nArgs:\nip_int: An integer, the IP address.\n\nReturns:\nA string, the hexadecimal representation of the address.\n\nRaises:\nValueError: The address is bigger than 128 bits of all ones.", "source": "codesearchnet"}
{"code": "def get_beam_typehints_from_tableschema(schema):\n    if not isinstance(schema, (bigquery.TableSchema, bigquery.TableFieldSchema)):\n        schema = get_bq_tableschema(schema)\n    typehints = []\n    for field in schema.fields:\n        name, field_type, mode = (field.name, field.type.upper(), field.mode.upper())\n        if field_type in ['STRUCT', 'RECORD']:\n            typehint = RowTypeConstraint.from_fields(get_beam_typehints_from_tableschema(field))\n        elif field_type in BIGQUERY_TYPE_TO_PYTHON_TYPE:\n            typehint = BIGQUERY_TYPE_TO_PYTHON_TYPE[field_type]\n        else:\n            raise ValueError(f'Converting BigQuery type [{field_type}] to Python Beam type is not supported.')\n        if mode == 'REPEATED':\n            typehint = Sequence[typehint]\n        elif mode != 'REQUIRED':\n            typehint = Optional[typehint]\n        typehints.append((name, typehint))\n    return typehints", "docstring": "Extracts Beam Python type hints from the schema.\n\nArgs:\nschema (~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):\nThe TableSchema to extract type hints from.\n\nReturns:\nList[Tuple[str, Any]]: A list of type hints that describe the input schema.\nNested and repeated fields are supported.", "source": "github-repos"}
{"code": "def exit_handler(signum, frame):\n    LOGGER.debug('signal {} was caught'.format(signum))\n    sys.exit((128 + signum))", "docstring": "Catch SIGTERM and SIGHUP and call \"sys.exit\" which raises\n\"SystemExit\" exception.\nThis will trigger all the cleanup code defined in ContextManagers\nand \"finally\" statements.\n\nFor more details about the arguments see \"signal\" documentation.\n\nArgs:\nsignum(int): The signal's number\nframe(frame): The current stack frame, can be None", "source": "codesearchnet"}
{"code": "def max_consecutive_days(self) -> Optional[Tuple[(int, Interval)]]:\n    if (len(self.intervals) == 0):\n        return None\n    startdate = self.start_date()\n    enddate = self.end_date()\n    seq = ''\n    ndays = ((enddate - startdate).days + 1)\n    for i in range(ndays):\n        date = (startdate + datetime.timedelta(days=i))\n        wholeday = Interval.wholeday(date)\n        if any([x.overlaps(wholeday) for x in self.intervals]):\n            seq += '+'\n        else:\n            seq += ' '\n    longest = max(seq.split(), key=len)\n    longest_len = len(longest)\n    longest_idx = seq.index(longest)\n    longest_interval = Interval.dayspan((startdate + datetime.timedelta(days=longest_idx)), (startdate + datetime.timedelta(days=(longest_idx + longest_len))))\n    return (longest_len, longest_interval)", "docstring": "The length of the longest sequence of days in which all days include\nan interval.\n\nReturns:\ntuple:\n``(longest_length, longest_interval)`` where\n``longest_interval`` is a :class:`Interval` containing the\nstart and end date of the longest span -- or ``None`` if we\ncontain no intervals.", "source": "codesearchnet"}
{"code": "def load_model(model_cls_path, model_cls_name, model_load_args):\n    spec = importlib.util.spec_from_file_location('active_model', model_cls_path)\n    model_module = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(model_module)\n    model_cls = getattr(model_module, model_cls_name)\n    model = model_cls()\n    if (not isinstance(model, BaseModel)):\n        warnings.warn((\"Loaded model '%s' at '%s' is not an instance of %r\" % (model_cls_name, model_cls_path, BaseModel)))\n    model.load(**model_load_args)\n    return model", "docstring": "Get an instance of the described model.\n\nArgs:\nmodel_cls_path: Path to the module in which the model class\nis defined.\nmodel_cls_name: Name of the model class.\nmodel_load_args: Dictionary of args to pass to the `load` method\nof the model instance.\n\nReturns:\nAn instance of :class:`.models.model.BaseModel` or subclass", "source": "codesearchnet"}
{"code": "def find(self, package, **kwargs):\n    for finder in self.finders:\n        package_spec = finder.find(package, **kwargs)\n        if package_spec:\n            return package_spec\n    return None", "docstring": "Find a package using package finders.\n\nReturn the first package found.\n\nArgs:\npackage (str): package to find.\n**kwargs (): additional keyword arguments used by finders.\n\nReturns:\nPackageSpec: if package found, else None", "source": "codesearchnet"}
{"code": "def token_request(self, authorization_code):\n        \n        if not self._client.token_endpoint:\n            return None\n\n        request = {\n            'grant_type': 'authorization_code',\n            'code': authorization_code,\n            'redirect_uri': self._redirect_uri\n        }\n\n        logger.debug('making token request: %s', request)\n        client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic')\n        auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method,\n                                                                                                request)\n        resp = self._provider_configuration.requests_session \\\n            .post(self._client.token_endpoint,\n                  data=request,\n                  headers=auth_header) \\\n            .json()\n        logger.debug('received token response: %s', json.dumps(resp))\n\n        if 'error' in resp:\n            token_resp = TokenErrorResponse(**resp)\n        else:\n            token_resp = AccessTokenResponse(**resp)\n            token_resp.verify(keyjar=self._client.keyjar)\n            if 'id_token' in resp:\n                token_resp['id_token_jwt'] = resp['id_token']\n\n        return token_resp", "docstring": "Makes a token request.  If the 'token_endpoint' is not configured in the provider metadata, no request will\nbe made.\n\nArgs:\nauthorization_code (str): authorization code issued to client after user authorization\n\nReturns:\nUnion[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token\nrequest was performed.", "source": "juraj-google-style"}
{"code": "def port_remove(br, port, if_exists=True):\n    param_if_exists = _param_if_exists(if_exists)\n    if (port and (not br)):\n        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)\n    else:\n        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Deletes port.\n\nArgs:\nbr: A string - bridge name (If bridge is None, port is removed from  whatever bridge contains it)\nport: A string - port name.\nif_exists: Bool, if False - attempting to delete a por that  does  not exist returns False. (Default True)\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_remove br0 8080", "source": "codesearchnet"}
{"code": "def from_file(xmu_dat_file='xmu.dat', feff_inp_file='feff.inp'):\n    data = np.loadtxt(xmu_dat_file)\n    header = Header.from_file(feff_inp_file)\n    parameters = Tags.from_file(feff_inp_file)\n    pots = Potential.pot_string_from_file(feff_inp_file)\n    if ('RECIPROCAL' in parameters):\n        absorbing_atom = parameters['TARGET']\n    else:\n        absorbing_atom = pots.splitlines()[3].split()[2]\n    return Xmu(header, parameters, absorbing_atom, data)", "docstring": "Get Xmu from file.\n\nArgs:\nxmu_dat_file (str): filename and path for xmu.dat\nfeff_inp_file (str): filename and path of feff.inp input file\n\nReturns:\nXmu object", "source": "codesearchnet"}
{"code": "def get_structure_from_canonical_name(self, structure_name):\n        \n        return next((st for st in self.structures if st.canonical_name == structure_name), None)", "docstring": "Return a structure from a canonical name\nArgs:\nstructure_name (str): canonical name of the structure\nReturns:\nStructure", "source": "juraj-google-style"}
{"code": "def update_missing_keys_after_loading(self, model, missing_keys: List[str], prefix: str) -> List[str]:\n    return missing_keys", "docstring": "Override this method if you want to adjust the `missing_keys` after loading the model params,\nbut before the model is post-processed.\n\nArgs:\nmissing_keys (`List[str]`, *optional*):\nThe list of missing keys in the checkpoint compared to the state dict of the model", "source": "github-repos"}
{"code": "def list_vms_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachines', '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VMs in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of a list of VM model views.", "source": "codesearchnet"}
{"code": "def clean(self, force: bool=False):\n        \n        assert not self._closed\n\n        with (yield from self._host_pools_lock):\n            for key, pool in tuple(self._host_pools.items()):\n                yield from pool.clean(force=force)\n\n                if not self._host_pool_waiters[key] and pool.empty():\n                    del self._host_pools[key]\n                    del self._host_pool_waiters[key]", "docstring": "Clean all closed connections.\n\nArgs:\nforce: Clean connected and idle connections too.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def frame(self, frame):\n        \n        try:\n            zframe = str(int(frame)).zfill(self._zfill)\n        except ValueError:\n            zframe = frame\n\n        \n        \n        \n\n        if self._zfill == 0:\n            zframe = \"\"\n\n        return \"\".join((self._dir, self._base, zframe, self._ext))", "docstring": "Return a path go the given frame in the sequence.  Integer or string\ndigits are treated as a frame number and padding is applied, all other\nvalues are passed though.\n\nExamples:\n>>> seq.frame(1)\n/foo/bar.0001.exr\n>>> seq.frame(\"#\")\n/foo/bar.#.exr\n\nArgs:\nframe (int or str): the desired frame number or a char to pass\nthrough (ie. #)\n\nReturns:\nstr:", "source": "juraj-google-style"}
{"code": "def disable_cudnn_autotune(func: _F) -> _F:\n\n    def decorated(*args, **kwargs):\n        original_tf_cudnn_use_autotune = os.environ.get('TF_CUDNN_USE_AUTOTUNE')\n        os.environ['TF_CUDNN_USE_AUTOTUNE'] = 'false'\n        original_xla_flags = os.environ.get('XLA_FLAGS')\n        new_xla_flags = '--xla_gpu_autotune_level=0'\n        if original_xla_flags:\n            new_xla_flags = original_xla_flags + ' ' + new_xla_flags\n        os.environ['XLA_FLAGS'] = new_xla_flags\n        result = func(*args, **kwargs)\n        if original_tf_cudnn_use_autotune is None:\n            del os.environ['TF_CUDNN_USE_AUTOTUNE']\n        else:\n            os.environ['TF_CUDNN_USE_AUTOTUNE'] = original_tf_cudnn_use_autotune\n        if original_xla_flags is None:\n            del os.environ['XLA_FLAGS']\n        else:\n            os.environ['XLA_FLAGS'] = original_xla_flags\n        return result\n    return tf_decorator.make_decorator(func, decorated)", "docstring": "Disable autotuning during the call to this function.\n\nSome tests want to base assertions on a graph being isomorphic with a copy.\nTo ensure this, this decorator disables autotuning.\n\nArgs:\nfunc: Function to run with CuDNN autotuning turned off.\n\nReturns:\nDecorated function.", "source": "github-repos"}
{"code": "def set_compare_custom_predict_fn(self, predict_fn):\n    self.delete('compare_estimator_and_spec')\n    self.store('compare_custom_predict_fn', predict_fn)\n    self.set_compare_inference_address('custom_predict_fn')\n    if (not self.has_compare_model_name()):\n        self.set_compare_model_name('2')\n    return self", "docstring": "Sets a second custom function for inference.\n\nIf you wish to compare the results of two models in WIT, use this method\nto setup the details of the second model.\n\nInstead of using TF Serving to host a model for WIT to query, WIT can\ndirectly use a custom function as the model to query. In this case, the\nprovided function should accept example protos and return:\n- For classification: A 2D list of numbers. The first dimension is for\neach example being predicted. The second dimension are the probabilities\nfor each class ID in the prediction.\n- For regression: A 1D list of numbers, with a regression score for each\nexample being predicted.\n\nArgs:\npredict_fn: The custom python function which will be used for model\ninference.\n\nReturns:\nself, in order to enabled method chaining.", "source": "codesearchnet"}
{"code": "def update_config(self, config, timeout=-1):\n        \n        return self._client.update(config, uri=self.URI + \"/config\", timeout=timeout)", "docstring": "Updates the remote server configuration and the automatic backup schedule for backup.\n\nArgs:\nconfig (dict): Object to update.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Backup details.", "source": "juraj-google-style"}
{"code": "def take_snapshot(self, snapshot_name, return_dict=True, power_off=False):\n    if ((power_off is True) and (self.status != 'off')):\n        action = self.power_off(return_dict=False)\n        action.wait()\n        self.load()\n    return self._perform_action({'type': 'snapshot', 'name': snapshot_name}, return_dict)", "docstring": "Take a snapshot!\n\nArgs:\nsnapshot_name (str): name of snapshot\n\nOptional Args:\nreturn_dict (bool): Return a dict when True (default),\notherwise return an Action.\npower_off (bool): Before taking the snapshot the droplet will be\nturned off with another API call. It will wait until the\ndroplet will be powered off.\n\nReturns dict or Action", "source": "codesearchnet"}
{"code": "def export(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin'], model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None, device: str='cpu') -> Tuple[List[str], List[str]]:\n    if not (is_torch_available() or is_tf_available()):\n        raise ImportError('Cannot convert because neither PyTorch nor TensorFlow are not installed. Please install torch or tensorflow first.')\n    if is_tf_available() and isinstance(model, TFPreTrainedModel) and (device == 'cuda'):\n        raise RuntimeError('`tf2onnx` does not support export on CUDA device.')\n    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:\n        raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.')\n    if tokenizer is not None:\n        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)\n        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')\n        preprocessor = tokenizer\n    if is_torch_available():\n        from ..utils import get_torch_version\n        if not config.is_torch_support_available:\n            logger.warning(f'Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}, got: {get_torch_version()}')\n    if is_torch_available() and issubclass(type(model), PreTrainedModel):\n        return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device)\n    elif is_tf_available() and issubclass(type(model), TFPreTrainedModel):\n        return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)", "docstring": "Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR)\n\nArgs:\npreprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):\nThe preprocessor used for encoding the data.\nmodel ([`PreTrainedModel`] or [`TFPreTrainedModel`]):\nThe model to export.\nconfig ([`~onnx.config.OnnxConfig`]):\nThe ONNX configuration associated with the exported model.\nopset (`int`):\nThe version of the ONNX operator set to use.\noutput (`Path`):\nDirectory to store the exported ONNX model.\ndevice (`str`, *optional*, defaults to `cpu`):\nThe device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for\nexport on CUDA devices.\n\nReturns:\n`Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from\nthe ONNX configuration.", "source": "github-repos"}
{"code": "def GetMessageStrings(cls, formatter_mediator, event):\n    formatter_object = cls.GetFormatterObject(event.data_type)\n    return formatter_object.GetMessages(formatter_mediator, event)", "docstring": "Retrieves the formatted message strings for a specific event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions between\nformatters and other components, such as storage and Windows EventLog\nresources.\nevent (EventObject): event.\n\nReturns:\nlist[str, str]: long and short version of the message string.", "source": "codesearchnet"}
{"code": "def get_certificate(self, id):\n        \n        return Certificate.get_object(api_token=self.token, cert_id=id)", "docstring": "Returns a Certificate object by its ID.\n\nArgs:\nid (str): Certificate ID", "source": "juraj-google-style"}
{"code": "def get_object(cls, api_token, ip):\n    floating_ip = cls(token=api_token, ip=ip)\n    floating_ip.load()\n    return floating_ip", "docstring": "Class method that will return a FloatingIP object by its IP.\n\nArgs:\napi_token: str - token\nip: str - floating ip address", "source": "codesearchnet"}
{"code": "def ValidateToken(token, targets):\n  \n\n  def GetSubjectForError():\n    if len(targets) == 1:\n      return list(targets)[0]\n    else:\n      return None\n\n  \n  if not token:\n    raise access_control.UnauthorizedAccess(\n        \"Must give an authorization token for %s\" % targets,\n        subject=GetSubjectForError())\n\n  \n  token.CheckExpiry()\n\n  \n  if not token.username:\n    raise access_control.UnauthorizedAccess(\n        \"Must specify a username for access to %s.\" % targets,\n        subject=GetSubjectForError())\n\n  return True", "docstring": "Does basic token validation.\n\nArgs:\ntoken: User's credentials as access_control.ACLToken.\ntargets: List of targets that were meant to be accessed by the token. This\nis used for logging purposes only.\n\nReturns:\nTrue if token is valid.\n\nRaises:\naccess_control.UnauthorizedAccess: if token is not valid.\nValueError: if targets list is empty.", "source": "juraj-google-style"}
{"code": "def append_block(self, node, reverse=False):\n    if (not isinstance(node, grammar.STATEMENTS)):\n        raise ValueError\n    if reverse:\n        self.to_append_block[(- 1)].appendleft(node)\n    else:\n        self.to_append_block[(- 1)].append(node)", "docstring": "Append a statement to the current block.\n\nArgs:\nnode: The statement to prepend.\nreverse: When called multiple times, this flag determines whether the\nstatement should be prepended or appended to the already inserted\nstatements.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "codesearchnet"}
{"code": "def BuildServiceStub(self, cls):\n\n    def _ServiceStubInit(stub, rpc_channel):\n        stub.rpc_channel = rpc_channel\n    self.cls = cls\n    cls.__init__ = _ServiceStubInit\n    for method in self.descriptor.methods:\n        setattr(cls, method.name, self._GenerateStubMethod(method))", "docstring": "Constructs the stub class.\n\nArgs:\ncls: The class that will be constructed.", "source": "codesearchnet"}
{"code": "def _EvaluateExpression(frame, expression):\n  \n  try:\n    code = compile(expression, '<watched_expression>', 'eval')\n  except (TypeError, ValueError) as e:\n    \n    return (False, {\n        'isError': True,\n        'refersTo': 'VARIABLE_NAME',\n        'description': {\n            'format': 'Invalid expression',\n            'parameters': [str(e)]}})\n  except SyntaxError as e:\n    return (False, {\n        'isError': True,\n        'refersTo': 'VARIABLE_NAME',\n        'description': {\n            'format': 'Expression could not be compiled: $0',\n            'parameters': [e.msg]}})\n\n  try:\n    return (True, native.CallImmutable(frame, code))\n  except BaseException as e:  \n    return (False, {\n        'isError': True,\n        'refersTo': 'VARIABLE_VALUE',\n        'description': {\n            'format': 'Exception occurred: $0',\n            'parameters': [str(e)]}})", "docstring": "Compiles and evaluates watched expression.\n\nArgs:\nframe: evaluation context.\nexpression: watched expression to compile and evaluate.\n\nReturns:\n(False, status) on error or (True, value) on success.", "source": "juraj-google-style"}
{"code": "def union(cls, *mhs):\n        \n        if len(mhs) < 2:\n            raise ValueError(\"Cannot union less than 2 MinHash\")\n        num_perm = len(mhs[0])\n        seed = mhs[0].seed\n        if any((seed != m.seed or num_perm != len(m)) for m in mhs):\n            raise ValueError(\"The unioning MinHash must have the\\\n                    same seed and number of permutation functions\")\n        hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])\n        permutations = mhs[0].permutations\n        return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues,\n                permutations=permutations)", "docstring": "Create a MinHash which is the union of the MinHash objects passed as arguments.\n\nArgs:\n*mhs: The MinHash objects to be united. The argument list length is variable,\nbut must be at least 2.\n\nReturns:\ndatasketch.MinHash: A new union MinHash.", "source": "juraj-google-style"}
{"code": "def __init__(self, tcex, name):\n        \n        self._name = name\n        self._tcex = tcex\n        self._type = 'tags'\n        self._api_sub_type = None\n        self._api_type = None\n        self._api_entity = 'tag'\n\n        self._utils = TcExUtils()\n        self._tc_requests = TiTcRequest(self._tcex)", "docstring": "Initialize Class Properties.\n\nArgs:\ngroup_type (str): The ThreatConnect define Group type.\nname (str): The name for this Group.\nxid (str, kwargs): The external id for this Group.", "source": "juraj-google-style"}
{"code": "def set(msg_or_dict, key, value):\n    \n    \n    if not isinstance(msg_or_dict, (collections_abc.MutableMapping, message.Message)):\n        raise TypeError(\n            \"set() expected a dict or protobuf message, got {!r}.\".format(\n                type(msg_or_dict)\n            )\n        )\n\n    \n    basekey, subkey = _resolve_subkeys(key)\n\n    \n    \n    if subkey is not None:\n        if isinstance(msg_or_dict, collections_abc.MutableMapping):\n            msg_or_dict.setdefault(basekey, {})\n        set(get(msg_or_dict, basekey), subkey, value)\n        return\n\n    if isinstance(msg_or_dict, collections_abc.MutableMapping):\n        msg_or_dict[key] = value\n    else:\n        _set_field_on_message(msg_or_dict, key, value)", "docstring": "Set a key's value on a protobuf Message or dictionary.\n\nArgs:\nmsg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the\nobject.\nkey (str): The key to set.\nvalue (Any): The value to set.\n\nRaises:\nTypeError: If ``msg_or_dict`` is not a Message or dictionary.", "source": "juraj-google-style"}
{"code": "def _ParseEventData(self, variable_length_section):\n    event_data = WinJobEventData()\n    event_data.application = variable_length_section.application_name.rstrip('\\x00')\n    event_data.comment = variable_length_section.comment.rstrip('\\x00')\n    event_data.parameters = variable_length_section.parameters.rstrip('\\x00')\n    event_data.username = variable_length_section.author.rstrip('\\x00')\n    event_data.working_directory = variable_length_section.working_directory.rstrip('\\x00')\n    return event_data", "docstring": "Parses the event data form a variable-length data section.\n\nArgs:\nvariable_length_section (job_variable_length_data_section): a\nWindows Scheduled Task job variable-length data section.\n\nReturns:\nWinJobEventData: event data of the job file.", "source": "codesearchnet"}
{"code": "def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):\n    if not is_torch_available():\n        raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')\n    import torch\n    from torch.onnx import export\n    print(f'Using framework PyTorch: {torch.__version__}')\n    with torch.no_grad():\n        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, 'pt')\n        ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)\n        export(nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)", "docstring": "Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR\n\nArgs:\nnlp: The pipeline to be exported\nopset: The actual version of the ONNX operator set to use\noutput: Path where will be stored the generated ONNX model\nuse_external_format: Split the model definition from its parameters to allow model bigger than 2GB\n\nReturns:", "source": "github-repos"}
{"code": "def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]):\n    batch_dims, dtype, device, requires_grad = (None, None, None, None)\n    if trans is not None:\n        batch_dims = trans.shape[:-1]\n        dtype = trans.dtype\n        device = trans.device\n        requires_grad = trans.requires_grad\n    elif rots is not None:\n        batch_dims = rots.shape\n        dtype = rots.dtype\n        device = rots.device\n        requires_grad = rots.requires_grad\n    else:\n        raise ValueError('At least one input argument must be specified')\n    if rots is None:\n        rots = Rotation.identity(batch_dims, dtype, device, requires_grad)\n    elif trans is None:\n        trans = identity_trans(batch_dims, dtype, device, requires_grad)\n    assert rots is not None\n    assert trans is not None\n    if rots.shape != trans.shape[:-1] or rots.device != trans.device:\n        raise ValueError('Rots and trans incompatible')\n    trans = trans.to(dtype=torch.float32)\n    self._rots = rots\n    self._trans = trans", "docstring": "Args:\nrots: A [*, 3, 3] rotation tensor\ntrans: A corresponding [*, 3] translation tensor", "source": "github-repos"}
{"code": "def __init__(self, current_frame):\n        \n        self.ignore_unknown_dtypes = False\n        self.meta_params = dict()\n        self.method_calling = inspect.getframeinfo(current_frame)[2]\n\n        _, _, __, self.vals_current = inspect.getargvalues(current_frame)\n        \n        if 'self' in self.vals_current:\n            self.recorded_class_type = self.vals_current['self']\n            \n            self.meta_params['AgentName'] = str(self.vals_current['self'])\n\n        frame_list = inspect.getouterframes(current_frame)\n\n        for frame in frame_list:\n            \n            args, varargs, keywords, vals = inspect.getargvalues(frame[0])\n            if 'self' in vals:\n                if self.recorded_class_type == vals['self']:\n                    for i in args:\n                        self.meta_params[i] = vals[i]\n        \n        del self.meta_params['self']", "docstring": "Init the MetaPrameterRecord with \"Agent\" parameters by passing inspect.currentframe() from Agent Class.\n\nThe Init will search back to find the parent class to capture all passed parameters and store\nthem in \"self.meta_params\".\n\nNOTE: Currently only optimized for TensorBoard output.\n\nTODO: Add JSON Export, TEXT EXPORT\n\nArgs:\ncurrent_frame: Frame value from class to obtain metaparameters[= inspect.currentframe()]", "source": "juraj-google-style"}
{"code": "def DeserializeFromDB(buffer):\n        \n        m = StreamManager.GetStream(buffer)\n        reader = BinaryReader(m)\n        c = ContractState()\n        c.Deserialize(reader)\n\n        StreamManager.ReleaseStream(m)\n\n        return c", "docstring": "Deserialize full object.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nContractState:", "source": "juraj-google-style"}
{"code": "def release_readme_verify():\n    version = '{version}'\n    expected = populate_readme(version, version, pypi='', pypi_img='', versions='\\n\\n', versions_img='', circleci_badge=CIRCLECI_BADGE_RELEASE, circleci_path='/{circleci_build}', travis_badge=TRAVIS_BADGE_RELEASE, travis_path='/builds/{travis_build}', appveyor_badge=APPVEYOR_BADGE_RELEASE, appveyor_path='/build/{appveyor_build}', coveralls_badge=COVERALLS_BADGE_RELEASE, coveralls_path='builds/{coveralls_build}')\n    with open(RELEASE_README_FILE, 'r') as file_obj:\n        contents = file_obj.read()\n    if (contents != expected):\n        err_msg = ('\\n' + get_diff(contents, expected, 'README.rst.release.actual', 'README.rst.release.expected'))\n        raise ValueError(err_msg)\n    else:\n        print('README.rst.release.template contents are as expected.')", "docstring": "Specialize the template to a PyPI release template.\n\nOnce populated, compare to ``README.rst.release.template``.\n\nRaises:\nValueError: If the current template doesn't agree with the expected\nvalue specialized from the template.", "source": "codesearchnet"}
{"code": "def fasta_files_equal(seq_file1, seq_file2):\n    seq1 = SeqIO.read(open(seq_file1), 'fasta')\n    seq2 = SeqIO.read(open(seq_file2), 'fasta')\n    if (str(seq1.seq) == str(seq2.seq)):\n        return True\n    else:\n        return False", "docstring": "Check equality of a FASTA file to another FASTA file\n\nArgs:\nseq_file1: Path to a FASTA file\nseq_file2: Path to another FASTA file\n\nReturns:\nbool: If the sequences are the same", "source": "codesearchnet"}
{"code": "def Close(self):\n    if (not self._is_open):\n        raise IOError('Storage file already closed.')\n    if (not self._read_only):\n        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT_SOURCE)\n        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT_DATA)\n        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)\n        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT_TAG)\n        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EXTRACTION_WARNING)\n    if self._connection:\n        self._connection.commit()\n        self._connection.close()\n        self._connection = None\n        self._cursor = None\n    self._is_open = False", "docstring": "Closes the storage.\n\nRaises:\nIOError: if the storage file is already closed.\nOSError: if the storage file is already closed.", "source": "codesearchnet"}
{"code": "def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n    mask = np.zeros(output_size, dtype=np.int64)\n    mask[:input_height, :input_width] = 1\n    return mask", "docstring": "Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.\n\nArgs:\nimage (`np.ndarray`):\nImage to make the pixel mask for.\noutput_size (`Tuple[int, int]`):\nOutput size of the mask.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(MACSignatureKeyInformation, self).read(\n            input_stream,\n            kmip_version=kmip_version\n        )\n        local_stream = BytearrayStream(input_stream.read(self.length))\n\n        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n            self._unique_identifier = primitives.TextString(\n                tag=enums.Tags.UNIQUE_IDENTIFIER\n            )\n            self._unique_identifier.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        else:\n            raise ValueError(\n                \"Invalid struct missing the unique identifier attribute.\"\n            )\n\n        if self.is_tag_next(\n                enums.Tags.CRYPTOGRAPHIC_PARAMETERS,\n                local_stream\n        ):\n            self._cryptographic_parameters = CryptographicParameters()\n            self._cryptographic_parameters.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.is_oversized(local_stream)", "docstring": "Read the data encoding the MACSignatureKeyInformation struct and\ndecode it into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def user(self, user: str) -> \"ChildHTTPAPI\":\n        \n        if self.is_real_user:\n            raise ValueError(\"Can't get child of real user\")\n\n        try:\n            return self.children[user]\n        except KeyError:\n            child = ChildHTTPAPI(user, self)\n            self.children[user] = child\n            return child", "docstring": "Get a child HTTPAPI instance.\n\nArgs:\nuser: The Matrix ID of the user whose API to get.\n\nReturns:\nA HTTPAPI instance that always uses the given Matrix ID.", "source": "juraj-google-style"}
{"code": "def check_cache(resource_type):\n\n    def decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                adapter = args[0]\n                (key, val) = list(kwargs.items())[0]\n            except IndexError:\n                logger.warning(\"Couldn't generate full index key, skipping cache\")\n            else:\n                index_key = (resource_type, key, val)\n                try:\n                    cached_record = adapter._swimlane.resources_cache[index_key]\n                except KeyError:\n                    logger.debug('Cache miss: `{!r}`'.format(index_key))\n                else:\n                    logger.debug('Cache hit: `{!r}`'.format(cached_record))\n                    return cached_record\n            return func(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "Decorator for adapter methods to check cache for resource before normally sending requests to retrieve data\n\nOnly works with single kwargs, almost always used with @one_of_keyword_only decorator\n\nArgs:\nresource_type (type(APIResource)): Subclass of APIResource of cache to be checked when called", "source": "codesearchnet"}
{"code": "def impad(img, shape, pad_val=0):\n    if (not isinstance(pad_val, (int, float))):\n        assert (len(pad_val) == img.shape[(- 1)])\n    if (len(shape) < len(img.shape)):\n        shape = (shape + (img.shape[(- 1)],))\n    assert (len(shape) == len(img.shape))\n    for i in range((len(shape) - 1)):\n        assert (shape[i] >= img.shape[i])\n    pad = np.empty(shape, dtype=img.dtype)\n    pad[...] = pad_val\n    pad[(:img.shape[0], :img.shape[1], ...)] = img\n    return pad", "docstring": "Pad an image to a certain shape.\n\nArgs:\nimg (ndarray): Image to be padded.\nshape (tuple): Expected padding shape.\npad_val (number or sequence): Values to be filled in padding areas.\n\nReturns:\nndarray: The padded image.", "source": "codesearchnet"}
{"code": "def crop(img, i, j, h, w):\n    \n    if not _is_pil_image(img):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n    return img.crop((j, i, j + w, i + h))", "docstring": "Crop the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be cropped.\ni (int): i in (i,j) i.e coordinates of the upper left corner.\nj (int): j in (i,j) i.e coordinates of the upper left corner.\nh (int): Height of the cropped image.\nw (int): Width of the cropped image.\n\nReturns:\nPIL Image: Cropped image.", "source": "juraj-google-style"}
{"code": "def sys_update_char(asciiCode: int, fontx: int, fonty: int, img: tcod.image.Image, x: int, y: int) -> None:\n    lib.TCOD_sys_update_char(_int(asciiCode), fontx, fonty, img, x, y)", "docstring": "Dynamically update the current font with img.\n\nAll cells using this asciiCode will be updated\nat the next call to :any:`tcod.console_flush`.\n\nArgs:\nasciiCode (int): Ascii code corresponding to the character to update.\nfontx (int): Left coordinate of the character\nin the bitmap font (in tiles)\nfonty (int): Top coordinate of the character\nin the bitmap font (in tiles)\nimg (Image): An image containing the new character bitmap.\nx (int): Left pixel of the character in the image.\ny (int): Top pixel of the character in the image.", "source": "codesearchnet"}
{"code": "def Equals(self, other):\n        \n        if other is None:\n            return False\n        if other.PrevHash.ToBytes() == self.PrevHash.ToBytes() and other.PrevIndex == self.PrevIndex:\n            return True\n        return False", "docstring": "Test for equality.\n\nArgs:\nother (obj):\n\nReturns:\nbool: True `other` equals self.", "source": "juraj-google-style"}
{"code": "def get_comments(self, sharekey=None):\n        \n        if not sharekey:\n            raise Exception(\n                \"You must specify a sharekey of the file you\"\n                \"want to 'like'.\")\n\n        endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)\n\n        data = self._make_request(\"GET\", endpoint=endpoint)\n\n        return [Comment.NewFromJSON(c) for c in data['comments']]", "docstring": "Retrieve comments on a SharedFile\n\nArgs:\nsharekey (str): Sharekey for the file from which you want to return\nthe set of comments.\n\nReturns:\nList of Comment objects.", "source": "juraj-google-style"}
{"code": "def tokenize(self, text, never_split=None):\n    never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n    text = self._clean_text(text)\n    if self.tokenize_chinese_chars:\n        text = self._tokenize_chinese_chars(text)\n    unicode_normalized_text = unicodedata.normalize('NFC', text)\n    orig_tokens = whitespace_tokenize(unicode_normalized_text)\n    split_tokens = []\n    for token in orig_tokens:\n        if token not in never_split:\n            if self.do_lower_case:\n                token = token.lower()\n                if self.strip_accents is not False:\n                    token = self._run_strip_accents(token)\n            elif self.strip_accents:\n                token = self._run_strip_accents(token)\n        split_tokens.extend(self._run_split_on_punc(token, never_split))\n    output_tokens = whitespace_tokenize(' '.join(split_tokens))\n    return output_tokens", "docstring": "Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.\n\nArgs:\nnever_split (`List[str]`, *optional*)\nKept for backward compatibility purposes. Now implemented directly at the base class level (see\n[`PreTrainedTokenizer.tokenize`]) List of token not to split.", "source": "github-repos"}
{"code": "def upload_predictions(self, file_path, tournament=1):\n    self.logger.info('uploading predictions...')\n    auth_query = '\\n            query($filename: String!\\n                  $tournament: Int!) {\\n                submission_upload_auth(filename: $filename\\n                                       tournament: $tournament) {\\n                    filename\\n                    url\\n                }\\n            }\\n            '\n    arguments = {'filename': os.path.basename(file_path), 'tournament': tournament}\n    submission_resp = self.raw_query(auth_query, arguments, authorization=True)\n    submission_auth = submission_resp['data']['submission_upload_auth']\n    with open(file_path, 'rb') as fh:\n        requests.put(submission_auth['url'], data=fh.read())\n    create_query = '\\n            mutation($filename: String!\\n                     $tournament: Int!) {\\n                create_submission(filename: $filename\\n                                  tournament: $tournament) {\\n                    id\\n                }\\n            }\\n            '\n    arguments = {'filename': submission_auth['filename'], 'tournament': tournament}\n    create = self.raw_query(create_query, arguments, authorization=True)\n    self.submission_id = create['data']['create_submission']['id']\n    return self.submission_id", "docstring": "Upload predictions from file.\n\nArgs:\nfile_path (str): CSV file with predictions that will get uploaded\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\nstr: submission_id\n\nExample:\n>>> api = NumerAPI(secret_key=\"..\", public_id=\"..\")\n>>> api.upload_predictions()\n'93c46857-fed9-4594-981e-82db2b358daf'", "source": "codesearchnet"}
{"code": "def forward(self, outputs, targets):\n    batch_size, num_queries = outputs['logits'].shape[:2]\n    out_bbox = outputs['pred_boxes'].flatten(0, 1)\n    target_ids = torch.cat([v['class_labels'] for v in targets])\n    target_bbox = torch.cat([v['boxes'] for v in targets])\n    if self.use_focal_loss:\n        out_prob = F.sigmoid(outputs['logits'].flatten(0, 1))\n        out_prob = out_prob[:, target_ids]\n        neg_cost_class = (1 - self.alpha) * out_prob ** self.gamma * -(1 - out_prob + 1e-08).log()\n        pos_cost_class = self.alpha * (1 - out_prob) ** self.gamma * -(out_prob + 1e-08).log()\n        class_cost = pos_cost_class - neg_cost_class\n    else:\n        out_prob = outputs['logits'].flatten(0, 1).softmax(-1)\n        class_cost = -out_prob[:, target_ids]\n    bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)\n    giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))\n    cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost\n    cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()\n    sizes = [len(v['boxes']) for v in targets]\n    indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]\n    return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]", "docstring": "Performs the matching\n\nParams:\noutputs: This is a dict that contains at least these entries:\n\"logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n\"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\ntargets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n\"class_labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\nobjects in the target) containing the class labels\n\"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\nReturns:\nA list of size batch_size, containing tuples of (index_i, index_j) where:\n- index_i is the indices of the selected predictions (in order)\n- index_j is the indices of the corresponding selected targets (in order)\nFor each batch element, it holds:\nlen(index_i) = len(index_j) = min(num_queries, num_target_boxes)", "source": "github-repos"}
{"code": "def __init__(self, lst):\n        \n\n        list.__init__(self, lst)\n        self.server = None\n        self.port = find_free_port()\n        self.html_path = get_cur_path()+'/data/math_list/index.html'", "docstring": "MathList Constructor\n\ntodo:: share a port among lists. Or maybe close the server after serving from it?\n\nArgs:\nlst (list):  A list of LaTeX math to be rendered by KaTeX\n\nReturns:\nA math list object\n\nUsage example\n>>> lst = [\"\\int x = y\", \"x + 6\"]\n>>> MathList(lst)\n... see nicely formatted math.", "source": "juraj-google-style"}
{"code": "def FlatMap(fn=identity, *args, **kwargs):\n    label = 'FlatMap(%s)' % ptransform.label_from_callable(fn)\n    if not callable(fn):\n        raise TypeError('FlatMap can be used only with callable objects. Received %r instead.' % fn)\n    pardo = ParDo(CallableWrapperDoFn(fn), *args, **kwargs)\n    pardo.label = label\n    return pardo", "docstring": ":func:`FlatMap` is like :class:`ParDo` except it takes a callable to\nspecify the transformation.\n\nThe callable must return an iterable for each element of the input\n:class:`~apache_beam.pvalue.PCollection`. The elements of these iterables will\nbe flattened into the output :class:`~apache_beam.pvalue.PCollection`. If\nno callable is given, then all elements of the input PCollection must already\nbe iterables themselves and will be flattened into the output PCollection.\n\nArgs:\nfn (callable): a callable object.\n*args: positional arguments passed to the transform callable.\n**kwargs: keyword arguments passed to the transform callable.\n\nReturns:\n~apache_beam.pvalue.PCollection:\nA :class:`~apache_beam.pvalue.PCollection` containing the\n:func:`FlatMap` outputs.\n\nRaises:\nTypeError: If the **fn** passed as argument is not a callable.\nTypical error is to pass a :class:`DoFn` instance which is supported only\nfor :class:`ParDo`.", "source": "github-repos"}
{"code": "def find_user(cls, session, mailbox, user):\n    return cls(('/mailboxes/%d/users/%s/conversations.json' % (mailbox.id, user.id)), session=session)", "docstring": "Return conversations for a specific user in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox to search.\nuser (helpscout.models.User): User to search for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "codesearchnet"}
{"code": "def create_single_fc_model(fingerprint_input, model_settings, is_training):\n    if is_training:\n        dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')\n    fingerprint_size = model_settings['fingerprint_size']\n    label_count = model_settings['label_count']\n    weights = tf.compat.v1.get_variable(name='weights', initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.001), shape=[fingerprint_size, label_count])\n    bias = tf.compat.v1.get_variable(name='bias', initializer=tf.compat.v1.zeros_initializer, shape=[label_count])\n    logits = tf.matmul(fingerprint_input, weights) + bias\n    if is_training:\n        return (logits, dropout_rate)\n    else:\n        return logits", "docstring": "Builds a model with a single hidden fully-connected layer.\n\nThis is a very simple model with just one matmul and bias layer. As you'd\nexpect, it doesn't produce very accurate results, but it is very fast and\nsimple, so it's useful for sanity testing.\n\nHere's the layout of the graph:\n\n(fingerprint_input)\nv\n[MatMul]<-(weights)\nv\n[BiasAdd]<-(bias)\nv\n\nArgs:\nfingerprint_input: TensorFlow node that will output audio feature vectors.\nmodel_settings: Dictionary of information about the model.\nis_training: Whether the model is going to be used for training.\n\nReturns:\nTensorFlow node outputting logits results, and optionally a dropout\nplaceholder.", "source": "github-repos"}
{"code": "def list_files(root, suffix, prefix=False):\n    root = os.path.expanduser(root)\n    files = list(filter((lambda p: (os.path.isfile(os.path.join(root, p)) and p.endswith(suffix))), os.listdir(root)))\n    if (prefix is True):\n        files = [os.path.join(root, d) for d in files]\n    return files", "docstring": "List all files ending with a suffix at a given root\n\nArgs:\nroot (str): Path to directory whose folders need to be listed\nsuffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').\nIt uses the Python \"str.endswith\" method and is passed directly\nprefix (bool, optional): If true, prepends the path to each result, otherwise\nonly returns the name of the files found", "source": "codesearchnet"}
{"code": "def _kl_dirichlet_dirichlet(d1, d2, name=None):\n    with ops.name_scope(name, 'kl_dirichlet_dirichlet', values=[d1.concentration, d2.concentration]):\n        digamma_sum_d1 = math_ops.digamma(math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))\n        digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1\n        concentration_diff = d1.concentration - d2.concentration\n        return math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) - special_math_ops.lbeta(d1.concentration) + special_math_ops.lbeta(d2.concentration)", "docstring": "Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.\n\nArgs:\nd1: instance of a Dirichlet distribution object.\nd2: instance of a Dirichlet distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_dirichlet_dirichlet\".\n\nReturns:\nBatchwise KL(d1 || d2)", "source": "github-repos"}
{"code": "def delete(self, resource_id):\n        \n\n        endpoint = '{}/{}'.format(self.endpoint, resource_id)\n\n        response = self.api.execute(\"DELETE\", endpoint)\n\n        if not response.ok:\n            raise Error.parse(response.json())\n\n        return self._cls.parse(response.json())", "docstring": "Deletes an existing resource\n\nArgs:\nresource_id - int - The resource ID to be deleted", "source": "juraj-google-style"}
{"code": "def fetch_resource(url):\n    \n    try:\n        data = get_request(url)\n        lines = data.split('\\n')\n    except Exception as err:\n        raise err\n    \n    return lines", "docstring": "Fetch a resource and return the resulting lines in a list\nSend file_name to get more clean log messages\n\nArgs:\nurl(str)\n\nReturns:\nlines(list(str))", "source": "juraj-google-style"}
{"code": "def save(self, new_path=None):\n    self.saved_in_temp = (new_path is None)\n    if (new_path is None):\n        (fd, new_path) = tempfile.mkstemp()\n        os.close(fd)\n    if self.current_path:\n        shutil.move(self.current_path, new_path)\n    else:\n        with open(new_path, 'wb') as dest:\n            _copy_stream(self._data, dest, self._size)\n    self.current_path = new_path", "docstring": "Moves or creates the file with stream contents to a new location.\n\nArgs:\nnew_path: path to move to, if None a temporary file is created.", "source": "codesearchnet"}
{"code": "def __init__(self, expression, options):\n        \n        self._expression = expression\n        self._options = options", "docstring": "Initializes a new instance of the ExpressionParser class\nArgs:\nexpression: The cron expression string\noptions: Parsing options", "source": "juraj-google-style"}
{"code": "def update(self, **kwargs):\n        \n        updated = False\n\n        for prop in self.class_properties:\n            key = prop['key']\n            kwarg_key = to_camelcase(key)\n            if kwarg_key in kwargs:\n                if prop['required'] and not kwargs[kwarg_key]:\n                    raise InquisitorError('Missing required property {}'.format(prop['name']))\n\n                updated |= self.set_property(key, kwargs[kwarg_key])\n\n        return updated", "docstring": "Updates the object information based on live data, if there were any changes made. Any changes will be\nautomatically applied to the object, but will not be automatically persisted. You must manually call\n`db.session.add(object)` on the object.\n\nArgs:\n**kwargs (:obj:): AWS API Resource object fetched from AWS API\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def getPoly(rCut, nMax):\n    rCutVeryHard = (rCut + 5.0)\n    rx = ((0.5 * rCutVeryHard) * (x + 1))\n    basisFunctions = []\n    for i in range(1, (nMax + 1)):\n        basisFunctions.append((lambda rr, i=i, rCut=rCut: ((rCut - np.clip(rr, 0, rCut)) ** (i + 2))))\n    S = np.zeros((nMax, nMax))\n    for i in range(1, (nMax + 1)):\n        for j in range(1, (nMax + 1)):\n            S[((i - 1), (j - 1))] = ((2 * (rCut ** ((7 + i) + j))) / ((((5 + i) + j) * ((6 + i) + j)) * ((7 + i) + j)))\n    betas = sqrtm(np.linalg.inv(S))\n    if (betas.dtype == np.complex128):\n        raise ValueError('Could not calculate normalization factors for the polynomial basis in the domain of real numbers. Lowering the number of radial basis functions is advised.')\n    fs = np.zeros([nMax, len(x)])\n    for n in range(1, (nMax + 1)):\n        fs[((n - 1), :)] = ((rCut - np.clip(rx, 0, rCut)) ** (n + 2))\n    gss = np.dot(betas, fs)\n    return (nMax, rx, gss)", "docstring": "Used to calculate discrete vectors for the polynomial basis functions.\n\nArgs:\nrCut(float): Radial cutoff\nnMax(int): Number of polynomial radial functions", "source": "codesearchnet"}
{"code": "def while_loop(condition: Callable[..., Any], body: Callable[..., Any], inputs: Optional[List[Any]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, name: Any=None) -> Any:\n    del name\n    inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]\n    input_types = [x.dtype for x in inputs]\n    input_arity = len(inputs)\n    body_arg_error = xla.check_function_argument_count(body, input_arity, infeed_queue)\n    if body_arg_error is not None:\n        if infeed_queue is None:\n            raise TypeError(f'Supplied loop body function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop body needs {body_arg_error}')\n        else:\n            raise TypeError(f'Supplied loop body function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]} and {infeed_queue.number_of_tuple_elements} additional inputs from infeed, but the computation needs {body_arg_error}')\n    condition_arg_error = xla.check_function_argument_count(condition, input_arity, None)\n    if condition_arg_error is not None:\n        if infeed_queue is None:\n            raise TypeError(f'Supplied loop condition function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop condition needs {condition_arg_error}')\n        else:\n            raise TypeError(f'Supplied loop condition function cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop condition needs {condition_arg_error}. Note that infeed is not passed to the loop condition.')\n\n    def condition_wrapper(*inputs):\n        if input_arity == 0:\n            inputs = []\n        return condition(*inputs)\n\n    def body_wrapper(*inputs):\n        \n        inputs = list(inputs)\n        if input_arity == 0:\n            inputs = []\n        if infeed_queue:\n            number_of_shards = tpu_function.get_tpu_context().number_of_shards\n            if number_of_shards is None:\n                raise ValueError(\"Can't build training loop with infeed when there is no tpu_shard_context. 
Are you building a loop or graph directly rather than from inside tpu.rewrite, tpu.batch_parallel, tpu.shard, or tpu.replicate?\")\n            infeed_queue.set_number_of_shards(number_of_shards)\n            dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()]\n        else:\n            dequeue_ops = []\n        outputs = body(*inputs + dequeue_ops)\n        if not isinstance(outputs, (list, tuple)):\n            outputs = (outputs,)\n        outputs = [o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o) for o in outputs]\n        output_operations = [o for o in outputs if isinstance(o, ops.Operation)]\n        output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]\n        if outputs != output_tensors + output_operations:\n            raise ValueError('TPU training loop body must return zero or more Tensor values followed by zero or more Operations.')\n        output_types = [op.dtype for op in output_tensors]\n        if input_types != output_types:\n            raise TypeError('Mismatch between input types and output types for training loop body: {} vs {}'.format(input_types, output_types))\n        output_operations += dequeue_ops\n        if not output_tensors:\n            output_tensors = array_ops.constant(0)\n        if output_operations:\n            output_tensors = control_flow_ops.tuple(output_tensors, control_inputs=output_operations)\n        if tensor_tracer.TensorTracer.is_enabled():\n            num_replicas = tpu_function.get_tpu_context().number_of_shards\n            if num_replicas is None:\n                num_replicas = 1\n            tt = tensor_tracer.TensorTracer()\n            output_tensors = tt.trace_tpu(ops.get_default_graph(), output_tensors, None, num_replicas)\n        return output_tensors\n    if input_arity == 0:\n        inputs = [array_ops.constant(0)]\n    return while_loop_tf.while_loop(condition_wrapper, body_wrapper, inputs, name='', parallel_iterations=1)", "docstring": "Builds a training loop for TPUs.\n\nThe set of loop-carried tensors corresponds to `inputs`.  Both\n`condition` and `body` take the current value of the loop-carried\ntensors. 'body' additionally takes a tuple of infeed from\ninfeed_queue if infeed_queue is not None. `condition` must return a\nsingle boolean value that determines whether iteration\ncontinues. `body` must return an updated list of values for the\nloop-carried tensors.\n\nArgs:\ncondition: a Python function that builds the loop condition.\nbody: a Python function that builds the loop body.\ninputs: a list of initial values passed into the training loop, or None\n(equivalent to an empty list).\ninfeed_queue: if not None, the infeed queue from which to append a tuple of\narguments as inputs to condition.\nname: (Deprecated) Does nothing.\n\nReturns:\nThe final values of the loop-carried tensors.\n\nRaises:\nTypeError: if body or condition has the wrong signature.", "source": "github-repos"}
{"code": "def _send_trace(self, chunk=None):\n        \n\n        self._trace_sm_running = True\n        \n        if chunk is None:\n            chunk = self._next_tracing_chunk(20)\n\n        if chunk is None or len(chunk) == 0:\n            self._trace_sm_running = False\n            return\n\n        try:\n            self._send_notification(TracingChar.value_handle, chunk)\n            self._defer(self._send_trace)\n        except bable_interface.BaBLEException as err:\n            if err.packet.status == 'Rejected':  \n                time.sleep(0.05)\n                self._defer(self._send_trace, [chunk])\n            else:\n                self._audit('ErrorStreamingTrace')  \n                self._logger.exception(\"Error while tracing data\")", "docstring": "Stream tracing data to the ble client in 20 byte chunks\n\nArgs:\nchunk (bytearray): A chunk that should be sent instead of requesting a\nnew chunk from the pending reports.", "source": "juraj-google-style"}
{"code": "def create_token(self,\n                     token_name,\n                     project_name,\n                     dataset_name,\n                     is_public):\n        \n        return self.resources.create_token(token_name,\n                                           project_name,\n                                           dataset_name,\n                                           is_public)", "docstring": "Creates a token with the given parameters.\nArguments:\nproject_name (str): Project name\ndataset_name (str): Dataset name project is based on\ntoken_name (str): Token name\nis_public (int): 1 is public. 0 is not public\nReturns:\nbool: True if project created, false if not created.", "source": "juraj-google-style"}
{"code": "def _pyval_find_struct_keys_and_depth(pyval, keys):\n    if isinstance(pyval, dict):\n        keys.update(pyval.keys())\n        return 0\n    elif isinstance(pyval, (list, tuple)):\n        depth = None\n        for child in pyval:\n            child_depth = _pyval_find_struct_keys_and_depth(child, keys)\n            if child_depth is not None:\n                if depth is None:\n                    depth = child_depth + 1\n                elif depth != child_depth + 1:\n                    raise ValueError('Inconsistent depth of dictionaries')\n        return depth\n    else:\n        return None", "docstring": "Finds the keys & depth of nested dictionaries in `pyval`.\n\nArgs:\npyval: A nested structure of lists, tuples, and dictionaries.\nkeys: (output parameter) A set, which will be updated with any keys that are\nfound in the nested dictionaries.\n\nReturns:\nThe nesting depth of dictionaries in `pyval`, or `None` if `pyval` does\nnot contain any dictionaries.\nRaises:\nValueError: If dictionaries have inconsistent depth.", "source": "github-repos"}
{"code": "def repr(tick, pack=False):\n    if (tick == 9223372036854775807):\n        return '?'\n    dt = (datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=tick))\n    millis = (dt.microsecond / 1000)\n    if pack:\n        return ('%d%.2d%.2d%.2d%.2d%.2d%.3d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, millis))\n    return ('%d/%.2d/%.2d %.2d:%.2d:%.2d.%.3d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, millis))", "docstring": "Return a date string for an epoch-millis timestamp.\n\nArgs:\ntick (int): The timestamp in milliseconds since the epoch.\n\nReturns:\n(str):  A date time string", "source": "codesearchnet"}
{"code": "def _default_transform_fn(self, model, content, content_type, accept):\n    try:\n        data = self._input_fn(content, content_type)\n    except _errors.UnsupportedFormatError as e:\n        return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)\n    prediction = self._predict_fn(data, model)\n    try:\n        result = self._output_fn(prediction, accept)\n    except _errors.UnsupportedFormatError as e:\n        return self._error_response(e, http_client.NOT_ACCEPTABLE)\n    return result", "docstring": "Make predictions against the model and return a serialized response.\n\nThis serves as the default implementation of transform_fn, used when the user has not\nimplemented one themselves.\n\nArgs:\nmodel (obj): model loaded by model_fn.\ncontent: request content.\ncontent_type (str): the request Content-Type.\naccept (str): accept content-type expected by the client.\n\nReturns:\nsagemaker_containers.beta.framework.worker.Response or tuple:\nthe serialized response data and its content type, either as a Response object or\na tuple of the form (response_data, content_type)", "source": "codesearchnet"}
{"code": "def get_vocabulary(preprocess_output_dir, name):\n    vocab_file = os.path.join(preprocess_output_dir, (CATEGORICAL_ANALYSIS % name))\n    if (not file_io.file_exists(vocab_file)):\n        raise ValueError(('File %s not found in %s' % ((CATEGORICAL_ANALYSIS % name), preprocess_output_dir)))\n    labels = python_portable_string(file_io.read_file_to_string(vocab_file)).split('\\n')\n    label_values = [x for x in labels if x]\n    return label_values", "docstring": "Loads the vocabulary file as a list of strings.\n\nArgs:\npreprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.\nname: name of the csv column.\n\nReturns:\nList of strings.\n\nRaises:\nValueError: if file is missing.", "source": "codesearchnet"}
{"code": "def get_token(self,\n                  token_name,\n                  project_name,\n                  dataset_name):\n        \n        return self.resources.get_token(token_name,\n                                        project_name,\n                                        dataset_name)", "docstring": "Get a token with the given parameters.\nArguments:\nproject_name (str): Project name\ndataset_name (str): Dataset name project is based on\ntoken_name (str): Token name\nReturns:\ndict: Token info", "source": "juraj-google-style"}
{"code": "def trace_call(self, node, func, sigs, posargs, namedargs, result):\n    log.debug('Logging call to %r with %d args, return %r', func, len(posargs), result)\n    args = tuple(posargs)\n    kwargs = tuple((namedargs or {}).items())\n    record = _CallRecord(node, func, sigs, args, kwargs, result)\n    if isinstance(func.data, abstract.BoundPyTDFunction):\n        self._method_calls.add(record)\n    elif isinstance(func.data, abstract.PyTDFunction):\n        self._calls.add(record)", "docstring": "Add an entry into the call trace.\n\nArgs:\nnode: The CFG node right after this function call.\nfunc: A cfg.Binding of a function that was called.\nsigs: The signatures that the function might have been called with.\nposargs: The positional arguments, an iterable over cfg.Variable.\nnamedargs: The keyword arguments, a dict mapping str to cfg.Variable.\nresult: A Variable of the possible result values.", "source": "github-repos"}
{"code": "def ensuredir(dpath, mode=1023, verbose=None):\n    if (verbose is None):\n        verbose = 0\n    if isinstance(dpath, (list, tuple)):\n        dpath = join(*dpath)\n    if (not exists(dpath)):\n        if verbose:\n            print(('Ensuring new directory (%r)' % dpath))\n        if (sys.version_info.major == 2):\n            os.makedirs(normpath(dpath), mode=mode)\n        else:\n            os.makedirs(normpath(dpath), mode=mode, exist_ok=True)\n    elif verbose:\n        print(('Ensuring existing directory (%r)' % dpath))\n    return dpath", "docstring": "r\"\"\"\nEnsures that directory will exist. Creates new dir with sticky bits by\ndefault\n\nArgs:\ndpath (PathLike): dir to ensure. Can also be a tuple to send to join\nmode (int): octal mode of directory (default 0o1777)\nverbose (int): verbosity (default 0)\n\nReturns:\nPathLike: path: the ensured directory\n\nNotes:\nThis function is not thread-safe in Python2\n\nExample:\n>>> from ubelt.util_platform import *  # NOQA\n>>> import ubelt as ub\n>>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n>>> dpath = join(cache_dpath, 'ensuredir')\n>>> if exists(dpath):\n...     os.rmdir(dpath)\n>>> assert not exists(dpath)\n>>> ub.ensuredir(dpath)\n>>> assert exists(dpath)\n>>> os.rmdir(dpath)", "source": "codesearchnet"}
{"code": "def get_by_name(self, name):\n        \n        scopes = self._client.get_all()\n        result = [x for x in scopes if x['name'] == name]\n        return result[0] if result else None", "docstring": "Gets a Scope by name.\n\nArgs:\nname: Name of the Scope\n\nReturns:\ndict: Scope.", "source": "juraj-google-style"}
{"code": "def get_user(self, user_id):\n    try:\n        return get_user_model().objects.get(id=user_id)\n    except get_user_model().DoesNotExist:\n        return None", "docstring": "Get a user by their ID.\n\nArgs:\nuser_id:\nThe ID of the user to fetch.\n\nReturns:\nThe user with the specified ID if they exist and ``None``\notherwise.", "source": "codesearchnet"}
{"code": "def read_configuration_file(filepath=_give_default_file_path()):\n    \n    config = configparser.ConfigParser()\n    config.read(filepath)\n\n    def get_correct_type(section, key, config):\n        \n        def getstring(section, key, config):\n            return config[section][key]\n\n        def getinteger(section, key, config):  \n            return config[section].getint(key)\n\n        def getboolean(section, key, config):\n            return config[section].getboolean(key)\n\n        def getfloat(section, key, config):  \n            return config[section].getfloat(key)\n        special_actions = {}  \n        special_actions['defaults'] = {}\n        special_actions['defaults']['use_lookup'] = getboolean\n        try:\n            return special_actions[section][key](section, key, config)\n        except KeyError:\n            return getstring(section, key, config)\n\n    for section in config.sections():\n        for key in config[section]:\n            settings[section][key] = get_correct_type(section, key, config)\n    return settings", "docstring": "Read the configuration file.\n\n.. note:: This function changes ``cc.settings`` inplace and is\ntherefore not sideeffect free.\n\nArgs:\nfilepath (str): Where to read the file.\nThe default is under both UNIX and Windows ``~/.chemcoordrc``.\n\nReturns:\nNone:", "source": "juraj-google-style"}
{"code": "def which(program, path=None):\n    path = (path or os.environ['PATH'].split(os.pathsep))\n    abspath = (True if os.path.split(program)[0] else False)\n    if abspath:\n        if fs.isexe(program):\n            return program\n    else:\n        for directory in path:\n            directory = directory.strip('\"')\n            exe_file = os.path.join(directory, program)\n            if fs.isexe(exe_file):\n                return exe_file\n    return None", "docstring": "Returns the full path of shell commands.\n\nReplicates the functionality of system which (1) command. Looks\nfor the named program in the directories indicated in the $PATH\nenvironment variable, and returns the full path if found.\n\nExamples:\n\n>>> system.which(\"ls\")\n\"/bin/ls\"\n\n>>> system.which(\"/bin/ls\")\n\"/bin/ls\"\n\n>>> system.which(\"not-a-real-command\")\nNone\n\n>>> system.which(\"ls\", path=(\"/usr/bin\", \"/bin\"))\n\"/bin/ls\"\n\nArguments:\n\nprogram (str): The name of the program to look for. Can\nbe an absolute path.\npath (sequence of str, optional): A list of directories to\nlook for the pgoram in. Default value is system $PATH.\n\nReturns:\n\nstr: Full path to program if found, else None.", "source": "codesearchnet"}
{"code": "def check(self, dsm, **kwargs):\n        \n        layered_architecture = True\n        messages = []\n        categories = dsm.categories\n        dsm_size = dsm.size[0]\n\n        if not categories:\n            categories = ['appmodule'] * dsm_size\n\n        for i in range(0, dsm_size - 1):\n            for j in range(i + 1, dsm_size):\n                if (categories[i] != 'broker' and\n                        categories[j] != 'broker' and\n                        dsm.entities[i].split('.')[0] != dsm.entities[j].split('.')[0]):  \n                    if dsm.data[i][j] > 0:\n                        layered_architecture = False\n                        messages.append(\n                            'Dependency from %s to %s breaks the '\n                            'layered architecture.' % (\n                                dsm.entities[i], dsm.entities[j]))\n\n        return layered_architecture, '\\n'.join(messages)", "docstring": "Check layered architecture.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\n\nReturns:\nbool, str: True if layered architecture else False, messages", "source": "juraj-google-style"}
{"code": "def _build_rdf(self, data=None):\n    self.rdf = SimpleNamespace()\n    self.rdf.data = data\n    self.rdf.prefixes = SimpleNamespace()\n    self.rdf.uris = SimpleNamespace()\n    for (prefix, uri) in self.repo.context.items():\n        setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))\n    self._parse_graph()", "docstring": "Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph\n\nArgs:\ndata (): payload from GET request, expected RDF content in various serialization formats\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Send(self, client_id, data):", "docstring": "Sends a data to a GUI client of |client_id|.\n\nArgs:\nclient_id: an opaque ID or object for a GUI client for the outgoing data\nor response. It must be gotten by callback call set by Start().\ndata: an outgoing byte stream data to a GUI client.\n\nRaises:\nIOError: Cannot send data to the GUI client.", "source": "github-repos"}
{"code": "def from_dict(cls, feature_extractor_dict: dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:\n    return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n    to_remove = []\n    for key, value in kwargs.items():\n        if key in feature_extractor_dict:\n            feature_extractor_dict[key] = value\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    feature_extractor = cls(**feature_extractor_dict)\n    logger.info(f'Feature extractor {feature_extractor}')\n    if return_unused_kwargs:\n        return (feature_extractor, kwargs)\n    else:\n        return feature_extractor", "docstring": "Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of\nparameters.\n\nArgs:\nfeature_extractor_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the feature extractor object. Such a dictionary can be\nretrieved from a pretrained checkpoint by leveraging the\n[`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the feature extractor object.\n\nReturns:\n[`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those\nparameters.", "source": "github-repos"}
{"code": "def Deserialize(self, reader):\n        \n        self.Version = reader.ReadUInt32()\n        self.Services = reader.ReadUInt64()\n        self.Timestamp = reader.ReadUInt32()\n        self.Port = reader.ReadUInt16()\n        self.Nonce = reader.ReadUInt32()\n        self.UserAgent = reader.ReadVarString().decode('utf-8')\n        self.StartHeight = reader.ReadUInt32()\n        logger.debug(\"Version start height: T %s \" % self.StartHeight)\n        self.Relay = reader.ReadBool()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def one_hot(indices, output_dim, on_value=1.0, off_value=0.0, dtype=tf.float32, name=None):\n    return OneHotOperation(indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]", "docstring": "One hot operation.\n\nTODO(noam): Is there a good reason we need a special mtf.Operation here?\nWe could just use some code like this:\ncast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)),\ndtype)\n\nArgs:\nindices: a Tensor\noutput_dim: a Dimension\non_value: Value taken when indices are on at a location, default 1\noff_value: Value taken when indices are off at a location, default 0\ndtype: a tf.DType\nname: an optional string\nReturns:\na Tensor with shape extended by output_dim for the last axis.", "source": "codesearchnet"}
{"code": "def write(self, b):\n        \n        if not self._writable:\n            raise UnsupportedOperation('write')\n\n        \n        \n        \n        size = len(b)\n        with self._seek_lock:\n            start = self._seek\n            end = start + size\n            self._seek = end\n\n        buffer = self._write_buffer\n        if end <= len(buffer):\n            buffer = memoryview(buffer)\n        buffer[start:end] = b\n        return size", "docstring": "Write the given bytes-like object, b, to the underlying raw stream,\nand return the number of bytes written.\n\nArgs:\nb (bytes-like object): Bytes to write.\n\nReturns:\nint: The number of bytes written.", "source": "juraj-google-style"}
{"code": "def _infer_var_name(var):\n    name_to_var_dict = saveable_object_util.op_list_to_dict(var)\n    if len(name_to_var_dict) > 1:\n        raise TypeError('`var` = %s passed as arg violates the constraints.  name_to_var_dict = %s' % (var, name_to_var_dict))\n    return list(name_to_var_dict.keys())[0]", "docstring": "Returns name of the `var`.\n\nArgs:\nvar: A list. The list can contain either of the following:\n(i) A single `Variable`\n(ii) A single `ResourceVariable`\n(iii) Multiple `Variable` objects which must be slices of the same larger\nvariable.\n(iv) A single `PartitionedVariable`\n\nReturns:\nName of the `var`", "source": "github-repos"}
{"code": "def get_dict_to_print(field_to_obs):\n\n    def compressed_steps(steps):\n        return {'num_steps': len(set(steps)), 'min_step': min(steps), 'max_step': max(steps), 'last_step': steps[(- 1)], 'first_step': steps[0], 'outoforder_steps': get_out_of_order(steps)}\n\n    def full_steps(steps):\n        return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}\n    output = {}\n    for (field, observations) in field_to_obs.items():\n        if (not observations):\n            output[field] = None\n            continue\n        steps = [x['step'] for x in observations]\n        if (field in SHORT_FIELDS):\n            output[field] = compressed_steps(steps)\n        if (field in LONG_FIELDS):\n            output[field] = full_steps(steps)\n    return output", "docstring": "Transform the field-to-obs mapping into a printable dictionary.\n\nArgs:\nfield_to_obs: Dict that maps string field to `Observation` list.\n\nReturns:\nA dict with the keys and values to print to console.", "source": "codesearchnet"}
{"code": "def _wait_and_kill(pid_to_wait, pids_to_kill):\n  \n  \n  import psutil\n  if psutil.pid_exists(pid_to_wait):\n    psutil.Process(pid=pid_to_wait).wait()\n\n  for pid_to_kill in pids_to_kill:\n    if psutil.pid_exists(pid_to_kill):\n      p = psutil.Process(pid=pid_to_kill)\n      p.kill()\n      p.wait()", "docstring": "Helper function.\n\nWait for a process to finish if it exists, and then try to kill a list of\nprocesses.\n\nUsed by local_train\n\nArgs:\npid_to_wait: the process to wait for.\npids_to_kill: a list of processes to kill after the process of pid_to_wait finishes.", "source": "juraj-google-style"}
{"code": "def remove_notification_listener(self, notification_id):\n    \n\n    for v in self.notifications.values():\n      toRemove = list(filter(lambda tup: tup[0] == notification_id, v))\n      if len(toRemove) > 0:\n        v.remove(toRemove[0])\n        return True\n\n    return False", "docstring": "Remove a previously added notification callback.\n\nArgs:\nnotification_id: The numeric id passed back from add_notification_listener\n\nReturns:\nThe function returns boolean true if found and removed, false otherwise.", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n    return isinstance(other, self.__class__) and self.to_string() == other.to_string()", "docstring": "Checks if the `other` DeviceSpec is same as the current instance, eg have\n\nsame value for all the internal fields.\n\nArgs:\nother: Another DeviceSpec\n\nReturns:\nReturn `True` if `other` is also a DeviceSpec instance and has same value\nas the current instance.\nReturn `False` otherwise.", "source": "github-repos"}
{"code": "def apply_pending(self, panel_obj, version):\n    updates = {}\n    new_panel = deepcopy(panel_obj)\n    new_panel['pending'] = []\n    new_panel['date'] = dt.datetime.now()\n    info_fields = ['disease_associated_transcripts', 'inheritance_models', 'reduced_penetrance', 'mosaicism', 'database_entry_version', 'comment']\n    new_genes = []\n    for update in panel_obj.get('pending', []):\n        hgnc_id = update['hgnc_id']\n        if (update['action'] != 'add'):\n            updates[hgnc_id] = update\n            continue\n        info = update.get('info', {})\n        gene_obj = {'hgnc_id': hgnc_id, 'symbol': update['symbol']}\n        for field in info_fields:\n            if (field in info):\n                gene_obj[field] = info[field]\n        new_genes.append(gene_obj)\n    for gene in panel_obj['genes']:\n        hgnc_id = gene['hgnc_id']\n        if (hgnc_id not in updates):\n            new_genes.append(gene)\n            continue\n        current_update = updates[hgnc_id]\n        action = current_update['action']\n        info = current_update['info']\n        if (action == 'delete'):\n            continue\n        elif (action == 'edit'):\n            for field in info_fields:\n                if (field in info):\n                    gene[field] = info[field]\n            new_genes.append(gene)\n    new_panel['genes'] = new_genes\n    new_panel['version'] = float(version)\n    inserted_id = None\n    if (new_panel['version'] == panel_obj['version']):\n        result = self.panel_collection.find_one_and_replace({'_id': panel_obj['_id']}, new_panel, return_document=pymongo.ReturnDocument.AFTER)\n        inserted_id = result['_id']\n    else:\n        new_panel.pop('_id')\n        panel_obj['is_archived'] = True\n        self.update_panel(panel_obj=panel_obj, date_obj=panel_obj['date'])\n        inserted_id = self.panel_collection.insert_one(new_panel).inserted_id\n    return inserted_id", "docstring": "Apply the pending changes to an existing gene panel or create a new version of the same panel.\n\nArgs:\npanel_obj(dict): panel in database to update\nversion(double): panel version to update\n\nReturns:\ninserted_id(str): id of updated panel or the new one", "source": "codesearchnet"}
{"code": "def get_colour_handler(extranames: List[str] = None,\n                       with_process_id: bool = False,\n                       with_thread_id: bool = False,\n                       stream: TextIO = None) -> logging.StreamHandler:\n    \n    fmt = \"%(white)s%(asctime)s.%(msecs)03d\"  \n    if with_process_id or with_thread_id:\n        procinfo = []  \n        if with_process_id:\n            procinfo.append(\"p%(process)d\")\n        if with_thread_id:\n            procinfo.append(\"t%(thread)d\")\n        fmt += \" [{}]\".format(\".\".join(procinfo))\n    extras = \":\" + \":\".join(extranames) if extranames else \"\"\n    fmt += \" %(name)s{extras}:%(levelname)s: \".format(extras=extras)\n    fmt += \"%(reset)s%(log_color)s%(message)s\"\n    cf = ColoredFormatter(fmt,\n                          datefmt=LOG_DATEFMT,\n                          reset=True,\n                          log_colors=LOG_COLORS,\n                          secondary_log_colors={},\n                          style='%')\n    ch = logging.StreamHandler(stream)\n    ch.setFormatter(cf)\n    return ch", "docstring": "Gets a colour log handler using a standard format.\n\nArgs:\nextranames: additional names to append to the logger's name\nwith_process_id: include the process ID in the logger's name?\nwith_thread_id: include the thread ID in the logger's name?\nstream: ``TextIO`` stream to send log output to\n\nReturns:\nthe :class:`logging.StreamHandler`", "source": "juraj-google-style"}
{"code": "def update(cls, customer_id, **kwargs):\n        \n        return cls().requests.put('customer/{customer_id}'.format(**locals()),\n                                  data=kwargs)", "docstring": "Static method defined to update paystack customer data by id.\n\nArgs:\ncustomer_id: paystack customer id.\nfirst_name: customer's first name(optional).\nlast_name: customer's last name(optional).\nemail: customer's email address(optional).\nphone:customer's phone number(optional).\n\nReturns:\nJson data from paystack API.", "source": "juraj-google-style"}
{"code": "def lint(ctx: click.Context, amend: bool = False, stage: bool = False):\n    \n    _lint(ctx, amend, stage)", "docstring": "Runs all linters\n\nArgs:\nctx: click context\namend: whether or not to commit results\nstage: whether or not to stage changes", "source": "juraj-google-style"}
{"code": "def get_experiment_fn(args):\n  \n\n  def get_experiment(output_dir):\n    \n    train_config = util.merge_metadata(args.preprocess_output_dir,\n                                       args.transforms_file)\n\n    \n    estimator = util.get_estimator(output_dir, train_config, args)\n\n    \n    schema_file = os.path.join(args.preprocess_output_dir, util.SCHEMA_FILE)\n\n    \n    additional_assets = {'features.json': args.transforms_file,\n                         util.SCHEMA_FILE: schema_file}\n    if util.is_classification_model(args.model_type):\n      target_name = train_config['target_column']\n      vocab_file_name = util.CATEGORICAL_ANALYSIS % target_name\n      vocab_file_path = os.path.join(\n          args.preprocess_output_dir, vocab_file_name)\n      assert file_io.file_exists(vocab_file_path)\n      additional_assets[vocab_file_name] = vocab_file_path\n\n    export_strategy_target = util.make_export_strategy(\n        train_config=train_config,\n        args=args,\n        keep_target=True,\n        assets_extra=additional_assets)\n    export_strategy_notarget = util.make_export_strategy(\n        train_config=train_config,\n        args=args,\n        keep_target=False,\n        assets_extra=additional_assets)\n\n    input_reader_for_train = get_reader_input_fn(\n        train_config=train_config,\n        preprocess_output_dir=args.preprocess_output_dir,\n        model_type=args.model_type,\n        data_paths=args.train_data_paths,\n        batch_size=args.train_batch_size,\n        shuffle=True,\n        num_epochs=args.num_epochs)\n\n    input_reader_for_eval = get_reader_input_fn(\n        train_config=train_config,\n        preprocess_output_dir=args.preprocess_output_dir,\n        model_type=args.model_type,\n        data_paths=args.eval_data_paths,\n        batch_size=args.eval_batch_size,\n        shuffle=False,\n        num_epochs=1)\n\n    return tf.contrib.learn.Experiment(\n        estimator=estimator,\n        train_input_fn=input_reader_for_train,\n        eval_input_fn=input_reader_for_eval,\n        train_steps=args.max_steps,\n        export_strategies=[export_strategy_target, export_strategy_notarget],\n        min_eval_frequency=args.min_eval_frequency,\n        eval_steps=None,\n    )\n\n  \n  return get_experiment", "docstring": "Builds the experiment function for learn_runner.run.\n\nArgs:\nargs: the command line args\n\nReturns:\nA function that returns a tf.learn experiment object.", "source": "juraj-google-style"}
{"code": "def api_representation(self, content_type):\n        \n        payload = dict(Subject=self.subject, Body=dict(ContentType=content_type, Content=self.body))\n\n        if self.sender is not None:\n            payload.update(From=self.sender.api_representation())\n\n        \n        if any(isinstance(item, str) for item in self.to):\n            self.to = [Contact(email=email) for email in self.to]\n\n        \n\n        recipients = [contact.api_representation() for contact in self.to]\n\n        payload.update(ToRecipients=recipients)\n\n        \n        if self.cc:\n            if any(isinstance(email, str) for email in self.cc):\n                self.cc = [Contact(email) for email in self.cc]\n\n            cc_recipients = [contact.api_representation() for contact in self.cc]\n            payload.update(CcRecipients=cc_recipients)\n\n        if self.bcc:\n            if any(isinstance(email, str) for email in self.bcc):\n                self.bcc = [Contact(email) for email in self.bcc]\n\n            bcc_recipients = [contact.api_representation() for contact in self.bcc]\n            payload.update(BccRecipients=bcc_recipients)\n\n        if self._attachments:\n            payload.update(Attachments=[attachment.api_representation() for attachment in self._attachments])\n\n        payload.update(Importance=str(self.importance))\n\n        return dict(Message=payload)", "docstring": "Returns the JSON representation of this message required for making requests to the API.\n\nArgs:\ncontent_type (str): Either 'HTML' or 'Text'", "source": "juraj-google-style"}
{"code": "def format_script(sensor_graph):\n    records = []\n    records.append(SetGraphOnlineRecord(False, address=8))\n    records.append(ClearDataRecord(address=8))\n    records.append(ResetGraphRecord(address=8))\n    for node in sensor_graph.nodes:\n        records.append(AddNodeRecord(str(node), address=8))\n    for streamer in sensor_graph.streamers:\n        records.append(AddStreamerRecord(streamer, address=8))\n    for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())):\n        records.append(SetConstantRecord(stream, value, address=8))\n    records.append(PersistGraphRecord(address=8))\n    records.append(ClearConfigVariablesRecord())\n    for slot in sorted(sensor_graph.config_database, key=(lambda x: x.encode())):\n        for config_id in sorted(sensor_graph.config_database[slot]):\n            (config_type, value) = sensor_graph.config_database[slot][config_id]\n            byte_value = _convert_to_bytes(config_type, value)\n            records.append(SetConfigRecord(slot, config_id, byte_value))\n    app_tag = sensor_graph.metadata_database.get('app_tag')\n    app_version = sensor_graph.metadata_database.get('app_version')\n    if (app_tag is not None):\n        records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version))\n    script = UpdateScript(records)\n    return script.encode()", "docstring": "Create a binary script containing this sensor graph.\n\nThis function produces a repeatable script by applying a known sorting\norder to all constants and config variables when iterating over those\ndictionaries.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format\n\nReturns:\nbytearray: The binary script data.", "source": "codesearchnet"}
{"code": "def __init__(self, source_path):\n        \n        self.source_path = source_path\n        self.package = get_developer_package(source_path)\n        self.type_settings = self.package.config.plugins.release_hook\n        self.settings = self.type_settings.get(self.name())", "docstring": "Create a release hook.\n\nArgs:\nsource_path: Path containing source that was released.", "source": "juraj-google-style"}
{"code": "def recipe_barnacle_dv360(config, auth_read, auth_write, partner, recipe_slug):\n    dataset(config, {'auth': auth_write, 'dataset': recipe_slug})\n    google_api(config, {'auth': auth_read, 'api': 'doubleclickbidmanager', 'version': 'v1.1', 'function': 'queries.listqueries', 'alias': 'list', 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Reports'}}})\n    google_api(config, {'auth': auth_read, 'api': 'displayvideo', 'version': 'v1', 'function': 'partners.list', 'kwargs': {'fields': 'partners.displayName,partners.partnerId,nextPageToken'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Partners'}}})\n    google_api(config, {'auth': auth_read, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.list', 'kwargs': {'partnerId': partner, 'fields': 'advertisers.displayName,advertisers.advertiserId,nextPageToken'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Advertisers'}}})\n    google_api(config, {'auth': 'service', 'api': 'displayvideo', 'version': 'v1', 'function': 'users.list', 'kwargs': {}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV_Users'}}})\n    bigquery(config, {'auth': auth_write, 'from': {'query': \"SELECT\\n         U.userId,\\n         U.name,\\n         U.email,\\n         U.displayName,\\n         REGEXP_EXTRACT(U.email, r'@(.+)') AS Domain,\\n         IF (ENDS_WITH(U.email, '.gserviceaccount.com'), 'Service', 'User') AS Authentication,\\n         IF((Select COUNT(advertiserId) from UNNEST(U.assignedUserRoles)) = 0, 'Partner', 'Advertiser') AS Scope,\\n         STRUCT(\\n           AUR.partnerId,\\n           P.displayName AS partnerName,\\n           AUR.userRole,\\n           AUR.advertiserId,\\n           A.displayName AS advertiserName,\\n           AUR.assignedUserRoleId\\n         ) AS assignedUserRoles,\\n         FROM `{dataset}.DV_Users` AS U,\\n         UNNEST(assignedUserRoles) AS AUR\\n         LEFT JOIN `{dataset}.DV_Partners` AS P\\n         ON AUR.partnerId=P.partnerId\\n         LEFT JOIN `{dataset}.DV_Advertisers` AS A\\n         ON AUR.advertiserId=A.advertiserId         \", 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'Barnacle_User_Roles'}})\n    bigquery(config, {'auth': auth_write, 'from': {'query': \"SELECT\\n         R.*,\\n         P.displayName AS partnerName,\\n         A.displayName AS advertiserName,\\n         FROM (\\n         SELECT\\n           queryId,\\n           (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_PARTNER' LIMIT 1) AS partnerId,\\n           (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_ADVERTISER' LIMIT 1) AS advertiserId,\\n           R.schedule.frequency,\\n           R.params.metrics,\\n           R.params.type,\\n           R.metadata.dataRange,\\n           R.metadata.sendNotification,\\n           DATE(TIMESTAMP_MILLIS(R.metadata.latestReportRunTimeMS)) AS latestReportRunTime,\\n         FROM `{dataset}.DV_Reports` AS R) AS R\\n         LEFT JOIN `{dataset}.DV_Partners` AS P\\n         ON R.partnerId=P.partnerId\\n         LEFT JOIN `{dataset}.DV_Advertisers` AS A\\n         ON R.advertiserId=A.advertiserId         \", 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'table': 'Barnacle_Reports'}})", "docstring": "Gives DV clients ability to see which users have access to which parts of an\naccount. 
Loads DV user profile mappings using the API into BigQuery and\nconnects to a DataStudio dashboard.\n\nArgs:\nauth_read (authentication) - Credentials used for writing data.\nauth_write (authentication) - Credentials used for writing data.\npartner (integer) - Partner ID to run user audit on.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.", "source": "github-repos"}
{"code": "def add_rect(self, width, height, rid=None):\n        \n        assert(width > 0 and height >0)\n\n        \n        rect, _ = self._select_position(width, height)\n        if not rect:\n            return None\n        \n        \n        \n        self._split(rect)\n    \n        \n        self._remove_duplicates()\n\n        \n        rect.rid = rid\n        self.rectangles.append(rect)\n        return rect", "docstring": "Add rectangle of widthxheight dimensions.\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\nrid: Optional rectangle user id\n\nReturns:\nRectangle: Rectangle with placemente coordinates\nNone: If the rectangle couldn be placed.", "source": "juraj-google-style"}
{"code": "def find_package_data():\n    l = list()\n    for start in ('ambry/support', 'ambry/bundle/default_files'):\n        for (root, dirs, files) in os.walk(start):\n            for f in files:\n                if f.endswith('.pyc'):\n                    continue\n                path = os.path.join(root, f).replace('ambry/', '')\n                l.append(path)\n    return {'ambry': l}", "docstring": "Returns package_data, because setuptools is too stupid to handle nested directories.\n\nReturns:\ndict: key is \"ambry\", value is list of paths.", "source": "codesearchnet"}
{"code": "def get_uid_state(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/uidState\"\n        return self._client.get(uri)", "docstring": "Retrieves the unit identification (UID) state (on, off, unknown) of the specified power outlet or extension bar\nresource. The device must be an HP iPDU component with a locator light (HP Intelligent Load Segment,\nHP AC Module, HP Intelligent Outlet Bar, or HP Intelligent Outlet).\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\n\nReturns:\nstr: unit identification (UID) state", "source": "juraj-google-style"}
{"code": "def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None):\n    \n    flow_obj = aff4.FACTORY.Open(\n        flow_id, aff4_type=GRRFlow, mode=\"rw\", token=token)\n\n    if not flow_obj:\n      raise FlowError(\"Could not terminate flow %s\" % flow_id)\n\n    with flow_obj:\n      runner = flow_obj.GetRunner()\n      if not runner.IsRunning():\n        return\n\n      if token is None:\n        token = access_control.ACLToken()\n\n      if reason is None:\n        reason = \"Manual termination by console.\"\n\n      \n      runner.Error(reason, status_code=status)\n\n      flow_obj.Log(\"Terminated by user {0}. Reason: {1}\".format(\n          token.username, reason))\n\n      \n      super_token = token.SetUID()\n\n      \n      children_to_kill = aff4.FACTORY.MultiOpen(\n          flow_obj.ListChildren(), token=super_token, aff4_type=GRRFlow)\n\n      for child_obj in children_to_kill:\n        cls.TerminateAFF4Flow(\n            child_obj.urn, reason=\"Parent flow terminated.\", token=super_token)", "docstring": "Terminate a flow.\n\nArgs:\nflow_id: The flow session_id to terminate.\nreason: A reason to log.\nstatus: Status code used in the generated status message.\ntoken: The access token to be used for this request.\n\nRaises:\nFlowError: If the flow can not be found.", "source": "juraj-google-style"}
{"code": "def genHostCert(self, name, signas=None, outp=None, csr=None, sans=None):\n    (pkey, cert) = self._genBasePkeyCert(name, pkey=csr)\n    ext_sans = {('DNS:' + name)}\n    if isinstance(sans, str):\n        ext_sans = ext_sans.union(sans.split(','))\n    ext_sans = ','.join(sorted(ext_sans))\n    cert.add_extensions([crypto.X509Extension(b'nsCertType', False, b'server'), crypto.X509Extension(b'keyUsage', False, b'digitalSignature,keyEncipherment'), crypto.X509Extension(b'extendedKeyUsage', False, b'serverAuth'), crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE'), crypto.X509Extension(b'subjectAltName', False, ext_sans.encode('utf-8'))])\n    if (signas is not None):\n        self.signCertAs(cert, signas)\n    else:\n        self.selfSignCert(cert, pkey)\n    if (not pkey._only_public):\n        keypath = self._savePkeyTo(pkey, 'hosts', ('%s.key' % name))\n        if (outp is not None):\n            outp.printf(('key saved: %s' % (keypath,)))\n    crtpath = self._saveCertTo(cert, 'hosts', ('%s.crt' % name))\n    if (outp is not None):\n        outp.printf(('cert saved: %s' % (crtpath,)))\n    return (pkey, cert)", "docstring": "Generates a host keypair.\n\nArgs:\nname (str): The name of the host keypair.\nsignas (str): The CA keypair to sign the new host keypair with.\noutp (synapse.lib.output.Output): The output buffer.\ncsr (OpenSSL.crypto.PKey): The CSR public key when generating the keypair from a CSR.\nsans (list): List of subject alternative names.\n\nExamples:\nMake a host keypair named \"myhost\":\n\nmyhostkey, myhostcert = cdir.genHostCert('myhost')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects.", "source": "codesearchnet"}
{"code": "def reduce(self, fn, *args):\n    assert not context.executing_eagerly()\n    tensor_specs = []\n    for arg in args:\n        if not isinstance(arg, tensor_lib.Tensor):\n            raise ValueError(f'Got a non-Tensor argument {arg} in reduce.')\n        batched_shape = tensor_shape.TensorShape([self._maybe_iters]).concatenate(arg.shape)\n        tensor_specs.append(tensor_lib.TensorSpec(shape=batched_shape, dtype=arg.dtype))\n    concrete_function = def_function.function(fn).get_concrete_function(*tensor_specs)\n    pl_outputs = []\n    with ops.control_dependencies(args):\n        for output in concrete_function.outputs:\n            if not isinstance(output, tensor_lib.Tensor):\n                raise ValueError(f'Got a non-Tensor output {output} while running reduce.')\n            if output.shape.is_fully_defined():\n                dummy = array_ops.zeros(output.shape.as_list(), dtype=output.dtype)\n                pl_outputs.append(array_ops.placeholder_with_default(dummy, shape=output.shape))\n            else:\n                pl_outputs.append(array_ops.placeholder(output.dtype, shape=output.shape))\n        reduction_op = array_ops.identity_n(pl_outputs)[0].op\n    self._reduce_map[reduction_op] = (concrete_function, args)\n    if len(reduction_op.outputs) == 1:\n        return reduction_op.outputs[0]\n    else:\n        return tuple(reduction_op.outputs)", "docstring": "Performs reduction `fn` on `args` vectorized across pfor iterations.\n\nNote that `fn` is traced once inside the loop function context. Hence any\ncaptures or side-effects will happen in that context. Call to the traced\nversion of `fn` happens during the construction of the vectorized code.\n\nNote that this currently may not work inside a control flow construct.\nArgs:\nfn: a reduction function. It will be called with arguments that have the\nsame structure as *args but with individual values whose rank may be\nhigher by 1 since they represent loop invariant vectorized versions of\nthe corresponding Tensors in *args.\n*args: unvectorized Tensors.\n\nReturns:\nThe result of running `fn` on the vectorized versions of `*args`. These\noutputs will be available as loop invariant values to all the iterations.", "source": "github-repos"}
{"code": "def set_guest_access(self, room_id, guest_access):\n    content = {'guest_access': guest_access}\n    return self.send_state_event(room_id, 'm.room.guest_access', content)", "docstring": "Set the guest access policy of the room.\n\nArgs:\nroom_id(str): The room to set the rules for.\nguest_access(str): Wether guests can join. One of: [\"can_join\",\n\"forbidden\"]", "source": "codesearchnet"}
{"code": "class EfficientNetEncoder(nn.Module):\n\n    def __init__(self, config: EfficientNetConfig):\n        super().__init__()\n        self.config = config\n        self.depth_coefficient = config.depth_coefficient\n\n        def round_repeats(repeats):\n            return int(math.ceil(self.depth_coefficient * repeats))\n        num_base_blocks = len(config.in_channels)\n        num_blocks = sum((round_repeats(n) for n in config.num_block_repeats))\n        curr_block_num = 0\n        blocks = []\n        for i in range(num_base_blocks):\n            in_dim = round_filters(config, config.in_channels[i])\n            out_dim = round_filters(config, config.out_channels[i])\n            stride = config.strides[i]\n            kernel_size = config.kernel_sizes[i]\n            expand_ratio = config.expand_ratios[i]\n            for j in range(round_repeats(config.num_block_repeats[i])):\n                id_skip = True if j == 0 else False\n                stride = 1 if j > 0 else stride\n                in_dim = out_dim if j > 0 else in_dim\n                adjust_padding = False if curr_block_num in config.depthwise_padding else True\n                drop_rate = config.drop_connect_rate * curr_block_num / num_blocks\n                block = EfficientNetBlock(config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding)\n                blocks.append(block)\n                curr_block_num += 1\n        self.blocks = nn.ModuleList(blocks)\n        self.top_conv = nn.Conv2d(in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding='same', bias=False)\n        self.top_bn = nn.BatchNorm2d(num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)\n        self.top_activation = ACT2FN[config.hidden_act]\n\n    def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithNoAttention:\n        all_hidden_states = (hidden_states,) if output_hidden_states else None\n        for block in self.blocks:\n            hidden_states = block(hidden_states)\n            if output_hidden_states:\n                all_hidden_states += (hidden_states,)\n        hidden_states = self.top_conv(hidden_states)\n        hidden_states = self.top_bn(hidden_states)\n        hidden_states = self.top_activation(hidden_states)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))\n        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)", "docstring": "Forward propagates the embeddings through each EfficientNet block.\n\nArgs:\nconfig ([`EfficientNetConfig`]):\nModel configuration class.", "source": "github-repos"}
{"code": "def wait(self, timeout=None):\n    \n    poll = 30\n    while not self._is_complete:\n      try:\n        query_result = self._api.jobs_query_results(self._job_id,\n                                                    project_id=self._context.project_id,\n                                                    page_size=0,\n                                                    timeout=poll * 1000)\n      except Exception as e:\n        raise e\n      if query_result['jobComplete']:\n        if 'totalBytesProcessed' in query_result:\n          self._bytes_processed = int(query_result['totalBytesProcessed'])\n        self._cache_hit = query_result.get('cacheHit', None)\n        if 'totalRows' in query_result:\n          self._total_rows = int(query_result['totalRows'])\n        break\n\n      if timeout is not None:\n        timeout -= poll\n        if timeout <= 0:\n          break\n\n    self._refresh_state()\n    return self", "docstring": "Wait for the job to complete, or a timeout to happen.\n\nThis is more efficient than the version in the base Job class, in that we can\nuse a call that blocks for the poll duration rather than a sleep. That means we\nshouldn't block unnecessarily long and can also poll less.\n\nArgs:\ntimeout: how long to wait (in seconds) before giving up; default None which means no timeout.\n\nReturns:\nThe QueryJob", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.CreateReadSession = channel.unary_unary(\n            \"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession\",\n            request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.CreateReadSessionRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadSession.FromString,\n        )\n        self.ReadRows = channel.unary_stream(\n            \"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows\",\n            request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.ReadRowsResponse.FromString,\n        )\n        self.BatchCreateReadSessionStreams = channel.unary_unary(\n            \"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams\",\n            request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.BatchCreateReadSessionStreamsResponse.FromString,\n        )\n        self.FinalizeStream = channel.unary_unary(\n            \"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream\",\n            request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.FinalizeStreamRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.SplitReadStream = channel.unary_unary(\n            \"/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream\",\n            request_serializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery_dot_storage__v1beta1_dot_proto_dot_storage__pb2.SplitReadStreamResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def GetSubkeyByPath(self, key_path):\n    \n    if not self._registry_key and self._registry:\n      self._GetKeyFromRegistry()\n\n    subkey = self\n    for path_segment in key_paths.SplitKeyPath(key_path):\n      subkey = subkey.GetSubkeyByName(path_segment)\n      if not subkey:\n        break\n\n    return subkey", "docstring": "Retrieves a subkey by path.\n\nArgs:\nkey_path (str): path of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates true positive and false negative statistics.\n\nArgs:\ny_true: The ground truth values, with the same dimensions as\n`y_pred`. Will be cast to `bool`.\ny_pred: The predicted values. Each element must be in the range\n`[0, 1]`.\nsample_weight: Optional weighting of each example. Defaults to `1`.\nCan be a tensor whose rank is either 0, or the same rank as\n`y_true`, and must be broadcastable to `y_true`.", "source": "github-repos"}
{"code": "def __init__(self, prevHash=None, prevIndex=None):\n        \n        super(TransactionInput, self).__init__()\n        self.PrevHash = prevHash\n        self.PrevIndex = prevIndex", "docstring": "Create an instance.\nArgs:\nprevHash (UInt256):\nprevIndex (int):", "source": "juraj-google-style"}
{"code": "def register_token(self, registry_address_hex: typing.AddressHex, token_address_hex: typing.AddressHex, retry_timeout: typing.NetworkTimeout=DEFAULT_RETRY_TIMEOUT) -> TokenNetwork:\n    registry_address = decode_hex(registry_address_hex)\n    token_address = decode_hex(token_address_hex)\n    registry = self._raiden.chain.token_network_registry(registry_address)\n    contracts_version = self._raiden.contract_manager.contracts_version\n    if (contracts_version == DEVELOPMENT_CONTRACT_VERSION):\n        token_network_address = registry.add_token_with_limits(token_address=token_address, channel_participant_deposit_limit=UINT256_MAX, token_network_deposit_limit=UINT256_MAX)\n    else:\n        token_network_address = registry.add_token_without_limits(token_address=token_address)\n    waiting.wait_for_payment_network(self._raiden, registry.address, token_address, retry_timeout)\n    return self._raiden.chain.token_network(token_network_address)", "docstring": "Register a token with the raiden token manager.\n\nArgs:\nregistry_address: registry address\ntoken_address_hex (string): a hex encoded token address.\n\nReturns:\n\nThe token network proxy.", "source": "codesearchnet"}
{"code": "def html(self, data=None, template=None):\n        \n        if data is None:\n            data = {}\n        if template:\n            return render(self.request, template, data)\n        return HttpResponse(data)", "docstring": "Send html document to user.\n\nArgs:\n- data: Dict to render template, or string with rendered HTML.\n- template: Name of template to render HTML document with passed data.", "source": "juraj-google-style"}
{"code": "def wcs_pix_transform(ct, i, format=0):\n    z1 = float(ct.z1)\n    z2 = float(ct.z2)\n    i = float(i)\n    yscale = (128.0 / (z2 - z1))\n    if ((format == 'T') or (format == 't')):\n        format = 1\n    if (i == 0):\n        t = 0.0\n    elif (ct.zt == W_LINEAR):\n        t = ((((i - 1) * (z2 - z1)) / 199.0) + z1)\n        t = max(z1, min(z2, t))\n    else:\n        t = float(i)\n    if (format > 1):\n        t = ((z2 - t) * yscale)\n    return t", "docstring": "Computes the WCS corrected pixel value given a coordinate\ntransformation and the raw pixel value.\n\nInput:\nct      coordinate transformation. instance of coord_tran.\ni       raw pixel intensity.\nformat  format string (optional).\n\nReturns:\nWCS corrected pixel value", "source": "codesearchnet"}
{"code": "def add_phenotype(self, institute, case, user, link, hpo_term=None,\n                      omim_term=None, is_group=False):\n        \n        hpo_results = []\n        try:\n            if hpo_term:\n                hpo_results = [hpo_term]\n            elif omim_term:\n                LOG.debug(\"Fetching info for mim term {0}\".format(omim_term))\n                disease_obj = self.disease_term(omim_term)\n                if disease_obj:\n                    for hpo_term in disease_obj.get('hpo_terms', []):\n                        hpo_results.append(hpo_term)\n            else:\n                raise ValueError('Must supply either hpo or omim term')\n        except ValueError as e:\n            \n            raise e\n\n        existing_terms = set(term['phenotype_id'] for term in\n                             case.get('phenotype_terms', []))\n\n        updated_case = case\n        phenotype_terms = []\n        for hpo_term in hpo_results:\n            LOG.debug(\"Fetching info for hpo term {0}\".format(hpo_term))\n            hpo_obj = self.hpo_term(hpo_term)\n            if hpo_obj is None:\n                raise ValueError(\"Hpo term: %s does not exist in database\" % hpo_term)\n\n            phenotype_id = hpo_obj['_id']\n            description = hpo_obj['description']\n            if phenotype_id not in existing_terms:\n                phenotype_term = dict(phenotype_id=phenotype_id, feature=description)\n                phenotype_terms.append(phenotype_term)\n\n                LOG.info(\"Creating event for adding phenotype term for case\"\n                            \" {0}\".format(case['display_name']))\n\n                self.create_event(\n                    institute=institute,\n                    case=case,\n                    user=user,\n                    link=link,\n                    category='case',\n                    verb='add_phenotype',\n                    subject=case['display_name'],\n                    content=phenotype_id\n                )\n\n            if is_group:\n                updated_case = self.case_collection.find_one_and_update(\n                    {'_id': case['_id']},\n                    {\n                        '$addToSet': {\n                            'phenotype_terms': {'$each': phenotype_terms},\n                            'phenotype_groups': {'$each': phenotype_terms},\n                        },\n                    },\n                    return_document=pymongo.ReturnDocument.AFTER\n                )\n            else:\n                updated_case = self.case_collection.find_one_and_update(\n                    {'_id': case['_id']},\n                    {\n                        '$addToSet': {\n                            'phenotype_terms': {'$each': phenotype_terms},\n                        },\n                    },\n                    return_document=pymongo.ReturnDocument.AFTER\n                )\n\n        LOG.debug(\"Case updated\")\n        return updated_case", "docstring": "Add a new phenotype term to a case\n\nCreate a phenotype term and event with the given information\n\nArgs:\ninstitute (Institute): A Institute object\ncase (Case): Case object\nuser (User): A User object\nlink (str): The url to be used in the event\nhpo_term (str): A hpo id\nomim_term (str): A omim id\nis_group (bool): is phenotype term a group?", "source": "juraj-google-style"}
{"code": "def new_stories(self, raw=False, limit=None):\n    new_stories = self._get_stories('newstories', limit)\n    if raw:\n        new_stories = [story.raw for story in new_stories]\n    return new_stories", "docstring": "Returns list of item ids of current new stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of new stories.", "source": "codesearchnet"}
{"code": "def _write_version(self, data, model):\n        \n        vdata = {'data': data,\n                 'key': model.key,\n                 'model': model.Meta.bucket_name,\n                 'timestamp': time.time()}\n        obj = version_bucket.new(data=vdata)\n        obj.add_index('key_bin', model.key)\n        obj.add_index('model_bin', vdata['model'])\n        obj.add_index('timestamp_int', int(vdata['timestamp']))\n        obj.store()\n        return obj.key", "docstring": "Writes a copy of the objects current state to write-once mirror bucket.\n\nArgs:\ndata (dict): Model instance's all data for versioning.\nmodel (instance): Model instance.\n\nReturns:\nKey of version record.\nkey (str): Version_bucket key.", "source": "juraj-google-style"}
{"code": "def _sanitize_input_structure(input_structure):\n        \n\n        input_structure = input_structure.copy()\n\n        \n        input_structure.remove_spin()\n\n        \n        input_structure = input_structure.get_primitive_structure(use_site_props=False)\n\n        \n        \n        \n        if \"magmom\" in input_structure.site_properties:\n            input_structure.remove_site_property(\"magmom\")\n\n        return input_structure", "docstring": "Sanitize our input structure by removing magnetic information\nand making primitive.\n\nArgs:\ninput_structure: Structure\n\nReturns: Structure", "source": "juraj-google-style"}
{"code": "def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):\n    with ops.name_scope(name, 'normalize', [counts, mean_ss, variance_ss, shift]):\n        divisor = math_ops.reciprocal(counts, name='divisor')\n        if shift is not None:\n            shifted_mean = math_ops.multiply(mean_ss, divisor, name='shifted_mean')\n            mean = math_ops.add(shifted_mean, shift, name='mean')\n        else:\n            shifted_mean = math_ops.multiply(mean_ss, divisor, name='mean')\n            mean = shifted_mean\n        variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor), math_ops.square(shifted_mean), name='variance')\n    return (mean, variance)", "docstring": "Calculate the mean and variance of based on the sufficient statistics.\n\nArgs:\ncounts: A `Tensor` containing the total count of the data (one value).\nmean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly\nshifted) sum of the elements to average over.\nvariance_ss: A `Tensor` containing the variance sufficient statistics: the\n(possibly shifted) squared sum of the data to compute the variance over.\nshift: A `Tensor` containing the value by which the data is shifted for\nnumerical stability, or `None` if no shift was performed.\nname: Name used to scope the operations that compute the moments.\n\nReturns:\nTwo `Tensor` objects: `mean` and `variance`.", "source": "github-repos"}
{"code": "def ed25519_generate_key_pair_from_secret(secret):\n    \n\n    \n    if not isinstance(secret, bytes):\n        secret = secret.encode()\n\n    hash_bytes = sha3.keccak_256(secret).digest()\n    sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes)\n    \n    private_value_base58 = sk.encode(encoding='base58')\n\n    \n    public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58')\n\n    return private_value_base58, public_value_compressed_base58", "docstring": "Generate a new key pair.\nArgs:\nsecret (:class:`string`): A secret that serves as a seed\nReturns:\nA tuple of (private_key, public_key) encoded in base58.", "source": "juraj-google-style"}
{"code": "def GetHashData(hashable):\n    ms = StreamManager.GetStream()\n    writer = BinaryWriter(ms)\n    hashable.SerializeUnsigned(writer)\n    ms.flush()\n    retVal = ms.ToArray()\n    StreamManager.ReleaseStream(ms)\n    return retVal", "docstring": "Get the data used for hashing.\n\nArgs:\nhashable (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin\n\nReturns:\nbytes:", "source": "codesearchnet"}
{"code": "def remove_location(self, location):\n        \n        \n        res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')\n        if not res:\n            res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')\n        if not res:\n            res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')\n        return res", "docstring": "Remove a location. If the location is already added, it is ignored.\n\nArgs:\nlocation (str): Location to remove\n\nReturns:\nbool: True if location removed or False if not", "source": "juraj-google-style"}
{"code": "def merge_corpus(self, corpus):\n        \n\n        \n        merging_corpus = Corpus.from_corpus(corpus)\n\n        self.import_tracks(corpus.tracks.values())\n        self.import_issuers(corpus.issuers.values())\n        utterance_idx_mapping = self.import_utterances(corpus.utterances.values())\n\n        for subview_idx, subview in merging_corpus.subviews.items():\n            for filter in subview.filter_criteria:\n                if isinstance(filter, subset.MatchingUtteranceIdxFilter):\n                    new_filtered_utt_ids = set()\n                    for utt_idx in filter.utterance_idxs:\n                        new_filtered_utt_ids.add(utterance_idx_mapping[utt_idx].idx)\n                    filter.utterance_idxs = new_filtered_utt_ids\n\n            new_idx = naming.index_name_if_in_list(subview_idx, self.subviews.keys())\n            self.import_subview(new_idx, subview)\n\n        for feat_container_idx, feat_container in merging_corpus.feature_containers.items():\n            self.new_feature_container(feat_container_idx, feat_container.path)", "docstring": "Merge the given corpus into this corpus. All assets (tracks, utterances, issuers, ...) are copied into\nthis corpus. If any ids (utt-idx, track-idx, issuer-idx, subview-idx, ...) are occurring in both corpora,\nthe ids from the merging corpus are suffixed by a number (starting from 1 until no other is matching).\n\nArgs:\ncorpus (CorpusView): The corpus to merge.", "source": "juraj-google-style"}
{"code": "def provides(arg_name=None, annotated_with=None, in_scope=None):\n    if ((arg_name is None) and (annotated_with is None) and (in_scope is None)):\n        raise errors.EmptyProvidesDecoratorError(locations.get_back_frame_loc())\n    return _get_pinject_wrapper(locations.get_back_frame_loc(), provider_arg_name=arg_name, provider_annotated_with=annotated_with, provider_in_scope_id=in_scope)", "docstring": "Modifies the binding of a provider method.\n\nIf arg_name is specified, then the created binding is for that arg name\ninstead of the one gotten from the provider method name (e.g., 'foo' from\n'provide_foo').\n\nIf annotated_with is specified, then the created binding includes that\nannotation object.\n\nIf in_scope is specified, then the created binding is in the scope with\nthat scope ID.\n\nAt least one of the args must be specified.  A provider method may not be\ndecorated with @provides() twice.\n\nArgs:\narg_name: the name of the arg to annotate on the decorated function\nannotated_with: an annotation object\nin_scope: a scope ID\nReturns:\na function that will decorate functions passed to it", "source": "codesearchnet"}
{"code": "def get_top_pairs(fsym, limit=5):\n\t\n\n\t\n\turl = build_url('pairs', fsym=fsym, limit=limit)\n\tdata = load_data(url)\n\n\treturn data['Data']", "docstring": "Get top trading pairs by 24 hour aggregated volume for a currency.\n\nArgs:\nfsym: FROM symbol.\nlimit: Number of results. Default value returns top 5 pairs.\n\nReturns:\nFunction returns a list containing a dictionary for each result:\n\n[{'exchange': ..., 'fromSymbol': ..., 'toSymbol': ..., 'volume24h': ...,\n'volume24hTo': ...},\n{...},\n...]\n\nThe list is ordered based on the volume of the FROM currency starting\nwith the highest value.", "source": "juraj-google-style"}
{"code": "def execute_code(self, code, filename=None, isolate=False):\n\n    def _apply():\n        self.compile_code(code=code, filename=filename, exec_namespace=self.globals)\n    if isolate:\n        saved_globals = dict(self.globals)\n        try:\n            _apply()\n        finally:\n            self.globals.clear()\n            self.globals.update(saved_globals)\n    else:\n        _apply()", "docstring": "Execute code within the execution context.\n\nArgs:\ncode (str or SourceCode): Rex code to execute.\nfilename (str): Filename to report if there are syntax errors.\nisolate (bool): If True, do not affect `self.globals` by executing\nthis code.", "source": "codesearchnet"}
{"code": "def _filter_top_k(x, k):\n    _, top_k_idx = nn_ops.top_k(x, k, sorted=False)\n    top_k_mask = math_ops.reduce_sum(array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2)\n    return x * top_k_mask + NEG_INF * (1 - top_k_mask)", "docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF.\n\nUsed for computing top-k prediction values in dense labels (which has the same\nshape as predictions) for recall and precision top-k metrics.\n\nArgs:\nx: tensor with any dimensions.\nk: the number of values to keep.\n\nReturns:\ntensor with same shape and dtype as x.", "source": "github-repos"}
{"code": "def data_period_start_day_of_week(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `data_period_start_day_of_week`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `data_period_start_day_of_week`')\n        vals = set()\n        vals.add('Sunday')\n        vals.add('Monday')\n        vals.add('Tuesday')\n        vals.add('Wednesday')\n        vals.add('Thursday')\n        vals.add('Friday')\n        vals.add('Saturday')\n        if (value not in vals):\n            raise ValueError('value {} is not an accepted value for field `data_period_start_day_of_week`'.format(value))\n    self._data_period_start_day_of_week = value", "docstring": "Corresponds to IDD Field `data_period_start_day_of_week`\n\nArgs:\nvalue (str): value for IDD Field `data_period_start_day_of_week`\nAccepted values are:\n- Sunday\n- Monday\n- Tuesday\n- Wednesday\n- Thursday\n- Friday\n- Saturday\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def append(self, value):\n    if ((self._avm is not None) and (not self.terminated)):\n        path = self._last_path\n        if path:\n            path += '.'\n        self[(path + LIST_HEAD)] = value\n        self._last_path = (path + LIST_TAIL)\n        self[self._last_path] = AVM()\n    else:\n        raise TdlError('Cannot append to a closed list.')", "docstring": "Append an item to the end of an open ConsList.\n\nArgs:\nvalue (:class:`Conjunction`, :class:`Term`): item to add\nRaises:\n:class:`TdlError`: when appending to a closed list", "source": "codesearchnet"}
{"code": "def flownet2_fusion(self, x):\n        \n        with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),\n                      padding='valid', strides=2, kernel_size=3,\n                      data_format='channels_first'), \\\n            argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,\n                     data_format='channels_first', strides=2, kernel_size=4):\n            conv0 = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1)\n\n            x = tf.layers.conv2d(pad(conv0, 1), 64, name='conv1')\n            conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1)\n            x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2')\n            conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1)\n\n            flow2 = tf.layers.conv2d(pad(conv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)\n            flow2_up = tf.layers.conv2d_transpose(flow2, 2, name='upsampled_flow2_to_1')\n            x = tf.layers.conv2d_transpose(conv2, 32, name='deconv1', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat1 = tf.concat([conv1, x, flow2_up], axis=1, name='concat1')\n            interconv1 = tf.layers.conv2d(pad(concat1, 1), 32, strides=1, name='inter_conv1', activation=tf.identity)\n\n            flow1 = tf.layers.conv2d(pad(interconv1, 1), 2, name='predict_flow1', strides=1, activation=tf.identity)\n            flow1_up = tf.layers.conv2d_transpose(flow1, 2, name='upsampled_flow1_to_0')\n            x = tf.layers.conv2d_transpose(concat1, 16, name='deconv0', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat0 = tf.concat([conv0, x, flow1_up], axis=1, name='concat0')\n            interconv0 = tf.layers.conv2d(pad(concat0, 1), 16, strides=1, name='inter_conv0', activation=tf.identity)\n            flow0 = tf.layers.conv2d(pad(interconv0, 1), 2, name='predict_flow0', strides=1, activation=tf.identity)\n\n            return tf.identity(flow0, name='flow2')", "docstring": "Architecture in Table 4 of FlowNet 2.0.\n\nArgs:\nx: NCHW tensor, where C=11 is the concatenation of 7 items of [3, 2, 2, 1, 1, 1, 1] channels.", "source": "juraj-google-style"}
{"code": "def save(f, arr, vocab):\n    \n    itr = iter(vocab)\n    \n    word, idx = next(itr)\n    _write_line(f, arr[idx], word)\n    for word, idx in itr:\n        f.write(b'\\n')\n        _write_line(f, arr[idx], word)", "docstring": "Save word embedding file.\n\nArgs:\nf (File): File to write the vectors. File should be open for writing\nascii.\narr (numpy.array): Numpy array with ``float`` dtype.\nvocab (iterable): Each element is pair of a word (``bytes``) and ``arr``\nindex (``int``). Word should be encoded to str apriori.", "source": "juraj-google-style"}
{"code": "def get_fractional_coords(self, cart_coords: Vector3Like) -> np.ndarray:\n        \n        return dot(cart_coords, self.inv_matrix)", "docstring": "Returns the fractional coordinates given cartesian coordinates.\n\nArgs:\ncart_coords (3x1 array): Cartesian coords.\n\nReturns:\nFractional coordinates.", "source": "juraj-google-style"}
{"code": "def relu6(x):\n    return ops.relu6(x)", "docstring": "Relu6 activation function.\n\nIt's the ReLU function, but truncated to a maximum value of 6.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def epoch_to_human_time(epoch_time):\n    if isinstance(epoch_time, int):\n        try:\n            d = datetime.datetime.fromtimestamp(epoch_time / 1000)\n            return d.strftime('%m-%d-%Y %H:%M:%S ')\n        except ValueError:\n            return None", "docstring": "Converts an epoch timestamp to human readable time.\n\nThis essentially converts an output of get_current_epoch_time to an output\nof get_current_human_time\n\nArgs:\nepoch_time: An integer representing an epoch timestamp in milliseconds.\n\nReturns:\nA time string representing the input time.\nNone if input param is invalid.", "source": "github-repos"}
{"code": "class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(AveragePooling3D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Average pooling layer for 3D inputs (e.g. volumes).\n\nArgs:\npool_size: An integer or tuple/list of 3 integers:\n(pool_depth, pool_height, pool_width)\nspecifying the size of the pooling window.\nCan be a single integer to specify the same value for\nall spatial dimensions.\nstrides: An integer or tuple/list of 3 integers,\nspecifying the strides of the pooling operation.\nCan be a single integer to specify the same value for\nall spatial dimensions.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string. The ordering of the dimensions in the inputs.\n`channels_last` (default) and `channels_first` are supported.\n`channels_last` corresponds to inputs with shape\n`(batch, depth, height, width, channels)` while `channels_first`\ncorresponds to inputs with shape\n`(batch, channels, depth, height, width)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def strace(device, trace_address, breakpoint_address):\n    \n    jlink = pylink.JLink()\n    jlink.open()\n\n    \n    jlink.power_on()\n    jlink.set_tif(pylink.JLinkInterfaces.SWD)\n    jlink.connect(device)\n    jlink.reset()\n\n    \n    jlink.breakpoint_clear_all()\n\n    \n    op = pylink.JLinkStraceOperation.TRACE_START\n    jlink.strace_clear_all()\n    jlink.strace_start()\n\n    \n    \n    bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True)\n    trhandle = jlink.strace_code_fetch_event(op, address=trace_address)\n    jlink.restart()\n    time.sleep(1)\n\n    \n    while True:\n        if jlink.halted():\n            break\n\n    \n    while True:\n        instructions = jlink.strace_read(1)\n        if len(instructions) == 0:\n            break\n        instruction = instructions[0]\n        print(jlink.disassemble_instruction(instruction))\n\n    jlink.power_off()\n    jlink.close()", "docstring": "Implements simple trace using the STrace API.\n\nArgs:\ndevice (str): the device to connect to\ntrace_address (int): address to begin tracing from\nbreakpoint_address (int): address to breakpoint at\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def GetCustomerIDs(client):\n    managed_customer_service = client.GetService('ManagedCustomerService', version='v201809')\n    offset = 0\n    selector = {'fields': ['CustomerId'], 'predicates': [{'field': 'CanManageClients', 'operator': 'EQUALS', 'values': [False]}], 'paging': {'startIndex': str(offset), 'numberResults': str(PAGE_SIZE)}}\n    queue = multiprocessing.Queue()\n    more_pages = True\n    while more_pages:\n        page = managed_customer_service.get(selector)\n        if (page and ('entries' in page) and page['entries']):\n            for entry in page['entries']:\n                queue.put(entry['customerId'])\n        else:\n            raise Exception(\"Can't retrieve any customer ID.\")\n        offset += PAGE_SIZE\n        selector['paging']['startIndex'] = str(offset)\n        more_pages = (offset < int(page['totalNumEntries']))\n    return queue", "docstring": "Retrieves all CustomerIds in the account hierarchy.\n\nNote that your configuration file must specify a client_customer_id belonging\nto an AdWords manager account.\n\nArgs:\nclient: an AdWordsClient instance.\nRaises:\nException: if no CustomerIds could be found.\nReturns:\nA Queue instance containing all CustomerIds in the account hierarchy.", "source": "codesearchnet"}
{"code": "def save_args(conditions, out_path):\n    \n\n    if isinstance(conditions, argparse.Namespace):\n        args = vars(conditions)\n    else:\n        args = conditions\n\n    try:\n        os.makedirs(out_path)\n    except OSError:\n        pass\n\n    with tempdir(prefix='args', dir=out_path) as tempd:\n        path = os.path.join(tempd, 'args.json')\n        with open(path, 'w') as f:\n            json.dump(args, f, indent=4)\n\n        new_path = os.path.join(out_path, 'args')\n        shutil.move(path, new_path)", "docstring": "A util function to save experiment condition for job table.\n\nArgs:\nconditions (:class:`argparse.Namespace` or dict): Experiment conditions\nto show on a job table. Keys are show as table header and values\nare show at a job row.\nout_path (str): Output directory name to save conditions.", "source": "juraj-google-style"}
{"code": "def post(self, url, params=None, data=None, files=None, **kwargs):\n        \n        return self.call_api(\n            \"POST\",\n            url,\n            params=params,\n            data=data,\n            files=files,\n            **kwargs\n        )", "docstring": "Call the API with a POST request.\n\nArgs:\nurl (str): Resource location relative to the base URL.\nparams (dict or None): Query-string parameters.\ndata (dict or None): Request body contents.\nfiles (dict or None: Files to be passed to the request.\n\nReturns:\nAn instance of ResultParser or ErrorParser.", "source": "juraj-google-style"}
{"code": "def set_processed_counts(self, shards_processed, shards_status):\n    \n    chart = google_chart_api.BarChart()\n\n    def filter_status(status_to_filter):\n      return [count if status == status_to_filter else 0\n              for count, status in zip(shards_processed, shards_status)]\n\n    if shards_status:\n      \n      \n      \n      \n      chart.stacked = True\n      chart.AddBars(filter_status(\"unknown\"), color=\"404040\")\n      chart.AddBars(filter_status(\"success\"), color=\"00ac42\")\n      chart.AddBars(filter_status(\"running\"), color=\"3636a9\")\n      chart.AddBars(filter_status(\"aborted\"), color=\"e29e24\")\n      chart.AddBars(filter_status(\"failed\"), color=\"f6350f\")\n    else:\n      chart.AddBars(shards_processed)\n\n    shard_count = len(shards_processed)\n\n    if shard_count > 95:\n      \n      pixels_per_shard = 700.0 / shard_count\n      bar_thickness = int(pixels_per_shard * .9)\n\n      chart.style = bar_chart.BarChartStyle(bar_thickness=bar_thickness,\n        bar_gap=0.1, use_fractional_gap_spacing=True)\n\n    if shards_processed and shard_count <= 95:\n      \n      \n      \n      stride_length = max(1, shard_count / 16)\n      chart.bottom.labels = []\n      for x in xrange(shard_count):\n        if (x % stride_length == 0 or\n            x == shard_count - 1):\n          chart.bottom.labels.append(x)\n        else:\n          chart.bottom.labels.append(\"\")\n      chart.left.labels = [\"0\", str(max(shards_processed))]\n      chart.left.min = 0\n\n    self.chart_width = min(700, max(300, shard_count * 20))\n    self.chart_url = chart.display.Url(self.chart_width, 200)", "docstring": "Updates a chart url to display processed count for each shard.\n\nArgs:\nshards_processed: list of integers with number of processed entities in\neach shard", "source": "juraj-google-style"}
{"code": "def resample(self, data, input_rate):\n    data16 = np.fromstring(string=data, dtype=np.int16)\n    resample_size = int(((len(data16) / self.input_rate) * self.RATE_PROCESS))\n    resample = signal.resample(data16, resample_size)\n    resample16 = np.array(resample, dtype=np.int16)\n    return resample16.tostring()", "docstring": "Microphone may not support our native processing sampling rate, so\nresample from input_rate to RATE_PROCESS here for webrtcvad and\ndeepspeech\n\nArgs:\ndata (binary): Input audio stream\ninput_rate (int): Input audio rate to resample from", "source": "codesearchnet"}
{"code": "def GetAttributeNames(self):\n    attribute_names = []\n    for attribute_name in iter(self.__dict__.keys()):\n        if (attribute_name[0] == '_'):\n            continue\n        attribute_names.append(attribute_name)\n    return attribute_names", "docstring": "Retrieves the names of all attributes.\n\nReturns:\nlist[str]: attribute names.", "source": "codesearchnet"}
{"code": "def getGUA(self, filterByPrefix=None):\n        \n        print '%s call getGUA' % self.port\n        print filterByPrefix\n        globalAddrs = []\n        try:\n            \n            globalAddrs = self.getGlobal()\n\n            if filterByPrefix is None:\n                return self.__padIp6Addr(globalAddrs[0])\n            else:\n                for line in globalAddrs:\n                    line = self.__padIp6Addr(line)\n                    print \"Padded IPv6 Address:\" + line\n                    if line.startswith(filterByPrefix):\n                        return line\n                print 'no global address matched'\n                return str(globalAddrs[0])\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('getGUA() Error: ' + str(e))\n            return e", "docstring": "get expected global unicast IPv6 address of OpenThreadWpan\n\nArgs:\nfilterByPrefix: a given expected global IPv6 prefix to be matched\n\nReturns:\na global IPv6 address", "source": "juraj-google-style"}
{"code": "def to_env_vars(mapping):  \n    \n\n    def format_key(key):\n        \n        if key:\n            decoded_name = 'SM_%s' % str(key).upper()\n            return decoded_name\n        else:\n            return ''\n\n    def format_value(_mapping):\n        if six.PY3 and isinstance(_mapping, six.binary_type):\n            \n            return _mapping.decode('latin1')\n        elif _mapping is None:\n            return ''\n        elif isinstance(_mapping, six.string_types):\n            return str(_mapping)\n        else:\n            return json.dumps(_mapping, sort_keys=True, separators=(',', ':'), ensure_ascii=True)\n\n    return {format_key(k): format_value(v) for k, v in mapping.items()}", "docstring": "Transform a dictionary in a dictionary of env vars.\nExample:\n>>>env_vars = mapping.to_env_vars({'model_dir': '/opt/ml/model', 'batch_size': 25})\n>>>\n>>>print(args)\n['MODEL_DIR', '/opt/ml/model', 'BATCH_SIZE', 25]\nArgs:\nmapping (dict[str, object]): A Python mapping.\nReturns:\n(dict): Dictionary of env vars", "source": "juraj-google-style"}
{"code": "def dbmax05years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmax05years`'.format(value))\n\n        self._dbmax05years = value", "docstring": "Corresponds to IDD Field `dbmax05years`\n5-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax05years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _ParseNamesString(self, names_string):\n    if (not names_string):\n        return\n    names_string = names_string.lower()\n    names = [name.strip() for name in names_string.split(',')]\n    file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)\n    self._filter_collection.AddFilter(file_entry_filter)", "docstring": "Parses the name string.\n\nArgs:\nnames_string (str): comma separated filenames to filter.", "source": "codesearchnet"}
{"code": "def match_hail_sizes(model_tracks, obs_tracks, track_pairings):\n        \n        unpaired = list(range(len(model_tracks)))\n        for p, pair in enumerate(track_pairings):\n            model_track = model_tracks[pair[0]]\n            unpaired.remove(pair[0])\n            obs_track = obs_tracks[pair[1]]\n            obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max()\n                                       for t, step in enumerate(obs_track.timesteps)])\n            if obs_track.times.size > 1 and model_track.times.size > 1:\n                normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min())\\\n                    * (obs_track.times - obs_track.times.min())\n                normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min())\\\n                    * (model_track.times - model_track.times.min())\n                hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind=\"nearest\",\n                                       bounds_error=False, fill_value=0)\n                model_track.observations = hail_interp(normalized_model_times)\n            elif obs_track.times.size == 1:\n                model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0]\n            elif model_track.times.size == 1:\n                model_track.observations = np.array([obs_hail_sizes.max()])\n            print(pair[0], \"obs\",  obs_hail_sizes)\n            print(pair[0], \"model\", model_track.observations)\n        for u in unpaired:\n            model_tracks[u].observations = np.zeros(model_tracks[u].times.shape)", "docstring": "Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm\ntrack timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the\nintermediate timesteps.\n\nArgs:\nmodel_tracks: List of model track STObjects\nobs_tracks: List of observed STObjects\ntrack_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks", "source": "juraj-google-style"}
{"code": "def get_all():\n    info_dir = _get_info_dir()\n    results = []\n    for filename in os.listdir(info_dir):\n        filepath = os.path.join(info_dir, filename)\n        try:\n            with open(filepath) as infile:\n                contents = infile.read()\n        except IOError as e:\n            if (e.errno == errno.EACCES):\n                continue\n            else:\n                raise\n        try:\n            info = _info_from_string(contents)\n        except ValueError:\n            tb_logging.get_logger().warning('invalid info file: %r', filepath, exc_info=True)\n        else:\n            results.append(info)\n    return results", "docstring": "Return TensorBoardInfo values for running TensorBoard processes.\n\nThis function may not provide a perfect snapshot of the set of running\nprocesses. Its result set may be incomplete if the user has cleaned\ntheir /tmp/ directory while TensorBoard processes are running. It may\ncontain extraneous entries if TensorBoard processes exited uncleanly\n(e.g., with SIGKILL or SIGQUIT).\n\nReturns:\nA fresh list of `TensorBoardInfo` objects.", "source": "codesearchnet"}
{"code": "def validate_config_has_one_of(config, one_of_keys):\n  \n  intersection = set(config).intersection(one_of_keys)\n  if len(intersection) > 1:\n    raise Exception('Only one of the values in \"%s\" is needed' % ', '.join(intersection))\n  if len(intersection) == 0:\n    raise Exception('One of the values in \"%s\" is needed' % ', '.join(one_of_keys))", "docstring": "Validate a config dictionary to make sure it has one and only one\nkey in one_of_keys.\n\nArgs:\nconfig: the config to validate.\none_of_keys: the list of possible keys that config can have one and only one.\n\nRaises:\nException if the config does not have any of them, or multiple of them.", "source": "juraj-google-style"}
{"code": "def store(self, obj):\n        \n        \n        \n        if type(obj) is AtlasServiceInstance.Instance:\n            query = { \"instance_id\" : obj.instance_id, \"database\" : obj.get_dbname(), \"cluster\": obj.get_cluster(), \"parameters\" : obj.parameters }\n        elif type(obj) is AtlasServiceBinding.Binding:\n            query = { \"binding_id\" : obj.binding_id, \"parameters\" : obj.parameters, \"instance_id\": obj.instance.instance_id }\n        else:\n            raise ErrStorageTypeUnsupported(type(obj))\n        \n        \n        try:\n            result = self.broker.insert_one(query)\n        except:\n            raise ErrStorageMongoConnection(\"Store Instance or Binding\")\n        \n        if result is not None:\n            \n            obj.provisioned = True\n            return result.inserted_id\n        \n        raise ErrStorageStore()", "docstring": "Store\n\nStore an object into the MongoDB storage for caching\n\nArgs:\nobj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding\n\nReturns:\nObjectId: MongoDB _id\n\nRaises:\nErrStorageMongoConnection: Error during MongoDB communication.\nErrStorageTypeUnsupported: Type unsupported.\nErrStorageStore : Failed to store the binding or instance.", "source": "juraj-google-style"}
{"code": "def wb010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `wb010`'.format(value))\n\n        self._wb010 = value", "docstring": "Corresponds to IDD Field `wb010`\nWet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `wb010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def load(self, filename, create=None, default_conf={}):\n    (filenames, tries) = self.__search_config_files(filename)\n    if len(filenames):\n        self.__loaded_config_file = (filenames if self.__nested else filenames[0])\n        return self.__load_config_files((filenames if self.__nested else filenames[:1]))\n    if (create is not None):\n        self.__loaded_config_file = os.path.join(create, filename)\n        self.save(default_conf)\n        return default_conf\n    raise ConfigFileNotFoundException(('Config file not found in: %s' % tries))", "docstring": "Load the config file\n\nArgs:\nfilename (str): the filename of the config, without any path\ncreate (str): if the config file not found, and this parameter is not None,\na config file will be create with content of default_conf\ndefault_conf (dict): content of the default config data\n\nReturns:\nReturn value of the ConfigFormatter.decode or the default_conf value\n\nRaises:\nConfigFileNotFoundException: if the config file not found", "source": "codesearchnet"}
{"code": "def run_dumper(self, dumper):\n        \n\n        logging.debug(\"start dumper::\")\n        dumper(\n            experiments=self.experiments,\n            farms=self.farms,\n            barn=self.barn,\n            engine=self.current_engine,\n        )\n        logging.debug(\"::dumper ended\")", "docstring": "run dumber (once pr. engine)\n\nArgs:\ndumper: dumper to run (function or method).\n\nThe dumper takes the attributes experiments, farms, and barn as input.\nIt does not return anything. But can, if the dumper designer feels in\na bad and nasty mood, modify the input objects\n(for example experiments).", "source": "juraj-google-style"}
{"code": "def __init__(self, x, y):\n    self.x = x\n    self.y = y", "docstring": "Init method.\n\nArgs:\nx: Argument x.\ny: Argument y.", "source": "github-repos"}
{"code": "def get_vmss(access_token, subscription_id, resource_group, vmss_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get virtual machine scale set details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response. JSON body of scale set properties.", "source": "codesearchnet"}
{"code": "def correct_absolute_refs(self, construction_table):\n    c_table = construction_table.copy()\n    abs_refs = constants.absolute_refs\n    problem_index = self.check_absolute_refs(c_table)\n    for i in problem_index:\n        order_of_refs = iter(permutations(abs_refs.keys()))\n        finished = False\n        while (not finished):\n            if self._has_valid_abs_ref(i, c_table):\n                finished = True\n            else:\n                row = c_table.index.get_loc(i)\n                c_table.iloc[(row, row:)] = next(order_of_refs)[row:3]\n    return c_table", "docstring": "Reindexe construction_table if linear reference in first three rows\npresent.\n\nUses :meth:`~Cartesian.check_absolute_refs` to obtain the problematic\nindices.\n\nArgs:\nconstruction_table (pd.DataFrame):\n\nReturns:\npd.DataFrame: Appropiately renamed construction table.", "source": "codesearchnet"}
{"code": "def __init__(self, filenames=None,\n                 selected_scans=None,\n                 profile=False,\n                 filestatuschecker=None,  \n                 fetch_one_liners=False,\n                 tester=None,\n                 initialize=False,\n                 ):\n        \n\n        if tester is None:\n            self.tester = prms.Instruments.tester\n        else:\n            self.tester = tester\n        self.loader = None  \n        self.logger = logging.getLogger(__name__)\n        self.logger.debug(\"created CellpyData instance\")\n        self.name = None\n        self.profile = profile\n        self.minimum_selection = {}\n        if filestatuschecker is None:\n            self.filestatuschecker = prms.Reader.filestatuschecker\n        else:\n            self.filestatuschecker = filestatuschecker\n        self.forced_errors = 0\n        self.summary_exists = False\n\n        if not filenames:\n            self.file_names = []\n        else:\n            self.file_names = filenames\n            if not self._is_listtype(self.file_names):\n                self.file_names = [self.file_names]\n        if not selected_scans:\n            self.selected_scans = []\n        else:\n            self.selected_scans = selected_scans\n            if not self._is_listtype(self.selected_scans):\n                self.selected_scans = [self.selected_scans]\n\n        self.datasets = []\n        self.status_datasets = []\n        self.selected_dataset_number = 0\n        self.number_of_datasets = 0\n\n        self.capacity_modifiers = ['reset', ]\n\n        self.list_of_step_types = ['charge', 'discharge',\n                                   'cv_charge', 'cv_discharge',\n                                   'charge_cv', 'discharge_cv',\n                                   'ocvrlx_up', 'ocvrlx_down', 'ir',\n                                   'rest', 'not_known']\n        \n        self.force_step_table_creation = \\\n            prms.Reader.force_step_table_creation\n        self.force_all = prms.Reader.force_all\n        self.sep = prms.Reader.sep\n        self._cycle_mode = prms.Reader.cycle_mode\n        \n        self.load_only_summary = prms.Reader.load_only_summary\n        self.select_minimal = prms.Reader.select_minimal\n        \n        \n        \n        self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles\n        \n        self.ensure_step_table = prms.Reader.ensure_step_table\n        self.daniel_number = prms.Reader.daniel_number\n        \n        self.raw_datadir = prms.Paths.rawdatadir\n        \n        self.cellpy_datadir = prms.Paths.cellpydatadir\n        \n        self.auto_dirs = prms.Reader.auto_dirs\n\n        \n        self.headers_normal = get_headers_normal()\n        self.headers_summary = get_headers_summary()\n        self.headers_step_table = get_headers_step_table()\n\n        self.table_names = None  \n        self.set_instrument()\n\n        \n        self.cellpy_units = get_cellpy_units()\n\n        if initialize:\n            self.initialize()", "docstring": "CellpyData object\n\nArgs:\nfilenames: list of files to load.\nselected_scans:\nprofile: experimental feature.\nfilestatuschecker: property to compare cellpy and raw-files;\ndefault read from prms-file.\nfetch_one_liners: experimental feature.\ntester: instrument used (e.g. \"arbin\") (checks prms-file as\ndefault).\ninitialize: create a dummy (empty) dataset; defaults to False.", "source": "juraj-google-style"}
{"code": "def call_plugins(self, step):\n        \n        for plugin in self.plugins:\n            try:\n                getattr(plugin, step)()\n            except AttributeError:\n                self.logger.debug(\"{} doesn't exist on plugin {}\".format(step, plugin))\n            except TypeError:\n                self.logger.debug(\"{} on plugin {} is not callable\".format(step, plugin))", "docstring": "For each plugins, check if a \"step\" method exist on it, and call it\n\nArgs:\nstep (str): The method to search and call on each plugin", "source": "juraj-google-style"}
{"code": "def get_num_bytes(self, batch: Sequence[str]) -> int:\n    return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of input batch elements.", "source": "github-repos"}
{"code": "def stl(A, b):\n    r\n    from scipy.linalg import solve_triangular\n\n    A = asarray(A, float)\n    b = asarray(b, float)\n    return solve_triangular(A, b, lower=True, check_finite=False)", "docstring": "r\"\"\"Shortcut to ``solve_triangular(A, b, lower=True, check_finite=False)``.\n\nSolve linear systems :math:`\\mathrm A \\mathbf x = \\mathbf b` when\n:math:`\\mathrm A` is a lower-triangular matrix.\n\nArgs:\nA (array_like): A lower-triangular matrix.\nb (array_like): Ordinate values.\n\nReturns:\n:class:`numpy.ndarray`: Solution ``x``.\n\nSee Also\n--------\nscipy.linalg.solve_triangular: Solve triangular linear equations.", "source": "juraj-google-style"}
{"code": "def get_error(self, block=False, timeout=None):\n    try:\n        error = self._errors.get(block=block, timeout=timeout)\n        return error\n    except Exception:\n        return None", "docstring": "Removes and returns an error from self._errors\n\nArgs:\nblock(bool): if True block until a RTMMessage is available,\nelse it will return None when self._inbox is empty\ntimeout(int): it blocks at most timeout seconds\n\nReturns:\nerror if inbox is not empty, else None", "source": "codesearchnet"}
{"code": "def __type_matches(self, obj: Any, type_: Type) -> bool:\n    if is_generic_union(type_):\n        for t in generic_type_args(type_):\n            if self.__type_matches(obj, t):\n                return True\n        return False\n    elif is_generic_list(type_):\n        if (not isinstance(obj, list)):\n            return False\n        for item in obj:\n            if (not self.__type_matches(item, generic_type_args(type_)[0])):\n                return False\n        return True\n    elif is_generic_dict(type_):\n        if (not isinstance(obj, OrderedDict)):\n            return False\n        for (key, value) in obj:\n            if (not isinstance(key, generic_type_args(type_)[0])):\n                return False\n            if (not self.__type_matches(value, generic_type_args(type_)[1])):\n                return False\n        return True\n    else:\n        return isinstance(obj, type_)", "docstring": "Checks that the object matches the given type.\n\nLike isinstance(), but will work with union types using Union, \\\nDict and List.\n\nArgs:\nobj: The object to check\ntype_: The type to check against\n\nReturns:\nTrue iff obj is of type type_", "source": "codesearchnet"}
{"code": "def __driver_helper(self, line):\n        \n        if line.strip() == '?':\n            self.stdout.write('\\n')\n            self.stdout.write(self.doc_string())\n        else:\n            toks = shlex.split(line[:-1])\n            try:\n                msg = self.__get_help_message(toks)\n            except Exception as e:\n                self.stderr.write('\\n')\n                self.stderr.write(traceback.format_exc())\n                self.stderr.flush()\n            self.stdout.write('\\n')\n            self.stdout.write(msg)\n        \n        self.stdout.write('\\n')\n        self.stdout.write(self.prompt)\n        self.stdout.write(line)\n        self.stdout.flush()", "docstring": "Driver level helper method.\n\n1.  Display help message for the given input. Internally calls\nself.__get_help_message() to obtain the help message.\n2.  Re-display the prompt and the input line.\n\nArguments:\nline: The input line.\n\nRaises:\nErrors from helper methods print stack trace without terminating\nthis shell. Other exceptions will terminate this shell.", "source": "juraj-google-style"}
{"code": "def _Open(self, hostname, port):\n    \n    try:\n      self._xmlrpc_server = SimpleXMLRPCServer.SimpleXMLRPCServer(\n          (hostname, port), logRequests=False, allow_none=True)\n    except SocketServer.socket.error as exception:\n      logger.warning((\n          'Unable to bind a RPC server on {0:s}:{1:d} with error: '\n          '{2!s}').format(hostname, port, exception))\n      return False\n\n    self._xmlrpc_server.register_function(\n        self._callback, self._RPC_FUNCTION_NAME)\n    return True", "docstring": "Opens the RPC communication channel for clients.\n\nArgs:\nhostname (str): hostname or IP address to connect to for requests.\nport (int): port to connect to for requests.\n\nReturns:\nbool: True if the communication channel was successfully opened.", "source": "juraj-google-style"}
{"code": "def _create_resource(self):\n    assert self._default_value.get_shape().ndims == 0\n    table_ref = gen_simple_hash_table_op.examples_simple_hash_table_create(key_dtype=self._key_dtype, value_dtype=self._value_dtype, name=self._name)\n    return table_ref", "docstring": "Create the resource tensor handle.\n\n`_create_resource` is an override of a method in base class\n`TrackableResource` that is required for SavedModel support. It can be\ncalled by the `resource_handle` property defined by `TrackableResource`.\n\nReturns:\nA tensor handle to the lookup table.", "source": "github-repos"}
{"code": "def create_container_definition(container_name, image, port=80, cpu=1.0, memgb=1.5, environment=None):\n    container = {'name': container_name}\n    container_properties = {'image': image}\n    container_properties['ports'] = [{'port': port}]\n    container_properties['resources'] = {'requests': {'cpu': cpu, 'memoryInGB': memgb}}\n    container['properties'] = container_properties\n    if (environment is not None):\n        container_properties['environmentVariables'] = environment\n    return container", "docstring": "Makes a python dictionary of container properties.\n\nArgs:\ncontainer_name: The name of the container.\nimage (str): Container image string. E.g. nginx.\nport (int): TCP port number. E.g. 8080.\ncpu (float): Amount of CPU to allocate to container. E.g. 1.0.\nmemgb (float): Memory in GB to allocate to container. E.g. 1.5.\nenvironment (list): A list of [{'name':'envname', 'value':'envvalue'}].\nSets environment variables in the container.\n\nReturns:\nA Python dictionary of container properties, pass a list of these to\ncreate_container_group().", "source": "codesearchnet"}
{"code": "def _create_plugin(self, config):\n    if (config is None):\n        raise ValueError('No plugin config to create plugin from.')\n    name = config.pop('name', None)\n    if (name is None):\n        raise cfg.AitConfigMissing('plugin name')\n    module_name = name.rsplit('.', 1)[0]\n    class_name = name.rsplit('.', 1)[(- 1)]\n    if (class_name in [x.name for x in (((self.outbound_streams + self.inbound_streams) + self.servers) + self.plugins)]):\n        raise ValueError('Plugin \"{}\" already loaded. Only one plugin of a given name is allowed'.format(class_name))\n    plugin_inputs = config.pop('inputs', None)\n    if (plugin_inputs is None):\n        log.warn('No plugin inputs specified for {}'.format(name))\n        plugin_inputs = []\n    subscribers = config.pop('outputs', None)\n    if (subscribers is None):\n        log.warn('No plugin outputs specified for {}'.format(name))\n        subscribers = []\n    module = import_module(module_name)\n    plugin_class = getattr(module, class_name)\n    instance = plugin_class(plugin_inputs, subscribers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL}, **config)\n    return instance", "docstring": "Creates a plugin from its config.\n\nParams:\nconfig:       plugin configuration as read by ait.config\nReturns:\nplugin:       a Plugin\nRaises:\nValueError:   if any of the required config values are missing", "source": "codesearchnet"}
{"code": "def pow(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._binary_op(x, y, tf.pow, tf.float32)", "docstring": "Returns a TensorFluent for the pow function.TensorFluent\n\nArgs:\nx: The first operand.\ny: The second operand.\n\nReturns:\nA TensorFluent wrapping the pow function.", "source": "juraj-google-style"}
{"code": "def _GetPathSegmentSeparator(self, path):\n    if (path.startswith('\\\\') or path[1:].startswith(':\\\\')):\n        return '\\\\'\n    if path.startswith('/'):\n        return '/'\n    if ('/' and ('\\\\' in path)):\n        forward_count = len(path.split('/'))\n        backward_count = len(path.split('\\\\'))\n        if (forward_count > backward_count):\n            return '/'\n        return '\\\\'\n    if ('/' in path):\n        return '/'\n    return '\\\\'", "docstring": "Given a path give back the path separator as a best guess.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: path segment separator.", "source": "codesearchnet"}
{"code": "def generate(self, step, params):\n        \n        subfactory = self.get_factory()\n        logger.debug(\n            \"SubFactory: Instantiating %s.%s(%s), create=%r\",\n            subfactory.__module__, subfactory.__name__,\n            utils.log_pprint(kwargs=params),\n            step,\n        )\n        force_sequence = step.sequence if self.FORCE_SEQUENCE else None\n        return step.recurse(subfactory, params, force_sequence=force_sequence)", "docstring": "Evaluate the current definition and fill its attributes.\n\nArgs:\nstep: a factory.builder.BuildStep\nparams (dict): additional, call-time added kwargs\nfor the step.", "source": "juraj-google-style"}
{"code": "def wait_for_import(self, connection_id, wait_interval):\n        \n        self.stdout.write(self.style.NOTICE('Waiting for import'), ending='')\n        state = utils.ConnectionStates.IMPORT_CONFIGURATION\n        while state == utils.ConnectionStates.IMPORT_CONFIGURATION:\n            \n            self.stdout.write(self.style.NOTICE('.'), ending='')\n            time.sleep(wait_interval)  \n            try:\n                connection = utils.get_connection(connection_id)\n            except requests.HTTPError as e:\n                raise CommandError(\"Failed to fetch connection information.\") from e\n            else:\n                state = connection['state']\n        self.stdout.write(self.style.NOTICE(' Done!'))", "docstring": "Wait until connection state is no longer ``IMPORT_CONFIGURATION``.\n\nArgs:\nconnection_id (str): Heroku Connect connection to monitor.\nwait_interval (int): How frequently to poll in seconds.\n\nRaises:\nCommandError: If fetch connection information fails.", "source": "juraj-google-style"}
{"code": "def parseMagnitude(m):\n        \n        m = NumberService().parse(m)\n\n        def toDecimalPrecision(n, k):\n            return float(\"%.*f\" % (k, round(n, k)))\n\n        \n        digits = 2\n        magnitude = toDecimalPrecision(m, digits)\n\n        \n        while not magnitude:\n            digits += 1\n            magnitude = toDecimalPrecision(m, digits)\n\n        \n        if m < 1.0:\n            magnitude = toDecimalPrecision(m, digits + 1)\n\n        \n        if int(magnitude) == magnitude:\n            magnitude = int(magnitude)\n\n        \n        magString = str(magnitude)\n        magString = re.sub(r'(\\d)e-(\\d+)',\n                           '\\g<1> times ten to the negative \\g<2>', magString)\n        magString = re.sub(r'(\\d)e\\+(\\d+)',\n                           '\\g<1> times ten to the \\g<2>', magString)\n        magString = re.sub(r'-(\\d+)', 'negative \\g<1>', magString)\n        magString = re.sub(r'\\b0(\\d+)', '\\g<1>', magString)\n        return magString", "docstring": "Parses a number m into a human-ready string representation.\nFor example, crops off floats if they're too accurate.\n\nArguments:\nm (float): Floating-point number to be cleaned.\n\nReturns:\nHuman-ready string description of the number.", "source": "juraj-google-style"}
{"code": "def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    batch_size, frames, channels, height, width = pixel_values.shape\n    pixel_values = pixel_values.reshape(batch_size * frames, channels, height, width)\n    video_features = self.vision_tower(pixel_values, output_hidden_states=True)\n    if isinstance(vision_feature_layer, int):\n        selected_video_features = video_features.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [video_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        selected_video_features = torch.cat(hs_pool, dim=-1)\n    if vision_feature_select_strategy == 'default':\n        selected_video_features = selected_video_features[:, 1:]\n    elif vision_feature_select_strategy == 'full':\n        selected_video_features = selected_video_features\n    video_features = self.vision_resampler(selected_video_features)\n    video_features = self.multi_modal_projector(video_features)\n    video_features = torch.split(video_features, frames, dim=0)\n    return video_features", "docstring": "Obtains video last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)\nThe tensors corresponding to the input video.\nvision_feature_layer (`Union[int, List[int]]`, *optiona;*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`, *optional*):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nvideo_features (List[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches\nand are of shape `(num_videos, video_length, embed_dim)`).", "source": "github-repos"}
{"code": "def infer_paths(output_dir, **subdirs):\n  \n  directories = {}\n  for name, path in six.iteritems(subdirs):\n    directories[name] = path if path else os.path.join(output_dir, name)\n  directories[\"output_dir\"] = output_dir\n  return directories", "docstring": "Infers standard paths to policy and model directories.\n\nExample:\n>>> infer_paths(\"/some/output/dir/\", policy=\"\", model=\"custom/path\")\n{\"policy\": \"/some/output/dir/policy\", \"model\": \"custom/path\",\n\"output_dir\":\"/some/output/dir/\"}\n\nArgs:\noutput_dir: output directory.\n**subdirs: sub-directories.\n\nReturns:\na dictionary with the directories.", "source": "juraj-google-style"}
{"code": "def add_newlines(f, output, char):\n    line_count = get_line_count(f)\n    f = open(f, 'r+')\n    output = open(output, 'r+')\n    for line in range(line_count):\n        string = f.readline()\n        string = re.sub(char, (char + '\\n'), string)\n        output.write(string)", "docstring": "Adds line breaks after every occurance of a given character in a file.\n\nArgs:\nf: string, path to input file.\n\noutput: string, path to output file.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def _get_and_write_archive(self, hunt, output_file_path):\n    hunt_archive = hunt.GetFilesArchive()\n    hunt_archive.WriteToFile(output_file_path)", "docstring": "Gets and writes a hunt archive.\n\nFunction is necessary for the _check_approval_wrapper to work.\n\nArgs:\nhunt: The GRR hunt object.\noutput_file_path: The output path where to write the Hunt Archive.", "source": "codesearchnet"}
{"code": "def register_handler(self, callable_obj, entrypoint, methods=('GET',)):\n    router_obj = Route.wrap_callable(uri=entrypoint, methods=methods, callable_obj=callable_obj)\n    if router_obj.is_valid:\n        self._routes.add(router_obj)\n        return self\n    raise RouteError('Missing params: methods: {} - entrypoint: {}'.format(methods, entrypoint))", "docstring": "Register a handler callable to a specific route.\n\nArgs:\nentrypoint (str): The uri relative path.\nmethods (tuple): A tuple of valid method strings.\ncallable_obj (callable): The callable object.\n\nReturns:\nThe Router instance (for chaining purposes).\n\nRaises:\nRouteError, for missing routing params or invalid callable\nobject type.", "source": "codesearchnet"}
{"code": "async def update_read_timestamp(self, read_timestamp=None):\n        \n        if read_timestamp is None:\n            read_timestamp = (self.events[-1].timestamp if self.events else\n                              datetime.datetime.now(datetime.timezone.utc))\n        if read_timestamp > self.latest_read_timestamp:\n            logger.info(\n                'Setting {} latest_read_timestamp from {} to {}'\n                .format(self.id_, self.latest_read_timestamp, read_timestamp)\n            )\n            \n            state = self._conversation.self_conversation_state\n            state.self_read_state.latest_read_timestamp = (\n                parsers.to_timestamp(read_timestamp)\n            )\n            try:\n                await self._client.update_watermark(\n                    hangouts_pb2.UpdateWatermarkRequest(\n                        request_header=self._client.get_request_header(),\n                        conversation_id=hangouts_pb2.ConversationId(\n                            id=self.id_\n                        ),\n                        last_read_timestamp=parsers.to_timestamp(\n                            read_timestamp\n                        ),\n                    )\n                )\n            except exceptions.NetworkError as e:\n                logger.warning('Failed to update read timestamp: {}'.format(e))\n                raise", "docstring": "Update the timestamp of the latest event which has been read.\n\nThis method will avoid making an API request if it will have no effect.\n\nArgs:\nread_timestamp (datetime.datetime): (optional) Timestamp to set.\nDefaults to the timestamp of the newest event.\n\nRaises:\n.NetworkError: If the timestamp cannot be updated.", "source": "juraj-google-style"}
{"code": "def _access_control(self, access_control, my_media_group=None):\n    extension = None\n    if (access_control is AccessControl.Private):\n        if my_media_group:\n            my_media_group.private = gdata.media.Private()\n    elif (access_control is AccessControl.Unlisted):\n        from gdata.media import YOUTUBE_NAMESPACE\n        from atom import ExtensionElement\n        kwargs = {'namespace': YOUTUBE_NAMESPACE, 'attributes': {'action': 'list', 'permission': 'denied'}}\n        extension = [ExtensionElement('accessControl', **kwargs)]\n    return extension", "docstring": "Prepares the extension element for access control\nExtension element is the optional parameter for the YouTubeVideoEntry\nWe use extension element to modify access control settings\n\nReturns:\ntuple of extension elements", "source": "codesearchnet"}
{"code": "def downloadRecords(search_result, from_doc=1):\n    downer = Downloader()\n    if ('set_number' not in search_result):\n        return []\n    set_number = str(search_result['set_number'])\n    if (len(set_number) < 6):\n        set_number = (((6 - len(set_number)) * '0') + set_number)\n    records = []\n    for cnt in range(search_result['no_records']):\n        doc_number = (from_doc + cnt)\n        if ((cnt >= MAX_RECORDS) or (doc_number > search_result['no_records'])):\n            break\n        set_data = downer.download((ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(SET_NUM=set_number, RECORD_NUM=doc_number)))\n        records.append(set_data)\n    return records", "docstring": "Download `MAX_RECORDS` documents from `search_result` starting from\n`from_doc`.\n\nAttr:\nsearch_result (dict): returned from :func:`searchInAleph`.\nfrom_doc (int, default 1): Start from document number `from_doc`.\n\nReturns:\nlist: List of XML strings with documents in MARC OAI.", "source": "codesearchnet"}
{"code": "def enable(profile='allprofiles'):\n    \n    cmd = ['netsh', 'advfirewall', 'set', profile, 'state', 'on']\n    ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)\n    if ret['retcode'] != 0:\n        raise CommandExecutionError(ret['stdout'])\n\n    return True", "docstring": ".. versionadded:: 2015.5.0\n\nEnable firewall profile\n\nArgs:\nprofile (Optional[str]): The name of the profile to enable. Default is\n``allprofiles``. Valid options are:\n\n- allprofiles\n- domainprofile\n- privateprofile\n- publicprofile\n\nReturns:\nbool: True if successful\n\nRaises:\nCommandExecutionError: If the command fails\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' firewall.enable", "source": "juraj-google-style"}
{"code": "def input_list_parser(infile_list):\n    final_list_of_files = []\n    for x in infile_list:\n        if op.isdir(x):\n            os.chdir(x)\n            final_list_of_files.extend(glob.glob('*'))\n        if op.isfile(x):\n            final_list_of_files.append(x)\n    return final_list_of_files", "docstring": "Always return a list of files with varying input.\n\n>>> input_list_parser(['/path/to/folder/'])\n['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']\n\n>>> input_list_parser(['/path/to/file.txt'])\n['/path/to/file.txt']\n\n>>> input_list_parser(['file1.txt'])\n['file1.txt']\n\nArgs:\ninfile_list: List of arguments\n\nReturns:\nlist: Standardized list of files", "source": "codesearchnet"}
{"code": "def get_url_reports(self, resources):\n        \n        api_name = 'virustotal-url-reports'\n\n        (all_responses, resources) = self._bulk_cache_lookup(api_name, resources)\n        resource_chunks = self._prepare_resource_chunks(resources, '\\n')\n        response_chunks = self._request_reports(\"resource\", resource_chunks, 'url/report')\n        self._extract_response_chunks(all_responses, response_chunks, api_name)\n\n        return all_responses", "docstring": "Retrieves a scan report on a given URL.\n\nArgs:\nresources: list of URLs.\nReturns:\nA dict with the URL as key and the VT report as value.", "source": "juraj-google-style"}
{"code": "def kill(self, procname):\n        \n        for proc in psutil.process_iter():\n            if proc.name() == procname:\n                self.info_log(\n                    '[pid:%s][name:%s] killed' %\n                    (proc.pid, proc.name())\n                )\n                proc.kill()", "docstring": "Kill by process name\n\nArgs:\nprocname (str)", "source": "juraj-google-style"}
{"code": "def write(self, data):\n        \n        if isinstance(data, WriteBuffer):\n            self._write_buffer.append(data)\n        else:\n            if len(data) > 0:\n                self._write_buffer.append(data)\n        if self.aggressive_write:\n            self._handle_write()\n        if self._write_buffer._total_length > 0:\n            self._register_or_update_event_handler(write=True)", "docstring": "Buffers some data to be sent to the host:port in a non blocking way.\n\nSo the data is always buffered and not sent on the socket in a\nsynchronous way.\n\nYou can give a WriteBuffer as parameter. The internal Connection\nWriteBuffer will be extended with this one (without copying).\n\nArgs:\ndata (str or WriteBuffer): string (or WriteBuffer) to write to\nthe host:port.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key):\n        \n        if isinstance(key, str):  \n            \n            if key in self.unmaterialized_cols:\n                return self.unmaterialized_cols[key]\n            raw_column = self.df[key].values\n            dtype = str(raw_column.dtype)\n            \n            if dtype == 'object':\n                raw_column = self.raw_columns[key]\n                weld_type = WeldVec(WeldChar())\n            else:\n                weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]\n            if self.predicates is None:\n                return SeriesWeld(raw_column, weld_type, self, key)\n            return SeriesWeld(\n                grizzly_impl.filter(\n                    raw_column,\n                    self.predicates.expr,\n                    weld_type\n                ),\n                weld_type,\n                self,\n                key\n            )\n        elif isinstance(key, list):\n            \n            return DataFrameWeld(self.df[key], self.predicates)\n        elif isinstance(key, SeriesWeld):\n            \n            if self.predicates is not None:\n                return DataFrameWeld(self.df, key.per_element_and(self.predicates))\n            return DataFrameWeld(self.df, key)\n        raise Exception(\"Invalid type in __getitem__\")", "docstring": "Summary\n\nArgs:\nkey (TYPE): Description\n\nReturns:\nTYPE: Description\n\nRaises:\nException: Description", "source": "juraj-google-style"}
{"code": "def on_change(self, attr, *callbacks):\n        \n        if attr not in self.properties():\n            raise ValueError(\"attempted to add a callback on nonexistent %s.%s property\" % (self.__class__.__name__, attr))\n        super(Model, self).on_change(attr, *callbacks)", "docstring": "Add a callback on this object to trigger when ``attr`` changes.\n\nArgs:\nattr (str) : an attribute name on this object\n*callbacks (callable) : callback functions to register\n\nReturns:\nNone\n\nExample:\n\n.. code-block:: python\n\nwidget.on_change('value', callback1, callback2, ..., callback_n)", "source": "juraj-google-style"}
{"code": "def dp010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dp010`'.format(value))\n\n        self._dp010 = value", "docstring": "Corresponds to IDD Field `dp010`\nDew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `dp010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def connect_input(self, index, walker, trigger=None):\n    if (trigger is None):\n        trigger = TrueTrigger()\n    if (index >= len(self.inputs)):\n        raise TooManyInputsError('Input index exceeded max number of inputs', index=index, max_inputs=len(self.inputs), stream=self.stream)\n    self.inputs[index] = (walker, trigger)", "docstring": "Connect an input to a stream walker.\n\nIf the input is already connected to something an exception is thrown.\nOtherwise the walker is used to read inputs for that input.\n\nA triggering condition can optionally be passed that will determine\nwhen this input will be considered as triggered.\n\nArgs:\nindex (int): The index of the input that we want to connect\nwalker (StreamWalker): The stream walker to use for the input\ntrigger (InputTrigger): The trigger to use for the input.  If\nno trigger is specified, the input is considered to always be\ntriggered (so TrueTrigger is used)", "source": "codesearchnet"}
{"code": "def __init__(self, word_count=None):\n    \n\n    if isinstance(word_count, dict):\n      word_count = iteritems(word_count)\n    sorted_counts = list(sorted(word_count, key=lambda wc: wc[1], reverse=True))\n    words = [w for w,c in sorted_counts]\n    super(CountedVocabulary, self).__init__(words=words)\n    self.word_count = dict(sorted_counts)", "docstring": "Build attributes word_id and id_word from input.\n\nArgs:\nword_count (dictionary): A dictionary of the type word:count or\nlist of tuples of the type (word, count).", "source": "juraj-google-style"}
{"code": "def _PrintDictAsTable(self, src_dict):\n    \n    key_list = list(src_dict.keys())\n    key_list.sort()\n\n    print('|', end='')\n    for key in key_list:\n      print(' {0:s} |'.format(key), end='')\n    print('')\n\n    print('|', end='')\n    for key in key_list:\n      print(' :---: |', end='')\n    print('')\n\n    print('|', end='')\n    for key in key_list:\n      print(' {0!s} |'.format(src_dict[key]), end='')\n    print('\\n')", "docstring": "Prints a table of artifact definitions.\n\nArgs:\nsrc_dict (dict[str, ArtifactDefinition]): artifact definitions by name.", "source": "juraj-google-style"}
{"code": "def import_demonstrations(self, demonstrations):\n        \n        if isinstance(demonstrations, dict):\n            if self.unique_state:\n                demonstrations['states'] = dict(state=demonstrations['states'])\n            if self.unique_action:\n                demonstrations['actions'] = dict(action=demonstrations['actions'])\n\n            self.model.import_demo_experience(**demonstrations)\n\n        else:\n            if self.unique_state:\n                states = dict(state=list())\n            else:\n                states = {name: list() for name in demonstrations[0]['states']}\n            internals = {name: list() for name in demonstrations[0]['internals']}\n            if self.unique_action:\n                actions = dict(action=list())\n            else:\n                actions = {name: list() for name in demonstrations[0]['actions']}\n            terminal = list()\n            reward = list()\n\n            for demonstration in demonstrations:\n                if self.unique_state:\n                    states['state'].append(demonstration['states'])\n                else:\n                    for name, state in states.items():\n                        state.append(demonstration['states'][name])\n                for name, internal in internals.items():\n                    internal.append(demonstration['internals'][name])\n                if self.unique_action:\n                    actions['action'].append(demonstration['actions'])\n                else:\n                    for name, action in actions.items():\n                        action.append(demonstration['actions'][name])\n                terminal.append(demonstration['terminal'])\n                reward.append(demonstration['reward'])\n\n            self.model.import_demo_experience(\n                states=states,\n                internals=internals,\n                actions=actions,\n                terminal=terminal,\n                reward=reward\n            )", "docstring": "Imports demonstrations, i.e. expert observations. Note that for large numbers of observations,\nset_demonstrations is more appropriate, which directly sets memory contents to an array an expects\na different layout.\n\nArgs:\ndemonstrations: List of observation dicts", "source": "juraj-google-style"}
{"code": "def lengths_to_area_mask(feature_length, length, max_area_size):\n  \n\n  paddings = tf.cast(tf.expand_dims(\n      tf.logical_not(\n          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)\n  _, _, area_sum, _, _ = compute_area_features(paddings,\n                                               max_area_width=max_area_size)\n  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])\n  return mask", "docstring": "Generates a non-padding mask for areas based on lengths.\n\nArgs:\nfeature_length: a tensor of [batch_size]\nlength: the length of the batch\nmax_area_size: the maximum area size considered\nReturns:\nmask: a tensor in shape of [batch_size, num_areas]", "source": "juraj-google-style"}
{"code": "def fibo(max_value=None):\n    a = 1\n    b = 1\n    while True:\n        if ((max_value is None) or (a < max_value)):\n            (yield a)\n            (a, b) = (b, (a + b))\n        else:\n            (yield max_value)", "docstring": "Generator for fibonaccial decay.\n\nArgs:\nmax_value: The maximum value to yield. Once the value in the\ntrue fibonacci sequence exceeds this, the value\nof max_value will forever after be yielded.", "source": "codesearchnet"}
{"code": "def log(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.log, tf.float32)", "docstring": "Returns a TensorFluent for the log function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the log function.", "source": "juraj-google-style"}
{"code": "def datestr2date(date_str):\n    \n    if any(c not in '0123456789-/' for c in date_str):\n        raise ValueError('Illegal character in date string')\n    if '/' in date_str:\n        try:\n            m, d, y = date_str.split('/')\n        except:\n            raise ValueError('Date {} must have no or exactly 2 slashes. {}'.\n                    format(date_str, VALID_DATE_FORMATS_TEXT))\n    elif '-' in date_str:\n        try:\n            d, m, y = date_str.split('-')\n        except:\n            raise ValueError('Date {} must have no or exactly 2 dashes. {}'.\n                    format(date_str, VALID_DATE_FORMATS_TEXT))\n    elif len(date_str) == 8 or len(date_str) == 6:\n        d = date_str[-2:]\n        m = date_str[-4:-2]\n        y = date_str[:-4]\n    else:\n        raise ValueError('Date format not recognised. {}'.format(\n                VALID_DATE_FORMATS_TEXT))\n    if len(y) == 2:\n        year = 2000 + int(y)\n    elif len(y) == 4:\n        year = int(y)\n    else:\n        raise ValueError('year must be 2 or 4 digits')\n    for s in (m, d):\n        if 1 <= len(s) <= 2:\n            month, day = int(m), int(d)\n        else:\n            raise ValueError('m and d must be 1 or 2 digits')\n    try:\n        return datetime.date(year, month, day)\n    except ValueError:\n        raise ValueError('Invalid date {}. {}'.format(date_str, \n                VALID_DATE_FORMATS_TEXT))", "docstring": "Turns a string into a datetime.date object. This will only work if the\nformat can be \"guessed\", so the string must have one of the formats from\nVALID_DATE_FORMATS_TEXT.\n\nArgs:\ndate_str (str) a string that represents a date\nReturns:\ndatetime.date object\nRaises:\nValueError if the input string does not have a valid format.", "source": "juraj-google-style"}
{"code": "def full_name(decl, with_defaults=True):\n    \n\n    if None is decl:\n        raise RuntimeError(\"Unable to generate full name for None object!\")\n    if with_defaults:\n        if not decl.cache.full_name:\n            path = declaration_path(decl)\n            if path == [\"\"]:\n                \n                \n                decl.cache.full_name = \"\"\n            else:\n                decl.cache.full_name = full_name_from_declaration_path(path)\n        return decl.cache.full_name\n    else:\n        if not decl.cache.full_partial_name:\n            path = partial_declaration_path(decl)\n            if path == [\"\"]:\n                \n                \n                decl.cache.full_partial_name = \"\"\n            else:\n                decl.cache.full_partial_name = \\\n                    full_name_from_declaration_path(path)\n        return decl.cache.full_partial_name", "docstring": "Returns declaration full qualified name.\n\nIf `decl` belongs to anonymous namespace or class, the function will return\nC++ illegal qualified name.\n\nArgs:\ndecl (declaration_t): declaration for which the full qualified name\nshould be calculated.\n\nReturns:\nlist[(str | basestring)]: full name of the declaration.", "source": "juraj-google-style"}
{"code": "def defect_concentration(self, chemical_potentials, temperature=300, fermi_level=0.0):\n        \n        n = self.multiplicity * 1e24 / self.defect.bulk_structure.volume\n        conc = n * np.exp(-1.0 * self.formation_energy(chemical_potentials, fermi_level=fermi_level) /\n                          (kb * temperature))\n\n        return conc", "docstring": "Get the defect concentration for a temperature and Fermi level.\nArgs:\ntemperature:\nthe temperature in K\nfermi_level:\nthe fermi level in eV (with respect to the VBM)\nReturns:\ndefects concentration in cm^-3", "source": "juraj-google-style"}
{"code": "def wrap_query_in_nested_if_field_is_nested(query, field, nested_fields):\n    \n    for element in nested_fields:\n        match_pattern = r'^{}.'.format(element)\n        if re.match(match_pattern, field):\n            return generate_nested_query(element, query)\n\n    return query", "docstring": "Helper for wrapping a query into a nested if the fields within the query are nested\n\nArgs:\nquery : The query to be wrapped.\nfield : The field that is being queried.\nnested_fields : List of fields which are nested.\nReturns:\n(dict): The nested query", "source": "juraj-google-style"}
{"code": "def _InitializeGraph(self, os_name, artifact_list):\n    dependencies = artifact_registry.REGISTRY.SearchDependencies(os_name, artifact_list)\n    (artifact_names, attribute_names) = dependencies\n    self._AddAttributeNodes(attribute_names)\n    self._AddArtifactNodesAndEdges(artifact_names)", "docstring": "Creates the nodes and directed edges of the dependency graph.\n\nArgs:\nos_name: String specifying the OS name.\nartifact_list: List of requested artifact names.", "source": "codesearchnet"}
{"code": "def delete_vmss_vms(access_token, subscription_id, resource_group, vmss_name, vm_ids):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/delete?api-version=', COMP_API])\n    body = (('{\"instanceIds\" : ' + vm_ids) + '}')\n    return do_post(endpoint, body, access_token)", "docstring": "Delete a VM in a VM Scale Set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\nvm_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def get_all_reqs(self):\n    try:\n        open(self.req_file, 'rb')\n    except IOError:\n        msg = \"[Error] Cannot read file '%s'.\" % self.req_file\n        logging.error(msg)\n        sys.exit(1)\n    curr_status = True\n    parser = configparser.ConfigParser()\n    parser.read(self.req_file)\n    if not parser.sections():\n        err_msg = '[Error] Empty config file. '\n        err_msg += '(file = %s, ' % str(self.req_file)\n        err_msg += 'parser sectons = %s)' % str(parser.sections())\n        self.error_msg.append(err_msg)\n        logging.error(err_msg)\n        curr_status = False\n    required_dict = {}\n    optional_dict = {}\n    unsupported_dict = {}\n    dependency_dict = {}\n    for section in parser.sections():\n        all_configs = parser.options(section)\n        for config in all_configs:\n            spec = parser.get(section, config)\n            if section == 'Dependency':\n                dependency_dict[config] = []\n                spec_split = spec.split(',\\n')\n                if spec_split[0] == '[':\n                    spec_split = spec_split[1:]\n                elif '[' in spec_split[0]:\n                    spec_split[0] = spec_split[0].replace('[', '')\n                else:\n                    warn_msg = '[Warning] Config file format error: Missing `[`.'\n                    warn_msg += '(section = %s, ' % str(section)\n                    warn_msg += 'config = %s)' % str(config)\n                    logging.warning(warn_msg)\n                    self.warning_msg.append(warn_msg)\n                if spec_split[-1] == ']':\n                    spec_split = spec_split[:-1]\n                elif ']' in spec_split[-1]:\n                    spec_split[-1] = spec_split[-1].replace(']', '')\n                else:\n                    warn_msg = '[Warning] Config file format error: Missing `]`.'\n                    warn_msg += '(section = %s, ' % str(section)\n                    warn_msg += 'config = %s)' % str(config)\n                    logging.warning(warn_msg)\n                    self.warning_msg.append(warn_msg)\n                for rule in spec_split:\n                    spec_dict = self.filter_dependency(rule)\n                    cfg_name = spec_dict['cfg']\n                    dep_name = spec_dict['cfgd']\n                    cfg_req = self._Reqs(self.convert_to_list(spec_dict['cfg_spec'], ' '), config=cfg_name, section=section)\n                    dep_req = self._Reqs(self.convert_to_list(spec_dict['cfgd_spec'], ' '), config=dep_name, section=section)\n                    cfg_req_status = cfg_req.get_status\n                    dep_req_status = dep_req.get_status\n                    if not cfg_req_status[0] or not dep_req_status[0]:\n                        msg = '[Error] Failed to create _Reqs() instance for a '\n                        msg += 'dependency item. 
(config = %s, ' % str(cfg_name)\n                        msg += 'dep = %s)' % str(dep_name)\n                        logging.error(msg)\n                        self.error_msg.append(cfg_req_status[1])\n                        self.error_msg.append(dep_req_status[1])\n                        curr_status = False\n                        break\n                    else:\n                        dependency_dict[config].append([cfg_name, cfg_req, dep_name, dep_req])\n                if not curr_status:\n                    break\n            else:\n                if section == 'Required':\n                    add_to = required_dict\n                elif section == 'Optional':\n                    add_to = optional_dict\n                elif section == 'Unsupported':\n                    add_to = unsupported_dict\n                else:\n                    msg = '[Error] Section name `%s` is not accepted.' % str(section)\n                    msg += 'Accepted section names are `Required`, `Optional`, '\n                    msg += '`Unsupported`, and `Dependency`.'\n                    logging.error(msg)\n                    self.error_msg.append(msg)\n                    curr_status = False\n                    break\n                req_list = self.convert_to_list(self.filter_line(spec), ' ')\n                add_to[config] = self._Reqs(req_list, config=config, section=section)\n            if not curr_status:\n                break\n        if not curr_status:\n            break\n    return_dict = {'required': required_dict, 'optional': optional_dict, 'unsupported': unsupported_dict, 'dependency': dependency_dict}\n    return return_dict", "docstring": "Parses all compatibility specifications listed in the `.ini` config file.\n\nReads and parses each and all compatibility specifications from the `.ini`\nconfig file by sections. It then populates appropriate dicts that represent\neach section (e.g. `self.required`) and returns a tuple of the populated\ndicts.\n\nReturns:\nDict of dict\n{ `required`: Dict of `Required` configs and supported versions,\n`optional`: Dict of `Optional` configs and supported versions,\n`unsupported`: Dict of `Unsupported` configs and supported versions,\n`dependency`: Dict of `Dependency` configs and supported versions }", "source": "github-repos"}
{"code": "def parse_clnsig(acc, sig, revstat, transcripts):\n    clnsig_accsessions = []\n    if acc:\n        try:\n            acc = int(acc)\n        except ValueError:\n            pass\n        if isinstance(acc, int):\n            revstat_groups = []\n            if revstat:\n                revstat_groups = [rev.lstrip('_') for rev in revstat.split(',')]\n            sig_groups = []\n            if sig:\n                for significance in sig.split('/'):\n                    splitted_word = significance.split('_')\n                    sig_groups.append(' '.join(splitted_word[:2]))\n            for sign_term in sig_groups:\n                clnsig_accsessions.append({'value': sign_term, 'accession': int(acc), 'revstat': ', '.join(revstat_groups)})\n        else:\n            acc_groups = acc.split('|')\n            sig_groups = sig.split('|')\n            revstat_groups = revstat.split('|')\n            for (acc_group, sig_group, revstat_group) in zip(acc_groups, sig_groups, revstat_groups):\n                accessions = acc_group.split(',')\n                significances = sig_group.split(',')\n                revstats = revstat_group.split(',')\n                for (accession, significance, revstat) in zip(accessions, significances, revstats):\n                    clnsig_accsessions.append({'value': int(significance), 'accession': accession, 'revstat': revstat})\n    elif transcripts:\n        clnsig = set()\n        for transcript in transcripts:\n            for annotation in transcript.get('clinsig', []):\n                clnsig.add(annotation)\n        for annotation in clnsig:\n            clnsig_accsessions.append({'value': annotation})\n    return clnsig_accsessions", "docstring": "Get the clnsig information\n\nArgs:\nacc(str): The clnsig accession number, raw from vcf\nsig(str): The clnsig significance score, raw from vcf\nrevstat(str): The clnsig revstat, raw from vcf\ntranscripts(iterable(dict))\n\nReturns:\nclnsig_accsessions(list): A list with clnsig accessions", "source": "codesearchnet"}
{"code": "def __init__(self, url: str):\n        \n        self.url = url\n        parsed_url = urlparse(self.url)\n        self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n        self.netloc = parsed_url.netloc\n        self.path = parsed_url.path\n        self.filename = os.path.basename(self.path)", "docstring": "Construct a File object from a url string.\n\nArgs:\n- url (string) : url string of the file e.g.\n- 'input.txt'\n- 'file:///scratch/proj101/input.txt'\n- 'globus://go#ep1/~/data/input.txt'\n- 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'", "source": "juraj-google-style"}
{"code": "def _add(self, frame, strict):\n        \n\n        if not isinstance(frame, Frame):\n            raise TypeError(\"%r not a Frame instance\" % frame)\n\n        orig_frame = frame\n        frame = frame._upgrade_frame()\n        if frame is None:\n            if not strict:\n                return\n            raise TypeError(\n                \"Can't upgrade %r frame\" % type(orig_frame).__name__)\n\n        hash_key = frame.HashKey\n        if strict or hash_key not in self:\n            self[hash_key] = frame\n            return\n\n        \n        \n        \n        while True:\n            old_frame = self[hash_key]\n            new_frame = old_frame._merge_frame(frame)\n            new_hash = new_frame.HashKey\n            if new_hash == hash_key:\n                self[hash_key] = new_frame\n                break\n            else:\n                assert new_frame is frame\n                if new_hash not in self:\n                    self[new_hash] = new_frame\n                    break\n                hash_key = new_hash", "docstring": "Add a frame.\n\nArgs:\nframe (Frame): the frame to add\nstrict (bool): if this should raise in case it can't be added\nand frames shouldn't be merged.", "source": "juraj-google-style"}
{"code": "def HandleBlockReceived(self, inventory):\n    block = IOHelper.AsSerializableWithType(inventory, 'neo.Core.Block.Block')\n    if (not block):\n        return\n    blockhash = block.Hash.ToBytes()\n    try:\n        if (blockhash in BC.Default().BlockRequests):\n            BC.Default().BlockRequests.remove(blockhash)\n    except KeyError:\n        pass\n    try:\n        if (blockhash in self.myblockrequests):\n            self.heart_beat(HEARTBEAT_BLOCKS)\n            self.myblockrequests.remove(blockhash)\n    except KeyError:\n        pass\n    self.leader.InventoryReceived(block)", "docstring": "Process a Block inventory payload.\n\nArgs:\ninventory (neo.Network.Inventory):", "source": "codesearchnet"}
{"code": "def bessel_k0e(x, name=None):\n    with ops.name_scope(name, 'bessel_k0e', [x]):\n        return gen_special_math_ops.bessel_k0e(x)", "docstring": "Computes the Bessel k0e function of `x` element-wise.\n\nModified Bessel function of order 0.\n\n>>> tf.math.special.bessel_k0e([0.5, 1., 2., 4.]).numpy()\narray([1.52410939, 1.14446308, 0.84156822, 0.60929767], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.k0e\n@end_compatibility", "source": "github-repos"}
{"code": "def verify_link_in_task_graph(chain, decision_link, task_link):\n    log.info('Verifying the {} {} task definition is part of the {} {} task graph...'.format(task_link.name, task_link.task_id, decision_link.name, decision_link.task_id))\n    if (task_link.task_id in decision_link.task_graph):\n        graph_defn = deepcopy(decision_link.task_graph[task_link.task_id])\n        verify_task_in_task_graph(task_link, graph_defn)\n        log.info(\"Found {} in the graph; it's a match\".format(task_link.task_id))\n        return\n    raise_on_errors([\"Can't find task {} {} in {} {} task-graph.json!\".format(task_link.name, task_link.task_id, decision_link.name, decision_link.task_id)])", "docstring": "Compare the runtime task definition against the decision task graph.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\ndecision_link (LinkOfTrust): the decision task link\ntask_link (LinkOfTrust): the task link we're testing\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def compute_g_values(self, input_ids: torch.LongTensor) -> torch.LongTensor:\n    self._check_input_ids_shape(input_ids)\n    ngrams = input_ids.unfold(dimension=1, size=self.ngram_len, step=1)\n    ngram_keys = self.compute_ngram_keys(ngrams)\n    return self.sample_g_values(ngram_keys)", "docstring": "Computes g values for each ngram from the given sequence of tokens.\n\nArgs:\ninput_ids (`torch.LongTensor`):\nInput token ids (batch_size, input_len).\n\nReturns:\nG values (batch_size, input_len - (ngram_len - 1), depth).", "source": "github-repos"}
{"code": "def load(self, auth, state=None, sync=True):\n        \n        self._keep_api.setAuth(auth)\n        self._reminders_api.setAuth(auth)\n        self._media_api.setAuth(auth)\n        if state is not None:\n            self.restore(state)\n        if sync:\n            self.sync(True)", "docstring": "Authenticate to Google with a prepared authentication object & sync.\nArgs:\nauth (APIAuth): Authentication object.\nstate (dict): Serialized state to load.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "juraj-google-style"}
{"code": "def parse_key(key):\n    (hkey, lkey) = struct.unpack('<II', key[0:UBIFS_SK_LEN])\n    ino_num = (hkey & UBIFS_S_KEY_HASH_MASK)\n    key_type = (lkey >> UBIFS_S_KEY_BLOCK_BITS)\n    khash = lkey\n    return {'type': key_type, 'ino_num': ino_num, 'khash': khash}", "docstring": "Parse node key\n\nArguments:\nStr:key    -- Hex string literal of node key.\n\nReturns:\nInt:key_type   -- Type of key, data, ino, dent, etc.\nInt:ino_num    -- Inode number.\nInt:khash      -- Key hash.", "source": "codesearchnet"}
{"code": "def get_country_by_id(self, country_id: int) -> typing.Optional['Country']:\n        \n        VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id')\n        if country_id not in self._countries_by_id.keys():\n            for country in self.countries:\n\n                if country.country_id == country_id:\n                    self._countries_by_id[country_id] = country\n                    return country\n            raise ValueError(country_id)\n        else:\n            return self._countries_by_id[country_id]", "docstring": "Gets a country from its name\n\nArgs:\ncountry_id: country id\n\nReturns: Country", "source": "juraj-google-style"}
{"code": "def update_summary(self, w):\n    old = self.summary.v\n    reviewers = self._graph.retrieve_reviewers(self)\n    reviews = [self._graph.retrieve_review(r, self).score for r in reviewers]\n    weights = [w(r.anomalous_score) for r in reviewers]\n    if (sum(weights) == 0):\n        self.summary = np.mean(reviews)\n    else:\n        self.summary = np.average(reviews, weights=weights)\n    return abs((self.summary.v - old))", "docstring": "Update summary.\n\nThe new summary is a weighted average of reviews i.e.\n\n.. math::\n\n\\\\frac{\\\\sum_{r \\\\in R} \\\\mbox{weight}(r) \\\\times \\\\mbox{review}(r)}\n{\\\\sum_{r \\\\in R} \\\\mbox{weight}(r)},\n\nwhere :math:`R` is a set of reviewers reviewing this product,\n:math:`\\\\mbox{review}(r)` and :math:`\\\\mbox{weight}(r)` are\nthe review and weight of the reviewer :math:`r`, respectively.\n\nArgs:\nw: A weight function.\n\nReturns:\nabsolute difference between old summary and updated one.", "source": "codesearchnet"}
{"code": "def set_function_defaults(self, node: cfg.CFGNode, defaults_var: cfg.Variable) -> None:\n    defaults = self._extract_defaults(defaults_var)\n    new_sigs = []\n    for sig in self.signatures:\n        if defaults:\n            new_sigs.append(sig.set_defaults(defaults))\n        else:\n            d = sig.param_types\n            if hasattr(self, 'parent'):\n                d = d[1:]\n            new_sigs.append(sig.set_defaults(d))\n    self.signatures = new_sigs\n    if hasattr(self, 'parent'):\n        self.parent._member_map[self.name] = self.to_pytd_def(node, self.name)", "docstring": "Attempts to set default arguments for a function's signatures.\n\nIf defaults_var is not an unambiguous tuple (i.e. one that can be processed\nby abstract_utils.get_atomic_python_constant), every argument is made\noptional and a warning is issued. This function emulates __defaults__.\n\nIf this function is part of a class (or has a parent), that parent is\nupdated so the change is stored.\n\nArgs:\nnode: the node that defaults are being set at.\ndefaults_var: a Variable with a single binding to a tuple of default\nvalues.", "source": "github-repos"}
{"code": "def post_process(self, outputs, target_sizes):\n    logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if len(out_logits) != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    if target_sizes.shape[1] != 2:\n        raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n    prob = nn.functional.softmax(out_logits, -1)\n    scores, labels = prob[..., :-1].max(-1)\n    boxes = center_to_corners_format(out_bbox)\n    img_h, img_w = target_sizes.unbind(1)\n    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n    boxes = boxes * scale_fct[:, None, :]\n    results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n    return results", "docstring": "Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\nbottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrObjectDetectionOutput`]):\nRaw outputs of the model.\ntarget_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\nTensor containing the size (height, width) of each image of the batch. For evaluation, this must be the\noriginal image size (before any data augmentation). For visualization, this should be the image size\nafter data augment, but before padding.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def _lookup_global(self, symbol):\n        \n        assert symbol.parts\n        namespace = self.namespaces\n        if len(symbol.parts) == 1:\n            \n            namespace = self.namespaces[None]\n        try:\n            \n            return self._lookup_namespace(symbol, namespace)\n        except Error as orig_exc:\n            try:\n                \n                \n                namespace = self.namespaces[None]\n                return self._lookup_namespace(symbol, namespace)\n            except Error:\n                raise orig_exc", "docstring": "Helper for lookup_symbol that only looks up global variables.\n\nArgs:\nsymbol: Symbol", "source": "juraj-google-style"}
{"code": "def env(mounts):\n    \n    f_mounts = [m.strip(\"/\") for m in mounts]\n\n    root = local.path(\"/\")\n\n    ld_libs = [root / m / \"lib\" for m in f_mounts]\n    ld_libs.extend([root / m / \"lib64\" for m in f_mounts])\n\n    paths = [root / m / \"bin\" for m in f_mounts]\n    paths.extend([root / m / \"sbin\" for m in f_mounts])\n    paths.extend([root / m for m in f_mounts])\n    return paths, ld_libs", "docstring": "Compute the environment of the change root for the user.\n\nArgs:\nmounts: The mountpoints of the current user.\nReturn:\npaths\nld_libs", "source": "juraj-google-style"}
{"code": "def create(self, vid):\n    command = ('vlan %s' % vid)\n    return (self.configure(command) if isvlan(vid) else False)", "docstring": "Creates a new VLAN resource\n\nArgs:\nvid (str): The VLAN ID to create\n\nReturns:\nTrue if create was successful otherwise False", "source": "codesearchnet"}
{"code": "def expand(sql, args=None):\n    (sql, args) = SqlModule.get_sql_statement_with_environment(sql, args)\n    return _sql_statement.SqlStatement.format(sql._sql, args)", "docstring": "Expand a SqlStatement, query string or SqlModule with a set of arguments.\n\nArgs:\nsql: a SqlStatement, %%sql module, or string containing a query.\nargs: a string of command line arguments or a dictionary of values. If a string, it is\npassed to the argument parser for the SqlModule associated with the SqlStatement or\nSqlModule. If a dictionary, it is used to override any default arguments from the\nargument parser. If the sql argument is a string then args must be None or a dictionary\nas in this case there is no associated argument parser.\nReturns:\nThe expanded SQL, list of referenced scripts, and list of referenced external tables.", "source": "codesearchnet"}
{"code": "def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:\n    for (k, v) in editing_dict.items():\n        if isinstance(v, collections.Mapping):\n            update_dict_recursive(editable_dict.get(k, {}), v)\n        else:\n            editable_dict[k] = v", "docstring": "Updates dict recursively\n\nYou need to use this function to update dictionary if depth of editing_dict is more then 1\n\nArgs:\neditable_dict: dictionary, that will be edited\nediting_dict: dictionary, that contains edits\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--virustotal-api-key', '--virustotal_api_key',\n        dest='virustotal_api_key', type=str, action='store', default=None,\n        metavar='API_KEY', help=(\n            'Specify the API key for use with VirusTotal.'))\n\n    argument_group.add_argument(\n        '--virustotal-free-rate-limit', '--virustotal_free_rate_limit',\n        dest='virustotal_free_rate_limit',\n        action='store_false', default=cls._DEFAULT_RATE_LIMIT, help=(\n            'Limit Virustotal requests to the default free API key rate of '\n            '4 requests per minute. Set this to false if you have an key '\n            'for the private API.'))\n\n    argument_group.add_argument(\n        '--virustotal-hash', '--virustotal_hash', dest='virustotal_hash',\n        type=str, action='store', choices=['md5', 'sha1', 'sha256'],\n        default=cls._DEFAULT_HASH, metavar='HASH', help=(\n            'Type of hash to query VirusTotal, the default is: {0:s}'.format(\n                cls._DEFAULT_HASH)))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def previous(self) -> 'ArrayEntry':\n    try:\n        (newval, nbef) = self.before.pop()\n    except IndexError:\n        raise NonexistentInstance(self.json_pointer(), 'previous of first') from None\n    return ArrayEntry((self.index - 1), nbef, self.after.cons(self.value), newval, self.parinst, self.schema_node, self.timestamp)", "docstring": "Return an instance node corresponding to the previous entry.\n\nRaises:\nNonexistentInstance: If the receiver is the first entry of the\nparent array.", "source": "codesearchnet"}
{"code": "def get_variant_id(variant_dict=None, variant_line=None):\n    \n    \n    if variant_dict:\n        chrom = variant_dict['CHROM']\n        position = variant_dict['POS']\n        ref = variant_dict['REF']\n        alt = variant_dict['ALT']\n    elif variant_line:\n        splitted_line = variant_line.rstrip().split('\\t')\n        chrom = splitted_line[0]\n        position = splitted_line[1]\n        ref = splitted_line[3]\n        alt = splitted_line[4]\n    else:\n        raise Exception(\"Have to provide variant dict or variant line\")\n    \n    return '_'.join([\n        chrom,\n        position,\n        ref,\n        alt,\n    ])", "docstring": "Build a variant id\n\nThe variant id is a string made of CHROM_POS_REF_ALT\n\nArgs:\nvariant_dict (dict): A variant dictionary\n\nReturns:\nvariant_id (str)", "source": "juraj-google-style"}
{"code": "def has_no_current_path(self, path, **kwargs):\n    try:\n        return self.assert_no_current_path(path, **kwargs)\n    except ExpectationNotMet:\n        return False", "docstring": "Checks if the page doesn't have the given path.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nbool: Whether it doesn't match.", "source": "codesearchnet"}
{"code": "def get_usedby_and_readonly(self, id):\n    uri = (((self.URI + '/') + id) + '/usedby/readonly')\n    return self._client.get(uri)", "docstring": "Gets the build plans details os teh selected plan script as per the selected attributes.\n\nArgs:\nid: ID of the Plan Script.\n\nReturns:\narray of build plans", "source": "codesearchnet"}
{"code": "def is_same_vectors(self, vec_set1, vec_set2):\n    if (np.absolute(rel_strain(vec_set1[0], vec_set2[0])) > self.max_length_tol):\n        return False\n    elif (np.absolute(rel_strain(vec_set1[1], vec_set2[1])) > self.max_length_tol):\n        return False\n    elif (np.absolute(rel_angle(vec_set1, vec_set2)) > self.max_angle_tol):\n        return False\n    else:\n        return True", "docstring": "Determine if two sets of vectors are the same within length and angle\ntolerances\n\nArgs:\nvec_set1(array[array]): an array of two vectors\nvec_set2(array[array]): second array of two vectors", "source": "codesearchnet"}
{"code": "def execute_command(self, tab_name, panel_name, command_module, command_class, command_data=None):\n    command_data = ({} if (command_data is None) else command_data)\n    cmdclassname = '{}.{}'.format(command_module, command_class)\n    self._add_entry(templates.EXTERNAL_COMMAND.format(external_command_tab=tab_name, external_command_panel=panel_name, command_class_name=command_class, command_class=cmdclassname))\n    data_count = len(command_data.keys())\n    if (data_count > 0):\n        data_str_list = []\n        for (k, v) in command_data.items():\n            data_str_list.append(' \"{}\" , \"{}\"'.format(k, v))\n        data_str = '_\\n    ,'.join(data_str_list)\n        self._add_entry(templates.EXTERNAL_COMMANDDATA.format(data_count=data_count, data_string=data_str))", "docstring": "Append an execute external command entry to the journal.\n\nThis instructs Revit to execute the provided command from the\nprovided module, tab, and panel.\n\nArgs:\ntab_name (str): name of ribbon tab that contains the command\npanel_name (str): name of ribbon panel that contains the command\ncommand_module (str): name of module that provides the command\ncommand_class (str): name of command class inside command module\ncommand_data (dict): dict of string data to be passed to command\n\nExamples:\n>>> jm = JournalMaker()\n>>> cmdata = {'key1':'value1', 'key2':'value2'}\n>>> jm.execute_command(tab_name='Add-Ins',\n...                    panel_name='Panel Name',\n...                    command_module='Addon App Namespace',\n...                    command_class='Command Classname',\n...                    command_data=cmdata)", "source": "codesearchnet"}
{"code": "def __call__(self, string):\n    texts = []\n    floats = []\n    for i, part in enumerate(self._FLOAT_RE.split(string)):\n        if i % 2 == 0:\n            texts.append(part)\n        else:\n            floats.append(float(part))\n    return (texts, np.array(floats))", "docstring": "Extracts floats from a string.\n\n>>> text_parts, floats = _FloatExtractor()(\"Text 1.0 Text\")\n>>> text_parts\n[\"Text \", \" Text\"]\n>>> floats\nnp.array([1.0])\n\nArgs:\nstring: the string to extract floats from.\n\nReturns:\nA (string, array) pair, where `string` has each float replaced by \"...\"\nand `array` is a `float32` `numpy.array` containing the extracted floats.", "source": "github-repos"}
{"code": "def connect(backend=None, host=None, port=None, name=None, max_tries=None, connection_timeout=None, replicaset=None, ssl=None, login=None, password=None, ca_cert=None, certfile=None, keyfile=None, keyfile_passphrase=None, crlfile=None):\n    backend = (backend or bigchaindb.config['database']['backend'])\n    host = (host or bigchaindb.config['database']['host'])\n    port = (port or bigchaindb.config['database']['port'])\n    dbname = (name or bigchaindb.config['database']['name'])\n    replicaset = (replicaset or bigchaindb.config['database'].get('replicaset'))\n    ssl = (ssl if (ssl is not None) else bigchaindb.config['database'].get('ssl', False))\n    login = (login or bigchaindb.config['database'].get('login'))\n    password = (password or bigchaindb.config['database'].get('password'))\n    ca_cert = (ca_cert or bigchaindb.config['database'].get('ca_cert', None))\n    certfile = (certfile or bigchaindb.config['database'].get('certfile', None))\n    keyfile = (keyfile or bigchaindb.config['database'].get('keyfile', None))\n    keyfile_passphrase = (keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None))\n    crlfile = (crlfile or bigchaindb.config['database'].get('crlfile', None))\n    try:\n        (module_name, _, class_name) = BACKENDS[backend].rpartition('.')\n        Class = getattr(import_module(module_name), class_name)\n    except KeyError:\n        raise ConfigurationError('Backend `{}` is not supported. BigchainDB currently supports {}'.format(backend, BACKENDS.keys()))\n    except (ImportError, AttributeError) as exc:\n        raise ConfigurationError('Error loading backend `{}`'.format(backend)) from exc\n    logger.debug('Connection: {}'.format(Class))\n    return Class(host=host, port=port, dbname=dbname, max_tries=max_tries, connection_timeout=connection_timeout, replicaset=replicaset, ssl=ssl, login=login, password=password, ca_cert=ca_cert, certfile=certfile, keyfile=keyfile, keyfile_passphrase=keyfile_passphrase, crlfile=crlfile)", "docstring": "Create a new connection to the database backend.\n\nAll arguments default to the current configuration's values if not\ngiven.\n\nArgs:\nbackend (str): the name of the backend to use.\nhost (str): the host to connect to.\nport (int): the port to connect to.\nname (str): the name of the database to use.\nreplicaset (str): the name of the replica set (only relevant for\nMongoDB connections).\n\nReturns:\nAn instance of :class:`~bigchaindb.backend.connection.Connection`\nbased on the given (or defaulted) :attr:`backend`.\n\nRaises:\n:exc:`~ConnectionError`: If the connection to the database fails.\n:exc:`~ConfigurationError`: If the given (or defaulted) :attr:`backend`\nis not supported or could not be loaded.\n:exc:`~AuthenticationError`: If there is a OperationFailure due to\nAuthentication failure after connecting to the database.", "source": "codesearchnet"}
{"code": "def compute_v(self, memory_antecedent):\n    if self.shared_kv:\n        raise ValueError('compute_v cannot be called with shared_kv')\n    ret = mtf.einsum([memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim])\n    if self.combine_dims:\n        ret = mtf.replace_dimensions(ret, ret.shape.dims[(- 1)], self.v_dims)\n    return ret", "docstring": "Compute value Tensor v.\n\nArgs:\nmemory_antecedent: a Tensor with dimensions\n{memory_input_dim} + other_dims\nReturns:\na Tensor with dimensions\nmemory_heads_dims + {value_dim} + other_dims", "source": "codesearchnet"}
{"code": "def _determine_profiles(self):\n    mp_insts = self._conn.EnumerateInstances('CIM_RegisteredProfile', namespace=self.interop_ns)\n    self._profiles = mp_insts", "docstring": "Determine the WBEM management profiles advertised by the WBEM server,\nby communicating with it and enumerating the instances of\n`CIM_RegisteredProfile`.\n\nIf the profiles could be determined, this method sets the\n:attr:`profiles` property of this object to the list of\n`CIM_RegisteredProfile` instances (as :class:`~pywbem.CIMInstance`\nobjects), and returns.\nOtherwise, it raises an exception.\n\nRaises:\n\nExceptions raised by :class:`~pywbem.WBEMConnection`.\nCIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be\ndetermined.", "source": "codesearchnet"}
{"code": "def get_hash(path, hash_alg='sha256'):\n    h = hashlib.new(hash_alg)\n    with open(path, 'rb') as f:\n        for chunk in iter(functools.partial(f.read, 4096), b''):\n            h.update(chunk)\n    return h.hexdigest()", "docstring": "Get the hash of the file at ``path``.\n\nI'd love to make this async, but evidently file i/o is always ready\n\nArgs:\npath (str): the path to the file to hash.\nhash_alg (str, optional): the algorithm to use.  Defaults to 'sha256'.\n\nReturns:\nstr: the hexdigest of the hash.", "source": "codesearchnet"}
{"code": "def report(self, name, owner=None, **kwargs):\n        \n        return Report(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Report TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def save_data_files(bs, prefix=None, directory=None):\n    \n    filename = 'phonon_band.dat'\n    filename = '{}_phonon_band.dat'.format(prefix) if prefix else filename\n    directory = directory if directory else '.'\n    filename = os.path.join(directory, filename)\n\n    with open(filename, 'w') as f:\n        header = '\n        f.write(header)\n\n        for band in bs.bands:\n            for d, e in zip(bs.distance, band):\n                f.write('{:.8f} {:.8f}\\n'.format(d, e))\n            f.write('\\n')\n\n    return filename", "docstring": "Write the phonon band structure data files to disk.\n\nArgs:\nbs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`):\nThe phonon band structure.\nprefix (:obj:`str`, optional): Prefix for data file.\ndirectory (:obj:`str`, optional): Directory in which to save the data.\n\nReturns:\nstr: The filename of the written data file.", "source": "juraj-google-style"}
{"code": "def _iter_errors_custom(instance, checks, options):\n    \n    \n    for v_function in checks:\n        try:\n            result = v_function(instance)\n        except TypeError:\n            result = v_function(instance, options)\n        if isinstance(result, Iterable):\n            for x in result:\n                yield x\n        elif result is not None:\n            yield result\n\n    \n    for field in instance:\n        if type(instance[field]) is list:\n            for obj in instance[field]:\n                if _is_stix_obj(obj):\n                    for err in _iter_errors_custom(obj, checks, options):\n                        yield err", "docstring": "Perform additional validation not possible merely with JSON schemas.\n\nArgs:\ninstance: The STIX object to be validated.\nchecks: A sequence of callables which do the checks.  Each callable\nmay be written to accept 1 arg, which is the object to check,\nor 2 args, which are the object and a ValidationOptions instance.\noptions: ValidationOptions instance with settings affecting how\nvalidation should be done.", "source": "juraj-google-style"}
{"code": "def get_destination(self, filepath, targetdir=None):\n        \n        dst = self.change_extension(filepath, 'css')\n        if targetdir:\n            dst = os.path.join(targetdir, dst)\n        return dst", "docstring": "Return destination path from given source file path.\n\nDestination is allways a file with extension ``.css``.\n\nArgs:\nfilepath (str): A file path. The path is allways relative to\nsources directory. If not relative, ``targetdir`` won't be\njoined.\nabsolute (bool): If given will be added at beginning of file\npath.\n\nReturns:\nstr: Destination filepath.", "source": "juraj-google-style"}
{"code": "def screenshot(self, png_filename=None, format='raw'):\n        \n        value = self.http.get('screenshot').value\n        raw_value = base64.b64decode(value)\n        png_header = b\"\\x89PNG\\r\\n\\x1a\\n\"\n        if not raw_value.startswith(png_header) and png_filename:\n            raise WDAError(-1, \"screenshot png format error\")\n\n        if png_filename:\n            with open(png_filename, 'wb') as f:\n                f.write(raw_value)\n\n        if format == 'raw':\n            return raw_value\n        elif format == 'pillow':\n            from PIL import Image\n            buff = io.BytesIO(raw_value)\n            return Image.open(buff)\n        else:\n            raise ValueError(\"unknown format\")", "docstring": "Screenshot with PNG format\n\nArgs:\npng_filename(string): optional, save file name\nformat(string): return format, pillow or raw(default)\nReturns:\nraw data or PIL.Image\n\nRaises:\nWDAError", "source": "juraj-google-style"}
{"code": "def symlink(self, link_target, path, dir_fd=None):\n        \n        link_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)\n        self.filesystem.create_symlink(\n            path, link_target, create_missing_dirs=False)", "docstring": "Creates the specified symlink, pointed at the specified link target.\n\nArgs:\nlink_target: The target of the symlink.\npath: Path to the symlink to create.\ndir_fd: If not `None`, the file descriptor of a directory,\nwith `link_target` being relative to this directory.\nNew in Python 3.3.\n\nRaises:\nOSError:  if the file already exists.", "source": "juraj-google-style"}
{"code": "def get_structure_by_material_id(self, material_id, final=True,\n                                     conventional_unit_cell=False):\n        \n        prop = \"final_structure\" if final else \"initial_structure\"\n        data = self.get_data(material_id, prop=prop)\n        if conventional_unit_cell:\n            data[0][prop] = SpacegroupAnalyzer(data[0][prop]). \\\n                get_conventional_standard_structure()\n        return data[0][prop]", "docstring": "Get a Structure corresponding to a material_id.\n\nArgs:\nmaterial_id (str): Materials Project material_id (a string,\ne.g., mp-1234).\nfinal (bool): Whether to get the final structure, or the initial\n(pre-relaxation) structure. Defaults to True.\nconventional_unit_cell (bool): Whether to get the standard\nconventional unit cell\n\nReturns:\nStructure object.", "source": "juraj-google-style"}
{"code": "def rename_object(self, object_name, new_name):\n\n    def rename_fn(weights_dict, source_name, target_name):\n        weights_dict[target_name] = weights_dict[source_name]\n        weights_dict.pop(source_name)\n    self._edit_object(rename_fn, object_name, new_name)", "docstring": "Rename an object in the file (e.g. a layer).\n\nArgs:\nobject_name: String, name or path of the\nobject to rename (e.g. `\"dense_2\"` or\n`\"layers/dense_2\"`).\nnew_name: String, new name of the object.", "source": "github-repos"}
{"code": "def done(self, metadata: Optional[Dict[str, Any]]=None, related_links: Optional[Dict[str, str]]=None) -> None:", "docstring": "Marks current trial as done.\n\nArgs:\nmetadata: Additional metadata to add to current trial.\nrelated_links: Additional links to add to current trial.", "source": "github-repos"}
{"code": "def _RawGlobPathSpecWithNumericSchema(\n    file_system, parent_path_spec, segment_format, location, segment_number):\n  \n  segment_files = []\n\n  while True:\n    segment_location = segment_format.format(location, segment_number)\n\n    \n    \n    \n    kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)\n\n    kwargs['location'] = segment_location\n    if parent_path_spec.parent is not None:\n      kwargs['parent'] = parent_path_spec.parent\n\n    segment_path_spec = path_spec_factory.Factory.NewPathSpec(\n        parent_path_spec.type_indicator, **kwargs)\n\n    if not file_system.FileEntryExistsByPathSpec(segment_path_spec):\n      break\n\n    segment_files.append(segment_path_spec)\n\n    segment_number += 1\n\n  return segment_files", "docstring": "Globs for path specifications according to a numeric naming schema.\n\nArgs:\nfile_system (FileSystem): file system.\nparent_path_spec (PathSpec): parent path specification.\nsegment_format (str): naming schema of the segment file location.\nlocation (str): the base segment file location string.\nsegment_number (int): first segment number.\n\nReturns:\nlist[PathSpec]: path specifications that match the glob.", "source": "juraj-google-style"}
{"code": "def get_embedded_tweet(tweet):\n    if (tweet.retweeted_tweet is not None):\n        return tweet.retweeted_tweet\n    elif (tweet.quoted_tweet is not None):\n        return tweet.quoted_tweet\n    else:\n        return None", "docstring": "Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary\n\nArgs:\ntweet (Tweet): A Tweet object (not simply a dict)\n\nReturns:\ndict (or None, if the Tweet is neither a quote tweet or a Retweet):\na dictionary representing the quote Tweet or the Retweet", "source": "codesearchnet"}
{"code": "def is_smart(self, value):\n        \n        self.set_bool(\"is_smart\", value)\n        if value is True:\n            if self.find(\"criteria\") is None:\n                \n                self.criteria = ElementTree.SubElement(self, \"criteria\")", "docstring": "Set group is_smart property to value.\n\nArgs:\nvalue: Boolean.", "source": "juraj-google-style"}
{"code": "def quad_genz_keister_16(order):\n    order = sorted(GENZ_KEISTER_16.keys())[order]\n    (abscissas, weights) = GENZ_KEISTER_16[order]\n    abscissas = numpy.array(abscissas)\n    weights = numpy.array(weights)\n    weights /= numpy.sum(weights)\n    abscissas *= numpy.sqrt(2)\n    return (abscissas, weights)", "docstring": "Hermite Genz-Keister 16 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_16(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321  0.      1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "codesearchnet"}
{"code": "def check_secret(self, secret):\n    try:\n        return hmac.compare_digest(secret, self.secret)\n    except AttributeError:\n        return (secret == self.secret)", "docstring": "Checks if the secret string used in the authentication attempt\nmatches the \"known\" secret string. Some mechanisms will override this\nmethod to control how this comparison is made.\n\nArgs:\nsecret: The secret string to compare against what was used in the\nauthentication attempt.\n\nReturns:\nTrue if the given secret matches the authentication attempt.", "source": "codesearchnet"}
{"code": "def export_to_tf_tensor(self, x, laid_out_x):\n    \n    tensor_layout = self.tensor_layout(x.shape)\n    if not tensor_layout.is_fully_replicated:\n      raise NotImplementedError(\n          \"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated \"\n          \"Tensors.  Try reshaping to new dimension names. \"\n          \" x.shape = %s tensor_layout=%s\"\n          % (x.shape, tensor_layout))\n    return laid_out_x.one_slice", "docstring": "Turn a Tensor into a tf.Tensor.\n\nArgs:\nx: a Tensor\nlaid_out_x: a LaidOutTensor\nReturns:\na tf.Tensor", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    bos_token = [self.bos_token_id]\n    eos_token = [self.eos_token_id]\n    if token_ids_1 is None:\n        return len(bos_token + token_ids_0 + eos_token) * [0]\n    return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0]", "docstring": "Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of\nzeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def declare(self, name):\n    if (name in self._data):\n        raise KeyError('Declared name {} that already existed'.format(name))\n    self._data[name] = self._loop.create_future()", "docstring": "Declare that a key will be set in the future.\n\nThis will create a future for the key that is used to\nhold its result and allow awaiting it.\n\nArgs:\nname (str): The unique key that will be used.", "source": "codesearchnet"}
{"code": "def report_factory(app, report_name, **kwargs):\n    \n    \n    created = pendulum.now().to_rfc3339_string()\n    user_model = app._swimlane.user.as_usergroup_selection()\n\n    return Report(\n        app,\n        {\n            \"$type\": Report._type,\n            \"groupBys\": [],\n            \"aggregates\": [],\n            \"applicationIds\": [app.id],\n            \"columns\": [],\n            \"sorts\": {\n                \"$type\": \"System.Collections.Generic.Dictionary`2\"\n                         \"[[System.String, mscorlib],\"\n                         \"[Core.Models.Search.SortTypes, Core]], mscorlib\",\n            },\n            \"filters\": [],\n            \"defaultSearchReport\": False,\n            \"allowed\": [],\n            \"permissions\": {\n                \"$type\": \"Core.Models.Security.PermissionMatrix, Core\"\n            },\n            \"createdDate\": created,\n            \"modifiedDate\": created,\n            \"createdByUser\": user_model,\n            \"modifiedByUser\": user_model,\n            \"id\": None,\n            \"name\": report_name,\n            \"disabled\": False,\n            \"keywords\": \"\"\n        },\n        **kwargs\n    )", "docstring": "Report instance factory populating boilerplate raw data\n\nArgs:\napp (App): Swimlane App instance\nreport_name (str): Generated Report name\n\nKeyword Args\n**kwargs: Kwargs to pass to the Report class", "source": "juraj-google-style"}
{"code": "def tournament_name2number(self, name):\n        \n        tournaments = self.get_tournaments()\n        d = {t['name']: t['tournament'] for t in tournaments}\n        return d.get(name, None)", "docstring": "Translate tournament name to tournament number.\n\nArgs:\nname (str): tournament name to translate\n\nReturns:\nnumber (int): number of the tournament or `None` if unknown.\n\nExamples:\n>>> NumerAPI().tournament_name2number('delta')\n4\n>>> NumerAPI().tournament_name2number('foo')\nNone", "source": "juraj-google-style"}
{"code": "def genClientCert(self, name, outp=None):\n        \n        ucert = self.getUserCert(name)\n        if not ucert:\n            raise s_exc.NoSuchFile('missing User cert')\n\n        cacert = self._loadCertPath(self._getCaPath(ucert))\n        if not cacert:\n            raise s_exc.NoSuchFile('missing CA cert')\n\n        ukey = self.getUserKey(name)\n        if not ukey:\n            raise s_exc.NoSuchFile('missing User private key')\n\n        ccert = crypto.PKCS12()\n        ccert.set_friendlyname(name.encode('utf-8'))\n        ccert.set_ca_certificates([cacert])\n        ccert.set_certificate(ucert)\n        ccert.set_privatekey(ukey)\n\n        crtpath = self._saveP12To(ccert, 'users', '%s.p12' % name)\n        if outp is not None:\n            outp.printf('client cert saved: %s' % (crtpath,))", "docstring": "Generates a user PKCS #12 archive.\nPlease note that the resulting file will contain private key material.\n\nArgs:\nname (str): The name of the user keypair.\noutp (synapse.lib.output.Output): The output buffer.\n\nExamples:\nMake the PKC12 object for user \"myuser\":\n\nmyuserpkcs12 = cdir.genClientCert('myuser')\n\nReturns:\nOpenSSL.crypto.PKCS12: The PKCS #12 archive.", "source": "juraj-google-style"}
{"code": "def append(self, header, f, _left=False):\n        \n        self.items_length += len(header)\n        if _left:\n            self.deque.appendleft((header, f))\n        else:\n            self.deque.append((header, f))", "docstring": "Add a column to the table.\n\nArgs:\nheader (str):\nColumn header\n\nf (function(datum)->str):\nMakes the row string from the datum. Str returned by f should\nhave the same width as header.", "source": "juraj-google-style"}
{"code": "def latexify_spacegroup(spacegroup_symbol):\n    \n    sym = re.sub(r\"_(\\d+)\", r\"$_{\\1}$\", spacegroup_symbol)\n    return re.sub(r\"-(\\d)\", r\"$\\\\overline{\\1}$\", sym)", "docstring": "Generates a latex formatted spacegroup. E.g., P2_1/c is converted to\nP2$_{1}$/c and P-1 is converted to P$\\\\overline{1}$.\n\nArgs:\nspacegroup_symbol (str): A spacegroup symbol\n\nReturns:\nA latex formatted spacegroup with proper subscripts and overlines.", "source": "juraj-google-style"}
{"code": "def load_file_to_base64_str(f_path):\n    path = abs_path(f_path)\n    with io.open(path, 'rb') as f:\n        f_bytes = f.read()\n        base64_str = base64.b64encode(f_bytes).decode('utf-8')\n        return base64_str", "docstring": "Loads the content of a file into a base64 string.\n\nArgs:\nf_path: full path to the file including the file name.\n\nReturns:\nA base64 string representing the content of the file in utf-8 encoding.", "source": "github-repos"}
{"code": "def config(self, commands, **kwargs):\n    commands = make_iterable(commands)\n    commands = list(commands)\n    commands.insert(0, 'configure terminal')\n    response = self.run_commands(commands, **kwargs)\n    if self.autorefresh:\n        self.refresh()\n    response.pop(0)\n    return response", "docstring": "Configures the node with the specified commands\n\nThis method is used to send configuration commands to the node.  It\nwill take either a string or a list and prepend the necessary commands\nto put the session into config mode.\n\nArgs:\ncommands (str, list): The commands to send to the node in config\nmode.  If the commands argument is a string it will be cast to\na list.\nThe list of commands will also be prepended with the\nnecessary commands to put the session in config mode.\n**kwargs: Additional keyword arguments for expanded eAPI\nfunctionality. Only supported eAPI params are used in building\nthe request\n\nReturns:\nThe config method will return a list of dictionaries with the\noutput from each command.  The function will strip the\nresponse from any commands it prepends.", "source": "codesearchnet"}
{"code": "def create_contentkey_authorization_policy_options(access_token, key_delivery_type=\"2\", \\\nname=\"HLS Open Authorization Policy\", key_restriction_type=\"0\"):\n    \n    path = '/ContentKeyAuthorizationPolicyOptions'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = '{ \\\n\t\t\"Name\":\"policy\",\\\n\t\t\"KeyDeliveryType\":\"' + key_delivery_type + '\", \\\n\t\t\"KeyDeliveryConfiguration\":\"\", \\\n\t\t\t\"Restrictions\":[{ \\\n\t\t\t\"Name\":\"' + name + '\", \\\n\t\t\t\"KeyRestrictionType\":\"' + key_restriction_type + '\", \\\n\t\t\t\"Requirements\":null \\\n\t\t}] \\\n\t}'\n    return do_ams_post(endpoint, path, body, access_token, \"json_only\")", "docstring": "Create Media Service Content Key Authorization Policy Options.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nkey_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.\nname (str): A Media Service Contenty Key Authorization Policy Name.\nkey_restiction_type (str): A Media Service Contenty Key Restriction Type.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _on_report(self, report, connection_id):\n    self._logger.info('Received report: %s', str(report))\n    self._trigger_callback('on_report', connection_id, report)\n    return False", "docstring": "Callback function called when a report has been processed.\n\nArgs:\nreport (IOTileReport): The report object\nconnection_id (int): The connection id related to this report\n\nReturns:\n- True to indicate that IOTileReportParser should also keep a copy of the report\nor False to indicate it should delete it.", "source": "codesearchnet"}
{"code": "def stateful_ops(self):\n    self._create_definition_if_needed()\n    return self._stateful_ops", "docstring": "Returns the list of stateful ops in function definition.\n\nReturns:\nA list of (op.name, op.type) pairs.", "source": "github-repos"}
{"code": "def apply_middleware(*middlewares):\n    \n    def inner(create_store_):\n        def create_wrapper(reducer, enhancer=None):\n            store = create_store_(reducer, enhancer)\n            dispatch = store['dispatch']\n            middleware_api = {\n                'get_state': store['get_state'],\n                'dispatch': lambda action: dispatch(action),\n            }\n            chain = [mw(middleware_api) for mw in middlewares]\n            dispatch = compose(*chain)(store['dispatch'])\n\n            return extend(store, {'dispatch': dispatch})\n        return create_wrapper\n    return inner", "docstring": "creates an enhancer function composed of middleware\n\nArgs:\n*middlewares: list of middleware functions to apply\n\nReturns:\nan enhancer for subsequent calls to create_store()", "source": "juraj-google-style"}
{"code": "def save_spectre_plot(self, filename='spectre.pdf', img_format='pdf', sigma=0.05, step=0.01):\n    (d, plt) = self.get_spectre_plot(sigma, step)\n    plt.savefig(filename, format=img_format)", "docstring": "Save matplotlib plot of the spectre to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.\nsigma: Full width at half maximum in eV for normal functions.\nstep: bin interval in eV", "source": "codesearchnet"}
{"code": "def union_of_bboxes(height, width, bboxes, erosion_rate=0.0, to_int=False):\n    (x1, y1) = (width, height)\n    (x2, y2) = (0, 0)\n    for b in bboxes:\n        (w, h) = ((b[2] - b[0]), (b[3] - b[1]))\n        (lim_x1, lim_y1) = ((b[0] + (erosion_rate * w)), (b[1] + (erosion_rate * h)))\n        (lim_x2, lim_y2) = ((b[2] - (erosion_rate * w)), (b[3] - (erosion_rate * h)))\n        (x1, y1) = (np.min([x1, lim_x1]), np.min([y1, lim_y1]))\n        (x2, y2) = (np.max([x2, lim_x2]), np.max([y2, lim_y2]))\n    return (x1, y1, x2, y2)", "docstring": "Calculate union of bounding boxes.\n\nArgs:\nheight (float): Height of image or space.\nwidth (float): Width of image or space.\nbboxes (list): List like bounding boxes. Format is `[x_min, y_min, x_max, y_max]`.\nerosion_rate (float): How much each bounding box can be shrinked, useful for erosive cropping.\nSet this in range [0, 1]. 0 will not be erosive at all, 1.0 can make any bbox to lose its volume.", "source": "codesearchnet"}
{"code": "def ConvertStringToFilename(name):\n  \n  return re.sub(\n      r\"\\W\", lambda x: \"%%%02X\" % ord(x.group(0)), name,\n      flags=re.UNICODE).rstrip(\"/\")", "docstring": "Converts an unicode string to a filesystem safe filename.\n\nFor maximum compatibility we escape all chars which are not alphanumeric (in\nthe unicode sense).\n\nArgs:\nname: a unicode string that is part of a subject.\n\nReturns:\nA safe filename with escaped special chars.", "source": "juraj-google-style"}
{"code": "def add(self, name, value, bitmask=DEFMASK):\n        \n        _add_enum_member(self._eid, name, value, bitmask)", "docstring": "Add an enum member\n\nArgs:\nname: Name of the member\nvalue: value of the member\nbitmask: bitmask. Only use if enum is a bitfield.", "source": "juraj-google-style"}
{"code": "def setMaximum(self, maximum):\n    if (not isinstance(maximum, int)):\n        raise TypeError('Argument is not of type int or long')\n    self._maximum = maximum", "docstring": "setter to _maximum.\n\nArgs:\nmaximum (int or long): new _maximum value", "source": "codesearchnet"}
{"code": "def Readdir(self, path, fh=None):\n    \n    if self.DataRefreshRequired(path):\n      self._RunAndWaitForVFSFileUpdate(path)\n\n    return super(GRRFuse, self).Readdir(path, fh=None)", "docstring": "Updates the directory listing from the client.\n\nArgs:\npath: The path to the directory to update. Client is inferred from this.\nfh: A file handler. Not used.\n\nReturns:\nA list of filenames.", "source": "juraj-google-style"}
{"code": "def load_weights_from_hdf5_group(f, model):\n    if 'keras_version' in f.attrs:\n        original_keras_version = f.attrs['keras_version']\n        if hasattr(original_keras_version, 'decode'):\n            original_keras_version = original_keras_version.decode('utf8')\n    else:\n        original_keras_version = '1'\n    if 'backend' in f.attrs:\n        original_backend = f.attrs['backend']\n        if hasattr(original_backend, 'decode'):\n            original_backend = original_backend.decode('utf8')\n    else:\n        original_backend = None\n    filtered_layers = []\n    for layer in model.layers:\n        weights = _legacy_weights(layer)\n        if weights:\n            filtered_layers.append(layer)\n    layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n    filtered_layer_names = []\n    for name in layer_names:\n        g = f[name]\n        weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n        if weight_names:\n            filtered_layer_names.append(name)\n    layer_names = filtered_layer_names\n    if len(layer_names) != len(filtered_layers):\n        raise ValueError(f'Layer count mismatch when loading weights from file. Model expected {len(filtered_layers)} layers, found {len(layer_names)} saved layers.')\n    for k, name in enumerate(layer_names):\n        g = f[name]\n        layer = filtered_layers[k]\n        symbolic_weights = _legacy_weights(layer)\n        weight_values = load_subset_weights_from_hdf5_group(g)\n        if len(weight_values) != len(symbolic_weights):\n            raise ValueError(f'Weight count mismatch for layer \n        _set_weights(layer, symbolic_weights, weight_values, name=f'layer \n    if 'top_level_model_weights' in f:\n        symbolic_weights = list((v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights))\n        weight_values = load_subset_weights_from_hdf5_group(f['top_level_model_weights'])\n        if len(weight_values) != len(symbolic_weights):\n            raise ValueError(f'Weight count mismatch for top-level weights when loading weights from file. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)')\n        _set_weights(model, symbolic_weights, weight_values, name='top-level model')", "docstring": "Implements topological (order-based) weight loading.\n\nArgs:\nf: A pointer to a HDF5 group.\nmodel: Model instance.\n\nRaises:\nValueError: in case of mismatch between provided layers\nand weights file.", "source": "github-repos"}
{"code": "def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):\n        \n        if subresource_id_or_uri and \"/\" in subresource_id_or_uri:\n            return subresource_id_or_uri\n        else:\n            if not resource_id_or_uri:\n                raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)\n\n            resource_uri = self.build_uri(resource_id_or_uri)\n\n            uri = \"{}/{}/{}\".format(resource_uri, subresource_path, str(subresource_id_or_uri or ''))\n            uri = uri.replace(\"\n\n            if uri.endswith(\"/\"):\n                uri = uri[:-1]\n\n            return uri", "docstring": "Helps to build a URI with resource path and its sub resource path.\n\nArgs:\nresoure_id_or_uri: ID/URI of the main resource.\nsubresource_id__or_uri: ID/URI of the sub resource.\nsubresource_path: Sub resource path to be added with the URI.\n\nReturns:\nReturns URI", "source": "juraj-google-style"}
{"code": "def GetNTFSFileEntryByPathSpec(self, path_spec):\n    \n    \n    \n    location = getattr(path_spec, 'location', None)\n    mft_attribute = getattr(path_spec, 'mft_attribute', None)\n    mft_entry = getattr(path_spec, 'mft_entry', None)\n\n    if mft_attribute is not None and mft_entry is not None:\n      fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)\n    elif location is not None:\n      fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)\n    else:\n      raise errors.PathSpecError(\n          'Path specification missing location and MFT entry.')\n\n    return fsntfs_file_entry", "docstring": "Retrieves the NTFS file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\npyfsntfs.file_entry: NTFS file entry.\n\nRaises:\nPathSpecError: if the path specification is missing location and\nMFT entry.", "source": "juraj-google-style"}
{"code": "def list_documents(self, limit=None):\n    limit_str = ''\n    if limit:\n        try:\n            limit_str = 'LIMIT {}'.format(int(limit))\n        except (TypeError, ValueError):\n            pass\n    query = ('SELECT identifier FROM identifier_index ' + limit_str)\n    for row in self.backend.library.database.connection.execute(query).fetchall():\n        (yield row['identifier'])", "docstring": "Generates vids of all indexed identifiers.\n\nArgs:\nlimit (int, optional): If not empty, the maximum number of results to return\n\nGenerates:\nstr: vid of the document.", "source": "codesearchnet"}
{"code": "def _get_oxm_field_int(self):\n    if (self.oxm_class == OxmClass.OFPXMC_OPENFLOW_BASIC):\n        return OxmOfbMatchField(self.oxm_field).value\n    elif ((not isinstance(self.oxm_field, int)) or (self.oxm_field > 127)):\n        raise ValueError('oxm_field above 127: \"{self.oxm_field}\".')\n    return self.oxm_field", "docstring": "Return a valid integer value for oxm_field.\n\nUsed while packing.\n\nReturns:\nint: valid oxm_field value.\n\nRaises:\nValueError: If :attribute:`oxm_field` is bigger than 7 bits or\nshould be :class:`OxmOfbMatchField` and the enum has no such\nvalue.", "source": "codesearchnet"}
{"code": "def multiprocess_mapping(func, iterable):\n    \n    if os.name == 'nt':  \n        return list(map(func, iterable))\n    try:\n        p = multiprocessing.Pool()\n        return_data = list(p.imap(func, iterable))\n        p.close()\n        p.join()\n        return return_data\n    except OSError:\n        return list(map(func, iterable))", "docstring": "Multiprocess mapping the given function on the given iterable.\n\nThis only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on\nsingle processing. Also, if we reach memory limits we fall back on single cpu processing.\n\nArgs:\nfunc (func): the function to apply\niterable (iterable): the iterable with the elements we want to apply the function on", "source": "juraj-google-style"}
{"code": "def read_scan(self):\n\n    def floatList(l):\n        ' return a list of float from a list of string '\n        return [float(v) for v in l]\n    scan_patt = re.compile('^\\\\sSummary of the potential surface scan:')\n    optscan_patt = re.compile('^\\\\sSummary of Optimized Potential Surface Scan')\n    data = {'energies': list(), 'coords': dict()}\n    with zopen(self.filename, 'r') as f:\n        line = f.readline()\n        while (line != ''):\n            if optscan_patt.match(line):\n                f.readline()\n                line = f.readline()\n                endScan = False\n                while (not endScan):\n                    data['energies'] += floatList(float_patt.findall(line))\n                    line = f.readline()\n                    while (not re.search('(^\\\\s+(\\\\d+)|^\\\\s-+)', line)):\n                        icname = line.split()[0].strip()\n                        if (icname in data['coords']):\n                            data['coords'][icname] += floatList(float_patt.findall(line))\n                        else:\n                            data['coords'][icname] = floatList(float_patt.findall(line))\n                        line = f.readline()\n                    if re.search('^\\\\s-+', line):\n                        endScan = True\n                    else:\n                        line = f.readline()\n            elif scan_patt.match(line):\n                line = f.readline()\n                data['coords'] = {icname: list() for icname in line.split()[1:(- 1)]}\n                f.readline()\n                line = f.readline()\n                while (not re.search('^\\\\s-+', line)):\n                    values = floatList(line.split())\n                    data['energies'].append(values[(- 1)])\n                    for (i, icname) in enumerate(data['coords']):\n                        data['coords'][icname].append(values[(i + 1)])\n                    line = f.readline()\n            else:\n                line = f.readline()\n    return data", "docstring": "Read a potential energy surface from a gaussian scan calculation.\n\nReturns:\n\nA dict: {\"energies\": [ values ],\n\"coords\": {\"d1\": [ values ], \"A2\", [ values ], ... }}\n\n\"energies\" are the energies of all points of the potential energy\nsurface. \"coords\" are the internal coordinates used to compute the\npotential energy surface and the internal coordinates optimized,\nlabelled by their name as defined in the calculation.", "source": "codesearchnet"}
{"code": "def _mark_maybe_missing_members(self, values):\n    values = list(values)\n    seen = set()\n    while values:\n        v = values.pop(0)\n        if v not in seen:\n            seen.add(v)\n            if isinstance(v, abstract.SimpleValue):\n                v.maybe_missing_members = True\n                for child in v.instance_type_parameters.values():\n                    values.extend(child.data)", "docstring": "Set maybe_missing_members to True on these values and their type params.\n\nArgs:\nvalues: A list of BaseValue objects. On every instance among the values,\nrecursively set maybe_missing_members to True on the instance and its\ntype parameters.", "source": "github-repos"}
{"code": "def _get_job_metadata(provider, user_id, job_name, script, task_ids, user_project, unique_job_id):\n    create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())\n    user_id = (user_id or dsub_util.get_os_user())\n    job_metadata = provider.prepare_job_metadata(script.name, job_name, user_id, create_time)\n    if unique_job_id:\n        job_metadata['job-id'] = uuid.uuid4().hex\n    job_metadata['create-time'] = create_time\n    job_metadata['script'] = script\n    job_metadata['user-project'] = user_project\n    if task_ids:\n        job_metadata['task-ids'] = dsub_util.compact_interval_string(list(task_ids))\n    return job_metadata", "docstring": "Allow provider to extract job-specific metadata from command-line args.\n\nArgs:\nprovider: job service provider\nuser_id: user submitting the job\njob_name: name for the job\nscript: the script to run\ntask_ids: a set of the task-ids for all tasks in the job\nuser_project: name of the project to be billed for the request\nunique_job_id: generate a unique job id\n\nReturns:\nA dictionary of job-specific metadata (such as job id, name, etc.)", "source": "codesearchnet"}
{"code": "def register_site(self):\n    if self.oxd_id:\n        logger.info('Client is already registered. ID: %s', self.oxd_id)\n        return self.oxd_id\n    params = {'authorization_redirect_uri': self.authorization_redirect_uri, 'oxd_rp_programming_language': 'python'}\n    for op in self.opt_params:\n        if self.config.get('client', op):\n            params[op] = self.config.get('client', op)\n    for olp in self.opt_list_params:\n        if self.config.get('client', olp):\n            params[olp] = self.config.get('client', olp).split(',')\n    logger.debug('Sending command `register_site` with params %s', params)\n    response = self.msgr.request('register_site', **params)\n    logger.debug('Received response: %s', response)\n    if (response['status'] == 'error'):\n        raise OxdServerError(response['data'])\n    self.oxd_id = response['data']['oxd_id']\n    self.config.set('oxd', 'id', self.oxd_id)\n    logger.info('Site registration successful. Oxd ID: %s', self.oxd_id)\n    return self.oxd_id", "docstring": "Function to register the site and generate a unique ID for the site\n\nReturns:\n**string:** The ID of the site (also called client id) if the registration is successful\n\nRaises:\n**OxdServerError:** If the site registration fails.", "source": "codesearchnet"}
{"code": "def get_morph_files(directory):\n    lsdir = (os.path.join(directory, m) for m in os.listdir(directory))\n    return list(filter(_is_morphology_file, lsdir))", "docstring": "Get a list of all morphology files in a directory\n\nReturns:\nlist with all files with extensions '.swc' , 'h5' or '.asc' (case insensitive)", "source": "codesearchnet"}
{"code": "def log(cls, event=None, actor=None, data=None):\n        \n        from cloud_inquisitor.log import auditlog\n\n        auditlog(event=event, actor=actor, data=data)", "docstring": "Generate and insert a new event\n\nArgs:\nevent (str): Action performed\nactor (str): Actor (user or subsystem) triggering the event\ndata (dict): Any extra data necessary for describing the event\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def basis_state(str_state, num):\n    n = int(str_state, 2)\n    if (num >= len(str_state)):\n        state = np.zeros((1 << num), dtype=complex)\n        state[n] = 1\n        return state\n    else:\n        raise QiskitError('size of bitstring is greater than num.')", "docstring": "Return a basis state ndarray.\n\nArgs:\nstr_state (string): a string representing the state.\nnum (int): the number of qubits\nReturns:\nndarray:  state(2**num) a quantum state with basis basis state.\nRaises:\nQiskitError: if the dimensions is wrong", "source": "codesearchnet"}
{"code": "def from_tuples(year_month_day_tuples, validate=True):\n    years, months, days = ([], [], [])\n    for t in year_month_day_tuples:\n        years.append(t[0])\n        months.append(t[1])\n        days.append(t[2])\n    years = tf.constant(years, dtype=tf.int32)\n    months = tf.constant(months, dtype=tf.int32)\n    days = tf.constant(days, dtype=tf.int32)\n    return from_year_month_day(years, months, days, validate)", "docstring": "Creates DateTensor from a sequence of year-month-day Tuples.\n\nArgs:\nyear_month_day_tuples: Sequence of (year, month, day) Tuples. Months are\n1-based; constants from Months enum can be used instead of ints. Days are\nalso 1-based.\nvalidate: Whether to validate the dates.\n\nReturns:\nDateTensor object.\n\n#### Example\n\n```python\ndate_tensor = tff.datetime.dates_from_tuples([(2015, 4, 15), (2017, 12, 30)])\n```", "source": "github-repos"}
{"code": "def GetArtifactsForCollection(os_name, artifact_list):\n  \n  artifact_arranger = ArtifactArranger(os_name, artifact_list)\n  artifact_names = artifact_arranger.GetArtifactsInProperOrder()\n  return artifact_names", "docstring": "Wrapper for the ArtifactArranger.\n\nExtend the artifact list by dependencies and sort the artifacts to resolve the\ndependencies.\n\nArgs:\nos_name: String specifying the OS name.\nartifact_list: List of requested artifact names.\n\nReturns:\nA list of artifacts such that if they are collected in the given order\ntheir dependencies are resolved.", "source": "juraj-google-style"}
{"code": "def get_variable(self, feature_column, name):\n    if name in self._cols_to_vars_map[feature_column]:\n        return self._cols_to_vars_map[feature_column][name]\n    raise ValueError('Variable does not exist.')", "docstring": "Returns an existing variable.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nname: variable name.", "source": "github-repos"}
{"code": "def get_vms(self, vm_names=None):\n        \n        if not vm_names:\n            return self._vms.copy()\n\n        missing_vms = []\n        vms = {}\n        for name in vm_names:\n            try:\n                vms[name] = self._vms[name]\n            except KeyError:\n                \n                missing_vms.append(name)\n\n        if missing_vms:\n            raise utils.LagoUserException(\n                'The following vms do not exist: \\n{}'.format(\n                    '\\n'.join(missing_vms)\n                )\n            )\n\n        return vms", "docstring": "Returns the vm objects associated with vm_names\nif vm_names is None, return all the vms in the prefix\nArgs:\nvm_names (list of str): The names of the requested vms\nReturns\ndict: Which contains the requested vm objects indexed by name\nRaises:\nutils.LagoUserException: If a vm name doesn't exist", "source": "juraj-google-style"}
{"code": "def pack_tangents(tensors):\n    return TangentInfo(*pywrap_tfe.TFE_Py_PackJVPs(tensors))", "docstring": "Packs forward accumulator state into a TangentInfo tuple.\n\nArgs:\ntensors: A flat list of Tensors to pack forward accumulator state for.\n\nReturns:\nA tuple of (indices, tangents):\nindices: A sequence of sequences of two-element tuples. Each forward\naccumulator is represented as a sequence of tuples with (primal_index,\njvp_index). Both integers index into the concatenated `tensors + jvps`\narray.\ntangents: A flat list of Tensors. Best interpreted as a sequence to be\nappended to `tensors`.", "source": "github-repos"}
{"code": "def _SetupDatabase(host=None,\n                   port=None,\n                   user=None,\n                   password=None,\n                   database=None,\n                   client_key_path=None,\n                   client_cert_path=None,\n                   ca_cert_path=None):\n  \n  with contextlib.closing(\n      _Connect(\n          host=host,\n          port=port,\n          user=user,\n          password=password,\n          \n          \n          database=None,\n          client_key_path=client_key_path,\n          client_cert_path=client_cert_path,\n          ca_cert_path=ca_cert_path)) as conn:\n    with contextlib.closing(conn.cursor()) as cursor:\n      try:\n        cursor.execute(CREATE_DATABASE_QUERY.format(database))\n      except MySQLdb.MySQLError as e:\n        \n        if e.args[0] != mysql_error_constants.DB_CREATE_EXISTS:\n          raise\n\n      cursor.execute(\"USE {}\".format(database))\n      _CheckCollation(cursor)\n\n  def _MigrationConnect():\n    return _Connect(\n        host=host,\n        port=port,\n        user=user,\n        password=password,\n        database=database,\n        client_key_path=client_key_path,\n        client_cert_path=client_cert_path,\n        ca_cert_path=ca_cert_path)\n\n  mysql_migration.ProcessMigrations(_MigrationConnect,\n                                    config.CONFIG[\"Mysql.migrations_dir\"])", "docstring": "Connect to the given MySQL host and create a utf8mb4_unicode_ci database.\n\nArgs:\nhost: The hostname to connect to.\nport: The port to connect to.\nuser: The username to connect as.\npassword: The password to connect with.\ndatabase: The database name to create.\nclient_key_path: The path of the client private key file.\nclient_cert_path: The path of the client public key certificate file.\nca_cert_path: The path of the Certificate Authority (CA) certificate file.", "source": "juraj-google-style"}
{"code": "def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n    mask = input_ids.ne(padding_idx).int()\n    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n    return incremental_indices.long() + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\nx: torch.Tensor x:\n\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def serial_wire_viewer(jlink_serial, device):\n    \n    buf = StringIO.StringIO()\n    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)\n    jlink.open(serial_no=jlink_serial)\n\n    \n    \n    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)\n    jlink.connect(device, verbose=True)\n    jlink.coresight_configure()\n    jlink.set_reset_strategy(pylink.enums.JLinkResetStrategyCortexM3.RESETPIN)\n\n    \n    jlink.reset()\n    jlink.halt()\n\n    \n    sys.stdout.write('Serial Wire Viewer\\n')\n    sys.stdout.write('Press Ctrl-C to Exit\\n')\n    sys.stdout.write('Reading data from port 0:\\n\\n')\n\n    \n    jlink.reset(ms=10, halt=False)\n\n    \n    \n    try:\n        while True:\n            \n            if jlink.register_read(0x0) != 0x05:\n                continue\n\n            offset = jlink.register_read(0x1)\n            handle, ptr, num_bytes = jlink.memory_read32(offset, 3)\n            read = ''.join(map(chr, jlink.memory_read8(ptr, num_bytes)))\n\n            if num_bytes == 0:\n                \n                time.sleep(1)\n                continue\n\n            jlink.register_write(0x0, 0)\n            jlink.step(thumb=True)\n            jlink.restart(2, skip_breakpoints=True)\n\n            sys.stdout.write(read)\n            sys.stdout.flush()\n    except KeyboardInterrupt:\n        pass\n\n    sys.stdout.write('\\n')\n\n    return 0", "docstring": "Implements a Serial Wire Viewer (SWV).\n\nA Serial Wire Viewer (SWV) allows us implement real-time logging of output\nfrom a connected device over Serial Wire Output (SWO).\n\nArgs:\njlink_serial (str): the J-Link serial number\ndevice (str): the target CPU\n\nReturns:\nAlways returns ``0``.\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def depricated_name(newmethod):\n    \n    def decorator(func):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            warnings.simplefilter('always', DeprecationWarning) \n            warnings.warn(\n                \"Function {} is depricated, please use {} instead.\".format(func.__name__, newmethod),\n                category=DeprecationWarning, stacklevel=2\n            )\n            warnings.simplefilter('default', DeprecationWarning)\n            return func(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "Decorator for warning user of depricated functions before use.\n\nArgs:\nnewmethod (str): Name of method to use instead.", "source": "juraj-google-style"}
{"code": "def CheckCommaSpacing(filename, clean_lines, linenum, error):\n    raw = clean_lines.lines_without_raw_strings\n    line = clean_lines.elided[linenum]\n    if (Search(',[^,\\\\s]', ReplaceAll('\\\\boperator\\\\s*,\\\\s*\\\\(', 'F(', line)) and Search(',[^,\\\\s]', raw[linenum])):\n        error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,')\n    if Search(';[^\\\\s};\\\\\\\\)/]', line):\n        error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;')", "docstring": "Checks for horizontal spacing near commas and semicolons.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def match_global_phase(a: np.ndarray, b: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]:\n    if (a.shape != b.shape):\n        return (a, b)\n    k = max(np.ndindex(*a.shape), key=(lambda t: abs(b[t])))\n\n    def dephase(v):\n        r = np.real(v)\n        i = np.imag(v)\n        if (i == 0):\n            return ((- 1) if (r < 0) else 1)\n        if (r == 0):\n            return (1j if (i < 0) else (- 1j))\n        return np.exp(((- 1j) * np.arctan2(i, r)))\n    return ((a * dephase(a[k])), (b * dephase(b[k])))", "docstring": "Phases the given matrices so that they agree on the phase of one entry.\n\nTo maximize precision, the position with the largest entry from one of the\nmatrices is used when attempting to compute the phase difference between\nthe two matrices.\n\nArgs:\na: A numpy array.\nb: Another numpy array.\n\nReturns:\nA tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.", "source": "codesearchnet"}
{"code": "def _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_spec, name='checkpoint_initializer'):\n    base_type = variable.dtype.base_dtype\n    with ops.device(variable.device), ops.device('/cpu:0'):\n        restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]\n        names_to_saveables = saveable_object_util.op_list_to_dict([variable])\n        saveable_objects = []\n        for name, op in names_to_saveables.items():\n            for s in saveable_object_util.saveable_objects_for_op(op, name):\n                saveable_objects.append(s)\n        assert len(saveable_objects) == 1\n    init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)\n    variable._initializer_op = init_op\n    restore_op.set_shape(variable.shape)\n    variable._initial_value = restore_op", "docstring": "Overrides given variable's initialization op.\n\nSets variable initializer to assign op that initializes variable from tensor's\nvalue in the checkpoint.\n\nArgs:\nvariable: `tf.Variable` object.\nckpt_file: string, full path of the checkpoint.\ntensor_name: Name of the tensor to load from the checkpoint.\nslice_spec: Slice specification for loading partitioned tensors.\nname: Name of the operation.", "source": "github-repos"}
{"code": "def save(self, items):\n        \n        rows = []\n        indx = self.indx\n\n        size = 0\n        tick = s_common.now()\n\n        for item in items:\n\n            byts = s_msgpack.en(item)\n\n            size += len(byts)\n\n            lkey = s_common.int64en(indx)\n            indx += 1\n\n            rows.append((lkey, byts))\n\n        self.slab.putmulti(rows, append=True, db=self.db)\n        took = s_common.now() - tick\n\n        origindx = self.indx\n        self.indx = indx\n        return {'indx': indx, 'size': size, 'count': len(items), 'time': tick, 'took': took}\n\n        return origindx", "docstring": "Save a series of items to a sequence.\n\nArgs:\nitems (tuple): The series of items to save into the sequence.\n\nReturns:\nThe index of the first item", "source": "juraj-google-style"}
{"code": "def healthy_services(self, role=None):\n        \n        try:\n            query = self.rr.table(self.table)\n            if role:\n                query = query.get_all(role, index='role')\n            query = query.filter(\n                lambda svc: r.now().sub(svc[\"last_heartbeat\"]) < svc[\"ttl\"]   \n            ).order_by(\"load\")\n            result = query.run()\n            return result\n        except r.ReqlNonExistenceError:\n            return []", "docstring": "Look up healthy services in the registry.\n\nA service is considered healthy if its 'last_heartbeat' was less than\n'ttl' seconds ago\n\nArgs:\nrole (str, optional): role name\n\nReturns:\nIf `role` is supplied, returns list of healthy services for the\ngiven role, otherwise returns list of all healthy services. May\nreturn an empty list.", "source": "juraj-google-style"}
{"code": "def parse_uniprot_txt_file(infile):\n    \n    uniprot_metadata_dict = {}\n\n    metadata = old_parse_uniprot_txt_file(infile)\n    metadata_keys = list(metadata.keys())\n\n    if metadata_keys:\n        metadata_key = metadata_keys[0]\n    else:\n        return uniprot_metadata_dict\n\n    uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence']))\n    uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed']\n    uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version']\n    uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version']\n    if 'gene' in metadata[metadata_key]:\n        uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene']\n    if 'description' in metadata[metadata_key]:\n        uniprot_metadata_dict['description'] = metadata[metadata_key]['description']\n    if 'refseq' in metadata[metadata_key]:\n        uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq']\n    if 'kegg' in metadata[metadata_key]:\n        uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg']\n    if 'ec' in metadata[metadata_key]:\n        uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec']\n    if 'pfam' in metadata[metadata_key]:\n        uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam']\n    if 'pdbs' in metadata[metadata_key]:\n        uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs']))\n    return uniprot_metadata_dict", "docstring": "Parse a raw UniProt metadata file and return a dictionary.\n\nArgs:\ninfile: Path to metadata file\n\nReturns:\ndict: Metadata dictionary", "source": "juraj-google-style"}
{"code": "def build_twisted_request(self, method, url, extra_headers={}, body_producer=None, full_url=False):\n    uri = (url if full_url else self._url(url))\n    raw_headers = self.get_headers()\n    if extra_headers:\n        raw_headers.update(extra_headers)\n    headers = http_headers.Headers()\n    for header in raw_headers:\n        headers.addRawHeader(header, raw_headers[header])\n    agent = client.Agent(reactor)\n    request = agent.request(method, uri, headers, body_producer)\n    return (reactor, request)", "docstring": "Build a request for twisted\n\nArgs:\nmethod (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None\nurl (str): Destination URL (full, or relative)\n\nKwargs:\nextra_headers (dict): Headers (override default connection headers, if any)\nbody_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body\nfull_url (bool): If False, URL is relative\n\nReturns:\ntuple. Tuple with two elements: reactor, and request", "source": "codesearchnet"}
{"code": "def assert_keys_exist(self, caller, *keys):\n    assert keys, '*keys parameter must be specified.'\n    for key in keys:\n        self.assert_key_exists(key, caller)", "docstring": "Assert that context contains keys.\n\nArgs:\nkeys: validates that these keys exists in context\ncaller: string. calling function or module name - this used to\nconstruct error messages\n\nRaises:\nKeyNotInContextError: When key doesn't exist in context.", "source": "codesearchnet"}
{"code": "def read_parquet(path, engine=\"auto\", columns=None, **kwargs):\n    \n    return DataFrame(\n        query_compiler=BaseFactory.read_parquet(\n            path=path, columns=columns, engine=engine, **kwargs\n        )\n    )", "docstring": "Load a parquet object from the file path, returning a DataFrame.\n\nArgs:\npath: The filepath of the parquet file.\nWe only support local files for now.\nengine: This argument doesn't do anything for now.\nkwargs: Pass into parquet's read_pandas function.", "source": "juraj-google-style"}
{"code": "def get_neighbors_of_site_with_index(struct, n, approach='min_dist', delta=0.1, cutoff=10.0):\n    if (approach == 'min_dist'):\n        return MinimumDistanceNN(tol=delta, cutoff=cutoff).get_nn(struct, n)\n    elif (approach == 'voronoi'):\n        return VoronoiNN(tol=delta, cutoff=cutoff).get_nn(struct, n)\n    elif (approach == 'min_OKeeffe'):\n        return MinimumOKeeffeNN(tol=delta, cutoff=cutoff).get_nn(struct, n)\n    elif (approach == 'min_VIRE'):\n        return MinimumVIRENN(tol=delta, cutoff=cutoff).get_nn(struct, n)\n    else:\n        raise RuntimeError('unsupported neighbor-finding method ({}).'.format(approach))", "docstring": "Returns the neighbors of a given site using a specific neighbor-finding\nmethod.\n\nArgs:\nstruct (Structure): input structure.\nn (int): index of site in Structure object for which motif type\nis to be determined.\napproach (str): type of neighbor-finding approach, where\n\"min_dist\" will use the MinimumDistanceNN class,\n\"voronoi\" the VoronoiNN class, \"min_OKeeffe\" the\nMinimumOKeeffe class, and \"min_VIRE\" the MinimumVIRENN class.\ndelta (float): tolerance involved in neighbor finding.\ncutoff (float): (large) radius to find tentative neighbors.\n\nReturns: neighbor sites.", "source": "codesearchnet"}
{"code": "def validate(self, read_tuple_name):\n    if (reg_lrn.match(read_tuple_name) is None):\n        self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_read_tuple_name_structure', message=\"'{}' is not matched\".format(reg_lrn))\n    else:\n        parts = read_tuple_name.split('__')\n        if (reg_prefix_part.match(parts[0]) is None):\n            self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_prefix_part', message=\"'{}' is not matched\".format(reg_prefix_part))\n        if (reg_id_part.match(parts[1]) is None):\n            self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_id_part', message=\"'{}' is not matched\".format(reg_id_part))\n        if (reg_segmental_part.match(parts[2]) is None):\n            self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_segmental_part', message=\"'{}' is not matched\".format(reg_segmental_part))\n        if (reg_suffix_part.match(parts[3]) is None):\n            self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_suffix_part', message=\"'{}' is not matched\".format(reg_suffix_part))\n        if (not self.rnf_profile.check(read_tuple_name)):\n            self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_profile', message='Read has a wrong profile (wrong widths). It should be: {} but it is: {}.'.format(self.rnf_profile, rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name)), warning=True)", "docstring": "Check RNF validity of a read tuple.\n\nArgs:\nread_tuple_name (str): Read tuple name to be checked.s", "source": "codesearchnet"}
{"code": "def get_associated_uplink_groups(self):\n    uri = '{}/associatedUplinkGroups'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Gets the uplink sets which are using an Ethernet network.\n\nReturns:\nlist: URIs of the associated uplink sets.", "source": "codesearchnet"}
{"code": "def _delocalize_logging_command(self, logging_path, user_project):\n    \n\n    \n    logging_prefix = os.path.splitext(logging_path.uri)[0]\n\n    \n    if logging_path.file_provider == job_model.P_LOCAL:\n      mkdir_cmd = 'mkdir -p \"%s\"\\n' % os.path.dirname(logging_prefix)\n      cp_cmd = 'cp'\n    elif logging_path.file_provider == job_model.P_GCS:\n      mkdir_cmd = ''\n      if user_project:\n        cp_cmd = 'gsutil -u {} -mq cp'.format(user_project)\n      else:\n        cp_cmd = 'gsutil -mq cp'\n    else:\n      assert False\n\n    \n    copy_logs_cmd = textwrap.dedent().format(\n        cp_cmd=cp_cmd, prefix=logging_prefix)\n\n    \n    body = textwrap.dedent().format(\n        mkdir_cmd=mkdir_cmd, copy_logs_cmd=copy_logs_cmd)\n\n    return body", "docstring": "Returns a command to delocalize logs.\n\nArgs:\nlogging_path: location of log files.\nuser_project: name of the project to be billed for the request.\n\nReturns:\neg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12'", "source": "juraj-google-style"}
{"code": "def _gen_indicator_method(self, name, custom_class, value_count):\n        \n        method_name = name.replace(' ', '_').lower()\n\n        \n        def method_1(value1, xid, **kwargs):  \n            \n            indicator_obj = custom_class(value1, xid, **kwargs)\n            return self._indicator(indicator_obj)\n\n        def method_2(value1, value2, xid, **kwargs):  \n            \n            indicator_obj = custom_class(value1, value2, xid, **kwargs)\n            return self._indicator(indicator_obj)\n\n        def method_3(value1, value2, value3, xid, **kwargs):  \n            \n            indicator_obj = custom_class(value1, value2, value3, xid, **kwargs)\n            return self._indicator(indicator_obj)\n\n        method = locals()['method_{}'.format(value_count)]\n        setattr(self, method_name, method)", "docstring": "Dynamically generate custom Indicator methods.\n\nArgs:\nname (str): The name of the method.\ncustom_class (object): The class to add.\nvalue_count (int): The number of value parameters to support.", "source": "juraj-google-style"}
{"code": "def _determine_trace_and_create_report(self, graph, ops_in_exec_path, graph_summary_tag):\n    self._check_trace_files()\n    graph_order = tensor_tracer_report.sort_tensors_and_ops(graph)\n    tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)\n    report_handler = tensor_tracer_report.TTReportHandle()\n    traced_tensors = self._determine_and_instrument_traced_tensors(graph_order, ops_in_exec_path, tensor_trace_points, report_handler)\n    logging.info('TensorTracer is tracing %d tensors.', len(traced_tensors))\n    if traced_tensors and tensor_tracer_flags.TT_CHECK_FILTER.value:\n        raise RuntimeError('Verify ops being traced by tensor tracer.')\n    tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order, traced_tensors)\n    num_signatures = self._num_signature_dimensions()\n    if num_signatures and self._use_tensor_values_cache():\n        if self._use_temp_cache():\n            self._create_temp_cache(len(traced_tensors), num_signatures, graph)\n        else:\n            self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph, [len(traced_tensors), num_signatures])\n            if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY:\n                self._create_or_get_tensor_history_values_cache(_TT_SUMMARY_TAG, graph, [len(traced_tensors), num_signatures])\n    if self._parameters.trace_mode in (tensor_tracer_flags.TRACE_MODE_SUMMARY, tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):\n        self._report_proto = report_handler.create_report_proto(self._tt_config, self._parameters, tensor_trace_order, tensor_trace_points, self._signature_types())\n        if self._parameters.use_fingerprint_subdir:\n            self._parameters.trace_dir = os.path.join(self._parameters.trace_dir, self._report_proto.fingerprint)\n            logging.info('TensorTracer updating trace_dir to %s', self._parameters.trace_dir)\n        self._report_proto_path = report_handler.report_proto_path(self._parameters.trace_dir, graph_summary_tag)\n        if self._parameters.report_file_path != _SKIP_REPORT_FILE:\n            report_handler.write_report_proto(self._report_proto_path, self._report_proto, self._parameters)\n    elif self._parameters.trace_mode not in tensor_tracer_flags.TRACE_MODE_HISTORY:\n        report_handler.create_report(self._tt_config, self._parameters, tensor_trace_order, tensor_trace_points)\n    return tensor_trace_order", "docstring": "Work needs to be done prior to TPU or CPU tracing.\n\nArgs:\ngraph: tf.graph\nops_in_exec_path: Set of operations in the execution path.\ngraph_summary_tag: the summary tag name for the given graph.\nReturns:\nAn instance of tensor_tracer_report.TensorTraceOrder, containing list of\ntensors to be traced with their topological order information.\nRaises:\nRuntimeError: If opname filtering is incorrectly set.", "source": "github-repos"}
{"code": "def slice_inputs(self, indices_dataset, inputs):\n    flat_inputs = nest.flatten(inputs)\n\n    def dynamic_shape_like(t):\n        shape = list(t.shape)\n        shape[0] = None\n        return tuple(shape)\n    flat_dtypes = [inp.dtype for inp in flat_inputs]\n    contiguous = True\n    if self._shuffle and self._shuffle != 'batch':\n        contiguous = False\n\n    def grab_batch(indices):\n        \n\n        def py_method(ind):\n\n            def slice_array(data):\n                return training_utils.slice_arrays(data, ind.numpy(), contiguous=contiguous)\n            return [slice_array(inp) for inp in flat_inputs]\n        flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)\n        for v, original_inp in zip(flat_out, flat_inputs):\n            v.set_shape(dynamic_shape_like(original_inp))\n        return nest.pack_sequence_as(inputs, flat_out)\n    dataset = indices_dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n    return dataset", "docstring": "Slice inputs into a Dataset of batches.\n\nGiven a Dataset of batch indices and the unsliced inputs,\nthis step slices the inputs in a parallelized fashion\nand produces a dataset of input batches.\n\nArgs:\nindices_dataset: A Dataset of batched indices\ninputs: A python data structure that contains the inputs, targets,\nand possibly sample weights.\n\nReturns:\nA Dataset of input batches matching the batch indices.", "source": "github-repos"}
{"code": "def convert_variables_to_constants(sess, input_graph_def, output_node_names, variable_names_whitelist=None, variable_names_blacklist=None):\n    ret = convert_variables_to_constants_from_session_graph(session=sess, graph_def=input_graph_def, output_node_names=output_node_names, variable_names_allowlist=variable_names_whitelist, variable_names_denylist=variable_names_blacklist)\n    return ret", "docstring": "Replaces all the variables in a graph with constants of the same values.\n\nIf you have a trained graph containing Variable ops, it can be convenient to\nconvert them all to Const ops holding the same values. This makes it possible\nto describe the network fully with a single GraphDef file, and allows the\nremoval of a lot of ops related to loading and saving the variables.\n\nArgs:\nsess: Active TensorFlow session containing the variables.\ninput_graph_def: GraphDef object holding the network.\noutput_node_names: List of name strings for the result nodes of the graph.\nvariable_names_whitelist: The set of variable names to convert (by default,\nall variables are converted).\nvariable_names_blacklist: The set of variable names to omit converting to\nconstants.\n\nReturns:\nGraphDef containing a simplified version of the original.\n\nRaises:\nRuntimeError: if a DT_RESOURCE op is found whose ancestor Variables are both\ndenylisted AND whitelisted for freezing.", "source": "github-repos"}
{"code": "def dfa(self, ttab: TransitionTable, init: int = 0) -> int:\n        \n        state = init\n        while True:\n            disp = ttab[state]\n            ch = self.peek()\n            state = disp.get(ch, disp[\"\"])()\n            if state < 0:\n                return state\n            self.offset += 1", "docstring": "Run a DFA and return the final (negative) state.\n\nArgs:\nttab: Transition table (with possible side-effects).\ninit: Initial state.\n\nRaises:\nEndOfInput: If past the end of `self.input`.", "source": "juraj-google-style"}
{"code": "def waitForEvent(self, event_name, predicate, timeout=None):\n    if timeout is None:\n        timeout = self.default_timeout_sec\n    deadline = time.perf_counter() + timeout\n    while time.perf_counter() <= deadline:\n        single_rpc_timeout = deadline - time.perf_counter()\n        if single_rpc_timeout < 0:\n            break\n        single_rpc_timeout = min(single_rpc_timeout, self.rpc_max_timeout_sec)\n        try:\n            event = self.waitAndGet(event_name, single_rpc_timeout)\n        except errors.CallbackHandlerTimeoutError:\n            break\n        if predicate(event):\n            return event\n    raise errors.CallbackHandlerTimeoutError(self._device, f'Timed out after {timeout}s waiting for an \"{event_name}\" event that satisfies the predicate \"{predicate.__name__}\".')", "docstring": "Waits for an event of the specific name that satisfies the predicate.\n\nThis call will block until the expected event has been received or time\nout.\n\nThe predicate function defines the condition the event is expected to\nsatisfy. It takes an event and returns True if the condition is\nsatisfied, False otherwise.\n\nNote all events of the same name that are received but don't satisfy\nthe predicate will be discarded and not be available for further\nconsumption.\n\nArgs:\nevent_name: str, the name of the event to wait for.\npredicate: function, a function that takes an event (dictionary) and\nreturns a bool.\ntimeout: float, the number of seconds to wait before giving up. If None,\nit will be set to self.default_timeout_sec.\n\nReturns:\ndictionary, the event that satisfies the predicate if received.\n\nRaises:\nerrors.CallbackHandlerTimeoutError: raised if no event that satisfies the\npredicate is received after timeout seconds.", "source": "github-repos"}
{"code": "def check(self, dsm, simplicity_factor=2, **kwargs):\n        \n        \n        economy_of_mechanism = False\n        message = ''\n        data = dsm.data\n        categories = dsm.categories\n        dsm_size = dsm.size[0]\n\n        if not categories:\n            categories = ['appmodule'] * dsm_size\n\n        dependency_number = 0\n        \n        for i in range(0, dsm_size):\n            for j in range(0, dsm_size):\n                if (categories[i] not in ('framework', 'corelib') and\n                        categories[j] not in ('framework', 'corelib') and\n                        data[i][j] > 0):\n                    dependency_number += 1\n                    \n        if dependency_number < dsm_size * simplicity_factor:\n            economy_of_mechanism = True\n        else:\n            message = ' '.join([\n                'Number of dependencies (%s)' % dependency_number,\n                '> number of rows (%s)' % dsm_size,\n                '* simplicity factor (%s) = %s' % (\n                    simplicity_factor, dsm_size * simplicity_factor)])\n        return economy_of_mechanism, message", "docstring": "Check economy of mechanism.\n\nAs first abstraction, number of dependencies between two modules\n< 2 * the number of modules\n(dependencies to the framework are NOT considered).\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\nsimplicity_factor (int): simplicity factor.\n\nReturns:\nbool: True if economic, else False", "source": "juraj-google-style"}
{"code": "def configure_profile(msg_type, profile_name, data, auth):\n    with jsonconfig.Config('messages', indent=4) as cfg:\n        write_data(msg_type, profile_name, data, cfg)\n        write_auth(msg_type, profile_name, auth, cfg)\n    print((('[+] Configuration entry for <' + profile_name) + '> created.'))\n    print(('[+] Configuration file location: ' + cfg.filename))", "docstring": "Create the profile entry.\n\nArgs:\n:msg_type: (str) message type to create config entry.\n:profile_name: (str) name of the profile entry\n:data: (dict) dict values for the 'settings'\n:auth: (dict) auth parameters", "source": "codesearchnet"}
{"code": "def post_process_segmentation(self, outputs: 'MaskFormerForInstanceSegmentationOutput', target_size: Optional[Tuple[int, int]]=None) -> 'torch.Tensor':\n    warnings.warn('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_instance_segmentation`', FutureWarning)\n    class_queries_logits = outputs.class_queries_logits\n    masks_queries_logits = outputs.masks_queries_logits\n    if target_size is not None:\n        masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=target_size, mode='bilinear', align_corners=False)\n    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n    masks_probs = masks_queries_logits.sigmoid()\n    segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n    return segmentation", "docstring": "Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only\nsupports PyTorch.\n\nArgs:\noutputs ([`MaskFormerForInstanceSegmentationOutput`]):\nThe outputs from [`MaskFormerForInstanceSegmentation`].\n\ntarget_size (`Tuple[int, int]`, *optional*):\nIf set, the `masks_queries_logits` will be resized to `target_size`.\n\nReturns:\n`torch.Tensor`:\nA tensor of shape (`batch_size, num_class_labels, height, width`).", "source": "github-repos"}
{"code": "def find_in_coord_list(coord_list, coord, atol=1e-08):\n    if (len(coord_list) == 0):\n        return []\n    diff = (np.array(coord_list) - np.array(coord)[(None, :)])\n    return np.where(np.all((np.abs(diff) < atol), axis=1))[0]", "docstring": "Find the indices of matches of a particular coord in a coord_list.\n\nArgs:\ncoord_list: List of coords to test\ncoord: Specific coordinates\natol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and\narray.\n\nReturns:\nIndices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.", "source": "codesearchnet"}
{"code": "def aerosol_optical_depth(self, value=0.999):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `aerosol_optical_depth`'.format(value))\n    self._aerosol_optical_depth = value", "docstring": "Corresponds to IDD Field `aerosol_optical_depth`\n\nArgs:\nvalue (float): value for IDD Field `aerosol_optical_depth`\nUnit: thousandths\nMissing value: 0.999\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def GetSectionByIndex(self, section_index):\n    if (not self._is_parsed):\n        self._Parse()\n        self._is_parsed = True\n    if ((section_index < 0) or (section_index >= len(self._sections))):\n        return None\n    return self._sections[section_index]", "docstring": "Retrieves a specific section based on the index.\n\nArgs:\nsection_index (int): index of the section.\n\nReturns:\nVolumeExtent: a volume extent or None if not available.", "source": "codesearchnet"}
{"code": "def every_other(x, name=None):\n  \n  with tf.name_scope(name, 'every_other', [x]) as scope:\n    x = tf.convert_to_tensor(x, name='x')\n    return tf.reshape(\n        tf.slice(\n            tf.reshape(x, [-1, 2]), [0, 0], [-1, 1]),\n        [-1],\n        name=scope)", "docstring": "Drops every other value from the tensor and returns a 1D tensor.\n\nThis is useful if you are running multiple inputs through a model tower\nbefore splitting them and you want to line it up with some other data.\n\nArgs:\nx: the target tensor.\nname: the name for this op, defaults to every_other\nReturns:\nA tensorflow op.", "source": "juraj-google-style"}
{"code": "def make_seeds(self, count=1):\n    alg = self.algorithm\n    if alg in (a.value for a in random_ops_util.Algorithm):\n        keys = self._make_int64_keys(shape=[count])\n        zeros = array_ops.zeros_like(keys)\n        return array_ops_stack.stack([keys, zeros])\n    else:\n        raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))", "docstring": "Generates seeds for stateless random ops.\n\nFor example:\n\n```python\nseeds = get_global_generator().make_seeds(count=10)\nfor i in range(10):\nseed = seeds[:, i]\nnumbers = stateless_random_normal(shape=[2, 3], seed=seed)\n...\n```\n\nArgs:\ncount: the number of seed pairs (note that stateless random ops need a\npair of seeds to invoke).\n\nReturns:\nA tensor of shape [2, count] and dtype int64.", "source": "github-repos"}
{"code": "def parallel(processor_list: Sequence[PartProcessor]) -> PartProcessor:\n    if not processor_list:\n        raise ValueError('processor_list is empty')\n    return _ParallelPartProcessor(processor_list)", "docstring": "Create a sequence of part processors to be run in parallel.\n\nArgs:\nprocessor_list: list of part processors.\n\nReturns:\nA processor consisting of the parallel run of all the processors in the\nlist. The execution is sequential from the first processor to the last but\nparts are processed concurrently overall.", "source": "github-repos"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'RunInference'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def product_category(request, category_id):\n    PRODUCTS_FORM_PREFIX = 'products'\n    VOUCHERS_FORM_PREFIX = 'vouchers'\n    v = _handle_voucher(request, VOUCHERS_FORM_PREFIX)\n    (voucher_form, voucher_handled) = v\n    category_id = int(category_id)\n    category = inventory.Category.objects.get(pk=category_id)\n    with BatchController.batch(request.user):\n        products = ProductController.available_products(request.user, category=category)\n        if (not products):\n            messages.warning(request, ('There are no products available from category: ' + category.name))\n            return redirect('dashboard')\n        p = _handle_products(request, category, products, PRODUCTS_FORM_PREFIX)\n        (products_form, discounts, products_handled) = p\n    if (request.POST and (not voucher_handled) and (not products_form.errors)):\n        if products_form.has_changed():\n            messages.success(request, 'Your reservations have been updated.')\n        return redirect(review)\n    data = {'category': category, 'discounts': discounts, 'form': products_form, 'voucher_form': voucher_form}\n    return render(request, 'registrasion/product_category.html', data)", "docstring": "Form for selecting products from an individual product category.\n\nArguments:\ncategory_id (castable to int): The id of the category to display.\n\nReturns:\nredirect or render:\nIf the form has been sucessfully submitted, redirect to\n``dashboard``. Otherwise, render\n``registrasion/product_category.html`` with data::\n\n{\n\"category\": category,         # An inventory.Category for\n# category_id\n\"discounts\": discounts,       # A list of\n# DiscountAndQuantity\n\"form\": products_form,        # A form for selecting\n# products\n\"voucher_form\": voucher_form, # A form for entering a\n# voucher code\n}", "source": "codesearchnet"}
{"code": "def generate_defect_structure(self, supercell=(1, 1, 1)):\n        \n        defect_structure = self.bulk_structure.copy()\n        defect_structure.make_supercell(supercell)\n\n        \n        \n        defect_properties = self.site.properties.copy()\n        if ('velocities' in self.bulk_structure.site_properties) and \\\n            'velocities' not in defect_properties:\n            if all( vel == self.bulk_structure.site_properties['velocities'][0]\n                    for vel in self.bulk_structure.site_properties['velocities']):\n                defect_properties['velocities'] = self.bulk_structure.site_properties['velocities'][0]\n            else:\n                raise ValueError(\"No velocity property specified for defect site and \"\n                                 \"bulk_structure velocities are not homogeneous. Please specify this \"\n                                 \"property within the initialized defect_site object.\")\n\n        \n        site_properties_for_fake_struct = {prop: [val] for prop,val in defect_properties.items()}\n        struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,\n                                             [self.site.specie],\n                                             [self.site.frac_coords],\n                                             to_unit_cell=True,\n                                             site_properties = site_properties_for_fake_struct)\n        struct_for_defect_site.make_supercell(supercell)\n        defect_site = struct_for_defect_site[0]\n\n        poss_deflist = sorted(\n            defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1])\n        defindex = poss_deflist[0][2]\n\n        subsite = defect_structure.pop(defindex)\n        defect_structure.append(self.site.specie.symbol, subsite.coords, coords_are_cartesian=True,\n                                properties = defect_site.properties)\n        defect_structure.set_charge(self.charge)\n        return defect_structure", "docstring": "Returns Defective Substitution structure, decorated with charge\nArgs:\nsupercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix", "source": "juraj-google-style"}
{"code": "def export(self, top=True):\n        \n        out = []\n        if top:\n            out.append(self._internal_name)\n        out.append(self._to_str(self.holiday_name))\n        out.append(self._to_str(self.holiday_day))\n        return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\ntop (bool):  if True appends `internal_name` before values.\nAll non list objects should be exported with value top=True,\nall list objects, that are embedded in as fields inlist objects\nshould be exported with `top`=False\n\nReturns:\nstr: The objects string representation", "source": "juraj-google-style"}
{"code": "def on_connection_state_change(self, event_type, callback):\n    listeners = self._connection_state_listeners.get(event_type, [])\n    listeners.append(callback)\n    self._connection_state_listeners[event_type] = listeners", "docstring": "Register a callback for a specific connection state change.\n\nRegister a callback to be triggered when the connection changes to\nthe specified state, signified by a ConnectionEvent.\n\nThe callback must be a coroutine.\n\nArgs:\nevent_type (ConnectionEvent): the connection event to listen for\ncallback (coroutine): a coroutine to call on the event occurrence", "source": "codesearchnet"}
{"code": "def get_log(self):\n    log_path = self.meta_data['logs_resource']\n    conn = Qubole.agent()\n    r = conn.get_raw(log_path)\n    return r.text", "docstring": "Fetches log for the command represented by this object\n\nReturns:\nThe log as a string", "source": "codesearchnet"}
{"code": "def reinforce_grid(self):\n        \n        \n\n        for grid_district in self.mv_grid_districts():\n\n            \n            grid_district.mv_grid.reinforce_grid()\n\n            \n            for lv_load_area in grid_district.lv_load_areas():\n                if not lv_load_area.is_aggregated:\n                    for lv_grid_district in lv_load_area.lv_grid_districts():\n                        lv_grid_district.lv_grid.reinforce_grid()", "docstring": "Performs grid reinforcement measures for all MV and LV grids\nArgs:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def setup_data_stream(self, connection_factory: Callable[([tuple], Connection)], data_stream_factory: Callable[([Connection], DataStream)]=DataStream) -> DataStream:\n    (yield from self._control_stream.write_command(Command('TYPE', 'I')))\n    reply = (yield from self._control_stream.read_reply())\n    self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)\n    address = (yield from self.passive_mode())\n    connection = (yield from connection_factory(address))\n    connection.reset()\n    (yield from connection.connect())\n    data_stream = data_stream_factory(connection)\n    return data_stream", "docstring": "Create and setup a data stream.\n\nThis function will set up passive and binary mode and handle\nconnecting to the data connection.\n\nArgs:\nconnection_factory: A coroutine callback that returns a connection\ndata_stream_factory: A callback that returns a data stream\n\nCoroutine.\n\nReturns:\nDataStream", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    if input_data_format is None:\n        input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))\n    data_format = input_data_format if data_format is None else data_format\n    if 'longest_edge' in size:\n        size = get_resize_output_image_size(image, resolution_max_side=size['longest_edge'], input_data_format=input_data_format)\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n    else:\n        raise ValueError(\"size must be a dictionary with key 'longest_edge' or 'height' and 'width'.\")\n    image_mode = None\n    if image.ndim == 2 or image.shape[-1] == 1:\n        image_mode = 'P'\n    image = to_pil_image(image, image_mode=image_mode, input_data_format=input_data_format)\n    resized_image = image.resize((size[1], size[0]), resample=resample)\n    resized_image = np.array(resized_image)\n    resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n    resized_image = to_channel_dimension_format(resized_image, data_format, input_channel_dim=ChannelDimension.LAST)\n    return resized_image", "docstring": "Resize an image. The longest edge of the image is resized to size[\"longest_edge\"], with the shortest edge\nresized to keep the input aspect ratio. Can also be used with size[\"height\"] and size[\"width\"].\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):\nResampling filter to use when resizing the image.\ndata_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the output image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=(- 0.05), half_kpts_first_relax=False, auto_continue=False):\n    incar_update = {'ISTART': 1}\n    if ediffg:\n        incar_update['EDIFFG'] = ediffg\n    settings_overide_1 = None\n    settings_overide_2 = [{'dict': 'INCAR', 'action': {'_set': incar_update}}, {'file': 'CONTCAR', 'action': {'_file_copy': {'dest': 'POSCAR'}}}]\n    if (half_kpts_first_relax and os.path.exists('KPOINTS') and os.path.exists('POSCAR')):\n        kpts = Kpoints.from_file('KPOINTS')\n        orig_kpts_dict = kpts.as_dict()\n        kpts.kpts = np.round(np.maximum((np.array(kpts.kpts) / 2), 1)).astype(int).tolist()\n        low_kpts_dict = kpts.as_dict()\n        settings_overide_1 = [{'dict': 'KPOINTS', 'action': {'_set': low_kpts_dict}}]\n        settings_overide_2.append({'dict': 'KPOINTS', 'action': {'_set': orig_kpts_dict}})\n    return [VaspJob(vasp_cmd, final=False, suffix='.relax1', auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_1), VaspJob(vasp_cmd, final=True, backup=False, suffix='.relax2', auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_2)]", "docstring": "Returns a list of two jobs corresponding to an AFLOW style double\nrelaxation run.\n\nArgs:\nvasp_cmd (str): Command to run vasp as a list of args. For example,\nif you are using mpirun, it can be something like\n[\"mpirun\", \"pvasp.5.2.11\"]\nauto_npar (bool): Whether to automatically tune NPAR to be sqrt(\nnumber of cores) as recommended by VASP for DFT calculations.\nGenerally, this results in significant speedups. Defaults to\nTrue. Set to False for HF, GW and RPA calculations.\nediffg (float): Force convergence criteria for subsequent runs (\nignored for the initial run.)\nhalf_kpts_first_relax (bool): Whether to halve the kpoint grid\nfor the first relaxation. Speeds up difficult convergence\nconsiderably. Defaults to False.\n\nReturns:\nList of two jobs corresponding to an AFLOW style run.", "source": "codesearchnet"}
{"code": "def get_sketch(self, sketch_id):\n    \n    resource_url = '{0:s}/sketches/{1:d}/'.format(self.api_base_url, sketch_id)\n    response = self.session.get(resource_url)\n    response_dict = response.json()\n    try:\n      response_dict['objects']\n    except KeyError:\n      raise ValueError('Sketch does not exist or you have no access')\n    return response_dict", "docstring": "Get information on the specified sketch.\n\nArgs:\nsketch_id (int): ID of sketch\n\nReturns:\ndict: Dictionary of sketch information\n\nRaises:\nValueError: Sketch is inaccessible", "source": "juraj-google-style"}
{"code": "def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected, use_gpu):\n    total_size_1 = 1\n    total_size_2 = 1\n    for s in tensor_in_sizes:\n        total_size_1 *= s\n    for s in filter_in_sizes:\n        total_size_2 *= s\n    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]\n    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]\n    with self.cached_session(use_gpu=use_gpu) as sess:\n        t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n        t1.set_shape(tensor_in_sizes)\n        t2 = constant_op.constant(x2, shape=filter_in_sizes)\n        conv = nn_ops.depthwise_conv2d_native(t1, t2, strides=[1, stride, stride, 1], padding=padding)\n        value = self.evaluate(conv)\n    tf_logging.info('value = %r', value)\n    self.assertArrayNear(expected, np.ravel(value), 1e-05)\n    self.assertShapeEqual(value, conv)", "docstring": "Verifies the output values of the depthwise convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,\ninput_depth, depth_multiplier].\nstride: Stride.\npadding: Padding type.\nexpected: An array containing the expected operation outputs.\nuse_gpu: Whether to use GPU.", "source": "github-repos"}
{"code": "class TrainState(train_state.TrainState):\n    logits_fn: Callable = struct.field(pytree_node=False)\n    loss_fn: Callable = struct.field(pytree_node=False)", "docstring": "Train state with an Optax optimizer.\n\nThe two functions below differ depending on whether the task is classification\nor regression.\n\nArgs:\nlogits_fn: Applied to last layer to obtain the logits.\nloss_fn: Function to compute the loss.", "source": "github-repos"}
{"code": "def filter_genes_and_strains(self, remove_genes_not_in_reference_model=True, remove_strains_with_no_orthology=True, remove_strains_with_no_differences=False, custom_keep_strains=None, custom_keep_genes=None):\n    if (len(self.df_orthology_matrix) == 0):\n        raise RuntimeError('Empty orthology matrix, please calculate first!')\n    reference_strain_gene_ids = [x.id for x in self.reference_gempro.genes]\n    initial_num_genes = len(reference_strain_gene_ids)\n    initial_num_strains = len(self.strain_ids)\n    to_remove_genes = []\n    if custom_keep_genes:\n        to_remove_genes.extend([x for x in reference_strain_gene_ids if (x not in custom_keep_genes)])\n    if remove_genes_not_in_reference_model:\n        to_remove_genes.extend([x for x in reference_strain_gene_ids if (x not in self.df_orthology_matrix.index.tolist())])\n    to_remove_genes = list(set(to_remove_genes))\n    if self.reference_gempro.model:\n        cobra.manipulation.delete_model_genes(self.reference_gempro.model, to_remove_genes)\n    else:\n        for g_id in to_remove_genes:\n            self.reference_gempro.genes.get_by_id(g_id).functional = False\n    new_gene_subset = [x.id for x in self.reference_gempro.functional_genes]\n    tmp_new_orthology_matrix = self.df_orthology_matrix[self.df_orthology_matrix.index.isin(new_gene_subset)]\n    if (custom_keep_strains or remove_strains_with_no_orthology or remove_strains_with_no_differences):\n        for strain_id in self.strain_ids:\n            if custom_keep_strains:\n                if (strain_id not in custom_keep_strains):\n                    self.strain_ids.remove(strain_id)\n                    continue\n            if remove_strains_with_no_orthology:\n                if (strain_id not in tmp_new_orthology_matrix.columns):\n                    self.strain_ids.remove(strain_id)\n                    log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_id))\n                    continue\n                elif tmp_new_orthology_matrix[strain_id].isnull().all():\n                    self.strain_ids.remove(strain_id)\n                    log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_id))\n                    continue\n            if remove_strains_with_no_differences:\n                not_in_strain = tmp_new_orthology_matrix[pd.isnull(tmp_new_orthology_matrix[strain_id])][strain_id].index.tolist()\n                if (len(not_in_strain) == 0):\n                    self.strain_ids.remove(strain_id)\n                    log.info('{}: strain has no differences from the base, removed from analysis.')\n                    continue\n    log.info('{} genes to be analyzed, originally {}'.format(len(self.reference_gempro.functional_genes), initial_num_genes))\n    log.info('{} strains to be analyzed, originally {}'.format(len(self.strain_ids), initial_num_strains))", "docstring": "Filters the analysis by keeping a subset of strains or genes based on certain criteria.\n\nArgs:\nremove_genes_not_in_reference_model (bool): Remove genes from reference model not in orthology matrix\nremove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found\nremove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.\nDefault is False because since orthology is found using a PID cutoff, all genes may be present but\ndifferences may be on the sequence level.\ncustom_keep_genes (list): List of gene IDs to keep 
in analysis\ncustom_keep_strains (list): List of strain IDs to keep in analysis", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = DabDetrFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DabDetrFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def get_event(self, event_name, event_history=None):\n    if (event_history is None):\n        event_history = (event_name + '_history')\n    return self._db.rpoplpush(event_name, event_history)", "docstring": "Get an event from the database.\n\nGets an event from the named event list removing the event and\nadding it to the event history.\n\nArgs:\nevent_name (str): Event list key.\nevent_history (str, optional): Event history list.\n\nReturns:\nstr: string representation of the event object", "source": "codesearchnet"}
{"code": "def download_models(self, uniprot_acc, outdir='', force_rerun=False):\n    downloaded = []\n    subset = self.get_models(uniprot_acc)\n    for entry in subset:\n        ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])\n        outfile = op.join(outdir, (ident + '.pdb'))\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n            response = requests.get(entry['url'])\n            if (response.status_code == 404):\n                log.error('{}: 404 returned, no model available.'.format(ident))\n            else:\n                with open(outfile, 'w') as f:\n                    f.write(response.text)\n                log.debug('{}: downloaded homology model'.format(ident))\n                downloaded.append(outfile)\n        else:\n            downloaded.append(outfile)\n    return downloaded", "docstring": "Download all models available for a UniProt accession number.\n\nArgs:\nuniprot_acc (str): UniProt ACC/ID\noutdir (str): Path to output directory, uses working directory if not set\nforce_rerun (bool): Force a redownload the models if they already exist\n\nReturns:\nlist: Paths to the downloaded models", "source": "codesearchnet"}
{"code": "def set_interface(self, vrf_name, interface, default=False, disable=False):\n    cmds = [('interface %s' % interface)]\n    cmds.append(self.command_builder('vrf forwarding', value=vrf_name, default=default, disable=disable))\n    return self.configure(cmds)", "docstring": "Adds a VRF to an interface\n\nNotes:\nRequires interface to be in routed mode. Must apply ip address\nafter VRF has been applied. This feature can also be accessed\nthrough the interfaces api.\n\nArgs:\nvrf_name (str): The VRF name to configure\ninterface (str): The interface to add the VRF too\ndefault (bool): Set interface VRF forwarding to default\ndisable (bool): Negate interface VRF forwarding\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "codesearchnet"}
{"code": "def get_sid_string(principal):\n    if (principal is None):\n        principal = 'NULL SID'\n    try:\n        return win32security.ConvertSidToStringSid(principal)\n    except TypeError:\n        principal = get_sid(principal)\n    try:\n        return win32security.ConvertSidToStringSid(principal)\n    except pywintypes.error:\n        log.exception('Invalid principal %s', principal)\n        raise CommandExecutionError('Invalid principal {0}'.format(principal))", "docstring": "Converts a PySID object to a string SID.\n\nArgs:\n\nprincipal(str):\nThe principal to lookup the sid. Must be a PySID object.\n\nReturns:\nstr: A string sid\n\nUsage:\n\n.. code-block:: python\n\n# Get a PySID object\npy_sid = salt.utils.win_dacl.get_sid('jsnuffy')\n\n# Get the string version of the SID\nsalt.utils.win_dacl.get_sid_string(py_sid)", "source": "codesearchnet"}
{"code": "def create_s3_event(app_name, env, region, bucket, triggers):\n    \n    session = boto3.Session(profile_name=env, region_name=region)\n    s3_client = session.client('s3')\n\n    lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)\n\n    LOG.debug(\"Lambda ARN for lambda function %s is %s.\", app_name, lambda_alias_arn)\n    LOG.debug(\"Creating S3 events for bucket %s\", bucket)\n\n    \n    principal = 's3.amazonaws.com'\n    statement_id = \"{}_s3_{}\".format(app_name, bucket).replace('.', '')\n    source_arn = \"arn:aws:s3:::{}\".format(bucket)\n    add_lambda_permissions(\n        function=lambda_alias_arn,\n        env=env,\n        region=region,\n        principal=principal,\n        statement_id=statement_id,\n        source_arn=source_arn)\n\n    \n    template_kwargs = {\"lambda_arn\": lambda_alias_arn, \"triggers\": triggers}\n    config = get_template(template_file='infrastructure/lambda/s3_event.json.j2', **template_kwargs)\n    s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=json.loads(config))\n\n    LOG.info(\"Created lambda %s S3 event on bucket %s\", app_name, bucket)", "docstring": "Create S3 lambda events from triggers\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\ntriggers (list): List of triggers from the settings", "source": "juraj-google-style"}
{"code": "def dump_tree(self, statement=None, indent_level=0):\n    out = u''\n    indent = (u' ' * indent_level)\n    if (statement is None):\n        for root_statement in self.statements:\n            out += self.dump_tree(root_statement, indent_level)\n    else:\n        out += ((indent + str(statement)) + u'\\n')\n        if (len(statement.children) > 0):\n            for child in statement.children:\n                out += self.dump_tree(child, indent_level=(indent_level + 4))\n    return out", "docstring": "Dump the AST for this parsed file.\n\nArgs:\nstatement (SensorGraphStatement): the statement to print\nif this function is called recursively.\nindent_level (int): The number of spaces to indent this\nstatement.  Used for recursively printing blocks of\nstatements.\nReturns:\nstr: The AST for this parsed sg file as a nested\ntree with one node per line and blocks indented.", "source": "codesearchnet"}
{"code": "def slice(self, start, end):\n        \n\n        reverse = False\n        if start > end:\n            temp = start\n            start = end\n            end = temp\n            reverse = True\n\n        seg = self.copy()\n        seg.points = seg.points[start:end+1]\n        if reverse:\n            seg.points = list(reversed(seg.points))\n\n        return seg", "docstring": "Creates a copy of the current segment between indexes. If end > start,\npoints are reverted\n\nArgs:\nstart (int): Start index\nend (int): End index\nReturns:\n:obj:`Segment`", "source": "juraj-google-style"}
{"code": "def create_bagit_stream(dir_name, payload_info_list):\n    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)\n    _add_path(dir_name, payload_info_list)\n    (payload_byte_count, payload_file_count) = _add_payload_files(zip_file, payload_info_list)\n    tag_info_list = _add_tag_files(zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count)\n    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)\n    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)\n    return zip_file", "docstring": "Create a stream containing a BagIt zip archive.\n\nArgs:\ndir_name : str\nThe name of the root directory in the zip file, under which all the files\nare placed (avoids \"zip bombs\").\n\npayload_info_list: list\nList of payload_info_dict, each dict describing a file.\n\n- keys: pid, filename, iter, checksum, checksum_algorithm\n- If the filename is None, the pid is used for the filename.", "source": "codesearchnet"}
{"code": "def get_images_by_catid_and_aoi(self, catid, aoi_wkt):\n        \n\n        self.logger.debug('Retrieving IDAHO metadata')\n\n        \n        url = '%s/search' % self.base_url\n\n        body = {\"filters\": [\"catalogID = '%s'\" % catid],\n                \"types\": [\"IDAHOImage\"],\n                \"searchAreaWkt\": aoi_wkt}\n\n        r = self.gbdx_connection.post(url, data=json.dumps(body))\n\n        r.raise_for_status()\n        if r.status_code == 200:\n            results = r.json()\n            numresults = len(results['results'])\n            self.logger.debug('%s IDAHO images found associated with catid %s'\n                              % (numresults, catid))\n\n            return results", "docstring": "Retrieves the IDAHO image records associated with a given catid.\nArgs:\ncatid (str): The source catalog ID from the platform catalog.\naoi_wkt (str): The well known text of the area of interest.\nReturns:\nresults (json): The full catalog-search response for IDAHO images\nwithin the catID.", "source": "juraj-google-style"}
{"code": "def needle_statistics_alignio(infile):\n    \n\n    alignments = list(AlignIO.parse(infile, \"emboss\"))\n\n    if len(alignments) > 1:\n        raise ValueError('Alignment file contains more than one pairwise alignment')\n\n    alignment = alignments[0]\n\n    with open(infile) as f:\n        line = f.readline()\n\n        for i in range(len(alignments)):\n            while line.rstrip() != \"\n                line = f.readline()\n                if not line:\n                    raise StopIteration\n\n            while line[0] == \"\n                \n                \n                parts = line[1:].split(\":\", 1)\n                key = parts[0].lower().strip()\n                if key == 'identity':\n                    ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()\n                    ident_num = int(ident_parse[0].split('/')[0])\n                    ident_percent = float(ident_parse[1])\n                    alignment.annotations['identity'] = ident_num\n                    alignment.annotations['percent_identity'] = ident_percent\n                if key == 'similarity':\n                    sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()\n                    sim_num = int(sim_parse[0].split('/')[0])\n                    sim_percent = float(sim_parse[1])\n                    alignment.annotations['similarity'] = sim_num\n                    alignment.annotations['percent_similarity'] = sim_percent\n                if key == 'gaps':\n                    gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()\n                    gap_num = int(gap_parse[0].split('/')[0])\n                    gap_percent = float(gap_parse[1])\n                    alignment.annotations['gaps'] = gap_num\n                    alignment.annotations['percent_gaps'] = gap_percent\n                if key == 'score':\n                    score = float(parts[1].strip())\n                    alignment.annotations['score'] = score\n\n                \n                line = f.readline()\n\n    return alignment", "docstring": "Reads in a needle alignment file and returns an AlignIO object with annotations\n\nArgs:\ninfile (str): Alignment file name\n\nReturns:\nAlignIO: annotated AlignIO object", "source": "juraj-google-style"}
{"code": "def success(channel, stats, name, platform, dp):\n    \n\n    \n    datapacks = [(\"Platform\", platform, False)]\n    for stat in stats:\n        \n        if stat[0] in (\"Duel 1v1\", \"Doubles 2v2\", \"Solo Standard 3v3\", \"Standard 3v3\"):\n            stat_name = \"__\" + stat[0] + \"__\"\n            stat_value = \"**\" + stat[1] + \"**\"\n        else:\n            stat_name = stat[0]\n            stat_value = stat[1]\n\n        \n        if stat[2]:\n            stat_value += \" *(Top \" + stat[2] + \"%)*\"\n\n        datapacks.append((stat_name, stat_value, True))\n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"Rocket League Stats: {}\".format(name),\n        \"*Stats obtained from [Rocket League Tracker Network](https:\n        modulename=modulename,\n        colour=0x0088FF,\n        thumbnail=dp,\n        datapacks=datapacks\n    )\n\n    return gui", "docstring": "Creates an embed UI containing the Rocket League stats\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nstats (tuple): Tuples of (field, value, percentile)\nname (str): The name of the player\nplatform (str): The playfor to search on, can be 'steam', 'ps', or 'xbox'\ndp (str): URL to the player's dp\n\nReturns:\n(discord.Embed): The created embed", "source": "juraj-google-style"}
{"code": "def _set_device(self, device) -> None:\n    self._set_device_from_string(compat.as_str(_device_string(device)))", "docstring": "Set the device of this operation.\n\nArgs:\ndevice: string or device..  The device to set.", "source": "github-repos"}
{"code": "def create_image_uri(region, framework, instance_type, framework_version, py_version=None, account='520713654638', accelerator_type=None, optimized_families=None):\n    optimized_families = (optimized_families or [])\n    if (py_version and (py_version not in VALID_PY_VERSIONS)):\n        raise ValueError('invalid py_version argument: {}'.format(py_version))\n    account = VALID_ACCOUNTS_BY_REGION.get(region, account)\n    if instance_type.startswith('local'):\n        device_type = ('cpu' if (instance_type == 'local') else 'gpu')\n    elif (not instance_type.startswith('ml.')):\n        raise ValueError('{} is not a valid SageMaker instance type. See: https:\n    else:\n        family = instance_type.split('.')[1]\n        if (family in optimized_families):\n            device_type = family\n        elif (family[0] in ['g', 'p']):\n            device_type = 'gpu'\n        else:\n            device_type = 'cpu'\n    if py_version:\n        tag = '{}-{}-{}'.format(framework_version, device_type, py_version)\n    else:\n        tag = '{}-{}'.format(framework_version, device_type)\n    if _accelerator_type_valid_for_framework(framework=framework, accelerator_type=accelerator_type, optimized_families=optimized_families):\n        framework += '-eia'\n    return '{}/sagemaker-{}:{}'.format(get_ecr_image_uri_prefix(account, region), framework, tag)", "docstring": "Return the ECR URI of an image.\n\nArgs:\nregion (str): AWS region where the image is uploaded.\nframework (str): framework used by the image.\ninstance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized).\nframework_version (str): The version of the framework.\npy_version (str): Optional. Python version. If specified, should be one of 'py2' or 'py3'.\nIf not specified, image uri will not include a python component.\naccount (str): AWS account that contains the image. (default: '520713654638')\naccelerator_type (str): SageMaker Elastic Inference accelerator type.\noptimized_families (str): Instance families for which there exist specific optimized images.\n\nReturns:\nstr: The appropriate image URI based on the given parameters.", "source": "codesearchnet"}
{"code": "def load_variables(defines, config_file):\n    \n\n    if config_file is not None:\n        with open(config_file, \"r\") as conf_file:\n            variables = yaml.load(conf_file)\n    else:\n        variables = {}\n\n    for define in defines:\n        name, equ, value = define.partition('=')\n        if equ != '=':\n            print(\"Invalid variable definition\")\n            print(\"- expected name=value\")\n            print(\"- found: '%s'\" % define)\n            sys.exit(1)\n\n        variables[name] = value\n\n    return variables", "docstring": "Load all variables from cmdline args and/or a config file.\n\nArgs:\ndefines (list of str): A list of name=value pairs that\ndefine free variables.\nconfig_file (str): An optional path to a yaml config\nfile that defines a single dict with name=value\nvariable definitions.", "source": "juraj-google-style"}
{"code": "def insert_before(self, value: Union[(RawValue, Value)], raw: bool=False) -> 'ArrayEntry':\n    return ArrayEntry(self.index, self.before, self.after.cons(self.value), self._cook_value(value, raw), self.parinst, self.schema_node, datetime.now())", "docstring": "Insert a new entry before the receiver.\n\nArgs:\nvalue: The value of the new entry.\nraw: Flag to be set if `value` is raw.\n\nReturns:\nAn instance node of the new inserted entry.", "source": "codesearchnet"}
{"code": "def get_all_dataset_names(configuration=None, **kwargs):\n    dataset = Dataset(configuration=configuration)\n    dataset['id'] = 'all dataset names'\n    return dataset._write_to_hdx('list', kwargs, 'id')", "docstring": "Get all dataset names in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n**kwargs: See below\nlimit (int): Number of rows to return. Defaults to all dataset names.\noffset (int): Offset in the complete result for where the set of returned dataset names should begin\n\nReturns:\nList[str]: list of all dataset names in HDX", "source": "codesearchnet"}
{"code": "def _estimate_data_distribution(c, num_examples_per_class_seen):\n    num_classes = num_examples_per_class_seen.get_shape()[0]\n    num_examples_per_class_seen = math_ops.add(num_examples_per_class_seen, math_ops.reduce_sum(array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))\n    init_prob_estimate = math_ops.truediv(num_examples_per_class_seen, math_ops.reduce_sum(num_examples_per_class_seen))\n    dist = math_ops.cast(init_prob_estimate, dtypes.float32)\n    return (num_examples_per_class_seen, dist)", "docstring": "Estimate data distribution as labels are seen.\n\nArgs:\nc: The class labels.  Type `int32`, shape `[batch_size]`.\nnum_examples_per_class_seen: Type `int64`, shape `[num_classes]`, containing\ncounts.\n\nReturns:\nnum_examples_per_lass_seen: Updated counts.  Type `int64`, shape\n`[num_classes]`.\ndist: The updated distribution.  Type `float32`, shape `[num_classes]`.", "source": "github-repos"}
{"code": "def cancel_id(cls, id):\n    conn = Qubole.agent()\n    data = {'status': 'kill'}\n    return conn.put(cls.element_path(id), data)", "docstring": "Cancels command denoted by this id\n\nArgs:\n`id`: command id", "source": "codesearchnet"}
{"code": "def render_registered(url_id, remote_info):\n    return template(read_index_template(), registered=True, url=remote_info['url'], seeder_data=json.dumps(remote_info), url_id=url_id)", "docstring": "Render template file for the registered user, which has some of the values\nprefilled.\n\nArgs:\nurl_id (str): Seeder URL id.\nremote_info (dict): Informations read from Seeder.\n\nReturns:\nstr: Template filled with data.", "source": "codesearchnet"}
{"code": "def forward_event_shape_tensor(self, input_shape, name='forward_event_shape_tensor'):\n    with self._name_scope(name, [input_shape]):\n        input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, name='input_shape')\n        return self._forward_event_shape_tensor(input_shape)", "docstring": "Shape of a single sample from a single batch as an `int32` 1D `Tensor`.\n\nArgs:\ninput_shape: `Tensor`, `int32` vector indicating event-portion shape\npassed into `forward` function.\nname: name to give to the op\n\nReturns:\nforward_event_shape_tensor: `Tensor`, `int32` vector indicating\nevent-portion shape after applying `forward`.", "source": "github-repos"}
{"code": "def suggestions(self, word):\n    suggestions = set(self._misspelling_dict.get(word, [])).union(set(self._misspelling_dict.get(word.lower(), [])))\n    return sorted([same_case(source=word, destination=w) for w in suggestions])", "docstring": "Returns a list of suggestions for a misspelled word.\n\nArgs:\nword: The word to check.\n\nReturns:\nList of zero or more suggested replacements for word.", "source": "codesearchnet"}
{"code": "def set_napp(self, user, napp, version=None):\n        \n        self.user = user\n        self.napp = napp\n        self.version = version or 'latest'", "docstring": "Set info about NApp.\n\nArgs:\nuser (str): NApps Server username.\nnapp (str): NApp name.\nversion (str): NApp version.", "source": "juraj-google-style"}
{"code": "def CheckEmptyBlockBody(filename, clean_lines, linenum, error):\n  \n\n  \n  \n  \n  \n  \n  \n  line = clean_lines.elided[linenum]\n  matched = Match(r'\\s*(for|while|if)\\s*\\(', line)\n  if matched:\n    \n    (end_line, end_linenum, end_pos) = CloseExpression(\n        clean_lines, linenum, line.find('('))\n\n    \n    \n    \n    if end_pos >= 0 and Match(r';', end_line[end_pos:]):\n      if matched.group(1) == 'if':\n        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,\n              'Empty conditional bodies should use {}')\n      else:\n        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,\n              'Empty loop bodies should use {} or continue')", "docstring": "Look for empty loop/conditional body with only a single semicolon.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def up(self) -> 'InstanceNode':\n    ts = max(self.timestamp, self.parinst.timestamp)\n    return self.parinst._copy(self._zip(), ts)", "docstring": "Return an instance node corresponding to the receiver's parent.\n\nRaises:\nNonexistentInstance: If there is no parent.", "source": "codesearchnet"}
{"code": "def log_transition(self, transition, from_state, instance, *args, **kwargs):\n    logger = logging.getLogger('xworkflows.transitions')\n    try:\n        instance_repr = u(repr(instance), 'ignore')\n    except (UnicodeEncodeError, UnicodeDecodeError):\n        instance_repr = u('<bad repr>')\n    logger.info(u('%s performed transition %s.%s (%s -> %s)'), instance_repr, self.__class__.__name__, transition.name, from_state.name, transition.target.name)", "docstring": "Log a transition.\n\nArgs:\ntransition (Transition): the name of the performed transition\nfrom_state (State): the source state\ninstance (object): the modified object\n\nKwargs:\nAny passed when calling the transition", "source": "codesearchnet"}
{"code": "def _on_scan(self, info):\n    device_id = info['uuid']\n    expiration_time = info.get('validity_period', 60)\n    infocopy = deepcopy(info)\n    infocopy['expiration_time'] = (monotonic() + expiration_time)\n    with self._scan_lock:\n        self._scanned_devices[device_id] = infocopy", "docstring": "Callback called when a new device is discovered on this CMDStream\n\nArgs:\ninfo (dict): Information about the scanned device", "source": "codesearchnet"}
{"code": "def Images(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.Images(tag)", "docstring": "Retrieve the image events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.ImageEvents`.", "source": "codesearchnet"}
{"code": "def __init__(self, uid):\n        \n        message = 'Schema of UID \"{}\" is unrecognized.'.format(uid)\n        super(UnknownUIDSchema, self).__init__(message)", "docstring": "Exception raised when a schema of a UID is unknown.\n\nArgs:\nuid (string): given UID", "source": "juraj-google-style"}
{"code": "def ensure_list_size(list_, size_):\n    \n    lendiff = (size_) - len(list_)\n    if lendiff > 0:\n        extension = [None for _ in range(lendiff)]\n        list_.extend(extension)", "docstring": "Allocates more space if needbe.\n\nEnsures len(``list_``) == ``size_``.\n\nArgs:\nlist_ (list): ``list`` to extend\nsize_ (int): amount to exent by", "source": "juraj-google-style"}
{"code": "def CopyFromDict(self, attributes):\n    \n    for attribute_name, attribute_value in attributes.items():\n      \n      if attribute_name[0] == '_':\n        continue\n      setattr(self, attribute_name, attribute_value)", "docstring": "Copies the attribute container from a dictionary.\n\nArgs:\nattributes (dict[str, object]): attribute values per name.", "source": "juraj-google-style"}
{"code": "def __sub__(self, other):\n    try:\n        other = as_dimension(other)\n    except (TypeError, ValueError):\n        return NotImplemented\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(self._value - other.value)", "docstring": "Returns the subtraction of `other` from `self`.\n\nDimensions are subtracted as follows:\n\n```python\ntf.compat.v1.Dimension(m)    - tf.compat.v1.Dimension(n)     ==\ntf.compat.v1.Dimension(m - n)\ntf.compat.v1.Dimension(m)    - tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n)     # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\n```\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is the subtraction of `other` from `self`.", "source": "github-repos"}
{"code": "def download_archive(self, id_or_uri, file_path):\n        \n        uri = self.URI + \"/archive/\" + extract_id_from_uri(id_or_uri)\n        return self._client.download(uri, file_path)", "docstring": "Download the details of the Golden Image capture logs, which has been archived based on the specific attribute\nID.\n\nArgs:\nid_or_uri: ID or URI of the Golden Image.\nfile_path (str): File name to save the archive.\n\nReturns:\nbool: Success.", "source": "juraj-google-style"}
{"code": "def _from_keras_log_format(data, **kwargs):\n    data_val = pd.DataFrame(data[['epoch']])\n    data_val['acc'] = data['val_acc']\n    data_val['loss'] = data['val_loss']\n    data_val['data'] = 'validation'\n    data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']])\n    data_training['data'] = 'training'\n    result = pd.concat([data_training, data_val], sort=False)\n    plot(result, **kwargs)", "docstring": "Plot accuracy and loss from a panda's dataframe.\n\nArgs:\ndata: Panda dataframe in the format of the Keras CSV log.\noutput_dir_path: The path to the directory where the resultings plots\nshould end up.", "source": "codesearchnet"}
{"code": "def alt40mcp(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[0] == '0':\n        return None\n\n    alt = bin2int(d[1:13]) * 16    \n    return alt", "docstring": "Selected altitude, MCP/FCU\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS40) string\n\nReturns:\nint: altitude in feet", "source": "juraj-google-style"}
{"code": "def _sample_row(self):\n    unis = np.random.uniform(0, 1, self.n_var)\n    first_ind = np.random.randint(0, self.n_var)\n    adj = self.trees[0].get_adjacent_matrix()\n    visited = []\n    explore = [first_ind]\n    sampled = np.zeros(self.n_var)\n    itr = 0\n    while explore:\n        current = explore.pop(0)\n        neighbors = np.where((adj[(current, :)] == 1))[0].tolist()\n        if (itr == 0):\n            new_x = self.ppfs[current](unis[current])\n        else:\n            for i in range((itr - 1), (- 1), (- 1)):\n                current_ind = (- 1)\n                if (i >= self.truncated):\n                    continue\n                current_tree = self.trees[i].edges\n                for edge in current_tree:\n                    if (i == 0):\n                        if (((edge.L == current) and (edge.R == visited[0])) or ((edge.R == current) and (edge.L == visited[0]))):\n                            current_ind = edge.index\n                            break\n                    elif ((edge.L == current) or (edge.R == current)):\n                        condition = set(edge.D)\n                        condition.add(edge.L)\n                        condition.add(edge.R)\n                        visit_set = set(visited)\n                        visit_set.add(current)\n                        if condition.issubset(visit_set):\n                            current_ind = edge.index\n                        break\n                if (current_ind != (- 1)):\n                    copula_type = current_tree[current_ind].name\n                    copula = Bivariate(CopulaTypes(copula_type))\n                    copula.theta = current_tree[current_ind].theta\n                    derivative = copula.partial_derivative_scalar\n                    if (i == (itr - 1)):\n                        tmp = optimize.fminbound(derivative, EPSILON, 1.0, args=(unis[visited[0]], unis[current]))\n                    else:\n                        tmp = optimize.fminbound(derivative, EPSILON, 1.0, args=(unis[visited[0]], tmp))\n                    tmp = min(max(tmp, EPSILON), 0.99)\n            new_x = self.ppfs[current](tmp)\n        sampled[current] = new_x\n        for s in neighbors:\n            if (s not in visited):\n                explore.insert(0, s)\n        itr += 1\n        visited.insert(0, current)\n    return sampled", "docstring": "Generate a single sampled row from vine model.\n\nReturns:\nnumpy.ndarray", "source": "codesearchnet"}
{"code": "def build_relative_position(query_layer, key_layer):\n    query_size = query_layer.size(-2)\n    key_size = key_layer.size(-2)\n    q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device)\n    k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device)\n    rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)\n    rel_pos_ids = rel_pos_ids[:query_size, :]\n    rel_pos_ids = rel_pos_ids.unsqueeze(0)\n    return rel_pos_ids", "docstring": "Build relative position according to the query and key\n\nWe assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\nP_k\\)\n\nArgs:\nquery_size (int): the length of query\nkey_size (int): the length of key\n\nReturn:\n`torch.LongTensor`: A tensor with shape [1, query_size, key_size]", "source": "github-repos"}
{"code": "def role_instance(self, value):\n        \n        if value == self._defaults['ai.cloud.roleInstance'] and 'ai.cloud.roleInstance' in self._values:\n            del self._values['ai.cloud.roleInstance']\n        else:\n            self._values['ai.cloud.roleInstance'] = value", "docstring": "The role_instance property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def get_volume_list(self) -> list:\n    volumes = []\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve all the services.')\n    volume_list = self._client.volumes.list()\n    for v_list in volume_list:\n        volumes.append(v_list.name)\n    return volumes", "docstring": "Get a list of docker volumes.\n\nOnly the manager nodes can retrieve all the volumes\n\nReturns:\nlist, all the names of the volumes in swarm", "source": "codesearchnet"}
{"code": "def chomp(text, max_len=280, split=None):\n    \n    split = split or '—;,.'\n    while length(text) > max_len:\n        try:\n            text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1]\n        except IndexError:\n            return text\n\n    return text", "docstring": "Shorten a string so that it fits under max_len, splitting it at 'split'.\nNot guaranteed to return a string under max_len, as it may not be possible\n\nArgs:\ntext (str): String to shorten\nmax_len (int): maximum length. default 140\nsplit (str): strings to split on (default is common punctuation: \"-;,.\")", "source": "juraj-google-style"}
{"code": "def flatten(weights, start=0, stop=2):\n    for (key, val) in weights.items():\n        new_shape = ((val.shape[0:start] + ((- 1),)) + val.shape[stop:])\n        weights[key] = val.reshape(new_shape)\n    return weights", "docstring": "This methods reshapes all values in a dictionary.\n\nThe indices from start to stop will be flattened into a single index.\n\nArgs:\nweights: A dictionary mapping keys to numpy arrays.\nstart: The starting index.\nstop: The ending index.", "source": "codesearchnet"}
{"code": "def total_clicks(self, url):\n        \n        url = self.clean_url(url)\n        clicks_url = f'{self.api_url}v3/link/clicks'\n        params = {\n            'link': url,\n            'access_token': self.api_key,\n            'format': 'txt'\n        }\n        response = self._get(clicks_url, params=params)\n        if not response.ok:\n            raise BadAPIResponseException(response.content)\n\n        try:\n            total_clicks = int(response.text)\n        except (KeyError, TypeError) as e:\n            logger.warning('Bad value from total_clicks response: %s', e)\n            return 0\n\n        return total_clicks", "docstring": "Total clicks implementation for Bit.ly\nArgs:\nurl: the URL you want to get the total clicks count\n\nReturns:\nAn int containing the total clicks count\n\nRaises:\nBadAPIResponseException: If the API Returns an error as response", "source": "juraj-google-style"}
{"code": "def shape_tensor(self, name='shape_tensor'):\n    with self._name_scope(name):\n        if self.shape.is_fully_defined():\n            return linear_operator_util.shape_tensor(self.shape.as_list())\n        else:\n            return self._shape_tensor()", "docstring": "Shape of this `LinearOperator`, determined at runtime.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n`[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`int32` `Tensor`", "source": "github-repos"}
{"code": "def jaccard_sims(feature_list):\n    \n\n    sim_info_list = []\n    for feature_info in feature_list:\n        md5_source = feature_info['md5']\n        features_source = feature_info['features']\n        for feature_info in feature_list:\n            md5_target = feature_info['md5']\n            features_target = feature_info['features']\n            if md5_source == md5_target: \n                continue\n            sim = jaccard_sim(features_source, features_target)\n            if sim > .5:\n                sim_info_list.append({'source': md5_source, 'target': md5_target, 'sim': sim})\n\n    return sim_info_list", "docstring": "Compute Jaccard similarities between all the observations in the feature list.\n\nArgs:\nfeature_list: a list of dictionaries, each having structure as\n{ 'md5' : String, 'features': list of Strings }\n\nReturns:\nlist of dictionaries with structure as\n{'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number}", "source": "juraj-google-style"}
{"code": "def flatten(input_layer, preserve_batch=True):\n  \n  if preserve_batch:\n    return reshape(input_layer, [DIM_SAME, -1])\n  else:\n    return reshape(input_layer, [-1])", "docstring": "Flattens this.\n\nIf preserve_batch is True, the result is rank 2 and the first dim (batch) is\nunchanged. Otherwise the result is rank 1.\n\nArgs:\ninput_layer: The Pretty Tensor object, supplied.\npreserve_batch: If True (the default), then preserve the first dimension.\nReturns:\nA LayerWrapper with the flattened tensor.", "source": "juraj-google-style"}
{"code": "def _AlignDecodedDataOffset(self, decoded_data_offset):\n    \n    self._file_object.seek(0, os.SEEK_SET)\n\n    self._decoder = self._GetDecoder()\n    self._decoded_data = b''\n\n    encoded_data_offset = 0\n    encoded_data_size = self._file_object.get_size()\n\n    while encoded_data_offset < encoded_data_size:\n      read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)\n      if read_count == 0:\n        break\n\n      encoded_data_offset += read_count\n\n      if decoded_data_offset < self._decoded_data_size:\n        self._decoded_data_offset = decoded_data_offset\n        break\n\n      decoded_data_offset -= self._decoded_data_size", "docstring": "Aligns the encoded file with the decoded data offset.\n\nArgs:\ndecoded_data_offset (int): decoded data offset.", "source": "juraj-google-style"}
{"code": "def run_multiple(self, eventLoops):\n        \n\n        self.nruns += len(eventLoops)\n        return self.communicationChannel.put_multiple(eventLoops)", "docstring": "run the event loops in the background.\n\nArgs:\neventLoops (list): a list of event loops to run", "source": "juraj-google-style"}
{"code": "def Wget(src_url, tgt_name, tgt_root=None):\n    if (tgt_root is None):\n        tgt_root = str(CFG['tmp_dir'])\n    from benchbuild.utils.cmd import wget\n    tgt_file = (local.path(tgt_root) / tgt_name)\n    if (not source_required(tgt_file)):\n        Copy(tgt_file, '.')\n        return\n    wget(src_url, '-O', tgt_file)\n    update_hash(tgt_file)\n    Copy(tgt_file, '.')", "docstring": "Download url, if required.\n\nArgs:\nsrc_url (str): Our SOURCE url.\ntgt_name (str): The filename we want to have on disk.\ntgt_root (str): The TARGET directory for the download.\nDefaults to ``CFG[\"tmpdir\"]``.", "source": "codesearchnet"}
{"code": "def fit_truncated_gaussian(samples, lower_bounds, upper_bounds):\n    if (len(samples.shape) == 1):\n        return _TruncatedNormalFitter()((samples, lower_bounds, upper_bounds))\n\n    def item_generator():\n        for ind in range(samples.shape[0]):\n            if is_scalar(lower_bounds):\n                lower_bound = lower_bounds\n            else:\n                lower_bound = lower_bounds[ind]\n            if is_scalar(upper_bounds):\n                upper_bound = upper_bounds\n            else:\n                upper_bound = upper_bounds[ind]\n            (yield (samples[ind], lower_bound, upper_bound))\n    results = np.array(multiprocess_mapping(_TruncatedNormalFitter(), item_generator()))\n    return (results[(:, 0)], results[(:, 1)])", "docstring": "Fits a truncated gaussian distribution on the given samples.\n\nThis will do a maximum likelihood estimation of a truncated Gaussian on the provided samples, with the\ntruncation points given by the lower and upper bounds.\n\nArgs:\nsamples (ndarray): a one or two dimensional array. If one dimensional we fit the truncated Gaussian on all\nvalues. If two dimensional, we calculate the truncated Gaussian for every set of samples over the\nfirst dimension.\nlower_bounds (ndarray or float): the lower bound, either a scalar or a lower bound per problem (first index of\nsamples)\nupper_bounds (ndarray or float): the upper bound, either a scalar or an upper bound per problem (first index of\nsamples)\n\nReturns:\nmean, std: the mean and std of the fitted truncated Gaussian", "source": "codesearchnet"}
{"code": "def xmon_op_from_proto_dict(proto_dict: Dict) -> ops.Operation:\n    \n\n    def raise_missing_fields(gate_name: str):\n        raise ValueError(\n            '{} missing required fields: {}'.format(gate_name, proto_dict))\n    param = _parameterized_value_from_proto_dict\n    qubit = devices.GridQubit.from_proto_dict\n    if 'exp_w' in proto_dict:\n        exp_w = proto_dict['exp_w']\n        if ('half_turns' not in exp_w or 'axis_half_turns' not in exp_w\n                or 'target' not in exp_w):\n            raise_missing_fields('ExpW')\n        return ops.PhasedXPowGate(\n            exponent=param(exp_w['half_turns']),\n            phase_exponent=param(exp_w['axis_half_turns']),\n        ).on(qubit(exp_w['target']))\n    elif 'exp_z' in proto_dict:\n        exp_z = proto_dict['exp_z']\n        if 'half_turns' not in exp_z or 'target' not in exp_z:\n            raise_missing_fields('ExpZ')\n        return ops.Z(qubit(exp_z['target']))**param(exp_z['half_turns'])\n    elif 'exp_11' in proto_dict:\n        exp_11 = proto_dict['exp_11']\n        if ('half_turns' not in exp_11 or 'target1' not in exp_11\n                or 'target2' not in exp_11):\n            raise_missing_fields('Exp11')\n        return ops.CZ(qubit(exp_11['target1']),\n                      qubit(exp_11['target2']))**param(exp_11['half_turns'])\n    elif 'measurement' in proto_dict:\n        meas = proto_dict['measurement']\n        invert_mask = cast(Tuple[Any, ...], ())\n        if 'invert_mask' in meas:\n            invert_mask = tuple(json.loads(x) for x in meas['invert_mask'])\n        if 'key' not in meas or 'targets' not in meas:\n            raise_missing_fields('Measurement')\n        return ops.MeasurementGate(\n            num_qubits=len(meas['targets']),\n            key=meas['key'],\n            invert_mask=invert_mask\n        ).on(*[qubit(q) for q in meas['targets']])\n    else:\n        raise ValueError('invalid operation: {}'.format(proto_dict))", "docstring": "Convert the proto dictionary to the corresponding operation.\n\nSee protos in api/google/v1 for specification of the protos.\n\nArgs:\nproto_dict: Dictionary representing the proto. Keys are always\nstrings, but values may be types correspond to a raw proto type\nor another dictionary (for messages).\n\nReturns:\nThe operation.\n\nRaises:\nValueError if the dictionary does not contain required values\ncorresponding to the proto.", "source": "juraj-google-style"}
{"code": "def query(self, object_class=None, json=None, **kwargs):\n    path = '/directory-sync-service/v1/{}'.format(object_class)\n    r = self._httpclient.request(method='POST', url=self.url, json=json, path=path, **kwargs)\n    return r", "docstring": "Query data stored in directory.\n\nRetrieves directory data by querying a Directory Sync Service\ncloud-based instance. The directory data is stored with the\nDirectory Sync Service instance using an agent that is installed\nin the customer's network.This agent retrieves directory data\nfrom the customer's Active Directory, and then sends it to the\ncloud-based Directory Sync Service instance.\n\nArgs:\nobject_class (str): Directory object class.\njson (dict): Payload/request body.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nComing soon.", "source": "codesearchnet"}
{"code": "def two_point_effective_mass( cartesian_k_points, eigenvalues ):\n    \n    assert( cartesian_k_points.shape[0] == 2 )\n    assert( eigenvalues.size == 2 )\n    dk = cartesian_k_points[ 1 ] - cartesian_k_points[ 0 ]\n    mod_dk = np.sqrt( np.dot( dk, dk ) )\n    delta_e = ( eigenvalues[ 1 ] - eigenvalues[ 0 ] ) * ev_to_hartree * 2.0\n    effective_mass = mod_dk * mod_dk / delta_e\n    return effective_mass", "docstring": "Calculate the effective mass given eigenvalues at two k-points.\nReimplemented from Aron Walsh's original effective mass Fortran code.\n\nArgs:\ncartesian_k_points (np.array): 2D numpy array containing the k-points in (reciprocal) Cartesian coordinates.\neigenvalues (np.array):        numpy array containing the eigenvalues at each k-point.\n\nReturns:\n(float): The effective mass", "source": "juraj-google-style"}
{"code": "def name_scope_only_in_function_or_graph(name):\n    if not context.executing_eagerly():\n        return ops.name_scope_v1(name)\n    else:\n        return NullContextmanager()", "docstring": "Internal-only entry point for `name_scope*`.\n\nEnters a compat.v1.name_scope only when in a function or graph,\nnot when running fully eagerly.\n\nArgs:\nname: The name argument that is passed to the op function.\n\nReturns:\n`name_scope*` context manager.", "source": "github-repos"}
{"code": "def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix):\n    attributed_strings = ui_element.getElementsByTagName('attributedString')\n    if (attributed_strings.length == 0):\n        return False\n    attributed_element = attributed_strings[0]\n    fragment_index = 1\n    for fragment in attributed_element.getElementsByTagName('fragment'):\n        try:\n            label_entry_key = fragment.attributes['content'].value\n        except KeyError:\n            label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue\n        comment = ('%s Part %d' % (comment_prefix, fragment_index))\n        results.append((label_entry_key, comment))\n        fragment_index += 1\n    return (fragment_index > 1)", "docstring": "Adds string pairs from a UI element with attributed text\n\nArgs:\nresults (list): The list to add the results to.\nattributed_element (element): The element from the xib that contains, to extract the fragments from.\ncomment_prefix (str): The prefix of the comment to use for extracted string\n(will be appended \"Part X\" suffices)\n\nReturns:\nbool: Whether or not an attributed string was found.", "source": "codesearchnet"}
{"code": "def replace(self, pattern, replacement):\n        \n        for i, line in enumerate(self):\n            if pattern in line:\n                self[i] = line.replace(pattern, replacement)", "docstring": "Replace all instances of a pattern with a replacement.\n\nArgs:\npattern (str): Pattern to replace\nreplacement (str): Text to insert", "source": "juraj-google-style"}
{"code": "def _paths_referenced_by(node: AbstractSyntaxTree) -> Tuple[Optional[str], Collection[str]]:\n    if isinstance(node, Identifier):\n        if node.value in ('$this', '$index', '$total'):\n            return (None, ())\n        else:\n            return (node.value, ())\n    if not node.children:\n        return (None, ())\n    context, paths = _paths_referenced_by(node.children[0])\n    if isinstance(node, Function):\n        context = None\n    if isinstance(node, Invocation) and isinstance(node.rhs, Identifier):\n        context = _append_path_to_context(context, node.rhs.value)\n        return (context, paths)\n    if context is not None:\n        paths = paths + (context,)\n    if isinstance(node, Invocation):\n        child_paths = _get_paths_from_children_except_first(node)\n        child_paths = tuple((_append_path_to_context(context, child_path) for child_path in child_paths))\n        return (context, paths + child_paths)\n    child_paths = _get_paths_from_children_except_first(node)\n    return (context, paths + child_paths)", "docstring": "Finds paths for any fields referenced in the given tree.\n\nRecursively builds paths by visitng the trees nodes depth-first in-order.\nReturns a tuple of (context, paths) where `context` is an identifier which may\nbe part of a dotted path completed by its parent and `paths` are the full\ndotted paths found so far.\n\nCallers are responsible for attempting to either continue chaining successive\nidentifiers from invocations to the context or acknowledging it as completed\nand adding it to `paths` if the caller has no identifiers to add to the chain.\n\nArgs:\nnode: The abstract syntax tree to search for paths.\n\nReturns:\nA tuple of (context, paths) as described above.", "source": "github-repos"}
{"code": "def _placeholder_value(like, shape_invariant, original=None):\n    if like is None:\n        return (original, None)\n    elif isinstance(like, (variables.Undefined, variables.UndefinedReturnValue)):\n        return (original, None)\n    elif isinstance(like, (int, float, bool)):\n        return (type(like)(0), None)\n    elif tensor_util.is_tf_type(like):\n        like_shape = shape_invariant if shape_invariant is not None else like.shape\n        if like_shape is None or like_shape.rank is None:\n            return (array_ops.zeros((), like.dtype), like_shape)\n        placeholder_shape = []\n        has_dynamic_dims = False\n        for s, i in zip(like.shape, like_shape):\n            if i is None:\n                like_dim = 0\n            elif isinstance(i, tensor_shape.Dimension):\n                if i.value is None:\n                    like_dim = 0\n                else:\n                    like_dim = i.value\n            else:\n                like_dim = i\n            if s is None:\n                placeholder_shape.append(like_dim)\n                has_dynamic_dims = True\n            elif isinstance(s, tensor_shape.Dimension):\n                if s.value is None:\n                    placeholder_shape.append(like_dim)\n                    has_dynamic_dims = True\n                else:\n                    placeholder_shape.append(s.value)\n            else:\n                placeholder_shape.append(s)\n        if has_dynamic_dims:\n            invariant = like_shape\n        else:\n            invariant = None\n        return (array_ops.zeros(placeholder_shape, like.dtype), invariant)\n    elif isinstance(like, (list, tuple, dict)):\n        if shape_invariant is None:\n            zipped = nest.map_structure(lambda v: _placeholder_value(v, None), nest.flatten(like))\n        else:\n            zipped = nest.map_structure(_placeholder_value, nest.flatten(like), nest.flatten(shape_invariant))\n        vals, invars = zip(*zipped)\n        return (nest.pack_sequence_as(like, vals), nest.pack_sequence_as(like, invars))\n    raise TypeError(\"Found an unsupported type '{}' while creating placeholder for {}. Supported types include Tensor, int, float, bool, list, tuple or dict.\".format(type(like).__name__, like))", "docstring": "Constructs a (dummy) placeholder value for a loop-initialized variable.\n\nArgs:\nlike: Any object. The value created by the first iteration of the loop. If a\nPython scalar, the placeholder will be the zero value of that type. If a\nTensor, the placeholder will be a zero tensor of matching shape and dtype.\nIf a list, dict or tuple, the placeholder will be an identical structure\nof placeholders.\nshape_invariant: The shape invariant specified by the user (or None, if\nnothing was specified) for the respective variable.\noriginal: Any object. The value of the variable prior to entering the loop.\nTypically, this is one of the special \"Undefined\" value, because that's\nwhen a placeholder is needed.\n\nReturns:\nEither a zero value of structure, shape and dtype matching 'like', or\n'original', if no such zero value could be created.", "source": "github-repos"}
{"code": "def map_kegg_all_genes(organism_code, target_db):\n    \n    mapping = bs_kegg.conv(target_db, organism_code)\n\n    \n    new_mapping = {}\n    for k,v in mapping.items():\n        new_mapping[k.replace(organism_code + ':', '')] = str(v.split(':')[1])\n\n    return new_mapping", "docstring": "Map all of an organism's gene IDs to the target database.\n\nThis is faster than supplying a specific list of genes to map,\nplus there seems to be a limit on the number you can map with a manual REST query anyway.\n\nArgs:\norganism_code: the three letter KEGG code of your organism\ntarget_db: ncbi-proteinid | ncbi-geneid | uniprot\n\nReturns:\nDictionary of ID mapping", "source": "juraj-google-style"}
{"code": "def _run_graph(self, device, output_shape, variable, num_outputs, axis):\n    graph = ops.Graph()\n    with graph.as_default():\n        if not variable:\n            if axis == 0:\n                input_shape = [output_shape[0] * num_outputs, output_shape[1]]\n                sizes = [output_shape[0] for _ in range(num_outputs)]\n            else:\n                input_shape = [output_shape[0], output_shape[1] * num_outputs]\n                sizes = [output_shape[1] for _ in range(num_outputs)]\n        else:\n            sizes = np.random.randint(low=max(1, output_shape[axis] - 2), high=output_shape[axis] + 2, size=num_outputs)\n            total_size = np.sum(sizes)\n            if axis == 0:\n                input_shape = [total_size, output_shape[1]]\n            else:\n                input_shape = [output_shape[0], total_size]\n        outputs = build_graph(device, input_shape, sizes, axis)\n    config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)))\n    with session_lib.Session(graph=graph, config=config) as session:\n        logging.set_verbosity('info')\n        variables.global_variables_initializer().run()\n        bench = benchmark.TensorFlowBenchmark()\n        bench.run_op_benchmark(session, outputs, mbs=input_shape[0] * input_shape[1] * 4 * 2 * 100 / 1000000.0, extras={'input_shape': input_shape, 'variable': variable, 'axis': axis})", "docstring": "Run the graph and print its execution time.\n\nArgs:\ndevice: string, the device to run on.\noutput_shape: shape of each output tensors.\nvariable: whether or not the output shape should be fixed\nnum_outputs: the number of outputs to split the input into\naxis: axis to be split\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def recipe_fred_regional_to_bigquery(config, auth, fred_api_key, fred_series_group, fred_region_type, fred_units, fred_frequency, fred_season, fred_aggregation_method, project, dataset):\n    fred(config, {'auth': auth, 'api_key': fred_api_key, 'frequency': fred_frequency, 'region_type': fred_region_type, 'regions': [{'series_group': fred_series_group, 'units': fred_units, 'season': fred_season, 'aggregation_method': fred_aggregation_method}], 'out': {'bigquery': {'project': project, 'dataset': dataset}}})", "docstring": "Download federal reserve region.\n\nArgs:\nauth (authentication) - Credentials used for writing data.\nfred_api_key (string) - 32 character alpha-numeric lowercase string.\nfred_series_group (string) - The ID for a group of seriess found in GeoFRED.\nfred_region_type (choice) - The region you want want to pull data for.\nfred_units (choice) - A key that indicates a data value transformation.\nfred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to.\nfred_season (choice) - The seasonality of the series group.\nfred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation.\nproject (string) - Existing BigQuery project.\ndataset (string) - Existing BigQuery dataset.", "source": "github-repos"}
{"code": "def join(self) -> None:\n    self._is_thread_joined = True\n    self._thread.join()\n    if self._exception is not None:\n        self._testcase.fail('Error in checkedThread: %s' % str(self._exception))", "docstring": "Blocks until the thread terminates.\n\nRaises:\nself._testcase.failureException: If the thread terminates with due to\nan exception.", "source": "github-repos"}
{"code": "def compute_predicted_aligned_error(logits: torch.Tensor, max_bin: int=31, no_bins: int=64, **kwargs) -> Dict[str, torch.Tensor]:\n    boundaries = torch.linspace(0, max_bin, steps=no_bins - 1, device=logits.device)\n    aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)\n    predicted_aligned_error, max_predicted_aligned_error = _calculate_expected_aligned_error(alignment_confidence_breaks=boundaries, aligned_distance_error_probs=aligned_confidence_probs)\n    return {'aligned_confidence_probs': aligned_confidence_probs, 'predicted_aligned_error': predicted_aligned_error, 'max_predicted_aligned_error': max_predicted_aligned_error}", "docstring": "Computes aligned confidence metrics from logits.\n\nArgs:\nlogits: [*, num_res, num_res, num_bins] the logits output from\nPredictedAlignedErrorHead.\nmax_bin: Maximum bin value\nno_bins: Number of bins\nReturns:\naligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted\naligned error probabilities over bins for each residue pair.\npredicted_aligned_error: [*, num_res, num_res] the expected aligned distance\nerror for each pair of residues.\nmax_predicted_aligned_error: [*] the maximum predicted error possible.", "source": "github-repos"}
{"code": "def set_xlim(self, xlim):\n        \n        if len(xlim) != 2:\n            raise ValueError(\"xlim must contain two elements\")\n\n        if xlim[1] < xlim[0]:\n            raise ValueError(\"Min must be less than Max\")\n\n        self.options[\"min_x\"] = xlim[0]\n        self.options[\"max_x\"] = xlim[1]", "docstring": "Set x-axis limits.\n\nAccepts a two-element list to set the x-axis limits.\n\nArgs:\nxlim (list): lower and upper bounds\n\nRaises:\nValueError: xlim must contain two elements\nValueError: Min must be less than max", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nInput to the layer.\nattention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nAttention mask.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings, to be added to `hidden_states`.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes of the backbone feature maps.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def uniform(self, low: float, high: float) -> float:\n        \n        return float(lib.TCOD_random_get_double(self.random_c, low, high))", "docstring": "Return a random floating number in the range: low <= n <= high.\n\nArgs:\nlow (float): The lower bound of the random range.\nhigh (float): The upper bound of the random range.\n\nReturns:\nfloat: A random float.", "source": "juraj-google-style"}
{"code": "def get_resource(self, resource_key, **variables):\n        \n        handle = self.make_resource_handle(resource_key, **variables)\n        return self.get_resource_from_handle(handle, verify_repo=False)", "docstring": "Get a resource.\n\nAttempts to get and return a cached version of the resource if\navailable, otherwise a new resource object is created and returned.\n\nArgs:\nresource_key (`str`):  Name of the type of `Resources` to find\nvariables: data to identify / store on the resource\n\nReturns:\n`PackageRepositoryResource` instance.", "source": "juraj-google-style"}
{"code": "def schema(self):\n\n    def _info2columns(info):\n        return tuple(((column['name'], column['type']) for column in info))\n\n    def _table2tuple(table):\n        return (table, _info2columns(self.table_info(table)))\n    return [_table2tuple(table) for table in self.tables]", "docstring": "Returns the schema of all tables.\n\nFor each table, return the name, and a list of tuples\nrepresenting the columns. Each column tuple consists of a\n(name, type) pair. Note that additional metadata, such as\nwhether a column may be null, or whether a column is a primary\nkey, is not returned.\n\nExample:\n\n>>> db.schema\n[(\"bar\", ((\"id\", \"integer\"), (\"name\", \"table\"))]\n\nReturns:\n\nlist of tuples: Each tuple has the format (name, columns), where\n\"columns\" is a list of tuples of the form (name, type).", "source": "codesearchnet"}
{"code": "def WriteArtifactsFile(self, artifacts, filename):\n    \n    with open(filename, 'w') as file_object:\n      file_object.write(self.FormatArtifacts(artifacts))", "docstring": "Writes artifact definitions to a file.\n\nArgs:\nartifacts (list[ArtifactDefinition]): artifact definitions to be written.\nfilename (str): name of the file to write artifacts to.", "source": "juraj-google-style"}
{"code": "def DecodeValueFromAttribute(self, attribute_name, value, ts):\n    try:\n        attribute = Attribute.PREDICATES[attribute_name]\n        cls = attribute.attribute_type\n        self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts), self.synced_attributes)\n    except KeyError:\n        pass\n    except (ValueError, rdfvalue.DecodeError):\n        logging.debug('%s: %s invalid encoding. Skipping.', self.urn, attribute_name)", "docstring": "Given a serialized value, decode the attribute.\n\nOnly attributes which have been previously defined are permitted.\n\nArgs:\nattribute_name: The string name of the attribute.\nvalue: The serialized attribute value.\nts: The timestamp of this attribute.", "source": "codesearchnet"}
{"code": "def write_bit(self, registeraddress, value, functioncode=5):\n    _checkFunctioncode(functioncode, [5, 15])\n    _checkInt(value, minvalue=0, maxvalue=1, description='input value')\n    self._genericCommand(functioncode, registeraddress, value)", "docstring": "Write one bit to the slave.\n\nArgs:\n* registeraddress (int): The slave register address (use decimal numbers, not hex).\n* value (int): 0 or 1\n* functioncode (int): Modbus function code. Can be 5 or 15.\n\nReturns:\nNone\n\nRaises:\nValueError, TypeError, IOError", "source": "codesearchnet"}
{"code": "def get_all_pipelines(self):\n    pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys()))\n    self._construct_solid_defs(pipelines)\n    return pipelines", "docstring": "Return all pipelines as a list\n\nReturns:\nList[PipelineDefinition]:", "source": "codesearchnet"}
{"code": "def subproc_call(cmd, timeout=None):\n    \n    try:\n        output = subprocess.check_output(\n            cmd, stderr=subprocess.STDOUT,\n            shell=True, timeout=timeout)\n        return output, 0\n    except subprocess.TimeoutExpired as e:\n        logger.warn(\"Command '{}' timeout!\".format(cmd))\n        logger.warn(e.output.decode('utf-8'))\n        return e.output, -1\n    except subprocess.CalledProcessError as e:\n        logger.warn(\"Command '{}' failed, return code={}\".format(cmd, e.returncode))\n        logger.warn(e.output.decode('utf-8'))\n        return e.output, e.returncode\n    except Exception:\n        logger.warn(\"Command '{}' failed to run.\".format(cmd))\n        return \"\", -2", "docstring": "Execute a command with timeout, and return STDOUT and STDERR\n\nArgs:\ncmd(str): the command to execute.\ntimeout(float): timeout in seconds.\n\nReturns:\noutput(bytes), retcode(int). If timeout, retcode is -1.", "source": "juraj-google-style"}
{"code": "def open_if_needed(self, mode=None):\n    was_open = self.is_open()\n    if (not was_open):\n        self.open(mode=mode)\n    try:\n        (yield self)\n    finally:\n        if (not was_open):\n            self.close()", "docstring": "Convenience context-manager for the use with ``with``.\nOpens the container if not already done.\nOnly closes the container if it was opened within this context.\n\nArgs:\nmode (str): Either 'r' for read-only, 'w' for truncate and write or\n'a' for append. (default: 'a').\nIf ``None``, uses ``self.mode``.", "source": "codesearchnet"}
{"code": "def parse_json(self, values_json):\n    \n    values_map = json.loads(values_json)\n    return self.override_from_dict(values_map)", "docstring": "Override existing hyperparameter values, parsing new values from a json object.\n\nArgs:\nvalues_json: String containing a json object of name:value pairs.\n\nReturns:\nThe `HParams` instance.\n\nRaises:\nKeyError: If a hyperparameter in `values_json` doesn't exist.\nValueError: If `values_json` cannot be parsed.", "source": "juraj-google-style"}
{"code": "async def send_message(self, segments, image_file=None, image_id=None, image_user_id=None):\n    async with self._send_message_lock:\n        if image_file:\n            try:\n                uploaded_image = (await self._client.upload_image(image_file, return_uploaded_image=True))\n            except exceptions.NetworkError as e:\n                logger.warning('Failed to upload image: {}'.format(e))\n                raise\n            image_id = uploaded_image.image_id\n        try:\n            request = hangouts_pb2.SendChatMessageRequest(request_header=self._client.get_request_header(), event_request_header=self._get_event_request_header(), message_content=hangouts_pb2.MessageContent(segment=[seg.serialize() for seg in segments]))\n            if (image_id is not None):\n                request.existing_media.photo.photo_id = image_id\n            if (image_user_id is not None):\n                request.existing_media.photo.user_id = image_user_id\n                request.existing_media.photo.is_custom_user_id = True\n            (await self._client.send_chat_message(request))\n        except exceptions.NetworkError as e:\n            logger.warning('Failed to send message: {}'.format(e))\n            raise", "docstring": "Send a message to this conversation.\n\nA per-conversation lock is acquired to ensure that messages are sent in\nthe correct order when this method is called multiple times\nasynchronously.\n\nArgs:\nsegments: List of :class:`.ChatMessageSegment` objects to include\nin the message.\nimage_file: (optional) File-like object containing an image to be\nattached to the message.\nimage_id: (optional) ID of an Picasa photo to be attached to the\nmessage. If you specify both ``image_file`` and ``image_id``\ntogether, ``image_file`` takes precedence and ``image_id`` will\nbe ignored.\nimage_user_id: (optional) Picasa user ID, required only if\n``image_id`` refers to an image from a different Picasa user,\nsuch as Google's sticker user.\n\nRaises:\n.NetworkError: If the message cannot be sent.", "source": "codesearchnet"}
{"code": "def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):\n    input_ = deepcopy(input_)\n    public_key = input_.owners_before[0]\n    message = sha3_256(message.encode())\n    if input_.fulfills:\n        message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode())\n    try:\n        input_.fulfillment.sign(message.digest(), base58.b58decode(key_pairs[public_key].encode()))\n    except KeyError:\n        raise KeypairMismatchException('Public key {} is not a pair to any of the private keys'.format(public_key))\n    return input_", "docstring": "Signs a Ed25519Fulfillment.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The input to be signed.\nmessage (str): The message to be signed\nkey_pairs (dict): The keys to sign the Transaction with.", "source": "codesearchnet"}
{"code": "def save(self, path):\n        \n        \n        self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)", "docstring": "Save source video to file.\n\nArgs:\npath (str): Filename to save to.\n\nNotes: Saves entire source video to file, not just currently selected\nframes.", "source": "juraj-google-style"}
{"code": "def print_matrix(self, format=None, output=sys.stdout, depth=0, **kwargs):\n        \n        matrix = self.as_matrix(depth=depth)\n        matrix.print(format=format, output=output, **kwargs)", "docstring": "Print the matrix for self's nodes.\n\nArgs:\nformat (str): output format (csv, json or text).\noutput (file): file descriptor on which to write.\ndepth (int): depth of the matrix.", "source": "juraj-google-style"}
{"code": "def _dominant_task_for_jobs(tasks):\n  \n\n  per_job = _group_tasks_by_jobid(tasks)\n\n  ret = []\n  for job_id in per_job.keys():\n    tasks_in_salience_order = sorted(per_job[job_id], key=_importance_of_task)\n    ret.append(tasks_in_salience_order[0])\n  return ret", "docstring": "A list with, for each job, its dominant task.\n\nThe dominant task is the one that exemplifies its job's\nstatus. It is either:\n- the first (FAILURE or CANCELED) task, or if none\n- the first RUNNING task, or if none\n- the first SUCCESS task.\n\nArgs:\ntasks: a list of tasks to consider\n\nReturns:\nA list with, for each job, its dominant task.", "source": "juraj-google-style"}
{"code": "def __init__(self, conf, map_name, automount_mountpoint=None):\n    super(FilesCache, self).__init__(conf, map_name, automount_mountpoint=automount_mountpoint)\n    self.cache_filename_suffix = conf.get('cache_filename_suffix', 'cache')\n    self._indices = {}\n    if hasattr(self, '_INDEX_ATTRIBUTES'):\n        for index in self._INDEX_ATTRIBUTES:\n            self._indices[index] = {}", "docstring": "Create a handler for the given map type.\n\nArgs:\nconf: a configuration object\nmap_name: a string representing the type of map we are\nautomount_mountpoint: A string containing the automount mountpoint, used\nonly by automount maps.", "source": "github-repos"}
{"code": "def speed_clustering(clf, points, min_time):\n    \n    \n    changepoints = detect_changepoints(points, min_time)\n\n    \n    cp_info = []\n\n    for i in range(0, len(changepoints) - 1):\n        from_index = changepoints[i]\n        to_index = changepoints[i+1]\n        info = classify(clf, points[from_index:to_index], min_time, from_index, to_index)\n        if info:\n            cp_info.append(info)\n\n    return group_modes(cp_info)", "docstring": "Transportation mode infering, based on changepoint segmentation\n\nArgs:\nclf (:obj:`Classifier`): Classifier to use\npoints (:obj:`list` of :obj:`Point`)\nmin_time (float): Min time, in seconds, before do another segmentation\nReturns:\n:obj:`list` of :obj:`dict`", "source": "juraj-google-style"}
{"code": "def expand_role(self, role):\n        \n        if '/' in role:\n            return role\n        else:\n            return self.boto_session.resource('iam').Role(role).arn", "docstring": "Expand an IAM role name into an ARN.\n\nIf the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full\nARN and return it.\n\nArgs:\nrole (str): An AWS IAM role (either name or full ARN).\n\nReturns:\nstr: The corresponding AWS IAM role ARN.", "source": "juraj-google-style"}
{"code": "def to_df(self, variables=None, format='wide', fillna=np.nan, **kwargs):\n    if (variables is None):\n        variables = list(self.variables.keys())\n    if (not isinstance(variables[0], BIDSVariable)):\n        variables = [v for v in self.variables.values() if (v.name in variables)]\n    dfs = [v.to_df(**kwargs) for v in variables]\n    df = pd.concat(dfs, axis=0, sort=True)\n    if (format == 'long'):\n        return df.reset_index(drop=True).fillna(fillna)\n    ind_cols = list((set(df.columns) - {'condition', 'amplitude'}))\n    df['amplitude'] = df['amplitude'].fillna('n/a')\n    df = df.pivot_table(index=ind_cols, columns='condition', values='amplitude', aggfunc='first')\n    df = df.reset_index().replace('n/a', fillna)\n    df.columns.name = None\n    return df", "docstring": "Merge variables into a single pandas DataFrame.\n\nArgs:\nvariables (list): Optional list of column names to retain; if None,\nall variables are returned.\nformat (str): Whether to return a DataFrame in 'wide' or 'long'\nformat. In 'wide' format, each row is defined by a unique\nonset/duration, and each variable is in a separate column. In\n'long' format, each row is a unique combination of onset,\nduration, and variable name, and a single 'amplitude' column\nprovides the value.\nfillna: Replace missing values with the specified value.\nkwargs: Optional keyword arguments to pass onto each Variable's\nto_df() call (e.g., condition, entities, and timing).\n\nReturns: A pandas DataFrame.", "source": "codesearchnet"}
{"code": "def CopyToProto(self, proto):\n    if ((self.file is not None) and (self._serialized_start is not None) and (self._serialized_end is not None)):\n        proto.ParseFromString(self.file.serialized_pb[self._serialized_start:self._serialized_end])\n    else:\n        raise Error('Descriptor does not contain serialization.')", "docstring": "Copies this to the matching proto in descriptor_pb2.\n\nArgs:\nproto: An empty proto instance from descriptor_pb2.\n\nRaises:\nError: If self couldnt be serialized, due to to few constructor arguments.", "source": "codesearchnet"}
{"code": "def _set_unknown_flag(self, name, value):\n    \n    setter = self.__dict__['__set_unknown']\n    if setter:\n      try:\n        setter(name, value)\n        return value\n      except (TypeError, ValueError):  \n        raise _exceptions.IllegalFlagValueError(\n            '\"{1}\" is not valid for --{0}' .format(name, value))\n      except NameError:  \n        pass\n    raise _exceptions.UnrecognizedFlagError(name, value)", "docstring": "Returns value if setting flag |name| to |value| returned True.\n\nArgs:\nname: str, name of the flag to set.\nvalue: Value to set.\n\nReturns:\nFlag value on successful call.\n\nRaises:\nUnrecognizedFlagError\nIllegalFlagValueError", "source": "juraj-google-style"}
{"code": "def GetHasherNamesFromString(cls, hasher_names_string):\n    hasher_names = []\n    if ((not hasher_names_string) or (hasher_names_string.strip() == 'none')):\n        return hasher_names\n    if (hasher_names_string.strip() == 'all'):\n        return cls.GetHasherNames()\n    for hasher_name in hasher_names_string.split(','):\n        hasher_name = hasher_name.strip()\n        if (not hasher_name):\n            continue\n        hasher_name = hasher_name.lower()\n        if (hasher_name in cls._hasher_classes):\n            hasher_names.append(hasher_name)\n    return hasher_names", "docstring": "Retrieves a list of a hasher names from a comma separated string.\n\nTakes a string of comma separated hasher names transforms it to a list of\nhasher names.\n\nArgs:\nhasher_names_string (str): comma separated names of hashers to enable,\nthe string 'all' to enable all hashers or 'none' to disable all\nhashers.\n\nReturns:\nlist[str]: names of valid hashers from the string, or an empty list if no\nvalid names are found.", "source": "codesearchnet"}
{"code": "def _lsb_release_info(self):\n    if (not self.include_lsb):\n        return {}\n    with open(os.devnull, 'w') as devnull:\n        try:\n            cmd = ('lsb_release', '-a')\n            stdout = subprocess.check_output(cmd, stderr=devnull)\n        except OSError:\n            return {}\n    content = stdout.decode(sys.getfilesystemencoding()).splitlines()\n    return self._parse_lsb_release_content(content)", "docstring": "Get the information items from the lsb_release command output.\n\nReturns:\nA dictionary containing all information items.", "source": "codesearchnet"}
{"code": "def _add_partition(self, connection, partition):\n        \n        logger.debug('Creating virtual table for partition.\\n    partition: {}'.format(partition.name))\n        sqlite_med.add_partition(connection, partition.datafile, partition.vid+'_vt')", "docstring": "Creates sqlite virtual table for mpr file of the given partition.\n\nArgs:\nconnection: connection to the sqlite db who stores mpr data.\npartition (orm.Partition):", "source": "juraj-google-style"}
{"code": "def save_csv(X, y, path):\n    \n\n    if sparse.issparse(X):\n        X = X.todense()\n\n    np.savetxt(path, np.hstack((y.reshape((-1, 1)), X)), delimiter=',')", "docstring": "Save data as a CSV file.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector.\npath (str): Path to the CSV file to save data.", "source": "juraj-google-style"}
{"code": "def run_fetches_info(self):\n    output = self._run_fetches_info\n    return output[0] if len(output) == 1 else output", "docstring": "Get a str representation of the fetches used in the Session.run() call.\n\nReturns:\nIf the information is available from one `Session.run` call, a `str`\nobtained from `repr(fetches)`.\nIf the information is available from multiple `Session.run` calls, a\n`list` of `str` from `repr(fetches)`.\nIf the information is not available, `None`.", "source": "github-repos"}
{"code": "def split_input(cls, mapper_spec):\n    params = _get_params(mapper_spec)\n    blob_keys = params[cls.BLOB_KEYS_PARAM]\n    if isinstance(blob_keys, basestring):\n        blob_keys = blob_keys.split(',')\n    blob_sizes = {}\n    for blob_key in blob_keys:\n        blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))\n        blob_sizes[blob_key] = blob_info.size\n    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)\n    shards_per_blob = (shard_count \n    if (shards_per_blob == 0):\n        shards_per_blob = 1\n    chunks = []\n    for (blob_key, blob_size) in blob_sizes.items():\n        blob_chunk_size = (blob_size \n        for i in xrange((shards_per_blob - 1)):\n            chunks.append(BlobstoreLineInputReader.from_json({cls.BLOB_KEY_PARAM: blob_key, cls.INITIAL_POSITION_PARAM: (blob_chunk_size * i), cls.END_POSITION_PARAM: (blob_chunk_size * (i + 1))}))\n        chunks.append(BlobstoreLineInputReader.from_json({cls.BLOB_KEY_PARAM: blob_key, cls.INITIAL_POSITION_PARAM: (blob_chunk_size * (shards_per_blob - 1)), cls.END_POSITION_PARAM: blob_size}))\n    return chunks", "docstring": "Returns a list of shard_count input_spec_shards for input_spec.\n\nArgs:\nmapper_spec: The mapper specification to split from. Must contain\n'blob_keys' parameter with one or more blob keys.\n\nReturns:\nA list of BlobstoreInputReaders corresponding to the specified shards.", "source": "codesearchnet"}
{"code": "def get(self, item, default=None):\n        \n        if hasattr(self, item):\n            return getattr(self, item)\n        try:\n            return self.__getitem__(item)\n\n        except KeyError:\n            return default", "docstring": "Returns the value ``item`` from the host or hosts group variables.\n\nArguments:\nitem(``str``): The variable to get\ndefault(``any``): Return value if item not found", "source": "juraj-google-style"}
{"code": "def pairwise_alignment_stats(reference_seq_aln, other_seq_aln):\n    \n    if len(reference_seq_aln) != len(other_seq_aln):\n        raise ValueError('Sequence lengths not equal - was an alignment run?')\n\n    reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)\n    other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln)\n\n    infodict = {}\n\n    \n    stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)\n    infodict['percent_identity'] = stats_percent_ident\n\n    \n    aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)\n    infodict['deletions'] = get_deletions(aln_df)\n    infodict['insertions'] = get_insertions(aln_df)\n    infodict['mutations'] = get_mutations(aln_df)\n    infodict['unresolved'] = get_unresolved(aln_df)\n\n    return infodict", "docstring": "Get a report of a pairwise alignment.\n\nArgs:\nreference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form\nother_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form\n\nReturns:\ndict: Dictionary of information on mutations, insertions, sequence identity, etc.", "source": "juraj-google-style"}
{"code": "def d_step(self, true_frames, gen_frames):\n    hparam_to_disc_loss = {'least_squares': gan_losses.least_squares_discriminator_loss, 'cross_entropy': gan_losses.modified_discriminator_loss, 'wasserstein': gan_losses.wasserstein_discriminator_loss}\n    (_, batch_size, _, _, _) = common_layers.shape_list(true_frames)\n    all_frames = tf.concat([true_frames, tf.stop_gradient(gen_frames)], axis=1)\n    all_logits = self.discriminator(all_frames)\n    (true_logits, fake_logits_stop) = (all_logits[:batch_size], all_logits[batch_size:])\n    mean_true_logits = tf.reduce_mean(true_logits)\n    tf.summary.scalar('mean_true_logits', mean_true_logits)\n    mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)\n    tf.summary.scalar('mean_fake_logits_stop', mean_fake_logits_stop)\n    discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]\n    gan_d_loss = discriminator_loss_func(discriminator_real_outputs=true_logits, discriminator_gen_outputs=fake_logits_stop, add_summaries=True)\n    return (gan_d_loss, true_logits, fake_logits_stop)", "docstring": "Performs the discriminator step in computing the GAN loss.\n\nApplies stop-gradient to the generated frames while computing the\ndiscriminator loss to make sure that the gradients are not back-propagated\nto the generator. This makes sure that only the discriminator is updated.\n\nArgs:\ntrue_frames: True outputs\ngen_frames: Generated frames.\nReturns:\nd_loss: Loss component due to the discriminator.", "source": "codesearchnet"}
{"code": "def _generate_matrix(self, hash_bytes):\n        \n\n        \n        \n        half_columns = self.columns \n        cells = self.rows * half_columns\n\n        \n        matrix = [[False] * self.columns for _ in range(self.rows)]\n\n        \n        for cell in range(cells):\n\n            \n            \n            \n            if self._get_bit(cell, hash_bytes[1:]):\n\n                \n                column = cell \n                row = cell % self.rows\n\n                \n                \n                matrix[row][column] = True\n                matrix[row][self.columns - column - 1] = True\n\n        return matrix", "docstring": "Generates matrix that describes which blocks should be coloured.\n\nArguments:\nhash_bytes - List of hash byte values for which the identicon is being\ngenerated. Each element of the list should be an integer from 0 to\n255.\n\nReturns:\nList of rows, where each element in a row is boolean. True means the\nforeground colour should be used, False means a background colour\nshould be used.", "source": "juraj-google-style"}
{"code": "def push(self, x):\n    raise NotImplementedError()", "docstring": "Push a new value to the tracker.\n\nArgs:\nx: The value to be pushed.", "source": "github-repos"}
{"code": "def _broadcast(value, target):\n    return tf.broadcast_to(tf.convert_to_tensor(value=value, dtype=target.dtype), distribution_util.prefer_static_shape(target)[:(- 1)])", "docstring": "Broadcast a value to match the batching dimensions of a target.\n\nIf necessary the value is converted into a tensor. Both value and target\nshould be of the same dtype.\n\nArgs:\nvalue: A value to broadcast.\ntarget: A `Tensor` of shape [b1, ..., bn, d].\n\nReturns:\nA `Tensor` of shape [b1, ..., bn] and same dtype as the target.", "source": "codesearchnet"}
{"code": "def __init__(self, request, scalars_plugin_instance):\n    \n    self._request = request\n    self._scalars_plugin_instance = scalars_plugin_instance", "docstring": "Constructor.\n\nArgs:\nrequest: A ListSessionGroupsRequest protobuf.\nscalars_plugin_instance: A scalars_plugin.ScalarsPlugin.", "source": "juraj-google-style"}
{"code": "def save_libsvm(X, y, path):\n    \n\n    dump_svmlight_file(X, y, path, zero_based=False)", "docstring": "Save data as a LibSVM file.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector.\npath (str): Path to the CSV file to save data.", "source": "juraj-google-style"}
{"code": "async def wasSet(self, node, oldv):\n        \n        for func in self.onsets:\n            try:\n                await s_coro.ornot(func, node, oldv)\n            except asyncio.CancelledError:\n                raise\n            except Exception:\n                logger.exception('onset() error for %s' % (self.full,))", "docstring": "Fire the onset() handlers for this property.\n\nArgs:\nnode (synapse.lib.node.Node): The node whose property was set.\noldv (obj): The previous value of the property.", "source": "juraj-google-style"}
{"code": "def from_dict(self, dictionary):\n    for (remote_name, remote_value) in dictionary.items():\n        local_name = next((name for (name, attribute) in self._attributes.items() if (attribute.remote_name == remote_name)), None)\n        if local_name:\n            setattr(self, local_name, remote_value)\n        else:\n            pass", "docstring": "Sets all the exposed ReST attribues from the given dictionary\n\nArgs:\ndictionary (dict): dictionnary containing the raw object attributes and their values.\n\nExample:\n>>> info = {\"name\": \"my group\", \"private\": False}\n>>> group = NUGroup()\n>>> group.from_dict(info)\n>>> print \"name: %s - private: %s\" % (group.name, group.private)\n\"name: my group - private: False\"", "source": "codesearchnet"}
{"code": "def sparse(self, rows: np.ndarray=None, cols: np.ndarray=None, layer: str=None) -> scipy.sparse.coo_matrix:\n    if (layer is None):\n        return self.layers[''].sparse(rows=rows, cols=cols)\n    else:\n        return self.layers[layer].sparse(rows=rows, cols=cols)", "docstring": "Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM\n\nArgs:\nrows:\t\tRows to include, or None to include all\ncols:\t\tColumns to include, or None to include all\nlayer:\t\tLayer to return, or None to return the default layer\n\nReturns:\nSparse matrix (:class:`scipy.sparse.coo_matrix`)", "source": "codesearchnet"}
{"code": "def compile_from_config(self, config):\n    has_overridden_compile = self.__class__.compile != Trainer.compile\n    if has_overridden_compile:\n        warnings.warn(\"`compile()` was not called as part of model loading because the model's `compile()` method is custom. All subclassed Models that have `compile()` overridden should also override `get_compile_config()` and `compile_from_config(config)`. Alternatively, you can call `compile()` manually after loading.\", stacklevel=2)\n        return\n    config = serialization_lib.deserialize_keras_object(config)\n    self.compile(**config)\n    if hasattr(self, 'optimizer') and self.built:\n        self.optimizer.build(self.trainable_variables)", "docstring": "Compiles the model with the information given in config.\n\nThis method uses the information in the config (optimizer, loss,\nmetrics, etc.) to compile the model.\n\nArgs:\nconfig: Dict containing information for compiling the model.", "source": "github-repos"}
{"code": "def custom_returnvalue(self, printer, desc=None):\n        \n        self.return_info = ReturnInfo(None, printer, True, desc)", "docstring": "Use a custom function to print the return value.\n\nArgs:\nprinter (callable): A function that should take in the return\nvalue and convert it to a string.\ndesc (str): An optional description of the return value.", "source": "juraj-google-style"}
{"code": "def assert_existing_objects_matched(self):\n    for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n        trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n        if trackable is not None and trackable._update_uid < self._checkpoint.restore_uid:\n            raise AssertionError(f'Object {node} not assigned a value from checkpoint.')\n    for trackable_object in util.list_objects(self._object_graph_view, self._options.experimental_skip_slot_variables):\n        if isinstance(trackable_object, data_structures.TrackableDataStructure) and (not trackable_object._trackable_children(save_type=base.SaveType.CHECKPOINT)):\n            continue\n        self._checkpoint.all_python_objects.add(trackable_object)\n    unused_python_objects = object_identity.ObjectIdentitySet(_objects_with_attributes(self._checkpoint.all_python_objects)) - object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n    if unused_python_objects:\n        num_unused_python_objects = len(list(unused_python_objects))\n        num_variables_to_show = min(10, num_unused_python_objects)\n        raise AssertionError(f'Found {num_unused_python_objects} Python objects that were not bound to checkpointed values, likely due to changes in the Python program. Showing {num_variables_to_show} of {num_unused_python_objects} unmatched objects: {list(unused_python_objects)[:num_variables_to_show]}')\n    return self", "docstring": "Asserts that trackable Python objects have been matched.\n\nNote that this is a weaker assertion than `assert_consumed`. It will only\nfail for existing Python objects which are (transitive) dependencies of the\nroot object and which do not have an entry in the checkpoint.\n\nIt will not fail, for example, if a `tf.keras.Layer` object has not yet been\nbuilt and so has not created any `tf.Variable` objects.\n\nReturns:\n`self` for chaining.\n\nRaises:\nAssertionError: If a Python object exists in the transitive dependencies\nof the root object but does not have a value in the checkpoint.", "source": "github-repos"}
{"code": "def mtr_lm_dense(sz):\n  \n  n = 2 ** sz\n  hparams = mtf_unitransformer_base()\n  hparams.d_model = 1024\n  hparams.max_length = 1024\n  hparams.batch_size = 128\n  \n  hparams.num_hidden_layers = 6\n  hparams.d_ff = 8192 * n\n  hparams.d_kv = 256\n  hparams.num_heads = 8 * n\n  hparams.learning_rate_decay_steps = 65536\n  hparams.layout = \"batch:batch;vocab:model;d_ff:model;heads:model\"\n  hparams.mesh_shape = \"batch:32\"\n  return hparams", "docstring": "Series of architectures for language modeling.\n\nWe assume infinite training data, so no dropout necessary.\n\nYou can use languagemodel_wiki_noref_v32k_l1k.\n(1 epoch = ~46000 steps).\nTODO(noam): find a large enough dataset for these experiments.\n\nArgs:\nsz: an integer\n\nReturns:\na hparams", "source": "juraj-google-style"}
{"code": "def _fulfillment_from_details(data, _depth=0):\n    \n    if _depth == 100:\n        raise ThresholdTooDeep()\n\n    if data['type'] == 'ed25519-sha-256':\n        public_key = base58.b58decode(data['public_key'])\n        return Ed25519Sha256(public_key=public_key)\n\n    if data['type'] == 'threshold-sha-256':\n        threshold = ThresholdSha256(data['threshold'])\n        for cond in data['subconditions']:\n            cond = _fulfillment_from_details(cond, _depth+1)\n            threshold.add_subfulfillment(cond)\n        return threshold\n\n    raise UnsupportedTypeError(data.get('type'))", "docstring": "Load a fulfillment for a signing spec dictionary\n\nArgs:\ndata: tx.output[].condition.details dictionary", "source": "juraj-google-style"}
{"code": "def _randomInts(self, shape, low, high):\n    val = np.random.randint(low=low, high=high, size=shape)\n    return constant_op.constant(val, dtype=dtypes.int32)", "docstring": "Generate a tensor of random 32-bit integer values.\n\nNote that we use numpy to generate random numbers and then feed the result\nthrough a constant op to avoid the re-rolling of TensorFlow random ops on\neach run in graph mode.\n\nArgs:\nshape: The output shape.\nlow: Lower bound of random numbers generated, inclusive.\nhigh: Upper bound of random numbers generated, exclusive.\n\nReturns:\nA random tensor", "source": "github-repos"}
{"code": "def generate_pyi(src, options=None, loader=None):\n    options = options or config.Options.create()\n    ret = generate_pyi_ast(src, options, loader)\n    return (ret, _output_ast(ret.ast, options))", "docstring": "Run the inferencer on a string of source code, producing output.\n\nArgs:\nsrc: The source code.\noptions: config.Options object.\nloader: A load_pytd.Loader instance.\n\nReturns:\nA tuple, (analyze.Analysis, pyi ast as string).\n\nRaises:\nCompileError: If we couldn't parse the input file.\nUsageError: If the input filepath is invalid.", "source": "github-repos"}
{"code": "def create_subtask(self, cor, name=None, stop_timeout=1.0):\n    if self.stopped:\n        raise InternalError('Cannot add a subtask to a parent that is already stopped')\n    subtask = BackgroundTask(cor, name, loop=self._loop, stop_timeout=stop_timeout)\n    self.add_subtask(subtask)\n    return subtask", "docstring": "Create and add a subtask from a coroutine.\n\nThis function will create a BackgroundTask and then\ncall self.add_subtask() on it.\n\nArgs:\ncor (coroutine): The coroutine that should be wrapped\nin a background task.\nname (str): An optional name for the task.\nstop_timeout (float): The maximum time to wait for this\nsubtask to die after stopping it.\n\nReturns:\nBackgroundtask: The created subtask.", "source": "codesearchnet"}
{"code": "def _format_csv(content, delimiter):\n    reader = csv_reader(StringIO(content), delimiter=builtin_str(delimiter))\n    rows = [row for row in reader]\n    max_widths = [max(map(len, column)) for column in zip(*rows)]\n    lines = [' '.join(('{entry:{width}}'.format(entry=entry, width=(width + 2)) for (entry, width) in zip(row, max_widths))) for row in rows]\n    return '\\n'.join(lines)", "docstring": "Format delimited text to have same column width.\n\nArgs:\ncontent (str): The content of a metric.\ndelimiter (str): Value separator\n\nReturns:\nstr: Formatted content.\n\nExample:\n\n>>> content = (\n\"value_mse,deviation_mse,data_set\\n\"\n\"0.421601,0.173461,train\\n\"\n\"0.67528,0.289545,testing\\n\"\n\"0.671502,0.297848,validation\\n\"\n)\n>>> _format_csv(content, \",\")\n\n\"value_mse  deviation_mse   data_set\\n\"\n\"0.421601   0.173461        train\\n\"\n\"0.67528    0.289545        testing\\n\"\n\"0.671502   0.297848        validation\\n\"", "source": "codesearchnet"}
{"code": "def make(self, path, metadata=None):\n    if self.mode != 'w':\n        raise ValueError('`make` is only allowed in write mode.')\n    if not isinstance(metadata, (dict, type(None))):\n        raise ValueError(f'`metadata` should be a dict or `None`. Received: {metadata}')\n    self._h5_entry_path = path\n    if metadata:\n        self._create_h5_group(path, metadata=metadata)\n    else:\n        self._h5_entry_group = {}\n        self._h5_entry_initialized = False\n    return self", "docstring": "Make a new H5 entry group.\n\nThis method is only available in write mode. It defers the creation of\nthe H5 entry group until `__setitem__` is called, preventing the\ncreation of empty groups.\n\nArgs:\npath: `str`. The variable path.\nmetadata: Optional `dict`. The metadata to save with the H5 entry\ngroup. Defaults to `None`.", "source": "github-repos"}
{"code": "def limit_replace(self, accountID, orderID, **kwargs):\n        \n        return self.replace(\n            accountID,\n            orderID,\n            order=LimitOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to replace a pending Limit Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Limit Order to replace\nkwargs : The arguments to create a LimitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def sg_cast(tensor, opt):\n    r\n    assert opt.dtype is not None, 'dtype is mandatory.'\n    return tf.cast(tensor, opt.dtype, name=opt.name)", "docstring": "r\"\"\"Casts a tensor to a new type.\n\nSee `tf.cast()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` or `SparseTensor` (automatically given by chain).\nopt:\ndtype : The destination type.\nname : If provided, it replaces current tensor's name\n\nReturns:\nA `Tensor` or `SparseTensor` with same shape as `tensor`.", "source": "juraj-google-style"}
{"code": "def _read_tags(self):\n    tags = self._config.get('tags', {})\n    logging.info('Tags:')\n    for tag_name in tags.keys():\n        tag = {}\n        tag['Key'] = tag_name\n        tag['Value'] = tags[tag_name]\n        self._tags.append(tag)\n        logging.info('{} = {}'.format(tag_name, tags[tag_name]))\n    logging.debug(json.dumps(self._tags, indent=2, sort_keys=True))\n    return True", "docstring": "Fill in the _tags dict from the tags file.\n\nArgs:\nNone\n\nReturns:\nTrue\n\nTodo:\nFigure what could go wrong and at least acknowledge the\nthe fact that Murphy was an optimist.", "source": "codesearchnet"}
{"code": "def _create_hparam_extractor(hparam_name):\n  \n  def extractor_fn(session_group):\n    if hparam_name in session_group.hparams:\n      return _value_to_python(session_group.hparams[hparam_name])\n    return None\n\n  return extractor_fn", "docstring": "Returns an extractor function that extracts an hparam from a session group.\n\nArgs:\nhparam_name: str. Identies the hparam to extract from the session group.\nReturns:\nA function that takes a tensorboard.hparams.SessionGroup protobuffer and\nreturns the value, as a native Python object, of the hparam identified by\n'hparam_name'.", "source": "juraj-google-style"}
{"code": "def _internal_kv_put(key, value, overwrite=False):\n    worker = ray.worker.get_global_worker()\n    if (worker.mode == ray.worker.LOCAL_MODE):\n        exists = (key in _local)\n        if ((not exists) or overwrite):\n            _local[key] = value\n        return exists\n    if overwrite:\n        updated = worker.redis_client.hset(key, 'value', value)\n    else:\n        updated = worker.redis_client.hsetnx(key, 'value', value)\n    return (updated == 0)", "docstring": "Globally associates a value with a given binary key.\n\nThis only has an effect if the key does not already have a value.\n\nReturns:\nalready_exists (bool): whether the value already exists.", "source": "codesearchnet"}
{"code": "def _compile_control_flow_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:\n    etype = expr.etype\n    args = expr.args\n    if (etype[1] == 'if'):\n        condition = self._compile_expression(args[0], scope, batch_size, noise)\n        true_case = self._compile_expression(args[1], scope, batch_size, noise)\n        false_case = self._compile_expression(args[2], scope, batch_size, noise)\n        fluent = TensorFluent.if_then_else(condition, true_case, false_case)\n    else:\n        raise ValueError('Invalid control flow expression:\\n{}'.format(expr))\n    return fluent", "docstring": "Compile a control flow expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "codesearchnet"}
{"code": "def __init__(self, rom_path):\n        \n        \n        rom = ROM(rom_path)\n        \n        if rom.prg_rom_size == 0:\n            raise ValueError('ROM has no PRG-ROM banks.')\n        \n        if rom.has_trainer:\n            raise ValueError('ROM has trainer. trainer is not supported.')\n        \n        _ = rom.prg_rom\n        \n        _ = rom.chr_rom\n        \n        if rom.is_pal:\n            raise ValueError('ROM is PAL. PAL is not supported.')\n        \n        elif rom.mapper not in {0, 1, 2, 3}:\n            msg = 'ROM has an unsupported mapper number {}.'\n            raise ValueError(msg.format(rom.mapper))\n        \n        self.np_random = np.random.RandomState()\n        \n        self._rom_path = rom_path\n        \n        self._env = _LIB.Initialize(self._rom_path)\n        \n        self.viewer = None\n        \n        self._has_backup = False\n        \n        self.done = True\n        \n        self.controllers = [self._controller_buffer(port) for port in range(2)]\n        self.screen = self._screen_buffer()\n        self.ram = self._ram_buffer()", "docstring": "Create a new NES environment.\n\nArgs:\nrom_path (str): the path to the ROM for the environment\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in (\n        'log_entry', 'log_entry_at_end', 'log_entry_offset',\n        'log_entry_offset_at_end'):\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    try:\n      date_time_string = self._GetISO8601String(structure)\n    except ValueError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to determine date time string with error: {0!s}'.format(\n              exception))\n\n    fraction_of_second_length = len(structure.fraction_of_second)\n    if fraction_of_second_length == 3:\n      date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()\n    elif fraction_of_second_length in (6, 7):\n      date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n\n    try:\n      date_time.CopyFromStringISO8601(date_time_string)\n    except ValueError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse date time value: {0:s} with error: {1!s}'.format(\n              date_time_string, exception))\n      return\n\n    event_data = SCCMLogEventData()\n    event_data.component = structure.component\n    \n    event_data.offset = 0\n    event_data.text = structure.text\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parse the record and return an SCCM log event object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def add_snippet_client(self, name, package):\n        \n        \n        if name in self._snippet_clients:\n            raise Error(\n                self,\n                'Name \"%s\" is already registered with package \"%s\", it cannot '\n                'be used again.' %\n                (name, self._snippet_clients[name].client.package))\n        \n        for snippet_name, client in self._snippet_clients.items():\n            if package == client.package:\n                raise Error(\n                    self,\n                    'Snippet package \"%s\" has already been loaded under name'\n                    ' \"%s\".' % (package, snippet_name))\n        client = snippet_client.SnippetClient(package=package, ad=self._device)\n        client.start_app_and_connect()\n        self._snippet_clients[name] = client", "docstring": "Adds a snippet client to the management.\n\nArgs:\nname: string, the attribute name to which to attach the snippet\nclient. E.g. `name='maps'` attaches the snippet client to\n`ad.maps`.\npackage: string, the package name of the snippet apk to connect to.\n\nRaises:\nError, if a duplicated name or package is passed in.", "source": "juraj-google-style"}
{"code": "def groups(self, group_type=None, filters=None, params=None):\n        \n        group = self._tcex.ti.group(group_type)\n        for g in self.tc_requests.groups_from_tag(group, self.name, filters=filters, params=params):\n            yield g", "docstring": "Gets all groups from a tag.\n\nArgs:\nfilters:\nparams:\ngroup_type:", "source": "juraj-google-style"}
{"code": "def load(cls, pkid_or_path=None):\n        \n        path = pkid_or_path\n        if isinstance(path, (int, np.int32, np.int64)):\n            raise NotImplementedError('Lookup via CMS not implemented.')\n        elif not os.path.isfile(path):\n            raise FileNotFoundError('File {} not found.'.format(path))\n        kwargs = {}\n        fields = defaultdict(dict)\n        with pd.HDFStore(path) as store:\n            for key in store.keys():\n                if 'kwargs' in key:\n                    kwargs.update(store.get_storer(key).attrs.metadata)\n                elif \"FIELD\" in key:\n                    name, dname = \"_\".join(key.split(\"_\")[1:]).split(\"/\")\n                    dname = dname.replace('values', '')\n                    fields[name][dname] = store[key]\n                else:\n                    name = str(key[1:])\n                    kwargs[name] = store[key]\n        for name, field_data in fields.items():\n            fps = field_data.pop('data')\n            kwargs[name] = Field(fps, field_values=[field_data[str(arr)] for arr in\n                                                    sorted(map(int, field_data.keys()))])\n        return cls(**kwargs)", "docstring": "Load a container object from a persistent location or file path.\n\nArgs:\npkid_or_path: Integer pkid corresponding to the container table or file path\n\nReturns:\ncontainer: The saved container object", "source": "juraj-google-style"}
{"code": "def export_as_package(self, package_path, cv_source):\n        \n        if os.path.exists(package_path):\n            raise exceptions.UserError('{} already exists'.format(package_path))\n\n        package_name = os.path.basename(os.path.normpath(package_path))\n\n        os.makedirs(package_path)\n\n        \n        with open(os.path.join(package_path, '__init__.py'), 'wb') as f:\n            f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))\n\n        \n        os.makedirs(os.path.join(package_path, 'baselearners'))\n        open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()\n        for idx, base_learner in enumerate(self.base_learners):\n            base_learner.export_as_file(os.path.join(package_path,\n                                                     'baselearners',\n                                                     'baselearner' + str(idx)))\n\n        \n        self.base_learner_origin.export_as_file(\n            os.path.join(package_path, 'metalearner'),\n            self.secondary_learner_hyperparameters\n        )\n\n        \n        with open(os.path.join(package_path, 'cv.py'), 'wb') as f:\n            f.write(cv_source.encode('utf8'))\n\n        \n        ensemble_source = ''\n        stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')\n        with open(stacker_file_loc) as f:\n            ensemble_source += f.read()\n\n        ensemble_source += '\\n\\n' \\\n                           '    def {}(self, X):\\n' \\\n                           '        return self._process_using_' \\\n                           'meta_feature_generator(X, \"{}\")\\n\\n'\\\n            .format(self.base_learner_origin.meta_feature_generator,\n                    self.base_learner_origin.meta_feature_generator)\n\n        with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:\n            f.write(ensemble_source.encode('utf8'))\n\n        \n        builder_source = ''\n\n        for idx, base_learner in enumerate(self.base_learners):\n            builder_source += 'from {}.baselearners import baselearner{}\\n'.format(package_name, idx)\n\n        builder_source += 'from {}.cv import return_splits_iterable\\n'.format(package_name)\n\n        builder_source += 'from {} import metalearner\\n'.format(package_name)\n\n        builder_source += 'from {}.stacker import XcessivStackedEnsemble\\n'.format(package_name)\n\n        builder_source += '\\nbase_learners = [\\n'\n        for idx, base_learner in enumerate(self.base_learners):\n            builder_source += '    baselearner{}.base_learner,\\n'.format(idx)\n        builder_source += ']\\n'\n\n        builder_source += '\\nmeta_feature_generators = [\\n'\n        for idx, base_learner in enumerate(self.base_learners):\n            builder_source += '    baselearner{}.meta_feature_generator,\\n'.format(idx)\n        builder_source += ']\\n'\n\n        builder_source += '\\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \\\n                          ' meta_feature_generators=meta_feature_generators,' \\\n                          ' secondary_learner=metalearner.base_learner,' \\\n                          ' cv_function=return_splits_iterable)\\n'\n\n        with open(os.path.join(package_path, 'builder.py'), 'wb') as f:\n            f.write(builder_source.encode('utf8'))", "docstring": "Exports the ensemble as a Python package and saves it to `package_path`.\n\nArgs:\npackage_path (str, unicode): 
Absolute/local path of place to save package in\n\ncv_source (str, unicode): String containing actual code for base learner\ncross-validation used to generate secondary meta-features.\n\nRaises:\nexceptions.UserError: If os.path.join(path, name) already exists.", "source": "juraj-google-style"}
{"code": "def get_variation_from_key(self, experiment_key, variation_key):\n    \n\n    variation_map = self.variation_key_map.get(experiment_key)\n\n    if variation_map:\n      variation = variation_map.get(variation_key)\n      if variation:\n        return variation\n      else:\n        self.logger.error('Variation key \"%s\" is not in datafile.' % variation_key)\n        self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))\n        return None\n\n    self.logger.error('Experiment key \"%s\" is not in datafile.' % experiment_key)\n    self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n    return None", "docstring": "Get variation given experiment and variation key.\n\nArgs:\nexperiment: Key representing parent experiment of variation.\nvariation_key: Key representing the variation.\n\nReturns\nObject representing the variation.", "source": "juraj-google-style"}
{"code": "def sanitize_git_path(self, uri, ref=None):\n    if uri.endswith('.git'):\n        dir_name = uri[:(- 4)]\n    else:\n        dir_name = uri\n    dir_name = self.sanitize_uri_path(dir_name)\n    if (ref is not None):\n        dir_name += ('-%s' % ref)\n    return dir_name", "docstring": "Take a git URI and ref and converts it to a directory safe path.\n\nArgs:\nuri (string): git URI\n(e.g. git@github.com:foo/bar.git)\nref (string): optional git ref to be appended to the path\n\nReturns:\nstr: Directory name for the supplied uri", "source": "codesearchnet"}
{"code": "def decode_iter_request(data: dict) -> Optional[Union[(str, int)]]:\n    if ('response_metadata' in data):\n        return data['response_metadata'].get('next_cursor')\n    elif ('paging' in data):\n        current_page = int(data['paging'].get('page', 1))\n        max_page = int(data['paging'].get('pages', 1))\n        if (current_page < max_page):\n            return (current_page + 1)\n    elif (('has_more' in data) and data['has_more'] and ('latest' in data)):\n        return data['messages'][(- 1)]['ts']\n    return None", "docstring": "Decode incoming response from an iteration request\n\nArgs:\ndata: Response data\n\nReturns:\nNext itervalue", "source": "codesearchnet"}
{"code": "def _sideral(date, longitude=0., model='mean', eop_correction=True, terms=106):\n    \n\n    t = date.change_scale('UT1').julian_century\n\n    \n    theta = 67310.54841 + (876600 * 3600 + 8640184.812866) * t + 0.093104 * t ** 2\\\n        - 6.2e-6 * t ** 3\n\n    \n    theta /= 240.\n\n    if model == 'apparent':\n        theta += equinox(date, eop_correction, terms)\n\n    \n    theta += longitude\n    \n    theta %= 360.\n\n    return theta", "docstring": "Get the sideral time at a defined date\n\nArgs:\ndate (Date):\nlongitude (float): Longitude of the observer (in degrees)\nEast positive/West negative.\nmodel (str): 'mean' or 'apparent' for GMST and GAST respectively\nReturn:\nfloat: Sideral time in degrees\n\nGMST: Greenwich Mean Sideral Time\nLST: Local Sideral Time (Mean)\nGAST: Greenwich Apparent Sideral Time", "source": "juraj-google-style"}
{"code": "def add_plugin(self, f):\n        \n        if f.endswith('.py'):\n\n            \n            plugin_name = os.path.splitext(os.path.basename(f))[0]\n\n            \n            if plugin_name in sys.modules:\n                try:\n                    handler = reload(sys.modules[plugin_name])\n                    print'\\t- %s %sRELOAD%s' % (plugin_name, color.Yellow, color.Normal)\n                except ImportError, error:\n                    print 'Failed to import plugin: %s (%s)' % (plugin_name, error)\n                    return\n            else:\n                \n                try:\n                    handler = __import__(plugin_name, globals(), locals(), [], -1)\n                except ImportError, error:\n                    print 'Failed to import plugin: %s (%s)' % (plugin_name, error)\n                    return\n\n            \n            plugin = self.validate(handler)\n            print '\\t- %s %sOK%s' % (plugin_name, color.Green, color.Normal)\n            if plugin:\n\n                \n                \n                plugin['name'] = plugin_name\n                plugin['dependencies'] = plugin['class'].dependencies\n                plugin['docstring'] = plugin['class'].__doc__\n                plugin['mod_time'] = datetime.utcfromtimestamp(os.path.getmtime(f))\n\n                \n                try:\n                    plugin['sample_set_input'] = getattr(plugin['class'], 'sample_set_input')\n                except AttributeError:\n                    plugin['sample_set_input'] = False\n\n                \n                self.plugin_callback(plugin)", "docstring": "Adding and verifying plugin.\n\nArgs:\nf: the filepath for the plugin.", "source": "juraj-google-style"}
{"code": "def get_domain_reports(self, domains):\n        \n        api_name = 'virustotal-domain-reports'\n\n        (all_responses, domains) = self._bulk_cache_lookup(api_name, domains)\n        responses = self._request_reports(\"domain\", domains, 'domain/report')\n\n        for domain, response in zip(domains, responses):\n            if self._cache:\n                self._cache.cache_value(api_name, domain, response)\n            all_responses[domain] = response\n\n        return all_responses", "docstring": "Retrieves the most recent VT info for a set of domains.\n\nArgs:\ndomains: list of string domains.\nReturns:\nA dict with the domain as key and the VT report as value.", "source": "juraj-google-style"}
{"code": "def add_minute(self, minute):\n    _moy = (self.moy + int(minute))\n    return self.__class__.from_moy(_moy)", "docstring": "Create a new DateTime after the minutes are added.\n\nArgs:\nminute: An integer value for minutes.", "source": "codesearchnet"}
{"code": "def state_view_for_block(block_wrapper, state_view_factory):\n        \n        state_root_hash = \\\n            block_wrapper.state_root_hash \\\n            if block_wrapper is not None else None\n\n        return state_view_factory.create_view(state_root_hash)", "docstring": "Returns the state view for an arbitrary block.\n\nArgs:\nblock_wrapper (BlockWrapper): The block for which a state\nview is to be returned\nstate_view_factory (StateViewFactory): The state view factory\nused to create the StateView object\n\nReturns:\nStateView object associated with the block", "source": "juraj-google-style"}
{"code": "def mkdir(self, path):\n        \n\n        self.__validate_storage_path(path, projects_allowed=False)\n        parent_metadata = self.get_parent(path)\n        self.api_client.create_folder(path.split('/')[-1], parent_metadata['uuid'])", "docstring": "Create a folder in the storage service pointed by the given path.\n\nArgs:\npath (str): The path of the folder to be created\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "juraj-google-style"}
{"code": "def _execute(self, connection, query, fetch=True):\n    with connection.cursor() as cursor:\n        cursor.execute(query)\n        if fetch:\n            return cursor.fetchall()\n        else:\n            cursor.execute('COMMIT;')", "docstring": "Executes given query and returns result.\n\nArgs:\nconnection: connection to postgres database who stores mpr data.\nquery (str): sql query\nfetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.\n\nReturns:\niterable with query result or None if fetch is False.", "source": "codesearchnet"}
{"code": "def get_bucket_files(glob_pattern, base_dir, force=False, pattern_slice=slice(None)):\n    if (gcsfs is None):\n        raise RuntimeError(\"Missing 'gcsfs' dependency for GCS download.\")\n    if (not os.path.isdir(base_dir)):\n        raise OSError('Directory does not exist: {}'.format(base_dir))\n    if isinstance(glob_pattern, str):\n        glob_pattern = [glob_pattern]\n    fs = gcsfs.GCSFileSystem(token='anon')\n    filenames = []\n    for gp in glob_pattern:\n        if isinstance(gp, str):\n            glob_results = list(fs.glob(gp))\n        else:\n            glob_results = [fn for pat in gp for fn in fs.glob(pat)]\n        for fn in glob_results[pattern_slice]:\n            ondisk_fn = os.path.basename(fn)\n            ondisk_pathname = os.path.join(base_dir, ondisk_fn)\n            filenames.append(ondisk_pathname)\n            if (force and os.path.isfile(ondisk_pathname)):\n                os.remove(ondisk_pathname)\n            elif os.path.isfile(ondisk_pathname):\n                LOG.info('Found existing: {}'.format(ondisk_pathname))\n                continue\n            LOG.info('Downloading: {}'.format(ondisk_pathname))\n            fs.get(('gs:\n    if (not filenames):\n        raise OSError('No files could be found or downloaded.')\n    return filenames", "docstring": "Helper function to download files from Google Cloud Storage.\n\nArgs:\nglob_pattern (str or list): Glob pattern string or series of patterns\nused to search for on Google Cloud Storage. The pattern should\ninclude the \"gs://\" protocol prefix. If a list of lists, then the\nresults of each sublist pattern are concatenated and the result is\ntreated as one pattern result. This is important for things like\n``pattern_slice`` and complicated glob patterns not supported by\nGCP.\nbase_dir (str): Root directory to place downloaded files on the local\nsystem.\nforce (bool): Force re-download of data regardless of its existence on\nthe local system. Warning: May delete non-demo files stored in\ndownload directory.\npattern_slice (slice): Slice object to limit the number of files\nreturned by each glob pattern.", "source": "codesearchnet"}
{"code": "def OpenAndRead(relative_path='debugger-blacklist.yaml'):\n    try:\n        with open(os.path.join(sys.path[0], relative_path), 'r') as f:\n            return Read(f)\n    except IOError:\n        return None", "docstring": "Attempts to find the yaml configuration file, then read it.\n\nArgs:\nrelative_path: Optional relative path override.\n\nReturns:\nA Config object if the open and read were successful, None if the file\ndoes not exist (which is not considered an error).\n\nRaises:\nError (some subclass): As thrown by the called Read() function.", "source": "codesearchnet"}
{"code": "def _PromptUserForVSSCurrentVolume(self):\n    while True:\n        self._output_writer.Write('Volume Shadow Snapshots (VSS) were selected also process current\\nvolume? [yes, no]\\n')\n        process_current_volume = self._input_reader.Read()\n        process_current_volume = process_current_volume.strip()\n        process_current_volume = process_current_volume.lower()\n        if ((not process_current_volume) or (process_current_volume in ('no', 'yes'))):\n            break\n        self._output_writer.Write('\\nUnsupported option, please try again or abort with Ctrl^C.\\n\\n')\n    self._output_writer.Write('\\n')\n    return ((not process_current_volume) or (process_current_volume == 'yes'))", "docstring": "Prompts the user if the current volume with VSS should be processed.\n\nReturns:\nbool: True if the current volume with VSS should be processed.", "source": "codesearchnet"}
{"code": "def look_source(self, sourcepath, library_paths=None):\n        \n        \n        \n        if sourcepath not in self._CHILDREN_MAP:\n            with io.open(sourcepath, 'r', encoding='utf-8') as fp:\n                finded_paths = self.parse(fp.read())\n\n            children = self.resolve(sourcepath, finded_paths,\n                                    library_paths=library_paths)\n\n            \n            self._CHILDREN_MAP[sourcepath] = children\n\n            \n            for p in children:\n                self._PARENTS_MAP[p].add(sourcepath)\n\n            \n            \n            for path in children:\n                if path not in self._CHILDREN_MAP:\n                    self.look_source(path, library_paths=library_paths)\n\n        return", "docstring": "Open a SCSS file (sourcepath) and find all involved file through\nimports.\n\nThis will fill internal buffers ``_CHILDREN_MAP`` and ``_PARENTS_MAP``.\n\nArgs:\nsourcepath (str): Source file path to start searching for imports.\n\nKeyword Arguments:\nlibrary_paths (list): List of directory paths for libraries to\nresolve paths if resolving fails on the base source path.\nDefault to None.", "source": "juraj-google-style"}
{"code": "def get_cartesian(self):\n\n    def create_cartesian(positions, row):\n        xyz_frame = pd.DataFrame(columns=['atom', 'x', 'y', 'z'], index=self.index[:row], dtype='f8')\n        xyz_frame['atom'] = self.loc[(xyz_frame.index, 'atom')]\n        xyz_frame.loc[(:, ['x', 'y', 'z'])] = positions[:row]\n        from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian\n        cartesian = Cartesian(xyz_frame, metadata=self.metadata)\n        return cartesian\n    c_table = self.loc[(:, ['b', 'a', 'd'])]\n    c_table = c_table.replace(constants.int_label)\n    c_table = c_table.replace({k: v for (v, k) in enumerate(c_table.index)})\n    c_table = c_table.values.astype('i8').T\n    C = self.loc[(:, ['bond', 'angle', 'dihedral'])].values.T\n    C[([1, 2], :)] = np.radians(C[([1, 2], :)])\n    (err, row, positions) = transformation.get_X(C, c_table)\n    positions = positions.T\n    if (err == ERR_CODE_InvalidReference):\n        rename = dict(enumerate(self.index))\n        i = rename[row]\n        (b, a, d) = self.loc[(i, ['b', 'a', 'd'])]\n        cartesian = create_cartesian(positions, row)\n        raise InvalidReference(i=i, b=b, a=a, d=d, already_built_cartesian=cartesian)\n    elif (err == ERR_CODE_OK):\n        return create_cartesian(positions, (row + 1))", "docstring": "Return the molecule in cartesian coordinates.\n\nRaises an :class:`~exceptions.InvalidReference` exception,\nif the reference of the i-th atom is undefined.\n\nArgs:\nNone\n\nReturns:\nCartesian: Reindexed version of the zmatrix.", "source": "codesearchnet"}
{"code": "def getfileversion(self):\n        \n\n        status, major_v, minor_v, release, info = _C.Hgetfileversion(self._id)\n        _checkErr('getfileversion', status, \"cannot get file version\")\n        return major_v, minor_v, release, info", "docstring": "Get file version info.\n\nArgs:\nno argument\nReturns:\n4-element tuple with the following components:\n-major version number (int)\n-minor version number (int)\n-complete library version number (int)\n-additional information (string)\n\nC library equivalent : Hgetlibversion", "source": "juraj-google-style"}
{"code": "def delete_fork_relation(self, **kwargs):\n    path = ('/projects/%s/fork' % self.get_id())\n    self.manager.gitlab.http_delete(path, **kwargs)", "docstring": "Delete a forked relation between existing projects.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def ami_lookup(region='us-east-1', name='tomcat8'):\n    if AMI_JSON_URL:\n        ami_dict = _get_ami_dict(AMI_JSON_URL)\n        ami_id = ami_dict[region][name]\n    elif GITLAB_TOKEN:\n        warn_user('Use AMI_JSON_URL feature instead.')\n        ami_contents = _get_ami_file(region=region)\n        ami_dict = json.loads(ami_contents)\n        ami_id = ami_dict[name]\n    else:\n        ami_id = name\n    LOG.info('Using AMI: %s', ami_id)\n    return ami_id", "docstring": "Look up AMI ID.\n\nUse _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided,\n_name_ is returned as the ami id.\n\nArgs:\nregion (str): AWS Region to find AMI ID.\nname (str): Simple AMI base name to lookup.\n\nReturns:\nstr: AMI ID for _name_ in _region_.", "source": "codesearchnet"}
{"code": "def get_msd_plot(self, plt=None, mode='specie'):\n    from pymatgen.util.plotting import pretty_plot\n    plt = pretty_plot(12, 8, plt=plt)\n    if (np.max(self.dt) > 100000):\n        plot_dt = (self.dt / 1000)\n        unit = 'ps'\n    else:\n        plot_dt = self.dt\n        unit = 'fs'\n    if (mode == 'species'):\n        for sp in sorted(self.structure.composition.keys()):\n            indices = [i for (i, site) in enumerate(self.structure) if (site.specie == sp)]\n            sd = np.average(self.sq_disp_ions[(indices, :)], axis=0)\n            plt.plot(plot_dt, sd, label=sp.__str__())\n        plt.legend(loc=2, prop={'size': 20})\n    elif (mode == 'sites'):\n        for (i, site) in enumerate(self.structure):\n            sd = self.sq_disp_ions[(i, :)]\n            plt.plot(plot_dt, sd, label=('%s - %d' % (site.specie.__str__(), i)))\n        plt.legend(loc=2, prop={'size': 20})\n    elif (mode == 'mscd'):\n        plt.plot(plot_dt, self.mscd, 'r')\n        plt.legend(['Overall'], loc=2, prop={'size': 20})\n    else:\n        plt.plot(plot_dt, self.msd, 'k')\n        plt.plot(plot_dt, self.msd_components[(:, 0)], 'r')\n        plt.plot(plot_dt, self.msd_components[(:, 1)], 'g')\n        plt.plot(plot_dt, self.msd_components[(:, 2)], 'b')\n        plt.legend(['Overall', 'a', 'b', 'c'], loc=2, prop={'size': 20})\n    plt.xlabel('Timestep ({})'.format(unit))\n    if (mode == 'mscd'):\n        plt.ylabel('MSCD ($\\\\AA^2$)')\n    else:\n        plt.ylabel('MSD ($\\\\AA^2$)')\n    plt.tight_layout()\n    return plt", "docstring": "Get the plot of the smoothed msd vs time graph. Useful for\nchecking convergence. This can be written to an image file.\n\nArgs:\nplt: A plot object. Defaults to None, which means one will be\ngenerated.\nmode (str): Determines type of msd plot. By \"species\", \"sites\",\nor direction (default). If mode = \"mscd\", the smoothed mscd vs.\ntime will be plotted.", "source": "codesearchnet"}
{"code": "def verify(self, obj):\n        \n\n        if obj not in self.options:\n            raise ValidationError(\"Object is not in list of enumerated options\",\n                                  reason='not in list of enumerated options', object=obj, options=self.options)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema.\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the object, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def times(*combined):\n    assert combined\n    if len(combined) == 1:\n        return combined[0]\n    first = combined[0]\n    rest_combined = times(*combined[1:])\n    combined_results = []\n    for a in first:\n        for b in rest_combined:\n            if set(a.keys()).intersection(set(b.keys())):\n                raise ValueError('Keys need to not overlap: {} vs {}'.format(a.keys(), b.keys()))\n            combined_results.append(OrderedDict(list(a.items()) + list(b.items())))\n    return combined_results", "docstring": "Generate a product of N sets of combinations.\n\ntimes(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])\n\nArgs:\n*combined: N lists of dictionaries that specify combinations.\n\nReturns:\na list of dictionaries for each combination.\n\nRaises:\nValueError: if some of the inputs have overlapping keys.", "source": "github-repos"}
{"code": "def get_developer_package(path, format=None):\n    \n    from rez.developer_package import DeveloperPackage\n    return DeveloperPackage.from_path(path, format=format)", "docstring": "Create a developer package.\n\nArgs:\npath (str): Path to dir containing package definition file.\nformat (str): Package definition file format, detected if None.\n\nReturns:\n`DeveloperPackage`.", "source": "juraj-google-style"}
{"code": "def get_pending_enrollment_message(cls, pending_users, enrolled_in):\n        \n        pending_emails = [pending_user.user_email for pending_user in pending_users]\n        return (\n            'warning',\n            _(\n                \"The following learners do not have an account on \"\n                \"{platform_name}. They have not been enrolled in \"\n                \"{enrolled_in}. When these learners create an account, they will \"\n                \"be enrolled automatically: {pending_email_list}\"\n            ).format(\n                platform_name=settings.PLATFORM_NAME,\n                enrolled_in=enrolled_in,\n                pending_email_list=', '.join(pending_emails),\n            )\n        )", "docstring": "Create message for the users who were enrolled in a course or program.\n\nArgs:\nusers: An iterable of PendingEnterpriseCustomerUsers who were successfully linked with a pending enrollment\nenrolled_in (str): A string identifier for the course or program the pending users were linked to\n\nReturns:\ntuple: A 2-tuple containing a message type and message text", "source": "juraj-google-style"}
{"code": "def _group_value_by_device(per_replica_values):\n    destinations = per_replica_values[0]._devices\n    grouped = [[] for _ in range(len(destinations))]\n    for per_replica_value in per_replica_values:\n        for i, v in enumerate(per_replica_value.values):\n            assert per_replica_value._devices == destinations\n            grouped[i].append((v, None))\n    return grouped", "docstring": "Group values into sublists by their devices.\n\nThis grouping is needed to call the all-reduce library because it expects a\nlist of the following form:\n[[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...],\n[(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...],\n[(grad0_gpu2, v0_gpu2), (grad1_gpu0, v1_gpu2), (grad2_gpu0, v2_gpu2) ...],\n...\n]\n\nArgs:\nper_replica_values: a list of PerReplica objects.\n\nReturns:\na list of lists, each sublist has components for its corresponding device of\nPerReplica objects, paired with a None.", "source": "github-repos"}
{"code": "def LoadData(self, data, custom_properties=None):\n    \n    self.__data = []\n    self.AppendData(data, custom_properties)", "docstring": "Loads new rows to the data table, clearing existing rows.\n\nMay also set the custom_properties for the added rows. The given custom\nproperties dictionary specifies the dictionary that will be used for *all*\ngiven rows.\n\nArgs:\ndata: The rows that the table will contain.\ncustom_properties: A dictionary of string to string to set as the custom\nproperties for all rows.", "source": "juraj-google-style"}
{"code": "def url_is(white_list):\n\n    def func(url):\n        prefixes = white_list.get('PREFIXES', ())\n        for prefix in prefixes:\n            if url.startswith(prefix):\n                return True\n        constants = white_list.get('CONSTANTS', ())\n        for exact_url in constants:\n            if (url == exact_url):\n                return True\n        return False\n    return func", "docstring": "Function generator.\n\nArgs:\nwhite_list (dict): dict with PREFIXES and CONSTANTS keys (list values).\n\nReturns:\nfunc: a function to check if a URL is...", "source": "codesearchnet"}
{"code": "def _cached_by_domain(api_name):\n\n    def wrapped(func):\n\n        def decorated(self, domains):\n            if (not self._cache):\n                return func(self, domains)\n            all_responses = self._cache.bulk_lookup(api_name, domains)\n            domains = list((set(domains) - set(all_responses)))\n            if domains:\n                response = func(self, domains)\n                if (not response):\n                    raise ResponseError('No response for uncached domains')\n                for domain in response:\n                    self._cache.cache_value(api_name, domain, response[domain])\n                    all_responses[domain] = response[domain]\n            return all_responses\n        return decorated\n    return wrapped", "docstring": "A caching wrapper for functions that take a list of domains as\nparameters.\n\nRaises:\nResponseError - if the response received from the endpoint is\nnot valid.", "source": "codesearchnet"}
{"code": "def process_new_issues(self, volumes, existing_issues):\n    new_issues = {}\n    for (issue_id, volume) in volumes.items():\n        state = EBSIssueState.DETECTED.value\n        if (issue_id in existing_issues):\n            issue = existing_issues[issue_id]\n            data = {'state': state, 'notes': issue.notes, 'last_notice': issue.last_notice}\n            if issue.update(data):\n                new_issues.setdefault(issue.volume.account, []).append(issue)\n                self.log.debug('Updated EBSVolumeAuditIssue {}'.format(issue_id))\n        else:\n            properties = {'volume_id': volume.id, 'account_id': volume.account_id, 'location': volume.location, 'state': state, 'last_change': datetime.now(), 'last_notice': None, 'notes': []}\n            issue = EBSVolumeAuditIssue.create(issue_id, properties=properties)\n            new_issues.setdefault(issue.volume.account, []).append(issue)\n    return new_issues", "docstring": "Takes a dict of existing volumes missing tags and a dict of existing issues, and finds any new or updated\nissues.\n\nArgs:\nvolumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues\nexisting_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues\n\nReturns:\n:obj:`dict` of `str`: `EBSVolumeAuditIssue`", "source": "codesearchnet"}
{"code": "def convert_tokens_into_matrix(self, token_list):\n        \n        return np.array(self.vectorize(token_list)).astype(np.float32)", "docstring": "Create matrix of sentences.\n\nArgs:\ntoken_list:     The list of tokens.\n\nReturns:\n2-D `np.ndarray` of sentences.\nEach row means one hot vectors of one sentence.", "source": "juraj-google-style"}
{"code": "def flatten_(structure):\n  \n  if isinstance(structure, dict):\n    if structure:\n      structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1]\n    else:\n      \n      structure = ()\n  if isinstance(structure, (tuple, list)):\n    result = []\n    for element in structure:\n      result += flatten_(element)\n    return tuple(result)\n  return (structure,)", "docstring": "Combine all leaves of a nested structure into a tuple.\n\nThe nested structure can consist of any combination of tuples, lists, and\ndicts. Dictionary keys will be discarded but values will ordered by the\nsorting of the keys.\n\nArgs:\nstructure: Nested structure.\n\nReturns:\nFlat tuple.", "source": "juraj-google-style"}
{"code": "def validate_read(self, address):\n        \n\n        if not any(address.startswith(ns) for ns in self._read_list):\n            raise AuthorizationException(address=address)", "docstring": "Raises an exception if the address is not allowed to be read in\nthis context, based on txn inputs.\n\nArgs:\naddress (str): An address to be validated.\n\nReturns:\nNone\n\nRaises:\nAuthorizationException", "source": "juraj-google-style"}
{"code": "def process_answer(self, user, item, asked, answered, time, answer, response_time, guess, **kwargs):\n    pass", "docstring": "This method is used during the answer streaming and is called after the\npredictive model for each answer.\n\nArgs:\nuser (int):\nidentifier of ther user answering the question\nasked (int):\nidentifier of the asked item\nanswered (int):\nidentifier of the answered item or None if the user answered\n\"I don't know\"\nresponse_time (int)\ntime the answer took in milliseconds\ntime (datetime.datetime)\ntime when the user answered the question\nguess (float):\nprobability of correct response in case of random answer", "source": "codesearchnet"}
{"code": "def print_serial_number_info(self, serial_number, print_to_screen=True):\n        \n        r = self.select_serial_number_row(serial_number)\n        if r.empty:\n            warnings.warn(\"missing serial number\")\n            return\n\n        txt1 = 80 * \"=\"\n        txt1 += \"\\n\"\n        txt1 += f\"   serial number {serial_number}\\n\"\n        txt1 = 80 * \"-\"\n        txt1 += \"\\n\"\n        txt2 = \"\"\n        for label, value in zip(r.columns, r.values[0]):\n            if label in self.headers:\n                txt1 += f\"{label}:    \\t {value}\\n\"\n            else:\n                txt2 += f\"({label}:    \\t {value})\\n\"\n        if print_to_screen:\n            print(txt1)\n            print(80 * \"-\")\n            print(txt2)\n            print(80 * \"=\")\n            return\n        else:\n            return txt1", "docstring": "Print information about the run.\n\nArgs:\nserial_number: serial number.\nprint_to_screen: runs the print statement if True,\nreturns txt if not.\n\nReturns:\ntxt if print_to_screen is False, else None.", "source": "juraj-google-style"}
{"code": "def concat_pairs(tensor_tuple0: Tuple[torch.Tensor], tensor_tuple1: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]:\n    return tuple([torch.cat([tensor0, tensor1]) for tensor0, tensor1 in zip(tensor_tuple0, tensor_tuple1)])", "docstring": "Concatenate two tuples of tensors pairwise\n\nArgs:\ntensor_tuple0 (`Tuple[torch.Tensor]`):\nTuple of tensors.\ntensor_tuple1 (`Tuple[torch.Tensor]`):\nTuple of tensors.\n\nReturns:\n(`Tuple[torch.Tensor]`): Tuple of concatenated tensors.", "source": "github-repos"}
{"code": "def __init__(self, inputs=None,\n                 outputs=None,\n                 assettype=AssetType.GoverningToken,\n                 assetname='',\n                 amount=Fixed8(0),\n                 precision=0,\n                 owner=None,\n                 admin=None):\n        \n        super(RegisterTransaction, self).__init__(inputs, outputs)\n        self.Type = TransactionType.RegisterTransaction  \n        self.AssetType = assettype\n        self.Name = assetname\n        self.Amount = amount  \n\n        if inputs is not None:\n            self.inputs = inputs\n        else:\n            self.inputs = []\n\n        if outputs is not None:\n            self.outputs = outputs\n        else:\n            self.outputs = []\n\n        if owner is not None and type(owner) is not EllipticCurve.ECPoint:\n            raise Exception(\"Invalid owner, must be ECPoint instance\")\n\n        self.Owner = owner\n        self.Admin = admin\n        self.Precision = precision", "docstring": "Create an instance.\n\nArgs:\ninputs (list):\noutputs (list):\nassettype (neo.Core.AssetType):\nassetname (str):\namount (Fixed8):\nprecision (int): number of decimals the asset has.\nowner (EllipticCurve.ECPoint):\nadmin (UInt160):", "source": "juraj-google-style"}
{"code": "def trigger(self, target: str, trigger: str, parameters: Dict[(str, Any)]={}):\n    pass", "docstring": "Calls the specified Trigger of another Area with the optionally given parameters.\n\nArgs:\ntarget: The name of the target Area.\ntrigger: The name of the Trigger.\nparameters: The parameters of the function call.", "source": "codesearchnet"}
{"code": "def RegisterSourceType(cls, source_type_class):\n    if (source_type_class.TYPE_INDICATOR in cls._source_type_classes):\n        raise KeyError('Source type already set for type: {0:s}.'.format(source_type_class.TYPE_INDICATOR))\n    cls._source_type_classes[source_type_class.TYPE_INDICATOR] = source_type_class", "docstring": "Registers a source type.\n\nSource types are identified based on their type indicator.\n\nArgs:\nsource_type_class (type): source type.\n\nRaises:\nKeyError: if source types is already set for the corresponding\ntype indicator.", "source": "codesearchnet"}
{"code": "def create_additional_charge(self, *, subscription_id, description, plan_value, plan_tax, plan_tax_return_base, currency):\n    payload = {'description': description, 'additionalValues': [{'name': 'ITEM_VALUE', 'value': plan_value, 'currency': currency}, {'name': 'ITEM_TAX', 'value': plan_tax, 'currency': currency}, {'name': 'ITEM_TAX_RETURN_BASE', 'value': plan_tax_return_base, 'currency': currency}]}\n    fmt = 'subscriptions/{}/recurringBillItems'.format(subscription_id)\n    return self.client._post((self.url + fmt), json=payload, headers=self.get_headers())", "docstring": "Adds extra charges to the respective invoice for the current period.\n\nArgs:\nsubscription_id: Identification of the subscription\ndescription:\nplan_value:\nplan_tax:\nplan_tax_return_base:\ncurrency:\n\nReturns:", "source": "codesearchnet"}
{"code": "def create_list(self, **kwargs):\n    path = self._get_path('create_list')\n    kwargs.update({'session_id': self.session_id})\n    payload = {'name': kwargs.pop('name', None), 'description': kwargs.pop('description', None)}\n    if ('language' in kwargs):\n        payload['language'] = kwargs['language']\n    response = self._POST(path, kwargs, payload)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Create a new list.\n\nA valid session id is required.\n\nArgs:\nname: Name of the list.\ndescription: Description of the list.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def rotation(self):\n    rotation = self._libinput.libinput_event_tablet_tool_get_rotation(self._handle)\n    changed = self._libinput.libinput_event_tablet_tool_rotation_has_changed(self._handle)\n    return (rotation, changed)", "docstring": "The current Z rotation of the tool in degrees, clockwise\nfrom the tool's logical neutral position and whether it has changed\nin this event.\n\nFor tools of type :attr:`~libinput.constant.TabletToolType.MOUSE`\nand :attr:`~libinput.constant.TabletToolType.LENS` the logical\nneutral position is pointing to the current logical north\nof the tablet. For tools of type\n:attr:`~libinput.constant.TabletToolType.BRUSH`, the logical\nneutral position is with the buttons pointing up.\n\nIf this axis does not exist on the current tool, this property is\n(0, :obj:`False`).\n\nReturns:\n(float, bool): The current value of the the axis and whether it has\nchanged.", "source": "codesearchnet"}
{"code": "def memcached_client(servers=config.memcached_uri, debug=config.debug_memcache):\n    key = None\n    try:\n        (client, key) = scoped_instance_manager.acquire(servers, debug=debug)\n        (yield client)\n    finally:\n        if key:\n            scoped_instance_manager.release(key)", "docstring": "Get a shared memcached instance.\n\nThis function shares the same memcached instance across nested invocations.\nThis is done so that memcached connections can be kept to a minimum, but at\nthe same time unnecessary extra reconnections are avoided. Typically an\ninitial scope (using 'with' construct) is made around parts of code that hit\nthe cache server many times - such as a resolve, or executing a context. On\nexit of the topmost scope, the memcached client is disconnected.\n\nReturns:\n`Client`: Memcached instance.", "source": "codesearchnet"}
{"code": "def get_initial_state_args(value_and_gradients_function, initial_position, grad_tolerance, control_inputs=None):\n    if control_inputs:\n        with tf.control_dependencies(control_inputs):\n            (f0, df0) = value_and_gradients_function(initial_position)\n    else:\n        (f0, df0) = value_and_gradients_function(initial_position)\n    converged = (norm(df0, dims=1) < grad_tolerance)\n    return dict(converged=converged, failed=tf.zeros_like(converged), num_iterations=tf.convert_to_tensor(value=0), num_objective_evaluations=tf.convert_to_tensor(value=1), position=initial_position, objective_value=f0, objective_gradient=df0)", "docstring": "Returns a dictionary to populate the initial state of the search procedure.\n\nPerforms an initial convergence check and the first evaluation of the\nobjective function.\n\nArgs:\nvalue_and_gradients_function: A Python callable that accepts a tensor and\nreturns a tuple of two tensors: the objective function value and its\nderivative.\ninitial_position: The starting point of the search procedure.\ngrad_tolerance: The gradient tolerance for the procedure.\ncontrol_inputs: Optional ops used to assert the validity of inputs, these\nare added as control dependencies to execute before the objective\nfunction is evaluated for the first time.\n\nReturns:\nAn dictionary with values for the following keys:\nconverged: True if the convergence check finds that the initial position\nis already an argmin of the objective function.\nfailed: Initialized to False.\nnum_objective_evaluations: Initialized to 1.\nposition: Initialized to the initial position.\nobjective_value: Initialized to the value of the objective function at\nthe initial position.\nobjective_gradient: Initialized to the gradient of the objective\nfunction at the initial position.", "source": "codesearchnet"}
{"code": "def label_count(self, label_list_ids=None):\n    count = collections.defaultdict(int)\n    for label_list in self.label_lists.values():\n        if ((label_list_ids is None) or (label_list.idx in label_list_ids)):\n            for (label_value, label_count) in label_list.label_count().items():\n                count[label_value] += label_count\n    return count", "docstring": "Return a dictionary containing the number of times,\nevery label-value in this utterance is occurring.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists\nwith an id contained in this list\nare considered.\n\nReturns:\ndict: A dictionary containing the number of occurrences\nwith the label-value as key.", "source": "codesearchnet"}
{"code": "def show(self, view: View, request: Request):\n    return view.render('welcome', {'app': request.app().make('Application')})", "docstring": "Show the welcome page.\n\nArguments:\nview {masonite.view.View} -- The Masonite view class.\nApplication {config.application} -- The application config module.\n\nReturns:\nmasonite.view.View -- The Masonite view class.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An PLBART sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def dynamic_import(modname, import_tuples, developing=True, ignore_froms=[], dump=False, ignore_startswith=[], ignore_endswith=[], ignore_list=[], check_not_imported=True, return_initstr=False, verbose=False):\n    if verbose:\n        print(('[UTIL_IMPORT] Running Dynamic Imports for modname=%r ' % modname))\n    try:\n        module = sys.modules[modname]\n    except:\n        module = __import__(modname)\n    imports = [tup[0] for tup in import_tuples]\n    __excecute_imports(module, modname, imports, verbose=verbose)\n    if developing:\n        from_imports = __execute_fromimport_star(module, modname, import_tuples, ignore_list=ignore_list, ignore_startswith=ignore_startswith, ignore_endswith=ignore_endswith, check_not_imported=check_not_imported, verbose=verbose)\n    else:\n        from_imports = __execute_fromimport(module, modname, import_tuples, verbose=verbose)\n    inject_execstr = _inject_execstr(modname, import_tuples)\n    dump_requested = (((('--dump-%s-init' % modname) in sys.argv) or (('--print-%s-init' % modname) in sys.argv)) or dump)\n    overwrite_requested = (('--update-%s-init' % modname) in sys.argv)\n    if verbose:\n        print(('[UTIL_IMPORT] Finished Dynamic Imports for modname=%r ' % modname))\n    if dump_requested:\n        is_main_proc = (multiprocessing.current_process().name == 'MainProcess')\n        if is_main_proc:\n            from utool import util_str\n            initstr = _initstr(modname, imports, from_imports, inject_execstr)\n            print(util_str.indent(initstr))\n    if overwrite_requested:\n        '\\n        SeeAlso:\\n            util_inject.inject_python_code\\n            util_str.replace_between_tags\\n        '\n        is_main_proc = (multiprocessing.current_process().name == 'MainProcess')\n        if is_main_proc:\n            from utool import util_str\n            from os.path import join, exists\n            initstr = _initstr(modname, imports, from_imports, inject_execstr, withheader=False)\n            new_else = util_str.indent(initstr)\n            init_fpath = join(module.__path__[0], '__init__.py')\n            print(('attempting to update: %r' % init_fpath))\n            assert exists(init_fpath)\n            new_lines = []\n            editing = False\n            updated = False\n            with open(init_fpath, 'r') as file_:\n                lines = file_.readlines()\n                for line in lines:\n                    if (not editing):\n                        new_lines.append(line)\n                    if line.strip().startswith('\n                        new_lines.append((('\\n' + new_else) + '\\n    \n                        editing = True\n                        updated = True\n                    if line.strip().startswith('\n                        editing = False\n            if updated:\n                print(('writing updated file: %r' % init_fpath))\n                new_text = ''.join(new_lines)\n                with open(init_fpath, 'w') as file_:\n                    file_.write(new_text)\n            else:\n                print(('no write hook for file: %r' % init_fpath))\n    if return_initstr:\n        initstr = _initstr(modname, imports, from_imports, '', withheader=False)\n        return (inject_execstr, initstr)\n    else:\n        return inject_execstr", "docstring": "MAIN ENTRY POINT\n\nDynamically import listed util libraries and their attributes.\nCreate reload_subs function.\n\nUsing __import__ like this is typically not considered good style However,\nit is better than 
import * and this will generate the good file text that\ncan be used when the module is 'frozen\"\n\nReturns:\nstr: init_inject_str - by default all imports are executed in this\nfunction and only the remainig code needed to be executed is\nreturned to define the reload logic.\n\nstr, str: init_inject_str, init_str - if return_initstr is True then\nalso returns init_str defining the from imports.\n\nIgnore:\nignore_startswith = []\nignore_endswith = []\ncheck_not_imported = True\nverbose = True", "source": "codesearchnet"}
{"code": "def GetLaunchedFlows(self, flow_type='outstanding'):\n    result = None\n    all_clients = set(self.ListAllClients())\n    finished_clients = set(self.ListFinishedClients())\n    outstanding_clients = (all_clients - finished_clients)\n    if (flow_type == 'all'):\n        result = all_clients\n    elif (flow_type == 'finished'):\n        result = finished_clients\n    elif (flow_type == 'outstanding'):\n        result = outstanding_clients\n    flows = aff4.FACTORY.MultiListChildren([self.urn.Add(x.Basename()) for x in result])\n    return [x[0] for (_, x) in flows]", "docstring": "Returns the session IDs of all the flows we launched.\n\nArgs:\nflow_type: The type of flows to fetch. Can be \"all\", \"outstanding\" or\n\"finished\".\n\nReturns:\nA list of flow URNs.", "source": "codesearchnet"}
{"code": "def check_models_are_auto_configured(module: types.ModuleType, all_auto_models: List[str]) -> List[str]:\n    defined_models = get_models(module)\n    failures = []\n    for model_name, _ in defined_models:\n        if model_name not in all_auto_models and (not ignore_unautoclassed(model_name)):\n            failures.append(f'{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file `utils/check_repo.py`.')\n    return failures", "docstring": "Check models defined in module are each in an auto class.\n\nArgs:\nmodule (`types.ModuleType`):\nThe module in which we get the models.\nall_auto_models (`List[str]`):\nThe list of all models in an auto class (as obtained with `get_all_auto_configured_models()`).\n\nReturns:\n`List[str]`: The list of error messages corresponding to models not tested.", "source": "github-repos"}
{"code": "def overlap(self, feature, stranded: bool=False):\n    feature_strand = feature.strand\n    strand = self.strand\n    if (stranded and ((strand == '.') or ((strand == '+') and (feature_strand in ['-', '.'])) or ((strand == '-') and (feature_strand in ['+', '.'])))):\n        return False\n    iv_1 = set(range(feature.start, (feature.end + 1)))\n    iv_2 = set(range(self.start, (self.end + 1)))\n    if (len(iv_1.intersection(iv_2)) > 0):\n        return True\n    else:\n        return False", "docstring": "Determine if a feature's position overlaps with the entry\n\nArgs:\nfeature (class): GFF3Entry object\n\nstranded (bool): allow features to overlap on different strands\nif True [default: False]\n\nReturns:\nbool: True if features overlap, else False", "source": "codesearchnet"}
{"code": "def _add_mgmt_to_domains(self, conf, mgmts):\n        \n\n        for dom_name, dom_spec in conf['domains'].iteritems():\n            domain_mgmt = [\n                nic['net'] for nic in dom_spec['nics'] if nic['net'] in mgmts\n            ].pop()\n\n            dom_spec['mgmt_net'] = domain_mgmt", "docstring": "Add management network key('mgmt_net') to each domain. Note this\nassumes ``conf`` was validated.\n\nArgs:\nconf(dict): spec\nmgmts(list): list of management networks names", "source": "juraj-google-style"}
{"code": "def prepare(self, variables):\n        \n        initializedsteps = []\n        if variables is None:\n            variables = dict()\n        for step, params, _resources, _files in self.steps:\n            new_params = _complete_parameters(params, variables)\n            initializedsteps.append(step(new_params))\n        return initializedsteps", "docstring": "Initialize all steps in this recipe using their parameters.\n\nArgs:\nvariables (dict): A dictionary of global variable definitions\nthat may be used to replace or augment the parameters given\nto each step.\n\nReturns:\nlist of RecipeActionObject like instances: The list of instantiated\nsteps that can be used to execute this recipe.", "source": "juraj-google-style"}
{"code": "def _get_symmetry(self):\n    d = spglib.get_symmetry(self._cell, symprec=self._symprec, angle_tolerance=self._angle_tol)\n    trans = []\n    for t in d['translations']:\n        trans.append([float(Fraction.from_float(c).limit_denominator(1000)) for c in t])\n    trans = np.array(trans)\n    trans[(np.abs(trans) == 1)] = 0\n    return (d['rotations'], trans)", "docstring": "Get the symmetry operations associated with the structure.\n\nReturns:\nSymmetry operations as a tuple of two equal length sequences.\n(rotations, translations). \"rotations\" is the numpy integer array\nof the rotation matrices for scaled positions\n\"translations\" gives the numpy float64 array of the translation\nvectors in scaled positions.", "source": "codesearchnet"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    if position_ids is None:\n        batch_size, sequence_length = input_ids.shape\n        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration\n\n>>> model = FlaxPegasusForConditionalGeneration.from_pretrained(\"google/pegasus-large\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/pegasus-large\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"}
{"code": "def for_input_type(self, input_type):\n    return self", "docstring": "Returns a specialized implementation of self, if it exists.\n\nOtherwise, returns self.\n\nArgs:\ninput_type: the type of input elements.", "source": "github-repos"}
{"code": "def _add_def_paths(self, prop_dict):\n    \n    for prop_key, prop_value in prop_dict.iteritems():\n      if prop_key == '$ref' and not 'prop_value'.startswith('\n        prop_dict[prop_key] = '\n      elif isinstance(prop_value, dict):\n        self._add_def_paths(prop_value)", "docstring": "Recursive method to add relative paths for any $ref objects.\n\nArgs:\nprop_dict: The property dict to alter.\n\nSide Effects:\nAlters prop_dict in-place.", "source": "juraj-google-style"}
{"code": "def get_collection(self, id_or_uri, filter=''):\n        \n        if filter:\n            filter = self.__make_query_filter(filter)\n            filter = \"?\" + filter[1:]\n\n        uri = \"{uri}{filter}\".format(uri=self.build_uri(id_or_uri), filter=filter)\n        logger.debug('Get resource collection (uri = %s)' % uri)\n        response = self._connection.get(uri)\n        return self.__get_members(response)", "docstring": "Retrieves a collection of resources.\n\nUse this function when the 'start' and 'count' parameters are not allowed in the GET call.\nOtherwise, use get_all instead.\n\nOptional filtering criteria may be specified.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\nfilter (list or str): General filter/query string.\n\nReturns:\nCollection of the requested resource.", "source": "juraj-google-style"}
{"code": "def _validate(self, value):\n    _LOGGER.info('validate: Got type %s', type(value))\n    if ((value is not None) and (not isinstance(value, client.Flow))):\n        raise TypeError('Property {0} must be convertible to a flow instance; received: {1}.'.format(self._name, value))", "docstring": "Validates a value as a proper Flow object.\n\nArgs:\nvalue: A value to be set on the property.\n\nRaises:\nTypeError if the value is not an instance of Flow.", "source": "codesearchnet"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Delete an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\nrequest: (CloudbuildProjectsGithubEnterpriseConfigsDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    try:\n        self._ParseFileHeader(file_object)\n    except errors.ParseError as exception:\n        raise errors.ParseError('Unable to parse index file header with error: {0!s}'.format(exception))\n    file_object.seek(112, os.SEEK_CUR)\n    self._ParseIndexTable(file_object)", "docstring": "Parses a file-like object.\n\nArgs:\nparser_mediator (ParserMediator): a parser mediator.\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\nParseError: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def create_project(self, collab_id):\n    return self._authenticated_request.to_endpoint('project/').with_json_body(self._prep_params(locals())).return_body().post()", "docstring": "Create a new project.\n\nArgs:\ncollab_id (int): The id of the collab the project should be created in.\n\nReturns:\nA dictionary of details of the created project::\n\n{\nu'collab_id': 12998,\nu'created_by': u'303447',\nu'created_on': u'2017-03-21T14:06:32.293902Z',\nu'description': u'',\nu'entity_type': u'project',\nu'modified_by': u'303447',\nu'modified_on': u'2017-03-21T14:06:32.293967Z',\nu'name': u'12998',\nu'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'\n}\n\nRaises:\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def _map_query_path_to_location_info(query_metadata_table):\n    query_path_to_location_info = {}\n    for (location, location_info) in query_metadata_table.registered_locations:\n        if (not isinstance(location, Location)):\n            continue\n        if (location.query_path in query_path_to_location_info):\n            equivalent_location_info = query_path_to_location_info[location.query_path]\n            if (not _location_infos_equal(location_info, equivalent_location_info)):\n                raise AssertionError(u'Differing LocationInfos at query_path {} between {} and {}. Expected parent_location.query_path, optional_scopes_depth, recursive_scopes_depth and types to be equal for LocationInfos sharing the same query path.'.format(location.query_path, location_info, equivalent_location_info))\n        query_path_to_location_info[location.query_path] = location_info\n    return query_path_to_location_info", "docstring": "Create a map from each query path to a LocationInfo at that path.\n\nArgs:\nquery_metadata_table: QueryMetadataTable, object containing all metadata collected during\nquery processing, including location metadata (e.g. which locations\nare folded or optional).\n\nReturns:\nDict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.", "source": "codesearchnet"}
{"code": "def __init__(self, filename, sample_filename, probability_threshold=0.9):\n        \n        \n        self.samples = pd.read_csv(sample_filename, sep=\" \", skiprows=2,\n                                   names=[\"fid\", \"iid\", \"missing\", \"father\",\n                                          \"mother\", \"sex\", \"plink_geno\"],\n                                   dtype=dict(fid=str, iid=str))\n\n        \n        try:\n            self.samples = self.samples.set_index(\"iid\", verify_integrity=True)\n\n        except ValueError:\n            logging.info(\n                \"Setting the index as 'fid_iid' because the individual IDs \"\n                \"are not unique.\"\n            )\n\n            self.samples[\"fid_iid\"] = [\n                \"{fid}_{iid}\".format(fid=fid, iid=iid)\n                for fid, iid in zip(self.samples.fid, self.samples.iid)\n            ]\n            self.samples = self.samples.set_index(\n                \"fid_iid\", verify_integrity=True,\n            )\n\n        \n        self._impute2_file = get_open_func(filename)(filename, \"r\")\n\n        \n        self.has_index = path.isfile(filename + \".idx\")\n        self._impute2_index = None\n        self._index_has_location = False\n        if self.has_index:\n            self._impute2_index = get_index(\n                filename,\n                cols=[0, 1, 2],\n                names=[\"chrom\", \"name\", \"pos\"],\n                sep=\" \",\n            )\n\n            \n            try:\n                self._impute2_index = self._impute2_index.set_index(\n                    \"name\", verify_integrity=True,\n                )\n                self._has_duplicated = False\n\n            except ValueError as e:\n                self._has_duplicated = True\n\n                \n                duplicated = self._impute2_index.name.duplicated(keep=False)\n                duplicated_markers = self._impute2_index.loc[\n                    duplicated, \"name\"\n                ]\n                duplicated_marker_counts = duplicated_markers.value_counts()\n\n                \n                \n                self._dup_markers = {\n                    m: [] for m in duplicated_marker_counts.index\n                }\n\n                \n                logging.found_duplicates(duplicated_marker_counts.iteritems())\n\n                \n                counter = Counter()\n                for i, marker in duplicated_markers.iteritems():\n                    counter[marker] += 1\n                    new_name = \"{}:dup{}\".format(marker, counter[marker])\n                    self._impute2_index.loc[i, \"name\"] = new_name\n\n                    \n                    self._dup_markers[marker].append(new_name)\n\n                \n                self._impute2_index = self._impute2_index.set_index(\n                    \"name\", verify_integrity=True,\n                )\n\n            \n            self._index_has_location = (\n                \"chrom\" in self._impute2_index.columns and\n                \"pos\" in self._impute2_index.columns\n            )\n            if self._index_has_location:\n                \n                self._impute2_index[\"multiallelic\"] = False\n                self._impute2_index.loc[\n                    self._impute2_index.duplicated([\"chrom\", \"pos\"],\n                                                   keep=False),\n                    \"multiallelic\"\n                ] = True\n\n        \n        self.prob_t = probability_threshold", "docstring": 
"IMPUTE2 file reader.\n\nArgs:\nfilename (str): The name of the IMPUTE2 file.\nsample_filename (str): The name of the SAMPLE file.\nprobability_threshold (float): The probability threshold.\n\nNote\n====\nIf the sample IDs are not unique, the index is changed to be the\nsample family ID and individual ID (i.e. fid_iid).", "source": "juraj-google-style"}
{"code": "def __parse_tostr(self, text, **kwargs):\n    n = self.options.get('nbest', 1)\n    if (self._KW_BOUNDARY in kwargs):\n        patt = kwargs.get(self._KW_BOUNDARY, '.')\n        tokens = list(self.__split_pattern(text, patt))\n        text = ''.join([t[0] for t in tokens])\n        btext = self.__str2bytes(text)\n        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n        bpos = 0\n        self.__mecab.mecab_lattice_set_boundary_constraint(self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)\n        for (token, match) in tokens:\n            bpos += 1\n            if match:\n                mark = self.MECAB_INSIDE_TOKEN\n            else:\n                mark = self.MECAB_ANY_BOUNDARY\n            for _ in range(1, len(self.__str2bytes(token))):\n                self.__mecab.mecab_lattice_set_boundary_constraint(self.lattice, bpos, mark)\n                bpos += 1\n            self.__mecab.mecab_lattice_set_boundary_constraint(self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)\n    elif (self._KW_FEATURE in kwargs):\n        features = kwargs.get(self._KW_FEATURE, ())\n        fd = {morph: self.__str2bytes(feat) for (morph, feat) in features}\n        tokens = self.__split_features(text, [e[0] for e in features])\n        text = ''.join([t[0] for t in tokens])\n        btext = self.__str2bytes(text)\n        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n        bpos = 0\n        for (chunk, match) in tokens:\n            c = len(self.__str2bytes(chunk))\n            if (match == True):\n                self.__mecab.mecab_lattice_set_feature_constraint(self.lattice, bpos, (bpos + c), fd[chunk])\n            bpos += c\n    else:\n        btext = self.__str2bytes(text)\n        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n    self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)\n    if (n > 1):\n        res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n)\n    else:\n        res = self.__mecab.mecab_lattice_tostr(self.lattice)\n    if (res != self.__ffi.NULL):\n        raw = self.__ffi.string(res)\n        return self.__bytes2str(raw).strip()\n    else:\n        err = self.__mecab.mecab_lattice_strerror(self.lattice)\n        logger.error(self.__bytes2str(self.__ffi.string(err)))\n        raise MeCabError(self.__bytes2str(self.__ffi.string(err)))", "docstring": "Builds and returns the MeCab function for parsing Unicode text.\n\nArgs:\nfn_name: MeCab function name that determines the function\nbehavior, either 'mecab_sparse_tostr' or\n'mecab_nbest_sparse_tostr'.\n\nReturns:\nA function definition, tailored to parsing Unicode text and\nreturning the result as a string suitable for display on stdout,\nusing either the default or N-best behavior.", "source": "codesearchnet"}
{"code": "def set_agent(self, short_name, client_id):\n    if (short_name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=short_name)\n    self.agents[short_name] = client_id", "docstring": "Register a client id that handlers commands for a service.\n\nArgs:\nshort_name (str): The name of the service to set an agent\nfor.\nclient_id (str): A globally unique id for the client that\nshould receive commands for this service.", "source": "codesearchnet"}
{"code": "def new_panel(store, institute_id, panel_name, display_name, csv_lines):\n    institute_obj = store.institute(institute_id)\n    if (institute_obj is None):\n        flash('{}: institute not found'.format(institute_id))\n        return None\n    panel_obj = store.gene_panel(panel_name)\n    if panel_obj:\n        flash('panel already exists: {} - {}'.format(panel_obj['panel_name'], panel_obj['display_name']))\n        return None\n    log.debug('parse genes from CSV input')\n    try:\n        new_genes = parse_genes(csv_lines)\n    except SyntaxError as error:\n        flash(error.args[0], 'danger')\n        return None\n    log.debug('build new gene panel')\n    panel_id = None\n    try:\n        panel_data = build_panel(dict(panel_name=panel_name, institute=institute_obj['_id'], version=1.0, date=dt.datetime.now(), display_name=display_name, genes=new_genes), store)\n        panel_id = store.add_gene_panel(panel_data)\n    except Exception as err:\n        log.error('An error occurred while adding the gene panel {}'.format(err))\n    return panel_id", "docstring": "Create a new gene panel.\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str)\npanel_name(str)\ndisplay_name(str)\ncsv_lines(iterable(str)): Stream with genes\n\nReturns:\npanel_id: the ID of the new panel document created or None", "source": "codesearchnet"}
{"code": "def __init__(self, args=None, varargs=None, varkw=None, defaults=None, kwonlyargs=None, kwonlydefaults=None, annotations=None):\n    self.args = args or []\n    self.varargs = varargs\n    self.varkw = varkw\n    self.defaults = defaults or ()\n    self.kwonlyargs = kwonlyargs or []\n    self.kwonlydefaults = kwonlydefaults or {}\n    self.annotations = annotations or {}", "docstring": "Constructs a FullArgSpec with each provided attribute, or the default.\n\nArgs:\nargs: A list of the argument names accepted by the function.\nvarargs: The name of the *varargs argument or None if there isn't one.\nvarkw: The name of the **kwargs argument or None if there isn't one.\ndefaults: A tuple of the defaults for the arguments that accept defaults.\nkwonlyargs: A list of argument names that must be passed with a keyword.\nkwonlydefaults: A dictionary of keyword only arguments and their defaults.\nannotations: A dictionary of arguments and their annotated types.", "source": "github-repos"}
{"code": "def clock(self, interval, basis):\n        \n\n        cache_name = self._classify_clock(interval, basis)\n        cache_data = self.clock_cache.get(cache_name)\n\n        if cache_data is None:\n            parent_stream, trigger = self.parent.clock(interval, basis)\n\n            if trigger.use_count is False:\n                raise SensorGraphSemanticError(\"Unsupported clock trigger in GatedClockScope\", trigger=trigger)\n            elif interval % trigger.reference != 0:\n                raise SensorGraphSemanticError(\"Unsupported trigger ratio in GatedClockScope\", trigger=trigger, interval=interval)\n\n            ratio = interval \n\n            stream = self.allocator.allocate_stream(DataStream.CounterType)\n            latch_stream = self.allocator.attach_stream(self.latch_stream)\n\n            self.sensor_graph.add_node(u'({} always && {} {}) => {} using copy_latest_a'.format(parent_stream, latch_stream, self.latch_trigger, stream))\n            self.clock_cache[cache_name] = (stream, ratio)\n        else:\n            stream, ratio = cache_data\n\n        if interval % ratio != 0:\n            raise SensorGraphSemanticError(\"Unsupported trigger ratio in GatedClockScope\", ratio=ratio, interval=interval)\n\n        count = interval \n\n        clock_stream = self.allocator.attach_stream(stream)\n        return clock_stream, InputTrigger(u'count', '>=', count)", "docstring": "Return a NodeInput tuple for triggering an event every interval.\n\nWe request each distinct type of clock at most once and combine it with our\nlatch stream each time it is requested.\n\nArgs:\ninterval (int): The interval (in seconds) at which this input should\ntrigger.", "source": "juraj-google-style"}
{"code": "def floor(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.floor, tf.float32)", "docstring": "Returns a TensorFluent for the floor function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the floor function.", "source": "codesearchnet"}
{"code": "def _get_grand_potential(self, composition):\n        \n        if self.use_hull_energy:\n            grand_potential = self.pd_non_grand.get_hull_energy(composition)\n        else:\n            grand_potential = InterfacialReactivity._get_entry_energy(\n                self.pd_non_grand, composition)\n        grand_potential -= sum([composition[e] * mu\n                                for e, mu in self.pd.chempots.items()])\n        if self.norm:\n            \n            \n            grand_potential /= sum([composition[el]\n                                    for el in composition\n                                    if el not in self.pd.chempots])\n        return grand_potential", "docstring": "Computes the grand potential Phi at a given composition and\nchemical potential(s).\n\nArgs:\ncomposition (Composition): Composition object.\n\nReturns:\nGrand potential at a given composition at chemical potential(s).", "source": "juraj-google-style"}
{"code": "def generate(arglist, git_tag_override=None):\n    spec, head_symlink, _, dest_file = arglist\n    data = json.load(open(spec))\n    git_version = None\n    if not data['git']:\n        git_version = b'unknown'\n    else:\n        old_branch = data['branch']\n        new_branch = parse_branch_ref(head_symlink)\n        if new_branch != old_branch:\n            raise RuntimeError(\"Run ./configure again, branch was '%s' but is now '%s'\" % (old_branch, new_branch))\n        git_version = get_git_version(data['path'], git_tag_override)\n    write_version_info(dest_file, git_version)", "docstring": "Generate version_info.cc as given `destination_file`.\n\nArgs:\narglist: should be a sequence that contains\nspec, head_symlink, ref_symlink, destination_file.\n\n`destination_file` is the filename where version_info.cc will be written\n\n`spec` is a filename where the file contains a JSON dictionary\n'git' bool that is true if the source is in a git repo\n'path' base path of the source code\n'branch' the name of the ref specification of the current branch/tag\n\n`head_symlink` is a filename to HEAD that is cross-referenced against\nwhat is contained in the json branch designation.\n\n`ref_symlink` is unused in this script but passed, because the build\nsystem uses that file to detect when commits happen.\n\ngit_tag_override: Override the value for the git tag. This is useful for\nreleases where we want to build the release before the git tag is\ncreated.\n\nRaises:\nRuntimeError: If ./configure needs to be run, RuntimeError will be raised.", "source": "github-repos"}
{"code": "def is_location(v) -> (bool, str):\n\n    def convert2float(value):\n        try:\n            float_num = float(value)\n            return float_num\n        except ValueError:\n            return False\n    if (not isinstance(v, str)):\n        return (False, v)\n    split_lst = v.split(':')\n    if (len(split_lst) != 5):\n        return (False, v)\n    if convert2float(split_lst[3]):\n        longitude = abs(convert2float(split_lst[3]))\n        if (longitude > 90):\n            return (False, v)\n    if convert2float(split_lst[4]):\n        latitude = abs(convert2float(split_lst[3]))\n        if (latitude > 180):\n            return (False, v)\n    return (True, v)", "docstring": "Boolean function for checking if v is a location format\n\nArgs:\nv:\nReturns: bool", "source": "codesearchnet"}
{"code": "def save_link(self, path_info):\n        \n        assert path_info[\"scheme\"] == \"local\"\n        path = path_info[\"path\"]\n\n        if not os.path.exists(path):\n            return\n\n        mtime, _ = get_mtime_and_size(path)\n        inode = get_inode(path)\n        relpath = os.path.relpath(path, self.root_dir)\n\n        cmd = (\n            \"REPLACE INTO {}(path, inode, mtime) \"\n            'VALUES (\"{}\", {}, \"{}\")'.format(\n                self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime\n            )\n        )\n        self._execute(cmd)", "docstring": "Adds the specified path to the list of links created by dvc. This\nlist is later used on `dvc checkout` to cleanup old links.\n\nArgs:\npath_info (dict): path info to add to the list of links.", "source": "juraj-google-style"}
{"code": "def __init__(self, label, names=(), path=None):\n        \n        self.label = label\n        self.names = names\n        self.path = path\n        for name in names:\n            setattr(self, name, self.__class__(name, path=\"{0}.{1}\".format(path, label) if path else label))", "docstring": "Create a new enumeration.  The parent enum creates an instance for each item.\n\nArgs:\nlabel (str): enum name\nnames (list): item labels\npath (list): qualified parent name, for :func:`repr` output", "source": "juraj-google-style"}
{"code": "def output(self, filename):\n        \n        if filename == '':\n            filename = 'contracts.dot'\n        if not filename.endswith('.dot'):\n            filename += \".dot\"\n        info = 'Inheritance Graph: ' + filename\n        self.info(info)\n        with open(filename, 'w', encoding='utf8') as f:\n            f.write('digraph \"\" {\\n')\n            for c in self.contracts:\n                f.write(self._summary(c))\n            f.write('}')", "docstring": "Output the graph in filename\nArgs:\nfilename(string)", "source": "juraj-google-style"}
{"code": "def show(config, section, opt):\n        \n        if section not in config.keys():\n            raise ConfigError(\"section '{}' doesn't exist\".format(section))\n\n        if opt not in config[section].keys():\n            raise ConfigError(\n                \"option '{}.{}' doesn't exist\".format(section, opt)\n            )\n\n        logger.info(config[section][opt])", "docstring": "Prints option value from the config.\n\nArgs:\nconfig (configobj.ConfigObj): config to work on.\nsection (str): section name.\nopt (str): option name.", "source": "juraj-google-style"}
{"code": "def _generate_parser(name, path, required=False, notfoundmsg=None):\n    output = ('def %s(dom):\\n' % _get_parser_name(name))\n    dom = True\n    parser_table = {'find': (lambda path: _find_template(path.params, path.index, required, notfoundmsg)), 'wfind': (lambda path: _wfind_template(dom, path.params, path.index, required, notfoundmsg)), 'match': (lambda path: _match_template(path.params, path.index, required, notfoundmsg)), 'left_neighbour_tag': (lambda path: _neigh_template(path.params, path.index, True, required, notfoundmsg)), 'right_neighbour_tag': (lambda path: _neigh_template(path.params, path.index, False, required, notfoundmsg))}\n    if isinstance(path, path_patterns.PathCall):\n        output += parser_table[path.call_type](path)\n    elif isinstance(path, path_patterns.Chained):\n        for path in path.chain:\n            output += parser_table[path.call_type](path)\n            dom = False\n    else:\n        raise UserWarning(('Unknown type of path parameters! (%s)' % str(path)))\n    output += (IND + 'return el\\n')\n    output += '\\n\\n'\n    return output", "docstring": "Generate parser named `name` for given `path`.\n\nArgs:\nname (str): Basename for the parsing function (see\n:func:`_get_parser_name` for details).\npath (obj): :class:`.PathCall` or :class:`.Chained` instance.\nrequired (bool, default False): Use :func:`_required_idiom` to returned\ndata.\nnotfoundmsg (str, default None): Message which will be used for\n:func:`_required_idiom` if the item is not found.\n\nReturns:\nstr: Python code for parsing `path`.", "source": "codesearchnet"}
{"code": "def plot_iso(axis, step, var):\n    (xmesh, ymesh, fld) = get_meshes_fld(step, var)\n    if conf.field.shift:\n        fld = np.roll(fld, conf.field.shift, axis=0)\n    axis.contour(xmesh, ymesh, fld, linewidths=1)", "docstring": "Plot isocontours of scalar field.\n\nArgs:\naxis (:class:`matplotlib.axes.Axes`): the axis handler of an\nexisting matplotlib figure where the isocontours should\nbe plotted.\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): the scalar field name.", "source": "codesearchnet"}
{"code": "def predict(self, text):\n        \n        pred = self.predict_proba(text)\n        tags = self._get_tags(pred)\n\n        return tags", "docstring": "Predict using the model.\n\nArgs:\ntext: string, the input text.\n\nReturns:\ntags: list, shape = (num_words,)\nReturns predicted values.", "source": "juraj-google-style"}
{"code": "def quat2mat(quaternion):\n    \n    q = np.array(quaternion, dtype=np.float32, copy=True)[[3, 0, 1, 2]]\n    n = np.dot(q, q)\n    if n < EPS:\n        return np.identity(3)\n    q *= math.sqrt(2.0 / n)\n    q = np.outer(q, q)\n    return np.array(\n        [\n            [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],\n            [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],\n            [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]],\n        ]\n    )", "docstring": "Converts given quaternion (x, y, z, w) to matrix.\n\nArgs:\nquaternion: vec4 float angles\n\nReturns:\n3x3 rotation matrix", "source": "juraj-google-style"}
{"code": "def AddEnumDescriptor(self, enum_desc):\n    \n\n    if not isinstance(enum_desc, descriptor.EnumDescriptor):\n      raise TypeError('Expected instance of descriptor.EnumDescriptor.')\n\n    self._enum_descriptors[enum_desc.full_name] = enum_desc\n    self.AddFileDescriptor(enum_desc.file)", "docstring": "Adds an EnumDescriptor to the pool.\n\nThis method also registers the FileDescriptor associated with the message.\n\nArgs:\nenum_desc: An EnumDescriptor.", "source": "juraj-google-style"}
{"code": "def multi_flags_validator(flag_names, message='Flag validation failed', flag_values=FLAGS):\n\n    def decorate(function):\n        register_multi_flags_validator(flag_names, function, message=message, flag_values=flag_values)\n        return function\n    return decorate", "docstring": "A function decorator for defining a multi-flag validator.\n\nRegisters the decorated function as a validator for flag_names, e.g.\n\n@gflags.multi_flags_validator(['foo', 'bar'])\ndef _CheckFooBar(flags_dict):\n...\n\nSee register_multi_flags_validator() for the specification of checker\nfunction.\n\nArgs:\nflag_names: [str], a list of the flag names to be checked.\nmessage: error text to be shown to the user if checker returns False.\nIf checker raises ValidationError, message from the raised\nerror will be shown.\nflag_values: An optional FlagValues instance to validate against.\n\nReturns:\nA function decorator that registers its function argument as a validator.\n\nRaises:\nAttributeError: If a flag is not registered as a valid flag name.", "source": "codesearchnet"}
{"code": "def BalanceFor(self, assetId):\n    for (key, fixed8) in self.Balances.items():\n        if (key == assetId):\n            return fixed8\n    return Fixed8(0)", "docstring": "Get the balance for a given asset id.\n\nArgs:\nassetId (UInt256):\n\nReturns:\nFixed8: balance value.", "source": "codesearchnet"}
{"code": "def _init_from_bool(self, z, x):\n    if (z is None):\n        raise QiskitError('z vector must not be None.')\n    if (x is None):\n        raise QiskitError('x vector must not be None.')\n    if (len(z) != len(x)):\n        raise QiskitError('length of z and x vectors must be the same. (z: {} vs x: {})'.format(len(z), len(x)))\n    z = _make_np_bool(z)\n    x = _make_np_bool(x)\n    self._z = z\n    self._x = x\n    return self", "docstring": "Construct pauli from boolean array.\n\nArgs:\nz (numpy.ndarray): boolean, z vector\nx (numpy.ndarray): boolean, x vector\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: if z or x are None or the length of z and x are different.", "source": "codesearchnet"}
{"code": "def remove_child(self, child):\n        \n        if child in self.children.values() and hasattr(child, 'identifier'):\n            for k in self.children.keys():\n                if hasattr(self.children[k], 'identifier'):\n                    if self.children[k].identifier == child.identifier:\n                        if k in self._render_children_list:\n                            self._render_children_list.remove(k)\n                        self.children.pop(k)\n                        \n                        \n                        break", "docstring": "Removes a child instance from the Tag's children.\n\nArgs:\nchild (Tag): The child to be removed.", "source": "juraj-google-style"}
{"code": "def pop_event(self, event_name, timeout=DEFAULT_TIMEOUT):\n    if (not self.started):\n        raise IllegalStateError('Dispatcher needs to be started before popping.')\n    e_queue = self.get_event_q(event_name)\n    if (not e_queue):\n        raise TypeError('Failed to get an event queue for {}'.format(event_name))\n    try:\n        if timeout:\n            return e_queue.get(True, timeout)\n        elif (timeout == 0):\n            return e_queue.get(False)\n        else:\n            return e_queue.get(True)\n    except queue.Empty:\n        raise queue.Empty('Timeout after {}s waiting for event: {}'.format(timeout, event_name))", "docstring": "Pop an event from its queue.\n\nReturn and remove the oldest entry of an event.\nBlock until an event of specified name is available or\ntimes out if timeout is set.\n\nArgs:\nevent_name: Name of the event to be popped.\ntimeout: Number of seconds to wait when event is not present.\nNever times out if None.\n\nReturns:\nThe oldest entry of the specified event. None if timed out.\n\nRaises:\nIllegalStateError: Raised if pop is called before the dispatcher\nstarts polling.", "source": "codesearchnet"}
{"code": "def _CheckMacOSPaths(self, filename, artifact_definition, source, paths):\n    result = True\n    paths_with_private = []\n    paths_with_symbolic_link_to_private = []\n    for path in paths:\n        path_lower = path.lower()\n        path_segments = path_lower.split(source.separator)\n        if (not path_segments):\n            logging.warning('Empty path defined by artifact definition: {0:s} in file: {1:s}'.format(artifact_definition.name, filename))\n            result = False\n        elif (len(path_segments) == 1):\n            continue\n        elif (path_segments[1] in self._MACOS_PRIVATE_SUB_PATHS):\n            paths_with_symbolic_link_to_private.append(path)\n        elif ((path_segments[1] == 'private') and (len(path_segments) >= 2)):\n            if (path_segments[2] in self._MACOS_PRIVATE_SUB_PATHS):\n                paths_with_private.append(path)\n            else:\n                logging.warning('Unsupported private path: {0:s} defined by artifact definition: {1:s} in file: {2:s}'.format(path, artifact_definition.name, filename))\n                result = False\n    for private_path in paths_with_private:\n        if (private_path[8:] not in paths_with_symbolic_link_to_private):\n            logging.warning('Missing symbolic link: {0:s} for path: {1:s} defined by artifact definition: {2:s} in file: {3:s}'.format(private_path[8:], private_path, artifact_definition.name, filename))\n            result = False\n    for path in paths_with_symbolic_link_to_private:\n        private_path = '/private{0:s}'.format(path)\n        if (private_path not in paths_with_private):\n            logging.warning('Missing path: {0:s} for symbolic link: {1:s} defined by artifact definition: {2:s} in file: {3:s}'.format(private_path, path, artifact_definition.name, filename))\n            result = False\n    return result", "docstring": "Checks if the paths are valid MacOS paths.\n\nArgs:\nfilename (str): name of the artifacts definition file.\nartifact_definition (ArtifactDefinition): artifact definition.\nsource (SourceType): source definition.\npaths (list[str]): paths to validate.\n\nReturns:\nbool: True if the MacOS paths is valid.", "source": "codesearchnet"}
{"code": "def mark_deprecated(replaced_by):\n    \n    \n    def decorator(fn):   \n        @wraps(fn)\n        def wrapper(*args, **kw):   \n            from peltak.core import shell\n\n            if shell.is_tty:\n                warnings.warn(\"This command is has been deprecated. Please use \"\n                              \"{new} instead.\".format(new=replaced_by))\n\n            return fn(*args, **kw)\n\n        return wrapper\n\n    return decorator", "docstring": "Mark command as deprecated.\n\nArgs:\nreplaced_by (str):\nThe command that deprecated this command and should be used instead.", "source": "juraj-google-style"}
{"code": "def index(self, connection, partition, columns):\n        \n\n        import hashlib\n\n        query_tmpl = \n\n        if not isinstance(columns,(list,tuple)):\n            columns = [columns]\n\n        col_list = ','.join('\"{}\"'.format(col) for col in columns)\n\n        col_hash = hashlib.md5(col_list).hexdigest()\n\n        try:\n            table_name = partition.vid\n        except AttributeError:\n            table_name = partition \n\n        query = query_tmpl.format(\n            index_name='{}_{}_i'.format(table_name, col_hash), table_name=table_name,\n            columns=col_list)\n\n        logger.debug('Creating sqlite index: query: {}'.format(query))\n        cursor = connection.cursor()\n\n        cursor.execute(query)", "docstring": "Create an index on the columns.\n\nArgs:\nconnection (apsw.Connection): connection to sqlite database who stores mpr table or view.\npartition (orm.Partition):\ncolumns (list of str):", "source": "juraj-google-style"}
{"code": "def watermark_image(image, wtrmrk_path, corner=2):\n    padding = 2\n    wtrmrk_img = Image.open(wtrmrk_path)\n    if ((wtrmrk_img.width > (image.width - (padding * 2))) or (wtrmrk_img.height > (image.height - (padding * 2)))):\n        res = (int((image.width / 8.0)), int((image.height / 8.0)))\n        resize_in_place(wtrmrk_img, res)\n    pos = get_pos(corner, image.size, wtrmrk_img.size, padding)\n    was_P = (image.mode == 'P')\n    was_L = (image.mode == 'L')\n    if (image.mode not in ['RGB', 'RGBA']):\n        if (image.format in ['JPG', 'JPEG']):\n            image = image.convert('RGB')\n        else:\n            image = image.convert('RGBA')\n    image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA'))\n    if was_P:\n        image = image.convert('P', palette=Image.ADAPTIVE, colors=256)\n    elif was_L:\n        image = image.convert('L')\n    return image", "docstring": "Adds a watermark image to an instance of a PIL Image.\n\nIf the provided watermark image (wtrmrk_path) is\nlarger than the provided base image (image), then\nthe watermark image will be automatically resized to\nroughly 1/8 the size of the base image.\n\nArgs:\nimage: An instance of a PIL Image. This is the base image.\nwtrmrk_path: Path to the watermark image to use.\ncorner: An integer between 0 and 3 representing the corner\nwhere the watermark image should be placed on top of the\nbase image. 0 is top left, 1 is top right, 2 is bottom\nright and 3 is bottom left. NOTE: Right now, this is\npermanently set to 2 (bottom right) but this can be\nchanged in the future by either creating a new cmd-line\nflag or putting this in the config file.\n\nReturns: The watermarked image", "source": "codesearchnet"}
{"code": "def send_msg(self, address, args=[]):\n    if (not address.startswith('/')):\n        address = '/{}'.format(address)\n    msg = osc_message_builder.OscMessageBuilder(address=address)\n    for arg in args:\n        msg.add_arg(arg)\n    self.conn.send(msg.build())\n    return", "docstring": "Send multiple args into a single message to a given address.\n\nArgs:\naddress (str): OSC Address.\nargs (list): Arguments to be parsed in VVVV.", "source": "codesearchnet"}
{"code": "def identity_kernel_initializer(shape, dtype=tf.float32, partition_info=None):\n    if (len(shape) != 4):\n        raise ValueError('Convolution kernels must be rank 4.')\n    (filter_height, filter_width, in_channels, out_channels) = shape\n    if (filter_width != filter_height):\n        raise ValueError('Identity initializer only works for square filters.')\n    if ((filter_width % 2) != 1):\n        raise ValueError('Identity initializer requires filters have odd height and width.')\n    if (in_channels != out_channels):\n        raise ValueError('in_channels must equal out_channels in order to construct per-channel identities.')\n    middle_pixel = (filter_height \n    is_middle_pixel = tf.logical_and(tf.equal(_range_along_dimension(0, shape), middle_pixel), tf.equal(_range_along_dimension(1, shape), middle_pixel))\n    is_same_channel = tf.equal(_range_along_dimension(2, shape), _range_along_dimension(3, shape))\n    return tf.cast(tf.logical_and(is_same_channel, is_middle_pixel), dtype=dtype)", "docstring": "An initializer for constructing identity convolution kernels.\n\nConstructs a convolution kernel such that applying it is the same as an\nidentity operation on the input. Formally, the kernel has entry [i, j, in,\nout] = 1 if in equals out and i and j are the middle of the kernel and 0\notherwise.\n\nArgs:\nshape: List of integers. Represents shape of result.\ndtype: data type for values in result.\npartition_info: Partition information for initializer functions. Ignored.\n\nReturns:\nTensor of desired shape and dtype such that applying it as a convolution\nkernel results in the identity operation.\n\nRaises:\nValueError: If shape does not define a valid kernel.\nIf filter width and height differ.\nIf filter width and height are not odd numbers.\nIf number of input and output channels differ.", "source": "codesearchnet"}
{"code": "def on_graph_def(self, graph_def, device_name, wall_time):\n    if self._dump_dir:\n        if self._grpc_path:\n            self._write_graph_def(graph_def, device_name, wall_time)\n        else:\n            self._cached_graph_defs.append(graph_def)\n            self._cached_graph_def_device_names.append(device_name)\n            self._cached_graph_def_wall_times.append(wall_time)\n    else:\n        self._event_listener_servicer.partition_graph_defs.append(graph_def)", "docstring": "Implementation of the tensor value-carrying Event proto callback.\n\nArgs:\ngraph_def: A GraphDef object.\ndevice_name: Name of the device on which the graph was created.\nwall_time: An epoch timestamp (in microseconds) for the graph.", "source": "github-repos"}
{"code": "class TFDebertaStableDropout(keras.layers.Layer):\n\n    def __init__(self, drop_prob, **kwargs):\n        super().__init__(**kwargs)\n        self.drop_prob = drop_prob\n\n    @tf.custom_gradient\n    def xdropout(self, inputs):\n        \n        mask = tf.cast(1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool)\n        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)\n        if self.drop_prob > 0:\n            inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale\n\n        def grad(upstream):\n            if self.drop_prob > 0:\n                return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale\n            else:\n                return upstream\n        return (inputs, grad)\n\n    def call(self, inputs: tf.Tensor, training: tf.Tensor=False):\n        if training:\n            return self.xdropout(inputs)\n        return inputs", "docstring": "Optimized dropout module for stabilizing the training\n\nArgs:\ndrop_prob (float): the dropout probabilities", "source": "github-repos"}
{"code": "def set_calibration(self, enabled, imus):\n        \n        if len(imus) == 0:\n            imus = list(range(MAX_IMUS))\n\n        for i in imus:\n            if i < 0 or i >= MAX_IMUS:\n                logger.warn('Invalid IMU index {} in set_calibration'.format(i))\n                continue\n            self.imus[i]._use_calibration = enabled", "docstring": "Set calibration state for attached IMUs.\n\nArgs:\nenabled (bool): True to apply calibration to IMU data (if available).\nFalse to output uncalibrated data.\nimus (list): indicates which IMUs the calibration state should be set on.\nEmpty list or [0, 1, 2, 3, 4] will apply to all IMUs, [0, 1] only to\nfirst 2 IMUs, etc.", "source": "juraj-google-style"}
{"code": "def to_dataframe(self, start_row=0, max_rows=None, use_cache=True, dialect=None, billing_tier=None):\n    return self.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier).to_dataframe(start_row=start_row, max_rows=max_rows)", "docstring": "Exports the query results to a Pandas dataframe.\n\nArgs:\nstart_row: the row of the table at which to start the export (default 0).\nmax_rows: an upper limit on the number of rows to export (default None).\nuse_cache: whether to use cached results or not (default True).\ndialect : {'legacy', 'standard'}, default 'legacy'\n'legacy' : Use BigQuery's legacy SQL dialect.\n'standard' : Use BigQuery's standard SQL (beta), which is\ncompliant with the SQL 2011 standard.\nbilling_tier: Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge). If unspecified, this\nwill be set to your project default. This can also be used to override your\nproject-wide default billing tier on a per-query basis.\nReturns:\nA Pandas dataframe containing the table data.", "source": "codesearchnet"}
{"code": "def processes(self, processes):\n        \n\n        if self._processes > 1:\n            self._pool.close()\n            self._pool.join()\n            self._pool = multiprocessing.Pool(processes)\n        else:\n            self._pool = None\n        self._logger.log('debug', 'Number of processes set to {}'.format(\n            processes\n        ))", "docstring": "Set the number of concurrent processes the ABC will utilize for\nfitness function evaluation; if <= 1, single process is used\n\nArgs:\nprocesses (int): number of concurrent processes", "source": "juraj-google-style"}
{"code": "def zeros_like(array, dtype=None, keepmeta=True):\n    if keepmeta:\n        return xr.zeros_like(array, dtype)\n    else:\n        return dc.zeros(array.shape, dtype)", "docstring": "Create an array of zeros with the same shape and type as the input array.\n\nArgs:\narray (xarray.DataArray): The shape and data-type of it define\nthese same attributes of the output array.\ndtype (data-type, optional): If specified, this function overrides\nthe data-type of the output array.\nkeepmeta (bool, optional): Whether *coords, attrs, and name of the input\narray are kept in the output one. Default is True.\n\nReturns:\narray (decode.array): Decode array filled with zeros.", "source": "codesearchnet"}
{"code": "def on_put(self, req, resp, handler=None, **kwargs):\n    self.handle((handler or self.update), req, resp, **kwargs)\n    resp.status = falcon.HTTP_ACCEPTED", "docstring": "Respond on PUT HTTP request assuming resource update flow.\n\nThis request handler assumes that PUT requests are associated with\nresource update/modification. Thus default flow for such requests is:\n\n* Modify existing resource instance and prepare its representation by\ncalling its update method handler.\n* Set response status code to ``202 Accepted``.\n\nArgs:\nreq (falcon.Request): request object instance.\nresp (falcon.Response): response object instance to be modified\nhandler (method): update method handler to be called. Defaults\nto ``self.update``.\n**kwargs: additional keyword arguments retrieved from url template.", "source": "codesearchnet"}
{"code": "def parent(self):\n    family = self.repository.get_parent_package_family(self.resource)\n    return (PackageFamily(family) if family else None)", "docstring": "Get the parent package family.\n\nReturns:\n`PackageFamily`.", "source": "codesearchnet"}
{"code": "def ndtri(p, name='ndtri'):\n    with ops.name_scope(name, values=[p]):\n        p = ops.convert_to_tensor(p, name='p')\n        if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n            raise TypeError('p.dtype=%s is not handled, see docstring for supported types.' % p.dtype)\n        return _ndtri(p)", "docstring": "The inverse of the CDF of the Normal distribution function.\n\nReturns x such that the area under the pdf from minus infinity to x is equal\nto p.\n\nA piece-wise rational approximation is done for the function.\nThis is a port of the implementation in netlib.\n\nArgs:\np: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"ndtri\").\n\nReturns:\nx: `Tensor` with `dtype=p.dtype`.\n\nRaises:\nTypeError: if `p` is not floating-type.", "source": "github-repos"}
{"code": "def pre(fqdn, parent, stackdepth, *argl, **argd):\n    \n    global _atdepth_call, _cstack_call\n    \n    \n    pcres = _pre_call(_atdepth_call, parent, fqdn, stackdepth+1,\n                      *argl, **argd)\n    entry, _atdepth_call, reduced, bound, ekey = pcres\n    _cstack_call.append(fqdn)\n    return (entry, bound, ekey)", "docstring": "Adds logging for a call to the specified function that is being handled\nby an external module.\n\nArgs:\nfqdn (str): fully-qualified domain name of the function being logged.\nparent: *object* that the function belongs to.\nstackdepth (int): maximum stack depth before entries are ignored.\nargl (list): positional arguments passed to the function call.\nargd (dict): keyword arguments passed to the function call.", "source": "juraj-google-style"}
{"code": "def contains(self, value, equality_comparer=operator.eq):\n        \n        if self.closed():\n            raise ValueError(\"Attempt to call contains() on a \"\n                             \"closed Queryable.\")\n\n        if not is_callable(equality_comparer):\n            raise TypeError(\"contains() parameter equality_comparer={0} is \"\n                \"not callable\".format(repr(equality_comparer)))\n\n        if equality_comparer is operator.eq:\n            return value in self._iterable\n\n        for item in self:\n            if equality_comparer(value, item):\n                return True\n        return False", "docstring": "Determines whether the sequence contains a particular value.\n\nExecution is immediate. Depending on the type of the sequence, all or\nnone of the sequence may be consumed by this operation.\n\nNote: This method uses immediate execution.\n\nArgs:\nvalue: The value to test for membership of the sequence\n\nReturns:\nTrue if value is in the sequence, otherwise False.\n\nRaises:\nValueError: If the Queryable has been closed.", "source": "juraj-google-style"}
{"code": "def configs_for_reader(reader=None, ppp_config_dir=None):\n    search_paths = ((ppp_config_dir,) if ppp_config_dir else tuple())\n    if (reader is not None):\n        if (not isinstance(reader, (list, tuple))):\n            reader = [reader]\n        new_readers = []\n        for reader_name in reader:\n            if (reader_name.endswith('.yaml') or (reader_name not in OLD_READER_NAMES)):\n                new_readers.append(reader_name)\n                continue\n            new_name = OLD_READER_NAMES[reader_name]\n            raise ValueError(\"Reader name '{}' has been deprecated, use '{}' instead.\".format(reader_name, new_name))\n        reader = new_readers\n        config_files = [(r if r.endswith('.yaml') else (r + '.yaml')) for r in reader]\n    else:\n        reader_configs = glob_config(os.path.join('readers', '*.yaml'), *search_paths)\n        config_files = set(reader_configs)\n    for config_file in config_files:\n        config_basename = os.path.basename(config_file)\n        reader_configs = config_search_paths(os.path.join('readers', config_basename), *search_paths)\n        if (not reader_configs):\n            raise ValueError('No reader(s) named: {}'.format(reader))\n        (yield reader_configs)", "docstring": "Generator of reader configuration files for one or more readers\n\nArgs:\nreader (Optional[str]): Yield configs only for this reader\nppp_config_dir (Optional[str]): Additional configuration directory\nto search for reader configuration files.\n\nReturns: Generator of lists of configuration files", "source": "codesearchnet"}
{"code": "def _split_input_from_namespace(cls, app, namespace, entity_kind,\n                                  shard_count):\n    \n\n    raw_entity_kind = cls._get_raw_entity_kind(entity_kind)\n    if shard_count == 1:\n      \n      return [key_range.KeyRange(namespace=namespace, _app=app)]\n\n    ds_query = datastore.Query(kind=raw_entity_kind,\n                               namespace=namespace,\n                               _app=app,\n                               keys_only=True)\n    ds_query.Order(\"__scatter__\")\n    random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)\n\n    if not random_keys:\n      \n      \n      return ([key_range.KeyRange(namespace=namespace, _app=app)] +\n              [None] * (shard_count - 1))\n\n    random_keys.sort()\n\n    if len(random_keys) >= shard_count:\n      \n      random_keys = cls._choose_split_points(random_keys, shard_count)\n\n    \n    key_ranges = []\n\n    key_ranges.append(key_range.KeyRange(\n        key_start=None,\n        key_end=random_keys[0],\n        direction=key_range.KeyRange.ASC,\n        include_start=False,\n        include_end=False,\n        namespace=namespace,\n        _app=app))\n\n    for i in range(0, len(random_keys) - 1):\n      key_ranges.append(key_range.KeyRange(\n          key_start=random_keys[i],\n          key_end=random_keys[i+1],\n          direction=key_range.KeyRange.ASC,\n          include_start=True,\n          include_end=False,\n          namespace=namespace,\n          _app=app))\n\n    key_ranges.append(key_range.KeyRange(\n        key_start=random_keys[-1],\n        key_end=None,\n        direction=key_range.KeyRange.ASC,\n        include_start=True,\n        include_end=False,\n        namespace=namespace,\n        _app=app))\n\n    if len(key_ranges) < shard_count:\n      \n      key_ranges += [None] * (shard_count - len(key_ranges))\n\n    return key_ranges", "docstring": "Helper for _split_input_from_params.\n\nIf there are not enough Entities to make all of the given shards, the\nreturned list of KeyRanges will include Nones. The returned list will\ncontain KeyRanges ordered lexographically with any Nones appearing at the\nend.\n\nArgs:\napp: the app.\nnamespace: the namespace.\nentity_kind: entity kind as string.\nshard_count: the number of shards.\n\nReturns:\nKeyRange objects.", "source": "juraj-google-style"}
{"code": "def files_comments_edit(self, *, comment: str, file: str, id: str, **kwargs) -> SlackResponse:\n    kwargs.update({'comment': comment, 'file': file, 'id': id})\n    return self.api_call('files.comments.edit', json=kwargs)", "docstring": "Edit an existing file comment.\n\nArgs:\ncomment (str): The body of the comment.\ne.g. 'Everyone should take a moment to read this file.'\nfile (str): The file id. e.g. 'F1234467890'\nid (str): The file comment id. e.g. 'Fc1234567890'", "source": "codesearchnet"}
{"code": "def _read_git_tags(\n        default_version=DEFAULT_VERSION,\n        git_command=('git', 'tag'),\n):\n    \n    try:\n        current_tags = check_output(git_command).splitlines()\n    except Exception:  \n        raise\n\n    if not current_tags[0]:\n        warnings.warn(\n            'Unable to resolve current version',\n            exceptions.ProsperDefaultVersionWarning)\n        return default_version\n\n    latest_version = semantic_version.Version(default_version)\n    for tag in current_tags:\n        tag_str = decode(tag, 'utf-8').replace('v', '')\n        try:\n            tag_ver = semantic_version.Version(tag_str)\n        except Exception:  \n            continue  \n\n        if tag_ver > latest_version:\n            latest_version = tag_ver\n\n    return str(latest_version)", "docstring": "tries to find current git tag\n\nNotes:\ngit_command exposed for testing null case\n\nArgs:\ndefault_version (str): what version to make\ngit_command (:obj:`list`): subprocess command\n\nRetruns:\nstr: latest version found, or default\n\nWarns:\nexceptions.ProsperDefaultVersionWarning: git version not found", "source": "juraj-google-style"}
{"code": "def _req(self, req):\n        \n        logger.debug('DUT> %s', req)\n        self._log and self.pause()\n        times = 3\n        res = None\n\n        while times:\n            times = times - 1\n            try:\n                self._sendline(req)\n                self._expect(req)\n\n                line = None\n                res = []\n\n                while True:\n                    line = self._readline()\n                    logger.debug('Got line %s', line)\n\n                    if line == 'Done':\n                        break\n\n                    if line:\n                        res.append(line)\n                break\n\n            except:\n                logger.exception('Failed to send command')\n                self.close()\n                self._init()\n\n        self._log and self.resume()\n        return res", "docstring": "Send command and wait for response.\n\nThe command will be repeated 3 times at most in case data loss of serial port.\n\nArgs:\nreq (str): Command to send, please do not include new line in the end.\n\nReturns:\n[str]: The output lines", "source": "juraj-google-style"}
{"code": "def get_oauth_data(self, code, client_id, client_secret, state):\n    request = self._get_request()\n    response = request.post(self.OAUTH_TOKEN_URL, {'state': state, 'code': code, 'grant_type': 'authorization_code', 'client_id': client_id, 'client_secret': client_secret})\n    return HSAccessTokenAuth.from_response(response)", "docstring": "Get Oauth data from HelloSign\n\nArgs:\n\ncode (str):             Code returned by HelloSign for our callback url\n\nclient_id (str):        Client id of the associated app\n\nclient_secret (str):    Secret token of the associated app\n\nReturns:\nA HSAccessTokenAuth object", "source": "codesearchnet"}
{"code": "def click_nowait(self, pattern, action='click', desc=None, **match_kwargs):\n    point = self.match(pattern, **match_kwargs)\n    if ((not point) or (not point.matched)):\n        return None\n    func = getattr(self, action)\n    func(*point.pos)\n    return point", "docstring": "Return immediately if no image found\n\nArgs:\n- pattern (str or Pattern): filename or an opencv image object.\n- action (str): click or long_click\n\nReturns:\nClick point or None", "source": "codesearchnet"}
{"code": "def Run(self, request, global_params=None):\n    config = self.GetMethodConfig('Run')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Runs a `BuildTrigger` at a particular source revision.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersRunRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def _GetMergeTaskStorageFilePath(self, task):\n    filename = '{0:s}.plaso'.format(task.identifier)\n    return os.path.join(self._merge_task_storage_path, filename)", "docstring": "Retrieves the path of a task storage file in the merge directory.\n\nArgs:\ntask (Task): task.\n\nReturns:\nstr: path of a task storage file file in the merge directory.", "source": "codesearchnet"}
{"code": "def filter_dict(d, exclude):\n    \n    ret = {}\n    for key, value in d.items():\n        if key not in exclude:\n            ret.update({key: value})\n    return ret", "docstring": "Return a new dict with specified keys excluded from the origional dict\n\nArgs:\nd (dict): origional dict\nexclude (list): The keys that are excluded", "source": "juraj-google-style"}
{"code": "def crt(self, mp, mq):\n    u = (((mq - mp) * self.p_inverse) % self.q)\n    return (mp + (u * self.p))", "docstring": "The Chinese Remainder Theorem as needed for decryption. Returns the solution modulo n=pq.\n\nArgs:\nmp(int): the solution modulo p.\nmq(int): the solution modulo q.", "source": "codesearchnet"}
{"code": "def isholiday(self, date):\n        \n        date = parsefun(date)\n        if self.holidays:\n            \n            i = bisect.bisect_left(self.holidays, date)\n            if i == 0 and date < self.holidays[0]:\n                warn('Holiday list exhausted at start, ' \\\n                     'isholiday(%s) output may be incorrect.' % date)\n            elif i == len(self.holidays):\n                warn('Holiday list exhausted at end, ' \\\n                     'isholiday(%s) output may be incorrect.' % date)\n            elif self.holidays[i] == date:\n                return True\n        return False", "docstring": "Check if a given date is a holiday.\n\nArgs:\ndate (date, datetime or str): Date to be checked.\n\nReturns:\nbool: True if the date is a holiday, False otherwise.", "source": "juraj-google-style"}
{"code": "def count_function(function: _evaluation.CountFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    del function, params_result\n    if operand_result is None:\n        raise ValueError('count() cannot be called without an operand.')\n    if operand_result.from_part is None:\n        return _sql_data_types.Select(select_part=_sql_data_types.CountCall((_sql_data_types.RawExpression(operand_result.sql_alias, _sql_data_type=operand_result.sql_data_type),)), from_part=str(operand_result.to_subquery()), where_part=operand_result.where_part, sql_dialect=_sql_data_types.SqlDialect.SPARK)\n    else:\n        return dataclasses.replace(operand_result, select_part=_sql_data_types.CountCall((operand_result.select_part,)))", "docstring": "Returns an integer representing the number of elements in a collection.\n\nBy default, `_CountFunction` will return 0.\n\nArgs:\nfunction: The FHIRPath AST `HasValueFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.\n\nRaises:\nValueError: When the function is called without an operand", "source": "github-repos"}
{"code": "def _get_music_services_data(cls):\n    if (cls._music_services_data is not None):\n        return cls._music_services_data\n    result = {}\n    root = XML.fromstring(cls._get_music_services_data_xml().encode('utf-8'))\n    services = root.findall('Service')\n    for service in services:\n        result_value = service.attrib.copy()\n        name = service.get('Name')\n        result_value['Name'] = name\n        auth_element = service.find('Policy')\n        auth = auth_element.attrib\n        result_value.update(auth)\n        presentation_element = service.find('.\n        if (presentation_element is not None):\n            result_value['PresentationMapUri'] = presentation_element.get('Uri')\n        result_value['ServiceID'] = service.get('Id')\n        service_type = str(((int(service.get('Id')) * 256) + 7))\n        result_value['ServiceType'] = service_type\n        result[service_type] = result_value\n    cls._music_services_data = result\n    return result", "docstring": "Parse raw account data xml into a useful python datastructure.\n\nReturns:\ndict: Each key is a service_type, and each value is a\n`dict` containing relevant data.", "source": "codesearchnet"}
{"code": "def AddWeight(self, path_segment_index, weight):\n    if (path_segment_index not in self._weight_per_index):\n        raise ValueError('Path segment index not set.')\n    self._weight_per_index[path_segment_index] += weight\n    if (weight not in self._indexes_per_weight):\n        self._indexes_per_weight[weight] = []\n    self._indexes_per_weight[weight].append(path_segment_index)", "docstring": "Adds a weight for a specific path segment index.\n\nArgs:\npath_segment_index: an integer containing the path segment index.\nweight: an integer containing the weight.\n\nRaises:\nValueError: if the path segment weights do not contain\nthe path segment index.", "source": "codesearchnet"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns information about a previously requested build. The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information.\n\nArgs:\nrequest: (CloudbuildProjectsBuildsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Build) The response message.", "source": "github-repos"}
{"code": "async def inspect(self, service_id: str) -> Mapping[str, Any]:\n        \n\n        response = await self.docker._query_json(\n            \"services/{service_id}\".format(service_id=service_id), method=\"GET\"\n        )\n        return response", "docstring": "Inspect a service\n\nArgs:\nservice_id: ID or name of the service\n\nReturns:\na dict with info about a service", "source": "juraj-google-style"}
{"code": "def test(x, y, regex_expr=False):\n    \n    return matches(x, y, regex_expr=regex_expr) if isregex(x) else equal(x, y)", "docstring": "Compares to values based on regular expression matching or\nstrict equality comparison.\n\nArguments:\nx (regex|str): string or regular expression to test.\ny (str): value to match.\nregex_expr (bool): enables regex string based expression matching.\n\nRaises:\nAssertionError: in case of matching error.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def authenticate(self, username, password):\n    if self.config.get('LDAP_BIND_DIRECT_CREDENTIALS'):\n        result = self.authenticate_direct_credentials(username, password)\n    elif ((not self.config.get('LDAP_ALWAYS_SEARCH_BIND')) and (self.config.get('LDAP_USER_RDN_ATTR') == self.config.get('LDAP_USER_LOGIN_ATTR'))):\n        result = self.authenticate_direct_bind(username, password)\n    else:\n        result = self.authenticate_search_bind(username, password)\n    return result", "docstring": "An abstracted authentication method. Decides whether to perform a\ndirect bind or a search bind based upon the login attribute configured\nin the config.\n\nArgs:\nusername (str): Username of the user to bind\npassword (str): User's password to bind with.\n\nReturns:\nAuthenticationResponse", "source": "codesearchnet"}
{"code": "def evaluate_tensor_slice(tensor, tensor_slicing):\n    _ = tensor\n    if not validate_slicing_string(tensor_slicing):\n        raise ValueError('Invalid tensor-slicing string.')\n    return tensor[_parse_slices(tensor_slicing)]", "docstring": "Call eval on the slicing of a tensor, with validation.\n\nArgs:\ntensor: (numpy ndarray) The tensor value.\ntensor_slicing: (str or None) Slicing of the tensor, e.g., \"[:, 1]\". If\nNone, no slicing will be performed on the tensor.\n\nReturns:\n(numpy ndarray) The sliced tensor.\n\nRaises:\nValueError: If tensor_slicing is not a valid numpy ndarray slicing str.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(CheckResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.USAGE_LIMITS_COUNT, local_stream):\n        self._usage_limits_count = primitives.LongInteger(tag=enums.Tags.USAGE_LIMITS_COUNT)\n        self._usage_limits_count.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_USAGE_MASK, local_stream):\n        self._cryptographic_usage_mask = primitives.Integer(tag=enums.Tags.CRYPTOGRAPHIC_USAGE_MASK)\n        self._cryptographic_usage_mask.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.LEASE_TIME, local_stream):\n        self._lease_time = primitives.Interval(tag=enums.Tags.LEASE_TIME)\n        self._lease_time.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Check response payload and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def Analyze(self, hashes):\n    \n    hash_analyses = []\n    for digest in hashes:\n      json_response = self._QueryHash(digest)\n      hash_analysis = interface.HashAnalysis(digest, json_response)\n      hash_analyses.append(hash_analysis)\n\n    return hash_analyses", "docstring": "Looks up hashes in Viper using the Viper HTTP API.\n\nArgs:\nhashes (list[str]): hashes to look up.\n\nReturns:\nlist[HashAnalysis]: hash analysis.\n\nRaises:\nRuntimeError: If no host has been set for Viper.", "source": "juraj-google-style"}
{"code": "def get_explanation_dict(self, entry):\n    centry = self.process_entry(entry)\n    if (centry is None):\n        uncorrected_energy = entry.uncorrected_energy\n        corrected_energy = None\n    else:\n        uncorrected_energy = centry.uncorrected_energy\n        corrected_energy = centry.energy\n    d = {'compatibility': self.__class__.__name__, 'uncorrected_energy': uncorrected_energy, 'corrected_energy': corrected_energy}\n    corrections = []\n    corr_dict = self.get_corrections_dict(entry)\n    for c in self.corrections:\n        cd = {'name': str(c), 'description': c.__doc__.split('Args')[0].strip(), 'value': corr_dict.get(str(c), 0)}\n        corrections.append(cd)\n    d['corrections'] = corrections\n    return d", "docstring": "Provides an explanation dict of the corrections that are being applied\nfor a given compatibility scheme. Inspired by the \"explain\" methods\nin many database methodologies.\n\nArgs:\nentry: A ComputedEntry.\n\nReturns:\n(dict) of the form\n{\"Compatibility\": \"string\",\n\"Uncorrected_energy\": float,\n\"Corrected_energy\": float,\n\"Corrections\": [{\"Name of Correction\": {\n\"Value\": float, \"Explanation\": \"string\"}]}", "source": "codesearchnet"}
{"code": "def _generate_matrix(self, hash_bytes):\n    half_columns = ((self.columns \n    cells = (self.rows * half_columns)\n    matrix = [([False] * self.columns) for _ in range(self.rows)]\n    for cell in range(cells):\n        if self._get_bit(cell, hash_bytes[1:]):\n            column = (cell \n            row = (cell % self.rows)\n            matrix[row][column] = True\n            matrix[row][((self.columns - column) - 1)] = True\n    return matrix", "docstring": "Generates matrix that describes which blocks should be coloured.\n\nArguments:\nhash_bytes - List of hash byte values for which the identicon is being\ngenerated. Each element of the list should be an integer from 0 to\n255.\n\nReturns:\nList of rows, where each element in a row is boolean. True means the\nforeground colour should be used, False means a background colour\nshould be used.", "source": "codesearchnet"}
{"code": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if target_sizes is not None:\n        if len(out_logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    prob = out_logits.sigmoid()\n    prob = prob.view(out_logits.shape[0], -1)\n    k_value = min(top_k, prob.size(1))\n    topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    if target_sizes is not None:\n        if isinstance(target_sizes, List):\n            img_h = torch.Tensor([i[0] for i in target_sizes])\n            img_w = torch.Tensor([i[1] for i in target_sizes])\n        else:\n            img_h, img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n        boxes = boxes * scale_fct[:, None, :]\n    results = []\n    for s, l, b in zip(scores, labels, boxes):\n        score = s[s > threshold]\n        label = l[s > threshold]\n        box = b[s > threshold]\n        results.append({'scores': score, 'labels': label, 'boxes': box})\n    return results", "docstring": "Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,\ntop_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrObjectDetectionOutput`]):\nRaw outputs of the model.\nthreshold (`float`, *optional*):\nScore threshold to keep object detection predictions.\ntarget_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\ntop_k (`int`, *optional*, defaults to 100):\nKeep only top k bounding boxes before filtering by thresholding.\n\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):\n    self.clear_color = (red, green, blue, alpha)\n    self.clear_depth = depth", "docstring": "Sets the clear values for the window buffer.\n\nArgs:\nred (float): red compoent\ngreen (float): green compoent\nblue (float): blue compoent\nalpha (float): alpha compoent\ndepth (float): depth value", "source": "codesearchnet"}
{"code": "def __init__(self, outputs):\n    self._outputs = self._wrap_and_check_outputs(outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')", "docstring": "Constructor for PredictOutput.\n\nArgs:\noutputs: A `Tensor` or a dict of string to `Tensor` representing the\npredictions.\n\nRaises:\nValueError: if the outputs is not dict, or any of its keys are not\nstrings, or any of its values are not `Tensor`s.", "source": "github-repos"}
{"code": "def unlock_swarm(self, key):\n    if isinstance(key, dict):\n        if ('UnlockKey' not in key):\n            raise errors.InvalidArgument('Invalid unlock key format')\n    else:\n        key = {'UnlockKey': key}\n    url = self._url('/swarm/unlock')\n    res = self._post_json(url, data=key)\n    self._raise_for_status(res)\n    return True", "docstring": "Unlock a locked swarm.\n\nArgs:\nkey (string): The unlock key as provided by\n:py:meth:`get_unlock_key`\n\nRaises:\n:py:class:`docker.errors.InvalidArgument`\nIf the key argument is in an incompatible format\n\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nReturns:\n`True` if the request was successful.\n\nExample:\n\n>>> key = client.get_unlock_key()\n>>> client.unlock_node(key)", "source": "codesearchnet"}
{"code": "def convert_variables_to_constants_from_session_graph(session, graph_def, output_node_names, variable_names_allowlist=None, variable_names_denylist=None):\n    graph_def, _ = _replace_variables_by_constants(converter_data=_SessionConverterData(session=session, graph_def=graph_def, output_node_names=output_node_names, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist))\n    return graph_def", "docstring": "Replaces all the variables in a graph with constants of the same values.\n\nThis function works similarly to convert_variables_to_constants_v2, but it\nretrieves the constant values from a Session instead of from a\nConcreteFunction. This is useful when converting graphs generated from\nTensorFlow V1, where ConcreteFunctions are not available. This also differs\nfrom graph_util.convert_variables_to_constants in that it supports resource\nvariables when V2 control flow constructions are present.\n\nArgs:\nsession: Active TensorFlow session containing the variables.\ngraph_def: A GraphDef to convert.\noutput_node_names: List of name strings for the result nodes of the graph.\nvariable_names_allowlist: The set of variable names to convert (by default,\nall variables are converted).\nvariable_names_denylist: The set of variable names to omit converting to\nconstants.\n\nReturns:\nAn optimized GraphDef.", "source": "github-repos"}
{"code": "def get_sessions(self, app_path=None):\n        \n        if app_path is not None:\n            return self._tornado.get_sessions(app_path)\n        all_sessions = []\n        for path in self._tornado.app_paths:\n            all_sessions += self._tornado.get_sessions(path)\n        return all_sessions", "docstring": "Gets all currently active sessions for applications.\n\nArgs:\napp_path (str, optional) :\nThe configured application path for the application to return\nsessions for. If None, return active sessions for all\napplications. (default: None)\n\nReturns:\nlist[ServerSession]", "source": "juraj-google-style"}
{"code": "def on_test_end(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_test_end(logs)", "docstring": "Calls the `on_test_end` methods of its callbacks.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def refund(request, invoice_id):\n    \n\n    current_invoice = InvoiceController.for_id_or_404(invoice_id)\n\n    try:\n        current_invoice.refund()\n        messages.success(request, \"This invoice has been refunded.\")\n    except ValidationError as ve:\n        messages.error(request, ve)\n\n    return redirect(\"invoice\", invoice_id)", "docstring": "Marks an invoice as refunded and requests a credit note for the\nfull amount paid against the invoice.\n\nThis view requires a login, and the logged in user must be staff.\n\nArguments:\ninvoice_id (castable to int): The ID of the invoice to refund.\n\nReturns:\nredirect:\nRedirects to ``invoice``.", "source": "juraj-google-style"}
{"code": "def symm_reduce(self, coords_set, threshold=1e-06):\n    surf_sg = SpacegroupAnalyzer(self.slab, 0.1)\n    symm_ops = surf_sg.get_symmetry_operations()\n    unique_coords = []\n    coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set]\n    for coords in coords_set:\n        incoord = False\n        for op in symm_ops:\n            if in_coord_list_pbc(unique_coords, op.operate(coords), atol=threshold):\n                incoord = True\n                break\n        if (not incoord):\n            unique_coords += [coords]\n    return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]", "docstring": "Reduces the set of adsorbate sites by finding removing\nsymmetrically equivalent duplicates\n\nArgs:\ncoords_set: coordinate set in cartesian coordinates\nthreshold: tolerance for distance equivalence, used\nas input to in_coord_list_pbc for dupl. checking", "source": "codesearchnet"}
{"code": "def element_or_none(self, using, value):\n        \n        try:\n            return self._execute(Command.FIND_CHILD_ELEMENT, {\n                'using': using,\n                'value': value\n            })\n        except:\n            return None", "docstring": "Check if an element in the current element.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nReturn Element if the element does exists and return None otherwise.\n\nRaises:\nWebDriverException.", "source": "juraj-google-style"}
{"code": "def _SetFieldType(self, field_proto, field_desc, package, scope):\n    if field_proto.type_name:\n        desc = self._GetTypeFromScope(package, field_proto.type_name, scope)\n    else:\n        desc = None\n    if (not field_proto.HasField('type')):\n        if isinstance(desc, descriptor.Descriptor):\n            field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE\n        else:\n            field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM\n    field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type)\n    if ((field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE) or (field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP)):\n        field_desc.message_type = desc\n    if (field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM):\n        field_desc.enum_type = desc\n    if (field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED):\n        field_desc.has_default_value = False\n        field_desc.default_value = []\n    elif field_proto.HasField('default_value'):\n        field_desc.has_default_value = True\n        if ((field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE) or (field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT)):\n            field_desc.default_value = float(field_proto.default_value)\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_STRING):\n            field_desc.default_value = field_proto.default_value\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL):\n            field_desc.default_value = (field_proto.default_value.lower() == 'true')\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM):\n            field_desc.default_value = field_desc.enum_type.values_by_name[field_proto.default_value].number\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES):\n            field_desc.default_value = text_encoding.CUnescape(field_proto.default_value)\n        else:\n            field_desc.default_value = int(field_proto.default_value)\n    else:\n        field_desc.has_default_value = False\n        if ((field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE) or (field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT)):\n            field_desc.default_value = 0.0\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_STRING):\n            field_desc.default_value = u''\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL):\n            field_desc.default_value = False\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM):\n            field_desc.default_value = field_desc.enum_type.values[0].number\n        elif (field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES):\n            field_desc.default_value = b''\n        else:\n            field_desc.default_value = 0\n    field_desc.type = field_proto.type", "docstring": "Sets the field's type, cpp_type, message_type and enum_type.\n\nArgs:\nfield_proto: Data about the field in proto format.\nfield_desc: The descriptor to modiy.\npackage: The package the field's container is in.\nscope: Enclosing scope of available types.", "source": "codesearchnet"}
{"code": "def equals(self, rhs):\n    for comparator in self._comparators:\n        if comparator.equals(rhs):\n            return True\n    return False", "docstring": "Checks whether any Comparator is equal to rhs.\n\nArgs:\n# rhs: can be anything\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def __init__(self, default: typing.Any, values: typing.List[typing.Any], frozen: bool=False):\n    if not isinstance(values, list) or not values:\n        raise ValueError(f'Values for Enum should be a non-empty list. Found {values!r}.')\n    if MISSING_VALUE != default and default not in values:\n        raise ValueError(f'Enum default value {default!r} is not in candidate list {values!r}.')\n    value_type = None\n    for v in values:\n        if v is None:\n            continue\n        if value_type is None:\n            value_type = type(v)\n        else:\n            next_type = type(v)\n            if issubclass(value_type, next_type):\n                value_type = next_type\n            elif not issubclass(next_type, value_type):\n                value_type = None\n                break\n    is_noneable = any([v is None for v in values])\n    if value_type is not None and issubclass(value_type, str):\n        value_type = str\n    self._values = values\n    super().__init__(value_type, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\ndefault: default value for this spec.\nvalues: all acceptable values.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):\n    return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)", "docstring": "Create the Signature TI object.\n\nArgs:\nowner:\nfile_content:\nfile_name:\nfile_type:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def _maybe_read_file(filename):\n  \n  try:\n    with open(filename) as infile:\n      return infile.read()\n  except IOError as e:\n    if e.errno == errno.ENOENT:\n      return None", "docstring": "Read the given file, if it exists.\n\nArgs:\nfilename: A path to a file.\n\nReturns:\nA string containing the file contents, or `None` if the file does\nnot exist.", "source": "juraj-google-style"}
{"code": "def load_caffe(model_desc, model_file):\n    \n    with change_env('GLOG_minloglevel', '2'):\n        import caffe\n        caffe.set_mode_cpu()\n        net = caffe.Net(model_desc, model_file, caffe.TEST)\n    param_dict = CaffeLayerProcessor(net).process()\n    logger.info(\"Model loaded from caffe. Params: \" +\n                \", \".join(sorted(param_dict.keys())))\n    return param_dict", "docstring": "Load a caffe model. You must be able to ``import caffe`` to use this\nfunction.\nArgs:\nmodel_desc (str): path to caffe model description file (.prototxt).\nmodel_file (str): path to caffe model parameter file (.caffemodel).\nReturns:\ndict: the parameters.", "source": "juraj-google-style"}
{"code": "def variable_dtype(self):\n    return self._variable_dtype", "docstring": "The variable dtype of this policy.\n\nThis is the dtype layers will create their variables in, unless a layer\nexplicitly chooses a different dtype. If this is different than\n`Policy.compute_dtype`, Layers will cast variables to the compute dtype to\navoid type errors.\n\nVariable regularizers are run in the variable dtype, not the compute dtype.\n\nReturns:\nThe variable dtype of this policy, as a string.", "source": "github-repos"}
{"code": "def delete(self, key):\n    data = None\n    if (key is not None):\n        data = self.db.delete(key.strip())\n    else:\n        self.tcex.log.warning(u'The key field was None.')\n    return data", "docstring": "Delete method of CRUD operation for all data types.\n\nArgs:\nkey (string): The variable to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n        \n        super().unpack(buff, offset)\n\n        self.version = self._version_ihl.value >> 4\n        self.ihl = self._version_ihl.value & 15\n        self.dscp = self._dscp_ecn.value >> 2\n        self.ecn = self._dscp_ecn.value & 3\n        self.length = self.length.value\n        self.identification = self.identification.value\n        self.flags = self._flags_offset.value >> 13\n        self.offset = self._flags_offset.value & 8191\n        self.ttl = self.ttl.value\n        self.protocol = self.protocol.value\n        self.checksum = self.checksum.value\n        self.source = self.source.value\n        self.destination = self.destination.value\n\n        if self.ihl > 5:\n            options_size = (self.ihl - 5) * 4\n            self.data = self.options.value[options_size:]\n            self.options = self.options.value[:options_size]\n        else:\n            self.data = self.options.value\n            self.options = b''", "docstring": "Unpack a binary struct into this object's attributes.\n\nReturn the values instead of the lib's basic types.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def PrintMessage(self, message):\n    \n    fields = message.ListFields()\n    if self.use_index_order:\n      fields.sort(key=lambda x: x[0].index)\n    for field, value in fields:\n      if _IsMapEntry(field):\n        for key in sorted(value):\n          \n          \n          \n          \n          \n          entry_submsg = field.message_type._concrete_class(\n              key=key, value=value[key])\n          self.PrintField(field, entry_submsg)\n      elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n        for element in value:\n          self.PrintField(field, element)\n      else:\n        self.PrintField(field, value)", "docstring": "Convert protobuf message to text format.\n\nArgs:\nmessage: The protocol buffers message.", "source": "juraj-google-style"}
{"code": "def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer='grpc', worker_barrier=None):\n    self._strategy = strategy\n    self._cluster_spec = cluster_spec\n    self._task_type = task_type\n    self._task_id = task_id\n    self._session_config = session_config\n    self._worker_barrier = worker_barrier\n    self._rpc_layer = rpc_layer\n    self._master_target = self._get_master_target()\n    self._num_workers = _get_num_workers(cluster_spec)\n    self._is_chief_node = self._is_chief()", "docstring": "Initialize the worker context object.\n\nArgs:\nstrategy: a `DistributionStrategy` object.\ncluster_spec: a ClusterSpec object. It can be empty or None in the local\ntraining case.\ntask_type: a string indicating the role of the corresponding task, such as\n\"worker\" or \"ps\". It can be None if it is local training or in-graph\nreplicated training.\ntask_id: an integer indicating id of the corresponding task. It can be\nNone if it is local training or in-graph replicated training.\nsession_config: an optional `tf.compat.v1.ConfigProto` object.\nrpc_layer: optional string specifying the RPC protocol for communication\nwith worker masters. If None or empty, hosts in the `cluster_spec` will\nbe used directly.\nworker_barrier: optional, the barrier object for worker synchronization.", "source": "github-repos"}
{"code": "async def _multipart(self, files_dict):\n        \n        boundary = bytes(_BOUNDARY, self.encoding)\n        hder_format = 'Content-Disposition: form-data; name=\"{}\"'\n        hder_format_io = '; filename=\"{}\"'\n\n        multip_pkg = b''\n\n        num_of_parts = len(files_dict)\n\n        for index, kv in enumerate(files_dict.items(), start=1):\n            multip_pkg += (b'--' + boundary + b'\\r\\n')\n            k, v = kv\n\n            try:\n                pkg_body = await self._file_manager(v)\n                multip_pkg += bytes(hder_format.format(k) +\n                                    hder_format_io.format(basename(v)),\n                                    self.encoding)\n                mime_type = mimetypes.guess_type(basename(v))\n                if not mime_type[1]:\n                    mime_type = 'application/octet-stream'\n                else:\n                    mime_type = '/'.join(mime_type)\n                multip_pkg += bytes('; Content-Type: ' + mime_type,\n                                    self.encoding)\n                multip_pkg += b'\\r\\n'*2 + pkg_body\n\n            except (TypeError, FileNotFoundError):\n                pkg_body = bytes(v, self.encoding) + b'\\r\\n'\n                multip_pkg += bytes(hder_format.format(k) +\n                                    '\\r\\n'*2, self.encoding)\n                multip_pkg += pkg_body\n\n            if index == num_of_parts:\n                multip_pkg += b'--' + boundary + b'--\\r\\n'\n        return multip_pkg", "docstring": "Forms multipart requests from a dict with name, path k/vs. Name\ndoes not have to be the actual file name.\n\nArgs:\nfiles_dict (dict): A dict of `filename:filepath`s, to be sent\nas multipart files.\n\nReturns:\nmultip_pkg (str): The strings representation of the content body,\nmultipart formatted.", "source": "juraj-google-style"}
{"code": "def download(self, location, local_dir='.'):\n        \n\n        self.logger.debug('Getting S3 info')\n        bucket = self.info['bucket']\n        prefix = self.info['prefix']\n\n        self.logger.debug('Connecting to S3')\n        s3conn = self.client\n\n        \n        location = location.strip('/')\n\n        self.logger.debug('Downloading contents')\n        objects = s3conn.list_objects(Bucket=bucket, Prefix=(prefix+'/'+location))\n        if 'Contents' not in objects:\n            raise ValueError('Download target {}/{}/{} was not found or inaccessible.'.format(bucket, prefix, location))\n        for s3key in objects['Contents']:\n            key = s3key['Key']\n    \n            \n            if not key or key.endswith('/'):\n                continue\n\n            \n            filepath = key.replace(prefix+'/'+location, '', 1).lstrip('/')\n            filename = key.split('/')[-1]\n            \n            \n            file_dir = filepath.split('/')[:-1]\n            file_dir = '/'.join(file_dir)\n            full_dir = os.path.join(local_dir, file_dir)\n\n            \n            if not os.path.isdir(full_dir):\n                os.makedirs(full_dir)\n\n            \n            s3conn.download_file(bucket, key, os.path.join(full_dir, filename))\n\n        self.logger.debug('Done!')", "docstring": "Download content from bucket/prefix/location.\nLocation can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)\nIf location is a directory, all files in the directory are\ndownloaded. If it is a file, then that file is downloaded.\n\nArgs:\nlocation (str): S3 location within prefix.\nlocal_dir (str): Local directory where file(s) will be stored.\nDefault is here.", "source": "juraj-google-style"}
{"code": "def _comparison(self, op, value):\n    if (not self._indexed):\n        raise datastore_errors.BadFilterError(('Cannot query for unindexed property %s' % self._name))\n    from .query import FilterNode\n    if (value is not None):\n        value = self._do_validate(value)\n        value = self._call_to_base_type(value)\n        value = self._datastore_type(value)\n    return FilterNode(self._name, op, value)", "docstring": "Internal helper for comparison operators.\n\nArgs:\nop: The operator ('=', '<' etc.).\n\nReturns:\nA FilterNode instance representing the requested comparison.", "source": "codesearchnet"}
{"code": "def handle_config_change(self, new_config):\n    if self.user_handler:\n        self.user_handler(self.current_config, new_config)\n    self._call_spec_handlers(new_config)\n    self.current_config = copy.deepcopy(new_config)", "docstring": "Handle the new configuration.\n\nArgs:\nnew_config (dict): The new configuration", "source": "codesearchnet"}
{"code": "def transform_python_types(self, obj):\n        \n\n        \n        if is_datetime_type(obj):\n            return convert_datetime_type(obj)\n\n        if is_timedelta_type(obj):\n            return convert_timedelta_type(obj)\n\n        \n        elif isinstance(obj, slice):\n            return dict(start=obj.start, stop=obj.stop, step=obj.step)\n\n        \n        elif np.issubdtype(type(obj), np.floating):\n            return float(obj)\n        elif np.issubdtype(type(obj), np.integer):\n            return int(obj)\n        elif np.issubdtype(type(obj), np.bool_):\n            return bool(obj)\n\n        \n        elif isinstance(obj, decimal.Decimal):\n            return float(obj)\n\n        \n        elif rd and isinstance(obj, rd.relativedelta):\n            return dict(years=obj.years,\n                    months=obj.months,\n                    days=obj.days,\n                    hours=obj.hours,\n                    minutes=obj.minutes,\n                    seconds=obj.seconds,\n                    microseconds=obj.microseconds)\n\n        else:\n            return super(BokehJSONEncoder, self).default(obj)", "docstring": "Handle special scalars such as (Python, NumPy, or Pandas)\ndatetimes, or Decimal values.\n\nArgs:\nobj (obj) :\n\nThe object to encode. Anything not specifically handled in\nthis method is passed on to the default system JSON encoder.", "source": "juraj-google-style"}
{"code": "def set_iprouting(self, value=None, default=False, disable=False):\n    if (value is False):\n        disable = True\n    cmd = self.command_builder('ip routing', value=value, default=default, disable=disable)\n    return self.configure(cmd)", "docstring": "Configures the state of global ip routing\n\nEosVersion:\n4.13.7M\n\nArgs:\nvalue(bool): True if ip routing should be enabled or False if\nip routing should be disabled\ndefault (bool): Controls the use of the default keyword\ndisable (bool): Controls the use of the no keyword\n\nReturns:\nbool: True if the commands completed successfully otherwise False", "source": "codesearchnet"}
{"code": "def as_list(self, label=1, **kwargs):\n    label_to_use = (label if (self.mode == 'classification') else self.dummy_label)\n    ans = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs)\n    ans = [(x[0], float(x[1])) for x in ans]\n    return ans", "docstring": "Returns the explanation as a list.\n\nArgs:\nlabel: desired label. If you ask for a label for which an\nexplanation wasn't computed, will throw an exception.\nWill be ignored for regression explanations.\nkwargs: keyword arguments, passed to domain_mapper\n\nReturns:\nlist of tuples (representation, weight), where representation is\ngiven by domain_mapper. Weight is a float.", "source": "codesearchnet"}
{"code": "def run_foreach_or_conditional(self, context):\n        \n        logger.debug(\"starting\")\n        \n        if self.foreach_items:\n            self.foreach_loop(context)\n        else:\n            \n            self.run_conditional_decorators(context)\n\n        logger.debug(\"done\")", "docstring": "Run the foreach sequence or the conditional evaluation.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "juraj-google-style"}
{"code": "def generate_sample_set(self, tags=None):\n        \n        if isinstance(tags, str):\n            tags = [tags]\n        md5_list = self.data_store.tag_match(tags)\n        return self.store_sample_set(md5_list)", "docstring": "Generate a sample_set that maches the tags or all if tags are not specified.\n\nArgs:\ntags: Match samples against this tag list (or all if not specified)\n\nReturns:\nThe sample_set of those samples matching the tags", "source": "juraj-google-style"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    default_to_square = True\n    if 'shortest_edge' in size:\n        size = size['shortest_edge']\n        default_to_square = False\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n    else:\n        raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n    output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "async def start(self, name=\"websocket_client\"):\n        \n\n        self._con = await websockets.connect(self.url)\n        self._connection_task = self._loop.add_task(self._manage_connection(), name=name)", "docstring": "Connect to the websocket server.\n\nThis method will spawn a background task in the designated event loop\nthat will run until stop() is called.  You can control the name of the\nbackground task for debugging purposes using the name parameter.  The\nname is not used in anyway except for debug logging statements.\n\nArgs:\nname (str): Optional name for the background task.", "source": "juraj-google-style"}
{"code": "def get_list(self, key, is_optional=False, is_secret=False, is_local=False, default=None, options=None):\n\n    def parse_list(v):\n        parts = v.split(',')\n        results = []\n        for part in parts:\n            part = part.strip()\n            if part:\n                results.append(part)\n        return results\n    return self._get_typed_value(key=key, target_type=list, type_convert=parse_list, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)", "docstring": "Get a the value corresponding to the key and converts comma separated values to a list.\n\nArgs:\nkey: the dict key.\nis_optional: To raise an error if key was not found.\nis_secret: If the key is a secret.\nis_local: If the key is a local to this service.\ndefault: default value if is_optional is True.\noptions: list/tuple if provided, the value must be one of these values.\n\nReturns:\n`str`: value corresponding to the key.", "source": "codesearchnet"}
{"code": "def split_input(cls, mapper_spec):\n    \n    params = _get_params(mapper_spec)\n    shard_count = mapper_spec.shard_count\n\n    \n    start_time = params[cls.START_TIME_PARAM]\n    end_time = params[cls.END_TIME_PARAM]\n    seconds_per_shard = (end_time - start_time) / shard_count\n\n    \n    shards = []\n    for _ in xrange(shard_count - 1):\n      params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] +\n                                    seconds_per_shard)\n      shards.append(LogInputReader(**params))\n      params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM]\n\n    \n    params[cls.END_TIME_PARAM] = end_time\n    return shards + [LogInputReader(**params)]", "docstring": "Returns a list of input readers for the given input specification.\n\nArgs:\nmapper_spec: The MapperSpec for this InputReader.\n\nReturns:\nA list of InputReaders.", "source": "juraj-google-style"}
{"code": "def reset(self, history=None):\n        \n        if not history:\n            history = dict()\n\n        self.episode_rewards = history.get(\"episode_rewards\", list())\n        self.episode_timesteps = history.get(\"episode_timesteps\", list())\n        self.episode_times = history.get(\"episode_times\", list())", "docstring": "Resets the Runner's internal stats counters.\nIf history is empty, use default values in history.get().\n\nArgs:\nhistory (dict): A dictionary containing an already run experiment's results. Keys should be:\nepisode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)", "source": "juraj-google-style"}
{"code": "def lstsq(A, b):\n    r\n    A = asarray(A, float)\n    b = asarray(b, float)\n\n    if A.ndim == 1:\n        A = A[:, newaxis]\n\n    if A.shape[1] == 1:\n        return dot(A.T, b) / squeeze(dot(A.T, A))\n\n    rcond = finfo(double).eps * max(*A.shape)\n    return npy_lstsq(A, b, rcond=rcond)[0]", "docstring": "r\"\"\"Return the least-squares solution to a linear matrix equation.\n\nArgs:\nA (array_like): Coefficient matrix.\nb (array_like): Ordinate values.\n\nReturns:\n:class:`numpy.ndarray`: Least-squares solution.", "source": "juraj-google-style"}
{"code": "def prepare_capstone(syntax=AsmSyntax.att, target=None):\n    \n\n    if not HAVE_CAPSTONE:\n        raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')\n\n    if target is None:\n        target = pwnypack.target.target\n\n    if target.arch == pwnypack.target.Target.Arch.x86:\n        if target.bits is pwnypack.target.Target.Bits.bits_32:\n            md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)\n        else:\n            md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)\n    elif target.arch == pwnypack.target.Target.Arch.arm:\n        mode = 0\n\n        if target.bits is pwnypack.target.Target.Bits.bits_32:\n            arch = capstone.CS_ARCH_ARM\n\n            if target.mode and pwnypack.target.Target.Mode.arm_thumb:\n                mode = capstone.CS_MODE_THUMB\n            else:\n                mode = capstone.CS_MODE_ARM\n                if target.mode and pwnypack.target.Target.Mode.arm_m_class:\n                    mode |= capstone.CS_MODE_MCLASS\n\n            if target.mode and pwnypack.target.Target.Mode.arm_v8:\n                mode |= capstone.CS_MODE_V8\n        else:\n            arch = capstone.CS_ARCH_ARM64\n\n        if target.endian is pwnypack.target.Target.Endian.little:\n            mode |= capstone.CS_MODE_LITTLE_ENDIAN\n        else:\n            mode |= capstone.CS_MODE_BIG_ENDIAN\n\n        md = capstone.Cs(arch, mode)\n    else:\n        raise NotImplementedError('Only x86 is currently supported.')\n\n    md.skipdata = True\n\n    if syntax is AsmSyntax.att:\n        md.syntax = capstone.CS_OPT_SYNTAX_ATT\n    elif syntax is AsmSyntax.intel:\n        md.skipdata_setup(('db', None, None))\n    else:\n        raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.')\n\n    return md", "docstring": "Prepare a capstone disassembler instance for a given target and syntax.\n\nArgs:\nsyntax(AsmSyntax): The assembler syntax (Intel or AT&T).\ntarget(~pwnypack.target.Target): The target to create a disassembler\ninstance for. The global target is used if this argument is\n``None``.\n\nReturns:\nAn instance of the capstone disassembler.\n\nRaises:\nNotImplementedError: If the specified target isn't supported.", "source": "juraj-google-style"}
{"code": "def market_if_touched(self, accountID, **kwargs):\n    return self.create(accountID, order=MarketIfTouchedOrderRequest(**kwargs))", "docstring": "Shortcut to create a MarketIfTouched Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a MarketIfTouchedOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def Items(self, key):\n    \n    with self._mutex:\n      if key not in self._buckets:\n        raise KeyError('Key %s was not found in Reservoir' % key)\n      bucket = self._buckets[key]\n    return bucket.Items()", "docstring": "Return items associated with given key.\n\nArgs:\nkey: The key for which we are finding associated items.\n\nRaises:\nKeyError: If the key is not found in the reservoir.\n\nReturns:\n[list, of, items] associated with that key.", "source": "juraj-google-style"}
{"code": "def prepend(self, key, value, expire=0, noreply=None):\n    if (noreply is None):\n        noreply = self.default_noreply\n    return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]", "docstring": "The memcached \"prepend\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: str, see class docs for details.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue.", "source": "codesearchnet"}
{"code": "def _resolve_subkeys(key, separator='.'):\n    subkey = None\n    if (separator in key):\n        index = key.index(separator)\n        subkey = key[(index + 1):]\n        key = key[:index]\n    return (key, subkey)", "docstring": "Given a key which may actually be a nested key, return the top level\nkey and any nested subkeys as separate values.\n\nArgs:\nkey (str): A string that may or may not contain the separator.\nseparator (str): The namespace separator. Defaults to `.`.\n\nReturns:\nTuple[str, str]: The key and subkey(s).", "source": "codesearchnet"}
{"code": "def _result_type_impl(*arrays_and_dtypes):\n    promo_safety_mode = ops.get_dtype_conversion_mode()\n    valid_arrays_and_dtypes = []\n    for inp in arrays_and_dtypes:\n        if inp is not None:\n            if _is_acceptable_input_type(inp):\n                valid_arrays_and_dtypes.append(inp)\n            else:\n                raise NotImplementedError(f'Auto dtype conversion semantics does not support {type(inp)} type.')\n    dtypes_and_is_weak = [_get_dtype_and_weakness(x) for x in nest.flatten(valid_arrays_and_dtypes)]\n    if not dtypes_and_is_weak:\n        dtypes_and_is_weak = [(dtypes.float32, True)]\n    res = dtypes_and_is_weak[0]\n    for arg in dtypes_and_is_weak[1:]:\n        res = (res[0].base_dtype, res[1])\n        arg = (arg[0].base_dtype, arg[1])\n        try:\n            res_next, allowed_mode = _BINARY_DTYPE_RES_FULL[res][arg]\n        except KeyError as exc:\n            raise NotImplementedError(f'Implicit Conversion between {res[0]} and {arg[0]} is not allowed. Please convert the input manually if you need to.') from exc\n        if allowed_mode.value > promo_safety_mode.value:\n            raise TypeError(f'In promotion mode {promo_safety_mode}, implicit dtype promotion between ({res[0]}, weak={res[1]}) and ({arg[0]}, weak={arg[1]}) is disallowed. You need to explicitly specify the dtype in your op, or relax your dtype promotion rules (such as from SAFE mode to ALL mode).')\n        res = res_next\n    return res", "docstring": "Internal implementation of jnp_style_result_type.\n\nArgs:\n*arrays_and_dtypes: A list of Tensors, Variables, NumPy arrays or python\nnumbers.\n\nReturns:\nThe result promotion type from all the inputs.\n\nRaises:\nTypeError: when the promotion between the input dtypes is disabled in the\ncurrent mode\n\nNotImplementedError:\n(1) When arrays_and_dtypes contains an unsupported input type (e.g.\nRaggedTensor).\n(2) When there isn't a possible promotion for the input dtypes.", "source": "github-repos"}
{"code": "def roles(self):\n    if (not self.__roles):\n        self.__roles = Roles(self.__connection)\n    return self.__roles", "docstring": "Gets the Roles API client.\n\nReturns:\nRoles:", "source": "codesearchnet"}
{"code": "def result_to_dict(raw_result):\n    \n\n    result = {}\n\n    for channel_index, channel in enumerate(raw_result):\n        channel_id, channel_name = channel[0], channel[1]\n        channel_result = {\n            'id': channel_id,\n            'name': channel_name,\n            'movies': []\n        }\n        for movie in channel[2]:\n            channel_result['movies'].append({\n                'title': movie[1],\n                'start_time': datetime.fromtimestamp(movie[2]),\n                'end_time': datetime.fromtimestamp(movie[2] + movie[3]),\n                'inf': True if movie[3] else False,\n            })\n        result[channel_id] = channel_result\n\n    return result", "docstring": "Parse raw result from fetcher into readable dictionary\n\nArgs:\nraw_result (dict) - raw data from `fetcher`\n\nReturns:\ndict - readable dictionary", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    \n    return (re.match(self._VERIFICATION_REGEX, lines) or\n            re.match(self._CHROMEOS_VERIFICATION_REGEX, lines)) is not None", "docstring": "Verifies that this is a syslog-formatted file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between\nparsers and other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def intersect_one_round(candidates, intersections):\n    next_candidates = []\n    for (first, second) in candidates:\n        both_linearized = False\n        if (first.__class__ is Linearization):\n            if (second.__class__ is Linearization):\n                both_linearized = True\n                bbox_int = bbox_intersect(first.curve.nodes, second.curve.nodes)\n            else:\n                bbox_int = bbox_line_intersect(second.nodes, first.start_node, first.end_node)\n        elif (second.__class__ is Linearization):\n            bbox_int = bbox_line_intersect(first.nodes, second.start_node, second.end_node)\n        else:\n            bbox_int = bbox_intersect(first.nodes, second.nodes)\n        if (bbox_int == BoxIntersectionType.DISJOINT):\n            continue\n        elif ((bbox_int == BoxIntersectionType.TANGENT) and (not both_linearized)):\n            tangent_bbox_intersection(first, second, intersections)\n            continue\n        if both_linearized:\n            from_linearized(first, second, intersections)\n            continue\n        lin1 = six.moves.map(Linearization.from_shape, first.subdivide())\n        lin2 = six.moves.map(Linearization.from_shape, second.subdivide())\n        next_candidates.extend(itertools.product(lin1, lin2))\n    return next_candidates", "docstring": "Perform one step of the intersection process.\n\n.. note::\n\nThis is a helper for :func:`_all_intersections` and that function\nhas a Fortran equivalent.\n\nChecks if the bounding boxes of each pair in ``candidates``\nintersect. If the bounding boxes do not intersect, the pair\nis discarded. Otherwise, the pair is \"accepted\". Then we\nattempt to linearize each curve in an \"accepted\" pair and\ntrack the overall linearization error for every curve\nencountered.\n\nArgs:\ncandidates (Union[list, itertools.chain]): An iterable of\npairs of curves (or linearized curves).\nintersections (list): A list of already encountered\nintersections. If any intersections can be readily determined\nduring this round of subdivision, then they will be added\nto this list.\n\nReturns:\nlist: Returns a list of the next round of ``candidates``.", "source": "codesearchnet"}
{"code": "def load_maps(maps_dir):\n    maps_dir = os.path.abspath(maps_dir)\n    maps = {}\n    for (root, dirnames, filenames) in os.walk(maps_dir):\n        for filename in filenames:\n            if filename.endswith('.xml'):\n                xml_file = os.path.join(root, filename)\n                map = MapSource.from_xml(xml_file, maps_dir)\n                if (map.id in maps):\n                    raise MapSourceException('duplicate map id: {} in file {}'.format(map.id, xml_file))\n                else:\n                    maps[map.id] = map\n    return maps", "docstring": "Load all xml map sources from a given directory.\n\nArgs:\nmaps_dir: path to directory to search for maps\n\nReturns:\ndict of MapSource:", "source": "codesearchnet"}
{"code": "def split(node, stack):\n  \n  node, defined, reaching = _fix(node)\n\n  \n  node = store_state(node, reaching, defined, stack)\n\n  \n  anno.clearanno(node)\n  return node", "docstring": "Carry over the state from the primal to the adjoint.\n\nArgs:\nnode: A module with the primal and adjoint function definitions as returned\nby `reverse_ad`.\nstack: The stack node to use for storing and restoring state.\n\nReturns:\nfunc: A `Module` node with two function definitions containing the primal\nand adjoint respectively.", "source": "juraj-google-style"}
{"code": "def _decode_filename(base_filename, problem_name, decode_hp):\n  \n  if decode_hp.shards > 1:\n    base_filename = _add_shard_to_filename(base_filename, decode_hp)\n  if (\"beam{beam}.alpha{alpha}.decodes\".format(\n      beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))\n      in base_filename):\n    return base_filename\n  else:\n    return (\n        \"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes\".format(\n            base=base_filename,\n            model=FLAGS.model,\n            hp=FLAGS.hparams_set,\n            problem=problem_name,\n            beam=str(decode_hp.beam_size),\n            alpha=str(decode_hp.alpha)))", "docstring": "Generates decode filename.\n\nArgs:\nbase_filename: A string, base of the decode filename.\nproblem_name: A string, name of the problem.\ndecode_hp: HParams for decoding.\n\nReturns:\nA string, produced decode filename.", "source": "juraj-google-style"}
{"code": "def send_result_email(self, sender=None):\n    \n    status = 'successful'\n    if self.was_aborted:\n      status = 'aborted'\n\n    app_id = os.environ['APPLICATION_ID']\n    shard_index = app_id.find('~')\n    if shard_index != -1:\n      app_id = app_id[shard_index+1:]\n\n    param_dict = {\n        'status': status,\n        'app_id': app_id,\n        'class_path': self._class_path,\n        'pipeline_id': self.root_pipeline_id,\n        'base_path': '%s.appspot.com%s' % (app_id, self.base_path),\n    }\n    subject = (\n        'Pipeline %(status)s: App \"%(app_id)s\", %(class_path)s'\n        '\n    body =  % param_dict\n\n    html =  % param_dict\n\n    if sender is None:\n      sender = '%s@%s.appspotmail.com' % (app_id, app_id)\n    try:\n      self._send_mail(sender, subject, body, html=html)\n    except (mail.InvalidSenderError, mail.InvalidEmailError):\n      logging.warning('Could not send result email for '\n                      'root pipeline ID \"%s\" from sender \"%s\"',\n                      self.root_pipeline_id, sender)", "docstring": "Sends an email to admins indicating this Pipeline has completed.\n\nFor developer convenience. Automatically called from finalized for root\nPipelines that do not override the default action.\n\nArgs:\nsender: (optional) Override the sender's email address.", "source": "juraj-google-style"}
{"code": "def validate(data):\n    \n    text = data.get('text')\n    if not isinstance(text, _string_types) or len(text) == 0:\n        raise ValueError('text field is required and should not be empty')\n\n    if 'markdown' in data and not type(data['markdown']) is bool:\n        raise ValueError('markdown field should be bool')\n\n    if 'attachments' in data:\n        if not isinstance(data['attachments'], (list, tuple)):\n            raise ValueError('attachments field should be list or tuple')\n\n        for attachment in data['attachments']:\n            if 'text' not in attachment and 'title' not in attachment:\n                raise ValueError('text or title is required in attachment')\n\n    return True", "docstring": "Validates incoming data\n\nArgs:\ndata(dict): the incoming data\n\nReturns:\nTrue if the data is valid\n\nRaises:\nValueError: the data is not valid", "source": "juraj-google-style"}
{"code": "async def getPropNorm(self, prop, valu):\n        \n        pobj = self.model.prop(prop)\n        if pobj is None:\n            raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.',\n                                   prop=prop)\n        norm, info = pobj.type.norm(valu)\n        return norm, info", "docstring": "Get the normalized property value based on the Cortex data model.\n\nArgs:\nprop (str): The property to normalize.\nvalu: The value to normalize.\n\nReturns:\n(tuple): A two item tuple, containing the normed value and the info dictionary.\n\nRaises:\ns_exc.NoSuchProp: If the prop does not exist.\ns_exc.BadTypeValu: If the value fails to normalize.", "source": "juraj-google-style"}
{"code": "def mobility(sdat, tstart=None, tend=None):\n    tseries = sdat.tseries_between(tstart, tend)\n    steps = sdat.steps[tseries.index[0]:tseries.index[(- 1)]]\n    time = []\n    mob = []\n    for step in steps.filter(rprof=True):\n        time.append(step.timeinfo['t'])\n        mob.append((step.rprof.iloc[(- 1)].loc['vrms'] / step.timeinfo['vrms']))\n    return (np.array(mob), np.array(time))", "docstring": "Plates mobility.\n\nCompute the ratio vsurf / vrms.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): time at which the computation should start. Use the\nbeginning of the time series data if set to None.\ntend (float): time at which the computation should end. Use the\nend of the time series data if set to None.\nReturns:\ntuple of :class:`numpy.array`: mobility and time arrays.", "source": "codesearchnet"}
{"code": "def _create_centerline(self):\n    border = array(self.__densify_border())\n    vor = Voronoi(border)\n    vertex = vor.vertices\n    lst_lines = []\n    for (j, ridge) in enumerate(vor.ridge_vertices):\n        if ((- 1) not in ridge):\n            line = LineString([((vertex[ridge[0]][0] + self._minx), (vertex[ridge[0]][1] + self._miny)), ((vertex[ridge[1]][0] + self._minx), (vertex[ridge[1]][1] + self._miny))])\n            if (line.within(self._input_geom) and (len(line.coords[0]) > 1)):\n                lst_lines.append(line)\n    nr_lines = len(lst_lines)\n    if (nr_lines < 2):\n        raise RuntimeError('Number of produced ridges is too small: {}, this might be caused by too large interpolation distance.'.format(nr_lines))\n    return unary_union(lst_lines)", "docstring": "Calculate the centerline of a polygon.\n\nDensifies the border of a polygon which is then represented by a Numpy\narray of points necessary for creating the Voronoi diagram. Once the\ndiagram is created, the ridges located within the polygon are\njoined and returned.\n\nReturns:\na union of lines that are located within the polygon.", "source": "codesearchnet"}
{"code": "def process_data(data, number_to_keep):\n    result = dict()\n    if (number_to_keep != 0):\n        data_temp = dict(Counter(data).most_common(number_to_keep))\n        data_temp['rest'] = (sum(data.values()) - sum(data_temp.values()))\n        data = data_temp\n    labels = data\n    values = np.array([data[key] for key in labels], dtype=float)\n    pvalues = (values / sum(values))\n    for (position, label) in enumerate(labels):\n        result[label] = round(pvalues[position], 5)\n    return result", "docstring": "Prepare received data for representation.\n\nArgs:\ndata (dict): values to represent (ex. {'001' : 130})\nnumber_to_keep (int): number of elements to show individually.\n\nReturns:\ndict: processed data to show.", "source": "codesearchnet"}
{"code": "def ManuallyScheduleClients(self, token=None):\n    client_ids = set()\n    for flow_request in self.args.flows:\n        for client_id in flow_request.client_ids:\n            client_ids.add(client_id)\n    self.StartClients(self.session_id, client_ids, token=token)", "docstring": "Schedule all flows without using the Foreman.\n\nSince we know all the client ids to run on we might as well just schedule\nall the flows and wait for the results.\n\nArgs:\ntoken: A datastore access token.", "source": "codesearchnet"}
{"code": "def select_symbols(self, symbols, ret_list=False):\n        \n        symbols = list_strings(symbols)\n        exclude = symbols[0].startswith(\"-\")\n\n        if exclude:\n            if not all(s.startswith(\"-\") for s in symbols):\n                raise ValueError(\"When excluding symbols, all strings must start with `-`\")\n            symbols = [s[1:] for s in symbols]\n\n        symbols = set(symbols)\n        pseudos = []\n        for p in self:\n            if exclude:\n                if p.symbol in symbols: continue\n            else:\n                if p.symbol not in symbols: continue\n\n            pseudos.append(p)\n\n        if ret_list:\n            return pseudos\n        else:\n            return self.__class__(pseudos)", "docstring": "Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.\n\nArgs:\nsymbols: str or list of symbols\nPrepend the symbol string with \"-\", to exclude pseudos.\nret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`", "source": "juraj-google-style"}
{"code": "def recursion_error(self, repeated_parser: str):\n        \n        if self.finished:\n            return super().recursion_error(repeated_parser)\n        else:\n            line_index, character_index, line, pointer = self.current_line()\n\n            return 'Infinite recursion detected in {}; empty string was matched and will be matched forever\\n' \\\n                   'Line {}, character {}\\n\\n{}{}'.format(repeated_parser, line_index, character_index, line, pointer)", "docstring": "Generate an error to indicate that infinite recursion was encountered.\n\nA parser can supply a representation of itself to this method and the\nreader will supply the context, including the location where the\nparser stalled.\n\nArgs:\nrepeated_parser: A representation of the repeated parser\n\nReturns:\nA full error message", "source": "juraj-google-style"}
{"code": "def absolute_proportions(proportions, count):\n    \n\n    \n    relative_sum = sum(proportions.values())\n    absolute_proportions = {idx: int(count / relative_sum * prop_value) for idx, prop_value in\n                            proportions.items()}\n\n    \n    absolute_sum = sum(absolute_proportions.values())\n    rest_value = count - absolute_sum\n    subset_keys = sorted(list(proportions.keys()))\n\n    for i in range(rest_value):\n        key = subset_keys[i % len(subset_keys)]\n        absolute_proportions[key] += 1\n\n    return absolute_proportions", "docstring": "Split a given integer into n parts according to len(proportions) so they sum up to count and\nmatch the given proportions.\n\nArgs:\nproportions (dict): Dict of proportions, with a identifier as key.\n\nReturns:\ndict: Dictionary with absolute proportions and same identifiers as key.\n\nExample::\n\n>>> absolute_proportions({'train': 0.5, 'test': 0.5}, 100)\n{'train': 50, 'test': 50}", "source": "juraj-google-style"}
{"code": "def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):\n    for permission in exceptions_dict:\n        if (permission not in self._EXCEPTIONS_KEYS):\n            continue\n        exception_dict = exceptions_dict.get(permission, {})\n        for (urls, url_dict) in exception_dict.items():\n            last_used = url_dict.get('last_used', None)\n            if (not last_used):\n                continue\n            (primary_url, secondary_url) = urls.split(',')\n            event_data = ChromeContentSettingsExceptionsEventData()\n            event_data.permission = permission\n            event_data.primary_url = primary_url\n            event_data.secondary_url = secondary_url\n            timestamp = int((last_used * 1000000))\n            date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts site specific events.\n\nArgs:\nexceptions_dict (dict): Permission exceptions data from Preferences file.\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.", "source": "codesearchnet"}
{"code": "def filter_all_reachable_leaves_many(self, identifier_filters, language, forbidden_identifiers=None):\n        \n        for i, identifier_filter in enumerate(identifier_filters):\n            if len(identifier_filter) == 1 and not isinstance(identifier_filter[0], list):\n                identifier_filters[i] = [identifier_filter]\n        item_identifiers = [\n            identifier[1:] if identifier.startswith('-') else identifier\n            for identifier_filter in identifier_filters\n            for identifier in set(flatten(identifier_filter))\n        ]\n        if forbidden_identifiers is None:\n            forbidden_identifiers = []\n        for identifier in forbidden_identifiers:\n            item_identifiers.append(identifier)\n        translated = self.translate_identifiers(item_identifiers, language)\n        forbidden_item_ids = {translated[identifier] for identifier in forbidden_identifiers}\n        leaves = self.get_leaves({translated[i] for i in item_identifiers}, language=language, forbidden_item_ids=forbidden_item_ids)\n        result = []\n        for identifier_filter in identifier_filters:\n            if len(identifier_filter) == 0:\n                result.append(self.get_all_available_leaves(language=language, forbidden_item_ids=forbidden_item_ids))\n                continue\n            filter_result = None\n            filter_neg_result = set()\n            for inner_filter in identifier_filter:\n                inner_result = None\n                inner_neg_result = None\n                if len(inner_filter) == 0:\n                    raise Exception('Empty nested filters are not allowed.')\n                for identifier in inner_filter:\n                    if inner_neg_result is not None:\n                        raise Exception('Nested filters can not contain multiple statements.')\n                    if identifier.startswith('-'):\n                        inner_neg_result = set(leaves[translated[identifier[1:]]])\n                    else:\n                        if inner_result is None:\n                            inner_result = set()\n                        inner_result |= set(leaves[translated[identifier]])\n                if inner_result is not None:\n                    if filter_result is None:\n                        filter_result = inner_result\n                    else:\n                        filter_result &= inner_result\n                if inner_neg_result is not None:\n                    filter_neg_result != inner_neg_result\n            result.append(sorted(list(filter_result - filter_neg_result)))\n        return result", "docstring": "Provides the same functionality as .. py:method:: ItemManager.filter_all_reachable_leaves(),\nbut for more filters in the same time.\n\nArgs:\nidentifier_filters: list of identifier filters\nlanguage (str): language used for further filtering (some objects\nfor different languages share the same item\n\nReturns:\nlist: list of list of item ids", "source": "juraj-google-style"}
{"code": "def change_numbering(self, rename_dict, inplace=False):\n        \n        output = self if inplace else self.copy()\n        new_index = [rename_dict.get(key, key) for key in self.index]\n        output.index = new_index\n        if not inplace:\n            return output", "docstring": "Return the reindexed version of Cartesian.\n\nArgs:\nrename_dict (dict): A dictionary mapping integers on integers.\n\nReturns:\nCartesian: A renamed copy according to the dictionary passed.", "source": "juraj-google-style"}
{"code": "def get_session(self, app_path, session_id):\n        \n        if app_path not in self._applications:\n            raise ValueError(\"Application %s does not exist on this server\" % app_path)\n        return self._applications[app_path].get_session(session_id)", "docstring": "Get an active a session by name application path and session ID.\n\nArgs:\napp_path (str) :\nThe configured application path for the application to return\na session for.\n\nsession_id (str) :\nThe session ID of the session to retrieve.\n\nReturns:\nServerSession", "source": "juraj-google-style"}
{"code": "def check(self, cell):\n    pass", "docstring": "Check correctness against single Jupyter cell.\n\nArgs:\ncell: JSON representation of single cell.\n\nReturns None if test succeeds, raise exception if test fails.", "source": "github-repos"}
{"code": "class RandomUniform(RandomInitializer):\n\n    def __init__(self, minval=-0.05, maxval=0.05, seed=None):\n        self.minval = minval\n        self.maxval = maxval\n        super().__init__(seed=seed)\n\n    def __call__(self, shape, dtype=None):\n        return random.uniform(shape=shape, minval=self.minval, maxval=self.maxval, seed=self.seed, dtype=dtype)\n\n    def get_config(self):\n        base_config = super().get_config()\n        config = {'minval': self.minval, 'maxval': self.maxval}\n        return {**base_config, **config}", "docstring": "Random uniform initializer.\n\nDraws samples from a uniform distribution for given parameters.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = RandomUniform(minval=0.0, maxval=1.0)\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = RandomUniform(minval=0.0, maxval=1.0)\n>>> layer = Dense(3, kernel_initializer=initializer)\n\nArgs:\nminval: A python scalar or a scalar keras tensor. Lower bound of the\nrange of random values to generate (inclusive).\nmaxval: A python scalar or a scalar keras tensor. Upper bound of the\nrange of random values to generate (exclusive).\nseed: A Python integer or instance of\n`keras.backend.SeedGenerator`.\nUsed to make the behavior of the initializer\ndeterministic. Note that an initializer seeded with an integer\nor `None` (unseeded) will produce the same random values\nacross multiple calls. To get different random values\nacross multiple calls, use as seed an instance\nof `keras.backend.SeedGenerator`.", "source": "github-repos"}
{"code": "def __init__(self, name, row_identifier):\n    \n    super(SQLTableIdentifier, self).__init__()\n    self.name = name\n    self.row_identifier = row_identifier", "docstring": "Initializes a SQL table attribute container identifier.\n\nArgs:\nname (str): name of the table.\nrow_identifier (int): unique identifier of the row in the table.", "source": "juraj-google-style"}
{"code": "def run_filter_query(self, resource_name, filter_clause):\n        \n        url = self.base_url + \"/\" + resource_name\n        params = {\"filter\":json.dumps(filter_clause)}\n\n        r = requests.get(url, headers=self.headers, params=params)\n        logger.debug(\"requests.get result r.status_code:  {}\".format(r.status_code))\n\n        ClueApiClient._check_request_response(r)\n\n        return r.json()", "docstring": "run a query (get) against the CLUE api, using the API and user key fields of self and the fitler_clause provided\n\nArgs:\nresource_name: str - name of the resource / collection to query - e.g. genes, perts, cells etc.\nfilter_clause: dictionary - contains filter to pass to API to; uses loopback specification\n\nReturns: list of dictionaries containing the results of the query", "source": "juraj-google-style"}
{"code": "def optimizer(name):\n    warn_msg = 'Please update `registry.optimizer` callsite (likely due to a `HParams.optimizer` value)'\n    if (name == 'SGD'):\n        name = 'sgd'\n        tf.logging.warning((\"'SGD' optimizer now keyed by 'sgd'. %s\" % warn_msg))\n    elif (name == 'RMSProp'):\n        name = 'rms_prop'\n        tf.logging.warning((\"'RMSProp' optimizer now keyed by 'rms_prop'. %s\" % warn_msg))\n    else:\n        snake_name = misc_utils.camelcase_to_snakecase(name)\n        if (name != snake_name):\n            tf.logging.warning(('optimizer names now keyed by snake_case names. %s' % warn_msg))\n            name = snake_name\n    return Registries.optimizers[name]", "docstring": "Get pre-registered optimizer keyed by name.\n\n`name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and\nUpperCamelCase -> snake_case conversions included for legacy support.\n\nArgs:\nname: name of optimizer used in registration. This should be a snake case\nidentifier, though others supported for legacy reasons.\n\nReturns:\noptimizer", "source": "codesearchnet"}
{"code": "def install(pkg, target='LocalSystem', store=False, allow_untrusted=False):\n    if ('*.' not in pkg):\n        pkg = _quote(pkg)\n    target = _quote(target)\n    cmd = 'installer -pkg {0} -target {1}'.format(pkg, target)\n    if store:\n        cmd += ' -store'\n    if allow_untrusted:\n        cmd += ' -allowUntrusted'\n    python_shell = False\n    if ('*.' in cmd):\n        python_shell = True\n    return __salt__['cmd.run_all'](cmd, python_shell=python_shell)", "docstring": "Install a pkg file\n\nArgs:\npkg (str): The package to install\ntarget (str): The target in which to install the package to\nstore (bool): Should the package be installed as if it was from the\nstore?\nallow_untrusted (bool): Allow the installation of untrusted packages?\n\nReturns:\ndict: A dictionary containing the results of the installation\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' macpackage.install test.pkg", "source": "codesearchnet"}
{"code": "def transition_state(self, new_state):\n    if self.state == _InstrumentationBlockStates.UNKNOWN:\n        self.state = new_state\n        return self\n    else:\n        next_block = _InstrumentationBlock(state=new_state, prefix=self.prefix, previous_instrumentation_block=self)\n        if self.status_code in _InstrumentationStatusCodeCategories.TIMING:\n            next_block.begin_time = self.begin_time\n        return next_block", "docstring": "Transitions or sets the current instrumentation block to the new\nparser state.\n\nArgs:\nnew_state: _InstrumentationBlockStates, the state that the parser\nshould transition to.\n\nReturns:\nA new instrumentation block set to the new state, representing\nthe start of parsing a new instrumentation test method.\nAlternatively, if the current instrumentation block represents the\nstart of parsing a new instrumentation block (state UNKNOWN), then\nthis returns the current instrumentation block set to the now\nknown parsing state.", "source": "github-repos"}
{"code": "def fn_args(fn):\n    if isinstance(fn, functools.partial):\n        args = fn_args(fn.func)\n        args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]\n    else:\n        if _is_callable_object(fn):\n            fn = fn.__call__\n        args = tf_inspect.getfullargspec(fn).args\n        if _is_bound_method(fn) and args:\n            args.pop(0)\n    return tuple(args)", "docstring": "Get argument names for function-like object.\n\nArgs:\nfn: Function, or function-like object (e.g., result of `functools.partial`).\n\nReturns:\n`tuple` of string argument names.\n\nRaises:\nValueError: if partial function has positionally bound arguments", "source": "github-repos"}
{"code": "def get_item(self, name, bootstrap=False):\n        \n        for item in self._get_items(bootstrap):\n            if item.name == name:\n                return item\n        return None", "docstring": "Get a particular item in the specification.\n\nArgs:\nname (str): The name of the item to retrieve.\nbootstrap (bool): Only search bootstrap items\n\nReturns (YapconfItem):\nA YapconfItem if it is found, None otherwise.", "source": "juraj-google-style"}
{"code": "def measurements(self, value):\n        \n        if value == self._defaults['measurements'] and 'measurements' in self._values:\n            del self._values['measurements']\n        else:\n            self._values['measurements'] = value", "docstring": "The measurements property.\n\nArgs:\nvalue (hash). the property value.", "source": "juraj-google-style"}
{"code": "def __init__(self, columns: list[str], vocab_size: Optional[int]=None, smooth: bool=True, name: Optional[str]=None):\n    super().__init__(columns)\n    self.vocab_size = vocab_size\n    self.smooth = smooth\n    self.name = name\n    self.tfidf_weight = None", "docstring": "This function applies a tf-idf transformation on the given columns\nof incoming data.\n\nTFIDF outputs two artifacts for each column: the vocabulary index and\nthe tfidf weight. The vocabulary index is a mapping from the original\nvocabulary to the new vocabulary. The tfidf weight is a mapping\nfrom the original vocabulary to the tfidf score.\n\nInput passed to the TFIDF is not modified and used to calculate the\nrequired artifacts.\n\nArgs:\ncolumns: List of column names to apply the transformation.\nvocab_size: (Optional) An integer that specifies the size of the\nvocabulary. Defaults to None.\n\nIf vocab_size is None, then the size of the vocabulary is\ndetermined by `tft.get_num_buckets_for_transformed_feature`.\nsmooth: (Optional) A boolean that specifies whether to apply\nsmoothing to the tf-idf score. Defaults to True.\nname: (Optional) A string that specifies the name of the operation.", "source": "github-repos"}
{"code": "def _get_implicit_credentials(cls):\n    environ_checkers = [cls._implicit_credentials_from_files, cls._implicit_credentials_from_gae, cls._implicit_credentials_from_gce]\n    for checker in environ_checkers:\n        credentials = checker()\n        if (credentials is not None):\n            return credentials\n    raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)", "docstring": "Gets credentials implicitly from the environment.\n\nChecks environment in order of precedence:\n- Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to\na file with stored credentials information.\n- Stored \"well known\" file associated with `gcloud` command line tool.\n- Google App Engine (production and testing)\n- Google Compute Engine production environment.\n\nRaises:\nApplicationDefaultCredentialsError: raised when the credentials\nfail to be retrieved.", "source": "codesearchnet"}
{"code": "def build_pipeline_labels(job_metadata, task_metadata, task_id_pattern=None):\n  \n  labels = {\n      Label(name, job_metadata[name])\n      for name in ['job-name', 'job-id', 'user-id', 'dsub-version']\n  }\n\n  task_id = task_metadata.get('task-id')\n  if task_id is not None:  \n    if task_id_pattern:\n      task_id = task_id_pattern % task_id\n    labels.add(Label('task-id', str(task_id)))\n\n  task_attempt = task_metadata.get('task-attempt')\n  if task_attempt is not None:\n    labels.add(Label('task-attempt', str(task_attempt)))\n\n  return labels", "docstring": "Build a set() of standard job and task labels.\n\nArgs:\njob_metadata: Job metadata, such as job-id, job-name, and user-id.\ntask_metadata: Task metadata, such as the task-id.\ntask_id_pattern: A pattern for the task-id value, such as \"task-%d\"; the\noriginal google label values could not be strictly numeric, so \"task-\"\nwas prepended.\n\nReturns:\nA set of standard dsub Label() objects to attach to a pipeline.", "source": "juraj-google-style"}
{"code": "def _is_statically_shaped(element_spec):\n    for spec in nest.flatten(element_spec):\n        if isinstance(spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)):\n            if spec.shape.rank > 0 and spec.shape.as_list()[0] is None:\n                return False\n        else:\n            for component in spec._flat_tensor_specs:\n                if not component.shape.is_fully_defined():\n                    return False\n    return True", "docstring": "Test if an iterator output is statically shaped.\n\nFor sparse and ragged tensors this only tests the batch dimension.\n\nArgs:\nelement_spec: a nest structure of `tf.TypeSpec`. The element spec of the\ndataset of the iterator.\n\nReturns:\nTrue if the shape is static, false otherwise.", "source": "github-repos"}
{"code": "def _dequeue_return_value(self, tensors):\n    if self._names:\n        return {n: tensors[i] for i, n in enumerate(self._names)}\n    elif len(tensors) == 1:\n        return tensors[0]\n    else:\n        return tensors", "docstring": "Return the value to return from a dequeue op.\n\nIf the queue has names, return a dictionary with the\nnames as keys.  Otherwise return either a single tensor\nor a list of tensors depending on the length of `tensors`.\n\nArgs:\ntensors: List of tensors from the dequeue op.\n\nReturns:\nA single tensor, a list of tensors, or a dictionary\nof tensors.", "source": "github-repos"}
{"code": "def CheckDependencies(self, verbose_output=True):\n    \n    print('Checking availability and versions of dependencies.')\n    check_result = True\n\n    for module_name, dependency in sorted(self.dependencies.items()):\n      if module_name == 'sqlite3':\n        result, status_message = self._CheckSQLite3()\n      else:\n        result, status_message = self._CheckPythonModule(dependency)\n\n      if not result and module_name == 'lzma':\n        dependency.name = 'backports.lzma'\n        result, status_message = self._CheckPythonModule(dependency)\n\n      if not result and not dependency.is_optional:\n        check_result = False\n\n      self._PrintCheckDependencyStatus(\n          dependency, result, status_message, verbose_output=verbose_output)\n\n    if check_result and not verbose_output:\n      print('[OK]')\n\n    print('')\n    return check_result", "docstring": "Checks the availability of the dependencies.\n\nArgs:\nverbose_output (Optional[bool]): True if output should be verbose.\n\nReturns:\nbool: True if the dependencies are available, False otherwise.", "source": "juraj-google-style"}
{"code": "def __init__(self, name_context, spec, counter_factory, state_sampler):\n    assert isinstance(name_context, common.NameContext)\n    self.name_context = name_context\n    self.spec = spec\n    self.counter_factory = counter_factory\n    self.execution_context = None\n    self.consumers = collections.defaultdict(list)\n    self.metrics_container = MetricsContainer(self.name_context.metrics_name())\n    self.state_sampler = state_sampler\n    self.scoped_start_state = self.state_sampler.scoped_state(self.name_context, 'start', metrics_container=self.metrics_container)\n    self.scoped_process_state = self.state_sampler.scoped_state(self.name_context, 'process', metrics_container=self.metrics_container)\n    self.scoped_finish_state = self.state_sampler.scoped_state(self.name_context, 'finish', metrics_container=self.metrics_container)\n    self.receivers = []\n    self.setup_done = False\n    self.step_name = None\n    self.data_sampler: Optional[DataSampler] = None", "docstring": "Initializes a worker operation instance.\n\nArgs:\nname_context: A NameContext instance, with the name information for this\noperation.\nspec: A operation_specs.Worker* instance.\ncounter_factory: The CounterFactory to use for our counters.\nstate_sampler: The StateSampler for the current operation.", "source": "github-repos"}
{"code": "def __init__(self, residual_restriction=None, process_continuation=None, future_output_watermark=None):\n    self.residual_restriction = residual_restriction\n    self.process_continuation = process_continuation\n    self.future_output_watermark = future_output_watermark", "docstring": "Returned as a result of a `invoke_process_element()` invocation.\n\nArgs:\nresidual_restriction: a restriction for the unprocessed part of the\nelement.\nprocess_continuation: a `ProcessContinuation` if one was returned as the\nlast element of the SDF `process()` invocation.\nfuture_output_watermark: output watermark of the results that will be\nproduced when invoking the Splittable `DoFn`\nfor the current element with\n`residual_restriction`.", "source": "github-repos"}
{"code": "def _dict_to_tensor(self, x, k):\n    return array_ops_stack.stack([x[i] for i in range(k)])", "docstring": "Convert a dictionary to a tensor.\n\nArgs:\nx: A dictionary of length k.\nk: Dimension of x.\n\nReturns:\nA tensor with the same dimension.", "source": "github-repos"}
{"code": "def log_(\n    message: str,\n    logger: logging.Logger,\n    level: int = logging.INFO,\n    extra: Optional[Dict] = None,\n    trim: bool = False,\n) -> None:\n    \n    if extra is None:\n        extra = {}\n    \n    if message:\n        message = message.replace(\"\\n\", \"\").replace(\"  \", \" \").replace(\"{ \", \"{\")\n    if trim:\n        message = _trim_message(message)\n    \n    logger.log(level, message, extra=extra)", "docstring": "Log a request or response\n\nArgs:\nmessage: JSON-RPC request or response string.\nlogger:\nlevel: Log level.\nextra: More details to include in the log entry.\ntrim: Abbreviate log messages.", "source": "juraj-google-style"}
{"code": "def untar_to_directory(tarfile: str, directory: str, verbose: bool=False, gzipped: bool=False, skip_if_dir_exists: bool=True, run_func: Callable[([List[str]], Any)]=None, chdir_via_python: bool=True) -> None:\n    if (skip_if_dir_exists and os.path.isdir(directory)):\n        log.info('Skipping extraction of {} as directory {} exists', tarfile, directory)\n        return\n    log.info('Extracting {} -> {}', tarfile, directory)\n    require_executable(TAR)\n    mkdir_p(directory)\n    args = [TAR, '-x']\n    if verbose:\n        args.append('-v')\n    if gzipped:\n        args.append('-z')\n    if (platform.system() != 'Darwin'):\n        args.append('--force-local')\n    args.extend(['-f', tarfile])\n    if chdir_via_python:\n        with pushd(directory):\n            run_func(args)\n    else:\n        args.extend(['-C', directory])\n        run_func(args)", "docstring": "Unpacks a TAR file into a specified directory.\n\nArgs:\ntarfile: filename of the ``.tar`` file\ndirectory: destination directory\nverbose: be verbose?\ngzipped: is the ``.tar`` also gzipped, e.g. a ``.tar.gz`` file?\nskip_if_dir_exists: don't do anything if the destrination directory\nexists?\nrun_func: function to use to call an external command\nchdir_via_python: change directory via Python, not via ``tar``.\nConsider using this via Windows, because Cygwin ``tar`` v1.29 falls\nover when given a Windows path for its ``-C`` (or ``--directory``)\noption.", "source": "codesearchnet"}
{"code": "def _get_new_node_defs(self):\n    node_def_bytes = self.node_file.read()\n    node_defs = []\n    cur_pos = 0\n    while cur_pos < len(node_def_bytes):\n        size_bytes = node_def_bytes[cur_pos:cur_pos + 8]\n        size, = struct.unpack('<Q', size_bytes)\n        cur_pos += 8\n        node_def = node_def_pb2.NodeDef()\n        node_def.ParseFromString(node_def_bytes[cur_pos:cur_pos + size])\n        ignored_ops = []\n        if context.run_eager_op_as_function_enabled():\n            ignored_ops.extend(['_Arg', '_Retval', 'NoOp'])\n            ignored_ops.extend(['_Recv', '_HostRecv'])\n        if node_def.op not in ignored_ops:\n            node_defs.append(node_def)\n        cur_pos += size\n    self.assertEqual(cur_pos, len(node_def_bytes))\n    return node_defs", "docstring": "Gets new NodeDefs written by the NodeFileWriter.\n\nReturns:\nA list of new NodeDefs in the file written by NodeDefWriter since the last\ntime this method was called.", "source": "github-repos"}
{"code": "def __init__(self, s3_conn, es_client):\n        \n        self.s3_conn = s3_conn\n        self.es_client = es_client", "docstring": "Base class for Elasticsearch indexers\n\nSubclasses implement the index setting definition and transformation of data,\nThe base class handles index management and bulk indexing with ES\n\nArgs:\ns3_conn - a boto s3 connection\nes_client - an Elasticsearch indices client", "source": "juraj-google-style"}
{"code": "def _wait_for_any_job(provider, job_ids, poll_interval):\n    if (not job_ids):\n        return\n    while True:\n        tasks = provider.lookup_job_tasks({'*'}, job_ids=job_ids)\n        running_jobs = set()\n        failed_jobs = set()\n        for t in tasks:\n            status = t.get_field('task-status')\n            job_id = t.get_field('job-id')\n            if (status in ['FAILURE', 'CANCELED']):\n                failed_jobs.add(job_id)\n            if (status == 'RUNNING'):\n                running_jobs.add(job_id)\n        remaining_jobs = running_jobs.difference(failed_jobs)\n        if (failed_jobs or (len(remaining_jobs) != len(job_ids))):\n            return remaining_jobs\n        SLEEP_FUNCTION(poll_interval)", "docstring": "Waits until any of the listed jobs is not running.\n\nIn particular, if any of the jobs sees one of its tasks fail,\nwe count the whole job as failing (but do not terminate the remaining\ntasks ourselves).\n\nArgs:\nprovider: job service provider\njob_ids: a list of job IDs (string) to wait for\npoll_interval: integer seconds to wait between iterations\n\nReturns:\nA set of the jobIDs with still at least one running task.", "source": "codesearchnet"}
{"code": "def poll_stack(self):\n    logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))\n    time.sleep(POLL_INTERVAL)\n    completed_states = ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'DELETE_COMPLETE']\n    stack_name = self._config.get('environment', {}).get('stack_name', None)\n    while True:\n        try:\n            response = self._cloudFormation.describe_stacks(StackName=stack_name)\n            stack = response['Stacks'][0]\n            current_status = stack['StackStatus']\n            logging.info('current status of {}: {}'.format(stack_name, current_status))\n            if (current_status.endswith('COMPLETE') or current_status.endswith('FAILED')):\n                if (current_status in completed_states):\n                    return True\n                else:\n                    return False\n            time.sleep(POLL_INTERVAL)\n        except ClientError as wtf:\n            if (str(wtf).find('does not exist') == (- 1)):\n                logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))\n                traceback.print_exc(file=sys.stdout)\n                return False\n            else:\n                logging.info('{} is gone'.format(stack_name))\n                return True\n        except Exception as wtf:\n            logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))\n            traceback.print_exc(file=sys.stdout)\n            return False", "docstring": "Spin in a loop while the Cloud Formation process either fails or succeeds\n\nArgs:\nNone\n\nReturns:\nGood or bad; True or False", "source": "codesearchnet"}
{"code": "def ops_used_by_graph_def(graph_def):\n    name_to_function = {}\n    for fun in graph_def.library.function:\n        name_to_function[fun.signature.name] = fun\n    used_ops = set()\n    functions_to_process = []\n\n    def mark_op_as_used(op):\n        if op not in used_ops and op in name_to_function:\n            functions_to_process.append(name_to_function[op])\n        used_ops.add(op)\n\n    def process_node(node):\n        mark_op_as_used(node.op)\n        if node.op in ['PartitionedCall', 'StatefulPartitionedCall']:\n            mark_op_as_used(node.attr['f'].func.name)\n    for node in graph_def.node:\n        process_node(node)\n    while functions_to_process:\n        fun = functions_to_process.pop()\n        for node in fun.node_def:\n            process_node(node)\n    return [op for op in used_ops if op not in name_to_function]", "docstring": "Collect the list of ops used by a graph.\n\nDoes not validate that the ops are all registered.\n\nArgs:\ngraph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.\n\nReturns:\nA list of strings, each naming an op used by the graph.", "source": "github-repos"}
{"code": "def Lookup(self, name):\n    if not self._name2item:\n        self._InitCache()\n    return self._name2item[name]", "docstring": "Convenience function: Look up a given name in the global namespace.\n\nTries to find a constant, function or class by this name.\n\nArgs:\nname: Name to look up.\n\nReturns:\nA Constant, Function or Class.\n\nRaises:\nKeyError: if this identifier doesn't exist.", "source": "github-repos"}
{"code": "def get_field_to_observations_map(generator, query_for_tag=''):\n\n    def increment(stat, event, tag=''):\n        assert (stat in TRACKED_FIELDS)\n        field_to_obs[stat].append(Observation(step=event.step, wall_time=event.wall_time, tag=tag)._asdict())\n    field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])\n    for event in generator:\n        if (event.HasField('graph_def') and (not query_for_tag)):\n            increment('graph', event)\n        if (event.HasField('session_log') and (not query_for_tag)):\n            status = event.session_log.status\n            if (status == event_pb2.SessionLog.START):\n                increment('sessionlog:start', event)\n            elif (status == event_pb2.SessionLog.STOP):\n                increment('sessionlog:stop', event)\n            elif (status == event_pb2.SessionLog.CHECKPOINT):\n                increment('sessionlog:checkpoint', event)\n        elif event.HasField('summary'):\n            for value in event.summary.value:\n                if (query_for_tag and (value.tag != query_for_tag)):\n                    continue\n                for (proto_name, display_name) in SUMMARY_TYPE_TO_FIELD.items():\n                    if value.HasField(proto_name):\n                        increment(display_name, event, value.tag)\n    return field_to_obs", "docstring": "Return a field to `Observations` dict for the event generator.\n\nArgs:\ngenerator: A generator over event protos.\nquery_for_tag: A string that if specified, only create observations for\nevents with this tag name.\n\nReturns:\nA dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.", "source": "codesearchnet"}
{"code": "def _count_objs(self, obj, path=None, **kwargs):\n        \n        sub_val = None\n        \n        if isinstance(obj, dict):\n            for key, value in obj.items():\n                if isinstance(value, (list, dict)):\n                    kwargs = self._count_objs(value,\n                                              self.make_path(key, path),\n                                              **kwargs)\n                else:\n                    if self.make_path(key, path) == self.sub_total:\n                        \n                        sub_val = value\n                    kwargs['current'] = self._increment_prop(key,\n                                                             path,\n                                                             **kwargs)\n        elif isinstance(obj, list):\n            for item in obj:\n                if isinstance(item, (list, dict)):\n                    kwargs = self._count_objs(item, path, **kwargs)\n                else:\n                    if path == self.sub_total:\n                        pdb.set_trace()\n                        sub_val = item\n                    kwargs['current'] = self._increment_prop(path, **kwargs)\n        else:\n            kwargs['current'] = self._increment_prop(path, **kwargs)\n            if path == self.sub_total:\n                pdb.set_trace()\n                sub_val = item\n        if kwargs.get('sub_val') is None:\n            kwargs['sub_val'] = sub_val\n        return kwargs", "docstring": "cycles through the object and adds in count values\n\nArgs:\n-----\nobj: the object to parse\npath: the current path\n\nkwargs:\n-------\ncurrent: a dictionary of counts for current call\nsub_val: the value to use for subtotal aggregation", "source": "juraj-google-style"}
{"code": "def _evolve(self, state, qargs=None):\n        \n        \n        if qargs is not None:\n            return SuperOp(self)._evolve(state, qargs)\n\n        \n        state = self._format_state(state)\n        if state.shape[0] != self._input_dim:\n            raise QiskitError(\n                \"QuantumChannel input dimension is not equal to state dimension.\"\n            )\n        if state.ndim == 1 and self._data[1] is None and \\\n           self._data[0].shape[0] \n            \n            \n            return np.dot(self._data[0], state)\n        \n        state = self._format_state(state, density_matrix=True)\n        stine_l, stine_r = self._data\n        if stine_r is None:\n            stine_r = stine_l\n        din, dout = self.dim\n        dtr = stine_l.shape[0] \n        shape = (dout, dtr, din)\n        return np.einsum('iAB,BC,jAC->ij', np.reshape(stine_l, shape), state,\n                         np.reshape(np.conjugate(stine_r), shape))", "docstring": "Evolve a quantum state by the QuantumChannel.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nQuantumState: the output quantum state.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"}
{"code": "def _handle_azure_exception():\n    try:\n        (yield)\n    except _AzureHttpError as exception:\n        if (exception.status_code in _ERROR_CODES):\n            raise _ERROR_CODES[exception.status_code](str(exception))\n        raise", "docstring": "Handles Azure exception and convert to class IO exceptions\n\nRaises:\nOSError subclasses: IO error.", "source": "codesearchnet"}
{"code": "def _run_using_default_session(operation, feed_dict, graph, session=None) -> None:\n    if session is None:\n        session = stack.get_default_session()\n        if session is None:\n            raise ValueError('Cannot execute operation using `run()`: No default session is registered. Use `with sess.as_default():` or pass an explicit session to `run(session=sess)`')\n        if session.graph is not graph:\n            raise ValueError(\"Cannot use the default session to execute operation: the operation's graph is different from the session's graph. Pass an explicit session to run(session=sess).\")\n    elif session.graph is not graph:\n        raise ValueError(\"Cannot use the given session to execute operation: the operation's graph is different from the session's graph.\")\n    session.run(operation, feed_dict)", "docstring": "Uses the default session to run \"operation\".\n\nArgs:\noperation: The Operation to be run.\nfeed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\nnumpy ndarrays, TensorProtos, or strings.\ngraph: The graph in which \"operation\" is defined.\nsession: (Optional) A different session to use to run \"operation\".\n\nRaises:\nValueError: If no default session is available; the default session\ndoes not have \"graph\" as its graph; or if \"session\" is specified,\nand it does not have \"graph\" as its graph.", "source": "github-repos"}
{"code": "def FilterItems(self, filterFn, key=None):\n    \n    with self._mutex:\n      if key:\n        if key in self._buckets:\n          return self._buckets[key].FilterItems(filterFn)\n        else:\n          return 0\n      else:\n        return sum(bucket.FilterItems(filterFn)\n                   for bucket in self._buckets.values())", "docstring": "Filter items within a Reservoir, using a filtering function.\n\nArgs:\nfilterFn: A function that returns True for the items to be kept.\nkey: An optional bucket key to filter. If not specified, will filter all\nall buckets.\n\nReturns:\nThe number of items removed.", "source": "juraj-google-style"}
{"code": "def Sample(self, tasks_status):\n    \n    sample_time = time.time()\n    sample = '{0:f}\\t{1:d}\\t{2:d}\\t{3:d}\\t{4:d}\\t{5:d}\\n'.format(\n        sample_time, tasks_status.number_of_queued_tasks,\n        tasks_status.number_of_tasks_processing,\n        tasks_status.number_of_tasks_pending_merge,\n        tasks_status.number_of_abandoned_tasks,\n        tasks_status.total_number_of_tasks)\n    self._WritesString(sample)", "docstring": "Takes a sample of the status of queued tasks for profiling.\n\nArgs:\ntasks_status (TasksStatus): status information about tasks.", "source": "juraj-google-style"}
{"code": "def make_qq_plot(kev, obs, mdl, unit, key_text):\n    import omega as om\n    kev = np.asarray(kev)\n    obs = np.asarray(obs)\n    mdl = np.asarray(mdl)\n    c_obs = np.cumsum(obs)\n    c_mdl = np.cumsum(mdl)\n    mx = max(c_obs[(- 1)], c_mdl[(- 1)])\n    p = om.RectPlot()\n    p.addXY([0, mx], [0, mx], '1:1')\n    p.addXY(c_mdl, c_obs, key_text)\n    locs = (np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2))\n    c0 = (mx * 1.05)\n    c1 = (mx * 1.1)\n    for loc in locs:\n        i0 = int(np.floor(loc))\n        frac = (loc - i0)\n        kevval = (((1 - frac) * kev[i0]) + (frac * kev[(i0 + 1)]))\n        mdlval = (((1 - frac) * c_mdl[i0]) + (frac * c_mdl[(i0 + 1)]))\n        obsval = (((1 - frac) * c_obs[i0]) + (frac * c_obs[(i0 + 1)]))\n        p.addXY([mdlval, mdlval], [c0, c1], ('%.2f keV' % kevval), dsn=2)\n        p.addXY([c0, c1], [obsval, obsval], None, dsn=2)\n    p.setLabels(('Cumulative model ' + unit), ('Cumulative data ' + unit))\n    p.defaultKeyOverlay.vAlign = 0.3\n    return p", "docstring": "Make a quantile-quantile plot comparing events and a model.\n\n*kev*\nA 1D, sorted array of event energy bins measured in keV.\n*obs*\nA 1D array giving the number or rate of events in each bin.\n*mdl*\nA 1D array giving the modeled number or rate of events in each bin.\n*unit*\nText describing the unit in which *obs* and *mdl* are measured; will\nbe shown on the plot axes.\n*key_text*\nText describing the quantile-quantile comparison quantity; will be\nshown on the plot legend.\nReturns:\nAn :class:`omega.RectPlot` instance.\n\n*TODO*: nothing about this is Sherpa-specific. Same goes for some of the\nplotting routines in :mod:`pkwit.environments.casa.data`; might be\nreasonable to add a submodule for generic X-ray-y plotting routines.", "source": "codesearchnet"}
{"code": "def parse_unique_urlencoded(content):\n    urlencoded_params = urllib.parse.parse_qs(content)\n    params = {}\n    for (key, value) in six.iteritems(urlencoded_params):\n        if (len(value) != 1):\n            msg = ('URL-encoded content contains a repeated value:%s -> %s' % (key, ', '.join(value)))\n            raise ValueError(msg)\n        params[key] = value[0]\n    return params", "docstring": "Parses unique key-value parameters from urlencoded content.\n\nArgs:\ncontent: string, URL-encoded key-value pairs.\n\nReturns:\ndict, The key-value pairs from ``content``.\n\nRaises:\nValueError: if one of the keys is repeated.", "source": "codesearchnet"}
{"code": "def batch_workflow_status(self, batch_workflow_id):\n        \n        self.logger.debug('Get status of batch workflow: ' + batch_workflow_id)\n        url = '%(base_url)s/batch_workflows/%(batch_id)s' % {\n            'base_url': self.base_url, 'batch_id': batch_workflow_id\n        }\n        r = self.gbdx_connection.get(url)\n\n        return r.json()", "docstring": "Checks GBDX batch workflow status.\n\nArgs:\nbatch workflow_id (str): Batch workflow id.\n\nReturns:\nBatch Workflow status (str).", "source": "juraj-google-style"}
{"code": "def get_user_groups(self, user):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_user_groups(user)", "docstring": "Get user's group memberships.\n\nArgs:\nuser (string): User name.\n\nReturns:\n(list): User's groups.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def __getattr__(self, name):\n    \n    self._conn.send((self._ACCESS, name))\n    return self._receive()", "docstring": "Request an attribute from the environment.\n\nNote that this involves communication with the external process, so it can\nbe slow.\n\nArgs:\nname: Attribute to access.\n\nReturns:\nValue of the attribute.", "source": "juraj-google-style"}
{"code": "def get_mnemonics(self, mnemonics, uwis=None, alias=None):\n    uwis = (uwis or self.uwis)\n    wells = [w for w in self.__list if (w.uwi in uwis)]\n    all_wells = []\n    for w in wells:\n        this_well = [w.get_mnemonic(m, alias=alias) for m in mnemonics]\n        all_wells.append(this_well)\n    return all_wells", "docstring": "Looks at all the wells in turn and returns the highest thing\nin the alias table.\n\nArgs:\nmnemonics (list)\nalias (dict)\n\nReturns:\nlist. A list of lists.", "source": "codesearchnet"}
{"code": "def checkDeterminism(self, dataset_fn, expect_determinism, expected_elements):\n    if expect_determinism:\n        dataset = dataset_fn(100)\n        actual = self.getDatasetOutput(dataset)\n        self.assertAllEqual(expected_elements, actual)\n        return\n    for delay_ms in [10, 100, 1000, 20000, 100000]:\n        dataset = dataset_fn(delay_ms)\n        actual = self.getDatasetOutput(dataset)\n        self.assertCountEqual(expected_elements, actual)\n        for i in range(len(actual)):\n            if actual[i] != expected_elements[i]:\n                return\n    self.fail('Failed to observe nondeterministic ordering')", "docstring": "Tests whether a dataset produces its elements deterministically.\n\n`dataset_fn` takes a delay_ms argument, which tells it how long to delay\nproduction of the first dataset element. This gives us a way to trigger\nout-of-order production of dataset elements.\n\nArgs:\ndataset_fn: A function taking a delay_ms argument.\nexpect_determinism: Whether to expect deterministic ordering.\nexpected_elements: The elements expected to be produced by the dataset,\nassuming the dataset produces elements in deterministic order.", "source": "github-repos"}
{"code": "def as_object(obj):\n    LOGGER.debug('as_object(%s)', obj)\n    if isinstance(obj, datetime.date):\n        return as_date(obj)\n    elif hasattr(obj, '__dict__'):\n        out = {k: obj.__dict__[k] for k in obj.__dict__ if (not k.startswith('_'))}\n        for (k, v) in ((p, getattr(obj, p)) for (p, _) in inspect.getmembers(obj.__class__, (lambda x: isinstance(x, property)))):\n            out[k] = v\n        return out", "docstring": "Return a JSON serializable type for ``o``.\n\nArgs:\nobj (:py:class:`object`): the object to be serialized.\n\nRaises:\n:py:class:`AttributeError`:\nwhen ``o`` is not a Python object.\n\nReturns:\n(dict): JSON serializable type for the given object.", "source": "codesearchnet"}
{"code": "def compareBulk(self, retina_name, body):\n        \n\n        resourcePath = '/compare/bulk'\n        method = 'POST'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        queryParams['retina_name'] = retina_name\n        postData = body\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n        return [metric.Metric(**r) for r in response.json()]", "docstring": "Bulk compare\nArgs:\nretina_name, str: The retina name (required)\nbody, ExpressionOperation: Bulk comparison of elements 2 by 2 (required)\nReturns: Array[Metric]", "source": "juraj-google-style"}
{"code": "def add_report(self, specification_name, report):\n        \n        self._reports[specification_name] = report\n\n        self._total = self._total + report.testsRun\n        self._failures = self._failures + len(report.failures)\n        self._errors = self._errors + len(report.errors)\n        self._success = self._total - self._failures - self._errors", "docstring": "Adds a given report with the given specification_name as key\nto the reports list and computes the number of success, failures\nand errors\n\nArgs:\nspecification_name: string representing the specification (with \".spec\")\nreport: The", "source": "juraj-google-style"}
{"code": "def _extend_before(self, other):\n    other_num_lines = other.num_lines()\n    self._lines = other.lines + self._lines\n    new_font_attr_segs = {}\n    for line_index in self.font_attr_segs:\n        new_font_attr_segs[other_num_lines + line_index] = self.font_attr_segs[line_index]\n    new_font_attr_segs.update(other.font_attr_segs)\n    self._font_attr_segs = new_font_attr_segs\n    new_annotations = {}\n    for key in self._annotations:\n        if isinstance(key, int):\n            new_annotations[other_num_lines + key] = self.annotations[key]\n        else:\n            new_annotations[key] = other.annotations[key]\n    new_annotations.update(other.annotations)\n    self._annotations = new_annotations", "docstring": "Add another RichTextLines object to the front.\n\nArgs:\nother: (RichTextLines) The other object to add to the front to this\nobject.", "source": "github-repos"}
{"code": "def join(self, timeout_s=None):\n    if (not self.thread):\n        return False\n    self.thread.join(timeout_s)\n    return self.running", "docstring": "Joins blocking until the interval ends or until timeout is reached.\n\nArgs:\ntimeout_s: The time in seconds to wait, defaults to forever.\nReturns:\nTrue if the interval is still running and we reached the timeout.", "source": "codesearchnet"}
{"code": "def add_to_dumper(dumper: Type, classes: List[Type]) -> None:\n    \n    if not isinstance(classes, list):\n        classes = [classes]  \n    for class_ in classes:\n        if issubclass(class_, enum.Enum):\n            dumper.add_representer(class_, EnumRepresenter(class_))\n        elif issubclass(class_, str) or issubclass(class_, UserString):\n            dumper.add_representer(class_, UserStringRepresenter(class_))\n        else:\n            dumper.add_representer(class_, Representer(class_))", "docstring": "Register user-defined classes with the Dumper.\n\nThis enables the Dumper to write objects of your classes to a \\\nYAML file. Note that all the arguments are types, not instances!\n\nArgs:\ndumper: Your dumper class(!), derived from yatiml.Dumper\nclasses: One or more classes to add.", "source": "juraj-google-style"}
{"code": "def _forward_backward_log(state_trans_log_probs, initial_state_log_probs, final_state_log_probs, observed_log_probs, sequence_length):\n    if state_trans_log_probs.shape.ndims == 2:\n        perm = [1, 0]\n    elif state_trans_log_probs.shape.ndims == 3:\n        perm = [0, 2, 1]\n    else:\n        raise ValueError(f'Rank of argument `state_trans_log_probs` must be known and equal to 2 or 3. Received state_trans_log_probs={state_trans_log_probs} of rank {state_trans_log_probs.shape.ndims}')\n    bwd_state_trans_log_probs = array_ops.transpose(state_trans_log_probs, perm)\n    batch_size = _get_dim(observed_log_probs, 1)\n\n    def _forward(state_log_prob, obs_log_prob):\n        state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)\n        state_log_prob += state_trans_log_probs\n        state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)\n        state_log_prob += obs_log_prob\n        log_prob_sum = math_ops.reduce_logsumexp(state_log_prob, axis=-1, keepdims=True)\n        state_log_prob -= log_prob_sum\n        return state_log_prob\n    fwd = _scan(_forward, observed_log_probs, initial_state_log_probs, inclusive=True)\n\n    def _backward(accs, elems):\n        \n        state_log_prob, cum_log_sum = accs\n        obs_log_prob, mask = elems\n        state_log_prob += obs_log_prob\n        state_log_prob = array_ops.expand_dims(state_log_prob, axis=1)\n        state_log_prob += bwd_state_trans_log_probs\n        state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)\n        log_prob_sum = math_ops.reduce_logsumexp(state_log_prob, axis=-1, keepdims=True)\n        state_log_prob -= log_prob_sum\n        cum_log_sum += array_ops.squeeze(log_prob_sum, axis=[-1]) * mask\n        batched_mask = array_ops.expand_dims(mask, axis=1)\n        out = state_log_prob * batched_mask\n        out += final_state_log_probs * (1.0 - batched_mask)\n        return (out, cum_log_sum)\n    zero_log_sum = array_ops.zeros([batch_size])\n    maxlen = _get_dim(observed_log_probs, 0)\n    mask = array_ops.sequence_mask(sequence_length, maxlen, dtypes.float32)\n    mask = array_ops.transpose(mask, perm=[1, 0])\n    bwd, cum_log_sum = _scan(_backward, (observed_log_probs, mask), (final_state_log_probs, zero_log_sum), reverse=True, inclusive=True)\n    fwd_bwd_log_probs = fwd[1:] + bwd[1:]\n    fwd_bwd_log_probs_sum = math_ops.reduce_logsumexp(fwd_bwd_log_probs, axis=2, keepdims=True)\n    fwd_bwd_log_probs -= fwd_bwd_log_probs_sum\n    fwd_bwd_log_probs += math_ops.log(array_ops.expand_dims(mask, axis=2))\n    log_likelihood = bwd[0, :, 0] + cum_log_sum[0]\n    return (fwd_bwd_log_probs, log_likelihood)", "docstring": "Forward-backward algorithm computed in log domain.\n\nArgs:\nstate_trans_log_probs: tensor of shape [states, states] or if different\ntransition matrix per batch [batch_size, states, states]\ninitial_state_log_probs: tensor of shape [batch_size, states]\nfinal_state_log_probs: tensor of shape [batch_size, states]\nobserved_log_probs: tensor of shape [frames, batch_size, states]\nsequence_length: tensor of shape [batch_size]\n\nReturns:\nforward backward log probabilities: tensor of shape [frames, batch, states]\nlog_likelihood: tensor of shape [batch_size]\n\nRaises:\nValueError: If state_trans_log_probs has unknown or incorrect rank.", "source": "github-repos"}
{"code": "def get_blocks(self, block_structure=None):\n    if (block_structure is None):\n        block_structure = self.block_structure\n    try:\n        return self._get_blocks(block_structure)\n    except IncompatibleBlockStructures as e:\n        raise e", "docstring": "For a reducible circuit, get a sequence of subblocks that when\nconcatenated again yield the original circuit.  The block structure\ngiven has to be compatible with the circuits actual block structure,\ni.e. it can only be more coarse-grained.\n\nArgs:\nblock_structure (tuple): The block structure according to which the\nsubblocks are generated (default = ``None``, corresponds to the\ncircuit's own block structure)\n\nReturns:\nA tuple of subblocks that the circuit consists of.\n\nRaises:\n.IncompatibleBlockStructures", "source": "codesearchnet"}
{"code": "def quantile_for_list_of_values(self, **kwargs):\n    if self._is_transposed:\n        kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)\n        return self.transpose().quantile_for_list_of_values(**kwargs)\n    axis = kwargs.get('axis', 0)\n    q = kwargs.get('q')\n    numeric_only = kwargs.get('numeric_only', True)\n    assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))\n    if numeric_only:\n        new_columns = self.numeric_columns()\n    else:\n        new_columns = [col for (col, dtype) in zip(self.columns, self.dtypes) if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))]\n    if axis:\n        nonnumeric = [col for (col, dtype) in zip(self.columns, self.dtypes) if (not is_numeric_dtype(dtype))]\n        query_compiler = self.drop(columns=nonnumeric)\n        new_columns = query_compiler.index\n    else:\n        query_compiler = self\n\n    def quantile_builder(df, **kwargs):\n        result = df.quantile(**kwargs)\n        return (result.T if (axis == 1) else result)\n    func = query_compiler._prepare_method(quantile_builder, **kwargs)\n    q_index = pandas.Float64Index(q)\n    new_data = query_compiler._map_across_full_axis(axis, func)\n    if (axis == 1):\n        q_index = new_columns\n        new_columns = pandas.Float64Index(q)\n    result = self.__constructor__(new_data, q_index, new_columns)\n    return (result.transpose() if (axis == 1) else result)", "docstring": "Returns Manager containing quantiles along an axis for numeric columns.\n\nReturns:\nDataManager containing quantiles of original DataManager along an axis.", "source": "codesearchnet"}
{"code": "def find_dependencies(self, dataset_keys, **dfilter):\n    unknown_datasets = set()\n    for key in dataset_keys.copy():\n        (n, unknowns) = self._find_dependencies(key, **dfilter)\n        dataset_keys.discard(key)\n        if (n is not None):\n            dataset_keys.add(n.name)\n        if unknowns:\n            unknown_datasets.update(unknowns)\n            continue\n        self.add_child(self, n)\n    return unknown_datasets", "docstring": "Create the dependency tree.\n\nArgs:\ndataset_keys (iterable): Strings or DatasetIDs to find dependencies for\n**dfilter (dict): Additional filter parameters. See\n`satpy.readers.get_key` for more details.\n\nReturns:\n(Node, set): Root node of the dependency tree and a set of unknown datasets", "source": "codesearchnet"}
{"code": "def get(self) -> Union[(Event, None)]:\n    message = self._queue.get_message()\n    if (message and (message['type'] == 'message')):\n        event_id = DB.get_event(self._pub_key, self._processed_key)\n        event_data_str = DB.get_hash_value(self._data_key, event_id)\n        event_dict = ast.literal_eval(event_data_str)\n        event_dict['id'] = event_id\n        event_dict['subscriber'] = self._subscriber\n        return Event.from_config(event_dict)\n    return None", "docstring": "Get the latest event from the queue.\n\nCall this method to query the queue for the latest event.\n\nIf no event has been published None is returned.\n\nReturns:\nEvent or None", "source": "codesearchnet"}
{"code": "def GetTSKFileByPathSpec(self, path_spec):\n    \n    \n    \n    inode = getattr(path_spec, 'inode', None)\n    location = getattr(path_spec, 'location', None)\n\n    if inode is not None:\n      tsk_file = self._tsk_file_system.open_meta(inode=inode)\n    elif location is not None:\n      tsk_file = self._tsk_file_system.open(location)\n    else:\n      raise errors.PathSpecError(\n          'Path specification missing inode and location.')\n\n    return tsk_file", "docstring": "Retrieves the SleuthKit file object for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npytsk3.File: TSK file.\n\nRaises:\nPathSpecError: if the path specification is missing inode and location.", "source": "juraj-google-style"}
{"code": "def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):\n    if (not outdir):\n        outdir = self.structure_dir\n        if (not outdir):\n            raise ValueError('Output directory must be specified')\n    if (not pdb_file_type):\n        pdb_file_type = self.pdb_file_type\n    if (self.num_structures_experimental == 0):\n        log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))\n        return\n    downloaded_pdb_ids = []\n    for s in self.get_experimental_structures():\n        log.debug('{}: downloading structure file from the PDB...'.format(s.id))\n        s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)\n        downloaded_pdb_ids.append(s.id)\n    return downloaded_pdb_ids", "docstring": "Download ALL mapped experimental structures to the protein structures directory.\n\nArgs:\noutdir (str): Path to output directory, if protein structures directory not set or other output directory is\ndesired\npdb_file_type (str): Type of PDB file to download, if not already set or other format is desired\nforce_rerun (bool): If files should be re-downloaded if they already exist\n\nReturns:\nlist: List of PDB IDs that were downloaded\n\nTodo:\n* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info", "source": "codesearchnet"}
{"code": "def confirmdir(self, target_directory):\n        \n        try:\n            directory = self.resolve(target_directory)\n        except IOError as exc:\n            self.raise_os_error(exc.errno, target_directory)\n        if not directory.st_mode & S_IFDIR:\n            if self.is_windows_fs and IS_PY2:\n                error_nr = errno.EINVAL\n            else:\n                error_nr = errno.ENOTDIR\n            self.raise_os_error(error_nr, target_directory, 267)\n        return directory", "docstring": "Test that the target is actually a directory, raising OSError\nif not.\n\nArgs:\ntarget_directory: Path to the target directory within the fake\nfilesystem.\n\nReturns:\nThe FakeDirectory object corresponding to target_directory.\n\nRaises:\nOSError: if the target is not a directory.", "source": "juraj-google-style"}
{"code": "def args_to_dict(args):\n    arguments = dict()\n    for arg in args.split(','):\n        (key, value) = arg.split('=')\n        arguments[key] = value\n    return arguments", "docstring": "Convert command line arguments in a comma separated string to a dictionary\n\nArgs:\nargs (str): Command line arguments\n\nReturns:\nDictUpperBound[str,str]: Dictionary of arguments", "source": "codesearchnet"}
{"code": "def _compile_expression(self,\n                            expr: Expression,\n                            scope: Dict[str, TensorFluent],\n                            batch_size: Optional[int] = None,\n                            noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:\n        \n        etype2compiler = {\n            'constant':    self._compile_constant_expression,\n            'pvar':        self._compile_pvariable_expression,\n            'randomvar':   self._compile_random_variable_expression,\n            'arithmetic':  self._compile_arithmetic_expression,\n            'boolean':     self._compile_boolean_expression,\n            'relational':  self._compile_relational_expression,\n            'func':        self._compile_function_expression,\n            'control':     self._compile_control_flow_expression,\n            'aggregation': self._compile_aggregation_expression\n        }\n\n        etype = expr.etype\n        if etype[0] not in etype2compiler:\n            raise ValueError('Expression type unknown: {}'.format(etype))\n\n        with self.graph.as_default():\n            compiler_fn = etype2compiler[etype[0]]\n            return compiler_fn(expr, scope, batch_size, noise)", "docstring": "Compile the expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent.", "source": "juraj-google-style"}
{"code": "def returns_collection(self) -> bool:\n    return self.cardinality == Cardinality.COLLECTION or self.cardinality == Cardinality.CHILD_OF_COLLECTION", "docstring": "Indicates if the data type will evaluate to a collection.\n\nReturns:\nTrue in the following circumstances\n- The data type represents an element with cardinality greater than one.\n- The data type represents an element with a cardinality less than or\nequal to one, but that element is a child of a collection and will\nevaluate to a collection. For example, the path Patient.name.use will\nreturn a collection, despite 'use' being a scalar, because it is a\nchild of the collection, 'name.'\nFalse if the data type represents a scalar element whose parents are all\nalso scalars.", "source": "github-repos"}
{"code": "def generate_entry_label(entry):\n    if isinstance(entry, MultiEntry):\n        return ' + '.join([latexify_ion(e.name) for e in entry.entry_list])\n    else:\n        return latexify_ion(latexify(entry.name))", "docstring": "Generates a label for the pourbaix plotter\n\nArgs:\nentry (PourbaixEntry or MultiEntry): entry to get a label for", "source": "codesearchnet"}
{"code": "def level_cond_prior(prior_dist, z, latent, hparams, state):\n    latent_dist_encoder = hparams.get('latent_dist_encoder', None)\n    latent_skip = hparams.get('latent_skip', False)\n    if (latent_dist_encoder == 'pointwise'):\n        last_latent = latent\n        merge_std = hparams.level_scale\n        latent_shape = common_layers.shape_list(latent)\n        z_shape = common_layers.shape_list(z)\n        if (latent_shape != z_shape):\n            raise ValueError(('Expected latent_shape to be %s, got %s' % (latent_shape, z_shape)))\n        latent_dist = scale_gaussian_prior('latent_prior', latent, logscale_factor=3.0)\n        cond_dist = merge_level_and_latent_dist(prior_dist, latent_dist, merge_std=merge_std)\n    elif (latent_dist_encoder == 'conv_net'):\n        output_channels = common_layers.shape_list(z)[(- 1)]\n        last_latent = latent[(- 1)]\n        latent_stack = tf.concat(([prior_dist.loc] + latent), axis=(- 1))\n        latent_stack = noise_op(latent_stack, hparams)\n        cond_dist = latent_to_dist('latent_stack', latent_stack, hparams=hparams, output_channels=output_channels)\n    elif (latent_dist_encoder == 'conv3d_net'):\n        last_latent = latent[(- 1)]\n        output_channels = common_layers.shape_list(last_latent)[(- 1)]\n        num_steps = len(latent)\n        cond_latents = tf.stack(latent, axis=1)\n        prev_latents = tf.tile(tf.expand_dims(prior_dist.loc, axis=1), [1, num_steps, 1, 1, 1])\n        cond_latents = tf.concat((cond_latents, prev_latents), axis=(- 1))\n        cond_latents = noise_op(cond_latents, hparams)\n        cond_dist = temporal_latent_to_dist('latent_stack', cond_latents, hparams, output_channels=output_channels)\n    elif (latent_dist_encoder == 'conv_lstm'):\n        last_latent = latent\n        output_channels = common_layers.shape_list(z)[(- 1)]\n        latent_stack = tf.concat((prior_dist.loc, latent), axis=(- 1))\n        latent_stack = noise_op(latent_stack, hparams)\n        (_, state) = common_video.conv_lstm_2d(latent_stack, state, hparams.latent_encoder_width, kernel_size=3, name='conv_lstm')\n        cond_dist = single_conv_dist('state_to_dist', state.h, output_channels=output_channels)\n    if latent_skip:\n        new_mean = (cond_dist.loc + last_latent)\n        cond_dist = tfp.distributions.Normal(new_mean, cond_dist.scale)\n    return (cond_dist.loc, cond_dist.scale, state)", "docstring": "Returns a conditional prior for each level.\n\nArgs:\nprior_dist: Distribution conditioned on the previous levels.\nz: Tensor, output of the previous levels.\nlatent: Tensor or a list of tensors to condition the latent_distribution.\nhparams: next_frame_glow hparams.\nstate: Current LSTM state. Used only if hparams.latent_dist_encoder is\na lstm.\nRaises:\nValueError: If hparams.latent_dist_encoder is \"pointwise\" and if the shape\nof latent is different from z.", "source": "codesearchnet"}
{"code": "def credits(self, **kwargs):\n    path = self._get_series_id_season_number_episode_number_path('credits')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the TV episode credits by combination of season and episode number.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):\n    if (not frames):\n        logger.info('Could save summaries - no summaries to save!')\n        logger.info('You have no frames - aborting')\n        return None\n    if (not keys):\n        logger.info('Could save summaries - no summaries to save!')\n        logger.info('You have no keys - aborting')\n        return None\n    selected_summaries_dict = create_selected_summaries_dict(selected_summaries)\n    summary_df = pd.concat(frames, keys=keys, axis=1)\n    for (key, value) in selected_summaries_dict.items():\n        _summary_file_name = os.path.join(batch_dir, ('summary_%s_%s.csv' % (key, batch_name)))\n        _summary_df = summary_df.iloc[(:, (summary_df.columns.get_level_values(1) == value))]\n        _header = _summary_df.columns\n        _summary_df.to_csv(_summary_file_name, sep=';')\n        logger.info(('saved summary (%s) to:\\n       %s' % (key, _summary_file_name)))\n    logger.info('finished saving summaries')\n    return summary_df", "docstring": "Writes the summaries to csv-files\n\nArgs:\nframes: list of ``cellpy`` summary DataFrames\nkeys: list of indexes (typically run-names) for the different runs\nselected_summaries: list defining which summary data to save\nbatch_dir: directory to save to\nbatch_name: the batch name (will be used for making the file-name(s))\n\nReturns: a pandas DataFrame with your selected summaries.", "source": "codesearchnet"}
{"code": "def DeleteRecords(cls, ids, token):\n    with data_store.DB.GetMutationPool() as mutation_pool:\n        mutation_pool.QueueDeleteRecords(ids)", "docstring": "Delete records identified by ids.\n\nArgs:\nids: A list of ids provided by ClaimRecords.\ntoken: The database access token to delete with.\n\nRaises:\nLockError: If the queue is not locked.", "source": "codesearchnet"}
{"code": "def __str__(self):\n        \n        text = super(Baken, self).__format__('dms')\n        if self._locator:\n            text = '%s (%s)' % (self._locator, text)\n        return text", "docstring": "Pretty printed location string.\n\nArgs:\nmode (str): Coordinate formatting system to use\n\nReturns:\nstr: Human readable string representation of ``Baken`` object", "source": "juraj-google-style"}
{"code": "def __init__(self, user_pipeline: beam.Pipeline, pcolls: Optional[Set[beam.pvalue.PCollection]]=None):\n    assert not pcolls or all((pcoll.pipeline is user_pipeline for pcoll in pcolls)), 'All %s need to belong to %s' % (pcolls, user_pipeline)\n    self._user_pipeline = user_pipeline\n    self._pcolls = pcolls\n    self._cache_manager = ie.current_env().get_cache_manager(self._user_pipeline, create_if_absent=True)\n    if background_caching_job.has_source_to_cache(self._user_pipeline):\n        self._cache_manager = ie.current_env().get_cache_manager(self._user_pipeline)\n    _, self._context = self._user_pipeline.to_runner_api(return_context=True)\n    self._context.component_id_map = copy.copy(self._user_pipeline.component_id_map)\n    self._cacheables = self.cacheables()", "docstring": "Initializes a pipelilne for augmenting interactive flavor.\n\nArgs:\nuser_pipeline: a beam.Pipeline instance defined by the user.\npcolls: cacheable pcolls to be computed/retrieved. If the set is\nempty, all intermediate pcolls assigned to variables are applicable.", "source": "github-repos"}
{"code": "def start(self, extra_args='', tag=''):\n    if self.started:\n        return\n    utils.create_dir(self.log_path)\n    if tag:\n        tag = tag + ','\n    out_file_name = 'IPerfServer,{},{}{}.log'.format(self.port, tag, len(self.log_files))\n    full_out_path = os.path.join(self.log_path, out_file_name)\n    cmd = '%s %s > %s' % (self.iperf_str, extra_args, full_out_path)\n    self.iperf_process = utils.start_standing_subprocess(cmd, shell=True)\n    self.log_files.append(full_out_path)\n    self.started = True", "docstring": "Starts iperf server on specified port.\n\nArgs:\nextra_args: A string representing extra arguments to start iperf\nserver with.\ntag: Appended to log file name to identify logs from different\niperf runs.", "source": "github-repos"}
{"code": "def _bits_in_condition(self, cond):\n    all_bits = []\n    if (cond is not None):\n        all_bits.extend([(cond[0], j) for j in range(self.cregs[cond[0].name].size)])\n    return all_bits", "docstring": "Return a list of bits in the given condition.\n\nArgs:\ncond (tuple or None): optional condition (ClassicalRegister, int)\n\nReturns:\nlist[(ClassicalRegister, idx)]: list of bits", "source": "codesearchnet"}
{"code": "def heightmap_count_cells(hm: np.ndarray, mi: float, ma: float) -> int:\n    \n    return int(lib.TCOD_heightmap_count_cells(_heightmap_cdata(hm), mi, ma))", "docstring": "Return the number of map cells which value is between ``mi`` and ``ma``.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nmi (float): The lower bound.\nma (float): The upper bound.\n\nReturns:\nint: The count of values which fall between ``mi`` and ``ma``.\n\n.. deprecated:: 8.1\nCan be replaced by an equivalent NumPy function such as:\n``numpy.count_nonzero((mi <= hm) & (hm < ma))``", "source": "juraj-google-style"}
{"code": "def value_loss_given_predictions(value_prediction, rewards, reward_mask, gamma=0.99):\n    (B, T) = rewards.shape\n    assert ((B, T) == reward_mask.shape)\n    assert ((B, (T + 1), 1) == value_prediction.shape)\n    value_prediction = np.squeeze(value_prediction, axis=2)\n    value_prediction = (value_prediction[(:, :(- 1))] * reward_mask)\n    r2g = rewards_to_go(rewards, reward_mask, gamma=gamma)\n    loss = ((value_prediction - r2g) ** 2)\n    return (np.sum(loss) / np.sum(reward_mask))", "docstring": "Computes the value loss given the prediction of the value function.\n\nArgs:\nvalue_prediction: np.ndarray of shape (B, T+1, 1)\nrewards: np.ndarray of shape (B, T) of rewards.\nreward_mask: np.ndarray of shape (B, T), the mask over rewards.\ngamma: float, discount factor.\n\nReturns:\nThe average L2 value loss, averaged over instances where reward_mask is 1.", "source": "codesearchnet"}
{"code": "def _checkResponseWriteData(payload, writedata):\n    \n    _checkString(payload, minlength=4, description='payload')\n    _checkString(writedata, minlength=2, maxlength=2, description='writedata')\n\n    BYTERANGE_FOR_WRITEDATA = slice(2, 4)\n\n    receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]\n\n    if receivedWritedata != writedata:\n        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format( \\\n            receivedWritedata, writedata, payload))", "docstring": "Check that the write data as given in the response is correct.\n\nThe bytes 2 and 3 (zero based counting) in the payload holds the write data.\n\nArgs:\n* payload (string): The payload\n* writedata (string): The data to write, length should be 2 bytes.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def __gt__(self, other):\n        \n        if other.__class__ is not self.__class__:\n            return NotImplemented\n        return not self <= other", "docstring": "Test if self is greater than an object of the same class.\n\nArgs:\nother: The object to compare against.\n\nReturns:\nTrue if self is greater than other; else False.\n\nRaises:\nTypeError: Raised if the objects are not of the same class.", "source": "juraj-google-style"}
{"code": "def menu(title, options, cancel_label=\"Cancel\", flag_allow_empty=False, flag_cancel=True, ch='.'):\n  \n\n  num_options, flag_ok = len(options), 0\n  option = None  \n  min_allowed = 0 if flag_cancel else 1  \n\n  while True:\n    print(\"\")\n    for line in format_box(title, ch):\n        print(\"  \"+line)\n    for i, s in enumerate(options):\n      print((\"  {0:d} - {1!s}\".format(i+1, s)))\n    if flag_cancel: print((\"  0 - << (*{0!s}*)\".format(cancel_label)))\n    try:\n        s_option = input('? ')\n    except KeyboardInterrupt:\n        raise\n    except:\n        print(\"\")\n\n    n_try = 0\n    while True:\n      if n_try >= 10:\n        print('You are messing up!')\n        break\n\n      if len(s_option) == 0 and flag_allow_empty:\n        flag_ok = True\n        break\n\n      try:\n        option = int(s_option)\n        if min_allowed <= option <= num_options:\n          flag_ok = True\n          break\n      except ValueError:\n        print(\"Invalid integer value!\")\n\n      print((\"Invalid option, range is [{0:d}, {1:d}]!\".format(0 if flag_cancel else 1, num_options)))\n\n      n_try += 1\n      s_option = input(\"? \")\n\n    if flag_ok:\n      break\n  return option", "docstring": "Text menu.\n\nArguments:\ntitle -- menu title, to appear at the top\noptions -- sequence of strings\ncancel_label='Cancel' -- label to show at last \"zero\" option\nflag_allow_empty=0 -- Whether to allow empty option\nflag_cancel=True -- whether there is a \"0 - Cancel\" option\nch=\".\" -- character to use to draw frame around title\n\nReturns:\noption -- an integer: None; 0-Back/Cancel/etc; 1, 2, ...\n\nAdapted from irootlab menu.m", "source": "juraj-google-style"}
{"code": "def category(msg):\n    if ((common.typecode(msg) < 1) or (common.typecode(msg) > 4)):\n        raise RuntimeError(('%s: Not a identification message' % msg))\n    msgbin = common.hex2bin(msg)\n    return common.bin2int(msgbin[5:8])", "docstring": "Aircraft category number\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: category number", "source": "codesearchnet"}
{"code": "def _iterdump(self, file_name, headers=None):\n        \n        if headers is None:\n            headers = [\"Discharge_Capacity\", \"Charge_Capacity\"]\n\n        step_txt = self.headers_normal['step_index_txt']\n        point_txt = self.headers_normal['data_point_txt']\n        cycle_txt = self.headers_normal['cycle_index_txt']\n\n        self.logger.debug(\"iterating through file: %s\" % file_name)\n        if not os.path.isfile(file_name):\n            print(\"Missing file_\\n   %s\" % file_name)\n\n        filesize = os.path.getsize(file_name)\n        hfilesize = humanize_bytes(filesize)\n        txt = \"Filesize: %i (%s)\" % (filesize, hfilesize)\n        self.logger.info(txt)\n\n        table_name_global = TABLE_NAMES[\"global\"]\n        table_name_stats = TABLE_NAMES[\"statistic\"]\n        table_name_normal = TABLE_NAMES[\"normal\"]\n\n        \n\n        temp_dir = tempfile.gettempdir()\n        temp_filename = os.path.join(temp_dir, os.path.basename(file_name))\n        shutil.copy2(file_name, temp_dir)\n        constr = self.__get_res_connector(temp_filename)\n\n        if use_ado:\n            conn = dbloader.connect(constr)\n        else:\n            conn = dbloader.connect(constr, autocommit=True)\n\n        self.logger.debug(\"tmp file: %s\" % temp_filename)\n        self.logger.debug(\"constr str: %s\" % constr)\n\n        \n        self.logger.debug(\"reading global data table\")\n        sql = \"select * from %s\" % table_name_global\n        global_data_df = pd.read_sql_query(sql, conn)\n        \n        self.logger.debug(\"sql statement: %s\" % sql)\n\n        tests = global_data_df[self.headers_normal['test_id_txt']]\n        number_of_sets = len(tests)\n        self.logger.debug(\"number of datasets: %i\" % number_of_sets)\n        self.logger.debug(\"only selecting first test\")\n        test_no = 0\n        self.logger.debug(\"setting data for test number %i\" % test_no)\n        loaded_from = file_name\n        \n        start_datetime = global_data_df[self.headers_global['start_datetime_txt']][test_no]\n        test_ID = int(global_data_df[self.headers_normal['test_id_txt']][test_no])  \n        test_name = global_data_df[self.headers_global['test_name_txt']][test_no]\n\n        \n        self.logger.debug(\"reading raw-data\")\n\n        columns = [\"Data_Point\", \"Step_Index\", \"Cycle_Index\"]\n        columns.extend(headers)\n        columns_txt = \", \".join([\"%s\"] * len(columns)) % tuple(columns)\n\n        sql_1 = \"select %s \" % columns_txt\n        sql_2 = \"from %s \" % table_name_normal\n        sql_3 = \"where %s=%s \" % (self.headers_normal['test_id_txt'], test_ID)\n        sql_5 = \"order by %s\" % self.headers_normal['data_point_txt']\n        import time\n        info_list = []\n        info_header = [\"cycle\", \"row_count\", \"start_point\", \"end_point\"]\n        info_header.extend(headers)\n        self.logger.info(\" \".join(info_header))\n        self.logger.info(\"-------------------------------------------------\")\n\n        for cycle_number in range(1, 2000):\n            t1 = time.time()\n            self.logger.debug(\"picking cycle %i\" % cycle_number)\n            sql_4 = \"AND %s=%i \" % (cycle_txt, cycle_number)\n            sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5\n            self.logger.debug(\"sql statement: %s\" % sql)\n            normal_df = pd.read_sql_query(sql, conn)\n            t2 = time.time()\n            dt = t2 - t1\n            self.logger.debug(\"time: %f\" % dt)\n            if 
normal_df.empty:\n                self.logger.debug(\"reached the end\")\n                break\n            row_count, _ = normal_df.shape\n            start_point = normal_df[point_txt].min()\n            end_point = normal_df[point_txt].max()\n            last = normal_df.iloc[-1, :]\n\n            step_list = [cycle_number, row_count, start_point, end_point]\n            step_list.extend([last[x] for x in headers])\n            info_list.append(step_list)\n\n        self._clean_up_loadres(None, conn, temp_filename)\n        info_dict = pd.DataFrame(info_list, columns=info_header)\n        return info_dict", "docstring": "Function for dumping values from a file.\n\nShould only be used by developers.\n\nArgs:\nfile_name: name of the file\nheaders: list of headers to pick\ndefault:\n[\"Discharge_Capacity\", \"Charge_Capacity\"]\n\nReturns: pandas.DataFrame", "source": "juraj-google-style"}
{"code": "def installed_capabilities(image=None):\n    if (salt.utils.versions.version_cmp(__grains__['osversion'], '10') == (- 1)):\n        raise NotImplementedError('`installed_capabilities` is not available on this version of Windows: {0}'.format(__grains__['osversion']))\n    return _get_components('Capability Identity', 'Capabilities', 'Installed')", "docstring": "List the capabilities installed on the system\n\nArgs:\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\n\nRaises:\nNotImplementedError: For all versions of Windows that are not Windows 10\nand later. Server editions of Windows use ServerManager instead.\n\nReturns:\nlist: A list of installed capabilities\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.installed_capabilities", "source": "codesearchnet"}
{"code": "def get_existing_pipelines(self):\n    url = '{0}/applications/{1}/pipelineConfigs'.format(API_URL, self.app_name)\n    resp = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert resp.ok, 'Failed to lookup pipelines for {0}: {1}'.format(self.app_name, resp.text)\n    return resp.json()", "docstring": "Get existing pipeline configs for specific application.\n\nReturns:\nstr: Pipeline config json", "source": "codesearchnet"}
{"code": "def get_lowest_values(self, count):\n    count = int(count)\n    assert (count <= len(self._values)), 'count must be <= to Data Collection len. {} > {}.'.format(count, len(self._values))\n    assert (count > 0), 'count must be greater than 0. Got {}.'.format(count)\n    lowest_values = sorted(self._values)[0:count]\n    lowest_values_index = sorted(list(xrange(len(self._values))), key=(lambda k: self._values[k]))[0:count]\n    return (lowest_values, lowest_values_index)", "docstring": "Get a list of the the x lowest values of the Data Collection and their indices.\n\nThis is useful for situations where one needs to know the times of\nthe year when the smallest values of a data collection occur.\n\nArgs:\ncount: Integer representing the number of lowest values to account for.\n\nReturns:\nhighest_values: The n lowest values in data list, ordered from\nlowest to lowest.\nlowest_values_index: Indicies of the n lowest values in data\nlist, ordered from lowest to lowest.", "source": "codesearchnet"}
{"code": "def GetRawDevice(path):\n  \n  path = CanonicalPathToLocalPath(path)\n  \n  try:\n    path = win32file.GetLongPathName(path)\n  except pywintypes.error:\n    pass\n\n  try:\n    mount_point = win32file.GetVolumePathName(path)\n  except pywintypes.error as details:\n    logging.info(\"path not found. %s\", details)\n    raise IOError(\"No mountpoint for path: %s\" % path)\n\n  if not path.startswith(mount_point):\n    stripped_mp = mount_point.rstrip(\"\\\\\")\n    if not path.startswith(stripped_mp):\n      raise IOError(\"path %s is not mounted under %s\" % (path, mount_point))\n\n  corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])\n  corrected_path = utils.NormalizePath(corrected_path)\n\n  volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip(\"\\\\\")\n  volume = LocalPathToCanonicalPath(volume)\n\n  \n  result = rdf_paths.PathSpec(\n      path=volume,\n      pathtype=rdf_paths.PathSpec.PathType.OS,\n      mount_point=mount_point.rstrip(\"\\\\\"))\n\n  return result, corrected_path", "docstring": "Resolves the raw device that contains the path.\n\nArgs:\npath: A path to examine.\n\nReturns:\nA pathspec to read the raw device as well as the modified path to read\nwithin the raw device. This is usually the path without the mount point.\n\nRaises:\nIOError: if the path does not exist or some unexpected behaviour occurs.", "source": "juraj-google-style"}
{"code": "def convert_frame_change(self, shift, instruction):\n        \n        command_dict = {\n            'name': 'fc',\n            't0': shift+instruction.start_time,\n            'ch': instruction.channels[0].name,\n            'phase': instruction.command.phase\n        }\n        return self._qobj_model(**command_dict)", "docstring": "Return converted `FrameChangeInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (FrameChangeInstruction): frame change instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "juraj-google-style"}
{"code": "def potential_purviews(self, direction, mechanism, purviews=False):\n        \n        if purviews is False:\n            purviews = self.network.potential_purviews(direction, mechanism)\n            \n            purviews = [purview for purview in purviews\n                        if set(purview).issubset(self.node_indices)]\n\n        \n        \n        \n        return irreducible_purviews(self.cm, direction, mechanism, purviews)", "docstring": "Return all purviews that could belong to the |MIC|/|MIE|.\n\nFilters out trivially-reducible purviews.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The mechanism of interest.\n\nKeyword Args:\npurviews (tuple[int]): Optional subset of purviews of interest.", "source": "juraj-google-style"}
{"code": "def _make_pr_entry(self, step, wall_time, data_array, thresholds):\n    true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]\n    false_positives = [int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]\n    tp_index = metadata.TRUE_POSITIVES_INDEX\n    fp_index = metadata.FALSE_POSITIVES_INDEX\n    positives = data_array[([tp_index, fp_index], :)].astype(int).sum(axis=0)\n    end_index_inclusive = (len(positives) - 1)\n    while ((end_index_inclusive > 0) and (positives[end_index_inclusive] == 0)):\n        end_index_inclusive -= 1\n    end_index = (end_index_inclusive + 1)\n    return {'wall_time': wall_time, 'step': step, 'precision': data_array[(metadata.PRECISION_INDEX, :end_index)].tolist(), 'recall': data_array[(metadata.RECALL_INDEX, :end_index)].tolist(), 'true_positives': true_positives[:end_index], 'false_positives': false_positives[:end_index], 'true_negatives': [int(v) for v in data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]], 'false_negatives': [int(v) for v in data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]], 'thresholds': thresholds[:end_index]}", "docstring": "Creates an entry for PR curve data. Each entry corresponds to 1 step.\n\nArgs:\nstep: The step.\nwall_time: The wall time.\ndata_array: A numpy array of PR curve data stored in the summary format.\nthresholds: An array of floating point thresholds.\n\nReturns:\nA PR curve entry.", "source": "codesearchnet"}
{"code": "def get_metadata(self):\n    if (self._metadata is None):\n        self._metadata = self._source.get_metadata(self._handle)\n    return self._metadata", "docstring": "Returns the associated metadata info for this template version\n\nReturns:\ndict: Metadata for this version", "source": "codesearchnet"}
{"code": "def load_values(self, dictionary, as_defaults=False, flat=False):\n        \n        if flat:\n            \n            separator = self.settings.str_path_separator\n            flat_dictionary = dictionary\n            dictionary = collections.OrderedDict()\n            for k, v in flat_dictionary.items():\n                k_parts = k.split(separator)\n                c = dictionary\n                for i, kp in enumerate(k_parts):\n                    if i >= len(k_parts) - 1:\n                        c[kp] = v\n                    else:\n                        if kp not in c:\n                            c[kp] = collections.OrderedDict()\n                        c = c[kp]\n\n        for name, value in dictionary.items():\n            if name not in self:\n                if as_defaults:\n                    if isinstance(value, dict):\n                        self[name] = self.create_section()\n                        self[name].load_values(value, as_defaults=as_defaults)\n                    else:\n                        self[name] = self.create_item(name, default=value)\n                else:\n                    \n                    pass\n                continue\n\n            resolution = self._get_item_or_section(name, handle_not_found=False)\n            if is_config_item(resolution):\n                if as_defaults:\n                    resolution.default = value\n                else:\n                    resolution.value = value\n            else:\n                resolution.load_values(value, as_defaults=as_defaults)", "docstring": "Import config values from a dictionary.\n\nWhen ``as_defaults`` is set to ``True``, the values\nimported will be set as defaults. This can be used to\ndeclare the sections and items of configuration.\nValues of sections and items in ``dictionary`` can be\ndictionaries as well as instances of :class:`.Item` and\n:class:`.Config`.\n\nArgs:\ndictionary:\nas_defaults: if ``True``, the imported values will be set as defaults.", "source": "juraj-google-style"}
{"code": "def filter_list(lst, takeout, case_sensitive=True):\n    takeout = force_list(takeout)\n    if (not case_sensitive):\n        lst = [x.lower() for x in lst]\n        takeout = [y.lower() for y in takeout]\n    return [x for x in lst if (x not in takeout)]", "docstring": "Return a modified list removing items specified.\n\nArgs:\nlst: Original list of values\ntakeout: Object or objects to remove from lst\ncase_sensitive: if the search should be case sensitive\n\nReturns:\nlist: Filtered list of values", "source": "codesearchnet"}
{"code": "def add(self, key, value, expire=0, noreply=None):\n    if (noreply is None):\n        noreply = self.default_noreply\n    return self._store_cmd(b'add', {key: value}, expire, noreply)[key]", "docstring": "The memcached \"add\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: str, see class docs for details.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nIf noreply is True, the return value is always True. Otherwise the\nreturn value is True if the value was stored, and False if it was\nnot (because the key already existed).", "source": "codesearchnet"}
{"code": "def max_pool(x, pool_size, strides, padding):\n    x = tf_np.asarray(x)\n    return tf_np.asarray(nn_ops.pool(input=x, window_shape=pool_size, pooling_type='MAX', strides=strides, padding=padding))", "docstring": "Performs an N-D max pooling.\n\nArgs:\nx: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +\n[num_channels]`. Pooling happens over the spatial dimensions only.\npool_size: sequence of N ints.\nstrides: sequence of N ints.\npadding: a string, the padding algorithm. Must be \"SAME\" or \"VALID\".\n\nReturns:\nAn (N+2)-D array,  of shape\n[batch_size] + output_spatial_shape + [num_channels],\nwhere `output_spatial_shape` depends on the value of padding:\nIf padding = \"SAME\":\noutput_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])\nIf padding = \"VALID\":\noutput_spatial_shape[i] =\nceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).", "source": "github-repos"}
{"code": "def pprint(self, initials_only=False):\n    last_name = self.last\n    suffixes = ((', ' + self.suffix) if self.suffix else '')\n    if (initials_only and (last_name != u'')):\n        first_names = self.first_initials\n    else:\n        first_names = self.first\n    return u'{} {}{}'.format(first_names, last_name, suffixes).strip()", "docstring": "Pretty print the name.\n\nArgs:\ninitials_only (bool): ``True`` if we want the first names to be displayed with\nonly the initial followed by a dot. ``False`` otherwise.\n\nExamples:\n>>> ParsedName('Lieber, Stanley Martin').pprint()\nu'Stanley Martin Lieber'\n>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)\nu'S. M. Lieber'\n>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)\nu'R. Downey Jr.'", "source": "codesearchnet"}
{"code": "def valueWritePreprocessor(valueString, replaceParamsFile=None):\n    if (type(valueString) is bool):\n        log.warning('Only numerical variable types can be handled by the valueReadPreprocessor function.')\n        return valueString\n    variableString = valueString\n    if (replaceParamsFile is not None):\n        if (variableString == REPLACE_NO_VALUE):\n            variableString = '[NO_VARIABLE]'\n        else:\n            try:\n                number = int(valueString)\n                if (number < 0):\n                    parameterID = (number * (- 1))\n                    for targetParam in replaceParamsFile.targetParameters:\n                        if (targetParam.id == parameterID):\n                            variableString = targetParam.targetVariable\n                            break\n            except:\n                pass\n    return variableString", "docstring": "Look up variable name in replace param file for the negative id given and return it.\n\nArgs:\nvalueString (str): String representing the value to be preprocessed.\nreplaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if\nreplacement variables are included in the project.\n\nReturns:\nstr: Processed value as a string", "source": "codesearchnet"}
{"code": "def all_max(tensors):\n    return _apply_all_reduce('max', tensors)", "docstring": "Returns a list of tensors with the all-reduce max across `tensors`.\n\nThe computation is done with an all-reduce operation, so if only some of the\nreturned tensors are evaluated then the computation will hang.\n\nArgs:\ntensors: The input tensors across which to reduce; must be assigned\nto GPU devices.\n\nReturns:\nList of tensors, each with the maximum of the input tensors, where tensor i\nhas the same device as `tensors[i]`.", "source": "github-repos"}
{"code": "def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n    return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def add_file(profile, branch, file_path, file_contents, is_executable=False, commit_message=None):\n    branch_sha = get_branch_sha(profile, branch)\n    tree = get_files_in_branch(profile, branch_sha)\n    new_tree = add_file_to_tree(tree, file_path, file_contents, is_executable)\n    data = trees.create_tree(profile, new_tree)\n    sha = data.get('sha')\n    if (not commit_message):\n        commit_message = (('Added ' + file_path) + '.')\n    parents = [branch_sha]\n    commit_data = commits.create_commit(profile, commit_message, sha, parents)\n    commit_sha = commit_data.get('sha')\n    ref_data = refs.update_ref(profile, ('heads/' + branch), commit_sha)\n    return ref_data", "docstring": "Add a file to a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch\nThe name of a branch.\n\nfile_path\nThe path of the new file in the tree.\n\nfile_contents\nThe (UTF-8 encoded) contents of the new file.\n\nis_executable\nIf ``True``, the new file will get executable permissions (0755).\nOtherwise, it will get 0644 permissions.\n\ncommit_message\nA commit message to give to the commit.\n\nReturns:\nA dict with data about the branch's new ref (it includes the new SHA\nthe branch's HEAD points to, after committing the new file).", "source": "codesearchnet"}
{"code": "def _image_url(array, fmt='png', mode='data', quality=90, domain=None):\n    supported_modes = 'data'\n    if (mode not in supported_modes):\n        message = \"Unsupported mode '%s', should be one of '%s'.\"\n        raise ValueError(message, mode, supported_modes)\n    image_data = serialize_array(array, fmt=fmt, quality=quality)\n    base64_byte_string = base64.b64encode(image_data).decode('ascii')\n    return ((('data:image/' + fmt.upper()) + ';base64,') + base64_byte_string)", "docstring": "Create a data URL representing an image from a PIL.Image.\n\nArgs:\nimage: a numpy\nmode: presently only supports \"data\" for data URL\n\nReturns:\nURL representing image", "source": "codesearchnet"}
{"code": "def _embedding_lookup_for_sparse_tensor(self, inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n\n    def sparse_to_dense_computation(inp, weight):\n        if weight is None:\n            weight = sparse_tensor.SparseTensor(inp.indices, array_ops.ones_like(inp.values, dtype=dtypes.float32), dense_shape=inp.dense_shape)\n        inp = sparse_ops.sparse_tensor_to_dense(inp)\n        weight = sparse_ops.sparse_tensor_to_dense(weight)\n        return (inp, weight)\n    inp, weight = tpu_replication.outside_compilation(sparse_to_dense_computation, inp=inp, weight=weight)\n    embeddings = embedding_ops.embedding_lookup_v2(table, inp)\n    weight = array_ops.expand_dims(weight, -1)\n    embeddings *= weight\n    if not feature.output_shape and feature.max_sequence_length > 0:\n        embeddings = self._pad_or_truncate_with_sequence_length(embeddings, feature.max_sequence_length)\n    else:\n        embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)\n    return embeddings", "docstring": "Embedding lookup for sparse tensor based on its feature config.\n\nArgs:\ninp: a single SparseTensor input.\nweight: None or SparseTensor which has the same shape of the input.\ntable: a table variable.\nfeature: a feature config.\n\nReturns:\nEmbedding lookup result.", "source": "github-repos"}
{"code": "def reduce_sum(self, x):\n    return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x)", "docstring": "Performs a sum reduction on `x` across pfor iterations.\n\nNote that this currently may not work inside a control flow construct.\nArgs:\nx: an unvectorized Tensor.\n\nReturns:\nA Tensor that has same rank as `x`. The value is the sum of the values\nof `x` across the pfor iterations.", "source": "github-repos"}
{"code": "def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):\n    if not isinstance(grid_pinpoints, list):\n        raise TypeError('grid_pinpoints should be a list of tuples or lists')\n    if not isinstance(image_size, (list, tuple)):\n        if not isinstance(image_size, (torch.Tensor, np.ndarray)):\n            raise TypeError(f'image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor')\n        image_size = image_size.tolist()\n    height, width = select_best_resolution(image_size, grid_pinpoints)\n    return (height", "docstring": "Calculate the shape of the image patch grid after the preprocessing for images of any resolution.\n\nArgs:\nimage_size (`tuple`):\nThe size of the input image in the format (width, height).\ngrid_pinpoints (`List`):\nA list containing possible resolutions. Each item in the list should be a tuple or list\nof the form `(height, width)`.\npatch_size (`int`):\nThe size of each image patch.\n\nReturns:\ntuple: The shape of the image patch grid in the format (width, height).", "source": "github-repos"}
{"code": "def get_sns_topic_arn(topic_name, account, region):\n    if ((topic_name.count(':') == 5) and topic_name.startswith('arn:aws:sns:')):\n        return topic_name\n    session = boto3.Session(profile_name=account, region_name=region)\n    sns_client = session.client('sns')\n    topics = sns_client.list_topics()['Topics']\n    matched_topic = None\n    for topic in topics:\n        topic_arn = topic['TopicArn']\n        if (topic_name == topic_arn.split(':')[(- 1)]):\n            matched_topic = topic_arn\n            break\n    else:\n        LOG.critical('No topic with name %s found.', topic_name)\n        raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))\n    return matched_topic", "docstring": "Get SNS topic ARN.\n\nArgs:\ntopic_name (str): Name of the topic to lookup.\naccount (str): Environment, e.g. dev\nregion (str): Region name, e.g. us-east-1\n\nReturns:\nstr: ARN for requested topic name", "source": "codesearchnet"}
{"code": "def convert_to_torch_compatible(cls, x):\n    return x", "docstring": "Convert a tensor to something that the Torch backend can consume.\n\nThis can be a Torch tensor, NumPy array or any other type of tensor that\n`keras.backend.torch.core.convert_to_tensor()` can consume.\nOnly called after slicing using `__getitem__`.\nUsed to densify sparse tensors and ragged tensors.\n\nArgs:\nx: the tensor to convert.\nReturns: the converted tensor.", "source": "github-repos"}
{"code": "def get_block_header(self, block_hash, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_BLOCK_HEADER, params=[block_hash, 1], id=id, endpoint=endpoint)", "docstring": "Get the corresponding block header information according to the specified script hash.\nArgs:\nblock_hash: (str) the block scripthash (e.g. 'a5508c9b6ed0fc09a531a62bc0b3efcb6b8a9250abaf72ab8e9591294c1f6957')\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def rationalize(flt: float, denominators: Set[int]=None) -> Fraction:\n    if (denominators is None):\n        denominators = _DENOMINATORS\n    frac = Fraction.from_float(flt).limit_denominator()\n    if (frac.denominator not in denominators):\n        raise ValueError('Cannot rationalize')\n    return frac", "docstring": "Convert a floating point number to a Fraction with a small\ndenominator.\n\nArgs:\nflt:            A floating point number\ndenominators:   Collection of standard denominators. Default is\n1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512,\n1024, 2048, 4096, 8192\n\nRaises:\nValueError:     If cannot rationalize float", "source": "codesearchnet"}
{"code": "def deploy_ray_axis_func(axis, func, num_splits, kwargs, *partitions):\n    \n    table = concat_arrow_table_partitions(axis, partitions)\n    try:\n        result = func(table, **kwargs)\n    except Exception:\n        result = pyarrow.Table.from_pandas(func(table.to_pandas(), **kwargs))\n    return split_arrow_table_result(\n        axis, result, len(partitions), num_splits, table.schema.metadata\n    )", "docstring": "Deploy a function along a full axis in Ray.\n\nArgs:\naxis: The axis to perform the function along.\nfunc: The function to perform.\nnum_splits: The number of splits to return\n(see `split_result_of_axis_func_pandas`)\nkwargs: A dictionary of keyword arguments.\npartitions: All partitions that make up the full axis (row or column)\n\nReturns:\nA list of Pandas DataFrames.", "source": "juraj-google-style"}
{"code": "def save_headers(cls, filename: str, response: HTTPResponse):\n    new_filename = (filename + '-new')\n    with open('wb') as new_file:\n        new_file.write(response.header())\n        with wpull.util.reset_file_offset(response.body):\n            response.body.seek(0)\n            shutil.copyfileobj(response.body, new_file)\n    os.remove(filename)\n    os.rename(new_filename, filename)", "docstring": "Prepend the HTTP response header to the file.\n\nArgs:\nfilename: The path of the file\nresponse: Response", "source": "codesearchnet"}
{"code": "def new_random_wallet(cls, user_entropy=None, network=BitcoinMainNet):\n    seed = str(urandom(64))\n    seed += str(int((time.time() * (10 ** 6))))\n    if user_entropy:\n        user_entropy = str(user_entropy)\n        seed += user_entropy\n    return cls.from_master_secret(seed, network=network)", "docstring": "Generate a new wallet using a randomly generated 512 bit seed.\n\nArgs:\nuser_entropy: Optional user-supplied entropy which is combined\ncombined with the random seed, to help counteract compromised\nPRNGs.\n\nYou are encouraged to add an optional `user_entropy` string to protect\nagainst a compromised CSPRNG. This will be combined with the output\nfrom the CSPRNG. Note that if you do supply this value it only adds\nadditional entropy and will not be sufficient to recover the random\nwallet. If you're even saving `user_entropy` at all, you're doing it\nwrong.", "source": "codesearchnet"}
{"code": "def wait_for_batches(self, batch_ids, timeout=None):\n    self._batch_tracker.watch_statuses(self, batch_ids)\n    timeout = (timeout or DEFAULT_TIMEOUT)\n    start_time = time()\n    with self._wait_condition:\n        while True:\n            if (self._statuses is not None):\n                return _format_batch_statuses(self._statuses, batch_ids, self._batch_tracker)\n            if ((time() - start_time) > timeout):\n                statuses = self._batch_tracker.get_statuses(batch_ids)\n                return _format_batch_statuses(statuses, batch_ids, self._batch_tracker)\n            self._wait_condition.wait((timeout - (time() - start_time)))", "docstring": "Locks until a list of batch ids is committed to the block chain\nor a timeout is exceeded. Returns the statuses of those batches.\n\nArgs:\nbatch_ids (list of str): The ids of the batches to wait for\ntimeout(int): Maximum time in seconds to wait for\n\nReturns:\nlist of BatchStatus: BatchStatuses to send back to client", "source": "codesearchnet"}
{"code": "def get_unit(self, name):\n        \n        return Unit(client=self, data=self._single_request('Units.Get', unitName=name))", "docstring": "Retreive a specifi unit from the fleet cluster by name\n\nArgs:\nname (str): If specified, only this unit name is returned\n\nReturns:\nUnit: The unit identified by ``name`` in the fleet cluster\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "juraj-google-style"}
{"code": "def resolve_prefix_path(cls, start_path=None):\n        \n        if not start_path or start_path == 'auto':\n            start_path = os.path.curdir\n\n        cur_path = start_path\n        LOGGER.debug('Checking if %s is a prefix', os.path.abspath(cur_path))\n        if cls.is_prefix(cur_path):\n            return os.path.abspath(cur_path)\n\n        \n        cur_path = join(start_path, '.lago')\n        while not cls.is_prefix(cur_path):\n            LOGGER.debug('%s  is not a prefix', cur_path)\n            cur_path = os.path.normpath(\n                os.path.join(cur_path, '..', '..', '.lago')\n            )\n            LOGGER.debug('Checking %s for a prefix', cur_path)\n            if os.path.realpath(join(cur_path, '..')) == '/':\n                raise RuntimeError(\n                    'Unable to find prefix for %s' %\n                    os.path.abspath(start_path)\n                )\n\n        return os.path.abspath(cur_path)", "docstring": "Look for an existing prefix in the given path, in a path/.lago dir, or\nin a .lago dir under any of it's parent directories\n\nArgs:\nstart_path (str): path to start the search from, if None passed, it\nwill use the current dir\n\nReturns:\nstr: path to the found prefix\n\nRaises:\nRuntimeError: if no prefix was found", "source": "juraj-google-style"}
{"code": "def initial_sql(self, value):\n    self._initial_sql = value\n    if (value is None):\n        try:\n            del self._connectionXML.attrib['one-time-sql']\n        except KeyError:\n            pass\n    else:\n        self._connectionXML.set('one-time-sql', value)", "docstring": "Set the connection's initial_sql property.\n\nArgs:\nvalue:  New initial_sql value. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def ExistsWithType(self, urn, aff4_type=None, follow_symlinks=True, age=NEWEST_TIME, token=None):\n    if (not aff4_type):\n        raise ValueError(\"aff4_type can't be None\")\n    try:\n        self.Open(urn, aff4_type=aff4_type, follow_symlinks=follow_symlinks, age=age, token=token)\n        return True\n    except InstantiationError:\n        return False", "docstring": "Checks if an object with a given URN and type exists in the datastore.\n\nArgs:\nurn: The urn to check.\naff4_type: Expected object type.\nfollow_symlinks: If object opened is a symlink, follow it.\nage: The age policy used to check this object. Should be either\nNEWEST_TIME or a time range given as a tuple (start, end) in\nmicroseconds since Jan 1st, 1970.\ntoken: The Security Token to use for opening this item.\n\nRaises:\nValueError: if aff4_type is not specified.\n\nReturns:\nTrue if there's an object with a matching type at a given URN, False\notherwise.", "source": "codesearchnet"}
{"code": "def GenerateCostReport(metagraph, per_node_report=False, verbose=False, cluster=None):\n    if cluster is None:\n        cluster = gcluster.Cluster(disable_detailed_stats=False)\n    return tf_wrap.GenerateCostReport(metagraph.SerializeToString(), per_node_report, verbose, cluster.tf_cluster)", "docstring": "Analyze the cost of each TensorFlow op and node in the provided metagraph.\n\nArgs:\nmetagraph: A TensorFlow MetaGraphDef.\nper_node_report: by default the report contains stats aggregated on a per op\ntype basis, setting per_node_report to True adds results for each\nindividual node to the report.\nverbose: Prints out the entire operation proto instead of a summary table.\ncluster: Analyze the costs using the specified cluster, or the local machine\nif no cluster was specified.\n\nReturns:\nA string of cost report.", "source": "github-repos"}
{"code": "def attach_template(self, _template, _key, **unbound_var_values):\n    if (_key in unbound_var_values):\n        raise ValueError(('%s specified twice.' % _key))\n    unbound_var_values[_key] = self\n    return _DeferredLayer(self.bookkeeper, _template.as_layer().construct, [], unbound_var_values, scope=self._scope, defaults=self._defaults, partial_context=self._partial_context)", "docstring": "Attaches the template to this with the _key is supplied with this layer.\n\nNote: names were chosen to avoid conflicts.\n\nArgs:\n_template: The template to construct.\n_key: The key that this layer should replace.\n**unbound_var_values: The values for the unbound_vars.\nReturns:\nA new layer with operation applied.\nRaises:\nValueError: If _key is specified twice or there is a problem computing the\ntemplate.", "source": "codesearchnet"}
{"code": "def fetch(self, plan_id, data={}, **kwargs):\n    return super(Plan, self).fetch(plan_id, data, **kwargs)", "docstring": "Fetch Plan for given Id\n\nArgs:\nplan_id : Id for which Plan object has to be retrieved\n\nReturns:\nPlan dict for given subscription Id", "source": "codesearchnet"}
{"code": "def get_expectations_config(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):\n    config = dict(self._expectations_config)\n    config = copy.deepcopy(config)\n    expectations = config['expectations']\n    discards = defaultdict(int)\n    if discard_failed_expectations:\n        new_expectations = []\n        for expectation in expectations:\n            if (('success_on_last_run' in expectation) and (expectation['success_on_last_run'] == False)):\n                discards['failed_expectations'] += 1\n            else:\n                new_expectations.append(expectation)\n        expectations = new_expectations\n    for expectation in expectations:\n        if ('success_on_last_run' in expectation):\n            del expectation['success_on_last_run']\n        if discard_result_format_kwargs:\n            if ('result_format' in expectation['kwargs']):\n                del expectation['kwargs']['result_format']\n                discards['result_format'] += 1\n        if discard_include_configs_kwargs:\n            if ('include_configs' in expectation['kwargs']):\n                del expectation['kwargs']['include_configs']\n                discards['include_configs'] += 1\n        if discard_catch_exceptions_kwargs:\n            if ('catch_exceptions' in expectation['kwargs']):\n                del expectation['kwargs']['catch_exceptions']\n                discards['catch_exceptions'] += 1\n    if (not suppress_warnings):\n        '\\nWARNING: get_expectations_config discarded\\n    12 failing expectations\\n    44 result_format kwargs\\n     0 include_config kwargs\\n     1 catch_exceptions kwargs\\nIf you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.\\n            '\n        if any([discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs]):\n            print('WARNING: get_expectations_config discarded')\n            if discard_failed_expectations:\n                print(('\\t%d failing expectations' % discards['failed_expectations']))\n            if discard_result_format_kwargs:\n                print(('\\t%d result_format kwargs' % discards['result_format']))\n            if discard_include_configs_kwargs:\n                print(('\\t%d include_configs kwargs' % discards['include_configs']))\n            if discard_catch_exceptions_kwargs:\n                print(('\\t%d catch_exceptions kwargs' % discards['catch_exceptions']))\n            print('If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.')\n    config['expectations'] = expectations\n    return config", "docstring": "Returns _expectation_config as a JSON object, and perform some cleaning along the way.\n\nArgs:\ndiscard_failed_expectations (boolean): \\\nOnly include expectations with success_on_last_run=True in the exported config.  Defaults to `True`.\ndiscard_result_format_kwargs (boolean): \\\nIn returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.\ndiscard_include_configs_kwargs (boolean): \\\nIn returned expectation objects, suppress the `include_configs` parameter. 
Defaults to `True`.\ndiscard_catch_exceptions_kwargs (boolean): \\\nIn returned expectation objects, suppress the `catch_exceptions` parameter.  Defaults to `True`.\n\nReturns:\nAn expectation config.\n\nNote:\nget_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object.", "source": "codesearchnet"}
{"code": "def recent_all_projects(self, limit=30, offset=0):\n    method = 'GET'\n    url = '/recent-builds?circle-token={token}&limit={limit}&offset={offset}'.format(token=self.client.api_token, limit=limit, offset=offset)\n    json_data = self.client.request(method, url)\n    return json_data", "docstring": "Return information about recent builds across all projects.\n\nArgs:\nlimit (int), Number of builds to return, max=100, defaults=30.\noffset (int): Builds returned from this point, default=0.\n\nReturns:\nA list of dictionaries.", "source": "codesearchnet"}
{"code": "def watch_printer(watch, value):\n    print('({: 8} s) {}: {}'.format(value.raw_time, watch, value.value))", "docstring": "Print a watched value.\n\nArgs:\nwatch (DataStream): The stream that was watched\nvalue (IOTileReading): The value to was seen", "source": "codesearchnet"}
{"code": "def __init__(self, policies=None, database_path=None):\n        \n        self._logger = logging.getLogger('kmip.server.engine')\n\n        self._cryptography_engine = engine.CryptographyEngine()\n\n        self.database_path = 'sqlite:\n        if not database_path:\n            self.database_path = 'sqlite:\n\n        self._data_store = sqlalchemy.create_engine(\n            self.database_path,\n            echo=False,\n            connect_args={'check_same_thread': False}\n        )\n        sqltypes.Base.metadata.create_all(self._data_store)\n        self._data_store_session_factory = sqlalchemy.orm.sessionmaker(\n            bind=self._data_store\n        )\n\n        self._lock = threading.RLock()\n\n        self._id_placeholder = None\n\n        self._protocol_versions = [\n            contents.ProtocolVersion(1, 4),\n            contents.ProtocolVersion(1, 3),\n            contents.ProtocolVersion(1, 2),\n            contents.ProtocolVersion(1, 1),\n            contents.ProtocolVersion(1, 0)\n        ]\n\n        self.default_protocol_version = self._protocol_versions[2]\n        self._protocol_version = self._protocol_versions[2]\n\n        self._object_map = {\n            enums.ObjectType.CERTIFICATE: objects.X509Certificate,\n            enums.ObjectType.SYMMETRIC_KEY: objects.SymmetricKey,\n            enums.ObjectType.PUBLIC_KEY: objects.PublicKey,\n            enums.ObjectType.PRIVATE_KEY: objects.PrivateKey,\n            enums.ObjectType.SPLIT_KEY: None,\n            enums.ObjectType.TEMPLATE: None,\n            enums.ObjectType.SECRET_DATA: objects.SecretData,\n            enums.ObjectType.OPAQUE_DATA: objects.OpaqueObject\n        }\n\n        self._attribute_policy = policy.AttributePolicy(self._protocol_version)\n        self._operation_policies = policies\n        self._client_identity = [None, None]", "docstring": "Create a KmipEngine.\n\nArgs:\npolicy_path (string): The path to the filesystem directory\ncontaining PyKMIP server operation policy JSON files.\nOptional, defaults to None.\ndatabase_path (string): The path to the SQLite database file\nused to store all server data. Optional, defaults to None.\nIf none, database path defaults to '/tmp/pykmip.database'.", "source": "juraj-google-style"}
{"code": "def set_property(self, key, value):\n        \n        value_type = type(value)\n        if value_type not in [str, int, bool]:\n            raise NotImplementedError(\n                'Only string, integer, and boolean properties are implemented')\n\n        key_object = self.properties.findChild(name='key', text=key)\n\n        \n        if key_object is None:\n            key_object = self.soup.new_tag('key')\n            key_object.string = key\n\n            self.properties.append(key_object)\n\n            value_object = self.soup.new_tag(\n                {str: 'string', int: 'integer', bool: str(value).lower()}[\n                    value_type])\n            if value_type is not bool:\n                value_object.string = str(value)\n\n            self.properties.append(value_object)\n\n            return\n\n        \n        \n        \n        value_object = key_object.find_next_sibling()\n\n        key_object.decompose()\n        value_object.decompose()\n\n        self.set_property(key, value)", "docstring": "Set a new (or updating existing) key value pair.\n\nArgs:\nkey: A string containing the key namespace\nvalue: A str, int, or bool value\n\nRaises:\nNotImplementedError: an unsupported value-type was provided", "source": "juraj-google-style"}
{"code": "def local_file(self, filename):\n        \n        LOG.info('Retrieving \"%s\" from \"%s\".', filename, self.runway_dir)\n\n        file_contents = ''\n\n        file_path = os.path.join(self.runway_dir, filename)\n\n        try:\n            with open(file_path, 'rt') as lookup_file:\n                file_contents = lookup_file.read()\n        except FileNotFoundError:\n            LOG.warning('File missing \"%s\".', file_path)\n            raise\n\n        LOG.debug('Local file contents:\\n%s', file_contents)\n        return file_contents", "docstring": "Read the local file in _self.runway_dir_.\n\nArgs:\nfilename (str): Name of file to retrieve relative to root of\n_runway_dir_.\n\nReturns:\nstr: Contents of local file.\n\nRaises:\nFileNotFoundError: Requested file missing.", "source": "juraj-google-style"}
{"code": "def _test_or_class_decorator(test_or_class, single_method_decorator):\n\n    def _decorate_test_or_class(obj):\n        if isinstance(obj, collections.abc.Iterable):\n            return itertools.chain.from_iterable((single_method_decorator(method) for method in obj))\n        if isinstance(obj, type):\n            cls = obj\n            for name, value in cls.__dict__.copy().items():\n                if callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix):\n                    setattr(cls, name, single_method_decorator(value))\n            cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy())\n            return cls\n        return single_method_decorator(obj)\n    if test_or_class is not None:\n        return _decorate_test_or_class(test_or_class)\n    return _decorate_test_or_class", "docstring": "Decorate a test or class with a decorator intended for one method.\n\nIf the test_or_class is a class:\nThis will apply the decorator to all test methods in the class.\n\nIf the test_or_class is an iterable of already-parameterized test cases:\nThis will apply the decorator to all the cases, and then flatten the\nresulting cross-product of test cases. This allows stacking the Keras\nparameterized decorators w/ each other, and to apply them to test methods\nthat have already been marked with an absl parameterized decorator.\n\nOtherwise, treat the obj as a single method and apply the decorator directly.\n\nArgs:\ntest_or_class: A test method (that may have already been decorated with a\nparameterized decorator, or a test class that extends\nkeras_parameterized.TestCase\nsingle_method_decorator:\nA parameterized decorator intended for a single test method.\nReturns:\nThe decorated result.", "source": "github-repos"}
{"code": "def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=0.01):\n    if (not self.structure):\n        return None\n    sg = SpacegroupAnalyzer(self.structure)\n    symmops = sg.get_point_group_operations(cartesian=cartesian)\n    points = np.dot(kpoint, [m.rotation_matrix for m in symmops])\n    rm_list = []\n    for i in range((len(points) - 1)):\n        for j in range((i + 1), len(points)):\n            if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):\n                rm_list.append(i)\n                break\n    return np.delete(points, rm_list, axis=0)", "docstring": "Returns a list of unique symmetrically equivalent k-points.\n\nArgs:\nkpoint (1x3 array): coordinate of the k-point\ncartesian (bool): kpoint is in cartesian or fractional coordinates\ntol (float): tolerance below which coordinates are considered equal\n\nReturns:\n([1x3 array] or None): if structure is not available returns None", "source": "codesearchnet"}
{"code": "def __init__(self, vs):\n    \n    shape = Shape([Dimension(\"stacked\", len(vs))] + vs[0].shape.dims)\n    name = \"stacked/\" + vs[0].name\n    \n    super(StackedVariable, self).__init__(\n        vs[0].mesh, name, shape, vs[0].dtype, None, vs[0].trainable)\n    self._name = name\n    self._masters = [v.get_master() for v in vs]\n    self._original_names = [v.name for v in vs]\n\n    \n    self._splittable_dims, self._unsplittable_dims = (\n        self._initialize_all_dimensions_as_splittable())", "docstring": "Create a StackedVariable.\n\nArgs:\nvs: a list of Variables", "source": "juraj-google-style"}
{"code": "def get_bonded_structure(self, structure, decorate=False):\n        \n\n        \n        from pymatgen.analysis.graphs import StructureGraph\n\n        if decorate:\n            \n            \n            \n            \n            order_parameters = [self.get_local_order_parameters(structure, n)\n                                for n in range(len(structure))]\n            structure.add_site_property('order_parameters', order_parameters)\n\n        sg = StructureGraph.with_local_env_strategy(structure, self)\n\n        return sg", "docstring": "Obtain a StructureGraph object using this NearNeighbor\nclass. Requires the optional dependency networkx\n(pip install networkx).\n\nArgs:\nstructure: Structure object.\ndecorate (bool): whether to annotate site properties\nwith order parameters using neighbors determined by\nthis NearNeighbor class\n\nReturns: a pymatgen.analysis.graphs.BondedStructure object", "source": "juraj-google-style"}
{"code": "def parse_variable(self, variable):\n        \n        data = None\n        if variable is not None:\n            variable = variable.strip()\n            if re.match(self._variable_match, variable):\n                var = re.search(self._variable_parse, variable)\n                data = {\n                    'root': var.group(0),\n                    'job_id': var.group(2),\n                    'name': var.group(3),\n                    'type': var.group(4),\n                }\n        return data", "docstring": "Method to parse an input or output variable.\n\n**Example Variable**::\n\n#App:1234:output!String\n\nArgs:\nvariable (string): The variable name to parse.\n\nReturns:\n(dictionary): Result of parsed string.", "source": "juraj-google-style"}
{"code": "def parse_date_range(date, alt_end_date=None):\n    \n    NOT_ENDED = \"9999\"\n    all_years = re.findall(r\"\\d{4}\", date)\n\n    if alt_end_date:\n        NOT_ENDED = alt_end_date\n\n    if not all_years:\n        return \"****\", NOT_ENDED\n\n    elif len(all_years) == 1:\n        return all_years[0], NOT_ENDED\n\n    return all_years[0], all_years[1]", "docstring": "Parse input `date` string in free-text format for four-digit long groups.\n\nArgs:\ndate (str): Input containing years.\n\nReturns:\ntuple: ``(from, to)`` as four-digit strings.", "source": "juraj-google-style"}
{"code": "def update_port(self, port_information, id_or_uri, timeout=-1):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/ports\"\n        return self._client.update(port_information, uri, timeout)", "docstring": "Updates an interconnect port.\n\nArgs:\nid_or_uri: Can be either the interconnect id or the interconnect uri.\nport_information (dict): object to update\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: The interconnect.", "source": "juraj-google-style"}
{"code": "def FromPath(cls, path, follow_symlink = True):\n    \n    \n    \n    \n    \n    \n    \n    precondition.AssertType(follow_symlink, bool)\n\n    if follow_symlink:\n      stat_obj = os.stat(path)\n    else:\n      stat_obj = os.lstat(path)\n\n    return cls(path=path, stat_obj=stat_obj)", "docstring": "Returns stat information about the given OS path, calling os.[l]stat.\n\nArgs:\npath: A path to perform `stat` on.\nfollow_symlink: True if `stat` of a symlink should be returned instead of\na file that it points to. For non-symlinks this setting has no effect.\n\nReturns:\nStat instance, with information about the given path.", "source": "juraj-google-style"}
{"code": "def get_fields(model_class):\n    return [attr for (attr, value) in model_class.__dict__.items() if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField))]", "docstring": "Pass in a mongo model class and extract all the attributes which\nare mongoengine fields\n\nReturns:\nlist of strings of field attributes", "source": "codesearchnet"}
{"code": "def within(self, other: \"Interval\", inclusive: bool = True) -> bool:\n        \n        if not other:\n            return False\n        if inclusive:\n            return self.start >= other.start and self.end <= other.end\n        else:\n            return self.start > other.start and self.end < other.end", "docstring": "Is this interval contained within the other?\n\nArgs:\nother: the :class:`Interval` to check\ninclusive: use inclusive rather than exclusive range checks?", "source": "juraj-google-style"}
{"code": "class DFineIntegral(nn.Module):\n\n    def __init__(self, config: DFineConfig):\n        super().__init__()\n        self.max_num_bins = config.max_num_bins\n\n    def forward(self, pred_corners: torch.Tensor, project: torch.Tensor) -> torch.Tensor:\n        batch_size, num_queries, _ = pred_corners.shape\n        pred_corners = F.softmax(pred_corners.reshape(-1, self.max_num_bins + 1), dim=1)\n        pred_corners = F.linear(pred_corners, project.to(pred_corners.device)).reshape(-1, 4)\n        pred_corners = pred_corners.reshape(batch_size, num_queries, -1)\n        return pred_corners", "docstring": "A static layer that calculates integral results from a distribution.\n\nThis layer computes the target location using the formula: `sum{Pr(n) * W(n)}`,\nwhere Pr(n) is the softmax probability vector representing the discrete\ndistribution, and W(n) is the non-uniform Weighting Function.\n\nArgs:\nmax_num_bins (int): Max number of the discrete bins. Default is 32.\nIt can be adjusted based on the dataset or task requirements.", "source": "github-repos"}
{"code": "def func_load(code, defaults=None, closure=None, globs=None):\n    if isinstance(code, (tuple, list)):\n        code, defaults, closure = code\n        if isinstance(defaults, list):\n            defaults = tuple(defaults)\n\n    def ensure_value_to_cell(value):\n        \n\n        def dummy_fn():\n            value\n        cell_value = dummy_fn.__closure__[0]\n        if not isinstance(value, type(cell_value)):\n            return cell_value\n        return value\n    if closure is not None:\n        closure = tuple((ensure_value_to_cell(_) for _ in closure))\n    try:\n        raw_code = codecs.decode(code.encode('ascii'), 'base64')\n    except (UnicodeEncodeError, binascii.Error):\n        raw_code = code.encode('raw_unicode_escape')\n    code = marshal.loads(raw_code)\n    if globs is None:\n        globs = globals()\n    return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure)", "docstring": "Deserializes a user defined function.\n\nArgs:\ncode: bytecode of the function.\ndefaults: defaults of the function.\nclosure: closure of the function.\nglobs: dictionary of global objects.\n\nReturns:\nA function object.", "source": "github-repos"}
{"code": "def test_batch_sample_paths_2d(self, batch_rank):\n    dtype = tf.float64\n    mu = np.array([0.2, 0.7])\n    a = np.array([[0.4, 0.1], [0.3, 0.2]])\n    b = np.array([[0.33, -0.03], [0.21, 0.5]])\n\n    def drift_fn(t, x):\n        return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)\n\n    def vol_fn(t, x):\n        return (a * t + b) * tf.ones(x.shape.as_list() + [2], dtype=t.dtype)\n    process = tff.models.GenericItoProcess(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=dtype)\n    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])\n    x0 = np.array([0.1, -1.1]) * np.ones([2] * batch_rank + [1, 2])\n    times_grid = None\n    time_step = 0.01\n    num_samples = 10000\n    normal_draws = None\n    paths = self.evaluate(process.sample_paths(times, num_samples=num_samples, initial_state=x0, time_step=time_step, times_grid=times_grid, normal_draws=normal_draws, seed=12134))\n    num_samples = 10000\n    self.assertAllClose(list(paths.shape), [2] * batch_rank + [num_samples, 5, 2], atol=0)\n    means = np.mean(paths, axis=batch_rank)\n    times = np.reshape(times, [1] * batch_rank + [-1, 1])\n    expected_means = np.reshape(x0, [2] * batch_rank + [1, 2]) + 2.0 / 3.0 * mu * np.power(times, 1.5)\n    self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)", "docstring": "Tests path properties for a batch of 2-dimentional Ito process.\n\nWe construct the following Ito processes.\n\ndX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2\ndX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2\n\nmu_1, mu_2 are constants.\ns_ij = a_ij t + b_ij\n\nFor this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.\n\nArgs:\nbatch_rank: The rank of the batch of processes being simulated.", "source": "github-repos"}
{"code": "def __checkDecisionParameters(self, result, **values):\n    error = []\n    if (not result):\n        error.append('Function parameter (result array) should contain one or more header string!')\n    if (not values):\n        error.append('Function parameter (values variables) should contain one or more variable')\n    for header in result:\n        if (not (header in self.header)):\n            error.append((('String (' + header) + ') in result is not in header!'))\n    for header in values:\n        if (not (header in self.header)):\n            error.append((('Variable (' + header) + ') in values is not in header!'))\n        elif (not values[header].split()):\n            error.append((('Variable (' + header) + ') in values is empty string'))\n    if error:\n        return error", "docstring": "Checker of decision parameters, it will raise ValueError if finds something wrong.\n\nArgs:\nresult (array of str): See public decision methods\n**values (array of str): See public decision methods\n\nRaise:\nValueError: Result array none.\nValueError: Values dict none.\nValueError: Not find result key in header.\nValueError: Result value is empty.\n\nReturns:\nError array values", "source": "codesearchnet"}
{"code": "def split_identifiers(identifiers=[], proportions={}):\n    abs_proportions = absolute_proportions(proportions, len(identifiers))\n    parts = {}\n    start_index = 0\n    for (idx, proportion) in abs_proportions.items():\n        parts[idx] = identifiers[start_index:(start_index + proportion)]\n        start_index += proportion\n    return parts", "docstring": "Split the given identifiers by the given proportions.\n\nArgs:\nidentifiers (list): List of identifiers (str).\nproportions (dict): A dictionary containing the proportions with the identifier from the\ninput as key.\n\nReturns:\ndict: Dictionary containing a list of identifiers per part with the same key as the\nproportions dict.\n\nExample::\n\n>>> split_identifiers(\n>>>     identifiers=['a', 'b', 'c', 'd'],\n>>>     proportions={'melvin' : 0.5, 'timmy' : 0.5}\n>>> )\n{'melvin' : ['a', 'c'], 'timmy' : ['b', 'd']}", "source": "codesearchnet"}
{"code": "def set_rollover(self, area, enabled):\n    if (area == u'streaming'):\n        self._rollover_streaming = enabled\n    elif (area == u'storage'):\n        self._rollover_storage = enabled\n    else:\n        raise ArgumentError(\"You must pass one of 'storage' or 'streaming' to set_rollover\", area=area)", "docstring": "Configure whether rollover is enabled for streaming or storage streams.\n\nNormally a SensorLog is used in ring-buffer mode which means that old\nreadings are automatically overwritten as needed when new data is saved.\n\nHowever, you can configure it into fill-stop mode by using:\nset_rollover(\"streaming\"|\"storage\", True|False)\n\nBy default rollover is set to True for both streaming and storage and can\nbe controlled individually for each one.\n\nArgs:\narea (str): Either streaming or storage.\nenabled (bool): Whether to enable or disable rollover.", "source": "codesearchnet"}
{"code": "def validate_variable_name(self, name):\n    if (not name):\n        raise SerializerError('Variable name is empty'.format(name))\n    if (name[0] not in PROPERTY_ALLOWED_START):\n        msg = \"Variable name '{}' must starts with a letter\"\n        raise SerializerError(msg.format(name))\n    for item in name:\n        if (item not in PROPERTY_ALLOWED_CHARS):\n            msg = \"Invalid variable name '{}': it must only contains letters, numbers and '_' character\"\n            raise SerializerError(msg.format(name))\n    return True", "docstring": "Validate variable name.\n\nArguments:\nname (string): Property name.\n\nReturns:\nbool: ``True`` if variable name is valid.", "source": "codesearchnet"}
{"code": "def set_weights(self, new_weights):\n    self._check_sess()\n    assign_list = [self.assignment_nodes[name] for name in new_weights.keys() if (name in self.assignment_nodes)]\n    assert assign_list, 'No variables in the input matched those in the network. Possible cause: Two networks were defined in the same TensorFlow graph. To fix this, place each network definition in its own tf.Graph.'\n    self.sess.run(assign_list, feed_dict={self.placeholders[name]: value for (name, value) in new_weights.items() if (name in self.placeholders)})", "docstring": "Sets the weights to new_weights.\n\nNote:\nCan set subsets of variables as well, by only passing in the\nvariables you want to be set.\n\nArgs:\nnew_weights (Dict): Dictionary mapping variable names to their\nweights.", "source": "codesearchnet"}
{"code": "def play_mp3(self, mp3=None, data=None, block=True):\n        \n        if platform.machine() == 'mips':\n            command = 'madplay -o wave:- - | aplay -M'\n        else:\n            command = 'ffplay -autoexit -nodisp -'\n\n        if mp3:\n            def gen(m):\n                with open(m, 'rb') as f:\n                    d = f.read(1024)\n                    while d:\n                        yield d\n                        d = f.read(1024)\n\n            data = gen(mp3)\n\n        if isinstance(data, types.GeneratorType):\n            p = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True)\n            for d in data:\n                p.stdin.write(d)\n\n            p.stdin.close()\n        else:\n            with tempfile.NamedTemporaryFile(mode='w+b') as f:\n                f.write(data)\n                f.flush()\n                f.seek(0)\n                p = subprocess.Popen(command, stdin=f, shell=True)\n\n        if block:\n            p.wait()", "docstring": "It supports GeneratorType mp3 stream or mp3 data string\nArgs:\nmp3: mp3 file\ndata: mp3 generator or data\nblock: if true, block until audio is played.", "source": "juraj-google-style"}
{"code": "def _try_recover(self, trial, error_msg):\n        \n        try:\n            self.trial_executor.stop_trial(\n                trial,\n                error=error_msg is not None,\n                error_msg=error_msg,\n                stop_logger=False)\n            trial.result_logger.flush()\n            if self.trial_executor.has_resources(trial.resources):\n                logger.info(\"Attempting to recover\"\n                            \" trial state from last checkpoint.\")\n                self.trial_executor.start_trial(trial)\n                if trial.status == Trial.ERROR:\n                    raise RuntimeError(\"Trial did not start correctly.\")\n            else:\n                logger.debug(\"Notifying Scheduler and requeueing trial.\")\n                self._requeue_trial(trial)\n        except Exception:\n            logger.exception(\"Error recovering trial from checkpoint, abort.\")\n            self._scheduler_alg.on_trial_error(self, trial)\n            self._search_alg.on_trial_complete(trial.trial_id, error=True)", "docstring": "Tries to recover trial.\n\nNotifies SearchAlgorithm and Scheduler if failure to recover.\n\nArgs:\ntrial (Trial): Trial to recover.\nerror_msg (str): Error message from prior to invoking this method.", "source": "juraj-google-style"}
{"code": "def set_available(self, show=None):\n        \n        show = self.state.show if show is None else show\n        self.set_presence(PresenceState(available=True, show=show))", "docstring": "Sets the agent availability to True.\n\nArgs:\nshow (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)", "source": "juraj-google-style"}
{"code": "def update_serial(self, new_serial):\n    new_serial = str(new_serial)\n    if self.has_active_service:\n        raise DeviceError(self, 'Cannot change device serial number when there is service running.')\n    if (self._debug_tag == self.serial):\n        self._debug_tag = new_serial\n    self._serial = new_serial\n    self.adb.serial = new_serial\n    self.fastboot.serial = new_serial", "docstring": "Updates the serial number of a device.\n\nThe \"serial number\" used with adb's `-s` arg is not necessarily the\nactual serial number. For remote devices, it could be a combination of\nhost names and port numbers.\n\nThis is used for when such identifier of remote devices changes during\na test. For example, when a remote device reboots, it may come back\nwith a different serial number.\n\nThis is NOT meant for switching the object to represent another device.\n\nWe intentionally did not make it a regular setter of the serial\nproperty so people don't accidentally call this without understanding\nthe consequences.\n\nArgs:\nnew_serial: string, the new serial number for the same device.\n\nRaises:\nDeviceError: tries to update serial when any service is running.", "source": "codesearchnet"}
{"code": "def __init__(self, protocol):\n        \n        self._protocol = protocol\n        self._current_consumer = self._HEADER\n        self._message = None\n        self._buf_header = None", "docstring": "Configure a Receiver with a specific Bokeh protocol version.\n\nArgs:\nprotocol (Protocol) :\nA Bokeh protocol object to use to assemble collected message\nfragments.", "source": "juraj-google-style"}
{"code": "def Get(self, project_id):\n    if (project_id in self._emulators):\n        return self._emulators[project_id]\n    emulator = self.Create(project_id)\n    self._emulators[project_id] = emulator\n    return emulator", "docstring": "Returns an existing emulator instance for the provided project_id.\n\nIf an emulator instance doesn't yet exist, it creates one.\n\nArgs:\nproject_id: project ID\n\nReturns:\na DatastoreEmulator", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter(\n          'Invalid event object - unsupported data type: {0:s}'.format(\n              event.data_type))\n\n    event_values = event.CopyToDict()\n\n    number_of_volumes = event_values.get('number_of_volumes', 0)\n    volume_serial_numbers = event_values.get('volume_serial_numbers', None)\n    volume_device_paths = event_values.get('volume_device_paths', None)\n    volumes_strings = []\n    for volume_index in range(0, number_of_volumes):\n      if not volume_serial_numbers:\n        volume_serial_number = 'UNKNOWN'\n      else:\n        volume_serial_number = volume_serial_numbers[volume_index]\n\n      if not volume_device_paths:\n        volume_device_path = 'UNKNOWN'\n      else:\n        volume_device_path = volume_device_paths[volume_index]\n\n      volumes_strings.append((\n          'volume: {0:d} [serial number: 0x{1:08X}, device path: '\n          '{2:s}]').format(\n              volume_index + 1, volume_serial_number, volume_device_path))\n\n    if volumes_strings:\n      event_values['volumes_string'] = ', '.join(volumes_strings)\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def encode_dict(values_dict):\n    \n    return {key: encode_value(value) for key, value in six.iteritems(values_dict)}", "docstring": "Encode a dictionary into protobuf ``Value``-s.\n\nArgs:\nvalues_dict (dict): The dictionary to encode as protobuf fields.\n\nReturns:\nDict[str, ~google.cloud.firestore_v1beta1.types.Value]: A\ndictionary of string keys and ``Value`` protobufs as dictionary\nvalues.", "source": "juraj-google-style"}
{"code": "def _ensure_tuple(item):\n    if isinstance(item, tuple):\n        return item\n    elif isinstance(item, list):\n        return tuple(item)\n    elif isinstance(item, np.ndarray):\n        return tuple(item.tolist())\n    else:\n        raise NotImplementedError", "docstring": "Simply ensure that the passed item is a tuple.  If it is not, then\nconvert it if possible, or raise a NotImplementedError\n\nArgs:\nitem: the item that needs to become a tuple\n\nReturns:\nthe item casted as a tuple\n\nRaises:\nNotImplementedError: if converting the given item to a tuple\nis not implemented.", "source": "codesearchnet"}
{"code": "def base64url_decode(input):\n    rem = (len(input) % 4)\n    if (rem > 0):\n        input += (b'=' * (4 - rem))\n    return base64.urlsafe_b64decode(input)", "docstring": "Helper method to base64url_decode a string.\n\nArgs:\ninput (str): A base64url_encoded string to decode.", "source": "codesearchnet"}
{"code": "def data_filter(self, data):\n    try:\n        from .tcex_data_filter import DataFilter\n        return DataFilter(self, data)\n    except ImportError as e:\n        warn = u'Required Module is not installed ({}).'.format(e)\n        self.log.warning(warn)", "docstring": "Return an instance of the Data Filter Class.\n\nA simple helper module to filter results from ThreatConnect API or other data\nsource.  For example if results need to be filtered by an unsupported field the module\nallows you to pass the data array/list in and specify one or more filters to get just the\nresults required.\n\nArgs:\ndata (list): The list of dictionary structure to filter.\n\nReturns:\n(object): An instance of DataFilter Class", "source": "codesearchnet"}
{"code": "def average_coordination_number(structures, freq=10):\n    \n    coordination_numbers = {}\n    for spec in structures[0].composition.as_dict().keys():\n        coordination_numbers[spec] = 0.0\n    count = 0\n    for t in range(len(structures)):\n        if t % freq != 0:\n            continue\n        count += 1\n        vnn = VoronoiNN()\n        for atom in range(len(structures[0])):\n            cn = vnn.get_cn(structures[t], atom, use_weights=True)\n            coordination_numbers[structures[t][atom].species_string] += cn\n    elements = structures[0].composition.as_dict()\n    for el in coordination_numbers:\n        coordination_numbers[el] = coordination_numbers[el] / elements[\n            el] / count\n    return coordination_numbers", "docstring": "Calculates the ensemble averaged Voronoi coordination numbers\nof a list of Structures using VoronoiNN.\nTypically used for analyzing the output of a Molecular Dynamics run.\nArgs:\nstructures (list): list of Structures.\nfreq (int): sampling frequency of coordination number [every freq steps].\nReturns:\nDictionary of elements as keys and average coordination numbers as values.", "source": "juraj-google-style"}
{"code": "def _try_refresh_access_token(self) -> None:\n        \n        if self.refresh_token:\n            if not self.access_token or self._is_access_token_expired():\n                self.access_token, self.access_expiration = self._get_access_from_refresh()\n                self.access_expiration = time.time() + self.access_expiration", "docstring": "Attempts to get a new access token using the refresh token, if needed.\n\nIf the access token is expired and this instance has a stored refresh token,\nthen the refresh token is in the API call to get a new access token. If\nsuccessful, this instance is modified in-place with that new access token.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def double(self, count=0):\n    return 2 * count", "docstring": "Returns the input multiplied by 2.\n\nArgs:\ncount: Input number that you want to double.\n\nReturns:\nA number that is the double of count.", "source": "github-repos"}
{"code": "def get_cpus_by_arch(cls, arch):\n        \n\n        with open('/usr/share/libvirt/cpu_map.xml', 'r') as cpu_map:\n            cpu_xml = ET.parse(cpu_map)\n        try:\n            return cpu_xml.xpath('/cpus/arch[@name=\"{0}\"]'.format(arch))[0]\n        except IndexError:\n            raise LagoException('No such arch: {0}'.format(arch))", "docstring": "Get all CPUs info by arch\n\nArgs:\narch(str): CPU architecture\n\nReturns:\nlxml.etree.element: CPUs by arch XML\n\nRaises:\n:exc:`~LagoException`: If no such ARCH is found", "source": "juraj-google-style"}
{"code": "def _ParseQuery(self, parser_mediator, database, query, callback, cache):\n    row_cache = cache.GetRowCache(query)\n    try:\n        rows = database.Query(query)\n    except sqlite3.DatabaseError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to run query: {0:s} on database with error: {1!s}'.format(query, exception))\n        return\n    for (index, row) in enumerate(rows):\n        if parser_mediator.abort:\n            break\n        row_hash = self._HashRow(row)\n        if (row_hash in row_cache):\n            continue\n        try:\n            callback(parser_mediator, query, row, cache=cache, database=database)\n        except Exception as exception:\n            parser_mediator.ProduceExtractionWarning('unable to parse row: {0:d} with callback: {1:s} on database with error: {2!s}'.format(index, callback.__name__, exception))\n            return\n        row_cache.add(row_hash)", "docstring": "Queries a database and parses the results.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ndatabase (SQLiteDatabase): database.\nquery (str): query.\ncallback (function): function to invoke to parse an individual row.\ncache (SQLiteCache): cache.", "source": "codesearchnet"}
{"code": "def wsgi_simple_responder(\n        result: Union[str, bytes],\n        handler: Callable[[Union[str, bytes]], WSGI_TUPLE_TYPE],\n        start_response: TYPE_WSGI_START_RESPONSE,\n        status: str = '200 OK',\n        extraheaders: TYPE_WSGI_RESPONSE_HEADERS = None) \\\n        -> TYPE_WSGI_APP_RESULT:\n    \n    extraheaders = extraheaders or []\n    (contenttype, extraheaders2, output) = handler(result)\n    response_headers = [('Content-Type', contenttype),\n                        ('Content-Length', str(len(output)))]\n    response_headers.extend(extraheaders)\n    if extraheaders2 is not None:\n        response_headers.extend(extraheaders2)\n    \n    start_response(status, response_headers)\n    return [output]", "docstring": "Simple WSGI app.\n\nArgs:\nresult: the data to be processed by ``handler``\nhandler: a function returning a ``(contenttype, extraheaders, data)``\ntuple, e.g. ``text_result``, ``html_result``\nstart_response: standard WSGI ``start_response`` function\nstatus: status code (default ``\"200 OK\"``)\nextraheaders: optional extra HTTP headers\n\nReturns:\nWSGI application result", "source": "juraj-google-style"}
{"code": "def post_path(self, path: str, path_data: Union[(dict, None)], post_data: Any) -> dict:\n    path = self._insert_vars(path, (path_data or {}))\n    path = (self.BASE_URL + path)\n    self._try_refresh_access_token()\n    return self.session.post(path, json=post_data).json()", "docstring": "Modifies the ESI by an endpoint URL.\n\nThis method is not marked \"private\" as it _can_ be used\nby consuming code, but it's probably easier to call the\n`get_op` method instead.\n\nArgs:\npath: raw ESI URL path\npath_data: data to format the path with (can be None)\npost_data: data to send to ESI\n\nReturns:\nESI data", "source": "codesearchnet"}
{"code": "def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE):\n    unconnected_gradients = UnconnectedGradients(unconnected_gradients)\n    if self._accumulator is None:\n        raise ValueError('Called jvp() without first tracing anything.')\n\n    def _fetch_jvp(tensor):\n        if hasattr(tensor, 'handle'):\n            unwrapped_tensor = ops.convert_to_tensor(tensor.handle)\n        else:\n            unwrapped_tensor = tensor\n        result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator, unwrapped_tensor)\n        if result is None and unconnected_gradients == UnconnectedGradients.ZERO:\n            result = array_ops.zeros_like(tensor)\n        return result\n    return nest.map_structure(_fetch_jvp, primals)", "docstring": "Fetches the Jacobian-vector product computed for `primals`.\n\nNote that this method performs no computation, and simply looks up a JVP\nthat was already computed (unlike backprop using a `tf.GradientTape`, where\nthe computation happens on the call to `tape.gradient`).\n\nArgs:\nprimals: A watched Tensor or structure of Tensors to fetch the JVPs for.\nunconnected_gradients: A value which can either hold 'none' or 'zero' and\nalters the value which will be returned if no JVP was computed for\n`primals`. The possible values and effects are detailed in\n'tf.UnconnectedGradients' and it defaults to 'none'.\n\nReturns:\nTensors with the same shapes and dtypes as `primals`, or None if no JVP\nis available.", "source": "github-repos"}
{"code": "async def call(self, methname, *args, **kwargs):\n    todo = (methname, args, kwargs)\n    return (await self.task(todo))", "docstring": "Call a remote method by name.\n\nArgs:\nmethname (str): The name of the remote method.\n*args: Arguments to the method call.\n**kwargs: Keyword arguments to the method call.\n\nMost use cases will likely use the proxy methods directly:\n\nThe following two are effectively the same:\n\nvalu = proxy.getFooBar(x, y)\nvalu = proxy.call('getFooBar', x, y)", "source": "codesearchnet"}
{"code": "def FromEncoded(self, encoded):\n    stream_type = ((encoded >> 12) & 15)\n    stream_system = bool((encoded & (1 << 11)))\n    stream_id = (encoded & ((1 << 11) - 1))\n    return DataStream(stream_type, stream_id, stream_system)", "docstring": "Create a DataStream from an encoded 16-bit unsigned integer.\n\nReturns:\nDataStream: The decoded DataStream object", "source": "codesearchnet"}
{"code": "def lineReceived(self, line):\n        \n        while self._in_header:\n            if line:\n                self._headers.append(line)\n            else:\n                http, status, message = self._headers[0].split(\" \", 2)\n                status = int(status)\n                if status == 200:\n                    self.factory.get_stream().connected()\n                else:\n                    self.factory.continueTrying = 0\n                    self.transport.loseConnection()\n                    self.factory.get_stream().disconnected(RuntimeError(status, message))\n                    return\n\n                self._in_header = False\n            break\n        else:\n            try:\n                self._len_expected = int(line, 16)\n                self.setRawMode()\n            except:\n                pass", "docstring": "Callback issued by twisted when new line arrives.\n\nArgs:\nline (str): Incoming line", "source": "juraj-google-style"}
{"code": "def load(self, context):\n    \n    if not (context.flags.debugger_data_server_grpc_port > 0 or\n            context.flags.debugger_port > 0):\n      return None\n    flags = context.flags\n    try:\n      \n      import tensorflow\n    except ImportError:\n      raise ImportError(\n          'To use the debugger plugin, you need to have TensorFlow installed:\\n'\n          '  pip install tensorflow')\n    try:\n      \n      from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib\n      from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib\n      \n    except ImportError as e:\n      e_type, e_value, e_traceback = sys.exc_info()\n      message = e.msg if hasattr(e, 'msg') else e.message  \n      if 'grpc' in message:\n        e_value = ImportError(\n            message +\n            '\\n\\nTo use the debugger plugin, you need to have '\n            'gRPC installed:\\n  pip install grpcio')\n      six.reraise(e_type, e_value, e_traceback)\n    if flags.debugger_port > 0:\n      interactive_plugin = (\n          interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context))\n      logger.info('Starting Interactive Debugger Plugin at gRPC port %d',\n                   flags.debugger_data_server_grpc_port)\n      interactive_plugin.listen(flags.debugger_port)\n      return interactive_plugin\n    elif flags.debugger_data_server_grpc_port > 0:\n      noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)\n      logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d',\n                   flags.debugger_data_server_grpc_port)\n      noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)\n      return noninteractive_plugin\n    raise AssertionError()", "docstring": "Returns the debugger plugin, if possible.\n\nArgs:\ncontext: The TBContext flags including `add_arguments`.\n\nReturns:\nA DebuggerPlugin instance or None if it couldn't be loaded.", "source": "juraj-google-style"}
{"code": "def ReplaceAll(pattern, rep, s):\n    if (pattern not in _regexp_compile_cache):\n        _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n    return _regexp_compile_cache[pattern].sub(rep, s)", "docstring": "Replaces instances of pattern in a string with a replacement.\n\nThe compiled regex is kept in a cache shared by Match and Search.\n\nArgs:\npattern: regex pattern\nrep: replacement text\ns: search string\n\nReturns:\nstring with replacements made (or original string if no replacements)", "source": "codesearchnet"}
{"code": "class Distribution:\n\n    def __init__(self, device_mesh, batch_dim_name=None):\n        self._device_mesh = device_mesh\n        self._batch_dim_name = batch_dim_name\n\n    def get_data_layout(self, data_shape):\n        \n        raise NotImplementedError()\n\n    def get_variable_layout(self, variable):\n        \n        raise NotImplementedError()\n\n    def get_tensor_layout(self, path):\n        \n        raise NotImplementedError()\n\n    @contextlib.contextmanager\n    def scope(self):\n        \n        original_scope = distribution()\n        set_distribution(self)\n        try:\n            yield\n        finally:\n            set_distribution(original_scope)\n\n    @property\n    def device_mesh(self):\n        return self._device_mesh\n\n    @property\n    def batch_dim_name(self):\n        return self._batch_dim_name\n\n    def distribute_dataset(self, dataset):\n        \n        raise NotImplementedError()\n\n    def __repr__(self):\n        return f'<{self.__class__.__name__} device_mesh={self.device_mesh}>'\n\n    def __str__(self):\n        return self.__repr__()", "docstring": "Base class for variable distribution strategies.\n\nA `Distribution` has following key functionalities:\n\n1. Distribute the model variables to a `DeviceMesh`.\n2. Distribute the input data to a `DeviceMesh`.\n3. Distribute an intermediate state tensor in the model.\n\nIt can create a context scope so that the framework to properly detect the\n`Distribution` and distribute the variable/data accordingly.\n\nArgs:\ndevice_mesh: A `DeviceMesh` instance.", "source": "github-repos"}
{"code": "def transform_qubits(self: TSelf_Operation, func: Callable[([Qid], Qid)]) -> TSelf_Operation:\n    return self.with_qubits(*(func(q) for q in self.qubits))", "docstring": "Returns the same operation, but with different qubits.\n\nArgs:\nfunc: The function to use to turn each current qubit into a desired\nnew qubit.\n\nReturns:\nThe receiving operation but with qubits transformed by the given\nfunction.", "source": "codesearchnet"}
{"code": "def get_creds(use_personal_account: bool, service_account: str, private_key: str) -> credentials.Credentials:\n    if service_account and private_key:\n        try:\n            with open_local(private_key) as local_path:\n                creds = ee.ServiceAccountCredentials(service_account, local_path)\n        except Exception:\n            raise RuntimeError(f'Unable to open the private key {private_key}.')\n    elif use_personal_account:\n        ee.Authenticate()\n        creds, _ = default()\n    elif is_compute_engine():\n        creds = compute_engine.Credentials()\n    else:\n        creds, _ = default()\n    creds.refresh(requests.Request())\n    return creds", "docstring": "Fetches credentials for authentication.\n\nIf the `use_personal_account` argument is true then it will authenticate with pop-up\nbrowser window using personal account. Otherwise, if the application is running\nin compute engine, it will use credentials of service account bound to the VM.\nOtherwise, it will try to use user credentials.\n\nArgs:\nuse_personal_account: A flag to use personal account for ee authentication.\nservice_account: Service account address when using a private key for earth engine authentication.\nprivate_key: A private key path to authenticate earth engine using private key.\n\nReturns:\ncred: Credentials object.", "source": "github-repos"}
{"code": "def end_container(self, header_buf):\n        \n        if not self.__container_nodes:\n            raise ValueError(\"Attempted to end container with none active.\")\n        \n        self.__container_node.add_leaf(_Node(header_buf))\n        self.__container_node = self.__container_nodes.pop()\n        parent_container_length = self.__container_lengths.pop()\n        self.current_container_length = \\\n            parent_container_length + self.current_container_length + len(header_buf)", "docstring": "Add a node containing the container's header to the current subtree.\n\nThis node will be added as the leftmost leaf of the subtree that was\nstarted by the matching call to start_container.\n\nArgs:\nheader_buf (bytearray): bytearray containing the container header.", "source": "juraj-google-style"}
{"code": "def setlogging(mlogger, defval=None):\n    \n    log_level = os.getenv('SYN_LOG_LEVEL',\n                          defval)\n    if log_level:  \n        log_level = log_level.upper()\n        if log_level not in s_const.LOG_LEVEL_CHOICES:\n            raise ValueError('Invalid log level provided: {}'.format(log_level))\n        logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)\n        mlogger.info('log level set to %s', log_level)", "docstring": "Configure synapse logging.\n\nArgs:\nmlogger (logging.Logger): Reference to a logging.Logger()\ndefval (str): Default log level\n\nNotes:\nThis calls logging.basicConfig and should only be called once per process.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def send(self, msg):\n        \n        \n        slipDriver = sliplib.Driver()\n\n        \n        slipData = slipDriver.send(msg)\n\n        \n        res = self._serialPort.write(slipData)\n\n        \n        return res", "docstring": "Encodes data to slip protocol and then sends over serial port\n\nUses the SlipLib module to convert the message data into SLIP format.\nThe message is then sent over the serial port opened with the instance\nof the Faraday class used when invoking send().\n\nArgs:\nmsg (bytes): Bytes format message to send over serial port.\n\nReturns:\nint: Number of bytes transmitted over the serial port.", "source": "juraj-google-style"}
{"code": "def start_trial(self, trial, checkpoint=None):\n        \n\n        self._commit_resources(trial.resources)\n        try:\n            self._start_trial(trial, checkpoint)\n        except Exception as e:\n            logger.exception(\"Error starting runner for Trial %s\", str(trial))\n            error_msg = traceback.format_exc()\n            time.sleep(2)\n            self._stop_trial(trial, error=True, error_msg=error_msg)\n            if isinstance(e, AbortTrialExecution):\n                return  \n            try:\n                \n                trial.clear_checkpoint()\n                logger.info(\n                    \"Trying to start runner for Trial %s without checkpoint.\",\n                    str(trial))\n                self._start_trial(trial)\n            except Exception:\n                logger.exception(\n                    \"Error starting runner for Trial %s, aborting!\",\n                    str(trial))\n                error_msg = traceback.format_exc()\n                self._stop_trial(trial, error=True, error_msg=error_msg)", "docstring": "Starts the trial.\n\nWill not return resources if trial repeatedly fails on start.\n\nArgs:\ntrial (Trial): Trial to be started.\ncheckpoint (Checkpoint): A Python object or path storing the state\nof trial.", "source": "juraj-google-style"}
{"code": "def _PrintAPFSVolumeIdentifiersOverview(self, volume_system, volume_identifiers):\n    header = 'The following Apple File System (APFS) volumes were found:\\n'\n    self._output_writer.Write(header)\n    column_names = ['Identifier', 'Name']\n    table_view = views.CLITabularTableView(column_names=column_names)\n    for volume_identifier in volume_identifiers:\n        volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n        if (not volume):\n            raise errors.SourceScannerError('Volume missing for identifier: {0:s}.'.format(volume_identifier))\n        volume_attribute = volume.GetAttribute('name')\n        table_view.AddRow([volume.identifier, volume_attribute.value])\n    self._output_writer.Write('\\n')\n    table_view.Write(self._output_writer)\n    self._output_writer.Write('\\n')", "docstring": "Prints an overview of APFS volume identifiers.\n\nArgs:\nvolume_system (dfvfs.APFSVolumeSystem): volume system.\nvolume_identifiers (list[str]): allowed volume identifiers.\n\nRaises:\nSourceScannerError: if a volume cannot be resolved from the volume\nidentifier.", "source": "codesearchnet"}
{"code": "def layers(self):\n    layers = [self._layer_def(style) for style in self.styles]\n    return layers", "docstring": "Renders the list of layers to add to the map.\n\nReturns:\nlayers (list): list of layer entries suitable for use in mapbox-gl 'map.addLayer()' call", "source": "codesearchnet"}
{"code": "def edges(self, nodes=None):\n        \n        \n        \n        \n        edges = set()\n        for node in (nodes or self.iterkeys()):\n            ends = self[node].nodes()\n            edges.update([(node, end) for end in ends])\n        return tuple(edges)", "docstring": "Returns a ``tuple`` of all edges in the ``DictGraph`` an edge is a pair\nof **node objects**.\n\nArguments:\n\n- nodes(iterable) [default: ``None``] iterable of **node objects** if\nspecified the edges will be limited to those outgoing from one of\nthe specified nodes.", "source": "juraj-google-style"}
{"code": "def __init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint=None, can_do_incremental=False):\n    self.log = logging.getLogger(__name__)\n    self.map_name = map_name\n    self.timestamp_dir = timestamp_dir\n    self.cache_options = cache_options\n    self.can_do_incremental = can_do_incremental\n    if automount_mountpoint is None:\n        timestamp_prefix = '%s/timestamp-%s' % (timestamp_dir, map_name)\n    else:\n        automount_mountpoint = automount_mountpoint.lstrip('/')\n        automount_mountpoint = automount_mountpoint.replace('/', '_')\n        timestamp_prefix = '%s/timestamp-%s-%s' % (timestamp_dir, map_name, automount_mountpoint)\n    self.modify_file = '%s-modify' % timestamp_prefix\n    self.update_file = '%s-update' % timestamp_prefix\n    self.modify_time = None\n    self.update_time = None", "docstring": "Construct an updater object.\n\nArgs:\nmap_name: A string representing the type of the map we are an Updater for.\ntimestamp_dir: A string with the directory containing our timestamp files.\ncache_options: A dict containing the options for any caches we create.\nautomount_mountpoint: An optional string containing automount path info.\ncan_do_incremental: Indicates whether or not our source can provide\nincremental updates at all.", "source": "github-repos"}
{"code": "def FindEnumTypeByName(self, full_name):\n    \n\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if full_name not in self._enum_descriptors:\n      self.FindFileContainingSymbol(full_name)\n    return self._enum_descriptors[full_name]", "docstring": "Loads the named enum descriptor from the pool.\n\nArgs:\nfull_name: The full name of the enum descriptor to load.\n\nReturns:\nThe enum descriptor for the named type.", "source": "juraj-google-style"}
{"code": "def __init__(self, mapreduce_spec, shard_state, task_retry_count=0):\n    \n    self._shard_state = shard_state\n    self.mapreduce_spec = mapreduce_spec\n    \n    \n    self.task_retry_count = task_retry_count\n\n    if self.mapreduce_spec:\n      self.mapreduce_id = self.mapreduce_spec.mapreduce_id\n    else:\n      \n      self.mapreduce_id = None\n    if shard_state:\n      self.shard_id = shard_state.get_shard_id()\n    else:\n      \n      self.shard_id = None\n\n    \n    \n    self._mutation_pool = _MutationPool(mapreduce_spec=mapreduce_spec)\n    self._counters = _Counters(shard_state)\n    \n    \n    self.counters = self._counters\n\n    self._pools = {}\n    self.register_pool(\"mutation_pool\", self._mutation_pool)\n    self.register_pool(\"counters\", self.counters)", "docstring": "Constructor.\n\nArgs:\nmapreduce_spec: mapreduce specification as model.MapreduceSpec.\nshard_state: an instance of model.ShardState. This has to be the same\ninstance as the one MapperWorkerHandler mutates. All mutations are\nflushed to datastore in the end of the slice.\ntask_retry_count: how many times this task has been retried.", "source": "juraj-google-style"}
{"code": "def subscribe_sns_topic_to_sqs(self, region):\n    sns = self.session.resource('sns', region_name=region)\n    topic = sns.Topic('arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name))\n    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)\n    auditlog(event='cloudtrail.subscribe_sns_topic_to_sqs', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    return topic.attributes['TopicArn']", "docstring": "Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed\n\nArgs:\nregion (`str`): Name of the AWS region\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "def generate_nb_states(n_states, n_cells, n_genes):\n    \n    W = np.random.dirichlet([1]*n_states, size=(n_cells,))\n    W = W.T\n    M = np.random.random((n_genes, n_states))*100\n    R = np.random.randint(1, 100, n_genes)\n    return M, W, R", "docstring": "Generates means and weights for the Negative Binomial Mixture Model.\nWeights are distributed Dirichlet(1,1,...), means are rand(0, 1).\nReturned values can be passed to generate_state_data(M, W).\n\nArgs:\nn_states (int): number of states or clusters\nn_cells (int): number of cells\nn_genes (int): number of genes\n\nReturns:\nM - genes x clusters\nW - clusters x cells\nR - genes x 1 - randint(1, 100)", "source": "juraj-google-style"}
{"code": "def rst_underline(heading: str, underline_char: str) -> str:\n    \n    assert \"\\n\" not in heading\n    assert len(underline_char) == 1\n    return heading + \"\\n\" + (underline_char * len(heading))", "docstring": "Underlines a heading for RST files.\n\nArgs:\nheading: text to underline\nunderline_char: character to use\n\nReturns:\nunderlined heading, over two lines (without a final terminating\nnewline)", "source": "juraj-google-style"}
{"code": "def update_snmp_configuration(self, configuration, timeout=-1):\n        \n        data = configuration.copy()\n        if 'type' not in data:\n            data['type'] = 'snmp-configuration'\n\n        uri = \"{}{}\".format(self.data[\"uri\"], self.SNMP_CONFIGURATION_PATH)\n        return self._helper.update(data, uri=uri, timeout=timeout)", "docstring": "Updates the SNMP configuration of a logical interconnect. Changes to the SNMP configuration are asynchronously\napplied to all managed interconnects.\n\nArgs:\nconfiguration: snmp configuration.\n\nReturns:\ndict: The Logical Interconnect.", "source": "juraj-google-style"}
{"code": "def write_data(msg_type, profile_name, data, cfg):\n    \n    if profile_name not in cfg.data:\n        cfg.data[profile_name] = {}\n    cfg.data[profile_name][msg_type] = data", "docstring": "Write the settings into the data portion of the cfg.\n\nArgs:\n:msg_type: (str) message type to create config entry.\n:profile_name: (str) name of the profile entry\n:data: (dict) dict values for the 'settings'\n:cfg: (jsonconfig.Config) config instance.", "source": "juraj-google-style"}
{"code": "def read_float(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack(\"%sf\" % endian, 4)", "docstring": "Read 4 bytes as a float value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nfloat:", "source": "juraj-google-style"}
{"code": "def LoadFromString(cls, yaml_doc):\n    return cls(**googleads.common.LoadFromString(yaml_doc, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES, cls._OPTIONAL_INIT_VALUES))", "docstring": "Creates an AdWordsClient with information stored in a yaml string.\n\nArgs:\nyaml_doc: The yaml string containing the cached AdWords data.\n\nReturns:\nAn AdWordsClient initialized with the values cached in the string.\n\nRaises:\nA GoogleAdsValueError if the given yaml string does not contain the\ninformation necessary to instantiate a client object - either a\nrequired key was missing or an OAuth2 key was missing.", "source": "codesearchnet"}
{"code": "def member_add(self, repl_id, params):\n        \n        repl = self[repl_id]\n        member_id = repl.repl_member_add(params)\n        self[repl_id] = repl\n        return member_id", "docstring": "create instance and add it to existing replcia\nArgs:\nrepl_id - replica set identity\nparams - member params\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "def add_group_maintainer(self, name, user):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.add_group_maintainer(name, user)", "docstring": "Add the given user to the named group.\n\nBoth group and user must already exist for this to succeed.\n\nArgs:\nname (string): Name of group.\nuser (string): User to add to group.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _tracker_str(item):\n    \n    instance = tracker(item)\n    if instance is not None:\n        if isinstance(instance, str):\n            return instance\n        elif isinstance(instance, tuple):\n            return instance\n        else:\n            return instance.uuid\n    else:\n        \n        \n        return item", "docstring": "Returns a string representation of the tracker object for the given item.\n\nArgs:\nitem: object to get tracker for.\nfqdn (str): fully-qualified domain name of the object.", "source": "juraj-google-style"}
{"code": "def _load_tmp_fact(filepath):\n    \n    from hamster_lib import Fact\n\n    try:\n        with open(filepath, 'rb') as fobj:\n            fact = pickle.load(fobj)\n    except IOError:\n        fact = False\n    else:\n        if not isinstance(fact, Fact):\n            raise TypeError(_(\n                \"Something went wrong. It seems our pickled file does not contain\"\n                \" valid Fact instance. [Content: '{content}'; Type: {type}\".format(\n                    content=fact, type=type(fact))\n            ))\n    return fact", "docstring": "Load an 'ongoing fact' from a given location.\n\nArgs:\nfilepath: Full path to the tmpfile location.\n\nReturns:\nhamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``\nif no file was found.\n\nRaises:\nTypeError: If for some reason our stored instance is no instance of\n``hamster_lib.Fact``.", "source": "juraj-google-style"}
{"code": "def _parse_peer_address(self, config):\n        \n        match = re.search(r'peer-address ([^\\s]+)', config)\n        value = match.group(1) if match else None\n        return dict(peer_address=value)", "docstring": "Scans the config block and parses the peer-address value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "juraj-google-style"}
{"code": "def listup_sentence(self, data, counter=0):\n        \n        delimiter = self.delimiter_list[counter]\n        sentence_list = []\n        [sentence_list.append(sentence + delimiter) for sentence in data.split(delimiter) if sentence != \"\"]\n        if counter + 1 < len(self.delimiter_list):\n            sentence_list_r = []\n            [sentence_list_r.extend(self.listup_sentence(sentence, counter+1)) for sentence in sentence_list]\n            sentence_list = sentence_list_r\n\n        return sentence_list", "docstring": "Divide string into sentence list.\n\nArgs:\ndata:               string.\ncounter:            recursive counter.\n\nReturns:\nList of sentences.", "source": "juraj-google-style"}
{"code": "def send_async(self, transaction, headers=None):\n        \n        return self.transport.forward_request(\n            method='POST',\n            path=self.path,\n            json=transaction,\n            params={'mode': 'async'},\n            headers=headers)", "docstring": "Submit a transaction to the Federation with the mode `async`.\n\nArgs:\ntransaction (dict): the transaction to be sent\nto the Federation node(s).\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: The transaction sent to the Federation node(s).", "source": "juraj-google-style"}
{"code": "def extract_only_content(self, path=None, payload=None, objectInput=None):\n        \n        if objectInput:\n            switches = [\"-t\"]\n            result = self._command_template(switches, objectInput)\n            return result, True, None\n        else:\n            f = file_path(path, payload)\n            switches = [\"-t\", f]\n            result = self._command_template(switches)\n            return result, path, f", "docstring": "Return only the text content of passed file.\nThese parameters are in OR. Only one of them can be analyzed.\n\nArgs:\npath (string): Path of file to analyze\npayload (string): Payload base64 to analyze\nobjectInput (object): file object/standard input to analyze\n\nReturns:\ntext of file passed (string)", "source": "juraj-google-style"}
{"code": "def find_clients(self, hosts):\n    \n    \n    clients = []\n    for host in hosts:\n      clients.append(self._get_client_by_hostname(host))\n    return [client for client in clients if client is not None]", "docstring": "Finds GRR clients given a list of hosts.\n\nArgs:\nhosts: List of hostname FQDNs\n\nReturns:\nList of GRR client objects.", "source": "juraj-google-style"}
{"code": "def write_to_file_by_name(folder, fname, data, mkdir=False):\n    \n    if not os.path.isdir(folder):\n        if mkdir:\n            preparedir(folder)\n        else:\n            created = preparedir(folder, False)\n            if not created:\n                raise ValueError(\"Failed to find %s.\" % folder)\n\n    file_path = os.path.join(folder, fname)\n\n    with open(file_path, writemode) as outf:\n        try:\n            outf.write(str(data))\n            return file_path\n        except Exception as e:\n            raise IOError(\"Failed to write %s to file:\\n\\t%s\" % (fname, str(e)))", "docstring": "Write a string of data to file by filename and folder.\n\nArgs:\nfolder: Target folder (e.g. c:/ladybug).\nfname: File name (e.g. testPts.pts).\ndata: Any data as string.\nmkdir: Set to True to create the directory if doesn't exist (Default: False).", "source": "juraj-google-style"}
{"code": "def check_new_round(self, hours=24, tournament=1):\n        \n        query = \n        arguments = {'tournament': tournament}\n        raw = self.raw_query(query, arguments)['data']['rounds'][0]\n        if raw is None:\n            return False\n        open_time = utils.parse_datetime_string(raw['openTime'])\n        now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)\n        is_new_round = open_time > now - datetime.timedelta(hours=hours)\n        return is_new_round", "docstring": "Check if a new round has started within the last `hours`.\n\nArgs:\nhours (int, optional): timeframe to consider, defaults to 24\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\nbool: True if a new round has started, False otherwise.\n\nExample:\n>>> NumerAPI().check_new_round()\nFalse", "source": "juraj-google-style"}
{"code": "def add_enum(name=None, index=None, flags=idaapi.hexflag(), bitfield=False):\n    if (name is not None):\n        with ignored(exceptions.EnumNotFound):\n            _get_enum(name)\n            raise exceptions.EnumAlreadyExists()\n    if ((index is None) or (index < 0)):\n        index = idaapi.get_enum_qty()\n    eid = idaapi.add_enum(index, name, flags)\n    if (eid == idaapi.BADADDR):\n        raise exceptions.EnumCreationFailed('Failed creating enum \"{}\"'.format(name))\n    if bitfield:\n        idaapi.set_enum_bf(eid, bitfield)\n    return Enum(eid=eid)", "docstring": "Create a new enum.\n\nArgs:\nname: Name of the enum to create.\nindex: The index of the enum. Leave at default to append the enum as the last enum.\nflags: Enum type flags.\nbitfield: Is the enum a bitfield.\n\nReturns:\nAn `Enum` object.", "source": "codesearchnet"}
{"code": "def custom_apply(self, path: utils.KeyPath, value_spec: class_schema.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, class_schema.Field, Any], Any]]=None) -> Tuple[bool, Any]:", "docstring": "Custom apply on a value based on its original value spec.\n\nArgs:\npath: KeyPath of current object under its object tree.\nvalue_spec: Original value spec for this field.\nallow_partial: Whether allow partial object to be created.\nchild_transform: Function to transform child node values into their final\nvalues. Transform function is called on leaf nodes first, then on their\nparents, recursively.\n\nReturns:\nA tuple (proceed_with_standard_apply, value_to_proceed).\nIf proceed_with_standard_apply is set to False, value_to_proceed\nwill be used as final value.\n\nRaises:\nError when the value is not compatible with the value spec.", "source": "github-repos"}
{"code": "def _operation_status(self):\n    if (not google_v2_operations.is_done(self._op)):\n        return 'RUNNING'\n    if google_v2_operations.is_success(self._op):\n        return 'SUCCESS'\n    if google_v2_operations.is_canceled(self._op):\n        return 'CANCELED'\n    if google_v2_operations.is_failed(self._op):\n        return 'FAILURE'\n    raise ValueError('Status for operation {} could not be determined'.format(self._op['name']))", "docstring": "Returns the status of this operation.\n\nRaises:\nValueError: if the operation status cannot be determined.\n\nReturns:\nA printable status string (RUNNING, SUCCESS, CANCELED or FAILURE).", "source": "codesearchnet"}
{"code": "def new_stories(self, raw=False, limit=None):\n        \n        new_stories = self._get_stories('newstories', limit)\n        if raw:\n            new_stories = [story.raw for story in new_stories]\n        return new_stories", "docstring": "Returns list of item ids of current new stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of new stories.", "source": "juraj-google-style"}
{"code": "def get_conditional_uni(cls, left_parent, right_parent):\n        \n        left, right, _ = cls._identify_eds_ing(left_parent, right_parent)\n\n        left_u = left_parent.U[0] if left_parent.L == left else left_parent.U[1]\n        right_u = right_parent.U[0] if right_parent.L == right else right_parent.U[1]\n\n        return left_u, right_u", "docstring": "Identify pair univariate value from parents.\n\nArgs:\nleft_parent(Edge): left parent\nright_parent(Edge): right parent\n\nReturns:\ntuple[np.ndarray, np.ndarray]: left and right parents univariate.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListJobs = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/ListJobs\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.ListJobsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.ListJobsResponse.FromString,\n        )\n        self.GetJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/GetJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.GetJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.CreateJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/CreateJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.CreateJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.UpdateJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/UpdateJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.UpdateJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.DeleteJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/DeleteJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.DeleteJobRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.PauseJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/PauseJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.PauseJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.ResumeJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/ResumeJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.ResumeJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.RunJob = channel.unary_unary(\n            \"/google.cloud.scheduler.v1.CloudScheduler/RunJob\",\n            request_serializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_cloudscheduler__pb2.RunJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_scheduler__v1_dot_proto_dot_job__pb2.Job.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def voronoi(script, target_layer=0, source_layer=1, backward=True):\n    filter_xml = ''.join(['  <filter name=\"Voronoi Vertex Coloring\">\\n', '    <Param name=\"ColoredMesh\" ', 'value=\"{:d}\" '.format(target_layer), 'description=\"To be Colored Mesh\" ', 'type=\"RichMesh\" ', '/>\\n', '    <Param name=\"VertexMesh\" ', 'value=\"{:d}\" '.format(source_layer), 'description=\"Vertex Mesh\" ', 'type=\"RichMesh\" ', '/>\\n', '    <Param name=\"backward\" ', 'value=\"{}\" '.format(str(backward).lower()), 'description=\"BackDistance\" ', 'type=\"RichBool\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Given a Mesh 'M' and a Pointset 'P', the filter projects each vertex of\nP over M and color M according to the geodesic distance from these\nprojected points. Projection and coloring are done on a per vertex\nbasis.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\ntarget_layer (int): The mesh layer whose surface is colored. For each\nvertex of this mesh we decide the color according to the following\narguments.\nsource_layer (int): The mesh layer whose vertexes are used as seed\npoints for the color computation. These seeds point are projected\nonto the target_layer mesh.\nbackward (bool): If True the mesh is colored according to the distance\nfrom the frontier of the voronoi diagram induced by the\nsource_layer seeds.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def _build(self, images):\n    num_classes = self._num_output_classes\n    if (len(images.get_shape()) != 4):\n        raise base.IncompatibleShapeError(\"'images' must have shape [batch_size, height, width, depth].\")\n    if (self.WEIGHTS not in self._initializers):\n        if (self._model_size == self.BASIC):\n            self._initializers[self.WEIGHTS] = identity_kernel_initializer\n        elif (self._model_size == self.LARGE):\n            self._initializers[self.WEIGHTS] = noisy_identity_kernel_initializer(num_classes)\n        else:\n            raise ValueError(('Unrecognized model_size: %s' % self._model_size))\n    if (self.BIASES not in self._initializers):\n        self._initializers[self.BIASES] = tf.zeros_initializer()\n    if (self._model_size == self.BASIC):\n        self._conv_modules = [self._dilated_conv_layer(num_classes, 1, True, 'conv1'), self._dilated_conv_layer(num_classes, 1, True, 'conv2'), self._dilated_conv_layer(num_classes, 2, True, 'conv3'), self._dilated_conv_layer(num_classes, 4, True, 'conv4'), self._dilated_conv_layer(num_classes, 8, True, 'conv5'), self._dilated_conv_layer(num_classes, 16, True, 'conv6'), self._dilated_conv_layer(num_classes, 1, True, 'conv7'), self._dilated_conv_layer(num_classes, 1, False, 'conv8')]\n    elif (self._model_size == self.LARGE):\n        self._conv_modules = [self._dilated_conv_layer((2 * num_classes), 1, True, 'conv1'), self._dilated_conv_layer((2 * num_classes), 1, True, 'conv2'), self._dilated_conv_layer((4 * num_classes), 2, True, 'conv3'), self._dilated_conv_layer((8 * num_classes), 4, True, 'conv4'), self._dilated_conv_layer((16 * num_classes), 8, True, 'conv5'), self._dilated_conv_layer((32 * num_classes), 16, True, 'conv6'), self._dilated_conv_layer((32 * num_classes), 1, True, 'conv7'), self._dilated_conv_layer(num_classes, 1, False, 'conv8')]\n    else:\n        raise ValueError(('Unrecognized model_size: %s' % self._model_size))\n    dilation_mod = sequential.Sequential(self._conv_modules, name='dilation')\n    return dilation_mod(images)", "docstring": "Build dilation module.\n\nArgs:\nimages: Tensor of shape [batch_size, height, width, depth]\nand dtype float32. Represents a set of images with an arbitrary depth.\nNote that when using the default initializer, depth must equal\nnum_output_classes.\n\nReturns:\nTensor of shape [batch_size, height, width, num_output_classes] and dtype\nfloat32. Represents, for each image and pixel, logits for per-class\npredictions.\n\nRaises:\nIncompatibleShapeError: If images is not rank 4.\nValueError: If model_size is not one of 'basic' or 'large'.", "source": "codesearchnet"}
{"code": "def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'):\n    \n    current_instance_profiles = resource_action(\n        client,\n        action='list_instance_profiles_for_role',\n        log_format='Found Instance Profiles for %(RoleName)s.',\n        RoleName=role_name)['InstanceProfiles']\n\n    for profile in current_instance_profiles:\n        if profile['InstanceProfileName'] == profile_name:\n            LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name)\n            break\n    else:\n        for remove_profile in current_instance_profiles:\n            resource_action(\n                client,\n                action='remove_role_from_instance_profile',\n                log_format='Removed Instance Profile from Role: '\n                '%(InstanceProfileName)s -> %(RoleName)s',\n                InstanceProfileName=remove_profile['InstanceProfileName'],\n                RoleName=role_name)\n\n        resource_action(\n            client,\n            action='add_role_to_instance_profile',\n            log_format='Added Instance Profile to Role: '\n            '%(InstanceProfileName)s -> %(RoleName)s',\n            InstanceProfileName=profile_name,\n            RoleName=role_name)\n\n    return True", "docstring": "Attach an IAM Instance Profile _profile_name_ to Role _role_name_.\n\nArgs:\nrole_name (str): Name of Role.\nprofile_name (str): Name of Instance Profile.\n\nReturns:\nTrue upon successful completion.", "source": "juraj-google-style"}
{"code": "def get_canonical_name(api_names: Sequence[str], deprecated_api_names: Sequence[str]) -> Optional[str]:\n    non_deprecated_name = next((name for name in api_names if name not in deprecated_api_names), None)\n    if non_deprecated_name:\n        return non_deprecated_name\n    if api_names:\n        return api_names[0]\n    return None", "docstring": "Get preferred endpoint name.\n\nArgs:\napi_names: API names iterable.\ndeprecated_api_names: Deprecated API names iterable.\n\nReturns:\nReturns one of the following in decreasing preference:\n- first non-deprecated endpoint\n- first endpoint\n- None", "source": "github-repos"}
{"code": "def compute_q(self, query_antecedent):\n    \n    ret = mtf.einsum(\n        [query_antecedent, self.wq], reduced_dims=[self.query_input_dim])\n    if self.combine_dims:\n      ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims)\n    return ret", "docstring": "Compute query Tensor q.\n\nArgs:\nquery_antecedent: a Tensor with dimensions\n{query_input_dim} + other_dims\nReturns:\na Tensor with dimensions\nquery_heads_dims + {key_dim} + other_dims", "source": "juraj-google-style"}
{"code": "def evaluate(self, tensors) -> Union[ragged_tensor_value.RaggedTensorValue, sparse_tensor.SparseTensorValue, None]:\n    if context.executing_eagerly():\n        return self._eval_helper(tensors)\n    else:\n        sess = ops.get_default_session()\n        flattened_tensors = nest.flatten(tensors)\n        if sess is None:\n            with self.test_session() as sess:\n                flattened_results = sess.run(flattened_tensors)\n        else:\n            flattened_results = sess.run(flattened_tensors)\n        return nest.pack_sequence_as(tensors, flattened_results)", "docstring": "Evaluates tensors and returns numpy values.\n\nArgs:\ntensors: A Tensor or a nested list/tuple of Tensors.\n\nReturns:\ntensors numpy values.", "source": "github-repos"}
{"code": "def _on_connection_close(self, connection, reply_code_or_reason, reply_text=None):\n    self._channel = None\n    if isinstance(reply_code_or_reason, pika_errs.ConnectionClosed):\n        reply_code = reply_code_or_reason.reply_code\n        reply_text = reply_code_or_reason.reply_text\n    elif isinstance(reply_code_or_reason, int):\n        reply_code = reply_code_or_reason\n    else:\n        reply_code = 0\n        reply_text = str(reply_code_or_reason)\n    if (reply_code == 200):\n        _log.info('Server connection closed (%s), shutting down', reply_text)\n        connection.ioloop.stop()\n    else:\n        _log.warning('Connection to %s closed unexpectedly (%d): %s', connection.params.host, reply_code, reply_text)\n        self.call_later(1, self.reconnect)", "docstring": "Callback invoked when a previously-opened connection is closed.\n\nArgs:\nconnection (pika.connection.SelectConnection): The connection that\nwas just closed.\nreply_code_or_reason (int|Exception): The reason why the channel\nwas closed. In older versions of pika, this is the AMQP code.\nreply_text (str): The human-readable reason the connection was\nclosed (only in older versions of pika)", "source": "codesearchnet"}
{"code": "def get_assets(cls, lat, lon, begin=None, end=None):\n    instance = cls('planetary/earth/assets')\n    filters = {'lat': lat, 'lon': lon, 'begin': begin, 'end': end}\n    return instance.get_resource(**filters)", "docstring": "Returns date and ids of flyovers\n\nArgs:\nlat: latitude float\nlon: longitude float\nbegin: date instance\nend: date instance\n\nReturns:\njson", "source": "codesearchnet"}
{"code": "def generate_state_data(means, weights):\n    \n    x_true = np.dot(means, weights)\n    sample = np.random.poisson(x_true)\n    return sample.astype(float)", "docstring": "Generates data according to the Poisson Convex Mixture Model.\n\nArgs:\nmeans (array): Cell types- genes x clusters\nweights (array): Cell cluster assignments- clusters x cells\n\nReturns:\ndata matrix - genes x cells", "source": "juraj-google-style"}
{"code": "def escape_meta(self, string, pos):\n    if ((pos > 0) and (string[(pos - 1)] == '\\\\')):\n        string = (string[:(pos - 1)] + string[pos:])\n    else:\n        warnings.warn(\"Un-escaped meta-character: '{0}' (Escape it with a '\\\\')\".format(string[pos]), Warning)\n        pos += 1\n    meta = self.meta.search(string, pos)\n    return (string, meta)", "docstring": "Checks if a meta character is escaped or else warns about it.\n\nIf the meta character has an escape character ('\\') preceding it,\nthe meta character is escaped. If it does not, a warning is emitted\nthat the user should escape it.\n\nArguments:\nstring (str): The relevant string in which the character was found.\npos (int): The index of the meta character within the string.\n\nReturns:\nThe possibly escaped string and the next meta match.", "source": "codesearchnet"}
{"code": "def _validate_reference_field(parent: message.Message, field: descriptor.FieldDescriptor) -> None:\n    oneof = field.message_type.oneofs[0]\n    for i in range(proto_utils.field_content_length(parent, field)):\n        reference = proto_utils.get_value_at_field_index(parent, field, i)\n        reference_field_name = reference.WhichOneof(oneof.name)\n        if reference_field_name is None:\n            if not (reference.extension or reference.HasField('identifier') or reference.HasField('display')):\n                raise fhir_errors.InvalidFhirError(f'`{reference.DESCRIPTOR.name}` is an empty reference.')\n            return\n        field_options = field.GetOptions()\n        if not field_options.Extensions[annotations_pb2.valid_reference_type]:\n            return\n        if reference.HasField('uri') or reference.HasField('fragment'):\n            return\n        if annotation_utils.get_fhir_version(reference) == annotations_pb2.FhirVersion.DSTU2:\n            return\n        reference_field = reference.DESCRIPTOR.fields_by_name[reference_field_name]\n        if annotation_utils.is_typed_reference_field(reference_field):\n            reference_type = reference_field.GetOptions().Extensions[annotations_pb2.referenced_fhir_type]\n            is_allowed = False\n            for valid_type in field_options.Extensions[annotations_pb2.valid_reference_type]:\n                if valid_type == reference_type or valid_type == 'Resource':\n                    is_allowed = True\n                    break\n            if not is_allowed:\n                raise fhir_errors.InvalidFhirError(f'Message `{parent.DESCRIPTOR.full_name}` contains an invalid reference type: `{reference_type}` set at: `{reference_field_name}`.')", "docstring": "Ensure that the provided reference field is valid.\n\nArgs:\nparent: The containing Message.\nfield: The reference field descriptor.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event of an empty reference (no\nextensions, no identifier, no display).", "source": "github-repos"}
{"code": "def write_file(self, file_name, vasp4_compatible=False):\n        \n\n        def _print_fortran_float(f):\n            \n            s = \"{:.10E}\".format(f)\n            if f >= 0:\n                return \"0.\" + s[0] + s[2:12] + 'E' + \"{:+03}\".format(int(s[13:]) + 1)\n            else:\n                return \"-.\" + s[1] + s[3:13] + 'E' + \"{:+03}\".format(int(s[14:]) + 1)\n\n        with zopen(file_name, \"wt\") as f:\n            p = Poscar(self.structure)\n\n            \n            comment = getattr(self, 'name', p.comment)\n\n            lines = comment + \"\\n\"\n            lines += \"   1.00000000000000\\n\"\n            latt = self.structure.lattice.matrix\n            lines += \" %12.6f%12.6f%12.6f\\n\" % tuple(latt[0, :])\n            lines += \" %12.6f%12.6f%12.6f\\n\" % tuple(latt[1, :])\n            lines += \" %12.6f%12.6f%12.6f\\n\" % tuple(latt[2, :])\n            if not vasp4_compatible:\n                lines += \"\".join([\"%5s\" % s for s in p.site_symbols]) + \"\\n\"\n            lines += \"\".join([\"%6d\" % x for x in p.natoms]) + \"\\n\"\n            lines += \"Direct\\n\"\n            for site in self.structure:\n                lines += \"%10.6f%10.6f%10.6f\\n\" % tuple(site.frac_coords)\n            lines += \" \\n\"\n            f.write(lines)\n            a = self.dim\n\n            def write_spin(data_type):\n                lines = []\n                count = 0\n                f.write(\"   {}   {}   {}\\n\".format(a[0], a[1], a[2]))\n                for (k, j, i) in itertools.product(list(range(a[2])),\n                                                   list(range(a[1])),\n                                                   list(range(a[0]))):\n                    lines.append(_print_fortran_float(self.data[data_type][i, j, k]))\n                    count += 1\n                    if count % 5 == 0:\n                        f.write(\" \" + \"\".join(lines) + \"\\n\")\n                        lines = []\n                    else:\n                        lines.append(\" \")\n                f.write(\" \" + \"\".join(lines) + \" \\n\")\n                f.write(\"\".join(self.data_aug.get(data_type, [])))\n\n            write_spin(\"total\")\n            if self.is_spin_polarized and self.is_soc:\n                write_spin(\"diff_x\")\n                write_spin(\"diff_y\")\n                write_spin(\"diff_z\")\n            elif self.is_spin_polarized:\n                write_spin(\"diff\")", "docstring": "Write the VolumetricData object to a vasp compatible file.\n\nArgs:\nfile_name (str): Path to a file\nvasp4_compatible (bool): True if the format is vasp4 compatible", "source": "juraj-google-style"}
{"code": "def get_image(width, height, want_grayscale, filepath):\n    with ops.Graph().as_default():\n        with session.Session():\n            file_data = io_ops.read_file(filepath)\n            channels = 1 if want_grayscale else 3\n            image_tensor = image_ops.decode_image(file_data, channels=channels).eval()\n            resized_tensor = image_ops.resize_images_v2(image_tensor, (height, width)).eval()\n    return resized_tensor", "docstring": "Returns an image loaded into an np.ndarray with dims [height, width, (3 or 1)].\n\nArgs:\nwidth: Width to rescale the image to.\nheight: Height to rescale the image to.\nwant_grayscale: Whether the result should be converted to grayscale.\nfilepath: Path of the image file..\n\nReturns:\nnp.ndarray of shape (height, width, channels) where channels is 1 if\nwant_grayscale is true, otherwise 3.", "source": "github-repos"}
{"code": "def plot_power_factor_mu(self, temp=600, output='eig', relaxation_time=1e-14, xlim=None):\n    import matplotlib.pyplot as plt\n    plt.figure(figsize=(9, 7))\n    pf = self._bz.get_power_factor(relaxation_time=relaxation_time, output=output, doping_levels=False)[temp]\n    plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)\n    self._plot_bg_limits()\n    self._plot_doping(temp)\n    if (output == 'eig'):\n        plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])\n    if (xlim is None):\n        plt.xlim((- 0.5), (self._bz.gap + 0.5))\n    else:\n        plt.xlim(xlim)\n    plt.ylabel('Power factor, ($\\\\mu$W/(mK$^2$))', fontsize=30.0)\n    plt.xlabel('E-E$_f$ (eV)', fontsize=30.0)\n    plt.xticks(fontsize=25)\n    plt.yticks(fontsize=25)\n    plt.tight_layout()\n    return plt", "docstring": "Plot the power factor in function of Fermi level. Semi-log plot\n\nArgs:\ntemp: the temperature\nxlim: a list of min and max fermi energy by default (0, and band\ngap)\ntau: A relaxation time in s. By default none and the plot is by\nunits of relaxation time\n\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def join_room(self, room_id_or_alias):\n        \n        if not room_id_or_alias:\n            raise MatrixError(\"No alias or room ID to join.\")\n\n        path = \"/join/%s\" % quote(room_id_or_alias)\n\n        return self._send(\"POST\", path)", "docstring": "Performs /join/$room_id\n\nArgs:\nroom_id_or_alias (str): The room ID or room alias to join.", "source": "juraj-google-style"}
{"code": "def get_frequency_shift(\n            self,\n            grid_points,\n            temperatures=np.arange(0, 1001, 10, dtype='double'),\n            epsilons=None,\n            output_filename=None):\n        \n\n        if self._interaction is None:\n            self.set_phph_interaction()\n        if epsilons is None:\n            _epsilons = [0.1]\n        else:\n            _epsilons = epsilons\n        self._grid_points = grid_points\n        get_frequency_shift(self._interaction,\n                            self._grid_points,\n                            self._band_indices,\n                            _epsilons,\n                            temperatures,\n                            output_filename=output_filename,\n                            log_level=self._log_level)", "docstring": "Frequency shift from lowest order diagram is calculated.\n\nArgs:\nepslins(list of float):\nThe value to avoid divergence. When multiple values are given\nfrequency shifts for those values are returned.", "source": "juraj-google-style"}
{"code": "def _should_merge(self, pytd_type, union):\n    names = self._CONTAINER_NAMES[pytd_type]\n    length = None\n    for t in union.type_list:\n        if isinstance(t, pytd_type):\n            if length is None:\n                length = len(t.parameters)\n            elif length != len(t.parameters):\n                return True\n        elif isinstance(t, pytd.GenericType) and t.name in names:\n            return True\n    return False", "docstring": "Determine whether pytd_type values in the union should be merged.\n\nIf the union contains the homogeneous flavor of pytd_type (e.g.,\nGenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type\nvalues of different lengths, we want to turn all of the pytd_type values\ninto homogeneous ones so that they can be merged into a single container.\n\nArgs:\npytd_type: The pytd type, either TupleType or CallableType.\nunion: a pytd.UnionType\n\nReturns:\nTrue if the pytd_type values should be merged, False otherwise.", "source": "github-repos"}
{"code": "def get_appliances(self, location_id):\n    \n    url = \"https:\n\n    headers = self.__gen_headers()\n    headers[\"Content-Type\"] = \"application/json\"\n\n    params = {\n      \"locationId\": location_id,\n    }\n    url = self.__append_url_params(url, params)\n\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get the appliances added for a specified location.\n\nArgs:\nlocation_id (string): identifiying string of appliance\n\nReturns:\nlist: dictionary objects containing appliances data", "source": "juraj-google-style"}
{"code": "def get_hash(path, hash_alg=\"sha256\"):\n    \n    h = hashlib.new(hash_alg)\n    with open(path, \"rb\") as f:\n        for chunk in iter(functools.partial(f.read, 4096), b''):\n            h.update(chunk)\n    return h.hexdigest()", "docstring": "Get the hash of the file at ``path``.\n\nI'd love to make this async, but evidently file i/o is always ready\n\nArgs:\npath (str): the path to the file to hash.\nhash_alg (str, optional): the algorithm to use.  Defaults to 'sha256'.\n\nReturns:\nstr: the hexdigest of the hash.", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    location = getattr(path_spec, 'location', None)\n\n    if (location is None or\n        not location.startswith(self.LOCATION_ROOT)):\n      return None\n\n    if len(location) == 1:\n      return cpio_file_entry.CPIOFileEntry(\n          self._resolver_context, self, path_spec, is_root=True,\n          is_virtual=True)\n\n    cpio_archive_file_entry = self._cpio_archive_file.GetFileEntryByPath(\n        location[1:])\n    if cpio_archive_file_entry is None:\n      return None\n\n    return cpio_file_entry.CPIOFileEntry(\n        self._resolver_context, self, path_spec,\n        cpio_archive_file_entry=cpio_archive_file_entry)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nCPIOFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def _set_mode(self, discover_mode, connect_mode):\n        \n\n        payload = struct.pack(\"<BB\", discover_mode, connect_mode)\n        response = self._send_command(6, 1, payload)\n\n        result, = unpack(\"<H\", response.payload)\n        if result != 0:\n            return False, {'reason': 'Error code from BLED112 setting mode', 'code': result}\n\n        return True, None", "docstring": "Set the mode of the BLED112, used to enable and disable advertising\n\nTo enable advertising, use 4, 2.\nTo disable advertising use 0, 0.\n\nArgs:\ndiscover_mode (int): The discoverability mode, 0 for off, 4 for on (user data)\nconnect_mode (int): The connectability mode, 0 for of, 2 for undirected connectable", "source": "juraj-google-style"}
{"code": "def __init__(self, structure, element):\n        \n        self.structure = structure\n        self.element = element\n        interstitial_finder = StructureMotifInterstitial(self.structure, self.element)\n\n        self.unique_defect_seq = []\n        \n        \n        pdc = PointDefectComparator()\n\n        for poss_site in interstitial_finder.enumerate_defectsites():\n            now_defect = Interstitial( self.structure, poss_site)\n            append_defect = True\n            for unique_defect in self.unique_defect_seq:\n                if pdc.are_equal( now_defect, unique_defect):\n                    append_defect = False\n            if append_defect:\n                self.unique_defect_seq.append( now_defect)\n\n        self.count_def = 0", "docstring": "Initializes an Interstitial generator using structure motifs\nArgs:\nstructure (Structure): pymatgen structure object\nelement (str or Element or Specie): element for the interstitial", "source": "juraj-google-style"}
{"code": "def _namespace_to_ord(namespace):\n    n = 0\n    for (i, c) in enumerate(namespace):\n        n += ((_LEX_DISTANCE[((MAX_NAMESPACE_LENGTH - i) - 1)] * NAMESPACE_CHARACTERS.index(c)) + 1)\n    return n", "docstring": "Converts a namespace string into an int representing its lexographic order.\n\n>>> _namespace_to_ord('')\n''\n>>> _namespace_to_ord('_')\n1\n>>> _namespace_to_ord('__')\n2\n\nArgs:\nnamespace: A namespace string.\n\nReturns:\nAn int representing the lexographical order of the given namespace string.", "source": "codesearchnet"}
{"code": "def smart_case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'):\n    return control_flow_case._case_helper(smart_cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)", "docstring": "Like tf.case, except attempts to statically evaluate predicates.\n\nIf any predicate in `pred_fn_pairs` is a bool or has a constant value, the\nassociated callable will be called or omitted depending on its value.\nOtherwise this functions like tf.case.\n\nArgs:\npred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a\ncallable which returns a list of tensors.\ndefault: Optional callable that returns a list of tensors.\nexclusive: True iff at most one predicate is allowed to evaluate to `True`.\nname: A name for this operation (optional).\n\nReturns:\nThe tensors returned by the first pair whose predicate evaluated to True, or\nthose returned by `default` if none does.\n\nRaises:\nTypeError: If `pred_fn_pairs` is not a list/dictionary.\nTypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\nTypeError: If `fns[i]` is not callable for any i, or `default` is not\ncallable.", "source": "github-repos"}
{"code": "def cursor_event(self, x, y, dx, dy):\n        \n        self.sys_camera.rot_state(x, y)", "docstring": "The standard mouse movement event method.\nCan be overriden to add new functionality.\nBy default this feeds the system camera with new values.\n\nArgs:\nx: The current mouse x position\ny: The current mouse y position\ndx: Delta x postion (x position difference from the previous event)\ndy: Delta y postion (y position difference from the previous event)", "source": "juraj-google-style"}
{"code": "def get_sid(principal):\n    \n    \n    if principal is None:\n        principal = 'NULL SID'\n\n    \n    try:\n        sid = salt.utils.win_functions.get_sid_from_name(principal)\n    except CommandExecutionError:\n        sid = principal\n\n    \n    try:\n        sid = win32security.ConvertStringSidToSid(sid)\n    except pywintypes.error:\n        log.exception('Invalid user/group or sid: %s', principal)\n        raise CommandExecutionError(\n            'Invalid user/group or sid: {0}'.format(principal))\n    except TypeError:\n        raise CommandExecutionError\n\n    return sid", "docstring": "Converts a username to a sid, or verifies a sid. Required for working with\nthe DACL.\n\nArgs:\n\nprincipal(str):\nThe principal to lookup the sid. Can be a sid or a username.\n\nReturns:\nPySID Object: A sid\n\nUsage:\n\n.. code-block:: python\n\n# Get a user's sid\nsalt.utils.win_dacl.get_sid('jsnuffy')\n\n# Verify that the sid is valid\nsalt.utils.win_dacl.get_sid('S-1-5-32-544')", "source": "juraj-google-style"}
{"code": "def list_physical_devices(self, device_type=None):\n    self._initialize_physical_devices()\n    if device_type is None:\n        return list(self._physical_devices)\n    return [d for d in self._physical_devices if d.device_type == device_type]", "docstring": "List local devices visible to the system.\n\nThis API allows a client to query the devices before they have been\ninitialized by the eager runtime. Additionally a user can filter by device\ntype, to get only CPUs or GPUs.\n\nArgs:\ndevice_type: Optional device type to limit results to\n\nReturns:\nList of PhysicalDevice objects.", "source": "github-repos"}
{"code": "def get_year_description(self):\n\n    def format_year(s):\n        regex = re.compile('^\\\\d+$')\n        if regex.match(s):\n            year_int = int(s)\n            if (year_int < 1900):\n                return year_int\n            return datetime.date(year_int, 1, 1).strftime('%Y')\n        else:\n            return s\n    return self.get_segment_description(self._expression_parts[6], '', (lambda s: format_year(s)), (lambda s: _(', every {0} years').format(s)), (lambda s: _(', {0} through {1}')), (lambda s: _(', only in {0}')))", "docstring": "Generates a description for only the YEAR portion of the expression\n\nReturns:\nThe YEAR description", "source": "codesearchnet"}
{"code": "def read(path, encoding=\"utf-8\"):\n    \n    try:\n        with io.open(path, encoding=encoding) as f:\n            return f.read()\n    except Exception as e:\n        logger.error(\"read: %s failed. Error: %s\", path, e)\n        return \"\"", "docstring": "Read the content of the file.\n\nArgs:\npath (str): Path to the file\nencoding (str): File encoding. Default: utf-8\n\nReturns:\nstr: File content or empty string if there was an error", "source": "juraj-google-style"}
{"code": "def _audio_response_for_run(self, tensor_events, run, tag, sample):\n    response = []\n    index = 0\n    filtered_events = self._filter_by_sample(tensor_events, sample)\n    content_type = self._get_mime_type(run, tag)\n    for (index, tensor_event) in enumerate(filtered_events):\n        data = tensor_util.make_ndarray(tensor_event.tensor_proto)\n        label = data[(sample, 1)]\n        response.append({'wall_time': tensor_event.wall_time, 'step': tensor_event.step, 'label': plugin_util.markdown_to_safe_html(label), 'contentType': content_type, 'query': self._query_for_individual_audio(run, tag, sample, index)})\n    return response", "docstring": "Builds a JSON-serializable object with information about audio.\n\nArgs:\ntensor_events: A list of image event_accumulator.TensorEvent objects.\nrun: The name of the run.\ntag: The name of the tag the audio entries all belong to.\nsample: The zero-indexed sample of the audio sample for which to\nretrieve information. For instance, setting `sample` to `2` will\nfetch information about only the third audio clip of each batch,\nand steps with fewer than three audio clips will be omitted from\nthe results.\n\nReturns:\nA list of dictionaries containing the wall time, step, URL, width, and\nheight for each audio entry.", "source": "codesearchnet"}
{"code": "def _cmd(self, command, uid=None):\n        \n        if not uid:\n            uid = self.uid\n        self._client_send(json.dumps({'cmd': command, 'uid': uid}))\n        return self._client_receive()", "docstring": "Send a command to the server.\n\nArgs:\ncommand: str, The name of the command to execute.\nuid: int, the uid of the session to send the command to.\n\nReturns:\nThe line that was written back.", "source": "juraj-google-style"}
{"code": "def compute_values(edge_compatibility, v):\n    all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)\n    output = tf.reduce_sum(all_edge_values, axis=1)\n    return output", "docstring": "Compute values. If edge compatibilities is just adjacency, we get ggnn.\n\nArgs:\nedge_compatibility: A tensor of shape [batch, num_transforms, length, depth]\nv: A tensor of shape [batch, num_transforms, length, depth]\n\nReturns:\noutput: A [batch, length, depth] tensor", "source": "codesearchnet"}
{"code": "def configs_for_writer(writer=None, ppp_config_dir=None):\n    \n    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()\n    if writer is not None:\n        if not isinstance(writer, (list, tuple)):\n            writer = [writer]\n        \n        config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]\n    else:\n        writer_configs = glob_config(os.path.join('writers', '*.yaml'),\n                                     *search_paths)\n        config_files = set(writer_configs)\n\n    for config_file in config_files:\n        config_basename = os.path.basename(config_file)\n        writer_configs = config_search_paths(\n            os.path.join(\"writers\", config_basename), *search_paths)\n\n        if not writer_configs:\n            LOG.warning(\"No writer configs found for '%s'\", writer)\n            continue\n\n        yield writer_configs", "docstring": "Generator of writer configuration files for one or more writers\n\nArgs:\nwriter (Optional[str]): Yield configs only for this writer\nppp_config_dir (Optional[str]): Additional configuration directory\nto search for writer configuration files.\n\nReturns: Generator of lists of configuration files", "source": "juraj-google-style"}
{"code": "def _override_helper(clazz_object, operator, func):\n    if operator not in Tensor.OVERLOADABLE_OPERATORS:\n        raise ValueError(f'Overriding {operator} is disallowed. Allowed operators are {Tensor.OVERLOADABLE_OPERATORS}.')\n    setattr(clazz_object, operator, func)", "docstring": "Overrides (string) operator on Tensors to call func.\n\nArgs:\nclazz_object: the class to override for; either Tensor or SparseTensor.\noperator: the string name of the operator to override.\nfunc: the function that replaces the overridden operator.\n\nRaises:\nValueError: If operator is not allowed to be overwritten.", "source": "github-repos"}
{"code": "def configs_for_reader(reader=None, ppp_config_dir=None):\n    \n    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()\n    if reader is not None:\n        if not isinstance(reader, (list, tuple)):\n            reader = [reader]\n        \n        new_readers = []\n        for reader_name in reader:\n            if reader_name.endswith('.yaml') or reader_name not in OLD_READER_NAMES:\n                new_readers.append(reader_name)\n                continue\n\n            new_name = OLD_READER_NAMES[reader_name]\n            \n            \n            raise ValueError(\"Reader name '{}' has been deprecated, use '{}' instead.\".format(reader_name, new_name))\n            \n\n        reader = new_readers\n        \n        config_files = [r if r.endswith('.yaml') else r + '.yaml' for r in reader]\n    else:\n        reader_configs = glob_config(os.path.join('readers', '*.yaml'),\n                                     *search_paths)\n        config_files = set(reader_configs)\n\n    for config_file in config_files:\n        config_basename = os.path.basename(config_file)\n        reader_configs = config_search_paths(\n            os.path.join(\"readers\", config_basename), *search_paths)\n\n        if not reader_configs:\n            \n            \n            raise ValueError(\"No reader(s) named: {}\".format(reader))\n\n        yield reader_configs", "docstring": "Generator of reader configuration files for one or more readers\n\nArgs:\nreader (Optional[str]): Yield configs only for this reader\nppp_config_dir (Optional[str]): Additional configuration directory\nto search for reader configuration files.\n\nReturns: Generator of lists of configuration files", "source": "juraj-google-style"}
{"code": "def AnalyzeFileObject(self, file_object):\n    \n    tsk_image_object = tsk_image.TSKFileSystemImage(file_object)\n\n    try:\n      pytsk3.Volume_Info(tsk_image_object)\n    except IOError:\n      return None\n\n    return self.type_indicator", "docstring": "Retrieves the format specification.\n\nArgs:\nfile_object (FileIO): file-like object.\n\nReturns:\nstr: type indicator if the file-like object contains a supported format\nor None otherwise.", "source": "juraj-google-style"}
{"code": "def restrict_bond_dict(self, bond_dict):\n        \n        return {j: bond_dict[j] & set(self.index) for j in self.index}", "docstring": "Restrict a bond dictionary to self.\n\nArgs:\nbond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,\nto see examples for a bond_dict.\n\nReturns:\nbond dictionary", "source": "juraj-google-style"}
{"code": "def abs_path(rel_path):\n    return os.path.abspath(os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path))", "docstring": "Convert a path that is relative to the module from which this function is called,\nto an absolute path.\n\nArgs:\nrel_path: str\nPath relative to the location of the module file from which this function is called.\n\nReturns:\nstr : Absolute path to the location specified by ``rel_path``.", "source": "codesearchnet"}
{"code": "def get_error_name(error):\n    \n    error_type = type(error)\n    if error_type.__module__ in ['__main__', 'builtins']:\n        return error_type.__name__\n    else:\n        return f'{error_type.__module__}.{error_type.__name__}'", "docstring": "Return canonical error name as string.\n\nFor builtin errors like ValueError or Exception, will return the bare\nname, like ValueError or Exception.\n\nFor all other exceptions, will return modulename.errorname, such as\narbpackage.mod.myerror\n\nArgs:\nerror: Exception object.\n\nReturns:\nstr. Canonical error name.", "source": "juraj-google-style"}
{"code": "def ReadFile(filename, logger=None):\n    try:\n        encoding = file_resources.FileEncoding(filename)\n        with codecs.open(filename, mode='r', encoding=encoding) as fd:\n            lines = fd.readlines()\n        line_ending = file_resources.LineEnding(lines)\n        source = '\\n'.join((line.rstrip('\\r\\n') for line in lines)) + '\\n'\n        return (source, line_ending, encoding)\n    except IOError as e:\n        if logger:\n            logger(e)\n        e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3]))\n        raise\n    except UnicodeDecodeError as e:\n        if logger:\n            logger('Could not parse %s! Consider excluding this file with --exclude.', filename)\n            logger(e)\n        e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3]))\n        raise", "docstring": "Read the contents of the file.\n\nAn optional logger can be specified to emit messages to your favorite logging\nstream. If specified, then no exception is raised. This is external so that it\ncan be used by third-party applications.\n\nArguments:\nfilename: (unicode) The name of the file.\nlogger: (function) A function or lambda that takes a string and emits it.\n\nReturns:\nThe contents of filename.\n\nRaises:\nIOError: raised if there was an error reading the file.", "source": "github-repos"}
{"code": "def gen_schedule(user, num_blocks=6, surrounding_blocks=None):\n    no_signup_today = None\n    schedule = []\n    if (surrounding_blocks is None):\n        surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks)\n    if (len(surrounding_blocks) == 0):\n        return (None, False)\n    signups = EighthSignup.objects.filter(user=user, scheduled_activity__block__in=surrounding_blocks).select_related('scheduled_activity', 'scheduled_activity__block', 'scheduled_activity__activity')\n    block_signup_map = {s.scheduled_activity.block.id: s.scheduled_activity for s in signups}\n    for b in surrounding_blocks:\n        current_sched_act = block_signup_map.get(b.id, None)\n        if current_sched_act:\n            current_signup = current_sched_act.title_with_flags\n            current_signup_cancelled = current_sched_act.cancelled\n            current_signup_sticky = current_sched_act.activity.sticky\n            rooms = current_sched_act.get_scheduled_rooms()\n        else:\n            current_signup = None\n            current_signup_cancelled = False\n            current_signup_sticky = False\n            rooms = None\n        flags = ('locked' if b.locked else 'open')\n        blk_today = b.is_today()\n        if (blk_today and (not current_signup)):\n            flags += ' warning'\n        if current_signup_cancelled:\n            flags += ' cancelled warning'\n        if current_signup_cancelled:\n            current_signup = current_signup.replace(' (Cancelled)', '')\n        info = {'id': b.id, 'block': b, 'block_letter': b.block_letter, 'current_signup': current_signup, 'current_signup_cancelled': current_signup_cancelled, 'current_signup_sticky': current_signup_sticky, 'locked': b.locked, 'date': b.date, 'flags': flags, 'is_today': blk_today, 'signup_time': b.signup_time, 'signup_time_future': b.signup_time_future, 'rooms': rooms}\n        schedule.append(info)\n        if (blk_today and (not current_signup)):\n            no_signup_today = True\n    return (schedule, no_signup_today)", "docstring": "Generate a list of information about a block and a student's current activity signup.\n\nReturns:\nschedule\nno_signup_today", "source": "codesearchnet"}
{"code": "def __init__(self, context):\n    \n    self._db_connection_provider = context.db_connection_provider\n    self._multiplexer = context.multiplexer", "docstring": "Instantiates HistogramsPlugin via TensorBoard core.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def dbr(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbr`'.format(value))\n    self._dbr = value", "docstring": "Corresponds to IDD Field `dbr` Daily temperature range for hottest\nmonth.\n\n[defined as mean of the difference between daily maximum\nand daily minimum dry-bulb temperatures for hottest month]\n\nArgs:\nvalue (float): value for IDD Field `dbr`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def __init__(self, context, name, task_id):\n        \n        self.name = name\n        self.context = context\n        self.task_id = task_id", "docstring": "Initialize ChainOfTrust.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context\nname (str): the name of the task (e.g., signing)\ntask_id (str): the task_id of the task", "source": "juraj-google-style"}
{"code": "def GetStringTypeSummary(obj, available_space, line_length):\n    if len(obj) + len(TWO_DOUBLE_QUOTES) <= available_space:\n        content = obj\n    else:\n        additional_len_needed = len(TWO_DOUBLE_QUOTES) + len(formatting.ELLIPSIS)\n        if available_space < additional_len_needed:\n            available_space = line_length\n        content = formatting.EllipsisTruncate(obj, available_space - len(TWO_DOUBLE_QUOTES), line_length)\n    return formatting.DoubleQuote(content)", "docstring": "Returns a custom summary for string type objects.\n\nThis function constructs a summary for string type objects by double quoting\nthe string value. The double quoted string value will be potentially truncated\nwith ellipsis depending on whether it has enough space available to show the\nfull string value.\n\nArgs:\nobj: The object to generate summary for.\navailable_space: Number of character spaces available.\nline_length: The full width of the terminal, default is 80.\n\nReturns:\nA summary for the input object.", "source": "github-repos"}
{"code": "def CredibleInterval(self, percentage=90):\n    prob = ((1 - (percentage / 100.0)) / 2)\n    interval = (self.Value(prob), self.Value((1 - prob)))\n    return interval", "docstring": "Computes the central credible interval.\n\nIf percentage=90, computes the 90% CI.\n\nArgs:\npercentage: float between 0 and 100\n\nReturns:\nsequence of two floats, low and high", "source": "codesearchnet"}
{"code": "def add_triple(self, p, o, auto_refresh=True):\n    self.rdf.graph.add((self.uri, p, self._handle_object(o)))\n    self._handle_triple_refresh(auto_refresh)", "docstring": "add triple by providing p,o, assumes s = subject\n\nArgs:\np (rdflib.term.URIRef): predicate\no (): object\nauto_refresh (bool): whether or not to update object-like self.rdf.triples\n\nReturns:\nNone: adds triple to self.rdf.graph", "source": "codesearchnet"}
{"code": "def download_files_maybe_extract(urls, directory, check_files=[]):\n    check_files = [os.path.join(directory, f) for f in check_files]\n    if _check_download(*check_files):\n        return\n    for url in urls:\n        download_file_maybe_extract(url=url, directory=directory)\n    if (not _check_download(*check_files)):\n        raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')", "docstring": "Download the files at ``urls`` to ``directory``. Extract to ``directory`` if tar or zip.\n\nArgs:\nurls (str): Url of files.\ndirectory (str): Directory to download to.\ncheck_files (list of str): Check if these files exist, ensuring the download succeeded.\nIf these files exist before the download, the download is skipped.\n\nRaises:\nValueError: Error if one of the ``check_files`` are not found following the download.", "source": "codesearchnet"}
{"code": "def group_by_size(input_tensors, bytes_per_pack):\n    if bytes_per_pack == 0:\n        return [input_tensors]\n    packs = []\n    last_pack_size = 0\n    for value in input_tensors:\n        num_elements = value.shape.num_elements()\n        if num_elements is None:\n            logging.warning('not packing values due to the unknown or inconsistent shape of %s', value)\n            return [input_tensors]\n        size = num_elements * value.dtype.size\n        if not packs or last_pack_size > bytes_per_pack:\n            packs.append([])\n            last_pack_size = 0\n        packs[-1].append(value)\n        last_pack_size += size\n    return packs", "docstring": "Groups `input_tensors` into chunks of `bytes_per_pack`.\n\nThe method preserves the original order of `input_tensors`. The grouping is\nbest effort, each pack could have more or less bytes than `bytes_per_pack`.\nIt only groups values with known shape.\n\nArgs:\ninput_tensors: a list of Tensor.\nbytes_per_pack: an integer.\n\nReturns:\nA list of packs of Tensor. All values are grouped into one pack if\n`bytes_per_pack` is zero or any of the value has unknown shape.", "source": "github-repos"}
{"code": "def convert_aspect_ratios_to_ids(aspect_ratios: List[List[Tuple[int, int]]], max_image_tiles: int) -> np.ndarray:\n    batch_size = len(aspect_ratios)\n    max_num_images = max([len(row) for row in aspect_ratios])\n    supported_aspect_ratios = get_all_supported_aspect_ratios(max_image_tiles)\n    aspect_ratios_ids = np.zeros((batch_size, max_num_images), dtype=np.int64)\n    for i, sample_aspect_ratios in enumerate(aspect_ratios):\n        for j, (num_tiles_h, num_tiles_w) in enumerate(sample_aspect_ratios):\n            aspect_ratios_ids[i, j] = supported_aspect_ratios.index((num_tiles_h, num_tiles_w)) + 1\n    return aspect_ratios_ids", "docstring": "Convert aspect ratio tuples to unique ids.\n\nFor batch padding we use 0, because there might be different number of images in each batch.\nThe aspect ratio ids start from 1, with 1 corresponding to the first supported aspect ratio.\n\nArgs:\naspect_ratios (`List[List[Tuple[int, int]]]`):\nA list of aspect ratios for each image in the batch.\nmax_image_tiles (`int`):\nThe maximum number of tiles any image can be split into.\n\nReturns:\n`np.ndarray`:\nThe aspect ratios ids as a numpy array with shape (batch_size, max_num_images).\nEach id corresponds to the index of the aspect ratio in the list of supported aspect ratios,\noffset by 1 (so 0 can be used for padding).", "source": "github-repos"}
{"code": "def get_lib():\n    import tensorflow as tf\n    return _os_path.join(_os_path.dirname(tf.__file__))", "docstring": "Get the directory containing the TensorFlow framework library.\n\nReturns:\nThe directory as string.", "source": "github-repos"}
{"code": "def List(self, device_path):\n        \n        connection = self.protocol_handler.Open(self._handle, destination=b'sync:')\n        listing = self.filesync_handler.List(connection, device_path)\n        connection.Close()\n        return listing", "docstring": "Return a directory listing of the given path.\n\nArgs:\ndevice_path: Directory to list.", "source": "juraj-google-style"}
{"code": "def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]:\n    input_audio = raw_audio.permute(0, 2, 1).float()\n    latent_states = []\n    for level in range(self.levels):\n        encoder = self.encoders[level]\n        latent_state = encoder(input_audio)\n        latent_states.append(latent_state[-1])\n    _, music_tokens, commit_losses, _ = self.bottleneck(latent_states)\n    dequantised_states = []\n    for level in range(self.levels):\n        decoder = self.decoders[level]\n        dequantised_state = decoder(music_tokens[level:level + 1], all_levels=False)\n        dequantised_states.append(dequantised_state.permute(0, 2, 1))\n    commit_loss = sum(commit_losses)\n    loss = self.commit * commit_loss\n    return (dequantised_states, loss)", "docstring": "Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level.\nThe commit loss, which ensure that the encoder's computed embeddings are close to the codebook vectors, is\ncomputed.\n\nArgs:\nraw_audio (`torch.FloatTensor`):\nAudio input which will be encoded and decoded.\n\nReturns:\n`Tuple[torch.Tensor, torch.Tensor]`\n\n\nExample:\n```python\n>>> from transformers import JukeboxVQVAE, set_seed\n>>> import torch\n\n>>> model = JukeboxVQVAE.from_pretrained(\"openai/jukebox-1b-lyrics\").eval()\n>>> set_seed(0)\n>>> zs = [torch.randint(100, (4, 1))]\n>>> model.decode(zs).shape\ntorch.Size([4, 8, 1])\n```", "source": "github-repos"}
{"code": "def make_multi_lagger(lags, groupby_kwargs=None):\n    \n    laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]\n    feature_union = FeatureUnion([\n        (repr(lagger), lagger) for lagger in laggers\n    ])\n    return feature_union", "docstring": "Return a union of transformers that apply different lags\n\nArgs:\nlags (Collection[int]): collection of lags to apply\ngroupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby", "source": "juraj-google-style"}
{"code": "def profile_df(df):\n    return IPython.core.display.HTML(pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))", "docstring": "Generate a profile of data in a dataframe.\n\nArgs:\ndf: the Pandas dataframe.", "source": "codesearchnet"}
{"code": "def GenerateLibSig(short_name):\n    with _UTILITY_LOCK:\n        utilities_used = ', '.join([utility for utility in sorted(_utility_registry)])\n        _utility_registry.Clear()\n    if utilities_used:\n        return (' (%s, %s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION, utilities_used))\n    else:\n        return (' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION))", "docstring": "Generates a library signature suitable for a user agent field.\n\nArgs:\nshort_name: The short, product-specific string name for the library.\nReturns:\nA library signature string to append to user-supplied user-agent value.", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_handler = VertexAIModelHandlerJSON(endpoint_id=known_args.endpoint, project=known_args.project, location=known_args.location, experiment=known_args.experiment, network=known_args.vpc_network, private=known_args.private)\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    read_glob = pipeline | 'Get glob' >> beam.Create([known_args.input])\n    read_image_name = read_glob | 'Get Image Paths' >> fileio.MatchAll()\n    load_image = read_image_name | 'Read Image' >> beam.Map(lambda image_name: read_image(image_name.path))\n    preprocess = load_image | 'Preprocess Image' >> beam.MapTuple(lambda img_name, img: (img_name, preprocess_image(img)))\n    predictions = preprocess | 'RunInference' >> RunInference(KeyedModelHandler(model_handler))\n    process_output = predictions | 'Process Predictions' >> beam.ParDo(PostProcessor())\n    _ = process_output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def _generate_response(self, response: dict, request: dict) -> dict:\n        \n        response_template = deepcopy(self.response_template)\n        response_template['sessionAttributes']['sessionId'] = request['session']['sessionId']\n\n        for key, value in response_template.items():\n            if key not in response.keys():\n                response[key] = value\n\n        return response", "docstring": "Populates generated response with additional data conforming Alexa response specification.\n\nArgs:\nresponse: Raw user input extracted from Alexa request.\nrequest: Alexa request.\nReturns:\nresponse: Response conforming Alexa response specification.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids('.')]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    if self.padding_side == 'right':\n        return len(cls + token_ids_0 + question_suffix + sep) * [0] + len(token_ids_1 + sep) * [1]\n    else:\n        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + question_suffix + sep) * [1]", "docstring": "Create the token type IDs corresponding to the sequences passed. [What are token type\nIDs?](../glossary#token-type-ids)\n\nShould be overridden in a subclass if the model has a special way of building those.\n\nArgs:\ntoken_ids_0 (`List[int]`): The first tokenized sequence.\ntoken_ids_1 (`List[int]`, *optional*): The second tokenized sequence.\n\nReturns:\n`List[int]`: The token type ids.", "source": "github-repos"}
{"code": "def read_folder(directory):\n    \n    res = []\n    for filename in os.listdir(directory):\n        with io.open(os.path.join(directory, filename), encoding=\"utf-8\") as f:\n            content = f.read()\n            res.append(content)\n    return res", "docstring": "read text files in directory and returns them as array\n\nArgs:\ndirectory: where the text files are\n\nReturns:\nArray of text", "source": "juraj-google-style"}
{"code": "class VptqConfig(QuantizationConfigMixin):\n\n    def __init__(self, enable_proxy_error: bool=False, config_for_layers: Dict[str, Any]={}, shared_layer_config: Dict[str, Any]={}, modules_to_not_convert: Optional[List]=None, **kwargs):\n        self.quant_method = QuantizationMethod.VPTQ\n        self.enable_proxy_error = enable_proxy_error\n        self.config_for_layers: Dict[str, Any] = config_for_layers\n        self.shared_layer_config: Dict[str, Any] = shared_layer_config\n        self.modules_to_not_convert = modules_to_not_convert\n        self.post_init()\n\n    def post_init(self):\n        \n        for layer_name, layer_param in self.config_for_layers.items():\n            VptqLayerConfig(**layer_param)\n        if self.enable_proxy_error is True:\n            raise ValueError('enable_proxy_error should always be False until we support training')", "docstring": "This is a wrapper class about `vptq` parameters.\n\nArgs:\nenable_proxy_error (`bool`, *optional*, defaults to `False`): calculate proxy error for each layer\nconfig_for_layers (`Dict`, *optional*, defaults to `{}`): quantization params for each layer\nshared_layer_config (`Dict`, *optional*, defaults to `{}`): shared quantization params among layers\nmodules_to_not_convert (`list`, *optional*, default to `None`):\nThe list of modules to not quantize, useful for quantizing models that explicitly require to have\nsome modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional parameters from which to initialize the configuration object.", "source": "github-repos"}
{"code": "def set_Tc(self, Tc, T=None):\n    if isinstance(Tc, Iterable):\n        if (len(Tc) == len(T)):\n            x = np.concatenate(([(- ttconf.BIG_NUMBER)], T, [ttconf.BIG_NUMBER]))\n            y = np.concatenate(([Tc[0]], Tc, [Tc[(- 1)]]))\n            self.Tc = interp1d(x, y)\n        else:\n            self.logger('need Tc values and Timepoints of equal length', 2, warn=True)\n            self.Tc = interp1d([(- ttconf.BIG_NUMBER), ttconf.BIG_NUMBER], [1e-05, 1e-05])\n    else:\n        self.Tc = interp1d([(- ttconf.BIG_NUMBER), ttconf.BIG_NUMBER], [(Tc + ttconf.TINY_NUMBER), (Tc + ttconf.TINY_NUMBER)])\n    self.calc_integral_merger_rate()", "docstring": "initialize the merger model with a coalescent time\n\nArgs:\n- Tc:   a float or an iterable, if iterable another argument T of same shape is required\n- T:    an array like of same shape as Tc that specifies the time pivots corresponding to Tc\nReturns:\n- None", "source": "codesearchnet"}
{"code": "def post_transform(self, args):\n        \n        \n        args = args[1:] if args and args[0] == 'az' else args\n\n        post_transform_commands = []\n        for i, arg in enumerate(args):\n            \n            if is_alias_command(['create'], args) and i > 0 and args[i - 1] in ['-c', '--command']:\n                post_transform_commands.append(arg)\n            else:\n                post_transform_commands.append(os.path.expandvars(arg))\n\n        AliasManager.write_alias_config_hash(self.alias_config_hash)\n        AliasManager.write_collided_alias(self.collided_alias)\n\n        return post_transform_commands", "docstring": "Inject environment variables, and write hash to alias hash file after transforming alias to commands.\n\nArgs:\nargs: A list of args to post-transform.", "source": "juraj-google-style"}
{"code": "def readinto(self, b):\n        \n        if not self._readable:\n            raise UnsupportedOperation('read')\n\n        with self._seek_lock:\n            \n            seek = self._seek\n\n            \n            queue = self._read_queue\n            if seek == 0:\n                \n                self._preload_range()\n\n            \n            size = len(b)\n            if size:\n                \n                \n                b_view = memoryview(b)\n                size_left = size\n            else:\n                \n                \n                b_view = b\n                size_left = -1\n            b_end = 0\n\n            \n            buffer_size = self._buffer_size\n            while size_left > 0 or size_left == -1:\n\n                \n                start = seek % buffer_size\n                queue_index = seek - start\n\n                \n                try:\n                    buffer = queue[queue_index]\n                except KeyError:\n                    \n                    break\n\n                \n                with handle_os_exceptions():\n                    try:\n                        queue[queue_index] = buffer = buffer.result()\n\n                    \n                    except AttributeError:\n                        pass\n                buffer_view = memoryview(buffer)\n                data_size = len(buffer)\n\n                \n                if not data_size:\n                    break\n\n                \n                if size_left != -1:\n                    end = start + size_left\n                else:\n                    end = data_size - start\n\n                \n                if end >= data_size:\n                    \n                    end = data_size\n\n                    \n                    del queue[queue_index]\n\n                    \n                    index = queue_index + buffer_size * self._max_buffers\n                    if index < self._size:\n                        queue[index] = self._workers.submit(\n                            self._read_range, index, index + buffer_size)\n\n                \n                read_size = end - start\n                if size_left != -1:\n                    size_left -= read_size\n                seek += read_size\n\n                \n                b_start = b_end\n                b_end = b_start + read_size\n\n                \n                b_view[b_start:b_end] = buffer_view[start:end]\n\n            \n            self._seek = seek\n            self._raw.seek(seek)\n\n        \n        return b_end", "docstring": "Read bytes into a pre-allocated, writable bytes-like object b,\nand return the number of bytes read.\n\nArgs:\nb (bytes-like object): buffer.\n\nReturns:\nint: number of bytes read", "source": "juraj-google-style"}
{"code": "def _get_jwt_for_audience(self, audience):\n    (token, expiry) = self._cache.get(audience, (None, None))\n    if ((token is None) or (expiry < _helpers.utcnow())):\n        (token, expiry) = self._make_jwt_for_audience(audience)\n        self._cache[audience] = (token, expiry)\n    return token", "docstring": "Get a JWT For a given audience.\n\nIf there is already an existing, non-expired token in the cache for\nthe audience, that token is used. Otherwise, a new token will be\ncreated.\n\nArgs:\naudience (str): The intended audience.\n\nReturns:\nbytes: The encoded JWT.", "source": "codesearchnet"}
{"code": "def transform_array_to_list(array):\n    if ((array.dtype.kind in ('u', 'i', 'f')) and (~ np.isfinite(array)).any()):\n        transformed = array.astype('object')\n        transformed[np.isnan(array)] = 'NaN'\n        transformed[np.isposinf(array)] = 'Infinity'\n        transformed[np.isneginf(array)] = '-Infinity'\n        return transformed.tolist()\n    elif ((array.dtype.kind == 'O') and pd and pd.isnull(array).any()):\n        transformed = array.astype('object')\n        transformed[pd.isnull(array)] = 'NaN'\n        return transformed.tolist()\n    return array.tolist()", "docstring": "Transforms a NumPy array into a list of values\n\nArgs:\narray (np.nadarray) : the NumPy array series to transform\n\nReturns:\nlist or dict", "source": "codesearchnet"}
{"code": "def interp_color(a, b, f):\n    a_ = (a.redF(), a.greenF(), a.blueF())\n    b_ = (b.redF(), b.greenF(), b.blueF())\n    a_ = [(x * (1 - f)) for x in a_]\n    b_ = [(x * f) for x in b_]\n    c = [(x + y) for (x, y) in zip(a_, b_)]\n    return QtGui.QColor.fromRgbF(*c)", "docstring": "Interpolate between two colors.\n\nReturns:\n`QColor` object.", "source": "codesearchnet"}
{"code": "def return_handler(self, call_node, function_nodes, saved_function_call_index, first_node):\n    if any((isinstance(node, YieldNode) for node in function_nodes)):\n        rhs_prefix = 'yld_'\n    elif any((isinstance(node, ConnectToExitNode) for node in function_nodes)):\n        rhs_prefix = 'ret_'\n    else:\n        return\n    LHS = ((CALL_IDENTIFIER + 'call_') + str(saved_function_call_index))\n    RHS = (rhs_prefix + get_call_names_as_string(call_node.func))\n    return_node = RestoreNode(((LHS + ' = ') + RHS), LHS, [RHS], line_number=call_node.lineno, path=self.filenames[(- 1)])\n    return_node.first_node = first_node\n    self.nodes[(- 1)].connect(return_node)\n    self.nodes.append(return_node)", "docstring": "Handle the return from a function during a function call.\n\nArgs:\ncall_node(ast.Call) : The node that calls the definition.\nfunction_nodes(list[Node]): List of nodes of the function being called.\nsaved_function_call_index(int): Unique number for each call.\nfirst_node(EntryOrExitNode or RestoreNode): Used to connect previous statements to this function.", "source": "codesearchnet"}
{"code": "def add(self, files):\n    if (files.__class__.__name__ == 'str'):\n        self._files.append(files)\n    else:\n        self._files.extend(files)", "docstring": "Adds files to check.\n\nArgs:\nfiles: List of files to check.", "source": "codesearchnet"}
{"code": "def FromDictionary(cls, msg_dict):\n    level = msg_dict.get('level')\n    msg = msg_dict.get('message')\n    now = msg_dict.get('now_time')\n    created = msg_dict.get('created_time')\n    count = msg_dict.get('count', 1)\n    msg_id = msg_dict.get('id', 0)\n    new_msg = ServiceMessage(level, msg, msg_id, created, now)\n    if (count > 1):\n        new_msg.count = count\n    return new_msg", "docstring": "Create from a dictionary with kv pairs.\n\nArgs:\nmsg_dict (dict): A dictionary with information as created by to_dict()\n\nReturns:\nServiceMessage: the converted message", "source": "codesearchnet"}
{"code": "def check(self):\n    if (not isinstance(self.parsed_yaml, dict)):\n        msg = 'In {0}:\\n'.format(self.sourcefile)\n        msg += 'Assistants and snippets must be Yaml mappings, not \"{0}\"!'.format(self.parsed_yaml)\n        raise exceptions.YamlTypeError(msg)\n    self._check_fullname(self.sourcefile)\n    self._check_description(self.sourcefile)\n    self._check_section_names(self.sourcefile)\n    self._check_project_type(self.sourcefile)\n    self._check_args(self.sourcefile)\n    self._check_files(self.sourcefile)\n    self._check_dependencies(self.sourcefile)\n    self._check_run(self.sourcefile)", "docstring": "Checks whether loaded yaml is well-formed according to syntax defined for\nversion 0.9.0 and later.\n\nRaises:\nYamlError: (containing a meaningful message) when the loaded Yaml\nis not well formed", "source": "codesearchnet"}
{"code": "def stack(xs, dim_name, axis=0, name=None):\n  \n  ret = StackOperation(xs, dim_name, axis, name).outputs[0]\n  return ret", "docstring": "Stack multiple Tensors to make a new dimension.\n\nArgs:\nxs: a list of Tensors with identical shapes.\ndim_name: a string (name of the new dimension)\naxis: an integer (index of the new dimension in the output shape)\nname: an optional string\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def _send(self, email_message):\n    pre_send.send(self.__class__, message=email_message)\n    if (not email_message.recipients()):\n        return False\n    from_email = sanitize_address(email_message.from_email, email_message.encoding)\n    recipients = [sanitize_address(addr, email_message.encoding) for addr in email_message.recipients()]\n    message = email_message.message().as_bytes(linesep='\\r\\n')\n    try:\n        result = self.conn.send_raw_email(Source=from_email, Destinations=recipients, RawMessage={'Data': message})\n        message_id = result['MessageId']\n        post_send.send(self.__class__, message=email_message, message_id=message_id)\n    except ClientError:\n        if (not self.fail_silently):\n            raise\n        return False\n    return True", "docstring": "Sends an individual message via the Amazon SES HTTP API.\n\nArgs:\nemail_message: A single Django EmailMessage object.\nReturns:\nTrue if the EmailMessage was sent successfully, otherwise False.\nRaises:\nClientError: An interaction with the Amazon SES HTTP API\nfailed.", "source": "codesearchnet"}
{"code": "def __init__(self, location, resource_pool):\n        \n        super(FileSystemPackageRepository, self).__init__(location, resource_pool)\n\n        global _settings\n        _settings = config.plugins.package_repository.filesystem\n\n        self.register_resource(FileSystemPackageFamilyResource)\n        self.register_resource(FileSystemPackageResource)\n        self.register_resource(FileSystemVariantResource)\n\n        self.register_resource(FileSystemCombinedPackageFamilyResource)\n        self.register_resource(FileSystemCombinedPackageResource)\n        self.register_resource(FileSystemCombinedVariantResource)\n\n        self.get_families = lru_cache(maxsize=None)(self._get_families)\n        self.get_family = lru_cache(maxsize=None)(self._get_family)\n        self.get_packages = lru_cache(maxsize=None)(self._get_packages)\n        self.get_variants = lru_cache(maxsize=None)(self._get_variants)\n        self.get_file = lru_cache(maxsize=None)(self._get_file)", "docstring": "Create a filesystem package repository.\n\nArgs:\nlocation (str): Path containing the package repository.", "source": "juraj-google-style"}
{"code": "def extraterrestrial_direct_normal_radiation(self, value=9999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `extraterrestrial_direct_normal_radiation`'.format(value))\n            if value < 0.0:\n                raise ValueError(\n                    'value need to be greater or equal 0.0 '\n                    'for field `extraterrestrial_direct_normal_radiation`')\n\n        self._extraterrestrial_direct_normal_radiation = value", "docstring": "Corresponds to IDD Field `extraterrestrial_direct_normal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `extraterrestrial_direct_normal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def add(self, information, timeout=(- 1)):\n    return self._client.create(information, timeout=timeout)", "docstring": "Adds a data center resource based upon the attributes specified.\n\nArgs:\ninformation: Data center information\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Added data center.", "source": "codesearchnet"}
{"code": "def SetLines(self, lines):\n    \n\n    (self._cli_lines, self._cli_cols) = TerminalSize()\n\n    if lines:\n      self._cli_lines = int(lines)", "docstring": "Set number of screen lines.\n\nArgs:\nlines: An int, number of lines. If None, use terminal dimensions.\n\nRaises:\nValueError, TypeError: Not a valid integer representation.", "source": "juraj-google-style"}
{"code": "def calc_padding(fmt, align):\n    remain = (struct.calcsize(fmt) % align)\n    if (remain == 0):\n        return ''\n    return ('x' * (align - remain))", "docstring": "Calculate how many padding bytes needed for ``fmt`` to be aligned to\n``align``.\n\nArgs:\nfmt (str): :mod:`struct` format.\nalign (int): alignment (2, 4, 8, etc.)\n\nReturns:\nstr: padding format (e.g., various number of 'x').\n\n>>> calc_padding('b', 2)\n'x'\n\n>>> calc_padding('b', 3)\n'xx'", "source": "codesearchnet"}
{"code": "def run(self, dag):\n    for node in dag.threeQ_or_more_gates():\n        rule = node.op.definition\n        if (not rule):\n            raise QiskitError(('Cannot unroll all 3q or more gates. No rule to expand instruction %s.' % node.op.name))\n        decomposition = DAGCircuit()\n        decomposition.add_qreg(rule[0][1][0][0])\n        for inst in rule:\n            decomposition.apply_operation_back(*inst)\n        decomposition = self.run(decomposition)\n        dag.substitute_node_with_dag(node, decomposition)\n    return dag", "docstring": "Expand 3+ qubit gates using their decomposition rules.\n\nArgs:\ndag(DAGCircuit): input dag\nReturns:\nDAGCircuit: output dag with maximum node degrees of 2\nRaises:\nQiskitError: if a 3q+ gate is not decomposable", "source": "codesearchnet"}
{"code": "def write(self, auth, resource, value, options={}, defer=False):\n        \n        return self._call('write', auth, [resource, value, options], defer)", "docstring": "Writes a single value to the resource specified.\n\nArgs:\nauth: cik for authentication.\nresource: resource to write to.\nvalue: value to write\noptions: options.", "source": "juraj-google-style"}
{"code": "class BatchFeature(BaseBatchFeature):", "docstring": "Holds the output of the image processor specific `__call__` methods.\n\nThis class is derived from a python dictionary and can be used as a dictionary.\n\nArgs:\ndata (`dict`):\nDictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).\ntensor_type (`Union[None, str, TensorType]`, *optional*):\nYou can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at\ninitialization.", "source": "github-repos"}
{"code": "def _force_edge_active(self, seqs: List[List[GridQubit]], edge: EDGE,\n                           sample_bool: Callable[[], bool]\n                           ) -> List[List[GridQubit]]:\n        \n\n        n0, n1 = edge\n\n        \n        seqs = list(seqs)\n\n        \n        i0, j0 = index_2d(seqs, n0)\n        i1, j1 = index_2d(seqs, n1)\n        s0 = seqs[i0]\n        s1 = seqs[i1]\n\n        \n        \n        \n        if i0 != i1:\n\n            \n            \n            part = [s0[:j0], s0[j0 + 1:]], [s1[:j1], s1[j1 + 1:]]\n\n            \n            del seqs[max(i0, i1)]\n            del seqs[min(i0, i1)]\n\n            \n            \n            c0 = 0 if not part[0][1] else 1 if not part[0][\n                0] else sample_bool()\n            if c0:\n                part[0][c0].reverse()\n\n            \n            \n            c1 = 0 if not part[1][1] else 1 if not part[1][\n                0] else sample_bool()\n            if not c1:\n                part[1][c1].reverse()\n\n            \n            seqs.append(part[0][c0] + [n0, n1] + part[1][c1])\n\n            \n            other = [1, 0]\n            seqs.append(part[0][other[c0]])\n            seqs.append(part[1][other[c1]])\n        else:\n            \n            if j0 > j1:\n                j0, j1 = j1, j0\n                n0, n1 = n1, n0\n\n            \n            \n            \n            head = s0[:j0]\n            inner = s0[j0 + 1:j1]\n            tail = s0[j1 + 1:]\n\n            \n            del seqs[i0]\n\n            \n            \n            if sample_bool():\n                \n                if sample_bool():\n                    seqs.append(inner + [n1, n0] + head[::-1])\n                    seqs.append(tail)\n                else:\n                    seqs.append(tail[::-1] + [n1, n0] + inner)\n                    seqs.append(head)\n            else:\n                \n                seqs.append(head + [n0, n1] + tail)\n                seqs.append(inner)\n\n        return [e for e in seqs if e]", "docstring": "Move which forces given edge to appear on some sequence.\n\nArgs:\nseqs: List of linear sequences covering chip.\nedge: Edge to be activated.\nsample_bool: Callable returning random bool.\n\nReturns:\nNew list of linear sequences with given edge on some of the\nsequences.", "source": "juraj-google-style"}
{"code": "def make_bubble_surface(dims=DEFAULT_DIMS, repeat=3):\n    gradients = make_gradients(dims)\n    return (np.sin((((gradients[0] - 0.5) * repeat) * np.pi)) * np.sin((((gradients[1] - 0.5) * repeat) * np.pi)))", "docstring": "Makes a surface from the product of sine functions on each axis.\n\nArgs:\ndims (pair): the dimensions of the surface to create\nrepeat (int): the frequency of the waves is set to ensure this many\nrepetitions of the function\n\nReturns:\nsurface: A surface.", "source": "codesearchnet"}
{"code": "def starts_with_prefix_in_list(text, prefixes):\n    for prefix in prefixes:\n        if text.startswith(prefix):\n            return True\n    return False", "docstring": "Return True if the given string starts with one of the prefixes in the given list, otherwise\nreturn False.\n\nArguments:\ntext (str): Text to check for prefixes.\nprefixes (list): List of prefixes to check for.\n\nReturns:\nbool: True if the given text starts with any of the given prefixes, otherwise False.", "source": "codesearchnet"}
{"code": "def wait_for_registration(self, processor_type):\n        \n        with self._condition:\n            self._condition.wait_for(lambda: (\n                processor_type in self\n                or self._cancelled_event.is_set()))\n            if self._cancelled_event.is_set():\n                raise WaitCancelledException()", "docstring": "Waits for a particular processor type to register or until\nis_cancelled is True. is_cancelled cannot be part of this class\nsince we aren't cancelling all waiting for a processor_type,\nbut just this particular wait.\n\nArgs:\nprocessor_type (ProcessorType): The family, and version of\nthe transaction processor.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def list_group_maintainers(self, name):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.list_group_maintainers(name)", "docstring": "Get the maintainers of a group.\n\nArgs:\nname (string): Name of group to query.\n\nReturns:\n(list[string]): List of maintainer names.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'):\n    \n\n    total_pages = int(math.ceil(record_count / limit))\n\n    next_cond = limit + offset <= record_count\n    prev_cond = offset >= limit\n\n    next_page = base_uri + page_nav_tpl.format(limit, offset + limit) if next_cond else None\n\n    prev_page = base_uri + page_nav_tpl.format(limit, offset - limit) if prev_cond else None\n\n    return OrderedDict([\n        ('total_count', record_count),\n        ('total_pages', total_pages),\n        ('next_page', next_page),\n        ('prev_page', prev_page)\n    ])", "docstring": "Compute pagination info for collection filtering.\n\nArgs:\nlimit (int): Collection filter limit.\noffset (int): Collection filter offset.\nrecord_count (int): Collection filter total record count.\nbase_uri (str): Collection filter base uri (without limit, offset)\npage_nav_tpl (str): Pagination template.\n\nReturns:\nA mapping of pagination info.", "source": "juraj-google-style"}
{"code": "def list_hierarchy(class_name, bases):\n    class_list = [Uri(class_name)]\n    for base in bases:\n        if (base.__name__ not in IGNORE_CLASSES):\n            class_list.append(Uri(base.__name__))\n    return list([i for i in set(class_list)])", "docstring": "Creates a list of the class hierarchy\n\nArgs:\n-----\nclass_name: name of the current class\nbases: list/tuple of bases for the current class", "source": "codesearchnet"}
{"code": "def annotate(self, gpl, annotation_column, gpl_on='ID', gsm_on='ID_REF', in_place=False):\n    if isinstance(gpl, GPL):\n        annotation_table = gpl.table\n    elif isinstance(gpl, DataFrame):\n        annotation_table = gpl\n    else:\n        raise TypeError('gpl should be a GPL object or a pandas.DataFrame')\n    annotated = self.table.merge(annotation_table[[gpl_on, annotation_column]], left_on=gsm_on, right_on=gpl_on)\n    del annotated[gpl_on]\n    if in_place:\n        self.table = annotated\n        return None\n    else:\n        return annotated", "docstring": "Annotate GSM with provided GPL\n\nArgs:\ngpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with\nannotation_column (str`): Column in a table for annotation\ngpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to \"ID\".\ngsm_on (:obj:`str`): Use this column in GPL to merge.\nDefaults to \"ID_REF\".\nin_place (:obj:`bool`): Substitute table in GSM by new annotated\ntable. Defaults to False.\n\nReturns:\n:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None\n\n\nRaises:\nTypeError: GPL should be GPL or pandas.DataFrame", "source": "codesearchnet"}
{"code": "def getall(self, key, default=[]):\n        \n        return self.data[key] if key in self.data else default", "docstring": "Return the list of all values for the specified key.\n\nArguments:\nkey (object): Key\ndefault (list): Default value to return if the key does not\nexist, defaults to ``[]``, i.e. an empty list.\n\nReturns:\nlist: List of all values for the specified key if the key\nexists, ``default`` otherwise.", "source": "juraj-google-style"}
{"code": "def __init__(self, vendor=None, body=b''):\n        \n        super().__init__()\n        self.vendor = vendor\n        self.body = body", "docstring": "Create instance attributes.\n\nArgs:\nvendor (int): 32-bit vendor ID.\nbody (bytes): Vendor-defined body", "source": "juraj-google-style"}
{"code": "def __init__(self, paginator, number, items):\n        \n        self.paginator = paginator\n        self.number = number\n        self.number0 = number - 1   \n        self.items = items\n        self.count = len(items)\n\n        \n        self.total_items = paginator.total_items\n        self.items_per_page = paginator.items_per_page\n        self.total_pages = paginator.total_pages\n        self.page_range = paginator.page_range\n        self.start_page = paginator.start_page\n        self.last_page = paginator.last_page", "docstring": "Constructor.\n\nArgs:\npaginator: The parent paginator object.\nnumber: The number of this page (starting from 1).\nitems: A list of items to belong to this page.", "source": "juraj-google-style"}
{"code": "def authorize(self, scheme, **params):\n    if (scheme not in self.schemes):\n        return False\n    for (field, value) in iteritems(params):\n        setattr(self, field, value)\n        if ((field in self.schemes[scheme][u'params'].keys()) and value):\n            self.schemes[scheme][u'params'][field] = value\n    return True", "docstring": "Store credentials required to satisfy a given auth scheme.\n\nArgs:\nscheme (str): The name of the Authentication scheme.\n**params: parameters for the specified scheme.\n\nReturns:\nTrue if parameters are set successfully (note that this doesn't mean\nthe credentials are valid)\nFalse if the scheme specified is not supported", "source": "codesearchnet"}
{"code": "def add_site(self, site):\n        \n        start_angle = 0\n        radius = 0\n        total_occu = 0\n\n        for specie, occu in site.species.items():\n            radius += occu * (specie.ionic_radius\n                              if isinstance(specie, Specie)\n                                 and specie.ionic_radius\n                              else specie.average_ionic_radius)\n            total_occu += occu\n\n        vis_radius = 0.2 + 0.002 * radius\n\n        for specie, occu in site.species.items():\n            if not specie:\n                color = (1, 1, 1)\n            elif specie.symbol in self.el_color_mapping:\n                color = [i / 255 for i in self.el_color_mapping[specie.symbol]]\n            mapper = self.add_partial_sphere(site.coords, vis_radius, color,\n                                             start_angle, start_angle + 360 * occu)\n            self.mapper_map[mapper] = [site]\n            start_angle += 360 * occu\n\n        if total_occu < 1:\n            mapper = self.add_partial_sphere(site.coords, vis_radius, (1,1,1),\n                                             start_angle, start_angle + 360 * (1 - total_occu))\n            self.mapper_map[mapper] = [site]", "docstring": "Add a site to the render window. The site is displayed as a sphere, the\ncolor of which is determined based on the element. Partially occupied\nsites are displayed as a single element color, though the site info\nstill shows the partial occupancy.\n\nArgs:\nsite: Site to add.", "source": "juraj-google-style"}
{"code": "def info_gen(self, code, message, compressed=False):\n    if ('COMPRESS=GZIP' in message):\n        return self.__info_gzip_gen()\n    if compressed:\n        return self.__info_yenczlib_gen()\n    return self.__info_plain_gen()", "docstring": "Dispatcher for the info generators.\n\nDetermines which __info_*_gen() should be used based on the supplied\nparameters.\n\nArgs:\ncode: The status code for the command response.\nmessage: The status message for the command reponse.\ncompressed: Force decompression. Useful for xz* commands.\n\nReturns:\nAn info generator.", "source": "codesearchnet"}
{"code": "def fsync(self, file_des):\n        \n        \n        if 0 <= file_des < NR_STD_STREAMS:\n            self.filesystem.raise_os_error(errno.EINVAL)\n        file_object = self.filesystem.get_open_file(file_des)\n        if self.filesystem.is_windows_fs:\n            if (not hasattr(file_object, 'allow_update') or\n                    not file_object.allow_update):\n                self.filesystem.raise_os_error(\n                    errno.EBADF, file_object.file_path)", "docstring": "Perform fsync for a fake file (in other words, do nothing).\n\nArgs:\nfile_des: The file descriptor of the open file.\n\nRaises:\nOSError: file_des is an invalid file descriptor.\nTypeError: file_des is not an integer.", "source": "juraj-google-style"}
{"code": "def __call__(self, user_lo_config):\n        \n        lo_config = {}\n\n        q_los = self.get_qubit_los(user_lo_config)\n        if q_los:\n            lo_config['qubit_lo_freq'] = q_los\n        m_los = self.get_meas_los(user_lo_config)\n        if m_los:\n            lo_config['meas_lo_freq'] = m_los\n\n        return self.qobj_model(**lo_config)", "docstring": "Return PulseQobjExperimentConfig\n\nArgs:\nuser_lo_config (LoConfig): A dictionary of LOs to format.\n\nReturns:\nPulseQobjExperimentConfig: qobj.", "source": "juraj-google-style"}
{"code": "def tersoff_potential(self, structure):\n    bv = BVAnalyzer()\n    el = [site.specie.symbol for site in structure]\n    valences = bv.get_valences(structure)\n    el_val_dict = dict(zip(el, valences))\n    gin = 'species \\n'\n    qerfstring = 'qerfc\\n'\n    for key in el_val_dict.keys():\n        if ((key != 'O') and ((el_val_dict[key] % 1) != 0)):\n            raise SystemError('Oxide has mixed valence on metal')\n        specie_string = (((key + ' core ') + str(el_val_dict[key])) + '\\n')\n        gin += specie_string\n        qerfstring += (((key + ' ') + key) + ' 0.6000 10.0000 \\n')\n    gin += '\n    met_oxi_ters = TersoffPotential().data\n    for key in el_val_dict.keys():\n        if (key != 'O'):\n            metal = (((key + '(') + str(int(el_val_dict[key]))) + ')')\n            ters_pot_str = met_oxi_ters[metal]\n            gin += ters_pot_str\n    gin += qerfstring\n    return gin", "docstring": "Generate the species, tersoff potential lines for an oxide structure\n\nArgs:\nstructure: pymatgen.core.structure.Structure", "source": "codesearchnet"}
{"code": "def __init__( self, initial_site ):\n        \n        Atom.atom_number += 1\n        self.number = Atom.atom_number\n        self._site = initial_site\n        \n        if self._site.occupation == 0:\n            self._site.occupation = self.number\n            self._site.is_occupied = True\n            self._site.atom = self\n        else:\n            raise ValueError( \"This site is already occupied by atom {}\".format( initial_site.occupation ) ) \n        self.reset()", "docstring": "Initialise an Atom instance.\n\nArgs:\ninitial_site (Site): Lattice site initially occupied by this Atom.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def ed25519_public_key_to_string(key):\n    return base64.b64encode(key.public_bytes(encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw), None).decode('utf-8')", "docstring": "Convert an ed25519 public key to a base64-encoded string.\n\nArgs:\nkey (Ed25519PublicKey): the key to write to the file.\n\nReturns:\nstr: the key representation as a str", "source": "codesearchnet"}
{"code": "def split(self, path):\n    return os.path.split(os.path.abspath(path))", "docstring": "Splits the given path into two parts.\n\nSplits the path into a pair (head, tail) such that tail contains the last\ncomponent of the path and head contains everything up to that.\n\nArgs:\npath: path as a string\nReturns:\na pair of path components as strings.", "source": "github-repos"}
{"code": "def raise_on_errors(errors, level=logging.CRITICAL):\n    if errors:\n        log.log(level, '\\n'.join(errors))\n        raise CoTError('\\n'.join(errors))", "docstring": "Raise a CoTError if errors.\n\nHelper function because I had this code block everywhere.\n\nArgs:\nerrors (list): the error errors\nlevel (int, optional): the log level to use.  Defaults to logging.CRITICAL\n\nRaises:\nCoTError: if errors is non-empty", "source": "codesearchnet"}
{"code": "def post_slack_message(message=None, channel=None, username=None, icon_emoji=None):\n    LOG.debug('Slack Channel: %s\\nSlack Message: %s', channel, message)\n    slack = slacker.Slacker(SLACK_TOKEN)\n    try:\n        slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji)\n        LOG.info('Message posted to %s', channel)\n    except slacker.Error:\n        LOG.info('error posted message to %s', channel)", "docstring": "Format the message and post to the appropriate slack channel.\n\nArgs:\nmessage (str): Message to post to slack\nchannel (str): Desired channel. Must start with #", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    filename = parser_mediator.GetFilename()\n    file_size = file_object.get_size()\n    if (file_size <= 0):\n        raise errors.UnableToParseFile('File size: {0:d} bytes is less equal 0.'.format(file_size))\n    if (file_size > 50000000):\n        raise errors.UnableToParseFile('File size: {0:d} bytes is larger than 50 MB.'.format(file_size))\n    top_level_object = self.GetTopLevel(file_object)\n    if (not top_level_object):\n        raise errors.UnableToParseFile('Unable to parse: {0:s} skipping.'.format(filename))\n    matching_plugin = None\n    for plugin in self._plugins:\n        try:\n            plugin.UpdateChainAndProcess(parser_mediator, plist_name=filename, top_level=top_level_object)\n            matching_plugin = plugin\n        except errors.WrongPlistPlugin as exception:\n            logger.debug('Wrong plugin: {0:s} for: {1:s}'.format(exception.args[0], exception.args[1]))\n    if ((not matching_plugin) and self._default_plugin):\n        self._default_plugin.UpdateChainAndProcess(parser_mediator, plist_name=filename, top_level=top_level_object)", "docstring": "Parses a plist file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def pnl_upsert(self, asset_manager_id, pnls):\n        \n        self.logger.info('Upsert PnL for - Asset Manager: %s', asset_manager_id)\n        pnls = [pnls] if not isinstance(pnls, list) else pnls\n\n        json_pnls = [pnl.to_interface() for pnl in pnls]\n        url = '%s/pnls/%s' % (self.endpoint, asset_manager_id)\n        response = self.session.put(url, json=json_pnls)\n        if response.ok:\n            results = []\n            for pnl_result in response.json():\n                results.append(json_to_pnl(pnl_result))\n            self.logger.info('Upserted %s PnL records', len(results))\n            return results\n        else:\n            self.logger.error(response.text)\n            response.raise_for_status()", "docstring": "Upsert a list of pnls. Note: this performs a full update\nof existing records with matching keys, so the passed in\npnl objects should be complete.\n\nArgs:\nasset_manager_id (int): the id of the asset manager owning the pnl\npnls (list): list of pnl objects to upsert", "source": "juraj-google-style"}
{"code": "def HasDataStream(self, name, case_sensitive=True):\n    if (not isinstance(name, py2to3.STRING_TYPES)):\n        raise ValueError('Name is not a string.')\n    name_lower = name.lower()\n    for data_stream in self._GetDataStreams():\n        if (data_stream.name == name):\n            return True\n        if ((not case_sensitive) and (data_stream.name.lower() == name_lower)):\n            return True\n    return False", "docstring": "Determines if the file entry has specific data stream.\n\nArgs:\nname (str): name of the data stream.\ncase_sensitive (Optional[bool]): True if the name is case sensitive.\n\nReturns:\nbool: True if the file entry has the data stream.\n\nRaises:\nValueError: if the name is not string.", "source": "codesearchnet"}
{"code": "def get_firmware(self):\n    uri = '{}/firmware'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Get the firmware inventory of a server.\n\nNote:\nThis method is available for API version 300 or later.\n\nReturns:\ndict: Server Hardware firmware.", "source": "codesearchnet"}
{"code": "def stop(name, file=sys.stderr):\n    if is_enabled():\n        elapsed = (time() - __TIMERS[name])\n        if (elapsed > 60):\n            elapsed_str = '{:.1f} m'.format((elapsed / 60))\n        elif (elapsed > 1):\n            elapsed_str = '{:.1f} s'.format(elapsed)\n        else:\n            elapsed_str = '{:.1f} ms'.format((elapsed * 1000))\n        del __TIMERS[name]\n        print('[prof]', name, elapsed_str, file=file)\n    return is_enabled()", "docstring": "Stop a profiling timer.\n\nArguments:\n\nname (str): The name of the timer to stop. If no name is given, stop\nthe global anonymous timer.\n\nReturns:\n\nbool: Whether or not profiling is enabled.\n\nRaises:\n\nKeyError: If the named timer does not exist.", "source": "codesearchnet"}
{"code": "def _ReadAttributeValueDateTime(self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset):\n    if (attribute_value_offset == 0):\n        return None\n    data_type_map = self._GetDataTypeMap('keychain_date_time')\n    file_offset = ((record_offset + attribute_values_data_offset) + attribute_value_offset)\n    attribute_value_offset -= (attribute_values_data_offset + 1)\n    attribute_value_data = attribute_values_data[attribute_value_offset:]\n    try:\n        date_time_attribute_value = self._ReadStructureFromByteStream(attribute_value_data, file_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map date time attribute value data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    return date_time_attribute_value.date_time.rstrip('\\x00')", "docstring": "Reads a date time attribute value.\n\nArgs:\nattribute_values_data (bytes): attribute values data.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\nattribute_values_data_offset (int): offset of the attribute values data\nrelative to the start of the record.\nattribute_value_offset (int): offset of the attribute relative to\nthe start of the record.\n\nReturns:\nstr: date and time values.\n\nRaises:\nParseError: if the attribute value cannot be read.", "source": "codesearchnet"}
{"code": "def get_current_state(self, clearConfig: bool = False):\n        \n        json_state = self.download_configuration()\n\n        if \"errorCode\" in json_state:\n            LOGGER.error(\n                \"Could not get the current configuration. Error: %s\",\n                json_state[\"errorCode\"],\n            )\n            return False\n\n        if clearConfig:\n            self.devices = []\n            self.clients = []\n            self.groups = []\n            self.rules = []\n            self.functionalHomes = []\n\n        js_home = json_state[\"home\"]\n\n        self.from_json(js_home)\n\n        self._get_devices(json_state)\n        self._get_clients(json_state)\n        self._get_groups(json_state)\n\n        self._get_functionalHomes(js_home)\n        self._load_functionalChannels()\n\n        return True", "docstring": "downloads the current configuration and parses it into self\n\nArgs:\nclearConfig(bool): if set to true, this function will remove all old objects\nfrom self.devices, self.client, ... to have a fresh config instead of reparsing them", "source": "juraj-google-style"}
{"code": "def GetBalance(self, wallet, address, as_string=False):\n        \n        addr = PromptUtils.parse_param(address, wallet)\n        if isinstance(addr, UInt160):\n            addr = addr.Data\n        sb = ScriptBuilder()\n        sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'balanceOf', [addr])\n\n        tx, fee, results, num_ops, engine_success = test_invoke(sb.ToArray(), wallet, [])\n        if engine_success:\n            try:\n                val = results[0].GetBigInteger()\n                precision_divisor = pow(10, self.decimals)\n                balance = Decimal(val) / Decimal(precision_divisor)\n                if as_string:\n                    formatter_str = '.%sf' % self.decimals\n                    balance_str = format(balance, formatter_str)\n                    return balance_str\n                return balance\n            except Exception as e:\n                logger.error(\"could not get balance: %s \" % e)\n                traceback.print_stack()\n        else:\n            addr_str = Crypto.ToAddress(UInt160(data=addr))\n            logger.error(\n                f\"Could not get balance of address {addr_str} for token contract {self.ScriptHash}. VM execution failed. Make sure the contract exists on the network and that it adheres to the NEP-5 standard\")\n\n        return 0", "docstring": "Get the token balance.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\naddress (str): public address of the account to get the token balance of.\nas_string (bool): whether the return value should be a string. Default is False, returning an integer.\n\nReturns:\nint/str: token balance value as int (default), token balanace as string if `as_string` is set to True. 0 if balance retrieval failed.", "source": "juraj-google-style"}
{"code": "def SetStorageWriter(self, storage_writer):\n    self._storage_writer = storage_writer\n    self._last_event_data_hash = None\n    self._last_event_data_identifier = None", "docstring": "Sets the storage writer.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.", "source": "codesearchnet"}
{"code": "def read_reply(self) -> Reply:\n    _logger.debug('Read reply')\n    reply = Reply()\n    while True:\n        line = (yield from self._connection.readline())\n        if (line[(- 1):] != b'\\n'):\n            raise NetworkError('Connection closed.')\n        self._data_event_dispatcher.notify_read(line)\n        reply.parse(line)\n        if (reply.code is not None):\n            break\n    return reply", "docstring": "Read a reply from the stream.\n\nReturns:\n.ftp.request.Reply: The reply\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def ChunkedAttentionSelector(x, params, selector=None, **kwargs):\n    del params, kwargs\n    selector = (selector or (lambda x: ([] if (x < 1) else [(x - 1)])))\n    (triples, masks) = zip(*x)\n    (queries, keys, values) = zip(*triples)\n    result = []\n    for i in range(len(x)):\n        selected = selector(i)\n        new_key_list = [keys[j] for j in selected]\n        new_key = np.concatenate((new_key_list + [keys[i]]), axis=1)\n        new_value = np.concatenate(([values[j] for j in selected] + [values[i]]), axis=1)\n        new_mask_shapes = [(1, queries[i].shape[1], key.shape[1]) for key in new_key_list]\n        cur_mask = masks[i]\n        new_mask_list = [np.ones(s, dtype=cur_mask.dtype) for s in new_mask_shapes]\n        new_mask = np.concatenate((new_mask_list + [cur_mask]), axis=2)\n        result.append(((queries[i], new_key, new_value), new_mask))\n    return tuple(result)", "docstring": "Select which chunks to attend to in chunked attention.\n\nArgs:\nx: inputs, a list of elements of the form (q, k, v), mask for each chunk.\nparams: parameters (unused).\nselector: a function from chunk_number -> list of chunk numbers that says\nwhich other chunks should be appended to the given one (previous if None).\n**kwargs: unused other arguments.\n\nReturns:\na list of elements of the form (q, k', v'), mask' where k', v' and mask' are\nconcatenations of k, v and identity-extended masks from selected chunks.", "source": "codesearchnet"}
{"code": "def query_parameter(binding_key):\n    pbk = ParsedBindingKey(binding_key)\n    if (pbk.config_key not in _CONFIG):\n        err_str = \"Configurable '{}' has no bound parameters.\"\n        raise ValueError(err_str.format(pbk.given_selector))\n    if (pbk.arg_name not in _CONFIG[pbk.config_key]):\n        err_str = \"Configurable '{}' has no value bound for parameter '{}'.\"\n        raise ValueError(err_str.format(pbk.given_selector, pbk.arg_name))\n    return _CONFIG[pbk.config_key][pbk.arg_name]", "docstring": "Returns the currently bound value to the specified `binding_key`.\n\nThe `binding_key` argument should look like\n'maybe/some/scope/maybe.moduels.configurable_name.parameter_name'. Note that\nthis will not include default parameters.\n\nArgs:\nbinding_key: The parameter whose value should be set.\n\nReturns:\nThe value bound to the configurable/parameter combination given in\n`binding_key`.\n\nRaises:\nValueError: If no function can be found matching the configurable name\nspecified by `biding_key`, or if the specified parameter name is\nblacklisted or not in the function's whitelist (if present) or if there is\nno value bound for the queried parameter or configurable.", "source": "codesearchnet"}
{"code": "def char_matches(s1, s2, n=3):\n    return __matches(s1, s2, char_ngrams, n=n)", "docstring": "Character-level n-grams that match between two strings\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nset: the n-grams found in both strings", "source": "codesearchnet"}
{"code": "def registration_backend(backend=None, namespace=None):\n    \n    \n    backend = backend or ORGS_REGISTRATION_BACKEND\n    class_module, class_name = backend.rsplit(\".\", 1)\n    mod = import_module(class_module)\n    return getattr(mod, class_name)(namespace=namespace)", "docstring": "Returns a specified registration backend\n\nArgs:\nbackend: dotted path to the registration backend class\nnamespace: URL namespace to use\n\nReturns:\nan instance of an RegistrationBackend", "source": "juraj-google-style"}
{"code": "def build(self):\n    self._import_submodules()\n    module_text_map = {}\n    footer_text_map = {}\n    for dest_module, dest_name_to_imports in self._module_imports.items():\n        imports_list = [get_canonical_import(imports) for _, imports in dest_name_to_imports.items()]\n        if self._lazy_loading:\n            module_text_map[dest_module] = _LAZY_LOADING_MODULE_TEXT_TEMPLATE % '\\n'.join(sorted(imports_list))\n        else:\n            module_text_map[dest_module] = '\\n'.join(sorted(imports_list))\n    root_module_footer = ''\n    if not self._lazy_loading:\n        underscore_names_str = ', '.join((\"'%s'\" % name for name in sorted(self._underscore_names_in_root)))\n        root_module_footer = \"\\n_names_with_underscore = [%s]\\n__all__ = [_s for _s in dir() if not _s.startswith('_')]\\n__all__.extend([_s for _s in _names_with_underscore])\\n\" % underscore_names_str\n    if self._api_version == 1 or self._lazy_loading:\n        for dest_module, _ in self._module_imports.items():\n            deprecation = 'False'\n            has_lite = 'False'\n            if self._api_version == 1:\n                if not dest_module.startswith(_COMPAT_MODULE_PREFIX):\n                    deprecation = 'True'\n            if not dest_module and 'lite' in self._module_imports and self._lazy_loading:\n                has_lite = 'True'\n            if self._lazy_loading:\n                public_apis_name = '_PUBLIC_APIS'\n            else:\n                public_apis_name = 'None'\n            footer_text_map[dest_module] = _DEPRECATION_FOOTER % (dest_module, public_apis_name, deprecation, has_lite)\n    return (module_text_map, footer_text_map, root_module_footer)", "docstring": "Get a map from destination module to __init__.py code for that module.\n\nReturns:\nA dictionary where\nkey: (string) destination module (for e.g. tf or tf.consts).\nvalue: (string) text that should be in __init__.py files for\ncorresponding modules.", "source": "github-repos"}
{"code": "def builtin_timescale(cls, name):\n    names = {'isc': TIMESCALE__ISC, 'usgs_isc': TIMESCALE__USGS_ISC, 'dnag': TIMESCALE__DNAG}\n    return cls.from_csv(text=names[name.lower()])", "docstring": "Generate a default timescale legend. No arguments.\n\nReturns:\nLegend: The timescale stored in `defaults.py`.", "source": "codesearchnet"}
{"code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Adds `tf.IndexedSlices` to this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be added to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def _parse_datetime(self, value):\n        \n        offset = 0\n        pattern = r\"\\s+([+-]{1}\\d+)\\Z\"\n        matches = re.search(pattern, value)\n        if matches:\n            value = re.sub(pattern, '', value)\n            offset = datetime.timedelta(hours=int(matches.group(1))/100)\n        return datetime.datetime.strptime(value, \"%Y/%m/%d %H:%M:%S\") - offset", "docstring": "Parses a datetime string from \"YYYY/MM/DD HH:MM:SS +HHMM\" format\n\nArgs:\nvalue (str): String\n\nReturns:\ndatetime. Datetime", "source": "juraj-google-style"}
{"code": "def _aggregate_grads(gradients):\n    assert gradients, 'No gradients to aggregate'\n    if len(gradients) == 1:\n        return gradients[0]\n    if all((isinstance(g, tensor_lib.Tensor) for g in gradients)):\n        return gen_math_ops.add_n(gradients)\n    else:\n        assert all((isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for g in gradients))\n        return backprop_util.AggregateIndexedSlicesGradients(gradients)", "docstring": "Aggregate gradients from multiple sources.\n\nArgs:\ngradients: A list of 'Tensor' or 'IndexedSlices' gradients.\n\nReturns:\nIf 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.\nOtherwise returns an aggregated 'IndexedSlices'.", "source": "github-repos"}
{"code": "def _batched_mask_to_box_tf(masks: 'tf.Tensor'):\n    if tf.size(masks) == 0:\n        return tf.zeros([*masks.shape[:-2], 4])\n    shape = shape_list(masks)\n    height, width = shape[-2:]\n    in_height = tf.reduce_max(masks, axis=-1)\n    in_height_coords = in_height * tf.range(height)[None, :]\n    bottom_edges = tf.reduce_max(in_height_coords, axis=-1)\n    in_height_coords = in_height_coords + height * ~in_height\n    top_edges = tf.reduce_min(in_height_coords, axis=-1)\n    in_width, _ = tf.reduce_max(masks, axis=-2)\n    in_width_coords = in_width * tf.range(width)[None, :]\n    right_edges, _ = tf.reduce_max(in_width_coords, axis=-1)\n    in_width_coords = in_width_coords + width * ~in_width\n    left_edges, _ = tf.reduce_min(in_width_coords, axis=-1)\n    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n    out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)\n    out = out * tf.expand_dims(~empty_filter, -1)\n    out = tf.reshape(out, *shape[:-2], 4)\n    return out", "docstring": "Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which\ncorresponds the following required indices:\n- LEFT: left hand side of the bounding box\n- TOP: top of the bounding box\n- RIGHT: right of the bounding box\n- BOTTOM: bottom of the bounding box\n\nReturn [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape\nis channel_1 x channel_2 x ... x 4.\n\nArgs:\n- masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)", "source": "github-repos"}
{"code": "def move(self, destination):\n    self.relocate(destination)\n    shutil.move(self.path, destination)\n    self._path = destination", "docstring": "Reconfigure and move the virtual environment to another path.\n\nArgs:\ndestination (str): The target path of the virtual environment.\n\nNote:\nUnlike `relocate`, this method *will* move the virtual to the\ngiven path.", "source": "codesearchnet"}
{"code": "def bessel_y0(x, name=None):\n    with ops.name_scope(name, 'bessel_y0', [x]):\n        return gen_special_math_ops.bessel_y0(x)", "docstring": "Computes the Bessel y0 function of `x` element-wise.\n\nModified Bessel function of order 0.\n\n>>> tf.math.special.bessel_y0([0.5, 1., 2., 4.]).numpy()\narray([-0.44451873,  0.08825696,  0.51037567, -0.01694074], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.y0\n@end_compatibility", "source": "github-repos"}
{"code": "def xcompile(source_code, args=0, optimize=True):\n    \n    code = crianza.compile(crianza.parse(source_code), optimize=optimize)\n    return crianza.native.compile(code, args=args)", "docstring": "Parses Crianza source code and returns a native Python function.\n\nArgs:\nargs: The resulting function's number of input parameters.\n\nReturns:\nA callable Python function.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.attention_norm(hidden_states)\n    hidden_states, attn_weights = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, **kwargs)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.ffn_norm(hidden_states)\n    hidden_states = self.feed_forward(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(batch, seq_len, embed_dim)`.\nattention_mask (`torch.FloatTensor`):\nAttention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*, defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def run_restore_ops(self, session=None):\n    raise AssertionError('No checkpoint specified, so no restore ops are available (save_path=None to Saver.restore).')", "docstring": "For consistency with `CheckpointLoadStatus`.\n\nUse `initialize_or_restore` for initializing if no checkpoint was passed\nto `Saver.restore` and restoring otherwise.\n\nArgs:\nsession: Not used.", "source": "github-repos"}
{"code": "def collapse_short_branches(self, threshold):\n        \n        if not isinstance(threshold,float) and not isinstance(threshold,int):\n            raise RuntimeError(\"threshold must be an integer or a float\")\n        elif threshold < 0:\n            raise RuntimeError(\"threshold cannot be negative\")\n        q = deque(); q.append(self.root)\n        while len(q) != 0:\n            next = q.popleft()\n            if next.edge_length is None or next.edge_length <= threshold:\n                if next.is_root():\n                    next.edge_length = None\n                elif not next.is_leaf():\n                    parent = next.parent; parent.remove_child(next)\n                    for c in next.children:\n                        parent.add_child(c)\n            q.extend(next.children)", "docstring": "Collapse internal branches (not terminal branches) with length less than or equal to ``threshold``. A branch length of ``None`` is considered 0\n\nArgs:\n``threshold`` (``float``): The threshold to use when collapsing branches", "source": "juraj-google-style"}
{"code": "def _file_exists_in_gcs(gcs_file_path, credentials=None):\n    gcs_service = _get_storage_service(credentials)\n    (bucket_name, object_name) = gcs_file_path[len('gs:\n    request = gcs_service.objects().get(bucket=bucket_name, object=object_name, projection='noAcl')\n    try:\n        request.execute()\n        return True\n    except errors.HttpError:\n        return False", "docstring": "Check whether the file exists, in GCS.\n\nArgs:\ngcs_file_path: The target file path; should have the 'gs://' prefix.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nTrue if the file's there.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        super(Block, self).Deserialize(reader)\n\n        self.Transactions = []\n        byt = reader.ReadVarInt()\n        transaction_length = byt\n\n        if transaction_length < 1:\n            raise Exception('Invalid format')\n\n        for i in range(0, transaction_length):\n            tx = Transaction.DeserializeFrom(reader)\n            self.Transactions.append(tx)\n\n        if MerkleTree.ComputeRoot([tx.Hash for tx in self.Transactions]) != self.MerkleRoot:\n            raise Exception(\"Merkle Root Mismatch\")", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def remove_room_alias(self, room_alias):\n        \n        try:\n            self.api.remove_room_alias(room_alias)\n            return True\n        except MatrixRequestError:\n            return False", "docstring": "Remove mapping of an alias\n\nArgs:\nroom_alias(str): The alias to be removed.\n\nReturns:\nbool: True if the alias is removed, False otherwise.", "source": "juraj-google-style"}
{"code": "def validate(self, profile):\n    ij = self.load_install_json(profile.get('install_json'))\n    print('{}{}Profile: \"{}\".'.format(c.Style.BRIGHT, c.Fore.BLUE, profile.get('profile_name')))\n    for arg in self.profile_settings_args_install_json(ij, None):\n        if (profile.get('args', {}).get('app', {}).get(arg) is None):\n            print('{}{}Input \"{}\" not found.'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg))", "docstring": "Check to see if any args are \"missing\" from profile.\n\nValidate all args from install.json are in the profile.  This can be helpful to validate\nthat any new args added to App are included in the profiles.\n\n.. Note:: This method does not work with layout.json Apps.\n\nArgs:\nprofile (dict): The current profile to validate.", "source": "codesearchnet"}
{"code": "def create_alias(target_path, alias_path):\n    \n    if platform.system() == 'Windows' and not alias_path.endswith('.lnk'):\n        alias_path += '.lnk'\n    if os.path.lexists(alias_path):\n        os.remove(alias_path)\n    if platform.system() == 'Windows':\n        from win32com import client\n        shell = client.Dispatch('WScript.Shell')\n        shortcut = shell.CreateShortCut(alias_path)\n        shortcut.Targetpath = target_path\n        shortcut.save()\n    else:\n        os.symlink(target_path, alias_path)", "docstring": "Creates an alias at 'alias_path' pointing to the file 'target_path'.\n\nOn Unix, this is implemented via symlink. On Windows, this is done by\ncreating a Windows shortcut file.\n\nArgs:\ntarget_path: Destination path that the alias should point to.\nalias_path: Path at which to create the new alias.", "source": "juraj-google-style"}
{"code": "def get_named_parent(decl):\n    \n\n    if not decl:\n        return None\n\n    parent = decl.parent\n    while parent and (not parent.name or parent.name == '::'):\n        parent = parent.parent\n    return parent", "docstring": "Returns a reference to a named parent declaration.\n\nArgs:\ndecl (declaration_t): the child declaration\n\nReturns:\ndeclaration_t: the declaration or None if not found.", "source": "juraj-google-style"}
{"code": "def _MergeEntities(self, a, b):\n    distance = transitfeed.ApproximateDistanceBetweenStops(a, b)\n    if (distance > self.largest_stop_distance):\n        raise MergeError(('Stops are too far apart: %.1fm (largest_stop_distance is %.1fm).' % (distance, self.largest_stop_distance)))\n    scheme = {'stop_id': self._MergeIdentical, 'stop_name': self._MergeIdenticalCaseInsensitive, 'zone_id': self._MergeIdentical, 'location_type': self._MergeIdentical}\n    return self._SchemedMerge(scheme, a, b)", "docstring": "Merges two stops.\n\nFor the stops to be merged, they must have:\n- the same stop_id\n- the same stop_name (case insensitive)\n- the same zone_id\n- locations less than largest_stop_distance apart\nThe other attributes can have arbitary changes. The merged attributes are\ntaken from the new stop.\n\nArgs:\na: The first stop.\nb: The second stop.\n\nReturns:\nThe merged stop.\n\nRaises:\nMergeError: The stops could not be merged.", "source": "codesearchnet"}
{"code": "def get_value(self, query):\n        \n        indices = self.get_dimension_indices(query)\n        index = self.get_value_index(indices)\n        value = self.get_value_by_index(index)\n        return value", "docstring": "Converts a dimension/category list of dicts into a data value \\\nin three steps.\n\nArgs:\nquery(list): list of dicts with the desired query.\n\nReturns:\nvalue(float): numeric data value.", "source": "juraj-google-style"}
{"code": "def regex_to_sql_like(regex_text: str, single_wildcard: str='_', zero_or_more_wildcard: str='%') -> List[str]:\n\n    def append_to_all(new_content: str) -> None:\n        nonlocal results\n        results = [(r + new_content) for r in results]\n\n    def split_and_append(new_options: List[str]) -> None:\n        nonlocal results\n        newresults = []\n        for option in new_options:\n            newresults.extend([(r + option) for r in results])\n        results = newresults\n\n    def deduplicate_wildcards(text: str) -> str:\n        while ((zero_or_more_wildcard + zero_or_more_wildcard) in text):\n            text = text.replace((zero_or_more_wildcard + zero_or_more_wildcard), zero_or_more_wildcard)\n        return text\n    working = regex_text\n    results = [zero_or_more_wildcard]\n    while working:\n        if working.startswith('.*'):\n            append_to_all(zero_or_more_wildcard)\n            working = working[2:]\n        elif working.startswith('['):\n            close_bracket = working.index(']')\n            bracketed = working[1:close_bracket]\n            option_groups = bracketed.split('|')\n            options = [c for group in option_groups for c in group]\n            split_and_append(options)\n            working = working[(close_bracket + 1):]\n        elif ((len(working) > 1) and (working[1] == '?')):\n            split_and_append(['', working[0]])\n            working = working[2:]\n        elif working.startswith('.'):\n            append_to_all(single_wildcard)\n            working = working[1:]\n        else:\n            append_to_all(working[0])\n            working = working[1:]\n    append_to_all(zero_or_more_wildcard)\n    results = [deduplicate_wildcards(r) for r in results]\n    return results", "docstring": "Converts regular expression text to a reasonably close fragment\nfor the SQL ``LIKE`` operator.\n\nNOT PERFECT, but works for current built-in regular expressions.\n\nArgs:\nregex_text: regular expression text to work with\nsingle_wildcard: SQL single wildcard, typically an underscore\nzero_or_more_wildcard: SQL \"zero/one/many\" wildcard, probably always\na percent symbol\n\nReturns:\nstring for an SQL string literal\n\nRaises:\n:exc:`ValueError` for some regex text that it doesn't understand\nproperly", "source": "codesearchnet"}
{"code": "def aside_view_declaration(self, view_name):\n        \n        if view_name in self._combined_asides:  \n            return getattr(self, self._combined_asides[view_name])  \n        else:\n            return None", "docstring": "Find and return a function object if one is an aside_view for the given view_name\n\nAside methods declare their view provision via @XBlockAside.aside_for(view_name)\nThis function finds those declarations for a block.\n\nArguments:\nview_name (string): the name of the view requested.\n\nReturns:\neither the function or None", "source": "juraj-google-style"}
{"code": "def build_sql_statement(self) -> str:\n    builders = self._view.get_select_expressions()\n    from_expressions = [f'`{self._dataset}`.{self._table_name}']\n    where_expressions = self._build_where_expressions(self._view.get_constraint_expressions())\n    if not builders:\n        return self._build_sql_statement(['*'], from_expressions, where_expressions)\n    sql_statement = ''\n    next_from_expressions = []\n    child_builders = []\n    columns_selected = []\n    while builders or next_from_expressions:\n        select_expressions, next_from_expressions = self._build_select_and_next_from_expressions(builders, child_builders, columns_selected)\n        sql_statement = self._build_sql_statement(select_expressions, from_expressions, where_expressions)\n        from_expressions = [f'({sql_statement})']\n        from_expressions.extend(next_from_expressions)\n        where_expressions = []\n        builders = tuple(child_builders)\n        child_builders = []\n    return sql_statement", "docstring": "Build SQL statement.\n\nReturns:\nSQL string representation of the view", "source": "github-repos"}
{"code": "def write_dot_file(G, filename):\n    \n    with io.open(filename, \"w\") as fh:\n        fh.write(\"strict digraph DependencyDiagram {\\n\")\n        edge_list = G.edges()\n        node_list = set(G.nodes())\n        if edge_list:\n            for edge in sorted(edge_list):\n                source, targ = edge\n                node_list = node_list - set(source)\n                node_list = node_list - set(targ)\n                line = '\"{}\" -> \"{}\";\\n'\n                fh.write(line.format(source, targ))\n        \n        if node_list:\n            for node in sorted(node_list):\n                line = '\"{}\"\\n'.format(node)\n                fh.write(line)\n        fh.write(\"}\")", "docstring": "Writes the graph G in dot file format for graphviz visualization.\n\nArgs:\na Networkx graph\nA filename to name the dot files", "source": "juraj-google-style"}
{"code": "def apply_transform(self, data: OperationInputT, output_column_name: str) -> dict[str, OperationOutputT]:", "docstring": "Define any processing logic in the apply_transform() method.\nprocessing logics are applied on inputs and returns a transformed\noutput.\nArgs:\ninputs: input data.", "source": "github-repos"}
{"code": "def unapprove(self, **kwargs):\n        \n        path = '%s/%s/unapprove' % (self.manager.path, self.get_id())\n        data = {}\n\n        server_data = self.manager.gitlab.http_post(path, post_data=data,\n                                                    **kwargs)\n        self._update_attrs(server_data)", "docstring": "Unapprove the merge request.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMRApprovalError: If the unapproval failed", "source": "juraj-google-style"}
{"code": "def __request_finish(self, queue_item, new_requests, request_failed=False):\n    if self.__stopping:\n        return\n    del self.__threads[queue_item.get_hash()]\n    if request_failed:\n        new_queue_items = []\n        self.queue.move(queue_item, QueueItem.STATUS_ERRORED)\n    else:\n        self.routing.increase_route_count(queue_item.request)\n        new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)\n        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)\n    try:\n        action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)\n    except Exception as e:\n        action = None\n        print(e)\n        print(traceback.format_exc())\n    queue_item.decompose()\n    if (action == CrawlerActions.DO_STOP_CRAWLING):\n        self.__should_stop = True\n    if ((action == CrawlerActions.DO_CONTINUE_CRAWLING) or (action is None)):\n        self.__should_spawn_new_requests = True", "docstring": "Called when the crawler finished the given queue item.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.\nnew_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.\nrequest_failed (bool): True if the request failed (if needs to be moved to errored).", "source": "codesearchnet"}
{"code": "def _get_authenticated_client(self, wsdl):\n    return zeep.Client((wsdl % quote(self.username)), transport=zeep.Transport(session=self._get_authenticated_session()))", "docstring": "Return an authenticated SOAP client.\n\nReturns:\nzeep.Client: Authenticated API client.", "source": "codesearchnet"}
{"code": "def parse(cls, json_value: Union[int, float, str, List[Any], Tuple[Any], None], spec: Optional[DNASpec]=None) -> 'DNA':\n    return DNA(json_value, spec=spec)", "docstring": "Parse DNA from a nested structure of numbers.\n\nDeprecated: use `DNA.__init__` instead.\n\nArgs:\njson_value: A nested structure of numbers.\nspec: DNA spec that will be applied to current DNA tree.\n\nReturns:\nan instance of DNA object.\n\nRaises:\nValueError: Bad format for json_value or parsed DNA does not conform to\nthe DNA spec.", "source": "github-repos"}
{"code": "class TFGPT2Tokenizer(keras.layers.Layer):\n\n    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: Optional[int]=None, pad_token_id: Optional[int]=None):\n        super().__init__()\n        self.pad_token_id = pad_token_id\n        self.max_length = max_length\n        self.vocab = vocab\n        self.merges = merges\n        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)\n\n    @classmethod\n    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):\n        \n        merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]\n        vocab = tokenizer.get_vocab()\n        return cls(vocab, merges, *args, **kwargs)\n\n    @classmethod\n    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):\n        \n        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)\n        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)\n\n    @classmethod\n    def from_config(cls, config):\n        \n        return cls(**config)\n\n    def get_config(self):\n        return {'vocab': self.vocab, 'merges': self.merges, 'max_length': self.max_length, 'pad_token_id': self.pad_token_id}\n\n    def call(self, x, max_length: Optional[int]=None):\n        input_ids = self.tf_tokenizer(x)\n        attention_mask = tf.ones_like(input_ids)\n        if self.pad_token_id is not None:\n            max_length = max_length if max_length is not None else self.max_length\n            if max_length is not None:\n                input_ids, attention_mask = pad_model_inputs(input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)\n        return {'attention_mask': attention_mask, 'input_ids': input_ids}", "docstring": "This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the\n`from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings\nfrom an existing standard tokenizer object.\n\nIn-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run\nwhen the model is called, rather than during preprocessing. As a result, they have somewhat more limited options\nthan standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes\nstraight from `tf.string` inputs to outputs.\n\nArgs:\nvocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer\nmerges (List[str]): Merges list for Byte Pair Tokenizer", "source": "github-repos"}
{"code": "def signature_summary(self, default_values=False):\n    summary = f'{self._function_type!r}'\n    if default_values:\n        summary += '\\nDefaults:'\n        if self.default_values:\n            for name, value in self.default_values.items():\n                summary += f'\\n  {name}: {value!r}'\n        else:\n            summary += '\\n  None'\n    return summary", "docstring": "Returns a string summarizing this function's signature.\n\nArgs:\ndefault_values: If true, then include default values in the signature.\n\nReturns:\nA `string`.", "source": "github-repos"}
{"code": "def get_variables(self, include_submodules=False, include_nontrainable=False):\n        \n        if include_nontrainable:\n            model_variables = [self.all_variables[key] for key in sorted(self.all_variables)]\n\n            states_preprocessing_variables = [\n                variable for name in sorted(self.states_preprocessing)\n                for variable in self.states_preprocessing[name].get_variables()\n            ]\n            model_variables += states_preprocessing_variables\n\n            actions_exploration_variables = [\n                variable for name in sorted(self.actions_exploration)\n                for variable in self.actions_exploration[name].get_variables()\n            ]\n            model_variables += actions_exploration_variables\n\n            if self.reward_preprocessing is not None:\n                reward_preprocessing_variables = self.reward_preprocessing.get_variables()\n                model_variables += reward_preprocessing_variables\n\n        else:\n            model_variables = [self.variables[key] for key in sorted(self.variables)]\n\n        return model_variables", "docstring": "Returns the TensorFlow variables used by the model.\n\nArgs:\ninclude_submodules: Includes variables of submodules (e.g. baseline, target network)\nif true.\ninclude_nontrainable: Includes non-trainable variables if true.\n\nReturns:\nList of variables.", "source": "juraj-google-style"}
{"code": "def format_search_results(self, search_results):\n        \n        formatted_lines = []\n\n        for search_result in search_results:\n            lines = self._format_search_result(search_result)\n            formatted_lines.extend(lines)\n\n        return formatted_lines", "docstring": "Format search results.\n\nArgs:\nsearch_results (list of `ResourceSearchResult`): Search to format.\n\nReturns:\nList of 2-tuple: Text and color to print in.", "source": "juraj-google-style"}
{"code": "def load_lines(filename):\n    \n\n    with open(filename, 'r', encoding='utf-8') as f:\n        return [line.rstrip('\\n') for line in f.readlines()]", "docstring": "Load a text file as an array of lines.\n\nArgs:\nfilename: Path to the input file.\n\nReturns:\nAn array of strings, each representing an individual line.", "source": "juraj-google-style"}
{"code": "def check_layout(tensor: tensor_lib.Tensor, layout: layout_lib.Layout) -> None:\n    if fetch_layout(tensor) != layout:\n        raise ValueError('Layout of tensor: ' + str(fetch_layout(tensor)) + ', did not match expected layout: ' + str(layout))", "docstring": "Asserts that the layout of the DTensor is `layout`.\n\nArgs:\ntensor: A DTensor whose layout is to be checked.\nlayout: The `Layout` to compare against.\n\nRaises:\nValueError: If the layout of `tensor` does not match the supplied `layout`.", "source": "github-repos"}
{"code": "def get_user(\n        self, identified_with, identifier, req, resp, resource, uri_kwargs\n    ):\n        \n        stored_value = self.kv_store.get(\n            self._get_storage_key(identified_with, identifier)\n        )\n        if stored_value is not None:\n            user = self.serialization.loads(stored_value.decode())\n        else:\n            user = None\n\n        return user", "docstring": "Get user object for given identifier.\n\nArgs:\nidentified_with (object): authentication middleware used\nto identify the user.\nidentifier: middleware specifix user identifier (string or tuple\nin case of all built in authentication middleware classes).\n\nReturns:\ndict: user object stored in Redis if it exists, otherwise ``None``", "source": "juraj-google-style"}
{"code": "def select_all(self, serial_numbers):\n        \n        sheet = self.table\n        col = self.db_sheet_cols.id\n        rows = sheet.loc[:, col].isin(serial_numbers)\n        return sheet.loc[rows, :]", "docstring": "Select rows for identification for a list of serial_number.\n\nArgs:\nserial_numbers: list (or ndarray) of serial numbers\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def from_table(table, fields=None):\n    \n    if fields is None:\n      fields = '*'\n    elif isinstance(fields, list):\n      fields = ','.join(fields)\n    return Query('SELECT %s FROM %s' % (fields, table._repr_sql_()))", "docstring": "Return a Query for the given Table object\n\nArgs:\ntable: the Table object to construct a Query out of\nfields: the fields to return. If None, all fields will be returned. This can be a string\nwhich will be injected into the Query after SELECT, or a list of field names.\n\nReturns:\nA Query object that will return the specified fields from the records in the Table.", "source": "juraj-google-style"}
{"code": "def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:\n    \n\n    if size is None:\n        width, height = 1280, 720\n\n    else:\n        width, height = size\n\n    if samples < 0 or (samples & (samples - 1)) != 0:\n        raise Exception('Invalid number of samples: %d' % samples)\n\n    window = Window.__new__(Window)\n    window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)\n    return window", "docstring": "Create the main window.\n\nArgs:\nsize (tuple): The width and height of the window.\nsamples (int): The number of samples.\n\nKeyword Args:\nfullscreen (bool): Fullscreen?\ntitle (bool): The title of the window.\nthreaded (bool): Threaded?\n\nReturns:\nWindow: The main window.", "source": "juraj-google-style"}
{"code": "def _check_wires_list(self, wires, node):\n        \n        if len(set(wires)) != len(wires):\n            raise DAGCircuitError(\"duplicate wires\")\n\n        wire_tot = len(node.qargs) + len(node.cargs)\n        if node.condition is not None:\n            wire_tot += node.condition[0].size\n\n        if len(wires) != wire_tot:\n            raise DAGCircuitError(\"expected %d wires, got %d\"\n                                  % (wire_tot, len(wires)))", "docstring": "Check that a list of wires is compatible with a node to be replaced.\n\n- no duplicate names\n- correct length for operation\nRaise an exception otherwise.\n\nArgs:\nwires (list[register, index]): gives an order for (qu)bits\nin the input circuit that is replacing the node.\nnode (DAGNode): a node in the dag\n\nRaises:\nDAGCircuitError: if check doesn't pass.", "source": "juraj-google-style"}
{"code": "def recipe_archive(config, auth_write, archive_days, archive_bucket, archive_path, archive_delete):\n    archive(config, {'auth': auth_write, 'days': archive_days, 'storage': {'bucket': archive_bucket, 'path': archive_path}, 'delete': archive_delete})", "docstring": "Wipe old information from a Storage bucket based on last update time.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\narchive_days (integer) - NA\narchive_bucket (string) - NA\narchive_path (string) - NA\narchive_delete (boolean) - NA", "source": "github-repos"}
{"code": "def clean(decrypted: bytes) -> str:\n    r\n    last = decrypted[-1]\n    if isinstance(last, int):\n        return decrypted[:-last].decode('utf8')\n    return decrypted[:-ord(last)].decode('utf8')", "docstring": "r\"\"\"Strip padding from decrypted value.\n\nRemove number indicated by padding\ne.g. if last is '\\x0e' then ord('\\x0e') == 14, so take off 14.\n\nArgs:\ndecrypted: decrypted value\nReturns:\nDecrypted stripped of junk padding", "source": "juraj-google-style"}
{"code": "def delete_existing_policy(self, scaling_policy, server_group):\n        \n        self.log.info(\"Deleting policy %s on %s\", scaling_policy['policyName'], server_group)\n        delete_dict = {\n            \"application\":\n            self.app,\n            \"description\":\n            \"Delete scaling policy\",\n            \"job\": [{\n                \"policyName\": scaling_policy['policyName'],\n                \"serverGroupName\": server_group,\n                \"credentials\": self.env,\n                \"region\": self.region,\n                \"provider\": \"aws\",\n                \"type\": \"deleteScalingPolicy\",\n                \"user\": \"foremast-autoscaling-policy\"\n            }]\n        }\n        wait_for_task(json.dumps(delete_dict))", "docstring": "Given a scaling_policy and server_group, deletes the existing scaling_policy.\nScaling policies need to be deleted instead of upserted for consistency.\n\nArgs:\nscaling_policy (json): the scaling_policy json from Spinnaker that should be deleted\nserver_group (str): the affected server_group", "source": "juraj-google-style"}
{"code": "def save_output_in_cache(name, filename, output):\n    \n    cache_filename = _get_cache_filename(name, filename)\n    with _open_for_write(cache_filename) as f:\n        f.write(output)", "docstring": "Saves output in the cache location.\n\nArgs:\nname: string: name of the linter.\nfilename: string: path of the filename for which we are saving the output.\noutput: string: full output (not yet filetered) of the lint command.", "source": "juraj-google-style"}
{"code": "def _GetEventLogProviderKey(self, log_source):\n    \n    table_names = ['event_log_providers']\n    column_names = ['event_log_provider_key']\n    condition = 'log_source == \"{0:s}\"'.format(log_source)\n\n    values_list = list(self._database_file.GetValues(\n        table_names, column_names, condition))\n\n    number_of_values = len(values_list)\n    if number_of_values == 0:\n      return None\n\n    if number_of_values == 1:\n      values = values_list[0]\n      return values['event_log_provider_key']\n\n    raise RuntimeError('More than one value found in database.')", "docstring": "Retrieves the Event Log provider key.\n\nArgs:\nlog_source (str): Event Log source.\n\nReturns:\nstr: Event Log provider key or None if not available.\n\nRaises:\nRuntimeError: if more than one value is found in the database.", "source": "juraj-google-style"}
{"code": "def _close_on_stop(self, sess, cancel_op, coord):\n    coord.wait_for_stop()\n    try:\n        sess.run(cancel_op)\n    except Exception as e:\n        logging.vlog(1, 'Ignored exception: %s', str(e))", "docstring": "Close the queue when the Coordinator requests stop.\n\nArgs:\nsess: A Session.\ncancel_op: The Operation to run.\ncoord: Coordinator.", "source": "github-repos"}
{"code": "def PushAttributeContainer(self, serialized_data):\n    \n    self._list.append(serialized_data)\n    self.data_size += len(serialized_data)\n    self.next_sequence_number += 1", "docstring": "Pushes a serialized attribute container onto the list.\n\nArgs:\nserialized_data (bytes): serialized attribute container data.", "source": "juraj-google-style"}
{"code": "def encode_message(self, message):\n    message.check_initialized()\n    return json.dumps(message, cls=MessageJSONEncoder, protojson_protocol=self)", "docstring": "Encode Message instance to JSON string.\n\nArgs:\nMessage instance to encode in to JSON string.\n\nReturns:\nString encoding of Message instance in protocol JSON format.\n\nRaises:\nmessages.ValidationError if message is not initialized.", "source": "codesearchnet"}
{"code": "def populate_sites( self, number_of_atoms, selected_sites=None ):\n        \n        if number_of_atoms > self.number_of_sites:\n            raise ValueError\n        if selected_sites:\n            atoms = [ atom.Atom( initial_site = site ) for site in random.sample( [ s for s in self.sites if s.label in selected_sites ], number_of_atoms ) ]\n        else:\n            atoms = [ atom.Atom( initial_site = site ) for site in random.sample( self.sites, number_of_atoms ) ]\n        self.number_of_occupied_sites = number_of_atoms\n        return atoms", "docstring": "Populate the lattice sites with a specific number of atoms.\n\nArgs:\nnumber_of_atoms (Int): The number of atoms to populate the lattice sites with.\nselected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def exists(self, filename):\n    if is_package(filename):\n        filepath = os.path.join(self.connection['mount_point'], 'Packages', filename)\n    else:\n        filepath = os.path.join(self.connection['mount_point'], 'Scripts', filename)\n    return os.path.exists(filepath)", "docstring": "Report whether a file exists on the distribution point.\n\nDetermines file type by extension.\n\nArgs:\nfilename: Filename you wish to check. (No path! e.g.:\n\"AdobeFlashPlayer-14.0.0.176.pkg\")", "source": "codesearchnet"}
{"code": "def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):\n    if device_name is not None:\n        if platform_name is not None:\n            raise ValueError('device_name and platform_name cannot be provided at the same time.')\n        warnings.warn('device_name is being deprecated. Use platform_name.')\n    device_name = maybe_get_device_name(device_name)\n    res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=filtered_flat_specs, captured_inputs=concrete_fn.captured_inputs, stage=stage)\n    if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):\n        return res_bytes\n    else:\n        return res_bytes.decode('utf-8')", "docstring": "Gets the compiler IR bytes.\n\nArgs:\nstage: The exported stage for the given function.\ndevice_name: The name of the device with the form as\n\"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc.\nWhen this is used, actual device is needed for getting the compiler IR.\nplatform_name: The name of the platform, e.g. \"TPU\". See the comment in\n`get_compiler_ir` in `context.py`.\n\nReturns:\nThe compiler IR bytes.", "source": "github-repos"}
{"code": "def is_connectable(host: str, port: Union[(int, str)]) -> bool:\n    socket_ = None\n    try:\n        socket_ = socket.create_connection((host, port), 1)\n        result = True\n    except socket.timeout:\n        result = False\n    finally:\n        if socket_:\n            socket_.close()\n    return result", "docstring": "Tries to connect to the device to see if it is connectable.\n\nArgs:\nhost: The host to connect.\nport: The port to connect.\n\nReturns:\nTrue or False.", "source": "codesearchnet"}
{"code": "def GetTSKVsPartByPathSpec(tsk_volume, path_spec):\n  \n  location = getattr(path_spec, 'location', None)\n  part_index = getattr(path_spec, 'part_index', None)\n  start_offset = getattr(path_spec, 'start_offset', None)\n  partition_index = None\n\n  if part_index is None:\n    if location is not None:\n      if location.startswith('/p'):\n        try:\n          partition_index = int(location[2:], 10) - 1\n        except ValueError:\n          pass\n\n      if partition_index is None or partition_index < 0:\n        location = None\n\n    if location is None and start_offset is None:\n      return None, None\n\n  bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)\n  current_part_index = 0\n  current_partition_index = 0\n  tsk_vs_part = None\n\n  \n  \n  \n  tsk_vs_part_list = list(tsk_volume)\n  number_of_tsk_vs_parts = len(tsk_vs_part_list)\n\n  if number_of_tsk_vs_parts > 0:\n    if (part_index is not None and\n        (part_index < 0 or part_index >= number_of_tsk_vs_parts)):\n      return None, None\n\n    for tsk_vs_part in tsk_vs_part_list:\n      if TSKVsPartIsAllocated(tsk_vs_part):\n        if partition_index is not None:\n          if partition_index == current_partition_index:\n            break\n        current_partition_index += 1\n\n      if part_index is not None and part_index == current_part_index:\n        break\n\n      if start_offset is not None:\n        start_sector = TSKVsPartGetStartSector(tsk_vs_part)\n\n        if start_sector is not None:\n          start_sector *= bytes_per_sector\n          if start_sector == start_offset:\n            break\n\n      current_part_index += 1\n\n  \n  \n  if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts:\n    return None, None\n\n  if not TSKVsPartIsAllocated(tsk_vs_part):\n    current_partition_index = None\n  return tsk_vs_part, current_partition_index", "docstring": "Retrieves the TSK volume system part object from the TSK volume object.\n\nArgs:\ntsk_volume (pytsk3.Volume_Info): TSK volume information.\npath_spec (PathSpec): path specification.\n\nReturns:\ntuple: contains:\n\npytsk3.TSK_VS_PART_INFO: TSK volume system part information or\nNone on error.\nint: partition index or None if not available.", "source": "juraj-google-style"}
{"code": "def getUserSid(username):\n    if six.PY2:\n        username = _to_unicode(username)\n    domain = win32api.GetComputerName()\n    if (username.find('\\\\') != (- 1)):\n        domain = username.split('\\\\')[0]\n        username = username.split('\\\\')[(- 1)]\n    domain = domain.upper()\n    return win32security.ConvertSidToStringSid(win32security.LookupAccountName(None, ((domain + '\\\\') + username))[0])", "docstring": "Get the Security ID for the user\n\nArgs:\nusername (str): The user name for which to look up the SID\n\nReturns:\nstr: The user SID\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.getUserSid jsnuffy", "source": "codesearchnet"}
{"code": "def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, num_warmup_steps: Optional[int]=None, num_training_steps: Optional[int]=None, scheduler_specific_kwargs: Optional[dict]=None):\n    name = SchedulerType(name)\n    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]\n    if optimizer is not None and isinstance(optimizer, LayerWiseDummyOptimizer):\n        optimizer_dict = optimizer.optimizer_dict\n        scheduler_dict = {}\n        for param in optimizer_dict.keys():\n            scheduler_dict[param] = get_scheduler(name, optimizer=optimizer_dict[param], num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, scheduler_specific_kwargs=scheduler_specific_kwargs)\n\n        def scheduler_hook(param):\n            scheduler_dict[param].step()\n        for param in optimizer_dict.keys():\n            if param.requires_grad:\n                param.register_post_accumulate_grad_hook(scheduler_hook)\n        return LayerWiseDummyScheduler(optimizer_dict=optimizer_dict, lr=optimizer.defaults['lr'])\n    if name == SchedulerType.CONSTANT:\n        return schedule_func(optimizer)\n    if scheduler_specific_kwargs is None:\n        scheduler_specific_kwargs = {}\n    if name == SchedulerType.REDUCE_ON_PLATEAU:\n        return schedule_func(optimizer, **scheduler_specific_kwargs)\n    if num_warmup_steps is None:\n        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')\n    if name == SchedulerType.CONSTANT_WITH_WARMUP:\n        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)\n    if name == SchedulerType.INVERSE_SQRT:\n        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)\n    if name == SchedulerType.WARMUP_STABLE_DECAY:\n        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **scheduler_specific_kwargs)\n    if num_training_steps is None:\n        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')\n    return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **scheduler_specific_kwargs)", "docstring": "Unified API to get any scheduler from its name.\n\nArgs:\nname (`str` or `SchedulerType`):\nThe name of the scheduler to use.\noptimizer (`torch.optim.Optimizer`):\nThe optimizer that will be used during training.\nnum_warmup_steps (`int`, *optional*):\nThe number of warmup steps to do. This is not required by all schedulers (hence the argument being\noptional), the function will raise an error if it's unset and the scheduler type requires it.\nnum_training_steps (`int``, *optional*):\nThe number of training steps to do. This is not required by all schedulers (hence the argument being\noptional), the function will raise an error if it's unset and the scheduler type requires it.\nscheduler_specific_kwargs (`dict`, *optional*):\nExtra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler\nparameters will cause the scheduler function to raise a TypeError.", "source": "github-repos"}
{"code": "def platform_config_dir():\n    if LINUX:\n        dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')\n    elif DARWIN:\n        dpath_ = '~/Library/Application Support'\n    elif WIN32:\n        dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n    else:\n        raise NotImplementedError(('Unknown Platform  %r' % (sys.platform,)))\n    dpath = normpath(expanduser(dpath_))\n    return dpath", "docstring": "Returns a directory which should be writable for any application\nThis should be used for persistent configuration files.\n\nReturns:\nPathLike : path to the cahce dir used by the current operating system", "source": "codesearchnet"}
{"code": "def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):\n    parser = etree.XMLParser(ns_clean=True)\n    tree = etree.parse(sifts_file, parser)\n    root = tree.getroot()\n    my_pdb_resnum = None\n    my_pdb_annotation = False\n    ent = '.\n    for chain in root.findall(ent):\n        if (chain.attrib['entityId'] == chain_id):\n            ures = ('.\n            my_uniprot_residue = chain.findall(ures)\n            if (len(my_uniprot_residue) == 1):\n                parent = my_uniprot_residue[0].getparent()\n                pres = '.\n                my_pdb_residue = parent.findall(pres)\n                my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])\n                anno = '.\n                my_pdb_annotation = parent.findall(anno)\n                if (len(my_pdb_annotation) == 1):\n                    my_pdb_annotation = my_pdb_annotation[0].text\n                    if (my_pdb_annotation == 'Not_Observed'):\n                        my_pdb_annotation = False\n                else:\n                    my_pdb_annotation = True\n            else:\n                return (None, False)\n    return (my_pdb_resnum, my_pdb_annotation)", "docstring": "Map a UniProt residue number to its corresponding PDB residue number.\n\nThis function requires that the SIFTS file be downloaded,\nand also a chain ID (as different chains may have different mappings).\n\nArgs:\nuniprot_resnum (int): integer of the residue number you'd like to map\nchain_id (str): string of the PDB chain to map to\nsifts_file (str): Path to the SIFTS XML file\n\nReturns:\n(tuple): tuple containing:\n\nmapped_resnum (int): Mapped residue number\nis_observed (bool): Indicates if the 3D structure actually shows the residue", "source": "codesearchnet"}
{"code": "def wait_for_tx(self, tx, max_seconds=120):\n    \n    tx_hash = None\n    if isinstance(tx, (str, UInt256)):\n        tx_hash = str(tx)\n    elif isinstance(tx, Transaction):\n        tx_hash = tx.Hash.ToString()\n    else:\n        raise AttributeError(\"Supplied tx is type '%s', but must be Transaction or UInt256 or str\" % type(tx))\n\n    wait_event = Event()\n    time_start = time.time()\n\n    while True:\n        \n        _tx, height = Blockchain.Default().GetTransaction(tx_hash)\n        if height > -1:\n            return True\n\n        \n        wait_event.wait(3)\n\n        seconds_passed = time.time() - time_start\n        if seconds_passed > max_seconds:\n            raise TxNotFoundInBlockchainError(\"Transaction with hash %s not found after %s seconds\" % (tx_hash, int(seconds_passed)))", "docstring": "Wait for tx to show up on blockchain\n\nArgs:\ntx (Transaction or UInt256 or str): Transaction or just the hash\nmax_seconds (float): maximum seconds to wait for tx to show up. default: 120\n\nReturns:\nTrue: if transaction was found\n\nRaises:\nAttributeError: if supplied tx is not Transaction or UInt256 or str\nTxNotFoundInBlockchainError: if tx is not found in blockchain after max_seconds", "source": "juraj-google-style"}
{"code": "def get_icloud_folder_location():\n    yosemite_icloud_path = '~/Library/Mobile Documents/com~apple~CloudDocs/'\n    icloud_home = os.path.expanduser(yosemite_icloud_path)\n    if (not os.path.isdir(icloud_home)):\n        error('Unable to find your iCloud Drive =(')\n    return str(icloud_home)", "docstring": "Try to locate the iCloud Drive folder.\n\nReturns:\n(str) Full path to the iCloud Drive folder.", "source": "codesearchnet"}
{"code": "def kms_encrypt(value, key, aws_config=None):\n    aws_config = (aws_config or {})\n    aws = boto3.session.Session(**aws_config)\n    client = aws.client('kms')\n    enc_res = client.encrypt(KeyId=key, Plaintext=value)\n    return n(b64encode(enc_res['CiphertextBlob']))", "docstring": "Encrypt and value with KMS key.\n\nArgs:\nvalue (str): value to encrypt\nkey (str): key id or alias\naws_config (optional[dict]): aws credentials\ndict of arguments passed into boto3 session\nexample:\naws_creds = {'aws_access_key_id': aws_access_key_id,\n'aws_secret_access_key': aws_secret_access_key,\n'region_name': 'us-east-1'}\n\nReturns:\nstr: encrypted cipher text", "source": "codesearchnet"}
{"code": "def transform_table(self, table, table_meta, missing=None):\n        \n\n        if missing is None:\n            missing = self.missing\n\n        else:\n            self.missing = missing\n            warnings.warn(DEPRECATION_MESSAGE.format('transform_table'), DeprecationWarning)\n\n        content = {}\n        columns = []\n        table_name = table_meta['name']\n\n        for field in table_meta['fields']:\n            column_name = field['name']\n\n            if missing and table[column_name].isnull().any():\n                null_transformer = transformers.NullTransformer(field)\n                clean_column = null_transformer.fit_transform(table[column_name])\n                null_name = '?' + column_name\n                columns.append(null_name)\n                content[null_name] = clean_column[null_name].values\n                column = clean_column[column_name]\n\n            else:\n                column = table[column_name].to_frame()\n\n            transformer = self.transformers[(table_name, column_name)]\n            content[column_name] = transformer.transform(column)[column_name].values\n            columns.append(column_name)\n\n        return pd.DataFrame(content, columns=columns)", "docstring": "Apply the stored transformers to `table`.\n\nArgs:\ntable(pandas.DataFrame):     Contents of the table to be transformed.\n\ntable_meta(dict):   Metadata for the given table.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\npandas.DataFrame: Transformed table.", "source": "juraj-google-style"}
{"code": "def recipe_dcm_run(config, auth_read, account, report_id, report_name):\n    dcm(config, {'auth': auth_read, 'report_run_only': True, 'report': {'account': account, 'report_id': report_id, 'name': report_name}})", "docstring": "Trigger a CM report run\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\naccount (integer) - CM network id.\nreport_id (integer) - CM report id, empty if using name.\nreport_name (string) - CM report name, empty if using id instead.", "source": "github-repos"}
{"code": "def AssignTasksToClient(self, client_id):\n    \n    rules = data_store.REL_DB.ReadAllForemanRules()\n    if not rules:\n      return 0\n\n    last_foreman_run = self._GetLastForemanRunTime(client_id)\n\n    latest_rule_creation_time = max(rule.creation_time for rule in rules)\n\n    if latest_rule_creation_time <= last_foreman_run:\n      return 0\n\n    \n    self._SetLastForemanRunTime(client_id, latest_rule_creation_time)\n\n    relevant_rules = []\n    expired_rules = False\n\n    now = rdfvalue.RDFDatetime.Now()\n\n    for rule in rules:\n      if rule.expiration_time < now:\n        expired_rules = True\n        continue\n      if rule.creation_time <= last_foreman_run:\n        continue\n\n      relevant_rules.append(rule)\n\n    actions_count = 0\n    if relevant_rules:\n      client_data = data_store.REL_DB.ReadClientFullInfo(client_id)\n      if client_data is None:\n        return\n\n      for rule in relevant_rules:\n        if rule.Evaluate(client_data):\n          actions_count += self._RunAction(rule, client_id)\n\n    if expired_rules:\n      data_store.REL_DB.RemoveExpiredForemanRules()\n\n    return actions_count", "docstring": "Examines our rules and starts up flows based on the client.\n\nArgs:\nclient_id: Client id of the client for tasks to be assigned.\n\nReturns:\nNumber of assigned tasks.", "source": "juraj-google-style"}
{"code": "def _make_columnar(self, x):\n    \n    if tensorshape_util.rank(x.shape) is not None:\n      if tensorshape_util.rank(x.shape) == 1:\n        x = x[tf.newaxis, :]\n      return x\n    shape = tf.shape(input=x)\n    maybe_expanded_shape = tf.concat([\n        shape[:-1],\n        distribution_util.pick_vector(\n            tf.equal(tf.rank(x), 1), [1], np.array([], dtype=np.int32)),\n        shape[-1:],\n    ], 0)\n    return tf.reshape(x, maybe_expanded_shape)", "docstring": "Ensures non-scalar input has at least one column.\n\nExample:\nIf `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`.\n\nIf `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged.\n\nIf `x = 1` then the output is unchanged.\n\nArgs:\nx: `Tensor`.\n\nReturns:\ncolumnar_x: `Tensor` with at least two dimensions.", "source": "juraj-google-style"}
{"code": "def task_ordinal_at_coordinates(self, device_coordinates):\n    return self._topology_tasks[tuple(device_coordinates)]", "docstring": "Returns the TensorFlow task number attached to `device_coordinates`.\n\nArgs:\ndevice_coordinates: An integer sequence describing a device's physical\ncoordinates in the TPU fabric.\n\nReturns:\nReturns the TensorFlow task number that contains the TPU device with those\nphysical coordinates.", "source": "github-repos"}
{"code": "def unpack(self, buff, offset=0):\n        \n        super().unpack(buff, offset)\n        \n        try:\n            self.oxm_field = self._unpack_oxm_field()\n        except ValueError as exception:\n            raise UnpackException(exception)\n\n        \n        self.oxm_hasmask = (self.oxm_field_and_mask & 1) == 1  \n\n        \n        start = offset + 4  \n        end = start + self.oxm_length\n        self.oxm_value = buff[start:end]", "docstring": "Unpack the buffer into a OxmTLV.\n\nArgs:\nbuff (bytes): The binary data to be unpacked.\noffset (int): If we need to shift the beginning of the data.", "source": "juraj-google-style"}
{"code": "def mpim_open(self, *, users: List[str], **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"users\": users})\n        return self.api_call(\"mpim.open\", json=kwargs)", "docstring": "This method opens a multiparty direct message.\n\nArgs:\nusers (list): A lists of user ids. The ordering of the users\nis preserved whenever a MPIM group is returned.\ne.g. ['W1234567890', 'U2345678901', 'U3456789012']", "source": "juraj-google-style"}
{"code": "def get_artist_location(self, cache=True):\n        \n        if not (cache and ('artist_location' in self.cache)):\n            response = self.get_attribute('profile', bucket='artist_location')\n            self.cache['artist_location'] = response['songs'][0]['artist_location']\n        return self.cache['artist_location']", "docstring": "Get the location of a song's artist.\n\nArgs:\ncache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.\n\nReturns:\nAn artist location object.\n\nExample:\n>>> s = song.Song('SOQKVPH12A58A7AF4D')\n>>> s.artist_location\n{u'latitude': 34.053489999999996, u'location': u'Los Angeles, CA', u'longitude': -118.24532000000001}\n>>>", "source": "juraj-google-style"}
{"code": "def apply(self, func, axis, *args, **kwargs):\n    if callable(func):\n        return self._callable_func(func, axis, *args, **kwargs)\n    elif isinstance(func, dict):\n        return self._dict_func(func, axis, *args, **kwargs)\n    elif is_list_like(func):\n        return self._list_like_func(func, axis, *args, **kwargs)\n    else:\n        pass", "docstring": "Apply func across given axis.\n\nArgs:\nfunc: The function to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "codesearchnet"}
{"code": "def advance_for_next_slice(self, recovery_slice=False):\n    \n    if recovery_slice:\n      self.slice_id += 2\n      \n      self.input_reader = self.input_reader.from_json(self._input_reader_json)\n    else:\n      self.slice_id += 1", "docstring": "Advance relavent states for next slice.\n\nArgs:\nrecovery_slice: True if this slice is running recovery logic.\nSee handlers.MapperWorkerCallbackHandler._attempt_slice_recovery\nfor more info.", "source": "juraj-google-style"}
{"code": "def find_record(self, model_class, record_id, reload=False):\n    cached_model = self.peek_record(model_class, record_id)\n    if ((cached_model is not None) and (reload is False)):\n        return cached_model\n    else:\n        return self._get_record(model_class, record_id)", "docstring": "Return a instance of model_class from the API or the local cache.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nrecord_id (int): The id of the record requested.\nreload (bool, optional): Don't return the cached version if reload==True.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: An instance of model_class or None.", "source": "codesearchnet"}
{"code": "def split_vector_ctype(ctype):\n    if (not is_vector_ctype(ctype)):\n        raise ValueError('The given ctype is not a vector type.')\n    for vector_length in [2, 3, 4, 8, 16]:\n        if ctype.endswith(str(vector_length)):\n            vector_str_len = len(str(vector_length))\n            return (ctype[:(- vector_str_len)], int(ctype[(- vector_str_len):]))", "docstring": "Split a vector ctype into a raw ctype and the vector length.\n\nIf the given ctype is not a vector type, we raise an error. I\n\nArgs:\nctype (str): the ctype to possibly split into a raw ctype and the vector length\n\nReturns:\ntuple: the raw ctype and the vector length", "source": "codesearchnet"}
{"code": "def reward(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> tf.Tensor:\n    scope = self.reward_scope(state, action, next_state)\n    r = self.compile_reward(scope).tensor\n    with self.graph.as_default():\n        with tf.name_scope('reward'):\n            return tf.expand_dims(r, (- 1))", "docstring": "Compiles the reward function given the current `state`, `action` and\n`next_state`.\n\nArgs:\nstate (Sequence[tf.Tensor]): A tuple of current state tensors.\naction (Sequence[tf.Tensor]): A tuple of action tensors.\nnext_state (Sequence[tf.Tensor]): A tuple of next state tensors.\n\nReturns:\n(:obj:`tf.Tensor`): A tensor representing the reward function.", "source": "codesearchnet"}
{"code": "def request_with_retry(func, *args, **kwargs):\n    \n    max_retries = kwargs.pop('max_retries', 30)\n    sleep = 2\n    retry_count = 0\n    while True:\n        try:\n            response = func(*args, **kwargs)\n            response.raise_for_status()\n            return response\n        except (requests.exceptions.ConnectionError,\n                requests.exceptions.HTTPError,  \n                requests.exceptions.Timeout) as e:\n            if retry_count == max_retries:\n                return e\n            retry_count += 1\n            delay = sleep + random.random() * 0.25 * sleep\n            if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 429:\n                logger.info(\n                    \"Rate limit exceeded, retrying in %s seconds\" % delay)\n            else:\n                logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s',\n                               e, args, kwargs)\n            time.sleep(delay)\n            sleep *= 2\n            if sleep > MAX_SLEEP_SECONDS:\n                sleep = MAX_SLEEP_SECONDS\n        except requests.exceptions.RequestException as e:\n            logger.error(response.json()['error'])  \n            logger.exception(\n                'requests_with_retry encountered unretryable exception: %s', e)\n            return e", "docstring": "Perform a requests http call, retrying with exponential backoff.\n\nArgs:\nfunc: An http-requesting function to call, like requests.post\nmax_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk\n*args: passed through to func\n**kwargs: passed through to func", "source": "juraj-google-style"}
{"code": "def _get_param_matcher(self, callable_type):\n    callable_param_count = collections.Counter(self.ctx.annotation_utils.get_type_parameters(callable_type))\n    if isinstance(callable_type, abstract.CallableClass):\n        callable_param_count.subtract(self.ctx.annotation_utils.get_type_parameters(callable_type.get_formal_type_parameter(abstract_utils.ARGS)))\n\n    def match(left, right, subst):\n        if not isinstance(left, abstract.TypeParameter) or not isinstance(right, abstract.TypeParameter) or right.constraints or right.bound or (callable_param_count[right] != 1):\n            return None\n        self._type_params.seen.add(right)\n        subst = subst.copy()\n        subst[right.full_name] = self.ctx.program.NewVariable([self.ctx.convert.empty], [], self._node)\n        return subst\n    return match", "docstring": "Helper for matching the parameters of a callable.\n\nArgs:\ncallable_type: The callable being matched against.\n\nReturns:\nA special param matcher: (left, right, subst) -> Optional[subst].\nleft: An argument to be matched against a parameter of callable_type.\nright: A parameter of callable_type.\nsubst: The current substitution dictionary.\nIf the matcher returns a non-None subst dict, then the match has succeeded\nvia special matching rules for single TypeVars. Otherwise, the caller\nshould next attempt normal matching on the inputs. (See\n_match_signature_against_callable for a usage example.)", "source": "github-repos"}
{"code": "def set_messages(self, messages: Sequence[CachedMessage]) -> None:\n        \n        uids = {msg.uid for msg in messages}\n        expunged = self._messages._uids - uids\n        return self.add_updates(messages, expunged)", "docstring": "This is the non-optimized alternative to :meth:`.add_updates` for\nbackend implementations that cannot detect their own updates and must\ninstead compare the entire state of the mailbox.\n\nThe ``messages`` list should contain the entire set of messages in the\nmailbox, ordered by UID. Any UID that previously existed and is not\nincluded in ``messages`` will be expunged.\n\nArgs:\nmessages: The entire set of cached message objects.", "source": "juraj-google-style"}
{"code": "def _getfullargspec(target):\n    return _convert_maybe_argspec_to_fullargspec(getargspec(target))", "docstring": "A python2 version of getfullargspec.\n\nArgs:\ntarget: the target object to inspect.\n\nReturns:\nA FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.", "source": "github-repos"}
{"code": "def update_headers(self, headers):\n    check_type(headers, dict, may_be_none=False)\n    self._req_session.headers.update(headers)", "docstring": "Update the HTTP headers used for requests in this session.\n\nNote: Updates provided by the dictionary passed as the `headers`\nparameter to this method are merged into the session headers by adding\nnew key-value pairs and/or updating the values of existing keys. The\nsession headers are not replaced by the provided dictionary.\n\nArgs:\nheaders(dict): Updates to the current session headers.", "source": "codesearchnet"}
{"code": "async def puts(self, items, seqn=None):\n        \n        size = 0\n\n        for chunk in s_common.chunks(items, 1000):\n            metrics = self._items.save(chunk)\n            self._metrics.add(metrics)\n            await self.fire('cryotank:puts', numrecords=len(chunk))\n            size += len(chunk)\n            await asyncio.sleep(0)\n\n        if seqn is not None:\n            iden, offs = seqn\n            self.setOffset(iden, offs + size)\n\n        return size", "docstring": "Add the structured data from items to the CryoTank.\n\nArgs:\nitems (list):  A list of objects to store in the CryoTank.\nseqn (iden, offs): An iden / offset pair to record.\n\nReturns:\nint: The ending offset of the items or seqn.", "source": "juraj-google-style"}
{"code": "def fresh(t, non_generic):\n    mappings = {}\n\n    def freshrec(tp):\n        p = prune(tp)\n        if isinstance(p, TypeVariable):\n            if is_generic(p, non_generic):\n                if (p not in mappings):\n                    mappings[p] = TypeVariable()\n                return mappings[p]\n            else:\n                return p\n        elif isinstance(p, dict):\n            return p\n        elif isinstance(p, Collection):\n            return Collection(*[freshrec(x) for x in p.types])\n        elif isinstance(p, Scalar):\n            return Scalar([freshrec(x) for x in p.types])\n        elif isinstance(p, TypeOperator):\n            return TypeOperator(p.name, [freshrec(x) for x in p.types])\n        elif isinstance(p, MultiType):\n            return MultiType([freshrec(x) for x in p.types])\n        else:\n            assert False, 'missing freshrec case {}'.format(type(p))\n    return freshrec(t)", "docstring": "Makes a copy of a type expression.\n\nThe type t is copied. The generic variables are duplicated and the\nnon_generic variables are shared.\n\nArgs:\nt: A type to be copied.\nnon_generic: A set of non-generic TypeVariables", "source": "codesearchnet"}
{"code": "def conversations_replies(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'ts': ts})\n    return self.api_call('conversations.replies', http_verb='GET', params=kwargs)", "docstring": "Retrieve a thread of messages posted to a conversation\n\nArgs:\nchannel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'\nts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'", "source": "codesearchnet"}
{"code": "def _bfd_rx(self, **kwargs):\n        \n        int_type = kwargs['int_type']\n        method_name = 'interface_%s_bfd_interval_min_rx' % int_type\n        bfd_rx = getattr(self._interface, method_name)\n        config = bfd_rx(**kwargs)\n        if kwargs['delete']:\n            tag = 'min-rx'\n            config.find('.\n            pass\n        return config", "docstring": "Return the BFD minimum receive interval XML.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\nmin_rx (str): BFD receive interval in milliseconds (300, 500, etc)\ndelete (bool): Remove the configuration if ``True``.\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               block_shape,\n               block_rows,\n               name='block_diagonal_matrix'):\n    \n    super(BlockDiagonalMatrix, self).__init__(\n        block_shape=block_shape,\n        block_rows=block_rows,\n        include_diagonal=True,\n        include_off_diagonal=False,\n        name=name)", "docstring": "Constructs a new `BlockDiagonalMatrix` module.\n\nArgs:\nblock_shape: tuple, 2-dimensional tuple indicating the shape of each\nindividual block.\nblock_rows: int, the number of blocks in each row (and column) of the\noutput matrix.\nname: string, name of the module.", "source": "juraj-google-style"}
{"code": "def luhn(base, num_only=False, allow_lower_case=False):\n    \n    if num_only:\n        alphabet = _ALPHABET[:10]\n    else:\n        alphabet = _ALPHABET\n    if allow_lower_case:\n        base = base.upper()\n    try:\n        pre_calc = (_PRE_CALC[alphabet.index(c)] for c in reversed(base))\n        cum = 0\n        parity = 1\n        for elem in pre_calc:\n            val, parity = elem[parity]\n            cum += val\n    except ValueError:\n        pass    \n    else:\n        return 10 - cum % 10\n    \n    if num_only:\n        msg = 'The string given must only contain digits.'\n    elif allow_lower_case:\n        msg = 'The string given must only contain digits and ascii letters.'\n    else:\n        msg = 'The string given must only contain digits and upper case ' \\\n              'ascii letters.'\n    raise ValueError(msg)", "docstring": "Return the Luhn check digit for the given string.\n\nArgs:\nbase(str): string for which to calculate the check digit\nnum_only(bool): allow only digits in `base` (default: False)\nallow_lower_case(bool): allow lower case letters in `base`\n(default: False)\n\nReturns:\nint: Luhn check digit\n\nRaises:\nValueError: given `base` contains an unallowed character", "source": "juraj-google-style"}
{"code": "def create_prefetch(self, addresses):\n    with self._lock:\n        for add in addresses:\n            self._state[add] = _ContextFuture(address=add, wait_for_tree=True)", "docstring": "Create futures needed before starting the process of reading the\naddress's value from the merkle tree.\n\nArgs:\naddresses (list of str): addresses in the txn's inputs that\naren't in any base context (or any in the chain).", "source": "codesearchnet"}
{"code": "def insert(self, start_time: int, schedule: ScheduleComponent) -> 'ScheduleComponent':\n        \n        return ops.insert(self, start_time, schedule)", "docstring": "Return a new schedule with `schedule` inserted within `self` at `start_time`.\n\nArgs:\nstart_time: time to be inserted\nschedule: schedule to be inserted", "source": "juraj-google-style"}
{"code": "def series_with_permutation(self, other):\n        \n        combined_permutation = tuple([self.permutation[p]\n                                      for p in other.permutation])\n        return CPermutation.create(combined_permutation)", "docstring": "Compute the series product with another channel permutation circuit\n\nArgs:\nother (CPermutation):\n\nReturns:\nCircuit: The composite permutation circuit (could also be the\nidentity circuit for n channels)", "source": "juraj-google-style"}
{"code": "def decstr2int(dec_str, decimals):\n    \n    if not isinstance(decimals, int):\n        raise TypeError('decimals must be an integer')\n    try:\n        dollars, cents = dec_str.split('.')\n    except ValueError:\n        if '.' not in dec_str:\n            dollars = dec_str\n            cents = '0'\n        else:\n            raise ValueError('Invalid decimal string')\n    else:\n        if len(cents) < decimals:\n            cents = cents.ljust(decimals, '0')\n        elif decimals < 1:\n            cents = '0'\n        elif len(cents) > decimals:\n            cents = cents[:decimals]\n    try:\n        cents = int(cents)\n    except:\n        cents = 0\n    try:\n        return int(int(dollars) * (10 ** decimals)) + cents\n    except:\n        raise ValueError('Invalid decimal string')", "docstring": "Returns an integer that has the value of the decimal string:\ndec_str*10^decimals\n\nArguments:\ndec_str (string) that represents a decimal number\ndecimals (int): number of decimals for creating the integer output\nReturns:\n(int)\nRaises:\nValueError if dec_string is not a valid decimal string\nTypeError if decimals is not an integer\nNote: values may be truncated (not rounded).", "source": "juraj-google-style"}
{"code": "def download_extract_tar(tar_url, folder, tar_filename=''):\n    try:\n        makedirs(folder)\n    except OSError:\n        if (not isdir(folder)):\n            raise\n    data_file = tar_filename\n    if (not data_file):\n        (fd, data_file) = mkstemp('.tar.gz')\n        download(tar_url, os.fdopen(fd, 'wb'))\n    else:\n        download(tar_url, data_file)\n    with tarfile.open(data_file) as tar:\n        tar.extractall(path=folder)", "docstring": "Download and extract the tar at the url to the given folder\n\nArgs:\ntar_url (str): URL of tar file to download\nfolder (str): Location of parent directory to extract to. Doesn't have to exist\ntar_filename (str): Location to download tar. Default is to a temp file", "source": "codesearchnet"}
{"code": "def id_range(self):\n    if (len(self._anchor_points) == 0):\n        return (0, 0)\n    return (self._anchor_points[0].reading_id, self._anchor_points[(- 1)].reading_id)", "docstring": "Get the range of archor reading_ids.\n\nReturns:\n(int, int): The lowest and highest reading ids.\n\nIf no reading ids have been loaded, (0, 0) is returned.", "source": "codesearchnet"}
{"code": "def request_json(link, outfile, force_rerun_flag, outdir=None):\n    if (not outdir):\n        outdir = ''\n    outfile = op.join(outdir, outfile)\n    if force_rerun(flag=force_rerun_flag, outfile=outfile):\n        text_raw = requests.get(link)\n        my_dict = text_raw.json()\n        with open(outfile, 'w') as f:\n            json.dump(my_dict, f)\n        log.debug('Loaded and saved {} to {}'.format(link, outfile))\n    else:\n        with open(outfile, 'r') as f:\n            my_dict = json.load(f)\n        log.debug('Loaded {}'.format(outfile))\n    return my_dict", "docstring": "Download a file in JSON format from a web request\n\nArgs:\nlink: Link to web request\noutfile: Name of output file\noutdir: Directory of output file\nforce_rerun_flag: If true, redownload the file\n\nReturns:\ndict: contents of the JSON request", "source": "codesearchnet"}
{"code": "def get_script_module(script_information, package='pylabcontrol', verbose=False):\n    (module, _, _, _, _, _, _) = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)\n    return module", "docstring": "wrapper to get the module for a script\n\nArgs:\nscript_information: information of the script. This can be\n- a dictionary\n- a Script instance\n- name of Script class\npackage (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string\nReturns:\nmodule", "source": "codesearchnet"}
{"code": "def async_call(self, *args, **kwargs):\n\n    def after_autoconnect_callback(future):\n        if self.is_connected():\n            self._call(*args, **kwargs)\n        else:\n            pass\n    if ('callback' not in kwargs):\n        kwargs['callback'] = discard_reply_cb\n    if (not self.is_connected()):\n        if self.autoconnect:\n            connect_future = self.connect()\n            cb = after_autoconnect_callback\n            self.__connection._ioloop.add_future(connect_future, cb)\n        else:\n            error = ConnectionError('you are not connected and autoconnect=False')\n            kwargs['callback'](error)\n    else:\n        self._call(*args, **kwargs)", "docstring": "Calls a redis command, waits for the reply and call a callback.\n\nFollowing options are available (not part of the redis command itself):\n\n- callback\nFunction called (with the result as argument) when the result\nis available. If not set, the reply is silently discarded. In\ncase of errors, the callback is called with a\nTornadisException object as argument.\n\nArgs:\n*args: full redis command as variable length argument list or\na Pipeline object (as a single argument).\n**kwargs: options as keyword parameters.\n\nExamples:\n\n>>> def cb(result):\npass\n>>> client.async_call(\"HSET\", \"key\", \"field\", \"val\", callback=cb)", "source": "codesearchnet"}
{"code": "def event_date(self, event_date):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        event_date = self._utils.format_datetime(event_date, date_format='%Y-%m-%dT%H:%M:%SZ')\n        self._data['eventDate'] = event_date\n        request = {'eventDate': event_date}\n        return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Updates the event_date.\n\nArgs:\nevent_date: Converted to %Y-%m-%dT%H:%M:%SZ date format.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def parsed_top_level_errors(parsed, errors, component_type: str='') -> Errors:\n    fn_cnt = 0\n    rel_cnt = 0\n    nested_cnt = 0\n    for key in parsed:\n        if (parsed[key]['type'] == 'Function'):\n            fn_cnt += 1\n        if (parsed[key]['type'] == 'Relation'):\n            rel_cnt += 1\n        if (parsed[key]['type'] == 'Nested'):\n            nested_cnt += 1\n    if (not component_type):\n        if (nested_cnt > 1):\n            errors.append(('Error', 'Too many nested objects - can only have one per BEL Assertion'))\n        if nested_cnt:\n            if (rel_cnt > 2):\n                errors.append(('Error', 'Too many relations - can only have two in a nested BEL Assertion'))\n            elif (fn_cnt > 4):\n                errors.append(('Error', 'Too many BEL subject and object candidates'))\n        elif (rel_cnt > 1):\n            errors.append(('Error', 'Too many relations - can only have one in a BEL Assertion'))\n        elif (fn_cnt > 2):\n            errors.append(('Error', 'Too many BEL subject and object candidates'))\n    elif (component_type == 'subject'):\n        if (rel_cnt > 0):\n            errors.append(('Error', 'Too many relations - cannot have any in a BEL Subject'))\n        elif (fn_cnt > 1):\n            errors.append(('Error', 'Too many BEL subject candidates - can only have one'))\n    elif (component_type == 'object'):\n        if nested_cnt:\n            if (rel_cnt > 1):\n                errors.append(('Error', 'Too many relations - can only have one in a nested BEL object'))\n            elif (fn_cnt > 2):\n                errors.append(('Error', 'Too many BEL subject and object candidates in a nested BEL object'))\n        elif (rel_cnt > 0):\n            errors.append(('Error', 'Too many relations - cannot have any in a BEL Subject'))\n        elif (fn_cnt > 1):\n            errors.append(('Error', 'Too many BEL subject candidates - can only have one'))\n    return errors", "docstring": "Check full parse for errors\n\nArgs:\nparsed:\nerrors:\ncomponent_type: Empty string or 'subject' or 'object' to indicate that we\nare parsing the subject or object field input", "source": "codesearchnet"}
{"code": "def extraction_data_statistics(path):\n    with functions.DBContextManager(path) as session:\n        extraction = session.query(models.Extraction).first()\n        (X, y) = extraction.return_main_dataset()\n        functions.verify_dataset(X, y)\n        if (extraction.test_dataset['method'] == 'split_from_main'):\n            (X, X_test, y, y_test) = train_test_split(X, y, test_size=extraction.test_dataset['split_ratio'], random_state=extraction.test_dataset['split_seed'], stratify=y)\n        elif (extraction.test_dataset['method'] == 'source'):\n            if (('source' not in extraction.test_dataset) or (not extraction.test_dataset['source'])):\n                raise exceptions.UserError('Source is empty')\n            extraction_code = extraction.test_dataset['source']\n            extraction_function = functions.import_object_from_string_code(extraction_code, 'extract_test_dataset')\n            (X_test, y_test) = extraction_function()\n        else:\n            (X_test, y_test) = (None, None)\n        extraction_code = extraction.meta_feature_generation['source']\n        return_splits_iterable = functions.import_object_from_string_code(extraction_code, 'return_splits_iterable')\n        number_of_splits = 0\n        test_indices = []\n        try:\n            for (train_idx, test_idx) in return_splits_iterable(X, y):\n                number_of_splits += 1\n                test_indices.append(test_idx)\n        except Exception as e:\n            raise exceptions.UserError('User code exception', exception_message=str(e))\n        test_indices = np.concatenate(test_indices)\n        (X, y) = (X[test_indices], y[test_indices])\n        extraction_code = extraction.stacked_ensemble_cv['source']\n        return_splits_iterable = functions.import_object_from_string_code(extraction_code, 'return_splits_iterable')\n        number_of_splits_stacked_cv = 0\n        try:\n            for (train_idx, test_idx) in return_splits_iterable(X, y):\n                number_of_splits_stacked_cv += 1\n        except Exception as e:\n            raise exceptions.UserError('User code exception', exception_message=str(e))\n        data_stats = dict()\n        data_stats['train_data_stats'] = functions.verify_dataset(X, y)\n        if (X_test is not None):\n            data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)\n        else:\n            data_stats['test_data_stats'] = None\n        data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits}\n        data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv}\n        extraction.data_statistics = data_stats\n        session.add(extraction)\n        session.commit()", "docstring": "Generates data statistics for the given data extraction setup stored\nin Xcessiv notebook.\n\nThis is in rqtasks.py but not as a job yet. Temporarily call this directly\nwhile I'm figuring out Javascript lel.\n\nArgs:\npath (str, unicode): Path to xcessiv notebook", "source": "codesearchnet"}
{"code": "def call(method: Method, *args: Any, **kwargs: Any) -> Any:\n    return validate_args(method, *args, **kwargs)(*args, **kwargs)", "docstring": "Validates arguments and then calls the method.\n\nArgs:\nmethod: The method to call.\n*args, **kwargs: Arguments to the method.\n\nReturns:\nThe \"result\" part of the JSON-RPC response (the return value from the method).\n\nRaises:\nTypeError: If arguments don't match function signature.", "source": "codesearchnet"}
{"code": "def get_access_token(self, http=None, additional_claims=None):\n    if (additional_claims is None):\n        if ((self.access_token is None) or self.access_token_expired):\n            self.refresh(None)\n        return client.AccessTokenInfo(access_token=self.access_token, expires_in=self._expires_in())\n    else:\n        (token, unused_expiry) = self._create_token(additional_claims)\n        return client.AccessTokenInfo(access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)", "docstring": "Create a signed jwt.\n\nArgs:\nhttp: unused\nadditional_claims: dict, additional claims to add to\nthe payload of the JWT.\nReturns:\nAn AccessTokenInfo with the signed jwt", "source": "codesearchnet"}
{"code": "def AddStopTime(self, stop, problems=None, schedule=None, **kwargs):\n    if (problems is None):\n        problems = problems_module.default_problem_reporter\n    stoptime = self.GetGtfsFactory().StopTime(problems=problems, stop=stop, **kwargs)\n    self.AddStopTimeObject(stoptime, schedule)", "docstring": "Add a stop to this trip. Stops must be added in the order visited.\n\nArgs:\nstop: A Stop object\nkwargs: remaining keyword args passed to StopTime.__init__\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "async def get_final_ranking(self) -> OrderedDict:\n    if (self._state != TournamentState.complete.value):\n        return None\n    ranking = {}\n    for p in self.participants:\n        if (p.final_rank in ranking):\n            ranking[p.final_rank].append(p)\n        else:\n            ranking[p.final_rank] = [p]\n    return OrderedDict(sorted(ranking.items(), key=(lambda t: t[0])))", "docstring": "Get the ordered players ranking\n\nReturns:\ncollections.OrderedDict[rank, List[Participant]]:\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def _snapshot_device_function_stack_metadata(self) -> list[traceable_stack.TraceableObject]:\n    snapshot = []\n    for obj in self._device_function_stack.peek_traceable_objs():\n        obj_copy = obj.copy_metadata()\n        obj_copy.obj = obj.obj.display_name\n        snapshot.append(obj_copy)\n    return snapshot", "docstring": "Return device function stack as a list of TraceableObjects.\n\nReturns:\n[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj\nmember is a displayable name for the user's argument to Graph.device, and\nthe filename and lineno members point to the code location where\nGraph.device was called directly or indirectly by the user.", "source": "github-repos"}
{"code": "def _is_autocomplete_valid(cur_commands, alias_command):\n    \n    parent_command = ' '.join(cur_commands[1:])\n    with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'r') as tab_completion_table_file:\n        try:\n            tab_completion_table = json.loads(tab_completion_table_file.read())\n            return alias_command in tab_completion_table and parent_command in tab_completion_table[alias_command]\n        except Exception:  \n            return False", "docstring": "Determine whether autocomplete can be performed at the current state.\n\nArgs:\nparser: The current CLI parser.\ncur_commands: The current commands typed in the console.\nalias_command: The alias command.\n\nReturns:\nTrue if autocomplete can be performed.", "source": "juraj-google-style"}
{"code": "def period_from_list(period: Tuple[int, List[int]]) -> dateslib.PeriodTensor:\n    amount = period[1]\n    period_type = period_pb2.PeriodType.Name(period[0])\n    return dateslib.PeriodTensor(amount, dateslib.PeriodType[period_type])", "docstring": "Utility to convert a list of periods to a PeriodTensor.\n\nArgs:\nperiod: A tuple of an integer (which corresponds to the proto type of the\nperiod (see `period_pb2.Period`)) and a list of period values.\n\nReturns:\nAn instance of the `PeriodTensor`.", "source": "github-repos"}
{"code": "def window_unpartition(self, windows: torch.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int]) -> torch.Tensor:\n    pad_height, pad_width = padding_shape\n    height, width = original_shape\n    batch_size = windows.shape[0] \n    hidden_states = windows.reshape(batch_size, pad_height \n    hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)\n    hidden_states = hidden_states[:, :height, :width, :].contiguous()\n    return hidden_states", "docstring": "Args:\nWindow unpartition into original sequences and removing padding.\nhidden_states (tensor):\ninput tokens with [batch_size * num_windows, window_size, window_size, channel].\nwindow_size (int):\nwindow size.\npadding_shape (Tuple):\npadded height and width (pad_height, pad_width).\noriginal_shape (Tuple): original height and width (height, width) before padding.\n\nReturns:\nhidden_states: unpartitioned sequences with [batch_size, height, width, channel].", "source": "github-repos"}
{"code": "def rsolve(A, b, epsilon=_epsilon):\n    A = asarray(A, float)\n    b = asarray(b, float)\n    if (A.shape[0] == 0):\n        return zeros((A.shape[1],))\n    if (A.shape[1] == 0):\n        return zeros((0,))\n    try:\n        x = lstsq(A, b, rcond=epsilon)\n        r = sum((x[3] > epsilon))\n        if (r == 0):\n            return zeros(A.shape[1])\n        return x[0]\n    except (ValueError, LinAlgError) as e:\n        warnings.warn(str(e), RuntimeWarning)\n    return solve(A, b)", "docstring": "r\"\"\"Robust solve for the linear equations.\n\nArgs:\nA (array_like): Coefficient matrix.\nb (array_like): Ordinate values.\n\nReturns:\n:class:`numpy.ndarray`: Solution ``x``.", "source": "codesearchnet"}
{"code": "def broadcast_tensors(*args: Sequence[tf.Tensor], name: Optional[str]=None) -> Tuple[tf.Tensor]:\n    name = 'broadcast_tensors' if name is None else name\n    with tf.name_scope(name):\n        output_shape = common_shape(*args)\n        return tuple((tf.broadcast_to(arg, output_shape) for arg in args))", "docstring": "Broadcasts arguments to the common shape.\n\n#### Example\n```python\nimport tensorflow as tf\nimport tf_quant_finance as tff\n\nargs = [tf.ones([1, 2], dtype=tf.float64), tf.constant([[True], [False]])]\ntff.utils.broadcast_tensor_shapes(*args)\n# Expected: (array([[1., 1.], [1., 1.]]),\n#            array([[True, True], [False, False]])\n```\n\nArgs:\n*args: A sequence of `Tensor`s of compatible shapes and any `dtype`s.\nname: Python string. The name to give to the ops created by this function.\nDefault value: `None` which maps to the default name\n`broadcast_tensor_shapes`.\n\nReturns:\nA tuple of broadcasted `Tensor`s. Each `Tensor` has the same `dtype` as the\ncorresponding input `Tensor`.\n\nRaises:\nValueError: If inputs are of incompatible shapes.", "source": "github-repos"}
{"code": "def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None):\n    shape1 = convert_to_shape(shape1)\n    shape2 = convert_to_shape(shape2)\n    given_output_shape = convert_to_shape(given_output_shape)\n    if (given_output_shape is not None):\n        return given_output_shape\n    if is_subsequence(shape1.dims, shape2.dims):\n        return shape2\n    if is_subsequence(shape2.dims, shape1.dims):\n        return shape1\n    return Shape((shape1.dims + [d for d in shape2.dims if (d not in shape1.dims)]))", "docstring": "Infer shape of the output of a binary op with broadcasting.\n\nIf the output shape is not given with given_output_shape, then we check\nto see if one of the shapes is a subsequence of the other one, and we\nreturn the one that is the supersequence.  Otherwise, we list the dimensions\nof shape1, followed by all new dimensions in shape2.\n\nArgs:\nshape1: a Shape\nshape2: a Shape\ngiven_output_shape: an optional Shape\nReturns:\na Shape", "source": "codesearchnet"}
{"code": "def collapse(self, dimensions=None, function=None, spreadfn=None, **kwargs):\n    from .data import concat\n    if (not dimensions):\n        dimensions = self.kdims\n    if (not isinstance(dimensions, list)):\n        dimensions = [dimensions]\n    if ((self.ndims > 1) and (len(dimensions) != self.ndims)):\n        groups = self.groupby([dim for dim in self.kdims if (dim not in dimensions)])\n    elif all(((d in self.kdims) for d in dimensions)):\n        groups = HoloMap([(0, self)])\n    else:\n        raise KeyError('Supplied dimensions not found.')\n    collapsed = groups.clone(shared_data=False)\n    for (key, group) in groups.items():\n        if hasattr(group.last, 'interface'):\n            group_data = concat(group)\n            if function:\n                agg = group_data.aggregate(group.last.kdims, function, spreadfn, **kwargs)\n                group_data = group.type(agg)\n        else:\n            group_data = [el.data for el in group]\n            args = (group_data, function, group.last.kdims)\n            data = group.type.collapse_data(*args, **kwargs)\n            group_data = group.last.clone(data)\n        collapsed[key] = group_data\n    return (collapsed if (self.ndims - len(dimensions)) else collapsed.last)", "docstring": "Concatenates and aggregates along supplied dimensions\n\nUseful to collapse stacks of objects into a single object,\ne.g. to average a stack of Images or Curves.\n\nArgs:\ndimensions: Dimension(s) to collapse\nDefaults to all key dimensions\nfunction: Aggregation function to apply, e.g. numpy.mean\nspreadfn: Secondary reduction to compute value spread\nUseful for computing a confidence interval, spread, or\nstandard deviation.\n**kwargs: Keyword arguments passed to the aggregation function\n\nReturns:\nReturns the collapsed element or HoloMap of collapsed\nelements", "source": "codesearchnet"}
{"code": "def url_to_text(self, url):\n    (path, headers) = urllib.request.urlretrieve(url)\n    return self.path_to_text(path)", "docstring": "Download PDF file and transform its document to string.\n\nArgs:\nurl:   PDF url.\n\nReturns:\nstring.", "source": "codesearchnet"}
{"code": "def _to_snake_case(string):\n    sub_string = '\\\\1_\\\\2'\n    string = REGEX_CAMEL_FIRST.sub(sub_string, string)\n    return REGEX_CAMEL_SECOND.sub(sub_string, string).lower()", "docstring": "Return a snake cased version of the input string.\n\nArgs:\nstring (str): A camel cased string.\n\nReturns:\nstr: A snake cased string.", "source": "codesearchnet"}
{"code": "def GetHelp(self, prefix='', include_special_flags=True):\n    \n    \n    helplist = []\n\n    flags_by_module = self.FlagsByModuleDict()\n    if flags_by_module:\n      modules = sorted(flags_by_module)\n\n      \n      main_module = sys.argv[0]\n      if main_module in modules:\n        modules.remove(main_module)\n        modules = [main_module] + modules\n\n      for module in modules:\n        self.__RenderOurModuleFlags(module, helplist)\n      if include_special_flags:\n        self.__RenderModuleFlags('gflags',\n                                 _helpers.SPECIAL_FLAGS.FlagDict().values(),\n                                 helplist)\n    else:\n      \n      values = self.FlagDict().values()\n      if include_special_flags:\n        values.append(_helpers.SPECIAL_FLAGS.FlagDict().values())\n      self.__RenderFlagList(values, helplist, prefix)\n\n    return '\\n'.join(helplist)", "docstring": "Generates a help string for all known flags.\n\nArgs:\nprefix: str, per-line output prefix.\ninclude_special_flags: bool, whether to include description of\n_SPECIAL_FLAGS, i.e. --flagfile and --undefok.\n\nReturns:\nstr, formatted help message.", "source": "juraj-google-style"}
{"code": "def close(self):\n    if (self._fd is None):\n        return\n    try:\n        os.close(self._fd)\n    except OSError as e:\n        raise LEDError(e.errno, ('Closing LED: ' + e.strerror))\n    self._fd = None", "docstring": "Close the sysfs LED.\n\nRaises:\nLEDError: if an I/O or OS error occurs.", "source": "codesearchnet"}
{"code": "def clear(self, url=None, xpath=None):\n        \n        if url is not None:\n            query = self._query(url, xpath)\n            if query.count() > 0:\n                query.delete()\n                self.session.commit()\n            else:\n                raise KeyError(\"Cannot clear URL, not in cache: \" + str(url) + \" xpath:\" + str(xpath))\n        else:\n            \n            self.close()\n            if path.exists(self.db_path):\n                remove(self.db_path)", "docstring": "Clear cache\n\nArgs:\nurl (str): If given, clear specific item only. Otherwise remove the DB file.\nxpath (str): xpath to search (may be ``None``)", "source": "juraj-google-style"}
{"code": "def set_atten(self, idx, value):\n    if not self.is_open:\n        raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host)\n    if idx + 1 > self.path_count:\n        raise IndexError('Attenuator index out of range!', self.path_count, idx)\n    if value > self.max_atten:\n        raise ValueError('Attenuator value out of range!', self.max_atten, value)\n    self._telnet_client.cmd('CHAN:%s:SETATT:%s' % (idx + 1, value))", "docstring": "Sets the attenuation value for a particular signal path.\n\nArgs:\nidx: Zero-based index int which is the identifier for a particular\nsignal path in an instrument. For instruments that only has one\nchannel, this is ignored by the device.\nvalue: A float that is the attenuation value to set.\n\nRaises:\nError: The underlying telnet connection to the instrument is not\nopen.\nIndexError: The index of the attenuator is greater than the maximum\nindex of the underlying instrument.\nValueError: The requested set value is greater than the maximum\nattenuation value.", "source": "github-repos"}
{"code": "def expand(self, pcoll):\n    return pcoll | core.CombinePerKey(TopCombineFn(self._n, self._key, self._reverse))", "docstring": "Expands the transform.\n\nRaises TypeCheckError: If the output type of the input PCollection is not\ncompatible with tuple[A, B].\n\nArgs:\npcoll: PCollection to process\n\nReturns:\nthe PCollection containing the result.", "source": "github-repos"}
{"code": "def put(value):\n    worker = global_worker\n    worker.check_connected()\n    with profiling.profile('ray.put'):\n        if (worker.mode == LOCAL_MODE):\n            return value\n        object_id = ray._raylet.compute_put_id(worker.current_task_id, worker.task_context.put_index)\n        worker.put_object(object_id, value)\n        worker.task_context.put_index += 1\n        return object_id", "docstring": "Store an object in the object store.\n\nArgs:\nvalue: The Python object to be stored.\n\nReturns:\nThe object ID assigned to this value.", "source": "codesearchnet"}
{"code": "def is_separating(direction, polygon1, polygon2):\n    norm_squared = ((direction[0] * direction[0]) + (direction[1] * direction[1]))\n    params = []\n    vertex = np.empty((2,), order='F')\n    for polygon in (polygon1, polygon2):\n        (_, polygon_size) = polygon.shape\n        min_param = np.inf\n        max_param = (- np.inf)\n        for index in six.moves.xrange(polygon_size):\n            vertex[:] = polygon[(:, index)]\n            param = (cross_product(direction, vertex) / norm_squared)\n            min_param = min(min_param, param)\n            max_param = max(max_param, param)\n        params.append((min_param, max_param))\n    return ((params[0][0] > params[1][1]) or (params[0][1] < params[1][0]))", "docstring": "Checks if a given ``direction`` is a separating line for two polygons.\n\n.. note::\n\nThis is a helper for :func:`_polygon_collide`.\n\nArgs:\ndirection (numpy.ndarray): A 1D ``2``-array (``float64``) of a\npotential separating line for the two polygons.\npolygon1 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered\npoints in a polygon.\npolygon2 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered\npoints in a polygon.\n\nReturns:\nbool: Flag indicating if ``direction`` is a separating line.", "source": "codesearchnet"}
{"code": "def __init__(self, lookup_list, do_not_log_prefix=None):\n    super().__init__()\n    self._lookup_list = lookup_list\n    self._do_not_log_prefix = do_not_log_prefix", "docstring": "Create this visitor.\n\nArgs:\nlookup_list: An iterable of symbol tables (i.e., objects that have a\n\"lookup\" function)\ndo_not_log_prefix: If given, don't log error messages for classes with\nthis prefix.", "source": "github-repos"}
{"code": "def average_name(self, var):\n    if var.ref() in self._averages:\n        return self._averages[var.ref()].name[:-len(':0')]\n    return ops.get_default_graph().unique_name(var.name[:-len(':0')] + '/' + self.name, mark_as_used=False)", "docstring": "[Meant for TF1] Returns name of `Variable` holding the average for `var`.\n\n(Designed to work with legacy `tf.compat.v1.train.Saver`, it is sensitive to\nspecific variable names and not recommended for TF2)\n\nThe typical scenario for `ExponentialMovingAverage` is to compute moving\naverages of variables during training, and restore the variables from the\ncomputed moving averages during evaluations.\n\nTo restore variables, you have to know the name of the shadow variables.\nThat name and the original variable can then be passed to a `Saver()` object\nto restore the variable from the moving average value with:\n`saver = tf.compat.v1.train.Saver({ema.average_name(var): var})`\n\n`average_name()` can be called whether or not `apply()` has been called.\n\nArgs:\nvar: A `Variable` object.\n\nReturns:\nA string: The name of the variable that will be used or was used\nby the `ExponentialMovingAverage class` to hold the moving average of\n`var`.", "source": "github-repos"}
{"code": "def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]]=None) -> Dict[str, str]:\n    if backend_specific_objects is None:\n        backend_specific_objects = read_init()\n    dummy_files = {}\n    for backend, objects in backend_specific_objects.items():\n        backend_name = '[' + ', '.join((f'\"{b}\"' for b in backend.split('_and_'))) + ']'\n        dummy_file = '\n        dummy_file += 'from ..utils import DummyObject, requires_backends\\n\\n'\n        dummy_file += '\\n'.join([create_dummy_object(o, backend_name) for o in objects])\n        dummy_files[backend] = dummy_file\n    return dummy_files", "docstring": "Create the content of the dummy files.\n\nArgs:\nbackend_specific_objects (`Dict[str, List[str]]`, *optional*):\nThe mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling\n`read_init()`.\n\nReturns:\n`Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file.", "source": "github-repos"}
{"code": "def _data_to_tensor(data_list, batch_size, name=None):\n    r\n    \n    const_list = [tf.constant(data) for data in data_list]\n\n    \n    queue_list = tf.train.slice_input_producer(const_list, capacity=batch_size*128, name=name)\n\n    \n    return tf.train.shuffle_batch(queue_list, batch_size, capacity=batch_size*128,\n                                  min_after_dequeue=batch_size*32, name=name)", "docstring": "r\"\"\"Returns batch queues from the whole data.\n\nArgs:\ndata_list: A list of ndarrays. Every array must have the same size in the first dimension.\nbatch_size: An integer.\nname: A name for the operations (optional).\n\nReturns:\nA list of tensors of `batch_size`.", "source": "juraj-google-style"}
{"code": "def pairwise(lst):\n    \n    if not lst:\n        return\n    length = len(lst)\n    for i in range(length - 1):\n        yield lst[i], lst[i + 1]\n    yield lst[-1], None", "docstring": "yield item i and item i+1 in lst. e.g.\n(lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)\nArgs:\nlst (list): List to process\nReturns:\nlist", "source": "juraj-google-style"}
{"code": "def _recur_flatten(key, x, out, sep='.'):\n    \n    if x is None or isinstance(x, (str, int, float, bool)):\n        out[key] = x\n        return out\n    if isinstance(x, list):\n        for i, v in enumerate(x):\n            new_key = '{}{}{}'.format(key, sep, i)\n            out = _recur_flatten(new_key, v, out, sep)\n    if isinstance(x, dict):\n        for k, v in x.items():\n            new_key = '{}{}{}'.format(key, sep, k)\n            out = _recur_flatten(new_key, v, out, sep)\n    return out", "docstring": "Helper function to flatten_dict\n\nRecursively flatten all nested values within a dict\n\nArgs:\nkey (str): parent key\nx (object): object to flatten or add to out dict\nout (dict): 1D output dict\nsep (str): flattened key separator string\n\nReturns:\ndict: flattened 1D dict", "source": "juraj-google-style"}
{"code": "def disambiguate_pdf(self, file, language=None, entities=None):\n        \n\n        body = {\n            \"customisation\": \"generic\"\n        }\n\n        if language:\n            body['language'] = {\"lang\": language}\n\n        if entities:\n            body['entities'] = entities\n\n        files = {\n            'query': str(body),\n            'file': (\n                file,\n                open(file, 'rb'),\n                'application/pdf',\n                {'Expires': '0'}\n            )\n        }\n\n        res, status = self.post(\n            self.disambiguate_service,\n            files=files,\n            headers={'Accept': 'application/json'},\n        )\n\n        if status != 200:\n            logger.debug('Disambiguation failed with error ' + str(status))\n\n        return self.decode(res), status", "docstring": "Call the disambiguation service in order to process a pdf file .\n\nArgs:\npdf (file): PDF file to be disambiguated.\nlanguage (str): language of text (if known)\n\nReturns:\ndict, int: API response and API status.", "source": "juraj-google-style"}
{"code": "def UploadSignedBinary(source_path,\n                       binary_type,\n                       platform,\n                       upload_subdirectory=\"\"):\n  \n  file_size = os.path.getsize(source_path)\n  if file_size > _MAX_SIGNED_BINARY_BYTES:\n    raise BinaryTooLargeError(\n        \"File [%s] is of size %d (bytes), which exceeds the allowed maximum \"\n        \"of %d bytes.\" % (source_path, file_size, _MAX_SIGNED_BINARY_BYTES))\n\n  context = [\"Platform:%s\" % platform.title(), \"Client Context\"]\n  signing_key = grr_config.CONFIG.Get(\n      \"PrivateKeys.executable_signing_private_key\", context=context)\n\n  root_api = maintenance_utils.InitGRRRootAPI()\n  binary_path = \"/\".join([\n      platform.lower(),\n      upload_subdirectory,\n      os.path.basename(source_path),\n  ])\n  binary = root_api.GrrBinary(binary_type, binary_path)\n\n  with open(source_path, \"rb\") as fd:\n    binary.Upload(\n        fd,\n        sign_fn=binary.DefaultUploadSigner(\n            private_key=signing_key.GetRawPrivateKey()))\n\n  print(\"Uploaded %s to %s\" % (binary_type, binary_path))", "docstring": "Signs a binary and uploads it to the datastore.\n\nArgs:\nsource_path: Path to the binary to upload.\nbinary_type: Type of the binary, e.g python-hack or executable.\nplatform: Client platform where the binary is intended to be run.\nupload_subdirectory: Path of a subdirectory to upload the binary to,\nrelative to the canonical path for binaries of the given type and\nplatform.\n\nRaises:\nBinaryTooLargeError: If the binary to upload is too large.", "source": "juraj-google-style"}
{"code": "def GetParserObjectByName(cls, parser_name):\n    parser_class = cls._parser_classes.get(parser_name, None)\n    if parser_class:\n        return parser_class()\n    return None", "docstring": "Retrieves a specific parser object by its name.\n\nArgs:\nparser_name (str): name of the parser.\n\nReturns:\nBaseParser: parser object or None.", "source": "codesearchnet"}
{"code": "def update_device_info(self, device_id, display_name):\n    content = {'display_name': display_name}\n    return self._send('PUT', ('/devices/%s' % device_id), content=content)", "docstring": "Update the display name of a device.\n\nArgs:\ndevice_id (str): The device ID of the device to update.\ndisplay_name (str): New display name for the device.", "source": "codesearchnet"}
{"code": "def _format_device(var):\n  \n  if var.dtype.name.endswith(\"_ref\"):\n    resource_var_annotation = \"(legacy)\"\n  else:\n    resource_var_annotation = \"(resource)\"\n\n  if var.device:\n    return \"{} {}\".format(var.device, resource_var_annotation)\n  else:\n    return resource_var_annotation", "docstring": "Returns the device with an annotation specifying `ResourceVariable`.\n\n\"legacy\" means a normal tf.Variable while \"resource\" means a ResourceVariable.\n\nFor example:\n`(legacy)`\n`(resource)`\n`/job:learner/task:0/device:CPU:* (legacy)`\n`/job:learner/task:0/device:CPU:* (resource)`\n\nArgs:\nvar: The Tensorflow Variable to print.", "source": "juraj-google-style"}
{"code": "def getInfo(self, query=None, process=False, mode='phonefy', qURI=None):\n    results = []\n    data = ''\n    if (self._modeIsValid(mode=mode) and self._isValidQuery(query, mode=mode)):\n        if (mode in ['mailfy', 'phonefy', 'searchfy', 'usufy']):\n            try:\n                results = getattr(self, 'do_{}'.format(mode))(query)\n            except AttributeError as e:\n                raise NotImplementedModeError(str(self), mode)\n    return json.dumps(results)", "docstring": "Method that checks the presence of a given query and recovers the first list of complains.\n\nArgs:\n-----\nquery: Query to verify.\nprocess: Calling the processing function.\nmode: Mode to be executed.\nqURI: A query to be checked.\n\nReturn:\n-------\nPython structure for the html processed.\n\nRaises:\n-------\nNoCredentialsException.\nNotImplementedModeError.\nBadImplementationError.", "source": "codesearchnet"}
{"code": "def join(self, other, **kwargs):\n        \n        if not isinstance(other, list):\n            other = [other]\n        return self._join_list_of_managers(other, **kwargs)", "docstring": "Joins a list or two objects together.\n\nArgs:\nother: The other object(s) to join on.\n\nReturns:\nJoined objects.", "source": "juraj-google-style"}
{"code": "def to_proto(self, export_scope=None):\n    if context.executing_eagerly():\n        raise RuntimeError('This operation is not supported when eager execution is enabled.')\n    if export_scope is None or self.handle.name.startswith(export_scope):\n        var_def = variable_pb2.VariableDef()\n        var_def.variable_name = ops.strip_name_scope(self.handle.name, export_scope)\n        if self._initial_value is not None:\n            var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)\n        var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)\n        if self._cached_value is not None:\n            var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name, export_scope)\n        else:\n            var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name, export_scope)\n        var_def.is_resource = True\n        var_def.trainable = self.trainable\n        var_def.synchronization = self.synchronization.value\n        var_def.aggregation = self.aggregation.value\n        if self._save_slice_info:\n            var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))\n        return var_def\n    else:\n        return None", "docstring": "Converts a `ResourceVariable` to a `VariableDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nRaises:\nRuntimeError: If run in EAGER mode.\n\nReturns:\nA `VariableDef` protocol buffer, or `None` if the `Variable` is not\nin the specified name scope.", "source": "github-repos"}
{"code": "def validate_artifact_url(valid_artifact_rules, valid_artifact_task_ids, url):\n\n    def callback(match):\n        path_info = match.groupdict()\n        if (('taskId' in path_info) and (path_info['taskId'] not in valid_artifact_task_ids)):\n            return\n        if ('filepath' not in path_info):\n            return\n        return path_info['filepath']\n    filepath = match_url_regex(valid_artifact_rules, url, callback)\n    if (filepath is None):\n        raise ScriptWorkerTaskException(\"Can't validate url {}\".format(url), exit_code=STATUSES['malformed-payload'])\n    return unquote(filepath).lstrip('/')", "docstring": "Ensure a URL fits in given scheme, netloc, and path restrictions.\n\nIf we fail any checks, raise a ScriptWorkerTaskException with\n``malformed-payload``.\n\nArgs:\nvalid_artifact_rules (tuple): the tests to run, with ``schemas``, ``netlocs``,\nand ``path_regexes``.\nvalid_artifact_task_ids (list): the list of valid task IDs to download from.\nurl (str): the url of the artifact.\n\nReturns:\nstr: the ``filepath`` of the path regex.\n\nRaises:\nScriptWorkerTaskException: on failure to validate.", "source": "codesearchnet"}
{"code": "def use_value_spec(self, value_spec: Optional[pg_typing.List], allow_partial: bool=False) -> 'List':\n    if value_spec is None:\n        self._value_spec = None\n        self._accessor_writable = True\n        return self\n    if not isinstance(value_spec, pg_typing.List):\n        raise ValueError(self._error_message(f'Value spec for list must be a `pg.typing.List` object. Encountered: {value_spec!r}'))\n    if self._value_spec and self._value_spec != value_spec:\n        raise RuntimeError(self._error_message(f'List is already bound with a different value spec: {self._value_spec}. New value spec: {value_spec}.'))\n    self._allow_partial = allow_partial\n    if flags.is_type_check_enabled():\n        value_spec.apply(self, allow_partial=base.accepts_partial(self), child_transform=base.symbolic_transform_fn(self._allow_partial), root_path=self.sym_path)\n    else:\n        self._value_spec = value_spec\n    return self", "docstring": "Applies a ``pg.List`` as the value spec for current list.\n\nArgs:\nvalue_spec: A List ValueSpec to apply to this List.\nIf current List is schema-less (whose immediate members are not\nvalidated against schema), and `value_spec` is not None, the value spec\nwill be applied to the List.\nOr else if current List is already symbolic (whose immediate members\nare under the constraint of a List value spec), and `value_spec` is\nNone, current List will become schema-less. However, the schema\nconstraints for non-immediate members will remain.\nallow_partial: Whether allow partial dict based on the schema. This flag\nwill override allow_partial flag in __init__ for spec-less List.\n\nReturns:\nSelf.\n\nRaises:\nValueError: schema validation failed due to value error.\nRuntimeError: List is already bound with another value_spec.\nTypeError: type errors during validation.\nKeyError: key errors during validation.", "source": "github-repos"}
{"code": "def delete_association(self, target, api_type=None, api_sub_type=None, unique_id=None):\n        \n        api_type = api_type or target.api_type\n        api_sub_type = api_sub_type or target.api_sub_type\n        unique_id = unique_id or target.unique_id\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if not target.can_update():\n            self._tcex.handle_error(910, [target.type])\n\n        return self.tc_requests.delete_association(\n            self.api_type,\n            self.api_sub_type,\n            self.unique_id,\n            api_type,\n            api_sub_type,\n            unique_id,\n            owner=self.owner,\n        )", "docstring": "Deletes a association from a Indicator/Group/Victim\n\nArgs:\ntarget:\napi_type:\napi_sub_type:\nunique_id:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def add_comments(self, comments):\n        \n        for comment in comments:\n            if comment not in self.comments and len(comment) > 0:\n                self.comments.append(comment)\n            if len(self.comments[0]) == 0:\n                self.comments.pop(0)", "docstring": "Add comments to the localization entry\n\nArgs:\ncomments (list of str): The comments to be added to the localization entry.", "source": "juraj-google-style"}
{"code": "def get_atoms(structure, **kwargs):\n    if (not structure.is_ordered):\n        raise ValueError('ASE Atoms only supports ordered structures')\n    symbols = [str(site.specie.symbol) for site in structure]\n    positions = [site.coords for site in structure]\n    cell = structure.lattice.matrix\n    return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell, **kwargs)", "docstring": "Returns ASE Atoms object from pymatgen structure.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\n**kwargs: other keyword args to pass into the ASE Atoms constructor\n\nReturns:\nASE Atoms object", "source": "codesearchnet"}
{"code": "def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):\n    self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header)", "docstring": "Start saving the results to a local file in CSV format and return a Job for completion.\n\nArgs:\ndestination: path on the local filesystem for the saved results.\nformat: the format to use for the exported data; currently only 'csv' is supported.\ncsv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','\ncsv_header: for CSV exports, whether to include an initial header line. Default true.\nReturns:\nA Job for the async save operation.\nRaises:\nAn Exception if the operation failed.", "source": "codesearchnet"}
{"code": "def outer_multiply(x, y):\n    x_shape = tf.shape(x)\n    padded_shape = tf.concat([x_shape, tf.ones(tf.rank(y), dtype=x_shape.dtype)], axis=0)\n    return tf.reshape(x, padded_shape) * y", "docstring": "Performs an outer multiplication of two tensors.\n\nGiven two `Tensor`s, `S` and `T` of shape `s` and `t` respectively, the outer\nproduct `P` is a `Tensor` of shape `s + t` whose components are given by:\n\n```none\nP_{i1,...ik, j1, ... , jm} = S_{i1...ik} T_{j1, ... jm}\n```\n\nArgs:\nx: A `Tensor` of any shape and numeric dtype.\ny: A `Tensor` of any shape and the same dtype as `x`.\n\nReturns:\nouter_product: A `Tensor` of shape Shape[x] + Shape[y] and the same dtype\nas `x`.", "source": "github-repos"}
{"code": "def model_code_key_prefix(code_location_key_prefix, model_name, image):\n    training_job_name = sagemaker.utils.name_from_image(image)\n    return '/'.join(filter(None, [code_location_key_prefix, (model_name or training_job_name)]))", "docstring": "Returns the s3 key prefix for uploading code during model deployment\n\nThe location returned is a potential concatenation of 2 parts\n1. code_location_key_prefix if it exists\n2. model_name or a name derived from the image\n\nArgs:\ncode_location_key_prefix (str): the s3 key prefix from code_location\nmodel_name (str): the name of the model\nimage (str): the image from which a default name can be extracted\n\nReturns:\nstr: the key prefix to be used in uploading code", "source": "codesearchnet"}
{"code": "def stop(name, file=sys.stderr):\n    \n    if is_enabled():\n        elapsed = (time() - __TIMERS[name])\n        if elapsed > 60:\n            elapsed_str = '{:.1f} m'.format(elapsed / 60)\n        elif elapsed > 1:\n            elapsed_str = '{:.1f} s'.format(elapsed)\n        else:\n            elapsed_str = '{:.1f} ms'.format(elapsed * 1000)\n\n        del __TIMERS[name]\n        print(\"[prof]\", name, elapsed_str, file=file)\n    return is_enabled()", "docstring": "Stop a profiling timer.\n\nArguments:\n\nname (str): The name of the timer to stop. If no name is given, stop\nthe global anonymous timer.\n\nReturns:\n\nbool: Whether or not profiling is enabled.\n\nRaises:\n\nKeyError: If the named timer does not exist.", "source": "juraj-google-style"}
{"code": "def _fill_in_missing(x):\n    default_value = '' if x.dtype == tf.string else 0\n    return tf.squeeze(tf.sparse.to_dense(tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1)", "docstring": "Replace missing values in a SparseTensor.\n\nFills in missing values of `x` with '' or 0, and converts to a dense tensor.\n\nArgs:\nx: A `SparseTensor` of rank 2.  Its dense shape should have size at most 1\nin the second dimension.\n\nReturns:\nA rank 1 tensor where missing values of `x` have been filled in.", "source": "github-repos"}
{"code": "def unreduce_tensor(tensor, shape, axis, keepdims):\n  \n  if not keepdims:\n    if axis is None:\n      axis = range(len(shape))\n    elif isinstance(axis, int):\n      axis = axis,\n    for ax in sorted(axis):\n      tensor = tf.expand_dims(tensor, ax)\n  tile_shape = np.array(shape) / np.array(shape_as_list(tensor))\n  return tf.tile(tensor, tile_shape)", "docstring": "Reverse summing over a dimension.\n\nSee utils.py.\n\nArgs:\ntensor: The tensor that was reduced.\nshape: A list, the original shape of the tensor before reduction.\naxis: The axis or axes that were summed.\nkeepdims: Whether these axes were kept as singleton axes.\n\nReturns:\nA tensor with axes broadcast to match the shape of the original tensor.", "source": "juraj-google-style"}
{"code": "def output(self, _filename):\n        \n\n        txt = \"\"\n        for c in self.contracts:\n            (name, _inheritance, _var, func_summaries, _modif_summaries) = c.get_summary()\n            txt += blue(\"\\n+ Contract %s\\n\"%name)\n            \n            public = [(elem[0], (elem[1], elem[2]) ) for elem in func_summaries]\n\n            collect = collections.defaultdict(list)\n            for a,b in public:\n                collect[a].append(b)\n            public = list(collect.items())\n\n            for contract, functions in public:\n                txt += blue(\"  - From {}\\n\".format(contract))\n                functions = sorted(functions)\n                for (function, visi) in functions:\n                    if visi in ['external', 'public']:\n                        txt += green(\"    - {} ({})\\n\".format(function, visi))\n                for (function, visi) in functions:\n                    if visi in ['internal', 'private']:\n                        txt += magenta(\"    - {} ({})\\n\".format(function, visi))\n                for (function, visi) in functions:\n                    if visi not in ['external', 'public', 'internal', 'private']:\n                        txt += \"    - {}  ({})\\n\".format(function, visi)\n\n        self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def asdict(self):\n    return {'stream': self.stream, 'device_timestamp': self.raw_time, 'streamer_local_id': self.reading_id, 'timestamp': self.reading_time, 'extra_data': self.summary_data, 'data': self.raw_data}", "docstring": "Encode the data in this event into a dictionary.\n\nThe dictionary returned from this method is a reference to the data\nstored in the IOTileEvent, not a copy.  It should be treated as read\nonly.\n\nReturns:\ndict: A dictionary containing the information from this event.", "source": "codesearchnet"}
{"code": "def get_tensor_shard(param, empty_param, device_mesh, rank, dim):\n    param_dim = empty_param.dim()\n    if dim < 0:\n        dim = param_dim + dim\n    if dim >= param_dim:\n        raise ValueError(f'dim {dim} is out of bounds for tensor of dimension {param_dim}')\n    mesh_shape = device_mesh.shape\n    world_size = reduce(operator.mul, mesh_shape)\n    if rank >= world_size:\n        raise ValueError(f'Rank {rank} is out of bounds for mesh size {world_size}')\n    shard_size = empty_param.shape[dim] \n    start = rank * shard_size\n    end = start + shard_size\n    slice_indices = [slice(None)] * param_dim\n    slice_indices[dim] = slice(start, end)\n    return param[tuple(slice_indices)]", "docstring": "Generalized tensor sharding across a multi-dimensional device mesh.\n\nArgs:\nparam (torch.Tensor): The tensor to shard.\nempty_param (torch.Tensor): A tensor used for shape reference.\ndevice_mesh (torch.Tensor): Shape [d_0, ..., d_n] representing the mesh.\nrank (int): Global rank of the current process/device.\ndim (int): Dimension along which to shard the tensor.", "source": "github-repos"}
{"code": "def price(self, valuation_date, market, model=None, name=None):\n    del model, valuation_date\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        reference_curve = market.reference_curve\n        df1 = reference_curve.get_discount_factor(self._accrual_start_dates)\n        df2 = reference_curve.get_discount_factor(self._accrual_end_dates)\n        fwd_rates = (df1 / df2 - 1.0) / self._accrual_daycount\n        total_accrual = tf.math.segment_sum(self._daycount_fractions, self._contract_idx)\n        if self._averaging_type == rc.AverageType.ARITHMETIC_AVERAGE:\n            settlement_rate = tf.math.segment_sum(fwd_rates * self._daycount_fractions, self._contract_idx) / total_accrual\n        else:\n            settlement_rate = (tf.math.segment_prod(1.0 + fwd_rates * self._daycount_fractions, self._contract_idx) - 1.0) / total_accrual\n        return 100.0 * (1.0 - settlement_rate)", "docstring": "Returns the price of the contract on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: An object of type `InterestRateMarket` which contains the\nnecessary information for pricing the FRA instrument.\nmodel: Reserved for future use.\nname: Python string. The name to give this op.\nDefault value: `None` which maps to `price`.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each\nfutures contract based on the input market data.", "source": "github-repos"}
{"code": "def convert_lstm_weights(weights, from_cudnn=True):\n    kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)\n    recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n    if from_cudnn:\n        biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)\n    else:\n        biases = np.tile(0.5 * weights[2], 2)\n    return [kernels, recurrent_kernels, biases]", "docstring": "Converts the weights between CuDNNLSTM and LSTM.\n\nArgs:\nweights: Original weights.\nfrom_cudnn: Indicates whether original weights are from CuDNN layer.\n\nReturns:\nUpdated weights compatible with LSTM.", "source": "github-repos"}
{"code": "def _get_backend(p0: _GPath, p1: _GPath) -> backend_lib.Backend:\n    if p0._backend in _GCS_BACKENDS:\n        return p0._backend\n    elif p1._backend in _GCS_BACKENDS:\n        return p1._backend\n    else:\n        return p0._backend", "docstring": "When composing with another backend, GCS win.\n\nTo allow `Path('.').replace('gs://')`\n\nArgs:\np0: Path to compare\np1: Path to compare\n\nReturns:\nGCS backend if one of the 2 path is GCS, else p0 backend.", "source": "github-repos"}
{"code": "def set_timing(self, timing: bool, reset: bool = False) -> None:\n        \n        self._timing = timing\n        if reset:\n            self.reset()", "docstring": "Manually set the ``timing`` parameter, and optionally reset the timers.\n\nArgs:\ntiming: should we be timing?\nreset: reset the timers?", "source": "juraj-google-style"}
{"code": "def _convert_path(path, name):\n    table = os.path.splitext(path)[0]\n    table = table.replace(os.path.sep, '__')\n    if (name is not None):\n        table = '___'.join([table, name])\n    table = re.sub('[^0-9a-zA-Z_]+', '_', table)\n    table = table.lower()\n    return table", "docstring": "Convert resource's path and name to storage's table name.\n\nArgs:\npath (str): resource path\nname (str): resource name\n\nReturns:\nstr: table name", "source": "codesearchnet"}
{"code": "def delete(filething):\n    \n\n    t = OggOpus(filething)\n    filething.fileobj.seek(0)\n    t.delete(filething)", "docstring": "delete(filething)\n\nArguments:\nfilething (filething)\nRaises:\nmutagen.MutagenError\n\nRemove tags from a file.", "source": "juraj-google-style"}
{"code": "def direct_normal_radiation(self, value=9999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `direct_normal_radiation`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `direct_normal_radiation`')\n    self._direct_normal_radiation = value", "docstring": "Corresponds to IDD Field `direct_normal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `direct_normal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _updateEncoding(self, index):\n    encoding = self._encodingComboBox.itemText(index)\n    encoding = encoding.lower()\n    self._encodingKey = _calculateEncodingKey(encoding)\n    self._previewFile()", "docstring": "Changes the value of the encoding combo box to the value of given index.\n\nThis method is also a `SLOT`.\nAfter the encoding is changed, the file will be reloaded and previewed.\n\nArgs:\nindex (int): An valid index of the combo box.", "source": "codesearchnet"}
{"code": "def vdot(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Vdot().symbolic_call(x1, x2)\n    return backend.numpy.vdot(x1, x2)", "docstring": "Return the dot product of two vectors.\n\nIf the first argument is complex, the complex conjugate of the first\nargument is used for the calculation of the dot product.\n\nMultidimensional tensors are flattened before the dot product is taken.\n\nArgs:\nx1: First input tensor. If complex, its complex conjugate is taken\nbefore calculation of the dot product.\nx2: Second input tensor.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def flux_minimization(model, fixed, solver, weights={}):\n    \n\n    fba = FluxBalanceProblem(model, solver)\n\n    for reaction_id, value in iteritems(fixed):\n        flux = fba.get_flux_var(reaction_id)\n        fba.prob.add_linear_constraints(flux >= value)\n\n    fba.minimize_l1()\n\n    return ((reaction_id, fba.get_flux(reaction_id))\n            for reaction_id in model.reactions)", "docstring": "Minimize flux of all reactions while keeping certain fluxes fixed.\n\nThe fixed reactions are given in a dictionary as reaction id\nto value mapping. The weighted L1-norm of the fluxes is minimized.\n\nArgs:\nmodel: MetabolicModel to solve.\nfixed: dict of additional lower bounds on reaction fluxes.\nsolver: LP solver instance to use.\nweights: dict of weights on the L1-norm terms.\n\nReturns:\nAn iterator of reaction ID and reaction flux pairs.", "source": "juraj-google-style"}
{"code": "def start_proc_mask_signal(proc):\n    \n    if not isinstance(proc, list):\n        proc = [proc]\n\n    with mask_sigint():\n        for p in proc:\n            if isinstance(p, mp.Process):\n                if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':\n                    log_once(\n\"Starting a process with 'fork' method is not safe and may consume unnecessary extra memory.\"\n\" Use 'forkserver' method (available after Py3.4) instead if you run into any issues. \"\n\"See https:\n'warn')  \n            p.start()", "docstring": "Start process(es) with SIGINT ignored.\n\nArgs:\nproc: (mp.Process or list)\n\nNote:\nThe signal mask is only applied when called from main thread.", "source": "juraj-google-style"}
{"code": "def _process_new(self, feed_item):\n    creative = {'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None), 'name': feed_item.get(FieldMap.CREATIVE_NAME, None), 'active': True}\n    self._associate_third_party_urls(feed_item, creative)\n    self._associate_click_tags(feed_item, creative)\n    if feed_item.get(FieldMap.CREATIVE_TYPE, None) == 'VIDEO':\n        creative['type'] = 'INSTREAM_VIDEO'\n        for association in feed_item.get('associations', []):\n            identifier = self.creative_asset_dao.get_identifier(association, self._creative_asset_feed)\n            creative['creativeAssets'] = [{'assetIdentifier': identifier, 'role': 'PARENT_VIDEO'}]\n        del creative['active']\n    elif feed_item.get(FieldMap.CREATIVE_TYPE, None) == 'DISPLAY':\n        creative['type'] = 'DISPLAY'\n        if feed_item.get(FieldMap.CREATIVE_WIDTH, None) and feed_item.get(FieldMap.CREATIVE_HEIGHT, None):\n            creative['size'] = {'kind': 'dfareporting\n        for association in feed_item.get('associations', []):\n            identifier = self.creative_asset_dao.get_identifier(association, self._creative_asset_feed)\n            creative['creativeAssets'] = [{'assetIdentifier': identifier, 'role': 'PRIMARY'}]\n            if feed_item.get(FieldMap.CREATIVE_BACKUP_ASSET_ID, None) and feed_item.get(FieldMap.CREATIVE_BACKUP_ASSET_ID, None) != '':\n                backup_identifier = self.creative_asset_dao.get_backup_identifier(association, self._creative_asset_feed)\n                creative['backupImageReportingLabel'] = feed_item.get(FieldMap.CREATIVE_BACKUP_NAME, None)\n                backup_features = feed_item.get(FieldMap.BACKUP_IMAGE_FEATURES, None)\n                if backup_features != None or backup_features != '':\n                    features = backup_features.split(',')\n                    creative['backupImageFeatures'] = features\n                creative['backupImageTargetWindow'] = {'targetWindowOption': feed_item.get(FieldMap.BACKUP_IMAGE_TARGET_WINDOW_OPTION, None), 'customHtml': feed_item.get(FieldMap.BACKUP_IMAGE_CUSTOM_HTML, None)}\n                lp = self.landing_page_dao.get(feed_item, column_name=FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID)\n                creative['backupImageClickThroughUrl'] = {'landingPageId': feed_item.get(FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID) if not lp else lp['id']}\n                creative['creativeAssets'].append({'assetIdentifier': backup_identifier, 'role': 'BACKUP_IMAGE'})\n        del creative['active']\n    else:\n        raise Exception('Only video and display are supported at the moment!')\n    return creative", "docstring": "Creates a new creative DCM object from a feed item representing an creative from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item representing the creative from the Bulkdozer feed.\n\nReturns:\nA creative object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def EquilibrateEigenVectorPhases(x, y):\n    phases = np.sum(np.conj(x) * y, -2, keepdims=True)\n    phases /= np.abs(phases)\n    return phases * x", "docstring": "Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`.\n\nEigenvectors are only unique up to an arbitrary phase. This function rotates x\nsuch that it matches y. Precondition: The columns of x and y differ by a\nmultiplicative complex phase factor only.\n\nArgs:\nx: `np.ndarray` with Eigenvectors\ny: `np.ndarray` with Eigenvectors\n\nReturns:\n`np.ndarray` containing an equilibrated version of x.", "source": "github-repos"}
{"code": "def distance(self, physical_qubit1, physical_qubit2):\n    if (physical_qubit1 not in self.physical_qubits):\n        raise CouplingError(('%s not in coupling graph' % (physical_qubit1,)))\n    if (physical_qubit2 not in self.physical_qubits):\n        raise CouplingError(('%s not in coupling graph' % (physical_qubit2,)))\n    if (self._dist_matrix is None):\n        self._compute_distance_matrix()\n    return self._dist_matrix[(physical_qubit1, physical_qubit2)]", "docstring": "Returns the undirected distance between physical_qubit1 and physical_qubit2.\n\nArgs:\nphysical_qubit1 (int): A physical qubit\nphysical_qubit2 (int): Another physical qubit\n\nReturns:\nint: The undirected distance\n\nRaises:\nCouplingError: if the qubits do not exist in the CouplingMap", "source": "codesearchnet"}
{"code": "def _get_fullname(obj):\n    if (not hasattr(obj, '__name__')):\n        obj = obj.__class__\n    if (obj.__module__ in ('builtins', '__builtin__')):\n        return obj.__name__\n    return '{}.{}'.format(obj.__module__, obj.__name__)", "docstring": "Get the full name of an object including the module.\n\nArgs:\nobj: An object.\n\nReturns:\nThe full class name of the object.", "source": "codesearchnet"}
{"code": "def trace(self, data, callback=None):\n    conn_id = self._find_connection(self.conn_string)\n    if (conn_id is not None):\n        self.adapter.notify_event_nowait(self.conn_string, 'trace', data)\n    if (callback is not None):\n        callback((conn_id is not None))", "docstring": "Queue data for tracing\n\nArgs:\ndata (bytearray, string): Unstructured data to trace to any\nconnected client.\ncallback (callable): An optional callback that will be called with\na bool value of True when this data actually gets traced.\nIf the client disconnects and the data is dropped instead,\ncallback will be called with False.", "source": "codesearchnet"}
{"code": "def extract_output(self, accumulator, *args, **kwargs):\n    raise NotImplementedError(str(self))", "docstring": "Return result of converting accumulator into the output value.\n\nArgs:\naccumulator: the final accumulator value computed by this CombineFn\nfor the entire input key or PCollection. Can be modified for\nefficiency.\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"}
{"code": "def dims(x):\n  \n  if isinstance(x, tf.TensorShape):\n    return x.dims\n  r = tf.TensorShape(x).dims\n  return None if r is None else list(map(tf.compat.dimension_value, r))", "docstring": "Returns a list of dimension sizes, or `None` if `rank` is unknown.\n\nFor more details, see `help(tf.TensorShape.dims)`.\n\nArgs:\nx: object representing a shape; convertible to `tf.TensorShape`.\n\nReturns:\nshape_as_list: list of sizes or `None` values representing each\ndimensions size if known. A size is `tf.Dimension` if input is a\n`tf.TensorShape` and an `int` otherwise.", "source": "juraj-google-style"}
{"code": "def set_tuple_types(self, tuple_types):\n    if len(tuple_types) != self.number_of_tuple_elements:\n        raise ValueError(f'tuple_types is {str(tuple_types)}, but must be a list of length {self.number_of_tuple_elements}')\n    if self._frozen:\n        for frozen, updated in zip(self._tuple_types, tuple_types):\n            if frozen != updated:\n                raise ValueError(f'Trying to update InfeedQueue with frozen configuration with an incompatible type. Frozen types are {str(self._tuple_types)}, updated types are {str(tuple_types)}')\n    else:\n        try:\n            self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types]\n        except TypeError as e:\n            raise TypeError(f'tuple_types is {str(tuple_types)}, but must be a list of elements each convertible to dtype: got error {str(e)}') from e", "docstring": "Sets the type of each element of the queue.\n\ntuple_types must be a list of length\nself.number_of_tuple_elements, and each element must be\nconvertible to a dtype.\n\nArgs:\ntuple_types: the types of each queue element.\n\nRaises:\nValueError: if tuple_types is not of length\nself.number_of_tuple_elements.\nTypeError: if an element of tuple_types cannot be converted to a\ndtype.", "source": "github-repos"}
{"code": "def translate_opcodes(code_obj, target):\n    \n\n    target = get_py_internals(target)\n    src_ops = code_obj.disassemble()\n\n    dst_opmap = target['opmap']\n    dst_ops = []\n\n    op_iter = enumerate(src_ops)\n    for i, op in op_iter:\n        if isinstance(op, pwnypack.bytecode.Label):\n            dst_ops.append(op)\n            continue\n\n        if op.name not in dst_opmap:\n            if op.name == 'POP_JUMP_IF_FALSE' and 'JUMP_IF_TRUE' in dst_opmap:\n                lbl = pwnypack.bytecode.Label()\n                dst_ops.extend([\n                    pwnypack.bytecode.Op('JUMP_IF_TRUE', lbl),\n                    pwnypack.bytecode.Op('POP_TOP', None),\n                    pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),\n                    lbl,\n                    pwnypack.bytecode.Op('POP_TOP', None),\n                ])\n            elif op.name == 'POP_JUMP_IF_TRUE' and 'JUMP_IF_FALSE' in dst_opmap:\n                lbl = pwnypack.bytecode.Label()\n                dst_ops.extend([\n                    pwnypack.bytecode.Op('JUMP_IF_FALSE', lbl),\n                    pwnypack.bytecode.Op('POP_TOP', None),\n                    pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),\n                    lbl,\n                    pwnypack.bytecode.Op('POP_TOP', None),\n                ])\n            elif op.name == 'JUMP_IF_FALSE' and 'JUMP_IF_FALSE_OR_POP' in dst_opmap and \\\n                    src_ops[i + 1].name == 'POP_TOP':\n                next(op_iter)\n                dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_FALSE_OR_POP', op.arg))\n            elif op.name == 'JUMP_IF_TRUE' and 'JUMP_IF_TRUE_OR_POP' in dst_opmap and \\\n                    src_ops[i + 1].name == 'POP_TOP':\n                next(op_iter)\n                dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_TRUE_OR_POP', op.arg))\n            else:\n                raise SyntaxError('Opcode %s not supported on target.' % op.name)\n        else:\n            dst_ops.append(op)\n\n    code_obj.assemble(dst_ops, target)", "docstring": "Very crude inter-python version opcode translator. Raises SyntaxError when\nthe opcode doesn't exist in the destination opmap. Used to transcribe\npython code objects between python versions.\n\nArguments:\ncode_obj(pwnypack.bytecode.CodeObject): The code object representation\nto translate.\ntarget(dict): The py_internals structure for the target\npython version.", "source": "juraj-google-style"}
{"code": "def verify(self, flag_values):\n    \n    param = self._get_input_to_checker_function(flag_values)\n    if not self.checker(param):\n      raise _exceptions.ValidationError(self.message)", "docstring": "Verifies that constraint is satisfied.\n\nflags library calls this method to verify Validator's constraint.\n\nArgs:\nflag_values: flags.FlagValues, the FlagValues instance to get flags from.\nRaises:\nError: Raised if constraint is not satisfied.", "source": "juraj-google-style"}
{"code": "def is_original_format(tweet):\n    \n    \n    \n    if \"created_at\" in tweet:\n        original_format = True\n    elif \"postedTime\" in tweet:\n        original_format = False\n    else:\n        raise NotATweetError(\"This dict has neither 'created_at' or 'postedTime' as keys\")\n    return original_format", "docstring": "Simple checker to flag the format of a tweet.\n\nArgs:\ntweet (Tweet): tweet in qustion\n\nReturns:\nBool\n\nExample:\n>>> import tweet_parser.tweet_checking as tc\n>>> tweet = {\"created_at\": 124125125125,\n...          \"text\": \"just setting up my twttr\",\n...          \"nested_field\": {\"nested_1\": \"field\", \"nested_2\": \"field2\"}}\n>>> tc.is_original_format(tweet)\nTrue", "source": "juraj-google-style"}
{"code": "def validates(version):\n    \n\n    def _validates(cls):\n        validators[version] = cls\n        meta_schema_id = cls.ID_OF(cls.META_SCHEMA)\n        if meta_schema_id:\n            meta_schemas[meta_schema_id] = cls\n        return cls\n    return _validates", "docstring": "Register the decorated validator for a ``version`` of the specification.\n\nRegistered validators and their meta schemas will be considered when\nparsing ``$schema`` properties' URIs.\n\nArguments:\n\nversion (str):\n\nAn identifier to use as the version's name\n\nReturns:\n\ncallable: a class decorator to decorate the validator with the version", "source": "juraj-google-style"}
{"code": "def to_pandas(self, is_transposed=False):\n        \n        \n        \n        \n        \n        if is_transposed:\n            return self.transpose().to_pandas(False).T\n        else:\n            retrieved_objects = [\n                [obj.to_pandas() for obj in part] for part in self.partitions\n            ]\n            if all(\n                isinstance(part, pandas.Series)\n                for row in retrieved_objects\n                for part in row\n            ):\n                axis = 0\n            elif all(\n                isinstance(part, pandas.DataFrame)\n                for row in retrieved_objects\n                for part in row\n            ):\n                axis = 1\n            else:\n                ErrorMessage.catch_bugs_and_request_email(True)\n            df_rows = [\n                pandas.concat([part for part in row], axis=axis)\n                for row in retrieved_objects\n                if not all(part.empty for part in row)\n            ]\n            if len(df_rows) == 0:\n                return pandas.DataFrame()\n            else:\n                return pandas.concat(df_rows)", "docstring": "Convert this object into a Pandas DataFrame from the partitions.\n\nArgs:\nis_transposed: A flag for telling this object that the external\nrepresentation is transposed, but not the internal.\n\nReturns:\nA Pandas DataFrame", "source": "juraj-google-style"}
{"code": "def load_flax_sharded_weights(cls, shard_files):\n    state_sharded_dict = {}\n    for shard_file in shard_files:\n        try:\n            with open(shard_file, 'rb') as state_f:\n                state = from_bytes(cls, state_f.read())\n        except (UnpicklingError, msgpack.exceptions.ExtraData) as e:\n            with open(shard_file) as f:\n                if f.read().startswith('version'):\n                    raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.')\n                else:\n                    raise ValueError from e\n        except (UnicodeDecodeError, ValueError):\n            raise OSError(f'Unable to convert {shard_file} to Flax deserializable object. ')\n        state = flatten_dict(state, sep='/')\n        state_sharded_dict.update(state)\n        del state\n        gc.collect()\n    return unflatten_dict(state_sharded_dict, sep='/')", "docstring": "This is the same as [`flax.serialization.from_bytes`]\n(https:lax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.\n\nThis load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being\nloaded in the model.\n\nArgs:\nshard_files (`List[str]`:\nThe list of shard files to load.\n\nReturns:\n`Dict`: A nested dictionary of the model parameters, in the expected format for flax models : `{'model':\n{'params': {'...'}}}`.", "source": "github-repos"}
{"code": "def get_structure_seqrecords(model):\n    \n\n    structure_seq_records = []\n\n    \n    for chain in model:\n        tracker = 0\n        chain_seq = ''\n        chain_resnums = []\n\n        \n        for res in chain.get_residues():\n            \n            res_id = res.id\n            res_num = res_id[1]\n            res_icode = res_id[2]\n\n            \n            \n            \n            if Polypeptide.is_aa(res, standard=True):\n                end_tracker = res_num\n                res_aa_one = Polypeptide.three_to_one(res.get_resname())\n\n                \n                if end_tracker != (tracker + 1):\n                    if res_icode != ' ':\n                        chain_seq += res_aa_one\n                        chain_resnums.append(res_num)\n                        tracker = end_tracker + 1\n                        continue\n                    else:\n                        multiplier = (end_tracker - tracker - 1)\n                        chain_seq += 'X' * multiplier\n                        \n                        chain_resnums.extend([float(\"Inf\")] * multiplier)\n\n                chain_seq += res_aa_one\n                chain_resnums.append(res_num)\n                tracker = end_tracker\n\n            else:\n                continue\n\n        chain_seq_record = SeqRecord(Seq(chain_seq, IUPAC.protein), id=chain.get_id())\n        chain_seq_record.letter_annotations['structure_resnums'] = chain_resnums\n        structure_seq_records.append(chain_seq_record)\n\n    return structure_seq_records", "docstring": "Get a dictionary of a PDB file's sequences.\n\nSpecial cases include:\n- Insertion codes. In the case of residue numbers like \"15A\", \"15B\", both residues are written out. Example: 9LPR\n- HETATMs. Currently written as an \"X\", or unknown amino acid.\n\nArgs:\nmodel: Biopython Model object of a Structure\n\nReturns:\nlist: List of SeqRecords", "source": "juraj-google-style"}
{"code": "def lin_moma(self, wt_fluxes):\n    reactions = set(self._adjustment_reactions())\n    z_diff = self._z_diff\n    v = self._v\n    with self.constraints() as constr:\n        for (f_reaction, f_value) in iteritems(wt_fluxes):\n            if (f_reaction in reactions):\n                constr.add((z_diff[f_reaction] >= (f_value - v[f_reaction])), ((f_value - v[f_reaction]) >= (- z_diff[f_reaction])))\n        self._prob.set_objective(z_diff.sum(reactions))\n        self._solve(lp.ObjectiveSense.Minimize)", "docstring": "Minimize the redistribution of fluxes using a linear objective.\n\nThe change in flux distribution is mimimized by minimizing the sum\nof the absolute values of the differences of wild type FBA solution\nand the knockout strain flux solution.\n\nThis formulation bases the solution on the wild type fluxes that\nare specified by the user. If these wild type fluxes were calculated\nusing FBA, then an arbitrary flux vector that optimizes the objective\nfunction is used. See [Segre`_02] for more information.\n\nArgs:\nwt_fluxes: Dictionary of all the wild type fluxes. Use\n:meth:`.get_fba_flux(objective)` to return a dictionary of\nfluxes found by FBA.", "source": "codesearchnet"}
{"code": "def _get_bond_data(line):\n        \n\n        line = line.split()\n        length = float(line[2])\n        \n        sites = line[0].replace(\"/\", \"-\").split(\"-\")\n        site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])\n        species = tuple(re.split(r\"\\d+\", spec)[0] for spec in sites[0:3:2])\n        label = \"%s%d-%s%d\" % (species[0], site_indices[0] + 1,\n                               species[1], site_indices[1] + 1)\n        return label, length, site_indices", "docstring": "Subroutine to extract bond label, site indices, and length from\na COPL header line. The site indices are zero-based, so they\ncan be easily used with a Structure object.\n\nExample header line: Fe-1/Fe-1-tr(-1,-1,-1) :  2.482 Ang.\n\nArgs:\nline: line in the COHPCAR header describing the bond.\n\nReturns:\nThe bond label, the bond length and a tuple of the site\nindices.", "source": "juraj-google-style"}
{"code": "class TableTransformerEncoder(TableTransformerPreTrainedModel):\n\n    def __init__(self, config: TableTransformerConfig):\n        super().__init__(config)\n        self.dropout = config.dropout\n        self.layerdrop = config.encoder_layerdrop\n        self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])\n        self.layernorm = nn.LayerNorm(config.d_model)\n        self.post_init()\n\n    def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        hidden_states = inputs_embeds\n        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n        if attention_mask is not None:\n            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            to_drop = False\n            if self.training:\n                dropout_probability = torch.rand([])\n                if dropout_probability < self.layerdrop:\n                    to_drop = True\n            if to_drop:\n                layer_outputs = (None, None)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n                hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        hidden_states = self.layernorm(hidden_states)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n[`TableTransformerEncoderLayer`].\n\nThe encoder updates the flattened feature map through multiple self-attention layers.\n\nSmall tweak for Table Transformer:\n\n- object_queries are added to the forward pass.\n\nArgs:\nconfig: TableTransformerConfig", "source": "github-repos"}
{"code": "def create_sp(operations, operation):\n    \n    operations.execute(\n        \"CREATE FUNCTION %s %s\" % (\n            operation.target.name, operation.target.sqltext\n        )\n    )", "docstring": "Implements ``CREATE FUNCTION``.\n\nArgs:\noperations: instance of ``alembic.operations.base.Operations``\noperation: instance of :class:`.ReversibleOp`\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def _GenerateStatsTable(self, feed_merger):\n    rows = []\n    rows.append('<tr><th class=\"header\"/><th class=\"header\">Merged</th><th class=\"header\">Copied from old feed</th><th class=\"header\">Copied from new feed</th></tr>')\n    for merger in feed_merger.GetMergerList():\n        stats = merger.GetMergeStats()\n        if (stats is None):\n            continue\n        (merged, not_merged_a, not_merged_b) = stats\n        rows.append(('<tr><th class=\"header\">%s</th><td class=\"header\">%d</td><td class=\"header\">%d</td><td class=\"header\">%d</td></tr>' % (merger.DATASET_NAME, merged, not_merged_a, not_merged_b)))\n    return ('<table>%s</table>' % '\\n'.join(rows))", "docstring": "Generate an HTML table of merge statistics.\n\nArgs:\nfeed_merger: The FeedMerger instance.\n\nReturns:\nThe generated HTML as a string.", "source": "codesearchnet"}
{"code": "def validate(self):\n    self._attribute_errors = dict()\n    for (local_name, attribute) in self._attributes.items():\n        value = getattr(self, local_name, None)\n        if (attribute.is_required and ((value is None) or (value == ''))):\n            self._attribute_errors[local_name] = {'title': 'Invalid input', 'description': 'This value is mandatory.', 'remote_name': attribute.remote_name}\n            continue\n        if (value is None):\n            continue\n        if (not self._validate_type(local_name, attribute.remote_name, value, attribute.attribute_type)):\n            continue\n        if ((attribute.min_length is not None) and (len(value) < attribute.min_length)):\n            self._attribute_errors[local_name] = {'title': 'Invalid length', 'description': ('Attribute %s minimum length should be %s but is %s' % (attribute.remote_name, attribute.min_length, len(value))), 'remote_name': attribute.remote_name}\n            continue\n        if ((attribute.max_length is not None) and (len(value) > attribute.max_length)):\n            self._attribute_errors[local_name] = {'title': 'Invalid length', 'description': ('Attribute %s maximum length should be %s but is %s' % (attribute.remote_name, attribute.max_length, len(value))), 'remote_name': attribute.remote_name}\n            continue\n        if (attribute.attribute_type == list):\n            valid = True\n            for item in value:\n                if (valid is True):\n                    valid = self._validate_value(local_name, attribute, item)\n        else:\n            self._validate_value(local_name, attribute, value)\n    return self.is_valid()", "docstring": "Validate the current object attributes.\n\nCheck all attributes and store errors\n\nReturns:\nReturns True if all attibutes of the object\nrespect contraints. Returns False otherwise and\nstore error in errors dict.", "source": "codesearchnet"}
{"code": "def relpath(self, path):\n        \n        for root in self.roots:\n            \n            if isinstance(root, Pattern):\n                match = root.match(path)\n                if not match:\n                    continue\n                root = match.group(0)\n\n            \n            try:\n                relative = path.split(root, 1)[1]\n                \n                \n                return relative.lstrip('/')\n            except IndexError:\n                continue\n\n        return path", "docstring": "Get path relative to storage.\n\nargs:\npath (str): Absolute path or URL.\n\nReturns:\nstr: relative path.", "source": "juraj-google-style"}
{"code": "def random_new(algo: int = RNG_CMWC) -> tcod.random.Random:\n    \n    return tcod.random.Random(algo)", "docstring": "Return a new Random instance.  Using ``algo``.\n\nArgs:\nalgo (int): The random number algorithm to use.\n\nReturns:\nRandom: A new Random instance using the given algorithm.", "source": "juraj-google-style"}
{"code": "def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):\n    \n    source_file_object = file_entry.GetFileObject(\n        data_stream_name=data_stream_name)\n    if not source_file_object:\n      return\n\n    try:\n      with open(destination_file, 'wb') as destination_file_object:\n        source_file_object.seek(0, os.SEEK_SET)\n\n        data = source_file_object.read(self._COPY_BUFFER_SIZE)\n        while data:\n          destination_file_object.write(data)\n          data = source_file_object.read(self._COPY_BUFFER_SIZE)\n\n    finally:\n      source_file_object.close()", "docstring": "Writes the contents of the source file entry to a destination file.\n\nNote that this function will overwrite an existing file.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry whose content is to be written.\ndata_stream_name (str): name of the data stream whose content is to be\nwritten.\ndestination_file (str): path of the destination file.", "source": "juraj-google-style"}
{"code": "def connect(self, component):\n    if (not isinstance(component, ThreadPool)):\n        raise TypeError('\"component\" must be a ThreadPool object')\n    component.in_queue = self.out_queue\n    return component", "docstring": "Connect two ThreadPools.\n\nThe ``in_queue`` of the second pool will be set as the ``out_queue`` of\nthe current pool, thus all the output will be input to the second pool.\n\nArgs:\ncomponent (ThreadPool): the ThreadPool to be connected.\nReturns:\nThreadPool: the modified second ThreadPool.", "source": "codesearchnet"}
{"code": "def parse(self, words):\n        \n        def exact(words):\n            \n            try:\n                return float(words)\n            except:\n                return None\n\n        guess = exact(words)\n        if guess is not None:\n            return guess\n\n        split = words.split(' ')\n\n        \n        if split[-1] in self.__fractions__:\n            split[-1] = self.__fractions__[split[-1]]\n        elif split[-1] in self.__ordinals__:\n            split[-1] = self.__ordinals__[split[-1]]\n\n        parsed_ordinals = ' '.join(split)\n\n        return self.parseFloat(parsed_ordinals)", "docstring": "A general method for parsing word-representations of numbers.\nSupports floats and integers.\n\nArgs:\nwords (str): Description of an arbitrary number.\n\nReturns:\nA double representation of the words.", "source": "juraj-google-style"}
{"code": "def update(self, data):\n        \n        \n        updated = False\n\n        if 'state' in data:\n            updated = self.set_property('state', data['state'])\n\n        if 'end' in data:\n            updated |= self.set_property('end', data['end'])\n\n        if 'last_alert' in data:\n            updated |= self.set_property('last_alert', data['last_alert'])\n\n        return updated", "docstring": "Updates the object information based on live data, if there were any changes made. Any changes will be\nautomatically applied to the object, but will not be automatically persisted. You must manually call\n`db.session.add(instance)` on the object.\n\nArgs:\ndata (:obj:): AWS API Resource object fetched from AWS API\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def system_repertoire_distance(r1, r2):\n    \n    if config.MEASURE in measures.asymmetric():\n        raise ValueError(\n            '{} is asymmetric and cannot be used as a system-level '\n            'irreducibility measure.'.format(config.MEASURE))\n\n    return measures[config.MEASURE](r1, r2)", "docstring": "Compute the distance between two repertoires of a system.\n\nArgs:\nr1 (np.ndarray): The first repertoire.\nr2 (np.ndarray): The second repertoire.\n\nReturns:\nfloat: The distance between ``r1`` and ``r2``.", "source": "juraj-google-style"}
{"code": "def repr(self, changed_widgets=None):\n    if (changed_widgets is None):\n        changed_widgets = {}\n    return super(Widget, self).repr(changed_widgets)", "docstring": "Represents the widget as HTML format, packs all the attributes, children and so on.\n\nArgs:\nclient (App): Client instance.\nchanged_widgets (dict): A dictionary containing a collection of widgets that have to be updated.\nThe Widget that have to be updated is the key, and the value is its textual repr.", "source": "codesearchnet"}
{"code": "def _OpenParentFile(self, file_system, path_spec, vhdi_file):\n    \n    location = getattr(path_spec, 'location', None)\n    if not location:\n      raise errors.PathSpecError(\n          'Unsupported path specification without location.')\n\n    location_path_segments = file_system.SplitPath(location)\n\n    parent_filename = vhdi_file.parent_filename\n    _, _, parent_filename = parent_filename.rpartition('\\\\')\n\n    location_path_segments.pop()\n    location_path_segments.append(parent_filename)\n    parent_file_location = file_system.JoinPath(location_path_segments)\n\n    \n    \n    \n    kwargs = path_spec_factory.Factory.GetProperties(path_spec)\n\n    kwargs['location'] = parent_file_location\n    if path_spec.parent is not None:\n      kwargs['parent'] = path_spec.parent\n\n    parent_file_path_spec = path_spec_factory.Factory.NewPathSpec(\n        path_spec.type_indicator, **kwargs)\n\n    if not file_system.FileEntryExistsByPathSpec(parent_file_path_spec):\n      return\n\n    file_object = resolver.Resolver.OpenFileObject(\n        parent_file_path_spec, resolver_context=self._resolver_context)\n\n    vhdi_parent_file = pyvhdi.file()\n    vhdi_parent_file.open_file_object(file_object)\n\n    if vhdi_parent_file.parent_identifier:\n      self._OpenParentFile(\n          file_system, parent_file_path_spec, vhdi_parent_file)\n\n    vhdi_file.set_parent(vhdi_parent_file)\n\n    self._parent_vhdi_files.append(vhdi_parent_file)\n    self._sub_file_objects.append(file_object)", "docstring": "Opens the parent file.\n\nArgs:\nfile_system (FileSystem): file system of the VHDI file.\npath_spec (PathSpec): path specification of the VHDI file.\nvhdi_file (pyvhdi.file): VHDI file.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "def _queue_dag(self, name, *, data=None):\n        \n        if self._stop_workflow:\n            return None\n\n        if name not in self._dags_blueprint:\n            raise DagNameUnknown()\n\n        new_dag = copy.deepcopy(self._dags_blueprint[name])\n        new_dag.workflow_name = self.name\n        self._dags_running[new_dag.name] = self._celery_app.send_task(\n            JobExecPath.Dag, args=(new_dag, self._workflow_id, data),\n            queue=new_dag.queue, routing_key=new_dag.queue)\n\n        return new_dag.name", "docstring": "Add a new dag to the queue.\n\nIf the stop workflow flag is set, no new dag can be queued.\n\nArgs:\nname (str): The name of the dag that should be queued.\ndata (MultiTaskData): The data that should be passed on to the new dag.\n\nRaises:\nDagNameUnknown: If the specified dag name does not exist\n\nReturns:\nstr: The name of the queued dag.", "source": "juraj-google-style"}
{"code": "def _build_orthogonal_rings(core_locations: List[_CoreLocation], ring_size: int, rotate_ring_across_rings: bool) -> List[_CoreLocation]:\n    num_cores = len(core_locations)\n    permutation = _build_all_reduce_ring(core_locations[:ring_size])\n    for r in range(0, num_cores, ring_size):\n        core_locations[r:r + ring_size] = [core_locations[r + permutation[i]] for i in range(ring_size)]\n    logging.vlog(1, 'Permutated core locations: %s', core_locations)\n    transposed = []\n    for i in range(ring_size):\n        transposed += [core_locations[g + i] for g in range(0, num_cores, ring_size)]\n    num_rings = int(num_cores / ring_size)\n    permutation = _build_all_reduce_ring(transposed[:num_rings], rotate=rotate_ring_across_rings)\n    for r in range(0, num_cores, num_rings):\n        transposed[r:r + num_rings] = [transposed[r + permutation[i]] for i in range(num_rings)]\n    untransposed = []\n    for i in range(num_rings):\n        untransposed += [transposed[g + i] for g in range(0, num_cores, num_rings)]\n    logging.vlog(1, 'Stride-permutated core locations: %s', untransposed)\n    return untransposed", "docstring": "Build two all-reduce rings orthogonal to each other.\n\nOne ring includes every `ring_size` consecutive core locations. It is usually\napplied to the model-parallel dimension of a mesh to achieve best 1D\nall-reduce performance. The other ring includes core locations separated by\na stride of `ring_size`. It is usually applied to the data-parallel dimension\nof a mesh to get predictable strided all-reduce performance.\n\nArgs:\ncore_locations: A list of core locations expressed as [x, y, z, core].\nring_size: The number of core locations in the consecutive ring.\nrotate_ring_across_rings: Build column-major secondary rings.\n\nReturns:\nA permutation of the input list forming the described rings.", "source": "github-repos"}
{"code": "def log_and_count(self, event_str, msg_str=None, inc_int=None):\n    logger.info(' - '.join(map(str, [v for v in (event_str, msg_str, inc_int) if v])))\n    self.count(event_str, (inc_int or 1))", "docstring": "Count an event and write a message to a logger.\n\nArgs:\nevent_str: str\nThe name of an event to count. Used as a key in the event dict. The same\nname will be used in the summary. This also becomes a part of the message\nlogged by this function.\n\nmsg_str: str\nOptional message with details about the events. The message is only written\nto the log. While the ``event_str`` functions as a key and must remain the\nsame for the same type of event, ``log_str`` may change between calls.\n\ninc_int: int\nOptional argument to increase the count for the event by more than 1.", "source": "codesearchnet"}
{"code": "def __get_default_value_from_element(self, element):\n        \n\n        if element.name == \"select\":\n            options = element.find_all(\"option\")\n            is_multiple = element.has_attr(\"multiple\")\n\n            selected_options = [\n                option for option in options\n                if option.has_attr(\"selected\")\n            ]\n\n            if not selected_options and options:\n                selected_options = [options[0]]\n\n            selected_values = []\n\n            if is_multiple:\n                for option in selected_options:\n                    value = option[\"value\"] if option.has_attr(\"value\") else option.string\n                    selected_values.append(value)\n\n                return selected_values\n            elif len(selected_options) >= 1:\n                if selected_options[0].has_attr(\"value\"):\n                    return selected_options[0][\"value\"]\n                else:\n                    return selected_options[0].string\n\n            return \"\"\n\n        if element.name == \"textarea\":\n            return element.string if element.string is not None else \"\"\n\n        if element.name == \"input\" and element.has_attr(\"type\"):\n            if element[\"type\"] in (\"checkbox\", \"radio\"):\n                if not element.has_attr(\"checked\"):\n                    return False\n\n                if element.has_attr(\"value\"):\n                    return element[\"value\"]\n                else:\n                    return \"on\"\n\n        if element.has_attr(\"value\"):\n            return element[\"value\"]\n\n        return \"\"", "docstring": "Get the default value of a form element\n\nArgs:\nelements (obj): The soup element.\n\nReturns:\nstr: The default value", "source": "juraj-google-style"}
{"code": "def get_region(b):\n    \n    remap = {None: 'us-east-1', 'EU': 'eu-west-1'}\n    region = b.get('Location', {}).get('LocationConstraint')\n    return remap.get(region, region)", "docstring": "Tries to get the bucket region from Location.LocationConstraint\n\nSpecial cases:\nLocationConstraint EU defaults to eu-west-1\nLocationConstraint null defaults to us-east-1\n\nArgs:\nb (object): A bucket object\n\nReturns:\nstring: an aws region string", "source": "juraj-google-style"}
{"code": "def get_page_artid(self, separator='-'):\n    publication_info = get_value(self.record, 'publication_info[0]', default={})\n    return LiteratureReader.get_page_artid_for_publication_info(publication_info, separator)", "docstring": "Return the page range or the article id of a record.\n\nArgs:\nseparator(basestring): optional page range symbol, defaults to a single dash\n\nReturns:\nstring: the page range or the article id of the record.\n\nExamples:\n>>> record = {\n...     'publication_info': [\n...         {'artid': '054021'},\n...     ],\n... }\n>>> LiteratureReader(record).get_page_artid()\n'054021'", "source": "codesearchnet"}
{"code": "def _prepare_doc(func, args, delimiter_chars):\n    _LOG.debug(\"Preparing doc for '%s'\", func.__name__)\n    if (not func.__doc__):\n        return _get_default_help_message(func, args)\n    description = []\n    args_help = {}\n    fill_description = True\n    arg_name = None\n    arg_doc_regex = re.compile(('\\x08*(?P<arg_name>\\\\w+)\\\\s*%s\\\\s*(?P<help_msg>.+)' % delimiter_chars))\n    for line in func.__doc__.splitlines():\n        line = line.strip()\n        if (line and fill_description):\n            description.append(line)\n        elif line:\n            arg_match = arg_doc_regex.match(line)\n            try:\n                arg_name = arg_match.groupdict()['arg_name'].strip()\n                args_help[arg_name] = arg_match.groupdict()['help_msg'].strip()\n            except AttributeError:\n                if (arg_name is not None):\n                    args_help[arg_name] = ' '.join([args_help[arg_name], line])\n        else:\n            if ((not fill_description) and args_help):\n                break\n            fill_description = False\n    return _get_default_help_message(func, args, ' '.join(description), args_help)", "docstring": "From the function docstring get the arg parse description and arguments\nhelp message. If there is no docstring simple description and help\nmessage are created.\n\nArgs:\nfunc: the function that needs argument parsing\nargs: name of the function arguments\ndelimiter_chars: characters used to separate the parameters from their\nhelp message in the docstring\n\nReturns:\nA tuple containing the description to be used in the argument parser and\na dict indexed on the callable argument name and their associated help\nmessage", "source": "codesearchnet"}
{"code": "def _load_attributes(self, mft_config, attrs_view):\n    offset = 0\n    load_attrs = mft_config.attribute_load_list\n    while (attrs_view[offset:(offset + 4)] != b'\\xff\\xff\\xff\\xff'):\n        (attr_type, attr_len, non_resident) = _get_attr_info(attrs_view[offset:])\n        if (attr_type in load_attrs):\n            attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])\n            if (not (attr.header.attr_type_id is AttrTypes.DATA)):\n                self.attrs[attr.header.attr_type_id].append(attr)\n            else:\n                self._add_data_attribute(attr)\n        offset += attr_len", "docstring": "Loads all the attributes of an entry.\n\nOnce executed, all the attributes should have been loaded in the\nattribute *attrs* instance attribute.\n\nArgs:\nmft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells\nhow the library will interpret data.\nattrs_view (memoryview(bytearray)) - A binary stream that starts at\nthe first attribute until the end of the entry", "source": "codesearchnet"}
{"code": "def start(self, attempts=5, timeout=2):\n    if (not self.alive()):\n        with LogTask(('Create network %s' % self.name())):\n            net = self.libvirt_con.networkCreateXML(self._libvirt_xml())\n            if (net is None):\n                raise RuntimeError(('failed to create network, XML: %s' % self._libvirt_xml()))\n            for _ in range(attempts):\n                if net.isActive():\n                    return\n                LOGGER.debug('waiting for network %s to become active', net.name())\n                time.sleep(timeout)\n            raise RuntimeError(('failed to verify network %s is active' % net.name()))", "docstring": "Start the network, will check if the network is active ``attempts``\ntimes, waiting ``timeout`` between each attempt.\n\nArgs:\nattempts (int): number of attempts to check the network is active\ntimeout  (int): timeout for each attempt\n\nReturns:\nNone\n\nRaises:\nRuntimeError: if network creation failed, or failed to verify it is\nactive.", "source": "codesearchnet"}
{"code": "def _client_send(self, message):\n    try:\n        self._client.write(f'{message}\\n'.encode('utf8'))\n        self._client.flush()\n    except socket.error as e:\n        raise errors.Error(self._device, f'Encountered socket error \"{e}\" sending RPC message \"{message}\"') from e", "docstring": "Sends an RPC message through the connection.\n\nArgs:\nmessage: str, the message to send.\n\nRaises:\nerrors.Error: if a socket error occurred during the send.", "source": "github-repos"}
{"code": "def run(self, stim, merge=True, **merge_kwargs):\n    results = list(chain(*[self.run_node(n, stim) for n in self.roots]))\n    results = list(flatten(results))\n    self._results = results\n    return (merge_results(results, **merge_kwargs) if merge else results)", "docstring": "Executes the graph by calling all Transformers in sequence.\n\nArgs:\nstim (str, Stim, list): One or more valid inputs to any\nTransformer's 'transform' call.\nmerge (bool): If True, all results are merged into a single pandas\nDataFrame before being returned. If False, a list of\nExtractorResult objects is returned (one per Extractor/Stim\ncombination).\nmerge_kwargs: Optional keyword arguments to pass onto the\nmerge_results() call.", "source": "codesearchnet"}
{"code": "def popular(self, **kwargs):\n    path = self._get_path('popular')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of popular movies on The Movie Database. This list\nrefreshes every day.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def register_layouts(layouts, app, url='/api/props/', brand='Pyxley'):\n\n    def props(name):\n        if (name not in layouts):\n            name = list(layouts.keys())[0]\n        return jsonify({'layouts': layouts[name]['layout']})\n\n    def apps():\n        paths = []\n        for (i, k) in enumerate(layouts.keys()):\n            if (i == 0):\n                paths.append({'path': '/', 'label': layouts[k].get('title', k)})\n            paths.append({'path': ('/' + k), 'label': layouts[k].get('title', k)})\n        return jsonify({'brand': brand, 'navlinks': paths})\n    app.add_url_rule((url + '<string:name>/'), view_func=props)\n    app.add_url_rule(url, view_func=apps)", "docstring": "register UILayout with the flask app\n\ncreate a function that will send props for each UILayout\n\nArgs:\nlayouts (dict): dict of UILayout objects by name\napp (object): flask app\nurl (string): address of props; default is /api/props/", "source": "codesearchnet"}
{"code": "def deconstruct_single_qubit_matrix_into_angles(\n        mat: np.ndarray) -> Tuple[float, float, float]:\n    \n    \n    right_phase = cmath.phase(mat[0, 1] * np.conj(mat[0, 0])) + math.pi\n    mat = np.dot(mat, _phase_matrix(-right_phase))\n\n    \n    bottom_phase = cmath.phase(mat[1, 0] * np.conj(mat[0, 0]))\n    mat = np.dot(_phase_matrix(-bottom_phase), mat)\n\n    \n    rotation = math.atan2(abs(mat[1, 0]), abs(mat[0, 0]))\n    mat = np.dot(_rotation_matrix(-rotation), mat)\n\n    \n    diagonal_phase = cmath.phase(mat[1, 1] * np.conj(mat[0, 0]))\n\n    \n    return right_phase + diagonal_phase, rotation * 2, bottom_phase", "docstring": "Breaks down a 2x2 unitary into more useful ZYZ angle parameters.\n\nArgs:\nmat: The 2x2 unitary matrix to break down.\n\nReturns:\nA tuple containing the amount to phase around Z, then rotate around Y,\nthen phase around Z (all in radians).", "source": "juraj-google-style"}
{"code": "def _parse_input_data(self, node):\n        \n        data = DotDict()\n        try:\n            for nod in self._get_input_nodes(node):\n                data.update(self._parse_input_node(nod))\n        except Exception as e:\n            log.exception(\"Error while processing node: %s\" % node)\n        return data", "docstring": "Parses inputOutput part camunda modeller extensions.\nArgs:\nnode: SpiffWorkflow Node object.\n\nReturns:\nData dict.", "source": "juraj-google-style"}
{"code": "def load(self, source, pause=False):\n        \n        self._source = source\n        self._load_source(source)\n        if pause:\n            time.sleep(0.5)  \n            self.pause()", "docstring": "Loads a new source (as a file) from ``source`` (a file path or URL)\nby killing the current ``omxplayer`` process and forking a new one.\n\nArgs:\nsource (string): Path to the file to play or URL", "source": "juraj-google-style"}
{"code": "def _retry_on_appropriate_gcp_error(exception):\n    return isinstance(exception, (TooManyRequests, ServerError))", "docstring": "Retry filter that returns True if a returned HTTP error code is 5xx or 429.\nThis is used to retry remote requests that fail, most notably 429\n(TooManyRequests.)\n\nArgs:\nexception: the returned exception encountered during the request/response\nloop.\n\nReturns:\nboolean indication whether or not the exception is a Server Error (5xx) or\na TooManyRequests (429) error.", "source": "github-repos"}
{"code": "def _maybe_extract(compressed_filename, directory, extension=None):\n    logger.info('Extracting {}'.format(compressed_filename))\n    if (extension is None):\n        basename = os.path.basename(compressed_filename)\n        extension = basename.split('.', 1)[1]\n    if ('zip' in extension):\n        with zipfile.ZipFile(compressed_filename, 'r') as zip_:\n            zip_.extractall(directory)\n    elif (('tar' in extension) or ('tgz' in extension)):\n        with tarfile.open(compressed_filename, mode='r') as tar:\n            tar.extractall(path=directory)\n    logger.info('Extracted {}'.format(compressed_filename))", "docstring": "Extract a compressed file to ``directory``.\n\nArgs:\ncompressed_filename (str): Compressed file.\ndirectory (str): Extract to directory.\nextension (str, optional): Extension of the file; Otherwise, attempts to extract extension\nfrom the filename.", "source": "codesearchnet"}
{"code": "def dispatch(self, message):\n    for (validator, callback) in self.validators:\n        if (not validator.matches(message)):\n            continue\n        callback(message)\n        return\n    raise ArgumentError('No handler was registered for message', message=message)", "docstring": "Dispatch a message to a callback based on its schema.\n\nArgs:\nmessage (dict): The message to dispatch", "source": "codesearchnet"}
{"code": "def describe_training_job(self, TrainingJobName):\n    if (TrainingJobName not in LocalSagemakerClient._training_jobs):\n        error_response = {'Error': {'Code': 'ValidationException', 'Message': 'Could not find local training job'}}\n        raise ClientError(error_response, 'describe_training_job')\n    else:\n        return LocalSagemakerClient._training_jobs[TrainingJobName].describe()", "docstring": "Describe a local training job.\n\nArgs:\nTrainingJobName (str): Training job name to describe.\n\nReturns: (dict) DescribeTrainingJob Response.", "source": "codesearchnet"}
{"code": "def unlock_kinetis_read_until_ack(jlink, address):\n    request = swd.ReadRequest(address, ap=True)\n    response = None\n    while True:\n        response = request.send(jlink)\n        if response.ack():\n            break\n        elif response.wait():\n            continue\n        raise KinetisException('Read exited with status: %s', response.status)\n    return response", "docstring": "Polls the device until the request is acknowledged.\n\nSends a read request to the connected device to read the register at the\ngiven 'address'.  Polls indefinitely until either the request is ACK'd or\nthe request ends in a fault.\n\nArgs:\njlink (JLink): the connected J-Link\naddress (int) the address of the register to poll\n\nReturns:\n``SWDResponse`` object on success.\n\nRaises:\nKinetisException: when read exits with non-ack or non-wait status.\n\nNote:\nThis function is required in order to avoid reading corrupt or otherwise\ninvalid data from registers when communicating over SWD.", "source": "codesearchnet"}
{"code": "def call_with_layout(fn: Callable[..., Any], layout: Optional[layout_lib.Layout], *args, **kwargs) -> Any:\n    if layout is not None:\n        if context.executing_eagerly():\n            with default_mesh(layout.mesh):\n                with _dtensor_device()._default_layout(layout):\n                    return fn(*args, **kwargs)\n        else:\n            return relayout(fn(*args, **kwargs), layout)\n    return fn(*args, **kwargs)", "docstring": "Calls a function in the DTensor device scope if `layout` is not None.\n\nIf `layout` is not None, `fn` consumes DTensor(s) as input and produces a\nDTensor as output; a DTensor is a tf.Tensor with layout-related attributes.\n\nIf `layout` is None, `fn` consumes and produces regular tf.Tensors.\n\nArgs:\nfn: A supported TF API function such as tf.zeros.\nlayout: Optional, the layout of the output DTensor.\n*args:  Arguments given to `fn`.\n**kwargs: Keyword arguments given to `fn`.\n\nReturns:\nThe return value of `fn` transformed to a DTensor if requested.", "source": "github-repos"}
{"code": "def set_memcache_policy(self, func):\n    if (func is None):\n        func = self.default_memcache_policy\n    elif isinstance(func, bool):\n        func = (lambda unused_key, flag=func: flag)\n    self._memcache_policy = func", "docstring": "Set the memcache policy function.\n\nArgs:\nfunc: A function that accepts a Key instance as argument and returns\na bool indicating if it should be cached.  May be None.", "source": "codesearchnet"}
{"code": "def add(self, full_name: str, alias: str | None=None):\n    if not self.track_imports:\n        return\n    alias = alias or full_name\n    if '.' not in full_name or (full_name == alias and (not alias.endswith('.*'))):\n        self._direct_imports[alias] = full_name\n    else:\n        module, name = full_name.rsplit('.', 1)\n        if name == '*':\n            alias = '*'\n        if module == 'typing':\n            self._typing.add(name, alias)\n        else:\n            self._from_imports.setdefault(module, {})[alias] = name\n    self._reverse_alias_map[full_name] = alias", "docstring": "Adds an import.\n\nExamples:\n-------------------------------------------------------\nImport Statement           | Method Call\n-------------------------------------------------------\nimport abc                 | add('abc')\nimport abc as xyz          | add('abc', 'xyz')\nimport foo.bar             | add('foo.bar')\nfrom foo import bar        | add('foo.bar', 'bar')\nfrom foo import bar as baz | add('foo.bar', 'baz')\n\nArgs:\nfull_name: The full name of the thing being imported.\nalias: The name that the imported thing is assigned to.", "source": "github-repos"}
{"code": "def unstack(self, value, name=None):\n    return self._implementation.unstack(value, name=name)", "docstring": "Unstack the values of a `Tensor` in the TensorArray.\n\nIf input value shapes have rank-`R`, then the output TensorArray will\ncontain elements whose shapes are rank-`(R-1)`.\n\nArgs:\nvalue: (N+1)-D.  Tensor of type `dtype`.  The Tensor to unstack.\nname: A name for the operation (optional).\n\nReturns:\nA new TensorArray object with flow that ensures the unstack occurs.\nUse this object for all subsequent operations.\n\nRaises:\nValueError: if the shape inference fails.", "source": "github-repos"}
{"code": "def mom(self, K, **kws):\n    K = numpy.asarray(K, dtype=int)\n    shape = K.shape\n    dim = len(self)\n    if (dim > 1):\n        shape = shape[1:]\n    size = int((K.size / dim))\n    K = K.reshape(dim, size)\n    cache = {}\n    out = [evaluation.evaluate_moment(self, kdata, cache) for kdata in K.T]\n    out = numpy.array(out)\n    return out.reshape(shape)", "docstring": "Raw statistical moments.\n\nCreates non-centralized raw moments from the random variable. If\nanalytical options can not be utilized, Monte Carlo integration\nwill be used.\n\nArgs:\nK (numpy.ndarray):\nIndex of the raw moments. k.shape must be compatible with\ndistribution shape.  Sampling scheme when performing Monte\nCarlo\nrule (str):\nrule for estimating the moment if the analytical method fails.\ncomposite (numpy.ndarray):\nIf provided, composit quadrature will be used.  Ignored in the\ncase if gaussian=True.  If int provided, determines number of\neven domain splits. If array of ints, determines number of even\ndomain splits along each axis. If array of arrays/floats,\ndetermines location of splits.\nantithetic (numpy.ndarray):\nList of bool. Represents the axes to mirror using antithetic\nvariable during MCI.\n\nReturns:\n(numpy.ndarray):\nShapes are related through the identity\n``k.shape == dist.shape+k.shape``.", "source": "codesearchnet"}
{"code": "def makecontinuum(cube, **kwargs):\n    inchs = kwargs.pop('inchs', None)\n    exchs = kwargs.pop('exchs', None)\n    if ((inchs is not None) or (exchs is not None)):\n        raise KeyError('Inchs and exchs are no longer supported. Use weight instead.')\n    if (weight is None):\n        weight = 1.0\n    cont = ((cube * (1 / (weight ** 2))).sum(dim='ch') / (1 / (weight ** 2)).sum(dim='ch'))\n    xcoords = {'x': cube.x.values}\n    ycoords = {'y': cube.y.values}\n    chcoords = {'masterid': np.array([0]), 'kidid': np.array([0]), 'kidfq': np.array([0]), 'kidtp': np.array([1])}\n    scalarcoords = {'coordsys': cube.coordsys.values, 'datatype': cube.datatype.values, 'xref': cube.xref.values, 'yref': cube.yref.values}\n    return dc.cube(cont.values, xcoords=xcoords, ycoords=ycoords, chcoords=chcoords, scalarcoords=scalarcoords)", "docstring": "Make a continuum array.\n\nArgs:\ncube (decode.cube): Decode cube which will be averaged over channels.\nkwargs (optional): Other arguments.\ninchs (list): Included channel kidids.\nexchs (list): Excluded channel kidids.\n\nReturns:\ndecode cube (decode.cube): Decode cube (2d).", "source": "codesearchnet"}
{"code": "def generator_consumer(coro):  \n    \n    if not asyncio.iscoroutinefunction(coro):\n        raise TypeError('paco: coro must be a coroutine function')\n\n    @functools.wraps(coro)\n    @asyncio.coroutine\n    def wrapper(*args, **kw):\n        if len(args) > 1 and isgenerator(args[1]):\n            args = list(args)\n            args[1] = (yield from consume(args[1])\n                       if hasattr(args[1], '__anext__')\n                       else list(args[1]))\n            args = tuple(args)\n        return (yield from coro(*args, **kw))\n    return wrapper", "docstring": "Decorator wrapper that consumes sync/async generators provided as\ninterable input argument.\n\nThis function is only intended to be used internally.\n\nArguments:\ncoro (coroutinefunction): function to decorate\n\nRaises:\nTypeError: if function or coroutine function is not provided.\n\nReturns:\nfunction: decorated function.", "source": "juraj-google-style"}
{"code": "def broadcast_to(rt_input, shape: DynamicRaggedShape):\n    if not isinstance(shape, DynamicRaggedShape):\n        raise TypeError('shape must be a DynamicRaggedShape')\n    rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n    origin_shape = None\n    if ragged_tensor.is_ragged(rt_input):\n        if shape.num_row_partitions != 0:\n            if rt_input.row_splits.dtype != shape.dtype:\n                raise ValueError('Cannot coerce row_splits.dtype')\n        else:\n            shape = shape.with_dtype(rt_input.row_splits.dtype)\n        origin_shape = DynamicRaggedShape.from_tensor(rt_input)\n    elif shape.num_row_partitions != 0:\n        origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)\n    else:\n        origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=dtypes.int64)\n        shape = shape.with_dtype(dtype=dtypes.int64)\n    broadcaster = _get_broadcaster(origin_shape, shape)\n    return broadcaster.broadcast(rt_input)", "docstring": "Broadcasts a potentially ragged tensor to a ragged shape.\n\nTiles `rt_input` as necessary to match the given shape.\n\nBehavior is undefined if `rt_input` is not broadcast-compatible with `shape`.\n\nArgs:\nrt_input: The potentially ragged tensor to broadcast.\nshape: A `DynamicRaggedShape`\n\nReturns:\nA potentially ragged tensor whose values are taken from\n`rt_input`, and whose shape matches `shape`.", "source": "github-repos"}
{"code": "def _is_disk_usage_reset_each_run(self):\n    return False", "docstring": "Indicates whether disk usage is reset after each Session.run.\n\nSubclasses that clean up the disk usage after every run should\noverride this protected method.\n\nReturns:\n(`bool`) Whether the disk usage amount is reset to zero after\neach Session.run.", "source": "github-repos"}
{"code": "def get(self, service_id, insert_defaults=None):\n    return self.prepare_model(self.client.api.inspect_service(service_id, insert_defaults))", "docstring": "Get a service.\n\nArgs:\nservice_id (str): The ID of the service.\ninsert_defaults (boolean): If true, default values will be merged\ninto the output.\n\nReturns:\n:py:class:`Service`: The service.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the service does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n:py:class:`docker.errors.InvalidVersion`\nIf one of the arguments is not supported with the current\nAPI version.", "source": "codesearchnet"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(Boolean, self).write(ostream, kmip_version=kmip_version)\n        self.write_value(ostream, kmip_version=kmip_version)", "docstring": "Write the encoding of the Boolean object to the output stream.\n\nArgs:\nostream (Stream): A buffer to contain the encoded bytes of a\nBoolean object. Usually a BytearrayStream object. Required.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def deepgetattr(obj, name, default=_UNSPECIFIED):\n    \n    try:\n        if '.' in name:\n            attr, subname = name.split('.', 1)\n            return deepgetattr(getattr(obj, attr), subname, default)\n        else:\n            return getattr(obj, name)\n    except AttributeError:\n        if default is _UNSPECIFIED:\n            raise\n        else:\n            return default", "docstring": "Try to retrieve the given attribute of an object, digging on '.'.\n\nThis is an extended getattr, digging deeper if '.' is found.\n\nArgs:\nobj (object): the object of which an attribute should be read\nname (str): the name of an attribute to look up.\ndefault (object): the default value to use if the attribute wasn't found\n\nReturns:\nthe attribute pointed to by 'name', splitting on '.'.\n\nRaises:\nAttributeError: if obj has no 'name' attribute.", "source": "juraj-google-style"}
{"code": "def __init__(self, node, function, enclosing_graph, first_function_input, type_attribute, function_attributes):\n    super(_FunctionCaller, self).__init__(node, function, enclosing_graph)\n    self._first_function_input = first_function_input\n    self._type_attribute = type_attribute\n    self._function_attributes = function_attributes", "docstring": "Initializes a _FunctionCaller.\n\nArgs:\nnode: As in _Node.\nfunction: As in _Node.\nenclosing_graph: As in _Node.\nfirst_function_input: The index of the first NodeDef input that is tied to\nthe function inputs. It is assumed that the rest of the NodeDef inputs\nmap one to one to function inputs.\ntype_attribute: The name of the NodeDef attribute that defines the input\ntypes. It is assumed that the types listed here map one-to-one with the\nfunction inputs (that is, they do _not_ specify types for inputs that\nare not passed to functions).\nfunction_attributes: The names of the NodeDef attributes containing\nreferences to functions.", "source": "github-repos"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all tables in the specified dataset. Requires the READER dataset role.\n\nArgs:\nrequest: (BigqueryTablesListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(TableList) The response message.", "source": "github-repos"}
{"code": "def pulse_drawer(samples, duration, dt=None, interp_method='None', filename=None, interactive=False, dpi=150, nop=1000, size=(6, 5)):\n    try:\n        from matplotlib import pyplot as plt\n    except ImportError:\n        raise ImportError('pulse_drawer need matplotlib. Run \"pip install matplotlib\" before.')\n    if dt:\n        _dt = dt\n    else:\n        _dt = 1\n    re_y = np.real(samples)\n    im_y = np.imag(samples)\n    image = plt.figure(figsize=size)\n    ax0 = image.add_subplot(111)\n    if (interp_method == 'CubicSpline'):\n        time = ((np.arange(0, (duration + 1)) * _dt) + (0.5 * _dt))\n        cs_ry = CubicSpline(time[:(- 1)], re_y)\n        cs_iy = CubicSpline(time[:(- 1)], im_y)\n        _time = np.linspace(0, (duration * _dt), nop)\n        _re_y = cs_ry(_time)\n        _im_y = cs_iy(_time)\n    elif (interp_method == 'None'):\n        time = (np.arange(0, (duration + 1)) * _dt)\n        _time = np.r_[(time[0], np.repeat(time[1:(- 1)], 2), time[(- 1)])]\n        _re_y = np.repeat(re_y, 2)\n        _im_y = np.repeat(im_y, 2)\n    else:\n        raise QiskitError(('Invalid interpolation method \"%s\"' % interp_method))\n    ax0.fill_between(x=_time, y1=_re_y, y2=np.zeros_like(_time), facecolor='red', alpha=0.3, edgecolor='red', linewidth=1.5, label='real part')\n    ax0.fill_between(x=_time, y1=_im_y, y2=np.zeros_like(_time), facecolor='blue', alpha=0.3, edgecolor='blue', linewidth=1.5, label='imaginary part')\n    ax0.set_xlim(0, (duration * _dt))\n    ax0.grid(b=True, linestyle='-')\n    ax0.legend(bbox_to_anchor=(0.5, 1.0), loc='lower center', ncol=2, frameon=False, fontsize=14)\n    if filename:\n        image.savefig(filename, dpi=dpi, bbox_inches='tight')\n    plt.close(image)\n    if (image and interactive):\n        plt.show(image)\n    return image", "docstring": "Plot the interpolated envelope of pulse\n\nArgs:\nsamples (ndarray): Data points of complex pulse envelope.\nduration (int): Pulse length (number of points).\ndt (float): Time interval of samples.\ninterp_method (str): Method of interpolation\n(set `None` for turn off the interpolation).\nfilename (str): Name required to save pulse image.\ninteractive (bool): When set true show the circuit in a new window\n(this depends on the matplotlib backend being used supporting this).\ndpi (int): Resolution of saved image.\nnop (int): Data points for interpolation.\nsize (tuple): Size of figure.\nReturns:\nmatplotlib.figure: A matplotlib figure object for the pulse envelope.\nRaises:\nImportError: when the output methods requieres non-installed libraries.\nQiskitError: when invalid interpolation method is specified.", "source": "codesearchnet"}
{"code": "def threat(self, name, **kwargs):\n    group_obj = Threat(name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Threat data to Batch object\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Threat.", "source": "codesearchnet"}
{"code": "def index(self, value, start=0, end=None):\n    try:\n        index = self._dict[value]\n    except KeyError:\n        raise ValueError\n    else:\n        start = self._fix_neg_index(start)\n        end = self._fix_end_index(end)\n        if ((start <= index) and (index < end)):\n            return index\n        else:\n            raise ValueError", "docstring": "Return the index of value between start and end.\n\nBy default, the entire setlist is searched.\n\nThis runs in O(1)\n\nArgs:\nvalue: The value to find the index of\nstart (int): The index to start searching at (defaults to 0)\nend (int): The index to stop searching at (defaults to the end of the list)\nReturns:\nint: The index of the value\nRaises:\nValueError: If the value is not in the list or outside of start - end\nIndexError: If start or end are out of range", "source": "codesearchnet"}
{"code": "def delete(self, entity):\n    \n    key = _normalize_key(entity)\n    if key is None:\n      return self.ndb_delete(entity)\n    self.deletes.append(key)", "docstring": "Registers entity to delete from datastore.\n\nArgs:\nentity: an entity, model instance, or key to delete.", "source": "juraj-google-style"}
{"code": "def put_path(self, url, path):\n        \n        cache_path = self._url_to_path(url)\n\n        \n        try:\n            dir = os.path.dirname(cache_path)\n            os.makedirs(dir)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise Error('Failed to create cache directories for ' % cache_path)\n\n        \n        try:\n            os.unlink(cache_path)\n        except OSError:\n            pass\n\n        try:\n            \n            os.link(path, cache_path)\n        except OSError:\n            try:\n                \n                shutil.copyfile(path, cache_path)\n            except IOError:\n                raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))", "docstring": "Puts a resource already on disk into the disk cache.\n\nArgs:\nurl: The original url of the resource\npath: The resource already available on disk\n\nRaises:\nCacheError: If the file cannot be put in cache", "source": "juraj-google-style"}
{"code": "def capture(self, payment_id, amount, data={}, **kwargs):\n    url = '{}/{}/capture'.format(self.base_url, payment_id)\n    data['amount'] = amount\n    return self.post_url(url, data, **kwargs)", "docstring": "Capture Payment for given Id\n\nArgs:\npayment_id : Id for which payment object has to be retrieved\nAmount : Amount for which the payment has to be retrieved\n\nReturns:\nPayment dict after getting captured", "source": "codesearchnet"}
{"code": "def _get_updated_values(before_values, after_values):\n    \n    assert before_values.keys() == after_values.keys()\n    return dict([(k, [before_values[k], after_values[k]])\n                 for k in before_values.keys()\n                 if before_values[k] != after_values[k]])", "docstring": "Get updated values from 2 dicts of values\n\nArgs:\nbefore_values (dict): values before update\nafter_values (dict): values after update\n\nReturns:\ndict: a diff dict with key is field key, value is tuple of\n(before_value, after_value)", "source": "juraj-google-style"}
{"code": "def span(self):\n    other = VersionRange(None)\n    bound = _Bound(self.bounds[0].lower, self.bounds[(- 1)].upper)\n    other.bounds = [bound]\n    return other", "docstring": "Return a contiguous range that is a superset of this range.\n\nReturns:\nA VersionRange object representing the span of this range. For\nexample, the span of \"2+<4|6+<8\" would be \"2+<8\".", "source": "codesearchnet"}
{"code": "def getConParams(virtualhost):\n    \n    return pika.ConnectionParameters(\n        host=settings.RABBITMQ_HOST,\n        port=int(settings.RABBITMQ_PORT),\n        virtual_host=virtualhost,\n        credentials=pika.PlainCredentials(\n            settings.RABBITMQ_USER_NAME,\n            settings.RABBITMQ_USER_PASSWORD\n        )\n    )", "docstring": "Connection object builder.\n\nArgs:\nvirtualhost (str): selected virtualhost in rabbitmq\n\nReturns:\npika.ConnectionParameters: object filled by `constants` from\n:class:`edeposit.amqp.settings`.", "source": "juraj-google-style"}
{"code": "def generate_stack_policy_args(stack_policy=None):\n    args = {}\n    if stack_policy:\n        logger.debug('Stack has a stack policy')\n        if stack_policy.url:\n            raise NotImplementedError\n        else:\n            args['StackPolicyBody'] = stack_policy.body\n    return args", "docstring": "Converts a stack policy object into keyword args.\n\nArgs:\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.\n\nReturns:\ndict: A dictionary of keyword arguments to be used elsewhere.", "source": "codesearchnet"}
{"code": "def duplicate(script, layer_num=None):\n    \n    filter_xml = '  <filter name=\"Duplicate Current layer\"/>\\n'\n    if isinstance(script, mlx.FilterScript):\n        if (layer_num is None) or (layer_num == script.current_layer()):\n            util.write_filter(script, filter_xml)\n            script.add_layer('{}_copy'.format(script.layer_stack[script.current_layer()]), True)\n        else:\n            change(script, layer_num)\n            util.write_filter(script, filter_xml)\n            script.add_layer('{}_copy'.format(script.layer_stack[layer_num]), True)\n    else:\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Duplicate a layer.\n\nNew layer label is '*_copy'.\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlayer_num (int): layer number to duplicate. Default is the\ncurrent layer. Not supported on the file base API.\n\nLayer stack:\nCreates a new layer\nChanges current layer to the new layer\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def add_string_pairs_from_label_element(xib_file, results, label, special_ui_components_prefix):\n    \n    label_entry_comment = extract_element_internationalized_comment(label)\n    if label_entry_comment is None:\n        return\n\n    warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix)\n\n    if label.hasAttribute('usesAttributedText') and label.attributes['usesAttributedText'].value == 'YES':\n        add_string_pairs_from_attributed_ui_element(results, label, label_entry_comment)\n    else:\n        try:\n            label_entry_key = label.attributes['text'].value\n        except KeyError:\n            try:\n                label_entry_key = label.getElementsByTagName('string')[0].firstChild.nodeValue\n            except Exception:\n                label_entry_key = 'N/A'\n                logging.warn(\"%s: Missing text entry in %s\", xib_file, label.toxml('UTF8'))\n        results.append((label_entry_key, label_entry_comment))", "docstring": "Adds string pairs from a label element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\nlabel (element): The label element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix (str):\nIf not None, extraction will not warn about internationalized UI components with this class prefix.", "source": "juraj-google-style"}
{"code": "def attach(self, container, stdout=True, stderr=True, stream=False, logs=False, demux=False):\n    params = {'logs': ((logs and 1) or 0), 'stdout': ((stdout and 1) or 0), 'stderr': ((stderr and 1) or 0), 'stream': ((stream and 1) or 0)}\n    headers = {'Connection': 'Upgrade', 'Upgrade': 'tcp'}\n    u = self._url('/containers/{0}/attach', container)\n    response = self._post(u, headers=headers, params=params, stream=True)\n    output = self._read_from_socket(response, stream, self._check_is_tty(container), demux=demux)\n    if stream:\n        return CancellableStream(output, response)\n    else:\n        return output", "docstring": "Attach to a container.\n\nThe ``.logs()`` function is a wrapper around this method, which you can\nuse instead if you want to fetch/stream container output without first\nretrieving the entire backlog.\n\nArgs:\ncontainer (str): The container to attach to.\nstdout (bool): Include stdout.\nstderr (bool): Include stderr.\nstream (bool): Return container output progressively as an iterator\nof strings, rather than a single string.\nlogs (bool): Include the container's previous output.\ndemux (bool): Keep stdout and stderr separate.\n\nReturns:\nBy default, the container's output as a single string (two if\n``demux=True``: one for stdout and one for stderr).\n\nIf ``stream=True``, an iterator of output strings. If\n``demux=True``, two iterators are returned: one for stdout and one\nfor stderr.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _resize_image(image, height, width):\n    return tf.image.resize_images(image, [height, width], method=tf.image.ResizeMethod.BILINEAR, align_corners=False)", "docstring": "Simple wrapper around tf.resize_images.\n\nThis is primarily to make sure we use the same `ResizeMethod` and other\ndetails each time.\n\nArgs:\nimage: A 3-D image `Tensor`.\nheight: The target height for the resized image.\nwidth: The target width for the resized image.\n\nReturns:\nresized_image: A 3-D tensor containing the resized image. The first two\ndimensions have the shape [height, width].", "source": "codesearchnet"}
{"code": "def update_(self, conf_dict, conf_arg=True):\n    for (section, secdict) in conf_dict.items():\n        self[section].update_(secdict, conf_arg)", "docstring": "Update values of configuration options with dict.\n\nArgs:\nconf_dict (dict): dict of dict indexed with section and option\nnames.\nconf_arg (bool): if True, only options that can be set in a config\nfile are updated.", "source": "codesearchnet"}
{"code": "def _parse_meta_info(self, line):\n        \n        if self.mslevel:\n            self.meta_info['ms_level'] = self.mslevel\n\n        if self.polarity:\n            self.meta_info['polarity'] = self.polarity\n\n        for k, regexes in six.iteritems(self.meta_regex):\n            for reg in regexes:\n\n                m = re.search(reg, line, re.IGNORECASE)\n\n                if m:\n                    self.meta_info[k] = m.group(1).strip()", "docstring": "Parse and extract all meta data by looping through the dictionary of meta_info regexs\n\nupdates self.meta_info\n\nArgs:\nline (str): line of the msp file", "source": "juraj-google-style"}
{"code": "def sort(expr, field=None, keytype=None, ascending=True):\n    weld_obj = WeldObject(encoder_, decoder_)\n    expr_var = weld_obj.update(expr)\n    if isinstance(expr, WeldObject):\n        expr_var = expr.obj_id\n        weld_obj.dependencies[expr_var] = expr\n    if (field is not None):\n        key_str = ('x.$%s' % field)\n    else:\n        key_str = 'x'\n    if (not ascending):\n        key_str = (key_str + ('* %s(-1)' % keytype))\n    weld_template = '\\n    sort(%(expr)s, |x| %(key)s)\\n    '\n    weld_obj.weld_code = (weld_template % {'expr': expr_var, 'key': key_str})\n    return weld_obj", "docstring": "Sorts the vector.\nIf the field parameter is provided then the sort\noperators on a vector of structs where the sort key\nis the field of the struct.\n\nArgs:\nexpr (WeldObject)\nfield (Int)", "source": "codesearchnet"}
{"code": "def as_json(self,\n                entity_url,\n                context=None):\n        \n        try:\n            urllib.request.urlopen(entity_url)\n        except urllib.error.HTTPError:\n            raise ValueError(\"Cannot open {}\".format(entity_url))\n        entity_graph = self.read(entity_url)\n        entity_json = json.loads(\n            entity_graph.serialize(\n                format='json-ld',\n                context=context).decode())\n        return json.dumps(entity_json)", "docstring": "Method takes a entity uri and attempts to return the Fedora Object\nas a JSON-LD.\n\nArgs:\nentity_url(str): Fedora Commons URL of Entity\ncontext(None): Returns JSON-LD with Context, default is None\n\nReturns:\nstr: JSON-LD of Fedora Object", "source": "juraj-google-style"}
{"code": "def docx_text_from_xml_node(node: ElementTree.Element, level: int, config: TextProcessingConfig) -> str:\n    text = ''\n    if (node.tag == DOCX_TEXT):\n        text += (node.text or '')\n    elif (node.tag == DOCX_TAB):\n        text += '\\t'\n    elif (node.tag in DOCX_NEWLINES):\n        text += '\\n'\n    elif (node.tag == DOCX_NEWPARA):\n        text += '\\n\\n'\n    if (node.tag == DOCX_TABLE):\n        text += ('\\n\\n' + docx_table_from_xml_node(node, level, config))\n    else:\n        for child in node:\n            text += docx_text_from_xml_node(child, (level + 1), config)\n    return text", "docstring": "Returns text from an XML node within a DOCX file.\n\nArgs:\nnode: an XML node\nlevel: current level in XML hierarchy (used for recursion; start level\nis 0)\nconfig: :class:`TextProcessingConfig` control object\n\nReturns:\ncontents as a string", "source": "codesearchnet"}
{"code": "def sibling(self, name: InstanceName) -> \"ObjectMember\":\n        \n        ssn = self.parinst._member_schema_node(name)\n        try:\n            sibs = self.siblings.copy()\n            newval = sibs.pop(name)\n            sibs[self.name] = self.value\n            return ObjectMember(name, sibs, newval, self.parinst,\n                                ssn, self.timestamp)\n        except KeyError:\n            raise NonexistentInstance(self.json_pointer(),\n                                      f\"member '{name}'\") from None", "docstring": "Return an instance node corresponding to a sibling member.\n\nArgs:\nname: Instance name of the sibling member.\n\nRaises:\nNonexistentSchemaNode: If member `name` is not permitted by the\nschema.\nNonexistentInstance: If sibling member `name` doesn't exist.", "source": "juraj-google-style"}
{"code": "def nsx_controller_name(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        name_args = dict(name=name)\n        method_name = 'nsx_controller_name'\n        method_class = self._brocade_tunnels\n        nsxcontroller_attr = getattr(method_class, method_name)\n        config = nsxcontroller_attr(**name_args)\n\n        if kwargs.pop('get', False):\n            output = self._callback(config, handler='get_config')\n        else:\n            output = self._callback(config)\n        return output", "docstring": "Get/Set nsx controller name\n\nArgs:\nname: (str) :   Name of the nsx controller\nget (bool) : Get nsx controller config(True,False)\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def bytes_to_readable_str(num_bytes, include_b=False):\n    if num_bytes is None:\n        return str(num_bytes)\n    if num_bytes < 1024:\n        result = '%d' % num_bytes\n    elif num_bytes < 1048576:\n        result = '%.2fk' % (num_bytes / 1024.0)\n    elif num_bytes < 1073741824:\n        result = '%.2fM' % (num_bytes / 1048576.0)\n    else:\n        result = '%.2fG' % (num_bytes / 1073741824.0)\n    if include_b:\n        result += 'B'\n    return result", "docstring": "Generate a human-readable string representing number of bytes.\n\nThe units B, kB, MB and GB are used.\n\nArgs:\nnum_bytes: (`int` or None) Number of bytes.\ninclude_b: (`bool`) Include the letter B at the end of the unit.\n\nReturns:\n(`str`) A string representing the number of bytes in a human-readable way,\nincluding a unit at the end.", "source": "github-repos"}
{"code": "def _pack_with_tf_ops(dataset, keys, length):\n  \n  empty_example = {}\n  for k in keys:\n    empty_example[k] = tf.zeros([0], dtype=tf.int32)\n    empty_example[k + \"_position\"] = tf.zeros([0], dtype=tf.int32)\n  keys_etc = empty_example.keys()\n\n  def write_packed_example(partial, outputs):\n    new_partial = empty_example.copy()\n    new_outputs = {}\n    for k in keys_etc:\n      new_outputs[k] = outputs[k].write(\n          outputs[k].size(),\n          tf.pad(partial[k], [[0, length - tf.size(partial[k])]]))\n    return new_partial, new_outputs\n\n  def map_fn(x):\n    \n    partial = empty_example.copy()\n    i = tf.zeros([], dtype=tf.int32)\n    dynamic_batch_size = tf.shape(x[keys[0]])[0]\n    outputs = {}\n    for k in keys:\n      outputs[k] = tf.TensorArray(\n          tf.int32, size=0, dynamic_size=True, element_shape=[length])\n      outputs[k + \"_position\"] = tf.TensorArray(\n          tf.int32, size=0, dynamic_size=True, element_shape=[length])\n    def cond_fn(i, partial, outputs):\n      del partial, outputs\n      return i < dynamic_batch_size\n    def body_fn(i, partial, outputs):\n      \n      can_append = True\n      one_example = {}\n      for k in keys:\n        val = tf.cast(x[k][i], tf.int32)\n        val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]\n        one_example[k] = val\n      for k in keys:\n        can_append = tf.logical_and(\n            can_append,\n            tf.less_equal(\n                tf.size(partial[k]) + tf.size(one_example[k]), length))\n      def false_fn():\n        return write_packed_example(partial, outputs)\n      def true_fn():\n        return partial, outputs\n      partial, outputs = tf.cond(can_append, true_fn, false_fn)\n      new_partial = {}\n      for k in keys:\n        new_seq = one_example[k][:length]\n        new_seq_len = tf.size(new_seq)\n        new_partial[k] = tf.concat([partial[k], new_seq], 0)\n        new_partial[k + \"_position\"] = tf.concat(\n            [partial[k + \"_position\"],\n             tf.range(new_seq_len, dtype=tf.int32)], 0)\n      partial = new_partial\n      return i+1, partial, outputs\n\n    i, partial, outputs = tf.while_loop(\n        cond_fn, body_fn, (i, partial, outputs),\n        back_prop=False,\n        shape_invariants=(\n            tf.TensorShape([]),\n            {k: tf.TensorShape([None]) for k in keys_etc},\n            {k: tf.TensorShape(None) for k in keys_etc},\n            ))\n    partial, outputs = write_packed_example(partial, outputs)\n    packed = {k: outputs[k].stack() for k in keys_etc}\n    for k in keys:\n      packed[k + \"_segmentation\"] = (\n          tf.cumsum(\n              tf.cast(tf.equal(packed[k + \"_position\"], 0), tf.int32), axis=1) *\n          tf.cast(tf.not_equal(packed[k], 0), tf.int32))\n    return packed\n  dataset = dataset.map(map_fn,\n                        num_parallel_calls=tf.data.experimental.AUTOTUNE)\n  return dataset.flat_map(tf.data.Dataset.from_tensor_slices)", "docstring": "Helper-function for packing a dataset which has already been batched.\n\nSee pack_dataset()\n\nUses tf.while_loop.  Slow.\n\nArgs:\ndataset: a dataset containing padded batches of examples.\nkeys: a list of strings\nlength: an integer\n\nReturns:\na dataset.", "source": "juraj-google-style"}
{"code": "def shortcut_string_merge(self, node_def):\n    device = node_def.device or ''\n    merge_key = (self._spec, device)\n    result = _string_merge_cache.get(merge_key)\n    if result is None:\n        result = self.__call__(node_def).to_string()\n        _string_merge_cache[merge_key] = result\n    return result", "docstring": "Merge a node def without materializing a full DeviceSpec object.\n\nOften a device merge is invoked in order to generate a string which can be\npassed into the c api. In such a case, we can cache the\nnode_def.device  ->  merge_result_string\n\nmap, and in most cases avoid:\n- Materializing a copy of self._spec (In the case of DeviceSpecV1)\n- Materializing a DeviceSpec for node_def.device\n- A DeviceSpec.merge_from invocation\n\nIn practice the cache hit rate for this function is very high, because the\nnumber of invocations when iterating through the device stack is much\nlarger than the number of devices.\n\nArgs:\nnode_def: An Operation (or Operation-like) to merge device constraints\nwith self._spec\n\nReturns:\nA string containing the merged device specification.", "source": "github-repos"}
{"code": "def _selection(candidate):\n    sample_index1 = np.random.choice(len(candidate))\n    sample_index2 = np.random.choice(len(candidate))\n    sample_1 = candidate[sample_index1]\n    sample_2 = candidate[sample_index2]\n    select_index = np.random.choice(len(sample_1))\n    logger.info((LOGGING_PREFIX + 'Perform selection from %sth to %sth at index=%s'), sample_index2, sample_index1, select_index)\n    next_gen = []\n    for i in range(len(sample_1)):\n        if (i is select_index):\n            next_gen.append(sample_2[i])\n        else:\n            next_gen.append(sample_1[i])\n    return next_gen", "docstring": "Perform selection action to candidates.\n\nFor example, new gene = sample_1 + the 5th bit of sample2.\n\nArgs:\ncandidate: List of candidate genes (encodings).\n\nExamples:\n>>> # Genes that represent 3 parameters\n>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])\n>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])\n>>> new_gene = _selection([gene1, gene2])\n>>> # new_gene could be gene1 overwritten with the\n>>> # 2nd parameter of gene2\n>>> # in which case:\n>>> #   new_gene[0] = gene1[0]\n>>> #   new_gene[1] = gene2[1]\n>>> #   new_gene[2] = gene1[0]\n\nReturns:\nNew gene (encoding)", "source": "codesearchnet"}
{"code": "def stop_worker(config, *, worker_ids=None):\n    if ((worker_ids is not None) and (not isinstance(worker_ids, list))):\n        worker_ids = [worker_ids]\n    celery_app = create_app(config)\n    celery_app.control.shutdown(destination=worker_ids)", "docstring": "Stop a worker process.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings for the worker are retrieved.\nworker_ids (list): An optional list of ids for the worker that should be stopped.", "source": "codesearchnet"}
{"code": "def _SerializeRequest(self, request):\n    parsed = urllib_parse.urlsplit(request.url)\n    request_line = urllib_parse.urlunsplit(('', '', parsed.path, parsed.query, ''))\n    if (not isinstance(request_line, six.text_type)):\n        request_line = request_line.decode('utf-8')\n    status_line = u' '.join((request.http_method, request_line, u'HTTP/1.1\\n'))\n    (major, minor) = request.headers.get('content-type', 'application/json').split('/')\n    msg = mime_nonmultipart.MIMENonMultipart(major, minor)\n    for (key, value) in request.headers.items():\n        if (key == 'content-type'):\n            continue\n        msg[key] = value\n    msg['Host'] = parsed.netloc\n    msg.set_unixfrom(None)\n    if (request.body is not None):\n        msg.set_payload(request.body)\n    str_io = six.StringIO()\n    gen = generator.Generator(str_io, maxheaderlen=0)\n    gen.flatten(msg, unixfrom=False)\n    body = str_io.getvalue()\n    return (status_line + body)", "docstring": "Convert a http_wrapper.Request object into a string.\n\nArgs:\nrequest: A http_wrapper.Request to serialize.\n\nReturns:\nThe request as a string in application/http format.", "source": "codesearchnet"}
{"code": "def ts_to_str(jwt_dict):\n    \n    d = ts_to_dt(jwt_dict)\n    for k, v in list(d.items()):\n        if isinstance(v, datetime.datetime):\n            d[k] = v.isoformat().replace('T', ' ')\n    return d", "docstring": "Convert timestamps in JWT to human readable dates.\n\nArgs:\njwt_dict: dict\nJWT with some keys containing timestamps.\n\nReturns:\ndict: Copy of input dict where timestamps have been replaced with human readable\ndates.", "source": "juraj-google-style"}
{"code": "def _handle_port_request(self, client_data, writer):\n    try:\n        pid = int(client_data)\n    except ValueError as error:\n        self._client_request_errors += 1\n        log.warning('Could not parse request: %s', error)\n        return\n    log.info('Request on behalf of pid %d.', pid)\n    log.info('cmdline: %s', _get_process_command_line(pid))\n    if (not _should_allocate_port(pid)):\n        self._denied_allocations += 1\n        return\n    port = self._port_pool.get_port_for_process(pid)\n    if (port > 0):\n        self._total_allocations += 1\n        writer.write('{:d}\\n'.format(port).encode('utf-8'))\n        log.debug('Allocated port %d to pid %d', port, pid)\n    else:\n        self._denied_allocations += 1", "docstring": "Given a port request body, parse it and respond appropriately.\n\nArgs:\nclient_data: The request bytes from the client.\nwriter: The asyncio Writer for the response to be written to.", "source": "codesearchnet"}
{"code": "def GetObject(self, identifier):\n    cache_value = self._values.get(identifier, None)\n    if (not cache_value):\n        return None\n    return cache_value.vfs_object", "docstring": "Retrieves a cached object based on the identifier.\n\nThis method ignores the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nReturns:\nobject: cached VFS object or None if not cached.", "source": "codesearchnet"}
{"code": "def dropout(x, level, noise_shape=None, seed=None):\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)", "docstring": "Sets entries in `x` to zero at random, while scaling the entire tensor.\n\nArgs:\nx: tensor\nlevel: fraction of the entries in the tensor\nthat will be set to 0.\nnoise_shape: shape for randomly generated keep/drop flags,\nmust be broadcastable to the shape of `x`\nseed: random seed to ensure determinism.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def _transform_indices(self, key):\n    ndims = self.ndims\n    if all(((not (isinstance(el, slice) or callable(el))) for el in key)):\n        dim_inds = []\n        for dim in self.kdims:\n            dim_type = self.get_dimension_type(dim)\n            if (isinstance(dim_type, type) and issubclass(dim_type, Number)):\n                dim_inds.append(self.get_dimension_index(dim))\n        str_keys = iter((key[i] for i in range(self.ndims) if (i not in dim_inds)))\n        num_keys = []\n        if len(dim_inds):\n            keys = list({tuple(((k[i] if (ndims > 1) else k) for i in dim_inds)) for k in self.keys()})\n            q = np.array([tuple(((key[i] if (ndims > 1) else key) for i in dim_inds))])\n            idx = np.argmin([(np.inner((q - np.array(x)), (q - np.array(x))) if (len(dim_inds) == 2) else np.abs((q - x))) for x in keys])\n            num_keys = iter(keys[idx])\n        key = tuple(((next(num_keys) if (i in dim_inds) else next(str_keys)) for i in range(self.ndims)))\n    elif any(((not (isinstance(el, slice) or callable(el))) for el in key)):\n        keys = self.keys()\n        for (i, k) in enumerate(key):\n            if isinstance(k, slice):\n                continue\n            dim_keys = np.array([ke[i] for ke in keys])\n            if (dim_keys.dtype.kind in 'OSU'):\n                continue\n            snapped_val = dim_keys[np.argmin(np.abs((dim_keys - k)))]\n            key = list(key)\n            key[i] = snapped_val\n        key = tuple(key)\n    return key", "docstring": "Snaps indices into the GridSpace to the closest coordinate.\n\nArgs:\nkey: Tuple index into the GridSpace\n\nReturns:\nTransformed key snapped to closest numeric coordinates", "source": "codesearchnet"}
{"code": "def update_state(self, state_arr, action_arr):\n    (x, y) = np.where((action_arr[(- 1)] == 1))\n    self.__agent_pos = (x[0], y[0])\n    self.__route_memory_list.append((x[0], y[0]))\n    self.__route_long_memory_list.append((x[0], y[0]))\n    self.__route_long_memory_list = list(set(self.__route_long_memory_list))\n    while (len(self.__route_memory_list) > self.__memory_num):\n        self.__route_memory_list = self.__route_memory_list[1:]\n    return self.extract_now_state()", "docstring": "Update state.\n\nOverride.\n\nArgs:\nstate_arr:    `np.ndarray` of state in `self.t`.\naction_arr:   `np.ndarray` of action in `self.t`.\n\nReturns:\n`np.ndarray` of state in `self.t+1`.", "source": "codesearchnet"}
{"code": "def add_network(self, network, netmask, area=0):\n    if ((network == '') or (netmask == '')):\n        raise ValueError('network and mask values may not be empty')\n    cmd = 'network {}/{} area {}'.format(network, netmask, area)\n    return self.configure_ospf(cmd)", "docstring": "Adds a network to be advertised by OSPF\n\nArgs:\nnetwork (str):  The network to be advertised in dotted decimal\nnotation\nnetmask (str):  The netmask to configure\narea (str):  The area the network belongs to.\nBy default this value is 0\nReturns:\nbool: True if the command completes successfully\nException:\nValueError: This will get raised if network or netmask\nare not passed to the method", "source": "codesearchnet"}
{"code": "def maybe_download_image_dataset(image_ids, target_dir):\n    tf.gfile.MakeDirs(target_dir)\n    num_images = len(image_ids)\n    for (i, image_id) in enumerate(image_ids):\n        destination = os.path.join(target_dir, ('%s.jpg' % i))\n        tmp_destination = ('%s.temp' % destination)\n        source_url = ('http:\n        if tf.gfile.Exists(destination):\n            tf.logging.info(('Image with ID already present, skipping download (%s of %s).' % ((i + 1), num_images)))\n            continue\n        tf.logging.info(('Downloading image with id %s (%s of %s)' % (image_id, (i + 1), num_images)))\n        response = requests.get(source_url, stream=True)\n        response.raise_for_status()\n        with tf.gfile.Open(tmp_destination, 'w') as f:\n            for block in response.iter_content(1024):\n                f.write(block)\n        tf.gfile.Rename(tmp_destination, destination)", "docstring": "Download a set of images from api.brain-map.org to `target_dir`.\n\nArgs:\nimage_ids: list, a list of image ids.\ntarget_dir: str, a directory to which to download the images.", "source": "codesearchnet"}
{"code": "def protected_branches():\n    master = conf.get('git.master_branch', 'master')\n    develop = conf.get('git.devel_branch', 'develop')\n    return conf.get('git.protected_branches', (master, develop))", "docstring": "Return branches protected by deletion.\n\nBy default those are master and devel branches as configured in pelconf.\n\nReturns:\nlist[str]: Names of important branches that should not be deleted.", "source": "codesearchnet"}
{"code": "def _retry_on_appropriate_openai_error(exception):\n    return isinstance(exception, (RateLimitError, APIError))", "docstring": "Retry filter that returns True for rate limit (429) or server (5xx) errors.\n\nArgs:\nexception: the returned exception encountered during the request/response\nloop.\n\nReturns:\nboolean indication whether or not the exception is a Server Error (5xx) or\na RateLimitError (429) error.", "source": "github-repos"}
{"code": "def _indicator(self, indicator_data):\n        \n        if isinstance(indicator_data, dict):\n            \n            xid = indicator_data.get('xid')\n        else:\n            \n            xid = indicator_data.xid\n\n        if self.indicators.get(xid) is not None:\n            \n            indicator_data = self.indicators.get(xid)\n        elif self.indicators_shelf.get(xid) is not None:\n            \n            indicator_data = self.indicators_shelf.get(xid)\n        else:\n            \n            self.indicators[xid] = indicator_data\n        return indicator_data", "docstring": "Return previously stored indicator or new indicator.\n\nArgs:\nindicator_data (dict|obj): An Indicator dict or instance of Indicator object.\n\nReturns:\ndict|obj: The new Indicator dict/object or the previously stored dict/object.", "source": "juraj-google-style"}
{"code": "def __init__(self, destination, transport):\n    \n    self._destination = destination\n    self._transport = transport", "docstring": "Create a new ADB stream.\n\nArgs:\ndestination: String identifier for the destination of this stream.\ntransport: AdbStreamTransport to use for reads/writes.", "source": "juraj-google-style"}
{"code": "def list_matching(self, ref_name: str, filter_: str) \\\n            -> Iterable[ListEntry]:\n        \n        canonical, canonical_i = self._get_pattern(ref_name + filter_)\n        for entry in self.list():\n            if entry.name == 'INBOX':\n                if canonical_i.match('INBOX'):\n                    yield entry\n            elif canonical.match(entry.name):\n                yield entry", "docstring": "Return all the entries in the list tree that match the given query.\n\nArgs:\nref_name: Mailbox reference name.\nfilter_: Mailbox name with possible wildcards.", "source": "juraj-google-style"}
{"code": "def timestr2time(time_str):\n    \n    if any(c not in '0123456789:' for c in time_str):\n        raise ValueError('Illegal character in time string')\n    if time_str.count(':') == 2:\n        h, m, s = time_str.split(':')\n    elif time_str.count(':') == 1:\n        h, m = time_str.split(':')\n        s = '00'\n    elif len(time_str) == 6:\n        h = time_str[:2]\n        m = time_str[2:4]\n        s = time_str[4:]\n    else:\n        raise ValueError('Time format not recognised. {}'.format(\n                VALID_TIME_FORMATS_TEXT))\n    if len(m) == 2 and len(s) == 2:\n        mins = int(m)\n        sec = int(s)\n    else:\n        raise ValueError('m and s must be 2 digits')\n    try:\n        return datetime.time(int(h), mins, sec)\n    except ValueError:\n        raise ValueError('Invalid time {}. {}'.format(time_str, \n                VALID_TIME_FORMATS_TEXT))", "docstring": "Turns a string into a datetime.time object. This will only work if the\nformat can be \"guessed\", so the string must have one of the formats from\nVALID_TIME_FORMATS_TEXT.\n\nArgs:\ntime_str (str) a string that represents a date\nReturns:\ndatetime.time object\nRaises:\nValueError if the input string does not have a valid format.", "source": "juraj-google-style"}
{"code": "def select(self, selector):\n    if self._is_single_string_selector(selector, 'name'):\n        return self._all_models_by_name.get_all(selector['name'])\n    else:\n        return find(self._all_models.values(), selector)", "docstring": "Query this document for objects that match the given selector.\n\nArgs:\nselector (JSON-like query dictionary) : you can query by type or by\nname, e.g. ``{\"type\": HoverTool}``, ``{\"name\": \"mycircle\"}``\n\nReturns:\nseq[Model]", "source": "codesearchnet"}
{"code": "def parse_author(cls, marc):\n        \n        name = None\n        code = None\n        linked_forms = None\n        is_corporation = None\n        record = None\n\n        \n        if marc[\"100a\"]:  \n            name = _first_or_none(marc[\"100a\"])\n            code = _first_or_none(marc[\"1007\"])\n            is_corporation = False\n            record = marc.datafields[\"100\"][0]  \n        elif marc[\"110a\"]:  \n            name = _first_or_none(marc[\"110a\"])\n            code = _first_or_none(marc[\"1107\"])\n            linked_forms = marc[\"410a2 \"]\n            is_corporation = True\n            record = marc.datafields[\"110\"][0]  \n        else:\n            return None\n\n        \n        linked_forms = marc[\"410a2 \"]\n\n        \n        type_descriptor = [\"osoba\", \"organizace\"]\n        alt_name = \"%s [%s]\" % (name, type_descriptor[is_corporation])\n        if linked_forms:\n            alt_name += \" (\" + \", \".join(linked_forms) + \")\"\n\n        return cls(\n            name=name,\n            code=code,\n            linked_forms=linked_forms,\n            is_corporation=is_corporation,\n            record=record,\n            alt_name=alt_name,\n        )", "docstring": "Parse author from `marc` data.\n\nArgs:\nmarc (obj): :class:`.MARCXMLRecord` instance. See module\n:mod:`.marcxml_parser` for details.\n\nReturns:\nobj: :class:`Author`.", "source": "juraj-google-style"}
{"code": "def get_experiment_kind(root):\n    \n    properties = {}\n    if root.find('experimentType').text == 'Ignition delay measurement':\n        properties['experiment-type'] = 'ignition delay'\n    else:\n        raise NotImplementedError(root.find('experimentType').text + ' not (yet) supported')\n\n    properties['apparatus'] = {'kind': '', 'institution': '', 'facility': ''}\n    kind = getattr(root.find('apparatus/kind'), 'text', False)\n    \n    if not kind:\n        raise MissingElementError('apparatus/kind')\n    elif kind in ['shock tube', 'rapid compression machine']:\n        properties['apparatus']['kind'] = kind\n    else:\n        raise NotImplementedError(kind + ' experiment not (yet) supported')\n\n    return properties", "docstring": "Read common properties from root of ReSpecTh XML file.\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with experiment type and apparatus information.", "source": "juraj-google-style"}
{"code": "def tensor_list(elements, element_dtype=None, element_shape=None, use_tensor_array=False):\n    _validate_list_constructor(elements, element_dtype, element_shape)\n    if use_tensor_array:\n        return data_structures.tf_tensor_array_new(elements, element_dtype, element_shape)\n    else:\n        return data_structures.tf_tensor_list_new(elements, element_dtype, element_shape)", "docstring": "Creates an tensor list and populates it with the given elements.\n\nThis function provides a more uniform access to tensor lists and tensor\narrays, and allows optional initialization.\n\nNote: this function is a simplified wrapper. If you need greater control,\nit is recommended to use the underlying implementation directly.\n\nArgs:\nelements: Iterable[tf.Tensor, ...], the elements to initially fill the list\nwith\nelement_dtype: Optional[tf.DType], data type for the elements in the list;\nrequired if the list is empty\nelement_shape: Optional[tf.TensorShape], shape for the elements in the list;\nrequired if the list is empty\nuse_tensor_array: bool, whether to use the more compatible but restrictive\ntf.TensorArray implementation\nReturns:\nUnion[tf.Tensor, tf.TensorArray], the new list.\nRaises:\nValueError: for invalid arguments", "source": "github-repos"}
{"code": "def is_link(url, processed, files):\n    \n    if url not in processed:\n        is_file = url.endswith(BAD_TYPES)\n        if is_file:\n            files.add(url)\n            return False\n        return True\n    return False", "docstring": "Determine whether or not a link should be crawled\nA url should not be crawled if it\n- Is a file\n- Has already been crawled\n\nArgs:\nurl: str Url to be processed\nprocessed: list[str] List of urls that have already been crawled\n\nReturns:\nbool If `url` should be crawled", "source": "juraj-google-style"}
{"code": "def find_in_matrix_2d(val, matrix):\n    \n\n    dim = len(matrix[0])\n    item_index = 0\n\n    for row in matrix:\n        for i in row:\n            if i == val:\n                break\n            item_index += 1\n        if i == val:\n            break\n\n    loc = (int(item_index / dim), item_index % dim)\n\n    return loc", "docstring": "Returns a tuple representing the index of an item in a 2D matrix.\n\nArguments:\n- val (str) Value to look for\n- matrix (list) 2D matrix to search for val in\n\nReturns:\n- (tuple) Ordered pair representing location of val", "source": "juraj-google-style"}
{"code": "def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):\n    end_time = time.time() + timeout_secs\n    while time.time() < end_time:\n        if for_checkpoint:\n            if checkpoint_management.checkpoint_exists(pattern):\n                return\n        elif len(gfile.Glob(pattern)) >= 1:\n            return\n        time.sleep(0.05)\n    self.assertFalse(True, 'Glob never matched any file: %s' % pattern)", "docstring": "Wait for a checkpoint file to appear.\n\nArgs:\npattern: A string.\ntimeout_secs: How long to wait for in seconds.\nfor_checkpoint: whether we're globbing for checkpoints.", "source": "github-repos"}
{"code": "def dates_in_range(start_date, end_date):\n    return [(start_date + timedelta(n)) for n in range(int((end_date - start_date).days))]", "docstring": "Returns all dates between two dates.\n\nInclusive of the start date but not the end date.\n\nArgs:\nstart_date (datetime.date)\nend_date (datetime.date)\n\nReturns:\n(list) of datetime.date objects", "source": "codesearchnet"}
{"code": "def Issue(self, state, results):\n    result = CheckResult()\n    if (results and all((isinstance(r, CheckResult) for r in results))):\n        result.ExtendAnomalies(results)\n    else:\n        result.anomaly = [rdf_anomaly.Anomaly(type=anomaly_pb2.Anomaly.AnomalyType.Name(anomaly_pb2.Anomaly.ANALYSIS_ANOMALY), symptom=self.hint.Problem(state), finding=self.hint.Render(results), explanation=self.hint.Fix())]\n    return result", "docstring": "Collect anomalous findings into a CheckResult.\n\nComparisons with anomalous conditions collect anomalies into a single\nCheckResult message. The contents of the result varies depending on whether\nthe method making the comparison is a Check, Method or Probe.\n- Probes evaluate raw host data and generate Anomalies. These are condensed\ninto a new CheckResult.\n- Checks and Methods evaluate the results of probes (i.e. CheckResults). If\nthere are multiple probe results, all probe anomalies are aggregated into\na single new CheckResult for the Check or Method.\n\nArgs:\nstate: A text description of what combination of results were anomalous\n(e.g. some condition was missing or present.)\nresults: Anomalies or CheckResult messages.\n\nReturns:\nA CheckResult message.", "source": "codesearchnet"}
{"code": "def stacked_cnn(units: tf.Tensor, n_hidden_list: List, filter_width=3, use_batch_norm=False, use_dilation=False, training_ph=None, add_l2_losses=False):\n    l2_reg = (tf.nn.l2_loss if add_l2_losses else None)\n    for (n_layer, n_hidden) in enumerate(n_hidden_list):\n        if use_dilation:\n            dilation_rate = (2 ** n_layer)\n        else:\n            dilation_rate = 1\n        units = tf.layers.conv1d(units, n_hidden, filter_width, padding='same', dilation_rate=dilation_rate, kernel_initializer=INITIALIZER(), kernel_regularizer=l2_reg)\n        if use_batch_norm:\n            assert (training_ph is not None)\n            units = tf.layers.batch_normalization(units, training=training_ph)\n        units = tf.nn.relu(units)\n    return units", "docstring": "Number of convolutional layers stacked on top of each other\n\nArgs:\nunits: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\nn_hidden_list: list with number of hidden units at the ouput of each layer\nfilter_width: width of the kernel in tokens\nuse_batch_norm: whether to use batch normalization between layers\nuse_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...\ntraining_ph: boolean placeholder determining whether is training phase now or not.\nIt is used only for batch normalization to determine whether to use\ncurrent batch average (std) or memory stored average (std)\nadd_l2_losses: whether to add l2 losses on network kernels to\ntf.GraphKeys.REGULARIZATION_LOSSES or not\n\nReturns:\nunits: tensor at the output of the last convolutional layer", "source": "codesearchnet"}
{"code": "def LSTMLayer(cell_name, weights, m, c, x_seq, pad_seq):\n    if len(x_seq) != len(pad_seq):\n        raise ValueError('length of x_seq(%d) != pad_seq(%d)' % (len(x_seq), len(pad_seq)))\n    out_seq = []\n    for seq in range(len(x_seq)):\n        with ops.name_scope('%s_%d' % (cell_name, seq)):\n            m, c = LSTMCell(weights, m, c, x_seq[seq], pad_seq[seq])\n            out_seq.append(array_ops.identity(m, name='out'))\n    return out_seq", "docstring": "Unrolls a layer of LSTM cells forward by the sequence length.\n\nThe sequence length is determined by the length of x_seq and pad_seq, which\nmust be the same.\n\nArgs:\ncell_name: Base name of each cell.\nweights: Weight matrix with shape LSTMCellWeightsShape.\nm: Initial m states with shape [batch_size, num_nodes].\nc: Initial c states with shape [batch_size, num_nodes].\nx_seq: List of inputs, each with shape [batch_size, num_inputs].\nThe length of the list is the sequence length.\npad_seq: List of paddings, each with shape [batch_size, 1].\nThe length of the list is the sequence length.\nEach padding value is either 0 or 1, where 1 indicates padding;\ni.e. the input is shorter than the sequence length.\nReturns:\nList of per-sequence-step outputs, each with shape [batch_size, num_nodes].\nRaises:\nValueError: If len(x_seq) != len(pad_seq).", "source": "github-repos"}
{"code": "def load(cls, path):\n    with open(path, 'r') as in_file:\n        metadata = json.load(in_file)\n    return cls.from_dict(metadata)", "docstring": "Create a new MLPipeline from a JSON specification.\n\nThe JSON file format is the same as the one created by the `to_dict` method.\n\nArgs:\npath (str): Path of the JSON file to load.\n\nReturns:\nMLPipeline:\nA new MLPipeline instance with the specification found\nin the JSON file.", "source": "codesearchnet"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    tstream = BytearrayStream()\n    self.revocation_code.write(tstream, kmip_version=kmip_version)\n    if (self.revocation_message is not None):\n        self.revocation_message.write(tstream, kmip_version=kmip_version)\n    self.length = tstream.length()\n    super(RevocationReason, self).write(ostream, kmip_version=kmip_version)\n    ostream.write(tstream.buffer)", "docstring": "Write the data encoding the RevocationReason object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def primitive_wrapper_from_primitive(self, primitive_message: message.Message) -> _primitive_wrappers.PrimitiveWrapper:", "docstring": "Wraps the FHIR protobuf primitive_message to handle parsing/printing.\n\nThe wrapped FHIR protobuf primitive provides necessary state for printing to\nthe FHIR JSON spec.\n\nArgs:\nprimitive_message: The FHIR primitive to wrap.\n\nRaises:\nValueError: In the event that primitive_message is not actually a\nprimitive FHIR type.\nReturns: A wrapper around primitive_message.", "source": "github-repos"}
{"code": "def _make_class_weight_map_fn(class_weight):\n    class_ids = list(sorted(class_weight.keys()))\n    expected_class_ids = list(range(len(class_ids)))\n    if class_ids != expected_class_ids:\n        error_msg = 'Expected `class_weight` to be a dict with keys from 0 to one less than the number of classes, found {}'.format(class_weight)\n        raise ValueError(error_msg)\n    class_weight_tensor = tensor_conversion.convert_to_tensor_v2_with_dispatch([class_weight[int(c)] for c in class_ids])\n\n    def _class_weights_map_fn(*data):\n        \n        x, y, sw = unpack_x_y_sample_weight(data)\n        if nest.is_nested(y):\n            raise ValueError('`class_weight` is only supported for Models with a single output.')\n        if y.shape.rank > 2:\n            raise ValueError('`class_weight` not supported for 3+ dimensional targets.')\n        y_classes = smart_cond.smart_cond(y.shape.rank == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n        cw = array_ops.gather_v2(class_weight_tensor, y_classes)\n        if sw is not None:\n            cw = math_ops.cast(cw, sw.dtype)\n            sw, cw = expand_1d((sw, cw))\n            sw = sw * cw\n        else:\n            sw = cw\n        return (x, y, sw)\n    return _class_weights_map_fn", "docstring": "Applies class weighting to a `Dataset`.\n\nThe `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where\n`y` must be a single `Tensor`.\n\nArgs:\nclass_weight: A map where the keys are integer class ids and values are\nthe class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`\n\nReturns:\nA function that can be used with `tf.data.Dataset.map` to apply class\nweighting.", "source": "github-repos"}
{"code": "def get_libstdcpp_version():\n    key = 'libstdcpp_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    if err and FLAGS.debug:\n        print('Error in detecting libstdc++ version:\\n %s' % str(err))\n    ver = out.split(b'_')[-1].replace(b'\\n', b'')\n    return ver", "docstring": "Retrieves version of libstdc++ detected.\n\nReturns:\nString that is the version of libstdc++.\ne.g. '3.4.25'", "source": "github-repos"}
{"code": "def Collect(\n      self, knowledge_base, artifact_definition, searcher, file_system):\n    \n    for source in artifact_definition.sources:\n      if source.type_indicator not in (\n          artifact_definitions.TYPE_INDICATOR_FILE,\n          artifact_definitions.TYPE_INDICATOR_PATH):\n        continue\n\n      for path in source.paths:\n        \n        \n        path_segments = path.split(source.separator)\n\n        find_spec = file_system_searcher.FindSpec(\n            location_glob=path_segments[1:], case_sensitive=False)\n\n        for path_specification in searcher.Find(find_specs=[find_spec]):\n          self._ParsePathSpecification(\n              knowledge_base, searcher, file_system, path_specification,\n              source.separator)", "docstring": "Collects values using a file artifact definition.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nartifact_definition (artifacts.ArtifactDefinition): artifact definition.\nsearcher (dfvfs.FileSystemSearcher): file system searcher to preprocess\nthe file system.\nfile_system (dfvfs.FileSystem): file system to be preprocessed.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def _run_graph_for_calibration_graph_mode(model_dir: str, tags: Collection[str], representative_dataset_map: rd.RepresentativeDatasetMapping) -> None:\n    _replace_tensors_by_numpy_ndarrays(representative_dataset_map)\n    with ops.Graph().as_default(), session.Session() as sess:\n        meta_graph: meta_graph_pb2.MetaGraphDef = loader_impl.load(sess, tags, export_dir=model_dir)\n        for signature_key, repr_ds in representative_dataset_map.items():\n            sig_def = meta_graph.signature_def[signature_key]\n            try:\n                _run_function_for_calibration_graph_mode(sess, signature_def=sig_def, representative_dataset=repr_ds)\n            except Exception as ex:\n                raise ValueError(f'Failed to run representative dataset through the function with the signature key: {signature_key}.') from ex", "docstring": "Runs the graph for calibration in graph mode.\n\nThis function assumes _graph mode_ (used when legacy TF1 is used or when eager\nmode is explicitly disabled) when running the graph. This step is used in\norder to collect the statistics in CustomAggregatorOp for quantization using\nthe representative dataset for the actual data provided for inference.\n\nArgs:\nmodel_dir: Path to SavedModel directory.\ntags: Collection of tags identifying the MetaGraphDef within the SavedModel.\nrepresentative_dataset_map: A map where signature keys are mapped to\ncorresponding representative datasets.\n\nRaises:\nValueError: When running the function with the representative dataset fails.", "source": "github-repos"}
{"code": "def build_polygon_dict(self, path, stroke_color='\n    if (not isinstance(path, list)):\n        raise AttributeError('To build a map path a list of dictionaries of latitude and logitudes is required')\n    polygon = {'path': path, 'stroke_color': stroke_color, 'stroke_opacity': stroke_opacity, 'stroke_weight': stroke_weight, 'fill_color': fill_color, 'fill_opacity': fill_opacity}\n    return polygon", "docstring": "Set a dictionary with the javascript class Polygon parameters\n\nThis function sets a default drawing configuration if the user just\npass the polygon path, but also allows to set each parameter\nindividually if the user wish so.\n\nArgs:\npath (list): A list of latitude and longitude point for the polygon\nstroke_color (str): Sets the color of the polygon border using\nhexadecimal color notation\nstroke_opacity (float): Sets the opacity of the polygon border\nin percentage. If stroke_opacity = 0, the border is transparent\nstroke_weight (int): Sets the stroke girth in pixels.\n\nfill_color (str): Sets the color of the polygon fill using\nhexadecimal color notation\nfill_opacity (float): Sets the opacity of the polygon fill", "source": "codesearchnet"}
{"code": "def concat_urls(*urls):\n    \n    normalized_urls = filter(bool, [url.strip('/') for url in urls])\n    joined_urls = '/'.join(normalized_urls)\n    if not joined_urls:\n        return '/'\n    return '/{}/'.format(joined_urls)", "docstring": "Concat Urls\nArgs:\n*args: (str)\n\nReturns:\nstr: urls starting and ending with / merged with /", "source": "juraj-google-style"}
{"code": "def index_impute2(fn):\n    \n    logger.info(\"Indexing {} (IMPUTE2)\".format(fn))\n    impute2_index(fn, cols=[0, 1, 2], names=[\"chrom\", \"name\", \"pos\"], sep=\" \")\n    logger.info(\"Index generated\")", "docstring": "Indexes an IMPUTE2 file.\n\nArgs:\nfn (str): The name of the IMPUTE2 file.", "source": "juraj-google-style"}
{"code": "class XCLIPVisionEncoder(nn.Module):\n\n    def __init__(self, config: XCLIPConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([XCLIPVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`XCLIPVisionEncoderLayer`].\n\nArgs:\nconfig: XCLIPConfig", "source": "github-repos"}
{"code": "def copy_file_if_newer(src_fs, src_path, dst_fs, dst_path):\n    with manage_fs(src_fs, writeable=False) as _src_fs:\n        with manage_fs(dst_fs, create=True) as _dst_fs:\n            if (_src_fs is _dst_fs):\n                if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):\n                    _src_fs.copy(src_path, dst_path, overwrite=True)\n                    return True\n                else:\n                    return False\n            else:\n                with _src_fs.lock(), _dst_fs.lock():\n                    if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):\n                        copy_file_internal(_src_fs, src_path, _dst_fs, dst_path)\n                        return True\n                    else:\n                        return False", "docstring": "Copy a file from one filesystem to another, checking times.\n\nIf the destination exists, and is a file, it will be first truncated.\nIf both source and destination files exist, the copy is executed only\nif the source file is newer than the destination file. In case\nmodification times of source or destination files are not available,\ncopy is always executed.\n\nArguments:\nsrc_fs (FS or str): Source filesystem (instance or URL).\nsrc_path (str): Path to a file on the source filesystem.\ndst_fs (FS or str): Destination filesystem (instance or URL).\ndst_path (str): Path to a file on the destination filesystem.\n\nReturns:\nbool: `True` if the file copy was executed, `False` otherwise.", "source": "codesearchnet"}
{"code": "def read(name, default=None, allow_none=False, fallback=None):\n    raw_value = environ.get(name)\n    if ((raw_value is None) and (fallback is not None)):\n        if ((not isinstance(fallback, builtins.list)) and (not isinstance(fallback, builtins.tuple))):\n            fallback = [fallback]\n        for fall in fallback:\n            raw_value = environ.get(fall)\n            if (raw_value is not None):\n                break\n    if (raw_value or (raw_value == '')):\n        return raw_value\n    elif ((default is not None) or allow_none):\n        return default\n    else:\n        raise KeyError('Set the \"{0}\" environment variable'.format(name))", "docstring": "Read the raw env value.\n\nRead the raw environment variable or use the default. If the value is not\nfound and no default is set throw an exception.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)\nfallback: A list of fallback env variables to try and read if the primary environment\nvariable is unavailable.", "source": "codesearchnet"}
{"code": "def retry_until_valid_or_limit_reached(method, limit, validation_fn, sleep_s=1, catch_exceptions=()):\n    assert (limit > 0), 'Limit must be greater than 0'\n\n    def _execute_method(helper):\n        try:\n            return method()\n        except catch_exceptions:\n            if (not helper.remaining):\n                raise\n            return None\n    helper = RetryHelper((limit - 1))\n    result = _execute_method(helper)\n    while ((not validation_fn(result)) and helper.retry_if_possible()):\n        time.sleep(sleep_s)\n        result = _execute_method(helper)\n    return result", "docstring": "Executes a method until the retry limit or validation_fn returns True.\n\nThe method is always called once so the effective lower limit for 'limit' is\n1.  Passing in a number less than 1 will still result it the method being\ncalled once.\n\nArgs:\nmethod: The method to execute should take no arguments.\nlimit: The number of times to try this method.  Must be >0.\nvalidation_fn: The validation function called on the function result to\ndetermine whether to keep looping.\nsleep_s: The time to sleep in between invocations.\ncatch_exceptions: Tuple of exception types to catch and count as failures.\nReturns:\nWhatever the method last returned, implicit False would indicate the\nmethod never succeeded.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: Optional[bool]=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [0] * len(token_ids_0) + [1]\n    return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def seek_end(fileobj, offset):\n    \n\n    if offset < 0:\n        raise ValueError\n\n    if get_size(fileobj) < offset:\n        fileobj.seek(0, 0)\n    else:\n        fileobj.seek(-offset, 2)", "docstring": "Like fileobj.seek(-offset, 2), but will not try to go beyond the start\n\nNeeded since file objects from BytesIO will not raise IOError and\nfile objects from open() will raise IOError if going to a negative offset.\nTo make things easier for custom implementations, instead of allowing\nboth behaviors, we just don't do it.\n\nArgs:\nfileobj (fileobj)\noffset (int): how many bytes away from the end backwards to seek to\n\nRaises:\nIOError", "source": "juraj-google-style"}
{"code": "def get_generating_ops(ts):\n    ts = make_list_of_t(ts, allow_graph=False)\n    return [t.op for t in ts]", "docstring": "Return all the generating ops of the tensors in `ts`.\n\nArgs:\nts: a list of `tf.Tensor`\nReturns:\nA list of all the generating `tf.Operation` of the tensors in `ts`.\nRaises:\nTypeError: if `ts` cannot be converted to a list of `tf.Tensor`.", "source": "github-repos"}
{"code": "def dict_head(d, N=5):\n    \n    return {k: d[k] for k in list(d.keys())[:N]}", "docstring": "Return the head of a dictionary. It will be random!\n\nDefault is to return the first 5 key/value pairs in a dictionary.\n\nArgs:\nd: Dictionary to get head.\nN: Number of elements to display.\n\nReturns:\ndict: the first N items of the dictionary.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, position_bias: Optional[torch.Tensor]=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states, attn_weights, _ = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, position_bias=position_bias, output_attentions=output_attentions)\n    hidden_states = self.dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    hidden_states = self.layer_norm(hidden_states)\n    hidden_states = hidden_states + self.feed_forward(hidden_states)\n    hidden_states = self.final_layer_norm(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\ninput to the layer of shape `(batch, seq_len, hidden_size)`\nattention_mask (`torch.FloatTensor`):\nattention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very\nlarge negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(config.encoder_attention_heads,)`.\nposition_bias (`torch.FloatTensor`):\nrelative position embeddings of size `(seq_len, seq_len, hidden_size // encoder_attention_heads)`\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def from_voxels(voxels):\n    \n    dimensions = len(voxels[0])\n\n    for d in range(len(dimensions)):\n        size.append(max([i[d] for i in voxels]))\n\n    result = numpy.zeros(dimensions)\n\n    for v in voxels:\n        result[v] = 1\n\n    return result", "docstring": "Converts a voxel list to an ndarray.\n\nArguments:\nvoxels (tuple[]): A list of coordinates indicating coordinates of\npopulated voxels in an ndarray.\n\nReturns:\nnumpy.ndarray The result of the transformation.", "source": "juraj-google-style"}
{"code": "def read(self, length=0, timeout_ms=None):\n    return self._transport.read(length, timeouts.PolledTimeout.from_millis(timeout_ms))", "docstring": "Reads data from the remote end of this stream.\n\nInternally, this data will have been contained in AdbMessages, but\nusers of streams shouldn't need to care about the transport mechanism.\n\nArgs:\nlength: If provided, the number of bytes to read, otherwise all available\ndata will be returned (at least one byte).\ntimeout_ms: Time to wait for a message to come in for this stream, in\nmilliseconds (or as a PolledTimeout object).\n\nReturns:\nData that was read, or None if the end of the stream was reached.\n\nRaises:\nAdbProtocolError: Received an unexpected wonky non-stream packet (like a\nCNXN ADB message).\nAdbStreamClosedError: The stream is already closed.\nAdbTimeoutError: Timed out waiting for a message.", "source": "codesearchnet"}
{"code": "def std(self):\n    import math\n    chunk_iter = chunks(self.times, self.bestof)\n    times = list(map(min, chunk_iter))\n    mean = (sum(times) / len(times))\n    std = math.sqrt((sum((((t - mean) ** 2) for t in times)) / len(times)))\n    return std", "docstring": "The standard deviation of the best results of each trial.\n\nReturns:\nfloat: standard deviation of measured seconds\n\nNote:\nAs mentioned in the timeit source code, the standard deviation is\nnot often useful. Typically the minimum value is most informative.\n\nExample:\n>>> import math\n>>> self = Timerit(num=10, verbose=1)\n>>> self.call(math.factorial, 50)\n>>> assert self.std() >= 0", "source": "codesearchnet"}
{"code": "def copy_to(self, new_key, bucket=None):\n    \n    if bucket is None:\n      bucket = self._bucket\n    try:\n      new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)\n    except Exception as e:\n      raise e\n    return Item(bucket, new_key, new_info, context=self._context)", "docstring": "Copies this item to the specified new key.\n\nArgs:\nnew_key: the new key to copy this item to.\nbucket: the bucket of the new item; if None (the default) use the same bucket.\nReturns:\nAn Item corresponding to new key.\nRaises:\nException if there was an error copying the item.", "source": "juraj-google-style"}
{"code": "def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1):\n    return loop_until_timeout_or_valid(timeout_s, function, (lambda x: (x is not None)), sleep_s)", "docstring": "Loops until the specified function returns non-None or until a timeout.\n\nArgs:\ntimeout_s: The number of seconds to wait until a timeout condition is\nreached. As a convenience, this accepts None to mean never timeout.  Can\nalso be passed a PolledTimeout object instead of an integer.\nfunction: The function to call each iteration.\nsleep_s: The number of seconds to wait after calling the function.\n\nReturns:\nWhatever the function returned last.", "source": "codesearchnet"}
{"code": "def _get_mean_and_median(hist: Hist) -> Tuple[(float, float)]:\n    x = ctypes.c_double(0)\n    q = ctypes.c_double(0.5)\n    hist.ComputeIntegral()\n    hist.GetQuantiles(1, x, q)\n    mean = hist.GetMean()\n    return (mean, x.value)", "docstring": "Retrieve the mean and median from a ROOT histogram.\n\nNote:\nThese values are not so trivial to calculate without ROOT, as they are the bin values\nweighted by the bin content.\n\nArgs:\nhist: Histogram from which the values will be extract.\nReturns:\nmean, median of the histogram.", "source": "codesearchnet"}
{"code": "def set_value(self, value, timeout):\n        \n        self.value = value\n        self.expiration = time.clock() * 1000 + timeout", "docstring": "Changes the cached value and updates creation time.\n\nArgs:\nvalue: the new cached value.\ntimeout: time to live for the object in milliseconds\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def _generate_ascii(self, matrix, foreground, background):\n    return '\\n'.join([''.join([(foreground if cell else background) for cell in row]) for row in matrix])", "docstring": "Generates an identicon \"image\" in the ASCII format. The image will just\noutput the matrix used to generate the identicon.\n\nArguments:\n\nmatrix - Matrix describing which blocks in the identicon should be\npainted with foreground (background if inverted) colour.\n\nforeground - Character which should be used for representing\nforeground.\n\nbackground - Character which should be used for representing\nbackground.\n\nReturns:\n\nASCII representation of an identicon image, where one block is one\ncharacter.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n        \n        try:\n            self.UIStatusMsg = ''\n            self.mac = kwargs.get('EUI')\n            self.handle = None\n            self.AutoDUTEnable = False\n            self._is_net = False                  \n            self.logStatus = {'stop':'stop', 'running':'running', 'pauseReq':'pauseReq', 'paused':'paused'}\n            self.logThreadStatus = self.logStatus['stop']\n            self.connectType = (kwargs.get('Param5')).strip().lower() if kwargs.get('Param5') is not None else 'usb'\n            if self.connectType == 'ip':\n                self.dutIpv4 = kwargs.get('TelnetIP')\n                self.dutPort = kwargs.get('TelnetPort')\n                self.port = self.dutIpv4 + ':' + self.dutPort\n            else:\n                self.port = kwargs.get('SerialPort')\n            self.intialize()\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('initialize() Error: ' + str(e))", "docstring": "initialize the serial port and default network parameters\nArgs:\n**kwargs: Arbitrary keyword arguments\nIncludes 'EUI' and 'SerialPort'", "source": "juraj-google-style"}
{"code": "def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):\n    batch_size = enc_output.shape[0]\n    proposals = []\n    _cur = 0\n    for level, (height, width) in enumerate(spatial_shapes):\n        mask_flatten_ = padding_mask[:, _cur:_cur + height * width].view(batch_size, height, width, 1)\n        valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n        valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n        grid_y, grid_x = meshgrid(torch.linspace(0, height - 1, height, dtype=enc_output.dtype, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=enc_output.dtype, device=enc_output.device), indexing='ij')\n        grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n        scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)\n        grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale\n        width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level\n        proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)\n        proposals.append(proposal)\n        _cur += height * width\n    output_proposals = torch.cat(proposals, 1)\n    output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n    output_proposals = torch.log(output_proposals / (1 - output_proposals))\n    output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))\n    output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n    object_query = enc_output\n    object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))\n    object_query = object_query.masked_fill(~output_proposals_valid, float(0))\n    object_query = self.enc_output_norm(self.enc_output(object_query))\n    return (object_query, output_proposals)", "docstring": "Generate the encoder output proposals from encoded enc_output.\n\nArgs:\nenc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.\npadding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.\nspatial_shapes (List[Tuple[int, int]]): Spatial shapes of the feature maps.\n\nReturns:\n`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.\n- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to\ndirectly predict a bounding box. (without the need of a decoder)\n- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse\nsigmoid.", "source": "github-repos"}
{"code": "def delete(self):\n    clone = copy.deepcopy(self)\n    return [(item.delete() and item) for item in clone]", "docstring": "Deletes all objects that matches to the queryset.\n\nNote:\nUnlike RDBMS systems, this method makes individual save calls\nto backend DB store. So this is exists as more of a comfortable\nutility method and not a performance enhancement.\n\nReturns:\nList of deleted objects or None if *confirm* not set.\n\nExample:\n>>> Person.objects.filter(age__gte=16, name__startswith='jo').delete()", "source": "codesearchnet"}
{"code": "def plot_compare(self, other_plotter):\n        \n\n        data_orig = self.bs_plot_data()\n        data = other_plotter.bs_plot_data()\n\n        if len(data_orig['distances']) != len(data['distances']):\n            raise ValueError('The two objects are not compatible.')\n\n        plt = self.get_plot()\n        band_linewidth = 1\n        for i in range(other_plotter._nb_bands):\n            for d in range(len(data_orig['distances'])):\n                plt.plot(data_orig['distances'][d],\n                         [e[i] for e in data['frequency']][d],\n                         'r-', linewidth=band_linewidth)\n\n        return plt", "docstring": "plot two band structure for comparison. One is in red the other in blue.\nThe two band structures need to be defined on the same symmetry lines!\nand the distance between symmetry lines is\nthe one of the band structure used to build the PhononBSPlotter\n\nArgs:\nanother PhononBSPlotter object defined along the same symmetry lines\n\nReturns:\na matplotlib object with both band structures", "source": "juraj-google-style"}
{"code": "def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):\n    self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())\n    self.assertProtoEquals(expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())\n    self.assertProtoEquals(expected_proto, server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())\n    self.assertProtoEquals(expected_proto, server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())", "docstring": "Verifies that the ClusterSpec generates the correct proto.\n\nWe are testing this four different ways to ensure that the ClusterSpec\nreturned by the TPUClusterResolver behaves identically to a normal\nClusterSpec when passed into the generic ClusterSpec libraries.\n\nArgs:\ncluster_spec: ClusterSpec returned by the TPUClusterResolver\nexpected_proto: Expected protobuf", "source": "github-repos"}
{"code": "def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None):\n    \n    LOG.info('Create custom IAM Policy for %s.', app)\n\n    services = pipeline_settings.get('services', {})\n    LOG.debug('Found requested services: %s', services)\n\n    services = auto_service(pipeline_settings=pipeline_settings, services=services)\n\n    if services:\n        credential = get_env_credential(env=env)\n        account_number = credential['accountId']\n\n    statements = []\n    for service, value in services.items():\n        if value is True:\n            items = []\n        elif isinstance(value, str):\n            items = [value]\n        else:\n            items = value\n\n        rendered_statements = render_policy_template(\n            account_number=account_number,\n            app=app,\n            env=env,\n            group=group,\n            items=items,\n            pipeline_settings=pipeline_settings,\n            region=region,\n            service=service)\n\n        statements.extend(rendered_statements)\n\n    if statements:\n        policy_json = get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements))\n    else:\n        LOG.info('No services defined for %s.', app)\n        policy_json = None\n\n    return policy_json", "docstring": "Assemble IAM Policy for _app_.\n\nArgs:\napp (str): Name of Spinnaker Application.\nenv (str): Environment/Account in AWS\ngroup (str):A Application group/namespace\nregion (str): AWS region\npipeline_settings (dict): Settings from *pipeline.json*.\n\nReturns:\njson: Custom IAM Policy for _app_.\nNone: When no *services* have been defined in *pipeline.json*.", "source": "juraj-google-style"}
{"code": "def _RDFClass(cls, table):\n    \n    rdf_cls_name = \"OsqueryTable{}\".format(hash(table.query))\n    try:\n      return cls._rdf_cls_cache[rdf_cls_name]\n    except KeyError:\n      pass\n\n    rdf_cls = compatibility.MakeType(rdf_cls_name,\n                                     (rdf_structs.RDFProtoStruct,), {})\n\n    rdf_cls.AddDescriptor(\n        rdf_structs.ProtoEmbedded(\n            name=\"metadata\", field_number=1, nested=ExportedMetadata))\n\n    rdf_cls.AddDescriptor(\n        rdf_structs.ProtoString(name=\"__query__\", field_number=2))\n\n    for idx, column in enumerate(table.header.columns):\n      \n      \n      if column.name == \"metadata\":\n        name = \"__metadata__\"\n      else:\n        name = column.name\n\n      descriptor = rdf_structs.ProtoString(name=name, field_number=idx + 3)\n      rdf_cls.AddDescriptor(descriptor)\n\n    cls._rdf_cls_cache[rdf_cls_name] = rdf_cls\n    return rdf_cls", "docstring": "Creates a dynamic RDF proto struct class for given osquery table.\n\nThe fields of the proto will correspond to the columns of the table.\n\nArgs:\ntable: An osquery table for which the class is about to be generated.\n\nReturns:\nA class object corresponding to the given table.", "source": "juraj-google-style"}
{"code": "def QA_fetch_user(user_cookie, db=DATABASE):\n    \n    collection = DATABASE.account\n\n    return [res for res in collection.find({'user_cookie': user_cookie}, {\"_id\": 0})]", "docstring": "get the user\n\nArguments:\nuser_cookie : str the unique cookie_id for a user\nKeyword Arguments:\ndb: database for query\n\nReturns:\nlist ---  [ACCOUNT]", "source": "juraj-google-style"}
{"code": "def load_chkpt_vars(model_path):\n    \n    model_path = get_checkpoint_path(model_path)\n    reader = tfv1.train.NewCheckpointReader(model_path)\n    var_names = reader.get_variable_to_shape_map().keys()\n    result = {}\n    for n in var_names:\n        result[n] = reader.get_tensor(n)\n    return result", "docstring": "Load all variables from a checkpoint to a dict.\n\nArgs:\nmodel_path(str): path to a checkpoint.\n\nReturns:\ndict: a name:value dict", "source": "juraj-google-style"}
{"code": "def unstack(df, level=(- 1), reset_index=True):\n    df = df.unstack(level=level)\n    if reset_index:\n        df = df.reset_index()\n        df.columns = df.columns.map(_join_names)\n    return df", "docstring": "pd.DataFrame.unstack adapter.\n\nCall the `df.unstack` method using the indicated level and afterwards\njoin the column names using an underscore.\n\nArgs:\ndf (pandas.DataFrame): DataFrame to unstack.\nlevel (str, int or list): Level(s) of index to unstack, can pass level name\nreset_index (bool): Whether to reset the index after unstacking\n\nReturns:\npandas.Dataframe: unstacked dataframe", "source": "codesearchnet"}
{"code": "def Mean(self):\n    old_p = 0\n    total = 0.0\n    for (x, new_p) in zip(self.xs, self.ps):\n        p = (new_p - old_p)\n        total += (p * x)\n        old_p = new_p\n    return total", "docstring": "Computes the mean of a CDF.\n\nReturns:\nfloat mean", "source": "codesearchnet"}
{"code": "def speed_info(self):\n        \n        speed_info = structs.JLinkSpeedInfo()\n        self._dll.JLINKARM_GetSpeedInfo(ctypes.byref(speed_info))\n        return speed_info", "docstring": "Retrieves information about supported target interface speeds.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe ``JLinkSpeedInfo`` instance describing the supported target\ninterface speeds.", "source": "juraj-google-style"}
{"code": "def value_of(\n            self,\n            value: Union[sympy.Basic, float, str]\n    ) -> Union[sympy.Basic, float]:\n        \n        if isinstance(value, str):\n            return self.param_dict.get(value, sympy.Symbol(value))\n        if isinstance(value, sympy.Basic):\n            if sys.version_info.major < 3:\n                \n                \n                d = {k.encode(): v for k, v in self.param_dict.items()}\n                v = value.subs(d)\n            else:\n                v = value.subs(self.param_dict)\n            return v if v.free_symbols else float(v)\n        return value", "docstring": "Attempt to resolve a Symbol or name or float to its assigned value.\n\nIf unable to resolve a sympy.Symbol, returns it unchanged.\nIf unable to resolve a name, returns a sympy.Symbol with that name.\n\nArgs:\nvalue: The sympy.Symbol or name or float to try to resolve into just\na float.\n\nReturns:\nThe value of the parameter as resolved by this resolver.", "source": "juraj-google-style"}
{"code": "def add_features(self, features, append=True, merge='outer', duplicates='ignore', min_studies=0.0, threshold=0.001):\n    if ((not append) or (not hasattr(self, 'feature_table'))):\n        self.feature_table = FeatureTable(self)\n    self.feature_table.add_features(features, merge=merge, duplicates=duplicates, min_studies=min_studies, threshold=threshold)", "docstring": "Construct a new FeatureTable from file.\n\nArgs:\nfeatures: Feature data to add. Can be:\n(a) A text file containing the feature data, where each row is\na study in the database, with features in columns. The first\ncolumn must contain the IDs of the studies to match up with the\nimage data.\n(b) A pandas DataFrame, where studies are in rows, features are\nin columns, and the index provides the study IDs.\nappend (bool): If True, adds new features to existing ones\nincrementally. If False, replaces old features.\nmerge, duplicates, min_studies, threshold: Additional arguments\npassed to FeatureTable.add_features().", "source": "codesearchnet"}
{"code": "def get_flat_tensor_specs(element_spec):\n    return list(itertools.chain.from_iterable((spec._flat_tensor_specs for spec in nest.flatten(element_spec))))", "docstring": "Returns a list `tf.TypeSpec`s for the element tensor representation.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\n\nReturns:\nA list `tf.TypeSpec`s for the element tensor representation.", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.bos_token_id + token_ids_0 + self.eos_token_id\n    return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. The special tokens depend on calling set_lang.\n\nAn NLLB sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def __init__(self, value=HashingAlgorithmEnum.SHA_256):\n        \n        super(HashingAlgorithm, self).__init__(\n            enums.HashingAlgorithm, value, Tags.HASHING_ALGORITHM)", "docstring": "Construct a HashingAlgorithm object.\n\nArgs:\nvalue (HashingAlgorithm): A HashingAlgorithm enumeration value,\n(e.g., HashingAlgorithm.MD5). Optional, defaults to\nHashingAlgorithm.SHA_256.", "source": "juraj-google-style"}
{"code": "async def _create_remote_user(self, **payload):\n        \n        \n        read_action = get_crud_action(method='create', model='user')\n\n        \n        user_data = await self.event_broker.ask(\n            action_type=read_action,\n            payload=payload\n        )\n        \n        return json.loads(user_data)", "docstring": "This method creates a service record in the remote user service\nwith the given email.\nArgs:\nuid (str): the user identifier to create\nReturns:\n(dict): a summary of the user that was created", "source": "juraj-google-style"}
{"code": "def _init_metadata_service(self, version):\n    metadata_cfg = self._load_config_section(CONFIG_METADATA_SECTION)\n    self._token_metadata = metadata_cfg[CONFIG_TOKEN]\n    proto = metadata_cfg[CONFIG_PROTOCOL]\n    host = metadata_cfg[CONFIG_HOST]\n    self._metadata = MetadataService(host, version)\n    self._metadata.base_protocol = proto\n    self._metadata.set_auth(self._token_metadata)", "docstring": "Method to initialize the Metadata Service from the config data\n\nArgs:\nversion (string): Version of Boss API to use.\n\nReturns:\nNone\n\nRaises:\n(KeyError): if given invalid version.", "source": "codesearchnet"}
{"code": "class SeamlessM4TProcessor(ProcessorMixin):\n    feature_extractor_class = 'SeamlessM4TFeatureExtractor'\n    tokenizer_class = ('SeamlessM4TTokenizer', 'SeamlessM4TTokenizerFast')\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n\n    def __call__(self, text=None, audios=None, src_lang=None, tgt_lang=None, **kwargs):\n        \n        sampling_rate = kwargs.pop('sampling_rate', None)\n        if text is None and audios is None:\n            raise ValueError('You have to specify either text or audios. Both cannot be none.')\n        elif text is not None and audios is not None:\n            raise ValueError('Text and audios are mututally exclusive when passed to `SeamlessM4T`. Specify one or another.')\n        elif text is not None:\n            if tgt_lang is not None:\n                self.tokenizer.tgt_lang = tgt_lang\n            if src_lang is not None:\n                self.tokenizer.src_lang = src_lang\n            encoding = self.tokenizer(text, **kwargs)\n            return encoding\n        else:\n            encoding = self.feature_extractor(audios, sampling_rate=sampling_rate, **kwargs)\n            return encoding\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        feature_extractor_input_names = self.feature_extractor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))", "docstring": "Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a\nsingle processor.\n\n[`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and\n[`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for\nmore information.\n\nArgs:\nfeature_extractor ([`SeamlessM4TFeatureExtractor`]):\nThe audio processor is a required input.\ntokenizer ([`SeamlessM4TTokenizerFast`]):\nThe tokenizer is a required input.", "source": "github-repos"}
{"code": "def _update_task(self, task):\n        \n        self.task = task\n        self.task.data.update(self.task_data)\n        self.task_type = task.task_spec.__class__.__name__\n        self.spec = task.task_spec\n        self.task_name = task.get_name()\n        self.activity = getattr(self.spec, 'service_class', '')\n        self._set_lane_data()", "docstring": "Assigns current task step to self.task\nthen updates the task's data with self.task_data\n\nArgs:\ntask: Task object.", "source": "juraj-google-style"}
{"code": "def update_context(self, context, app=None):\n    if ((app is None) and (self._context is _CONTEXT_MISSING) and (not in_app_context())):\n        raise RuntimeError('Attempted to update component context without a bound app context or eager app set! Please pass the related app you want to update the context for!')\n    if (self._context is not _CONTEXT_MISSING):\n        self._context = ImmutableDict(context)\n    else:\n        key = self._get_context_name(app=app)\n        setattr(_CONTEXT_LOCALS, key, ImmutableDict(context))", "docstring": "Replace the component's context with a new one.\n\nArgs:\ncontext (dict): The new context to set this component's context to.\n\nKeyword Args:\napp (flask.Flask, optional): The app to update this context for. If\nnot provided, the result of ``Component.app`` will be used.", "source": "codesearchnet"}
{"code": "def _term(self, term):\n    term = str(term)\n    if term:\n        self.__query['q'] += term\n    return self", "docstring": "Add a term to the query.\n\nArguments:\nterm (str): The term to add.\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def subtract(inputs, **kwargs):\n    return Subtract(**kwargs)(inputs)", "docstring": "Functional interface to the `Subtract` layer.\n\nArgs:\ninputs: A list of input tensors (exactly 2).\n**kwargs: Standard layer keyword arguments.\n\nReturns:\nA tensor, the difference of the inputs.\n\nExamples:\n\n```python\nimport keras\n\ninput1 = keras.layers.Input(shape=(16,))\nx1 = keras.layers.Dense(8, activation='relu')(input1)\ninput2 = keras.layers.Input(shape=(32,))\nx2 = keras.layers.Dense(8, activation='relu')(input2)\nsubtracted = keras.layers.subtract([x1, x2])\n\nout = keras.layers.Dense(4)(subtracted)\nmodel = keras.models.Model(inputs=[input1, input2], outputs=out)\n```", "source": "github-repos"}
{"code": "def _update_explicit_bucket_count(a_float, dist):\n    buckets = dist.explicitBuckets\n    if (buckets is None):\n        raise ValueError((_BAD_UNSET_BUCKETS % u'explicit buckets'))\n    bucket_counts = dist.bucketCounts\n    bounds = buckets.bounds\n    if (len(bucket_counts) < (len(bounds) + 1)):\n        raise ValueError(_BAD_LOW_BUCKET_COUNT)\n    bucket_counts[bisect.bisect(bounds, a_float)] += 1", "docstring": "Adds `a_float` to `dist`, updating its explicit buckets.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated\n\nRaises:\nValueError: if `dist` does not already have explict buckets defined\nValueError: if there are not enough bucket count fields in `dist`", "source": "codesearchnet"}
{"code": "def eval(self, feed_dict=None, session=None):\n    return _eval_using_default_session(self, feed_dict, self.graph, session)", "docstring": "Evaluates this tensor in a `Session`.\n\nNote: If you are not using `compat.v1` libraries, you should not need this,\n(or `feed_dict` or `Session`).  In eager execution (or within `tf.function`)\nyou do not need to call `eval`.\n\nCalling this method will execute all preceding operations that\nproduce the inputs needed for the operation that produces this\ntensor.\n\n*N.B.* Before invoking `Tensor.eval()`, its graph must have been\nlaunched in a session, and either a default session must be\navailable, or `session` must be specified explicitly.\n\nArgs:\nfeed_dict: A dictionary that maps `Tensor` objects to feed values. See\n`tf.Session.run` for a description of the valid feed values.\nsession: (Optional.) The `Session` to be used to evaluate this tensor. If\nnone, the default session will be used.\n\nReturns:\nA numpy array corresponding to the value of this tensor.", "source": "github-repos"}
{"code": "def clear_events(self, event_name):\n        \n        self.lock.acquire()\n        try:\n            q = self.get_event_q(event_name)\n            q.queue.clear()\n        except queue.Empty:\n            return\n        finally:\n            self.lock.release()", "docstring": "Clear all events of a particular name.\n\nArgs:\nevent_name: Name of the events to be popped.", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists previously requested builds. Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully.\n\nArgs:\nrequest: (CloudbuildProjectsBuildsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListBuildsResponse) The response message.", "source": "github-repos"}
{"code": "def get_statistics(self, id_or_uri, port_name=''):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/statistics\"\n\n        if port_name:\n            uri = uri + \"/\" + port_name\n\n        return self._client.get(uri)", "docstring": "Gets the statistics from an interconnect.\n\nArgs:\nid_or_uri:  Can be either the interconnect id or the interconnect uri.\nport_name (str): A specific port name of an interconnect.\n\nReturns:\ndict: The statistics for the interconnect that matches id.", "source": "juraj-google-style"}
{"code": "def CheckKeyCompatibility(cls, key_path):\n    \n    key_path_upper = key_path.upper()\n    for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:\n      if key_path_upper.startswith(key_path_prefix):\n        return True\n\n    logger.warning('Key path: \"{0:s}\" is currently not supported'.format(\n        key_path))\n    return False", "docstring": "Checks if a Windows Registry key path is supported by dfWinReg.\n\nArgs:\nkey_path (str): path of the Windows Registry key.\n\nReturns:\nbool: True if key is compatible or False if not.", "source": "juraj-google-style"}
{"code": "def previous_weekday(date):\n    weekday = date.weekday()\n    if (weekday == 0):\n        n_days = 3\n    elif (weekday == 6):\n        n_days = 2\n    else:\n        n_days = 1\n    return (date - datetime.timedelta(days=n_days))", "docstring": "Returns the last weekday before date\n\nArgs:\ndate (datetime or datetime.date)\nReturns:\n(datetime or datetime.date)\nRaises:\n-", "source": "codesearchnet"}
{"code": "def add_filter(ds, patterns):\n    if (not plugins.is_datasource(ds)):\n        raise Exception('Filters are applicable only to datasources.')\n    delegate = dr.get_delegate(ds)\n    if delegate.raw:\n        raise Exception(\"Filters aren't applicable to raw datasources.\")\n    if (not delegate.filterable):\n        raise Exception((\"Filters aren't applicable to %s.\" % dr.get_name(ds)))\n    if (ds in _CACHE):\n        del _CACHE[ds]\n    if isinstance(patterns, six.string_types):\n        FILTERS[ds].add(patterns)\n    elif isinstance(patterns, list):\n        FILTERS[ds] |= set(patterns)\n    elif isinstance(patterns, set):\n        FILTERS[ds] |= patterns\n    else:\n        raise TypeError('patterns must be string, list, or set.')", "docstring": "Add a filter or list of filters to a datasource. A filter is a simple\nstring, and it matches if it is contained anywhere within a line.\n\nArgs:\nds (@datasource component): The datasource to filter\npatterns (str, [str]): A string, list of strings, or set of strings to\nadd to the datasource's filters.", "source": "codesearchnet"}
{"code": "def put(self, closure, tag=None):\n    closure.tag = tag\n    if tag is not None:\n        with self._queue_lock:\n            self._tagged_queue[tag].put(closure, block=False)\n            self._closures_queued_condition.notify_all()\n    else:\n        with self._put_wait_lock, self._queue_lock:\n            self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())\n            self._queue.put(closure, block=False)\n            metric_utils.monitor_int('queued_closures', self._queue.qsize())\n            self._raise_if_error()\n            self._closures_queued_condition.notify()", "docstring": "Put a closure into the queue for later execution.\n\nIf `mark_failed` was called before `put`, the error from the first\ninvocation of `mark_failed` will be raised.\n\nArgs:\nclosure: The `Closure` to put into the queue.\ntag: if not None, put into a queue with the given tag.", "source": "github-repos"}
{"code": "def pymmh3_hash128_x86(key: Union[bytes, bytearray], seed: int) -> int:\n    \n\n    def fmix(h):\n        h ^= h >> 16\n        h = (h * 0x85ebca6b) & 0xFFFFFFFF\n        h ^= h >> 13\n        h = (h * 0xc2b2ae35) & 0xFFFFFFFF\n        h ^= h >> 16\n        return h\n\n    length = len(key)\n    nblocks = int(length / 16)\n\n    h1 = seed\n    h2 = seed\n    h3 = seed\n    h4 = seed\n\n    c1 = 0x239b961b\n    c2 = 0xab0e9789\n    c3 = 0x38b34ae5\n    c4 = 0xa1e38b93\n\n    \n    for block_start in range(0, nblocks * 16, 16):\n        k1 = (\n            key[block_start + 3] << 24 |\n            key[block_start + 2] << 16 |\n            key[block_start + 1] << 8 |\n            key[block_start + 0]\n        )\n        k2 = (\n            key[block_start + 7] << 24 |\n            key[block_start + 6] << 16 |\n            key[block_start + 5] << 8 |\n            key[block_start + 4]\n        )\n        k3 = (\n            key[block_start + 11] << 24 |\n            key[block_start + 10] << 16 |\n            key[block_start + 9] << 8 |\n            key[block_start + 8]\n        )\n        k4 = (\n            key[block_start + 15] << 24 |\n            key[block_start + 14] << 16 |\n            key[block_start + 13] << 8 |\n            key[block_start + 12]\n        )\n\n        k1 = (c1 * k1) & 0xFFFFFFFF\n        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  \n        k1 = (c2 * k1) & 0xFFFFFFFF\n        h1 ^= k1\n\n        h1 = (h1 << 19 | h1 >> 13) & 0xFFFFFFFF  \n        h1 = (h1 + h2) & 0xFFFFFFFF\n        h1 = (h1 * 5 + 0x561ccd1b) & 0xFFFFFFFF\n\n        k2 = (c2 * k2) & 0xFFFFFFFF\n        k2 = (k2 << 16 | k2 >> 16) & 0xFFFFFFFF  \n        k2 = (c3 * k2) & 0xFFFFFFFF\n        h2 ^= k2\n\n        h2 = (h2 << 17 | h2 >> 15) & 0xFFFFFFFF  \n        h2 = (h2 + h3) & 0xFFFFFFFF\n        h2 = (h2 * 5 + 0x0bcaa747) & 0xFFFFFFFF\n\n        k3 = (c3 * k3) & 0xFFFFFFFF\n        k3 = (k3 << 17 | k3 >> 15) & 0xFFFFFFFF  \n        k3 = (c4 * k3) & 0xFFFFFFFF\n        h3 ^= k3\n\n        h3 = (h3 << 15 | h3 >> 17) & 0xFFFFFFFF  \n        h3 = (h3 + h4) & 0xFFFFFFFF\n        h3 = (h3 * 5 + 0x96cd1c35) & 0xFFFFFFFF\n\n        k4 = (c4 * k4) & 0xFFFFFFFF\n        k4 = (k4 << 18 | k4 >> 14) & 0xFFFFFFFF  \n        k4 = (c1 * k4) & 0xFFFFFFFF\n        h4 ^= k4\n\n        h4 = (h4 << 13 | h4 >> 19) & 0xFFFFFFFF  \n        h4 = (h1 + h4) & 0xFFFFFFFF\n        h4 = (h4 * 5 + 0x32ac3b17) & 0xFFFFFFFF\n\n    \n    tail_index = nblocks * 16\n    k1 = 0\n    k2 = 0\n    k3 = 0\n    k4 = 0\n    tail_size = length & 15\n\n    if tail_size >= 15:\n        k4 ^= key[tail_index + 14] << 16\n    if tail_size >= 14:\n        k4 ^= key[tail_index + 13] << 8\n    if tail_size >= 13:\n        k4 ^= key[tail_index + 12]\n\n    if tail_size > 12:\n        k4 = (k4 * c4) & 0xFFFFFFFF\n        k4 = (k4 << 18 | k4 >> 14) & 0xFFFFFFFF  \n        k4 = (k4 * c1) & 0xFFFFFFFF\n        h4 ^= k4\n\n    if tail_size >= 12:\n        k3 ^= key[tail_index + 11] << 24\n    if tail_size >= 11:\n        k3 ^= key[tail_index + 10] << 16\n    if tail_size >= 10:\n        k3 ^= key[tail_index + 9] << 8\n    if tail_size >= 9:\n        k3 ^= key[tail_index + 8]\n\n    if tail_size > 8:\n        k3 = (k3 * c3) & 0xFFFFFFFF\n        k3 = (k3 << 17 | k3 >> 15) & 0xFFFFFFFF  \n        k3 = (k3 * c4) & 0xFFFFFFFF\n        h3 ^= k3\n\n    if tail_size >= 8:\n        k2 ^= key[tail_index + 7] << 24\n    if tail_size >= 7:\n        k2 ^= key[tail_index + 6] << 16\n    if tail_size >= 6:\n        k2 ^= key[tail_index + 5] << 8\n    if tail_size >= 
5:\n        k2 ^= key[tail_index + 4]\n\n    if tail_size > 4:\n        k2 = (k2 * c2) & 0xFFFFFFFF\n        k2 = (k2 << 16 | k2 >> 16) & 0xFFFFFFFF  \n        k2 = (k2 * c3) & 0xFFFFFFFF\n        h2 ^= k2\n\n    if tail_size >= 4:\n        k1 ^= key[tail_index + 3] << 24\n    if tail_size >= 3:\n        k1 ^= key[tail_index + 2] << 16\n    if tail_size >= 2:\n        k1 ^= key[tail_index + 1] << 8\n    if tail_size >= 1:\n        k1 ^= key[tail_index + 0]\n\n    if tail_size > 0:\n        k1 = (k1 * c1) & 0xFFFFFFFF\n        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  \n        k1 = (k1 * c2) & 0xFFFFFFFF\n        h1 ^= k1\n\n    \n    h1 ^= length\n    h2 ^= length\n    h3 ^= length\n    h4 ^= length\n\n    h1 = (h1 + h2) & 0xFFFFFFFF\n    h1 = (h1 + h3) & 0xFFFFFFFF\n    h1 = (h1 + h4) & 0xFFFFFFFF\n    h2 = (h1 + h2) & 0xFFFFFFFF\n    h3 = (h1 + h3) & 0xFFFFFFFF\n    h4 = (h1 + h4) & 0xFFFFFFFF\n\n    h1 = fmix(h1)\n    h2 = fmix(h2)\n    h3 = fmix(h3)\n    h4 = fmix(h4)\n\n    h1 = (h1 + h2) & 0xFFFFFFFF\n    h1 = (h1 + h3) & 0xFFFFFFFF\n    h1 = (h1 + h4) & 0xFFFFFFFF\n    h2 = (h1 + h2) & 0xFFFFFFFF\n    h3 = (h1 + h3) & 0xFFFFFFFF\n    h4 = (h1 + h4) & 0xFFFFFFFF\n\n    return h4 << 96 | h3 << 64 | h2 << 32 | h1", "docstring": "Implements 128-bit murmur3 hash for x86, as per ``pymmh3``, with some\nbugfixes.\n\nArgs:\nkey: data to hash\nseed: seed\n\nReturns:\ninteger hash", "source": "juraj-google-style"}
{"code": "def GetCampaigns(self, client_customer_id):\n    \n    self.client.SetClientCustomerId(client_customer_id)\n    \n    \n    \n    max_tries = 3\n    today = time.strftime('%Y%m%d', time.localtime())\n    for i in xrange(1, max_tries + 1):\n      try:\n        selector = {\n            'fields': ['Id', 'Name', 'Status', 'BudgetId', 'Amount'],\n            'predicates': [\n                {\n                    'field': 'Status',\n                    'operator': 'NOT_EQUALS',\n                    'values': ['REMOVED']\n                }\n            ],\n            'dateRange': {\n                'min': today,\n                'max': today\n            }\n        }\n        campaigns = self.client.GetService('CampaignService').get(selector)\n        if int(campaigns['totalNumEntries']) > 0:\n          return campaigns['entries']\n        else:\n          return None\n      except Exception, e:\n        if i == max_tries:\n          raise GoogleAdsError(e)\n        continue", "docstring": "Returns a client account's Campaigns that haven't been removed.\n\nArgs:\nclient_customer_id: str Client Customer Id used to retrieve Campaigns.\n\nReturns:\nlist List of Campaign data objects.", "source": "juraj-google-style"}
{"code": "def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting reduce_sum ...')\n\n    keepdims = params['keepdims'] > 0\n    axis = params['axes']\n\n    def target_layer(x, keepdims=keepdims, axis=axis):\n        import keras.backend as K\n        return K.sum(x, keepdims=keepdims, axis=axis)\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert reduce_sum layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def create_pipeline(gcp_project_id, region, pipeline_name, pipeline_root, csv_file, module_file, beam_runner, metadata_file):\n    example_gen = tfx.components.CsvExampleGen(input_base=csv_file)\n    statistics_gen = tfx.components.StatisticsGen(examples=example_gen.outputs['examples'])\n    schema_gen = tfx.components.SchemaGen(statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)\n    transform = tfx.components.Transform(examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=module_file)\n    trainer = tfx.components.Trainer(module_file=module_file, examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'])\n    components = [example_gen, statistics_gen, schema_gen, transform, trainer]\n    beam_pipeline_args_by_runner = {'DirectRunner': [], 'DataflowRunner': ['--runner=DataflowRunner', '--project=' + gcp_project_id, '--temp_location=' + os.path.join(pipeline_root, 'tmp'), '--region=' + region]}\n    return tfx.dsl.Pipeline(pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components, enable_cache=True, metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(metadata_file), beam_pipeline_args=beam_pipeline_args_by_runner[beam_runner])", "docstring": "Create the TFX pipeline.\n\nArgs:\ngcp_project_id (str): ID for the google cloud project to deploy the pipeline to.\nregion (str): Region in which to deploy the pipeline.\npipeline_name (str): Name for the Beam pipeline\npipeline_root (str): Path to artifact repository where TFX\nstores a pipeline’s artifacts.\ncsv_file (str): Path to the csv input file.\nmodule_file (str): Path to module file containing the preprocessing_fn and run_fn.\nbeam_runner (str): Beam runner: DataflowRunner or DirectRunner.\nmetadata_file (str): Path to store a metadata file as a mock metadata database.", "source": "github-repos"}
{"code": "def bool(name, execute_bool=True, default=None):\n    \n    def wrapped(func):\n        @functools.wraps(func)\n        def _decorator(*args, **kwargs):\n            if core.isset(name) and core.bool(name) == execute_bool:\n                return func(*args, **kwargs)\n            elif default is not None and default == execute_bool:\n                return func(*args, **kwargs)\n        return _decorator\n    return wrapped", "docstring": "Only execute the function if the boolean variable is set.\n\nArgs:\nname: The name of the environment variable\nexecute_bool: The boolean value to execute the function on\ndefault: The default value if the environment variable is not set (respects `execute_bool`)\n\nReturns:\nThe function return value or `None` if the function was skipped.", "source": "juraj-google-style"}
{"code": "def get_subscription_from_cli(name=None):\n    \n    home = os.path.expanduser('~')\n    azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json'\n    if os.path.isfile(azure_profile_path) is False:\n        print('Error from get_subscription_from_cli(): Cannot find ' +\n              azure_profile_path)\n        return None\n    with io.open(azure_profile_path, 'r', encoding='utf-8-sig') as azure_profile_fd:\n        azure_profile = json.load(azure_profile_fd)\n    for subscription_info in azure_profile['subscriptions']:\n        if (name is None and subscription_info['isDefault'] is True) or \\\n                                            subscription_info['name'] == name:\n            return subscription_info['id']\n    return None", "docstring": "Get the default, or named, subscription id from CLI's local cache.\n\nArgs:\nname (str): Optional subscription name. If this is set, the subscription id of the named\nsubscription is returned from the CLI cache if present. If not set, the subscription id\nof the default subscription is returned.\n\nReturns:\nAzure subscription ID string.\n\nRequirements:\nUser has run 'az login' once, or is in Azure Cloud Shell.", "source": "juraj-google-style"}
{"code": "def intersects(self, rect, edges=False):\n        \n        \n        if (self.bottom > rect.top or \\\n            self.top < rect.bottom or \\\n            self.left > rect.right or \\\n            self.right < rect.left):\n            return False\n      \n        \n        if not edges:\n            if (self.bottom == rect.top or \\\n                self.top == rect.bottom or \\\n                self.left == rect.right or \\\n                self.right == rect.left):\n                return False\n\n        \n        if (self.left == rect.right and self.bottom == rect.top or \\\n            self.left == rect.right and rect.bottom == self.top or \\\n            rect.left == self.right and self.bottom == rect.top or \\\n            rect.left == self.right and rect.bottom == self.top):\n            return False\n    \n        return True", "docstring": "Detect intersections between this rectangle and rect.\n\nArgs:\nrect (Rectangle): Rectangle to test for intersections.\nedges (bool): Accept edge touching rectangles as intersects or not\n\nReturns:\nbool: True if the rectangles intersect, False otherwise", "source": "juraj-google-style"}
{"code": "def add_field_with_label(self, key, label_description, field):\n        \n        self.inputs[key] = field\n        label = Label(label_description)\n        label.style['margin'] = '0px 5px'\n        label.style['min-width'] = '30%'\n        container = HBox()\n        container.style.update({'justify-content':'space-between', 'overflow':'auto', 'padding':'3px'})\n        container.append(label, key='lbl' + key)\n        container.append(self.inputs[key], key=key)\n        self.container.append(container, key=key)", "docstring": "Adds a field to the dialog together with a descriptive label and a unique identifier.\n\nNote: You can access to the fields content calling the function GenericDialog.get_field(key).\n\nArgs:\nkey (str): The unique identifier for the field.\nlabel_description (str): The string content of the description label.\nfield (Widget): The instance of the field Widget. It can be for example a TextInput or maybe\na custom widget.", "source": "juraj-google-style"}
{"code": "def _isValidQuery(self, query, mode='phonefy'):\n    try:\n        validator = self.modes[mode].get('query_validator')\n        if validator:\n            try:\n                compiledRegexp = re.compile('^{expr}$'.format(expr=validator))\n                return compiledRegexp.match(query)\n            except AttributeError as e:\n                return True\n    except AttributeError as e:\n        compiledRegexp = re.compile('^{r}$'.format(r=self.validQuery[mode]))\n        return compiledRegexp.match(query)", "docstring": "Method to verify if a given query is processable by the platform.\n\nThe system looks for the forbidden characters in self.Forbidden list.\n\nArgs:\n-----\nquery: The query to be launched.\nmode: To be chosen amongst mailfy, phonefy, usufy, searchfy.\nReturn:\n-------\nTrue | False", "source": "codesearchnet"}
{"code": "def get_gene_info(ensembl_ids=None, hgnc_symbols=None):\n    uniq_ensembl_ids = set((ensembl_id for ensembl_id in (ensembl_ids or [])))\n    uniq_hgnc_symbols = set((hgnc_symbol for hgnc_symbol in (hgnc_symbols or [])))\n    genes = []\n    gene_data = []\n    if uniq_ensembl_ids:\n        for ensembl_id in uniq_ensembl_ids:\n            for res in query_gene(ensembl_id=ensembl_id):\n                gene_data.append(res)\n    elif uniq_hgnc_symbols:\n        for hgnc_symbol in uniq_hgnc_symbols:\n            query_res = query_gene(hgnc_symbol=hgnc_symbol)\n            if query_res:\n                for res in query_res:\n                    gene_data.append(res)\n            else:\n                gene_data.append({'hgnc_symbol': hgnc_symbol, 'hgnc_id': None, 'ensembl_id': None, 'description': None, 'chrom': 'unknown', 'start': 0, 'stop': 0, 'hi_score': None, 'constraint_score': None})\n    for gene in gene_data:\n        genes.append(Gene(symbol=gene['hgnc_symbol'], hgnc_id=gene['hgnc_id'], ensembl_id=gene['ensembl_id'], description=gene['description'], chrom=gene['chrom'], start=gene['start'], stop=gene['stop'], location=get_cytoband_coord(gene['chrom'], gene['start']), hi_score=gene['hi_score'], constraint_score=gene['constraint_score'], omim_number=get_omim_number(gene['hgnc_symbol'])))\n    return genes", "docstring": "Return the genes info based on the transcripts found\n\nArgs:\nensembl_ids (Optional[list]): list of Ensembl gene ids\nhgnc_symbols (Optional[list]): list of HGNC gene symbols\n\nReturns:\niterable: an iterable with `Gene` objects", "source": "codesearchnet"}
{"code": "def get_min_instability(self, min_voltage=None, max_voltage=None):\n        \n        data = []\n        for pair in self._select_in_voltage_range(min_voltage, max_voltage):\n            if pair.decomp_e_charge is not None:\n                data.append(pair.decomp_e_charge)\n            if pair.decomp_e_discharge is not None:\n                data.append(pair.decomp_e_discharge)\n        return min(data) if len(data) > 0 else None", "docstring": "The minimum instability along a path for a specific voltage range.\n\nArgs:\nmin_voltage: The minimum allowable voltage.\nmax_voltage: The maximum allowable voltage.\n\nReturns:\nMinimum decomposition energy of all compounds along the insertion\npath (a subset of the path can be chosen by the optional arguments)", "source": "juraj-google-style"}
{"code": "def get_group(self, name, user_name=None):\n        \n        return self.service.get_group(\n            name, user_name, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Get owner of group and the resources it's attached to.\n\nArgs:\nname (string): Name of group to query.\nuser_name (optional[string]): Supply None if not interested in determining if user is a member of the given group.\n\nReturns:\n(dict): Keys include 'owner', 'name', 'resources'.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def load_pyfile(self, path):\n    with open(path) as config_file:\n        contents = config_file.read()\n        try:\n            exec(compile(contents, path, 'exec'), self)\n        except Exception as e:\n            raise MalformedConfig(path, six.text_type(e))", "docstring": "Load python file as config.\n\nArgs:\npath (string): path to the python file", "source": "codesearchnet"}
{"code": "def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):\n        \n        x, next_iteration, deltas, improvement, last_improvement, estimated_improvement = super(LineSearch, self).tf_step(\n            x, iteration, deltas, improvement, last_improvement, estimated_improvement\n        )\n\n        next_x = [t + delta for t, delta in zip(x, deltas)]\n\n        if self.mode == 'linear':\n            next_deltas = deltas\n            next_estimated_improvement = estimated_improvement + self.estimated_incr\n\n        elif self.mode == 'exponential':\n            next_deltas = [delta * self.parameter for delta in deltas]\n            next_estimated_improvement = estimated_improvement * self.parameter\n\n        target_value = self.fn_x(next_deltas)\n\n        next_improvement = tf.divide(\n            x=(target_value - self.base_value),\n            y=tf.maximum(x=next_estimated_improvement, y=util.epsilon)\n        )\n\n        return next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement", "docstring": "Iteration loop body of the line search algorithm.\n\nArgs:\nx: Current solution estimate $x_t$.\niteration: Current iteration counter $t$.\ndeltas: Current difference $x_t - x'$.\nimprovement: Current improvement $(f(x_t) - f(x')) / v'$.\nlast_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\nestimated_improvement: Current estimated value $v'$.\n\nReturns:\nUpdated arguments for next iteration.", "source": "juraj-google-style"}
{"code": "def decode_list(self, ids):\n    \n    decoded_ids = []\n    for id_ in ids:\n      if 0 <= id_ < self._num_reserved_ids:\n        decoded_ids.append(RESERVED_TOKENS[int(id_)])\n      else:\n        decoded_ids.append(id_ - self._num_reserved_ids)\n    return [str(d) for d in decoded_ids]", "docstring": "Transform a sequence of int ids into a their string versions.\n\nThis method supports transforming individual input/output ids to their\nstring versions so that sequence to/from text conversions can be visualized\nin a human readable format.\n\nArgs:\nids: list of integers to be converted.\n\nReturns:\nstrs: list of human-readable string.", "source": "juraj-google-style"}
{"code": "def generate_exact(self, model, vcpu_num, host_cpu):\n        \n\n        nested = {'Intel': 'vmx', 'AMD': 'svm'}\n        cpu = ET.Element('cpu', match='exact')\n        ET.SubElement(cpu, 'model').text = model\n        cpu.append(self.generate_topology(vcpu_num))\n\n        vendor = host_cpu.findtext('vendor')\n        if not nested.get(vendor):\n            LOGGER.debug(\n                'Unknown vendor: {0}, did not configure nested '\n                'virtualization cpu flag on guest.'.format(vendor)\n            )\n            return cpu\n\n        model_vendor = LibvirtCPU.get_cpu_vendor(family=model)\n        if vendor != model_vendor:\n            LOGGER.debug(\n                (\n                    'Not enabling nested virtualization feature, host '\n                    'vendor is: {0}, guest vendor: '\n                    '{1}'.format(vendor, model_vendor)\n                )\n            )\n            return cpu\n\n        flag = nested[vendor]\n        if host_cpu.find('feature/[@name=\"{0}\"]'.format(flag)) is not None:\n            cpu.append(self.generate_feature(name=flag))\n        else:\n            LOGGER.debug(\n                (\n                    'missing {0} cpu flag on host, nested '\n                    'virtualization will probably not '\n                    'work.'\n                ).format(flag)\n            )\n\n        return cpu", "docstring": "Generate exact CPU model with nested virtualization CPU feature.\n\nArgs:\nmodel(str): libvirt supported CPU model\nvcpu_num(int): number of virtual cpus\nhost_cpu(lxml.etree.Element): the host CPU model\n\nReturns:\nlxml.etree.Element: CPU XML node", "source": "juraj-google-style"}
{"code": "def explain(self, entry):\n        \n        d = self.get_explanation_dict(entry)\n        print(\"The uncorrected value of the energy of %s is %f eV\" %\n              (entry.composition, d[\"uncorrected_energy\"]))\n        print(\"The following corrections / screening are applied for %s:\\n\" %\n              d[\"compatibility\"])\n        for c in d[\"corrections\"]:\n            print(\"%s correction: %s\\n\" % (c[\"name\"],\n                                           c[\"description\"]))\n            print(\"For the entry, this correction has the value %f eV.\" % c[\n                \"value\"])\n            print(\"-\" * 30)\n\n        print(\"The final energy after corrections is %f\" % d[\n            \"corrected_energy\"])", "docstring": "Prints an explanation of the corrections that are being applied for a\ngiven compatibility scheme. Inspired by the \"explain\" methods in many\ndatabase methodologies.\n\nArgs:\nentry: A ComputedEntry.", "source": "juraj-google-style"}
{"code": "def print_fhir_to_json_string_for_analytics(fhir_proto: message.Message) -> str:\n    printer = _json_printer.JsonPrinter.compact_printer_for_analytics(_PRIMITIVE_HANDLER)\n    return printer.print(fhir_proto)", "docstring": "Returns an Analytic FHIR JSON representation with no spaces or newlines.\n\nArgs:\nfhir_proto: The proto to serialize into a JSON string.\n\nReturns:\nAn Analytic FHIR JSON representation with no spaces or newlines.", "source": "github-repos"}
{"code": "def get_stability_criteria(self, s, n):\n        \n        n = get_uvec(n)\n        stress = s * np.outer(n, n)\n        sym_wallace = self.get_symmetric_wallace_tensor(stress)\n        return np.linalg.det(sym_wallace.voigt)", "docstring": "Gets the stability criteria from the symmetric\nWallace tensor from an input vector and stress\nvalue.\n\nArgs:\ns (float): Stress value at which to evaluate\nthe stability criteria\nn (3x1 array-like): direction of the applied\nstress", "source": "juraj-google-style"}
{"code": "def get_videos_for_ids(edx_video_ids, sort_field=None, sort_dir=SortDirection.asc):\n    (videos, __) = _get_videos_for_filter({'edx_video_id__in': edx_video_ids}, sort_field, sort_dir)\n    return videos", "docstring": "Returns an iterator of videos that match the given list of ids.\n\nArgs:\nedx_video_ids (list)\nsort_field (VideoSortField)\nsort_dir (SortDirection)\n\nReturns:\nA generator expression that contains the videos found, sorted by the\ngiven field and direction, with ties broken by edx_video_id to ensure a\ntotal order", "source": "codesearchnet"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes the routine specified by routineId from the dataset.\n\nArgs:\nrequest: (BigqueryRoutinesDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BigqueryRoutinesDeleteResponse) The response message.", "source": "github-repos"}
{"code": "def serialize_keras_object(obj):\n    if obj is None:\n        return obj\n    if isinstance(obj, PLAIN_TYPES):\n        return obj\n    if isinstance(obj, (list, tuple)):\n        config_arr = [serialize_keras_object(x) for x in obj]\n        return tuple(config_arr) if isinstance(obj, tuple) else config_arr\n    if isinstance(obj, dict):\n        return serialize_dict(obj)\n    if isinstance(obj, bytes):\n        return {'class_name': '__bytes__', 'config': {'value': obj.decode('utf-8')}}\n    if isinstance(obj, slice):\n        return {'class_name': '__slice__', 'config': {'start': serialize_keras_object(obj.start), 'stop': serialize_keras_object(obj.stop), 'step': serialize_keras_object(obj.step)}}\n    if isinstance(obj, type(Ellipsis)):\n        return {'class_name': '__ellipsis__', 'config': {}}\n    if isinstance(obj, backend.KerasTensor):\n        history = getattr(obj, '_keras_history', None)\n        if history:\n            history = list(history)\n            history[0] = history[0].name\n        return {'class_name': '__keras_tensor__', 'config': {'shape': obj.shape, 'dtype': obj.dtype, 'keras_history': history}}\n    if tf.available and isinstance(obj, tf.TensorShape):\n        return obj.as_list() if obj._dims is not None else None\n    if backend.is_tensor(obj):\n        return {'class_name': '__tensor__', 'config': {'value': backend.convert_to_numpy(obj).tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}}\n    if type(obj).__module__ == np.__name__:\n        if isinstance(obj, np.ndarray) and obj.ndim > 0:\n            return {'class_name': '__numpy__', 'config': {'value': obj.tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}}\n        else:\n            return obj.item()\n    if tf.available and isinstance(obj, tf.DType):\n        return obj.name\n    if isinstance(obj, types.FunctionType) and obj.__name__ == '<lambda>':\n        warnings.warn(f'The object being serialized includes a `lambda`. This is unsafe. In order to reload the object, you will have to pass `safe_mode=False` to the loading function. Please avoid using `lambda` in the future, and use named Python functions instead. 
This is the `lambda` being serialized: {inspect.getsource(obj)}', stacklevel=2)\n        return {'class_name': '__lambda__', 'config': {'value': python_utils.func_dump(obj)}}\n    if tf.available and isinstance(obj, tf.TypeSpec):\n        ts_config = obj._serialize()\n        ts_config = list(map(lambda x: x.as_list() if isinstance(x, tf.TensorShape) else x.name if isinstance(x, tf.DType) else x, ts_config))\n        return {'class_name': '__typespec__', 'spec_name': obj.__class__.__name__, 'module': obj.__class__.__module__, 'config': ts_config, 'registered_name': None}\n    inner_config = _get_class_or_fn_config(obj)\n    config_with_public_class = serialize_with_public_class(obj.__class__, inner_config)\n    if config_with_public_class is not None:\n        get_build_and_compile_config(obj, config_with_public_class)\n        record_object_after_serialization(obj, config_with_public_class)\n        return config_with_public_class\n    if isinstance(obj, types.FunctionType):\n        module = obj.__module__\n    else:\n        module = obj.__class__.__module__\n    class_name = obj.__class__.__name__\n    if module == 'builtins':\n        registered_name = None\n    elif isinstance(obj, types.FunctionType):\n        registered_name = object_registration.get_registered_name(obj)\n    else:\n        registered_name = object_registration.get_registered_name(obj.__class__)\n    config = {'module': module, 'class_name': class_name, 'config': inner_config, 'registered_name': registered_name}\n    get_build_and_compile_config(obj, config)\n    record_object_after_serialization(obj, config)\n    return config", "docstring": "Retrieve the config dict by serializing the Keras object.\n\n`serialize_keras_object()` serializes a Keras object to a python dictionary\nthat represents the object, and is a reciprocal function of\n`deserialize_keras_object()`. See `deserialize_keras_object()` for more\ninformation about the config format.\n\nArgs:\nobj: the Keras object to serialize.\n\nReturns:\nA python dict that represents the object. The python dict can be\ndeserialized via `deserialize_keras_object()`.", "source": "github-repos"}
{"code": "def main(raw_args=None):\n    if (raw_args is None):\n        raw_args = sys.argv[1:]\n    parser = build_parser()\n    args = parser.parse_args(raw_args)\n    if ((args.firmware_image is None) and (args.gdb is None)):\n        print('You must specify either a firmware image or attach a debugger with --gdb <PORT>')\n        return 1\n    test_args = ['qemu-system-gnuarmeclipse', '-verbose', '-verbose', '-board', 'STM32F0-Discovery', '-nographic', '-monitor', 'null', '-serial', 'null', '--semihosting-config', 'enable=on,target=native', '-d', 'unimp,guest_errors']\n    if args.firmware_image:\n        test_args += ['-image', args.firmware_image]\n    if args.gdb:\n        test_args += ['--gdb', ('tcp::%d' % args.gdb)]\n    proc = subprocess.Popen(test_args, stdout=sys.stdout, stderr=sys.stderr)\n    try:\n        proc.communicate()\n    except KeyboardInterrupt:\n        proc.terminate()\n    return 0", "docstring": "Run the iotile-emulate script.\n\nArgs:\nraw_args (list): Optional list of commmand line arguments.  If not\npassed these are pulled from sys.argv.", "source": "codesearchnet"}
{"code": "def _quadratic_sum_cost(self, state: _STATE) -> float:\n    cost = 0.0\n    total_len = float(len(self._c))\n    (seqs, _) = state\n    for seq in seqs:\n        cost += ((len(seq) / total_len) ** 2)\n    return (- cost)", "docstring": "Cost function that sums squares of lengths of sequences.\n\nArgs:\nstate: Search state, not mutated.\n\nReturns:\nCost which is minus the normalized quadratic sum of each linear\nsequence section in the state. This promotes single, long linear\nsequence solutions and converges to number -1. The solution with a\nlowest cost consists of every node being a single sequence and is\nalways less than 0.", "source": "codesearchnet"}
{"code": "def get_python_version():\n    ver = str(sys.version_info)\n    mmm = re.search('.*major=([\\\\d]), minor=([\\\\d]), micro=([\\\\d]+),.*', ver)\n    return mmm.group(1) + '.' + mmm.group(2) + '.' + mmm.group(3)", "docstring": "Retrieves default Python version.\n\nReturns:\nString that is the version of default Python.\ne.g. '2.7.4'", "source": "github-repos"}
{"code": "def inverse_removing(self, words_to_remove):\n    mask = np.ones(self.as_np.shape[0], dtype='bool')\n    mask[self.__get_idxs(words_to_remove)] = False\n    if (not self.bow):\n        return ''.join([(self.as_list[i] if mask[i] else 'UNKWORDZ') for i in range(mask.shape[0])])\n    return ''.join([self.as_list[v] for v in mask.nonzero()[0]])", "docstring": "Returns a string after removing the appropriate words.\n\nIf self.bow is false, replaces word with UNKWORDZ instead of removing\nit.\n\nArgs:\nwords_to_remove: list of ids (ints) to remove\n\nReturns:\noriginal raw string with appropriate words removed.", "source": "codesearchnet"}
{"code": "def top_rated(self, **kwargs):\n    path = self._get_path('top_rated')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of top rated movies. By default, this list will only\ninclude movies that have 10 or more votes. This list refreshes every\nday.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def add_exac_info(genes, alias_genes, exac_lines):\n    \n    LOG.info(\"Add exac pli scores\")\n    for exac_gene in parse_exac_genes(exac_lines):\n        hgnc_symbol = exac_gene['hgnc_symbol'].upper()\n        pli_score = exac_gene['pli_score']\n        \n        for hgnc_id in get_correct_ids(hgnc_symbol, alias_genes):\n            genes[hgnc_id]['pli_score'] = pli_score", "docstring": "Add information from the exac genes\n\nCurrently we only add the pLi score on gene level\n\nThe exac resource only use HGNC symbol to identify genes so we need\nour alias mapping.\n\nArgs:\ngenes(dict): Dictionary with all genes\nalias_genes(dict): Genes mapped to all aliases\nensembl_lines(iteable): Iteable with raw ensembl info", "source": "juraj-google-style"}
{"code": "def data(self, index, role=Qt.DisplayRole):\n    if (not index.isValid()):\n        return None\n    col = index.column()\n    columnName = self._dataFrame.columns[index.row()]\n    columnDtype = self._dataFrame[columnName].dtype\n    if ((role == Qt.DisplayRole) or (role == Qt.EditRole)):\n        if (col == 0):\n            if (columnName == index.row()):\n                return index.row()\n            return columnName\n        elif (col == 1):\n            return SupportedDtypes.description(columnDtype)\n    elif (role == DTYPE_ROLE):\n        if (col == 1):\n            return columnDtype\n        else:\n            return None", "docstring": "Retrieve the data stored in the model at the given `index`.\n\nArgs:\nindex (QtCore.QModelIndex): The model index, which points at a\ndata object.\nrole (Qt.ItemDataRole, optional): Defaults to `Qt.DisplayRole`. You\nhave to use different roles to retrieve different data for an\n`index`. Accepted roles are `Qt.DisplayRole`, `Qt.EditRole` and\n`DTYPE_ROLE`.\n\nReturns:\nNone if an invalid index is given, the role is not accepted by the\nmodel or the column is greater than `1`.\nThe column name will be returned if the given column number equals `0`\nand the role is either `Qt.DisplayRole` or `Qt.EditRole`.\nThe datatype will be returned, if the column number equals `1`. The\n`Qt.DisplayRole` or `Qt.EditRole` return a human readable, translated\nstring, whereas the `DTYPE_ROLE` returns the raw data type.", "source": "codesearchnet"}
{"code": "def save(hdf5_filename, array):\n    \n    \n    hdf5_filename = os.path.expanduser(hdf5_filename)\n\n    try:\n        h = h5py.File(hdf5_filename, \"w\")\n        h.create_dataset('CUTOUT', data=array)\n        h.close()\n    except Exception as e:\n        raise ValueError(\"Could not save HDF5 file {0}.\".format(hdf5_filename))\n\n    return hdf5_filename", "docstring": "Export a numpy array to a HDF5 file.\n\nArguments:\nhdf5_filename (str): A filename to which to save the HDF5 data\narray (numpy.ndarray): The numpy array to save to HDF5\n\nReturns:\nString. The expanded filename that now holds the HDF5 data", "source": "juraj-google-style"}
{"code": "def exp(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.exp, tf.float32)", "docstring": "Returns a TensorFluent for the exp function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the exp function.", "source": "codesearchnet"}
{"code": "def on_run_start(self, request):\n    self._is_run_start = True\n    self._update_run_calls_state(request.run_call_count, request.fetches, request.feed_dict, is_callable_runner=request.is_callable_runner)\n    if self._active_tensor_filter:\n        return self._active_tensor_filter_run_start_response\n    self._exit_if_requested_by_user()\n    if self._run_call_count > 1 and (not self._skip_debug):\n        if self._run_through_times > 0:\n            return framework.OnRunStartResponse(framework.OnRunStartAction.NON_DEBUG_RUN, [])\n        elif self._run_through_times == 0:\n            return self._run_start_response or framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN, self._get_run_debug_urls())\n    if self._run_start_response is None:\n        self._prep_cli_for_run_start()\n        self._run_start_response = self._launch_cli()\n        if self._active_tensor_filter:\n            self._active_tensor_filter_run_start_response = self._run_start_response\n        if self._run_through_times > 1:\n            self._run_through_times -= 1\n    self._exit_if_requested_by_user()\n    return self._run_start_response", "docstring": "Overrides on-run-start callback.\n\nArgs:\nrequest: An instance of `OnRunStartRequest`.\n\nReturns:\nAn instance of `OnRunStartResponse`.", "source": "github-repos"}
{"code": "def decrypt_block(self, cipherText):\n    \n    if not self.initialized:\n      raise TypeError(\"CamCrypt object has not been initialized\")\n    if len(cipherText) != BLOCK_SIZE:\n      raise ValueError(\"cipherText must be %d bytes long (received %d bytes)\" %\n                       (BLOCK_SIZE, len(cipherText)))\n    plain = ctypes.create_string_buffer(BLOCK_SIZE)\n    self.decblock(self.bitlen, cipherText, self.keytable, plain)\n    return plain.raw", "docstring": "Decrypt a 16-byte block of data.\n\nNOTE: This function was formerly called `decrypt`, but was changed when\nsupport for decrypting arbitrary-length strings was added.\n\nArgs:\ncipherText (str): 16-byte data.\n\nReturns:\n16-byte str.\n\nRaises:\nTypeError if CamCrypt object has not been initialized.\nValueError if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes.", "source": "juraj-google-style"}
{"code": "def fstat(self, file_des):\n        \n        \n        file_object = self.filesystem.get_open_file(file_des).get_object()\n        return file_object.stat_result.copy()", "docstring": "Return the os.stat-like tuple for the FakeFile object of file_des.\n\nArgs:\nfile_des: The file descriptor of filesystem object to retrieve.\n\nReturns:\nThe FakeStatResult object corresponding to entry_path.\n\nRaises:\nOSError: if the filesystem object doesn't exist.", "source": "juraj-google-style"}
{"code": "def __init__(self, xcli, product_name, product_version):\n        \n        self.xcli = xcli\n        self.product_name = product_name\n        self.product_version = product_version\n        self.server_name = getfqdn()\n        self.platform = get_platform_details()\n        \n        if not self.product_name:\n            raise ValueError('product_name is empty')\n        if not self.product_version:\n            raise ValueError('product_version is empty')", "docstring": "init an EventsManager\nArgs:\nxcli (XCLIClient): xcli client to send the event\nproduct_name (string): the sending product's name\nproduct_version (string): the sending product's version\nRaises:\nValueError: if missing product_name or product_version", "source": "juraj-google-style"}
{"code": "def _expected_exercise_fn(design, calibration_indices, continuation_value, exercise_value):\n    mask = exercise_value > 0\n    design_t = tf.transpose(design, [0, 2, 1])\n    masked = tf.where(tf.expand_dims(tf.transpose(mask), axis=-1), design_t, tf.zeros_like(design_t))\n    if calibration_indices is None:\n        submask = masked\n        mask_cont_value = continuation_value\n    else:\n        submask = tf.gather(masked, calibration_indices, axis=1)\n        mask_cont_value = tf.gather(continuation_value, calibration_indices)\n    lhs = tf.matmul(submask, submask, transpose_a=True)\n    lhs_pinv = tf.linalg.pinv(lhs)\n    rhs = tf.matmul(submask, tf.expand_dims(tf.transpose(mask_cont_value), axis=-1), transpose_a=True)\n    beta = tf.matmul(lhs_pinv, rhs)\n    continuation = tf.matmul(design_t, beta)\n    return tf.nn.relu(tf.transpose(tf.squeeze(continuation, axis=-1)))", "docstring": "Returns the expected continuation value for each path.\n\nArgs:\ndesign: A real `Tensor` of shape `[batch_size, basis_size, num_samples]`.\ncalibration_indices: A rank 1 integer `Tensor` denoting indices of samples\nused for regression.\ncontinuation_value: A `Tensor` of shape `[num_samples, batch_size]` and of\nthe same dtype as `design`. The optimal value of the option conditional on\nnot exercising now or earlier, taking future information into account.\nexercise_value: A `Tensor` of the same shape and dtype as\n`continuation_value`. Value of the option if exercised immideately at\nthe current time\n\nReturns:\nA `Tensor` of the same shape and dtype as `continuation_value` whose\n`(n, v)`-th entry represents the expected continuation value of sample path\n`n` under the `v`-th payoff scheme.", "source": "github-repos"}
{"code": "def _ReadOperatingSystemArtifactValues(self, operating_system_values):\n    \n    if not operating_system_values:\n      raise errors.MalformedPresetError('Missing operating system values.')\n\n    family = operating_system_values.get('family', None)\n    product = operating_system_values.get('product', None)\n    version = operating_system_values.get('version', None)\n\n    if not family and not product:\n      raise errors.MalformedPresetError(\n          'Invalid operating system missing family and product.')\n\n    return artifacts.OperatingSystemArtifact(\n        family=family, product=product, version=version)", "docstring": "Reads an operating system artifact from a dictionary.\n\nArgs:\noperating_system_values (dict[str, object]): operating system values.\n\nReturns:\nOperatingSystemArtifact: an operating system artifact attribute container.\n\nRaises:\nMalformedPresetError: if the format of the operating system values are\nnot set or incorrect.", "source": "juraj-google-style"}
{"code": "def load_parent_implems(self, parent_implems):\n        \n        for trname, attr, implem in parent_implems.get_custom_implementations():\n            self.implementations[trname] = implem.copy()\n            self.transitions_at[trname] = attr\n            self.custom_implems.add(trname)", "docstring": "Import previously defined implementations.\n\nArgs:\nparent_implems (ImplementationList): List of implementations defined\nin a parent class.", "source": "juraj-google-style"}
{"code": "def _validate_at_hash(claims, access_token, algorithm):\n    if (('at_hash' not in claims) and (not access_token)):\n        return\n    elif (('at_hash' in claims) and (not access_token)):\n        msg = 'No access_token provided to compare against at_hash claim.'\n        raise JWTClaimsError(msg)\n    elif (access_token and ('at_hash' not in claims)):\n        msg = 'at_hash claim missing from token.'\n        raise JWTClaimsError(msg)\n    try:\n        expected_hash = calculate_at_hash(access_token, ALGORITHMS.HASHES[algorithm])\n    except (TypeError, ValueError):\n        msg = 'Unable to calculate at_hash to verify against token claims.'\n        raise JWTClaimsError(msg)\n    if (claims['at_hash'] != expected_hash):\n        raise JWTClaimsError('at_hash claim does not match access_token.')", "docstring": "Validates that the 'at_hash' parameter included in the claims matches\nwith the access_token returned alongside the id token as part of\nthe authorization_code flow.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\naccess_token (str): The access token returned by the OpenID Provider.\nalgorithm (str): The algorithm used to sign the JWT, as specified by\nthe token headers.", "source": "codesearchnet"}
{"code": "def _load_credentials_file(credentials_file):\n    try:\n        credentials_file.seek(0)\n        data = json.load(credentials_file)\n    except Exception:\n        logger.warning('Credentials file could not be loaded, will ignore and overwrite.')\n        return {}\n    if (data.get('file_version') != 2):\n        logger.warning('Credentials file is not version 2, will ignore and overwrite.')\n        return {}\n    credentials = {}\n    for (key, encoded_credential) in iteritems(data.get('credentials', {})):\n        try:\n            credential_json = base64.b64decode(encoded_credential)\n            credential = client.Credentials.new_from_json(credential_json)\n            credentials[key] = credential\n        except:\n            logger.warning('Invalid credential {0} in file, ignoring.'.format(key))\n    return credentials", "docstring": "Load credentials from the given file handle.\n\nThe file is expected to be in this format:\n\n{\n\"file_version\": 2,\n\"credentials\": {\n\"key\": \"base64 encoded json representation of credentials.\"\n}\n}\n\nThis function will warn and return empty credentials instead of raising\nexceptions.\n\nArgs:\ncredentials_file: An open file handle.\n\nReturns:\nA dictionary mapping user-defined keys to an instance of\n:class:`oauth2client.client.Credentials`.", "source": "codesearchnet"}
{"code": "def _normalize_array(array, domain=(0, 1)):\n  \n  \n  array = np.array(array)\n  \n  array = np.squeeze(array)\n  assert len(array.shape) <= 3\n  assert np.issubdtype(array.dtype, np.number)\n  assert not np.isnan(array).any()\n\n  low, high = np.min(array), np.max(array)\n  if domain is None:\n    message = \"No domain specified, normalizing from measured (~%.2f, ~%.2f)\"\n    log.debug(message, low, high)\n    domain = (low, high)\n\n  \n  if low < domain[0] or high > domain[1]:\n    message = \"Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f}).\"\n    log.info(message.format(low, high, domain[0], domain[1]))\n    array = array.clip(*domain)\n\n  min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max  \n  \n  if np.issubdtype(array.dtype, np.inexact):\n    offset = domain[0]\n    if offset != 0:\n      array -= offset\n      log.debug(\"Converting inexact array by subtracting -%.2f.\", offset)\n    scalar = max_value / (domain[1] - domain[0])\n    if scalar != 1:\n      array *= scalar\n      log.debug(\"Converting inexact array by scaling by %.2f.\", scalar)\n\n  return array.clip(min_value, max_value).astype(np.uint8)", "docstring": "Given an arbitrary rank-3 NumPy array, produce one representing an image.\n\nThis ensures the resulting array has a dtype of uint8 and a domain of 0-255.\n\nArgs:\narray: NumPy array representing the image\ndomain: expected range of values in array,\ndefaults to (0, 1), if explicitly set to None will use the array's\nown range of values and normalize them.\n\nReturns:\nnormalized PIL.Image", "source": "juraj-google-style"}
{"code": "def _sanitize_slices(slices, intended_shape, deficient_shape):\n    sanitized_slices = []\n    idx = 0\n    for slc in slices:\n        if slc is Ellipsis:\n            if idx < 0:\n                raise ValueError('Found multiple `...` in slices {}'.format(slices))\n            num_remaining_non_newaxis_slices = sum((s is not array_ops.newaxis for s in slices[slices.index(Ellipsis) + 1:]))\n            idx = -num_remaining_non_newaxis_slices\n        elif slc is array_ops.newaxis:\n            pass\n        else:\n            is_broadcast = intended_shape[idx] > deficient_shape[idx]\n            if isinstance(slc, slice):\n                start, stop, step = (slc.start, slc.stop, slc.step)\n                if start is not None:\n                    start = _prefer_static_where(is_broadcast, 0, start)\n                if stop is not None:\n                    stop = _prefer_static_where(is_broadcast, 1, stop)\n                if step is not None:\n                    step = _prefer_static_where(is_broadcast, 1, step)\n                slc = slice(start, stop, step)\n            else:\n                slc = _prefer_static_where(is_broadcast, 0, slc)\n            idx += 1\n        sanitized_slices.append(slc)\n    return sanitized_slices", "docstring": "Restricts slices to avoid overflowing size-1 (broadcast) dimensions.\n\nArgs:\nslices: iterable of slices received by `__getitem__`.\nintended_shape: int `Tensor` shape for which the slices were intended.\ndeficient_shape: int `Tensor` shape to which the slices will be applied.\nMust have the same rank as `intended_shape`.\nReturns:\nsanitized_slices: Python `list` of slice objects.", "source": "github-repos"}
{"code": "def enter(self, layer, inputs, build_graph, training, saving=None):\n    state = {'layer': layer, 'inputs': inputs, 'build_graph': build_graph, 'training': training, 'saving': saving}\n    return CallContextManager(self, state)", "docstring": "Push a Layer and its inputs and state onto the current call context.\n\nArgs:\nlayer: The `Layer` whose `call` is currently active.\ninputs: The inputs to the currently active `Layer`.\nbuild_graph: Whether currently inside a Graph or FuncGraph.\ntraining: Whether currently executing in training or inference mode.\nsaving: Whether currently saving to SavedModel.\n\nReturns:\nContext manager.", "source": "github-repos"}
{"code": "def search_groups(self, group):\n        \n        group_url = \"%s/%s/%s\" % (self.url, \"group\", group)\n        response = self.jss.get(group_url)\n        return LDAPGroupsResults(self.jss, response)", "docstring": "Search for LDAP groups.\n\nArgs:\ngroup: Group to search for. It is not entirely clear how the\nJSS determines the results- are regexes allowed, or\nglobbing?\n\nReturns:\nLDAPGroupsResult object.\n\nRaises:\nJSSGetError if no results are found.", "source": "juraj-google-style"}
{"code": "def convert_padding(padding, expected_length=4):\n    explicit_paddings = []\n    if padding == 'EXPLICIT':\n        raise ValueError(\"'EXPLICIT' is not a valid value for `padding`. To use explicit padding, `padding` must be a list.\")\n    if isinstance(padding, (list, tuple)):\n        for i, dim_paddings in enumerate(padding):\n            if not isinstance(dim_paddings, (list, tuple)):\n                raise ValueError(f'When `padding` is a list, each element of `padding` must be a list/tuple of size 2. Received: padding={padding} with element at index {i} of type {type(dim_paddings)}')\n            if len(dim_paddings) != 2:\n                raise ValueError(f'When `padding` is a list, each element of `padding` must be a list/tuple of size 2. Received: padding={padding} with element at index {i} of size {len(dim_paddings)}')\n            explicit_paddings.extend(dim_paddings)\n        if len(padding) != expected_length:\n            raise ValueError(f'When padding is a list, it must be of size {expected_length}. Received: padding={padding} of size {len(padding)}')\n        padding = 'EXPLICIT'\n    return (padding, explicit_paddings)", "docstring": "Converts Python padding to C++ padding for ops which take EXPLICIT padding.\n\nArgs:\npadding: the `padding` argument for a Python op which supports EXPLICIT\npadding.\nexpected_length: Expected number of entries in the padding list when\nexplicit padding is used.\n\nReturns:\n(padding, explicit_paddings) pair, which should be passed as attributes to a\nC++ op.\n\nRaises:\nValueError: If padding is invalid.", "source": "github-repos"}
{"code": "def extract(self, text: str) -> List[Extraction]:\n    doc = self._tokenizer.tokenize_to_spacy_doc(text)\n    self._load_matcher()\n    matches = [x for x in self._matcher(doc) if (x[1] != x[2])]\n    pos_filtered_matches = []\n    neg_filtered_matches = []\n    for (idx, start, end) in matches:\n        span_doc = self._tokenizer.tokenize_to_spacy_doc(doc[start:end].text)\n        this_spacy_rule = self._matcher.get(idx)\n        relations = self._find_relation(span_doc, this_spacy_rule)\n        (rule_id, _) = self._hash_map[idx]\n        this_rule = self._rule_lst[rule_id]\n        if self._filter_match(doc[start:end], relations, this_rule.patterns):\n            value = self._form_output(doc[start:end], this_rule.output_format, relations, this_rule.patterns)\n            if this_rule.polarity:\n                pos_filtered_matches.append((start, end, value, rule_id, relations))\n            else:\n                neg_filtered_matches.append((start, end, value, rule_id, relations))\n    return_lst = []\n    if pos_filtered_matches:\n        longest_lst_pos = self._get_longest(pos_filtered_matches)\n        if neg_filtered_matches:\n            longest_lst_neg = self._get_longest(neg_filtered_matches)\n            return_lst = self._reject_neg(longest_lst_pos, longest_lst_neg)\n        else:\n            return_lst = longest_lst_pos\n    extractions = []\n    for (start, end, value, rule_id, relation) in return_lst:\n        this_extraction = Extraction(value=value, extractor_name=self.name, start_token=start, end_token=end, start_char=doc[start].idx, end_char=(doc[(end - 1)].idx + len(doc[(end - 1)])), rule_id=rule_id.split('rule_id\n        extractions.append(this_extraction)\n    return extractions", "docstring": "Extract from text\n\nArgs:\ntext (str): input str to be extracted.\n\nReturns:\nList[Extraction]: the list of extraction or the empty list if there are no matches.", "source": "codesearchnet"}
{"code": "def bearing(self, format='numeric'):\n    bearings = []\n    for segment in self:\n        if (len(segment) < 2):\n            bearings.append([])\n        else:\n            bearings.append(segment.bearing(format))\n    return bearings", "docstring": "Calculate bearing between locations in segments.\n\nArgs:\nformat (str): Format of the bearing string to return\n\nReturns:\nlist of list of float: Groups of bearings between points in\nsegments", "source": "codesearchnet"}
{"code": "def GetEventTagByIdentifier(self, storage_file, event_identifier):\n    if (not self._index):\n        self._Build(storage_file)\n    lookup_key = event_identifier.CopyToString()\n    event_tag_identifier = self._index.get(lookup_key, None)\n    if (not event_tag_identifier):\n        return None\n    return storage_file.GetEventTagByIdentifier(event_tag_identifier)", "docstring": "Retrieves the most recently updated event tag for an event.\n\nArgs:\nstorage_file (BaseStorageFile): storage file.\nevent_identifier (AttributeContainerIdentifier): event attribute\ncontainer identifier.\n\nReturns:\nEventTag: event tag or None if the event has no event tag.", "source": "codesearchnet"}
{"code": "def dialog_open(self, *, dialog: dict, trigger_id: str, **kwargs) -> SlackResponse:\n    kwargs.update({'dialog': dialog, 'trigger_id': trigger_id})\n    return self.api_call('dialog.open', json=kwargs)", "docstring": "Open a dialog with a user.\n\nArgs:\ndialog (dict): A dictionary of dialog arguments.\n{\n\"callback_id\": \"46eh782b0\",\n\"title\": \"Request something\",\n\"submit_label\": \"Request\",\n\"state\": \"Max\",\n\"elements\": [\n{\n\"type\": \"text\",\n\"label\": \"Origin\",\n\"name\": \"loc_origin\"\n},\n{\n\"type\": \"text\",\n\"label\": \"Destination\",\n\"name\": \"loc_destination\"\n}\n]\n}\ntrigger_id (str): The trigger id of a recent message interaction.\ne.g. '12345.98765.abcd2358fdea'", "source": "codesearchnet"}
{"code": "def _get_executor_init(self, workers):\n    raise NotImplementedError", "docstring": "Gets the Pool initializer for multiprocessing.\n\nArgs:\nworkers: Number of workers.\n\nReturns:\nFunction, a Function to initialize the pool", "source": "github-repos"}
{"code": "def get_type_name_in_language(cls, type_name, sub_type, language):\n        \n        if language in cls.type_methods_cache:\n            m = cls.type_methods_cache[language]\n            if not m:\n                return type_name\n            return m(type_name)\n\n        found, method = load_language_plugins(language, 'get_type_name')\n        if found:\n            cls.type_methods_cache[language] = method\n            if method:\n                return method(type_name, sub_type)\n            else:\n                return type_name\n\n        module = importlib.import_module('.lang.%s' % language, package=\"monolithe.generators\")\n\n        if not hasattr(module, 'get_type_name'):\n            cls.type_methods_cache[language] = None\n            return type_name\n\n        method = getattr(module, 'get_type_name')\n        cls.type_methods_cache[language] = method\n        return method(type_name, sub_type)", "docstring": "Get the type for the given language\n\nArgs:\ntype_name (str): the type to convert\nlanguage (str): the language to use\n\nReturns:\na type name in the given language\n\nExample:\nget_type_name_in_language(\"Varchar\", \"python\")\n>>> str", "source": "juraj-google-style"}
{"code": "def sort_edge(edges):\n    return sorted(edges, key=(lambda x: (x.L, x.R)))", "docstring": "Sort iterable of edges first by left node indices then right.\n\nArgs:\nedges(list[Edge]): List of edges to be sorted.\n\nReturns:\nlist[Edge]: Sorted list by left and right node indices.", "source": "codesearchnet"}
{"code": "def laid_out_pcoord(self, mesh_axis):\n    \n    divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:])\n    modulus = self.shape[mesh_axis].size\n    def my_fn(pnum):\n      return (pnum \n    return self.slicewise(my_fn, self.laid_out_pnum())", "docstring": "Returns a LaidOutTensor containing the processor coordinate.\n\nArgs:\nmesh_axis: int.\n\nReturns:\nLaidOutTensor where each slice is an integer scalar.", "source": "juraj-google-style"}
{"code": "def fetch(self, invoice_id, data={}, **kwargs):\n        \n        return super(Invoice, self).fetch(invoice_id, data, **kwargs)", "docstring": "Fetch Invoice for given Id\n\nArgs:\ninvoice_id : Id for which invoice object has to be retrieved\n\nReturns:\nInvoice dict for given invoice Id", "source": "juraj-google-style"}
{"code": "def autocorrelation(ts, normalized=False, unbiased=False):\n    ts = np.squeeze(ts)\n    if (ts.ndim <= 1):\n        if normalized:\n            ts = ((ts - ts.mean()) / ts.std())\n        N = ts.shape[0]\n        ar = np.asarray(ts)\n        acf = np.correlate(ar, ar, mode='full')\n        outlen = ((acf.shape[0] + 1) / 2)\n        acf = acf[(outlen - 1):]\n        if unbiased:\n            factor = np.array([(1.0 / (N - m)) for m in range(0, outlen)])\n            acf = (acf * factor)\n        dt = ((ts.tspan[(- 1)] - ts.tspan[0]) / (len(ts) - 1.0))\n        lags = (np.arange(outlen) * dt)\n        return Timeseries(acf, tspan=lags, labels=ts.labels)\n    else:\n        lastaxis = (ts.ndim - 1)\n        m = ts.shape[lastaxis]\n        acfs = [ts[(..., i)].autocorrelation(normalized, unbiased)[(..., np.newaxis)] for i in range(m)]\n        res = distob.concatenate(acfs, axis=lastaxis)\n        res.labels[lastaxis] = ts.labels[lastaxis]\n        return res", "docstring": "Returns the discrete, linear convolution of a time series with itself,\noptionally using unbiased normalization.\n\nN.B. Autocorrelation estimates are necessarily inaccurate for longer lags,\nas there are less pairs of points to convolve separated by that lag.\nTherefore best to throw out the results except for shorter lags, e.g.\nkeep lags from tau=0 up to one quarter of the total time series length.\n\nArgs:\nnormalized (boolean): If True, the time series will first be normalized\nto a mean of 0 and variance of 1. This gives autocorrelation 1 at\nzero lag.\n\nunbiased (boolean): If True, the result at each lag m will be scaled by\n1/(N-m). This gives an unbiased estimation of the autocorrelation of a\nstationary process from a finite length sample.\n\nRef: S. J. Orfanidis (1996) \"Optimum Signal Processing\", 2nd Ed.", "source": "codesearchnet"}
{"code": "def switch_to_line_in(self, source=None):\n    if source:\n        uid = source.uid\n    else:\n        uid = self.uid\n    self.avTransport.SetAVTransportURI([('InstanceID', 0), ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)), ('CurrentURIMetaData', '')])", "docstring": "Switch the speaker's input to line-in.\n\nArgs:\nsource (SoCo): The speaker whose line-in should be played.\nDefault is line-in from the speaker itself.", "source": "codesearchnet"}
{"code": "def topics(self):\n    cluster = self._client.cluster\n    if (self._client._metadata_refresh_in_progress and self._client._topics):\n        future = cluster.request_update()\n        self._client.poll(future=future)\n    stash = cluster.need_all_topic_metadata\n    cluster.need_all_topic_metadata = True\n    future = cluster.request_update()\n    self._client.poll(future=future)\n    cluster.need_all_topic_metadata = stash\n    return cluster.topics()", "docstring": "Get all topics the user is authorized to view.\n\nReturns:\nset: topics", "source": "codesearchnet"}
{"code": "def deepnn(x):\n    with tf.name_scope('reshape'):\n        x_image = tf.reshape(x, [(- 1), 28, 28, 1])\n    with tf.name_scope('conv1'):\n        W_conv1 = weight_variable([5, 5, 1, 32])\n        b_conv1 = bias_variable([32])\n        h_conv1 = tf.nn.relu((conv2d(x_image, W_conv1) + b_conv1))\n    with tf.name_scope('pool1'):\n        h_pool1 = max_pool_2x2(h_conv1)\n    with tf.name_scope('conv2'):\n        W_conv2 = weight_variable([5, 5, 32, 64])\n        b_conv2 = bias_variable([64])\n        h_conv2 = tf.nn.relu((conv2d(h_pool1, W_conv2) + b_conv2))\n    with tf.name_scope('pool2'):\n        h_pool2 = max_pool_2x2(h_conv2)\n    with tf.name_scope('fc1'):\n        W_fc1 = weight_variable([((7 * 7) * 64), 1024])\n        b_fc1 = bias_variable([1024])\n        h_pool2_flat = tf.reshape(h_pool2, [(- 1), ((7 * 7) * 64)])\n        h_fc1 = tf.nn.relu((tf.matmul(h_pool2_flat, W_fc1) + b_fc1))\n    with tf.name_scope('dropout'):\n        keep_prob = tf.placeholder(tf.float32)\n        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n    with tf.name_scope('fc2'):\n        W_fc2 = weight_variable([1024, 10])\n        b_fc2 = bias_variable([10])\n        y_conv = (tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n    return (y_conv, keep_prob)", "docstring": "deepnn builds the graph for a deep net for classifying digits.\n\nArgs:\nx: an input tensor with the dimensions (N_examples, 784), where 784 is\nthe number of pixels in a standard MNIST image.\n\nReturns:\nA tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with\nvalues equal to the logits of classifying the digit into one of 10\nclasses (the digits 0-9). keep_prob is a scalar placeholder for the\nprobability of dropout.", "source": "codesearchnet"}
{"code": "def _get_countdown_for_next_slice(self, spec):\n    countdown = 0\n    if (self._processing_limit(spec) != (- 1)):\n        countdown = max(int((parameters.config._SLICE_DURATION_SEC - (self._time() - self._start_time))), 0)\n    return countdown", "docstring": "Get countdown for next slice's task.\n\nWhen user sets processing rate, we set countdown to delay task execution.\n\nArgs:\nspec: model.MapreduceSpec\n\nReturns:\ncountdown in int.", "source": "codesearchnet"}
{"code": "def find_many(self, url, type, resource):\n        \n        return [type(item) for item in RestClient.get(url)[resource]]", "docstring": "Get a list of resources\n\nArgs:\nurl (string): URL to invoke\ntype (class): Class type\nresource (string): The REST Resource\nReturns:\nlist of object: List of resource instances", "source": "juraj-google-style"}
{"code": "def job_history(backend):\n    year = widgets.Output(layout=widgets.Layout(display='flex-inline', align_items='center', min_height='400px'))\n    month = widgets.Output(layout=widgets.Layout(display='flex-inline', align_items='center', min_height='400px'))\n    week = widgets.Output(layout=widgets.Layout(display='flex-inline', align_items='center', min_height='400px'))\n    tabs = widgets.Tab(layout=widgets.Layout(max_height='620px'))\n    tabs.children = [year, month, week]\n    tabs.set_title(0, 'Year')\n    tabs.set_title(1, 'Month')\n    tabs.set_title(2, 'Week')\n    tabs.selected_index = 1\n    _build_job_history(tabs, backend)\n    return tabs", "docstring": "Widget for displaying job history\n\nArgs:\nbackend (IBMQbackend): The backend.\n\nReturns:\nTab: A tab widget for history images.", "source": "codesearchnet"}
{"code": "def get_msms_df(model, pdb_id, outfile=None, outdir=None, outext='_msms.df', force_rerun=False):\n    \n    \n    \n\n    \n    outfile = ssbio.utils.outfile_maker(inname=pdb_id, outname=outfile, outdir=outdir, outext=outext)\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        \n        try:\n            rd = PDB.ResidueDepth(model)\n        except AssertionError:\n            log.error('{}: unable to run MSMS'.format(pdb_id))\n            return pd.DataFrame()\n\n        \n        appender = []\n        for k in rd.property_keys:\n            x = rd.property_dict[k]\n            chain = k[0]\n            residue = k[1]\n            het = residue[0]\n            resnum = residue[1]\n            icode = residue[2]\n            resdepth = x[0]\n            cadepth = x[1]\n            appender.append((chain, resnum, icode, resdepth, cadepth))\n\n        df = pd.DataFrame.from_records(appender, columns=['chain', 'resnum', 'icode', 'res_depth', 'ca_depth'])\n        df.to_csv(outfile)\n    else:\n        log.debug('{}: already ran MSMS and force_rerun={}, loading results'.format(outfile, force_rerun))\n        df = pd.read_csv(outfile, index_col=0)\n\n    return df", "docstring": "Run MSMS (using Biopython) on a Biopython Structure Model.\n\nDepths are in units Angstroms. 1A = 10^-10 m = 1nm. Returns a dictionary of::\n\n{\nchain_id:{\nresnum1_id: (res_depth, ca_depth),\nresnum2_id: (res_depth, ca_depth)\n}\n}\n\nArgs:\nmodel: Biopython Structure Model\n\nReturns:\nPandas DataFrame: ResidueDepth property_dict, reformatted", "source": "juraj-google-style"}
{"code": "def run(self, module, post_check):\n        \n        try:\n            \n            \n            \n            _cwd = os.getcwd()\n            _sys_path = list(sys.path)\n            _sys_argv = list(sys.argv)\n            sys.path.insert(0, os.path.dirname(self._path))\n            sys.argv = [os.path.basename(self._path)] + self._argv\n\n            exec(self._code, module.__dict__)\n            post_check()\n\n        except Exception as e:\n            self._failed = True\n            self._error_detail = traceback.format_exc()\n\n            _exc_type, _exc_value, exc_traceback = sys.exc_info()\n            filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]\n\n            self._error = \"%s\\nFile \\\"%s\\\", line %d, in %s:\\n%s\" % (str(e), os.path.basename(filename), line_number, func, txt)\n\n        finally:\n            \n            os.chdir(_cwd)\n            sys.path = _sys_path\n            sys.argv = _sys_argv\n            self.ran = True", "docstring": "Execute the configured source code in a module and run any post\nchecks.\n\nArgs:\nmodule (Module) : a module to execute the configured code in.\n\npost_check(callable) : a function that can raise an exception\nif expected post-conditions are not met after code execution.", "source": "juraj-google-style"}
{"code": "def _ScanVolume(self, scan_context, scan_node, base_path_specs):\n    if ((not scan_node) or (not scan_node.path_spec)):\n        raise errors.ScannerError('Invalid or missing scan node.')\n    if scan_context.IsLockedScanNode(scan_node.path_spec):\n        self._ScanEncryptedVolume(scan_context, scan_node)\n        if scan_context.IsLockedScanNode(scan_node.path_spec):\n            return\n    if scan_node.IsVolumeSystemRoot():\n        self._ScanVolumeSystemRoot(scan_context, scan_node, base_path_specs)\n    elif scan_node.IsFileSystem():\n        self._ScanFileSystem(scan_node, base_path_specs)\n    elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):\n        path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_TSK, location='/', parent=scan_node.path_spec)\n        base_path_specs.append(path_spec)\n    else:\n        for sub_scan_node in scan_node.sub_nodes:\n            self._ScanVolume(scan_context, sub_scan_node, base_path_specs)", "docstring": "Scans a volume scan node for volume and file systems.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): volume scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the format of or within the source\nis not supported or the scan node is invalid.", "source": "codesearchnet"}
{"code": "def insert(self, **fields):\n    if (self.conflict_target or self.conflict_action):\n        compiler = self._build_insert_compiler([fields])\n        rows = compiler.execute_sql(return_id=True)\n        pk_field_name = self.model._meta.pk.name\n        return rows[0][pk_field_name]\n    return super().create(**fields).pk", "docstring": "Creates a new record in the database.\n\nThis allows specifying custom conflict behavior using .on_conflict().\nIf no special behavior was specified, this uses the normal Django create(..)\n\nArguments:\nfields:\nThe fields of the row to create.\n\nReturns:\nThe primary key of the record that was created.", "source": "codesearchnet"}
{"code": "def _request(self, method, url, body):\n    if ((method != 'POST') and (method != 'PUT')):\n        body = None\n    s = Session()\n    LOGGER.debug('Method: {0}, Url: {1}, Body: {2}.'.format(method, url, body))\n    req = Request(method, url, json=body)\n    prepped = s.prepare_request(req)\n    res = s.send(prepped, timeout=(self._timeout or None))\n    res.raise_for_status()\n    return res.json()", "docstring": "Internal method to send a request to the remote server.\n\nArgs:\nmethod(str): HTTP method (GET/POST/PUT/DELETE/HEAD).\nurl(str): The request url.\nbody(dict): The JSON object to be sent.\n\nReturns:\nA dict representing the JSON body from the server response.\n\nRaises:\nConnectionError: A network problem occurred (e.g. DNS failure,\nrefused connection, etc).\nTimeout: The request timed out.\nHTTPError: HTTP request returned an unsuccessful status code.", "source": "codesearchnet"}
{"code": "def normalize_attr_values(a: Any) -> np.ndarray:\n\t\n\tscalar = False\n\tif np.isscalar(a):\n\t\ta = np.array([a])\n\t\tscalar = True\n\tarr = normalize_attr_array(a)\n\tif np.issubdtype(arr.dtype, np.integer) or np.issubdtype(arr.dtype, np.floating):\n\t\tpass  \n\telif np.issubdtype(arr.dtype, np.character) or np.issubdtype(arr.dtype, np.object_):\n\t\tarr = normalize_attr_strings(arr)\n\telif np.issubdtype(arr.dtype, np.bool_):\n\t\tarr = arr.astype('ubyte')\n\tif scalar:\n\t\treturn arr[0]\n\telse:\n\t\treturn arr", "docstring": "Take all kinds of input values and validate/normalize them.\n\nArgs:\na\tList, tuple, np.matrix, np.ndarray or sparse matrix\nElements can be strings, numbers or bools\n\nReturns\na_normalized    An np.ndarray with elements conforming to one of the valid Loom attribute types\n\nRemarks:\nThis method should be used to prepare the values to be stored in the HDF5 file. You should not\nreturn the values to the caller; for that, use materialize_attr_values()", "source": "juraj-google-style"}
{"code": "def bartlett(x):\n    if any_symbolic_tensors((x,)):\n        return Bartlett().symbolic_call(x)\n    return backend.numpy.bartlett(x)", "docstring": "Bartlett window function.\nThe Bartlett window is a triangular window that rises then falls linearly.\n\nArgs:\nx: Scalar or 1D Tensor. Window length.\n\nReturns:\nA 1D tensor containing the Bartlett window values.\n\nExample:\n>>> x = keras.ops.convert_to_tensor(5)\n>>> keras.ops.bartlett(x)\narray([0. , 0.5, 1. , 0.5, 0. ], dtype=float32)", "source": "github-repos"}
{"code": "def _make_projcet_list(path):\n    from collections import OrderedDict\n    from matplotlib.colors import LinearSegmentedColormap\n    from matplotlib.colors import rgb2hex as r2h\n    from numpy import linspace\n\n    proj = []\n    projects = OrderedDict()\n    file_list = os.listdir(path)\n    # Collect unique project names from the *.json files, skipping names containing '#'\n    for files in file_list:\n        if files.split(\".\")[0] not in proj and 'json' in files and \"#\" not in files:\n            proj.append(files.split(\".\")[0])\n\n    # Assign a base color to each project\n    colors = _get_colors(len(proj))\n    p_c = 0\n\n    for p in proj:\n        tasks = OrderedDict()\n        temp = [x.split(\".\")[1] for x in file_list if p in x and \"#\" not in x]\n        cmspace = linspace(0.95, 0.25, len(temp))\n        # NOTE: the color stops below are reconstructed (assumed white to project color);\n        # the original hex literals were lost when '#' characters were stripped.\n        cm = LinearSegmentedColormap.from_list(\"acorn.{}\".format(p),\n                                               ['#ffffff', colors[p_c]],\n                                               N=max((len(temp), 25)))\n        hues = [r2h(cm(cmi)) for cmi in cmspace]\n        h_c = 0\n        for t in temp:\n            tasks[t] = [hues[h_c], p + \".\" + t + \".json\"]\n            h_c += 1\n        tasks[\"hex_color\"] = colors[p_c]\n        projects[p] = tasks\n        p_c += 1\n\n    return projects", "docstring": "Returns a dictionary in which each project is a key and the\ntasks are stored as a list within that dictionary element.\n\nArgs:\npath (str): The path to the folder containing the *.json files.\n\nReturns:\nprojects (dict): A dictionary in which each project is a key\ncontaining a list of its tasks.", "source": "juraj-google-style"}
{"code": "def _preprocess_movie_lens(ratings_df):\n  \n  ratings_df[\"data\"] = 1.0\n  num_timestamps = ratings_df[[\"userId\", \"timestamp\"]].groupby(\n      \"userId\").nunique()\n  last_user_timestamp = ratings_df[[\"userId\", \"timestamp\"]].groupby(\n      \"userId\").max()\n\n  ratings_df[\"numberOfTimestamps\"] = ratings_df[\"userId\"].apply(\n      lambda x: num_timestamps[\"timestamp\"][x])\n  ratings_df[\"lastTimestamp\"] = ratings_df[\"userId\"].apply(\n      lambda x: last_user_timestamp[\"timestamp\"][x])\n\n  ratings_df = ratings_df[ratings_df[\"numberOfTimestamps\"] > 2]\n\n  ratings_df = _create_row_col_indices(ratings_df)\n\n  train_ratings_df = ratings_df[\n      ratings_df[\"timestamp\"] < ratings_df[\"lastTimestamp\"]]\n  test_ratings_df = ratings_df[\n      ratings_df[\"timestamp\"] == ratings_df[\"lastTimestamp\"]]\n\n  return ratings_df, train_ratings_df, test_ratings_df", "docstring": "Separate the rating datafram into train and test sets.\n\nFilters out users with less than two distinct timestamps. Creates train set\nand test set. The test set contains all the last interactions of users with\nmore than two distinct timestamps.\n\nArgs:\nratings_df: pandas dataframe with columns 'userId', 'movieId', 'rating',\n'timestamp'.\n\nReturns:\ntuple of dataframes (filtered_ratings, train_ratings, test_ratings).", "source": "juraj-google-style"}
{"code": "def write_fixed_str(self, value, length):\n        \n        towrite = value.encode('utf-8')\n        slen = len(towrite)\n        if slen > length:\n            raise SDKException(ErrorCode.param_err('string longer than fixed length: %s' % length))\n        self.write_bytes(towrite)\n        diff = length - slen\n\n        while diff > 0:\n            self.write_byte(0)\n            diff -= 1", "docstring": "Write a string value to the stream.\n\nArgs:\nvalue (str): value to write to the stream.\nlength (int): length of the string to write.", "source": "juraj-google-style"}
{"code": "def fetch_layout(self, dtensor: Any) -> layout_lib.Layout:\n    if not context.executing_eagerly():\n        raise RuntimeError('`fetch_layout` must be called eagerly.')\n    if _pywrap_utils.IsVariable(dtensor):\n        dtensor = dtensor.read_value()\n    try:\n        layout_string = _pywrap_dtensor_device.FetchLayout(context.context()._handle, dtensor, self._device_info)\n    except core._NotOkStatusException as e:\n        raise core._status_to_exception(e) from None\n    if layout_string is None:\n        return None\n    return layout_lib.Layout.from_string(layout_string)", "docstring": "Fetches the layout of the DTensor.\n\nArgs:\ndtensor: The DTensor whose layout is to be fetched.\n\nReturns:\nThe `Layout` of this DTensor.\n\nRaises:\nRuntimeError: When not called eagerly.", "source": "github-repos"}
{"code": "def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False):\n    with torch.no_grad():\n        n = 2 ** (num_bits - 1) - 1\n        if per_channel:\n            scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1)\n            scale = torch.clamp(scale, min=1e-08) / n\n        else:\n            scale = max(saturation_min.abs(), saturation_max.abs())\n            scale = torch.clamp(scale, min=1e-08) / n\n    return scale", "docstring": "Compute the scaling factor with the given quantization range for symmetric quantization.\n\nArgs:\nsaturation_min (`torch.Tensor`):\nLower bound for quantization range.\nsaturation_max (`torch.Tensor`):\nUpper bound for quantization range.\nper_channel (`bool`, *optional*, defaults to `False`):\nWhether to or not use channel-wise quantization.\n\nReturns:\n`torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and\n*saturation_max*.", "source": "github-repos"}
{"code": "def generate(store, report_format, path):\n    \n    success = False\n    if report_format in ['html']:\n        rendered_content = {\n            'html': generate_html\n        }[report_format](store)\n\n        if not os.path.isdir(path):\n            os.makedirs(path)\n\n        if rendered_content is not None:\n            \n            with open(os.path.join(path, 'pipeline.' + report_format), 'w') as handle:\n                handle.write(rendered_content)\n            success = True\n    else:\n        Logger.get_logger(__name__).error(\"Unknown report format %s\", report_format)\n    return success", "docstring": "Generate file in defined format representing the report of pipeline(s).\n\nArgs:\nstore (Store): report data.\nreport_format (str): currently \"html\" is supported only.\npath (str): path where to write the report to. Missing sub folders will be created.", "source": "juraj-google-style"}
{"code": "class PatchTSTPatchify(nn.Module):\n\n    def __init__(self, config: PatchTSTConfig):\n        super().__init__()\n        self.sequence_length = config.context_length\n        self.patch_length = config.patch_length\n        self.patch_stride = config.patch_stride\n        if self.sequence_length <= self.patch_length:\n            raise ValueError(f'Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})')\n        self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1\n        new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)\n        self.sequence_start = self.sequence_length - new_sequence_length\n\n    def forward(self, past_values: torch.Tensor):\n        # past_values: (batch_size, sequence_length, num_channels)\n        sequence_length = past_values.shape[-2]\n        if sequence_length != self.sequence_length:\n            raise ValueError(f\"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length}).\")\n        output = past_values[:, self.sequence_start:, :]\n        output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)\n        output = output.transpose(-2, -3).contiguous()\n        return output", "docstring": "A class to patchify the time series sequence into different patches\n\nReturns:\n`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`", "source": "github-repos"}
{"code": "def ContainsKey(self, public_key):\n        return self.ContainsKeyHash(Crypto.ToScriptHash(public_key.encode_point(True), unhex=True))", "docstring": "Test if the wallet contains the supplied public key.\n\nArgs:\npublic_key (ecdsa.Curve.point): a public key to test for its existence. e.g. KeyPair.PublicKey\n\nReturns:\nbool: True if exists, False otherwise.", "source": "juraj-google-style"}
{"code": "def load_image(image: Union[str, 'PIL.Image.Image'], timeout: Optional[float]=None) -> 'PIL.Image.Image':\n    requires_backends(load_image, ['vision'])\n    if isinstance(image, str):\n        if image.startswith('http://') or image.startswith('https://'):\n            image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))\n        elif os.path.isfile(image):\n            image = PIL.Image.open(image)\n        else:\n            if image.startswith('data:image/'):\n                image = image.split(',')[1]\n            try:\n                b64 = base64.decodebytes(image.encode())\n                image = PIL.Image.open(BytesIO(b64))\n            except Exception as e:\n                raise ValueError(f'Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}')\n    elif isinstance(image, PIL.Image.Image):\n        image = image\n    else:\n        raise TypeError('Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image.')\n    image = PIL.ImageOps.exif_transpose(image)\n    image = image.convert('RGB')\n    return image", "docstring": "Loads `image` to a PIL Image.\n\nArgs:\nimage (`str` or `PIL.Image.Image`):\nThe image to convert to the PIL Image format.\ntimeout (`float`, *optional*):\nThe timeout value in seconds for the URL request.\n\nReturns:\n`PIL.Image.Image`: A PIL Image.", "source": "github-repos"}
{"code": "def summary_computed(self, sess, summary, global_step=None):\n    if not self._summary_writer:\n        raise RuntimeError('Writing a summary requires a summary writer.')\n    if global_step is None and self.global_step is not None:\n        global_step = training_util.global_step(sess, self.global_step)\n    self._summary_writer.add_summary(summary, global_step)", "docstring": "Indicate that a summary was computed.\n\nArgs:\nsess: A `Session` object.\nsummary: A Summary proto, or a string holding a serialized summary proto.\nglobal_step: Int. global step this summary is associated with. If `None`,\nit will try to fetch the current step.\n\nRaises:\nTypeError: if 'summary' is not a Summary proto or a string.\nRuntimeError: if the Supervisor was created without a `logdir`.", "source": "github-repos"}
{"code": "def create_position_ids_from_input_ids(input_ids, padding_idx):\n    mask = (input_ids != padding_idx).astype('i4')\n    if mask.ndim > 2:\n        mask = mask.reshape((-1, mask.shape[-1]))\n        incremental_indices = jnp.cumsum(mask, axis=1).astype('i4') * mask\n        incremental_indices = incremental_indices.reshape(input_ids.shape)\n    else:\n        incremental_indices = jnp.cumsum(mask, axis=1).astype('i4') * mask\n    return incremental_indices.astype('i4') + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\ninput_ids: jnp.ndarray\npadding_idx: int\n\nReturns: jnp.ndarray", "source": "github-repos"}
{"code": "def matrices_compliance(dsm, complete_mediation_matrix):\n        \n        matrix = dsm.data\n        rows_dep_matrix = len(matrix)\n        cols_dep_matrix = len(matrix[0])\n        rows_med_matrix = len(complete_mediation_matrix)\n        cols_med_matrix = len(complete_mediation_matrix[0])\n\n        if (rows_dep_matrix != rows_med_matrix or\n                cols_dep_matrix != cols_med_matrix):\n            raise DesignStructureMatrixError(\n                'Matrices are NOT compliant '\n                '(number of rows/columns not equal)')\n\n        discrepancy_found = False\n        message = []\n        for i in range(0, rows_dep_matrix):\n            for j in range(0, cols_dep_matrix):\n                if ((complete_mediation_matrix[i][j] == 0 and\n                        matrix[i][j] > 0) or\n                        (complete_mediation_matrix[i][j] == 1 and\n                         matrix[i][j] < 1)):\n                    discrepancy_found = True\n                    message.append(\n                        'Untolerated dependency at %s:%s (%s:%s): '\n                        '%s instead of %s' % (\n                            i, j, dsm.entities[i], dsm.entities[j],\n                            matrix[i][j], complete_mediation_matrix[i][j]))\n\n        message = '\\n'.join(message)\n\n        return not discrepancy_found, message", "docstring": "Check if matrix and its mediation matrix are compliant.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\ncomplete_mediation_matrix (list of list of int): 2-dim array\n\nReturns:\nbool: True if compliant, else False", "source": "juraj-google-style"}
{"code": "def __init__(\n            self,\n            network_retries: int = DEFAULT_NETWORK_RETRIES,\n            network_timeout: int = DEFAULT_NETWORK_TIMEOUT\n    ) -> None:\n        \n        self._plugins_repository = PluginsRepository()\n\n        \n        SslConnection.set_global_network_settings(network_retries, network_timeout)", "docstring": "Create a scanner for running scanning commands synchronously.\n\nArgs:\nnetwork_retries: How many times SSLyze should retry a connection that timed out.\nnetwork_timeout: The time until an ongoing connection times out.", "source": "juraj-google-style"}
{"code": "def run(self):\n        \n        import behave.__main__ as behave\n\n        for d in self.firmware_dirs:\n            original_dir = os.getcwd()\n            os.chdir(d)\n\n            output = ''\n            try:\n                output = subprocess.check_output('make', shell=True, stderr=subprocess.STDOUT)\n            except subprocess.CalledProcessError as e:\n                if output:\n                    sys.stdout.write('Captured Output:%s%s%s' % (os.linesep, output, os.linesep))\n                os.chdir(original_dir)\n                raise e\n\n            os.chdir(original_dir)\n\n        return behave.main([self.features_dir])", "docstring": "Runs the command.\n\nArgs:\nself (BDDTestCommand): the ``BDDTestCommand`` instance\n\nReturns:\n``True`` on success, otherwise ``False``.\n\nRaises:\nValueError: if a build fails", "source": "juraj-google-style"}
{"code": "def add_status_parser(subparsers, parent_parser):\n    \n    parser = subparsers.add_parser(\n        'status',\n        help='Displays information about validator status',\n        description=\"Provides a subcommand to show a validator\\'s status\")\n\n    grand_parsers = parser.add_subparsers(title='subcommands',\n                                          dest='subcommand')\n    grand_parsers.required = True\n    add_status_show_parser(grand_parsers, parent_parser)", "docstring": "Adds argument parser for the status command\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "juraj-google-style"}
{"code": "def parse_fatcat(fatcat_xml):\n    \n    fatcat_results = {}\n\n    \n    with open(fatcat_xml, 'r') as f:\n        soup = BeautifulSoup(f, 'lxml')\n\n    \n    if soup.find('block'):\n        fatcat_results['tm_score'] = float(soup.find('afpchain')['tmscore'])\n\n    return fatcat_results", "docstring": "Parse a FATCAT XML result file.\n\nArgs:\nfatcat_xml (str): Path to FATCAT XML result file\n\nReturns:\ndict: Parsed information from the output\n\nTodo:\n- Only returning TM-score at the moment", "source": "juraj-google-style"}
{"code": "def _log_progress(self, bytes_downloaded):\n    self._total_bytes_downloaded += bytes_downloaded\n    now = time.time()\n    if (self._interactive_mode() or ((now - self._last_progress_msg_print_time) > 15)):\n        self._print_download_progress_msg(('Downloading %s: %s' % (self._url, tf_utils.bytes_to_readable_str(self._total_bytes_downloaded, True))))\n        self._last_progress_msg_print_time = now", "docstring": "Logs progress information about ongoing module download.\n\nArgs:\nbytes_downloaded: Number of bytes downloaded.", "source": "codesearchnet"}
{"code": "def InsertData(self, table_id, fd, schema, job_id):\n    \n    configuration = {\n        \"schema\": {\n            \"fields\": schema\n        },\n        \"destinationTable\": {\n            \"projectId\": self.project_id,\n            \"tableId\": table_id,\n            \"datasetId\": self.dataset_id\n        },\n        \"sourceFormat\": \"NEWLINE_DELIMITED_JSON\",\n    }\n\n    body = {\n        \"configuration\": {\n            \"load\": configuration\n        },\n        \"jobReference\": {\n            \"projectId\": self.project_id,\n            \"jobId\": job_id\n        }\n    }\n\n    \n    \n    mediafile = http.MediaFileUpload(\n        fd.name, mimetype=\"application/octet-stream\")\n    job = self.service.jobs().insert(\n        projectId=self.project_id, body=body, media_body=mediafile)\n    try:\n      response = job.execute()\n      return response\n    except errors.HttpError as e:\n      if self.GetDataset(self.dataset_id):\n        logging.exception(\"Error with job: %s\", job_id)\n      else:\n        \n        logging.info(\"Attempting to create dataset: %s\", self.dataset_id)\n        self.CreateDataset()\n      return self.RetryUpload(job, job_id, e)", "docstring": "Insert data into a bigquery table.\n\nIf the table specified doesn't exist, it will be created with the specified\nschema.\n\nArgs:\ntable_id: string table id\nfd: open file descriptor containing the newline separated JSON\nschema: BigQuery schema dict\njob_id: string job id\n\nReturns:\nAPI response object on success, None on failure", "source": "juraj-google-style"}
{"code": "def _obtain_sampled_health_pills(self, run, node_names):\n    \n    runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent(\n        constants.DEBUGGER_PLUGIN_NAME)\n\n    if run not in runs_to_tags_to_content:\n      \n      return {}\n\n    \n    \n    tags_to_content = runs_to_tags_to_content[run]\n\n    mapping = {}\n    for node_name in node_names:\n      if node_name not in tags_to_content:\n        \n        continue\n\n      health_pills = []\n      for tensor_event in self._event_multiplexer.Tensors(run, node_name):\n        json_string = tags_to_content[node_name]\n        try:\n          content_object = json.loads(tf.compat.as_text(json_string))\n          device_name = content_object['device']\n          output_slot = content_object['outputSlot']\n          health_pills.append(\n              self._tensor_proto_to_health_pill(tensor_event, node_name,\n                                                device_name, output_slot))\n        except (KeyError, ValueError) as e:\n          logger.error('Could not determine device from JSON string '\n                           '%r: %r', json_string, e)\n\n      mapping[node_name] = health_pills\n\n    return mapping", "docstring": "Obtains the health pills for a run sampled by the event multiplexer.\n\nThis is much faster than the alternative path of reading health pills from\ndisk.\n\nArgs:\nrun: The run to fetch health pills for.\nnode_names: A list of node names for which to retrieve health pills.\n\nReturns:\nA dictionary mapping from node name to a list of\nevent_accumulator.HealthPillEvents.", "source": "juraj-google-style"}
{"code": "def buid(valu=None):\n    if (valu is None):\n        return os.urandom(32)\n    byts = s_msgpack.en(valu)\n    return hashlib.sha256(byts).digest()", "docstring": "A binary GUID like sequence of 32 bytes.\n\nArgs:\nvalu (object): Optional, if provided, the hash of the msgpack\nencoded form of the object is returned. This can be used to\ncreate stable buids.\n\nNotes:\nBy default, this returns a random 32 byte value.\n\nReturns:\nbytes: A 32 byte value.", "source": "codesearchnet"}
{"code": "def __setstate__(self, state):\n    \n    self._api = state['api']\n    self._path = state['path']\n    self.name = api_utils._unquote_filename(self._path)\n    self._buffer_size = state['buffer_size']\n    self._max_request_size = state['request_size']\n    self._etag = state['etag']\n    self._file_size = state['size']\n    self._offset = state['offset']\n    self._buffer = _Buffer()\n    self.closed = state['closed']\n    self._buffer_future = None\n    if self._remaining() and not self.closed:\n      self._request_next_buffer()", "docstring": "Restore state as part of deserialization/unpickling.\n\nArgs:\nstate: the dictionary from a __getstate__ call\n\nAlong with restoring the state, pre-fetch the next read buffer.", "source": "juraj-google-style"}
{"code": "def upload(cls, file_obj, store=None):\n    if (store is None):\n        store = 'auto'\n    elif store:\n        store = '1'\n    else:\n        store = '0'\n    data = {'UPLOADCARE_STORE': store}\n    files = uploading_request('POST', 'base/', data=data, files={'file': file_obj})\n    file_ = cls(files['file'])\n    return file_", "docstring": "Uploads a file and returns ``File`` instance.\n\nArgs:\n- file_obj: file object to upload to\n- store (Optional[bool]): Should the file be automatically stored\nupon upload. Defaults to None.\n- False - do not store file\n- True - store file (can result in error if autostore\nis disabled for project)\n- None - use project settings\n\nReturns:\n``File`` instance", "source": "codesearchnet"}
{"code": "def add(self, pattern_txt):\n        \n        self.patterns[len(pattern_txt)] = pattern_txt\n\n        low = 0\n        high = len(pattern_txt) - 1\n\n        while not pattern_txt[low]:\n            low += 1\n\n        while not pattern_txt[high]:\n            high -= 1\n\n        min_pattern = pattern_txt[low:high + 1]\n        self.min_patterns[len(min_pattern)] = min_pattern", "docstring": "Add a pattern to the list.\n\nArgs:\npattern_txt (str list): the pattern, as a list of lines.", "source": "juraj-google-style"}
{"code": "def config_get(config, *path, default=None):\n    o = object()\n    result = get_in(config, path, default=o)\n    if (result is not o):\n        return result\n    else:\n        return default", "docstring": "Get a configuration option following a path through the config\n\nExample usage:\n\n>>> config_get(config,\n'problem', 'problem_type_details', 'scorer',\ndefault='accuracy')\n\nArgs:\nconfig (dict): config dict\n*path (list[str]): List of config sections and options to follow.\ndefault (default=None): A default value to return in the case that\nthe option does not exist.", "source": "codesearchnet"}
{"code": "def list_locations(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/locations?api-version=', BASE_API])\n    return do_get(endpoint, access_token)", "docstring": "List available locations for a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON list of locations.", "source": "juraj-google-style"}
{"code": "def _handle_response(self, response, valid_status_codes, resource):\n        if response.status_code not in valid_status_codes:\n            raise InvalidStatusCodeError(\n                status_code=response.status_code,\n                expected_status_codes=valid_status_codes\n                )\n        if response.content:\n            data = response.json()\n            if isinstance(data, list):\n                # Response body is a list of resources\n                return [resource(**x) for x in data]\n            else:\n                # Response body is an object; check for a paginated list of resources\n                key = getattr(resource.Meta, 'pagination_key', None)\n                if isinstance(data.get(key), list):\n                    # Build resources from the paginated list\n                    return [resource(**x) for x in data.get(key)]\n                else:\n                    # Single resource object\n                    return [resource(**data)]\n        return []", "docstring": "Handles Response objects\n\nArgs:\nresponse: An HTTP response object\nvalid_status_codes: A tuple or list of valid status codes\nresource: The resource class to build from this response\n\nReturns:\nresources: A list of Resource instances", "source": "juraj-google-style"}
{"code": "def dismiss_prompt(self, text=None, wait=None):\n    with self.driver.dismiss_modal('prompt', text=text, wait=wait):\n        (yield)", "docstring": "Execute the wrapped code, dismissing a prompt.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "codesearchnet"}
{"code": "def set_forced_variation(self, experiment_key, user_id, variation_key):\n    experiment = self.get_experiment_from_key(experiment_key)\n    if (not experiment):\n        return False\n    experiment_id = experiment.id\n    if (variation_key is None):\n        if (user_id in self.forced_variation_map):\n            experiment_to_variation_map = self.forced_variation_map.get(user_id)\n            if (experiment_id in experiment_to_variation_map):\n                del self.forced_variation_map[user_id][experiment_id]\n                self.logger.debug(('Variation mapped to experiment \"%s\" has been removed for user \"%s\".' % (experiment_key, user_id)))\n            else:\n                self.logger.debug(('Nothing to remove. Variation mapped to experiment \"%s\" for user \"%s\" does not exist.' % (experiment_key, user_id)))\n        else:\n            self.logger.debug(('Nothing to remove. User \"%s\" does not exist in the forced variation map.' % user_id))\n        return True\n    if (not validator.is_non_empty_string(variation_key)):\n        self.logger.debug('Variation key is invalid.')\n        return False\n    forced_variation = self.get_variation_from_key(experiment_key, variation_key)\n    if (not forced_variation):\n        return False\n    variation_id = forced_variation.id\n    if (user_id not in self.forced_variation_map):\n        self.forced_variation_map[user_id] = {experiment_id: variation_id}\n    else:\n        self.forced_variation_map[user_id][experiment_id] = variation_id\n    self.logger.debug(('Set variation \"%s\" for experiment \"%s\" and user \"%s\" in the forced variation map.' % (variation_id, experiment_id, user_id)))\n    return True", "docstring": "Sets users to a map of experiments to forced variations.\n\nArgs:\nexperiment_key: Key for experiment.\nuser_id: The user ID.\nvariation_key: Key for variation. If None, then clear the existing experiment-to-variation mapping.\n\nReturns:\nA boolean value that indicates if the set completed successfully.", "source": "codesearchnet"}
{"code": "def table_delete(self, table_name):\n    \n    url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)\n    return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,\n                                      raw_response=True)", "docstring": "Issues a request to delete a table.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def GetEventFormatter(self, event):\n    \n    data_type = getattr(event, 'data_type', None)\n    if not data_type:\n      return None\n\n    return formatters_manager.FormattersManager.GetFormatterObject(\n        event.data_type)", "docstring": "Retrieves the event formatter for a specific event type.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nEventFormatter: event formatter or None.", "source": "juraj-google-style"}
{"code": "def debug(msg: str, *args, **kwargs) -> None:\n    _DEFAULT_LOGGER.debug(msg, *args, **kwargs)", "docstring": "Logs debug message.\n\nArgs:\nmsg: Message with possible format string.\n*args: Values for variables in the format string.\n**kwargs: Keyword arguments for the logger.", "source": "github-repos"}
{"code": "def _fire_event(self, event_name, *event_args, **event_kwargs):\n    if (event_name in self._allowed_events):\n        self._logger.debug('firing handlers for event %s ', event_name)\n        for (func, args, kwargs) in self._event_handlers[event_name]:\n            kwargs.update(event_kwargs)\n            func(self, *(event_args + args), **kwargs)", "docstring": "Execute all the handlers associated with given event.\n\nThis method executes all handlers associated with the event\n`event_name`. Optional positional and keyword arguments can be used to\npass arguments to **all** handlers added with this event. These\naguments updates arguments passed using :meth:`~ignite.engine.Engine.add_event_handler`.\n\nArgs:\nevent_name: event for which the handlers should be executed. Valid\nevents are from :class:`~ignite.engine.Events` or any `event_name` added by\n:meth:`~ignite.engine.Engine.register_events`.\n*event_args: optional args to be passed to all handlers.\n**event_kwargs: optional keyword args to be passed to all handlers.", "source": "codesearchnet"}
{"code": "def from_object(cls, obj):\n        \n        return cls(\n            obj.get('sessionId', None),\n            obj.get('status', 0),\n            obj.get('value', None)\n        )", "docstring": "The factory method to create WebDriverResult from JSON Object.\n\nArgs:\nobj(dict): The JSON Object returned by server.", "source": "juraj-google-style"}
{"code": "class Owlv2Encoder(nn.Module):\n\n    def __init__(self, config: Owlv2Config):\n        super().__init__()\n        self.layers = nn.ModuleList([Owlv2EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Owlv2EncoderLayer`].\n\nArgs:\nconfig: Owlv2Config", "source": "github-repos"}
{"code": "def _checkResponseByteCount(payload):\n    POSITION_FOR_GIVEN_NUMBER = 0\n    NUMBER_OF_BYTES_TO_SKIP = 1\n    _checkString(payload, minlength=1, description='payload')\n    givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])\n    countedNumberOfDatabytes = (len(payload) - NUMBER_OF_BYTES_TO_SKIP)\n    if (givenNumberOfDatabytes != countedNumberOfDatabytes):\n        errortemplate = ('Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + ' The data payload is: {3!r}')\n        errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)\n        raise ValueError(errortext)", "docstring": "Check that the number of bytes as given in the response is correct.\n\nThe first byte in the payload indicates the length of the payload (first byte not counted).\n\nArgs:\npayload (string): The payload\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def _apply_shadow_vars(avg_grads):\n    ps_var_grads = []\n    for (grad, var) in avg_grads:\n        assert var.name.startswith('tower'), var.name\n        my_name = '/'.join(var.name.split('/')[1:])\n        my_name = get_op_tensor_name(my_name)[0]\n        new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype, initializer=var.initial_value, trainable=True)\n        ps_var_grads.append((grad, new_v))\n    return ps_var_grads", "docstring": "Create shadow variables on PS, and replace variables in avg_grads\nby these shadow variables.\n\nArgs:\navg_grads: list of (grad, var) tuples", "source": "codesearchnet"}
{"code": "def run_cuda_only(func: _F) -> _F:\n    if tf_inspect.isclass(func):\n        raise ValueError('`run_cuda_only` only supports test methods.')\n\n    def decorated(self: 'TensorFlowTestCase', *args, **kwargs):\n        if not is_gpu_available(cuda_only=True):\n            self.skipTest('Test requires CUDA GPU')\n        return func(self, *args, **kwargs)\n    return decorated", "docstring": "Execute the decorated test only if a GPU is available.\n\nThis function is intended to be applied to tests that require the presence\nof a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.\n\nArgs:\nfunc: function to be annotated.\n\nReturns:\nReturns a function that will conditionally skip the decorated test method.", "source": "github-repos"}
{"code": "def _head(self, client_kwargs):\n        \n        with _handle_client_error():\n            \n            if 'Key' in client_kwargs:\n                header = self.client.head_object(**client_kwargs)\n\n            \n            else:\n                header = self.client.head_bucket(**client_kwargs)\n\n        \n        for key in ('AcceptRanges', 'ResponseMetadata'):\n            header.pop(key, None)\n        return header", "docstring": "Returns object or bucket HTTP header.\n\nArgs:\nclient_kwargs (dict): Client arguments.\n\nReturns:\ndict: HTTP header.", "source": "juraj-google-style"}
{"code": "def constant(cls,\n            value: Value,\n            dtype: tf.DType = tf.float32) -> 'TensorFluent':\n        \n        t = tf.constant(value, dtype=dtype)\n        scope = [] \n        batch = False\n        return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a constant `value` TensorFluent with given `dtype`.\n\nArgs:\nvalue: The constant value.\ndtype: The output's data type.\n\nReturns:\nA constant TensorFluent.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               flush_size_chars=_FILE_POOL_FLUSH_SIZE,\n               ctx=None,\n               exclusive=False):\n    \n    self._flush_size = flush_size_chars\n    self._buffer = []\n    self._size = 0\n    self._ctx = ctx\n    self._exclusive = exclusive", "docstring": "Constructor.\n\nAny classes that subclass this will need to implement the _write() function.\n\nArgs:\nflush_size_chars: buffer flush threshold as int.\nctx: mapreduce context as context.Context.\nexclusive: a boolean flag indicating if the pool has an exclusive\naccess to the file. If it is True, then it's possible to write\nbigger chunks of data.", "source": "juraj-google-style"}
{"code": "def global_pool_1d(inputs, pooling_type='MAX', mask=None):\n    with tf.name_scope('global_pool', values=[inputs]):\n        if (mask is not None):\n            mask = tf.expand_dims(mask, axis=2)\n            inputs = tf.multiply(inputs, mask)\n        if (pooling_type == 'MAX'):\n            output = tf.reduce_max(inputs, axis=1)\n        elif (pooling_type == 'AVR'):\n            if (mask is not None):\n                output = tf.reduce_sum(inputs, axis=1)\n                num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)\n                output = tf.div(output, tf.maximum(num_elems, 1))\n            else:\n                output = tf.reduce_mean(inputs, axis=1)\n    return output", "docstring": "Pool elements across the last dimension.\n\nUseful to convert a list of vectors into a single vector so as\nto get a representation of a set.\n\nArgs:\ninputs: A tensor of shape [batch_size, sequence_length, input_dims]\ncontaining the sequences of input vectors.\npooling_type: the pooling type to use, MAX or AVR\nmask: A tensor of shape [batch_size, sequence_length] containing a\nmask for the inputs with 1's for existing elements, and 0's elsewhere.\n\nReturns:\nA tensor of shape [batch_size, input_dims] containing the sequences of\ntransformed vectors.", "source": "codesearchnet"}
{"code": "def __init__(self, while_definition):\n        \n        logger.debug(\"starting\")\n\n        if isinstance(while_definition, dict):\n            \n            self.error_on_max = while_definition.get('errorOnMax', False)\n\n            \n            self.max = while_definition.get('max', None)\n\n            \n            self.sleep = while_definition.get('sleep', 0)\n\n            \n            self.stop = while_definition.get('stop', None)\n\n            if self.stop is None and self.max is None:\n                logger.error(f\"while decorator missing both max and stop.\")\n                raise PipelineDefinitionError(\"the while decorator must have \"\n                                              \"either max or stop, or both. \"\n                                              \"But not neither. Note that \"\n                                              \"setting stop: False with no \"\n                                              \"max is an infinite loop. If \"\n                                              \"an infinite loop is really \"\n                                              \"what you want, set stop: False\")\n        else:\n            \n            logger.error(f\"while decorator definition incorrect.\")\n            raise PipelineDefinitionError(\"while decorator must be a dict \"\n                                          \"(i.e a map) type.\")\n\n        logger.debug(\"done\")", "docstring": "Initialize the class. No duh, huh.\n\nYou can happily expect the initializer to initialize all\nmember attributes.\n\nArgs:\nwhile_definition: dict. This is the actual while definition as it\nexists in the pipeline yaml.", "source": "juraj-google-style"}
{"code": "def sub(x1, x2, output_shape=None, name=None):\n    output_shape = convert_to_shape(output_shape)\n    if (not isinstance(x2, Tensor)):\n        return ScalarAddOperation(x1, (- x2)).outputs[0]\n    with tf.name_scope(name, default_name='sub'):\n        (x1, x2) = binary_arguments_to_tensors(x1, x2)\n        return add(x1, negative(x2), output_shape=output_shape)", "docstring": "Binary subtraction with broadcsting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def node_recipients(self, node_name, is_control=False, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node recipients are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    debug_graph = self._debug_graphs[device_name]\n    if is_control:\n        return debug_graph.node_ctrl_recipients[node_name]\n    else:\n        return debug_graph.node_recipients[node_name]", "docstring": "Get recipient of the given node's output according to partition graphs.\n\nArgs:\nnode_name: (`str`) name of the node.\nis_control: (`bool`) whether control outputs, rather than non-control\noutputs, are to be returned.\ndevice_name: (`str`) name of the device. If there is only one device or if\nnode_name exists on only one device, this argument is optional.\n\nReturns:\n(`list` of `str`) all inputs to the node, as a list of node names.\n\nRaises:\nLookupError: If node inputs and control inputs have not been loaded\nfrom partition graphs yet.", "source": "github-repos"}
{"code": "def region(self, start=0, end=None):\n    if (end is None):\n        end = len(self.sequence)\n    return '>{}\\n{}'.format(self.id, self.sequence[start:end])", "docstring": "Returns a region of ``Sequence.sequence``, in FASTA format.\n\nIf called without kwargs, the entire sequence will be returned.\n\nArgs:\n\nstart (int): Start position of the region to be returned. Default\nis 0.\n\nend (int): End position of the region to be returned. Negative values\nwill function as they do when slicing strings.\n\nReturns:\n\nstr: A region of ``Sequence.sequence``, in FASTA format", "source": "codesearchnet"}
{"code": "def is_symmetric(self, symprec=0.1):\n    sg = SpacegroupAnalyzer(self, symprec=symprec)\n    return sg.is_laue()", "docstring": "Checks if slab is symmetric, i.e., contains inversion symmetry.\n\nArgs:\nsymprec (float): Symmetry precision used for SpaceGroup analyzer.\n\nReturns:\n(bool) Whether slab contains inversion symmetry.", "source": "codesearchnet"}
{"code": "def get_pyxb_binding_by_api_version(api_major, api_minor=0):\n    try:\n        return VERSION_TO_BINDING_DICT[(api_major, api_minor)]\n    except KeyError:\n        raise ValueError('Unknown DataONE API version: {}.{}'.format(api_major, api_minor))", "docstring": "Map DataONE API version tag to PyXB binding.\n\nGiven a DataONE API major version number, return the PyXB binding that can\nserialize and deserialize DataONE XML docs of that version.\n\nArgs:\napi_major, api_minor: str or int\nDataONE API major and minor version numbers.\n\n- If ``api_major`` is an integer, it is combined with ``api_minor`` to form an\nexact version.\n\n- If ``api_major`` is a string of ``v1`` or ``v2``, ``api_minor`` is ignored\nand the latest PyXB binding available for the ``api_major`` version is\nreturned.\n\nReturns:\nPyXB binding: E.g., ``d1_common.types.dataoneTypes_v1_1``.", "source": "codesearchnet"}
{"code": "def attrname_to_colname_dict(cls) -> Dict[(str, str)]:\n    attr_col = {}\n    for (attrname, column) in gen_columns(cls):\n        attr_col[attrname] = column.name\n    return attr_col", "docstring": "Asks an SQLAlchemy class how its attribute names correspond to database\ncolumn names.\n\nArgs:\ncls: SQLAlchemy ORM class\n\nReturns:\na dictionary mapping attribute names to database column names", "source": "codesearchnet"}
{"code": "def _BuildFindSpecsFromArtifact(self, definition, environment_variables):\n    find_specs = []\n    for source in definition.sources:\n        if (source.type_indicator == artifact_types.TYPE_INDICATOR_FILE):\n            for path_entry in set(source.paths):\n                specifications = self._BuildFindSpecsFromFileSourcePath(path_entry, source.separator, environment_variables, self._knowledge_base.user_accounts)\n                find_specs.extend(specifications)\n                self.file_system_artifact_names.add(definition.name)\n        elif (source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n            for key_path in set(source.keys):\n                if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):\n                    specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)\n                    find_specs.extend(specifications)\n                    self.registry_artifact_names.add(definition.name)\n        elif (source.type_indicator == artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):\n            key_paths = {key_value['key'] for key_value in source.key_value_pairs}\n            key_paths_string = ', '.join(key_paths)\n            logger.warning('Windows Registry values are not supported, extracting keys: \"{0!s}\"'.format(key_paths_string))\n            for key_path in key_paths:\n                if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):\n                    specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)\n                    find_specs.extend(specifications)\n                    self.registry_artifact_names.add(definition.name)\n        elif (source.type_indicator == artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):\n            for name in source.names:\n                specifications = self._BuildFindSpecsFromGroupName(name, environment_variables)\n                find_specs.extend(specifications)\n        else:\n            logger.warning('Unsupported artifact definition source type: \"{0:s}\"'.format(source.type_indicator))\n    return find_specs", "docstring": "Builds find specifications from an artifact definition.\n\nArgs:\ndefinition (artifacts.ArtifactDefinition): artifact definition.\nenvironment_variables (list[EnvironmentVariableArtifact]):\nenvironment variables.\n\nReturns:\nlist[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find\nspecifications.", "source": "codesearchnet"}
{"code": "def __init__(self, column_names=None, column_sizes=None, title=None):\n    \n    super(CLITabularTableView, self).__init__(\n        column_names=column_names, title=title)\n    self._column_sizes = column_sizes or []", "docstring": "Initializes a command line table view.\n\nArgs:\ncolumn_names (Optional[list[str]]): column names.\ncolumn_sizes (Optional[list[int]]): minimum column sizes, in number of\ncharacters. If a column name or row value is larger than the\nminimum column size the column will be enlarged. Note that the\nminimum columns size will be rounded up to the number of spaces\nof the next tab.\ntitle (Optional[str]): title.", "source": "juraj-google-style"}
{"code": "def _decorate_block(self, start, end):\n    color = self._get_scope_highlight_color()\n    draw_order = DRAW_ORDERS.get('codefolding')\n    d = TextDecoration(self.editor.document(), start_line=start, end_line=(end + 1), draw_order=draw_order)\n    d.set_background(color)\n    d.set_full_width(True, clear=False)\n    self.editor.decorations.add(d)\n    self._scope_decos.append(d)", "docstring": "Create a decoration and add it to the editor.\n\nArgs:\nstart (int) start line of the decoration\nend (int) end line of the decoration", "source": "codesearchnet"}
{"code": "def match1(text, *patterns):\n    \n\n    if len(patterns) == 1:\n        pattern = patterns[0]\n        match = re.search(pattern, text)\n        if match:\n            return match.group(1)\n        else:\n            return None\n    else:\n        ret = []\n        for pattern in patterns:\n            match = re.search(pattern, text)\n            if match:\n                ret.append(match.group(1))\n        return ret", "docstring": "Scans through a string for substrings matched some patterns (first-subgroups only).\n\nArgs:\ntext: A string to be scanned.\npatterns: Arbitrary number of regex patterns.\n\nReturns:\nWhen only one pattern is given, returns a string (None if no match found).\nWhen more than one pattern are given, returns a list of strings ([] if no match found).", "source": "juraj-google-style"}
{"code": "def post(self, request):\n        \n        serializer = self.get_serializer(data=request.data)\n\n        if serializer.is_valid():\n            serializer.save()\n\n            return Response(serializer.data)\n\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "docstring": "Save the provided data using the class' serializer.\n\nArgs:\nrequest:\nThe request being made.\n\nReturns:\nAn ``APIResponse`` instance. If the request was successful\nthe response will have a 200 status code and contain the\nserializer's data. Otherwise a 400 status code and the\nrequest's errors will be returned.", "source": "juraj-google-style"}
{"code": "def _parse_grad_debug_op_name(op_name):\n    name_items = op_name.split('/')\n    assert len(name_items) > 1\n    assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)\n    grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]\n    if '_' in grad_debugger_uuid:\n        grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index('_')]\n    orig_tensor_slot = int(name_items[-2][name_items[-2].rfind('_') + 1:])\n    orig_base_op_name = name_items[-2][:name_items[-2].rfind('_')]\n    orig_tensor_name = '/'.join(name_items[:-2] + [orig_base_op_name]) + ':%d' % orig_tensor_slot\n    return (grad_debugger_uuid, orig_tensor_name)", "docstring": "Parse the name of a debug gradient op.\n\nArgs:\nop_name: the name of the debug gradient op.\n\nReturns:\n1) The UUID of the GradientsDebugger that created the debug gradient op.\n2) Name of the original tensor whose gradient is debugged by the debug\ngradient op.", "source": "github-repos"}
{"code": "def output_key_name(self, input_key: str, output_hist: Hist, projection_name: str, **kwargs) -> str:\n    return projection_name", "docstring": "Returns the key under which the output object should be stored.\n\nNote:\nThis function is just a basic placeholder which returns the projection name\nand likely should be overridden.\n\nArgs:\ninput_key: Key of the input hist in the input dict\noutput_hist: The output histogram\nprojection_name: Projection name for the output histogram\nkwargs: Projection information dict combined with additional arguments passed to\nthe projection function.\nReturns:\nKey under which the output object should be stored. By default, it returns the\nprojection name.", "source": "codesearchnet"}
{"code": "def train_validation_split(arrays, validation_split):\n    flat_arrays = tree.flatten(arrays)\n    unsplitable = [type(t) for t in flat_arrays if not can_slice_array(t)]\n    if unsplitable:\n        raise ValueError(f'Argument `validation_split` is only supported for tensors or NumPy arrays.Found incompatible type in the input: {unsplitable}')\n    if all((t is None for t in flat_arrays)):\n        return (arrays, arrays)\n    first_non_none = None\n    for t in flat_arrays:\n        if t is not None:\n            first_non_none = t\n            break\n    batch_dim = int(first_non_none.shape[0])\n    split_at = int(math.floor(batch_dim * (1.0 - validation_split)))\n    if split_at == 0 or split_at == batch_dim:\n        raise ValueError(f'Training data contains {batch_dim} samples, which is not sufficient to split it into a validation and training set as specified by `validation_split={validation_split}`. Either provide more data, or a different value for the `validation_split` argument.')\n\n    def _split(t, start, end):\n        if t is None:\n            return t\n        return t[start:end]\n    sliceables = convert_to_sliceable(arrays)\n    train_arrays = tree.map_structure(lambda x: _split(x, start=0, end=split_at), sliceables)\n    val_arrays = tree.map_structure(lambda x: _split(x, start=split_at, end=batch_dim), sliceables)\n    return (train_arrays, val_arrays)", "docstring": "Split arrays into train and validation subsets in deterministic order.\n\nThe last part of data will become validation data.\n\nArgs:\narrays: Tensors to split. Allowed inputs are arbitrarily nested\nstructures of Tensors and NumPy arrays.\nvalidation_split: Float between 0 and 1. The proportion of the dataset\nto include in the validation split. The rest of the dataset will be\nincluded in the training split.\n\nReturns:\n`(train_arrays, validation_arrays)`", "source": "github-repos"}
{"code": "def from_value(cls, ion_type, value, annotations=()):\n    if (value is None):\n        value = IonPyNull()\n    else:\n        (args, kwargs) = cls._to_constructor_args(value)\n        value = cls(*args, **kwargs)\n    value.ion_event = None\n    value.ion_type = ion_type\n    value.ion_annotations = annotations\n    return value", "docstring": "Constructs a value as a copy with an associated Ion type and annotations.\n\nArgs:\nion_type (IonType): The associated Ion type.\nvalue (Any): The value to construct from, generally of type ``cls``.\nannotations (Sequence[unicode]):  The sequence Unicode strings decorating this value.", "source": "codesearchnet"}
{"code": "def generate_csr(private_key_bytes, subject_name, fqdn_list):\n    \n    return (\n        cryptography.x509.CertificateSigningRequestBuilder()\n        .subject_name(subject_name)\n        .add_extension(\n            extension=cryptography.x509.SubjectAlternativeName(\n                [cryptography.x509.DNSName(v) for v in fqdn_list]\n            ),\n            critical=False,\n        )\n        .sign(\n            private_key=private_key_bytes,\n            algorithm=cryptography.hazmat.primitives.hashes.SHA256(),\n            backend=cryptography.hazmat.backends.default_backend(),\n        )\n    )", "docstring": "Generate a Certificate Signing Request (CSR).\n\nArgs:\nprivate_key_bytes: bytes\nPrivate key with which the CSR will be signed.\n\nsubject_name: str\nCertificate Subject Name\n\nfqdn_list:\nList of Fully Qualified Domain Names (FQDN) and/or IP addresses for which\nthis certificate will provide authentication.\n\nE.g.: ['my.membernode.org', '1.2.3.4']", "source": "juraj-google-style"}
{"code": "def parents(self, sourcepath, recursive=True):\n        \n        return self._get_recursive_dependancies(\n            self._PARENTS_MAP,\n            sourcepath,\n            recursive=True\n        )", "docstring": "Recursively find all parents that import the given source path.\n\nArgs:\nsourcepath (str): Source file path to search for.\n\nKeyword Arguments:\nrecursive (bool): Switch to enabled recursive finding (if True).\nDefault to True.\n\nReturns:\nset: List of finded parents path.", "source": "juraj-google-style"}
{"code": "def __parameter_default(self, final_subfield):\n    if final_subfield.default:\n        if isinstance(final_subfield, messages.EnumField):\n            return final_subfield.default.name\n        else:\n            return final_subfield.default", "docstring": "Returns default value of final subfield if it has one.\n\nIf this subfield comes from a field list returned from __field_to_subfields,\nnone of the fields in the subfield list can have a default except the final\none since they all must be message fields.\n\nArgs:\nfinal_subfield: A simple field from the end of a subfield list.\n\nReturns:\nThe default value of the subfield, if any exists, with the exception of an\nenum field, which will have its value cast to a string.", "source": "codesearchnet"}
{"code": "def tagged(pode, tag):\n    \n    if tag.startswith('\n        tag = tag[1:]\n    return pode[1]['tags'].get(tag) is not None", "docstring": "Check if a packed node has a given tag.\n\nArgs:\npode (tuple): A packed node.\ntag (str): The tag to check.\n\nExamples:\nCheck if a node is tagged with \"woot\" and dostuff if it is.\n\nif s_node.tagged(node,'woot'):\ndostuff()\n\nNotes:\nIf the tag starts with `#`, this is removed prior to checking.\n\nReturns:\nbool: True if the tag is present. False otherwise.", "source": "juraj-google-style"}
{"code": "def destroy_sg(app='', env='', region='', **_):\n    vpc = get_vpc_id(account=env, region=region)\n    url = '{api}/securityGroups/{env}/{region}/{app}'.format(api=API_URL, env=env, region=region, app=app)\n    payload = {'vpcId': vpc}\n    security_group = requests.get(url, params=payload, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    if (not security_group):\n        LOG.info('Nothing to delete.')\n    else:\n        LOG.info('Found Security Group in %(region)s: %(name)s', security_group)\n        destroy_request = get_template('destroy/destroy_sg.json.j2', app=app, env=env, region=region, vpc=vpc)\n        wait_for_task(destroy_request)\n    return True", "docstring": "Destroy Security Group.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): Region name, e.g. us-east-1.\n\nReturns:\nTrue upon successful completion.", "source": "codesearchnet"}
{"code": "def parse_requirements(file_):\n    \n    modules = []\n    delim = [\"<\", \">\", \"=\", \"!\", \"~\"]  \n\n    try:\n        f = open_func(file_, \"r\")\n    except OSError:\n        logging.error(\"Failed on file: {}\".format(file_))\n        raise\n    else:\n        data = [x.strip() for x in f.readlines() if x != \"\\n\"]\n    finally:\n        f.close()\n\n    data = [x for x in data if x[0].isalpha()]\n\n    for x in data:\n        if not any([y in x for y in delim]):  \n            modules.append({\"name\": x, \"version\": None})\n        for y in x:\n            if y in delim:\n                module = x.split(y)\n                module_name = module[0]\n                module_version = module[-1].replace(\"=\", \"\")\n                module = {\"name\": module_name, \"version\": module_version}\n\n                if module not in modules:\n                    modules.append(module)\n\n                break\n\n    return modules", "docstring": "Parse a requirements formatted file.\n\nTraverse a string until a delimiter is detected, then split at said\ndelimiter, get module name by element index, create a dict consisting of\nmodule:version, and add dict to list of parsed modules.\n\nArgs:\nfile_: File to parse.\n\nRaises:\nOSerror: If there's any issues accessing the file.\n\nReturns:\ntuple: The contents of the file, excluding comments.", "source": "juraj-google-style"}
{"code": "def encode_mezzanine_asset(access_token, processor_id, asset_id, output_assetname, json_profile):\n    path = '/Jobs'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    assets_path = ''.join(['/Assets', \"('\", asset_id, \"')\"])\n    assets_path_encoded = urllib.parse.quote(assets_path, safe='')\n    endpoint_assets = ''.join([ams_rest_endpoint, assets_path_encoded])\n    body = (((((((((('{     \\t\\t\"Name\":\"' + output_assetname) + '\",    \\t\\t\"InputMediaAssets\":[{        \\t  \\t\\t\"__metadata\":{        \\t     \\t\\t\\t\"uri\":\"') + endpoint_assets) + '\"        \\t  \\t\\t}      \\t \\t}],    \\t\\t\"Tasks\":[{        \\t  \\t\\t\"Configuration\":\\'') + json_profile) + '\\',        \\t  \\t\\t\"MediaProcessorId\":\"') + processor_id) + '\",        \\t  \\t\\t\"TaskBody\":\"<?xml version=\\\\\"1.0\\\\\" encoding=\\\\\"utf-16\\\\\"?><taskBody><inputAsset>JobInputAsset(0)</inputAsset><outputAsset assetCreationOptions=\\\\\"0\\\\\" assetName=\\\\\"') + output_assetname) + '\\\\\">JobOutputAsset(0)</outputAsset></taskBody>\"       \\t\\t}] \\t}')\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Get Media Service Encode Mezanine Asset.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nprocessor_id (str): A Media Service Processor ID.\nasset_id (str): A Media Service Asset ID.\noutput_assetname (str): A Media Service Asset Name.\njson_profile (str): A Media Service JSON Profile.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def to_env_var(env_var: str, value) -> str:\n    val = to_yaml(value)\n    ret_val = ('%s=%s' % (env_var, escape_yaml(val)))\n    return ret_val", "docstring": "Create an environment variable from a name and a value.\n\nThis generates a shell-compatible representation of an\nenvironment variable that is assigned a YAML representation of\na value.\n\nArgs:\nenv_var (str): Name of the environment variable.\nvalue (Any): A value we convert from.", "source": "codesearchnet"}
{"code": "def collapse_repeated(labels, seq_length, name=None):\n    with ops.name_scope(name, 'collapse_repeated_labels', [labels, seq_length]):\n        labels = ops.convert_to_tensor(labels, name='labels')\n        seq_length = ops.convert_to_tensor(seq_length, name='seq_length')\n        label_mask = array_ops.concat([array_ops.ones_like(labels[:, :1], dtypes.bool), math_ops.not_equal(labels[:, 1:], labels[:, :-1])], axis=1)\n        maxlen = _get_dim(labels, 1)\n        seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)\n        label_mask = math_ops.logical_and(label_mask, seq_mask)\n        new_seq_len = math_ops.reduce_sum(math_ops.cast(label_mask, dtypes.int32), axis=1)\n        new_maxlen = math_ops.reduce_max(new_seq_len)\n        idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen)\n        flat_labels = array_ops.reshape(labels, [-1])\n        flat_label_mask = array_ops.reshape(label_mask, [-1])\n        flat_idx_mask = array_ops.reshape(idx_mask, [-1])\n        idx = math_ops.range(_get_dim(flat_idx_mask, 0))\n        flat = array_ops.scatter_nd(indices=array_ops.expand_dims(array_ops.boolean_mask(idx, flat_idx_mask), axis=1), updates=array_ops.boolean_mask(flat_labels, flat_label_mask), shape=array_ops.shape(flat_idx_mask))\n        batch_size = _get_dim(labels, 0)\n        new_shape = [batch_size, new_maxlen]\n        return (array_ops.reshape(flat, new_shape), math_ops.cast(new_seq_len, seq_length.dtype))", "docstring": "Merge repeated labels into single labels.\n\nArgs:\nlabels: Tensor of shape [batch, max value in seq_length]\nseq_length: Tensor of shape [batch], sequence length of each batch element.\nname: A name for this `Op`. Defaults to \"collapse_repeated_labels\".\n\nReturns:\nA tuple `(collapsed_labels, new_seq_length)` where\n\ncollapsed_labels: Tensor of shape [batch, max_seq_length] with repeated\nlabels collapsed and padded to max_seq_length, eg:\n`[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`\n\nnew_seq_length: int tensor of shape [batch] with new sequence lengths.", "source": "github-repos"}
{"code": "def all_distances(coords1, coords2):\n    c1 = np.array(coords1)\n    c2 = np.array(coords2)\n    z = ((c1[(:, None, :)] - c2[(None, :, :)]) ** 2)\n    return (np.sum(z, axis=(- 1)) ** 0.5)", "docstring": "Returns the distances between two lists of coordinates\n\nArgs:\ncoords1: First set of cartesian coordinates.\ncoords2: Second set of cartesian coordinates.\n\nReturns:\n2d array of cartesian distances. E.g the distance between\ncoords1[i] and coords2[j] is distances[i,j]", "source": "codesearchnet"}
{"code": "def execute(command, cwd=os.path.curdir, **options):\n    process = subprocess.Popen(shlex.split(command), cwd=cwd, **options)\n    (stdout, stderr) = process.communicate()\n    return (process, stdout, stderr)", "docstring": "Run the system command with optional options.\n\nArgs:\n* command: system command.\n* cwd: current working directory.\n* verbose: direct options for :func:`subprocess.Popen`.\n\nReturns:\nOpened process, standard output & error.", "source": "codesearchnet"}
{"code": "def inference(self, observed_arr):\n        \n        _ = self.__lstm_model.inference(observed_arr)\n        return self.__lstm_model.get_feature_points()", "docstring": "Draws samples from the `fake` distribution.\n\nArgs:\nobserved_arr:     `np.ndarray` of observed data points.\n\nReturns:\n`np.ndarray` of inferenced.", "source": "juraj-google-style"}
{"code": "def jaccard_sims(feature_list):\n    sim_info_list = []\n    for feature_info in feature_list:\n        md5_source = feature_info['md5']\n        features_source = feature_info['features']\n        for feature_info in feature_list:\n            md5_target = feature_info['md5']\n            features_target = feature_info['features']\n            if (md5_source == md5_target):\n                continue\n            sim = jaccard_sim(features_source, features_target)\n            if (sim > 0.5):\n                sim_info_list.append({'source': md5_source, 'target': md5_target, 'sim': sim})\n    return sim_info_list", "docstring": "Compute Jaccard similarities between all the observations in the feature list.\n\nArgs:\nfeature_list: a list of dictionaries, each having structure as\n{ 'md5' : String, 'features': list of Strings }\n\nReturns:\nlist of dictionaries with structure as\n{'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number}", "source": "codesearchnet"}
{"code": "def genCaCert(self, name, signas=None, outp=None, save=True):\n    (pkey, cert) = self._genBasePkeyCert(name)\n    ext0 = crypto.X509Extension(b'basicConstraints', False, b'CA:TRUE')\n    cert.add_extensions([ext0])\n    if (signas is not None):\n        self.signCertAs(cert, signas)\n    else:\n        self.selfSignCert(cert, pkey)\n    if save:\n        keypath = self._savePkeyTo(pkey, 'cas', ('%s.key' % name))\n        if (outp is not None):\n            outp.printf(('key saved: %s' % (keypath,)))\n        crtpath = self._saveCertTo(cert, 'cas', ('%s.crt' % name))\n        if (outp is not None):\n            outp.printf(('cert saved: %s' % (crtpath,)))\n    return (pkey, cert)", "docstring": "Generates a CA keypair.\n\nArgs:\nname (str): The name of the CA keypair.\nsignas (str): The CA keypair to sign the new CA with.\noutp (synapse.lib.output.Output): The output buffer.\n\nExamples:\nMake a CA named \"myca\":\n\nmycakey, mycacert = cdir.genCaCert('myca')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects.", "source": "codesearchnet"}
{"code": "def get_output_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output')", "docstring": "Retrieves the output tensor(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom which to retrieve the attribute.\nE.g. `node_index=0` will correspond to the\nfirst output node of the layer.\n\nReturns:\nA tensor (or list of tensors if the layer has multiple outputs).\n\nRaises:\nRuntimeError: If called in Eager mode.", "source": "github-repos"}
{"code": "def assert_same_rank(self, other):\n    other = as_shape(other)\n    if ((self.ndims is not None) and (other.ndims is not None)):\n        if (self.ndims != other.ndims):\n            raise ValueError(('Shapes %s and %s must have the same rank' % (self, other)))", "docstring": "Raises an exception if `self` and `other` do not have convertible ranks.\n\nArgs:\nother: Another `TensorShape`.\n\nRaises:\nValueError: If `self` and `other` do not represent shapes with the\nsame rank.", "source": "codesearchnet"}
{"code": "def get_xml_request(self):\n\n    def wrap_xml_content(xml_content):\n        ' Wrap XML content string in the correct CPS request envelope.'\n        fields = ['<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n', '<cps:request xmlns:cps=\"www.clusterpoint.com\">\\n', '<cps:storage>', self.connection._storage, '</cps:storage>\\n']\n        if self.timestamp:\n            fields += []\n        if self.request_id:\n            fields += ['<cps:request_id>', str(self.request_id), '</cps:request_id>\\n']\n        if self.connection.reply_charset:\n            fields += []\n        if self.connection.application:\n            fields += ['<cps:application>', self.connection.application, '</cps:application>\\n']\n        fields += ['<cps:command>', self._command, '</cps:command>\\n', '<cps:user>', self.connection._user, '</cps:user>\\n', '<cps:password>', self.connection._password, '</cps:password>\\n', '<cps:account>', self.connection._account, '</cps:account>\\n']\n        if self.timeout:\n            fields += ['<cps:timeout>', str(self.timeout), '</cps:timeout>\\n']\n        if self.type:\n            fields += ['<cps:type>', self.type, '</cps:type>\\n']\n        if xml_content:\n            fields += ['<cps:content>\\n', xml_content, '\\n</cps:content>\\n']\n        else:\n            fields += '<cps:content/>\\n'\n        fields += '</cps:request>\\n'\n        xml_request = ''.join(fields)\n        return xml_request\n    xml_content = []\n    if self._documents:\n        xml_content += self._documents\n    for (key, value) in self._nested_content.items():\n        if value:\n            xml_content += ((['<{0}>'.format(key)] + ['<{0}>{1}</{0}>'.format(sub_key, sub_value) for (sub_key, sub_value) in value if sub_value]) + ['</{0}>'.format(key)])\n    for (key, value) in self._content.items():\n        if (not isinstance(value, list)):\n            value = [value]\n        xml_content += ['<{0}>{1}</{0}>'.format(key, item) for item in value if item]\n    xml_content = '\\n'.join(xml_content)\n    return wrap_xml_content(xml_content)", "docstring": "Make xml request string from stored request information.\n\nReturns:\nA properly formated XMl request string containing all set request fields and\nwraped in connections envelope.", "source": "codesearchnet"}
{"code": "def ParseApplicationUsageRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    application_name = self._GetRowValue(query_hash, row, 'event')\n    usage = 'Application {0:s}'.format(application_name)\n    event_data = MacOSApplicationUsageEventData()\n    event_data.application = self._GetRowValue(query_hash, row, 'app_path')\n    event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')\n    event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')\n    event_data.count = self._GetRowValue(query_hash, row, 'number_times')\n    event_data.query = query\n    timestamp = self._GetRowValue(query_hash, row, 'last_time')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, usage)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an application usage row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def _build_ds_from_instruction(instruction, ds_from_file_fn):\n    examples_ds = ds_from_file_fn(instruction['filepath'])\n    mask_ds = _build_mask_ds(mask_offset=instruction['mask_offset'], mask=instruction['mask'])\n    ds = tf.data.Dataset.zip((examples_ds, mask_ds))\n    ds = ds.filter((lambda example, mask: mask))\n    ds = ds.map((lambda example, mask: example))\n    return ds", "docstring": "Map an instruction to a real datasets for one particular shard.\n\nArgs:\ninstruction: A `dict` of `tf.Tensor` containing the instruction to load\nthe particular shard (filename, mask,...)\nds_from_file_fn: `fct`, function which returns the dataset associated to\nthe filename\n\nReturns:\ndataset: `tf.data.Dataset`, The shard loaded from the instruction", "source": "codesearchnet"}
{"code": "def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):\n    config = cls(**config_dict)\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(config, key):\n            setattr(config, key, value)\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    if return_unused_kwargs:\n        return (config, kwargs)\n    else:\n        return config", "docstring": "Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters.\n\nArgs:\nconfig_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the configuration object.\nreturn_unused_kwargs (`bool`,*optional*, defaults to `False`):\nWhether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in\n`PreTrainedModel`.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the configuration object.\n\nReturns:\n[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.", "source": "github-repos"}
{"code": "def device_id_to_slug(did):\n    \n\n    try:\n        device_slug = IOTileDeviceSlug(did, allow_64bits=False)\n    except ValueError:\n        raise ArgumentError(\"Unable to recognize {} as a device id\".format(did))\n\n    return str(device_slug)", "docstring": "Converts a device id into a correct device slug.\n\nArgs:\ndid (long) : A device id\ndid (string) : A device slug in the form of XXXX, XXXX-XXXX-XXXX, d--XXXX, d--XXXX-XXXX-XXXX-XXXX\nReturns:\nstr: The device slug in the d--XXXX-XXXX-XXXX-XXXX format\nRaises:\nArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string", "source": "juraj-google-style"}
{"code": "def connection_made(self, transport):\n        \n        self.transport = transport\n        self.responders = [self.make_responder(self)]\n\n        try:\n            good_func = callable(self.responders[0].on_data)\n        except AttributeError:\n            good_func = False\n\n        if not good_func:\n            err_str = \"Provided responder MUST implement an 'on_data' method\"\n            raise TypeError(err_str)\n\n        log_info = (id(self), self.remote_hostname, self.remote_port)\n        log.info(\"{:d} connection from {}:{}\", *log_info)", "docstring": "(asyncio.Protocol member)\n\nCalled upon when there is a new socket connection.\nThis creates a new responder (as determined by the member\n'responder_type') and stores in a list.\nIncoming data from this connection will always call on_data\nto the last element of this list.\n\nArgs:\ntransport (asyncio.Transport): The Transport handling the\nsocket communication", "source": "juraj-google-style"}
{"code": "def storage(line, cell=None):\n  \n  parser = datalab.utils.commands.CommandParser(prog='storage', description=)\n\n  \n  \n  \n  \n  \n  \n  \n  \n  copy_parser = parser.subcommand('copy',\n                                  'Copy one or more GCS objects to a different location.')\n  copy_parser.add_argument('-s', '--source', help='The name of the object(s) to copy', nargs='+')\n  copy_parser.add_argument('-d', '--destination', required=True,\n                           help='The copy destination. For multiple source items this must be a '\n                                'bucket.')\n  copy_parser.set_defaults(func=_storage_copy)\n\n  create_parser = parser.subcommand('create', 'Create one or more GCS buckets.')\n  create_parser.add_argument('-p', '--project', help='The project associated with the objects')\n  create_parser.add_argument('-b', '--bucket', help='The name of the bucket(s) to create',\n                             nargs='+')\n  create_parser.set_defaults(func=_storage_create)\n\n  delete_parser = parser.subcommand('delete', 'Delete one or more GCS buckets or objects.')\n  delete_parser.add_argument('-b', '--bucket', nargs='*',\n                             help='The name of the bucket(s) to remove')\n  delete_parser.add_argument('-o', '--object', nargs='*',\n                             help='The name of the object(s) to remove')\n  delete_parser.set_defaults(func=_storage_delete)\n\n  list_parser = parser.subcommand('list', 'List buckets in a project, or contents of a bucket.')\n  list_parser.add_argument('-p', '--project', help='The project associated with the objects')\n  group = list_parser.add_mutually_exclusive_group()\n  group.add_argument('-o', '--object',\n                     help='The name of the objects(s) to list; can include wildchars',\n                     nargs='?')\n  group.add_argument('-b', '--bucket',\n                     help='The name of the buckets(s) to list; can include wildchars',\n                     nargs='?')\n  list_parser.set_defaults(func=_storage_list)\n\n  read_parser = parser.subcommand('read',\n                                  'Read the contents of a storage object into a Python variable.')\n  read_parser.add_argument('-o', '--object', help='The name of the object to read',\n                           required=True)\n  read_parser.add_argument('-v', '--variable', required=True,\n                           help='The name of the Python variable to set')\n  read_parser.set_defaults(func=_storage_read)\n\n  view_parser = parser.subcommand('view', 'View the contents of a storage object.')\n  view_parser.add_argument('-n', '--head', type=int, default=20,\n                           help='The number of initial lines to view')\n  view_parser.add_argument('-t', '--tail', type=int, default=20,\n                           help='The number of lines from end to view')\n  view_parser.add_argument('-o', '--object', help='The name of the object to view',\n                           required=True)\n  view_parser.set_defaults(func=_storage_view)\n\n  write_parser = parser.subcommand('write',\n                                   'Write the value of a Python variable to a storage object.')\n  write_parser.add_argument('-v', '--variable', help='The name of the source Python variable',\n                            required=True)\n  write_parser.add_argument('-o', '--object', required=True,\n                            help='The name of the destination GCS object to write')\n  write_parser.add_argument('-c', '--content_type', help='MIME type', 
default='text/plain')\n  write_parser.set_defaults(func=_storage_write)\n\n  return datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the storage cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the storage line.\nReturns:\nThe results of executing the cell.", "source": "juraj-google-style"}
{"code": "def _parse_logline_timestamp(t):\n    \n    date, time = t.split(' ')\n    month, day = date.split('-')\n    h, m, s = time.split(':')\n    s, ms = s.split('.')\n    return (month, day, h, m, s, ms)", "docstring": "Parses a logline timestamp into a tuple.\n\nArgs:\nt: Timestamp in logline format.\n\nReturns:\nAn iterable of date and time elements in the order of month, day, hour,\nminute, second, microsecond.", "source": "juraj-google-style"}
{"code": "def _ParseHTTPHeaders(self, header_data, offset, display_name):\n    \n    header_string = header_data.decode('ascii', errors='replace')\n\n    try:\n      http_header_start = header_string.index('request-method')\n    except ValueError:\n      logger.debug('No request method in header: \"{0:s}\"'.format(header_string))\n      return None, None\n\n    \n    http_headers = header_string[http_header_start::]\n\n    header_parts = http_headers.split('\\x00')\n\n    \n    request_method = header_parts[1]\n\n    if request_method not in self._REQUEST_METHODS:\n      logger.debug((\n          '[{0:s}] {1:s}:{2:d}: Unknown HTTP method \\'{3:s}\\'. Response '\n          'headers: \\'{4:s}\\'').format(\n              self.NAME, display_name, offset, request_method, header_string))\n\n    try:\n      response_head_start = http_headers.index('response-head')\n    except ValueError:\n      logger.debug('No response head in header: \"{0:s}\"'.format(header_string))\n      return request_method, None\n\n    \n    response_head = http_headers[response_head_start::]\n\n    response_head_parts = response_head.split('\\x00')\n\n    \n    \n    \n    response_head_text = response_head_parts[1]\n    response_head_text_parts = response_head_text.split('\\r\\n')\n\n    \n    \n    response_code = response_head_text_parts[0]\n\n    if not response_code.startswith('HTTP'):\n      logger.debug((\n          '[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. '\n          'Response headers: \\'{3:s}\\'.').format(\n              self.NAME, display_name, offset, header_string))\n\n    return request_method, response_code", "docstring": "Extract relevant information from HTTP header.\n\nArgs:\nheader_data (bytes): HTTP header data.\noffset (int): offset of the cache record, relative to the start of\nthe Firefox cache file.\ndisplay_name (str): display name of the Firefox cache file.\n\nReturns:\ntuple: containing:\n\nstr: HTTP request method or None if the value cannot be extracted.\nstr: HTTP response code or None if the value cannot be extracted.", "source": "juraj-google-style"}
{"code": "def before_request(self, request, method, url, headers):\n    parts = urllib.parse.urlsplit(url)\n    audience = urllib.parse.urlunsplit((parts.scheme, parts.netloc, parts.path, '', ''))\n    token = self._get_jwt_for_audience(audience)\n    self.apply(headers, token=token)", "docstring": "Performs credential-specific before request logic.\n\nArgs:\nrequest (Any): Unused. JWT credentials do not need to make an\nHTTP request to refresh.\nmethod (str): The request's HTTP method.\nurl (str): The request's URI. This is used as the audience claim\nwhen generating the JWT.\nheaders (Mapping): The request's headers.", "source": "codesearchnet"}
{"code": "def list_workflow_outputs(self):\n        \n        workflow_outputs = []\n        for task in self.tasks:\n            for output_port_name in task.outputs._portnames:\n                if task.outputs.__getattribute__(output_port_name).persist:\n                    workflow_outputs.append(task.name + ':' + output_port_name)\n\n        return workflow_outputs", "docstring": "Get a list of outputs from the workflow that are saved to S3. To get resolved locations call workflow status.\nArgs:\nNone\n\nReturns:\nlist", "source": "juraj-google-style"}
{"code": "def __content_type_matches(self, content_type, available_content_types):\n        \n\n        if content_type is None:\n            return False\n\n        if content_type in available_content_types:\n            return True\n\n        for available_content_type in available_content_types:\n            if available_content_type in content_type:\n                return True\n\n        return False", "docstring": "Check if the given content type matches one of the available content types.\n\nArgs:\ncontent_type (str): The given content type.\navailable_content_types list(str): All the available content types.\n\nReturns:\nbool: True if a match was found, False otherwise.", "source": "juraj-google-style"}
{"code": "def set_synchronous_execution(enable):\n    if enable is None:\n        context.context().execution_mode = None\n    elif enable:\n        context.context().execution_mode = context.SYNC\n    else:\n        context.context().execution_mode = context.ASYNC", "docstring": "Specifies whether operations are executed synchronously or asynchronously.\n\nTensorFlow can execute operations synchronously or asynchronously. If\nasynchronous execution is enabled, operations may return \"non-ready\" handles.\n\nWhen `enable` is set to None, an appropriate value will be picked\nautomatically. The value picked may change between TensorFlow releases.\n\nArgs:\nenable: Whether operations should be dispatched synchronously.\nValid values:\n- None: sets the system default.\n- True: executes each operation synchronously.\n- False: executes each operation asynchronously.", "source": "github-repos"}
{"code": "def contract_low_support(self, threshold):\n        \n        if not isinstance(threshold, float) and not isinstance(threshold, int):\n            raise TypeError(\"threshold must be float or int\")\n        to_contract = list()\n        for node in self.traverse_preorder():\n            try:\n                if float(str(node)) < threshold:\n                    to_contract.append(node)\n            except:\n                pass\n        for node in to_contract:\n            node.contract()", "docstring": "Contract internal nodes labeled by a number (e.g. branch support) below ``threshold``\n\nArgs:\n``threshold`` (``float``): The support threshold to use when contracting nodes", "source": "juraj-google-style"}
{"code": "def coinbase_withdraw(self, amount, currency, coinbase_account_id):\n    params = {'amount': amount, 'currency': currency, 'coinbase_account_id': coinbase_account_id}\n    return self._send_message('post', '/withdrawals/coinbase-account', data=json.dumps(params))", "docstring": "Withdraw funds to a coinbase account.\n\nYou can move funds between your Coinbase accounts and your cbpro\ntrading accounts within your daily limits. Moving funds between\nCoinbase and cbpro is instant and free.\n\nSee AuthenticatedClient.get_coinbase_accounts() to receive\ninformation regarding your coinbase_accounts.\n\nArgs:\namount (Decimal): The amount to withdraw.\ncurrency (str): The type of currency (eg. 'BTC')\ncoinbase_account_id (str): ID of the coinbase account.\n\nReturns:\ndict: Information about the deposit. Example::\n{\n\"id\":\"593533d2-ff31-46e0-b22e-ca754147a96a\",\n\"amount\":\"10.00\",\n\"currency\": \"BTC\",\n}", "source": "codesearchnet"}
{"code": "def _srvmgr(cmd, return_json=False):\n    \n    if isinstance(cmd, list):\n        cmd = ' '.join(cmd)\n\n    if return_json:\n        cmd = 'ConvertTo-Json -Compress -Depth 4 -InputObject @({0})' \\\n              ''.format(cmd)\n\n    cmd = 'Import-Module WebAdministration; {0}'.format(cmd)\n\n    ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)\n\n    if ret['retcode'] != 0:\n        msg = 'Unable to execute command: {0}\\nError: {1}' \\\n              ''.format(cmd, ret['stderr'])\n        log.error(msg)\n\n    return ret", "docstring": "Execute a powershell command from the WebAdministration PS module.\n\nArgs:\ncmd (list): The command to execute in a list\nreturn_json (bool): True formats the return in JSON, False just returns\nthe output of the command.\n\nReturns:\nstr: The output from the command", "source": "juraj-google-style"}
{"code": "def to_json_string(self):\n    return json.dumps(self.__dict__, indent=2) + '\\n'", "docstring": "Serializes this instance to a JSON formatted string.\nReturns:\nstr: JSON formatted string representing the configuration instance.", "source": "github-repos"}
{"code": "def are_equivalent_xml(a_xml, b_xml):\n    \n    return are_equivalent_pyxb(\n        d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml)\n    )", "docstring": "Check if two ReplicationPolicy XML docs are semantically equivalent.\n\nThe ReplicationPolicy XML docs are normalized before comparison.\n\nArgs:\na_xml, b_xml: ReplicationPolicy XML docs to compare\n\nReturns:\nbool: ``True`` if the resulting policies for the two objects are semantically\nequivalent.", "source": "juraj-google-style"}
{"code": "def array2bytes(arr, bytes_type=bytes):\n    bio = io.BytesIO()\n    np.save(bio, arr, allow_pickle=False)\n    return bytes_type(bio.getvalue())", "docstring": "Wraps NumPy's save function to return bytes.\n\nWe use :func:`numpy.save` rather than :meth:`numpy.ndarray.tobytes` because\nit encodes endianness and order.\n\nArgs:\narr (:obj:`numpy.ndarray`):\nArray to be saved.\n\nbytes_type (class, optional, default=bytes):\nThis class will be used to wrap the bytes objects in the\nserialization if `use_bytes` is true. Useful for when using\nPython 2 and using BSON encoding, which will not accept the raw\n`bytes` type, so `bson.Binary` can be used instead.\n\n\nReturns:\nbytes_type", "source": "codesearchnet"}
{"code": "def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:\n    logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')\n    decoder_config.is_decoder = True\n    decoder_config.add_cross_attention = True\n    return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model\nconfiguration and decoder model configuration.\n\nReturns:\n[`SpeechEncoderDecoderConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def _find_max_under_constraint(self, constrained, dependent, predicate):\n    feasible = array_ops.where_v2(predicate(constrained, self.value))\n    feasible_exists = math_ops.greater(array_ops.size(feasible), 0)\n    max_dependent = math_ops.reduce_max(array_ops.gather(dependent, feasible))\n    return array_ops.where_v2(feasible_exists, max_dependent, 0.0)", "docstring": "Returns the maximum of dependent_statistic that satisfies the constraint.\n\nArgs:\nconstrained: Over these values the constraint\nis specified. A rank-1 tensor.\ndependent: From these values the maximum that satiesfies the\nconstraint is selected. Values in this tensor and in\n`constrained` are linked by having the same threshold at each\nposition, hence this tensor must have the same shape.\npredicate: A binary boolean functor to be applied to arguments\n`constrained` and `self.value`, e.g. `tf.greater`.\n\nReturns maximal dependent value, if no value satiesfies the constraint 0.0.", "source": "github-repos"}
{"code": "def get_environmental_configuration(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/environmentalConfiguration\"\n        return self._client.get(uri)", "docstring": "Returns a description of the environmental configuration (supported feature set, calibrated minimum & maximum\npower, location & dimensions, ...) of the resource.\n\nArgs:\nid_or_uri:\nCan be either the Unmanaged Device id or the uri\n\nReturns:\ndict:\nEnvironmentalConfiguration", "source": "juraj-google-style"}
{"code": "def intern(self, text):\n    if self.table_type.is_shared:\n        raise TypeError('Cannot intern on shared symbol table')\n    if (not isinstance(text, six.text_type)):\n        raise TypeError(('Cannot intern non-Unicode sequence into symbol table: %r' % text))\n    token = self.get(text)\n    if (token is None):\n        token = self.__add_text(text)\n    return token", "docstring": "Interns the given Unicode sequence into the symbol table.\n\nNote:\nThis operation is only valid on local symbol tables.\n\nArgs:\ntext (unicode): The target to intern.\n\nReturns:\nSymbolToken: The mapped symbol token which may already exist in the table.", "source": "codesearchnet"}
{"code": "def import_event(tensor, name=None):\n    return gen_summary_ops.import_event(_summary_state.writer._resource, tensor, name=name)", "docstring": "Writes a `tf.compat.v1.Event` binary proto.\n\nThis can be used to import existing event logs into a new summary writer sink.\nPlease note that this is lower level than the other summary functions and\nwill ignore the `tf.summary.should_record_summaries` setting.\n\nArgs:\ntensor: A `tf.Tensor` of type `string` containing a serialized\n`tf.compat.v1.Event` proto.\nname: A name for the operation (optional).\n\nReturns:\nThe created `tf.Operation`.", "source": "github-repos"}
{"code": "def FindHeader(self, header):\n    for section_list in self.include_list:\n        for f in section_list:\n            if (f[0] == header):\n                return f[1]\n    return (- 1)", "docstring": "Check if a header has already been included.\n\nArgs:\nheader: header to check.\nReturns:\nLine number of previous occurrence, or -1 if the header has not\nbeen seen before.", "source": "codesearchnet"}
{"code": "def hr_dp020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `hr_dp020`'.format(value))\n    self._hr_dp020 = value", "docstring": "Corresponds to IDD Field `hr_dp020`\nhumidity ratio corresponding to\nDew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence\ncalculated at the standard atmospheric pressure at elevation of station\n\nArgs:\nvalue (float): value for IDD Field `hr_dp020`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _all_sum_grad(op, grad):\n    if op.get_attr('reduction') != b'sum':\n        raise LookupError('No gradient defined for NcclAllReduce except for reduction=\"sum\".')\n    _check_device(grad, expected=op.device)\n    num_devices = op.get_attr('num_devices')\n    shared_name = op.get_attr('shared_name') + b'_grad'\n    with ops.device(op.device):\n        return gen_nccl_ops.nccl_all_reduce(input=grad, reduction='sum', num_devices=num_devices, shared_name=shared_name)", "docstring": "The gradients for `all_sum`.\n\nArgs:\nop: The `all_sum` `Operation` that we are differentiating.\ngrad: Gradient with respect to the output of the `all_sum` op.\n\nReturns:\nThe gradient with respect to the output of `all_sum`.\n\nRaises:\nLookupError: If `reduction` is not `sum`.", "source": "github-repos"}
{"code": "def entanglement_of_formation(state, d0, d1=None):\n    \n    state = np.array(state)\n\n    if d1 is None:\n        d1 = int(len(state) / d0)\n\n    if state.ndim == 2 and len(state) == 4 and d0 == 2 and d1 == 2:\n        return __eof_qubit(state)\n    elif state.ndim == 1:\n        \n        if d0 < d1:\n            tr = [1]\n        else:\n            tr = [0]\n        state = partial_trace(state, tr, dimensions=[d0, d1])\n        return entropy(state)\n    else:\n        print('Input must be a state-vector or 2-qubit density matrix.')\n    return None", "docstring": "Compute the entanglement of formation of quantum state.\n\nThe input quantum state must be either a bipartite state vector, or a\n2-qubit density matrix.\n\nArgs:\nstate (array_like): (N) array_like or (4,4) array_like, a\nbipartite quantum state.\nd0 (int): the dimension of the first subsystem.\nd1 (int or None): the dimension of the second subsystem.\n\nReturns:\nfloat: The entanglement of formation.", "source": "juraj-google-style"}
{"code": "def camelcase(text, acronyms=None):\n    (words, _case, _sep) = case_parse.parse_case(text, acronyms)\n    if words:\n        words[0] = words[0].lower()\n    return ''.join(words)", "docstring": "Return text in camelCase style.\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> camelcase(\"hello world\")\n'helloWorld'\n>>> camelcase(\"HELLO_HTML_WORLD\", True, [\"HTML\"])\n'helloHTMLWorld'", "source": "codesearchnet"}
{"code": "def _validate_or_infer_batch_size(self, batch_size, steps, x):\n    if isinstance(x, (data_types.DatasetV1, data_types.DatasetV2, data_utils.Sequence)) or tf_inspect.isgenerator(x):\n        if batch_size is not None:\n            raise ValueError('The `batch_size` argument must not be specified for the given input type. Received input: {}, batch_size: {}'.format(x, batch_size))\n        return\n    layers = self._flatten_layers(include_self=False, recursive=False)\n    first_layer = next(layers, None)\n    if first_layer:\n        static_batch_size = training_utils.get_static_batch_size(first_layer)\n        if static_batch_size is not None:\n            if self._distribution_strategy and distributed_training_utils.global_batch_size_supported(self._distribution_strategy):\n                num_splits_for_ds = self._distribution_strategy.num_replicas_in_sync\n            else:\n                num_splits_for_ds = 1\n            if batch_size is not None:\n                if batch_size % num_splits_for_ds != 0:\n                    raise ValueError('The `batch_size` argument ({}) must be divisible the by number of replicas ({})'.format(batch_size, num_splits_for_ds))\n                per_replica_batch_size = batch_size \n                if per_replica_batch_size != static_batch_size:\n                    raise ValueError('The `batch_size` argument value {} is incompatible with the specified batch size of your Input Layer: {}'.format(per_replica_batch_size, static_batch_size))\n            if isinstance(x, (data_types.DatasetV2, iterator_ops.Iterator, iterator_ops.IteratorBase)):\n                ds_batch_size = tensor_shape.Dimension(nest.flatten(dataset_ops.get_legacy_output_shapes(x))[0][0]).value\n                if ds_batch_size is not None:\n                    if ds_batch_size % num_splits_for_ds != 0:\n                        raise ValueError('The batch output shape of your `Dataset` {} cannot be divisible by number of replicas {}'.format(ds_batch_size, num_splits_for_ds))\n                    ds_per_replica_batch_size = ds_batch_size \n                    if ds_per_replica_batch_size != static_batch_size:\n                        raise ValueError('The batch output shape of your `Dataset` is {}, which is incompatible with the specified batch size of your Input Layer: {}'.format(ds_per_replica_batch_size, static_batch_size))\n            if steps is None:\n                batch_size = static_batch_size * num_splits_for_ds\n    if batch_size is None and steps is None:\n        batch_size = 32\n    return batch_size", "docstring": "Validates that the `batch_size` provided is consistent with InputLayer.\n\nIt's possible that the user specified a static batch size in their\nInputLayer. If so, this method checks the provided `batch_size` and `x`\narguments are consistent with this static batch size. Also, if\n`batch_size` is `None`, this method will attempt to infer the batch size\nfrom the static batch size of the InputLayer. Lastly, ValueError will be\nraised if `x` is a tf.data.Dataset and `batch_size` is specified as we\nexpect users to provide batched datasets.\n\nArgs:\nbatch_size: The batch_size provided as an argument to\nfit/evaluate/predict.\nsteps: The steps provided as an argument to fit/evaluate/predict.\nx: The data passed as `x` to fit/evaluate/predict.\n\nReturns:\nThe validated batch_size, auto-inferred from the first layer if not\nprovided.", "source": "github-repos"}
{"code": "def send_fetches(self):\n    futures = []\n    for (node_id, request) in six.iteritems(self._create_fetch_requests()):\n        if self._client.ready(node_id):\n            log.debug('Sending FetchRequest to node %s', node_id)\n            future = self._client.send(node_id, request)\n            future.add_callback(self._handle_fetch_response, request, time.time())\n            future.add_errback(log.error, 'Fetch to node %s failed: %s', node_id)\n            futures.append(future)\n    self._fetch_futures.extend(futures)\n    self._clean_done_fetch_futures()\n    return futures", "docstring": "Send FetchRequests for all assigned partitions that do not already have\nan in-flight fetch or pending fetch data.\n\nReturns:\nList of Futures: each future resolves to a FetchResponse", "source": "codesearchnet"}
{"code": "def moma(self, wt_fluxes):\n    reactions = set(self._adjustment_reactions())\n    v = self._v\n    obj_expr = 0\n    for (f_reaction, f_value) in iteritems(wt_fluxes):\n        if (f_reaction in reactions):\n            obj_expr += ((f_value - v[f_reaction]) ** 2)\n    self._prob.set_objective(obj_expr)\n    self._solve(lp.ObjectiveSense.Minimize)", "docstring": "Minimize the redistribution of fluxes using Euclidean distance.\n\nMinimizing the redistribution of fluxes using a quadratic objective\nfunction. The distance is minimized by minimizing the sum of\n(wild type - knockout)^2.\n\nArgs:\nwt_fluxes: Dictionary of all the wild type fluxes that will be\nused to find a close MOMA solution. Fluxes can be expiremental\nor calculated using :meth: get_fba_flux(objective).", "source": "codesearchnet"}
{"code": "def keypath(self, key):\n        \n        return fs.path(self.path, self.escape_key(key))", "docstring": "Get the filesystem path for a key.\n\nArguments:\nkey: Key.\n\nReturns:\nstr: Absolute path.", "source": "juraj-google-style"}
{"code": "def _MergeEntities(self, a, b):\n    \n    distance = transitfeed.ApproximateDistanceBetweenStops(a, b)\n    if distance > self.largest_stop_distance:\n      raise MergeError(\"Stops are too far apart: %.1fm \"\n                       \"(largest_stop_distance is %.1fm).\" %\n                       (distance, self.largest_stop_distance))\n    scheme = {'stop_id': self._MergeIdentical,\n              'stop_name': self._MergeIdenticalCaseInsensitive,\n              'zone_id': self._MergeIdentical,\n              'location_type': self._MergeIdentical}\n    return self._SchemedMerge(scheme, a, b)", "docstring": "Merges two stops.\n\nFor the stops to be merged, they must have:\n- the same stop_id\n- the same stop_name (case insensitive)\n- the same zone_id\n- locations less than largest_stop_distance apart\nThe other attributes can have arbitary changes. The merged attributes are\ntaken from the new stop.\n\nArgs:\na: The first stop.\nb: The second stop.\n\nReturns:\nThe merged stop.\n\nRaises:\nMergeError: The stops could not be merged.", "source": "juraj-google-style"}
{"code": "def GetCacheValueByObject(self, vfs_object):\n    for (identifier, cache_value) in iter(self._values.items()):\n        if (not cache_value):\n            raise RuntimeError('Missing cache value.')\n        if (cache_value.vfs_object == vfs_object):\n            return (identifier, cache_value)\n    return (None, None)", "docstring": "Retrieves the cache value for the cached object.\n\nArgs:\nvfs_object (object): VFS object that was cached.\n\nReturns:\ntuple[str, ObjectsCacheValue]: identifier and cache value object or\n(None, None) if not cached.\n\nRaises:\nRuntimeError: if the cache value is missing.", "source": "codesearchnet"}
{"code": "def get_box(self, box_key = None, sort_by = None):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.boxes_suffix\n\t\t\t\t\t\t])\n\t\tif box_key:\n\t\t\turi = '/'.join([\n\t\t\t\t\t\t\turi,\n\t\t\t\t\t\t\tbox_key\n\t\t\t\t\t\t\t])\n\t\tif sort_by:\n\t\t\t\tif sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:\n\t\t\t\t\turi += self.sort_by_postfix + sort_by\n\t\t\t\telse:\t\t\n\t\t\t\t\treturn requests.codes.bad_request, {'success' : 'False', \n\t\t\t\t\t\t\t\t\t\t\t\t'error': 'sortBy needs to be \\'creationTimestamp\\', or \\'lastUpdatedTimestamp\\''}\n\t\treturn self._req('get', uri)", "docstring": "Gets a list of one/all box objects. Performs a single GET.\nTo go deeper individual boxes need to be polled for their contents.\nThis is a directory for what we could ask for.\nArgs:\nbox_key\t\tkey for the target box (default: None i.e. ALL)\nsort_by\t\tin desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'\nreturns \t(status code for the GET request, dict of box or a list thereof)", "source": "juraj-google-style"}
{"code": "def create_tasks(self, wfk_file, scr_input):\n        \n        assert len(self) == 0\n        wfk_file = self.wfk_file = os.path.abspath(wfk_file)\n\n        \n        \n        shell_manager = self.manager.to_shell_manager(mpi_procs=1)\n\n        w = Work(workdir=self.tmpdir.path_join(\"_qptdm_run\"), manager=shell_manager)\n\n        fake_input = scr_input.deepcopy()\n        fake_task = w.register(fake_input)\n        w.allocate()\n        w.build()\n\n        \n        \n        fake_task.inlink_file(wfk_file)\n        fake_task.set_vars({\"nqptdm\": -1})\n        fake_task.start_and_wait()\n\n        \n        with NetcdfReader(fake_task.outdir.has_abiext(\"qptdms.nc\")) as reader:\n            qpoints = reader.read_value(\"reduced_coordinates_of_kpoints\")\n        \n\n        \n        for qpoint in qpoints:\n            qptdm_input = scr_input.deepcopy()\n            qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)\n            new_task = self.register_scr_task(qptdm_input, manager=self.manager)\n            \n            if self.flow.gc is not None:\n                new_task.set_gc(self.flow.gc)\n\n        self.allocate()", "docstring": "Create the SCR tasks and register them in self.\n\nArgs:\nwfk_file: Path to the ABINIT WFK file to use for the computation of the screening.\nscr_input: Input for the screening calculation.", "source": "juraj-google-style"}
{"code": "def _ScanNode(self, scan_context, scan_node, auto_recurse=True):\n    if (not scan_context):\n        raise ValueError('Invalid scan context.')\n    if (not scan_node):\n        raise ValueError('Invalid scan node.')\n    scan_path_spec = scan_node.path_spec\n    system_level_file_entry = None\n    if scan_node.IsSystemLevel():\n        system_level_file_entry = resolver.Resolver.OpenFileEntry(scan_node.path_spec, resolver_context=self._resolver_context)\n        if (system_level_file_entry is None):\n            raise errors.BackEndError('Unable to open file entry.')\n        if system_level_file_entry.IsDirectory():\n            scan_context.SetSourceType(definitions.SOURCE_TYPE_DIRECTORY)\n            return\n        source_path_spec = self.ScanForStorageMediaImage(scan_node.path_spec)\n        if source_path_spec:\n            scan_node.scanned = True\n            scan_node = scan_context.AddScanNode(source_path_spec, scan_node)\n            if system_level_file_entry.IsDevice():\n                source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE\n            else:\n                source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE\n            scan_context.SetSourceType(source_type)\n            if (not auto_recurse):\n                return\n    source_path_spec = None\n    while True:\n        if scan_node.IsFileSystem():\n            break\n        if scan_node.SupportsEncryption():\n            self._ScanEncryptedVolumeNode(scan_context, scan_node)\n        if scan_context.IsLockedScanNode(scan_node.path_spec):\n            break\n        source_path_spec = self.ScanForVolumeSystem(scan_node.path_spec)\n        if (not source_path_spec):\n            break\n        if (not scan_context.HasScanNode(source_path_spec)):\n            scan_node.scanned = True\n            scan_node = scan_context.AddScanNode(source_path_spec, scan_node)\n            if (system_level_file_entry and system_level_file_entry.IsDevice()):\n                source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE\n            else:\n                source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE\n            scan_context.SetSourceType(source_type)\n        if scan_node.IsVolumeSystemRoot():\n            self._ScanVolumeSystemRootNode(scan_context, scan_node, auto_recurse=auto_recurse)\n            return\n        if ((not auto_recurse) and scan_context.updated):\n            return\n        if (not scan_context.updated):\n            break\n    if scan_node.IsVolumeSystemRoot():\n        pass\n    elif scan_context.IsLockedScanNode(scan_node.path_spec):\n        pass\n    elif ((scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW) and auto_recurse and (scan_node.path_spec != scan_path_spec)):\n        pass\n    elif (not scan_node.IsFileSystem()):\n        source_path_spec = self.ScanForFileSystem(scan_node.path_spec)\n        if (not source_path_spec):\n            if (scan_node.path_spec.type_indicator == definitions.TYPE_INDICATOR_RAW):\n                scan_node = scan_context.RemoveScanNode(scan_node.path_spec)\n                scan_context.source_type = definitions.SOURCE_TYPE_FILE\n            else:\n                scan_context.SetSourceType(definitions.SOURCE_TYPE_FILE)\n        elif (not scan_context.HasScanNode(source_path_spec)):\n            scan_node.scanned = True\n            scan_node = scan_context.AddScanNode(source_path_spec, scan_node)\n            if (system_level_file_entry and system_level_file_entry.IsDevice()):\n                
source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE\n            else:\n                source_type = definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE\n            scan_context.SetSourceType(source_type)\n    if (not scan_node.scanned):\n        scan_node.scanned = True", "docstring": "Scans a node for supported formats.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): source scan node.\nauto_recurse (Optional[bool]): True if the scan should automatically\nrecurse as far as possible.\n\nRaises:\nBackEndError: if the source cannot be scanned.\nValueError: if the scan context or scan node is invalid.", "source": "codesearchnet"}
{"code": "def get(self, request):\n        \n        code = request.GET.get(\"code\")\n\n        if not code:\n            \n            return render(request, 'django_auth_adfs/login_failed.html', {\n                'error_message': \"No authorization code was provided.\",\n            }, status=400)\n\n        redirect_to = request.GET.get(\"state\")\n\n        user = authenticate(request=request, authorization_code=code)\n\n        if user is not None:\n            if user.is_active:\n                login(request, user)\n                \n                \n                \n                if redirect_to:\n                    redirect_to = base64.urlsafe_b64decode(redirect_to.encode()).decode()\n                else:\n                    redirect_to = django_settings.LOGIN_REDIRECT_URL\n                url_is_safe = is_safe_url(\n                    url=redirect_to,\n                    allowed_hosts=[request.get_host()],\n                    require_https=request.is_secure(),\n                )\n                redirect_to = redirect_to if url_is_safe else '/'\n                return redirect(redirect_to)\n            else:\n                \n                return render(request, 'django_auth_adfs/login_failed.html', {\n                    'error_message': \"Your account is disabled.\",\n                }, status=403)\n        else:\n            \n            return render(request, 'django_auth_adfs/login_failed.html', {\n                'error_message': \"Login failed.\",\n            }, status=401)", "docstring": "Handles the redirect from ADFS to our site.\nWe try to process the passed authorization code and login the user.\n\nArgs:\nrequest (django.http.request.HttpRequest): A Django Request object", "source": "juraj-google-style"}
{"code": "def get_reserved_vlan_range(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/reserved-vlan-range\"\n        return self._client.get(uri)", "docstring": "Gets the reserved vlan ID range for the fabric.\n\nNote:\nThis method is only available on HPE Synergy.\n\nArgs:\nid_or_uri: ID or URI of fabric.\n\nReturns:\ndict: vlan-pool", "source": "juraj-google-style"}
{"code": "def performance_curve(self):\n    pod = (self.contingency_tables['TP'] / (self.contingency_tables['TP'] + self.contingency_tables['FN']))\n    far = (self.contingency_tables['FP'] / (self.contingency_tables['FP'] + self.contingency_tables['TP']))\n    far[((self.contingency_tables['FP'] + self.contingency_tables['TP']) == 0)] = np.nan\n    return pd.DataFrame({'POD': pod, 'FAR': far, 'Thresholds': self.thresholds}, columns=['POD', 'FAR', 'Thresholds'])", "docstring": "Calculate the Probability of Detection and False Alarm Ratio in order to output a performance diagram.\n\nReturns:\npandas.DataFrame containing POD, FAR, and probability thresholds.", "source": "codesearchnet"}
{"code": "def __init__(self,pos=None,chrom=None,separate_chroms=False):\n        \n        \n        assert pos is not None, 'Slider:: set pos'\n        assert chrom is not None, 'Slider:: set chrom'\n\n        self.pos   = pos\n        self.chrom = chrom\n        \n        self.separate_chroms = separate_chroms\n        \n        self.windows = None\n        \n        self.info   = {}\n        pass", "docstring": "Constructor\nArgs:\npos:        position\nchrom:      chromosome", "source": "juraj-google-style"}
{"code": "async def addNodes(self, nodedefs):\n        \n\n        for (formname, formvalu), forminfo in nodedefs:\n\n            props = forminfo.get('props')\n\n            \n            if props is not None:\n                props.pop('.created', None)\n\n            node = await self.addNode(formname, formvalu, props=props)\n            if node is not None:\n                tags = forminfo.get('tags')\n                if tags is not None:\n                    for tag, asof in tags.items():\n                        await node.addTag(tag, valu=asof)\n\n            yield node", "docstring": "Add/merge nodes in bulk.\n\nThe addNodes API is designed for bulk adds which will\nalso set properties and add tags to existing nodes.\nNodes are specified as a list of the following tuples:\n\n( (form, valu), {'props':{}, 'tags':{}})\n\nArgs:\nnodedefs (list): A list of nodedef tuples.\n\nReturns:\n(list): A list of xact messages.", "source": "juraj-google-style"}
{"code": "def chartspan(cls, start, end):\n        \n        return cls(Lnk.CHARTSPAN, (int(start), int(end)))", "docstring": "Create a Lnk object for a chart span.\n\nArgs:\nstart: the initial chart vertex\nend: the final chart vertex", "source": "juraj-google-style"}
{"code": "def get_enum_from_canonical_name(self, enum_name):\n        \n        return next((e for e in self.enums if e.canonical_name == enum_name), None)", "docstring": "Return an enum from a canonical name\nArgs:\nenum_name (str): canonical name of the enum\nReturns:\nEnum", "source": "juraj-google-style"}
{"code": "def get_cartesian(self):\n        \n        coords = ['x', 'y', 'z']\n        eq_sets = self._metadata['eq']['eq_sets']\n        sym_ops = self._metadata['eq']['sym_ops']\n        frame = pd.DataFrame(index=[i for v in eq_sets.values() for i in v],\n                             columns=['atom', 'x', 'y', 'z'], dtype='f8')\n        frame['atom'] = pd.Series(\n            {i: self.loc[k, 'atom'] for k, v in eq_sets.items() for i in v})\n        frame.loc[self.index, coords] = self.loc[:, coords]\n        for i in eq_sets:\n            for j in eq_sets[i]:\n                frame.loc[j, coords] = np.dot(sym_ops[i][j],\n                                              frame.loc[i, coords])\n        return Cartesian(frame)", "docstring": "Return a :class:`~Cartesian` where all\nmembers of a symmetry equivalence class are inserted back in.\n\nArgs:\nNone\n\nReturns:\nCartesian: A new cartesian instance.", "source": "juraj-google-style"}
{"code": "def inc(self, key, count=1):\n    if count < 0:\n        raise ValueError('Counter must be monotonically increasing.')\n    if not _enabled:\n        return\n    self._counts[key] = self._counts.get(key, 0) + count\n    self._total += count", "docstring": "Increment the metric by the specified amount.\n\nArgs:\nkey: A string to be used as the key.\ncount: The amount to increment by (non-negative integer).\n\nRaises:\nValueError: if the count is less than 0.", "source": "github-repos"}
{"code": "def maybe_append_oov_vectors(embeddings, num_oov_buckets):\n  \n  num_embeddings = np.shape(embeddings)[0]\n  embedding_dim = np.shape(embeddings)[1]\n  embeddings.resize(\n      [num_embeddings + num_oov_buckets, embedding_dim], refcheck=False)", "docstring": "Adds zero vectors for oov buckets if num_oov_buckets > 0.\n\nSince we are assigning zero vectors, adding more that one oov bucket is only\nmeaningful if we perform fine-tuning.\n\nArgs:\nembeddings: Embeddings to extend.\nnum_oov_buckets: Number of OOV buckets in the extended embedding.", "source": "juraj-google-style"}
{"code": "def make_action(self, fn, schema_parser, meta):\n    validate_input = validate_output = None\n    if ('$input' in meta):\n        with MarkKey('$input'):\n            validate_input = schema_parser.parse(meta['$input'])\n    if ('$output' in meta):\n        with MarkKey('$output'):\n            validate_output = schema_parser.parse(meta['$output'])\n\n    def action(data):\n        if validate_input:\n            try:\n                data = validate_input(data)\n            except Invalid as ex:\n                return abort(400, 'InvalidData', str(ex))\n            if isinstance(data, dict):\n                rv = fn(**data)\n            else:\n                rv = fn(data)\n        else:\n            rv = fn()\n        (rv, status, headers) = unpack(rv)\n        if validate_output:\n            try:\n                rv = validate_output(rv)\n            except Invalid as ex:\n                return abort(500, 'ServerError', str(ex))\n        return (rv, status, headers)\n    return action", "docstring": "Make resource's method an action\n\nValidate input, output by schema in meta.\nIf no input schema, call fn without params.\nIf no output schema, will not validate return value.\n\nArgs:\nfn: resource's method\nschema_parser: for parsing schema in meta\nmeta: meta data of the action", "source": "codesearchnet"}
{"code": "async def evaluate_model(eval_model_path, target_model_path, sgf_dir, seed):\n  \n\n  lines = await run(\n      'bazel-bin/cc/eval',\n      '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'eval.flags')),\n      '--model={}'.format(eval_model_path),\n      '--model_two={}'.format(target_model_path),\n      '--sgf_dir={}'.format(sgf_dir),\n      '--seed={}'.format(seed))\n  result = '\\n'.join(lines[-7:])\n  logging.info(result)\n  eval_stats, target_stats = parse_win_stats_table(result, 2)\n  num_games = eval_stats.total_wins + target_stats.total_wins\n  win_rate = eval_stats.total_wins / num_games\n  logging.info('Win rate %s vs %s: %.3f', eval_stats.model_name,\n               target_stats.model_name, win_rate)\n  return win_rate", "docstring": "Evaluate one model against a target.\n\nArgs:\neval_model_path: the path to the model to evaluate.\ntarget_model_path: the path to the model to compare to.\nsgf_dif: directory path to write SGF output to.\nseed: random seed to use when running eval.\n\nReturns:\nThe win-rate of eval_model against target_model in the range [0, 1].", "source": "juraj-google-style"}
{"code": "def list_groups(name):\n    if six.PY2:\n        name = _to_unicode(name)\n    ugrp = set()\n    try:\n        user = info(name)['groups']\n    except KeyError:\n        return False\n    for group in user:\n        ugrp.add(group.strip(' *'))\n    return sorted(list(ugrp))", "docstring": "Return a list of groups the named user belongs to\n\nArgs:\nname (str): The user name for which to list groups\n\nReturns:\nlist: A list of groups to which the user belongs\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.list_groups foo", "source": "codesearchnet"}
{"code": "def getConfigPath(configFileName = None):\n    \n    paths = {}\n    applicationPath = \"./\"\n\n    \n    if sys.platform == 'win32':\n        applicationPath = os.path.expanduser(os.path.join('~\\\\', 'OSRFramework'))\n    else:\n        applicationPath = os.path.expanduser(os.path.join('~/', '.config', 'OSRFramework'))\n\n    \n    paths = {\n        \"appPath\": applicationPath,\n        \"appPathData\": os.path.join(applicationPath, \"data\"),\n        \"appPathDefaults\": os.path.join(applicationPath, \"default\"),\n        \"appPathPlugins\": os.path.join(applicationPath, \"plugins\"),\n        \"appPathWrappers\": os.path.join(applicationPath, \"plugins\", \"wrappers\"),\n        \"appPathPatterns\": os.path.join(applicationPath, \"plugins\", \"patterns\"),\n    }\n\n    \n    for path in paths.keys():\n        if not os.path.exists(paths[path]):\n            os.makedirs(paths[path])\n\n    return paths", "docstring": "Auxiliar function to get the configuration paths depending on the system\n\nArgs:\n-----\nconfigFileName: TODO.\n\nReturns:\n--------\nA dictionary with the following keys: appPath, appPathDefaults,\nappPathTransforms, appPathPlugins, appPathPatterns, appPathPatterns.", "source": "juraj-google-style"}
{"code": "def macro_state(self, micro_state):\n    assert (len(micro_state) == len(self.micro_indices))\n    reindexed = self.reindex()\n    return utils.state_of(reindexed.output_indices, micro_state)", "docstring": "Compute the macro-state of this blackbox.\n\nThis is just the state of the blackbox's output indices.\n\nArgs:\nmicro_state (tuple[int]): The state of the micro-elements in the\nblackbox.\n\nReturns:\ntuple[int]: The state of the output indices.", "source": "codesearchnet"}
{"code": "def get_historical_data(nmr_problems):\n    \n    observations = np.tile(np.array([[10, 256, 202, 97]]), (nmr_problems, 1))\n    nmr_tanks_ground_truth = np.ones((nmr_problems,)) * 276\n    return observations, nmr_tanks_ground_truth", "docstring": "Get the historical tank data.\n\nArgs:\nnmr_problems (int): the number of problems\n\nReturns:\ntuple: (observations, nmr_tanks_ground_truth)", "source": "juraj-google-style"}
{"code": "def delete(self, name, action, seqno):\n    return self.configure(('no route-map %s %s %s' % (name, action, seqno)))", "docstring": "Deletes the routemap from the node\n\nNote:\nThis method will attempt to delete the routemap from the nodes\noperational config.  If the routemap does not exist then this\nmethod will not perform any changes but still return True\n\nArgs:\nname (string): The full name of the routemap.\naction (string): The action to take for this routemap clause.\nseqno (integer): The sequence number for the routemap clause.\n\nReturns:\nTrue if the routemap could be deleted otherwise False (see Node)", "source": "codesearchnet"}
{"code": "def validate_activation(classifier_activation, weights):\n    if weights is None:\n        return\n    classifier_activation = activations.get(classifier_activation)\n    if classifier_activation not in {activations.get('softmax'), activations.get(None)}:\n        raise ValueError(f'Only `None` and `softmax` activations are allowed for the `classifier_activation` argument when using pretrained weights, with `include_top=True`; Received: classifier_activation={classifier_activation}')", "docstring": "validates that the classifer_activation is compatible with the weights.\n\nArgs:\nclassifier_activation: str or callable activation function\nweights: The pretrained weights to load.\n\nRaises:\nValueError: if an activation other than `None` or `softmax` are used with\npretrained weights.", "source": "github-repos"}
{"code": "def generate_hyperband_schedule(self, R, eta):\n        \n        schedule = []\n        s_max = int(math.floor(math.log(R, eta)))\n        \n        for s in range(0, s_max + 1):\n            n = math.ceil(int((s_max + 1) / (s + 1)) * eta ** s)\n            r = R * eta ** (-s)\n            bracket = []\n            for i in range(0, s + 1):\n                n_i = int(math.floor(n * eta ** (-i)))\n                r_i = int(r * eta ** i)\n                bracket.append((n_i, r_i))\n            schedule = [bracket] + schedule\n        return schedule", "docstring": "Generate hyperband schedule according to the paper.\n\nArgs:\nR: maximum resources per config.\neta: proportion of configruations to discard per\niteration of successive halving.\n\nReturns: hyperband schedule, which is represented\nas a list of brackets, where each bracket\ncontains a list of (num configurations,\nnum resources to use per configuration).\nSee the paper for more details.", "source": "juraj-google-style"}
{"code": "def _infer_output_coder(self, input_type=None, input_coder=None):\n    return None", "docstring": "Returns the output coder to use for output of this transform.\n\nThe Coder returned here should not be wrapped in a WindowedValueCoder\nwrapper.\n\nArgs:\ninput_type: An instance of an allowed built-in type, a custom class, or a\ntypehints.TypeConstraint for the input type, or None if not available.\ninput_coder: Coder object for encoding input to this PTransform, or None\nif not available.\n\nReturns:\nCoder object for encoding output of this PTransform or None if unknown.", "source": "github-repos"}
{"code": "def _ConvertInteger(value):\n    if (isinstance(value, float) and (not value.is_integer())):\n        raise ParseError(\"Couldn't parse integer: {0}.\".format(value))\n    if (isinstance(value, six.text_type) and (value.find(' ') != (- 1))):\n        raise ParseError('Couldn\\'t parse integer: \"{0}\".'.format(value))\n    return int(value)", "docstring": "Convert an integer.\n\nArgs:\nvalue: A scalar value to convert.\n\nReturns:\nThe integer value.\n\nRaises:\nParseError: If an integer couldn't be consumed.", "source": "codesearchnet"}
{"code": "def is_on_curve(self, point):\n    (X, Y) = (point.X, point.Y)\n    return (((((pow(Y, 2, self.P) - pow(X, 3, self.P)) - (self.a * X)) - self.b) % self.P) == 0)", "docstring": "Checks whether a point is on the curve.\n\nArgs:\npoint (AffinePoint): Point to be checked.\n\nReturns:\nbool: True if point is on the curve, False otherwise.", "source": "codesearchnet"}
{"code": "def _DownloadScript(self, url, dest_dir):\n    if url.startswith('gs:\n        url = re.sub('^gs:\n        return self._DownloadAuthUrl(url, dest_dir)\n    header = 'http[s]?:\n    domain = 'storage\\\\.googleapis\\\\.com'\n    bucket = '(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'\n    obj = '(?P<obj>[^\\\\*\\\\?]+)'\n    gs_regex = re.compile(('\\\\A%s%s\\\\.%s/%s\\\\Z' % (header, bucket, domain, obj)))\n    match = gs_regex.match(url)\n    if match:\n        return self._DownloadAuthUrl(url, dest_dir)\n    gs_regex = re.compile(('\\\\A%s(commondata)?%s/%s/%s\\\\Z' % (header, domain, bucket, obj)))\n    match = gs_regex.match(url)\n    if match:\n        return self._DownloadAuthUrl(url, dest_dir)\n    return self._DownloadUrl(url, dest_dir)", "docstring": "Download the contents of the URL to the destination.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "codesearchnet"}
{"code": "def reload_napps(self, napps=None):\n    if (napps is None):\n        napps = []\n        api = self._config.get('kytos', 'api')\n        endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload', 'all')\n        response = self.make_request(endpoint)\n    for napp in napps:\n        api = self._config.get('kytos', 'api')\n        endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload', napp[0], napp[1])\n        response = self.make_request(endpoint)\n    if (response.status_code != 200):\n        raise KytosException('Error reloading the napp: Module not founded or could not be imported')\n    return response.content", "docstring": "Reload a specific NApp or all Napps.\n\nArgs:\nnapp (list): NApp list to be reload.\nRaises:\nrequests.HTTPError: When there's a server error.", "source": "codesearchnet"}
{"code": "def lookup_id(self, group):\n    filter = ['(cn={})'.format(group), '(objectclass=posixGroup)']\n    results = self.client.search(filter, ['gidNumber'])\n    if (len(results) < 1):\n        raise ldap_tools.exceptions.NoGroupsFound('No Groups Returned by LDAP')\n    elif (len(results) > 1):\n        raise ldap_tools.exceptions.TooManyResults('Multiple groups found. Please narrow your search.')\n    else:\n        return results[0].gidNumber.value", "docstring": "Lookup GID for the given group.\n\nArgs:\ngroup: Name of group whose ID needs to be looked up\n\nReturns:\nA bytestring representation of the group ID (gid)\nfor the group specified\n\nRaises:\nldap_tools.exceptions.NoGroupsFound:\nNo Groups were returned by LDAP\n\nldap_tools.exceptions.TooManyResults:\nMore than one group was returned by LDAP", "source": "codesearchnet"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n        return\n\n    \n    if server is not None and author != channel.server.me:\n        \n        prefix = data[\"discord\"][\"servers\"][server.id][\"prefix\"]\n        if content.startswith(prefix):\n            \n            package = content.split(\" \")\n            command = package[0][len(prefix):]\n\n            \n            if command == 'gamedeals':\n                await client.send_typing(channel)\n\n                \n                posts = api_reddit.get_top10()\n\n                if posts:\n                    for post in posts:\n                        \n                        embed = ui_embed.success(channel, post)\n                        await embed.send()\n                else:\n                    embed = ui_embed.no_results(channel)\n                    await embed.send()", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def _check_obj_properties(self, pub, name='pub'):\n    if (not hasattr(pub, 'indexes')):\n        raise InvalidType((\"`%s` doesn't have .indexes property!\" % name))\n    if (not pub.indexes):\n        raise InvalidType(('`%s.indexes` is not set!' % name))\n    if (not hasattr(pub, 'project_key')):\n        raise InvalidType((\"`%s` doesn't have .project_key property!\" % name))\n    if (not pub.project_key):\n        raise InvalidType(('`%s.project_key` is not set!' % name))", "docstring": "Make sure, that `pub` has the right interface.\n\nArgs:\npub (obj): Instance which will be checked.\nname (str): Name of the instance. Used in exception. Default `pub`.\n\nRaises:\nInvalidType: When the `pub` is not instance of `obj_type`.", "source": "codesearchnet"}
{"code": "def remove_option(self, section, option):\n        \n        try:\n            section = self.__getitem__(section)\n        except KeyError:\n            raise NoSectionError(section) from None\n        option = self.optionxform(option)\n        existed = option in section.options()\n        if existed:\n            del section[option]\n        return existed", "docstring": "Remove an option.\n\nArgs:\nsection (str): section name\noption (str): option name\n\nReturns:\nbool: whether the option was actually removed", "source": "juraj-google-style"}
{"code": "def interruptWrite(self, endpoint, buffer, timeout = 100):\n        r\n        return self.dev.write(endpoint, buffer, timeout)", "docstring": "r\"\"\"Perform a interrupt write request to the endpoint specified.\n\nArguments:\nendpoint: endpoint number.\nbuffer: sequence data buffer to write.\nThis parameter can be any sequence type.\ntimeout: operation timeout in milliseconds. (default: 100)\nReturns the number of bytes written.", "source": "juraj-google-style"}
{"code": "def set_record_attn(self, record_attn):\n\n    def _should_record_attn(layer_idx):\n        if isinstance(record_attn, bool):\n            return record_attn\n        return layer_idx in record_attn\n    for i, layer in enumerate(self._attn_mods):\n        layer.attn.record_attn = _should_record_attn(i)\n    if not record_attn:\n        self.saved_attn_weights = []", "docstring": "Makes forward prop dump self-attention softmaxes to self.saved_attn_weights.\n\nArgs:\nrecord_attn (`Union[bool,set]`):\nEither a set of layer indices indicating which layers to store, or a boolean value indicating Whether\nto dump all.", "source": "github-repos"}
{"code": "def set_timezone(self, timezone: str):\n        \n        data = {\"timezoneId\": timezone}\n        return self._restCall(\"home/setTimezone\", body=json.dumps(data))", "docstring": "sets the timezone for the AP. e.g. \"Europe/Berlin\"\nArgs:\ntimezone(str): the new timezone", "source": "juraj-google-style"}
{"code": "def _get_oauth2_client_id_and_secret(settings_instance):\n    \n    secret_json = getattr(settings_instance,\n                          'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)\n    if secret_json is not None:\n        return _load_client_secrets(secret_json)\n    else:\n        client_id = getattr(settings_instance, \"GOOGLE_OAUTH2_CLIENT_ID\",\n                            None)\n        client_secret = getattr(settings_instance,\n                                \"GOOGLE_OAUTH2_CLIENT_SECRET\", None)\n        if client_id is not None and client_secret is not None:\n            return client_id, client_secret\n        else:\n            raise exceptions.ImproperlyConfigured(\n                \"Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or \"\n                \"both GOOGLE_OAUTH2_CLIENT_ID and \"\n                \"GOOGLE_OAUTH2_CLIENT_SECRET in settings.py\")", "docstring": "Initializes client id and client secret based on the settings.\n\nArgs:\nsettings_instance: An instance of ``django.conf.settings``.\n\nReturns:\nA 2-tuple, the first item is the client id and the second\nitem is the client secret.", "source": "juraj-google-style"}
{"code": "def main(argv=None):\n    if (argv is None):\n        argv = sys.argv[1:]\n    parser = build_args()\n    args = parser.parse_args(args=argv)\n    (recipe_name, _ext) = os.path.splitext(os.path.basename(args.recipe))\n    rm = RecipeManager()\n    rm.add_recipe_folder(os.path.dirname(args.recipe), whitelist=[os.path.basename(args.recipe)])\n    recipe = rm.get_recipe(recipe_name)\n    if (args.archive is not None):\n        print(('Archiving recipe into %s' % args.archive))\n        recipe.archive(args.archive)\n        return 0\n    if args.info:\n        print(recipe)\n        return 0\n    variables = load_variables(args.define, args.config)\n    success = 0\n    start_time = time.time()\n    if (args.loop is None):\n        try:\n            recipe.run(variables)\n            success += 1\n        except IOTileException as exc:\n            print(('Error running recipe: %s' % str(exc)))\n            return 1\n    else:\n        while True:\n            value = input(('Enter value for loop variable %s (return to stop): ' % args.loop))\n            if (value == ''):\n                break\n            local_vars = dict(**variables)\n            local_vars[args.loop] = value\n            try:\n                recipe.run(local_vars)\n                success += 1\n            except IOTileException as exc:\n                print(('--> ERROR processing loop variable %s: %s' % (value, str(exc))))\n    end_time = time.time()\n    total_time = (end_time - start_time)\n    if (success == 0):\n        per_time = 0.0\n    else:\n        per_time = (total_time / success)\n    print(('Performed %d runs in %.1f seconds (%.1f seconds / run)' % (success, total_time, per_time)))\n    return 0", "docstring": "Main entry point for iotile-ship recipe runner.\n\nThis is the iotile-ship command line program.\n\nArgs:\nargv (list of str): An optional set of command line\nparameters.  If not passed, these are taken from\nsys.argv.", "source": "codesearchnet"}
{"code": "def clean_headers(headers):\n    \n    clean = {}\n    try:\n        for k, v in six.iteritems(headers):\n            if not isinstance(k, six.binary_type):\n                k = str(k)\n            if not isinstance(v, six.binary_type):\n                v = str(v)\n            clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)\n    except UnicodeEncodeError:\n        from oauth2client.client import NonAsciiHeaderError\n        raise NonAsciiHeaderError(k, ': ', v)\n    return clean", "docstring": "Forces header keys and values to be strings, i.e not unicode.\n\nThe httplib module just concats the header keys and values in a way that\nmay make the message header a unicode string, which, if it then tries to\ncontatenate to a binary request body may result in a unicode decode error.\n\nArgs:\nheaders: dict, A dictionary of headers.\n\nReturns:\nThe same dictionary but with all the keys converted to strings.", "source": "juraj-google-style"}
{"code": "def __init__(self, bits: List[int], order: int):\n    super().__init__(trainable=False)\n    bits = check_bits(bits)\n    order = check_order(order)\n    indices_list = []\n    for i in range(1, order + 1):\n        combos = itertools.combinations(range(len(bits)), i)\n        indices_list.extend(list(combos))\n    self.indices = tf.ragged.stack(indices_list)\n    self.num_terms = len(indices_list)", "docstring": "Initializes a Parity layer.\n\nArgs:\nbits: Unique labels for the bits on which this distribution is supported.\norder: Maximum size of bit groups to take the parity of.", "source": "github-repos"}
{"code": "def base_name_from_image(image):\n    \n    m = re.match(\"^(.+/)?([^:/]+)(:[^:]+)?$\", image)\n    algo_name = m.group(2) if m else image\n    return algo_name", "docstring": "Extract the base name of the image to use as the 'algorithm name' for the job.\n\nArgs:\nimage (str): Image name.\n\nReturns:\nstr: Algorithm name, as extracted from the image name.", "source": "juraj-google-style"}
{"code": "def when_connected(self):\n    if (self._client and (not self._client.is_closed)):\n        return defer.succeed(self._client)\n    else:\n        return self._client_deferred", "docstring": "Retrieve the currently-connected Protocol, or the next one to connect.\n\nReturns:\ndefer.Deferred: A Deferred that fires with a connected\n:class:`FedoraMessagingProtocolV2` instance. This is similar to\nthe whenConnected method from the Twisted endpoints APIs, which\nis sadly isn't available before 16.1.0, which isn't available\nin EL7.", "source": "codesearchnet"}
{"code": "def sam2rnf(args):\n    \n\n    rnftools.mishmash.Source.recode_sam_reads(\n        sam_fn=args.sam_fn,\n        fastq_rnf_fo=args.fq_fo,\n        fai_fo=args.fai_fo,\n        genome_id=args.genome_id,\n        number_of_read_tuples=10**9,\n        simulator_name=args.simulator_name,\n        allow_unmapped=args.allow_unmapped,\n    )", "docstring": "Convert SAM to RNF-based FASTQ with respect to argparse parameters.\n\nArgs:\nargs (...): Arguments parsed by argparse", "source": "juraj-google-style"}
{"code": "def infer_annotation(type_comments):\n    \n    \n    assert type_comments\n    args = {}  \n    returns = set()\n    for comment in type_comments:\n        arg_types, return_type = parse_type_comment(comment)\n        for i, arg_type in enumerate(arg_types):\n            args.setdefault(i, set()).add(arg_type)\n        returns.add(return_type)\n    combined_args = []\n    for i in sorted(args):\n        arg_infos = list(args[i])\n        kind = argument_kind(arg_infos)\n        if kind is None:\n            raise InferError('Ambiguous argument kinds:\\n' + '\\n'.join(type_comments))\n        types = [arg.type for arg in arg_infos]\n        combined = combine_types(types)\n        if str(combined) == 'None':\n            \n            \n            combined = UnionType([ClassType('None'), AnyType()])\n        if kind != ARG_POS and (len(str(combined)) > 120 or isinstance(combined, UnionType)):\n            \n            combined = AnyType()\n        combined_args.append(Argument(combined, kind))\n    combined_return = combine_types(returns)\n    return combined_args, combined_return", "docstring": "Given some type comments, return a single inferred signature.\n\nArgs:\ntype_comments: Strings of form '(arg1, ... argN) -> ret'\n\nReturns: Tuple of (argument types and kinds, return type).", "source": "juraj-google-style"}
{"code": "def obtain_all_bond_lengths(sp1, sp2, default_bl=None):\n    if isinstance(sp1, Element):\n        sp1 = sp1.symbol\n    if isinstance(sp2, Element):\n        sp2 = sp2.symbol\n    syms = tuple(sorted([sp1, sp2]))\n    if (syms in bond_lengths):\n        return bond_lengths[syms].copy()\n    elif (default_bl is not None):\n        return {1: default_bl}\n    else:\n        raise ValueError('No bond data for elements {} - {}'.format(*syms))", "docstring": "Obtain bond lengths for all bond orders from bond length database\n\nArgs:\nsp1 (Specie): First specie.\nsp2 (Specie): Second specie.\ndefault_bl: If a particular type of bond does not exist, use this\nbond length as a default value (bond order = 1).\nIf None, a ValueError will be thrown.\n\nReturn:\nA dict mapping bond order to bond length in angstrom", "source": "codesearchnet"}
{"code": "def _apply_user_agent(headers, user_agent):\n    if (user_agent is not None):\n        if ('user-agent' in headers):\n            headers['user-agent'] = ((user_agent + ' ') + headers['user-agent'])\n        else:\n            headers['user-agent'] = user_agent\n    return headers", "docstring": "Adds a user-agent to the headers.\n\nArgs:\nheaders: dict, request headers to add / modify user\nagent within.\nuser_agent: str, the user agent to add.\n\nReturns:\ndict, the original headers passed in, but modified if the\nuser agent is not None.", "source": "codesearchnet"}
{"code": "def _GetIdentifierMappings(self, parser_mediator, cache, database):\n    identifier_mappings = cache.GetResults('SruDbIdMapTable', default_value={})\n    if (not identifier_mappings):\n        esedb_table = database.get_table_by_name('SruDbIdMapTable')\n        if (not esedb_table):\n            parser_mediator.ProduceExtractionWarning('unable to retrieve table: SruDbIdMapTable')\n        else:\n            identifier_mappings = self._ParseIdentifierMappingsTable(parser_mediator, esedb_table)\n        cache.StoreDictInCache('SruDbIdMapTable', identifier_mappings)\n    return identifier_mappings", "docstring": "Retrieves the identifier mappings from SruDbIdMapTable table.\n\nIn the SRUM database individual tables contain numeric identifiers for\nthe application (\"AppId\") and user identifier (\"UserId\"). A more descriptive\nstring of these values can be found in the SruDbIdMapTable. For example the\nnumeric value of 42 mapping to DiagTrack. This method will cache the\nmappings of a specific SRUM database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (ESEDBCache): cache, which contains information about\nthe identifiers stored in the SruDbIdMapTable table.\ndatabase (pyesedb.file): ESE database.\n\nReturns:\ndict[int, str]: mapping of numeric identifiers to their string\nrepresentation.", "source": "codesearchnet"}
{"code": "def referrer_uri(self, value):\n        \n        if value == self._defaults['referrerUri'] and 'referrerUri' in self._values:\n            del self._values['referrerUri']\n        else:\n            self._values['referrerUri'] = value", "docstring": "The referrer_uri property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def can_generate(cls) -> bool:\n    if 'GenerationMixin' in str(cls.prepare_inputs_for_generation) and 'GenerationMixin' in str(cls.generate):\n        return False\n    return True", "docstring": "Returns whether this model can generate sequences with `.generate()`.\n\nReturns:\n`bool`: Whether this model can generate sequences with `.generate()`.", "source": "github-repos"}
{"code": "def handle(data_type, data, data_id=None, caller=None):\n    if (not data_id):\n        data_id = data_type\n    if (data_id not in _handlers):\n        _handlers[data_id] = dict([(h.handle, h) for h in handlers.instantiate_for_data_type(data_type, data_id=data_id)])\n    for handler in list(_handlers[data_id].values()):\n        try:\n            data = handler(data, caller=caller)\n        except Exception as inst:\n            vodka.log.error((\"Data handler '%s' failed with error\" % handler))\n            vodka.log.error(traceback.format_exc())\n    return data", "docstring": "execute all data handlers on the specified data according to data type\n\nArgs:\ndata_type (str): data type handle\ndata (dict or list): data\n\nKwargs:\ndata_id (str): can be used to differentiate between different data\nsets of the same data type. If not specified will default to\nthe data type\ncaller (object): if specified, holds the object or function that\nis trying to handle data\n\nReturns:\ndict or list - data after handlers have been executed on it", "source": "codesearchnet"}
{"code": "def get_context_from_cmdln(args, desc=\"Run scriptworker\"):\n    \n    context = Context()\n    parser = argparse.ArgumentParser(description=desc)\n    parser.add_argument(\n        \"config_path\", type=str, nargs=\"?\", default=\"scriptworker.yaml\",\n        help=\"the path to the config file\"\n    )\n    parsed_args = parser.parse_args(args)\n    context.config, credentials = create_config(config_path=parsed_args.config_path)\n    update_logging_config(context)\n    return context, credentials", "docstring": "Create a Context object from args.\n\nArgs:\nargs (list): the commandline args.  Generally sys.argv\n\nReturns:\ntuple: ``scriptworker.context.Context`` with populated config, and\ncredentials frozendict", "source": "juraj-google-style"}
{"code": "def get_cert_contents(kwargs):\n    paths = {'certificate': kwargs.get('path_to_certificate'), 'private_key': kwargs.get('path_to_private_key'), 'chain': kwargs.get('path_to_chain')}\n    for (key, value) in paths.items():\n        if (value is not None):\n            continue\n        path = input(('Path to %s (skip): ' % (key,)))\n        if ((path == 'skip') or (not path.strip())):\n            continue\n        paths[key] = path\n    parameters = {'ServerCertificateName': kwargs.get('cert_name')}\n    for (key, path) in paths.items():\n        if (not path):\n            continue\n        try:\n            contents = path.read()\n        except AttributeError:\n            with open(utils.full_path(path)) as read_file:\n                contents = read_file.read()\n        if (key == 'certificate'):\n            parameters['CertificateBody'] = contents\n        elif (key == 'private_key'):\n            parameters['PrivateKey'] = contents\n        elif (key == 'chain'):\n            parameters['CertificateChain'] = contents\n    return parameters", "docstring": "Builds parameters with server cert file contents.\n\nArgs:\nkwargs(dict): The keyword args passed to ensure_server_cert_exists,\noptionally containing the paths to the cert, key and chain files.\n\nReturns:\ndict: A dictionary containing the appropriate parameters to supply to\nupload_server_certificate. An empty dictionary if there is a\nproblem.", "source": "codesearchnet"}
{"code": "def _check_or_build_spatial_positions(pos, index_dims, batch_size):\n    if pos is None:\n        pos = build_linear_positions(index_dims)\n        pos = pos[None].expand((batch_size,) + pos.shape)\n        pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1])\n    elif pos.shape[-1] != len(index_dims):\n        raise ValueError('Spatial features have the wrong number of dimensions.')\n    return pos", "docstring": "Checks or builds spatial position features (x, y, ...).\n\nArgs:\npos (`torch.FloatTensor`):\nNone, or an array of position features. If None, position features are built. Otherwise, their size is checked.\nindex_dims (`List[int]`):\nAn iterable giving the spatial/index size of the data to be featurized.\nbatch_size (`int`):\nThe batch size of the data to be featurized.\n\nReturns:\n`torch.FloatTensor` of shape `(batch_size, prod(index_dims))` an array of position features.", "source": "github-repos"}
{"code": "def SetDocumentType(self, document_type):\n    self._document_type = document_type\n    logger.debug('Elasticsearch document type: {0:s}'.format(document_type))", "docstring": "Sets the document type.\n\nArgs:\ndocument_type (str): document type.", "source": "codesearchnet"}
{"code": "def get_template(self, template_id):\n    request = self._get_request()\n    return request.get((self.TEMPLATE_GET_URL + template_id))", "docstring": "Gets a Template which includes a list of Accounts that can access it\n\nArgs:\n\ntemplate_id (str): The id of the template to retrieve\n\nReturns:\nA Template object", "source": "codesearchnet"}
{"code": "def escape_yaml(raw_str: str) -> str:\n    \n    escape_list = [char for char in raw_str if char in ['!', '{', '[']]\n    if len(escape_list) == 0:\n        return raw_str\n\n    str_quotes = '\"'\n    i_str_quotes = \"'\"\n    if str_quotes in raw_str and str_quotes not in raw_str[1:-1]:\n        return raw_str\n\n    if str_quotes in raw_str[1:-1]:\n        raw_str = i_str_quotes + raw_str + i_str_quotes\n    else:\n        raw_str = str_quotes + raw_str + str_quotes\n    return raw_str", "docstring": "Shell-Escape a yaml input string.\n\nArgs:\nraw_str: The unescaped string.", "source": "juraj-google-style"}
{"code": "def __init__(self, session_identifier=None):\n    \n    super(Task, self).__init__()\n    self.aborted = False\n    self.completion_time = None\n    self.file_entry_type = None\n    self.has_retry = False\n    self.identifier = '{0:s}'.format(uuid.uuid4().hex)\n    self.last_processing_time = None\n    self.merge_priority = None\n    self.path_spec = None\n    self.session_identifier = session_identifier\n    self.start_time = int(time.time() * definitions.MICROSECONDS_PER_SECOND)\n    self.storage_file_size = None", "docstring": "Initializes a task attribute container.\n\nArgs:\nsession_identifier (Optional[str]): identifier of the session the task\nis part of.", "source": "juraj-google-style"}
{"code": "def get_obj_frm_str(obj_str, **kwargs):\n    \n    obj_str = obj_str.format(**kwargs)\n    args = []\n    kwargs = {}\n    params = []\n    \n    if \"(\" in obj_str:\n        call_args = obj_str[obj_str.find(\"(\"):]\n        obj_str = obj_str[:obj_str.find(\"(\")]\n        call_args = call_args[1:-1]\n        if call_args:\n            call_args = call_args.split(\",\")\n        else:\n            call_args = []\n        call_args = [arg.strip() for arg in call_args]\n\n        for arg in call_args:\n            if \"=\" in arg:\n                parts = arg.split(\"=\")\n                kwargs[parts[0]] = parts[1]\n            else:\n                args.append(arg)\n    \n    if \"[\" in obj_str:\n        params = obj_str[obj_str.find(\"[\"):]\n        obj_str = obj_str[:obj_str.find(\"[\")]\n        params = [part.replace(\"[\", \"\").replace(\"]\", \"\")\n                  for part in params.split(\"][\")]\n    obj = pydoc.locate(obj_str)\n    if params:\n        for part in params:\n            obj = get_attr(obj, part)\n    if args or kwargs:\n        if kwargs:\n            obj = obj.__call__(*args, **kwargs)\n        else:\n            obj = obj.__call__(*args)\n    return obj", "docstring": "Returns a python object from a python object string\n\nargs:\nobj_str: python object path expamle\n\"rdfframework.connections.ConnManager[{param1}]\"\n\nkwargs:\n* kwargs used to format the 'obj_str'", "source": "juraj-google-style"}
{"code": "def _get_outer_context_id(self, graph):\n    if hasattr(graph, 'outer_graph') and graph.outer_graph:\n        return self._get_context_id(graph.outer_graph)\n    else:\n        return None", "docstring": "Get the ID of the immediate outer context of the input graph.\n\nArgs:\ngraph: The graph (context) in question.\n\nReturns:\nIf an outer context exists, the immediate outer context name as a string.\nIf such as outer context does not exist (i.e., `graph` is itself\noutermost), `None`.", "source": "github-repos"}
{"code": "def write(self, records=None, path=None, fields=None, append=False, gzip=None):\n    if (path is None):\n        if (not self.is_attached()):\n            raise ItsdbError('no path given for detached table')\n        else:\n            path = self.path\n    path = _normalize_table_path(path)\n    (dirpath, name) = os.path.split(path)\n    if (fields is None):\n        fields = self.fields\n    if (records is None):\n        records = iter(self)\n    _write_table(dirpath, name, records, fields, append=append, gzip=gzip, encoding=self.encoding)\n    if (self.is_attached() and (path == _normalize_table_path(self.path))):\n        self.path = _table_filename(path)\n        self._sync_with_file()", "docstring": "Write the table to disk.\n\nThe basic usage has no arguments and writes the table's data\nto the attached file. The parameters accommodate a variety of\nuse cases, such as using *fields* to refresh a table to a\nnew schema or *records* and *append* to incrementally build a\ntable.\n\nArgs:\nrecords: an iterable of :class:`Record` objects to write;\nif `None` the table's existing data is used\npath: the destination file path; if `None` use the\npath of the file attached to the table\nfields (:class:`Relation`): table schema to use for\nwriting, otherwise use the current one\nappend: if `True`, append rather than overwrite\ngzip: compress with gzip if non-empty\nExamples:\n>>> table.write()\n>>> table.write(results, path='new/path/result')", "source": "codesearchnet"}
{"code": "def OpenClient(client_id=None, token=None):\n    if (not token):\n        try:\n            token = ApprovalFind(client_id, token=token)\n        except access_control.UnauthorizedAccess as e:\n            logging.debug('No authorization found for access to client: %s', e)\n    try:\n        client = aff4.FACTORY.Open(rdfvalue.RDFURN(client_id), mode='r', token=token)\n        return (client, token)\n    except access_control.UnauthorizedAccess:\n        logging.warning('Unable to find a valid reason for client %s. You may need to request approval.', client_id)\n        return (None, None)", "docstring": "Opens the client, getting potential approval tokens.\n\nArgs:\nclient_id: The client id that should be opened.\ntoken: Token to use to open the client\n\nReturns:\ntuple containing (client, token) objects or (None, None) on if\nno appropriate aproval tokens were found.", "source": "codesearchnet"}
{"code": "def check_data_type(self):\n    metadata_type = self.column_metadata.get('type')\n    if ((self.type != metadata_type) and (metadata_type not in self.type)):\n        raise ValueError(\"Types of transformer don't match\")", "docstring": "Check the type of the transformer and column match.\n\nArgs:\ncolumn_metadata(dict): Metadata of the column.\n\nRaises a ValueError if the types don't match", "source": "codesearchnet"}
{"code": "def process_data(data, number_to_keep):\n    \n\n    result = dict()\n\n    if number_to_keep != 0:\n        data_temp = dict(Counter(data).most_common(number_to_keep))\n        data_temp['rest'] = sum(data.values()) - sum(data_temp.values())\n        data = data_temp\n\n    labels = data\n    values = np.array([data[key] for key in labels], dtype=float)\n    pvalues = values / sum(values)\n    for position, label in enumerate(labels):\n        result[label] = round(pvalues[position], 5)\n\n    return result", "docstring": "Prepare received data for representation.\n\nArgs:\ndata (dict): values to represent (ex. {'001' : 130})\nnumber_to_keep (int): number of elements to show individually.\n\nReturns:\ndict: processed data to show.", "source": "juraj-google-style"}
{"code": "def cond(pred, true_fn, false_fn):\n    return Cond()(pred, true_fn, false_fn)", "docstring": "Conditionally applies `true_fn` or `false_fn`.\n\nArgs:\npred: Boolean scalar type\ntrue_fn: Callable returning the output for the `pred == True` case.\nfalse_fn: Callable returning the output for the `pred == False` case.\n\nReturns:\nThe output of either `true_fn` or `false_fn` depending on pred.", "source": "github-repos"}
{"code": "def list_members(self, name, type=\"USER\", recurse=True, max_results=1000):\n        \n        results = self.client.service.getListMembership(\n            name, type, recurse, max_results, self.proxy_id,\n        )\n        return [item[\"member\"] for item in results]", "docstring": "Look up all the members of a list.\n\nArgs:\nname (str): The name of the list\ntype (str): The type of results to return. \"USER\" to get users,\n\"LIST\" to get lists.\nrecurse (bool): Presumably, whether to recurse into member lists\nwhen retrieving users.\nmax_results (int): Maximum number of results to return.\n\nReturns:\nlist of strings: names of the members of the list", "source": "juraj-google-style"}
{"code": "def sunrise(self, date=None, zenith=None):\n        \n        return (segment.sunrise(date, zenith) for segment in self)", "docstring": "Calculate sunrise times for locations.\n\nArgs:\ndate (datetime.date): Calculate rise or set for given date\nzenith (str): Calculate sunrise events, or end of twilight\nReturns:\nlist of list of datetime.datetime: The time for the sunrise for\neach point in each segment", "source": "juraj-google-style"}
{"code": "def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):\n    \n    gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)\n    gs_blocks_filenames = get_filenames(\n        gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))\n    gs_blocks_fileroots = (\n        re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1)\n        for gs_blocks_filename in gs_blocks_filenames)\n\n    return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh)\n            for fileroot in gs_blocks_fileroots]", "docstring": "Prepare data for all HTML + gold standard blocks examples in ``data_dir``.\n\nArgs:\ndata_dir (str)\nblock_pct_tokens_thresh (float): must be in [0.0, 1.0]\n\nReturns:\nList[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]\n\nSee Also:\n:func:`prepare_data`", "source": "juraj-google-style"}
{"code": "def create_channel(cls, address=\"spanner.googleapis.com:443\", credentials=None):\n        \n        grpc_gcp_config = grpc_gcp.api_config_from_text_pb(\n            pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG)\n        )\n        options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)]\n        return google.api_core.grpc_helpers.create_channel(\n            address, credentials=credentials, scopes=cls._OAUTH_SCOPES\n        )", "docstring": "Create and return a gRPC channel object.\n\nArgs:\naddress (str): The host for the channel to use.\ncredentials (~.Credentials): The\nauthorization credentials to attach to requests. These\ncredentials identify this application to the service. If\nnone are specified, the client will attempt to ascertain\nthe credentials from the environment.\n\nReturns:\ngrpc.Channel: A gRPC channel object.", "source": "juraj-google-style"}
{"code": "def update(self, b):\n    hv = self.hashfunc(b)\n    (a, b) = self.permutations\n    phv = np.bitwise_and((((a * hv) + b) % _mersenne_prime), np.uint64(_max_hash))\n    self.hashvalues = np.minimum(phv, self.hashvalues)", "docstring": "Update this MinHash with a new value.\nThe value will be hashed using the hash function specified by\nthe `hashfunc` argument in the constructor.\n\nArgs:\nb: The value to be hashed using the hash function specified.\n\nExample:\nTo update with a new string value (using the default SHA1 hash\nfunction, which requires bytes as input):\n\n.. code-block:: python\n\nminhash = Minhash()\nminhash.update(\"new value\".encode('utf-8'))\n\nWe can also use a different hash function, for example, `pyfarmhash`:\n\n.. code-block:: python\n\nimport farmhash\ndef _hash_32(b):\nreturn farmhash.hash32(b)\nminhash = MinHash(hashfunc=_hash_32)\nminhash.update(\"new value\")", "source": "codesearchnet"}
{"code": "def with_start_after(self, after_namespace):\n    namespace_start = _ord_to_namespace((_namespace_to_ord(after_namespace) + 1))\n    return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)", "docstring": "Returns a copy of this NamespaceName with a new namespace_start.\n\nArgs:\nafter_namespace: A namespace string.\n\nReturns:\nA NamespaceRange object whose namespace_start is the lexographically next\nnamespace after the given namespace string.\n\nRaises:\nValueError: if the NamespaceRange includes only a single namespace.", "source": "codesearchnet"}
{"code": "def condition_indices(df):\n    eigvals = eigenvalues(df)\n    cond_idx = np.sqrt((eigvals.max() / eigvals))\n    return pd.Series(cond_idx, df.columns, name='Condition index')", "docstring": "Returns a pandas Series with condition indices of the df columns.\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "codesearchnet"}
{"code": "def _GetEventLogProviderKey(self, log_source):\n    table_names = ['event_log_providers']\n    column_names = ['event_log_provider_key']\n    condition = 'log_source == \"{0:s}\"'.format(log_source)\n    values_list = list(self._database_file.GetValues(table_names, column_names, condition))\n    number_of_values = len(values_list)\n    if (number_of_values == 0):\n        return None\n    if (number_of_values == 1):\n        values = values_list[0]\n        return values['event_log_provider_key']\n    raise RuntimeError('More than one value found in database.')", "docstring": "Retrieves the Event Log provider key.\n\nArgs:\nlog_source (str): Event Log source.\n\nReturns:\nstr: Event Log provider key or None if not available.\n\nRaises:\nRuntimeError: if more than one value is found in the database.", "source": "codesearchnet"}
{"code": "def _model_loss(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False):\n    total_loss = 0\n    kwargs = {}\n    if model._expects_training_arg:\n        kwargs['training'] = training\n    if len(inputs) == 1 and (not isinstance(inputs, dict)):\n        inputs = inputs[0]\n    if any((isinstance(input_t, (np.ndarray, float, int)) for input_t in nest.flatten(inputs))):\n        inputs = nest.map_structure(tensor_conversion.convert_to_tensor_v2_with_dispatch, inputs)\n    outs = model(inputs, **kwargs)\n    outs = nest.flatten(outs)\n    if targets:\n        targets = training_utils_v1.cast_if_floating_dtype_and_mismatch(targets, outs)\n    if sample_weights:\n        new_sample_weights = []\n        for val in sample_weights:\n            if val is not None:\n                new_sample_weights.append(training_utils_v1.cast_if_floating_dtype(tensor_conversion.convert_to_tensor_v2_with_dispatch(val)))\n            else:\n                new_sample_weights.append(None)\n        sample_weights = new_sample_weights\n    masks = [getattr(t, '_keras_mask', None) for t in outs]\n    targets = nest.flatten(targets)\n    output_losses = []\n    with backend.name_scope('loss'):\n        loss_fns = [loss_fn for loss_fn in model.loss_functions if loss_fn is not None]\n        custom_losses = model.losses\n        if not loss_fns and (not custom_losses):\n            if training:\n                raise ValueError('The model cannot be trained because it has no loss to optimize.')\n            else:\n                raise ValueError('The model cannot be evaluated because it has no loss to compute.')\n        for i, loss_fn in enumerate(loss_fns):\n            weights = sample_weights[i] if sample_weights else None\n            mask = masks[i]\n            with backend.name_scope(model.output_names[i] + '_loss'):\n                if mask is not None:\n                    mask = math_ops.cast(mask, outs[i].dtype)\n                    if weights is None:\n                        weights = mask\n                    else:\n                        weights = math_ops.cast(weights, outs[i].dtype)\n                        mask, _, weights = losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=weights)\n                        weights *= mask\n                if hasattr(loss_fn, 'reduction'):\n                    per_sample_losses = loss_fn.call(targets[i], outs[i])\n                    weighted_losses = losses_utils.compute_weighted_loss(per_sample_losses, sample_weight=weights, reduction=losses_utils.ReductionV2.NONE)\n                    loss_reduction = loss_fn.reduction\n                    if loss_reduction == losses_utils.ReductionV2.AUTO:\n                        loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n                    output_loss = losses_utils.reduce_weighted_loss(weighted_losses, reduction=loss_reduction)\n                else:\n                    output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)\n                    loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n            if len(model.outputs) > 1:\n                output_losses.append(output_loss_metrics[i](output_loss))\n            if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:\n                output_loss = losses_utils.scale_loss_for_distribution(output_loss)\n            total_loss += model._loss_weights_list[i] * output_loss\n        if custom_losses:\n            total_loss += 
losses_utils.scale_loss_for_distribution(math_ops.add_n(custom_losses))\n    return (outs, total_loss, output_losses, masks)", "docstring": "Calculates the loss for a given model.\n\nArgs:\nmodel: The model on which metrics are being calculated.\ninputs: Either a dictionary of inputs to the model or a list of input\narrays.\ntargets: List of target arrays.\noutput_loss_metrics: List of metrics that are used to aggregated output\nloss values.\nsample_weights: Optional list of sample weight arrays.\ntraining: Whether the model should be run in inference or training mode.\n\nReturns:\nReturns the model output, total loss, loss value calculated using the\nspecified loss function and masks for each output. The total loss includes\nregularization losses and applies masking and sample weighting\nto the loss value.", "source": "github-repos"}
{"code": "def set_current_position(self, position):\n    raise NotImplementedError", "docstring": "Updates the last-consumed position to the given position.\n\nA source may invoke this method for records that do not start at split\npoints. This may modify the internal state of the ``RangeTracker``. If the\nrecord starts at a split point, method ``try_claim()`` **must** be invoked\ninstead of this method.\n\nArgs:\nposition: starting position of a record being read by a source.", "source": "github-repos"}
{"code": "def get_contacts(self, issue):\n        \n        \n        if not issue.resource:\n            return []\n\n        account_contacts = issue.resource.account.contacts\n        try:\n            resource_owners = issue.resource.get_owner_emails()\n            \n            if type(resource_owners) is list:\n                for resource_owner in resource_owners:\n                    account_contacts.append({'type': 'email', 'value': resource_owner})\n        except AttributeError:\n            pass\n        return account_contacts", "docstring": "Returns a list of contacts for an issue\n\nArgs:\nissue (:obj:`RequiredTagsIssue`): Issue record\n\nReturns:\n`list` of `dict`", "source": "juraj-google-style"}
{"code": "def _register_json_primitive(object_type, encoder, decoder):\n  \n  global _TYPE_TO_ENCODER\n  global _TYPE_NAME_TO_DECODER\n  if object_type not in _TYPE_TO_ENCODER:\n    _TYPE_TO_ENCODER[object_type] = encoder\n    _TYPE_NAME_TO_DECODER[object_type.__name__] = decoder", "docstring": "Extend what Pipeline can serialize.\n\nArgs:\nobject_type: type of the object.\nencoder: a function that takes in an object and returns\na dict of json primitives.\ndecoder: inverse function of encoder.", "source": "juraj-google-style"}
{"code": "def get_iterator_id_fn(unused_dummy):\n    return script_ops.numpy_function(generator_state.get_next_id, args, dtypes.int64)", "docstring": "Creates a unique `iterator_id` for each pass over the dataset.\n\nThe returned `iterator_id` disambiguates between multiple concurrently\nexisting iterators.\n\nArgs:\nunused_dummy: Ignored value.\n\nReturns:\nA `tf.int64` tensor whose value uniquely identifies an iterator in\n`generator_state`.", "source": "github-repos"}
{"code": "def line(self, x0, y0, x1, y1, char):\n        \n        \n        if x0 > x1:\n            x1, x0 = x0, x1\n            y1, y0 = y0, y1\n\n        dx = x1 - x0\n        dy = y1 - y0\n\n        if dx == 0 and dy == 0:\n            self.point(x0, y0, char)\n        elif abs(dx) >= abs(dy):\n            for x in range(x0, x1 + 1):\n                if dx == 0:\n                    y = y0\n                else:\n                    y = y0 + int(round((x - x0) * dy / float((dx))))\n                self.point(x, y, char)\n        elif y0 < y1:\n            for y in range(y0, y1 + 1):\n                if dy == 0:\n                    x = x0\n                else:\n                    x = x0 + int(round((y - y0) * dx / float((dy))))\n                self.point(x, y, char)\n        else:\n            for y in range(y1, y0 + 1):\n                if dy == 0:\n                    x = x0\n                else:\n                    x = x1 + int(round((y - y1) * dx / float((dy))))\n                self.point(x, y, char)", "docstring": "Create a line on ASCII canvas.\n\nArgs:\nx0 (int): x coordinate where the line should start.\ny0 (int): y coordinate where the line should start.\nx1 (int): x coordinate where the line should end.\ny1 (int): y coordinate where the line should end.\nchar (str): character to draw the line with.", "source": "juraj-google-style"}
{"code": "def fill_treeview(self, tree, input_dict):\n        \n\n        tree.model().removeRows(0, tree.model().rowCount())\n\n        def add_element(item, key, value):\n            child_name = QtWidgets.QStandardItem(key)\n\n            if isinstance(value, dict):\n                for key_child, value_child in value.items():\n                    add_element(child_name, key_child, value_child)\n                item.appendRow(child_name)\n            else:\n                child_value = QtWidgets.QStandardItem(str(value))\n\n                item.appendRow([child_name, child_value])\n\n        for index, (key, value) in enumerate(input_dict.items()):\n\n            if isinstance(value, dict):\n                item = QtWidgets.QStandardItem(key)\n                for sub_key, sub_value in value.items():\n                    add_element(item, sub_key, sub_value)\n                tree.model().appendRow(item)\n            elif isinstance(value, str):\n                item = QtGui.QStandardItem(key)\n                item_value = QtGui.QStandardItem(value)\n                item_value.setEditable(True)\n                item_value.setSelectable(True)\n                tree.model().appendRow([item, item_value])", "docstring": "fills a treeview with nested parameters\nArgs:\ntree: QtWidgets.QTreeView\nparameters: dictionary or Parameter object\n\nReturns:", "source": "juraj-google-style"}
{"code": "def read_locations(filename):\n    \n    data = ConfigParser()\n    if filename == '-':\n        data.read_file(sys.stdin)\n    else:\n        data.read(filename)\n    if not data.sections():\n        logging.debug('Config file is empty')\n\n    locations = {}\n    for name in data.sections():\n        if data.has_option(name, 'locator'):\n            latitude, longitude = utils.from_grid_locator(data.get(name,\n                                                                   'locator'))\n        else:\n            latitude = data.getfloat(name, 'latitude')\n            longitude = data.getfloat(name, 'longitude')\n        locations[name] = (latitude, longitude)\n    return locations", "docstring": "Pull locations from a user's config file.\n\nArgs:\nfilename (str): Config file to parse\n\nReturns:\ndict: List of locations from config file", "source": "juraj-google-style"}
{"code": "def process_file(options, source_text=None, generate_callgraphs=False, preserve_pytype_vm=False):\n    with config.verbosity_from(options):\n        loader = load_pytd.create_loader(options)\n        src = source_text or io.read_source_file(options.input)\n        with io.wrap_pytype_exceptions(PytypeError, filename=options.input):\n            ret = analyze.infer_types(src=src, options=options, loader=loader)\n            pytd_module = ret.ast\n    ast_root_node = astlib.parse(src, options.input, feature_version=options.python_version[1])\n    module_name = 'module'\n    src_code = source.Code(src, ret.context.vm.opcode_traces, VmTrace, filename=options.input)\n    ix = Indexer(ast=astlib, src=src_code, loader=loader, module_name=module_name, pytd_module=pytd_module)\n    ix.index(ast_root_node)\n    ix.finalize()\n    ix.vm = ret.context.vm\n    if generate_callgraphs:\n        ix.function_map = callgraph.collect_function_map(ix)\n    if not preserve_pytype_vm:\n        ix.vm = None\n    return ix", "docstring": "Process a single file and return cross references.\n\nArgs:\noptions: A dictionary of pytype options.\nsource_text: Optional text of the file; will be read from the file pointed\nto by options.input if not supplied.\ngenerate_callgraphs: Collect call graph information\npreserve_pytype_vm: Preserve the pytype vm in the indexer\n\nReturns:\nThe Indexer object used for indexing.\n\nRaises:\nPytypeError if pytype fails.", "source": "github-repos"}
{"code": "def add(self, data, name=None):\n        \n        if name is None:\n            n = len(self.data)\n            while \"Series %d\"%n in self.data:\n                n += 1\n            name = \"Series %d\"%n\n        self.data[name] = data\n        return name", "docstring": "Appends a new column of data to the data source.\n\nArgs:\ndata (seq) : new data to add\nname (str, optional) : column name to use.\nIf not supplied, generate a name of the form \"Series ####\"\n\nReturns:\nstr:  the column name used", "source": "juraj-google-style"}
{"code": "def format_usage(doc, width=None):\n    \n    \n    sections = doc.replace('\\r', '').split('\\n\\n')\n    width = width or get_terminal_size().columns or 80\n    return '\\n\\n'.join(_wrap_section(s.strip(), width) for s in sections)", "docstring": "Format the docstring for display to the user.\n\nArgs:\ndoc: The docstring to reformat for display.\n\nReturns:\nThe docstring formatted to parse and display to the user. This includes\ndedenting, rewrapping, and translating the docstring if necessary.", "source": "juraj-google-style"}
{"code": "def _ParseChatData(self, data):\n    data_store = {}\n    if ('body' in data):\n        body = data.get('body', '').replace('\\n', ' ')\n        if (body.startswith('\n            body_dict = self._ExtractJQuery(body)\n            (title, _, _) = body.partition('{')\n            body = '{0:s} <{1!s}>'.format(title[2:], self._DictToListOfStrings(body_dict))\n    else:\n        body = 'No text.'\n    data_store['text'] = body\n    room = data.get('rooms', None)\n    if (not room):\n        room = data.get('room', None)\n    if room:\n        data_store['room'] = room\n    data_store['id'] = data.get('id', None)\n    user = data.get('user', None)\n    if user:\n        try:\n            user_sid = int(user)\n            data_store['sid'] = user_sid\n        except (ValueError, TypeError):\n            data_store['user'] = user\n    return data_store", "docstring": "Parses chat comment data.\n\nArgs:\ndata (dict[str, object]): chat comment data as returned by SQLite.\n\nReturns:\ndict[str, object]: parsed chat comment data.", "source": "codesearchnet"}
{"code": "def serialize_file(struct, path, format=None, encoding='utf-8'):\n    try:\n        with open(path, 'wb') as f:\n            return serialize(struct, format, f, encoding)\n    except EnvironmentError as e:\n        raise AnyMarkupError(e, traceback.format_exc())", "docstring": "A convenience wrapper of serialize, which accepts path of file to serialize to.\n\nArgs:\nstruct: structure (dict or list) with unicode members to serialize; note that list\ncan only be serialized to json\npath: path of the file to serialize to\nformat: override markup format to serialize structure as (taken from filename\nby default)\nencoding: encoding to use when serializing, defaults to utf-8\nReturns:\nnumber of bytes written\nRaises:\nAnyMarkupError if a problem occurs while serializing", "source": "codesearchnet"}
{"code": "def predict_task_proba(self, X, t=0, **kwargs):\n    return self.predict_proba(X, **kwargs)[t]", "docstring": "Predicts probabilistic labels for an input X on task t\n\nArgs:\nX: The input for the predict_proba method\nt: The task index to predict for which to predict probabilities\nReturns:\nAn [n, K_t] tensor of predictions for task t\nNOTE: By default, this method calls predict_proba and extracts element\nt. If it is possible to predict individual tasks in isolation, however,\nthis method may be overriden for efficiency's sake.", "source": "codesearchnet"}
{"code": "def write_alias_config_hash(alias_config_hash='', empty_hash=False):\n        \n        with open(GLOBAL_ALIAS_HASH_PATH, 'w') as alias_config_hash_file:\n            alias_config_hash_file.write('' if empty_hash else alias_config_hash)", "docstring": "Write self.alias_config_hash to the alias hash file.\n\nArgs:\nempty_hash: True if we want to write an empty string into the file. Empty string in the alias hash file\nmeans that we have to perform a full load of the command table in the next run.", "source": "juraj-google-style"}
{"code": "def et2roc(et_fo, roc_fo):\n    stats_dicts = [{'q': q, 'M': 0, 'w': 0, 'm': 0, 'P': 0, 'U': 0, 'u': 0, 'T': 0, 't': 0, 'x': 0} for q in range((rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1))]\n    for line in et_fo:\n        line = line.strip()\n        if ((line != '') and (line[0] != '\n            (read_tuple_name, tab, info_categories) = line.partition('\\t')\n            intervals = info_categories.split(',')\n            for interval in intervals:\n                category = interval[0]\n                (left, colon, right) = interval[2:].partition('-')\n                for q in range(int(left), (int(right) + 1)):\n                    stats_dicts[q][category] += 1\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    roc_fo.write(('\n    l_numbers = []\n    for line in stats_dicts:\n        numbers = [line['M'], line['w'], line['m'], line['P'], line['U'], line['u'], line['T'], line['t'], line['x']]\n        if (numbers != l_numbers):\n            roc_fo.write(('\\t'.join((([str(line['q'])] + list(map(str, numbers))) + [str(sum(numbers))])) + os.linesep))\n        l_numbers = numbers", "docstring": "ET to ROC conversion.\n\nArgs:\net_fo (file): File object for the ET file.\nroc_fo (file): File object for the ROC file.\n\nraises: ValueError", "source": "codesearchnet"}
{"code": "def emit_obj_delete(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int) -> None:\n    event = self._create_event('D', category, name, pid, tid, timestamp)\n    event['id'] = object_id\n    self._events.append(event)", "docstring": "Adds an object deletion event to the trace.\n\nArgs:\ncategory: The event category as a string.\nname:  The event name as a string.\ntimestamp:  The timestamp of this event as a long integer.\npid:  Identifier of the process generating this event as an integer.\ntid:  Identifier of the thread generating this event as an integer.\nobject_id: Identifier of the object as an integer.", "source": "github-repos"}
{"code": "def _ListFileEntry(self, file_system, file_entry, parent_full_path, output_writer):\n    full_path = file_system.JoinPath([parent_full_path, file_entry.name])\n    if ((not self._list_only_files) or file_entry.IsFile()):\n        output_writer.WriteFileEntry(full_path)\n    for sub_file_entry in file_entry.sub_file_entries:\n        self._ListFileEntry(file_system, sub_file_entry, full_path, output_writer)", "docstring": "Lists a file entry.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system that contains the file entry.\nfile_entry (dfvfs.FileEntry): file entry to list.\nparent_full_path (str): full path of the parent file entry.\noutput_writer (StdoutWriter): output writer.", "source": "codesearchnet"}
{"code": "def set_file_logger(filename: str, name: str = 'parsl', level: int = logging.DEBUG, format_string: Optional[str] = None):\n    \n    if format_string is None:\n        format_string = \"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s]  %(message)s\"\n\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.DEBUG)\n    handler = logging.FileHandler(filename)\n    handler.setLevel(level)\n    formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')\n    handler.setFormatter(formatter)\n    logger.addHandler(handler)\n\n    \n    \n    futures_logger = logging.getLogger(\"concurrent.futures\")\n    futures_logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\nArgs:\n- filename (string): Name of the file to write logs to\n- name (string): Logger name\n- level (logging.LEVEL): Set the logging level.\n- format_string (string): Set the format string\n\nReturns:\n-  None", "source": "juraj-google-style"}
{"code": "def interior_angle(p1, p2, o=(0, 0)):\n    \n    v1 = vector(o, p1)\n    v2 = vector(o, p2)\n    len1 = distance(o, p1)\n    len2 = distance(o, p2)\n    try:\n        return acos(dot_product(v1, v2) / (len1 * len2))\n    except ZeroDivisionError:\n        raise ValueError(\"p1 or p2 is overlapped with origin\")", "docstring": "Returns interior angle of two vector(0 <= θ <= pi)\nArgs:\np1, p2: point (x, y)\no: origin\nRaises:\nValueError: p1 or p2 is overlapped with origin", "source": "juraj-google-style"}
{"code": "def register_trainable(name, trainable):\n    from ray.tune.trainable import Trainable\n    from ray.tune.function_runner import wrap_function\n    if isinstance(trainable, type):\n        logger.debug('Detected class for trainable.')\n    elif isinstance(trainable, FunctionType):\n        logger.debug('Detected function for trainable.')\n        trainable = wrap_function(trainable)\n    elif callable(trainable):\n        logger.warning('Detected unknown callable for trainable. Converting to class.')\n        trainable = wrap_function(trainable)\n    if (not issubclass(trainable, Trainable)):\n        raise TypeError('Second argument must be convertable to Trainable', trainable)\n    _global_registry.register(TRAINABLE_CLASS, name, trainable)", "docstring": "Register a trainable function or class.\n\nArgs:\nname (str): Name to register.\ntrainable (obj): Function or tune.Trainable class. Functions must\ntake (config, status_reporter) as arguments and will be\nautomatically converted into a class during registration.", "source": "codesearchnet"}
{"code": "def floatx():\n    return _FLOATX", "docstring": "Returns the default float type, as a string.\n\nE.g. `'float16'`, `'float32'`, `'float64'`.\n\nReturns:\nString, the current default float type.\n\nExample:\n>>> tf.keras.backend.floatx()\n'float32'", "source": "github-repos"}
{"code": "def require_validated(self, req, partial=False, bulk=False):\n    representations = ([self.require_representation(req)] if (not bulk) else self.require_representation(req))\n    if (bulk and (not isinstance(representations, list))):\n        raise ValidationError('Request payload should represent a list of resources.').as_bad_request()\n    object_dicts = []\n    try:\n        for representation in representations:\n            object_dict = self.serializer.from_representation(representation)\n            self.serializer.validate(object_dict, partial)\n            object_dicts.append(object_dict)\n    except DeserializationError as err:\n        raise err.as_bad_request()\n    except ValidationError as err:\n        raise err.as_bad_request()\n    return (object_dicts if bulk else object_dicts[0])", "docstring": "Require fully validated internal object dictionary.\n\nInternal object dictionary creation is based on content-decoded\nrepresentation retrieved from request body. Internal object validation\nis performed using resource serializer.\n\nArgs:\nreq (falcon.Request): request object\npartial (bool): set to True if partially complete representation\nis accepted (e.g. for patching instead of full update). Missing\nfields in representation will be skiped.\nbulk (bool): set to True if request payload represents multiple\nresources instead of single one.\n\nReturns:\ndict: dictionary of fields and values representing internal object.\nEach value is a result of ``field.from_representation`` call.", "source": "codesearchnet"}
{"code": "def match_row_splits_dtypes(*tensors, **kwargs):\n    return_dtype = kwargs.pop('return_dtype', False)\n    if kwargs:\n        raise ValueError(f'Unexpected keyword args {kwargs}.')\n    has_int32 = False\n    has_int64 = False\n    for tensor in tensors:\n        if isinstance(tensor, RaggedTensor):\n            if tensor.row_splits.dtype == dtypes.int32:\n                has_int32 = True\n            else:\n                has_int64 = True\n    if has_int32 and has_int64:\n        if not ragged_config.auto_cast_partition_dtype():\n            raise ValueError('Input RaggedTensors have mismatched row_splits dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.')\n        dtype = dtypes.int64\n        tensors = tuple((t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor) else t for t in tensors))\n    elif has_int32:\n        dtype = dtypes.int32\n    else:\n        dtype = dtypes.int64\n    if return_dtype:\n        return (dtype, tensors)\n    else:\n        return tensors", "docstring": "Return a copy of `tensors` with row_splits all having the same dtype.\n\nArgs:\n*tensors: A list of Tensors or RaggedTensors.\n**kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),\nwhere `dtype` is the data type used by row-splits, and `tensors` is the\nconverted list of `Tensors` and `RaggedTensors`.\n\nReturns:\nThe converted list of `Tensors` and `RaggedTensors`.", "source": "github-repos"}
{"code": "async def delCronJob(self, iden):\n        \n        cron = self.cell.agenda.appts.get(iden)\n        if cron is None:\n            raise s_exc.NoSuchIden()\n        self._trig_auth_check(cron.useriden)\n        await self.cell.agenda.delete(iden)", "docstring": "Delete a cron job\n\nArgs:\niden (bytes):  The iden of the cron job to be deleted", "source": "juraj-google-style"}
{"code": "def _StopExtractionProcesses(self, abort=False):\n    logger.debug('Stopping extraction processes.')\n    self._StopMonitoringProcesses()\n    if abort:\n        self._AbortTerminate()\n    logger.debug('Emptying task queue.')\n    self._task_queue.Empty()\n    for _ in self._processes_per_pid:\n        try:\n            self._task_queue.PushItem(plaso_queue.QueueAbort(), block=False)\n        except errors.QueueFull:\n            logger.warning('Task queue full, unable to push abort message.')\n    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n    self._task_queue.Close(abort=abort)\n    if (not abort):\n        self._AbortTerminate()\n        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n        self._task_queue.Close(abort=True)\n    self._AbortKill()", "docstring": "Stops the extraction processes.\n\nArgs:\nabort (bool): True to indicated the stop is issued on abort.", "source": "codesearchnet"}
{"code": "def __init__(self, asset_id, amount):\n        \n        self.AssetId = asset_id\n        self.Amount = amount", "docstring": "Create an instance.\n\nArgs:\nasset_id (UInt256):\namount (Fixed8):", "source": "juraj-google-style"}
{"code": "def _print_test_names_for_suite(suite_class):\n    config = config_parser.TestRunConfig()\n    runner = test_runner.TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name)\n    cls = suite_class(runner, config)\n    try:\n        cls.setup_suite(config)\n    finally:\n        cls.teardown_suite()\n    last = ''\n    for name in runner.get_full_test_names():\n        tag = name.split('.')[0]\n        if tag != last:\n            last = tag\n            print('==========> %s <==========' % tag)\n        print(name)", "docstring": "Prints the names of all the tests in a suite classes.\n\nArgs:\nsuite_class: a test suite_class to be run.", "source": "github-repos"}
{"code": "def sync_main(async_main, config_path=None, default_config=None, should_validate_task=True, loop_function=asyncio.get_event_loop):\n    context = _init_context(config_path, default_config)\n    _init_logging(context)\n    if should_validate_task:\n        validate_task_schema(context)\n    loop = loop_function()\n    loop.run_until_complete(_handle_asyncio_loop(async_main, context))", "docstring": "Entry point for scripts using scriptworker.\n\nThis function sets up the basic needs for a script to run. More specifically:\n* it creates the scriptworker context and initializes it with the provided config\n* the path to the config file is either taken from `config_path` or from `sys.argv[1]`.\n* it verifies `sys.argv` doesn't have more arguments than the config path.\n* it creates the asyncio event loop so that `async_main` can run\n\nArgs:\nasync_main (function): The function to call once everything is set up\nconfig_path (str, optional): The path to the file to load the config from.\nLoads from ``sys.argv[1]`` if ``None``. Defaults to None.\ndefault_config (dict, optional): the default config to use for ``_init_context``.\ndefaults to None.\nshould_validate_task (bool, optional): whether we should validate the task\nschema. Defaults to True.\nloop_function (function, optional): the function to call to get the\nevent loop; here for testing purposes. Defaults to\n``asyncio.get_event_loop``.", "source": "codesearchnet"}
{"code": "def _prepare_headers(self, additional_headers=None, **kwargs):\n    user_agent = 'pyseaweed/{version}'.format(version=__version__)\n    headers = {'User-Agent': user_agent}\n    if (additional_headers is not None):\n        headers.update(additional_headers)\n    return headers", "docstring": "Prepare headers for http communication.\n\nReturn dict of header to be used in requests.\n\nArgs:\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nHeaders dict. Key and values are string", "source": "codesearchnet"}
{"code": "def fillup_layer(layer_length, arrow_char):\n        \n        breakwire_layer = []\n        for _ in range(layer_length):\n            breakwire_layer.append(BreakWire(arrow_char))\n        return breakwire_layer", "docstring": "Creates a layer with BreakWire elements.\nArgs:\nlayer_length (int): The length of the layer to create\narrow_char (char): The char used to create the BreakWire element.\n\nReturns:\nlist: The new layer.", "source": "juraj-google-style"}
{"code": "def sort_index(self, **kwargs):\n    axis = kwargs.pop('axis', 0)\n    index = (self.columns if axis else self.index)\n    ascending = kwargs.pop('ascending', True)\n    if (ascending is None):\n        ascending = False\n    kwargs['ascending'] = ascending\n\n    def sort_index_builder(df, **kwargs):\n        if axis:\n            df.columns = index\n        else:\n            df.index = index\n        return df.sort_index(axis=axis, **kwargs)\n    func = self._prepare_method(sort_index_builder, **kwargs)\n    new_data = self._map_across_full_axis(axis, func)\n    if axis:\n        new_columns = pandas.Series(self.columns).sort_values(**kwargs)\n        new_index = self.index\n    else:\n        new_index = pandas.Series(self.index).sort_values(**kwargs)\n        new_columns = self.columns\n    return self.__constructor__(new_data, new_index, new_columns, self.dtypes.copy())", "docstring": "Sorts the data with respect to either the columns or the indices.\n\nReturns:\nDataManager containing the data sorted by columns or indices.", "source": "codesearchnet"}
{"code": "def copy_r(src, dst):\n    \n\n    abssrc = os.path.abspath(src)\n    absdst = os.path.abspath(dst)\n    try:\n        os.makedirs(absdst)\n    except OSError:\n        \n        pass\n    for f in os.listdir(abssrc):\n        fpath = os.path.join(abssrc, f)\n        if os.path.isfile(fpath):\n            shutil.copy(fpath, absdst)\n        elif not absdst.startswith(fpath):\n            copy_r(fpath, os.path.join(absdst, f))\n        else:\n            warnings.warn(\"Cannot copy %s to itself\" % fpath)", "docstring": "Implements a recursive copy function similar to Unix's \"cp -r\" command.\nSurprisingly, python does not have a real equivalent. shutil.copytree\nonly works if the destination directory is not present.\n\nArgs:\nsrc (str): Source folder to copy.\ndst (str): Destination folder.", "source": "juraj-google-style"}
{"code": "def apply(self, window_length, samples=True, func1d=None):\n    window_length /= (1 if samples else self.step)\n    if (func1d is None):\n        func1d = np.mean\n    params = self.__dict__.copy()\n    out = self._rolling_window(int(window_length), func1d)\n    return Curve(out, params=params)", "docstring": "Runs any kind of function over a window.\n\nArgs:\nwindow_length (int): the window length. Required.\nsamples (bool): window length is in samples. Use False for a window\nlength given in metres.\nfunc1d (function): a function that takes a 1D array and returns a\nscalar. Default: ``np.mean()``.\n\nReturns:\nCurve.", "source": "codesearchnet"}
{"code": "def intrusion_sets(self, name, owner=None, **kwargs):\n        \n        return IntrusionSet(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Intrustion Set TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def stop_tuning_job(self, name):\n    try:\n        LOGGER.info('Stopping tuning job: {}'.format(name))\n        self.sagemaker_client.stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)\n    except ClientError as e:\n        error_code = e.response['Error']['Code']\n        if (error_code == 'ValidationException'):\n            LOGGER.info('Tuning job: {} is already stopped or not running.'.format(name))\n        else:\n            LOGGER.error('Error occurred while attempting to stop tuning job: {}. Please try again.'.format(name))\n            raise", "docstring": "Stop the Amazon SageMaker hyperparameter tuning job with the specified name.\n\nArgs:\nname (str): Name of the Amazon SageMaker hyperparameter tuning job.\n\nRaises:\nClientError: If an error occurs while trying to stop the hyperparameter tuning job.", "source": "codesearchnet"}
{"code": "def get_compound_bodies(node):\n    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):\n        return [node.body]\n    elif isinstance(node, (ast.If, ast.While, ast.For)):\n        return [node.body, node.orelse]\n    elif (PY2 and isinstance(node, ast.TryFinally)):\n        return [node.body, node.finalbody]\n    elif (PY2 and isinstance(node, ast.TryExcept)):\n        return ([node.body, node.orelse] + [h.body for h in node.handlers])\n    elif (PY3 and isinstance(node, ast.Try)):\n        return ([node.body, node.orelse, node.finalbody] + [h.body for h in node.handlers])\n    end\n    return []", "docstring": "Returns a list of bodies of a compound statement node.\n\nArgs:\nnode: AST node.\n\nReturns:\nA list of bodies of the node. If the given node does not represent\na compound statement, an empty list is returned.", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = ConditionalDetrFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `ConditionalDetrFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def delete(workflow_id: str = None, workflow_version: str = None):\n    \n    if workflow_id is None and workflow_version is None:\n        keys = DB.get_keys(\"workflow_definitions:*\")\n        DB.delete(*keys)\n    elif workflow_id is not None and workflow_version is None:\n        keys = DB.get_keys(\"workflow_definitions:{}:*\".format(workflow_id))\n        DB.delete(*keys)\n    elif workflow_id is None and workflow_version is not None:\n        keys = DB.get_keys(\"workflow_definitions:*:{}\"\n                           .format(workflow_version))\n        DB.delete(*keys)\n    else:\n        name = \"workflow_definitions:{}:{}\".format(workflow_id,\n                                                   workflow_version)\n        DB.delete(name)", "docstring": "Delete workflow definitions.\n\nArgs:\nworkflow_id (str, optional): Optional workflow identifier\nworkflow_version (str, optional): Optional workflow identifier version\n\nIf workflow_id and workflow_version are None, delete all workflow\ndefinitions.", "source": "juraj-google-style"}
{"code": "def broadcast_dynamic_shape(shape_x: dynamic_ragged_shape.DenseOrRaggedShape, shape_y: dynamic_ragged_shape.DenseOrRaggedShape) -> dynamic_ragged_shape.DynamicRaggedShape:\n    if not isinstance(shape_x, dynamic_ragged_shape.DynamicRaggedShape):\n        shape_x = dynamic_ragged_shape.DynamicRaggedShape([], shape_x)\n    if not isinstance(shape_y, dynamic_ragged_shape.DynamicRaggedShape):\n        shape_y = dynamic_ragged_shape.DynamicRaggedShape([], shape_y)\n    return dynamic_ragged_shape.broadcast_dynamic_shape(shape_x, shape_y)", "docstring": "Returns the shape formed by broadcasting two shapes to be compatible.\n\n1. If shape_x and shape_y both have row_partitions, then fail if their dtypes\ndon't match.\n2. If neither has row_partitions and they have different dtypes,\ngo with int64.\n3. If one has row_partitions, go with that dtype.\n\nArgs:\nshape_x: A `DynamicRaggedShape`\nshape_y: A `DynamicRaggedShape`\n\nReturns:\nA `DynamicRaggedShape`.\nRaises:\nValueError: If `shape_x` and `shape_y` are not broadcast-compatible.", "source": "github-repos"}
{"code": "def append_with_data(url, data):\n        \n\n        if data is None:\n            return url\n\n        url_parts = list(urlparse(url))\n\n        query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True))\n        query.update(data)\n\n        url_parts[4] = URLHelper.query_dict_to_string(query)\n\n        return urlunparse(url_parts)", "docstring": "Append the given URL with the given data OrderedDict.\n\nArgs:\nurl (str): The URL to append.\ndata (obj): The key value OrderedDict to append to the URL.\n\nReturns:\nstr: The new URL.", "source": "juraj-google-style"}
{"code": "def info(self):\n    result = list()\n    result.append('Agents:\\n')\n    for agent in self._all_agents:\n        result.append('\\tName: ')\n        result.append(agent.name)\n        result.append('\\n\\tType: ')\n        result.append(type(agent).__name__)\n        result.append('\\n\\t')\n        result.append('Sensors:\\n')\n        for sensor in self._sensor_map[agent.name].keys():\n            result.append('\\t\\t')\n            result.append(Sensors.name(sensor))\n            result.append('\\n')\n    return ''.join(result)", "docstring": "Returns a string with specific information about the environment.\nThis information includes which agents are in the environment and which sensors they have.\n\nReturns:\nstr: The information in a string format.", "source": "codesearchnet"}
{"code": "def save_results(self, output_dir='.', prefix='', prefix_sep='_',\n                     image_list=None):\n        \n\n        if prefix == '':\n            prefix_sep = ''\n\n        if not exists(output_dir):\n            makedirs(output_dir)\n\n        logger.debug(\"Saving results...\")\n        if image_list is None:\n            image_list = self.images.keys()\n        for suffix, img in self.images.items():\n            if suffix in image_list:\n                filename = prefix + prefix_sep + suffix + '.nii.gz'\n                outpath = join(output_dir, filename)\n                imageutils.save_img(img, outpath, self.dataset.masker)", "docstring": "Write out any images generated by the meta-analysis.\nArgs:\noutput_dir (str): folder to write images to\nprefix (str): all image files will be prepended with this string\nprefix_sep (str): glue between the prefix and rest of filename\nimage_list (list): optional list of images to save--e.g.,\n['pFgA_z', 'pAgF']. If image_list is None (default), will save\nall images.", "source": "juraj-google-style"}
{"code": "def get_meta_graph_def_from_tags(self, tags):\n    found_match = False\n    meta_graph_def_to_load = None\n    available_tags = []\n    for meta_graph_def in self._saved_model.meta_graphs:\n        available_tags.append(set(meta_graph_def.meta_info_def.tags))\n        if set(meta_graph_def.meta_info_def.tags) == set(tags):\n            meta_graph_def_to_load = meta_graph_def\n            found_match = True\n            break\n    if not found_match:\n        raise RuntimeError(f\"MetaGraphDef associated with tags {str(tags).strip('[]')} could not be found in SavedModel, with available tags '{available_tags}'. To inspect available tag-sets in the SavedModel, please use the SavedModel CLI: `saved_model_cli`.\")\n    return meta_graph_def_to_load", "docstring": "Return MetaGraphDef with the exact specified tags.\n\nArgs:\ntags: A list or set of string tags that identify the MetaGraphDef.\n\nReturns:\nMetaGraphDef with the same tags.\n\nRaises:\nRuntimeError: if no metagraphs were found with the associated tags.", "source": "github-repos"}
{"code": "def stream(\n        self,\n        accountID,\n        **kwargs\n    ):\n        \n\n        request = Request(\n            'GET',\n            '/v3/accounts/{accountID}/transactions/stream'\n        )\n\n        request.set_path_param(\n            'accountID',\n            accountID\n        )\n\n        request.set_stream(True)\n\n        class Parser():\n            def __init__(self, ctx):\n                self.ctx = ctx\n\n            def __call__(self, line):\n                j = json.loads(line.decode('utf-8'))\n\n                type = j.get(\"type\")\n\n                if type is None:\n                    return (\"unknown\", j)\n                elif type == \"HEARTBEAT\":\n                    return (\n                        \"transaction.TransactionHeartbeat\",\n                        self.ctx.transaction.TransactionHeartbeat.from_dict(\n                            j,\n                            self.ctx\n                        )\n                    )\n\n                transaction = self.ctx.transaction.Transaction.from_dict(\n                    j, self.ctx\n                )\n\n                return (\n                    \"transaction.Transaction\",\n                    transaction\n                )\n\n                \n        request.set_line_parser(\n            Parser(self.ctx)\n        )\n\n        response = self.ctx.request(request)\n\n\n        return response", "docstring": "Get a stream of Transactions for an Account starting from when the\nrequest is made.\n\nArgs:\naccountID:\nAccount Identifier\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "juraj-google-style"}
{"code": "def FromDictionary(cls, dictionary):\n    \n    if 'user_id' in dictionary:\n      raise errors.GitkitClientError('use localId instead')\n    if 'localId' not in dictionary:\n      raise errors.GitkitClientError('must specify localId')\n    if 'email' not in dictionary:\n      raise errors.GitkitClientError('must specify email')\n\n    return cls(decode=False, **dictionary)", "docstring": "Initializes from user specified dictionary.\n\nArgs:\ndictionary: dict of user specified attributes\nReturns:\nGitkitUser object", "source": "juraj-google-style"}
{"code": "def _convert_keras_to_saved_model(self, output_dir):\n    try:\n\n        def _is_keras_3():\n            \n            try:\n                import keras\n                return keras.__version__.startswith('3') and isinstance(self._keras_model, keras.layers.Layer)\n            except ImportError:\n                return False\n        if _is_keras_3():\n            import keras\n            export_archive = keras.export.ExportArchive()\n            export_archive.track(self._keras_model)\n            if isinstance(self._keras_model, (keras.src.models.Functional, keras.src.models.Sequential)):\n                input_signature = nest.map_structure(lambda x: tensor_spec.TensorSpec(x.shape, dtype=x.dtype, name=x.name), self._keras_model.inputs)\n                if isinstance(input_signature, list) and len(input_signature) > 1:\n                    input_signature = [input_signature]\n            else:\n                save_spec = _get_save_spec(self._keras_model)\n                if not save_spec:\n                    raise ValueError('The model provided has never been called. It must be called at least once before export.')\n                input_signature = [save_spec]\n            inference_fn = functools.partial(self._keras_model.__call__, training=False)\n            export_archive.add_endpoint('serve', inference_fn, input_signature)\n            export_archive.write_out(output_dir)\n        else:\n            _save.save(self._keras_model, output_dir, options=_save_options.SaveOptions(save_debug_info=True))\n    except Exception:\n        return (None, None, None)\n    self.saved_model_dir = output_dir\n    self._saved_model_tags = set([_tag_constants.SERVING])\n    self._saved_model_exported_names = [_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n    self._parse_saved_model_args(always_enable_saved_model_import=self.experimental_lower_to_saved_model)\n    if self.saved_model_dir:\n        graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags)\n        self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)\n        return (graph_def, input_tensors, output_tensors)\n    return (None, None, None)", "docstring": "Save Keras model to the SavedModel format.\n\nArgs:\noutput_dir: The output directory to save the SavedModel.\n\nReturns:\ngraph_def: The frozen GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.", "source": "github-repos"}
{"code": "def _to_tf_type(dtype):\n    return dtypes.as_dtype(dtype)", "docstring": "Converts a native python or numpy type to TF DType.\n\nArgs:\ndtype: Could be a python type, a numpy type or a TF DType.\n\nReturns:\nA tensorflow `DType`.", "source": "github-repos"}
{"code": "def transform(self, args):\n    if self.parse_error():\n        AliasManager.write_alias_config_hash(empty_hash=True)\n        return args\n    if self.detect_alias_config_change():\n        self.load_full_command_table()\n        self.collided_alias = AliasManager.build_collision_table(self.alias_table.sections())\n        build_tab_completion_table(self.alias_table)\n    else:\n        self.load_collided_alias()\n    transformed_commands = []\n    alias_iter = enumerate(args, 1)\n    for (alias_index, alias) in alias_iter:\n        is_collided_alias = ((alias in self.collided_alias) and (alias_index in self.collided_alias[alias]))\n        is_named_arg = ((alias_index > 1) and args[(alias_index - 2)].startswith('-'))\n        is_named_arg_flag = alias.startswith('-')\n        excluded_commands = is_alias_command(['remove', 'export'], transformed_commands)\n        if ((not alias) or is_collided_alias or is_named_arg or is_named_arg_flag or excluded_commands):\n            transformed_commands.append(alias)\n            continue\n        full_alias = self.get_full_alias(alias)\n        if self.alias_table.has_option(full_alias, 'command'):\n            cmd_derived_from_alias = self.alias_table.get(full_alias, 'command')\n            telemetry.set_alias_hit(full_alias)\n        else:\n            transformed_commands.append(alias)\n            continue\n        pos_args_table = build_pos_args_table(full_alias, args, alias_index)\n        if pos_args_table:\n            logger.debug(POS_ARG_DEBUG_MSG, full_alias, cmd_derived_from_alias, pos_args_table)\n            transformed_commands += render_template(cmd_derived_from_alias, pos_args_table)\n            for pos_arg in pos_args_table:\n                next(alias_iter)\n        else:\n            logger.debug(DEBUG_MSG, full_alias, cmd_derived_from_alias)\n            transformed_commands += shlex.split(cmd_derived_from_alias)\n    return self.post_transform(transformed_commands)", "docstring": "Transform any aliases in args to their respective commands.\n\nArgs:\nargs: A list of space-delimited command input extracted directly from the console.\n\nReturns:\nA list of transformed commands according to the alias configuration file.", "source": "codesearchnet"}
{"code": "def validate(datapackage, schema='base'):\n    errors = []\n    schema_obj = None\n    datapackage_obj = None\n    if isinstance(datapackage, six.string_types):\n        try:\n            datapackage_obj = json.loads(datapackage)\n        except ValueError as e:\n            errors.append(DataPackageValidateException(e))\n    elif (not isinstance(datapackage, dict)):\n        msg = \"Data Package must be a dict or JSON string, but was a '{0}'\"\n        dp_type = type(datapackage).__name__\n        error = DataPackageValidateException(msg.format(dp_type))\n        errors.append(error)\n    else:\n        datapackage_obj = datapackage\n    try:\n        if isinstance(schema, six.string_types):\n            try:\n                schema = json.loads(schema)\n            except ValueError:\n                pass\n        schema_obj = Schema(schema)\n    except (SchemaError, RegistryError) as e:\n        errors.append(e)\n    if ((datapackage_obj is not None) and (schema_obj is not None)):\n        try:\n            schema_obj.validate(datapackage_obj)\n        except ValidationError as e:\n            errors.append(e)\n    if errors:\n        exception = DataPackageValidateException()\n        exception.errors = errors\n        raise exception", "docstring": "Validate Data Package datapackage.json files against a jsonschema.\n\nArgs:\ndatapackage (str or dict): The Data Package descriptor file (i.e.\ndatapackage.json) as a dict or its contents in a string.\nschema (str or dict): If a string, it can be the schema ID in the\nregistry, a local path, a URL or the schema's JSON as a string. If\na dict, it must be the JSON Schema itself.\n\nReturns:\nNone\n\nRaises:\nDataPackageValidateException: This exception has the list of the\nvalidation errors in its `.errors` attribute.", "source": "codesearchnet"}
{"code": "def register_multi_flags_validator(flag_names, multi_flags_checker, message='Flags validation failed', flag_values=FLAGS):\n    v = gflags_validators.MultiFlagsValidator(flag_names, multi_flags_checker, message)\n    _add_validator(flag_values, v)", "docstring": "Adds a constraint to multiple flags.\n\nThe constraint is validated when flags are initially parsed, and after each\nchange of the corresponding flag's value.\n\nArgs:\nflag_names: [str], a list of the flag names to be checked.\nmulti_flags_checker: callable, a function to validate the flag.\ninput - dictionary, with keys() being flag_names, and value for each key\nbeing the value of the corresponding flag (string, boolean, etc).\noutput - Boolean. Must return True if validator constraint is satisfied.\nIf constraint is not satisfied, it should either return False or\nraise gflags.ValidationError.\nmessage: Error text to be shown to the user if checker returns False.\nIf checker raises gflags.ValidationError, message from the raised error\nwill be shown.\nflag_values: An optional FlagValues instance to validate against.\n\nRaises:\nAttributeError: If a flag is not registered as a valid flag name.", "source": "codesearchnet"}
{"code": "async def check_status(self, pipeline_uuid: str) -> api_pb2.Status:\n    self._verify_pipeline_uuid(pipeline_uuid)\n    request = api_pb2.CheckStatusRequest(pipeline_uuid=pipeline_uuid)\n    response = await self._stub.CheckStatus(request, **self._kwargs)\n    return response.status", "docstring": "Get status of the pipeline by his pipeline\n\nArgs:\npipeline_uuid: uuid of the pipeline\n\nReturns:\nstatus: status of the pipeline", "source": "github-repos"}
{"code": "def task_done(self, message):\n        \n        topic_partition = (message.topic, message.partition)\n        if topic_partition not in self._topics:\n            logger.warning('Unrecognized topic/partition in task_done message: '\n                           '{0}:{1}'.format(*topic_partition))\n            return False\n\n        offset = message.offset\n\n        \n        prev_done = self._offsets.task_done[topic_partition]\n        if prev_done is not None and offset != (prev_done + 1):\n            logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1',\n                           offset, prev_done)\n\n        \n        \n        prev_commit = self._offsets.commit[topic_partition]\n        if prev_commit is not None and ((offset + 1) <= prev_commit):\n            logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',\n                           offset, prev_commit)\n\n        self._offsets.task_done[topic_partition] = offset\n\n        \n        if self._does_auto_commit_messages():\n            self._incr_auto_commit_message_count()\n\n        if self._should_auto_commit():\n            self.commit()\n\n        return True", "docstring": "Mark a fetched message as consumed.\n\nOffsets for messages marked as \"task_done\" will be stored back\nto the kafka cluster for this consumer group on commit()\n\nArguments:\nmessage (KafkaMessage): the message to mark as complete\n\nReturns:\nTrue, unless the topic-partition for this message has not\nbeen configured for the consumer. In normal operation, this\nshould not happen. But see github issue 364.", "source": "juraj-google-style"}
{"code": "def _download(self):\n    repo = self._config.get('napps', 'repo')\n    napp_id = '{}/{}-{}.napp'.format(self.user, self.napp, self.version)\n    uri = os.path.join(repo, napp_id)\n    return urllib.request.urlretrieve(uri)[0]", "docstring": "Download NApp package from server.\n\nReturn:\nstr: Downloaded temp filename.\n\nRaises:\nurllib.error.HTTPError: If download is not successful.", "source": "codesearchnet"}
{"code": "def _CreateStyleForRoute(self, doc, route):\n    \n    style_id = 'route_%s' % route.route_id\n    style = ET.SubElement(doc, 'Style', {'id': style_id})\n    linestyle = ET.SubElement(style, 'LineStyle')\n    width = ET.SubElement(linestyle, 'width')\n    type_to_width = {0: '3',  \n                     1: '3',  \n                     2: '5',  \n                     3: '1'}  \n    width.text = type_to_width.get(route.route_type, '1')\n    if route.route_color:\n      color = ET.SubElement(linestyle, 'color')\n      red = route.route_color[0:2].lower()\n      green = route.route_color[2:4].lower()\n      blue = route.route_color[4:6].lower()\n      color.text = 'ff%s%s%s' % (blue, green, red)\n    return style_id", "docstring": "Create a KML Style element for the route.\n\nThe style sets the line colour if the route colour is specified. The\nline thickness is set depending on the vehicle type.\n\nArgs:\ndoc: The KML Document ElementTree.Element instance.\nroute: The transitfeed.Route to create the style for.\n\nReturns:\nThe id of the style as a string.", "source": "juraj-google-style"}
{"code": "def parent_callback(self, executor_fu):\n    with self._update_lock:\n        if (not executor_fu.done()):\n            raise ValueError('done callback called, despite future not reporting itself as done')\n        if (executor_fu != self.parent):\n            if ((executor_fu.exception() is None) and (not isinstance(executor_fu.result(), RemoteExceptionWrapper))):\n                raise ValueError('internal consistency error: AppFuture done callback called without an exception, but parent has been changed since then')\n        try:\n            res = executor_fu.result()\n            if isinstance(res, RemoteExceptionWrapper):\n                res.reraise()\n            super().set_result(executor_fu.result())\n        except Exception as e:\n            if (executor_fu.retries_left > 0):\n                pass\n            else:\n                super().set_exception(e)", "docstring": "Callback from a parent future to update the AppFuture.\n\nUsed internally by AppFuture, and should not be called by code using AppFuture.\n\nArgs:\n- executor_fu (Future): Future returned by the executor along with callback.\nThis may not be the current parent future, as the parent future may have\nalready been updated to point to a retrying execution, and in that case,\nthis is logged.\n\nIn the case that a new parent has been attached, we must immediately discard\nthis result no matter what it contains (although it might be interesting\nto log if it was successful...)\n\nReturns:\n- None\n\nUpdates the super() with the result() or exception()", "source": "codesearchnet"}
{"code": "def get_content_of_file(self, name, full_path=False):\n        \n        if self.handle:\n            for member in self.handle.getmembers():\n                if (full_path and member.name == name) or (\n                        not full_path and os.path.basename(\n                            member.name) == name):\n                    extracted = self.handle.extractfile(member)\n                    return extracted.read().decode(\n                        locale.getpreferredencoding())\n\n        return None", "docstring": "Returns content of file from archive.\n\nIf full_path is set to False and two files with given name exist,\ncontent of one is returned (it is not specified which one that is).\nIf set to True, returns content of exactly that file.\n\nArgs:\nname: name of the file to get content of\nReturns:\nContent of the file with given name or None, if no such.", "source": "juraj-google-style"}
{"code": "def _NormalizedVolumeIdentifiers(self, volume_system, volume_identifiers, prefix='v'):\n    normalized_volume_identifiers = []\n    for volume_identifier in volume_identifiers:\n        if isinstance(volume_identifier, int):\n            volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n        elif (not volume_identifier.startswith(prefix)):\n            try:\n                volume_identifier = int(volume_identifier, 10)\n                volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n            except (TypeError, ValueError):\n                pass\n        try:\n            volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n        except KeyError:\n            volume = None\n        if (not volume):\n            raise errors.ScannerError('Volume missing for identifier: {0:s}.'.format(volume_identifier))\n        normalized_volume_identifiers.append(volume_identifier)\n    return normalized_volume_identifiers", "docstring": "Normalizes volume identifiers.\n\nArgs:\nvolume_system (VolumeSystem): volume system.\nvolume_identifiers (list[int|str]): allowed volume identifiers, formatted\nas an integer or string with prefix.\nprefix (Optional[str]): volume identifier prefix.\n\nReturns:\nlist[str]: volume identifiers with prefix.\n\nRaises:\nScannerError: if the volume identifier is not supported or no volume\ncould be found that corresponds with the identifier.", "source": "codesearchnet"}
{"code": "def __enter__(self) -> str:\n    ctx = context.context()\n    if ctx.executing_eagerly():\n        old_name = ctx.scope_name\n        name = self._name\n        if not name:\n            scope_name = ''\n        elif name[-1] == '/':\n            scope_name = name\n        elif old_name:\n            scope_name = old_name + name + '/'\n        else:\n            scope_name = name + '/'\n        ctx.scope_name = scope_name\n\n        def _restore_name_scope(*_):\n            ctx.scope_name = old_name\n        self._exit_fns.append(_restore_name_scope)\n    else:\n        scope = get_default_graph().name_scope(self._name)\n        scope_name = scope.__enter__()\n        self._exit_fns.append(scope.__exit__)\n    return scope_name", "docstring": "Start the scope block.\n\nReturns:\nThe scope name.", "source": "github-repos"}
{"code": "def prepend_to_list(self, key, *value, pipeline=False):\n    if pipeline:\n        self._pipeline.lpush(key, *value)\n    else:\n        self._db.lpush(key, *value)", "docstring": "Add new element to the start of the list stored at key.\n\nArgs:\nkey (str): Key where the list is stored\nvalue: Value to add to the list\npipeline (bool): True, start a transaction block. Default false.", "source": "codesearchnet"}
{"code": "def get_cond_latents(all_latents=None, hparams=None):\n  \n  cond_latents = None\n  if hparams.gen_mode == \"conditional\":\n    if hparams.latent_dist_encoder in [\"conv_net\", \"conv3d_net\"]:\n      num_cond_latents = (hparams.num_cond_latents +\n                          int(hparams.cond_first_frame))\n      if len(all_latents) >= num_cond_latents:\n        cond_latents = all_latents[-hparams.num_cond_latents:]\n        if hparams.cond_first_frame:\n          cond_latents = [all_latents[0]] + cond_latents\n    elif hparams.latent_dist_encoder in [\"pointwise\", \"conv_lstm\"]:\n      if all_latents:\n        cond_latents = all_latents[-1]\n\n  if hparams.gen_mode == \"conditional\":\n    global_step = tf.train.get_or_create_global_step()\n    condition = tf.greater(global_step, hparams.pretrain_steps)\n  else:\n    condition = tf.constant(False, dtype=tf.bool)\n  return condition, cond_latents", "docstring": "Get z^{cond}_{t} given z^{1..t-1}.\n\nArgs:\nall_latents: list of list of tensors,\nouter-size equals no.of time_steps-1\ninner-size equals hparams.n_levels.\nhparams: See next_frame_glow_hparams.\nReturns:\ncond_latents: conditional latents at time-step t.", "source": "juraj-google-style"}
{"code": "def draw_rect(self, rect):\n        \n        check_int_err(lib.SDL_RenderDrawRect(self._ptr, rect._ptr))", "docstring": "Draw a rectangle on the current rendering target.\n\nArgs:\nrect (Rect): The destination rectangle, or None to outline the entire rendering target.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def _weight_generator(self, reviewers):\n        \n        scores = [r.anomalous_score for r in reviewers]\n        mu = np.average(scores)\n        sigma = np.std(scores)\n\n        if sigma:\n            def w(v):\n                \n                try:\n                    exp = math.exp(self.alpha * (v - mu) / sigma)\n                    return 1. / (1. + exp)\n                except OverflowError:\n                    return 0.\n\n            return w\n\n        else:\n            \n            \n            return lambda v: 1.", "docstring": "Compute a weight function for the given reviewers.\n\nArgs:\nreviewers: a set of reviewers to compute weight function.\n\nReturns:\na function computing a weight for a reviewer.", "source": "juraj-google-style"}
{"code": "def as_dataframe(self, pattern='*', max_rows=None):\n    \n    data = []\n    for i, group in enumerate(self.list(pattern)):\n      if max_rows is not None and i >= max_rows:\n        break\n      parent = self._group_dict.get(group.parent_id)\n      parent_display_name = '' if parent is None else parent.display_name\n      data.append([\n          group.id, group.display_name, group.parent_id,\n          parent_display_name, group.is_cluster, group.filter])\n\n    return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)", "docstring": "Creates a pandas dataframe from the groups that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the groups. This can\ninclude Unix shell-style wildcards. E.g. ``\"Production *\"``,\n``\"*-backend\"``.\nmax_rows: The maximum number of groups to return. If None, return all.\n\nReturns:\nA pandas dataframe containing matching groups.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def get_keypoint_predictions(heatmaps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n    if not isinstance(heatmaps, np.ndarray):\n        raise ValueError('Heatmaps should be np.ndarray')\n    if heatmaps.ndim != 4:\n        raise ValueError('Heatmaps should be 4-dimensional')\n    batch_size, num_keypoints, _, width = heatmaps.shape\n    heatmaps_reshaped = heatmaps.reshape((batch_size, num_keypoints, -1))\n    idx = np.argmax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))\n    scores = np.amax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))\n    preds = np.tile(idx, (1, 1, 2)).astype(np.float32)\n    preds[:, :, 0] = preds[:, :, 0] % width\n    preds[:, :, 1] = preds[:, :, 1] \n    preds = np.where(np.tile(scores, (1, 1, 2)) > 0.0, preds, -1)\n    return (preds, scores)", "docstring": "Get keypoint predictions from score maps.\n\nArgs:\nheatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):\nModel predicted heatmaps.\n\nReturns:\ntuple: A tuple containing aggregated results.\n\n- coords (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):\nPredicted keypoint location.\n- scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):\nScores (confidence) of the keypoints.", "source": "github-repos"}
{"code": "def parse_compounds(compound_info, case_id, variant_type):\n    compounds = []\n    if compound_info:\n        for family_info in compound_info.split(','):\n            splitted_entry = family_info.split(':')\n            if (splitted_entry[0] == case_id):\n                for compound in splitted_entry[1].split('|'):\n                    splitted_compound = compound.split('>')\n                    compound_obj = {}\n                    compound_name = splitted_compound[0]\n                    compound_obj['variant'] = generate_md5_key((compound_name.split('_') + [variant_type, case_id]))\n                    try:\n                        compound_score = float(splitted_compound[1])\n                    except (TypeError, IndexError):\n                        compound_score = 0.0\n                    compound_obj['score'] = compound_score\n                    compound_obj['display_name'] = compound_name\n                    compounds.append(compound_obj)\n    return compounds", "docstring": "Get a list with compounds objects for this variant.\n\nArguments:\ncompound_info(str): A Variant dictionary\ncase_id (str): unique family id\nvariant_type(str): 'research' or 'clinical'\n\nReturns:\ncompounds(list(dict)): A list of compounds", "source": "codesearchnet"}
{"code": "def extractHolidayDate(self, setting_holiday):\n        \n        ret = namedtuple(\"result\", [\"Holiday\", \"Month\", \"Day\"])\n        setting_holiday += 1\n        ret.Holiday = str(setting_holiday)\n\n        if (setting_holiday < 1) or (setting_holiday > Extents.Holidays):\n            ekm_log(\"Out of bounds:  holiday \" + str(setting_holiday))\n            ret.Holiday = ret.Month = ret.Day = str(0)\n            return ret\n\n        idxday = \"Holiday_\" + str(setting_holiday) + \"_Day\"\n        idxmon = \"Holiday_\" + str(setting_holiday) + \"_Mon\"\n        if idxmon not in self.m_hldy:\n            ret.Holiday = ret.Month = ret.Day = str(0)\n            return ret\n        if idxday not in self.m_hldy:\n            ret.Holiday = ret.Month = ret.Day = str(0)\n            return ret\n        ret.Day = self.m_hldy[idxday][MeterData.StringValue]\n        ret.Month = self.m_hldy[idxmon][MeterData.StringValue]\n        return ret", "docstring": "Read a single holiday date from meter buffer.\n\nArgs:\nsetting_holiday (int):  Holiday from 0-19 or in range(Extents.Holidays)\n\nReturns:\ntuple: Holiday tuple, elements are strings.\n\n=============== ======================\nHoliday         Holiday 0-19 as string\nDay             Day 1-31 as string\nMonth           Monty 1-12 as string\n=============== ======================", "source": "juraj-google-style"}
{"code": "def _ParseKeyWithPlugin(self, parser_mediator, registry_key, plugin):\n    \n    try:\n      plugin.UpdateChainAndProcess(parser_mediator, registry_key)\n    except (IOError, dfwinreg_errors.WinRegistryValueError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'in key: {0:s} error: {1!s}'.format(registry_key.path, exception))", "docstring": "Parses the Registry key with a specific plugin.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nregistry_key (dfwinreg.WinRegistryKey): Windwos Registry key.\nplugin (WindowsRegistryPlugin): Windows Registry plugin.", "source": "juraj-google-style"}
{"code": "def execute(self, command, *args, encoding=_NOTSET):\n    if ((self._reader is None) or self._reader.at_eof()):\n        msg = (self._close_msg or 'Connection closed or corrupted')\n        raise ConnectionClosedError(msg)\n    if (command is None):\n        raise TypeError('command must not be None')\n    if (None in args):\n        raise TypeError('args must not contain None')\n    command = command.upper().strip()\n    is_pubsub = (command in _PUBSUB_COMMANDS)\n    is_ping = (command in ('PING', b'PING'))\n    if (self._in_pubsub and (not (is_pubsub or is_ping))):\n        raise RedisError('Connection in SUBSCRIBE mode')\n    elif is_pubsub:\n        logger.warning('Deprecated. Use `execute_pubsub` method directly')\n        return self.execute_pubsub(command, *args)\n    if (command in ('SELECT', b'SELECT')):\n        cb = partial(self._set_db, args=args)\n    elif (command in ('MULTI', b'MULTI')):\n        cb = self._start_transaction\n    elif (command in ('EXEC', b'EXEC')):\n        cb = partial(self._end_transaction, discard=False)\n    elif (command in ('DISCARD', b'DISCARD')):\n        cb = partial(self._end_transaction, discard=True)\n    else:\n        cb = None\n    if (encoding is _NOTSET):\n        encoding = self._encoding\n    fut = self._loop.create_future()\n    if (self._pipeline_buffer is None):\n        self._writer.write(encode_command(command, *args))\n    else:\n        encode_command(command, *args, buf=self._pipeline_buffer)\n    self._waiters.append((fut, encoding, cb))\n    return fut", "docstring": "Executes redis command and returns Future waiting for the answer.\n\nRaises:\n* TypeError if any of args can not be encoded as bytes.\n* ReplyError on redis '-ERR' responses.\n* ProtocolError when response can not be decoded meaning connection\nis broken.\n* ConnectionClosedError when either client or server has closed the\nconnection.", "source": "codesearchnet"}
{"code": "def batch_normalize_with_arguments(x, arguments):\n    x = prettytensor.wrap(x)\n    if isinstance(arguments, bool):\n        if arguments:\n            return x.batch_normalize()\n        else:\n            return x\n    kwargs = arguments._asdict()\n    defaults = prettytensor._defaults\n    for arg in ('learned_moments_update_rate', 'variance_epsilon', 'scale_after_normalization'):\n        if (kwargs.get(arg, None) is None):\n            if (arg in defaults):\n                kwargs[arg] = defaults[arg]\n            else:\n                del kwargs[arg]\n    return x.batch_normalize(**kwargs)", "docstring": "Applies batch normalization to x as specified in arguments.\n\nArgs:\nx: A Pretty Tensor.\narguments: Either a boolean to batch_normalize or a\nBatchNormalizationArguments\n\nReturns:\nx with batch normalization applied.", "source": "codesearchnet"}
{"code": "def get_counter_metric(result: PipelineResult, namespace: str, name: str) -> int:\n    metrics = result.metrics().query(MetricsFilter().with_namespace(namespace).with_name(name))\n    counters = metrics['counters']\n    if len(counters) > 1:\n        raise RuntimeError('%d instead of one metric result matches name: %s in namespace %s' % (len(counters), name, namespace))\n    return counters[0].result if len(counters) > 0 else -1", "docstring": "get specific counter metric from pipeline result\n\nArgs:\nresult: the PipelineResult which metrics are read from\nnamespace: a string representing the namespace of wanted metric\nname: a string representing the  name of the wanted metric\n\nReturns:\nthe result of the wanted metric if it exist, else -1", "source": "github-repos"}
{"code": "def range(*args, prefix: str):\n        \n        return [NamedQubit(prefix + str(i)) for i in range(*args)]", "docstring": "Returns a range of NamedQubits.\n\nThe range returned starts with the prefix, and followed by a qubit for\neach number in the range, e.g.:\n\nNamedQubit.range(3, prefix=\"a\") -> [\"a1\", \"a2\", \"a3]\nNamedQubit.range(2, 4, prefix=\"a\") -> [\"a2\", \"a3]\n\nArgs:\n*args: Args to be passed to Python's standard range function.\nprefix: A prefix for constructed NamedQubits.\n\nReturns:\nA list of NamedQubits.", "source": "juraj-google-style"}
{"code": "def generate_sitemap(self, path='sitemap.xml', https=False):\n\t\t\n\t\tsitemap = russell.sitemap.generate_sitemap(self, https=https)\n\t\tself.write_file(path, sitemap)", "docstring": "Generate an XML sitemap.\n\nArgs:\npath (str): The name of the file to write to.\nhttps (bool): If True, links inside the sitemap with relative scheme\n(e.g. example.com/something) will be set to HTTPS. If False (the\ndefault), they will be set to plain HTTP.", "source": "juraj-google-style"}
{"code": "def set_timestamp(cls, filename: str, response: HTTPResponse):\n        \n        last_modified = response.fields.get('Last-Modified')\n\n        if not last_modified:\n            return\n\n        try:\n            last_modified = email.utils.parsedate(last_modified)\n        except ValueError:\n            _logger.exception('Failed to parse date.')\n            return\n\n        last_modified = time.mktime(last_modified)\n\n        os.utime(filename, (time.time(), last_modified))", "docstring": "Set the Last-Modified timestamp onto the given file.\n\nArgs:\nfilename: The path of the file\nresponse: Response", "source": "juraj-google-style"}
{"code": "def wait(self, timeout=None):\n    \n    if self._future:\n      try:\n        \n        self._future.exception(timeout)\n      except concurrent.futures.TimeoutError:\n        self._timeout()\n      self._refresh_state()\n    else:\n      \n      while not self.is_complete:\n        if timeout is not None:\n          if timeout <= 0:\n            self._timeout()\n          timeout -= Job._POLL_INTERVAL_SECONDS\n        time.sleep(Job._POLL_INTERVAL_SECONDS)\n    return self", "docstring": "Wait for the job to complete, or a timeout to happen.\n\nArgs:\ntimeout: how long to wait before giving up (in seconds); default None which means no timeout.\n\nReturns:\nThe Job", "source": "juraj-google-style"}
{"code": "def profile(self, num):\n    baseuri = (self._BASE_URI + 'company/{}'.format(num))\n    res = self.session.get(baseuri)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for company profile by company number.\n\nArgs:\nnum (str): Company number to search on.", "source": "codesearchnet"}
{"code": "def heightmap_get_normal(\n    hm: np.ndarray, x: float, y: float, waterLevel: float\n) -> Tuple[float, float, float]:\n    \n    cn = ffi.new(\"float[3]\")\n    lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)\n    return tuple(cn)", "docstring": "Return the map normal at given coordinates.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nx (float): The x coordinate.\ny (float): The y coordinate.\nwaterLevel (float): The heightmap is considered flat below this value.\n\nReturns:\nTuple[float, float, float]: An (x, y, z) vector normal.", "source": "juraj-google-style"}
{"code": "def CreateSmartShoppingAdGroup(client, campaign_id):\n    ad_group_service = client.GetService('AdGroupService', version='v201809')\n    ad_group = {'campaignId': campaign_id, 'name': ('Smart Shopping ad group \n    adgroup_operations = {'operator': 'ADD', 'operand': ad_group}\n    ad_group = ad_group_service.mutate(adgroup_operations)['value'][0]\n    ad_group_id = ad_group['id']\n    print(('AdGroup with name \"%s\" and ID \"%s\" was added.' % (ad_group['name'], ad_group_id)))\n    return ad_group_id", "docstring": "Adds a new Smart Shopping ad group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: the str ID of a Smart Shopping campaign.\nReturns:\nAn ad group ID.", "source": "codesearchnet"}
{"code": "def push_doc(self, document):\n        \n        msg = self._protocol.create('PUSH-DOC', document)\n        reply = self._send_message_wait_for_reply(msg)\n        if reply is None:\n            raise RuntimeError(\"Connection to server was lost\")\n        elif reply.header['msgtype'] == 'ERROR':\n            raise RuntimeError(\"Failed to push document: \" + reply.content['text'])\n        else:\n            return reply", "docstring": "Push a document to the server, overwriting any existing server-side doc.\n\nArgs:\ndocument : (Document)\nA Document to push to the server\n\nReturns:\nThe server reply", "source": "juraj-google-style"}
{"code": "def set_cache_policy(self, func):\n    if (func is None):\n        func = self.default_cache_policy\n    elif isinstance(func, bool):\n        func = (lambda unused_key, flag=func: flag)\n    self._cache_policy = func", "docstring": "Set the context cache policy function.\n\nArgs:\nfunc: A function that accepts a Key instance as argument and returns\na bool indicating if it should be cached.  May be None.", "source": "codesearchnet"}
{"code": "def cumulative_probabilities( self ):\n        \n        partition_function = np.sum( self.p )\n        return np.cumsum( self.p ) / partition_function", "docstring": "Cumulative sum of the relative probabilities for all possible jumps.\n\nArgs:\nNone\n\nReturns:\n(np.array): Cumulative sum of relative jump probabilities.", "source": "juraj-google-style"}
{"code": "def check_prerequisites(\n        prerequisites,\n        checker,\n        msg_tmpl='Prerequisites \"{}\" are required in method \"{}\" but not '\n        'found, please install them first.'):\n    \n\n    def wrap(func):\n\n        @functools.wraps(func)\n        def wrapped_func(*args, **kwargs):\n            requirements = [prerequisites] if isinstance(\n                prerequisites, str) else prerequisites\n            missing = []\n            for item in requirements:\n                if not checker(item):\n                    missing.append(item)\n            if missing:\n                print(msg_tmpl.format(', '.join(missing), func.__name__))\n                raise RuntimeError('Prerequisites not meet.')\n            else:\n                return func(*args, **kwargs)\n\n        return wrapped_func\n\n    return wrap", "docstring": "A decorator factory to check if prerequisites are satisfied.\n\nArgs:\nprerequisites (str of list[str]): Prerequisites to be checked.\nchecker (callable): The checker method that returns True if a\nprerequisite is meet, False otherwise.\nmsg_tmpl (str): The message template with two variables.\n\nReturns:\ndecorator: A specific decorator.", "source": "juraj-google-style"}
{"code": "def combine(args, part=None):\n    args = [cleanup(arg) for arg in args]\n    if (part is not None):\n        (parts, orders) = part\n        if (numpy.array(orders).size == 1):\n            orders = ([int(numpy.array(orders).item())] * len(args))\n        parts = numpy.array(parts).flatten()\n        for (i, arg) in enumerate(args):\n            (m, n) = (float(parts[i]), float(orders[i]))\n            l = len(arg)\n            args[i] = arg[int(((m / n) * l)):int((((m + 1) / n) * l))]\n    shapes = [arg.shape for arg in args]\n    size = (numpy.prod(shapes, 0)[0] * numpy.sum(shapes, 0)[1])\n    if (size > (10 ** 9)):\n        raise MemoryError('Too large sets')\n    if (len(args) == 1):\n        out = args[0]\n    elif (len(args) == 2):\n        out = combine_two(*args)\n    else:\n        arg1 = combine_two(*args[:2])\n        out = combine(([arg1] + args[2:]))\n    return out", "docstring": "All linear combination of a list of list.\n\nArgs:\nargs (numpy.ndarray) : List of input arrays.  Components to take linear\ncombination of with `args[i].shape=(N[i], M[i])` where N is to be\ntaken linear combination of and M is static.  M[i] is set to 1 if\nmissing.\n\nReturns:\n(numpy.array) : matrix of combinations with shape (numpy.prod(N),\nnumpy.sum(M)).\n\nExamples:\n>>> A, B = [1,2], [[4,4],[5,6]]\n>>> print(chaospy.quad.combine([A, B]))\n[[1. 4. 4.]\n[1. 5. 6.]\n[2. 4. 4.]\n[2. 5. 6.]]", "source": "codesearchnet"}
{"code": "def _ParseCString(self, page_data, string_offset):\n    cstring_map = self._GetDataTypeMap('cstring')\n    try:\n        value_string = self._ReadStructureFromByteStream(page_data[string_offset:], string_offset, cstring_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map string data at offset: 0x{0:08x} with error: {1!s}'.format(string_offset, exception))\n    return value_string.rstrip('\\x00')", "docstring": "Parses a C string from the page data.\n\nArgs:\npage_data (bytes): page data.\nstring_offset (int): offset of the string relative to the start\nof the page.\n\nReturns:\nstr: string.\n\nRaises:\nParseError: when the string cannot be parsed.", "source": "codesearchnet"}
{"code": "def label_search(self, label:str) -> List[dict]:\n        \n        ilx_rows = self.label2rows(self.local_degrade(label))\n        if not ilx_rows:\n            return None\n        else:\n            return ilx_rows", "docstring": "Returns the rows in InterLex associated with that label\n\nNote:\nPressumed to have duplicated labels in InterLex\nArgs:\nlabel: label of the entity you want to find\nReturns:\nNone or List[dict]", "source": "juraj-google-style"}
{"code": "def _overload_operator(cls, tensor_class, operator):\n    tensor_oper = getattr(tensor_class, operator)\n    tensor_oper = getattr(tensor_oper, '__func__', tensor_oper)\n    setattr(cls, operator, tensor_oper)", "docstring": "Overload an operator with the same implementation as a base Tensor class.\n\nWe pull the operator out of the class dynamically to avoid ordering issues.\n\nArgs:\ntensor_class: The (Composite)Tensor to get the method from.\noperator: string. The operator name.", "source": "github-repos"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError('Invalid current offset value less than zero.')\n\n    \n    \n    if self._current_offset >= self._size:\n      return b''\n\n    if size is None or self._current_offset + size > self._size:\n      size = self._size - self._current_offset\n\n    if self._tsk_attribute:\n      data = self._tsk_file.read_random(\n          self._current_offset, size, self._tsk_attribute.info.type,\n          self._tsk_attribute.info.id)\n    else:\n      data = self._tsk_file.read_random(self._current_offset, size)\n\n    \n    \n    \n    self._current_offset += len(data)\n\n    return data", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def insert(self, optional_root_locations_path):\n        \n        encountered_simple_optional = False\n        parent_location = self._root_location\n        for optional_root_location in optional_root_locations_path:\n            if encountered_simple_optional:\n                raise AssertionError(u'Encountered simple optional root location {} in path, but'\n                                     u'further locations are present. This should not happen: {}'\n                                     .format(optional_root_location, optional_root_locations_path))\n\n            if optional_root_location not in self._location_to_children:\n                \n                \n                encountered_simple_optional = True\n            else:\n                self._location_to_children[parent_location].add(optional_root_location)\n                parent_location = optional_root_location", "docstring": "Insert a path of optional Locations into the tree.\n\nEach OptionalTraversalTree object contains child Location objects as keys mapping to\nother OptionalTraversalTree objects.\n\nArgs:\noptional_root_locations_path: list of optional root Locations all except the last\nof which must be present in complex_optional_roots", "source": "juraj-google-style"}
{"code": "def promote_artifacts(self, promote_stage='latest'):\n    if (promote_stage.lower() == 'alpha'):\n        self._sync_to_uri(self.s3_canary_uri)\n    elif (promote_stage.lower() == 'canary'):\n        self._sync_to_uri(self.s3_latest_uri)\n    else:\n        self._sync_to_uri(self.s3_latest_uri)", "docstring": "Promote artifact version to dest.\n\nArgs:\npromote_stage (string): Stage that is being promoted", "source": "codesearchnet"}
{"code": "def read_int8(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack('%sb' % endian)", "docstring": "Read 1 byte as a signed integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def _merge_tensor_signatures(self, signatures):\n    sorted_update = []\n    if self._num_signature_dimensions() > 1:\n        signature_indices = self._signature_types()\n        for _, val in sorted(signatures.items(), key=lambda item: signature_indices[item[0]]):\n            sorted_update.append(val)\n        updates = array_ops_stack.stack(sorted_update, axis=0, name='merge_single_op_signatures')\n    elif self._num_signature_dimensions() == 1:\n        (_, val), = signatures.items()\n        updates = val\n    else:\n        raise ValueError('Cannot merge 0 signatures. Check the value passed for flag --signatures.')\n    return updates", "docstring": "Returns a tensor that merges the given signatures.\n\nArgs:\nsignatures: A dictionary of the signature updates from signature name to\na tensor of dimension [1].\nReturns:\nA tensor that concats the signature values in a predefined order.\nRaises:\nValueError: Unable to merge signatures.", "source": "github-repos"}
{"code": "def _ReadEncryptedData(self, read_size):\n    \n    encrypted_data = self._file_object.read(read_size)\n\n    read_count = len(encrypted_data)\n\n    self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])\n\n    self._decrypted_data, self._encrypted_data = (\n        self._decrypter.Decrypt(self._encrypted_data))\n\n    self._decrypted_data_size = len(self._decrypted_data)\n\n    return read_count", "docstring": "Reads encrypted data from the file-like object.\n\nArgs:\nread_size (int): number of bytes of encrypted data to read.\n\nReturns:\nint: number of bytes of encrypted data read.", "source": "juraj-google-style"}
{"code": "def on_predict_batch_end(self, batch, logs=None):", "docstring": "Called at the end of a batch in `predict` methods.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def merge_two_dictionaries(a, b, merge_lists=False):\n    key = None\n    try:\n        if ((a is None) or isinstance(a, (six.string_types, six.text_type, six.integer_types, float))):\n            a = b\n        elif isinstance(a, list):\n            if isinstance(b, list):\n                if merge_lists:\n                    a.extend(b)\n                else:\n                    a = b\n            else:\n                a.append(b)\n        elif isinstance(a, (dict, UserDict)):\n            if isinstance(b, (dict, UserDict)):\n                for key in b:\n                    if (key in a):\n                        a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists)\n                    else:\n                        a[key] = b[key]\n            else:\n                raise ValueError(('Cannot merge non-dict \"%s\" into dict \"%s\"' % (b, a)))\n        else:\n            raise ValueError(('NOT IMPLEMENTED \"%s\" into \"%s\"' % (b, a)))\n    except TypeError as e:\n        raise ValueError(('TypeError \"%s\" in key \"%s\" when merging \"%s\" into \"%s\"' % (e, key, b, a)))\n    return a", "docstring": "Merges b into a and returns merged result\n\nNOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen\n\nArgs:\na (DictUpperBound): dictionary to merge into\nb (DictUpperBound): dictionary to merge from\nmerge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.\n\nReturns:\nDictUpperBound: Merged dictionary", "source": "codesearchnet"}
{"code": "def nb_ll_row(params, data_row):\n    \n    p = params[0]\n    r = params[1]\n    n = len(data_row)\n    ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))\n    ll -= n*gammaln(r)\n    ll += np.sum(data_row)*np.log(p)\n    ll += n*r*np.log(1-p)\n    return -ll", "docstring": "returns the negative LL of a single row.\n\nArgs:\nparams (array) - [p, r]\ndata_row (array) - 1d array of data\n\nReturns:\nLL of row", "source": "juraj-google-style"}
{"code": "def __init__(self, submission_id, submissions, storage_bucket):\n    \n    super(DefenseSubmission, self).__init__(submission_id, submissions,\n                                            storage_bucket)\n    if self.type != TYPE_DEFENSE:\n      raise WorkerError('Incorrect defense type for submission \"{0}\"'.format(\n          submission_id))", "docstring": "Initializes DefenseSubmission.\n\nArgs:\nsubmission_id: ID of the submission\nsubmissions: instance of CompetitionSubmissions with all submissions\nstorage_bucket: storage bucket where all submissions are stored\n\nRaises:\nWorkerError: if submission has incorrect type", "source": "juraj-google-style"}
{"code": "def __init__(self, feed_fn):\n    self.feed_fn = feed_fn", "docstring": "Initializes a `FeedFnHook`.\n\nArgs:\nfeed_fn: function that takes no arguments and returns `dict` of `Tensor`\nto feed.", "source": "github-repos"}
{"code": "def check_denotation(target_values, predicted_values):\n    if (len(target_values) != len(predicted_values)):\n        return False\n    for target in target_values:\n        if (not any((target.match(pred) for pred in predicted_values))):\n            return False\n    return True", "docstring": "Return True if the predicted denotation is correct.\n\nArgs:\ntarget_values (list[Value])\npredicted_values (list[Value])\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TwitterIOSContactEventData()\n    event_data.description = self._GetRowValue(query_hash, row, 'description')\n    event_data.followers_count = self._GetRowValue(\n        query_hash, row, 'followersCount')\n    event_data.following = self._GetRowValue(query_hash, row, 'following')\n    event_data.following_count = self._GetRowValue(\n        query_hash, row, 'followingCount')\n    event_data.location = self._GetRowValue(query_hash, row, 'location')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.profile_url = self._GetRowValue(\n        query_hash, row, 'profileImageUrl')\n    event_data.query = query\n    event_data.screen_name = self._GetRowValue(query_hash, row, 'screenName')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n\n    timestamp = self._GetRowValue(query_hash, row, 'createdDate')\n    if timestamp:\n      \n      timestamp = int(timestamp)\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'updatedAt')\n    if timestamp:\n      \n      timestamp = int(timestamp)\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_UPDATE)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a contact row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def _send_json(self, method, path, data):\n        \n        headers = {'Content-type': 'application/json'}\n        return self._make_request(method, path, data=data, headers=headers)", "docstring": "Make a application/json request.\n\nArgs:\n`method`: The method of the request (POST or PUT).\n`path`: The path to the resource.\n`data`: The JSON-encoded data.\nReturns:\nThe content of the response.\nRaises:\nAn exception depending on the HTTP status code of the response.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, client=None):\n        \n        self._name = name\n        self.client = client\n        self._state = None", "docstring": "Create a Thing.\n\nArgs:\nname (str): name of the Thing. This corresponds to the\nAWS IoT Thing name.\nclient (str): MQTT client connection to use. This can be set\nanytime before publishing Thing messages to the server.", "source": "juraj-google-style"}
{"code": "def _apply(self, ctx: ExtensionContext) -> AugmentedDict:\n        \n        node_key, node_value = ctx.node\n\n        def process(pattern: Pattern[str], _str: str) -> str:\n            _match = pattern.match(_str)\n            if _match is None:\n                return _str\n            \n            \n            placeholder, envvar = _match.group(1), _match.group(2)\n            envvalue = os.environ.get(envvar, None)\n            if envvalue is None and self.fail_on_unset:\n                raise ExtensionError(\"Environment variable '{}' is unset.\".format(envvar))\n\n            return _str.replace(placeholder, envvalue or self.default)\n\n        _pattern = re.compile(self.__pattern__)\n        node_key = process(_pattern, node_key)\n        node_value = process(_pattern, node_value)\n\n        return {node_key: node_value}", "docstring": "Replaces any {{env::*}} directives with it's actual environment variable value or a default.\n\nArgs:\nctx: The processing context.\n\nReturns:\nReturns the altered node key and value.", "source": "juraj-google-style"}
{"code": "def FetchAllGraphSeries(label, report_type, period=None, token=None):\n    if _ShouldUseLegacyDatastore():\n        return _FetchAllGraphSeriesFromTheLegacyDB(label, report_type, period=period, token=token)\n    if (period is None):\n        time_range = None\n    else:\n        range_end = rdfvalue.RDFDatetime.Now()\n        time_range = time_utils.TimeRange((range_end - period), range_end)\n    return data_store.REL_DB.ReadAllClientGraphSeries(label, report_type, time_range=time_range)", "docstring": "Fetches graph series for the given label and report-type from the DB.\n\nArgs:\nlabel: Client label to fetch data for.\nreport_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.\nperiod: rdfvalue.Duration specifying how far back in time to fetch data. If\nnot provided, all data for the given label and report-type will be\nreturned.\ntoken: ACL token to use for reading from the legacy (non-relational)\ndatastore.\n\nRaises:\nAFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected\nreport-data type is encountered.\n\nReturns:\nA dict mapping timestamps to graph-series. The timestamps\nrepresent when the graph-series were written to the datastore.", "source": "codesearchnet"}
{"code": "def get(self, name, *default):\n    curr = self.values\n    for part in name.split('.'):\n        if (part in curr):\n            curr = curr[part]\n        elif default:\n            return default[0]\n        else:\n            fmt = \"Context value '{}' does not exist:\\n{}\"\n            raise AttributeError(fmt.format(name, util.yaml_dump(self.values)))\n    return curr", "docstring": "Get context value with the given name and optional default.\n\nArgs:\nname (str):\nThe name of the context value.\n*default (Any):\nIf given and the key doesn't not exist, this will be returned\ninstead. If it's not given and the context value does not exist,\n`AttributeError` will be raised\n\nReturns:\nThe requested context value.  If the value does not exist it will\nreturn `default` if give or raise `AttributeError`.\n\nRaises:\nAttributeError: If the value does not exist and `default` was not\ngiven.", "source": "codesearchnet"}
{"code": "def is_text(self):\n    return (self.type in [self._TYPE_PASTE, self._TYPE_TEXT, self._TYPE_TWEET])", "docstring": "Tells if this message is a text message.\n\nReturns:\nbool. Success", "source": "codesearchnet"}
{"code": "def parse_step(step_name):\n    prefix = 'step'\n    step_name = step_name.lower().replace(' ', '_')\n    step_name = step_name[len(prefix):] if prefix and step_name.startswith(prefix) else step_name\n    return step_name.strip(':_')", "docstring": "Replaces white spaces and removes 'Step:' label\n\nArgs:\nstep_name(str): step name passed in metric ParDo\n\nReturns:\nlower case step name without namespace and step label", "source": "github-repos"}
{"code": "def publish(cls, message, client_filter=None):\n    with cls._lock:\n        for client in cls.subscribers:\n            if ((not client_filter) or client_filter(client)):\n                client.send(message)", "docstring": "Publish messages to subscribers.\n\nArgs:\nmessage: The message to publish.\nclient_filter: A filter function to call passing in each client. Only\nclients for whom the function returns True will have the\nmessage sent to them.", "source": "codesearchnet"}
{"code": "def teardown(self, *args, **kwargs):\n    pass", "docstring": "Called to clean up an instance before it is discarded.\n\nIf you are using Dataflow, you need to enable Dataflow Runner V2\nbefore using this feature.\n\nArgs:\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"}
{"code": "def index_add(x, idx, y):\n    return _index_update_helper(tf_np.ndarray._with_index_add, x, idx, y)", "docstring": "Pure equivalent of `x[idx] += y`.\n\nReturns the value of x that would result from the NumPy-style indexed\nassignment `x[idx] += y`. Because it's a pure function, `x` itself won't be\nchanged.\n\nArgs:\nx: an array with the values to be updated.\nidx: a Numpy-style index, consisting of `None`, integers, slice objects,\nellipses, ndarrays with integer dtypes, or a tuple of the above.\ny: the array of updates. `y` must be broadcastable to the shape of the array\nthat would be returned by `x[idx]`.\n\nReturns:\nThe updated version of `x`.", "source": "github-repos"}
{"code": "def guess_depth(packages):\n    \n    if len(packages) == 1:\n        return packages[0].count('.') + 2\n    return min(p.count('.') for p in packages) + 1", "docstring": "Guess the optimal depth to use for the given list of arguments.\n\nArgs:\npackages (list of str): list of packages.\n\nReturns:\nint: guessed depth to use.", "source": "juraj-google-style"}
{"code": "def _separate_string(string: str, stride: int, separator: str) -> str:\n    result = ''\n    for i, c in enumerate(string):\n        if i > 0 and i % stride == 0:\n            result += separator\n        result += c\n    return result", "docstring": "Returns a separated string by separator at multiples of stride.\n\nFor example, the input:\n* string: 'thequickbrownfoxjumpedoverthelazydog'\n* stride: 3\n* separator: '-'\n\nWould produce a return value of:\n'the-qui-ckb-row-nfo-xju-mpe-dov-ert-hel-azy-dog'\n\nArgs:\nstring: The string to split.\nstride: The interval to insert the separator at.\nseparator: The string to insert at every stride interval.\n\nReturns:\nThe original string with the separator present at every stride interval.", "source": "github-repos"}
{"code": "def generate_string(self, initial_logits, initial_state, sequence_length):\n    current_logits = initial_logits\n    current_state = initial_state\n    generated_letters = []\n    for _ in range(sequence_length):\n        char_index = tf.squeeze(tf.multinomial(current_logits, 1))\n        char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)\n        generated_letters.append(char_one_hot)\n        (gen_out_seq, current_state) = self._core(tf.nn.relu(self._embed_module(char_one_hot)), current_state)\n        current_logits = self._output_module(gen_out_seq)\n    generated_string = tf.stack(generated_letters)\n    return generated_string", "docstring": "Builds sub-graph to generate a string, sampled from the model.\n\nArgs:\ninitial_logits: Starting logits to sample from.\ninitial_state: Starting state for the RNN core.\nsequence_length: Number of characters to sample.\n\nReturns:\nA Tensor of characters, with dimensions `[sequence_length, batch_size,\noutput_size]`.", "source": "codesearchnet"}
{"code": "def from_file(cls, path):\n\n        \n\n        with open(path, 'r', errors='replace') as f:\n            return cls(f.read())", "docstring": "Create a text from a file.\n\nArgs:\npath (str): The file path.", "source": "juraj-google-style"}
{"code": "def _AddPropertiesForNonRepeatedScalarField(field, cls):\n    proto_field_name = field.name\n    property_name = _PropertyName(proto_field_name)\n    type_checker = type_checkers.GetTypeChecker(field)\n    default_value = field.default_value\n    valid_values = set()\n    is_proto3 = (field.containing_type.syntax == 'proto3')\n\n    def getter(self):\n        return self._fields.get(field, default_value)\n    getter.__module__ = None\n    getter.__doc__ = ('Getter for %s.' % proto_field_name)\n    clear_when_set_to_default = (is_proto3 and (not field.containing_oneof))\n\n    def field_setter(self, new_value):\n        new_value = type_checker.CheckValue(new_value)\n        if (clear_when_set_to_default and (not new_value)):\n            self._fields.pop(field, None)\n        else:\n            self._fields[field] = new_value\n        if (not self._cached_byte_size_dirty):\n            self._Modified()\n    if field.containing_oneof:\n\n        def setter(self, new_value):\n            field_setter(self, new_value)\n            self._UpdateOneofState(field)\n    else:\n        setter = field_setter\n    setter.__module__ = None\n    setter.__doc__ = ('Setter for %s.' % proto_field_name)\n    doc = ('Magic attribute generated for \"%s\" proto field.' % proto_field_name)\n    setattr(cls, property_name, property(getter, setter, doc=doc))", "docstring": "Adds a public property for a nonrepeated, scalar protocol message field.\nClients can use this property to get and directly set the value of the field.\nNote that when the client sets the value of a field by using this property,\nall necessary \"has\" bits are set as a side-effect, and we also perform\ntype-checking.\n\nArgs:\nfield: A FieldDescriptor for this field.\ncls: The class we're constructing.", "source": "codesearchnet"}
{"code": "def __init__(self, function_name, level=1, children_inputs_mappings=None, **kwargs):\n    self._function_name = function_name\n    self._level = level\n    if self._level == 1:\n        assert children_inputs_mappings is None\n    else:\n        assert isinstance(children_inputs_mappings, dict)\n    self._children_inputs_mappings = children_inputs_mappings\n    if self._children_inputs_mappings is not None:\n        self._validate_children_inputs_mappings(self._children_inputs_mappings)\n    self._unique_function_id = _uuid.uuid1().hex\n    self._attrs_to_store_later = kwargs\n    self._stored_attrs = False\n    self._inputs = OpHint.OpHintArgumentTracker(self._function_name, self._unique_function_id, 'InputHint', OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings)\n    self._outputs = OpHint.OpHintArgumentTracker(self._function_name, self._unique_function_id, 'OutputHint', OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level, self._children_inputs_mappings)", "docstring": "Create a OpHint.\n\nArgs:\nfunction_name: Name of the function (the custom op name in tflite)\nlevel: OpHint level.\nchildren_inputs_mappings: Children OpHint inputs/outputs mapping.\nchildren_inputs_mappings should like below:\n\"parent_first_child_input\":\n[{\"parent_input_index\": num, \"child_input_index\": num}, ...]\n\"parent_last_child_output\":\n[{\"parent_output_index\": num, \"child_output_index\": num}, ...]\n\"internal_children_input_output\":\n[{\"child_input_index\": num, \"child_output_index\": num}, ...]\n**kwargs: Keyword arguments of any constant attributes for the function.", "source": "github-repos"}
{"code": "def exit_hook(callable, once=True):\n    if (once and (callable in ExitHooks)):\n        return\n    ExitHooks.append(callable)", "docstring": "r\"\"\"A decorator that makes the decorated function to run while ec exits.\n\nArgs:\ncallable (callable): The target callable.\nonce (bool): Avoids adding a func to the hooks, if it has been added already. Defaults to True.\n\nNote:\nHooks are processedd in a LIFO order.", "source": "codesearchnet"}
{"code": "def play_alert(zones, alert_uri, alert_volume=20, alert_duration=0, fade_back=False):\n    \n\n    \n    for zone in zones:\n        zone.snap = Snapshot(zone)\n        zone.snap.snapshot()\n        print('snapshot of zone: {}'.format(zone.player_name))\n\n    \n    for zone in zones:\n        \n        if zone.is_coordinator:\n            if not zone.is_playing_tv:  \n                \n                trans_state = zone.get_current_transport_info()\n                if trans_state['current_transport_state'] == 'PLAYING':\n                    zone.pause()\n\n        \n        zone.volume = alert_volume\n        zone.mute = False\n\n    \n    print('will play: {} on all coordinators'.format(alert_uri))\n    for zone in zones:\n        if zone.is_coordinator:\n            zone.play_uri(uri=alert_uri, title='Sonos Alert')\n\n    \n    time.sleep(alert_duration)\n\n    \n    for zone in zones:\n        print('restoring {}'.format(zone.player_name))\n        zone.snap.restore(fade=fade_back)", "docstring": "Demo function using soco.snapshot across multiple Sonos players.\n\nArgs:\nzones (set): a set of SoCo objects\nalert_uri (str): uri that Sonos can play as an alert\nalert_volume (int): volume level for playing alert (0 tp 100)\nalert_duration (int): length of alert (if zero then length of track)\nfade_back (bool): on reinstating the zones fade up the sound?", "source": "juraj-google-style"}
{"code": "def add_ast_fn(d, spec, parent_function=None):\n    if (d['type'] == 'Function'):\n        ast_fn = Function(d['function']['name'], spec, parent_function=parent_function)\n        for arg in d['args']:\n            if (arg['type'] == 'Function'):\n                ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn))\n            elif (arg['type'] == 'NSArg'):\n                ast_fn.add_argument(NSArg(arg['nsarg']['ns'], arg['nsarg']['ns_val'], ast_fn))\n            elif (arg['type'] == 'StrArg'):\n                ast_fn.add_argument(StrArg(arg['arg'], ast_fn))\n    return ast_fn", "docstring": "Convert dict AST to object AST Function\n\nArgs:\nast_fn: AST object Function\nd: AST as dictionary\nspec: BEL Specification\n\nReturn:\nast_fn", "source": "codesearchnet"}
{"code": "def post_process(self, outputs, target_sizes):\n    logging.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if len(out_logits) != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    if target_sizes.shape[1] != 2:\n        raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n    prob = out_logits.sigmoid()\n    topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    img_h, img_w = target_sizes.unbind(1)\n    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n    boxes = boxes * scale_fct[:, None, :]\n    results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n    return results", "docstring": "Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the Pascal VOC format (xmin, ymin, xmax, ymax).\nOnly supports PyTorch.\n\nArgs:\noutputs ([`ConditionalDetrObjectDetectionOutput`]):\nRaw outputs of the model.\ntarget_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\nTensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original\nimage size (before any data augmentation). For visualization, this should be the image size after data\naugment, but before padding.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def validate(self, scope: ValidationScope = ValidationScope.all,\n                 ctype: ContentType = ContentType.config) -> None:\n        \n        self.schema_node._validate(self, scope, ctype)", "docstring": "Validate the receiver's value.\n\nArgs:\nscope: Scope of the validation (syntax, semantics or all).\nctype: Receiver's content type.\n\nRaises:\nSchemaError: If the value doesn't conform to the schema.\nSemanticError: If the value violates a semantic constraint.\nYangTypeError: If the value is a scalar of incorrect type.", "source": "juraj-google-style"}
{"code": "def get_block(self, block_id):\n        \n\n        block = backend.query.get_block(self.connection, block_id)\n        latest_block = self.get_latest_block()\n        latest_block_height = latest_block['height'] if latest_block else 0\n\n        if not block and block_id > latest_block_height:\n            return\n\n        result = {'height': block_id,\n                  'transactions': []}\n\n        if block:\n            transactions = backend.query.get_transactions(self.connection, block['transactions'])\n            result['transactions'] = [t.to_dict() for t in Transaction.from_db(self, transactions)]\n\n        return result", "docstring": "Get the block with the specified `block_id`.\n\nReturns the block corresponding to `block_id` or None if no match is\nfound.\n\nArgs:\nblock_id (int): block id of the block to get.", "source": "juraj-google-style"}
{"code": "def predict_proba(self, x, batch_size=32, verbose=0):\n    warnings.warn('`model.predict_proba()` is deprecated and will be removed after 2021-01-01. Please use `model.predict()` instead.')\n    preds = self.predict(x, batch_size, verbose)\n    if preds.min() < 0.0 or preds.max() > 1.0:\n        logging.warning('Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).')\n    return preds", "docstring": "Generates class probability predictions for the input samples.\n\nThe input samples are processed batch by batch.\n\nArgs:\nx: input data, as a Numpy array or list of Numpy arrays\n(if the model has multiple inputs).\nbatch_size: integer.\nverbose: verbosity mode, 0 or 1.\n\nReturns:\nA Numpy array of probability predictions.", "source": "github-repos"}
{"code": "def get(self, key, default=None):\n    if (key.count('.') == 0):\n        return super(DotDict, self).get(key, default)\n    value = default\n    (first, remainder) = key.split('.', 1)\n    if (first in self):\n        value = super(DotDict, self).get(first, default)\n        if isinstance(value, (dict, DotDict)):\n            return DotDict(value).get(remainder, default)\n    return value", "docstring": "Get a value from the `DotDict`.\n\nThe `key` parameter can either be a regular string key,\ne.g. \"foo\", or it can be a string key with dot notation,\ne.g. \"foo.bar.baz\", to signify a nested lookup.\n\nThe default value is returned if any level of the key's\ncomponents are not found.\n\nArgs:\nkey (str): The key to get the value for.\ndefault: The return value should the given key\nnot exist in the `DotDict`.", "source": "codesearchnet"}
{"code": "def create_interconnect(self, location_entries, timeout=(- 1)):\n    return self._helper.create(location_entries, uri=self.locations_uri, timeout=timeout)", "docstring": "Creates an interconnect at the given location.\n\nWarning:\nIt does not create the LOGICAL INTERCONNECT itself.\nIt will fail if no interconnect is already present on the specified position.\n\nArgs:\nlocation_entries (dict): Dictionary with location entries.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Created interconnect.", "source": "codesearchnet"}
{"code": "def identify(self, token):\n        \n\n        payload = {\n            'op': 2,\n            'd': {\n                'token': self.token,\n                'properties': {\n                    '$os': sys.platform,\n                    '$browser': 'legobot',\n                    '$device': 'legobot'\n                },\n                'compress': False,\n                'large_threshold': 250\n            }\n        }\n        payload['d']['synced_guilds'] = []\n        logger.info(\"Identifying with the following message: \\\n                    {}\".format(payload))\n        self.ws.send(json.dumps(payload))\n        return", "docstring": "Identifies to the websocket endpoint\n\nArgs:\ntoken (string): Discord bot token", "source": "juraj-google-style"}
{"code": "def assert_split_at_fraction_exhaustive(source, start_position=None, stop_position=None, perform_multi_threaded_test=True):\n    expected_items = read_from_source(source, start_position, stop_position)\n    if not expected_items:\n        raise ValueError('Source %r is empty.' % source)\n    if len(expected_items) == 1:\n        raise ValueError('Source %r only reads a single item.' % source)\n    all_non_trivial_fractions = []\n    any_successful_fractions = False\n    any_non_trivial_fractions = False\n    for i in range(len(expected_items)):\n        stats = SplitFractionStatistics([], [])\n        assert_split_at_fraction_binary(source, expected_items, i, 0.0, None, 1.0, None, stats)\n        if stats.successful_fractions:\n            any_successful_fractions = True\n        if stats.non_trivial_fractions:\n            any_non_trivial_fractions = True\n        all_non_trivial_fractions.append(stats.non_trivial_fractions)\n    if not any_successful_fractions:\n        raise ValueError('SplitAtFraction test completed vacuously: no successful split fractions found')\n    if not any_non_trivial_fractions:\n        raise ValueError('SplitAtFraction test completed vacuously: no non-trivial split fractions found')\n    if not perform_multi_threaded_test:\n        return\n    num_total_trials = 0\n    for i in range(len(expected_items)):\n        non_trivial_fractions = [2.0]\n        non_trivial_fractions.extend(all_non_trivial_fractions[i])\n        min_non_trivial_fraction = min(non_trivial_fractions)\n        if min_non_trivial_fraction == 2.0:\n            continue\n        num_trials = 0\n        have_success = False\n        have_failure = False\n        thread_pool = _ThreadPool(2)\n        try:\n            while True:\n                num_trials += 1\n                if num_trials > MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM:\n                    _LOGGER.warning('After %d concurrent splitting trials at item \n                    break\n                if _assert_split_at_fraction_concurrent(source, expected_items, i, min_non_trivial_fraction, thread_pool):\n                    have_success = True\n                else:\n                    have_failure = True\n                if have_success and have_failure:\n                    _LOGGER.info('%d trials to observe both success and failure of concurrent splitting at item \n                    break\n        finally:\n            thread_pool.close()\n        num_total_trials += num_trials\n        if num_total_trials > MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL:\n            _LOGGER.warning('After %d total concurrent splitting trials, considered only %d items, giving up.', num_total_trials, i)\n            break\n    _LOGGER.info('%d total concurrent splitting trials for %d items', num_total_trials, len(expected_items))", "docstring": "Performs and tests dynamic work rebalancing exhaustively.\n\nAsserts that for each possible start position, a source can be split at\nevery interesting fraction (halfway between two fractions that differ by at\nleast one item) and the results are consistent if a split succeeds.\nVerifies multi threaded splitting as well.\n\nArgs:\nsource (~apache_beam.io.iobase.BoundedSource): the source to perform\ndynamic splitting on.\nperform_multi_threaded_test (bool): if :data:`True` performs a\nmulti-threaded test, otherwise this test is skipped.\n\nRaises:\nValueError: if the exhaustive splitting test fails.", "source": "github-repos"}
{"code": "def search(cls, session, queries, out_type):\n    cls._check_implements('search')\n    domain = cls.get_search_domain(queries)\n    return cls(('/search/%s.json' % cls.__endpoint__), data={'query': str(domain)}, session=session, out_type=out_type)", "docstring": "Search for a record given a domain.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nqueries (helpscout.models.Domain or iter): The queries for the\ndomain. If a ``Domain`` object is provided, it will simply be\nreturned. Otherwise, a ``Domain`` object will be generated\nfrom the complex queries. In this case, the queries should\nconform to the interface in\n:func:`helpscout.domain.Domain.from_tuple`.\nout_type (helpscout.BaseModel): The type of record to output. This\nshould be provided by child classes, by calling super.\n\nReturns:\nRequestPaginator(output_type=helpscout.BaseModel): Results\niterator of the ``out_type`` that is defined.", "source": "codesearchnet"}
{"code": "def _pack_images(images, rows, cols):\n    shape = onp.shape(images)\n    (width, height, depth) = shape[(- 3):]\n    images = onp.reshape(images, ((- 1), width, height, depth))\n    batch = onp.shape(images)[0]\n    rows = onp.minimum(rows, batch)\n    cols = onp.minimum((batch \n    images = images[:(rows * cols)]\n    images = onp.reshape(images, (rows, cols, width, height, depth))\n    images = onp.transpose(images, [0, 2, 1, 3, 4])\n    images = onp.reshape(images, [(rows * width), (cols * height), depth])\n    return images", "docstring": "Helper utility to make a tiled field of images from numpy arrays.\n\nArgs:\nimages: Image tensor in shape [N, W, H, C].\nrows: Number of images per row in tiled image.\ncols: Number of images per column in tiled image.\n\nReturns:\nA tiled image of shape [W * rows, H * cols, C].\nTruncates incomplete rows.", "source": "codesearchnet"}
{"code": "def remove_pos_arg_placeholders(alias_command):\n    split_command = shlex.split(alias_command)\n    boundary_index = len(split_command)\n    for (i, subcommand) in enumerate(split_command):\n        if ((not re.match('^[a-z]', subcommand.lower())) or (i > COLLISION_CHECK_LEVEL_DEPTH)):\n            boundary_index = i\n            break\n    return ' '.join(split_command[:boundary_index]).lower()", "docstring": "Remove positional argument placeholders from alias_command.\n\nArgs:\nalias_command: The alias command to remove from.\n\nReturns:\nThe alias command string without positional argument placeholder.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0) + [1]\n    return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def get_key(self, match_key, num_results=1, best=True, **dfilter):\n    return get_key(match_key, self.keys(), num_results=num_results, best=best, **dfilter)", "docstring": "Get multiple fully-specified keys that match the provided query.\n\nArgs:\nkey (DatasetID): DatasetID of query parameters to use for\nsearching. Any parameter that is `None`\nis considered a wild card and any match is\naccepted. Can also be a string representing the\ndataset name or a number representing the dataset\nwavelength.\nnum_results (int): Number of results to return. If `0` return all,\nif `1` return only that element, otherwise\nreturn a list of matching keys.\n**dfilter (dict): See `get_key` function for more information.", "source": "codesearchnet"}
{"code": "def CheckPath(self, path, path_segment_separator=None):\n    if (not self._case_sensitive):\n        path = path.lower()\n    if (path_segment_separator is None):\n        path_segment_separator = self._path_segment_separator\n    path_segments = path.split(path_segment_separator)\n    number_of_path_segments = len(path_segments)\n    scan_object = self._root_node\n    while scan_object:\n        if isinstance(scan_object, py2to3.STRING_TYPES):\n            break\n        if (scan_object.path_segment_index >= number_of_path_segments):\n            scan_object = scan_object.default_value\n            continue\n        path_segment = path_segments[scan_object.path_segment_index]\n        scan_object = scan_object.GetScanObject(path_segment)\n    if (not isinstance(scan_object, py2to3.STRING_TYPES)):\n        return False\n    filter_path_segments = scan_object.split(self._path_segment_separator)\n    return (filter_path_segments == path_segments)", "docstring": "Checks if a path matches the scan tree-based path filter.\n\nArgs:\npath: a string containing the path.\npath_segment_separator: optional string containing the path segment\nseparator. None defaults to the path segment\nseparator that was set when the path filter\nscan tree was initialized.\n\nReturns:\nA boolean indicating if the path matches the filter.", "source": "codesearchnet"}
{"code": "def post(self, service, data):\n    url = self._url_format(service)\n    data = Base._data_to_json(data)\n    headers = {'content-type': 'application/json'}\n    return self.rest_action(self._session.post, url, data=data, headers=headers)", "docstring": "Generic POST operation for sending data to Learning Modules API.\n\nData should be a JSON string or a dict.  If it is not a string,\nit is turned into a JSON string for the POST body.\n\nArgs:\nservice (str): The endpoint service to use, i.e. gradebook\ndata (json or dict): the data payload\n\nRaises:\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\nlist: the json-encoded content of the response", "source": "codesearchnet"}
{"code": "def save_state_regularly(self, fname, frequency=600):\n        \n        self.save_state(fname)\n        loop = asyncio.get_event_loop()\n        self.save_state_loop = loop.call_later(frequency,\n                                               self.save_state_regularly,\n                                               fname,\n                                               frequency)", "docstring": "Save the state of node with a given regularity to the given\nfilename.\n\nArgs:\nfname: File name to save retularly to\nfrequency: Frequency in seconds that the state should be saved.\nBy default, 10 minutes.", "source": "juraj-google-style"}
{"code": "def latch_config_variables(self):\n    return {desc.name: desc.latch() for desc in self._config_variables.values()}", "docstring": "Latch the current value of all config variables as python objects.\n\nThis function will capture the current value of all config variables\nat the time that this method is called.  It must be called after\nstart() has been called so that any default values in the config\nvariables have been properly set otherwise DataError will be thrown.\n\nConceptually this method performs the operation that happens just\nbefore a tile executive hands control to the tile application\nfirmware. It latches in the value of all config variables at that\npoint in time.\n\nFor convenience, this method does all necessary binary -> python\nnative object conversion so that you just get python objects back.\n\nReturns:\ndict: A dict of str -> object with the config variable values.\n\nThe keys in the dict will be the name passed to\n`declare_config_variable`.\n\nThe values will be the python objects that result from calling\nlatch() on each config variable.  Consult ConfigDescriptor.latch()\nfor documentation on how that method works.", "source": "codesearchnet"}
{"code": "def is_end_node(node):\n    \n    return (isinstance(node, ast.Expr) and\n            isinstance(node.value, ast.Name) and\n            node.value.id == 'end')", "docstring": "Checks if a node is the \"end\" keyword.\n\nArgs:\nnode: AST node.\n\nReturns:\nTrue if the node is the \"end\" keyword, otherwise False.", "source": "juraj-google-style"}
{"code": "def _next_layer_gather_index(bc, original_rp, broadcast_rp):\n    old_value_rowids = array_ops.gather(bc.gather_index, broadcast_rp.value_rowids())\n\n    def gi_no_broadcast():\n        old_row_starts = array_ops.gather(original_rp.row_splits(), old_value_rowids)\n        expected_row_lengths = array_ops.gather(params=original_rp.row_lengths(), indices=bc.gather_index)\n        actual_row_lengths = broadcast_rp.row_lengths()\n        check_valid = check_ops.assert_equal(expected_row_lengths, actual_row_lengths, message='Cannot broadcast')\n        gather_index = old_row_starts + broadcast_rp.offsets_in_rows()\n        return control_flow_ops.with_dependencies([check_valid], gather_index)\n\n    def gi_broadcast():\n        return old_value_rowids\n    if not original_rp.is_uniform():\n        return gi_no_broadcast()\n    do_broadcast = math_ops.equal(original_rp.uniform_row_length(), constant_op.constant(1, original_rp.dtype))\n    gather_index = cond.cond(do_broadcast, true_fn=gi_broadcast, false_fn=gi_no_broadcast)\n    return gather_index", "docstring": "Create the next layer gather_index whether or not a broadcast happens.\n\n*----------bc-------->*\n|                     |\noriginal_rp           broadcast_rp\n|                     |\n\\|/                   \\|/\n*--next_broadcaster-->*\n\nArgs:\nbc: the old broadcaster.\noriginal_rp: the original row partition.\nbroadcast_rp: the target row partition.\n\nReturns:\nthe gather_index for next_broadcaster.\nRaises:\nInvalidArgumentError if the shapes are incompatible.", "source": "github-repos"}
{"code": "def set(self, *args):\n    assert (len(args) in (1, 2))\n    if (len(args) == 1):\n        value = args[0]\n        self._impl.set(value)\n    else:\n        (index, value) = args\n        if isinstance(value, Real):\n            self._impl.setTplDbl(Tuple(index)._impl, value)\n        elif isinstance(value, basestring):\n            self._impl.setTplStr(Tuple(index)._impl, value)\n        else:\n            raise TypeError", "docstring": "Set the value of a single instance of this parameter.\n\nArgs:\nargs: value if the parameter is scalar, index and value\notherwise.\n\nRaises:\nRuntimeError: If the entity has been deleted in the underlying\nAMPL.\n\nTypeError: If the parameter is not scalar and the index is not\nprovided.", "source": "codesearchnet"}
{"code": "def has_title(self, title, **kwargs):\n        \n\n        try:\n            self.assert_title(title, **kwargs)\n            return True\n        except ExpectationNotMet:\n            return False", "docstring": "Checks if the page has the given title.\n\nArgs:\ntitle (str | RegexObject): The string or regex that the title should match.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nbool: Whether it matches.", "source": "juraj-google-style"}
{"code": "def draw_text(img, pos, text, color, font_scale=0.4):\n    img = img.astype(np.uint8)\n    (x0, y0) = (int(pos[0]), int(pos[1]))\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    ((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, 1)\n    if ((x0 + text_w) > img.shape[1]):\n        x0 = (img.shape[1] - text_w)\n    if ((y0 - int((1.15 * text_h))) < 0):\n        y0 = int((1.15 * text_h))\n    back_topleft = (x0, (y0 - int((1.3 * text_h))))\n    back_bottomright = ((x0 + text_w), y0)\n    cv2.rectangle(img, back_topleft, back_bottomright, color, (- 1))\n    text_bottomleft = (x0, (y0 - int((0.25 * text_h))))\n    cv2.putText(img, text, text_bottomleft, font, font_scale, (222, 222, 222), lineType=cv2.LINE_AA)\n    return img", "docstring": "Draw text on an image.\n\nArgs:\npos (tuple): x, y; the position of the text\ntext (str):\nfont_scale (float):\ncolor (tuple): a 3-tuple BGR color in [0, 255]", "source": "codesearchnet"}
{"code": "def run(self, gin):\n        \n        with ScratchDir(\".\"):\n            p = subprocess.Popen(\n                self._gulp_cmd, stdout=subprocess.PIPE,\n                stdin=subprocess.PIPE, stderr=subprocess.PIPE\n            )\n\n            out, err = p.communicate(bytearray(gin, \"utf-8\"))\n            out = out.decode(\"utf-8\")\n            err = err.decode(\"utf-8\")\n\n            if \"Error\" in err or \"error\" in err:\n                print(gin)\n                print(\"----output_0---------\")\n                print(out)\n                print(\"----End of output_0------\\n\\n\\n\")\n                print(\"----output_1--------\")\n                print(out)\n                print(\"----End of output_1------\")\n                raise GulpError(err)\n\n            \n            if \"ERROR\" in out:\n                raise GulpError(out)\n\n            \n            conv_err_string = \"Conditions for a minimum have not been satisfied\"\n            if conv_err_string in out:\n                raise GulpConvergenceError()\n\n            gout = \"\"\n            for line in out.split(\"\\n\"):\n                gout = gout + line + \"\\n\"\n            return gout", "docstring": "Run GULP using the gin as input\n\nArgs:\ngin: GULP input string\n\nReturns:\ngout: GULP output string", "source": "juraj-google-style"}
{"code": "async def init(self, name, conf=None):\n    tank = self.tanks.get(name)\n    if (tank is not None):\n        return tank\n    iden = s_common.guid()\n    logger.info('Creating new tank: %s', name)\n    path = s_common.genpath(self.dirn, 'tanks', iden)\n    tank = (await CryoTank.anit(path, conf))\n    node = (await self.names.open((name,)))\n    (await node.set((iden, conf)))\n    self.tanks.put(name, tank)\n    return tank", "docstring": "Generate a new CryoTank with a given name or get an reference to an existing CryoTank.\n\nArgs:\nname (str): Name of the CryoTank.\n\nReturns:\nCryoTank: A CryoTank instance.", "source": "codesearchnet"}
{"code": "def load_disease_terms(adapter, genemap_lines, genes=None, hpo_disease_lines=None):\n    if (not genes):\n        genes = adapter.genes_by_alias()\n    disease_terms = get_mim_phenotypes(genemap_lines=genemap_lines)\n    if (not hpo_disease_lines):\n        hpo_disease_lines = fetch_hpo_phenotype_to_terms()\n    hpo_diseases = parse_hpo_diseases(hpo_disease_lines)\n    start_time = datetime.now()\n    nr_diseases = None\n    LOG.info('Loading the hpo disease...')\n    for (nr_diseases, disease_number) in enumerate(disease_terms):\n        disease_info = disease_terms[disease_number]\n        disease_id = 'OMIM:{0}'.format(disease_number)\n        if (disease_id in hpo_diseases):\n            hpo_terms = hpo_diseases[disease_id]['hpo_terms']\n            if hpo_terms:\n                disease_info['hpo_terms'] = hpo_terms\n        disease_obj = build_disease_term(disease_info, genes)\n        adapter.load_disease_term(disease_obj)\n    LOG.info('Loading done. Nr of diseases loaded {0}'.format(nr_diseases))\n    LOG.info('Time to load diseases: {0}'.format((datetime.now() - start_time)))", "docstring": "Load the omim phenotypes into the database\n\nParse the phenotypes from genemap2.txt and find the associated hpo terms\nfrom ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt.\n\nArgs:\nadapter(MongoAdapter)\ngenemap_lines(iterable(str))\ngenes(dict): Dictionary with all genes found in database\nhpo_disease_lines(iterable(str))", "source": "codesearchnet"}
{"code": "def contains(self, key):\n    try:\n        self._api.objects_get(self._bucket, key)\n    except datalab.utils.RequestException as e:\n        if (e.status == 404):\n            return False\n        raise e\n    except Exception as e:\n        raise e\n    return True", "docstring": "Checks if the specified item exists.\n\nArgs:\nkey: the key of the item to lookup.\nReturns:\nTrue if the item exists; False otherwise.\nRaises:\nException if there was an error requesting information about the item.", "source": "codesearchnet"}
{"code": "def is_compatible(self, other: 'ValueSpec') -> bool:", "docstring": "Returns True if values acceptable to `other` is acceptable to this spec.\n\nArgs:\nother: Other value spec.\n\nReturns:\nTrue if values that is applicable to the other value spec can be applied\nto current spec. Otherwise False.", "source": "github-repos"}
{"code": "def list_(return_yaml=True, include_pillar=True, include_opts=True, **kwargs):\n    beacons = None\n    try:\n        eventer = salt.utils.event.get_event('minion', opts=__opts__)\n        res = __salt__['event.fire']({'func': 'list', 'include_pillar': include_pillar, 'include_opts': include_opts}, 'manage_beacons')\n        if res:\n            event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=kwargs.get('timeout', 30))\n            log.debug('event_ret %s', event_ret)\n            if (event_ret and event_ret['complete']):\n                beacons = event_ret['beacons']\n    except KeyError:\n        ret = {'comment': 'Event module not available. Beacon list failed.', 'result': False}\n        return ret\n    if beacons:\n        if return_yaml:\n            tmp = {'beacons': beacons}\n            return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)\n        else:\n            return beacons\n    else:\n        return {'beacons': {}}", "docstring": "List the beacons currently configured on the minion.\n\nArgs:\n\nreturn_yaml (bool):\nWhether to return YAML formatted output, default ``True``.\n\ninclude_pillar (bool):\nWhether to include beacons that are configured in pillar, default\nis ``True``.\n\ninclude_opts (bool):\nWhether to include beacons that are configured in opts, default is\n``True``.\n\nReturns:\nlist: List of currently configured Beacons.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' beacons.list", "source": "codesearchnet"}
{"code": "class AutoformerFeatureEmbedder(nn.Module):\n\n    def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:\n        super().__init__()\n        self.num_features = len(cardinalities)\n        self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])\n\n    def forward(self, features: torch.Tensor) -> torch.Tensor:\n        if self.num_features > 1:\n            cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)\n        else:\n            cat_feature_slices = [features]\n        return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)", "docstring": "Embed a sequence of categorical features.\n\nArgs:\ncardinalities (`list[int]`):\nList of cardinalities of the categorical features.\nembedding_dims (`list[int]`):\nList of embedding dimensions of the categorical features.", "source": "github-repos"}
{"code": "def _ContainsNone(self, fail_verb, excluded):\n    present = []\n    if len(excluded) == 1:\n        if excluded[0] in self._actual:\n            present.extend(excluded)\n    elif excluded:\n        try:\n            actual_set = set(self._actual)\n        except TypeError:\n            actual_set = self._actual\n        for i in excluded:\n            if i in actual_set:\n                present.append(i)\n    if present:\n        self._FailWithBadResults(fail_verb, excluded, 'contains', present)", "docstring": "Determines if the subject contains none of the excluded elements.\n\nHelper function for ContainsNoneIn() and ContainsNoneOf().\n\nArgs:\nfail_verb: string describing how the excluded elements should be excluded.\nexcluded: iterable of objects that should not be contained in the subject.\n\nReturns:\nNone if the subject contains none of the expected elements.\n\nRaises:\nTruthAssertionError: the subject contains any of the excluded elements.", "source": "github-repos"}
{"code": "def read(in_path):\n    assert os.path.exists(in_path), \"The following GRP file can't be found. in_path: {}\".format(in_path)\n    with open(in_path, 'r') as f:\n        lines = f.readlines()\n        grp = [line.strip() for line in lines if (line and (not re.match('^\n    return grp", "docstring": "Read a grp file at the path specified by in_path.\n\nArgs:\nin_path (string): path to GRP file\n\nReturns:\ngrp (list)", "source": "codesearchnet"}
{"code": "def validate(self, proxy_scanner, expected_num=20, queue_timeout=3, val_timeout=5):\n    while (self.proxy_num() < expected_num):\n        try:\n            candidate_proxy = proxy_scanner.proxy_queue.get(timeout=queue_timeout)\n        except queue.Empty:\n            if proxy_scanner.is_scanning():\n                continue\n            else:\n                break\n        addr = candidate_proxy['addr']\n        protocol = candidate_proxy['protocol']\n        ret = self.is_valid(addr, protocol, val_timeout)\n        if (self.proxy_num() >= expected_num):\n            self.logger.info('Enough valid proxies, thread {} exit.'.format(threading.current_thread().name))\n            break\n        if ret['valid']:\n            self.add_proxy(Proxy(addr, protocol))\n            self.logger.info('{} ok, {:.2f}s'.format(addr, ret['response_time']))\n        else:\n            self.logger.info('{} invalid, {}'.format(addr, ret['msg']))", "docstring": "Target function of validation threads\n\nArgs:\nproxy_scanner: A ProxyScanner object.\nexpected_num: Max number of valid proxies to be scanned.\nqueue_timeout: Timeout for getting a proxy from the queue.\nval_timeout: An integer passed to `is_valid` as argument `timeout`.", "source": "codesearchnet"}
{"code": "def __init__(self, task_queue, verbose=True):\n        \n        multiprocessing.Process.__init__(self)\n        self._task_queue = task_queue\n        self.total_task = self._task_queue.qsize()\n        self.current_state = None\n        self.verbose = verbose", "docstring": "Construct an instance of TaskTracker\n\nArgs:\ntask_queue (multiprocessing.JoinableQueue): A queue of the\ninput data.\nverbose (bool, optional): Set to False to disable verbose output.", "source": "juraj-google-style"}
{"code": "def _update_task(self, task):\n    self.task = task\n    self.task.data.update(self.task_data)\n    self.task_type = task.task_spec.__class__.__name__\n    self.spec = task.task_spec\n    self.task_name = task.get_name()\n    self.activity = getattr(self.spec, 'service_class', '')\n    self._set_lane_data()", "docstring": "Assigns current task step to self.task\nthen updates the task's data with self.task_data\n\nArgs:\ntask: Task object.", "source": "codesearchnet"}
{"code": "def check_partitioners(partitioners, keys):\n    if (partitioners is None):\n        return {}\n    _assert_is_dictlike(partitioners, valid_keys=keys)\n    keys = set(keys)\n    if (not (set(partitioners) <= keys)):\n        extra_keys = (set(partitioners) - keys)\n        raise KeyError('Invalid partitioner keys {}, partitioners can only be provided for {}'.format(', '.join((\"'{}'\".format(key) for key in extra_keys)), ', '.join((\"'{}'\".format(key) for key in keys))))\n    _check_nested_callables(partitioners, 'Partitioner')\n    return partitioners", "docstring": "Checks the given partitioners.\n\nThis checks that `partitioners` is a dictionary that only contains keys in\n`keys`, and furthermore the entries in `partitioners` are functions or\nfurther dictionaries (the latter used, for example, in passing partitioners\nto modules inside modules) that must satisfy the same constraints.\n\nArgs:\npartitioners: Dictionary of partitioners (allowing nested dictionaries) or\nNone.\nkeys: Iterable of valid keys for `partitioners`.\n\nReturns:\nChecked dictionary of partitioners. If `partitioners=None`, an empty\ndictionary will be returned.\n\nRaises:\nKeyError: If an partitioner is provided for a key not in `keys`.\nTypeError: If a provided partitioner is not a callable function, or\n`partitioners` is not a Mapping.", "source": "codesearchnet"}
{"code": "def most_recent(path, startswith=None, endswith=None):\n    \n    candidate_files = []\n    for filename in all_files_in_directory(path):\n        if startswith and not os.path.basename(filename).startswith(startswith):\n            continue\n        if endswith and not filename.endswith(endswith):\n            continue\n        candidate_files.append({'name': filename, 'modtime': os.path.getmtime(filename)})\n\n    \n    most_recent = sorted(candidate_files, key=lambda k: k['modtime'], reverse=True)\n    return most_recent[0]['name'] if most_recent else None", "docstring": "Recursively inspect all files under a directory and return the most recent\n\nArgs:\npath (str): the path of the directory to traverse\nstartswith (str): the file name start with (optional)\nendswith (str): the file name ends with (optional)\nReturns:\nthe most recent file within the subdirectory", "source": "juraj-google-style"}
{"code": "def remove(self, keys, name=None):\n    if keys.dtype != self._key_dtype:\n        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n    with ops.name_scope(name, '%s_lookup_table_remove' % self.name, (self.resource_handle, keys, self._default_value)):\n        op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)\n    return op", "docstring": "Removes `keys` and its associated values from the table.\n\nIf a key is not present in the table, it is silently ignored.\n\nArgs:\nkeys: Keys to remove. Can be a tensor of any shape. Must match the table's\nkey type.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `keys` do not match the table data types.", "source": "github-repos"}
{"code": "def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):\n    if (not rename_model_to):\n        rename_model_to = self.model_to_use\n    new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))\n    if self.structure_path:\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):\n            custom_clean = CleanPDB()\n            my_pdb = StructureIO(self.structure_path)\n            new_model_path = my_pdb.write_pdb(custom_selection=custom_clean, custom_name=rename_model_to, out_dir=copy_to_dir, force_rerun=force_rerun)\n        self.load_structure_path(structure_path=new_model_path, file_type='pdb')\n        dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))\n        if (not op.exists(dest_itasser_dir)):\n            os.mkdir(dest_itasser_dir)\n        for attr in self._attrs_to_copy:\n            old_file_path = getattr(self, attr)\n            new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))\n            if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):\n                shutil.copy2(old_file_path, new_file_path)\n                log.debug('{}: copied from {}'.format(new_file_path, old_file_path))\n            else:\n                log.debug('{}: file already exists'.format(new_file_path))\n            setattr(self, attr, new_file_path)", "docstring": "Copy the raw information from I-TASSER modeling to a new folder.\n\nCopies all files in the list _attrs_to_copy.\n\nArgs:\ncopy_to_dir (str): Directory to copy the minimal set of results per sequence.\nrename_model_to (str): New file name (without extension)\nforce_rerun (bool): If existing models and results should be overwritten.", "source": "codesearchnet"}
{"code": "def __init__(self):\n    self._last_step_outputs = {}\n    self._last_step_outputs_reduce_ops = {}\n    self._non_tensor_outputs = {}", "docstring": "Initialize an output context.\n\nReturns:\nA context object.", "source": "github-repos"}
{"code": "async def is_try_or_pull_request(context, task):\n    \n    if is_github_task(task):\n        return await is_pull_request(context, task)\n    else:\n        return is_try(task, context.config['source_env_prefix'])", "docstring": "Determine if a task is a try or a pull-request-like task (restricted privs).\n\nChecks are the ones done in ``is_try`` and ``is_pull_request``\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\ntask (dict): the task definition to check.\n\nReturns:\nbool: True if it's a pull-request or a try task", "source": "juraj-google-style"}
{"code": "def _MergeIdenticalCaseInsensitive(self, a, b):\n    if (a.lower() != b.lower()):\n        raise MergeError((\"values must be the same (case insensitive) ('%s' vs '%s')\" % (transitfeed.EncodeUnicode(a), transitfeed.EncodeUnicode(b))))\n    return b", "docstring": "Tries to merge two strings.\n\nThe string are required to be the same ignoring case. The second string is\nalways used as the merged value.\n\nArgs:\na: The first string.\nb: The second string.\n\nReturns:\nThe merged string. This is equal to the second string.\n\nRaises:\nMergeError: The strings were not the same ignoring case.", "source": "codesearchnet"}
{"code": "def reset_logger(name, level=None, handler=None):\n  \n  \n  if level is None:\n    level = logging.INFO\n  logger = logging.getLogger(name)\n  logger.setLevel(level)\n\n  \n  handler = handler or logging.StreamHandler()\n  handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT))\n\n  \n  \n  \n  logger.handlers = [handler]\n  return logger", "docstring": "Make a standard python logger object with default formatter, handler, etc.\n\nDefaults are:\n- level == logging.INFO\n- handler == logging.StreamHandler()\n\nArgs:\nname: a logger name.\nlevel: an optional initial log level for this logger.\nhandler: an optional initial handler for this logger.\n\nReturns: a standard python logger with a single handler.", "source": "juraj-google-style"}
{"code": "def genes_by_alias(hgnc_genes):\n    alias_genes = {}\n    for hgnc_id in hgnc_genes:\n        gene = hgnc_genes[hgnc_id]\n        hgnc_symbol = gene['hgnc_symbol']\n        for alias in gene['previous_symbols']:\n            true_id = None\n            if (alias == hgnc_symbol):\n                true_id = hgnc_id\n            if (alias in alias_genes):\n                alias_genes[alias.upper()]['ids'].add(hgnc_id)\n                if true_id:\n                    alias_genes[alias.upper()]['true_id'] = hgnc_id\n            else:\n                alias_genes[alias.upper()] = {'true': true_id, 'ids': set([hgnc_id])}\n    return alias_genes", "docstring": "Return a dictionary with hgnc symbols as keys\n\nValue of the dictionaries are information about the hgnc ids for a symbol.\nIf the symbol is primary for a gene then 'true_id' will exist.\nA list of hgnc ids that the symbol points to is in ids.\n\nArgs:\nhgnc_genes(dict): a dictionary with hgnc_id as key and gene info as value\n\nReturns:\nalias_genes(dict):\n{\n'hgnc_symbol':{\n'true_id': int,\n'ids': list(int)\n}\n}", "source": "codesearchnet"}
{"code": "def is_stopped(self):\n    resp = self._client.send(Request(action='is_dag_stopped', payload={'dag_name': self._dag_name}))\n    return resp.payload['is_stopped']", "docstring": "Check whether the task received a stop signal from the workflow.\n\nTasks can use the stop flag to gracefully terminate their work. This is\nparticularly important for long running tasks and tasks that employ an\ninfinite loop, such as trigger tasks.\n\nReturns:\nbool: True if the task should be stopped.", "source": "codesearchnet"}
{"code": "def dispatch(self, message):\n        \n\n        for validator, callback in self.validators:\n            if not validator.matches(message):\n                continue\n\n            callback(message)\n            return\n\n        raise ArgumentError(\"No handler was registered for message\", message=message)", "docstring": "Dispatch a message to a callback based on its schema.\n\nArgs:\nmessage (dict): The message to dispatch", "source": "juraj-google-style"}
{"code": "def register(self, cmd: Type[Command]) -> None:\n    self.commands[cmd.command] = cmd", "docstring": "Register a new IMAP command.\n\nArgs:\ncmd: The new command type.", "source": "codesearchnet"}
{"code": "def editline_with_regex(self, regex_tgtline, to_replace):\n    for (idx, line) in enumerate(self._swp_lines):\n        mobj = re.match(regex_tgtline, line)\n        if mobj:\n            self._swp_lines[idx] = to_replace\n            return", "docstring": "find the first matched line, then replace\n\nArgs:\nregex_tgtline (str): regular expression used to match the target line\nto_replace    (str): line you wanna use to replace", "source": "codesearchnet"}
{"code": "def exists(self, path):\n        \n\n        self.__validate_storage_path(path)\n        try:\n            metadata = self.api_client.get_entity_by_query(path=path)\n        except StorageNotFoundException:\n            return False\n\n        return metadata and 'uuid' in metadata", "docstring": "Check if a certain path exists in the storage service.\n\nArgs:\npath (str): The path to be checked\n\nReturns:\nTrue if the path exists, False otherwise\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "juraj-google-style"}
{"code": "def get_lock_state_transaction(self, transaction_id):\n        \n        response = None\n        try:\n            response = requests.get(\n                urls.get_lockstate_transaction(self._giid, transaction_id),\n                headers={\n                    'Accept': 'application/json, text/javascript, */*; q=0.01',\n                    'Cookie': 'vid={}'.format(self._vid)})\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Get lock state transaction status\n\nArgs:\ntransaction_id: Transaction ID received from set_lock_state", "source": "juraj-google-style"}
{"code": "def ping(hostname: str, timeout_s: int = 5) -> bool:\n    \n    if sys.platform == \"win32\":\n        timeout_ms = timeout_s * 1000\n        args = [\n            \"ping\",\n            hostname,\n            \"-n\", \"1\",  \n            \"-w\", str(timeout_ms),  \n        ]\n    elif sys.platform.startswith('linux'):\n        args = [\n            \"ping\",\n            hostname,\n            \"-c\", \"1\",  \n            \"-w\", str(timeout_s),  \n        ]\n    else:\n        raise AssertionError(\"Don't know how to ping on this operating system\")\n    proc = subprocess.Popen(args,\n                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    proc.communicate()\n    retcode = proc.returncode\n    return retcode == 0", "docstring": "Pings a host, using OS tools.\n\nArgs:\nhostname: host name or IP address\ntimeout_s: timeout in seconds\n\nReturns:\nwas the ping successful?", "source": "juraj-google-style"}
{"code": "def setup_config(cfg, config_filenames=None, env_var_name=None):\n    if (env_var_name is None):\n        env_var_name = 'BB_CONFIG_FILE'\n    config_path = os.getenv(env_var_name, None)\n    if (not config_path):\n        config_path = find_config(defaults=config_filenames)\n    if config_path:\n        cfg.load(config_path)\n        cfg['config_file'] = os.path.abspath(config_path)\n    cfg.init_from_env()", "docstring": "This will initialize the given configuration object.\n\nThe following resources are available in the same order:\n1) Default settings.\n2) Config file.\n3) Environment variables.\n\nWARNING: Environment variables do _not_ take precedence over the config\nfile right now. (init_from_env will refuse to update the\nvalue, if there is already one.)\n\nArgs:\nconfig_filenames: list of possible config filenames\nenv_var_name: name of the environment variable holding the config path", "source": "codesearchnet"}
{"code": "def get_diff(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:\n    print('\\n\n    code_diff = []\n    for commit in commits:\n        for diff_obj in commit.diff(base_commit):\n            if diff_obj.change_type == 'A' and diff_obj.b_path.endswith('.py'):\n                code_diff.append(diff_obj.b_path)\n            elif diff_obj.change_type == 'D' and diff_obj.a_path.endswith('.py'):\n                code_diff.append(diff_obj.a_path)\n            elif diff_obj.change_type in ['M', 'R'] and diff_obj.b_path.endswith('.py'):\n                if diff_obj.a_path != diff_obj.b_path:\n                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])\n                elif diff_is_docstring_only(repo, commit, diff_obj.b_path):\n                    print(f'Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.')\n                else:\n                    code_diff.append(diff_obj.a_path)\n    return code_diff", "docstring": "Get the diff between a base commit and one or several commits.\n\nArgs:\nrepo (`git.Repo`):\nA git repository (for instance the Transformers repo).\nbase_commit (`str`):\nThe commit reference of where to compare for the diff. This is the current commit, not the branching point!\ncommits (`List[str]`):\nThe list of commits with which to compare the repo at `base_commit` (so the branching point).\n\nReturns:\n`List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files\nmodified are returned if the diff in the file is not only in docstrings or comments, see\n`diff_is_docstring_only`).", "source": "github-repos"}
{"code": "def wait_until_final(self, poll_interval=1, timeout=60):\n    start_time = time.time()\n    elapsed = 0\n    while ((self.status != 'complete') and ((timeout <= 0) or (elapsed < timeout))):\n        time.sleep(poll_interval)\n        self.refresh()\n        elapsed = (time.time() - start_time)", "docstring": "It will poll the URL to grab the latest status resource in a given\ntimeout and time interval.\n\nArgs:\npoll_interval (int): how often to poll the status service.\ntimeout (int): how long to poll the URL until giving up. Use <= 0\nto wait forever", "source": "codesearchnet"}
{"code": "def create_feed_dict_from_input_data(input_data: RepresentativeSample, signature_def: meta_graph_pb2.SignatureDef) -> Mapping[str, np.ndarray]:\n    feed_dict = {}\n    for input_key, input_value in input_data.items():\n        input_tensor_name = signature_def.inputs[input_key].name\n        value = input_value\n        if isinstance(input_value, core.Tensor):\n            value = input_value.eval()\n        feed_dict[input_tensor_name] = value\n    return feed_dict", "docstring": "Constructs a feed_dict from input data.\n\nNote: This function should only be used in graph mode.\n\nThis is a helper function that converts an 'input key -> input value' mapping\nto a feed dict. A feed dict is an 'input tensor name -> input value' mapping\nand can be directly passed to the `feed_dict` argument of `sess.run()`.\n\nArgs:\ninput_data: Input key -> input value mapping. The input keys should match\nthe input keys of `signature_def`.\nsignature_def: A SignatureDef representing the function that `input_data` is\nan input to.\n\nReturns:\nFeed dict, which is intended to be used as input for `sess.run`. It is\nessentially a mapping: input tensor name -> input value. Note that the input\nvalue in the feed dict is not a `Tensor`.", "source": "github-repos"}
{"code": "def get_extra_args():\n    g = ops.get_default_graph()\n    if isinstance(g, _FuncGraph):\n        return g.extra_args\n    else:\n        return []", "docstring": "Returns the corresponding function arguments for the captured inputs.\n\nReturns:\nIf the default graph is being used to define a function, the\nreturned list of place holders are those used inside the function\nbody corresponding those returned by get_extra_inputs(). Otherwise,\nreturns an empty list.", "source": "github-repos"}
{"code": "def get(self, item, alt=None):\n        \n        try:\n            val = self[item]\n        except ValueError:\n            return alt\n\n        return val if val is not None else alt", "docstring": "Standard dict-like .get() method.\n\nArgs:\nitem (str): See :meth:`.__getitem__` for details.\nalt (default None): Alternative value, if item is not found.\n\nReturns:\nobj: `item` or `alt`, if item is not found.", "source": "juraj-google-style"}
{"code": "def get_train_examples(self, data_dir, filename=None):\n    if data_dir is None:\n        data_dir = ''\n    if self.train_file is None:\n        raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')\n    with open(os.path.join(data_dir, self.train_file if filename is None else filename), 'r', encoding='utf-8') as reader:\n        input_data = json.load(reader)['data']\n    return self._create_examples(input_data, 'train')", "docstring": "Returns the training examples from the data directory.\n\nArgs:\ndata_dir: Directory containing the data files used for training and evaluating.\nfilename: None by default, specify this if the training file has a different name than the original one\nwhich is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.", "source": "github-repos"}
{"code": "def ExtractEvents(\n      self, parser_mediator, registry_key, codepage='cp1252', **kwargs):\n    \n    self._ParseSubKey(parser_mediator, registry_key, [], codepage=codepage)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"}
{"code": "def moments_v2(x, axes, shift=None, keepdims=False, name=None):\n    return moments(x=x, axes=axes, shift=shift, name=name, keep_dims=keepdims)", "docstring": "Calculates the mean and variance of `x`.\n\nThe mean and variance are calculated by aggregating the contents of `x`\nacross `axes`.  If `x` is 1-D and `axes = [0]` this is just the mean\nand variance of a vector.\n\nNote: shift is currently not used; the true mean is computed and used.\n\nWhen using these moments for batch normalization (see\n`tf.nn.batch_normalization`):\n\n* for so-called \"global normalization\", used with convolutional filters with\nshape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.\n* for simple batch normalization pass `axes=[0]` (batch only).\n\nArgs:\nx: A `Tensor`.\naxes: Array of ints.  Axes along which to compute mean and\nvariance.\nshift: Not used in the current implementation.\nkeepdims: produce moments with the same dimensionality as the input.\nname: Name used to scope the operations that compute the moments.\n\nReturns:\nTwo `Tensor` objects: `mean` and `variance`.", "source": "github-repos"}
{"code": "def init_log(log_file):\n    \n    \n    log = None\n    try:\n        log = open(log_file, 'a')\n\n        \n        \n        sys.stdout = Tee(sys.stdout, log)\n    except:\n        pass\n    return log", "docstring": "Creates log file on disk and \"Tees\" :py:class:`sys.stdout` to console and disk\n\nArgs:\nlog_file (str): The path on disk to append or create the log file.\n\nReturns:\nfile: The opened log file.", "source": "juraj-google-style"}
{"code": "def intersect(self, second_iterable, selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call intersect() on a closed Queryable.')\n    if (not is_iterable(second_iterable)):\n        raise TypeError('Cannot compute intersect() with second_iterable of non-iterable {0}'.format(str(type(second_iterable))[7:(- 1)]))\n    if (not is_callable(selector)):\n        raise TypeError('intersect() parameter selector={0} is not callable'.format(repr(selector)))\n    return self._create(self._generate_intersect_result(second_iterable, selector))", "docstring": "Returns those elements which are both in the source sequence and in\nthe second_iterable.\n\nNote: This method uses deferred execution.\n\nArgs:\nsecond_iterable: Elements are returned if they are also in the\nsequence.\n\nselector: An optional single argument function which is used to\nproject the elements in the source and second_iterables prior\nto comparing them. If omitted the identity function will be\nused.\n\nReturns:\nA sequence containing all elements in the source sequence  which\nare also members of the second sequence.\n\nRaises:\nValueError: If the Queryable has been closed.\nTypeError: If the second_iterable is not in fact iterable.\nTypeError: If the selector is not callable.", "source": "codesearchnet"}
{"code": "def success(channel, image, hex_str):\n    \n\n    hex_number = int(hex_str, 16)\n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"\",\n        \"\n        modulename=modulename,\n        colour=hex_number,\n        thumbnail=image,\n    )\n\n    return gui", "docstring": "Creates an embed UI containing a hex color message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nimage (str): The url of the image to add\nhex_str (str): The hex value\n\nReturns:\nui (ui_embed.UI): The embed UI object that was created", "source": "juraj-google-style"}
{"code": "def is_back_tracking(neurite):\n\n    def pair(segs):\n        ' Pairs the input list into triplets'\n        return zip(segs, segs[1:])\n\n    def coords(node):\n        ' Returns the first three values of the tree that correspond to the x, y, z coordinates'\n        return node[COLS.XYZ]\n\n    def max_radius(seg):\n        ' Returns maximum radius from the two segment endpoints'\n        return max(seg[0][COLS.R], seg[1][COLS.R])\n\n    def is_not_zero_seg(seg):\n        ' Returns True if segment has zero length'\n        return (not np.allclose(coords(seg[0]), coords(seg[1])))\n\n    def is_in_the_same_verse(seg1, seg2):\n        ' Checks if the vectors face the same direction. This\\n        is true if their dot product is greater than zero.\\n        '\n        v1 = (coords(seg2[1]) - coords(seg2[0]))\n        v2 = (coords(seg1[1]) - coords(seg1[0]))\n        return (np.dot(v1, v2) >= 0)\n\n    def is_seg2_within_seg1_radius(dist, seg1, seg2):\n        ' Checks whether the orthogonal distance from the point at the end of\\n        seg1 to seg2 segment body is smaller than the sum of their radii\\n        '\n        return (dist <= (max_radius(seg1) + max_radius(seg2)))\n\n    def is_seg1_overlapping_with_seg2(seg1, seg2):\n        'Checks if a segment is in proximity of another one upstream'\n        s1 = coords(seg2[0])\n        s2 = coords(seg2[1])\n        C = (0.5 * (s1 + s2))\n        P = coords(seg1[1])\n        CP = (P - C)\n        S1S2 = (s2 - s1)\n        prj = mm.vector_projection(CP, S1S2)\n        if (not is_seg2_within_seg1_radius(np.linalg.norm((CP - prj)), seg1, seg2)):\n            return False\n        return (np.linalg.norm(prj) < (0.55 * np.linalg.norm(S1S2)))\n\n    def is_inside_cylinder(seg1, seg2):\n        ' Checks if seg2 approximately lies within a cylindrical volume of seg1.\\n        Two conditions must be satisfied:\\n            1. The two segments are not facing the same direction  (seg2 comes back to seg1)\\n            2. seg2 is overlaping with seg1\\n        '\n        return ((not is_in_the_same_verse(seg1, seg2)) and is_seg1_overlapping_with_seg2(seg1, seg2))\n    section_itr = (snode for snode in neurite.iter_sections() if (snode.points.shape[0] > 2))\n    for snode in section_itr:\n        segment_pairs = list(filter(is_not_zero_seg, pair(snode.points)))\n        for (i, seg1) in enumerate(segment_pairs[1:]):\n            for seg2 in segment_pairs[0:(i + 1)]:\n                if is_inside_cylinder(seg1, seg2):\n                    return True\n    return False", "docstring": "Check if a neurite process backtracks to a previous node. Back-tracking takes place\nwhen a daughter of a branching process goes back and either overlaps with a previous point, or\nlies inside the cylindrical volume of the latter.\n\nArgs:\nneurite(Neurite): neurite to operate on\n\nReturns:\nTrue Under the following scenaria:\n1. A segment endpoint falls back and overlaps with a previous segment's point\n2. The geometry of a segment overlaps with a previous one in the section", "source": "codesearchnet"}
{"code": "def sheets_values_batch_update(config, auth, sheet_url_or_name, data):\n    sheet_id = sheets_id(config, auth, sheet_url_or_name)\n    API_Sheets(config, auth).spreadsheets().values().batchUpdate(spreadsheetId=sheet_id, body=data).execute()", "docstring": "Helper for performing batch value operations.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nsheet_url_or_name - one of: URL, document title, or id\ndata - JSON data for sending to batch request\n\nNo Return", "source": "github-repos"}
{"code": "def xresnet18(pretrained=False, **kwargs):\n    \n    model = XResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n    if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet18']))\n    return model", "docstring": "Constructs a XResNet-18 model.\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"}
{"code": "def create_tree_from_string(line):\n    \n    depth         = 0\n    current_word  = \"\"\n    root          = None\n    current_node  = root\n\n    for char in line:\n        if char == '(':\n            if current_node is not None and len(current_word) > 0:\n                attribute_text_label(current_node, current_word)\n                current_word = \"\"\n            depth += 1\n            if depth > 1:\n                \n                child = LabeledTree(depth=depth)\n                current_node.add_child(child)\n                current_node = child\n                root.add_general_child(child)\n            else:\n                root = LabeledTree(depth=depth)\n                root.add_general_child(root)\n                current_node = root\n\n        elif char == ')':\n            \n            if len(current_word) > 0:\n                attribute_text_label(current_node, current_word)\n                current_word = \"\"\n\n            \n            depth -= 1\n            if current_node.parent != None:\n                current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)\n            current_node = current_node.parent\n        else:\n            \n            current_word += char\n    if depth != 0:\n        raise ParseError(\"Not an equal amount of closing and opening parentheses\")\n\n    return root", "docstring": "Parse and convert a string representation\nof an example into a LabeledTree datastructure.\n\nArguments:\n----------\nline : str, string version of the tree.\n\nReturns:\n--------\nLabeledTree : parsed tree.", "source": "juraj-google-style"}
{"code": "def sample_from_discretized_mix_logistic(pred, seed=None):\n    (logits, locs, log_scales, coeffs) = split_to_discretized_mix_logistic_params(pred)\n    num_mixtures = shape_list(logits)[(- 1)]\n    gumbel_noise = (- tf.log((- tf.log(tf.random_uniform(tf.shape(logits), minval=1e-05, maxval=(1.0 - 1e-05), seed=seed)))))\n    sel = tf.one_hot(tf.argmax((logits + gumbel_noise), (- 1)), depth=num_mixtures, dtype=tf.float32)\n    sel = tf.expand_dims(sel, (- 1))\n    locs = tf.reduce_sum((locs * sel), 3)\n    log_scales = tf.reduce_sum((log_scales * sel), 3)\n    coeffs = tf.reduce_sum((coeffs * sel), 3)\n    uniform_noise = tf.random_uniform(tf.shape(locs), minval=1e-05, maxval=(1.0 - 1e-05), seed=seed)\n    logistic_noise = (tf.log(uniform_noise) - tf.log1p((- uniform_noise)))\n    x = (locs + (tf.exp(log_scales) * logistic_noise))\n    x0 = x[(..., 0)]\n    x1 = (x[(..., 1)] + (coeffs[(..., 0)] * x0))\n    x2 = ((x[(..., 2)] + (coeffs[(..., 1)] * x0)) + (coeffs[(..., 2)] * x1))\n    x = tf.stack([x0, x1, x2], axis=(- 1))\n    x = tf.clip_by_value(x, (- 1.0), 1.0)\n    return x", "docstring": "Sampling from a discretized mixture of logistics.\n\nArgs:\npred: A [batch, height, width, num_mixtures*10] tensor of floats\ncomprising one unconstrained mixture probability, three means\n(one per channel), three standard deviations (one per channel),\nand three coefficients which linearly parameterize dependence across\nchannels.\nseed: Random seed.\n\nReturns:\nA tensor of shape [batch, height, width, 3] with real intensities scaled\nbetween -1 and 1.", "source": "codesearchnet"}
{"code": "def match(self, message) -> bool:\n    if (self.to and (message.to != self.to)):\n        return False\n    if (self.sender and (message.sender != self.sender)):\n        return False\n    if (self.body and (message.body != self.body)):\n        return False\n    if (self.thread and (message.thread != self.thread)):\n        return False\n    for (key, value) in self.metadata.items():\n        if (message.get_metadata(key) != value):\n            return False\n    logger.debug(f'message matched {self} == {message}')\n    return True", "docstring": "Returns wether a message matches with this message or not.\nThe message can be a Message object or a Template object.\n\nArgs:\nmessage (spade.message.Message): the message to match to\n\nReturns:\nbool: wether the message matches or not", "source": "codesearchnet"}
{"code": "def sample_g_values(self, ngram_keys: torch.LongTensor) -> torch.LongTensor:\n    sampling_table_size, = self.sampling_table.shape\n    sampling_table = self.sampling_table.reshape((1, 1, sampling_table_size))\n    ngram_keys = ngram_keys % sampling_table_size\n    return torch.take_along_dim(sampling_table, indices=ngram_keys, dim=2)", "docstring": "Samples g values from Bernoulli distribution.\n\nIt is not possible to pass random keys in a vectorized way in torch. Instead\nwe pre-compute a random sampling table, and use apply modulo table size to\nmap from ngram keys (int64) to g values.\n\nArgs:\nngram_keys (`torch.LongTensor`):\nRandom keys (batch_size, num_ngrams, depth).\n\nReturns:\nG values (batch_size, num_ngrams, depth).", "source": "github-repos"}
{"code": "def city(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `city`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `city`')\n    self._city = value", "docstring": "Corresponds to IDD Field `city`\n\nArgs:\nvalue (str): value for IDD Field `city`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def build_image(registry, image):\n    \n    \n    if ':' in image['name']:\n        _, tag = image['name'].split(':', 1)\n    else:\n        _, tag = image['name'], None\n\n    values = {\n        'registry': '' if registry is None else registry + '/',\n        'image': image['name'],\n        'tag': tag,\n    }\n\n    if tag is None:\n        args = [\n            '-t {registry}{image}'.format(**values),\n            '-t {registry}{image}:{version}'.format(\n                version=versioning.current(),\n                **values\n            ),\n        ]\n    else:\n        args = ['-t {registry}{image}'.format(**values)]\n\n    if 'file' in image:\n        args.append('-f {}'.format(conf.proj_path(image['file'])))\n\n    with conf.within_proj_dir(image.get('path', '.')):\n        log.info(\"Building <33>{registry}<35>/{image}\", **values)\n        shell.run('docker build {args} .'.format(args=' '.join(args)))", "docstring": "Build docker image.\n\nArgs:\nregistry (str):\nThe name of the registry this image belongs to. If not given, the\nresulting image will have a name without the registry.\nimage (dict[str, Any]):\nThe dict containing the information about the built image. This is\nthe same dictionary as defined in DOCKER_IMAGES variable.", "source": "juraj-google-style"}
{"code": "def save_as_json(total: list,\n                 name='data.json',\n                 sort_by: str = None,\n                 no_duplicate=False,\n                 order='asc'):\n    \n    if sort_by:\n        reverse = order == 'desc'\n        total = sorted(total, key=itemgetter(sort_by), reverse=reverse)\n    if no_duplicate:\n        total = [key for key, _ in groupby(total)]\n    data = json.dumps(total, ensure_ascii=False)\n    Path(name).write_text(data, encoding='utf-8')", "docstring": "Save what you crawled as a json file.\n\nArgs:\ntotal (list): Total of data you crawled.\nname (str, optional): Defaults to 'data.json'. The name of the file.\nsort_by (str, optional): Defaults to None. Sort items by a specific key.\nno_duplicate (bool, optional): Defaults to False. If True, it will remove duplicated data.\norder (str, optional): Defaults to 'asc'. The opposite option is 'desc'.", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    self._last_month = 0\n    self._year_use = parser_mediator.GetEstimatedYear()\n\n    key = 'header'\n\n    try:\n      structure = self._MAC_WIFI_HEADER.parseString(line)\n    except pyparsing.ParseException:\n      structure = None\n\n    if not structure:\n      key = 'turned_over_header'\n\n      try:\n        structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)\n      except pyparsing.ParseException:\n        structure = None\n\n    if not structure:\n      logger.debug('Not a Mac Wifi log file')\n      return False\n\n    time_elements_tuple = self._GetTimeElementsTuple(key, structure)\n\n    try:\n      dfdatetime_time_elements.TimeElementsInMilliseconds(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      logger.debug(\n          'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(\n              structure.date_time))\n      return False\n\n    self._last_month = time_elements_tuple[1]\n\n    return True", "docstring": "Verify that this file is a Mac Wifi log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"}
{"code": "def parse(source):\n    if isinstance(source, str):\n        return parse_stream(six.StringIO(source))\n    else:\n        return parse_stream(source)", "docstring": "Parses source code returns an array of instructions suitable for\noptimization and execution by a Machine.\n\nArgs:\nsource: A string or stream containing source code.", "source": "codesearchnet"}
{"code": "def _logspace_mean(log_values):\n  \n  \n  \n  \n  \n  center = tf.stop_gradient(_sample_max(log_values))\n\n  \n  centered_values = tf.math.exp(log_values - center)\n\n  \n  \n  \n  \n  log_mean_of_values = tf.math.log(_sample_mean(centered_values)) + center\n\n  return log_mean_of_values", "docstring": "Evaluate `Log[E[values]]` in a stable manner.\n\nArgs:\nlog_values:  `Tensor` holding `Log[values]`.\n\nReturns:\n`Tensor` of same `dtype` as `log_values`, reduced across dim 0.\n`Log[Mean[values]]`.", "source": "juraj-google-style"}
{"code": "def resolve(self, keys: Iterable[str]) -> Tuple[Dict[KeySpec, List[str]], List[str]]:\n    keys = list(keys)\n    input_keyset = set(keys)\n    nonconst_key_specs = [k for k in self._fields.keys() if not k.is_const]\n    nonconst_keys = {k: [] for k in nonconst_key_specs}\n    unmatched_keys = []\n    keys_by_key_spec = dict()\n    for key in keys:\n        if key not in self._fields:\n            matched_nonconst_keys = False\n            for key_spec in nonconst_key_specs:\n                if key_spec.match(key):\n                    nonconst_keys[key_spec].append(key)\n                    matched_nonconst_keys = True\n                    break\n            if not matched_nonconst_keys:\n                unmatched_keys.append(key)\n    for key_spec in self._fields.keys():\n        keys = []\n        if not key_spec.is_const:\n            keys = nonconst_keys.get(key_spec, [])\n        elif key_spec in input_keyset:\n            keys.append(str(key_spec))\n        keys_by_key_spec[key_spec] = keys\n    return (keys_by_key_spec, unmatched_keys)", "docstring": "Resolve keys by grouping them by their matched fields.\n\nArgs:\nkeys: A list of string keys.\n\nReturns:\nA tuple of matched key results and unmatched keys.\nMatched key results are an ordered dict of KeySpec to matched keys,\nin field declaration order.\nUnmatched keys are strings from input.", "source": "github-repos"}
{"code": "def extend(self, *iterables):\n        \n\n        for value in iterables:\n            list.extend(self, value)\n        return self", "docstring": "Add all values of all iterables at the end of the list\n\nArgs:\niterables: iterable which content to add at the end\n\nExample:\n\n>>> from ww import l\n>>> lst = l([])\n>>> lst.extend([1, 2])\n[1, 2]\n>>> lst\n[1, 2]\n>>> lst.extend([3, 4]).extend([5, 6])\n[1, 2, 3, 4, 5, 6]\n>>> lst\n[1, 2, 3, 4, 5, 6]", "source": "juraj-google-style"}
{"code": "def filter_values(cls, part_info):\n    filtered = []\n    for info_list in cls.filter_parts(part_info).values():\n        filtered += info_list\n    return filtered", "docstring": "Filter the part_info dict list looking for instances of our class\n\nArgs:\npart_info (dict): {part_name: [Info] or None} as returned from\nController.run_hook()\n\nReturns:\nlist: [info] where info is a subclass of cls", "source": "codesearchnet"}
{"code": "def register_read_multiple(self, register_indices):\n    num_regs = len(register_indices)\n    buf = (ctypes.c_uint32 * num_regs)(*register_indices)\n    data = (ctypes.c_uint32 * num_regs)(0)\n    statuses = (ctypes.c_uint8 * num_regs)(0)\n    res = self._dll.JLINKARM_ReadRegs(buf, data, statuses, num_regs)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return list(data)", "docstring": "Retrieves the values from the registers specified.\n\nArgs:\nself (JLink): the ``JLink`` instance\nregister_indices (list): list of registers to read\n\nReturns:\nA list of values corresponding one-to-one for each of the given\nregister indices.  The returned list of values are the values in\norder of which the indices were specified.\n\nRaises:\nJLinkException: if a given register is invalid or an error occurs.", "source": "codesearchnet"}
{"code": "def _follow_link(self, link_path_components, link):\n    link_path = link.contents\n    sep = self._path_separator(link_path)\n    if (not self._starts_with_root_path(link_path)):\n        components = link_path_components[:(- 1)]\n        components.append(link_path)\n        link_path = sep.join(components)\n    return self.normpath(link_path)", "docstring": "Follow a link w.r.t. a path resolved so far.\n\nThe component is either a real file, which is a no-op, or a\nsymlink. In the case of a symlink, we have to modify the path\nas built up so far\n/a/b => ../c  should yield /a/../c (which will normalize to /a/c)\n/a/b => x     should yield /a/x\n/a/b => /x/y/z should yield /x/y/z\nThe modified path may land us in a new spot which is itself a\nlink, so we may repeat the process.\n\nArgs:\nlink_path_components: The resolved path built up to the link\nso far.\nlink: The link object itself.\n\nReturns:\n(string) The updated path resolved after following the link.\n\nRaises:\nIOError: if there are too many levels of symbolic link", "source": "codesearchnet"}
{"code": "def headers(self, headers=None, **kw):\n        \n        headers = kw if kw else headers\n        self._request.headers = headers\n        self.add_matcher(matcher('HeadersMatcher', headers))", "docstring": "Defines a dictionary of arguments.\n\nHeader keys are case insensitive.\n\nArguments:\nheaders (dict): headers to match.\n**headers (dict): headers to match as variadic keyword arguments.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def _ReadStructure(self, file_object, file_offset, data_size, data_type_map, description):\n    data = self._ReadData(file_object, file_offset, data_size, description)\n    return self._ReadStructureFromByteStream(data, file_offset, data_type_map, description)", "docstring": "Reads a structure.\n\nArgs:\nfile_object (FileIO): file-like object.\nfile_offset (int): offset of the data relative from the start of\nthe file-like object.\ndata_size (int): data size of the structure.\ndata_type_map (dtfabric.DataTypeMap): data type map of the structure.\ndescription (str): description of the structure.\n\nReturns:\nobject: structure values object.\n\nRaises:\nFileFormatError: if the structure cannot be read.\nValueError: if file-like object or date type map are invalid.", "source": "codesearchnet"}
{"code": "def size(self):\n    gate_ops = 0\n    for (instr, _, _) in self.data:\n        if (instr.name not in ['barrier', 'snapshot']):\n            gate_ops += 1\n    return gate_ops", "docstring": "Returns total number of gate operations in circuit.\n\nReturns:\nint: Total number of gate operations.", "source": "codesearchnet"}
{"code": "def commit_offsets_async(self, offsets, callback=None):\n    self._invoke_completed_offset_commit_callbacks()\n    if (not self.coordinator_unknown()):\n        future = self._do_commit_offsets_async(offsets, callback)\n    else:\n        future = self.lookup_coordinator()\n        future.add_callback((lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)()))\n        if callback:\n            future.add_errback((lambda e: self.completed_offset_commits.appendleft((callback, offsets, e))))\n    self._client.poll(timeout_ms=0)\n    return future", "docstring": "Commit specific offsets asynchronously.\n\nArguments:\noffsets (dict {TopicPartition: OffsetAndMetadata}): what to commit\ncallback (callable, optional): called as callback(offsets, response)\nresponse will be either an Exception or a OffsetCommitResponse\nstruct. This callback can be used to trigger custom actions when\na commit request completes.\n\nReturns:\nkafka.future.Future", "source": "codesearchnet"}
{"code": "def modutf7_decode(data: bytes) -> str:\n    parts = []\n    is_usascii = True\n    buf = memoryview(data)\n    while buf:\n        byte = buf[0]\n        if is_usascii:\n            if (buf[0:2] == b'&-'):\n                parts.append('&')\n                buf = buf[2:]\n            elif (byte == 38):\n                is_usascii = False\n                buf = buf[1:]\n            else:\n                parts.append(chr(byte))\n                buf = buf[1:]\n        else:\n            for (i, byte) in enumerate(buf):\n                if (byte == 45):\n                    to_decode = buf[:i].tobytes()\n                    decoded = _modified_b64decode(to_decode)\n                    parts.append(decoded)\n                    buf = buf[(i + 1):]\n                    is_usascii = True\n                    break\n    if (not is_usascii):\n        to_decode = buf.tobytes()\n        decoded = _modified_b64decode(to_decode)\n        parts.append(decoded)\n    return ''.join(parts)", "docstring": "Decode the bytestring using modified UTF-7.\n\nArgs:\ndata: The encoded bytestring to decode.", "source": "codesearchnet"}
{"code": "def implement(cls, implementations, for_type=None, for_types=None):\n    for type_ in cls.__get_type_args(for_type, for_types):\n        cls._implement_for_type(for_type=type_, implementations=implementations)", "docstring": "Provide protocol implementation for a type.\n\nRegister all implementations of multimethod functions in this\nprotocol and add the type into the abstract base class of the\nprotocol.\n\nArguments:\nimplementations: A dict of (function, implementation), where each\nfunction is multimethod and each implementation is a callable.\nfor_type: The concrete type implementations apply to.\nfor_types: Same as for_type, but takes a tuple of types.\n\nYou may not supply both for_type and for_types for obvious reasons.\n\nRaises:\nValueError for arguments.\nTypeError if not all implementations are provided or if there\nare issues related to polymorphism (e.g. attempting to\nimplement a non-multimethod function.", "source": "codesearchnet"}
{"code": "def static(self, root, path, media_type=None, charset='UTF-8'):\n    root = os.path.abspath(os.path.join(root, ''))\n    path = os.path.abspath(os.path.join(root, path.lstrip('/\\\\')))\n    self.response.state['filename'] = os.path.basename(path)\n    if (not path.startswith(root)):\n        return 403\n    elif (not os.path.isfile(path)):\n        return 404\n    if (media_type is not None):\n        self.response.media_type = media_type\n    else:\n        self.response.media_type = mimetypes.guess_type(path)[0]\n    self.response.charset = charset\n    with open(path, 'rb') as f:\n        return f.read()", "docstring": "Send content of a static file as response.\n\nThe path to the document root directory should be specified as\nthe root argument. This is very important to prevent directory\ntraversal attack. This method guarantees that only files within\nthe document root directory are served and no files outside this\ndirectory can be accessed by a client.\n\nThe path to the actual file to be returned should be specified\nas the path argument. This path must be relative to the document\ndirectory.\n\nThe *media_type* and *charset* arguments are used to set the\nContent-Type header of the HTTP response. If *media_type*\nis not specified or specified as ``None`` (the default), then it\nis guessed from the filename of the file to be returned.\n\nArguments:\nroot (str): Path to document root directory.\npath (str): Path to file relative to document root directory.\nmedia_type (str, optional): Media type of file.\ncharset (str, optional): Character set of file.\n\nReturns:\nbytes: Content of file to be returned in the HTTP response.", "source": "codesearchnet"}
{"code": "def shape(self):\n    return self._dense_shape_default", "docstring": "Get the `TensorShape` representing the shape of the dense tensor.\n\nReturns:\nA `TensorShape` object.", "source": "github-repos"}
{"code": "def file_modified_time(file_name) -> pd.Timestamp:\n    \n    return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))", "docstring": "File modified time in python\n\nArgs:\nfile_name: file name\n\nReturns:\npd.Timestamp", "source": "juraj-google-style"}
{"code": "def check_trace_mode(device_type, trace_mode):\n    if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:\n        if device_type != _DEVICE_TYPE_TPU:\n            raise ValueError('Device_type \"%s\" is not yet supported for trace mode \"%s\"' % (device_type, trace_mode))", "docstring": "Checks if the given trace mode work on the given device type.\n\nArgs:\ndevice_type: Device type, TPU, GPU, CPU.\ntrace_mode: Tensor tracer trace mode.\nRaises:\nValueError: If the given trace mode is not supported for the device.", "source": "github-repos"}
{"code": "def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params):\n    if (len(M_list) == 0):\n        M_list = []\n        for i in range(n_runs):\n            (M, W, ll) = poisson_estimate_state(data, k, **se_params)\n            M_list.append(M)\n    M_stacked = np.hstack(M_list)\n    (M_new, W_new, ll) = poisson_estimate_state(M_stacked, k, **se_params)\n    W_new = np.dot(data.T, M_new)\n    W_new = (W_new / W_new.sum(0))\n    return (M_new, W_new, ll)", "docstring": "Runs an ensemble method on the list of M results...\n\nArgs:\ndata: genes x cells array\nk: number of classes\nn_runs (optional): number of random initializations of state estimation\nM_list (optional): list of M arrays from state estimation\nse_params (optional): optional poisson_estimate_state params\n\nReturns:\nM_new\nW_new\nll", "source": "codesearchnet"}
{"code": "def __init__(self, certificate_type, value, masks=None,\n                 name='Certificate'):\n        \n        super(Certificate, self).__init__()\n\n        self._object_type = enums.ObjectType.CERTIFICATE\n\n        self.value = value\n        self.certificate_type = certificate_type\n        self.names = [name]\n\n        if masks:\n            self.cryptographic_usage_masks = masks\n\n        \n        \n        self._cryptographic_algorithm = None\n        self._cryptographic_length = None\n        self._certificate_length = None\n\n        \n        \n        self._cryptographic_parameters = list()\n        self._digital_signature_algorithm = list()\n\n        self.validate()", "docstring": "Create a Certificate.\n\nArgs:\ncertificate_type(CertificateType): An enumeration defining the\ntype of the certificate.\nvalue(bytes): The bytes representing the certificate.\nmasks(list): A list of CryptographicUsageMask enumerations\ndefining how the certificate will be used.\nname(string): The string name of the certificate.", "source": "juraj-google-style"}
{"code": "def count_up_to(ref, limit, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.count_up_to(ref, limit=limit, name=name)\n    return gen_state_ops.resource_count_up_to(ref.handle, limit, T=ref.dtype, name=name)", "docstring": "Increments 'ref' until it reaches 'limit'.\n\nArgs:\nref: A Variable. Must be one of the following types: `int32`, `int64`.\nShould be from a scalar `Variable` node.\nlimit: An `int`.\nIf incrementing ref would bring it above limit, instead generates an\n'OutOfRange' error.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`. Has the same type as `ref`.\nA copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct.", "source": "github-repos"}
{"code": "def get_model_test_files() -> List[str]:\n    _ignore_files = ['test_modeling_common', 'test_modeling_encoder_decoder', 'test_modeling_flax_encoder_decoder', 'test_modeling_flax_speech_encoder_decoder', 'test_modeling_marian', 'test_modeling_tf_common', 'test_modeling_tf_encoder_decoder']\n    test_files = []\n    model_test_root = os.path.join(PATH_TO_TESTS, 'models')\n    model_test_dirs = []\n    for x in os.listdir(model_test_root):\n        x = os.path.join(model_test_root, x)\n        if os.path.isdir(x):\n            model_test_dirs.append(x)\n    for target_dir in [PATH_TO_TESTS] + model_test_dirs:\n        for file_or_dir in os.listdir(target_dir):\n            path = os.path.join(target_dir, file_or_dir)\n            if os.path.isfile(path):\n                filename = os.path.split(path)[-1]\n                if 'test_modeling' in filename and os.path.splitext(filename)[0] not in _ignore_files:\n                    file = os.path.join(*path.split(os.sep)[1:])\n                    test_files.append(file)\n    return test_files", "docstring": "Get the model test files.\n\nReturns:\n`List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS`\ndefined in this script). They will be considered as paths relative to `tests`. A caller has to use\n`os.path.join(PATH_TO_TESTS, ...)` to access the files.", "source": "github-repos"}
{"code": "def AddRoute(self, short_name, long_name, route_type, route_id=None):\n    \n    if route_id is None:\n      route_id = util.FindUniqueId(self.routes)\n    route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name,\n                        route_type=route_type, route_id=route_id)\n    route.agency_id = self.GetDefaultAgency().agency_id\n    self.AddRouteObject(route)\n    return route", "docstring": "Add a route to this schedule.\n\nArgs:\nshort_name: Short name of the route, such as \"71L\"\nlong_name: Full name of the route, such as \"NW 21st Ave/St Helens Rd\"\nroute_type: A type such as \"Tram\", \"Subway\" or \"Bus\"\nroute_id: id of the route or None, in which case a unique id is picked\nReturns:\nA new Route object", "source": "juraj-google-style"}
{"code": "def _group_sentences(total_nb_sentences, group_length):\n        \n        sentences_groups = []\n        current_sentence_group = []\n\n        for i in range(0, total_nb_sentences):\n            if i % group_length == 0:\n                if len(current_sentence_group) > 0:\n                    sentences_groups.append(current_sentence_group)\n                current_sentence_group = [i]\n            else:\n                current_sentence_group.append(i)\n\n        if len(current_sentence_group) > 0:\n            sentences_groups.append(current_sentence_group)\n\n        return sentences_groups", "docstring": "Split sentences in groups, given a specific group length.\n\nArgs:\ntotal_nb_sentences (int): Total available sentences.\ngroup_length (int): Limit of length for each group.\n\nReturns:\nlist: Contains groups (lists) of sentences.", "source": "juraj-google-style"}
{"code": "def is_table(engine, sql):\n    \n    if engine.dialect.has_table(engine, sql):\n        return True\n    return False", "docstring": "Check with the given sql arg is query or table\n\nArgs:\nengine: SQLAlchemy connection engine\nsql: SQL query or table name\n\nReturns:\nTrue for table or False if not", "source": "juraj-google-style"}
{"code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n    pass", "docstring": "Returns a `TensorSequenceLengthPair`.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.", "source": "github-repos"}
{"code": "def ParseOptions(self, options):\n    helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=['data_location'])\n    signature_identifiers = self.ParseStringOption(options, 'signature_identifiers')\n    if (signature_identifiers == 'list'):\n        self.list_signature_identifiers = True\n    if self.list_signature_identifiers:\n        return\n    self._ParseInformationalOptions(options)\n    self._ParseLogFileOptions(options)\n    self._ParseStorageMediaOptions(options)\n    self._destination_path = self.ParseStringOption(options, 'path', default_value='export')\n    if (not self._data_location):\n        logger.warning('Unable to automatically determine data location.')\n    argument_helper_names = ['artifact_definitions', 'process_resources']\n    helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=argument_helper_names)\n    self._ParseFilterOptions(options)\n    if (getattr(options, 'no_vss', False) or getattr(options, 'include_duplicates', False)):\n        self._skip_duplicates = False\n    self._EnforceProcessMemoryLimit(self._process_memory_limit)", "docstring": "Parses the options and initializes the front-end.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def fig_to_svg(fig):\n    buf = io.StringIO()\n    fig.savefig(buf, format='svg')\n    buf.seek(0)\n    return buf.getvalue()", "docstring": "Helper function to convert matplotlib figure to SVG string\n\nReturns:\nstr: figure as SVG string", "source": "codesearchnet"}
{"code": "def _parse_schema_field(field):\n    schema = bigquery.TableFieldSchema()\n    schema.name = field['name']\n    schema.type = field['type']\n    if 'mode' in field:\n        schema.mode = field['mode']\n    else:\n        schema.mode = 'NULLABLE'\n    if 'description' in field:\n        schema.description = field['description']\n    if 'fields' in field:\n        schema.fields = [_parse_schema_field(x) for x in field['fields']]\n    return schema", "docstring": "Parse a single schema field from dictionary.\n\nArgs:\nfield: Dictionary object containing serialized schema.\n\nReturns:\nA TableFieldSchema for a single column in BigQuery.", "source": "github-repos"}
{"code": "def ParseForwardedIps(self, forwarded_ips):\n    \n    addresses = []\n    forwarded_ips = forwarded_ips or []\n    for ip in forwarded_ips:\n      if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):\n        addresses.extend([str(addr) for addr in list(netaddr.IPNetwork(ip))])\n      else:\n        self.logger.warning('Could not parse IP address: \"%s\".', ip)\n    return addresses", "docstring": "Parse and validate forwarded IP addresses.\n\nArgs:\nforwarded_ips: list, the IP address strings to parse.\n\nReturns:\nlist, the valid IP address strings.", "source": "juraj-google-style"}
{"code": "def get_checkpoint_state(checkpoint_dir, latest_filename=None):\n    if isinstance(checkpoint_dir, os.PathLike):\n        checkpoint_dir = os.fspath(checkpoint_dir)\n    ckpt = None\n    coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir, latest_filename)\n    f = None\n    try:\n        if file_io.file_exists(coord_checkpoint_filename):\n            file_content = file_io.read_file_to_string(coord_checkpoint_filename)\n            ckpt = CheckpointState()\n            text_format.Merge(file_content, ckpt)\n            if not ckpt.model_checkpoint_path:\n                raise ValueError('Invalid checkpoint state loaded from ' + checkpoint_dir)\n            if not os.path.isabs(ckpt.model_checkpoint_path):\n                ckpt.model_checkpoint_path = os.path.join(checkpoint_dir, ckpt.model_checkpoint_path)\n            for i, p in enumerate(ckpt.all_model_checkpoint_paths):\n                if not os.path.isabs(p):\n                    ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)\n    except errors.OpError as e:\n        logging.warning('%s: %s', type(e).__name__, e)\n        logging.warning('%s: Checkpoint ignored', coord_checkpoint_filename)\n        return None\n    except text_format.ParseError as e:\n        logging.warning('%s: %s', type(e).__name__, e)\n        logging.warning('%s: Checkpoint ignored', coord_checkpoint_filename)\n        return None\n    finally:\n        if f:\n            f.close()\n    return ckpt", "docstring": "Returns CheckpointState proto from the \"checkpoint\" file.\n\nIf the \"checkpoint\" file contains a valid CheckpointState\nproto, returns it.\n\nArgs:\ncheckpoint_dir: The directory of checkpoints.\nlatest_filename: Optional name of the checkpoint file.  Default to\n'checkpoint'.\n\nReturns:\nA CheckpointState if the state was available, None\notherwise.\n\nRaises:\nValueError: if the checkpoint read doesn't have model_checkpoint_path set.", "source": "github-repos"}
{"code": "def testHeatEquation_WithDefaultBoundaryCondtion(self, lower_bc_type, upper_bc_type):\n\n    def final_cond_fn(x):\n        return math.e * math.sin(x)\n\n    def expected_result_fn(x):\n        return tf.sin(x)\n\n    @neumann\n    def boundary_fn(t, x):\n        del x\n        return -tf.exp(t)\n    lower_boundary_fn = boundary_fn if lower_bc_type == 'Neumann' else None\n    upper_boundary_fn = boundary_fn if upper_bc_type == 'Neumann' else None\n    grid = grids.uniform_grid(minimums=[0.0], maximums=[5 * math.pi], sizes=[1000], dtype=np.float32)\n    self._testHeatEquation(grid, final_t=1, time_step=0.01, final_cond_fn=final_cond_fn, expected_result_fn=expected_result_fn, one_step_fn=crank_nicolson_step(), lower_boundary_fn=lower_boundary_fn, upper_boundary_fn=upper_boundary_fn, error_tolerance=0.001)", "docstring": "Test for Default boundary conditions.\n\nTests solving heat equation with the following boundary conditions involving\ndefault boundary `u_xx(0, t) = 0` or `u_xx(5 pi, t) = 0`.\n\nThe exact solution `u(x, t=0) = e^t sin(x)`.\nArgs:\nlower_bc_type: Lower boundary condition type.\nupper_bc_type: Upper boundary condition type.", "source": "github-repos"}
{"code": "def compute_output_signature(self, input_signature):\n\n    def check_type_return_shape(s):\n        if not isinstance(s, tensor_lib.TensorSpec):\n            raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))\n        return s.shape\n    input_shape = nest.map_structure(check_type_return_shape, input_signature)\n    output_shape = self.compute_output_shape(input_shape)\n    dtype = self._compute_dtype\n    if dtype is None:\n        input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n        dtype = input_dtypes[0]\n    return nest.map_structure(lambda s: tensor_lib.TensorSpec(dtype=dtype, shape=s), output_shape)", "docstring": "Compute the output tensor signature of the layer based on the inputs.\n\nUnlike a TensorShape object, a TensorSpec object contains both shape\nand dtype information for a tensor. This method allows layers to provide\noutput dtype information if it is different from the input dtype.\nFor any layer that doesn't implement this function,\nthe framework will fall back to use `compute_output_shape`, and will\nassume that the output dtype matches the input dtype.\n\nArgs:\ninput_signature: Single TensorSpec or nested structure of TensorSpec\nobjects, describing a candidate input for the layer.\n\nReturns:\nSingle TensorSpec or nested structure of TensorSpec objects, describing\nhow the layer would transform the provided input.\n\nRaises:\nTypeError: If input_signature contains a non-TensorSpec object.", "source": "github-repos"}
{"code": "def link_to_storage(self, sensor_log):\n    if (self.walker is not None):\n        self._sensor_log.destroy_walker(self.walker)\n        self.walker = None\n    self.walker = sensor_log.create_walker(self.selector)\n    self._sensor_log = sensor_log", "docstring": "Attach this DataStreamer to an underlying SensorLog.\n\nCalling this method is required if you want to use this DataStreamer\nto generate reports from the underlying data in the SensorLog.\n\nYou can call it multiple times and it will unlink itself from any\nprevious SensorLog each time.\n\nArgs:\nsensor_log (SensorLog): Actually create a StreamWalker to go along with this\nstreamer so that we can check if it's triggered.", "source": "codesearchnet"}
{"code": "def publish(self, data):\n        \n        if self.entity_api_key == \"\":\n            return {'status': 'failure', 'response': 'No API key found in request'}\n        publish_url = self.base_url + \"api/0.1.0/publish\"\n        publish_headers = {\"apikey\": self.entity_api_key}\n        publish_data = {\n            \"exchange\": \"amq.topic\",\n            \"key\": str(self.entity_id),\n            \"body\": str(data)\n        }\n        with self.no_ssl_verification():\n            r = requests.post(publish_url, json.dumps(publish_data), headers=publish_headers)\n        response = dict()\n        if \"No API key\" in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"failure\"\n            r = json.loads(r.content.decode(\"utf-8\"))['message']\n        elif 'publish message ok' in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"success\"\n            r = r.content.decode(\"utf-8\")\n        else:\n            response[\"status\"] = \"failure\"\n            r = r.content.decode(\"utf-8\")\n        response[\"response\"] = str(r)\n        return response", "docstring": "This function allows an entity to publish data to the middleware.\n\nArgs:\ndata    (string): contents to be published by this entity.", "source": "juraj-google-style"}
{"code": "def description(self):\n    for e in self:\n        if isinstance(e, Description):\n            return e.value\n    raise NoSuchAnnotation", "docstring": "Obtain the description associated with the element.\n\nRaises:\n:class:`NoSuchAnnotation` if there is no associated description.", "source": "codesearchnet"}
{"code": "def __init__(self, name, requires, at_least_one, optional):\n        \n        self.name = name\n        self.requires = requires\n        self.at_least_one = at_least_one\n        self.optional = optional", "docstring": "Create Intent object\n\nArgs:\nname(str): Name for Intent\nrequires(list): Entities that are required\nat_least_one(list): One of these Entities are required\noptional(list): Optional Entities used by the intent", "source": "juraj-google-style"}
{"code": "def _required_idiom(tag_name, index, notfoundmsg):\n    cond = ''\n    if (index > 0):\n        cond = (' or len(el) - 1 < %d' % index)\n    tag_name = str(tag_name)\n    output = (IND + ('if not el%s:\\n' % cond))\n    output += ((IND + IND) + 'raise UserWarning(\\n')\n    output += (((IND + IND) + IND) + ('%s +\\n' % repr((notfoundmsg.strip() + '\\n'))))\n    output += ((((IND + IND) + IND) + repr(('Tag name: ' + tag_name))) + \" + '\\\\n' +\\n\")\n    output += (((IND + IND) + IND) + \"'El:' + str(el) + '\\\\n' +\\n\")\n    output += (((IND + IND) + IND) + \"'Dom:' + str(dom)\\n\")\n    output += ((IND + IND) + ')\\n\\n')\n    return ((output + IND) + ('el = el[%d]\\n\\n' % index))", "docstring": "Generate code, which make sure that `tag_name` has enoug items.\n\nArgs:\ntag_name (str): Name of the container.\nindex (int): Index of the item you want to obtain from container.\nnotfoundmsg (str): Raise :class:`.UserWarning` with debug data and\nfollowing message.\n\nReturns:\nstr: Python code.", "source": "codesearchnet"}
{"code": "def RecursiveDownload(dir_obj,\n                      target_dir,\n                      max_depth=10,\n                      depth=1,\n                      overwrite=False,\n                      max_threads=10):\n  \n  if not isinstance(dir_obj, aff4.AFF4Volume):\n    return\n\n  \n  thread_pool = threadpool.ThreadPool.Factory(\"Downloader\", max_threads)\n  thread_pool.Start()\n\n  for sub_file_entry in dir_obj.OpenChildren():\n    path_elements = [target_dir]\n    sub_target_dir = u\"/\".join(path_elements)\n    try:\n      \n      if isinstance(sub_file_entry, aff4.AFF4Stream):\n        args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token,\n                overwrite)\n        thread_pool.AddTask(\n            target=CopyAFF4ToLocal, args=args, name=\"Downloader\")\n      elif \"Container\" in sub_file_entry.behaviours:\n        if depth >= max_depth:  \n          continue\n        try:\n          os.makedirs(sub_target_dir)\n        except OSError:\n          pass\n        RecursiveDownload(\n            sub_file_entry,\n            sub_target_dir,\n            overwrite=overwrite,\n            depth=depth + 1)\n    except IOError:\n      logging.exception(\"Unable to download %s\", sub_file_entry.urn)\n    finally:\n      sub_file_entry.Close()\n\n  \n  if depth <= 1:\n    thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)", "docstring": "Recursively downloads a file entry to the target path.\n\nArgs:\ndir_obj: An aff4 object that contains children.\ntarget_dir: Full path of the directory to write to.\nmax_depth: Depth to download to. 1 means just the directory itself.\ndepth: Current depth of recursion.\noverwrite: Should we overwrite files that exist.\nmax_threads: Use this many threads to do the downloads.", "source": "juraj-google-style"}
{"code": "def shape(x):\n    if any_symbolic_tensors((x,)):\n        return x.shape\n    return backend.core.shape(x)", "docstring": "Gets the shape of the tensor input.\n\nNote: On the TensorFlow backend, when `x` is a `tf.Tensor` with dynamic\nshape, dimensions which are dynamic in the context of a compiled function\nwill have a `tf.Tensor` value instead of a static integer value.\n\nArgs:\nx: A tensor. This function will try to access the `shape` attribute of\nthe input tensor.\n\nReturns:\nA tuple of integers or None values, indicating the shape of the input\ntensor.\n\nExample:\n\n>>> x = keras.ops.zeros((8, 12))\n>>> keras.ops.shape(x)\n(8, 12)", "source": "github-repos"}
{"code": "def combine(*rnf_profiles):\n        \n\n        for rnf_profile in rnf_profiles:\n            self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width)\n            self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width)\n            self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width)\n            self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width)\n            self.coor_width = max(self.coor_width, rnf_profile.coor_width)", "docstring": "Combine more profiles and set their maximal values.\n\nArgs:\n*rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile.", "source": "juraj-google-style"}
{"code": "def get_shortest_distance(self, other):\n        \n        coords = ['x', 'y', 'z']\n        pos1 = self.loc[:, coords].values\n        pos2 = other.loc[:, coords].values\n        D = self._jit_pairwise_distances(pos1, pos2)\n        i, j = np.unravel_index(D.argmin(), D.shape)\n        d = D[i, j]\n        i, j = dict(enumerate(self.index))[i], dict(enumerate(other.index))[j]\n        return i, j, d", "docstring": "Calculate the shortest distance between self and other\n\nArgs:\nCartesian: other\n\nReturns:\ntuple: Returns a tuple ``i, j, d`` with the following meaning:\n\n``i``:\nThe index on self that minimises the pairwise distance.\n\n``j``:\nThe index on other that minimises the pairwise distance.\n\n``d``:\nThe distance between self and other. (float)", "source": "juraj-google-style"}
{"code": "def _block_orth(self, p1, p2, p3):\n    p1_shape = p1.shape.as_list()\n    if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list():\n        raise ValueError(f'The dimension of the matrices must be the same. Received p1.shape={p1.shape}, p2.shape={p2.shape} and p3.shape={p3.shape}.')\n    n = p1_shape[0]\n    eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n    kernel2x2x2 = {}\n\n    def matmul(p1, p2, p3):\n        return math_ops.matmul(math_ops.matmul(p1, p2), p3)\n\n    def cast(i, p):\n        \n        return i * p + (1 - i) * (eye - p)\n    for i in [0, 1]:\n        for j in [0, 1]:\n            for k in [0, 1]:\n                kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3))\n    return kernel2x2x2", "docstring": "Construct a 3 x 3 kernel.\n\nUsed to construct orthgonal kernel.\n\nArgs:\np1: A symmetric projection matrix.\np2: A symmetric projection matrix.\np3: A symmetric projection matrix.\n\nReturns:\nA 2 x 2 x 2 kernel.\nRaises:\nValueError: If the dimensions of p1, p2 and p3 are different.", "source": "github-repos"}
{"code": "def _build_frange_part(start, stop, stride, zfill=0):\n        \n        if stop is None:\n            return ''\n        pad_start = pad(start, zfill)\n        pad_stop = pad(stop, zfill)\n        if stride is None or start == stop:\n            return '{0}'.format(pad_start)\n        elif abs(stride) == 1:\n            return '{0}-{1}'.format(pad_start, pad_stop)\n        else:\n            return '{0}-{1}x{2}'.format(pad_start, pad_stop, stride)", "docstring": "Private method: builds a proper and padded\nframe range string.\n\nArgs:\nstart (int): first frame\nstop (int or None): last frame\nstride (int or None): increment\nzfill (int): width for zero padding\n\nReturns:\nstr:", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(MySQL4n6TimeOutputModule, self).__init__(output_mediator)\n    self._connection = None\n    self._count = None\n    self._cursor = None\n    self._dbname = 'log2timeline'\n    self._host = 'localhost'\n    self._password = 'forensic'\n    self._port = None\n    self._user = 'root'", "docstring": "Initializes the output module object.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def __init__(self, success, uid, *, payload=None):\n        \n        self.success = success\n        self.uid = uid\n        self.payload = payload if payload is not None else {}", "docstring": "Initialise the response object.\n\nArgs:\nsuccess (bool): True if the request was successful.\nuid (str): Unique response id.\npayload (dict): A dictionary with the response data.", "source": "juraj-google-style"}
{"code": "def __init__(self, tpu_cluster_resolver=None, device_assignment=None):\n    logging.warning('`tf.distribute.experimental.TPUStrategy` is deprecated, please use the non-experimental symbol `tf.distribute.TPUStrategy` instead.')\n    super().__init__(TPUExtended(self, tpu_cluster_resolver, device_assignment=device_assignment))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('TPUStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host)\n    self._enable_packed_variable_in_eager_mode = True", "docstring": "Synchronous training in TPU donuts or Pods.\n\nArgs:\ntpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\nwhich provides information about the TPU cluster.\ndevice_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to\nspecify the placement of replicas on the TPU cluster.", "source": "github-repos"}
{"code": "def authenticated_request(self, endpoint, method='GET', params=None, data=None):\n        \n        headers = {\n                'X-Access-Token' : self.access_token,\n                'X-Client-ID' : self.client_id\n                }\n        return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)", "docstring": "Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type\n\nParams:\nendpoint -- API endpoint to send request to\n\nKeyword Args:\nmethod -- GET, PUT, PATCH, DELETE, etc.\nparams -- parameters to encode in the request\ndata -- data to send with the request", "source": "juraj-google-style"}
{"code": "def parse(cls, version_string, partial=False, coerce=False):\n        \n        if not version_string:\n            raise ValueError('Invalid empty version string: %r' % version_string)\n\n        if partial:\n            version_re = cls.partial_version_re\n        else:\n            version_re = cls.version_re\n\n        match = version_re.match(version_string)\n        if not match:\n            raise ValueError('Invalid version string: %r' % version_string)\n\n        major, minor, patch, prerelease, build = match.groups()\n\n        if _has_leading_zero(major):\n            raise ValueError(\"Invalid leading zero in major: %r\" % version_string)\n        if _has_leading_zero(minor):\n            raise ValueError(\"Invalid leading zero in minor: %r\" % version_string)\n        if _has_leading_zero(patch):\n            raise ValueError(\"Invalid leading zero in patch: %r\" % version_string)\n\n        major = int(major)\n        minor = cls._coerce(minor, partial)\n        patch = cls._coerce(patch, partial)\n\n        if prerelease is None:\n            if partial and (build is None):\n                \n                return (major, minor, patch, None, None)\n            else:\n                prerelease = ()\n        elif prerelease == '':\n            prerelease = ()\n        else:\n            prerelease = tuple(prerelease.split('.'))\n            cls._validate_identifiers(prerelease, allow_leading_zeroes=False)\n\n        if build is None:\n            if partial:\n                build = None\n            else:\n                build = ()\n        elif build == '':\n            build = ()\n        else:\n            build = tuple(build.split('.'))\n            cls._validate_identifiers(build, allow_leading_zeroes=True)\n\n        return (major, minor, patch, prerelease, build)", "docstring": "Parse a version string into a Version() object.\n\nArgs:\nversion_string (str), the version string to parse\npartial (bool), whether to accept incomplete input\ncoerce (bool), whether to try to map the passed in string into a\nvalid Version.", "source": "juraj-google-style"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n    logits = outputs.logits\n    if target_sizes is not None:\n        if len(logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        if is_torch_tensor(target_sizes):\n            target_sizes = target_sizes.numpy()\n        semantic_segmentation = []\n        for idx in range(len(logits)):\n            resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = logits.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`BeitForSemanticSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple]` of length `batch_size`, *optional*):\nList of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\npredictions will not be resized.\n\nReturns:\nsemantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\nsegmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\nspecified). Each entry of each `torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def save_as_json(total: list, name='data.json', sort_by: str=None, no_duplicate=False, order='asc'):\n    if sort_by:\n        reverse = (order == 'desc')\n        total = sorted(total, key=itemgetter(sort_by), reverse=reverse)\n    if no_duplicate:\n        total = [key for (key, _) in groupby(total)]\n    data = json.dumps(total, ensure_ascii=False)\n    Path(name).write_text(data, encoding='utf-8')", "docstring": "Save what you crawled as a json file.\n\nArgs:\ntotal (list): Total of data you crawled.\nname (str, optional): Defaults to 'data.json'. The name of the file.\nsort_by (str, optional): Defaults to None. Sort items by a specific key.\nno_duplicate (bool, optional): Defaults to False. If True, it will remove duplicated data.\norder (str, optional): Defaults to 'asc'. The opposite option is 'desc'.", "source": "codesearchnet"}
{"code": "def request_via_socket(sock, search_target):\n    \n    msgparts = dict(HOST=MCAST_IP_PORT, MAN='\"ssdp:discover\"', MX='3', ST=search_target)\n    msg = encode_request('M-SEARCH * HTTP/1.1', **msgparts)\n    sock.sendto(msg, (MCAST_IP, MCAST_PORT))", "docstring": "Send an SSDP search request via the provided socket.\n\nArgs:\nsock: A socket suitable for use to send a broadcast message - preferably\none created by :py:func:`make_socket`.\nsearch_target (string): A :term:`resource type` target to search for.", "source": "juraj-google-style"}
{"code": "def AddTrip(self, schedule=None, headsign=None, service_period=None,\n              trip_id=None):\n    \n    if schedule is None:\n      assert self._schedule is not None\n      schedule = self._schedule\n    if trip_id is None:\n      trip_id = util.FindUniqueId(schedule.trips)\n    if service_period is None:\n      service_period = schedule.GetDefaultServicePeriod()\n    trip_class = self.GetGtfsFactory().Trip\n    trip_obj = trip_class(route=self, headsign=headsign,\n                service_period=service_period, trip_id=trip_id)\n    schedule.AddTripObject(trip_obj)\n    return trip_obj", "docstring": "Add a trip to this route.\n\nArgs:\nschedule: a Schedule object which will hold the new trip or None to use\nthe schedule of this route.\nheadsign: headsign of the trip as a string\nservice_period: a ServicePeriod object or None to use\nschedule.GetDefaultServicePeriod()\ntrip_id: optional trip_id for the new trip\n\nReturns:\na new Trip object", "source": "juraj-google-style"}
{"code": "def _write_to_hdx(self, action, data, id_field_name, file_to_upload=None):\n        \n        \n        file = None\n        try:\n            if file_to_upload:\n                file = open(file_to_upload, 'rb')\n                files = [('upload', file)]\n            else:\n                files = None\n            return self.configuration.call_remoteckan(self.actions()[action], data, files=files)\n        except Exception as e:\n            raisefrom(HDXError, 'Failed when trying to %s %s! (POST)' % (action, data[id_field_name]), e)\n        finally:\n            if file_to_upload and file:\n                file.close()", "docstring": "Creates or updates an HDX object in HDX and return HDX object metadata dict\n\nArgs:\naction (str): Action to perform eg. 'create', 'update'\ndata (Dict): Data to write to HDX\nid_field_name (str): Name of field containing HDX object identifier or None\nfile_to_upload (Optional[str]): File to upload to HDX\n\nReturns:\nDict: HDX object metadata", "source": "juraj-google-style"}
{"code": "def _update_old_module(old_module: types.ModuleType, new_module: types.ModuleType) -> None:\n    old_module.__dict__.clear()\n    old_module.__dict__.update(new_module.__dict__)", "docstring": "Mutate the old module version with the new dict.\n\nThis also try to update the class, functions,... from the old module (so\ninstances are updated in-place).\n\nArgs:\nold_module: Old module to update\nnew_module: New module", "source": "github-repos"}
{"code": "def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes:  [list, np.ndarray]) -> np.ndarray:\n    \n    return labels2onehot(proba2labels(proba, confident_threshold, classes), classes)", "docstring": "Convert vectors of probabilities to one-hot representations using confident threshold\n\nArgs:\nproba: samples where each sample is a vector of probabilities to belong with given classes\nconfident_threshold: boundary of probability to belong with a class\nclasses: array of classes' names\n\nReturns:\n2d array with one-hot representation of given samples", "source": "juraj-google-style"}
{"code": "def handle_result(self, completed_bundle: '_Bundle', completed_timers, result: 'TransformResult'):\n    with self._lock:\n        committed_bundles, unprocessed_bundles = self._commit_bundles(result.uncommitted_output_bundles, result.unprocessed_bundles)\n        self._metrics.commit_logical(completed_bundle, result.logical_metric_updates)\n        self._update_side_inputs_container(committed_bundles, result)\n        tasks = self._watermark_manager.update_watermarks(completed_bundle, result.transform, completed_timers, committed_bundles, unprocessed_bundles, result.keyed_watermark_holds, self._side_inputs_container)\n        self._pending_unblocked_tasks.extend(tasks)\n        if result.counters:\n            for counter in result.counters:\n                merged_counter = self._counter_factory.get_counter(counter.name, counter.combine_fn)\n                merged_counter.accumulator.merge([counter.accumulator])\n        existing_keyed_state = self._transform_keyed_states[result.transform]\n        for k, v in result.partial_keyed_state.items():\n            existing_keyed_state[k] = v\n        return committed_bundles", "docstring": "Handle the provided result produced after evaluating the input bundle.\n\nHandle the provided TransformResult, produced after evaluating\nthe provided committed bundle (potentially None, if the result of a root\nPTransform).\n\nThe result is the output of running the transform contained in the\nTransformResult on the contents of the provided bundle.\n\nArgs:\ncompleted_bundle: the bundle that was processed to produce the result.\ncompleted_timers: the timers that were delivered to produce the\ncompleted_bundle.\nresult: the ``TransformResult`` of evaluating the input bundle\n\nReturns:\nthe committed bundles contained within the handled result.", "source": "github-repos"}
{"code": "def get_auth(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None):\n    if is_list:\n        return self._get_typed_list_value(key=key, target_type=AuthSpec, type_convert=self.parse_auth_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)\n    return self._get_typed_value(key=key, target_type=AuthSpec, type_convert=self.parse_auth_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)", "docstring": "Get a the value corresponding to the key and converts it to `AuthSpec`.\n\nArgs\nkey: the dict key.\nis_list: If this is one element or a list of elements.\nis_optional: To raise an error if key was not found.\nis_secret: If the key is a secret.\nis_local: If the key is a local to this service.\ndefault: default value if is_optional is True.\noptions: list/tuple if provided, the value must be one of these values.\n\nReturns:\n`str`: value corresponding to the key.", "source": "codesearchnet"}
{"code": "def _CreateOutputModule(self, options):\n    \n    formatter_mediator = formatters_mediator.FormatterMediator(\n        data_location=self._data_location)\n\n    try:\n      formatter_mediator.SetPreferredLanguageIdentifier(\n          self._preferred_language)\n    except (KeyError, TypeError) as exception:\n      raise RuntimeError(exception)\n\n    mediator = output_mediator.OutputMediator(\n        self._knowledge_base, formatter_mediator,\n        preferred_encoding=self.preferred_encoding)\n    mediator.SetTimezone(self._preferred_time_zone)\n\n    try:\n      output_module = output_manager.OutputManager.NewOutputModule(\n          self._output_format, mediator)\n\n    except (KeyError, ValueError) as exception:\n      raise RuntimeError(\n          'Unable to create output module with error: {0!s}'.format(\n              exception))\n\n    if output_manager.OutputManager.IsLinearOutputModule(self._output_format):\n      output_file_object = open(self._output_filename, 'wb')\n      output_writer = tools.FileObjectOutputWriter(output_file_object)\n      output_module.SetOutputWriter(output_writer)\n\n    helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)\n\n    \n    \n    \n    missing_parameters = output_module.GetMissingArguments()\n    while missing_parameters:\n      for parameter in missing_parameters:\n        value = self._PromptUserForInput(\n            'Missing parameter {0:s} for output module'.format(parameter))\n        if value is None:\n          logger.warning(\n              'Unable to set the missing parameter for: {0:s}'.format(\n                  parameter))\n          continue\n\n        setattr(options, parameter, value)\n\n      helpers_manager.ArgumentHelperManager.ParseOptions(\n          options, output_module)\n      missing_parameters = output_module.GetMissingArguments()\n\n    return output_module", "docstring": "Creates the output module.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nReturns:\nOutputModule: output module.\n\nRaises:\nRuntimeError: if the output module cannot be created.", "source": "juraj-google-style"}
{"code": "def setup(self, keywords=None):\n    self._keywords = keywords\n    self._output_path = tempfile.mkdtemp()", "docstring": "Sets up the _keywords attribute.\n\nArgs:\nkeywords: pipe separated list of keyword to search", "source": "codesearchnet"}
{"code": "def Relay(self, inventory):\n        \n        inventory = InvPayload(type=inventory.InventoryType, hashes=[inventory.Hash.ToBytes()])\n        m = Message(\"inv\", inventory)\n        self.SendSerializedMessage(m)\n\n        return True", "docstring": "Wrap the inventory in a InvPayload object and send it over the write to the remote node.\n\nArgs:\ninventory:\n\nReturns:\nbool: True (fixed)", "source": "juraj-google-style"}
{"code": "def _ParsePerformanceOptions(self, options):\n    \n    self._buffer_size = getattr(options, 'buffer_size', 0)\n    if self._buffer_size:\n      \n      \n      \n      try:\n        if self._buffer_size[-1].lower() == 'm':\n          self._buffer_size = int(self._buffer_size[:-1], 10)\n          self._buffer_size *= self._BYTES_IN_A_MIB\n        else:\n          self._buffer_size = int(self._buffer_size, 10)\n      except ValueError:\n        raise errors.BadConfigOption(\n            'Invalid buffer size: {0!s}.'.format(self._buffer_size))\n\n    self._queue_size = self.ParseNumericOption(options, 'queue_size')", "docstring": "Parses the performance options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def attention_mask_ignore_padding(inputs, dtype=tf.float32):\n  \n  inputs = rename_length_to_memory_length(inputs)\n  return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9", "docstring": "Bias for encoder-decoder attention.\n\nArgs:\ninputs: a mtf.Tensor with shape [..., length_dim]\ndtype: a tf.dtype\n\nReturns:\na mtf.Tensor with shape [..., memory_length_dim]", "source": "juraj-google-style"}
{"code": "def iplot_state_paulivec(rho, figsize=None, slider=False, show_legend=False):\n    html_template = Template('\\n    <p>\\n        <div id=\"paulivec_$divNumber\"></div>\\n    </p>\\n    ')\n    javascript_template = Template('\\n    <script>\\n        requirejs.config({\\n            paths: {\\n                qVisualization: \"https:\n    rho = _validate_input_state(rho)\n    if (figsize is None):\n        figsize = (7, 5)\n    options = {'width': figsize[0], 'height': figsize[1], 'slider': int(slider), 'show_legend': int(show_legend)}\n    div_number = str(time.time())\n    div_number = re.sub('[.]', '', div_number)\n    data_to_plot = []\n    rho_data = process_data(rho)\n    data_to_plot.append(dict(data=rho_data))\n    html = html_template.substitute({'divNumber': div_number})\n    javascript = javascript_template.substitute({'divNumber': div_number, 'executions': data_to_plot, 'options': options})\n    display(HTML((html + javascript)))", "docstring": "Create a paulivec representation.\n\nGraphical representation of the input array.\n\nArgs:\nrho (array): State vector or density matrix.\nfigsize (tuple): Figure size in pixels.\nslider (bool): activate slider\nshow_legend (bool): show legend of graph content", "source": "codesearchnet"}
{"code": "def plot_time_series(self, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):\n    ax = plt.gca()\n    (plot_f, plot_data) = self.grab_data(f_start, f_stop, if_id)\n    if (logged and (self.header[b'nbits'] >= 8)):\n        plot_data = db(plot_data)\n    if (len(plot_data.shape) > 1):\n        plot_data = plot_data.mean(axis=1)\n    else:\n        plot_data = plot_data.mean()\n    extent = self._calc_extent(plot_f=plot_f, plot_t=self.timestamps, MJD_time=MJD_time)\n    plot_t = np.linspace(extent[2], extent[3], len(self.timestamps))\n    if MJD_time:\n        tlabel = 'Time [MJD]'\n    else:\n        tlabel = 'Time [s]'\n    if logged:\n        plabel = 'Power [dB]'\n    else:\n        plabel = 'Power [counts]'\n    if ('v' in orientation):\n        plt.plot(plot_data, plot_t, **kwargs)\n        plt.xlabel(plabel)\n    else:\n        plt.plot(plot_t, plot_data, **kwargs)\n        plt.xlabel(tlabel)\n        plt.ylabel(plabel)\n    ax.autoscale(axis='both', tight=True)", "docstring": "Plot the time series.\n\nArgs:\nf_start (float): start frequency, in MHz\nf_stop (float): stop frequency, in MHz\nlogged (bool): Plot in linear (False) or dB units (True),\nkwargs: keyword args to be passed to matplotlib imshow()", "source": "codesearchnet"}
{"code": "def encode_field(self, field, value):\n        \n        if isinstance(field, messages.BytesField):\n            if field.repeated:\n                value = [base64.b64encode(byte) for byte in value]\n            else:\n                value = base64.b64encode(value)\n        elif isinstance(field, message_types.DateTimeField):\n            \n            if field.repeated:\n                value = [i.isoformat() for i in value]\n            else:\n                value = value.isoformat()\n        return value", "docstring": "Encode a python field value to a JSON value.\n\nArgs:\nfield: A ProtoRPC field instance.\nvalue: A python value supported by field.\n\nReturns:\nA JSON serializable value appropriate for field.", "source": "juraj-google-style"}
{"code": "def safe_datetime_cast(self, col):\n        \n        casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce')\n\n        if len(casted_dates[casted_dates.isnull()]):\n            \n            \n            slice_ = casted_dates.isnull() & ~col[self.col_name].isnull()\n            col[slice_][self.col_name].apply(self.strptime_format)\n\n        return casted_dates", "docstring": "Parses string values into datetime.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.Series", "source": "juraj-google-style"}
{"code": "def strip_path_prefix(ipath, prefix):\n    \n    if prefix is None:\n        return ipath\n\n    return ipath[len(prefix):] if ipath.startswith(prefix) else ipath", "docstring": "Strip prefix from path.\n\nArgs:\nipath: input path\nprefix: the prefix to remove, if it is found in :ipath:\n\nExamples:\n>>> strip_path_prefix(\"/foo/bar\", \"/bar\")\n'/foo/bar'\n>>> strip_path_prefix(\"/foo/bar\", \"/\")\n'foo/bar'\n>>> strip_path_prefix(\"/foo/bar\", \"/foo\")\n'/bar'\n>>> strip_path_prefix(\"/foo/bar\", \"None\")\n'/foo/bar'", "source": "juraj-google-style"}
{"code": "def _convert_as_saved_model(self):\n    temp_dir = tempfile.mkdtemp()\n    try:\n        graph_def, input_tensors, output_tensors = self._convert_keras_to_saved_model(temp_dir)\n        if self.saved_model_dir:\n            return super(TFLiteKerasModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)\n    finally:\n        shutil.rmtree(temp_dir, True)", "docstring": "Converts a Keras model as a saved model.\n\nReturns:\nThe converted data in serialized format.", "source": "github-repos"}
{"code": "def process(self, element: tuple[str, prediction_log_pb2.PredictionLog]) -> Iterable[str]:\n    filename, predict_log = (element[0], element[1].predict_log)\n    output_value = predict_log.response.outputs\n    output_tensor = tf.io.decode_raw(output_value['output_0'].tensor_content, out_type=tf.float32)\n    max_index_output_tensor = tf.math.argmax(output_tensor, axis=0)\n    yield (filename + ',' + str(tf.get_static_value(max_index_output_tensor)))", "docstring": "Args:\nelement: Tuple of str, and PredictionLog. Inference can be parsed\nfrom prediction_log\nreturns:\nstr of filename and inference.", "source": "github-repos"}
{"code": "def __init__(self, app):\n        \n\n        self.app = app\n\n        \n        flask_secret_key = app.config.get('SECRET_KEY', None)\n        if not flask_secret_key:\n            raise ConfigError('Config setting SECRET_KEY is missing.')\n\n        \n        key = flask_secret_key.encode()\n        if len(key)<32:\n            print('WARNING: Flask-User TokenManager: SECRET_KEY is shorter than 32 bytes.')\n            key = key + b' '*32    \n\n        key32 = key[:32]\n        base64_key32 = base64.urlsafe_b64encode(key32)\n\n        \n        \n        from cryptography.fernet import Fernet\n        self.fernet = Fernet(base64_key32)", "docstring": "Check config settings and initialize the Fernet encryption cypher.\n\nFernet is basically AES128 in CBC mode, with a timestamp and a signature.\n\nArgs:\napp(Flask): The Flask application instance.", "source": "juraj-google-style"}
{"code": "def _exists(self, path):\n    return self._hdfs_client.status(path, strict=False) is not None", "docstring": "Returns True if path exists as a file or directory in HDFS.\n\nArgs:\npath: String in the form /...", "source": "github-repos"}
{"code": "def format_message(self, evr_hist_data):\n    size_formatter_info = {'s': (- 1), 'c': 1, 'i': 4, 'd': 4, 'u': 4, 'x': 4, 'hh': 1, 'h': 2, 'l': 4, 'll': 8, 'f': 8, 'g': 8, 'e': 8}\n    type_formatter_info = {'c': 'U{}', 'i': 'MSB_I{}', 'd': 'MSB_I{}', 'u': 'MSB_U{}', 'f': 'MSB_D{}', 'e': 'MSB_D{}', 'g': 'MSB_D{}', 'x': 'MSB_U{}'}\n    formatters = re.findall('%(?:\\\\d+\\\\$)?([cdieEfgGosuxXhlL]+)', self._message)\n    cur_byte_index = 0\n    data_chunks = []\n    for f in formatters:\n        f_size_char = f_type = f[(- 1)]\n        if (len(f) > 1):\n            f_size_char = f[:(- 1)]\n        fsize = size_formatter_info[f_size_char.lower()]\n        try:\n            if (f_type != 's'):\n                end_index = (cur_byte_index + fsize)\n                fstr = type_formatter_info[f_type.lower()].format((fsize * 8))\n                if ((fsize == 1) and ('MSB_' in fstr)):\n                    fstr = fstr[4:]\n                d = dtype.PrimitiveType(fstr).decode(evr_hist_data[cur_byte_index:end_index])\n            else:\n                end_index = str(evr_hist_data).index('\\x00', cur_byte_index)\n                d = str(evr_hist_data[cur_byte_index:end_index])\n            data_chunks.append(d)\n        except:\n            msg = 'Unable to format EVR Message with data {}'.format(evr_hist_data)\n            log.error(msg)\n            raise ValueError(msg)\n        cur_byte_index = end_index\n        if (f == 's'):\n            cur_byte_index += 1\n    if (len(formatters) == 0):\n        return self._message\n    else:\n        msg = self._message\n        for f in formatters:\n            if (len(f) > 1):\n                msg = msg.replace('%{}'.format(f), '%{}'.format(f[(- 1)]))\n        return (msg % tuple(data_chunks))", "docstring": "Format EVR message with EVR data\n\nGiven a byte array of EVR data, format the EVR's message attribute\nprintf format strings and split the byte array into appropriately\nsized chunks. Supports most format strings containing length and type\nfields.\n\nArgs:\nevr_hist_data: A bytearray of EVR data. Bytes are expected to be in\nMSB ordering.\n\nExample formatting::\n\n# This is the character '!', string 'Foo', and int '4279317316'\nbytearray([0x21, 0x46, 0x6f, 0x6f, 0x00, 0xff, 0x11, 0x33, 0x44])\n\nReturns:\nThe EVR's message string formatted with the EVR data or the\nunformatted EVR message string if there are no valid format\nstrings present in it.\n\nRaises:\nValueError: When the bytearray cannot be fully processed with the\nspecified format strings. This is usually a result of the\nexpected data length and the byte array length not matching.", "source": "codesearchnet"}
{"code": "def _remove_overlap_sub(self, also_remove_contiguous: bool) -> bool:\n        \n        \n        for i in range(len(self.intervals)):\n            for j in range(i + 1, len(self.intervals)):\n                first = self.intervals[i]\n                second = self.intervals[j]\n                if also_remove_contiguous:\n                    test = first.contiguous(second)\n                else:\n                    test = first.overlaps(second)\n                if test:\n                    newint = first.union(second)\n                    self.intervals.pop(j)\n                    self.intervals.pop(i)  \n                    self.intervals.append(newint)\n                    return True\n        return False", "docstring": "Called by :meth:`remove_overlap`. Removes the first overlap found.\n\nArgs:\nalso_remove_contiguous: treat contiguous (as well as overlapping)\nintervals as worthy of merging?\n\nReturns:\nbool: ``True`` if an overlap was removed; ``False`` otherwise", "source": "juraj-google-style"}
{"code": "def merge_classes(self, instances):\n    classes = {v.cls for v in instances if v.cls != self.empty}\n    return self.merge_values(sorted(classes, key=lambda cls: cls.full_name))", "docstring": "Merge the classes of the given instances.\n\nArgs:\ninstances: An iterable of instances.\n\nReturns:\nAn abstract.BaseValue created by merging the instances' classes.", "source": "github-repos"}
{"code": "def wait_for_contract(self, contract_address_hex, timeout=None):\n        \n        contract_address = decode_hex(contract_address_hex)\n        start_time = time.time()\n        result = self._raiden.chain.client.web3.eth.getCode(\n            to_checksum_address(contract_address),\n        )\n\n        current_time = time.time()\n        while not result:\n            if timeout and start_time + timeout > current_time:\n                return False\n\n            result = self._raiden.chain.client.web3.eth.getCode(\n                to_checksum_address(contract_address),\n            )\n            gevent.sleep(0.5)\n\n            current_time = time.time()\n\n        return len(result) > 0", "docstring": "Wait until a contract is mined\n\nArgs:\ncontract_address_hex (string): hex encoded address of the contract\ntimeout (int): time to wait for the contract to get mined\n\nReturns:\nTrue if the contract got mined, false otherwise", "source": "juraj-google-style"}
{"code": "def __init__(self, layer, trainable):\n    self._trainable = trainable\n    self._layer = layer\n    if self._layer is not None and (not hasattr(self._layer, '_resources')):\n        self._layer._resources = data_structures.Mapping()\n    self._cols_to_vars_map = collections.defaultdict(lambda: {})\n    self._cols_to_resources_map = collections.defaultdict(lambda: {})", "docstring": "Creates an _StateManagerImpl object.\n\nArgs:\nlayer: The input layer this state manager is associated with.\ntrainable: Whether by default, variables created are trainable or not.", "source": "github-repos"}
{"code": "def get_request_feature(self, name):\n        \n        if '[]' in name:\n            \n            return self.request.query_params.getlist(\n                name) if name in self.features else None\n        elif '{}' in name:\n            \n            return self._extract_object_params(\n                name) if name in self.features else {}\n        else:\n            \n            return self.request.query_params.get(\n                name) if name in self.features else None", "docstring": "Parses the request for a particular feature.\n\nArguments:\nname: A feature name.\n\nReturns:\nA feature parsed from the URL if the feature is supported, or None.", "source": "juraj-google-style"}
{"code": "def _SimpleDecoder(wire_type, decode_value):\n  \n\n  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):\n    if is_packed:\n      local_DecodeVarint = _DecodeVarint\n      def DecodePackedField(buffer, pos, end, message, field_dict):\n        value = field_dict.get(key)\n        if value is None:\n          value = field_dict.setdefault(key, new_default(message))\n        (endpoint, pos) = local_DecodeVarint(buffer, pos)\n        endpoint += pos\n        if endpoint > end:\n          raise _DecodeError('Truncated message.')\n        while pos < endpoint:\n          (element, pos) = decode_value(buffer, pos)\n          value.append(element)\n        if pos > endpoint:\n          del value[-1]   \n          raise _DecodeError('Packed element was truncated.')\n        return pos\n      return DecodePackedField\n    elif is_repeated:\n      tag_bytes = encoder.TagBytes(field_number, wire_type)\n      tag_len = len(tag_bytes)\n      def DecodeRepeatedField(buffer, pos, end, message, field_dict):\n        value = field_dict.get(key)\n        if value is None:\n          value = field_dict.setdefault(key, new_default(message))\n        while 1:\n          (element, new_pos) = decode_value(buffer, pos)\n          value.append(element)\n          \n          \n          pos = new_pos + tag_len\n          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:\n            \n            if new_pos > end:\n              raise _DecodeError('Truncated message.')\n            return new_pos\n      return DecodeRepeatedField\n    else:\n      def DecodeField(buffer, pos, end, message, field_dict):\n        (field_dict[key], pos) = decode_value(buffer, pos)\n        if pos > end:\n          del field_dict[key]  \n          raise _DecodeError('Truncated message.')\n        return pos\n      return DecodeField\n\n  return SpecificDecoder", "docstring": "Return a constructor for a decoder for fields of a particular type.\n\nArgs:\nwire_type:  The field's wire type.\ndecode_value:  A function which decodes an individual value, e.g.\n_DecodeVarint()", "source": "juraj-google-style"}
{"code": "def compress(self, counts_limit):\n        \n        if self.payload:\n            \n            \n            varint_len = counts_limit * (self.word_size + 1)\n            \n            encode_buf = (c_byte * (payload_header_size + varint_len))()\n\n            \n            varint_len = encode(addressof(self.counts), counts_limit,\n                                self.word_size,\n                                addressof(encode_buf) + payload_header_size,\n                                varint_len)\n\n            \n            self.payload.payload_len = varint_len\n            ctypes.memmove(addressof(encode_buf), addressof(self.payload), payload_header_size)\n\n            cdata = zlib.compress(ctypes.string_at(encode_buf, payload_header_size + varint_len))\n            return cdata\n        \n        raise RuntimeError('No payload to compress')", "docstring": "Compress this payload instance\nArgs:\ncounts_limit how many counters should be encoded\nstarting from index 0 (can be 0),\nReturn:\nthe compressed payload (python string)", "source": "juraj-google-style"}
{"code": "def reflection(n1, n2):\n    r = (abs(((n1 - n2) / (n1 + n2))) ** 2)\n    return r", "docstring": "Calculate the power reflection at the interface\nof two refractive index materials.\n\nArgs:\nn1 (float): Refractive index of material 1.\nn2 (float): Refractive index of material 2.\n\nReturns:\nfloat: The percentage of reflected power.", "source": "codesearchnet"}
{"code": "def get_soft_device_placement():\n    return context.context().soft_device_placement", "docstring": "Return status of soft device placement flag.\n\nIf enabled, ops can be placed on different devices than the device explicitly\nassigned by the user. This potentially has a large performance cost due to an\nincrease in data communication between devices.\n\nSome cases where soft_device_placement would modify device assignment are:\n1. no GPU/TPU implementation for the OP\n2. no GPU devices are known or registered\n3. need to co-locate with reftype input(s) which are from CPU\n4. an OP can not be compiled by XLA.  Common for TPU which always requires\nthe XLA compiler.\n\nFor TPUs, if this option is true, a feature called automatic outside\ncompilation is enabled. Automatic outside compilation will move uncompilable\nops within a TPU program to instead run on the host. This can be used when\nencountering compilation failures due to unsupported ops.\n\nReturns:\nA boolean indicating if soft placement is enabled.", "source": "github-repos"}
{"code": "def add_metric(self, labels, value, timestamp=None):\n        \n        self.samples.append(Sample(\n            self.name + '_info',\n            dict(dict(zip(self._labelnames, labels)), **value),\n            1,\n            timestamp,\n        ))", "docstring": "Add a metric to the metric family.\n\nArgs:\nlabels: A list of label values\nvalue: A dict of labels", "source": "juraj-google-style"}
{"code": "def _parallel_part_processors(part_processors: Sequence[PartProcessorWithMatchFn]) -> PartProcessorFn:\n\n    async def part_processor(content: ProcessorPart) -> AsyncIterable[ProcessorPart]:\n        output_queue = asyncio.Queue()\n        processors = []\n        match_fns = []\n        passthrough_fallback = False\n        passthrough_always = False\n        for p in part_processors:\n            if p is PASSTHROUGH_FALLBACK:\n                passthrough_fallback = True\n                continue\n            if p is PASSTHROUGH_ALWAYS:\n                passthrough_always = True\n                continue\n            processors.append(_CaptureReservedSubstreams(output_queue, p))\n            match_fns.append(p.match)\n        parallel_processor = _CaptureReservedSubstreams(output_queue, map_processor.parallel_part_functions(processors, match_fns, with_default_output=passthrough_fallback, with_always_output=passthrough_always))\n        content = parallel_processor(content)\n        create_task(_enqueue_content(content, output_queue))\n        while (part := (await output_queue.get())) is not None:\n            yield part\n            output_queue.task_done()\n    return part_processor", "docstring": "Combine **part processors** in parallel.\n\nAdds debug and status streams to the output.\n\nNOTE: Substreams debug and status are yielded immediately instead of passing\nthem to the next processor.\n\nArgs:\npart_processors: sequence of part processors to compute concurrently.\n\nReturns:\nPart processor that computes the output of the provided sequence of part\nprocessors concurrently.", "source": "github-repos"}
{"code": "def _merge_hdx_update(self, object_type, id_field_name, file_to_upload=None, **kwargs):\n    merge_two_dictionaries(self.data, self.old_data)\n    if ('batch_mode' in kwargs):\n        self.data['batch_mode'] = kwargs['batch_mode']\n    if ('skip_validation' in kwargs):\n        self.data['skip_validation'] = kwargs['skip_validation']\n    ignore_field = self.configuration[('%s' % object_type)].get('ignore_on_update')\n    self.check_required_fields(ignore_fields=[ignore_field])\n    operation = kwargs.get('operation', 'update')\n    self._save_to_hdx(operation, id_field_name, file_to_upload)", "docstring": "Helper method to check if HDX object exists and update it\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\nfile_to_upload (Optional[str]): File to upload to HDX\n**kwargs: See below\noperation (string): Operation to perform eg. patch. Defaults to update.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, mean, volatility, dtype=None, name=None):\n    self._name = name or 'geometric_brownian_motion'\n    with tf.name_scope(self._name):\n        self._mean, self._mean_is_constant = pw.convert_to_tensor_or_func(mean, dtype=dtype, name='mean')\n        self._dtype = dtype or self._mean.dtype\n        self._volatility, self._volatility_is_constant = pw.convert_to_tensor_or_func(volatility, dtype=self._dtype, name='volatility')\n        self._volatility_squared = self._volatility_squared_from_volatility(self._volatility, self._volatility_is_constant, dtype=self._dtype, name='volatility_squared')\n        self._dim = 1", "docstring": "Initializes the Geometric Brownian Motion.\n\nArgs:\nmean: A real `Tensor` broadcastable to `batch_shape + [1]` or an instance\nof left-continuous `PiecewiseConstantFunc` with `batch_shape + [1]`\ndimensions. Here `batch_shape` represents a batch of independent\nGBMs. Corresponds to the mean drift of the Ito process.\nvolatility: A real `Tensor` broadcastable to `batch_shape + [1]` or an\ninstance of left-continuous `PiecewiseConstantFunc` of the same `dtype`\nand `batch_shape` as set by `mean`. Corresponds to the volatility of the\nprocess and should be positive.\ndtype: The default dtype to use when converting values to `Tensor`s.\nDefault value: `None` which means that default dtypes inferred from\n`mean` is used.\nname: Python string. The name to give to the ops created by this class.\nDefault value: `None` which maps to the default name\n'geometric_brownian_motion'.", "source": "github-repos"}
{"code": "def make_pose(translation, rotation):\n    \n    pose = np.zeros((4, 4))\n    pose[:3, :3] = rotation\n    pose[:3, 3] = translation\n    pose[3, 3] = 1.0\n    return pose", "docstring": "Makes a homogenous pose matrix from a translation vector and a rotation matrix.\n\nArgs:\ntranslation: a 3-dim iterable\nrotation: a 3x3 matrix\n\nReturns:\npose: a 4x4 homogenous matrix", "source": "juraj-google-style"}
{"code": "def _parse_hparams(hparams):\n    prefixes = ['agent_', 'optimizer_', 'runner_', 'replay_buffer_']\n    ret = []\n    for prefix in prefixes:\n        ret_dict = {}\n        for key in hparams.values():\n            if (prefix in key):\n                par_name = key[len(prefix):]\n                ret_dict[par_name] = hparams.get(key)\n        ret.append(ret_dict)\n    return ret", "docstring": "Split hparams, based on key prefixes.\n\nArgs:\nhparams: hyperparameters\n\nReturns:\nTuple of hparams for respectably: agent, optimizer, runner, replay_buffer.", "source": "codesearchnet"}
{"code": "def CollectFromWindowsRegistry(\n      cls, artifacts_registry, knowledge_base, searcher):\n    \n    for preprocess_plugin in cls._windows_registry_plugins.values():\n      artifact_definition = artifacts_registry.GetDefinitionByName(\n          preprocess_plugin.ARTIFACT_DEFINITION_NAME)\n      if not artifact_definition:\n        logger.warning('Missing artifact definition: {0:s}'.format(\n            preprocess_plugin.ARTIFACT_DEFINITION_NAME))\n        continue\n\n      logger.debug('Running Windows Registry preprocessor plugin: {0:s}'.format(\n          preprocess_plugin.ARTIFACT_DEFINITION_NAME))\n      try:\n        preprocess_plugin.Collect(knowledge_base, artifact_definition, searcher)\n      except (IOError, errors.PreProcessFail) as exception:\n        logger.warning((\n            'Unable to collect value from artifact definition: {0:s} '\n            'with error: {1!s}').format(\n                preprocess_plugin.ARTIFACT_DEFINITION_NAME, exception))", "docstring": "Collects values from Windows Registry values.\n\nArgs:\nartifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts\ndefinitions registry.\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nsearcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to\npreprocess the Windows Registry.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        if \"widget\" not in kwargs:\n            kwargs[\"widget\"] = PasswordStrengthInput(render_value=False)\n\n        super(PasswordField, self).__init__(*args, **kwargs)", "docstring": "Init method.\n\nArgs:\n*args (): Django's args for a form field.\n**kwargs (): Django's kwargs for a form field.", "source": "juraj-google-style"}
{"code": "def retrieve_data_from_config(msg, cfg):\n    msg_type = msg.__class__.__name__.lower()\n    for attr in msg:\n        if ((getattr(msg, attr) is None) and (attr in cfg.data[msg.profile][msg_type])):\n            setattr(msg, attr, cfg.data[msg.profile][msg_type][attr])", "docstring": "Update msg attrs with values from the profile configuration if the\nmsg.attr=None, else leave it alone.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def _CheckForOutOfOrderStepAndMaybePurge(self, event):\n    \n    if event.step < self.most_recent_step and event.HasField('summary'):\n      self._Purge(event, by_tags=True)", "docstring": "Check for out-of-order event.step and discard expired events for tags.\n\nCheck if the event is out of order relative to the global most recent step.\nIf it is, purge outdated summaries for tags that the event contains.\n\nArgs:\nevent: The event to use as reference. If the event is out-of-order, all\nevents with the same tags, but with a greater event.step will be purged.", "source": "juraj-google-style"}
{"code": "def remove_container(self, container, v=False, link=False, force=False):\n    params = {'v': v, 'link': link, 'force': force}\n    res = self._delete(self._url('/containers/{0}', container), params=params)\n    self._raise_for_status(res)", "docstring": "Remove a container. Similar to the ``docker rm`` command.\n\nArgs:\ncontainer (str): The container to remove\nv (bool): Remove the volumes associated with the container\nlink (bool): Remove the specified link and not the underlying\ncontainer\nforce (bool): Force the removal of a running container (uses\n``SIGKILL``)\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def transpose(self, name=None):\n    \n\n    if name is None:\n      name = self.module_name + \"_transpose\"\n    return AddBias(output_shape=lambda: self._input_shape,\n                   bias_dims=self._bias_dims,\n                   initializers=self._initializers,\n                   regularizers=self._regularizers,\n                   name=name)", "docstring": "Returns transposed `AddBias` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.module_name`.\n\nReturns:\nTransposed `AddBias` module.", "source": "juraj-google-style"}
{"code": "def download(url):\n    filepath = get_file(fname='tmp.zip', origin=url, extract=True)\n    base_dir = os.path.dirname(filepath)\n    weights_file = os.path.join(base_dir, 'weights.h5')\n    params_file = os.path.join(base_dir, 'params.json')\n    preprocessor_file = os.path.join(base_dir, 'preprocessor.pickle')\n    return (weights_file, params_file, preprocessor_file)", "docstring": "Download a trained weights, config and preprocessor.\n\nArgs:\nurl (str): target url.", "source": "codesearchnet"}
{"code": "def make_quadratic(poly, strength, vartype=None, bqm=None):\n    if (bqm is None):\n        if (vartype is None):\n            raise ValueError('one of vartype and bqm must be provided')\n        bqm = BinaryQuadraticModel.empty(vartype)\n    else:\n        if (not isinstance(bqm, BinaryQuadraticModel)):\n            raise TypeError('create_using must be a BinaryQuadraticModel')\n        if ((vartype is not None) and (vartype is not bqm.vartype)):\n            raise ValueError('one of vartype and create_using must be provided')\n    bqm.info['reduction'] = {}\n    new_poly = {}\n    for (term, bias) in iteritems(poly):\n        if (len(term) == 0):\n            bqm.add_offset(bias)\n        elif (len(term) == 1):\n            (v,) = term\n            bqm.add_variable(v, bias)\n        else:\n            new_poly[term] = bias\n    return _reduce_degree(bqm, new_poly, vartype, strength)", "docstring": "Create a binary quadratic model from a higher order polynomial.\n\nArgs:\npoly (dict):\nPolynomial as a dict of form {term: bias, ...}, where `term` is a tuple of\nvariables and `bias` the associated bias.\n\nstrength (float):\nStrength of the reduction constraint. Insufficient strength can result in the\nbinary quadratic model not having the same minimizations as the polynomial.\n\nvartype (:class:`.Vartype`, optional):\nVartype of the polynomial. If `bqm` is provided, vartype is not required.\n\nbqm (:class:`.BinaryQuadraticModel`, optional):\nThe terms of the reduced polynomial are added to this binary quadratic model.\nIf not provided, a new binary quadratic model is created.\n\nReturns:\n:class:`.BinaryQuadraticModel`\n\nExamples:\n\n>>> poly = {(0,): -1, (1,): 1, (2,): 1.5, (0, 1): -1, (0, 1, 2): -2}\n>>> bqm = dimod.make_quadratic(poly, 5.0, dimod.SPIN)", "source": "codesearchnet"}
{"code": "def get_newest(blocks, layout_blocks):\n    \n    layout_temp = list(layout_blocks)\n\n    for i in range(0, len(layout_temp)):\n        for k in range(0, len(layout_blocks)):\n            if blocks[layout_temp[i]].ec_hdr.image_seq != blocks[layout_blocks[k]].ec_hdr.image_seq:\n                continue\n\n            if blocks[layout_temp[i]].leb_num != blocks[layout_blocks[k]].leb_num:\n                continue\n\n            if blocks[layout_temp[i]].vid_hdr.sqnum > blocks[layout_blocks[k]].vid_hdr.sqnum:\n                del layout_blocks[k]\n                break\n\n    return layout_blocks", "docstring": "Filter out old layout blocks from list\n\nArguments:\nList:blocks        -- List of block objects\nList:layout_blocks -- List of layout block indexes\n\nReturns:\nList -- Newest layout blocks in list", "source": "juraj-google-style"}
{"code": "class Chunk:\n    content: Content\n    id: str = field(default_factory=lambda: str(uuid.uuid4()))\n    index: int = 0\n    metadata: Dict[str, Any] = field(default_factory=dict)\n    embedding: Optional[Embedding] = None", "docstring": "Represents a chunk of embeddable content with metadata.\n\nArgs:\ncontent: The actual content of the chunk\nid: Unique identifier for the chunk\nindex: Index of this chunk within the original document\nmetadata: Additional metadata about the chunk (e.g., document source)\nembedding: Vector embeddings of the content", "source": "github-repos"}
{"code": "def get_excel_workbook(api_data, result_info_key, identifier_keys):\n    cleaned_data = []\n    for item_data in api_data:\n        result_info = item_data.pop(result_info_key, {})\n        cleaned_item_data = {}\n        if ('meta' in item_data):\n            meta = item_data.pop('meta')\n            cleaned_item_data['meta'] = meta\n        for key in item_data:\n            cleaned_item_data[key] = item_data[key]['result']\n        cleaned_item_data[result_info_key] = result_info\n        cleaned_data.append(cleaned_item_data)\n    data_list = copy.deepcopy(cleaned_data)\n    workbook = openpyxl.Workbook()\n    write_worksheets(workbook, data_list, result_info_key, identifier_keys)\n    return workbook", "docstring": "Generates an Excel workbook object given api_data returned by the Analytics API\n\nArgs:\napi_data: Analytics API data as a list of dicts (one per identifier)\nresult_info_key: the key in api_data dicts that contains the data results\nidentifier_keys: the list of keys used as requested identifiers\n(address, zipcode, block_id, etc)\n\nReturns:\nraw excel file data", "source": "codesearchnet"}
{"code": "def cleanup(self):\n    with LogTask('Stop prefix'):\n        self.stop()\n    with LogTask('Tag prefix as uninitialized'):\n        os.unlink(self.paths.prefix_lagofile())", "docstring": "Stops any running entities in the prefix and uninitializes it, usually\nyou want to do this if you are going to remove the prefix afterwards\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def publish(self, topic, dct):\n        \n        get_logger().info(\"Publishing message {} on routing key \"\n                          \"{}...\".format(dct, topic))\n\n        self._channel.basic_publish(\n            exchange=self.exchange,\n            routing_key=topic,\n            body=json.dumps(dct)\n        )", "docstring": "Send a dict with internal routing key to the exchange.\n\nArgs:\ntopic: topic to publish the message to\ndct: dict object to send", "source": "juraj-google-style"}
{"code": "def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True):\n    conversion = cell\n    if isinstance(cell, (int, float)):\n        pass\n    elif isinstance(cell, basestring):\n        if (not cell):\n            conversion = None\n        else:\n            conversion = auto_convert_string_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=parens_as_neg)\n    elif (cell != None):\n        flagable.flag_change(flags, 'warning', position, worksheet, flagable.FLAGS['unknown-to-string'])\n        conversion = str(cell)\n        if (not conversion):\n            conversion = None\n    else:\n        pass\n    return conversion", "docstring": "Performs a first step conversion of the cell to check\nit's type or try to convert if a valid conversion exists.\n\nArgs:\nparens_as_neg: Converts numerics surrounded by parens to negative values", "source": "codesearchnet"}
{"code": "def from_file(cls, filename, *, strict=True):\n        \n        config = cls()\n        config.load_from_file(filename, strict=strict)\n        return config", "docstring": "Create a new Config object from a configuration file.\n\nArgs:\nfilename (str): The location and name of the configuration file.\nstrict (bool): If true raises a ConfigLoadError when the configuration\ncannot be found.\n\nReturns:\nAn instance of the Config class.\n\nRaises:\nConfigLoadError: If the configuration cannot be found.", "source": "juraj-google-style"}
{"code": "def create_assembly_instance(self, assembly_uri, part_uri, configuration):\n    payload = {'documentId': part_uri['did'], 'elementId': part_uri['eid'], 'versionId': part_uri['wvm'], 'isAssembly': False, 'isWholePartStudio': True, 'configuration': self.encode_configuration(part_uri['did'], part_uri['eid'], configuration)}\n    return self._api.request('post', (((((((('/api/assemblies/d/' + assembly_uri['did']) + '/') + assembly_uri['wvm_type']) + '/') + assembly_uri['wvm']) + '/e/') + assembly_uri['eid']) + '/instances'), body=payload)", "docstring": "Insert a configurable part into an assembly.\n\nArgs:\n- assembly (dict): eid, wid, and did of the assembly into which will be inserted\n- part (dict): eid and did of the configurable part\n- configuration (dict): the configuration\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"}
{"code": "def get_realtime_urls(admin_view_func=lambda x: x):\n    \n    from .widgets import REALTIME_WIDGETS\n    return [url(w.url_regex, admin_view_func(w.as_view()), name=w.url_name)\n            for w in REALTIME_WIDGETS]", "docstring": "Get the URL for real-time widgets.\n\nArgs:\nadmin_view_func (callable): an admin_view method from an AdminSite\ninstance. By default: identity.\n\nReturns:\nlist: the list of the real-time URLs as django's ``url()``.", "source": "juraj-google-style"}
{"code": "class JSON_To_BigQuery(json.JSONEncoder):\n\n    def default(self, obj):\n        if isinstance(obj, bytes):\n            return base64.standard_b64encode(obj).decode('ascii')\n        elif isinstance(obj, datetime.datetime):\n            return obj.strftime('%s %s' % (self.BIGQUERY_DATE_FORMAT, self.BIGQUERY_TIME_FORMAT))\n        elif isinstance(obj, datetime.date):\n            return obj.strftime(self.BIGQUERY_DATE_FORMAT)\n        elif isinstance(obj, datetime.time):\n            return obj.strftime(self.BIGQUERY_TIME_FORMAT)\n        elif isinstance(obj, map):\n            return list(obj)\n        else:\n            return super(JSON_To_BigQuery, self).default(obj)", "docstring": "Translate complex Python objects into BigQuery formats where json does not have defaults.\n\nUsage: json.dumps(..., cls=JSON_To_BigQuery)\n\nCurrently translates:\nbytes -> base64\ndetetime - > str\ndete - > str\ntime - > str\n\nArgs:\nobj -  any json dumps parameter without a default handler\n\nReturns:\nAlways a string version of the object passed in.", "source": "github-repos"}
{"code": "def get_help(func):\n    help_text = ''\n    if isinstance(func, dict):\n        name = context_name(func)\n        help_text = (('\\n' + name) + '\\n\\n')\n        doc = inspect.getdoc(func)\n        if (doc is not None):\n            doc = inspect.cleandoc(doc)\n            help_text += (doc + '\\n')\n        return help_text\n    sig = func.metadata.signature()\n    doc = inspect.getdoc(func)\n    if (doc is not None):\n        doc = inspect.cleandoc(doc)\n    help_text += (('\\n' + sig) + '\\n\\n')\n    if (doc is not None):\n        help_text += (doc + '\\n')\n    if inspect.isclass(func):\n        func = func.__init__\n    if func.metadata.load_from_doc:\n        return help_text\n    help_text += '\\nArguments:\\n'\n    for (key, info) in func.metadata.annotated_params.items():\n        type_name = info.type_name\n        desc = ''\n        if (info.desc is not None):\n            desc = info.desc\n        help_text += ('  - %s (%s): %s\\n' % (key, type_name, desc))\n    return help_text", "docstring": "Return usage information about a context or function.\n\nFor contexts, just return the context name and its docstring\nFor functions, return the function signature as well as its\nargument types.\n\nArgs:\nfunc (callable): An annotated callable function\n\nReturns:\nstr: The formatted help text", "source": "codesearchnet"}
{"code": "class ScoreAggregation(AggregationFn, _AggModelIdMixin, _SourcePredictionMixin):\n\n    def __init__(self, agg_func: Callable[[Iterable[float]], float], agg_model_id: Optional[str]=None, include_source_predictions: bool=False):\n        self._agg = agg_func\n        _AggModelIdMixin.__init__(self, agg_model_id)\n        _SourcePredictionMixin.__init__(self, include_source_predictions)\n\n    def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:\n        \n        result_dict: dict[str, Any] = {}\n        _AggModelIdMixin.add_model_id(self, result_dict)\n        _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions)\n        scores = [prediction.score for prediction in predictions if prediction.score is not None and (not math.isnan(prediction.score))]\n        if len(scores) > 0:\n            result_dict['score'] = self._agg(scores)\n        elif all(map(lambda x: x.score is None, predictions)):\n            result_dict['score'] = None\n        else:\n            result_dict['score'] = float('NaN')\n        return AnomalyPrediction(**result_dict)", "docstring": "Aggregates anomaly predictions based on their scores.\n\nThis is an abstract base class for `AggregationFn`s that combine multiple\n`AnomalyPrediction` objects into a single `AnomalyPrediction` based on\nthe scores of the input predictions.\n\nArgs:\nagg_func (Callable[[Iterable[float]], float]): A function that aggregates\na collection of anomaly scores (floats) into a single score.\nagg_model_id (Optional[str]): The model id used in aggregated predictions.\nDefaults to None.\ninclude_source_predictions (bool): If True, include the input predictions in\nthe `source_predictions` of the output. Defaults to False.", "source": "github-repos"}
{"code": "def get_speaker_info(self, refresh=False, timeout=None):\n    if (self.speaker_info and (refresh is False)):\n        return self.speaker_info\n    else:\n        response = requests.get((('http:\n        dom = XML.fromstring(response.content)\n    device = dom.find('{urn:schemas-upnp-org:device-1-0}device')\n    if (device is not None):\n        self.speaker_info['zone_name'] = device.findtext('{urn:schemas-upnp-org:device-1-0}roomName')\n        self.speaker_info['player_icon'] = device.findtext('{urn:schemas-upnp-org:device-1-0}iconList/{urn:schemas-upnp-org:device-1-0}icon/{urn:schemas-upnp-org:device-1-0}url')\n        self.speaker_info['uid'] = self.uid\n        self.speaker_info['serial_number'] = device.findtext('{urn:schemas-upnp-org:device-1-0}serialNum')\n        self.speaker_info['software_version'] = device.findtext('{urn:schemas-upnp-org:device-1-0}softwareVersion')\n        self.speaker_info['hardware_version'] = device.findtext('{urn:schemas-upnp-org:device-1-0}hardwareVersion')\n        self.speaker_info['model_number'] = device.findtext('{urn:schemas-upnp-org:device-1-0}modelNumber')\n        self.speaker_info['model_name'] = device.findtext('{urn:schemas-upnp-org:device-1-0}modelName')\n        self.speaker_info['display_version'] = device.findtext('{urn:schemas-upnp-org:device-1-0}displayVersion')\n        mac = self.speaker_info['serial_number'].split(':')[0]\n        self.speaker_info['mac_address'] = mac\n        return self.speaker_info\n    return None", "docstring": "Get information about the Sonos speaker.\n\nArguments:\nrefresh(bool): Refresh the speaker info cache.\ntimeout: How long to wait for the server to send\ndata before giving up, as a float, or a\n`(connect timeout, read timeout)` tuple\ne.g. (3, 5). Default is no timeout.\n\nReturns:\ndict: Information about the Sonos speaker, such as the UID,\nMAC Address, and Zone Name.", "source": "codesearchnet"}
{"code": "def _IDW(self, latitude, longitude, radius=1):\n    tile = self.get_file(latitude, longitude)\n    if (tile is None):\n        return None\n    return tile._InverseDistanceWeighted(latitude, longitude, radius)", "docstring": "Return the interpolated elevation at a point.\n\nLoad the correct tile for latitude and longitude given.\nIf the tile doesn't exist, return None. Otherwise,\ncall the tile's Inverse Distance Weighted function and\nreturn the elevation.\n\nArgs:\nlatitude: float with the latitude in decimal degrees\nlongitude: float with the longitude in decimal degrees\nradius: int of 1 or 2 indicating the approximate radius\nof adjacent cells to include\n\nReturns:\na float of the interpolated elevation with the same unit\nas the .hgt file (meters)", "source": "codesearchnet"}
{"code": "def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):\n        \n        if pandas is None:\n            raise ValueError(_NO_PANDAS_ERROR)\n        return pandas.DataFrame()", "docstring": "Create an empty dataframe.\n\nArgs:\nbqstorage_client (Any):\nIgnored. Added for compatibility with RowIterator.\ndtypes (Any):\nIgnored. Added for compatibility with RowIterator.\nprogress_bar_type (Any):\nIgnored. Added for compatibility with RowIterator.\n\nReturns:\npandas.DataFrame:\nAn empty :class:`~pandas.DataFrame`.", "source": "juraj-google-style"}
{"code": "def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):\n    color = _get_color(color, tree_type=NeuriteType.soma)\n    if isinstance(soma, SomaCylinders):\n        for (start, end) in zip(soma.points, soma.points[1:]):\n            common.plot_cylinder(ax, start=start[COLS.XYZ], end=end[COLS.XYZ], start_radius=start[COLS.R], end_radius=end[COLS.R], color=color, alpha=alpha)\n    else:\n        common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius, color=color, alpha=alpha)\n    _update_3d_datalim(ax, soma)", "docstring": "Generates a 3d figure of the soma.\n\nArgs:\nax(matplotlib axes): on what to plot\nsoma(neurom.core.Soma): plotted soma\ncolor(str or None): Color of plotted values, None corresponds to default choice\nalpha(float): Transparency of plotted values", "source": "codesearchnet"}
{"code": "def ansible_inventory(self, keys=['vm-type', 'groups', 'vm-provider']):\n    lansible = LagoAnsible(self._prefix)\n    return lansible.get_inventory_str(keys=keys)", "docstring": "Get an Ansible inventory as a string, ``keys`` should be list on which\nto group the hosts by. You can use any key defined in LagoInitFile.\n\nExamples of possible `keys`:\n\n`keys=['disks/0/metadata/arch']`, would group the hosts by the\narchitecture.\n\n`keys=['/disks/0/metadata/distro', 'disks/0/metadata/arch']`,\nwould create groups by architecture and also by distro.\n\n`keys=['groups']` - would group hosts by the groups defined for\neach VM in the LagoInitFile, i.e.:\n\ndomains:\n\nvm-01:\n...\ngroups: web-server\n..\nvm-02:\n..\ngroups: db-server\n\n\nArgs:\nkeys (list of str): Path to the keys that will be used to\ncreate groups.\n\nReturns:\nstr: INI-like Ansible inventory", "source": "codesearchnet"}
{"code": "def _gather_beams(tensor: torch.Tensor, beam_indices: torch.Tensor) -> torch.Tensor:\n    while len(beam_indices.shape) < len(tensor.shape):\n        beam_indices = beam_indices.unsqueeze(-1)\n    gathered_tensor = torch.take_along_dim(input=tensor, indices=beam_indices, dim=1)\n    return gathered_tensor", "docstring": "Gathers the beam slices indexed by beam_indices into new beam array.\n\nArgs:\ntensor (`torch.Tensor`): A tensor containing data to be gathered. The tensor is a 2D or a 3D tensor\nwith the two first dimensions depicting the batch and the beam dimensions.\nbeam_indices (`torch.Tensor` of shape `(batch_size, num_beams_to_select)`): The indices of the beams to\nselect .\n\nReturns:\nA tensor with the selected beams", "source": "github-repos"}
{"code": "def _relative_position_to_absolute_position_unmasked(x):\n    x_shape = common_layers.shape_list(x)\n    batch = x_shape[0]\n    heads = x_shape[1]\n    length = x_shape[2]\n    col_pad = tf.zeros((batch, heads, length, 1))\n    x = tf.concat([x, col_pad], axis=3)\n    flat_x = tf.reshape(x, [batch, heads, ((length * 2) * length)])\n    flat_pad = tf.zeros((batch, heads, (length - 1)))\n    flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)\n    final_x = tf.reshape(flat_x_padded, [batch, heads, (length + 1), ((2 * length) - 1)])\n    final_x = final_x[(:, :, :, (length - 1):)]\n    final_x = final_x[(:, :, :length, :)]\n    return final_x", "docstring": "Converts tensor from relative to aboslute indexing for local attention.\n\nArgs:\nx: a Tensor of shape [batch (or batch*num_blocks), heads,\nlength, 2 * length - 1]\n\nReturns:\nA Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]", "source": "codesearchnet"}
{"code": "def infer_typehints_schema(data):\n    column_data = OrderedDict()\n    for row in data:\n        for key, value in row.items():\n            column_data.setdefault(key, []).append(value)\n    column_types = OrderedDict([(key, infer_element_type(values)) for key, values in column_data.items()])\n    return column_types", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nInfer Beam types for tabular data.\n\nArgs:\ndata (List[dict]): A list of dictionaries representing rows in a table.\n\nReturns:\nAn OrderedDict mapping column names to Beam types.", "source": "github-repos"}
{"code": "def has_result(state, incorrect_msg=\"Your query did not return a result.\"):\n    \n\n    \n    has_no_error(state)\n\n    if not state.solution_result:\n        raise NameError(\n            \"You are using has_result() to verify that the student query generated an error, but the solution query did not return a result either!\"\n        )\n\n    if not state.student_result:\n        state.do_test(incorrect_msg)\n\n    return state", "docstring": "Checks if the student's query returned a result.\n\nArgs:\nincorrect_msg: If specified, this overrides the automatically generated feedback message\nin case the student's query did not return a result.", "source": "juraj-google-style"}
{"code": "def iri(uri_string):\n    uri_string = str(uri_string)\n    if (uri_string[:1] == '?'):\n        return uri_string\n    if (uri_string[:1] == '['):\n        return uri_string\n    if (uri_string[:1] != '<'):\n        uri_string = '<{}'.format(uri_string.strip())\n    if (uri_string[(len(uri_string) - 1):] != '>'):\n        uri_string = '{}>'.format(uri_string.strip())\n    return uri_string", "docstring": "converts a string to an IRI or returns an IRI if already formated\n\nArgs:\nuri_string: uri in string format\n\nReturns:\nformated uri with <>", "source": "codesearchnet"}
{"code": "def from_tensors(self, tensors: Iterator[core.Tensor]) -> Any:\n    del tensors\n    return self.placeholder_value(PlaceholderContext())", "docstring": "Generates a value of this type from Tensors.\n\nMust use the same fixed amount of tensors as `to_tensors`.\n\nArgs:\ntensors: An iterator from which the tensors can be pulled.\n\nReturns:\nA value of this type.", "source": "github-repos"}
{"code": "def generate_batch(self, inputs: List[List[int]], generation_config: Optional[GenerationConfig]=None, progress_bar: bool=True, **kwargs) -> List[List[int]]:\n    if not inputs:\n        return []\n    manager = self.init_continuous_batching(generation_config=generation_config)\n    manager.start()\n    results = {}\n    num_requests = len(inputs)\n    try:\n        from tqdm.contrib.logging import logging_redirect_tqdm\n        with logging_redirect_tqdm([logger]):\n            with tqdm(total=num_requests, disable=not progress_bar, desc=f'Solving {num_requests} requests', unit='request') as pbar:\n                manager.add_requests(inputs, **kwargs)\n                finished_count = 0\n                while finished_count < num_requests:\n                    result = manager.get_result(timeout=1)\n                    if result:\n                        req_id = result.request_id\n                        if result.status == RequestStatus.FINISHED:\n                            results[req_id] = result\n                            finished_count += 1\n                            pbar.update(1)\n                    elif not manager.is_running():\n                        logger.error('Generation thread terminated unexpectedly.')\n                        break\n    except Exception as e:\n        logger.error(f'Error during batch generation: {e}', exc_info=True)\n    finally:\n        manager.stop(block=True, timeout=5.0)\n    return results", "docstring": "Generate sequences for a batch of prompts using continuous batching.\n\nArgs:\ninputs: List of input token sequences (prompts)\ngeneration_config: Optional generation configuration\n**kwargs: Additional generation parameters\n\nReturns:\n`List[List[int]]`: A list containing the generated sequences (including prompt tokens\nif not handled otherwise) for each input prompt, in the same order.\nReturns an empty list `[]` for requests that failed.", "source": "github-repos"}
{"code": "def client(self):\n    if (self._client is None):\n        self._client = Client_(self.servers)\n    return self._client", "docstring": "Get the native memcache client.\n\nReturns:\n`memcache.Client` instance.", "source": "codesearchnet"}
{"code": "def parse_str_to_expression(fiql_str):\n    \n    \n    nesting_lvl = 0\n    last_element = None\n    expression = Expression()\n    for (preamble, selector, comparison, argument) in iter_parse(fiql_str):\n        if preamble:\n            for char in preamble:\n                if char == '(':\n                    if isinstance(last_element, BaseExpression):\n                        raise FiqlFormatException(\n                            \"%s can not be followed by %s\" % (\n                                last_element.__class__, Expression))\n                    expression = expression.create_nested_expression()\n                    nesting_lvl += 1\n                elif char == ')':\n                    expression = expression.get_parent()\n                    last_element = expression\n                    nesting_lvl -= 1\n                else:\n                    if not expression.has_constraint():\n                        raise FiqlFormatException(\n                            \"%s proceeding initial %s\" % (\n                                Operator, Constraint))\n                    if isinstance(last_element, Operator):\n                        raise FiqlFormatException(\n                            \"%s can not be followed by %s\" % (\n                                Operator, Operator))\n                    last_element = Operator(char)\n                    expression = expression.add_operator(last_element)\n        if selector:\n            if isinstance(last_element, BaseExpression):\n                raise FiqlFormatException(\"%s can not be followed by %s\" % (\n                    last_element.__class__, Constraint))\n            last_element = Constraint(selector, comparison, argument)\n            expression.add_element(last_element)\n    if nesting_lvl != 0:\n        raise FiqlFormatException(\n            \"At least one nested expression was not correctly closed\")\n    if not expression.has_constraint():\n        raise FiqlFormatException(\n            \"Parsed string '%s' contained no constraint\" % fiql_str)\n    return expression", "docstring": "Parse a FIQL formatted string into an ``Expression``.\n\nArgs:\nfiql_str (string): The FIQL formatted string we want to parse.\n\nReturns:\nExpression: An ``Expression`` object representing the parsed FIQL\nstring.\n\nRaises:\nFiqlFormatException: Unable to parse string due to incorrect\nformatting.\n\nExample:\n\n>>> expression = parse_str_to_expression(\n...         \"name==bar,dob=gt=1990-01-01\")", "source": "juraj-google-style"}
{"code": "def parse_file(filename):\n        \n        poscar_read = False\n        poscar_string = []\n        dataset = []\n        all_dataset = []\n        \n        \n        all_dataset_aug = {}\n        dim = None\n        dimline = None\n        read_dataset = False\n        ngrid_pts = 0\n        data_count = 0\n        poscar = None\n        with zopen(filename, \"rt\") as f:\n            for line in f:\n                original_line = line\n                line = line.strip()\n                if read_dataset:\n                    toks = line.split()\n                    for tok in toks:\n                        if data_count < ngrid_pts:\n                            \n                            \n                            \n                            x = data_count % dim[0]\n                            y = int(math.floor(data_count / dim[0])) % dim[1]\n                            z = int(math.floor(data_count / dim[0] / dim[1]))\n                            dataset[x, y, z] = float(tok)\n                            data_count += 1\n                    if data_count >= ngrid_pts:\n                        read_dataset = False\n                        data_count = 0\n                        all_dataset.append(dataset)\n                elif not poscar_read:\n                    if line != \"\" or len(poscar_string) == 0:\n                        poscar_string.append(line)\n                    elif line == \"\":\n                        poscar = Poscar.from_string(\"\\n\".join(poscar_string))\n                        poscar_read = True\n                elif not dim:\n                    dim = [int(i) for i in line.split()]\n                    ngrid_pts = dim[0] * dim[1] * dim[2]\n                    dimline = line\n                    read_dataset = True\n                    dataset = np.zeros(dim)\n                elif line == dimline:\n                    \n                    \n                    read_dataset = True\n                    dataset = np.zeros(dim)\n                else:\n                    \n                    \n                    \n                    key = len(all_dataset) - 1\n                    if key not in all_dataset_aug:\n                        all_dataset_aug[key] = []\n                    all_dataset_aug[key].append(original_line)\n            if len(all_dataset) == 4:\n\n                data = {\"total\": all_dataset[0], \"diff_x\": all_dataset[1],\n                        \"diff_y\": all_dataset[2], \"diff_z\": all_dataset[3]}\n                data_aug = {\"total\": all_dataset_aug.get(0, None),\n                            \"diff_x\": all_dataset_aug.get(1, None),\n                            \"diff_y\": all_dataset_aug.get(2, None),\n                            \"diff_z\": all_dataset_aug.get(3, None)}\n\n                \n                \n                \n                \n                \n                \n                \n                diff_xyz = np.array([data[\"diff_x\"], data[\"diff_y\"],\n                                     data[\"diff_z\"]])\n                diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2]))\n                ref_direction = np.array([1.01, 1.02, 1.03])\n                ref_sign = np.sign(np.dot(ref_direction, diff_xyz))\n                diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)\n                data[\"diff\"] = diff.reshape((dim[0], dim[1], dim[2]))\n\n            elif len(all_dataset) == 2:\n                data = {\"total\": all_dataset[0], \"diff\": all_dataset[1]}\n               
 data_aug = {\"total\": all_dataset_aug.get(0, None),\n                            \"diff\": all_dataset_aug.get(1, None)}\n            else:\n                data = {\"total\": all_dataset[0]}\n                data_aug = {\"total\": all_dataset_aug.get(0, None)}\n            return poscar, data, data_aug", "docstring": "Convenience method to parse a generic volumetric data file in the vasp\nlike format. Used by subclasses for parsing file.\n\nArgs:\nfilename (str): Path of file to parse\n\nReturns:\n(poscar, data)", "source": "juraj-google-style"}
{"code": "def flags(cls):\n    assert (cls.__bases__ == (object,))\n    d = dict(cls.__dict__)\n    new_type = type(cls.__name__, (int,), d)\n    new_type.__module__ = cls.__module__\n    map_ = {}\n    for (key, value) in iteritems(d):\n        if ((key.upper() == key) and isinstance(value, integer_types)):\n            value_instance = new_type(value)\n            setattr(new_type, key, value_instance)\n            map_[value] = key\n\n    def str_(self):\n        value = int(self)\n        matches = []\n        for (k, v) in map_.items():\n            if (value & k):\n                matches.append(('%s.%s' % (type(self).__name__, v)))\n                value &= (~ k)\n        if ((value != 0) or (not matches)):\n            matches.append(text_type(value))\n        return ' | '.join(matches)\n\n    def repr_(self):\n        return ('<%s: %d>' % (str(self), int(self)))\n    setattr(new_type, '__repr__', repr_)\n    setattr(new_type, '__str__', str_)\n    return new_type", "docstring": "A decorator for creating an int flags class.\n\nMakes the values a subclass of the type and implements repr/str.\nThe new class will be a subclass of int.\n\nArgs:\ncls (type): The class to convert to an flags\n\nReturns:\ntype: A new class\n\n::\n\n@flags\nclass Foo(object):\nFOO = 1\nBAR = 2", "source": "codesearchnet"}
{"code": "def dvds_new_releases(self, **kwargs):\n        \n        path = self._get_path('dvds_new_releases')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the upcoming movies from the API.\n\nArgs:\npage_limit (optional): number of movies to show per page, default=16\npage (optional): results page number, default=1\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _is_ready(self, as_of):\n        \n        if self.is_one_off():\n            return self.initial_billing_cycle.date_range.lower <= as_of\n        else:\n            return True", "docstring": "Is the RecurringCost ready to be enacted as of the date `as_of`\n\nThis determines if `as_of` precedes the start of `initial_billing_cycle`. If so,\nwe should not be enacting this RecurringCost yet.\n\nArgs:\nas_of (Date):", "source": "juraj-google-style"}
{"code": "def create_batch(cls, size, **kwargs):\n    return [cls.create(**kwargs) for _ in range(size)]", "docstring": "Create a batch of instances of the given class, with overriden attrs.\n\nArgs:\nsize (int): the number of instances to create\n\nReturns:\nobject list: the created instances", "source": "codesearchnet"}
{"code": "def create_backup(self, resource, timeout=(- 1)):\n    return self._client.create(resource, uri=self.BACKUPS_PATH, timeout=timeout)", "docstring": "Creates a backup bundle with all the artifacts present on the appliance. At any given point only one backup\nbundle will exist on the appliance.\n\nArgs:\nresource (dict): Deployment Group to create the backup.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation in\nOneView, it just stops waiting for its completion.\n\nReturns:\ndict: A Deployment Group associated with the Artifact Bundle backup.", "source": "codesearchnet"}
{"code": "class ParameterListState(object):\n\n    def __init__(self, opening_bracket, newline, opening_column):\n        self.opening_bracket = opening_bracket\n        self.has_split_before_first_param = newline\n        self.opening_column = opening_column\n        self.parameters = opening_bracket.parameters\n        self.split_before_closing_bracket = False\n\n    @property\n    def closing_bracket(self):\n        return self.opening_bracket.matching_bracket\n\n    @property\n    def has_typed_return(self):\n        return self.closing_bracket.next_token.value == '->'\n\n    @property\n    @lru_cache()\n    def has_default_values(self):\n        return any((param.has_default_value for param in self.parameters))\n\n    @property\n    @lru_cache()\n    def ends_in_comma(self):\n        if not self.parameters:\n            return False\n        return self.parameters[-1].last_token.next_token.value == ','\n\n    @property\n    @lru_cache()\n    def last_token(self):\n        token = self.opening_bracket.matching_bracket\n        while not token.is_comment and token.next_token:\n            token = token.next_token\n        return token\n\n    @lru_cache()\n    def LastParamFitsOnLine(self, indent):\n        \n        if not self.has_typed_return:\n            return False\n        if not self.parameters:\n            return True\n        total_length = self.last_token.total_length\n        last_param = self.parameters[-1].first_token\n        total_length -= last_param.total_length - len(last_param.value)\n        return total_length + indent <= style.Get('COLUMN_LIMIT')\n\n    @lru_cache()\n    def SplitBeforeClosingBracket(self, indent):\n        \n        if style.Get('DEDENT_CLOSING_BRACKETS'):\n            return True\n        if self.ends_in_comma:\n            return True\n        if not self.parameters:\n            return False\n        total_length = self.last_token.total_length\n        last_param = self.parameters[-1].first_token\n        total_length -= last_param.total_length - len(last_param.value)\n        return total_length + indent > style.Get('COLUMN_LIMIT')\n\n    def Clone(self):\n        clone = ParameterListState(self.opening_bracket, self.has_split_before_first_param, self.opening_column)\n        clone.split_before_closing_bracket = self.split_before_closing_bracket\n        clone.parameters = [param.Clone() for param in self.parameters]\n        return clone\n\n    def __repr__(self):\n        return '[opening_bracket::%s, has_split_before_first_param::%s, opening_column::%d]' % (self.opening_bracket, self.has_split_before_first_param, self.opening_column)\n\n    def __eq__(self, other):\n        return hash(self) == hash(other)\n\n    def __ne__(self, other):\n        return not self == other\n\n    def __hash__(self, *args, **kwargs):\n        return hash((self.opening_bracket, self.has_split_before_first_param, self.opening_column, (hash(param) for param in self.parameters)))", "docstring": "Maintains the state of function parameter list formatting decisions.\n\nAttributes:\nopening_bracket: The opening bracket of the parameter list.\nclosing_bracket: The closing bracket of the parameter list.\nhas_typed_return: True if the function definition has a typed return.\nends_in_comma: True if the parameter list ends in a comma.\nlast_token: Returns the last token of the function declaration.\nhas_default_values: True if the parameters have default values.\nhas_split_before_first_param: Whether there is a newline before the first\nparameter.\nopening_column: The 
position of the opening parameter before a newline.\nparameters: A list of parameter objects (Parameter).\nsplit_before_closing_bracket: Split before the closing bracket. Sometimes\nneeded if the indentation would collide.", "source": "github-repos"}
{"code": "def Print(self, x, data, message, **kwargs):  \n    \n    tf.logging.info(\"PlacementMeshImpl::Print\")\n    new_slices = x.tensor_list[:]\n    with tf.device(self._devices[0]):\n      new_slices[0] = tf.Print(\n          new_slices[0], [t for d in data for t in d.tensor_list],\n          message, **kwargs)\n    return self.LaidOutTensor(new_slices)", "docstring": "call tf.Print.\n\nArgs:\nx: a LaidOutTensor\ndata: a list of LaidOutTensor\nmessage: a string\n**kwargs: keyword arguments to tf.print\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def solve_fba(self, objective):\n        \n        self._prob.set_objective(self._v_wt[objective])\n        return self._solve(lp.ObjectiveSense.Maximize)", "docstring": "Solve the wild type problem using FBA.\n\nArgs:\nobjective: The objective reaction to be maximized.\n\nReturns:\nThe LP Result object for the solved FBA problem.", "source": "juraj-google-style"}
{"code": "def check_type(o, acceptable_types, may_be_none=True):\n    \n    if not isinstance(acceptable_types, tuple):\n        acceptable_types = (acceptable_types,)\n\n    if may_be_none and o is None:\n        \n        pass\n    elif isinstance(o, acceptable_types):\n        \n        pass\n    else:\n        \n        error_message = (\n            \"We were expecting to receive an instance of one of the following \"\n            \"types: {types}{none}; but instead we received {o} which is a \"\n            \"{o_type}.\".format(\n                types=\", \".join([repr(t.__name__) for t in acceptable_types]),\n                none=\"or 'None'\" if may_be_none else \"\",\n                o=o,\n                o_type=repr(type(o).__name__)\n            )\n        )\n        raise TypeError(error_message)", "docstring": "Object is an instance of one of the acceptable types or None.\n\nArgs:\no: The object to be inspected.\nacceptable_types: A type or tuple of acceptable types.\nmay_be_none(bool): Whether or not the object may be None.\n\nRaises:\nTypeError: If the object is None and may_be_none=False, or if the\nobject is not an instance of one of the acceptable types.", "source": "juraj-google-style"}
{"code": "def _check_rules(browser, rules_js, config):\n    if (config.rules_to_run is None):\n        msg = 'No accessibility rules were specified to check.'\n        log.warning(msg)\n        return None\n    rules = config.rules_to_run\n    if rules:\n        rules_config = u'auditConfig.auditRulesToRun = {rules};'.format(rules=rules)\n    else:\n        rules_config = ''\n    ignored_rules = config.rules_to_ignore\n    if ignored_rules:\n        rules_config += u'\\nauditConfig.auditRulesToIgnore = {rules};'.format(rules=ignored_rules)\n    script = dedent(u'\\n            {rules_js}\\n            var auditConfig = new axs.AuditConfiguration();\\n            {rules_config}\\n            auditConfig.scope = {scope};\\n            var run_results = axs.Audit.run(auditConfig);\\n            var audit_results = axs.Audit.auditResults(run_results)\\n            return audit_results;\\n        '.format(rules_js=rules_js, rules_config=rules_config, scope=config.scope))\n    result = browser.execute_script(script)\n    audit_results = AuditResults(errors=result.get('errors_'), warnings=result.get('warnings_'))\n    return audit_results", "docstring": "Check the page for violations of the configured rules. By default,\nall rules in the ruleset will be checked.\n\nArgs:\nbrowser: a browser instance.\nrules_js: the ruleset JavaScript as a string.\nconfig: an AxsAuditConfig instance.\n\nReturns:\nA namedtuple with 'errors' and 'warnings' fields whose values are\nthe errors and warnings returned from the audit.\n\nNone if config has rules_to_run set to None.\n\n__Caution__: You probably don't really want to call this method\ndirectly! It will be used by `A11yAudit.do_audit` if using this ruleset.", "source": "codesearchnet"}
{"code": "def name_changed(self, changed_item):\n        \n        name = str(changed_item.text())\n\n        \n        if name != '':\n            if name != self.selected_element_name:\n                self.elements_from_file[name] = self.elements_from_file[self.selected_element_name]\n                del self.elements_from_file[self.selected_element_name]\n                self.selected_element_name = name", "docstring": "checks if name has been changed and ignores the name change if the changed_item is an existing script\nArgs:\nchanged_item:", "source": "juraj-google-style"}
{"code": "def discrete_bottleneck(self, x):\n    \n    x_reshaped = self.slice_hidden(x)\n    x_means_hot = []\n    x_means = 0\n    loss = 0\n    x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup(\n        x_reshaped, self.means)\n\n    if self.hparams.ema:\n      tf.logging.info(\"Using EMA with beta = {}\".format(self.hparams.beta))\n      updated_ema_count = \\\n          moving_averages.assign_moving_average(\n              self.ema_count,\n              tf.reduce_sum(\n                  tf.reshape(\n                      x_means_hot,\n                      shape=[-1, self.hparams.num_blocks,\n                             self.hparams.block_v_size]),\n                  axis=0),\n              self.hparams.decay,\n              zero_debias=False)\n\n      dw = tf.matmul(\n          tf.transpose(x_means_hot, perm=[1, 2, 0]),\n          tf.transpose(x_reshaped, perm=[1, 0, 2]))\n\n      updated_ema_means = \\\n          moving_averages.assign_moving_average(\n              self.ema_means, dw, self.hparams.decay,\n              zero_debias=False)\n      n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)\n      updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / (\n          n + 2**self.hparams.z_size * self.hparams.epsilon) * n)\n      updated_ema_means = updated_ema_means / tf.expand_dims(\n          updated_ema_count, axis=-1)\n\n      with tf.control_dependencies([e_loss]):\n        update_means = tf.assign(self.means, updated_ema_means)\n        with tf.control_dependencies([update_means]):\n          loss += self.hparams.beta * e_loss\n    else:\n      \n      loss += q_loss + self.hparams.beta * e_loss\n\n    \n    x_means_idx = tf.argmax(x_means_hot, axis=-1)\n\n    \n    num_bits = int(self.hparams.z_size \n    x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)\n    x_discrete = self.bit_to_int(\n        tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)\n\n    \n    shape_x = common_layers.shape_list(x)\n    shape_discrete = shape_x[:-1]\n    x_discrete = tf.reshape(x_discrete, shape_discrete)\n    x_means = tf.reshape(x_means, shape=shape_x)\n    h1 = x + tf.stop_gradient(x_means - x)\n\n    h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name=\"vch2\")\n    res = tf.layers.dense(\n        tf.nn.relu(h2), self.hparams.hidden_size, name=\"vcfin\")\n    embed_fn = partial(self.embed)\n    return {\n        \"dense\": res,\n        \"discrete\": x_discrete,\n        \"loss\": loss,\n        \"embed\": embed_fn\n    }", "docstring": "Discretization bottleneck for latent variables.\n\nArgs:\nx: Input to the discretization bottleneck.\n\nReturns:\nEmbedding to pass to the decoder, discrete latent, loss, and the\nembedding\nfunction.\n\nRaises:\nValueError: If projection_tensors is None for reshape_method\nproject, or\nema_count or ema_means is None if we are using ema, or unknown\nargs.", "source": "juraj-google-style"}
{"code": "def on_run_end(self, request):\n    self._is_run_start = False\n    if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:\n        partition_graphs = None\n        if request.run_metadata and request.run_metadata.partition_graphs:\n            partition_graphs = request.run_metadata.partition_graphs\n        elif request.client_graph_def:\n            partition_graphs = [request.client_graph_def]\n        if request.tf_error and (not os.path.isdir(self._dump_root)):\n            raise request.tf_error\n        debug_dump = debug_data.DebugDumpDir(self._dump_root, partition_graphs=partition_graphs)\n        debug_dump.set_python_graph(self._sess.graph)\n        passed_filter = None\n        passed_filter_exclude_node_names = None\n        if self._active_tensor_filter:\n            if not debug_dump.find(self._tensor_filters[self._active_tensor_filter], first_n=1, exclude_node_names=self._active_filter_exclude_node_names):\n                self._remove_dump_root()\n                return framework.OnRunEndResponse()\n            else:\n                passed_filter = self._active_tensor_filter\n                passed_filter_exclude_node_names = self._active_filter_exclude_node_names\n                self._active_tensor_filter = None\n                self._active_filter_exclude_node_names = None\n        self._prep_debug_cli_for_run_end(debug_dump, request.tf_error, passed_filter, passed_filter_exclude_node_names)\n        self._run_start_response = self._launch_cli()\n        self._remove_dump_root()\n    elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN:\n        self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata)\n        self._run_start_response = self._launch_cli()\n    else:\n        self._run_start_response = None\n    return framework.OnRunEndResponse()", "docstring": "Overrides on-run-end callback.\n\nActions taken:\n1) Load the debug dump.\n2) Bring up the Analyzer CLI.\n\nArgs:\nrequest: An instance of OnSessionInitRequest.\n\nReturns:\nAn instance of OnSessionInitResponse.", "source": "github-repos"}
{"code": "def extractDates(self, inp):\n\n    def merge(param):\n        (day, time) = param\n        if (not (day or time)):\n            return None\n        if (not day):\n            return time\n        if (not time):\n            return day\n        return datetime.datetime(day.year, day.month, day.day, time.hour, time.minute)\n    days = self.extractDays(inp)\n    times = self.extractTimes(inp)\n    return map(merge, zip_longest(days, times, fillvalue=None))", "docstring": "Extract semantic date information from an input string.\nIn effect, runs both parseDay and parseTime on the input\nstring and merges the results to produce a comprehensive\ndatetime object.\n\nArgs:\ninp (str): Input string to be parsed.\n\nReturns:\nA list of datetime objects containing the extracted dates from the\ninput snippet, or an empty list if not found.", "source": "codesearchnet"}
{"code": "def wp_decode(self, sequences):\n    decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]\n    return decode_strs", "docstring": "Convert a list of lists of word piece token ids into a list of strings by calling word piece tokenizer.\n\nArgs:\nsequences (`torch.Tensor`):\nList of tokenized input ids.\nReturns:\n`List[str]`: The list of wp decoded sentences.", "source": "github-repos"}
{"code": "def _get_function(self, name):\n    return self._functions.get(compat.as_str(name), None)", "docstring": "Returns the function definition for 'name'.\n\nArgs:\nname: string function name.\n\nReturns:\nThe function def proto.", "source": "github-repos"}
{"code": "def playback_trajectory(env, ep_dir):\n    xml_path = os.path.join(ep_dir, 'model.xml')\n    with open(xml_path, 'r') as f:\n        env.reset_from_xml_string(f.read())\n    state_paths = os.path.join(ep_dir, 'state_*.npz')\n    t = 0\n    for state_file in sorted(glob(state_paths)):\n        print(state_file)\n        dic = np.load(state_file)\n        states = dic['states']\n        for state in states:\n            env.sim.set_state_from_flattened(state)\n            env.sim.forward()\n            env.render()\n            t += 1\n            if ((t % 100) == 0):\n                print(t)", "docstring": "Playback data from an episode.\n\nArgs:\nep_dir: The path to the directory containing data for an episode.", "source": "codesearchnet"}
{"code": "def update_tag(self, tag_name, description=None,\n                   custom_properties=None, **kwargs):\n        \n        data = {'description': description or '',\n                'customProperties': custom_properties or {}}\n        resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),\n                         data=data, **kwargs)\n        resp.raise_for_status()\n        return resp.json()", "docstring": "update a tag by name\n\nArgs:\ntag_name (string): name of tag to update\ndescription (optional[string]): a description\ncustom_properties (optional[dict]): dictionary of custom properties", "source": "juraj-google-style"}
{"code": "def _convert_id_to_token(self, artists_index, genres_index, lyric_index):\n    artist = self.artists_decoder.get(artists_index)\n    genres = [self.genres_decoder.get(genre) for genre in genres_index]\n    lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]\n    return (artist, genres, lyrics)", "docstring": "Converts an index (integer) in a token (str) using the vocab.\n\nArgs:\nartists_index (`int`):\nIndex of the artist in its corresponding dictionary.\ngenres_index (`Union[List[int], int]`):\nIndex of the genre in its corresponding dictionary.\nlyric_index (`List[int]`):\nList of character indices, which each correspond to a character.", "source": "github-repos"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n    return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def is_link(path):\n    if (sys.getwindowsversion().major < 6):\n        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')\n    try:\n        return salt.utils.path.islink(path)\n    except Exception as exc:\n        raise CommandExecutionError(exc)", "docstring": "Check if the path is a symlink\n\nThis is only supported on Windows Vista or later.\n\nInline with Unix behavior, this function will raise an error if the path\nis not a symlink, however, the error raised will be a SaltInvocationError,\nnot an OSError.\n\nArgs:\npath (str): The path to a file or directory\n\nReturns:\nbool: True if path is a symlink, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.is_link /path/to/link", "source": "codesearchnet"}
{"code": "def to_dict(self, fields=None):\n        \n        data = {}\n\n        def _add(field):\n            return (fields is None or field in fields)\n\n        if _add(\"resolved_packages\"):\n            resolved_packages = []\n            for pkg in (self._resolved_packages or []):\n                resolved_packages.append(pkg.handle.to_dict())\n            data[\"resolved_packages\"] = resolved_packages\n\n        if _add(\"serialize_version\"):\n            data[\"serialize_version\"] = \\\n                '.'.join(map(str, ResolvedContext.serialize_version))\n\n        if _add(\"patch_locks\"):\n            data[\"patch_locks\"] = dict((k, v.name) for k, v in self.patch_locks)\n\n        if _add(\"package_orderers\"):\n            package_orderers = [package_order.to_pod(x)\n                                for x in (self.package_orderers or [])]\n            data[\"package_orderers\"] = package_orderers or None\n\n        if _add(\"package_filter\"):\n            data[\"package_filter\"] = self.package_filter.to_pod()\n\n        if _add(\"graph\"):\n            if self.graph_string and self.graph_string.startswith('{'):\n                graph_str = self.graph_string  \n            else:\n                g = self.graph()\n                graph_str = write_compacted(g)\n\n            data[\"graph\"] = graph_str\n\n        data.update(dict(\n            timestamp=self.timestamp,\n            requested_timestamp=self.requested_timestamp,\n            building=self.building,\n            caching=self.caching,\n            implicit_packages=map(str, self.implicit_packages),\n            package_requests=map(str, self._package_requests),\n            package_paths=self.package_paths,\n\n            default_patch_lock=self.default_patch_lock.name,\n\n            rez_version=self.rez_version,\n            rez_path=self.rez_path,\n            user=self.user,\n            host=self.host,\n            platform=self.platform,\n            arch=self.arch,\n            os=self.os,\n            created=self.created,\n\n            parent_suite_path=self.parent_suite_path,\n            suite_context_name=self.suite_context_name,\n\n            status=self.status_.name,\n            failure_description=self.failure_description,\n\n            from_cache=self.from_cache,\n            solve_time=self.solve_time,\n            load_time=self.load_time,\n            num_loaded_packages=self.num_loaded_packages\n        ))\n\n        if fields:\n            data = dict((k, v) for k, v in data.iteritems() if k in fields)\n\n        return data", "docstring": "Convert context to dict containing only builtin types.\n\nArgs:\nfields (list of str): If present, only write these fields into the\ndict. This can be used to avoid constructing expensive fields\n(such as 'graph') for some cases.\n\nReturns:\ndict: Dictified context.", "source": "juraj-google-style"}
{"code": "def labels(self, main_type, sub_type, unique_id, owner=None, filters=None, params=None):\n        \n        params = params or {}\n\n        if owner:\n            params['owner'] = owner\n        if filters and filters.filters:\n            params['filters'] = filters.filters_string\n        if not sub_type:\n            url = '/v2/{}/{}/securityLabels'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}/securityLabels'.format(main_type, sub_type, unique_id)\n\n        for l in self._iterate(url, params, 'securityLabel'):\n            yield l", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nowner:\nfilters:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def graph_def(self):\n    return self._graph.as_graph_def(add_shapes=self._add_shapes)", "docstring": "A serializable version of the underlying TensorFlow graph.\n\nReturns:\nA graph_pb2.GraphDef proto containing nodes for all of the Operations in\nthe underlying TensorFlow graph.", "source": "github-repos"}
{"code": "def transformer_prepare_encoder(inputs, target_space, hparams, features=None):\n  \n  ishape_static = inputs.shape.as_list()\n  encoder_input = inputs\n  if features and \"inputs_segmentation\" in features:\n    \n    inputs_segmentation = features[\"inputs_segmentation\"]\n    inputs_position = features[\"inputs_position\"]\n    targets_segmentation = features[\"targets_segmentation\"]\n    if (hasattr(hparams, \"unidirectional_encoder\") and\n        hparams.unidirectional_encoder):\n      tf.logging.info(\"Using unidirectional encoder\")\n      encoder_self_attention_bias = (\n          common_attention.attention_bias_lower_triangle(\n              common_layers.shape_list(inputs)[1]))\n    else:\n      encoder_self_attention_bias = (\n          common_attention.attention_bias_same_segment(\n              inputs_segmentation, inputs_segmentation))\n    encoder_decoder_attention_bias = (\n        common_attention.attention_bias_same_segment(targets_segmentation,\n                                                     inputs_segmentation))\n  else:\n    encoder_padding = common_attention.embedding_to_padding(encoder_input)\n    ignore_padding = common_attention.attention_bias_ignore_padding(\n        encoder_padding)\n    if (hasattr(hparams, \"unidirectional_encoder\") and\n        hparams.unidirectional_encoder):\n      tf.logging.info(\"Using unidirectional encoder\")\n      encoder_self_attention_bias = (\n          common_attention.attention_bias_lower_triangle(\n              common_layers.shape_list(inputs)[1]))\n    else:\n      \n      encoder_self_attention_bias = ignore_padding\n    encoder_decoder_attention_bias = ignore_padding\n    inputs_position = None\n  if hparams.proximity_bias:\n    encoder_self_attention_bias += common_attention.attention_bias_proximal(\n        common_layers.shape_list(inputs)[1])\n  if target_space is not None and hparams.get(\"use_target_space_embedding\",\n                                              True):\n    \n    emb_target_space = common_layers.embedding(\n        target_space,\n        32,\n        ishape_static[-1],\n        name=\"target_space_embedding\",\n        dtype=hparams.get(\"activation_dtype\", \"float32\"))\n    emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])\n    encoder_input += emb_target_space\n  if hparams.pos == \"timing\":\n    if inputs_position is not None:\n      encoder_input = common_attention.add_timing_signal_1d_given_position(\n          encoder_input, inputs_position)\n    else:\n      encoder_input = common_attention.add_timing_signal_1d(encoder_input)\n  elif hparams.pos == \"emb\":\n    encoder_input = common_attention.add_positional_embedding(\n        encoder_input, hparams.max_length, \"inputs_positional_embedding\",\n        inputs_position)\n\n  encoder_self_attention_bias = common_layers.cast_like(\n      encoder_self_attention_bias, encoder_input)\n  encoder_decoder_attention_bias = common_layers.cast_like(\n      encoder_decoder_attention_bias, encoder_input)\n  return (encoder_input, encoder_self_attention_bias,\n          encoder_decoder_attention_bias)", "docstring": "Prepare one shard of the model for the encoder.\n\nArgs:\ninputs: a Tensor.\ntarget_space: a Tensor.\nhparams: run hyperparameters\nfeatures: optionally pass the entire features dictionary as well.\nThis is needed now for \"packed\" datasets.\n\nReturns:\nencoder_input: a Tensor, bottom of encoder stack\nencoder_self_attention_bias: a bias tensor for use in encoder self-attention\nencoder_decoder_attention_bias: a 
bias tensor for use in encoder-decoder\nattention", "source": "juraj-google-style"}
{"code": "def _get_index_points(self, index_points=None):\n    \n    if self._index_points is None and index_points is None:\n      raise ValueError(\n          'This GaussianProcess instance was not instantiated with a value for '\n          'index_points. One must therefore be provided when calling sample, '\n          'log_prob, and other such methods. In particular, one can\\'t compute '\n          'KL divergences to/from an instance of `GaussianProccess` with '\n          'unspecified `index_points` directly. Instead, use the '\n          '`get_marginal_distribution` function, which takes `index_points` as '\n          'an argument and returns a `Normal` or '\n          '`MultivariateNormalLinearOperator` instance, whose KL can be '\n          'computed.')\n    return index_points if index_points is not None else self._index_points", "docstring": "Return `index_points` if not None, else `self._index_points`.\n\nArgs:\nindex_points: if given, this is what is returned; else,\n`self._index_points`\n\nReturns:\nindex_points: the given arg, if not None, else the class member\n`self._index_points`.\n\nRases:\nValueError: if `index_points` and `self._index_points` are both `None`.", "source": "juraj-google-style"}
{"code": "def transformer_encoder_ffn_unit(x, hparams, nonpadding_mask=None, pad_remover=None):\n    with tf.variable_scope('ffn'):\n        if (hparams.transformer_ffn_type == 'fc'):\n            y = transformer.transformer_ffn_layer(common_layers.layer_preprocess(x, hparams), hparams, pad_remover, conv_padding='SAME', nonpadding_mask=nonpadding_mask)\n        if (hparams.transformer_ffn_type == 'sepconv'):\n            assert (nonpadding_mask is not None), 'The nonpadding_mask should be provided, otherwise the model uses the leaked padding information to estimate the length!'\n            y = common_layers.sepconv_relu_sepconv(common_layers.layer_preprocess(x, hparams), filter_size=hparams.filter_size, output_size=hparams.hidden_size, first_kernel_size=(3, 1), second_kernel_size=(5, 1), padding='SAME', nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout)\n        x = common_layers.layer_postprocess(x, y, hparams)\n    return x", "docstring": "Applies a feed-forward function which is parametrised for encoding.\n\nArgs:\nx: input\nhparams: model hyper-parameters\nnonpadding_mask: optional Tensor with shape [batch_size, encoder_length]\nindicating what positions are not padding.  This is used\nto mask out padding in convoltutional layers.  We generally only\nneed this mask for \"packed\" datasets, because for ordinary datasets,\nno padding is ever followed by nonpadding.\npad_remover: to mask out padding in convolutional layers (efficiency).\n\nReturns:\nthe output tensor", "source": "codesearchnet"}
{"code": "def _parse_price(html_chunk):\n    price = get_first_content(html_chunk.find('div', {'class': 'prices'}))\n    if (not price):\n        return None\n    price = dhtmlparser.removeTags(price)\n    price = price.split('\\n')[(- 1)]\n    return price", "docstring": "Parse price of the book.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr/None: Price as string with currency or None if not found.", "source": "codesearchnet"}
{"code": "def __init__(self, config, auth: str) -> None:\n    self.config = config\n    self.auth = auth\n    self.columns = SA_FIELDS\n    self.reportId = None", "docstring": "Construct a report factory, providing project and authentication data.\n\nThis class will track the reportID internally if the request call is used.\n\nArgs:\nconfig, required - see: starthinker/util/configuration.py\nauth, required - either \"user\" or \"service\" used to create and/or read the report.\n\nReturns: None", "source": "github-repos"}
{"code": "def from_config(cls, config, custom_objects=None, columns_by_name=None):\n    return cls._from_config(config, custom_objects, columns_by_name)", "docstring": "Creates a FeatureColumn from its config.\n\nThis method should be the reverse of `get_config`, capable of instantiating\nthe same FeatureColumn from the config dictionary. See `get_config` for an\nexample of common (de)serialization practices followed in this file.\n\nTODO(b/118939620): This is a private method until consensus is reached on\nsupporting object deserialization deduping within Keras.\n\nArgs:\nconfig: A Dict config acquired with `get_config`.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nclasses or functions to be considered during deserialization.\ncolumns_by_name: A Dict[String, FeatureColumn] of existing columns in\norder to avoid duplication. Should be passed to any calls to\ndeserialize_feature_column().\n\nReturns:\nA FeatureColumn for the input config.", "source": "github-repos"}
{"code": "def sample_uniform(domain, rng):\n    if isinstance(domain, hp.IntInterval):\n        return rng.randint(domain.min_value, domain.max_value)\n    elif isinstance(domain, hp.RealInterval):\n        return rng.uniform(domain.min_value, domain.max_value)\n    elif isinstance(domain, hp.Discrete):\n        return rng.choice(domain.values)\n    else:\n        raise TypeError(('unknown domain type: %r' % (domain,)))", "docstring": "Sample a value uniformly from a domain.\n\nArgs:\ndomain: An `IntInterval`, `RealInterval`, or `Discrete` domain.\nrng: A `random.Random` object; defaults to the `random` module.\n\nRaises:\nTypeError: If `domain` is not a known kind of domain.\nIndexError: If the domain is empty.", "source": "codesearchnet"}
{"code": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if target_sizes is not None:\n        if len(out_logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    prob = out_logits.sigmoid()\n    prob = prob.view(out_logits.shape[0], -1)\n    k_value = min(top_k, prob.size(1))\n    topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    if target_sizes is not None:\n        if isinstance(target_sizes, List):\n            img_h = torch.Tensor([i[0] for i in target_sizes])\n            img_w = torch.Tensor([i[1] for i in target_sizes])\n        else:\n            img_h, img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n        boxes = boxes * scale_fct[:, None, :]\n    results = []\n    for s, l, b in zip(scores, labels, boxes):\n        score = s[s > threshold]\n        label = l[s > threshold]\n        box = b[s > threshold]\n        results.append({'scores': score, 'labels': label, 'boxes': box})\n    return results", "docstring": "Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,\ntop_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrObjectDetectionOutput`]):\nRaw outputs of the model.\nthreshold (`float`, *optional*):\nScore threshold to keep object detection predictions.\ntarget_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\ntop_k (`int`, *optional*, defaults to 100):\nKeep only top k bounding boxes before filtering by thresholding.\n\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def notify_progress(self, conn_string, operation, finished, total, wait=True):\n    if (operation not in self.PROGRESS_OPERATIONS):\n        raise ArgumentError('Invalid operation for progress event: {}'.format(operation))\n    event = dict(operation=operation, finished=finished, total=total)\n    if wait:\n        return self.notify_event(conn_string, 'progress', event)\n    self.notify_event_nowait(conn_string, 'progress', event)\n    return None", "docstring": "Send a progress event.\n\nProgress events can be sent for ``debug`` and ``script`` operations and\nnotify the caller about the progress of these potentially long-running\noperations.  They have two integer properties that specify what fraction\nof the operation has been completed.\n\nArgs:\nconn_string (str): The device that is sending the event.\noperations (str): The operation that is in progress: debug or script\nfinished (int): The number of \"steps\" that have finished.\ntotal (int): The total number of steps to perform.\nwait (bool): Whether to return an awaitable that we can use to\nblock until the notification has made it to all callbacks.\n\nReturns:\nawaitable or None: An awaitable if wait=True.\n\nIf wait is False, the notification is run in the background with\nno way to check its progress and None is returned.", "source": "codesearchnet"}
{"code": "def condition_indices(df):\n    \n    eigvals = eigenvalues(df)\n    cond_idx = np.sqrt(eigvals.max() / eigvals)\n    return pd.Series(cond_idx, df.columns, name='Condition index')", "docstring": "Returns a pandas Series with condition indices of the df columns.\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "juraj-google-style"}
{"code": "def get_user(self, user_id=None, user_name=None):\n    if user_id:\n        endpoint = '/api/user_id/{0}'.format(user_id)\n    elif user_name:\n        endpoint = '/api/user_name/{0}'.format(user_name)\n    else:\n        endpoint = '/api/user'\n    data = self._make_request(verb='GET', endpoint=endpoint)\n    try:\n        return User.NewFromJSON(data)\n    except:\n        return data", "docstring": "Get a user object from the API. If no ``user_id`` or ``user_name``\nis specified, it will return the User object for the currently\nauthenticated user.\n\nArgs:\nuser_id (int): User ID of the user for whom you want to get\ninformation. [Optional]\nuser_name(str): Username for the user for whom you want to get\ninformation. [Optional]\n\nReturns:\nA User object.", "source": "codesearchnet"}
{"code": "def to_json(self, variables=None):\n        \n\n        variables_to_resolve = []\n        if variables:\n            for key, value in variables.items():\n                variables_to_resolve.append(Variable(key, value))\n        for k in self.get_parameter_definitions():\n            if not variables or k not in variables:\n                \n                \n                \n                variables_to_resolve.append(Variable(k, 'unused_value'))\n        self.resolve_variables(variables_to_resolve)\n\n        return self.render_template()[1]", "docstring": "Render the blueprint and return the template in json form.\n\nArgs:\nvariables (dict):\nOptional dictionary providing/overriding variable values.\n\nReturns:\nstr: the rendered CFN JSON template", "source": "juraj-google-style"}
{"code": "def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):\n    beginning_of_answer = self.tokenizer.convert_tokens_to_ids(BEGINNING_OF_ANSWER_STRING)\n    unpadded_output_sequences = [seq[(seq == beginning_of_answer).nonzero(as_tuple=True)[0] + 1:] for seq in generated_outputs]\n    max_len = max((len(seq) for seq in unpadded_output_sequences))\n    padded_output_sequences = torch.full((len(unpadded_output_sequences), max_len), self.pad_token_id)\n    for i, seq in enumerate(unpadded_output_sequences):\n        padded_output_sequences[i, :len(seq)] = torch.tensor(seq)\n    return self.batch_decode(padded_output_sequences, skip_special_tokens=skip_special_tokens, **kwargs)", "docstring": "Post-processes the output of `FuyuForConditionalGeneration` to only return the text output.\n\nArgs:\ngenerated_outputs (`torch.Tensor` or `np.ndarray`):\nThe output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)`\ncontaining the token ids of the generated sequences.\nskip_special_tokens (`bool`, *optional*, defaults to `True`):\nWhether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.\n**kwargs:\nAdditional arguments to be passed to the tokenizer's `batch_decode method`.\n\nReturns:\n`List[str]`: The decoded text output.", "source": "github-repos"}
{"code": "def strip_number(self):\n    if (self.type != EventType.TABLET_PAD_STRIP):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_tablet_pad_get_strip_number(self._handle)", "docstring": "The number of the strip that has changed state,\nwith 0 being the first strip.\n\nOn tablets with only one strip, this method always returns 0.\n\nFor events not of type\n:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property\nraises :exc:`AttributeError`.\n\nReturns:\nint: The index of the strip that changed state.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):\n    to_exclude = ['coach_bsites', 'coach_ec', 'coach_go_mf', 'coach_go_bp', 'coach_go_cc']\n    if (not exclude_attributes):\n        excluder = to_exclude\n    else:\n        excluder = ssbio.utils.force_list(exclude_attributes)\n        excluder.extend(to_exclude)\n    summary_dict = StructProp.get_dict(self, only_attributes=only_attributes, exclude_attributes=excluder, df_format=df_format)\n    if self.coach_bsites:\n        tmp = {('top_bsite_' + k): v for (k, v) in self.coach_bsites[0].items()}\n        summary_dict.update(tmp)\n    if self.coach_ec:\n        tmp = {('top_ec_' + k): v for (k, v) in self.coach_ec[0].items()}\n        summary_dict.update(tmp)\n    if self.coach_go_mf:\n        tmp = {('top_go_mf_' + k): v for (k, v) in self.coach_go_mf[0].items()}\n        summary_dict.update(tmp)\n    if self.coach_go_bp:\n        tmp = {('top_go_bp_' + k): v for (k, v) in self.coach_go_bp[0].items()}\n        summary_dict.update(tmp)\n    if self.coach_go_cc:\n        tmp = {('top_go_cc_' + k): v for (k, v) in self.coach_go_cc[0].items()}\n        summary_dict.update(tmp)\n    return summary_dict", "docstring": "Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH\n\nArgs:\nonly_attributes (str, list): Attributes that should be returned. If not provided, all are returned.\nexclude_attributes (str, list): Attributes that should be excluded.\ndf_format (bool): If dictionary values should be formatted for a dataframe\n(everything possible is transformed into strings, int, or float -\nif something can't be transformed it is excluded)\n\nReturns:\ndict: Dictionary of attributes", "source": "codesearchnet"}
{"code": "def on_test_end(self, logs=None):", "docstring": "Called at the end of evaluation or validation.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently the output of the last call to\n`on_test_batch_end()` is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def _matmul_3d_with_map_fn(a, b, **kwargs):\n    if isinstance(b, ragged_tensor.RaggedTensor) and (b.ragged_rank == 2 or kwargs.get('transpose_b') or kwargs.get('adjoint_b')):\n        output_ragged_rank = 2\n    else:\n        output_ragged_rank = 1\n\n    def single_batch_matmul(x):\n        out = _matmul_2d(x[0], x[1], **kwargs)\n        if output_ragged_rank == 2:\n            out = ragged_tensor.RaggedTensor.from_tensor(out)\n        return out\n    fn_out_shape = None\n    row_splits_dtype = a.row_splits.dtype if isinstance(a, ragged_tensor.RaggedTensor) else b.row_splits.dtype\n    output_type = kwargs['output_type']\n    if output_type is None:\n        output_type = a.dtype\n    spec = ragged_tensor.RaggedTensorSpec(shape=fn_out_shape, dtype=output_type, ragged_rank=output_ragged_rank - 1, row_splits_dtype=row_splits_dtype)\n    result = map_fn.map_fn(single_batch_matmul, elems=(a, b), fn_output_signature=spec)\n    if kwargs.get('transpose_a') or kwargs.get('adjoint_a'):\n        result._set_shape(a.shape[:-2] + a.shape[-1:] + [None])\n    else:\n        result._set_shape(a.shape[:-2] + a.shape[-2:-1] + [None])\n    if kwargs.get('transpose_b') or kwargs.get('adjoint_b'):\n        result._set_shape(b.shape[:-2] + [None] + b.shape[-2:-1])\n    else:\n        result._set_shape(b.shape[:-2] + [None] + b.shape[-1:])\n    return result", "docstring": "Multiplies batches of 2D matrices using map_fn.\n\n`output[n, i, k]` = sum_j (a[n, i, j] * b[n, j, k])` (for all `n`, `i`, `k`).\n\nRequires that `a[n, i].nrows()` == `b[n].nrows()` (for all `n` and `i`).\n\nArgs:\na: A 3D Tensor or RaggedTensor with `shape=[B, I, J]`, where dimensions `I`\nand `J` may be ragged.\nb: A 3D Tensor or RaggedTensor with `shape=[B, J, K]`, where dimensions `J`\nand `K` may be ragged.\n**kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).\n\nReturns:\nA 3D RaggedTensor with `shape=[B, (I), (K)]`.", "source": "github-repos"}
{"code": "def _sim_timestamps(self, max_rate, bg_rate, emission, i_start, rs, ip_start=0, scale=10, sort=True):\n    counts_chunk = sim_timetrace_bg(emission, max_rate, bg_rate, self.t_step, rs=rs)\n    nrows = emission.shape[0]\n    if (bg_rate is not None):\n        nrows += 1\n    assert (counts_chunk.shape == (nrows, emission.shape[1]))\n    max_counts = counts_chunk.max()\n    if (max_counts == 0):\n        return (np.array([], dtype=np.int64), np.array([], dtype=np.int64))\n    time_start = (i_start * scale)\n    time_stop = (time_start + (counts_chunk.shape[1] * scale))\n    ts_range = np.arange(time_start, time_stop, scale, dtype='int64')\n    times_chunk_p = []\n    par_index_chunk_p = []\n    for (ip, counts_chunk_ip) in enumerate(counts_chunk):\n        times_c_ip = []\n        for v in range(1, (max_counts + 1)):\n            times_c_ip.append(ts_range[(counts_chunk_ip >= v)])\n        t = np.hstack(times_c_ip)\n        times_chunk_p.append(t)\n        par_index_chunk_p.append(np.full(t.size, (ip + ip_start), dtype='u1'))\n    times_chunk = np.hstack(times_chunk_p)\n    par_index_chunk = np.hstack(par_index_chunk_p)\n    if sort:\n        index_sort = times_chunk.argsort(kind='mergesort')\n        times_chunk = times_chunk[index_sort]\n        par_index_chunk = par_index_chunk[index_sort]\n    return (times_chunk, par_index_chunk)", "docstring": "Simulate timestamps from emission trajectories.\n\nUses attributes: `.t_step`.\n\nReturns:\nA tuple of two arrays: timestamps and particles.", "source": "codesearchnet"}
{"code": "async def get_person(self, id_):\n        \n        data = await self._get_person_json(\n            id_,\n            OrderedDict(append_to_response='movie_credits')\n        )\n        return Person.from_json(data, self.config['data'].get('images'))", "docstring": "Retrieve person data by ID.\n\nArguments:\nid_ (:py:class:`int`): The person's TMDb ID.\n\nReturns:\n:py:class:`~.Person`: The requested person.", "source": "juraj-google-style"}
{"code": "def _add_deprecation_notice_to_docstring(docstring, message):\n    if docstring:\n        return f'{docstring}\\n\\n.. deprecated:: {message}'\n    else:\n        return f'.. deprecated:: {message}'", "docstring": "Adds a deprecation notice to a docstring.\n\nArgs:\ndocstring: The original docstring (can be None or empty).\nmessage: The deprecation message to add.\n\nReturns:\nThe modified docstring.", "source": "github-repos"}
{"code": "def convert_code(in_file, out_file, in_alg='taudem', out_alg='arcgis', datatype=None):\n        \n        FileClass.check_file_exists(in_file)\n        in_alg = in_alg.lower()\n        out_alg = out_alg.lower()\n        if in_alg not in FlowModelConst.d8_dirs or out_alg not in FlowModelConst.d8_dirs:\n            raise RuntimeError('The input algorithm name should one of %s' %\n                               ', '.join(list(FlowModelConst.d8_dirs.keys())))\n        convert_dict = dict()\n        in_code = FlowModelConst.d8_dirs.get(in_alg)\n        out_code = FlowModelConst.d8_dirs.get(out_alg)\n        assert len(in_code) == len(out_code)\n        for i, tmp_in_code in enumerate(in_code):\n            convert_dict[tmp_in_code] = out_code[i]\n        if datatype is not None and datatype in GDALDataType:\n            RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file, datatype)\n        else:\n            RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file)", "docstring": "convert D8 flow direction code from one algorithm to another.\nArgs:\nin_file: input raster file path\nout_file: output raster file path\nin_alg: available algorithms are in FlowModelConst.d8_dirs. \"taudem\" is the default\nout_alg: same as in_alg. \"arcgis\" is the default\ndatatype: default is None and use the datatype of the in_file", "source": "juraj-google-style"}
{"code": "def parameterized_send(self, request, parameter_list):\n    response_queues = OrderedDict()\n    for parameter in parameter_list:\n        response_queues[parameter] = self.send((request % parameter))\n    return response_queues", "docstring": "Send batched requests for a list of parameters\n\nArgs:\nrequest (str): Request to send, like \"%s.*?\\n\"\nparameter_list (list): parameters to format with, like\n[\"TTLIN\", \"TTLOUT\"]\n\nReturns:\ndict: {parameter: response_queue}", "source": "codesearchnet"}
{"code": "def ParseFromHumanReadable(self, string):\n    if (not string):\n        return None\n    match = self.REGEX.match(string.strip().lower())\n    if (not match):\n        raise DecodeError(('Unknown specification for ByteSize %s' % string))\n    multiplier = self.DIVIDERS.get(match.group(2))\n    if (not multiplier):\n        raise DecodeError(('Invalid multiplier %s' % match.group(2)))\n    value = match.group(1)\n    if ('.' in value):\n        value = float(value)\n    else:\n        value = int(value)\n    self._value = int((value * multiplier))", "docstring": "Parse a human readable string of a byte string.\n\nArgs:\nstring: The string to parse.\n\nRaises:\nDecodeError: If the string can not be parsed.", "source": "codesearchnet"}
{"code": "def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):\n    \n    self._logger.info('Loading configuration from file: %s', yamlfile)\n\n    try:\n      parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())\n    except self._modules['yaml'].YAMLError:\n      self._logger.exception('Problem parsing YAML')\n      raise self.ConfigurationInvalidError(\n          'Failed to load from %s as YAML' % yamlfile)\n\n    if not isinstance(parsed_yaml, dict):\n      \n      raise self.ConfigurationInvalidError(\n          'YAML parsed, but wrong type, should be dict', parsed_yaml)\n\n    self._logger.debug('Configuration loaded from file: %s', parsed_yaml)\n    self.load_from_dict(\n        parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)", "docstring": "Loads the configuration from a file.\n\nParsed contents must be a single dict mapping config key to value.\n\nArgs:\nyamlfile: The opened file object to load configuration from.\nSee load_from_dict() for other args' descriptions.\n\nRaises:\nConfigurationInvalidError: If configuration file can't be read, or can't\nbe parsed as either YAML (or JSON, which is a subset of YAML).", "source": "juraj-google-style"}
{"code": "def merge_translations(localization_bundle_path):\n    logging.info('Merging translations')\n    for lang_dir in os.listdir(localization_bundle_path):\n        if (lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME):\n            continue\n        for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, ('*' + TRANSLATED_SUFFIX))):\n            strings_path = translated_path[:((- 1) * len(TRANSLATED_SUFFIX))]\n            localizable_path = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME, os.path.basename(strings_path))\n            localization_merge_back(localizable_path, strings_path, translated_path, strings_path)", "docstring": "Merges the new translation with the old one.\n\nThe translated files are saved as '.translated' file, and are merged with old translated file.\n\nArgs:\nlocalization_bundle_path (str): The path to the localization bundle.", "source": "codesearchnet"}
{"code": "def get_build_tool_version(self):\n    with open(('%s/%s/build.gradle' % (self.path, self.src_folder))) as f:\n        for line in f.readlines():\n            if ('buildToolsVersion' in line):\n                matches = re.findall('buildToolsVersion \\\\\"(.+?)\\\\\"', line)\n                if (len(matches) == 1):\n                    return matches[0]\n    return config.build_tool_version", "docstring": "Gets the build tool version to be used by zipalign from build.gradle file.\n\nReturns:\nA string containing the build tool version, default is 23.0.2.", "source": "codesearchnet"}
{"code": "def oem(self):\n        \n        buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n        res = self._dll.JLINKARM_GetOEMString(ctypes.byref(buf))\n        if res != 0:\n            raise errors.JLinkException('Failed to grab OEM string.')\n\n        oem = ctypes.string_at(buf).decode()\n        if len(oem) == 0:\n            \n            \n            return None\n\n        return oem", "docstring": "Retrieves and returns the OEM string of the connected J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe string of the OEM.  If this is an original SEGGER product, then\n``None`` is returned instead.\n\nRaises:\nJLinkException: on hardware error.", "source": "juraj-google-style"}
{"code": "def grid_destroy_from_name(job_name):\n    \n    jobs = grid_reload_from_name(job_name)\n    for job in jobs:\n        job.delete()\n        logger.info(\"Killing the job (%s, %s)\" % (job.site, job.uid))", "docstring": "Destroy all the jobs with a given name.\n\nArgs:\njob_name (str): the job name", "source": "juraj-google-style"}
{"code": "def _get_endpoint(self, sub_domain):\n        \n        storage_parameters = self._storage_parameters or dict()\n        account_name = storage_parameters.get('account_name')\n\n        if not account_name:\n            raise ValueError('\"account_name\" is required for Azure storage')\n\n        suffix = storage_parameters.get(\n            'endpoint_suffix', 'core.windows.net')\n\n        self._endpoint = 'http%s:\n            '' if self._unsecure else 's', account_name, sub_domain, suffix)\n\n        return account_name, suffix.replace('.', r'\\.')", "docstring": "Get endpoint information from storage parameters.\n\nUpdate system with endpoint information and return information required\nto define roots.\n\nArgs:\nself (pycosio._core.io_system.SystemBase subclass): System.\nsub_domain (str): Azure storage sub-domain.\n\nReturns:\ntuple of str: account_name, endpoint_suffix", "source": "juraj-google-style"}
{"code": "def query_api_version(self):\n    version_resp = self._session.get('/api/version', logon_required=False)\n    self._api_version = version_resp\n    return self._api_version", "docstring": "The Query API Version operation returns information about\nthe level of Web Services API supported by the HMC.\n\nThis operation does not require authentication.\n\nReturns:\n\n:term:`json object`:\nA JSON object with members ``api-major-version``,\n``api-minor-version``, ``hmc-version`` and ``hmc-name``.\nFor details about these properties, see section\n'Response body contents' in section 'Query API Version' in the\n:term:`HMC API` book.\n\nRaises:\n\n:exc:`~zhmcclient.HTTPError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.ConnectionError`", "source": "codesearchnet"}
{"code": "def GetFileSystemTypeIndicators(cls, path_spec, resolver_context=None):\n    if ((cls._file_system_remainder_list is None) or (cls._file_system_store is None)):\n        (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_FILE_SYSTEM)\n        cls._file_system_remainder_list = remainder_list\n        cls._file_system_store = specification_store\n    if (cls._file_system_scanner is None):\n        cls._file_system_scanner = cls._GetSignatureScanner(cls._file_system_store)\n    return cls._GetTypeIndicators(cls._file_system_scanner, cls._file_system_store, cls._file_system_remainder_list, path_spec, resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported file system types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "codesearchnet"}
{"code": "def Dump(obj,\n         sort_keys = False,\n         encoder = None):\n  \n  \n  \n  \n  \n  text = json.dumps(\n      obj,\n      indent=2,\n      sort_keys=sort_keys,\n      ensure_ascii=False,\n      cls=encoder,\n      separators=_SEPARATORS)  \n\n  \n  \n  \n  \n  if compatibility.PY2 and isinstance(text, bytes):\n    text = text.decode(\"utf-8\")  \n\n  return text", "docstring": "Stringifies a Python object into its JSON representation.\n\nArgs:\nobj: A Python object to convert to JSON.\nsort_keys: If True, output dictionaries keys in sorted (ascending) order.\nencoder: An (optional) encoder class to use.\n\nReturns:\nA JSON representation of the given object.", "source": "juraj-google-style"}
{"code": "def _segment_reduce(values, index, segment_reduce_fn, name):\n    flat_index = flatten(index)\n    vector_shape = values.size()[len(index.indices.size()):]\n    flattened_shape = torch.cat([torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0)\n    flat_values = values.reshape(flattened_shape.tolist())\n    out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device)\n    segment_means = out.scatter_reduce(dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False)\n    device = index.num_segments.device\n    new_shape = torch.cat([torch.as_tensor(index.batch_shape(), dtype=torch.long, device=device), torch.as_tensor([index.num_segments], dtype=torch.long, device=device), torch.as_tensor(vector_shape, dtype=torch.long, device=device)], dim=0)\n    output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype)\n    output_index = range_index_map(index.batch_shape(), index.num_segments)\n    return (output_values, output_index)", "docstring": "Applies a segment reduction segment-wise.\n\nArgs:\nvalues (`torch.Tensor`):\nTensor with segment values.\nindex (`IndexMap`):\nIndexMap.\nsegment_reduce_fn (`str`):\nName for the reduce operation. One of \"sum\", \"mean\", \"max\" or \"min\".\nname (`str`):\nName for the operation. Currently not used\n\nReturns:\n(`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).", "source": "github-repos"}
{"code": "def __init__(self, index, port = 8081):\n        \n\n        self.index = index\n        self.server = None\n        self.port = port if port else find_free_port()\n        self.settings = index.columns\n        self.docs = index.docs\n        self._create_settings()\n        self.html_path = get_cur_path()+'/data/table/'\n\n        \n        self.cleanup_flag = False", "docstring": "Table Constructor\n\ntodo::make sure this is memory efficient\n\nArgs:\nIndex (Index):  An Index object with a valid .query method\nand a .columns attribute.\n\nReturns:\nA table object\n\nUsage example\n\n>>> Table(ind)", "source": "juraj-google-style"}
{"code": "def get_by_provider_display_name(self, provider_display_name):\n        \n        san_managers = self._client.get_all()\n        result = [x for x in san_managers if x['providerDisplayName'] == provider_display_name]\n        return result[0] if result else None", "docstring": "Gets a SAN Manager by provider display name.\n\nArgs:\nprovider_display_name: Name of the Provider Display Name\n\nReturns:\ndict: SAN Manager.", "source": "juraj-google-style"}
{"code": "def __init__(self, config: JetMoeConfig, layer_idx: Optional[int]=None):\n    super().__init__()\n    self.config = config\n    self.layer_idx = layer_idx\n    self.is_causal = True\n    if layer_idx is None:\n        logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')\n    self.top_k = config.num_experts_per_tok\n    self.attention_dropout = config.attention_dropout\n    self.kv_projection_size = config.kv_channels * config.num_key_value_heads\n    self.num_key_value_heads = config.num_key_value_heads\n    self.num_heads = config.num_attention_heads\n    self.head_dim = config.kv_channels\n    self.experts = JetMoeMoA(config)\n    self.kv_proj = torch.nn.Linear(config.hidden_size, self.kv_projection_size * 2, bias=False)\n    self.rotary_emb = JetMoeRotaryEmbedding(config)", "docstring": "Initialize the JetMoeAttention module.\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.\nlayer_idx:\nIndex of the layer in the model.", "source": "github-repos"}
{"code": "def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1, energy=0.1):\n    if ((gradient < (1.0 / (300 - 1))) or (displacement < (1.0 / (1200 - 1))) or (energy < (1.0 / (100 - 1)))):\n        raise ValueError('The geometry optimization convergence criteria is too tight')\n    self.params['rem']['geom_opt_tol_gradient'] = int((gradient * 300))\n    self.params['rem']['geom_opt_tol_displacement'] = int((displacement * 1200))\n    self.params['rem']['geom_opt_tol_energy'] = int((energy * 100))", "docstring": "Adjust the convergence criteria of geometry optimization.\n\nArgs:\ngradient: the scale factor for gradient criteria. If less than\n1.0, you are tightening the threshold. The base value is\n300 × 10E−6\ndisplacement: the scale factor for atomic displacement. If less\nthen 1.0, you are tightening the threshold. The base value is\n1200 × 10E−6\nenergy: the scale factor for energy change between successive\niterations. If less than 1.0, you are tightening the\nthreshold. The base value is 100 × 10E−8.", "source": "codesearchnet"}
{"code": "def _verify_time_range(payload_dict):\n    now = int(time.time())\n    issued_at = payload_dict.get('iat')\n    if (issued_at is None):\n        raise AppIdentityError('No iat field in token: {0}'.format(payload_dict))\n    expiration = payload_dict.get('exp')\n    if (expiration is None):\n        raise AppIdentityError('No exp field in token: {0}'.format(payload_dict))\n    if (expiration >= (now + MAX_TOKEN_LIFETIME_SECS)):\n        raise AppIdentityError('exp field too far in future: {0}'.format(payload_dict))\n    earliest = (issued_at - CLOCK_SKEW_SECS)\n    if (now < earliest):\n        raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(now, earliest, payload_dict))\n    latest = (expiration + CLOCK_SKEW_SECS)\n    if (now > latest):\n        raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(now, latest, payload_dict))", "docstring": "Verifies the issued at and expiration from a JWT payload.\n\nMakes sure the current time (in UTC) falls between the issued at and\nexpiration for the JWT (with some skew allowed for via\n``CLOCK_SKEW_SECS``).\n\nArgs:\npayload_dict: dict, A dictionary containing a JWT payload.\n\nRaises:\nAppIdentityError: If there is no ``'iat'`` field in the payload\ndictionary.\nAppIdentityError: If there is no ``'exp'`` field in the payload\ndictionary.\nAppIdentityError: If the JWT expiration is too far in the future (i.e.\nif the expiration would imply a token lifetime\nlonger than what is allowed.)\nAppIdentityError: If the token appears to have been issued in the\nfuture (up to clock skew).\nAppIdentityError: If the token appears to have expired in the past\n(up to clock skew).", "source": "codesearchnet"}
{"code": "def _AddFileDescriptor(self, file_desc):\n    \n\n    if not isinstance(file_desc, descriptor.FileDescriptor):\n      raise TypeError('Expected instance of descriptor.FileDescriptor.')\n    self._file_descriptors[file_desc.name] = file_desc", "docstring": "Adds a FileDescriptor to the pool, non-recursively.\n\nIf the FileDescriptor contains messages or enums, the caller must explicitly\nregister them.\n\nArgs:\nfile_desc: A FileDescriptor.", "source": "juraj-google-style"}
{"code": "def display(port=None, height=None):\n    _display(port=port, height=height, print_message=True, display_handle=None)", "docstring": "Display a TensorBoard instance already running on this machine.\n\nArgs:\nport: The port on which the TensorBoard server is listening, as an\n`int`, or `None` to automatically select the most recently\nlaunched TensorBoard.\nheight: The height of the frame into which to render the TensorBoard\nUI, as an `int` number of pixels, or `None` to use a default value\n(currently 800).", "source": "codesearchnet"}
{"code": "def get_vulnerability_chains(current_node, sink, def_use, chain=[]):\n    for use in def_use[current_node]:\n        if (use == sink):\n            (yield chain)\n        else:\n            vuln_chain = list(chain)\n            vuln_chain.append(use)\n            (yield from get_vulnerability_chains(use, sink, def_use, vuln_chain))", "docstring": "Traverses the def-use graph to find all paths from source to sink that cause a vulnerability.\n\nArgs:\ncurrent_node()\nsink()\ndef_use(dict):\nchain(list(Node)): A path of nodes between source and sink.", "source": "codesearchnet"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in ('header', 'header_signature', 'logline'):\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    if key == 'logline':\n      self._ParseLogLine(parser_mediator, structure)\n\n    elif key == 'header':\n      self._ParseHeader(parser_mediator, structure)\n\n    elif key == 'header_signature':\n      \n      \n      \n      \n      \n      logger.warning('Unknown locale header.')\n      self._xchat_year = 0", "docstring": "Parses a log record structure and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): identifier of the structure of tokens.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def resize_bytes(fobj, old_size, new_size, offset):\n    \n\n    if new_size < old_size:\n        delete_size = old_size - new_size\n        delete_at = offset + new_size\n        delete_bytes(fobj, delete_size, delete_at)\n    elif new_size > old_size:\n        insert_size = new_size - old_size\n        insert_at = offset + old_size\n        insert_bytes(fobj, insert_size, insert_at)", "docstring": "Resize an area in a file adding and deleting at the end of it.\nDoes nothing if no resizing is needed.\n\nArgs:\nfobj (fileobj)\nold_size (int): The area starting at offset\nnew_size (int): The new size of the area\noffset (int): The start of the area\nRaises:\nIOError", "source": "juraj-google-style"}
{"code": "def derive_value(self, value):\n        \n        return IonEvent(\n            self.event_type,\n            self.ion_type,\n            value,\n            self.field_name,\n            self.annotations,\n            self.depth\n        )", "docstring": "Derives a new event from this one setting the ``value`` attribute.\n\nArgs:\nvalue: (any):\nThe value associated with the derived event.\n\nReturns:\nIonEvent: The newly generated non-thunk event.", "source": "juraj-google-style"}
{"code": "def for_default_graph(*args, **kwargs):\n    graph = tf.get_default_graph()\n    collection = graph.get_collection(_BOOKKEEPER)\n    if collection:\n        if (args or kwargs):\n            raise ValueError(('Requesting construction of a BookKeeper that already exists: %s %s' % (args, kwargs)))\n        return collection[0]\n    else:\n        books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs)\n        graph.add_to_collection(_BOOKKEEPER, books)\n        return books", "docstring": "Creates a bookkeeper for the default graph.\n\nArgs:\n*args: Arguments to pass into Bookkeeper's constructor.\n**kwargs: Arguments to pass into Bookkeeper's constructor.\nReturns:\nA new Bookkeeper.\nRaises:\nValueError: If args or kwargs are provided and the Bookkeeper already\nexists.", "source": "codesearchnet"}
{"code": "def restart(self, container, timeout=10):\n        \n        params = {'t': timeout}\n        url = self._url(\"/containers/{0}/restart\", container)\n        conn_timeout = self.timeout\n        if conn_timeout is not None:\n            conn_timeout += timeout\n        res = self._post(url, params=params, timeout=conn_timeout)\n        self._raise_for_status(res)", "docstring": "Restart a container. Similar to the ``docker restart`` command.\n\nArgs:\ncontainer (str or dict): The container to restart. If a dict, the\n``Id`` key is used.\ntimeout (int): Number of seconds to try to stop for before killing\nthe container. Once killed it will then be restarted. Default\nis 10 seconds.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def get_enabled_features(self, user_id, attributes=None):\n    \n\n    enabled_features = []\n    if not self.is_valid:\n      self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features'))\n      return enabled_features\n\n    if not isinstance(user_id, string_types):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n      return enabled_features\n\n    if not self._validate_user_inputs(attributes):\n      return enabled_features\n\n    for feature in self.config.feature_key_map.values():\n      if self.is_feature_enabled(feature.key, user_id, attributes):\n        enabled_features.append(feature.key)\n\n    return enabled_features", "docstring": "Returns the list of features that are enabled for the user.\n\nArgs:\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nA list of the keys of the features that are enabled for the user.", "source": "juraj-google-style"}
{"code": "def nack(self, items):\n    self.modify_ack_deadline([requests.ModAckRequest(ack_id=item.ack_id, seconds=0) for item in items])\n    self.drop([requests.DropRequest(*item) for item in items])", "docstring": "Explicitly deny receipt of messages.\n\nArgs:\nitems(Sequence[NackRequest]): The items to deny.", "source": "codesearchnet"}
{"code": "def _validate_observation_data(kernel, observation_index_points, observations):\n    ndims = kernel.feature_ndims\n    if (tensorshape_util.is_fully_defined(observation_index_points.shape[:(- ndims)]) and tensorshape_util.is_fully_defined(observations.shape)):\n        index_point_count = observation_index_points.shape[:(- ndims)]\n        observation_count = observations.shape\n        try:\n            tf.broadcast_static_shape(index_point_count, observation_count)\n        except ValueError:\n            raise ValueError('Observation index point and observation counts are not broadcastable: {} and {}, respectively.'.format(index_point_count, observation_count))", "docstring": "Ensure that observation data and locations have consistent shapes.\n\nThis basically means that the batch shapes are broadcastable. We can only\nensure this when those shapes are fully statically defined.\n\n\nArgs:\nkernel: The GP kernel.\nobservation_index_points: the observation data locations in the index set.\nobservations: the observation data.\n\nRaises:\nValueError: if the observations' batch shapes are not broadcastable.", "source": "codesearchnet"}
{"code": "def _ParseCommentRecord(self, structure):\n    \n    comment = structure[1]\n    if comment.startswith('Version'):\n      _, _, self._version = comment.partition(':')\n    elif comment.startswith('Software'):\n      _, _, self._software = comment.partition(':')\n    elif comment.startswith('Time'):\n      _, _, time_format = comment.partition(':')\n      if 'local' in time_format.lower():\n        self._use_local_timezone = True", "docstring": "Parse a comment and store appropriate attributes.\n\nArgs:\nstructure (pyparsing.ParseResults): parsed log line.", "source": "juraj-google-style"}
{"code": "def pre_release_work(patch: bool=False):\n    default_version = get_version()\n    if patch and default_version.is_devrelease:\n        raise ValueError(\"Can't create a patch version from the dev branch, checkout a released version!\")\n    if default_version.is_devrelease:\n        default_version = default_version.base_version\n    elif patch:\n        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'\n    else:\n        default_version = f'{default_version.major}.{default_version.minor + 1}.0'\n    version = input(f'Which version are you releasing? [{default_version}]')\n    if len(version) == 0:\n        version = default_version\n    print(f'Updating version to {version}.')\n    global_version_update(version, patch=patch)\n    print('Deleting conversion scripts.')\n    remove_conversion_scripts()", "docstring": "Do all the necessary pre-release steps:\n- figure out the next minor release version and ask confirmation\n- update the version everywhere\n- clean-up the model list in the main README\n\nArgs:\npatch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.", "source": "github-repos"}
{"code": "def _process_new(self, feed_item):\n    return {'assetIdentifier': {'name': feed_item.get(FieldMap.CREATIVE_ASSET_FILE_NAME, None), 'type': feed_item.get(FieldMap.CREATIVE_TYPE, None)}}", "docstring": "Creates a new creative asset DCM object from a feed item representing a creative asset from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item representing the creative asset from the Bulkdozer\nfeed.\n\nReturns:\nA creative asset object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def matvec(self, x, adjoint=False, name='matvec'):\n    with self._name_scope(name):\n        block_dimensions = self._block_range_dimensions() if adjoint else self._block_domain_dimensions()\n        if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):\n            for i, block in enumerate(x):\n                if not isinstance(block, linear_operator.LinearOperator):\n                    block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n                    self._check_input_dtype(block)\n                    block_dimensions[i].assert_is_compatible_with(block.shape[-1])\n                    x[i] = block\n            x_mat = [block[..., array_ops.newaxis] for block in x]\n            y_mat = self.matmul(x_mat, adjoint=adjoint)\n            return [array_ops.squeeze(y, axis=-1) for y in y_mat]\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        op_dimension = self.range_dimension if adjoint else self.domain_dimension\n        op_dimension.assert_is_compatible_with(x.shape[-1])\n        x_mat = x[..., array_ops.newaxis]\n        y_mat = self.matmul(x_mat, adjoint=adjoint)\n        return array_ops.squeeze(y_mat, axis=-1)", "docstring": "Transform [batch] vector `x` with left multiplication:  `x --> Ax`.\n\n```python\n# Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]\noperator = LinearOperator(...)\n\nX = ... # shape [..., N], batch vector\n\nY = operator.matvec(X)\nY.shape\n==> [..., M]\n\nY[..., :] = sum_j A[..., :, j] X[..., j]\n```\n\nArgs:\nx: `Tensor` with compatible shape and same `dtype` as `self`, or an\niterable of `Tensor`s. `Tensor`s are treated a [batch] vectors, meaning\nfor every set of leading dimensions, the last dimension defines a\nvector.\nSee class docstring for definition of compatibility.\nadjoint: Python `bool`.  If `True`, left multiply by the adjoint: `A^H x`.\nname:  A name for this `Op`.\n\nReturns:\nA `Tensor` with shape `[..., M]` and same `dtype` as `self`.", "source": "github-repos"}
{"code": "def inference(self, observed_arr):\n        \n        self.__pred_arr = self.__lstm_model.inference(observed_arr)\n        return self.__pred_arr", "docstring": "Draws samples from the `true` distribution.\n\nArgs:\nobserved_arr:     `np.ndarray` of observed data points.\n\nReturns:\n`np.ndarray` of inferenced.", "source": "juraj-google-style"}
{"code": "def sort_ordered_objects(items, getter=(lambda x: x)):\n    return sorted(items, key=(lambda x: getattr(getter(x), OrderedBase.CREATION_COUNTER_FIELD, (- 1))))", "docstring": "Sort an iterable of OrderedBase instances.\n\nArgs:\nitems (iterable): the objects to sort\ngetter (callable or None): a function to extract the OrderedBase instance from an object.\n\nExamples:\n>>> sort_ordered_objects([x, y, z])\n>>> sort_ordered_objects(v.items(), getter=lambda e: e[1])", "source": "codesearchnet"}
{"code": "def get_min_max_value(statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions) -> tuple[float, float]:\n    calib_method = calib_opts.calibration_method\n    if calib_method not in _REGISTRY:\n        raise ValueError(f'Unsupported calibration method: {calib_method}')\n    calibration_algorithm = _REGISTRY[calib_method](statistics, calib_opts)\n    return calibration_algorithm.get_min_max_value()", "docstring": "Calculates min and max from statistics using calibration options.\n\nArgs:\nstatistics: Collected calibration statistics.\ncalib_opts: Calibration options used for calculating min and max.\n\nReturns:\n(min_value, max_value): Min and max calculated using calib_opts.\n\nRaises:\nValueError: Unsupported calibration method is given.", "source": "github-repos"}
{"code": "def get_service_state_object_id(subsystem: str, name: str,\n                                    version: str) -> str:\n        \n        return '{}:{}:{}'.format(subsystem, name, version)", "docstring": "Return service state data object key.\n\nArgs:\nsubsystem (str): Subsystem the service belongs to\nname (str): Name of the Service\nversion (str): Version of the Service\n\nReturns:\nstr, Key used to store the service state data object", "source": "juraj-google-style"}
{"code": "def _batch_accumulator(cls, primals, tangents):\n    acc = super(ForwardAccumulator, cls).__new__(cls, primals, tangents)\n    acc._recording = False\n    acc._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(True)\n    primal_ids = set()\n    for primal, tangent in zip(nest.flatten(primals), nest.flatten(tangents)):\n        tangent.shape.assert_is_compatible_with(tensor_shape.TensorShape([None]) + primal.shape)\n        if id(primal) in primal_ids:\n            raise ValueError('Tensor {} was specified as a primal multiple times. This may indicate an error. If it was intended, please sum the corresponding tangents.')\n        primal_ids.add(id(primal))\n    acc._watch(primals, tangents)\n    return acc", "docstring": "Factory constructor to test accumulator on batches of tangents.\n\nArgs:\nprimals: A tensor or nested structure of tensors to watch.\ntangents: A tensor or nested structure of tensors, with the same nesting\nstructure as `primals`, with each element being a vector with compatible\nshape `[None] + primal.shape` of the corresponding primal element.\n\nReturns:\nA batch accumulator object.", "source": "github-repos"}
{"code": "def get_configuration_file(configuration_files: list[str]) -> str:\n    configuration_files_map = {}\n    for file_name in configuration_files:\n        if file_name.startswith('config.') and file_name.endswith('.json') and (file_name != 'config.json'):\n            v = file_name.removeprefix('config.').removesuffix('.json')\n            configuration_files_map[v] = file_name\n    available_versions = sorted(configuration_files_map.keys())\n    configuration_file = CONFIG_NAME\n    transformers_version = version.parse(__version__)\n    for v in available_versions:\n        if version.parse(v) <= transformers_version:\n            configuration_file = configuration_files_map[v]\n        else:\n            break\n    return configuration_file", "docstring": "Get the configuration file to use for this version of transformers.\n\nArgs:\nconfiguration_files (`List[str]`): The list of available configuration files.\n\nReturns:\n`str`: The configuration file to use.", "source": "github-repos"}
{"code": "class ZoeDepthReassembleStage(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.readout_type = config.readout_type\n        self.layers = nn.ModuleList()\n        for neck_hidden_size, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):\n            self.layers.append(ZoeDepthReassembleLayer(config, channels=neck_hidden_size, factor=factor))\n        if config.readout_type == 'project':\n            self.readout_projects = nn.ModuleList()\n            hidden_size = config.backbone_hidden_size\n            for _ in config.neck_hidden_sizes:\n                self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:\n        \n        batch_size = hidden_states[0].shape[0]\n        hidden_states = torch.cat(hidden_states, dim=0)\n        cls_token, hidden_states = (hidden_states[:, 0], hidden_states[:, 1:])\n        total_batch_size, sequence_length, num_channels = hidden_states.shape\n        hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels)\n        hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous()\n        if self.readout_type == 'project':\n            hidden_states = hidden_states.flatten(2).permute((0, 2, 1))\n            readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states)\n            hidden_states = torch.cat((hidden_states, readout), -1)\n        elif self.readout_type == 'add':\n            hidden_states = hidden_states + cls_token.unsqueeze(-1)\n        out = []\n        for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)):\n            if self.readout_type == 'project':\n                hidden_state = self.readout_projects[stage_idx](hidden_state)\n            hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width)\n            hidden_state = self.layers[stage_idx](hidden_state)\n            out.append(hidden_state)\n        return out", "docstring": "This class reassembles the hidden states of the backbone into image-like feature representations at various\nresolutions.\n\nThis happens in 3 stages:\n1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to\n`config.readout_type`.\n2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.\n3. Resizing the spatial dimensions (height, width).\n\nArgs:\nconfig (`[ZoeDepthConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def get(self, uid: int) -> Optional[CachedMessage]:\n    return self._cache.get(uid)", "docstring": "Return the given cached message.\n\nArgs:\nuid: The message UID.", "source": "codesearchnet"}
{"code": "def GetCampaignFeeds(client, feed, placeholder_type):\n  \n  campaign_feed_service = client.GetService('CampaignFeedService', 'v201809')\n\n  campaign_feeds = []\n  more_pages = True\n\n  selector = {\n      'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'],\n      'predicates': [\n          {\n              'field': 'Status',\n              'operator': 'EQUALS',\n              'values': ['ENABLED']\n          },\n          {\n              'field': 'FeedId',\n              'operator': 'EQUALS',\n              'values': [feed['id']]\n          },\n          {\n              'field': 'PlaceholderTypes',\n              'operator': 'CONTAINS_ANY',\n              'values': [placeholder_type]\n          }\n      ],\n      'paging': {\n          'startIndex': 0,\n          'numberResults': PAGE_SIZE\n      }\n  }\n\n  while more_pages:\n    page = campaign_feed_service.get(selector)\n\n    if 'entries' in page:\n      campaign_feeds.extend(page['entries'])\n\n    selector['paging']['startIndex'] += PAGE_SIZE\n    more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries'])\n\n  return campaign_feeds", "docstring": "Get a list of Feed Item Ids used by a campaign via a given Campaign Feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed: a Campaign Feed.\nplaceholder_type: the Placeholder Type.\n\nReturns:\nA list of Feed Item Ids.", "source": "juraj-google-style"}
{"code": "def NHWCToNCHW(input_tensor):\n    if isinstance(input_tensor, tensor.Tensor):\n        return array_ops.transpose(input_tensor, [0, 3, 1, 2])\n    else:\n        return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]", "docstring": "Convert the input from NHWC format to NCHW.\n\nArgs:\ninput_tensor:  a 4-D tensor, or a 4-element array representing the same.\n\nReturns:\nthe converted tensor or a shape array", "source": "github-repos"}
{"code": "def direct_transformers_import(path: str, file='__init__.py') -> ModuleType:\n    name = 'transformers'\n    location = os.path.join(path, file)\n    spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path])\n    module = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(module)\n    module = sys.modules[name]\n    return module", "docstring": "Imports transformers directly\n\nArgs:\npath (`str`): The path to the source file\nfile (`str`, *optional*): The file to join with the path. Defaults to \"__init__.py\".\n\nReturns:\n`ModuleType`: The resulting imported module", "source": "github-repos"}
{"code": "def AddRow(self, values):\n    \n    if self._number_of_columns and len(values) != self._number_of_columns:\n      raise ValueError('Number of values is out of bounds.')\n\n    self._rows.append(values)\n\n    if not self._number_of_columns:\n      self._number_of_columns = len(values)", "docstring": "Adds a row of values.\n\nArgs:\nvalues (list[object]): values.\n\nRaises:\nValueError: if the number of values is out of bounds.", "source": "juraj-google-style"}
{"code": "def shift(x, offset, dim, wrap, name=None):\n  \n  return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]", "docstring": "Shift operation.\n\nShift x right by +offset in dimension dim.\n\nArgs:\nx: a Tensor\noffset: an integer. If negative, shift left instead of right.\ndim: a Dimension of x\nwrap: a boolean - whether to wrap (True) or pad with zeros (False).\nname: an optional string\n\nReturns:\na Tensor with the same shape and dtype as x", "source": "juraj-google-style"}
{"code": "def go_from(self, vertex):\n    if self.vertex_out:\n        self.vertex_out.edges_out.remove(self)\n    self.vertex_out = vertex\n    vertex.edges_out.add(self)", "docstring": "Tell the edge to go out from this vertex.\n\nArgs:\nvertex (Vertex): vertex to go from.", "source": "codesearchnet"}
{"code": "def get_member_information(self, query_params=None):\n    return self.fetch_json(uri_path=self.base_uri, query_params=(query_params or {}))", "docstring": "Get Information for a member. Returns a dictionary of values.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def create(self, vrf_name, rd=None):\n    commands = [('vrf definition %s' % vrf_name)]\n    if rd:\n        commands.append(('rd %s' % rd))\n    return self.configure(commands)", "docstring": "Creates a new VRF resource\n\nNote: A valid RD has the following format admin_ID:local_assignment.\nThe admin_ID can be an AS number or globally assigned IPv4 address.\nThe local_assignment can be an integer between 0-65,535 if the\nadmin_ID is an IPv4 address and can be between 0-4,294,967,295 if\nthe admin_ID is an AS number. If the admin_ID is an AS number the\nlocal_assignment could also be in the form of an IPv4 address.\n\nArgs:\nvrf_name (str): The VRF name to create\nrd (str): The value to configure the vrf rd\n\nReturns:\nTrue if create was successful otherwise False", "source": "codesearchnet"}
{"code": "def _compute_causal_mask(self, query, value=None):\n    q_seq_length = ops.shape(query)[1]\n    v_seq_length = q_seq_length if value is None else ops.shape(value)[1]\n    ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype='int32')\n    row_index = ops.cumsum(ones_mask, axis=-2)\n    col_index = ops.cumsum(ones_mask, axis=-1)\n    return ops.greater_equal(row_index, col_index)", "docstring": "Computes a causal mask (e.g., for masked self-attention layers).\n\nFor example, if query and value both contain sequences of length 4,\nthis function returns a boolean tensor equal to:\n\n```\n[[[True,  False, False, False],\n[True,  True,  False, False],\n[True,  True,  True,  False],\n[True,  True,  True,  True]]]\n```\n\nArgs:\nquery: query tensor of shape `(B, T, ...)`.\nvalue: value tensor of shape `(B, S, ...)` (optional, defaults to\nquery).\n\nReturns:\nmask: a boolean tensor of shape `(1, T, S)` containing a lower\ntriangular matrix of shape `(T, S)`.", "source": "github-repos"}
{"code": "def compose(*funcs):\n    \n    if not funcs:\n        return lambda *args: args[0] if args else None\n\n    if len(funcs) == 1:\n        return funcs[0]\n\n    last = funcs[-1]\n    rest = funcs[0:-1]\n    return lambda *args: reduce(lambda ax, func: func(ax),\n                                reversed(rest), last(*args))", "docstring": "chained function composition wrapper\n\ncreates function f, where f(x) = arg0(arg1(arg2(...argN(x))))\n\nif *funcs is empty, an identity function is returned.\n\nArgs:\n*funcs: list of functions to chain\n\nReturns:\na new function composed of chained calls to *args", "source": "juraj-google-style"}
{"code": "def GetAttributeContainerByIndex(self, index):\n    \n    if index < 0:\n      raise IndexError(\n          'Unsupported negative index value: {0:d}.'.format(index))\n\n    if index < len(self._list):\n      return self._list[index]\n\n    return None", "docstring": "Retrieves a specific serialized attribute container from the list.\n\nArgs:\nindex (int): attribute container index.\n\nReturns:\nbytes: serialized attribute container data or None if not available.\n\nRaises:\nIndexError: if the index is less than zero.", "source": "juraj-google-style"}
{"code": "def Parse(self, raw_data):\n    self.results = raw_data\n    for f in self.filters:\n        self.results = f.Parse(self.results)\n    return self.results", "docstring": "Take the results and yield results that passed through the filters.\n\nThe output of each filter is used as the input for successive filters.\n\nArgs:\nraw_data: An iterable series of rdf values.\n\nReturns:\nA list of rdf values that matched all filters.", "source": "codesearchnet"}
{"code": "def launch_external_file(filename: str, raise_if_fails: bool = False) -> None:\n    \n    log.info(\"Launching external file: {!r}\", filename)\n    try:\n        if sys.platform.startswith('linux'):\n            cmdargs = [\"xdg-open\", filename]\n            \n            subprocess.call(cmdargs)\n        else:\n            \n            \n            os.startfile(filename)\n    except Exception as e:\n        log.critical(\"Error launching {!r}: error was {}.\\n\\n{}\",\n                     filename, str(e), traceback.format_exc())\n        if raise_if_fails:\n            raise", "docstring": "Launches a file using the operating system's standard launcher.\n\nArgs:\nfilename: file to launch\nraise_if_fails: raise any exceptions from\n``subprocess.call([\"xdg-open\", filename])`` (Linux)\nor ``os.startfile(filename)`` (otherwise)? If not, exceptions\nare suppressed.", "source": "juraj-google-style"}
{"code": "def update_unexpected_keys(self, model, unexpected_keys: List[str], prefix: str) -> List[str]:\n    return unexpected_keys", "docstring": "Override this method if you want to adjust the `unexpected_keys`.\n\nArgs:\nunexpected_keys (`List[str]`, *optional*):\nThe list of unexpected keys in the checkpoint compared to the state dict of the model", "source": "github-repos"}
{"code": "def list_media_endpoint_keys(access_token, subscription_id, rgname, msname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', rgname, '/providers/microsoft.media/', '/mediaservices/', msname, '/listKeys?api-version=', MEDIA_API])\n    return do_get(endpoint, access_token)", "docstring": "list the media endpoint keys in a media service\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nmsname (str): Media service name.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def get_load_balancer(self, id):\n        \n        return LoadBalancer.get_object(api_token=self.token, id=id)", "docstring": "Returns a Load Balancer object by its ID.\n\nArgs:\nid (str): Load Balancer ID", "source": "juraj-google-style"}
{"code": "def AdManagerDateTimePacker(cls, value, version):\n    \n\n    if isinstance(value, datetime.datetime):\n      if value.tzinfo is None:\n        raise googleads.errors.GoogleAdsValueError(\n            'Datetime %s is not timezone aware.' % value\n        )\n      return {\n          'date': cls.AdManagerDateTimePacker(value.date(), version),\n          'hour': value.hour,\n          'minute': value.minute,\n          'second': value.second,\n          \n          'timeZoneId' if version >= 'v201811' else 'timeZoneID':\n              value.tzinfo.zone,\n      }\n    elif isinstance(value, datetime.date):\n      return {'year': value.year, 'month': value.month, 'day': value.day}", "docstring": "Returns dicts formatted for Ad Manager SOAP based on date/datetime.\n\nArgs:\nvalue: A date or datetime object to be converted.\nversion: the version of the current API, e.g. 'v201811'\n\nReturns:\nThe value object correctly represented for Ad Manager SOAP.", "source": "juraj-google-style"}
{"code": "def optimal_partitions(sizes, counts, num_part):\n    if (num_part < 2):\n        return [(sizes[0], sizes[(- 1)])]\n    if (num_part >= len(sizes)):\n        partitions = [(x, x) for x in sizes]\n        return partitions\n    nfps = _compute_nfps_real(counts, sizes)\n    (partitions, _, _) = _compute_best_partitions(num_part, sizes, nfps)\n    return partitions", "docstring": "Compute the optimal partitions given a distribution of set sizes.\n\nArgs:\nsizes (numpy.array): The complete domain of set sizes in ascending\norder.\ncounts (numpy.array): The frequencies of all set sizes in the same\norder as `sizes`.\nnum_part (int): The number of partitions to create.\n\nReturns:\nlist: A list of partitions in the form of `(lower, upper)` tuples,\nwhere `lower` and `upper` are lower and upper bound (inclusive)\nset sizes of each partition.", "source": "codesearchnet"}
{"code": "def get(self, block=True, timeout=None):\n    if (not block):\n        (success, item) = ray.get(self.actor.get.remote())\n        if (not success):\n            raise Empty\n    elif (timeout is None):\n        (success, item) = ray.get(self.actor.get.remote())\n        while (not success):\n            (success, item) = ray.get(self.actor.get.remote())\n    elif (timeout < 0):\n        raise ValueError(\"'timeout' must be a non-negative number\")\n    else:\n        endtime = (time.time() + timeout)\n        success = False\n        while ((not success) and (time.time() < endtime)):\n            (success, item) = ray.get(self.actor.get.remote())\n        if (not success):\n            raise Empty\n    return item", "docstring": "Gets an item from the queue.\n\nUses polling if block=True, so there is no guarantee of order if\nmultiple consumers get from the same empty queue.\n\nReturns:\nThe next item in the queue.\n\nRaises:\nEmpty if the queue is empty and blocking is False.", "source": "codesearchnet"}
{"code": "def _restructure_if_volume_follows_journal(left, right):\n\n    def _get_volume_keyword_op_and_remaining_subtree(right_subtree):\n        if (isinstance(right_subtree, NotOp) and isinstance(right_subtree.op, KeywordOp) and (right_subtree.op.left == Keyword('volume'))):\n            return (None, None)\n        elif (isinstance(right_subtree, AndOp) and isinstance(right_subtree.left, NotOp) and isinstance(right_subtree.left.op, KeywordOp) and (right_subtree.left.op.left == Keyword('volume'))):\n            return (None, right_subtree.right)\n        elif (isinstance(right_subtree, KeywordOp) and (right_subtree.left == Keyword('volume'))):\n            return (right_subtree, None)\n        elif (isinstance(right_subtree, AndOp) and (right_subtree.left.left == Keyword('volume'))):\n            return (right_subtree.left, right_subtree.right)\n    journal_value = left.right.value\n    volume_and_remaining_subtree = _get_volume_keyword_op_and_remaining_subtree(right)\n    if (not volume_and_remaining_subtree):\n        return\n    (volume_node, remaining_subtree) = volume_and_remaining_subtree\n    if volume_node:\n        left.right.value = ','.join([journal_value, volume_node.right.value])\n    return (AndOp(left, remaining_subtree) if remaining_subtree else left)", "docstring": "Remove volume node if it follows a journal logically in the tree hierarchy.\n\nArgs:\nleft (ast.ASTElement): The journal KeywordOp node.\nright (ast.ASTElement): The rest of the tree to be restructured.\n\nReturn:\n(ast.ASTElement): The restructured tree, with the volume node removed.\n\nNotes:\nThis happens to support queries like \"journal Phys.Rev. and vol d85\". Appends the value of KeywordOp with\nKeyword 'volume' and discards 'volume' KeywordOp node from the tree.", "source": "codesearchnet"}
{"code": "def _UpdateStatus(\n      self, status, display_name, number_of_consumed_sources, storage_writer,\n      force=False):\n    \n    current_timestamp = time.time()\n    if not force and current_timestamp < (\n        self._last_status_update_timestamp + self._STATUS_UPDATE_INTERVAL):\n      return\n\n    if status == definitions.STATUS_INDICATOR_IDLE:\n      status = definitions.STATUS_INDICATOR_RUNNING\n\n    used_memory = self._process_information.GetUsedMemory() or 0\n\n    self._processing_status.UpdateForemanStatus(\n        self._name, status, self._pid, used_memory, display_name,\n        number_of_consumed_sources, storage_writer.number_of_event_sources, 0,\n        storage_writer.number_of_events, 0, 0, 0, 0, 0,\n        storage_writer.number_of_warnings)\n\n    if self._status_update_callback:\n      self._status_update_callback(self._processing_status)\n\n    self._last_status_update_timestamp = current_timestamp", "docstring": "Updates the processing status.\n\nArgs:\nstatus (str): human readable status of the processing e.g. 'Idle'.\ndisplay_name (str): human readable of the file entry currently being\nprocessed.\nnumber_of_consumed_sources (int): number of consumed sources.\nstorage_writer (StorageWriter): storage writer for a session storage.\nforce (Optional[bool]): True if the update should be forced ignoring\nthe last status update time.", "source": "juraj-google-style"}
{"code": "def diet_expert(x, hidden_size, params):\n\n    @fn_with_diet_vars(params)\n    def diet_expert_internal(x):\n        dim = x.get_shape().as_list()[(- 1)]\n        h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)\n        y = tf.layers.dense(h, dim, use_bias=False)\n        y *= tf.rsqrt(tf.to_float((dim * hidden_size)))\n        return y\n    return diet_expert_internal(x)", "docstring": "A two-layer feed-forward network with relu activation on hidden layer.\n\nUses diet variables.\nRecomputes hidden layer on backprop to save activation memory.\n\nArgs:\nx: a Tensor with shape [batch, io_size]\nhidden_size: an integer\nparams: a diet variable HParams object.\n\nReturns:\na Tensor with shape [batch, io_size]", "source": "codesearchnet"}
{"code": "def unload(self):\n        \n        unloaded = False\n\n        if self._lib is not None:\n            if self._winlib is not None:\n                \n                \n                \n                \n                ctypes.windll.kernel32.FreeLibrary.argtypes = (\n                    ctypes.c_void_p,\n                )\n\n                \n                \n                ctypes.windll.kernel32.FreeLibrary(self._lib._handle)\n                ctypes.windll.kernel32.FreeLibrary(self._winlib._handle)\n\n                self._lib = None\n                self._winlib = None\n\n                unloaded = True\n            else:\n                \n                \n                del self._lib\n                self._lib = None\n                unloaded = True\n\n        if self._temp is not None:\n            os.remove(self._temp.name)\n            self._temp = None\n\n        return unloaded", "docstring": "Unloads the library's DLL if it has been loaded.\n\nThis additionally cleans up the temporary DLL file that was created\nwhen the library was loaded.\n\nArgs:\nself (Library): the ``Library`` instance\n\nReturns:\n``True`` if the DLL was unloaded, otherwise ``False``.", "source": "juraj-google-style"}
{"code": "def render(self, fname=''):\n    import qnet.visualization.circuit_pyx as circuit_visualization\n    from tempfile import gettempdir\n    from time import time, sleep\n    if (not fname):\n        tmp_dir = gettempdir()\n        fname = os.path.join(tmp_dir, 'tmp_{}.png'.format(hash(time)))\n    if circuit_visualization.draw_circuit(self, fname):\n        done = False\n        for k in range(20):\n            if os.path.exists(fname):\n                done = True\n                break\n            else:\n                sleep(0.5)\n        if done:\n            return fname\n    raise CannotVisualize()", "docstring": "Render the circuit expression and store the result in a file\n\nArgs:\nfname (str): Path to an image file to store the result in.\n\nReturns:\nstr: The path to the image file", "source": "codesearchnet"}
{"code": "def ReleaseObject(self, identifier):\n    \n    if identifier not in self._values:\n      raise KeyError('Missing cached object for identifier: {0:s}'.format(\n          identifier))\n\n    cache_value = self._values[identifier]\n    if not cache_value:\n      raise RuntimeError('Missing cache value for identifier: {0:s}'.format(\n          identifier))\n\n    cache_value.DecrementReferenceCount()", "docstring": "Releases a cached object based on the identifier.\n\nThis method decrements the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nRaises:\nKeyError: if the VFS object is not found in the cache.\nRuntimeError: if the cache value is missing.", "source": "juraj-google-style"}
{"code": "def __init__(self, application, a_service):\n        \n        if not isinstance(a_service, sm_messages.Service):\n            raise ValueError(u\"service is None or not an instance of Service\")\n\n        self._application = application\n        self._service = a_service\n\n        method_registry, reporting_rules = self._configure()\n        self._method_registry = method_registry\n        self._reporting_rules = reporting_rules", "docstring": "Initializes a new Middleware instance.\n\nArgs:\napplication: the wrapped wsgi application\na_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`):\na service instance", "source": "juraj-google-style"}
{"code": "def follow(id, edges, directed=False, _visited=None):\n    \n    if _visited is None:\n        _visited = set()\n    _visited.add(id)\n\n    for row in edges[edges.ix[:, 0] == id].values:\n        if(row[1] not in _visited):\n            follow(row[1], edges, directed, _visited)\n\n    if not directed:\n        for row in edges[edges.ix[:, 1] == id].values:\n            if(row[0] not in _visited):\n                follow(row[0], edges, directed, _visited)\n\n    return _visited", "docstring": "Follow the a graph to find the nodes connected to a given node.\nArgs:\nid: the id of the starting node\nedges: a pandas DataFrame of edges. Each row is an edge with two columns containing\nthe ids of the vertices.\ndirected: If True, edges are directed from first column to second column.\nOtherwise edges are undirected.\n_visited: used internally for recursion\nReturns: the set of all nodes connected to the starting node.", "source": "juraj-google-style"}
{"code": "def apply_masks(tensor: torch.Tensor, masks: List[torch.Tensor]) -> torch.Tensor:\n    all_masked_tensors = []\n    for mask in masks:\n        mask = mask.to(tensor.device)\n        mask_keep = mask.unsqueeze(-1).repeat(1, 1, tensor.size(-1))\n        all_masked_tensors += [torch.gather(tensor, dim=1, index=mask_keep)]\n    return torch.cat(all_masked_tensors, dim=0)", "docstring": "Args:\ntensor (`torch.Tensor`):\nTensor of shape [batch_size, num_patches, feature_dim]\nmasks (`List[torch.Tensor]`):\nList of tensors of shape [batch_size, num_patches] containing indices of patches to keep", "source": "github-repos"}
{"code": "def retrieve_review(self, reviewer, product):\n    if (not isinstance(reviewer, self._reviewer_cls)):\n        raise TypeError(\"Type of given reviewer isn't acceptable:\", reviewer, ', expected:', self._reviewer_cls)\n    elif (not isinstance(product, self._product_cls)):\n        raise TypeError(\"Type of given product isn't acceptable:\", product, ', expected:', self._product_cls)\n    try:\n        return self.graph[reviewer][product]['review']\n    except TypeError:\n        raise KeyError('{0} does not review {1}.'.format(reviewer, product))", "docstring": "Retrieve review that the given reviewer put the given product.\n\nArgs:\nreviewer: An instance of Reviewer.\nproduct: An instance of Product.\n\nReturns:\nA review object.\n\nRaises:\nTypeError: when given reviewer and product aren't instance of\nspecified reviewer and product class when this graph is constructed.\nKeyError: When the reviewer does not review the product.", "source": "codesearchnet"}
{"code": "def in_to_out(self, in_path, out_path=None):\n    is_in_place_edit = False\n    if is_same_file(in_path, out_path):\n        logger.debug('in path and out path are the same file. writing to temp file and then replacing in path with the temp file.')\n        out_path = None\n        is_in_place_edit = True\n    logger.debug(f'opening source file: {in_path}')\n    with open(in_path) as infile:\n        if out_path:\n            logger.debug(f'opening destination file for writing: {out_path}')\n            ensure_dir(out_path)\n            with open(out_path, 'w') as outfile:\n                outfile.writelines(self.formatter(infile))\n            return\n        else:\n            logger.debug('opening temp file for writing...')\n            with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile:\n                outfile.writelines(self.formatter(infile))\n            is_in_place_edit = True\n    if is_in_place_edit:\n        logger.debug(f'moving temp file to: {in_path}')\n        move_temp_file(outfile.name, infile.name)", "docstring": "Write a single file in to out, running self.formatter on each line.\n\nIf in_path and out_path point to the same thing it will in-place edit\nand overwrite the in path. Even easier, if you do want to edit a file\nin place, don't specify out_path, or set it to None.\n\nArgs:\nin_path: str or path-like. Must refer to a single existing file.\nout_path: str or path-like. Must refer to a single destination file\nlocation. will create directory structure if it doesn't\nexist.\nIf out_path is not specified or None, will in-place edit\nand overwrite the in-files.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def __init__(self, out_stream=None, hide_cursor=True):\n        \n        BaseWindow.__init__(self, out_stream=out_stream,\n                            hide_cursor=hide_cursor)\n        self.fullscreen_ctx = self.t.fullscreen()", "docstring": "Constructs a FullscreenWindow\n\nArgs:\nout_stream (file): Defaults to sys.__stdout__\nhide_cursor (bool): Hides cursor while in context", "source": "juraj-google-style"}
{"code": "def _build_graph(self, tags):\n    graph = SimpleGraph()\n    for tag_index in xrange(len(tags)):\n        for entity_index in xrange(len(tags[tag_index].get('entities'))):\n            a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)\n            tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))\n            for tag in tags[(tag_index + 1):]:\n                start_token = tag.get('start_token')\n                if (start_token >= (tags[tag_index].get('start_token') + len(tokens))):\n                    for b_entity_index in xrange(len(tag.get('entities'))):\n                        b_entity_name = graph_key_from_tag(tag, b_entity_index)\n                        graph.add_edge(a_entity_name, b_entity_name)\n    return graph", "docstring": "Builds a graph from the entities included in the tags.\nNote this is used internally.\n\nArgs:\ntags (list): A list of the tags to include in graph\n\nReturns:\ngraph : this is the resulting graph of the tagged entities.", "source": "codesearchnet"}
{"code": "def add(self, *dic):\n        \n        dicList = list(flatten(dic))\n        \n        for d in dicList:\n            \n            di = []\n            for k in d:\n                \n                di.append(Pair(k, IntegerSingle(d[k])))\n            dictSingle = DictSingle(di)\n            \n            self._add([dictSingle], self.l)", "docstring": "add a config to StartCalendarInterval.\n\nArgs:\n*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.", "source": "juraj-google-style"}
{"code": "def _StructPackEncoder(wire_type, format):\n    value_size = struct.calcsize(format)\n\n    def SpecificEncoder(field_number, is_repeated, is_packed):\n        local_struct_pack = struct.pack\n        if is_packed:\n            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)\n            local_EncodeVarint = _EncodeVarint\n\n            def EncodePackedField(write, value):\n                write(tag_bytes)\n                local_EncodeVarint(write, (len(value) * value_size))\n                for element in value:\n                    write(local_struct_pack(format, element))\n            return EncodePackedField\n        elif is_repeated:\n            tag_bytes = TagBytes(field_number, wire_type)\n\n            def EncodeRepeatedField(write, value):\n                for element in value:\n                    write(tag_bytes)\n                    write(local_struct_pack(format, element))\n            return EncodeRepeatedField\n        else:\n            tag_bytes = TagBytes(field_number, wire_type)\n\n            def EncodeField(write, value):\n                write(tag_bytes)\n                return write(local_struct_pack(format, value))\n            return EncodeField\n    return SpecificEncoder", "docstring": "Return a constructor for an encoder for a fixed-width field.\n\nArgs:\nwire_type:  The field's wire type, for encoding tags.\nformat:  The format string to pass to struct.pack().", "source": "codesearchnet"}
{"code": "def ask_for_approval(full_changeset=None, params_diff=None, include_verbose=False):\n    approval_options = ['y', 'n']\n    if include_verbose:\n        approval_options.append('v')\n    approve = ui.ask('Execute the above changes? [{}] '.format('/'.join(approval_options))).lower()\n    if (include_verbose and (approve == 'v')):\n        if params_diff:\n            logger.info('Full changeset:\\n\\n%s\\n%s', format_params_diff(params_diff), yaml.safe_dump(full_changeset))\n        else:\n            logger.info('Full changeset:\\n%s', yaml.safe_dump(full_changeset))\n        return ask_for_approval()\n    elif (approve != 'y'):\n        raise exceptions.CancelExecution", "docstring": "Prompt the user for approval to execute a change set.\n\nArgs:\nfull_changeset (list, optional): A list of the full changeset that will\nbe output if the user specifies verbose.\nparams_diff (list, optional): A list of DictValue detailing the\ndifferences between two parameters returned by\n:func:`stacker.actions.diff.diff_dictionaries`\ninclude_verbose (bool, optional): Boolean for whether or not to include\nthe verbose option", "source": "codesearchnet"}
{"code": "def arange(start, stop=None, step=1, dtype=None):\n    if not step:\n        raise ValueError('step must be non-zero.')\n    if dtype:\n        dtype = np_utils.result_type(dtype)\n    elif stop is None:\n        dtype = np_utils.result_type(start, step)\n    else:\n        dtype = np_utils.result_type(start, step, stop)\n    if step > 0 and (stop is not None and start > stop or (stop is None and start < 0)):\n        return array([], dtype=dtype)\n    if step < 0 and (stop is not None and start < stop or (stop is None and start > 0)):\n        return array([], dtype=dtype)\n    return math_ops.cast(math_ops.range(start, limit=stop, delta=step), dtype=dtype)", "docstring": "Returns `step`-separated values in the range [start, stop).\n\nArgs:\nstart: Start of the interval. Included in the range.\nstop: End of the interval. If not specified, `start` is treated as 0 and\n`start` value is used as `stop`. If specified, it is not included in the\nrange if `step` is integer. When `step` is floating point, it may or may\nnot be included.\nstep: The difference between 2 consecutive values in the output range. It is\nrecommended to use `linspace` instead of using non-integer values for\n`step`.\ndtype: Optional. Type of the resulting ndarray. Could be a python type, a\nNumPy type or a TensorFlow `DType`. If not provided, the largest type of\n`start`, `stop`, `step` is used.\n\nRaises:\nValueError: If step is zero.", "source": "github-repos"}
{"code": "def common_vector_root(vec1, vec2):\n    root = []\n    for (v1, v2) in zip(vec1, vec2):\n        if (v1 == v2):\n            root.append(v1)\n        else:\n            return root\n    return root", "docstring": "Return common root of the two vectors.\n\nArgs:\nvec1 (list/tuple): First vector.\nvec2 (list/tuple): Second vector.\n\nUsage example::\n\n>>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])\n[1, 2]\n\nReturns:\nlist: Common part of two vectors or blank list.", "source": "codesearchnet"}
{"code": "def to_add_skip_model(self, start_id, end_id):\n        \n        self.operation_history.append((\"to_add_skip_model\", start_id, end_id))\n        filters_end = self.layer_list[end_id].output.shape[-1]\n        filters_start = self.layer_list[start_id].output.shape[-1]\n        start_node_id = self.layer_id_to_output_node_ids[start_id][0]\n\n        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]\n        end_node_id = self.layer_id_to_output_node_ids[end_id][0]\n\n        skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)\n\n        \n        new_conv_layer = get_conv_class(self.n_dim)(filters_start, filters_end, 1)\n        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)\n\n        \n        add_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))\n        add_layer = StubAdd()\n\n        self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id)\n        self._add_edge(add_layer, add_input_node_id, end_node_id)\n        self._add_edge(add_layer, skip_output_id, end_node_id)\n        add_layer.input = [\n            self.node_list[add_input_node_id],\n            self.node_list[skip_output_id],\n        ]\n        add_layer.output = self.node_list[end_node_id]\n        self.node_list[end_node_id].shape = add_layer.output_shape\n\n        \n        if self.weighted:\n            filter_shape = (1,) * self.n_dim\n            weights = np.zeros((filters_end, filters_start) + filter_shape)\n            bias = np.zeros(filters_end)\n            new_conv_layer.set_weights(\n                (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1])))\n            )", "docstring": "Add a weighted add skip-connection from after start node to end node.\nArgs:\nstart_id: The convolutional layer ID, after which to start the skip-connection.\nend_id: The convolutional layer ID, after which to end the skip-connection.", "source": "juraj-google-style"}
{"code": "def calculate_sleep_time(attempt, delay_factor=5.0, randomization_factor=0.5, max_delay=120):\n    if (attempt <= 0):\n        return 0\n    delay = (float((2 ** (attempt - 1))) * float(delay_factor))\n    delay = (delay * ((randomization_factor * random.random()) + 1))\n    return min(delay, max_delay)", "docstring": "Calculate the sleep time between retries, in seconds.\n\nBased off of `taskcluster.utils.calculateSleepTime`, but with kwargs instead\nof constant `delay_factor`/`randomization_factor`/`max_delay`.  The taskcluster\nfunction generally slept for less than a second, which didn't always get\npast server issues.\n\nArgs:\nattempt (int): the retry attempt number\ndelay_factor (float, optional): a multiplier for the delay time.  Defaults to 5.\nrandomization_factor (float, optional): a randomization multiplier for the\ndelay time.  Defaults to .5.\nmax_delay (float, optional): the max delay to sleep.  Defaults to 120 (seconds).\n\nReturns:\nfloat: the time to sleep, in seconds.", "source": "codesearchnet"}
{"code": "def get_data(self, file_path=sys.stdin, delimiter=',', categories_delimiter=None):\n    if (file_path == sys.stdin):\n        logger.info('Read data from standard input')\n        lines = [line.replace('\\n', '') for line in file_path]\n    else:\n        logger.info(('Read data from file ' + file_path))\n        with open(file_path) as file:\n            lines = list(file)\n    columns = lines[0].rstrip('\\n').split(delimiter)[1:]\n    categories = None\n    if categories_delimiter:\n        (columns, categories) = zip(*[c.split(categories_delimiter, 1) for c in columns])\n    size = len(columns)\n    data = [list(map(int, l.split(delimiter)[1:])) for l in lines[1:(size + 1)]]\n    return DesignStructureMatrix(data, columns, categories)", "docstring": "Implement get_dsm method from Provider class.\n\nParse CSV to return an instance of DSM.\n\nArgs:\nfile_path (str/fd): path or file descriptor.\ndelimiter (str): character(s) used as delimiter for columns.\ncategories_delimiter (str):\ncharacter(s) used as delimiter for categories and keys\n(first column).\n\nReturns:\nDSM: instance of DSM.", "source": "codesearchnet"}
{"code": "def plot_feature_correlation_heatmap(df, features, font_size=9, figsize=(15, 15), save_filename=None):\n    features = features[:]\n    features += ['target']\n    mcorr = df[features].corr()\n    mask = np.zeros_like(mcorr, dtype=np.bool)\n    mask[np.triu_indices_from(mask)] = True\n    cmap = sns.diverging_palette(220, 10, as_cmap=True)\n    fig = plt.figure(figsize=figsize)\n    heatmap = sns.heatmap(mcorr, mask=mask, cmap=cmap, square=True, annot=True, fmt='0.2f', annot_kws={'size': font_size})\n    heatmap.tick_params(axis='both', which='major', labelsize=font_size)\n    heatmap.tick_params(axis='both', which='minor', labelsize=font_size)\n    heatmap.set_xticklabels(features, rotation=90)\n    heatmap.set_yticklabels(reversed(features))\n    plt.show()\n    if (save_filename is not None):\n        fig.savefig(save_filename, dpi=300)", "docstring": "Plot a correlation heatmap between every feature pair.\n\nArgs:\ndf: Pandas dataframe containing the target column (named 'target').\nfeatures: The list of features to include in the correlation plot.\nfont_size: Font size for heatmap cells and axis labels.\nfigsize: The size of the plot.\nsave_filename: (Optional) The path of the file to save a high-res version of the plot to.", "source": "codesearchnet"}
{"code": "def _setup(self, delete=True):\n    if delete:\n        self.clear()\n    with nn.context_scope(self.ctx):\n        outputs = self.func(*(self.inputs_f + self.func_args), **self.func_kwargs)\n        if (not hasattr(outputs, '__iter__')):\n            self.outputs = [outputs]\n        else:\n            self.outputs = outputs\n    self.func_ins = self.outputs[0].parent\n    self.inputs = self.func_ins.inputs", "docstring": "Create a function instance and execute setup.\n\nArgs:\ndelete (bool): Delete buffered variables.", "source": "codesearchnet"}
{"code": "def fit(self, x_tr, y_tr, epochs=50, batchsize=32,\n            learning_rate=0.01, verbose=None, device=None):\n        \n        if batchsize > len(x_tr):\n            batchsize = len(x_tr)\n        verbose, device = SETTINGS.get_default(('verbose', verbose),\n                                               ('device', device))\n        self.model = NCC_model()\n        opt = th.optim.Adam(self.model.parameters(), lr=learning_rate)\n        criterion = th.nn.BCEWithLogitsLoss()\n        y = y_tr.values if isinstance(y_tr, pd.DataFrame) else y_tr\n        y = th.Tensor(y)/2 + .5\n        \n        self.model = self.model.to(device)\n        y = y.to(device)\n        dataset = []\n        for i, (idx, row) in enumerate(x_tr.iterrows()):\n\n            a = row['A'].reshape((len(row['A']), 1))\n            b = row['B'].reshape((len(row['B']), 1))\n            m = np.hstack((a, b))\n            m = m.astype('float32')\n            m = th.from_numpy(m).t().unsqueeze(0)\n            dataset.append(m)\n        dataset = [m.to(device) for m in dataset]\n        acc = [0]\n        da = th.utils.data.DataLoader(Dataset(dataset, y), batch_size=batchsize,\n                                      shuffle=True)\n        data_per_epoch = (len(dataset) \n        with trange(epochs, desc=\"Epochs\", disable=not verbose) as te:\n            for epoch in te:\n                with trange(data_per_epoch, desc=\"Batches of {}\".format(batchsize),\n                            disable=not (verbose and batchsize == len(dataset))) as t:\n                    output = []\n                    labels = []\n                    for (batch, label), i in zip(da, t):\n                        opt.zero_grad()\n                        \n                        out = th.stack([self.model(m) for m in batch], 0).squeeze(2)\n                        loss = criterion(out, label)\n                        loss.backward()\n                        t.set_postfix(loss=loss.item())\n                        opt.step()\n                        output.append(out)\n                        labels.append(label)\n                    acc = th.where(th.cat(output, 0) > .5,\n                                   th.ones(len(output)),\n                                   th.zeros(len(output))) - th.cat(labels, 0)\n                    te.set_postfix(Acc=1-acc.abs().mean().item())", "docstring": "Fit the NCC model.\n\nArgs:\nx_tr (pd.DataFrame): CEPC format dataframe containing the pairs\ny_tr (pd.DataFrame or np.ndarray): labels associated to the pairs\nepochs (int): number of train epochs\nlearning_rate (float): learning rate of Adam\nverbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``)\ndevice (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``)", "source": "juraj-google-style"}
{"code": "def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext=None) -> None:\n    pass", "docstring": "Puts multiple objects of the same type into the data sink.\n\nArgs:\ntype: The type of the objects being inserted.\nitems: The objects to be inserted.\ncontext: The context of the insertion (mutable).", "source": "codesearchnet"}
{"code": "def get(self, key=None, **kwargs):\n    clone = copy.deepcopy(self)\n    if self._start:\n        clone.adapter.set_params(start=self._start)\n    if self._rows:\n        clone.adapter.set_params(rows=self._rows)\n    if key:\n        (data, key) = clone.adapter.get(key)\n    elif kwargs:\n        (data, key) = clone.filter(**kwargs).adapter.get()\n    else:\n        (data, key) = clone.adapter.get()\n    if (clone._cfg['rtype'] == ReturnType.Object):\n        return (data, key)\n    return self._make_model(data, key)", "docstring": "Ensures that only one result is returned from DB and raises an exception otherwise.\nCan work in 3 different way.\n\n- If no argument is given, only does \"ensuring about one and only object\" job.\n- If key given as only argument, retrieves the object from DB.\n- if query filters given, implicitly calls filter() method.\n\nRaises:\nMultipleObjectsReturned: If there is more than one (1) record is returned.", "source": "codesearchnet"}
{"code": "def _compute_hparam_info_from_values(self, name, values):\n    \n    \n    \n    \n    \n    result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET)\n    distinct_values = set(\n        _protobuf_value_to_string(v) for v in values if _protobuf_value_type(v))\n    for v in values:\n      v_type = _protobuf_value_type(v)\n      if not v_type:\n        continue\n      if result.type == api_pb2.DATA_TYPE_UNSET:\n        result.type = v_type\n      elif result.type != v_type:\n        result.type = api_pb2.DATA_TYPE_STRING\n      if result.type == api_pb2.DATA_TYPE_STRING:\n        \n        break\n\n    \n    if result.type == api_pb2.DATA_TYPE_UNSET:\n      return None\n\n    \n    \n    if (result.type == api_pb2.DATA_TYPE_STRING\n        and len(distinct_values) <= self._max_domain_discrete_len):\n      result.domain_discrete.extend(distinct_values)\n\n    return result", "docstring": "Builds an HParamInfo message from the hparam name and list of values.\n\nArgs:\nname: string. The hparam name.\nvalues: list of google.protobuf.Value messages. The list of values for the\nhparam.\n\nReturns:\nAn api_pb2.HParamInfo message.", "source": "juraj-google-style"}
{"code": "def underlying_variable_ref(t):\n  \n  while t.op.type in [\"Identity\", \"ReadVariableOp\", \"Enter\"]:\n    t = t.op.inputs[0]\n\n  op_type = t.op.type\n  if \"Variable\" in op_type or \"VarHandle\" in op_type:\n    return t\n  else:\n    return None", "docstring": "Find the underlying variable ref.\n\nTraverses through Identity, ReadVariableOp, and Enter ops.\nStops when op type has Variable or VarHandle in name.\n\nArgs:\nt: a Tensor\n\nReturns:\na Tensor that is a variable ref, or None on error.", "source": "juraj-google-style"}
{"code": "def is_commutable(expr1, expr2, eps=0.00000001):\n    \n    return sum((x * x.conjugate()).real for x in commutator(expr1, expr2).coeffs()) < eps", "docstring": "Test whether expr1 and expr2 are commutable.\n\nArgs:\nexpr1 (Expr, Term or Pauli operator): Pauli's expression.\nexpr2 (Expr, Term or Pauli operator): Pauli's expression.\neps (float, optional): Machine epsilon.\nIf |[expr1, expr2]| < eps, consider it is commutable.\n\nReturns:\nbool: if expr1 and expr2 are commutable, returns True, otherwise False.", "source": "juraj-google-style"}
{"code": "def get_last_checkpoint(rundir='runinfo'):\n    if (not os.path.isdir(rundir)):\n        return []\n    dirs = sorted(os.listdir(rundir))\n    if (len(dirs) == 0):\n        return []\n    last_runid = dirs[(- 1)]\n    last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n    if (not os.path.isdir(last_checkpoint)):\n        return []\n    return [last_checkpoint]", "docstring": "Find the checkpoint from the last run, if one exists.\n\nNote that checkpoints are incremental, and this helper will not find\nprevious checkpoints from earlier than the most recent run. It probably\nshould be made to do so.\n\nKwargs:\n- rundir(str) : Path to the runinfo directory\n\nReturns:\n- a list suitable for checkpointFiles parameter of DataFlowKernel\nconstructor, with 0 or 1 elements", "source": "codesearchnet"}
{"code": "def max(self, value):\n        \n        if value == self._defaults['max'] and 'max' in self._values:\n            del self._values['max']\n        else:\n            self._values['max'] = value", "docstring": "The max property.\n\nArgs:\nvalue (float). the property value.", "source": "juraj-google-style"}
{"code": "def DecoderLayer(feature_depth,\n                 feedforward_depth,\n                 num_heads,\n                 dropout,\n                 mode):\n  \n  return layers.Serial(\n      layers.Residual(  \n          layers.LayerNorm(),\n          layers.Branch(),\n          layers.Parallel(layers.Identity(),  \n                          layers.CausalMask(axis=-2)),  \n          layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,\n                                      dropout=dropout, mode=mode),\n          layers.Dropout(rate=dropout, mode=mode)\n      ),\n      ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)\n  )", "docstring": "Transformer decoder layer.\n\nArgs:\nfeature_depth: int:  depth of embedding\nfeedforward_depth: int: depth of feed-forward layer\nnum_heads: int: number of attention heads\ndropout: float: dropout rate (how much to drop out)\nmode: str: 'train' or 'eval'\n\nReturns:\nthe layer.", "source": "juraj-google-style"}
{"code": "def is_noncopyable(class_, already_visited_cls_vars=None):\n    \n    logger = utils.loggers.cxx_parser\n\n    class_decl = class_traits.get_declaration(class_)\n\n    true_header = \"is_noncopyable(TRUE) - %s - \" % class_.decl_string\n\n    if is_union(class_):\n        return False\n\n    if class_decl.is_abstract:\n        logger.debug(true_header + \"abstract client\")\n        return True\n\n    \n    \n    copy_ = find_copy_constructor(class_decl)\n    if copy_ and copy_.access_type == 'public' and not copy_.is_artificial:\n        return False\n\n    if already_visited_cls_vars is None:\n        already_visited_cls_vars = []\n\n    for base_desc in class_decl.recursive_bases:\n        assert isinstance(base_desc, class_declaration.hierarchy_info_t)\n\n        if base_desc.related_class.decl_string in \\\n                ('::boost::noncopyable', '::boost::noncopyable_::noncopyable'):\n            logger.debug(true_header + \"derives from boost::noncopyable\")\n            return True\n\n        if not has_copy_constructor(base_desc.related_class):\n\n            base_copy_ = find_copy_constructor(base_desc.related_class)\n\n            if base_copy_ and base_copy_.access_type == 'private':\n                logger.debug(\n                    true_header +\n                    \"there is private copy constructor\")\n                return True\n            elif __is_noncopyable_single(\n                    base_desc.related_class, already_visited_cls_vars):\n                logger.debug(\n                    true_header +\n                    \"__is_noncopyable_single returned True\")\n                return True\n\n        if __is_noncopyable_single(\n                base_desc.related_class, already_visited_cls_vars):\n            logger.debug(\n                true_header +\n                \"__is_noncopyable_single returned True\")\n            return True\n\n    if not has_copy_constructor(class_decl):\n        logger.debug(true_header + \"does not have trivial copy constructor\")\n        return True\n    elif not has_public_constructor(class_decl):\n        logger.debug(true_header + \"does not have a public constructor\")\n        return True\n    elif has_destructor(class_decl) and not has_public_destructor(class_decl):\n        logger.debug(true_header + \"has private destructor\")\n        return True\n\n    return __is_noncopyable_single(class_decl, already_visited_cls_vars)", "docstring": "Checks if class is non copyable\n\nArgs:\nclass_ (declarations.class_t): the class to be checked\nalready_visited_cls_vars (list): optional list of vars that should not\nbe checked a second time, to prevent infinite recursions.\nIn general you can ignore this argument, it is mainly used during\nrecursive calls of is_noncopyable() done by pygccxml.\n\nReturns:\nbool: if the class is non copyable", "source": "juraj-google-style"}
{"code": "def google_api_append(schema, values, rows):\n    for row in rows:\n        for s in schema:\n            row[s['name']] = values[s['name']]\n        yield row", "docstring": "Append columns to the rows containing the kwargs used to call the API.\nArgs:\nschema (dict): name of the key to use for the api arguments\nvalues (dict): the kwargs used to call the API\nrows (list): a list of rows to add the prefix to each one\n\nReturns (list):\nA generator containing the rows", "source": "github-repos"}
{"code": "def count_resource_variables(model):\n    if not isinstance(model, schema_fb.ModelT):\n        model = convert_bytearray_to_object(model)\n    unique_shared_names = set()\n    for subgraph in model.subgraphs:\n        if subgraph.operators is None:\n            continue\n        for op in subgraph.operators:\n            builtin_code = schema_util.get_builtin_code_from_operator_code(model.operatorCodes[op.opcodeIndex])\n            if builtin_code == schema_fb.BuiltinOperator.VAR_HANDLE:\n                unique_shared_names.add(op.builtinOptions.sharedName)\n    return len(unique_shared_names)", "docstring": "Calculates the number of unique resource variables in a model.\n\nArgs:\nmodel: the input tflite model, either as bytearray or object.\n\nReturns:\nAn integer number representing the number of unique resource variables.", "source": "github-repos"}
{"code": "def bulkDetails(self, packageNames):\n    params = {'au': '1'}\n    req = googleplay_pb2.BulkDetailsRequest()\n    req.docid.extend(packageNames)\n    data = req.SerializeToString()\n    message = self.executeRequestApi2(BULK_URL, post_data=data.decode('utf-8'), content_type=CONTENT_TYPE_PROTO, params=params)\n    response = message.payload.bulkDetailsResponse\n    return [(None if (not utils.hasDoc(entry)) else utils.parseProtobufObj(entry.doc)) for entry in response.entry]", "docstring": "Get several apps details from a list of package names.\n\nThis is much more efficient than calling N times details() since it\nrequires only one request. If an item is not found it returns an empty object\ninstead of throwing a RequestError('Item not found') like the details() function\n\nArgs:\npackageNames (list): a list of app IDs (usually starting with 'com.').\n\nReturns:\na list of dictionaries containing docv2 data, or None\nif the app doesn't exist", "source": "codesearchnet"}
{"code": "def ParseFileEntry(self, parser_mediator, file_entry):\n    filename = parser_mediator.GetFilename()\n    database = SQLiteDatabase(filename, temporary_directory=parser_mediator.temporary_directory)\n    file_object = file_entry.GetFileObject()\n    try:\n        database.Open(file_object)\n    except (IOError, ValueError, sqlite3.DatabaseError) as exception:\n        parser_mediator.ProduceExtractionWarning('unable to open SQLite database with error: {0!s}'.format(exception))\n        file_object.close()\n        return\n    (database_wal, wal_file_entry) = self._OpenDatabaseWithWAL(parser_mediator, file_entry, file_object, filename)\n    file_object.close()\n    cache = SQLiteCache()\n    try:\n        table_names = frozenset(database.tables)\n        for plugin in self._plugins:\n            if (not plugin.REQUIRED_TABLES.issubset(table_names)):\n                continue\n            schema_match = plugin.CheckSchema(database)\n            if (plugin.REQUIRES_SCHEMA_MATCH and (not schema_match)):\n                parser_mediator.ProduceExtractionWarning('plugin: {0:s} found required tables but not a matching schema'.format(plugin.NAME))\n                continue\n            parser_mediator.SetFileEntry(file_entry)\n            parser_mediator.AddEventAttribute('schema_match', schema_match)\n            try:\n                plugin.UpdateChainAndProcess(parser_mediator, cache=cache, database=database, database_wal=database_wal, wal_file_entry=wal_file_entry)\n            except Exception as exception:\n                parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse SQLite database with error: {1!s}'.format(plugin.NAME, exception))\n            finally:\n                parser_mediator.RemoveEventAttribute('schema_match')\n            if (not database_wal):\n                continue\n            schema_match = plugin.CheckSchema(database)\n            parser_mediator.SetFileEntry(wal_file_entry)\n            parser_mediator.AddEventAttribute('schema_match', schema_match)\n            try:\n                plugin.UpdateChainAndProcess(parser_mediator, cache=cache, database=database, database_wal=database_wal, wal_file_entry=wal_file_entry)\n            except Exception as exception:\n                parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse SQLite database and WAL with error: {1!s}'.format(plugin.NAME, exception))\n            finally:\n                parser_mediator.RemoveEventAttribute('schema_match')\n    finally:\n        database.Close()", "docstring": "Parses a SQLite database file entry.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_entry (dfvfs.FileEntry): file entry to be parsed.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def ExtractCredentialsFromPathSpec(self, path_spec):\n    \n    credentials = manager.CredentialsManager.GetCredentials(path_spec)\n    for identifier in credentials.CREDENTIALS:\n      value = getattr(path_spec, identifier, None)\n      if value is None:\n        continue\n\n      self.SetCredential(path_spec, identifier, value)", "docstring": "Extracts credentials from a path specification.\n\nArgs:\npath_spec (PathSpec): path specification to extract credentials from.", "source": "juraj-google-style"}
{"code": "def filter_line(self, line):\n    filtered = []\n    warn_msg = []\n    splited = line.split('\\n')\n    if not line and len(splited) < 1:\n        warn_msg = '[Warning] Empty line detected while filtering lines.'\n        logging.warning(warn_msg)\n        self.warning_msg.append(warn_msg)\n    if splited[0] == '[':\n        filtered = splited[1:]\n    elif '[' in splited[0]:\n        splited = splited[0].replace('[', '')\n        filtered = splited\n    else:\n        warn_msg = '[Warning] Format error. `[` could be missing in '\n        warn_msg += 'the config (.ini) file. (line = %s)' % str(line)\n        logging.warning(warn_msg)\n        self.warning_msg.append(warn_msg)\n    if filtered[-1] == ']':\n        filtered = filtered[:-1]\n    elif ']' in filtered[-1]:\n        filtered[-1] = filtered[-1].replace(']', '')\n    else:\n        warn_msg = '[Warning] Format error. `]` could be missing in '\n        warn_msg += 'the config (.ini) file. (line = %s)' % str(line)\n        logging.warning(warn_msg)\n        self.warning_msg.append(warn_msg)\n    return filtered", "docstring": "Removes `[` or `]` from the input line.\n\nArgs:\nline: String that is a compatibility specification line from the `.ini`\nconfig file.\n\nReturns:\nString that is a compatibility specification line without `[` and `]`.", "source": "github-repos"}
{"code": "def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False, remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True, remove_res_hetero=True, keep_chemicals=None, keep_res_only=None, add_chain_id_if_empty='X', keep_chains=None):\n    outfile = ssbio.utils.outfile_maker(inname=pdb_file, append_to_name=out_suffix, outdir=outdir, outext='.pdb')\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        my_pdb = StructureIO(pdb_file)\n        my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt, remove_atom_hydrogen=remove_atom_hydrogen, keep_atom_alt_id=keep_atom_alt_id, add_atom_occ=add_atom_occ, remove_res_hetero=remove_res_hetero, keep_res_only=keep_res_only, add_chain_id_if_empty=add_chain_id_if_empty, keep_chains=keep_chains, keep_chemicals=keep_chemicals)\n        my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix, out_dir=outdir, custom_selection=my_cleaner, force_rerun=force_rerun)\n        return my_clean_pdb\n    else:\n        return outfile", "docstring": "Clean a PDB file.\n\nArgs:\npdb_file (str): Path to input PDB file\nout_suffix (str): Suffix to append to original filename\noutdir (str): Path to output directory\nforce_rerun (bool): If structure should be re-cleaned if a clean file exists already\nremove_atom_alt (bool): Remove alternate positions\nkeep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep\nremove_atom_hydrogen (bool): Remove hydrogen atoms\nadd_atom_occ (bool): Add atom occupancy fields if not present\nremove_res_hetero (bool): Remove all HETATMs\nkeep_chemicals (str, list): If removing HETATMs, keep specified chemical names\nkeep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!\nadd_chain_id_if_empty (str): Add a chain ID if not present\nkeep_chains (str, list): Keep only these chains\n\nReturns:\nstr: Path to cleaned PDB file", "source": "codesearchnet"}
{"code": "def vec_to_surface(vec):\n    miller = ([None] * 3)\n    index = []\n    for (i, value) in enumerate(vec):\n        if (abs(value) < 1e-08):\n            miller[i] = 0\n        else:\n            index.append(i)\n    if (len(index) == 1):\n        miller[index[0]] = 1\n    else:\n        min_index = np.argmin([i for i in vec if (i != 0)])\n        true_index = index[min_index]\n        index.pop(min_index)\n        frac = []\n        for (i, value) in enumerate(index):\n            frac.append(Fraction((vec[value] / vec[true_index])).limit_denominator(100))\n        if (len(index) == 1):\n            miller[true_index] = frac[0].denominator\n            miller[index[0]] = frac[0].numerator\n        else:\n            com_lcm = lcm(frac[0].denominator, frac[1].denominator)\n            miller[true_index] = com_lcm\n            miller[index[0]] = (frac[0].numerator * int(round((com_lcm / frac[0].denominator))))\n            miller[index[1]] = (frac[1].numerator * int(round((com_lcm / frac[1].denominator))))\n    return miller", "docstring": "Transform a float vector to a surface miller index with integers.\n\nArgs:\nvec (1 by 3 array float vector): input float vector\nReturn:\nthe surface miller index of the input vector.", "source": "codesearchnet"}
{"code": "def flatten(weights, start=0, stop=2):\n    \n    for key, val in weights.items():\n        new_shape = val.shape[0:start] + (-1, ) + val.shape[stop:]\n        weights[key] = val.reshape(new_shape)\n    return weights", "docstring": "This methods reshapes all values in a dictionary.\n\nThe indices from start to stop will be flattened into a single index.\n\nArgs:\nweights: A dictionary mapping keys to numpy arrays.\nstart: The starting index.\nstop: The ending index.", "source": "juraj-google-style"}
{"code": "def compile_sgf(in_path, optimize=True, model=None):\n    if (model is None):\n        model = DeviceModel()\n    parser = SensorGraphFileParser()\n    parser.parse_file(in_path)\n    parser.compile(model)\n    if optimize:\n        opt = SensorGraphOptimizer()\n        opt.optimize(parser.sensor_graph, model=model)\n    return parser.sensor_graph", "docstring": "Compile and optionally optimize an SGF file.\n\nArgs:\nin_path (str): The input path to the sgf file to compile.\noptimize (bool): Whether to optimize the compiled result,\ndefaults to True if not passed.\nmodel (DeviceModel): Optional device model if we are\ncompiling for a nonstandard device.  Normally you should\nleave this blank.\n\nReturns:\nSensorGraph: The compiled sensorgraph object", "source": "codesearchnet"}
{"code": "def validate_tag(self, key, value):\n        \n        if key == 'owner':\n            return validate_email(value, self.partial_owner_match)\n        elif key == self.gdpr_tag:\n            return value in self.gdpr_tag_values\n        else:\n            return True", "docstring": "Check whether a tag value is valid\n\nArgs:\nkey: A tag key\nvalue: A tag value\n\nReturns:\n`(True or False)`\nA boolean indicating whether or not the value is valid", "source": "juraj-google-style"}
{"code": "def handle_config_change(self, new_config):\n        \n        if self.user_handler:\n            self.user_handler(self.current_config, new_config)\n        self._call_spec_handlers(new_config)\n        self.current_config = copy.deepcopy(new_config)", "docstring": "Handle the new configuration.\n\nArgs:\nnew_config (dict): The new configuration", "source": "juraj-google-style"}
{"code": "def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> List[str]:\n    raise NotImplementedError", "docstring": "Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.\n\nArgs:\ntext (`str`):\nThe sequence to be encoded.\npair (`str`, *optional*):\nA second sequence to be encoded with the first.\nadd_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not to add the special tokens associated with the corresponding model.\nkwargs (additional keyword arguments, *optional*):\nWill be passed to the underlying model specific encode method. See details in\n[`~PreTrainedTokenizerBase.__call__`]\n\nReturns:\n`List[str]`: The list of tokens.", "source": "github-repos"}
{"code": "def get_bond_length(sp1, sp2, bond_order=1):\n    sp1 = (Element(sp1) if isinstance(sp1, str) else sp1)\n    sp2 = (Element(sp2) if isinstance(sp2, str) else sp2)\n    try:\n        all_lengths = obtain_all_bond_lengths(sp1, sp2)\n        return all_lengths[bond_order]\n    except (ValueError, KeyError):\n        warnings.warn(('No order %d bond lengths between %s and %s found in database. Returning sum of atomic radius.' % (bond_order, sp1, sp2)))\n        return (sp1.atomic_radius + sp2.atomic_radius)", "docstring": "Get the bond length between two species.\n\nArgs:\nsp1 (Specie): First specie.\nsp2 (Specie): Second specie.\nbond_order: For species with different possible bond orders,\nthis allows one to obtain the bond length for a particular bond\norder. For example, to get the C=C bond length instead of the\nC-C bond length, this should be set to 2. Defaults to 1.\n\nReturns:\nBond length in Angstrom. If no data is available, the sum of the atomic\nradius is used.", "source": "codesearchnet"}
{"code": "def Serialize(self, writer: BinaryWriter):\n        \n        byt = None\n        if self.Type == StateType.Account:\n            byt = b'\\x40'\n        elif self.Type == StateType.Validator:\n            byt = b'\\x48'\n        writer.WriteByte(byt)\n        writer.WriteVarBytes(self.Key)\n        writer.WriteVarString(self.Field)\n        writer.WriteVarBytes(self.Value)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes, check_all_flat=False):\n    any_sample_weight = sample_weights is not None and any((w is not None for w in sample_weights))\n    partial_sample_weight = any_sample_weight and any((w is None for w in sample_weights))\n    if not any_sample_weight:\n        return (None, any_sample_weight, partial_sample_weight)\n    if not partial_sample_weight:\n        return (sample_weights, any_sample_weight, partial_sample_weight)\n    if check_all_flat:\n        nest.assert_same_structure(list_to_tuple(sample_weights), list_to_tuple(nest.flatten(sample_weights)))\n        nest.assert_same_structure(list_to_tuple(outputs), list_to_tuple(nest.flatten(outputs)))\n        if sample_weight_modes is not None:\n            nest.assert_same_structure(sample_weight_modes, nest.flatten(sample_weight_modes))\n    new_sample_weights = []\n    for i, sw in enumerate(sample_weights):\n        if sw is None:\n            as_numpy = isinstance(outputs[i], np.ndarray)\n            output = outputs[i]\n            output_shape = output.shape if as_numpy else array_ops.shape(output)\n            is_temporal = sample_weight_modes is not None and sample_weight_modes[i] == 'temporal'\n            sw_shape = (output_shape[0], output_shape[1]) if is_temporal else (output_shape[0],)\n            new_sample_weights.append(np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape))\n        else:\n            new_sample_weights.append(sw)\n    return (list_to_tuple(new_sample_weights), any_sample_weight, partial_sample_weight)", "docstring": "Adds 1.0 as sample weights for the outputs for which there is no weight.\n\nArgs:\noutputs: List of model outputs.\nsample_weights: List of sample weight inputs.\nsample_weight_modes: List of sample weight modes or None.\ncheck_all_flat: Ensure that inputs are not nested structures. This is not\na free check, so we may not want to run it eagerly every iteration.\n\nReturns:\nTuple of sample weights, one sample weight for every output, and booleans\ndescribing the raw sample weights.", "source": "github-repos"}
{"code": "def __init__(self, path, delimiter=b','):\n    \n    self._path = path\n    self._delimiter = delimiter", "docstring": "Initializes an instance of a Csv instance.\nArgs:\npath: path of the Csv file.\ndelimiter: the separator used to parse a Csv line.", "source": "juraj-google-style"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_Sklearn'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def __init__(self, size):\n    \n    super(CircularBuffer, self).__init__()\n    self._index = 0\n    self._list = []\n    self._size = size", "docstring": "Initializes a circular buffer object.\n\nArgs:\nsize (int): number of elements in the buffer.", "source": "juraj-google-style"}
{"code": "def CheckGradConfigsToTest():\n\n    def Config(input_size, filter_size, out_size, stride=1, padding='SAME', dilations=None):\n        return (input_size, filter_size, out_size, stride, padding, dilations)\n    return [Config([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2]), Config([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, padding='VALID'), Config([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4]), Config([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2]), Config([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, padding='VALID'), Config([2, 5, 8, 1], [4, 3, 1, 2], [2, 5, 8, 2], dilations=[1, 2]), Config([1, 3, 1, 2], [2, 1, 2, 1], [1, 3, 1, 2]), Config([2, 2, 3, 2], [2, 1, 2, 1], [2, 2, 3, 2]), Config([2, 2, 3, 1], [2, 2, 1, 1], [2, 2, 3, 1])]", "docstring": "Iterator for different convolution shapes, strides and paddings.\n\ncompute_gradient_error() is very expensive. So the configs should be\nrelatively small.\n\nReturns:\nList of tuples (input_size, filter_size, out_size, stride, padding,\ndilations), the depthwise convolution parameters.", "source": "github-repos"}
{"code": "async def getPropNorm(self, prop, valu):\n    pobj = self.model.prop(prop)\n    if (pobj is None):\n        raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.', prop=prop)\n    (norm, info) = pobj.type.norm(valu)\n    return (norm, info)", "docstring": "Get the normalized property value based on the Cortex data model.\n\nArgs:\nprop (str): The property to normalize.\nvalu: The value to normalize.\n\nReturns:\n(tuple): A two item tuple, containing the normed value and the info dictionary.\n\nRaises:\ns_exc.NoSuchProp: If the prop does not exist.\ns_exc.BadTypeValu: If the value fails to normalize.", "source": "codesearchnet"}
{"code": "def extract_jtl_string_pairs_from_text_file(results_dict, file_path):\n    result_pairs = re.findall(JTL_REGEX, open(file_path).read())\n    for (result_key, result_comment) in result_pairs:\n        results_dict[result_key] = result_comment\n    return results_dict", "docstring": "Extracts all string pairs matching the JTL pattern from given text file.\n\nThis can be used as an \"extract_func\" argument in the extract_string_pairs_in_directory method.\n\nArgs:\nresults_dict (dict): The dict to add the the string pairs to.\nfile_path (str): The path of the file from which to extract the string pairs.", "source": "codesearchnet"}
{"code": "def stations_listeners(stations):\n    \n    stations = stations if isinstance(stations, (list, tuple)) else [stations]\n\n    listeners = []\n    for sta in stations:\n\n        listeners.append(StationSignalListener(sta))\n        listeners.append(StationMaxListener(sta))\n        if sta.mask is not None:\n            listeners.append(StationMaskListener(sta))\n\n    return listeners", "docstring": "Function for creating listeners for a a list of station\n\nArgs:\nstations (iterable): List of TopocentricFrame\nReturn:\nlist of Listener", "source": "juraj-google-style"}
{"code": "def __init__(self, score, related_data=None):\n        \n        self.check_score(score)\n        if related_data is None:\n            related_data = {}\n        self.score, self.related_data = score, related_data\n        if isinstance(score, Exception):\n            \n            self.__class__ = ErrorScore\n        super(Score, self).__init__()", "docstring": "Abstract base class for scores.\n\nArgs:\nscore (int, float, bool): A raw value to wrap in a Score class.\nrelated_data (dict, optional): Artifacts to store with the score.", "source": "juraj-google-style"}
{"code": "def has_button(self, button):\n\t\t\n\n\t\trc = self._libinput.libinput_device_pointer_has_button(\n\t\t\tself._handle, button)\n\t\tassert rc >= 0, 'This device is not a pointer device'\n\t\treturn bool(rc)", "docstring": "Check if this device has a given button.\n\nArgs:\nbutton (int): Button to check for, see ``input.h`` for button\ndefinitions.\nReturns:\nbool: :obj:`True` if the device has this button, :obj:`False` if\nit does not.\nRaises:\nAssertionError", "source": "juraj-google-style"}
{"code": "def format_checksum(checksum_pyxb):\n    \n    return '{}/{}'.format(\n        checksum_pyxb.algorithm.upper().replace('-', ''), checksum_pyxb.value().lower()\n    )", "docstring": "Create string representation of a PyXB Checksum object.\n\nArgs:\nPyXB Checksum object\n\nReturns:\nstr : Combined hexadecimal value and algorithm name.", "source": "juraj-google-style"}
{"code": "def readToken(self):\n    if (not self.tokenFile):\n        raise SkypeAuthException('No token file specified')\n    try:\n        with open(self.tokenFile, 'r') as f:\n            lines = f.read().splitlines()\n    except OSError:\n        raise SkypeAuthException(\"Token file doesn't exist or not readable\")\n    try:\n        (user, skypeToken, skypeExpiry, regToken, regExpiry, msgsHost) = lines\n        skypeExpiry = datetime.fromtimestamp(int(skypeExpiry))\n        regExpiry = datetime.fromtimestamp(int(regExpiry))\n    except ValueError:\n        raise SkypeAuthException('Token file is malformed')\n    if (datetime.now() >= skypeExpiry):\n        raise SkypeAuthException('Token file has expired')\n    self.userId = user\n    self.tokens['skype'] = skypeToken\n    self.tokenExpiry['skype'] = skypeExpiry\n    if (datetime.now() < regExpiry):\n        self.tokens['reg'] = regToken\n        self.tokenExpiry['reg'] = regExpiry\n        self.msgsHost = msgsHost\n    else:\n        self.getRegToken()", "docstring": "Attempt to re-establish a connection using previously acquired tokens.\n\nIf the Skype token is valid but the registration token is invalid, a new endpoint will be registered.\n\nRaises:\n.SkypeAuthException: if the token file cannot be used to authenticate", "source": "codesearchnet"}
{"code": "def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US):\n    self._profile_datum_list = profile_datum_list\n    self.formatted_start_time = [datum.start_time for datum in profile_datum_list]\n    self.formatted_op_time = [cli_shared.time_to_readable_str(datum.op_time, force_time_unit=time_unit) for datum in profile_datum_list]\n    self.formatted_exec_time = [cli_shared.time_to_readable_str(datum.node_exec_stats.all_end_rel_micros, force_time_unit=time_unit) for datum in profile_datum_list]\n    self._column_names = ['Node', 'Op Type', 'Start Time (us)', 'Op Time (%s)' % time_unit, 'Exec Time (%s)' % time_unit, 'Filename:Lineno(function)']\n    self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]", "docstring": "Constructor.\n\nArgs:\nprofile_datum_list: List of `ProfileDatum` objects.\ntime_unit: must be in cli_shared.TIME_UNITS.", "source": "github-repos"}
{"code": "def __get_all_scrapers(self):\n    modules_strings = self.__get_all_scrapers_modules()\n    modules = []\n    for module_string in modules_strings:\n        module = importlib.import_module(('nyawc.scrapers.' + module_string))\n        modules.append(getattr(module, module_string))\n    return modules", "docstring": "Find all available scraper references.\n\nReturns:\nlist(obj): The scraper references.", "source": "codesearchnet"}
{"code": "def swo_supported_speeds(self, cpu_speed, num_speeds=3):\n        \n        buf_size = num_speeds\n        buf = (ctypes.c_uint32 * buf_size)()\n        res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return list(buf)[:res]", "docstring": "Retrives a list of SWO speeds supported by both the target and the\nconnected J-Link.\n\nThe supported speeds are returned in order from highest to lowest.\n\nArgs:\nself (JLink): the ``JLink`` instance\ncpu_speed (int): the target's CPU speed in Hz\nnum_speeds (int): the number of compatible speeds to return\n\nReturns:\nA list of compatible SWO speeds in Hz in order from highest to lowest.", "source": "juraj-google-style"}
{"code": "def _get_next_date_from_partial_date(partial_date):\n    \n    relativedelta_arg = 'years'\n\n    if partial_date.month:\n        relativedelta_arg = 'months'\n    if partial_date.day:\n        relativedelta_arg = 'days'\n\n    next_date = parse(partial_date.dumps()) + relativedelta(**{relativedelta_arg: 1})\n    return PartialDate.from_parts(\n        next_date.year,\n        next_date.month if partial_date.month else None,\n        next_date.day if partial_date.day else None\n    )", "docstring": "Calculates the next date from the given partial date.\n\nArgs:\npartial_date (inspire_utils.date.PartialDate): The partial date whose next date should be calculated.\n\nReturns:\nPartialDate: The next date from the given partial date.", "source": "juraj-google-style"}
{"code": "def has_resource(self, feature_column, resource_name):\n    return resource_name in self._cols_to_resources_map[feature_column]", "docstring": "Returns true iff a resource with same name exists.\n\nResources can be things such as tables, variables, trackables, etc.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nresource_name: Name of the resource.", "source": "github-repos"}
{"code": "def WrapTypeDeclUnit(name, items):\n    functions = {}\n    classes = {}\n    constants = collections.defaultdict(TypeBuilder)\n    aliases = {}\n    typevars = {}\n    for item in items:\n        if isinstance(item, pytd.Function):\n            if item.name in functions:\n                if item.kind != functions[item.name].kind:\n                    raise ValueError(f\"Can't combine {item.kind} and {functions[item.name].kind}\")\n                functions[item.name] = pytd.Function(item.name, functions[item.name].signatures + item.signatures, item.kind)\n            else:\n                functions[item.name] = item\n        elif isinstance(item, pytd.Class):\n            if item.name in classes:\n                raise NameError(f'Duplicate top level class: {item.name!r}')\n            classes[item.name] = item\n        elif isinstance(item, pytd.Constant):\n            constants[item.name].add_type(item.type)\n        elif isinstance(item, pytd.Alias):\n            if item.name in aliases:\n                raise NameError(f'Duplicate top level alias or import: {item.name!r}')\n            aliases[item.name] = item\n        elif isinstance(item, pytd.TypeParameter):\n            if item.name in typevars:\n                raise NameError(f'Duplicate top level type parameter: {item.name!r}')\n            typevars[item.name] = item\n        else:\n            raise ValueError(f'Invalid top level pytd item: {type(item)!r}')\n    categories = {'function': functions, 'class': classes, 'constant': constants, 'alias': aliases, 'typevar': typevars}\n    for c1, c2 in itertools.combinations(categories, 2):\n        _check_intersection(categories[c1], categories[c2], c1, c2)\n    return pytd.TypeDeclUnit(name=name, constants=tuple((pytd.Constant(name, t.build()) for name, t in sorted(constants.items()))), type_params=tuple(typevars.values()), classes=tuple(classes.values()), functions=tuple(functions.values()), aliases=tuple(aliases.values()))", "docstring": "Given a list (classes, functions, etc.), wrap a pytd around them.\n\nArgs:\nname: The name attribute of the resulting TypeDeclUnit.\nitems: A list of items. Can contain pytd.Class, pytd.Function and\npytd.Constant.\n\nReturns:\nA pytd.TypeDeclUnit.\nRaises:\nValueError: In case of an invalid item in the list.\nNameError: For name conflicts.", "source": "github-repos"}
{"code": "def create_latest_log_alias(actual_path, alias):\n    alias_path = os.path.join(os.path.dirname(actual_path), alias)\n    utils.create_alias(actual_path, alias_path)", "docstring": "Creates a symlink to the latest test run logs.\n\nArgs:\nactual_path: string, the source directory where the latest test run's\nlogs are.\nalias: string, the name of the directory to contain the latest log\nfiles.", "source": "github-repos"}
{"code": "def eval_from_json(json):\n        \n        close  = json[-1]['close'] \n        low    = min(poloniex.get_attribute(json, 'low')) \n        high   = max(poloniex.get_attribute(json, 'high')) \n        return SO.eval_algorithm(close, low, high)", "docstring": "Evaluates SO from JSON (typically Poloniex API response)\n\nArgs:\njson: List of dates where each entry is a dict of raw market data.\n\nReturns:\nFloat SO between 0 and 100.", "source": "juraj-google-style"}
{"code": "def get_open_file(self, file_des):\n    if (not is_int_type(file_des)):\n        raise TypeError('an integer is required')\n    if ((file_des >= len(self.open_files)) or (self.open_files[file_des] is None)):\n        self.raise_os_error(errno.EBADF, str(file_des))\n    return self.open_files[file_des][0]", "docstring": "Return an open file.\n\nArgs:\nfile_des: File descriptor of the open file.\n\nRaises:\nOSError: an invalid file descriptor.\nTypeError: filedes is not an integer.\n\nReturns:\nOpen file object.", "source": "codesearchnet"}
{"code": "def when_matches(self, path, good_value, bad_values=None, timeout=None, event_timeout=None):\n    future = self.when_matches_async(path, good_value, bad_values)\n    self.wait_all_futures(future, timeout=timeout, event_timeout=event_timeout)", "docstring": "Resolve when an path value equals value\n\nArgs:\npath (list): The path to wait to\ngood_value (object): the value to wait for\nbad_values (list): values to raise an error on\ntimeout (float): time in seconds to wait for responses, wait\nforever if None\nevent_timeout: maximum time in seconds to wait between each response\nevent, wait forever if None", "source": "codesearchnet"}
{"code": "def overlay(array1, array2, alpha=0.5):\n    if ((alpha < 0.0) or (alpha > 1.0)):\n        raise ValueError('`alpha` needs to be between [0, 1]')\n    if (array1.shape != array2.shape):\n        raise ValueError('`array1` and `array2` must have the same shapes')\n    return ((array1 * alpha) + (array2 * (1.0 - alpha))).astype(array1.dtype)", "docstring": "Overlays `array1` onto `array2` with `alpha` blending.\n\nArgs:\narray1: The first numpy array.\narray2: The second numpy array.\nalpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1],\nwith 0 being `array2` only to 1 being `array1` only (Default value = 0.5).\n\nReturns:\nThe `array1`, overlayed with `array2` using `alpha` blending.", "source": "codesearchnet"}
{"code": "def random_masking(inputs: torch.Tensor, mask_ratio: float, unmasked_channel_indices: Optional[list]=None, channel_consistent_masking: bool=False, mask_value: int=0):\n    if mask_ratio < 0 or mask_ratio >= 1:\n        raise ValueError(f'Mask ratio {mask_ratio} has to be between 0 and 1.')\n    batch_size, num_channels, sequence_length, num_features = inputs.shape\n    device = inputs.device\n    len_keep = int(sequence_length * (1 - mask_ratio))\n    if channel_consistent_masking:\n        noise = torch.rand(batch_size, 1, sequence_length, device=device)\n        noise = noise.repeat(1, num_channels, 1)\n    else:\n        noise = torch.rand(batch_size, num_channels, sequence_length, device=device)\n    mask = torch.ones(batch_size, num_channels, sequence_length, device=device)\n    mask[:, :, :len_keep] = 0\n    ids_shuffle = torch.argsort(noise, dim=-1)\n    ids_restore = torch.argsort(ids_shuffle, dim=-1)\n    mask = torch.gather(mask, dim=-1, index=ids_restore)\n    mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features)\n    if unmasked_channel_indices is not None:\n        mask[:, unmasked_channel_indices, :, :] = 0\n    inputs_mask = inputs.masked_fill(mask.bool(), mask_value)\n    return (inputs_mask, mask[..., 0])", "docstring": "random_masking: Mask the input considering the control variables.\n\nArgs:\ninputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`):\nThe input tensor to mask.\nmask_ratio (`float`):\nMasking ratio applied to mask the input data during random pretraining. It is the number between 0 and 1.\nunmasked_channel_indices (list, *optional*):\nIndices of channels that will not be masked.\nchannel_consistent_masking (bool, *optional*, defaults to `False`):\nWhen true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary\nacross channels.\nmask_value (int, *optional*, defaults to 0):\nDefine the value of masked patches for pretraining.\n\nReturns:\n`tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x\nn]", "source": "github-repos"}
{"code": "def set_mode(path, mode):\n    \n    func_name = '{0}.set_mode'.format(__virtualname__)\n    if __opts__.get('fun', '') == func_name:\n        log.info('The function %s should not be used on Windows systems; '\n                 'see function docs for details. The value returned is '\n                 'always None. Use set_perms instead.', func_name)\n\n    return get_mode(path)", "docstring": "Set the mode of a file\n\nThis just calls get_mode, which returns None because we don't use mode on\nWindows\n\nArgs:\npath: The path to the file or directory\nmode: The mode (not used)\n\nReturns:\nNone\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.set_mode /etc/passwd 0644", "source": "juraj-google-style"}
{"code": "def GetDecompressor(cls, compression_method):\n    \n    compression_method = compression_method.lower()\n    decompressor = cls._decompressors.get(compression_method, None)\n    if not decompressor:\n      return None\n\n    return decompressor()", "docstring": "Retrieves the decompressor object for a specific compression method.\n\nArgs:\ncompression_method (str): compression method identifier.\n\nReturns:\nDecompressor: decompressor or None if the compression method does\nnot exists.", "source": "juraj-google-style"}
{"code": "def _render_objects(self, items, attributes=None, datatype='object'):\n    \n    if not items:\n      return\n\n    if datatype == 'chartdata':\n      if not attributes:\n        attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]\n      items = items['rows']\n      indices = {attributes[i]: i for i in range(0, len(attributes))}\n\n    num_segments = len(self._segments)\n    self._segments.append('<table>')\n\n    first = True\n    for o in items:\n      if first:\n        first = False\n        if datatype == 'dict' and not attributes:\n          attributes = list(o.keys())\n\n        if attributes is not None:\n          self._segments.append('<tr>')\n          for attr in attributes:\n            self._segments.append('<th>%s</th>' % attr)\n          self._segments.append('</tr>')\n\n      self._segments.append('<tr>')\n      if attributes is None:\n        self._segments.append('<td>%s</td>' % HtmlBuilder._format(o))\n      else:\n        for attr in attributes:\n          if datatype == 'dict':\n            self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True))\n          elif datatype == 'chartdata':\n            self._segments.append('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'],\n                                                                      nbsp=True))\n          else:\n            self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr),\n                                                                      nbsp=True))\n\n      self._segments.append('</tr>')\n\n    self._segments.append('</table>')\n    if first:\n      \n      self._segments = self._segments[:num_segments]", "docstring": "Renders an HTML table with the specified list of objects.\n\nArgs:\nitems: the iterable collection of objects to render.\nattributes: the optional list of properties or keys to render.\ndatatype: the type of data; one of 'object' for Python objects, 'dict' for a list\nof dictionaries, or 'chartdata' for Google chart data.", "source": "juraj-google-style"}
{"code": "def _are_scopes_sufficient(authorized_scopes, sufficient_scopes):\n    for sufficient_scope_set in sufficient_scopes:\n        if sufficient_scope_set.issubset(authorized_scopes):\n            return True\n    return False", "docstring": "Check if a list of authorized scopes satisfies any set of sufficient scopes.\n\nArgs:\nauthorized_scopes: a list of strings, return value from oauth.get_authorized_scopes\nsufficient_scopes: a set of sets of strings, return value from _process_scopes", "source": "codesearchnet"}
{"code": "def get_dilation_rates(hparams, width):\n    allowed_dilations = [([1] * 5)]\n    apply_dilations = hparams.get('latent_apply_dilations', False)\n    dilation_rates = hparams.get('latent_dilation_rates', [1, 3])\n    if apply_dilations:\n        for rate in dilation_rates:\n            filter_size = (3 + (2 * rate))\n            if (filter_size <= width):\n                curr_dilation = [1, 1, (rate + 1), (rate + 1), 1]\n                allowed_dilations.append(curr_dilation)\n    return allowed_dilations", "docstring": "Get a list of valid dilation rates.\n\nArgs:\nhparams: HParams.\nwidth: spatial dimension. Ensures that the effective filter size is\nnot larger than the spatial dimension.\nReturns:\nallowed_dilations: A list of dilation rates.", "source": "codesearchnet"}
{"code": "def create_latest_log_alias(actual_path):\n    \n    alias_path = os.path.join(os.path.dirname(actual_path), 'latest')\n    utils.create_alias(actual_path, alias_path)", "docstring": "Creates a symlink to the latest test run logs.\n\nArgs:\nactual_path: The source directory where the latest test run's logs are.", "source": "juraj-google-style"}
{"code": "def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> Tuple[str]:\n    if not os.path.isdir(save_directory):\n        logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n        return\n    out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n        copyfile(self.vocab_file, out_vocab_file)\n    elif not os.path.isfile(self.vocab_file):\n        with open(out_vocab_file, 'wb') as fi:\n            content_spiece_model = self.sp_model.serialized_model_proto()\n            fi.write(content_spiece_model)\n    return (out_vocab_file,)", "docstring": "Save the vocabulary and special tokens file to a directory.\n\nArgs:\nsave_directory (`str`):\nThe directory in which to save the vocabulary.\n\nReturns:\n`Tuple(str)`: Paths to the files saved.", "source": "github-repos"}
{"code": "def one_hot_class_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn):\n    del model_hparams, vocab_size\n    loss_scale = tf.losses.softmax_cross_entropy(onehot_labels=targets, logits=top_out)\n    weights = weights_fn(targets)\n    loss_denom = tf.reduce_sum(weights)\n    return (loss_scale, loss_denom)", "docstring": "Apply softmax cross-entropy between outputs and targets.\n\nArgs:\ntop_out: logits Tensor with shape [batch, ?, ?, num_classes]\ntargets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\nweights_fn:\n\nReturns:\nloss_scale (cross-entropy), loss_denom", "source": "codesearchnet"}
{"code": "def write_seq_as_temp_fasta(seq):\n    \n    sr = ssbio.protein.sequence.utils.cast_to_seq_record(seq, id='tempfasta')\n    return write_fasta_file(seq_records=sr, outname='temp', outdir=tempfile.gettempdir(), force_rerun=True)", "docstring": "Write a sequence as a temporary FASTA file\n\nArgs:\nseq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object\n\nReturns:\nstr: Path to temporary FASTA file (located in system temporary files directory)", "source": "juraj-google-style"}
{"code": "def _get_time(header, keys, name):\n        \n        for key in keys:\n            try:\n                return _to_timestamp(header.pop(key))\n            except KeyError:\n                continue\n        raise _UnsupportedOperation(name)", "docstring": "Get time from header\n\nArgs:\nheader (dict): Object header.\nkeys (tuple of str): Header keys.\nname (str): Method name.\n\nReturns:\nfloat: The number of seconds since the epoch", "source": "juraj-google-style"}
{"code": "def _break_ties(self, Y_s, break_ties='random'):\n    (n, k) = Y_s.shape\n    Y_h = np.zeros(n)\n    diffs = np.abs((Y_s - Y_s.max(axis=1).reshape((- 1), 1)))\n    TOL = 1e-05\n    for i in range(n):\n        max_idxs = np.where((diffs[(i, :)] < TOL))[0]\n        if (len(max_idxs) == 1):\n            Y_h[i] = (max_idxs[0] + 1)\n        elif (break_ties == 'random'):\n            Y_h[i] = (np.random.choice(max_idxs) + 1)\n        elif (break_ties == 'abstain'):\n            Y_h[i] = 0\n        elif isinstance(break_ties, int):\n            Y_h[i] = break_ties\n        else:\n            ValueError(f'break_ties={break_ties} policy not recognized.')\n    return Y_h", "docstring": "Break ties in each row of a tensor according to the specified policy\n\nArgs:\nY_s: An [n, k] np.ndarray of probabilities\nbreak_ties: A tie-breaking policy:\n\"abstain\": return an abstain vote (0)\n\"random\": randomly choose among the tied options\nNOTE: if break_ties=\"random\", repeated runs may have\nslightly different results due to difference in broken ties\n[int]: ties will be broken by using this label", "source": "codesearchnet"}
{"code": "def MakePmfFromHist(hist, name=None):\n    \n    if name is None:\n        name = hist.name\n\n    \n    d = dict(hist.GetDict())\n    pmf = Pmf(d, name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a normalized PMF from a Hist object.\n\nArgs:\nhist: Hist object\nname: string name\n\nReturns:\nPmf object", "source": "juraj-google-style"}
{"code": "def _GetISO8601String(self, structure):\n    \n    fraction_of_second_length = len(structure.fraction_of_second)\n    if fraction_of_second_length not in (3, 6, 7):\n      raise ValueError(\n          'unsupported time fraction of second length: {0:d}'.format(\n              fraction_of_second_length))\n\n    try:\n      fraction_of_second = int(structure.fraction_of_second, 10)\n    except (TypeError, ValueError) as exception:\n      raise ValueError(\n          'unable to determine fraction of second with error: {0!s}'.format(\n              exception))\n\n    \n    if fraction_of_second_length == 7:\n      fraction_of_second, _ = divmod(fraction_of_second, 10)\n\n    date_time_string = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format(\n        structure.year, structure.month, structure.day, structure.hour,\n        structure.minute, structure.second)\n\n    if fraction_of_second_length > 0:\n      date_time_string = '{0:s}.{1:d}'.format(\n          date_time_string, fraction_of_second)\n\n    utc_offset_minutes = structure.get('utc_offset_minutes', None)\n    if utc_offset_minutes is not None:\n      try:\n        time_zone_offset = int(utc_offset_minutes[1:], 10)\n      except (IndexError, ValueError) as exception:\n        raise ValueError(\n            'Unable to parse time zone offset with error: {0!s}.'.format(\n                exception))\n\n      time_zone_hours, time_zone_minutes = divmod(time_zone_offset, 60)\n      date_time_string = '{0:s}{1:s}{2:02d}:{3:02d}'.format(\n          date_time_string, utc_offset_minutes[0], time_zone_hours,\n          time_zone_minutes)\n\n    return date_time_string", "docstring": "Retrieves an ISO8601 date time string from the structure.\n\nThe date and time values in the SCCM log are formatted as:\ntime=\"19:33:19.766-330\" date=\"11-28-2014\"\n\nArgs:\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nReturns:\nstr: ISO 8601 date time string.\n\nRaises:\nValueError: if the structure cannot be converted into a date time string.", "source": "juraj-google-style"}
{"code": "def has_intersection(self, other):\n        \n        return bool(lib.SDL_HasIntersection(self._ptr, other._ptr))", "docstring": "Return whether this rectangle intersects with another rectangle.\n\nArgs:\nother (Rect): The rectangle to test intersection with.\n\nReturns:\nbool: True if there is an intersection, False otherwise.", "source": "juraj-google-style"}
{"code": "def equal(mol, query, largest_only=True, ignore_hydrogen=True):\n    \n    m = molutil.clone(mol)\n    q = molutil.clone(query)\n    if largest_only:\n        m = molutil.largest_graph(m)\n        q = molutil.largest_graph(q)\n    if ignore_hydrogen:\n        m = molutil.make_Hs_implicit(m)\n        q = molutil.make_Hs_implicit(q)\n    if molutil.mw(m) == molutil.mw(q):\n        gm = GraphMatcher(q.graph, m.graph, node_match=atom_match)\n        return gm.is_isomorphic()\n    return False", "docstring": "if mol is exactly same structure as the query, return True\nArgs:\nmol: Compound\nquery: Compound", "source": "juraj-google-style"}
{"code": "def initialize_or_restore(self, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    all_objects = util.list_objects(self._object_graph_view)\n    already_initialized_objects = object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n    initializers_for_non_restored_variables = [c.initializer for c in all_objects if hasattr(c, 'initializer') and c not in already_initialized_objects and (getattr(c, '_update_uid', self._checkpoint.restore_uid - 1) < self._checkpoint.restore_uid)]\n    self.run_restore_ops(session=session)\n    session.run(initializers_for_non_restored_variables)", "docstring": "Run operations to initialize or restore objects in the dependency graph.\n\nAny objects in the dependency graph which have initializers but are not in\nthe checkpoint will have those initializers run, unless those variables are\nbeing restored by a later call to `tf.train.Checkpoint.restore()`.\n\nThis method has a sibling in `InitializationOnlyStatus` which instead\ninitializes variables. That type is returned if no checkpoint is specified\nin `Saver.restore`.\n\nArgs:\nsession: The session to run init/restore ops in. If `None`, uses the\ndefault session.", "source": "github-repos"}
{"code": "def _pare_down_model(self, strain_gempro, genes_to_remove):\n    strain_genes = [x.id for x in strain_gempro.genes]\n    genes_to_remove.extend(self.missing_in_orthology_matrix)\n    genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes)))\n    if (len(genes_to_remove) == 0):\n        log.info('{}: no genes marked non-functional'.format(strain_gempro.id))\n        return\n    else:\n        log.debug('{}: {} genes to be marked non-functional'.format(strain_gempro.id, len(genes_to_remove)))\n    if strain_gempro.model:\n        strain_gempro.model._trimmed = False\n        strain_gempro.model._trimmed_genes = []\n        strain_gempro.model._trimmed_reactions = {}\n        cobra.manipulation.delete_model_genes(strain_gempro.model, genes_to_remove)\n        if strain_gempro.model._trimmed:\n            log.info('{}: marked {} genes as non-functional, deactivating {} reactions'.format(strain_gempro.id, len(strain_gempro.model._trimmed_genes), len(strain_gempro.model._trimmed_reactions)))\n    else:\n        for g in genes_to_remove:\n            strain_gempro.genes.get_by_id(g).functional = False\n        log.info('{}: marked {} genes as non-functional'.format(strain_gempro.id, len(genes_to_remove)))", "docstring": "Mark genes as non-functional in a GEM-PRO. If there is a COBRApy model associated with it, the\nCOBRApy method delete_model_genes is utilized to delete genes.\n\nArgs:\nstrain_gempro (GEMPRO): GEMPRO object\ngenes_to_remove (list): List of gene IDs to remove from the model", "source": "codesearchnet"}
{"code": "def _generate_altered_sql_dependencies(self, dep_changed_keys):\n        \n        for key, removed_deps, added_deps in dep_changed_keys:\n            app_label, sql_name = key\n            operation = AlterSQLState(sql_name, add_dependencies=tuple(added_deps),\n                                      remove_dependencies=tuple(removed_deps))\n            sql_deps = [key]\n            self.add_sql_operation(app_label, sql_name, operation, sql_deps)", "docstring": "Generate forward operations for changing/creating SQL item dependencies.\n\nDependencies are only in-memory and should be reflecting database dependencies, so\nchanging them in SQL config does not alter database. Such actions are persisted in separate\ntype operation - `AlterSQLState`.\n\nArgs:\ndep_changed_keys (list): Data about keys, that have their dependencies changed.\nList of tuples (key, removed depndencies, added_dependencies).", "source": "juraj-google-style"}
{"code": "def resource_json(self, resource: str) -> dict:\n    resource = self.api_document['schemas'][resource]['properties']\n    return self.to_json(from_api=resource)", "docstring": "Return Discovery API Document json for a resource.\n\nExpands all the references.\n\nArgs:\nresource: the name of the Google API resource\n\nReturns:\nA dictionary representation of the resource.", "source": "github-repos"}
{"code": "def _AddPathSegments(self, path, ignore_list):\n    \n    path_segments = path.split(self._path_segment_separator)\n    for path_segment_index, path_segment in enumerate(path_segments):\n      if path_segment_index not in self.path_segments_per_index:\n        self.path_segments_per_index[path_segment_index] = {}\n\n      if path_segment_index not in ignore_list:\n        path_segments = self.path_segments_per_index[path_segment_index]\n\n        if path_segment not in path_segments:\n          path_segments[path_segment] = []\n\n        paths_per_segment_list = path_segments[path_segment]\n        paths_per_segment_list.append(path)", "docstring": "Adds the path segments to the table.\n\nArgs:\npath: a string containing the path.\nignore_list: a list of path segment indexes to ignore, where 0 is the\nindex of the first path segment relative from the root.", "source": "juraj-google-style"}
{"code": "def login(self, email, password):\n    response = FlightData.session.post(url=LOGIN_URL, data={'email': email, 'password': password, 'remember': 'true', 'type': 'web'}, headers={'Origin': 'https:\n    response = (self._fr24.json_loads_byteified(response.content) if (response.status_code == 200) else None)\n    if response:\n        token = response['userData']['subscriptionKey']\n        self.AUTH_TOKEN = token", "docstring": "Login to the flightradar24 session\n\nThe API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.\nFor users who have signed up for a plan, this method allows to login with the credentials from flightradar24. The API obtains\na token that will be passed on all the requests; this obtains the data as per the plan limits.\n\nArgs:\nemail (str): The email ID which is used to login to flightradar24\npassword (str): The password for the user ID\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\nf.login(myemail,mypassword)", "source": "codesearchnet"}
{"code": "def document(self, document_tree, backend=None):\n        \n        return self.template(document_tree, configuration=self,\n                             backend=backend)", "docstring": "Create a :class:`DocumentTemplate` object based on the given\ndocument tree and this template configuration\n\nArgs:\ndocument_tree (DocumentTree): tree of the document's contents\nbackend: the backend to use when rendering the document", "source": "juraj-google-style"}
{"code": "def _compute_initial_out_degree(self):\n    out_degree = collections.defaultdict(int)\n    for tensor_name in self.get_all_tensor_names():\n        if self.is_tensor_final(tensor_name):\n            out_degree[tensor_name] = 1\n    for operation_name in self.get_all_operation_names():\n        for input_name in self.get_operation_input_names(operation_name):\n            out_degree[input_name] += 1\n    return out_degree", "docstring": "The number of operations which use each tensor as input.\n\nReturns:\na {string, int} mapping tensor name to the number of operations which use\nit as input, or one plus that quantity if the tensor is final.", "source": "codesearchnet"}
{"code": "def delete(self, start=None, stop=None):\n    _check_start_stop(start, stop)\n    start_loc = (self._bisect_right(start) - 1)\n    if (stop is None):\n        stop_loc = len(self._keys)\n    else:\n        stop_loc = self._bisect_left(stop)\n    for value in self._values[start_loc:stop_loc]:\n        if (value is NOT_SET):\n            raise KeyError((start, stop))\n    self.set(NOT_SET, start=start, stop=stop)", "docstring": "Delete the range from start to stop from self.\n\nRaises:\nKeyError: If part of the passed range isn't mapped.", "source": "codesearchnet"}
{"code": "def set_triple(self, p, o, auto_refresh=True):\n\n\t\t\n\n\t\tself.rdf.graph.set((self.uri, p, self._handle_object(o)))\n\n\t\t\n\t\tself._handle_triple_refresh(auto_refresh)", "docstring": "Assuming the predicate or object matches a single triple, sets the other for that triple.\n\nArgs:\np (rdflib.term.URIRef): predicate\no (): object\nauto_refresh (bool): whether or not to update object-like self.rdf.triples\n\nReturns:\nNone: modifies pre-existing triple in self.rdf.graph", "source": "juraj-google-style"}
{"code": "def __init__(self, context):\n    \n    self._logdir = context.logdir\n    self._has_auth_group = (context.flags and\n                            'authorized_groups' in context.flags and\n                            context.flags.authorized_groups is not '')", "docstring": "Constructs an interactive inference plugin for TensorBoard.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def sheets_url(config, auth, url_or_name):\n    sheet_id = sheets_id(config, auth, url_or_name)\n    return 'https:", "docstring": "Normalizes a full sheet URL from some key.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nurl_or_name - one of: URL, document title, or id\n\nReturns:\nURL of sheet.", "source": "github-repos"}
{"code": "def create_dir_v2(path):\n    _pywrap_file_io.CreateDir(compat.path_to_bytes(path))", "docstring": "Creates a directory with the name given by `path`.\n\nArgs:\npath: string, name of the directory to be created\n\nNotes: The parent directories need to exist. Use `tf.io.gfile.makedirs`\ninstead if there is the possibility that the parent dirs don't exist.\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def WritePathStatHistory(self, client_path, stat_entries):\n    \n    client_path_history = ClientPathHistory()\n    for timestamp, stat_entry in iteritems(stat_entries):\n      client_path_history.AddStatEntry(timestamp, stat_entry)\n\n    self.MultiWritePathHistory({client_path: client_path_history})", "docstring": "Writes a collection of `StatEntry` observed for particular path.\n\nArgs:\nclient_path: A `ClientPath` instance.\nstat_entries: A dictionary with timestamps as keys and `StatEntry`\ninstances as values.", "source": "juraj-google-style"}
{"code": "def download(timestamp, dataset, path=None, products=None, levels=None, offset=0):\n    if (path is None):\n        path = DATA_PATH\n    closest = ((timestamp.hour \n    filename = dataset(closest, offset)\n    gfs_timestamp = ('%s%02d' % (timestamp.strftime('%Y%m%d'), closest))\n    url = baseurl(gfs_timestamp, filename)\n    index = (url + '.idx')\n    messages = message_index(index)\n    segments = _filter_messages(messages, products, levels)\n    dl_path = (path + ('/%s/' % gfs_timestamp))\n    _verify_path(dl_path)\n    _download_segments((path + filename), url, segments)", "docstring": "save GFS grib file to DATA_PATH.\n\nArgs:\ndataset(function): naming convention function.  eg. pgrb2\ntimestamp(datetime): ???\npath(str): if None defaults to DATA_PATH\nproducts(list): TMP, etc. if None downloads all.\nlayers(list): surface, etc. if None downloads all.\noffset(int): should be multiple of 3", "source": "codesearchnet"}
{"code": "def _hat_integral(self, x):\n    \n    x = tf.cast(x, self.power.dtype)\n    t = self.power - 1.\n    return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))", "docstring": "Integral of the `hat` function, used for sampling.\n\nWe choose a `hat` function, h(x) = x^(-power), which is a continuous\n(unnormalized) density touching each positive integer at the (unnormalized)\npmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt;\nwhich is needed for sampling purposes.\n\nArguments:\nx: A Tensor of points x at which to evaluate H(x).\n\nReturns:\nA Tensor containing evaluation H(x) at x.", "source": "juraj-google-style"}
{"code": "def tarfile_extract(fileobj, dest_path):\n        \n        \n        \n        \n        tar = tarfile.open(mode='r|', fileobj=fileobj,\n                           bufsize=pipebuf.PIPE_BUF_BYTES)\n\n        \n        dest_path = os.path.realpath(dest_path)\n\n        \n        extracted_files = []\n\n        \n        \n        \n        for member in tar:\n            assert not member.name.startswith('/')\n            relpath = os.path.join(dest_path, member.name)\n\n            \n            \n            if member.issym():\n                target_path = os.path.join(dest_path, member.name)\n                try:\n                    os.symlink(member.linkname, target_path)\n                except OSError as e:\n                    if e.errno == errno.EEXIST:\n                        os.remove(target_path)\n                        os.symlink(member.linkname, target_path)\n                    else:\n                        raise\n                continue\n\n            if member.isreg() and member.size >= pipebuf.PIPE_BUF_BYTES:\n                cat_extract(tar, member, relpath)\n            else:\n                tar.extract(member, path=dest_path)\n\n            filename = os.path.realpath(relpath)\n            extracted_files.append(filename)\n\n            \n            \n            if len(extracted_files) > 1000:\n                _fsync_files(extracted_files)\n                del extracted_files[:]\n        tar.close()\n        _fsync_files(extracted_files)", "docstring": "Extract a tarfile described by a file object to a specified path.\n\nArgs:\nfileobj (file): File object wrapping the target tarfile.\ndest_path (str): Path to extract the contents of the tarfile to.", "source": "juraj-google-style"}
{"code": "def add(self, spec):\n    for limit in spec.limit_to:\n        if (limit not in self.limit_to):\n            self.limit_to.append(limit)", "docstring": "Add limitations of given spec to self's.\n\nArgs:\nspec (PackageSpec): another spec.", "source": "codesearchnet"}
{"code": "def from_db(cls, bigchain, tx_dict_list):\n    return_list = True\n    if isinstance(tx_dict_list, dict):\n        tx_dict_list = [tx_dict_list]\n        return_list = False\n    tx_map = {}\n    tx_ids = []\n    for tx in tx_dict_list:\n        tx.update({'metadata': None})\n        tx_map[tx['id']] = tx\n        tx_ids.append(tx['id'])\n    assets = list(bigchain.get_assets(tx_ids))\n    for asset in assets:\n        if (asset is not None):\n            tx = tx_map[asset['id']]\n            del asset['id']\n            tx['asset'] = asset\n    tx_ids = list(tx_map.keys())\n    metadata_list = list(bigchain.get_metadata(tx_ids))\n    for metadata in metadata_list:\n        tx = tx_map[metadata['id']]\n        tx.update({'metadata': metadata.get('metadata')})\n    if return_list:\n        tx_list = []\n        for (tx_id, tx) in tx_map.items():\n            tx_list.append(cls.from_dict(tx))\n        return tx_list\n    else:\n        tx = list(tx_map.values())[0]\n        return cls.from_dict(tx)", "docstring": "Helper method that reconstructs a transaction dict that was returned\nfrom the database. It checks what asset_id to retrieve, retrieves the\nasset from the asset table and reconstructs the transaction.\n\nArgs:\nbigchain (:class:`~bigchaindb.tendermint.BigchainDB`): An instance\nof BigchainDB used to perform database queries.\ntx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or\nlist of transaction dict as returned from the database.\n\nReturns:\n:class:`~Transaction`", "source": "codesearchnet"}
{"code": "def normalize_date(tmy_date, year):\n    month = tmy_date.month\n    day = (tmy_date.day - 1)\n    hour = tmy_date.hour\n    if ((month is 1) and (day is 0) and (hour is 0)):\n        year = (year + 1)\n    return (datetime.datetime(year, month, 1) + datetime.timedelta(days=day, hours=hour, minutes=0))", "docstring": "change TMY3 date to an arbitrary year.\n\nArgs:\ntmy_date (datetime): date to mangle.\nyear (int): desired year.\n\nReturns:\n(None)", "source": "codesearchnet"}
{"code": "async def send_message(self, name, level, message):\n    if (name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=name)\n    msg = self.services[name]['state'].post_message(level, message)\n    (await self._notify_update(name, 'new_message', msg.to_dict()))", "docstring": "Post a message for a service.\n\nArgs:\nname (string): The short name of the service to query\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents", "source": "codesearchnet"}
{"code": "def collect_publications(self):\n    pubs = list(self.sub_publications)\n    for sub_tree in self.sub_trees:\n        pubs.extend(sub_tree.collect_publications())\n    return pubs", "docstring": "Recursively collect list of all publications referenced in this\ntree and all sub-trees.\n\nReturns:\nlist: List of UUID strings.", "source": "codesearchnet"}
{"code": "def __init__(self, dtype, shape, accumulator_ref):\n    self._dtype = dtype\n    if shape is not None:\n        self._shape = tensor_shape.TensorShape(shape)\n    else:\n        self._shape = tensor_shape.unknown_shape()\n    self._accumulator_ref = accumulator_ref\n    if context.executing_eagerly():\n        self._name = context.context().scope_name\n    else:\n        self._name = self._accumulator_ref.op.name.split('/')[-1]", "docstring": "Creates a new ConditionalAccumulator.\n\nArgs:\ndtype: Datatype of the accumulated gradients.\nshape: Shape of the accumulated gradients.\naccumulator_ref: A handle to the conditional accumulator, created by sub-\nclasses", "source": "github-repos"}
{"code": "def DefaultParseValue(value):\n    try:\n        return _LiteralEval(value)\n    except (SyntaxError, ValueError):\n        return value", "docstring": "The default argument parsing function used by Fire CLIs.\n\nIf the value is made of only Python literals and containers, then the value\nis parsed as it's Python value. Otherwise, provided the value contains no\nquote, escape, or parenthetical characters, the value is treated as a string.\n\nArgs:\nvalue: A string from the command line to be parsed for use in a Fire CLI.\nReturns:\nThe parsed value, of the type determined most appropriate.", "source": "github-repos"}
{"code": "def get(self):\n    return self._master._get_helper(self._master._sorted_items, self._q)", "docstring": "Returns the calculated quantiles based on the master tracker's buffer.\n\nReturns:\nA list of calculated quantiles.", "source": "github-repos"}
{"code": "def easeInOutCirc(n):\n    \n    _checkRange(n)\n    n = n * 2\n    if n < 1:\n        return -0.5 * (math.sqrt(1 - n**2) - 1)\n    else:\n        n = n - 2\n        return 0.5 * (math.sqrt(1 - n**2) + 1)", "docstring": "A circular tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def copy_to_file(self, name, fp_dest, callback=None):\n        \n        assert compat.is_native(name)\n\n        def _write_to_file(data):\n            \n            fp_dest.write(data)\n            if callback:\n                callback(data)\n\n        self.ftp.retrbinary(\n            \"RETR {}\".format(name), _write_to_file, FtpTarget.DEFAULT_BLOCKSIZE\n        )", "docstring": "Write cur_dir/name to file-like `fp_dest`.\n\nArgs:\nname (str): file name, located in self.curdir\nfp_dest (file-like): must support write() method\ncallback (function, optional):\nCalled like `func(buf)` for every written chunk", "source": "juraj-google-style"}
{"code": "def _tokens_to_subtoken(self, tokens):\n        \n        ret = []\n        for token in tokens:\n            ret.extend(\n                self._escaped_token_to_subtoken_strings(_escape_token(token, self._alphabet)))\n        return ret", "docstring": "Converts a list of tokens to a list of subtoken.\n\nArgs:\ntokens: a list of strings.\nReturns:\na list of integers in the range [0, vocab_size)", "source": "juraj-google-style"}
{"code": "def select_by_value(self, value):\n    self._selected_key = None\n    self._selected_item = None\n    for k in self.children:\n        item = self.children[k]\n        if (item.get_text() == value):\n            item.attributes['selected'] = 'selected'\n            self._selected_key = k\n            self._selected_item = item\n        elif ('selected' in item.attributes):\n            del item.attributes['selected']", "docstring": "Selects a DropDownItem by means of the contained text-\n\nArgs:\nvalue (str): Textual content of the DropDownItem that have to be selected.", "source": "codesearchnet"}
{"code": "def _unwrap_el(self, value):\n        \n        if isinstance(value, dict) and 'ELEMENT' in value:\n            element_id = value.get('ELEMENT')\n            return WebElement(element_id, self)\n        elif isinstance(value, list) and not isinstance(value, str):\n            return [self._unwrap_el(item) for item in value]\n        else:\n            return value", "docstring": "Convert {'Element': 1234} to WebElement Object\n\nArgs:\nvalue(str|list|dict): The value field in the json response.\n\nReturns:\nThe unwrapped value.", "source": "juraj-google-style"}
{"code": "def extend(*args):\n    \n    if not args:\n        return {}\n\n    first = args[0]\n    rest = args[1:]\n    out = type(first)(first)\n    for each in rest:\n        out.update(each)\n    return out", "docstring": "shallow dictionary merge\n\nArgs:\na: dict to extend\nb: dict to apply to a\n\nReturns:\nnew instance of the same type as _a_, with _a_ and _b_ merged.", "source": "juraj-google-style"}
{"code": "def save_to_file(json_data, filename):\n    if filename[-5:] != '.json':\n        print('filename: %s' % filename)\n        filename += '.json'\n    with open(PATH_TO_DIR + '/' + filename, 'w') as f:\n        json.dump(json_data, f, sort_keys=True, indent=4)\n    print(' Successfully wrote configs to file `%s`.\\n' % filename)", "docstring": "Saves all detected configuration(s) into a JSON file.\n\nArgs:\njson_data: Dict of all configurations found.\nfilename: String that is the name of the output JSON file.", "source": "github-repos"}
{"code": "def weights_concatenated(labels):\n    eos_mask = tf.to_int32(tf.equal(labels, 1))\n    sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)\n    in_target = tf.equal(tf.mod(sentence_num, 2), 1)\n    sentence_num_plus_one = (sentence_num + 1)\n    shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0], [0, 0]])[(:, :(- 2), :, :)]\n    nonboilerplate = tf.equal(sentence_num_plus_one, shifted)\n    ret = to_float(tf.logical_and(nonboilerplate, in_target))\n    return ret", "docstring": "Assign weight 1.0 to the \"target\" part of the concatenated labels.\n\nThe labels look like:\nsource English I love you . ID1 target French Je t'aime . ID1 source\nEnglish the cat ID1 target French le chat ID1 source English ...\n\nWe want to assign weight 1.0 to all words in the target text (including the\nID1 end symbol), but not to the source text or the boilerplate.  In the\nabove example, the target words that get positive weight are:\nJe t'aime . ID1 le chat ID1\n\nArgs:\nlabels: a Tensor\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def get_dict(self):\n    self.is_valid()\n    return self._get_dict()", "docstring": "Returns the internal-API dictionary representing the\n:class:`DisplayDataItem`.\n\nReturns:\nDict[str, Any]: A dictionary. The internal-API dictionary representing\nthe :class:`DisplayDataItem`.\n\nRaises:\nValueError: if the item is not valid.", "source": "github-repos"}
{"code": "def insert_chain(cur, chain, encoded_data=None):\n    if (encoded_data is None):\n        encoded_data = {}\n    if ('nodes' not in encoded_data):\n        encoded_data['nodes'] = json.dumps(sorted(chain), separators=(',', ':'))\n    if ('chain_length' not in encoded_data):\n        encoded_data['chain_length'] = len(chain)\n    insert = 'INSERT OR IGNORE INTO chain(chain_length, nodes) VALUES (:chain_length, :nodes);'\n    cur.execute(insert, encoded_data)", "docstring": "Insert a chain into the cache.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\nchain (iterable):\nA collection of nodes. Chains in embedding act as one node.\n\nencoded_data (dict, optional):\nIf a dictionary is provided, it will be populated with the serialized data. This is\nuseful for preventing encoding the same information many times.\n\nNotes:\nThis function assumes that the nodes in chain are index-labeled.", "source": "codesearchnet"}
{"code": "def CopyMicrosecondsToFractionOfSecond(cls, microseconds):\n    \n    if microseconds < 0 or microseconds >= definitions.MICROSECONDS_PER_SECOND:\n      raise ValueError(\n          'Number of microseconds value: {0:d} out of bounds.'.format(\n              microseconds))\n\n    return decimal.Decimal(microseconds) / definitions.MICROSECONDS_PER_SECOND", "docstring": "Copies the number of microseconds to a fraction of second value.\n\nArgs:\nmicroseconds (int): number of microseconds.\n\nReturns:\ndecimal.Decimal: fraction of second, which must be a value between 0.0 and\n1.0.\n\nRaises:\nValueError: if the number of microseconds is out of bounds.", "source": "juraj-google-style"}
{"code": "def fit(self, X, y):\n    self._word_vocab.add_documents(X)\n    self._label_vocab.add_documents(y)\n    if self._use_char:\n        for doc in X:\n            self._char_vocab.add_documents(doc)\n    self._word_vocab.build()\n    self._char_vocab.build()\n    self._label_vocab.build()\n    return self", "docstring": "Learn vocabulary from training set.\n\nArgs:\nX : iterable. An iterable which yields either str, unicode or file objects.\n\nReturns:\nself : IndexTransformer.", "source": "codesearchnet"}
{"code": "def setRelay(self, seconds, relay, status, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setRelay\")\n        try:\n            self.clearCmdMsg()\n\n            if len(password) != 8:\n                self.writeCmdMsg(\"Invalid password length.\")\n                self.setContext(\"\")\n                return result\n\n            if seconds < 0 or seconds > 9999:\n                self.writeCmdMsg(\"Relay duration must be between 0 and 9999.\")\n                self.setContext(\"\")\n                return result\n\n            if not self.requestA():\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_str = \"\"\n                    req_str = (\"01573102303038\" +\n                               binascii.hexlify(str(relay)).zfill(2) +\n                               \"28\" +\n                               binascii.hexlify(str(status)).zfill(2) +\n                               binascii.hexlify(str(seconds).zfill(4)) + \"2903\")\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success: 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial call to set relay.\n\nArgs:\nseconds (int): Seconds to hold, ero is hold forever. See :class:`~ekmmeters.RelayInterval`.\nrelay (int): Selected relay, see :class:`~ekmmeters.Relay`.\nstatus (int): Status to set, see :class:`~ekmmeters.RelayState`\npassword (str): Optional password\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def typing(self, room: Room, timeout: int=5000):\n    path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}'\n    return self.api._send('PUT', path, {'typing': True, 'timeout': timeout})", "docstring": "Send typing event directly to api\n\nArgs:\nroom: room to send typing event to\ntimeout: timeout for the event, in ms", "source": "codesearchnet"}
{"code": "def list_workers(config, *, filter_by_queues=None):\n    \n    celery_app = create_app(config)\n    worker_stats = celery_app.control.inspect().stats()\n    queue_stats = celery_app.control.inspect().active_queues()\n\n    if worker_stats is None:\n        return []\n\n    workers = []\n    for name, w_stat in worker_stats.items():\n        queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]\n\n        add_worker = filter_by_queues is None\n        if not add_worker:\n            for queue in queues:\n                if queue.name in filter_by_queues:\n                    add_worker = True\n                    break\n\n        if add_worker:\n            workers.append(WorkerStats.from_celery(name, w_stat, queues))\n\n    return workers", "docstring": "Return a list of all available workers.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings are retrieved.\nfilter_by_queues (list): Restrict the returned workers to workers that listen to\nat least one of the queue names in this list.\n\nReturns:\nlist: A list of WorkerStats objects.", "source": "juraj-google-style"}
{"code": "def _RemoveForwardedIps(self, forwarded_ips, interface):\n    for address in forwarded_ips:\n        self.ip_forwarding_utils.RemoveForwardedIp(address, interface)", "docstring": "Remove the forwarded IP addresses from the network interface.\n\nArgs:\nforwarded_ips: list, the forwarded IP address strings to delete.\ninterface: string, the output device to use.", "source": "codesearchnet"}
{"code": "def add_to_screen(self, screen_width, screen):\n    for (lineno, fields) in enumerate(self.line_fields):\n        for (left, field) in self.compute_positions(screen_width, fields):\n            logger.debug('Adding field %s to screen %s at x=%d->%d, y=%d', field, screen.ref, left, ((left + field.width) - 1), (1 + lineno))\n            self.widgets[field] = field.add_to_screen(screen, left, (1 + lineno))\n            self.register_hooks(field)", "docstring": "Add the pattern to a screen.\n\nAlso fills self.widgets.\n\nArgs:\nscreen_width (int): the width of the screen\nscreen (lcdprod.Screen): the screen to fill.", "source": "codesearchnet"}
{"code": "def create(cls, **kwargs):\n    try:\n        return cls.add(cls.new(**kwargs))\n    except:\n        cls.session.rollback()\n        raise", "docstring": "Initializes a new instance, adds it to the db and commits\nthe transaction.\n\nArgs:\n\n**kwargs: The keyword arguments for the init constructor.\n\nExamples:\n\n>>> user = User.create(name=\"Vicky\", email=\"vicky@h.com\")\n>>> user.id\n35", "source": "codesearchnet"}
{"code": "def __add__(self, other):\n        \n        \n        assert isinstance(other, LocationDescriptor), \"You can only add LocationDescriptor together.\"\n        assert self._separation_char == other._separation_char, \\\n            \"You can only add LocationDescriptor together if they share the same separator character.\"\n        new_location_string_list = self.get_locations_list() + other.get_locations_list()\n        return LocationDescriptor(new_location_string_list)", "docstring": "Create a **new** :class:`LocationDescriptor` object that is the sum of this one and another.\n\nArgs:\nself: This :class:`LocationDescriptor` object.\nother: Another :class:`LocationDescriptor` object.\n\nReturns:\nSum of both :class:`LocationDescriptor` objects.", "source": "juraj-google-style"}
{"code": "def _GetFirefoxConfig(self, file_object, display_name):\n    to_read = min(file_object.get_size(), self._INITIAL_CACHE_FILE_SIZE)\n    while (file_object.get_offset() < to_read):\n        offset = file_object.get_offset()\n        try:\n            (cache_entry, _) = self._ReadCacheEntry(file_object, display_name, self._MINIMUM_BLOCK_SIZE)\n            record_size = ((self._CACHE_ENTRY_HEADER_SIZE + cache_entry.request_size) + cache_entry.information_size)\n            if (record_size >= 4096):\n                block_size = 4096\n            elif (record_size >= 1024):\n                block_size = 1024\n            else:\n                block_size = 256\n            return self.FIREFOX_CACHE_CONFIG(block_size, offset)\n        except IOError:\n            logger.debug('[{0:s}] {1:s}:{2:d}: Invalid record.'.format(self.NAME, display_name, offset))\n    raise errors.UnableToParseFile('Could not find a valid cache record. Not a Firefox cache file.')", "docstring": "Determine cache file block size.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\ndisplay_name (str): display name.\n\nReturns:\nfirefox_cache_config: namedtuple containing the block size and first\nrecord offset.\n\nRaises:\nUnableToParseFile: if no valid cache record could be found.", "source": "codesearchnet"}
{"code": "def cooccurrences(self, domains):\n        \n        api_name = 'opendns-cooccurrences'\n        fmt_url_path = u'recommendations/name/{0}.json'\n        return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Get the domains related to input domains.\n\nArgs:\ndomains: an enumerable of strings domain names\nReturns:\nAn enumerable of string domain names", "source": "juraj-google-style"}
{"code": "def circuit_to_quirk_url(circuit: circuits.Circuit, prefer_unknown_gate_to_failure: bool=False, escape_url=True) -> str:\n    circuit = circuit.copy()\n    linearize_circuit_qubits(circuit)\n    cols = []\n    for moment in circuit:\n        can_merges = []\n        for op in moment.operations:\n            for (col, can_merge) in _to_quirk_cols(op, prefer_unknown_gate_to_failure):\n                if can_merge:\n                    can_merges.append(col)\n                else:\n                    cols.append(col)\n        if can_merges:\n            merged_col = ([1] * max((len(e) for e in can_merges)))\n            for col in can_merges:\n                for i in range(len(col)):\n                    if (col[i] != 1):\n                        merged_col[i] = col[i]\n            cols.append(merged_col)\n    circuit_json = json.JSONEncoder(ensure_ascii=False, separators=(',', ':'), sort_keys=True).encode({'cols': cols})\n    if escape_url:\n        suffix = urllib.parse.quote(circuit_json)\n    else:\n        suffix = circuit_json\n    return 'http:", "docstring": "Returns a Quirk URL for the given circuit.\n\nArgs:\ncircuit: The circuit to open in Quirk.\nprefer_unknown_gate_to_failure: If not set, gates that fail to convert\nwill cause this function to raise an error. If set, a URL\ncontaining bad gates will be generated. (Quirk will open the URL,\nand replace the bad gates with parse errors, but still get the rest\nof the circuit.)\nescape_url: If set, the generated URL will have special characters such\nas quotes escaped using %. This makes it possible to paste the URL\ninto forums and the command line and etc and have it properly\nparse. If not set, the generated URL will be more compact and human\nreadable (and can still be pasted directly into a browser's address\nbar).\n\nReturns:", "source": "codesearchnet"}
{"code": "def _prepare_tables(self):\n    values = torch.tensor([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]])\n    row_index = IndexMap(indices=torch.tensor([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]]]), num_segments=3, batch_dims=1)\n    col_index = IndexMap(indices=torch.tensor([[[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]), num_segments=3, batch_dims=1)\n    return (values, row_index, col_index)", "docstring": "Prepares two tables, both with three distinct rows.\nThe first table has two columns:\n1.0, 2.0 | 3.0\n2.0, 0.0 | 1.0\n1.0, 3.0 | 4.0\nThe second table has three columns:\n1.0 | 2.0 | 3.0\n2.0 | 0.0 | 1.0\n1.0 | 3.0 | 4.0\nReturns:\nSegmentedTensors with the tables.", "source": "github-repos"}
{"code": "def str(name, default=None, allow_none=False, fallback=None):\n    value = read(name, default, allow_none, fallback=fallback)\n    if ((value is None) and allow_none):\n        return None\n    else:\n        return builtins.str(value).strip()", "docstring": "Get a string based environment value or the default.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)", "source": "codesearchnet"}
{"code": "def is_deterministic(self):\n    return False", "docstring": "Whether this coder is guaranteed to encode values deterministically.\n\nA deterministic coder is required for key coders in GroupByKey operations\nto produce consistent results.\n\nFor example, note that the default coder, the PickleCoder, is not\ndeterministic: the ordering of picked entries in maps may vary across\nexecutions since there is no defined order, and such a coder is not in\ngeneral suitable for usage as a key coder in GroupByKey operations, since\neach instance of the same key may be encoded differently.\n\nReturns:\nWhether coder is deterministic.", "source": "github-repos"}
{"code": "def _cauchy_equation(wavelength, coefficients):\n    n = 0.0\n    for (i, c) in enumerate(coefficients):\n        exponent = (2 * i)\n        n += (c / (wavelength ** exponent))\n    return n", "docstring": "Helpful function to evaluate Cauchy equations.\n\nArgs:\nwavelength (float, list, None): The wavelength(s) the\nCauchy equation will be evaluated at.\ncoefficients (list): A list of the coefficients of\nthe Cauchy equation.\n\nReturns:\nfloat, list: The refractive index at the target wavelength(s).", "source": "codesearchnet"}
{"code": "def _calculate_scores(self, query, key):\n    q_reshaped = array_ops.expand_dims(query, axis=-2)\n    k_reshaped = array_ops.expand_dims(key, axis=-3)\n    if self.use_scale:\n        scale = self.scale\n    else:\n        scale = 1.0\n    return math_ops.reduce_sum(scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)", "docstring": "Calculates attention scores as a nonlinear sum of query and key.\n\nArgs:\nquery: Query tensor of shape `[batch_size, Tq, dim]`.\nkey: Key tensor of shape `[batch_size, Tv, dim]`.\nReturns:\nTensor of shape `[batch_size, Tq, Tv]`.", "source": "github-repos"}
{"code": "def request_via_socket(sock, search_target):\n    msgparts = dict(HOST=MCAST_IP_PORT, MAN='\"ssdp:discover\"', MX='3', ST=search_target)\n    msg = encode_request('M-SEARCH * HTTP/1.1', **msgparts)\n    sock.sendto(msg, (MCAST_IP, MCAST_PORT))", "docstring": "Send an SSDP search request via the provided socket.\n\nArgs:\nsock: A socket suitable for use to send a broadcast message - preferably\none created by :py:func:`make_socket`.\nsearch_target (string): A :term:`resource type` target to search for.", "source": "codesearchnet"}
{"code": "def add_gemini_query(self, name, query):\n    logger.info('Adding query {0} with text {1}'.format(name, query))\n    new_query = GeminiQuery(name=name, query=query)\n    self.session.add(new_query)\n    self.save()\n    return new_query", "docstring": "Add a user defined gemini query\n\nArgs:\nname (str)\nquery (str)", "source": "codesearchnet"}
{"code": "def codeblocks(start=None, end=None, full=True):\n    if full:\n        for function in functions(start, end):\n            fc = FlowChart(f=function.func_t)\n            for block in fc:\n                (yield block)\n    else:\n        (start, end) = fix_addresses(start, end)\n        for code_block in FlowChart(bounds=(start, end)):\n            (yield code_block)", "docstring": "Get all `CodeBlock`s in a given range.\n\nArgs:\nstart - start address of the range. If `None` uses IDB start.\nend - end address of the range. If `None` uses IDB end.\nfull - `True` is required to change node info (e.g. color). `False` causes faster iteration.", "source": "codesearchnet"}
{"code": "def load(self, key: str) -> _ModelLoadStats:\n    if key in self._tag_map:\n        self._tag_map.move_to_end(key)\n        return _ModelLoadStats(self._tag_map[key], None, None)\n    else:\n        self._tag_map[key] = uuid.uuid4().hex\n    tag = self._tag_map[key]\n    mh = self._mh_map[key]\n    if self._max_models is not None and self._max_models < len(self._tag_map):\n        tag_to_remove = self._tag_map.popitem(last=False)[1]\n        shared_handle, model_to_remove = self._proxy_map[tag_to_remove]\n        shared_handle.release(model_to_remove)\n        del self._proxy_map[tag_to_remove]\n    memory_before = _get_current_process_memory_in_bytes()\n    start_time = _to_milliseconds(time.time_ns())\n    shared_handle = multi_process_shared.MultiProcessShared(mh.load_model, tag=tag)\n    model_reference = shared_handle.acquire()\n    self._proxy_map[tag] = (shared_handle, model_reference)\n    memory_after = _get_current_process_memory_in_bytes()\n    end_time = _to_milliseconds(time.time_ns())\n    return _ModelLoadStats(tag, end_time - start_time, memory_after - memory_before)", "docstring": "Loads the appropriate model for the given key into memory.\nArgs:\nkey: the key associated with the model we'd like to load.\nReturns:\n_ModelLoadStats with tag, byte size, and latency to load the model. If\nthe model was already loaded, byte size/latency will be None.", "source": "github-repos"}
{"code": "def chain(processor_list: Sequence[Processor | PartProcessor]) -> Processor:\n    if not processor_list:\n        raise ValueError('processor_list is empty')\n    chain_processor = processor_list[0]\n    for p in processor_list[1:]:\n        chain_processor = chain_processor + p\n    if isinstance(chain_processor, PartProcessor):\n        chain_processor = chain_processor.to_processor()\n    return chain_processor", "docstring": "Chain a sequence of processors.\n\nArgs:\nprocessor_list: list of part processors or generic processors.\n\nReturns:\nA processor consisting of the chain of all the processors in the list. The\nexecution is sequential from the first processor to the last but parts are\nprocessed concurrently overall.", "source": "github-repos"}
{"code": "def _int_to_pos(self, flat_position):\n    return ((flat_position % self.env.action_space.screen_shape[0]), (flat_position % self.env.action_space.screen_shape[1]))", "docstring": "Returns x, y from flat_position integer.\n\nArgs:\nflat_position: flattened position integer\n\nReturns: x, y", "source": "codesearchnet"}
{"code": "def search_rule_by_id(self, ruleID) -> Rule:\n        \n        for r in self.rules:\n            if r.id == ruleID:\n                return r\n        return None", "docstring": "searches a rule by given id\n\nArgs:\nruleID(str): the rule to search for\n\nReturns\nthe rule object or None if it couldn't find a rule", "source": "juraj-google-style"}
{"code": "def __init__(self, text_encoder_config=None, data=\"clean100\", **kwargs):\n    \n    if data not in _DATA_OPTIONS:\n      raise ValueError(\"data must be one of %s\" % _DATA_OPTIONS)\n    name = kwargs.get(\"name\")\n    if name is None:\n      encoder_name = (\n          text_encoder_config.name if text_encoder_config else \"plain_text\")\n      data_name = data\n      name = \"%s_%s\" % (data_name, encoder_name)\n    kwargs[\"name\"] = name\n\n    description = kwargs.get(\"description\")\n    if description is None:\n      if text_encoder_config:\n        encoder_description = \"Transcriptions use the %s\" % (\n            text_encoder_config.encoder_cls.__name__)\n      else:\n        encoder_description = \"Transcriptions are in plain text.\"\n\n      if data == \"all\":\n        data_description = \"Uses all data.\"\n      else:\n        data_description = (\"Uses only clean data,%s including train-clean-360.\"\n                            % (\"\" if data == \"clean360\" else \" not\"))\n\n      description = \"%s %s\" % (data_description, encoder_description)\n    kwargs[\"description\"] = description\n\n    super(LibrispeechConfig, self).__init__(**kwargs)\n    self.text_encoder_config = text_encoder_config\n    self.data = data", "docstring": "Constructs a LibrispeechConfig.\n\nArgs:\ntext_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration\nfor the `tfds.features.text.TextEncoder` used for the text feature.\ndata: `str`, one of `(clean100, clean360, all)`. `clean100` uses only the\nclean data without `train-clean-360`. `clean360` uses clean data with\n`train-clean-360`. `all` uses all the data.\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def check_for_missing_options(config):\n    for (section_name, section) in config:\n        for (option_name, option) in section:\n            if (option.required and (option.value is None)):\n                raise exc.MissingRequiredOption('Option {0} in namespace {1} is required.'.format(option_name, section_name))\n    return config", "docstring": "Iter over a config and raise if a required option is still not set.\n\nArgs:\nconfig (confpy.core.config.Configuration): The configuration object\nto validate.\n\nRaises:\nMissingRequiredOption: If any required options are not set in the\nconfiguration object.\n\nRequired options with default values are considered set and will not cause\nthis function to raise.", "source": "codesearchnet"}
{"code": "def _alephResultToDict(dom):\n    \n    result = {}\n    for i in dom.childs:\n        if not i.isOpeningTag():\n            continue\n\n        keyword = i.getTagName().strip()\n        value = _tryConvertToInt(i.getContent().strip())\n\n        \n        \n        if keyword in result:                  \n            if isinstance(result[keyword], list):  \n                result[keyword].append(value)          \n            else:                                  \n                result[keyword] = [result[keyword], value]\n        else:                                  \n            result[keyword] = value\n\n    return result", "docstring": "Convert part of non-nested XML to :py:class:`dict`.\n\nArgs:\ndom (HTMLElement tree): pre-parsed XML (see dhtmlparser).\n\nReturns:\ndict: with python data", "source": "juraj-google-style"}
{"code": "def lookup_instances(fragment, verbose=True, filter_by_key=True):\n  \n\n  def vprint(*args):\n    if verbose:\n      print(*args)\n\n  region = get_region()\n  client = get_ec2_client()\n  ec2 = get_ec2_resource()\n  response = client.describe_instances()\n  assert is_good_response(response)\n\n  instance_list = []\n  for instance in ec2.instances.all():\n    if instance.state['Name'] != 'running':\n      continue\n\n    name = get_name(instance)\n    if (fragment in name or fragment in str(instance.public_ip_address) or\n            fragment in str(instance.id) or fragment in str(instance.private_ip_address)):\n      instance_list.append((util.toseconds(instance.launch_time), instance))\n\n  sorted_instance_list = reversed(sorted(instance_list, key=itemgetter(0)))\n  filtered_instance_list = []  \n  vprint(\"Using region \", region)\n  for (ts, instance) in sorted_instance_list:\n    if filter_by_key and instance.key_name != get_keypair_name():\n      vprint(f\"Got key {instance.key_name}, expected {get_keypair_name()}\")\n      continue\n    filtered_instance_list.append(instance)\n  return filtered_instance_list", "docstring": "Returns ec2.Instance object whose name contains fragment, in reverse order of launching (ie,\nmost recent intance first). Optionally filters by key, only including instances launched with\nkey_name matching current username.\n\nargs:\nverbose: print information about all matching instances found\n\nfilter_by_key  if True, ignore instances that are not launched with current\nuser's default key", "source": "juraj-google-style"}
{"code": "def __setattr__(self, name, value):\n        \n        if name in self.__by_name or name.startswith('_Message__'):\n            object.__setattr__(self, name, value)\n        else:\n            raise AttributeError(\"May not assign arbitrary value %s \"\n                                 \"to message %s\" % (name, type(self).__name__))", "docstring": "Change set behavior for messages.\n\nMessages may only be assigned values that are fields.\n\nDoes not try to validate field when set.\n\nArgs:\nname: Name of field to assign to.\nvalue: Value to assign to field.\n\nRaises:\nAttributeError when trying to assign value that is not a field.", "source": "juraj-google-style"}
{"code": "def __init__(self, port=CONTROLLER_PORT, easgd_alpha=0.5,\n                 \n                 start_halving_at=6, end_at=10, sync_freq=10, halving_freq=1,\n                 valid_freq=1500, learning_rate=0.1, log_path=None):\n        \n\n        Controller.__init__(self, port)\n        self.epoch_start_halving = start_halving_at\n        self.end_at = end_at\n        self.sync_freq = sync_freq\n        self.start_time = None\n        self.rand = np.random.RandomState(3)\n        self.epoch = 0\n        self._current_iter = 0\n        self._iters_from_last_valid = 0\n        self._evaluating = False\n        self._valid_freq = valid_freq\n        self._halving_freq = halving_freq\n        self._done = False\n        self._lr = learning_rate\n        self._easgd_alpha = easgd_alpha\n        self._training_names = []\n        self._evaluation_names = []\n        self._best_valid_cost = sys.float_info.max\n        self._lock = Lock()\n\n        self.num_train_batches = 0\n        self.batch_pool = []\n        self._train_costs = []\n        self._epoch_start_time = None\n        self.prepared_worker_pool = set()\n        self.log_file = open(log_path, \"w\") if log_path else None\n        if log_path:\n            logging.info(\"write logs into {}\".format(log_path))\n        logging.info(\"multi-gpu server is listening port {}\".format(port))", "docstring": "Initialize the controller.\n\nArgs:\nport (int): batches in one training step\neasgd_alpha (float)", "source": "juraj-google-style"}
{"code": "def get_metadata(self, key: str, per_trial: bool=True) -> Optional[Any]:", "docstring": "Gets metadata for current trial or current sampling.\n\nArgs:\nkey: A string as key to metadata.\nper_trial: If True, the key is retrieved per curent trial. Otherwise, it\nis retrieved per current sampling.\n\nReturns:\nA value that can be deserialized by `pg.from_json_str`.", "source": "github-repos"}
{"code": "def _check_wiremap_validity(self, wire_map, keymap, valmap):\n        \n        for k, v in wire_map.items():\n            kname = \"%s[%d]\" % (k[0].name, k[1])\n            vname = \"%s[%d]\" % (v[0].name, v[1])\n            if k not in keymap:\n                raise DAGCircuitError(\"invalid wire mapping key %s\" % kname)\n            if v not in valmap:\n                raise DAGCircuitError(\"invalid wire mapping value %s\" % vname)\n            if type(k) is not type(v):\n                raise DAGCircuitError(\"inconsistent wire_map at (%s,%s)\" %\n                                      (kname, vname))", "docstring": "Check that the wiremap is consistent.\n\nCheck that the wiremap refers to valid wires and that\nthose wires have consistent types.\n\nArgs:\nwire_map (dict): map from (register,idx) in keymap to\n(register,idx) in valmap\nkeymap (dict): a map whose keys are wire_map keys\nvalmap (dict): a map whose keys are wire_map values\n\nRaises:\nDAGCircuitError: if wire_map not valid", "source": "juraj-google-style"}
{"code": "def word_fts(self, word):\n    return list(map(self.fts, self.segs(word)))", "docstring": "Return featural analysis of `word`\n\nArgs:\nword (unicode):  one or more IPA segments\n\nReturns:\nlist: list of lists (value, feature) tuples where each inner list\ncorresponds to a segment in `word`", "source": "codesearchnet"}
{"code": "def batch_shape_tensor(self, name='batch_shape_tensor'):\n    with self._name_scope(name):\n        return self._batch_shape_tensor()", "docstring": "Shape of batch dimensions of this operator, determined at runtime.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n`[B1,...,Bb]`.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`int32` `Tensor`", "source": "github-repos"}
{"code": "def podcast_episodes(self, *, device_id=None):\n    if (device_id is None):\n        device_id = self.device_id\n    podcast_episode_list = []\n    for chunk in self.podcast_episodes_iter(device_id=device_id, page_size=49995):\n        podcast_episode_list.extend(chunk)\n    return podcast_episode_list", "docstring": "Get a listing of podcast episodes for all subscribed podcasts.\n\nParamaters:\ndevice_id (str, Optional): A mobile device ID.\nDefault: Use ``device_id`` of the :class:`MobileClient` instance.\n\nReturns:\nlist: Podcast episode dicts.", "source": "codesearchnet"}
{"code": "def __init__(self, query):\n    \n    self._timeseries_list = list(query.iter(headers_only=True))\n\n    \n    \n    self._metric_type = query.metric_type", "docstring": "Initializes the QueryMetadata given the query object.\n\nArgs:\nquery: A Query object.", "source": "juraj-google-style"}
{"code": "class DbrxAttentionConfig(PretrainedConfig):\n    base_config_key = 'attn_config'\n\n    def __init__(self, attn_pdrop: float=0.0, clip_qkv: Optional[float]=None, kv_n_heads: int=1, rope_theta: float=10000.0, **kwargs: Any):\n        super().__init__(**kwargs)\n        self.attn_pdrop = attn_pdrop\n        self.clip_qkv = clip_qkv\n        self.kv_n_heads = kv_n_heads\n        self.rope_theta = rope_theta\n        for k in ['model_type', 'attn_implementation', 'transformers_version', '_commit_hash', 'torch_dtype']:\n            if k in kwargs:\n                kwargs.pop(k)\n        if len(kwargs) != 0:\n            raise ValueError(f'Found unknown kwargs={kwargs!r}')", "docstring": "Configuration class for Dbrx Attention.\n\n[`DbrxAttention`] class. It is used to instantiate attention layers\naccording to the specified arguments, defining the layers architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\nattn_pdrop (`float`, *optional*, defaults to 0.0):\nThe dropout probability for the attention layers.\nclip_qkv (`float`, *optional*):\nIf set, clip the queries, keys, and values in the attention layer to this value.\nkv_n_heads (`int`, *optional*, defaults to 1): For grouped_query_attention only, allow user to specify number of kv heads.\nrope_theta (`float`, *optional*, defaults to 10000.0): The base frequency for rope.", "source": "github-repos"}
{"code": "def uniform_row_length(self):\n    return self._uniform_row_length", "docstring": "Returns the length of each row in this partition, if rows are uniform.\n\nIf all rows in this `RowPartition` have the same length, then this returns\nthat length as a scalar integer `Tensor`.  Otherwise, it returns `None`.\n\nReturns:\nscalar Tensor with `type=self.dtype`, or `None`.", "source": "github-repos"}
{"code": "def _test_dir(self, test_name):\n    test_dir = os.path.join(self.get_temp_dir(), test_name)\n    if os.path.isdir(test_dir):\n        for f in glob.glob('%s/*' % test_dir):\n            os.remove(f)\n    else:\n        os.makedirs(test_dir)\n    return test_dir", "docstring": "Create an empty dir to use for tests.\n\nArgs:\ntest_name: Name of the test.\n\nReturns:\nAbsolute path to the test directory.", "source": "github-repos"}
{"code": "def try_claim(self, position):\n    raise NotImplementedError", "docstring": "Atomically determines if a record at a split point is within the range.\n\nThis method should be called **if and only if** the record is at a split\npoint. This method may modify the internal state of the ``RangeTracker`` by\nupdating the last-consumed position to ``position``.\n\n** Thread safety **\n\nMethods of the class ``RangeTracker`` including this method may get invoked\nby different threads, hence must be made thread-safe, e.g. by using a single\nlock object.\n\nArgs:\nposition: starting position of a record being read by a source.\n\nReturns:\n``True``, if the given position falls within the current range, returns\n``False`` otherwise.", "source": "github-repos"}
{"code": "def up_to(self, term: str) -> str:\n    end = self.input.find(term, self.offset)\n    if (end < 0):\n        raise EndOfInput(self)\n    res = self.input[self.offset:end]\n    self.offset = (end + 1)\n    return res", "docstring": "Parse and return segment terminated by the first occurence of a string.\n\nArgs:\nterm: Terminating string.\n\nRaises:\nEndOfInput: If `term` does not occur in the rest of the input text.", "source": "codesearchnet"}
{"code": "def advance_for_next_slice(self, recovery_slice=False):\n    self.slice_start_time = None\n    self.slice_request_id = None\n    self.slice_retries = 0\n    self.acquired_once = False\n    if recovery_slice:\n        self.slice_id += 2\n    else:\n        self.slice_id += 1", "docstring": "Advance self for next slice.\n\nArgs:\nrecovery_slice: True if this slice is running recovery logic.\nSee handlers.MapperWorkerCallbackHandler._attempt_slice_recovery\nfor more info.", "source": "codesearchnet"}
{"code": "def log_every_n(level, msg, n, *args):\n    \n    count = _GetNextLogCountPerToken(_GetFileAndLine())\n    log_if(level, msg, not (count % n), *args)", "docstring": "Log 'msg % args' at level 'level' once per 'n' times.\n\nLogs the 1st call, (N+1)st call, (2N+1)st call,  etc.\nNot threadsafe.\n\nArgs:\nlevel: The level at which to log.\nmsg: The message to be logged.\nn: The number of times this should be called before it is logged.\n*args: The args to be substituted into the msg.", "source": "juraj-google-style"}
{"code": "def GetShadowMap(self, since=None):\n    return ShadowUpdateGetter(self.conf).GetUpdates(source=self, search_base=self.conf['base'], search_filter=self.conf['filter'], search_scope=self.conf['scope'], since=since)", "docstring": "Return the shadow map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of ShadowMap", "source": "github-repos"}
{"code": "def save_component(self, component_name, save_path):\n    component = self.get_component(component_name=component_name)\n    self._validate_savable(component=component, component_name=component_name)\n    return component.save(sess=self.session, save_path=save_path)", "docstring": "Saves a component of this model to the designated location.\n\nArgs:\ncomponent_name: The component to save.\nsave_path: The location to save to.\nReturns:\nCheckpoint path where the component was saved.", "source": "codesearchnet"}
{"code": "def model(x):\n    hidden_act = dense_layer(hidden_weights, x)\n    logits_act = dense_layer(output_weights, hidden_act, tf.identity)\n    y = tf.nn.softmax(logits_act)\n    return y", "docstring": "Feed forward function of the model.\n\nArgs:\nx: a (?, 28*28) tensor consisting of the feature inputs for a batch of\nexamples.\n\nReturns:\nA (?, 10) tensor containing the class scores for each example.", "source": "github-repos"}
{"code": "def swo_read(self, offset, num_bytes, remove=False):\n    buf_size = ctypes.c_uint32(num_bytes)\n    buf = (ctypes.c_uint8 * num_bytes)(0)\n    self._dll.JLINKARM_SWO_Read(buf, offset, ctypes.byref(buf_size))\n    buf_size = buf_size.value\n    if remove:\n        self.swo_flush(buf_size)\n    return list(buf)[:buf_size]", "docstring": "Reads data from the SWO buffer.\n\nThe data read is not automatically removed from the SWO buffer after\nreading unless ``remove`` is ``True``.  Otherwise the callee must\nexplicitly remove the data by calling ``.swo_flush()``.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): offset of first byte to be retrieved\nnum_bytes (int): number of bytes to read\nremove (bool): if data should be removed from buffer after read\n\nReturns:\nA list of bytes read from the SWO buffer.", "source": "codesearchnet"}
{"code": "def sholl_frequency(nrn, neurite_type=NeuriteType.all, step_size=10):\n    nrns = neuron_population(nrn)\n    neurite_filter = is_type(neurite_type)\n    min_soma_edge = float('Inf')\n    max_radii = 0\n    neurites_list = []\n    for neuron in nrns:\n        neurites_list.extend(((neurites, neuron.soma.center) for neurites in neuron.neurites if neurite_filter(neurites)))\n        min_soma_edge = min(min_soma_edge, neuron.soma.radius)\n        max_radii = max(max_radii, np.max(np.abs(bounding_box(neuron))))\n    radii = np.arange(min_soma_edge, (max_radii + step_size), step_size)\n    ret = np.zeros_like(radii)\n    for (neurites, center) in neurites_list:\n        ret += sholl_crossings(neurites, center, radii)\n    return ret", "docstring": "perform Sholl frequency calculations on a population of neurites\n\nArgs:\nnrn(morph): nrn or population\nneurite_type(NeuriteType): which neurites to operate on\nstep_size(float): step size between Sholl radii\n\nNote:\nGiven a neuron, the soma center is used for the concentric circles,\nwhich range from the soma radii, and the maximum radial distance\nin steps of `step_size`.  When a population is given, the concentric\ncircles range from the smallest soma radius to the largest radial neurite\ndistance.  Finally, each segment of the neuron is tested, so a neurite that\nbends back on itself, and crosses the same Sholl radius will get counted as\nhaving crossed multiple times.", "source": "codesearchnet"}
{"code": "def center_crop(self, image: 'torch.Tensor', size: SizeDict, **kwargs) -> 'torch.Tensor':\n    if size.height is None or size.width is None:\n        raise ValueError(f\"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}\")\n    image_height, image_width = image.shape[-2:]\n    crop_height, crop_width = (size.height, size.width)\n    if crop_width > image_width or crop_height > image_height:\n        padding_ltrb = [(crop_width - image_width) \n        image = F.pad(image, padding_ltrb, fill=0)\n        image_height, image_width = image.shape[-2:]\n        if crop_width == image_width and crop_height == image_height:\n            return image\n    crop_top = int((image_height - crop_height) / 2.0)\n    crop_left = int((image_width - crop_width) / 2.0)\n    return F.crop(image, crop_top, crop_left, crop_height, crop_width)", "docstring": "Center crop an image to `(size[\"height\"], size[\"width\"])`. If the input size is smaller than `crop_size` along\nany edge, the image is padded with 0's and then center cropped.\n\nArgs:\nimage (`\"torch.Tensor\"`):\nImage to center crop.\nsize (`Dict[str, int]`):\nSize of the output image.\n\nReturns:\n`torch.Tensor`: The center cropped image.", "source": "github-repos"}
{"code": "def forward(self, hidden_features):\n    hidden_features = hidden_features.transpose(-1, -2)\n    if self.head_aggregation == 'use_last':\n        hidden_features = hidden_features[..., -1]\n    elif self.head_aggregation == 'max_pool':\n        hidden_features = hidden_features.max(dim=-1).values\n    elif self.head_aggregation == 'avg_pool':\n        hidden_features = hidden_features.mean(dim=-1)\n    if self.flatten:\n        hidden_features = self.flatten(hidden_features)\n    hidden_features = self.dropout(hidden_features)\n    hidden_features = self.projection(hidden_features)\n    if self.distribution_output is None and self.output_range is not None:\n        hidden_features = torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0]\n    return hidden_features", "docstring": "Args:\nhidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode\nor `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden\nfeatures.\n\nReturns:\n`torch.Tensor` of shape `(batch_size x num_targets)`.", "source": "github-repos"}
{"code": "def process_rule(edges: Edges, ast: Function, rule: Mapping[(str, Any)], spec: BELSpec):\n    ast_type = ast.__class__.__name__\n    trigger_functions = rule.get('trigger_function', [])\n    trigger_types = rule.get('trigger_type', [])\n    rule_subject = rule.get('subject')\n    rule_relation = rule.get('relation')\n    rule_object = rule.get('object')\n    log.debug(f'Running {rule_relation}  Type: {ast_type}')\n    if isinstance(ast, Function):\n        function_name = ast.name\n        args = ast.args\n        parent_function = ast.parent_function\n        if (function_name in trigger_functions):\n            if (rule_subject == 'trigger_value'):\n                subject = ast\n            if (rule_object == 'args'):\n                for arg in args:\n                    log.debug(f'1: {subject} {arg}')\n                    edge_ast = BELAst(subject, rule_relation, arg, spec)\n                    edges.append(edge_ast)\n            elif ((rule_object == 'parent_function') and parent_function):\n                log.debug(f'2: {subject} {parent_function}')\n                edge_ast = BELAst(subject, rule_relation, parent_function, spec)\n                edges.append(edge_ast)\n        elif (ast_type in trigger_types):\n            if (rule_subject == 'trigger_value'):\n                subject = ast\n            if (rule_object == 'args'):\n                for arg in args:\n                    log.debug(f'3: {subject} {arg}')\n                    edge_ast = BELAst(subject, rule_relation, arg, spec)\n                    edges.append(edge_ast)\n            elif ((rule_object == 'parent_function') and parent_function):\n                log.debug(f'4: {subject} {parent_function}')\n                edge_ast = BELAst(subject, rule_relation, parent_function, spec)\n                edges.append(edge_ast)\n    if isinstance(ast, NSArg):\n        term = '{}:{}'.format(ast.namespace, ast.value)\n        parent_function = ast.parent_function\n        if (ast_type in trigger_types):\n            if (rule_subject == 'trigger_value'):\n                subject = term\n            if (rule_object == 'args'):\n                for arg in args:\n                    log.debug(f'5: {subject} {arg}')\n                    edge_ast = BELAst(subject, rule_relation, arg, spec)\n                    edges.append(edge_ast)\n            elif ((rule_object == 'parent_function') and parent_function):\n                log.debug(f'6: {subject} {parent_function}')\n                edge_ast = BELAst(subject, rule_relation, parent_function, spec)\n                edges.append(edge_ast)\n    if hasattr(ast, 'args'):\n        for arg in ast.args:\n            process_rule(edges, arg, rule, spec)", "docstring": "Process computed edge rule\n\nRecursively processes BELAst versus a single computed edge rule\n\nArgs:\nedges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs\nast (Function): BEL Function AST\nrule (Mapping[str, Any]: computed edge rule", "source": "codesearchnet"}
{"code": "def load_state(self, in_path):\n    with open(in_path, 'r') as infile:\n        state = json.load(infile)\n    self.restore_state(state)", "docstring": "Load the current state of this emulated object from a file.\n\nThe file should have been produced by a previous call to save_state.\n\nArgs:\nin_path (str): The path to the saved state dump that you wish\nto load.", "source": "codesearchnet"}
{"code": "def apply_op(input_layer, operation, *op_args, **op_kwargs):\n  \n  return input_layer.with_tensor(\n      operation(input_layer.tensor, *op_args, **op_kwargs))", "docstring": "Applies the given operation to this before without adding any summaries.\n\nArgs:\ninput_layer: The input layer for this op.\noperation: An operation that takes a tensor and the supplied args.\n*op_args: Extra arguments for operation.\n**op_kwargs: Keyword arguments for the operation.\nReturns:\nA new layer with operation applied.", "source": "juraj-google-style"}
{"code": "def convert_to_rgb(image: ImageInput) -> ImageInput:\n    if not isinstance(image, PIL.Image.Image):\n        return image\n    if image.mode == 'RGB':\n        return image\n    image_rgba = image.convert('RGBA')\n    background = Image.new('RGBA', image_rgba.size, (255, 255, 255))\n    alpha_composite = Image.alpha_composite(background, image_rgba)\n    alpha_composite = alpha_composite.convert('RGB')\n    return alpha_composite", "docstring": "Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image\nas is.\nArgs:\nimage (Image):\nThe image to convert.", "source": "github-repos"}
{"code": "def add(self, method_mask, path, func):\n    is_err = (len(signature(func).parameters) == 3)\n    is_subchain = isinstance(func, MiddlewareChain)\n    tup = MiddlewareNode(func=func, mask=method_mask, path=path, is_errorhandler=is_err, is_subchain=is_subchain)\n    self.mw_list.append(tup)", "docstring": "Add a function to the middleware chain.\nThis function is returned when iterating over the chain with matching method and path.\n\nArgs:\nmethod_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific\nrequest methods.\npath (str or regex): An object with which to compare request urls\nfunc (callable): The function to be yieled from the generator upon a request\nmatching the method_mask and path", "source": "codesearchnet"}
{"code": "def get_float(self, min_float=_MIN_FLOAT, max_float=_MAX_FLOAT):\n    return self.fdp.ConsumeFloatInRange(min_float, max_float)", "docstring": "Consume a float with given constraints.\n\nArgs:\nmin_float: Minimum allowed float.\nmax_float: Maximum allowed float.\n\nReturns:\nConsumed float based on input bytes and constraints.", "source": "github-repos"}
{"code": "def _unary_op(cls,\n            x: 'TensorFluent',\n            op: Callable[[tf.Tensor], tf.Tensor],\n            dtype: tf.DType) -> 'TensorFluent':\n        \n        x = x.cast(dtype)\n        t = op(x.tensor)\n        scope = x.scope.as_list()\n        batch = x.batch\n        return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the unary `op` applied to fluent `x`.\n\nArgs:\nx: The input fluent.\nop: The unary operation.\ndtype: The output's data type.\n\nReturns:\nA TensorFluent wrapping the unary operator's output.", "source": "juraj-google-style"}
{"code": "def bleu_score(predictions, labels, **unused_kwargs):\n    outputs = tf.to_int32(tf.argmax(predictions, axis=(- 1)))\n    outputs = tf.squeeze(outputs, axis=[(- 1), (- 2)])\n    labels = tf.squeeze(labels, axis=[(- 1), (- 2)])\n    bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)\n    return (bleu, tf.constant(1.0))", "docstring": "BLEU score computation between labels and predictions.\n\nAn approximate BLEU scoring method since we do not glue word pieces or\ndecode the ids and tokenize the output. By default, we use ngram order of 4\nand use brevity penalty. Also, this does not have beam search.\n\nArgs:\npredictions: tensor, model predictions\nlabels: tensor, gold output.\n\nReturns:\nbleu: int, approx bleu score", "source": "codesearchnet"}
{"code": "def _ParseFileData(self, knowledge_base, file_object):\n    \n    text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')\n\n    product_values = {}\n    for line in text_file_object.readlines():\n      line = line.strip()\n      if line.startswith('\n        continue\n      key, value = line.split('=')\n      key = key.strip().upper()\n      value = value.strip().strip('\"')\n      product_values[key] = value\n\n    if not knowledge_base.GetValue('operating_system_product'):\n      system_product = product_values.get('DISTRIB_DESCRIPTION', None)\n      if system_product:\n        knowledge_base.SetValue('operating_system_product', system_product)", "docstring": "Parses file content (data) for system product preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_object (dfvfs.FileIO): file-like object that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def end_at(self, document_fields):\n    return self._cursor_helper(document_fields, before=False, start=False)", "docstring": "End query results at a particular document value.\n\nThe result set will **include** the document specified by\n``document_fields``.\n\nIf the current query already has specified an end cursor -- either\nvia this method or\n:meth:`~.firestore_v1beta1.query.Query.end_before` -- this will\noverwrite it.\n\nWhen the query is sent to the server, the ``document_fields`` will\nbe used in the order given by fields set by\n:meth:`~.firestore_v1beta1.query.Query.order_by`.\n\nArgs:\ndocument_fields (Union[~.firestore_v1beta1.\\\ndocument.DocumentSnapshot, dict, list, tuple]): a document\nsnapshot or a dictionary/list/tuple of fields representing a\nquery results cursor. A cursor is a collection of values that\nrepresent a position in a query result set.\n\nReturns:\n~.firestore_v1beta1.query.Query: A query with cursor. Acts as\na copy of the current query, modified with the newly added\n\"end at\" cursor.", "source": "codesearchnet"}
{"code": "def has_option(self, section, option):\n        \n        if section not in self.sections():\n            return False\n        else:\n            option = self.optionxform(option)\n            return option in self[section]", "docstring": "Checks for the existence of a given option in a given section.\n\nArgs:\nsection (str): name of section\noption (str): name of option\n\nReturns:\nbool: whether the option exists in the given section", "source": "juraj-google-style"}
{"code": "def publish(cls, message, client_filter=None):\n    \n    with cls._lock:\n      for client in cls.subscribers:\n        if (not client_filter) or client_filter(client):\n          client.send(message)", "docstring": "Publish messages to subscribers.\n\nArgs:\nmessage: The message to publish.\nclient_filter: A filter function to call passing in each client. Only\nclients for whom the function returns True will have the\nmessage sent to them.", "source": "juraj-google-style"}
{"code": "def time_series(timefile, colnames):\n    if (not timefile.is_file()):\n        return None\n    data = pd.read_csv(timefile, delim_whitespace=True, dtype=str, header=None, skiprows=1, index_col=0, engine='c', memory_map=True, error_bad_lines=False, warn_bad_lines=False)\n    data = data.apply(pd.to_numeric, raw=True, errors='coerce')\n    rows_to_del = []\n    irow = (len(data) - 1)\n    while (irow > 0):\n        iprev = (irow - 1)\n        while ((iprev >= 0) and (data.index[irow] <= data.index[iprev])):\n            rows_to_del.append(iprev)\n            iprev -= 1\n        irow = iprev\n    if rows_to_del:\n        rows_to_keep = (set(range(len(data))) - set(rows_to_del))\n        data = data.take(list(rows_to_keep), convert=False)\n    ncols = data.shape[1]\n    _tidy_names(colnames, ncols)\n    data.columns = colnames\n    return data", "docstring": "Read temporal series text file.\n\nIf :data:`colnames` is too long, it will be truncated. If it is too short,\nadditional numeric column names from 0 to N-1 will be attributed to the N\nextra columns present in :data:`timefile`.\n\nArgs:\ntimefile (:class:`pathlib.Path`): path of the time.dat file.\ncolnames (list of names): names of the variables expected in\n:data:`timefile` (may be modified).\n\nReturns:\n:class:`pandas.DataFrame`:\nTime series, with the variables in columns and the time steps in\nrows.", "source": "codesearchnet"}
{"code": "async def verify_worker_impls(chain):\n    \n    valid_worker_impls = get_valid_worker_impls()\n    for obj in chain.get_all_links_in_chain():\n        worker_impl = obj.worker_impl\n        log.info(\"Verifying {} {} as a {} task...\".format(obj.name, obj.task_id, worker_impl))\n        \n        \n        await valid_worker_impls[worker_impl](chain, obj)", "docstring": "Verify the task type (e.g. decision, build) of each link in the chain.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\n\nRaises:\nCoTError: on failure", "source": "juraj-google-style"}
{"code": "def get_group_by_id(self, group_id: str) -> typing.Optional['Group']:\n        \n        VALID_POSITIVE_INT.validate(group_id, 'get_group_by_id', exc=ValueError)\n        for group in self.groups:\n\n            if group.group_id == group_id:\n                return group\n        return None", "docstring": "Gets a group by id\n\nArgs:\ngroup_id: group id\n\nReturns: Group", "source": "juraj-google-style"}
{"code": "def reset(self, indices=None):\n    \n    if indices is None:\n      indices = np.arange(len(self._envs))\n    if self._blocking:\n      observs = [self._envs[index].reset() for index in indices]\n    else:\n      observs = [self._envs[index].reset(blocking=False) for index in indices]\n      observs = [observ() for observ in observs]\n    observ = np.stack(observs)\n    return observ", "docstring": "Reset the environment and convert the resulting observation.\n\nArgs:\nindices: The batch indices of environments to reset; defaults to all.\n\nReturns:\nBatch of observations.", "source": "juraj-google-style"}
{"code": "def _try_load_par_source(source_file_path):\n    prefix_path = source_file_path\n    while True:\n        prefix_path, basename = os.path.split(prefix_path)\n        if not basename:\n            break\n        suffix_path = os.path.normpath(os.path.relpath(source_file_path, start=prefix_path))\n        if prefix_path.endswith('.par') and os.path.isfile(prefix_path):\n            with zipfile.ZipFile(prefix_path) as z:\n                norm_names = [os.path.normpath(name) for name in z.namelist()]\n                if suffix_path in norm_names:\n                    with z.open(z.namelist()[norm_names.index(suffix_path)]) as zf:\n                        source_text = zf.read().decode('utf-8')\n                        return source_text.split('\\n')", "docstring": "Try loading the source code inside a .par file.\n\nA .par file is a zip-compressed, self-contained Python executable.\nIt contains the content of individual Python source files that can\nbe read only through extracting from the zip file.\n\nArgs:\nsource_file_path: The full path to the file inside the .par file. This\npath should include the path to the .par file itself, followed by the\nintra-par path, e.g.,\n\"/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py\".\n\nReturns:\nIf successful, lines of the source file as a `list` of `str`s.\nElse, `None`.", "source": "github-repos"}
{"code": "def sg_summary_activation(tensor, prefix=None, name=None):\n    r\n    \n    prefix = '' if prefix is None else prefix + '/'\n    \n    name = prefix + _pretty_name(tensor) if name is None else prefix + name\n    \n    _scalar(name + '/ratio',\n            tf.reduce_mean(tf.cast(tf.greater(tensor, 0), tf.sg_floatx)))\n    _histogram(name + '/ratio-h', tensor)", "docstring": "r\"\"\"Register `tensor` to summary report as `activation`\n\nArgs:\ntensor: A `Tensor` to log as activation\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def query_foursquare(point, max_distance, client_id, client_secret):\n    if (not client_id):\n        return []\n    if (not client_secret):\n        return []\n    if from_cache(FS_CACHE, point, max_distance):\n        return from_cache(FS_CACHE, point, max_distance)\n    url = (FOURSQUARE_URL % (client_id, client_secret, point.lat, point.lon, max_distance))\n    req = requests.get(url)\n    if (req.status_code != 200):\n        return []\n    response = req.json()\n    result = []\n    venues = response['response']['venues']\n    for venue in venues:\n        name = venue['name']\n        distance = venue['location']['distance']\n        categories = [c['shortName'] for c in venue['categories']]\n        result.append({'label': name, 'distance': distance, 'types': categories, 'suggestion_type': 'FOURSQUARE'})\n    foursquare_insert_cache(point, result)\n    return result", "docstring": "Queries Squarespace API for a location\n\nArgs:\npoint (:obj:`Point`): Point location to query\nmax_distance (float): Search radius, in meters\nclient_id (str): Valid Foursquare client id\nclient_secret (str): Valid Foursquare client secret\nReturns:\n:obj:`list` of :obj:`dict`: List of locations with the following format:\n{\n'label': 'Coffee house',\n'distance': 19,\n'types': 'Commerce',\n'suggestion_type': 'FOURSQUARE'\n}", "source": "codesearchnet"}
{"code": "def _load_from_file(path):\n        \n        config = []\n\n        try:\n            with open(path, 'r') as config_file:\n                config = yaml.load(config_file)['normalizations']\n        except EnvironmentError as e:\n            raise ConfigError('Problem while loading file: %s' %\n                              e.args[1] if len(e.args) > 1 else e)\n        except (TypeError, KeyError) as e:\n            raise ConfigError('Config file has an unexpected structure: %s' % e)\n        except yaml.YAMLError:\n            raise ConfigError('Invalid YAML file syntax')\n\n        return config", "docstring": "Load a config file from the given path.\n\nLoad all normalizations from the config file received as\nargument. It expects to find a YAML file with a list of\nnormalizations and arguments under the key 'normalizations'.\n\nArgs:\npath: Path to YAML file.", "source": "juraj-google-style"}
{"code": "async def pipe_to_log(pipe, filehandles=(), level=logging.INFO):\n    \n    while True:\n        line = await pipe.readline()\n        if line:\n            line = to_unicode(line)\n            log.log(level, line.rstrip())\n            for filehandle in filehandles:\n                print(line, file=filehandle, end=\"\")\n        else:\n            break", "docstring": "Log from a subprocess PIPE.\n\nArgs:\npipe (filehandle): subprocess process STDOUT or STDERR\nfilehandles (list of filehandles, optional): the filehandle(s) to write\nto.  If empty, don't write to a separate file.  Defaults to ().\nlevel (int, optional): the level to log to.  Defaults to ``logging.INFO``.", "source": "juraj-google-style"}
{"code": "def compute_number_edges(function):\n    \n    n = 0\n    for node in function.nodes:\n        n += len(node.sons)\n    return n", "docstring": "Compute the number of edges of the CFG\nArgs:\nfunction (core.declarations.function.Function)\nReturns:\nint", "source": "juraj-google-style"}
{"code": "def loads(s, model):\n    \n    graphs = penman.loads(s, cls=XMRSCodec)\n    xs = [model.from_triples(g.triples()) for g in graphs]\n    return xs", "docstring": "Deserialize PENMAN graphs from a string\n\nArgs:\ns (str): serialized PENMAN graphs\nmodel: Xmrs subclass instantiated from decoded triples\nReturns:\na list of objects (of class *model*)", "source": "juraj-google-style"}
{"code": "def _HandleMetadataUpdate(\n      self, metadata_key='', recursive=True, wait=True, timeout=None,\n      retry=True):\n    \n    exception = None\n    while True:\n      try:\n        return self._GetMetadataUpdate(\n            metadata_key=metadata_key, recursive=recursive, wait=wait,\n            timeout=timeout)\n      except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n        if not isinstance(e, type(exception)):\n          exception = e\n          self.logger.error('GET request error retrieving metadata. %s.', e)\n        if retry:\n          continue\n        else:\n          break", "docstring": "Wait for a successful metadata response.\n\nArgs:\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\nwait: bool, True if we should wait for a metadata change.\ntimeout: int, timeout in seconds for returning metadata output.\nretry: bool, True if we should retry on failure.\n\nReturns:\njson, the deserialized contents of the metadata server.", "source": "juraj-google-style"}
{"code": "def pretty_print_config_to_json(self, configs):\n    descriptor = self.get_directory_list_doc(configs)\n    return json.dumps(descriptor, sort_keys=True, indent=2, separators=(',', ': '))", "docstring": "JSON string description of a protorpc.remote.Service in a discovery doc.\n\nArgs:\nconfigs: Either a single dict or a list of dicts containing the service\nconfigurations to list.\n\nReturns:\nstring, The directory list document as a JSON string.", "source": "codesearchnet"}
{"code": "def resource_path(relative_path=None, expect=None):\n    if (expect not in (None, 'file', 'folder')):\n        raise ArgumentError(\"Invalid expect parameter, must be None, 'file' or 'folder'\", expect=expect)\n    this_dir = os.path.dirname(__file__)\n    _resource_path = os.path.join(this_dir, '..', 'config')\n    if (relative_path is not None):\n        path = os.path.normpath(relative_path)\n        _resource_path = os.path.join(_resource_path, path)\n    if ((expect == 'file') and (not os.path.isfile(_resource_path))):\n        raise DataError((\"Expected resource %s to be a file and it wasn't\" % _resource_path))\n    elif ((expect == 'folder') and (not os.path.isdir(_resource_path))):\n        raise DataError((\"Expected resource %s to be a folder and it wasn't\" % _resource_path))\n    return os.path.abspath(_resource_path)", "docstring": "Return the absolute path to a resource in iotile-build.\n\nThis method finds the path to the `config` folder inside\niotile-build, appends `relative_path` to it and then\nchecks to make sure the desired file or directory exists.\n\nYou can specify expect=(None, 'file', or 'folder') for\nwhat you expect to find at the given path.\n\nArgs:\nrelative_path (str): The relative_path from the config\nfolder to the resource in question.  This path can\nbe specified using / characters on all operating\nsystems since it will be normalized before usage.\nIf None is passed, the based config folder will\nbe returned.\nexpect (str): What the path should resolve to, which is\nchecked before returning, raising a DataError if\nthe check fails.  You can pass None for no checking,\nfile for checking `os.path.isfile`, or folder for\nchecking `os.path.isdir`.  Default: None\n\nReturns:\nstr: The normalized absolute path to the resource.", "source": "codesearchnet"}
{"code": "def forceSetSlaac(self, slaacAddress):\n        \n        print '%s call forceSetSlaac' % self.port\n        print slaacAddress\n        try:\n            cmd = 'ipaddr add %s' % str(slaacAddress)\n            print cmd\n            return self.__sendCommand(cmd)[0] == 'Done'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"forceSetSlaac() Error: \" + str(e))", "docstring": "force to set a slaac IPv6 address to Thread interface\n\nArgs:\nslaacAddress: a slaac IPv6 address to be set\n\nReturns:\nTrue: successful to set slaac address to Thread interface\nFalse: fail to set slaac address to Thread interface", "source": "juraj-google-style"}
{"code": "def _matrix_conv(self, m1, m2):\n    n = m1[0, 0].shape.as_list()[0]\n    if n != m2[0, 0].shape.as_list()[0]:\n        raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0].shape={m1[0, 0].shape} and m2[0, 0].shape={m2[0, 0].shape}.')\n    k = int(np.sqrt(len(m1)))\n    l = int(np.sqrt(len(m2)))\n    result = {}\n    size = k + l - 1\n    for i in range(size):\n        for j in range(size):\n            result[i, j] = array_ops.zeros([n, n], self.dtype)\n            for index1 in range(min(k, i + 1)):\n                for index2 in range(min(k, j + 1)):\n                    if i - index1 < l and j - index2 < l:\n                        result[i, j] += math_ops.matmul(m1[index1, index2], m2[i - index1, j - index2])\n    return result", "docstring": "Matrix convolution.\n\nArgs:\nm1: A k x k dictionary, each element is a n x n matrix.\nm2: A l x l dictionary, each element is a n x n matrix.\n\nReturns:\n(k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix.\nRaises:\nValueError: if the entries of m1 and m2 are of different dimensions.", "source": "github-repos"}
{"code": "def _EnforceShapeInvariant(merge_var, next_var):\n    if isinstance(merge_var, tensor_lib.Tensor):\n        m_shape = merge_var.get_shape()\n        n_shape = next_var.get_shape()\n        if not _ShapeLessThanOrEqual(n_shape, m_shape):\n            enter = merge_var.op.inputs[0].op\n            assert util.IsLoopEnter(enter)\n            input_t = enter.inputs[0]\n            raise ValueError(\"Input tensor '%s' enters the loop with shape %s, but has shape %s after one iteration. To allow the shape to vary across iterations, use the `shape_invariants` argument of tf.while_loop to specify a less-specific shape.\" % (input_t.name, input_t.shape, n_shape))\n    else:\n        raise TypeError(f\"'merge_var' must be a Tensor. Received: {type(merge_var)}.\")", "docstring": "Check if the shapes of the loops variables are invariants.\n\nArgs:\nmerge_var: The tensor representing the initial values of the loop\nvariables.\nnext_var: The tensor representing the values of the loop variables\nafter one loop iteration.\n\nRaises:\nValueError: If any tensor in `merge_var` has a more specific shape than\nits corresponding tensor in `next_var`.", "source": "github-repos"}
{"code": "def process(self, element):\n    input_ids = self._tokenizer(element, return_tensors='pt', padding='max_length', max_length=512).input_ids\n    return input_ids", "docstring": "Process the raw text input to a format suitable for\nT5ForConditionalGeneration model inference\n\nArgs:\nelement: A string of text\n\nReturns:\nA tokenized example that can be read by the\nT5ForConditionalGeneration", "source": "github-repos"}
{"code": "def __init__(self, token):\n        r\n\n        self.base_url = 'http:\n        self.token = token\n        self.geo_criteria = ['stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc',\n                             'subgacc']", "docstring": "r\"\"\" Instantiates an instance of MesoPy.\n\nArguments:\n----------\ntoken: string, mandatory\nYour API token that authenticates you for requests against MesoWest.mes\n\nReturns:\n--------\nNone.\n\nRaises:\n-------\nNone.", "source": "juraj-google-style"}
{"code": "def all_logging_disabled(highest_level=logging.CRITICAL):\n    previous_level = logging.root.manager.disable\n    logging.disable(highest_level)\n    try:\n        (yield)\n    finally:\n        logging.disable(previous_level)", "docstring": "Disable all logging temporarily.\n\nA context manager that will prevent any logging messages triggered during the body from being processed.\n\nArgs:\nhighest_level: the maximum logging level that is being blocked", "source": "codesearchnet"}
{"code": "def __init__(self, tid=None, stdout=None, stderr=None):\n        \n        self._tid = tid\n        super().__init__()\n        self.parent = None\n        self._update_lock = threading.Lock()\n        self._outputs = []\n        self._stdout = stdout\n        self._stderr = stderr", "docstring": "Initialize the AppFuture.\n\nArgs:\n\nKWargs:\n- tid (Int) : Task id should be any unique identifier. Now Int.\n- stdout (str) : Stdout file of the app.\nDefault: None\n- stderr (str) : Stderr file of the app.\nDefault: None", "source": "juraj-google-style"}
{"code": "def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None):\n    value_modifications = {}\n    for (name, options) in images.items():\n        image_path = options.get('contextPath', os.path.join('images', name))\n        image_tag = tag\n        paths = (list(options.get('paths', [])) + [image_path, 'chartpress.yaml'])\n        last_commit = last_modified_commit(*paths)\n        if (tag is None):\n            if chart_version:\n                image_tag = '{}-{}'.format(chart_version, last_commit)\n            else:\n                image_tag = last_commit\n        image_name = (prefix + name)\n        image_spec = '{}:{}'.format(image_name, image_tag)\n        value_modifications[options['valuesPath']] = {'repository': image_name, 'tag': SingleQuotedScalarString(image_tag)}\n        template_namespace = {'LAST_COMMIT': last_commit, 'TAG': image_tag}\n        if (tag or image_needs_building(image_spec)):\n            build_args = render_build_args(options, template_namespace)\n            build_image(image_path, image_spec, build_args, options.get('dockerfilePath'))\n        else:\n            print(f'Skipping build for {image_spec}, it already exists')\n        if push:\n            if (tag or image_needs_pushing(image_spec)):\n                check_call(['docker', 'push', image_spec])\n            else:\n                print(f'Skipping push for {image_spec}, already on registry')\n    return value_modifications", "docstring": "Build a collection of docker images\n\nArgs:\nprefix (str): the prefix to add to images\nimages (dict): dict of image-specs from chartpress.yml\ntag (str):\nSpecific tag to use instead of the last modified commit.\nIf unspecified the tag for each image will be the hash of the last commit\nto modify the image's files.\ncommit_range (str):\nThe range of commits to consider, e.g. for building in CI.\nIf an image hasn't changed in the given range,\nit will not be rebuilt.\npush (bool):\nWhether to push the resulting images (default: False).\nchart_version (str):\nThe chart version, included as a prefix on image tags\nif `tag` is not specified.", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return (len(token_ids_0) + 2) * [0]\n    return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)", "docstring": "Create the token type IDs corresponding to the sequences passed. [What are token type\nIDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of\nbuilding: those.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nThe first tokenized sequence.\ntoken_ids_1 (`List[int]`, *optional*):\nThe second tokenized sequence.\nReturns:\n`List[int]`: The token type ids.", "source": "github-repos"}
{"code": "def from_orbit(cls, orbit, name=None, norad_id=None, cospar_id=None):\n        \n\n        name = \"0 %s\\n\" % name if name is not None else \"\"\n        norad_id = norad_id if norad_id is not None else \"99999\"\n\n        if cospar_id is not None:\n            y, _, i = cospar_id.partition('-')\n            cospar_id = y[2:] + i\n        else:\n            cospar_id = \"\"\n\n        orbit = orbit.copy(form='TLE', frame='TEME')\n\n        date = orbit.date.datetime\n        i, Ω, e, ω, M, n = orbit\n\n        line1 = \"1 {norad_id}U {cospar_id:<8} {date:%y}{day:012.8f} {ndot:>10} {ndotdot:>8} {bstar:>8} 0  999\".format(\n            norad_id=norad_id,\n            cospar_id=cospar_id,\n            date=date,\n            day=int(\"{:%j}\".format(date)) + date.hour / 24. + date.minute / 1440 + date.second / 86400 + date.microsecond / 86400000000.,\n            ndot=\"{: 0.8f}\".format(orbit.complements['ndot'] / 2).replace(\"0.\", \".\"),\n            ndotdot=_unfloat(orbit.complements['ndotdot'] / 6),\n            bstar=_unfloat(orbit.complements['bstar']),\n        )\n        line2 = \"2 {norad_id} {i:8.4f} {Ω:8.4f} {e} {ω:8.4f} {M:8.4f} {n:11.8f}99999\".format(\n            norad_id=norad_id,\n            i=np.degrees(i),\n            Ω=np.degrees(Ω),\n            e=\"{:.7f}\".format(e)[2:],\n            ω=np.degrees(ω),\n            M=np.degrees(M),\n            n=n * 86400 / (2 * np.pi)\n        )\n\n        line1 += str(cls._checksum(line1))\n        line2 += str(cls._checksum(line2))\n\n        return cls(\"%s%s\\n%s\" % (name, line1, line2))", "docstring": "Convert an orbit to it's TLE representation\n\nArgs:\norbit (Orbit)\nnorad_id (str or int):\ncospar_id (str):\nReturn:\nstr: TLE representation", "source": "juraj-google-style"}
{"code": "def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):\n    num_tokens = token_states.shape[0]\n    out_features = expert_weights.shape[-1]\n    output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)\n    cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)\n    zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)\n    cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))\n    for expert_num in range(expert_weights.shape[0]):\n        start = cumsum_num_tokens[expert_num]\n        end = cumsum_num_tokens[expert_num + 1]\n        tokens = token_states[start:end]\n        out = torch.matmul(tokens, expert_weights[expert_num])\n        output[start:end] = out\n    return output", "docstring": "Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally inefficient, especially when dealing with a large number of experts.\n\nArgs:\ntoken_states (torch.Tensor): Input tensor of shape (num_tokens, in_features).\nexpert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features).\ntokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.\n\nReturns:\ntorch.Tensor: Output tensor of shape (num_tokens, out_features).", "source": "github-repos"}
{"code": "def get_asset(self, asset_hash, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_ASSET_STATE, params=[asset_hash], id=id, endpoint=endpoint)", "docstring": "Get an asset by its hash\nArgs:\nasset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def en010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `en010`'.format(value))\n\n        self._en010 = value", "docstring": "Corresponds to IDD Field `en010`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `en010`\nUnit: kJ/kg\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def set_management_icmp(enabled=True, deploy=False):\n    if (enabled is True):\n        value = 'no'\n    elif (enabled is False):\n        value = 'yes'\n    else:\n        raise CommandExecutionError('Invalid option provided for service enabled option.')\n    ret = {}\n    query = {'type': 'config', 'action': 'set', 'xpath': \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\", 'element': '<disable-icmp>{0}</disable-icmp>'.format(value)}\n    ret.update(__proxy__['panos.call'](query))\n    if (deploy is True):\n        ret.update(commit())\n    return ret", "docstring": "Enables or disables the ICMP management service on the device.\n\nCLI Example:\n\nArgs:\nenabled (bool): If true the service will be enabled. If false the service will be disabled.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_management_icmp\nsalt '*' panos.set_management_icmp enabled=False deploy=True", "source": "codesearchnet"}
{"code": "def from_string(cls, key, password='notasecret'):\n        \n        key = _helpers._from_bytes(key)  \n        marker_id, key_bytes = pem.readPemBlocksFromFile(\n            six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)\n\n        if marker_id == 0:\n            pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,\n                                                 format='DER')\n        elif marker_id == 1:\n            key_info, remaining = decoder.decode(\n                key_bytes, asn1Spec=_PKCS8_SPEC)\n            if remaining != b'':\n                raise ValueError('Unused bytes', remaining)\n            pkey_info = key_info.getComponentByName('privateKey')\n            pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),\n                                                 format='DER')\n        else:\n            raise ValueError('No key could be detected.')\n\n        return cls(pkey)", "docstring": "Construct an RsaSigner instance from a string.\n\nArgs:\nkey: string, private key in PEM format.\npassword: string, password for private key file. Unused for PEM\nfiles.\n\nReturns:\nRsaSigner instance.\n\nRaises:\nValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in\nPEM format.", "source": "juraj-google-style"}
{"code": "def _get_scripts(self, host_metadata):\n    deploy_scripts = host_metadata.get('deploy-scripts', [])\n    if deploy_scripts:\n        return deploy_scripts\n    ovirt_scripts = host_metadata.get('ovirt-scripts', [])\n    if ovirt_scripts:\n        warnings.warn('Deprecated entry \"ovirt-scripts\" will not be supported in the future, replace with \"deploy-scripts\"')\n    return ovirt_scripts", "docstring": "Temporary method to retrieve the host scripts\n\nTODO:\nremove once the \"ovirt-scripts\" option gets deprecated\n\nArgs:\nhost_metadata(dict): host metadata to retrieve the scripts for\n\nReturns:\nlist: deploy scripts for the host, empty if none found", "source": "codesearchnet"}
{"code": "def _ModifyInterface(self, interface_config, config_key, config_value, replace=False):\n    config_entry = ('%s=%s' % (config_key, config_value))\n    if (not open(interface_config).read().count(config_key)):\n        with open(interface_config, 'a') as config:\n            config.write(('%s\\n' % config_entry))\n    elif replace:\n        for line in fileinput.input(interface_config, inplace=True):\n            print(re.sub(('%s=.*' % config_key), config_entry, line.rstrip()))", "docstring": "Write a value to a config file if not already present.\n\nArgs:\ninterface_config: string, the path to a config file.\nconfig_key: string, the configuration key to set.\nconfig_value: string, the value to set for the configuration key.\nreplace: bool, replace the configuration option if already present.", "source": "codesearchnet"}
{"code": "def readHolidayDates(self):\n    self.setContext('readHolidayDates')\n    try:\n        req_str = '0152310230304230282903'\n        self.request(False)\n        req_crc = self.calc_crc16(req_str[2:].decode('hex'))\n        req_str += req_crc\n        self.m_serial_port.write(req_str.decode('hex'))\n        raw_ret = self.m_serial_port.getResponse(self.getContext())\n        self.serialPostEnd()\n        unpacked_read = self.unpackStruct(raw_ret, self.m_hldy)\n        self.convertData(unpacked_read, self.m_hldy, self.m_kwh_precision)\n        return_crc = self.calc_crc16(raw_ret[1:(- 2)])\n        if (str(return_crc) == str(self.m_hldy['crc16'][MeterData.StringValue])):\n            ekm_log('Holidays and Schedules CRC success')\n            self.setContext('')\n            return True\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return False", "docstring": "Serial call to read holiday dates into meter object buffer.\n\nReturns:\nbool: True on completion.", "source": "codesearchnet"}
{"code": "def _stratonovich_integral(dim, dt, sqrt_dt, dw, stratonovich_draws, order):\n    p = order - 1\n    sqrt_rho_p = tf.sqrt(tf.constant(1 / 12 - sum([1 / r ** 2 for r in range(1, order + 1)]) / 2 / _PI ** 2, dtype=dw.dtype))\n    mu = stratonovich_draws[0]\n    zeta = tf.transpose(stratonovich_draws[1], [2, 0, 1])\n    eta = tf.transpose(stratonovich_draws[2], [2, 0, 1])\n    xi = dw / sqrt_dt\n    r_i = tf.stack([tf.ones(zeta[0, ...].shape + [dim], dtype=zeta.dtype) / r for r in range(1, order + 1)], 0)\n    value = dt * (_outer_prod(dw, dw) / 2 + sqrt_rho_p * (_outer_prod(mu[..., p], xi) - _outer_prod(xi, mu[..., p])))\n    value += dt * tf.reduce_sum(tf.multiply(_outer_prod(zeta, _SQRT_2 * xi + eta) - _outer_prod(_SQRT_2 * xi + eta, zeta), r_i), 0) / (2 * _PI)\n    return value", "docstring": "Approximate Stratonovich integrals J(i, j).\n\n\n\nArgs:\ndim: An integer. The dimension of the state.\ndt: A double. The time step.\nsqrt_dt: A double. The square root of dt.\ndw: A double. The Wiener increment.\nstratonovich_draws: A list of tensors corresponding to the independent\nN(0,1) random variables used in the approximation.\norder: An integer. The stratonovich_order.\n\nReturns:\nA Tensor of shape [dw.shape[0], dim, dim] corresponding to the Stratonovich\nintegral for each pairwise component of the Wiener process. In other words,\nJ(i,j) corresponds to an integral over W_i and W_j.", "source": "github-repos"}
{"code": "def get_filters(component):\n\n    def inner(c, filters=None):\n        filters = (filters or set())\n        if (not ENABLED):\n            return filters\n        if (not plugins.is_datasource(c)):\n            return filters\n        if (c in FILTERS):\n            filters |= FILTERS[c]\n        for d in dr.get_dependents(c):\n            filters |= inner(d, filters)\n        return filters\n    if (component not in _CACHE):\n        _CACHE[component] = inner(component)\n    return _CACHE[component]", "docstring": "Get the set of filters for the given datasource.\n\nFilters added to a ``RegistryPoint`` will be applied to all datasources that\nimplement it. Filters added to a datasource implementation apply only to\nthat implementation.\n\nFor example, a filter added to ``Specs.ps_auxww`` will apply to\n``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,\n``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``\nwill only apply to ``DefaultSpecs.ps_auxww``. See the modules in\n``insights.specs`` for those classes.\n\nArgs:\ncomponent (a datasource): The target datasource\n\nReturns:\nset: The set of filters defined for the datasource", "source": "codesearchnet"}
{"code": "def fill_tree(self, tree, input_dict):\n        \n\n        def add_element(item, key, value):\n            child_name = QtGui.QStandardItem(key)\n            child_name.setDragEnabled(False)\n            child_name.setSelectable(False)\n            child_name.setEditable(False)\n\n            if isinstance(value, dict):\n                for ket_child, value_child in value.items():\n                    add_element(child_name, ket_child, value_child)\n                child_value = QtGui.QStandardItem('')\n            else:\n                child_value = QtGui.QStandardItem(str(value))\n                child_value.setData(value)\n\n            child_value.setDragEnabled(False)\n            child_value.setSelectable(False)\n            child_value.setEditable(False)\n            item.appendRow([child_name, child_value])\n\n        for index, (loaded_item, loaded_item_settings) in enumerate(input_dict.items()):\n            \n            item = QtGui.QStandardItem(loaded_item)\n\n            for key, value in loaded_item_settings['settings'].items():\n                add_element(item, key, value)\n\n            value = QtGui.QStandardItem('')\n            tree.model().appendRow([item, value])\n\n            if tree == self.tree_loaded:\n                item.setEditable(False)\n            tree.setFirstColumnSpanned(index, self.tree_infile.rootIndex(), True)", "docstring": "fills a tree with nested parameters\nArgs:\ntree: QtGui.QTreeView\nparameters: dictionary or Parameter object\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _requests_post(self, url, json=None, data=None, username='', password='', xapikey='', headers=None, timeout=30):\n    if (headers is None):\n        headers = {}\n    auth = None\n    if (username and password):\n        auth = requests.auth.HTTPBasicAuth(username, password)\n    elif xapikey:\n        headers['x-api-key'] = xapikey\n    headers['User-Agent'] = self.user_agent\n    request = requests.post(url, auth=auth, data=data, json=json, headers=headers, timeout=timeout)\n    message = json\n    return (request.text, message, request.status_code, request.headers)", "docstring": "This function will POST to the url endpoint using requests.\nReturning an AdyenResult object on 200 HTTP response.\nEither json or data has to be provided.\nIf username and password are provided, basic auth will be used.\n\n\nArgs:\nurl (str): url to send the POST\njson (dict, optional): Dict of the JSON to POST\ndata (dict, optional): Dict, presumed flat structure of key/value\nof request to place\nusername (str, optionl): Username for basic auth. Must be included\nas part of password.\npassword (str, optional): Password for basic auth. Must be included\nas part of username.\nheaders (dict, optional): Key/Value pairs of headers to include\ntimeout (int, optional): Default 30. Timeout for the request.\n\nReturns:\nstr:    Raw response received\nstr:    Raw request placed\nint:    HTTP status code, eg 200,404,401\ndict:   Key/Value pairs of the headers received.", "source": "codesearchnet"}
{"code": "def outgoing_args(self, nodeid):\n        \n        _vars = self._vars\n        _hcons = self._hcons\n        args = self.args(nodeid)  \n        for arg, val in list(args.items()):\n            \n            if arg == IVARG_ROLE or val not in _vars:\n                del args[arg]\n            else:\n                refs = _vars[val]['refs']\n                \n                if not (val in _hcons or IVARG_ROLE in refs or 'LBL' in refs):\n                    del args[arg]\n        return args", "docstring": "Return the arguments going from *nodeid* to other predications.\n\nValid arguments include regular variable arguments and scopal\n(label-selecting or HCONS) arguments. MOD/EQ\nlinks, intrinsic arguments, and constant arguments are not\nincluded.\n\nArgs:\nnodeid: the nodeid of the EP that is the arguments' source\nReturns:\ndict: `{role: tgt}`", "source": "juraj-google-style"}
{"code": "def TransformerEncoder(vocab_size, num_classes=10, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, max_len=2048, mode='train'):\n    input_embedding = layers.Serial(layers.Embedding(feature_depth, vocab_size), layers.Dropout(rate=dropout, mode=mode), layers.PositionalEncoding(max_len=max_len))\n    return layers.Serial(layers.Branch(), layers.Parallel(input_embedding, layers.PaddingMask()), layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode) for _ in range(num_layers)]), layers.FirstBranch(), layers.LayerNorm(), layers.Mean(axis=1), layers.Dense(num_classes), layers.LogSoftmax())", "docstring": "Transformer encoder.\n\nArgs:\nvocab_size: int: vocab size\nnum_classes: how many classes on output\nfeature_depth: int:  depth of embedding\nfeedforward_depth: int: depth of feed-forward layer\nnum_layers: int: number of encoder/decoder layers\nnum_heads: int: number of attention heads\ndropout: float: dropout rate (how much to drop out)\nmax_len: int: maximum symbol length for positional encoding\nmode: str: 'train' or 'eval'\n\nReturns:\nthe Transformer encoder layer.", "source": "codesearchnet"}
{"code": "def dms_maker(self, force_rerun=False):\n        \n        log.debug('{}: running surface representation maker...'.format(self.id))\n\n        if not self.receptorpdb_path:\n            return ValueError('Please run protein_only_and_noH')\n\n        dms = op.join(self.dock_dir, '{}_receptor.dms'.format(self.id))\n\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=dms):\n            cmd = 'dms {} -n -w 1.4 -o {}'.format(self.receptorpdb_path, dms)\n            os.system(cmd)\n\n        self.dms_path = dms\n\n        if ssbio.utils.is_non_zero_file(dms):\n            self.dms_path = dms\n            log.debug('{}: successful dms execution'.format(self.dms_path))\n        else:\n            log.critical('{}: dms_maker failed to run on receptor file'.format(self.receptorpdb_path))", "docstring": "Create surface representation (dms file) of receptor\n\nArgs:\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"}
{"code": "def stage_tc_create_security_label(self, label, resource):\n        \n        sl_resource = resource.security_labels(label)\n        sl_resource.http_method = 'POST'\n        sl_response = sl_resource.request()\n        if sl_response.get('status') != 'Success':\n            self.log.warning(\n                '[tcex] Failed adding security label \"{}\" ({}).'.format(\n                    label, sl_response.get('response').text\n                )\n            )", "docstring": "Add a security label to a resource.\n\nArgs:\nlabel (str): The security label (must exit in ThreatConnect).\nresource (obj): An instance of tcex resource class.", "source": "juraj-google-style"}
{"code": "def __init__(self, n=3, cap_front=True, cap_end=True):\n        \n        if n < 2:\n            raise ValueError('n must be 1 or more')\n        super(Alkane, self).__init__()\n\n        \n        if not cap_front:\n            n += 1\n        if not cap_end:\n            n += 1\n        chain = mb.recipes.Polymer(CH2(), n=n-2, port_labels=('up', 'down'))\n        self.add(chain, 'chain')\n\n        if cap_front:\n            self.add(CH3(), \"methyl_front\")\n            mb.force_overlap(move_this=self['chain'],\n                             from_positions=self['chain']['up'],\n                             to_positions=self['methyl_front']['up'])\n        else:\n            \n            self.add(chain['up'], 'up', containment=False)\n\n        if cap_end:\n            self.add(CH3(), 'methyl_end')\n            mb.force_overlap(self['methyl_end'], self['methyl_end']['up'], self['chain']['down'])\n        else:\n            \n            self.add(chain['down'], 'down', containment=False)", "docstring": "Initialize an Alkane Compound.\n\nArgs:\nn: Number of carbon atoms.\ncap_front: Add methyl group to beginning of chain ('down' port).\ncap_end: Add methyl group to end of chain ('up' port).", "source": "juraj-google-style"}
{"code": "def ping(self, timeout=12):\n        \n        self.conn(\"POST\", \"{0}/users/ME/endpoints/{1}/active\".format(self.conn.msgsHost, self.id),\n                  auth=SkypeConnection.Auth.RegToken, json={\"timeout\": timeout})", "docstring": "Send a keep-alive request for the endpoint.\n\nArgs:\ntimeout (int): maximum amount of time for the endpoint to stay active", "source": "juraj-google-style"}
{"code": "def config():\n    out = shell.run('git config --list', capture=True, never_pretend=True).stdout.strip()\n    result = {}\n    for line in out.splitlines():\n        (name, value) = line.split('=', 1)\n        result[name.strip()] = value.strip()\n    return result", "docstring": "Return the current git configuration.\n\nReturns:\ndict[str, Any]: The current git config taken from ``git config --list``.", "source": "codesearchnet"}
{"code": "def l2_regression_loss(y, target, name=None):\n  \n  with tf.name_scope(name, 'l2_regression', [y, target]) as scope:\n    y = tf.convert_to_tensor(y, name='y')\n    target = tf.convert_to_tensor(target, name='target')\n    return tf.sqrt(l2_regression_sq_loss(y, target, name=scope))", "docstring": "Calculates the square root of the SSE between y and target.\n\nArgs:\ny: the calculated values.\ntarget: the desired values.\nname: the name for this op, defaults to l2_regression\nReturns:\nA tensorflow op.", "source": "juraj-google-style"}
{"code": "def dict_from_file(filename, key_type=str):\n    mapping = {}\n    with open(filename, 'r') as f:\n        for line in f:\n            items = line.rstrip('\\n').split()\n            assert (len(items) >= 2)\n            key = key_type(items[0])\n            val = (items[1:] if (len(items) > 2) else items[1])\n            mapping[key] = val\n    return mapping", "docstring": "Load a text file and parse the content as a dict.\n\nEach line of the text file will be two or more columns splited by\nwhitespaces or tabs. The first column will be parsed as dict keys, and\nthe following columns will be parsed as dict values.\n\nArgs:\nfilename(str): Filename.\nkey_type(type): Type of the dict's keys. str is user by default and\ntype conversion will be performed if specified.\n\nReturns:\ndict: The parsed contents.", "source": "codesearchnet"}
{"code": "def fn_with_custom_grad(grad_fn, use_global_vars=False):\n\n    def dec(fn):\n\n        @functools.wraps(fn)\n        def wrapped(*args):\n            return _fn_with_custom_grad(fn, args, grad_fn, use_global_vars=use_global_vars)\n        return wrapped\n    return dec", "docstring": "Decorator to create a subgraph with a custom gradient function.\n\nThe subgraph created by the decorated function is NOT put in a Defun and so\ndoes not suffer from the limitations of the Defun (all subgraph ops on the\nsame device, no summaries).\n\nArgs:\ngrad_fn: function with signature\n(inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),\nall of which are lists of Tensors.\nuse_global_vars: if True, variables will be the global variables created.\nIf False, will be the trainable variables.\n\nReturns:\nDecorator for function such that the gradient is defined by grad_fn.", "source": "codesearchnet"}
{"code": "def set_tensor_final(self, tensor_name):\n    tensor = self._name_to_tensor(tensor_name)\n    self._final_tensors.add(tensor)", "docstring": "Denotes a tensor as a final output of the computation.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.", "source": "codesearchnet"}
{"code": "def resolve_variables(self, provided_variables):\n        \n        self.resolved_variables = {}\n        defined_variables = self.defined_variables()\n        variable_dict = dict((var.name, var) for var in provided_variables)\n        for var_name, var_def in defined_variables.items():\n            value = resolve_variable(\n                var_name,\n                var_def,\n                variable_dict.get(var_name),\n                self.name\n            )\n            self.resolved_variables[var_name] = value", "docstring": "Resolve the values of the blueprint variables.\n\nThis will resolve the values of the `VARIABLES` with values from the\nenv file, the config, and any lookups resolved.\n\nArgs:\nprovided_variables (list of :class:`stacker.variables.Variable`):\nlist of provided variables", "source": "juraj-google-style"}
{"code": "def with_inverse(points, noise):\n    \n    \n    n_points = len(points)/2\n    break_point = n_points\n\n    points_part = copy.deepcopy(points)\n    points_part = list(reversed(points_part))\n    part = kalman_filter(points_part, noise)\n    total = kalman_filter(points, noise)\n\n    result = list(reversed(part))[:break_point] + total[break_point:]\n    result[break_point] = point_mean(part[break_point], total[break_point])\n\n    return result", "docstring": "Smooths a set of points\n\nIt smooths them twice, once in given order, another one in the reverse order.\nThe the first half of the results will be taken from the reverse order and\nthe second half from the normal order.\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nnoise (float): Expected noise, the higher it is the more the path will\nbe smoothed.\nReturns:\n:obj:`list` of :obj:`Point`", "source": "juraj-google-style"}
{"code": "def _SwitchRefOrTensor(data, pred, name='Switch'):\n    data = ops.convert_to_tensor_or_composite(data, name='data')\n    with ops.colocate_with(data, ignore_existing=True):\n        if isinstance(data, tensor_lib.Tensor):\n            if data.dtype._is_ref_dtype:\n                return ref_switch(data, pred, name=name)\n        return switch(data, pred, name=name)", "docstring": "Forwards `data` to an output determined by `pred`.\n\nIf `pred` is false, the `data` input is forwarded to the first output.\nOtherwise, the data goes to the second output.\n\nThis op handles `Tensor`s and `IndexedSlices`.\n\nArgs:\ndata: The tensor to be forwarded to the appropriate output.\npred: A scalar that specifies which output port will receive data.\nname: A name for this operation (optional).\n\nReturns:\n`(output_false, output_true)`: If `pred` is true, data will be forwarded to\n`output_true`, otherwise it goes to `output_false`.\n\nRaises:\nTypeError: if data is not a Tensor or IndexedSlices", "source": "github-repos"}
{"code": "def parse_individual(sample):\n    \n    ind_info = {}\n    if 'sample_id' not in sample:\n        raise PedigreeError(\"One sample is missing 'sample_id'\")\n    sample_id = sample['sample_id']\n    \n    if 'sex' not in sample:\n        raise PedigreeError(\"Sample %s is missing 'sex'\" % sample_id)\n    sex = sample['sex']\n    if sex not in REV_SEX_MAP:\n        log.warning(\"'sex' is only allowed to have values from {}\"\n                    .format(', '.join(list(REV_SEX_MAP.keys()))))\n        raise PedigreeError(\"Individual %s has wrong formated sex\" % sample_id)\n\n    \n    if 'phenotype' not in sample:\n        raise PedigreeError(\"Sample %s is missing 'phenotype'\"\n                            % sample_id)\n    phenotype = sample['phenotype']\n    if phenotype not in REV_PHENOTYPE_MAP:\n        log.warning(\"'phenotype' is only allowed to have values from {}\"\n                    .format(', '.join(list(REV_PHENOTYPE_MAP.keys()))))\n        raise PedigreeError(\"Individual %s has wrong formated phenotype\" % sample_id)\n\n    ind_info['individual_id'] = sample_id\n    ind_info['display_name'] = sample.get('sample_name', sample['sample_id'])\n\n    ind_info['sex'] = sex\n    ind_info['phenotype'] = phenotype\n\n    ind_info['father'] = sample.get('father')\n    ind_info['mother'] = sample.get('mother')\n\n    ind_info['confirmed_parent'] = sample.get('confirmed_parent')\n    ind_info['confirmed_sex'] = sample.get('confirmed_sex')\n    ind_info['predicted_ancestry'] = sample.get('predicted_ancestry')\n\n    bam_file = sample.get('bam_path')\n    if bam_file:\n        ind_info['bam_file'] = bam_file\n\n    mt_bam = sample.get('mt_bam')\n    if mt_bam:\n        ind_info['mt_bam'] = mt_bam\n\n    analysis_type = sample.get('analysis_type')\n    if analysis_type:\n        ind_info['analysis_type'] = analysis_type\n\n    ind_info['capture_kits'] = ([sample.get('capture_kit')]\n                                if 'capture_kit' in sample else [])\n\n    \n    vcf2cytosure = sample.get('vcf2cytosure')\n    if vcf2cytosure:\n        ind_info['vcf2cytosure'] = vcf2cytosure\n\n    \n    tumor_type = sample.get('tumor_type')\n    if tumor_type:\n        ind_info['tumor_type'] = tumor_type\n\n    tumor_mutational_burden = sample.get('tmb')\n    if tumor_mutational_burden:\n        ind_info['tmb'] = tumor_mutational_burden\n\n    msi = sample.get('msi')\n    if msi:\n        ind_info['msi'] = msi\n\n    tumor_purity = sample.get('tumor_purity')\n    if tumor_purity:\n        ind_info['tumor_purity'] = tumor_purity\n\n    return ind_info", "docstring": "Parse individual information\n\nArgs:\nsample (dict)\n\nReturns:\n{\n'individual_id': str,\n'father': str,\n'mother': str,\n'display_name': str,\n'sex': str,\n'phenotype': str,\n'bam_file': str,\n'vcf2cytosure': str,\n'analysis_type': str,\n'capture_kits': list(str),\n}", "source": "juraj-google-style"}
{"code": "def emit_counter(self, category: str, name: str, pid: int, timestamp: int, counter: str, value: int) -> None:\n    event = self._create_event('C', category, name, pid, 0, timestamp)\n    event['args'] = {counter: value}\n    self._events.append(event)", "docstring": "Emits a record for a single counter.\n\nArgs:\ncategory: The event category as a string.\nname:  The event name as a string.\npid:  Identifier of the process generating this event as an integer.\ntimestamp:  The timestamp of this event as a long integer.\ncounter: Name of the counter as a string.\nvalue:  Value of the counter as an integer.", "source": "github-repos"}
{"code": "def _extract_dir(self, dir_not_exists, output):\n    \n    if not dir_not_exists:\n        lst = output.dir_cache\n        return {i[\"relpath\"]: i[\"md5\"] for i in lst}\n    return {}", "docstring": "Extract the content of dvc tree file\nArgs:\nself(object) - Repo class instance\ndir_not_exists(bool) - flag for directory existence\noutput(object) - OutputLOCAL class instance\nReturns:\ndict - dictionary with keys - paths to file in .dvc/cache\nvalues -checksums for that files", "source": "juraj-google-style"}
{"code": "def write_object_proto_for_resource_variable(resource_variable, proto, options, enforce_naming=True):\n    proto.variable.SetInParent()\n    if enforce_naming and (not resource_variable.name.endswith(':0')):\n        raise ValueError(f\"Cowardly refusing to save variable {resource_variable.name} because of unexpected suffix in the name (expected ':0')which won't be restored.\")\n    proto.variable.name = tensor_module.get_op_name(resource_variable.name)\n    proto.variable.trainable = resource_variable.trainable\n    proto.variable.dtype = resource_variable.dtype.as_datatype_enum\n    proto.variable.synchronization = resource_variable.synchronization.value\n    proto.variable.aggregation = resource_variable.aggregation.value\n    proto.variable.shape.CopyFrom(resource_variable.shape.as_proto())\n    if options.experimental_variable_policy._save_variable_devices():\n        if hasattr(resource_variable, 'device'):\n            proto.variable.device = resource_variable.device", "docstring": "Writes additional information of the variable into the SavedObject proto.\n\nThis allows users to define a `hook` to provide extra information of the\nvariable to the SavedObject.\n\nFor example, DistributedVariable class would fill in components in the\ndistributed context.\n\nArgs:\nresource_variable: A `ResourceVariable` or `DistributedValue` that has the\ninformation to be saved into the proto.\nproto: `SavedObject` proto to update.\noptions: A `SaveOption` instance that configures save behavior.\nenforce_naming: A bool determining whether to check that names end in the\nexpected string ':0'", "source": "github-repos"}
{"code": "def check_config_options(_class, required_options, optional_options, options):\n    for opt in required_options:\n        if (opt not in options):\n            msg = 'Required option missing: {0}'\n            raise ConfigurationError(msg.format(opt))\n    for opt in options:\n        if (opt not in (required_options + optional_options)):\n            msg = 'Unknown config option to `{0}`: {1}'\n            _logger.warn(msg.format(_class, opt))", "docstring": "Helper method to check options.\n\nArguments:\n_class           -- the original class that takes received the options.\nrequired_options -- the options that are required. If they are not\npresent, a ConfigurationError is raised. Given as a\ntuple.\noptional_options -- the options that are optional. Given options that are\nnot present in `optional_options` nor in\n`required_options` will be logged as unrecognized.\nGiven as a tuple.\noptions          -- a dictionary of given options.\n\nRaises:\nConfigurationError -- if any required option is missing.", "source": "codesearchnet"}
{"code": "def add_logged_in_session(self, response=None):\n    if (not response):\n        response = self.get('go/api/pipelines.xml')\n    self._set_session_cookie(response)\n    if (not self._session_id):\n        raise AuthenticationFailed('No session id extracted from request.')\n    response = self.get('go/pipelines')\n    match = re.search('name=\"authenticity_token\".+?value=\"([^\"]+)', response.read().decode('utf-8'))\n    if match:\n        self._authenticity_token = match.group(1)\n    else:\n        raise AuthenticationFailed('Authenticity token not found on page')", "docstring": "Make the request appear to be coming from a browser\n\nThis is to interact with older parts of Go that doesn't have a\nproper API call to be made. What will be done:\n\n1. If no response passed in a call to `go/api/pipelines.xml` is\nmade to get a valid session\n2. `JSESSIONID` will be populated from this request\n3. A request to `go/pipelines` will be so the\n`authenticity_token` (CSRF) can be extracted. It will then\nsilently be injected into `post_args` on any POST calls that\ndoesn't start with `go/api` from this point.\n\nArgs:\nresponse: a :class:`Response` object from a previously successful\nAPI call. So we won't have to query `go/api/pipelines.xml`\nunnecessarily.\n\nRaises:\nHTTPError: when the HTTP request fails.\nAuthenticationFailed: when failing to get the `session_id`\nor the `authenticity_token`.", "source": "codesearchnet"}
{"code": "def show_fields(self, block=None):\n        \n        mapping = self._mapping()\n        if block is None:\n            return mapping\n        elif block == \"top\":\n            blocks = set()\n            for key in mapping.keys():\n                blocks.add(key.split(\".\")[0])\n            block_map = {}\n            for b in blocks:\n                block_map[b] = \"object\"\n        else:\n            block_map = {}\n            for key, value in mapping.items():\n                if key.startswith(block):\n                    block_map[key] = value\n        return block_map", "docstring": "Retrieve and return the mapping for the given metadata block.\n\nArguments:\nblock (str): The top-level field to fetch the mapping for (for example, ``\"mdf\"``),\nor the special values ``None`` for everything or ``\"top\"`` for just the\ntop-level fields.\n**Default:** ``None``.\nindex (str): The Search index to map. **Default:** The current index.\n\nReturns:\ndict: ``field:datatype`` pairs.", "source": "juraj-google-style"}
{"code": "def restart(self, container, timeout=10):\n    params = {'t': timeout}\n    url = self._url('/containers/{0}/restart', container)\n    conn_timeout = self.timeout\n    if (conn_timeout is not None):\n        conn_timeout += timeout\n    res = self._post(url, params=params, timeout=conn_timeout)\n    self._raise_for_status(res)", "docstring": "Restart a container. Similar to the ``docker restart`` command.\n\nArgs:\ncontainer (str or dict): The container to restart. If a dict, the\n``Id`` key is used.\ntimeout (int): Number of seconds to try to stop for before killing\nthe container. Once killed it will then be restarted. Default\nis 10 seconds.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites \"{}\" are required in method \"{}\" but not found, please install them first.'):\n\n    def wrap(func):\n\n        @functools.wraps(func)\n        def wrapped_func(*args, **kwargs):\n            requirements = ([prerequisites] if isinstance(prerequisites, str) else prerequisites)\n            missing = []\n            for item in requirements:\n                if (not checker(item)):\n                    missing.append(item)\n            if missing:\n                print(msg_tmpl.format(', '.join(missing), func.__name__))\n                raise RuntimeError('Prerequisites not meet.')\n            else:\n                return func(*args, **kwargs)\n        return wrapped_func\n    return wrap", "docstring": "A decorator factory to check if prerequisites are satisfied.\n\nArgs:\nprerequisites (str of list[str]): Prerequisites to be checked.\nchecker (callable): The checker method that returns True if a\nprerequisite is meet, False otherwise.\nmsg_tmpl (str): The message template with two variables.\n\nReturns:\ndecorator: A specific decorator.", "source": "codesearchnet"}
{"code": "def lasio_get(l, section, item, attrib='value', default=None, remap=None, funcs=None):\n    remap = (remap or {})\n    item_to_fetch = remap.get(item, item)\n    if (item_to_fetch is None):\n        return None\n    try:\n        obj = getattr(l, section)\n        result = getattr(obj, item_to_fetch)[attrib]\n    except:\n        return default\n    if (funcs is not None):\n        f = funcs.get(item, null)\n        result = f(result)\n    return result", "docstring": "Grabs, renames and transforms stuff from a lasio object.\n\nArgs:\nl (lasio): a lasio instance.\nsection (str): The LAS section to grab from, eg ``well``\nitem (str): The item in the LAS section to grab from, eg ``name``\nattrib (str): The attribute of the item to grab, eg ``value``\ndefault (str): What to return instead.\nremap (dict): Optional. A dict of 'old': 'new' LAS field names.\nfuncs (dict): Optional. A dict of 'las field': function() for\nimplementing a transform before loading. Can be a lambda.\n\nReturns:\nThe transformed item.", "source": "codesearchnet"}
{"code": "def get_ast_dict(belstr, component_type: str = \"\"):\n    \n\n    errors = []\n    parsed = {}\n    bels = list(belstr)\n    char_locs, errors = parse_chars(bels, errors)\n    parsed, errors = parse_functions(belstr, char_locs, parsed, errors)\n    parsed, errors = parse_args(bels, char_locs, parsed, errors)\n    parsed, errors = arg_types(parsed, errors)\n    parsed, errors = parse_relations(belstr, char_locs, parsed, errors)\n    parsed, errors = parse_nested(bels, char_locs, parsed, errors)\n    errors = parsed_top_level_errors(parsed, errors)\n\n    ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)\n\n    return ast, errors", "docstring": "Convert BEL string to AST dictionary\n\nArgs:\nbelstr: BEL string\ncomponent_type: Empty string or 'subject' or 'object' to indicate that we\nare parsing the subject or object field input", "source": "juraj-google-style"}
{"code": "def send(self, **req_kwargs):\n    i = 0\n    while True:\n        response = self._send(**req_kwargs).json()\n        if ('error' not in response):\n            break\n        error = response['error']\n        if (error['code'] != 401):\n            raise exception.APIException(error['code'], error)\n        if (i >= self.RETRY_CNT):\n            raise exception.APIException(error['code'], error)\n        logger.info('Refreshing access token')\n        self._auth.refresh()\n        i += 1\n    return response", "docstring": "Send an authenticated request to a Google API.\nAutomatically retries if the access token has expired.\n\nArgs:\n**req_kwargs: Arbitrary keyword arguments to pass to Requests.\n\nReturn:\ndict: The parsed JSON response.\n\nRaises:\nAPIException: If the server returns an error.\nLoginException: If :py:meth:`login` has not been called.", "source": "codesearchnet"}
{"code": "def img_to_array(img, data_format=None, dtype=None):\n    data_format = backend.standardize_data_format(data_format)\n    if dtype is None:\n        dtype = backend.floatx()\n    x = np.asarray(img, dtype=dtype)\n    if len(x.shape) == 3:\n        if data_format == 'channels_first':\n            x = x.transpose(2, 0, 1)\n    elif len(x.shape) == 2:\n        if data_format == 'channels_first':\n            x = x.reshape((1, x.shape[0], x.shape[1]))\n        else:\n            x = x.reshape((x.shape[0], x.shape[1], 1))\n    else:\n        raise ValueError(f'Unsupported image shape: {x.shape}')\n    return x", "docstring": "Converts a PIL Image instance to a NumPy array.\n\nExample:\n\n```python\nfrom PIL import Image\nimg_data = np.random.random(size=(100, 100, 3))\nimg = keras.utils.array_to_img(img_data)\narray = keras.utils.image.img_to_array(img)\n```\n\nArgs:\nimg: Input PIL Image instance.\ndata_format: Image data format, can be either `\"channels_first\"` or\n`\"channels_last\"`. Defaults to `None`, in which case the global\nsetting `keras.backend.image_data_format()` is used (unless you\nchanged it, it defaults to `\"channels_last\"`).\ndtype: Dtype to use. `None` means the global setting\n`keras.backend.floatx()` is used (unless you changed it, it\ndefaults to `\"float32\"`).\n\nReturns:\nA 3D NumPy array.", "source": "github-repos"}
{"code": "def _RemoveAuthorizedKeys(self, user):\n    pw_entry = self._GetUser(user)\n    if (not pw_entry):\n        return\n    home_dir = pw_entry.pw_dir\n    authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')\n    if os.path.exists(authorized_keys_file):\n        try:\n            os.remove(authorized_keys_file)\n        except OSError as e:\n            message = 'Could not remove authorized keys for user %s. %s.'\n            self.logger.warning(message, user, str(e))", "docstring": "Remove a Linux user account's authorized keys file to prevent login.\n\nArgs:\nuser: string, the Linux user account to remove access.", "source": "codesearchnet"}
{"code": "def get_platform():\n    global PLATFORM\n    cmd = 'uname'\n    out, err = run_shell_cmd(cmd)\n    platform_detected = out.strip().lower()\n    if platform_detected != 'linux':\n        if err and FLAGS.debug:\n            print('Error in detecting platform:\\n %s' % str(err))\n        print('Error: Detected unsupported operating system.\\nStopping...')\n        sys.exit(1)\n    else:\n        PLATFORM = platform_detected\n    return PLATFORM", "docstring": "Retrieves platform information.\n\nCurrently the script only support linux. If other platoforms such as Windows\nor MacOS is detected, it throws an error and terminates.\n\nReturns:\nString that is platform type.\ne.g. 'linux'", "source": "github-repos"}
{"code": "def __init__(self, lexer=None, **kwargs):\n        \n        if lexer is not None:\n            if isinstance(lexer, JbossLexer):\n                self.lexer = lexer.lexer\n            else:\n                \n                self.lexer = lexer\n        else:\n            self.lexer = JbossLexer().lexer\n\n        kwargs.setdefault('debug', False)\n        kwargs.setdefault('write_tables', False)\n        self.parser = ply.yacc.yacc(module=self, **kwargs)", "docstring": "Constructs the JsonParser based on the grammar contained herein.\nSuccessful construction builds the ply.yacc instance and sets\nself.parser.\nArgs:\nlexer: A ply.lex or JsonLexer instance that will produce JSON_TOKENS.", "source": "juraj-google-style"}
{"code": "def parents(self, as_resources=False):\n\n\t\t\n\n\t\tparents = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]\n\n\t\t\n\t\tif as_resources:\n\t\t\tlogger.debug('retrieving parent as resource')\n\t\t\tparents = [ self.repo.get_resource(parent) for parent in parents ]\n\n\t\treturn parents", "docstring": "method to return hierarchical parents of this resource\n\nArgs:\nas_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n(list): list of resources", "source": "juraj-google-style"}
{"code": "def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):\n    n_atoms = len(struct.species)\n    fc = np.array(struct.frac_coords)\n    species = list(map(str, struct.species))\n    for (i, item) in enumerate(species):\n        if (not (item in ldict.keys())):\n            species[i] = str(Specie.from_string(item).element)\n    latmat = struct.lattice.matrix\n    connected_list = []\n    for i in range(n_atoms):\n        for j in range((i + 1), n_atoms):\n            max_bond_length = ((ldict[species[i]] + ldict[species[j]]) + tolerance)\n            add_ij = False\n            for move_cell in itertools.product([0, 1, (- 1)], [0, 1, (- 1)], [0, 1, (- 1)]):\n                if (not add_ij):\n                    frac_diff = ((fc[j] + move_cell) - fc[i])\n                    distance_ij = np.dot(latmat.T, frac_diff)\n                    if (np.linalg.norm(distance_ij) < max_bond_length):\n                        add_ij = True\n            if add_ij:\n                connected_list.append([i, j])\n    return np.array(connected_list)", "docstring": "Finds the list of bonded atoms.\n\nArgs:\nstruct (Structure): Input structure\ntolerance: length in angstroms used in finding bonded atoms. Two atoms are considered bonded if (radius of atom 1) + (radius of atom 2) + (tolerance) < (distance between atoms 1 and 2). Default value = 0.45, the value used by JMol and Cheon et al.\nldict: dictionary of bond lengths used in finding bonded atoms. Values from JMol are used as default\nstandardize: works with conventional standard structures if True. It is recommended to keep this as True.\n\nReturns:\nconnected_list: A numpy array of shape (number of bonded pairs, 2); each row of is of the form [atomi, atomj].\natomi and atomj are the indices of the atoms in the input structure.\nIf any image of atomj is bonded to atomi with periodic boundary conditions, [atomi, atomj] is included in the list.\nIf atomi is bonded to multiple images of atomj, it is only counted once.", "source": "codesearchnet"}
{"code": "def update_config(self, config, timeout=(- 1)):\n    return self._client.update(config, uri=(self.URI + '/config'), timeout=timeout)", "docstring": "Updates the remote server configuration and the automatic backup schedule for backup.\n\nArgs:\nconfig (dict): Object to update.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Backup details.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    bos_token_id = [1] if self.add_bos_token else []\n    eos_token_id = [1] if self.add_eos_token else []\n    if token_ids_1 is None:\n        return bos_token_id + [0] * len(token_ids_0) + eos_token_id\n    return bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id + [0] * len(token_ids_1) + eos_token_id", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def fetch(self, transfer_id, data={}, **kwargs):\n        \n        return super(Transfer, self).fetch(transfer_id, data, **kwargs)", "docstring": "Fetch Transfer for given Id\n\nArgs:\ntransfer_id : Id for which transfer object has to be retrieved\n\nReturns:\nTransfer dict for given transfer Id", "source": "juraj-google-style"}
{"code": "async def verify_task_types(chain):\n    \n    valid_task_types = get_valid_task_types()\n    task_count = {}\n    for obj in chain.get_all_links_in_chain():\n        task_type = obj.task_type\n        log.info(\"Verifying {} {} as a {} task...\".format(obj.name, obj.task_id, task_type))\n        task_count.setdefault(task_type, 0)\n        task_count[task_type] += 1\n        \n        \n        await valid_task_types[task_type](chain, obj)\n    return task_count", "docstring": "Verify the task type (e.g. decision, build) of each link in the chain.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\n\nReturns:\ndict: mapping task type to the number of links.", "source": "juraj-google-style"}
{"code": "def __init__(self, name: str, ctx: 'context.Context'):\n    super().__init__(name, ctx)\n    self._cls = None\n    self.members = datatypes.MonitorDict()\n    self._instance_type_parameters: 'datatypes.AliasingMonitorDict[str, cfg.Variable]' = datatypes.AliasingMonitorDict()\n    self._maybe_missing_members: bool | None = None\n    self._type_key: 'frozenset[_base.BaseValue | _typing.LateAnnotation | tuple[str, frozenset]] | None' = None\n    self._fullhash = None\n    self._cached_changestamps = self._get_changestamps()", "docstring": "Initialize a SimpleValue.\n\nArgs:\nname: Name of this value. For debugging and error reporting.\nctx: The abstract context.", "source": "github-repos"}
{"code": "def looks_like_url(url):\n    if (not isinstance(url, basestring)):\n        return False\n    if ((not isinstance(url, basestring)) or (len(url) >= 1024) or (not cre_url.match(url))):\n        return False\n    return True", "docstring": "Simplified check to see if the text appears to be a URL.\n\nSimilar to `urlparse` but much more basic.\n\nReturns:\nTrue if the url str appears to be valid.\nFalse otherwise.\n\n>>> url = looks_like_url(\"totalgood.org\")\n>>> bool(url)\nTrue", "source": "codesearchnet"}
{"code": "def _check_module_is_image_embedding(module_spec):\n  \n  issues = []\n\n  \n  \n  \n  input_info_dict = module_spec.get_input_info_dict()\n  if (list(input_info_dict.keys()) != [\"images\"] or\n      input_info_dict[\"images\"].dtype != tf.float32):\n    issues.append(\"Module 'default' signature must require a single input, \"\n                  \"which must have type float32 and name 'images'.\")\n  else:\n    try:\n      image_util.get_expected_image_size(module_spec)\n    except ValueError as e:\n      issues.append(\"Module does not support hub.get_expected_image_size(); \"\n                    \"original error was:\\n\" + str(e))  \n\n  \n  \n  output_info_dict = module_spec.get_output_info_dict()\n  if \"default\" not in output_info_dict:\n    issues.append(\"Module 'default' signature must have a 'default' output.\")\n  else:\n    output_type = output_info_dict[\"default\"].dtype\n    output_shape = output_info_dict[\"default\"].get_shape()\n    if not (output_type == tf.float32 and output_shape.ndims == 2 and\n            output_shape.dims[1].value):\n      issues.append(\"Module 'default' signature must have a 'default' output \"\n                    \"of tf.Tensor(shape=(_,K), dtype=float32).\")\n\n  if issues:\n    raise ValueError(\"Module is not usable as image embedding: %r\" % issues)", "docstring": "Raises ValueError if `module_spec` is not usable as image embedding.\n\nArgs:\nmodule_spec: A `_ModuleSpec` to test.\n\nRaises:\nValueError: if `module_spec` default signature is not compatible with\nmappingan \"images\" input to a Tensor(float32, shape=(_,K)).", "source": "juraj-google-style"}
{"code": "def _CheckIsFile(self, file_entry):\n    \n    if definitions.FILE_ENTRY_TYPE_FILE not in self._file_entry_types:\n      return False\n    return file_entry.IsFile()", "docstring": "Checks the is_file find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "juraj-google-style"}
{"code": "def grep(regex, output):\n    lines = output.decode('utf-8').strip().splitlines()\n    results = []\n    for line in lines:\n        if re.search(regex, line):\n            results.append(line.strip())\n    return results", "docstring": "Similar to linux's `grep`, this returns the line in an output stream\nthat matches a given regex pattern.\n\nIt does not rely on the `grep` binary and is not sensitive to line endings,\nso it can be used cross-platform.\n\nArgs:\nregex: string, a regex that matches the expected pattern.\noutput: byte string, the raw output of the adb cmd.\n\nReturns:\nA list of strings, all of which are output lines that matches the\nregex pattern.", "source": "codesearchnet"}
{"code": "def record_corrected_value(self, value, expected_interval, count=1):\n        \n        while True:\n            if not self.record_value(value, count):\n                return False\n            if value <= expected_interval or expected_interval <= 0:\n                return True\n            value -= expected_interval", "docstring": "Record a new value into the histogram and correct for\ncoordinated omission if needed\n\nArgs:\nvalue: the value to record (must be in the valid range)\nexpected_interval: the expected interval between 2 value samples\ncount: incremental count (defaults to 1)", "source": "juraj-google-style"}
{"code": "def parse_response(service, response, search_type):\n    _LOG.debug('Parse response \"%s\" from service \"%s\" of type \"%s\"', response, service, search_type)\n    items = []\n    if ('searchResult' in response):\n        response = response['searchResult']\n    elif ('getMetadataResult' in response):\n        response = response['getMetadataResult']\n    else:\n        raise ValueError('\"response\" should contain either the key \"searchResult\" or \"getMetadataResult\"')\n    search_metadata = {'number_returned': response['count'], 'total_matches': None, 'search_type': search_type, 'update_id': None}\n    for result_type in ('mediaCollection', 'mediaMetadata'):\n        result_type_proper = (result_type[0].upper() + result_type[1:])\n        raw_items = response.get(result_type, [])\n        if isinstance(raw_items, OrderedDict):\n            raw_items = [raw_items]\n        for raw_item in raw_items:\n            class_key = (result_type_proper + raw_item['itemType'].title())\n            cls = get_class(class_key)\n            items.append(cls.from_music_service(service, raw_item))\n    return SearchResult(items, **search_metadata)", "docstring": "Parse the response to a music service query and return a SearchResult\n\nArgs:\nservice (MusicService): The music service that produced the response\nresponse (OrderedDict): The response from the soap client call\nsearch_type (str): A string that indicates the search type that the\nresponse is from\n\nReturns:\nSearchResult: A SearchResult object", "source": "codesearchnet"}
{"code": "def _process(compressor, input_filename, output_filename):\n    compressor(input_filename, output_filename)\n    result_size = os.path.getsize(output_filename)\n    return _CompressorResult(result_size, output_filename, compressor.__name__)", "docstring": "Helper function to compress an image.\n\nReturns:\n_CompressorResult named tuple, with the resulting size, the name of the\noutput file and the name of the compressor.", "source": "codesearchnet"}
{"code": "def percent_point(self, U):\n        \n        self.check_fit()\n        return norm.ppf(U, loc=self.mean, scale=self.std)", "docstring": "Given a cumulated distribution value, returns a value in original space.\n\nArguments:\nU: `np.ndarray` of shape (n, 1) and values in [0,1]\n\nReturns:\n`np.ndarray`: Estimated values in original space.", "source": "juraj-google-style"}
{"code": "def open_writer(self, init_result, uid):\n    raise NotImplementedError", "docstring": "Opens a writer for writing a bundle of elements to the sink.\n\nArgs:\ninit_result: the result of initialize_write() invocation.\nuid: a unique identifier generated by the system.\nReturns:\nan ``iobase.Writer`` that can be used to write a bundle of records to the\ncurrent sink.", "source": "github-repos"}
{"code": "def load_model_from_hdf5(filepath, custom_objects=None, compile=True):\n    if h5py is None:\n        raise ImportError('`load_model()` using h5 format requires h5py. Could not import h5py.')\n    if not custom_objects:\n        custom_objects = {}\n    gco = object_registration.GLOBAL_CUSTOM_OBJECTS\n    tlco = global_state.get_global_attribute('custom_objects_scope_dict', {})\n    custom_objects = {**custom_objects, **gco, **tlco}\n    opened_new_file = not isinstance(filepath, h5py.File)\n    if opened_new_file:\n        f = h5py.File(filepath, mode='r')\n    else:\n        f = filepath\n    model = None\n    try:\n        model_config = f.attrs.get('model_config')\n        if model_config is None:\n            raise ValueError(f'No model config found in the file at {filepath}.')\n        if hasattr(model_config, 'decode'):\n            model_config = model_config.decode('utf-8')\n        model_config = json_utils.decode(model_config)\n        with saving_options.keras_option_scope(use_legacy_config=True):\n            model = saving_utils.model_from_config(model_config, custom_objects=custom_objects)\n            load_weights_from_hdf5_group(f['model_weights'], model)\n        if compile:\n            training_config = f.attrs.get('training_config')\n            if hasattr(training_config, 'decode'):\n                training_config = training_config.decode('utf-8')\n            if training_config is None:\n                logging.warning('No training configuration found in the save file, so the model was *not* compiled. Compile it manually.')\n                return model\n            training_config = json_utils.decode(training_config)\n            model.compile(**saving_utils.compile_args_from_training_config(training_config, custom_objects))\n            saving_utils.try_build_compiled_arguments(model)\n            if 'optimizer_weights' in f:\n                try:\n                    if isinstance(model.optimizer, optimizers.Optimizer):\n                        model.optimizer.build(model._trainable_variables)\n                    else:\n                        model.optimizer._create_all_weights(model._trainable_variables)\n                except (NotImplementedError, AttributeError):\n                    logging.warning('Error when creating the weights of optimizer {}, making it impossible to restore the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')\n                optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)\n                try:\n                    model.optimizer.set_weights(optimizer_weight_values)\n                except ValueError:\n                    logging.warning('Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')\n    finally:\n        if opened_new_file:\n            f.close()\n    return model", "docstring": "Loads a model saved via `save_model_to_hdf5`.\n\nArgs:\nfilepath: One of the following:\n- String, path to the saved model\n- `h5py.File` object from which to load the model\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\ncompile: Boolean, whether to compile the model\nafter loading.\n\nReturns:\nA Keras model instance. If an optimizer was found\nas part of the saved model, the model is already\ncompiled. Otherwise, the model is uncompiled and\na warning will be displayed. 
When `compile` is set\nto `False`, the compilation is omitted without any\nwarning.\n\nRaises:\nImportError: if h5py is not available.\nValueError: In case of an invalid savefile.", "source": "github-repos"}
{"code": "def AddCalledComponent(self, component, target, args, filename, lineno, capacity, action=CALLED_CALLABLE):\n    element = FireTraceElement(component=component, action=action, target=target, args=args, filename=filename, lineno=lineno, capacity=capacity)\n    self.elements.append(element)", "docstring": "Adds an element to the trace indicating that a component was called.\n\nAlso applies to instantiating a class.\n\nArgs:\ncomponent: The result of calling the callable.\ntarget: The name of the callable.\nargs: The args consumed in order to call this callable.\nfilename: The file in which the callable is defined, or None if N/A.\nlineno: The line number on which the callable is defined, or None if N/A.\ncapacity: (bool) Whether the callable could have accepted additional args.\naction: The value to include as the action in the FireTraceElement.", "source": "github-repos"}
{"code": "def get_appliance(self, id_or_uri, fields=''):\n    uri = ((self.URI + '/image-streamer-appliances/') + extract_id_from_uri(id_or_uri))\n    if fields:\n        uri += ('?fields=' + fields)\n    return self._client.get(uri)", "docstring": "Gets the particular Image Streamer resource based on its ID or URI.\n\nArgs:\nid_or_uri:\nCan be either the Os Deployment Server ID or the URI\nfields:\nSpecifies which fields should be returned in the result.\n\nReturns:\ndict: Image Streamer resource.", "source": "codesearchnet"}
{"code": "def copy_fhir_type_with_root_element_definition(self, root_element_definition: message.Message) -> 'FhirPathDataType':\n    return dataclasses.replace(self, root_element_definition=root_element_definition)", "docstring": "Copies the type and sets the root_element_definition.\n\nArgs:\nroot_element_definition: Element definition to set for the type.\n\nReturns:\nA copy of the original type with the root_element_definition set.", "source": "github-repos"}
{"code": "async def find_user(cls, config: Config, user: str) \\\n            -> Tuple[str, str]:\n        \n        with open(config.users_file, 'r') as users_file:\n            for line in users_file:\n                this_user, user_dir, password = line.split(':', 2)\n                if user == this_user:\n                    return password.rstrip('\\r\\n'), user_dir or user\n        raise InvalidAuth()", "docstring": "If the given user ID exists, return its expected password and\nmailbox path. Override this method to implement custom login logic.\n\nArgs:\nconfig: The maildir config object.\nuser: The expected user ID.\n\nRaises:\nInvalidAuth: The user ID was not valid.", "source": "juraj-google-style"}
{"code": "def calculate_bv_sum(site, nn_list, scale_factor=1.0):\n    el1 = Element(site.specie.symbol)\n    bvsum = 0\n    for (nn, dist) in nn_list:\n        el2 = Element(nn.specie.symbol)\n        if (((el1 in ELECTRONEG) or (el2 in ELECTRONEG)) and (el1 != el2)):\n            r1 = BV_PARAMS[el1]['r']\n            r2 = BV_PARAMS[el2]['r']\n            c1 = BV_PARAMS[el1]['c']\n            c2 = BV_PARAMS[el2]['c']\n            R = ((r1 + r2) - (((r1 * r2) * ((sqrt(c1) - sqrt(c2)) ** 2)) / ((c1 * r1) + (c2 * r2))))\n            vij = exp(((R - (dist * scale_factor)) / 0.31))\n            bvsum += (vij * (1 if (el1.X < el2.X) else (- 1)))\n    return bvsum", "docstring": "Calculates the BV sum of a site.\n\nArgs:\nsite:\nThe site\nnn_list:\nList of nearest neighbors in the format [(nn_site, dist), ...].\nscale_factor:\nA scale factor to be applied. This is useful for scaling distance,\nesp in the case of calculation-relaxed structures which may tend\nto under (GGA) or over bind (LDA).", "source": "codesearchnet"}
{"code": "def _usage_id_from_node(self, node, parent_id, id_generator=None):\n    if (id_generator is not None):\n        warnings.warn('Passing an id_generator directly is deprecated in favor of constructing the Runtime with the id_generator', DeprecationWarning, stacklevel=3)\n    id_generator = (id_generator or self.id_generator)\n    block_type = node.tag\n    node.attrib.pop('xblock-family', None)\n    def_id = id_generator.create_definition(block_type)\n    usage_id = id_generator.create_usage(def_id)\n    keys = ScopeIds(None, block_type, def_id, usage_id)\n    block_class = self.mixologist.mix(self.load_block_type(block_type))\n    aside_children = []\n    for child in node.iterchildren():\n        xblock_family = child.attrib.pop('xblock-family', None)\n        if xblock_family:\n            xblock_family = self._family_id_to_superclass(xblock_family)\n            if issubclass(xblock_family, XBlockAside):\n                aside_children.append(child)\n    for child in aside_children:\n        self._aside_from_xml(child, def_id, usage_id, id_generator)\n        node.remove(child)\n    block = block_class.parse_xml(node, self, keys, id_generator)\n    block.parent = parent_id\n    block.save()\n    return usage_id", "docstring": "Create a new usage id from an XML dom node.\n\nArgs:\nnode (lxml.etree.Element): The DOM node to interpret.\nparent_id: The usage ID of the parent block\nid_generator (IdGenerator): The :class:`.IdGenerator` to use\nfor creating ids", "source": "codesearchnet"}
{"code": "def start_services(self, service_alises):\n    for name in service_alises:\n        if name not in self._service_objects:\n            raise Error(self._device, 'No service is registered under the name \"%s\", cannot start.' % name)\n        service = self._service_objects[name]\n        if not service.is_alive:\n            service.start()", "docstring": "Starts the specified services.\n\nServices will be started in the order specified by the input list.\nNo-op for services that are already running.\n\nArgs:\nservice_alises: list of strings, the aliases of services to start.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(CancelResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE, local_stream):\n        self._asynchronous_correlation_value = primitives.ByteString(tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE)\n        self._asynchronous_correlation_value.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.CANCELLATION_RESULT, local_stream):\n        self._cancellation_result = primitives.Enumeration(enums.CancellationResult, tag=enums.Tags.CANCELLATION_RESULT)\n        self._cancellation_result.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Cancel response payload and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def get_case(family_lines, family_type='ped', vcf_path=None):\n    \n    family = None\n    LOG.info(\"Parsing family information\")\n    \n    family_parser = FamilyParser(family_lines, family_type)\n    \n    families = list(family_parser.families.keys())\n    \n    LOG.info(\"Found families {0}\".format(', '.join(families)))\n    \n    if len(families) > 1:\n        raise CaseError(\"Only one family per load can be used\")\n    \n    family = family_parser.families[families[0]]\n    \n    return family", "docstring": "Return ped_parser case from a family file\n\nCreate a dictionary with case data. If no family file is given create from VCF\n\nArgs:\nfamily_lines (iterator): The family lines\nfamily_type (str): The format of the family lines\nvcf_path(str): Path to VCF\n\nReturns:\nfamily (Family): A ped_parser family object", "source": "juraj-google-style"}
{"code": "def _FormatSizeInUnitsOf1024(self, size):\n    magnitude_1024 = 0\n    used_memory_1024 = float(size)\n    while (used_memory_1024 >= 1024):\n        used_memory_1024 /= 1024\n        magnitude_1024 += 1\n    if (0 < magnitude_1024 <= 7):\n        return '{0:.1f} {1:s}'.format(used_memory_1024, self._UNITS_1024[magnitude_1024])\n    return '{0:d} B'.format(size)", "docstring": "Represents a number of bytes in units of 1024.\n\nArgs:\nsize (int): size in bytes.\n\nReturns:\nstr: human readable string of the size.", "source": "codesearchnet"}
{"code": "def resolve_peer_creds(self):\n    if (not IS_UID_GID_RESOLVABLE):\n        raise NotImplementedError('UID/GID lookup is unavailable under current platform. It can only be done under UNIX-like OS but not under the Google App Engine')\n    elif (not self.peercreds_resolve_enabled):\n        raise RuntimeError('UID/GID lookup is disabled within this server')\n    user = pwd.getpwuid(self.peer_uid).pw_name\n    group = grp.getgrgid(self.peer_gid).gr_name\n    return (user, group)", "docstring": "Return the username and group tuple of the peercreds if available.\n\nRaises:\nNotImplementedError: in case of unsupported OS\nRuntimeError: in case of UID/GID lookup unsupported or disabled", "source": "codesearchnet"}
{"code": "def from_string(cls, cl_function, dependencies=(), nmr_constraints=None):\n        \n        return_type, function_name, parameter_list, body = split_cl_function(cl_function)\n        return SimpleConstraintFunction(return_type, function_name, parameter_list, body, dependencies=dependencies,\n                                        nmr_constraints=nmr_constraints)", "docstring": "Parse the given CL function into a SimpleCLFunction object.\n\nArgs:\ncl_function (str): the function we wish to turn into an object\ndependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on\n\nReturns:\nSimpleCLFunction: the CL data type for this parameter declaration", "source": "juraj-google-style"}
{"code": "def log_softmax(x, axis=-1):\n    return ops.log_softmax(x, axis=axis)", "docstring": "Log-Softmax activation function.\n\nEach input vector is handled independently.\nThe `axis` argument sets which axis of the input the function\nis applied along.\n\nArgs:\nx: Input tensor.\naxis: Integer, axis along which the softmax is applied.", "source": "github-repos"}
{"code": "def get_next_as_optional(self):\n    raise NotImplementedError('Iterator.get_next_as_optional()')", "docstring": "Returns the next element wrapped in `tf.experimental.Optional`.\n\nIf the iterator has reached the end of the sequence, the returned\n`tf.experimental.Optional` will have no value.\n\n>>> dataset = tf.data.Dataset.from_tensors(42)\n>>> iterator = iter(dataset)\n>>> optional = iterator.get_next_as_optional()\n>>> print(optional.has_value())\ntf.Tensor(True, shape=(), dtype=bool)\n>>> print(optional.get_value())\ntf.Tensor(42, shape=(), dtype=int32)\n>>> optional = iterator.get_next_as_optional()\n>>> print(optional.has_value())\ntf.Tensor(False, shape=(), dtype=bool)\n\nReturns:\nA `tf.experimental.Optional` object representing the next element.", "source": "github-repos"}
{"code": "def with_subject(self, subject):\n    return self.__class__(self._signer, service_account_email=self._service_account_email, scopes=self._scopes, token_uri=self._token_uri, subject=subject, project_id=self._project_id, additional_claims=self._additional_claims.copy())", "docstring": "Create a copy of these credentials with the specified subject.\n\nArgs:\nsubject (str): The subject claim.\n\nReturns:\ngoogle.auth.service_account.Credentials: A new credentials\ninstance.", "source": "codesearchnet"}
{"code": "def set(self, refresh_token):\n    logger.info('Saving refresh_token to %s', repr(self._filename))\n    try:\n        with open(self._filename, 'w') as f:\n            f.write(refresh_token)\n    except IOError as e:\n        logger.warning('Failed to save refresh_token: %s', e)", "docstring": "Cache a refresh token, ignoring any failure.\n\nArgs:\nrefresh_token (str): Refresh token to cache.", "source": "codesearchnet"}
{"code": "def _GetTaskStorageFilePath(self, task):\n    filename = '{0:s}.plaso'.format(task.identifier)\n    return os.path.join(self._task_storage_path, filename)", "docstring": "Retrieves the path of a task storage file in the temporary directory.\n\nArgs:\ntask (Task): task.\n\nReturns:\nstr: path of a task storage file in the temporary directory.", "source": "codesearchnet"}
{"code": "def get_acmg(acmg_terms):\n    \n    prediction = 'uncertain_significance'\n    \n    pvs = False\n    \n    ps_terms = []\n    \n    pm_terms = []\n    \n    pp_terms = []\n    \n    ba = False\n    \n    bs_terms = []\n    \n    bp_terms = []\n    for term in acmg_terms:\n        if term.startswith('PVS'):\n            pvs = True\n        elif term.startswith('PS'):\n            ps_terms.append(term)\n        elif term.startswith('PM'):\n            pm_terms.append(term)\n        elif term.startswith('PP'):\n            pp_terms.append(term)\n        elif term.startswith('BA'):\n            ba = True\n        elif term.startswith('BS'):\n            bs_terms.append(term)\n        elif term.startswith('BP'):\n            bp_terms.append(term)\n\n    \n    pathogenic = is_pathogenic(pvs, ps_terms, pm_terms, pp_terms)\n    likely_pathogenic = is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms)\n    benign = is_benign(ba, bs_terms)\n    likely_benign = is_likely_benign(bs_terms, bp_terms)\n\n    if (pathogenic or likely_pathogenic):\n        if (benign or likely_benign):\n            prediction = 'uncertain_significance'\n        elif pathogenic:\n            prediction = 'pathogenic'\n        else:\n            prediction = 'likely_pathogenic'\n    else:\n        if benign:\n            prediction = 'benign'\n        if likely_benign:\n            prediction = 'likely_benign'\n\n    return prediction", "docstring": "Use the algorithm described in ACMG paper to get a ACMG calssification\n\nArgs:\nacmg_terms(set(str)): A collection of prediction terms\n\nReturns:\nprediction(int):\n0 - Uncertain Significanse\n1 - Benign\n2 - Likely Benign\n3 - Likely Pathogenic\n4 - Pathogenic", "source": "juraj-google-style"}
{"code": "def _set_value(self, slot_record):\n    \n    if slot_record.status == _SlotRecord.FILLED:\n      self.filled = True\n      self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(\n          slot_record)\n      self._fill_datetime = slot_record.fill_time\n      self._value = slot_record.value", "docstring": "Sets the value of this slot based on its corresponding _SlotRecord.\n\nDoes nothing if the slot has not yet been filled.\n\nArgs:\nslot_record: The _SlotRecord containing this Slot's value.", "source": "juraj-google-style"}
{"code": "def ProcessFile(filename, vlevel, extra_check_functions=None):\n  \n\n  _SetVerboseLevel(vlevel)\n  _BackupFilters()\n\n  if not ProcessConfigOverrides(filename):\n    _RestoreFilters()\n    return\n\n  lf_lines = []\n  crlf_lines = []\n  try:\n    \n    \n    \n    \n    \n    \n    \n    if filename == '-':\n      lines = codecs.StreamReaderWriter(sys.stdin,\n                                        codecs.getreader('utf8'),\n                                        codecs.getwriter('utf8'),\n                                        'replace').read().split('\\n')\n    else:\n      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\\n')\n\n    \n    \n    for linenum in range(len(lines) - 1):\n      if lines[linenum].endswith('\\r'):\n        lines[linenum] = lines[linenum].rstrip('\\r')\n        crlf_lines.append(linenum + 1)\n      else:\n        lf_lines.append(linenum + 1)\n\n  except IOError:\n    _cpplint_state.PrintError(\n        \"Skipping input '%s': Can't open for reading\\n\" % filename)\n    _RestoreFilters()\n    return\n\n  \n  file_extension = filename[filename.rfind('.') + 1:]\n\n  \n  \n  if filename != '-' and file_extension not in GetAllExtensions():\n    \n    \n    bazel_gen_files = set([ \n        \"external/local_config_cc/libtool\",\n        \"external/local_config_cc/make_hashed_objlist.py\", \n        \"external/local_config_cc/wrapped_ar\",\n        \"external/local_config_cc/wrapped_clang\",\n        \"external/local_config_cc/xcrunwrapper.sh\",\n    ])\n    if not filename in bazel_gen_files:\n       _cpplint_state.PrintError('Ignoring %s; not a valid file name '\n                                 '(%s)\\n' % (filename, ', '.join(GetAllExtensions())))\n  else:\n    ProcessFileData(filename, file_extension, lines, Error,\n                    extra_check_functions)\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    if lf_lines and crlf_lines:\n      \n      \n      \n      for linenum in crlf_lines:\n        Error(filename, linenum, 'whitespace/newline', 1,\n              'Unexpected \\\\r (^M) found; better to use only \\\\n')\n\n  _RestoreFilters()", "docstring": "Does google-lint on a single file.\n\nArgs:\nfilename: The name of the file to parse.\n\nvlevel: The level of errors to report.  Every error of confidence\n>= verbose_level will be reported.  0 is a good default.\n\nextra_check_functions: An array of additional check functions that will be\nrun on each source line. Each function takes 4\narguments: filename, clean_lines, line, error", "source": "juraj-google-style"}
{"code": "def InTemplateArgumentList(self, clean_lines, linenum, pos):\n    \n    while linenum < clean_lines.NumLines():\n      \n      line = clean_lines.elided[linenum]\n      match = Match(r'^[^{};=\\[\\]\\.<>]*(.)', line[pos:])\n      if not match:\n        linenum += 1\n        pos = 0\n        continue\n      token = match.group(1)\n      pos += len(match.group(0))\n\n      \n      \n      \n      if token in ('{', '}', ';'): return False\n\n      \n      \n      \n      \n      \n      if token in ('>', '=', '[', ']', '.'): return True\n\n      \n      \n      if token != '<':\n        pos += 1\n        if pos >= len(line):\n          linenum += 1\n          pos = 0\n        continue\n\n      \n      \n      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)\n      if end_pos < 0:\n        \n        return False\n      linenum = end_line\n      pos = end_pos\n    return False", "docstring": "Check if current position is inside template argument list.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\npos: position just after the suspected template argument.\nReturns:\nTrue if (linenum, pos) is inside template arguments.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs):\n    if 'quantization_config' in config_dict:\n        config_dict = dict(sparsity_config=config_dict.get('sparsity_config'), **config_dict['quantization_config'])\n    return super().from_dict(config_dict, return_unused_kwargs=return_unused_kwargs, **kwargs)", "docstring": "Instantiates a [`CompressedTensorsConfig`] from a Python dictionary of parameters.\nOptionally unwraps any args from the nested quantization_config\n\nArgs:\nconfig_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the configuration object.\nreturn_unused_kwargs (`bool`,*optional*, defaults to `False`):\nWhether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in\n`PreTrainedModel`.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the configuration object.\n\nReturns:\n[`QuantizationConfigMixin`]: The configuration object instantiated from those parameters.", "source": "github-repos"}
{"code": "def update_splits_if_different(self, split_dict):\n    assert isinstance(split_dict, splits_lib.SplitDict)\n    if (self._splits and splits_lib.check_splits_equals(self._splits, split_dict)):\n        return\n    self._set_splits(split_dict)", "docstring": "Overwrite the splits if they are different from the current ones.\n\n* If splits aren't already defined or different (ex: different number of\nshards), then the new split dict is used. This will trigger stats\ncomputation during download_and_prepare.\n* If splits are already defined in DatasetInfo and similar (same names and\nshards): keep the restored split which contains the statistics (restored\nfrom GCS or file)\n\nArgs:\nsplit_dict: `tfds.core.SplitDict`, the new split", "source": "codesearchnet"}
{"code": "def compile_dependencies(self, sourcepath, include_self=False):\n        \n        items = self.inspector.parents(sourcepath)\n\n        \n        if include_self:\n            items.add(sourcepath)\n\n        return filter(None, [self.compile_source(item) for item in items])", "docstring": "Apply compile on all dependencies\n\nArgs:\nsourcepath (string): Sass source path to compile to its\ndestination using project settings.\n\nKeyword Arguments:\ninclude_self (bool): If ``True`` the given sourcepath is add to\nitems to compile, else only its dependencies are compiled.", "source": "juraj-google-style"}
{"code": "def init_properties(env='dev', app='unnecessary', **_):\n    \n    aws_env = boto3.session.Session(profile_name=env)\n    s3client = aws_env.resource('s3')\n\n    generated = get_details(app=app, env=env)\n    archaius = generated.archaius()\n\n    archaius_file = ('{path}/application.properties').format(path=archaius['path'])\n\n    try:\n        s3client.Object(archaius['bucket'], archaius_file).get()\n        LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})\n        return True\n    except boto3.exceptions.botocore.client.ClientError:\n        s3client.Object(archaius['bucket'], archaius_file).put()\n        LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})\n        return False", "docstring": "Make sure _application.properties_ file exists in S3.\n\nFor Applications with Archaius support, there needs to be a file where the\ncloud environment variable points to.\n\nArgs:\nenv (str): Deployment environment/account, i.e. dev, stage, prod.\napp (str): GitLab Project name.\n\nReturns:\nTrue when application.properties was found.\nFalse when application.properties needed to be created.", "source": "juraj-google-style"}
{"code": "def parseArgs(args):\n\t\n\n\t\n\tif not isinstance(args, (list,tuple)):\n\t\traise ValueError('args is not a list or tuple')\n\n\t\n\tdRet\t= {}\n\n\t\n\tfor s in args:\n\n\t\t\n\t\toRes\t= re.match(u'^--([^=]+)(?:=(.+))?$', s)\n\n\t\t\n\t\tif oRes:\n\n\t\t\t\n\t\t\tmGroup2\t= oRes.group(2)\n\t\t\tdRet[oRes.group(1)]\t= (not mGroup2 and True or mGroup2)\n\n\t\t\n\t\telse:\n\t\t\ttry:\t\t\t\tdRet['?'].append(s)\n\t\t\texcept KeyError:\tdRet['?'] = [s]\n\n\t\n\treturn dRet", "docstring": "Parse Arguments\n\nUsed to parse the arguments passed to the script\n\nArgs:\nargs (list): A list of strings representing arguments to a script\n\nReturns:\ndict: Returns a dictionary with args as keys and the values sent with\nthem or True for valueless arguments\n\nRaises:\nValueError: If args is not a list or tuple", "source": "juraj-google-style"}
{"code": "def redirect_stdout(new_stdout):\n    \n    old_stdout, sys.stdout = sys.stdout, new_stdout\n    try:\n        yield None\n    finally:\n        sys.stdout = old_stdout", "docstring": "Redirect the stdout\n\nArgs:\nnew_stdout (io.StringIO): New stdout to use instead", "source": "juraj-google-style"}
{"code": "def iter_variants_by_names(self, names):\n        \n        if not self.is_parallel:\n            yield from super().iter_variants_by_names(names)\n\n        else:\n            for info, dosage in self._bgen.iter_variants_by_names(names):\n                yield Genotypes(\n                    Variant(info.name,\n                            CHROM_STR_ENCODE.get(info.chrom, info.chrom),\n                            info.pos, [info.a1, info.a2]),\n                    dosage,\n                    reference=info.a1,\n                    coded=info.a2,\n                    multiallelic=True,\n                )", "docstring": "Iterates over the genotypes for variants using a list of names.\n\nArgs:\nnames (list): The list of names for variant extraction.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(OutputModule, self).__init__()\n    self._output_mediator = output_mediator", "docstring": "Initializes an output module.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.\n\nRaises:\nValueError: when there are unused keyword arguments.", "source": "juraj-google-style"}
{"code": "def parse_function_params(params):\n    function_meta = {'args': [], 'kwargs': {}}\n    params_str = params.strip()\n    if (params_str == ''):\n        return function_meta\n    args_list = params_str.split(',')\n    for arg in args_list:\n        arg = arg.strip()\n        if ('=' in arg):\n            (key, value) = arg.split('=')\n            function_meta['kwargs'][key.strip()] = parse_string_value(value.strip())\n        else:\n            function_meta['args'].append(parse_string_value(arg))\n    return function_meta", "docstring": "parse function params to args and kwargs.\n\nArgs:\nparams (str): function param in string\n\nReturns:\ndict: function meta dict\n\n{\n\"args\": [],\n\"kwargs\": {}\n}\n\nExamples:\n>>> parse_function_params(\"\")\n{'args': [], 'kwargs': {}}\n\n>>> parse_function_params(\"5\")\n{'args': [5], 'kwargs': {}}\n\n>>> parse_function_params(\"1, 2\")\n{'args': [1, 2], 'kwargs': {}}\n\n>>> parse_function_params(\"a=1, b=2\")\n{'args': [], 'kwargs': {'a': 1, 'b': 2}}\n\n>>> parse_function_params(\"1, 2, a=3, b=4\")\n{'args': [1, 2], 'kwargs': {'a':3, 'b':4}}", "source": "codesearchnet"}
{"code": "def relative_probability_from_lookup_table( self, jump_lookup_table ):\n        \n        l1 = self.initial_site.label\n        l2 = self.final_site.label\n        c1 = self.initial_site.nn_occupation()\n        c2 = self.final_site.nn_occupation()\n        return jump_lookup_table.jump_probability[ l1 ][ l2 ][ c1 ][ c2 ]", "docstring": "Relative probability of accepting this jump from a lookup-table.\n\nArgs:\njump_lookup_table (LookupTable): the lookup table to be used for this jump.\n\nReturns:\n(Float): relative probability of accepting this jump.", "source": "juraj-google-style"}
{"code": "def _add_remove_user_template(self, url, template_id, account_id=None, email_address=None):\n    if ((not email_address) and (not account_id)):\n        raise HSException('No email address or account_id specified')\n    data = {}\n    if (account_id is not None):\n        data = {'account_id': account_id}\n    else:\n        data = {'email_address': email_address}\n    request = self._get_request()\n    response = request.post((url + template_id), data)\n    return response", "docstring": "Add or Remove user from a Template\n\nWe use this function for two tasks because they have the same API call\n\nArgs:\n\ntemplate_id (str):      The id of the template\n\naccount_id (str):       ID of the account to add/remove access to/from\n\nemail_address (str):    The email_address of the account to add/remove access to/from\n\nRaises:\nHSException: If no email address or account_id specified\n\nReturns:\nA Template object", "source": "codesearchnet"}
{"code": "def _module_info_from_proto(module_info_def, import_scope=None):\n    graph = tf.get_default_graph()\n\n    def prepend_name_scope(name_scope):\n        return ops.prepend_name_scope(name_scope, import_scope)\n\n    def process_leafs(name):\n        return _path_to_graph_element(prepend_name_scope(name), graph)\n    connected_subgraphs = []\n    module_info = ModuleInfo(module_name=module_info_def.module_name, scope_name=prepend_name_scope(module_info_def.scope_name), class_name=module_info_def.class_name, connected_subgraphs=connected_subgraphs)\n    for connected_subgraph_def in module_info_def.connected_subgraphs:\n        connected_subgraph = ConnectedSubGraph(module=module_info, name_scope=prepend_name_scope(connected_subgraph_def.name_scope), inputs=_nested_from_proto(connected_subgraph_def.inputs, process_leafs), outputs=_nested_from_proto(connected_subgraph_def.outputs, process_leafs))\n        connected_subgraphs.append(connected_subgraph)\n    return module_info", "docstring": "Deserializes `module_info_def` proto.\n\nArgs:\nmodule_info_def: An instance of `module_pb2.SonnetModule`.\nimport_scope: Optional `string`. Name scope to use.\n\nReturns:\nAn instance of `ModuleInfo`.\n\nRaises:\nbase_errors.ModuleInfoError: If the probobuf is of the wrong type or\nif some of its fields are missing.", "source": "codesearchnet"}
{"code": "def debug(self, status=None, nids=None):\n    (nrows, ncols) = get_terminal_size()\n    sched_excfile = os.path.join(self.workdir, '_exceptions')\n    if os.path.exists(sched_excfile):\n        with open(sched_excfile, 'r') as fh:\n            cprint('Found exceptions raised by the scheduler', 'red')\n            cprint(fh.read(), color='red')\n            return\n    if (status is not None):\n        tasks = list(self.iflat_tasks(status=status, nids=nids))\n    else:\n        errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))\n        qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))\n        abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))\n        tasks = ((errors + qcriticals) + abicriticals)\n    ntasks = 0\n    for task in tasks:\n        print(make_banner(str(task), width=ncols, mark='='))\n        ntasks += 1\n        for efname in ['qerr_file', 'stderr_file']:\n            err_file = getattr(task, efname)\n            if err_file.exists:\n                s = err_file.read()\n                if (not s):\n                    continue\n                print(make_banner(str(err_file), width=ncols, mark='='))\n                cprint(s, color='red')\n        try:\n            report = task.get_event_report()\n            if (report and report.num_errors):\n                print(make_banner(os.path.basename(report.filename), width=ncols, mark='='))\n                s = '\\n'.join((str(e) for e in report.errors))\n            else:\n                s = None\n        except Exception as exc:\n            s = str(exc)\n        count = 0\n        if (s is not None):\n            cprint(s, color='red')\n            count += 1\n        if (not count):\n            log_files = task.tmpdir.list_filepaths(wildcard='*LOG_*')\n            if (not log_files):\n                cprint('No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs', color='magenta')\n            for log_file in log_files:\n                try:\n                    report = EventsParser().parse(log_file)\n                    if report.errors:\n                        print(report)\n                        count += 1\n                        break\n                except Exception as exc:\n                    cprint(str(exc), color='red')\n                    count += 1\n                    break\n        if (not count):\n            cprint('Houston, we could not find any error message that can explain the problem', color='magenta')\n    print(('Number of tasks analyzed: %d' % ntasks))", "docstring": "This method is usually used when the flow didn't completed succesfully\nIt analyzes the files produced the tasks to facilitate debugging.\nInfo are printed to stdout.\n\nArgs:\nstatus: If not None, only the tasks with this status are selected\nnids: optional list of node identifiers used to filter the tasks.", "source": "codesearchnet"}
{"code": "def serialize(self, accumulator):\n    pass", "docstring": "Serialize an accumulator for a remote call.\n\nThis function serializes an accumulator to be sent to a remote process.\n\nArgs:\naccumulator: The accumulator to serialize.\n\nReturns:\nA byte string representing the passed accumulator.", "source": "github-repos"}
{"code": "def copy(self, src, dst, other_system=None):\n        \n        copy_source = self.get_client_kwargs(src)\n        copy_destination = self.get_client_kwargs(dst)\n        with _handle_client_error():\n            self.client.copy_object(CopySource=copy_source, **copy_destination)", "docstring": "Copy object of the same storage.\n\nArgs:\nsrc (str): Path or URL.\ndst (str): Path or URL.\nother_system (pycosio._core.io_system.SystemBase subclass): Unused.", "source": "juraj-google-style"}
{"code": "def regrep(filename, patterns, reverse=False, terminate_on_match=False, postprocess=str):\n    compiled = {k: re.compile(v) for (k, v) in patterns.items()}\n    matches = collections.defaultdict(list)\n    gen = (reverse_readfile(filename) if reverse else zopen(filename, 'rt'))\n    for (i, l) in enumerate(gen):\n        for (k, p) in compiled.items():\n            m = p.search(l)\n            if m:\n                matches[k].append([[postprocess(g) for g in m.groups()], ((- i) if reverse else i)])\n        if (terminate_on_match and all([len(matches.get(k, [])) for k in compiled.keys()])):\n            break\n    try:\n        gen.close()\n    except:\n        pass\n    return matches", "docstring": "A powerful regular expression version of grep.\n\nArgs:\nfilename (str): Filename to grep.\npatterns (dict): A dict of patterns, e.g.,\n{\"energy\": \"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-\\.]+)\"}.\nreverse (bool): Read files in reverse. Defaults to false. Useful for\nlarge files, especially when used with terminate_on_match.\nterminate_on_match (bool): Whether to terminate when there is at\nleast one match in each key in pattern.\npostprocess (callable): A post processing function to convert all\nmatches. Defaults to str, i.e., no change.\n\nReturns:\nA dict of the following form:\n{key1: [[[matches...], lineno], [[matches...], lineno],\n[[matches...], lineno], ...],\nkey2: ...}\nFor reverse reads, the lineno is given as a -ve number. Please note\nthat 0-based indexing is used.", "source": "codesearchnet"}
{"code": "def __init__(self, pidfile, logger, port = 64042, host = 'localhost'):\n        \n        super(RemoteControllerDeamon, self).__init__(pidfile, logger)\n        self.__port = port\n        self.__host = host\n        for name in dir(self):\n            method = getattr(self, name)\n            if hasattr(method, 'registered_for_rpc'):\n                self.register_method(method, method.registered_for_rpc.__name__)", "docstring": "Create a daemon which is controllable via jsonrpc with decorator\n\nArgs:\npidfile (str): path to create pid file\nlogger (logging.Logger): logger for the daemon\nport (int):\nhost (str):", "source": "juraj-google-style"}
{"code": "def write_makeconfig(_path):\n    \n    http_proxy = str(CFG[\"gentoo\"][\"http_proxy\"])\n    ftp_proxy = str(CFG[\"gentoo\"][\"ftp_proxy\"])\n    rsync_proxy = str(CFG[\"gentoo\"][\"rsync_proxy\"])\n\n    path.mkfile_uchroot(local.path('/') / _path)\n    with open(_path, 'w') as makeconf:\n        lines = \n\n        makeconf.write(lines)\n\n        mounts = CFG[\"container\"][\"mounts\"].value\n        tmp_dir = str(CFG[\"tmp_dir\"])\n        mounts.append({\"src\": tmp_dir, \"tgt\": \"/mnt/distfiles\"})\n        CFG[\"container\"][\"mounts\"] = mounts\n\n        if http_proxy is not None:\n            http_s = \"http_proxy={0}\".format(http_proxy)\n            https_s = \"https_proxy={0}\".format(http_proxy)\n            makeconf.write(http_s + \"\\n\")\n            makeconf.write(https_s + \"\\n\")\n\n        if ftp_proxy is not None:\n            fp_s = \"ftp_proxy={0}\".format(ftp_proxy)\n            makeconf.write(fp_s + \"\\n\")\n\n        if rsync_proxy is not None:\n            rp_s = \"RSYNC_PROXY={0}\".format(rsync_proxy)\n            makeconf.write(rp_s + \"\\n\")", "docstring": "Write a valid gentoo make.conf file to :path:.\n\nArgs:\npath - The output path of the make.conf", "source": "juraj-google-style"}
{"code": "def _get_lr_tensor(self):\n    lr = (tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min)\n    return lr", "docstring": "Get lr minimizing the surrogate.\n\nReturns:\nThe lr_t.", "source": "codesearchnet"}
{"code": "def _print_download_progress_msg(self, msg, flush=False):\n    \n    if self._interactive_mode():\n      \n      \n      self._max_prog_str = max(self._max_prog_str, len(msg))\n      sys.stdout.write(\"\\r%-{}s\".format(self._max_prog_str) % msg)\n      sys.stdout.flush()\n      if flush:\n        print(\"\\n\")\n    else:\n      \n      \n      logging.info(msg)", "docstring": "Prints a message about download progress either to the console or TF log.\n\nArgs:\nmsg: Message to print.\nflush: Indicates whether to flush the output (only used in interactive\nmode).", "source": "juraj-google-style"}
{"code": "def get_images_by_tail_number(self, tail_number, page=1, limit=100):\n    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)\n    return self._fr24.get_aircraft_image_data(url)", "docstring": "Fetch the images of a particular aircraft by its tail number.\n\nThis method can be used to get the images of the aircraft. The images are in 3 sizes and you can use what suits your need.\n\nArgs:\ntail_number (str): The tail number, e.g. VT-ANL\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA dict with the images of the aircraft in various sizes\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_images_by_flight_number('VT-ANL')\nf.get_images_by_flight_number('VT-ANL',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def generate(cls, country_code, bank_code, account_code):\n    spec = _get_iban_spec(country_code)\n    bank_code_length = code_length(spec, 'bank_code')\n    branch_code_length = code_length(spec, 'branch_code')\n    bank_and_branch_code_length = (bank_code_length + branch_code_length)\n    account_code_length = code_length(spec, 'account_code')\n    if (len(bank_code) > bank_and_branch_code_length):\n        raise ValueError('Bank code exceeds maximum size {}'.format(bank_and_branch_code_length))\n    if (len(account_code) > account_code_length):\n        raise ValueError('Account code exceeds maximum size {}'.format(account_code_length))\n    bank_code = bank_code.rjust(bank_and_branch_code_length, '0')\n    account_code = account_code.rjust(account_code_length, '0')\n    iban = (((country_code + '??') + bank_code) + account_code)\n    return cls(iban)", "docstring": "Generate an IBAN from it's components.\n\nIf the bank-code and/or account-number have less digits than required by their\ncountry specific representation, the respective component is padded with zeros.\n\nExamples:\n\nTo generate an IBAN do the following::\n\n>>> bank_code = '37040044'\n>>> account_code = '532013000'\n>>> iban = IBAN.generate('DE', bank_code, account_code)\n>>> iban.formatted\n'DE89 3704 0044 0532 0130 00'\n\nArgs:\ncountry_code (str): The ISO 3166 alpha-2 country code.\nbank_code (str): The country specific bank-code.\naccount_code (str): The customer specific account-code.", "source": "codesearchnet"}
{"code": "def process_files(self, path, recursive=False):\n        \n        self._logger.info('Processing files in \"%s\"', path)\n\n        for (path, file) in files_generator(path, recursive):\n            if not file.endswith(BATCH_EXTENSION):\n                self.process_file(os.path.join(path, file))", "docstring": "Apply normalizations over all files in the given directory.\n\nIterate over all files in a given directory. Normalizations\nwill be applied to each file, storing the result in a new file.\nThe extension for the new file will be the one defined in\nBATCH_EXTENSION.\n\nArgs:\npath: Path to the directory.\nrecursive: Whether to find files recursively or not.", "source": "juraj-google-style"}
{"code": "def _FindFileContainingSymbolInDb(self, symbol):\n    \n    try:\n      file_proto = self._internal_db.FindFileContainingSymbol(symbol)\n    except KeyError as error:\n      if self._descriptor_db:\n        file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)\n      else:\n        raise error\n    if not file_proto:\n      raise KeyError('Cannot find a file containing %s' % symbol)\n    return self._ConvertFileProtoToFileDescriptor(file_proto)", "docstring": "Finds the file in descriptor DB containing the specified symbol.\n\nArgs:\nsymbol: The name of the symbol to search for.\n\nReturns:\nA FileDescriptor that contains the specified symbol.\n\nRaises:\nKeyError: if the file cannot be found in the descriptor database.", "source": "juraj-google-style"}
{"code": "def __init__(self, agent_interface_format=None, map_size=None):\n    \n    if not agent_interface_format:\n      raise ValueError(\"Please specify agent_interface_format\")\n\n    self._agent_interface_format = agent_interface_format\n    aif = self._agent_interface_format\n\n    if (aif.use_feature_units\n        or aif.use_camera_position\n        or aif.use_raw_units):\n      self.init_camera(\n          aif.feature_dimensions,\n          map_size,\n          aif.camera_width_world_units)\n\n    self._valid_functions = _init_valid_functions(\n        aif.action_dimensions)", "docstring": "Initialize a Features instance matching the specified interface format.\n\nArgs:\nagent_interface_format: See the documentation for `AgentInterfaceFormat`.\nmap_size: The size of the map in world units, needed for feature_units.\n\nRaises:\nValueError: if agent_interface_format isn't specified.\nValueError: if map_size isn't specified when use_feature_units or\nuse_camera_position is.", "source": "juraj-google-style"}
{"code": "def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n    named_slots = self._slot_dict(slot_name)\n    variable_key = _var_key(variable)\n    slot_variable = named_slots.get(variable_key, None)\n    if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack):\n        initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n        slot_variable = self._get_or_make_slot_with_initializer(var=variable, initializer=initializer, shape=variable.shape, dtype=variable.dtype, slot_name=slot_name, op_name=self._name)\n    if slot_variable is not None:\n        slot_variable_position.restore(slot_variable)\n    else:\n        self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)", "docstring": "Restore a slot variable's value, possibly creating it.\n\nCalled when a variable which has an associated slot variable is created or\nrestored. When executing eagerly, we create the slot variable with a\nrestoring initializer.\n\nNo new variables are created when graph building. Instead,\n_restore_slot_variable catches these after normal creation and adds restore\nops to the graph. This method is nonetheless important when graph building\nfor the case when a slot variable has already been created but `variable`\nhas just been added to a dependency graph (causing us to realize that the\nslot variable needs to be restored).\n\nArgs:\nslot_variable_position: A `trackable._CheckpointPosition` object\nindicating the slot variable `Trackable` object to be restored.\nslot_name: The name of this `Optimizer`'s slot to restore into.\nvariable: The variable object this slot is being created for.", "source": "github-repos"}
{"code": "def _ReadSelectedVolumes(self, volume_system, prefix='v'):\n    volume_identifiers_string = self._input_reader.Read()\n    volume_identifiers_string = volume_identifiers_string.strip()\n    if (not volume_identifiers_string):\n        return []\n    selected_volumes = self._ParseVolumeIdentifiersString(volume_identifiers_string, prefix=prefix)\n    if (selected_volumes == ['all']):\n        return ['{0:s}{1:d}'.format(prefix, volume_index) for volume_index in range(1, (volume_system.number_of_volumes + 1))]\n    return selected_volumes", "docstring": "Reads the selected volumes provided by the user.\n\nArgs:\nvolume_system (APFSVolumeSystem): volume system.\nprefix (Optional[str]): volume identifier prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix.\n\nRaises:\nKeyboardInterrupt: if the user requested to abort.\nValueError: if the volume identifiers string could not be parsed.", "source": "codesearchnet"}
{"code": "def image_from_console(console: tcod.console.Console) -> tcod.image.Image:\n    return tcod.image.Image._from_cdata(ffi.gc(lib.TCOD_image_from_console(_console(console)), lib.TCOD_image_delete))", "docstring": "Return an Image with a Consoles pixel data.\n\nThis effectively takes a screen-shot of the Console.\n\nArgs:\nconsole (Console): Any Console instance.", "source": "codesearchnet"}
{"code": "def __exit__(self, exc_type, exc_val, exc_tb) -> bool:\n    if self._worker_pool is not None:\n        self._worker_pool.close()\n        self._worker_pool = None\n    self._context_manager_active = False\n    if exc_type is ChildProcessError:\n        sys.stderr.write(str(exc_val))\n        return True\n    return False", "docstring": "Context manager cleanup.\n\nCloses the worker pool if it exists.\n\nArgs:\nexc_type: The type of the raised exception, if any.\nexc_val: The raised exception, if any.\nexc_tb: The traceback of the raised exception, if any.\n\nReturns:\n`True` if an exception should be suppressed, `False` otherwise.", "source": "github-repos"}
{"code": "def __mul__(self, rhs):\n        \n\n        if isinstance(rhs, scipy.sparse.spmatrix):\n            def qIter(qs):\n                for j in range(qs.shape[1]):\n                    qi = qs.getcol(j).toarray().ravel()\n                    yield qi\n                return\n        else:\n            def qIter(qs):\n                for j in range(qs.shape[1]):\n                    qi = qs[:, j]\n                    yield qi\n                return\n\n        result = np.empty(rhs.shape, dtype=np.complex128)\n        for i, q in enumerate(qIter(rhs)):\n            result[:, i] = self._solve(q)\n\n        return result", "docstring": "Carries out the action of solving for wavefields.\n\nArgs:\nrhs (sparse matrix): Right-hand side vector(s)\n\nReturns:\nnp.ndarray: Wavefields", "source": "juraj-google-style"}
{"code": "def _FormatAttrToken(self, token_data):\n    return {'mode': token_data.file_mode, 'uid': token_data.user_identifier, 'gid': token_data.group_identifier, 'system_id': token_data.file_system_identifier, 'node_id': token_data.file_identifier, 'device': token_data.device}", "docstring": "Formats an attribute token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_attr32|bsm_token_data_attr64): AUT_ATTR32 or\nAUT_ATTR64 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def conv_block(name, x, mid_channels, dilations=None, activation=\"relu\",\n               dropout=0.0):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n\n    x_shape = common_layers.shape_list(x)\n    is_2d = len(x_shape) == 4\n    num_steps = x_shape[1]\n    if is_2d:\n      first_filter = [3, 3]\n      second_filter = [1, 1]\n    else:\n      \n      \n      if num_steps == 1:\n        first_filter = [1, 3, 3]\n      else:\n        first_filter = [2, 3, 3]\n      second_filter = [1, 1, 1]\n\n    \n    \n    x = conv(\"1_1\", x, output_channels=mid_channels, filter_size=first_filter,\n             dilations=dilations)\n    x = tf.nn.relu(x)\n    x = get_dropout(x, rate=dropout)\n\n    \n    \n    if activation == \"relu\":\n      x = conv(\"1_2\", x, output_channels=mid_channels,\n               filter_size=second_filter, dilations=dilations)\n      x = tf.nn.relu(x)\n    elif activation == \"gatu\":\n      \n      x_tanh = conv(\"1_tanh\", x, output_channels=mid_channels,\n                    filter_size=second_filter, dilations=dilations)\n      x_sigm = conv(\"1_sigm\", x, output_channels=mid_channels,\n                    filter_size=second_filter, dilations=dilations)\n      x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm)\n\n    x = get_dropout(x, rate=dropout)\n    return x", "docstring": "2 layer conv block used in the affine coupling layer.\n\nArgs:\nname: variable scope.\nx: 4-D or 5-D Tensor.\nmid_channels: Output channels of the second layer.\ndilations: Optional, list of integers.\nactivation: relu or gatu.\nIf relu, the second layer is relu(W*x)\nIf gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)\ndropout: Dropout probability.\nReturns:\nx: 4-D Tensor: Output activations.", "source": "juraj-google-style"}
{"code": "def assign_nested_vars(variables, tensors, indices=None):\n  \n  if isinstance(variables, (tuple, list)):\n    return tf.group(*[\n        assign_nested_vars(variable, tensor)\n        for variable, tensor in zip(variables, tensors)])\n  if indices is None:\n    return variables.assign(tensors)\n  else:\n    return tf.scatter_update(variables, indices, tensors)", "docstring": "Assign tensors to matching nested tuple of variables.\n\nArgs:\nvariables: Nested tuple or list of variables to update.\ntensors: Nested tuple or list of tensors to assign.\nindices: Batch indices to assign to; default to all.\n\nReturns:\nOperation.", "source": "juraj-google-style"}
{"code": "def set(self, key, value):\n        \n\n        changed = super().set(key=key, value=value)\n\n        if not changed:\n            return False\n\n        self._log.info('Saving configuration to \"%s\"...', self._filename)\n\n        with open(self._filename, 'w') as stream:\n            stream.write(self.content)\n            self._log.info('Saved configuration to \"%s\".', self._filename)\n\n        return True", "docstring": "Updates the value of the given key in the file.\n\nArgs:\nkey (str): Key of the property to update.\nvalue (str): New value of the property.\n\nReturn:\nbool: Indicates whether or not a change was made.", "source": "juraj-google-style"}
{"code": "def delete(self, file_path):\n        \n        now = datetime.datetime.now().isoformat()\n        url = nurls['put'] + upload_path + file_name\n\n        headers = {'userid': self.user_id,\n                   'useridx': self.useridx,\n                   'Content-Type': \"application/x-www-form-urlencoded; charset=UTF-8\",\n                   'charset': 'UTF-8',\n                   'Origin': 'http:\n        }\n        r = self.session.delete(url = url, headers = headers)\n\n        return self.resultManager(r.text)", "docstring": "DELETE\n\nArgs:\nfile_path: Full path for a file you want to delete\nupload_path: Ndrive path where you want to delete file\nex) /Picture/\n\nReturns:\nTrue: Delete success\nFalse: Delete failed", "source": "juraj-google-style"}
{"code": "def sections_list(self, cmd=None):\n        \n        sections = list(self.common.sections)\n        if not cmd:\n            if self.bare is not None:\n                sections.extend(self.bare.sections)\n                return sections\n            return []\n        sections.extend(self.subcmds[cmd].sections)\n        if cmd in self._conf:\n            sections.append(cmd)\n        return sections", "docstring": "List of config sections used by a command.\n\nArgs:\ncmd (str): command name, set to ``None`` or ``''`` for the bare\ncommand.\n\nReturns:\nlist of str: list of configuration sections used by that command.", "source": "juraj-google-style"}
{"code": "def handle_message_registered(self, msg_data, host):\n        \n        response = None\n\n        if msg_data[\"method\"] == \"EVENT\":\n            logger.debug(\"<%s> <euuid:%s> Event message \"\n                         \"received\" % (msg_data[\"cuuid\"], msg_data[\"euuid\"]))\n            response = self.event(msg_data[\"cuuid\"],\n                                  host,\n                                  msg_data[\"euuid\"],\n                                  msg_data[\"event_data\"],\n                                  msg_data[\"timestamp\"],\n                                  msg_data[\"priority\"])\n\n        elif msg_data[\"method\"] == \"OK EVENT\":\n            logger.debug(\"<%s> <euuid:%s> Event confirmation message \"\n                         \"received\" % (msg_data[\"cuuid\"], msg_data[\"euuid\"]))\n            try:\n                del self.event_uuids[msg_data[\"euuid\"]]\n            except KeyError:\n                logger.warning(\"<%s> <euuid:%s> Euuid does not exist in event \"\n                               \"buffer. Key was removed before we could process \"\n                               \"it.\" % (msg_data[\"cuuid\"], msg_data[\"euuid\"]))\n\n        elif msg_data[\"method\"] == \"OK NOTIFY\":\n            logger.debug(\"<%s> <euuid:%s> Ok notify \"\n                         \"received\" % (msg_data[\"cuuid\"], msg_data[\"euuid\"]))\n            try:\n                del self.event_uuids[msg_data[\"euuid\"]]\n            except KeyError:\n                logger.warning(\"<%s> <euuid:%s> Euuid does not exist in event \"\n                               \"buffer. Key was removed before we could process \"\n                               \"it.\" % (msg_data[\"cuuid\"], msg_data[\"euuid\"]))\n\n\n        return response", "docstring": "Processes messages that have been delivered by a registered client.\n\nArgs:\nmsg (string): The raw packet data delivered from the listener. This\ndata will be unserialized and then processed based on the packet's\nmethod.\nhost (tuple): The (address, host) tuple of the source message.\n\nReturns:\nA response that will be sent back to the client via the listener.", "source": "juraj-google-style"}
{"code": "def _map_condition(self, wire_map, condition):\n    if (condition is None):\n        new_condition = None\n    else:\n        bit0 = (condition[0], 0)\n        new_condition = (wire_map.get(bit0, bit0)[0], condition[1])\n    return new_condition", "docstring": "Use the wire_map dict to change the condition tuple's creg name.\n\nArgs:\nwire_map (dict): a map from wires to wires\ncondition (tuple): (ClassicalRegister,int)\nReturns:\ntuple(ClassicalRegister,int): new condition", "source": "codesearchnet"}
{"code": "def compose_q(self, r: Rotation, normalize_quats: bool=True) -> Rotation:\n    q1 = self.get_quats()\n    q2 = r.get_quats()\n    new_quats = quat_multiply(q1, q2)\n    return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)", "docstring": "Compose the quaternions of the current Rotation object with those of another.\n\nDepending on whether either Rotation was initialized with quaternions, this function may call\ntorch.linalg.eigh.\n\nArgs:\nr:\nAn update rotation object\nReturns:\nAn updated rotation object", "source": "github-repos"}
{"code": "def ssa(scatterer, h_pol=True):\n    \n\n    ext_xs = ext_xsect(scatterer, h_pol=h_pol)\n    return sca_xsect(scatterer, h_pol=h_pol)/ext_xs if ext_xs > 0.0 else 0.0", "docstring": "Single-scattering albedo for the current setup, with polarization.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe single-scattering albedo.", "source": "juraj-google-style"}
{"code": "def __init__(\n        self,\n        optimizer,\n        ls_max_iterations=10,\n        ls_accept_ratio=0.9,\n        ls_mode='exponential',\n        ls_parameter=0.5,\n        ls_unroll_loop=False,\n        scope='optimized-step',\n        summary_labels=()\n    ):\n        \n        self.solver = LineSearch(\n            max_iterations=ls_max_iterations,\n            accept_ratio=ls_accept_ratio,\n            mode=ls_mode,\n            parameter=ls_parameter,\n            unroll_loop=ls_unroll_loop\n        )\n\n        super(OptimizedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new optimized step meta optimizer instance.\n\nArgs:\noptimizer: The optimizer which is modified by this meta optimizer.\nls_max_iterations: Maximum number of line search iterations.\nls_accept_ratio: Line search acceptance ratio.\nls_mode: Line search mode, see LineSearch solver.\nls_parameter: Line search parameter, see LineSearch solver.\nls_unroll_loop: Unroll line search loop if true.", "source": "juraj-google-style"}
{"code": "def __init__(self, unit_def):\n        \n\n        if isinstance(unit_def, str):\n            unit = collections.defaultdict(int)\n            import re\n            for m in re.finditer(r\"([A-Za-z]+)\\s*\\^*\\s*([\\-0-9]*)\", unit_def):\n                p = m.group(2)\n                p = 1 if not p else int(p)\n                k = m.group(1)\n                unit[k] += p\n        else:\n            unit = {k: v for k, v in dict(unit_def).items() if v != 0}\n        self._unit = check_mappings(unit)", "docstring": "Constructs a unit.\n\nArgs:\nunit_def: A definition for the unit. Either a mapping of unit to\npowers, e.g., {\"m\": 2, \"s\": -1} represents \"m^2 s^-1\",\nor simply as a string \"kg m^2 s^-1\". Note that the supported\nformat uses \"^\" as the power operator and all units must be\nspace-separated.", "source": "juraj-google-style"}
{"code": "def state_scope(self, state_fluents: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:\n    return dict(zip(self.rddl.domain.state_fluent_ordering, state_fluents))", "docstring": "Returns a partial scope with current state-fluents.\n\nArgs:\nstate_fluents (Sequence[tf.Tensor]): The current state fluents.\n\nReturns:\nA mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def _GetDecodedStreamSize(self):\n    self._file_object.seek(0, os.SEEK_SET)\n    self._decoder = self._GetDecoder()\n    self._decoded_data = b''\n    encoded_data_offset = 0\n    encoded_data_size = self._file_object.get_size()\n    decoded_stream_size = 0\n    while (encoded_data_offset < encoded_data_size):\n        read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)\n        if (read_count == 0):\n            break\n        encoded_data_offset += read_count\n        decoded_stream_size += self._decoded_data_size\n    return decoded_stream_size", "docstring": "Retrieves the decoded stream size.\n\nReturns:\nint: decoded stream size.", "source": "codesearchnet"}
{"code": "def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):\n    if (replay_strategy == 'future'):\n        future_p = (1 - (1.0 / (1 + replay_k)))\n    else:\n        future_p = 0\n\n    def _sample_her_transitions(episode_batch, batch_size_in_transitions):\n        'episode_batch is {key: array(buffer_size x T x dim_key)}\\n        '\n        T = episode_batch['u'].shape[1]\n        rollout_batch_size = episode_batch['u'].shape[0]\n        batch_size = batch_size_in_transitions\n        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)\n        t_samples = np.random.randint(T, size=batch_size)\n        transitions = {key: episode_batch[key][(episode_idxs, t_samples)].copy() for key in episode_batch.keys()}\n        her_indexes = np.where((np.random.uniform(size=batch_size) < future_p))\n        future_offset = (np.random.uniform(size=batch_size) * (T - t_samples))\n        future_offset = future_offset.astype(int)\n        future_t = ((t_samples + 1) + future_offset)[her_indexes]\n        future_ag = episode_batch['ag'][(episode_idxs[her_indexes], future_t)]\n        transitions['g'][her_indexes] = future_ag\n        info = {}\n        for (key, value) in transitions.items():\n            if key.startswith('info_'):\n                info[key.replace('info_', '')] = value\n        reward_params = {k: transitions[k] for k in ['ag_2', 'g']}\n        reward_params['info'] = info\n        transitions['r'] = reward_fun(**reward_params)\n        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}\n        assert (transitions['u'].shape[0] == batch_size_in_transitions)\n        return transitions\n    return _sample_her_transitions", "docstring": "Creates a sample function that can be used for HER experience replay.\n\nArgs:\nreplay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',\nregular DDPG experience replay is used\nreplay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times\nas many HER replays as regular replays are used)\nreward_fun (function): function to re-compute the reward with substituted goals", "source": "codesearchnet"}
{"code": "async def find(self, seq_set: SequenceSet, selected: SelectedMailbox, requirement: FetchRequirement=FetchRequirement.METADATA) -> AsyncIterable[Tuple[(int, MessageT)]]:\n    for (seq, cached_msg) in selected.messages.get_all(seq_set):\n        msg = (await self.get(cached_msg.uid, cached_msg, requirement))\n        if (msg is not None):\n            (yield (seq, msg))", "docstring": "Find the active message UID and message pairs in the mailbox that\nare contained in the given sequences set. Message sequence numbers\nare resolved by the selected mailbox session.\n\nArgs:\nseq_set: The sequence set of the desired messages.\nselected: The selected mailbox session.\nrequirement: The data required from each message.", "source": "codesearchnet"}
{"code": "def impulse_noise(x, severity=1):\n    c = [0.03, 0.06, 0.09, 0.17, 0.27][(severity - 1)]\n    x = tfds.core.lazy_imports.skimage.util.random_noise((np.array(x) / 255.0), mode='s&p', amount=c)\n    x_clip = (np.clip(x, 0, 1) * 255)\n    return around_and_astype(x_clip)", "docstring": "Impulse noise corruption to images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Added impulse noise.", "source": "codesearchnet"}
{"code": "def loader(self, file_name, bad_steps=None, **kwargs):\n    new_tests = []\n    if (not os.path.isfile(file_name)):\n        self.logger.info(('Missing file_\\n   %s' % file_name))\n        return None\n    filesize = os.path.getsize(file_name)\n    hfilesize = humanize_bytes(filesize)\n    txt = ('Filesize: %i (%s)' % (filesize, hfilesize))\n    self.logger.debug(txt)\n    temp_dir = tempfile.gettempdir()\n    temp_filename = os.path.join(temp_dir, os.path.basename(file_name))\n    shutil.copy2(file_name, temp_dir)\n    self.logger.debug(('tmp file: %s' % temp_filename))\n    self.logger.debug('HERE WE LOAD THE DATA')\n    data = DataSet()\n    fid = FileID(file_name)\n    test_no = 1\n    data.test_no = test_no\n    data.loaded_from = file_name\n    data.channel_index = None\n    data.channel_number = None\n    data.creator = None\n    data.item_ID = None\n    data.schedule_file_name = None\n    data.start_datetime = None\n    data.test_ID = None\n    data.test_name = None\n    data.raw_data_files.append(fid)\n    self.logger.debug('reading raw-data')\n    self.mpr_data = None\n    self.mpr_log = None\n    self.mpr_settings = None\n    self._load_mpr_data(temp_filename, bad_steps)\n    length_of_test = self.mpr_data.shape[0]\n    self.logger.debug(f'length of test: {length_of_test}')\n    self.logger.debug('renaming columns')\n    self._rename_headers()\n    summary_df = self._create_summary_data()\n    if summary_df.empty:\n        txt = '\\nCould not find any summary (stats-file)!'\n        txt += ' (summary_df.empty = True)'\n        txt += '\\n -> issue make_summary(use_cellpy_stat_file=False)'\n        warnings.warn(txt)\n    data.dfsummary = summary_df\n    data.dfdata = self.mpr_data\n    data.raw_data_files_length.append(length_of_test)\n    new_tests.append(data)\n    self._clean_up(temp_filename)\n    return new_tests", "docstring": "Loads data from biologics .mpr files.\n\nArgs:\nfile_name (str): path to .res file.\nbad_steps (list of tuples): (c, s) tuples of steps s\n(in cycle c) to skip loading.\n\nReturns:\nnew_tests (list of data objects)", "source": "codesearchnet"}
{"code": "def _table_viewer(table, rows_per_page=25, fields=None):\n  \n\n  \n\n  if not table.exists():\n    raise Exception('Table %s does not exist' % table.full_name)\n\n  if not table.is_listable():\n    return \"Done\"\n\n  _HTML_TEMPLATE = u\n\n  if fields is None:\n    fields = google.datalab.utils.commands.get_field_list(fields, table.schema)\n  div_id = google.datalab.utils.commands.Html.next_id()\n  meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''\n  meta_name = table.full_name if table.job is None else ('job: %s' % table.job.id)\n  if table.job:\n    if table.job.cache_hit:\n      meta_cost = 'cached'\n    else:\n      bytes = bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)\n      meta_cost = '%s processed' % bytes\n    meta_time = 'time: %.1fs' % table.job.total_time\n  else:\n    meta_cost = ''\n    meta_time = ''\n\n  data, total_count = google.datalab.utils.commands.get_data(table, fields, first_row=0,\n                                                             count=rows_per_page)\n\n  if total_count < 0:\n    \n    \n    fetched_count = len(data['rows'])\n    if fetched_count < rows_per_page:\n      total_count = fetched_count\n\n  chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'\n  meta_entries = [meta_count, meta_time, meta_cost, meta_name]\n  meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))\n\n  return _HTML_TEMPLATE.format(div_id=div_id,\n                               static_table=google.datalab.utils.commands.HtmlBuilder\n                               .render_chart_data(data),\n                               meta_data=meta_data,\n                               chart_style=chart,\n                               source_index=google.datalab.utils.commands\n                               .get_data_source_index(table.full_name),\n                               fields=','.join(fields),\n                               total_rows=total_count,\n                               rows_per_page=rows_per_page,\n                               data=json.dumps(data, cls=google.datalab.utils.JSONEncoder))", "docstring": "Return a table viewer.\n\nThis includes a static rendering of the first page of the table, that gets replaced\nby the charting code in environments where Javascript is executable and BQ is available.\n\nArgs:\ntable: the table to view.\nrows_per_page: how many rows to display at one time.\nfields: an array of field names to display; default is None which uses the full schema.\nReturns:\nA string containing the HTML for the table viewer.", "source": "juraj-google-style"}
{"code": "def update_paths_and_config(self, config, pkg_dir_name,\n                                pkg_cache_dir=None):\n        \n        if pkg_cache_dir is None:\n            pkg_cache_dir = self.package_cache_dir\n        cached_dir_path = os.path.join(pkg_cache_dir, pkg_dir_name)\n\n        \n        if config.get('paths'):\n            for path in config['paths']:\n                path_to_append = os.path.join(cached_dir_path,\n                                              path)\n                logger.debug(\"Appending \\\"%s\\\" to python sys.path\",\n                             path_to_append)\n                sys.path.append(path_to_append)\n        else:\n            sys.path.append(cached_dir_path)\n\n        \n        \n        if config.get('configs'):\n            for config_filename in config['configs']:\n                self.configs_to_merge.append(os.path.join(cached_dir_path,\n                                                          config_filename))", "docstring": "Handle remote source defined sys.paths & configs.\n\nArgs:\nconfig (dict): git config dictionary\npkg_dir_name (string): directory name of the stacker archive\npkg_cache_dir (string): fully qualified path to stacker cache\ncache directory", "source": "juraj-google-style"}
{"code": "def __init__(self, left, right):\n        \n        if isinstance(left, Dist) and len(left) > 1:\n            if (not isinstance(left, J) or\n                    evaluation.get_dependencies(*list(left.inverse_map))):\n                raise StochasticallyDependentError(\n                    \"Joint distribution with dependencies not supported.\")\n        if isinstance(right, Dist) and len(right) > 1:\n            if (not isinstance(right, J) or\n                    evaluation.get_dependencies(*list(right.inverse_map))):\n                raise StochasticallyDependentError(\n                    \"Joint distribution with dependencies not supported.\")\n\n        assert isinstance(left, Dist) or isinstance(right, Dist)\n        Dist.__init__(self, left=left, right=right)", "docstring": "Constructor.\n\nArgs:\nleft (Dist, numpy.ndarray) : Left hand side.\nright (Dist, numpy.ndarray) : Right hand side.", "source": "juraj-google-style"}
{"code": "def add_update_user(self, user, capacity=None):\n        \n        \n        if isinstance(user, str):\n            user = hdx.data.user.User.read_from_hdx(user, configuration=self.configuration)\n        elif isinstance(user, dict):\n            user = hdx.data.user.User(user, configuration=self.configuration)\n        if isinstance(user, hdx.data.user.User):\n            users = self.data.get('users')\n            if users is None:\n                users = list()\n                self.data['users'] = users\n            if capacity is not None:\n                user['capacity'] = capacity\n            self._addupdate_hdxobject(users, 'name', user)\n            return\n        raise HDXError('Type %s cannot be added as a user!' % type(user).__name__)", "docstring": "Add new or update existing user in organization with new metadata. Capacity eg. member, admin\nmust be supplied either within the User object or dictionary or using the capacity argument (which takes\nprecedence).\n\nArgs:\nuser (Union[User,Dict,str]): Either a user id or user metadata either from a User object or a dictionary\ncapacity (Optional[str]): Capacity of user eg. member, admin. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __extract_directory(self, path, files, destination):\n        \n\n        \n        destination_path = os.path.join(destination, path)\n        if not os.path.exists(destination_path):\n            os.makedirs(destination_path)\n\n        for name, contents in files.items():\n            item_path = os.path.join(path, name)\n\n            \n            \n            if 'files' in contents:\n                self.__extract_directory(\n                    item_path,\n                    contents['files'],\n                    destination\n                )\n\n                continue\n\n            self.__extract_file(item_path, contents, destination)", "docstring": "Extracts a single directory to the specified directory on disk.\n\nArgs:\npath (str):\nRelative (to the root of the archive) path of the directory\nto extract.\n\nfiles (dict):\nA dictionary of files from a *.asar file header.\n\ndestination (str):\nThe path to extract the files to.", "source": "juraj-google-style"}
{"code": "def calculate(self, token_list_x, token_list_y):\n        \n\n        x, y = self.unique(token_list_x, token_list_y)\n        try:\n            result = len(x & y) / len(x | y)\n        except ZeroDivisionError:\n            result = 0.0\n        return result", "docstring": "Calculate similarity with the Jaccard coefficient.\n\nConcrete method.\n\nArgs:\ntoken_list_x:    [token, token, token, ...]\ntoken_list_y:    [token, token, token, ...]\n\nReturns:\nSimilarity.", "source": "juraj-google-style"}
{"code": "def plot_seebeck_temp(self, doping='all', output='average'):\n        \n\n        import matplotlib.pyplot as plt\n        if output == 'average':\n            sbk = self._bz.get_seebeck(output='average')\n        elif output == 'eigs':\n            sbk = self._bz.get_seebeck(output='eigs')\n\n        plt.figure(figsize=(22, 14))\n        tlist = sorted(sbk['n'].keys())\n        doping = self._bz.doping['n'] if doping == 'all' else doping\n        for i, dt in enumerate(['n', 'p']):\n            plt.subplot(121 + i)\n            for dop in doping:\n                d = self._bz.doping[dt].index(dop)\n                sbk_temp = []\n                for temp in tlist:\n                    sbk_temp.append(sbk[dt][temp][d])\n                if output == 'average':\n                    plt.plot(tlist, sbk_temp, marker='s',\n                             label=str(dop) + ' $cm^{-3}$')\n                elif output == 'eigs':\n                    for xyz in range(3):\n                        plt.plot(tlist, zip(*sbk_temp)[xyz], marker='s',\n                                 label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')\n            plt.title(dt + '-type', fontsize=20)\n            if i == 0:\n                plt.ylabel(\"Seebeck \\n coefficient  ($\\\\mu$V/K)\", fontsize=30.0)\n            plt.xlabel('Temperature (K)', fontsize=30.0)\n\n            p = 'lower right' if i == 0 else ''\n            plt.legend(loc=p, fontsize=15)\n            plt.grid()\n            plt.xticks(fontsize=25)\n            plt.yticks(fontsize=25)\n\n        plt.tight_layout()\n\n        return plt", "docstring": "Plot the Seebeck coefficient in function of temperature for different\ndoping levels.\n\nArgs:\ndopings: the default 'all' plots all the doping levels in the analyzer.\nSpecify a list of doping levels if you want to plot only some.\noutput: with 'average' you get an average of the three directions\nwith 'eigs' you get all the three directions.\nReturns:\na matplotlib object", "source": "juraj-google-style"}
{"code": "def random_array(shape, mean=128., std=20.):\n    \n    x = np.random.random(shape)\n    \n    x = (x - np.mean(x)) / (np.std(x) + K.epsilon())\n    \n    x = (x * std) + mean\n    return x", "docstring": "Creates a uniformly distributed random array with the given `mean` and `std`.\n\nArgs:\nshape: The desired shape\nmean: The desired mean (Default value = 128)\nstd: The desired std (Default value = 20)\n\nReturns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`.", "source": "juraj-google-style"}
{"code": "def start(self):\n    self._server.start()", "docstring": "Starts this server.\n\n>>> dispatcher = tf.data.experimental.service.DispatchServer(start=False)\n>>> dispatcher.start()\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\nstarting the server.", "source": "github-repos"}
{"code": "def create_dir(path):\n    full_path = abs_path(path)\n    if (not os.path.exists(full_path)):\n        try:\n            os.makedirs(full_path)\n        except OSError as e:\n            if (e.errno != os.errno.EEXIST):\n                raise", "docstring": "Creates a directory if it does not exist already.\n\nArgs:\npath: The path of the directory to create.", "source": "codesearchnet"}
{"code": "def response(self, in_thread: Optional[bool] = None) -> \"Message\":\n        \n        data = {\"channel\": self[\"channel\"]}\n\n        if in_thread:\n            if \"message\" in self:\n                data[\"thread_ts\"] = (\n                    self[\"message\"].get(\"thread_ts\") or self[\"message\"][\"ts\"]\n                )\n            else:\n                data[\"thread_ts\"] = self.get(\"thread_ts\") or self[\"ts\"]\n        elif in_thread is None:\n            if \"message\" in self and \"thread_ts\" in self[\"message\"]:\n                data[\"thread_ts\"] = self[\"message\"][\"thread_ts\"]\n            elif \"thread_ts\" in self:\n                data[\"thread_ts\"] = self[\"thread_ts\"]\n\n        return Message(data)", "docstring": "Create a response message.\n\nDepending on the incoming message the response can be in a thread. By default the response follow where the\nincoming message was posted.\n\nArgs:\nin_thread (boolean): Overwrite the `threading` behaviour\n\nReturns:\na new :class:`slack.event.Message`", "source": "juraj-google-style"}
{"code": "def __init__(self, optimizer_path: str, optimizer_args: ListOrTuple[str], worker_count: Optional[int]=None, expand_to_input: str=_DEFAULT_INPUT_FILE_EXPANSION_TOKEN):\n    if worker_count is not None and worker_count < 1:\n        raise ValueError(f'The `worker_count` argument must be either `None` or a positive integer; got {worker_count}.')\n    self._optimizer_path: str = optimizer_path\n    self._optimizer_args: list[str] = list(optimizer_args)\n    self._worker_count: Optional[int] = worker_count\n    self._expand_to_input: str = expand_to_input\n    self._worker_pool: Optional[multiprocessing.pool.Pool] = None\n    self._context_manager_active: bool = False", "docstring": "TestCheckWriter constructor.\n\nArgs:\noptimizer_path: The program to use for optimizing the HLO.\noptimizer_args: The arguments to pass into the optimizer tool.\nworker_count: The number of worker threads to use for parallel test-case\ntransformations. If `None`, the worker count will be inferred. If 1, or\nif the instance is used without a context manager (i.e. a `with` block),\nthe transformations will be performed sequentially.\nexpand_to_input: When running the optimizer on a test case, all instances\nof this substring in `optimizer_args` will expand to the path of a\ntemporary file containing the text of that test case.", "source": "github-repos"}
{"code": "def prod(x, axis=None, keepdims=False):\n    \n    from .function_bases import prod as prod_base\n    if axis is None:\n        axis = range(x.ndim)\n    elif not hasattr(axis, '__iter__'):\n        axis = [axis]\n    return prod_base(x, axis, keepdims)", "docstring": "Reduction along axes with product operation.\n\nArgs:\nx (Variable): An input variable.\naxis (None, int or tuple of ints): Axis or axes along which product is\ncalculated. Passing the default value `None` will reduce all dimensions.\nkeepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.\n\nReturns:\n~nnabla.Variable: N-D array.\n\nNote:\nBackward computation is not accurate in a zero value input.", "source": "juraj-google-style"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [0] * len(token_ids_0) + [1]\n    return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def _load_from_file_object(self, f):\n    subtoken_strings = []\n    for line in f:\n        s = line.strip()\n        if ((s.startswith(\"'\") and s.endswith(\"'\")) or (s.startswith('\"') and s.endswith('\"'))):\n            s = s[1:(- 1)]\n        subtoken_strings.append(native_to_unicode(s))\n    self._init_subtokens_from_list(subtoken_strings)\n    self._init_alphabet_from_tokens(subtoken_strings)", "docstring": "Load from a file object.\n\nArgs:\nf: File object to load vocabulary from", "source": "codesearchnet"}
{"code": "def get_rms_dist(self, struct1, struct2):\n        \n        struct1, struct2 = self._process_species([struct1, struct2])\n        struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)\n        match = self._match(struct1, struct2, fu, s1_supercell, use_rms=True,\n                            break_on_match=False)\n\n        if match is None:\n            return None\n        else:\n            return match[0], max(match[1])", "docstring": "Calculate RMS displacement between two structures\n\nArgs:\nstruct1 (Structure): 1st structure\nstruct2 (Structure): 2nd structure\n\nReturns:\nrms displacement normalized by (Vol / nsites) ** (1/3)\nand maximum distance between paired sites. If no matching\nlattice is found None is returned.", "source": "juraj-google-style"}
{"code": "def gene_to_panels(self, case_obj):\n        \n        LOG.info(\"Building gene to panels\")\n        gene_dict = {}\n\n        for panel_info in case_obj.get('panels', []):\n            panel_name = panel_info['panel_name']\n            panel_version = panel_info['version']\n            panel_obj = self.gene_panel(panel_name, version=panel_version)\n            if not panel_obj:\n                \n                LOG.warning(\"Panel: {0}, version {1} does not exist in database\".format(panel_name, panel_version))\n\n            for gene in panel_obj['genes']:\n                hgnc_id = gene['hgnc_id']\n\n                if hgnc_id not in gene_dict:\n                    gene_dict[hgnc_id] = set([panel_name])\n                    continue\n\n                gene_dict[hgnc_id].add(panel_name)\n\n        LOG.info(\"Gene to panels done\")\n\n        return gene_dict", "docstring": "Fetch all gene panels and group them by gene\n\nArgs:\ncase_obj(scout.models.Case)\nReturns:\ngene_dict(dict): A dictionary with gene as keys and a set of\npanel names as value", "source": "juraj-google-style"}
{"code": "def run_pass_pipeline(mlir_txt, pass_pipeline, show_debug_info=False):\n    return pywrap_mlir.experimental_run_pass_pipeline(mlir_txt, pass_pipeline, show_debug_info)", "docstring": "Runs a pipeline over input module.\n\nArgs:\nmlir_txt: Textual representation of the MLIR module.\npass_pipeline: Pass pipeline to run on module.\nshow_debug_info: Whether to include locations in the emitted textual form.\n\nReturns:\nA textual representation of the MLIR module corresponding to the\ntransformed module.", "source": "github-repos"}
{"code": "def make_merged_spec(self, dev):\n    return self.__class__(*self._get_combined_properties(dev))", "docstring": "Returns a new DeviceSpec which incorporates `dev`.\n\nWhen combining specs, `dev` will take precedence over the current spec.\nSo for instance:\n```\nfirst_spec = tf.DeviceSpec(job=0, device_type=\"CPU\")\nsecond_spec = tf.DeviceSpec(device_type=\"GPU\")\ncombined_spec = first_spec.make_merged_spec(second_spec)\n```\n\nis equivalent to:\n```\ncombined_spec = tf.DeviceSpec(job=0, device_type=\"GPU\")\n```\n\nArgs:\ndev: a `DeviceSpec`\n\nReturns:\nA new `DeviceSpec` which combines `self` and `dev`", "source": "github-repos"}
{"code": "def append(self, data):\n    if (isinstance(data, list) and (len(data) > 0)):\n        self.nodes.append(data)\n    else:\n        self.nodes.append([data])", "docstring": "Appends items or lists to the Lattice\n\nArgs:\ndata (item,list) : The Item or List to be added to the Lattice", "source": "codesearchnet"}
{"code": "def tersoff_input(self, structure, periodic=False, uc=True, *keywords):\n    gin = self.keyword_line(*keywords)\n    gin += self.structure_lines(structure, cell_flg=periodic, frac_flg=periodic, anion_shell_flg=False, cation_shell_flg=False, symm_flg=(not uc))\n    gin += self.tersoff_potential(structure)\n    return gin", "docstring": "Gets a GULP input with Tersoff potential for an oxide structure\n\nArgs:\nstructure: pymatgen.core.structure.Structure\nperiodic (Default=False): Flag denoting whether periodic\nboundary conditions are used\nlibrary (Default=None): File containing the species and potential.\nuc (Default=True): Unit Cell Flag.\nkeywords: GULP first line keywords.", "source": "codesearchnet"}
{"code": "def reply(self, text):\n    data = {'text': text, 'vchannel_id': self['vchannel_id']}\n    if self.is_p2p():\n        data['type'] = RTMMessageType.P2PMessage\n        data['to_uid'] = self['uid']\n    else:\n        data['type'] = RTMMessageType.ChannelMessage\n        data['channel_id'] = self['channel_id']\n    return RTMMessage(data)", "docstring": "Replys a text message\n\nArgs:\ntext(str): message content\n\nReturns:\nRTMMessage", "source": "codesearchnet"}
{"code": "def check_list_type(objects, allowed_type, name, allow_none=True):\n    if (objects is None):\n        if (not allow_none):\n            raise TypeError(('%s is None, which is not allowed.' % name))\n        return objects\n    if (not isinstance(objects, (tuple, list))):\n        raise TypeError(('%s is not a list.' % name))\n    if (not all((isinstance(i, allowed_type) for i in objects))):\n        type_list = sorted(list(set((type(obj) for obj in objects))))\n        raise TypeError((\"%s contains types that don't match %s: %s\" % (name, allowed_type.__name__, type_list)))\n    return objects", "docstring": "Verify that objects in list are of the allowed type or raise TypeError.\n\nArgs:\nobjects: The list of objects to check.\nallowed_type: The allowed type of items in 'settings'.\nname: Name of the list of objects, added to the exception.\nallow_none: If set, None is also allowed.\n\nRaises:\nTypeError: if object is not of the allowed type.\n\nReturns:\nThe list of objects, for convenient use in assignment.", "source": "codesearchnet"}
{"code": "def sanitize_spec_name(name: str) -> str:\n    if not name:\n        return 'unknown'\n    swapped = ''.join([c if c.isalnum() else '_' for c in name.lower()])\n    if swapped[0].isalpha():\n        return swapped\n    else:\n        return 'tensor_' + swapped", "docstring": "Sanitizes Spec names. Matches Graph Node and Python naming conventions.\n\nWithout sanitization, names that are not legal Python parameter names can be\nset which makes it challenging to represent callables supporting the named\ncalling capability.\n\nArgs:\nname: The name to sanitize.\n\nReturns:\nA string that meets Python parameter conventions.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n    \n    self.Login = channel.unary_unary(\n        '/api.Dgraph/Login',\n        request_serializer=api__pb2.LoginRequest.SerializeToString,\n        response_deserializer=api__pb2.Response.FromString,\n        )\n    self.Query = channel.unary_unary(\n        '/api.Dgraph/Query',\n        request_serializer=api__pb2.Request.SerializeToString,\n        response_deserializer=api__pb2.Response.FromString,\n        )\n    self.Mutate = channel.unary_unary(\n        '/api.Dgraph/Mutate',\n        request_serializer=api__pb2.Mutation.SerializeToString,\n        response_deserializer=api__pb2.Assigned.FromString,\n        )\n    self.Alter = channel.unary_unary(\n        '/api.Dgraph/Alter',\n        request_serializer=api__pb2.Operation.SerializeToString,\n        response_deserializer=api__pb2.Payload.FromString,\n        )\n    self.CommitOrAbort = channel.unary_unary(\n        '/api.Dgraph/CommitOrAbort',\n        request_serializer=api__pb2.TxnContext.SerializeToString,\n        response_deserializer=api__pb2.TxnContext.FromString,\n        )\n    self.CheckVersion = channel.unary_unary(\n        '/api.Dgraph/CheckVersion',\n        request_serializer=api__pb2.Check.SerializeToString,\n        response_deserializer=api__pb2.Version.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def save(nifti_filename, numpy_data):\n    \n    \n    nifti_filename = os.path.expanduser(nifti_filename)\n\n    try:\n        nifti_img = nib.Nifti1Image(numpy_data, numpy.eye(4))\n        nib.save(nifti_img, nifti_filename)\n\n    except Exception as e:\n        raise ValueError(\"Could not save file {0}.\".format(nifti_filename))\n    return nifti_filename", "docstring": "Export a numpy array to a nifti file.  TODO: currently using dummy\nheaders and identity matrix affine transform. This can be expanded.\n\nArguments:\nnifti_filename (str): A filename to which to save the nifti data\nnumpy_data (numpy.ndarray): The numpy array to save to nifti\n\nReturns:\nString. The expanded filename that now holds the nifti data", "source": "juraj-google-style"}
{"code": "def get_authority(config, metrics, rrset_channel, **kwargs):\n    builder = authority.GCEAuthorityBuilder(config, metrics, rrset_channel, **kwargs)\n    return builder.build_authority()", "docstring": "Get a GCEAuthority client.\n\nA factory function that validates configuration and creates a\nproper GCEAuthority.\n\nArgs:\nconfig (dict): GCEAuthority related configuration.\nmetrics (obj): :interface:`IMetricRelay` implementation.\nrrset_channel (asyncio.Queue): Queue used for sending messages\nto the reconciler plugin.\nkw (dict): Additional keyword arguments to pass to the\nAuthority.\nReturns:\nA :class:`GCEAuthority` instance.", "source": "codesearchnet"}
{"code": "def has_no_unchecked_field(self, locator, **kwargs):\n    kwargs['checked'] = False\n    return self.has_no_selector('field', locator, **kwargs)", "docstring": "Checks if the page or current node has no radio button or checkbox with the given label,\nvalue, or id, that is currently unchecked.\n\nArgs:\nlocator (str): The label, name, or id of an unchecked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it doesn't exist.", "source": "codesearchnet"}
{"code": "def _real_set(self, obj, old, value, hint=None, setter=None):\n    if (self.property.matches(value, old) and (hint is None)):\n        return\n    was_set = (self.name in obj._property_values)\n    if was_set:\n        old_attr_value = obj._property_values[self.name]\n    else:\n        old_attr_value = old\n    if (old_attr_value is not value):\n        if isinstance(old_attr_value, PropertyValueContainer):\n            old_attr_value._unregister_owner(obj, self)\n        if isinstance(value, PropertyValueContainer):\n            value._register_owner(obj, self)\n        if (self.name in obj._unstable_themed_values):\n            del obj._unstable_themed_values[self.name]\n        if (self.name in obj._unstable_default_values):\n            del obj._unstable_default_values[self.name]\n        obj._property_values[self.name] = value\n    self._trigger(obj, old, value, hint=hint, setter=setter)", "docstring": "Internal implementation helper to set property values.\n\nThis function handles bookkeeping around noting whether values have\nbeen explicitly set, etc.\n\nArgs:\nobj (HasProps)\nThe object the property is being set on.\n\nold (obj) :\nThe previous value of the property to compare\n\nhint (event hint or None, optional)\nAn optional update event hint, e.g. ``ColumnStreamedEvent``\n(default: None)\n\nUpdate event hints are usually used at times when better\nupdate performance can be obtained by special-casing in\nsome way (e.g. streaming or patching column data sources)\n\nsetter (ClientSession or ServerSession or None, optional) :\nThis is used to prevent \"boomerang\" updates to Bokeh apps.\n(default: None)\n\nIn the context of a Bokeh server application, incoming updates\nto properties will be annotated with the session that is\ndoing the updating. This value is propagated through any\nsubsequent change notifications that the update triggers.\nThe session can compare the event setter to itself, and\nsuppress any updates that originate from itself.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def slice(self, begin, end):\n    if begin < 0 or end < 0:\n        raise ValueError('Encountered negative index.')\n    lines = self.lines[begin:end]\n    font_attr_segs = {}\n    for key in self.font_attr_segs:\n        if key >= begin and key < end:\n            font_attr_segs[key - begin] = self.font_attr_segs[key]\n    annotations = {}\n    for key in self.annotations:\n        if not isinstance(key, int):\n            annotations[key] = self.annotations[key]\n        elif key >= begin and key < end:\n            annotations[key - begin] = self.annotations[key]\n    return RichTextLines(lines, font_attr_segs=font_attr_segs, annotations=annotations)", "docstring": "Slice a RichTextLines object.\n\nThe object itself is not changed. A sliced instance is returned.\n\nArgs:\nbegin: (int) Beginning line index (inclusive). Must be >= 0.\nend: (int) Ending line index (exclusive). Must be >= 0.\n\nReturns:\n(RichTextLines) Sliced output instance of RichTextLines.\n\nRaises:\nValueError: If begin or end is negative.", "source": "github-repos"}
{"code": "def _read_from_seg(self, n):\n    result = self._seg.read(size=n)\n    if (result == ''):\n        return result\n    offset = self._seg.tell()\n    if (offset > self._seg_valid_length):\n        extra = (offset - self._seg_valid_length)\n        result = result[:((- 1) * extra)]\n    self._offset += len(result)\n    return result", "docstring": "Read from current seg.\n\nArgs:\nn: max number of bytes to read.\n\nReturns:\nvalid bytes from the current seg. \"\" if no more is left.", "source": "codesearchnet"}
{"code": "def GetUnicodeString(value):\n    if isinstance(value, list):\n        value = [GetUnicodeString(item) for item in value]\n        return ''.join(value)\n    if isinstance(value, py2to3.INTEGER_TYPES):\n        value = '{0:d}'.format(value)\n    if (not isinstance(value, py2to3.UNICODE_TYPE)):\n        return codecs.decode(value, 'utf8', 'ignore')\n    return value", "docstring": "Attempts to convert the argument to a Unicode string.\n\nArgs:\nvalue (list|int|bytes|str): value to convert.\n\nReturns:\nstr: string representation of the argument.", "source": "codesearchnet"}
{"code": "def prune_non_existent_outputs(compound_match_query):\n    if (len(compound_match_query.match_queries) == 1):\n        return compound_match_query\n    elif (len(compound_match_query.match_queries) == 0):\n        raise AssertionError(u'Received CompoundMatchQuery with an empty list of MatchQuery objects.')\n    else:\n        match_queries = []\n        for match_query in compound_match_query.match_queries:\n            match_traversals = match_query.match_traversals\n            output_block = match_query.output_block\n            present_locations_tuple = _get_present_locations(match_traversals)\n            (present_locations, present_non_optional_locations) = present_locations_tuple\n            new_output_fields = {}\n            for (output_name, expression) in six.iteritems(output_block.fields):\n                if isinstance(expression, OutputContextField):\n                    (location_name, _) = expression.location.get_location_name()\n                    if (location_name not in present_locations):\n                        raise AssertionError(u'Non-optional output location {} was not found in present_locations: {}'.format(expression.location, present_locations))\n                    new_output_fields[output_name] = expression\n                elif isinstance(expression, FoldedContextField):\n                    base_location = expression.fold_scope_location.base_location\n                    (location_name, _) = base_location.get_location_name()\n                    if (location_name not in present_locations):\n                        raise AssertionError(u'Folded output location {} was found in present_locations: {}'.format(base_location, present_locations))\n                    new_output_fields[output_name] = expression\n                elif isinstance(expression, TernaryConditional):\n                    (location_name, _) = expression.if_true.location.get_location_name()\n                    if (location_name in present_locations):\n                        if (location_name in present_non_optional_locations):\n                            new_output_fields[output_name] = expression.if_true\n                        else:\n                            new_output_fields[output_name] = expression\n                else:\n                    raise AssertionError(u'Invalid expression of type {} in output block: {}'.format(type(expression).__name__, output_block))\n            match_queries.append(MatchQuery(match_traversals=match_traversals, folds=match_query.folds, output_block=ConstructResult(new_output_fields), where_block=match_query.where_block))\n        return CompoundMatchQuery(match_queries=match_queries)", "docstring": "Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery.\n\nEach of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks,\nFor each of these, remove the outputs (that have been implicitly pruned away) from each\ncorresponding ConstructResult block.\n\nArgs:\ncompound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects\n(see convert_optional_traversals_to_compound_match_query)\n\nReturns:\nCompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects", "source": "codesearchnet"}
{"code": "def getServerSSLContext(self, hostname=None):\n    sslctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n    if (hostname is None):\n        hostname = socket.gethostname()\n    certfile = self.getHostCertPath(hostname)\n    if (certfile is None):\n        raise s_exc.NoCertKey(('Missing .crt for %s' % hostname))\n    keyfile = self.getHostKeyPath(hostname)\n    if (keyfile is None):\n        raise s_exc.NoCertKey(('Missing .key for %s' % hostname))\n    sslctx.load_cert_chain(certfile, keyfile)\n    return sslctx", "docstring": "Returns an ssl.SSLContext appropriate to listen on a socket\n\nArgs:\nhostname:  if None, the value from socket.gethostname is used to find the key in the servers directory.\nThis name should match the not-suffixed part of two files ending in .key and .crt in the hosts subdirectory", "source": "codesearchnet"}
{"code": "def __setattr__(cls, name, value):\n        \n        if cls.__initialized and name not in _POST_INIT_ATTRIBUTE_NAMES:\n            raise AttributeError('May not change values: %s' % name)\n        else:\n            type.__setattr__(cls, name, value)", "docstring": "Overridden to avoid setting variables after init.\n\nSetting attributes on a class must work during the period of\ninitialization to set the enumation value class variables and\nbuild the name/number maps. Once __init__ has set the\n__initialized flag to True prohibits setting any more values\non the class. The class is in effect frozen.\n\nArgs:\nname: Name of value to set.\nvalue: Value to set.", "source": "juraj-google-style"}
{"code": "def fit(self, X, truncated=3):\n        \n        self.n_sample, self.n_var = X.shape\n        self.columns = X.columns\n        self.tau_mat = X.corr(method='kendall').values\n        self.u_matrix = np.empty([self.n_sample, self.n_var])\n\n        self.truncated = truncated\n        self.depth = self.n_var - 1\n        self.trees = []\n\n        self.unis, self.ppfs = [], []\n        for i, col in enumerate(X):\n            uni = self.model()\n            uni.fit(X[col])\n            self.u_matrix[:, i] = uni.cumulative_distribution(X[col])\n            self.unis.append(uni)\n            self.ppfs.append(uni.percent_point)\n\n        self.train_vine(self.vine_type)\n        self.fitted = True", "docstring": "Fit a vine model to the data.\n\nArgs:\nX(numpy.ndarray): data to be fitted.\ntruncated(int): max level to build the vine.", "source": "juraj-google-style"}
{"code": "def create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None):\n\n    @custom_gradient.custom_gradient\n    def _identity_with_grad_check(x):\n        \n        x = array_ops.identity(x)\n\n        def grad(dx):\n            \n            if expected_dtype:\n                assert dx.dtype == expected_dtype, 'dx.dtype should be %s but is: %s' % (expected_dtype, dx.dtype)\n            expected_tensor = tensor_conversion.convert_to_tensor_v2(expected_gradient, dtype=dx.dtype, name='expected_gradient')\n            with ops.control_dependencies([x]):\n                assert_op = check_ops.assert_equal(dx, expected_tensor)\n            with ops.control_dependencies([assert_op]):\n                dx = array_ops.identity(dx)\n            return dx\n        return (x, grad)\n\n    def identity_with_grad_check(x):\n        return _identity_with_grad_check(x)\n    return identity_with_grad_check", "docstring": "Returns a function that asserts it's gradient has a certain value.\n\nThis serves as a hook to assert intermediate gradients have a certain value.\nThis returns an identity function. The identity's gradient function is also\nthe identity function, except it asserts that the gradient equals\n`expected_gradient` and has dtype `expected_dtype`.\n\nArgs:\nexpected_gradient: The gradient function asserts that the gradient is this\nvalue.\nexpected_dtype: The gradient function asserts the gradient has this dtype.\n\nReturns:\nAn identity function whose gradient function asserts the gradient has a\ncertain value.", "source": "github-repos"}
{"code": "def _prefix_exists_in_gcs(gcs_prefix, credentials=None):\n  \n  gcs_service = _get_storage_service(credentials)\n\n  bucket_name, prefix = gcs_prefix[len('gs:\n  \n  \n  request = gcs_service.objects().list(\n      bucket=bucket_name, prefix=prefix, maxResults=1)\n  response = request.execute()\n  return response.get('items', None)", "docstring": "Check whether there is a GCS object whose name starts with the prefix.\n\nSince GCS doesn't actually have folders, this is how we check instead.\n\nArgs:\ngcs_prefix: The path; should start with 'gs://'.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nTrue if the prefix matches at least one object in GCS.\n\nRaises:\nerrors.HttpError: if it can't talk to the server", "source": "juraj-google-style"}
{"code": "def print_dict(d, show_missing=True):\n    for (k, v) in sorted(d.items()):\n        if ((not v) and show_missing):\n            print('{} -'.format(k))\n        elif isinstance(v, list):\n            print(k)\n            for item in v:\n                print('   {}'.format(item))\n        elif isinstance(v, dict):\n            print(k)\n            for (kk, vv) in sorted(v.items()):\n                print('   {:<20} {}'.format(kk, vv))", "docstring": "Prints a shallow dict to console.\n\nArgs:\nd: Dict to print.\nshow_missing: Whether to show keys with empty values.", "source": "codesearchnet"}
{"code": "def encode(self, vecs):\n        \n        assert vecs.dtype == np.float32\n        assert vecs.ndim == 2\n        N, D = vecs.shape\n        assert D == self.Ds * self.M, \"input dimension must be Ds * M\"\n\n        \n        codes = np.empty((N, self.M), dtype=self.code_dtype)\n        for m in range(self.M):\n            if self.verbose:\n                print(\"Encoding the subspace: {} / {}\".format(m, self.M))\n            vecs_sub = vecs[:, m * self.Ds : (m+1) * self.Ds]\n            codes[:, m], _ = vq(vecs_sub, self.codewords[m])\n\n        return codes", "docstring": "Encode input vectors into PQ-codes.\n\nArgs:\nvecs (np.ndarray): Input vectors with shape=(N, D) and dtype=np.float32.\n\nReturns:\nnp.ndarray: PQ codes with shape=(N, M) and dtype=self.code_dtype", "source": "juraj-google-style"}
{"code": "def AddBackpropIndexedSlicesAccumulator(self, op: ops.Operation, grad):\n    values = grad.values\n    indices = grad.indices\n    dense_shape = grad.dense_shape\n    self.Exit()\n    if self.outer_context:\n        self.outer_context.Enter()\n    if values.get_shape().is_fully_defined():\n        values_shape = tensor_shape.TensorShape([tensor_shape.Dimension(1)] + values.get_shape().dims[1:])\n        if self.outer_context:\n            self.outer_context.Enter()\n        values_acc = constant_op.constant(0, values.dtype, shape=values_shape, name='b_acc')\n        if self.outer_context:\n            self.outer_context.Exit()\n    else:\n        values_shape = _resource_safe_shape(op.inputs[0])[1:]\n        values_shape = array_ops.concat([[1], values_shape], 0)\n        values_acc = array_ops.zeros(values_shape, dtype=values.dtype)\n    indices_acc = constant_op.constant([0], indices.dtype)\n    shape_acc = None\n    if dense_shape is not None:\n        if dense_shape.get_shape().is_fully_defined():\n            if self.outer_context:\n                self.outer_context.Enter()\n            shape_acc = constant_op.constant(0, dense_shape.dtype, shape=dense_shape.get_shape())\n            if self.outer_context:\n                self.outer_context.Exit()\n        else:\n            shape_acc = array_ops.zeros_like(array_ops.shape_internal(op.inputs[0], optimize=False, out_type=dense_shape.dtype), optimize=False)\n    if self.outer_context:\n        self.outer_context.Exit()\n    self.Enter()\n    self.AddName(values_acc.name)\n    self.AddName(indices_acc.name)\n    init_acc = [indices_acc, values_acc]\n    if shape_acc is not None:\n        self.AddName(shape_acc.name)\n        init_acc.append(shape_acc)\n    enter_acc = [_Enter(x, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, use_input_shape=False, name='b_acc') for x in init_acc]\n    enter_acc[0].set_shape([None])\n    if values_acc.shape.dims is not None:\n        enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:])\n    self.loop_enters.extend(enter_acc)\n    merge_acc = [merge([x, x], name='b_acc')[0] for x in enter_acc]\n    switch_acc = [switch(x, self._pivot) for x in merge_acc]\n    acc_indexed_slices = [array_ops.concat([xa[1], xv], 0) for xa, xv in zip(switch_acc[:2], [indices, values])]\n    if shape_acc is not None:\n        acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1]))\n    next_acc = [_NextIteration(x) for x in acc_indexed_slices]\n    for xm, xn in zip(merge_acc, next_acc):\n        xm.op._update_input(1, xn)\n    exit_acc = [exit(x[0], name='b_acc') for x in switch_acc]\n    self.loop_exits.extend(exit_acc)\n    self.ExitResult(exit_acc)\n    return indexed_slices.IndexedSlices(indices=exit_acc[0], values=exit_acc[1], dense_shape=exit_acc[2] if shape_acc is not None else None)", "docstring": "This is used for accumulating gradients that are IndexedSlices.\n\nThis is essentially the equivalent of AddBackpropAccumulator but optimized\nfor things like updating embeddings from within a while loop.\n\nArgs:\nop: The Enter op for a loop invariant.\ngrad: The partial gradients represented as an IndexedSlices.\n\nReturns:\nThe accumulated IndexedSlices gradient of the loop invariant.", "source": "github-repos"}
{"code": "def diff_toDelta(self, diffs):\n    text = []\n    for (op, data) in diffs:\n        if (op == self.DIFF_INSERT):\n            data = data.encode('utf-8')\n            text.append(('+' + urllib.quote(data, \"!~*'();/?:@&=+$,\n        elif (op == self.DIFF_DELETE):\n            text.append(('-%d' % len(data)))\n        elif (op == self.DIFF_EQUAL):\n            text.append(('=%d' % len(data)))\n    return '\\t'.join(text)", "docstring": "Crush the diff into an encoded string which describes the operations\nrequired to transform text1 into text2.\nE.g. =3\\t-2\\t+ing  -> Keep 3 chars, delete 2 chars, insert 'ing'.\nOperations are tab-separated.  Inserted text is escaped using %xx notation.\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nDelta text.", "source": "codesearchnet"}
{"code": "def register(config_class, processor_class, exist_ok=False):\n    PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)", "docstring": "Register a new processor for this class.\n\nArgs:\nconfig_class ([`PretrainedConfig`]):\nThe configuration corresponding to the model to register.\nprocessor_class ([`ProcessorMixin`]): The processor to register.", "source": "github-repos"}
{"code": "async def storm(self, text, opts=None, num=None, cmdr=False):\n    mesgs = (await self._runStorm(text, opts, cmdr))\n    if (num is not None):\n        nodes = [m for m in mesgs if (m[0] == 'node')]\n        if (len(nodes) != num):\n            raise AssertionError(f'Expected {num} nodes, got {len(nodes)}')\n    return mesgs", "docstring": "A helper for executing a storm command and getting a list of storm messages.\n\nArgs:\ntext (str): Storm command to execute.\nopts (dict): Opt to pass to the cortex during execution.\nnum (int): Number of nodes to expect in the output query. Checks that with an assert statement.\ncmdr (bool): If True, executes the line via the Cmdr CLI and will send output to outp.\n\nNotes:\nThe opts dictionary will not be used if cmdr=True.\n\nReturns:\nlist: A list of storm messages.", "source": "codesearchnet"}
{"code": "def __init__(self, input_energy: energy.BitstringEnergy, num_expectation_samples: int, num_burnin_samples: int, name: Union[None, str]=None):\n    super().__init__(input_energy, num_expectation_samples, name=name)\n    self._kernel = GibbsWithGradientsKernel(input_energy)\n    self._chain_state = tf.Variable(tfp.distributions.Bernoulli(probs=[0.5] * self.energy.num_bits, dtype=tf.int8).sample(), trainable=False)\n    self.num_burnin_samples = num_burnin_samples", "docstring": "Initializes a GibbsWithGradientsInference.\n\nArgs:\ninput_energy: The parameterized energy function which defines this\ndistribution via the equations of an energy based model.  This class\nassumes that all parameters of `energy` are `tf.Variable`s and that\nthey are all returned by `energy.variables`.\nnum_expectation_samples: Number of samples to draw and use for estimating\nthe expectation value.\nnum_burnin_samples: Number of samples to discard when letting the chain\nequilibrate after updating the parameters of `input_energy`.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def build_and_pickle_dump(self, abivalidate=False):\n        \n        self.build()\n        if not abivalidate: return self.pickle_dump()\n\n        \n        isok, errors = self.abivalidate_inputs()\n        if isok: return self.pickle_dump()\n        errlines = []\n        for i, e in enumerate(errors):\n            errlines.append(\"[%d] %s\" % (i, e))\n        raise ValueError(\"\\n\".join(errlines))", "docstring": "Build dirs and file of the `Flow` and save the object in pickle format.\nReturns 0 if success\n\nArgs:\nabivalidate: If True, all the input files are validate by calling\nthe abinit parser. If the validation fails, ValueError is raise.", "source": "juraj-google-style"}
{"code": "def __init__(self, vlan_pcp=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_SET_VLAN_PCP, length=8)\n        self.vlan_pcp = vlan_pcp", "docstring": "Create an ActionVlanPCP with the optional parameters below.\n\nArgs:\nvlan_pcp (int): VLAN Priority.\n\n.. note:: The vlan_pcp field is 8 bits long,\nbut only the lower 3 bits have meaning.", "source": "juraj-google-style"}
{"code": "def truncate_to_field_length(self, field, value):\n    max_len = getattr(self.__class__, field).prop.columns[0].type.length\n    if (value and (len(value) > max_len)):\n        return value[:max_len]\n    else:\n        return value", "docstring": "Truncate the value of a string field to the field's max length.\n\nUse this in a validator to check/truncate values before inserting them into the database.\nCopy the below example code after ``@validates`` to your model class and replace ``field1`` and ``field2`` with\nyour field name(s).\n\n:Example:\n\nfrom sqlalchemy.orm import validates\n# ... omitting other imports ...\n\nclass MyModel(base.Base):\n\nfield1 = Column(String(128))\nfield2 = Column(String(64))\n\n@validates('field1', 'field2')\ndef truncate(self, field, value):\nreturn self.truncate_to_field_length(field, value)\n\nArgs:\nfield (str): field name to validate\nvalue (str/unicode): value to validate\n\nReturns:\nstr/unicode: value truncated to field max length", "source": "codesearchnet"}
{"code": "def json_to_data(fn=None, return_json=True):\n\n    def json_to_data_decorator(fn):\n\n        @handle_type_error\n        @wraps(fn)\n        def get_data_wrapper(*args, **kwargs):\n            kwargs['data'] = decode_json_body()\n            if (not return_json):\n                return fn(*args, **kwargs)\n            return encode_json_body(fn(*args, **kwargs))\n        return get_data_wrapper\n    if fn:\n        return json_to_data_decorator(fn)\n    return json_to_data_decorator", "docstring": "Decode JSON from the request and add it as ``data`` parameter for wrapped\nfunction.\n\nArgs:\nreturn_json (bool, default True): Should the decorator automatically\nconvert returned value to JSON?", "source": "codesearchnet"}
{"code": "def _escaped_token_to_subtoken_strings(self, escaped_token):\n        \n        \n        \n        ret = []\n        start = 0\n        token_len = len(escaped_token)\n        while start < token_len:\n            for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1):\n                subtoken = escaped_token[start:end]\n                if subtoken in self._all_subtoken_strings:\n                    ret.append(subtoken)\n                    start = end\n                    break\n\n            else:  \n                \n                \n                \n                assert False, \"Token substring not found in subtoken vocabulary.\"\n\n        return ret", "docstring": "Converts an escaped token string to a list of subtoken strings.\n\nArgs:\nescaped_token: An escaped token as a unicode string.\nReturns:\nA list of subtokens as unicode strings.", "source": "juraj-google-style"}
{"code": "def _FormatField(self, field):\n    if (self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES)):\n        return field.replace(self._FIELD_DELIMITER, ' ')\n    return field", "docstring": "Formats a field.\n\nArgs:\nfield (str): field value.\n\nReturns:\nstr: formatted field value.", "source": "codesearchnet"}
{"code": "def match(self, path):\n        \n        match = self._re.search(path)\n        if match is None:\n            return None\n        kwargs_indexes = match.re.groupindex.values()\n        args_indexes = [i for i in range(1, match.re.groups + 1)\n                          if i not in kwargs_indexes]\n        args = [match.group(i) for i in args_indexes]\n        kwargs = {}\n        for name, index in match.re.groupindex.items():\n            kwargs[name] = match.group(index)\n        return self._callback, args, kwargs", "docstring": "Return route handler with arguments if path matches this route.\n\nArguments:\npath (str): Request path\n\nReturns:\ntuple or None: A tuple of three items:\n\n1. Route handler (callable)\n2. Positional arguments (list)\n3. Keyword arguments (dict)\n\n``None`` if the route does not match the path.", "source": "juraj-google-style"}
{"code": "def strace_configure(self, port_width):\n    if (port_width not in [1, 2, 4]):\n        raise ValueError(('Invalid port width: %s' % str(port_width)))\n    config_string = ('PortWidth=%d' % port_width)\n    res = self._dll.JLINK_STRACE_Config(config_string.encode())\n    if (res < 0):\n        raise errors.JLinkException('Failed to configure STRACE port')\n    return None", "docstring": "Configures the trace port width for tracing.\n\nNote that configuration cannot occur while STRACE is running.\n\nArgs:\nself (JLink): the ``JLink`` instance\nport_width (int): the trace port width to use.\n\nReturns:\n``None``\n\nRaises:\nValueError: if ``port_width`` is not ``1``, ``2``, or ``4``.\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def maybe_download(directory, filename, uri):\n    tf.gfile.MakeDirs(directory)\n    filepath = os.path.join(directory, filename)\n    if tf.gfile.Exists(filepath):\n        tf.logging.info(('Not downloading, file already found: %s' % filepath))\n        return filepath\n    tf.logging.info(('Downloading %s to %s' % (uri, filepath)))\n    try:\n        tf.gfile.Copy(uri, filepath)\n    except tf.errors.UnimplementedError:\n        if uri.startswith('http'):\n            inprogress_filepath = (filepath + '.incomplete')\n            (inprogress_filepath, _) = urllib.urlretrieve(uri, inprogress_filepath, reporthook=download_report_hook)\n            print()\n            tf.gfile.Rename(inprogress_filepath, filepath)\n        else:\n            raise ValueError(('Unrecognized URI: ' + filepath))\n    statinfo = os.stat(filepath)\n    tf.logging.info(('Successfully downloaded %s, %s bytes.' % (filename, statinfo.st_size)))\n    return filepath", "docstring": "Download filename from uri unless it's already in directory.\n\nCopies a remote file to local if that local file does not already exist.  If\nthe local file pre-exists this function call, it does not check that the local\nfile is a copy of the remote.\n\nRemote filenames can be filepaths, any URI readable by tensorflow.gfile, or a\nURL.\n\nArgs:\ndirectory: path to the directory that will be used.\nfilename: name of the file to download to (do nothing if it already exists).\nuri: URI to copy (or download) from.\n\nReturns:\nThe path to the downloaded file.", "source": "codesearchnet"}
{"code": "def has_shell_command(self, command):\n        \n        try:\n            output = self.shell(['command', '-v',\n                                 command]).decode('utf-8').strip()\n            return command in output\n        except AdbError:\n            \n            \n            return False", "docstring": "Checks to see if a given check command exists on the device.\n\nArgs:\ncommand: A string that is the name of the command to check.\n\nReturns:\nA boolean that is True if the command exists and False otherwise.", "source": "juraj-google-style"}
{"code": "def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:\n    data = decode_body(headers, body)\n    raise_for_status(status, headers, data)\n    raise_for_api_error(headers, data)\n    return data", "docstring": "Decode incoming response\n\nArgs:\nstatus: Response status\nheaders: Response headers\nbody: Response body\n\nReturns:\nResponse data", "source": "codesearchnet"}
{"code": "def tangent(f):\n  \n  node = annotate.resolve_calls(f)\n  RemoveWith().visit(node)\n  wrapped = functools.wraps(f)(compile_.compile_function(node))\n  wrapped.tangent = f\n  return wrapped", "docstring": "A decorator which removes the `with insert_grad_of` statement.\n\nThis allows the function to be called as usual.\n\nArgs:\nf: A function\n\nReturns:\nA function with any `with insert_grad_of` context managers removed.", "source": "juraj-google-style"}
{"code": "def get_build_info():\n    return build_info.build_info", "docstring": "Get a dictionary describing TensorFlow's build environment.\n\nValues are generated when TensorFlow is compiled, and are static for each\nTensorFlow package. The return value is a dictionary with string keys such as:\n\n- cuda_version\n- cudnn_version\n- is_cuda_build\n- is_rocm_build\n- msvcp_dll_names\n- nvcuda_dll_name\n- cudart_dll_name\n- cudnn_dll_name\n\nNote that the actual keys and values returned by this function is subject to\nchange across different versions of TensorFlow or across platforms.\n\nReturns:\nA Dictionary describing TensorFlow's build environment.", "source": "github-repos"}
{"code": "def get_layer_index_bound_by_layer_name(layers, layer_range=None):\n    if layer_range is not None:\n        if len(layer_range) != 2:\n            raise ValueError(f'layer_range must be a list or tuple of length 2. Received: layer_range = {layer_range} of length {len(layer_range)}')\n        if not isinstance(layer_range[0], str) or not isinstance(layer_range[1], str):\n            raise ValueError(f'layer_range should contain string type only. Received: {layer_range}')\n    else:\n        return [0, len(layers)]\n    lower_index = [idx for idx, layer in enumerate(layers) if re.match(layer_range[0], layer.name)]\n    upper_index = [idx for idx, layer in enumerate(layers) if re.match(layer_range[1], layer.name)]\n    if not lower_index or not upper_index:\n        raise ValueError(f'Passed layer_names do not match the layer names in the model. Received: {layer_range}')\n    if min(lower_index) > max(upper_index):\n        return [min(upper_index), max(lower_index) + 1]\n    return [min(lower_index), max(upper_index) + 1]", "docstring": "Get the layer indexes from the model based on layer names.\n\nThe layer indexes can be used to slice the model into sub models for\ndisplay.\n\nArgs:\nmodel: `Model` instance.\nlayer_names: a list or tuple of 2 strings, the starting layer name and\nending layer name (both inclusive) for the result. All layers will\nbe included when `None` is provided.\n\nReturns:\nThe index value of layer based on its unique name (layer_names).\nOutput will be [first_layer_index, last_layer_index + 1].", "source": "github-repos"}
{"code": "def local_variables_initializer():\n    if context.executing_eagerly():\n        return control_flow_ops.no_op(name='local_variables_initializer')\n    return variables_initializer(local_variables())", "docstring": "Returns an Op that initializes all local variables.\n\nThis is just a shortcut for `variables_initializer(local_variables())`\n\n@compatibility(TF2)\nIn TF2, variables are initialized immediately when they are created. There is\nno longer a need to run variable initializers before using them.\n@end_compatibility\n\nReturns:\nAn Op that initializes all local variables in the graph.", "source": "github-repos"}
{"code": "def get_template(template):\n    \n    from cloud_inquisitor.database import db\n\n    tmpl = db.Template.find_one(template_name=template)\n    if not tmpl:\n        raise InquisitorError('No such template found: {}'.format(template))\n\n    tmplenv = Environment(loader=BaseLoader, autoescape=True)\n    tmplenv.filters['json_loads'] = json.loads\n    tmplenv.filters['slack_quote_join'] = lambda data: ', '.join('`{}`'.format(x) for x in data)\n\n    return tmplenv.from_string(tmpl.template)", "docstring": "Return a Jinja2 template by filename\n\nArgs:\ntemplate (str): Name of the template to return\n\nReturns:\nA Jinja2 Template object", "source": "juraj-google-style"}
{"code": "def pretty_emit(self, record, is_header=False, task_level=None):\n        \n        task = record.task or self.cur_task\n        if task_level is None:\n            task_level = self.cur_depth_level\n\n        if is_header:\n            extra_prefix = (\n                self.get_task_indicator(task_level - 1) + ' ' +\n                ('' if self.am_i_main_thread else '[%s] ' % self.cur_thread) +\n                task + ': '\n            )\n            record.levelno = logging.INFO\n        else:\n            extra_prefix = '  ' + self.get_task_indicator(task_level) + ' '\n\n        if task:\n            record.msg = (\n                '  ' * (task_level - 1) + extra_prefix + str(record.msg)\n            )\n\n        super().emit(record)\n        super().flush()", "docstring": "Wrapper around the :class:`logging.StreamHandler` emit method to add\nsome decoration stuff to the message\n\nArgs:\nrecord (logging.LogRecord): log record to emit\nis_header (bool): if this record is a header, usually, a start or\nend task message\ntask_level (int): If passed, will take that as the current nested\ntask level instead of calculating it from the current tasks\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def group_alleles_by_start_end_Xbp(arr, bp=28):\n    starts = arr[(:, 0:bp)]\n    ends = arr[(:, (- bp):)]\n    starts_ends_idxs = defaultdict(list)\n    (l, seq_len) = arr.shape\n    for i in range(l):\n        start_i = starts[i]\n        end_i = ends[i]\n        start_i_str = ''.join([str(x) for x in start_i])\n        end_i_str = ''.join([str(x) for x in end_i])\n        starts_ends_idxs[(start_i_str + end_i_str)].append(i)\n    return starts_ends_idxs", "docstring": "Group alleles by matching ends\n\nArgs:\narr (numpy.array): 2D int matrix of alleles\nbp (int): length of ends to group by\n\nReturns:\ndict of lists: key of start + end strings to list of indices of alleles with matching ends", "source": "codesearchnet"}
{"code": "def _parse_session_run_index(self, event):\n    \n    metadata_string = event.log_message.message\n    try:\n      metadata = json.loads(metadata_string)\n    except ValueError as e:\n      logger.error(\n          \"Could not decode metadata string '%s' for step value: %s\",\n          metadata_string, e)\n      return constants.SENTINEL_FOR_UNDETERMINED_STEP\n\n    try:\n      return metadata[\"session_run_index\"]\n    except KeyError:\n      logger.error(\n          \"The session_run_index is missing from the metadata: %s\",\n          metadata_string)\n      return constants.SENTINEL_FOR_UNDETERMINED_STEP", "docstring": "Parses the session_run_index value from the event proto.\n\nArgs:\nevent: The event with metadata that contains the session_run_index.\n\nReturns:\nThe int session_run_index value. Or\nconstants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined.", "source": "juraj-google-style"}
{"code": "def prune(self, limit=None, n=None, percentile=None, keep_ends=False):\n    strip = self.copy()\n    if (not (limit or n or percentile)):\n        m = 'You must provide a limit or n or percentile for pruning.'\n        raise StriplogError(m)\n    if limit:\n        prune = [i for (i, iv) in enumerate(strip) if (iv.thickness < limit)]\n    if n:\n        prune = strip.thinnest(n=n, index=True)\n    if percentile:\n        n = np.floor(((len(strip) * percentile) / 100))\n        prune = strip.thinnest(n=n, index=True)\n    if keep_ends:\n        (first, last) = (0, (len(strip) - 1))\n        if (first in prune):\n            prune.remove(first)\n        if (last in prune):\n            prune.remove(last)\n    del strip[prune]\n    return strip", "docstring": "Remove intervals below a certain limit thickness. In place.\n\nArgs:\nlimit (float): Anything thinner than this will be pruned.\nn (int): The n thinnest beds will be pruned.\npercentile (float): The thinnest specified percentile will be\npruned.\nkeep_ends (bool): Whether to keep the first and last, regardless\nof whether they meet the pruning criteria.", "source": "codesearchnet"}
{"code": "def start_task(self, method, *args, **kwargs):\n    thread = threading.Thread(target=method, args=args, kwargs=kwargs)\n    thread.is_daemon = False\n    thread.start()\n    self.threads.append(thread)", "docstring": "Start a task in a separate thread\n\nArgs:\nmethod: the method to start in a separate thread\nargs: Accept args/kwargs arguments", "source": "codesearchnet"}
{"code": "def filepaths_in_dir(path):\n    filepaths = []\n    for (root, directories, filenames) in os.walk(path):\n        for filename in filenames:\n            filepath = os.path.join(root, filename)\n            filepath = filepath.replace(path, '').lstrip('/')\n            filepaths.append(filepath)\n    return filepaths", "docstring": "Find all files in a directory, and return the relative paths to those files.\n\nArgs:\npath (str): the directory path to walk\n\nReturns:\nlist: the list of relative paths to all files inside of ``path`` or its\nsubdirectories.", "source": "codesearchnet"}
{"code": "def save_config(self, lookup_key, config):\n    with self._config_lock:\n        self._configs[lookup_key] = config", "docstring": "Save a configuration to the cache of configs.\n\nArgs:\nlookup_key: A string containing the cache lookup key.\nconfig: The dict containing the configuration to save to the cache.", "source": "codesearchnet"}
{"code": "def delete_keys(d: Dict[(Any, Any)], keys_to_delete: List[Any], keys_to_keep: List[Any]) -> None:\n    for k in keys_to_delete:\n        if ((k in d) and (k not in keys_to_keep)):\n            del d[k]", "docstring": "Deletes keys from a dictionary, in place.\n\nArgs:\nd:\ndictonary to modify\nkeys_to_delete:\nif any keys are present in this list, they are deleted...\nkeys_to_keep:\n... unless they are present in this list.", "source": "codesearchnet"}
{"code": "def __get_distribution_tags(self, client, arn):\n    return {t['Key']: t['Value'] for t in client.list_tags_for_resource(Resource=arn)['Tags']['Items']}", "docstring": "Returns a dict containing the tags for a CloudFront distribution\n\nArgs:\nclient (botocore.client.CloudFront): Boto3 CloudFront client object\narn (str): ARN of the distribution to get tags for\n\nReturns:\n`dict`", "source": "codesearchnet"}
{"code": "def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str], input_channel_dim: Optional[Union[ChannelDimension, str]]=None) -> np.ndarray:\n    if not isinstance(image, np.ndarray):\n        raise TypeError(f'Input image must be of type np.ndarray, got {type(image)}')\n    if input_channel_dim is None:\n        input_channel_dim = infer_channel_dimension_format(image)\n    target_channel_dim = ChannelDimension(channel_dim)\n    if input_channel_dim == target_channel_dim:\n        return image\n    if target_channel_dim == ChannelDimension.FIRST:\n        axes = list(range(image.ndim - 3)) + [image.ndim - 1, image.ndim - 3, image.ndim - 2]\n        image = image.transpose(axes)\n    elif target_channel_dim == ChannelDimension.LAST:\n        axes = list(range(image.ndim - 3)) + [image.ndim - 2, image.ndim - 1, image.ndim - 3]\n        image = image.transpose(axes)\n    else:\n        raise ValueError(f'Unsupported channel dimension format: {channel_dim}')\n    return image", "docstring": "Converts `image` to the channel dimension format specified by `channel_dim`. The input\ncan have arbitrary number of leading dimensions. Only last three dimension will be permuted\nto format the `image`.\n\nArgs:\nimage (`numpy.ndarray`):\nThe image to have its channel dimension set.\nchannel_dim (`ChannelDimension`):\nThe channel dimension format to use.\ninput_channel_dim (`ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input image.\n\nReturns:\n`np.ndarray`: The image with the channel dimension set to `channel_dim`.", "source": "github-repos"}
{"code": "def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name):\n    how_many_bottlenecks = 0\n    ensure_dir_exists(bottleneck_dir)\n    for (label_name, label_lists) in image_lists.items():\n        for category in ['training', 'testing', 'validation']:\n            category_list = label_lists[category]\n            for (index, unused_base_name) in enumerate(category_list):\n                get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, module_name)\n                how_many_bottlenecks += 1\n                if ((how_many_bottlenecks % 100) == 0):\n                    tf.logging.info((str(how_many_bottlenecks) + ' bottleneck files created.'))", "docstring": "Ensures all the training, testing, and validation bottlenecks are cached.\n\nBecause we're likely to read the same image multiple times (if there are no\ndistortions applied during training) it can speed things up a lot if we\ncalculate the bottleneck layer values once for each image during\npreprocessing, and then just read those cached values repeatedly during\ntraining. Here we go through all the images we've found, calculate those\nvalues, and save them off.\n\nArgs:\nsess: The current active TensorFlow Session.\nimage_lists: OrderedDict of training images for each label.\nimage_dir: Root folder string of the subfolders containing the training\nimages.\nbottleneck_dir: Folder string holding cached files of bottleneck values.\njpeg_data_tensor: Input tensor for jpeg data from file.\ndecoded_image_tensor: The output of decoding and resizing the image.\nresized_input_tensor: The input node of the recognition graph.\nbottleneck_tensor: The penultimate output layer of the graph.\nmodule_name: The name of the image module being used.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def _new_named_tuple(self, class_name: str, fields: list[tuple[str, Any]]) -> pytd.Class:\n    class_base = pytd.NamedType('typing.NamedTuple')\n    class_constants = tuple((pytd.Constant(n, t) for n, t in fields))\n    return pytd.Class(name=class_name, keywords=(), bases=(class_base,), methods=(), constants=class_constants, decorators=(), classes=(), slots=None, template=())", "docstring": "Generates a pytd class for a named tuple.\n\nArgs:\nclass_name: The name of the generated class\nfields: A list of (name, type) tuples.\n\nReturns:\nA generated class that describes the named tuple.", "source": "github-repos"}
{"code": "def restore_app_connection(self, port=None):\n    self.host_port = (port or utils.get_available_host_port())\n    self._retry_connect()\n    self.ed = self._start_event_client()", "docstring": "Restores the sl4a after device got disconnected.\n\nInstead of creating new instance of the client:\n- Uses the given port (or find a new available host_port if none is\ngiven).\n- Tries to connect to remote server with selected port.\n\nArgs:\nport: If given, this is the host port from which to connect to remote\ndevice port. If not provided, find a new available port as host\nport.\n\nRaises:\nAppRestoreConnectionError: When the app was not able to be started.", "source": "codesearchnet"}
{"code": "def decode_list(cls, obj, element_type):\n    if (not isinstance(obj, list)):\n        raise Exception('expected a python list')\n    return list(map((lambda x: cls.do_decode(x, element_type)), obj))", "docstring": "Decodes json into a list, handling conversion of the elements.\n\nArgs:\nobj: the json object to decode\nelement_type: a class object which is the conjure type of\nthe elements in this list.\nReturns:\nA python list where the elements are instances of type\nelement_type.", "source": "codesearchnet"}
{"code": "def mean(self):\n    chunk_iter = chunks(self.times, self.bestof)\n    times = list(map(min, chunk_iter))\n    mean = (sum(times) / len(times))\n    return mean", "docstring": "The mean of the best results of each trial.\n\nReturns:\nfloat: mean of measured seconds\n\nNote:\nThis is typically less informative than simply looking at the min.\nIt is recommended to use min as the expectation value rather than\nmean in most cases.\n\nExample:\n>>> import math\n>>> self = Timerit(num=10, verbose=0)\n>>> self.call(math.factorial, 50)\n>>> assert self.mean() > 0", "source": "codesearchnet"}
{"code": "def __init__(self, src_file, sync_dst_file, *async_dst_files):\n        \n        \n        self._origin_stack = '\\n'.join(traceback.format_stack())\n        self.tee_file = None  \n        self._src_file = src_file\n        self._sync_dst_file = sync_dst_file\n        self._async_dst_files = list(async_dst_files)\n        self._write_queues = []\n        self._write_threads = []\n        for f in async_dst_files:\n            q = queue.Queue()\n\n            t = spawn_reader_writer(q.get, functools.partial(self._write, f))\n            self._write_queues.append(q)\n            self._write_threads.append(t)\n\n        src_fd = self._src_file.fileno()\n\n        def read():\n            \n            \n            \n            \n            \n            try:\n                return os.read(src_fd, 1024)\n            except OSError:\n                \n                \n                return six.b('')\n\n        self._read_thread = spawn_reader_writer(read, self._write_to_all)", "docstring": "Constructor.\n\nArgs:\nsrc_file: file to read from.\nsync_dst_file: file to write to synchronously when `self.write()` is\ncalled.\nasync_dst_files: files to write to asynchronously", "source": "juraj-google-style"}
{"code": "def __x_google_quota_definitions_descriptor(self, limit_definitions):\n    \n    if not limit_definitions:\n      return None\n\n    definitions_list = [{\n        'name': ld.metric_name,\n        'metric': ld.metric_name,\n        'unit': '1/min/{project}',\n        'values': {'STANDARD': ld.default_limit},\n        'displayName': ld.display_name,\n    } for ld in limit_definitions]\n\n    metrics = [{\n        'name': ld.metric_name,\n        'valueType': 'INT64',\n        'metricKind': 'GAUGE',\n    } for ld in limit_definitions]\n\n    return {\n        'quota': {'limits': definitions_list},\n        'metrics': metrics,\n    }", "docstring": "Describes the quota limit definitions for an API.\n\nArgs:\nlimit_definitions: List of endpoints.LimitDefinition tuples\n\nReturns:\nA dict descriptor of the API's quota limit definitions.", "source": "juraj-google-style"}
{"code": "def _reshape(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n    fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)\n    query_layer = fused_qkv[..., 0, :].transpose(1, 2)\n    key_layer = fused_qkv[..., 1, :].transpose(1, 2)\n    value_layer = fused_qkv[..., 2, :].transpose(1, 2)\n    return (query_layer, key_layer, value_layer)", "docstring": "Split the last dimension into (num_heads, head_dim) and reshapes to (bs, heads, len, dim) shape\nwithout making any copies, results share same memory storage as `fused_qkv`\n\nArgs:\nfused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]\n\nReturns:\nquery: [batch_size, num_heads, seq_length, head_dim]\nkey: [batch_size, num_heads, seq_length, head_dim]\nvalue: [batch_size, num_heads, seq_length, head_dim]", "source": "github-repos"}
{"code": "def generate_code(meta, prefix=None, node=False, min=False):\n    if isinstance(meta, dict):\n        (url_prefix, auth_header, resources) = parse_meta(meta)\n    else:\n        (url_prefix, auth_header, resources) = meta\n    if (prefix is not None):\n        url_prefix = prefix\n    core = render_core(url_prefix, auth_header, resources)\n    if min:\n        filename = 'res.web.min.js'\n    else:\n        filename = 'res.web.js'\n    if node:\n        filename = 'res.node.js'\n    base = read_file(filename)\n    return base.replace('\"", "docstring": "Generate res.js\n\nArgs:\nmeta: tuple(url_prefix, auth_header, resources) or metadata of API\nReturns:\nres.js source code", "source": "codesearchnet"}
{"code": "def draw_ID(ID, idx_array, drawID_raster):\n    \n    for i in range(idx_array.shape[0]):\n        x = idx_array[i, 0]\n        y = idx_array[i, 1]\n        drawID_raster[x, y] = ID\n    return drawID_raster", "docstring": "Draw every pixel's ID\n\nAfter computing all given value's pixels connectivity, every pixel will\nhave an ID. Then we need to draw these pixels' ID on the undrawed\nrasterfile.\n\nArgs:\nID: given ID value\nidx_array: pixels position set which have the given ID value\ndrawID_raster: undrawed rasterfile\n\nReturn:\ndrawID_raster: rasterfile after drawing ID", "source": "juraj-google-style"}
{"code": "def output_compressed_dinf(dinfflowang, compdinffile, weightfile):\n        \n        dinf_r = RasterUtilClass.read_raster(dinfflowang)\n        data = dinf_r.data\n        xsize = dinf_r.nCols\n        ysize = dinf_r.nRows\n        nodata_value = dinf_r.noDataValue\n\n        cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3)\n        updated_angle, dir_code, weight = cal_dir_code(data, nodata_value)\n\n        RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize, updated_angle,\n                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)\n        RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,\n                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Int16)\n        RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,\n                                         dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)", "docstring": "Output compressed Dinf flow direction and weight to raster file\nArgs:\ndinfflowang: Dinf flow direction raster file\ncompdinffile: Compressed D8 flow code\nweightfile: The correspond weight", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.CreateJob = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/CreateJob\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.CreateJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.GetJob = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/GetJob\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.GetJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.UpdateJob = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/UpdateJob\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.UpdateJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__pb2.Job.FromString,\n        )\n        self.DeleteJob = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/DeleteJob\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.DeleteJobRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.ListJobs = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/ListJobs\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.ListJobsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.ListJobsResponse.FromString,\n        )\n        self.BatchDeleteJobs = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/BatchDeleteJobs\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.BatchDeleteJobsRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.SearchJobs = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/SearchJobs\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.SearchJobsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.SearchJobsResponse.FromString,\n        )\n        self.SearchJobsForAlert = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.JobService/SearchJobsForAlert\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.SearchJobsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_job__service__pb2.SearchJobsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def load_identity_signer(key_dir, key_name):\n    key_path = os.path.join(key_dir, '{}.priv'.format(key_name))\n    if (not os.path.exists(key_path)):\n        raise LocalConfigurationError('No such signing key file: {}'.format(key_path))\n    if (not os.access(key_path, os.R_OK)):\n        raise LocalConfigurationError('Key file is not readable: {}'.format(key_path))\n    LOGGER.info('Loading signing key: %s', key_path)\n    try:\n        with open(key_path, 'r') as key_file:\n            private_key_str = key_file.read().strip()\n    except IOError as e:\n        raise LocalConfigurationError('Could not load key file: {}'.format(str(e)))\n    try:\n        private_key = Secp256k1PrivateKey.from_hex(private_key_str)\n    except signing.ParseError as e:\n        raise LocalConfigurationError('Invalid key in file {}: {}'.format(key_path, str(e)))\n    context = signing.create_context('secp256k1')\n    crypto_factory = CryptoFactory(context)\n    return crypto_factory.new_signer(private_key)", "docstring": "Loads a private key from the key directory, based on a validator's\nidentity.\n\nArgs:\nkey_dir (str): The path to the key directory.\nkey_name (str): The name of the key to load.\n\nReturns:\nSigner: the cryptographic signer for the key", "source": "codesearchnet"}
{"code": "def linear(x):\n    return x", "docstring": "Linear activation function (pass-through).\n\nA \"linear\" activation is an identity function:\nit returns the input, unmodified.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def register_model(cls, model):\n    rest_name = model.rest_name\n    resource_name = model.resource_name\n    if (rest_name not in cls._model_rest_name_registry):\n        cls._model_rest_name_registry[rest_name] = [model]\n        cls._model_resource_name_registry[resource_name] = [model]\n    elif (model not in cls._model_rest_name_registry[rest_name]):\n        cls._model_rest_name_registry[rest_name].append(model)\n        cls._model_resource_name_registry[resource_name].append(model)", "docstring": "Register a model class according to its remote name\n\nArgs:\nmodel: the model to register", "source": "codesearchnet"}
{"code": "def assert_text(self, *args, **kwargs):\n    query = TextQuery(*args, **kwargs)\n\n    @self.synchronize(wait=query.wait)\n    def assert_text():\n        count = query.resolve_for(self)\n        if (not (matches_count(count, query.options) and ((count > 0) or expects_none(query.options)))):\n            raise ExpectationNotMet(query.failure_message)\n        return True\n    return assert_text()", "docstring": "Asserts that the page or current node has the given text content, ignoring any HTML tags.\n\nArgs:\n*args: Variable length argument list for :class:`TextQuery`.\n**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "codesearchnet"}
{"code": "def _ragged_stack_concat_axis_0(rt_inputs, stack_values):\n    flat_values = [rt.flat_values for rt in rt_inputs]\n    concatenated_flat_values = array_ops.concat(flat_values, axis=0)\n    nested_splits = [rt.nested_row_splits for rt in rt_inputs]\n    ragged_rank = rt_inputs[0].ragged_rank\n    concatenated_nested_splits = [_concat_ragged_splits([ns[dim] for ns in nested_splits]) for dim in range(ragged_rank)]\n    if stack_values:\n        stack_lengths = array_ops_stack.stack([rt.nrows() for rt in rt_inputs])\n        stack_splits = ragged_util.lengths_to_splits(stack_lengths)\n        concatenated_nested_splits.insert(0, stack_splits)\n    return ragged_tensor.RaggedTensor.from_nested_row_splits(concatenated_flat_values, concatenated_nested_splits, validate=False)", "docstring": "Helper function to concatenate or stack ragged tensors along axis 0.\n\nArgs:\nrt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank.\nstack_values: Boolean.  If true, then stack values; otherwise, concatenate\nthem.\n\nReturns:\nA RaggedTensor.", "source": "github-repos"}
{"code": "def add_argument(self, parser, bootstrap=False):\n    if self.cli_expose:\n        args = self._get_argparse_names(parser.prefix_chars)\n        kwargs = self._get_argparse_kwargs(bootstrap)\n        parser.add_argument(*args, **kwargs)", "docstring": "Add this item as an argument to the given parser.\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add this item to.\nbootstrap: Flag to indicate whether you only want to mark this\nitem as required or not", "source": "codesearchnet"}
{"code": "def setup(self, hosts, artifacts, extra_artifacts, use_tsk, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):\n    super(GRRArtifactCollector, self).setup(reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify)\n    if (artifacts is not None):\n        self.artifacts = [item.strip() for item in artifacts.strip().split(',')]\n    if (extra_artifacts is not None):\n        self.extra_artifacts = [item.strip() for item in extra_artifacts.strip().split(',')]\n    self.hostnames = [item.strip() for item in hosts.strip().split(',')]\n    self.use_tsk = use_tsk", "docstring": "Initializes a GRR artifact collector.\n\nArgs:\nhosts: Comma-separated list of hostnames to launch the flow on.\nartifacts: list of GRR-defined artifacts.\nextra_artifacts: list of GRR-defined artifacts to append.\nuse_tsk: toggle for use_tsk flag on GRR flow.\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "codesearchnet"}
{"code": "def fasta_format_check(fasta_path, logger):\n    header_count = 0\n    line_count = 1\n    nt_count = 0\n    with open(fasta_path) as f:\n        for l in f:\n            l = l.strip()\n            if (l == ''):\n                continue\n            if (l[0] == '>'):\n                header_count += 1\n                continue\n            if ((header_count == 0) and (l[0] != '>')):\n                error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with \">\" expected.'.format(line_count=line_count)\n                logger.error(error_msg)\n                raise Exception(error_msg)\n            non_nucleotide_chars_in_line = (set(l) - VALID_NUCLEOTIDES)\n            if (len(non_nucleotide_chars_in_line) > 0):\n                error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}'.format(line=line_count, non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))\n                logger.error(error_msg)\n                raise Exception(error_msg)\n            nt_count += len(l)\n            line_count += 1\n        if (nt_count == 0):\n            error_msg = 'File \"{}\" does not contain any nucleotide sequence.'.format(fasta_path)\n            logger.error(error_msg)\n            raise Exception(error_msg)\n        logger.info('Valid FASTA format \"{}\" ({} bp)'.format(fasta_path, nt_count))", "docstring": "Check that a file is valid FASTA format.\n\n- First non-blank line needs to begin with a '>' header character.\n- Sequence can only contain valid IUPAC nucleotide characters\n\nArgs:\nfasta_str (str): FASTA file contents string\n\nRaises:\nException: If invalid FASTA format", "source": "codesearchnet"}
{"code": "def install_app(app, target='/Applications/'):\n    if (target[(- 4):] != '.app'):\n        if (app[(- 1):] == '/'):\n            base_app = os.path.basename(app[:(- 1)])\n        else:\n            base_app = os.path.basename(app)\n        target = os.path.join(target, base_app)\n    if (not (app[(- 1)] == '/')):\n        app += '/'\n    cmd = 'rsync -a --delete \"{0}\" \"{1}\"'.format(app, target)\n    return __salt__['cmd.run'](cmd)", "docstring": "Install an app file by moving it into the specified Applications directory\n\nArgs:\napp (str): The location of the .app file\ntarget (str): The target in which to install the package to\nDefault is ''/Applications/''\n\nReturns:\nstr: The results of the rsync command\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' macpackage.install_app /tmp/tmp.app /Applications/", "source": "codesearchnet"}
{"code": "def Push(self, source_file, device_filename, mtime='0', timeout_ms=None, progress_callback=None, st_mode=None):\n    if isinstance(source_file, str):\n        if os.path.isdir(source_file):\n            self.Shell(('mkdir ' + device_filename))\n            for f in os.listdir(source_file):\n                self.Push(os.path.join(source_file, f), ((device_filename + '/') + f), progress_callback=progress_callback)\n            return\n        source_file = open(source_file, 'rb')\n    with source_file:\n        connection = self.protocol_handler.Open(self._handle, destination=b'sync:', timeout_ms=timeout_ms)\n        kwargs = {}\n        if (st_mode is not None):\n            kwargs['st_mode'] = st_mode\n        self.filesync_handler.Push(connection, source_file, device_filename, mtime=int(mtime), progress_callback=progress_callback, **kwargs)\n    connection.Close()", "docstring": "Push a file or directory to the device.\n\nArgs:\nsource_file: Either a filename, a directory or file-like object to push to\nthe device.\ndevice_filename: Destination on the device to write to.\nmtime: Optional, modification time to set on the file.\ntimeout_ms: Expected timeout for any part of the push.\nst_mode: stat mode for filename\nprogress_callback: callback method that accepts filename, bytes_written and total_bytes,\ntotal_bytes will be -1 for file-like objects", "source": "codesearchnet"}
{"code": "async def snap(self, user=None, view=None):\n        \n\n        if view is None:\n            view = self.view\n\n        if user is None:\n            user = self.auth.getUserByName('root')\n\n        snap = await view.snap(user)\n\n        return snap", "docstring": "Return a transaction object for the default view.\n\nArgs:\nwrite (bool): Set to True for a write transaction.\n\nReturns:\n(synapse.lib.snap.Snap)\n\nNOTE: This must be used in a with block.", "source": "juraj-google-style"}
{"code": "async def evaluate_trained_model(state):\n    return (await evaluate_model(state.train_model_path, state.best_model_path, os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed))", "docstring": "Evaluate the most recently trained model against the current best model.\n\nArgs:\nstate: the RL loop State instance.", "source": "codesearchnet"}
{"code": "def DocumentVersionsRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    \n    \n    version_path = self._GetRowValue(query_hash, row, 'version_path')\n    path = self._GetRowValue(query_hash, row, 'path')\n\n    paths = version_path.split('/')\n    if len(paths) < 2 or not paths[1].isdigit():\n      user_sid = ''\n    else:\n      user_sid = paths[1]\n    version_path = self.ROOT_VERSION_PATH + version_path\n    path, _, _ = path.rpartition('/')\n\n    event_data = MacDocumentVersionsEventData()\n    \n    event_data.last_time = self._GetRowValue(query_hash, row, 'last_time')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.path = path\n    event_data.query = query\n    \n    event_data.user_sid = '{0!s}'.format(user_sid)\n    event_data.version_path = version_path\n\n    timestamp = self._GetRowValue(query_hash, row, 'version_time')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a document versions row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def get_dimension(self, dimension, default=None, strict=False):\n    if ((dimension is not None) and (not isinstance(dimension, (int, basestring, Dimension)))):\n        raise TypeError(('Dimension lookup supports int, string, and Dimension instances, cannot lookup Dimensions using %s type.' % type(dimension).__name__))\n    all_dims = self.dimensions()\n    if isinstance(dimension, int):\n        if (0 <= dimension < len(all_dims)):\n            return all_dims[dimension]\n        elif strict:\n            raise KeyError(('Dimension %r not found' % dimension))\n        else:\n            return default\n    dimension = dimension_name(dimension)\n    name_map = {dim.name: dim for dim in all_dims}\n    name_map.update({dim.label: dim for dim in all_dims})\n    name_map.update({util.dimension_sanitizer(dim.name): dim for dim in all_dims})\n    if (strict and (dimension not in name_map)):\n        raise KeyError(('Dimension %r not found.' % dimension))\n    else:\n        return name_map.get(dimension, default)", "docstring": "Get a Dimension object by name or index.\n\nArgs:\ndimension: Dimension to look up by name or integer index\ndefault (optional): Value returned if Dimension not found\nstrict (bool, optional): Raise a KeyError if not found\n\nReturns:\nDimension object for the requested dimension or default", "source": "codesearchnet"}
{"code": "def add_redistribution(self, protocol, route_map_name=None):\n    protocols = ['bgp', 'rip', 'static', 'connected']\n    if (protocol not in protocols):\n        raise ValueError('redistributed protocol must bebgp, connected, rip or static')\n    if (route_map_name is None):\n        cmd = 'redistribute {}'.format(protocol)\n    else:\n        cmd = 'redistribute {} route-map {}'.format(protocol, route_map_name)\n    return self.configure_ospf(cmd)", "docstring": "Adds a protocol redistribution to OSPF\n\nArgs:\nprotocol (str):  protocol to redistribute\nroute_map_name (str): route-map to be used to\nfilter the protocols\nReturns:\nbool: True if the command completes successfully\nException:\nValueError:  This will be raised if the protocol pass is not one\nof the following: [rip, bgp, static, connected]", "source": "codesearchnet"}
{"code": "def send(self, content_type='HTML'):\n        \n\n        payload = self.api_representation(content_type)\n\n        endpoint = 'https:\n        self._make_api_call('post', endpoint=endpoint, data=json.dumps(payload))", "docstring": "Takes the recipients, body, and attachments of the Message and sends.\n\nArgs:\ncontent_type: Can either be 'HTML' or 'Text', defaults to HTML.", "source": "juraj-google-style"}
{"code": "def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):\n    (_, _, _, num_channels) = common_layers.shape_list(tensor1)\n    diff_sq_sum = tf.reduce_sum(((tensor1 - tensor2) ** 2), axis=(0, 1, 2))\n    (_, feature_ranks) = tf.math.top_k(diff_sq_sum, k=rank)\n    feature_rank = feature_ranks[(- 1)]\n    channel_inds = tf.range(num_channels, dtype=tf.int32)\n    channel_mask = tf.equal(channel_inds, feature_rank)\n    ones_t = tf.ones(num_channels, dtype=tf.float32)\n    zeros_t = tf.zeros(num_channels, dtype=tf.float32)\n    interp_tensors = []\n    for coeff in coeffs:\n        curr_coeff = tf.where(channel_mask, (coeff * ones_t), zeros_t)\n        interp_tensor = (tensor1 + (curr_coeff * (tensor2 - tensor1)))\n        interp_tensors.append(interp_tensor)\n    return tf.concat(interp_tensors, axis=0)", "docstring": "Linearly interpolate channel at \"rank\" between two tensors.\n\nThe channels are ranked according to their L2 norm between tensor1[channel]\nand tensor2[channel].\n\nArgs:\ntensor1: 4-D Tensor, NHWC\ntensor2: 4-D Tensor, NHWC\ncoeffs: list of floats.\nrank: integer.\nReturns:\ninterp_latents: list of interpolated 4-D Tensors, shape=(NHWC)", "source": "codesearchnet"}
{"code": "def get_roles(client):\n        \n        done = False\n        marker = None\n        roles = []\n\n        while not done:\n            if marker:\n                response = client.list_roles(Marker=marker)\n            else:\n                response = client.list_roles()\n\n            roles += response['Roles']\n\n            if response['IsTruncated']:\n                marker = response['Marker']\n            else:\n                done = True\n\n        return roles", "docstring": "Returns a list of all the roles for an account. Returns a list containing all the roles for the account.\n\nArgs:\nclient (:obj:`boto3.session.Session`): A boto3 Session object\n\nReturns:\n:obj:`list` of `dict`", "source": "juraj-google-style"}
{"code": "def tag_versions(repo_path):\n    \n    repo = dulwich.repo.Repo(repo_path)\n    tags = get_tags(repo)\n    maj_version = 0\n    feat_version = 0\n    fix_version = 0\n    last_maj_version = 0\n    last_feat_version = 0\n    result = []\n\n    for commit_sha, children in reversed(\n            get_children_per_first_parent(repo_path).items()\n    ):\n        commit = get_repo_object(repo, commit_sha)\n        maj_version, feat_version, fix_version = get_version(\n            commit=commit,\n            tags=tags,\n            maj_version=maj_version,\n            feat_version=feat_version,\n            fix_version=fix_version,\n            children=children,\n        )\n        if (\n            last_maj_version != maj_version or\n            last_feat_version != feat_version\n        ):\n            last_maj_version = maj_version\n            last_feat_version = feat_version\n            tag_name = 'refs/tags/v%d.%d' % (maj_version, feat_version)\n            if ON_PYTHON3:\n                repo[str.encode(tag_name)] = commit\n            else:\n                repo[tag_name] = commit\n\n            result.append(\n                'v%d.%d -> %s' % (maj_version, feat_version, commit_sha)\n            )\n\n    return '\\n'.join(result)", "docstring": "Given a repo will add a tag for each major version.\n\nArgs:\nrepo_path(str): path to the git repository to tag.", "source": "juraj-google-style"}
{"code": "def sholl_crossings(neurites, center, radii):\n\n    def _count_crossings(neurite, radius):\n        'count_crossings of segments in neurite with radius'\n        r2 = (radius ** 2)\n        count = 0\n        for (start, end) in iter_segments(neurite):\n            (start_dist2, end_dist2) = (morphmath.point_dist2(center, start), morphmath.point_dist2(center, end))\n            count += int(((start_dist2 <= r2 <= end_dist2) or (end_dist2 <= r2 <= start_dist2)))\n        return count\n    return np.array([sum((_count_crossings(neurite, r) for neurite in iter_neurites(neurites))) for r in radii])", "docstring": "calculate crossings of neurites\n\nArgs:\nnrn(morph): morphology on which to perform Sholl analysis\nradii(iterable of floats): radii for which crossings will be counted\n\nReturns:\nArray of same length as radii, with a count of the number of crossings\nfor the respective radius", "source": "codesearchnet"}
{"code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n    return self._get_sparse_tensors_helper(sparse_tensors)", "docstring": "Returns an IdWeightPair.\n\n`IdWeightPair` is a pair of `SparseTensor`s which represents ids and\nweights.\n\n`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`\n`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a\n`SparseTensor` of `float` or `None` to indicate all weights should be\ntaken to be 1. If specified, `weight_tensor` must have exactly the same\nshape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing\noutput of a `VarLenFeature` which is a ragged matrix.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.", "source": "github-repos"}
{"code": "def _is_valid_netmask(self, netmask):\n    mask = netmask.split('.')\n    if (len(mask) == 4):\n        try:\n            for x in mask:\n                if (int(x) not in self._valid_mask_octets):\n                    return False\n        except ValueError:\n            return False\n        for (idx, y) in enumerate(mask):\n            if ((idx > 0) and (y > mask[(idx - 1)])):\n                return False\n        return True\n    try:\n        netmask = int(netmask)\n    except ValueError:\n        return False\n    return (0 <= netmask <= self._max_prefixlen)", "docstring": "Verify that the netmask is valid.\n\nArgs:\nnetmask: A string, either a prefix or dotted decimal\nnetmask.\n\nReturns:\nA boolean, True if the prefix represents a valid IPv4\nnetmask.", "source": "codesearchnet"}
{"code": "def normalize_url(base_url, rel_url):\n    if (not rel_url):\n        return None\n    if (not is_absolute_url(rel_url)):\n        rel_url = rel_url.replace('../', '/')\n        if ((not base_url.endswith('/')) and (not rel_url.startswith('/'))):\n            return ((base_url + '/') + rel_url.replace('../', '/'))\n        return (base_url + rel_url.replace('../', '/'))\n    return rel_url", "docstring": "Normalize the `url` - from relative, create absolute URL.\n\nArgs:\nbase_url (str): Domain with ``protocol://`` string\nrel_url (str): Relative or absolute url.\n\nReturns:\nstr/None: Normalized URL or None if `url` is blank.", "source": "codesearchnet"}
{"code": "def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n    assign = state_ops.assign_add(self._variable, delta, use_locking=use_locking, name=name)\n    if read_value:\n        return assign\n    return assign.op", "docstring": "Adds a value to this variable.\n\nThis is essentially a shortcut for `assign_add(self, delta)`.\n\nArgs:\ndelta: A `Tensor`. The value to add to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: The name of the operation to be created\nread_value: if True, will return something which evaluates to the new\nvalue of the variable; if False will return the assign op.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe addition has completed.", "source": "github-repos"}
{"code": "def create_config(sections, section_contents):\n    \n    sections_length, section_contents_length = len(sections), len(section_contents)\n    if sections_length != section_contents_length:\n        raise ValueError(\"Mismatch between argument lengths.\\n\"\n                         \"len(sections) = {}\\n\"\n                         \"len(section_contents) = {}\"\n                         .format(sections_length, section_contents_length))\n    config = configparser.ConfigParser()\n    for section, section_content in zip(sections, section_contents):\n        config[section] = section_content\n    return config", "docstring": "Create a config file from the provided sections and key value pairs.\n\nArgs:\nsections (List[str]): A list of section keys.\nkey_value_pairs (Dict[str, str]): A list of of dictionaries. Must be as long as\nthe list of sections. That is to say, if there are two sections, there should be two\ndicts.\nReturns:\nconfigparser.ConfigParser: A ConfigParser.\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def connect_to(name):\n    kwargs = config_for(name)\n    if (not kwargs):\n        raise AttributeError('connection profile not found in config')\n    node = connect(return_node=True, **kwargs)\n    return node", "docstring": "Creates a node instance based on an entry from the config\n\nThis function will retrieve the settings for the specified connection\nfrom the config and return a Node instance.  The configuration must\nbe loaded prior to calling this function.\n\nArgs:\nname (str): The name of the connection to load from the config.  The\nname argument should be the connection name (everything right of\nthe colon from the INI file)\n\nReturns:\nThis function will return an instance of Node with the settings\nfrom the config instance.\n\nRaises:\nAttributeError: raised if the specified configuration name is not\nfound in the loaded configuration", "source": "codesearchnet"}
{"code": "def fileToMD5(filename, block_size=256*128, binary=False):\n    \n    md5 = hashlib.md5()\n    with open(filename,'rb') as f:\n        for chunk in iter(lambda: f.read(block_size), b''):\n             md5.update(chunk)\n    if not binary:\n        return md5.hexdigest()\n    return md5.digest()", "docstring": "A function that calculates the MD5 hash of a file.\n\nArgs:\n-----\nfilename: Path to the file.\nblock_size: Chunks of suitable size. Block size directly depends on\nthe block size of your filesystem to avoid performances issues.\nBlocks of 4096 octets (Default NTFS).\nbinary: A boolean representing whether the returned info is in binary\nformat or not.\n\nReturns:\n--------\nstring: The  MD5 hash of the file.", "source": "juraj-google-style"}
{"code": "def _sort_records_map(records):\n  \n  ctx = context.get()\n  l = len(records)\n  key_records = [None] * l\n\n  logging.debug(\"Parsing\")\n  for i in range(l):\n    proto = kv_pb.KeyValue()\n    proto.ParseFromString(records[i])\n    key_records[i] = (proto.key(), records[i])\n\n  logging.debug(\"Sorting\")\n  key_records.sort(cmp=_compare_keys)\n\n  logging.debug(\"Writing\")\n  mapper_spec = ctx.mapreduce_spec.mapper\n  params = input_readers._get_params(mapper_spec)\n  bucket_name = params.get(\"bucket_name\")\n  filename = (ctx.mapreduce_spec.name + \"/\" + ctx.mapreduce_id + \"/output-\" +\n              ctx.shard_id + \"-\" + str(int(time.time())))\n  full_filename = \"/%s/%s\" % (bucket_name, filename)\n  filehandle = cloudstorage.open(full_filename, mode=\"w\")\n  with output_writers.GCSRecordsPool(filehandle, ctx=ctx) as pool:\n    for key_record in key_records:\n      pool.append(key_record[1])\n\n  logging.debug(\"Finalizing\")\n  filehandle.close()\n\n  entity = _OutputFile(key_name=full_filename,\n                       parent=_OutputFile.get_root_key(ctx.mapreduce_id))\n  entity.put()", "docstring": "Map function sorting records.\n\nConverts records to KeyValue protos, sorts them by key and writes them\ninto new GCS file. Creates _OutputFile entity to record resulting\nfile name.\n\nArgs:\nrecords: list of records which are serialized KeyValue protos.", "source": "juraj-google-style"}
{"code": "def DEFINE_boolean(name, default, help, flag_values=_flagvalues.FLAGS, module_name=None, **args):\n    DEFINE_flag(_flag.BooleanFlag(name, default, help, **args), flag_values, module_name)", "docstring": "Registers a boolean flag.\n\nSuch a boolean flag does not take an argument.  If a user wants to\nspecify a false value explicitly, the long option beginning with 'no'\nmust be used: i.e. --noflag\n\nThis flag will have a value of None, True or False.  None is possible\nif default=None and the user does not specify the flag on the command\nline.\n\nArgs:\nname: str, the flag name.\ndefault: bool|str|None, the default value of the flag.\nhelp: str, the help message.\nflag_values: FlagValues, the FlagValues instance with which the flag will\nbe registered. This should almost never need to be overridden.\nmodule_name: str, the name of the Python module declaring this flag.\nIf not provided, it will be computed using the stack trace of this call.\n**args: dict, the extra keyword args that are passed to Flag __init__.", "source": "codesearchnet"}
{"code": "def case(pred_fn_pairs, default=None, exclusive=False, name='smart_case'):\n    return control_flow_ops._case_helper(cond, pred_fn_pairs, default, exclusive, name, allow_python_preds=True)", "docstring": "Like tf.case, except attempts to statically evaluate predicates.\n\nIf any predicate in `pred_fn_pairs` is a bool or has a constant value, the\nassociated callable will be called or omitted depending on its value.\nOtherwise this functions like tf.case.\n\nArgs:\npred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a\ncallable which returns a list of tensors.\ndefault: Optional callable that returns a list of tensors.\nexclusive: True iff at most one predicate is allowed to evaluate to `True`.\nname: A name for this operation (optional).\n\nReturns:\nThe tensors returned by the first pair whose predicate evaluated to True, or\nthose returned by `default` if none does.\n\nRaises:\nTypeError: If `pred_fn_pairs` is not a list/dictionary.\nTypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\nTypeError: If `fns[i]` is not callable for any i, or `default` is not\ncallable.", "source": "codesearchnet"}
{"code": "def mesh_axis_to_tensor_axis(self, mesh_ndims):\n    \n    ta2ma = self._tensor_axis_to_mesh_axis\n    return tuple(\n        [ta2ma.index(mesh_axis) if mesh_axis in ta2ma else None\n         for mesh_axis in xrange(mesh_ndims)])", "docstring": "For each mesh axis, which Tensor axis maps to it.\n\nArgs:\nmesh_ndims: int.\n\nReturns:\nTuple of optional integers, with length mesh_ndims.", "source": "juraj-google-style"}
{"code": "def FromString(cls, range_string):\n    disjuncts = None\n    range_string = range_string.strip()\n    if (len(range_string) == 0):\n        raise ArgumentError('You must pass a finite string to SemanticVersionRange.FromString', range_string=range_string)\n    if ((len(range_string) == 1) and (range_string[0] == '*')):\n        conj = (None, None, True, True)\n        disjuncts = [[conj]]\n    elif (range_string[0] == '^'):\n        ver = range_string[1:]\n        try:\n            ver = SemanticVersion.FromString(ver)\n        except DataError as err:\n            raise ArgumentError('Could not parse ^X.Y.Z version', parse_error=str(err), range_string=range_string)\n        lower = ver\n        upper = ver.inc_first_nonzero()\n        conj = (lower, upper, True, False)\n        disjuncts = [[conj]]\n    elif (range_string[0] == '='):\n        ver = range_string[1:]\n        try:\n            ver = SemanticVersion.FromString(ver)\n        except DataError as err:\n            raise ArgumentError('Could not parse =X.Y.Z version', parse_error=str(err), range_string=range_string)\n        conj = (ver, ver, True, True)\n        disjuncts = [[conj]]\n    if (disjuncts is None):\n        raise ArgumentError('Invalid range specification that could not be parsed', range_string=range_string)\n    return SemanticVersionRange(disjuncts)", "docstring": "Parse a version range string into a SemanticVersionRange\n\nCurrently, the only possible range strings are:\n\n^X.Y.Z - matches all versions with the same leading nonzero digit\ngreater than or equal the given range.\n* - matches everything\n=X.Y.Z - matches only the exact version given\n\nArgs:\nrange_string (string): A string specifying the version range\n\nReturns:\nSemanticVersionRange: The resulting version range object\n\nRaises:\nArgumentError: if the range string does not define a valid range.", "source": "codesearchnet"}
{"code": "def ValidateFeedStartAndExpirationDates(self, problems, first_date, last_date,\n                                          first_date_origin, last_date_origin,\n                                          today):\n    \n    warning_cutoff = today + datetime.timedelta(days=60)\n    if last_date < warning_cutoff:\n        problems.ExpirationDate(time.mktime(last_date.timetuple()),\n                                last_date_origin)\n\n    if first_date > today:\n      problems.FutureService(time.mktime(first_date.timetuple()),\n                             first_date_origin)", "docstring": "Validate the start and expiration dates of the feed.\nIssue a warning if it only starts in the future, or if\nit expires within 60 days.\n\nArgs:\nproblems: The problem reporter object\nfirst_date: A date object representing the first day the feed is active\nlast_date: A date object representing the last day the feed is active\ntoday: A date object representing the date the validation is being run on\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def update_variant(self, variant_obj):\n        \n        LOG.debug('Updating variant %s', variant_obj.get('simple_id'))\n\n        new_variant = self.variant_collection.find_one_and_replace(\n            {'_id': variant_obj['_id']},\n            variant_obj,\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n        return new_variant", "docstring": "Update one variant document in the database.\n\nThis means that the variant in the database will be replaced by variant_obj.\n\nArgs:\nvariant_obj(dict)\n\nReturns:\nnew_variant(dict)", "source": "juraj-google-style"}
{"code": "def available_credit(context):\n    notes = commerce.CreditNote.unclaimed().filter(invoice__user=user_for_context(context))\n    ret = (notes.values('amount').aggregate(Sum('amount'))['amount__sum'] or 0)\n    return (0 - ret)", "docstring": "Calculates the sum of unclaimed credit from this user's credit notes.\n\nReturns:\nDecimal: the sum of the values of unclaimed credit notes for the\ncurrent user.", "source": "codesearchnet"}
{"code": "def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank):\n    tp_state_dicts = []\n    for i in range(tp_size):\n        sub_dir_name = f'mp_rank_{i:02d}' if pp_size == 1 else f'mp_rank_{i:02d}_{pp_rank:03d}'\n        for checkpoint_name in ['model_optim_rng.pt', 'model_rng.pt']:\n            checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name)\n            if os.path.isfile(checkpoint_path):\n                break\n        check_torch_load_is_safe()\n        state_dict = torch.load(checkpoint_path, map_location='cpu', weights_only=True)\n        tp_state_dicts.append(state_dict)\n    return tp_state_dicts", "docstring": "Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline\nparallel size and pipeline parallel rank.\n\nArgs:\nargs (argparse.Namespace): the arguments to the script\ntp_size (int): the tensor parallel size\npp_size (int): the pipeline parallel size\npp_rank (int): the pipeline parallel rank", "source": "github-repos"}
{"code": "def __getattr__(self, name):\n    \n    return lambda *args, **kwargs: self._Execute(name, *args, **kwargs)", "docstring": "Handles transparent proxying to gdb subprocess.\n\nThis returns a lambda which, when called, sends an RPC request to gdb\nArgs:\nname: The method to call within GdbService\nReturns:\nThe result of the RPC.", "source": "juraj-google-style"}
{"code": "def get_object(tree):\n    \n    if isinstance(tree, Tree):\n        if tree.label() == 'DT' or tree.label() == 'POS':\n            return ''\n        words = []\n        for child in tree:\n            words.append(get_object(child))\n        return ' '.join([_f for _f in words if _f])\n    else:\n        return tree", "docstring": "Get the object in the tree object.\n\nMethod should remove unnecessary letters and words::\n\nthe\na/an\n's\n\nArgs:\ntree (Tree): Parsed tree structure\nReturns:\nResulting string of tree ``(Ex: \"red car\")``", "source": "juraj-google-style"}
{"code": "def _object_url(self, objtype, objid):\n        \n        return \"{base_url}/api/{api_version}/{controller}/{obj_id}\".format(\n            base_url=self._base_url(),\n            api_version=self.api_version,\n            controller=self._controller_name(objtype),\n            obj_id=objid\n        )", "docstring": "Generate the URL for the specified object\n\nArgs:\nobjtype (str): The object's type\nobjid (int): The objects ID\n\nReturns:\nA string containing the URL of the object", "source": "juraj-google-style"}
{"code": "def htmlcolor_to_rgb(str_color):\n    if (not (str_color.startswith('\n        raise ValueError(\"Bad html color format. Expected: '\n    result = [((1.0 * int(n, 16)) / 255) for n in (str_color[1:3], str_color[3:5], str_color[5:])]\n    return result", "docstring": "function to convert HTML-styly color string to RGB values\n\nArgs:\ns: Color in HTML format\n\nReturns:\nlist of three RGB color components", "source": "codesearchnet"}
{"code": "def evaluate_hourly_forecasts(self):\n    score_columns = ['Run_Date', 'Forecast_Hour', 'Ensemble Name', 'Model_Name', 'Forecast_Variable', 'Neighbor_Radius', 'Smoothing_Radius', 'Size_Threshold', 'ROC', 'Reliability']\n    all_scores = pd.DataFrame(columns=score_columns)\n    for (h, hour) in enumerate(range(self.start_hour, (self.end_hour + 1))):\n        for neighbor_radius in self.neighbor_radii:\n            n_filter = disk(neighbor_radius)\n            for (s, size_threshold) in enumerate(self.size_thresholds):\n                print('Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}'.format(hour, self.model_name, self.forecast_variable, self.run_date, neighbor_radius, size_threshold))\n                hour_obs = fftconvolve((self.raw_obs[self.mrms_variable][h] >= self.obs_thresholds[s]), n_filter, mode='same')\n                hour_obs[(hour_obs > 1)] = 1\n                hour_obs[(hour_obs < 1)] = 0\n                if self.obs_mask:\n                    hour_obs = hour_obs[(self.raw_obs[self.mask_variable][h] > 0)]\n                for smoothing_radius in self.smoothing_radii:\n                    hour_var = 'neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}'.format(neighbor_radius, smoothing_radius, self.forecast_variable, size_threshold)\n                    if self.obs_mask:\n                        hour_forecast = self.hourly_forecasts[hour_var][h][(self.raw_obs[self.mask_variable][h] > 0)]\n                    else:\n                        hour_forecast = self.hourly_forecasts[hour_var][h]\n                    roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)\n                    roc.update(hour_forecast, hour_obs)\n                    rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)\n                    rel.update(hour_forecast, hour_obs)\n                    row = [self.run_date, hour, self.ensemble_name, self.model_name, self.forecast_variable, neighbor_radius, smoothing_radius, size_threshold, roc, rel]\n                    all_scores.loc[(hour_var + '_{0:d}'.format(hour))] = row\n    return all_scores", "docstring": "Calculates ROC curves and Reliability scores for each forecast hour.\n\nReturns:\nA pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects.", "source": "codesearchnet"}
{"code": "def refresh_state(self, id_or_uri, configuration, timeout=-1):\n        \n        uri = self._client.build_uri(id_or_uri) + self.REFRESH_STATE_PATH\n        return self._client.update(resource=configuration, uri=uri, timeout=timeout)", "docstring": "Refreshes a drive enclosure.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\nconfiguration: Configuration\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Drive Enclosure", "source": "juraj-google-style"}
{"code": "def has_implicit_access_to_enrollment_api(user, obj):\n    request = get_request_or_stub()\n    decoded_jwt = get_decoded_jwt_from_request(request)\n    return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE, obj)", "docstring": "Check that if request user has implicit access to `ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE` feature role.\n\nReturns:\nboolean: whether the request user has access or not", "source": "codesearchnet"}
{"code": "def __eq__(self, other):\n        \n        if super().__eq__(other) and \\\n                (self._samples == other._samples).all():\n            return True\n        return False", "docstring": "Two SamplePulses are the same if they are of the same type\nand have the same name and samples.\n\nArgs:\nother (SamplePulse): other SamplePulse\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def ListAssets(logdir, plugin_name):\n    plugin_dir = PluginDirectory(logdir, plugin_name)\n    try:\n        return [x.rstrip('/') for x in tf.io.gfile.listdir(plugin_dir)]\n    except tf.errors.NotFoundError:\n        return []", "docstring": "List all the assets that are available for given plugin in a logdir.\n\nArgs:\nlogdir: A directory that was created by a TensorFlow summary.FileWriter.\nplugin_name: A string name of a plugin to list assets for.\n\nReturns:\nA string list of available plugin assets. If the plugin subdirectory does\nnot exist (either because the logdir doesn't exist, or because the plugin\ndidn't register) an empty list is returned.", "source": "codesearchnet"}
{"code": "def parse(file_contents, file_name):\n    \n\n    try:\n        yaml.load(file_contents)\n    except Exception:\n\n        _, exc_value, _ = sys.exc_info()\n        return(\"Cannot Parse: {file_name}: \\n {exc_value}\"\n               .format(file_name=file_name, exc_value=exc_value))", "docstring": "This takes a list of filenames and their paths of expected yaml files and\ntried to parse them, erroring if there are any parsing issues.\n\nArgs:\nfile_contents (str): Contents of a yml file\n\nRaises:\nyaml.parser.ParserError: Raises an error if the file contents cannot be\nparsed and interpreted as yaml", "source": "juraj-google-style"}
{"code": "def trace_flush(self):\n        \n        cmd = enums.JLinkTraceCommand.FLUSH\n        res = self._dll.JLINKARM_TRACE_Control(cmd, 0)\n        if (res == 1):\n            raise errors.JLinkException('Failed to flush the trace buffer.')\n        return None", "docstring": "Flushes the trace buffer.\n\nAfter this method is called, the trace buffer is empty.  This method is\nbest called when the device is reset.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def validate_file(fn, options=None):\n    file_results = FileValidationResults(filepath=fn)\n    output.info(('Performing JSON schema validation on %s' % fn))\n    if (not options):\n        options = ValidationOptions(files=fn)\n    try:\n        with open(fn) as instance_file:\n            file_results.object_results = validate(instance_file, options)\n    except Exception as ex:\n        if ('Expecting value' in str(ex)):\n            line_no = str(ex).split()[3]\n            file_results.fatal = ValidationErrorResults(('Invalid JSON input on line %s' % line_no))\n        else:\n            file_results.fatal = ValidationErrorResults(ex)\n        msg = \"Unexpected error occurred with file '{fn}'. No further validation will be performed: {error}\"\n        output.info(msg.format(fn=fn, error=str(ex)))\n    file_results.is_valid = (all((object_result.is_valid for object_result in file_results.object_results)) and (not file_results.fatal))\n    return file_results", "docstring": "Validate the input document `fn` according to the options passed in.\n\nIf any exceptions are raised during validation, no further validation\nwill take place.\n\nArgs:\nfn: The filename of the JSON file to be validated.\noptions: An instance of ``ValidationOptions``.\n\nReturns:\nAn instance of FileValidationResults.", "source": "codesearchnet"}
{"code": "def get_all_apps():\n    LOG.info('Retreiving list of all Spinnaker applications')\n    url = '{}/applications'.format(API_URL)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert response.ok, 'Could not retrieve application list'\n    pipelines = response.json()\n    LOG.debug('All Applications:\\n%s', pipelines)\n    return pipelines", "docstring": "Get a list of all applications in Spinnaker.\n\nReturns:\nrequests.models.Response: Response from Gate containing list of all apps.", "source": "codesearchnet"}
{"code": "def parse(cls, args):\n        \n\n        try:\n            (options, args) = cls.optparser.parse_args(args)\n            if options.mode not in [\"1\", \"2\"]:\n                raise ParseError(\"mode must be either '1' or '2'\",\n                                 cls.optparser.format_help())\n\n            if (options.dbtap_id is None) or (options.db_table is None):\n                raise ParseError(\"dbtap_id and db_table are required\",\n                                 cls.optparser.format_help())\n\n            if options.mode is \"1\":\n                if options.hive_table is None:\n                    raise ParseError(\"hive_table is required for mode 1\",\n                                     cls.optparser.format_help())\n            elif options.export_dir is None:    \n                raise ParseError(\"export_dir is required for mode 2\",\n                                 cls.optparser.format_help())\n\n            if options.db_update_mode is not None:\n                if options.db_update_mode not in [\"allowinsert\", \"updateonly\"]:\n                    raise ParseError(\"db_update_mode should either be left blank for append \"\n                                     \"mode or be 'updateonly' or 'allowinsert'\",\n                                     cls.optparser.format_help())\n                if options.db_update_mode is \"updateonly\":\n                    if options.db_update_keys is None:\n                        raise ParseError(\"db_update_keys is required when db_update_mode \"\n                                         \"is 'updateonly'\",\n                                         cls.optparser.format_help())\n                elif options.db_update_keys is not None:\n                    raise ParseError(\"db_update_keys is used only when db_update_mode \"\n                                     \"is 'updateonly'\",\n                                     cls.optparser.format_help())\n\n        except OptionParsingError as e:\n            raise ParseError(e.msg, cls.optparser.format_help())\n        except OptionParsingExit as e:\n            return None\n\n        v = vars(options)\n        v[\"command_type\"] = \"DbExportCommand\"\n        return v", "docstring": "Parse command line arguments to construct a dictionary of command\nparameters that can be used to create a command\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used in create method\n\nRaises:\nParseError: when the arguments are not correct", "source": "juraj-google-style"}
{"code": "async def await_rpc(self, address, rpc_id, *args, **kwargs):\n    self.verify_calling_thread(True, 'await_rpc must be called from **inside** the event loop')\n    if isinstance(rpc_id, RPCDeclaration):\n        arg_format = rpc_id.arg_format\n        resp_format = rpc_id.resp_format\n        rpc_id = rpc_id.rpc_id\n    else:\n        arg_format = kwargs.get('arg_format', None)\n        resp_format = kwargs.get('resp_format', None)\n    arg_payload = b''\n    if (arg_format is not None):\n        arg_payload = pack_rpc_payload(arg_format, args)\n    self._logger.debug('Sending rpc to %d:%04X, payload=%s', address, rpc_id, args)\n    response = AwaitableResponse()\n    self._rpc_queue.put_rpc(address, rpc_id, arg_payload, response)\n    try:\n        resp_payload = (await response.wait(1.0))\n    except RPCRuntimeError as err:\n        resp_payload = err.binary_error\n    if (resp_format is None):\n        return []\n    resp = unpack_rpc_payload(resp_format, resp_payload)\n    return resp", "docstring": "Send an RPC from inside the EmulationLoop.\n\nThis is the primary method by which tasks running inside the\nEmulationLoop dispatch RPCs.  The RPC is added to the queue of waiting\nRPCs to be drained by the RPC dispatch task and this coroutine will\nblock until it finishes.\n\n**This method must only be called from inside the EmulationLoop**\n\nArgs:\naddress (int): The address of the tile that has the RPC.\nrpc_id (int): The 16-bit id of the rpc we want to call\n*args: Any required arguments for the RPC as python objects.\n**kwargs: Only two keyword arguments are supported:\n- arg_format: A format specifier for the argument list\n- result_format: A format specifier for the result\n\nReturns:\nlist: A list of the decoded response members from the RPC.", "source": "codesearchnet"}
{"code": "def __init__(self, *args, pubdate=None, excerpt=None, tags=None, allow_comments=True, **kwargs):\n\t\t\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.excerpt = excerpt or _get_excerpt(self.body)\n\t\tself.pubdate = pubdate\n\t\tself.tags = tags or []\n\t\tself.allow_comments = allow_comments", "docstring": "Constructor. Also see Entry.__init__.\n\nArgs:\npubdate (datetime): When the post was published.\nexcerpt (str): An excerpt of the post body.\ntags (list): A list of Tag objects associated with the post.\nallow_comments (bool): Whether to allow comments. Default False.", "source": "juraj-google-style"}
{"code": "def get_file(self, filename, scope='all'):\n    filename = os.path.abspath(os.path.join(self.root, filename))\n    layouts = self._get_layouts_in_scope(scope)\n    for ly in layouts:\n        if (filename in ly.files):\n            return ly.files[filename]\n    return None", "docstring": "Returns the BIDSFile object with the specified path.\n\nArgs:\nfilename (str): The path of the file to retrieve. Must be either\nan absolute path, or relative to the root of this BIDSLayout.\nscope (str, list): Scope of the search space. If passed, only\nBIDSLayouts that match the specified scope will be\nsearched. See BIDSLayout docstring for valid values.\n\nReturns: A BIDSFile, or None if no match was found.", "source": "codesearchnet"}
{"code": "def uncheck(self, locator=None, allow_label_click=None, **kwargs):\n        \n\n        self._check_with_label(\n            \"checkbox\", False, locator=locator, allow_label_click=allow_label_click, **kwargs)", "docstring": "Find a check box and uncheck it. The check box can be found via name, id, or label text. ::\n\npage.uncheck(\"German\")\n\nArgs:\nlocator (str, optional): Which check box to uncheck.\nallow_label_click (bool, optional): Attempt to click the label to toggle state if\nelement is non-visible. Defaults to :data:`capybara.automatic_label_click`.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.", "source": "juraj-google-style"}
{"code": "def has_request(self, request):\n        \n\n        queue_item = QueueItem(request, Response(request.url))\n        key = queue_item.get_hash()\n\n        for status in QueueItem.STATUSES:\n            if key in self.__get_var(\"items_\" + status).keys():\n                return True\n\n        return False", "docstring": "Check if the given request already exists in the queue.\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The request to check.\n\nReturns:\nbool: True if already exists, False otherwise.", "source": "juraj-google-style"}
{"code": "def find_module_defining_flag(self, flagname, default=None):\n    \n    registered_flag = self._flags().get(flagname)\n    if registered_flag is None:\n      return default\n    for module, flags in six.iteritems(self.flags_by_module_dict()):\n      for flag in flags:\n        \n        \n        \n        if (flag.name == registered_flag.name and\n            flag.short_name == registered_flag.short_name):\n          return module\n    return default", "docstring": "Return the name of the module defining this flag, or default.\n\nArgs:\nflagname: str, name of the flag to lookup.\ndefault: Value to return if flagname is not defined. Defaults\nto None.\n\nReturns:\nThe name of the module which registered the flag with this name.\nIf no such module exists (i.e. no flag with this name exists),\nwe return default.", "source": "juraj-google-style"}
{"code": "def delete(self, reference, option=None):\n    write_pb = _helpers.pb_for_delete(reference._document_path, option)\n    self._add_write_pbs([write_pb])", "docstring": "Add a \"change\" to delete a document.\n\nSee\n:meth:`~.firestore_v1beta1.document.DocumentReference.delete` for\nmore information on how ``option`` determines how the change is\napplied.\n\nArgs:\nreference (~.firestore_v1beta1.document.DocumentReference): A\ndocument reference that will be deleted in this batch.\noption (Optional[~.firestore_v1beta1.client.WriteOption]): A\nwrite option to make assertions / preconditions on the server\nstate of the document before applying changes.", "source": "codesearchnet"}
{"code": "def get_output_info_dict(self, signature=None):\n    \n    return self._spec.get_output_info_dict(signature=signature, tags=self._tags)", "docstring": "Describes the outputs provided by a signature.\n\nArgs:\nsignature: A string with the signature to get ouputs information for.\nIf None, the default signature is used if defined.\n\nReturns:\nThe result of ModuleSpec.get_output_info_dict() for the given signature,\nand the graph variant selected by `tags` when this Module was initialized.\n\nRaises:\nKeyError: if there is no such signature.", "source": "juraj-google-style"}
{"code": "def _update_state_from_shard_states(self, state, shard_states, control):\n    (state.active_shards, state.aborted_shards, state.failed_shards) = (0, 0, 0)\n    total_shards = 0\n    processed_counts = []\n    processed_status = []\n    state.counters_map.clear()\n    for s in shard_states:\n        total_shards += 1\n        status = 'unknown'\n        if s.active:\n            state.active_shards += 1\n            status = 'running'\n        if (s.result_status == model.ShardState.RESULT_SUCCESS):\n            status = 'success'\n        elif (s.result_status == model.ShardState.RESULT_ABORTED):\n            state.aborted_shards += 1\n            status = 'aborted'\n        elif (s.result_status == model.ShardState.RESULT_FAILED):\n            state.failed_shards += 1\n            status = 'failed'\n        state.counters_map.add_map(s.counters_map)\n        processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))\n        processed_status.append(status)\n    state.set_processed_counts(processed_counts, processed_status)\n    state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())\n    spec = state.mapreduce_spec\n    if (total_shards != spec.mapper.shard_count):\n        logging.error(\"Found %d shard states. Expect %d. Issuing abort command to job '%s'\", total_shards, spec.mapper.shard_count, spec.mapreduce_id)\n        model.MapreduceControl.abort(spec.mapreduce_id)\n    state.active = bool(state.active_shards)\n    if ((not control) and (state.failed_shards or state.aborted_shards)):\n        model.MapreduceControl.abort(spec.mapreduce_id)\n    if (not state.active):\n        if (state.failed_shards or (not total_shards)):\n            state.result_status = model.MapreduceState.RESULT_FAILED\n        elif state.aborted_shards:\n            state.result_status = model.MapreduceState.RESULT_ABORTED\n        else:\n            state.result_status = model.MapreduceState.RESULT_SUCCESS\n        self._finalize_outputs(spec, state)\n        self._finalize_job(spec, state)\n    else:\n\n        @db.transactional(retries=5)\n        def _put_state():\n            'The helper for storing the state.'\n            fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)\n            if (not fresh_state.active):\n                logging.warning('Job %s is not active. Looks like spurious task execution. Dropping controller task.', spec.mapreduce_id)\n                return\n            config = util.create_datastore_write_config(spec)\n            state.put(config=config)\n        _put_state()", "docstring": "Update mr state by examing shard states.\n\nArgs:\nstate: current mapreduce state as MapreduceState.\nshard_states: an iterator over shard states.\ncontrol: model.MapreduceControl entity.", "source": "codesearchnet"}
{"code": "def update(self, media_blob: genai_types.Blob):\n    if self.generation_start_sec is not None and self.ttft_sec is None:\n        self.time_audio_start = time.perf_counter()\n        self.ttft_sec = self.time_audio_start - self.generation_start_sec\n    self.audio_duration += audio_duration_sec(media_blob.data, RECEIVE_SAMPLE_RATE)", "docstring": "Updates the generation request with the new media data.\n\nArgs:\nmedia_blob: The new media data.", "source": "github-repos"}
{"code": "def _group(self, group_data):\n    if isinstance(group_data, dict):\n        xid = group_data.get('xid')\n    else:\n        xid = group_data.xid\n    if (self.groups.get(xid) is not None):\n        group_data = self.groups.get(xid)\n    elif (self.groups_shelf.get(xid) is not None):\n        group_data = self.groups_shelf.get(xid)\n    else:\n        self.groups[xid] = group_data\n    return group_data", "docstring": "Return previously stored group or new group.\n\nArgs:\ngroup_data (dict|obj): An Group dict or instance of Group object.\n\nReturns:\ndict|obj: The new Group dict/object or the previously stored dict/object.", "source": "codesearchnet"}
{"code": "def remove_line(self, section, line):\n    try:\n        s = self._get_section(section, create=False)\n    except KeyError:\n        return 0\n    return s.remove(line)", "docstring": "Remove all instances of a line.\n\nReturns:\nint: the number of lines removed", "source": "codesearchnet"}
{"code": "def list_key_values(input: t.Dict[str, str]) -> None:\n    for cmd, desc in input.items():\n        print(f'{cmd}  =>  {desc}')", "docstring": "Display key-value pairs from a dictionary.\n\nArgs:\ninput (Dict[str, str]): The dictionary containing key-value pairs.", "source": "github-repos"}
{"code": "def peek_step(self, val: ArrayValue, sn: 'DataNode') -> Tuple[(ObjectValue, 'DataNode')]:\n    keys = self.parse_keys(sn)\n    for en in val:\n        flag = True\n        try:\n            for k in keys:\n                if (en[k] != keys[k]):\n                    flag = False\n                    break\n        except KeyError:\n            continue\n        if flag:\n            return (en, sn)\n    return (None, sn)", "docstring": "Return the entry addressed by the receiver + its schema node.\n\nArgs:\nval: Current value (array).\nsn:  Current schema node.", "source": "codesearchnet"}
{"code": "def _initialize_pvariables(self, pvariables: Dict[(str, PVariable)], ordering: List[str], initializer: Optional[InitializerList]=None) -> List[Tuple[(str, TensorFluent)]]:\n    if (initializer is not None):\n        init = dict()\n        for ((name, args), value) in initializer:\n            arity = (len(args) if (args is not None) else 0)\n            name = '{}/{}'.format(name, arity)\n            init[name] = init.get(name, [])\n            init[name].append((args, value))\n    fluents = []\n    for name in ordering:\n        pvar = pvariables[name]\n        shape = self.rddl._param_types_to_shape(pvar.param_types)\n        dtype = utils.range_type_to_dtype(pvar.range)\n        fluent = np.full(shape, pvar.default)\n        if (initializer is not None):\n            for (args, val) in init.get(name, []):\n                if (args is not None):\n                    idx = []\n                    for (ptype, arg) in zip(pvar.param_types, args):\n                        idx.append(self.rddl.object_table[ptype]['idx'][arg])\n                    idx = tuple(idx)\n                    fluent[idx] = val\n                else:\n                    fluent = val\n        with self.graph.as_default():\n            t = tf.constant(fluent, dtype=dtype, name=utils.identifier(name))\n            scope = ([None] * len(t.shape))\n            fluent = TensorFluent(t, scope, batch=False)\n            fluent_pair = (name, fluent)\n            fluents.append(fluent_pair)\n    return fluents", "docstring": "Instantiates `pvariables` given an initialization list and\nreturns a list of TensorFluents in the given `ordering`.\n\nReturns:\nList[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor.", "source": "codesearchnet"}
{"code": "def parse(self, body):\n        \n        if isinstance(body, six.string_types):\n            body = json.loads(body)\n\n        \n        version = body['version']\n        self.version = version\n\n        \n        session = body['session']\n        self.session.new = session['new']\n        self.session.session_id = session['sessionId']\n        application_id = session['application']['applicationId']\n        self.session.application.application_id = application_id\n        if 'attributes' in session and session['attributes']:\n            self.session.attributes = session.get('attributes', {})\n        else:\n            self.session.attributes = {}\n        self.session.user.user_id = session['user']['userId']\n        self.session.user.access_token = session['user'].get('accessToken', 0)\n\n        \n        request = body['request']\n\n        \n        if request['type'] == 'LaunchRequest':\n            self.request = LaunchRequest()\n\n        \n        elif request['type'] == 'IntentRequest':\n            self.request = IntentRequest()\n            self.request.intent = Intent()\n            intent = request['intent']\n            self.request.intent.name = intent['name']\n            if 'slots' in intent and intent['slots']:\n                for name, slot in six.iteritems(intent['slots']):\n                    self.request.intent.slots[name] = Slot()\n                    self.request.intent.slots[name].name = slot['name']\n                    self.request.intent.slots[name].value = slot.get('value')\n\n        \n        elif request['type'] == 'SessionEndedRequest':\n            self.request = SessionEndedRequest()\n            self.request.reason = request['reason']\n\n        \n        self.request.type = request['type']\n        self.request.request_id = request['requestId']\n        self.request.timestamp = request['timestamp']\n\n        return self", "docstring": "Parse JSON request, storing content in object attributes.\n\nArgs:\nbody: str. HTTP request body.\n\nReturns:\nself", "source": "juraj-google-style"}
{"code": "def exists(self, pattern, **match_kwargs):\n    ret = self.match(pattern, **match_kwargs)\n    if (ret is None):\n        return None\n    if (not ret.matched):\n        return None\n    return ret", "docstring": "Check if image exists in screen\n\nReturns:\nIf exists, return FindPoint, or\nreturn None if result.confidence < self.image_match_threshold", "source": "codesearchnet"}
{"code": "def register(name):\n    if not isinstance(name, str):\n        raise TypeError('Expected `name` to be a string; got %r' % (name,))\n    if not _REGISTERED_NAME_RE.match(name):\n        raise ValueError(\"Registered name must have the form '{project_name}.{type_name}' (e.g. 'my_project.MyTypeSpec'); got %r.\" % name)\n\n    def decorator_fn(cls):\n        if not (isinstance(cls, type) and issubclass(cls, internal.TypeSpec)):\n            raise TypeError('Expected `cls` to be a TypeSpec; got %r' % (cls,))\n        if cls in _TYPE_SPEC_TO_NAME:\n            raise ValueError('Class %s.%s has already been registered with name %s.' % (cls.__module__, cls.__name__, _TYPE_SPEC_TO_NAME[cls]))\n        if name in _NAME_TO_TYPE_SPEC:\n            raise ValueError('Name %s has already been registered for class %s.%s.' % (name, _NAME_TO_TYPE_SPEC[name].__module__, _NAME_TO_TYPE_SPEC[name].__name__))\n        _TYPE_SPEC_TO_NAME[cls] = name\n        _NAME_TO_TYPE_SPEC[name] = cls\n        return cls\n    return decorator_fn", "docstring": "Decorator used to register a globally unique name for a TypeSpec subclass.\n\nArgs:\nname: The name of the type spec.  Must be globally unique.  Must have the\nform `\"{project_name}.{type_name}\"`.  E.g. `\"my_project.MyTypeSpec\"`.\n\nReturns:\nA class decorator that registers the decorated class with the given name.", "source": "github-repos"}
{"code": "def reload_config(self, dockercfg_path=None):\n        \n        self._auth_configs = auth.load_config(\n            dockercfg_path, credstore_env=self.credstore_env\n        )", "docstring": "Force a reload of the auth configuration\n\nArgs:\ndockercfg_path (str): Use a custom path for the Docker config file\n(default ``$HOME/.docker/config.json`` if present,\notherwise``$HOME/.dockercfg``)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def load_ner_model(lang='en', version='2'):\n    src_dir = 'ner{}'.format(version)\n    p = locate_resource(src_dir, lang)\n    fh = _open(p)\n    try:\n        return pickle.load(fh)\n    except UnicodeDecodeError:\n        fh.seek(0)\n        return pickle.load(fh, encoding='latin1')", "docstring": "Return a named entity extractor parameters for `lang` and of version `version`\n\nArgs:\nlang (string): language code.\nversion (string): version of the parameters to be used.", "source": "codesearchnet"}
{"code": "def group_modes(modes):\n    \n    if len(modes) > 0:\n        previous = modes[0]\n        grouped = []\n\n        for changep in modes[1:]:\n            if changep['label'] != previous['label']:\n                previous['to'] = changep['from']\n                grouped.append(previous)\n                previous = changep\n\n        previous['to'] = modes[-1]['to']\n        grouped.append(previous)\n        return grouped\n    else:\n        return modes", "docstring": "Groups consecutive transportation modes with same label, into one\n\nArgs:\nmodes (:obj:`list` of :obj:`dict`)\nReturns:\n:obj:`list` of :obj:`dict`", "source": "juraj-google-style"}
{"code": "def _astimezone_ts(self, timezone):\n    if (self.created.tzinfo is timezone):\n        return self\n    else:\n        nw_obj = Timestamps(((None,) * 4))\n        nw_obj.created = self.created.astimezone(timezone)\n        nw_obj.changed = self.changed.astimezone(timezone)\n        nw_obj.mft_changed = self.mft_changed.astimezone(timezone)\n        nw_obj.accessed = self.accessed.astimezone(timezone)\n        return nw_obj", "docstring": "Changes the time zones of all timestamps.\n\nReceives a new timezone and applies to all timestamps, if necessary.\n\nArgs:\ntimezone (:obj:`tzinfo`): Time zone to be applied\n\nReturns:\nA new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.", "source": "codesearchnet"}
{"code": "def last_updated(self, path):\n    raise NotImplementedError", "docstring": "Get UNIX Epoch time in seconds on the FileSystem.\n\nArgs:\npath: string path of file.\n\nReturns: float UNIX Epoch time\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def get_metadata(self, handle):\n        \n        response = self.open_url(url=handle, suffix='.metadata')\n        try:\n            return json.load(response)\n        finally:\n            response.close()", "docstring": "Returns the associated metadata info for the given handle, the metadata\nfile must exist (``handle + '.metadata'``). If the given handle has an\n``.xz`` extension, it will get removed when calculating the handle\nmetadata path\n\nArgs:\nhandle (str): Path to the template to get the metadata from\n\nReturns:\ndict: Metadata for the given handle", "source": "juraj-google-style"}
{"code": "def merge_dictionaries(dicts, merge_lists=False):\n    \n    \n    dict1 = dicts[0]\n    for other_dict in dicts[1:]:\n        merge_two_dictionaries(dict1, other_dict, merge_lists=merge_lists)\n    return dict1", "docstring": "Merges all dictionaries in dicts into a single dictionary and returns result\n\nArgs:\ndicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list\nmerge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.\n\nReturns:\nDictUpperBound: Merged dictionary", "source": "juraj-google-style"}
{"code": "def appendDirectory(self, directory, projectFilePath):\n    lines = []\n    with open(projectFilePath, 'r') as original:\n        for l in original:\n            lines.append(l)\n    with open(projectFilePath, 'w') as new:\n        for line in lines:\n            card = {}\n            try:\n                card = self._extractCard(line)\n            except:\n                card = self._extractDirectoryCard(line)\n            numSpaces = max(2, (25 - len(card['name'])))\n            if (card['value'] is None):\n                rewriteLine = ('%s\\n' % card['name'])\n            elif (card['name'] == 'WMS'):\n                rewriteLine = ('%s %s\\n' % (card['name'], card['value']))\n            elif (card['name'] == 'PROJECT_PATH'):\n                filePath = ('\"%s\"' % os.path.normpath(directory))\n                rewriteLine = ('%s%s%s\\n' % (card['name'], (' ' * numSpaces), filePath))\n            elif ('\"' in card['value']):\n                filename = card['value'].strip('\"')\n                filePath = ('\"%s\"' % os.path.join(directory, filename))\n                rewriteLine = ('%s%s%s\\n' % (card['name'], (' ' * numSpaces), filePath))\n            else:\n                rewriteLine = ('%s%s%s\\n' % (card['name'], (' ' * numSpaces), card['value']))\n            new.write(rewriteLine)", "docstring": "Append directory to relative paths in project file. By default, the project file paths are read and written as\nrelative paths. Use this method to prepend a directory to all the paths in the project file.\n\nArgs:\ndirectory (str): Directory path to prepend to file paths in project file.\nprojectFilePath (str): Path to project file that will be modified.", "source": "codesearchnet"}
{"code": "def _process_kwargs_parameters(sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters):\n    docstring = ''\n    source_args_dict = source_args_doc(ImageProcessorArgs)\n    unroll_kwargs = func.__name__ in UNROLL_KWARGS_METHODS\n    if not unroll_kwargs and parent_class is not None:\n        unroll_kwargs = any((unroll_kwargs_class in parent_class.__name__ for unroll_kwargs_class in UNROLL_KWARGS_CLASSES))\n    if unroll_kwargs:\n        kwargs_parameters = [kwargs_param for _, kwargs_param in sig.parameters.items() if kwargs_param.kind == inspect.Parameter.VAR_KEYWORD]\n        for kwarg_param in kwargs_parameters:\n            if kwarg_param.annotation == inspect.Parameter.empty:\n                continue\n            kwargs_documentation = kwarg_param.annotation.__args__[0].__doc__\n            if kwargs_documentation is not None:\n                documented_kwargs, _ = parse_docstring(kwargs_documentation)\n                if model_name_lowercase is not None:\n                    documented_kwargs = format_args_docstring(documented_kwargs, model_name_lowercase)\n            for param_name, param_type_annotation in kwarg_param.annotation.__args__[0].__annotations__.items():\n                param_type = str(param_type_annotation)\n                optional = False\n                if 'typing' in param_type:\n                    param_type = ''.join(param_type.split('typing.')).replace('transformers.', '~')\n                else:\n                    param_type = f'{param_type.replace('transformers.', '~').replace('builtins', '')}.{param_name}'\n                if 'ForwardRef' in param_type:\n                    param_type = re.sub(\"ForwardRef\\\\('([\\\\w.]+)'\\\\)\", '\\\\1', param_type)\n                if 'Optional' in param_type:\n                    param_type = re.sub('Optional\\\\[(.*?)\\\\]', '\\\\1', param_type)\n                    optional = True\n                param_default = ''\n                if parent_class is not None:\n                    param_default = str(getattr(parent_class, param_name, ''))\n                    param_default = f', defaults to `{param_default}`' if param_default != '' else ''\n                param_type, optional_string, shape_string, additional_info, description, is_documented = _get_parameter_info(param_name, documented_kwargs, source_args_dict, param_type, optional)\n                if is_documented:\n                    if param_type == '':\n                        print(f'🚨 {param_name} for {kwarg_param.annotation.__args__[0].__qualname__} in file {func.__code__.co_filename} has no type')\n                    param_type = param_type if '`' in param_type else f'`{param_type}`'\n                    if additional_info:\n                        docstring += set_min_indent(f'{param_name} ({param_type}{additional_info}):{description}', indent_level + 8)\n                    else:\n                        docstring += set_min_indent(f'{param_name} ({param_type}{shape_string}{optional_string}{param_default}):{description}', indent_level + 8)\n                else:\n                    undocumented_parameters.append(f'🚨 `{param_name}` is part of {kwarg_param.annotation.__args__[0].__qualname__}, but not documented. 
Make sure to add it to the docstring of the function in {func.__code__.co_filename}.')\n    return docstring", "docstring": "Process **kwargs parameters if needed.\n\nArgs:\nsig (`inspect.Signature`): Function signature\nfunc (`function`): Function the parameters belong to\nparent_class (`class`): Parent class of the function\nmodel_name_lowercase (`str`): Lowercase model name\ndocumented_kwargs (`dict`): Dictionary of kwargs that are already documented\nindent_level (`int`): Indentation level\nundocumented_parameters (`list`): List to append undocumented parameters to", "source": "github-repos"}
{"code": "def perform_extract_job(self, destination, job_id, table_reference, destination_format, project=None, include_header=True, compression=ExportCompression.NONE, use_avro_logical_types=False, job_labels=None):\n    job_project = project or table_reference.projectId\n    job_reference = bigquery.JobReference(jobId=job_id, projectId=job_project)\n    request = bigquery.BigqueryJobsInsertRequest(projectId=job_project, job=bigquery.Job(configuration=bigquery.JobConfiguration(extract=bigquery.JobConfigurationExtract(destinationUris=destination, sourceTable=table_reference, printHeader=include_header, destinationFormat=destination_format, compression=compression, useAvroLogicalTypes=use_avro_logical_types), labels=_build_job_labels(job_labels)), jobReference=job_reference))\n    return self._start_job(request).jobReference", "docstring": "Starts a job to export data from BigQuery.\n\nReturns:\nbigquery.JobReference with the information about the job that was started.", "source": "github-repos"}
{"code": "def decode(self, codes):\n        \n        assert codes.ndim == 2\n        N, M = codes.shape\n        assert M == self.M\n        assert codes.dtype == self.code_dtype\n\n        vecs = np.empty((N, self.Ds * self.M), dtype=np.float32)\n        for m in range(self.M):\n            vecs[:, m * self.Ds : (m+1) * self.Ds] = self.codewords[m][codes[:, m], :]\n\n        return vecs", "docstring": "Given PQ-codes, reconstruct original D-dimensional vectors\napproximately by fetching the codewords.\n\nArgs:\ncodes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype.\nEach row is a PQ-code\n\nReturns:\nnp.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32", "source": "juraj-google-style"}
{"code": "def set_size(self, height=220, width=350,\n                 height_threshold=120,\n                 width_threshold=160):\n        \n        self.set_integer(\"height\", height)\n        self.set_integer(\"width\", width)\n        self.set_integer(\"small_height_threshold\", height_threshold)\n        self.set_integer(\"small_width_threshold\", width_threshold)", "docstring": "Set the size of the chart.\n\nArgs:\nheight (int): height in pixels.\nwidth (int): width in pixels.\nheight_threshold (int): height threshold in pixels\nwidth_threshold (int): width threshold in pixesls", "source": "juraj-google-style"}
{"code": "def window_design(self, window_length, beta):\n        \n\n        self.window = np.kaiser(window_length, beta)\n\n        return self.window", "docstring": "Kaiser window design\n\nArgs:\nwindow_length: Length of the window in number of samples\nbeta: Beta value for Kaiser window design\n\nReturns:\nwindow: Window designed using the beta and length provided as inputs", "source": "juraj-google-style"}
{"code": "def __init__(self, model_data, image, env=None):\n        \n        self.model_data = model_data\n        self.image = image\n        self.env = env", "docstring": "Create a definition of a model which can be part of an Inference Pipeline\nArgs:\nmodel_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file.\nimage (str): A Docker image URI.\nenv (dict[str, str]): Environment variables to run with ``image`` when hosted in SageMaker (default: None).", "source": "juraj-google-style"}
{"code": "def load_hdf5(path):\n    \n\n    with h5py.File(path, 'r') as f:\n        is_sparse = f['issparse'][...]\n        if is_sparse:\n            shape = tuple(f['shape'][...])\n            data = f['data'][...]\n            indices = f['indices'][...]\n            indptr = f['indptr'][...]\n            X = sparse.csr_matrix((data, indices, indptr), shape=shape)\n        else:\n            X = f['data'][...]\n\n        y = f['target'][...]\n\n    return X, y", "docstring": "Load data from a HDF5 file.\n\nArgs:\npath (str): A path to the HDF5 format file containing data.\ndense (boolean): An optional variable indicating if the return matrix\nshould be dense.  By default, it is false.\n\nReturns:\nData matrix X and target vector y", "source": "juraj-google-style"}
{"code": "def get_version(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_VERSION, id=id, endpoint=endpoint)", "docstring": "Get the current version of the endpoint.\nNote: Not all endpoints currently implement this method\n\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def get_dimension(self, key, value, **kwargs):\n    return self._get_object_by_name(self._DIMENSION_ENDPOINT_SUFFIX, '{0}/{1}'.format(key, value), **kwargs)", "docstring": "get a dimension by key and value\n\nArgs:\nkey (string): key of the dimension\nvalue (string): value of the dimension\n\nReturns:\ndictionary of response", "source": "codesearchnet"}
{"code": "def set_checkbox_value(w, value):\n    \n    save = w.blockSignals(True)\n    try:\n        w.setChecked(bool(value))\n    finally:\n        w.blockSignals(save)", "docstring": "Sets a checkbox's \"checked\" property + signal blocking + value tolerance\n\nArgs:\nw: QCheckBox instance\nvalue: something that can be converted to a bool", "source": "juraj-google-style"}
{"code": "def minimum(self, vars_list: List[str]) -> 'TensorFluent':\n        \n        return self._aggregation_op(tf.reduce_min, self, vars_list)", "docstring": "Returns the TensorFluent for the minimum aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the minimum aggregation function.", "source": "juraj-google-style"}
{"code": "def recover_session(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:\n    sess, is_loaded_from_checkpoint = self._restore_checkpoint(master, saver, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config)\n    local_init_success, msg = self._try_run_local_init_op(sess)\n    if not is_loaded_from_checkpoint:\n        return (sess, False)\n    restoring_file = checkpoint_dir or checkpoint_filename_with_path\n    if not local_init_success:\n        logging.info('Restoring model from %s did not make model ready for local init: %s', restoring_file, msg)\n        return (sess, False)\n    is_ready, msg = self._model_ready(sess)\n    if not is_ready:\n        logging.info('Restoring model from %s did not make model ready: %s', restoring_file, msg)\n        return (sess, False)\n    logging.info('Restored model from %s', restoring_file)\n    return (sess, is_loaded_from_checkpoint)", "docstring": "Creates a `Session`, recovering if possible.\n\nCreates a new session on 'master'.  If the session is not initialized\nand can be recovered from a checkpoint, recover it.\n\nArgs:\nmaster: `String` representation of the TensorFlow master to use.\nsaver: A `Saver` object used to restore a model.\ncheckpoint_dir: Path to the checkpoint files. The latest checkpoint in the\ndir will be used to restore.\ncheckpoint_filename_with_path: Full file name path to the checkpoint file.\nwait_for_checkpoint: Whether to wait for checkpoint to become available.\nmax_wait_secs: Maximum time to wait for checkpoints to become available.\nconfig: Optional `ConfigProto` proto used to configure the session.\n\nReturns:\nA pair (sess, initialized) where 'initialized' is `True` if\nthe session could be recovered and initialized, `False` otherwise.\n\nRaises:\nValueError: If both checkpoint_dir and checkpoint_filename_with_path are\nset.", "source": "github-repos"}
{"code": "def GetCustomerIDs(client):\n  \n  \n  \n  managed_customer_service = client.GetService('ManagedCustomerService',\n                                               version='v201809')\n\n  offset = 0\n\n  \n  selector = {\n      'fields': ['CustomerId'],\n      'predicates': [{\n          'field': 'CanManageClients',\n          'operator': 'EQUALS',\n          'values': [False]\n      }],\n      'paging': {\n          'startIndex': str(offset),\n          'numberResults': str(PAGE_SIZE)\n      }\n  }\n\n  \n  queue = multiprocessing.Queue()\n  more_pages = True\n\n  while more_pages:\n    page = managed_customer_service.get(selector)\n\n    if page and 'entries' in page and page['entries']:\n      for entry in page['entries']:\n        queue.put(entry['customerId'])\n    else:\n      raise Exception('Can\\'t retrieve any customer ID.')\n    offset += PAGE_SIZE\n    selector['paging']['startIndex'] = str(offset)\n    more_pages = offset < int(page['totalNumEntries'])\n\n  return queue", "docstring": "Retrieves all CustomerIds in the account hierarchy.\n\nNote that your configuration file must specify a client_customer_id belonging\nto an AdWords manager account.\n\nArgs:\nclient: an AdWordsClient instance.\nRaises:\nException: if no CustomerIds could be found.\nReturns:\nA Queue instance containing all CustomerIds in the account hierarchy.", "source": "juraj-google-style"}
{"code": "def l1_normalize(x, dim, epsilon=1e-12, name=None):\n  \n  with tf.name_scope(name, 'l1_normalize', [x]) as scope:\n    x = tf.convert_to_tensor(x, name='x')\n    x = tf.verify_tensor_all_finite(x, 'Error at input %s' % scope)\n    x_norm = tf.maximum(tf.reduce_sum(tf.abs(x), [dim], keep_dims=True),\n                        epsilon)\n    return tf.div(x, x_norm, name=scope)", "docstring": "l1 normalizes x.\n\nArgs:\nx: The tensor to normalize.\ndim: The dimension to normalize along.\nepsilon: Lower bound on the norm, used to avoid exploding gradients as the\nnorm approaches 0.\nname: Optional name for this op.\nReturns:\nx normalized along dim.", "source": "juraj-google-style"}
{"code": "def timestamp(stamp, tolerance=150):\n    try:\n        tolerance = datetime.timedelta(0, tolerance)\n        timestamp_low = dateutil.parser.parse(stamp)\n        timestamp_high = (timestamp_low + tolerance)\n        now = datetime.datetime.now(timestamp_low.tzinfo)\n    except ValueError:\n        return False\n    return ((now >= timestamp_low) and (now <= timestamp_high))", "docstring": "Validate timestamp specified by request.\n\nSee `validate.request` for additional info.\n\nArgs:\nstamp: str. Time request was made as ISO 8601 timestamp.\ntolerance: int. Number of seconds request remains valid from timestamp.\n\nReturns\nbool: True if valid, False otherwise.", "source": "codesearchnet"}
{"code": "def datetime_from_isoformat(value: str):\n    if (sys.version_info >= (3, 7)):\n        return datetime.fromisoformat(value)\n    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')", "docstring": "Return a datetime object from an isoformat string.\n\nArgs:\nvalue (str): Datetime string in isoformat.", "source": "codesearchnet"}
{"code": "def get_lang_tags(index_page):\n    \n    dom = dhtmlparser.parseString(index_page)\n\n    lang_tags = [\n        get_html_lang_tags(dom),\n        get_dc_lang_tags(dom),\n        [detect_language(dom)],\n        get_html_tag_lang_params(dom),\n    ]\n\n    return list(sorted(set(\n        SourceString(normalize(lang), source=lang.source)\n        for lang in sum(lang_tags, [])\n    )))", "docstring": "Collect informations about language of the page from HTML and Dublin core\ntags and langdetect guesses.\n\nArgs:\nindex_page (str): HTML content of the page you wish to analyze.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "juraj-google-style"}
{"code": "def _ragged_tensor_binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n    fn = functools.partial(binary_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n    return _ragged_tensor_apply_loss(fn, y_true, y_pred)", "docstring": "Implements support for handling RaggedTensors.\n\nArgs:\ny_true: Tensor of one-hot true targets.\ny_pred: Tensor of predicted targets.\nfrom_logits: Whether `y_pred` is expected to be a logits tensor. By default,\nwe assume that `y_pred` encodes a probability distribution.\nlabel_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For\nexample, if `0.1`, use `0.1 / num_classes` for non-target labels\nand `0.9 + 0.1 / num_classes` for target labels.\naxis: Axis along which to compute crossentropy.\n\nReturns:\nBinary crossentropy loss value.\n\nExpected shape: (batch, sequence_len) with sequence_len being variable\nper batch.\nReturn shape: (batch,); returns the per batch mean of the loss values.\n\nWhen used by BinaryCrossentropy() with the default reduction\n(SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over\nthe number of batches.", "source": "github-repos"}
{"code": "def _loop_exits_early(loop):\n    loop_nodes = (astroid.For, astroid.While)\n    definition_nodes = (astroid.FunctionDef, astroid.ClassDef)\n    inner_loop_nodes = [_node for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes) if (_node != loop)]\n    return any((_node for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes) if (_get_break_loop_node(_node) not in inner_loop_nodes)))", "docstring": "Returns true if a loop may ends up in a break statement.\n\nArgs:\nloop (astroid.For, astroid.While): the loop node inspected.\n\nReturns:\nbool: True if the loop may ends up in a break statement, False otherwise.", "source": "codesearchnet"}
{"code": "def _run_dnb_normalization(self, dnb_data, sza_data):\n    dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))\n    sza_data = xr.DataArray(sza_data, dims=('y', 'x'))\n    good_mask = (~ (dnb_data.isnull() | sza_data.isnull()))\n    output_dataset = dnb_data.where(good_mask)\n    output_dataset = output_dataset.values.copy()\n    dnb_data = dnb_data.values\n    sza_data = sza_data.values\n    (day_mask, mixed_mask, night_mask) = make_day_night_masks(sza_data, good_mask.values, self.high_angle_cutoff, self.low_angle_cutoff, stepsDegrees=self.mixed_degree_step)\n    did_equalize = False\n    has_multi_times = (len(mixed_mask) > 0)\n    if day_mask.any():\n        did_equalize = True\n        if ((self.adaptive_day == 'always') or (has_multi_times and (self.adaptive_day == 'multiple'))):\n            LOG.debug('Adaptive histogram equalizing DNB day data...')\n            local_histogram_equalization(dnb_data, day_mask, valid_data_mask=good_mask.values, local_radius_px=self.day_radius_pixels, out=output_dataset)\n        else:\n            LOG.debug('Histogram equalizing DNB day data...')\n            histogram_equalization(dnb_data, day_mask, out=output_dataset)\n    if mixed_mask:\n        for mask in mixed_mask:\n            if mask.any():\n                did_equalize = True\n                if ((self.adaptive_mixed == 'always') or (has_multi_times and (self.adaptive_mixed == 'multiple'))):\n                    LOG.debug('Adaptive histogram equalizing DNB mixed data...')\n                    local_histogram_equalization(dnb_data, mask, valid_data_mask=good_mask.values, local_radius_px=self.mixed_radius_pixels, out=output_dataset)\n                else:\n                    LOG.debug('Histogram equalizing DNB mixed data...')\n                    histogram_equalization(dnb_data, day_mask, out=output_dataset)\n    if night_mask.any():\n        did_equalize = True\n        if ((self.adaptive_night == 'always') or (has_multi_times and (self.adaptive_night == 'multiple'))):\n            LOG.debug('Adaptive histogram equalizing DNB night data...')\n            local_histogram_equalization(dnb_data, night_mask, valid_data_mask=good_mask.values, local_radius_px=self.night_radius_pixels, out=output_dataset)\n        else:\n            LOG.debug('Histogram equalizing DNB night data...')\n            histogram_equalization(dnb_data, night_mask, out=output_dataset)\n    if (not did_equalize):\n        raise RuntimeError('No valid data found to histogram equalize')\n    return output_dataset", "docstring": "Scale the DNB data using a adaptive histogram equalization method.\n\nArgs:\ndnb_data (ndarray): Day/Night Band data array\nsza_data (ndarray): Solar Zenith Angle data array", "source": "codesearchnet"}
{"code": "def clarke_thermalcond(self, structure):\n        \n        nsites = structure.num_sites\n        volume = structure.volume\n        tot_mass = sum([e.atomic_mass for e in structure.species])\n        natoms = structure.composition.num_atoms\n        weight = float(structure.composition.weight)\n        avg_mass = 1.6605e-27 * tot_mass / natoms\n        mass_density = 1.6605e3 * nsites * weight / (natoms * volume)\n        return 0.87 * 1.3806e-23 * avg_mass**(-2./3.) \\\n            * mass_density**(1./6.) * self.y_mod**0.5", "docstring": "Calculates Clarke's thermal conductivity (in SI units)\n\nArgs:\nstructure: pymatgen structure object\n\nReturns: Clarke's thermal conductivity (in SI units)", "source": "juraj-google-style"}
{"code": "def valid_paths(self, *args):\n        \n        for i, path in enumerate(args, start=0):\n            cp = list(args)\n            current = cp.pop(i)\n            if current in cp:\n                raise SettingsInvalidError(\"Multiple occurences finded for \"\n                                           \"path: {}\".format(current))\n\n        return True", "docstring": "Validate that given paths are not the same.\n\nArgs:\n(string): Path to validate.\n\nRaises:\nboussole.exceptions.SettingsInvalidError: If there is more than one\noccurence of the same path.\n\nReturns:\nbool: ``True`` if paths are validated.", "source": "juraj-google-style"}
{"code": "def table_exists(client, table_reference):\n    \n    from google.cloud.exceptions import NotFound\n\n    try:\n        client.get_table(table_reference)\n        return True\n    except NotFound:\n        return False", "docstring": "Return if a table exists.\n\nArgs:\nclient (google.cloud.bigquery.client.Client):\nA client to connect to the BigQuery API.\ntable_reference (google.cloud.bigquery.table.TableReference):\nA reference to the table to look for.\n\nReturns:\nbool: ``True`` if the table exists, ``False`` otherwise.", "source": "juraj-google-style"}
{"code": "def average_over_unit_sphere(self, quad=None):\n    quad = (quad or DEFAULT_QUAD)\n    (weights, points) = (quad['weights'], quad['points'])\n    return sum([(w * self.project(n)) for (w, n) in zip(weights, points)])", "docstring": "Method for averaging the tensor projection over the unit\nwith option for custom quadrature.\n\nArgs:\nquad (dict): quadrature for integration, should be\ndictionary with \"points\" and \"weights\" keys defaults\nto quadpy.sphere.Lebedev(19) as read from file\n\nReturns:\nAverage of tensor projected into vectors on the unit sphere", "source": "codesearchnet"}
{"code": "def etm_register_write(self, register_index, value, delay=False):\n        \n        self._dll.JLINKARM_ETM_WriteReg(int(register_index), int(value), int(delay))\n        return None", "docstring": "Writes a value to an ETM register.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nregister_index (int): the register to write to.\nvalue (int): the value to write to the register.\ndelay (bool): boolean specifying if the write should be buffered.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like\nRoBERTa, does not make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def hset(self, key, value):\n        \n        return self.r.hset(self.hash, key, value)", "docstring": "Create key/value pair in Redis.\n\nArgs:\nkey (string): The key to create in Redis.\nvalue (any): The value to store in Redis.\n\nReturns:\n(string): The response from Redis.", "source": "juraj-google-style"}
{"code": "def composite_tensor_to_variants(value, type_spec=None, name=None):\n    if not isinstance(value, composite_tensor.CompositeTensor):\n        raise TypeError(f'Expected `value` to be a CompositeTensor. Received {type(value)}.')\n    if type_spec is None:\n        type_spec = value._type_spec\n    if not type_spec.is_compatible_with(value):\n        raise ValueError(f'`type_spec` {type_spec} is not compatible with `value` {value!r}.')\n    metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata()\n    metadata.type_spec_proto.CopyFrom(nested_structure_coder.encode_structure(type_spec).type_spec_value)\n    return gen_composite_tensor_ops.CompositeTensorVariantFromComponents(components=nest.flatten(value, expand_composites=True), metadata=metadata.SerializeToString(), name=name)", "docstring": "Encodes `value` as a scalar variant tensor.\n\nArgs:\nvalue: The `ExtensionType` value to encode.\ntype_spec: Information about the value's type that should be included in the\nencoding.\nname: Optional name for the operation.\n\nReturns:\nA Tensor with shape=`()` and dtype=`tf.variant`.\n\nRaises:\nValueError: If `type_spec` is not compatible with `value`.", "source": "github-repos"}
{"code": "def permute(self, ordering: np.ndarray, axis: int) -> None:\n    if self._file.__contains__('tiles'):\n        del self._file['tiles']\n    ordering = list(np.array(ordering).flatten())\n    self.layers._permute(ordering, axis=axis)\n    if (axis == 0):\n        self.row_attrs._permute(ordering)\n        self.row_graphs._permute(ordering)\n    if (axis == 1):\n        self.col_attrs._permute(ordering)\n        self.col_graphs._permute(ordering)", "docstring": "Permute the dataset along the indicated axis.\n\nArgs:\nordering (list of int): \tThe desired order along the axis\n\naxis (int):\t\t\t\t\tThe axis along which to permute\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes, means=300):\n    M = (np.random.random((n_genes, n_states)) * means)\n    center = M.mean(1)\n    W = np.zeros((n_states, (n_cells_per_cluster * n_states)))\n    index = 0\n    means = np.array(([(1.0 / n_states)] * n_states))\n    for c in range(n_states):\n        for i in range(n_cells_per_cluster):\n            w = np.copy(means)\n            new_value = (w[c] + ((i * (1.0 - (1.0 / n_states))) / n_cells_per_cluster))\n            w[:] = ((1.0 - new_value) / (n_states - 1.0))\n            w[c] = new_value\n            W[(:, index)] = w\n            index += 1\n    return (M, W)", "docstring": "Generates a lineage for each state- assumes that each state has a common\nancestor.\n\nReturns:\nM - genes x clusters\nW - clusters x cells", "source": "codesearchnet"}
{"code": "def import_image_from_image(self, image, repository=None, tag=None,\n                                changes=None):\n        \n        return self.import_image(\n            image=image, repository=repository, tag=tag, changes=changes\n        )", "docstring": "Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only\nsupports importing from another image, like the ``FROM`` Dockerfile\nparameter.\n\nArgs:\nimage (str): Image name to import from\nrepository (str): The repository to create\ntag (str): The tag to apply", "source": "juraj-google-style"}
{"code": "def HasWarnings(self):\n    has_errors = self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_ERROR)\n    if has_errors:\n        return True\n    return self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING)", "docstring": "Determines if a store contains extraction warnings.\n\nReturns:\nbool: True if the store contains extraction warnings.", "source": "codesearchnet"}
{"code": "def util_pattern_space(time_series, lag, dim):\n    \n    n = len(time_series)\n\n    if lag * dim > n:\n        raise Exception('Result matrix exceeded size limit, try to change lag or dim.')\n    elif lag < 1:\n        raise Exception('Lag should be greater or equal to 1.')\n\n    pattern_space = np.empty((n - lag * (dim - 1), dim))\n    for i in range(n - lag * (dim - 1)):\n        for j in range(dim):\n            pattern_space[i][j] = time_series[i + j * lag]\n\n    return pattern_space", "docstring": "Create a set of sequences with given lag and dimension\n\nArgs:\ntime_series: Vector or string of the sample data\nlag: Lag between beginning of sequences\ndim: Dimension (number of patterns)\n\nReturns:\n2D array of vectors", "source": "juraj-google-style"}
{"code": "def get(self, type: Type[T], query: Mapping[str, Any]) -> T:\n        \n        LOGGER.info(\"Getting SourceHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n        try:\n            handlers = self._get_types[type]\n        except KeyError:\n            try:\n                LOGGER.info(\"Building new SourceHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n                handlers = self._get_handlers(type)\n            except NoConversionError:\n                handlers = None\n            self._get_types[type] = handlers\n\n        if handlers is None:\n            raise NoConversionError(\"No source can provide \\\"{type}\\\"\".format(type=type.__name__))\n\n        LOGGER.info(\"Creating new PipelineContext\")\n        context = self._new_context()\n\n        LOGGER.info(\"Querying SourceHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n        for handler in handlers:\n            try:\n                return handler.get(query, context)\n            except NotFoundError:\n                pass\n\n        raise NotFoundError(\"No source returned a query result!\")", "docstring": "Gets a query from the data pipeline.\n\n1) Extracts the query the sequence of data sources.\n2) Inserts the result into the data sinks (if appropriate).\n3) Transforms the result into the requested type if it wasn't already.\n4) Inserts the transformed result into any data sinks.\n\nArgs:\nquery: The query being requested.\ncontext: The context for the extraction (mutable).\n\nReturns:\nThe requested object.", "source": "juraj-google-style"}
{"code": "def write_dirpath(dirpath, strategy):\n    if strategy is None:\n        strategy = distribute_lib.get_strategy()\n    if strategy is None:\n        return dirpath\n    if not strategy.extended._in_multi_worker_mode():\n        return dirpath\n    if strategy.extended.should_checkpoint:\n        return dirpath\n    return _get_temp_dir(dirpath, strategy)", "docstring": "Returns the writing dir that should be used to save file distributedly.\n\n`dirpath` would be created if it doesn't exist.\n\nArgs:\ndirpath: Original dirpath that would be used without distribution.\nstrategy: The tf.distribute strategy object currently used.\n\nReturns:\nThe writing dir path that should be used to save with distribution.", "source": "github-repos"}
{"code": "def determine_final_config(config_module):\n    \n    config = Config(\n        DEFAULT_LIBRARY_RC_ADDITIONS, DEFAULT_LIBRARY_RC_REPLACEMENTS,\n        DEFAULT_TEST_RC_ADDITIONS, DEFAULT_TEST_RC_REPLACEMENTS)\n\n    for field in config._fields:\n        if hasattr(config_module, field):\n            config = config._replace(**{field: getattr(config_module, field)})\n\n    return config", "docstring": "Determines the final additions and replacements.\n\nCombines the config module with the defaults.\n\nArgs:\nconfig_module: The loaded local configuration module.\n\nReturns:\nConfig: the final configuration.", "source": "juraj-google-style"}
{"code": "def count_weights(scope=None, exclude=None, graph=None):\n  \n  if scope:\n    scope = scope if scope.endswith('/') else scope + '/'\n  graph = graph or tf.get_default_graph()\n  vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n  if scope:\n    vars_ = [var for var in vars_ if var.name.startswith(scope)]\n  if exclude:\n    exclude = re.compile(exclude)\n    vars_ = [var for var in vars_ if not exclude.match(var.name)]\n  shapes = [var.get_shape().as_list() for var in vars_]\n  return int(sum(np.prod(shape) for shape in shapes))", "docstring": "Count learnable parameters.\n\nArgs:\nscope: Restrict the count to a variable scope.\nexclude: Regex to match variable names to exclude.\ngraph: Operate on a graph other than the current default graph.\n\nReturns:\nNumber of learnable parameters as integer.", "source": "juraj-google-style"}
{"code": "def _format_collection_name(self):\n    base_uri = self._format_resource_name()\n    if (base_uri[(- 2):] == '_s'):\n        endind = 2\n    else:\n        endind = 1\n    return base_uri[:(- endind)]", "docstring": "Formats a name from Collection format\n\nCollections are of two name formats based on their actual URI\nrepresentation in the REST service.\n\n1. For cases where the actual URI of a collection is singular, for\nexample,\n\n/mgmt/tm/ltm/node\n\nThe name of the collection, as exposed to the user, will be made\nplural. For example,\n\nmgmt.tm.ltm.nodes\n\n2. For cases where the actual URI of a collection is plural, for\nexample,\n\n/mgmt/cm/shared/licensing/pools/\n\nThe name of the collection, as exposed to the user, will remain\nplural, but will have an `_s` appended to it. For example,\n\nmgmt.cm.shared.licensing.pools_s\n\nThis method is responsible for undoing the user provided plurality.\nIt ensures that the URI that is being sent to the REST service is\ncorrectly plural, or plural plus.\n\nReturns:\nA string representation of the user formatted Collection with its\nplurality identifier removed appropriately.", "source": "codesearchnet"}
{"code": "def _ParseToken(self, file_object, file_offset):\n    \n    token_type = self._ParseTokenType(file_object, file_offset)\n    token_data = None\n\n    token_data_map_name = self._DATA_TYPE_MAP_PER_TOKEN_TYPE.get(\n        token_type, None)\n    if token_data_map_name:\n      token_data_map = self._GetDataTypeMap(token_data_map_name)\n\n      token_data, _ = self._ReadStructureFromFileObject(\n          file_object, file_offset + 1, token_data_map)\n\n    return token_type, token_data", "docstring": "Parses a token.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\nfile_offset (int): offset of the token relative to the start of\nthe file-like object.\n\nReturns:\ntuple: containing:\nint: token type\nobject: token data or None if the token type is not supported.", "source": "juraj-google-style"}
{"code": "def register_flag_by_module_id(self, module_id, flag):\n    \n    flags_by_module_id = self.flags_by_module_id_dict()\n    flags_by_module_id.setdefault(module_id, []).append(flag)", "docstring": "Records the module that defines a specific flag.\n\nArgs:\nmodule_id: int, the ID of the Python module.\nflag: Flag, the Flag instance that is key to the module.", "source": "juraj-google-style"}
{"code": "def __init__(self, nmr_items, ctype):\n        \n        self._ctype = ctype\n        self._mot_float_dtype = None\n        self._nmr_items = nmr_items", "docstring": "Adds a private memory array of the indicated size to the kernel data elements.\n\nThis is useful if you want to have private memory arrays in kernel data structs.\n\nArgs:\nnmr_items (int): the size of the private memory array\nctype (str): the desired c-type for this local memory object, like ``int``, ``float`` or ``mot_float_type``.", "source": "juraj-google-style"}
{"code": "def handle_intermediate_response(self, item_session: ItemSession) -> Actions:\n    self._waiter.reset()\n    action = self.handle_response(item_session)\n    return action", "docstring": "Callback for successful intermediate responses.\n\nReturns:\nA value from :class:`.hook.Actions`.", "source": "codesearchnet"}
{"code": "def filter_pyfqn(cls, value, relative_to=0):\n\n    def collect_packages(element, packages):\n        parent = element.eContainer()\n        if parent:\n            collect_packages(parent, packages)\n        packages.append(element.name)\n    packages = []\n    collect_packages(value, packages)\n    if ((relative_to < 0) or (relative_to > len(packages))):\n        raise ValueError('relative_to not in range of number of packages')\n    fqn = '.'.join(packages[relative_to:])\n    if relative_to:\n        fqn = ('.' + fqn)\n    return cls.module_path_map.get(fqn, fqn)", "docstring": "Returns Python form of fully qualified name.\n\nArgs:\nrelative_to: If greater 0, the returned path is relative to the first n directories.", "source": "codesearchnet"}
{"code": "async def get_run_error(self, pipeline_uuid: str) -> str:\n    self._verify_pipeline_uuid(pipeline_uuid)\n    request = api_pb2.GetRunErrorRequest(pipeline_uuid=pipeline_uuid)\n    response = await self._stub.GetRunError(request, **self._kwargs)\n    return response.output", "docstring": "Get the error of pipeline execution.\n\nArgs:\npipeline_uuid: uuid of the pipeline\n\nReturns:\noutput: contain an error of pipeline execution", "source": "github-repos"}
{"code": "def __init__(self, rdfclass=None, **kwargs):\n    \n    super(RDFValueType, self).__init__(**kwargs)\n    self._type = self.rdfclass = rdfclass", "docstring": "An arg which must be an RDFValue.\n\nArgs:\nrdfclass: The RDFValue class that this arg must be.\n**kwargs: Passthrough to base class.", "source": "juraj-google-style"}
{"code": "def update(self, data, timeout=-1, force=False):\n        \n        uri = self.data[\"uri\"]\n        self.data = self._helper.update(data, uri=uri, timeout=timeout, force=force)\n\n        return self", "docstring": "Updates one or more attributes for a server hardware type resource.\nArgs:\ndata (dict): Object to update.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nforce: Flag to force the operation.\nReturns:\ndict: Updated server hardware type.", "source": "juraj-google-style"}
{"code": "def repr_assist(obj, remap=None):\n    \n    if not remap:\n        remap = {}\n    data = []\n    for arg in inspect.getargspec(getattr(obj.__class__, '__init__'))[0]:\n        if arg == 'self':\n            continue\n        elif arg in remap:\n            value = remap[arg]\n        else:\n            try:\n                value = getattr(obj, arg)\n            except AttributeError:\n                value = getattr(obj, '_%s' % arg)\n        if isinstance(value, (type(None), list, basestring, datetime.date,\n                              datetime.time)):\n            data.append(repr(value))\n        else:\n            data.append(str(value))\n    return \"%s(%s)\" % (obj.__class__.__name__, ', '.join(data))", "docstring": "Helper function to simplify ``__repr__`` methods.\n\nArgs:\nobj: Object to pull argument values for\nremap (dict): Argument pairs to remap before output\n\nReturns:\nstr: Self-documenting representation of ``value``", "source": "juraj-google-style"}
{"code": "def ask_stories(self, raw=False, limit=None):\n        \n        ask_stories = self._get_stories('askstories', limit)\n        if raw:\n            ask_stories = [story.raw for story in ask_stories]\n        return ask_stories", "docstring": "Returns list of item ids of latest Ask HN stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of Ask HN stories.", "source": "juraj-google-style"}
{"code": "def set(self, namespace, key, value, description=None):\n        \n\n        if isinstance(value, DBCChoice):\n            vtype = 'choice'\n\n        elif isinstance(value, DBCString):\n            vtype = 'string'\n\n        elif isinstance(value, DBCFloat):\n            vtype = 'float'\n\n        elif isinstance(value, DBCInt):\n            vtype = 'int'\n\n        elif isinstance(value, DBCArray):\n            vtype = 'array'\n\n        elif isinstance(value, DBCJSON):\n            vtype = 'json'\n\n        elif isinstance(value, bool):\n            vtype = 'bool'\n\n        else:\n            raise ValueError('Invalid config item type: {}'.format(type(value)))\n\n        if namespace in self.__data and key in self.__data[namespace]:\n            itm = db.ConfigItem.find_one(\n                ConfigItem.namespace_prefix == namespace,\n                ConfigItem.key == key\n            )\n\n            if not itm:\n                raise KeyError(key)\n\n            itm.value = value\n            itm.type = vtype\n            if description:\n                itm.description = description\n        else:\n            itm = ConfigItem()\n            itm.key = key\n            itm.value = value\n            itm.type = vtype\n            itm.description = description\n            itm.namespace_prefix = namespace\n\n        db.session.add(itm)\n        db.session.commit()\n\n        if namespace in self.__data:\n            self.__data[namespace][key] = value\n        else:\n            self.__data[namespace] = {key: value}", "docstring": "Set (create/update) a configuration item\n\nArgs:\nnamespace (`str`): Namespace for the item\nkey (`str`): Key of the item\nvalue (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or\n`bool`\ndescription (`str`): Description of the configuration item\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def CopyFromStringTuple(self, time_elements_tuple):\n    \n    if len(time_elements_tuple) < 7:\n      raise ValueError((\n          'Invalid time elements tuple at least 7 elements required,'\n          'got: {0:d}').format(len(time_elements_tuple)))\n\n    year, month, day_of_month, hours, minutes, seconds, milliseconds = (\n        time_elements_tuple)\n\n    try:\n      milliseconds = int(milliseconds, 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid millisecond value: {0!s}'.format(milliseconds))\n\n    if milliseconds < 0 or milliseconds >= definitions.MILLISECONDS_PER_SECOND:\n      raise ValueError('Invalid number of milliseconds.')\n\n    fraction_of_second = (\n        decimal.Decimal(milliseconds) / definitions.MILLISECONDS_PER_SECOND)\n\n    time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds,\n        str(fraction_of_second))\n\n    super(TimeElementsInMilliseconds, self).CopyFromStringTuple(\n        time_elements_tuple)", "docstring": "Copies time elements from string-based time elements tuple.\n\nArgs:\ntime_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]):\ntime elements, contains year, month, day of month, hours, minutes,\nseconds and milliseconds.\n\nRaises:\nValueError: if the time elements tuple is invalid.", "source": "juraj-google-style"}
{"code": "def __init__(self, parameter, value, valid_values=None):\n        \n        \n        msg = 'Invalid value \"{value}\" supplied to {parameter}.'.format(\n            parameter=parameter, value=value)\n        if valid_values:\n            msg += ' Valid options are: {}'.format(', '.join(valid_values))\n        super(InvalidCliValueError, self).__init__(msg)", "docstring": "Instantiate the exception with a descriptive message.\n\nArgs:\nparameter: The CLI parameter with the invalid value.\nvalue: The invalid value passed to the CLI parameter.\nvalid_values: The values that would have been accepted by the\nparameter.", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        num_image_tokens = [self.image_seq_length + 2] * len(image_sizes)\n        num_image_patches = [1] * len(image_sizes)\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def as_signature_def(self, receiver_tensors):\n    pass", "docstring": "Generate a SignatureDef proto for inclusion in a MetaGraphDef.\n\nThe SignatureDef will specify outputs as described in this ExportOutput,\nand will use the provided receiver_tensors as inputs.\n\nArgs:\nreceiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying\ninput nodes that will be fed.", "source": "github-repos"}
{"code": "def get_product_version(path: typing.Union[str, Path]) -> VersionInfo:\n    \n    path = Path(path).absolute()\n    pe_info = pefile.PE(str(path))\n\n    try:\n        for file_info in pe_info.FileInfo:  \n            if isinstance(file_info, list):\n                result = _parse_file_info(file_info)\n                if result:\n                    return result\n            else:\n                result = _parse_file_info(pe_info.FileInfo)\n                if result:\n                    return result\n\n        raise RuntimeError(f'unable to obtain version from {path}')\n    except (KeyError, AttributeError) as exc:\n        traceback.print_exc()\n        raise RuntimeError(f'unable to obtain version from {path}') from exc", "docstring": "Get version info from executable\n\nArgs:\npath: path to the executable\n\nReturns: VersionInfo", "source": "juraj-google-style"}
{"code": "def binary_guesser(handle, num_bytes=512):\n    text_chars = (''.join(map(chr, range(32, 127))) + '\\n\\r\\t\\x08')\n    byte_chars = text_chars.encode()\n    handle_location = handle.tell()\n    first_block = handle.read(num_bytes)\n    if (type(first_block) is str):\n        first_block = first_block.encode()\n    filtered_block = first_block.translate(None, delete=byte_chars)\n    handle.seek(handle_location)\n    if ((float(len(filtered_block)) / float(len(first_block))) > 0.3):\n        pass\n    else:\n        msg = '{0} is probably not a binary file'.format(handle.name)\n        raise FormatError(message=msg)", "docstring": "Raise error if file not likely binary\n\nGuesses if a file is binary, raises error if file is not likely binary,\nthen returns to location in file when handle passed to binary_guesser.\n\nArgs:\nhandle (file): File handle of file thought to be binary\n\nnum_bytes (int): Bytes of file to read to guess binary, more bytes\nis often better but takes longer\n\nRaises:\nFormatError: Error raised if file is not likely binary\n\nExample:\nThe following example demonstrate how to use binary_guesser.\nNote: These doctests will not pass, examples are only in doctest\nformat as per convention. bio_utils uses pytests for testing.\n\n>>> binary_guesser(open('test.binary'))", "source": "codesearchnet"}
{"code": "def range(self, start_row=0, max_rows=None):\n    fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)\n    return iter(datalab.utils.Iterator(fetcher))", "docstring": "Get an iterator to iterate through a set of table rows.\n\nArgs:\nstart_row: the row of the table at which to start the iteration (default 0)\nmax_rows: an upper limit on the number of rows to iterate through (default None)\n\nReturns:\nA row iterator.", "source": "codesearchnet"}
{"code": "def fib(n):\n    \n    assert n > 0\n    a, b = 1, 1\n    for i in range(n - 1):\n        a, b = b, a + b\n    return a", "docstring": "Fibonacci example function\n\nArgs:\nn (int): integer\n\nReturns:\nint: n-th Fibonacci number", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.Range = channel.unary_unary(\n        '/etcdserverpb.KV/Range',\n        request_serializer=rpc__pb2.RangeRequest.SerializeToString,\n        response_deserializer=rpc__pb2.RangeResponse.FromString,\n        )\n    self.Put = channel.unary_unary(\n        '/etcdserverpb.KV/Put',\n        request_serializer=rpc__pb2.PutRequest.SerializeToString,\n        response_deserializer=rpc__pb2.PutResponse.FromString,\n        )\n    self.DeleteRange = channel.unary_unary(\n        '/etcdserverpb.KV/DeleteRange',\n        request_serializer=rpc__pb2.DeleteRangeRequest.SerializeToString,\n        response_deserializer=rpc__pb2.DeleteRangeResponse.FromString,\n        )\n    self.Txn = channel.unary_unary(\n        '/etcdserverpb.KV/Txn',\n        request_serializer=rpc__pb2.TxnRequest.SerializeToString,\n        response_deserializer=rpc__pb2.TxnResponse.FromString,\n        )\n    self.Compact = channel.unary_unary(\n        '/etcdserverpb.KV/Compact',\n        request_serializer=rpc__pb2.CompactionRequest.SerializeToString,\n        response_deserializer=rpc__pb2.CompactionResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def set_status(self, status):\n        \n\n        text = \"\"\n        colour = \"\n        if status == 0:\n            text = \"OFFLINE\"\n            colour = \"\n        elif status == 1:\n            text = \"STARTING\"\n            colour = \"\n        elif status == 2:\n            text = \"ONLINE\"\n            colour = \"\n\n        self.status.set(text)\n        self.statusbar.config(background=colour)", "docstring": "Updates the status text\n\nArgs:\nstatus (int): The offline/starting/online status of Modis\n0: offline, 1: starting, 2: online", "source": "juraj-google-style"}
{"code": "def get_registered_name(obj):\n    if obj in _GLOBAL_CUSTOM_NAMES:\n        return _GLOBAL_CUSTOM_NAMES[obj]\n    else:\n        return obj.__name__", "docstring": "Returns the name registered to an object within the Keras framework.\n\nThis function is part of the Keras serialization and deserialization\nframework. It maps objects to the string names associated with those objects\nfor serialization/deserialization.\n\nArgs:\nobj: The object to look up.\n\nReturns:\nThe name associated with the object, or the default Python name if the\nobject is not registered.", "source": "github-repos"}
{"code": "def _check_model_use_buffer_offset(model_object):\n    if not model_object.metadata:\n        return False\n    for meta in model_object.metadata:\n        if meta.name.decode('utf-8') == 'buffer_location':\n            return True\n    return False", "docstring": "Checks if a model object uses buffer offsets to store constant buffers.\n\nArgs:\nmodel_object: tflite model, a python object\n\nReturns:\nTrue of the model_object has the metadata entry \"buffer_location\"\nFalse otherwise", "source": "github-repos"}
{"code": "def from_parquet(path: str, timestamps: str='timestamp', indexes: Optional[List[str]]=None, **kwargs) -> EventSet:\n    import pandas as pd\n    if indexes is None:\n        indexes = []\n    df = pd.read_parquet(path, **kwargs)\n    return from_pandas(df, indexes=indexes, timestamps=timestamps)", "docstring": "Reads an [`EventSet`][temporian.EventSet] from a parquet file.\n\nExample:\n```python\n>>> temp_file = str(tmp_dir / \"temporal_data.parquet\")\n>>> og_eventset = tp.event_set(timestamps=[1,], features={\"f1\": [0.1]})\n>>> tp.to_parquet(og_eventset, temp_file)\n>>> evset = tp.from_parquet(temp_file)\n>>> evset\nindexes: []\nfeatures: [('f1', float64)]\nevents:\n(1 events):\ntimestamps: [1.]\n'f1': [0.1]\n...\n\n```\n\nArgs:\npath: Path to the file.\ntimestamps: Name of the column to be used as timestamps for the\nEventSet.\nindexes: Names of the columns to be used as indexes for the EventSet.\nIf None, a flat EventSet will be created.\n\nReturns:\nEventSet read from file.", "source": "github-repos"}
{"code": "def load_json(filename, **kwargs):\n    with open(filename, 'r', encoding='utf-8') as f:\n        return json.load(f, **kwargs)", "docstring": "Load a JSON object from the specified file.\n\nArgs:\nfilename: Path to the input JSON file.\n**kwargs: Additional arguments to `json.load`.\n\nReturns:\nThe object deserialized from JSON.", "source": "codesearchnet"}
{"code": "def parse_url(cls, string):\n    match = cls.URL_RE.match(string)\n    if (not match):\n        raise InvalidKeyError(cls, string)\n    return match.groupdict()", "docstring": "If it can be parsed as a version_guid with no preceding org + offering, returns a dict\nwith key 'version_guid' and the value,\n\nIf it can be parsed as a org + offering, returns a dict\nwith key 'id' and optional keys 'branch' and 'version_guid'.\n\nRaises:\nInvalidKeyError: if string cannot be parsed -or- string ends with a newline.", "source": "codesearchnet"}
{"code": "def _randomize_speed(base_speed: int, sigma: int=None) -> int:\n    if (sigma is None):\n        int_sigma = int((base_speed / 4))\n    else:\n        int_sigma = sigma\n    val = MissionWeather._gauss(base_speed, int_sigma)\n    if (val < 0):\n        return 0\n    return min(val, 50)", "docstring": "Creates a variation in wind speed\n\nArgs:\nbase_speed: base wind speed\nsigma: sigma value for gaussian variation\n\nReturns: random wind speed", "source": "codesearchnet"}
{"code": "def run_inference(self, batch: Sequence[Union[tf.Tensor, torch.Tensor]], model: Union[AutoModel, TFAutoModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    inference_args = {} if not inference_args else inference_args\n    if not self._framework:\n        if isinstance(batch[0], tf.Tensor):\n            self._framework = 'tf'\n        else:\n            self._framework = 'pt'\n    if self._framework == 'pt' and self._device == 'GPU' and is_gpu_available_torch():\n        model.to(torch.device('cuda'))\n    if self._inference_fn:\n        return self._inference_fn(batch, model, inference_args, inference_args, self._model_uri)\n    if self._framework == 'tf':\n        return _default_inference_fn_tensorflow(batch, model, self._device, inference_args, self._model_uri)\n    else:\n        return _default_inference_fn_torch(batch, model, self._device, inference_args, self._model_uri)", "docstring": "Runs inferences on a batch of Tensors and returns an Iterable of\nTensors Predictions.\n\nThis method stacks the list of Tensors in a vectorized format to optimize\nthe inference call.\n\nArgs:\nbatch: A sequence of Tensors. These Tensors should be batchable, as\nthis method will call `tf.stack()`/`torch.stack()` and pass in\nbatched Tensors with dimensions (batch_size, n_features, etc.)\ninto the model's predict() function.\nmodel: A Tensorflow/PyTorch model.\ninference_args (dict[str, Any]): Non-batchable arguments required as\ninputs to the model's inference function. Unlike Tensors in `batch`,\nthese parameters will not be dynamically batched.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def best_training_job(self):\n    self._ensure_last_tuning_job()\n    tuning_job_describe_result = self.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=self.latest_tuning_job.name)\n    try:\n        return tuning_job_describe_result['BestTrainingJob']['TrainingJobName']\n    except KeyError:\n        raise Exception('Best training job not available for tuning job: {}'.format(self.latest_tuning_job.name))", "docstring": "Return name of the best training job for the latest hyperparameter tuning job.\n\nRaises:\nException: If there is no best training job available for the hyperparameter tuning job.", "source": "codesearchnet"}
{"code": "def _ffn_layer_multi_inputs(inputs_list, hparams, ffn_layer_type='dense', name='ffn', kernel_initializer=None, bias_initializer=None, activation=None, pad_remover=None, preprocess=False, postprocess=False):\n    num_inputs = len(inputs_list)\n    assert (num_inputs > 0)\n    if (preprocess and (num_inputs == 1)):\n        inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams)\n    if postprocess:\n        original_inputs = inputs_list[0]\n    main_input = inputs_list[0]\n    original_shape = common_layers.shape_list(main_input)\n    assert (hparams.hidden_size == common_layers.shape_list(main_input)[(- 1)])\n    for inputs in inputs_list:\n        main_input.get_shape().assert_is_compatible_with(inputs.get_shape())\n\n    def remove_pads(x):\n        original_shape = common_layers.shape_list(x)\n        x = tf.reshape(x, tf.concat([[(- 1)], original_shape[2:]], axis=0))\n        x = tf.expand_dims(pad_remover.remove(x), axis=0)\n        return x\n    if pad_remover:\n        for (i, inputs) in enumerate(inputs_list):\n            inputs_list[i] = remove_pads(inputs)\n    ffn_inputs = inputs_list[0]\n    if (len(inputs_list) != 1):\n        ffn_inputs = tf.concat(inputs_list, axis=(- 1))\n    if (ffn_layer_type == 'dense'):\n        output = common_layers.dense(ffn_inputs, hparams.hidden_size, name=name, activation=activation, use_bias=True, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)\n    elif (ffn_layer_type == 'dense_dropconnect'):\n        output = common_layers.dense_dropconnect(ffn_inputs, hparams.hidden_size, name=name, dropconnect_dropout=hparams.dropconnect_dropout, output_activation=activation)\n        postprocess = False\n    elif (ffn_layer_type == 'dense_relu_dense'):\n        output = common_layers.dense_relu_dense(ffn_inputs, hparams.filter_size, hparams.hidden_size, name=name, dropout=hparams.relu_dropout, output_activation=activation)\n    else:\n        raise ValueError(('Unknown ffn_layer type: %s' % ffn_layer_type))\n    if pad_remover:\n        output = tf.reshape(pad_remover.restore(tf.squeeze(output, axis=0)), original_shape)\n    if postprocess:\n        if (num_inputs == 1):\n            output = common_layers.layer_postprocess(original_inputs, output, hparams)\n        else:\n            hp = copy.copy(hparams)\n            hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace('a', '')\n            output = common_layers.layer_postprocess(original_inputs, output, hp)\n    return output", "docstring": "Implements a Feed-forward layer with multiple inputs, pad-removing, etc.\n\nArgs:\ninputs_list: list of input tensors\nhparams: hyper-parameters\nffn_layer_type: dense / dense_dropconnect/ dense_relu_dense\nname: name\nkernel_initializer: kernel initializer\nbias_initializer: bias initializer\nactivation: activation function\npad_remover: pad remover\npreprocess: if preprocess the input\npostprocess: if postprocess the output\n\nReturns:\na tensor\nRaises:\nValueError: Unknown ffn_layer type.", "source": "codesearchnet"}
{"code": "def from_json(json):\n    return Point(lat=json['lat'], lon=json['lon'], time=isostr_to_datetime(json['time']))", "docstring": "Creates Point instance from JSON representation\n\nArgs:\njson (:obj:`dict`): Must have at least the following keys: lat (float), lon (float),\ntime (string in iso format). Example,\n{\n\"lat\": 9.3470298,\n\"lon\": 3.79274,\n\"time\": \"2016-07-15T15:27:53.574110\"\n}\njson: map representation of Point instance\nReturns:\n:obj:`Point`", "source": "codesearchnet"}
{"code": "def has_current_path(self, path, **kwargs):\n    try:\n        return self.assert_current_path(path, **kwargs)\n    except ExpectationNotMet:\n        return False", "docstring": "Checks if the page has the given path.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nbool: Whether it matches.", "source": "codesearchnet"}
{"code": "def make_parts_for(self, field_name, field_data):\n        \n        typ = field_data.field_type\n        subtyp = field_data.field_subtype\n\n        if typ in (\"read\", \"xadc\"):\n            writeable = False\n        else:\n            writeable = True\n\n        if typ == \"time\" or typ in (\"param\", \"read\") and subtyp == \"time\":\n            self._make_time_parts(field_name, field_data, writeable)\n        elif typ == \"write\" and subtyp == \"action\":\n            self._make_action_part(field_name, field_data)\n        elif typ in (\"param\", \"read\", \"write\", \"xadc\"):\n            self._make_param_part(field_name, field_data, writeable)\n        elif typ == \"bit_out\":\n            self._make_out(field_name, field_data, \"bit\")\n        elif typ == \"pos_out\":\n            self._make_out(field_name, field_data, \"pos\")\n            self._make_scale_offset(field_name)\n            self._make_out_capture(field_name, field_data)\n        elif typ == \"ext_out\":\n            self._make_out_capture(field_name, field_data)\n        elif typ == \"bit_mux\":\n            self._make_mux(field_name, field_data, \"bit\")\n            self._make_mux_delay(field_name)\n        elif typ == \"pos_mux\":\n            self._make_mux(field_name, field_data, \"pos\")\n        elif typ == \"table\":\n            self._make_table(field_name, field_data)\n        else:\n            raise ValueError(\"Unknown type %r subtype %r\" % (typ, subtyp))", "docstring": "Create the relevant parts for this field\n\nArgs:\nfield_name (str): Short field name, e.g. VAL\nfield_data (FieldData): Field data object", "source": "juraj-google-style"}
{"code": "def create_remoteckan(cls, site_url, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, session=None, **kwargs):\n    if (not session):\n        session = get_session(user_agent, user_agent_config_yaml, user_agent_lookup, prefix=Configuration.prefix, method_whitelist=frozenset(['HEAD', 'TRACE', 'GET', 'POST', 'PUT', 'OPTIONS', 'DELETE']), **kwargs)\n        ua = session.headers['User-Agent']\n    else:\n        ua = kwargs.get('full_agent')\n        if (not ua):\n            ua = UserAgent.get(user_agent, user_agent_config_yaml, user_agent_lookup, prefix=Configuration.prefix, **kwargs)\n    return ckanapi.RemoteCKAN(site_url, user_agent=ua, session=session)", "docstring": "Create remote CKAN instance from configuration\n\nArgs:\nsite_url (str): Site url.\nuser_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.\nuser_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.\nuser_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.\nsession (requests.Session): requests Session object to use. Defaults to calling hdx.utilities.session.get_session()\n\nReturns:\nckanapi.RemoteCKAN: Remote CKAN instance", "source": "codesearchnet"}
{"code": "def set_attribute(self, attribute: str, value: Union[(ScalarType, yaml.Node)]) -> None:\n    start_mark = StreamMark('generated node', 0, 0, 0)\n    end_mark = StreamMark('generated node', 0, 0, 0)\n    if isinstance(value, str):\n        value_node = yaml.ScalarNode('tag:yaml.org,2002:str', value, start_mark, end_mark)\n    elif isinstance(value, bool):\n        value_str = ('true' if value else 'false')\n        value_node = yaml.ScalarNode('tag:yaml.org,2002:bool', value_str, start_mark, end_mark)\n    elif isinstance(value, int):\n        value_node = yaml.ScalarNode('tag:yaml.org,2002:int', str(value), start_mark, end_mark)\n    elif isinstance(value, float):\n        value_node = yaml.ScalarNode('tag:yaml.org,2002:float', str(value), start_mark, end_mark)\n    elif (value is None):\n        value_node = yaml.ScalarNode('tag:yaml.org,2002:null', '', start_mark, end_mark)\n    elif isinstance(value, yaml.Node):\n        value_node = value\n    else:\n        raise TypeError('Invalid kind of value passed to set_attribute()')\n    attr_index = self.__attr_index(attribute)\n    if (attr_index is not None):\n        key_node = self.yaml_node.value[attr_index][0]\n        self.yaml_node.value[attr_index] = (key_node, value_node)\n    else:\n        key_node = yaml.ScalarNode('tag:yaml.org,2002:str', attribute, start_mark, end_mark)\n        self.yaml_node.value.append((key_node, value_node))", "docstring": "Sets the attribute to the given value.\n\nUse only if is_mapping() returns True.\n\nIf the attribute does not exist, this adds a new attribute, \\\nif it does, it will be overwritten.\n\nIf value is a str, int, float, bool or None, the attribute will \\\nbe set to this value. If you want to set the value to a list or \\\ndict containing other values, build a yaml.Node and pass it here.\n\nArgs:\nattribute: Name of the attribute whose value to change.\nvalue: The value to set.", "source": "codesearchnet"}
{"code": "async def set_start_date(self, date: str, time: str, check_in_duration: int = None):\n        \n        date_time = datetime.strptime(date + ' ' + time, '%Y/%m/%d %H:%M')\n        res = await self.connection('PUT',\n                                    'tournaments/{}'.format(self._id),\n                                    'tournament',\n                                    start_at=date_time,\n                                    check_in_duration=check_in_duration or 0)\n        self._refresh_from_json(res)", "docstring": "set the tournament start date (and check in duration)\n\n|methcoro|\n\nArgs:\ndate: fomatted date as YYYY/MM/DD (2017/02/14)\ntime: fromatted time as HH:MM (20:15)\ncheck_in_duration (optional): duration in minutes\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def get_densities(self, spin=None):\n        \n        if self.densities is None:\n            result = None\n        elif spin is None:\n            if Spin.down in self.densities:\n                result = self.densities[Spin.up] + self.densities[Spin.down]\n            else:\n                result = self.densities[Spin.up]\n        else:\n            result = self.densities[spin]\n        return result", "docstring": "Returns the density of states for a particular spin.\n\nArgs:\nspin: Spin\n\nReturns:\nReturns the density of states for a particular spin. If Spin is\nNone, the sum of all spins is returned.", "source": "juraj-google-style"}
{"code": "def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):\n        \n        self.clear_color = (red, green, blue, alpha)\n        self.clear_depth = depth", "docstring": "Sets the clear values for the window buffer.\n\nArgs:\nred (float): red compoent\ngreen (float): green compoent\nblue (float): blue compoent\nalpha (float): alpha compoent\ndepth (float): depth value", "source": "juraj-google-style"}
{"code": "def ParseLines(self, input_lines):\n    current_macro = None\n    for line in input_lines:\n        if line.startswith('PDDM-'):\n            directive = line.split(' ', 1)[0]\n            if (directive == 'PDDM-DEFINE'):\n                (name, args) = self._ParseDefineLine(line)\n                if self._macros.get(name):\n                    raise PDDMError(('Attempt to redefine macro: \"%s\"' % line))\n                current_macro = self.MacroDefinition(name, args)\n                self._macros[name] = current_macro\n                continue\n            if (directive == 'PDDM-DEFINE-END'):\n                if (not current_macro):\n                    raise PDDMError(('Got DEFINE-END directive without an active macro: \"%s\"' % line))\n                current_macro = None\n                continue\n            raise PDDMError(('Hit a line with an unknown directive: \"%s\"' % line))\n        if current_macro:\n            current_macro.AppendLine(line)\n            continue\n        if (line.strip() == ''):\n            continue\n        raise PDDMError(('Hit a line that wasn\\'t a directive and no open macro definition: \"%s\"' % line))", "docstring": "Parses list of lines.\n\nArgs:\ninput_lines: A list of strings of input to parse (no newlines on the\nstrings).\n\nRaises:\nPDDMError if there are any issues.", "source": "codesearchnet"}
{"code": "def _ParseLogonApplications(self, parser_mediator, registry_key):\n    \n    for application in self._LOGON_APPLICATIONS:\n      command_value = registry_key.GetValueByName(application)\n      if not command_value:\n        continue\n\n      values_dict = {\n          'Application': application,\n          'Command': command_value.GetDataAsObject(),\n          'Trigger': 'Logon'}\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = registry_key.offset\n      event_data.regvalue = values_dict\n      event_data.source_append = ': Winlogon'\n\n      event = time_events.DateTimeValuesEvent(\n          registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the registered logon applications.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):\n    \n    errors = np.abs(y - y_hat)[:, 0]\n\n    if not smooth:\n        return errors\n\n    smoothing_window = int(smoothing_window * len(y))\n\n    return pd.Series(errors).ewm(span=smoothing_window).mean().values", "docstring": "Compute an array of absolute errors comparing predictions and expected output.\n\nIf smooth is True, apply EWMA to the resulting array of errors.\n\nArgs:\ny (array): Ground truth.\ny_hat (array): Predictions array.\nsmoothing_window (float): Size of the smoothing window, expressed as a proportion\nof the total length of y.\nsmooth (bool): whether the returned errors should be smoothed with EWMA.\n\nReturns:\n(array): errors", "source": "juraj-google-style"}
{"code": "def do_ams_delete(endpoint, path, access_token):\n    \n    headers = {\"DataServiceVersion\": dsversion_min,\n               \"MaxDataServiceVersion\": dsversion_max,\n               \"Accept\": json_acceptformat,\n               \"Accept-Charset\" : charset,\n               \"Authorization\": 'Bearer ' + access_token,\n               \"x-ms-version\" : xmsversion}\n    response = requests.delete(endpoint, headers=headers, allow_redirects=False)\n    \n    \n    if response.status_code == 301:\n        redirected_url = ''.join([response.headers['location'], path])\n        response = requests.delete(redirected_url, headers=headers)\n    return response", "docstring": "Do a AMS DELETE request and return JSON.\nArgs:\nendpoint (str): Azure Media Services Initial Endpoint.\npath (str): Azure Media Services Endpoint Path.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _validate_path(path):\n    if (not path):\n        raise ValueError('Path is empty')\n    if (not isinstance(path, basestring)):\n        raise TypeError(('Path should be a string but is %s (%s).' % (path.__class__, path)))", "docstring": "Basic validation of Google Storage paths.\n\nArgs:\npath: a Google Storage path. It should have form '/bucket/filename'\nor '/bucket'.\n\nRaises:\nValueError: if path is invalid.\nTypeError: if path is not of type basestring.", "source": "codesearchnet"}
{"code": "def create(self, **kwargs):\n    resource = self.resource.create(kwargs)\n    if ('admin_token' in kwargs):\n        resource.context.authorize('Gem-Application', api_token=resource.api_token, admin_token=kwargs['admin_token'])\n    app = self.wrap(resource)\n    return self.add(app)", "docstring": "Create a new Application.\n\nArgs:\n**kwargs: Arbitrary keyword arguments, including:\nname (str): A name for the new Application.\n\nReturns:\nA round.Application object if successful.", "source": "codesearchnet"}
{"code": "def signature(array):\n    length = len(array)\n    index = (_NUM_SIGNATURE_BYTES if (length > _NUM_SIGNATURE_BYTES) else length)\n    return array[:index]", "docstring": "Returns the first 262 bytes of the given bytearray\nas part of the file header signature.\n\nArgs:\narray: bytearray to extract the header signature.\n\nReturns:\nFirst 262 bytes of the file content as bytearray type.", "source": "codesearchnet"}
{"code": "def scoped_state(self, name_context: Union[str, 'common.NameContext'], state_name: str, io_target=None, metrics_container: Optional['MetricsContainer']=None) -> statesampler_impl.ScopedState:\n    if not isinstance(name_context, common.NameContext):\n        name_context = common.NameContext(name_context)\n    counter_name = CounterName(state_name + '-msecs', stage_name=self._prefix, step_name=name_context.metrics_name(), io_target=io_target)\n    if counter_name in self._states_by_name:\n        return self._states_by_name[counter_name]\n    else:\n        output_counter = self._counter_factory.get_counter(counter_name, Counter.SUM)\n        self._states_by_name[counter_name] = super()._scoped_state(counter_name, name_context, output_counter, metrics_container)\n        return self._states_by_name[counter_name]", "docstring": "Returns a ScopedState object associated to a Step and a State.\n\nArgs:\nname_context: common.NameContext. It is the step name information.\nstate_name: str. It is the state name (e.g. process / start / finish).\nio_target:\nmetrics_container: MetricsContainer. The step's metrics container.\n\nReturns:\nA ScopedState that keeps the execution context and is able to switch it\nfor the execution thread.", "source": "github-repos"}
{"code": "def _run_graph(self, device, dtype, data_format, input_shape, filter_shape, strides, padding, num_iters, warmup_iters):\n    graph = ops.Graph()\n    with graph.as_default():\n        warmup_outputs, outputs = build_graph(device, dtype, data_format, input_shape, filter_shape, strides, padding, num_iters, warmup_iters)\n        config = config_pb2.ConfigProto()\n        config.graph_options.optimizer_options.opt_level = -1\n        rewrite_options = config.graph_options.rewrite_options\n        rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON if FLAGS.enable_layout_optimizer else rewriter_config_pb2.RewriterConfig.OFF\n        rewrite_options.dependency_optimization = rewriter_config_pb2.RewriterConfig.OFF\n        with session_lib.Session(graph=graph, config=config) as session:\n            variables.global_variables_initializer().run()\n            session.run(warmup_outputs)\n            start_time = time.time()\n            session.run(outputs)\n            duration = (time.time() - start_time) / num_iters\n            print('%s %s %s inputshape:%s filtershape:%s strides:%s padding:%s %d iters: %.8f sec' % (device, str(dtype), data_format, str(input_shape).replace(' ', ''), str(filter_shape).replace(' ', ''), str(strides).replace(' ', ''), padding, num_iters, duration))\n    name_template = 'conv2d_{device}_{datatype}_{data_format}_input_shape_{inputshape}_filter_shape_{filtershape}_strides_{strides}_padding_{padding}'\n    self.report_benchmark(name=name_template.format(device=device, datatype=str(dtype), data_format=str(data_format), inputshape=str(input_shape).replace(' ', ''), filtershape=str(filter_shape).replace(' ', ''), strides=str(strides).replace(' ', ''), padding=padding).replace(' ', ''), iters=num_iters, wall_time=duration)\n    return duration", "docstring": "runs the graph and print its execution time.\n\nArgs:\ndevice: String, the device to run on.\ndtype: Data type for the convolution.\ndata_format: A string from: \"NHWC\" or \"NCHW\". Data format for input and\noutput data.\ninput_shape: Shape of the input tensor.\nfilter_shape: Shape of the filter tensor.\nstrides: A list of ints. 1-D of length 4. The stride of sliding\nwindow for each dimension of input.\npadding: A string from: \"SAME\", \"VALID\". The type of padding\nalgorithm to use.  num_iters: Number of iterations to run the\nbenchmark.\nnum_iters: number of iterations to run conv2d.\nwarmup_iters: number of iterations for warmup runs.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def get_release_data(self):\n    previous_package = self.get_previous_release()\n    if previous_package:\n        previous_version = previous_package.version\n        previous_revision = previous_package.revision\n    else:\n        previous_version = None\n        previous_revision = None\n    if (self.vcs is None):\n        return dict(vcs='None', previous_version=previous_version)\n    revision = None\n    with self.repo_operation():\n        revision = self.vcs.get_current_revision()\n    changelog = self.get_changelog()\n    maxlen = config.max_package_changelog_chars\n    if (maxlen and changelog and (len(changelog) > (maxlen + 3))):\n        changelog = (changelog[:maxlen] + '...')\n    return dict(vcs=self.vcs.name(), revision=revision, changelog=changelog, previous_version=previous_version, previous_revision=previous_revision)", "docstring": "Get release data for this release.\n\nReturns:\ndict.", "source": "codesearchnet"}
{"code": "def unwrap_arguments(xml_response):\n        \n\n        \n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n        \n        xml_response = xml_response.encode('utf-8')\n        try:\n            tree = XML.fromstring(xml_response)\n        except XML.ParseError:\n            \n            \n            filtered = illegal_xml_re.sub('', xml_response.decode('utf-8'))\\\n                                     .encode('utf-8')\n            tree = XML.fromstring(filtered)\n\n        \n        \n        \n        \n        action_response = tree.find(\n            \"{http:\n        return dict((i.tag, i.text or \"\") for i in action_response)", "docstring": "Extract arguments and their values from a SOAP response.\n\nArgs:\nxml_response (str):  SOAP/xml response text (unicode,\nnot utf-8).\nReturns:\ndict: a dict of ``{argument_name: value}`` items.", "source": "juraj-google-style"}
{"code": "def main(argv=None):\n    if (argv is None):\n        argv = sys.argv[1:]\n    try:\n        executor = None\n        parser = build_args()\n        args = parser.parse_args(args=argv)\n        model = DeviceModel()\n        parser = SensorGraphFileParser()\n        parser.parse_file(args.sensor_graph)\n        parser.compile(model)\n        if (not args.disable_optimizer):\n            opt = SensorGraphOptimizer()\n            opt.optimize(parser.sensor_graph, model=model)\n        graph = parser.sensor_graph\n        sim = SensorGraphSimulator(graph)\n        for stop in args.stop:\n            sim.stop_condition(stop)\n        for watch in args.watch:\n            watch_sel = DataStreamSelector.FromString(watch)\n            graph.sensor_log.watch(watch_sel, watch_printer)\n        if (args.semihost_device is not None):\n            executor = SemihostedRPCExecutor(args.port, args.semihost_device)\n            sim.rpc_executor = executor\n        for mock in args.mock_rpc:\n            (slot, rpc_id, value) = process_mock_rpc(mock)\n            sim.rpc_executor.mock(slot, rpc_id, value)\n        for stim in args.stimulus:\n            sim.stimulus(stim)\n        graph.load_constants()\n        if (args.trace is not None):\n            sim.record_trace()\n        try:\n            if args.connected:\n                sim.step(user_connected, 8)\n            sim.run(accelerated=(not args.realtime))\n        except KeyboardInterrupt:\n            pass\n        if (args.trace is not None):\n            sim.trace.save(args.trace)\n    finally:\n        if (executor is not None):\n            executor.hw.close()\n    return 0", "docstring": "Main entry point for iotile sensorgraph simulator.\n\nThis is the iotile-sgrun command line program.  It takes\nan optional set of command line parameters to allow for\ntesting.\n\nArgs:\nargv (list of str): An optional set of command line\nparameters.  If not passed, these are taken from\nsys.argv.", "source": "codesearchnet"}
{"code": "def DocumentVersionsRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    version_path = self._GetRowValue(query_hash, row, 'version_path')\n    path = self._GetRowValue(query_hash, row, 'path')\n    paths = version_path.split('/')\n    if ((len(paths) < 2) or (not paths[1].isdigit())):\n        user_sid = ''\n    else:\n        user_sid = paths[1]\n    version_path = (self.ROOT_VERSION_PATH + version_path)\n    (path, _, _) = path.rpartition('/')\n    event_data = MacDocumentVersionsEventData()\n    event_data.last_time = self._GetRowValue(query_hash, row, 'last_time')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.path = path\n    event_data.query = query\n    event_data.user_sid = '{0!s}'.format(user_sid)\n    event_data.version_path = version_path\n    timestamp = self._GetRowValue(query_hash, row, 'version_time')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a document versions row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:\n    \n    root = ElementTree.fromstring(xml)\n    return docx_text_from_xml_node(root, 0, config)", "docstring": "Converts an XML tree of a DOCX file to string contents.\n\nArgs:\nxml: raw XML text\nconfig: :class:`TextProcessingConfig` control object\n\nReturns:\ncontents as a string", "source": "juraj-google-style"}
{"code": "def get_topics_strings(topics_words, alpha, vocabulary,\n                       topics_to_print=10, words_per_topic=10):\n  \n  alpha = np.squeeze(alpha, axis=0)\n  \n  \n  highest_weight_topics = np.argsort(-alpha, kind=\"mergesort\")\n  top_words = np.argsort(-topics_words, axis=1)\n\n  res = []\n  for topic_idx in highest_weight_topics[:topics_to_print]:\n    l = [\"index={} alpha={:.2f}\".format(topic_idx, alpha[topic_idx])]\n    l += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]\n    res.append(\" \".join(l))\n\n  return np.array(res)", "docstring": "Returns the summary of the learned topics.\n\nArguments:\ntopics_words: KxV tensor with topics as rows and words as columns.\nalpha: 1xK tensor of prior Dirichlet concentrations for the\ntopics.\nvocabulary: A mapping of word's integer index to the corresponding string.\ntopics_to_print: The number of topics with highest prior weight to\nsummarize.\nwords_per_topic: Number of wodrs per topic to return.\n\nReturns:\nsummary: A np.array with strings.", "source": "juraj-google-style"}
{"code": "def set_parameter_vector(self, vector, include_frozen=False):\n    v = self.parameter_vector\n    if include_frozen:\n        v[:] = vector\n    else:\n        v[self.unfrozen_mask] = vector\n    self.parameter_vector = v\n    self.dirty = True", "docstring": "Set the parameter values to the given vector\n\nArgs:\nvector (array[vector_size] or array[full_size]): The target\nparameter vector. This must be in the same order as\n``parameter_names`` and it should only include frozen\nparameters if ``include_frozen`` is ``True``.\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "codesearchnet"}
{"code": "class HerbertTokenizerFast(PreTrainedTokenizerFast):\n    vocab_files_names = VOCAB_FILES_NAMES\n    slow_tokenizer_class = HerbertTokenizer\n\n    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', **kwargs):\n        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)\n\n    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n        \n        cls = [self.cls_token_id]\n        sep = [self.sep_token_id]\n        if token_ids_1 is None:\n            return cls + token_ids_0 + sep\n        return cls + token_ids_0 + sep + token_ids_1 + sep\n\n    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n        \n        if already_has_special_tokens:\n            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n        if token_ids_1 is None:\n            return [1] + [0] * len(token_ids_0) + [1]\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n\n    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n        files = self._tokenizer.model.save(save_directory, name=filename_prefix)\n        return tuple(files)", "docstring": "Construct a \"Fast\" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).\n\nPeculiarities:\n\n- uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of\na punctuation character will be treated separately.\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the\nsuperclass for more information regarding methods.\n\nArgs:\nvocab_file (`str`):\nPath to the vocabulary file.\nmerges_file (`str`):\nPath to the merges file.", "source": "github-repos"}
{"code": "def __init__(self, context):\n    \n    self._logdir = context.logdir\n    self._multiplexer = context.multiplexer\n    self._plugin_name_to_instance = context.plugin_name_to_instance", "docstring": "Instantiates ScalarsPlugin via TensorBoard core.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def allocate(self, workdir=None, use_smartio=False):\n        \n        if workdir is not None:\n            \n            self.set_workdir(workdir)\n            for i, work in enumerate(self):\n                work.set_workdir(os.path.join(self.workdir, \"w\" + str(i)))\n\n        if not hasattr(self, \"workdir\"):\n            raise RuntimeError(\"You must call flow.allocate(workdir) if the workdir is not passed to __init__\")\n\n        for work in self:\n            \n            work.allocate(manager=self.manager)\n            work.set_flow(self)\n            \n            for task in work:\n                task.set_work(work)\n\n        self.check_dependencies()\n\n        if not hasattr(self, \"_allocated\"): self._allocated = 0\n        self._allocated += 1\n\n        if use_smartio:\n            self.use_smartio()\n\n        return self", "docstring": "Allocate the `Flow` i.e. assign the `workdir` and (optionally)\nthe :class:`TaskManager` to the different tasks in the Flow.\n\nArgs:\nworkdir: Working directory of the flow. Must be specified here\nif we haven't initialized the workdir in the __init__.\n\nReturn:\nself", "source": "juraj-google-style"}
{"code": "def hardware_version(self):\n    version = self._dll.JLINKARM_GetHardwareVersion()\n    major = ((version / 10000) % 100)\n    minor = ((version / 100) % 100)\n    return ('%d.%02d' % (major, minor))", "docstring": "Returns the hardware version of the connected J-Link as a\nmajor.minor string.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nHardware version string.", "source": "codesearchnet"}
{"code": "def convert(in_file, out_file, in_fmt='', out_fmt=''):\n    in_file = os.path.expanduser(in_file)\n    out_file = os.path.expanduser(out_file)\n    if (not os.path.exists(in_file)):\n        raise IOError('Input file {0} does not exist, stopping...'.format(in_file))\n    in_fmt = (in_fmt.lower() or _guess_format_from_extension(in_file.split('.')[(- 1)].lower()))\n    out_fmt = (out_fmt.lower() or _guess_format_from_extension(out_file.split('.')[(- 1)].lower()))\n    if ((not in_fmt) or (not out_fmt)):\n        raise ValueError('Cannot determine conversion formats.')\n        return False\n    if (in_fmt is out_fmt):\n        shutil.copyfileobj(in_file, out_file)\n        return out_file\n    if (in_fmt == 'hdf5'):\n        from . import hdf5\n        data = hdf5.load(in_file)\n    elif (in_fmt == 'tiff'):\n        from . import tiff\n        data = tiff.load(in_file)\n    elif (in_fmt == 'png'):\n        from . import png\n        data = png.load(in_file)\n    else:\n        return _fail_pair_conversion(in_fmt, out_fmt)\n    if (out_fmt == 'hdf5'):\n        from . import hdf5\n        return hdf5.save(out_file, data)\n    elif (out_fmt == 'tiff'):\n        from . import tiff\n        return tiff.save(out_file, data)\n    elif (out_fmt == 'png'):\n        from . import png\n        return png.export_png(out_file, data)\n    else:\n        return _fail_pair_conversion(in_fmt, out_fmt)\n    return _fail_pair_conversion(in_fmt, out_fmt)", "docstring": "Converts in_file to out_file, guessing datatype in the absence of\nin_fmt and out_fmt.\n\nArguments:\nin_file:    The name of the (existing) datafile to read\nout_file:   The name of the file to create with converted data\nin_fmt:     Optional. The format of incoming data, if not guessable\nout_fmt:    Optional. The format of outgoing data, if not guessable\n\nReturns:\nString. Output filename", "source": "codesearchnet"}
{"code": "def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):\n  \n  with tf.variable_scope(\"sigmoid_accuracy_one_hot\", values=[logits, labels]):\n    del weights_fn\n    predictions = tf.nn.sigmoid(logits)\n    labels = tf.argmax(labels, -1)\n    predictions = tf.argmax(predictions, -1)\n    _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)\n    return accuracy, tf.constant(1.0)", "docstring": "Calculate accuracy for a set, given one-hot labels and logits.\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\naccuracy (scalar), weights", "source": "juraj-google-style"}
{"code": "def text(self) -> str:\n    if not mime_types.is_text(self.mimetype) and (not mime_types.is_json(self.mimetype)):\n        raise ValueError('Part is not text.')\n    return self.part.text or ''", "docstring": "Returns part text as string.\n\nReturns:\nThe text of the part.\n\nRaises:\nValueError if part has no text.", "source": "github-repos"}
{"code": "def signmessage(self, address, message):\n        \n        signature = self.rpc.call(\"signmessage\", address, message)\n        self.logger.debug(\"Signature: %s\" % signature)\n        return signature", "docstring": "Sign a message with the private key of an address.\n\nCryptographically signs a message using ECDSA.  Since this requires\nan address's private key, the wallet must be unlocked first.\n\nArgs:\naddress (str): address used to sign the message\nmessage (str): plaintext message to which apply the signature\n\nReturns:\nstr: ECDSA signature over the message", "source": "juraj-google-style"}
{"code": "def remove(self, source: Source) -> None:\n    self.unload(source)\n    if isinstance(source, RemoteSource):\n        shutil.rmtree(source.location, ignore_errors=True)\n    self.save()", "docstring": "Unregisters a given source with this server. If the given source is a\nremote source, then its local copy will be removed from disk.\n\nRaises:\nKeyError: if the given source is not registered with this server.", "source": "codesearchnet"}
{"code": "def RetryUpload(self, job, job_id, error):\n    \n    if self.IsErrorRetryable(error):\n      retry_count = 0\n      sleep_interval = config.CONFIG[\"BigQuery.retry_interval\"]\n      while retry_count < config.CONFIG[\"BigQuery.retry_max_attempts\"]:\n\n        time.sleep(sleep_interval.seconds)\n        logging.info(\"Retrying job_id: %s\", job_id)\n        retry_count += 1\n\n        try:\n          response = job.execute()\n          return response\n        except errors.HttpError as e:\n          if self.IsErrorRetryable(e):\n            sleep_interval *= config.CONFIG[\"BigQuery.retry_multiplier\"]\n            logging.exception(\"Error with job: %s, will retry in %s\", job_id,\n                              sleep_interval)\n          else:\n            raise BigQueryJobUploadError(\n                \"Can't retry error code %s. Giving up\"\n                \" on job: %s.\" % (e.resp.status, job_id))\n    else:\n      raise BigQueryJobUploadError(\"Can't retry error code %s. Giving up on \"\n                                   \"job: %s.\" % (error.resp.status, job_id))\n\n    raise BigQueryJobUploadError(\n        \"Giving up on job:%s after %s retries.\" % (job_id, retry_count))", "docstring": "Retry the BigQuery upload job.\n\nUsing the same job id protects us from duplicating data on the server. If we\nfail all of our retries we raise.\n\nArgs:\njob: BigQuery job object\njob_id: ID string for this upload job\nerror: errors.HttpError object from the first error\n\nReturns:\nAPI response object on success, None on failure\nRaises:\nBigQueryJobUploadError: if we can't get the bigquery job started after\nretry_max_attempts", "source": "juraj-google-style"}
{"code": "def _get_ngrams(n, text):\n    ngram_set = set()\n    text_length = len(text)\n    max_index_ngram_start = (text_length - n)\n    for i in range((max_index_ngram_start + 1)):\n        ngram_set.add(tuple(text[i:(i + n)]))\n    return ngram_set", "docstring": "Calculates n-grams.\n\nArgs:\nn: which n-grams to calculate\ntext: An array of tokens\n\nReturns:\nA set of n-grams", "source": "codesearchnet"}
{"code": "def correction(self, word):\n        \n        return max(self.candidates(word), key=self.word_probability)", "docstring": "The most probable correct spelling for the word\n\nArgs:\nword (str): The word to correct\nReturns:\nstr: The most likely candidate", "source": "juraj-google-style"}
{"code": "def _is_named_tuple(instance):\n    if not isinstance(instance, tuple):\n        return False\n    return hasattr(instance, '_fields') and isinstance(instance._fields, collections_abc.Sequence) and all((isinstance(f, str) for f in instance._fields))", "docstring": "Returns True iff `instance` is a `namedtuple`.\n\nArgs:\ninstance: An instance of a Python object.\n\nReturns:\nTrue if `instance` is a `namedtuple`.", "source": "github-repos"}
{"code": "def unwrap_or(self, default: U) -> Union[(T, U)]:\n    return self.unwrap_or_else((lambda : default))", "docstring": "Returns the contained value or ``default``.\n\nArgs:\ndefault: The default value.\n\nReturns:\nThe contained value if the :py:class:`Option` is ``Some``,\notherwise ``default``.\n\nNotes:\nIf you wish to use a result of a function call as the default,\nit is recommnded to use :py:meth:`unwrap_or_else` instead.\n\nExamples:\n>>> Some(0).unwrap_or(3)\n0\n>>> NONE.unwrap_or(0)\n0", "source": "codesearchnet"}
{"code": "def write_file(self, path, contents):\n    path = self._get_dist_path(path)\n    if (not os.path.isdir(os.path.dirname(path))):\n        os.makedirs(os.path.dirname(path))\n    if isinstance(contents, bytes):\n        mode = 'wb+'\n    else:\n        mode = 'w'\n    with open(path, mode) as file:\n        file.write(contents)", "docstring": "Write a file of any type to the destination path. Useful for files like\nrobots.txt, manifest.json, and so on.\n\nArgs:\npath (str): The name of the file to write to.\ncontents (str or bytes): The contents to write.", "source": "codesearchnet"}
{"code": "def ack(self, items):\n        \n        \n        for item in items:\n            time_to_ack = item.time_to_ack\n            if time_to_ack is not None:\n                self._manager.ack_histogram.add(time_to_ack)\n\n        ack_ids = [item.ack_id for item in items]\n        request = types.StreamingPullRequest(ack_ids=ack_ids)\n        self._manager.send(request)\n\n        \n        self.drop(items)", "docstring": "Acknowledge the given messages.\n\nArgs:\nitems(Sequence[AckRequest]): The items to acknowledge.", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n    coupling_map = self._coupling_map\n    ordered_virtual_gates = list(dag.serial_layers())\n    if (self.initial_layout is None):\n        if self.property_set['layout']:\n            self.initial_layout = self.property_set['layout']\n        else:\n            self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())\n    if (len(dag.qubits()) != len(self.initial_layout)):\n        raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n    if (len(self._coupling_map.physical_qubits) != len(self.initial_layout)):\n        raise TranspilerError('Mappers require to have the layout to be the same size as the coupling map')\n    mapped_gates = []\n    layout = self.initial_layout.copy()\n    gates_remaining = ordered_virtual_gates.copy()\n    while gates_remaining:\n        best_step = _search_forward_n_swaps(layout, gates_remaining, coupling_map)\n        layout = best_step['layout']\n        gates_mapped = best_step['gates_mapped']\n        gates_remaining = best_step['gates_remaining']\n        mapped_gates.extend(gates_mapped)\n    mapped_dag = _copy_circuit_metadata(dag, coupling_map)\n    for node in mapped_gates:\n        mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs)\n    return mapped_dag", "docstring": "Run one pass of the lookahead mapper on the provided DAG.\n\nArgs:\ndag (DAGCircuit): the directed acyclic graph to be mapped\nReturns:\nDAGCircuit: A dag mapped to be compatible with the coupling_map in\nthe property_set.\nRaises:\nTranspilerError: if the coupling map or the layout are not\ncompatible with the DAG", "source": "codesearchnet"}
{"code": "def forward(self, inputs, expert_size):\n    input_list = inputs.split(expert_size, dim=0)\n    output_list = []\n    for i in range(self.num_experts):\n        output_list.append(F.linear(input_list[i], self.weight[i]))\n    results = torch.cat(output_list, dim=0)\n    return results", "docstring": "Forward pass of the GraniteMoeHybridParallelExperts module.\n\nArgs:\ninputs (Tensor):\nInput tensor.\nexpert_size:\nExpert size information.\n\nReturns:\nTensor: Output tensor.", "source": "github-repos"}
{"code": "def get_policies_from_aws(client, scope='Local'):\n        \n        done = False\n        marker = None\n        policies = []\n\n        while not done:\n            if marker:\n                response = client.list_policies(Marker=marker, Scope=scope)\n            else:\n                response = client.list_policies(Scope=scope)\n\n            policies += response['Policies']\n\n            if response['IsTruncated']:\n                marker = response['Marker']\n            else:\n                done = True\n\n        return policies", "docstring": "Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the\npolicies for the specified scope\n\nArgs:\nclient (:obj:`boto3.session.Session`): A boto3 Session object\nscope (`str`): The policy scope to use. Default: Local\n\nReturns:\n:obj:`list` of `dict`", "source": "juraj-google-style"}
{"code": "def as_str(bytes_or_text, encoding='utf-8'):\n    return as_text(bytes_or_text, encoding)", "docstring": "Acts as an alias for the `as_text` function..\n\nArgs:\nbytes_or_text: The input value to be converted. A bytes or unicode object.\nencoding: Optional string. The encoding to use if bytes_or_text is a bytes\nobject. Defaults to 'utf-8'.\n\nReturns:\nA unicode string.\n\nRaises:\nTypeError: If bytes_or_text is not a bytes or unicode object.\nUnicodeDecodeError: If bytes_or_text is a bytes object and cannot be\ndecoded using the specified encoding.", "source": "github-repos"}
{"code": "def make_export_strategy(\n        args,\n        keep_target,\n        assets_extra,\n        features,\n        schema,\n        stats):\n  \n  target_name = feature_transforms.get_target_name(features)\n  csv_header = [col['name'] for col in schema]\n  if not keep_target:\n    csv_header.remove(target_name)\n\n  def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None):\n    with ops.Graph().as_default() as g:\n      contrib_variables.create_global_step(g)\n\n      input_ops = feature_transforms.build_csv_serving_tensors_for_training_step(\n          args.analysis, features, schema, stats, keep_target)\n      model_fn_ops = estimator._call_model_fn(input_ops.features,\n                                              None,\n                                              model_fn_lib.ModeKeys.INFER)\n      output_fetch_tensors = make_prediction_output_tensors(\n          args=args,\n          features=features,\n          input_ops=input_ops,\n          model_fn_ops=model_fn_ops,\n          keep_target=keep_target)\n\n      \n      \n      signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor)\n                          for key, tensor in six.iteritems(input_ops.default_inputs)}\n      signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor)\n                           for key, tensor in six.iteritems(output_fetch_tensors)}\n      signature_def_map = {\n          'serving_default':\n              signature_def_utils.build_signature_def(\n                  signature_inputs,\n                  signature_outputs,\n                  tf.saved_model.signature_constants.PREDICT_METHOD_NAME)}\n\n      if not checkpoint_path:\n        \n        checkpoint_path = saver.latest_checkpoint(estimator._model_dir)\n      if not checkpoint_path:\n        raise ValueError(\"Couldn't find trained model at %s.\"\n                         % estimator._model_dir)\n\n      export_dir = saved_model_export_utils.get_timestamped_export_dir(\n          export_dir_base)\n\n      if (model_fn_ops.scaffold is not None and\n         model_fn_ops.scaffold.saver is not None):\n        saver_for_restore = model_fn_ops.scaffold.saver\n      else:\n        saver_for_restore = saver.Saver(sharded=True)\n\n      with tf_session.Session('') as session:\n        saver_for_restore.restore(session, checkpoint_path)\n        init_op = control_flow_ops.group(\n            variables.local_variables_initializer(),\n            resources.initialize_resources(resources.shared_resources()),\n            tf.tables_initializer())\n\n        \n        builder = saved_model_builder.SavedModelBuilder(export_dir)\n        builder.add_meta_graph_and_variables(\n            session, [tag_constants.SERVING],\n            signature_def_map=signature_def_map,\n            assets_collection=ops.get_collection(\n                ops.GraphKeys.ASSET_FILEPATHS),\n            legacy_init_op=init_op)\n        builder.save(False)\n\n      \n      if assets_extra:\n        assets_extra_path = os.path.join(compat.as_bytes(export_dir),\n                                         compat.as_bytes('assets.extra'))\n        for dest_relative, source in assets_extra.items():\n          dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),\n                                       compat.as_bytes(dest_relative))\n          dest_path = os.path.dirname(dest_absolute)\n          file_io.recursive_create_dir(dest_path)\n          file_io.copy(source, dest_absolute)\n\n    \n    
saved_model_export_utils.garbage_collect_exports(\n        export_dir_base,\n        exports_to_keep=3)\n\n    \n    \n    if keep_target:\n      final_dir = os.path.join(args.job_dir, 'evaluation_model')\n    else:\n      final_dir = os.path.join(args.job_dir, 'model')\n    if file_io.is_directory(final_dir):\n      file_io.delete_recursively(final_dir)\n    file_io.recursive_create_dir(final_dir)\n    recursive_copy(export_dir, final_dir)\n\n    return export_dir\n\n  if keep_target:\n    intermediate_dir = 'intermediate_evaluation_models'\n  else:\n    intermediate_dir = 'intermediate_prediction_models'\n\n  return export_strategy.ExportStrategy(intermediate_dir, export_fn)", "docstring": "Makes prediction graph that takes json input.\n\nArgs:\nargs: command line args\nkeep_target: If ture, target column is returned in prediction graph. Target\ncolumn must also exist in input data\nassets_extra: other fiels to copy to the output folder\njob_dir: root job folder\nfeatures: features dict\nschema: schema list\nstats: stats dict", "source": "juraj-google-style"}
{"code": "def __generate_reference__(self, triple_map, **kwargs):\n        \n        element = kwargs.get(\"element\")\n        found_elements = element.xpath(\n            triple_map.reference,\n            namespaces=self.xml_ns)\n        for elem in found_elements:\n            raw_text = elem.text.strip()\n            \n            if not raw_text.startswith(\"http\"):\n                continue\n            return rdflib.URIRef(raw_text)", "docstring": "Internal method takes a triple_map and returns the result of\napplying to XPath to the current DOM context\n\nArgs:\n-----\ntriple_map: SimpleNamespace\nelement: etree.Element", "source": "juraj-google-style"}
{"code": "def add_dict_to_hash(a_hash, a_dict):\n    if (a_dict is None):\n        return\n    for (k, v) in a_dict.items():\n        a_hash.update((((b'\\x00' + k.encode('utf-8')) + b'\\x00') + v.encode('utf-8')))", "docstring": "Adds `a_dict` to `a_hash`\n\nArgs:\na_hash (`Hash`): the secure hash, e.g created by hashlib.md5\na_dict (dict[string, [string]]): the dictionary to add to the hash", "source": "codesearchnet"}
{"code": "def or_filter(self, **filters):\n        \n        clone = copy.deepcopy(self)\n        clone.adapter.add_query([(\"OR_QRY\", filters)])\n        return clone", "docstring": "Works like \"filter\" but joins given filters with OR operator.\n\nArgs:\n**filters: Query filters as keyword arguments.\n\nReturns:\nSelf. Queryset object.\n\nExample:\n>>> Person.objects.or_filter(age__gte=16, name__startswith='jo')", "source": "juraj-google-style"}
{"code": "def calc_radius(latitude, ellipsoid='WGS84'):\n    ellipsoids = {'Airy (1830)': (6377.563, 6356.257), 'Bessel': (6377.397, 6356.079), 'Clarke (1880)': (6378.249145, 6356.51486955), 'FAI sphere': (6371, 6371), 'GRS-67': (6378.16, 6356.775), 'International': (6378.388, 6356.912), 'Krasovsky': (6378.245, 6356.863), 'NAD27': (6378.206, 6356.584), 'WGS66': (6378.145, 6356.758), 'WGS72': (6378.135, 6356.751), 'WGS84': (6378.137, 6356.752)}\n    (major, minor) = ellipsoids[ellipsoid]\n    eccentricity = (1 - ((minor ** 2) / (major ** 2)))\n    sl = math.sin(math.radians(latitude))\n    return ((major * (1 - eccentricity)) / ((1 - (eccentricity * (sl ** 2))) ** 1.5))", "docstring": "Calculate earth radius for a given latitude.\n\nThis function is most useful when dealing with datasets that are very\nlocalised and require the accuracy of an ellipsoid model without the\ncomplexity of code necessary to actually use one.  The results are meant to\nbe used as a :data:`BODY_RADIUS` replacement when the simple geocentric\nvalue is not good enough.\n\nThe original use for ``calc_radius`` is to set a more accurate radius value\nfor use with trigpointing databases that are keyed on the OSGB36 datum, but\nit has been expanded to cover other ellipsoids.\n\nArgs:\nlatitude (float): Latitude to calculate earth radius for\nellipsoid (tuple of float): Ellipsoid model to use for calculation\n\nReturns:\nfloat: Approximated Earth radius at the given latitude", "source": "codesearchnet"}
{"code": "def _get_operator_param_name_and_values(operator_class_name, task_details):\n    operator_task_details = task_details.copy()\n    if ('type' in operator_task_details.keys()):\n        del operator_task_details['type']\n    if ('up_stream' in operator_task_details.keys()):\n        del operator_task_details['up_stream']\n    if (operator_class_name == 'BigQueryOperator'):\n        return PipelineGenerator._get_bq_execute_params(operator_task_details)\n    if (operator_class_name == 'BigQueryToCloudStorageOperator'):\n        return PipelineGenerator._get_bq_extract_params(operator_task_details)\n    if (operator_class_name == 'GoogleCloudStorageToBigQueryOperator'):\n        return PipelineGenerator._get_bq_load_params(operator_task_details)\n    return operator_task_details", "docstring": "Internal helper gets the name of the python parameter for the Airflow operator class. In\nsome cases, we do not expose the airflow parameter name in its native form, but choose to\nexpose a name that's more standard for Datalab, or one that's more friendly. For example,\nAirflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab\nto use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to\nbe made.\n\nSimilarly, we the parameter value could come from the notebook's context. All that happens\nhere.\n\nReturns:\nDict containing _only_ the keys and values that are required in Airflow operator definition.\nThis requires a substituting existing keys in the dictionary with their Airflow equivalents (\ni.e. by adding new keys, and removing the existing ones).", "source": "codesearchnet"}
{"code": "def __init__(self, structure, include_bv_charge=False):\n        \n        self.structure = structure\n        self.include_bv_charge = include_bv_charge\n\n        \n        sga = SpacegroupAnalyzer(self.structure)\n        self.symm_structure = sga.get_symmetrized_structure()\n        self.equiv_site_seq = list(self.symm_structure.equivalent_sites)\n\n        self.struct_valences = None\n        if self.include_bv_charge:\n            bv = BVAnalyzer()\n            self.struct_valences = bv.get_valences(self.structure)", "docstring": "Initializes a Vacancy Generator\nArgs:\nstructure(Structure): pymatgen structure object", "source": "juraj-google-style"}
{"code": "def from_config(cls, config: dict):\n        \n        timestamp = config.get('timestamp', None)\n        return cls(config.get('id'),\n                   config.get('type'),\n                   config.get('data', dict()),\n                   config.get('origin', None),\n                   timestamp,\n                   config.get('object_type', None),\n                   config.get('object_id', None),\n                   config.get('object_key', None))", "docstring": "Create an event object from an event dictionary object.\n\nArgs:\nconfig (dict): Event Configuration dictionary.", "source": "juraj-google-style"}
{"code": "def _checkResponseNumberOfRegisters(payload, numberOfRegisters):\n    _checkString(payload, minlength=4, description='payload')\n    _checkInt(numberOfRegisters, minvalue=1, maxvalue=65535, description='numberOfRegisters')\n    BYTERANGE_FOR_NUMBER_OF_REGISTERS = slice(2, 4)\n    bytesForNumberOfRegisters = payload[BYTERANGE_FOR_NUMBER_OF_REGISTERS]\n    receivedNumberOfWrittenReisters = _twoByteStringToNum(bytesForNumberOfRegisters)\n    if (receivedNumberOfWrittenReisters != numberOfRegisters):\n        raise ValueError('Wrong number of registers to write in the response: {0}, but commanded is {1}. The data payload is: {2!r}'.format(receivedNumberOfWrittenReisters, numberOfRegisters, payload))", "docstring": "Check that the number of written registers as given in the response is correct.\n\nThe bytes 2 and 3 (zero based counting) in the payload holds the value.\n\nArgs:\n* payload (string): The payload\n* numberOfRegisters (int): Number of registers that have been written\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "class IncMeanTracker(WindowedTracker, MeanTracker):\n\n    def __init__(self, window_mode, **kwargs):\n        super().__init__(window_mode=window_mode, **kwargs)\n        self._mean = 0\n\n    def push(self, x):\n        \n        if not math.isnan(x):\n            self._n += 1\n            delta = x - self._mean\n        else:\n            delta = 0\n        if self._window_mode == WindowMode.SLIDING:\n            if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):\n                self._n -= 1\n                delta += self._mean - old_x\n            super().push(x)\n        if self._n > 0:\n            self._mean += delta / self._n\n        else:\n            self._mean = 0\n\n    def get(self):\n        \n        if self._n < 1:\n            return float('nan')\n        return self._mean", "docstring": "Base class for incremental mean trackers.\n\nThis class implements incremental calculation of the mean, which is more\nefficient for streaming data as it updates the mean with each new data point\ninstead of recalculating from scratch.\n\nArgs:\nwindow_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`\nor `SLIDING`.\n**kwargs: Keyword arguments passed to the parent class constructor.", "source": "github-repos"}
{"code": "def add_variable(var, restore=True):\n    collections = [MODEL_VARIABLES]\n    if restore:\n        collections.append(VARIABLES_TO_RESTORE)\n    for collection in collections:\n        if (var not in tf.get_collection(collection)):\n            tf.add_to_collection(collection, var)", "docstring": "Adds a variable to the MODEL_VARIABLES collection.\n\nOptionally it will add the variable to  the VARIABLES_TO_RESTORE collection.\nArgs:\nvar: a variable.\nrestore: whether the variable should be added to the\nVARIABLES_TO_RESTORE collection.", "source": "codesearchnet"}
{"code": "def _PrintStorageInformationAsText(self, storage_reader):\n    \n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type, title='Plaso Storage Information')\n    table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)])\n    table_view.AddRow(['Format version', storage_reader.format_version])\n    table_view.AddRow(\n        ['Serialization format', storage_reader.serialization_format])\n    table_view.Write(self._output_writer)\n\n    if storage_reader.storage_type == definitions.STORAGE_TYPE_SESSION:\n      self._PrintSessionsOverview(storage_reader)\n      self._PrintSessionsDetails(storage_reader)\n\n      storage_counters = self._CalculateStorageCounters(storage_reader)\n\n      if 'parsers' not in storage_counters:\n        self._output_writer.Write(\n            'Unable to determine number of events generated per parser.\\n')\n      else:\n        self._PrintParsersCounter(storage_counters['parsers'])\n\n      if 'analysis_reports' not in storage_counters:\n        self._output_writer.Write(\n            'Unable to determine number of reports generated per plugin.\\n')\n      else:\n        self._PrintAnalysisReportCounter(storage_counters['analysis_reports'])\n\n      if 'event_labels' not in storage_counters:\n        self._output_writer.Write(\n            'Unable to determine number of event tags generated per label.\\n')\n      else:\n        self._PrintEventLabelsCounter(storage_counters['event_labels'])\n\n      self._PrintWarningCounters(storage_counters)\n\n      if self._verbose:\n        self._PrintWarningsDetails(storage_reader)\n\n      self._PrintAnalysisReportsDetails(storage_reader)\n\n    elif storage_reader.storage_type == definitions.STORAGE_TYPE_TASK:\n      self._PrintTasksInformation(storage_reader)", "docstring": "Prints information about the store as human-readable text.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "juraj-google-style"}
{"code": "def append(self, item):\n    \n    if self.should_flush():\n      self.flush()\n    self.items.append(item)", "docstring": "Add new item to the list.\n\nIf needed, append will first flush existing items and clear existing items.\n\nArgs:\nitem: an item to add to the list.", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)\n    y_pred = ops.convert_to_tensor(y_pred, dtype='float32')\n    y_pred = ops.cast(y_pred >= self.threshold, self.dtype)\n    return super().update_state(y_true, y_pred, sample_weight)", "docstring": "Accumulates the confusion matrix statistics.\n\nBefore the confusion matrix is updated, the predicted values are\nthresholded to be:\n0 for values that are smaller than the `threshold`\n1 for values that are larger or equal to the `threshold`\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Can\nbe a `Tensor` whose rank is either 0, or the same as `y_true`,\nand must be broadcastable to `y_true`. Defaults to `1`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    \n    \n    return self._file_object.read(size)", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):\n    \n    output_file = open_strings_file(file_path, \"a\")\n    write_section_header_to_file(output_file, section_name)\n    for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):\n        output_file.write(u'\\n')\n        write_entry_to_file(output_file, entry_comment, entry_key)\n    output_file.close()", "docstring": "Appends dictionary of localization keys and comments to a file\n\nArgs:\nlocalization_key_to_comment (dict): A mapping between localization keys and comments.\nfile_path (str): The path of the file to append to.\nsection_name (str): The name of the section.", "source": "juraj-google-style"}
{"code": "def get_container_details(self, container_id_or_name: str) -> dict:\n    container = self._client.containers.get(container_id_or_name)\n    return container.attrs", "docstring": "Get details of a container.\n\nArgs:\ncontainer_id_or_name (string): docker container id or name\n\nReturns:\ndict, details of the container", "source": "codesearchnet"}
{"code": "def numpy(self):\n    return _var_to_tensor(self).numpy()", "docstring": "Copies the values in this ShardedVariable to a NumPy array.\n\nFirst converts to a single Tensor using the registered conversion function,\nwhich concatenates the shards, then uses Tensor.numpy() to convert to\na NumPy array.\n\nReturns:\nA NumPy array of the same shape and dtype.", "source": "github-repos"}
{"code": "def create(self, teamId, personId=None, personEmail=None, isModerator=False, **request_parameters):\n    check_type(teamId, basestring, may_be_none=False)\n    check_type(personId, basestring)\n    check_type(personEmail, basestring)\n    check_type(isModerator, bool)\n    post_data = dict_from_items_with_values(request_parameters, teamId=teamId, personId=personId, personEmail=personEmail, isModerator=isModerator)\n    json_data = self._session.post(API_ENDPOINT, json=post_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Add someone to a team by Person ID or email address.\n\nAdd someone to a team by Person ID or email address; optionally making\nthem a moderator.\n\nArgs:\nteamId(basestring): The team ID.\npersonId(basestring): The person ID.\npersonEmail(basestring): The email address of the person.\nisModerator(bool): Set to True to make the person a team moderator.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nTeamMembership: A TeamMembership object with the details of the\ncreated team membership.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def print_alignment(mapping, instance1, instance2):\n    \n    result = []\n    for instance1_item, m in zip(instance1, mapping):\n        r = instance1_item[1] + \"(\" + instance1_item[2] + \")\"\n        if m == -1:\n            r += \"-Null\"\n        else:\n            instance2_item = instance2[m]\n            r += \"-\" + instance2_item[1] + \"(\" + instance2_item[2] + \")\"\n        result.append(r)\n    return \" \".join(result)", "docstring": "print the alignment based on a node mapping\nArgs:\nmapping: current node mapping list\ninstance1: nodes of AMR 1\ninstance2: nodes of AMR 2", "source": "juraj-google-style"}
{"code": "def result(self, timeout=None):\n    self._blocking_poll(timeout=timeout)\n    if (self._exception is not None):\n        raise self._exception\n    return self._result", "docstring": "Get the result of the operation, blocking if necessary.\n\nArgs:\ntimeout (int):\nHow long (in seconds) to wait for the operation to complete.\nIf None, wait indefinitely.\n\nReturns:\ngoogle.protobuf.Message: The Operation's result.\n\nRaises:\ngoogle.api_core.GoogleAPICallError: If the operation errors or if\nthe timeout is reached before the operation completes.", "source": "codesearchnet"}
{"code": "def _decode_response_string_and_validate_format(self, rpc_id, response):\n    if not response:\n        raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_SERVER)\n    result = json.loads(response)\n    for field_name in RPC_RESPONSE_REQUIRED_FIELDS:\n        if field_name not in result:\n            raise errors.ProtocolError(self._device, errors.ProtocolError.RESPONSE_MISSING_FIELD % field_name)\n    if result['id'] != rpc_id:\n        raise errors.ProtocolError(self._device, errors.ProtocolError.MISMATCHED_API_ID)\n    return result", "docstring": "Decodes response JSON string to python dict and validates its format.\n\nArgs:\nrpc_id: int, the actual id of this RPC. It should be the same with the id\nin the response, otherwise throws an error.\nresponse: str, the JSON string of the RPC response.\n\nReturns:\nA dict decoded from the response JSON string.\n\nRaises:\nerrors.ProtocolError: if the response format is invalid.", "source": "github-repos"}
{"code": "def get_imports(filename: Union[str, os.PathLike]) -> list[str]:\n    with open(filename, encoding='utf-8') as f:\n        content = f.read()\n    imported_modules = set()\n    import transformers.utils\n\n    def recursive_look_for_imports(node):\n        if isinstance(node, ast.Try):\n            return\n        elif isinstance(node, ast.If):\n            test = node.test\n            for condition_node in ast.walk(test):\n                if isinstance(condition_node, ast.Call):\n                    check_function = getattr(condition_node.func, 'id', '')\n                    if check_function.endswith('available') and check_function.startswith('is_flash_attn') or hasattr(transformers.utils.import_utils, check_function):\n                        return\n        elif isinstance(node, ast.Import):\n            for alias in node.names:\n                top_module = alias.name.split('.')[0]\n                if top_module:\n                    imported_modules.add(top_module)\n        elif isinstance(node, ast.ImportFrom):\n            if node.level == 0 and node.module:\n                top_module = node.module.split('.')[0]\n                if top_module:\n                    imported_modules.add(top_module)\n        for child in ast.iter_child_nodes(node):\n            recursive_look_for_imports(child)\n    tree = ast.parse(content)\n    recursive_look_for_imports(tree)\n    return sorted(imported_modules)", "docstring": "Extracts all the libraries (not relative imports this time) that are imported in a file.\n\nArgs:\nfilename (`str` or `os.PathLike`): The module file to inspect.\n\nReturns:\n`list[str]`: The list of all packages required to use the input module.", "source": "github-repos"}
{"code": "def create_issue(title: str, description: str, labels: Optional[List[str]]=None) -> Tuple[int, str]:\n    url = 'https:\n    data = {'owner': _GITHUB_REPO_OWNER, 'repo': _GITHUB_REPO_NAME, 'title': title, 'body': description, 'labels': [_AWAITING_TRIAGE_LABEL, _PERF_ALERT_LABEL]}\n    if labels:\n        data['labels'].extend(labels)\n    response = requests.post(url=url, data=json.dumps(data), headers=_HEADERS, timeout=_REQUEST_TIMEOUT_SECS).json()\n    return (response['number'], response['html_url'])", "docstring": "Create an issue with title, description with a label.\n\nArgs:\ntitle:  GitHub issue title.\ndescription: GitHub issue description.\nlabels: Labels used to tag the GitHub issue.\nReturns:\nTuple containing GitHub issue number and issue URL.", "source": "github-repos"}
{"code": "def posterior_chromatogram_hypotheses_fast(experiment, prior_chrom_null):\n    tg_ids = experiment.df.tg_num_id.values\n    pp_values = (1 - experiment.df['pep'].values)\n    current_tg_id = tg_ids[0]\n    scores = []\n    final_result = []\n    final_result_h0 = []\n    for i in range(tg_ids.shape[0]):\n        id_ = tg_ids[i]\n        if (id_ != current_tg_id):\n            prior_pg_true = ((1.0 - prior_chrom_null) / len(scores))\n            rr = single_chromatogram_hypothesis_fast(np.array(scores), prior_chrom_null, prior_pg_true)\n            final_result.extend(rr[1:])\n            final_result_h0.extend((rr[0] for i in range(len(scores))))\n            scores = []\n            current_tg_id = id_\n        scores.append((1.0 - pp_values[i]))\n    prior_pg_true = ((1.0 - prior_chrom_null) / len(scores))\n    rr = single_chromatogram_hypothesis_fast(np.array(scores), prior_chrom_null, prior_pg_true)\n    final_result.extend(rr[1:])\n    final_result_h0.extend(([rr[0]] * len(scores)))\n    return (final_result, final_result_h0)", "docstring": "Compute posterior probabilities for each chromatogram\n\nFor each chromatogram (each group_id / peptide precursor), all hypothesis of all peaks\nbeing correct (and all others false) as well as the h0 (all peaks are\nfalse) are computed.\n\nThe prior probability that the  are given in the function\n\nThis assumes that the input data is sorted by tg_num_id\n\nArgs:\nexperiment(:class:`data_handling.Multipeptide`): the data of one experiment\nprior_chrom_null(float): the prior probability that any precursor\nis absent (all peaks are false)\n\nReturns:\ntuple(hypothesis, h0): two vectors that contain for each entry in\nthe input dataframe the probabilities for the hypothesis that the\npeak is correct and the probability for the h0", "source": "codesearchnet"}
{"code": "def get_ref(profile, ref):\n    resource = ('/refs/' + ref)\n    data = api.get_request(profile, resource)\n    return prepare(data)", "docstring": "Fetch a ref.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nref\nThe ref to fetch, e.g., ``heads/my-feature-branch``.\n\nReturns\nA dict with data about the ref.", "source": "codesearchnet"}
{"code": "def set_sflow(self, name, value=None, default=False, disable=False):\n    if (value not in [True, False, None]):\n        raise ValueError\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('sflow enable', value=value, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the sFlow state on the interface\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\n\nvalue (boolean): True if sFlow should be enabled otherwise False\n\ndefault (boolean): Specifies the default value for sFlow\n\ndisable (boolean): Specifies to disable sFlow\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def __init__(self, filename, error_handler, **kwargs):\n    self._filename = filename\n    self._error_handler = error_handler\n    self.lexer = ply.lex.lex(module=self, **kwargs)", "docstring": "Create a Lex lexer.\n\nTo pass this into a Ply Yacc parser, pass it in using the .lexer propert\nof an StlLexer instance:\nmy_lexer = StlLexer()\nmy_parser = ply.yacc.parser(lexer=my_lexer.lexer)\n\nArgs:\nfilename: The filename string to use in any error messaging.\nerror_handler: A object to handle and lexing errors.\nkwargs: Forwarded to ply.lex.lex.", "source": "github-repos"}
{"code": "def save_plot(self, filename, img_format=\"eps\", ylim=None,\n                  zero_to_efermi=True, smooth=False):\n        \n        plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,\n                            smooth=smooth)\n        plt.savefig(filename, format=img_format)\n        plt.close()", "docstring": "Save matplotlib plot to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.\nylim: Specifies the y-axis limits.", "source": "juraj-google-style"}
{"code": "def open(self):\n    if self._is_open:\n        raise exceptions.ClientConnectionFailure('client connection already open')\n    else:\n        try:\n            self.proxy.open()\n            self._is_open = True\n        except Exception as e:\n            self.logger.error('could not open client connection: %s', e)\n            raise", "docstring": "Open the client connection.\n\nRaises:\nClientConnectionFailure: if the client connection is already open\nException: if an error occurs while trying to open the connection", "source": "codesearchnet"}
{"code": "def filter_invalid_unicode_from_table(table):\n    if not hasattr(table, 'table_id'):\n        table.table_id = 0\n    for row_index, row in table.iterrows():\n        for col_index, cell in enumerate(row):\n            cell, is_invalid = filter_invalid_unicode(cell)\n            if is_invalid:\n                logging.warning(f'Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, col_index: {col_index}')\n    for col_index, column in enumerate(table.columns):\n        column, is_invalid = filter_invalid_unicode(column)\n        if is_invalid:\n            logging.warning(f'Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}')", "docstring": "Removes invalid unicode from table. Checks whether a table cell text contains an invalid unicode encoding. If yes,\nreset the table cell text to an empty str and log a warning for each invalid cell\n\nArgs:\ntable: table to clean.", "source": "github-repos"}
{"code": "def create_complete_files(climan, path, cmd, *cmds, zsh_sourceable=False):\n    path = pathlib.Path(path)\n    zsh_dir = (path / 'zsh')\n    if (not zsh_dir.exists()):\n        zsh_dir.mkdir(parents=True)\n    zsh_file = (zsh_dir / '_{}.sh'.format(cmd))\n    bash_dir = (path / 'bash')\n    if (not bash_dir.exists()):\n        bash_dir.mkdir(parents=True)\n    bash_file = (bash_dir / '{}.sh'.format(cmd))\n    climan.zsh_complete(zsh_file, cmd, *cmds, sourceable=zsh_sourceable)\n    climan.bash_complete(bash_file, cmd, *cmds)", "docstring": "Create completion files for bash and zsh.\n\nArgs:\ncliman (:class:`~loam.cli.CLIManager`): CLI manager.\npath (path-like): directory in which the config files should be\ncreated. It is created if it doesn't exist.\ncmd (str): command name that should be completed.\ncmds (str): extra command names that should be completed.\nzsh_sourceable (bool): if True, the generated file will contain an\nexplicit call to ``compdef``, which means it can be sourced\nto activate CLI completion.", "source": "codesearchnet"}
{"code": "def ParseNetworkConnectivityUsage(\n      self, parser_mediator, cache=None, database=None, table=None,\n      **unused_kwargs):\n    \n    \n    self._ParseGUIDTable(\n        parser_mediator, cache, database, table,\n        self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP,\n        SRUMNetworkConnectivityUsageEventData)", "docstring": "Parses the network connectivity usage monitor table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache, which contains information about\nthe identifiers stored in the SruDbIdMapTable table.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.", "source": "juraj-google-style"}
{"code": "def read_zmat(cls, inputfile, implicit_index=True):\n    cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']\n    if implicit_index:\n        zmat_frame = pd.read_table(inputfile, comment='\n        zmat_frame.index = range(1, (len(zmat_frame) + 1))\n    else:\n        zmat_frame = pd.read_table(inputfile, comment='\n        zmat_frame.set_index('temp_index', drop=True, inplace=True)\n        zmat_frame.index.name = None\n    if pd.isnull(zmat_frame.iloc[(0, 1)]):\n        zmat_values = [1.27, 127.0, 127.0]\n        zmat_refs = [constants.int_label[x] for x in ['origin', 'e_z', 'e_x']]\n        for (row, i) in enumerate(zmat_frame.index[:3]):\n            cols = ['b', 'a', 'd']\n            zmat_frame.loc[(:, cols)] = zmat_frame.loc[(:, cols)].astype('O')\n            if (row < 2):\n                zmat_frame.loc[(i, cols[row:])] = zmat_refs[row:]\n                zmat_frame.loc[(i, ['bond', 'angle', 'dihedral'][row:])] = zmat_values[row:]\n            else:\n                zmat_frame.loc[(i, 'd')] = zmat_refs[2]\n                zmat_frame.loc[(i, 'dihedral')] = zmat_values[2]\n    elif (zmat_frame.iloc[(0, 1)] in constants.int_label.keys()):\n        zmat_frame = zmat_frame.replace({col: constants.int_label for col in ['b', 'a', 'd']})\n    zmat_frame = cls._cast_correct_types(zmat_frame)\n    try:\n        Zmat = cls(zmat_frame)\n    except InvalidReference:\n        raise UndefinedCoordinateSystem('Your zmatrix cannot be transformed to cartesian coordinates')\n    return Zmat", "docstring": "Reads a zmat file.\n\nLines beginning with ``#`` are ignored.\n\nArgs:\ninputfile (str):\nimplicit_index (bool): If this option is true the first column\nhas to be the element symbols for the atoms.\nThe row number is used to determine the index.\n\nReturns:\nZmat:", "source": "codesearchnet"}
{"code": "def convert_to_generator_like(data, batch_size=None, steps_per_epoch=None, epochs=1, shuffle=False):\n    if isinstance(data, tuple):\n        data = tuple((ele for ele in data if not all((e is None for e in nest.flatten(ele)))))\n    if data_utils.is_generator_or_sequence(data) or isinstance(data, iterator_ops.IteratorBase):\n        if isinstance(data, data_utils.Sequence):\n            if steps_per_epoch is None:\n                steps_per_epoch = len(data)\n        return (data, steps_per_epoch)\n    if isinstance(data, data_types.DatasetV2):\n        return (dataset_ops.make_one_shot_iterator(data), steps_per_epoch)\n    num_samples = int(nest.flatten(data)[0].shape[0])\n    if batch_size is None:\n        raise ValueError('When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.')\n    steps_per_epoch = int(math.ceil(num_samples / batch_size))\n\n    def _gen(data):\n        \n        index_array = np.arange(num_samples)\n        for _ in range(epochs):\n            if shuffle:\n                np.random.shuffle(index_array)\n            batches = generic_utils.make_batches(num_samples, batch_size)\n            for batch_start, batch_end in batches:\n                batch_ids = index_array[batch_start:batch_end]\n                flat_batch_data = training_utils.slice_arrays(nest.flatten(data), batch_ids, contiguous=not shuffle)\n                yield nest.pack_sequence_as(data, flat_batch_data)\n    return (_gen(data), steps_per_epoch)", "docstring": "Make a generator out of NumPy or EagerTensor inputs.\n\nArgs:\ndata: Either a generator or `keras.utils.data_utils.Sequence` object or\n`Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or EagerTensors.\nIf a tuple, the elements represent `(x, y, sample_weights)` and may be\n`None` or `[None]`.\nbatch_size: Used when creating a generator out of tuples of NumPy arrays or\nEagerTensors.\nsteps_per_epoch: Steps of the generator to run each epoch. If `None` the\nnumber of steps will be read from the data (for\n`keras.utils.data_utils.Sequence` types).\nepochs: Total number of epochs to run.\nshuffle: Whether the data should be shuffled.\n\nReturns:\n- Generator, `keras.utils.data_utils.Sequence`, or `Iterator`.\n\nRaises:\n- ValueError: If `batch_size` is not provided for NumPy or EagerTensor\ninputs.", "source": "github-repos"}
{"code": "def must_exist(*components):\n    \n    _path = path(*components)\n    if not exists(_path):\n        raise File404(_path)\n    return _path", "docstring": "Ensure path exists.\n\nArguments:\n*components (str[]): Path components.\n\nReturns:\nstr: File path.\n\nRaises:\nFile404: If path does not exist.", "source": "juraj-google-style"}
{"code": "class EncodecDecoderOutput(ModelOutput):\n    audio_values: Optional[torch.FloatTensor] = None", "docstring": "Args:\naudio_values (`torch.FloatTensor`  of shape `(batch_size, segment_length)`, *optional*):\nDecoded audio values, obtained using the decoder part of Encodec.", "source": "github-repos"}
{"code": "def list_instances(i_info, param_str, numbered=False):\n    \n    print(param_str)\n\n    for i in i_info:\n        if numbered:\n            print(\"Instance {}\n\n        print(\"  {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}\".\n              format(C_TI, C_NORM, C_STAT[i_info[i]['state']],\n                     i_info[i]['tag']['Name'], i_info[i]['id'],\n                     i_info[i]['state'], C_HEAD2))\n        print(\"  AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}\".\n              format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame']))\n        list_tags(i_info[i]['tag'])\n    debg.dprintx(\"All Data\")\n    debg.dprintx(i_info, True)", "docstring": "Display a list of all instances and their details.\n\nIterates through all the instances in the dict, and displays\ninformation for each instance.\n\nArgs:\ni_info (dict): information on instances and details.\nparam_str (str): the title to display before the list.\nnumbered (bool): optional - indicates wheter the list should be\ndisplayed with numbers before each instance.\nThis is used when called from user_picklist.", "source": "juraj-google-style"}
{"code": "def current_spi_to_number(self):\n        \n        if self.slots['subpage'] == None:\n            return self.sub_pi_to_number(0, 0)\n        else:\n            return self.sub_pi_to_number(self.slots['subpage'],\n                                         self.slots['subitem'])", "docstring": "Convert subpage & subitem to a integer\n\n* if page == 1, then return 0, since the item count is the true # of items\n* if page == 2, then return, page-1 * items_per_page, since we are\nreturning the # of items on a full page.\n\nArgs:\n* None\n\nReturns:\n* Integer - Which represents the number of items up to the page.", "source": "juraj-google-style"}
{"code": "def CheckCondition(condition, check_object):\n  \n  try:\n    of = objectfilter.Parser(condition).Parse()\n    compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)\n    return compiled_filter.Matches(check_object)\n  except objectfilter.Error as e:\n    raise ConditionError(e)", "docstring": "Check if a condition matches an object.\n\nArgs:\ncondition: A string condition e.g. \"os == 'Windows'\"\ncheck_object: Object to validate, e.g. an rdf_client.KnowledgeBase()\n\nReturns:\nTrue or False depending on whether the condition matches.\n\nRaises:\nConditionError: If condition is bad.", "source": "juraj-google-style"}
{"code": "def reshape(vari, shape):\n    if isinstance(vari, Poly):\n        core = vari.A.copy()\n        for key in vari.keys:\n            core[key] = reshape(core[key], shape)\n        out = Poly(core, vari.dim, shape, vari.dtype)\n        return out\n    return numpy.asarray(vari).reshape(shape)", "docstring": "Reshape the shape of a shapeable quantity.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nShapeable input quantity.\nshape (tuple):\nThe polynomials new shape. Must be compatible with the number of\nelements in ``vari``.\n\nReturns:\n(chaospy.poly.base.Poly, numpy.ndarray):\nSame type as ``vari``.\n\nExamples:\n>>> poly = chaospy.prange(6)\n>>> print(poly)\n[1, q0, q0^2, q0^3, q0^4, q0^5]\n>>> print(chaospy.reshape(poly, (2,3)))\n[[1, q0, q0^2], [q0^3, q0^4, q0^5]]", "source": "codesearchnet"}
{"code": "def sync_proxy(self, mri, block):\n        \n        done_queue = Queue()\n        self._queues[mri] = done_queue\n        update_fields = set()\n\n        def callback(value=None):\n            if isinstance(value, Exception):\n                \n                if isinstance(value, Disconnected):\n                    \n                    update_fields.clear()\n                    block.health.set_value(\n                        value=\"pvAccess disconnected\",\n                        alarm=Alarm.disconnected(\"pvAccess disconnected\")\n                    )\n            else:\n                with block.notifier.changes_squashed:\n                    if not update_fields:\n                        self.log.debug(\"Regenerating from %s\", list(value))\n                        self._regenerate_block(block, value, update_fields)\n                        done_queue.put(None)\n                    else:\n                        self._update_block(block, value, update_fields)\n\n        m = self._ctxt.monitor(mri, callback, notify_disconnect=True)\n        self._monitors.add(m)\n        done_queue.get(timeout=DEFAULT_TIMEOUT)", "docstring": "Abstract method telling the ClientComms to sync this proxy Block\nwith its remote counterpart. Should wait until it is connected\n\nArgs:\nmri (str): The mri for the remote block\nblock (BlockModel): The local proxy Block to keep in sync", "source": "juraj-google-style"}
{"code": "def recv(self, request_id):\n        \n        log.debug(\"Reading response %d from Kafka\" % request_id)\n\n        \n        if not self._sock:\n            self.reinit()\n\n        \n        resp = self._read_bytes(4)\n        (size,) = struct.unpack('>i', resp)\n\n        \n        resp = self._read_bytes(size)\n        return resp", "docstring": "Get a response packet from Kafka\n\nArguments:\nrequest_id: can be any int (only used for debug logging...)\n\nReturns:\nstr: Encoded kafka packet response from server", "source": "juraj-google-style"}
{"code": "def os_version_info_ex():\n    if (not HAS_WIN32):\n        return\n\n    class OSVersionInfo(ctypes.Structure):\n        _fields_ = (('dwOSVersionInfoSize', DWORD), ('dwMajorVersion', DWORD), ('dwMinorVersion', DWORD), ('dwBuildNumber', DWORD), ('dwPlatformId', DWORD), ('szCSDVersion', (WCHAR * 128)))\n\n        def __init__(self, *args, **kwds):\n            super(OSVersionInfo, self).__init__(*args, **kwds)\n            self.dwOSVersionInfoSize = ctypes.sizeof(self)\n            kernel32.GetVersionExW(ctypes.byref(self))\n\n    class OSVersionInfoEx(OSVersionInfo):\n        _fields_ = (('wServicePackMajor', WORD), ('wServicePackMinor', WORD), ('wSuiteMask', WORD), ('wProductType', BYTE), ('wReserved', BYTE))\n    return OSVersionInfoEx()", "docstring": "Helper function to return the results of the GetVersionExW Windows API call.\nIt is a ctypes Structure that contains Windows OS Version information.\n\nReturns:\nclass: An instance of a class containing version info", "source": "codesearchnet"}
{"code": "def decode(self, encoded):\n        \n        encoded = super().decode(encoded)\n        tokens = [self.itos[index] for index in encoded]\n        return self.detokenize(tokens)", "docstring": "Decodes a tensor into a sequence.\n\nArgs:\nencoded (torch.Tensor): Encoded sequence.\n\nReturns:\nstr: Sequence decoded from ``encoded``.", "source": "juraj-google-style"}
{"code": "def log(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unary import log\n    return log(self)", "docstring": "Calculates the natural logarithm of an [`EventSet`][temporian.EventSet]'s\nfeatures.\n\nCan only be used on floating point features.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 3, 4, 5],\n...     features={\"M\": [np.e, 1., 2., 10., -1.]},\n... )\n>>> a.log()\nindexes: ...\ntimestamps: [1. 2. 3. 4. 5.]\n'M': [1. 0. 0.6931 2.3026 nan]\n...\n\n```\n\nReturns:\nEventSetOr with logarithm of input features.", "source": "github-repos"}
{"code": "def delete(self, membershipId):\n    check_type(membershipId, basestring)\n    self._session.delete(((API_ENDPOINT + '/') + membershipId))", "docstring": "Delete a membership, by ID.\n\nArgs:\nmembershipId(basestring): The membership ID.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def load_text(self, text, tokenizer=None):\n        \n        if tokenizer:\n            words = [x.lower() for x in tokenizer(text)]\n        else:\n            words = self.tokenize(text)\n\n        self._dictionary.update(words)\n        self._update_dictionary()", "docstring": "Load text from which to generate a word frequency list\n\nArgs:\ntext (str): The text to be loaded\ntokenizer (function): The function to use to tokenize a string", "source": "juraj-google-style"}
{"code": "def from_parent(parent_key, i):\n    if (not isinstance(parent_key, HDPrivateKey)):\n        raise TypeError('parent_key must be an HDPrivateKey object.')\n    hmac_key = parent_key.chain_code\n    if (i & 2147483648):\n        hmac_data = ((b'\\x00' + bytes(parent_key._key)) + i.to_bytes(length=4, byteorder='big'))\n    else:\n        hmac_data = (parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'))\n    I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest()\n    (Il, Ir) = (I[:32], I[32:])\n    parse_Il = int.from_bytes(Il, 'big')\n    if (parse_Il >= bitcoin_curve.n):\n        return None\n    child_key = ((parse_Il + parent_key._key.key) % bitcoin_curve.n)\n    if (child_key == 0):\n        return None\n    child_depth = (parent_key.depth + 1)\n    return HDPrivateKey(key=child_key, chain_code=Ir, index=i, depth=child_depth, parent_fingerprint=parent_key.fingerprint)", "docstring": "Derives a child private key from a parent\nprivate key. It is not possible to derive a child\nprivate key from a public parent key.\n\nArgs:\nparent_private_key (HDPrivateKey):", "source": "codesearchnet"}
{"code": "def release_docs_side_effect(content):\n    \n    \n    result = content.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n    \n    result = result.replace(\"{{version}}\", \"{version}\")\n    result = result.replace(\"{{circleci_build}}\", \"{circleci_build}\")\n    result = result.replace(\"{{travis_build}}\", \"{travis_build}\")\n    result = result.replace(\"{{appveyor_build}}\", \"{appveyor_build}\")\n    result = result.replace(\"{{coveralls_build}}\", \"{coveralls_build}\")\n    return result", "docstring": "Updates the template so that curly braces are escaped correctly.\n\nArgs:\ncontent (str): The template for ``docs/index.rst.release.template``.\n\nReturns:\nstr: The updated template with properly escaped curly braces.", "source": "juraj-google-style"}
{"code": "def __init__(self, model, task, cmdOptions):\n    \n    validateOpfJsonValue(task, \"opfTaskSchema.json\")\n\n    \n    self.__logger = logging.getLogger(\".\".join(\n      ['com.numenta', self.__class__.__module__, self.__class__.__name__]))\n    \n\n    self.__logger.debug((\"Instantiated %s(\" + \\\n                      \"model=%r, \" + \\\n                      \"task=%r, \" + \\\n                      \"cmdOptions=%r)\") % \\\n                        (self.__class__.__name__,\n                         model,\n                         task,\n                         cmdOptions))\n\n    \n    streamDef = task['dataset']\n    datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)\n\n    self.__model = model\n    self.__datasetReader = datasetReader\n    self.__task = task\n    self.__cmdOptions = cmdOptions\n\n\n    self.__predictionLogger = opf_basic_environment.BasicPredictionLogger(\n      fields=model.getFieldInfo(),\n      experimentDir=cmdOptions.experimentDir,\n      label=task['taskLabel'],\n      inferenceType=self.__model.getInferenceType())\n\n    taskControl = task['taskControl']\n\n    \n    self.__taskDriver = OPFTaskDriver(\n      taskControl=taskControl,\n      model=model)\n\n    loggedMetricPatterns = taskControl.get('loggedMetrics', None)\n    loggedMetricLabels = matchPatterns(loggedMetricPatterns,\n                                       self.__taskDriver.getMetricLabels())\n\n    self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)\n\n    \n    self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(\n      experimentDir=cmdOptions.experimentDir,\n      label=task['taskLabel'])", "docstring": "Constructor\n\nArgs:\nmodel: The OPF Model instance against which to run the task\ntask: A dictionary conforming to opfTaskSchema.json\ncmdOptions: ParseCommandLineOptionsResult namedtuple", "source": "juraj-google-style"}
{"code": "def mark_complex(self, name, serializer, deserializer):\n        \n\n        self._complex_properties[name] = (serializer, deserializer)", "docstring": "Mark a property as complex with serializer and deserializer functions.\n\nArgs:\nname (str): The name of the complex property.\nserializer (callable): The function to call to serialize the property's\nvalue to something that can be saved in a json.\ndeserializer (callable): The function to call to unserialize the property\nfrom a dict loaded by a json back to the original value.", "source": "juraj-google-style"}
{"code": "def get_operation_device(self, operation_name):\n    operation = self._name_to_operation(operation_name)\n    if isinstance(operation, tf.Operation):\n        return operation.device\n    else:\n        return None", "docstring": "The device of an operation.\n\nNote that only tf operations have device assignments.\n\nArgs:\noperation_name: a string, name of an operation in the graph.\n\nReturns:\na string or None, representing the device name.", "source": "codesearchnet"}
{"code": "def putenv(key, value):\n    \n\n    key = path2fsn(key)\n    value = path2fsn(value)\n\n    if is_win and PY2:\n        try:\n            set_windows_env_var(key, value)\n        except WindowsError:\n            \n            raise ValueError\n    else:\n        try:\n            os.putenv(key, value)\n        except OSError:\n            \n            \n            raise ValueError", "docstring": "Like `os.putenv` but takes unicode under Windows + Python 2\n\nArgs:\nkey (pathlike): The env var to get\nvalue (pathlike): The value to set\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def _FormatPropertyName(self, property_name):\n    \n    \n    fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\\1_\\2', property_name)\n    return re.sub(r'([a-z0-9])([A-Z])', r'\\1_\\2', fix_key).lower()", "docstring": "Formats a camel case property name as snake case.\n\nArgs:\nproperty_name (str): property name in camel case.\n\nReturns:\nstr: property name in snake case.", "source": "juraj-google-style"}
{"code": "def _AddAttributeContainer(self, container_type, attribute_container):\n    \n    container_list = self._GetSerializedAttributeContainerList(container_type)\n\n    identifier = identifiers.SQLTableIdentifier(\n        container_type, container_list.next_sequence_number + 1)\n    attribute_container.SetIdentifier(identifier)\n\n    serialized_data = self._SerializeAttributeContainer(attribute_container)\n\n    container_list.PushAttributeContainer(serialized_data)\n\n    if container_list.data_size > self._maximum_buffer_size:\n      self._WriteSerializedAttributeContainerList(container_type)", "docstring": "Adds an attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nattribute_container (AttributeContainer): attribute container.\n\nRaises:\nIOError: if the attribute container cannot be serialized.\nOSError: if the attribute container cannot be serialized.", "source": "juraj-google-style"}
{"code": "def get_pattern_step_time(self, patternnumber, stepnumber):\n    _checkPatternNumber(patternnumber)\n    _checkStepNumber(stepnumber)\n    address = _calculateRegisterAddress('time', patternnumber, stepnumber)\n    return self.read_register(address, 0)", "docstring": "Get the step time.\n\nArgs:\n* patternnumber (integer): 0-7\n* stepnumber (integer): 0-7\n\nReturns:\nThe step time (int??).", "source": "codesearchnet"}
{"code": "def abort_expired_batches(self, request_timeout_ms, cluster):\n        \n        expired_batches = []\n        to_remove = []\n        count = 0\n        for tp in list(self._batches.keys()):\n            assert tp in self._tp_locks, 'TopicPartition not in locks dict'\n\n            \n            \n            \n            \n            \n            \n            if tp in self.muted:\n                continue\n\n            with self._tp_locks[tp]:\n                \n                \n                dq = self._batches[tp]\n                for batch in dq:\n                    is_full = bool(bool(batch != dq[-1]) or batch.records.is_full())\n                    \n                    if batch.maybe_expire(request_timeout_ms,\n                                          self.config['retry_backoff_ms'],\n                                          self.config['linger_ms'],\n                                          is_full):\n                        expired_batches.append(batch)\n                        to_remove.append(batch)\n                        count += 1\n                        self.deallocate(batch)\n                    else:\n                        \n                        break\n\n                \n                \n                \n                if to_remove:\n                    for batch in to_remove:\n                        dq.remove(batch)\n                    to_remove = []\n\n        if expired_batches:\n            log.warning(\"Expired %d batches in accumulator\", count) \n\n        return expired_batches", "docstring": "Abort the batches that have been sitting in RecordAccumulator for\nmore than the configured request_timeout due to metadata being\nunavailable.\n\nArguments:\nrequest_timeout_ms (int): milliseconds to timeout\ncluster (ClusterMetadata): current metadata for kafka cluster\n\nReturns:\nlist of ProducerBatch that were expired", "source": "juraj-google-style"}
{"code": "def get_sine_pos_embed(pos_tensor: torch.Tensor, num_pos_feats: int=128, temperature: int=10000, exchange_xy: bool=True) -> Tensor:\n    scale = 2 * math.pi\n    dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)\n    dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats)\n\n    def sine_func(x: torch.Tensor):\n        sin_x = x * scale / dim_t\n        sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)\n        return sin_x\n    pos_tensor = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)\n    position_embeddings = [sine_func(x) for x in pos_tensor]\n    if exchange_xy:\n        position_embeddings[0], position_embeddings[1] = (position_embeddings[1], position_embeddings[0])\n    position_embeddings = torch.cat(position_embeddings, dim=-1)\n    return position_embeddings", "docstring": "Generate sine position embeddings from a position tensor.\n\nArgs:\npos_tensor (torch.Tensor):\nTensor containing positions. Shape: [..., n].\nnum_pos_feats (`int`, *optional*, defaults to 128):\nProjected shape for each float in the tensor.\ntemperature (`int`, *optional*, defaults to 10000):\nTemperature in the sine/cosine function.\nexchange_xy (`bool`, *optional*, defaults to `True`):\nExchange pos x and pos y. For example, input tensor is [x,y], the results will be [pos(y), pos(x)].\n\nReturns:\nposition_embeddings (torch.Tensor): shape: [..., n * hidden_size].", "source": "github-repos"}
{"code": "def print_type_of_instance(self, t: types.BaseValue, instance=None) -> str:", "docstring": "Returns a string of the type of an instance of t.\n\nFor example, if t is `int`, then this method returns \"int\".\n\nArgs:\nt: An abstract value.\ninstance: A specific instance of t to print.", "source": "github-repos"}
{"code": "def export(self, template_file_name, output_file_name,\n               sort=\"public\", data=None, limit=0):\n        \n        exportedData = {}\n        exportedUsers = self.getSortedUsers()\n        template = self.__getTemplate(template_file_name)\n        position = 1\n\n        if not limit:\n            exportedData[\"users\"] = exportedUsers\n        else:\n            exportedData[\"users\"] = exportedUsers[:limit]\n\n        for u in exportedData[\"users\"]:\n            u[\"position\"] = position\n            u[\"comma\"] = position < len(exportedData[\"users\"])\n            position += 1\n\n        exportedData[\"extraData\"] = data\n\n        renderer = Renderer()\n        output = renderer.render(template, exportedData)\n\n        with open(output_file_name, \"w\") as text_file:\n            text_file.write(output)", "docstring": "Export ranking to a file.\n\nArgs:\ntemplate_file_name (str): where is the template\n(moustache template)\noutput_file_name (str): where create the file with the ranking\nsort (str): field to sort the users", "source": "juraj-google-style"}
{"code": "def insert_arguments_into_query(compilation_result, arguments):\n    _ensure_arguments_are_provided(compilation_result.input_metadata, arguments)\n    if (compilation_result.language == MATCH_LANGUAGE):\n        return insert_arguments_into_match_query(compilation_result, arguments)\n    elif (compilation_result.language == GREMLIN_LANGUAGE):\n        return insert_arguments_into_gremlin_query(compilation_result, arguments)\n    elif (compilation_result.language == SQL_LANGUAGE):\n        return insert_arguments_into_sql_query(compilation_result, arguments)\n    else:\n        raise AssertionError(u'Unrecognized language in compilation result: {}'.format(compilation_result))", "docstring": "Insert the arguments into the compiled GraphQL query to form a complete query.\n\nArgs:\ncompilation_result: a CompilationResult object derived from the GraphQL compiler\narguments: dict, mapping argument name to its value, for every parameter the query expects.\n\nReturns:\nstring, a query in the appropriate output language, with inserted argument data", "source": "codesearchnet"}
{"code": "def _restore_training_state(self, restore_state):\n        \n        self.load_state_dict(restore_state[\"model\"])\n        self.optimizer.load_state_dict(restore_state[\"optimizer\"])\n        self.lr_scheduler.load_state_dict(restore_state[\"lr_scheduler\"])\n        start_iteration = restore_state[\"iteration\"] + 1\n        if self.config[\"verbose\"]:\n            print(f\"Restored checkpoint to iteration {start_iteration}.\")\n\n        if restore_state[\"best_model_found\"]:\n            \n            \n            \n            self.checkpointer.best_model_found = True\n            self.checkpointer.best_iteration = restore_state[\"best_iteration\"]\n            self.checkpointer.best_score = restore_state[\"best_score\"]\n            if self.config[\"verbose\"]:\n                print(\n                    f\"Updated checkpointer: \"\n                    f\"best_score={self.checkpointer.best_score:.3f}, \"\n                    f\"best_iteration={self.checkpointer.best_iteration}\"\n                )\n        return start_iteration", "docstring": "Restores the model and optimizer states\n\nThis helper function restores the model's state to a given iteration so\nthat a user can resume training at any epoch.\n\nArgs:\nrestore_state: a state_dict dictionary", "source": "juraj-google-style"}
{"code": "def choices_validator(choices):\n\n    def validator(value):\n        if (value not in choices):\n            raise ValidationError('{} is not in {}'.format(value, list(choices)))\n    return validator", "docstring": "Return validator function that will check if ``value in choices``.\n\nArgs:\nmax_value (list, set, tuple): allowed choices for new validator", "source": "codesearchnet"}
{"code": "def parse_dtype_info(flags):\n    if (flags.dtype in (i[0] for i in DTYPE_MAP.values())):\n        return\n    try:\n        (flags.dtype, default_loss_scale) = DTYPE_MAP[flags.dtype]\n    except KeyError:\n        raise ValueError('Invalid dtype: {}'.format(flags.dtype))\n    flags.loss_scale = (flags.loss_scale or default_loss_scale)", "docstring": "Convert dtype string to tf dtype, and set loss_scale default as needed.\n\nArgs:\nflags: namespace object returned by arg parser.\n\nRaises:\nValueError: If an invalid dtype is provided.", "source": "codesearchnet"}
{"code": "def load_vocabulary(lang='en', type='wiki'):\n    src_dir = '{}_vocab'.format(type)\n    p = locate_resource(src_dir, lang)\n    return CountedVocabulary.from_vocabfile(p)", "docstring": "Return a CountedVocabulary object.\n\nArgs:\nlang (string): language code.\ntype (string): wiki,...", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(BDEFileSystem, self).__init__(resolver_context)\n    self._bde_volume = None\n    self._file_object = None", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def runTemplate(id, data={}):\n        \n        conn = Qubole.agent()\n        path = str(id) + \"/run\"\n        res = conn.post(Template.element_path(path), data)\n        cmdType = res['command_type']\n        cmdId = res['id']\n        cmdClass = eval(cmdType)\n        cmd = cmdClass.find(cmdId)\n        while not Command.is_done(cmd.status):\n            time.sleep(Qubole.poll_interval)\n            cmd = cmdClass.find(cmd.id)\n        return Template.getResult(cmdClass, cmd)", "docstring": "Run an existing Template and waits for the Result.\nPrints result to stdout.\n\nArgs:\n`id`: ID of the template to run\n`data`: json data containing the input_vars\n\nReturns:\nAn integer as status (0: success, 1: failure)", "source": "juraj-google-style"}
{"code": "def load_yaml(task: Task, file: str) -> Result:\n    with open(file, 'r') as f:\n        yml = ruamel.yaml.YAML(typ='safe')\n        data = yml.load(f)\n    return Result(host=task.host, result=data)", "docstring": "Loads a yaml file.\n\nArguments:\nfile: path to the file containing the yaml file to load\n\nExamples:\n\nSimple example with ``ordered_dict``::\n\n> nr.run(task=load_yaml,\nfile=\"mydata.yaml\")\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): dictionary with the contents of the file", "source": "codesearchnet"}
{"code": "def check_connection(host='localhost', port=27017, username=None, password=None,\n                     authdb=None, max_delay=1):\n    \n    \n    \n    if username and password:\n        uri = (\"mongodb:\n               .format(quote_plus(username), quote_plus(password), host, port, authdb))\n        log_uri = (\"mongodb:\n               .format(quote_plus(username), host, port, authdb))\n    else:\n        log_uri = uri = \"mongodb:\n    \n    LOG.info(\"Test connection with uri: %s\", log_uri)\n    client = MongoClient(uri, serverSelectionTimeoutMS=max_delay)\n    try:\n        client.server_info()\n    except (ServerSelectionTimeoutError,OperationFailure) as err:\n        LOG.warning(err)\n        return False\n\n    return True", "docstring": "Check if a connection could be made to the mongo process specified\n\nArgs:\nhost(str)\nport(int)\nusername(str)\npassword(str)\nauthdb (str): database to to for authentication\nmax_delay(int): Number of milliseconds to wait for connection\n\nReturns:\nbool: If connection could be established", "source": "juraj-google-style"}
{"code": "def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    if tf_data_format == 'NDHWC':\n        strides = (1,) + strides + (1,)\n        pool_size = (1,) + pool_size + (1,)\n    else:\n        strides = (1, 1) + strides\n        pool_size = (1, 1) + pool_size\n    if pool_mode == 'max':\n        x = nn.max_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format)\n    elif pool_mode == 'avg':\n        x = nn.avg_pool3d(x, pool_size, strides, padding=padding, data_format=tf_data_format)\n    else:\n        raise ValueError('Invalid pooling mode: ' + str(pool_mode))\n    if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n        x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n    return x", "docstring": "3D Pooling.\n\nArgs:\nx: Tensor or variable.\npool_size: tuple of 3 integers.\nstrides: tuple of 3 integers.\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\npool_mode: string, `\"max\"` or `\"avg\"`.\n\nReturns:\nA tensor, result of 3D pooling.\n\nRaises:\nValueError: if `data_format` is neither `\"channels_last\"` or\n`\"channels_first\"`.\nValueError: if `pool_mode` is neither `\"max\"` or `\"avg\"`.", "source": "github-repos"}
{"code": "def check_valid(spec):\n    DeviceSpec.from_string(spec)", "docstring": "Check that a device spec is valid.\n\nArgs:\nspec: a string.\n\nRaises:\nAn exception if the spec is invalid.", "source": "github-repos"}
{"code": "def _delete(self, url, data, scope):\n        \n        self._create_session(scope)\n        response = self.session.delete(url, data=data)\n        return response.status_code, response.text", "docstring": "Make a DELETE request using the session object to a Degreed endpoint.\n\nArgs:\nurl (str): The url to send a DELETE request to.\ndata (str): The json encoded payload to DELETE.\nscope (str): Must be one of the scopes Degreed expects:\n- `CONTENT_PROVIDER_SCOPE`\n- `COMPLETION_PROVIDER_SCOPE`", "source": "juraj-google-style"}
{"code": "def transmit(self, payload, **kwargs):\n        \n        kwargs['app_label'] = 'degreed'\n        kwargs['model_name'] = 'DegreedLearnerDataTransmissionAudit'\n        kwargs['remote_user_id'] = 'degreed_user_email'\n        super(DegreedLearnerTransmitter, self).transmit(payload, **kwargs)", "docstring": "Send a completion status call to Degreed using the client.\n\nArgs:\npayload: The learner completion data payload to send to Degreed", "source": "juraj-google-style"}
{"code": "def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):\n  \n  t2t_env = rl_utils.setup_env(\n      hparams, batch_size=hparams.real_batch_size,\n      max_num_noops=hparams.max_num_noops\n  )\n  \n  if which_epoch_data is not None:\n    if which_epoch_data == \"last\":\n      which_epoch_data = infer_last_epoch_num(data_dir)\n    assert isinstance(which_epoch_data, int), \\\n      \"{}\".format(type(which_epoch_data))\n    t2t_env.start_new_epoch(which_epoch_data, data_dir)\n  else:\n    t2t_env.start_new_epoch(-999)\n  return t2t_env", "docstring": "Load T2TGymEnv with data from one epoch.\n\nArgs:\nhparams: hparams.\ndata_dir: data directory.\nwhich_epoch_data: data from which epoch to load.\n\nReturns:\nenv.", "source": "juraj-google-style"}
{"code": "def get_name_or_instance_id(self, with_id=False):\n    name = self.get_tag('Name', case_sensitive=False)\n    if (name and (len(name.value.strip()) > 0)):\n        return ('{0} ({1})'.format(name.value, self.id) if with_id else name.value)\n    return self.id", "docstring": "Returns the name of an instance if existant, else return the instance id\n\nArgs:\nwith_id (bool): Include the instance ID even if the name is found (default: False)\n\nReturns:\nName and/or instance ID of the instance object", "source": "codesearchnet"}
{"code": "def extract_numerics_alert(event):\n  \n  value = event.summary.value[0]\n  debugger_plugin_metadata_content = None\n  if value.HasField(\"metadata\"):\n    plugin_data = value.metadata.plugin_data\n    if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:\n      debugger_plugin_metadata_content = plugin_data.content\n\n  if not debugger_plugin_metadata_content:\n    raise ValueError(\"Event proto input lacks debugger plugin SummaryMetadata.\")\n\n  debugger_plugin_metadata_content = tf.compat.as_text(\n      debugger_plugin_metadata_content)\n  try:\n    content_object = json.loads(debugger_plugin_metadata_content)\n    device_name = content_object[\"device\"]\n  except (KeyError, ValueError) as e:\n    raise ValueError(\"Could not determine device from JSON string %r, %r\" %\n                     (debugger_plugin_metadata_content, e))\n\n  debug_op_suffix = \":DebugNumericSummary\"\n  if not value.node_name.endswith(debug_op_suffix):\n    raise ValueError(\n        \"Event proto input does not have the expected debug op suffix %s\" %\n        debug_op_suffix)\n  tensor_name = value.node_name[:-len(debug_op_suffix)]\n\n  elements = tf_debug.load_tensor_from_event(event)\n  nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]\n  neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]\n  pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]\n  if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:\n    return NumericsAlert(\n        device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,\n        pos_inf_count)\n  return None", "docstring": "Determines whether a health pill event contains bad values.\n\nA bad value is one of NaN, -Inf, or +Inf.\n\nArgs:\nevent: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`\nops.\n\nReturns:\nAn instance of `NumericsAlert`, if bad values are found.\n`None`, if no bad values are found.\n\nRaises:\nValueError: if the event does not have the expected tag prefix or the\ndebug op name is not the expected debug op name suffix.", "source": "juraj-google-style"}
{"code": "def label_contains(\n    node,\n    triggers\n):\n    \n    for trigger in triggers:\n        if trigger.trigger_word in node.label:\n            yield TriggerNode(trigger, node)", "docstring": "Determine if node contains any of the trigger_words provided.\n\nArgs:\nnode(Node): CFG node to check.\ntrigger_words(list[Union[Sink, Source]]): list of trigger words to look for.\n\nReturns:\nIterable of TriggerNodes found. Can be multiple because multiple\ntrigger_words can be in one node.", "source": "juraj-google-style"}
{"code": "def register_entity(self, entity_value, entity_type, alias_of=None):\n        \n        if alias_of:\n            self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))\n        else:\n            self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))\n            self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))", "docstring": "Register an entity to be tagged in potential parse results\n\nArgs:\nentity_value(str): the value/proper name of an entity instance (Ex: \"The Big Bang Theory\")\nentity_type(str): the type/tag of an entity instance (Ex: \"Television Show\")", "source": "juraj-google-style"}
{"code": "def _convert_token_to_id(self, token, token_type='TOKEN_TIME') -> int:\n    return self.encoder.get(f'{token}_{token_type}', int(self.unk_token))", "docstring": "Encodes the Midi tokens to transformer generated token ids.\n\nArgs:\ntoken (`int`):\nThis denotes the token value.\ntoken_type (`str`):\nThis denotes the type of the token. There are four types of midi tokens such as \"TOKEN_TIME\",\n\"TOKEN_VELOCITY\", \"TOKEN_NOTE\" and \"TOKEN_SPECIAL\".\n\nReturns:\n`int`: returns the id of the token.", "source": "github-repos"}
{"code": "def convert(self):\n    return super(TFLiteConverterV2, self).convert()", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nReturns:\nThe converted data in serialized format.\n\nRaises:\nValueError:\nNo concrete function is specified.\nMultiple concrete functions are specified.\nInput shape is not specified.\nInvalid quantization parameters.", "source": "github-repos"}
{"code": "def _get_data_by_field(self, field_number):\n    if (not self.is_data_loaded):\n        self._import_data()\n    if (not (0 <= field_number < self._num_of_fields)):\n        raise ValueError(('Field number should be between 0-%d' % self._num_of_fields))\n    return self._data[field_number]", "docstring": "Return a data field by field number.\n\nThis is a useful method to get the values for fields that Ladybug\ncurrently doesn't import by default. You can find list of fields by typing\nEPWFields.fields\n\nArgs:\nfield_number: a value between 0 to 34 for different available epw fields.\n\nReturns:\nAn annual Ladybug list", "source": "codesearchnet"}
{"code": "def add_channel(channel: EFBChannel):\n    global master, slaves\n    if isinstance(channel, EFBChannel):\n        if (channel.channel_type == ChannelType.Slave):\n            slaves[channel.channel_id] = channel\n        else:\n            master = channel\n    else:\n        raise TypeError('Channel instance is expected')", "docstring": "Register the channel with the coordinator.\n\nArgs:\nchannel (EFBChannel): Channel to register", "source": "codesearchnet"}
{"code": "def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor:\n    hpos_ids = torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width)\n    wpos_ids = torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, -1)\n    pos_ids = torch.stack([hpos_ids.flatten(), wpos_ids.flatten()], dim=-1)\n    max_grid_size = max(num_patches_height, num_patches_width)\n    seq = torch.arange(max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype)\n    rotary_pos_emb_full = torch.outer(seq, self.inv_freq)\n    rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)\n    return rotary_pos_emb", "docstring": "Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size.\n\nArgs:\nnum_patches_height (int): Number of patches in the height dimension.\nnum_patches_width (int): Number of patches in the width dimension.\n\nReturns:\ntorch.Tensor: Rotary positional embeddings for the given grid size.", "source": "github-repos"}
{"code": "def decode_function_result(self, function_name, data):\n    description = self.function_data[function_name]\n    arguments = decode_abi(description['decode_types'], data)\n    return arguments", "docstring": "Return the function call result decoded.\n\nArgs:\nfunction_name (str): One of the existing functions described in the\ncontract interface.\ndata (bin): The encoded result from calling `function_name`.\n\nReturn:\nList[object]: The values returned by the call to `function_name`.", "source": "codesearchnet"}
{"code": "def terminate_session(self, token):\n    url = (self.rest_url + ('/session/%s' % token))\n    response = self._delete(url)\n    if (not response.ok):\n        return None\n    return True", "docstring": "Terminates the session token, effectively logging out the user\nfrom all crowd-enabled services.\n\nArgs:\ntoken: The session token.\n\nReturns:\nTrue: If session terminated\n\nNone: If session termination failed", "source": "codesearchnet"}
{"code": "def inspect_container(self, container):\n    return self._result(self._get(self._url('/containers/{0}/json', container)), True)", "docstring": "Identical to the `docker inspect` command, but only for containers.\n\nArgs:\ncontainer (str): The container to inspect\n\nReturns:\n(dict): Similar to the output of `docker inspect`, but as a\nsingle dict\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def create_transaction(self, to_account):\n    from_account = self.statement_import.bank_account\n    transaction = Transaction.objects.create()\n    Leg.objects.create(transaction=transaction, account=from_account, amount=(+ (self.amount * (- 1))))\n    Leg.objects.create(transaction=transaction, account=to_account, amount=(- (self.amount * (- 1))))\n    transaction.date = self.date\n    transaction.save()\n    self.transaction = transaction\n    self.save()\n    return transaction", "docstring": "Create a transaction for this statement amount and account, into to_account\n\nThis will also set this StatementLine's ``transaction`` attribute to the newly\ncreated transaction.\n\nArgs:\nto_account (Account): The account the transaction is into / out of.\n\nReturns:\nTransaction: The newly created (and committed) transaction.", "source": "codesearchnet"}
{"code": "def deps_from_import_graph(import_graph):\n\n    def make_module(filename):\n        return resolved_file_to_module(import_graph.provenance[filename])\n\n    def split_files(filenames):\n        stubs = []\n        sources = []\n        for f in filenames:\n            if _is_type_stub(f):\n                stubs.append(f)\n            else:\n                sources.append(make_module(f))\n        return (stubs, sources)\n    stubs_to_source_deps = collections.defaultdict(list)\n    modules = []\n    for node, deps in reversed(import_graph.deps_list()):\n        stubs, sources = split_files(_get_filenames(node))\n        flat_deps = utils.unique_list(itertools.chain.from_iterable((_get_filenames(d) for d in deps)))\n        stub_deps, source_deps = split_files(flat_deps)\n        for stub in stubs:\n            stubs_to_source_deps[stub].extend(source_deps)\n            for stub_dep in stub_deps:\n                stubs_to_source_deps[stub].extend(stubs_to_source_deps[stub_dep])\n        if sources:\n            for stub in stub_deps:\n                source_deps.extend(stubs_to_source_deps[stub])\n            modules.append((tuple(sources), tuple(source_deps)))\n    return modules", "docstring": "Construct PytypeRunner args from an importlab.ImportGraph instance.\n\nKept as a separate function so PytypeRunner can be tested independently of\nimportlab.\n\nArgs:\nimport_graph: An importlab.ImportGraph instance.\n\nReturns:\nList of (tuple of source modules, tuple of direct deps) in dependency order.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(UsernamePasswordCredential, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.USERNAME, local_stream):\n        self._username = primitives.TextString(tag=enums.Tags.USERNAME)\n        self._username.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Username/password credential encoding missing the username.')\n    if self.is_tag_next(enums.Tags.PASSWORD, local_stream):\n        self._password = primitives.TextString(tag=enums.Tags.PASSWORD)\n        self._password.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the UsernamePasswordCredential struct and\ndecode it into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the username is missing from the encoding.", "source": "codesearchnet"}
{"code": "def _to_backend_mesh(device_mesh):\n    mesh_dims = list(zip(device_mesh.axis_names, device_mesh.shape))\n    return dtensor.create_distributed_mesh(mesh_dims=mesh_dims, local_devices=device_mesh.devices.flatten())", "docstring": "Convert the DeviceMesh to Tensorflow backend specific Mesh.\n\nArgs:\ndevice_mesh: DeviceMesh instance to convert.\n\nReturns:\nA `tf.dtensor.Mesh` instance.", "source": "github-repos"}
{"code": "def preprocess(source):\n  \n  doc = html5lib.parseFragment(source)\n  source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')\n  source = source.replace(u'\\n', u'').strip()\n  source = re.sub(r'\\s\\s+', u' ', source)\n  return source", "docstring": "Removes unnecessary break lines and white spaces.\n\nArgs:\nsource (str): Input sentence.\n\nReturns:\nPreprocessed sentence. (str)", "source": "juraj-google-style"}
{"code": "def build(self):\n\n    def _create_per_worker_dataset():\n        dataset = self._dataset_fn()\n        return dataset\n    per_worker_dataset = self._coordinator._create_per_worker_resources(_create_per_worker_dataset)\n    dataset_fn_output_type_spec = self._dataset_fn.structured_outputs._type_spec\n    for dataset_remote_value in per_worker_dataset._values:\n        dataset_remote_value._type_spec = dataset_fn_output_type_spec\n    return per_worker_dataset", "docstring": "Trigger dataset creation on workers without creating an iterator.\n\nReturns:\nA PerWorkerValues object containing a tuple of RemoteValues, themselves\ncontaining the built Dataset for each worker", "source": "github-repos"}
{"code": "def to_pytd_type(self, val: abstract.BaseValue) -> pytd.Type:\n    if val is self._ctx.consts.Any:\n        return pytd.AnythingType()\n    elif isinstance(val, abstract.Union):\n        return pytd_utils.JoinTypes((self.to_pytd_type(v) for v in val.options))\n    elif isinstance(val, abstract.PythonConstant):\n        return pytd.NamedType(f'builtins.{val.constant.__class__.__name__}')\n    elif isinstance(val, abstract.FunctionArgDict):\n        return pytd.NamedType('builtins.dict')\n    elif isinstance(val, abstract.SimpleClass):\n        return pytd.GenericType(base_type=pytd.NamedType('builtins.type'), parameters=(pytd.NamedType(val.name),))\n    elif isinstance(val, abstract.BaseInstance):\n        return pytd.NamedType(val.cls.name)\n    elif isinstance(val, (abstract.BaseFunction, abstract.BoundFunction)):\n        if len(val.signatures) > 1:\n            fixed_length_posargs_only = False\n        else:\n            sig = val.signatures[0]\n            fixed_length_posargs_only = not sig.defaults and (not sig.varargs_name) and (not sig.kwonly_params) and (not sig.kwargs_name)\n        if fixed_length_posargs_only:\n            pytd_sig, = self.to_pytd_def(val).signatures\n            params = tuple((param.type for param in pytd_sig.params))\n            return pytd.CallableType(base_type=pytd.NamedType('typing.Callable'), parameters=params + (pytd_sig.return_type,))\n        else:\n            ret = abstract.join_values(self._ctx, [frame.get_return_value() for frame in val.analyze()])\n            return pytd.GenericType(base_type=pytd.NamedType('typing.Callable'), parameters=(pytd.AnythingType(), self.to_pytd_type(ret)))\n    else:\n        raise NotImplementedError(f'to_pytd_type() not implemented for {val.__class__.__name__}: {val}')", "docstring": "Returns the type of the abstract value, as a pytd node.\n\nFor example, if the abstract value is:\nPythonConstant(0)\nthen to_pytd_type() produces:\npytd.NamedType(int)\n\nArgs:\nval: The abstract value.", "source": "github-repos"}
{"code": "def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):\n    for artifact_definition in artifacts_reader.ReadDirectory(path, extension=extension):\n        self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from files in a directory.\n\nThis function does not recurse sub directories.\n\nArgs:\nartifacts_reader (ArtifactsReader): an artifacts reader.\npath (str): path of the directory to read from.\nextension (Optional[str]): extension of the filenames to read.\n\nRaises:\nKeyError: if a duplicate artifact definition is encountered.", "source": "codesearchnet"}
{"code": "def git_merge(base, head, no_ff=False):\n    \n    \n    pretend = context.get('pretend', False)\n    branch = git.current_branch(refresh=True)\n\n    if branch.name != base and not pretend:\n        git_checkout(base)\n\n    args = []\n\n    if no_ff:\n        args.append('--no-ff')\n\n    log.info(\"Merging <33>{}<32> into <33>{}<32>\", head, base)\n    shell.run('git merge {args} {branch}'.format(\n        args=' '.join(args),\n        branch=head,\n    ))\n\n    if branch.name != base and not pretend:\n        git_checkout(branch.name)", "docstring": "Merge *head* into *base*.\n\nArgs:\nbase (str):\nThe base branch. *head* will be merged into this branch.\nhead (str):\nThe branch that will be merged into *base*.\nno_ff (bool):\nIf set to **True** it will force git to create merge commit. If set\nto **False** (default) it will do a fast-forward merge if possible.", "source": "juraj-google-style"}
{"code": "def __init__(self, key=None, **kwargs):\n    \n    if not key:\n      raise ValueError('Missing key.')\n\n    super(RC4Decrypter, self).__init__()\n    self._rc4_cipher = ARC4.new(key)", "docstring": "Initializes a decrypter.\n\nArgs:\nkey (Optional[bytes]): key.\nkwargs (dict): keyword arguments depending on the decrypter.\n\nRaises:\nValueError: when key is not set.", "source": "juraj-google-style"}
{"code": "def hessian(self, coordinates):\n    N3 = coordinates.size\n    hessian = numpy.zeros((N3, N3), float)\n    for term in self.terms:\n        term.add_to_hessian(coordinates, hessian)\n    return hessian", "docstring": "Compute the force-field Hessian for the given coordinates.\n\nArgument:\n| ``coordinates`` -- A numpy array with the Cartesian atom\ncoordinates, with shape (N,3).\n\nReturns:\n| ``hessian`` -- A numpy array with the Hessian, with shape (3*N,\n3*N).", "source": "codesearchnet"}
{"code": "def change_numbering(self, new_index=None):\n    if (new_index is None):\n        new_index = range(len(self))\n    elif (len(new_index) != len(self)):\n        raise ValueError('len(new_index) has to be the same as len(self)')\n    c_table = self.loc[(:, ['b', 'a', 'd'])]\n    c_table = c_table.replace(constants.int_label)\n    try:\n        c_table = c_table.astype('i8')\n    except ValueError:\n        raise ValueError('Due to a bug in pandas it is necessary to have integer columns')\n    c_table = c_table.replace(self.index, new_index)\n    c_table = c_table.replace({v: k for (k, v) in constants.int_label.items()})\n    out = self.copy()\n    out.unsafe_loc[(:, ['b', 'a', 'd'])] = c_table\n    out._frame.index = new_index\n    return out", "docstring": "Change numbering to a new index.\n\nChanges the numbering of index and all dependent numbering\n(bond_with...) to a new_index.\nThe user has to make sure that the new_index consists of distinct\nelements.\n\nArgs:\nnew_index (list): If None the new_index is taken from 1 to the\nnumber of atoms.\n\nReturns:\nZmat: Reindexed version of the zmatrix.", "source": "codesearchnet"}
{"code": "def enable(self, key_id, **kwargs):\n        \n        path = '%s/%s/enable' % (self.path, key_id)\n        self.gitlab.http_post(path, **kwargs)", "docstring": "Enable a deploy key for a project.\n\nArgs:\nkey_id (int): The ID of the key to enable\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabProjectDeployKeyError: If the key could not be enabled", "source": "juraj-google-style"}
{"code": "def verifymessage(self, address, signature, message):\n    verified = self.rpc.call('verifymessage', address, signature, message)\n    self.logger.debug(('Signature verified: %s' % str(verified)))\n    return verified", "docstring": "Verifies that a message has been signed by an address.\n\nArgs:\naddress (str): address claiming to have signed the message\nsignature (str): ECDSA signature\nmessage (str): plaintext message which was signed\n\nReturns:\nbool: True if the address signed the message, False otherwise", "source": "codesearchnet"}
{"code": "def request_server_info(self):\n    if (self._server_info is None):\n        self._server_info = self._send_request_server_info()\n    return self._server_info", "docstring": "Ask for information about the server.\n\nReturns:\nA dictionary of server attributes.", "source": "codesearchnet"}
{"code": "def fetch(clobber=False):\n    \n\n    dest_dir = fname_pattern = os.path.join(data_dir(), 'iphas')\n    url_pattern = 'http:\n    fname_pattern = os.path.join(dest_dir, 'A_samp_') + '{:03d}.tar.gz'\n\n    \n    if not clobber:\n        h5_fname = os.path.join(dest_dir, 'iphas.h5')\n        h5_size = 227817543 \n        h5_dsets = {\n            'samples': (61130,)\n        }\n        if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets):\n            print('File appears to exist already. Call `fetch(clobber=True)` '\n                  'to force overwriting of existing file.')\n            return\n\n    \n    file_md5sum = {\n        30:  'dd531e397622bc97d4ff92b6c7863ade',\n        40:  'b0f925eb3e46b77876e4054a26ad5b52',\n        50:  'ea3b9500f0419d66dd92d9f9c127c2b5',\n        60:  'cccf136f4e2306a6038e8093499216fd',\n        70:  'a05fe2f815086686056c18087cc5410b',\n        80:  '799bf618c8827b3d7250c884ec66ec49',\n        90:  'd2a302d917da768bacf6ea74cb9dcfad',\n        100: '2c75e31ad9320818556c4c9964b6af65',\n        110: '742ea8de6f5f8a7e549f6c56b0088789',\n        120: '9beabfa2c9634f953adadb5016eab072',\n        130: '7cd7313f466eb60e8318d0f1bd32e035',\n        140: 'fb6d09e4d939081b891e245c30b791f1',\n        150: '8e9b6dc1561183aeadc64f41c85a64a8',\n        160: '8a35828457b7b1d53d06998114553674',\n        170: '7ffb29ec23e2f625dcfaaa84c293821d',\n        180: 'c737da479d132b88483d6ddab5b25fc8',\n        190: '9bc5fc7f7ba55f36a167473bb3679601',\n        200: '7d8ffc4aa2f7c7026d8aa3ffb670d48e',\n        210: 'e31b04964b7970b81fc90c120b4ebc24'\n    }\n\n    \n    for key in file_md5sum:\n        url = url_pattern.format(key)\n        print('Downloading {}'.format(url))\n\n        fetch_utils.download_and_verify(\n            url,\n            file_md5sum[key],\n            fname_pattern.format(key))\n\n    \n    print('Repacking files...')\n    ascii2h5(dest_dir, os.path.join(dest_dir, 'iphas.h5'))\n\n    \n    print('Removing original files...')\n    for key in file_md5sum:\n        os.remove(fname_pattern.format(key))", "docstring": "Downloads the IPHAS 3D dust map of Sale et al. (2014).\n\nArgs:\nclobber (Optional[bool]): If ``True``, any existing file will be\noverwritten, even if it appears to match. If ``False`` (the\ndefault), ``fetch()`` will attempt to determine if the dataset\nalready exists. This determination is not 100\\% robust against data\ncorruption.", "source": "juraj-google-style"}
{"code": "def __init__(self, dump_root, tfdbg_run_id, circular_buffer_size=DEFAULT_CIRCULAR_BUFFER_SIZE):\n    if not dump_root:\n        raise ValueError('Empty or None dump root')\n    self._dump_root = dump_root\n    self._tfdbg_run_id = tfdbg_run_id\n    _pywrap_debug_events_writer.Init(self._dump_root, self._tfdbg_run_id, circular_buffer_size)", "docstring": "Construct a DebugEventsWriter object.\n\nNOTE: Given the same `dump_root`, all objects from this constructor\nwill point to the same underlying set of writers. In other words, they\nwill write to the same set of debug events files in the `dump_root`\nfolder.\n\nArgs:\ndump_root: The root directory for dumping debug data. If `dump_root` does\nnot exist as a directory, it will be created.\ntfdbg_run_id: Debugger Run ID.\ncircular_buffer_size: Size of the circular buffer for each of the two\nexecution-related debug events files: with the following suffixes: -\n.execution - .graph_execution_traces If <= 0, the circular-buffer\nbehavior will be abolished in the constructed object.", "source": "github-repos"}
{"code": "def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):\n    dist = np.sqrt(np.linspace(0.0, 1.0, resolution))\n    if ccw:\n        direction = 1.0\n    else:\n        direction = (- 1.0)\n    angle = ((((dist * spirals) * np.pi) * 2.0) * direction)\n    spiral_texture = ((((np.cos(angle) * dist) / 2.0) + 0.5), (((np.sin(angle) * dist) / 2.0) + 0.5))\n    return spiral_texture", "docstring": "Makes a texture consisting of a spiral from the origin.\n\nArgs:\nspirals (float): the number of rotations to make\nccw (bool): make spirals counter-clockwise (default is clockwise)\noffset (float): if non-zero, spirals start offset by this amount\nresolution (int): number of midpoints along the spiral\n\nReturns:\nA texture.", "source": "codesearchnet"}
{"code": "def get_largest_schedule_within_budget(self, budget, proportion_discard):\n        \n\n        \n        \n        valid_schedules_and_costs = []\n        for R in range(1, budget):\n            schedule = self.generate_hyperband_schedule(R, proportion_discard)\n            cost = self.compute_schedule_cost(schedule)\n            if cost <= budget:\n                valid_schedules_and_costs.append((schedule, cost))\n\n        \n        valid_schedules_and_costs.sort(key=lambda x: x[1], reverse=True)\n        return valid_schedules_and_costs[0][0]", "docstring": "Gets the largest hyperband schedule within target_budget.\nThis is required since the original hyperband algorithm uses R,\nthe maximum number of resources per configuration.\nTODO(maxlam): Possibly binary search it if this becomes a bottleneck.\n\nArgs:\nbudget: total budget of the schedule.\nproportion_discard: hyperband parameter that specifies\nthe proportion of configurations to discard per iteration.", "source": "juraj-google-style"}
{"code": "def convert_to_rgb(image: np.ndarray, palette: Optional[PIL.ImagePalette.ImagePalette]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> ImageInput:\n    if input_data_format is None:\n        input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))\n    data_format = input_data_format if data_format is None else data_format\n    mode = 'P' if palette is not None else None\n    image = to_pil_image(image, image_mode=mode, input_data_format=input_data_format)\n    if image.mode == 'P' and palette is not None:\n        image.putpalette(palette)\n    image_rgba = image.convert('RGBA')\n    background = Image.new('RGBA', image_rgba.size, (255, 255, 255))\n    alpha_composite = Image.alpha_composite(background, image_rgba)\n    alpha_composite = alpha_composite.convert('RGB')\n    output_array = np.array(alpha_composite)\n    output_array = to_channel_dimension_format(output_array, data_format, input_channel_dim=ChannelDimension.LAST)\n    return output_array", "docstring": "Converts an image to RGB format.\nArgs:\nimage (`np.ndarray`):\nThe image to convert.\npalette (List[int], *optional*):\nThe palette to use if given.\ndata_format (ChannelDimension or str, *optional*):\nThe channel dimension format for the output image. If not provided, it will be the same as the input image.\ninput_data_format (ChannelDimension or str, *optional*):\nThe channel dimension format of the input image.", "source": "github-repos"}
{"code": "def clip_action(action, space):\n    \n\n    if isinstance(space, gym.spaces.Box):\n        return np.clip(action, space.low, space.high)\n    elif isinstance(space, gym.spaces.Tuple):\n        if type(action) not in (tuple, list):\n            raise ValueError(\"Expected tuple space for actions {}: {}\".format(\n                action, space))\n        out = []\n        for a, s in zip(action, space.spaces):\n            out.append(clip_action(a, s))\n        return out\n    else:\n        return action", "docstring": "Called to clip actions to the specified range of this policy.\n\nArguments:\naction: Single action.\nspace: Action space the actions should be present in.\n\nReturns:\nClipped batch of actions.", "source": "juraj-google-style"}
{"code": "def mme_match(case_obj, match_type, mme_base_url, mme_token, nodes=None, mme_accepts=None):\n    query_patients = []\n    server_responses = []\n    url = None\n    query_patients = case_obj['mme_submission']['patients']\n    if (match_type == 'internal'):\n        url = ''.join([mme_base_url, '/match'])\n        for patient in query_patients:\n            json_resp = matchmaker_request(url=url, token=mme_token, method='POST', content_type=mme_accepts, accept=mme_accepts, data={'patient': patient})\n            resp_obj = {'server': 'Local MatchMaker node', 'patient_id': patient['id'], 'results': json_resp.get('results'), 'status_code': json_resp.get('status_code'), 'message': json_resp.get('message')}\n            server_responses.append(resp_obj)\n    else:\n        query_patients = [patient['id'] for patient in query_patients]\n        node_ids = [node['id'] for node in nodes]\n        if (match_type in node_ids):\n            node_ids = [match_type]\n        for patient in query_patients:\n            for node in node_ids:\n                url = ''.join([mme_base_url, '/match/external/', patient, '?node=', node])\n                json_resp = matchmaker_request(url=url, token=mme_token, method='POST')\n                resp_obj = {'server': node, 'patient_id': patient, 'results': json_resp.get('results'), 'status_code': json_resp.get('status_code'), 'message': json_resp.get('message')}\n                server_responses.append(resp_obj)\n    return server_responses", "docstring": "Initiate a MatchMaker match against either other Scout patients or external nodes\n\nArgs:\ncase_obj(dict): a scout case object already submitted to MME\nmatch_type(str): 'internal' or 'external'\nmme_base_url(str): base url of the MME server\nmme_token(str): auth token of the MME server\nmme_accepts(str): request content accepted by MME server (only for internal matches)\n\nReturns:\nmatches(list): a list of eventual matches", "source": "codesearchnet"}
{"code": "def scopes_as(self, new_scopes):\n        \n        old_scopes, self.scopes = self.scopes, new_scopes\n        yield\n        self.scopes = old_scopes", "docstring": "Replace my :attr:`scopes` for the duration of the with block.\n\nMy global scope is not replaced.\n\nArgs:\nnew_scopes (list of dict-likes): The new :attr:`scopes` to use.", "source": "juraj-google-style"}
{"code": "def on(self, event):\n        \n        def decorator(f):\n            self.add_event_handler(f, event)\n            return f\n\n        return decorator", "docstring": "Decorator helper method around `add_event_handler`. Example:\n\n>>> from telethon import TelegramClient, events\n>>> client = TelegramClient(...)\n>>>\n>>> @client.on(events.NewMessage)\n... async def handler(event):\n...     ...\n...\n>>>\n\nArgs:\nevent (`_EventBuilder` | `type`):\nThe event builder class or instance to be used,\nfor instance ``events.NewMessage``.", "source": "juraj-google-style"}
{"code": "def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):\n    return serialize_many_sparse_v2(sp_input, out_type, name)", "docstring": "Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.\n\nThe `SparseTensor` must have rank `R` greater than 1, and the first dimension\nis treated as the minibatch dimension.  Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension.  The serialized\n`SparseTensor` objects going into each row of the output `Tensor` will have\nrank `R-1`.\n\nThe minibatch size `N` is extracted from `sparse_shape[0]`.\n\nArgs:\nsp_input: The input rank `R` `SparseTensor`.\nname: A name prefix for the returned tensors (optional).\nout_type: The `dtype` to use for serialization.\n\nReturns:\nA matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column\nrepresents serialized `SparseTensor`'s indices, values, and shape\n(respectively).\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"}
{"code": "def slice(self, start, end):\n    reverse = False\n    if (start > end):\n        temp = start\n        start = end\n        end = temp\n        reverse = True\n    seg = self.copy()\n    seg.points = seg.points[start:(end + 1)]\n    if reverse:\n        seg.points = list(reversed(seg.points))\n    return seg", "docstring": "Creates a copy of the current segment between indexes. If end > start,\npoints are reverted\n\nArgs:\nstart (int): Start index\nend (int): End index\nReturns:\n:obj:`Segment`", "source": "codesearchnet"}
{"code": "def cancel(self, nids=None):\n    if self.has_chrooted:\n        warnings.warn('Cannot cancel the flow via sshfs!')\n        return (- 1)\n    if os.path.exists(self.pid_file):\n        cprint('Found scheduler attached to this flow.', 'yellow')\n        cprint('Sending SIGKILL to the scheduler before cancelling the tasks!', 'yellow')\n        with open(self.pid_file, 'rt') as fh:\n            pid = int(fh.readline())\n        retcode = os.system(('kill -9 %d' % pid))\n        self.history.info(('Sent SIGKILL to the scheduler, retcode: %s' % retcode))\n        try:\n            os.remove(self.pid_file)\n        except IOError:\n            pass\n    num_cancelled = 0\n    for task in self.iflat_tasks(nids=nids):\n        num_cancelled += task.cancel()\n    return num_cancelled", "docstring": "Cancel all the tasks that are in the queue.\nnids is an optional list of node identifiers used to filter the tasks.\n\nReturns:\nNumber of jobs cancelled, negative value if error", "source": "codesearchnet"}
{"code": "def _send_success_response(self, response, start_response):\n    \n    headers = [('Content-Type', 'application/json; charset=UTF-8')]\n    return util.send_wsgi_response('200 OK', headers, response, start_response)", "docstring": "Sends an HTTP 200 json success response.\n\nThis calls start_response and returns the response body.\n\nArgs:\nresponse: A string containing the response body to return.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string, the response body.", "source": "juraj-google-style"}
{"code": "def _publish_status(self, slug, data):\n    status_topic = (self.topics.prefix + 'devices/{}/data/status'.format(slug))\n    self._logger.debug('Publishing status message: (topic=%s) (message=%s)', status_topic, str(data))\n    self.client.publish(status_topic, data)", "docstring": "Publish a status message for a device\n\nArgs:\nslug (string): The device slug that we are publishing on behalf of\ndata (dict): The status message data to be sent back to the caller", "source": "codesearchnet"}
{"code": "def epoch_rates_to_pmf(problems, epoch_rates=None):\n    if (epoch_rates is None):\n        epoch_rates = ([1.0] * len(problems))\n    example_rates = [(epoch_rate * p.num_training_examples) for (p, epoch_rate) in zip(problems, epoch_rates)]\n    return example_rates_to_pmf(example_rates)", "docstring": "Create a probability-mass-function based on relative epoch rates.\n\nif epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems)\ni.e. it takes each problem the same time to go through one epoch.\n\nIf epoch_rates is given, then these are the relative numbers of epochs\nof each problem to go through in a given amount of time.\n\nEach must have problem.num_training_examples implemented.\n\nArgs:\nproblems: a list of Problem instances.\nepoch_rates: an optional list of float\n\nReturns:\na list of floating point values.", "source": "codesearchnet"}
{"code": "def get_linear_interpolated_value(x_values, y_values, x):\n    a = np.array(sorted(zip(x_values, y_values), key=(lambda d: d[0])))\n    ind = np.where((a[(:, 0)] >= x))[0]\n    if ((len(ind) == 0) or (ind[0] == 0)):\n        raise ValueError('x is out of range of provided x_values')\n    i = ind[0]\n    (x1, x2) = (a[(i - 1)][0], a[i][0])\n    (y1, y2) = (a[(i - 1)][1], a[i][1])\n    return (y1 + (((y2 - y1) / (x2 - x1)) * (x - x1)))", "docstring": "Returns an interpolated value by linear interpolation between two values.\nThis method is written to avoid dependency on scipy, which causes issues on\nthreading servers.\n\nArgs:\nx_values: Sequence of x values.\ny_values: Corresponding sequence of y values\nx: Get value at particular x\n\nReturns:\nValue at x.", "source": "codesearchnet"}
{"code": "def Write(self, schedule, output_file):\n    root = ET.Element('kml')\n    root.attrib['xmlns'] = 'http:\n    doc = ET.SubElement(root, 'Document')\n    open_tag = ET.SubElement(doc, 'open')\n    open_tag.text = '1'\n    self._CreateStopsFolder(schedule, doc)\n    if self.split_routes:\n        route_types = set()\n        for route in schedule.GetRouteList():\n            route_types.add(route.route_type)\n        route_types = list(route_types)\n        route_types.sort()\n        for route_type in route_types:\n            self._CreateRoutesFolder(schedule, doc, route_type)\n    else:\n        self._CreateRoutesFolder(schedule, doc)\n    self._CreateShapesFolder(schedule, doc)\n    self._SetIndentation(root)\n    if isinstance(output_file, file):\n        output = output_file\n    else:\n        output = open(output_file, 'w')\n    output.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n    ET.ElementTree(root).write(output, 'utf-8')", "docstring": "Writes out a feed as KML.\n\nArgs:\nschedule: A transitfeed.Schedule object containing the feed to write.\noutput_file: The name of the output KML file, or file object to use.", "source": "codesearchnet"}
{"code": "def heightmap_has_land_on_border(hm: np.ndarray, waterlevel: float) -> bool:\n    return bool(lib.TCOD_heightmap_has_land_on_border(_heightmap_cdata(hm), waterlevel))", "docstring": "Returns True if the map edges are below ``waterlevel``, otherwise False.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nwaterLevel (float): The water level to use.\n\nReturns:\nbool: True if the map edges are below ``waterlevel``, otherwise False.", "source": "codesearchnet"}
{"code": "def get_stored_content_length(headers):\n  \n  length = headers.get('x-goog-stored-content-length')\n  if length is None:\n    length = headers.get('content-length')\n  return length", "docstring": "Return the content length (in bytes) of the object as stored in GCS.\n\nx-goog-stored-content-length should always be present except when called via\nthe local dev_appserver. Therefore if it is not present we default to the\nstandard content-length header.\n\nArgs:\nheaders: a dict of headers from the http response.\n\nReturns:\nthe stored content length.", "source": "juraj-google-style"}
{"code": "def get_value(value_proto):\n  \n  field = value_proto.WhichOneof('value_type')\n  if field in __native_value_types:\n      return getattr(value_proto, field)\n  if field == 'timestamp_value':\n    return from_timestamp(value_proto.timestamp_value)\n  if field == 'array_value':\n    return [get_value(sub_value)\n            for sub_value in value_proto.array_value.values]\n  return None", "docstring": "Gets the python object equivalent for the given value proto.\n\nArgs:\nvalue_proto: datastore.Value proto message.\n\nReturns:\nthe corresponding python object value. timestamps are converted to\ndatetime, and datastore.Value is returned for blob_key_value.", "source": "juraj-google-style"}
{"code": "def describe_message(message_definition):\n    \n    message_descriptor = MessageDescriptor()\n    message_descriptor.name = message_definition.definition_name().split(\n        '.')[-1]\n\n    fields = sorted(message_definition.all_fields(),\n                    key=lambda v: v.number)\n    if fields:\n        message_descriptor.fields = [describe_field(field) for field in fields]\n\n    try:\n        nested_messages = message_definition.__messages__\n    except AttributeError:\n        pass\n    else:\n        message_descriptors = []\n        for name in nested_messages:\n            value = getattr(message_definition, name)\n            message_descriptors.append(describe_message(value))\n\n        message_descriptor.message_types = message_descriptors\n\n    try:\n        nested_enums = message_definition.__enums__\n    except AttributeError:\n        pass\n    else:\n        enum_descriptors = []\n        for name in nested_enums:\n            value = getattr(message_definition, name)\n            enum_descriptors.append(describe_enum(value))\n\n        message_descriptor.enum_types = enum_descriptors\n\n    return message_descriptor", "docstring": "Build descriptor for Message class.\n\nArgs:\nmessage_definition: Message class to provide descriptor for.\n\nReturns:\nInitialized MessageDescriptor instance describing the Message class.", "source": "juraj-google-style"}
{"code": "def emit(self, value):\n    if (not self._tstate.output_writer):\n        logging.error('emit is called, but no output writer is set.')\n        return\n    self._tstate.output_writer.write(value)", "docstring": "Emits a value to output writer.\n\nArgs:\nvalue: a value of type expected by the output writer.", "source": "codesearchnet"}
{"code": "def register_list(self):\n        \n        num_items = self.MAX_NUM_CPU_REGISTERS\n        buf = (ctypes.c_uint32 * num_items)()\n        num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items)\n        return buf[:num_regs]", "docstring": "Returns a list of the indices for the CPU registers.\n\nThe returned indices can be used to read the register content or grab\nthe register name.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nList of registers.", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor=None):\n    batch_size, num_images, num_channels, height, width = pixel_values.shape\n    pixel_values = pixel_values.to(dtype=self.dtype)\n    pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])\n    nb_values_per_image = pixel_values.shape[1:].numel()\n    real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image\n    pixel_values = pixel_values[real_images_inds].contiguous()\n    if pixel_attention_mask is None:\n        pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device)\n    else:\n        pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])\n        pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()\n    patch_size = self.config.vision_config.patch_size\n    patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)\n    patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)\n    patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()\n    image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)\n    image_hidden_states.last_hidden_state\n    image_hidden_states = self.connector(image_hidden_states.last_hidden_state)\n    return image_hidden_states", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\npixel_attention_mask (`torch.LongTensor`, *optional*):\nThe attention mask indicating padded regions in the image.", "source": "github-repos"}
{"code": "def create(self, ospf_process_id, vrf=None):\n    value = int(ospf_process_id)\n    if (not (0 < value < 65536)):\n        raise ValueError('ospf as must be between 1 and 65535')\n    command = 'router ospf {}'.format(ospf_process_id)\n    if vrf:\n        command += (' vrf %s' % vrf)\n    return self.configure(command)", "docstring": "Creates a OSPF process in the specified VRF or the default VRF.\n\nArgs:\nospf_process_id (str): The OSPF process Id value\nvrf (str): The VRF to apply this OSPF process to\nReturns:\nbool: True if the command completed successfully\nException:\nValueError: If the ospf_process_id passed in less\nthan 0 or greater than 65536", "source": "codesearchnet"}
{"code": "def add_material(self, x_min, x_max, n, angle=0):\n    self._mat_params.append([x_min, x_max, n, angle])\n    if (not callable(n)):\n        n_mat = (lambda wl: n)\n    else:\n        n_mat = n\n    Structure._add_material(self, x_min, self.y_min, x_max, self.y_max, n_mat(self._wl), angle)\n    return self.n", "docstring": "Add a refractive index between two x-points.\n\nArgs:\nx_min (float): The start x-point.\nx_max (float): The stop x-point.\nn (float, function):  Refractive index between\n`x_min` and `x_max`.  Either a constant (`float`), or\na function that accepts one parameters, the\nwavelength, and returns a float of the refractive\nindex.  This is useful when doing wavelength\nsweeps and solving for the group velocity.  The\nfunction provided could be a Sellmeier equation.\nangle (float): Angle in degrees of the slope of the\nsidewalls at `x_min` and `x_max`.  This is useful\nfor defining a ridge with angled sidewalls.", "source": "codesearchnet"}
{"code": "def resize(self, size_gigabytes, region):\n    return self.get_data(('volumes/%s/actions/' % self.id), type=POST, params={'type': 'resize', 'size_gigabytes': size_gigabytes, 'region': region})", "docstring": "Detach a Volume to a Droplet.\n\nArgs:\nsize_gigabytes: int - size of the Block Storage volume in GiB\nregion: string - slug identifier for the region", "source": "codesearchnet"}
{"code": "def create_default_views(self, create_datastore_views=False):\n    package = deepcopy(self.data)\n    if self.resources:\n        package['resources'] = self._convert_hdxobjects(self.resources)\n    data = {'package': package, 'create_datastore_views': create_datastore_views}\n    self._write_to_hdx('create_default_views', data, 'package')", "docstring": "Create default resource views for all resources in dataset\n\nArgs:\ncreate_datastore_views (bool): Whether to try to create resource views that point to the datastore\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n        \n        temperatures = np.linspace(tmin, tmax, ntemp)\n\n        if self.structure:\n            ylabel = r\"$\\Delta F$ (kJ/mol)\"\n        else:\n            ylabel = r\"$\\Delta F$ (kJ/mol-c)\"\n\n        fig = self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim,\n                                factor=1e-3, **kwargs)\n\n        return fig", "docstring": "Plots the vibrational contribution to the Helmoltz free energy in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "juraj-google-style"}
{"code": "def shape2d(a):\n    \n    if type(a) == int:\n        return [a, a]\n    if isinstance(a, (list, tuple)):\n        assert len(a) == 2\n        return list(a)\n    raise RuntimeError(\"Illegal shape: {}\".format(a))", "docstring": "Ensure a 2D shape.\n\nArgs:\na: a int or tuple/list of length 2\n\nReturns:\nlist: of length 2. if ``a`` is a int, return ``[a, a]``.", "source": "juraj-google-style"}
{"code": "def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):\n    if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):\n        if (num_lower < 0):\n            num_lower = (rows - 1)\n        if (num_upper < 0):\n            num_upper = (cols - 1)\n        lower_mask = np.tri(cols, rows, num_lower).T\n        upper_mask = np.tri(rows, cols, num_upper)\n        band = ((np.ones((rows, cols)) * lower_mask) * upper_mask)\n        if out_shape:\n            band = band.reshape(out_shape)\n        band = tf.constant(band, tf.float32)\n    else:\n        band = tf.matrix_band_part(tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), tf.cast(num_upper, tf.int64))\n        if out_shape:\n            band = tf.reshape(band, out_shape)\n    return band", "docstring": "Matrix band part of ones.\n\nArgs:\nrows: int determining number of rows in output\ncols: int\nnum_lower: int, maximum distance backward. Negative values indicate\nunlimited.\nnum_upper: int, maximum distance forward. Negative values indicate\nunlimited.\nout_shape: shape to reshape output by.\n\nReturns:\nTensor of size rows * cols reshaped into shape out_shape.", "source": "codesearchnet"}
{"code": "def Copy(From, To):\n    from benchbuild.utils.cmd import cp\n    cp('-ar', '--reflink=auto', From, To)", "docstring": "Small copy wrapper.\n\nArgs:\nFrom (str): Path to the SOURCE.\nTo (str): Path to the TARGET.", "source": "codesearchnet"}
{"code": "def str2dict_values(str_in):\n    \n    tmp_dict = str2dict(str_in)\n    if tmp_dict is None:\n        return None\n    return [tmp_dict[key] for key in sorted(k for k in tmp_dict)]", "docstring": "Extracts the values from a string that represents a dict and returns them\nsorted by key.\n\nArgs:\nstr_in (string) that contains python dict\nReturns:\n(list) with values or None if no valid dict was found\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def teleport(self, agent_name, location=None, rotation=None):\n    self.agents[agent_name].teleport((location * 100), rotation)\n    self.tick()", "docstring": "Teleports the target agent to any given location, and applies a specific rotation.\n\nArgs:\nagent_name (str): The name of the agent to teleport.\nlocation (np.ndarray or list): XYZ coordinates (in meters) for the agent to be teleported to.\nIf no location is given, it isn't teleported, but may still be rotated. Defaults to None.\nrotation (np.ndarray or list): A new rotation target for the agent.\nIf no rotation is given, it isn't rotated, but may still be teleported. Defaults to None.", "source": "codesearchnet"}
{"code": "def _format(name, arr):\n    title = '\n    tlen = len(title)\n    print('-' * tlen)\n    print(title)\n    print('-' * tlen)\n    print(' Total \n    if arr:\n        for item in arr:\n            detail = ''\n            if isinstance(item[1], list):\n                for itm in item[1]:\n                    detail += str(itm) + ', '\n                detail = detail[:-2]\n            else:\n                detail = str(item[1])\n            print(\"  %s ('%s')\\n\" % (str(item[0]), detail))\n    else:\n        print('  No %s' % name)\n    print('\\n')", "docstring": "Prints compatibility check results with a format.\n\nArgs:\nname: String that is the title representing list `arr`.\narr: List of items to be printed in a certain format.", "source": "github-repos"}
{"code": "def get_dopants_from_substitution_probabilities(structure, num_dopants=5, threshold=0.001, match_oxi_sign=False):\n    els_have_oxi_states = [hasattr(s, 'oxi_state') for s in structure.species]\n    if (not all(els_have_oxi_states)):\n        raise ValueError('All sites in structure must have oxidation states to predict dopants.')\n    sp = SubstitutionPredictor(threshold=threshold)\n    subs = [sp.list_prediction([s]) for s in set(structure.species)]\n    subs = [{'probability': pred['probability'], 'dopant_species': list(pred['substitutions'].keys())[0], 'original_species': list(pred['substitutions'].values())[0]} for species_preds in subs for pred in species_preds]\n    subs.sort(key=(lambda x: x['probability']), reverse=True)\n    return _get_dopants(subs, num_dopants, match_oxi_sign)", "docstring": "Get dopant suggestions based on substitution probabilities.\n\nArgs:\nstructure (Structure): A pymatgen structure decorated with\noxidation states.\nnum_dopants (int): The number of suggestions to return for\nn- and p-type dopants.\nthreshold (float): Probability threshold for substitutions.\nmatch_oxi_sign (bool): Whether to force the dopant and original species\nto have the same sign of oxidation state. E.g. If the original site\nis in a negative charge state, then only negative dopants will be\nreturned.\n\nReturns:\n(dict): Dopant suggestions, given as a dictionary with keys \"n_type\" and\n\"p_type\". The suggestions for each doping type are given as a list of\ndictionaries, each with they keys:\n\n- \"probability\": The probability of substitution.\n- \"dopant_species\": The dopant species.\n- \"original_species\": The substituted species.", "source": "codesearchnet"}
{"code": "def view(self, viewer=None, use_curr_dir=False):\n    if (viewer is None):\n        viewer = settings['defaults']['viewer']\n    if use_curr_dir:\n        TEMP_DIR = os.path.curdir\n    else:\n        TEMP_DIR = tempfile.gettempdir()\n\n    def give_filename(i):\n        filename = (('ChemCoord_' + str(i)) + '.xyz')\n        return os.path.join(TEMP_DIR, filename)\n    i = 1\n    while os.path.exists(give_filename(i)):\n        i = (i + 1)\n    self.to_xyz(give_filename(i))\n\n    def open_file(i):\n        'Open file and close after being finished.'\n        try:\n            subprocess.check_call([viewer, give_filename(i)])\n        except (subprocess.CalledProcessError, FileNotFoundError):\n            raise\n        finally:\n            if use_curr_dir:\n                pass\n            else:\n                os.remove(give_filename(i))\n    Thread(target=open_file, args=(i,)).start()", "docstring": "View your molecule.\n\n.. note:: This function writes a temporary file and opens it with\nan external viewer.\nIf you modify your molecule afterwards you have to recall view\nin order to see the changes.\n\nArgs:\nviewer (str): The external viewer to use. If it is None,\nthe default as specified in cc.settings['defaults']['viewer']\nis used.\nuse_curr_dir (bool): If True, the temporary file is written to\nthe current diretory. Otherwise it gets written to the\nOS dependendent temporary directory.\n\nReturns:\nNone:", "source": "codesearchnet"}
{"code": "def update(self, rec=None, drop=None, tables=None, install=None, materialize=None, indexes=None, joins=0, views=0):\n    if (not drop):\n        drop = []\n    if (not tables):\n        tables = set()\n    if (not install):\n        install = set()\n    if (not materialize):\n        materialize = set()\n    if (not indexes):\n        indexes = set()\n    if rec:\n        self.update(drop=rec.drop, tables=rec.tables, install=rec.install, materialize=rec.materialize, indexes=rec.indexes, joins=rec.joins)\n    self.drop += drop\n    self.tables |= set(tables)\n    self.install |= set(install)\n    self.materialize |= set(materialize)\n    self.indexes |= set(indexes)\n    self.joins += joins\n    self.views += views\n    if ((self.joins > 0) or (self.views > 0)):\n        self.materialize |= self.install\n        self.install = set()", "docstring": "Updates current record.\n\nArgs:\nrec (FIMRecord):", "source": "codesearchnet"}
{"code": "def __cloudflare_list_zones(self, *, account, **kwargs):\n        \n        done = False\n        zones = []\n        page = 1\n\n        while not done:\n            kwargs['page'] = page\n            response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)\n            info = response['result_info']\n\n            if 'total_pages' not in info or page == info['total_pages']:\n                done = True\n            else:\n                page += 1\n\n            zones += response['result']\n\n        return zones", "docstring": "Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones\n\nArgs:\naccount (:obj:`CloudFlareAccount`): A CloudFlare Account object\n**kwargs (`dict`): Extra arguments to pass to the API endpoint\n\nReturns:\n`list` of `dict`", "source": "juraj-google-style"}
{"code": "def get_or_search(self) -> List[GridQubit]:\n    if (not self._sequence):\n        self._sequence = self._find_sequence()\n    return self._sequence", "docstring": "Starts the search or gives previously calculated sequence.\n\nReturns:\nThe linear qubit sequence found.", "source": "codesearchnet"}
{"code": "async def _catch_response(self, h11_connection):\n    response = (await self._recv_event(h11_connection))\n    resp_data = {'encoding': self.encoding, 'method': self.method, 'status_code': response.status_code, 'reason_phrase': str(response.reason, 'utf-8'), 'http_version': str(response.http_version, 'utf-8'), 'headers': c_i_dict([(str(name, 'utf-8'), str(value, 'utf-8')) for (name, value) in response.headers]), 'body': b'', 'url': self.req_url}\n    for header in response.headers:\n        if (header[0] == b'set-cookie'):\n            try:\n                resp_data['headers']['set-cookie'].append(str(header[1], 'utf-8'))\n            except (KeyError, AttributeError):\n                resp_data['headers']['set-cookie'] = [str(header[1], 'utf-8')]\n    get_body = False\n    try:\n        if (int(resp_data['headers']['content-length']) > 0):\n            get_body = True\n    except KeyError:\n        try:\n            if ('chunked' in resp_data['headers']['transfer-encoding'].lower()):\n                get_body = True\n        except KeyError:\n            if (resp_data['headers'].get('connection', '').lower() == 'close'):\n                get_body = True\n    if get_body:\n        if (self.callback is not None):\n            endof = (await self._body_callback(h11_connection))\n        elif self.stream:\n            if (not (((self.scheme == self.initial_scheme) and (self.host == self.initial_netloc)) or (resp_data['headers']['connection'].lower() == 'close'))):\n                self.sock._active = False\n            resp_data['body'] = StreamBody(h11_connection, self.sock, resp_data['headers'].get('content-encoding', None), resp_data['encoding'])\n            self.streaming = True\n        else:\n            while True:\n                data = (await self._recv_event(h11_connection))\n                if isinstance(data, h11.Data):\n                    resp_data['body'] += data.data\n                elif isinstance(data, h11.EndOfMessage):\n                    break\n    else:\n        endof = (await self._recv_event(h11_connection))\n        assert isinstance(endof, h11.EndOfMessage)\n    if self.streaming:\n        return StreamResponse(**resp_data)\n    return Response(**resp_data)", "docstring": "Instantiates the parser which manages incoming data, first getting\nthe headers, storing cookies, and then parsing the response's body,\nif any.\n\nThis function also instances the Response class in which the response\nstatus line, headers, cookies, and body is stored.\n\nIt should be noted that in order to remain preformant, if the user\nwishes to do any file IO it should use async files or risk long wait\ntimes and risk connection issues server-side when using callbacks.\n\nIf a callback is used, the response's body will be None.\n\nReturns:\nThe most recent response object.", "source": "codesearchnet"}
{"code": "def sort_objects(objects: List[Any], key: Optional[Callable[[Any], str]]=None) -> List[Any]:\n\n    def noop(x):\n        return x\n    if key is None:\n        key = noop\n    constants = [obj for obj in objects if key(obj).isupper()]\n    classes = [obj for obj in objects if key(obj)[0].isupper() and (not key(obj).isupper())]\n    functions = [obj for obj in objects if not key(obj)[0].isupper()]\n    key1 = ignore_underscore_and_lowercase(key)\n    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)", "docstring": "Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased\nlast).\n\nArgs:\nobjects (`List[Any]`):\nThe list of objects to sort.\nkey (`Callable[[Any], str]`, *optional*):\nA function taking an object as input and returning a string, used to sort them by alphabetical order.\nIf not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string).\n\nReturns:\n`List[Any]`: The sorted list with the same elements as in the inputs", "source": "github-repos"}
{"code": "def ConvertDateTimeToOffset(date_time_value):\n  \n  date_time_obj = datetime(int(date_time_value['date']['year']),\n                           int(date_time_value['date']['month']),\n                           int(date_time_value['date']['day']),\n                           int(date_time_value['hour']),\n                           int(date_time_value['minute']),\n                           int(date_time_value['second']))\n  date_time_str = pytz.timezone(\n      date_time_value['timeZoneId']).localize(date_time_obj).isoformat()\n\n  if date_time_str[-5:] == '00:00':\n    return date_time_str[:-6] + 'Z'\n  else:\n    return date_time_str", "docstring": "Converts the PQL formatted response for a dateTime object.\n\nOutput conforms to ISO 8061 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.'\n\nArgs:\ndate_time_value: dict The date time value from the PQL response.\n\nReturns:\nstr: A string representation of the date time value uniform to\nReportService.", "source": "juraj-google-style"}
{"code": "def read_double(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack(\"%sd\" % endian, 8)", "docstring": "Read 8 bytes as a double value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nfloat:", "source": "juraj-google-style"}
{"code": "def create_summary_metadata(display_name, description, num_thresholds):\n    pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData(version=PROTO_VERSION, num_thresholds=num_thresholds)\n    content = pr_curve_plugin_data.SerializeToString()\n    return summary_pb2.SummaryMetadata(display_name=display_name, summary_description=description, plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME, content=content))", "docstring": "Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.\n\nArguments:\ndisplay_name: The display name used in TensorBoard.\ndescription: The description to show in TensorBoard.\nnum_thresholds: The number of thresholds to use for PR curves.\n\nReturns:\nA `summary_pb2.SummaryMetadata` protobuf object.", "source": "codesearchnet"}
{"code": "def process_tag(self, tag_proc_name, tag):\n        \n        tag_processor = self.tag_procs[tag_proc_name]\n\n        db_entry = (tag_processor.get_name(tag),\n                    tag_processor.get_entry_type(tag),\n                    tag_processor.get_filename(tag))\n\n        self.zeal_db.insert(*db_entry)\n\n        self.entry_count += 1", "docstring": "Process a tag with a tag processor and insert a DB entry.\n\nArgs:\ntag_proc_name: A string key that maps to the TagProcessor to use.\ntag: A BeautifulSoup Tag to process.", "source": "juraj-google-style"}
{"code": "def _InverseDistanceWeighted(self, latitude, longitude, radius=1):\n    if (radius == 1):\n        offsetmatrix = (None, (0, 1), None, ((- 1), 0), (0, 0), (1, 0), None, (0, (- 1)), None)\n    elif (radius == 2):\n        offsetmatrix = (None, None, (0, 2), None, None, None, ((- 1), 1), (0, 1), (1, 1), None, ((- 2), 0), ((- 1), 0), (0, 0), (1, 0), (2, 0), None, ((- 1), (- 1)), (0, (- 1)), (1, (- 1)), None, None, None, (0, (- 2)), None, None)\n    else:\n        raise ValueError('Radius {} invalid, expected 1 or 2'.format(radius))\n    (row, column) = self.get_row_and_column(latitude, longitude)\n    (center_lat, center_long) = self.get_lat_and_long(row, column)\n    if ((latitude == center_lat) and (longitude == center_long)):\n        return self.get_elevation_from_row_and_column(int(row), int(column))\n    weights = 0\n    elevation = 0\n    for offset in offsetmatrix:\n        if ((offset is not None) and (0 <= (row + offset[0]) < self.square_side) and (0 <= (column + offset[1]) < self.square_side)):\n            cell = self.get_elevation_from_row_and_column(int((row + offset[0])), int((column + offset[1])))\n            if (cell is not None):\n                distance = mod_utils.distance(latitude, longitude, (center_lat + (float(offset[0]) / (self.square_side - 1))), (center_long + (float(offset[1]) / (self.square_side - 1))))\n                weights += (1 / distance)\n                elevation += (cell / distance)\n    return (elevation / weights)", "docstring": "Return the Inverse Distance Weighted Elevation.\n\nInterpolate the elevation of the given point using the inverse\ndistance weigthing algorithm (exp of 1) in the form:\nsum((1/distance) * elevation)/sum(1/distance)\nfor each point in the matrix.\nThe matrix size is determined by the radius. A radius of 1 uses\n5 points and a radius of 2 uses 13 points. The matrices are set\nup to use cells adjacent to and including the one that contains\nthe given point. Any cells referenced by the matrix that are on\nneighboring tiles are ignored.\n\nArgs:\nlatitude: float of the latitude in decimal degrees\nlongitude: float of the longitude in decimal degrees\nradius: int of 1 or 2 indicating the size of the matrix\n\nReturns:\na float of the interpolated elevation in the same units as\nthe underlying .hgt file (meters)\n\nExceptions:\nraises a ValueError if an invalid radius is supplied", "source": "codesearchnet"}
{"code": "class RunOfflineDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, offline_detector: OfflineDetector):\n        self._offline_detector = offline_detector\n\n    def _restore_and_convert(self, elem: tuple[tuple[Any, Any, beam.Row], Any]) -> NestedKeyedOutputT:\n        \n        (orig_key, temp_key, row), prediction = elem\n        assert isinstance(prediction, AnomalyPrediction), 'Wrong model handler output type.' + f\"Expected: 'AnomalyPrediction', but got '{type(prediction).__name__}'. \" + 'Consider adding a post-processing function via `with_postprocess_fn` ' + f\"to convert from '{type(prediction).__name__}' to 'AnomalyPrediction', \" + 'or use `score_prediction_adapter` or `label_prediction_adapter` to ' + 'perform the conversion.'\n        result = AnomalyResult(example=row, predictions=[dataclasses.replace(prediction, model_id=self._offline_detector._model_id)])\n        return (orig_key, (temp_key, result))\n\n    def _select_features(self, elem: tuple[Any, beam.Row]) -> tuple[Any, beam.Row]:\n        assert self._offline_detector._features is not None\n        k, v = elem\n        row_dict = v._asdict()\n        return (k, beam.Row(**{k: row_dict[k] for k in self._offline_detector._features}))\n\n    def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        model_uuid = f'{self._offline_detector._model_id}:{uuid.uuid4().hex[:6]}'\n        run_inference = RunInference(self._offline_detector._keyed_model_handler, **self._offline_detector._run_inference_args)\n        rekeyed_model_input = input | 'Rekey' >> beam.Map(lambda x: ((x[0], x[1][0], x[1][1]), x[1][1]))\n        if self._offline_detector._features is not None:\n            rekeyed_model_input = rekeyed_model_input | 'Select Features' >> beam.Map(self._select_features)\n        rekeyed_model_output = rekeyed_model_input | f'Call RunInference ({model_uuid})' >> run_inference\n        ret = rekeyed_model_output | 'Restore keys and convert model output' >> beam.Map(self._restore_and_convert)\n        if self._offline_detector._threshold_criterion:\n            ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._offline_detector._threshold_criterion)\n        return ret", "docstring": "Runs a offline anomaly detector on a PCollection of data.\n\nThis PTransform applies a `OfflineDetector` to the input data, handling\ncustom input/output conversion and inference.\n\nArgs:\noffline_detector: The `OfflineDetector` to run.", "source": "github-repos"}
{"code": "async def get_matches(self, force_update=False) -> list:\n        \n        if force_update or self.matches is None:\n            res = await self.connection('GET',\n                                        'tournaments/{}/matches'.format(self._id),\n                                        include_attachments=1)\n            self._refresh_matches_from_json(res)\n        return self.matches or []", "docstring": "get all matches (once the tournament is started)\n\n|methcoro|\n\nArgs:\nforce_update (default=False): True to force an update to the Challonge API\n\nReturns:\nlist[Match]:\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def __init__(self, package, ad, config=None):\n    super().__init__(package=package, device=ad)\n    self.host_port = None\n    self.device_port = None\n    self.uid = UNKNOWN_UID\n    self._adb = ad.adb\n    self._user_id = None if config is None else config.user_id\n    self._proc = None\n    self._client = None\n    self._conn = None\n    self._event_client = None\n    self._config = config or Config()\n    self._server_start_stdout = []", "docstring": "Initializes the instance of Snippet Client V2.\n\nArgs:\npackage: str, see base class.\nad: AndroidDevice, the android device object associated with this client.\nconfig: Config, the configuration object. See the docstring of the\n`Config` class for supported configurations.", "source": "github-repos"}
{"code": "def predict(self, text):\n    pred = self.predict_proba(text)\n    tags = self._get_tags(pred)\n    return tags", "docstring": "Predict using the model.\n\nArgs:\ntext: string, the input text.\n\nReturns:\ntags: list, shape = (num_words,)\nReturns predicted values.", "source": "codesearchnet"}
{"code": "def write_to_filterbank(self, filename_out):\n    print('[Filterbank] Warning: Non-standard function to write in filterbank (.fil) format. Please use Waterfall.')\n    n_bytes = int((self.header[b'nbits'] / 8))\n    with open(filename_out, 'wb') as fileh:\n        fileh.write(generate_sigproc_header(self))\n        j = self.data\n        if (n_bytes == 4):\n            np.float32(j.ravel()).tofile(fileh)\n        elif (n_bytes == 2):\n            np.int16(j.ravel()).tofile(fileh)\n        elif (n_bytes == 1):\n            np.int8(j.ravel()).tofile(fileh)", "docstring": "Write data to blimpy file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "codesearchnet"}
{"code": "def ParseUserEngagedRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = WindowsTimelineUserEngagedEventData()\n    event_data.package_identifier = self._GetRowValue(\n        query_hash, row, 'PackageName')\n\n    \n    \n    payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))\n    payload_json_string = payload_json_bytes.decode('utf-8')\n    payload = json.loads(payload_json_string)\n\n    if 'reportingApp' in payload:\n      event_data.reporting_app = payload['reportingApp']\n    if 'activeDurationSeconds' in payload:\n      event_data.active_duration_seconds = int(payload['activeDurationSeconds'])\n\n    timestamp = self._GetRowValue(query_hash, row, 'StartTime')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_START)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a timeline row that describes a user interacting with an app.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def allreduce_grads_hierarchical(all_grads, devices, average=False):\n    num_gpu = len(devices)\n    assert (num_gpu == 8), num_gpu\n    assert (len(all_grads) == num_gpu), len(all_grads)\n    group_size = (num_gpu \n    agg_all_grads = []\n    for (varid, grads) in enumerate(zip(*all_grads)):\n        g0_main_gpu = (varid % num_gpu)\n        g1_main_gpu = ((g0_main_gpu + group_size) % num_gpu)\n        g0_start = (0 if (g0_main_gpu < group_size) else group_size)\n        g1_start = (0 if (g1_main_gpu < group_size) else group_size)\n        assert (g0_start != g1_start)\n        g0_grads = grads[g0_start:(g0_start + group_size)]\n        g1_grads = grads[g1_start:(g1_start + group_size)]\n        with tf.device(devices[g0_main_gpu]):\n            g0_agg = tf.add_n(g0_grads, name='group0_agg')\n        with tf.device(devices[g1_main_gpu]):\n            g1_agg = tf.add_n(g1_grads, name='group1_agg')\n            g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')\n        with tf.device(devices[g0_main_gpu]):\n            g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')\n        agg_grads = []\n        for k in range(num_gpu):\n            if ((k < group_size) == (g0_main_gpu < group_size)):\n                main_gpu = g0_total_agg\n            else:\n                main_gpu = g1_total_agg\n            with tf.device(devices[k]):\n                if (not average):\n                    device_total_agg = tf.identity(main_gpu, name='device{}_total_agg'.format(k))\n                else:\n                    device_total_agg = tf.multiply(main_gpu, (1.0 / num_gpu), name='device{}_total_agg'.format(k))\n                agg_grads.append(device_total_agg)\n        agg_all_grads.append(agg_grads)\n    agg_all_grads = list(zip(*agg_all_grads))\n    return agg_all_grads", "docstring": "Hierarchical allreduce for DGX-1 system.\n\nArgs:\nall_grads (K x N): List of list of gradients. N is the number of variables.\ndevices ([str]): K str for the K devices.\naverage (bool): average gradients or not.\n\nReturns:\n(K x N): same as input, but each grad is replaced by the average over K lists.", "source": "codesearchnet"}
{"code": "def convert_outlook_msg(msg_bytes):\n    \n    if not is_outlook_msg(msg_bytes):\n        raise ValueError(\"The supplied bytes are not an Outlook MSG file\")\n    orig_dir = os.getcwd()\n    tmp_dir = tempfile.mkdtemp()\n    os.chdir(tmp_dir)\n    with open(\"sample.msg\", \"wb\") as msg_file:\n        msg_file.write(msg_bytes)\n    try:\n        subprocess.check_call([\"msgconvert\", \"sample.msg\"],\n                              stdout=null_file, stderr=null_file)\n        eml_path = \"sample.eml\"\n        with open(eml_path, \"rb\") as eml_file:\n            rfc822 = eml_file.read()\n    except FileNotFoundError:\n        raise EmailParserError(\n            \"Failed to convert Outlook MSG: msgconvert utility not found\")\n    finally:\n        os.chdir(orig_dir)\n        shutil.rmtree(tmp_dir)\n\n    return rfc822", "docstring": "Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to\nstandard RFC 822 format\n\nArgs:\nmsg_bytes (bytes): the content of the .msg file\n\nReturns:\nA RFC 822 string", "source": "juraj-google-style"}
{"code": "def use_color(setting):\n    if (setting not in COLOR_CHOICES):\n        raise InvalidColorSetting(setting)\n    return ((setting == 'always') or ((setting == 'auto') and sys.stdout.isatty() and terminal_supports_color))", "docstring": "Choose whether to use color based on the command argument.\n\nArgs:\nsetting - Either `auto`, `always`, or `never`", "source": "codesearchnet"}
{"code": "def init_pool_generator(gens, random_seed=None, id_queue=None):\n    global _SHARED_SEQUENCES\n    _SHARED_SEQUENCES = gens\n    worker_proc = multiprocessing.current_process()\n    worker_proc.name = f'Keras_worker_{worker_proc.name}'\n    if random_seed is not None:\n        np.random.seed(random_seed + worker_proc.ident)\n    if id_queue is not None:\n        id_queue.put(worker_proc.ident, block=True, timeout=0.1)", "docstring": "Initializer function for pool workers.\n\nArgs:\ngens: State which should be made available to worker processes.\nrandom_seed: An optional value with which to seed child processes.\nid_queue: A multiprocessing Queue of worker ids.\nThis is used to indicate that a worker process\nwas created by Keras.", "source": "github-repos"}
{"code": "def get_all(self, seq_set: SequenceSet) \\\n            -> Sequence[Tuple[int, CachedMessage]]:\n        \n        if seq_set.uid:\n            all_uids = seq_set.flatten(self.max_uid) & self._uids\n            return [(seq, self._cache[uid])\n                    for seq, uid in enumerate(self._sorted, 1)\n                    if uid in all_uids]\n        else:\n            all_seqs = seq_set.flatten(self.exists)\n            return [(seq, self._cache[uid])\n                    for seq, uid in enumerate(self._sorted, 1)\n                    if seq in all_seqs]", "docstring": "Return the cached messages, and their sequence numbers, for the\ngiven sequence set.\n\nArgs:\nseq_set: The message sequence set.", "source": "juraj-google-style"}
{"code": "def angle_to_distance(angle, units='metric'):\n    \n    distance = math.radians(angle) * BODY_RADIUS\n\n    if units in ('km', 'metric'):\n        return distance\n    elif units in ('sm', 'imperial', 'US customary'):\n        return distance / STATUTE_MILE\n    elif units in ('nm', 'nautical'):\n        return distance / NAUTICAL_MILE\n    else:\n        raise ValueError('Unknown units type %r' % units)", "docstring": "Convert angle in to distance along a great circle.\n\nArgs:\nangle (float): Angle in degrees to convert to distance\nunits (str): Unit type to be used for distances\n\nReturns:\nfloat: Distance in ``units``\n\nRaises:\nValueError: Unknown value for ``units``", "source": "juraj-google-style"}
{"code": "def update_dtype(self, attr_name, index, dtype):\n    attr = self._node.attr[attr_name]\n    num_types = 0\n    if attr.HasField('list'):\n        types = attr.list.type\n        num_types = len(types)\n        if num_types > index:\n            types[index] = dtype\n            return\n    elif attr.HasField('type'):\n        num_types = 1\n        if index == 0:\n            attr.type = dtype\n            return\n    raise ValueError(f'`index` {index:d} is out of range for node({self._node.name}).attr({attr_name}), which has {num_types:d} elements.')", "docstring": "Changes the type of a given input.\n\nArgs:\nattr_name: The NodeDef attribute containing the type to change.\nindex: The index of the input type to change.\ndtype: The type to change to.", "source": "github-repos"}
{"code": "def getPageType(name,number=False):\n    \n    if not name in pageNames():\n        return None\n    pageType=PyOrigin.Pages(name).GetType()\n    if number:\n        return str(pageType)\n    if pageType==1:\n        return \"matrix\"\n    if pageType==2:\n        return \"book\"\n    if pageType==3:\n        return \"graph\"\n    if pageType==4:\n        return \"layout\"\n    if pageType==5:\n        return \"notes\"", "docstring": "Returns the type of the page with that name.\nIf that name doesn't exist, None is returned.\n\nArgs:\nname (str): name of the page to get the folder from\nnumber (bool): if True, return numbers (i.e., a graph will be 3)\nif False, return words where appropriate (i.e, \"graph\")\n\nReturns:\nstring of the type of object the page is", "source": "juraj-google-style"}
{"code": "def add_options(cls, parser):\n    kwargs = {'action': 'store', 'default': '', 'parse_from_config': True, 'comma_separated_list': True}\n    for num in range(cls.min_check, cls.max_check):\n        parser.add_option(None, '--filename_check{}'.format(num), **kwargs)", "docstring": "Required by flake8\nadd the possible options, called first\n\nArgs:\nparser (OptionsManager):", "source": "codesearchnet"}
{"code": "def giant_text_sqltype(dialect: Dialect) -> str:\n    if (dialect.name == SqlaDialectName.SQLSERVER):\n        return 'NVARCHAR(MAX)'\n    elif (dialect.name == SqlaDialectName.MYSQL):\n        return 'LONGTEXT'\n    else:\n        raise ValueError('Unknown dialect: {}'.format(dialect.name))", "docstring": "Returns the SQL column type used to make very large text columns for a\ngiven dialect.\n\nArgs:\ndialect: a SQLAlchemy :class:`Dialect`\nReturns:\nthe SQL data type of \"giant text\", typically 'LONGTEXT' for MySQL\nand 'NVARCHAR(MAX)' for SQL Server.", "source": "codesearchnet"}
{"code": "def read_molden(inputfile, start_index=0, get_bonds=True):\n    \n    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian\n    with open(inputfile, 'r') as f:\n        found = False\n        while not found:\n            line = f.readline()\n            if '[N_GEO]' in line:\n                found = True\n                number_of_molecules = int(f.readline().strip())\n\n        energies = []\n        found = False\n        while not found:\n            line = f.readline()\n            if 'energy' in line:\n                found = True\n                for _ in range(number_of_molecules):\n                    energies.append(float(f.readline().strip()))\n\n        found = False\n        while not found:\n            line = f.readline()\n            if '[GEOMETRIES] (XYZ)' in line:\n                found = True\n                current_line = f.tell()\n                number_of_atoms = int(f.readline().strip())\n                f.seek(current_line)\n\n        cartesians = []\n        for energy in energies:\n            cartesian = Cartesian.read_xyz(\n                f, start_index=start_index, get_bonds=get_bonds,\n                nrows=number_of_atoms, engine='python')\n            cartesian.metadata['energy'] = energy\n            cartesians.append(cartesian)\n    return cartesians", "docstring": "Read a molden file.\n\nArgs:\ninputfile (str):\nstart_index (int):\n\nReturns:\nlist: A list containing :class:`~chemcoord.Cartesian` is returned.", "source": "juraj-google-style"}
{"code": "def most_uncertain_by_mask(self, mask, y):\n        \n        idxs = np.where(mask)[0]\n        \n        return idxs[np.argsort(np.abs(self.probs[idxs,y]-(1/self.num_classes)))[:4]]", "docstring": "Extracts the first 4 most uncertain indexes from the ordered list of probabilities\n\nArguments:\nmask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else\ny (int): the selected class\n\nReturns:\nidxs (ndarray): An array of indexes of length 4", "source": "juraj-google-style"}
{"code": "def _write_to_command_buffer(self, to_write):\n    np.copyto(self._command_bool_ptr, True)\n    to_write += '0'\n    input_bytes = str.encode(to_write)\n    for (index, val) in enumerate(input_bytes):\n        self._command_buffer_ptr[index] = val", "docstring": "Write input to the command buffer.  Reformat input string to the correct format.\n\nArgs:\nto_write (str): The string to write to the command buffer.", "source": "codesearchnet"}
{"code": "def get_path(name, *default):\n    global g_config\n    value = get(name, *default)\n    if (value is None):\n        return None\n    return proj_path(value)", "docstring": "Get config value as path relative to the project directory.\n\nThis allows easily defining the project configuration within the fabfile\nas always relative to that fabfile.\n\nArgs:\nname (str):\nThe name of the config value containing the path.\n*default (Any):\nIf given and the key doesn't not exist, this will be returned\ninstead. If it's not given and the config value does not exist,\nAttributeError will be raised\n\nReturns:\nThe requested config value. This is one of the global values defined\nin this file. If the value does not exist it will return `default` if\ngive or raise `AttributeError`.\n\nRaises:\nAttributeError: If the value does not exist and `default` was not given.", "source": "codesearchnet"}
{"code": "def get_tensors(graph):\n    if not isinstance(graph, ops.Graph):\n        raise TypeError('Expected a graph, got: {}'.format(type(graph)))\n    ts = []\n    for op in graph.get_operations():\n        ts += op.outputs\n    return ts", "docstring": "get all the tensors which are input or output of an op in the graph.\n\nArgs:\ngraph: a `tf.Graph`.\nReturns:\nA list of `tf.Tensor`.\nRaises:\nTypeError: if graph is not a `tf.Graph`.", "source": "github-repos"}
{"code": "def _ParseLogLine(self, parser_mediator, structure):\n    \n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=structure.date_time)\n      \n      date_time.is_local_time = True\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    event_data = SophosAVLogEventData()\n    event_data.text = structure.text\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED,\n        time_zone=parser_mediator.timezone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log line.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def with_headers(self, headers):\n    copy = headers.copy()\n    copy.update(self._headers)\n    return self.__copy_and_set('headers', copy)", "docstring": "Adds headers to the request\n\nArgs:\nheaders (dict): The headers to add the request headers\n\nReturns:\nThe request builder instance in order to chain calls", "source": "codesearchnet"}
{"code": "def get(self):\n    raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Wait for the result of `RemoteValue` and return the tensor result.\n\nThis makes the value concrete by copying the remote tensor to local.\n\nReturns:\nThe actual output (in the form of `tf.Tensor`s) of the `tf.function`\nassociated with this `RemoteValue`, previously returned by a\n`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` call.\nThis can be a single Tensor, or a structure of Tensors, depending on the\noutput of the `tf.function`.\n\nRaises:\ntf.errors.CancelledError: If the function that produces this `RemoteValue`\nis aborted or cancelled due to failure.", "source": "github-repos"}
{"code": "def size_str(size_in_bytes):\n  \n  if not size_in_bytes:\n    return \"?? GiB\"\n\n  size_in_bytes = float(size_in_bytes)\n  for (name, size_bytes) in _NAME_LIST:\n    value = size_in_bytes / size_bytes\n    if value >= 1.0:\n      return \"{:.2f} {}\".format(value, name)\n  return \"{} {}\".format(int(size_in_bytes), \"bytes\")", "docstring": "Returns a human readable size string.\n\nIf size_in_bytes is None, then returns \"?? GiB\".\n\nFor example `size_str(1.5 * tfds.units.GiB) == \"1.50 GiB\"`.\n\nArgs:\nsize_in_bytes: `int` or `None`, the size, in bytes, that we want to\nformat as a human-readable size string.", "source": "juraj-google-style"}
{"code": "def actnorm_center(name, x, reverse=False, init=False):\n    shape = common_layers.shape_list(x)\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        assert ((len(shape) == 2) or (len(shape) == 4))\n        if (len(shape) == 2):\n            x_mean = tf.reduce_mean(x, [0], keepdims=True)\n            b = get_variable_ddi('b', (1, shape[1]), initial_value=(- x_mean), init=init)\n        elif (len(shape) == 4):\n            x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)\n            b = get_variable_ddi('b', (1, 1, 1, shape[3]), initial_value=(- x_mean), init=init)\n        if (not reverse):\n            x += b\n        else:\n            x -= b\n        return x", "docstring": "Add a bias to x.\n\nInitialize such that the output of the first minibatch is zero centered\nper channel.\n\nArgs:\nname: scope\nx: 2-D or 4-D Tensor.\nreverse: Forward or backward operation.\ninit: data-dependent initialization.\n\nReturns:\nx_center: (x + b), if reverse is True and (x - b) otherwise.", "source": "codesearchnet"}
{"code": "def query(self, time_indices):\n    \n    if self._disposed:\n      raise ValueError(\n          'Cannot query: this _WatchStore instance is already disposed')\n    if not isinstance(time_indices, (tuple, list)):\n      time_indices = [time_indices]\n    output = []\n    for time_index in time_indices:\n      if isinstance(self._data[time_index], _TensorValueDiscarded):\n        output.append(None)\n      else:\n        data_item = self._data[time_index]\n        if (hasattr(data_item, 'dtype') and\n            tensor_helper.translate_dtype(data_item.dtype) == 'string'):\n          _, _, data_item = tensor_helper.array_view(data_item)\n          data_item = np.array(\n              tensor_helper.process_buffers_for_display(data_item),\n              dtype=np.object)\n        output.append(data_item)\n\n    return output", "docstring": "Query the values at given time indices.\n\nArgs:\ntime_indices: 0-based time indices to query, as a `list` of `int`.\n\nReturns:\nValues as a list of `numpy.ndarray` (for time indices in memory) or\n`None` (for time indices discarded).", "source": "juraj-google-style"}
{"code": "def create_socket(self):\n    socket_path = os.path.join(self.config_dir, 'pueue.sock')\n    try:\n        if os.path.exists(socket_path):\n            os.remove(socket_path)\n        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        self.socket.bind(socket_path)\n        self.socket.setblocking(0)\n        self.socket.listen(0)\n        os.chmod(socket_path, stat.S_IRWXU)\n    except Exception:\n        self.logger.error(\"Daemon couldn't socket. Aborting\")\n        self.logger.exception()\n        sys.exit(1)\n    return self.socket", "docstring": "Create a socket for the daemon, depending on the directory location.\n\nArgs:\nconfig_dir (str): The absolute path to the config directory used by the daemon.\n\nReturns:\nsocket.socket: The daemon socket. Clients connect to this socket.", "source": "codesearchnet"}
{"code": "def _print_primitive_field(self, field_name: str, field: descriptor.FieldDescriptor, value: Any) -> None:\n    if proto_utils.field_is_repeated(field):\n        string_values = []\n        elements = []\n        extensions_found = False\n        nonnull_values_found = False\n        for primitive in value:\n            wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)\n            string_values.append(wrapper.json_value())\n            elements.append(wrapper.get_element())\n            nonnull_values_found = nonnull_values_found or wrapper.has_value()\n            extensions_found = extensions_found or wrapper.has_element()\n        if nonnull_values_found:\n            self.generator.add_field(field_name)\n            self._print_list(string_values, self.generator.push)\n        if extensions_found:\n            if nonnull_values_found:\n                self.generator.push(',')\n                self.generator.add_newline()\n            self.generator.add_field(f'_{field_name}')\n            self._print_list(elements, self._print)\n    elif self.json_format == _FhirJsonFormat.ANALYTIC and field.message_type.name == 'ReferenceId':\n        str_value = proto_utils.get_value_at_field(value, 'value')\n        self.generator.add_field(field_name, f'\"{str_value}\"')\n    else:\n        wrapper = self.primitive_handler.primitive_wrapper_from_primitive(value)\n        if wrapper.has_value():\n            self.generator.add_field(field_name, wrapper.json_value())\n        if wrapper.has_element() and self.json_format == _FhirJsonFormat.PURE:\n            if wrapper.has_value():\n                self.generator.push(',')\n                self.generator.add_newline()\n            self.generator.add_field(f'_{field_name}')\n            self._print(wrapper.get_element())", "docstring": "Prints the primitive field.\n\nArgs:\nfield_name: The name of the field.\nfield: The FielDescriptor whose contents to print.\nvalue: The value present at field to print.", "source": "github-repos"}
{"code": "def one_of(self, chset: str) -> str:\n    res = self.peek()\n    if (res in chset):\n        self.offset += 1\n        return res\n    raise UnexpectedInput(self, ('one of ' + chset))", "docstring": "Parse one character form the specified set.\n\nArgs:\nchset: string of characters to try as alternatives.\n\nReturns:\nThe character that was actually matched.\n\nRaises:\nUnexpectedInput: If the next character is not in `chset`.", "source": "codesearchnet"}
{"code": "def gather_continuous_embeddings(self, word_embeddings: torch.Tensor, continuous_embeddings: List[torch.Tensor], image_patch_input_indices: torch.Tensor) -> torch.Tensor:\n    if not word_embeddings.shape[0] == len(continuous_embeddings):\n        raise ValueError(f'Batch sizes must match! Got len(continuous_embeddings)={len(continuous_embeddings)!r} and word_embeddings.shape[0]={word_embeddings.shape[0]!r}')\n    output_embeddings = word_embeddings.clone()\n    for batch_idx in range(word_embeddings.shape[0]):\n        dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]\n        src_indices = image_patch_input_indices[batch_idx][dst_indices]\n        if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:\n            raise ValueError(f'Number of continuous embeddings continuous_embeddings[batch_idx].shape={continuous_embeddings[batch_idx].shape!r} does not match number of continuous token ids src_indices.shape={src_indices.shape!r} in batch element {batch_idx}.')\n        output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices].to(output_embeddings.device)\n    return output_embeddings", "docstring": "This function places the continuous_embeddings into the word_embeddings at the locations\nindicated by image_patch_input_indices. Different batch elements can have different numbers of continuous\nembeddings.\n\nArgs:\nword_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nTensor of word embeddings.\ncontinuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):\nTensor of continuous embeddings. The length of the list is the batch size. Each entry is shape\n[num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative\nindices in image_patch_input_indices for that batch element.\nimage_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nTensor of indices of the image patches in the input_ids tensor.", "source": "github-repos"}
{"code": "def get_if_set(self, addresses):\n    with self._lock:\n        results = []\n        for add in addresses:\n            results.append(self._get_if_set(add))\n        return results", "docstring": "Returns the value set in this context, or None, for each address in\naddresses.\n\nArgs:\naddresses (list of str): The addresses to return values for, if set\nwithin this context.\n\nReturns:\n(list): bytes set at the address or None", "source": "codesearchnet"}
{"code": "def for_all_test_methods(decorator: Callable[..., Any], *args, **kwargs) -> Callable[[_TC], _TC]:\n\n    def all_test_methods_impl(cls: _TC) -> _TC:\n        \n        for name in dir(cls):\n            value = getattr(cls, name)\n            if callable(value) and name.startswith('test') and (name != 'test_session'):\n                setattr(cls, name, decorator(*args, **kwargs)(value))\n        return cls\n    return all_test_methods_impl", "docstring": "Generate class-level decorator from given method-level decorator.\n\nIt is expected for the given decorator to take some arguments and return\na method that is then called on the test method to produce a decorated\nmethod.\n\nArgs:\ndecorator: The decorator to apply.\n*args: Positional arguments\n**kwargs: Keyword arguments\nReturns: Function that will decorate a given classes test methods with the\ndecorator.", "source": "github-repos"}
{"code": "def space_to_batch_direct(input_array, block_shape, paddings):\n    input_array = np.array(input_array)\n    block_shape = np.array(block_shape)\n    num_block_dims = len(block_shape)\n    paddings = np.array(paddings).reshape((len(block_shape), 2))\n    padded = np.pad(input_array, pad_width=[[0, 0]] + list(paddings) + [[0, 0]] * (input_array.ndim - 1 - num_block_dims), mode='constant')\n    reshaped_padded_shape = [input_array.shape[0]]\n    output_shape = [input_array.shape[0] * np.prod(block_shape)]\n    for block_dim, block_shape_value in enumerate(block_shape):\n        reduced_size = padded.shape[block_dim + 1] \n        reshaped_padded_shape.append(reduced_size)\n        output_shape.append(reduced_size)\n        reshaped_padded_shape.append(block_shape_value)\n    reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:])\n    output_shape.extend(input_array.shape[num_block_dims + 1:])\n    reshaped_padded = padded.reshape(reshaped_padded_shape)\n    permuted_reshaped_padded = np.transpose(reshaped_padded, list(np.arange(num_block_dims) * 2 + 2) + [0] + list(np.arange(num_block_dims) * 2 + 1) + list(np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims * 2))\n    return permuted_reshaped_padded.reshape(output_shape)", "docstring": "Direct Python implementation of space-to-batch conversion.\n\nThis is used for tests only.\n\nArgs:\ninput_array: N-D array\nblock_shape: 1-D array of shape [num_block_dims].\npaddings: 2-D array of shape [num_block_dims, 2].\n\nReturns:\nConverted tensor.", "source": "github-repos"}
{"code": "def load(self, binary: pyquil.Program) -> 'QuantumFlowQVM':\n    assert (self.status in ['connected', 'done'])\n    prog = quil_to_program(str(binary))\n    self._prog = prog\n    self.program = binary\n    self.status = 'loaded'\n    return self", "docstring": "Load a pyQuil program, and initialize QVM into a fresh state.\n\nArgs:\nbinary: A pyQuil program", "source": "codesearchnet"}
{"code": "def summary(self, v):\n    if hasattr(v, '__iter__'):\n        self._summary = self._summary_cls(v)\n    else:\n        self._summary = self._summary_cls(float(v))", "docstring": "Set summary.\n\nArgs:\nv: A new summary. It could be a single number or lists.", "source": "codesearchnet"}
{"code": "def get_variable_name_from_bird(bird_conf):\n    bird_variable_pattern = re.compile('\\n        ^\\\\s*\\n        define\\\\s+\\n        (?P<name>\\\\S+\\\\b)\\n        \\\\s+\\n        =\\n        ', re.VERBOSE)\n    with open(bird_conf, 'r') as content:\n        for line in content.readlines():\n            variable_match = bird_variable_pattern.search(line)\n            if variable_match:\n                return variable_match.group('name')\n    return None", "docstring": "Return the variable name set in Bird configuration.\n\nThe variable name in Bird configuration is set with the keyword 'define',\nhere is an example:\n\ndefine ACAST_PS_ADVERTISE =\n\nand we exract the string between the word 'define' and the equals sign.\n\nArguments:\nbird_conf (str): The absolute file name path of Bird configuration.\n\nReturns:\nThe variable name as a string or None if it isn't found.", "source": "codesearchnet"}
{"code": "def update_headers(self, headers):\n        \n        check_type(headers, dict, may_be_none=False)\n        self._req_session.headers.update(headers)", "docstring": "Update the HTTP headers used for requests in this session.\n\nNote: Updates provided by the dictionary passed as the `headers`\nparameter to this method are merged into the session headers by adding\nnew key-value pairs and/or updating the values of existing keys. The\nsession headers are not replaced by the provided dictionary.\n\nArgs:\nheaders(dict): Updates to the current session headers.", "source": "juraj-google-style"}
{"code": "def map_(function, *structures, **kwargs):\n    flatten = kwargs.pop('flatten', False)\n    assert (not kwargs), 'map() got unexpected keyword arguments.'\n\n    def impl(function, *structures):\n        if (len(structures) == 0):\n            return structures\n        if all((isinstance(s, (tuple, list)) for s in structures)):\n            if (len(set((len(x) for x in structures))) > 1):\n                raise ValueError('Cannot merge tuples or lists of different length.')\n            args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))\n            if hasattr(structures[0], '_fields'):\n                return type(structures[0])(*args)\n            else:\n                return type(structures[0])(args)\n        if all((isinstance(s, dict) for s in structures)):\n            if (len(set((frozenset(x.keys()) for x in structures))) > 1):\n                raise ValueError('Cannot merge dicts with different keys.')\n            merged = {k: impl(function, *(s[k] for s in structures)) for k in structures[0]}\n            return type(structures[0])(merged)\n        return function(*structures)\n    result = impl(function, *structures)\n    if flatten:\n        result = flatten_(result)\n    return result", "docstring": "Apply a function to every element in a nested structure.\n\nIf multiple structures are provided as input, their structure must match and\nthe function will be applied to corresponding groups of elements. The nested\nstructure can consist of any combination of lists, tuples, and dicts.\n\nArgs:\nfunction: The function to apply to the elements of the structure. Receives\none argument for every structure that is provided.\n*structures: One of more nested structures.\nflatten: Whether to flatten the resulting structure into a tuple. Keys of\ndictionaries will be discarded.\n\nReturns:\nNested structure.", "source": "codesearchnet"}
{"code": "def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):\n        \n        ax = plt.gca()\n\n        plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)\n\n        \n        if self.header[b'foff'] < 0:\n            plot_data = plot_data[..., ::-1] \n            plot_f = plot_f[::-1]\n\n        try:\n            plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')\n        except:\n            plot_kurtosis = plot_data*0.0\n\n        plt.plot(plot_f, plot_kurtosis, **kwargs)\n        plt.ylabel(\"Kurtosis\")\n        plt.xlabel(\"Frequency [MHz]\")\n\n        plt.xlim(plot_f[0], plot_f[-1])", "docstring": "Plot kurtosis\n\nArgs:\nf_start (float): start frequency, in MHz\nf_stop (float): stop frequency, in MHz\nkwargs: keyword args to be passed to matplotlib imshow()", "source": "juraj-google-style"}
{"code": "def find_wells_without_curve(self, mnemonic, alias=None):\n        \n        return Project([w for w in self if w.get_curve(mnemonic, alias=alias) is None])", "docstring": "Returns a new Project with only the wells which DO NOT have the named curve.\n\nArgs:\nmenmonic (str): the name of the curve to look for.\nalias (dict): a welly alias dictionary.\n\nReturns:\nproject.", "source": "juraj-google-style"}
{"code": "def parse_genes(transcripts):\n    \n    \n    genes_to_transcripts = {}\n    \n    \n    genes = []\n\n    hgvs_identifier = None\n    canonical_transcript = None\n    exon = None\n    \n    for transcript in transcripts:\n        \n        hgnc_id = transcript['hgnc_id']\n        hgnc_symbol = transcript['hgnc_symbol']\n\n        if (transcript['is_canonical'] and transcript.get('coding_sequence_name')):\n            hgvs_identifier = transcript.get('coding_sequence_name')\n            canonical_transcript = transcript['transcript_id']\n            exon = transcript['exon']\n\n        \n        if hgnc_id:\n            if hgnc_id in genes_to_transcripts:\n                genes_to_transcripts[hgnc_id].append(transcript)\n            else:\n                genes_to_transcripts[hgnc_id] = [transcript]\n        else:\n            if hgnc_symbol:\n                if hgnc_symbol in genes_to_transcripts:\n                    genes_to_transcripts[hgnc_symbol].append(transcript)\n                else:\n                    genes_to_transcripts[hgnc_symbol] = [transcript]\n\n    \n    \n    \n    \n    for gene_id in genes_to_transcripts:\n        \n        gene_transcripts = genes_to_transcripts[gene_id]\n        \n        most_severe_consequence = None\n        \n        most_severe_rank = float('inf')\n        \n        most_severe_transcript = None\n        \n        most_severe_region = None\n        \n        most_severe_sift = None\n        most_severe_polyphen = None\n        \n        \n        for transcript in gene_transcripts:\n            hgnc_id = transcript['hgnc_id']\n            hgnc_symbol = transcript['hgnc_symbol']\n            \n            for consequence in transcript['functional_annotations']:\n                \n                \n                new_rank = SO_TERMS[consequence]['rank']\n                \n                if new_rank < most_severe_rank:\n                    \n                    most_severe_rank = new_rank\n                    most_severe_consequence = consequence\n                    most_severe_transcript = transcript\n                    most_severe_sift = transcript['sift_prediction']\n                    most_severe_polyphen = transcript['polyphen_prediction']\n                    most_severe_region = SO_TERMS[consequence]['region']\n\n        gene = {\n            'transcripts': gene_transcripts,\n            'most_severe_transcript': most_severe_transcript,\n            'most_severe_consequence': most_severe_consequence,\n            'most_severe_sift': most_severe_sift,\n            'most_severe_polyphen': most_severe_polyphen,\n            'hgnc_id': hgnc_id,\n            'hgnc_symbol': hgnc_symbol,\n            'region_annotation': most_severe_region,\n            'hgvs_identifier': transcript['coding_sequence_name'],\n            'canonical_transcript': transcript['transcript_id'],\n            'exon': transcript['exon'],\n        }\n        genes.append(gene)    \n\n    return genes", "docstring": "Parse transcript information and get the gene information from there.\n\nUse hgnc_id as identifier for genes and ensembl transcript id to identify transcripts\n\nArgs:\ntranscripts(iterable(dict))\n\nReturns:\ngenes (list(dict)): A list with dictionaries that represents genes", "source": "juraj-google-style"}
{"code": "def forall(self, vars_list: List[str]) -> 'TensorFluent':\n    return self._aggregation_op(tf.reduce_all, self, vars_list)", "docstring": "Returns the TensorFluent for the forall aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the forall aggregation function.", "source": "codesearchnet"}
{"code": "def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:\n    \n\n    doc_string = json.dumps(doc, sort_keys=True)\n    return _create_hash(doc_string)", "docstring": "Create hash Id from edge record\n\nArgs:\nedge (Mapping[str, Any]): edge record to create hash from\n\nReturns:\nstr: Murmur3 128 bit hash", "source": "juraj-google-style"}
{"code": "def add_trace(self, *args, **kwargs):\n    args = list(args)\n    kwargs = kwargs.copy()\n    for fn in self._functions.values():\n        if self._expects_training_arg:\n\n            def trace_with_training(value, fn=fn):\n                utils.set_training_arg(value, self._training_arg_index, args, kwargs)\n                add_trace_to_queue(fn, args, kwargs, value)\n            trace_with_training(True)\n            trace_with_training(False)\n        else:\n            add_trace_to_queue(fn, args, kwargs)", "docstring": "Traces all functions with the same args and kwargs.\n\nArgs:\n*args: Positional args passed to the original function.\n**kwargs: Keyword args passed to the original function.", "source": "github-repos"}
{"code": "def compute_context_repetition_mask(self, input_ids: torch.LongTensor) -> torch.LongTensor:\n    self._check_input_ids_shape(input_ids)\n    batch_size, _ = input_ids.shape\n    state = SynthIDTextWatermarkState(batch_size=batch_size, ngram_len=self.ngram_len, context_history_size=self.context_history_size, device=self.device)\n    contexts = input_ids[:, :-1].unfold(dimension=1, size=self.ngram_len - 1, step=1)\n    _, num_contexts, _ = contexts.shape\n    are_repeated_contexts = []\n    for i in range(num_contexts):\n        context = contexts[:, i, :]\n        hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)\n        context_hash = self.accumulate_hash(hash_result, context)[:, None]\n        is_repeated_context = (state.context_history == context_hash).any(dim=1, keepdim=True)\n        are_repeated_contexts.append(is_repeated_context)\n        state.context_history = torch.concat((context_hash, state.context_history), dim=1)[:, :-1]\n    are_repeated_contexts = torch.concat(are_repeated_contexts, dim=1)\n    return torch.logical_not(are_repeated_contexts)", "docstring": "Computes repetition mask.\n\n0 and 1 stand for repeated and not repeated context n-1 grams respectively.\n\nArgs:\ninput_ids (`torch.LongTensor`):\nInput token ids (batch_size, input_len).\n\nReturns:\nRepetitions mask (batch_size, input_len - (ngram_len - 1)).", "source": "github-repos"}
{"code": "def clear_db(self):\n        \n        self.data_store.clear_db()\n\n        \n        self.plugin_manager.load_all_plugins()\n\n        \n        self._store_information()", "docstring": "Clear the Main Database of all samples and worker output.\nArgs:\nNone\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def change_disk_usage(self, usage_change, file_path, st_dev):\n    mount_point = self._mount_point_for_device(st_dev)\n    if mount_point:\n        total_size = mount_point['total_size']\n        if (total_size is not None):\n            if ((total_size - mount_point['used_size']) < usage_change):\n                self.raise_io_error(errno.ENOSPC, file_path)\n        mount_point['used_size'] += usage_change", "docstring": "Change the used disk space by the given amount.\n\nArgs:\nusage_change: Number of bytes added to the used space.\nIf negative, the used space will be decreased.\n\nfile_path: The path of the object needing the disk space.\n\nst_dev: The device ID for the respective file system.\n\nRaises:\nIOError: if usage_change exceeds the free file system space", "source": "codesearchnet"}
{"code": "def __init__(self, max_meter=None, band_types=None, capabilities=None,\n                 max_bands=None, max_color=None):\n        \n        super().__init__()\n        self.max_meter = max_meter\n        self.band_types = band_types\n        self.capabilities = capabilities\n        self.max_bands = max_bands\n        self.max_color = max_color", "docstring": "Create a MeterFeatures with the optional parameters below.\n\nArgs:\nmax_meter(int): Maximum number of meters.\nband_types (|MeterBandType_v0x04|):\nBitmaps of OFPMBT_* values supported.\ncapabilities (|MeterFlags_v0x04|): Bitmaps of \"ofp_meter_flags\".\nmax_bands(int): Maximum bands per meters\nmax_color(int): Maximum color value", "source": "juraj-google-style"}
{"code": "def create_backup(name):\n    r\n    if name in list_backups():\n        raise CommandExecutionError('Backup already present: {0}'.format(name))\n\n    ps_cmd = ['Backup-WebConfiguration',\n              '-Name', \"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    if cmd_ret['retcode'] != 0:\n        msg = 'Unable to backup web configuration: {0}\\nError: {1}' \\\n              ''.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n\n    return name in list_backups()", "docstring": "r'''\nBackup an IIS Configuration on the System.\n\n.. versionadded:: 2017.7.0\n\n.. note::\nBackups are stored in the ``$env:Windir\\System32\\inetsrv\\backup``\nfolder.\n\nArgs:\nname (str): The name to give the backup\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.create_backup good_config_20170209", "source": "juraj-google-style"}
{"code": "def has_types(self, types, all_=True):\n    func = (all if all_ else any)\n    return func([self.get_stim(t) for t in listify(types)])", "docstring": "Check whether the current component list matches all Stim types\nin the types argument.\n\nArgs:\ntypes (Stim, list): a Stim class or iterable of Stim classes.\nall_ (bool): if True, all input types must match; if False, at\nleast one input type must match.\n\nReturns:\nTrue if all passed types match at least one Stim in the component\nlist, otherwise False.", "source": "codesearchnet"}
{"code": "def interactive_console(self):\n    if (not self.running()):\n        raise RuntimeError(('VM %s is not running' % self._libvirt_.name))\n    virsh_command = ['virsh', '-c', config.get('libvirt_url'), 'console', self._libvirt_name()]\n    return utils.run_interactive_command(command=virsh_command)", "docstring": "Opens an interactive console\n\nReturns:\nlago.utils.CommandStatus: result of the virsh command execution", "source": "codesearchnet"}
{"code": "def measurement_key(val: Any, default: Any=RaiseTypeErrorIfNotProvided):\n    getter = getattr(val, '_measurement_key_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if (result is not NotImplemented):\n        return result\n    if (default is not RaiseTypeErrorIfNotProvided):\n        return default\n    if (getter is None):\n        raise TypeError(\"object of type '{}' has no _measurement_key_ method.\".format(type(val)))\n    raise TypeError(\"object of type '{}' does have a _measurement_key_ method, but it returned NotImplemented.\".format(type(val)))", "docstring": "Get the measurement key for the given value.\n\nArgs:\nval: The value which has the measurement key..\ndefault: Determines the fallback behavior when `val` doesn't have\na measurement key. If `default` is not set, a TypeError is raised.\nIf default is set to a value, that value is returned if the value\ndoes not have `_measurement_key_`.\n\nReturns:\nIf `val` has a `_measurement_key_` method and its result is not\n`NotImplemented`, that result is returned. Otherwise, if a default\nvalue was specified, the default value is returned.\n\nRaises:\nTypeError: `val` doesn't have a _measurement_key_ method (or that method\nreturned NotImplemented) and also no default value was specified.", "source": "codesearchnet"}
{"code": "def columns_exist(inspect_dataset):\n    \n\n    \n    if not hasattr(inspect_dataset, \"columns\"):\n        warnings.warn(\n            \"No columns list found in dataset; no autoinspection performed.\")\n        return\n    elif isinstance(inspect_dataset.columns[0], string_types):\n        columns = inspect_dataset.columns\n    elif isinstance(inspect_dataset.columns[0], dict) and \"name\" in inspect_dataset.columns[0]:\n        columns = [col['name'] for col in inspect_dataset.columns]\n    else:\n        raise AutoInspectError(\n            \"Unable to determine column names for this dataset.\")\n\n    create_multiple_expectations(\n        inspect_dataset, columns, \"expect_column_to_exist\")", "docstring": "This function will take a dataset and add expectations that each column present exists.\n\nArgs:\ninspect_dataset (great_expectations.dataset): The dataset to inspect and to which to add expectations.", "source": "juraj-google-style"}
{"code": "def create_db(file_pth):\n    \n    conn = sqlite3.connect(file_pth)\n    c = conn.cursor()\n\n    c.execute('DROP TABLE IF EXISTS library_spectra_source')\n    c.execute(\n              )\n\n    c.execute('DROP TABLE IF EXISTS metab_compound')\n    c.execute()\n\n    c.execute('DROP TABLE IF EXISTS library_spectra_meta')\n    c.execute(\n              )\n\n    c.execute('DROP TABLE IF EXISTS library_spectra')\n    c.execute(\n              )\n\n    c.execute('DROP TABLE IF EXISTS library_spectra_annotation')\n    c.execute(\n              )", "docstring": "Create an empty SQLite database for library spectra.\n\nExample:\n>>> from msp2db.db import create_db\n>>> db_pth = 'library.db'\n>>> create_db(file_pth=db_pth)\n\nArgs:\nfile_pth (str): File path for SQLite database", "source": "juraj-google-style"}
{"code": "def _get_image_nums_and_video_nums(self, input_ids: Optional[torch.LongTensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n    image_token_id = self.config.image_token_id\n    video_token_id = self.config.video_token_id\n    vision_start_token_id = self.config.vision_start_token_id\n    vision_start_mask = input_ids == vision_start_token_id\n    vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)\n    image_mask = input_ids == image_token_id\n    video_mask = input_ids == video_token_id\n    image_nums = torch.sum(vision_first_mask & image_mask, dim=1)\n    video_nums = torch.sum(vision_first_mask & video_mask, dim=1)\n    return (image_nums, video_nums)", "docstring": "Get the number of images and videos for each sample to calculate the separation length of the sample tensor.\nThese parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary.\n\nReturns:\nimage_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)\nvideo_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)", "source": "github-repos"}
{"code": "def set_values(self, values, separator='\\n', indent=4*' '):\n        \n        self._updated = True\n        self._multiline_value_joined = True\n        self._values = values\n        if separator == '\\n':\n            values.insert(0, '')\n            separator = separator + indent\n        self._value = separator.join(values)", "docstring": "Sets the value to a given list of options, e.g. multi-line values\n\nArgs:\nvalues (list): list of values\nseparator (str): separator for values, default: line separator\nindent (str): indentation depth in case of line separator", "source": "juraj-google-style"}
{"code": "def from_api_repr(cls, resource, client):\n    job_ref_properties = resource.get('jobReference', {'projectId': client.project})\n    job_ref = _JobReference._from_api_repr(job_ref_properties)\n    job = cls(job_ref, client)\n    resource['jobReference'] = job_ref_properties\n    job._properties = resource\n    return job", "docstring": "Construct an UnknownJob from the JSON representation.\n\nArgs:\nresource (dict): JSON representation of a job.\nclient (google.cloud.bigquery.client.Client):\nClient connected to BigQuery API.\n\nReturns:\nUnknownJob: Job corresponding to the resource.", "source": "codesearchnet"}
{"code": "def DeleteOldFeedItems(client, feed_item_ids, feed):\n    if (not feed_item_ids):\n        return\n    feed_item_service = client.GetService('FeedItemService', 'v201809')\n    operations = [{'operator': 'REMOVE', 'operand': {'feedId': feed['id'], 'feedItemId': feed_item_id}} for feed_item_id in feed_item_ids]\n    feed_item_service.mutate(operations)", "docstring": "Deletes the old feed items for which extension settings have been created.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_item_ids: a list of Feed Item Ids.\nfeed: the Feed containing the given Feed Item Ids.", "source": "codesearchnet"}
{"code": "def _normalize_feature_columns(feature_columns):\n    if isinstance(feature_columns, _FeatureColumn):\n        feature_columns = [feature_columns]\n    if isinstance(feature_columns, collections_abc.Iterator):\n        feature_columns = list(feature_columns)\n    if isinstance(feature_columns, dict):\n        raise ValueError('Expected feature_columns to be iterable, found dict.')\n    for column in feature_columns:\n        if not isinstance(column, _FeatureColumn):\n            raise ValueError('Items of feature_columns must be a _FeatureColumn. Given (type {}): {}.'.format(type(column), column))\n    if not feature_columns:\n        raise ValueError('feature_columns must not be empty.')\n    name_to_column = {}\n    for column in feature_columns:\n        if column.name in name_to_column:\n            raise ValueError('Duplicate feature column name found for columns: {} and {}. This usually means that these columns refer to same base feature. Either one must be discarded or a duplicated but renamed item must be inserted in features dict.'.format(column, name_to_column[column.name]))\n        name_to_column[column.name] = column\n    return feature_columns", "docstring": "Normalizes the `feature_columns` input.\n\nThis method converts the `feature_columns` to list type as best as it can. In\naddition, verifies the type and other parts of feature_columns, required by\ndownstream library.\n\nArgs:\nfeature_columns: The raw feature columns, usually passed by users.\n\nReturns:\nThe normalized feature column list.\n\nRaises:\nValueError: for any invalid inputs, such as empty, duplicated names, etc.", "source": "github-repos"}
{"code": "def get_stream(data=None):\n        \n        if len(__mstreams_available__) == 0:\n            if data:\n                mstream = MemoryStream(data)\n                mstream.seek(0)\n            else:\n                mstream = MemoryStream()\n            __mstreams__.append(mstream)\n            return mstream\n\n        mstream = __mstreams_available__.pop()\n\n        if data is not None and len(data):\n            mstream.clean_up()\n            mstream.write(data)\n\n        mstream.seek(0)\n\n        return mstream", "docstring": "Get a MemoryStream instance.\n\nArgs:\ndata (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nMemoryStream: instance.", "source": "juraj-google-style"}
{"code": "def map_creative_third_party_url_feeds(self, creative_feed, third_party_url_feed):\n    for creative in creative_feed:\n        creative['third_party_urls'] = [third_party_url for third_party_url in third_party_url_feed if self._assignment_matches(creative, third_party_url)]", "docstring": "Maps third party url feed to the corresponding creative.\n\nThird party URL is a child object to the creative, and there is a 1 creative\nto many third party urls relationship. In Bulkdozer they are represented by\ntwo separate tab in the feed, and this method maps the creatives to their\nrespective third party URLs based on the creative ID.\n\nArgs:\ncreative_feed: Creative feed.\nthird_party_url_feed: Third party url feed.", "source": "github-repos"}
{"code": "def mark_causative(self, institute, case, user, link, variant):\n    display_name = variant['display_name']\n    LOG.info('Mark variant {0} as causative in the case {1}'.format(display_name, case['display_name']))\n    LOG.info('Adding variant to causatives in case {0}'.format(case['display_name']))\n    LOG.info('Marking case {0} as solved'.format(case['display_name']))\n    updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$push': {'causatives': variant['_id']}, '$set': {'status': 'solved'}}, return_document=pymongo.ReturnDocument.AFTER)\n    LOG.info('Creating case event for marking {0} causative'.format(variant['display_name']))\n    self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='mark_causative', variant=variant, subject=variant['display_name'])\n    LOG.info('Creating variant event for marking {0} causative'.format(case['display_name']))\n    self.create_event(institute=institute, case=case, user=user, link=link, category='variant', verb='mark_causative', variant=variant, subject=variant['display_name'])\n    return updated_case", "docstring": "Create an event for marking a variant causative.\n\nArguments:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (str): The url to be used in the event\nvariant (variant): A variant object\n\nReturns:\nupdated_case(dict)", "source": "codesearchnet"}
{"code": "def _container_start_handler_factory(ion_type, before_yield=lambda c, ctx: None):\n    \n    assert ion_type.is_container\n\n    @coroutine\n    def container_start_handler(c, ctx):\n        before_yield(c, ctx)\n        yield\n        yield ctx.event_transition(IonEvent, IonEventType.CONTAINER_START, ion_type, value=None)\n    return container_start_handler", "docstring": "Generates handlers for tokens that begin with container start characters.\n\nArgs:\nion_type (IonType): The type of this container.\nbefore_yield (Optional[callable]): Called at initialization. Accepts the first character's ordinal and the\ncurrent context; performs any necessary initialization actions.", "source": "juraj-google-style"}
{"code": "def option_configure(debug=False, path=None):\n    \n    if CONFIG_SCRIPT in sys.argv[0]:\n        debug = True    \n    if path is None:\n        path = local_config['PROJECT']['CONFIG_PATH']\n    if debug:\n        if os.path.isfile(path):\n            debug_mode('local_config file: ', local_config, debug, halt=True)\n        else:\n            msg = \n            debug_mode(msg, {'CONFIG_PATH': path}, debug, halt=True)\n    r = configuration.init(debug, path)\n    return r", "docstring": "Summary:\nInitiate configuration menu to customize metal runtime options.\nConsole script ```keyconfig``` invokes this option_configure directly\nin debug mode to display the contents of the local config file (if exists)\nArgs:\n:path (str): full path to default local configuration file location\n:debug (bool): debug flag, when True prints out contents of local\nconfig file\nReturns:\nTYPE (bool):  Configuration Success | Failure", "source": "juraj-google-style"}
{"code": "def _create_delta(self):\n        \n        states = self._read_transitions()\n        total_states = len(states)\n        self._add_sink_state(states)\n        nulltrans = self._read_null_transitions()\n\n        def delta(current_state, character):\n            \n            if character != '':\n                newstate = states[current_state][ord(character)]\n                if newstate > 0:\n                    return newstate\n                else:\n                    return total_states\n            else:\n                return nulltrans[current_state]\n\n        return total_states + 1, delta", "docstring": "This function creates the delta transition\nArgs:\nstartState (int): Initial state of automaton\nResults:\nint, func: A number indicating the total states, and the delta function", "source": "juraj-google-style"}
{"code": "def _HandleDuplicates(self, new_aliases):\n    name_to_alias = {}\n    out = []\n    for a in new_aliases:\n        if a.name not in name_to_alias:\n            name_to_alias[a.name] = a\n            out.append(a)\n            continue\n        existing = name_to_alias[a.name]\n        if self._EquivalentAliases(existing, a):\n            continue\n        existing_name = existing.type.name or existing.type.__class__.__name__\n        a_name = a.type.name or a.type.__class__.__name__\n        raise KeyError(f'Duplicate top level items: {existing_name!r}, {a_name!r}')\n    return out", "docstring": "Handle duplicate module-level aliases.\n\nAliases pointing to qualified names could be the result of importing the\nsame entity through multiple import paths, which should not count as an\nerror; instead we just deduplicate them.\n\nArgs:\nnew_aliases: The list of new aliases to deduplicate\n\nReturns:\nA deduplicated list of aliases.\n\nRaises:\nKeyError: If there is a name clash.", "source": "github-repos"}
{"code": "def is_supergroup(self, subgroup):\n    warnings.warn('This is not fully functional. Only trivial subsets are tested right now. ')\n    return set(subgroup.symmetry_ops).issubset(self.symmetry_ops)", "docstring": "True if this group is a supergroup of the supplied group.\n\nArgs:\nsubgroup (SymmetryGroup): Subgroup to test.\n\nReturns:\nTrue if this group is a supergroup of the supplied group.", "source": "codesearchnet"}
{"code": "def get_unresolved(aln_df):\n    \n    unresolved_df = aln_df[aln_df['type'] == 'unresolved']\n    unresolved = []\n    if not unresolved_df.empty:\n        unresolved_df['id_a_pos'] = unresolved_df['id_a_pos'].astype(int)\n        unresolved = unresolved_df.id_a_pos.tolist()\n    return unresolved", "docstring": "Get a list of residue numbers (in the original sequence's numbering) that are unresolved\n\nArgs:\naln_df (DataFrame): Alignment DataFrame\n\nReturns:\nlist: Residue numbers that are mutated", "source": "juraj-google-style"}
{"code": "def _load_features_from_images(self, images, names=None):\n    if ((names is not None) and (len(names) != len(images))):\n        raise Exception('Lists of feature names and images must be of same length!')\n    self.feature_names = (names if (names is not None) else images)\n    self.feature_images = imageutils.load_imgs(images, self.masker)", "docstring": "Load feature image data from image files.\n\nArgs:\nimages: A list of image filenames.\nnames: An optional list of strings to use as the feature names. Must\nbe in the same order as the images.", "source": "codesearchnet"}
{"code": "def _clip_gradient_op(dtype):\n\n    def clip_gradient_backward(op, grad):\n        clip_value_min = op.inputs[1]\n        clip_value_max = op.inputs[2]\n        clipped_grad = tf.clip_by_value(grad, clip_value_min, clip_value_max)\n        return (clipped_grad, None, None)\n\n    def clip_gradient_forward(x, clip_value_min, clip_value_max):\n        del clip_value_min\n        del clip_value_max\n        return x\n    func_name = 'ClipGradient_{}'.format(dtype.name)\n    return function.Defun(dtype, dtype, dtype, python_grad_func=clip_gradient_backward, func_name=func_name)(clip_gradient_forward)", "docstring": "Create an op that clips gradients using a Defun.\n\nThe tensorflow Defun decorator creates an op and tensorflow caches these op\nautomatically according to `func_name`. Using a Defun decorator twice with the\nsame `func_name` does not create a new op, instead the cached op is used.\n\nThis method produces a new op the first time it is called with a given `dtype`\nargument, and then uses the cached op each time it is called after that with\nthe same `dtype`. The min and max clip values are given as arguments for the\nforward pass method so that they can be used in the backwards pass.\n\nArgs:\ndtype: the dtype of the net whose gradient is being clipped.\n\nReturns:\nThe op that clips gradients.", "source": "codesearchnet"}
{"code": "def run_docker(self, commands):\n    try:\n        import docker\n    except ImportError:\n        print('{}{}Could not import docker module (try \"pip install docker\").'.format(c.Style.BRIGHT, c.Fore.RED))\n        sys.exit(1)\n    app_args_data = self.profile.get('profile_args').data\n    install_json = self.profile.get('install_json')\n    client = docker.from_env()\n    app_dir = os.getcwd()\n    ports = {}\n    if self.args.vscd:\n        ports = {'{}/tcp'.format(self.args.vscd_port): self.args.vscd_port}\n    volumes = {}\n    in_path = '{}/{}'.format(app_dir, app_args_data.get('tc_in_path'))\n    if (app_args_data.get('tc_in_path') is not None):\n        volumes[in_path] = {'bind': in_path}\n    log_path = '{}/{}'.format(app_dir, app_args_data.get('tc_log_path'))\n    if (app_args_data.get('tc_log_path') is not None):\n        volumes[log_path] = {'bind': log_path}\n    out_path = '{}/{}'.format(app_dir, app_args_data.get('tc_out_path'))\n    if (app_args_data.get('tc_out_path') is not None):\n        volumes[out_path] = {'bind': out_path}\n    temp_path = '{}/{}'.format(app_dir, app_args_data.get('tc_temp_path'))\n    if (app_args_data.get('tc_temp_path') is not None):\n        volumes[temp_path] = {'bind': temp_path}\n    volumes[app_dir] = {'bind': app_dir}\n    if (self.args.docker_image is not None):\n        docker_image = self.args.docker_image\n    else:\n        docker_image = self.profile.get('dockerImage', install_json.get('dockerImage', self.docker_image))\n    status_code = 1\n    try:\n        self.container = client.containers.run(docker_image, entrypoint=commands.get('cli_command'), environment=['PYTHONPATH={}/lib_latest'.format(app_dir)], detach=True, ports=ports, remove=True, volumes=volumes, working_dir=app_dir)\n        results = self.container.wait()\n        status_code = results.get('StatusCode')\n        error = results.get('Error')\n        if error:\n            print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, error))\n    except Exception as e:\n        print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))\n        sys.exit()\n    return self.run_exit_code(status_code)", "docstring": "Run App in Docker Container.\n\nArgs:\ncommands (dict): A dictionary of the CLI commands.\n\nReturns:\nint: The exit code of the subprocess command.", "source": "codesearchnet"}
{"code": "def get_all_subtypes_with_tags(self):\n    assert self.has_enumerated_subtypes(), 'Enumerated subtypes not set.'\n    subtypes_with_tags = []\n    fifo = deque([subtype_field.data_type for subtype_field in self.get_enumerated_subtypes()])\n    while fifo:\n        data_type = fifo.popleft()\n        subtypes_with_tags.append((data_type._get_subtype_tags(), data_type))\n        if data_type.has_enumerated_subtypes():\n            for subtype_field in data_type.get_enumerated_subtypes():\n                fifo.append(subtype_field.data_type)\n    return subtypes_with_tags", "docstring": "Unlike other enumerated-subtypes-related functionality, this method\nreturns not just direct subtypes, but all subtypes of this struct. The\ntag of each subtype is the list of tags from which the type descends.\n\nThis method only applies to structs that enumerate subtypes.\n\nUse this when you need to generate a lookup table for a root struct\nthat maps a generated class representing a subtype to the tag it needs\nin the serialized format.\n\nReturns:\nList[Tuple[List[String], Struct]]", "source": "codesearchnet"}
{"code": "def get(issue_id, issue_type_id):\n    return db.Issue.find_one((Issue.issue_id == issue_id), (Issue.issue_type_id == issue_type_id))", "docstring": "Return issue by ID\n\nArgs:\nissue_id (str): Unique Issue identifier\nissue_type_id (str): Type of issue to get\n\nReturns:\n:obj:`Issue`: Returns Issue object if found, else None", "source": "codesearchnet"}
{"code": "class ClapAudioPatchMerging(nn.Module):\n\n    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:\n        super().__init__()\n        self.input_resolution = input_resolution\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(4 * dim)\n\n    def maybe_pad(self, input_feature, height, width):\n        should_pad = height % 2 == 1 or width % 2 == 1\n        if should_pad:\n            pad_values = (0, 0, 0, width % 2, 0, height % 2)\n            input_feature = nn.functional.pad(input_feature, pad_values)\n        return input_feature\n\n    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:\n        height, width = input_dimensions\n        batch_size, dim, num_channels = input_feature.shape\n        input_feature = input_feature.view(batch_size, height, width, num_channels)\n        input_feature = self.maybe_pad(input_feature, height, width)\n        input_feature_0 = input_feature[:, 0::2, 0::2, :]\n        input_feature_1 = input_feature[:, 1::2, 0::2, :]\n        input_feature_2 = input_feature[:, 0::2, 1::2, :]\n        input_feature_3 = input_feature[:, 1::2, 1::2, :]\n        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)\n        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)\n        input_feature = self.norm(input_feature)\n        input_feature = self.reduction(input_feature)\n        return input_feature", "docstring": "Patch Merging Layer.\n\nArgs:\ninput_resolution (`Tuple[int]`):\nResolution of input feature.\ndim (`int`):\nNumber of input channels.\nnorm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def CreateCampaign(client, merchant_id, budget_id):\n    campaign_service = client.GetService('CampaignService', 'v201809')\n    campaign = {'name': ('Shopping campaign \n    operations = [{'operator': 'ADD', 'operand': campaign}]\n    return campaign_service.mutate(operations)['value'][0]", "docstring": "Creates a new Display Network campaign.\n\nArgs:\nclient: an AdWordsClient instance.\nmerchant_id: a int merchant center ID.\nbudget_id: a int budget ID.\n\nReturns:\nThe campaign that was successfully created.", "source": "codesearchnet"}
{"code": "def GetFileObjectReferenceCount(self, path_spec):\n    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)\n    if (not cache_value):\n        return None\n    return cache_value.reference_count", "docstring": "Retrieves the reference count of a cached file-like object.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: reference count or None if there is no file-like object for\nthe corresponding path specification cached.", "source": "codesearchnet"}
{"code": "def get_lat_long(self, callsign, timestamp=timestamp_now):\n    callsign_data = self.get_all(callsign, timestamp=timestamp)\n    return {const.LATITUDE: callsign_data[const.LATITUDE], const.LONGITUDE: callsign_data[const.LONGITUDE]}", "docstring": "Returns Latitude and Longitude for a callsign\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\ndict: Containing Latitude and Longitude\n\nRaises:\nKeyError: No data found for callsign\n\nExample:\nThe following code returns Latitude & Longitude for \"DH1TW\"\n\n>>> from pyhamtools import LookupLib, Callinfo\n>>> my_lookuplib = LookupLib(lookuptype=\"countryfile\")\n>>> cic = Callinfo(my_lookuplib)\n>>> cic.get_lat_long(\"DH1TW\")\n{\n'latitude': 51.0,\n'longitude': -10.0\n}\n\nNote:\nUnfortunately, in most cases the returned Latitude and Longitude are not very precise.\nClublog and Country-files.com use the country's capital coordinates in most cases, if no\ndedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup.", "source": "codesearchnet"}
{"code": "def _find_docstring_line(self, start, end):\n        \n        for i in range(start, end + 1):\n            if i in self._tokenized_triple_quotes:\n                return i\n        return None", "docstring": "Find the row where a docstring starts in a function or class.\n\nThis will search for the first match of a triple quote token in\nrow sequence from the start of the class or function.\n\nArgs:\nstart: the row where the class / function starts.\nend: the row where the class / function ends.\n\nReturns:\nint: the row number where the docstring is found.", "source": "juraj-google-style"}
{"code": "def make_job(name: str='', run_name: str='', num_tasks: int=0, install_script: str='', **kwargs) -> backend.Job:\n    return _backend.make_job(name=name, run_name=run_name, num_tasks=num_tasks, install_script=install_script, **kwargs)", "docstring": "Create a job using current backend. Blocks until all tasks are up and initialized.\n\nArgs:\nname: name of the job\nrun_name: name of the run (auto-assigned if empty)\nnum_tasks: number of tasks\ninstall_script: bash-runnable script\n**kwargs:\n\nReturns:\nbackend.Job", "source": "codesearchnet"}
{"code": "def import_entities(self, entities):\n    edata = Entity.create_payload(entities)\n    r = fapi.upload_entities(self.namespace, self.name, edata, self.api_url)\n    fapi._check_response_code(r, 201)", "docstring": "Upload entity objects.\n\nArgs:\nentities: iterable of firecloud.Entity objects.", "source": "codesearchnet"}
{"code": "def clinvar_submission_header(submission_objs, csv_type):\n    \n\n    complete_header = {} \n    custom_header = {}   \n    if csv_type == 'variant_data' :\n        complete_header = CLINVAR_HEADER\n    else:\n        complete_header = CASEDATA_HEADER\n\n    for header_key, header_value in complete_header.items(): \n        for clinvar_obj in submission_objs: \n            for key, value in clinvar_obj.items(): \n\n                if not header_key in custom_header and header_key == key: \n                    custom_header[header_key] = header_value\n\n    return custom_header", "docstring": "Determine which fields to include in csv header by checking a list of submission objects\n\nArgs:\nsubmission_objs(list): a list of objects (variants or casedata) to include in a csv file\ncsv_type(str) : 'variant_data' or 'case_data'\n\nReturns:\ncustom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER", "source": "juraj-google-style"}
{"code": "def CalculateWaitForRetry(retry_attempt, max_wait=60):\n    wait_time = (2 ** retry_attempt)\n    max_jitter = (wait_time / 4.0)\n    wait_time += random.uniform((- max_jitter), max_jitter)\n    return max(1, min(wait_time, max_wait))", "docstring": "Calculates amount of time to wait before a retry attempt.\n\nWait time grows exponentially with the number of attempts. A\nrandom amount of jitter is added to spread out retry attempts from\ndifferent clients.\n\nArgs:\nretry_attempt: Retry attempt counter.\nmax_wait: Upper bound for wait time [seconds].\n\nReturns:\nNumber of seconds to wait before retrying request.", "source": "codesearchnet"}
{"code": "def deploy(app_id, version, promote, quiet):\n    \n    \n    gae_app = GaeApp.for_branch(git.current_branch().name)\n\n    if gae_app is None and None in (app_id,  version):\n        msg = (\n            \"Can't find an AppEngine app setup for branch <35>{}<32> and\"\n            \"--project and --version were not given.\"\n        )\n        log.err(msg, git.current_branch().name)\n        sys.exit(1)\n\n    if version is not None:\n        gae_app.version = version\n\n    if app_id is not None:\n        gae_app.app_id = app_id\n\n    gae_app.deploy(promote, quiet)", "docstring": "Deploy the app to AppEngine.\n\nArgs:\napp_id (str):\nAppEngine App ID. Overrides config value app_id if given.\nversion (str):\nAppEngine project version. Overrides config values if given.\npromote (bool):\nIf set to **True** promote the current remote app version to the one\nthat's being deployed.\nquiet (bool):\nIf set to **True** this will pass the ``--quiet`` flag to gcloud\ncommand.", "source": "juraj-google-style"}
{"code": "def output_shape(self):\n    if not self._inbound_nodes:\n        raise AttributeError('The layer has never been called and thus has no defined output shape.')\n    all_output_shapes = set([str(node.output_shapes) for node in self._inbound_nodes])\n    if len(all_output_shapes) == 1:\n        return self._inbound_nodes[0].output_shapes\n    else:\n        raise AttributeError('The layer \"%s\" has multiple inbound nodes, with different output shapes. Hence the notion of \"output shape\" is ill-defined for the layer. Use `get_output_shape_at(node_index)` instead.' % self.name)", "docstring": "Retrieves the output shape(s) of a layer.\n\nOnly applicable if the layer has one output,\nor if all outputs have the same shape.\n\nReturns:\nOutput shape, as an integer shape tuple\n(or list of shape tuples, one tuple per output tensor).\n\nRaises:\nAttributeError: if the layer has no defined output shape.\nRuntimeError: if called in Eager mode.", "source": "github-repos"}
{"code": "def remove(self,\n               entity_id,\n               property_uri,\n               value):\n        \n        if not entity_id.startswith(\"http\"):\n            entity_uri = urllib.parse.urljoin(self.base_url, entity_id)\n        else:\n            entity_uri = entity_id\n        sparql_template = Template()\n        sparql = sparql_template.substitute(\n            prefix=build_prefixes(self.namespaces),\n            entity=entity_uri,\n            prop_name=property_uri,\n            value_str=self.__value_format__(value))\n        delete_property_request = urllib.request.Request(\n            entity_uri,\n            data=sparql.encode(),\n            method='PATCH',\n            headers={'Content-Type': 'application/sparql-update'})\n        response = urllib.request.urlopen(delete_property_request)\n        if response.code < 400:\n            return True\n        return False", "docstring": "Method removes a triple for the given/subject.\n\nArgs:\nentity_id(string): Fedora Object ID, ideally URI of the subject\nproperty_uri(string):\nvalue(string):\n\nReturn:\nboolean: True if triple was removed from the object", "source": "juraj-google-style"}
{"code": "def candidate_paths(self, filepath):\n    (filelead, filetail) = os.path.split(filepath)\n    (name, extension) = os.path.splitext(filetail)\n    if extension:\n        extension = extension[1:]\n    filenames = [name]\n    if (not name.startswith('_')):\n        filenames.append('_{}'.format(name))\n    if (extension and (extension in self.CANDIDATE_EXTENSIONS)):\n        filenames = ['.'.join([k, extension]) for k in filenames]\n    else:\n        if extension:\n            filenames = ['.'.join([k, extension]) for k in filenames]\n        new = []\n        for ext in self.CANDIDATE_EXTENSIONS:\n            new.extend(['.'.join([k, ext]) for k in filenames])\n        filenames = new\n    return [os.path.join(filelead, v) for v in filenames]", "docstring": "Return candidates path for given path\n\n* If Filename does not starts with ``_``, will build a candidate for\nboth with and without ``_`` prefix;\n* Will build For each available extensions if filename does not have\nan explicit extension;\n* Leading path directory is preserved;\n\nArgs:\nfilepath (str): Relative path as finded in an import rule from a\nSCSS source.\n\nReturns:\nlist: Builded candidate paths (as relative paths).", "source": "codesearchnet"}
{"code": "def forward(self, encoder_hidden_states, padding_masks=None):\n    hidden_states = encoder_hidden_states.transpose(1, -1)\n    for layer in self.conv_layers:\n        hidden_states = layer(hidden_states)\n    hidden_states = self.linear(hidden_states.transpose(1, 2))\n    if padding_masks is not None:\n        hidden_states = hidden_states.masked_fill(padding_masks, 0.0)\n    return hidden_states", "docstring": "Calculate forward propagation.\n\nArgs:\nencoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):\nBatch of input sequences.\npadding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):\nBatch of masks indicating padded part.\n\nReturns:\nTensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`.", "source": "github-repos"}
{"code": "def toarray(vari):\n    if isinstance(vari, Poly):\n        shape = vari.shape\n        out = numpy.asarray([{} for _ in range(numpy.prod(shape))], dtype=object)\n        core = vari.A.copy()\n        for key in core.keys():\n            core[key] = core[key].flatten()\n            for i in range(numpy.prod(shape)):\n                if (not numpy.all((core[key][i] == 0))):\n                    out[i][key] = core[key][i]\n        for i in range(numpy.prod(shape)):\n            out[i] = Poly(out[i], vari.dim, (), vari.dtype)\n        out = out.reshape(shape)\n        return out\n    return numpy.asarray(vari)", "docstring": "Convert polynomial array into a numpy.asarray of polynomials.\n\nArgs:\nvari (Poly, numpy.ndarray):\nInput data.\n\nReturns:\n(numpy.ndarray):\nA numpy array with ``Q.shape==A.shape``.\n\nExamples:\n>>> poly = cp.prange(3)\n>>> print(poly)\n[1, q0, q0^2]\n>>> array = cp.toarray(poly)\n>>> print(isinstance(array, numpy.ndarray))\nTrue\n>>> print(array[1])\nq0", "source": "codesearchnet"}
{"code": "def list_images(self):\n    r = self.get((self.registry_url + '/v2/_catalog'), auth=self.auth)\n    return r.json()['repositories']", "docstring": "List images stored in the registry.\n\nReturns:\nlist[str]: List of image names.", "source": "codesearchnet"}
{"code": "def get_unique_tags(field_to_obs):\n    return {field: sorted(set([x.get('tag', '') for x in observations])) for (field, observations) in field_to_obs.items() if (field in TAG_FIELDS)}", "docstring": "Returns a dictionary of tags that a user could query over.\n\nArgs:\nfield_to_obs: Dict that maps string field to `Observation` list.\n\nReturns:\nA dict that maps keys in `TAG_FIELDS` to a list of string tags present in\nthe event files. If the dict does not have any observations of the type,\nmaps to an empty list so that we can render this to console.", "source": "codesearchnet"}
{"code": "def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):\n    return rewards_to_go(td_deltas, mask, (lambda_ * gamma))", "docstring": "r\"\"\"Computes the GAE advantages given the one step TD-residuals.\n\nThe formula for a GAE advantage estimator is as follows:\n\nA_{bt} = \\sum_{l=0}^{\\infty}(\\gamma * \\lambda)^{l}(\\delta_{b,t+l}).\n\nInternally we just call rewards_to_go, since it is the same computation.\n\nArgs:\ntd_deltas: np.ndarray of shape (B, T) of one step TD-residuals.\nmask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the\ncase that the `td_deltas` are already masked correctly since they are\nproduced by `deltas(...)`\nlambda_: float, lambda parameter for GAE estimators.\ngamma: float, lambda parameter for GAE estimators.\n\nReturns:\nGAE advantage estimates.", "source": "codesearchnet"}
{"code": "def reset_time_estimate(self, **kwargs):\n        \n        path = '%s/%s/reset_time_estimate' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Resets estimated time for the object to 0 seconds.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "juraj-google-style"}
{"code": "def smooth(self, noise, strategy=INVERSE_STRATEGY):\n        \n        if strategy is INVERSE_STRATEGY:\n            self.points = with_inverse(self.points, noise)\n        elif strategy is EXTRAPOLATE_STRATEGY:\n            self.points = with_extrapolation(self.points, noise, 30)\n        elif strategy is NO_STRATEGY:\n            self.points = with_no_strategy(self.points, noise)\n        return self", "docstring": "In-place smoothing\n\nSee smooth_segment function\n\nArgs:\nnoise (float): Noise expected\nstrategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY\nor smooth.EXTRAPOLATE_STRATEGY\nReturns:\n:obj:`Segment`", "source": "juraj-google-style"}
{"code": "def _InitValues(self, sizes):\n    total_size = 1\n    for s in sizes:\n        total_size *= s\n    x = [f * 0.5 for f in range(1, total_size + 1)]\n    return constant_op.constant(x, shape=sizes)", "docstring": "Initializes values for input tensors.\n\nArgs:\nsizes: Tensor dimensions.\n\nReturns:\nTensor initialized to values.", "source": "github-repos"}
{"code": "def update_headers(self, response):\n        \n        if 'expires' in response.headers and 'cache-control' in response.headers:\n            self.msg = self.server_cache_headers\n            return response.headers\n        else:\n            self.msg = self.default_cache_vars\n            date = parsedate(response.headers['date'])\n            expires = datetime(*date[:6]) + timedelta(0, self.expire_after)\n            response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())),\n                                'cache-control': 'public'})\n            return response.headers", "docstring": "Returns the updated caching headers.\n\nArgs:\nresponse (HttpResponse): The response from the remote service\n\nReturns:\nresponse:(HttpResponse.Headers): Http caching headers", "source": "juraj-google-style"}
{"code": "def require_version(requirement: str, hint: Optional[str]=None) -> None:\n    hint = f'\\n{hint}' if hint is not None else ''\n    if re.match('^[\\\\w_\\\\-\\\\d]+$', requirement):\n        pkg, op, want_ver = (requirement, None, None)\n    else:\n        match = re.findall('^([^!=<>\\\\s]+)([\\\\s!=<>]{1,2}.+)', requirement)\n        if not match:\n            raise ValueError(f'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}')\n        pkg, want_full = match[0]\n        want_range = want_full.split(',')\n        wanted = {}\n        for w in want_range:\n            match = re.findall('^([\\\\s!=<>]{1,2})(.+)', w)\n            if not match:\n                raise ValueError(f'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}')\n            op, want_ver = match[0]\n            wanted[op] = want_ver\n            if op not in ops:\n                raise ValueError(f'{requirement}: need one of {list(ops.keys())}, but got {op}')\n    if pkg == 'python':\n        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])\n        for op, want_ver in wanted.items():\n            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)\n        return\n    try:\n        got_ver = importlib.metadata.version(pkg)\n    except importlib.metadata.PackageNotFoundError:\n        raise importlib.metadata.PackageNotFoundError(f\"The '{requirement}' distribution was not found and is required by this application. {hint}\")\n    if want_ver is not None:\n        for op, want_ver in wanted.items():\n            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)", "docstring": "Perform a runtime check of the dependency versions, using the exact same syntax used by pip.\n\nThe installed module version comes from the *site-packages* dir via *importlib.metadata*.\n\nArgs:\nrequirement (`str`): pip style definition, e.g.,  \"tokenizers==0.9.4\", \"tqdm>=4.27\", \"numpy\"\nhint (`str`, *optional*): what suggestion to print in case of requirements not being met\n\nExample:\n\n```python\nrequire_version(\"pandas>1.1.2\")\nrequire_version(\"numpy>1.18.5\", \"this is important to have for whatever reason\")\n```", "source": "github-repos"}
{"code": "def pb(name, data, display_name=None, description=None):\n    import tensorflow.compat.v1 as tf\n    data = np.array(data)\n    if (data.shape != ()):\n        raise ValueError(('Expected scalar shape for data, saw shape: %s.' % data.shape))\n    if (data.dtype.kind not in ('b', 'i', 'u', 'f')):\n        raise ValueError(('Cast %s to float is not supported' % data.dtype.name))\n    tensor = tf.make_tensor_proto(data.astype(np.float32))\n    if (display_name is None):\n        display_name = name\n    summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)\n    tf_summary_metadata = tf.SummaryMetadata.FromString(summary_metadata.SerializeToString())\n    summary = tf.Summary()\n    summary.value.add(tag=('%s/scalar_summary' % name), metadata=tf_summary_metadata, tensor=tensor)\n    return summary", "docstring": "Create a legacy scalar summary protobuf.\n\nArguments:\nname: A unique name for the generated summary, including any desired\nname scopes.\ndata: A rank-0 `np.array` or array-like form (so raw `int`s and\n`float`s are fine, too).\ndisplay_name: Optional name for this summary in TensorBoard, as a\n`str`. Defaults to `name`.\ndescription: Optional long-form description for this summary, as a\n`str`. Markdown is supported. Defaults to empty.\n\nReturns:\nA `tf.Summary` protobuf object.", "source": "codesearchnet"}
{"code": "def cube(data, xcoords=None, ycoords=None, chcoords=None, scalarcoords=None, datacoords=None, attrs=None, name=None):\n    cube = xr.DataArray(data, dims=('x', 'y', 'ch'), attrs=attrs, name=name)\n    cube.dcc._initcoords()\n    if (xcoords is not None):\n        cube.coords.update({key: ('x', xcoords[key]) for key in xcoords})\n    if (ycoords is not None):\n        cube.coords.update({key: ('y', ycoords[key]) for key in ycoords})\n    if (chcoords is not None):\n        cube.coords.update({key: ('ch', chcoords[key]) for key in chcoords})\n    if (datacoords is not None):\n        cube.coords.update({key: (('x', 'y', 'ch'), datacoords[key]) for key in datacoords})\n    if (scalarcoords is not None):\n        cube.coords.update(scalarcoords)\n    return cube", "docstring": "Create a cube as an instance of xarray.DataArray with Decode accessor.\n\nArgs:\ndata (numpy.ndarray): 3D (x x y x channel) array.\nxcoords (dict, optional): Dictionary of arrays that label x axis.\nycoords (dict, optional): Dictionary of arrays that label y axis.\nchcoords (dict, optional): Dictionary of arrays that label channel axis.\nscalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like).\ndatacoords (dict, optional): Dictionary of arrays that label x, y, and channel axes.\nattrs (dict, optional): Dictionary of attributes to add to the instance.\nname (str, optional): String that names the instance.\n\nReturns:\ndecode cube (decode.cube): Decode cube.", "source": "codesearchnet"}
{"code": "def acquire(self, constructor_fn: Callable[[], Any], tag: Any=None) -> Any:\n    return _shared_map.acquire(self._key, constructor_fn, tag)", "docstring": "Acquire a reference to the object associated with this Shared handle.\n\nArgs:\nconstructor_fn: function that initialises / constructs the object if not\npresent in the cache. This function should take no arguments. It should\nreturn an initialised object, or None if the object could not be\ninitialised / constructed.\ntag: an optional indentifier to store with the cached object. If\nsubsequent calls to acquire use different tags, the object will be\nreloaded rather than returned from cache.\n\nReturns:\nA reference to an initialised object, either from the cache, or\nnewly-constructed.", "source": "github-repos"}
{"code": "def validate_redis(self, db_data, user_data, oper):\n        \n        passed = True\n        \n        if isinstance(db_data, int):\n            db_data = str(db_data)\n        if isinstance(user_data, int):\n            user_data = str(user_data)\n\n        \n        \n        if isinstance(db_data, (list)):\n            try:\n                db_data = sorted(db_data)\n            except TypeError:\n                \n                pass\n        if isinstance(user_data, (list)):\n            try:\n                user_data = sorted(user_data)\n            except TypeError:\n                \n                pass\n\n        if oper not in self.operators:\n            self.log.error('Invalid operator provided ({})'.format(oper))\n            return False\n\n        \n        if self.operators.get(oper)(db_data, user_data):\n            self.reports.profile_validation(True)\n        else:\n            self.reports.profile_validation(False)\n            passed = False\n\n        \n        self.validate_log_output(passed, db_data, user_data, oper)\n\n        return passed", "docstring": "Validate data in Redis.\n\nArgs:\ndb_data (str): The data store in Redis.\nuser_data (str): The user provided data.\noper (str): The comparison operator.\n\nReturns:\nbool: True if the data passed validation.", "source": "juraj-google-style"}
{"code": "def __init__(self, variant_tensor, resource_creator):\n    super(_VariantTracker, self).__init__(device='CPU')\n    self._resource_handle = variant_tensor\n    if not isinstance(resource_creator, def_function.Function):\n        raise TypeError('Resource creator should already be a tf.function.')\n    self._create_resource = resource_creator", "docstring": "Record that `variant_tensor` is associated with `resource_creator`.\n\nArgs:\nvariant_tensor: The variant-dtype Tensor associated with the Dataset. This\nTensor will be a captured input to functions which use the Dataset, and\nis used by saving code to identify the corresponding _VariantTracker.\nresource_creator: A zero-argument function which creates a new\nvariant-dtype Tensor. This function will be included in SavedModels and\nrun to re-create the Dataset's variant Tensor on restore.", "source": "github-repos"}
{"code": "def send_to_default_exchange(self, sess_id, message=None):\n        \n        msg = json.dumps(message, cls=ZEngineJSONEncoder)\n        log.debug(\"Sending following message to %s queue through default exchange:\\n%s\" % (\n            sess_id, msg))\n        self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)", "docstring": "Send messages through RabbitMQ's default exchange,\nwhich will be delivered through routing_key (sess_id).\n\nThis method only used for un-authenticated users, i.e. login process.\n\nArgs:\nsess_id string: Session id\nmessage dict: Message object.", "source": "juraj-google-style"}
{"code": "def serialize_example(transformed_json_data, features, feature_indices, target_name):\n  \n  import six\n  import tensorflow as tf\n  from trainer import feature_transforms\n\n  line = str(transformed_json_data[target_name][0])\n  for name, info in feature_indices:\n    if features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM,\n                                       feature_transforms.SCALE_TRANSFORM]:\n      line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))\n    elif features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM,\n                                         feature_transforms.MULTI_HOT_TRANSFORM]:\n      for i in range(info['size']):\n        if i in transformed_json_data[name]:\n          line += ' %d:1' % (info['index_start'] + i)\n    elif features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]:\n      for i in range(info['size']):\n        line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))\n\n  return line", "docstring": "Makes an instance of data in libsvm format.\n\nArgs:\ntransformed_json_data: dict of transformed data.\nfeatures: features config.\nfeature_indices: output of feature_transforms.get_transformed_feature_indices()\n\nReturns:\nThe text line representation of an instance in libsvm format.", "source": "juraj-google-style"}
{"code": "def get_language(self, text):\n    files = {'text': text}\n    (res, status_code) = self.post(self.language_service, files=files)\n    if (status_code != 200):\n        logger.debug('Language recognition failed.')\n    return (self.decode(res), status_code)", "docstring": "Recognise the language of the text in input\n\nArgs:\nid (str): The text whose the language needs to be recognised\n\nReturns:\ndict, int: A dict containing the recognised language and the\nconfidence score.", "source": "codesearchnet"}
{"code": "def _add_source(model):\n    \n    ignored_keys = {\"author_tags\", \"original_xml\", \"additional_info\"}\n\n    \n    source = \"Aleph\"\n    for key, val in model.get_mapping().iteritems():\n        if key in ignored_keys:\n            continue\n\n        if type(val) in [list, tuple]:\n            ss_val = [\n                SourceString(item, source).to_dict()\n                for item in val\n            ]\n        else:\n            ss_val = [SourceString(val, source).to_dict()]\n\n        setattr(model, key, ss_val)\n\n    return model", "docstring": "Go over all attributes in `model` and add :class:`SourceString` to them.\n\nArgs:\nmodel (obj): :class:`Model` instance.\n\nReturns:\nobj: :class:`Model` instance with :class:`SourceString` descriptors.", "source": "juraj-google-style"}
{"code": "async def send_script(self, client_id, conn_string, script):\n    conn_id = self._client_connection(client_id, conn_string)\n    (await self.adapter.send_script(conn_id, script))", "docstring": "Send a script to a device on behalf of a client.\n\nSee :meth:`AbstractDeviceAdapter.send_script`.\n\nArgs:\nclient_id (str): The client we are working for.\nconn_string (str): A connection string that will be\npassed to the underlying device adapter.\nscript (bytes): The script that we wish to send.\n\nRaises:\nDeviceServerError: There is an issue with your client_id such\nas not being connected to the device.\nDeviceAdapterError: The adapter had a protocol issue sending the script.", "source": "codesearchnet"}
{"code": "def bit_for_bit(model_path, bench_path, config):\n    \n    fname = model_path.split(os.path.sep)[-1]\n    \n    if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):\n        return elements.error(\"Bit for Bit\",\n                              \"File named \" + fname + \" has no suitable match!\")\n    try:\n        model_data = Dataset(model_path)\n        bench_data = Dataset(bench_path)\n    except (FileNotFoundError, PermissionError):\n        return elements.error(\"Bit for Bit\",\n                              \"File named \" + fname + \" could not be read!\")\n    if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)):\n        return elements.error(\"Bit for Bit\",\n                              \"File named \" + fname + \" could not be read!\")\n\n    \n    headers = [\"Max Error\", \"Index of Max Error\", \"RMS Error\", \"Plot\"]\n    stats = LIVVDict()\n    for i, var in enumerate(config[\"bit_for_bit_vars\"]):\n        if var in model_data.variables and var in bench_data.variables:\n            m_vardata = model_data.variables[var][:]\n            b_vardata = bench_data.variables[var][:]\n            diff_data = m_vardata - b_vardata\n            if diff_data.any():\n                stats[var][\"Max Error\"] = np.amax(np.absolute(diff_data))\n                stats[var][\"Index of Max Error\"] = str(\n                        np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape))\n                stats[var][\"RMS Error\"] = np.sqrt(np.sum(np.square(diff_data).flatten()) /\n                                                  diff_data.size)\n                pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data)\n            else:\n                stats[var][\"Max Error\"] = stats[var][\"RMS Error\"] = 0\n                pf = stats[var][\"Index of Max Error\"] = \"N/A\"\n            stats[var][\"Plot\"] = pf\n        else:\n            stats[var] = {\"Max Error\": \"No Match\", \"RMS Error\": \"N/A\", \"Plot\": \"N/A\"}\n    model_data.close()\n    bench_data.close()\n    return elements.bit_for_bit(\"Bit for Bit\", headers, stats)", "docstring": "Checks whether the given files have bit for bit solution matches\non the given variable list.\n\nArgs:\nmodel_path: absolute path to the model dataset\nbench_path: absolute path to the benchmark dataset\nconfig: the configuration of the set of analyses\n\nReturns:\nA dictionary created by the elements object corresponding to\nthe results of the bit for bit testing", "source": "juraj-google-style"}
{"code": "def extract_all_content(self, path=None, payload=None, objectInput=None, pretty_print=False, convert_to_obj=False):\n    f = file_path(path, payload, objectInput)\n    switches = ['-J', '-t', '-r', f]\n    if (not pretty_print):\n        switches.remove('-r')\n    result = self._command_template(switches)\n    if (result and convert_to_obj):\n        result = json.loads(result, encoding='utf-8')\n    return (result, path, f)", "docstring": "This function returns a JSON of all contents and\nmetadata of passed file\n\nArgs:\npath (string): Path of file to analyze\npayload (string): Payload base64 to analyze\nobjectInput (object): file object/standard input to analyze\npretty_print (boolean): If True adds newlines and whitespace,\nfor better readability\nconvert_to_obj (boolean): If True convert JSON in object", "source": "codesearchnet"}
{"code": "def convert_to_tensor_with_default(value, default, dtype=None, name=None):\n    rtn_val = default if value is None else value\n    return tf.convert_to_tensor(rtn_val, dtype=dtype, name=name)", "docstring": "Converts the given `value` to a `Tensor` or returns the `default` value.\n\nConverts the input `value` to a `Tensor` or returns `default` converted to a\n`Tensor` if `value == None`.\n\nArgs:\nvalue: An object whose type has a registered Tensor conversion function.\ndefault: The value to return if `value == None`.\ndtype: Optional element type for the returned tensor. If missing, the type\nis inferred from the type of value.\nname: Optional name to use if a new Tensor is created.\n\nReturns:\nA Tensor based on value.", "source": "github-repos"}
{"code": "def _consumers(self):\n    consumers = nest.flatten([component.consumers() for component in nest.flatten(self, expand_composites=True) if getattr(component, 'graph', None) is not None])\n    return list(set(consumers))", "docstring": "Returns a list of `Operation`s that consume this `CompositeTensor`.\n\nReturns:\nA list of `Operation`s.\n\nRaises:\nRuntimeError: If this method is called while executing eagerly.", "source": "github-repos"}
{"code": "def delete(filepath):\n    \n    \n    remove_acl(filepath)\n\n    \n    remove_immutable_attribute(filepath)\n\n    \n    if os.path.isfile(filepath) or os.path.islink(filepath):\n        os.remove(filepath)\n    elif os.path.isdir(filepath):\n        shutil.rmtree(filepath)", "docstring": "Delete the given file, directory or link.\n\nIt Should support undelete later on.\n\nArgs:\nfilepath (str): Absolute full path to a file. e.g. /path/to/file", "source": "juraj-google-style"}
{"code": "def addRow(self, triggered):\n        \n        if triggered:\n            model = self.tableView.model()\n            model.addDataFrameRows()\n            self.sender().setChecked(False)", "docstring": "Adds a row to the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the row will be appended to the end.", "source": "juraj-google-style"}
{"code": "class Wav2Vec2PhonemeCTCTokenizerOutput(ModelOutput):\n    text: Union[List[str], str]\n    char_offsets: Union[List[ListOfDict], ListOfDict] = None", "docstring": "Output type of [` Wav2Vec2PhonemeCTCTokenizer`], with transcription.\n\nArgs:\ntext (list of `str` or `str`):\nDecoded logits in text from. Usually the speech transcription.\nchar_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):\nOffsets of the decoded characters. In combination with sampling rate and model downsampling rate char\noffsets can be used to compute time stamps for each character. Total logit score of the beam associated with\nproduced text.", "source": "github-repos"}
{"code": "def save_to_file(self, filename, remap_dim0=None, remap_dim1=None):\n    \n    \n    \n    with open(filename, 'w') as fobj:\n      columns = list(sorted(self._dim1))\n      for col in columns:\n        fobj.write(',')\n        fobj.write(str(remap_dim1[col] if remap_dim1 else col))\n      fobj.write('\\n')\n      for row in sorted(self._dim0):\n        fobj.write(str(remap_dim0[row] if remap_dim0 else row))\n        for col in columns:\n          fobj.write(',')\n          fobj.write(str(self[row, col]))\n        fobj.write('\\n')", "docstring": "Saves matrix to the file.\n\nArgs:\nfilename: name of the file where to save matrix\nremap_dim0: dictionary with mapping row indices to row names which should\nbe saved to file. If none then indices will be used as names.\nremap_dim1: dictionary with mapping column indices to column names which\nshould be saved to file. If none then indices will be used as names.", "source": "juraj-google-style"}
{"code": "def SetFlushInterval(self, flush_interval):\n    \n    self._flush_interval = flush_interval\n    logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval))", "docstring": "Set the flush interval.\n\nArgs:\nflush_interval (int): number of events to buffer before doing a bulk\ninsert.", "source": "juraj-google-style"}
{"code": "def set_attributes(self, **kwargs):\n        \n        for field, value in kwargs.items():\n            if field in self.Meta.attributes:\n                setattr(self, field, value)", "docstring": "Set the resource attributes from the kwargs.\nOnly sets items in the `self.Meta.attributes` white list.\n\nArgs:\nkwargs: Keyword arguements passed into the init of this class", "source": "juraj-google-style"}
{"code": "def __init__(self, dct_type=2, validate_args=False, name='dct'):\n    \n    \n    if dct_type not in (2, 3):\n      raise NotImplementedError('`type` must be one of 2 or 3')\n    self._dct_type = dct_type\n    super(DiscreteCosineTransform, self).__init__(\n        forward_min_event_ndims=1,\n        inverse_min_event_ndims=1,\n        is_constant_jacobian=True,\n        validate_args=validate_args,\n        name=name)", "docstring": "Instantiates the `DiscreteCosineTransform` bijector.\n\nArgs:\ndct_type: Python `int`, the DCT type performed by the forward\ntransformation. Currently, only 2 and 3 are supported.\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"}
{"code": "def modify_ack_deadline(self, seconds):\n    self._request_queue.put(requests.ModAckRequest(ack_id=self._ack_id, seconds=seconds))", "docstring": "Resets the deadline for acknowledgement.\n\nNew deadline will be the given value of seconds from now.\n\nThe default implementation handles this for you; you should not need\nto manually deal with setting ack deadlines. The exception case is\nif you are implementing your own custom subclass of\n:class:`~.pubsub_v1.subcriber._consumer.Consumer`.\n\nArgs:\nseconds (int): The number of seconds to set the lease deadline\nto. This should be between 0 and 600. Due to network latency,\nvalues below 10 are advised against.", "source": "codesearchnet"}
{"code": "def get_enterprise_customer_or_404(enterprise_uuid):\n    \n    EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer')  \n    try:\n        enterprise_uuid = UUID(enterprise_uuid)\n        return EnterpriseCustomer.objects.get(uuid=enterprise_uuid)  \n    except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):\n        LOGGER.error('Unable to find enterprise customer for UUID: [%s]', enterprise_uuid)\n        raise Http404", "docstring": "Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.\n\nArguments:\nenterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.\n\nReturns:\n(EnterpriseCustomer): The EnterpriseCustomer given the UUID.", "source": "juraj-google-style"}
{"code": "def in_coord_list(coord_list, coord, atol=1e-8):\n    \n    return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0", "docstring": "Tests if a particular coord is within a coord_list.\n\nArgs:\ncoord_list: List of coords to test\ncoord: Specific coordinates\natol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and\narray.\n\nReturns:\nTrue if coord is in the coord list.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def target_call_func(self, answer: Union[str, List[str]], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n    is_batched = isinstance(answer, (list, tuple))\n    if is_batched:\n        return self.target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n    else:\n        return self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "The method tokenizes and prepares the answer label for the model.\n\nArgs:\nanswer (`str` or `List[str]`):\nCorresponding answer supervision to the queries for training the model.", "source": "github-repos"}
{"code": "def allconcat(self, x, mesh_axis, concat_axis):\n    \n    return self._collective_with_groups(\n        x, [mesh_axis],\n        functools.partial(allconcat_ring, concat_axis=concat_axis))", "docstring": "Grouped allconcat (like MPI allgather followed by concat).\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer - the mesh axis along which to group\nconcat_axis: an integer (the Tensor axis along which to concatenate)\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def _validate_signature_def_map(self, signature_def_map):\n    for signature_def_key in signature_def_map:\n        signature_def = signature_def_map[signature_def_key]\n        inputs = signature_def.inputs\n        outputs = signature_def.outputs\n        for inputs_key in inputs:\n            self._validate_tensor_info(inputs[inputs_key])\n        for outputs_key in outputs:\n            self._validate_tensor_info(outputs[outputs_key])\n    if constants.INIT_OP_SIGNATURE_KEY in signature_def_map:\n        raise KeyError(f'SignatureDef map key \"{constants.INIT_OP_SIGNATURE_KEY}\" is reserved for initialization. Please use a different key.')\n    if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map:\n        raise KeyError(f'SignatureDef map key \"{constants.TRAIN_OP_SIGNATURE_KEY}\" is reserved for the train op. Please use a different key.')", "docstring": "Validates the `SignatureDef` entries in the signature def map.\n\nValidation of entries in the signature def map includes ensuring that the\n`name` and `dtype` fields of the TensorInfo protos of the `inputs` and\n`outputs` of each `SignatureDef` are populated. Also ensures that reserved\nSignatureDef keys for the initialization and train ops are not used.\n\nArgs:\nsignature_def_map: The map of signature defs to be validated.\n\nRaises:\nAssertionError: If a TensorInfo is not valid.\nKeyError: If a reserved signature key is used in the map.", "source": "github-repos"}
{"code": "def decode(tokens):\n  \n  token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]\n  ret = []\n  for i, token in enumerate(tokens):\n    if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:\n      ret.append(u\" \")\n    ret.append(token)\n  return \"\".join(ret)", "docstring": "Decode a list of tokens to a unicode string.\n\nArgs:\ntokens: a list of Unicode strings\nReturns:\na unicode string", "source": "juraj-google-style"}
{"code": "def _is_variable(node_def: node_def_pb2.NodeDef) -> bool:\n    return node_def.op == 'VarHandleOp'", "docstring": "Determines whether `node_def` is a variable node.\n\nArgs:\nnode_def: `NodeDef` to test whether it is a variable or not.\n\nReturns:\nReturns True if it is a variable.", "source": "github-repos"}
{"code": "def clear(self, back_r: int=0, back_g: int=0, back_b: int=0, fore_r: int=0, fore_g: int=0, fore_b: int=0, char: str=' ') -> None:\n    n = (self.width * self.height)\n    self.back_r = ([back_r] * n)\n    self.back_g = ([back_g] * n)\n    self.back_b = ([back_b] * n)\n    self.fore_r = ([fore_r] * n)\n    self.fore_g = ([fore_g] * n)\n    self.fore_b = ([fore_b] * n)\n    self.char = ([ord(char)] * n)", "docstring": "Clears the console.  Values to fill it with are optional, defaults\nto black with no characters.\n\nArgs:\nback_r (int): Red background color, from 0 to 255.\nback_g (int): Green background color, from 0 to 255.\nback_b (int): Blue background color, from 0 to 255.\nfore_r (int): Red foreground color, from 0 to 255.\nfore_g (int): Green foreground color, from 0 to 255.\nfore_b (int): Blue foreground color, from 0 to 255.\nchar (AnyStr): A single character str or bytes object.", "source": "codesearchnet"}
{"code": "def normalize(self, text, normalizations=None):\n    for (normalization, kwargs) in self._parse_normalizations((normalizations or self._config.normalizations)):\n        try:\n            text = getattr(self, normalization)(text, **kwargs)\n        except AttributeError as e:\n            self._logger.debug('Invalid normalization: %s', e)\n    return text", "docstring": "Normalize a given text applying all normalizations.\n\nNormalizations to apply can be specified through a list of\nparameters and will be executed in that order.\n\nArgs:\ntext: The text to be processed.\nnormalizations: List of normalizations to apply.\n\nReturns:\nThe text normalized.", "source": "codesearchnet"}
{"code": "def helper_add(access_token, ck_id, path, body):\n    full_path = ''.join([path, \"('\", ck_id, \"')\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    return do_ams_put(endpoint, full_path_encoded, body, access_token, 'json_only', '1.0;NetFx')", "docstring": "Helper Function to add strings to a URL path.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nck_id (str): A CK ID.\npath (str): A URL Path.\nbody (str): A Body.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def get_variation_for_feature(self, feature, user_id, attributes=None):\n    \n\n    experiment = None\n    variation = None\n    bucketing_id = self._get_bucketing_id(user_id, attributes)\n\n    \n    if feature.groupId:\n      group = self.config.get_group(feature.groupId)\n      if group:\n        experiment = self.get_experiment_in_group(group, bucketing_id)\n        if experiment and experiment.id in feature.experimentIds:\n          variation = self.get_variation(experiment, user_id, attributes)\n\n          if variation:\n            self.logger.debug('User \"%s\" is in variation %s of experiment %s.' % (\n              user_id,\n              variation.key,\n              experiment.key\n            ))\n            return Decision(experiment, variation, enums.DecisionSources.FEATURE_TEST)\n      else:\n        self.logger.error(enums.Errors.INVALID_GROUP_ID_ERROR.format('_get_variation_for_feature'))\n\n    \n    elif feature.experimentIds:\n      \n      experiment = self.config.get_experiment_from_id(feature.experimentIds[0])\n      if experiment:\n        variation = self.get_variation(experiment, user_id, attributes)\n\n        if variation:\n          self.logger.debug('User \"%s\" is in variation %s of experiment %s.' % (\n            user_id,\n            variation.key,\n            experiment.key\n          ))\n          return Decision(experiment, variation, enums.DecisionSources.FEATURE_TEST)\n\n    \n    if feature.rolloutId:\n      rollout = self.config.get_rollout_from_id(feature.rolloutId)\n      return self.get_variation_for_rollout(rollout, user_id, attributes)\n    else:\n      return Decision(None, None, enums.DecisionSources.ROLLOUT)", "docstring": "Returns the experiment/variation the user is bucketed in for the given feature.\n\nArgs:\nfeature: Feature for which we are determining if it is enabled or not for the given user.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nDecision namedtuple consisting of experiment and variation for the user.", "source": "juraj-google-style"}
{"code": "def remove_send_message(self, connection):\n        \n        if connection in self._send_message:\n            del self._send_message[connection]\n            LOGGER.debug(\"Removed send_message function \"\n                         \"for connection %s\", connection)\n        else:\n            LOGGER.warning(\"Attempted to remove send_message \"\n                           \"function for connection %s, but no \"\n                           \"send_message function was registered\",\n                           connection)", "docstring": "Removes a send_message function previously registered\nwith the Dispatcher.\n\nArgs:\nconnection (str): A locally unique identifier provided\nby the receiver of messages.", "source": "juraj-google-style"}
{"code": "def __get_scope(cls,\n            expr: Union['Expression', Tuple]) -> Set[str]:\n        \n        scope = set()\n        for i, atom in enumerate(expr):\n            if isinstance(atom, Expression):\n                scope.update(cls.__get_scope(atom._expr))\n            elif type(atom) in [tuple, list]:\n                scope.update(cls.__get_scope(atom))\n            elif atom == 'pvar_expr':\n                functor, params = expr[i+1]\n                arity = len(params) if params is not None else 0\n                name = '{}/{}'.format(functor, arity)\n                scope.add(name)\n                break\n        return scope", "docstring": "Returns the set of fluents in the expression's scope.\n\nArgs:\nexpr: Expression object or nested tuple of Expressions.\n\nReturns:\nThe set of fluents in the expression's scope.", "source": "juraj-google-style"}
{"code": "def _is_valid_netmask(self, prefixlen):\n        \n        try:\n            prefixlen = int(prefixlen)\n        except ValueError:\n            return False\n        return 0 <= prefixlen <= self._max_prefixlen", "docstring": "Verify that the netmask/prefixlen is valid.\n\nArgs:\nprefixlen: A string, the netmask in prefix length format.\n\nReturns:\nA boolean, True if the prefix represents a valid IPv6\nnetmask.", "source": "juraj-google-style"}
{"code": "def do_ams_auth(endpoint, body):\n    \n    headers = {\"content-type\": \"application/x-www-form-urlencoded\",\n               \"Accept\": json_acceptformat}\n    return requests.post(endpoint, data=body, headers=headers)", "docstring": "Acquire Media Services Authentication Token.\nArgs:\nendpoint (str): Azure Media Services Initial Endpoint.\nbody (str): A Content Body.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def insert_into_range(self, operations: ops.OP_TREE, start: int, end: int) -> int:\n    if (not (0 <= start <= end <= len(self))):\n        raise IndexError('Bad insert indices: [{}, {})'.format(start, end))\n    operations = list(ops.flatten_op_tree(operations))\n    for op in operations:\n        self._device.validate_operation(op)\n    i = start\n    op_index = 0\n    while (op_index < len(operations)):\n        op = operations[op_index]\n        while ((i < end) and (not self._device.can_add_operation_into_moment(op, self._moments[i]))):\n            i += 1\n        if (i >= end):\n            break\n        self._moments[i] = self._moments[i].with_operation(op)\n        op_index += 1\n    if (op_index >= len(operations)):\n        return end\n    return self.insert(end, operations[op_index:])", "docstring": "Writes operations inline into an area of the circuit.\n\nArgs:\nstart: The start of the range (inclusive) to write the\ngiven operations into.\nend: The end of the range (exclusive) to write the given\noperations into. If there are still operations remaining,\nnew moments are created to fit them.\noperations: An operation or tree of operations to insert.\n\nReturns:\nAn insertion index that will place operations after the operations\nthat were inserted by this method.\n\nRaises:\nIndexError: Bad inline_start and/or inline_end.", "source": "codesearchnet"}
{"code": "def plug(update_kwargs=True, **plugs_map):\n    for a_plug in plugs_map.values():\n        if (not (isinstance(a_plug, PlugPlaceholder) or issubclass(a_plug, BasePlug))):\n            raise InvalidPlugError(('Plug %s is not a subclass of plugs.BasePlug nor a placeholder for one' % a_plug))\n\n    def result(func):\n        'Wrap the given function and return the wrapper.\\n\\n    Args:\\n      func: The function to wrap.\\n\\n    Returns:\\n      A PhaseDescriptor that, when called will invoke the wrapped function,\\n        passing plugs as keyword args.\\n\\n    Raises:\\n      DuplicatePlugError:  If a plug name is declared twice for the\\n          same function.\\n    '\n        phase = openhtf.core.phase_descriptor.PhaseDescriptor.wrap_or_copy(func)\n        duplicates = (frozenset((p.name for p in phase.plugs)) & frozenset(plugs_map))\n        if duplicates:\n            raise DuplicatePlugError(('Plugs %s required multiple times on phase %s' % (duplicates, func)))\n        phase.plugs.extend([PhasePlug(name, a_plug, update_kwargs=update_kwargs) for (name, a_plug) in six.iteritems(plugs_map)])\n        return phase\n    return result", "docstring": "Creates a decorator that passes in plugs when invoked.\n\nThis function returns a decorator for a function that will replace positional\narguments to that function with the plugs specified.  See the module\ndocstring for details and examples.\n\nNote this decorator does not work with class or bound methods, but does work\nwith @staticmethod.\n\nArgs:\nupdate_kwargs: If true, makes the decorated phase take this plug as a kwarg.\n**plugs_map: Dict mapping name to Plug type.\n\nReturns:\nA PhaseDescriptor that will pass plug instances in as kwargs when invoked.\n\nRaises:\nInvalidPlugError: If a type is provided that is not a subclass of BasePlug.", "source": "codesearchnet"}
{"code": "def create_and_fill_np_array(start_or_end_logits, dataset, max_len):\n    step = 0\n    logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)\n    for i, output_logit in enumerate(start_or_end_logits):\n        batch_size = output_logit.shape[0]\n        cols = output_logit.shape[1]\n        if step + batch_size < len(dataset):\n            logits_concat[step:step + batch_size, :cols] = output_logit\n        else:\n            logits_concat[step:, :cols] = output_logit[:len(dataset) - step]\n        step += batch_size\n    return logits_concat", "docstring": "Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n\nArgs:\nstart_or_end_logits(:obj:`tensor`):\nThis is the output predictions of the model. We can only enter either start or end logits.\neval_dataset: Evaluation dataset\nmax_len(:obj:`int`):\nThe maximum length of the output tensor. ( See the model.eval() part for more details )", "source": "github-repos"}
{"code": "def changes(self):\n    output = []\n    if (self.status() is self.UNMODIFIED):\n        output = [(self.formatter % (' ', self.key, self.old_value))]\n    elif (self.status() is self.ADDED):\n        output.append((self.formatter % ('+', self.key, self.new_value)))\n    elif (self.status() is self.REMOVED):\n        output.append((self.formatter % ('-', self.key, self.old_value)))\n    elif (self.status() is self.MODIFIED):\n        output.append((self.formatter % ('-', self.key, self.old_value)))\n        output.append((self.formatter % ('+', self.key, self.new_value)))\n    return output", "docstring": "Returns a list of changes to represent the diff between\nold and new value.\n\nReturns:\nlist: [string] representation of the change (if any)\nbetween old and new value", "source": "codesearchnet"}
{"code": "def run_instruction(self, op: opcodes.Opcode, state: frame_state.FrameState) -> frame_state.FrameState:\n    _opcode_counter.inc(op.name)\n    self.frame.current_opcode = op\n    self._importing = 'IMPORT' in op.__class__.__name__\n    if log.isEnabledFor(logging.INFO):\n        vm_utils.log_opcode(op, state, self.frame, len(self.frames))\n    if op.line in self._branch_tracker.matches.match_cases:\n        state = self._handle_match_case(state, op)\n    bytecode_fn = getattr(self, f'byte_{op.name}', None)\n    if bytecode_fn is None:\n        raise VirtualMachineError(f'Unknown opcode: {op.name}')\n    state = bytecode_fn(state, op)\n    if state.why in ('reraise', 'Never'):\n        state = state.set_why('exception')\n    implicit_return = op.name in ('RETURN_VALUE', 'RETURN_CONST') and op.line not in self._director.return_lines\n    if len(self.frames) <= 2:\n        for err in self._branch_tracker.check_ending(op, implicit_return):\n            self.ctx.errorlog.incomplete_match(self.frames, err.line, err.cases)\n    self.frame.current_opcode = None\n    return state", "docstring": "Run a single bytecode instruction.\n\nArgs:\nop: An opcode.\nstate: The state just before running this instruction.\n\nReturns:\nThe state right after this instruction that should roll over to the\nsubsequent instruction. If this opcode aborts this function (e.g. through\na 'raise'), then the state's \"why\" attribute is set to the abort reason.\nRaises:\nVirtualMachineError: if a fatal error occurs.", "source": "github-repos"}
{"code": "def find_dependency_wheels(tile):\n    return [os.path.join(x.folder, 'python', x.support_wheel) for x in _iter_dependencies(tile) if x.has_wheel]", "docstring": "Return a list of all python wheel objects created by dependencies of this tile\n\nArgs:\ntile (IOTile): Tile that we should scan for dependencies\n\nReturns:\nlist: A list of paths to dependency wheels", "source": "codesearchnet"}
{"code": "async def _async_wait_for_process(future_process: Any, out: Optional[Union[(TeeCapture, IO[str])]]=sys.stdout, err: Optional[Union[(TeeCapture, IO[str])]]=sys.stderr) -> CommandOutput:\n    process = (await future_process)\n    future_output = _async_forward(process.stdout, out)\n    future_err_output = _async_forward(process.stderr, err)\n    (output, err_output) = (await asyncio.gather(future_output, future_err_output))\n    (await process.wait())\n    return CommandOutput(output, err_output, process.returncode)", "docstring": "Awaits the creation and completion of an asynchronous process.\n\nArgs:\nfuture_process: The eventually created process.\nout: Where to write stuff emitted by the process' stdout.\nerr: Where to write stuff emitted by the process' stderr.\n\nReturns:\nA (captured output, captured error output, return code) triplet.", "source": "codesearchnet"}
{"code": "def _get_samples_shared_with(self, other, index=None):\n    if isinstance(other, (pd.DataFrame, Projection)):\n        df_other = (other.coords if isinstance(other, Projection) else other)\n        if (len(set(df_other.index)) != len(df_other.index)):\n            raise ValueError('other index has duplicates')\n        if (len(set(self.coords.index)) != len(self.coords.index)):\n            raise ValueError('This projection index has duplicates')\n        if index:\n            uniq_idx = set(index)\n            if (len(uniq_idx) != len(index)):\n                raise ValueError('index has has duplicates')\n            if (uniq_idx - set(df_other.index)):\n                raise ValueError('index has samples not in other')\n            if (uniq_idx - set(self.coords.index)):\n                raise ValueError('index has samples not in this projection')\n        else:\n            uniq_idx = (set(df_other.index) & set(self.coords.index))\n            if (not len(uniq_idx)):\n                raise ValueError('No samples shared between other and this projection')\n        idx = list(uniq_idx)\n        return (self.coords.loc[(idx, :)].values, df_other.loc[(idx, :)].values)\n    else:\n        other = np.array(other)\n        if (other.shape != self.coords.shape):\n            raise ValueError('array-like must have the same shape as self.coords')\n        return (self.coords.values, other)", "docstring": "Find samples shared with another dataset.\n\nArgs:\nother\n(:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`\nor `array-like`):\nThe other dataset. If `other` is an instance of\n:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`,\nthen `other` must have indexes in common with this projection.\nIf `array-like`, then other must have same dimensions as\n`self.coords`.\nindex (`list-like` or `None`): If `other` is an instance of\n:py:class:`pymds.Projection` or :py:class:`pandas.DataFrame`\nthen only return samples in index.\n\nReturns:\n`tuple`: containing:\n\n- this (`numpy.array`) Shape [`x`, `n`].\n- other (`numpy.array`) Shape [`x`, `n`].", "source": "codesearchnet"}
{"code": "def zip_fit_params(data):\n    (genes, cells) = data.shape\n    m = data.mean(1)\n    v = data.var(1)\n    M = ((v - m) / (((m ** 2) + v) - m))\n    M = np.array([min(1.0, max(0.0, x)) for x in M])\n    L = ((m + (v / m)) - 1.0)\n    L[np.isnan(L)] = 0.0\n    L = np.array([max(0.0, x) for x in L])\n    return (L, M)", "docstring": "Returns the ZIP parameters that best fit a given data set.\n\nArgs:\ndata (array): 2d array of genes x cells belonging to a given cluster\n\nReturns:\nL (array): 1d array of means\nM (array): 1d array of zero-inflation parameter", "source": "codesearchnet"}
{"code": "def probability_density(self, X):\n        \n        self.check_fit()\n        return norm.pdf(X, loc=self.mean, scale=self.std)", "docstring": "Compute probability density.\n\nArguments:\nX: `np.ndarray` of shape (n, 1).\n\nReturns:\nnp.ndarray", "source": "juraj-google-style"}
{"code": "def StartsWith(this, that):\n  \n  this_iter = iter(this)\n  that_iter = iter(that)\n\n  while True:\n    try:\n      this_value = next(that_iter)\n    except StopIteration:\n      return True\n\n    try:\n      that_value = next(this_iter)\n    except StopIteration:\n      return False\n\n    if this_value != that_value:\n      return False", "docstring": "Checks whether an items of one iterable are a prefix of another.\n\nArgs:\nthis: An iterable that needs to be checked.\nthat: An iterable of which items must match the prefix of `this`.\n\nReturns:\n`True` if `that` is a prefix of `this`, `False` otherwise.", "source": "juraj-google-style"}
{"code": "def _hertz_to_mel(frequencies_hertz, name=None):\n    with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):\n        frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)\n        return _MEL_HIGH_FREQUENCY_Q * math_ops.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)", "docstring": "Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.\n\nArgs:\nfrequencies_hertz: A `Tensor` of frequencies in Hertz.\nname: An optional name for the operation.\n\nReturns:\nA `Tensor` of the same shape and type of `frequencies_hertz` containing\nfrequencies in the mel scale.", "source": "github-repos"}
{"code": "def ub_to_str(string):\n    \n    if not isinstance(string, str):\n        if six.PY2:\n            return str(string)\n        else:\n            return string.decode()\n    return string", "docstring": "converts py2 unicode / py3 bytestring into str\nArgs:\nstring (unicode, byte_string): string to be converted\n\nReturns:\n(str)", "source": "juraj-google-style"}
{"code": "def add_argument(self, parser, bootstrap=False):\n    tmp_default = self.default\n    exclusive_grp = parser.add_mutually_exclusive_group()\n    self.default = True\n    args = self._get_argparse_names(parser.prefix_chars)\n    kwargs = self._get_argparse_kwargs(bootstrap)\n    exclusive_grp.add_argument(*args, **kwargs)\n    self.default = False\n    args = self._get_argparse_names(parser.prefix_chars)\n    kwargs = self._get_argparse_kwargs(bootstrap)\n    exclusive_grp.add_argument(*args, **kwargs)\n    self.default = tmp_default", "docstring": "Add boolean item as an argument to the given parser.\n\nAn exclusive group is created on the parser, which will add\na boolean-style command line argument to the parser.\n\nExamples:\nA non-nested boolean value with the name 'debug' will result\nin a command-line argument like the following:\n\n'--debug/--no-debug'\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add this item to.\nbootstrap (bool): Flag to indicate whether you only want to mark\nthis item as required or not.", "source": "codesearchnet"}
{"code": "def run_metadata(name, data, step=None):\n    summary_metadata = summary_pb2.SummaryMetadata()\n    summary_metadata.plugin_data.plugin_name = 'graph_run_metadata'\n    summary_metadata.plugin_data.content = b'1'\n    with summary_scope(name, 'graph_run_metadata_summary', [data, step]) as (tag, _):\n        with ops.device('cpu:0'):\n            tensor = constant_op.constant(data.SerializeToString(), dtype=dtypes.string)\n        return write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "docstring": "Writes entire RunMetadata summary.\n\nA RunMetadata can contain DeviceStats, partition graphs, and function graphs.\nPlease refer to the proto for definition of each field.\n\nArgs:\nname: A name for this summary. The summary tag used for TensorBoard will be\nthis name prefixed by any active name scopes.\ndata: A RunMetadata proto to write.\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\n\nReturns:\nTrue on success, or false if no summary was written because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "github-repos"}
{"code": "def publish(self, topic, dct):\n    get_logger().info('Publishing message {} on routing key {}...'.format(dct, topic))\n    self._channel.basic_publish(exchange=self.exchange, routing_key=topic, body=json.dumps(dct))", "docstring": "Send a dict with internal routing key to the exchange.\n\nArgs:\ntopic: topic to publish the message to\ndct: dict object to send", "source": "codesearchnet"}
{"code": "def from_dict(cls, d, fmt=None):\n        \n        if fmt == \"abivars\":\n            from pymatgen.io.abinit.abiobjects import structure_from_abivars\n            return structure_from_abivars(cls=cls, **d)\n\n        lattice = Lattice.from_dict(d[\"lattice\"])\n        sites = [PeriodicSite.from_dict(sd, lattice) for sd in d[\"sites\"]]\n        charge = d.get(\"charge\", None)\n        return cls.from_sites(sites, charge=charge)", "docstring": "Reconstitute a Structure object from a dict representation of Structure\ncreated using as_dict().\n\nArgs:\nd (dict): Dict representation of structure.\n\nReturns:\nStructure object", "source": "juraj-google-style"}
{"code": "def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):\n    \n    class OrderedUniqueLoader(loader):\n        \n\n        \n        NO_DUPE_SIBLINGS = [\"stacks\", \"class_path\"]\n        \n        NO_DUPE_CHILDREN = [\"stacks\"]\n\n        def _error_mapping_on_dupe(self, node, node_name):\n            \n            if isinstance(node, MappingNode):\n                mapping = {}\n                for n in node.value:\n                    a = n[0]\n                    b = mapping.get(a.value, None)\n                    if b:\n                        msg = \"{} mapping cannot have duplicate keys {} {}\"\n                        raise ConstructorError(\n                            msg.format(node_name, b.start_mark, a.start_mark)\n                        )\n                    mapping[a.value] = a\n\n        def _validate_mapping(self, node, deep=False):\n            if not isinstance(node, MappingNode):\n                raise ConstructorError(\n                    None, None,\n                    \"expected a mapping node, but found %s\" % node.id,\n                    node.start_mark)\n            mapping = OrderedDict()\n            for key_node, value_node in node.value:\n                key = self.construct_object(key_node, deep=deep)\n                try:\n                    hash(key)\n                except TypeError as exc:\n                    raise ConstructorError(\n                        \"while constructing a mapping\", node.start_mark,\n                        \"found unhashable key (%s)\" % exc, key_node.start_mark\n                    )\n                \n                if key in mapping and key in self.NO_DUPE_SIBLINGS:\n                    msg = \"{} key cannot have duplicate siblings {} {}\"\n                    raise ConstructorError(\n                        msg.format(key, node.start_mark, key_node.start_mark)\n                    )\n                if key in self.NO_DUPE_CHILDREN:\n                    \n                    self._error_mapping_on_dupe(value_node, key_node.value)\n                value = self.construct_object(value_node, deep=deep)\n                mapping[key] = value\n            return mapping\n\n        def construct_mapping(self, node, deep=False):\n            \n            if isinstance(node, MappingNode):\n                self.flatten_mapping(node)\n            return self._validate_mapping(node, deep=deep)\n\n        def construct_yaml_map(self, node):\n            data = OrderedDict()\n            yield data\n            value = self.construct_mapping(node)\n            data.update(value)\n\n    OrderedUniqueLoader.add_constructor(\n        u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map,\n    )\n    return yaml.load(stream, OrderedUniqueLoader)", "docstring": "Provides yaml.load alternative with preserved dictionary order.\n\nArgs:\nstream (string): YAML string to load.\nloader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe\nload.\n\nReturns:\nOrderedDict: Parsed YAML.", "source": "juraj-google-style"}
{"code": "def user_exists(self, username):\n        \n\n        response = self._get(self.rest_url + \"/user\",\n                             params={\"username\": username})\n\n        if not response.ok:\n            return None\n\n        return True", "docstring": "Determines if the user exists.\n\nArgs:\nusername: The user name.\n\n\nReturns:\nbool:\nTrue if the user exists in the Crowd application.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    \n    if file_object.read(1) != b'{':\n      raise errors.UnableToParseFile((\n          '[{0:s}] {1:s} is not a valid Preference file, '\n          'missing opening brace.').format(\n              self.NAME, parser_mediator.GetDisplayName()))\n\n    file_object.seek(0, os.SEEK_SET)\n    file_content = file_object.read()\n    file_content = codecs.decode(file_content, self._ENCODING)\n\n    \n    try:\n      json_dict = json.loads(file_content)\n    except ValueError as exception:\n      raise errors.UnableToParseFile((\n          '[{0:s}] Unable to parse file {1:s} as JSON: {2!s}').format(\n              self.NAME, parser_mediator.GetDisplayName(), exception))\n    except IOError as exception:\n      raise errors.UnableToParseFile((\n          '[{0:s}] Unable to open file {1:s} for parsing as'\n          'JSON: {2!s}').format(\n              self.NAME, parser_mediator.GetDisplayName(), exception))\n\n    \n    if not set(self.REQUIRED_KEYS).issubset(set(json_dict.keys())):\n      raise errors.UnableToParseFile('File does not contain Preference data.')\n\n    extensions_setting_dict = json_dict.get('extensions')\n    if not extensions_setting_dict:\n      raise errors.UnableToParseFile(\n          '[{0:s}] {1:s} is not a valid Preference file, '\n          'does not contain extensions value.'.format(\n              self.NAME, parser_mediator.GetDisplayName()))\n\n    extensions_dict = extensions_setting_dict.get('settings')\n    if not extensions_dict:\n      raise errors.UnableToParseFile(\n          '[{0:s}] {1:s} is not a valid Preference file, '\n          'does not contain extensions settings value.'.format(\n              self.NAME, parser_mediator.GetDisplayName()))\n\n    extensions_autoupdate_dict = extensions_setting_dict.get('autoupdate')\n    if extensions_autoupdate_dict:\n      autoupdate_lastcheck_timestamp = extensions_autoupdate_dict.get(\n          'last_check', None)\n\n      if autoupdate_lastcheck_timestamp:\n        autoupdate_lastcheck = int(autoupdate_lastcheck_timestamp, 10)\n\n        event_data = ChromeExtensionsAutoupdaterEventData()\n        event_data.message = 'Chrome extensions autoupdater last run'\n\n        date_time = dfdatetime_webkit_time.WebKitTime(\n            timestamp=autoupdate_lastcheck)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_ADDED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      autoupdate_nextcheck_timestamp = extensions_autoupdate_dict.get(\n          'next_check', None)\n      if autoupdate_nextcheck_timestamp:\n        autoupdate_nextcheck = int(autoupdate_nextcheck_timestamp, 10)\n\n        event_data = ChromeExtensionsAutoupdaterEventData()\n        event_data.message = 'Chrome extensions autoupdater next run'\n\n        date_time = dfdatetime_webkit_time.WebKitTime(\n            timestamp=autoupdate_nextcheck)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_ADDED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    browser_dict = json_dict.get('browser', None)\n    if browser_dict and 'last_clear_browsing_data_time' in browser_dict:\n      last_clear_history_timestamp = browser_dict.get(\n          'last_clear_browsing_data_time', None)\n\n      if last_clear_history_timestamp:\n        last_clear_history = int(last_clear_history_timestamp, 10)\n\n        event_data = 
ChromeExtensionsAutoupdaterEventData()\n        event_data.message = 'Chrome history was cleared by user'\n\n        date_time = dfdatetime_webkit_time.WebKitTime(\n            timestamp=last_clear_history)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_DELETED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    self._ExtractExtensionInstallEvents(extensions_dict, parser_mediator)\n\n    profile_dict = json_dict.get('profile', None)\n    if profile_dict:\n      content_settings_dict = profile_dict.get('content_settings', None)\n      if content_settings_dict:\n        exceptions_dict = content_settings_dict.get('exceptions', None)\n        if exceptions_dict:\n          self._ExtractContentSettingsExceptions(\n              exceptions_dict, parser_mediator)", "docstring": "Parses a Chrome preferences file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def __init__(self, scope, parent, explicit=True):\n        \n        CodeStatement.__init__(self, scope, parent)\n        self.body = []\n        self.explicit = explicit", "docstring": "Constructor for code blocks.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\n\nKwargs:\nexplicit (bool): Whether the block is explicit in the code.", "source": "juraj-google-style"}
{"code": "def clone(self, name=None):\n    \n    if name is None:\n      name = self.module_name + \"_clone\"\n\n    return type(self)(output_channels=self.output_channels,\n                      kernel_shape=self._kernel_shape,\n                      stride=self._stride,\n                      rate=self._rate,\n                      padding=self._padding,\n                      use_bias=self._use_bias,\n                      initializers=self._initializers,\n                      partitioners=self._partitioners,\n                      regularizers=self._regularizers,\n                      mask=self._mask,\n                      data_format=self._data_format,\n                      custom_getter=self._custom_getter,\n                      name=name)", "docstring": "Returns a cloned `_ConvND` module.\n\nArgs:\nname: Optional string assigning name of cloned module. The default name\nis constructed by appending \"_clone\" to `self.module_name`.\n\nReturns:\nA copy of the current class.", "source": "juraj-google-style"}
{"code": "def tokenize(self, s, pattern=None, active=None):\n        \n        if pattern is None:\n            if self.tokenize_pattern is None:\n                pattern = r'[ \\t]+'\n            else:\n                pattern = self.tokenize_pattern\n        if active is None:\n            active = self.active\n        return self.group.tokenize(s, pattern=pattern, active=active)", "docstring": "Rewrite and tokenize the input string *s*.\n\nArgs:\ns (str): the input string to process\npattern (str, optional): the regular expression pattern on\nwhich to split tokens; defaults to `[ \\t]+`\nactive (optional): a collection of external module names\nthat may be applied if called\nReturns:\na :class:`~delphin.tokens.YyTokenLattice` containing the\ntokens and their characterization information", "source": "juraj-google-style"}
{"code": "def _process_parameters_section(func_documentation, sig, func, class_name, model_name_lowercase, parent_class, indent_level):\n    docstring = set_min_indent('Args:\\n', indent_level + 4)\n    undocumented_parameters = []\n    documented_params = {}\n    documented_kwargs = {}\n    if func_documentation is not None:\n        documented_params, func_documentation = parse_docstring(func_documentation)\n        if model_name_lowercase is not None:\n            documented_params = format_args_docstring(documented_params, model_name_lowercase)\n    param_docstring, missing_args = _process_regular_parameters(sig, func, class_name, documented_params, indent_level, undocumented_parameters)\n    docstring += param_docstring\n    kwargs_docstring = _process_kwargs_parameters(sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters)\n    docstring += kwargs_docstring\n    if len(undocumented_parameters) > 0:\n        print('\\n'.join(undocumented_parameters))\n    return docstring", "docstring": "Process the parameters section of the docstring.\n\nArgs:\nfunc_documentation (`str`): Existing function documentation (manually specified in the docstring)\nsig (`inspect.Signature`): Function signature\nfunc (`function`): Function the parameters belong to\nclass_name (`str`): Name of the class the function belongs to\nmodel_name_lowercase (`str`): Lowercase model name\nparent_class (`class`): Parent class of the function (if any)\nindent_level (`int`): Indentation level", "source": "github-repos"}
{"code": "def retrieve_token(self, token):\n        \n        headers = self.client._get_private_headers()\n        endpoint = '/tokens/{}'.format(token)\n        return self.client._get(self.client.URL_BASE + endpoint, headers=headers)", "docstring": "Retrieve Token details for a specific Token.\n\nArgs:\ntoken: The identifier of the token.\n\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_link_flags():\n    is_mac = _platform.system() == 'Darwin'\n    ver = _VERSION.split('.')[0]\n    flags = []\n    if not _MONOLITHIC_BUILD:\n        flags.append('-L%s' % get_lib())\n        if is_mac:\n            flags.append('-ltensorflow_framework.%s' % ver)\n        else:\n            flags.append('-l:libtensorflow_framework.so.%s' % ver)\n    return flags", "docstring": "Returns the linker flags for linking with TensorFlow.\n\nThe returned list of arguments can be passed to the linker for linking against\nTensorFlow. The result is platform dependent.\n\nFor example, on a typical Linux system with Python 3.7 the following command\nprints `['-L/usr/local/lib/python3.7/dist-packages/tensorflow',\n'-l:libtensorflow_framework.so.2']`\n\n>>> print(tf.sysconfig.get_link_flags())\n\nReturns:\nA list of strings for the linker flags.", "source": "github-repos"}
{"code": "def __parse_cmd_args(args, sudo, shell):\n    if (isinstance(args, tuple) and (len(args) == 1) and isinstance(args[0], tuple)):\n        args = args[0]\n    if shell:\n        if isinstance(args, six.string_types):\n            pass\n        elif (isinstance(args, (list, tuple)) and (len(args) > 1)):\n            args = ' '.join(args)\n        elif (isinstance(args, (list, tuple)) and (len(args) == 1)):\n            if isinstance(args[0], (tuple, list)):\n                args = ' '.join(args)\n            elif isinstance(args[0], six.string_types):\n                args = args[0]\n    elif isinstance(args, six.string_types):\n        args = shlex.split(args, posix=(not WIN32))\n    elif isinstance(args, (list, tuple)):\n        if (len(args) > 1):\n            args = tuple(args)\n        elif (len(args) == 1):\n            if isinstance(args[0], (tuple, list)):\n                args = tuple(args[0])\n            elif isinstance(args[0], six.string_types):\n                args = shlex.split(args[0], posix=(not WIN32))\n    if (sudo is True):\n        if (not WIN32):\n            if shell:\n                args = ('sudo ' + args)\n            else:\n                args = (tuple(['sudo']) + tuple(args))\n        else:\n            pass\n    if WIN32:\n        if ((len(args) == 1) and isinstance(args[0], six.string_types)):\n            args = shlex.split(args[0], posix=(not WIN32))\n    return args", "docstring": "When shell is True, Popen will only accept strings. No tuples\nShell really should not be true.\n\nReturns:\nargs suitable for subprocess.Popen\n\nI'm not quite sure what those are yet. Plain old string seem to work\nwell? But I remember needing shlex at some point.\n\nCommandLine:\npython -m utool.util_cplat --test-__parse_cmd_args\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_cplat import *  # NOQA\n>>> # build test data\n>>> args = 'echo \"hello world\"'\n>>> sudo = False\n>>> shell = False\n>>> # execute function\n>>> args = __parse_cmd_args(args, sudo, shell)\n>>> # verify results\n>>> result = str(args)\n>>> print(result)", "source": "codesearchnet"}
{"code": "def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):\n    return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and ((self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] % self._mesh_dimension_name_to_size[mesh_dimension_name]) == 0))", "docstring": "Whether this MTF dimension may be assigned to this mesh dimension.\n\nArgs:\nmtf_dimension_name: string, the name of a Mesh TensorFlow dimension.\nmesh_dimension_name: string, the name of a mesh dimension.\n\nReturns:\nA boolean indicating whether the assignment is valid.", "source": "codesearchnet"}
{"code": "def parse(file_contents, file_name):\n    \n\n    env = Environment()\n    result = \"\"\n    try:\n        env.parse(file_contents)\n    except Exception:\n        _, exc_value, _ = sys.exc_info()\n        result += \"ERROR: Jinja2 Template File: {0}\".format(file_name)\n        result += repr(exc_value) + '\\n'\n\n    return result", "docstring": "Takes a list of files which are assumed to be jinja2 templates and tries to\nparse the contents of the files\n\nArgs:\nfile_contents (str): File contents of a jinja file\n\nRaises:\nException: An exception is raised if the contents of the file cannot be\nparsed.", "source": "juraj-google-style"}
{"code": "def with_values(self, new_values):\n    new_values = _convert_to_ragged_tensor_values(new_values)\n    new_values.shape.with_rank_at_least(1)\n    self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])\n    if isinstance(new_values, RaggedTensor) and self._row_partition.dtype != new_values.row_splits.dtype:\n        if not ragged_config.auto_cast_partition_dtype():\n            raise ValueError('self and new_values have mismatched row_splits dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.')\n        new_values = new_values.with_row_splits_dtype(dtypes.int64)\n        return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)\n    return RaggedTensor(values=new_values, row_partition=self._row_partition, internal=True)", "docstring": "Returns a copy of `self` with `values` replaced by `new_value`.\n\nPreserves cached row-partitioning tensors such as `self.cached_nrows` and\n`self.cached_value_rowids` if they have values.\n\nArgs:\nnew_values: Potentially ragged tensor to use as the `values` for the\nreturned `RaggedTensor`.  Must have `rank > 0`, and must have the same\nnumber of rows as `self.values`.\n\nReturns:\nA `RaggedTensor`.  `result.rank = 1 + new_values.rank`.\n`result.ragged_rank = 1 + new_values.ragged_rank`", "source": "github-repos"}
{"code": "def MakePmfFromItems(t, name=''):\n    \n    pmf = Pmf(dict(t), name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a PMF from a sequence of value-probability pairs\n\nArgs:\nt: sequence of value-probability pairs\nname: string name for this PMF\n\nReturns:\nPmf object", "source": "juraj-google-style"}
{"code": "def locked_put(self, credentials):\n        \n        keyring.set_password(self._service_name, self._user_name,\n                             credentials.to_json())", "docstring": "Write Credentials to file.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "juraj-google-style"}
{"code": "def write(gctoo, out_fname, data_null='NaN', metadata_null='-666', filler_null='-666', data_float_format='%.4f'):\n    if (not out_fname.endswith('.gct')):\n        out_fname += '.gct'\n    f = open(out_fname, 'w')\n    dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]), str(gctoo.row_metadata_df.shape[1]), str(gctoo.col_metadata_df.shape[1])]\n    write_version_and_dims(VERSION, dims, f)\n    write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df, metadata_null, filler_null)\n    write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df, data_null, data_float_format, metadata_null)\n    f.close()\n    logger.info('GCT has been written to {}'.format(out_fname))", "docstring": "Write a gctoo object to a gct file.\n\nArgs:\ngctoo (gctoo object)\nout_fname (string): filename for output gct file\ndata_null (string): how to represent missing values in the data (default = \"NaN\")\nmetadata_null (string): how to represent missing values in the metadata (default = \"-666\")\nfiller_null (string): what value to fill the top-left filler block with (default = \"-666\")\ndata_float_format (string): how many decimal points to keep in representing data\n(default = 4 digits; None will keep all digits)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_app_hostname():\n    if ((not is_running_on_app_engine()) or is_running_on_localhost()):\n        return None\n    app_id = app_identity.get_application_id()\n    prefix = get_hostname_prefix()\n    suffix = 'appspot.com'\n    if (':' in app_id):\n        tokens = app_id.split(':')\n        api_name = tokens[1]\n        if (tokens[0] == 'google.com'):\n            suffix = 'googleplex.com'\n    else:\n        api_name = app_id\n    return '{0}{1}.{2}'.format(prefix, api_name, suffix)", "docstring": "Return hostname of a running Endpoints service.\n\nReturns hostname of an running Endpoints API. It can be 1) \"localhost:PORT\"\nif running on development server, or 2) \"app_id.appspot.com\" if running on\nexternal app engine prod, or \"app_id.googleplex.com\" if running as Google\nfirst-party Endpoints API, or 4) None if not running on App Engine\n(e.g. Tornado Endpoints API).\n\nReturns:\nA string representing the hostname of the service.", "source": "codesearchnet"}
{"code": "def info(name):\n    try:\n        handle_scm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_CONNECT)\n    except pywintypes.error as exc:\n        raise CommandExecutionError('Failed to connect to the SCM: {0}'.format(exc.strerror))\n    try:\n        handle_svc = win32service.OpenService(handle_scm, name, (((win32service.SERVICE_ENUMERATE_DEPENDENTS | win32service.SERVICE_INTERROGATE) | win32service.SERVICE_QUERY_CONFIG) | win32service.SERVICE_QUERY_STATUS))\n    except pywintypes.error as exc:\n        raise CommandExecutionError('Failed To Open {0}: {1}'.format(name, exc.strerror))\n    try:\n        config_info = win32service.QueryServiceConfig(handle_svc)\n        status_info = win32service.QueryServiceStatusEx(handle_svc)\n        try:\n            description = win32service.QueryServiceConfig2(handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)\n        except pywintypes.error:\n            description = 'Failed to get description'\n        delayed_start = win32service.QueryServiceConfig2(handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)\n    finally:\n        win32service.CloseServiceHandle(handle_scm)\n        win32service.CloseServiceHandle(handle_svc)\n    ret = dict()\n    try:\n        sid = win32security.LookupAccountName('', 'NT Service\\\\{0}'.format(name))[0]\n        ret['sid'] = win32security.ConvertSidToStringSid(sid)\n    except pywintypes.error:\n        ret['sid'] = 'Failed to get SID'\n    ret['BinaryPath'] = config_info[3]\n    ret['LoadOrderGroup'] = config_info[4]\n    ret['TagID'] = config_info[5]\n    ret['Dependencies'] = config_info[6]\n    ret['ServiceAccount'] = config_info[7]\n    ret['DisplayName'] = config_info[8]\n    ret['Description'] = description\n    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']\n    ret['Status_CheckPoint'] = status_info['CheckPoint']\n    ret['Status_WaitHint'] = status_info['WaitHint']\n    ret['StartTypeDelayed'] = delayed_start\n    flags = list()\n    for bit in SERVICE_TYPE:\n        if isinstance(bit, int):\n            if (config_info[0] & bit):\n                flags.append(SERVICE_TYPE[bit])\n    ret['ServiceType'] = (flags if flags else config_info[0])\n    flags = list()\n    for bit in SERVICE_CONTROLS:\n        if (status_info['ControlsAccepted'] & bit):\n            flags.append(SERVICE_CONTROLS[bit])\n    ret['ControlsAccepted'] = (flags if flags else status_info['ControlsAccepted'])\n    try:\n        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]\n    except KeyError:\n        ret['Status_ExitCode'] = status_info['Win32ExitCode']\n    try:\n        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]\n    except KeyError:\n        ret['StartType'] = config_info[1]\n    try:\n        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]\n    except KeyError:\n        ret['ErrorControl'] = config_info[2]\n    try:\n        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]\n    except KeyError:\n        ret['Status'] = status_info['CurrentState']\n    return ret", "docstring": "Get information about a service on the system\n\nArgs:\nname (str): The name of the service. This is not the display name. Use\n``get_service_name`` to find the service name.\n\nReturns:\ndict: A dictionary containing information about the service.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' service.info spooler", "source": "codesearchnet"}
{"code": "def stop_instance(self):\n    stop_url = self._get_url('stop_path')\n    res = self.rest_client.session.put(stop_url, json={})\n    _handle_http_errors(res)\n    return res.json()", "docstring": "Stop the instance for this Streaming Analytics service.\n\nReturns:\ndict: JSON response for the instance stop operation.", "source": "codesearchnet"}
{"code": "def sync_executors(self):\n    if self._context_handle:\n        pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)\n    else:\n        raise ValueError('Context is not initialized.')", "docstring": "Sync both local executors and the ones on remote workers.\n\nIn async execution mode, local function calls can return before the\ncorresponding remote op/function execution requests are completed. Calling\nthis method creates a synchronization barrier for remote executors. It only\nreturns when all remote pending nodes are finished, potentially with errors\nif any remote executors are in error state.\n\nRaises:\nValueError: if context is not initialized.", "source": "github-repos"}
{"code": "def back_propagation(self, delta_arr):\n        \n        re_encoder_delta_arr, delta_hidden_arr, re_encoder_grads_list = self.__retrospective_encoder.hidden_back_propagate(\n            delta_arr[:, -1]\n        )\n        re_encoder_grads_list.insert(0, None)\n        re_encoder_grads_list.insert(0, None)\n\n        observed_arr, encoded_arr, decoded_arr, re_encoded_arr = self.__inferenced_tuple\n        delta_arr = self.__encoder_decoder_controller.computable_loss.compute_delta(\n            decoded_arr, \n            observed_arr\n        )\n        delta_arr[:, -1] += re_encoder_delta_arr[:, -1]\n\n        decoder_grads_list, encoder_delta_arr, encoder_grads_list = self.__encoder_decoder_controller.back_propagation(\n            delta_arr\n        )\n        return re_encoder_grads_list, decoder_grads_list, encoder_delta_arr, encoder_grads_list", "docstring": "Back propagation.\n\nArgs:\ndelta_output_arr:    Delta.\n\nReturns:\nTuple data.\n- decoder's `list` of gradations,\n- encoder's `np.ndarray` of Delta,\n- encoder's `list` of gradations.", "source": "juraj-google-style"}
{"code": "def recombine(self, parents: List[pg.DNA], global_state: pg.geno.AttributeDict, step: int) -> List[pg.DNA]:", "docstring": "Generate a list of child DNA based on the list of parents given.\n\nUser should override this method with optional keyword arguments\n'global_state' and 'step'.\n\nThe parents DNA contains a metadata field 'generation', which is the\ngeneration of the parent DNA. If the Recombinator does not assign this\nfield for the new child DNA, the child DNA will have the maximum generation\nfrom the parents plus 1.\n\nArgs:\nparents: Parent trials.\nglobal_state: An `AttributeDict` object as the global state container,\nwhich is readable/writable during the operation.\nstep: Number of examples historically proposed, which can be used for\ndetermining a cross over schedule.\n\nReturns:\nA list of generated child DNA.", "source": "github-repos"}
{"code": "def console_get_width(con: tcod.console.Console) -> int:\n    return int(lib.TCOD_console_get_width(_console(con)))", "docstring": "Return the width of a console.\n\nArgs:\ncon (Console): Any Console instance.\n\nReturns:\nint: The width of a Console.\n\n.. deprecated:: 2.0\nUse `Console.width` instead.", "source": "codesearchnet"}
{"code": "def get(account_id, account_type_id=None):\n        \n        if type(account_id) == str:\n            args = {'account_name': account_id}\n        else:\n            args = {'account_id': account_id}\n\n        if account_type_id:\n            args['account_type_id'] = account_type_id\n\n        return db.Account.find_one(**args)", "docstring": "Return account by ID and type\n\nArgs:\naccount_id (`int`, `str`): Unique Account identifier\naccount_type_id (str): Type of account to get\n\nReturns:\n:obj:`Account`: Returns an Account object if found, else None", "source": "juraj-google-style"}
{"code": "def __init__(self, scaffold=None, master='', config=None, checkpoint_dir=None, checkpoint_filename_with_path=None):\n    self._checkpoint_dir = checkpoint_dir\n    self._checkpoint_filename_with_path = checkpoint_filename_with_path\n    self._scaffold = scaffold or Scaffold()\n    self._session_manager = None\n    self._master = master\n    self._config = config", "docstring": "Initializes a chief session creator.\n\nArgs:\nscaffold: A `Scaffold` used for gathering or building supportive ops. If\nnot specified a default one is created. It's used to finalize the graph.\nmaster: `String` representation of the TensorFlow master to use.\nconfig: `ConfigProto` proto used to configure the session.\ncheckpoint_dir: A string.  Optional path to a directory where to restore\nvariables.\ncheckpoint_filename_with_path: Full file name path to the checkpoint file.", "source": "github-repos"}
{"code": "def can_user_access_build(param_name):\n    build_id = (request.args.get(param_name, type=int) or request.form.get(param_name, type=int) or request.json[param_name])\n    if (not build_id):\n        logging.debug('Build ID in param_name=%r was missing', param_name)\n        abort(400)\n    ops = operations.UserOps(current_user.get_id())\n    (build, user_is_owner) = ops.owns_build(build_id)\n    if (not build):\n        logging.debug('Could not find build_id=%r', build_id)\n        abort(404)\n    if (current_user.is_authenticated() and (not user_is_owner)):\n        ops.evict()\n        claim_invitations(current_user)\n        (build, user_is_owner) = ops.owns_build(build_id)\n    if (not user_is_owner):\n        if (current_user.is_authenticated() and current_user.superuser):\n            pass\n        elif (request.method != 'GET'):\n            logging.debug('No way to log in user via modifying request')\n            abort(403)\n        elif build.public:\n            pass\n        elif current_user.is_authenticated():\n            logging.debug('User does not have access to this build')\n            abort(flask.Response('You cannot access this build', 403))\n        else:\n            logging.debug('Redirecting user to login to get build access')\n            abort(login.unauthorized())\n    elif (not login_fresh()):\n        logging.debug('User login is old; forcing refresh')\n        abort(login.needs_refresh())\n    return build", "docstring": "Determines if the current user can access the build ID in the request.\n\nArgs:\nparam_name: Parameter name to use for getting the build ID from the\nrequest. Will fetch from GET or POST requests.\n\nReturns:\nThe build the user has access to.", "source": "codesearchnet"}
{"code": "def __init__(self, filenames, index=0, buffer_size=None, _account_id=None,\n               delimiter=None):\n    \n    self._filenames = filenames\n    self._index = index\n    self._buffer_size = buffer_size\n    self._account_id = _account_id\n    self._delimiter = delimiter\n    self._bucket = None\n    self._bucket_iter = None\n\n    \n    \n    \n    \n    \n    self._fail_on_missing_input = None", "docstring": "Initialize a GoogleCloudStorageInputReader instance.\n\nArgs:\nfilenames: A list of Google Cloud Storage filenames of the form\n'/bucket/objectname'.\nindex: Index of the next filename to read.\nbuffer_size: The size of the read buffer, None to use default.\n_account_id: Internal use only. See cloudstorage documentation.\ndelimiter: Delimiter used as path separator. See class doc for details.", "source": "juraj-google-style"}
{"code": "def _StopStyleSelectionMethod(self, doc):\n    if (not self.show_stop_hierarchy):\n        return (lambda stop: (None, None))\n    self._CreateStyle(doc, 'stop_entrance', {'IconStyle': {'color': 'ff0000ff'}})\n    self._CreateStyle(doc, 'entrance_connection', {'LineStyle': {'color': 'ff0000ff', 'width': '2'}})\n    self._CreateStyle(doc, 'stop_platform', {'IconStyle': {'color': 'ffff0000'}})\n    self._CreateStyle(doc, 'platform_connection', {'LineStyle': {'color': 'ffff0000', 'width': '2'}})\n    self._CreateStyle(doc, 'stop_standalone', {'IconStyle': {'color': 'ff00ff00'}})\n\n    def StyleSelectionMethod(stop):\n        if (stop.location_type == transitfeed.Stop.LOCATION_TYPE_STATION):\n            return ('stop_station', None)\n        elif (stop.location_type == googletransit.Stop.LOCATION_TYPE_ENTRANCE):\n            return ('stop_entrance', 'entrance_connection')\n        elif stop.parent_station:\n            return ('stop_platform', 'platform_connection')\n        return ('stop_standalone', None)\n    return StyleSelectionMethod", "docstring": "Create a method to determine which style to apply to a stop placemark.\n\nArgs:\ndoc: the KML document.\n\nReturns:\nA function that should accept a Stop argument and return a tuple of\n(stop placemark style id, pathway placemark style id).  Either style id\ncan be None, indicating no style should be set.\n\nGiven a Stop, we need to determine what KML style to apply to the stops'\nplacemark.  In the most basic case, no styling is applied.  However, if\nshow_stop_hierarchy is enabled, we style each type of stop differently\ndepending on if the stop is a station, platform, entrance, etc.  This method\nreturns a function that is used to pick which style id should be associated\nwith a stop placemark, or None if no style should be applied.  It also\noptionally returns a style id to associate with any line-string connections\nassociated with a stop (eg. to show the pathway between an entrance and a\nstation).", "source": "codesearchnet"}
{"code": "def _ReadCompressedData(self, read_size):\n    \n    self._uncompressed_data = self._zip_ext_file.read(read_size)\n    self._uncompressed_data_size = len(self._uncompressed_data)", "docstring": "Reads compressed data from the file-like object.\n\nArgs:\nread_size (int): number of bytes of compressed data to read.", "source": "juraj-google-style"}
{"code": "def to_json(value: Any, **kwargs) -> Any:\n    if isinstance(value, (type(None), bool, int, float, str)):\n        v = value\n    elif isinstance(value, JSONConvertible):\n        v = value.to_json(**kwargs)\n    elif isinstance(value, tuple):\n        v = [JSONConvertible.TUPLE_MARKER] + to_json(list(value), **kwargs)\n    elif isinstance(value, list):\n        v = [to_json(item, **kwargs) for item in value]\n    elif isinstance(value, dict):\n        v = {k: to_json(v, **kwargs) for k, v in value.items()}\n    elif isinstance(value, (type, typing.GenericAlias)):\n        v = _type_to_json(value)\n    elif inspect.isbuiltin(value):\n        v = _builtin_function_to_json(value)\n    elif inspect.isfunction(value):\n        v = _function_to_json(value)\n    elif inspect.ismethod(value):\n        v = _method_to_json(value)\n    elif isinstance(value, typing._Final):\n        v = _annotation_to_json(value)\n    elif value is ...:\n        v = {JSONConvertible.TYPE_NAME_KEY: 'type', 'name': 'builtins.Ellipsis'}\n    else:\n        v, converted = (None, False)\n        if JSONConvertible.TYPE_CONVERTER is not None:\n            converter = JSONConvertible.TYPE_CONVERTER(type(value))\n            if converter:\n                v = to_json(converter(value))\n                converted = True\n        if not converted:\n            v = _OpaqueObject(value).to_json(**kwargs)\n    return v", "docstring": "Serializes a (maybe) JSONConvertible value into a plain Python object.\n\nArgs:\nvalue: value to serialize. Applicable value types are:\n\n* Builtin python types: None, bool, int, float, string;\n* JSONConvertible types;\n* List types;\n* Tuple types;\n* Dict types.\n\n**kwargs: Keyword arguments to pass to value.to_json if value is\nJSONConvertible.\n\nReturns:\nJSON value.", "source": "github-repos"}
{"code": "def to_python(self):\n    return (self.selector, COMPARISON_MAP.get(self.comparison, self.comparison), self.argument)", "docstring": "Deconstruct the ``Constraint`` instance to a tuple.\n\nReturns:\ntuple: The deconstructed ``Constraint``.", "source": "codesearchnet"}
{"code": "def _parse_username(self, config):\n        \n        (username, priv, role, nopass, fmt, secret, sshkey) = config\n        resource = dict()\n        resource['privilege'] = priv\n        resource['role'] = role\n        resource['nopassword'] = nopass == 'nopassword'\n        resource['format'] = fmt\n        resource['secret'] = secret\n        resource['sshkey'] = sshkey\n        return {username: resource}", "docstring": "Scans the config block and returns the username as a dict\n\nArgs:\nconfig (str): The config block to parse\n\nReturns:\ndict: A resource dict that is intended to be merged into the\nuser resource", "source": "juraj-google-style"}
{"code": "def AddBackpropAccumulatedValue(self, history_value, value, dead_branch=False):\n    history_ctxt = history_value.op._get_control_flow_context()\n    cond_ctxt = None\n    value_ctxt = value.op._get_control_flow_context()\n    while value_ctxt and value_ctxt != history_ctxt:\n        if isinstance(value_ctxt, control_flow_ops.CondContext):\n            cond_ctxt = value_ctxt\n            break\n        value_ctxt = value_ctxt.outer_context\n    with ops.control_dependencies(None):\n        self.grad_context.Enter()\n        if cond_ctxt:\n            grad_state = self\n            pred = None\n            while pred is None and grad_state:\n                pred = grad_state.history_map.get(cond_ctxt.pred.name)\n                grad_state = grad_state.outer_grad_state\n            if pred is None:\n                pred = cond_ctxt.pred\n            branch = 1 - cond_ctxt.branch if dead_branch else cond_ctxt.branch\n            history_value = control_flow_ops._SwitchRefOrTensor(history_value, pred)[branch]\n        pop = gen_data_flow_ops.stack_pop_v2(history_value, value.dtype.base_dtype)\n        pop.set_shape(value.get_shape())\n        self.grad_context.Exit()\n    parallel_iterations = self.grad_context.parallel_iterations\n    if parallel_iterations > 1:\n        self.grad_sync._add_control_input(pop.op)\n    return pop", "docstring": "Add the getter for an accumulated value in the grad context.\n\nThis is added to the backprop loop. Called in the grad context to\nget the value of an accumulated value. The stack pop op must be guarded\nby the pred of the controlling cond.\n\nArgs:\nhistory_value: The history (a stack) of a value.\nvalue: The value that is pushed onto the stack.\ndead_branch: True iff the tensor is on a dead branch of a cond.\n\nReturns:\nThe current value (the top of the stack).", "source": "github-repos"}
{"code": "def create(self, resource):\n    return self.service.create(resource, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Create the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource): Create a data model object with attributes matching those of the resource.\n\nReturns:\n(intern.resource.boss.BossResource): Returns resource of type requested on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def cancel(self, subscription_id, data={}, **kwargs):\n    url = '{}/{}/cancel'.format(self.base_url, subscription_id)\n    return self.post_url(url, data, **kwargs)", "docstring": "Cancel subscription given by subscription_id\n\nArgs:\nsubscription_id : Id for which subscription has to be cancelled\n\nReturns:\nSubscription Dict for given subscription id", "source": "codesearchnet"}
{"code": "def VerifyServerControlResponse(self, http_object):\n    if (http_object.code != 200):\n        return False\n    try:\n        (http_object.messages, http_object.source, http_object.nonce) = self.communicator.DecryptMessage(http_object.data)\n        return True\n    except communicator.DecodingError as e:\n        logging.info('Protobuf decode error: %s.', e)\n        return False", "docstring": "Verify the server response to a 'control' endpoint POST message.\n\nWe consider the message correct if and only if we can decrypt it\nproperly. Note that in practice we can not use the HTTP status to figure out\nif the request worked because captive proxies have a habit of lying and\nreturning a HTTP success code even when there is no connectivity.\n\nArgs:\nhttp_object: The HTTPObject returned from the HTTP transaction.\n\nReturns:\nTrue if the http_object is correct. False if it is not valid.\n\nSide Effect:\nFill in the decoded_data attribute in the http_object.", "source": "codesearchnet"}
{"code": "def greedy_decode(logits_fn, initial_ids, temperature=0.0, initial_states=None, eos_id=EOS_ID, forced_ids=None, use_tpu=True):\n    length_dim = initial_ids.shape.dims[(- 1)]\n    mesh = initial_ids.mesh\n    num_steps = mtf.constant(mesh, length_dim.size, dtype=tf.int32)\n\n    def cond_fn(step_num, prev_ids, *unused_states):\n        'Should we run another loop iteration.'\n        overflow = mtf.equal(step_num, num_steps)\n        has_eos = mtf.reduce_any(mtf.equal(prev_ids, eos_id), reduced_dim=length_dim)\n        all_has_eos = mtf.reduce_all(has_eos)\n        return mtf.logical_not(mtf.logical_or(overflow, all_has_eos))\n\n    def body_fn(step_num, ids, *states):\n        'Body function for greedy decoding.\\n\\n    Args:\\n      step_num: a mtf.Tensor\\n      ids: a mtf.Tensor\\n      *states: additional mtf.Tensors\\n    Returns:\\n      new_step_num, new_ids, *new_states\\n    '\n        (logits, new_states) = logits_fn(step_num, ids, states)\n        vocab_dim = logits.shape.dims[(- 1)]\n        new_ids = mtf.sample_with_temperature(logits, vocab_dim, temperature)\n        if (forced_ids is not None):\n            forced = mtf.gather(forced_ids, step_num, length_dim)\n            new_ids = (forced + (new_ids * mtf.to_int32(mtf.equal(forced, 0))))\n        ids += (new_ids * mtf.one_hot(step_num, length_dim, dtype=tf.int32))\n        new_step_num = (step_num + 1)\n        return ([new_step_num, ids] + new_states)\n    initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)\n    while_loop_inputs = ([initial_step_num, initial_ids] + initial_states)\n    (final_step_num, mtf_samples) = mtf.while_loop(cond_fn, body_fn, while_loop_inputs, num_loop_vars=(None if use_tpu else 2))[:2]\n    mtf_samples = mtf.Print(mtf_samples, [final_step_num], 'output_length')\n    return mtf_samples", "docstring": "Greedy decoding.\n\nArgs:\nlogits_fn: Interface to the model, to provide logits.\nShoud take:\nstep_num - mtf Scalar\nids - mtf Tensor with shape [..., length]\nstates - list of mtf.Tensor\nShould return:\nlogits - [batch, vocab_size]\nnew_states - list of mtf.Tensor\ninitial_ids: mtf.Tensor with shape [..., length], containing zeros.\ntemperature: a float between 0.0 (argmax) and 1.0 (random)\ninitial_states: list of mtf.Tensor\neos_id: ID for end of sentence.\nforced_ids: optional mtf.Tensor with shape [..., length]\nuse_tpu: a boolean\nReturns:\nTensor with shape [..., length]", "source": "codesearchnet"}
{"code": "def createEmails(nicks=None, nicksFile=None):\n    \n    candidate_emails = set()\n    if nicks != None:\n        for n in nicks:\n            for e in email_providers.domains:\n                candidate_emails.add(\"{}@{}\".format(n, e))\n    elif nicksFile != None:\n        with open(nicksFile, \"r\") as iF:\n            nicks = iF.read().splitlines()\n            for n in nicks:\n                for e in email_providers.domains:\n                    candidate_emails.add(\"{}@{}\".format(n, e))\n    return candidate_emails", "docstring": "Method that globally permits to generate the emails to be checked.\n\nArgs:\n-----\nnicks: List of aliases.\nnicksFile: The filepath to the aliases file.\n\nReturns:\n--------\nlist: list of emails to be checked.", "source": "juraj-google-style"}
{"code": "def share(self, name, item):\n        \n\n        try:\n\n            if isinstance(item, s_telepath.Aware):\n                item.onTeleShare(self, name)\n\n            self.shared[name] = item\n\n        except Exception:\n            logger.exception(f'onTeleShare() error for: {name}')", "docstring": "Share an object via the telepath protocol.\n\nArgs:\nname (str): Name of the shared object\nitem (object): The object to share over telepath.", "source": "juraj-google-style"}
{"code": "def encoder(self, inputs, n_layers=3):\n    \n    latent_dims = self.hparams.z_dim\n\n    shape_as_list = inputs.shape.as_list()\n    if len(shape_as_list) != 5:\n      raise ValueError(\"Expected inputs to be a 5-D, got %d\" %\n                       len(shape_as_list))\n    if inputs.dtype != tf.float32:\n      raise ValueError(\"Expected dtype tf.float32, got %s\" % inputs.dtype)\n\n    \n    batch_size, _ = shape_as_list[:2]\n    inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])\n    n_filters = 64\n    rectified = None\n\n    \n    \n    \n    padding = [[0, 0], [1, 1], [1, 1], [0, 0]]\n    for i in range(n_layers):\n      with tf.variable_scope(\"layer_%d\" % (i + 1)):\n        n_filters *= 2**i\n        if i:\n          padded = tf.pad(rectified, padding)\n        else:\n          padded = tf.pad(inputs, padding)\n        convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,\n                                     strides=2, padding=\"VALID\")\n        normalized = tf.contrib.layers.instance_norm(convolved)\n        rectified = tf.nn.leaky_relu(normalized, alpha=0.2)\n\n    \n    pooled = tf.nn.avg_pool(\n        rectified, [1] + rectified.shape[1:3].as_list() + [1],\n        strides=[1, 1, 1, 1], padding=\"VALID\")\n    squeezed = tf.squeeze(pooled, [1, 2])\n\n    \n    \n    with tf.variable_scope(\"z_mu\"):\n      z_mu = tf.layers.dense(squeezed, latent_dims)\n    with tf.variable_scope(\"z_log_sigma_sq\"):\n      z_log_var = tf.layers.dense(squeezed, latent_dims)\n      z_log_var = tf.clip_by_value(z_log_var, -10, 10)\n\n    \n    z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))\n    z_log_var = tf.reshape(\n        z_log_var, (batch_size, -1, latent_dims))\n    return z_mu, z_log_var", "docstring": "Convnet that encodes inputs into mean and std of a gaussian.\n\nArgs:\ninputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)\nn_layers: Number of layers.\n\nReturns:\nz_mu: Mean of the latent gaussians.\nz_log_var: log(var) of the latent gaussians.\n\nRaises:\nValueError: If inputs is not a 5-D tensor or not float32.", "source": "juraj-google-style"}
{"code": "def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):\n        \n\n        if not pdb_file_type:\n            pdb_file_type = self.pdb_file_type\n\n        counter = 0\n        for g in tqdm(self.genes):\n            pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)\n\n            if pdbs:\n                counter += len(pdbs)\n\n        log.info('Updated PDB metadata dataframe. See the \"df_pdb_metadata\" attribute for a summary dataframe.')\n        log.info('Saved {} structures total'.format(counter))", "docstring": "Download ALL mapped experimental structures to each protein's structures directory.\n\nArgs:\noutdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is\ndesired\npdb_file_type (str): Type of PDB file to download, if not already set or other format is desired\nforce_rerun (bool): If files should be re-downloaded if they already exist", "source": "juraj-google-style"}
{"code": "def StartsWith(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, 'STARTS_WITH')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"starts with\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def write_float(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sf' % endian, value)", "docstring": "Pack the value as a float and write 4 bytes to the stream.\n\nArgs:\nvalue (number): the value to write to the stream.\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def get_params(img, output_size):\n        \n        w, h, *_ = img.shape\n\n        th, tw = output_size\n        if w == tw and h == th:\n            return 0, 0, h, w\n\n        i = random.randint(0, h - th)\n        j = random.randint(0, w - tw)\n        return i, j, th, tw", "docstring": "Get parameters for ``crop`` for a random crop.\n\nArgs:\nimg (PIL Image): Image to be cropped.\noutput_size (tuple): Expected output size of the crop.\n\nReturns:\ntuple: params (i, j, h, w) to be passed to ``crop`` for random crop.", "source": "juraj-google-style"}
{"code": "def _GetInstanceConfig(self):\n    try:\n        instance_data = self.metadata_dict['instance']['attributes']\n    except KeyError:\n        instance_data = {}\n        self.logger.warning('Instance attributes were not found.')\n    try:\n        project_data = self.metadata_dict['project']['attributes']\n    except KeyError:\n        project_data = {}\n        self.logger.warning('Project attributes were not found.')\n    return (instance_data.get('google-instance-configs') or project_data.get('google-instance-configs'))", "docstring": "Get the instance configuration specified in metadata.\n\nReturns:\nstring, the instance configuration data.", "source": "codesearchnet"}
{"code": "def compute_output_shape(self, input_shape):\n    if context.executing_eagerly():\n        self._maybe_build(input_shape)\n        with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default():\n            input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n\n            def _make_placeholder_like(shape):\n                ph = backend.placeholder(shape=shape, dtype=self.dtype)\n                ph._keras_mask = None\n                return ph\n            inputs = nest.map_structure(_make_placeholder_like, input_shape)\n            try:\n                outputs = self(inputs, training=False)\n            except TypeError as e:\n                raise NotImplementedError(\"We could not automatically infer the static shape of the layer's output. Please implement the `compute_output_shape` method on your layer (%s).\" % self.__class__.__name__) from e\n        return nest.map_structure(lambda t: t.shape, outputs)\n    raise NotImplementedError('Please run in eager mode or implement the `compute_output_shape` method on your layer (%s).' % self.__class__.__name__)", "docstring": "Computes the output shape of the layer.\n\nIf the layer has not been built, this method will call `build` on the\nlayer. This assumes that the layer will later be used with inputs that\nmatch the input shape provided here.\n\nArgs:\ninput_shape: Shape tuple (tuple of integers)\nor list of shape tuples (one per output tensor of the layer).\nShape tuples can include None for free dimensions,\ninstead of an integer.\n\nReturns:\nAn input shape tuple.", "source": "github-repos"}
{"code": "def check_managed_pipeline(name='', app_name=''):\n    \n    *pipeline_name_prefix, bracket_region = name.split()\n    region = bracket_region.strip('[]')\n\n    not_managed_message = '\"{0}\" is not managed.'.format(name)\n\n    if 'onetime' in region:\n        LOG.info('\"%s\" is a onetime, marked for cleaning.', name)\n        return region\n\n    if not all([bracket_region.startswith('['), bracket_region.endswith(']')]):\n        LOG.debug('\"%s\" does not end with \"[region]\".', name)\n        raise ValueError(not_managed_message)\n\n    if len(pipeline_name_prefix) is not 1:\n        LOG.debug('\"%s\" does not only have one word before [region].', name)\n        raise ValueError(not_managed_message)\n\n    if app_name not in pipeline_name_prefix:\n        LOG.debug('\"%s\" does not use \"%s\" before [region].', name, app_name)\n        raise ValueError(not_managed_message)\n\n    return region", "docstring": "Check a Pipeline name is a managed format **app_name [region]**.\n\nArgs:\nname (str): Name of Pipeline to check.\napp_name (str): Name of Application to find in Pipeline name.\n\nReturns:\nstr: Region name from managed Pipeline name.\n\nRaises:\nValueError: Pipeline is not managed.", "source": "juraj-google-style"}
{"code": "def _create_inbound_stream(self, config=None):\n        \n        if config is None:\n            raise ValueError('No stream config to create stream from.')\n\n        name = self._get_stream_name(config)\n        stream_handlers = self._get_stream_handlers(config, name)\n        stream_input = config.get('input', None)\n        if stream_input is None:\n            raise(cfg.AitConfigMissing('inbound stream {}\\'s input'.format(name)))\n\n        if type(stream_input[0]) is int:\n            return PortInputStream(name,\n                                   stream_input,\n                                   stream_handlers,\n                                   zmq_args={'zmq_context': self.broker.context,\n                                             'zmq_proxy_xsub_url': self.broker.XSUB_URL,\n                                             'zmq_proxy_xpub_url': self.broker.XPUB_URL})\n        else:\n            return ZMQStream(name,\n                             stream_input,\n                             stream_handlers,\n                             zmq_args={'zmq_context': self.broker.context,\n                                       'zmq_proxy_xsub_url': self.broker.XSUB_URL,\n                                       'zmq_proxy_xpub_url': self.broker.XPUB_URL})", "docstring": "Creates an inbound stream from its config.\n\nParams:\nconfig:       stream configuration as read by ait.config\nReturns:\nstream:       a Stream\nRaises:\nValueError:   if any of the required config values are missing", "source": "juraj-google-style"}
{"code": "def list_media_services_rg(access_token, subscription_id, rgname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', rgname, '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])\n    return do_get(endpoint, access_token)", "docstring": "List the media services in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def create_media_assetfile(access_token, parent_asset_id, name, is_primary='false', is_encrypted='false', encryption_scheme='None', encryptionkey_id='None'):\n    path = '/Files'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    if (encryption_scheme == 'StorageEncryption'):\n        body = (((((((((((((('{ \\t\\t\\t\"IsEncrypted\": \"' + is_encrypted) + '\", \\t\\t\\t\"EncryptionScheme\": \"') + encryption_scheme) + '\", \\t\\t\\t\"EncryptionVersion\": \"') + '1.0') + '\", \\t\\t\\t\"EncryptionKeyId\": \"') + encryptionkey_id) + '\", \\t\\t\\t\"IsPrimary\": \"') + is_primary) + '\", \\t\\t\\t\"MimeType\": \"video/mp4\", \\t\\t\\t\"Name\": \"') + name) + '\", \\t\\t\\t\"ParentAssetId\": \"') + parent_asset_id) + '\" \\t\\t}')\n    else:\n        body = (((((('{ \\t\\t\\t\"IsPrimary\": \"' + is_primary) + '\", \\t\\t\\t\"MimeType\": \"video/mp4\", \\t\\t\\t\"Name\": \"') + name) + '\", \\t\\t\\t\"ParentAssetId\": \"') + parent_asset_id) + '\" \\t\\t}')\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Asset File.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nparent_asset_id (str): Media Service Parent Asset ID.\nname (str): Media Service Asset Name.\nis_primary (str): Media Service Primary Flag.\nis_encrypted (str): Media Service Encryption Flag.\nencryption_scheme (str): Media Service Encryption Scheme.\nencryptionkey_id (str): Media Service Encryption Key ID.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def update_dict_recursive(editable_dict: dict, editing_dict: dict) -> None:\n    \n    for k, v in editing_dict.items():\n        if isinstance(v, collections.Mapping):\n            update_dict_recursive(editable_dict.get(k, {}), v)\n        else:\n            editable_dict[k] = v", "docstring": "Updates dict recursively\n\nYou need to use this function to update dictionary if depth of editing_dict is more then 1\n\nArgs:\neditable_dict: dictionary, that will be edited\nediting_dict: dictionary, that contains edits\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def panel(self, panel_id):\n    if (not isinstance(panel_id, ObjectId)):\n        panel_id = ObjectId(panel_id)\n    panel_obj = self.panel_collection.find_one({'_id': panel_id})\n    return panel_obj", "docstring": "Fetch a gene panel by '_id'.\n\nArgs:\npanel_id (str, ObjectId): str or ObjectId of document ObjectId\n\nReturns:\ndict: panel object or `None` if panel not found", "source": "codesearchnet"}
{"code": "def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):\n    \n    find_specs = []\n    for name in artifact_filter_names:\n      definition = self._artifacts_registry.GetDefinitionByName(name)\n      if not definition:\n        logger.debug('undefined artifact definition: {0:s}'.format(name))\n        continue\n\n      logger.debug('building find spec from artifact definition: {0:s}'.format(\n          name))\n      artifact_find_specs = self._BuildFindSpecsFromArtifact(\n          definition, environment_variables)\n      find_specs.extend(artifact_find_specs)\n\n    for find_spec in find_specs:\n      if isinstance(find_spec, file_system_searcher.FindSpec):\n        self.file_system_find_specs.append(find_spec)\n\n      elif isinstance(find_spec, registry_searcher.FindSpec):\n        self.registry_find_specs.append(find_spec)\n\n      else:\n        logger.warning('Unsupported find specification type: {0:s}'.format(\n            type(find_spec)))", "docstring": "Builds find specifications from artifact definitions.\n\nArgs:\nartifact_filter_names (list[str]): names of artifact definitions that are\nused for filtering file system and Windows Registry key paths.\nenvironment_variables (Optional[list[EnvironmentVariableArtifact]]):\nenvironment variables.", "source": "juraj-google-style"}
{"code": "def phase_uniquizer(all_phases):\n    measurement_name_maker = UniqueNameMaker(itertools.chain.from_iterable((phase.measurements.keys() for phase in all_phases if phase.measurements)))\n    attachment_names = list(itertools.chain.from_iterable((phase.attachments.keys() for phase in all_phases)))\n    attachment_names.extend(itertools.chain.from_iterable(([('multidim_' + name) for (name, meas) in phase.measurements.items() if (meas.dimensions is not None)] for phase in all_phases if phase.measurements)))\n    attachment_name_maker = UniqueNameMaker(attachment_names)\n    for phase in all_phases:\n        for (name, _) in sorted(phase.measurements.items()):\n            old_name = name\n            name = measurement_name_maker.make_unique(name)\n            phase.measurements[old_name].name = name\n            phase.measurements[name] = phase.measurements.pop(old_name)\n        for (name, _) in sorted(phase.attachments.items()):\n            old_name = name\n            name = attachment_name_maker.make_unique(name)\n            phase.attachments[old_name].name = name\n            phase.attachments[name] = phase.attachments.pop(old_name)\n    return all_phases", "docstring": "Makes the names of phase measurement and attachments unique.\n\nThis function will make the names of measurements and attachments unique.\nIt modifies the input all_phases.\n\nArgs:\nall_phases: the phases to make unique\n\nReturns:\nthe phases now modified.", "source": "codesearchnet"}
{"code": "def install_js():\n    target_jsdir = join(SERVER, 'static', 'js')\n    target_cssdir = join(SERVER, 'static', 'css')\n    target_tslibdir = join(SERVER, 'static', 'lib')\n    STATIC_ASSETS = [join(JS, 'bokeh.js'), join(JS, 'bokeh.min.js'), join(CSS, 'bokeh.css'), join(CSS, 'bokeh.min.css')]\n    if (not all((exists(a) for a in STATIC_ASSETS))):\n        print(BOKEHJS_INSTALL_FAIL)\n        sys.exit(1)\n    if exists(target_jsdir):\n        shutil.rmtree(target_jsdir)\n    shutil.copytree(JS, target_jsdir)\n    if exists(target_cssdir):\n        shutil.rmtree(target_cssdir)\n    shutil.copytree(CSS, target_cssdir)\n    if exists(target_tslibdir):\n        shutil.rmtree(target_tslibdir)\n    if exists(TSLIB):\n        lib = {'lib.es5.d.ts', 'lib.dom.d.ts', 'lib.es2015.core.d.ts', 'lib.es2015.promise.d.ts', 'lib.es2015.symbol.d.ts', 'lib.es2015.iterable.d.ts'}\n        shutil.copytree(TSLIB, target_tslibdir, ignore=(lambda _, files: [f for f in files if (f not in lib)]))", "docstring": "Copy built BokehJS files into the Python source tree.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n        \n        self.unitname = kwargs.get('unitname', self.unitname)\n        self.unitmultiplier = kwargs.get('unitmultiplier', self.unitmultiplier)", "docstring": "Distance unit parameter.\n\nArgs:\n\n- **unitname**: A pycrs.elements.units.UnitName instance with the name given by each supported format.\n- **unitmultiplier**: A pycrs.elements.units.UnitMultiplier instance.", "source": "juraj-google-style"}
{"code": "def set_lock_state(self, code, device_label, state):\n        \n        response = None\n        try:\n            response = requests.put(\n                urls.set_lockstate(self._giid, device_label, state),\n                headers={\n                    'Accept': 'application/json, text/javascript, */*; q=0.01',\n                    'Content-Type': 'application/json',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                data=json.dumps({\"code\": str(code)}))\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Lock or unlock\n\nArgs:\ncode (str): Lock code\ndevice_label (str): device label of lock\nstate (str): 'lock' or 'unlock'", "source": "juraj-google-style"}
{"code": "def print_layer_summary(layer):\n    try:\n        output_shape = layer.output_shape\n    except AttributeError:\n        output_shape = 'multiple'\n    except RuntimeError:\n        output_shape = '?'\n    name = layer.name\n    cls_name = layer.__class__.__name__\n    if not layer.built and (not getattr(layer, '_is_graph_network', False)):\n        params = '0 (unused)'\n    else:\n        params = layer.count_params()\n    fields = [name + ' (' + cls_name + ')', output_shape, params]\n    print_row(fields, positions)", "docstring": "Prints a summary for a single layer.\n\nArgs:\nlayer: target layer.", "source": "github-repos"}
{"code": "def init_cache(self, batch_size, max_length, encoder_outputs):\n    decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n    decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n    decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape)\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs)\n    init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward)\n    return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.\nencoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):\n`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:\n`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)\nis a sequence of hidden-states at the output of the last layer of the encoder. Used in the\ncross-attention of the decoder.", "source": "github-repos"}
{"code": "def all_events_filter(\n            self,\n            from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,\n            to_block: BlockSpecification = 'latest',\n    ) -> StatelessFilter:\n        \n        return self.events_filter(None, from_block, to_block)", "docstring": "Install a new filter for all the events emitted by the current token network contract\n\nArgs:\nfrom_block: Create filter starting from this block number (default: 0).\nto_block: Create filter stopping at this block number (default: 'latest').\n\nReturn:\nThe filter instance.", "source": "juraj-google-style"}
{"code": "def _get_operation_input_field_values(self, metadata, file_input):\n    \n\n    \n    \n    \n    input_args = metadata['request']['ephemeralPipeline']['inputParameters']\n    vals_dict = metadata['request']['pipelineArgs']['inputs']\n\n    \n    names = [\n        arg['name'] for arg in input_args if ('localCopy' in arg) == file_input\n    ]\n\n    \n    return {name: vals_dict[name] for name in names if name in vals_dict}", "docstring": "Returns a dictionary of envs or file inputs for an operation.\n\nArgs:\nmetadata: operation metadata field\nfile_input: True to return a dict of file inputs, False to return envs.\n\nReturns:\nA dictionary of input field name value pairs", "source": "juraj-google-style"}
{"code": "def _PrintAnalysisStatusHeader(self, processing_status):\n    self._output_writer.Write('Storage file\\t\\t: {0:s}\\n'.format(self._storage_file_path))\n    self._PrintProcessingTime(processing_status)\n    if (processing_status and processing_status.events_status):\n        self._PrintEventsStatus(processing_status.events_status)\n    self._output_writer.Write('\\n')", "docstring": "Prints the analysis status header.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:\n    \n    examples_len = len(y_true)\n    correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])\n    return correct / examples_len if examples_len else 0", "docstring": "Calculate accuracy in terms of absolute coincidence\n\nArgs:\ny_true: array of true values\ny_predicted: array of predicted values\n\nReturns:\nportion of absolutely coincidental samples", "source": "juraj-google-style"}
{"code": "def diff(self) -> List[str]:\n    return set(self.to_track.keys()) - self._seen", "docstring": "This method returns a set difference between the keys in the tracked state dict and the one we have access so far.\nThis is an effective method to check if we have update all the keys\n\nReturns:\nList[str]: List of keys not yet updated", "source": "github-repos"}
{"code": "def draw_point(self, x, y):\n    check_int_err(lib.SDL_RenderDrawPoint(self._ptr, x, y))", "docstring": "Draw a point on the current rendering target.\n\nArgs:\nx (int): The x coordinate of the point.\ny (int): The y coordinate of the point.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def calculate_subscription_lifecycle(subscription_id):\n    \n    subscription = Subscription.objects.select_related(\"messageset\", \"schedule\").get(\n        id=subscription_id\n    )\n    behind = subscription.messages_behind()\n    if behind == 0:\n        return\n\n    current_messageset = subscription.messageset\n    current_sequence_number = subscription.next_sequence_number\n    end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]\n    BehindSubscription.objects.create(\n        subscription=subscription,\n        messages_behind=behind,\n        current_messageset=current_messageset,\n        current_sequence_number=current_sequence_number,\n        expected_messageset=end_subscription.messageset,\n        expected_sequence_number=end_subscription.next_sequence_number,\n    )", "docstring": "Calculates the expected lifecycle position the subscription in\nsubscription_ids, and creates a BehindSubscription entry for them.\n\nArgs:\nsubscription_id (str): ID of subscription to calculate lifecycle for", "source": "juraj-google-style"}
{"code": "async def leave_conversation(self, conv_id):\n        \n        logger.info('Leaving conversation: {}'.format(conv_id))\n        await self._conv_dict[conv_id].leave()\n        del self._conv_dict[conv_id]", "docstring": "Leave a conversation.\n\nArgs:\nconv_id (str): ID of conversation to leave.", "source": "juraj-google-style"}
{"code": "def _MeanAggregator(inputs, segments):\n    result = []\n    for inputs_i, segments_i in zip(array_ops.split(inputs, inputs.shape[0]), array_ops.split(segments, segments.shape[0])):\n        means_i = math_ops.unsorted_segment_mean(inputs_i, segments_i, num_segments=math_ops.reduce_max(segments_i) + 1)\n        result.append(array_ops.reshape(array_ops.gather(means_i, segments_i), [-1]))\n    return array_ops_stack.stack(result, axis=0)", "docstring": "Replaces each segment with its mean along the last axis.\n\nSpecifically, each value in the `inputs` tensor gets replaced by the mean\nvalue computed from the values that belong to the same segment.\n\nArgs:\ninputs: A 2-tensor. Aggregation is done over dimension 1.\nsegments: A 2-tensor, same shape as `input`.\n\nReturns:\nThe result, same shape and type as `inputs`.", "source": "github-repos"}
{"code": "def __process_instr(self, instr, avoid, next_addr, initial_state, execution_state, trace_current):\n        \n        \n        if instr.mnemonic == ReilMnemonic.JCC:\n            not_taken_addr = next_addr\n            address, index = split_address(instr.address)\n\n            logger.debug(\"[+] Processing branch: {:\n\n            \n            if isinstance(instr.operands[0], ReilRegisterOperand):\n                next_ip = self.__process_branch_cond(instr, avoid, initial_state, execution_state, trace_current, not_taken_addr)\n\n            \n            else:\n                next_ip = self.__process_branch_uncond(instr, trace_current, not_taken_addr)\n\n        \n        else:\n            trace_current += [(instr, None)]\n\n            self.__cpu.execute(instr)\n\n            next_ip = next_addr\n\n        return next_ip", "docstring": "Process a REIL instruction.\n\nArgs:\ninstr (ReilInstruction): Instruction to process.\navoid (list): List of addresses to avoid while executing the code.\nnext_addr (int): Address of the following instruction.\ninitial_state (State): Initial execution state.\nexecution_state (Queue): Queue of execution states.\ntrace_current (list): Current trace.\n\nReturns:\nint: Returns the next address to execute.", "source": "juraj-google-style"}
{"code": "def _op_in_graph_mode(tensor):\n    if context.executing_eagerly():\n        return tensor\n    return tensor.op", "docstring": "Returns the tensor's op in graph mode, or the tensor in eager mode.\n\nThis is useful because sometimes an op is needed in graph mode instead of a\ntensor. In eager mode, there are no ops.\n\nArgs:\ntensor: A tensor.\n\nReturns:\nThe tensor's op in graph mode. The tensor in eager mode.", "source": "github-repos"}
{"code": "def download(url, output_file=None, open_file=True, allow_overwrite=False):\n    \n    filename = url.split('/')[-1]\n    if output_file is None:\n        cache = os.path.join(get_data_home(), filename)\n    else:\n        cache = output_file\n    if os.path.exists(cache) and not allow_overwrite:\n        logger.info(\"> {} already exists.\".format(cache))\n        logger.info(\"> If you have any issue when using this file, \")\n        logger.info(\"> manually remove the file and try download again.\")\n    else:\n        r = request.urlopen(url)\n        try:\n            if six.PY2:\n                content_length = int(r.info().dict['content-length'])\n            elif six.PY3:\n                content_length = int(r.info()['Content-Length'])\n        except:\n            content_length = 0\n        unit = 1000000\n        content = b''\n        with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t:\n            while True:\n                data = r.read(unit)\n                l = len(data)\n                t.update(l)\n                if l == 0:\n                    break\n                content += data\n        with open(cache, 'wb') as f:\n            f.write(content)\n    if not open_file:\n        return\n    return open(cache, 'rb')", "docstring": "Download a file from URL.\n\nArgs:\nurl (str): URL.\noutput_file (str, optional): If given, the downloaded file is written to the given path.\nopen_file (bool): If True, it returns an opened file stream of the downloaded file.\nallow_overwrite (bool): If True, it overwrites an existing file.\n\nReturns:\nReturns file object if open_file is True, otherwise None.", "source": "juraj-google-style"}
{"code": "def on_click(self, handler):\n    self.on_event(ButtonClick, handler)\n    self.on_event(MenuItemClick, handler)", "docstring": "Set up a handler for button or menu item clicks.\n\nArgs:\nhandler (func) : handler function to call when button is activated.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def delete_metadata(self, resource, keys):\n        \n        self.metadata_service.set_auth(self._token_metadata)\n        self.metadata_service.delete(resource, keys)", "docstring": "Deletes the given key-value pairs associated with the given resource.\n\nWill attempt to delete all key-value pairs even if some fail.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys (list)\n\nRaises:\nHTTPErrorList on failure.", "source": "juraj-google-style"}
{"code": "def resample(self, size, interpolation=gdalconst.GRA_NearestNeighbour):\n        \n        \n        factors = (size[0] / float(self.RasterXSize),\n                   size[1] / float(self.RasterYSize))\n        affine = AffineTransform(*tuple(self.affine))\n        affine.scale = (affine.scale[0] / factors[0],\n                        affine.scale[1] / factors[1])\n        dest = self.new(size, affine)\n        \n        gdal.ReprojectImage(self.ds, dest.ds, None, None, interpolation)\n        return dest", "docstring": "Returns a new instance resampled to provided size.\n\nArguments:\nsize -- tuple of x,y image dimensions", "source": "juraj-google-style"}
{"code": "def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None, ridx=None, cidx=None, exclude_rid=None, exclude_cid=None):\n    assert (sum([(rid is not None), (row_bool is not None), (ridx is not None)]) <= 1), 'Only one of rid, row_bool, and ridx can be provided.'\n    assert (sum([(cid is not None), (col_bool is not None), (cidx is not None)]) <= 1), 'Only one of cid, col_bool, and cidx can be provided.'\n    rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid)\n    cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid)\n    rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep)\n    cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep)\n    out_gctoo = GCToo.GCToo(src=gctoo.src, version=gctoo.version, data_df=gctoo.data_df.loc[(rows_to_keep_bools, cols_to_keep_bools)], row_metadata_df=gctoo.row_metadata_df.loc[(rows_to_keep_bools, :)], col_metadata_df=gctoo.col_metadata_df.loc[(cols_to_keep_bools, :)])\n    assert (out_gctoo.data_df.size > 0), 'Subsetting yielded an empty gct!'\n    logger.info(('Initial GCToo with {} rows and {} columns subsetted down to ' + '{} rows and {} columns.').format(gctoo.data_df.shape[0], gctoo.data_df.shape[1], out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1]))\n    return out_gctoo", "docstring": "Extract a subset of data from a GCToo object in a variety of ways.\nThe order of rows and columns will be preserved.\n\nArgs:\ngctoo (GCToo object)\nrow_bool (list of bools): length must equal gctoo.data_df.shape[0]\ncol_bool (list of bools): length must equal gctoo.data_df.shape[1]\nrid (list of strings): rids to include\ncid (list of strings): cids to include\nridx (list of integers): row integer ids to include\ncidx (list of integers): col integer ids to include\nexclude_rid (list of strings): rids to exclude\nexclude_cid (list of strings): cids to exclude\n\nReturns:\nout_gctoo (GCToo object): gctoo after subsetting", "source": "codesearchnet"}
{"code": "def reset_port_protection(self, id_or_uri, timeout=(- 1)):\n    uri = (self._client.build_uri(id_or_uri) + '/resetportprotection')\n    return self._client.update_with_zero_body(uri, timeout)", "docstring": "Triggers a reset of port protection.\n\nCause port protection to be reset on all the interconnects of the logical interconnect that matches ID.\n\nArgs:\nid_or_uri: Can be either the interconnect id or the interconnect uri.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: The interconnect.", "source": "codesearchnet"}
{"code": "def add_gene_info(self, variant_obj, gene_panels=None):\n    gene_panels = (gene_panels or [])\n    variant_obj['has_refseq'] = False\n    extra_info = {}\n    for panel_obj in gene_panels:\n        for gene_info in panel_obj['genes']:\n            hgnc_id = gene_info['hgnc_id']\n            if (hgnc_id not in extra_info):\n                extra_info[hgnc_id] = []\n            extra_info[hgnc_id].append(gene_info)\n    for variant_gene in variant_obj.get('genes', []):\n        hgnc_id = variant_gene['hgnc_id']\n        hgnc_gene = self.hgnc_gene(hgnc_id)\n        if (not hgnc_gene):\n            continue\n        transcripts_dict = {}\n        for transcript in hgnc_gene.get('transcripts', []):\n            tx_id = transcript['ensembl_transcript_id']\n            transcripts_dict[tx_id] = transcript\n        hgnc_gene['transcripts_dict'] = transcripts_dict\n        if hgnc_gene.get('incomplete_penetrance'):\n            variant_gene['omim_penetrance'] = True\n        panel_info = extra_info.get(hgnc_id, [])\n        disease_associated = set()\n        disease_associated_no_version = set()\n        manual_penetrance = False\n        mosaicism = False\n        manual_inheritance = set()\n        for gene_info in panel_info:\n            for tx in gene_info.get('disease_associated_transcripts', []):\n                stripped = re.sub('\\\\.[0-9]', '', tx)\n                disease_associated_no_version.add(stripped)\n                disease_associated.add(tx)\n            if gene_info.get('reduced_penetrance'):\n                manual_penetrance = True\n            if gene_info.get('mosaicism'):\n                mosaicism = True\n            manual_inheritance.update(gene_info.get('inheritance_models', []))\n        variant_gene['disease_associated_transcripts'] = list(disease_associated)\n        variant_gene['manual_penetrance'] = manual_penetrance\n        variant_gene['mosaicism'] = mosaicism\n        variant_gene['manual_inheritance'] = list(manual_inheritance)\n        for transcript in variant_gene.get('transcripts', []):\n            tx_id = transcript['transcript_id']\n            if (not (tx_id in transcripts_dict)):\n                continue\n            hgnc_transcript = transcripts_dict[tx_id]\n            if hgnc_transcript.get('is_primary'):\n                transcript['is_primary'] = True\n            if (not hgnc_transcript.get('refseq_id')):\n                continue\n            refseq_id = hgnc_transcript['refseq_id']\n            transcript['refseq_id'] = refseq_id\n            variant_obj['has_refseq'] = True\n            if (refseq_id in disease_associated_no_version):\n                transcript['is_disease_associated'] = True\n            transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers', [])\n        variant_gene['common'] = hgnc_gene\n        variant_gene['disease_terms'] = self.disease_terms(hgnc_id)\n    return variant_obj", "docstring": "Add extra information about genes from gene panels\n\nArgs:\nvariant_obj(dict): A variant from the database\ngene_panels(list(dict)): List of panels from database", "source": "codesearchnet"}
{"code": "def calculate_3D_elastic_energy(self, film, match, elasticity_tensor=None, include_strain=False):\n    if (elasticity_tensor is None):\n        return 9999\n    struc = SlabGenerator(self.film, match['film_miller'], 20, 15, primitive=False).get_slab().oriented_unit_cell\n    film_matrix = list(match['film_sl_vecs'])\n    film_matrix.append(np.cross(film_matrix[0], film_matrix[1]))\n    substrate_matrix = list(match['sub_sl_vecs'])\n    temp_sub = np.cross(substrate_matrix[0], substrate_matrix[1])\n    temp_sub = ((temp_sub * fast_norm(film_matrix[2])) / fast_norm(temp_sub))\n    substrate_matrix.append(temp_sub)\n    transform_matrix = np.transpose(np.linalg.solve(film_matrix, substrate_matrix))\n    dfm = Deformation(transform_matrix)\n    strain = dfm.green_lagrange_strain.convert_to_ieee(struc, initial_fit=False)\n    energy_density = elasticity_tensor.energy_density(strain)\n    if include_strain:\n        return (((film.volume * energy_density) / len(film.sites)), strain.von_mises_strain)\n    else:\n        return ((film.volume * energy_density) / len(film.sites))", "docstring": "Calculates the multi-plane elastic energy. Returns 999 if no elastic\ntensor was given on init\n\nArgs:\nfilm(Structure): conventional standard structure for the film\nmatch(dictionary) : match dictionary from substrate analyzer\nelasticity_tensor(ElasticTensor): elasticity tensor for the film\ninclude_strain(bool): include strain in the output or not; changes\nreturn from just the energy to a tuple with the energy and strain\nin voigt notation", "source": "codesearchnet"}
{"code": "def _fetch_certs(request, certs_url):\n    response = request(certs_url, method='GET')\n    if (response.status != http_client.OK):\n        raise exceptions.TransportError('Could not fetch certificates at {}'.format(certs_url))\n    return json.loads(response.data.decode('utf-8'))", "docstring": "Fetches certificates.\n\nGoogle-style cerificate endpoints return JSON in the format of\n``{'key id': 'x509 certificate'}``.\n\nArgs:\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\ncerts_url (str): The certificate endpoint URL.\n\nReturns:\nMapping[str, str]: A mapping of public key ID to x.509 certificate\ndata.", "source": "codesearchnet"}
{"code": "def get_collectors(self, limit=1000, offset=0):\n    options = {'limit': limit, 'offset': offset}\n    request = requests.get(self.url, params=options, auth=self.auth)\n    try:\n        results = request.json()['collectors']\n    except KeyError:\n        results = request.json()\n    except json.decoder.JSONDecodeError:\n        results = []\n    return results", "docstring": "Returns a dict of collectors.\n\nArgs:\nlimit (int): number of collectors to return\noffset (int): the offset of where the list of collectors should begin from", "source": "codesearchnet"}
{"code": "def draw_layer(ax, layer):\n    ax.set_aspect('equal', 'datalim')\n    ax.plot(*layer)\n    ax.axis('off')", "docstring": "Draws a layer on the given matplotlib axis.\n\nArgs:\nax (axis): the matplotlib axis to draw on\nlayer (layer): the layers to plot", "source": "codesearchnet"}
{"code": "def _print_results(file, status):\n    file_color = c.Fore.GREEN\n    status_color = c.Fore.RED\n    if (status == 'Success'):\n        status_color = c.Fore.GREEN\n    elif (status == 'Skipped'):\n        status_color = c.Fore.YELLOW\n    print('{}{!s:<13}{}{!s:<35}{}{!s:<8}{}{}'.format(c.Fore.CYAN, 'Downloading:', file_color, file, c.Fore.CYAN, 'Status:', status_color, status))", "docstring": "Print the download results.\n\nArgs:\nfile (str): The filename.\nstatus (str): The file download status.", "source": "codesearchnet"}
{"code": "def __init__(self, additional_note='', kwargs_dict=None):\n    self._additional_note = additional_note\n    if kwargs_dict:\n        bullets = []\n        for key in sorted(kwargs_dict.keys()):\n            value = kwargs_dict[key]\n            if any((x.isspace() for x in key)):\n                raise ValueError('Parameter name \"%s\" contains whitespace.' % key)\n            value = value.lstrip()\n            if '\\n' in value:\n                raise ValueError('Parameter description for \"%s\" contains newlines.' % key)\n            bullets.append('*  `%s`: %s' % (key, value))\n        self._additional_note += '\\n\\n", "docstring": "Initializes the AppendDocstring object.\n\nArgs:\nadditional_note: Python string added as additional docstring to public\nversion of function.\nkwargs_dict: Python string/string dictionary representing specific kwargs\nexpanded from the **kwargs input.\n\nRaises:\nValueError: if kwargs_dict.key contains whitespace.\nValueError: if kwargs_dict.value contains newlines.", "source": "github-repos"}
{"code": "def bidiagonalize_real_matrix_pair_with_symmetric_products(mat1: np.ndarray, mat2: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08, check_preconditions: bool=True) -> Tuple[(np.ndarray, np.ndarray)]:\n    if check_preconditions:\n        if np.any((np.imag(mat1) != 0)):\n            raise ValueError('mat1 must be real.')\n        if np.any((np.imag(mat2) != 0)):\n            raise ValueError('mat2 must be real.')\n        if (not predicates.is_hermitian(mat1.dot(mat2.T), rtol=rtol, atol=atol)):\n            raise ValueError('mat1 @ mat2.T must be symmetric.')\n        if (not predicates.is_hermitian(mat1.T.dot(mat2), rtol=rtol, atol=atol)):\n            raise ValueError('mat1.T @ mat2 must be symmetric.')\n    (base_left, base_diag, base_right) = _svd_handling_empty(np.real(mat1))\n    base_diag = np.diag(base_diag)\n    dim = base_diag.shape[0]\n    rank = dim\n    while ((rank > 0) and tolerance.all_near_zero(base_diag[((rank - 1), (rank - 1))], atol=atol)):\n        rank -= 1\n    base_diag = base_diag[(:rank, :rank)]\n    semi_corrected = base_left.T.dot(np.real(mat2)).dot(base_right.T)\n    overlap = semi_corrected[(:rank, :rank)]\n    overlap_adjust = diagonalize_real_symmetric_and_sorted_diagonal_matrices(overlap, base_diag, rtol=rtol, atol=atol, check_preconditions=check_preconditions)\n    extra = semi_corrected[(rank:, rank:)]\n    (extra_left_adjust, _, extra_right_adjust) = _svd_handling_empty(extra)\n    left_adjust = combinators.block_diag(overlap_adjust, extra_left_adjust)\n    right_adjust = combinators.block_diag(overlap_adjust.T, extra_right_adjust)\n    left = left_adjust.T.dot(base_left.T)\n    right = base_right.T.dot(right_adjust.T)\n    return (left, right)", "docstring": "Finds orthogonal matrices that diagonalize both mat1 and mat2.\n\nRequires mat1 and mat2 to be real.\nRequires mat1.T @ mat2 to be symmetric.\nRequires mat1 @ mat2.T to be symmetric.\n\nArgs:\nmat1: One of the real matrices.\nmat2: The other real matrix.\nrtol: Relative numeric error threshold.\natol: Absolute numeric error threshold.\ncheck_preconditions: If set, verifies that the inputs are real, and that\nmat1.T @ mat2 and mat1 @ mat2.T are both symmetric. Defaults to set.\n\nReturns:\nA tuple (L, R) of two orthogonal matrices, such that both L @ mat1 @ R\nand L @ mat2 @ R are diagonal matrices.\n\nRaises:\nValueError: Matrices don't meet preconditions (e.g. not real).", "source": "codesearchnet"}
{"code": "def get_config():\n    profiles = {}\n    curr = None\n    cmd = ['netsh', 'advfirewall', 'show', 'allprofiles']\n    ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)\n    if (ret['retcode'] != 0):\n        raise CommandExecutionError(ret['stdout'])\n    for line in ret['stdout'].splitlines():\n        if (not curr):\n            tmp = re.search('(.*) Profile Settings:', line)\n            if tmp:\n                curr = tmp.group(1)\n        elif line.startswith('State'):\n            profiles[curr] = (line.split()[1] == 'ON')\n            curr = None\n    return profiles", "docstring": "Get the status of all the firewall profiles\n\nReturns:\ndict: A dictionary of all profiles on the system\n\nRaises:\nCommandExecutionError: If the command fails\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' firewall.get_config", "source": "codesearchnet"}
{"code": "def _GetBetweenQEqualsAndAmpersand(self, url):\n    (_, _, url) = url.partition('?')\n    (_, _, url) = url.partition('q=')\n    if (not url):\n        return ''\n    (url, _, _) = url.partition('&')\n    return url", "docstring": "Retrieves the substring between the substrings 'q=' and '&'.\n\nArgs:\nurl (str): URL.\n\nReturns:\nstr: search query, the value between 'q=' and '&'  or None if no query\nwas found.", "source": "codesearchnet"}
{"code": "def recursive_copy(src_dir, dest_dir):\n  \n\n  file_io.recursive_create_dir(dest_dir)\n  for file_name in file_io.list_directory(src_dir):\n    old_path = os.path.join(src_dir, file_name)\n    new_path = os.path.join(dest_dir, file_name)\n\n    if file_io.is_directory(old_path):\n      recursive_copy(old_path, new_path)\n    else:\n      file_io.copy(old_path, new_path, overwrite=True)", "docstring": "Copy the contents of src_dir into the folder dest_dir.\nArgs:\nsrc_dir: gsc or local path.\ndest_dir: gcs or local path.", "source": "juraj-google-style"}
{"code": "def polyFitIgnoringOutliers(\n        x, y, deg=2, niter=3, nstd=2, return_outliers=False):\n    \n    if return_outliers:\n        a = all_outliers = np.zeros_like(y, dtype=bool)\n    for i in range(niter):\n        poly = np.polyfit(x, y, deg)\n        p = np.poly1d(poly)\n        if i == niter - 1:\n            break\n        y_fit = p(x)\n        dy = y - y_fit\n        std = (dy**2).mean()**0.5\n        inliers = abs(dy) < nstd * std\n        if return_outliers:\n            a[~inliers] = True\n\n        if inliers.sum() > deg + 1:\n            x = x[inliers]\n            y = y[inliers]\n            if return_outliers:\n                a = a[inliers]\n        else:\n            break\n    if return_outliers:\n        return p, all_outliers\n    return p", "docstring": "Returns:\n(np.poly1d): callable function of polynomial fit excluding all outliers\nArgs:\ndeg (int): degree of polynomial fit\nn_iter (int): do linear regression n times\nsuccessive removing\nnstd (float): exclude outliers, if their deviation\nis > [nstd] * standard deviation\nreturn_outliers (bool): also return outlier positions as 2. arg", "source": "juraj-google-style"}
{"code": "def registerAccount(self, person, vendorSpecific=None):\n        \n        response = self.registerAccountResponse(person, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: registerAccountResponse()\n\nArgs:\nperson:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_or_create(session, model, **kwargs):\n    \n    instance = session.query(model).filter_by(**kwargs).first()\n    if instance:\n        return instance, False\n    else:\n        instance = model(**kwargs)\n        if 'dataset' in kwargs:\n            instance.update_sequence_id(session, kwargs['dataset'])\n        session.add(instance)\n        session.commit()\n        return instance, True", "docstring": "Get or create sqlalchemy instance.\n\nArgs:\nsession (Sqlalchemy session):\nmodel (sqlalchemy model):\nkwargs (dict): kwargs to lookup or create instance.\n\nReturns:\nTuple: first element is found or created instance, second is boolean - True if instance created,\nFalse if instance found.", "source": "juraj-google-style"}
{"code": "def absl_to_cpp(level):\n  \n  if not isinstance(level, int):\n    raise TypeError('Expect an int level, found {}'.format(type(level)))\n  if level >= 0:\n    \n    return 0\n  else:\n    return -level", "docstring": "Converts an absl log level to a cpp log level.\n\nArgs:\nlevel: int, an absl.logging level.\n\nRaises:\nTypeError: Raised when level is not an integer.\n\nReturns:\nThe corresponding integer level for use in Abseil C++.", "source": "juraj-google-style"}
{"code": "def report_server_init_errors(address=None, port=None, **kwargs):\n    try:\n        (yield)\n    except EnvironmentError as e:\n        if (e.errno == errno.EADDRINUSE):\n            log.critical('Cannot start Bokeh server, port %s is already in use', port)\n        elif (e.errno == errno.EADDRNOTAVAIL):\n            log.critical(\"Cannot start Bokeh server, address '%s' not available\", address)\n        else:\n            codename = errno.errorcode[e.errno]\n            log.critical('Cannot start Bokeh server [%s]: %r', codename, e)\n        sys.exit(1)", "docstring": "A context manager to help print more informative error messages when a\n``Server`` cannot be started due to a network problem.\n\nArgs:\naddress (str) : network address that the server will be listening on\n\nport (int) : network address that the server will be listening on\n\nExample:\n\n.. code-block:: python\n\nwith report_server_init_errors(**server_kwargs):\nserver = Server(applications, **server_kwargs)\n\nIf there are any errors (e.g. port or address in already in use) then a\ncritical error will be logged and the process will terminate with a\ncall to ``sys.exit(1)``", "source": "codesearchnet"}
{"code": "def get(self, url, params=None, **kwargs):\n        \n        return self.call_api(\n            \"GET\",\n            url,\n            params=params,\n            **kwargs\n        )", "docstring": "Call the API with a GET request.\n\nArgs:\nurl (str): Resource location relative to the base URL.\nparams (dict or None): Query-string parameters.\n\nReturns:\nResultParser or ErrorParser.", "source": "juraj-google-style"}
{"code": "def optimize_boolean_expression_comparisons(ir_blocks):\n    operator_inverses = {u'=': u'!=', u'!=': u'='}\n\n    def visitor_fn(expression):\n        'Expression visitor function that performs the above rewriting.'\n        if (not isinstance(expression, BinaryComposition)):\n            return expression\n        left_is_binary_composition = isinstance(expression.left, BinaryComposition)\n        right_is_binary_composition = isinstance(expression.right, BinaryComposition)\n        if ((not left_is_binary_composition) and (not right_is_binary_composition)):\n            return expression\n        identity_literal = None\n        inverse_literal = None\n        if (expression.operator == u'='):\n            identity_literal = TrueLiteral\n            inverse_literal = FalseLiteral\n        elif (expression.operator == u'!='):\n            identity_literal = FalseLiteral\n            inverse_literal = TrueLiteral\n        else:\n            return expression\n        expression_to_rewrite = None\n        if ((expression.left == identity_literal) and right_is_binary_composition):\n            return expression.right\n        elif ((expression.right == identity_literal) and left_is_binary_composition):\n            return expression.left\n        elif ((expression.left == inverse_literal) and right_is_binary_composition):\n            expression_to_rewrite = expression.right\n        elif ((expression.right == inverse_literal) and left_is_binary_composition):\n            expression_to_rewrite = expression.left\n        if (expression_to_rewrite is None):\n            return expression\n        elif (expression_to_rewrite.operator not in operator_inverses):\n            return expression\n        else:\n            return BinaryComposition(operator_inverses[expression_to_rewrite.operator], expression_to_rewrite.left, expression_to_rewrite.right)\n    new_ir_blocks = []\n    for block in ir_blocks:\n        new_block = block.visit_and_update_expressions(visitor_fn)\n        new_ir_blocks.append(new_block)\n    return new_ir_blocks", "docstring": "Optimize comparisons of a boolean binary comparison expression against a boolean literal.\n\nRewriting example:\nBinaryComposition(\n'=',\nBinaryComposition('!=', something, NullLiteral)\nFalse)\n\nThe above is rewritten into:\nBinaryComposition('=', something, NullLiteral)\n\nArgs:\nir_blocks: list of basic block objects\n\nReturns:\na new list of basic block objects, with the optimization applied", "source": "codesearchnet"}
{"code": "def cudnn_stacked_bi_gru(units, n_hidden, seq_lengths=None, n_stacks=2, keep_prob=1.0, concat_stacked_outputs=False, trainable_initial_states=False, name='cudnn_stacked_bi_gru', reuse=False):\n    if (seq_lengths is None):\n        seq_lengths = (tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1])\n    outputs = [units]\n    with tf.variable_scope(name, reuse=reuse):\n        for n in range(n_stacks):\n            if (n == 0):\n                inputs = outputs[(- 1)]\n            else:\n                inputs = variational_dropout(outputs[(- 1)], keep_prob=keep_prob)\n            ((h_fw, h_bw), _) = cudnn_bi_gru(inputs, n_hidden, seq_lengths, n_layers=1, trainable_initial_states=trainable_initial_states, name='{}_cudnn_bi_gru'.format(n), reuse=reuse)\n            outputs.append(tf.concat([h_fw, h_bw], axis=2))\n    if concat_stacked_outputs:\n        return tf.concat(outputs[1:], axis=2)\n    return outputs[(- 1)]", "docstring": "Fast CuDNN Stacked Bi-GRU implementation\n\nArgs:\nunits: tf.Tensor with dimensions [B x T x F], where\nB - batch size\nT - number of tokens\nF - features\nn_hidden: dimensionality of hidden state\nseq_lengths: number of tokens in each sample in the batch\nn_stacks: number of stacked Bi-GRU\nkeep_prob: dropout keep_prob between Bi-GRUs (intra-layer dropout)\nconcat_stacked_outputs: return last Bi-GRU output or concat outputs from every Bi-GRU,\ntrainable_initial_states: whether to create a special trainable variable\nto initialize the hidden states of the network or use just zeros\nname: name of the variable scope to use\nreuse: whether to reuse already initialized variable\n\n\nReturns:\nh - all hidden states along T dimension,\ntf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]", "source": "codesearchnet"}
{"code": "def easeInOutExpo(n):\n    \n    _checkRange(n)\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        n = n * 2\n        if n < 1:\n            return 0.5 * 2**(10 * (n - 1))\n        else:\n            n -= 1\n            \n            return 0.5 * (-1 * (2 ** (-10 * n)) + 2)", "docstring": "An exponential tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n    if attention_mask is not None and attention_mask.dim() == 4:\n        causal_mask = attention_mask\n    else:\n        min_dtype = torch.finfo(dtype).min\n        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n        if sequence_length != 1:\n            causal_mask = torch.triu(causal_mask, diagonal=1)\n        causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n        if attention_mask is not None:\n            causal_mask = causal_mask.clone()\n            mask_length = attention_mask.shape[-1]\n            padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[:, :, -sequence_length:, :].to(dtype)\n            padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask\n            padding_mask = padding_mask == 0\n            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n    return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\nattention_mask (`torch.Tensor`):\nA 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n`(batch_size, 1, query_length, key_value_length)`.\nsequence_length (`int`):\nThe sequence length being processed.\ntarget_length (`int`):\nThe target length: when generating with static cache, the mask should be as long as the static cache,\nto account for the 0 padding, the part of the cache that is not filled yet.\ndtype (`torch.dtype`):\nThe dtype to use for the 4D attention mask.\ncache_position (`torch.Tensor`):\nIndices depicting the position of the input sequence tokens in the sequence.\nbatch_size (`torch.Tensor`):\nBatch size.", "source": "github-repos"}
{"code": "def golden_images(self):\n    if (not self.__golden_images):\n        self.__golden_images = GoldenImages(self.__connection)\n    return self.__golden_images", "docstring": "Gets the Golden Images API client.\n\nReturns:\nGoldenImages:", "source": "codesearchnet"}
{"code": "def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):\n    if (self.__num_processes > 1):\n        process_pool = Pool(processes=self.__num_processes)\n        members = [m.get() for m in self.__members]\n    else:\n        members = self.__members\n    if (len(members) == 0):\n        raise Exception('Generation 0 not found: use generate_population() first')\n    selected_members = self.__select_fn(members)\n    reproduction_probs = list(reversed(logspace(0.0, 1.0, num=len(selected_members), base=log_base)))\n    reproduction_probs = (reproduction_probs / sum(reproduction_probs))\n    self.__members = []\n    for _ in range(self.__pop_size):\n        parent_1 = nrandom.choice(selected_members, p=reproduction_probs)\n        parent_2 = nrandom.choice(selected_members, p=reproduction_probs)\n        feed_dict = {}\n        for param in self.__parameters:\n            which_parent = uniform(0, 1)\n            if (which_parent < 0.5):\n                feed_dict[param.name] = parent_1.parameters[param.name]\n            else:\n                feed_dict[param.name] = parent_2.parameters[param.name]\n            feed_dict[param.name] = self.__mutate_parameter(feed_dict[param.name], param, mut_rate, max_mut_amt)\n        if (self.__num_processes > 1):\n            self.__members.append(process_pool.apply_async(self._start_process, [self.__cost_fn, feed_dict, self.__cost_fn_args]))\n        else:\n            self.__members.append(Member(feed_dict, self.__cost_fn(feed_dict, self.__cost_fn_args)))\n    if (self.__num_processes > 1):\n        process_pool.close()\n        process_pool.join()\n    self.__determine_best_member()", "docstring": "Generates the next population from a previously evaluated generation\n\nArgs:\nmut_rate (float): mutation rate for new members (0.0 - 1.0)\nmax_mut_amt (float): how much the member is allowed to mutate\n(0.0 - 1.0, proportion change of mutated parameter)\nlog_base (int): the higher this number, the more likely the first\nMembers (chosen with supplied selection function) are chosen\nas parents for the next generation", "source": "codesearchnet"}
{"code": "def closing(input_rasterfilename, times):\n    input_raster = RasterUtilClass.read_raster(input_rasterfilename)\n    closing_raster = input_raster\n    for i in range(times):\n        closing_raster = RasterUtilClass.raster_dilation(closing_raster)\n    for i in range(times):\n        closing_raster = RasterUtilClass.raster_erosion(closing_raster)\n    return closing_raster", "docstring": "Do closing.\n\nClosing: Dilate firstly, then Erode.\n\nArgs:\ninput_rasterfilename: input original raster image filename.\ntimes: Erode and Dilate times.\n\nReturns:\nclosing_raster: raster image after close.", "source": "codesearchnet"}
{"code": "def transpose(self, name=None):\n    \n    if any(x > 1 for x in self._rate):\n      raise base.NotSupportedError(\n          \"Cannot transpose a dilated convolution module.\")\n\n    if any(p != self._conv_op_padding for p in self._padding):\n      raise base.NotSupportedError(\n          \"Cannot tranpose a convolution using mixed paddings or paddings \"\n          \"other than SAME or VALID.\")\n\n    if name is None:\n      name = self.module_name + \"_transpose\"\n\n    def output_shape():\n      if self._data_format == DATA_FORMAT_NCHW:\n        return self.input_shape[2:4]\n      else:  \n        return self.input_shape[1:3]\n\n    return Conv2DTranspose(output_channels=lambda: self._input_channels,\n                           output_shape=output_shape,\n                           kernel_shape=self._kernel_shape,\n                           stride=self._stride,\n                           padding=self._conv_op_padding,\n                           use_bias=self._use_bias,\n                           initializers=self._initializers,\n                           partitioners=self._partitioners,\n                           regularizers=self._regularizers,\n                           data_format=self._data_format,\n                           custom_getter=self._custom_getter,\n                           name=name)", "docstring": "Returns matching `Conv2DTranspose` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n`Conv2DTranspose` module.\n\nRaises:\nbase.NotSupportedError: If `rate` in any dimension > 1.", "source": "juraj-google-style"}
{"code": "def copy(self, destination):\n\n\t\t\n\n\t\t\n\t\tdestination_uri = self.repo.parse_uri(destination)\n\n\t\t\n\t\tresponse = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})\n\n\t\t\n\t\tif response.status_code == 201:\n\t\t\treturn destination_uri\n\t\telse:\n\t\t\traise Exception('HTTP %s, could not move resource %s to %s' % (response.status_code, self.uri, destination_uri))", "docstring": "Method to copy resource to another location\n\nArgs:\ndestination (rdflib.term.URIRef, str): URI location to move resource\n\nReturns:\n(Resource) new, moved instance of resource", "source": "juraj-google-style"}
{"code": "def micros_to_timestamp(micros, timestamp):\n  \n  seconds = long(micros / _MICROS_PER_SECOND)\n  micro_remainder = micros % _MICROS_PER_SECOND\n  timestamp.seconds = seconds\n  timestamp.nanos = micro_remainder * _NANOS_PER_MICRO", "docstring": "Convert microseconds from utc epoch to google.protobuf.timestamp.\n\nArgs:\nmicros: a long, number of microseconds since utc epoch.\ntimestamp: a google.protobuf.timestamp.Timestamp to populate.", "source": "juraj-google-style"}
{"code": "def warp(self, to_sref, dest=None, interpolation=gdalconst.GRA_NearestNeighbour):\n        \n        if not hasattr(to_sref, 'ExportToWkt'):\n            to_sref = SpatialReference(to_sref)\n        dest_wkt = to_sref.ExportToWkt()\n        dtype = self[0].DataType\n        err_thresh = 0.125\n        \n        \n        vrt = gdal.AutoCreateWarpedVRT(self.ds, None, dest_wkt,\n                                       interpolation, err_thresh)\n        if vrt is None:\n            raise ValueError('Could not warp %s to %s' % (self, dest_wkt))\n        warpsize = (vrt.RasterXSize, vrt.RasterYSize, len(self))\n        warptrans = vrt.GetGeoTransform()\n        vrt = None\n        if dest is None:\n            imgio = MemFileIO()\n            rwarp = self.driver.raster(imgio, warpsize, dtype)\n            imgio.close()\n        else:\n            rwarp = self.driver.raster(dest, warpsize, dtype)\n        rwarp.SetGeoTransform(warptrans)\n        rwarp.SetProjection(to_sref)\n        if self.nodata is not None:\n            for band in rwarp:\n                band.SetNoDataValue(self.nodata)\n                band = None\n        \n        gdal.ReprojectImage(self.ds, rwarp.ds, None, None, interpolation)\n        return rwarp", "docstring": "Returns a new reprojected instance.\n\nArguments:\nto_sref -- spatial reference as a proj4 or wkt string, or a\nSpatialReference\nKeyword args:\ndest -- filepath as str\ninterpolation -- GDAL interpolation type", "source": "juraj-google-style"}
{"code": "def register(self, table):\n        \n        if table.table_type.is_system:\n            raise ValueError('Cannot add system table to catalog')\n        if not table.table_type.is_shared:\n            raise ValueError('Cannot add local table to catalog')\n        if table.is_substitute:\n            raise ValueError('Cannot add substitute table to catalog')\n\n        versions = self.__tables.get(table.name)\n        if versions is None:\n            versions = {}\n            self.__tables[table.name] = versions\n        versions[table.version] = table", "docstring": "Adds a shared table to the catalog.\n\nArgs:\ntable (SymbolTable): A non-system, shared symbol table.", "source": "juraj-google-style"}
{"code": "def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool=True) -> Rotation:\n    quats = self.get_quats()\n    new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n    return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)", "docstring": "Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion\nupdate, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the\ndesired (not necessarily unit) quaternion update.\n\nArgs:\nq_update_vec:\nA [*, 3] quaternion update tensor\nnormalize_quats:\nWhether to normalize the output quaternion\nReturns:\nAn updated Rotation", "source": "github-repos"}
{"code": "def getToC(doc, simple=True):\n\n    def recurse(olItem, liste, lvl):\n        'Recursively follow the outline item chain and record item information in a list.'\n        while olItem:\n            if olItem.title:\n                title = olItem.title\n            else:\n                title = ' '\n            if (not olItem.isExternal):\n                if olItem.uri:\n                    page = (olItem.page + 1)\n                else:\n                    page = (- 1)\n            else:\n                page = (- 1)\n            if (not simple):\n                link = getLinkDict(olItem)\n                liste.append([lvl, title, page, link])\n            else:\n                liste.append([lvl, title, page])\n            if olItem.down:\n                liste = recurse(olItem.down, liste, (lvl + 1))\n            olItem = olItem.next\n        return liste\n    if doc.isClosed:\n        raise ValueError('illegal operation on closed document')\n    olItem = doc.outline\n    if (not olItem):\n        return []\n    lvl = 1\n    liste = []\n    return recurse(olItem, liste, lvl)", "docstring": "Create a table of contents.\n\nArgs:\nsimple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation.", "source": "codesearchnet"}
{"code": "def __init__(self, name, data=None, package_cls=None):\n        \n        super(PackageMaker, self).__init__(data)\n        self.name = name\n        self.package_cls = package_cls or Package\n\n        \n        self.installed_variants = []\n        self.skipped_variants = []", "docstring": "Create a package maker.\n\nArgs:\nname (str): Package name.", "source": "juraj-google-style"}
{"code": "def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):\n    sharding = get_tensor_sharding(from_tensor)\n    if sharding is None:\n        return to_tensor\n    if isinstance(to_tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():\n        proto = xla_data_pb2.OpSharding()\n        proto.ParseFromString(sharding)\n        to_tensor._set_xla_sharding(proto)\n        return to_tensor\n    if use_sharding_op:\n        to_tensor = tf2xla.sharding(to_tensor, sharding=sharding)\n    attr_value = attr_value_pb2.AttrValue(s=sharding)\n    to_tensor.op._set_attr('_XlaSharding', attr_value)\n    return to_tensor", "docstring": "Copies the a tensor's sharding to another.\n\nArgs:\nfrom_tensor: Source tensor. Must be the sole output of an op.\nto_tensor: the tensor the annotate with the copy.\nuse_sharding_op: whether to create a sharding op on `to_tensor`.\n\nReturns:\nA tensor with sharding annotation copied from `from_tensor`.", "source": "github-repos"}
{"code": "def __init__(self, items: Optional[Iterable[Any]]=None, *, value_spec: Optional[pg_typing.List]=None, onchange_callback: Optional[Callable[[Dict[utils.KeyPath, base.FieldUpdate]], None]]=None, allow_partial: bool=False, accessor_writable: bool=True, sealed: bool=False, root_path: Optional[utils.KeyPath]=None):\n    if value_spec and (not isinstance(value_spec, pg_typing.List)):\n        raise TypeError(f\"Argument 'value_spec' must be a `pg.typing.List` object. Encountered {value_spec}.\")\n    base.Symbolic.__init__(self, allow_partial=allow_partial, accessor_writable=accessor_writable, sealed=False, root_path=root_path)\n    self._value_spec = None\n    self._onchange_callback = None\n    list.__init__(self)\n    if items:\n        if isinstance(items, List):\n            items = items.sym_values()\n        for item in items:\n            self._set_item_without_permission_check(len(self), item)\n    if value_spec:\n        self.use_value_spec(value_spec, allow_partial)\n    self._onchange_callback = onchange_callback\n    self.seal(sealed)", "docstring": "Constructor.\n\nArgs:\nitems: A optional iterable object as initial value for this list.\nvalue_spec: Value spec that applies to this List.\nonchange_callback: Callback when sub-tree has been modified.\nallow_partial: Whether to allow unbound or partial fields. This takes\neffect only when value_spec is not None.\naccessor_writable: Whether to allow modification of this List using\naccessors (operator[]).\nsealed: Whether to seal this List after creation.\nroot_path: KeyPath of this List in its object tree.", "source": "github-repos"}
{"code": "def sym_distance(cls, q0, q1):\n    q = Quaternion.sym_log_map(q0, q1)\n    return q.norm", "docstring": "Quaternion symmetrized distance.\n\nFind the intrinsic symmetrized geodesic distance between q0 and q1.\n\nParams:\nq0: the first quaternion\nq1: the second quaternion\n\nReturns:\nA positive amount corresponding to the length of the symmetrized\ngeodesic curve connecting q0 to q1.\n\nNote:\nThis formulation is more numerically stable when performing\niterative gradient descent on the Riemannian quaternion manifold.\nHowever, the distance between q and -q is equal to pi, rendering this\nformulation not useful for measuring rotation similarities when the\nsamples are spread over a \"solid\" angle of more than pi/2 radians\n(the spread refers to quaternions as point samples on the unit hypersphere).", "source": "codesearchnet"}
{"code": "def __init__(self, batch_size, key_depth, val_depth, memory_size,\n               sharpen_factor=1., name=\"neural_memory\"):\n    \n    self.name = name\n    self.batch_size = batch_size\n    self.key_depth = key_depth\n    self.val_depth = val_depth\n    self.memory_size = memory_size\n    self.sharpen_factor = sharpen_factor\n    with tf.variable_scope(name):\n      self.segment_number = tf.get_variable(\n          \"segment_number\", [self.batch_size],\n          dtype=tf.int32, trainable=False,\n          initializer=tf.constant_initializer(100000))\n      self.mem_vals = tf.get_variable(\n          \"memvals\", [self.batch_size, self.memory_size, self.val_depth],\n          dtype=tf.float32, trainable=False,\n          initializer=tf.constant_initializer(.0))\n      self.mean_logits = tf.get_variable(\n          \"meanlogits\", [self.batch_size, self.memory_size],\n          dtype=tf.float32, trainable=False,\n          initializer=tf.constant_initializer(.0))", "docstring": "Initialize the memory object.\n\nArgs:\nbatch_size: the batch size.\nkey_depth: the depth of the memory keys.\nval_depth: the depth of the memory values.\nmemory_size: the number of items in the memory.\nsharpen_factor: the sharpen_factor for addressing the memory.\nname: the optional variable scope.", "source": "juraj-google-style"}
{"code": "def _get_token(request=None, allowed_auth_schemes=('OAuth', 'Bearer'), allowed_query_keys=('bearer_token', 'access_token')):\n    allowed_auth_schemes = _listlike_guard(allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True)\n    auth_header = os.environ.get('HTTP_AUTHORIZATION')\n    if auth_header:\n        for auth_scheme in allowed_auth_schemes:\n            if auth_header.startswith(auth_scheme):\n                return auth_header[(len(auth_scheme) + 1):]\n        return None\n    if request:\n        allowed_query_keys = _listlike_guard(allowed_query_keys, 'allowed_query_keys', iterable_only=True)\n        for key in allowed_query_keys:\n            (token, _) = request.get_unrecognized_field_info(key)\n            if token:\n                return token", "docstring": "Get the auth token for this request.\n\nAuth token may be specified in either the Authorization header or\nas a query param (either access_token or bearer_token).  We'll check in\nthis order:\n1. Authorization header.\n2. bearer_token query param.\n3. access_token query param.\n\nArgs:\nrequest: The current request, or None.\n\nReturns:\nThe token in the request or None.", "source": "codesearchnet"}
{"code": "def parse_cscore(infile):\n    cscore_dict = {}\n    with open(infile, 'r') as f:\n        for ll in f.readlines():\n            if ll.lower().startswith('model1'):\n                l = ll.split()\n                cscore = l[1]\n                tmscore_full = l[2].split('+-')\n                tmscore = tmscore_full[0]\n                tmscore_err = tmscore_full[1]\n                rmsd_full = l[3].split('+-')\n                rmsd = rmsd_full[0]\n                rmsd_err = rmsd_full[1]\n                cscore_dict['c_score'] = float(cscore)\n                cscore_dict['tm_score'] = float(tmscore)\n                cscore_dict['tm_score_err'] = float(tmscore_err)\n                cscore_dict['rmsd'] = float(rmsd)\n                cscore_dict['rmsd_err'] = float(rmsd_err)\n    return cscore_dict", "docstring": "Parse the cscore file to return a dictionary of scores.\n\nArgs:\ninfile (str): Path to cscore\n\nReturns:\ndict: Dictionary of scores", "source": "codesearchnet"}
{"code": "def end_of(self, event_id, import_options=True):\n    event_id = str(event_id)\n    if (event_id in DatePickerDictionary.items):\n        linked_picker = DatePickerDictionary.items[event_id]\n        self.config['linked_to'] = linked_picker.config['id']\n        if import_options:\n            backup_moment_format = self.config['options']['format']\n            self.config['options'].update(linked_picker.config['options'])\n            self.config['options'].update(self.options_param)\n            if (self.format_param or ('format' in self.options_param)):\n                self.config['options']['format'] = backup_moment_format\n            else:\n                self.format = linked_picker.format\n        self.config['options']['useCurrent'] = False\n        self._link_to(linked_picker)\n    else:\n        raise KeyError(('start-date not specified for event_id \"%s\"' % event_id))\n    return self", "docstring": "Set Date-Picker as the end-date of a date-range.\n\nArgs:\n- event_id (string): User-defined unique id for linking two fields\n- import_options (bool): inherit options from start-date input,\ndefault: TRUE", "source": "codesearchnet"}
{"code": "def compile_action_preconditions(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]:\n    scope = self.action_precondition_scope(state, action)\n    preconds = []\n    with self.graph.as_default():\n        with tf.name_scope('action_preconditions'):\n            for p in self.rddl.domain.preconds:\n                fluent = self._compile_expression(p, scope)\n                preconds.append(fluent)\n            return preconds", "docstring": "Compiles the action preconditions given current `state` and `action` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA list of :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def create_event_model(event):\n    \n    if event['type'].startswith('task'):\n        factory = {\n            JobEventName.Started: JobStartedEvent,\n            JobEventName.Succeeded: JobSucceededEvent,\n            JobEventName.Stopped: JobStoppedEvent,\n            JobEventName.Aborted: JobAbortedEvent\n        }\n        if event['type'] in factory:\n            return factory[event['type']].from_event(event)\n        else:\n            raise JobEventTypeUnsupported(\n                'Unsupported event type {}'.format(event['type']))\n    elif event['type'].startswith('worker'):\n        raise WorkerEventTypeUnsupported(\n            'Unsupported event type {}'.format(event['type']))\n    else:\n        raise EventTypeUnknown('Unknown event type {}'.format(event['type']))", "docstring": "Factory function that turns a celery event into an event object.\n\nArgs:\nevent (dict): A dictionary that represents a celery event.\n\nReturns:\nobject: An event object representing the received event.\n\nRaises:\nJobEventTypeUnsupported: If an unsupported celery job event was received.\nWorkerEventTypeUnsupported: If an unsupported celery worker event was received.\nEventTypeUnknown: If an unknown event type (neither job nor worker) was received.", "source": "juraj-google-style"}
{"code": "def yaw_pitch_roll(self):\n    self._normalise()\n    yaw = np.arctan2((2 * ((self.q[0] * self.q[3]) - (self.q[1] * self.q[2]))), (1 - (2 * ((self.q[2] ** 2) + (self.q[3] ** 2)))))\n    pitch = np.arcsin((2 * ((self.q[0] * self.q[2]) + (self.q[3] * self.q[1]))))\n    roll = np.arctan2((2 * ((self.q[0] * self.q[1]) - (self.q[2] * self.q[3]))), (1 - (2 * ((self.q[1] ** 2) + (self.q[2] ** 2)))))\n    return (yaw, pitch, roll)", "docstring": "Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention\n\nReturns:\nyaw:    rotation angle around the z-axis in radians, in the range `[-pi, pi]`\npitch:  rotation angle around the y'-axis in radians, in the range `[-pi/2, -pi/2]`\nroll:   rotation angle around the x''-axis in radians, in the range `[-pi, pi]`\n\nThe resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)\n\nNote:\nThis feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.", "source": "codesearchnet"}
{"code": "def initial_state(self, batch_size, trainable=False):\n    init_state = tf.eye(self._mem_slots, batch_shape=[batch_size])\n    if (self._mem_size > self._mem_slots):\n        difference = (self._mem_size - self._mem_slots)\n        pad = tf.zeros((batch_size, self._mem_slots, difference))\n        init_state = tf.concat([init_state, pad], (- 1))\n    elif (self._mem_size < self._mem_slots):\n        init_state = init_state[(:, :, :self._mem_size)]\n    return init_state", "docstring": "Creates the initial memory.\n\nWe should ensure each row of the memory is initialized to be unique,\nso initialize the matrix to be the identity. We then pad or truncate\nas necessary so that init_state is of size\n(batch_size, self._mem_slots, self._mem_size).\n\nArgs:\nbatch_size: The size of the batch.\ntrainable: Whether the initial state is trainable. This is always True.\n\nReturns:\ninit_state: A truncated or padded matrix of size\n(batch_size, self._mem_slots, self._mem_size).", "source": "codesearchnet"}
{"code": "def Collect(self, top_frame):\n    \n    \n    frame = top_frame\n    top_line = self.breakpoint['location']['line']\n    breakpoint_frames = self.breakpoint['stackFrames']\n    try:\n      \n      if 'expressions' in self.breakpoint:\n        self.breakpoint['evaluatedExpressions'] = [\n            self._CaptureExpression(top_frame, expression) for expression\n            in self.breakpoint['expressions']]\n\n      while frame and (len(breakpoint_frames) < self.max_frames):\n        line = top_line if frame == top_frame else frame.f_lineno\n        code = frame.f_code\n        if len(breakpoint_frames) < self.max_expand_frames:\n          frame_arguments, frame_locals = self.CaptureFrameLocals(frame)\n        else:\n          frame_arguments = []\n          frame_locals = []\n\n        breakpoint_frames.append({\n            'function': _GetFrameCodeObjectName(frame),\n            'location': {\n                'path': NormalizePath(code.co_filename),\n                'line': line\n            },\n            'arguments': frame_arguments,\n            'locals': frame_locals\n        })\n        frame = frame.f_back\n\n    except BaseException as e:  \n      \n      \n      self.breakpoint['status'] = {\n          'isError': True,\n          'description': {\n              'format': ('INTERNAL ERROR: Failed while capturing locals '\n                         'of frame $0: $1'),\n              'parameters': [str(len(breakpoint_frames)), str(e)]}}\n\n    \n    \n    num_vars = 1\n\n    \n    \n    while (num_vars < len(self._var_table)) and (\n        self._total_size < self.max_size):\n      self._var_table[num_vars] = self.CaptureVariable(\n          self._var_table[num_vars], 0, self.default_capture_limits,\n          can_enqueue=False)\n\n      \n      num_vars += 1\n\n    \n    \n    self.TrimVariableTable(num_vars)\n\n    self._CaptureEnvironmentLabels()\n    self._CaptureRequestLogId()\n    self._CaptureUserId()", "docstring": "Collects call stack, local variables and objects.\n\nStarts collection from the specified frame. We don't start from the top\nframe to exclude the frames due to debugger. Updates the content of\nself.breakpoint.\n\nArgs:\ntop_frame: top frame to start data collection.", "source": "juraj-google-style"}
{"code": "def make_call_types(f, globals_d):\n    \n    \n    arg_spec = getargspec(f)\n    args = [k for k in arg_spec.args if k != \"self\"]\n\n    defaults = {}  \n    if arg_spec.defaults:\n        default_args = args[-len(arg_spec.defaults):]\n        for a, default in zip(default_args, arg_spec.defaults):\n            defaults[a] = default\n\n    if not getattr(f, \"__annotations__\", None):\n        \n        annotations = make_annotations(f, globals_d)\n    else:\n        annotations = f.__annotations__\n\n    call_types = OrderedDict()  \n    for a in args:\n        anno = anno_with_default(annotations[a], defaults.get(a, NO_DEFAULT))\n        assert isinstance(anno, Anno), \\\n            \"Argument %r has type %r which is not an Anno\" % (a, anno)\n        call_types[a] = anno\n\n    return_type = anno_with_default(annotations.get(\"return\", None))\n    if return_type is Any:\n        return_type = Anno(\"Any return value\", Any, \"return\")\n    assert return_type is None or isinstance(return_type, Anno), \\\n        \"Return has type %r which is not an Anno\" % (return_type,)\n\n    return call_types, return_type", "docstring": "Make a call_types dictionary that describes what arguments to pass to f\n\nArgs:\nf: The function to inspect for argument names (without self)\nglobals_d: A dictionary of globals to lookup annotation definitions in", "source": "juraj-google-style"}
{"code": "def start_listener_thread(self, timeout_ms=30000, exception_handler=None):\n        \n        try:\n            thread = Thread(target=self.listen_forever,\n                            args=(timeout_ms, exception_handler))\n            thread.daemon = True\n            self.sync_thread = thread\n            self.should_listen = True\n            thread.start()\n        except RuntimeError:\n            e = sys.exc_info()[0]\n            logger.error(\"Error: unable to start thread. %s\", str(e))", "docstring": "Start a listener thread to listen for events in the background.\n\nArgs:\ntimeout (int): How long to poll the Home Server for before\nretrying.\nexception_handler (func(exception)): Optional exception handler\nfunction which can be used to handle exceptions in the caller\nthread.", "source": "juraj-google-style"}
{"code": "def __write_to_fil_light(self, filename_out, *args, **kwargs):\n        \n\n        n_bytes  = self.header[b'nbits'] / 8\n        with open(filename_out, \"wb\") as fileh:\n            fileh.write(generate_sigproc_header(self)) \n            j = self.data\n            if n_bytes == 4:\n                np.float32(j.ravel()).tofile(fileh)\n            elif n_bytes == 2:\n                np.int16(j.ravel()).tofile(fileh)\n            elif n_bytes == 1:\n                np.int8(j.ravel()).tofile(fileh)", "docstring": "Write data to .fil file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "juraj-google-style"}
{"code": "def _get_snippet_ctime(self, snip_name):\n        \n        if snip_name not in self.snip_ctimes:\n            snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snip_name)\n            self.snip_ctimes[snip_name] = os.path.getctime(snippet.path)\n        return self.snip_ctimes[snip_name]", "docstring": "Returns and remembers (during this DevAssistant invocation) last ctime of given\nsnippet.\n\nCalling ctime costs lost of time and some snippets, like common_args, are used widely,\nso we don't want to call ctime bazillion times on them during one invocation.\n\nArgs:\nsnip_name: name of snippet to get ctime for\nReturns:\nctime of the snippet", "source": "juraj-google-style"}
{"code": "def _GetWinevtRcDatabaseReader(self):\n    if ((not self._winevt_database_reader) and self._data_location):\n        database_path = os.path.join(self._data_location, self._WINEVT_RC_DATABASE)\n        if (not os.path.isfile(database_path)):\n            return None\n        self._winevt_database_reader = winevt_rc.WinevtResourcesSqlite3DatabaseReader()\n        if (not self._winevt_database_reader.Open(database_path)):\n            self._winevt_database_reader = None\n    return self._winevt_database_reader", "docstring": "Opens the Windows Event Log resource database reader.\n\nReturns:\nWinevtResourcesSqlite3DatabaseReader: Windows Event Log resource\ndatabase reader or None.", "source": "codesearchnet"}
{"code": "class PoolerAnswerClass(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n        self.activation = nn.Tanh()\n        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)\n        logger.warning_once('[DEPRECATION WARNING] `PoolerAnswerClass` is deprecated and will be removed in v4.53. Please use model-specific class, e.g. `XLMPoolerAnswerClass`.')\n\n    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n        \n        hsz = hidden_states.shape[-1]\n        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n        if start_positions is not None:\n            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)\n        if cls_index is not None:\n            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)\n            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)\n        else:\n            cls_token_state = hidden_states[:, -1, :]\n        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))\n        x = self.activation(x)\n        x = self.dense_1(x).squeeze(-1)\n        return x", "docstring": "Compute SQuAD 2.0 answer class from classification and start tokens hidden states.\n\nArgs:\nconfig ([`PretrainedConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model.", "source": "github-repos"}
{"code": "def __call__(self, func: T) -> T:\n    api_names_attr = API_ATTRS[self._api_name].names\n    api_names_attr_v1 = API_ATTRS_V1[self._api_name].names\n    _, undecorated_func = tf_decorator.unwrap(func)\n    self.set_attr(undecorated_func, api_names_attr, self._names)\n    self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)\n    for name in self._names:\n        _NAME_TO_SYMBOL_MAPPING[name] = func\n    for name_v1 in self._names_v1:\n        _NAME_TO_SYMBOL_MAPPING['compat.v1.%s' % name_v1] = func\n    return func", "docstring": "Calls this decorator.\n\nArgs:\nfunc: decorated symbol (function or class).\n\nReturns:\nThe input function with _tf_api_names attribute set.", "source": "github-repos"}
{"code": "def create_pull_response(responses):\n    from google.cloud import pubsub\n    from google.protobuf import timestamp_pb2\n    res = pubsub.types.PullResponse()\n    for response in responses:\n        received_message = pubsub.types.ReceivedMessage()\n        message = received_message.message\n        message.data = response.data\n        if response.attributes is not None:\n            for k, v in response.attributes.items():\n                message.attributes[k] = v\n        publish_time = timestamp_pb2.Timestamp()\n        if response.publish_time_secs is not None:\n            publish_time.seconds = response.publish_time_secs\n        if response.publish_time_nanos is not None:\n            publish_time.nanos = response.publish_time_nanos\n        message.publish_time = publish_time\n        if response.ack_id is not None:\n            received_message.ack_id = response.ack_id\n        res.received_messages.append(received_message)\n    return res", "docstring": "Create an instance of ``google.cloud.pubsub.types.ReceivedMessage``.\n\nUsed to simulate the response from pubsub.SubscriberClient().pull().\n\nArgs:\nresponses: list of ``PullResponseMessage``\n\nReturns:\nAn instance of ``google.cloud.pubsub.types.PullResponse`` populated with\nresponses.", "source": "github-repos"}
{"code": "def get_reference(root):\n    reference = {}\n    elem = root.find('bibliographyLink')\n    if (elem is None):\n        raise MissingElementError('bibliographyLink')\n    ref_doi = elem.get('doi', None)\n    ref_key = elem.get('preferredKey', None)\n    if (ref_doi is not None):\n        try:\n            ref = crossref_api.works(ids=ref_doi)['message']\n        except (HTTPError, habanero.RequestError, ConnectionError):\n            if (ref_key is None):\n                raise KeywordError('DOI not found and preferredKey attribute not set')\n            else:\n                warn('Missing doi attribute in bibliographyLink or lookup failed. Setting \"detail\" key as a fallback; please update to the appropriate fields.')\n                reference['detail'] = ref_key\n                if (reference['detail'][(- 1)] != '.'):\n                    reference['detail'] += '.'\n        else:\n            if (ref_key is not None):\n                warn('Using DOI to obtain reference information, rather than preferredKey.')\n            reference['doi'] = elem.attrib['doi']\n            reference['journal'] = ref.get('container-title')[0]\n            ref_year = (ref.get('published-print') or ref.get('published-online'))\n            reference['year'] = int(ref_year['date-parts'][0][0])\n            reference['volume'] = int(ref.get('volume'))\n            reference['pages'] = ref.get('page')\n            reference['authors'] = []\n            for author in ref['author']:\n                auth = {}\n                auth['name'] = ' '.join([author['given'], author['family']])\n                orcid = author.get('ORCID')\n                if orcid:\n                    auth['ORCID'] = orcid.lstrip('http:\n                reference['authors'].append(auth)\n    elif (ref_key is not None):\n        warn('Missing doi attribute in bibliographyLink. Setting \"detail\" key as a fallback; please update to the appropriate fields.')\n        reference['detail'] = ref_key\n        if (reference['detail'][(- 1)] != '.'):\n            reference['detail'] += '.'\n    else:\n        raise MissingAttributeError('preferredKey', 'bibliographyLink')\n    return reference", "docstring": "Read reference info from root of ReSpecTh XML file.\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with reference information", "source": "codesearchnet"}
{"code": "def path_fraction_point(points, fraction):\n    \n    seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True)\n    return linear_interpolate(points[seg_id], points[seg_id + 1], offset)", "docstring": "Computes the point which corresponds to the fraction\nof the path length along the piecewise linear curve which\nis constructed from the set of points.\n\nArgs:\npoints: an iterable of indexable objects with indices\n0, 1, 2 correspoding to 3D cartesian coordinates\nfraction: path length fraction (0 <= fraction <= 1)\n\nReturns:\nThe 3D coordinates of the aforementioned point", "source": "juraj-google-style"}
{"code": "def tail(self, n):\n        \n        \n        if n < 0:\n            n = max(0, len(self.index) + n)\n        if self._is_transposed:\n            result = self.__constructor__(\n                self.data.transpose().take(1, -n).transpose(),\n                self.index[-n:],\n                self.columns,\n                self._dtype_cache,\n            )\n            result._is_transposed = True\n        else:\n            result = self.__constructor__(\n                self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache\n            )\n        return result", "docstring": "Returns the last n rows.\n\nArgs:\nn: Integer containing the number of rows to return.\n\nReturns:\nDataManager containing the last n rows of the original DataManager.", "source": "juraj-google-style"}
{"code": "def _GetNumberOfDaysInCentury(self, year):\n    if (year < 0):\n        raise ValueError('Year value out of bounds.')\n    (year, _) = divmod(year, 100)\n    if self._IsLeapYear(year):\n        return 36525\n    return 36524", "docstring": "Retrieves the number of days in a century.\n\nArgs:\nyear (int): year in the century e.g. 1970.\n\nReturns:\nint: number of (remaining) days in the century.\n\nRaises:\nValueError: if the year value is out of bounds.", "source": "codesearchnet"}
{"code": "def GetDataDownloader(self, version=sorted(_SERVICE_MAP.keys())[(- 1)], server=None):\n    if (not server):\n        server = DEFAULT_ENDPOINT\n    return DataDownloader(self, version, server)", "docstring": "Creates a downloader for Ad Manager reports and PQL result sets.\n\nThis is a convenience method. It is functionally identical to calling\nDataDownloader(ad_manager_client, version, server)\n\nArgs:\n[optional]\nversion: A string identifying the Ad Manager version to connect to.\nThis defaults to what is currently the latest version. This will be\nupdated in future releases to point to what is then the\nlatest version.\nserver: A string identifying the webserver hosting the Ad Manager API.\n\nReturns:\nA DataDownloader tied to this AdManagerClient, ready to download reports.", "source": "codesearchnet"}
{"code": "def getmtime(self, path):\n    try:\n        file_obj = self.filesystem.resolve(path)\n        return file_obj.st_mtime\n    except IOError:\n        self.filesystem.raise_os_error(errno.ENOENT, winerror=3)", "docstring": "Returns the modification time of the fake file.\n\nArgs:\npath: the path to fake file.\n\nReturns:\n(int, float) the modification time of the fake file\nin number of seconds since the epoch.\n\nRaises:\nOSError: if the file does not exist.", "source": "codesearchnet"}
{"code": "def parse_peddy_ped_check(lines):\n    \n    ped_check = []\n    header = []\n    for i,line in enumerate(lines):\n        line = line.rstrip()\n        if i == 0:\n            \n            header = line.lstrip('\n        else:\n            pair_info = dict(zip(header, line.split(',')))\n            \n            \n            pair_info['hets_a'] = convert_number(pair_info['hets_a'])\n            \n            \n            pair_info['hets_b'] = convert_number(pair_info['hets_b'])\n            \n            \n            \n            pair_info['ibs0'] = convert_number(pair_info['ibs0'])\n            \n            \n            \n            pair_info['ibs2'] = convert_number(pair_info['ibs2'])\n\n            \n            pair_info['n'] = convert_number(pair_info['n'])\n            \n            \n            pair_info['rel'] = convert_number(pair_info['rel'])\n\n            \n            pair_info['pedigree_relatedness'] = convert_number(pair_info['pedigree_relatedness'])\n            \n            \n            pair_info['rel_difference'] = convert_number(pair_info['rel_difference'])\n\n            \n            pair_info['shared_hets'] = convert_number(pair_info['shared_hets'])\n\n            \n            \n            pair_info['pedigree_parents'] = make_bool(pair_info.get('pedigree_parents'))\n            \n            \n            \n            pair_info['predicted_parents'] = make_bool(pair_info.get('predicted_parents'))\n\n            \n            pair_info['parent_error'] = make_bool(pair_info.get('parent_error'))\n\n            \n            pair_info['sample_duplication_error'] = make_bool(pair_info.get('sample_duplication_error'))\n            \n            \n            ped_check.append(pair_info)\n\n    return ped_check", "docstring": "Parse a .ped_check.csv file\n\nArgs:\nlines(iterable(str))\n\nReturns:\nped_check(list(dict))", "source": "juraj-google-style"}
{"code": "def _decorate_run_options_for_profile(self, run_options):\n    run_options.trace_level = config_pb2.RunOptions.FULL_TRACE", "docstring": "Modify a RunOptions object for profiling TensorFlow graph execution.\n\nArgs:\nrun_options: (RunOptions) the modified RunOptions object.", "source": "github-repos"}
{"code": "def page(self, title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):\n    if (((title is None) or (title.strip() == '')) and (pageid is None)):\n        raise ValueError('Either a title or a pageid must be specified')\n    elif title:\n        if auto_suggest:\n            temp_title = self.suggest(title)\n            if (temp_title is None):\n                raise PageError(title=title)\n            else:\n                title = temp_title\n        return MediaWikiPage(self, title, redirect=redirect, preload=preload)\n    else:\n        return MediaWikiPage(self, pageid=pageid, preload=preload)", "docstring": "Get MediaWiki page based on the provided title or pageid\n\nArgs:\ntitle (str): Page title\npageid (int): MediaWiki page identifier\nauto-suggest (bool): **True:** Allow page title auto-suggest\nredirect (bool): **True:** Follow page redirects\npreload (bool): **True:** Load most page properties\nRaises:\nValueError: when title is blank or None and no pageid is \\\nprovided\nRaises:\n:py:func:`mediawiki.exceptions.PageError`: if page does \\\nnot exist\nNote:\nTitle takes precedence over pageid if both are provided", "source": "codesearchnet"}
{"code": "def _calculate_page_index(index, data):\n    if (index > data['total_results']):\n        raise ValueError('index not in paged data')\n    page_length = len(data['results'])\n    return (((index", "docstring": "Determine the location of a given index in paged data.\n\nArguments:\nindex (:py:class:`int`): The overall index.\ndata: (:py:class:`dict`) The first page of data.\n\nReturns:\n:py:class:`tuple`: The location of that index, in the format\n``(page, index_in_page)``.", "source": "codesearchnet"}
{"code": "def MemberVisible(component, name, member, class_attrs=None, verbose=False):\n    if isinstance(name, str) and name.startswith('__'):\n        return False\n    if verbose:\n        return True\n    if member is absolute_import or member is division or member is print_function:\n        return False\n    if isinstance(member, type(absolute_import)):\n        return False\n    modules_to_hide = []\n    if inspect.ismodule(member) and member in modules_to_hide:\n        return False\n    if inspect.isclass(component):\n        if class_attrs is None:\n            class_attrs = inspectutils.GetClassAttrsDict(component) or {}\n        class_attr = class_attrs.get(name)\n        if class_attr:\n            if class_attr.kind in ('method', 'property'):\n                return False\n            tuplegetter = getattr(collections, '_tuplegetter', type(None))\n            if isinstance(class_attr.object, tuplegetter):\n                return False\n    if isinstance(name, str):\n        return not name.startswith('_')\n    return True", "docstring": "Returns whether a member should be included in auto-completion or help.\n\nDetermines whether a member of an object with the specified name should be\nincluded in auto-completion or help text(both usage and detailed help).\n\nIf the member name starts with '__', it will always be excluded. If it\nstarts with only one '_', it will be included for all non-string types. If\nverbose is True, the members, including the private members, are included.\n\nWhen not in verbose mode, some modules and functions are excluded as well.\n\nArgs:\ncomponent: The component containing the member.\nname: The name of the member.\nmember: The member itself.\nclass_attrs: (optional) If component is a class, provide this as:\ninspectutils.GetClassAttrsDict(component). If not provided, it will be\ncomputed.\nverbose: Whether to include private members.\nReturns\nA boolean value indicating whether the member should be included.", "source": "github-repos"}
{"code": "def get_formal_type_parameter(self, t: str) -> 'BaseValue':\n    del t\n    return self.ctx.convert.unsolvable", "docstring": "Get the class's type for the type parameter.\n\nTreating self as a class_mixin.Class, gets its formal type for the given\ntype parameter. For the real implementation, see\nParameterizedClass.get_formal_type_parameter.\n\nArgs:\nt: The name of the type parameter.\n\nReturns:\nA formal type.", "source": "github-repos"}
{"code": "def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):\n        \n\n        is_successor = self._is_predecessor_of_possible_successor(\n            txn_id,\n            possible_successor)\n        in_different_batch = not self._is_in_same_batch(txn_id,\n                                                        possible_successor)\n        has_not_been_seen = possible_successor not in already_seen\n\n        return is_successor and in_different_batch and has_not_been_seen", "docstring": "Decide if possible_successor should be replayed.\n\nArgs:\ntxn_id (str): Id of txn in failed batch.\npossible_successor (str): Id of txn to possibly replay.\nalready_seen (list): A list of possible_successors that have\nbeen replayed.\n\nReturns:\n(bool): If the possible_successor should be replayed.", "source": "juraj-google-style"}
{"code": "def _runExperimentImpl(options, model=None):\n  \n  json_helpers.validate(options.privateOptions,\n                        schemaDict=g_parsedPrivateCommandLineOptionsSchema)\n\n  \n  experimentDir = options.experimentDir\n  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(\n      experimentDir)\n  expIface = helpers.getExperimentDescriptionInterfaceFromModule(\n      descriptionPyModule)\n\n  \n  if options.privateOptions['listAvailableCheckpoints']:\n    _printAvailableCheckpoints(experimentDir)\n    return None\n\n  \n  experimentTasks = expIface.getModelControl().get('tasks', [])\n\n  \n  \n  if (len(experimentTasks) == 0 and\n      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):\n    expIface.convertNupicEnvToOPF()\n    experimentTasks = expIface.getModelControl().get('tasks', [])\n\n  \n  \n  expIface.normalizeStreamSources()\n\n  \n  newSerialization = options.privateOptions['newSerialization']\n\n  \n  if options.privateOptions['listTasks']:\n    print \"Available tasks:\"\n\n    for label in [t['taskLabel'] for t in experimentTasks]:\n      print \"\\t\", label\n\n    return None\n\n  \n  if options.privateOptions['runCheckpointName']:\n\n    assert model is None\n\n    checkpointName = options.privateOptions['runCheckpointName']\n\n    model = ModelFactory.loadFromCheckpoint(\n          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),\n          newSerialization=newSerialization)\n\n  elif model is not None:\n    print \"Skipping creation of OPFExperiment instance: caller provided his own\"\n  else:\n    modelDescription = expIface.getModelDescription()\n    model = ModelFactory.create(modelDescription)\n\n  \n  if options.privateOptions['createCheckpointName']:\n    checkpointName = options.privateOptions['createCheckpointName']\n    _saveModel(model=model,\n               experimentDir=experimentDir,\n               checkpointLabel=checkpointName,\n               newSerialization=newSerialization)\n\n    return model\n\n  \n\n  \n  taskIndexList = range(len(experimentTasks))\n\n  customTaskExecutionLabelsList = options.privateOptions['taskLabels']\n  if customTaskExecutionLabelsList:\n    taskLabelsList = [t['taskLabel'] for t in experimentTasks]\n    taskLabelsSet = set(taskLabelsList)\n\n    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)\n\n    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \\\n           (\"Some custom-provided task execution labels don't correspond \"\n            \"to actual task labels: mismatched labels: %r; actual task \"\n            \"labels: %r.\") % (customTaskExecutionLabelsSet - taskLabelsSet,\n                              customTaskExecutionLabelsList)\n\n    taskIndexList = [taskLabelsList.index(label) for label in\n                     customTaskExecutionLabelsList]\n\n    print \"\n                                                   i in taskIndexList]\n\n  \n  for taskIndex in taskIndexList:\n\n    task = experimentTasks[taskIndex]\n\n    \n    taskRunner = _TaskRunner(model=model,\n                             task=task,\n                             cmdOptions=options)\n    taskRunner.run()\n    del taskRunner\n\n    if options.privateOptions['checkpointModel']:\n      _saveModel(model=model,\n                 experimentDir=experimentDir,\n                 checkpointLabel=task['taskLabel'],\n                 newSerialization=newSerialization)\n\n  return model", "docstring": "Creates and runs the experiment\n\nArgs:\noptions: namedtuple 
ParseCommandLineOptionsResult\nmodel: For testing: may pass in an existing OPF Model instance\nto use instead of creating a new one.\n\nReturns: reference to OPFExperiment instance that was constructed (this\nis provided to aid with debugging) or None, if none was\ncreated.", "source": "juraj-google-style"}
{"code": "def exhaustive_curie_check(self, ontology: pd.DataFrame, curie_predicate: str, curie_prefix: str, diff: bool=True) -> Tuple[list]:\n    (inside, outside) = ([], [])\n    curie_prefix = curie_prefix.replace(':', '')\n    header = (['Index'] + list(ontology.columns))\n    for row in ontology.itertuples():\n        row = {header[i]: val for (i, val) in enumerate(row)}\n        entity_curie = row[curie_predicate]\n        if isinstance(entity_curie, list):\n            if (len(entity_curie) != 0):\n                exit('Need to have only 1 iri in the cell from the onotology.')\n            else:\n                entity_curie = entity_curie[0]\n        entity_curie = ((curie_prefix + ':') + self.extract_fragment(entity_curie))\n        ilx_row = self.curie2row.get(entity_curie)\n        if ilx_row:\n            inside.append({'external_ontology_row': row, 'ilx_rows': [ilx_row]})\n        else:\n            outside.append(row)\n    if diff:\n        diff = self.__exhaustive_diff(inside)\n        return (inside, outside, diff)\n    return (inside, outside)", "docstring": "All entities with conflicting curies gets a full diff to see if they belong\n\nArgs:\nontology: pandas DataFrame created from an ontology where the colnames are predicates\nand if classes exist it is also thrown into a the colnames.\ncurie_predicate: usually in qname form and is the colname of the DataFrame\ncurie_prefix: Not all cells in the DataFrame will have complete curies so we extract\nthe fragement from the cell and use the prefix to complete it.\ndiff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2\nReturns:\ninside: entities that are inside of InterLex\noutside: entities NOT in InterLex\ndiff (optional): List[List[dict]]... so complicated but usefull diff between matches only", "source": "codesearchnet"}
{"code": "def _controller_buffer(self, port):\n        \n        \n        address = _LIB.Controller(self._env, port)\n        \n        buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents\n        \n        return np.frombuffer(buffer_, dtype='uint8')", "docstring": "Find the pointer to a controller and setup a NumPy buffer.\n\nArgs:\nport: the port of the controller to setup\n\nReturns:\na NumPy buffer with the controller's binary data", "source": "juraj-google-style"}
{"code": "def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:\n    etype = expr.etype\n    args = expr.args\n    if (len(args) == 1):\n        etype2op = {'+': (lambda x: x), '-': (lambda x: (- x))}\n        if (etype[1] not in etype2op):\n            raise ValueError('Invalid binary arithmetic expression:\\n{}'.format(expr))\n        op = etype2op[etype[1]]\n        x = self._compile_expression(args[0], scope, batch_size, noise)\n        fluent = op(x)\n    else:\n        etype2op = {'+': (lambda x, y: (x + y)), '-': (lambda x, y: (x - y)), '*': (lambda x, y: (x * y)), '/': (lambda x, y: (x / y))}\n        if (etype[1] not in etype2op):\n            raise ValueError('Invalid binary arithmetic expression:\\n{}'.format(expr))\n        op = etype2op[etype[1]]\n        x = self._compile_expression(args[0], scope, batch_size, noise)\n        y = self._compile_expression(args[1], scope, batch_size, noise)\n        fluent = op(x, y)\n    return fluent", "docstring": "Compile an arithmetic expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "codesearchnet"}
{"code": "def slice_hidden(self, x):\n    x_sliced = tf.reshape(x, shape=[(- 1), self.hparams.num_blocks, self.hparams.block_dim])\n    return x_sliced", "docstring": "Slice encoder hidden state into block_dim.\n\nArgs:\nx: Encoder hidden state of shape [-1, hidden_size].\n\nReturns:\nSliced states of shape [-1, num_blocks, block_dim].", "source": "codesearchnet"}
{"code": "def floor(x):\n    if any_symbolic_tensors((x,)):\n        return Floor().symbolic_call(x)\n    return backend.numpy.floor(x)", "docstring": "Return the floor of the input, element-wise.\n\nThe floor of the scalar `x` is the largest integer `i`, such that `i <= x`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise floor of `x`.", "source": "github-repos"}
{"code": "def get(cls, issue_id):\n    res = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id)\n    return (cls(res) if res else None)", "docstring": "Returns the class object identified by `issue_id`\n\nArgs:\nissue_id (str): Unique EC2 Instance ID to load from database\n\nReturns:\nEC2 Instance object if found, else None", "source": "codesearchnet"}
{"code": "def _ParseCachedEntryVista(self, value_data, cached_entry_offset):\n    \n    try:\n      cached_entry = self._ReadStructureFromByteStream(\n          value_data[cached_entry_offset:], cached_entry_offset,\n          self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse cached entry value with error: {0!s}'.format(\n              exception))\n\n    path_size = cached_entry.path_size\n    maximum_path_size = cached_entry.maximum_path_size\n    path_offset = cached_entry.path_offset\n\n    if path_offset > 0 and path_size > 0:\n      path_size += path_offset\n      maximum_path_size += path_offset\n\n      try:\n        path = value_data[path_offset:path_size].decode('utf-16-le')\n      except UnicodeDecodeError:\n        raise errors.ParseError('Unable to decode cached entry path to string')\n\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = (\n        self._cached_entry_data_type_map.GetByteSize())\n    cached_entry_object.insertion_flags = cached_entry.insertion_flags\n    cached_entry_object.last_modification_time = (\n        cached_entry.last_modification_time)\n    cached_entry_object.path = path\n    cached_entry_object.shim_flags = cached_entry.shim_flags\n\n    return cached_entry_object", "docstring": "Parses a Windows Vista cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def get(cls, session, team_id):\n        \n        return cls(\n            '/teams/%d.json' % team_id,\n            singleton=True,\n            session=session,\n        )", "docstring": "Return a specific team.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nteam_id (int): The ID of the team to get.\n\nReturns:\nhelpscout.models.Person: A person singleton representing the team,\nif existing. Otherwise ``None``.", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.\n\nArgs:\nrequest: (BigqueryJobsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(JobList) The response message.", "source": "github-repos"}
{"code": "def preprocess(source):\n    doc = html5lib.parseFragment(source)\n    source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')\n    source = source.replace(u'\\n', u'').strip()\n    source = re.sub('\\\\s\\\\s+', u' ', source)\n    return source", "docstring": "Removes unnecessary break lines and white spaces.\n\nArgs:\nsource (str): Input sentence.\n\nReturns:\nPreprocessed sentence. (str)", "source": "codesearchnet"}
{"code": "def _parse_flowcontrol_send(self, config):\n        \n        value = 'off'\n        match = re.search(r'flowcontrol send (\\w+)$', config, re.M)\n        if match:\n            value = match.group(1)\n        return dict(flowcontrol_send=value)", "docstring": "Scans the config block and returns the flowcontrol send value\n\nArgs:\nconfig (str): The interface config block to scan\n\nReturns:\ndict: Returns a dict object with the flowcontrol send value\nretrieved from the config block.  The returned dict object\nis intended to be merged into the interface resource dict", "source": "juraj-google-style"}
{"code": "def time_zones_for_number(numobj):\n    \n    ntype = number_type(numobj)\n    if ntype == PhoneNumberType.UNKNOWN:\n        return _UNKNOWN_TIME_ZONE_LIST\n    elif not is_number_type_geographical(ntype, numobj.country_code):\n        return _country_level_time_zones_for_number(numobj)\n    return time_zones_for_geographical_number(numobj)", "docstring": "As time_zones_for_geographical_number() but explicitly checks the\nvalidity of the number passed in.\n\nArguments:\nnumobj -- a valid phone number for which we want to get the time zones to which it belongs\nReturns a list of the corresponding time zones or a single element list with the default\nunknown time zone if no other time zone was found or if the number was invalid", "source": "juraj-google-style"}
{"code": "def build_chain(self, source, chain):\n    for group in WalkByGroup(source, (chain.order + 1)):\n        pre = group[:(- 1)]\n        res = group[(- 1)]\n        if (pre not in chain.content):\n            chain.content[pre] = {res: 1}\n        elif (res not in chain.content[pre]):\n            chain.content[pre][res] = 1\n        else:\n            chain.content[pre][res] += 1\n    chain.decache()", "docstring": "Build markov chain from source on top of existin chain\n\nArgs:\nsource: iterable which will be used to build chain\nchain: MarkovChain in currently loaded shelve file that\nwill be extended by source", "source": "codesearchnet"}
{"code": "def _GetEntries(self, paths, max_entries, iterator_from_file, is_sequence=False):\n    entries = {}\n    index = 0\n    for filepath in paths:\n        reader = iterator_from_file(filepath)\n        for record in reader:\n            if is_sequence:\n                sequence_example = tf.train.SequenceExample.FromString(record)\n                self._ParseExample(sequence_example.context.feature, sequence_example.feature_lists.feature_list, entries, index)\n            else:\n                self._ParseExample(tf.train.Example.FromString(record).features.feature, [], entries, index)\n            index += 1\n            if (index == max_entries):\n                return (entries, index)\n    return (entries, index)", "docstring": "Extracts examples into a dictionary of feature values.\n\nArgs:\npaths: A list of the paths to the files to parse.\nmax_entries: The maximum number of examples to load.\niterator_from_file: A method that takes a file path string and returns an\niterator to the examples in that file.\nis_sequence: True if the input data from 'iterator_from_file' are\ntf.SequenceExamples, False if tf.Examples. Defaults to false.\n\nReturns:\nA tuple with two elements:\n- A dictionary of all features parsed thus far and arrays of their\nvalues.\n- The number of examples parsed.", "source": "codesearchnet"}
{"code": "def SetDecodedStreamSize(self, decoded_stream_size):\n    if self._is_open:\n        raise IOError('Already open.')\n    if (decoded_stream_size < 0):\n        raise ValueError('Invalid decoded stream size: {0:d} value out of bounds.'.format(decoded_stream_size))\n    self._decoded_stream_size = decoded_stream_size", "docstring": "Sets the decoded stream size.\n\nThis function is used to set the decoded stream size if it can be\ndetermined separately.\n\nArgs:\ndecoded_stream_size (int): size of the decoded stream in bytes.\n\nRaises:\nIOError: if the file-like object is already open.\nOSError: if the file-like object is already open.\nValueError: if the decoded stream size is invalid.", "source": "codesearchnet"}
{"code": "def _might_have_parameter(fn_or_cls, arg_name):\n  \n  if inspect.isclass(fn_or_cls):\n    fn = _find_class_construction_fn(fn_or_cls)\n  else:\n    fn = fn_or_cls\n\n  while hasattr(fn, '__wrapped__'):\n    fn = fn.__wrapped__\n  arg_spec = _get_cached_arg_spec(fn)\n  if six.PY3:\n    if arg_spec.varkw:\n      return True\n    return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs\n  else:\n    if arg_spec.keywords:\n      return True\n    return arg_name in arg_spec.args", "docstring": "Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.\n\nSpecifically, this means that `fn_or_cls` either has a parameter named\n`arg_name`, or has a `**kwargs` parameter.\n\nArgs:\nfn_or_cls: The function or class to check.\narg_name: The name fo the parameter.\n\nReturns:\nWhether `arg_name` might be a valid argument of `fn`.", "source": "juraj-google-style"}
{"code": "def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, \n                        pos=None, end=None):\n        \n        query = {}\n        \n        if chromosome:\n            query['chrom'] = chromosome\n        if end_chromosome:\n            query['end_chrom'] = end_chromosome\n        if sv_type:\n            query['sv_type'] = sv_type\n        if pos:\n            if not '$and' in query:\n                query['$and'] = []\n            query['$and'].append({'pos_left': {'$lte': pos}})\n            query['$and'].append({'pos_right': {'$gte': pos}})\n            \n        if end:\n            if not '$and' in query:\n                query['$and'] = []\n            query['$and'].append({'end_left': {'$lte': end}})\n            query['$and'].append({'end_right': {'$gte': end}})\n        \n        LOG.info(\"Find all sv variants {}\".format(query))\n        \n        return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])", "docstring": "Return all structural variants in the database\n\nArgs:\nchromosome (str)\nend_chromosome (str)\nsv_type (str)\npos (int): Left position of SV\nend (int): Right position of SV\n\nReturns:\nvariants (Iterable(Variant))", "source": "juraj-google-style"}
{"code": "class IntLayerNorm(nn.Module):\n\n    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'):\n        super().__init__()\n        self.normalized_shape = normalized_shape\n        self.eps = eps\n        self.weight = nn.Parameter(torch.zeros(normalized_shape))\n        self.bias = nn.Parameter(torch.zeros(normalized_shape))\n        self.quant_mode = quant_mode\n        if force_dequant in ['nonlinear', 'layernorm']:\n            logger.info('Force dequantize layernorm')\n            self.quant_mode = False\n        self.register_buffer('shift', torch.zeros(1))\n        self.output_bit = output_bit\n        self.max_bit = 32\n        self.dim_sqrt = None\n        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)\n\n    def set_shift(self, y_int):\n        with torch.no_grad():\n            y_sq_int = y_int ** 2\n            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n            shift = torch.log2(torch.sqrt(var_int / 2 ** self.max_bit)).ceil().max()\n            shift_old = self.shift\n            self.shift = torch.max(self.shift, shift)\n            logger.info(f'Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}')\n\n    def overflow_fallback(self, y_int):\n        \n        self.set_shift(y_int)\n        y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift)\n        y_sq_int = y_int_shifted ** 2\n        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n        return var_int\n\n    def forward(self, x, scaling_factor=None):\n        if not self.quant_mode:\n            mean = x.mean(axis=2, keepdim=True)\n            y = x - mean\n            var = torch.mean(y ** 2, axis=2, keepdim=True)\n            x = y / torch.sqrt(self.eps + var)\n            x = x * self.weight + self.bias\n            return (x, None)\n        if self.dim_sqrt is None:\n            n = torch.tensor(x.shape[2], dtype=torch.float)\n            self.dim_sqrt = torch.sqrt(n).to(x.device)\n        x_int = x / scaling_factor\n        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))\n        y_int = x_int - mean_int\n        y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift)\n        y_sq_int = y_int_shifted ** 2\n        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n        if self.training:\n            if var_int.max() >= 2 ** self.max_bit:\n                var_int = self.overflow_fallback(y_int)\n                assert var_int.max() < 2 ** self.max_bit + 0.1, 'Error detected in overflow handling: `var_int` exceeds `self.max_bit` (the maximum possible bit width)'\n        std_int = floor_ste.apply(torch.sqrt(var_int)) * 2 ** self.shift\n        factor = floor_ste.apply(2 ** 31 / std_int)\n        y_int = floor_ste.apply(y_int * factor / 2)\n        scaling_factor = self.dim_sqrt / 2 ** 30\n        bias = self.bias.data.detach() / self.weight.data.detach()\n        bias_int = floor_ste.apply(bias / scaling_factor)\n        y_int = y_int + bias_int\n        scaling_factor = scaling_factor * self.weight\n        x = y_int * scaling_factor\n        return (x, scaling_factor)", "docstring": "Quantized version of `torch.nn.LayerNorm`. 
Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.\n\nArgs:\noutput_bit (`int`, *optional*, defaults to `8`):\nBitwidth for the layer output activation.\nquant_mode (`bool`, *optional*, defaults to `False`):\nWhether or not the layer is quantized.\nforce_dequant (`str`, *optional*, defaults to `\"none\"`):\nForce dequantize the layer if either \"layernorm\" or \"nonlinear\" is given.", "source": "github-repos"}
{"code": "def add_graph(self, run_key, device_name, graph_def, debug=False):\n    graph_dict = (self._run_key_to_debug_graphs if debug else self._run_key_to_original_graphs)\n    if (not (run_key in graph_dict)):\n        graph_dict[run_key] = dict()\n    graph_dict[run_key][tf.compat.as_str(device_name)] = debug_graphs_helper.DebugGraphWrapper(graph_def)", "docstring": "Add a GraphDef.\n\nArgs:\nrun_key: A key for the run, containing information about the feeds,\nfetches, and targets.\ndevice_name: The name of the device that the `GraphDef` is for.\ngraph_def: An instance of the `GraphDef` proto.\ndebug: Whether `graph_def` consists of the debug ops.", "source": "codesearchnet"}
{"code": "def _determine_best_metric(self, metrics, trial):\n    is_new_best_metric = False\n    if self.args.metric_for_best_model is not None:\n        metric_to_check = self.args.metric_for_best_model\n        if not metric_to_check.startswith('eval_'):\n            metric_to_check = f'eval_{metric_to_check}'\n        try:\n            metric_value = metrics[metric_to_check]\n        except KeyError as exc:\n            raise KeyError(f\"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments.\") from exc\n        operator = np.greater if self.args.greater_is_better else np.less\n        if self.state.best_metric is None:\n            self.state.best_metric = float('-inf') if self.args.greater_is_better else float('inf')\n        if operator(metric_value, self.state.best_metric):\n            self.state.best_metric = metric_value\n            if self.args.save_strategy in [SaveStrategy.STEPS, SaveStrategy.EPOCH]:\n                self.state.best_global_step = self.state.global_step\n            is_new_best_metric = True\n    return is_new_best_metric", "docstring": "Determine if the model should be saved based on the evaluation metrics.\n\nReturns:\nbool: True if a new best metric was found, else False", "source": "github-repos"}
{"code": "def _ParseAndComputePenalties(self, code, dumptree=False):\n    tree = pytree_utils.ParseCodeToTree(code)\n    split_penalty.ComputeSplitPenalties(tree)\n    if dumptree:\n        pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)\n    return tree", "docstring": "Parses the code and computes split penalties.\n\nArguments:\ncode: code to parse as a string\ndumptree: if True, the parsed pytree (after penalty assignment) is dumped\nto stderr. Useful for debugging.\n\nReturns:\nParse tree.", "source": "github-repos"}
{"code": "def t0(self):\n    return self._t0", "docstring": "Absolute timestamp of the first dumped tensor across all devices.\n\nReturns:\n(`int`) absolute timestamp of the first dumped tensor, in microseconds.", "source": "github-repos"}
{"code": "def write(self, default: bool=False):\n        \n\n        none_type = type(None)\n\n        if default:  \n            ordered_vals = ['query', 'subject', 'identity', 'length', \n                            'mismatches', 'gaps', 'query_start', 'query_end', \n                            'subject_start', 'subject_end', 'evalue', \n                            'bitscore']\n        else:  \n            try:\n                ordered_vals = [self.custom_fs[i] if i in self.custom_fs \n                            else getattr(self, i) for i in self.fs_order]\n            except TypeError:\n                ordered_vals = [getattr(self, i) for i in self.fs_order]\n\n        \n        fstr = \"\\t\".join(['-' if type(i) == none_type else str(i) for i in \n                        ordered_vals])\n\n        return '{}{}'.format(fstr, os.linesep)", "docstring": "Restore B6/M8 entry to original format\n\nArgs:\ndefault (bool): output entry in default BLAST+ B6 format\n\nReturns:\nstr: properly formatted string containing the B6/M8 entry", "source": "juraj-google-style"}
{"code": "def last_metric_eval(multiplexer, session_name, metric_name):\n    try:\n        (run, tag) = run_tag_from_session_and_metric(session_name, metric_name)\n        tensor_events = multiplexer.Tensors(run=run, tag=tag)\n    except KeyError as e:\n        raise KeyError((\"Can't find metric %s for session: %s. Underlying error message: %s\" % (metric_name, session_name, e)))\n    last_event = tensor_events[(- 1)]\n    return (last_event.wall_time, last_event.step, tf.make_ndarray(last_event.tensor_proto).item())", "docstring": "Returns the last evaluations of the given metric at the given session.\n\nArgs:\nmultiplexer: The EventMultiplexer instance allowing access to\nthe exported summary data.\nsession_name: String. The session name for which to get the metric\nevaluations.\nmetric_name: api_pb2.MetricName proto. The name of the metric to use.\n\nReturns:\nA 3-tuples, of the form [wall-time, step, value], denoting\nthe last evaluation of the metric, where wall-time denotes the wall time\nin seconds since UNIX epoch of the time of the evaluation, step denotes\nthe training step at which the model is evaluated, and value denotes the\n(scalar real) value of the metric.\n\nRaises:\nKeyError if the given session does not have the metric.", "source": "codesearchnet"}
{"code": "def call_api(self, method_type, method_name, valid_status_codes, resource, data, uid, **kwargs):\n    url = resource.get_resource_url(resource, base_url=self.Meta.base_url)\n    if (method_type in SINGLE_RESOURCE_METHODS):\n        if ((not uid) and (not kwargs)):\n            raise MissingUidException\n        url = resource.get_url(url=url, uid=uid, **kwargs)\n    params = {'headers': self.get_http_headers(self.Meta.name, method_name, **kwargs), 'url': url}\n    if ((method_type in ['POST', 'PUT', 'PATCH']) and isinstance(data, dict)):\n        params.update(json=data)\n    prepared_request = self.prepare_http_request(method_type, params, **kwargs)\n    response = self.session.send(prepared_request)\n    return self._handle_response(response, valid_status_codes, resource)", "docstring": "Make HTTP calls.\n\nArgs:\nmethod_type: The HTTP method\nmethod_name: The name of the python method making the HTTP call\nvalid_status_codes: A tuple of integer status codes\ndeemed acceptable as response statuses\nresource: The resource class that will be generated\ndata: The post data being sent.\nuid: The unique identifier of the resource.\nReturns:\n\nkwargs is a list of keyword arguments. Additional custom keyword\narguments can be sent into this method and will be passed into\nsubclass methods:\n\n- get_url\n- prepare_http_request\n- get_http_headers", "source": "codesearchnet"}
{"code": "def fit_cosine_function(wind):\n    wind_daily = wind.groupby(wind.index.date).mean()\n    wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values)\n    df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')\n    x = np.array([df.daily, df.index.hour])\n    (popt, pcov) = scipy.optimize.curve_fit(_cosine_function, x, df.hourly)\n    return popt", "docstring": "fits a cosine function to observed hourly windspeed data\n\nArgs:\nwind: observed hourly windspeed data\n\nReturns:\nparameters needed to generate diurnal features of windspeed using a cosine function", "source": "codesearchnet"}
{"code": "def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str='input_ids') -> None:\n    tf.debugging.assert_less(tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=f\"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.\")", "docstring": "`tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning\nzeros instead. This function adds a check against that dangerous silent behavior.\n\nArgs:\ntensor (`tf.Tensor`): The tensor of indices to check.\nembed_dim (`int`): The embedding dimension.\ntensor_name (`str`, *optional*): The name of the tensor to use in the error message.", "source": "github-repos"}
{"code": "def _list_certs(certificate_store='My'):\n    \n    ret = dict()\n    blacklist_keys = ['DnsNameList', 'Thumbprint']\n\n    ps_cmd = ['Get-ChildItem',\n              '-Path', r\"'Cert:\\LocalMachine\\{0}'\".format(certificate_store),\n              '|',\n              'Select-Object DnsNameList, SerialNumber, Subject, Thumbprint, Version']\n\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n\n    for item in items:\n\n        cert_info = dict()\n        for key in item:\n            if key not in blacklist_keys:\n                cert_info[key.lower()] = item[key]\n\n        cert_info['dnsnames'] = []\n        if item['DnsNameList']:\n            cert_info['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']]\n\n        ret[item['Thumbprint']] = cert_info\n\n    return ret", "docstring": "List details of available certificates in the LocalMachine certificate\nstore.\n\nArgs:\ncertificate_store (str): The name of the certificate store on the local\nmachine.\n\nReturns:\ndict: A dictionary of certificates found in the store", "source": "juraj-google-style"}
{"code": "def set_render_option(self, render_option):\n    self._render_option = render_option", "docstring": "Sets the rendering option.\n\nArgs:\nrender_option: (str) this parameter decides how the pipeline graph is\nrendered. See display.pipeline_graph_renderer for available options.", "source": "github-repos"}
{"code": "def module_selected(self, module_name, module_ui):\n        \n        if self.current_button == self.module_buttons[module_name]:\n            return\n\n        self.module_buttons[module_name].config(bg=\"\n        if self.current_button is not None:\n            self.current_button.config(bg=\"white\")\n        self.current_button = self.module_buttons[module_name]\n\n        self.clear_ui()\n\n        try:\n            \n            module_ui_frame = ModuleUIBaseFrame(self.module_ui, module_name, module_ui)\n            module_ui_frame.grid(column=0, row=0, sticky=\"W E N S\")\n        except Exception as e:\n            logger.error(\"Could not load UI for {}\".format(module_name))\n            logger.exception(e)\n            \n            tk.Label(self.module_ui, text=\"Could not load UI for {}\".format(module_name)).grid(\n                column=0, row=0, padx=0, pady=0, sticky=\"W E N S\")", "docstring": "Called when a module is selected\n\nArgs:\nmodule_name (str): The name of the module\nmodule_ui: The function to call to create the module's UI", "source": "juraj-google-style"}
{"code": "def set_attr_text(self, attr_key, attr_val, el_idx=0):\n    self.get_element_by_attr_key(attr_key, el_idx).attrib[attr_key] = attr_val", "docstring": "Set the value of the selected attribute of the selected element.\n\nArgs:\nattr_key : str\nName of attribute for which to search\n\nattr_val : str\nText to set for the attribute.\n\nel_idx : int\nIndex of element to use in the event that there are multiple sibling\nelements with the same name.", "source": "codesearchnet"}
{"code": "async def wait_done(self) -> int:\n    (await self._done_running_evt.wait())\n    if (self._exit_code is None):\n        raise SublemonLifetimeError('Subprocess exited abnormally with `None` exit code')\n    return self._exit_code", "docstring": "Coroutine to wait for subprocess run completion.\n\nReturns:\nThe exit code of the subprocess.", "source": "codesearchnet"}
{"code": "def set_notify_dispatch_request(self, notify_dispatch_request, *args):\n        \n        self._notify_dispatch_request = notify_dispatch_request\n        self._notify_args = args", "docstring": "Set function to call just before requests are dispatched\n\nArgs:\nnotify_dispatch_request (callable): function will be called\nwith request as single arg just before request is dispatched", "source": "juraj-google-style"}
{"code": "def Write(self, schedule, output_file):\n    \n    \n    root = ET.Element('kml')\n    root.attrib['xmlns'] = 'http:\n    doc = ET.SubElement(root, 'Document')\n    open_tag = ET.SubElement(doc, 'open')\n    open_tag.text = '1'\n    self._CreateStopsFolder(schedule, doc)\n    if self.split_routes:\n      route_types = set()\n      for route in schedule.GetRouteList():\n        route_types.add(route.route_type)\n      route_types = list(route_types)\n      route_types.sort()\n      for route_type in route_types:\n        self._CreateRoutesFolder(schedule, doc, route_type)\n    else:\n      self._CreateRoutesFolder(schedule, doc)\n    self._CreateShapesFolder(schedule, doc)\n\n    \n    self._SetIndentation(root)\n\n    \n    if isinstance(output_file, file):\n      output = output_file\n    else:\n      output = open(output_file, 'w')\n    output.write()\n    ET.ElementTree(root).write(output, 'utf-8')", "docstring": "Writes out a feed as KML.\n\nArgs:\nschedule: A transitfeed.Schedule object containing the feed to write.\noutput_file: The name of the output KML file, or file object to use.", "source": "juraj-google-style"}
{"code": "def cumulative_distribution(self, X):\n    self.check_fit()\n    return norm.cdf(X, loc=self.mean, scale=self.std)", "docstring": "Cumulative distribution function for gaussian distribution.\n\nArguments:\nX: `np.ndarray` of shape (n, 1).\n\nReturns:\nnp.ndarray: Cumulative density for X.", "source": "codesearchnet"}
{"code": "def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None, phenotype_ids=None, build='37'):\n    dynamic_gene_list = []\n    res = []\n    if hgnc_ids:\n        LOG.info('Fetching genes by hgnc id')\n        res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})\n    elif hgnc_symbols:\n        LOG.info('Fetching genes by hgnc symbols')\n        res = []\n        for symbol in hgnc_symbols:\n            for gene_obj in self.gene_by_alias(symbol=symbol, build=build):\n                res.append(gene_obj)\n    for gene_obj in res:\n        dynamic_gene_list.append({'hgnc_symbol': gene_obj['hgnc_symbol'], 'hgnc_id': gene_obj['hgnc_id'], 'description': gene_obj['description']})\n    LOG.info('Update dynamic gene panel for: %s', case['display_name'])\n    updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$set': {'dynamic_gene_list': dynamic_gene_list, 'dynamic_panel_phenotypes': (phenotype_ids or [])}}, return_document=pymongo.ReturnDocument.AFTER)\n    LOG.debug('Case updated')\n    return updated_case", "docstring": "Update the dynamic gene list for a case\n\nAdds a list of dictionaries to case['dynamic_gene_list'] that looks like\n\n{\nhgnc_symbol: str,\nhgnc_id: int,\ndescription: str\n}\n\nArguments:\ncase (dict): The case that should be updated\nhgnc_symbols (iterable): A list of hgnc_symbols\nhgnc_ids (iterable): A list of hgnc_ids\n\nReturns:\nupdated_case(dict)", "source": "codesearchnet"}
{"code": "def set_properties(self, property_dict):\n    self.properties.update(property_dict)", "docstring": "Sets a dictionary of properties on this entity.\n\nArgs:\nproperty_dict: A map from property name to value. See\n:class:`google.cloud.datastore.entity.Entity` documentation for allowed\nvalues.", "source": "github-repos"}
{"code": "def int64_counter(urn, metric, ptransform=None, pcollection=None, labels=None) -> metrics_pb2.MonitoringInfo:\n    labels = labels or {}\n    labels.update(create_labels(ptransform=ptransform, pcollection=pcollection))\n    if isinstance(metric, int):\n        metric = coders.VarIntCoder().encode(metric)\n    return create_monitoring_info(urn, SUM_INT64_TYPE, metric, labels)", "docstring": "Return the counter monitoring info for the specifed URN, metric and labels.\n\nArgs:\nurn: The URN of the monitoring info/metric.\nmetric: The payload field to use in the monitoring info or an int value.\nptransform: The ptransform id used as a label.\npcollection: The pcollection id used as a label.", "source": "github-repos"}
{"code": "def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates, padding, use_gpu):\n    assert image_shape[3] == kernel_shape[2]\n    np.random.seed(1)\n    image = np.random.random_sample(image_shape).astype(np.float32)\n    kernel = np.random.random_sample(kernel_shape).astype(np.float32)\n    strides = [1] + strides + [1]\n    rates = [1] + rates + [1]\n    image_tensor = constant_op.constant(image, shape=image_shape, name='input')\n    kernel_tensor = constant_op.constant(kernel, shape=kernel_shape, name='filter')\n\n    def compute_erosion2d(image_tensor, kernel_tensor):\n        return nn_ops.erosion2d(image_tensor, kernel_tensor, strides=strides, rates=rates, padding=padding, name='erosion2d')\n    with test_util.device(use_gpu=use_gpu):\n        with self.cached_session():\n            err1 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_erosion2d(x, kernel_tensor), [image_tensor]))\n            err2 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_erosion2d(image_tensor, x), [kernel_tensor]))\n            err = max(err1, err2)\n    print('Erosion gradient error = %f' % err)\n    self.assertLess(err, 0.0001)", "docstring": "Verifies the gradients of the erosion function.\n\nArgs:\nimage_shape: Input shape, [batch, in_height, in_width, channels].\nkernel_shape: Filter shape, [filter_height, filter_width, channels].\nstrides: Output strides, specified as [stride_height, stride_width].\nrates: Atrous rates, specified as [rate_height, rate_width].\npadding: Padding type.\nuse_gpu: Whether we are running on GPU.", "source": "github-repos"}
{"code": "def prune_candidates(candidates):\n    pruned = []\n    for (first, second) in candidates:\n        if (first.__class__ is Linearization):\n            nodes1 = first.curve.nodes\n        else:\n            nodes1 = first.nodes\n        if (second.__class__ is Linearization):\n            nodes2 = second.curve.nodes\n        else:\n            nodes2 = second.nodes\n        if convex_hull_collide(nodes1, nodes2):\n            pruned.append((first, second))\n    return pruned", "docstring": "Reduce number of candidate intersection pairs.\n\n.. note::\n\nThis is a helper for :func:`_all_intersections`.\n\nUses more strict bounding box intersection predicate by forming the\nactual convex hull of each candidate curve segment and then checking\nif those convex hulls collide.\n\nArgs:\ncandidates (List): An iterable of pairs of curves (or\nlinearized curves).\n\nReturns:\nList: A pruned list of curve pairs.", "source": "codesearchnet"}
{"code": "def get_plot(self, ylim=None, units='thz'):\n    u = freq_units(units)\n    plt = pretty_plot(12, 8)\n    band_linewidth = 1\n    data = self.bs_plot_data()\n    for d in range(len(data['distances'])):\n        for i in range(self._nb_bands):\n            plt.plot(data['distances'][d], [(data['frequency'][d][i][j] * u.factor) for j in range(len(data['distances'][d]))], 'b-', linewidth=band_linewidth)\n    self._maketicks(plt)\n    plt.axhline(0, linewidth=1, color='k')\n    plt.xlabel('$\\\\mathrm{Wave\\\\ Vector}$', fontsize=30)\n    ylabel = '$\\\\mathrm{{Frequencies\\\\ ({})}}$'.format(u.label)\n    plt.ylabel(ylabel, fontsize=30)\n    x_max = data['distances'][(- 1)][(- 1)]\n    plt.xlim(0, x_max)\n    if (ylim is not None):\n        plt.ylim(ylim)\n    plt.tight_layout()\n    return plt", "docstring": "Get a matplotlib object for the bandstructure plot.\n\nArgs:\nylim: Specify the y-axis (frequency) limits; by default None let\nthe code choose.\nunits: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.", "source": "codesearchnet"}
{"code": "def parse_args(cmd_args, is_script=False):\n    \n    parser = argparse.ArgumentParser(\n        description=__doc__,\n        formatter_class=NewlinesHelpFormatter,\n        epilog=CODES_TABLE\n    )\n\n    \n    if is_script:\n        parser.add_argument(\n            \"files\",\n            metavar=\"FILES\",\n            nargs=\"*\",\n            default=sys.stdin,\n            help=\"A whitespace separated list of STIX files or directories of \"\n                 \"STIX files to validate. If none given, stdin will be used.\"\n        )\n    parser.add_argument(\n        \"-r\",\n        \"--recursive\",\n        dest=\"recursive\",\n        action=\"store_true\",\n        default=True,\n        help=\"Recursively descend into input directories.\"\n    )\n    parser.add_argument(\n        \"-s\",\n        \"--schemas\",\n        dest=\"schema_dir\",\n        help=\"Custom schema directory. If provided, input will be validated \"\n             \"against these schemas in addition to the STIX schemas bundled \"\n             \"with this script.\"\n    )\n    parser.add_argument(\n        \"--version\",\n        dest=\"version\",\n        default=DEFAULT_VER,\n        help=\"The version of the STIX specification to validate against (e.g. \"\n             \"\\\"2.0\\\").\"\n    )\n\n    \n    parser.add_argument(\n        \"-v\",\n        \"--verbose\",\n        dest=\"verbose\",\n        action=\"store_true\",\n        default=False,\n        help=\"Print informational notes and more verbose error messages.\"\n    )\n\n    parser.add_argument(\n        \"-q\",\n        \"--silent\",\n        dest=\"silent\",\n        action=\"store_true\",\n        default=False,\n        help=\"Silence all output to stdout.\"\n    )\n\n    parser.add_argument(\n        \"-d\",\n        \"--disable\",\n        \"--ignore\",\n        dest=\"disabled\",\n        default=\"\",\n        help=\"A comma-separated list of recommended best practice checks to \"\n             \"skip. By default, no checks are disabled. \\n\\n\"\n             \"Example: --disable 202,210\"\n    )\n\n    parser.add_argument(\n        \"-e\",\n        \"--enable\",\n        \"--select\",\n        dest=\"enabled\",\n        default=\"\",\n        help=\"A comma-separated list of recommended best practice checks to \"\n             \"enable. If the --disable option is not used, no other checks \"\n             \"will be run. 
By default, all checks are enabled.\\n\\n\"\n             \"Example: --enable 218\"\n    )\n\n    parser.add_argument(\n        \"--strict\",\n        dest=\"strict\",\n        action=\"store_true\",\n        default=False,\n        help=\"Treat warnings as errors and fail validation if any are found.\"\n    )\n\n    parser.add_argument(\n        \"--strict-types\",\n        dest=\"strict_types\",\n        action=\"store_true\",\n        default=False,\n        help=\"Ensure that no custom object types are used, only those defined\"\n             \" in the STIX specification.\"\n    )\n\n    parser.add_argument(\n        \"--strict-properties\",\n        dest=\"strict_properties\",\n        action=\"store_true\",\n        default=False,\n        help=\"Ensure that no custom properties are used, only those defined\"\n             \" in the STIX specification.\"\n    )\n\n    parser.add_argument(\n        \"--no-cache\",\n        dest=\"no_cache\",\n        action=\"store_true\",\n        default=False,\n        help=\"Disable the caching of external source values.\"\n    )\n\n    parser.add_argument(\n        \"--refresh-cache\",\n        dest=\"refresh_cache\",\n        action=\"store_true\",\n        default=False,\n        help=\"Clears the cache of external source values, then \"\n             \"during validation downloads them again.\"\n    )\n\n    parser.add_argument(\n        \"--clear-cache\",\n        dest=\"clear_cache\",\n        action=\"store_true\",\n        default=False,\n        help=\"Clear the cache of external source values after validation.\"\n    )\n\n    parser.add_argument(\n        \"--enforce-refs\",\n        dest=\"enforce_refs\",\n        action=\"store_true\",\n        default=False,\n        help=\"Ensures that all SDOs being referenced by SROs are contained \"\n             \"within the same bundle.\"\n    )\n\n    args = parser.parse_args(cmd_args)\n\n    if not is_script:\n        args.files = \"\"\n    if not args.version:\n        args.version = DEFAULT_VER\n\n    return ValidationOptions(args)", "docstring": "Parses a list of command line arguments into a ValidationOptions object.\n\nArgs:\ncmd_args (list of str): The list of command line arguments to be parsed.\nis_script: Whether the arguments are intended for use in a stand-alone\nscript or imported into another tool.\n\nReturns:\nInstance of ``ValidationOptions``", "source": "juraj-google-style"}
{"code": "def __str__(self, talker='GP'):\n        \n        if not len(talker) == 2:\n            raise ValueError('Talker ID must be two characters %r' % talker)\n        data = ['%sGLL' % talker]\n        data.extend(nmea_latitude(self.latitude))\n        data.extend(nmea_longitude(self.longitude))\n        data.append('%s.%02i' % (self.time.strftime('%H%M%S'),\n                                 self.time.microsecond / 1000000))\n        data.append('A' if self.status else 'V')\n        if self.mode:\n            data.append(self.mode)\n        data = ','.join(data)\n        return '$%s*%02X\\r' % (data, calc_checksum(data))", "docstring": "Pretty printed position string.\n\nArgs:\ntalker (str): Talker ID\n\nReturns:\nstr: Human readable string representation of ``Position`` object", "source": "juraj-google-style"}
{"code": "def putfile(self, filepath, buildroot, metahash):\n        \n        def gen_obj_path(filename):\n            filehash = util.hash_file(filepath).hexdigest()\n            return filehash, os.path.join(self.obj_cachedir, filehash[0:2],\n                                          filehash[2:4], filehash)\n\n        filepath_relative = filepath.split(buildroot)[1][1:]  \n        \n        incachepath = self._genpath(filepath_relative, metahash)\n\n        filehash, obj_path = gen_obj_path(filepath)\n        if not os.path.exists(obj_path):\n            obj_dir = os.path.dirname(obj_path)\n            if not os.path.exists(obj_dir):\n                os.makedirs(obj_dir)\n            log.debug('Adding to obj cache: %s -> %s', filepath, obj_path)\n            os.link(filepath, obj_path)\n\n        if os.path.exists(incachepath):\n            existingfile_hash = util.hash_file(incachepath).hexdigest()\n            if filehash != existingfile_hash:\n                log.warn('File found in mh cache, but checksum differs. '\n                         'Replacing with this new version. (File: %s)',\n                         filepath)\n                log.warn('Possible reasons for this:')\n                log.warn(' 1. This build is not hermetic, and something '\n                         'differs about the build environment compared to the '\n                         'previous build.')\n                log.warn(' 2. This file has a timestamp or other build-time '\n                         'related data encoded into it, which will always '\n                         'cause the checksum to differ when built.')\n                log.warn(' 3. Everything is terrible and nothing works.')\n                os.unlink(incachepath)\n\n        if not os.path.exists(incachepath):\n            log.debug('Adding to mh cache: %s -> %s', filepath, incachepath)\n            if not os.path.exists(os.path.dirname(incachepath)):\n                os.makedirs(os.path.dirname(incachepath))\n            os.link(obj_path, incachepath)", "docstring": "Put a file in the cache.\n\nArgs:\nfilepath: Path to file on disk.\nbuildroot: Path to buildroot\nbuildrule: The rule that generated this file.\nmetahash: hash object", "source": "juraj-google-style"}
{"code": "def SignMessage(self, message, script_hash):\n        \n\n        keypair = self.GetKeyByScriptHash(script_hash)\n        prikey = bytes(keypair.PrivateKey)\n        res = Crypto.Default().Sign(message, prikey)\n        return res, keypair.PublicKey", "docstring": "Sign a message with a specified script_hash.\n\nArgs:\nmessage (str): a hex encoded message to sign\nscript_hash (UInt160): a bytearray (len 20).\n\nReturns:\nstr: the signed message", "source": "juraj-google-style"}
{"code": "def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name='decoder'):\n    x = decoder_input\n    with tf.variable_scope(name):\n        for layer in range(hparams.num_hidden_layers):\n            with tf.variable_scope(('layer_%d' % layer)):\n                with tf.variable_scope('self_attention'):\n                    y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, (hparams.attention_key_channels or hparams.hidden_size), (hparams.attention_value_channels or hparams.hidden_size), hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n                    x = common_layers.layer_postprocess(x, y, hparams)\n                with tf.variable_scope('ffn'):\n                    y = common_layers.conv_hidden_relu(common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout)\n                    x = common_layers.layer_postprocess(x, y, hparams)\n        return common_layers.layer_preprocess(x, hparams)", "docstring": "A stack of attention_lm layers.\n\nArgs:\ndecoder_input: a Tensor\ndecoder_self_attention_bias: bias Tensor for self-attention\n(see common_attention.attention_bias())\nhparams: hyperparameters for model\nname: a string\n\nReturns:\ny: a Tensors", "source": "codesearchnet"}
{"code": "def nrows(self):\n    if self._nrows is not None:\n        return self._nrows\n    nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0)\n    if nsplits.value is None:\n        return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1\n    else:\n        return constant_op.constant(nsplits.value - 1, dtype=self.dtype)", "docstring": "Returns the number of rows created by this `RowPartition`.\n\nReturns:\nscalar integer Tensor", "source": "github-repos"}
{"code": "def __init__(self, site1, site2):\n        \n        self.site1 = site1\n        self.site2 = site2", "docstring": "Initializes a covalent bond between two sites.\n\nArgs:\nsite1 (Site): First site.\nsite2 (Site): Second site.", "source": "juraj-google-style"}
{"code": "def dumps(self, with_defaults=False):\n        \n        return self._rw.dump_config_to_string(self._config, with_defaults=with_defaults)", "docstring": "Generate a string representing all the configuration values.\n\nArgs:\nwith_defaults (bool): if ``True``, values of items with no custom values will be included in the output\nif they have a default value set.", "source": "juraj-google-style"}
{"code": "def ReconcileShadow(self, store_type):\n    \n    for k, v in iteritems(self.entry):\n      if v.pw_entry.store == store_type:\n        shadow_entry = self.shadow.get(k)\n        if shadow_entry is not None:\n          v.pw_entry = shadow_entry\n        else:\n          v.pw_entry.store = \"UNKNOWN\"", "docstring": "Verify that entries that claim to use shadow files have a shadow entry.\n\nIf the entries of the non-shadowed file indicate that a shadow file is used,\ncheck that there is actually an entry for that file in shadow.\n\nArgs:\nstore_type: The type of password store that should be used (e.g.\n/etc/shadow or /etc/gshadow)", "source": "juraj-google-style"}
{"code": "def register_recipe(cls, recipe):\n    \n    recipe_name = recipe.contents['name']\n    cls._recipe_classes[recipe_name] = (\n        recipe.contents, recipe.args, recipe.__doc__)", "docstring": "Registers a dftimewolf recipe.\n\nArgs:\nrecipe: imported python module representing the recipe.", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n    residual = hidden_states\n    if self.do_layer_norm_before:\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    if not self.do_layer_norm_before:\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    if self.do_layer_norm_before:\n        hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.fc1(hidden_states)\n    hidden_states = self.activation_fn(hidden_states)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    if not self.do_layer_norm_before:\n        hidden_states = self.final_layer_norm(hidden_states)\n    return (hidden_states, self_attn_weights, present_key_value)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`, *optional*): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size\n`(decoder_attention_heads,)`\npast_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states\ntraining (`bool`, *optional*, defaults to `False`):\nWhether or not to use the model in training mode (some modules like dropout modules have different\nbehaviors between training and evaluation).", "source": "github-repos"}
{"code": "def read_bit(self, registeraddress, functioncode=2):\n        \n        _checkFunctioncode(functioncode, [1, 2])\n        return self._genericCommand(functioncode, registeraddress)", "docstring": "Read one bit from the slave.\n\nArgs:\n* registeraddress (int): The slave register address (use decimal numbers, not hex).\n* functioncode (int): Modbus function code. Can be 1 or 2.\n\nReturns:\nThe bit value 0 or 1 (int).\n\nRaises:\nValueError, TypeError, IOError", "source": "juraj-google-style"}
{"code": "def linear_quantize(input, scale, zero_point, inplace=False):\n    if len(input.shape) == 4:\n        scale = scale.view(-1, 1, 1, 1)\n        zero_point = zero_point.view(-1, 1, 1, 1)\n    elif len(input.shape) == 2:\n        scale = scale.view(-1, 1)\n        zero_point = zero_point.view(-1, 1)\n    else:\n        scale = scale.view(-1)\n        zero_point = zero_point.view(-1)\n    if inplace:\n        input.mul_(1.0 / scale).add_(zero_point).round_()\n        return input\n    return torch.round(1.0 / scale * input + zero_point)", "docstring": "Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.\n\nArgs:\ninput (`torch.Tensor`):\nSingle-precision input tensor to be quantized.\nscale (`torch.Tensor`):\nScaling factor for quantization.\nzero_pint (`torch.Tensor`):\nShift for quantization.\ninplace (`bool`, *optional*, defaults to `False`):\nWhether to compute inplace or not.\n\nReturns:\n`torch.Tensor`: Linearly quantized value of *input* according to *scale* and *zero_point*.", "source": "github-repos"}
{"code": "def automatic_density_by_vol(structure, kppvol, force_gamma=False):\n        \n        vol = structure.lattice.reciprocal_lattice.volume\n        kppa = kppvol * vol * structure.num_sites\n        return Kpoints.automatic_density(structure, kppa,\n                                         force_gamma=force_gamma)", "docstring": "Returns an automatic Kpoint object based on a structure and a kpoint\ndensity per inverse Angstrom^3 of reciprocal cell.\n\nAlgorithm:\nSame as automatic_density()\n\nArgs:\nstructure (Structure): Input structure\nkppvol (int): Grid density per Angstrom^(-3) of reciprocal cell\nforce_gamma (bool): Force a gamma centered mesh\n\nReturns:\nKpoints", "source": "juraj-google-style"}
{"code": "def get_related(self):\n\n\t\t\n\n\t\tif self.exists and hasattr(self.rdf.triples, 'ore') and hasattr(self.rdf.triples.ore, 'aggregates'):\n\t\t\trelated = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.ore.aggregates ]\n\n\t\t\t\n\t\t\treturn related\n\n\t\telse:\n\t\t\treturn []", "docstring": "get ore:aggregates for this resource, optionally retrieving resource payload\n\nArgs:\nretrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload", "source": "juraj-google-style"}
{"code": "def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):\n        \n        \n        countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)\n        if countryinfo is not None:\n            return countryinfo.get('\n        return None", "docstring": "Get country name from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get country name\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: Country name", "source": "juraj-google-style"}
{"code": "def row_splits_dtype(self):\n    return self._row_splits_dtype", "docstring": "The `tf.dtypes.DType` of the RaggedTensor's `row_splits`.\n\nExamples:\n\n>>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)\n>>> tf.type_spec_from_value(rt).row_splits_dtype\ntf.int64\n\nReturns:\nA `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One\nof `tf.int32` or `tf.int64`.", "source": "github-repos"}
{"code": "def _print_task_data(self, task):\n    \n    print(' {0:s} ({1:s})'.format(task['name'], task['id']))\n    paths = task.get('saved_paths', [])\n    if not paths:\n      return\n    for path in paths:\n      if path.endswith('worker-log.txt'):\n        continue\n      if path.endswith('{0:s}.log'.format(task.get('id'))):\n        continue\n      if path.startswith('/'):\n        continue\n      print('   ' + path)", "docstring": "Pretty-prints task data.\n\nArgs:\ntask: Task dict generated by Turbinia.", "source": "juraj-google-style"}
{"code": "def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses):\n    ret = []\n    pool = {int(k): v for (k, v) in pool.items()}\n    total_in_pool = len(seeded_answers)\n    merged_pool = convert_seeded_answers(seeded_answers)\n    student_id = get_student_item_dict()['student_id']\n    for key in pool:\n        total_in_pool += len(pool[key])\n        if (student_id in pool[key].keys()):\n            total_in_pool -= 1\n        if (key in merged_pool):\n            merged_pool[key].update(pool[key].items())\n        else:\n            merged_pool[key] = pool[key]\n    selected = []\n    while (len(ret) < min(num_responses, total_in_pool)):\n        for (option, students) in merged_pool.items():\n            student = student_id\n            i = 0\n            while (((student == student_id) or (i > 100)) and ((str(option) + student) not in selected)):\n                student = random.choice(students.keys())\n                i += 1\n            selected.append((str(option) + student))\n            if student.startswith('seeded'):\n                rationale = students[student]\n            else:\n                student_item = get_student_item_dict(student)\n                submission = sas_api.get_answers_for_student(student_item)\n                rationale = submission.get_rationale(0)\n            ret.append({'option': option, 'rationale': rationale})\n            if (len(ret) >= min(num_responses, total_in_pool)):\n                break\n    return {'answers': ret}", "docstring": "Get answers from others with simple algorithm, which picks one answer for each option.\n\nArgs:\nsee `get_other_answers`\nnum_responses (int): the number of responses to be returned. This value may not be\nrespected if there is not enough answers to return\n\nReturns:\ndict: answers based on the selection algorithm", "source": "codesearchnet"}
{"code": "def xeval(source, optimize=True):\n    \n    native = xcompile(source, optimize=optimize)\n    return native()", "docstring": "Compiles to native Python bytecode and runs program, returning the\ntopmost value on the stack.\n\nArgs:\noptimize: Whether to optimize the code after parsing it.\n\nReturns:\nNone: If the stack is empty\nobj: If the stack contains a single value\n[obj, obj, ...]: If the stack contains many values", "source": "juraj-google-style"}
{"code": "def update(self, resource, timeout=-1):\n        \n        self.__set_default_values(resource)\n        uri = self._client.build_uri(resource['logicalSwitch']['uri'])\n        return self._client.update(resource, uri=uri, timeout=timeout)", "docstring": "Updates a Logical Switch.\n\nArgs:\nresource (dict): Object to update.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "juraj-google-style"}
{"code": "def logout(self, client_id, return_to, federated=False):\n        \n        return_to = quote_plus(return_to)\n\n        if federated is True:\n            return self.get(\n                'https:\n                    self.domain, client_id, return_to),\n                headers={'Content-Type': 'application/json'}\n            )\n        return self.get(\n            'https:\n                                                                   client_id,\n                                                                   return_to),\n            headers={'Content-Type': 'application/json'}\n        )", "docstring": "Logout\n\nUse this endpoint to logout a user. If you want to navigate the user to a\nspecific URL after the logout, set that URL at the returnTo parameter.\nThe URL should be included in any the appropriate Allowed Logout URLs list:\n\nArgs:\nclient_id (str): The client_id of your application.\n\nreturnTo (str): URL to redirect the user after the logout.\n\nfederated (bool): Querystring parameter to log the user out of the IdP", "source": "juraj-google-style"}
{"code": "def ProcessConfigOverrides(filename):\n    abs_filename = os.path.abspath(filename)\n    cfg_filters = []\n    keep_looking = True\n    while keep_looking:\n        (abs_path, base_name) = os.path.split(abs_filename)\n        if (not base_name):\n            break\n        cfg_file = os.path.join(abs_path, 'CPPLINT.cfg')\n        abs_filename = abs_path\n        if (not os.path.isfile(cfg_file)):\n            continue\n        try:\n            with open(cfg_file) as file_handle:\n                for line in file_handle:\n                    (line, _, _) = line.partition('\n                    if (not line.strip()):\n                        continue\n                    (name, _, val) = line.partition('=')\n                    name = name.strip()\n                    val = val.strip()\n                    if (name == 'set noparent'):\n                        keep_looking = False\n                    elif (name == 'filter'):\n                        cfg_filters.append(val)\n                    elif (name == 'exclude_files'):\n                        if base_name:\n                            pattern = re.compile(val)\n                            if pattern.match(base_name):\n                                _cpplint_state.PrintInfo(('Ignoring \"%s\": file excluded by \"%s\". File path component \"%s\" matches pattern \"%s\"\\n' % (filename, cfg_file, base_name, val)))\n                                return False\n                    elif (name == 'linelength'):\n                        global _line_length\n                        try:\n                            _line_length = int(val)\n                        except ValueError:\n                            _cpplint_state.PrintError('Line length must be numeric.')\n                    elif (name == 'extensions'):\n                        global _valid_extensions\n                        try:\n                            extensions = [ext.strip() for ext in val.split(',')]\n                            _valid_extensions = set(extensions)\n                        except ValueError:\n                            sys.stderr.write(('Extensions should be a comma-separated list of values;for example: extensions=hpp,cpp\\nThis could not be parsed: \"%s\"' % (val,)))\n                    elif (name == 'headers'):\n                        global _header_extensions\n                        try:\n                            extensions = [ext.strip() for ext in val.split(',')]\n                            _header_extensions = set(extensions)\n                        except ValueError:\n                            sys.stderr.write(('Extensions should be a comma-separated list of values;for example: extensions=hpp,cpp\\nThis could not be parsed: \"%s\"' % (val,)))\n                    elif (name == 'root'):\n                        global _root\n                        _root = val\n                    else:\n                        _cpplint_state.PrintError(('Invalid configuration option (%s) in file %s\\n' % (name, cfg_file)))\n        except IOError:\n            _cpplint_state.PrintError((\"Skipping config file '%s': Can't open for reading\\n\" % cfg_file))\n            keep_looking = False\n    for cfg_filter in reversed(cfg_filters):\n        _AddFilters(cfg_filter)\n    return True", "docstring": "Loads the configuration files and processes the config overrides.\n\nArgs:\nfilename: The name of the file being processed by the linter.\n\nReturns:\nFalse if the current |filename| should not be processed further.", "source": "codesearchnet"}
{"code": "def has_no_checked_field(self, locator, **kwargs):\n        \n\n        kwargs[\"checked\"] = True\n        return self.has_no_selector(\"field\", locator, **kwargs)", "docstring": "Checks if the page or current node has no radio button or checkbox with the given label,\nvalue, or id that is currently checked.\n\nArgs:\nlocator (str): The label, name, or id of a checked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it doesn't exist.", "source": "juraj-google-style"}
{"code": "def rename(oldname, newname, overwrite=False):\n    rename_v2(oldname, newname, overwrite)", "docstring": "Rename or move a file / directory.\n\nArgs:\noldname: string, pathname for a file\nnewname: string, pathname to which the file needs to be moved\noverwrite: boolean, if false it's an error for `newname` to be occupied by\nan existing file.\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def add_glyph(self, source_or_glyph, glyph=None, **kw):\n    if (glyph is not None):\n        source = source_or_glyph\n    else:\n        (source, glyph) = (ColumnDataSource(), source_or_glyph)\n    if (not isinstance(source, DataSource)):\n        raise ValueError(\"'source' argument to add_glyph() must be DataSource subclass\")\n    if (not isinstance(glyph, Glyph)):\n        raise ValueError(\"'glyph' argument to add_glyph() must be Glyph subclass\")\n    g = GlyphRenderer(data_source=source, glyph=glyph, **kw)\n    self.renderers.append(g)\n    return g", "docstring": "Adds a glyph to the plot with associated data sources and ranges.\n\nThis function will take care of creating and configuring a Glyph object,\nand then add it to the plot's list of renderers.\n\nArgs:\nsource (DataSource) : a data source for the glyphs to all use\nglyph (Glyph) : the glyph to add to the Plot\n\n\nKeyword Arguments:\nAny additional keyword arguments are passed on as-is to the\nGlyph initializer.\n\nReturns:\nGlyphRenderer", "source": "codesearchnet"}
{"code": "def register_subcommand(parser: ArgumentParser):\n    serve_parser = parser.add_parser('serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')\n    serve_parser.add_argument('--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')\n    serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')\n    serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')\n    serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')\n    serve_parser.add_argument('--model', type=str, help=\"Model's name or path to stored model.\")\n    serve_parser.add_argument('--config', type=str, help=\"Model's config name or path to stored model.\")\n    serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')\n    serve_parser.add_argument('--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')\n    serve_parser.set_defaults(func=serve_command_factory)", "docstring": "Register this command to argparse so it's available for the transformer-cli\n\nArgs:\nparser: Root parser to register command-specific arguments", "source": "github-repos"}
{"code": "def compare_versions(ver1='', oper='==', ver2=''):\n    if (not ver1):\n        raise SaltInvocationError('compare_version, ver1 is blank')\n    if (not ver2):\n        raise SaltInvocationError('compare_version, ver2 is blank')\n    if (ver1 == 'latest'):\n        ver1 = six.text_type(sys.maxsize)\n    if (ver2 == 'latest'):\n        ver2 = six.text_type(sys.maxsize)\n    if (ver1 == 'Not Found'):\n        ver1 = '0.0.0.0.0'\n    if (ver2 == 'Not Found'):\n        ver2 = '0.0.0.0.0'\n    return salt.utils.versions.compare(ver1, oper, ver2, ignore_epoch=True)", "docstring": "Compare software package versions\n\nArgs:\nver1 (str): A software version to compare\noper (str): The operand to use to compare\nver2 (str): A software version to compare\n\nReturns:\nbool: True if the comparison is valid, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' pkg.compare_versions 1.2 >= 1.3", "source": "codesearchnet"}
{"code": "def send_highspeed(self, data, progress_callback):\n    if (not self.connected):\n        raise HardwareError('Cannot send a script if we are not in a connected state')\n    if (isinstance(data, str) and (not isinstance(data, bytes))):\n        raise ArgumentError('You must send bytes or bytearray to _send_highspeed', type=type(data))\n    if (not isinstance(data, bytes)):\n        data = bytes(data)\n    try:\n        self._on_progress = progress_callback\n        self._loop.run_coroutine(self.adapter.send_script(0, data))\n    finally:\n        self._on_progress = None", "docstring": "Send a script to a device at highspeed, reporting progress.\n\nThis method takes a binary blob and downloads it to the device as fast\nas possible, calling the passed progress_callback periodically with\nupdates on how far it has gotten.\n\nArgs:\ndata (bytes): The binary blob that should be sent to the device at highspeed.\nprogress_callback (callable): A function that will be called periodically to\nreport progress.  The signature must be callback(done_count, total_count)\nwhere done_count and total_count will be passed as integers.", "source": "codesearchnet"}
{"code": "def __init__(self, project_id, credentials, config=None):\n    \n    self._project_id = project_id\n    self._credentials = credentials\n    self._config = config if config is not None else Context._get_default_config()", "docstring": "Initializes an instance of a Context object.\n\nArgs:\nproject_id: the current cloud project.\ncredentials: the credentials to use to authorize requests.\nconfig: key/value configurations for cloud operations", "source": "juraj-google-style"}
{"code": "def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True,\n                    append_case: str = \"first\") -> List[Tuple[List[Tuple[str]], List[str]]]:\n    \n    new_data = []\n    for words, tags in data:\n        new_words = [process_word(word, to_lower=to_lower, append_case=append_case)\n                     for word in words]\n        \n        new_tags = tags\n        new_data.append((new_words, new_tags))\n    return new_data", "docstring": "Processes all words in data using\n:func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.\n\nArgs:\ndata: a list of pairs (words, tags), each pair corresponds to a single sentence\nto_lower: whether to lowercase\nappend_case: whether to add case mark\n\nReturns:\na list of preprocessed sentences", "source": "juraj-google-style"}
{"code": "def create(self, data, *args, **kwargs):\n        \n        \n        \n        if self.create.__func__.__module__ != self.__module__:\n            raise Exception(\"Child method not implemented\")\n\n        self._MambuStruct__method  = \"POST\"\n        self._MambuStruct__data    = data\n        self.connect(*args, **kwargs)\n        self._MambuStruct__method  = \"GET\"\n        self._MambuStruct__data    = None", "docstring": "Creates an entity in Mambu\n\nThis method must be implemented in child classes\n\nArgs:\ndata (dictionary): dictionary with data to send, this dictionary\nis specific for each Mambu entity", "source": "juraj-google-style"}
{"code": "def speechlib_mel(sample_rate, n_fft, n_mels, fmin=None, fmax=None):\n    bank_width = int(n_fft \n    if fmax is None:\n        fmax = sample_rate / 2\n    if fmin is None:\n        fmin = 0\n    assert fmin >= 0, 'fmin cannot be negative'\n    assert fmin < fmax <= sample_rate / 2, 'fmax must be between (fmin, samplerate / 2]'\n\n    def mel(f):\n        return 1127.0 * np.log(1.0 + f / 700.0)\n\n    def bin2mel(fft_bin):\n        return 1127.0 * np.log(1.0 + fft_bin * sample_rate / (n_fft * 700.0))\n\n    def f2bin(f):\n        return int(f * n_fft / sample_rate + 0.5)\n    klo = f2bin(fmin) + 1\n    khi = f2bin(fmax)\n    khi = max(khi, klo)\n    mlo = mel(fmin)\n    mhi = mel(fmax)\n    m_centers = np.linspace(mlo, mhi, n_mels + 2)\n    ms = (mhi - mlo) / (n_mels + 1)\n    matrix = np.zeros((n_mels, bank_width), dtype=np.float32)\n    for m in range(0, n_mels):\n        left = m_centers[m]\n        center = m_centers[m + 1]\n        right = m_centers[m + 2]\n        for fft_bin in range(klo, khi):\n            mbin = bin2mel(fft_bin)\n            if left < mbin < right:\n                matrix[m, fft_bin] = 1.0 - abs(center - mbin) / ms\n    return matrix", "docstring": "Create a Mel filter-bank the same as SpeechLib FbankFC.\n\nArgs:\nsample_rate (int): Sample rate in Hz. number > 0 [scalar]\nn_fft (int): FFT size. int > 0 [scalar]\nn_mel (int): Mel filter size. int > 0 [scalar]\nfmin (float): lowest frequency (in Hz). If None use 0.0.\nfloat >= 0 [scalar]\nfmax: highest frequency (in Hz). If None use sample_rate / 2.\nfloat >= 0 [scalar]\n\nReturns\nout (numpy.ndarray): Mel transform matrix\n[shape=(n_mels, 1 + n_fft/2)]", "source": "github-repos"}
{"code": "def execute(self, commands, encoding='json', **kwargs):\n    if (encoding not in ('json', 'text')):\n        raise TypeError('encoding must be one of [json, text]')\n    try:\n        self.error = None\n        request = self.request(commands, encoding=encoding, **kwargs)\n        response = self.send(request)\n        return response\n    except (ConnectionError, CommandError, TypeError) as exc:\n        exc.commands = commands\n        self.error = exc\n        raise", "docstring": "Executes the list of commands on the destination node\n\nThis method takes a list of commands and sends them to the\ndestination node, returning the results.  The execute method handles\nputting the destination node in enable mode and will pass the\nenable password, if required.\n\nArgs:\ncommands (list): A list of commands to execute on the remote node\nencoding (string): The encoding to send along with the request\nmessage to the destination node.  Valid values include 'json'\nor 'text'.  This argument will influence the response object\nencoding\n**kwargs: Arbitrary keyword arguments\n\nReturns:\nA decoded response message as a native Python dictionary object\nthat has been deserialized from JSON.\n\nRaises:\nCommandError:  A CommandError is raised that includes the error\ncode, error message along with the list of commands that were\nsent to the node.  The exception instance is also stored in\nthe error property and is availble until the next request is\nsent", "source": "codesearchnet"}
{"code": "def read_from_hdx(identifier, configuration=None):\n    dataset = Dataset(configuration=configuration)\n    result = dataset._dataset_load_from_hdx(identifier)\n    if result:\n        return dataset\n    return None", "docstring": "Reads the dataset given by identifier from HDX and returns Dataset object\n\nArgs:\nidentifier (str): Identifier of dataset\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Dataset]: Dataset object if successful read, None if not", "source": "codesearchnet"}
{"code": "def get_cytoband_coord(chrom, pos):\n    chrom = chrom.strip('chr')\n    pos = int(pos)\n    result = None\n    logger.debug('Finding Cytoband for chrom:{0} pos:{1}'.format(chrom, pos))\n    if (chrom in CYTOBANDS):\n        for interval in CYTOBANDS[chrom][pos]:\n            result = '{0}{1}'.format(chrom, interval.data)\n    return result", "docstring": "Get the cytoband coordinate for a position\n\nArgs:\nchrom(str): A chromosome\npos(int): The position\n\nReturns:\ncytoband", "source": "codesearchnet"}
{"code": "def minimum_image( self, r1, r2 ):\n        \n        delta_r = r2 - r1\n        delta_r = np.array( [ x - math.copysign( 1.0, x ) if abs(x) > 0.5 else x for x in delta_r ] )\n        return( delta_r )", "docstring": "Find the minimum image vector from point r1 to point r2.\n\nArgs:\nr1 (np.array): fractional coordinates of point r1.\nr2 (np.array): fractional coordinates of point r2.\n\nReturns:\n(np.array): the fractional coordinate vector from r1 to the nearest image of r2.", "source": "juraj-google-style"}
{"code": "def close(self, reason=None):\n        \n        with self._closing:\n            if self._closed:\n                return\n\n            \n            if self.is_active:\n                _LOGGER.debug(\"Stopping consumer.\")\n                self._consumer.stop()\n            self._consumer = None\n\n            self._rpc.close()\n            self._rpc = None\n            self._closed = True\n            _LOGGER.debug(\"Finished stopping manager.\")\n\n        if reason:\n            \n            _LOGGER.debug(\"reason for closing: %s\" % reason)\n            if isinstance(reason, Exception):\n                raise reason\n            raise RuntimeError(reason)", "docstring": "Stop consuming messages and shutdown all helper threads.\n\nThis method is idempotent. Additional calls will have no effect.\n\nArgs:\nreason (Any): The reason to close this. If None, this is considered\nan \"intentional\" shutdown.", "source": "juraj-google-style"}
{"code": "def Write(self, output_writer):\n    for (column_index, column_size) in enumerate(self._column_sizes):\n        (column_size, _) = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB)\n        column_size = ((column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB)\n        self._column_sizes[column_index] = column_size\n    if self._columns:\n        self._WriteRow(output_writer, self._columns, in_bold=True)\n    for values in self._rows:\n        self._WriteRow(output_writer, values)", "docstring": "Writes the table to output writer.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "codesearchnet"}
{"code": "def _shared_name(self):\n    return self.name[:self.name.index(':')]", "docstring": "The shared name of the variable.\n\nUnlike name(), shared_name doesn't have \":0\" suffix. It is user-specified\nname with name scope prefix.\n\nReturns:\nvariable name.", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        if type(self) is not type(other) or \\\n                self.name != other.name or \\\n                self.num_qubits != other.num_qubits or \\\n                self.num_clbits != other.num_clbits or \\\n                self.definition != other.definition:\n            return False\n\n        for self_param, other_param in zip_longest(self.params, other.params):\n            if self_param == other_param:\n                continue\n\n            try:\n                if numpy.isclose(float(self_param), float(other_param),\n                                 atol=_CUTOFF_PRECISION):\n                    continue\n            except TypeError:\n                pass\n\n            return False\n\n        return True", "docstring": "Two instructions are the same if they have the same name,\nsame dimensions, and same params.\n\nArgs:\nother (instruction): other instruction\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def merge_svg_layers(svg_sources, share_transform=True):\n    \n    \n    (width, height), layers = get_svg_layers(svg_sources)\n\n    if share_transform:\n        transforms = [layer_i.attrib['transform'] for layer_i in layers\n                      if 'transform' in layer_i.attrib]\n        if len(transforms) > 1:\n            raise ValueError('Transform can only be shared if *exactly one* '\n                             'layer has a transform ({} layers have '\n                             '`transform` attributes)'.format(len(transforms)))\n        elif transforms:\n            \n            for layer_i in layers:\n                layer_i.attrib['transform'] = transforms[0]\n\n    \n    dwg = svgwrite.Drawing(profile='tiny', debug=False, size=(width, height))\n\n    \n    output_svg_root = etree.fromstring(dwg.tostring())\n    output_svg_root.extend(layers)\n\n    \n    output = StringIO.StringIO()\n    output.write(etree.tostring(output_svg_root))\n    output.seek(0)\n    return output", "docstring": "Merge layers from input svg sources into a single XML document.\n\nArgs:\n\nsvg_sources (list) : A list of file-like objects, each containing\none or more XML layers.\nshare_transform (bool) : If exactly one layer has a transform, apply it\nto *all* other layers as well.\n\nReturns:\n\nStringIO.StringIO : File-like object containing merge XML document.", "source": "juraj-google-style"}
{"code": "def get_text(revision, strip=True):\n  \n  \n  start_pos = revision.find(\"<text\")\n  assert start_pos != -1\n  end_tag_pos = revision.find(\">\", start_pos)\n  assert end_tag_pos != -1\n  end_tag_pos += len(\">\")\n  end_pos = revision.find(\"</text>\")\n  if end_pos == -1:\n    ret = \"\"\n  else:\n    ret = revision[end_tag_pos:end_pos]\n  if strip:\n    ret = strip_text(ret)\n  ret = text_encoder.to_unicode_utf8(ret)\n  return ret", "docstring": "Extract the text from a revision.\n\nArgs:\nrevision: a string\nstrip: a boolean\n\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def connection(self):\n    ctx = stack.top\n    if (ctx is None):\n        raise Exception('Working outside of the Flask application context. If you wish to make a connection outside of a flask application context, please handle your connections and use manager.make_connection()')\n    if hasattr(ctx, 'ldap3_manager_main_connection'):\n        return ctx.ldap3_manager_main_connection\n    else:\n        connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'), contextualise=False)\n        connection.bind()\n        if (ctx is not None):\n            ctx.ldap3_manager_main_connection = connection\n        return connection", "docstring": "Convenience property for externally accessing an authenticated\nconnection to the server. This connection is automatically\nhandled by the appcontext, so you do not have to perform an unbind.\n\nReturns:\nldap3.Connection: A bound ldap3.Connection\nRaises:\nldap3.core.exceptions.LDAPException: Since this method is performing\na bind on behalf of the caller. You should handle this case\noccuring, such as invalid service credentials.", "source": "codesearchnet"}
{"code": "def _safe_issubclass(derived, parent):\n    try:\n        return issubclass(derived, parent)\n    except (TypeError, AttributeError):\n        if hasattr(derived, '__origin__'):\n            try:\n                return issubclass(derived.__origin__, parent)\n            except TypeError:\n                pass\n        return False", "docstring": "Like issubclass, but swallows TypeErrors.\n\nThis is useful for when either parameter might not actually be a class,\ne.g. typing.Union isn't actually a class.\n\nArgs:\nderived: As in issubclass.\nparent: As in issubclass.\n\nReturns:\nissubclass(derived, parent), or False if a TypeError was raised.", "source": "github-repos"}
{"code": "def clear(self, timestamp):\n    self.storage.clear()\n    self.push(streams.DATA_CLEARED, timestamp, 1)", "docstring": "Clear all data from the RSL.\n\nThis pushes a single reading once we clear everything so that\nwe keep track of the highest ID that we have allocated to date.\n\nThis needs the current timestamp to be able to properly timestamp\nthe cleared storage reading that it pushes.\n\nArgs:\ntimestamp (int): The current timestamp to store with the\nreading.", "source": "codesearchnet"}
{"code": "def __init__(self, vars_map):\n    \n    super(core.PostProcessor, self).__init__()\n\n    self.vars_map = {}\n    for var_name, value in iteritems(vars_map):\n      var_regex = re.compile(\n          re.escape(\"%\" + var_name + \"%\"), flags=re.IGNORECASE)\n      self.vars_map[var_name.lower()] = (var_regex, value)", "docstring": "EnvVarsPostProcessor constructor.\n\nArgs:\nvars_map: Dictionary of \"string\" -> \"string|list\", i.e. a mapping of\nenvironment variables names to their suggested values or to lists\nof their suggested values.", "source": "juraj-google-style"}
{"code": "def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):\n    \n\n    for vevent in getattr(cal, 'vevent_list', []):\n        start = getattr(vevent, 'dtstart', None)\n        end   = getattr(vevent, 'dtend',   None)\n        for node in (start, end):\n            if node:\n                dt = node.value\n                if (isinstance(dt, datetime) and\n                        (not utc_only or dt.tzinfo == utc_tz)):\n                    if dt.tzinfo is None:\n                        dt = dt.replace(tzinfo = default)\n                    node.value = dt.astimezone(new_timezone)", "docstring": "Change the timezone of the specified component.\n\nArgs:\ncal (Component): the component to change\nnew_timezone (tzinfo): the timezone to change to\ndefault (tzinfo): a timezone to assume if the dtstart or dtend in cal\ndoesn't have an existing timezone\nutc_only (bool): only convert dates that are in utc\nutc_tz (tzinfo): the tzinfo to compare to for UTC when processing\nutc_only=True", "source": "juraj-google-style"}
{"code": "def core_name(self):\n    buf_size = self.MAX_BUF_SIZE\n    buf = (ctypes.c_char * buf_size)()\n    self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size)\n    return ctypes.string_at(buf).decode()", "docstring": "Returns the name of the target ARM core.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe target core's name.", "source": "codesearchnet"}
{"code": "def get_type_info(obj):\n    if isinstance(obj, primitive_types):\n        return ('primitive', type(obj).__name__)\n    if isinstance(obj, sequence_types):\n        return ('sequence', type(obj).__name__)\n    if isinstance(obj, array_types):\n        return ('array', type(obj).__name__)\n    if isinstance(obj, key_value_types):\n        return ('key-value', type(obj).__name__)\n    if isinstance(obj, types.ModuleType):\n        return ('module', type(obj).__name__)\n    if isinstance(obj, (types.FunctionType, types.MethodType)):\n        return ('function', type(obj).__name__)\n    if isinstance(obj, type):\n        if hasattr(obj, '__dict__'):\n            return ('class', obj.__name__)\n    if isinstance(type(obj), type):\n        if hasattr(obj, '__dict__'):\n            cls_name = type(obj).__name__\n            if (cls_name == 'classobj'):\n                cls_name = obj.__name__\n                return ('class', '{}'.format(cls_name))\n            if (cls_name == 'instance'):\n                cls_name = obj.__class__.__name__\n            return ('instance', '{} instance'.format(cls_name))\n    return ('unknown', type(obj).__name__)", "docstring": "Get type information for a Python object\n\nArgs:\nobj: The Python object\n\nReturns:\ntuple: (object type \"catagory\", object type name)", "source": "codesearchnet"}
{"code": "def get_node_ip_address(address='8.8.8.8:53'):\n    (ip_address, port) = address.split(':')\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        s.connect((ip_address, int(port)))\n        node_ip_address = s.getsockname()[0]\n    except Exception as e:\n        node_ip_address = '127.0.0.1'\n        if (e.errno == 101):\n            try:\n                host_name = socket.getfqdn(socket.gethostname())\n                node_ip_address = socket.gethostbyname(host_name)\n            except Exception:\n                pass\n    finally:\n        s.close()\n    return node_ip_address", "docstring": "Determine the IP address of the local node.\n\nArgs:\naddress (str): The IP address and port of any known live service on the\nnetwork you care about.\n\nReturns:\nThe IP address of the current node.", "source": "codesearchnet"}
{"code": "def DeregisterDefinition(self, artifact_definition):\n    artifact_definition_name = artifact_definition.name.lower()\n    if (artifact_definition_name not in self._artifact_definitions):\n        raise KeyError('Artifact definition not set for name: {0:s}.'.format(artifact_definition.name))\n    del self._artifact_definitions[artifact_definition_name]", "docstring": "Deregisters an artifact definition.\n\nArtifact definitions are identified based on their lower case name.\n\nArgs:\nartifact_definition (ArtifactDefinition): an artifact definition.\n\nRaises:\nKeyError: if an artifact definition is not set for the corresponding name.", "source": "codesearchnet"}
{"code": "def norm(self, valu):\n    func = self._type_norms.get(type(valu))\n    if (func is None):\n        raise s_exc.NoSuchFunc(name=self.name, mesg=('no norm for type: %r' % (type(valu),)))\n    return func(valu)", "docstring": "Normalize the value for a given type.\n\nArgs:\nvalu (obj): The value to normalize.\n\nReturns:\n((obj,dict)): The normalized valu, info tuple.\n\nNotes:\nThe info dictionary uses the following key conventions:\nsubs (dict): The normalized sub-fields as name: valu entries.", "source": "codesearchnet"}
{"code": "def get_output_from_cache(name, filename):\n    cache_filename = _get_cache_filename(name, filename)\n    if (os.path.exists(cache_filename) and (os.path.getmtime(filename) < os.path.getmtime(cache_filename))):\n        with io.open(cache_filename) as f:\n            return f.read()\n    return None", "docstring": "Returns the output from the cache if still valid.\n\nIt checks that the cache file is defined and that its modification time is\nafter the modification time of the original file.\n\nArgs:\nname: string: name of the linter.\nfilename: string: path of the filename for which we are retrieving the\noutput.\n\nReturns: a string with the output, if it is still valid, or None otherwise.", "source": "codesearchnet"}
{"code": "def BuildService(self, cls):\n    \n\n    \n    \n    \n    def _WrapCallMethod(srvc, method_descriptor,\n                        rpc_controller, request, callback):\n      return self._CallMethod(srvc, method_descriptor,\n                       rpc_controller, request, callback)\n    self.cls = cls\n    cls.CallMethod = _WrapCallMethod\n    cls.GetDescriptor = staticmethod(lambda: self.descriptor)\n    cls.GetDescriptor.__doc__ = \"Returns the service descriptor.\"\n    cls.GetRequestClass = self._GetRequestClass\n    cls.GetResponseClass = self._GetResponseClass\n    for method in self.descriptor.methods:\n      setattr(cls, method.name, self._GenerateNonImplementedMethod(method))", "docstring": "Constructs the service class.\n\nArgs:\ncls: The class that will be constructed.", "source": "juraj-google-style"}
{"code": "def configure(self, options):\n        \n        self.client.api.configure_plugin(self.name, options)\n        self.reload()", "docstring": "Update the plugin's settings.\n\nArgs:\noptions (dict): A key-value mapping of options.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def create_chapter_from_file(self, file_name, url=None, title=None):\n    with codecs.open(file_name, 'r', encoding='utf-8') as f:\n        content_string = f.read()\n    return self.create_chapter_from_string(content_string, url, title)", "docstring": "Creates a Chapter object from an html or xhtml file. Sanitizes the\nfile's content using the clean_function method, and saves\nit as the content of the created chapter.\n\nArgs:\nfile_name (string): The file_name containing the html or xhtml\ncontent of the created Chapter\nurl (Option[string]): A url to infer the title of the chapter from\ntitle (Option[string]): The title of the created Chapter. By\ndefault, this is None, in which case the title will try to be\ninferred from the webpage at the url.\n\nReturns:\nChapter: A chapter object whose content is the given file\nand whose title is that provided or inferred from the url", "source": "codesearchnet"}
{"code": "def learn_one(self, x: beam.Row) -> None:\n    if len(x.__dict__) != 1:\n        raise ValueError('ZScore.learn_one expected univariate input, but got %s', str(x))\n    v = next(iter(x))\n    self._stdev_tracker.push(v)\n    self._sub_stat_tracker.push(v)", "docstring": "Updates the mean and standard deviation trackers with a new data point.\n\nArgs:\nx: A `beam.Row` containing a single numerical value.", "source": "github-repos"}
{"code": "def collect_function_renames():\n    renames = set()\n    all_v2_names = get_all_v2_names()\n\n    def visit(unused_path, unused_parent, children):\n        \n        for child in children:\n            _, attr = tf_decorator.unwrap(child[1])\n            api_names_v1 = [name for name in tf_export.get_v1_names(attr) if '.__internal__.' not in name]\n            api_names_v2 = tf_export.get_v2_names(attr)\n            if not api_names_v2:\n                api_names_v2 = [name for name in api_names_v1 if name in all_v2_names]\n            deprecated_api_names = set(api_names_v1) - set(api_names_v2)\n            for name in deprecated_api_names:\n                renames.add((name, get_canonical_name(api_names_v2, name)))\n    visitor = public_api.PublicAPIVisitor(visit)\n    visitor.do_not_descend_map['tf'].append('contrib')\n    visitor.private_map['tf.compat'] = ['v1', 'v2']\n    traverse.traverse(tf.version, visitor)\n    traverse.traverse(tf.compat.v1, visitor)\n    traverse.traverse(tf.compat.v2, visitor)\n    return renames", "docstring": "Looks for functions/classes that need to be renamed in TF 2.0.\n\nReturns:\nSet of tuples of the form (current name, new name).", "source": "github-repos"}
{"code": "def forward(self, main_feature, condition_feature):\n    probabilities_and_temperature = self.mlp(torch.concat((main_feature, condition_feature), dim=1))\n    probabilities, temperature = (probabilities_and_temperature[:, :2, ...], probabilities_and_temperature[:, 2:, ...])\n    probabilities = probabilities + self.p_eps\n    probabilities = probabilities[:, 0, ...] / (probabilities[:, 0, ...] + probabilities[:, 1, ...])\n    temperature = temperature + self.p_eps\n    temperature = temperature[:, 0, ...] / (temperature[:, 0, ...] + temperature[:, 1, ...])\n    temperature = temperature.unsqueeze(1)\n    temperature = (self.max_temp - self.min_temp) * temperature + self.min_temp\n    return self.log_binomial_transform(probabilities, temperature)", "docstring": "Args:\nmain_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):\nMain feature.\ncondition_feature (torch.Tensor of shape `(batch_size, num_channels, height, width)`):\nCondition feature.\n\nReturns:\n`torch.Tensor`:\nOutput log binomial distribution", "source": "github-repos"}
{"code": "def todo(self, **kwargs):\n    path = ('%s/%s/todo' % (self.manager.path, self.get_id()))\n    self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Create a todo associated to the object.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTodoError: If the todo cannot be set", "source": "codesearchnet"}
{"code": "def assert_processor_available(processor: str) -> None:\n    \n    if processor not in [Processors.XHTML2PDF,\n                         Processors.WEASYPRINT,\n                         Processors.PDFKIT]:\n        raise AssertionError(\"rnc_pdf.set_pdf_processor: invalid PDF processor\"\n                             \" specified\")\n    if processor == Processors.WEASYPRINT and not weasyprint:\n        raise RuntimeError(\"rnc_pdf: Weasyprint requested, but not available\")\n    if processor == Processors.XHTML2PDF and not xhtml2pdf:\n        raise RuntimeError(\"rnc_pdf: xhtml2pdf requested, but not available\")\n    if processor == Processors.PDFKIT and not pdfkit:\n        raise RuntimeError(\"rnc_pdf: pdfkit requested, but not available\")", "docstring": "Assert that a specific PDF processor is available.\n\nArgs:\nprocessor: a PDF processor type from :class:`Processors`\n\nRaises:\nAssertionError: if bad ``processor``\nRuntimeError: if requested processor is unavailable", "source": "juraj-google-style"}
{"code": "def __init__(self, data_type=DATA_TYPE):\n    \n    super(SyslogLineEventData, self).__init__(data_type=data_type)\n    self.body = None\n    self.hostname = None\n    self.pid = None\n    self.reporter = None\n    self.severity = None", "docstring": "Initializes an event data attribute container.\n\nArgs:\ndata_type (Optional[str]): event data type indicator.", "source": "juraj-google-style"}
{"code": "async def inspect(self, name: str) -> Mapping:\n        \n        response = await self.docker._query_json(\"images/{name}/json\".format(name=name))\n        return response", "docstring": "Return low-level information about an image\n\nArgs:\nname: name of the image", "source": "juraj-google-style"}
{"code": "def cut_spectrum(sp, l0, lf):\n    if (l0 >= lf):\n        raise ValueError('l0 must be lower than lf')\n    idx0 = np.argmin(np.abs((sp.x - l0)))\n    idx1 = np.argmin(np.abs((sp.x - lf)))\n    out = copy.deepcopy(sp)\n    out.x = out.x[idx0:idx1]\n    out.y = out.y[idx0:idx1]\n    return out", "docstring": "Cuts spectrum given a wavelength interval, leaving origina intact\n\nArgs:\nsp: Spectrum instance\nl0: initial wavelength\nlf: final wavelength\n\nReturns:\nSpectrum: cut spectrum", "source": "codesearchnet"}
{"code": "def stop_artifact_creation(self, id_or_uri, task_uri):\n    data = {'taskUri': task_uri}\n    uri = (((self.URI + '/') + extract_id_from_uri(id_or_uri)) + self.STOP_CREATION_PATH)\n    return self._client.update(data, uri=uri)", "docstring": "Stops creation of the selected Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\ntask_uri: Task URI associated with the Artifact Bundle.\n\nReturns:\nstring:", "source": "codesearchnet"}
{"code": "def is_user_in_experiment(config, experiment, attributes, logger):\n    audience_conditions = experiment.getAudienceConditionsOrIds()\n    logger.debug(audience_logs.EVALUATING_AUDIENCES_COMBINED.format(experiment.key, json.dumps(audience_conditions)))\n    if ((audience_conditions is None) or (audience_conditions == [])):\n        logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(experiment.key, 'TRUE'))\n        return True\n    if (attributes is None):\n        attributes = {}\n\n    def evaluate_custom_attr(audienceId, index):\n        audience = config.get_audience(audienceId)\n        custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator(audience.conditionList, attributes, logger)\n        return custom_attr_condition_evaluator.evaluate(index)\n\n    def evaluate_audience(audienceId):\n        audience = config.get_audience(audienceId)\n        if (audience is None):\n            return None\n        logger.debug(audience_logs.EVALUATING_AUDIENCE.format(audienceId, audience.conditions))\n        result = condition_tree_evaluator.evaluate(audience.conditionStructure, (lambda index: evaluate_custom_attr(audienceId, index)))\n        result_str = (str(result).upper() if (result is not None) else 'UNKNOWN')\n        logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT.format(audienceId, result_str))\n        return result\n    eval_result = condition_tree_evaluator.evaluate(audience_conditions, evaluate_audience)\n    eval_result = (eval_result or False)\n    logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(experiment.key, str(eval_result).upper()))\n    return eval_result", "docstring": "Determine for given experiment if user satisfies the audiences for the experiment.\n\nArgs:\nconfig: project_config.ProjectConfig object representing the project.\nexperiment: Object representing the experiment.\nattributes: Dict representing user attributes which will be used in determining\nif the audience conditions are met. If not provided, default to an empty dict.\nlogger: Provides a logger to send log messages to.\n\nReturns:\nBoolean representing if user satisfies audience conditions for any of the audiences or not.", "source": "codesearchnet"}
{"code": "def _compute_keys(self, n_minus_1_grams: torch.LongTensor, indices: torch.LongTensor) -> Tuple[torch.LongTensor, torch.LongTensor]:\n    batch_size, _ = n_minus_1_grams.shape\n    hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)\n    hash_result_with_just_context = self.accumulate_hash(hash_result, n_minus_1_grams)\n    hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 1), out_dims=1)(hash_result_with_just_context, indices[:, :, None])\n    keys = self.keys[None, None, :, None]\n    hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 2), out_dims=2)(hash_result, keys)\n    return (hash_result, hash_result_with_just_context)", "docstring": "Computes random keys for each ngram and depth.\n\nArgs:\nn_minus_1_grams (`torch.LongTensor`):\nNgrams (batch_size, ngram_len - 1).\nindices (`torch.LongTensor`):\nindices of the continuations (batch_size, num_indices)\n\nReturns:\nNgram keys (batch_size, num_indices, depth).", "source": "github-repos"}
{"code": "def rename(self, new_folder_name):\n        \n        headers = self.headers\n        endpoint = 'https:\n        payload = '{ \"DisplayName\": \"' + new_folder_name + '\"}'\n\n        r = requests.patch(endpoint, headers=headers, data=payload)\n\n        if check_response(r):\n            return_folder = r.json()\n            return self._json_to_folder(self.account, return_folder)", "docstring": "Renames the Folder to the provided name.\n\nArgs:\nnew_folder_name: A string of the replacement name.\n\nRaises:\nAuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.\n\nReturns:\nA new Folder representing the folder with the new name on Outlook.", "source": "juraj-google-style"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    prefix_ones = [1] * len(self.prefix_tokens)\n    suffix_ones = [1]\n    if token_ids_1 is None:\n        return prefix_ones + [0] * len(token_ids_0) + suffix_ones\n    return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def call(func, args):\n    \n    assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(\n        func.__name__)\n    raw_func = (\n        func if isinstance(func, FunctionType) else func.__class__.__call__)\n    hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))\n    argspec = _getargspec(raw_func)\n    named_args = {}\n    varargs = ()\n    for k, nk, v in _normalize(args):\n        if nk == argspec.varargs:\n            hints[nk] = Tuple[hints[nk], ...]\n        elif nk not in argspec.args and argspec.varkw in hints:\n            hints[nk] = hints[argspec.varkw]\n        try:\n            value = cast(hints[nk], v)\n        except TypeError as e:\n            _LOGGER.exception(e)\n            six.raise_from(exc.InvalidCliValueError(k, v), e)\n        if nk == argspec.varargs:\n            varargs = value\n        elif (nk in argspec.args or argspec.varkw) and (\n                nk not in named_args or named_args[nk] is None):\n            named_args[nk] = value\n    return func(*varargs, **named_args)", "docstring": "Call the function with args normalized and cast to the correct types.\n\nArgs:\nfunc: The function to call.\nargs: The arguments parsed by docopt.\n\nReturns:\nThe return value of func.", "source": "juraj-google-style"}
{"code": "def __init__(self, fill_method='zero', fill_missing=True, **kwargs):\n        \n        super().__init__()\n        self.fill_missing = fill_missing\n        self.filler = SimpleFill(fill_method)", "docstring": "Imputs NaN's using various filling methods like mean, zero, median, min, random\n\n\nArgs:\nfill_method: How NaN's will be exchanged. Possible values: 'mean', 'zero', 'median', 'min', 'random'\nfill_missing: If True, transformer will fill NaN values by filling method", "source": "juraj-google-style"}
{"code": "def set_error_filter(self, filt):\n    self._filter = filt", "docstring": "Set the error filter.\n\nArgs:\nfilt: A function or callable object that accepts a single argument of type\nError and returns True if that error should be included in the log.  A\nfilter of None will add all errors.\n\nNOTE: The filter may adjust some properties of the error.", "source": "github-repos"}
{"code": "def add_user(self, group, username):\n    try:\n        self.lookup_id(group)\n    except ldap_tools.exceptions.InvalidResult as err:\n        raise err from None\n    operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}\n    self.client.modify(self.__distinguished_name(group), operation)", "docstring": "Add a user to the specified LDAP group.\n\nArgs:\ngroup: Name of group to update\nusername: Username of user to add\n\nRaises:\nldap_tools.exceptions.InvalidResult:\nResults of the query were invalid.  The actual exception raised\ninherits from InvalidResult.  See #lookup_id for more info.", "source": "codesearchnet"}
{"code": "def cap17(msg):\n    \n    allbds = ['05', '06', '07', '08', '09', '0A', '20', '21', '40', '41',\n              '42', '43', '44', '45', '48', '50', '51', '52', '53', '54',\n              '55', '56', '5F', '60', 'NA', 'NA', 'E1', 'E2']\n\n    d = hex2bin(data(msg))\n    idx = [i for i, v in enumerate(d[:28]) if v=='1']\n    capacity = ['BDS'+allbds[i] for i in idx if allbds[i] is not 'NA']\n\n    return capacity", "docstring": "Extract capacities from BDS 1,7 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nlist: list of suport BDS codes", "source": "juraj-google-style"}
{"code": "def add(self, doc, attributes=None):\n    doc_ref = str(doc[self._ref])\n    self._documents[doc_ref] = (attributes or {})\n    self.document_count += 1\n    for (field_name, field) in self._fields.items():\n        extractor = field.extractor\n        field_value = (doc[field_name] if (extractor is None) else extractor(doc))\n        tokens = Tokenizer(field_value)\n        terms = self.pipeline.run(tokens)\n        field_ref = FieldRef(doc_ref, field_name)\n        field_terms = defaultdict(int)\n        self.field_term_frequencies[str(field_ref)] = field_terms\n        self.field_lengths[str(field_ref)] = len(terms)\n        for term in terms:\n            term_key = str(term)\n            field_terms[term_key] += 1\n            if (term_key not in self.inverted_index):\n                posting = {_field_name: {} for _field_name in self._fields}\n                posting['_index'] = self.term_index\n                self.term_index += 1\n                self.inverted_index[term_key] = posting\n            if (doc_ref not in self.inverted_index[term_key][field_name]):\n                self.inverted_index[term_key][field_name][doc_ref] = defaultdict(list)\n            for metadata_key in self.metadata_whitelist:\n                metadata = term.metadata[metadata_key]\n                self.inverted_index[term_key][field_name][doc_ref][metadata_key].append(metadata)", "docstring": "Adds a document to the index.\n\nBefore adding documents to the index it should have been fully\nsetup, with the document ref and all fields to index already having\nbeen specified.\n\nThe document must have a field name as specified by the ref (by default\nthis is 'id') and it should have all fields defined for indexing,\nthough None values will not cause errors.\n\nArgs:\n- doc (dict): The document to be added to the index.\n- attributes (dict, optional): A set of attributes corresponding\nto the document, currently a single `boost` -> int will be\ntaken into account.", "source": "codesearchnet"}
{"code": "def setEditable(self, editable):\n        \n        if not isinstance(editable, bool):\n            raise TypeError('Argument is not of type bool')\n        self._editable = editable", "docstring": "setter to _editable. apply changes while changing dtype.\n\nRaises:\nTypeError: if editable is not of type bool.\n\nArgs:\neditable (bool): apply changes while changing dtype.", "source": "juraj-google-style"}
{"code": "def get_numeric_features_to_observed_range(examples):\n    observed_features = collections.defaultdict(list)\n    for example in examples:\n        for feature_name in get_numeric_feature_names(example):\n            original_feature = parse_original_feature_from_example(example, feature_name)\n            observed_features[feature_name].extend(original_feature.original_value)\n    return {feature_name: {'observedMin': min(feature_values), 'observedMax': max(feature_values)} for (feature_name, feature_values) in iteritems(observed_features)}", "docstring": "Returns numerical features and their observed ranges.\n\nArgs:\nexamples: Examples to read to get ranges.\n\nReturns:\nA dict mapping feature_name -> {'observedMin': 'observedMax': } dicts,\nwith a key for each numerical feature.", "source": "codesearchnet"}
{"code": "def upload_backup_bundle_from_file(self, file_path, deployment_groups_id_or_uri):\n        \n        deployment_groups_uri = deployment_groups_id_or_uri\n\n        if self.DEPLOYMENT_GROUPS_URI not in deployment_groups_id_or_uri:\n            deployment_groups_uri = self.DEPLOYMENT_GROUPS_URI + deployment_groups_id_or_uri\n\n        uri = self.BACKUP_ARCHIVE_PATH + \"?deploymentGrpUri=\" + deployment_groups_uri\n\n        return self._client.upload(file_path, uri)", "docstring": "Restore an Artifact Bundle from a backup file.\n\nArgs:\nfile_path (str): The File Path to restore the Artifact Bundle.\ndeployment_groups_id_or_uri: ID or URI of the Deployment Groups.\n\nReturns:\ndict: Deployment group.", "source": "juraj-google-style"}
{"code": "def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:\n    df = pd.DataFrame(index=table.index)\n    for column in self.columns:\n        df = column.update_dataframe(df, table=table, validate=validate)\n    return df", "docstring": "Return a fully recoded dataframe.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply recoding logic.\nvalidate (bool): If ``True``, recoded table must pass validation tests.", "source": "codesearchnet"}
{"code": "def js_adaptor(buffer):\n    buffer = re.sub('true', 'True', buffer)\n    buffer = re.sub('false', 'False', buffer)\n    buffer = re.sub('none', 'None', buffer)\n    buffer = re.sub('NaN', '\"NaN\"', buffer)\n    return buffer", "docstring": "convert javascript objects like true, none, NaN etc. to\nquoted word.\n\nArguments:\nbuffer: string to be converted\n\nReturns:\nstring after conversion", "source": "codesearchnet"}
{"code": "def get_resize_output_image_size(image, resolution_max_side: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:\n    height, width = get_image_size(image, channel_dim=input_data_format)\n    height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side)\n    height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE)\n    return (height, width)", "docstring": "Get the output size of the image after resizing given a dictionary specifying the max and min sizes.\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nresolution_max_side (`int`):\nThe longest edge of the image will be resized to this value. The shortest edge will be resized to keep the\ninput aspect ratio.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\nReturns:\nThe output size of the image after resizing.", "source": "github-repos"}
{"code": "def update_work_as_completed(self, worker_id, work_id, other_values=None,\n                               error=None):\n    \n    client = self._datastore_client\n    try:\n      with client.transaction() as transaction:\n        work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,\n                              KIND_WORK, work_id)\n        work_entity = client.get(work_key, transaction=transaction)\n        if work_entity['claimed_worker_id'] != worker_id:\n          return False\n        work_entity['is_completed'] = True\n        if other_values:\n          work_entity.update(other_values)\n        if error:\n          work_entity['error'] = text_type(error)\n        transaction.put(work_entity)\n    except Exception:\n      return False\n    return True", "docstring": "Updates work piece in datastore as completed.\n\nArgs:\nworker_id: ID of the worker which did the work\nwork_id: ID of the work which was done\nother_values: dictionary with additonal values which should be saved\nwith the work piece\nerror: if not None then error occurred during computation of the work\npiece. In such case work will be marked as completed with error.\n\nReturns:\nwhether work was successfully updated", "source": "juraj-google-style"}
{"code": "def power(self, n):\n        \n        if n > 0:\n            return super().power(n)\n        return Chi(SuperOp(self).power(n))", "docstring": "The matrix power of the channel.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nChi: the matrix power of the SuperOp converted to a Chi channel.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "juraj-google-style"}
{"code": "def install_bootstrapped_files(nb_path=None, server_config=True, DEBUG=False):\n    install_path = None\n    print('Starting hide_code.js install...')\n    current_dir = path.abspath(path.dirname(__file__))\n    config_dirs = j_path.jupyter_config_path()\n    notebook_module_path = Utils.get_notebook_module_dir()\n    for dir in config_dirs:\n        custom_dir = path.join(dir, 'custom')\n        if path.isdir(custom_dir):\n            install_path = custom_dir\n            break\n    if (install_path == None):\n        print('No config directories contain \"custom\" folder. Trying Jupyter notebook module path...')\n        install_path = path.join(notebook_module_path, 'static', 'custom')\n    if (nb_path != None):\n        install_path = nb_path\n        print(('Using argument supplied path: ' + install_path))\n    if DEBUG:\n        print(install_path)\n    if path.isdir(install_path):\n        shutil.copyfile(path.join(current_dir, 'hide_code.js'), path.join(install_path, 'hide_code.js'))\n        print(('Copying hide_code.js to ' + install_path))\n        print('Attempting to configure custom.js to auto-load hide_code.js...')\n        try:\n            with open(path.join(current_dir, 'auto-load.txt')) as auto:\n                auto_load_txt = auto.read()\n                auto_loaded = False\n                with open(path.join(install_path, 'custom.js'), 'r') as customJS:\n                    if (auto_load_txt in customJS.read()):\n                        auto_loaded = True\n                        print('Custom.js already configured to auto-load hide_code.js.')\n                if (not auto_loaded):\n                    with open(path.join(install_path, 'custom.js'), 'a') as customJS:\n                        customJS.write(auto_load_txt)\n                        print('Configured custom.js to auto-load hide_code.js.')\n        except:\n            print('Custom.js not in custom directory.')\n    else:\n        print(('Unable to install into ' + install_path))\n        print(\"Directory doesn't exist.\")\n        print('Make sure Jupyter is installed.')\n    if server_config:\n        print('Attempting to configure auto-loading for hide_code export handlers.')\n        try:\n            server_cm = ConfigManager(config_dir=j_path.jupyter_config_dir())\n            cfg = server_cm.get('jupyter_notebook_config')\n            server_extensions = cfg.setdefault('NotebookApp', {}).setdefault('server_extensions', [])\n            extension = 'hide_code.hide_code'\n            if (extension not in server_extensions):\n                cfg['NotebookApp']['server_extensions'] += [extension]\n                server_cm.update('jupyter_notebook_config', cfg)\n                print('Configured jupyter to auto-load hide_code export handlers.')\n            else:\n                print('Jupyter already configured to auto-load export handlers.')\n        except:\n            print('Unable to install server extension.')", "docstring": "Installs javascript and exporting server extensions in Jupyter notebook.\n\nArgs:\nnb_path (string): Path to notebook module.\nserver_config (boolean): Install exporting server extensions.\nDEBUG (boolean): Verbose mode.", "source": "codesearchnet"}
{"code": "def __init__(self, broker, queue_output, backend=None,\n                 max_tasks_in_memory=None, max_workers_in_memory=None):\n        \n        self._app = Celery(broker=broker, backend=backend)\n        self._queue_output = queue_output\n\n        from celery.backends.base import DisabledBackend\n        self._use_result_backend = not isinstance(self._app.backend, DisabledBackend)\n\n        logger.info('Creating %s: max_tasks=%d; max_workers=%d',\n                    EventListener.__name__, max_tasks_in_memory, max_workers_in_memory)\n        logger.info('Celery broker=%s; backend=%s; using_result_backend=%s',\n                    broker, backend, self._use_result_backend)\n\n        \n        self.memory = State(\n            max_tasks_in_memory=max_tasks_in_memory,\n            max_workers_in_memory=max_workers_in_memory,\n        )  \n\n        \n        self._listener_thread = None  \n        self._celery_receiver = None  \n\n        \n        self._wait_event = threading.Event()\n\n        \n        def sigterm_handler(_signo, _stack_frame):  \n            self.__stop()\n\n        signal.signal(signal.SIGTERM, sigterm_handler)\n        self.__start()", "docstring": "Constructs an event listener instance.\n\nArgs:\nbroker (str): the broker being used by the celery system.\nqueue_output (Queue): to send to streaming dispatcher.\nbackend (str): the result backend being used by the celery system.\nmax_tasks_in_memory (int): max tasks stored\nmax_workers_in_memory (int): max workers stored", "source": "juraj-google-style"}
{"code": "def add_gene_panel(self, panel_obj):\n    panel_name = panel_obj['panel_name']\n    panel_version = panel_obj['version']\n    display_name = panel_obj.get('display_name', panel_name)\n    if self.gene_panel(panel_name, panel_version):\n        raise IntegrityError('Panel {0} with version {1} already exist in database'.format(panel_name, panel_version))\n    LOG.info('loading panel {0}, version {1} to database'.format(display_name, panel_version))\n    result = self.panel_collection.insert_one(panel_obj)\n    LOG.debug('Panel saved')\n    return result.inserted_id", "docstring": "Add a gene panel to the database\n\nArgs:\npanel_obj(dict)", "source": "codesearchnet"}
{"code": "def static_nvals(self):\n    if self._nvals is not None:\n        nvals = tensor_util.constant_value(self._nvals)\n        if nvals is not None:\n            return nvals\n    if self._value_rowids is not None:\n        nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0)\n        if nvals.value is not None:\n            return nvals.value\n    return None", "docstring": "The number of values in this partition, if statically known.\n\n```python\nself.value_rowids().shape == [self.static_vals]\n```\n\nReturns:\nThe number of values in this partition as an `int` (if statically known);\nor `None` (otherwise).", "source": "github-repos"}
{"code": "def setEditorData(self, editor, index):\n    editor.blockSignals(True)\n    data = index.data()\n    dataIndex = editor.findData(data)\n    editor.setCurrentIndex(dataIndex)\n    editor.blockSignals(False)", "docstring": "Sets the current data for the editor.\n\nThe data displayed has the same value as `index.data(Qt.EditRole)`\n(the translated name of the datatype). Therefor a lookup for all items\nof the combobox is made and the matching item is set as the currently\ndisplayed item.\n\nSignals emitted by the editor are blocked during exection of this method.\n\nArgs:\neditor (QtGui.QComboBox): The current editor for the item. Should be\na `QtGui.QComboBox` as defined in `createEditor`.\nindex (QtCore.QModelIndex): The index of the current item.", "source": "codesearchnet"}
{"code": "def set_all_file_column_labels(self, xlabel=None, ylabel=None):\n        \n        if xlabel is not None:\n            self.general.x_column_label = xlabel\n        if ylabel is not None:\n            self.general.y_column_label = ylabel\n        if xlabel is None and ylabel is None:\n            warnings.warn(\"is not specifying x or y lables even\"\n                          + \"though column labels function is called.\", UserWarning)\n        return", "docstring": "Indicate general x,y column labels.\n\nThis sets the general x and y column labels into data files for all plots.\nIt can be overridden for specific plots.\n\nArgs:\nxlabel/ylabel (str, optional): String indicating column label for x,y values\ninto the data files. Default is None.\n\nRaises:\nUserWarning: If xlabel and ylabel are both not specified,\nThe user will be alerted, but the code will not stop.", "source": "juraj-google-style"}
{"code": "def filter_object(obj, marks, presumption=DELETE):\n    \n    if isinstance(obj, list):\n        keys = reversed(range(0, len(obj)))\n    else:\n        keys = obj.keys()\n\n    for k in keys:\n        v = obj[k]\n        m = marks.get(id(v), UNSPECIFIED)\n        if m == DELETE:\n            del obj[k]  \n        elif m == KEEP or presumption==KEEP:\n            \n            if isinstance(v, list) or isinstance(v, dict):\n                filter_object(v, marks, presumption=KEEP)\n        elif m == UNSPECIFIED:\n            \n            if isinstance(v, list) or isinstance(v, dict):\n                filter_object(v, marks, presumption=DELETE)\n                if len(v) == 0:\n                    del obj[k]\n            else:\n                del obj[k]", "docstring": "Filter down obj based on marks, presuming keys should be kept/deleted.\n\nArgs:\nobj: The object to be filtered. Filtering is done in-place.\nmarks: An object mapping id(obj) --> {DELETE,KEEP}\nThese values apply to the entire subtree, unless inverted.\npresumption: The default action to take on all keys.", "source": "juraj-google-style"}
{"code": "def is_complex_format_str(node):\n    inferred = utils.safe_infer(node)\n    if ((inferred is None) or (not isinstance(inferred.value, str))):\n        return True\n    try:\n        parsed = list(string.Formatter().parse(inferred.value))\n    except ValueError:\n        return False\n    for (_, _, format_spec, _) in parsed:\n        if format_spec:\n            return True\n    return False", "docstring": "Checks if node represents a string with complex formatting specs.\n\nArgs:\nnode (astroid.node_classes.NodeNG): AST node to check\nReturns:\nbool: True if inferred string uses complex formatting, False otherwise", "source": "codesearchnet"}
{"code": "def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting matmul ...')\n    if (names == 'short'):\n        tf_name = ('MMUL' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    if (len(inputs) == 1):\n        weights_name = '{0}.weight'.format(w_name)\n        W = weights[weights_name].numpy().transpose()\n        (input_channels, output_channels) = W.shape\n        keras_weights = [W]\n        dense = keras.layers.Dense(output_channels, weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros')\n        layers[scope_name] = dense(layers[inputs[0]])\n    elif (len(inputs) == 2):\n        weights_name = '{0}.weight'.format(w_name)\n        W = weights[weights_name].numpy().transpose()\n        (input_channels, output_channels) = W.shape\n        keras_weights = [W]\n        dense = keras.layers.Dense(output_channels, weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros')\n        layers[scope_name] = dense(layers[inputs[0]])\n    else:\n        raise AssertionError('Cannot convert matmul layer')", "docstring": "Convert matmul layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def __init__(self, name, default=None, help=None, type=str):\n        \n        self._name = name\n        self._default = default\n        self._help = help\n        self._type = type", "docstring": "Initialise the workflow option.\n\nArgs:\nname (str): The name of the option under which the value will be stored.\ndefault: The default value that should be used when no value is specified.\nSet to None to make this a non-optional option.\nhelp (str): A short help string for this option.\ntype: The type of the option. Supported types are: str, int, float, bool", "source": "juraj-google-style"}
{"code": "def median(self, **kwargs):\n    if self._is_transposed:\n        kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)\n        return self.transpose().median(**kwargs)\n    axis = kwargs.get('axis', 0)\n    func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)\n    return self._full_axis_reduce(axis, func)", "docstring": "Returns median of each column or row.\n\nReturns:\nA new QueryCompiler object containing the median of each column or row.", "source": "codesearchnet"}
{"code": "def _execute_command(self, key, *args):\n    client = self.redis_clients[(key.redis_shard_hash() % len(self.redis_clients))]\n    return client.execute_command(*args)", "docstring": "Execute a Redis command on the appropriate Redis shard based on key.\n\nArgs:\nkey: The object ID or the task ID that the query is about.\nargs: The command to run.\n\nReturns:\nThe value returned by the Redis command.", "source": "codesearchnet"}
{"code": "def _prepare_images_structure(self, images: ImageInput) -> ImageInput:\n    return make_flat_list_of_images(images)", "docstring": "Prepare the images structure for processing.\n\nArgs:\nimages (`ImageInput`):\nThe input images to process.\n\nReturns:\n`ImageInput`: The images with a valid nesting.", "source": "github-repos"}
{"code": "def send(url, data):\n    validate(data)\n    return requests.post(url, json=data)", "docstring": "Sends an incoming message\n\nArgs:\nurl(str): the incoming hook url\ndata(dict): the sending data\n\nReturns:\nrequests.Response", "source": "codesearchnet"}
{"code": "def _format_value(cls, value, type_):\n    res = value\n    if type_ == 'CLASS':\n        res = '{}.{}'.format(value.__module__, value.__name__)\n    elif type_ == 'DURATION':\n        res = value.total_seconds() * 1000\n    elif type_ == 'TIMESTAMP':\n        res = calendar.timegm(value.timetuple()) * 1000 + value.microsecond \n    return res", "docstring": "Returns the API representation of a value given its type.\n\nArgs:\nvalue: The value of the item that needs to be shortened.\ntype_(string): The type of the value.\n\nReturns:\nA formatted value in the form of a float, int, or string.", "source": "github-repos"}
{"code": "def read_config_info(ini_file):\n    \n    try:\n        config = RawConfigParser()\n        config.optionxform = lambda option: option\n        config.read(ini_file)\n        the_stuff = {}\n        for section in config.sections():\n            the_stuff[section] = {}\n            for option in config.options(section):\n                the_stuff[section][option] = config.get(section, option)\n\n        return the_stuff\n    except Exception as wtf:\n        logging.error('Exception caught in read_config_info(): {}'.format(wtf))\n        traceback.print_exc(file=sys.stdout)\n        return sys.exit(1)", "docstring": "Read the INI file\n\nArgs:\nini_file - path to the file\n\nReturns:\nA dictionary of stuff from the INI file\n\nExits:\n1 - if problems are encountered", "source": "juraj-google-style"}
{"code": "def to_css(self):\n    if (self.a == 1.0):\n        return ('hsl(%d, %s%%, %s%%)' % (self.h, (self.s * 100), (self.l * 100)))\n    else:\n        return ('hsla(%d, %s%%, %s%%, %s)' % (self.h, (self.s * 100), (self.l * 100), self.a))", "docstring": "Generate the CSS representation of this HSL color.\n\nReturns:\nstr, ``\"hsl(...)\"`` or ``\"hsla(...)\"``", "source": "codesearchnet"}
{"code": "def _get_file_iterator(self, file_obj):\n        \n        file_obj.seek(0)\n\n        return iter(lambda: file_obj.read(self.read_bs), '')", "docstring": "For given `file_obj` return iterator, which will read the file in\n`self.read_bs` chunks.\n\nArgs:\nfile_obj (file): File-like object.\n\nReturn:\niterator: Iterator reading the file-like object in chunks.", "source": "juraj-google-style"}
{"code": "def __init__(self, _args):\n        \n        super(TcExLib, self).__init__(_args)\n\n        \n        self.latest_version = None\n        self.lib_directory = 'lib_{}.{}.{}'.format(\n            sys.version_info.major, sys.version_info.minor, sys.version_info.micro\n        )\n        self.requirements_file = 'requirements.txt'\n        self.static_lib_dir = 'lib_latest'\n        self.use_temp_requirements_file = False", "docstring": "Initialize Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def Open(self, hostname, port):\n    \n    server_url = 'http:\n\n    try:\n      self._xmlrpc_proxy = xmlrpclib.ServerProxy(\n          server_url, allow_none=True)\n    except SocketServer.socket.error as exception:\n      logger.warning((\n          'Unable to connect to RPC server on {0:s}:{1:d} with error: '\n          '{2!s}').format(hostname, port, exception))\n      return False\n\n    return True", "docstring": "Opens a RPC communication channel to the server.\n\nArgs:\nhostname (str): hostname or IP address to connect to for requests.\nport (int): port to connect to for requests.\n\nReturns:\nbool: True if the communication channel was established.", "source": "juraj-google-style"}
{"code": "def infer(query, replacements=None, root_type=None, libs=('stdcore', 'stdmath')):\n    if root_type:\n        type_scope = scope.ScopeStack(std_core.MODULE, root_type)\n    else:\n        type_scope = scope.ScopeStack(std_core.MODULE)\n    stdcore_included = False\n    for lib in libs:\n        if (lib == 'stdcore'):\n            stdcore_included = True\n            continue\n        module = std_core.LibraryModule.ALL_MODULES.get(lib)\n        if (not module):\n            raise TypeError(('No standard library module %r.' % lib))\n        type_scope = scope.ScopeStack(module, type_scope)\n    if (not stdcore_included):\n        raise TypeError(\"'stdcore' must always be included.\")\n    query = q.Query(query, params=replacements)\n    return infer_type.infer_type(query, type_scope)", "docstring": "Determine the type of the query's output without actually running it.\n\nArguments:\nquery: A query object or string with the query.\nreplacements: Built-time parameters to the query, either as dict or as\nan array (for positional interpolation).\nroot_type: The types of variables to be supplied to the query inference.\nlibs: What standard libraries should be taken into account for the\ninference.\n\nReturns:\nThe type of the query's output, if it can be determined. If undecidable,\nreturns efilter.protocol.AnyType.\n\nNOTE: The inference returns the type of a row in the results, not of the\nactual Python object returned by 'apply'. For example, if a query\nreturns multiple rows, each one of which is an integer, the type of the\noutput is considered to be int, not a collection of rows.\n\nExamples:\ninfer(\"5 + 5\") # -> INumber\n\ninfer(\"SELECT * FROM people WHERE age > 10\") # -> AnyType\n\n# If root_type implements the IStructured reflection API:\ninfer(\"SELECT * FROM people WHERE age > 10\", root_type=...) # -> dict", "source": "codesearchnet"}
{"code": "def step_interpolation(x, xp, fp, **kwargs):\n    del kwargs\n    xp = np.expand_dims(xp, (- 1))\n    (lower, upper) = (xp[:(- 1)], xp[1:])\n    conditions = ((x >= lower) & (x < upper))\n    conditions = np.concatenate([[(x < xp[0])], conditions, [(x >= xp[(- 1)])]])\n    values = np.concatenate([[fp[0]], fp])\n    assert np.all((np.sum(conditions, 0) == 1)), 'xp must be increasing.'\n    indices = np.argmax(conditions, 0)\n    return values[indices].astype(np.float32)", "docstring": "Multi-dimensional step interpolation.\n\nReturns the multi-dimensional step interpolant to a function with\ngiven discrete data points (xp, fp), evaluated at x.\n\nNote that *N and *M indicate zero or more dimensions.\n\nArgs:\nx: An array of shape [*N], the x-coordinates of the interpolated values.\nxp: An np.array of shape [D], the x-coordinates of the data points, must be\nincreasing.\nfp: An np.array of shape [D, *M], the y-coordinates of the data points.\n**kwargs: Unused.\n\nReturns:\nAn array of shape [*N, *M], the interpolated values.", "source": "codesearchnet"}
{"code": "def AddStop(self, lat, lng, name, stop_id=None):\n    \n    if stop_id is None:\n      stop_id = util.FindUniqueId(self.stops)\n    stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng, name=name)\n    self.AddStopObject(stop)\n    return stop", "docstring": "Add a stop to this schedule.\n\nArgs:\nlat: Latitude of the stop as a float or string\nlng: Longitude of the stop as a float or string\nname: Name of the stop, which will appear in the feed\nstop_id: stop_id of the stop or None, in which case a unique id is picked\n\nReturns:\nA new Stop object", "source": "juraj-google-style"}
{"code": "def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):\n    if not masks.shape[0] == scores.shape[0] == labels.shape[0]:\n        raise ValueError('mask, scores and labels must have the same shape!')\n    to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)\n    return (masks[to_keep], scores[to_keep], labels[to_keep])", "docstring": "Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and\n`labels`.\n\nArgs:\nmasks (`torch.Tensor`):\nA tensor of shape `(num_queries, height, width)`.\nscores (`torch.Tensor`):\nA tensor of shape `(num_queries)`.\nlabels (`torch.Tensor`):\nA tensor of shape `(num_queries)`.\nobject_mask_threshold (`float`):\nA number between 0 and 1 used to binarize the masks.\nRaises:\n`ValueError`: Raised when the first dimension doesn't match in all input tensors.\nReturns:\n`Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region\n< `object_mask_threshold`.", "source": "github-repos"}
{"code": "def make_color_wheel(bins=None):\n    \n    if bins is None:\n        bins = [15, 6, 4, 11, 13, 6]\n    assert len(bins) == 6\n\n    RY, YG, GC, CB, BM, MR = tuple(bins)\n\n    ry = [1, np.arange(RY) / RY, 0]\n    yg = [1 - np.arange(YG) / YG, 1, 0]\n    gc = [0, 1, np.arange(GC) / GC]\n    cb = [0, 1 - np.arange(CB) / CB, 1]\n    bm = [np.arange(BM) / BM, 0, 1]\n    mr = [1, 0, 1 - np.arange(MR) / MR]\n\n    num_bins = RY + YG + GC + CB + BM + MR\n\n    color_wheel = np.zeros((3, num_bins), dtype=np.float32)\n\n    col = 0\n    for i, color in enumerate([ry, yg, gc, cb, bm, mr]):\n        for j in range(3):\n            color_wheel[j, col:col + bins[i]] = color[j]\n        col += bins[i]\n\n    return color_wheel.T", "docstring": "Build a color wheel.\n\nArgs:\nbins(list or tuple, optional): Specify the number of bins for each\ncolor range, corresponding to six ranges: red -> yellow,\nyellow -> green, green -> cyan, cyan -> blue, blue -> magenta,\nmagenta -> red. [15, 6, 4, 11, 13, 6] is used for default\n(see Middlebury).\n\nReturns:\nndarray: Color wheel of shape (total_bins, 3).", "source": "juraj-google-style"}
{"code": "def install(device: AndroidDevice, apk_path: str, timeout: int=DEFAULT_TIMEOUT_INSTALL_APK_SEC, user_id: Optional[int]=None, params: Optional[Iterable[str]]=None) -> None:\n    android_api_version = int(device.build_info['build_version_sdk'])\n    if user_id is not None and android_api_version < 24:\n        raise ValueError('Cannot specify `user_id` for device below SDK 24.')\n    args = ['-r', '-t']\n    if android_api_version >= 24:\n        if user_id is None:\n            user_id = device.adb.current_user_id\n        args = ['--user', str(user_id)] + args\n    if android_api_version >= 23:\n        args.append('-g')\n    if android_api_version >= 17:\n        args.append('-d')\n    args += params or []\n    args.append(apk_path)\n    try:\n        _execute_adb_install(device, args, timeout)\n        return\n    except adb.AdbError as e:\n        if not _should_retry_apk_install(str(e)):\n            raise\n        device.log.debug('Retrying installation of %s', apk_path)\n        device.reboot()\n        _execute_adb_install(device, args, timeout)", "docstring": "Install an apk on an Android device.\n\nInstalling apk is more complicated than most people realize on Android.\nThis is just a util for the most common use cases. If you need special logic\nbeyond this, we recomend you write your own instead of modifying this.\n\nArgs:\ndevice: AndroidDevice, Mobly's Android controller object.\napk_path: string, file path of an apk file.\ntimeout: int, the number of seconds to wait before timing out.\nuser_id: int, the ID of the user to install the apk for. For SDK>=24,\ninstall for the current user by default. Android's multi-user support\ndid not realistically work until SDK 24.\nparams: string list, additional parameters included in the adb install cmd.\n\nRaises:\nAdbError: Installation failed.\nValueError: Attempts to set user_id on SDK<24.", "source": "github-repos"}
{"code": "def __init__(self, *nodes, depth=0):\n        \n        self.edges = set()\n        vertices = []\n        matrix = Matrix(*nodes, depth=depth)\n        for key in matrix.keys:\n            vertices.append(Vertex(key))\n        for l, line in enumerate(matrix.data):\n            for c, cell in enumerate(line):\n                if cell > 0:\n                    self.edges.add(Edge(vertices[l], vertices[c], weight=cell))\n        self.vertices = set(vertices)", "docstring": "Initialization method.\n\nAn intermediary matrix is built to ease the creation of the graph.\n\nArgs:\n*nodes (list of DSM/Package/Module):\nthe nodes on which to build the graph.\ndepth (int): the depth of the intermediary matrix. See\nthe documentation for Matrix class.", "source": "juraj-google-style"}
{"code": "def clean_file(c_source, virtualenv_dirname):\n    \n    with open(c_source, \"r\") as file_obj:\n        contents = file_obj.read().rstrip()\n    \n    py_version = \"python{}.{}\".format(*sys.version_info[:2])\n    lib_path = os.path.join(\n        \".nox\", virtualenv_dirname, \"lib\", py_version, \"site-packages\", \"\"\n    )\n    contents = contents.replace(lib_path, \"\")\n    \n    lines = contents.split(\"\\n\")\n    with open(c_source, \"w\") as file_obj:\n        for line in lines:\n            file_obj.write(line.rstrip() + \"\\n\")", "docstring": "Strip trailing whitespace and clean up \"local\" names in C source.\n\nThese source files are autogenerated from the ``cython`` CLI.\n\nArgs:\nc_source (str): Path to a ``.c`` source file.\nvirtualenv_dirname (str): The name of the ``virtualenv``\ndirectory where Cython is installed (this is part of a\nrelative path ``.nox/{NAME}/lib/...``).", "source": "juraj-google-style"}
{"code": "def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):\n    \n\n    traj = {}\n    trajsize = len(rollout[SampleBatch.ACTIONS])\n    for key in rollout:\n        traj[key] = np.stack(rollout[key])\n\n    if use_gae:\n        assert SampleBatch.VF_PREDS in rollout, \"Values not found!\"\n        vpred_t = np.concatenate(\n            [rollout[SampleBatch.VF_PREDS],\n             np.array([last_r])])\n        delta_t = (\n            traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])\n        \n        \n        traj[Postprocessing.ADVANTAGES] = discount(delta_t, gamma * lambda_)\n        traj[Postprocessing.VALUE_TARGETS] = (\n            traj[Postprocessing.ADVANTAGES] +\n            traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)\n    else:\n        rewards_plus_v = np.concatenate(\n            [rollout[SampleBatch.REWARDS],\n             np.array([last_r])])\n        traj[Postprocessing.ADVANTAGES] = discount(rewards_plus_v, gamma)[:-1]\n        \n        traj[Postprocessing.VALUE_TARGETS] = np.zeros_like(\n            traj[Postprocessing.ADVANTAGES])\n\n    traj[Postprocessing.ADVANTAGES] = traj[\n        Postprocessing.ADVANTAGES].copy().astype(np.float32)\n\n    assert all(val.shape[0] == trajsize for val in traj.values()), \\\n        \"Rollout stacked incorrectly!\"\n    return SampleBatch(traj)", "docstring": "Given a rollout, compute its value targets and the advantage.\n\nArgs:\nrollout (SampleBatch): SampleBatch of a single trajectory\nlast_r (float): Value estimation for last observation\ngamma (float): Discount factor.\nlambda_ (float): Parameter for GAE\nuse_gae (bool): Using Generalized Advantage Estamation\n\nReturns:\nSampleBatch (SampleBatch): Object with experience from rollout and\nprocessed rewards.", "source": "juraj-google-style"}
{"code": "def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':\n    if (data.expires_after < time.time()):\n        del self.data[url]\n        data = None\n    return data", "docstring": "Checks the expiration time for data for a url.\n\nIf the data has expired, it is deleted from the cache.\n\nArgs:\nurl: url to check\ndata: page of data for that url\n\nReturns:\nvalue of either the passed data or None if it expired", "source": "codesearchnet"}
{"code": "def getitem_slot(self, node: cfg.CFGNode, index_var: cfg.Variable) -> tuple[cfg.CFGNode, cfg.Variable]:\n    results = []\n    unresolved = False\n    node, ret = self.call_pytd(node, '__getitem__', index_var)\n    if self.is_concrete:\n        for val in index_var.bindings:\n            try:\n                index = self.ctx.convert.value_to_constant(val.data, int)\n            except abstract_utils.ConversionError:\n                unresolved = True\n            else:\n                self_len = len(self.pyval)\n                if -self_len <= index < self_len:\n                    results.append(self.pyval[index])\n                else:\n                    unresolved = True\n    if unresolved or not self.is_concrete:\n        results.append(ret)\n    return (node, self.ctx.join_variables(node, results))", "docstring": "Implements __getitem__ for List.\n\nArguments:\nnode: The current CFG node.\nindex_var: The Variable containing the index value, the i in lst[i].\n\nReturns:\nTuple of (node, return_variable). node may be the same as the argument.\nreturn_variable is a Variable with bindings of the possible return values.", "source": "github-repos"}
{"code": "def from_millis(cls, timeout_ms):\n    \n    if hasattr(timeout_ms, 'has_expired'):\n      return timeout_ms\n    if timeout_ms is None:\n      return cls(None)\n    return cls(timeout_ms / 1000.0)", "docstring": "Create a new PolledTimeout if needed.\n\nIf timeout_ms is already a PolledTimeout, just return it, otherwise create a\nnew PolledTimeout with the given timeout in milliseconds.\n\nArgs:\ntimeout_ms: PolledTimeout object, or number of milliseconds to use for\ncreating a new one.\n\nReturns:\nA PolledTimeout object that will expire in timeout_ms milliseconds, which\nmay be timeout_ms itself, or a newly allocated PolledTimeout.", "source": "juraj-google-style"}
{"code": "def __deepcopy__(self, memo):\n    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n        v = copy.deepcopy(self._v, memo)\n    copied_variable = type(self)(strategy=self._distribute_strategy, v=v, aggregation=self._aggregation)\n    memo[id(self)] = copied_variable\n    return copied_variable", "docstring": "Perform a deepcopy of the `AggregatingVariable`.\n\nUnlike the deepcopy of a regular tf.Variable, this keeps the original\nstrategy and devices of the `AggregatingVariable`.  To avoid confusion\nwith the behavior of deepcopy on a regular `Variable` (which does\ncopy into new devices), we only allow a deepcopy of a `AggregatingVariable`\nwithin its originating strategy scope.\n\nArgs:\nmemo: The memoization object for `deepcopy`.\n\nReturns:\nA deep copy of the current `AggregatingVariable`.\n\nRaises:\nRuntimeError: If trying to deepcopy into a different strategy.", "source": "github-repos"}
{"code": "def make_mapper(features):\n    if (not features):\n        features = Feature(input=[], transformer=NullTransformer())\n    if (not iterable(features)):\n        features = (features,)\n    return DataFrameMapper([t.as_input_transformer_tuple() for t in features], input_df=True)", "docstring": "Make a DataFrameMapper from a feature or list of features\n\nArgs:\nfeatures (Union[Feature, List[Feature]]): feature or list of features\n\nReturns:\nDataFrameMapper: mapper made from features", "source": "codesearchnet"}
{"code": "def compute_classification_results(self, adv_batches, dataset_batches, dataset_meta, defense_work=None):\n    class_batch_to_work = {}\n    if defense_work:\n        for v in itervalues(defense_work.work):\n            class_batch_to_work[v['output_classification_batch_id']] = v\n    accuracy_matrix = ResultMatrix()\n    error_matrix = ResultMatrix()\n    hit_target_class_matrix = ResultMatrix()\n    processed_images_count = {}\n    total_count = len(self.data)\n    processed_count = 0\n    logging.info('Processing %d files with classification results', len(self.data))\n    for (k, v) in iteritems(self.data):\n        if ((processed_count % 100) == 0):\n            logging.info('Processed %d out of %d classification results', processed_count, total_count)\n        processed_count += 1\n        defense_id = v['submission_id']\n        adv_batch = adv_batches.data[v['adversarial_batch_id']]\n        attack_id = adv_batch['submission_id']\n        work_item = class_batch_to_work.get(k)\n        required_work_stats = ['stat_correct', 'stat_error', 'stat_target_class', 'stat_num_images']\n        if (work_item and work_item['error']):\n            continue\n        if (work_item and all(((work_item.get(i) is not None) for i in required_work_stats))):\n            count_correctly_classified = work_item['stat_correct']\n            count_errors = work_item['stat_error']\n            count_hit_target_class = work_item['stat_target_class']\n            num_images = work_item['stat_num_images']\n        else:\n            logging.warning('Recomputing accuracy for classification batch %s', k)\n            (count_correctly_classified, count_errors, count_hit_target_class, num_images) = analyze_one_classification_result(self._storage_client, v['result_path'], adv_batch, dataset_batches, dataset_meta)\n        accuracy_matrix[(defense_id, attack_id)] += count_correctly_classified\n        error_matrix[(defense_id, attack_id)] += count_errors\n        hit_target_class_matrix[(defense_id, attack_id)] += count_hit_target_class\n        processed_images_count[defense_id] = (processed_images_count.get(defense_id, 0) + num_images)\n    return (accuracy_matrix, error_matrix, hit_target_class_matrix, processed_images_count)", "docstring": "Computes classification results.\n\nArgs:\nadv_batches: instance of AversarialBatches\ndataset_batches: instance of DatasetBatches\ndataset_meta: instance of DatasetMetadata\ndefense_work: instance of DefenseWorkPieces\n\nReturns:\naccuracy_matrix, error_matrix, hit_target_class_matrix,\nprocessed_images_count", "source": "codesearchnet"}
{"code": "def index_bgen(fn, legacy=False):\n    logger.info(\"Indexing {} (BGEN) using 'bgenix'{}\".format(fn, (' (legacy mode)' if legacy else '')))\n    command = ['bgenix', '-g', fn, '-index']\n    if legacy:\n        command.append('-with-rowid')\n    try:\n        logger.info(\"Executing '{}'\".format(' '.join(command)))\n        subprocess.Popen(command).communicate()\n    except FileNotFoundError:\n        logger.error(\"Cannot find 'bgenix', impossible to index {}\".format(fn))\n        sys.exit(1)\n    logger.info('Index generated')", "docstring": "Indexes a BGEN file.\n\nArgs:\nfn (str): The name of the BGEN file.", "source": "codesearchnet"}
{"code": "def tensor_dimension_to_mesh_axis(self, tensor_dimension, mesh_shape):\n    \n    val = [i for i, mesh_dimension in enumerate(mesh_shape)\n           if (tensor_dimension.name, mesh_dimension.name) in self._pairs]\n    if len(val) > 1:\n      raise ValueError(\n          \"Tensor dimension maps to multiple mesh dimensions\"\n          \" tensor_dimension=%s mesh_shape=%s layout=%s\"\n          % (tensor_dimension, mesh_shape, self._pairs))\n    return val[0] if val else None", "docstring": "Mesh axis associated with tensor dimension (or None).\n\nArgs:\ntensor_dimension: Dimension.\nmesh_shape: Shape.\n\nReturns:\nInteger or None.\n\nRaises:\nValueError: If one Tensor dimension maps to two mesh dimensions.", "source": "juraj-google-style"}
{"code": "def _term(self, term):\n        \n        \n        term = str(term)\n        if term:\n            self.__query[\"q\"] += term\n        return self", "docstring": "Add a term to the query.\n\nArguments:\nterm (str): The term to add.\n\nReturns:\nSearchHelper: Self", "source": "juraj-google-style"}
{"code": "def sigmoid(x):\n    return nn.sigmoid(x)", "docstring": "Element-wise sigmoid.\n\nArgs:\nx: A tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def launch_minecraft(port, installdir=\"MalmoPlatform\", replaceable=False):\n    \n    launch_script = './launchClient.sh'\n    if os.name == 'nt':\n        launch_script = 'launchClient.bat'\n    cwd = os.getcwd()\n    os.chdir(installdir)\n    os.chdir(\"Minecraft\")\n    try:\n        cmd = [launch_script, '-port', str(port), '-env']\n        if replaceable:\n            cmd.append('-replaceable')\n        subprocess.check_call(cmd)\n    finally:\n        os.chdir(cwd)", "docstring": "Launch Minecraft listening for malmoenv connections.\nArgs:\nport:  the TCP port to listen on.\ninstalldir: the install dir name. Defaults to MalmoPlatform.\nMust be same as given (or defaulted) in download call if used.\nreplaceable: whether or not to automatically restart Minecraft (default is false).", "source": "juraj-google-style"}
{"code": "def get_parent(self, path):\n    self.__validate_storage_path(path, projects_allowed=False)\n    path_steps = [step for step in path.split('/') if step]\n    del path_steps[(- 1)]\n    parent_path = '/{0}'.format('/'.join(path_steps))\n    return self.api_client.get_entity_by_query(path=parent_path)", "docstring": "Get the parent entity of the entity pointed by the given path.\n\nArgs:\npath (str): The path of the entity whose parent is needed\n\nReturns:\nA JSON object of the parent entity if found.\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def _WriteCacheFile(self, cache_filename, scopes):\n    creds = {'scopes': sorted(list(scopes)), 'svc_acct_name': self.__service_account_name}\n    creds_str = json.dumps(creds)\n    cache_file = _MultiProcessCacheFile(cache_filename)\n    try:\n        cache_file.LockedWrite(creds_str)\n    except KeyboardInterrupt:\n        raise\n    except:\n        pass", "docstring": "Writes the credential metadata to the cache file.\n\nThis does not save the credentials themselves (CredentialStore class\noptionally handles that after this class is initialized).\n\nArgs:\ncache_filename: Cache filename to check.\nscopes: Scopes for the desired credentials.", "source": "codesearchnet"}
{"code": "def write_value(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    try:\n        ostream.write(pack('!Q', self.value))\n    except Exception:\n        self.logger.error('Error writing boolean value to buffer')\n        raise", "docstring": "Write the value of the Boolean object to the output stream.\n\nArgs:\nostream (Stream): A buffer to contain the encoded bytes of the\nvalue of a Boolean object. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def do_test(create_module_fn, exported_names=None, show_debug_info=False):\n    if exported_names is None:\n        exported_names = []\n    logging.set_stderrthreshold('error')\n    tf.enable_v2_behavior()\n\n    def app_main(argv):\n        \n        if len(argv) > 1:\n            raise app.UsageError('Too many command-line arguments.')\n        if FLAGS.save_model_path:\n            save_model_path = FLAGS.save_model_path\n        else:\n            save_model_path = tempfile.mkdtemp(suffix='.saved_model')\n        save_options = tf.saved_model.SaveOptions(save_debug_info=show_debug_info)\n        tf.saved_model.save(create_module_fn(), save_model_path, options=save_options)\n        logging.info('Saved model to: %s', save_model_path)\n        mlir = pywrap_mlir.experimental_convert_saved_model_to_mlir(save_model_path, ','.join(exported_names), show_debug_info)\n        mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir, 'canonicalize', show_debug_info)\n        print(mlir)\n        filename = '%s/result.mlirbc' % save_model_path\n        pywrap_mlir.experimental_write_bytecode(filename, mlir)\n        if not file_io.file_exists(filename):\n            raise app.UsageError('Failed to create bytecode output.')\n    app.run(app_main)", "docstring": "Runs test.\n\n1. Performs absl and tf \"main\"-like initialization that must run before almost\nanything else.\n2. Converts `tf.Module` to SavedModel\n3. Converts SavedModel to MLIR\n4. Prints the textual MLIR to stdout (it is expected that the caller will have\nFileCheck checks in its file to check this output).\n\nThis is only for use by the MLIR SavedModel importer tests.\n\nArgs:\ncreate_module_fn: A callable taking no arguments, which returns the\n`tf.Module` to be converted and printed.\nexported_names: A set of exported names for the MLIR converter (default is\n\"export all\").\nshow_debug_info: If true, shows debug locations in the resulting MLIR.", "source": "github-repos"}
{"code": "def block_diag(*blocks: np.ndarray) -> np.ndarray:\n    for b in blocks:\n        if (b.shape[0] != b.shape[1]):\n            raise ValueError('Blocks must be square.')\n    if (not blocks):\n        return np.zeros((0, 0), dtype=np.complex128)\n    n = sum((b.shape[0] for b in blocks))\n    dtype = functools.reduce(_merge_dtypes, (b.dtype for b in blocks))\n    result = np.zeros(shape=(n, n), dtype=dtype)\n    i = 0\n    for b in blocks:\n        j = (i + b.shape[0])\n        result[(i:j, i:j)] = b\n        i = j\n    return result", "docstring": "Concatenates blocks into a block diagonal matrix.\n\nArgs:\n*blocks: Square matrices to place along the diagonal of the result.\n\nReturns:\nA block diagonal matrix with the given blocks along its diagonal.\n\nRaises:\nValueError: A block isn't square.", "source": "codesearchnet"}
{"code": "def _WriteAttributeContainer(self, attribute_container):\n    if (attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT):\n        (timestamp, serialized_data) = self._serialized_event_heap.PopEvent()\n    else:\n        serialized_data = self._SerializeAttributeContainer(attribute_container)\n    if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB):\n        compressed_data = zlib.compress(serialized_data)\n        serialized_data = sqlite3.Binary(compressed_data)\n    else:\n        compressed_data = ''\n    if self._storage_profiler:\n        self._storage_profiler.Sample('write', attribute_container.CONTAINER_TYPE, len(serialized_data), len(compressed_data))\n    if (attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT):\n        query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'\n        self._cursor.execute(query, (timestamp, serialized_data))\n    else:\n        query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(attribute_container.CONTAINER_TYPE)\n        self._cursor.execute(query, (serialized_data,))\n    identifier = identifiers.SQLTableIdentifier(attribute_container.CONTAINER_TYPE, self._cursor.lastrowid)\n    attribute_container.SetIdentifier(identifier)", "docstring": "Writes an attribute container.\n\nThe table for the container type must exist.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.", "source": "codesearchnet"}
{"code": "def get_available_versions(self, project_name):\n        \n        available_versions = self.pypi_client.package_releases(project_name)\n\n        if not available_versions:\n            available_versions = self.pypi_client.package_releases(\n                project_name.capitalize()\n            )\n\n        \n        return dict(\n            (self._parse_version(version), version)\n            for version in available_versions\n        )", "docstring": "Query PyPI to see if package has any available versions.\n\nArgs:\nproject_name (str): The name the project on PyPI.\n\nReturns:\ndict: Where keys are tuples of parsed versions and values are the\nversions returned by PyPI.", "source": "juraj-google-style"}
{"code": "def contains(self, key):\n    \n    path = self.object_path(key)\n    return os.path.exists(path) and os.path.isfile(path)", "docstring": "Returns whether the object named by `key` exists.\nOptimized to only check whether the file object exists.\n\nArgs:\nkey: Key naming the object to check.\n\nReturns:\nboalean whether the object exists", "source": "juraj-google-style"}
{"code": "def scalar_input_map(func, input_):\n    if util_iter.isiterable(input_):\n        return list(map(func, input_))\n    else:\n        return func(input_)", "docstring": "Map like function\n\nArgs:\nfunc: function to apply\ninput_ : either an iterable or scalar value\n\nReturns:\nIf ``input_`` is iterable this function behaves like map\notherwise applies func to ``input_``", "source": "codesearchnet"}
{"code": "def load_types_for_deserialization(cls, *types_to_deserialize: Type[Any]) -> ContextManager[Dict[str, Type[Any]]]:\n    return cls._TYPE_REGISTRY.load_types_for_deserialization(*types_to_deserialize)", "docstring": "Context manager for loading unregistered types for deserialization.\n\nExample::\n\nclass A(pg.Object):\nauto_register = False\nx: int\n\nclass B(A):\ny: str\nwith pg.JSONConvertile.load_types_for_deserialization(A, B):\npg.from_json_str(A(1).to_json_str())\npg.from_json_str(B(1, 'hi').to_json_str())\n\nArgs:\n*types_to_deserialize: A list of types to be loaded for deserialization.\n\nReturns:\nA context manager within which the objects of the requested types\ncould be deserialized.", "source": "github-repos"}
{"code": "def dataframe(start_row=0, max_rows=None, use_cache=True):\n    \n    output = QueryOutput()\n    output._output_type = 'dataframe'\n    output._dataframe_start_row = start_row\n    output._dataframe_max_rows = max_rows\n    output._use_cache = use_cache\n    return output", "docstring": "Construct a query output object where the result is a dataframe\n\nArgs:\nstart_row: the row of the table at which to start the export (default 0).\nmax_rows: an upper limit on the number of rows to export (default None).\nuse_cache: whether to use cached results or not (default True).", "source": "juraj-google-style"}
{"code": "def docx_table_from_xml_node(table_node: ElementTree.Element,\n                             level: int,\n                             config: TextProcessingConfig) -> str:\n    \n    table = CustomDocxTable()\n    for row_node in table_node:\n        if row_node.tag != DOCX_TABLE_ROW:\n            continue\n        table.new_row()\n        for cell_node in row_node:\n            if cell_node.tag != DOCX_TABLE_CELL:\n                continue\n            table.new_cell()\n            for para_node in cell_node:\n                text = docx_text_from_xml_node(para_node, level, config)\n                if text:\n                    table.add_paragraph(text)\n    return docx_process_table(table, config)", "docstring": "Converts an XML node representing a DOCX table into a textual\nrepresentation.\n\nArgs:\ntable_node: XML node\nlevel: current level in XML hierarchy (used for recursion; start level\nis 0)\nconfig: :class:`TextProcessingConfig` control object\n\nReturns:\nstring representation", "source": "juraj-google-style"}
{"code": "def mix_over_posterior_draws(means, variances):\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n\n  with tf.compat.v1.name_scope(\n      'mix_over_posterior_draws', values=[means, variances]):\n    num_posterior_draws = dist_util.prefer_static_value(\n        tf.shape(input=means))[0]\n\n    component_observations = tfd.Independent(\n        distribution=tfd.Normal(\n            loc=dist_util.move_dimension(means, 0, -2),\n            scale=tf.sqrt(dist_util.move_dimension(variances, 0, -2))),\n        reinterpreted_batch_ndims=1)\n\n    return tfd.MixtureSameFamily(\n        mixture_distribution=tfd.Categorical(\n            logits=tf.zeros([num_posterior_draws],\n                            dtype=component_observations.dtype)),\n        components_distribution=component_observations)", "docstring": "Construct a predictive normal distribution that mixes over posterior draws.\n\nArgs:\nmeans: float `Tensor` of shape\n`[num_posterior_draws, ..., num_timesteps]`.\nvariances: float `Tensor` of shape\n`[num_posterior_draws, ..., num_timesteps]`.\n\nReturns:\nmixture_dist: `tfd.MixtureSameFamily(tfd.Independent(tfd.Normal))` instance\nrepresenting a uniform mixture over the posterior samples, with\n`batch_shape = ...` and `event_shape = [num_timesteps]`.", "source": "juraj-google-style"}
{"code": "class AssertEqual(beam.PTransform):\n\n    def __init__(self, elements: Iterable[Any]):\n        self._elements = elements\n\n    def expand(self, pcoll):\n        return assert_that(pcoll | beam.Map(lambda row: beam.Row(**row._asdict())), equal_to(dicts_to_rows(self._elements)))", "docstring": "Asserts that the input contains exactly the elements provided.\n\nThis is primarily used for testing; it will cause the entire pipeline to\nfail if the input to this transform is not exactly the set of `elements`\ngiven in the config parameter.\n\nAs with Create, YAML/JSON-style mappings are interpreted as Beam rows,\ne.g.::\n\ntype: AssertEqual\ninput: SomeTransform\nconfig:\nelements:\n- {a: 0, b: \"foo\"}\n- {a: 1, b: \"bar\"}\n\nwould ensure that `SomeTransform` produced exactly two elements with values\n`(a=0, b=\"foo\")` and `(a=1, b=\"bar\")` respectively.\n\nArgs:\nelements: The set of elements that should belong to the PCollection.\nYAML/JSON-style mappings will be interpreted as Beam rows.", "source": "github-repos"}
{"code": "def write(self, __text: str) -> None:\n        \n        if __text == os.linesep:\n            self.handle.write(__text)\n        else:\n            frame = inspect.currentframe()\n            if frame is None:\n                filename = 'unknown'\n                lineno = 0\n            else:\n                outer = frame.f_back\n                filename = outer.f_code.co_filename.split(os.sep)[-1]\n                lineno = outer.f_lineno\n            self.handle.write('[{:>15s}:{:03d}] {}'.format(filename[-15:],\n                                                           lineno, __text))", "docstring": "Write text to the debug stream.\n\nArgs:\n__text: Text to write", "source": "juraj-google-style"}
{"code": "def _replace_image(image_url, image_tag, ebook_folder, image_name=None):\n    try:\n        assert isinstance(image_tag, bs4.element.Tag)\n    except AssertionError:\n        raise TypeError(('image_tag cannot be of type ' + str(type(image_tag))))\n    if (image_name is None):\n        image_name = str(uuid.uuid4())\n    try:\n        image_full_path = os.path.join(ebook_folder, 'images')\n        assert os.path.exists(image_full_path)\n        image_extension = save_image(image_url, image_full_path, image_name)\n        image_tag['src'] = (((('images' + '/') + image_name) + '.') + image_extension)\n    except ImageErrorException:\n        image_tag.decompose()\n    except AssertionError:\n        raise ValueError((\"%s doesn't exist or doesn't contain a subdirectory images\" % ebook_folder))\n    except TypeError:\n        image_tag.decompose()", "docstring": "Replaces the src of an image to link to the local copy in the images folder of the ebook. Tightly coupled with bs4\npackage.\n\nArgs:\nimage_url (str): The url of the image.\nimage_tag (bs4.element.Tag): The bs4 tag containing the image.\nebook_folder (str): The directory where the ebook files are being saved. This must contain a subdirectory\ncalled \"images\".\nimage_name (Option[str]): The short name to save the image as. Should not contain a directory or an extension.", "source": "codesearchnet"}
{"code": "def _get_resource_list(self, rsrc_dict):\n        \n        if 'collections' in rsrc_dict:\n            return rsrc_dict['collections']\n        if 'experiments' in rsrc_dict:\n            return rsrc_dict['experiments']\n        if 'channels' in rsrc_dict:\n            return rsrc_dict['channels']\n        if 'coords' in rsrc_dict:\n            return rsrc_dict['coords']\n\n        raise RuntimeError('Invalid list response received from Boss.  No known resource type returned.')", "docstring": "Extracts list of resources from the HTTP response.\n\nArgs:\nrsrc_dict (dict): HTTP response encoded in a dictionary.\n\nReturns:\n(list[string]): List of a type of resource (collections, experiments, etc).\n\nRaises:\n(RuntimeError): If rsrc_dict does not contain any known resources.", "source": "juraj-google-style"}
{"code": "def load_pip_addons(_globals):\n    for package_name in known_pip_addons:\n        (_, username) = package_username(package_name)\n        try:\n            load_addon(username, package_name.replace('-', '_'), _globals)\n        except ImportError:\n            pass", "docstring": "Load all known fabsetup addons which are installed as pypi pip-packages.\n\nArgs:\n_globals(dict): the globals() namespace of the fabric script.\n\nReturn: None", "source": "codesearchnet"}
{"code": "def egress(self, envelope, http_headers, operation, binding_options):\n    if self._logger.isEnabledFor(logging.INFO):\n        service_name = operation.binding.wsdl.services.keys()[0]\n        self._logger.info(_REQUEST_LOG_LINE, service_name, operation.name, binding_options['address'])\n    if self._logger.isEnabledFor(logging.DEBUG):\n        http_headers_safe = http_headers.copy()\n        if (self._AUTHORIZATION_HEADER in http_headers_safe):\n            http_headers_safe[self._AUTHORIZATION_HEADER] = self._REDACTED\n        request_string = etree.tostring(envelope, pretty_print=True)\n        safe_request = self._DEVELOPER_TOKEN_SUB.sub(self._REDACTED, request_string.decode('utf-8'))\n        self._logger.debug(_REQUEST_XML_LOG_LINE, http_headers_safe, safe_request)\n    return (envelope, http_headers)", "docstring": "Overrides the egress function ror request logging.\n\nArgs:\nenvelope: An Element with the SOAP request data.\nhttp_headers: A dict of the current http headers.\noperation: The SoapOperation instance.\nbinding_options: An options dict for the SOAP binding.\n\nReturns:\nA tuple of the envelope and headers.", "source": "codesearchnet"}
{"code": "def send(email, subject=None, from_email=None, to_email=None, cc=None, bcc=None, reply_to=None, smtp=None):\n    if is_string(email):\n        email = EmailContent(email)\n    from_email = sanitize_email_address((from_email or email.headers.get('from')))\n    to_email = sanitize_email_address((to_email or email.headers.get('to')))\n    cc = sanitize_email_address((cc or email.headers.get('cc')))\n    bcc = sanitize_email_address((bcc or email.headers.get('bcc')))\n    reply_to = sanitize_email_address((reply_to or email.headers.get('reply-to')))\n    message_args = {'html': email.html, 'text': email.text, 'subject': (subject or email.headers.get('subject', '')), 'mail_from': from_email, 'mail_to': to_email}\n    if cc:\n        message_args['cc'] = cc\n    if bcc:\n        message_args['bcc'] = bcc\n    if reply_to:\n        message_args['headers'] = {'reply-to': reply_to}\n    message = emails.Message(**message_args)\n    for (filename, data) in email.inline_images:\n        message.attach(filename=filename, content_disposition='inline', data=data)\n    message.send(smtp=smtp)", "docstring": "Send markdown email\n\nArgs:\nemail (str/obj): A markdown string or EmailContent object\nsubject (str): subject line\nfrom_email (str): sender email address\nto_email (str/list): recipient email addresses\ncc (str/list): CC email addresses (string or a list)\nbcc (str/list): BCC email addresses (string or a list)\nreply_to (str): Reply-to email address\nsmtp (dict): SMTP configuration (dict)\n\nSchema of smtp dict:\nhost (str): SMTP server host. Default: localhost\nport (int): SMTP server port. Default: 25\ntls (bool): Use TLS. Default: False\nssl (bool): Use SSL. Default: False\nuser (bool): SMTP login user. Default empty\npassword (bool): SMTP login password. Default empty", "source": "codesearchnet"}
{"code": "def read(self, vals):\n        \n        i = 0\n        {%- for field in fields %}\n        {%- if field.is_list %}\n        count = int(vals[i])\n        i += 1\n        for _ in range(count):\n            obj = {{field.object_name}}()\n            obj.read(vals[i:i + obj.field_count])\n            self.add_{{field.field_name}}(obj)\n            i += obj.field_count\n        {%- else %}\n        if len(vals[i]) == 0:\n            self.{{field.field_name}} = None\n        else:\n            self.{{field.field_name}} = vals[i]\n        i += 1\n        {%- endif %}\n        {%- endfor %}", "docstring": "Read values\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def save(self, filename=None, directory=None):\n    if (filename is not None):\n        self.filename = filename\n    if (directory is not None):\n        self.directory = directory\n    filepath = self.filepath\n    tools.mkdirs(filepath)\n    data = text_type(self.source)\n    with io.open(filepath, 'w', encoding=self.encoding) as fd:\n        fd.write(data)\n        if (not data.endswith(u'\\n')):\n            fd.write(u'\\n')\n    return filepath", "docstring": "Save the DOT source to file. Ensure the file ends with a newline.\n\nArgs:\nfilename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)\ndirectory: (Sub)directory for source saving and rendering.\nReturns:\nThe (possibly relative) path of the saved source file.", "source": "codesearchnet"}
{"code": "def Add(self, service, method, request, global_params=None):\n    method_config = service.GetMethodConfig(method)\n    upload_config = service.GetUploadConfig(method)\n    http_request = service.PrepareHttpRequest(method_config, request, global_params=global_params, upload_config=upload_config)\n    api_request = self.ApiCall(http_request, self.retryable_codes, service, method_config)\n    self.api_requests.append(api_request)", "docstring": "Add a request to the batch.\n\nArgs:\nservice: A class inheriting base_api.BaseApiService.\nmethod: A string indicated desired method from the service. See\nthe example in the class docstring.\nrequest: An input message appropriate for the specified\nservice.method.\nglobal_params: Optional additional parameters to pass into\nmethod.PrepareHttpRequest.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _read_parquet_columns(path, columns, num_splits, kwargs):\n    import pyarrow.parquet as pq\n    df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas()\n    return (_split_result_for_readers(0, num_splits, df) + [len(df.index)])", "docstring": "Use a Ray task to read columns from Parquet into a Pandas DataFrame.\n\nNote: Ray functions are not detected by codecov (thus pragma: no cover)\n\nArgs:\npath: The path of the Parquet file.\ncolumns: The list of column names to read.\nnum_splits: The number of partitions to split the column into.\n\nReturns:\nA list containing the split Pandas DataFrames and the Index as the last\nelement. If there is not `index_col` set, then we just return the length.\nThis is used to determine the total length of the DataFrame to build a\ndefault Index.", "source": "codesearchnet"}
{"code": "def break_bond(self, ind1, ind2, tol=0.2):\n    sites = self._sites\n    clusters = [[sites[ind1]], [sites[ind2]]]\n    sites = [site for (i, site) in enumerate(sites) if (i not in (ind1, ind2))]\n\n    def belongs_to_cluster(site, cluster):\n        for test_site in cluster:\n            if CovalentBond.is_bonded(site, test_site, tol=tol):\n                return True\n        return False\n    while (len(sites) > 0):\n        unmatched = []\n        for site in sites:\n            for cluster in clusters:\n                if belongs_to_cluster(site, cluster):\n                    cluster.append(site)\n                    break\n            else:\n                unmatched.append(site)\n        if (len(unmatched) == len(sites)):\n            raise ValueError('Not all sites are matched!')\n        sites = unmatched\n    return (self.__class__.from_sites(cluster) for cluster in clusters)", "docstring": "Returns two molecules based on breaking the bond between atoms at index\nind1 and ind2.\n\nArgs:\nind1 (int): Index of first site.\nind2 (int): Index of second site.\ntol (float): Relative tolerance to test. Basically, the code\nchecks if the distance between the sites is less than (1 +\ntol) * typical bond distances. Defaults to 0.2, i.e.,\n20% longer.\n\nReturns:\nTwo Molecule objects representing the two clusters formed from\nbreaking the bond.", "source": "codesearchnet"}
{"code": "def _on_report(self, sequence, topic, message):\n        \n\n        try:\n            conn_key = self._find_connection(topic)\n            conn_id = self.conns.get_connection_id(conn_key)\n        except ArgumentError:\n            self._logger.warn(\"Dropping report message that does not correspond with a known connection, topic=%s\", topic)\n            return\n\n        try:\n            rep_msg = messages.ReportNotification.verify(message)\n\n            serialized_report = {}\n            serialized_report['report_format'] = rep_msg['report_format']\n            serialized_report['encoded_report'] = rep_msg['report']\n            serialized_report['received_time'] = datetime.datetime.strptime(rep_msg['received_time'].encode().decode(), \"%Y%m%dT%H:%M:%S.%fZ\")\n\n            report = self.report_parser.deserialize_report(serialized_report)\n            self._trigger_callback('on_report', conn_id, report)\n        except Exception:\n            self._logger.exception(\"Error processing report conn_id=%d\", conn_id)", "docstring": "Process a report received from a device.\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "juraj-google-style"}
{"code": "def random_channel_shift(x, intensity_range, channel_axis=0):\n    intensity = np.random.uniform(-intensity_range, intensity_range)\n    return apply_channel_shift(x, intensity, channel_axis=channel_axis)", "docstring": "Performs a random channel shift.\n\nDEPRECATED.\n\nArgs:\nx: Input tensor. Must be 3D.\nintensity_range: Transformation intensity.\nchannel_axis: Index of axis for channels in the input tensor.\n\nReturns:\nNumpy image tensor.", "source": "github-repos"}
{"code": "def _apply_conv(self, inputs, w):\n    \n    if self._data_format == DATA_FORMAT_NWC:\n      h_dim = 1\n      two_dim_conv_data_format = DATA_FORMAT_NHWC\n    else:\n      h_dim = 2\n      two_dim_conv_data_format = DATA_FORMAT_NCHW\n\n    inputs = tf.expand_dims(inputs, axis=h_dim)\n    two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]\n\n    \n    two_dim_conv_rate = (1,) + self._rate\n\n    w_dw, w_pw = w\n    outputs = tf.nn.separable_conv2d(inputs,\n                                     w_dw,\n                                     w_pw,\n                                     strides=two_dim_conv_stride,\n                                     rate=two_dim_conv_rate,\n                                     padding=self._conv_op_padding,\n                                     data_format=two_dim_conv_data_format)\n    outputs = tf.squeeze(outputs, [h_dim])\n    return outputs", "docstring": "Apply a `separable_conv2d` operation on `inputs` using `w`.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\nw: A tuple of weight matrices of the same type as `inputs`, the first\nbeing the depthwise weight matrix, and the second being the pointwise\nweight matrix.\n\nReturns:\noutputs: The result of the convolution operation on `inputs`.", "source": "juraj-google-style"}
{"code": "def load_dot_env_file(dot_env_path):\n    \n    if not os.path.isfile(dot_env_path):\n        return {}\n\n    logger.log_info(\"Loading environment variables from {}\".format(dot_env_path))\n    env_variables_mapping = {}\n\n    with io.open(dot_env_path, 'r', encoding='utf-8') as fp:\n        for line in fp:\n            \n            if \"=\" in line:\n                variable, value = line.split(\"=\", 1)\n            elif \":\" in line:\n                variable, value = line.split(\":\", 1)\n            else:\n                raise exceptions.FileFormatError(\".env format error\")\n\n            env_variables_mapping[variable.strip()] = value.strip()\n\n    utils.set_os_environ(env_variables_mapping)\n    return env_variables_mapping", "docstring": "load .env file.\n\nArgs:\ndot_env_path (str): .env file path\n\nReturns:\ndict: environment variables mapping\n\n{\n\"UserName\": \"debugtalk\",\n\"Password\": \"123456\",\n\"PROJECT_KEY\": \"ABCDEFGH\"\n}\n\nRaises:\nexceptions.FileFormatError: If .env file format is invalid.", "source": "juraj-google-style"}
{"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor:\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        discount_curve = cashflow_streams.get_discount_curve(self._discount_curve_type, market, self._discount_curve_mask)\n        currencies = [cur.currency.value for cur in self._discount_curve_type]\n        vol_surface = equity_utils.get_vol_surface(currencies, self._equity, market, self._equity_mask)\n        spots = tf.stack(market.spot(currencies, self._equity), axis=0)\n        discount_factors = discount_curve.discount_factor(self._expiry_date.expand_dims(axis=-1))\n        daycount_convention = discount_curve.daycount_convention\n        day_count_fn = market_data_utils.get_daycount_fn(daycount_convention)\n        if spots.shape.rank > 0:\n            spots = tf.gather(spots, self._equity_mask)\n        if self._model == 'BS-LSM':\n            vols = vol_surface.volatility(expiry_dates=self._expiry_date.expand_dims(axis=-1), strike=tf.expand_dims(self._strike, axis=-1))\n            prices = utils.bs_lsm_price(spots=spots, expiry_times=day_count_fn(start_date=market.date, end_date=self._expiry_date, dtype=self._dtype), strikes=self._strike, volatility=tf.squeeze(vols, axis=-1), discount_factors=tf.squeeze(discount_factors), is_call_option=self._is_call_option, num_samples=self._num_samples, num_exercise_times=self._num_exercise_times, num_calibration_samples=self._num_calibration_samples, seed=self._seed)\n            return self._short_position * self._contract_amount * prices\n        else:\n            raise ValueError('Only BS-LSM model is supported. Supplied {}'.format(self._model))", "docstring": "Returns the present value of the American options.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA `Tensor` of shape `batch_shape`  containing the modeled price of each\nAmerican option contract based on the input market data.", "source": "github-repos"}
{"code": "def write(self, face, data, viewport=None, *, alignment=1) -> None:\n    if (type(data) is Buffer):\n        data = data.mglo\n    self.mglo.write(face, data, viewport, alignment)", "docstring": "Update the content of the texture.\n\nArgs:\nface (int): The face to update.\ndata (bytes): The pixel data.\nviewport (tuple): The viewport.\n\nKeyword Args:\nalignment (int): The byte alignment of the pixels.", "source": "codesearchnet"}
{"code": "def _parse_dbpath(dbpath):\n    if isinstance(dbpath, list):\n        dbpath = '|'.join(dbpath)\n    if (not dbpath.endswith('$')):\n        dbpath = ('(%s)$' % dbpath)\n    return dbpath", "docstring": "Converts the dbpath to a regexp pattern.\n\nTransforms dbpath from a string or an array of strings to a\nregexp pattern which will be used to match database names.\n\nArgs:\ndbpath: a string or an array containing the databases to be matched\nfrom a cluster.\n\nReturns:\nA regexp pattern that will match any of the desired databases on\non a cluster.", "source": "codesearchnet"}
{"code": "def window_unpartition(windows, window_size, pad_height_width, height_width):\n    padded_height, padded_width = pad_height_width\n    height, width = height_width\n    batch_size = windows.shape[0] \n    hidden_state = windows.view(batch_size, padded_height \n    hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous()\n    hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1)\n    hidden_state = hidden_state[:, :height, :width, :].contiguous()\n    return hidden_state", "docstring": "Window unpartition into original sequences and removing padding.\n\nArgs:\nwindows (`torch.Tensor`):\nInput tokens with [batch_size * num_windows, window_size, window_size, num_channels].\nwindow_size (`int`):\nWindow size.\npad_height_width (`Tuple[int]`):\nPadded height and width (padded_height, padded_width).\nheight_width (`Tuple[int]`):\nOriginal height and width before padding.\n\nReturns:\nhidden_state: unpartitioned sequences with [batch_size, height, width, num_channels].", "source": "github-repos"}
{"code": "def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):\n    config_dict = {}\n    config_dict['text_config'] = text_config\n    config_dict['vision_config'] = vision_config\n    return cls.from_dict(config_dict, **kwargs)", "docstring": "Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision\nmodel configuration.\n\nReturns:\n[`Owlv2Config`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def retry(self, retry_message=''):\n    \n    if not self.async:\n      raise UnexpectedPipelineError(\n          'May only call retry() method for asynchronous pipelines.')\n    if self.try_cancel():\n      self._context.transition_retry(self._pipeline_key, retry_message)\n      return True\n    else:\n      return False", "docstring": "Forces a currently running asynchronous pipeline to retry.\n\nNote this may not be called by synchronous or generator pipelines. Those\nmust instead raise the 'Retry' exception during execution.\n\nArgs:\nretry_message: Optional message explaining why the retry happened.\n\nReturns:\nTrue if the Pipeline should be retried, False if it cannot be cancelled\nmid-flight for some reason.", "source": "juraj-google-style"}
{"code": "def gumbel_softmax(x, z_size, mode, softmax_k=0, temperature_warmup_steps=150000, summary=True, name=None):\n    with tf.variable_scope(name, default_name='gumbel_softmax'):\n        m = tf.layers.dense(x, (2 ** z_size), name='mask')\n        if (softmax_k > 0):\n            (m, kl) = top_k_softmax(m, softmax_k)\n            return (m, m, (1.0 - tf.reduce_mean(kl)))\n        logsm = tf.nn.log_softmax(m)\n        gumbel_samples = gumbel_sample(common_layers.shape_list(m))\n        steps = temperature_warmup_steps\n        gumbel_samples *= (common_layers.inverse_exp_decay((steps \n        temperature = (1.2 - common_layers.inverse_lin_decay(steps))\n        temperature = tf.cond(tf.less(tf.random_uniform([]), 0.9), (lambda : temperature), (lambda : tf.random_uniform([], minval=0.5, maxval=1.0)))\n        s = tf.nn.softmax(((logsm + gumbel_samples) / temperature))\n        m = tf.nn.softmax(m)\n        kl = (- tf.reduce_max(logsm, axis=(- 1)))\n        if summary:\n            tf.summary.histogram('max-log', tf.reshape(kl, [(- 1)]))\n        maxvec = tf.reshape(tf.argmax(m, axis=(- 1)), [(- 1)])\n        maxvhot = tf.stop_gradient(tf.one_hot(maxvec, (2 ** z_size)))\n        distrib = (tf.reshape(logsm, [(- 1), (2 ** z_size)]) * maxvhot)\n        d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)\n        d_variance = tf.reduce_mean(tf.squared_difference(distrib, d_mean), axis=[0])\n        d_dev = (- tf.reduce_mean(d_variance))\n        ret = s\n        if (mode != tf.estimator.ModeKeys.TRAIN):\n            ret = tf.reshape(maxvhot, common_layers.shape_list(s))\n        return (m, ret, ((d_dev * 5.0) + (tf.reduce_mean(kl) * 0.002)))", "docstring": "Gumbel softmax discretization bottleneck.\n\nArgs:\nx: Input to the discretization bottleneck.\nz_size: Number of bits, where discrete codes range from 1 to 2**z_size.\nmode: tf.estimator.ModeKeys.\nsoftmax_k: If > 0 then do top-k softmax.\ntemperature_warmup_steps: Number of steps it takes to decay temperature to\n0.\nsummary: Whether to write summaries.\nname: Name for the bottleneck scope.\n\nReturns:\nEmbedding function, discrete code, and loss.", "source": "codesearchnet"}
{"code": "def temporal_latent_to_dist(name, x, hparams, output_channels=None):\n  \n  _, _, width, _, res_channels = common_layers.shape_list(x)\n  if output_channels is None:\n    output_channels = res_channels\n  dilation_rates = get_dilation_rates(hparams, width)\n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    h = x\n    for i in range(hparams.latent_encoder_depth):\n      if hparams.latent_apply_dilations:\n        h2 = dilated_conv_stack(\"dil_latent_3d_res_%d\" % i, h,\n                                mid_channels=hparams.latent_encoder_width,\n                                output_channels=res_channels,\n                                dilation_rates=dilation_rates,\n                                activation=hparams.latent_activation,\n                                dropout=hparams.latent_dropout)\n      else:\n        h2 = conv_stack(\"latent_3d_res_%d\" % i, h,\n                        mid_channels=hparams.latent_encoder_width,\n                        output_channels=res_channels,\n                        activation=hparams.latent_activation,\n                        dropout=hparams.latent_dropout)\n      h += h2\n\n    \n    \n    h = h[:, -1, :, :, :]\n    h = conv(\"res_final\", h, apply_actnorm=False, conv_init=\"zeros\",\n             output_channels=2*output_channels, filter_size=[1, 1])\n    mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2]\n  return tfp.distributions.Normal(mean, tf.exp(log_scale))", "docstring": "Network that maps a time-indexed list of 3-D latents to a gaussian.\n\nArgs:\nname: variable scope.\nx: List of 4-D Tensors indexed by time, (NHWC)\nhparams: tf.contrib.training.Hparams.\noutput_channels: int, Number of channels of the output gaussian mean.\nReturns:\ndist: tfp.distributions.Normal", "source": "juraj-google-style"}
{"code": "def _save(filename, tensor_names, tensors, tensor_slices=None, name='save'):\n    if tensor_slices is None:\n        return gen_io_ops.save(filename, tensor_names, tensors, name=name)\n    else:\n        return gen_io_ops.save_slices(filename, tensor_names, tensor_slices, tensors, name=name)", "docstring": "Save a list of tensors to a file with given names.\n\nExample usage without slice info:\nSave(\"/foo/bar\", [\"w\", \"b\"], [w, b])\n\nExample usage with slices:\nSave(\"/foo/bar\", [\"w\", \"w\"], [slice0, slice1],\ntensor_slices=[\"4 10 0,2:-\", \"4 10 2,2:-\"])\n\nArgs:\nfilename: the file name of the sstable.\ntensor_names: a list of strings.\ntensors: the list of tensors to be saved.\ntensor_slices: Optional list of strings to specify the shape and slices of\na larger virtual tensor that each tensor is a part of.  If not specified\neach tensor is saved as a full slice.\nname: string.  Optional name for the op.\n\nRequires:\nThe length of tensors should match the size of tensor_names and of\ntensor_slices.\n\nReturns:\nAn Operation that saves the tensors.", "source": "github-repos"}
{"code": "def HashFilePath(self, path, byte_count):\n    \n    with open(path, \"rb\") as fd:\n      self.HashFile(fd, byte_count)", "docstring": "Updates underlying hashers with file on a given path.\n\nArgs:\npath: A path to the file that is going to be fed to the hashers.\nbyte_count: A maximum numbers of bytes that are going to be processed.", "source": "juraj-google-style"}
{"code": "def one_of(self, chset: str) -> str:\n        \n        res = self.peek()\n        if res in chset:\n            self.offset += 1\n            return res\n        raise UnexpectedInput(self, \"one of \" + chset)", "docstring": "Parse one character form the specified set.\n\nArgs:\nchset: string of characters to try as alternatives.\n\nReturns:\nThe character that was actually matched.\n\nRaises:\nUnexpectedInput: If the next character is not in `chset`.", "source": "juraj-google-style"}
{"code": "def pnlSingle(\n            self, account: str = '', modelCode: str = '',\n            conId: int = 0) -> List[PnLSingle]:\n        \n        return [v for v in self.wrapper.pnlSingles.values() if\n                (not account or v.account == account) and\n                (not modelCode or v.modelCode == modelCode) and\n                (not conId or v.conId == conId)]", "docstring": "List of subscribed :class:`.PnLSingle` objects (profit and loss for\nsingle positions).\n\nThe :class:`.PnLSingle` objects are kept live updated.\n\nArgs:\naccount: If specified, filter for this account name.\nmodelCode: If specified, filter for this account model.\nconId: If specified, filter for this contract ID.", "source": "juraj-google-style"}
{"code": "def _create_key_func(extractor, none_is_largest):\n  \n  if none_is_largest:\n    def key_func_none_is_largest(session_group):\n      value = extractor(session_group)\n      return (value is None, value)\n    return key_func_none_is_largest\n  def key_func_none_is_smallest(session_group):\n    value = extractor(session_group)\n    return (value is not None, value)\n  return key_func_none_is_smallest", "docstring": "Returns a key_func to be used in list.sort().\n\nReturns a key_func to be used in list.sort() that sorts session groups\nby the value extracted by extractor. 'None' extracted values will either\nbe considered largest or smallest as specified by the \"none_is_largest\"\nboolean parameter.\n\nArgs:\nextractor: An extractor function that extract the key from the session\ngroup.\nnone_is_largest: bool. If true treats 'None's as largest; otherwise\nsmallest.", "source": "juraj-google-style"}
{"code": "def time(func, *args, **kwargs):\n    start_time = time_module.time()\n    func(*args, **kwargs)\n    end_time = time_module.time()\n    return (end_time - start_time)", "docstring": "Call the supplied function with the supplied arguments,\nand return the total execution time as a float in seconds.\n\nThe precision of the returned value depends on the precision of\n`time.time()` on your platform.\n\nArguments:\nfunc: the function to run.\n*args: positional arguments to pass into the function.\n**kwargs: keyword arguments to pass into the function.\nReturns:\nExecution time of the function as a float in seconds.", "source": "codesearchnet"}
{"code": "def words(self, index = None):\n        \n        if index is None:\n            return self.select(Word,None,True,default_ignore_structure)\n        else:\n            if index < 0:\n                index = self.count(Word,None,True,default_ignore_structure) + index\n            for i, e in enumerate(self.select(Word,None,True,default_ignore_structure)):\n                if i == index:\n                    return e\n            raise IndexError", "docstring": "Returns a generator of Word elements found (recursively) under this element.\n\nArguments:\n* ``index``: If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all", "source": "juraj-google-style"}
{"code": "def print_tools(self, pattern=None, buf=sys.stdout):\n    seen = set()\n    rows = []\n    context = self.context\n    if context:\n        data = context.get_tools()\n        conflicts = set(context.get_conflicting_tools().keys())\n        for (_, (variant, tools)) in sorted(data.items()):\n            pkg_str = variant.qualified_package_name\n            for tool in tools:\n                if (pattern and (not fnmatch(tool, pattern))):\n                    continue\n                if (tool in conflicts):\n                    label = '(in conflict)'\n                    color = critical\n                else:\n                    label = ''\n                    color = None\n                rows.append([tool, '-', pkg_str, 'active context', label, color])\n                seen.add(tool)\n    for suite in self.suites:\n        for (tool, d) in suite.get_tools().iteritems():\n            if (tool in seen):\n                continue\n            if (pattern and (not fnmatch(tool, pattern))):\n                continue\n            label = []\n            color = None\n            path = which(tool)\n            if path:\n                path_ = os.path.join(suite.tools_path, tool)\n                if (path != path_):\n                    label.append((\"(hidden by unknown tool '%s')\" % path))\n                    color = warning\n            variant = d['variant']\n            if isinstance(variant, set):\n                pkg_str = ', '.join(variant)\n                label.append('(in conflict)')\n                color = critical\n            else:\n                pkg_str = variant.qualified_package_name\n            orig_tool = d['tool_name']\n            if (orig_tool == tool):\n                orig_tool = '-'\n            label = ' '.join(label)\n            source = (\"context '%s' in suite '%s'\" % (d['context_name'], suite.load_path))\n            rows.append([tool, orig_tool, pkg_str, source, label, color])\n            seen.add(tool)\n    _pr = Printer(buf)\n    if (not rows):\n        _pr('No matching tools.')\n        return False\n    headers = [['TOOL', 'ALIASING', 'PACKAGE', 'SOURCE', '', None], ['----', '--------', '-------', '------', '', None]]\n    rows = (headers + sorted(rows, key=(lambda x: x[0].lower())))\n    print_colored_columns(_pr, rows)\n    return True", "docstring": "Print a list of visible tools.\n\nArgs:\npattern (str): Only list tools that match this glob pattern.", "source": "codesearchnet"}
{"code": "def _getAuthenticated(self, browser, url):\n    try:\n        if (len(self.creds) > 0):\n            c = random.choice(self.creds)[0]\n            browser.setNewPassword(url, c.user, c.password)\n            return True\n        else:\n            raise NoCredentialsException(str(self))\n    except AttributeError as e:\n        raise BadImplementationError(str(e))", "docstring": "Getting authenticated.\n\nThis method may be overwritten.\n\nTODO: update to version 2 of the wrappers.\n\nArgs:\n-----\nbrowser: The browser in which the user will be authenticated.\nurl: The URL to get authenticated in.\n\nReturn:\n-------\nTrue or False.\n\nRaises:\n------\nNoCredentialsException: If no valid credentials have been found.\nBadImplementationError: If an expected attribute is missing.", "source": "codesearchnet"}
{"code": "def get_tree_starting_at(module: str, edges: List[Tuple[str, str]]) -> List[Union[str, List[str]]]:\n    vertices_seen = [module]\n    new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and ('__init__.py' not in edge[1])]\n    tree = [module]\n    while len(new_edges) > 0:\n        tree.append(new_edges)\n        final_vertices = list({edge[1] for edge in new_edges})\n        vertices_seen.extend(final_vertices)\n        new_edges = [edge for edge in edges if edge[0] in final_vertices and edge[1] not in vertices_seen and ('__init__.py' not in edge[1])]\n    return tree", "docstring": "Returns the tree starting at a given module following all edges.\n\nArgs:\nmodule (`str`): The module that will be the root of the subtree we want.\neges (`List[Tuple[str, str]]`): The list of all edges of the tree.\n\nReturns:\n`List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges\nstarting at module], [list of edges starting at the preceding level], ...]", "source": "github-repos"}
{"code": "def get_destination(self, filepath, targetdir=None):\n    dst = self.change_extension(filepath, 'css')\n    if targetdir:\n        dst = os.path.join(targetdir, dst)\n    return dst", "docstring": "Return destination path from given source file path.\n\nDestination is allways a file with extension ``.css``.\n\nArgs:\nfilepath (str): A file path. The path is allways relative to\nsources directory. If not relative, ``targetdir`` won't be\njoined.\nabsolute (bool): If given will be added at beginning of file\npath.\n\nReturns:\nstr: Destination filepath.", "source": "codesearchnet"}
{"code": "def preprocess_bel_stmt(stmt: str) -> str:\n    \n\n    stmt = stmt.strip()  \n    stmt = re.sub(r\",+\", \",\", stmt)  \n    stmt = re.sub(r\",\", \", \", stmt)  \n    stmt = re.sub(r\" +\", \" \", stmt)  \n\n    return stmt", "docstring": "Clean up basic formatting of BEL statement\n\nArgs:\nstmt: BEL statement as single string\n\nReturns:\ncleaned BEL statement", "source": "juraj-google-style"}
{"code": "def get_sub_category(alt_len, ref_len, category, svtype=None):\n    \n    subcategory = ''\n\n    if category in ('snv', 'indel', 'cancer'):\n        if ref_len == alt_len:\n            subcategory = 'snv'\n        else:\n            subcategory = 'indel'\n    elif category == 'sv':\n        subcategory = svtype\n\n    return subcategory", "docstring": "Get the subcategory for a VCF variant\n\nThe sub categories are:\n'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv'\n\nArgs:\nalt_len(int)\nref_len(int)\ncategory(str)\nsvtype(str)\n\nReturns:\nsubcategory(str)", "source": "juraj-google-style"}
{"code": "def from_flag(cls, flagname, flag_values, other_flag_values=None):\n    first_module = flag_values.find_module_defining_flag(flagname, default='<unknown>')\n    if (other_flag_values is None):\n        second_module = _helpers.get_calling_module()\n    else:\n        second_module = other_flag_values.find_module_defining_flag(flagname, default='<unknown>')\n    flag_summary = flag_values[flagname].help\n    msg = (\"The flag '%s' is defined twice. First from %s, Second from %s.  Description from first occurrence: %s\" % (flagname, first_module, second_module, flag_summary))\n    return cls(msg)", "docstring": "Creates a DuplicateFlagError by providing flag name and values.\n\nArgs:\nflagname: str, the name of the flag being redefined.\nflag_values: FlagValues, the FlagValues instance containing the first\ndefinition of flagname.\nother_flag_values: FlagValues, if it is not None, it should be the\nFlagValues object where the second definition of flagname occurs.\nIf it is None, we assume that we're being called when attempting\nto create the flag a second time, and we use the module calling\nthis one as the source of the second definition.\n\nReturns:\nAn instance of DuplicateFlagError.", "source": "codesearchnet"}
{"code": "def log_uuid(self, uuid):\n    if ((uuid not in self.uuids) and (uuid in uuids)):\n        self.uuids[uuid] = uuids[uuid].describe()", "docstring": "Logs the object with the specified `uuid` to `self.uuids` if\npossible.\n\nArgs:\nuuid (str): string value of :meth:`uuid.uuid4` value for the\nobject.", "source": "codesearchnet"}
{"code": "def list_partitions(self, table, retry=DEFAULT_RETRY):\n    table = _table_arg_to_table_ref(table, default_project=self.project)\n    meta_table = self.get_table(TableReference(self.dataset(table.dataset_id, project=table.project), ('%s$__PARTITIONS_SUMMARY__' % table.table_id)))\n    subset = [col for col in meta_table.schema if (col.name == 'partition_id')]\n    return [row[0] for row in self.list_rows(meta_table, selected_fields=subset, retry=retry)]", "docstring": "List the partitions in a table.\n\nArguments:\ntable (Union[ \\\n:class:`~google.cloud.bigquery.table.Table`, \\\n:class:`~google.cloud.bigquery.table.TableReference`, \\\nstr, \\\n]):\nThe table or reference from which to get partition info\nretry (google.api_core.retry.Retry):\n(Optional) How to retry the RPC.\n\nReturns:\nList[str]:\nA list of the partition ids present in the partitioned table", "source": "codesearchnet"}
{"code": "def __getitem__(self, pkg_id):\n        \n        if pkg_id in self.__reg_software:\n            return self.__reg_software[pkg_id]\n        else:\n            raise KeyError(pkg_id)", "docstring": "Returns information on a package.\n\nArgs:\npkg_id (str): Package Id of the software/component\n\nReturns:\ndict or list: List if ``version_only`` is ``True`` otherwise dict", "source": "juraj-google-style"}
{"code": "def prepare_for_model(self, ids: List[int], pair_ids: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, None]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:\n    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n    pair = bool(pair_ids is not None)\n    len_ids = len(ids)\n    len_pair_ids = len(pair_ids) if pair else 0\n    if return_token_type_ids and (not add_special_tokens):\n        raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')\n    if return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and (pair_ids is not None):\n        raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.')\n    if return_token_type_ids is None:\n        return_token_type_ids = 'token_type_ids' in self.model_input_names\n    if return_attention_mask is None:\n        return_attention_mask = 'attention_mask' in self.model_input_names\n    encoded_inputs = {}\n    total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)\n    overflowing_tokens = []\n    if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and (total_len > max_length):\n        ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride)\n    if return_overflowing_tokens:\n        encoded_inputs['overflowing_tokens'] = overflowing_tokens\n        encoded_inputs['num_truncated_tokens'] = total_len - max_length\n    if add_special_tokens:\n        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)\n        token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)\n    else:\n        sequence = ids + pair_ids if pair else ids\n        token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])\n    encoded_inputs['input_ids'] = sequence\n    if return_token_type_ids:\n        encoded_inputs['token_type_ids'] = token_type_ids\n    if return_special_tokens_mask:\n        if add_special_tokens:\n            encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)\n        else:\n            encoded_inputs['special_tokens_mask'] = [0] * len(sequence)\n    self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)\n    if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:\n        encoded_inputs = self.pad(encoded_inputs, max_length=max_length, 
padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    if return_length:\n        encoded_inputs['length'] = len(encoded_inputs['input_ids'])\n    batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids*\ndifferent than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return\noverflowing tokens. Such a combination of arguments will raise an error.\n\nArgs:\nids (`List[int]`):\nTokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and\n`convert_tokens_to_ids` methods.\npair_ids (`List[int]`, *optional*):\nTokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`\nand `convert_tokens_to_ids` methods.", "source": "github-repos"}
{"code": "def to_json_string(self) -> str:\n    dictionary = self.to_dict()\n    for key, value in dictionary.items():\n        if isinstance(value, np.ndarray):\n            dictionary[key] = value.tolist()\n    _processor_class = dictionary.pop('_processor_class', None)\n    if _processor_class is not None:\n        dictionary['processor_class'] = _processor_class\n    return json.dumps(dictionary, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nReturns:\n`str`: String containing all the attributes that make up this feature_extractor instance in JSON format.", "source": "github-repos"}
{"code": "def binary_cross_entropy_loss_with_logits(x, target, name=None):\n  \n  with tf.name_scope(name, 'binary_cross_entropy_with_logits',\n                     [x, target]) as scope:\n    x.get_shape().assert_is_compatible_with(target.get_shape())\n    neg_softplus = -tf.nn.softplus(-x)\n    return -tf.add(tf.multiply(target, neg_softplus),\n                   tf.multiply(1 - target, -x + neg_softplus),\n                   name=scope)", "docstring": "Calculates the binary cross entropy between sigmoid(x) and target.\n\nExpects unscaled logits. Do not pass in results of sigmoid operation.\n\nArgs:\nx: the calculated pre-sigmoid values\ntarget: the desired values.\nname: the name for this op, defaults to binary_cross_entropy_with_logits\nReturns:\n-(target * -softplus(-x) + (1-target) * (-x - softplus(-x)))\nRaises:\nValueError: If shapes are incompatible.", "source": "juraj-google-style"}
{"code": "def forward(self, layer_input):\n    bsz, length, emb_size = layer_input.size()\n    layer_input = layer_input.reshape(-1, emb_size)\n    _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n    expert_inputs = layer_input[batch_index]\n    hidden_states = self.input_linear(expert_inputs, expert_size)\n    chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n    hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n    expert_outputs = self.output_linear(hidden_states, expert_size)\n    expert_outputs = expert_outputs * batch_gates[:, None]\n    zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n    layer_output = zeros.index_add(0, batch_index, expert_outputs)\n    layer_output = layer_output.view(bsz, length, self.input_size)\n    return (layer_output, router_logits)", "docstring": "Forward pass of the mixture of experts layer.\n\nArgs:\nlayer_input (Tensor):\nInput tensor.\n\nReturns:\nTensor:\nOutput tensor.\nTensor:\nRouter logits.", "source": "github-repos"}
{"code": "def get_model_schema_and_features(model_dir):\n  \n  schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')\n  schema = json.loads(file_io.read_file_to_string(schema_file))\n  features_file = os.path.join(model_dir, 'assets.extra', 'features.json')\n  features_config = json.loads(file_io.read_file_to_string(features_file))\n  return schema, features_config", "docstring": "Get a local model's schema and features config.\n\nArgs:\nmodel_dir: local or GCS path of a model.\nReturns:\nA tuple of schema (list) and features config (dict).", "source": "juraj-google-style"}
{"code": "def claim(self, file_readers):\n    (prefix_to_reader, unclaimed_readers) = self._find_strelka_files(file_readers)\n    prefix_by_patients = self._split_prefix_by_patient(prefix_to_reader)\n    self._validate_vcf_readers(prefix_by_patients)\n    vcf_readers = self._create_vcf_readers(prefix_to_reader)\n    return (unclaimed_readers, vcf_readers)", "docstring": "Recognizes and claims Strelka VCFs form the set of all input VCFs.\n\nEach defined caller has a chance to evaluate and claim all the incoming\nfiles as something that it can process.\n\nArgs:\nfile_readers: the collection of currently unclaimed files\n\nReturns:\nA tuple of unclaimed readers and StrelkaVcfReaders.", "source": "codesearchnet"}
{"code": "def __init__(self, listener, dispatcher):\n        \n        logger.info('Creating %s', ClearlyServer.__name__)\n\n        self.listener = listener\n        self.dispatcher = dispatcher", "docstring": "Constructs a server instance.\n\nArgs:\nlistener (EventListener): the object that listens and keeps celery events\ndispatcher (StreamingDispatcher): the mechanism to dispatch data to clients", "source": "juraj-google-style"}
{"code": "def automatic_linemode(divisions, ibz):\n        \n        kpoints = list()\n        labels = list()\n        for path in ibz.kpath[\"path\"]:\n            kpoints.append(ibz.kpath[\"kpoints\"][path[0]])\n            labels.append(path[0])\n            for i in range(1, len(path) - 1):\n                kpoints.append(ibz.kpath[\"kpoints\"][path[i]])\n                labels.append(path[i])\n                kpoints.append(ibz.kpath[\"kpoints\"][path[i]])\n                labels.append(path[i])\n\n            kpoints.append(ibz.kpath[\"kpoints\"][path[-1]])\n            labels.append(path[-1])\n\n        return Kpoints(\"Line_mode KPOINTS file\",\n                       style=Kpoints.supported_modes.Line_mode,\n                       coord_type=\"Reciprocal\",\n                       kpts=kpoints,\n                       labels=labels,\n                       num_kpts=int(divisions))", "docstring": "Convenient static constructor for a KPOINTS in mode line_mode.\ngamma centered Monkhorst-Pack grids and the number of subdivisions\nalong each reciprocal lattice vector determined by the scheme in the\nVASP manual.\n\nArgs:\ndivisions: Parameter determining the number of k-points along each\nhight symetry lines.\nibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)\n\nReturns:\nKpoints object", "source": "juraj-google-style"}
{"code": "def _CopyFromDateTimeValues(self, date_time_values):\n    \n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n\n    self._normalized_timestamp = None\n    self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n    self._time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds)\n\n    self.is_local_time = False", "docstring": "Copies time elements from date and time values.\n\nArgs:\ndate_time_values  (dict[str, int]): date and time values, such as year,\nmonth, day of month, hours, minutes, seconds, microseconds.", "source": "juraj-google-style"}
{"code": "def fit(self, x, augment=False, rounds=1, seed=None):\n    x = np.asarray(x, dtype=self.dtype)\n    if x.ndim != 4:\n        raise ValueError('Input to `.fit()` should have rank 4. Got array with shape: ' + str(x.shape))\n    if x.shape[self.channel_axis] not in {1, 3, 4}:\n        warnings.warn('Expected input to be images (as Numpy array) following the data format convention \"' + self.data_format + '\" (channels on axis ' + str(self.channel_axis) + '), i.e. expected either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. However, it was passed an array with shape ' + str(x.shape) + ' (' + str(x.shape[self.channel_axis]) + ' channels).')\n    if seed is not None:\n        np.random.seed(seed)\n    x = np.copy(x)\n    if self.rescale:\n        x *= self.rescale\n    if augment:\n        ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype)\n        for r in range(rounds):\n            for i in range(x.shape[0]):\n                ax[i + r * x.shape[0]] = self.random_transform(x[i])\n        x = ax\n    if self.featurewise_center:\n        self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n        broadcast_shape = [1, 1, 1]\n        broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n        self.mean = np.reshape(self.mean, broadcast_shape)\n        x -= self.mean\n    if self.featurewise_std_normalization:\n        self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n        broadcast_shape = [1, 1, 1]\n        broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n        self.std = np.reshape(self.std, broadcast_shape)\n        x /= self.std + 1e-06\n    if self.zca_whitening:\n        n = len(x)\n        flat_x = np.reshape(x, (n, -1))\n        u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)\n        s_inv = np.sqrt(n) / (s + self.zca_epsilon)\n        self.zca_whitening_matrix = (u * s_inv).dot(u.T)", "docstring": "Fits the data generator to some sample data.\n\nThis computes the internal data stats related to the\ndata-dependent transformations, based on an array of sample data.\n\nOnly required if `featurewise_center` or\n`featurewise_std_normalization` or `zca_whitening`\nare set to `True`.\n\nWhen `rescale` is set to a value, rescaling is applied to\nsample data before computing the internal data stats.\n\nArgs:\nx: Sample data. Should have rank 4.\nIn case of grayscale data,\nthe channels axis should have value 1, in case\nof RGB data, it should have value 3, and in case\nof RGBA data, it should have value 4.\naugment: Boolean (default: False).\nWhether to fit on randomly augmented samples.\nrounds: Int (default: 1).\nIf using data augmentation (`augment=True`),\nthis is how many augmentation passes over the data to use.\nseed: Int (default: None). Random seed.", "source": "github-repos"}
{"code": "def add_one(self, url: str,\n                url_properties: Optional[URLProperties]=None,\n                url_data: Optional[URLData]=None):\n        \n        self.add_many([AddURLInfo(url, url_properties, url_data)])", "docstring": "Add a single URL to the table.\n\nArgs:\nurl: The URL to be added\nurl_properties: Additional values to be saved\nurl_data: Additional data to be saved", "source": "juraj-google-style"}
{"code": "def norm(self, coords: Vector3Like, frac_coords: bool = True) -> float:\n        \n        return np.sqrt(self.dot(coords, coords, frac_coords=frac_coords))", "docstring": "Compute the norm of vector(s).\n\nArgs:\ncoords:\nArray-like object with the coordinates.\nfrac_coords:\nBoolean stating whether the vector corresponds to fractional or\ncartesian coordinates.\n\nReturns:\none-dimensional `numpy` array.", "source": "juraj-google-style"}
{"code": "def _safe_close(self, sess: session.Session):\n    try:\n        sess.close()\n    except Exception:\n        pass", "docstring": "Closes a session without raising an exception.\n\nJust like sess.close() but ignores exceptions.\n\nArgs:\nsess: A `Session`.", "source": "github-repos"}
{"code": "def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):\n        \n        if varnames is not None:\n            \n            varnames = [s.strip() for s in list_strings(varnames)]\n            dlist = collections.defaultdict(list)\n            for task in self.select_tasks(nids=nids, wslice=wslice):\n                dstruct = task.input.structure.as_dict(fmt=\"abivars\")\n\n                for vname in varnames:\n                    value = task.input.get(vname, None)\n                    if value is None: \n                        value = dstruct.get(vname, None)\n                    if value is not None:\n                        dlist[vname].append((task, value))\n\n            for vname in varnames:\n                tv_list = dlist[vname]\n                if not tv_list:\n                    stream.write(\"[%s]: Found 0 tasks with this variable\\n\" % vname)\n                else:\n                    stream.write(\"[%s]: Found %s tasks with this variable\\n\" % (vname, len(tv_list)))\n                    for i, (task, value) in enumerate(tv_list):\n                        stream.write(\"   %s --> %s\\n\" % (str(value), task))\n                stream.write(\"\\n\")\n\n        else:\n            lines = []\n            for task in self.select_tasks(nids=nids, wslice=wslice):\n                s = task.make_input(with_header=True)\n\n                \n                if task.deps:\n                    s += \"\\n\\nDependencies:\\n\" + \"\\n\".join(str(dep) for dep in task.deps)\n                else:\n                    s += \"\\n\\nDependencies: None\"\n\n                lines.append(2*\"\\n\" + 80 * \"=\" + \"\\n\" + s + 2*\"\\n\")\n\n            stream.writelines(lines)", "docstring": "Print the input of the tasks to the given stream.\n\nArgs:\nvarnames:\nList of Abinit variables. If not None, only the variable in varnames\nare selected and printed.\nnids:\nList of node identifiers. By defaults all nodes are shown\nwslice:\nSlice object used to select works.\nstream:\nFile-like object, Default: sys.stdout", "source": "juraj-google-style"}
{"code": "def eval(self, session=None):\n    raise NotImplementedError", "docstring": "In a session, computes and returns the value of this variable.\n\nThis is not a graph construction method, it does not add ops to the graph.\n\nThis convenience method requires a session where the graph\ncontaining this variable has been launched. If no session is\npassed, the default session is used.  See `tf.compat.v1.Session` for more\ninformation on launching a graph and on sessions.\n\n```python\nv = tf.Variable([1, 2])\ninit = tf.compat.v1.global_variables_initializer()\n\nwith tf.compat.v1.Session() as sess:\nsess.run(init)\n# Usage passing the session explicitly.\nprint(v.eval(sess))\n# Usage with the default session.  The 'with' block\n# above makes 'sess' the default session.\nprint(v.eval())\n```\n\nArgs:\nsession: The session to use to evaluate this variable. If none, the\ndefault session is used.\n\nReturns:\nA numpy `ndarray` with a copy of the value of this variable.", "source": "github-repos"}
{"code": "def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):\n    \n    try:\n      mrulist = self._ParseMRUListValue(registry_key)\n    except (ValueError, errors.ParseError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse MRUList value with error: {0!s}'.format(exception))\n      return\n\n    if not mrulist:\n      return\n\n    values_dict = {}\n    found_terminator = False\n    for entry_index, entry_letter in enumerate(mrulist):\n      \n      if entry_letter == 0:\n        break\n\n      if found_terminator:\n        parser_mediator.ProduceExtractionWarning((\n            'found additional MRUList entries after terminator in key: '\n            '{0:s}.').format(registry_key.path))\n\n        \n        found_terminator = False\n\n      entry_letter = chr(entry_letter)\n\n      value_string = self._ParseMRUListEntryValue(\n          parser_mediator, registry_key, entry_index, entry_letter,\n          codepage=codepage)\n\n      value_text = 'Index: {0:d} [MRU Value {1:s}]'.format(\n          entry_index + 1, entry_letter)\n\n      values_dict[value_text] = value_string\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n    event_data.source_append = self._SOURCE_APPEND\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract event objects from a MRUList Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"}
{"code": "def get_sanger_unevaluated(store, institute_id, user_id):\n    \n\n    \n    \n    sanger_ordered_by_case = store.sanger_ordered(institute_id, user_id)\n    unevaluated = []\n\n    \n    for item in sanger_ordered_by_case:\n        case_id = item['_id']\n        \n        case_obj = store.case(case_id=case_id)\n\n        if not case_obj: \n            continue\n\n        case_display_name = case_obj.get('display_name')\n\n        \n        varid_list = item['vars']\n\n        unevaluated_by_case = {}\n        unevaluated_by_case[case_display_name] = []\n\n        for var_id in varid_list:\n            \n            variant_obj = store.variant(document_id=var_id, case_id=case_id)\n\n            \n            if variant_obj is None or variant_obj.get('sanger_ordered') is None or variant_obj.get('sanger_ordered') is False:\n                continue\n\n            validation = variant_obj.get('validation', 'not_evaluated')\n\n            \n            if validation in ['True positive', 'False positive']:\n                continue\n\n            unevaluated_by_case[case_display_name].append(variant_obj['_id'])\n\n        \n        if len(unevaluated_by_case[case_display_name]) > 0:\n            unevaluated.append(unevaluated_by_case)\n\n    return unevaluated", "docstring": "Get all variants for an institute having Sanger validations ordered but still not evaluated\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str)\n\nReturns:\nunevaluated: a list that looks like this: [ {'case1': [varID_1, varID_2, .., varID_n]}, {'case2' : [varID_1, varID_2, .., varID_n]} ],\nwhere the keys are case_ids and the values are lists of variants with Sanger ordered but not yet validated", "source": "juraj-google-style"}
{"code": "def __init__(self, min_bundle_size=0, desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE, columns=None, with_filename=False, label='ReadAllFiles'):\n    super().__init__()\n    source_from_file = partial(_ParquetSource, min_bundle_size=min_bundle_size, columns=columns)\n    self._read_all_files = filebasedsource.ReadAllFiles(True, CompressionTypes.UNCOMPRESSED, desired_bundle_size, min_bundle_size, source_from_file, with_filename)\n    self.label = label", "docstring": "Initializes ``ReadAllFromParquet``.\n\nArgs:\nmin_bundle_size: the minimum size in bytes, to be considered when\nsplitting the input into bundles.\ndesired_bundle_size: the desired size in bytes, to be considered when\nsplitting the input into bundles.\ncolumns: list of columns that will be read from files. A column name\nmay be a prefix of a nested field, e.g. 'a' will select\n'a.b', 'a.c', and 'a.d.e'\nwith_filename: If True, returns a Key Value with the key being the file\nname and the value being the actual data. If False, it only returns\nthe data.", "source": "github-repos"}
{"code": "def _infer_state_dtype(explicit_dtype, state):\n    if explicit_dtype is not None:\n        return explicit_dtype\n    elif nest.is_nested(state):\n        inferred_dtypes = [element.dtype for element in nest.flatten(state)]\n        if not inferred_dtypes:\n            raise ValueError(f'Unable to infer dtype from argument state={state}.')\n        all_same = all((x == inferred_dtypes[0] for x in inferred_dtypes))\n        if not all_same:\n            raise ValueError(f'Argument state={state} has tensors of different inferred dtypes. Unable to infer a single representative dtype. Dtypes received: {inferred_dtypes}')\n        return inferred_dtypes[0]\n    else:\n        return state.dtype", "docstring": "Infer the dtype of an RNN state.\n\nArgs:\nexplicit_dtype: explicitly declared dtype or None.\nstate: RNN's hidden state. Must be a Tensor or a nested iterable containing\nTensors.\n\nReturns:\ndtype: inferred dtype of hidden state.\n\nRaises:\nValueError: if `state` has heterogeneous dtypes or is empty.", "source": "github-repos"}
{"code": "def root(self) -> bytes:\n    retry_interval = ADB_ROOT_RETRY_ATTEMPT_INTERVAL_SEC\n    for attempt in range(ADB_ROOT_RETRY_ATTEMPTS):\n        try:\n            return self._exec_adb_cmd('root', args=None, shell=False, timeout=None, stderr=None)\n        except AdbError as e:\n            if attempt + 1 < ADB_ROOT_RETRY_ATTEMPTS:\n                logging.debug('Retry the command \"%s\" since Error \"%s\" occurred.' % (utils.cli_cmd_to_string(e.cmd), e.stderr.decode('utf-8').strip()))\n                time.sleep(retry_interval)\n                retry_interval *= 2\n            else:\n                raise e", "docstring": "Enables ADB root mode on the device.\n\nThis method will retry to execute the command `adb root` when an\nAdbError occurs, since sometimes the error `adb: unable to connect\nfor root: closed` is raised when executing `adb root` immediately after\nthe device is booted to OS.\n\nReturns:\nA string that is the stdout of root command.\n\nRaises:\nAdbError: If the command exit code is not 0.", "source": "github-repos"}
{"code": "def links(res: requests.models.Response, search: str=None, pattern: str=None) -> list:\n    hrefs = [link.to_text() for link in find_all_links(res.text)]\n    if search:\n        hrefs = [href for href in hrefs if (search in href)]\n    if pattern:\n        hrefs = [href for href in hrefs if re.findall(pattern, href)]\n    return list(set(hrefs))", "docstring": "Get the links of the page.\n\nArgs:\nres (requests.models.Response): The response of the page.\nsearch (str, optional): Defaults to None. Search the links you want.\npattern (str, optional): Defaults to None. Search the links use a regex pattern.\n\nReturns:\nlist: All the links of the page.", "source": "codesearchnet"}
{"code": "def draw(self, filename, color=True):\n    verify_dependencies(['pgv'])\n    if (not hasattr(self, '_results')):\n        raise RuntimeError('Graph cannot be drawn before it is executed. Try calling run() first.')\n    g = pgv.AGraph(directed=True)\n    g.node_attr['colorscheme'] = 'set312'\n    for elem in self._results:\n        if (not hasattr(elem, 'history')):\n            continue\n        log = elem.history\n        while log:\n            source_from = (log.parent[6] if log.parent else '')\n            s_node = hash((source_from, log[2]))\n            s_color = stim_list.index(log[2])\n            s_color = ((s_color % 12) + 1)\n            t_node = hash((log[6], log[7]))\n            t_style = ('filled,' if color else '')\n            t_style += ('dotted' if log.implicit else '')\n            if log[6].endswith('Extractor'):\n                t_color = '\n            elif log[6].endswith('Filter'):\n                t_color = '\n            else:\n                t_color = '\n            r_node = hash((log[6], log[5]))\n            r_color = stim_list.index(log[5])\n            r_color = ((r_color % 12) + 1)\n            if color:\n                g.add_node(s_node, label=log[2], shape='ellipse', style='filled', fillcolor=s_color)\n                g.add_node(t_node, label=log[6], shape='box', style=t_style, fillcolor=t_color)\n                g.add_node(r_node, label=log[5], shape='ellipse', style='filled', fillcolor=r_color)\n            else:\n                g.add_node(s_node, label=log[2], shape='ellipse')\n                g.add_node(t_node, label=log[6], shape='box', style=t_style)\n                g.add_node(r_node, label=log[5], shape='ellipse')\n            g.add_edge(s_node, t_node, style=t_style)\n            g.add_edge(t_node, r_node, style=t_style)\n            log = log.parent\n    g.draw(filename, prog='dot')", "docstring": "Render a plot of the graph via pygraphviz.\n\nArgs:\nfilename (str): Path to save the generated image to.\ncolor (bool): If True, will color graph nodes based on their type,\notherwise will draw a black-and-white graph.", "source": "codesearchnet"}
{"code": "def _get_or_load_domain(self, domain):\n    if isinstance(domain, six.string_types):\n        if (domain in self.domains):\n            return self.domains[domain]\n        elif exists(domain):\n            with open(domain, 'r') as fobj:\n                domain = json.load(fobj)\n        else:\n            raise ValueError(\"No domain could be found/loaded from input '{}'; value must be either the name of an existing Domain, or a valid path to a configuration file.\".format(domain))\n    name = domain['name']\n    if (name in self.domains):\n        msg = \"Domain with name '{}' already exists; returning existing Domain configuration.\".format(name)\n        warnings.warn(msg)\n        return self.domains[name]\n    entities = domain.get('entities', [])\n    domain = Domain(domain)\n    for e in entities:\n        self.add_entity(domain=domain, **e)\n    self.domains[name] = domain\n    return self.domains[name]", "docstring": "Return a domain if one already exists, or create a new one if not.\n\nArgs:\ndomain (str, dict): Can be one of:\n- The name of the Domain to return (fails if none exists)\n- A path to the Domain configuration file\n- A dictionary containing configuration information", "source": "codesearchnet"}
{"code": "def inter_data_operation(self, axis, func, other):\n        \n        if axis:\n            partitions = self.row_partitions\n            other_partitions = other.row_partitions\n        else:\n            partitions = self.column_partitions\n            other_partitions = other.column_partitions\n        func = self.preprocess_func(func)\n        result = np.array(\n            [\n                partitions[i].apply(\n                    func,\n                    num_splits=self._compute_num_partitions(),\n                    other_axis_partition=other_partitions[i],\n                )\n                for i in range(len(partitions))\n            ]\n        )\n        return self.__constructor__(result) if axis else self.__constructor__(result.T)", "docstring": "Apply a function that requires two BaseFrameManager objects.\n\nArgs:\naxis: The axis to apply the function over (0 - rows, 1 - columns)\nfunc: The function to apply\nother: The other BaseFrameManager object to apply func to.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "juraj-google-style"}
{"code": "def chain_break_frequency(samples, embedding):\n    counts = {v: 0 for v in embedding}\n    total = 0\n    for sample in samples:\n        for (v, chain) in iteritems(embedding):\n            vals = [sample[u] for u in chain]\n            if (not _all_equal(vals)):\n                counts[v] += 1\n        total += 1\n    return {v: (counts[v] / total) for v in embedding}", "docstring": "Determines the frequency of chain breaks in the given samples.\n\nArgs:\nsamples (iterable): An iterable of samples where each sample\nis a dict of the form {v: val, ...} where v is a variable\nin the target graph and val is the associated value as\ndetermined by a binary quadratic model sampler.\nembedding (dict): The mapping from the source graph to the target graph.\nShould be of the form {v: {s, ...}, ...} where v is a variable in the\nsource model and s is a variable in the target model.\n\nReturns:\ndict: The frequency of chain breaks in the form {v: f, ...} where v\nis a variable in the source graph and frequency is the fraction\nof chains that were broken as a float.", "source": "codesearchnet"}
{"code": "def removeRouter(self, xRouterId):\n        \n        print '%s call removeRouter' % self.port\n        print xRouterId\n        routerId = ''\n        routerId = self.__convertRlocToRouterId(xRouterId)\n        print routerId\n\n        if routerId == None:\n            print 'no matched xRouterId'\n            return False\n\n        try:\n            cmd = 'releaserouterid %s' % routerId\n            return self.__sendCommand(cmd)[0] != 'Fail'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('removeRouter() Error: ' + str(e))", "docstring": "kick router with a given router id from the Thread Network\n\nArgs:\nxRouterId: a given router id in hex format\n\nReturns:\nTrue: successful to remove the router from the Thread Network\nFalse: fail to remove the router from the Thread Network", "source": "juraj-google-style"}
{"code": "def _CreateAnalysisPlugins(self, options):\n    if (not self._analysis_plugins):\n        return {}\n    analysis_plugins = analysis_manager.AnalysisPluginManager.GetPluginObjects(self._analysis_plugins)\n    for analysis_plugin in analysis_plugins.values():\n        helpers_manager.ArgumentHelperManager.ParseOptions(options, analysis_plugin)\n    return analysis_plugins", "docstring": "Creates the analysis plugins.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nReturns:\ndict[str, AnalysisPlugin]: analysis plugins and their names.", "source": "codesearchnet"}
{"code": "def documentation(self, level='first'):\n    docs = (t.docstring for t in (list(self.conjunction.terms) + [self]) if (t.docstring is not None))\n    if (level.lower() == 'first'):\n        doc = next(docs, None)\n    elif (level.lower() == 'top'):\n        doc = list(docs)\n    return doc", "docstring": "Return the documentation of the type.\n\nBy default, this is the first docstring on a top-level term.\nBy setting *level* to `\"top\"`, the list of all docstrings on\ntop-level terms is returned, including the type's `docstring`\nvalue, if not `None`, as the last item. The docstring for the\ntype itself is available via :attr:`TypeDefinition.docstring`.\n\nArgs:\nlevel (str): `\"first\"` or `\"top\"`\nReturns:\na single docstring or a list of docstrings", "source": "codesearchnet"}
{"code": "def create(self, domain, type_name, search_command, body):\n    return self._request(domain, type_name, search_command, 'POST', body)", "docstring": "Create entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\nbody (str): JSON serialized data.", "source": "codesearchnet"}
{"code": "def _convert_ddb_list_to_list(conversion_list):\n    \n    ret_list = []\n    for v in conversion_list:\n        for v1 in v:\n            ret_list.append(v[v1])\n    return ret_list", "docstring": "Given a dynamodb list, it will return a python list without the dynamodb\ndatatypes\n\nArgs:\nconversion_list (dict): a dynamodb list which includes the\ndatatypes\n\nReturns:\nlist: Returns a sanitized list without the dynamodb datatypes", "source": "juraj-google-style"}
{"code": "def transform_absolute_coords(self, width, height):\n    if (self.type != EventType.POINTER_MOTION_ABSOLUTE):\n        raise AttributeError(_wrong_meth.format(self.type))\n    abs_x = self._libinput.libinput_event_pointer_get_absolute_x_transformed(self._handle, width)\n    abs_y = self._libinput.libinput_event_pointer_get_absolute_y_transformed(self._handle, height)\n    return (abs_x, abs_y)", "docstring": "Return the current absolute coordinates of the pointer event,\ntransformed to screen coordinates.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,\nthis method raises :exc:`AttributeError`.\n\nArgs:\nwidth (int): The current output screen width.\nheight (int): The current output screen height.\nReturns:\n(float, float): The current absolute (x, y) coordinates transformed\nto a screen coordinates.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def getJsonFromApi(view, request):\n    jsonText = view(request)\n    jsonText = json.loads(jsonText.content.decode('utf-8'))\n    return jsonText", "docstring": "Return json from querying Web Api\n\nArgs:\nview: django view function.\nrequest: http request object got from django.\n\nReturns: json format dictionary", "source": "codesearchnet"}
{"code": "def force_checkpoint_conversion(value=True):\n    global _FORCE_CHECKPOINT_CONVERSION\n    _FORCE_CHECKPOINT_CONVERSION = value", "docstring": "Forces checkpoint to use the new implementation.\n\nThe new checkpoint implementation is changing the saved metadata slightly,\nand therefore may break forward compatibility in newly saved checkpoints. This\nmeans:\n\n- Previous versions of TensorFlow may not be able to load new checkpoints.\n- Backwards compatibility is unchanged: Old checkpoints can still be loaded.\n\nTensorFlow guarantees 3 weeks of forward compatibility, so this flag will be\nremoved in the future weeks, after which checkpoint conversion will happen by\ndefault.\n\n**What happens when this flag is enabled?**\n\nThe checkpoint will be saved with different metadata, meaning that previous\nversions of TensorFlow (<=2.10) will not be able to load this checkpoint.\n\nArgs:\nvalue: Boolean value, whether or not to force checkpoint conversion to the\nnew implementation.", "source": "github-repos"}
{"code": "def nack(self, items):\n        \n        self.modify_ack_deadline(\n            [requests.ModAckRequest(ack_id=item.ack_id, seconds=0) for item in items]\n        )\n        self.drop([requests.DropRequest(*item) for item in items])", "docstring": "Explicitly deny receipt of messages.\n\nArgs:\nitems(Sequence[NackRequest]): The items to deny.", "source": "juraj-google-style"}
{"code": "def get_help_usage(command):\n    \n    \n    if not command:\n        doc = get_primary_command_usage()\n    elif command in ('-a', '--all'):\n        subcommands = [k for k in settings.subcommands if k is not None]\n        available_commands = subcommands + ['help']\n        command_doc = '\\nAvailable commands:\\n{}\\n'.format(\n            '\\n'.join('  {}'.format(c) for c in sorted(available_commands)))\n        doc = get_primary_command_usage(command_doc)\n    elif command.startswith('-'):\n        raise ValueError(\"Unrecognized option '{}'.\".format(command))\n    elif command in settings.subcommands:\n        subcommand = settings.subcommands[command]\n        doc = format_usage(subcommand.__doc__)\n    docopt.docopt(doc, argv=('--help',))", "docstring": "Print out a help message and exit the program.\n\nArgs:\ncommand: If a command value is supplied then print the help message for\nthe command module if available. If the command is '-a' or '--all',\nthen print the standard help message but with a full list of\navailable commands.\n\nRaises:\nValueError: Raised if the help message is requested for an invalid\ncommand or an unrecognized option is passed to help.", "source": "juraj-google-style"}
{"code": "def _choose_random_edge(self, edges: Set[EDGE]) -> Optional[EDGE]:\n        \n        if edges:\n            index = self._rand.randint(len(edges))\n            for e in edges:\n                if not index:\n                    return e\n                index -= 1\n        return None", "docstring": "Picks random edge from the set of edges.\n\nArgs:\nedges: Set of edges to pick from.\n\nReturns:\nRandom edge from the supplied set, or None for empty set.", "source": "juraj-google-style"}
{"code": "def _encode_value(self, value):\n    if isinstance(value, (int, float, str, bool, datetime)):\n        return value\n    elif isinstance(value, list):\n        return [self._encode_value(item) for item in value]\n    elif isinstance(value, dict):\n        result = {}\n        for (key, item) in value.items():\n            result[key] = self._encode_value(item)\n        return result\n    else:\n        return self._gridfs.put(Binary(pickle.dumps(value)), workflow_id=self._workflow_id)", "docstring": "Encodes the value such that it can be stored into MongoDB.\n\nAny primitive types are stored directly into MongoDB, while non-primitive types\nare pickled and stored as GridFS objects. The id pointing to a GridFS object\nreplaces the original value.\n\nArgs:\nvalue (object): The object that should be encoded for storing in MongoDB.\n\nReturns:\nobject: The encoded value ready to be stored in MongoDB.", "source": "codesearchnet"}
{"code": "def NewFromJSON(data):\n        \n        s = Shake(\n            id=data.get('id', None),\n            name=data.get('name', None),\n            url=data.get('url', None),\n            thumbnail_url=data.get('thumbnail_url', None),\n            description=data.get('description', None),\n            type=data.get('type', None),\n            created_at=data.get('created_at', None),\n            updated_at=data.get('updated_at', None)\n        )\n        if data.get('owner', None):\n            s.owner = User.NewFromJSON(data.get('owner', None))\n        return s", "docstring": "Create a new Shake instance from a JSON dict.\n\nArgs:\ndata (dict): JSON dictionary representing a Shake.\n\nReturns:\nA Shake instance.", "source": "juraj-google-style"}
{"code": "def reopen_encoded(fileobj, mode='r', fallback_encoding=None):\n    \n    encoding = determine_encoding(fileobj.name, fallback_encoding)\n    fileobj.close()\n    return open(fileobj.name, mode, encoding=encoding)", "docstring": "Makes sure that a file was opened with some valid encoding.\n\nArguments:\nfileobj (file): The file-object.\nmode (str, optional): The mode in which to re-open the file.\nfallback_encoding (str, optional): The encoding in which to re-open\nthe file if it does not specify an encoding itself.\n\nReturns:\nfile: The re-opened file.", "source": "juraj-google-style"}
{"code": "def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):\n  \n  if cifar_version == \"cifar10\":\n    url = _CIFAR10_URL\n    train_files = _CIFAR10_TRAIN_FILES\n    test_files = _CIFAR10_TEST_FILES\n    prefix = _CIFAR10_PREFIX\n    image_size = _CIFAR10_IMAGE_SIZE\n    label_key = \"labels\"\n  elif cifar_version == \"cifar100\" or cifar_version == \"cifar20\":\n    url = _CIFAR100_URL\n    train_files = _CIFAR100_TRAIN_FILES\n    test_files = _CIFAR100_TEST_FILES\n    prefix = _CIFAR100_PREFIX\n    image_size = _CIFAR100_IMAGE_SIZE\n    if cifar_version == \"cifar100\":\n      label_key = \"fine_labels\"\n    else:\n      label_key = \"coarse_labels\"\n\n  _get_cifar(tmp_dir, url)\n  data_files = train_files if training else test_files\n  all_images, all_labels = [], []\n  for filename in data_files:\n    path = os.path.join(tmp_dir, prefix, filename)\n    with tf.gfile.Open(path, \"rb\") as f:\n      if six.PY2:\n        data = cPickle.load(f)\n      else:\n        data = cPickle.load(f, encoding=\"latin1\")\n    images = data[\"data\"]\n    num_images = images.shape[0]\n    images = images.reshape((num_images, 3, image_size, image_size))\n    all_images.extend([\n        np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)\n    ])\n    labels = data[label_key]\n    all_labels.extend([labels[j] for j in range(num_images)])\n  return image_utils.image_generator(\n      all_images[start_from:start_from + how_many],\n      all_labels[start_from:start_from + how_many])", "docstring": "Image generator for CIFAR-10 and 100.\n\nArgs:\ncifar_version: string; one of \"cifar10\" or \"cifar100\"\ntmp_dir: path to temporary storage directory.\ntraining: a Boolean; if true, we use the train set, otherwise the test set.\nhow_many: how many images and labels to generate.\nstart_from: from which image to start.\n\nReturns:\nAn instance of image_generator that produces CIFAR-10 images and labels.", "source": "juraj-google-style"}
{"code": "def _create_RSA_private_key(self,\n                                bytes):\n        \n\n        try:\n            private_key = serialization.load_pem_private_key(\n                bytes,\n                password=None,\n                backend=default_backend()\n            )\n            return private_key\n        except Exception:\n            private_key = serialization.load_der_private_key(\n                bytes,\n                password=None,\n                backend=default_backend()\n            )\n            return private_key", "docstring": "Instantiates an RSA key from bytes.\n\nArgs:\nbytes (byte string): Bytes of RSA private key.\nReturns:\nprivate_key\n(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):\nRSA private key created from key bytes.", "source": "juraj-google-style"}
{"code": "def diff_charsToLines(self, diffs, lineArray):\n    \n    for i in range(len(diffs)):\n      text = []\n      for char in diffs[i][1]:\n        text.append(lineArray[ord(char)])\n      diffs[i] = (diffs[i][0], \"\".join(text))", "docstring": "Rehydrate the text in a diff from a string of line hashes to real lines\nof text.\n\nArgs:\ndiffs: Array of diff tuples.\nlineArray: Array of unique strings.", "source": "juraj-google-style"}
{"code": "def get_gutter_client(alias='default', cache=CLIENT_CACHE, **kwargs):\n    from gutter.client.models import Manager\n    if (not alias):\n        return Manager(**kwargs)\n    elif (alias not in cache):\n        cache[alias] = Manager(**kwargs)\n    return cache[alias]", "docstring": "Creates gutter clients and memoizes them in a registry for future quick access.\n\nArgs:\nalias (str or None): Name of the client. Used for caching.\nIf name is falsy then do not use the cache.\ncache (dict): cache to store gutter managers in.\n**kwargs: kwargs to be passed the Manger class.\n\nReturns (Manager):\nA gutter client.", "source": "codesearchnet"}
{"code": "def __instancecheck__(cls, other):\n        \n        \n        try:\n            return bool(\n                isinstance(other, cls.__type__) and cls(other)  \n            )\n        except ValueError:\n            return False", "docstring": "Determine if an instance is of the sliced type and within bounds.\n\nArgs:\nother: The instance to test.\n\nReturns:\nTrue if the object is both of the same type as sliced by the\ncreated class as well as within the bounds defined by the class.", "source": "juraj-google-style"}
{"code": "async def verify_parent_task(chain, link):\n    worker_type = get_worker_type(link.task)\n    if (worker_type not in chain.context.config['valid_decision_worker_types']):\n        raise CoTError('{} is not a valid decision workerType!'.format(worker_type))\n    if (chain is not link):\n        path = link.get_artifact_full_path('public/task-graph.json')\n        if (not os.path.exists(path)):\n            raise CoTError(\"{} {}: {} doesn't exist!\".format(link.name, link.task_id, path))\n        link.task_graph = load_json_or_yaml(path, is_path=True, exception=CoTError, message=\"Can't load {}! %(exc)s\".format(path))\n        for target_link in chain.get_all_links_in_chain():\n            if ((target_link.parent_task_id == link.task_id) and (target_link.task_id != link.task_id) and (target_link.task_type not in PARENT_TASK_TYPES)):\n                verify_link_in_task_graph(chain, link, target_link)\n    try:\n        (await verify_parent_task_definition(chain, link))\n    except (BaseDownloadError, KeyError) as e:\n        raise CoTError(e)", "docstring": "Verify the parent task Link.\n\nAction task verification is currently in the same verification function as\ndecision tasks, because sometimes we'll have an action task masquerading as\na decision task, e.g. in templatized actions for release graphs. To make\nsure our guess of decision or action task isn't fatal, we call this\nfunction; this function uses ``is_action()`` to determine how to verify\nthe task.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\nlink (LinkOfTrust): the task link we're checking.\n\nRaises:\nCoTError: on chain of trust verification error.", "source": "codesearchnet"}
{"code": "def apply_strain(self, strain):\n        \n        s = (1 + np.array(strain)) * np.eye(3)\n        self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)", "docstring": "Apply a strain to the lattice.\n\nArgs:\nstrain (float or list): Amount of strain to apply. Can be a float,\nor a sequence of 3 numbers. E.g., 0.01 means all lattice\nvectors are increased by 1%. This is equivalent to calling\nmodify_lattice with a lattice with lattice parameters that\nare 1% larger.", "source": "juraj-google-style"}
{"code": "def register_scenario(self, scenario_name, handler):\n    if (scenario_name in self._known_scenarios):\n        raise ArgumentError('Attempted to add the same scenario name twice', scenario_name=scenario_name, previous_handler=self._known_scenarios[scenario_name])\n    self._known_scenarios[scenario_name] = handler", "docstring": "Register a scenario handler for this object.\n\nScenario handlers are callable functions with no positional arguments\nthat can be called by name with the load_scenario function and should\nprepare the emulated object into a known state.  The purpose of a\nscenario is to make it easy to get a device into a specific state for\ntesting purposes that may otherwise be difficult or time consuming to\nprepare on the physical, non-emulated device.\n\nArgs:\nscenario_name (str): The name of this scenario that can be passed to\nload_scenario later in order to invoke the scenario.\nhandler (callable): A callable function that takes no positional\narguments and can prepare this object into the given scenario\nstate.  It may take required or optional keyword arguments that\nmay be passed to `load_scenario` if needed.", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n    return self._FormatMessages(\n        self.FORMAT_STRING, self.FORMAT_STRING_SHORT, event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def confirm_iam_role(self, account):\n    try:\n        iam = self.session.client('iam')\n        rolearn = iam.get_role(RoleName=self.role_name)['Role']['Arn']\n        return rolearn\n    except ClientError as e:\n        if (e.response['Error']['Code'] == 'NoSuchEntity'):\n            self.create_iam_role(account)\n        else:\n            raise\n    except Exception as e:\n        self.log.exception('Failed validating IAM role for VPC Flow Log Auditing for {}'.format(e))", "docstring": "Return the ARN of the IAM Role on the provided account as a string. Returns an `IAMRole` object from boto3\n\nArgs:\naccount (:obj:`Account`): Account where to locate the role\n\nReturns:\n:obj:`IAMRole`", "source": "codesearchnet"}
{"code": "def _update_object(object_key: str, event: Event):\n    \n    events_list_key = _keys.events_list(object_key)\n    events_data_key = _keys.events_data(object_key)\n    event_dict = deepcopy(event.config)\n    event_dict.pop('id')\n    DB.append_to_list(events_list_key, event.id, pipeline=True)\n    DB.set_hash_value(events_data_key, event.id, json.dumps(event_dict),\n                      pipeline=True)", "docstring": "Update the events list and events data for the object.\n\n- Adds the event Id to the list of events for the object.\n- Adds the event data to the hash of object event data keyed by event\nid.\n\nArgs:\nobject_key (str): Key of the object being updated.\nevent (Event): Event object", "source": "juraj-google-style"}
{"code": "def region(self, start=0, end=None):\n        \n        if end is None:\n            end = len(self.sequence)\n        return '>{}\\n{}'.format(self.id, self.sequence[start:end])", "docstring": "Returns a region of ``Sequence.sequence``, in FASTA format.\n\nIf called without kwargs, the entire sequence will be returned.\n\nArgs:\n\nstart (int): Start position of the region to be returned. Default\nis 0.\n\nend (int): End position of the region to be returned. Negative values\nwill function as they do when slicing strings.\n\nReturns:\n\nstr: A region of ``Sequence.sequence``, in FASTA format", "source": "juraj-google-style"}
{"code": "def cast_type(self, var, cast_type=None):\n        \n\n        if cast_type is None:\n            cast_type = self.valid_values\n\n        try:\n            if cast_type == int:\n                return int(var)\n            elif cast_type == float:\n                return float(var)\n            elif type == str:\n                return str(var)\n            elif isinstance(cast_type, list):\n                \n                return type(cast_type[0])(var)\n            else:\n                return None\n        except ValueError:\n            return None\n\n        return var", "docstring": "cast the value into the type typ\nif type is not provided it is set to self.valid_values\nArgs:\nvar: variable to be cast\ntype: target type\n\nReturns: the variable var csat into type typ", "source": "juraj-google-style"}
{"code": "def start_automated_run(path, automated_run_id):\n    with functions.DBContextManager(path) as session:\n        automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()\n        if (not automated_run):\n            raise exceptions.UserError('Automated run {} does not exist'.format(automated_run_id))\n        automated_run.job_id = get_current_job().id\n        automated_run.job_status = 'started'\n        session.add(automated_run)\n        session.commit()\n        try:\n            if (automated_run.category == 'bayes'):\n                automatedruns.start_naive_bayes(automated_run, session, path)\n            elif (automated_run.category == 'tpot'):\n                automatedruns.start_tpot(automated_run, session, path)\n            elif (automated_run.category == 'greedy_ensemble_search'):\n                automatedruns.start_greedy_ensemble_search(automated_run, session, path)\n            else:\n                raise Exception('Something went wrong. Invalid category for automated run')\n            automated_run.job_status = 'finished'\n            session.add(automated_run)\n            session.commit()\n        except:\n            session.rollback()\n            automated_run.job_status = 'errored'\n            automated_run.description['error_type'] = repr(sys.exc_info()[0])\n            automated_run.description['error_value'] = repr(sys.exc_info()[1])\n            automated_run.description['error_traceback'] = traceback.format_exception(*sys.exc_info())\n            session.add(automated_run)\n            session.commit()\n            raise", "docstring": "Starts automated run. This will automatically create\nbase learners until the run finishes or errors out.\n\nArgs:\npath (str): Path to Xcessiv notebook\n\nautomated_run_id (str): Automated Run ID", "source": "codesearchnet"}
{"code": "def ias53(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[12] == '0':\n        return None\n\n    ias = bin2int(d[13:23])    \n    return ias", "docstring": "Indicated airspeed, DBS 5,3 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message\n\nReturns:\nint: indicated arispeed in knots", "source": "juraj-google-style"}
{"code": "def _non_batched_matmul(lhs, rhs, lhs_contraction, rhs_contraction):\n    return math_ops.tensordot(lhs, rhs, axes=(list(lhs_contraction), list(rhs_contraction)))", "docstring": "Compute the non-batched matrix multiplication.\n\nIf it is the general non-batched/single-batched matrix multiplication,\nuse the highly optimized kernel `tf.tensordot` to handle it.\n\nArgs:\nlhs: an array (the left-hand side matrix/vector to be multiplied)\nrhs: an array (the right-hand side matrix/vector to be multiplied)\nlhs_contraction: Sequence[int] (the contraction dimensions of lhs)\nrhs_contraction: Sequence[int] (the contraction dimensions of rhs)\n\nReturns:\nAn array that contains the result.", "source": "github-repos"}
{"code": "def update(self, data):\n        \n        \n        updated = self.set_property('state', data['state'])\n        updated |= self.set_property('notes', sorted(data['notes'] or []))\n        updated |= self.set_property('last_notice', data['last_notice'])\n\n        if updated:\n            self.set_property('last_change', datetime.now())\n\n        return updated", "docstring": "Updates the object information based on live data, if there were any changes made. Any changes will be\nautomatically applied to the object, but will not be automatically persisted. You must manually call\n`db.session.add(instance)` on the object.\n\nArgs:\ndata (:obj:): AWS API Resource object fetched from AWS API\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def NewOutputModule(cls, name, output_mediator):\n    \n    output_class = cls.GetOutputClass(name)\n    return output_class(output_mediator)", "docstring": "Creates a new output module object for the specified output format.\n\nArgs:\nname (str): name of the output module.\noutput_mediator (OutputMediator): output mediator.\n\nReturns:\nOutputModule: output module.\n\nRaises:\nKeyError: if there is no output class found with the supplied name.\nValueError: if name is not a string.", "source": "juraj-google-style"}
{"code": "def createSimpleResourceMap(ore_pid, scimeta_pid, sciobj_pid_list):\n    ore = ResourceMap()\n    ore.initialize(ore_pid)\n    ore.addMetadataDocument(scimeta_pid)\n    ore.addDataDocuments(sciobj_pid_list, scimeta_pid)\n    return ore", "docstring": "Create a simple OAI-ORE Resource Map with one Science Metadata document and any\nnumber of Science Data objects.\n\nThis creates a document that establishes an association between a Science Metadata\nobject and any number of Science Data objects. The Science Metadata object contains\ninformation that is indexed by DataONE, allowing both the Science Metadata and the\nScience Data objects to be discoverable in DataONE Search. In search results, the\nobjects will appear together and can be downloaded as a single package.\n\nArgs:\nore_pid: str\nPersistent Identifier (PID) to use for the new Resource Map\n\nscimeta_pid: str\nPID for an object that will be listed as the Science Metadata that is\ndescribing the Science Data objects.\n\nsciobj_pid_list: list of str\nList of PIDs that will be listed as the Science Data objects that are being\ndescribed by the Science Metadata.\n\nReturns:\nResourceMap : OAI-ORE Resource Map", "source": "codesearchnet"}
{"code": "def get_path_attribute(obj, path):\n    path = path.replace('original.', '').replace('current_user.', '')\n    attr_parts = path.split('.')\n    res = obj\n    try:\n        for part in attr_parts:\n            try:\n                res = getattr(res, part)\n            except AttributeError:\n                res = getattr(res.get(), part)\n    except (peewee.DoesNotExist, AttributeError):\n        return None\n    return res", "docstring": "Given a path like `related_record.related_record2.id`, this method\nwill be able to pull the value of ID from that object, returning None\nif it doesn't exist.\n\nArgs:\nobj (fleaker.db.Model):\nThe object to attempt to pull the value from\npath (str):\nThe path to follow to pull the value from\n\nReturns:\n(int|str|None):\nThe value at the end of the path. None if it doesn't exist at\nany point in the path.", "source": "codesearchnet"}
{"code": "def _ParseDistributedTrackingIdentifier(\n      self, parser_mediator, uuid_object, origin):\n    \n    if uuid_object.version == 1:\n      event_data = windows_events.WindowsDistributedLinkTrackingEventData(\n          uuid_object, origin)\n      date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    return '{{{0!s}}}'.format(uuid_object)", "docstring": "Extracts data from a Distributed Tracking identifier.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nuuid_object (uuid.UUID): UUID of the Distributed Tracking identifier.\norigin (str): origin of the event (event source).\n\nReturns:\nstr: UUID string of the Distributed Tracking identifier.", "source": "juraj-google-style"}
{"code": "def set_peer_link(self, value=None, default=False, disable=False):\n    return self._configure_mlag('peer-link', value, default, disable)", "docstring": "Configures the mlag peer-link value\n\nArgs:\nvalue (str): The value to configure the peer-link\ndefault (bool): Configures the peer-link using the\ndefault keyword\ndisable (bool): Negates the peer-link using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "codesearchnet"}
{"code": "def prepare_data_index(self, silence_percentage, unknown_percentage, wanted_words, validation_percentage, testing_percentage):\n    random.seed(RANDOM_SEED)\n    wanted_words_index = {}\n    for index, wanted_word in enumerate(wanted_words):\n        wanted_words_index[wanted_word] = index + 2\n    self.data_index = {'validation': [], 'testing': [], 'training': []}\n    unknown_index = {'validation': [], 'testing': [], 'training': []}\n    all_words = {}\n    search_path = os.path.join(self.data_dir, '*', '*.wav')\n    for wav_path in gfile.Glob(search_path):\n        _, word = os.path.split(os.path.dirname(wav_path))\n        word = word.lower()\n        if word == BACKGROUND_NOISE_DIR_NAME:\n            continue\n        all_words[word] = True\n        set_index = which_set(wav_path, validation_percentage, testing_percentage)\n        if word in wanted_words_index:\n            self.data_index[set_index].append({'label': word, 'file': wav_path})\n        else:\n            unknown_index[set_index].append({'label': word, 'file': wav_path})\n    if not all_words:\n        raise Exception('No .wavs found at ' + search_path)\n    for index, wanted_word in enumerate(wanted_words):\n        if wanted_word not in all_words:\n            raise Exception('Expected to find ' + wanted_word + ' in labels but only found ' + ', '.join(all_words.keys()))\n    silence_wav_path = self.data_index['training'][0]['file']\n    for set_index in ['validation', 'testing', 'training']:\n        set_size = len(self.data_index[set_index])\n        silence_size = int(math.ceil(set_size * silence_percentage / 100))\n        for _ in range(silence_size):\n            self.data_index[set_index].append({'label': SILENCE_LABEL, 'file': silence_wav_path})\n        random.shuffle(unknown_index[set_index])\n        unknown_size = int(math.ceil(set_size * unknown_percentage / 100))\n        self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])\n    for set_index in ['validation', 'testing', 'training']:\n        random.shuffle(self.data_index[set_index])\n    self.words_list = prepare_words_list(wanted_words)\n    self.word_to_index = {}\n    for word in all_words:\n        if word in wanted_words_index:\n            self.word_to_index[word] = wanted_words_index[word]\n        else:\n            self.word_to_index[word] = UNKNOWN_WORD_INDEX\n    self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX", "docstring": "Prepares a list of the samples organized by set and label.\n\nThe training loop needs a list of all the available data, organized by\nwhich partition it should belong to, and with ground truth labels attached.\nThis function analyzes the folders below the `data_dir`, figures out the\nright\nlabels for each file based on the name of the subdirectory it belongs to,\nand uses a stable hash to assign it to a data set partition.\n\nArgs:\nsilence_percentage: How much of the resulting data should be background.\nunknown_percentage: How much should be audio outside the wanted classes.\nwanted_words: Labels of the classes we want to be able to recognize.\nvalidation_percentage: How much of the data set to use for validation.\ntesting_percentage: How much of the data set to use for testing.\n\nReturns:\nDictionary containing a list of file information for each set partition,\nand a lookup map for each class to determine its numeric index.\n\nRaises:\nException: If expected files are not found.", "source": "github-repos"}
{"code": "def create_image_streamer_client(self):\n    image_streamer = ImageStreamerClient(self.__image_streamer_ip, self.__connection.get_session_id(), self.__connection._apiVersion, self.__connection._sslBundle)\n    return image_streamer", "docstring": "Create the Image Streamer API Client.\n\nReturns:\nImageStreamerClient:", "source": "codesearchnet"}
{"code": "def zip_columns(columns):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n    column_vars = []\n    for column in columns:\n        col_var = weld_obj.update(column)\n        if isinstance(column, WeldObject):\n            col_var = column.obj_id\n            weld_obj.dependencies[col_var] = column\n        column_vars.append(col_var)\n\n    arrays = \", \".join(column_vars)\n\n    weld_template = \n    weld_obj.weld_code = weld_template % {\n        \"array\": arrays,\n    }\n\n    return weld_obj", "docstring": "Zip together multiple columns.\n\nArgs:\ncolumns (WeldObject / Numpy.ndarray): lust of columns\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def CalculateHashes(self, base_path_specs, output_writer):\n    for base_path_spec in base_path_specs:\n        file_system = resolver.Resolver.OpenFileSystem(base_path_spec)\n        file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)\n        if (file_entry is None):\n            logging.warning('Unable to open base path specification:\\n{0:s}'.format(base_path_spec.comparable))\n            continue\n        self._CalculateHashesFileEntry(file_system, file_entry, '', output_writer)", "docstring": "Recursive calculates hashes starting with the base path specification.\n\nArgs:\nbase_path_specs (list[dfvfs.PathSpec]): source path specification.\noutput_writer (StdoutWriter): output writer.", "source": "codesearchnet"}
{"code": "def recipe_dcm_to_bigquery(config, auth_read, auth_write, account, report_id, report_name, dataset, table, is_incremental_load):\n    dcm(config, {'auth': auth_read, 'report': {'account': account, 'report_id': report_id, 'name': report_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': dataset, 'table': table, 'header': True, 'is_incremental_load': is_incremental_load}}})", "docstring": "Move existing CM report into a BigQuery table.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\naccount (integer) - CM network id.\nreport_id (integer) - CM report id, empty if using name .\nreport_name (string) - CM report name, empty if using id instead.\ndataset (string) - Dataset to be written to in BigQuery.\ntable (string) - Table to be written to in BigQuery.\nis_incremental_load (boolean) - Clear data in destination table during this report's time period, then append report data to existing table.", "source": "github-repos"}
{"code": "def to_numpy(self, dtype=None, copy=False):\n        \n        return self._default_to_pandas(\"to_numpy\", dtype=dtype, copy=copy)", "docstring": "Convert the DataFrame to a NumPy array.\n\nArgs:\ndtype: The dtype to pass to numpy.asarray()\ncopy: Whether to ensure that the returned value is a not a view on another\narray.\n\nReturns:\nA numpy array.", "source": "juraj-google-style"}
{"code": "def create_default_views(self, create_datastore_views=False):\n        \n        \n        package = deepcopy(self.data)\n        if self.resources:\n            package['resources'] = self._convert_hdxobjects(self.resources)\n\n        data = {'package': package, 'create_datastore_views': create_datastore_views}\n        self._write_to_hdx('create_default_views', data, 'package')", "docstring": "Create default resource views for all resources in dataset\n\nArgs:\ncreate_datastore_views (bool): Whether to try to create resource views that point to the datastore\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def reorder_resource_views(self, resource_views):\n        \n        \n        if not isinstance(resource_views, list):\n            raise HDXError('ResourceViews should be a list!')\n        ids = list()\n        for resource_view in resource_views:\n            if isinstance(resource_view, str):\n                resource_view_id = resource_view\n            else:\n                resource_view_id = resource_view['id']\n            if is_valid_uuid(resource_view_id) is False:\n                raise HDXError('%s is not a valid resource view id!' % resource_view)\n            ids.append(resource_view_id)\n        _, result = self._read_from_hdx('resource view', self.data['id'], 'id',\n                                        ResourceView.actions()['reorder'], order=ids)", "docstring": "Order resource views in resource.\n\nArgs:\nresource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def incr(self, counter_name, delta=1):\n    self._state.counters_map.increment(counter_name, delta)", "docstring": "Changes counter by delta.\n\nArgs:\ncounter_name: the name of the counter to change. str.\ndelta: int.", "source": "codesearchnet"}
{"code": "def in_sorted(values, value):\n    \n    index = bisect.bisect_left(values, value)\n    if index >= len(values):\n        return False\n\n    return values[index] == value", "docstring": "Checks if a value is in a sorted list.\n\nUses the :mod:`bisect` builtin to find the insertion point for\n``value``.\n\nArgs:\nvalues (List[int]): Integers sorted in ascending order.\nvalue (int): Value to check if contained in ``values``.\n\nReturns:\nbool: Indicating if the value is contained.", "source": "juraj-google-style"}
{"code": "def configure_bigchaindb(command):\n\n    @functools.wraps(command)\n    def configure(args):\n        config_from_cmdline = None\n        try:\n            if (args.log_level is not None):\n                config_from_cmdline = {'log': {'level_console': args.log_level, 'level_logfile': args.log_level}, 'server': {'loglevel': args.log_level}}\n        except AttributeError:\n            pass\n        bigchaindb.config_utils.autoconfigure(filename=args.config, config=config_from_cmdline, force=True)\n        command(args)\n    return configure", "docstring": "Decorator to be used by command line functions, such that the\nconfiguration of bigchaindb is performed before the execution of\nthe command.\n\nArgs:\ncommand: The command to decorate.\n\nReturns:\nThe command wrapper function.", "source": "codesearchnet"}
{"code": "def _contains_op_with_name_and_attribute(self, nodes: Iterable[node_def_pb2.NodeDef], op_name: str, attr_name: str, attr_val: _AttrValType, node_name: str='') -> bool:\n\n    def match_node_name(name):\n        if not node_name:\n            return True\n        compiled_regex = re.compile(node_name)\n        match = re.fullmatch(compiled_regex, name)\n        return match is not None\n    return any((node.attr.get(attr_name) == attr_val for node in nodes if node.op == op_name and match_node_name(node.name)))", "docstring": "Determine whether there is a node whose operation name matches `op_name`.\n\nIf `attr_name` is given, additionally check if the `attr_val` matches with\nthe attribute value of the op.\n\nArgs:\nnodes: Iterable of NodeDefs.\nop_name: Name of the op to match.\nattr_name: Name of the attribute of the op to match.\nattr_val: Value of the attr_name to check.\nnode_name: Name of the node to match. Accepts regex2 format.\n\nReturns:\nTrue if there exists a node whose name matches `op_name` and 'attr_val' if\n'attr_name' is given.", "source": "github-repos"}
{"code": "def unparse(node, indentation=None, include_encoding_marker=True):\n    del indentation\n    if not isinstance(node, (list, tuple)):\n        node = (node,)\n    codes = []\n    if include_encoding_marker:\n        codes.append('\n    for n in node:\n        if isinstance(n, gast.AST):\n            ast_n = gast.gast_to_ast(n)\n        else:\n            ast_n = n\n        if astunparse is ast:\n            ast.fix_missing_locations(ast_n)\n        codes.append(astunparse.unparse(ast_n).strip())\n    return '\\n'.join(codes)", "docstring": "Returns the source code of given AST.\n\nArgs:\nnode: The code to compile, as an AST object.\nindentation: Unused, deprecated. The returning code will always be indented\nat 4 spaces.\ninclude_encoding_marker: Bool, whether to include a comment on the first\nline to explicitly specify UTF-8 encoding.\n\nReturns:\ncode: The source code generated from the AST object\nsource_mapping: A mapping between the user and AutoGraph generated code.", "source": "github-repos"}
{"code": "def has_implicit_access_to_dashboard(user, obj):\n    request = get_request_or_stub()\n    decoded_jwt = get_decoded_jwt_from_request(request)\n    return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_DASHBOARD_ADMIN_ROLE)", "docstring": "Check that if request user has implicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.\n\nReturns:\nboolean: whether the request user has access or not", "source": "codesearchnet"}
{"code": "def max_steps_per_epoch():\n    return _MAX_STEPS_PER_EPOCH", "docstring": "Get the maximum number of steps for any call to fit/evaluate/predict.\n\nRetrieves the limit on the number of epochs set by\n`keras.config.set_max_steps_per_epoch` or the `KERAS_MAX_STEPS_PER_EPOCH`\nenvironment variable.\n\nArgs:\nmax_epochs: The integer limit on the number of epochs or `None`. If\n`None`, no limit is applied.", "source": "github-repos"}
{"code": "def to_qasm(self,\n                header: Optional[str] = None,\n                precision: int = 10,\n                qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n                ) -> str:\n        \n        return str(self._to_qasm_output(header, precision, qubit_order))", "docstring": "Returns QASM equivalent to the circuit.\n\nArgs:\nheader: A multi-line string that is placed in a comment at the top\nof the QASM. Defaults to a cirq version specifier.\nprecision: Number of digits to use when representing numbers.\nqubit_order: Determines how qubits are ordered in the QASM\nregister.", "source": "juraj-google-style"}
{"code": "def _decode_filename(base_filename, problem_name, decode_hp):\n    if (decode_hp.shards > 1):\n        base_filename = _add_shard_to_filename(base_filename, decode_hp)\n    if ('beam{beam}.alpha{alpha}.decodes'.format(beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) in base_filename):\n        return base_filename\n    else:\n        return '{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes'.format(base=base_filename, model=FLAGS.model, hp=FLAGS.hparams_set, problem=problem_name, beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))", "docstring": "Generates decode filename.\n\nArgs:\nbase_filename: A string, base of the decode filename.\nproblem_name: A string, name of the problem.\ndecode_hp: HParams for decoding.\n\nReturns:\nA string, produced decode filename.", "source": "codesearchnet"}
{"code": "def cipher(self):\n    if (self.offset is False):\n        self.offset = randrange(5, 25)\n        logging.info('Random offset selected: {0}'.format(self.offset))\n    logging.debug('Offset set: {0}'.format(self.offset))\n    ciphered_message_list = list(self.message)\n    for (i, letter) in enumerate(ciphered_message_list):\n        if letter.isalpha():\n            if letter.isupper():\n                alphabet = [character.upper() for character in self.alphabet]\n            else:\n                alphabet = self.alphabet\n            logging.debug('Letter: {0}'.format(letter))\n            logging.debug('Alphabet: {0}'.format(alphabet))\n            value = alphabet.index(letter)\n            cipher_value = (value + self.offset)\n            if ((cipher_value > 25) or (cipher_value < 0)):\n                cipher_value = (cipher_value % 26)\n            logging.debug('Cipher value: {0}'.format(cipher_value))\n            ciphered_message_list[i] = alphabet[cipher_value]\n            logging.debug('Ciphered letter: {0}'.format(letter))\n    self.message = ''.join(ciphered_message_list)\n    return self.message", "docstring": "Applies the Caesar shift cipher.\n\nBased on the attributes of the object, applies the Caesar shift cipher\nto the message attribute. Accepts positive and negative integers as\noffsets.\n\nRequired attributes:\nmessage\noffset\n\nReturns:\nString with cipher applied.", "source": "codesearchnet"}
{"code": "def solve(A, b):\n    r\n    A = asarray(A, float)\n    b = asarray(b, float)\n    if A.shape[0] == 1:\n\n        with errstate(divide=\"ignore\"):\n            A_ = array([[1.0 / A[0, 0]]])\n\n        if not isfinite(A_[0, 0]):\n            raise LinAlgError(\"Division error.\")\n\n        return dot(A_, b)\n    elif A.shape[0] == 2:\n        a = A[0, 0]\n        b_ = A[0, 1]\n        c = A[1, 0]\n        d = A[1, 1]\n        A_ = array([[d, -b_], [-c, a]])\n\n        with errstate(divide=\"ignore\"):\n            A_ /= a * d - b_ * c\n\n        if not npy_all(isfinite(A_)):\n            raise LinAlgError(\"Division error.\")\n\n        return dot(A_, b)\n    return _solve(A, b)", "docstring": "r\"\"\"Solve for the linear equations :math:`\\mathrm A \\mathbf x = \\mathbf b`.\n\nArgs:\nA (array_like): Coefficient matrix.\nb (array_like): Ordinate values.\n\nReturns:\n:class:`numpy.ndarray`: Solution ``x``.", "source": "juraj-google-style"}
{"code": "class TFTopPLogitsWarper(TFLogitsWarper):\n\n    def __init__(self, top_p: float, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):\n        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):\n            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')\n        if not isinstance(min_tokens_to_keep, int) or min_tokens_to_keep < 1:\n            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')\n        self.top_p = top_p\n        self.filter_value = filter_value\n        self.min_tokens_to_keep = min_tokens_to_keep\n\n    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:\n        topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1])\n        mask_scores = tf.fill(scores.shape, self.filter_value)\n        cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1)\n        score_mask = cumulative_probs < self.top_p\n        score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1)\n        score_mask = tf.concat((tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool), score_mask[:, self.min_tokens_to_keep:]), axis=-1)\n        topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)\n        scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]])\n        scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1)\n        next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape)\n        return next_scores", "docstring": "[`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off.\n\nArgs:\ntop_p (`float`):\nIf set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\nhigher are kept for generation.\nfilter_value (`float`, *optional*, defaults to -inf):\nAll filtered values will be set to this float value.\nmin_tokens_to_keep (`int`, *optional*, defaults to 1):\nMinimum number of tokens that cannot be filtered.", "source": "github-repos"}
{"code": "def get_function_from_signature(self, function_signature):\n        \n        return next((f for f in self.functions if f.full_name == function_signature), None)", "docstring": "Return a function from a signature\nArgs:\nfunction_signature (str): signature of the function (without return statement)\nReturns:\nFunction", "source": "juraj-google-style"}
{"code": "def has_values(o):\n    try:\n        next(o)\n        return True\n    except StopIteration:\n        return False", "docstring": "Converts iterator to a boolean.\n\nDestroys iterator but returns True if at least one value is present.\n\nArgs:\n* o: An iterator instance.\n\nReturns:\n* True if at least one instance or False if none.", "source": "github-repos"}
{"code": "def _to_components(self, value):\n    raise NotImplementedError('%s._to_components()' % type(self).__name__)", "docstring": "Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`.\n\nArgs:\nvalue: A value compatible with this `TypeSpec`.  (Caller is responsible\nfor ensuring compatibility.)\n\nReturns:\nA nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with\n`self._component_specs`, which can be used to reconstruct `value`.", "source": "github-repos"}
{"code": "def store(self, store=None, usage='both', mech=None, overwrite=False, set_default=False):\n    if (store is None):\n        if (rcred_rfc5588 is None):\n            raise NotImplementedError('Your GSSAPI implementation does not have support for RFC 5588')\n        return rcred_rfc5588.store_cred(self, usage, mech, overwrite, set_default)\n    else:\n        if (rcred_cred_store is None):\n            raise NotImplementedError('Your GSSAPI implementation does not have support for manipulating credential stores directly')\n        store = _encode_dict(store)\n        return rcred_cred_store.store_cred_into(store, self, usage, mech, overwrite, set_default)", "docstring": "Store these credentials into the given store\n\nThis method stores the current credentials into the specified\ncredentials store.  If the default store is used, support for\n:rfc:`5588` is required.  Otherwise, support for the credentials\nstore extension is required.\n\n:requires-ext:`rfc5588` or :requires-ext:`cred_store`\n\nArgs:\nstore (dict): the store into which to store the credentials,\nor None for the default store.\nusage (str): the usage to store the credentials with -- either\n'both', 'initiate', or 'accept'\nmech (OID): the :class:`MechType` to associate with the\nstored credentials\noverwrite (bool): whether or not to overwrite existing credentials\nstored with the same name, etc\nset_default (bool): whether or not to set these credentials as\nthe default credentials for the given store.\n\nReturns:\nStoreCredResult: the results of the credential storing operation\n\nRaises:\nGSSError\nExpiredCredentialsError\nMissingCredentialsError\nOperationUnavailableError\nDuplicateCredentialsElementError", "source": "codesearchnet"}
{"code": "def get_name_servers(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri) + '/nameServers')\n    return self._client.get(uri)", "docstring": "Gets the named servers for an interconnect.\n\nArgs:\nid_or_uri:  Can be either the interconnect id or the interconnect uri.\n\nReturns:\ndict: the name servers for an interconnect.", "source": "codesearchnet"}
{"code": "def user_lists(self, username, member_type='USER'):\n    return self.client.service.getUserLists(username, member_type, self.proxy_id)", "docstring": "Look up all the lists that the user is a member of.\n\nArgs:\nusername (str): The MIT username of the user\nmember_type(str): The type of user, \"USER\" or \"STRING\"\n\nReturns:\nlist of strings: names of the lists that this user is a member of", "source": "codesearchnet"}
{"code": "def dumps(messages):\n    \n    serialized_messages = []\n    try:\n        for message in messages:\n            message_dict = message._dump()\n            serialized_messages.append(message_dict)\n    except AttributeError:\n        _log.error(\"Improper object for messages serialization.\")\n        raise TypeError(\"Message have to be instance of Message class or subclass.\")\n\n    return json.dumps(serialized_messages, sort_keys=True)", "docstring": "Serialize messages to a JSON formatted str\n\nArgs:\nmessages (list): The list of messages to serialize. Each message in\nthe messages is subclass of Messge.\n\nReturns:\nstr: Serialized messages.\n\nRaises:\nTypeError: If at least one message is not instance of Message class or subclass.", "source": "juraj-google-style"}
{"code": "def decode(value, strip=False):\n    \n    if value is None:\n        return None\n    if isinstance(value, bytes) and not isinstance(value, unicode):\n        value = value.decode(\"utf-8\")\n    if strip:\n        return unicode(value).strip()\n    return unicode(value)", "docstring": "Python 2/3 friendly decoding of output.\n\nArgs:\nvalue (str | unicode | bytes | None): The value to decode.\nstrip (bool): If True, `strip()` the returned string. (Default value = False)\n\nReturns:\nstr: Decoded value, if applicable.", "source": "juraj-google-style"}
{"code": "def set_hostname(hostname):\n    \n    with salt.utils.winapi.Com():\n        conn = wmi.WMI()\n        comp = conn.Win32_ComputerSystem()[0]\n    return comp.Rename(Name=hostname)", "docstring": "Set the hostname of the windows minion, requires a restart before this will\nbe updated.\n\n.. versionadded:: 2016.3.0\n\nArgs:\nhostname (str): The hostname to set\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_hostname newhostname", "source": "juraj-google-style"}
{"code": "def train_model(samples_path: str, labels_path: str, model_state_output_path: str):\n    samples = pandas.read_csv(samples_path)\n    labels = pandas.read_csv(labels_path)\n    xgb = xgboost.XGBClassifier(max_depth=3)\n    xgb.fit(samples, labels)\n    xgb.save_model(model_state_output_path)\n    return xgb", "docstring": "Function to train the XGBoost model.\nArgs:\nsamples_path: path to csv file containing the training data\nlabels_path: path to csv file containing the labels for the training data\nmodel_state_output_path: Path to store the trained model", "source": "github-repos"}
{"code": "def get_function_args_defaults(f):\n    signature = get_signature(f)\n    parameter = inspect.Parameter\n    _SUPPORTED_ARG_TYPES = [parameter.POSITIONAL_ONLY, parameter.POSITIONAL_OR_KEYWORD]\n    args = [name for name, p in signature.parameters.items() if p.kind in _SUPPORTED_ARG_TYPES]\n    defaults = [p.default for p in signature.parameters.values() if p.kind in _SUPPORTED_ARG_TYPES and p.default is not p.empty]\n    return (args, defaults)", "docstring": "Returns the function arguments of a given function.\n\nReturns:\n(args: List[str], defaults: List[Any]). The first list names the\narguments of the method and the second one has the values of the default\narguments. This is similar to ``inspect.getfullargspec()``'s results, except\nit doesn't include bound arguments and may follow function wrappers.", "source": "github-repos"}
{"code": "def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):\n    return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean, std=std, title=title, show=show, save=save)", "docstring": "Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer\n\nDataSet target and output values are denormalized before plotting with:\n\noutput * std + mean\n\nWhich inverses the normalization\n\n(output - mean) / std\n\nArgs:\ntrainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet\nds (DataSet): a pybrain DataSet to override the one contained in `trainer`.\nRequired if trainer is a Network instance rather than a Trainer instance.\nmean (float): mean of the denormalized dataset (default: 0)\nOnly affects the scale of the plot\nstd (float): std (standard deviation) of the denormalized dataset (default: 1)\ntitle (str): title to display on the plot.\n\nReturns:\n3-tuple: (trainer, mean, std), A trainer/dataset along with denormalization info", "source": "codesearchnet"}
{"code": "def run_experiment(hparams):\n    estimator = train_and_maybe_evaluate(hparams)\n    schema = taxi.read_schema(hparams.schema_file)\n    tf_transform_output = tft.TFTransformOutput(hparams.tf_transform_dir)\n    eval_model_dir = os.path.join(hparams.output_dir, EVAL_MODEL_DIR)\n    receiver_fn = lambda: model.eval_input_receiver_fn(tf_transform_output, schema)\n    tfma.export.export_eval_savedmodel(estimator=estimator, export_dir_base=eval_model_dir, eval_input_receiver_fn=receiver_fn)", "docstring": "Train the model then export it for tf.model_analysis evaluation.\n\nArgs:\nhparams: Holds hyperparameters used to train the model as name/value pairs.", "source": "github-repos"}
{"code": "def loadnetcdf(filename, copy=True):\n    \n    filename = str(Path(filename).expanduser())\n\n    if copy:\n        dataarray = xr.open_dataarray(filename).copy()\n    else:\n        dataarray = xr.open_dataarray(filename, chunks={})\n\n    if dataarray.name is None:\n        dataarray.name = filename.rstrip('.nc')\n\n    for key, val in dataarray.coords.items():\n        if val.dtype.kind == 'S':\n            dataarray[key] = val.astype('U')\n        elif val.dtype == np.int32:\n            dataarray[key] = val.astype('i8')\n\n    return dataarray", "docstring": "Load a dataarray from a NetCDF file.\n\nArgs:\nfilename (str): Filename (*.nc).\ncopy (bool): If True, dataarray is copied in memory. Default is True.\n\nReturns:\ndataarray (xarray.DataArray): Loaded dataarray.", "source": "juraj-google-style"}
{"code": "def load_data_split(proc_data_dir):\n    ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin'))\n    ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin'))\n    ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin'))\n    return (ds_train, ds_val, ds_test)", "docstring": "Loads a split dataset\n\nArgs:\nproc_data_dir: Directory with the split and processed data\n\nReturns:\n(Training Data, Validation Data, Test Data)", "source": "codesearchnet"}
{"code": "def _prefix_from_ip_int(self, ip_int):\n        \n        prefixlen = self._max_prefixlen\n        while prefixlen:\n            if ip_int & 1:\n                break\n            ip_int >>= 1\n            prefixlen -= 1\n\n        if ip_int == (1 << prefixlen) - 1:\n            return prefixlen\n        else:\n            raise NetmaskValueError('Bit pattern does not match /1*0*/')", "docstring": "Return prefix length from a bitwise netmask.\n\nArgs:\nip_int: An integer, the netmask in expanded bitwise format.\n\nReturns:\nAn integer, the prefix length.\n\nRaises:\nNetmaskValueError: If the input is not a valid netmask.", "source": "juraj-google-style"}
{"code": "def set_cookie(self, key, value, domain=None, path='/', secure=False,\n                   httponly=True):\n        \n        self._cookies[key] = value\n        if domain:\n            self._cookies[key]['domain'] = domain\n        if path:\n            self._cookies[key]['path'] = path\n        if secure:\n            self._cookies[key]['secure'] = secure\n        if httponly:\n            self._cookies[key]['httponly'] = httponly", "docstring": "Set a cookie.\n\nArgs:\nkey (:obj:`str`): Cookie name\nvalue (:obj:`str`): Cookie value\ndomain (:obj:`str`): Cookie domain\npath (:obj:`str`): Cookie value\nsecure (:obj:`bool`): True if secure, False otherwise\nhttponly (:obj:`bool`): True if it's a HTTP only cookie, False\notherwise", "source": "juraj-google-style"}
{"code": "def set_json(self, obj, status=HttpStatusCodes.HTTP_200):\n    obj = json.dumps(obj, sort_keys=True, default=(lambda x: str(x)))\n    self.set_status(status)\n    self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json')\n    self.set_content(obj)", "docstring": "Helper method to set a JSON response.\n\nArgs:\nobj (:obj:`object`): JSON serializable object\nstatus (:obj:`str`, optional): Status code of the response", "source": "codesearchnet"}
{"code": "def get_2d_local_memory_v2(x, query_shape, memory_flange):\n    (_, height, width, depth_x) = common_layers.shape_list(x)\n    paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]]\n    padded_x = tf.pad(x, paddings)\n    padded_x.set_shape([None, (height + (2 * memory_flange[0])), (width + (2 * memory_flange[1])), depth_x])\n    num_h_memory_blocks = ((height \n    num_w_memory_blocks = ((width \n    x_memory_blocks = _extract_blocks(padded_x, query_shape[0], query_shape[1])\n    x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks, 2)\n    x_left_width = tf.concat(x_width_blocks[:(num_w_memory_blocks - 1)], axis=2)\n    x_right_width = tf.concat(x_width_blocks[1:], axis=2)\n    x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4)\n    x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1)\n    x_top_height = tf.concat(x_height_blocks[:(num_h_memory_blocks - 1)], axis=1)\n    x_bottom_height = tf.concat(x_height_blocks[1:], axis=1)\n    x = tf.concat([x_top_height, x_bottom_height], axis=3)\n    return x", "docstring": "Gathering memory blocks around query blocks. flange is half of query .\n\nOnly works if memory flanges are half of query sizes.\n\nArgs:\nx: a [batch, height, width, depth tensor]\nquery_shape: 2-d integer list of query shape\nmemory_flange: 2-d integer list of memory flanges\n\nReturns:\nx: A [batch, num_h_blocks, num_w_blocks,\nquery_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]\ntensor.", "source": "codesearchnet"}
{"code": "def files_delete(self, *, id: str, **kwargs) -> SlackResponse:\n    kwargs.update({'id': id})\n    return self.api_call('files.delete', json=kwargs)", "docstring": "Deletes a file.\n\nArgs:\nid (str): The file id. e.g. 'F1234467890'", "source": "codesearchnet"}
{"code": "def list(self, name=None, all=False, filters=None):\n    resp = self.client.api.images(name=name, all=all, filters=filters)\n    return [self.get(r['Id']) for r in resp]", "docstring": "List images on the server.\n\nArgs:\nname (str): Only show images belonging to the repository ``name``\nall (bool): Show intermediate image layers. By default, these are\nfiltered out.\nfilters (dict): Filters to be processed on the image list.\nAvailable filters:\n- ``dangling`` (bool)\n- ``label`` (str): format either ``key`` or ``key=value``\n\nReturns:\n(list of :py:class:`Image`): The images.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _SetFieldType(self, field_proto, field_desc, package, scope):\n    \n    if field_proto.type_name:\n      desc = self._GetTypeFromScope(package, field_proto.type_name, scope)\n    else:\n      desc = None\n\n    if not field_proto.HasField('type'):\n      if isinstance(desc, descriptor.Descriptor):\n        field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE\n      else:\n        field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM\n\n    field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(\n        field_proto.type)\n\n    if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE\n        or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):\n      field_desc.message_type = desc\n\n    if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:\n      field_desc.enum_type = desc\n\n    if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n      field_desc.has_default_value = False\n      field_desc.default_value = []\n    elif field_proto.HasField('default_value'):\n      field_desc.has_default_value = True\n      if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or\n          field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):\n        field_desc.default_value = float(field_proto.default_value)\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:\n        field_desc.default_value = field_proto.default_value\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:\n        field_desc.default_value = field_proto.default_value.lower() == 'true'\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:\n        field_desc.default_value = field_desc.enum_type.values_by_name[\n            field_proto.default_value].number\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:\n        field_desc.default_value = text_encoding.CUnescape(\n            field_proto.default_value)\n      else:\n        \n        field_desc.default_value = int(field_proto.default_value)\n    else:\n      field_desc.has_default_value = False\n      if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or\n          field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):\n        field_desc.default_value = 0.0\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:\n        field_desc.default_value = u''\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:\n        field_desc.default_value = False\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:\n        field_desc.default_value = field_desc.enum_type.values[0].number\n      elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:\n        field_desc.default_value = b''\n      else:\n        \n        field_desc.default_value = 0\n\n    field_desc.type = field_proto.type", "docstring": "Sets the field's type, cpp_type, message_type and enum_type.\n\nArgs:\nfield_proto: Data about the field in proto format.\nfield_desc: The descriptor to modiy.\npackage: The package the field's container is in.\nscope: Enclosing scope of available types.", "source": "juraj-google-style"}
{"code": "def _create_all_weights(self, var_list):\n    _ = self.iterations\n    self._create_hypers()\n    self._create_slots(var_list)", "docstring": "Creates all weights, including iterations, hyperparameters and slot vars.\n\nThis will add newly created variables to `optimizer.weights`.\n\nNew variables are only created when this method is called the first time, or\nwhen called with different variables in the var_list.\n\nArgs:\nvar_list: list or tuple of `Variable` objects that will be minimized\nusing this optimizer.", "source": "github-repos"}
{"code": "def serialize(data):\n    return rapidjson.dumps(data, skipkeys=False, ensure_ascii=False, sort_keys=True)", "docstring": "Serialize a dict into a JSON formatted string.\n\nThis function enforces rules like the separator and order of keys.\nThis ensures that all dicts are serialized in the same way.\n\nThis is specially important for hashing data. We need to make sure that\neveryone serializes their data in the same way so that we do not have\nhash mismatches for the same structure due to serialization\ndifferences.\n\nArgs:\ndata (dict): dict to serialize\n\nReturns:\nstr: JSON formatted string", "source": "codesearchnet"}
{"code": "def _process_image_id(self):\n    try:\n        image_info = self.image_id.strip().split(':')\n        self.image_publisher = image_info[0]\n        self.image_offer = image_info[1]\n        self.image_sku = image_info[2]\n        self.image_version = image_info[3]\n    except Exception:\n        self.image_publisher = None", "docstring": "Split image id into component values.\n\nExample: SUSE:SLES:12-SP3:2018.01.04\nPublisher:Offer:Sku:Version\n\nRaises:\nIf image_id is not a valid format.", "source": "codesearchnet"}
{"code": "def _init_index(root_dir, schema, index_name):\n    index_dir = os.path.join(root_dir, index_name)\n    try:\n        if (not os.path.exists(index_dir)):\n            os.makedirs(index_dir)\n            return (create_in(index_dir, schema), index_dir)\n        else:\n            return (open_dir(index_dir), index_dir)\n    except Exception as e:\n        logger.error(\"Init error: failed to open search index at: '{}': {} \".format(index_dir, e))\n        raise", "docstring": "Creates new index or opens existing.\n\nArgs:\nroot_dir (str): root dir where to find or create index.\nschema (whoosh.fields.Schema): schema of the index to create or open.\nindex_name (str): name of the index.\n\nReturns:\ntuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory.", "source": "codesearchnet"}
{"code": "def register_extension(self, ext_in, ext_out, force=False):\n        \n        if not force and (ext_in in self.__extensions.keys()):\n            self.log_warning(\"Extension %s already exist, ignore redefinition.\" % ext_in)\n            return\n\n        self.__extensions[ext_in] = ext_out", "docstring": "Add/register a file extension.\n\n\nArgs:\next_in (str): Extension of input files.\next_out (str): Extension of corresponding output files.\nforce (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing.\nIf the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.", "source": "juraj-google-style"}
{"code": "def MessageToJson(message, including_default_value_fields=False):\n  \n  js = _MessageToJsonObject(message, including_default_value_fields)\n  return json.dumps(js, indent=2)", "docstring": "Converts protobuf message to JSON format.\n\nArgs:\nmessage: The protocol buffers message instance to serialize.\nincluding_default_value_fields: If True, singular primitive fields,\nrepeated fields, and map fields will always be serialized.  If\nFalse, only serialize non-empty fields.  Singular message fields\nand oneof fields are not affected by this option.\n\nReturns:\nA string containing the JSON formatted protocol buffer message.", "source": "juraj-google-style"}
{"code": "def get_all_thread_ids(self):\n    json = self._get_json(self._url.thread_list())\n    return [thread['no'] for page in json for thread in page['threads']]", "docstring": "Return the ID of every thread on this board.\n\nReturns:\nlist of ints: List of IDs of every thread on this board.", "source": "codesearchnet"}
{"code": "def handle_upnp_error(self, xml_error):\n        \n\n        \n\n        \n        \n        \n        \n        \n        \n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n        xml_error = xml_error.encode('utf-8')\n        error = XML.fromstring(xml_error)\n        log.debug(\"Error %s\", xml_error)\n        error_code = error.findtext(\n            '.\n        if error_code is not None:\n            description = self.UPNP_ERRORS.get(int(error_code), '')\n            raise SoCoUPnPException(\n                message='UPnP Error {} received: {} from {}'.format(\n                    error_code, description, self.soco.ip_address),\n                error_code=error_code,\n                error_description=description,\n                error_xml=xml_error\n            )\n        else:\n            \n            log.error(\"Unknown error received from %s\", self.soco.ip_address)\n            raise UnknownSoCoException(xml_error)", "docstring": "Disect a UPnP error, and raise an appropriate exception.\n\nArgs:\nxml_error (str):  a unicode string containing the body of the\nUPnP/SOAP Fault response. Raises an exception containing the\nerror code.", "source": "juraj-google-style"}
{"code": "def _FormatField(self, field):\n    \n    if self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES):\n      return field.replace(self._FIELD_DELIMITER, ' ')\n    return field", "docstring": "Formats a field.\n\nArgs:\nfield (str): field value.\n\nReturns:\nstr: formatted field value.", "source": "juraj-google-style"}
{"code": "def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):\n    do_image_splitting = images_kwargs.get('do_image_splitting', None) or self.do_image_splitting\n    max_image_size = images_kwargs.get('max_image_size', None) or self.max_image_size\n    size = images_kwargs.get('size', None) or self.size\n    if do_image_splitting:\n        height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size['longest_edge'])\n        height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=4096)\n        aspect_ratio = width / height\n        if width >= height:\n            resized_width = math.ceil(width / max_image_size['longest_edge']) * max_image_size['longest_edge']\n            resized_height = int(width / aspect_ratio)\n            resized_height = math.ceil(height / max_image_size['longest_edge']) * max_image_size['longest_edge']\n        elif height > width:\n            resized_height = math.ceil(height / max_image_size['longest_edge']) * max_image_size['longest_edge']\n            resized_width = int(height * aspect_ratio)\n            resized_width = math.ceil(width / max_image_size['longest_edge']) * max_image_size['longest_edge']\n        max_height = max_width = max_image_size['longest_edge']\n        if resized_height > max_height or resized_width > max_width:\n            num_rows = math.ceil(resized_height / max_height)\n            num_cols = math.ceil(resized_width / max_width)\n            num_patches = num_rows * num_cols + 1\n    return num_patches", "docstring": "A utility that returns number of image patches for a given image size.\n\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nimages_kwargs (`dict`, *optional*)\nAny kwargs to override defaults of the image processor.\nReturns:\n`int`: Number of patches per image.", "source": "github-repos"}
{"code": "def rtruediv(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"rtruediv\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Div this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the div against this.\naxis: The axis to div over.\nlevel: The Multilevel index level to apply div over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the rdiv applied.", "source": "juraj-google-style"}
{"code": "class TFIdeficsVisionEncoder(tf.keras.layers.Layer):\n\n    def __init__(self, config: IdeficsVisionConfig, **kwargs):\n        super().__init__(**kwargs)\n        self.config = config\n        self.layers = [TFIdeficsVisionEncoderLayer(config, name=f'layers.{i}') for i in range(config.num_hidden_layers)]\n        self.gradient_checkpointing = False\n\n    def call(self, inputs_embeds, attention_mask: Optional[tf.Tensor]=None, causal_attention_mask: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and training:\n\n                def create_custom_forward(module):\n\n                    def custom_forward(*inputs):\n                        return module(*inputs, output_attentions)\n                    return custom_forward\n                layer_outputs = tf.recompute_grad(create_custom_forward(encoder_layer), hidden_states, attention_mask, causal_attention_mask)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'layers', None) is not None:\n            for layer in self.layers:\n                with tf.name_scope(layer.name):\n                    layer.build(None)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`TFIdeficsVisionEncoderLayer`].\n\nArgs:\nconfig: IdeficsVisionConfig", "source": "github-repos"}
{"code": "def fit_transform(self, tables=None, transformer_dict=None, transformer_list=None, missing=None):\n    if (missing is None):\n        missing = self.missing\n    else:\n        self.missing = missing\n        warnings.warn(DEPRECATION_MESSAGE.format('fit_transform'), DeprecationWarning)\n    transformed = {}\n    if (tables is None):\n        tables = self.table_dict\n    if ((transformer_dict is None) and (transformer_list is None)):\n        transformer_dict = self.transformer_dict\n    for table_name in tables:\n        (table, table_meta) = tables[table_name]\n        transformed_table = self.fit_transform_table(table, table_meta, transformer_dict, transformer_list)\n        transformed[table_name] = transformed_table\n    return transformed", "docstring": "Create, apply and store the specified transformers for the given tables.\n\nArgs:\ntables(dict):   Mapping of table names to `tuple` where each tuple is on the form\n(`pandas.DataFrame`, `dict`). The `DataFrame` contains the table data\nand the `dict` the corresponding meta information.\nIf not specified, the tables will be retrieved using the meta_file.\n\ntransformer_dict(dict):     Mapping  `tuple(str, str)` -> `str` where the tuple is\n(table_name, column_name).\n\ntransformer_list(list):     List of transformers to use. Overrides the transformers in\nthe meta_file.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\ndict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).", "source": "codesearchnet"}
{"code": "def FindMessageTypeByName(self, full_name):\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if (full_name not in self._descriptors):\n        self.FindFileContainingSymbol(full_name)\n    return self._descriptors[full_name]", "docstring": "Loads the named descriptor from the pool.\n\nArgs:\nfull_name: The full name of the descriptor to load.\n\nReturns:\nThe descriptor for the named type.", "source": "codesearchnet"}
{"code": "def Trim(lst, limit):\n    limit = max(0, limit)\n    clipping = lst[limit:]\n    del lst[limit:]\n    return clipping", "docstring": "Trims a given list so that it is not longer than given limit.\n\nArgs:\nlst: A list to trim.\nlimit: A maximum number of elements in the list after trimming.\n\nReturns:\nA suffix of the input list that was trimmed.", "source": "codesearchnet"}
{"code": "def notify(self, notices):\n    tmpl_html = get_template('required_tags_notice.html')\n    tmpl_text = get_template('required_tags_notice.txt')\n    for (recipient, data) in list(notices.items()):\n        body_html = tmpl_html.render(data=data)\n        body_text = tmpl_text.render(data=data)\n        send_notification(subsystem=self.ns, recipients=[recipient], subject=self.email_subject, body_html=body_html, body_text=body_text)", "docstring": "Send notifications to the recipients provided\n\nArgs:\nnotices (:obj:`dict` of `str`: `list`): A dictionary mapping notification messages to the recipient.\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def completely_parse_reader(parser: Parser[(Input, Output)], reader: Reader[Input]) -> Result[Output]:\n    result = (parser << eof).consume(reader)\n    if isinstance(result, Continue):\n        return Success(result.value)\n    else:\n        used = set()\n        unique_expected = []\n        for expected_lambda in result.expected:\n            expected = expected_lambda()\n            if (expected not in used):\n                used.add(expected)\n                unique_expected.append(expected)\n        return Failure(result.farthest.expected_error(' or '.join(unique_expected)))", "docstring": "Consume reader and return Success only on complete consumption.\n\nThis is a helper function for ``parse`` methods, which return ``Success``\nwhen the input is completely consumed and ``Failure`` with an appropriate\nmessage otherwise.\n\nArgs:\nparser: The parser doing the consuming\nreader: The input being consumed\n\nReturns:\nA parsing ``Result``", "source": "codesearchnet"}
{"code": "def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool:\n        \n\n        max_actions = {\n            Channels.facebook: 3,\n            Channels.skype: 3,\n            Channels.ms_teams: 3,\n            Channels.line: 99,\n            Channels.slack: 100,\n            Channels.emulator: 100,\n            Channels.direct_line: 100,\n            Channels.webchat: 100,\n            Channels.cortana: 100,\n        }\n        return button_cnt <= max_actions[channel_id] if channel_id in max_actions else False", "docstring": "Determine if a number of Card Actions are supported by a Channel.\n\nArgs:\nchannel_id (str): The Channel to check if the Card Actions are supported in.\nbutton_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel.\n\nReturns:\nbool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support that number of Card Actions.", "source": "juraj-google-style"}
{"code": "def download_listing(self, file: Optional[IO], duration_timeout: Optional[float]=None) -> ListingResponse:\n    if (self._session_state != SessionState.directory_request_sent):\n        raise RuntimeError('File request not sent')\n    self._session_state = SessionState.file_request_sent\n    (yield from self.download(file=file, rewind=False, duration_timeout=duration_timeout))\n    try:\n        if (self._response.body.tell() == 0):\n            listings = ()\n        elif (self._listing_type == 'mlsd'):\n            self._response.body.seek(0)\n            machine_listings = wpull.protocol.ftp.util.parse_machine_listing(self._response.body.read().decode('utf-8', errors='surrogateescape'), convert=True, strict=False)\n            listings = list(wpull.protocol.ftp.util.machine_listings_to_file_entries(machine_listings))\n        else:\n            self._response.body.seek(0)\n            file = io.TextIOWrapper(self._response.body, encoding='utf-8', errors='surrogateescape')\n            listing_parser = ListingParser(file=file)\n            listings = list(listing_parser.parse_input())\n            _logger.debug('Listing detected as %s', listing_parser.type)\n            file.detach()\n    except (ListingError, ValueError) as error:\n        raise ProtocolError(*error.args) from error\n    self._response.files = listings\n    self._response.body.seek(0)\n    self._session_state = SessionState.response_received\n    return self._response", "docstring": "Read file listings.\n\nArgs:\nfile: A file object or asyncio stream.\nduration_timeout: Maximum time in seconds of which the\nentire file must be read.\n\nReturns:\nA Response populated the file listings\n\nBe sure to call :meth:`start_file_listing` first.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def filter_queryset(self, request, term, queryset=None, **dependent_fields):\n    if (queryset is None):\n        queryset = self.get_queryset()\n    search_fields = self.get_search_fields()\n    select = Q()\n    term = term.replace('\\t', ' ')\n    term = term.replace('\\n', ' ')\n    for t in [t for t in term.split(' ') if (not (t == ''))]:\n        select &= reduce((lambda x, y: (x | Q(**{y: t}))), search_fields, Q(**{search_fields[0]: t}))\n    if dependent_fields:\n        select &= Q(**dependent_fields)\n    return queryset.filter(select).distinct()", "docstring": "Return QuerySet filtered by search_fields matching the passed term.\n\nArgs:\nrequest (django.http.request.HttpRequest): The request is being passed from\nthe JSON view and can be used to dynamically alter the response queryset.\nterm (str): Search term\nqueryset (django.db.models.query.QuerySet): QuerySet to select choices from.\n**dependent_fields: Dependent fields and their values. If you want to inherit\nfrom ModelSelect2Mixin and later call to this method, be sure to pop\neverything from keyword arguments that is not a dependent field.\n\nReturns:\nQuerySet: Filtered QuerySet", "source": "codesearchnet"}
{"code": "def mlir_convert(options, saved_model_dir, input_tensors, output_tensors, **kwargs):\n    test_params = kwargs.get('test_params', {})\n    extra_convert_options = kwargs.get('extra_convert_options', zip_test_utils.ExtraConvertOptions())\n    tflite_model = None\n    log = ''\n    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir, [signature_key])\n    converter.allow_custom_ops = extra_convert_options.allow_custom_ops\n    converter.experimental_new_quantizer = options.mlir_quantizer\n    if options.make_tf_ptq_tests:\n        if options.hlo_aware_conversion:\n            tf_quantization_mode = 'DEFAULT'\n        else:\n            tf_quantization_mode = 'LEGACY_INTEGER'\n        converter._experimental_tf_quantization_mode = tf_quantization_mode\n    if options.run_with_flex:\n        converter.target_spec.supported_ops = set([lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS])\n    if options.enable_dynamic_update_slice:\n        converter._experimental_enable_dynamic_update_slice = True\n    converter.unfold_batchmatmul = options.unfold_batchmatmul\n    if test_params.get('dynamic_range_quantize', False):\n        converter.optimizations = [lite.Optimize.DEFAULT]\n    if options.experimental_low_bit_qat:\n        converter._experimental_low_bit_qat = True\n    if test_params.get('fully_quantize', False):\n        converter.optimizations = [lite.Optimize.DEFAULT]\n        min_value, max_value = test_params.get('input_range', (-1, 1))\n\n        def representative_dataset(input_tensors):\n            calibration_inputs = {}\n            for name, shape, dtype in input_tensors:\n                if shape:\n                    dims = [1 if dim.value is None else dim.value for dim in shape.dims]\n                    calibration_inputs[name] = np.random.uniform(min_value, max_value, tuple(dims)).astype(dtype.as_numpy_dtype)\n            return calibration_inputs\n\n        def representative_dataset_gen():\n            for _ in range(100):\n                yield representative_dataset(input_tensors)\n        if test_params.get('quant_16x8', False):\n            converter.target_spec.supported_ops = [lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]\n        else:\n            converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]\n        converter.representative_dataset = representative_dataset_gen\n        if extra_convert_options.inference_input_type:\n            converter.inference_input_type = extra_convert_options.inference_input_type\n        if extra_convert_options.inference_output_type:\n            converter.inference_output_type = extra_convert_options.inference_output_type\n    try:\n        tflite_model = converter.convert()\n        if options.expected_ops_in_converted_model:\n            ops_list = tflite_test_util.get_ops_list(tflite_model)\n            for expected_op in options.expected_ops_in_converted_model:\n                if expected_op not in ops_list:\n                    tflite_model = None\n                    raise ValueError('{} op not found in the converted model'.format(expected_op))\n    except Exception as e:\n        log = str(e)\n    return (tflite_model, log)", "docstring": "Convert a saved model into a tflite model with MLIR-based conversion.\n\nArgs:\noptions: A lite.testing.generate_examples_lib.Options instance.\nsaved_model_dir: Path to the saved model.\ninput_tensors: List of input tensor tuples `(name, 
shape, type)`.\noutput_tensors: List of output tensors (names).\n**kwargs: Extra parameters.\n\nReturns:\noutput tflite model, log_txt from conversion\nor None, log_txt if it did not convert properly.", "source": "github-repos"}
{"code": "async def verify_scriptworker_task(chain, obj):\n    errors = []\n    if (obj.worker_impl != 'scriptworker'):\n        errors.append('{} {} must be run from scriptworker!'.format(obj.name, obj.task_id))\n    raise_on_errors(errors)", "docstring": "Verify the signing trust object.\n\nCurrently the only check is to make sure it was run on a scriptworker.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\nobj (ChainOfTrust or LinkOfTrust): the trust object for the signing task.", "source": "codesearchnet"}
{"code": "def html_page_for_render_items(bundle, docs_json, render_items, title, template=None, template_variables={}):\n    if (title is None):\n        title = DEFAULT_TITLE\n    (bokeh_js, bokeh_css) = bundle\n    json_id = make_id()\n    json = escape(serialize_json(docs_json), quote=False)\n    json = wrap_in_script_tag(json, 'application/json', json_id)\n    script = wrap_in_script_tag(script_for_render_items(json_id, render_items))\n    context = template_variables.copy()\n    context.update(dict(title=title, bokeh_js=bokeh_js, bokeh_css=bokeh_css, plot_script=(json + script), docs=render_items, base=FILE, macros=MACROS))\n    if (len(render_items) == 1):\n        context['doc'] = context['docs'][0]\n        context['roots'] = context['doc'].roots\n    context['plot_div'] = '\\n'.join((div_for_render_item(item) for item in render_items))\n    if (template is None):\n        template = FILE\n    elif isinstance(template, string_types):\n        template = _env.from_string(('{% extends base %}\\n' + template))\n    html = template.render(context)\n    return encode_utf8(html)", "docstring": "Render an HTML page from a template and Bokeh render items.\n\nArgs:\nbundle (tuple):\na tuple containing (bokehjs, bokehcss)\n\ndocs_json (JSON-like):\nSerialized Bokeh Document\n\nrender_items (RenderItems)\nSpecific items to render from the document and where\n\ntitle (str or None)\nA title for the HTML page. If None, DEFAULT_TITLE is used\n\ntemplate (str or Template or None, optional) :\nA Template to be used for the HTML page. If None, FILE is used.\n\ntemplate_variables (dict, optional):\nAny Additional variables to pass to the template\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def image_format(value):\n    if (value.image.format.upper() not in constants.ALLOWED_IMAGE_FORMATS):\n        raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT)", "docstring": "Confirms that the uploaded image is of supported format.\n\nArgs:\nvalue (File): The file with an `image` property containing the image\n\nRaises:\ndjango.forms.ValidationError", "source": "codesearchnet"}
{"code": "def GetString(self):\n    string_list = []\n    string_list.append('Report generated from: {0:s}'.format(self.plugin_name))\n    time_compiled = getattr(self, 'time_compiled', 0)\n    if time_compiled:\n        time_compiled = timelib.Timestamp.CopyToIsoFormat(time_compiled)\n        string_list.append('Generated on: {0:s}'.format(time_compiled))\n    filter_string = getattr(self, 'filter_string', '')\n    if filter_string:\n        string_list.append('Filter String: {0:s}'.format(filter_string))\n    string_list.append('')\n    string_list.append('Report text:')\n    string_list.append(self.text)\n    return '\\n'.join(string_list)", "docstring": "Retrieves a string representation of the report.\n\nReturns:\nstr: string representation of the report.", "source": "codesearchnet"}
{"code": "def top_1(x, reduced_dim, dtype=tf.int32, name=None):\n  \n  reduced_dim = convert_to_dimension(reduced_dim)\n  with tf.name_scope(name, default_name=\"top_1\"):\n    max_val = reduce_max(x, reduced_dim=reduced_dim)\n    is_max = to_float(equal(x, max_val))\n    pos = mtf_range(x.mesh, reduced_dim, tf.float32)\n    ret = reduce_max(is_max * pos, reduced_dim=reduced_dim)\n    ret = cast(ret, dtype)\n    return ret, max_val", "docstring": "Argmax and Max.\n\nArgs:\nx: a Tensor\nreduced_dim: a Dimension in x.shape.dims\ndtype: a tf.dtype (for the output)\nname: an optional string\nReturns:\nindices: a Tensor with given dtype\nvalues: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)", "source": "juraj-google-style"}
{"code": "def create_package(name, data, package_cls=None):\n    \n    from rez.package_maker__ import PackageMaker\n    maker = PackageMaker(name, data, package_cls=package_cls)\n    return maker.get_package()", "docstring": "Create a package given package data.\n\nArgs:\nname (str): Package name.\ndata (dict): Package data. Must conform to `package_maker.package_schema`.\n\nReturns:\n`Package` object.", "source": "juraj-google-style"}
{"code": "def save(tiff_filename, numpy_data):\n    tiff_filename = os.path.expanduser(tiff_filename)\n    if (type(numpy_data) is str):\n        fp = open(png_filename, 'wb')\n        fp.write(numpy_data)\n        fp.close()\n        return png_filename\n    try:\n        img = tiff.imsave(tiff_filename, numpy_data)\n    except Exception as e:\n        raise ValueError('Could not save TIFF file {0}.'.format(tiff_filename))\n    return tiff_filename", "docstring": "Export a numpy array to a TIFF file.\n\nArguments:\ntiff_filename:  A filename to which to save the TIFF data\nnumpy_data:     The numpy array to save to TIFF\n\nReturns:\nString. The expanded filename that now holds the TIFF data", "source": "codesearchnet"}
{"code": "def open_port(upnp, internal_port, external_start_port=None):\n    \n    if external_start_port is None:\n        external_start_port = internal_port\n\n    if upnp is None:\n        return False\n\n    def register(internal, external):\n        \n        mapping = upnp.getspecificportmapping(external, 'UDP')\n        if mapping is not None:\n            \n            lanaddr, internal_mapped, name, _, _ = mapping\n\n            is_valid_mapping = (\n                lanaddr == upnp.lanaddr and\n                name == RAIDEN_IDENTIFICATOR and\n                internal_mapped == internal\n            )\n            is_not_our_mapping = (\n                internal_mapped != internal and\n                name != RAIDEN_IDENTIFICATOR\n            )\n            is_previous_mapping = (\n                internal_mapped != internal and\n                name == RAIDEN_IDENTIFICATOR and\n                lanaddr == upnp.lanaddr\n            )\n            if is_valid_mapping:\n                log.debug(\n                    'keeping pre-existing portmapping',\n                    internal=internal,\n                    external=external,\n                    lanaddr=lanaddr,\n                )\n                return True\n            elif lanaddr != upnp.lanaddr:\n                \n                log.debug(\n                    'ignoring existing mapping for other IP',\n                    internal=internal,\n                    external=external,\n                    other_ip=lanaddr,\n                    our_ip=upnp.lanaddr,\n                )\n                return False\n            elif is_not_our_mapping:\n                log.debug(\n                    'ignoring existing mapping for other program',\n                    name=name,\n                )\n                \n                return False\n            elif is_previous_mapping:\n                \n                log.debug('releasing previous port mapping')\n                upnp.deleteportmapping(external, 'UDP')\n\n        log.debug('trying to create new port mapping', internal=internal, external=external)\n        return upnp.addportmapping(\n            external,\n            'UDP',\n            upnp.lanaddr,\n            internal,\n            RAIDEN_IDENTIFICATOR,\n            '',\n        )\n\n    external_port = external_start_port\n    success = register(internal_port, external_port)\n    while not success and external_port <= MAX_PORT:\n        external_port += 1\n        log.debug('trying', external=external_port)\n        success = register(internal_port, external_port)\n\n    if success:\n        return upnp.externalipaddress(), external_port\n    else:\n        log.error(\n            'could not register a port-mapping',\n            location='FIXME',\n        )\n        return False\n\n    return False", "docstring": "Open a port for the raiden service (listening at `internal_port`) through\nUPnP.\n\nArgs:\ninternal_port (int): the target port of the raiden service\nexternal_start_port (int): query for an external port starting here\n(default: internal_port)\nReturns:\nexternal_ip_address, external_port (tuple(str, int)): if successful or None", "source": "juraj-google-style"}
{"code": "def _escaped_token_to_subtoken_ids(self, escaped_token):\n    return [self._subtoken_string_to_id[subtoken] for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)]", "docstring": "Converts an escaped token string to a list of subtoken IDs.\n\nArgs:\nescaped_token: An escaped token as a unicode string.\nReturns:\nA list of subtoken IDs as integers.", "source": "codesearchnet"}
{"code": "def _error_messages(self, driver_id):\n        \n        assert isinstance(driver_id, ray.DriverID)\n        message = self.redis_client.execute_command(\n            \"RAY.TABLE_LOOKUP\", ray.gcs_utils.TablePrefix.ERROR_INFO, \"\",\n            driver_id.binary())\n\n        \n        if message is None:\n            return []\n\n        gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(\n            message, 0)\n        error_messages = []\n        for i in range(gcs_entries.EntriesLength()):\n            error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(\n                gcs_entries.Entries(i), 0)\n            assert driver_id.binary() == error_data.DriverId()\n            error_message = {\n                \"type\": decode(error_data.Type()),\n                \"message\": decode(error_data.ErrorMessage()),\n                \"timestamp\": error_data.Timestamp(),\n            }\n            error_messages.append(error_message)\n        return error_messages", "docstring": "Get the error messages for a specific driver.\n\nArgs:\ndriver_id: The ID of the driver to get the errors for.\n\nReturns:\nA list of the error messages for this driver.", "source": "juraj-google-style"}
{"code": "def __init__(self, code=None, contract_properties=0, name=None, version=None, author=None, email=None,\n                 description=None):\n        \n        self.Code = code\n        self.ContractProperties = contract_properties\n        self.Name = name\n        self.CodeVersion = version\n        self.Author = author\n        self.Email = email\n        self.Description = description", "docstring": "Create an instance.\n\nArgs:\ncode (neo.Core.FunctionCode):\ncontract_properties (neo.SmartContract.ContractParameterType): contract type.\nname (bytes):\nversion (bytes):\nauthor (bytes):\nemail (bytes):\ndescription (bytes):", "source": "juraj-google-style"}
{"code": "def _GetKeysDefaultEmpty(self, top_level, keys, depth=1):\n    keys = set(keys)\n    match = {}\n    if (depth == 1):\n        for key in keys:\n            value = top_level.get(key, None)\n            if (value is not None):\n                match[key] = value\n    else:\n        for (_, parsed_key, parsed_value) in plist_interface.RecurseKey(top_level, depth=depth):\n            if (parsed_key in keys):\n                match[parsed_key] = parsed_value\n                if (set(match.keys()) == keys):\n                    return match\n    return match", "docstring": "Retrieves plist keys, defaulting to empty values.\n\nArgs:\ntop_level (plistlib._InternalDict): top level plist object.\nkeys (set[str]): names of keys that should be returned.\ndepth (int): depth within the plist, where 1 is top level.\n\nReturns:\ndict[str, str]: values of the requested keys.", "source": "codesearchnet"}
{"code": "def determine_final_config(config_module):\n    config = Config(DEFAULT_LIBRARY_RC_ADDITIONS, DEFAULT_LIBRARY_RC_REPLACEMENTS, DEFAULT_TEST_RC_ADDITIONS, DEFAULT_TEST_RC_REPLACEMENTS)\n    for field in config._fields:\n        if hasattr(config_module, field):\n            config = config._replace(**{field: getattr(config_module, field)})\n    return config", "docstring": "Determines the final additions and replacements.\n\nCombines the config module with the defaults.\n\nArgs:\nconfig_module: The loaded local configuration module.\n\nReturns:\nConfig: the final configuration.", "source": "codesearchnet"}
{"code": "def read(self, filename, binary_mode=False, size=None, offset=None):\n        \n        mode = \"rb\" if binary_mode else \"r\"\n        with io.open(filename, mode) as f:\n            if offset is not None:\n                f.seek(offset)\n            if size is not None:\n                return f.read(size)\n            else:\n                return f.read()", "docstring": "Reads contents of a file to a string.\n\nArgs:\nfilename: string, a path\nbinary_mode: bool, read as binary if True, otherwise text\nsize: int, number of bytes or characters to read, otherwise\nread all the contents of the file from the offset\noffset: int, offset into file to read from, otherwise read\nfrom the very beginning\n\nReturns:\nSubset of the contents of the file as a string or bytes.", "source": "juraj-google-style"}
{"code": "def event(self, name, **kwargs):\n        \n        group_obj = Event(name, **kwargs)\n        return self._group(group_obj)", "docstring": "Add Event data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nevent_date (str, kwargs): The event datetime expression for this Group.\nstatus (str, kwargs): The status for this Group.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Event.", "source": "juraj-google-style"}
{"code": "def emit_flow_end(self, name: str, timestamp: int, pid: int, tid: int, flow_id: int) -> None:\n    event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)\n    event['id'] = flow_id\n    self._events.append(event)", "docstring": "Adds a flow end event to the trace.\n\nWhen matched with a flow start event (with the same 'flow_id') this will\ncause the trace viewer to draw an arrow between the start and end events.\n\nArgs:\nname:  The event name as a string.\ntimestamp:  The timestamp of this event as a long integer.\npid:  Identifier of the process generating this event as an integer.\ntid:  Identifier of the thread generating this event as an integer.\nflow_id: Identifier of the flow as an integer.", "source": "github-repos"}
{"code": "def get_rollout_from_id(self, rollout_id):\n    layer = self.rollout_id_map.get(rollout_id)\n    if layer:\n        return layer\n    self.logger.error(('Rollout with ID \"%s\" is not in datafile.' % rollout_id))\n    return None", "docstring": "Get rollout for the provided ID.\n\nArgs:\nrollout_id: ID of the rollout to be fetched.\n\nReturns:\nRollout corresponding to the provided ID.", "source": "codesearchnet"}
{"code": "def write_index(fn, index):\n    \n    with open(fn, \"wb\") as o_file:\n        o_file.write(_CHECK_STRING)\n        o_file.write(zlib.compress(bytes(\n            index.to_csv(None, index=False, encoding=\"utf-8\"),\n            encoding=\"utf-8\",\n        )))", "docstring": "Writes the index to file.\n\nArgs:\nfn (str): the name of the file that will contain the index.\nindex (pandas.DataFrame): the index.", "source": "juraj-google-style"}
{"code": "def createURL(self, word, mode='phonefy'):\n    try:\n        return self.modes[mode]['url'].format(placeholder=urllib.pathname2url(word))\n    except:\n        if (mode == 'base'):\n            if (word[0] == '/'):\n                return ((self.baseURL + word[1:]), word)\n            else:\n                return (self.baseURL + word)\n        else:\n            try:\n                return self.url[mode].replace((('<' + mode) + '>'), urllib.pathname2url(word))\n            except:\n                pass\n    return None", "docstring": "Method to create the URL replacing the word in the appropriate URL.\n\nArgs:\n-----\nword: Word to be searched.\nmode: Mode to be executed.\n\nReturn:\n-------\nThe URL to be queried.", "source": "codesearchnet"}
{"code": "def AddDatastore(self, urn):\n    if (urn not in self._datastores):\n        self._datastores.add(urn)\n        return True\n    return False", "docstring": "Adds a datastore URN as a source.\n\nArgs:\nurn: an RDF URN value of the datastore.\n\nReturns:\nTrue if the datastore is not an already existing source.", "source": "codesearchnet"}
{"code": "def substitute(self, var_map, cont=False, tag=None):\n    return self.apply(substitute, var_map=var_map, cont=cont, tag=tag)", "docstring": "Substitute sub-expressions both on the lhs and rhs\n\nArgs:\nvar_map (dict): Dictionary with entries of the form\n``{expr: substitution}``", "source": "codesearchnet"}
{"code": "def remove_pad(x, pad_remover, mode):\n  \n  \n  x = expert_utils.flatten_all_but_last(x)\n\n  \n  if mode != ModeKeys.PREDICT:\n    \n    \n    \n    x = pad_remover.remove(x)\n\n  x = tf.expand_dims(x, axis=0)  \n  return x", "docstring": "Remove padding by concatenating all dimension into one.\n\nArgs:\nx (tf.Tensor): input of shape [batch_size, length, depth]\npad_remover (obj): a PadRemover object\nmode (ModeKeys): infer, train or eval. If inference, the padding remover is\nnot applied\n\nReturns:\ntf.Tensor of shape [1,length_nonpad,depth] where\nlength_nonpad <= batch_size*length", "source": "juraj-google-style"}
{"code": "def process_usufy(self, data):\n        \n        mode = \"usufy\"\n        info = []\n\n        try:\n            \n            verifier = self.modes.get(mode, {}).get(\"extra_fields\", {})\n            for field in verifier.keys():\n                regexp = verifier[field]\n                values = re.findall(regexp, data)\n\n                for val in values:\n                    aux = {}\n                    aux[\"type\"] = field\n                    aux[\"value\"] = val\n                    aux[\"attributes\"] = []\n                    if aux not in info:\n                        info.append(aux)\n        except AttributeError as e:\n            \n            for field in self.fieldsRegExp[mode].keys():\n                \n                try:\n                    \n                    regexp = self.fieldsRegExp[mode][field][\"start\"]+\"([^\\)]+)\"+self.fieldsRegExp[mode][field][\"end\"]\n\n                    tmp = re.findall(regexp, data)\n\n                    \n                    values = []\n                    for t in tmp:\n                        if self.fieldsRegExp[mode][field][\"end\"] in t:\n\n                            values.append(t.split(self.fieldsRegExp[mode][field][\"end\"])[0])\n                        else:\n                            values.append(t)\n                except:\n                    \n                    regexp = self.fieldsRegExp[mode][field]\n\n                    values = re.findall(regexp, data)\n\n                for val in values:\n                    aux = {}\n                    aux[\"type\"] = field\n                    aux[\"value\"] = val\n                    aux[\"attributes\"] = []\n                    if aux not in info:\n                        info.append(aux)\n        return info", "docstring": "Method to process and extract the entities of a usufy\n\nArgs:\n-----\ndata: The information from which the info will be extracted.\n\nReturn:\n-------\nA list of the entities found.", "source": "juraj-google-style"}
{"code": "def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:\n    (X, Y) = self._transform_batch(data, labels)\n    self.model_.train_on_batch(X, Y)", "docstring": "Trains model on a single batch\n\nArgs:\ndata: a batch of word sequences\nlabels: a batch of correct tag sequences\nReturns:\nthe trained model", "source": "codesearchnet"}
{"code": "def ContainsIgnoreCase(self, value):\n    self._awql = self._CreateSingleValueCondition(value, 'CONTAINS_IGNORE_CASE')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains ignore case\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def add_option(self, section, name, value):\n    if self._is_live():\n        raise RuntimeError('Submitted units cannot update their options')\n    option = {'section': section, 'name': name, 'value': value}\n    self._data['options'].append(option)\n    return True", "docstring": "Add an option to a section of the unit file\n\nArgs:\nsection (str): The name of the section, If it doesn't exist it will be created\nname (str): The name of the option to add\nvalue (str): The value of the option\n\nReturns:\nTrue: The item was added", "source": "codesearchnet"}
{"code": "def get_direct_band_gap_dict(self):\n    if self.is_metal():\n        raise ValueError('get_direct_band_gap_dict shouldonly be used with non-metals')\n    direct_gap_dict = {}\n    for (spin, v) in self.bands.items():\n        above = v[np.all((v > self.efermi), axis=1)]\n        min_above = np.min(above, axis=0)\n        below = v[np.all((v < self.efermi), axis=1)]\n        max_below = np.max(below, axis=0)\n        diff = (min_above - max_below)\n        kpoint_index = np.argmin(diff)\n        band_indices = [np.argmax(below[(:, kpoint_index)]), (np.argmin(above[(:, kpoint_index)]) + len(below))]\n        direct_gap_dict[spin] = {'value': diff[kpoint_index], 'kpoint_index': kpoint_index, 'band_indices': band_indices}\n    return direct_gap_dict", "docstring": "Returns a dictionary of information about the direct\nband gap\n\nReturns:\na dictionary of the band gaps indexed by spin\nalong with their band indices and k-point index", "source": "codesearchnet"}
{"code": "def _line_is_numpy_parameter_type(line_info):\n    line_stripped = line_info.remaining.strip()\n    if ':' in line_stripped:\n        previous_indent = line_info.previous.indentation\n        current_indent = line_info.indentation\n        if ':' in line_info.previous.line and current_indent > previous_indent:\n            return False\n        else:\n            return True\n    return False", "docstring": "Returns whether the line contains a numpy style parameter type definition.\n\nWe look for a line of the form:\nx : type\n\nAnd we have to exclude false positives on argument descriptions containing a\ncolon by checking the indentation of the line above.\n\nArgs:\nline_info: Information about the current line.\nReturns:\nTrue if the line is a numpy parameter type definition, False otherwise.", "source": "github-repos"}
{"code": "def participant_from_submission_path(submission_path):\n    basename = os.path.basename(submission_path)\n    file_ext = None\n    for e in ALLOWED_EXTENSIONS:\n        if basename.endswith(e):\n            file_ext = e\n            break\n    if (not file_ext):\n        raise ValueError(('Invalid submission path: ' + submission_path))\n    basename = basename[:(- len(file_ext))]\n    if basename.isdigit():\n        return {'team_id': int(basename)}\n    if basename.startswith('baseline_'):\n        return {'baseline_id': basename[len('baseline_'):]}\n    raise ValueError(('Invalid submission path: ' + submission_path))", "docstring": "Parses type of participant based on submission filename.\n\nArgs:\nsubmission_path: path to the submission in Google Cloud Storage\n\nReturns:\ndict with one element. Element key correspond to type of participant\n(team, baseline), element value is ID of the participant.\n\nRaises:\nValueError: is participant can't be determined based on submission path.", "source": "codesearchnet"}
{"code": "def properties(cls, with_bases=True):\n        \n        if with_bases:\n            return accumulate_from_superclasses(cls, \"__properties__\")\n        else:\n            return set(cls.__properties__)", "docstring": "Collect the names of properties on this class.\n\nThis method *optionally* traverses the class hierarchy and includes\nproperties defined on any parent classes.\n\nArgs:\nwith_bases (bool, optional) :\nWhether to include properties defined on parent classes in\nthe results. (default: True)\n\nReturns:\nset[str] : property names", "source": "juraj-google-style"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)\n    return [1] + [0] * len(token_ids_0)", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def hinge(y_true, y_pred):\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true = ops.cast(y_true, dtype=y_pred.dtype)\n    y_true = ops.convert_to_tensor(y_true)\n    y_true = convert_binary_labels_to_hinge(y_true)\n    return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)", "docstring": "Computes the hinge loss between `y_true` & `y_pred`.\n\nFormula:\n\n```python\nloss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)\n```\n\nArgs:\ny_true: The ground truth values. `y_true` values are expected to be -1\nor 1. If binary (0 or 1) labels are provided they will be converted\nto -1 or 1 with shape = `[batch_size, d0, .. dN]`.\ny_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.\n\nReturns:\nHinge loss values with shape = `[batch_size, d0, .. dN-1]`.\n\nExample:\n\n>>> y_true = np.random.choice([-1, 1], size=(2, 3))\n>>> y_pred = np.random.random(size=(2, 3))\n>>> loss = keras.losses.hinge(y_true, y_pred)", "source": "github-repos"}
{"code": "def decode(self, targets, encoder_outputs, attention_bias):\n    with tf.name_scope('decode'):\n        decoder_inputs = self.embedding_softmax_layer(targets)\n        with tf.name_scope('shift_targets'):\n            decoder_inputs = tf.pad(decoder_inputs, [[0, 0], [1, 0], [0, 0]])[(:, :(- 1), :)]\n        with tf.name_scope('add_pos_encoding'):\n            length = tf.shape(decoder_inputs)[1]\n            decoder_inputs += model_utils.get_position_encoding(length, self.params.hidden_size)\n        if self.train:\n            mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=self.params.layer_postprocess_dropout)\n            decoder_inputs = tf.nn.dropout(decoder_inputs, (1 - self.params.layer_postprocess_dropout))\n        decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(length)\n        outputs = self.decoder_stack(decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias)\n        logits = self.embedding_softmax_layer.linear(outputs)\n        return logits", "docstring": "Generate logits for each value in the target sequence.\n\nArgs:\ntargets: target values for the output sequence.\nint tensor with shape [batch_size, target_length]\nencoder_outputs: continuous representation of input sequence.\nfloat tensor with shape [batch_size, input_length, hidden_size]\nattention_bias: float tensor with shape [batch_size, 1, 1, input_length]\n\nReturns:\nfloat32 tensor with shape [batch_size, target_length, vocab_size]", "source": "codesearchnet"}
{"code": "def _allocate_subnets(self, conf):\n    allocated_subnets = []\n    try:\n        for net_spec in conf.get('nets', {}).itervalues():\n            if (net_spec['type'] != 'nat'):\n                continue\n            gateway = net_spec.get('gw')\n            if gateway:\n                allocated_subnet = self._subnet_store.acquire(self.paths.uuid(), gateway)\n            else:\n                allocated_subnet = self._subnet_store.acquire(self.paths.uuid())\n                net_spec['gw'] = str(allocated_subnet.iter_hosts().next())\n            allocated_subnets.append(allocated_subnet)\n    except:\n        self._subnet_store.release(allocated_subnets)\n        raise\n    return (allocated_subnets, conf)", "docstring": "Allocate all the subnets needed by the given configuration spec\n\nArgs:\nconf (dict): Configuration spec where to get the nets definitions\nfrom\n\nReturns:\ntuple(list, dict): allocated subnets and modified conf", "source": "codesearchnet"}
{"code": "def pair_wise_dice_loss(inputs: Tensor, labels: Tensor) -> Tensor:\n    inputs = inputs.sigmoid().flatten(1)\n    numerator = 2 * torch.matmul(inputs, labels.T)\n    denominator = inputs.sum(-1)[:, None] + labels.sum(-1)[None, :]\n    loss = 1 - (numerator + 1) / (denominator + 1)\n    return loss", "docstring": "A pair wise version of the dice loss, see `dice_loss` for usage.\n\nArgs:\ninputs (`torch.Tensor`):\nA tensor representing a mask\nlabels (`torch.Tensor`):\nA tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs\n(0 for the negative class and 1 for the positive class).\n\nReturns:\n`torch.Tensor`: The computed loss between each pairs.", "source": "github-repos"}
{"code": "def unpack_rpc_payload(resp_format, payload):\n    code = _create_argcode(resp_format, payload)\n    return struct.unpack(code, payload)", "docstring": "Unpack an RPC payload according to resp_format.\n\nArgs:\nresp_format (str): a struct format code (without the <) for the\nparameter format for this RPC.  This format code may include the final\ncharacter V, which means that it expects a variable length bytearray.\npayload (bytes): The binary payload that should be unpacked.\n\nReturns:\nlist: A list of the unpacked payload items.", "source": "codesearchnet"}
{"code": "def compile_keywords(keywords):\n    \n    mdt = []\n    cz_keywords = []\n    en_keywords = []\n    for keyword in keywords:\n        keyword = keyword_to_info(keyword.encode(\"utf-8\"))\n\n        if not keyword:\n            continue\n\n        cz_keywords.append({\n            \"uid\": keyword[\"uid\"],\n            \"zahlavi\": keyword[\"zahlavi\"],\n            \"zdroj\": \"czenas\",\n        })\n\n        if keyword.get(\"mdt\"):\n            mdt.append({\n                \"mdt\": keyword[\"mdt\"],\n                \"mrf\": keyword[\"mrf\"],\n            })\n\n        angl_ekvivalent = keyword.get(\"angl_ekvivalent\")\n        if angl_ekvivalent:\n            en_keywords.append({\n                \"zahlavi\": angl_ekvivalent,\n                \"zdroj\": keyword.get(\"zdroj_angl_ekvivalentu\") or \"eczenas\",\n            })\n\n    return mdt, cz_keywords, en_keywords", "docstring": "Translate `keywords` to full keyword records as they are used in Aleph.\n\nReturns tuple with three lists, each of which is later used in different\npart of the MRC/MARC record.\n\nArgs:\nkeywords (list): List of keyword strings.\n\nReturns:\ntuple: (mdt_list, cz_keyword_list, en_keyword_list)", "source": "juraj-google-style"}
{"code": "def PushItem(self, item, block=True):\n    try:\n        self._queue.put(item, block=block)\n    except Queue.Full as exception:\n        raise errors.QueueFull(exception)", "docstring": "Pushes an item onto the queue.\n\nArgs:\nitem (object): item to add.\nblock (Optional[bool]): True to block the process when the queue is full.\n\nRaises:\nQueueFull: if the item could not be pushed the queue because it's full.", "source": "codesearchnet"}
{"code": "class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput):\n    last_hidden_state: Optional[jnp.ndarray] = None\n    pooler_output: Optional[jnp.ndarray] = None\n    hidden_states: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for model's outputs that also contains a pooling of the last hidden states.\n\nArgs:\nlast_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):\nSequence of hidden-states at the output of the last layer of the model.\npooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):\nLast layer hidden-state after a pooling operation on the spatial dimensions.\nhidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one\nfor the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the\nmodel at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def download(branch=None, buildMod=False):\n    \n    gradlew = \"./gradlew\"\n    if os.name == 'nt':\n        gradlew = \"gradlew.bat\"\n\n    if branch is None:\n        branch = malmo_version\n\n    subprocess.check_call([\"git\", \"clone\", \"-b\", branch, \"https:\n\n    os.chdir(malmo_install_dir)\n    os.chdir(\"Minecraft\")\n    try:\n        \n        pathlib.Path(\"src/main/resources/version.properties\").write_text(\"malmomod.version={}\\n\".format(malmo_version))\n\n        \n        if buildMod:\n            subprocess.check_call([gradlew, \"setupDecompWorkspace\", \"build\", \"testClasses\", \"-x\", \"test\", \"--stacktrace\", \"-Pversion={}\"\n                .format(malmo_version)])\n\n        minecraft_dir = os.getcwd()\n    finally:\n        os.chdir(\"../..\")\n\n    if \"MALMO_XSD_PATH\" not in os.environ:\n        print(\"Please make sure you set the MALMO_XSD_PATH environment variable to \\\"{}/Schemas\\\"!\"\n                 .format(str(pathlib.Path(malmo_install_dir).absolute())))\n    return minecraft_dir", "docstring": "Download Malmo from github and optionaly build the Minecraft Mod.\nArgs:\nbranch: optional branch to clone. Default is release version.\nbuildMod: don't build the Mod unless build arg is given as True.\nReturns:\nThe path for the Malmo Minecraft mod.", "source": "juraj-google-style"}
{"code": "def search_globs(path, patterns):\n    \n    \n    for pattern in (p for p in patterns if p):\n        if pattern.startswith('/'):\n            \n            regex = fnmatch.translate(pattern[1:])\n            regex = regex.replace('\\\\Z', '')\n\n            temp_path = path[1:] if path.startswith('/') else path\n            m = re.search(regex, temp_path)\n\n            if m and m.start() == 0:\n                return True\n\n        else:\n            regex = fnmatch.translate(pattern)\n            regex = regex.replace('\\\\Z', '')\n\n            if re.search(regex, path):\n                return True\n\n    return False", "docstring": "Test whether the given *path* contains any patterns in *patterns*\n\nArgs:\npath (str):\nA file path to test for matches.\npatterns (list[str]):\nA list of glob string patterns to test against. If *path* matches\nany of those patters, it will return True.\n\nReturns:\nbool: **True** if the ``path`` matches any pattern in *patterns*.", "source": "juraj-google-style"}
{"code": "def save(self, vleaf, fpath, cleanup=False, format=None):\n    graph = self.create_graphviz_digraph(vleaf, format=format)\n    graph.render(fpath, cleanup=cleanup)", "docstring": "Save the graph to a given file path.\n\nArgs:\nvleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.\nfpath (`str`): The file path used to save.\ncleanup (`bool`): Clean up the source file after rendering. Default is False.\nformat (str):\nForce overwrite ``format`` (``'pdf', 'png', ...)``) configuration.", "source": "codesearchnet"}
{"code": "def set_consistent(self, consistent_config):\n        \n\n        \n        self.topology._add_job_control_plane()\n        self.oport.operator.consistent(consistent_config)\n        return self._make_placeable()", "docstring": "Indicates that the stream is the start of a consistent region.\n\nArgs:\nconsistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.\n\nReturns:\nStream: Returns this stream.\n\n.. versionadded:: 1.11", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListVoices = channel.unary_unary(\n            \"/google.cloud.texttospeech.v1.TextToSpeech/ListVoices\",\n            request_serializer=google_dot_cloud_dot_texttospeech__v1_dot_proto_dot_cloud__tts__pb2.ListVoicesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_texttospeech__v1_dot_proto_dot_cloud__tts__pb2.ListVoicesResponse.FromString,\n        )\n        self.SynthesizeSpeech = channel.unary_unary(\n            \"/google.cloud.texttospeech.v1.TextToSpeech/SynthesizeSpeech\",\n            request_serializer=google_dot_cloud_dot_texttospeech__v1_dot_proto_dot_cloud__tts__pb2.SynthesizeSpeechRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_texttospeech__v1_dot_proto_dot_cloud__tts__pb2.SynthesizeSpeechResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _get_course_content_from_ecommerce(course_id, site_code=None):\n    \n    api = get_ecommerce_client(site_code=site_code)\n    try:\n        api_response = api.courses(course_id).get()\n    except Exception:  \n        logger.exception(\n            'An error occurred while retrieving data for course run [%s] from the Catalog API.',\n            course_id,\n            exc_info=True\n        )\n        return {}\n\n    return {\n        'title': api_response.get('name'),\n        'verification_deadline': api_response.get('verification_deadline')\n    }", "docstring": "Get course information using the Ecommerce course api.\n\nIn case of error returns empty response.\nArguments:\ncourse_id (str): course key of the course\nsite_code (str): site code\n\nReturns:\ncourse information from Ecommerce", "source": "juraj-google-style"}
{"code": "def _inspect_summary_cache(self, cache, replica_id, step_num, output_stream, tensor_trace_order):\n\n    def _inspect_tensor(tensor):\n        \n        if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:\n            return cond.cond(math_ops.greater(tensor, 0.0), lambda: 'has NaNs/Infs!', lambda: 'has no NaNs or Infs.')\n        else:\n            return tensor\n    if not tensor_trace_order.traced_tensors:\n        logging.warn('Inspect mode has no tensors in the cache to check.')\n        return control_flow_ops.no_op\n    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:\n        step_has_nan_or_inf = math_ops.greater(math_ops.reduce_sum(cache), 0.0)\n    else:\n        step_has_nan_or_inf = math_ops.reduce_any(gen_math_ops.logical_or(gen_math_ops.is_nan(cache), gen_math_ops.is_inf(cache)))\n    step_error_message = cond.cond(step_has_nan_or_inf, lambda: 'NaNs or Infs in the step!', lambda: 'No numerical issues have been found for the step.')\n    if self._parameters.collect_summary_per_core:\n        stats = ['\\n\\n', 'core:', replica_id, ',', 'step:', step_num, '-->', step_error_message, 'Printing tensors for mode:%s...' % self._parameters.trace_mode]\n    else:\n        stats = ['\\n\\n', 'step:', step_num, '-->', step_error_message, 'Printing tensors for mode:%s...' % self._parameters.trace_mode]\n    for tensor_name, cache_idx in sorted(tensor_trace_order.tensorname_to_cache_idx.items(), key=lambda item: item[1]):\n        if self._parameters.collect_summary_per_core:\n            stats.extend(['\\n', 'core:', replica_id, ',', 'step:', step_num, ',', tensor_name, '-->', _inspect_tensor(cache[cache_idx, 0])])\n        else:\n            stats.extend(['\\n', 'step:', step_num, ',', tensor_name, '-->', _inspect_tensor(cache[cache_idx, 0])])\n    return logging_ops.print_v2(*stats, summarize=-1, output_stream=output_stream)", "docstring": "Generates a print operation to print trace inspection.\n\nArgs:\ncache: Tensor storing the trace results for the step.\nreplica_id: Tensor storing the replica id of the running core.\nstep_num: Step number.\noutput_stream: Where to print the outputs, e.g., file path, or sys.stderr.\ntensor_trace_order: TensorTraceOrder object holding tensorname to id map.\n\nReturns:\nThe Op to flush the cache to file.", "source": "github-repos"}
{"code": "def create_sketch(self, name, description):\n    resource_url = '{0:s}/sketches/'.format(self.api_base_url)\n    form_data = {'name': name, 'description': description}\n    response = self.session.post(resource_url, json=form_data)\n    response_dict = response.json()\n    sketch_id = response_dict['objects'][0]['id']\n    return sketch_id", "docstring": "Create a new sketch with the specified name and description.\n\nArgs:\nname (str): Title of sketch\ndescription (str): Description of sketch\n\nReturns:\nint: ID of created sketch", "source": "codesearchnet"}
{"code": "def from_mass_fractions(cls, mass_fractions, formula=None):\n        \n        mass_fractions = process_wildcard(mass_fractions)\n        atomic_fractions = convert_mass_to_atomic_fractions(mass_fractions)\n        if not formula:\n            formula = generate_name(atomic_fractions)\n        return cls(cls._key, mass_fractions, atomic_fractions, formula)", "docstring": "Creates a composition from a mass fraction :class:`dict`.\n\nArgs:\nmass_fractions (dict): mass fraction :class:`dict`.\nThe keys are atomic numbers and the values weight fractions.\nWildcard are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron\nwill get a mass fraction of 0.6.\nformula (str): optional chemical formula for the composition.\nIf ``None``, a formula will be generated for the composition.", "source": "juraj-google-style"}
{"code": "def ensure_proc_terminate(proc):\n    if isinstance(proc, list):\n        for p in proc:\n            ensure_proc_terminate(p)\n        return\n\n    def stop_proc_by_weak_ref(ref):\n        proc = ref()\n        if (proc is None):\n            return\n        if (not proc.is_alive()):\n            return\n        proc.terminate()\n        proc.join()\n    assert isinstance(proc, mp.Process)\n    atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))", "docstring": "Make sure processes terminate when main process exit.\n\nArgs:\nproc (multiprocessing.Process or list)", "source": "codesearchnet"}
{"code": "def exclude_from_weight_decay(self, var_list=None, var_names=None):\n    if hasattr(self, '_built') and self._built:\n        raise ValueError('`exclude_from_weight_decay()` can only be configured before the optimizer is built.')\n    if var_list:\n        self._exclude_from_weight_decay = set((self._var_key(variable) for variable in var_list))\n    else:\n        self._exclude_from_weight_decay = set()\n    if var_names and len(var_names) > 0:\n        self._exclude_from_weight_decay_pattern = re.compile('|'.join(set(var_names)))\n    else:\n        self._exclude_from_weight_decay_pattern = None\n    self._exclude_from_weight_decay_cache = dict()", "docstring": "Exclude variables from weight decay.\n\nThis method must be called before the optimizer's `build` method is\ncalled. You can set specific variables to exclude out, or set a list of\nstrings as the anchor words, if any of which appear in a variable's\nname, then the variable is excluded.\n\nArgs:\nvar_list: A list of `Variable`s to exclude from weight decay.\nvar_names: A list of strings. If any string in `var_names` appear\nin the model variable's name, then this model variable is\nexcluded from weight decay. For example, `var_names=['bias']`\nexcludes all bias variables from weight decay.", "source": "github-repos"}
{"code": "def send_message_event(self, room_id, event_type, content, txn_id=None,\n                           timestamp=None):\n        \n        if not txn_id:\n            txn_id = self._make_txn_id()\n\n        path = \"/rooms/%s/send/%s/%s\" % (\n            quote(room_id), quote(event_type), quote(str(txn_id)),\n        )\n        params = {}\n        if timestamp:\n            params[\"ts\"] = timestamp\n        return self._send(\"PUT\", path, content, query_params=params)", "docstring": "Perform PUT /rooms/$room_id/send/$event_type\n\nArgs:\nroom_id (str): The room ID to send the message event in.\nevent_type (str): The event type to send.\ncontent (dict): The JSON content to send.\ntxn_id (int): Optional. The transaction ID to use.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def __init__(self, learning_rate, use_locking=False, name='GradientDescent'):\n    super(GradientDescentOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._learning_rate_tensor = None", "docstring": "Construct a new gradient descent optimizer.\n\nArgs:\nlearning_rate: A Tensor or a floating point value.  The learning\nrate to use.\nuse_locking: If True use locks for update operations.\nname: Optional name prefix for the operations created when applying\ngradients. Defaults to \"GradientDescent\".\n\n@compatibility(eager)\nWhen eager execution is enabled, `learning_rate` can be a callable that\ntakes no arguments and returns the actual value to use. This can be useful\nfor changing these values across different invocations of optimizer\nfunctions.\n@end_compatibility", "source": "github-repos"}
{"code": "def __iadd__(self, values):\n\t\t\n\t\tself._check_type(values, '+=')\n\t\tself.extend(values)\n\t\treturn self", "docstring": "Add all values to the end of self.\n\nArgs:\nvalues (Iterable): Values to append\nRaises:\nValueError: If any values are already present", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context, file_object=None):\n    \n    super(FileObjectIO, self).__init__(resolver_context)\n    self._file_object = file_object\n    self._file_object_set_in_init = bool(file_object)\n    self._size = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_object (Optional[FileIO]): file-like object.", "source": "juraj-google-style"}
{"code": "def _DictAsString(result, verbose=False):\n    class_attrs = inspectutils.GetClassAttrsDict(result)\n    result_visible = {key: value for key, value in result.items() if completion.MemberVisible(result, key, value, class_attrs=class_attrs, verbose=verbose)}\n    if not result_visible:\n        return '{}'\n    longest_key = max((len(str(key)) for key in result_visible.keys()))\n    format_string = f'{{key:{longest_key + 1}s}} {{value}}'\n    lines = []\n    for key, value in result.items():\n        if completion.MemberVisible(result, key, value, class_attrs=class_attrs, verbose=verbose):\n            line = format_string.format(key=f'{key}:', value=_OneLineResult(value))\n            lines.append(line)\n    return '\\n'.join(lines)", "docstring": "Returns a dict as a string.\n\nArgs:\nresult: The dict to convert to a string\nverbose: Whether to include 'hidden' members, those keys starting with _.\nReturns:\nA string representing the dict", "source": "github-repos"}
{"code": "def latest(self, **kwargs):\n        \n        path = self._get_id_path('latest')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the most newly created TV show. This is a live response\nand will continuously change.\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def delete_device(self, auth_body, device_id):\n    content = {'auth': auth_body}\n    return self._send('DELETE', ('/devices/%s' % device_id), content=content)", "docstring": "Deletes the given device, and invalidates any access token associated with it.\n\nNOTE: This endpoint uses the User-Interactive Authentication API.\n\nArgs:\nauth_body (dict): Authentication params.\ndevice_id (str): The device ID of the device to delete.", "source": "codesearchnet"}
{"code": "def SelectFieldPrompt(field_name, context_str, *options):\n    option_format_str = '[ {} ] \"{}\"'\n    option_dict = {}\n    print(context_str)\n    print('Please select one of the following options for field \"{}\"'.format(field_name))\n    for (cnt, option) in enumerate(options):\n        option_dict['{}'.format((cnt + 1))] = option\n        if (not callable(option)):\n            print(option_format_str.format((cnt + 1), u(str(option))))\n        else:\n            print(option_format_str.format((cnt + 1), option.__name__))\n    choice = None\n    while (choice not in option_dict):\n        choice = input('option> ').strip()\n    new_value = option_dict[choice]\n    if callable(new_value):\n        return new_value()\n    else:\n        return new_value", "docstring": "Prompts user to pick from provided options.\n\nIt is possible to provide a function as an option although it is\nnot yet tested.  This could allow a user to be prompted to provide\ntheir own value rather than the listed options.\n\nArgs:\nfield_name (string): Name of the field.\ncontext_str (string): Printed to give the user context.\noptions: Variable arguments, should be vobject Components\nin a list. As retrieved from a vCard.contents dictionary.\n\nReturns:\nOne of the options passed in.  Ideally always a list.", "source": "codesearchnet"}
{"code": "def construct(cls, name, range=None):\n        \n        other = Requirement(None)\n        other.name_ = name\n        other.range_ = VersionRange() if range is None else range\n        return other", "docstring": "Create a requirement directly from an object name and VersionRange.\n\nArgs:\nname: Object name string.\nrange: VersionRange object. If None, an unversioned requirement is\ncreated.", "source": "juraj-google-style"}
{"code": "def destringize(self, string):\n        \n\n        m = segment_destr_pattern.match(string)\n        self.genome_id = int(m.group(1))\n        self.chr_id = int(m.group(2))\n        self.direction = m.group(3)\n        self.left = int(m.group(4))\n        self.right = int(m.group(5))", "docstring": "Get RNF values for this segment from its textual representation and\nsave them into this object.\n\nArgs:\nstring (str): Textual representation of a segment.", "source": "juraj-google-style"}
{"code": "def delete(self, invoice_id, **kwargs):\n        \n        url = \"{}/{}\".format(self.base_url, invoice_id)\n        return self.delete_url(url, {}, **kwargs)", "docstring": "Delete an invoice\nYou can delete an invoice which is in the draft state.\n\nArgs:\ninvoice_id : Id for delete the invoice\nReturns:\nThe response is always be an empty array like this - []", "source": "juraj-google-style"}
{"code": "def row_limits(self):\n    return self._row_splits[1:]", "docstring": "Returns the limit indices for rows in this row partition.\n\nThese indices specify where the values for each row end.\n`partition.row_limits()` is equal to `partition.row_splits()[:-1]`.\n\nReturns:\nA 1-D integer Tensor with shape `[self.nrows]`.\nThe returned tensor is nonnegative, and is sorted in ascending order.\n`self.row_limits()[-1] == self.nvals()`.", "source": "github-repos"}
{"code": "def get_create_agent(agent_kwargs):\n  \n\n  def create_agent(sess, environment, summary_writer=None):\n    \n    return BatchDQNAgent(\n        env_batch_size=environment.batch_size,\n        sess=sess,\n        num_actions=environment.action_space.n,\n        summary_writer=summary_writer,\n        tf_device=\"/gpu:*\",\n        **agent_kwargs)\n\n  return create_agent", "docstring": "Factory for dopamine agent initialization.\n\nArgs:\nagent_kwargs: dict of BatchDQNAgent parameters\n\nReturns:\nFunction(sess, environment, summary_writer) -> BatchDQNAgent instance.", "source": "juraj-google-style"}
{"code": "def isset(alias_name):\n    \n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    raw_value = read(alias_name, allow_none=True)\n    if raw_value:\n        if re.compile(r'.+:\n            return True\n        else:\n            warnings.warn('\"{0}_PORT={1}\" does not look like a docker link.'.format(alias_name, raw_value), stacklevel=2)\n            return False\n\n    return False", "docstring": "Return a boolean if the docker link is set or not and is a valid looking docker link value.\n\nArgs:\nalias_name: The link alias name", "source": "juraj-google-style"}
{"code": "def get_namespace(self, name_seq):\n    namespaces = self.namespaces\n    result = []\n    for name in name_seq:\n        namespaces = namespaces.get(name)\n        if (not namespaces):\n            break\n        result.append(name)\n    return result", "docstring": "Returns the prefix of names from name_seq that are known namespaces.\n\nArgs:\nname_seq: ['names', 'of', 'possible', 'namespace', 'to', 'find']\n\nReturns:\n['names', 'that', 'are', 'namespaces', 'possibly', 'empty', 'list']", "source": "codesearchnet"}
{"code": "def import_module(name):\n    \n    parts = name.split('.')\n    path = None\n    module_name = ''\n    fhandle = None\n\n    for index, part in enumerate(parts):\n        module_name = part if index == 0 else '%s.%s' % (module_name, part)\n        path = [path] if path is not None else path\n\n        try:\n            fhandle, path, descr = imp.find_module(part, path)\n            if module_name in sys.modules:\n                \n                \n                mod = sys.modules[module_name]\n            else:\n                mod = imp.load_module(module_name, fhandle, path, descr)\n        finally:\n            \n            if fhandle:\n                fhandle.close()\n\n    return mod", "docstring": "Imports a module into the current runtime environment\n\nThis function emulates the Python import system that allows for\nimporting full path modules.  It will break down the module and\nimport each part (or skip if it is already loaded in cache).\n\nArgs:\nname (str): The name of the module to import.  This should be\nthe full path of the module\n\nReturns:\nThe module that was imported", "source": "juraj-google-style"}
{"code": "def get_lacp_mode(self, name):\n    members = self.get_members(name)\n    if (not members):\n        return DEFAULT_LACP_MODE\n    for member in self.get_members(name):\n        match = re.search('channel-group\\\\s\\\\d+\\\\smode\\\\s(?P<value>.+)', self.get_block(('^interface %s' % member)))\n        return match.group('value')", "docstring": "Returns the LACP mode for the specified Port-Channel interface\n\nArgs:\nname(str): The Port-Channel interface name to return the LACP\nmode for from the configuration\n\nReturns:\nThe configured LACP mode for the interface.  Valid mode values\nare 'on', 'passive', 'active'", "source": "codesearchnet"}
{"code": "def zenith_luminance(self, value=9999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `zenith_luminance`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `zenith_luminance`')\n    self._zenith_luminance = value", "docstring": "Corresponds to IDD Field `zenith_luminance`\nwill be missing if >= 9999\n\nArgs:\nvalue (float): value for IDD Field `zenith_luminance`\nUnit: Cd/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def reset_folder(self, folder):\n        \n        warnings.warn('This is a destructive action that cannot be undone.')\n        self.post('reset', data={}, params={'folder': folder})", "docstring": "Erase the database index from a given folder and restart Syncthing.\n\nArgs:\nfolder (str): Folder ID.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get(self, attr: FetchAttribute) -> MaybeBytes:\n        \n        attr_name = attr.value.decode('ascii')\n        method = getattr(self, '_get_' + attr_name.replace('.', '_'))\n        return method(attr)", "docstring": "Return the bytes representation of the given message attribue.\n\nArgs:\nattr: The fetch attribute.\n\nRaises:\n:class:`NotFetchable`", "source": "juraj-google-style"}
{"code": "def UpdateNumberOfEventSources(\n      self, number_of_consumed_sources, number_of_produced_sources):\n    \n    consumed_sources_delta = 0\n    if number_of_consumed_sources is not None:\n      if number_of_consumed_sources < self.number_of_consumed_sources:\n        raise ValueError(\n            'Number of consumed sources smaller than previous update.')\n\n      consumed_sources_delta = (\n          number_of_consumed_sources - self.number_of_consumed_sources)\n\n      self.number_of_consumed_sources = number_of_consumed_sources\n      self.number_of_consumed_sources_delta = consumed_sources_delta\n\n    produced_sources_delta = 0\n    if number_of_produced_sources is not None:\n      if number_of_produced_sources < self.number_of_produced_sources:\n        raise ValueError(\n            'Number of produced sources smaller than previous update.')\n\n      produced_sources_delta = (\n          number_of_produced_sources - self.number_of_produced_sources)\n\n      self.number_of_produced_sources = number_of_produced_sources\n      self.number_of_produced_sources_delta = produced_sources_delta\n\n    return consumed_sources_delta > 0 or produced_sources_delta > 0", "docstring": "Updates the number of event sources.\n\nArgs:\nnumber_of_consumed_sources (int): total number of event sources consumed\nby the process.\nnumber_of_produced_sources (int): total number of event sources produced\nby the process.\n\nReturns:\nbool: True if either number of event sources has increased.\n\nRaises:\nValueError: if the consumed or produced number of event sources is\nsmaller than the value of the previous update.", "source": "juraj-google-style"}
{"code": "def index_one(self, instance, force=False):\n        \n        if not self.is_indexed(instance) and not force:\n            doc = self._as_document(instance)\n            self._index_document(doc, force=force)\n            logger.debug('{} indexed as\\n {}'.format(instance.__class__, pformat(doc)))\n            return True\n\n        logger.debug('{} already indexed.'.format(instance.__class__))\n        return False", "docstring": "Indexes exactly one object of the Ambry system.\n\nArgs:\ninstance (any): instance to index.\nforce (boolean): if True replace document in the index.\n\nReturns:\nboolean: True if document added to index, False if document already exists in the index.", "source": "juraj-google-style"}
{"code": "def detect_suicidal_func(func):\n    if func.is_constructor:\n        return False\n    if (func.visibility != 'public'):\n        return False\n    calls = [c.name for c in func.internal_calls]\n    if (not (('suicide(address)' in calls) or ('selfdestruct(address)' in calls))):\n        return False\n    if func.is_protected():\n        return False\n    return True", "docstring": "Detect if the function is suicidal\n\nDetect the public functions calling suicide/selfdestruct without protection\nReturns:\n(bool): True if the function is suicidal", "source": "codesearchnet"}
{"code": "def env():\n    return _env", "docstring": "Returns the object holds the test environment information.\n\nTests should modify this in the main process if needed, and it will be passed\nto the worker processes each time a test case is run.\n\nReturns:\na TestEnvironment object.", "source": "github-repos"}
{"code": "def get_ax_fig_plt(ax=None, **kwargs):\n    \n    import matplotlib.pyplot as plt\n    if ax is None:\n        fig = plt.figure(**kwargs)\n        ax = fig.add_subplot(1, 1, 1)\n    else:\n        fig = plt.gcf()\n\n    return ax, fig, plt", "docstring": "Helper function used in plot functions supporting an optional Axes argument.\nIf ax is None, we build the `matplotlib` figure and create the Axes else\nwe return the current active figure.\n\nArgs:\nkwargs: keyword arguments are passed to plt.figure if ax is not None.\n\nReturns:\nax: :class:`Axes` object\nfigure: matplotlib figure\nplt: matplotlib pyplot module.", "source": "juraj-google-style"}
{"code": "def _get_covariance(self, X):\n    result = pd.DataFrame(index=range(len(X)))\n    column_names = self.get_column_names(X)\n    for column_name in column_names:\n        column = self.get_column(X, column_name)\n        distrib = self.distribs[column_name]\n        cdf = distrib.cumulative_distribution(column)\n        if (distrib.constant_value is not None):\n            cdf = (np.ones(column.shape) - EPSILON)\n        result = self.set_column(result, column_name, stats.norm.ppf(cdf))\n    result = result[(result != np.inf).all(axis=1)]\n    return pd.DataFrame(data=result).cov().values", "docstring": "Compute covariance matrix with transformed data.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\n\nReturns:\nnp.ndarray", "source": "codesearchnet"}
{"code": "def set_reprompt_text(self, text):\n    self.response.reprompt.outputSpeech.type = 'PlainText'\n    self.response.reprompt.outputSpeech.text = text", "docstring": "Set response reprompt output speech as plain text type.\n\nArgs:\ntext: str. Response speech used when type is 'PlainText'. Cannot\nexceed 8,000 characters.", "source": "codesearchnet"}
{"code": "def _ensure_package_loaded(path, component):\n    logger = logging.getLogger(__name__)\n    packages = component.find_products('support_package')\n    if (len(packages) == 0):\n        return None\n    elif (len(packages) > 1):\n        raise ExternalError(\"Component had multiple products declared as 'support_package\", products=packages)\n    if ((len(path) > 2) and (':' in path[2:])):\n        (path, _, _) = path.rpartition(':')\n    package_base = packages[0]\n    relative_path = os.path.normpath(os.path.relpath(path, start=package_base))\n    if relative_path.startswith('..'):\n        raise ExternalError('Component had python product output of support_package', package=package_base, product=path, relative_path=relative_path)\n    if (not relative_path.endswith('.py')):\n        raise ExternalError('Python product did not end with .py', path=path)\n    relative_path = relative_path[:(- 3)]\n    if (os.pathsep in relative_path):\n        raise ExternalError('Python support wheels with multiple subpackages not yet supported', relative_path=relative_path)\n    support_distro = component.support_distribution\n    if (support_distro not in sys.modules):\n        logger.debug('Creating dynamic support wheel package: %s', support_distro)\n        (file, path, desc) = imp.find_module(os.path.basename(package_base), [os.path.dirname(package_base)])\n        imp.load_module(support_distro, file, path, desc)\n    return '{}.{}'.format(support_distro, relative_path)", "docstring": "Ensure that the given module is loaded as a submodule.\n\nReturns:\nstr: The name that the module should be imported as.", "source": "codesearchnet"}
{"code": "def delete_request(profile, resource):\n    url = get_url(profile, resource)\n    headers = get_headers(profile)\n    return requests.delete(url, headers=headers)", "docstring": "Do a DELETE request to Github's API.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nresource\nThe part of a Github API URL that comes after ``.../:repo/git``.\nFor instance, for ``.../:repo/git/commits``, it's ``/commits``.\n\nReturns:\nThe response returned by the ``requests`` library when it does the\nPOST request.", "source": "codesearchnet"}
{"code": "def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:\n    max_new_tokens = int(self.num_assistant_tokens)\n    if max_new_tokens == 0:\n        return (input_ids, None)\n    input_ids = input_ids.to(self.assistant_model.device)\n    remove_from_pkv = 0\n    assistant_input_ids, remove_from_pkv = self._prepare_assistant_input_ids(input_ids)\n    self.prev_assistant_ids = assistant_input_ids\n    min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - assistant_input_ids.shape[-1]), 0)\n    self._update_past_and_masks(assistant_input_ids, remove_from_pkv)\n    generation_args = self._prepare_generation_args(assistant_input_ids, min_new_tokens, max_new_tokens)\n    self.assistant_kwargs.pop('attention_mask', None)\n    assistant_output = self.assistant_model.generate(**generation_args, **self.assistant_kwargs)\n    new_target_ids = self._process_assistant_outputs(input_ids, assistant_output.sequences, assistant_input_ids)\n    self.prev_target_ids_len = input_ids.shape[1]\n    self.assistant_kwargs['past_key_values'] = assistant_output.past_key_values\n    self.prev_assistant_ids = assistant_output.sequences\n    if self.prev_target_ids_len >= new_target_ids.shape[1]:\n        return (input_ids, None)\n    return (new_target_ids, None)", "docstring": "Fetches the candidates to be tried for the current input.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\n\nReturn:\n`torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be\nassessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,\nvocabulary_size)` containing the logits associated to each candidate.", "source": "github-repos"}
{"code": "def diff_commonPrefix(self, text1, text2):\n    if ((not text1) or (not text2) or (text1[0] != text2[0])):\n        return 0\n    pointermin = 0\n    pointermax = min(len(text1), len(text2))\n    pointermid = pointermax\n    pointerstart = 0\n    while (pointermin < pointermid):\n        if (text1[pointerstart:pointermid] == text2[pointerstart:pointermid]):\n            pointermin = pointermid\n            pointerstart = pointermin\n        else:\n            pointermax = pointermid\n        pointermid = (((pointermax - pointermin) \n    return pointermid", "docstring": "Determine the common prefix of two strings.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nThe number of characters common to the start of each string.", "source": "codesearchnet"}
{"code": "def supervised_to_dict(dataset, text2self):\n  \n  def my_fn(inputs, targets):\n    if text2self:\n      return {\"targets\": targets}\n    else:\n      return {\"inputs\": inputs, \"targets\": targets}\n  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "docstring": "Turns a supervised dataset into a dataset with a feature dictionary.\n\nif text2self, then the features dictionary contains a \"targets\" key.\nelse, the features dictionary contains \"inputs\" and \"targets\" keys.\n\nArgs:\ndataset: a tf.data.Dataset\ntext2self: a boolean\nReturns:\na tf.data.Dataset", "source": "juraj-google-style"}
{"code": "def _build(self, inputs, **normalization_build_kwargs):\n    if ((self._normalization_ctor in {batch_norm.BatchNorm, batch_norm_v2.BatchNormV2}) and ('is_training' not in normalization_build_kwargs)):\n        raise ValueError('Boolean is_training flag must be explicitly specified when using batch normalization.')\n    self._input_shape = tuple(inputs.get_shape().as_list())\n    net = inputs\n    final_index = (len(self._layers) - 1)\n    for (i, layer) in enumerate(self._layers):\n        net = layer(net)\n        if ((i != final_index) or self._normalize_final):\n            if (self._normalization_ctor is not None):\n                normalizer = self._normalization_ctor(name='batch_norm_{}'.format(i), **self._normalization_kwargs)\n                net = normalizer(net, **util.remove_unsupported_kwargs(normalizer, normalization_build_kwargs))\n            elif normalization_build_kwargs:\n                tf.logging.warning('No normalization configured, but extra kwargs provided: {}'.format(normalization_build_kwargs))\n        if ((i != final_index) or self._activate_final):\n            net = self._activation(net)\n    return net", "docstring": "Assembles the `ConvNet2D` and connects it to the graph.\n\nArgs:\ninputs: A 4D Tensor of shape `[batch_size, input_height, input_width,\ninput_channels]`.\n**normalization_build_kwargs: kwargs passed to the normalization module\nat _build time.\n\nReturns:\nA 4D Tensor of shape `[batch_size, output_height, output_width,\noutput_channels[-1]]`.\n\nRaises:\nValueError: If `is_training` is not explicitly specified when using\nbatch normalization.", "source": "codesearchnet"}
{"code": "def create_rag_adapter() -> EmbeddingTypeAdapter[Chunk, Chunk]:\n    return EmbeddingTypeAdapter(input_fn=_extract_chunk_text, output_fn=_add_embedding_fn)", "docstring": "Creates adapter for converting between Chunk and Embedding types.\n\nThe adapter:\n- Extracts text from Chunk.content.text for embedding\n- Creates Embedding objects from model output\n- Sets Embedding in Chunk.embedding\n\nReturns:\nEmbeddingTypeAdapter configured for RAG pipeline types", "source": "github-repos"}
{"code": "def random_expr_with_required_var(depth, required_var, optional_list, ops):\n    if (not depth):\n        if required_var:\n            return required_var\n        return str(optional_list[random.randrange(len(optional_list))])\n    max_depth_side = random.randrange(2)\n    other_side_depth = random.randrange(depth)\n    required_var_side = random.randrange(2)\n    left = random_expr_with_required_var(((depth - 1) if max_depth_side else other_side_depth), (required_var if required_var_side else None), optional_list, ops)\n    right = random_expr_with_required_var(((depth - 1) if (not max_depth_side) else other_side_depth), (required_var if (not required_var_side) else None), optional_list, ops)\n    op = ops[random.randrange(len(ops))]\n    return ExprNode(left, right, op)", "docstring": "Generate a random expression tree with a required variable.\n\nThe required variable appears exactly once in the expression.\n\nArgs:\ndepth: At least one leaf will be this many levels down from the top.\nrequired_var: A char. This char is guaranteed to be placed exactly once at\na leaf somewhere in the tree. This is the var to solve for.\noptional_list: A list of chars. These chars are randomly selected as leaf\nvalues. These are constant vars.\nops: A list of ExprOp instances.\n\nReturns:\nAn ExprNode instance which is the root of the generated expression tree.", "source": "codesearchnet"}
{"code": "def execute_code_block(elem, doc):\n    \n    command = select_executor(elem, doc).split(' ')\n    code = elem.text\n    if 'plt' in elem.attributes or 'plt' in elem.classes:\n        code = save_plot(code, elem)\n    command.append(code)\n    if 'args' in elem.attributes:\n        for arg in elem.attributes['args'].split():\n            command.append(arg)\n\n    cwd = elem.attributes['wd'] if 'wd' in elem.attributes else None\n\n    return subprocess.run(command,\n                          encoding='utf8',\n                          stdout=subprocess.PIPE,\n                          stderr=subprocess.STDOUT,\n                          cwd=cwd).stdout", "docstring": "Executes a code block by passing it to the executor.\n\nArgs:\nelem The AST element.\ndoc  The document.\n\nReturns:\nThe output of the command.", "source": "juraj-google-style"}
{"code": "def transmit(self, payload, **kwargs):\n    kwargs['app_label'] = 'sap_success_factors'\n    kwargs['model_name'] = 'SapSuccessFactorsLearnerDataTransmissionAudit'\n    kwargs['remote_user_id'] = 'sapsf_user_id'\n    super(SapSuccessFactorsLearnerTransmitter, self).transmit(payload, **kwargs)", "docstring": "Send a completion status call to SAP SuccessFactors using the client.\n\nArgs:\npayload: The learner completion data payload to send to SAP SuccessFactors", "source": "codesearchnet"}
{"code": "def decode_payload(cls, request):\n    \n    \n    \n    if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:\n      raise DeprecationWarning(\n          \"Task is generated by an older incompatible version of mapreduce. \"\n          \"Please kill this job manually\")\n    return cls._decode_payload(request.body)", "docstring": "Decode task payload.\n\nHugeTask controls its own payload entirely including urlencoding.\nIt doesn't depend on any particular web framework.\n\nArgs:\nrequest: a webapp Request instance.\n\nReturns:\nA dict of str to str. The same as the params argument to __init__.\n\nRaises:\nDeprecationWarning: When task payload constructed from an older\nincompatible version of mapreduce.", "source": "juraj-google-style"}
{"code": "def warning(msg: str, *args, **kwargs) -> None:\n    _DEFAULT_LOGGER.warning(msg, *args, **kwargs)", "docstring": "Logs warning message.\n\nArgs:\nmsg: Message with possible format string.\n*args: Values for variables in the format string.\n**kwargs: Keyword arguments for the logger.", "source": "github-repos"}
{"code": "def locked_put(self, credentials):\n    entity = self._model.get_or_insert(self._key_name)\n    setattr(entity, self._property_name, credentials)\n    entity.put()\n    if self._cache:\n        self._cache.set(self._key_name, credentials.to_json())", "docstring": "Write a Credentials to the datastore.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "codesearchnet"}
{"code": "def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):\n    deps = set()\n    for source in rdf_artifact.sources:\n        if (source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT, rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP)):\n            if source.attributes.GetItem('names'):\n                deps.update(source.attributes.GetItem('names'))\n    if (depth > 10):\n        raise RuntimeError('Max artifact recursion depth reached.')\n    deps_set = set(deps)\n    if recursive:\n        for dep in deps:\n            artifact_obj = REGISTRY.GetArtifact(dep)\n            new_dep = GetArtifactDependencies(artifact_obj, True, depth=(depth + 1))\n            if new_dep:\n                deps_set.update(new_dep)\n    return deps_set", "docstring": "Return a set of artifact dependencies.\n\nArgs:\nrdf_artifact: RDF object artifact.\nrecursive: If True recurse into dependencies to find their dependencies.\ndepth: Used for limiting recursion depth.\n\nReturns:\nA set of strings containing the dependent artifact names.\n\nRaises:\nRuntimeError: If maximum recursion depth reached.", "source": "codesearchnet"}
{"code": "def __init__(self, predicate, if_true, if_false):\n        \n        super(TernaryConditional, self).__init__(predicate, if_true, if_false)\n        self.predicate = predicate\n        self.if_true = if_true\n        self.if_false = if_false\n        self.validate()", "docstring": "Construct an expression that evaluates a predicate and returns one of two results.\n\nArgs:\npredicate: Expression to evaluate, and based on which to choose the returned value\nif_true: Expression to return if the predicate was true\nif_false: Expression to return if the predicate was false\n\nReturns:\nnew TernaryConditional object", "source": "juraj-google-style"}
{"code": "class FlaxDataCollatorForLanguageModeling:\n    tokenizer: PreTrainedTokenizerBase\n    mlm_probability: float = 0.15\n\n    def __post_init__(self):\n        if self.tokenizer.mask_token is None:\n            raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. You should pass `mlm=False` to train on causal language modeling instead.')\n\n    def __call__(self, examples: list[dict[str, np.ndarray]], pad_to_multiple_of: int) -> dict[str, np.ndarray]:\n        batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY)\n        special_tokens_mask = batch.pop('special_tokens_mask', None)\n        batch['input_ids'], batch['labels'] = self.mask_tokens(batch['input_ids'], special_tokens_mask=special_tokens_mask)\n        return batch\n\n    def mask_tokens(self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray]) -> tuple[np.ndarray, np.ndarray]:\n        \n        labels = inputs.copy()\n        probability_matrix = np.full(labels.shape, self.mlm_probability)\n        special_tokens_mask = special_tokens_mask.astype('bool')\n        probability_matrix[special_tokens_mask] = 0.0\n        masked_indices = np.random.binomial(1, probability_matrix).astype('bool')\n        labels[~masked_indices] = -100\n        indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype('bool') & masked_indices\n        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n        indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype('bool')\n        indices_random &= masked_indices & ~indices_replaced\n        random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype='i4')\n        inputs[indices_random] = random_words[indices_random]\n        return (inputs, labels)", "docstring": "Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they\nare not all of the same length.\n\nArgs:\ntokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\nThe tokenizer used for encoding the data.\nmlm_probability (:obj:`float`, `optional`, defaults to 0.15):\nThe probability with which to (randomly) mask tokens in the input.\n\n.. note::\n\nFor best performance, this data collator should be used with a dataset having items that are dictionaries or\nBatchEncoding, with the :obj:`\"special_tokens_mask\"` key, as returned by a\n:class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the\nargument :obj:`return_special_tokens_mask=True`.", "source": "github-repos"}
{"code": "def is_mobile(user_agent):\n    \n    if user_agent:\n        b = reg_b.search(user_agent)\n        v = reg_v.search(user_agent[0:4])\n        return b or v\n    return False", "docstring": "Checks if the user browser from the given user agent is mobile.\n\nArgs:\nuser_agent: A given user agent.\n\nReturns: True if the browser from the user agent is mobile.", "source": "juraj-google-style"}
{"code": "def relocate(source, destination, move=False):\n    \n    venv = api.VirtualEnvironment(source)\n    if not move:\n\n        venv.relocate(destination)\n        return None\n\n    venv.move(destination)\n    return None", "docstring": "Adjust the virtual environment settings and optional move it.\n\nArgs:\nsource (str): Path to the existing virtual environment.\ndestination (str): Desired path of the virtual environment.\nmove (bool): Whether or not to actually move the files. Default False.", "source": "juraj-google-style"}
{"code": "def run(self, fetch_list, feed_dict=None, sess=None):\n    if (tf.get_default_graph() != self._graph):\n        raise ValueError('The current default graph is different from the graph used at construction time of RecurrentRunner.')\n    if (feed_dict is None):\n        all_feeds_dict = {}\n    else:\n        all_feeds_dict = dict(feed_dict)\n    all_feeds_dict.update(self._state_feeds)\n    all_fetches_list = list(fetch_list)\n    all_fetches_list += self._state_fetches\n    sess = (sess or tf.get_default_session())\n    fetches = sess.run(all_fetches_list, all_feeds_dict)\n    states = fetches[len(fetch_list):]\n    for (i, s) in enumerate(states):\n        self._state_feeds[self._state_feed_names[i]] = s\n    return fetches[:len(fetch_list)]", "docstring": "Runs the graph with the provided feeds and fetches.\n\nThis function wraps sess.Run(), but takes care of state saving and\nrestoring by feeding in states and storing the new state values.\nArgs:\nfetch_list: A list of requested output tensors.\nfeed_dict: A dictionary of feeds - see Session.Run(). Optional.\nsess: The Tensorflow session to run. Can be None.\nReturns:\nThe requested tensors as numpy arrays.\nRaises:\nValueError: If the default graph during object construction was\ndifferent from the current default graph.", "source": "codesearchnet"}
{"code": "def update(self, resource, id_or_uri=None, timeout=-1):\n        \n        uri = resource.pop('uri', None)\n        if not uri:\n            if not id_or_uri:\n                raise ValueError(\"URI was not provided\")\n            uri = self._client.build_uri(id_or_uri)\n        return self._client.update(resource=resource, uri=uri, timeout=timeout)", "docstring": "Updates the specified alert resource.\n\nArgs:\nresource (dict): Object to update.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated alert.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    range_offset = getattr(path_spec, 'range_offset', None)\n    if range_offset is None:\n      raise errors.PathSpecError(\n          'Unsupported path specification without encoding method.')\n\n    range_size = getattr(path_spec, 'range_size', None)\n    if range_size is None:\n      raise errors.PathSpecError(\n          'Unsupported path specification without encoding method.')\n\n    self._range_offset = range_offset\n    self._range_size = range_size", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def preprocess_input(x, data_format=None):\n    return x", "docstring": "A placeholder method for backward compatibility.\n\nThe preprocessing logic has been included in the EfficientNetV2 model\nimplementation. Users are no longer required to call this method to\nnormalize the input data. This method does nothing and only kept as a\nplaceholder to align the API surface between old and new version of model.\n\nArgs:\nx: A floating point `numpy.array` or a tensor.\ndata_format: Optional data format of the image tensor/array. Defaults to\nNone, in which case the global setting\n`keras.backend.image_data_format()` is used\n(unless you changed it, it defaults to \"channels_last\").{mode}\n\nReturns:\nUnchanged `numpy.array` or tensor.", "source": "github-repos"}
{"code": "def set_led(self, colorcode):\n    data = []\n    data.append(10)\n    data.append(self.servoid)\n    data.append(RAM_WRITE_REQ)\n    data.append(LED_CONTROL_RAM)\n    data.append(1)\n    data.append(colorcode)\n    send_data(data)", "docstring": "Set the LED Color of Herkulex\n\nArgs:\ncolorcode (int): The code for colors\n(0x00-OFF\n0x02-BLUE\n0x03-CYAN\n0x04-RED\n0x05-ORANGE\n0x06-VIOLET\n0x07-WHITE", "source": "codesearchnet"}
{"code": "def _CheckAttribute(self, attribute, value):\n    \n    if not isinstance(attribute, Attribute):\n      raise AttributeError(\"Attribute %s must be of type aff4.Attribute()\" %\n                           attribute)\n\n    if not isinstance(value, attribute.attribute_type):\n      raise ValueError(\"Value for attribute %s must be of type %s()\" %\n                       (attribute, attribute.attribute_type.__name__))", "docstring": "Check that the value is of the expected type.\n\nArgs:\nattribute: An instance of Attribute().\nvalue: An instance of RDFValue.\n\nRaises:\nValueError: when the value is not of the expected type.\nAttributeError: When the attribute is not of type Attribute().", "source": "juraj-google-style"}
{"code": "def search(self, query, verbose=0):\n        \n        if verbose > 0:\n            print(\"searching \" + query)\n        query = query.lower()\n        qgram = ng(query, self.slb)\n        qocument = set()\n        for q in qgram:\n            if q in self.ngrams.keys():\n                for i in self.ngrams[q]:\n                    qocument.add(i)\n        self.qocument = qocument\n        results = {}\n        for i in qocument:\n           for j in self.D[i].keys():\n                if not j in results.keys():\n                    results[j] = 0\n                results[j] = results[j] + self.D[i][j]\n        sorted_results = sorted(results.items(), key=operator.itemgetter(1), reverse=True)\n        return [self.elements[f[0]] for f in sorted_results]", "docstring": "Searches files satisfying query\n\nIt first decompose the query in ngrams, then score each document containing\nat least one ngram with the number. The ten document having the most ngrams\nin common with the query are selected.\n\nArgs:\nquery (str): what to search;\nresults_number (int): number of results to return (default: 10)", "source": "juraj-google-style"}
{"code": "def _open_tracing_interface(self, conn_id, callback):\n        \n\n        try:\n            handle = self._find_handle(conn_id)\n            services = self._connections[handle]['services']\n        except (ValueError, KeyError):\n            callback(conn_id, self.id, False, 'Connection closed unexpectedly before we could open the streaming interface')\n            return\n\n        self._command_task.async_command(['_enable_tracing', handle, services],\n                                         self._on_interface_finished, {'connection_id': conn_id, 'callback': callback})", "docstring": "Enable the debug tracing interface for this IOTile device\n\nArgs:\nconn_id (int): the unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def cut_spectrum(sp, l0, lf):\n    \n\n    if l0 >= lf:\n        raise ValueError(\"l0 must be lower than lf\")\n    idx0 = np.argmin(np.abs(sp.x - l0))\n    idx1 = np.argmin(np.abs(sp.x - lf))\n    out = copy.deepcopy(sp)\n    out.x = out.x[idx0:idx1]\n    out.y = out.y[idx0:idx1]\n    return out", "docstring": "Cuts spectrum given a wavelength interval, leaving origina intact\n\nArgs:\nsp: Spectrum instance\nl0: initial wavelength\nlf: final wavelength\n\nReturns:\nSpectrum: cut spectrum", "source": "juraj-google-style"}
{"code": "def get_ieee_rotation(structure, refine_rotation=True):\n        \n        \n        sga = SpacegroupAnalyzer(structure)\n        dataset = sga.get_symmetry_dataset()\n        trans_mat = dataset['transformation_matrix']\n        conv_latt = Lattice(np.transpose(np.dot(np.transpose(\n            structure.lattice.matrix), np.linalg.inv(trans_mat))))\n        xtal_sys = sga.get_crystal_system()\n\n        vecs = conv_latt.matrix\n        lengths = np.array(conv_latt.abc)\n        angles = np.array(conv_latt.angles)\n        rotation = np.zeros((3, 3))\n\n        \n        if xtal_sys == \"cubic\":\n            rotation = [vecs[i] / lengths[i] for i in range(3)]\n\n        \n        elif xtal_sys == \"tetragonal\":\n            rotation = np.array([vec / mag for (mag, vec) in\n                                 sorted(zip(lengths, vecs),\n                                        key=lambda x: x[0])])\n            if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):\n                rotation[0], rotation[2] = rotation[2], rotation[0].copy()\n            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))\n\n        \n        elif xtal_sys == \"orthorhombic\":\n            rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]\n            rotation = np.roll(rotation, 2, axis=0)\n\n        \n        \n        elif xtal_sys in (\"trigonal\", \"hexagonal\"):\n            \n            tf_index = np.argmin(abs(angles - 120.))\n            non_tf_mask = np.logical_not(angles == angles[tf_index])\n            rotation[2] = get_uvec(vecs[tf_index])\n            rotation[0] = get_uvec(vecs[non_tf_mask][0])\n            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))\n\n        \n        elif xtal_sys == \"monoclinic\":\n            \n            u_index = np.argmax(abs(angles - 90.))\n            n_umask = np.logical_not(angles == angles[u_index])\n            rotation[1] = get_uvec(vecs[u_index])\n            \n            c = [vec / mag for (mag, vec) in\n                 sorted(zip(lengths[n_umask], vecs[n_umask]))][0]\n            rotation[2] = np.array(c)\n            rotation[0] = np.cross(rotation[1], rotation[2])\n\n        \n        elif xtal_sys == \"triclinic\":\n            rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]\n            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))\n            rotation[0] = np.cross(rotation[1], rotation[2])\n\n        rotation = SquareTensor(rotation)\n        if refine_rotation:\n            rotation = rotation.refine_rotation()\n\n        return rotation", "docstring": "Given a structure associated with a tensor, determines\nthe rotation matrix for IEEE conversion according to\nthe 1987 IEEE standards.\n\nArgs:\nstructure (Structure): a structure associated with the\ntensor to be converted to the IEEE standard\nrefine_rotation (bool): whether to refine the rotation\nusing SquareTensor.refine_rotation", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(GetAttributesResponsePayload, self).read(\n            input_buffer,\n            kmip_version=kmip_version\n        )\n        local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n\n        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n            unique_identifier = primitives.TextString(\n                tag=enums.Tags.UNIQUE_IDENTIFIER\n            )\n            unique_identifier.read(local_buffer, kmip_version=kmip_version)\n            self.unique_identifier = unique_identifier.value\n        else:\n            raise exceptions.InvalidKmipEncoding(\n                \"The GetAttributes response payload encoding is missing the \"\n                \"unique identifier.\"\n            )\n\n        if kmip_version < enums.KMIPVersion.KMIP_2_0:\n            self._attributes = list()\n            while self.is_tag_next(enums.Tags.ATTRIBUTE, local_buffer):\n                attribute = objects.Attribute()\n                attribute.read(local_buffer, kmip_version=kmip_version)\n                self._attributes.append(attribute)\n        else:\n            if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):\n                attributes = objects.Attributes()\n                attributes.read(local_buffer, kmip_version=kmip_version)\n                \n                temp_attr = objects.convert_attributes_to_template_attribute(\n                    attributes\n                )\n                self._attributes = temp_attr.attributes\n            else:\n                raise exceptions.InvalidKmipEncoding(\n                    \"The GetAttributes response payload encoding is missing \"\n                    \"the attributes structure.\"\n                )\n\n        self.is_oversized(local_buffer)", "docstring": "Read the data encoding the GetAttributes response payload and decode\nit into its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def get_install_value(self, value_name, wanted_type=None):\n    try:\n        (item_value, item_type) = self.__reg_query_value(self.__reg_uninstall_handle, value_name)\n    except pywintypes.error as exc:\n        if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):\n            return None\n        raise\n    if (wanted_type and (item_type not in self.__reg_types[wanted_type])):\n        item_value = None\n    return item_value", "docstring": "For the uninstall section of the registry return the name value.\n\nArgs:\nvalue_name (str): Registry value name.\nwanted_type (str):\nThe type of value wanted if the type does not match\nNone is return. wanted_type support values are\n``str`` ``int`` ``list`` ``bytes``.\n\nReturns:\nvalue: Value requested or None if not found.", "source": "codesearchnet"}
{"code": "def validate(self, graph):\n    if (not nx.is_directed_acyclic_graph(graph)):\n        raise DirectedAcyclicGraphInvalid(graph_name=self._name)", "docstring": "Validate the graph by checking whether it is a directed acyclic graph.\n\nArgs:\ngraph (DiGraph): Reference to a DiGraph object from NetworkX.\n\nRaises:\nDirectedAcyclicGraphInvalid: If the graph is not a valid dag.", "source": "codesearchnet"}
{"code": "def get_defaults(path):\n    \n\n    defaults = {}\n\n    if os.path.isfile(path):\n        with open(path) as f:\n\n            for line in f:\n                line = line.strip()\n                if '=' not in line or line.startswith('\n                    continue\n\n                k, v = line.split('=', 1)\n                v = v.strip('\"').strip(\"'\")\n\n                defaults[k] = v\n        return defaults\n    else:\n        return {}", "docstring": "Reads file for configuration defaults.\n\nArguments:\n- path (str) Absolute filepath (usually ~/.licenser)\n\nReturns:\n- (dict) Defaults for name, email, license, .txt extension", "source": "juraj-google-style"}
{"code": "def __init__(self, bits: List[int], energy_layers: List[tf.keras.layers.Layer], name: Union[None, str]=None):\n    super().__init__(name=name)\n    self._bits = energy_utils.check_bits(bits)\n    self._energy_layers = energy_layers", "docstring": "Initializes a BitstringEnergy.\n\nArgs:\nbits: Unique labels for the bits on which this distribution is supported.\nenergy_layers: Concatenation of these layers yields trainable map from\nbitstrings to scalars.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def _LiteralEval(value):\n    root = ast.parse(value, mode='eval')\n    if isinstance(root.body, ast.BinOp):\n        raise ValueError(value)\n    for node in ast.walk(root):\n        for field, child in ast.iter_fields(node):\n            if isinstance(child, list):\n                for index, subchild in enumerate(child):\n                    if isinstance(subchild, ast.Name):\n                        child[index] = _Replacement(subchild)\n            elif isinstance(child, ast.Name):\n                replacement = _Replacement(child)\n                setattr(node, field, replacement)\n    return ast.literal_eval(root)", "docstring": "Parse value as a Python literal, or container of containers and literals.\n\nFirst the AST of the value is updated so that bare-words are turned into\nstrings. Then the resulting AST is evaluated as a literal or container of\nonly containers and literals.\n\nThis allows for the YAML-like syntax {a: b} to represent the dict {'a': 'b'}\n\nArgs:\nvalue: A string to be parsed as a literal or container of containers and\nliterals.\nReturns:\nThe Python value representing the value arg.\nRaises:\nValueError: If the value is not an expression with only containers and\nliterals.\nSyntaxError: If the value string has a syntax error.", "source": "github-repos"}
{"code": "def from_grpc_status(status_code, message, **kwargs):\n    \n    error_class = exception_class_for_grpc_status(status_code)\n    error = error_class(message, **kwargs)\n\n    if error.grpc_status_code is None:\n        error.grpc_status_code = status_code\n\n    return error", "docstring": "Create a :class:`GoogleAPICallError` from a :class:`grpc.StatusCode`.\n\nArgs:\nstatus_code (grpc.StatusCode): The gRPC status code.\nmessage (str): The exception message.\nkwargs: Additional arguments passed to the :class:`GoogleAPICallError`\nconstructor.\n\nReturns:\nGoogleAPICallError: An instance of the appropriate subclass of\n:class:`GoogleAPICallError`.", "source": "juraj-google-style"}
{"code": "def _get_contexts_for_squash(self, batch_signature):\n        \n\n        batch = self._batches_by_id[batch_signature].batch\n        index = self._batches.index(batch)\n        contexts = []\n        txns_added_predecessors = []\n        for b in self._batches[index::-1]:\n            batch_is_valid = True\n            contexts_from_batch = []\n            for txn in b.transactions[::-1]:\n                result = self._txn_results[txn.header_signature]\n                if not result.is_valid:\n                    batch_is_valid = False\n                    break\n                else:\n                    txn_id = txn.header_signature\n                    if txn_id not in txns_added_predecessors:\n                        txns_added_predecessors.append(\n                            self._txn_predecessors[txn_id])\n                        contexts_from_batch.append(result.context_id)\n            if batch_is_valid:\n                contexts.extend(contexts_from_batch)\n\n        return contexts", "docstring": "Starting with the batch referenced by batch_signature, iterate back\nthrough the batches and for each valid batch collect the context_id.\nAt the end remove contexts for txns that are other txn's predecessors.\n\nArgs:\nbatch_signature (str): The batch to start from, moving back through\nthe batches in the scheduler\n\nReturns:\n(list): Context ids that haven't been previous base contexts.", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    try:\n      structure = self._LINE.parseString(line)\n    except pyparsing.ParseException:\n      logger.debug('Not a SkyDrive old log file')\n      return False\n\n    day_of_month, month, year, hours, minutes, seconds, milliseconds = (\n        structure.date_time)\n\n    time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds, milliseconds)\n\n    try:\n      dfdatetime_time_elements.TimeElementsInMilliseconds(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      logger.debug(\n          'Not a SkyDrive old log file, invalid date and time: {0!s}'.format(\n              structure.date_time))\n      return False\n\n    return True", "docstring": "Verify that this file is a SkyDrive old log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"}
{"code": "def filter_children(self, ctype: ContentType=None) -> List[SchemaNode]:\n    if (ctype is None):\n        ctype = self.content_type()\n    return [c for c in self.children if ((not isinstance(c, (RpcActionNode, NotificationNode))) and ((c.content_type().value & ctype.value) != 0))]", "docstring": "Return receiver's children based on content type.\n\nArgs:\nctype: Content type.", "source": "codesearchnet"}
{"code": "def _process_book(html_chunk):\n    \n    title, url = _parse_title_url(html_chunk)\n    book_format, pages, isbn = _parse_format_pages_isbn(html_chunk)\n\n    \n    pub = Publication(\n        title=title,\n        authors=_parse_authors(html_chunk),\n        price=_parse_price(html_chunk),\n        publisher=\"Grada\"\n    )\n\n    \n    pub.optionals.URL = url\n    pub.optionals.ISBN = isbn\n    pub.optionals.pages = pages\n    pub.optionals.format = book_format\n    pub.optionals.sub_title = _parse_subtitle(html_chunk)\n    pub.optionals.description = _parse_description(html_chunk)\n\n    return pub", "docstring": "Parse available informations about book from the book details page.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with\ndetails.\n\nReturns:\nobj: :class:`structures.Publication` instance with book details.", "source": "juraj-google-style"}
{"code": "def warning_handler(self, handler):\n    if (not self.opened()):\n        handler = (handler or util.noop)\n        self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n        self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)", "docstring": "Setter for the warning handler function.\n\nIf the DLL is open, this function is a no-op, so it should be called\nprior to calling ``open()``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhandler (function): function to call on warning messages\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def circuit_to_instruction(circuit):\n    instruction = Instruction(name=circuit.name, num_qubits=sum([qreg.size for qreg in circuit.qregs]), num_clbits=sum([creg.size for creg in circuit.cregs]), params=[])\n    instruction.control = None\n\n    def find_bit_position(bit):\n        'find the index of a given bit (Register, int) within\\n        a flat ordered list of bits of the circuit\\n        '\n        if isinstance(bit[0], QuantumRegister):\n            ordered_regs = circuit.qregs\n        else:\n            ordered_regs = circuit.cregs\n        reg_index = ordered_regs.index(bit[0])\n        return (sum([reg.size for reg in ordered_regs[:reg_index]]) + bit[1])\n    definition = circuit.data.copy()\n    if (instruction.num_qubits > 0):\n        q = QuantumRegister(instruction.num_qubits, 'q')\n    if (instruction.num_clbits > 0):\n        c = ClassicalRegister(instruction.num_clbits, 'c')\n    definition = list(map((lambda x: (x[0], list(map((lambda y: (q, find_bit_position(y))), x[1])), list(map((lambda y: (c, find_bit_position(y))), x[2])))), definition))\n    instruction.definition = definition\n    return instruction", "docstring": "Build an ``Instruction`` object from a ``QuantumCircuit``.\n\nThe instruction is anonymous (not tied to a named quantum register),\nand so can be inserted into another circuit. The instruction will\nhave the same string name as the circuit.\n\nArgs:\ncircuit (QuantumCircuit): the input circuit.\n\nReturn:\nInstruction: an instruction equivalent to the action of the\ninput circuit. Upon decomposition, this instruction will\nyield the components comprising the original circuit.", "source": "codesearchnet"}
{"code": "def random_string_generator(size=6, chars=string.ascii_uppercase):\n    try:\n        return ''.join((random.choice(chars) for _ in range(size)))\n    except:\n        (line, filename, synerror) = trace()\n        raise ArcRestHelperError({'function': 'random_string_generator', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        pass", "docstring": "Generates a random string from a set of characters.\n\nArgs:\nsize (int): The length of the resultant string. Defaults to 6.\nchars (str): The characters to be used by :py:func:`random.choice`. Defaults to :py:const:`string.ascii_uppercase`.\n\nReturns:\nstr: The randomly generated string.\n\nExamples:\n>>> arcresthelper.common.random_string_generator()\n'DCNYWU'\n>>> arcresthelper.common.random_string_generator(12, \"arcREST\")\n'cESaTTEacTES'", "source": "codesearchnet"}
{"code": "def wait_for_job(self, job, poll=5):\n    desc = _wait_until_training_done((lambda last_desc: _train_done(self.sagemaker_client, job, last_desc)), None, poll)\n    self._check_job_status(job, desc, 'TrainingJobStatus')\n    return desc", "docstring": "Wait for an Amazon SageMaker training job to complete.\n\nArgs:\njob (str): Name of the training job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeTrainingJob`` API.\n\nRaises:\nValueError: If the training job fails.", "source": "codesearchnet"}
{"code": "def get_push_pop_stack():\n    push = copy.deepcopy(PUSH_STACK)\n    pop = copy.deepcopy(POP_STACK)\n    anno.setanno(push, 'pop', pop)\n    anno.setanno(push, 'gen_push', True)\n    anno.setanno(pop, 'push', push)\n    op_id = _generate_op_id()\n    return (push, pop, op_id)", "docstring": "Create pop and push nodes for substacks that are linked.\n\nReturns:\nA push and pop node which have `push_func` and `pop_func` annotations\nrespectively, identifying them as such. They also have a `pop` and\n`push` annotation respectively, which links the push node to the pop\nnode and vice versa.", "source": "codesearchnet"}
{"code": "def get_locations_list(self, lower_bound=0, upper_bound=None):\n    real_upper_bound = upper_bound\n    if (upper_bound is None):\n        real_upper_bound = self.nbr_of_sub_locations()\n    try:\n        return self._locations_list[lower_bound:real_upper_bound]\n    except:\n        return list()", "docstring": "Return the internal location list.\n\nArgs:\nlower_bound:\nupper_bound:\n\nReturns:", "source": "codesearchnet"}
{"code": "def serialize_to_display(self, doc_format='pretty-xml', *args, **kwargs):\n    return super(ResourceMap, self).serialize(*args, format=doc_format, encoding=None, **kwargs).decode('utf-8')", "docstring": "Serialize ResourceMap to an XML doc that is pretty printed for display.\n\nArgs:\ndoc_format: str\nOne of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,\n``trig`` and ``nquads``.\n\nargs and kwargs:\nOptional arguments forwarded to rdflib.ConjunctiveGraph.serialize().\n\nReturns:\nstr: Pretty printed Resource Map XML doc\n\nNote:\nOnly the default, \"xml\", is automatically indexed by DataONE.", "source": "codesearchnet"}
{"code": "def _print_tensor_info(tensor_info, indent=0):\n    indent_str = '  ' * indent\n\n    def in_print(s):\n        print(indent_str + s)\n    in_print('    dtype: ' + {value: key for key, value in types_pb2.DataType.items()}[tensor_info.dtype])\n    if tensor_info.tensor_shape.unknown_rank:\n        shape = 'unknown_rank'\n    else:\n        dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim]\n        shape = ', '.join(dims)\n        shape = '(' + shape + ')'\n    in_print('    shape: ' + shape)\n    in_print('    name: ' + tensor_info.name)", "docstring": "Prints details of the given tensor_info.\n\nArgs:\ntensor_info: TensorInfo object to be printed.\nindent: How far (in increments of 2 spaces) to indent each line output", "source": "github-repos"}
{"code": "def create_stub(generated_create_stub, channel=None, service_path=None, service_port=None, credentials=None, scopes=None, ssl_credentials=None):\n    if (channel is None):\n        target = '{}:{}'.format(service_path, service_port)\n        if (credentials is None):\n            credentials = _grpc_google_auth.get_default_credentials(scopes)\n        channel = _grpc_google_auth.secure_authorized_channel(credentials, target, ssl_credentials=ssl_credentials)\n    return generated_create_stub(channel)", "docstring": "Creates a gRPC client stub.\n\nArgs:\ngenerated_create_stub (Callable): The generated gRPC method to create a\nstub.\nchannel (grpc.Channel): A Channel object through which to make calls.\nIf None, a secure channel is constructed. If specified, all\nremaining arguments are ignored.\nservice_path (str): The domain name of the API remote host.\nservice_port (int): The port on which to connect to the remote host.\ncredentials (google.auth.credentials.Credentials): The authorization\ncredentials to attach to requests. These credentials identify your\napplication to the service.\nscopes (Sequence[str]): The OAuth scopes for this service. This\nparameter is ignored if a credentials is specified.\nssl_credentials (grpc.ChannelCredentials): gRPC channel credentials\nused to create a secure gRPC channel. If not specified, SSL\ncredentials will be created using default certificates.\n\nReturns:\ngrpc.Client: A gRPC client stub.", "source": "codesearchnet"}
{"code": "def query(self, minhash, k):\n        \n        if k <= 0:\n            raise ValueError(\"k must be positive\")\n        if len(minhash) < self.k*self.l:\n            raise ValueError(\"The num_perm of MinHash out of range\")\n        results = set()\n        r = self.k\n        while r > 0:\n            for key in self._query(minhash, r, self.l):\n                results.add(key)\n                if len(results) >= k:\n                    return list(results)\n            r -= 1\n        return list(results)", "docstring": "Return the approximate top-k keys that have the highest\nJaccard similarities to the query set.\n\nArgs:\nminhash (datasketch.MinHash): The MinHash of the query set.\nk (int): The maximum number of keys to return.\n\nReturns:\n`list` of at most k keys.", "source": "juraj-google-style"}
{"code": "def add_region_feature(self, start_resnum, end_resnum, feat_type=None, feat_id=None, qualifiers=None):\n        \n        if self.feature_file:\n            raise ValueError('Feature file associated with sequence, please remove file association to append '\n                             'additional features.')\n\n        if not feat_type:\n            feat_type = 'Manually added protein sequence region feature'\n        newfeat = SeqFeature(location=FeatureLocation(start_resnum-1, end_resnum),\n                             type=feat_type,\n                             id=feat_id,\n                             qualifiers=qualifiers)\n\n        self.features.append(newfeat)", "docstring": "Add a feature to the features list describing a region of the protein sequence.\n\nArgs:\nstart_resnum (int): Start residue number of the protein sequence feature\nend_resnum (int): End residue number of the protein sequence feature\nfeat_type (str, optional): Optional description of the feature type (ie. 'binding domain')\nfeat_id (str, optional): Optional ID of the feature type (ie. 'TM1')", "source": "juraj-google-style"}
{"code": "def get_concept_item_mapping(self, concepts=None, lang=None):\n    if (concepts is None):\n        concepts = self.filter(active=True)\n        if (lang is not None):\n            concepts = concepts.filter(lang=lang)\n    if (lang is None):\n        languages = set([concept.lang for concept in concepts])\n        if (len(languages) > 1):\n            raise Exception('Concepts has multiple languages')\n        lang = list(languages)[0]\n    item_lists = Item.objects.filter_all_reachable_leaves_many([json.loads(concept.query) for concept in concepts], lang)\n    return dict(zip([c.pk for c in concepts], item_lists))", "docstring": "Get mapping of concepts to items belonging to concept.\n\nArgs:\nconcepts (list of Concept): Defaults to None meaning all concepts\nlang (str): language of concepts, if None use language of concepts\n\nReturns:\ndict: concept (int) -> list of item ids (int)", "source": "codesearchnet"}
{"code": "def convert_selu(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting selu ...')\n\n    if names == 'short':\n        tf_name = 'SELU' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    selu = keras.layers.Activation('selu', name=tf_name)\n    layers[scope_name] = selu(layers[inputs[0]])", "docstring": "Convert selu layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def spherical_vert(script, radius=1.0, center_pt=(0.0, 0.0, 0.0)):\n    function = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)<={}'.format(center_pt[0], center_pt[1], center_pt[2], radius)\n    vert_function(script, function=function)\n    return None", "docstring": "Select all vertices within a spherical radius\n\nArgs:\nradius (float): radius of the sphere\ncenter_pt (3 coordinate tuple or list): center point of the sphere\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def recipe_bigquery_function(config, auth, function, dataset):\n    bigquery(config, {'auth': auth, 'function': function, 'to': {'dataset': dataset}})", "docstring": "Add a custom function or table to a dataset.\n\nArgs:\nauth (authentication) - Credentials used for writing function.\nfunction (choice) - Function or table to create.\ndataset (string) - Existing BigQuery dataset.", "source": "github-repos"}
{"code": "def apply_grads(self, grads, variables):\n    ops = []\n    for (grad, var) in zip(grads, variables):\n        ops.extend(self.apply_grad(grad, var))\n    if (not ops):\n        return ops\n    return variables[0].graph.combine_assignments(ops)", "docstring": "Apply gradients to variables.\n\nCall this function externally instead of apply_grad().  This causes the\noperations to be combined, which is necessary for stacking variables\nsee mtf.rewrite_stack_variables().\n\nArgs:\ngrads: a list of Tensor\nvariables: a list of Variables\nReturns:\na list of Operations", "source": "codesearchnet"}
{"code": "def merge(self, dataset):\n\n    def merge_data(source, dest):\n        for (key, value) in source.items():\n            if isinstance(value, dict):\n                merge_data(value, dest.setdefault(key, {}))\n            else:\n                dest[key] = value\n        return dest\n    merge_data(dataset.data, self._data)\n    for h in dataset.task_history:\n        if (h not in self._task_history):\n            self._task_history.append(h)", "docstring": "Merge the specified dataset on top of the existing data.\n\nThis replaces all values in the existing dataset with the values from the\ngiven dataset.\n\nArgs:\ndataset (TaskData): A reference to the TaskData object that should be merged\non top of the existing object.", "source": "codesearchnet"}
{"code": "def score_intersect(self, term1, term2, **kwargs):\n    t1_kde = self.kde(term1, **kwargs)\n    t2_kde = self.kde(term2, **kwargs)\n    overlap = np.minimum(t1_kde, t2_kde)\n    return np.trapz(overlap)", "docstring": "Compute the geometric area of the overlap between the kernel density\nestimates of two terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns: float", "source": "codesearchnet"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None, layer_head_mask: tf.Tensor | None, training: Optional[bool]=False) -> tf.Tensor:\n    residual = hidden_states\n    hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask)\n    tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    return (hidden_states, self_attn_weights)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`", "source": "github-repos"}
{"code": "def _ParseFileHeader(self, file_object):\n    file_header_map = self._GetDataTypeMap('chrome_cache_data_block_file_header')\n    try:\n        (file_header, _) = self._ReadStructureFromFileObject(file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse data block file header with error: {0!s}'.format(exception))\n    if (file_header.signature != self._FILE_SIGNATURE):\n        raise errors.ParseError('Unsupported data block file signature')\n    format_version = '{0:d}.{1:d}'.format(file_header.major_version, file_header.minor_version)\n    if (format_version not in ('2.0', '2.1')):\n        raise errors.ParseError('Unsupported data block file format version: {0:s}'.format(format_version))\n    if (file_header.block_size not in (256, 1024, 4096)):\n        raise errors.ParseError('Unsupported data block file block size: {0:d}'.format(file_header.block_size))", "docstring": "Parses the file header.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\nParseError: if the file header cannot be read.", "source": "codesearchnet"}
{"code": "def get_authorization_url(self, client_id=None, instance_id=None,\n                              redirect_uri=None, region=None, scope=None,\n                              state=None):\n        \n        client_id = client_id or self.client_id\n        instance_id = instance_id or self.instance_id\n        redirect_uri = redirect_uri or self.redirect_uri\n        region = region or self.region\n        scope = scope or self.scope\n        state = state or str(uuid.uuid4())\n        self.state = state\n        return Request(\n            'GET',\n            self.auth_base_url,\n            params={\n                'client_id': client_id,\n                'instance_id': instance_id,\n                'redirect_uri': redirect_uri,\n                'region': region,\n                'response_type': 'code',\n                'scope': scope,\n                'state': state\n            }\n        ).prepare().url, state", "docstring": "Generate authorization URL.\n\nArgs:\nclient_id (str): OAuth2 client ID. Defaults to ``None``.\ninstance_id (str): App Instance ID. Defaults to ``None``.\nredirect_uri (str): Redirect URI. Defaults to ``None``.\nregion (str): App Region. Defaults to ``None``.\nscope (str): Permissions. Defaults to ``None``.\nstate (str): UUID to detect CSRF. Defaults to ``None``.\n\nReturns:\nstr, str: Auth URL, state", "source": "juraj-google-style"}
{"code": "def get_type(self):\n    raise NotImplementedError('Base class should not be called directly!')", "docstring": "This function returns the type of the sniffer.\n\nReturns:\nThe type (string) of the sniffer. Corresponds to the 'Type' key of\nthe sniffer configuration.", "source": "github-repos"}
{"code": "def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):\n    connection = connections[using]\n    with connection.cursor() as cursor:\n        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])\n        schema_exists = cursor.fetchone()[0]\n        if schema_exists:\n            return False\n        cursor.execute('CREATE SCHEMA %s;', [AsIs(settings.HEROKU_CONNECT_SCHEMA)])\n    with connection.schema_editor() as editor:\n        for model in get_heroku_connect_models():\n            editor.create_model(model)\n        editor.execute('CREATE EXTENSION IF NOT EXISTS \"hstore\";')\n        from heroku_connect.models import TriggerLog, TriggerLogArchive\n        for cls in [TriggerLog, TriggerLogArchive]:\n            editor.create_model(cls)\n    return True", "docstring": "Create Heroku Connect schema.\n\nNote:\nThis function is only meant to be used for local development.\nIn a production environment the schema will be created by\nHeroku Connect.\n\nArgs:\nusing (str): Alias for database connection.\n\nReturns:\nbool: ``True`` if the schema was created, ``False`` if the\nschema already exists.", "source": "codesearchnet"}
{"code": "def parse_date(date_string, ignoretz=True):\n    \n    try:\n        return parser.parse(date_string, ignoretz=ignoretz)\n    except TypeError:\n        return None", "docstring": "Parse a string as a date. If the string fails to parse, `None` will be returned instead\n\n>>> parse_date('2017-08-15T18:24:31')\ndatetime.datetime(2017, 8, 15, 18, 24, 31)\n\nArgs:\ndate_string (`str`): Date in string format to parse\nignoretz (`bool`): If set ``True``, ignore time zones and return a naive :class:`datetime` object.\n\nReturns:\n`datetime`, `None`", "source": "juraj-google-style"}
{"code": "def isubset(self, *keys):\n        \n        \n        return ww.g((key, self[key]) for key in keys)", "docstring": "Return key, self[key] as generator for key in keys.\n\nRaise KeyError if a key does not exist\n\nArgs:\nkeys: Iterable containing keys\n\nExample:\n\n>>> from ww import d\n>>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))\n[(1, 1), (3, 3)]", "source": "juraj-google-style"}
{"code": "def init_cache(self, batch_size, max_length, encoder_outputs):\n    decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n    decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n    decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape)\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward)\n    return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.\nencoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):\n`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:\n`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)\nis a sequence of hidden-states at the output of the last layer of the encoder. Used in the\ncross-attention of the decoder.", "source": "github-repos"}
{"code": "def beam_sql(self, line: str, cell: Optional[str]=None) -> Optional[PValue]:\n    input_str = line\n    if cell:\n        input_str += ' ' + cell\n    parsed = self._parser.parse(input_str.strip().split())\n    if not parsed:\n        return\n    output_name = parsed.output_name\n    verbose = parsed.verbose\n    query = parsed.query\n    runner = parsed.runner\n    if output_name and (not output_name.isidentifier()) or keyword.iskeyword(output_name):\n        on_error('The output_name \"%s\" is not a valid identifier. Please supply a valid identifier that is not a Python keyword.', line)\n        return\n    if not query:\n        on_error('Please supply the SQL query to be executed.')\n        return\n    if runner and runner not in _SUPPORTED_RUNNERS:\n        on_error('Runner \"%s\" is not supported. Supported runners are %s.', runner, _SUPPORTED_RUNNERS)\n        return\n    query = ' '.join(query)\n    found = find_pcolls(query, pcoll_by_name(), verbose=verbose)\n    schemas = set()\n    main_session = importlib.import_module('__main__')\n    for _, pcoll in found.items():\n        if not match_is_named_tuple(pcoll.element_type):\n            on_error('PCollection %s of type %s is not a NamedTuple. See https:\n            return\n        register_coder_for_schema(pcoll.element_type, verbose=verbose)\n        if hasattr(main_session, pcoll.element_type.__name__):\n            schemas.add(pcoll.element_type)\n    if runner in ('DirectRunner', None):\n        collect_data_for_local_run(query, found)\n        output_name, output, chain = apply_sql(query, output_name, found)\n        chain.current.schemas = schemas\n        cache_output(output_name, output)\n        return output\n    output_name, current_node, chain = apply_sql(query, output_name, found, False)\n    current_node.schemas = schemas\n    if runner == 'DataflowRunner':\n        _ = chain.to_pipeline()\n        _ = DataflowOptionsForm(output_name, pcoll_by_name()[output_name], verbose).display_for_input()\n        return None\n    else:\n        raise ValueError('Unsupported runner %s.', runner)", "docstring": "The beam_sql line/cell magic that executes a Beam SQL.\n\nArgs:\nline: the string on the same line after the beam_sql magic.\ncell: everything else in the same notebook cell as a string. If None,\nbeam_sql is used as line magic. Otherwise, cell magic.\n\nReturns None if running into an error or waiting for user input (running on\na selected runner remotely), otherwise a PValue as if a SqlTransform is\napplied.", "source": "github-repos"}
{"code": "def get_experiment_from_key(self, experiment_key):\n    experiment = self.experiment_key_map.get(experiment_key)\n    if experiment:\n        return experiment\n    self.logger.error(('Experiment key \"%s\" is not in datafile.' % experiment_key))\n    self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n    return None", "docstring": "Get experiment for the provided experiment key.\n\nArgs:\nexperiment_key: Experiment key for which experiment is to be determined.\n\nReturns:\nExperiment corresponding to the provided experiment key.", "source": "codesearchnet"}
{"code": "def get_type_info(obj):\n    \n    if isinstance(obj, primitive_types):\n        return ('primitive', type(obj).__name__)\n    if isinstance(obj, sequence_types):\n        return ('sequence', type(obj).__name__)\n    if isinstance(obj, array_types):\n        return ('array', type(obj).__name__)\n    if isinstance(obj, key_value_types):\n        return ('key-value', type(obj).__name__)\n    if isinstance(obj, types.ModuleType):\n        return ('module', type(obj).__name__)\n    if isinstance(obj, (types.FunctionType, types.MethodType)):\n        return ('function', type(obj).__name__)\n    if isinstance(obj, type):\n        if hasattr(obj, '__dict__'):\n            return ('class', obj.__name__)\n    if isinstance(type(obj), type):\n        if hasattr(obj, '__dict__'):\n            cls_name = type(obj).__name__\n            if cls_name == 'classobj':\n                cls_name = obj.__name__\n                return ('class', '{}'.format(cls_name))\n            if cls_name == 'instance':\n                cls_name = obj.__class__.__name__\n            return ('instance', '{} instance'.format(cls_name))\n\n    return ('unknown', type(obj).__name__)", "docstring": "Get type information for a Python object\n\nArgs:\nobj: The Python object\n\nReturns:\ntuple: (object type \"catagory\", object type name)", "source": "juraj-google-style"}
{"code": "async def freeze(self, *args, **kwargs):\n    uid = kwargs.get('uid', 0)\n    coinid = kwargs.get('coinid')\n    amount = kwargs.get('amount')\n    address = kwargs.get('address')\n    try:\n        coinid = coinid.replace('TEST', '')\n    except:\n        pass\n    try:\n        uid = int(uid)\n    except:\n        return (await self.error_400('User id must be integer. '))\n    try:\n        amount = int(amount)\n    except:\n        return (await self.error_400('Amount must be integer. '))\n    try:\n        assert (amount > 0)\n    except:\n        return (await self.error_400('Amount must be positive integer. '))\n    if ((not uid) and address):\n        uid = (await self.get_uid_by_address(address=address, coinid=coinid))\n        if isinstance(uid, dict):\n            return uid\n    database = self.client[self.collection]\n    collection = database[coinid]\n    balance = (await collection.find_one({'uid': uid}))\n    if (not balance):\n        return (await self.error_404(('Freeze. Balance with uid:%s and type:%s not found.' % (uid, coinid))))\n    difference = (int(balance['amount_active']) - int(amount))\n    if (difference < 0):\n        return (await self.error_403('Freeze. Insufficient amount in the account'))\n    amount_frozen = (int(balance['amount_frozen']) + int(amount))\n    (await collection.find_one_and_update({'uid': uid}, {'$set': {'amount_active': str(difference), 'amount_frozen': str(amount_frozen)}}))\n    result = (await collection.find_one({'uid': uid}))\n    result['amount_frozen'] = int(result['amount_frozen'])\n    result['amount_active'] = int(result['amount_active'])\n    del result['_id']\n    return result", "docstring": "Freeze users balance\n\nAccepts:\n- uid [integer] (users id from main server)\n- coinid [string] (blockchain type in uppercase)\n- amount [integer] (amount for freezing)\n\nReturns:\n- uid [integer] (users id from main server)\n- coinid [string] (blockchain type in uppercase)\n- amount_active [integer] (activae users amount)\n- amount_frozen [integer] (frozen users amount)", "source": "codesearchnet"}
{"code": "def delete_panel(self, panel_obj):\n    res = self.panel_collection.delete_one({'_id': panel_obj['_id']})\n    LOG.warning(('Deleting panel %s, version %s' % (panel_obj['panel_name'], panel_obj['version'])))\n    return res", "docstring": "Delete a panel by '_id'.\n\nArgs:\npanel_obj(dict)\n\nReturns:\nres(pymongo.DeleteResult)", "source": "codesearchnet"}
{"code": "def unique_timestamps(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unique_timestamps import unique_timestamps\n    return unique_timestamps(self)", "docstring": "Removes events with duplicated timestamps from an\n[`EventSet`][temporian.EventSet].\n\nReturns a feature-less EventSet where each timestamp from the original\none only appears once. If the input is indexed, the unique operation is\napplied independently for each index.\n\nUsage example:\n```python\n>>> a = tp.event_set(timestamps=[5, 9, 9, 16], features={'f': [1,2,3,4]})\n>>> b = a.unique_timestamps()\n>>> b\nindexes: []\nfeatures: []\nevents:\n(3 events):\ntimestamps: [ 5. 9. 16.]\n...\n\n```\n\nReturns:\nEventSet without features with unique timestamps in the input.", "source": "github-repos"}
{"code": "def group_by_mimetype(content: ProcessorContent) -> dict[str, ProcessorContent]:\n    grouped_content = {}\n    for mimetype, part in content.items():\n        if mimetype not in grouped_content:\n            grouped_content[mimetype] = ProcessorContent()\n        grouped_content[mimetype] += part\n    return grouped_content", "docstring": "Groups content by mimetype.\n\nThe order of parts within each mimetype grouping is preserved, maintaining the\nsame order as they appeared in the original input `content`.\n\nArgs:\ncontent: The content to group.\n\nReturns:\nA dictionary mapping mimetypes to ProcessorContent objects, with the same\norder as in the original input `content`.", "source": "github-repos"}
{"code": "def compare_versions(ver1='', oper='==', ver2=''):\n    \n    if not ver1:\n        raise SaltInvocationError('compare_version, ver1 is blank')\n    if not ver2:\n        raise SaltInvocationError('compare_version, ver2 is blank')\n\n    \n    if ver1 == 'latest':\n        ver1 = six.text_type(sys.maxsize)\n    if ver2 == 'latest':\n        ver2 = six.text_type(sys.maxsize)\n    \n    if ver1 == 'Not Found':\n        ver1 = '0.0.0.0.0'\n    if ver2 == 'Not Found':\n        ver2 = '0.0.0.0.0'\n\n    return salt.utils.versions.compare(ver1, oper, ver2, ignore_epoch=True)", "docstring": "Compare software package versions\n\nArgs:\nver1 (str): A software version to compare\noper (str): The operand to use to compare\nver2 (str): A software version to compare\n\nReturns:\nbool: True if the comparison is valid, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' pkg.compare_versions 1.2 >= 1.3", "source": "juraj-google-style"}
{"code": "def colored(cls, color, message):\n        \n        return getattr(cls, color.upper()) + message + cls.DEFAULT", "docstring": "Small function to wrap a string around a color\n\nArgs:\ncolor (str): name of the color to wrap the string with, must be one\nof the class properties\nmessage (str): String to wrap with the color\n\nReturns:\nstr: the colored string", "source": "juraj-google-style"}
{"code": "def commit_author(sha1=''):\n    with conf.within_proj_dir():\n        cmd = 'git show -s --format=\"%an||%ae\" {}'.format(sha1)\n        result = shell.run(cmd, capture=True, never_pretend=True).stdout\n        (name, email) = result.split('||')\n        return Author(name, email)", "docstring": "Return the author of the given commit.\n\nArgs:\nsha1 (str):\nThe sha1 of the commit to query. If not given, it will return the\nsha1 for the current commit.\nReturns:\nAuthor: A named tuple ``(name, email)`` with the commit author details.", "source": "codesearchnet"}
{"code": "def remove_config(self, id):\n        \n        url = self._url('/configs/{0}', id)\n        res = self._delete(url)\n        self._raise_for_status(res)\n        return True", "docstring": "Remove a config\n\nArgs:\nid (string): Full ID of the config to remove\n\nReturns (boolean): True if successful\n\nRaises:\n:py:class:`docker.errors.NotFound`\nif no config with that ID exists", "source": "juraj-google-style"}
{"code": "def _transpose_batch_time(x):\n    x_static_shape = x.get_shape()\n    if x_static_shape.rank is not None and x_static_shape.rank < 2:\n        return x\n    x_rank = array_ops.rank(x)\n    x_t = array_ops.transpose(x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))\n    x_t.set_shape(tensor_shape.TensorShape([x_static_shape.dims[1].value, x_static_shape.dims[0].value]).concatenate(x_static_shape[2:]))\n    return x_t", "docstring": "Transposes the batch and time dimensions of a Tensor.\n\nIf the input tensor has rank < 2 it returns the original tensor. Retains as\nmuch of the static shape information as possible.\n\nArgs:\nx: A Tensor.\n\nReturns:\nx transposed along the first two dimensions.", "source": "github-repos"}
{"code": "def _forward_over_back_hessian(f, params, use_pfor, dtype=None):\n    return _vectorize_parameters(functools.partial(_hvp, f, params), params, use_pfor=use_pfor, dtype=dtype)", "docstring": "Computes the full Hessian matrix for the scalar-valued f(*params).\n\nArgs:\nf: A function taking `params` and returning a scalar.\nparams: A possibly nested structure of tensors.\nuse_pfor: If true, uses `tf.vectorized_map` calls instead of looping.\ndtype: Required if `use_pfor=False`. A possibly nested structure of dtypes\n(e.g. `tf.float32`) matching the structure of `f`'s returns.\n\nReturns:\nA possibly nested structure of matrix slices corresponding to `params`. Each\nslice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)\nin the corresponding element of `params` and `P` is the total number of\nparameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating\nalong the second axis.", "source": "github-repos"}
{"code": "def get(self, *, search, limit=0, headers=None):\n        \n        return self.transport.forward_request(\n            method='GET',\n            path=self.path,\n            params={'search': search, 'limit': limit},\n            headers=headers\n        )", "docstring": "Retrieves the assets that match a given text search string.\n\nArgs:\nsearch (str): Text search string.\nlimit (int): Limit the number of returned documents. Defaults to\nzero meaning that it returns all the matching assets.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\n:obj:`list` of :obj:`dict`: List of assets that match the query.", "source": "juraj-google-style"}
{"code": "def delete_asset(self, asset_id, asset_type):\n        \n        return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "docstring": "Delete the asset with the provided asset_id.\n\nArgs:\nasset_id: The id of the asset.\nasset_type: The asset type.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def __new__(cls, name, parents, dct):\n        \n        newClass = super(CommandMeta, cls).__new__(cls, name, parents, dct)\n\n        if name != 'Command':\n            for attribute in ['name', 'description', 'help']:\n                if attribute not in dct or dct[attribute] is None:\n                    raise ValueError('%s cannot be None.' % attribute)\n            CommandMeta.registry[name] = newClass\n\n        return newClass", "docstring": "Creates a new Command class and validates it.\n\nArgs:\ncls (Class): the class object being created\nname (name): the name of the class being created\nparents (list): list of parent classes\ndct (dictionary): class attributes\n\nReturns:\n``Class``", "source": "juraj-google-style"}
{"code": "def wrap_in_placeholder(self, arg, shape_info):\n    if shape_info == 'known':\n        return arg\n    if isinstance(arg, ragged_tensor.RaggedTensor):\n        return arg.with_flat_values(self.wrap_in_placeholder(arg.flat_values, shape_info))\n    if isinstance(arg, tensor_shape.TensorShape):\n        if arg.ndims is None:\n            return arg\n        arg = constant_op.constant(arg.as_list())\n    if shape_info == 'unknown_rank':\n        return array_ops.placeholder_with_default(arg, None)\n    if shape_info == 'unknown_dims':\n        return array_ops.placeholder_with_default(arg, [None] * arg.shape.rank)\n    raise AssertionError('Unexpected shape_info %r' % shape_info)", "docstring": "Wraps `arg` in a placeholder to limit static shape info.\n\nArgs:\narg: The value to wrap.  A Tensor, RaggedTensor, or TensorShape.\nshape_info: One of ['known', 'unknown_dims', 'unknown_rank'].\n\nReturns:\n* If shape_info is 'known': returns `arg`.\n* If shape_info is 'unknown_dims': returns a placeholder wrapping `arg`\nwhere the dimension sizes are unknown.  If `arg` is a TensorShape,\nthen convert it to a vector first.  If `arg` is a RaggedTensor, then\nwrap the flat_values.\n* If shape_info is 'unknown_rank': returns a placeholder wrapping `arg`\nwhere the rank is unknown.  If `arg` is a TensorShape, then convert it\nto a vector first.  If `arg` is a RaggedTensor, then wrap the\nflat_values.", "source": "github-repos"}
{"code": "def tensor_summary(name, tensor, summary_description=None, collections=None, summary_metadata=None, family=None, display_name=None):\n    if summary_metadata is None:\n        summary_metadata = _SummaryMetadata()\n    if summary_description is not None:\n        summary_metadata.summary_description = summary_description\n    if display_name is not None:\n        summary_metadata.display_name = display_name\n    serialized_summary_metadata = summary_metadata.SerializeToString()\n    if _distribute_summary_op_util.skip_summary():\n        return _constant_op.constant('')\n    with _summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):\n        val = _gen_logging_ops.tensor_summary_v2(tensor=tensor, tag=tag, name=scope, serialized_summary_metadata=serialized_summary_metadata)\n        _summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])\n    return val", "docstring": "Outputs a `Summary` protocol buffer with a serialized tensor.proto.\n\nArgs:\nname: A name for the generated node. If display_name is not set, it will\nalso serve as the tag name in TensorBoard. (In that case, the tag\nname will inherit tf name scopes.)\ntensor: A tensor of any type and shape to serialize.\nsummary_description: A long description of the summary sequence. Markdown\nis supported.\ncollections: Optional list of graph collections keys. The new summary op is\nadded to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\nsummary_metadata: Optional SummaryMetadata proto (which describes which\nplugins may use the summary value).\nfamily: Optional; if provided, used as the prefix of the summary tag,\nwhich controls the name used for display on TensorBoard when\ndisplay_name is not set.\ndisplay_name: A string used to name this data in TensorBoard. If this is\nnot set, then the node name will be used instead.\n\nReturns:\nA scalar `Tensor` of type `string`. The serialized `Summary` protocol\nbuffer.", "source": "github-repos"}
{"code": "def _update_graph(self, vertex_dict=None, edge_dict=None):\n\n    def set_attrs(ref, attrs):\n        for attr_name, attr_val in attrs.items():\n            ref.set(attr_name, attr_val)\n    with self._lock:\n        if vertex_dict:\n            for vertex, vertex_attrs in vertex_dict.items():\n                set_attrs(self._vertex_refs[vertex], vertex_attrs)\n        if edge_dict:\n            for edge, edge_attrs in edge_dict.items():\n                if isinstance(edge, tuple):\n                    set_attrs(self._edge_refs[edge], edge_attrs)\n                else:\n                    for vertex_pair in self._edge_to_vertex_pairs[edge]:\n                        set_attrs(self._edge_refs[vertex_pair], edge_attrs)", "docstring": "Updates the pydot.Dot object with the given attribute update\n\nArgs:\nvertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes\nedge_dict: This should be\nEither (Dict[str, Dict[str, str]]) which maps edge names to attributes\nOr (Dict[(str, str), Dict[str, str]]) which maps vertex pairs to edge\nattributes", "source": "github-repos"}
{"code": "def invite_by_email(self, email, user, organization, **kwargs):\n        \n        \n        try:\n            invitee = self.user_model.objects.get(email__iexact=email)\n        except self.user_model.DoesNotExist:\n            invitee = None\n\n        \n        user_invitation = self.invitation_model.objects.create(\n            invitee=invitee,\n            invitee_identifier=email.lower(),\n            invited_by=user,\n            organization=organization,\n        )\n        self.send_invitation(user_invitation)\n        return user_invitation", "docstring": "Primary interface method by which one user invites another to join\n\nArgs:\nemail:\nrequest:\n**kwargs:\n\nReturns:\nan invitation instance\n\nRaises:\nMultipleObjectsReturned if multiple matching users are found", "source": "juraj-google-style"}
{"code": "def add_presence_listener(self, callback):\n    listener_uid = uuid4()\n    self.presence_listeners[listener_uid] = callback\n    return listener_uid", "docstring": "Add a presence listener that will send a callback when the client receives\na presence update.\n\nArgs:\ncallback (func(roomchunk)): Callback called when a presence update arrives.\n\nReturns:\nuuid.UUID: Unique id of the listener, can be used to identify the listener.", "source": "codesearchnet"}
{"code": "def position(msg0, msg1, t0, t1, lat_ref=None, lon_ref=None):\n    \n    tc0 = typecode(msg0)\n    tc1 = typecode(msg1)\n\n    if (5<=tc0<=8 and 5<=tc1<=8):\n        if (not lat_ref) or (not lon_ref):\n            raise RuntimeError(\"Surface position encountered, a reference \\\n                               position lat/lon required. Location of \\\n                               receiver can be used.\")\n        else:\n            return surface_position(msg0, msg1, t0, t1, lat_ref, lon_ref)\n\n    elif (9<=tc0<=18 and 9<=tc1<=18):\n        \n        return airborne_position(msg0, msg1, t0, t1)\n\n    elif (20<=tc0<=22 and 20<=tc1<=22):\n        \n        return airborne_position(msg0, msg1, t0, t1)\n\n    else:\n        raise RuntimeError(\"incorrect or inconsistant message types\")", "docstring": "Decode position from a pair of even and odd position message\n(works with both airborne and surface position messages)\n\nArgs:\nmsg0 (string): even message (28 bytes hexadecimal string)\nmsg1 (string): odd message (28 bytes hexadecimal string)\nt0 (int): timestamps for the even message\nt1 (int): timestamps for the odd message\n\nReturns:\n(float, float): (latitude, longitude) of the aircraft", "source": "juraj-google-style"}
{"code": "def hex_to_name(hexx):\n    for (n, h) in defaults.COLOURS.items():\n        if ((len(n) > 1) and (h == hexx.upper())):\n            return n.lower()\n    return None", "docstring": "Convert hex to a color name, using matplotlib's colour names.\n\nArgs:\nhexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\nstr: The name of the colour, or None if not found.", "source": "codesearchnet"}
{"code": "def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):\n    \n    if not frames:\n        logger.info(\"Could save summaries - no summaries to save!\")\n        logger.info(\"You have no frames - aborting\")\n        return None\n    if not keys:\n        logger.info(\"Could save summaries - no summaries to save!\")\n        logger.info(\"You have no keys - aborting\")\n        return None\n\n    selected_summaries_dict = create_selected_summaries_dict(selected_summaries)\n    summary_df = pd.concat(frames, keys=keys, axis=1)\n    \n    for key, value in selected_summaries_dict.items():\n        _summary_file_name = os.path.join(batch_dir, \"summary_%s_%s.csv\" % (\n        key, batch_name))\n        _summary_df = summary_df.iloc[:,\n                      summary_df.columns.get_level_values(1) == value]\n        \n        _header = _summary_df.columns\n        _summary_df.to_csv(_summary_file_name, sep=\";\")\n        logger.info(\n            \"saved summary (%s) to:\\n       %s\" % (key, _summary_file_name))\n    logger.info(\"finished saving summaries\")\n    return summary_df", "docstring": "Writes the summaries to csv-files\n\nArgs:\nframes: list of ``cellpy`` summary DataFrames\nkeys: list of indexes (typically run-names) for the different runs\nselected_summaries: list defining which summary data to save\nbatch_dir: directory to save to\nbatch_name: the batch name (will be used for making the file-name(s))\n\nReturns: a pandas DataFrame with your selected summaries.", "source": "juraj-google-style"}
{"code": "def check_num_tasks(chain, task_count):\n    errors = []\n    min_decision_tasks = 1\n    if (task_count['decision'] < min_decision_tasks):\n        errors.append('{} decision tasks; we must have at least {}!'.format(task_count['decision'], min_decision_tasks))\n    raise_on_errors(errors)", "docstring": "Make sure there are a specific number of specific task types.\n\nCurrently we only check decision tasks.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\ntask_count (dict): mapping task type to the number of links.\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def load_institute(adapter, internal_id, display_name, sanger_recipients=None):\n    \n\n    institute_obj = build_institute(\n        internal_id=internal_id,\n        display_name=display_name,\n        sanger_recipients=sanger_recipients\n    )\n    log.info(\"Loading institute {0} with display name {1}\" \\\n             \" into database\".format(internal_id, display_name))\n\n    adapter.add_institute(institute_obj)", "docstring": "Load a institute into the database\n\nArgs:\nadapter(MongoAdapter)\ninternal_id(str)\ndisplay_name(str)\nsanger_recipients(list(email))", "source": "juraj-google-style"}
{"code": "def _Identity(tensor, name=None):\n    tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)\n    tensor = variable_utils.convert_variables_to_tensors(tensor)\n    if isinstance(tensor, tensor_lib.Tensor):\n        if tensor.dtype._is_ref_dtype:\n            return gen_array_ops.ref_identity(tensor, name=name)\n        else:\n            return array_ops.identity(tensor, name=name)\n    elif isinstance(tensor, composite_tensor.CompositeTensor):\n        return nest.map_structure(_Identity, tensor, expand_composites=True)\n    else:\n        raise TypeError(f\"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.\")", "docstring": "Return a tensor with the same shape and contents as the input tensor.\n\nArgs:\ntensor: A Tensor.\nname: A name for this operation (optional).\n\nReturns:\nA Tensor with the same type and value as the input Tensor.", "source": "github-repos"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(Log2TimelineTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._command_line_arguments = None\n    self._enable_sigsegv_handler = False\n    self._number_of_extraction_workers = 0\n    self._storage_serializer_format = definitions.SERIALIZER_FORMAT_JSON\n    self._source_type = None\n    self._status_view = status_view.StatusView(self._output_writer, self.NAME)\n    self._status_view_mode = status_view.StatusView.MODE_WINDOW\n    self._stdout_output_writer = isinstance(\n        self._output_writer, tools.StdoutOutputWriter)\n    self._worker_memory_limit = None\n\n    self.dependencies_check = True\n    self.list_hashers = False\n    self.list_parsers_and_plugins = False\n    self.list_profilers = False\n    self.show_info = False\n    self.show_troubleshooting = False", "docstring": "Initializes a log2timeline CLI tool.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def save_attributes_to_hdf5_group(group, name, data):\n    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n    if bad_attributes:\n        raise RuntimeError('The following attributes cannot be saved to HDF5 file because they are larger than %d bytes: %s' % (HDF5_OBJECT_HEADER_LIMIT, ', '.join(bad_attributes)))\n    data_npy = np.asarray(data)\n    num_chunks = 1\n    chunked_data = np.array_split(data_npy, num_chunks)\n    while any((x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data)):\n        num_chunks += 1\n        chunked_data = np.array_split(data_npy, num_chunks)\n    if num_chunks > 1:\n        for chunk_id, chunk_data in enumerate(chunked_data):\n            group.attrs['%s%d' % (name, chunk_id)] = chunk_data\n    else:\n        group.attrs[name] = data", "docstring": "Saves attributes (data) of the specified name into the HDF5 group.\n\nThis method deals with an inherent problem of HDF5 file which is not\nable to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\ngroup: A pointer to a HDF5 group.\nname: A name of the attributes to save.\ndata: Attributes data to store.\n\nRaises:\nRuntimeError: If any single attribute is too large to be saved.", "source": "github-repos"}
{"code": "def create_from_binary(cls, load_dataruns, binary_view):\n    (attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, init_sstream) = cls._REPR.unpack(binary_view[:cls._REPR.size])\n    if name_len:\n        name = binary_view[name_offset:(name_offset + (2 * name_len))].tobytes().decode('utf_16_le')\n    else:\n        name = None\n    nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, init_sstream))\n    if load_dataruns:\n        nw_obj.data_runs = DataRuns.create_from_binary(binary_view[nw_obj.rl_offset:])\n    _MOD_LOGGER.debug('NonResidentAttrHeader object created successfully')\n    return nw_obj", "docstring": "Creates a new NonResidentAttrHeader object from a binary stream. The binary\nstream can be represented by a byte string, bytearray or a memoryview of the\nbytearray.\n\nArgs:\nload_dataruns (bool) - Indicates if the dataruns are to be loaded\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\nnon_resident_offset (int) - The offset where the non resident header\nbegins\n\nReturns:\nNonResidentAttrHeader: New object using the binary stream as source", "source": "codesearchnet"}
{"code": "def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):\n    batch_size = enc_output.shape[0]\n    proposals = []\n    _cur = 0\n    level_ids = []\n    for level, (height, width) in enumerate(spatial_shapes):\n        mask_flatten_ = padding_mask[:, _cur:_cur + height * width].view(batch_size, height, width, 1)\n        valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n        valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n        grid_y, grid_x = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij')\n        grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n        scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)\n        grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale\n        width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level\n        proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)\n        proposals.append(proposal)\n        _cur += height * width\n        level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level)\n    output_proposals = torch.cat(proposals, 1)\n    output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n    output_proposals = torch.log(output_proposals / (1 - output_proposals))\n    output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))\n    output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n    object_query = enc_output\n    object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))\n    object_query = object_query.masked_fill(~output_proposals_valid, float(0))\n    object_query = self.enc_output_norm(self.enc_output(object_query))\n    level_ids = torch.cat(level_ids)\n    return (object_query, output_proposals, level_ids)", "docstring": "Generate the encoder output proposals from encoded enc_output.\n\nArgs:\nenc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.\npadding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.\nspatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.\n\nReturns:\n`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.\n- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to\ndirectly predict a bounding box. (without the need of a decoder)\n- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse\nsigmoid.", "source": "github-repos"}
{"code": "def login_with_password(self, username, password, limit=10):\n    warn('login_with_password is deprecated. Use login with sync=True.', DeprecationWarning)\n    return self.login(username, password, limit, sync=True)", "docstring": "Deprecated. Use ``login`` with ``sync=True``.\n\nLogin to the homeserver.\n\nArgs:\nusername (str): Account username\npassword (str): Account password\nlimit (int): Deprecated. How many messages to return when syncing.\nThis will be replaced by a filter API in a later release.\n\nReturns:\nstr: Access token\n\nRaises:\nMatrixRequestError", "source": "codesearchnet"}
{"code": "def get(self, record_id):\n        \n        record_url = self.record_url(record_id)\n        return self._get(record_url)", "docstring": "Retrieves a record by its id\n\n>>> record = airtable.get('recwPQIfs4wKPyc9D')\n\nArgs:\nrecord_id(``str``): Airtable record id\n\nReturns:\nrecord (``dict``): Record", "source": "juraj-google-style"}
{"code": "def parse_config(self, config):\n    prefix = self.argument_prefix\n    self.sources = config.get_sources(prefix)\n    self.smart_sources = [self._get_smart_filename(s) for s in self.sources]\n    self.index = config.get_index(prefix)\n    self.source_roots = OrderedSet(config.get_paths(('%s_source_roots' % prefix)))\n    for (arg, dest) in list(self.paths_arguments.items()):\n        val = config.get_paths(arg)\n        setattr(self, dest, val)\n    for (arg, dest) in list(self.path_arguments.items()):\n        val = config.get_path(arg)\n        setattr(self, dest, val)\n    self.formatter.parse_config(config)", "docstring": "Override this, making sure to chain up first, if your extension adds\nits own custom command line arguments, or you want to do any further\nprocessing on the automatically added arguments.\n\nThe default implementation will set attributes on the extension:\n- 'sources': a set of absolute paths to source files for this extension\n- 'index': absolute path to the index for this extension\n\nAdditionally, it will set an attribute for each argument added with\n`Extension.add_path_argument` or `Extension.add_paths_argument`, with\nthe extension's `Extension.argument_prefix` stripped, and dashes\nchanged to underscores.\n\nArgs:\nconfig: a `config.Config` instance", "source": "codesearchnet"}
{"code": "def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:\n    uncertainty_scores = -torch.abs(logits)\n    return uncertainty_scores", "docstring": "In the Mask2Former paper, uncertainty is estimated as the L1 distance between 0.0 and the logit prediction in 'logits'\nfor the foreground class in `classes`.\n\nArgs:\nlogits (`torch.Tensor`):\nA tensor of shape (R, 1, ...) for class-specific or class-agnostic prediction, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits.\n\nReturns:\nscores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most\nuncertain locations having the highest uncertainty score.", "source": "github-repos"}
{"code": "def __similarity(s1, s2, ngrams_fn, n=3):\n    (ngrams1, ngrams2) = (set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n)))\n    matches = ngrams1.intersection(ngrams2)\n    return ((2 * len(matches)) / (len(ngrams1) + len(ngrams2)))", "docstring": "The fraction of n-grams matching between two sequences\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nfloat: the fraction of n-grams matching", "source": "codesearchnet"}
{"code": "def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):\n    \n    \n    \n    method = cv2.TM_CCOEFF_NORMED\n\n    if rgb:\n        s_bgr = cv2.split(im_search) \n        i_bgr = cv2.split(im_source)\n        weight = (0.3, 0.3, 0.4)\n        resbgr = [0, 0, 0]\n        for i in range(3): \n            resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)\n        res = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]\n    else:\n        s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)\n        i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)\n        \n        if bgremove:\n            s_gray = cv2.Canny(s_gray, 100, 200)\n            i_gray = cv2.Canny(i_gray, 100, 200)\n\n        res = cv2.matchTemplate(i_gray, s_gray, method)\n    w, h = im_search.shape[1], im_search.shape[0]\n\n    result = []\n    while True:\n        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n            top_left = min_loc\n        else:\n            top_left = max_loc\n        if DEBUG: \n            print('templmatch_value(thresh:%.1f) = %.3f' %(threshold, max_val)) \n        if max_val < threshold:\n            break\n        \n        middle_point = (top_left[0]+w/2, top_left[1]+h/2)\n        result.append(dict(\n            result=middle_point,\n            rectangle=(top_left, (top_left[0], top_left[1] + h), (top_left[0] + w, top_left[1]), (top_left[0] + w, top_left[1] + h)),\n            confidence=max_val\n        ))\n        if maxcnt and len(result) >= maxcnt:\n            break\n        \n        cv2.floodFill(res, None, max_loc, (-1000,), max_val-threshold+0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)\n    return result", "docstring": "Locate all positions of a template image with cv2.matchTemplate\n\nUses pixel matching to find the template image within the source image.\n\nArgs:\nim_source(string): source image to search in\nim_search(string): template image to search for\nthreshold: matching threshold; matches with a similarity below this value are ignored\n\nReturns:\nA list of matches: [dict(result=middle_point, rectangle=corner_points, confidence=score), ...]\n\nRaises:\nIOError: if the file cannot be read", "source": "juraj-google-style"}
{"code": "def get_membership(self, uuid=None):\n    group_id = self.get_group_id(uuid=uuid)\n    uri = 'group/{group_id}/member'\n    mbr_data = self.get(uri.format(group_id=group_id), params=None)\n    return mbr_data", "docstring": "Get membership data based on uuid.\n\nArgs:\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: No data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\ndict: membership json", "source": "codesearchnet"}
{"code": "def mktemp(self, container: Container) -> str:\n    logger.debug('creating a temporary file inside container %s', container.uid)\n    response = self.command(container, 'mktemp')\n    if (response.code != 0):\n        msg = 'failed to create temporary file for container {}: [{}] {}'\n        msg = msg.format(container.uid, response.code, response.output)\n        logger.error(msg)\n        raise Exception(msg)\n    assert (response.code == 0), 'failed to create temporary file'\n    fn = response.output.strip()\n    logger.debug('created temporary file inside container %s: %s', container.uid, fn)\n    return fn", "docstring": "Creates a named temporary file within a given container.\n\nReturns:\nthe absolute path to the created temporary file.", "source": "codesearchnet"}
{"code": "def masks_to_boxes(masks: np.ndarray) -> np.ndarray:\n    if masks.size == 0:\n        return np.zeros((0, 4))\n    h, w = masks.shape[-2:]\n    y = np.arange(0, h, dtype=np.float32)\n    x = np.arange(0, w, dtype=np.float32)\n    y, x = np.meshgrid(y, x, indexing='ij')\n    x_mask = masks * np.expand_dims(x, axis=0)\n    x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)\n    x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool))\n    x_min = x.filled(fill_value=100000000.0)\n    x_min = x_min.reshape(x_min.shape[0], -1).min(-1)\n    y_mask = masks * np.expand_dims(y, axis=0)\n    y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)\n    y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool))\n    y_min = y.filled(fill_value=100000000.0)\n    y_min = y_min.reshape(y_min.shape[0], -1).min(-1)\n    return np.stack([x_min, y_min, x_max, y_max], 1)", "docstring": "Compute the bounding boxes around the provided panoptic segmentation masks.\n\nArgs:\nmasks: masks in format `[number_masks, height, width]` where N is the number of masks\n\nReturns:\nboxes: bounding boxes in format `[number_masks, 4]` in xyxy format", "source": "github-repos"}
{"code": "def session_new(self, **kwargs):\n        \n        path = self._get_path('session_new')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Generate a session id for user-based authentication.\n\nA session id is required in order to use any of the write methods.\n\nArgs:\nrequest_token: The token you generated for the user to approve.\nThe token needs to be approved before being\nused here.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def DeregisterPlugin(cls, plugin_class):\n    \n    name = getattr(\n        plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)\n    name = name.lower()\n    if name not in cls._plugins:\n      raise KeyError(\n          'Artifact plugin class not set for name: {0:s}.'.format(name))\n\n    del cls._plugins[name]\n\n    if name in cls._file_system_plugins:\n      del cls._file_system_plugins[name]\n\n    if name in cls._knowledge_base_plugins:\n      del cls._knowledge_base_plugins[name]\n\n    if name in cls._windows_registry_plugins:\n      del cls._windows_registry_plugins[name]", "docstring": "Deregisters a preprocess plugin class.\n\nArgs:\nplugin_class (type): preprocess plugin class.\n\nRaises:\nKeyError: if plugin class is not set for the corresponding name.\nTypeError: if the source type of the plugin class is not supported.", "source": "juraj-google-style"}
{"code": "def smartupgrade(self, restart=True, dependencies=False, prerelease=False):\n        \n        if not self.check():\n            if self.verbose:\n                print(\"Package {} already up-to-date!\".format(self.pkg))\n            return\n        if self.verbose:\n            print(\"Upgrading {} ...\".format(self.pkg))\n        self.upgrade(dependencies, prerelease, force=False)\n        if restart:\n            self.restart()", "docstring": "Upgrade the package if there is a later version available.\nArgs:\nrestart: restart app if True\ndependencies: update package dependencies if True (see pip --no-deps)\nprerelease: update to pre-release and development versions", "source": "juraj-google-style"}
{"code": "def save_config(config, logdir=None):\n  \n  if logdir:\n    with config.unlocked:\n      config.logdir = logdir\n    message = 'Start a new run and write summaries and checkpoints to {}.'\n    tf.logging.info(message.format(config.logdir))\n    tf.gfile.MakeDirs(config.logdir)\n    config_path = os.path.join(config.logdir, 'config.yaml')\n    with tf.gfile.FastGFile(config_path, 'w') as file_:\n      yaml.dump(config, file_, default_flow_style=False)\n  else:\n    message = (\n        'Start a new run without storing summaries and checkpoints since no '\n        'logging directory was specified.')\n    tf.logging.info(message)\n  return config", "docstring": "Save a new configuration by name.\n\nIf a logging directory is specified, it will be created and the configuration\nwill be stored there. Otherwise, a log message will be printed.\n\nArgs:\nconfig: Configuration object.\nlogdir: Location for writing summaries and checkpoints if specified.\n\nReturns:\nConfiguration object.", "source": "juraj-google-style"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    milk_quality_data = pandas.read_csv(known_args.pipeline_input_data)\n    start = time.mktime(time.strptime('2023/06/29 10:00:00', '%Y/%m/%d %H:%M:%S'))\n    test_stream = TestStream()\n    test_stream.advance_watermark_to(start)\n    samples = [milk_quality_data.iloc[i:i + 1] for i in range(len(milk_quality_data))]\n    for watermark_offset, sample in enumerate(samples, 1):\n        test_stream.advance_watermark_to(start + watermark_offset)\n        test_stream.add_elements([sample])\n    test_stream.advance_watermark_to_infinity()\n    model_handler = XGBoostModelHandlerPandas(model_class=xgboost.XGBClassifier, model_state=known_args.model_state)\n    with beam.Pipeline() as p:\n        _ = p | test_stream | 'window' >> beam.WindowInto(window.SlidingWindows(30, 5)) | 'RunInference' >> RunInference(model_handler) | 'Count number of elements in window' >> beam.CombineGlobally(AggregateMilkQualityResults()).without_defaults() | 'Print' >> beam.Map(print)", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def easeInElastic(n, amplitude=1, period=0.3):\n    _checkRange(n)\n    return (1 - easeOutElastic((1 - n), amplitude=amplitude, period=period))", "docstring": "An elastic tween function that begins with an increasing wobble and then snaps into the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def text(self, tag, textdata, step=None):\n    if (step is None):\n        step = self._step\n    else:\n        self._step = step\n    smd = SummaryMetadata(plugin_data=SummaryMetadata.PluginData(plugin_name='text'))\n    if isinstance(textdata, (str, bytes)):\n        tensor = tf.make_tensor_proto(values=[textdata.encode(encoding='utf_8')], shape=(1,))\n    else:\n        textdata = onp.array(textdata)\n        datashape = onp.shape(textdata)\n        if (len(datashape) == 1):\n            tensor = tf.make_tensor_proto(values=[td.encode(encoding='utf_8') for td in textdata], shape=(datashape[0],))\n        elif (len(datashape) == 2):\n            tensor = tf.make_tensor_proto(values=[td.encode(encoding='utf_8') for td in onp.reshape(textdata, (- 1))], shape=(datashape[0], datashape[1]))\n    summary = Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])\n    self.add_summary(summary, step)", "docstring": "Saves a text summary.\n\nArgs:\ntag: str: label for this data\ntextdata: string, or 1D/2D list/numpy array of strings\nstep: int: training step\nNote: markdown formatting is rendered by tensorboard.", "source": "codesearchnet"}
{"code": "def _get_tf2_flags(parser):\n    input_file_group = parser.add_mutually_exclusive_group()\n    input_file_group.add_argument('--saved_model_dir', type=str, help='Full path of the directory containing the SavedModel.')\n    input_file_group.add_argument('--keras_model_file', type=str, help='Full filepath of HDF5 file containing tf.Keras model.')\n    parser.add_argument('--saved_model_tag_set', type=str, help='Comma-separated set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags must be present. In order to pass in an empty tag set, pass in \"\". (default \"serve\")')\n    parser.add_argument('--saved_model_signature_key', type=str, help='Key identifying the SignatureDef containing inputs and outputs. (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)')\n    parser.add_argument('--enable_v1_converter', action='store_true', help='Enables the TensorFlow V1 converter in 2.0')", "docstring": "Returns ArgumentParser for tflite_convert for TensorFlow 2.0.\n\nArgs:\nparser: ArgumentParser", "source": "github-repos"}
{"code": "def _write_input(self, input_dir=\".\"):\n        \n        with open(os.path.join(input_dir, self.input_file), 'wt', encoding=\"utf-8\") as inp:\n            for k, v in self.control_params.items():\n                inp.write('{} {}\\n'.format(k, self._format_param_val(v)))\n            \n            \n            \n            for idx, mol in enumerate(self.mols):\n                filename = os.path.join(\n                    input_dir, '{}.{}'.format(\n                        idx, self.control_params[\"filetype\"])).encode(\"ascii\")\n                \n                if self.control_params[\"filetype\"] == \"pdb\":\n                    self.write_pdb(mol, filename, num=idx+1)\n                \n                else:\n                    a = BabelMolAdaptor(mol)\n                    pm = pb.Molecule(a.openbabel_mol)\n                    pm.write(self.control_params[\"filetype\"], filename=filename,\n                             overwrite=True)\n\n                inp.write(\"\\n\")\n                inp.write(\n                    \"structure {}.{}\\n\".format(\n                        os.path.join(input_dir, str(idx)),\n                        self.control_params[\"filetype\"]))\n                for k, v in self.param_list[idx].items():\n                    inp.write('  {} {}\\n'.format(k, self._format_param_val(v)))\n                inp.write('end structure\\n')", "docstring": "Write the packmol input file to the input directory.\n\nArgs:\ninput_dir (string): path to the input directory", "source": "juraj-google-style"}
{"code": "def get_samples_live_last(self, sensor_id):\n    \n    url = \"https:\n\n    headers = self.__gen_headers()\n    headers[\"Content-Type\"] = \"application/json\"\n\n    params = { \"sensorId\": sensor_id }\n    url = self.__append_url_params(url, params)\n\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get the last sample recorded by the sensor.\n\nArgs:\nsensor_id (string): hexadecimal id of the sensor to query, e.g.\n``0x0013A20040B65FAD``\n\nReturns:\nlist: dictionary objects containing sample data", "source": "juraj-google-style"}
{"code": "def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:\n        \n        return self._recode_output(self._recode_input(table, validate=validate), validate=validate)", "docstring": "Pass the appropriate columns through each recoder function sequentially and return the final result.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply recoding logic.\nvalidate (bool): If ``True``, recoded table must pass validation tests.", "source": "juraj-google-style"}
{"code": "def read_cs_raw_symmetrized_tensors(self):\n    header_pattern = '\\\\s+-{50,}\\\\s+\\\\s+Absolute Chemical Shift tensors\\\\s+\\\\s+-{50,}$'\n    first_part_pattern = '\\\\s+UNSYMMETRIZED TENSORS\\\\s+$'\n    row_pattern = '\\\\s+'.join((['([-]?\\\\d+\\\\.\\\\d+)'] * 3))\n    unsym_footer_pattern = '^\\\\s+SYMMETRIZED TENSORS\\\\s+$'\n    with zopen(self.filename, 'rt') as f:\n        text = f.read()\n    unsym_table_pattern_text = (((header_pattern + first_part_pattern) + '(?P<table_body>.+)') + unsym_footer_pattern)\n    table_pattern = re.compile(unsym_table_pattern_text, (re.MULTILINE | re.DOTALL))\n    rp = re.compile(row_pattern)\n    m = table_pattern.search(text)\n    if m:\n        table_text = m.group('table_body')\n        micro_header_pattern = 'ion\\\\s+\\\\d+'\n        micro_table_pattern_text = (((micro_header_pattern + '\\\\s*^(?P<table_body>(?:\\\\s*') + row_pattern) + ')+)\\\\s+')\n        micro_table_pattern = re.compile(micro_table_pattern_text, (re.MULTILINE | re.DOTALL))\n        unsym_tensors = []\n        for mt in micro_table_pattern.finditer(table_text):\n            table_body_text = mt.group('table_body')\n            tensor_matrix = []\n            for line in table_body_text.rstrip().split('\\n'):\n                ml = rp.search(line)\n                processed_line = [float(v) for v in ml.groups()]\n                tensor_matrix.append(processed_line)\n            unsym_tensors.append(tensor_matrix)\n        self.data['unsym_cs_tensor'] = unsym_tensors\n    else:\n        raise ValueError('NMR UNSYMMETRIZED TENSORS is not found')", "docstring": "Parse the matrix form of the NMR tensors before they are corrected to the table.\n\nReturns:\nList of unsymmetrized tensors, in the order of the atoms.", "source": "codesearchnet"}
{"code": "def save(self, target, format=None, encoding=None, **options):\n    if (encoding is None):\n        encoding = config.DEFAULT_ENCODING\n    if (format is None):\n        (_, format) = helpers.detect_scheme_and_format(target)\n    writer_class = self.__custom_writers.get(format)\n    if (writer_class is None):\n        if (format not in config.WRITERS):\n            message = ('Format \"%s\" is not supported' % format)\n            raise exceptions.FormatError(message)\n        writer_class = helpers.import_attribute(config.WRITERS[format])\n    writer_options = helpers.extract_options(options, writer_class.options)\n    if options:\n        message = 'Not supported options \"%s\" for format \"%s\"'\n        message = (message % (', '.join(options), format))\n        raise exceptions.TabulatorException(message)\n    writer = writer_class(**writer_options)\n    writer.write(self.iter(), target, headers=self.headers, encoding=encoding)", "docstring": "Save stream to the local filesystem.\n\nArgs:\ntarget (str): Path where to save the stream.\nformat (str, optional): The format the stream will be saved as. If\nNone, detects from the ``target`` path. Defaults to None.\nencoding (str, optional): Saved file encoding. Defaults to\n``config.DEFAULT_ENCODING``.\n**options: Extra options passed to the writer.", "source": "codesearchnet"}
{"code": "def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False):\n    if isinstance(cl_device_type, str):\n        cl_device_type = device_type_from_string(cl_device_type)\n    device = None\n    if (platform is None):\n        platforms = cl.get_platforms()\n    else:\n        platforms = [platform]\n    for platform in platforms:\n        devices = platform.get_devices(device_type=cl_device_type)\n        for dev in devices:\n            if device_supports_double(dev):\n                try:\n                    env = CLEnvironment(platform, dev)\n                    return [env]\n                except cl.RuntimeError:\n                    pass\n    if (not device):\n        if fallback_to_any_device_type:\n            return cl.get_platforms()[0].get_devices()\n        else:\n            raise ValueError('No devices of the specified type ({}) found.'.format(cl.device_type.to_string(cl_device_type)))\n    raise ValueError('No suitable OpenCL device found.')", "docstring": "Get a list containing a single device environment, for a device of the given type on the given platform.\n\nThis will only fetch devices that support double (possibly only double with a pragma\ndefined, but still, it should support double).\n\nArgs:\ncl_device_type (cl.device_type.* or string): The type of the device we want,\ncan be an OpenCL device type or a string matching 'GPU', 'CPU' or 'ALL'.\nplatform (opencl platform): The opencl platform to select the devices from\nfallback_to_any_device_type (boolean): If True, try to fall back to any possible device in the system.\n\nReturns:\nlist of CLEnvironment: List with one element, the CL runtime environment requested.", "source": "codesearchnet"}
{"code": "def emit_region(self, timestamp: int, duration: int, pid: int, tid: int, category: str, name: str, args: Dict[str, Any]) -> None:\n    event = self._create_event('X', category, name, pid, tid, timestamp)\n    event['dur'] = duration\n    event['args'] = args\n    self._events.append(event)", "docstring": "Adds a region event to the trace.\n\nArgs:\ntimestamp:  The start timestamp of this region as a long integer.\nduration:  The duration of this region as a long integer.\npid:  Identifier of the process generating this event as an integer.\ntid:  Identifier of the thread generating this event as an integer.\ncategory: The event category as a string.\nname:  The event name as a string.\nargs:  A JSON-compatible dictionary of event arguments.", "source": "github-repos"}
{"code": "def readMonthTariffs(self, months_type):\n        \n        self.setContext(\"readMonthTariffs\")\n        try:\n\n            req_type = binascii.hexlify(str(months_type).zfill(1))\n            req_str = \"01523102303031\" + req_type + \"282903\"\n            work_table = self.m_mons\n            if months_type == ReadMonths.kWhReverse:\n                work_table = self.m_rev_mons\n\n            self.request(False)\n            req_crc = self.calc_crc16(req_str[2:].decode(\"hex\"))\n            req_str += req_crc\n            self.m_serial_port.write(req_str.decode(\"hex\"))\n            raw_ret = self.m_serial_port.getResponse(self.getContext())\n            self.serialPostEnd()\n            unpacked_read = self.unpackStruct(raw_ret, work_table)\n            self.convertData(unpacked_read, work_table, self.m_kwh_precision)\n            return_crc = self.calc_crc16(raw_ret[1:-2])\n            if str(return_crc) == str(work_table[\"crc16\"][MeterData.StringValue]):\n                ekm_log(\"Months CRC success, type = \" + str(req_type))\n                self.setContext(\"\")\n                return True\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return False", "docstring": "Serial call to read month tariffs block into meter object buffer.\n\nArgs:\nmonths_type (int): A :class:`~ekmmeters.ReadMonths` value.\n\nReturns:\nbool: True on completion.", "source": "juraj-google-style"}
{"code": "def accuracy_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):\n    \n    gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)\n\n    if len(gold) and len(pred):\n        acc = np.sum(gold == pred) / len(gold)\n    else:\n        acc = 0\n\n    return acc", "docstring": "Calculate (micro) accuracy.\nArgs:\ngold: A 1d array-like of gold labels\npred: A 1d array-like of predicted labels (assuming abstain = 0)\nignore_in_gold: A list of labels for which elements having that gold\nlabel will be ignored.\nignore_in_pred: A list of labels for which elements having that pred\nlabel will be ignored.\n\nReturns:\nA float, the (micro) accuracy score", "source": "juraj-google-style"}
{"code": "def reassign_label(cls, destination_cluster, label):\n    conn = Qubole.agent(version=Cluster.api_version)\n    data = {'destination_cluster': destination_cluster, 'label': label}\n    return conn.put((cls.rest_entity_path + '/reassign-label'), data)", "docstring": "Reassign a label from one cluster to another.\n\nArgs:\n`destination_cluster`: id/label of the cluster to move the label to\n\n`label`: label to be moved from the source cluster", "source": "codesearchnet"}
{"code": "async def runCmdLine(self, line):\n    if self.echoline:\n        self.outp.printf(f'{self.cmdprompt}{line}')\n    ret = None\n    name = line.split(None, 1)[0]\n    cmdo = self.getCmdByName(name)\n    if (cmdo is None):\n        self.printf(('cmd not found: %s' % (name,)))\n        return\n    try:\n        ret = (await cmdo.runCmdLine(line))\n    except s_exc.CliFini:\n        (await self.fini())\n    except asyncio.CancelledError:\n        self.printf('Cmd cancelled')\n    except Exception as e:\n        exctxt = traceback.format_exc()\n        self.printf(exctxt)\n        self.printf(('error: %s' % e))\n    return ret", "docstring": "Run a single command line.\n\nArgs:\nline (str): Line to execute.\n\nExamples:\nExecute the 'woot' command with the 'help' switch:\n\nawait cli.runCmdLine('woot --help')\n\nReturns:\nobject: Arbitrary data from the cmd class.", "source": "codesearchnet"}
{"code": "def sub_location(self, nbr):\n    assert (nbr > (- 1)), 'Sub location number must be greater or equal to 0!'\n    assert (nbr < (self.nbr_of_sub_locations() - 1)), ('Sub location number must be lower than %d!' % (self.nbr_of_sub_locations() - 1))\n    return self._locations_list[nbr]", "docstring": "Return a given sub location, 0-based.\n\nArgs:\nnbr: index of the sub location, 0-based.\n\nReturns:\nThe sub location at the given index.", "source": "codesearchnet"}
{"code": "def read_avg_core_poten(self):\n\n    def pairwise(iterable):\n        's -> (s0,s1), (s1,s2), (s2, s3), ...'\n        a = iter(iterable)\n        return zip(a, a)\n    with zopen(self.filename, 'rt') as foutcar:\n        line = foutcar.readline()\n        aps = []\n        while (line != ''):\n            line = foutcar.readline()\n            if ('the norm of the test charge is' in line):\n                ap = []\n                while (line != ''):\n                    line = foutcar.readline()\n                    if ('E-fermi' in line):\n                        aps.append(ap)\n                        break\n                    data = line.split()\n                    for (i, pot) in pairwise(data):\n                        ap.append(float(pot))\n    return aps", "docstring": "Read the core potential at each ionic step.\n\nReturns:\nA list for each ionic step containing a list of the average core\npotentials for each atom: [[avg core pot]].\n\nExample:\nThe average core potential of the 2nd atom of the structure at the\nlast ionic step is: [-1][1]", "source": "codesearchnet"}
{"code": "def disconnect(self, container, *args, **kwargs):\n        \n        if isinstance(container, Container):\n            container = container.id\n        return self.client.api.disconnect_container_from_network(\n            container, self.id, *args, **kwargs\n        )", "docstring": "Disconnect a container from this network.\n\nArgs:\ncontainer (str): Container to disconnect from this network, as\neither an ID, name, or\n:py:class:`~docker.models.containers.Container` object.\nforce (bool): Force the container to disconnect from a network.\nDefault: ``False``\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def WriteEvent(self, event):\n    \n    self.WriteEventStart()\n\n    try:\n      self.WriteEventBody(event)\n\n    except errors.NoFormatterFound as exception:\n      error_message = 'unable to retrieve formatter with error: {0!s}'.format(\n          exception)\n      self._ReportEventError(event, error_message)\n\n    except errors.WrongFormatter as exception:\n      error_message = 'wrong formatter with error: {0!s}'.format(exception)\n      self._ReportEventError(event, error_message)\n\n    self.WriteEventEnd()", "docstring": "Writes the event to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def get_enumerations_from_bit_mask(enumeration, mask):\n    return [x for x in enumeration if ((x.value & mask) == x.value)]", "docstring": "A utility function that creates a list of enumeration values from a bit\nmask for a specific mask enumeration class.\n\nArgs:\nenumeration (class): The enumeration class from which to draw\nenumeration values.\nmask (int): The bit mask from which to identify enumeration values.\n\nReturns:\nlist: A list of enumeration values corresponding to the bit mask.", "source": "codesearchnet"}
{"code": "def _process_worker(call_queue, result_queue, shutdown):\n    \n    while True:\n        try:\n            call_item = call_queue.get(block=True, timeout=0.1)\n        except queue.Empty:\n            if shutdown.is_set():\n                return\n        else:\n            try:\n                r = call_item()\n            except BaseException as e:\n                result_queue.put(_ResultItem(call_item.work_id, exception=e))\n            else:\n                result_queue.put(_ResultItem(call_item.work_id, result=r))", "docstring": "Evaluates calls from call_queue and places the results in result_queue.\n\nThis worker is run in a separate process.\n\nArgs:\ncall_queue: A multiprocessing.Queue of _CallItems that will be read and\nevaluated by the worker.\nresult_queue: A multiprocessing.Queue of _ResultItems that will be written\nto by the worker.\nshutdown: A multiprocessing.Event that will be set as a signal to the\nworker that it should exit when call_queue is empty.", "source": "juraj-google-style"}
{"code": "def _infer_shape(self, dimensions):\n    n = np.prod(dimensions)\n    m = np.prod(abs(np.array(self._shape)))\n    v = np.array(self._shape)\n    v[(v == (- 1))] = (n // m)\n    return tuple(v)", "docstring": "Replaces the -1 wildcard in the output shape vector.\n\nThis function infers the correct output shape given the input dimensions.\n\nArgs:\ndimensions: List of input non-batch dimensions.\n\nReturns:\nTuple of non-batch output dimensions.", "source": "codesearchnet"}
{"code": "def get_servo_temperature(self):\n        \n        data = []\n        data.append(0x09)\n        data.append(self.servoid)\n        data.append(RAM_READ_REQ)\n        data.append(TEMPERATURE_RAM)\n        data.append(BYTE2)\n        send_data(data)\n        rxdata = []\n        try:\n            rxdata = SERPORT.read(13)\n            return ord(rxdata[9])\n        except HerkulexError:\n            raise HerkulexError(\"Could not communicate with motors\")", "docstring": "Gets the current temperature of Herkulex\n\nArgs:\nnone\n\nReturns:\nint: the current temperature register of Herkulex\n\nRaises:\nSerialException: Error occurred while opening serial port", "source": "juraj-google-style"}
{"code": "def __rmod__(self, other):\n        \n        try:\n            other = as_dimension(other)\n        except (TypeError, ValueError):\n            return NotImplemented\n        return other % self", "docstring": "Returns `other` modulo `self`.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is `other` modulo `self`.", "source": "juraj-google-style"}
{"code": "def __init__(self, datastore_client, entity_kind_batches, entity_kind_images):\n    \n    self._datastore_client = datastore_client\n    self._entity_kind_batches = entity_kind_batches\n    self._entity_kind_images = entity_kind_images\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    self._data = {}", "docstring": "Initialize ImageBatchesBase.\n\nArgs:\ndatastore_client: instance of the CompetitionDatastoreClient\nentity_kind_batches: Cloud Datastore entity kind which is used to store\nbatches of images.\nentity_kind_images: Cloud Datastore entity kind which is used to store\nindividual images.", "source": "juraj-google-style"}
{"code": "def generators_from_logdir(logdir):\n  \n  subdirs = io_wrapper.GetLogdirSubdirectories(logdir)\n  generators = [\n      itertools.chain(*[\n          generator_from_event_file(os.path.join(subdir, f))\n          for f in tf.io.gfile.listdir(subdir)\n          if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))\n      ]) for subdir in subdirs\n  ]\n  return generators", "docstring": "Returns a list of event generators for subdirectories with event files.\n\nThe number of generators returned should equal the number of directories\nwithin logdir that contain event files. If only logdir contains event files,\nreturns a list of length one.\n\nArgs:\nlogdir: A log directory that contains event files.\n\nReturns:\nList of event generators for each subdirectory with event files.", "source": "juraj-google-style"}
{"code": "def segment_text(text, seg_regex=SEG_REGEX):\n    for m in seg_regex.finditer(text):\n        (yield m.group(0))", "docstring": "Return an iterator of segments in the text.\n\nArgs:\ntext (unicode): string of IPA Unicode text\nseg_regex (_regex.Pattern): compiled regex defining a segment (base +\nmodifiers)\n\nReturn:\ngenerator: segments in the input text", "source": "codesearchnet"}
{"code": "def send_peers(self, connection_id):\n        \n        with self._lock:\n            \n            \n            peer_endpoints = list(self._peers.values())\n            if self._endpoint:\n                peer_endpoints.append(self._endpoint)\n            peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)\n            try:\n                \n                \n                self._network.send(\n                    validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,\n                    peers_response.SerializeToString(),\n                    connection_id,\n                    one_way=True)\n            except ValueError:\n                LOGGER.debug(\"Connection disconnected: %s\", connection_id)", "docstring": "Sends a message containing our peers to the\nconnection identified by connection_id.\n\nArgs:\nconnection_id (str): A unique identifier which identifies an\nconnection on the network server socket.", "source": "juraj-google-style"}
{"code": "def _find_reader_dataset(self, dataset_key, **dfilter):\n    too_many = False\n    for (reader_name, reader_instance) in self.readers.items():\n        try:\n            ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter)\n        except TooManyResults:\n            LOG.trace('Too many datasets matching key {} in reader {}'.format(dataset_key, reader_name))\n            too_many = True\n            continue\n        except KeyError:\n            LOG.trace(\"Can't find dataset %s in reader %s\", str(dataset_key), reader_name)\n            continue\n        LOG.trace('Found {} in reader {} when asking for {}'.format(str(ds_id), reader_name, repr(dataset_key)))\n        try:\n            return self.getitem(ds_id)\n        except KeyError:\n            return Node(ds_id, {'reader_name': reader_name})\n    if too_many:\n        raise TooManyResults('Too many keys matching: {}'.format(dataset_key))", "docstring": "Attempt to find a `DatasetID` in the available readers.\n\nArgs:\ndataset_key (str, float, DatasetID):\nDataset name, wavelength, or a combination of `DatasetID`\nparameters to use in searching for the dataset from the\navailable readers.\n**dfilter (list or str): `DatasetID` parameters besides `name`\nand `wavelength` to use to filter the\navailable datasets. Passed directly to\n`get_dataset_key` of the readers, see\nthat method for more information.", "source": "codesearchnet"}
{"code": "def NetshStaticIp(interface, ip=u'127.0.0.9', subnet=u'255.255.255.255', gw=u'127.0.0.1'):\n    args = ['/c', 'netsh', 'interface', 'ip', 'set', 'address', interface, 'static', ip, subnet, gw, '1']\n    res = client_utils_common.Execute('cmd', args, time_limit=(- 1), bypass_whitelist=True)\n    return res", "docstring": "Changes interface to a statically set IP.\n\nSets IP configs to local if no parameters are passed.\n\nArgs:\ninterface: Name of the interface.\nip: IP address.\nsubnet: Subnet mask.\ngw: IP address of the default gateway.\n\nReturns:\nA tuple of stdout, stderr, exit_status.", "source": "codesearchnet"}
{"code": "def _CalculateYLines(self, dists):\n    \n    tot_dist = sum(dists)\n    if tot_dist > 0:\n      pixel_dist = [float(d * (self._gheight-20))/tot_dist for d in dists]\n      pixel_grid = [0]+[int(pd + sum(pixel_dist[0:i])) for i,pd in\n                        enumerate(pixel_dist)]\n    else:\n      pixel_grid = []\n\n    return pixel_grid", "docstring": "Builds a list with y-coordinates for the horizontal lines in the graph.\n\nArgs:\n# One integer for each pair of stations\n# indicating the approximate distance\ndists: [0,33,140, ... ,X]\n\nReturns:\n# One integer y-coordinate for each station normalized between\n# 0 and X, where X is the height of the graph in pixels\n[0, 33, 140, ... , X]", "source": "juraj-google-style"}
{"code": "def display_arr(screen, arr, video_size, transpose):\n    \n    \n    if transpose:\n        pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))\n    else:\n        pyg_img = arr\n    \n    pyg_img = pygame.transform.scale(pyg_img, video_size)\n    \n    screen.blit(pyg_img, (0, 0))", "docstring": "Display an image to the pygame screen.\n\nArgs:\nscreen (pygame.Surface): the pygame surface to write frames to\narr (np.ndarray): numpy array representing a single frame of gameplay\nvideo_size (tuple): the size to render the frame as\ntranspose (bool): whether to transpose the frame before displaying\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _get_run_debug_urls(self):\n    return ['file:", "docstring": "Get the debug_urls value for the current run() call.\n\nReturns:\ndebug_urls: (list of str) Debug URLs for the current run() call.\nCurrently, the list consists of only one URL that is a file:// URL.", "source": "github-repos"}
{"code": "def _deduplicate_indexed_slices(values, indices):\n    unique_indices, new_index_positions = array_ops.unique(indices)\n    summed_values = math_ops.unsorted_segment_sum(values, new_index_positions, array_ops.shape(unique_indices)[0])\n    return (summed_values, unique_indices)", "docstring": "Sums `values` associated with any non-unique `indices`.\n\nArgs:\nvalues: A `Tensor` with rank >= 1.\nindices: A one-dimensional integer `Tensor`, indexing into the first\ndimension of `values` (as in an IndexedSlices object).\nReturns:\nA tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a\nde-duplicated version of `indices` and `summed_values` contains the sum of\n`values` slices associated with each unique index.", "source": "github-repos"}
{"code": "def gmeta_pop(gmeta, info=False):\n    if (type(gmeta) is GlobusHTTPResponse):\n        gmeta = json.loads(gmeta.text)\n    elif (type(gmeta) is str):\n        gmeta = json.loads(gmeta)\n    elif (type(gmeta) is not dict):\n        raise TypeError('gmeta must be dict, GlobusHTTPResponse, or JSON string')\n    results = []\n    for res in gmeta['gmeta']:\n        for con in res['content']:\n            results.append(con)\n    if info:\n        fyi = {'total_query_matches': gmeta.get('total')}\n        return (results, fyi)\n    else:\n        return results", "docstring": "Remove GMeta wrapping from a Globus Search result.\nThis function can be called on the raw GlobusHTTPResponse that Search returns,\nor a string or dictionary representation of it.\n\nArguments:\ngmeta (dict, str, or GlobusHTTPResponse): The Globus Search result to unwrap.\ninfo (bool): If ``False``, will return a list of the results\nand discard the metadata. If ``True``, will return a tuple containing\nthe results list, and other information about the query.\n**Default**: ``False``.\n\nReturns:\nlist (if ``info=False``): The unwrapped results.\ntuple (if ``info=True``): The unwrapped results, and a dictionary of query information.", "source": "codesearchnet"}
{"code": "def has_chosen(state, correct, msgs):\n    if (not issubclass(type(correct), int)):\n        raise InstructorError('Inside `has_chosen()`, the argument `correct` should be an integer.')\n    student_process = state.student_process\n    if (not isDefinedInProcess(MC_VAR_NAME, student_process)):\n        raise InstructorError('Option not available in the student process')\n    else:\n        selected_option = getOptionFromProcess(student_process, MC_VAR_NAME)\n        if (not issubclass(type(selected_option), int)):\n            raise InstructorError('selected_option should be an integer')\n        if ((selected_option < 1) or (correct < 1)):\n            raise InstructorError('selected_option and correct should be greater than zero')\n        if ((selected_option > len(msgs)) or (correct > len(msgs))):\n            raise InstructorError('there are not enough feedback messages defined')\n        feedback_msg = msgs[(selected_option - 1)]\n        state.reporter.success_msg = msgs[(correct - 1)]\n        state.do_test(EqualTest(selected_option, correct, feedback_msg))", "docstring": "Test multiple choice exercise.\n\nTest for a MultipleChoiceExercise. The correct answer (as an integer) and feedback messages\nare passed to this function.\n\nArgs:\ncorrect (int): the index of the correct answer (should be an instruction). Starts at 1.\nmsgs (list(str)): a list containing all feedback messages belonging to each choice of the\nstudent. The list should have the same length as the number of options.", "source": "codesearchnet"}
{"code": "def read_data_event(self, whence, complete=False, can_flush=False):\n    return Transition(None, _read_data_handler(whence, self, complete, can_flush))", "docstring": "Creates a transition to a co-routine for retrieving data as bytes.\n\nArgs:\nwhence (Coroutine): The co-routine to return to after the data is satisfied.\ncomplete (Optional[bool]): True if STREAM_END should be emitted if no bytes are read or\navailable; False if INCOMPLETE should be emitted in that case.\ncan_flush (Optional[bool]): True if NEXT may be requested after INCOMPLETE is emitted as a result of this\ndata request.", "source": "codesearchnet"}
{"code": "def plot_residuals(self, plot=None):\n        \n        if plot is None:\n            import matplotlib.pyplot as plot\n        x = numpy.arange(1, len(self.residuals) + 1)\n        y = _gvar.mean(self.residuals)\n        yerr = _gvar.sdev(self.residuals)\n        plot.errorbar(x=x, y=y, yerr=yerr, fmt='o', color='b')\n        plot.ylabel('normalized residuals')\n        xr = [x[0], x[-1]]\n        plot.plot([x[0], x[-1]], [0, 0], 'r-')\n        plot.fill_between(\n            x=xr, y1=[-1,-1], y2=[1,1], color='r', alpha=0.075\n            )\n        return plot", "docstring": "Plot normalized fit residuals.\n\nThe sum of the squares of the residuals equals ``self.chi2``.\nIndividual residuals should be distributed about zero, with unit\nstandard deviation, in a Gaussian distribution.\n\nArgs:\nplot: :mod:`matplotlib` plotter. If ``None``, uses\n``matplotlib.pyplot``.\n\nReturns:\nPlotter ``plot``.", "source": "juraj-google-style"}
{"code": "def _ParseApplicationPasswordRecord(self, parser_mediator, record):\n    key = record.get('_key_', None)\n    if ((not key) or (not key.startswith(b'ssgp'))):\n        raise errors.ParseError('Unsupported application password record key value does not start with: \"ssgp\".')\n    event_data = KeychainApplicationRecordEventData()\n    event_data.account_name = self._ParseBinaryDataAsString(parser_mediator, record['acct'])\n    event_data.comments = self._ParseBinaryDataAsString(parser_mediator, record['crtr'])\n    event_data.entry_name = self._ParseBinaryDataAsString(parser_mediator, record['PrintName'])\n    ssgp_hash = codecs.encode(key[4:], 'hex')\n    event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')\n    event_data.text_description = self._ParseBinaryDataAsString(parser_mediator, record['desc'])\n    date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])\n    if date_time:\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])\n    if date_time:\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts the information from an application password record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord (dict[str, object]): database record.\n\nRaises:\nParseError: if Internet password record cannot be parsed.", "source": "codesearchnet"}
{"code": "def add_physical_qubit(self, physical_qubit):\n    if (not isinstance(physical_qubit, int)):\n        raise CouplingError('Physical qubits should be integers.')\n    if (physical_qubit in self.physical_qubits):\n        raise CouplingError(('The physical qubit %s is already in the coupling graph' % physical_qubit))\n    self.graph.add_node(physical_qubit)\n    self._dist_matrix = None\n    self._qubit_list = None", "docstring": "Add a physical qubit to the coupling graph as a node.\n\nArgs:\nphysical_qubit (int): An integer representing a physical qubit.\n\nRaises:\nCouplingError: if trying to add duplicate qubit", "source": "codesearchnet"}
{"code": "def get_last_next(self, date):\n    (past, future) = ((None, None), (None, None))\n    for (mjd, value) in reversed(self.data):\n        if (mjd <= date):\n            past = (mjd, value)\n            break\n        future = (mjd, value)\n    return (past, future)", "docstring": "Provide the last and next leap-second events relative to a date\n\nArgs:\ndate (float): Date in MJD\nReturn:\ntuple: ((mjd, value) of the last event, (mjd, value) of the next event),\nwhere an element is (None, None) if no such event exists", "source": "codesearchnet"}
{"code": "def get_csv(filename):\n    check_if_this_file_exist(filename)\n    filename = os.path.abspath(filename)\n    s = command_line(['exiftool', '-G', '-csv', '-sort', filename])\n    if s:\n        s = s.decode('utf-8')\n        return s\n    else:\n        return 0", "docstring": "Return a CSV representation of the EXIF data\n\nTakes a filename and returns a unicode string in CSV format.\n\nArguments:\nfilename {string} -- your filename\n\nReturns:\n[unicode] -- unicode string with the CSV data, or 0 if exiftool produced no output", "source": "codesearchnet"}
{"code": "def _read_template(template):\n    \n    template = _read_content_or_path(template)\n    file_obj = StringIO.StringIO(template)\n\n    return ET.parse(file_obj)", "docstring": "Read XSLT template.\n\nArgs:\ntemplate (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\n\nReturns:\nobj: Required XML parsed with ``lxml.etree``.", "source": "juraj-google-style"}
{"code": "def _calibrate_ir(radiance, coefs):\n    logger.debug('Calibrating to brightness temperature')\n    n = coefs['n']\n    bteff = ((C2 * n) / xu.log((1 + ((C1 * (n ** 3)) / radiance.where((radiance > 0))))))\n    bt = xr.DataArray(((bteff * coefs['b']) + coefs['a']))\n    return bt.where(xu.logical_and((bt >= coefs['btmin']), (bt <= coefs['btmax'])))", "docstring": "Convert IR radiance to brightness temperature\n\nReference: [IR]\n\nArgs:\nradiance: Radiance [mW m-2 cm-1 sr-1]\ncoefs: Dictionary of calibration coefficients. Keys:\nn: The channel's central wavenumber [cm-1]\na: Offset [K]\nb: Slope [1]\nbtmin: Minimum brightness temperature threshold [K]\nbtmax: Maximum brightness temperature threshold [K]\n\nReturns:\nBrightness temperature [K]", "source": "codesearchnet"}
{"code": "def seed(self, seed): \n        \n        if seed is None:\n            self.env.seed = round(time.time())\n        else:\n            self.env.seed = seed\n        return self.env.seed", "docstring": "Sets the random seed of the environment to the given value (current time, if seed=None).\nNaturally deterministic Environments (e.g. ALE or some gym Envs) don't have to implement this method.\n\nArgs:\nseed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec).\nReturns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported).", "source": "juraj-google-style"}
{"code": "def tscore(sample1, sample2):\n    \n    if len(sample1) != len(sample2):\n        raise ValueError(\"different number of values\")\n    error = pooled_sample_variance(sample1, sample2) / len(sample1)\n    diff = statistics.mean(sample1) - statistics.mean(sample2)\n    return diff / math.sqrt(error * 2)", "docstring": "Calculate a t-test score for the difference between two samples.\n\nArgs:\nsample1: one sample.\nsample2: the other sample.\n\nReturns:\nThe t-test score, as a float.", "source": "juraj-google-style"}
{"code": "def unarchive_user(self, user_id):\n        \n        url = self.record_url + \"/unarchive\"\n        res = requests.patch(url=url, json={\"user_id\": user_id}, headers=HEADERS, verify=False)\n        self.write_response_html_to_file(res,\"bob.html\")\n        res.raise_for_status()", "docstring": "Unarchives the user with the specified user ID.\n\nArgs:\nuser_id: `int`. The ID of the user to unarchive.\n\nReturns:\n`NoneType`: None.", "source": "juraj-google-style"}
{"code": "def _preserve_bonds(self, sliced_cartesian, use_lookup=None):\n    if (use_lookup is None):\n        use_lookup = settings['defaults']['use_lookup']\n    included_atoms_set = set(sliced_cartesian.index)\n    assert included_atoms_set.issubset(set(self.index)), 'The sliced Cartesian has to be a subset of the bigger frame'\n    bond_dic = self.get_bonds(use_lookup=use_lookup)\n    new_atoms = set([])\n    for atom in included_atoms_set:\n        new_atoms = (new_atoms | bond_dic[atom])\n    new_atoms = (new_atoms - included_atoms_set)\n    while (not (new_atoms == set([]))):\n        index_of_interest = new_atoms.pop()\n        included_atoms_set = (included_atoms_set | self.get_coordination_sphere(index_of_interest, n_sphere=float('inf'), only_surface=False, exclude=included_atoms_set, give_only_index=True, use_lookup=use_lookup))\n        new_atoms = (new_atoms - included_atoms_set)\n    molecule = self.loc[(included_atoms_set, :)]\n    return molecule", "docstring": "Is called after cutting geometric shapes.\n\nIf you want to change the rules how bonds are preserved, when\napplying e.g. :meth:`Cartesian.cut_sphere` this is the\nfunction you have to modify.\nIt is recommended to inherit from the Cartesian class to\ntailor it for your project, instead of modifying the\nsource code of ChemCoord.\n\nArgs:\nsliced_frame (Cartesian):\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def get_value(data, key):\n    ref = data\n    try:\n        for subkey in key.split('.'):\n            if isinstance(ref, dict):\n                ref = ref[subkey]\n            else:\n                print(('CRITICAL: Cannot use subkey %s on non-dictionary element' % subkey))\n                return None\n        return ref\n    except KeyError:\n        return None", "docstring": "Follow the dot notation to get the proper field, then perform the action\n\nArgs:\ndata: the data as a dictionary (required to be a dictionary)\nkey: the key (as dot notation) into the data that gives the field (IP.src)\n\nReturns:\nthe value of the field(subfield) if it exist, otherwise None", "source": "codesearchnet"}
{"code": "def add_moving_summary(*args, **kwargs):\n    decay = kwargs.pop('decay', 0.95)\n    coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY)\n    summ_coll = kwargs.pop('summary_collections', None)\n    assert (len(kwargs) == 0), ('Unknown arguments: ' + str(kwargs))\n    ctx = get_current_tower_context()\n    if ((ctx is not None) and (not ctx.is_main_training_tower)):\n        return []\n    graph = tf.get_default_graph()\n    try:\n        control_flow_ctx = graph._get_control_flow_context()\n        if ((control_flow_ctx is not None) and control_flow_ctx.IsXLAContext()):\n            return\n    except Exception:\n        pass\n    if (tf.get_variable_scope().reuse is True):\n        logger.warn('add_moving_summary() called under reuse=True scope, ignored.')\n        return []\n    for x in args:\n        assert isinstance(x, (tf.Tensor, tf.Variable)), x\n        assert (x.get_shape().ndims == 0), 'add_moving_summary() only accepts scalar tensor! Got one with {}'.format(x.get_shape())\n    ema_ops = []\n    for c in args:\n        name = re.sub('tower[0-9]+/', '', c.op.name)\n        with tf.name_scope(None):\n            if (not c.dtype.is_floating):\n                c = tf.cast(c, tf.float32)\n            with _enter_vs_reuse_ns('EMA') as vs:\n                ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype, initializer=tf.constant_initializer(), trainable=False)\n                ns = vs.original_name_scope\n            with tf.name_scope(ns):\n                ema_op = moving_averages.assign_moving_average(ema_var, c, decay, zero_debias=True, name=(name + '_EMA_apply'))\n            ema_ops.append(ema_op)\n        with tf.name_scope(None):\n            tf.summary.scalar((name + '-summary'), ema_op, collections=summ_coll)\n    if (coll is not None):\n        for op in ema_ops:\n            tf.add_to_collection(coll, op)\n    return ema_ops", "docstring": "Summarize the moving average for scalar tensors.\nThis function is a no-op if not calling from main training tower.\n\nArgs:\nargs: scalar tensors to summarize\ndecay (float): the decay rate. Defaults to 0.95.\ncollection (str or None): the name of the collection to add EMA-maintaining ops.\nThe default will work together with the default\n:class:`MovingAverageSummary` callback.\nsummary_collections ([str]): the names of collections to add the\nsummary op. Default is TF's default (`tf.GraphKeys.SUMMARIES`).\n\nReturns:\n[tf.Tensor]: list of tensors returned by assign_moving_average,\nwhich can be used to maintain the EMA.", "source": "codesearchnet"}
{"code": "async def export_image(self, name: str):\n    response = (await self.docker._query('images/{name}/get'.format(name=name), 'GET'))\n    return response.content", "docstring": "Get a tarball of an image by name or id.\n\nArgs:\nname: name/id of the image to be exported\n\nReturns:\nStreamreader of tarball image", "source": "codesearchnet"}
{"code": "def print_args(output=sys.stdout):\n\n    def decorator(func):\n        'The decorator function.\\n        '\n\n        @wraps(func)\n        def _(*args, **kwargs):\n            'The decorated function.\\n            '\n            output.write('Args: {0}, KwArgs: {1}\\n'.format(str(args), str(kwargs)))\n            return func(*args, **kwargs)\n        return _\n    return decorator", "docstring": "Decorate a function so that print arguments before calling it.\n\nArgs:\noutput: writable to print args. (Default: sys.stdout)", "source": "codesearchnet"}
{"code": "def _rmsprop(self, grads, cache=None, decay_rate=0.95):\n        \n        if cache is None:\n            cache = np.zeros_like(grads)\n        cache = decay_rate * cache + (1 - decay_rate) * grads ** 2\n        step = -grads / np.sqrt(cache + K.epsilon())\n        return step, cache", "docstring": "Uses RMSProp to compute step from gradients.\n\nArgs:\ngrads: numpy array of gradients.\ncache: numpy array of same shape as `grads` as RMSProp cache\ndecay_rate: How fast to decay cache\n\nReturns:\nA tuple of\nstep: numpy array of the same shape as `grads` giving the step.\nNote that this does not yet take the learning rate into account.\ncache: Updated RMSProp cache.", "source": "juraj-google-style"}
{"code": "def open(self, host, port=23):\n        \n        self._telnet_client.open(host, port)\n        config_str = self._telnet_client.cmd(\"MN?\")\n        if config_str.startswith(\"MN=\"):\n            config_str = config_str[len(\"MN=\"):]\n        self.properties = dict(\n            zip(['model', 'max_freq', 'max_atten'], config_str.split(\"-\", 2)))\n        self.max_atten = float(self.properties['max_atten'])", "docstring": "Opens a telnet connection to the desired AttenuatorDevice and\nqueries basic information.\n\nArgs:\nhost: A valid hostname (IP address or DNS-resolvable name) to an\nMC-DAT attenuator instrument.\nport: An optional port number (defaults to telnet default 23)", "source": "juraj-google-style"}
{"code": "def jacobian_s(nodes, degree, dimension):\n    r\n    num_nodes = (degree * (degree + 1)) \n    result = np.empty((dimension, num_nodes), order=\"F\")\n    index = 0\n    i = 0\n    for num_vals in six.moves.xrange(degree, 0, -1):\n        for _ in six.moves.xrange(num_vals):\n            result[:, index] = nodes[:, i + 1] - nodes[:, i]\n            \n            index += 1\n            i += 1\n        \n        i += 1\n    return float(degree) * result", "docstring": "r\"\"\"Compute :math:`\\frac{\\partial B}{\\partial s}`.\n\n.. note::\n\nThis is a helper for :func:`_jacobian_both`, which has an\nequivalent Fortran implementation.\n\nArgs:\nnodes (numpy.ndarray): Array of nodes in a surface.\ndegree (int): The degree of the surface.\ndimension (int): The dimension the surface lives in.\n\nReturns:\nnumpy.ndarray: Nodes of the Jacobian surface in\nB |eacute| zier form.", "source": "juraj-google-style"}
{"code": "def set_label_list(self, label_lists):\n        \n\n        if isinstance(label_lists, annotations.LabelList):\n            label_lists = [label_lists]\n\n        for label_list in label_lists:\n            if label_list.idx is None:\n                label_list.idx = 'default'\n\n            label_list.utterance = self\n            self.label_lists[label_list.idx] = label_list", "docstring": "Set the given label-list for this utterance.\nIf the label-list-idx is not set, ``default`` is used.\nIf there is already a label-list with the given idx,\nit will be overriden.\n\nArgs:\nlabel_list (LabelList, list): A single or multi. label-lists to add.", "source": "juraj-google-style"}
{"code": "def needle_statistics(infile):\n    alignments = list(AlignIO.parse(infile, 'emboss'))\n    alignment_properties = defaultdict(dict)\n    with open(infile) as f:\n        line = f.readline()\n        for i in range(len(alignments)):\n            while (line.rstrip() != '\n                line = f.readline()\n                if (not line):\n                    raise StopIteration\n            while (line[0] == '\n                parts = line[1:].split(':', 1)\n                key = parts[0].lower().strip()\n                if (key == '1'):\n                    a_id = parts[1].strip()\n                if (key == '2'):\n                    b_id = parts[1].strip()\n                if (key == 'identity'):\n                    ident_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()\n                    ident_num = int(ident_parse[0].split('/')[0])\n                    ident_percent = float(ident_parse[1])\n                    alignment_properties[((a_id + '_') + b_id)]['identity'] = ident_num\n                    alignment_properties[((a_id + '_') + b_id)]['percent_identity'] = ident_percent\n                if (key == 'similarity'):\n                    sim_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()\n                    sim_num = int(sim_parse[0].split('/')[0])\n                    sim_percent = float(sim_parse[1])\n                    alignment_properties[((a_id + '_') + b_id)]['similarity'] = sim_num\n                    alignment_properties[((a_id + '_') + b_id)]['percent_similarity'] = sim_percent\n                if (key == 'gaps'):\n                    gap_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()\n                    gap_num = int(gap_parse[0].split('/')[0])\n                    gap_percent = float(gap_parse[1])\n                    alignment_properties[((a_id + '_') + b_id)]['gaps'] = gap_num\n                    alignment_properties[((a_id + '_') + b_id)]['percent_gaps'] = gap_percent\n                if (key == 'score'):\n                    score = float(parts[1].strip())\n                    alignment_properties[((a_id + '_') + b_id)]['score'] = score\n                line = f.readline()\n    return alignment_properties", "docstring": "Reads in a needle alignment file and spits out statistics of the alignment.\n\nArgs:\ninfile (str): Alignment file name\n\nReturns:\ndict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.", "source": "codesearchnet"}
{"code": "def __init__(self, outer_index, inner_index):\n    if outer_index.batch_dims != inner_index.batch_dims:\n        raise ValueError('outer_index.batch_dims and inner_index.batch_dims must be the same.')\n    super().__init__(indices=inner_index.indices + outer_index.indices * inner_index.num_segments, num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims)\n    self.outer_index = outer_index\n    self.inner_index = inner_index", "docstring": "Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the\nintersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows\nand columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation\ncombines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to\n*outer_index.num_segments* * *inner_index.num_segments*\n\nArgs:\nouter_index (`IndexMap`):\nIndexMap.\ninner_index (`IndexMap`):\nIndexMap, must have the same shape as *outer_index*.", "source": "github-repos"}
{"code": "def seq_int_arr(seqs):\n    return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])", "docstring": "Convert list of ACGT strings to matix of 1-4 ints\n\nArgs:\nseqs (list of str): nucleotide sequences with only 'ACGT' characters\n\nReturns:\nnumpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T\nstr: nucleotide sequence string", "source": "codesearchnet"}
{"code": "def _handle_is_dag_stopped(self, request):\n    return Response(success=True, uid=request.uid, payload={'is_stopped': (request.payload['dag_name'] in self._stop_dags)})", "docstring": "The handler for the dag_stopped request.\n\nThe dag_stopped request checks whether a dag is flagged to be terminated.\n\nArgs:\nrequest (Request): Reference to a request object containing the\nincoming request. The payload has to contain the\nfollowing fields:\n'dag_name': the name of the dag that should be checked\n\nReturns:\nResponse: A response object containing the following fields:\n- is_stopped: True if the dag is flagged to be stopped.", "source": "codesearchnet"}
{"code": "def flags(cls):\n    \n\n    assert cls.__bases__ == (object,)\n\n    d = dict(cls.__dict__)\n    new_type = type(cls.__name__, (int,), d)\n    new_type.__module__ = cls.__module__\n\n    map_ = {}\n    for key, value in iteritems(d):\n        if key.upper() == key and isinstance(value, integer_types):\n            value_instance = new_type(value)\n            setattr(new_type, key, value_instance)\n            map_[value] = key\n\n    def str_(self):\n        value = int(self)\n        matches = []\n        for k, v in map_.items():\n            if value & k:\n                matches.append(\"%s.%s\" % (type(self).__name__, v))\n                value &= ~k\n        if value != 0 or not matches:\n            matches.append(text_type(value))\n\n        return \" | \".join(matches)\n\n    def repr_(self):\n        return \"<%s: %d>\" % (str(self), int(self))\n\n    setattr(new_type, \"__repr__\", repr_)\n    setattr(new_type, \"__str__\", str_)\n\n    return new_type", "docstring": "A decorator for creating an int flags class.\n\nMakes the values a subclass of the type and implements repr/str.\nThe new class will be a subclass of int.\n\nArgs:\ncls (type): The class to convert to an flags\n\nReturns:\ntype: A new class\n\n::\n\n@flags\nclass Foo(object):\nFOO = 1\nBAR = 2", "source": "juraj-google-style"}
{"code": "def add_graph(\n        self,\n        y,\n        x_label=None,\n        y_label=\"\",\n        title=\"\",\n        x_run=None,\n        y_run=None,\n        svg_size_px=None,\n        key_position=\"bottom right\",\n    ):\n        \n\n        if x_run is None:\n            x_run = self.default_x_run\n        if y_run is None:\n            y_run = self.default_y_run\n        if svg_size_px is None:\n            svg_size_px = self.default_svg_size_px\n\n        for panel in self.panels:\n            x_run = self._load_x_run(x_run)\n            y_run = self._load_y_run(y_run)\n            svg_size_px = self._load_svg_size_px(svg_size_px)\n            panel.add_graph(\n                y=y,\n                x_run=x_run,\n                y_run=y_run,\n                svg_size_px=svg_size_px,\n                y_label=y_label,\n                x_label=x_label if x_label is not None else self.default_x_label,\n                title=title,\n                key_position=key_position,\n            )", "docstring": "Add a new graph to the overlap report.\n\nArgs:\ny (str): Value plotted on y-axis.\nx_label (str): Label on x-axis.\ny_label (str): Label on y-axis.\ntitle (str): Title of the plot.\nx_run ((float,float)): x-range.\ny_run ((int,int)): y-rang.\nsvg_size_px ((int,int): Size of SVG image in pixels.\nkey_position (str): GnuPlot position of the legend.", "source": "juraj-google-style"}
{"code": "def _maybe_partial_apply_variables(fn, args, kwargs):\n\n    def is_distributed_var(x):\n        flat = nest.flatten(x)\n        return flat and isinstance(flat[0], values.DistributedVariable)\n    var_kwargs = {}\n    nonvar_kwargs = {}\n    if kwargs:\n        var_kwargs = {k: v for k, v in kwargs.items() if is_distributed_var(v)}\n    if var_kwargs:\n        nonvar_kwargs = {k: v for k, v in kwargs.items() if not is_distributed_var(v)}\n    positional_args = []\n    index_of_star_args = None\n    for i, p in enumerate(tf_inspect.signature(fn).parameters.values()):\n        if i == 0 and p.name == 'self':\n            continue\n        if p.kind == tf_inspect.Parameter.POSITIONAL_OR_KEYWORD:\n            positional_args.append(p.name)\n        elif p.kind == tf_inspect.Parameter.VAR_POSITIONAL:\n            index_of_star_args = i\n        elif p.kind == tf_inspect.Parameter.POSITIONAL_ONLY:\n            if var_kwargs or any((is_distributed_var(a) for a in args)):\n                raise ValueError(f'Mixing Variables and positional-only parameters not supported by TPUStrategy. Received {len(var_kwargs)} DistributedVariables in **kwargs and {sum((is_distributed_var(a) for a in args))} in *args, expected zero for both.')\n            return (fn, args, kwargs)\n    star_args = []\n    have_seen_var_arg = False\n    for i, a in enumerate(args):\n        if is_distributed_var(a):\n            if index_of_star_args is not None and i >= index_of_star_args:\n                raise ValueError('TPUStrategy.run() cannot handle Variables passed to *args. Either name the function argument, or capture the Variable implicitly.')\n            if len(positional_args) <= i:\n                raise ValueError('Too many positional arguments passed to call to TPUStrategy.run().')\n            var_kwargs[positional_args[i]] = a\n            have_seen_var_arg = True\n        else:\n            if index_of_star_args is not None and i >= index_of_star_args:\n                if have_seen_var_arg:\n                    raise ValueError('TPUStrategy.run() cannot handle both Variables and a mix of positional args and *args. Either remove the *args, or capture the Variable implicitly.')\n                else:\n                    star_args.append(a)\n                    continue\n            if len(positional_args) <= i:\n                raise ValueError('Too many positional arguments passed to call to TPUStrategy.run().')\n            nonvar_kwargs[positional_args[i]] = a\n    if var_kwargs:\n        return (functools.partial(fn, **var_kwargs), star_args, nonvar_kwargs)\n    return (fn, args, kwargs)", "docstring": "Inspects arguments to partially apply any DistributedVariable.\n\nThis avoids an automatic cast of the current variable value to tensor.\n\nNote that a variable may be captured implicitly with Python scope instead of\npassing it to run(), but supporting run() keeps behavior consistent\nwith MirroredStrategy.\n\nSince positional arguments must be applied from left to right, this function\ndoes some tricky function inspection to move variable positional arguments\ninto kwargs. 
As a result of this, we can't support passing Variables as *args,\nnor as args to functions which combine both explicit positional arguments and\n*args.\n\nArgs:\nfn: The function to run, as passed to run().\nargs: Positional arguments to fn, as passed to run().\nkwargs: Keyword arguments to fn, as passed to run().\n\nReturns:\nA tuple of the function (possibly wrapped), args, kwargs (both\npossibly filtered, with members of args possibly moved to kwargs).\nIf no variables are found, this function is a noop.\n\nRaises:\nValueError: If the function signature makes unsupported use of *args, or if\ntoo many arguments are passed.", "source": "github-repos"}
{"code": "def __init__(self, name, description=None):\n    \n    super(ArtifactDefinition, self).__init__()\n    self.conditions = []\n    self.description = description\n    self.name = name\n    self.labels = []\n    self.provides = []\n    self.sources = []\n    self.supported_os = []\n    self.urls = []", "docstring": "Initializes an artifact definition.\n\nArgs:\nname (str): name that uniquely identifiers the artifact definition.\ndescription (Optional[str]): description of the artifact definition.", "source": "juraj-google-style"}
{"code": "def intersect(self, other):\n    lowest_stop = min(self.stop_hz, other.stop_hz)\n    highest_start = max(self.start_hz, other.start_hz)\n    return FrequencyBand(highest_start, lowest_stop)", "docstring": "Return the intersection between this frequency band and another.\n\nArgs:\nother (FrequencyBand): the instance to intersect with\n\nExamples::\n>>> import zounds\n>>> b1 = zounds.FrequencyBand(500, 1000)\n>>> b2 = zounds.FrequencyBand(900, 2000)\n>>> intersection = b1.intersect(b2)\n>>> intersection.start_hz, intersection.stop_hz\n(900, 1000)", "source": "codesearchnet"}
{"code": "def plot_weight_posteriors(names, qm_vals, qs_vals, fname):\n    fig = figure.Figure(figsize=(6, 3))\n    canvas = backend_agg.FigureCanvasAgg(fig)\n    ax = fig.add_subplot(1, 2, 1)\n    for (n, qm) in zip(names, qm_vals):\n        sns.distplot(qm.flatten(), ax=ax, label=n)\n    ax.set_title('weight means')\n    ax.set_xlim([(- 1.5), 1.5])\n    ax.legend()\n    ax = fig.add_subplot(1, 2, 2)\n    for (n, qs) in zip(names, qs_vals):\n        sns.distplot(qs.flatten(), ax=ax)\n    ax.set_title('weight stddevs')\n    ax.set_xlim([0, 1.0])\n    fig.tight_layout()\n    canvas.print_figure(fname, format='png')\n    print('saved {}'.format(fname))", "docstring": "Save a PNG plot with histograms of weight means and stddevs.\n\nArgs:\nnames: A Python `iterable` of `str` variable names.\nqm_vals: A Python `iterable`, the same length as `names`,\nwhose elements are Numpy `array`s, of any shape, containing\nposterior means of weight varibles.\nqs_vals: A Python `iterable`, the same length as `names`,\nwhose elements are Numpy `array`s, of any shape, containing\nposterior standard deviations of weight varibles.\nfname: Python `str` filename to save the plot to.", "source": "codesearchnet"}
{"code": "def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: TFBaseModelOutput | None=None, past_key_values: Tuple[Tuple[tf.Tensor]] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: bool | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, labels: tf.Tensor | None=None, training: bool=False) -> Tuple[tf.Tensor] | TFSeq2SeqLMOutput:\n    if labels is not None:\n        labels = tf.where(labels == self.config.pad_token_id, tf.fill(shape_list(labels), tf.cast(-100, labels.dtype)), labels)\n        use_cache = False\n        if decoder_input_ids is None and decoder_inputs_embeds is None:\n            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n    lm_logits = self.bias_layer(lm_logits)\n    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n    if not return_dict:\n        output = (lm_logits,) + outputs[1:]\n        return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the masked language modeling loss. Indices should either be in `[0, ...,\nconfig.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:", "source": "github-repos"}
{"code": "def set_callback(self, property_name, callback):\n    if property_name not in self._config:\n        raise KeyError('%s is not a valid property name.' % property_name)\n    if not callable(callback):\n        raise TypeError('The callback object provided is not callable.')\n    self._set_callbacks[property_name] = callback", "docstring": "Set a set-callback for given property.\n\nArgs:\nproperty_name: Name of the property.\ncallback: The callback as a `callable` of signature:\ndef cbk(config):\nwhere config is the config after it is set to the new value.\nThe callback is invoked each time the set() method is called with the\nmatching property_name.\n\nRaises:\nKeyError: If property_name does not exist.\nTypeError: If `callback` is not callable.", "source": "github-repos"}
{"code": "def get_clinvar_id(self, submission_id):\n    submission_obj = self.clinvar_submission_collection.find_one({'_id': ObjectId(submission_id)})\n    clinvar_subm_id = submission_obj.get('clinvar_subm_id')\n    return clinvar_subm_id", "docstring": "Returns the official Clinvar submission ID for a submission object\n\nArgs:\nsubmission_id(str): submission_id(str) : id of the submission\n\nReturns:\nclinvar_subm_id(str): a string with a format: SUB[0-9]. It is obtained from clinvar portal when starting a new submission", "source": "codesearchnet"}
{"code": "def optimize(self, sess, batch_index):\n    feed_dict = {self._batch_index: batch_index, self._per_device_batch_size: self._loaded_per_device_batch_size, self._max_seq_len: self._loaded_max_seq_len}\n    for tower in self._towers:\n        feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict())\n    fetches = {'train': self._train_op}\n    for tower in self._towers:\n        fetches.update(tower.loss_graph.extra_compute_grad_fetches())\n    return sess.run(fetches, feed_dict=feed_dict)", "docstring": "Run a single step of SGD.\n\nRuns a SGD step over a slice of the preloaded batch with size given by\nself._loaded_per_device_batch_size and offset given by the batch_index\nargument.\n\nUpdates shared model weights based on the averaged per-device\ngradients.\n\nArgs:\nsess: TensorFlow session.\nbatch_index: Offset into the preloaded data. This value must be\nbetween `0` and `tuples_per_device`. The amount of data to\nprocess is at most `max_per_device_batch_size`.\n\nReturns:\nThe outputs of extra_ops evaluated over the batch.", "source": "codesearchnet"}
{"code": "def preprocess_JPEG(self, image, **kwargs):\n        \n        save_kwargs = {\n            'progressive': VERSATILEIMAGEFIELD_PROGRESSIVE_JPEG,\n            'quality': QUAL\n        }\n        if image.mode != 'RGB':\n            image = image.convert('RGB')\n        return (image, save_kwargs)", "docstring": "Receive a PIL Image instance of a JPEG and returns 2-tuple.\n\nArgs:\n* [0]: Image instance, converted to RGB\n* [1]: Dict with a quality key (mapped to the value of `QUAL` as\ndefined by the `VERSATILEIMAGEFIELD_JPEG_RESIZE_QUALITY`\nsetting)", "source": "juraj-google-style"}
{"code": "def waitForEvent(self, event_name, predicate, timeout=DEFAULT_TIMEOUT):\n    deadline = (time.time() + timeout)\n    while (time.time() <= deadline):\n        rpc_timeout = (deadline - time.time())\n        if (rpc_timeout < 0):\n            break\n        rpc_timeout = min(rpc_timeout, MAX_TIMEOUT)\n        try:\n            event = self.waitAndGet(event_name, rpc_timeout)\n        except TimeoutError:\n            break\n        if predicate(event):\n            return event\n    raise TimeoutError(self._ad, ('Timed out after %ss waiting for an \"%s\" event that satisfies the predicate \"%s\".' % (timeout, event_name, predicate.__name__)))", "docstring": "Wait for an event of a specific name that satisfies the predicate.\n\nThis call will block until the expected event has been received or time\nout.\n\nThe predicate function defines the condition the event is expected to\nsatisfy. It takes an event and returns True if the condition is\nsatisfied, False otherwise.\n\nNote all events of the same name that are received but don't satisfy\nthe predicate will be discarded and not be available for further\nconsumption.\n\nArgs:\nevent_name: string, the name of the event to wait for.\npredicate: function, a function that takes an event (dictionary) and\nreturns a bool.\ntimeout: float, default is 120s.\n\nReturns:\ndictionary, the event that satisfies the predicate if received.\n\nRaises:\nTimeoutError: raised if no event that satisfies the predicate is\nreceived after timeout seconds.", "source": "codesearchnet"}
{"code": "def total_stored(self, wanted, slots=None):\n        \n        if slots is None:\n            slots = self.window.slots\n        wanted = make_slot_check(wanted)\n        return sum(slot.amount for slot in slots if wanted(slot))", "docstring": "Calculates the total number of items of that type\nin the current window or given slot range.\n\nArgs:\nwanted: function(Slot) or Slot or itemID or (itemID, metadata)", "source": "juraj-google-style"}
{"code": "def convert(self, value):\n        \n        if self._type is str:\n            return str(value)\n        elif self._type is int:\n            try:\n                return int(value)\n            except (UnicodeError, ValueError):\n                raise WorkflowArgumentError('Cannot convert {} to int'.format(value))\n        elif self._type is float:\n            try:\n                return float(value)\n            except (UnicodeError, ValueError):\n                raise WorkflowArgumentError('Cannot convert {} to float'.format(value))\n        elif self._type is bool:\n            if isinstance(value, bool):\n                return bool(value)\n            value = value.lower()\n            if value in ('true', '1', 'yes', 'y'):\n                return True\n            elif value in ('false', '0', 'no', 'n'):\n                return False\n            raise WorkflowArgumentError('Cannot convert {} to bool'.format(value))\n        else:\n            return value", "docstring": "Convert the specified value to the type of the option.\n\nArgs:\nvalue: The value that should be converted.\n\nReturns:\nThe value with the type given by the option.", "source": "juraj-google-style"}
{"code": "def ReceiveMessages(self, client_id, messages):\n    if data_store.RelationalDBEnabled():\n        return self.ReceiveMessagesRelationalFlows(client_id, messages)\n    now = time.time()\n    with queue_manager.QueueManager(token=self.token) as manager:\n        for (session_id, msgs) in iteritems(collection.Group(messages, operator.attrgetter('session_id'))):\n            leftover_msgs = self.HandleWellKnownFlows(msgs)\n            unprocessed_msgs = []\n            for msg in leftover_msgs:\n                if ((msg.auth_state == msg.AuthorizationState.AUTHENTICATED) or (msg.session_id == self.unauth_allowed_session_id)):\n                    unprocessed_msgs.append(msg)\n            if (len(unprocessed_msgs) < len(leftover_msgs)):\n                logging.info('Dropped %d unauthenticated messages for %s', (len(leftover_msgs) - len(unprocessed_msgs)), client_id)\n            if (not unprocessed_msgs):\n                continue\n            for msg in unprocessed_msgs:\n                manager.QueueResponse(msg)\n            for msg in unprocessed_msgs:\n                if (msg.request_id == 0):\n                    manager.QueueNotification(session_id=msg.session_id)\n                    break\n                elif (msg.type == rdf_flows.GrrMessage.Type.STATUS):\n                    if msg.HasTaskID():\n                        manager.DeQueueClientRequest(msg)\n                    manager.QueueNotification(session_id=msg.session_id, last_status=msg.request_id)\n                    stat = rdf_flows.GrrStatus(msg.payload)\n                    if (stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED):\n                        crash_details = rdf_client.ClientCrash(client_id=client_id, session_id=session_id, backtrace=stat.backtrace, crash_message=stat.error_message, nanny_status=stat.nanny_status, timestamp=rdfvalue.RDFDatetime.Now())\n                        events.Events.PublishEvent('ClientCrash', crash_details, token=self.token)\n    logging.debug('Received %s messages from %s in %s sec', len(messages), client_id, (time.time() - now))", "docstring": "Receives and processes the messages from the source.\n\nFor each message we update the request object, and place the\nresponse in that request's queue. If the request is complete, we\nsend a message to the worker.\n\nArgs:\nclient_id: The client which sent the messages.\nmessages: A list of GrrMessage RDFValues.", "source": "codesearchnet"}
{"code": "def generate_host_passthrough(self, vcpu_num):\n        \n\n        cpu = ET.Element('cpu', mode='host-passthrough')\n        cpu.append(self.generate_topology(vcpu_num))\n        if vcpu_num > 1:\n            cpu.append(self.generate_numa(vcpu_num))\n        return cpu", "docstring": "Generate host-passthrough XML cpu node\n\nArgs:\nvcpu_num(str): number of virtual CPUs\n\nReturns:\nlxml.etree.Element: CPU XML node", "source": "juraj-google-style"}
{"code": "def _ReadParserPresetValues(self, preset_definition_values):\n    \n    if not preset_definition_values:\n      raise errors.MalformedPresetError('Missing preset definition values.')\n\n    name = preset_definition_values.get('name', None)\n    if not name:\n      raise errors.MalformedPresetError(\n          'Invalid preset definition missing name.')\n\n    parsers = preset_definition_values.get('parsers', None)\n    if not parsers:\n      raise errors.MalformedPresetError(\n          'Invalid preset definition missing parsers.')\n\n    parser_preset = ParserPreset(name, parsers)\n\n    for operating_system_values in preset_definition_values.get(\n        'operating_systems', []):\n      operating_system = self._ReadOperatingSystemArtifactValues(\n          operating_system_values)\n      parser_preset.operating_systems.append(operating_system)\n\n    return parser_preset", "docstring": "Reads a parser preset from a dictionary.\n\nArgs:\npreset_definition_values (dict[str, object]): preset definition values.\n\nReturns:\nParserPreset: a parser preset.\n\nRaises:\nMalformedPresetError: if the format of the preset definition is not set\nor incorrect, or the preset of a specific operating system has already\nbeen set.", "source": "juraj-google-style"}
{"code": "def __init__(self, sess):\n    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n    self.session = sess", "docstring": "Constructor.\n\nArgs:\nsess: A tensorflow Session object.", "source": "github-repos"}
{"code": "def on_raw_update(\n        self=None,\n        group: int = 0\n    ) -> callable:\n        \n\n        def decorator(func: callable) -> Tuple[Handler, int]:\n            if isinstance(func, tuple):\n                func = func[0].callback\n\n            handler = pyrogram.RawUpdateHandler(func)\n\n            if isinstance(self, int):\n                return handler, group if self is None else group\n\n            if self is not None:\n                self.add_handler(handler, group)\n\n            return handler, group\n\n        return decorator", "docstring": "Use this decorator to automatically register a function for handling raw updates.\nThis does the same thing as :meth:`add_handler` using the :class:`RawUpdateHandler`.\n\nArgs:\ngroup (``int``, *optional*):\nThe group identifier, defaults to 0.", "source": "juraj-google-style"}
{"code": "def read_model(input_tflite_file):\n    if not gfile.Exists(input_tflite_file):\n        raise RuntimeError('Input file not found at %r\\n' % input_tflite_file)\n    with gfile.GFile(input_tflite_file, 'rb') as input_file_handle:\n        model_bytearray = bytearray(input_file_handle.read())\n    return read_model_from_bytearray(model_bytearray)", "docstring": "Reads a tflite model as a python object.\n\nArgs:\ninput_tflite_file: Full path name to the input tflite file\n\nRaises:\nRuntimeError: If input_tflite_file path is invalid.\nIOError: If input_tflite_file cannot be opened.\n\nReturns:\nA python object corresponding to the input tflite file.", "source": "github-repos"}
{"code": "def _GetTypeIndicators(cls, signature_scanner, specification_store, remainder_list, path_spec, resolver_context=None):\n    type_indicator_list = []\n    file_object = resolver.Resolver.OpenFileObject(path_spec, resolver_context=resolver_context)\n    scan_state = pysigscan.scan_state()\n    try:\n        signature_scanner.scan_file_object(scan_state, file_object)\n        for scan_result in iter(scan_state.scan_results):\n            format_specification = specification_store.GetSpecificationBySignature(scan_result.identifier)\n            if (format_specification.identifier not in type_indicator_list):\n                type_indicator_list.append(format_specification.identifier)\n        for analyzer_helper in remainder_list:\n            result = analyzer_helper.AnalyzeFileObject(file_object)\n            if (result is not None):\n                type_indicator_list.append(result)\n    finally:\n        file_object.close()\n    return type_indicator_list", "docstring": "Determines if a file contains a supported format types.\n\nArgs:\nsignature_scanner (pysigscan.scanner): signature scanner.\nspecification_store (FormatSpecificationStore): specification store.\nremainder_list (list[AnalyzerHelper]): remaining analyzer helpers that\ndo not have a format specification.\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "codesearchnet"}
{"code": "def delete_resource_group(access_token, subscription_id, rgname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '?api-version=', RESOURCE_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def parse_pyc_string(data):\n    return pyc.loads(data)", "docstring": "Parse pyc data from a string.\n\nArgs:\ndata: pyc data\n\nReturns:\nAn instance of pycnite.types.CodeTypeBase.", "source": "github-repos"}
{"code": "def get_address_coords(self, address):\n    url = ('https:\n    r = requests.get(url)\n    r.raise_for_status()\n    results = r.json()['results']\n    lat = results[0]['geometry']['location']['lat']\n    lng = results[0]['geometry']['location']['lng']\n    return (lat, lng)", "docstring": "Use the google geocoder to get latitude and longitude for an address string\n\nArgs:\naddress: any address string\n\nReturns:\nA tuple of (lat,lng)", "source": "codesearchnet"}
{"code": "def __init__(self, latent_size):\n    \n    super(ProbabilisticGrammarVariational, self).__init__()\n    self.latent_size = latent_size\n    self.encoder_net = tf.keras.Sequential([\n        tf.keras.layers.Conv1D(64, 3, padding=\"SAME\"),\n        tf.keras.layers.BatchNormalization(),\n        tf.keras.layers.Activation(tf.nn.elu),\n        tf.keras.layers.Conv1D(128, 3, padding=\"SAME\"),\n        tf.keras.layers.BatchNormalization(),\n        tf.keras.layers.Activation(tf.nn.elu),\n        tf.keras.layers.Dropout(0.1),\n        tf.keras.layers.GlobalAveragePooling1D(),\n        tf.keras.layers.Dense(latent_size * 2, activation=None),\n    ])", "docstring": "Constructs a variational posterior for a probabilistic grammar.\n\nArgs:\nlatent_size: Number of dimensions in the latent code.", "source": "juraj-google-style"}
{"code": "def resolve(node, source_info, graphs, resolver):\n    visitor = FunctionVisitor(source_info, graphs, resolver)\n    node = visitor.visit(node)\n    return node", "docstring": "Performs type inference.\n\nArgs:\nnode: ast.AST\nsource_info: transformer.SourceInfo\ngraphs: Dict[ast.FunctionDef, cfg.Graph]\nresolver: Resolver\n\nReturns:\nast.AST", "source": "github-repos"}
{"code": "def complain(distribution_name):\n    \n    try:\n        pkg_resources.get_distribution(distribution_name)\n        warnings.warn(\n            \"The {pkg} distribution is now obsolete. \"\n            \"Please `pip uninstall {pkg}`. \"\n            \"In the future, this warning will become an ImportError.\".format(\n                pkg=distribution_name\n            ),\n            DeprecationWarning,\n        )\n    except pkg_resources.DistributionNotFound:\n        pass", "docstring": "Issue a warning if `distribution_name` is installed.\n\nIn a future release, this method will be updated to raise ImportError\nrather than just send a warning.\n\nArgs:\ndistribution_name (str): The name of the obsolete distribution.", "source": "juraj-google-style"}
{"code": "def create_migration_template(name):\n    \n    assert name, 'Name of the migration can not be empty.'\n    from . import migrations\n\n    \n    \n    package = migrations\n    prefix = package.__name__ + '.'\n    all_versions = []\n    for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):\n        version = int(modname.split('.')[-1].split('_')[0])\n        all_versions.append(version)\n\n    next_number = max(all_versions) + 1\n\n    \n    \n    next_migration_name = '{}_{}.py'.format(next_number, name)\n    migration_fullname = os.path.join(package.__path__[0], next_migration_name)\n\n    \n    \n    with open(migration_fullname, 'w') as f:\n        f.write(MIGRATION_TEMPLATE)\n    return migration_fullname", "docstring": "Creates migration file. Returns created file name.\nArgs:\nname (str): name of the migration.\n\nReturns:\nstr: name of the migration file.", "source": "juraj-google-style"}
{"code": "def ipv4_lstrip_zeros(address):\n    \n\n    \n    obj = address.strip().split('.')\n\n    for x, y in enumerate(obj):\n\n        \n        obj[x] = y.split('/')[0].lstrip('0')\n        if obj[x] in ['', None]:\n            obj[x] = '0'\n\n    return '.'.join(obj)", "docstring": "The function to strip leading zeros in each octet of an IPv4 address.\n\nArgs:\naddress (:obj:`str`): An IPv4 address.\n\nReturns:\nstr: The modified IPv4 address.", "source": "juraj-google-style"}
{"code": "def std(x, axis=None, keepdims=False):\n    if x.dtype.base_dtype == dtypes_module.bool:\n        x = math_ops.cast(x, floatx())\n    return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)", "docstring": "Standard deviation of a tensor, alongside the specified axis.\n\nIt is an alias to `tf.math.reduce_std`.\n\nArgs:\nx: A tensor or variable. It should have numerical dtypes. Boolean type\ninputs will be converted to float.\naxis: An integer, the axis to compute the standard deviation. If `None`\n(the default), reduces all dimensions. Must be in the range\n`[-rank(x), rank(x))`.\nkeepdims: A boolean, whether to keep the dimensions or not.\nIf `keepdims` is `False`, the rank of the tensor is reduced\nby 1. If `keepdims` is `True`, the reduced dimension is retained with\nlength 1.\n\nReturns:\nA tensor with the standard deviation of elements of `x` with same dtype.\nBoolean type input will be converted to float.", "source": "github-repos"}
{"code": "def contains(self, time: datetime.datetime,\n                 inclusive: bool = True) -> bool:\n        \n        if inclusive:\n            return self.start <= time <= self.end\n        else:\n            return self.start < time < self.end", "docstring": "Does the interval contain a momentary time?\n\nArgs:\ntime: the ``datetime.datetime`` to check\ninclusive: use inclusive rather than exclusive range checks?", "source": "juraj-google-style"}
{"code": "def add(self, key, value):\n        \n        if isinstance(value, list):\n            \n            for val in value:\n                self._add_arg_python(key, val)\n        elif isinstance(value, dict):\n            err = 'Dictionary types are not currently supported for field.'\n            print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))\n        else:\n            mask = False\n            env_var = re.compile(r'^\\$env\\.(.*)$')\n            envs_var = re.compile(r'^\\$envs\\.(.*)$')\n\n            if env_var.match(str(value)):\n                \n                env_key = env_var.match(str(value)).groups()[0]\n                value = os.environ.get(env_key, value)\n            elif envs_var.match(str(value)):\n                \n                env_key = envs_var.match(str(value)).groups()[0]\n                value = os.environ.get(env_key, value)\n                mask = True\n            self._add_arg(key, value, mask)", "docstring": "Add CLI Arg to lists value.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).", "source": "juraj-google-style"}
{"code": "def gen_sl_transform_matricies(area_multiple):\n    \n    return [np.array(((i, j), (0, area_multiple / i)))\n            for i in get_factors(area_multiple)\n            for j in range(area_multiple", "docstring": "Generates the transformation matricies that convert a set of 2D\nvectors into a super lattice of integer area multiple as proven\nin Cassels:\n\nCassels, John William Scott. An introduction to the geometry of\nnumbers. Springer Science & Business Media, 2012.\n\nArgs:\narea_multiple(int): integer multiple of unit cell area for super\nlattice area\n\nReturns:\nmatrix_list: transformation matricies to covert unit vectors to\nsuper lattice vectors", "source": "juraj-google-style"}
{"code": "def read(self, nodes=None, **kwargs):\n        \n        if nodes is None:\n            required_nodes = self.wishlist - set(self.datasets.keys())\n            nodes = self.dep_tree.leaves(nodes=required_nodes)\n        return self._read_datasets(nodes, **kwargs)", "docstring": "Load datasets from the necessary reader.\n\nArgs:\nnodes (iterable): DependencyTree Node objects\n**kwargs: Keyword arguments to pass to the reader's `load` method.\n\nReturns:\nDatasetDict of loaded datasets", "source": "juraj-google-style"}
{"code": "def cctop_submit(seq_str):\n    url = 'http:\n    r = requests.post(url)\n    jobid = r.text.split('ID: ')[1]\n    return jobid", "docstring": "Submit a protein sequence string to CCTOP and return the job ID.\n\nArgs:\nseq_str (str): Protein sequence as a string\n\nReturns:\ndict: Job ID on the CCTOP server", "source": "codesearchnet"}
{"code": "def KernelVersion():\n    rtl_osversioninfoexw = RtlOSVersionInfoExw()\n    try:\n        RtlGetVersion(rtl_osversioninfoexw)\n    except OSError:\n        return 'unknown'\n    return ('%d.%d.%d' % (rtl_osversioninfoexw.dwMajorVersion, rtl_osversioninfoexw.dwMinorVersion, rtl_osversioninfoexw.dwBuildNumber))", "docstring": "Gets the kernel version as string, eg. \"5.1.2600\".\n\nReturns:\nThe kernel version, or \"unknown\" in the case of failure.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context, file_system, path_spec, is_root=False):\n    \n    location = getattr(path_spec, 'location', None)\n\n    \n    \n    is_windows_device = False\n    if platform.system() == 'Windows' and location:\n      try:\n        \n        is_windows_device = pysmdev.check_device(location)\n      except IOError:\n        pass\n\n    stat_info = None\n    if not is_windows_device and location:\n      \n      \n      \n      try:\n        stat_info = os.lstat(location)\n      except OSError as exception:\n        raise errors.BackEndError(\n            'Unable to retrieve stat object with error: {0!s}'.format(\n                exception))\n\n    super(OSFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=False)\n    self._is_windows_device = is_windows_device\n    self._name = None\n    self._stat_info = stat_info\n\n    if is_windows_device:\n      self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE\n\n    elif stat_info:\n      \n      \n      \n      \n      is_link = os.path.islink(location)\n\n      \n      \n      \n      \n      if stat.S_ISLNK(stat_info.st_mode) or is_link:\n        self.entry_type = definitions.FILE_ENTRY_TYPE_LINK\n      elif stat.S_ISREG(stat_info.st_mode):\n        self.entry_type = definitions.FILE_ENTRY_TYPE_FILE\n      elif stat.S_ISDIR(stat_info.st_mode):\n        self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY\n      elif (stat.S_ISCHR(stat_info.st_mode) or\n            stat.S_ISBLK(stat_info.st_mode)):\n        self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE\n      elif stat.S_ISFIFO(stat_info.st_mode):\n        self.entry_type = definitions.FILE_ENTRY_TYPE_PIPE\n      elif stat.S_ISSOCK(stat_info.st_mode):\n        self.entry_type = definitions.FILE_ENTRY_TYPE_SOCKET", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\n\nRaises:\nBackEndError: If an OSError comes up it is caught and an\nBackEndError error is raised instead.", "source": "juraj-google-style"}
{"code": "def IsNTFS(self):\n    tsk_fs_type = self.GetFsType()\n    return (tsk_fs_type in [pytsk3.TSK_FS_TYPE_NTFS, pytsk3.TSK_FS_TYPE_NTFS_DETECT])", "docstring": "Determines if the file system is NTFS.\n\nReturns:\nbool: True if the file system is NTFS.", "source": "codesearchnet"}
{"code": "def _package_path(package):\n    from os import path\n    confdir = config_dir()\n    return path.join(confdir, '{}.cfg'.format(package))", "docstring": "Returns the full path to the default package configuration file.\n\nArgs:\npackage (str): name of the python package to return a path for.", "source": "codesearchnet"}
{"code": "def prepend_block(self, node, reverse=False):\n    if (not isinstance(node, grammar.STATEMENTS)):\n        raise ValueError\n    if reverse:\n        self.to_prepend_block[(- 1)].appendleft(node)\n    else:\n        self.to_prepend_block[(- 1)].append(node)", "docstring": "Prepend a statement to the current block.\n\nArgs:\nnode: The statement to prepend.\nreverse: When called multiple times, this flag determines whether the\nstatement should be prepended or appended to the already inserted\nstatements.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "codesearchnet"}
{"code": "def __init__(self, swap, expiry_date=None, dtype=None, name=None):\n    self._name = name or 'swaption'\n    with tf.name_scope(self._name):\n        self._dtype = dtype\n        self._expiry_date = dates.convert_to_date_tensor(expiry_date)\n        self._swap = swap", "docstring": "Initialize a batch of European swaptions.\n\nArgs:\nswap: An instance of `InterestRateSwap` specifying the interest rate\nswaps underlying the swaptions. The batch size of the swaptions being\ncreated would be the same as the batch size of the `swap`.\nexpiry_date: An optional rank 1 `DateTensor` specifying the expiry dates\nfor each swaption. The shape of the input should be the same as the\nbatch size of the `swap` input.\nDefault value: None in which case the option expity date is the same as\nthe start date of each underlying swap.\ndtype: `tf.Dtype`. If supplied the dtype for the real variables or ops\neither supplied to the Swaption object or created by the Swaption\nobject.\nDefault value: None which maps to the default dtype inferred by\nTensorFlow.\nname: Python str. The name to give to the ops created by this class.\nDefault value: `None` which maps to 'swaption'.", "source": "github-repos"}
{"code": "def _music_lib_search(self, search, start, max_items):\n    response = self.contentDirectory.Browse([('ObjectID', search), ('BrowseFlag', 'BrowseDirectChildren'), ('Filter', '*'), ('StartingIndex', start), ('RequestedCount', max_items), ('SortCriteria', '')])\n    metadata = {}\n    for tag in ['NumberReturned', 'TotalMatches', 'UpdateID']:\n        metadata[camel_to_underscore(tag)] = int(response[tag])\n    return (response, metadata)", "docstring": "Perform a music library search and extract search numbers.\n\nYou can get an overview of all the relevant search prefixes (like\n'A:') and their meaning with the request:\n\n.. code ::\n\nresponse = device.contentDirectory.Browse([\n('ObjectID', '0'),\n('BrowseFlag', 'BrowseDirectChildren'),\n('Filter', '*'),\n('StartingIndex', 0),\n('RequestedCount', 100),\n('SortCriteria', '')\n])\n\nArgs:\nsearch (str): The ID to search.\nstart (int): The index of the forst item to return.\nmax_items (int): The maximum number of items to return.\n\nReturns:\ntuple: (response, metadata) where response is the returned metadata\nand metadata is a dict with the 'number_returned',\n'total_matches' and 'update_id' integers", "source": "codesearchnet"}
{"code": "def get_metric_values(self):\n    group_names = self.properties.get('metric-groups', None)\n    if (not group_names):\n        group_names = self.manager.get_metric_values_group_names()\n    ret = []\n    for group_name in group_names:\n        try:\n            mo_val = self.manager.get_metric_values(group_name)\n            ret_item = (group_name, mo_val)\n            ret.append(ret_item)\n        except ValueError:\n            pass\n    return ret", "docstring": "Get the faked metrics, for all metric groups and all resources that\nhave been prepared on the manager object of this context object.\n\nReturns:\n\niterable of tuple (group_name, iterable of values): The faked\nmetrics, in the order they had been added, where:\n\ngroup_name (string): Metric group name.\n\nvalues (:class:~zhmcclient.FakedMetricObjectValues`):\nThe metric values for one resource at one point in time.", "source": "codesearchnet"}
{"code": "def emit_code_from_ir(sql_query_tree, compiler_metadata):\n    \n    context = CompilationContext(\n        query_path_to_selectable=dict(),\n        query_path_to_location_info=sql_query_tree.query_path_to_location_info,\n        query_path_to_output_fields=sql_query_tree.query_path_to_output_fields,\n        query_path_to_filters=sql_query_tree.query_path_to_filters,\n        query_path_to_node=sql_query_tree.query_path_to_node,\n        compiler_metadata=compiler_metadata,\n    )\n\n    return _query_tree_to_query(sql_query_tree.root, context)", "docstring": "Return a SQLAlchemy Query from a passed SqlQueryTree.\n\nArgs:\nsql_query_tree: SqlQueryTree, tree representation of the query to emit.\ncompiler_metadata: SqlMetadata, SQLAlchemy specific metadata.\n\nReturns:\nSQLAlchemy Query", "source": "juraj-google-style"}
{"code": "def get_paginated_catalogs(self, querystring=None):\n    return self._load_data(self.CATALOGS_ENDPOINT, default=[], querystring=querystring, traverse_pagination=False, many=False)", "docstring": "Return a paginated list of course catalogs, including name and ID.\n\nReturns:\ndict: Paginated response containing catalogs available for the user.", "source": "codesearchnet"}
{"code": "def _find_mapreduce_yaml(start, checked):\n  \n  dir = start\n  while dir not in checked:\n    checked.add(dir)\n    for mr_yaml_name in MR_YAML_NAMES:\n      yaml_path = os.path.join(dir, mr_yaml_name)\n      if os.path.exists(yaml_path):\n        return yaml_path\n    dir = os.path.dirname(dir)\n  return None", "docstring": "Traverse the directory tree identified by start until a directory already\nin checked is encountered or the path of mapreduce.yaml is found.\n\nChecked is present both to make loop termination easy to reason about and so\nthat the same directories do not get rechecked.\n\nArgs:\nstart: the path to start in and work upward from\nchecked: the set of already examined directories\n\nReturns:\nthe path of mapreduce.yaml file or None if not found.", "source": "juraj-google-style"}
{"code": "def set_calibration(self, enabled, imus):\n    if (len(imus) == 0):\n        imus = list(range(MAX_IMUS))\n    for i in imus:\n        if ((i < 0) or (i >= MAX_IMUS)):\n            logger.warn('Invalid IMU index {} in set_calibration'.format(i))\n            continue\n        self.imus[i]._use_calibration = enabled", "docstring": "Set calibration state for attached IMUs.\n\nArgs:\nenabled (bool): True to apply calibration to IMU data (if available).\nFalse to output uncalibrated data.\nimus (list): indicates which IMUs the calibration state should be set on.\nEmpty list or [0, 1, 2, 3, 4] will apply to all IMUs, [0, 1] only to\nfirst 2 IMUs, etc.", "source": "codesearchnet"}
{"code": "def write_byte(self, value):\n    if isinstance(value, bytes):\n        self.stream.write(value)\n    elif isinstance(value, str):\n        self.stream.write(value.encode('utf-8'))\n    elif isinstance(value, int):\n        self.stream.write(bytes([value]))", "docstring": "Write a single byte to the stream.\n\nArgs:\nvalue (bytes, str or int): value to write to the stream.", "source": "codesearchnet"}
{"code": "def SetPreferredLanguageIdentifier(self, language_identifier):\n    \n    if not isinstance(language_identifier, py2to3.STRING_TYPES):\n      raise ValueError('Language identifier is not a string.')\n\n    values = language_ids.LANGUAGE_IDENTIFIERS.get(\n        language_identifier.lower(), None)\n    if not values:\n      raise KeyError('Language identifier: {0:s} is not defined.'.format(\n          language_identifier))\n    self._language_identifier = language_identifier\n    self._lcid = values[0]", "docstring": "Sets the preferred language identifier.\n\nArgs:\nlanguage_identifier (str): language identifier string such as \"en-US\"\nfor US English or \"is-IS\" for Icelandic.\n\nRaises:\nKeyError: if the language identifier is not defined.\nValueError: if the language identifier is not a string type.", "source": "juraj-google-style"}
{"code": "def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd):\n    \n\n    \n    amp_n_wd_interp = interpolate.interp1d(f_n_wd, amp_n_wd, bounds_error=False, fill_value=1e-30)\n\n    \n    amp_n_wd = amp_n_wd_interp(f_n)\n\n    \n    amp_n = amp_n*(amp_n >= amp_n_wd) + amp_n_wd*(amp_n < amp_n_wd)\n    return f_n, amp_n", "docstring": "Combine noise with wd noise.\n\nCombines noise and white dwarf background noise based on greater\namplitude value at each noise curve step.\n\nArgs:\nf_n (float array): Frequencies of noise curve.\namp_n (float array): Amplitude values of noise curve.\nf_n_wd (float array): Frequencies of wd noise.\namp_n_wd (float array): Amplitude values of wd noise.\n\nReturns:\n(tuple of float arrays): Amplitude values of combined noise curve.", "source": "juraj-google-style"}
{"code": "def open(self, mode=None):\n        \n\n        if mode is None:\n            mode = self.mode\n        elif mode not in ['r', 'w', 'a']:\n            raise ValueError('Invalid mode! Modes: [\\'a\\', \\'r\\', \\'w\\']')\n\n        if self._file is None:\n            self._file = h5py.File(self.path, mode=mode)", "docstring": "Open the container file.\n\nArgs:\nmode (str): Either 'r' for read-only, 'w' for truncate and write or\n'a' for append. (default: 'a').\nIf ``None``, uses ``self.mode``.", "source": "juraj-google-style"}
{"code": "def write_gff_file(self, outfile, force_rerun=False):\n        \n        if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):\n            with open(outfile, \"w\") as out_handle:\n                GFF.write([self], out_handle)\n\n        self.feature_path = outfile", "docstring": "Write a GFF file for the protein features, ``features`` will now load directly from this file.\n\nArgs:\noutfile (str): Path to new FASTA file to be written to\nforce_rerun (bool): If an existing file should be overwritten", "source": "juraj-google-style"}
{"code": "def get_sari_score(source_ids, prediction_ids, list_of_targets, max_gram_size=4, beta_for_deletion=0):\n    addition_scores = []\n    keep_scores = []\n    deletion_scores = []\n    for n in range(1, (max_gram_size + 1)):\n        source_counts = _get_ngram_counter(source_ids, n)\n        prediction_counts = _get_ngram_counter(prediction_ids, n)\n        target_counts = collections.Counter()\n        weighted_target_counts = collections.Counter()\n        num_nonempty_targets = 0\n        for target_ids_i in list_of_targets:\n            target_counts_i = _get_ngram_counter(target_ids_i, n)\n            if target_counts_i:\n                weighted_target_counts += target_counts_i\n                num_nonempty_targets += 1\n        for gram in weighted_target_counts.keys():\n            weighted_target_counts[gram] /= num_nonempty_targets\n            target_counts[gram] = 1\n        keep_scores.append(get_keep_score(source_counts, prediction_counts, weighted_target_counts))\n        deletion_scores.append(get_deletion_score(source_counts, prediction_counts, weighted_target_counts, beta_for_deletion))\n        addition_scores.append(get_addition_score(source_counts, prediction_counts, target_counts))\n    avg_keep_score = (sum(keep_scores) / max_gram_size)\n    avg_addition_score = (sum(addition_scores) / max_gram_size)\n    avg_deletion_score = (sum(deletion_scores) / max_gram_size)\n    sari = (((avg_keep_score + avg_addition_score) + avg_deletion_score) / 3.0)\n    return (sari, avg_keep_score, avg_addition_score, avg_deletion_score)", "docstring": "Compute the SARI score for a single prediction and one or more targets.\n\nArgs:\nsource_ids: a list / np.array of SentencePiece IDs\nprediction_ids: a list / np.array of SentencePiece IDs\nlist_of_targets: a list of target ID lists / np.arrays\nmax_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,\nbigrams, and trigrams)\nbeta_for_deletion: beta for deletion F score.\n\nReturns:\nthe SARI score and its three components: add, keep, and deletion scores", "source": "codesearchnet"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[0][:, 0, :]\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the final [CLS] hidden state of Text-Transformer.\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, ChineseCLIPModel\n\n>>> model = ChineseCLIPModel.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"OFA-Sys/chinese-clip-vit-base-patch16\")\n\n>>> inputs = tokenizer([\"杰尼龟\", \"妙蛙种子\", \"小火龙\", \"皮卡丘\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n>>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)\n```", "source": "github-repos"}
{"code": "def add_update_users(self, users, capacity=None):\n        \n        \n        if not isinstance(users, list):\n            raise HDXError('Users should be a list!')\n        for user in users:\n            self.add_update_user(user, capacity)", "docstring": "Add new or update existing users in organization with new metadata. Capacity eg. member, admin\nmust be supplied either within the User object or dictionary or using the capacity argument (which takes\nprecedence).\n\nArgs:\nusers (List[Union[User,Dict,str]]): A list of either user ids or users metadata from User objects or dictionaries\ncapacity (Optional[str]): Capacity of users eg. member, admin. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def replace_drive_enclosure(self, information):\n        \n\n        uri = \"{}/replaceDriveEnclosure\".format(self.data[\"uri\"])\n        result = self._helper.create(information, uri)\n        self.refresh()\n\n        return result", "docstring": "When a drive enclosure has been physically replaced, initiate the replacement operation that enables the\nnew drive enclosure to take over as a replacement for the prior drive enclosure. The request requires\nspecification of both the serial numbers of the original drive enclosure and its replacement to be provided.\n\nArgs:\ninformation: Options to replace the drive enclosure.\n\nReturns:\ndict: SAS Logical Interconnect.", "source": "juraj-google-style"}
{"code": "def load(nifti_filename):\n    \n    \n    nifti_filename = os.path.expanduser(nifti_filename)\n\n    try:\n        data = nib.load(nifti_filename)\n        img = data.get_data()\n\n    except Exception as e:\n        raise ValueError(\"Could not load file {0} for conversion.\"\n                         .format(nifti_filename))\n        raise\n\n    return img", "docstring": "Import a nifti file into a numpy array. TODO:  Currently only\ntransfers raw data for compatibility with annotation and ND formats\n\nArguments:\nnifti_filename (str):  A string filename of a nifti datafile\n\nReturns:\nA numpy array with data from the nifti file", "source": "juraj-google-style"}
{"code": "def confirm(question):\n    if FORCE_YES:\n        return True\n    while True:\n        answer = input((question + ' <Yes|No>')).lower()\n        if ((answer == 'yes') or (answer == 'y')):\n            confirmed = True\n            break\n        if ((answer == 'no') or (answer == 'n')):\n            confirmed = False\n            break\n    return confirmed", "docstring": "Ask the user if he really want something to happen.\n\nArgs:\nquestion(str): What can happen\n\nReturns:\n(boolean): Confirmed or not", "source": "codesearchnet"}
{"code": "def set_size(self, height=220, width=350, height_threshold=120, width_threshold=160):\n    self.set_integer('height', height)\n    self.set_integer('width', width)\n    self.set_integer('small_height_threshold', height_threshold)\n    self.set_integer('small_width_threshold', width_threshold)", "docstring": "Set the size of the chart.\n\nArgs:\nheight (int): height in pixels.\nwidth (int): width in pixels.\nheight_threshold (int): height threshold in pixels\nwidth_threshold (int): width threshold in pixesls", "source": "codesearchnet"}
{"code": "def _lookup_tensor_name(self, tensor):\n    return self._tensor_aliases.get(tensor.name, tensor.name)", "docstring": "Look up the name of a graph tensor.\n\nThis method maps the name of a debugger-generated Identity or\nDebugIdentityV2 tensor to the name of the original instrumented tensor,\nif `tensor` is such a debugger-created tensor.\nOtherwise, it returns the name of `tensor` as is.\n\nArgs:\ntensor: The graph tensor to look up the name for.\n\nReturns:\nName of the original instrumented tensor as known to the debugger.", "source": "github-repos"}
{"code": "def get_variantid(variant_obj, family_id):\n    \n    new_id = parse_document_id(\n        chrom=variant_obj['chromosome'],\n        pos=str(variant_obj['position']),\n        ref=variant_obj['reference'],\n        alt=variant_obj['alternative'],\n        variant_type=variant_obj['variant_type'],\n        case_id=family_id,\n    )\n    return new_id", "docstring": "Create a new variant id.\n\nArgs:\nvariant_obj(dict)\nfamily_id(str)\n\nReturns:\nnew_id(str): The new variant id", "source": "juraj-google-style"}
{"code": "def __is_json_error(self, status, headers):\n    content_header = headers.get('content-type', '')\n    (content_type, unused_params) = cgi.parse_header(content_header)\n    return (status.startswith('400') and (content_type.lower() in _ALL_JSON_CONTENT_TYPES))", "docstring": "Determine if response is an error.\n\nArgs:\nstatus: HTTP status code.\nheaders: Dictionary of (lowercase) header name to value.\n\nReturns:\nTrue if the response was an error, else False.", "source": "codesearchnet"}
{"code": "def get_size(self, value=None):\n        \n        if value is None:\n            return sum(cls_val.get_size(obj_val) for obj_val, cls_val in\n                       self._get_attributes())\n        elif isinstance(value, type(self)):\n            return value.get_size()\n        else:\n            msg = \"{} is not an instance of {}\".format(value,\n                                                       type(self).__name__)\n            raise PackException(msg)", "docstring": "Calculate the total struct size in bytes.\n\nFor each struct attribute, sum the result of each one's ``get_size()``\nmethod.\n\nArgs:\nvalue: In structs, the user can assign other value instead of a\nclass' instance.\n\nReturns:\nint: Total number of bytes used by the struct.\n\nRaises:\nException: If the struct is not valid.", "source": "juraj-google-style"}
{"code": "def _GetNetworkInfo(self, signatures_key):\n    \n    network_info = {}\n    for category in signatures_key.GetSubkeys():\n      for signature in category.GetSubkeys():\n        profile_guid_value = signature.GetValueByName('ProfileGuid')\n        if profile_guid_value:\n          profile_guid = profile_guid_value.GetDataAsObject()\n        else:\n          continue\n\n        default_gateway_mac_value = signature.GetValueByName(\n            'DefaultGatewayMac')\n        if default_gateway_mac_value:\n          default_gateway_mac = ':'.join([\n              '{0:02x}'.format(octet)\n              for octet in bytearray(default_gateway_mac_value.data)])\n        else:\n          default_gateway_mac = None\n\n        dns_suffix_value = signature.GetValueByName('DnsSuffix')\n        if dns_suffix_value:\n          dns_suffix = dns_suffix_value.GetDataAsObject()\n        else:\n          dns_suffix = None\n\n        network_info[profile_guid] = (default_gateway_mac, dns_suffix)\n\n    return network_info", "docstring": "Retrieves the network info within the signatures subkey.\n\nArgs:\nsignatures_key (dfwinreg.WinRegistryKey): a Windows Registry key.\n\nReturns:\ndict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per\nprofile identifier (GUID).", "source": "juraj-google-style"}
{"code": "def setup(self, check_all=None, exclude_private=None,\n              exclude_uppercase=None, exclude_capitalized=None,\n              exclude_unsupported=None, excluded_names=None,\n              minmax=None, dataframe_format=None):\n        \n        assert self.shellwidget is not None\n        \n        self.check_all = check_all\n        self.exclude_private = exclude_private\n        self.exclude_uppercase = exclude_uppercase\n        self.exclude_capitalized = exclude_capitalized\n        self.exclude_unsupported = exclude_unsupported\n        self.excluded_names = excluded_names\n        self.minmax = minmax\n        self.dataframe_format = dataframe_format\n        \n        if self.editor is not None:\n            self.editor.setup_menu(minmax)\n            self.editor.set_dataframe_format(dataframe_format)\n            self.exclude_private_action.setChecked(exclude_private)\n            self.exclude_uppercase_action.setChecked(exclude_uppercase)\n            self.exclude_capitalized_action.setChecked(exclude_capitalized)\n            self.exclude_unsupported_action.setChecked(exclude_unsupported)\n            self.refresh_table()\n            return\n\n        self.editor = RemoteCollectionsEditorTableView(\n                        self,\n                        data=None,\n                        minmax=minmax,\n                        shellwidget=self.shellwidget,\n                        dataframe_format=dataframe_format)\n\n        self.editor.sig_option_changed.connect(self.sig_option_changed.emit)\n        self.editor.sig_files_dropped.connect(self.import_data)\n        self.editor.sig_free_memory.connect(self.sig_free_memory.emit)\n\n        self.setup_option_actions(exclude_private, exclude_uppercase,\n                                  exclude_capitalized, exclude_unsupported)\n\n        \n\n        self.tools_layout = QHBoxLayout()\n        toolbar = self.setup_toolbar()\n        for widget in toolbar:\n            self.tools_layout.addWidget(widget)\n        self.tools_layout.addStretch()\n        self.setup_options_button()\n\n        \n\n        layout = create_plugin_layout(self.tools_layout, self.editor)\n        self.setLayout(layout)\n\n        self.sig_option_changed.connect(self.option_changed)", "docstring": "Setup the namespace browser with provided settings.\n\nArgs:\ndataframe_format (string): default floating-point format for\nDataFrame editor", "source": "juraj-google-style"}
{"code": "def _verify_static_batch_size_equality(tensors, columns):\n    expected_batch_size = None\n    for i in range(0, len(tensors)):\n        if tensors[i].shape.dims[0].value is not None:\n            if expected_batch_size is None:\n                bath_size_column_index = i\n                expected_batch_size = tensors[i].shape.dims[0]\n            elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):\n                raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, tensors[i].shape.dims[0]))", "docstring": "Validates that the first dim (batch size) of all tensors are equal or None.\n\nArgs:\ntensors: list of tensors to check.\ncolumns: list of feature columns matching tensors. Will be used for error\nmessaging.\n\nRaises:\nValueError: if one of the tensors has a variant batch size", "source": "github-repos"}
{"code": "def whois_emails(self, emails):\n        \n        api_name = 'opendns-whois-emails'\n        fmt_url_path = u'whois/emails/{0}'\n        return self._multi_get(api_name, fmt_url_path, emails)", "docstring": "Calls WHOIS Email end point\n\nArgs:\nemails: An enumerable of string Emails\nReturns:\nA dict of {email: domain_result}", "source": "juraj-google-style"}
{"code": "def __init__(self, grammar, latent_size, num_units):\n    \n    super(ProbabilisticGrammar, self).__init__()\n    self.grammar = grammar\n    self.latent_size = latent_size\n    self.lstm = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units)\n    self.output_layer = tf.keras.layers.Dense(len(grammar.production_rules))", "docstring": "Constructs a probabilistic grammar.\n\nArgs:\ngrammar: An object representing a grammar. It has members\n`nonterminal_symbols`, `alphabet`, `production_rules`, and\n`start_symbol`, and a method `mask` determining (in)valid\nproduction rules given a symbol.\nlatent_size: Number of dimensions in the latent code.\nnum_units: Number of units in the LSTM cell.", "source": "juraj-google-style"}
{"code": "async def enqueue(self, query, queue_index=None, stop_current=False, shuffle=False):\n        \n\n        if query is None or query == \"\":\n            return\n\n        self.statuslog.info(\"Parsing {}\".format(query))\n        self.logger.debug(\"Enqueueing from query\")\n\n        indexnum = None\n        if queue_index is not None:\n            try:\n                indexnum = int(queue_index) - 1\n            except TypeError:\n                self.statuslog.error(\"Play index argument must be a number\")\n                return\n            except ValueError:\n                self.statuslog.error(\"Play index argument must be a number\")\n                return\n\n        if not self.vready:\n            self.parse_query(query, indexnum, stop_current, shuffle)\n        else:\n            parse_thread = threading.Thread(\n                target=self.parse_query,\n                args=[query, indexnum, stop_current, shuffle])\n            \n            parse_thread.start()", "docstring": "Queues songs based on either a YouTube search or a link\n\nArgs:\nquery (str): Either a search term or a link\nqueue_index (str): The queue index to enqueue at (None for end)\nstop_current (bool): Whether to stop the current song after the songs are queued\nshuffle (bool): Whether to shuffle the added songs", "source": "juraj-google-style"}
{"code": "def get_associated_profiles(self):\n        \n        uri = \"{}/associatedProfiles\".format(self.data['uri'])\n        return self._helper.do_get(uri)", "docstring": "Gets the URIs of profiles which are using an Ethernet network.\n\nArgs:\nid_or_uri: Can be either the logical interconnect group id or the logical interconnect group uri\n\nReturns:\nlist: URIs of the associated profiles.", "source": "juraj-google-style"}
{"code": "def gbest_idx(swarm):\n    \n    best = 0\n    cmp = comparator(swarm[best].best_fitness)\n    for (idx, particle) in enumerate(swarm):\n        if cmp(particle.best_fitness, swarm[best].best_fitness):\n            best = idx\n    return best", "docstring": "gbest Neighbourhood topology function.\n\nArgs:\nswarm: list: The list of particles.\n\nReturns:\nint: The index of the gbest particle.", "source": "juraj-google-style"}
{"code": "def _parse_query_key(self, key, val, is_escaped):\n        \n        if key.endswith('__contains'):\n            key = key[:-10]\n            val = self._parse_query_modifier('contains', val, is_escaped)\n        elif key.endswith('__range'):\n            key = key[:-7]\n            val = self._parse_query_modifier('range', val, is_escaped)\n        elif key.endswith('__startswith'):\n            key = key[:-12]\n            val = self._parse_query_modifier('startswith', val, is_escaped)\n        elif key.endswith('__endswith'):\n            key = key[:-10]\n            val = self._parse_query_modifier('endswith', val, is_escaped)\n        \n        elif key.endswith('__lt'):\n            key = key[:-4]\n            val = self._parse_query_modifier('lt', val, is_escaped)\n        \n        elif key.endswith('__gt'):\n            key = key[:-4]\n            val = self._parse_query_modifier('gt', val, is_escaped)\n        \n        elif key.endswith('__lte'):\n            key = key[:-5]\n            val = self._parse_query_modifier('lte', val, is_escaped)\n        \n        elif key.endswith('__gte'):\n            key = key[:-5]\n            val = self._parse_query_modifier('gte', val, is_escaped)\n        elif key != 'NOKEY' and not is_escaped:\n            val = self._escape_query(val)\n        return key, val", "docstring": "Strips query modifier from key and call's the appropriate value modifier.\n\nArgs:\nkey (str): Query key\nval: Query value\n\nReturns:\nParsed query key and value.", "source": "juraj-google-style"}
{"code": "def set_device_policy(device_policy):\n    if device_policy == 'silent':\n        context.context().device_policy = context.DEVICE_PLACEMENT_SILENT\n    elif device_policy == 'silent_for_int32':\n        context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32\n    elif device_policy == 'warn':\n        context.context().device_policy = context.DEVICE_PLACEMENT_WARN\n    elif device_policy == 'explicit':\n        context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT\n    elif device_policy is None:\n        context.context().device_policy = None\n    else:\n        raise ValueError(f'Invalid argument `device_policy`: {device_policy!r}. Please refer to https:", "docstring": "Sets the current thread device policy.\n\nThe device policy controls how operations requiring inputs on a specific\ndevice (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).\n\nWhen using the default, an appropriate policy will be picked automatically.\nThe default policy may change over time.\n\nThis function only sets the device policy for the current thread. Any\nsubsequently started thread will again use the default policy.\n\nArgs:\ndevice_policy: A device policy.\nValid values:\n- None: Switch to a system default.\n- 'warn': Copies the tensors which are not on the right device and logs a\nwarning.\n- 'explicit': Raises an error if the placement is not as required.\n- 'silent': Silently copies the tensors. Note that this may hide\nperformance problems as there is no notification provided when\noperations are blocked on the tensor being copied between devices.\n- 'silent_for_int32': silently copies `int32` tensors, raising errors on\nthe other ones.\n\nRaises:\nValueError: If an invalid `device_policy` is passed.", "source": "github-repos"}
{"code": "def WriteUInt160(self, value):\n    if (type(value) is UInt160):\n        value.Serialize(self)\n    else:\n        raise Exception('value must be UInt160 instance ')", "docstring": "Write a UInt160 type to the stream.\n\nArgs:\nvalue (UInt160):\n\nRaises:\nException: when `value` is not of neocore.UInt160 type.", "source": "codesearchnet"}
{"code": "def CopyFrom(self, other_msg):\n    \n    if self is other_msg:\n      return\n    self.Clear()\n    self.MergeFrom(other_msg)", "docstring": "Copies the content of the specified message into the current message.\n\nThe method clears the current message and then merges the specified\nmessage using MergeFrom.\n\nArgs:\nother_msg: Message to copy into the current one.", "source": "juraj-google-style"}
{"code": "def _update_section_state(line_info, state):\n    section_updated = False\n    google_section_permitted = _google_section_permitted(line_info, state)\n    google_section = google_section_permitted and _google_section(line_info)\n    if google_section:\n        state.section.format = Formats.GOOGLE\n        state.section.title = google_section\n        line_info.remaining = _get_after_google_header(line_info)\n        line_info.remaining_raw = line_info.remaining\n        section_updated = True\n    rst_section = _rst_section(line_info)\n    if rst_section:\n        state.section.format = Formats.RST\n        state.section.title = rst_section\n        line_info.remaining = _get_after_directive(line_info)\n        line_info.remaining_raw = line_info.remaining\n        section_updated = True\n    numpy_section = _numpy_section(line_info)\n    if numpy_section:\n        state.section.format = Formats.NUMPY\n        state.section.title = numpy_section\n        line_info.remaining = ''\n        line_info.remaining_raw = line_info.remaining\n        section_updated = True\n    if section_updated:\n        state.section.new = True\n        state.section.indentation = line_info.indentation\n        state.section.line1_indentation = line_info.next.indentation\n    else:\n        state.section.new = False", "docstring": "Uses line_info to determine the current section of the docstring.\n\nUpdates state and line_info.remaining.\n\nArgs:\nline_info: Information about the current line.\nstate: The state of the parser.", "source": "github-repos"}
{"code": "def load_qrandom():\n    fname = 'datasets/qrandom.npy'\n    with pkg_resources.resource_stream(__name__, fname) as f:\n        return np.load(f)", "docstring": "Loads a set of 10000 random numbers generated by qrandom.\n\nThis dataset can be used when you want to do some limited tests with \"true\"\nrandom data without an internet connection.\n\nReturns:\nint array\nthe dataset", "source": "codesearchnet"}
{"code": "def output_reference(self, name):\n    if (name not in self.output_names):\n        raise ValueError('Invalid output \"{}\"'.format(name))\n    return Reference(step_name=self.name_in_workflow, output_name=name)", "docstring": "Return a reference to the given output for use in an input\nof a next Step.\n\nFor a Step named `echo` that has an output called `echoed`, the\nreference `echo/echoed` is returned.\n\nArgs:\nname (str): the name of the Step output\nRaises:\nValueError: The name provided is not a valid output name for this\nStep.", "source": "codesearchnet"}
{"code": "def __init__(self, graph_def, input_tensors, output_tensors, input_arrays_with_shape=None, output_arrays=None, experimental_debug_info_func=None):\n    super(TFLiteConverter, self).__init__(graph_def, input_tensors, output_tensors, input_arrays_with_shape, output_arrays, experimental_debug_info_func)", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\ngraph_def: Frozen TensorFlow GraphDef.\ninput_tensors: List of input tensors. Type and shape are computed using\n`foo.shape` and `foo.dtype`.\noutput_tensors: List of output tensors (only .name is used from this).\ninput_arrays_with_shape: Tuple of strings representing input tensor names\nand list of integers representing input shapes (e.g., [(\"foo\" : [1, 16,\n16, 3])]). Use only when graph cannot be loaded into TensorFlow and when\n`input_tensors` and `output_tensors` are None. (default None)\noutput_arrays: List of output tensors to freeze graph with. Use only when\ngraph cannot be loaded into TensorFlow and when `input_tensors` and\n`output_tensors` are None. (default None)\nexperimental_debug_info_func: An experimental function to retrieve the\ngraph debug info for a set of nodes from the `graph_def`.\n\nRaises:\nValueError: Invalid arguments.", "source": "github-repos"}
{"code": "def coder_benchmark_factory(coder, generate_fn):\n\n    class CoderBenchmark(object):\n\n        def __init__(self, num_elements_per_benchmark):\n            self._coder = coders.IterableCoder(coder)\n            self._list = [generate_fn() for _ in range(num_elements_per_benchmark)]\n\n        def __call__(self):\n            _ = self._coder.decode(self._coder.encode(self._list))\n    CoderBenchmark.__name__ = '%s, %s' % (generate_fn.__name__, str(coder))\n    return CoderBenchmark", "docstring": "Creates a benchmark that encodes and decodes a list of elements.\n\nArgs:\ncoder: coder to use to encode an element.\ngenerate_fn: a callable that generates an element.", "source": "github-repos"}
{"code": "def _pack(formatstring, value):\n    \n    _checkString(formatstring, description='formatstring', minlength=1)\n\n    try:\n        result = struct.pack(formatstring, value)\n    except:\n        errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'\n        errortext += ' Value: {0!r} Struct format code is: {1}'\n        raise ValueError(errortext.format(value, formatstring))\n\n    if sys.version_info[0] > 2:\n        return str(result, encoding='latin1')  \n    return result", "docstring": "Pack a value into a bytestring.\n\nUses the built-in :mod:`struct` Python module.\n\nArgs:\n* formatstring (str): String for the packing. See the :mod:`struct` module for details.\n* value (depends on formatstring): The value to be packed\n\nReturns:\nA bytestring (str).\n\nRaises:\nValueError\n\nNote that the :mod:`struct` module produces byte buffers for Python3,\nbut bytestrings for Python2. This is compensated for automatically.", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, command=None, flags=None, meter_id=None,\n                 bands=None):\n        \n        super().__init__(xid)\n        self.command = command\n        self.flags = flags\n        self.meter_id = meter_id\n        self.bands = bands", "docstring": "Create a MeterMod with the optional parameters below.\n\nArgs:\nxid (int): Headers transaction id. Defaults to random.\ncommand (MeterModCommand): One of OFPMC_*.\nflags (MeterFlags): One of OFPMF_*.\nmeter_id (int): Meter instance.\nbands (MeterBandHeader): The bands length is inferred from the\nlength field in the header.", "source": "juraj-google-style"}
{"code": "def _einsum_v1_parse_and_resolve_equation(equation, input_shapes):\n    equation = equation.replace(' ', '')\n    match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation)\n    if not match:\n        raise ValueError(f'Indices have incorrect format. Received: {equation}.')\n    input_axis_labels = match.group(1).split(',')\n    output_axis_labels = match.group(2)[2:] if match.group(2) else None\n    if len(input_shapes) != len(input_axis_labels):\n        raise ValueError(f'Got {len(input_shapes)} arguments for equation \"{equation}\", expecting {len(input_axis_labels)}.')\n    ellipsis_axes = ''\n    if '...' in equation:\n        unused = ''.join((c for c in string.ascii_letters if c not in ''.join(input_axis_labels)))\n        for i, ax in enumerate(input_axis_labels):\n            if '...' in ax:\n                parts = ax.split('...')\n                if len(parts) != 2:\n                    raise ValueError(f'Unable to resolve ellipsis. Excess number found: {len(parts) - 1} vs 1.')\n                if input_shapes[i].ndims is None:\n                    raise ValueError('Unable to statically infer ellipsis axes. The input shapes has a dynamic dimensionality.')\n                n = input_shapes[i].ndims - len(''.join(parts))\n                if n < 0:\n                    raise ValueError('Ellipses lengths do not match.')\n                if len(unused) < n:\n                    raise ValueError('Unable to resolve ellipsis, too many distinct labels.')\n                replace_axes = unused[-n:] if n > 0 else ''\n                input_axis_labels[i] = input_axis_labels[i].replace('...', replace_axes)\n                if len(replace_axes) > len(ellipsis_axes):\n                    ellipsis_axes = replace_axes\n        if any(('.' in ax for ax in input_axis_labels)):\n            raise ValueError(f'Period \".\" found outside of ellipsis in input {input_axis_labels}.')\n        if output_axis_labels is not None:\n            output_axis_labels = output_axis_labels.replace('...', ellipsis_axes)\n            if '.' in output_axis_labels:\n                raise ValueError(f'Period \".\" found outside of ellipsis in output {output_axis_labels}.')\n    if output_axis_labels is None:\n        axis_labels = set(''.join(input_axis_labels)) - set(ellipsis_axes)\n        indices = ''.join(sorted(axis_labels))\n        counts = {ax: 0 for ax in indices}\n        for axes_ in input_axis_labels:\n            for ax in axes_:\n                if ax not in ellipsis_axes:\n                    counts[ax] += 1\n        output_axis_labels = ellipsis_axes + ''.join(sorted((ax for ax in axis_labels if counts[ax] == 1)))\n    return (input_axis_labels, output_axis_labels)", "docstring": "Helper for einsum() that splits/resolves inputs & outputs.\n\nArgs:\nequation: Equation string given as argument to einsum().\ninput_shapes: List of the shapes of all inputs given to einsum()\n\nReturns:\ninput_axis_labels, output_axis_labels where:\ninput_axis_labels: List of length len(input_shapes) of strings\nrepresenting the character label for each dimension of each given input,\nresolving any broadcast (...) axes,\noutput_axis_labels: A string of character labels for each axes of output\ntensor, filling in missing output subscripts and broadcast axes.\n\nRaises:\nValueError: If equation is in the uncorrect format, incorrect number of\ninputs given or broadcast axes \"...\" or output axes could not be resolved.", "source": "github-repos"}
{"code": "def update(self, rid, data, raise_on_error=True):\n        \n        cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}\n        return self.ds.put(rid, cache_data, raise_on_error)", "docstring": "Write updated cache data to the DataStore.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): The record data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(\n      self, parser_mediator, registry_key, codepage='cp1252', **kwargs):\n    \n    self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"}
{"code": "def expected_h(nvals, fit=\"RANSAC\"):\n  \n  rsvals = [expected_rs(n) for n in nvals]\n  poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)\n  return poly[0]", "docstring": "Uses expected_rs to calculate the expected value for the Hurst exponent h\nbased on the values of n used for the calculation.\n\nArgs:\nnvals (iterable of int):\nthe values of n used to calculate the individual (R/S)_n\n\nKWargs:\nfit (str):\nthe fitting method to use for the line fit, either 'poly' for normal\nleast squares polynomial fitting or 'RANSAC' for RANSAC-fitting which\nis more robust to outliers\n\nReturns:\nfloat:\nexpected h for white noise", "source": "juraj-google-style"}
{"code": "def _ParsePlistKeyValue(self, knowledge_base, name, value):\n    \n    if not knowledge_base.GetValue('keyboard_layout'):\n      if name in self._PLIST_KEYS:\n        if isinstance(value, (list, tuple)):\n          value = value[0]\n\n        _, _, keyboard_layout = value.rpartition('.')\n\n        knowledge_base.SetValue('keyboard_layout', keyboard_layout)", "docstring": "Parses a plist key value.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nname (str): name of the plist key.\nvalue (str): value of the plist key.", "source": "juraj-google-style"}
{"code": "def _log_device_compatibility_check(policy_name, gpu_details_list):\n    if policy_name != 'mixed_float16':\n        return\n    supported_device_strs = []\n    unsupported_device_strs = []\n    for details in gpu_details_list:\n        name = details.get('device_name', 'Unknown GPU')\n        cc = details.get('compute_capability')\n        if cc:\n            device_str = '%s, compute capability %s.%s' % (name, cc[0], cc[1])\n            if cc >= (7, 0):\n                supported_device_strs.append(device_str)\n            else:\n                unsupported_device_strs.append(device_str)\n        else:\n            unsupported_device_strs.append(name + ', no compute capability (probably not an Nvidia GPU)')\n    if unsupported_device_strs:\n        warning_str = _COMPAT_CHECK_WARNING_PREFIX + '\\n'\n        if supported_device_strs:\n            warning_str += 'Some of your GPUs may run slowly with dtype policy mixed_float16 because they do not all have compute capability of at least 7.0. Your GPUs:\\n'\n        elif len(unsupported_device_strs) == 1:\n            warning_str += 'Your GPU may run slowly with dtype policy mixed_float16 because it does not have compute capability of at least 7.0. Your GPU:\\n'\n        else:\n            warning_str += 'Your GPUs may run slowly with dtype policy mixed_float16 because they do not have compute capability of at least 7.0. Your GPUs:\\n'\n        for device_str in _dedup_strings(supported_device_strs + unsupported_device_strs):\n            warning_str += '  ' + device_str + '\\n'\n        warning_str += 'See https:\n        warning_str += _COMPAT_CHECK_WARNING_SUFFIX\n        tf_logging.warning(warning_str)\n    elif not supported_device_strs:\n        tf_logging.warning('%s\\nThe dtype policy mixed_float16 may run slowly because this machine does not have a GPU. Only Nvidia GPUs with compute capability of at least 7.0 run quickly with mixed_float16.\\n%s' % (_COMPAT_CHECK_WARNING_PREFIX, _COMPAT_CHECK_WARNING_SUFFIX))\n    elif len(supported_device_strs) == 1:\n        tf_logging.info('%s\\nYour GPU will likely run quickly with dtype policy mixed_float16 as it has compute capability of at least 7.0. Your GPU: %s' % (_COMPAT_CHECK_OK_PREFIX, supported_device_strs[0]))\n    else:\n        tf_logging.info('%s\\nYour GPUs will likely run quickly with dtype policy mixed_float16 as they all have compute capability of at least 7.0' % _COMPAT_CHECK_OK_PREFIX)", "docstring": "Logs a compatibility check if the devices support the policy.\n\nCurrently only logs for the policy mixed_float16.\n\nArgs:\npolicy_name: The name of the dtype policy.\ngpu_details_list: A list of dicts, one dict per GPU. Each dict\nis the device details for a GPU, as returned by\n`tf.config.experimental.get_device_details()`.", "source": "github-repos"}
{"code": "def load(self, validate=True):\n    self._load()\n    try:\n        self.config = self._load_config(self.system_config_file)\n        user = self._load_config(self.global_config_file)\n        config = self._load_config(self.config_file)\n        local = self._load_config(self.config_local_file)\n        for conf in [user, config, local]:\n            self.config = self._merge(self.config, conf)\n        if validate:\n            self.config = Schema(self.SCHEMA).validate(self.config)\n        self.config = configobj.ConfigObj(self.config, write_empty_values=True)\n        self.config.filename = self.config_file\n        self._resolve_paths(self.config, self.config_file)\n    except Exception as ex:\n        raise ConfigError(ex)", "docstring": "Loads config from all the config files.\n\nArgs:\nvalidate (bool): optional flag to tell dvc if it should validate\nthe config or just load it as is. 'True' by default.\n\n\nRaises:\ndvc.config.ConfigError: thrown if config has invalid format.", "source": "codesearchnet"}
{"code": "def __init__(self, url, access_token, index,\n                 source=\"parsedmarc\", verify=True, timeout=60):\n        \n        url = urlparse(url)\n        self.url = \"{0}:\n                                                                   url.netloc)\n        self.access_token = access_token.lstrip(\"Splunk \")\n        self.index = index\n        self.host = socket.getfqdn()\n        self.source = source\n        self.session = requests.Session()\n        self.timeout = timeout\n        self.session.verify = verify\n        self._common_data = dict(host=self.host, source=self.source,\n                                 index=self.index)\n\n        self.session.headers = {\n            \"User-Agent\": \"parsedmarc/{0}\".format(__version__),\n            \"Authorization\": \"Splunk {0}\".format(self.access_token)\n        }", "docstring": "Initializes the HECClient\nArgs:\nurl (str): The URL of the HEC\naccess_token (str): The HEC access token\nindex (str): The name of the index\nsource (str): The source name\nverify (bool): Verify SSL certificates\ntimeout (float): Number of seconds to wait for the server to send\ndata before giving up", "source": "juraj-google-style"}
{"code": "def comment(data, what):\n    data = data.splitlines()\n    data = map((lambda x: (('\n    return '\\n'.join(data)", "docstring": "Comments line containing `what` in string `data`.\n\nArgs:\ndata (str): Configuration file in string.\nwhat (str): Line which will be commented out.\n\nReturns:\nstr: Configuration file with commented `what`.", "source": "codesearchnet"}
{"code": "def expect_false(condition, msg, extras=None):\n    try:\n        asserts.assert_false(condition, msg, extras)\n    except signals.TestSignal as e:\n        logging.exception('Expected a `False` value, got `True`.')\n        recorder.add_error(e)", "docstring": "Expects an expression evaluates to False.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nArgs:\nexpr: The expression that is evaluated.\nmsg: A string explaining the details in case of failure.\nextras: An optional field for extra information to be included in test\nresult.", "source": "github-repos"}
{"code": "def markdown_to_text(body):\n    \n    \n    md = markdown.markdown(body, extensions=[\n        'markdown.extensions.extra'\n    ])\n\n    \n    soup = BeautifulSoup(md, 'html.parser')\n\n    \n    return soup.get_text()", "docstring": "Converts markdown to text.\n\nArgs:\nbody: markdown (or plaintext, or maybe HTML) input\n\nReturns:\nPlaintext with all tags and frills removed", "source": "juraj-google-style"}
{"code": "def __init__(self, event_timestamp, duration=5):\n    \n    super(TimeSlice, self).__init__()\n    self.duration = duration\n    self.event_timestamp = event_timestamp", "docstring": "Initializes the time slice.\n\nArgs:\nevent_timestamp (int): event timestamp of the time slice or None.\nduration (Optional[int]): duration of the time slice in minutes.\nThe default is 5, which represent 2.5 minutes before and 2.5 minutes\nafter the event timestamp.", "source": "juraj-google-style"}
{"code": "def attention_mask_ignore_padding(inputs, dtype=tf.float32):\n    inputs = rename_length_to_memory_length(inputs)\n    return (mtf.cast(mtf.equal(inputs, 0), dtype) * (- 1000000000.0))", "docstring": "Bias for encoder-decoder attention.\n\nArgs:\ninputs: a mtf.Tensor with shape [..., length_dim]\ndtype: a tf.dtype\n\nReturns:\na mtf.Tensor with shape [..., memory_length_dim]", "source": "codesearchnet"}
{"code": "def clean(self, force: bool=False):\n    with (yield from self._lock):\n        for connection in tuple(self.ready):\n            if (force or connection.closed()):\n                connection.close()\n                self.ready.remove(connection)", "docstring": "Clean closed connections.\n\nArgs:\nforce: Clean connected and idle connections too.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def protocol_version_to_kmip_version(value):\n    \n    if not isinstance(value, ProtocolVersion):\n        return None\n\n    if value.major == 1:\n        if value.minor == 0:\n            return enums.KMIPVersion.KMIP_1_0\n        elif value.minor == 1:\n            return enums.KMIPVersion.KMIP_1_1\n        elif value.minor == 2:\n            return enums.KMIPVersion.KMIP_1_2\n        elif value.minor == 3:\n            return enums.KMIPVersion.KMIP_1_3\n        elif value.minor == 4:\n            return enums.KMIPVersion.KMIP_1_4\n        else:\n            return None\n    else:\n        return None", "docstring": "Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.\n\nArgs:\nvalue (ProtocolVersion): A ProtocolVersion struct to be converted into\na KMIPVersion enumeration.\n\nReturns:\nKMIPVersion: The enumeration equivalent of the struct. If the struct\ncannot be converted to a valid enumeration, None is returned.", "source": "juraj-google-style"}
{"code": "def get(name):\n    for matcher in matchers:\n        if ((matcher.__name__ == name) or (getattr(matcher, 'name', None) == name)):\n            return matcher", "docstring": "Returns a matcher instance by class or alias name.\n\nArguments:\nname (str): matcher class name or alias.\n\nReturns:\nmatcher: found matcher instance, otherwise ``None``.", "source": "codesearchnet"}
{"code": "def key_prefix(self) -> str:\n    return self.__class__.__qualname__", "docstring": "Prefix for key to avoid collisions from different Processors.\n\nDefaults to classname. Processor() should override this if, for example, it\naccepts arguments that change output of __call__.\n\nReturns:\nPrefix that will be added to key.", "source": "github-repos"}
{"code": "def NewFromJSON(data):\n    if data.get('shakes', None):\n        shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]\n    else:\n        shakes = None\n    return User(id=data.get('id', None), name=data.get('name', None), profile_image_url=data.get('profile_image_url', None), about=data.get('about', None), website=data.get('website', None), shakes=shakes)", "docstring": "Create a new User instance from a JSON dict.\n\nArgs:\ndata (dict): JSON dictionary representing a user.\n\nReturns:\nA User instance.", "source": "codesearchnet"}
{"code": "def apply(self, func, **kwargs):\n        \n        oid = self.oid\n        self.call_queue.append((func, kwargs))\n\n        def call_queue_closure(oid_obj, call_queues):\n            for func, kwargs in call_queues:\n                if isinstance(func, ray.ObjectID):\n                    func = ray.get(func)\n                if isinstance(kwargs, ray.ObjectID):\n                    kwargs = ray.get(kwargs)\n\n                oid_obj = func(oid_obj, **kwargs)\n\n            return oid_obj\n\n        oid = deploy_ray_func.remote(\n            call_queue_closure, oid, kwargs={\"call_queues\": self.call_queue}\n        )\n        self.call_queue = []\n\n        return PyarrowOnRayFramePartition(oid)", "docstring": "Apply a function to the object stored in this partition.\n\nNote: It does not matter if func is callable or an ObjectID. Ray will\nhandle it correctly either way. The keyword arguments are sent as a\ndictionary.\n\nArgs:\nfunc: The function to apply.\n\nReturns:\nA RayRemotePartition object.", "source": "juraj-google-style"}
{"code": "def _GetStatus(self, two_factor=False):\n    \n    params = ['status']\n    if two_factor:\n      params += ['--twofactor']\n    retcode = self._RunOsLoginControl(params)\n    if retcode is None:\n      if self.oslogin_installed:\n        self.logger.warning('OS Login not installed.')\n        self.oslogin_installed = False\n      return None\n\n    \n    self.oslogin_installed = True\n    if not os.path.exists(constants.OSLOGIN_NSS_CACHE):\n      return False\n    return not retcode", "docstring": "Check whether OS Login is installed.\n\nArgs:\ntwo_factor: bool, True if two factor should be enabled.\n\nReturns:\nbool, True if OS Login is installed.", "source": "juraj-google-style"}
{"code": "def from_representation(self, representation):\n    object_dict = {}\n    failed = {}\n    for (name, field) in self.fields.items():\n        if (name not in representation):\n            continue\n        try:\n            if ((not isinstance(representation[name], (list, tuple))) and field.many):\n                raise ValueError('field should be sequence')\n            source = _source(name, field)\n            value = representation[name]\n            if field.many:\n                if (not field.allow_null):\n                    object_dict[source] = [field.from_representation(single_value) for single_value in value]\n                else:\n                    object_dict[source] = [(field.from_representation(single_value) if (single_value is not None) else None) for single_value in value]\n            elif (not field.allow_null):\n                object_dict[source] = field.from_representation(value)\n            else:\n                object_dict[source] = (field.from_representation(value) if value else None)\n        except ValueError as err:\n            failed[name] = str(err)\n    if failed:\n        try:\n            self.validate(object_dict)\n            raise DeserializationError()\n        except DeserializationError as err:\n            err.failed = failed\n            raise\n    return object_dict", "docstring": "Convert given representation dict into internal object.\n\nInternal object is simply a dictionary of values with respect to field\nsources.\n\nThis does not check if all required fields exist or values are\nvalid in terms of value validation\n(see: :meth:`BaseField.validate()`) but still requires all of passed\nrepresentation values to be well formed representation (success call\nto ``field.from_representation``).\n\nIn case of malformed representation it will run additional validation\nonly to provide a full detailed exception about all that might be\nwrong with provided representation.\n\nArgs:\nrepresentation (dict): dictionary with field representation values\n\nRaises:\nDeserializationError: when at least one representation field\nis not formed as expected by field object. Information\nabout additional forbidden/missing/invalid fields is provided\nas well.", "source": "codesearchnet"}
{"code": "def _average_precision(self, rec, prec):\n        \n        \n        mrec = np.concatenate(([0.], rec, [1.]))\n        mpre = np.concatenate(([0.], prec, [0.]))\n\n        \n        for i in range(mpre.size - 1, 0, -1):\n            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n        \n        i = np.where(mrec[1:] != mrec[:-1])[0]\n\n        \n        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n        return ap", "docstring": "calculate average precision\n\nParams:\n----------\nrec : numpy.array\ncumulated recall\nprec : numpy.array\ncumulated precision\nReturns:\n----------\nap as float", "source": "juraj-google-style"}
{"code": "def old_tracer_correlation( self ):\n        \n        if self.has_run:\n            return self.atoms.sum_dr_squared() / float( self.number_of_jumps )\n        else:\n            return None", "docstring": "Deprecated tracer correlation factor for this simulation.\n\nArgs:\nNone\n\nReturns:\n(Float): The tracer correlation factor, f.\n\nNotes:\nThis function assumes that the jump distance between sites has\nbeen normalised to a=1. If the jump distance is not equal to 1\nthen the value returned by this function should be divided by a^2.\nEven better, use `self.tracer_correlation`.", "source": "juraj-google-style"}
{"code": "def listen_now_items(self):\n    response = self._call(mc_calls.ListenNowGetListenNowItems)\n    listen_now_item_list = response.body.get('listennow_items', [])\n    listen_now_items = defaultdict(list)\n    for item in listen_now_item_list:\n        type_ = f\"{ListenNowItemType(item['type']).name}s\"\n        listen_now_items[type_].append(item)\n    return dict(listen_now_items)", "docstring": "Get a listing of Listen Now items.\n\nNote:\nThis does not include situations;\nuse the :meth:`situations` method instead.\n\nReturns:\ndict: With ``albums`` and ``stations`` keys of listen now items.", "source": "codesearchnet"}
{"code": "def get_instance_type_parameter(self, name: str, node: 'cfg.CFGNode | None'=None):\n    del name\n    if node is None:\n        node = self.ctx.root_node\n    return self.ctx.new_unsolvable(node)", "docstring": "Get a cfg.Variable of the instance's values for the type parameter.\n\nTreating self as an abstract.Instance, gets the variable of its values for\nthe given type parameter. For the real implementation, see\nSimpleValue.get_instance_type_parameter.\n\nArgs:\nname: The name of the type parameter.\nnode: Optionally, the current CFG node.\n\nReturns:\nA Variable which may be empty.", "source": "github-repos"}
{"code": "def concept(self, mechanism, purviews=False, cause_purviews=False, effect_purviews=False):\n    log.debug('Computing concept %s...', mechanism)\n    if (not mechanism):\n        log.debug('Empty concept; returning null concept')\n        return self.null_concept\n    cause = self.mic(mechanism, purviews=(cause_purviews or purviews))\n    effect = self.mie(mechanism, purviews=(effect_purviews or purviews))\n    log.debug('Found concept %s', mechanism)\n    return Concept(mechanism=mechanism, cause=cause, effect=effect, subsystem=self)", "docstring": "Return the concept specified by a mechanism within this subsytem.\n\nArgs:\nmechanism (tuple[int]): The candidate set of nodes.\n\nKeyword Args:\npurviews (tuple[tuple[int]]): Restrict the possible purviews to\nthose in this list.\ncause_purviews (tuple[tuple[int]]): Restrict the possible cause\npurviews to those in this list. Takes precedence over\n``purviews``.\neffect_purviews (tuple[tuple[int]]): Restrict the possible effect\npurviews to those in this list. Takes precedence over\n``purviews``.\n\nReturns:\nConcept: The pair of maximally irreducible cause/effect repertoires\nthat constitute the concept specified by the given mechanism.", "source": "codesearchnet"}
{"code": "def longest_one_seg_prefix(self, word):\n    match = self.seg_regex.match(word)\n    if match:\n        return match.group(0)\n    else:\n        return ''", "docstring": "Return longest IPA Unicode prefix of `word`\n\nArgs:\nword (unicode): word as IPA string\n\nReturns:\nunicode: longest single-segment prefix of `word`", "source": "codesearchnet"}
{"code": "def initialize_from_assignments(assignments, k, max_assign_weight=0.75):\n    \n    cells = len(assignments)\n    init_W = np.zeros((k, cells))\n    for i, a in enumerate(assignments):\n        \n        \n        init_W[a, i] = max_assign_weight\n        for a2 in range(k):\n            if a2!=a:\n                init_W[a2, i] = (1-max_assign_weight)/(k-1)\n    return init_W/init_W.sum(0)", "docstring": "Creates a weight initialization matrix from Poisson clustering assignments.\n\nArgs:\nassignments (array): 1D array of integers, of length cells\nk (int): number of states/clusters\nmax_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75\n\nReturns:\ninit_W (array): k x cells", "source": "juraj-google-style"}
{"code": "def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):\n    \n    import shlex\n    if isinstance(command, (list, tuple)):\n        raise ValueError('command tuple not supported yet')\n    args = shlex.split(command, posix=not WIN32)\n    if verbose is True:\n        verbose = 2\n    if verbout is None:\n        verbout = verbose >= 1\n    if verbose >= 2:\n        print('+=== START CMD2 ===')\n        print('Command:')\n        print(command)\n        if verbout:\n            print('----')\n            print('Stdout:')\n    proc = subprocess.Popen(args, stdout=subprocess.PIPE,\n                            stderr=subprocess.STDOUT, shell=shell,\n                            universal_newlines=True)\n    if detatch:\n        info = {'proc': proc}\n    else:\n        write_fn = sys.stdout.write\n        flush_fn = sys.stdout.flush\n        logged_out = []\n        for line in _run_process(proc):\n            \n            line_ = line if six.PY2 else line\n            if len(line_) > 0:\n                if verbout:\n                    write_fn(line_)\n                    flush_fn()\n                logged_out.append(line)\n        try:\n            from utool import util_str  \n            \n            out = ''.join(logged_out)\n        except UnicodeDecodeError:\n            from utool import util_str  \n            logged_out = util_str.ensure_unicode_strlist(logged_out)\n            \n            out = ''.join(logged_out)\n            \n            \n        (out_, err) = proc.communicate()\n        ret = proc.wait()\n        info = {\n            'out': out,\n            'err': err,\n            'ret': ret,\n        }\n    if verbose >= 2:\n        print('L___ END CMD2 ___')\n    return info", "docstring": "Trying to clean up cmd\n\nArgs:\ncommand (str): string command\nshell (bool): if True, process is run in shell\ndetatch (bool): if True, process is run in background\nverbose (int): verbosity mode\nverbout (bool): if True, `command` writes to stdout in realtime.\ndefaults to True iff verbose > 0\n\nReturns:\ndict: info - information about command status", "source": "juraj-google-style"}
{"code": "def expand_value_set_definition(self, value_set: value_set_pb2.ValueSet) -> value_set_pb2.ValueSet:\n    base_url, request_url = _expansion_request_url_for_value_set_url(value_set.url.value)\n    request_json = json_format.print_fhir_to_json_string(value_set).encode('utf-8')\n    session_ = self.create_session()\n    session_.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json'})\n    auth = self.auth_per_terminology_server.get(base_url)\n    if auth is not None:\n        if isinstance(auth, tuple) and len(auth) == 2:\n            logging.debug('Using Basic auth for auth')\n            session_.auth = auth\n        else:\n            logging.debug('Using Bearer token for auth')\n            session_.headers['Authorization'] = auth\n    logging.info('Expanding value set url: %s version: %s using terminology service: %s', value_set.url.value, value_set.version.value, base_url)\n    with session_ as session:\n\n        def request_func(offset: int) -> requests.Response:\n            return session.post(request_url, data=request_json, params={'offset': offset})\n        expanded_value_set = _paginate_expand_value_set_request(request_func, value_set.url.value, value_set.version.value)\n    logging.info('Retrieved %d codes for value set url: %s version: %s using terminology service: %s', len(expanded_value_set.expansion.contains), value_set.url.value, value_set.version.value, base_url)\n    return expanded_value_set", "docstring": "Expands the value set definition using a terminology server.\n\nRequests an expansion of the given value set from the appropriate\nterminology server. Attempts to expand arbitrary value sets by passing their\nentire definition to the terminology service for expansion.\n\nIf possible, requests expansion from the domain associated with the value\nset's URL. If the value set URL is not associated with a known terminology\nservice, uses the tx.fhir.org service as it is able to expand value sets\ndefined outside its own specifications.\n\nRetrieves the current definition of the value set from the terminology\nservice as well as its expansion.\n\nArgs:\nvalue_set: The value set to expand.\n\nReturns:\nThe current definition of the value set from the server with its expanded\ncodes present.", "source": "github-repos"}
{"code": "def get(cls, resource_id):\n        \n        res = Resource.get(resource_id)\n        return cls(res) if res else None", "docstring": "Returns the class object identified by `resource_id`\n\nArgs:\nresource_id (str): Unique EC2 Instance ID to load from database\n\nReturns:\nEC2 Instance object if found, else None", "source": "juraj-google-style"}
{"code": "def plot_heatmap(data, title='Heatmap', show_legend=True, show_labels=True, label_fmt='.2f', vmin=None, vmax=None, figsize=None, label_color='w', cmap='RdBu', **kwargs):\n    (fig, ax) = plt.subplots(figsize=figsize)\n    heatmap = ax.pcolor(data, vmin=vmin, vmax=vmax, cmap=cmap)\n    ax.invert_yaxis()\n    if (title is not None):\n        plt.title(title)\n    if show_legend:\n        fig.colorbar(heatmap)\n    if show_labels:\n        vals = data.values\n        for x in range(data.shape[0]):\n            for y in range(data.shape[1]):\n                plt.text((x + 0.5), (y + 0.5), format(vals[(y, x)], label_fmt), horizontalalignment='center', verticalalignment='center', color=label_color)\n    plt.yticks(np.arange(0.5, len(data.index), 1), data.index)\n    plt.xticks(np.arange(0.5, len(data.columns), 1), data.columns)\n    return plt", "docstring": "Plot a heatmap using matplotlib's pcolor.\n\nArgs:\n* data (DataFrame): DataFrame to plot. Usually small matrix (ex.\ncorrelation matrix).\n* title (string): Plot title\n* show_legend (bool): Show color legend\n* show_labels (bool): Show value labels\n* label_fmt (str): Label format string\n* vmin (float): Min value for scale\n* vmax (float): Max value for scale\n* cmap (string): Color map\n* kwargs: Passed to matplotlib's pcolor", "source": "codesearchnet"}
{"code": "def render_policy_template(account_number='', app='coreforrest', env='dev', group='forrest', items=None, pipeline_settings=None, region='us-east-1', service=''):\n    statements = []\n    rendered_service_policy = get_template('infrastructure/iam/{0}.json.j2'.format(service), account_number=account_number, app=app, env=env, group=group, region=region, items=items, settings=pipeline_settings)\n    try:\n        statement_block = json.loads(rendered_service_policy)\n        statements.append(statement_block)\n    except ValueError:\n        LOG.debug('Need to make %s template into list.', service)\n        statements = json.loads('[{0}]'.format(rendered_service_policy))\n    LOG.debug('Rendered IAM Policy statements: %s', statements)\n    return statements", "docstring": "Render IAM Policy template.\n\nTo support multiple statement blocks, JSON objects can be separated by a\ncomma. This function attempts to turn any invalid JSON into a valid list\nbased on this comma separated assumption.\n\nArgs:\naccount_number (str): AWS Account number.\napp (str): Name of Spinnaker Application.\nenv (str): Environment/Account in AWS\ngroup (str):A Application group/namespace\nitems (list): Resource names used to create a Policy per Resource.\nregion (str): AWS region.\npipeline_settings (dict): Settings from *pipeline.json*.\nservice (str): Name of cloud service to find matching IAM Policy\ntemplate.\n\nReturns:\nlist: IAM Policy :obj:`dict` statements for the given service.", "source": "codesearchnet"}
{"code": "def parse(self, argument):\n    if isinstance(argument, list):\n        return argument\n    elif (not argument):\n        return []\n    else:\n        if self._comma_compat:\n            argument = argument.replace(',', ' ')\n        return argument.split()", "docstring": "Parses argument as whitespace-separated list of strings.\n\nIt also parses argument as comma-separated list of strings if requested.\n\nArgs:\nargument: string argument passed in the commandline.\n\nReturns:\n[str], the parsed flag value.", "source": "codesearchnet"}
{"code": "def must_run_on_cpu(node, pin_variables_on_cpu=False):\n    if isinstance(node, ops.Operation):\n        node_def = node.node_def\n    else:\n        assert isinstance(node, node_def_pb2.NodeDef)\n        node_def = node\n    if pin_variables_on_cpu and _is_variable_op(node_def.op):\n        return True\n    if node_def.op == 'Const':\n        dtype = node_def.attr['dtype'].type\n        if dtype == dtypes.string or dtype == dtypes.int32:\n            return True\n    if node_def.op in ['DynamicStitch', 'ParallelDynamicStitch']:\n        dtype = node_def.attr['T'].type\n        if dtype == dtypes.int32:\n            return True\n    if node_def.op in ['Cast']:\n        dtype = node_def.attr['SrcT'].type\n        if dtype == dtypes.int32:\n            return True\n    return False", "docstring": "Returns True if the given node_def must run on CPU, otherwise False.\n\nArgs:\nnode: The node to be assigned to a device. Could be either an ops.Operation\nor NodeDef.\npin_variables_on_cpu: If True, this function will return False if node_def\nrepresents a variable-related op.\n\nReturns:\nTrue if the given node must run on CPU, otherwise False.", "source": "github-repos"}
{"code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    if task_type is not None and task_id is not None:\n        master = self.cluster_spec().task_address(task_type, task_id)\n        return format_master_url(master, rpc_layer or self._rpc_layer)\n    return self._cluster_resolvers[0].master(rpc_layer=rpc_layer)", "docstring": "Returns the master address to use when creating a session.\n\nThis usually returns the master from the first ClusterResolver passed in,\nbut you can override this by specifying the task_type and task_id.\n\nNote: this is only useful for TensorFlow 1.x.\n\nArgs:\ntask_type: (Optional) The type of the TensorFlow task of the master.\ntask_id: (Optional) The index of the TensorFlow task of the master.\nrpc_layer: (Optional) The RPC protocol for the given cluster.\n\nReturns:\nThe name or URL of the session master.", "source": "github-repos"}
{"code": "def clean_all(G, settings):\n    quiet = settings['quiet']\n    recon = settings['recon']\n    sprint = settings['sprint']\n    error = settings['error']\n    all_outputs = []\n    for node in G.nodes(data=True):\n        if ('output' in node[1]):\n            for item in get_all_outputs(node[1]):\n                all_outputs.append(item)\n    all_outputs.append('.shastore')\n    retcode = 0\n    for item in sorted(all_outputs):\n        if os.path.isfile(item):\n            if recon:\n                sprint('Would remove file: {}'.format(item))\n                continue\n            sprint(\"Attempting to remove file '{}'\", level='verbose')\n            try:\n                os.remove(item)\n                sprint('Removed file', level='verbose')\n            except:\n                errmes = \"Error: file '{}' failed to be removed\"\n                error(errmes.format(item))\n                retcode = 1\n    if ((not retcode) and (not recon)):\n        sprint('All clean', color=True)\n    return retcode", "docstring": "Removes all the output files from all targets. Takes\nthe graph as the only argument\n\nArgs:\nThe networkx graph object\nThe settings dictionary\n\nReturns:\n0 if successful\n1 if removing even one file failed", "source": "codesearchnet"}
{"code": "def language_from_str(language_def, metamodel):\n    \n\n    if type(language_def) is not text:\n        raise TextXError(\"textX accepts only unicode strings.\")\n\n    if metamodel.debug:\n        metamodel.dprint(\"*** PARSING LANGUAGE DEFINITION ***\")\n\n    \n    if metamodel.debug in textX_parsers:\n        parser = textX_parsers[metamodel.debug]\n    else:\n        \n        \n        parser = ParserPython(textx_model, comment_def=comment,\n                              ignore_case=False,\n                              reduce_tree=False,\n                              memoization=metamodel.memoization,\n                              debug=metamodel.debug,\n                              file=metamodel.file)\n\n        \n        textX_parsers[metamodel.debug] = parser\n\n    \n    try:\n        parse_tree = parser.parse(language_def)\n    except NoMatch as e:\n        line, col = parser.pos_to_linecol(e.position)\n        raise TextXSyntaxError(text(e), line, col)\n\n    \n    \n    lang_parser = visit_parse_tree(parse_tree,\n                                   TextXVisitor(parser, metamodel))\n\n    \n    metamodel.validate()\n\n    \n    lang_parser.metamodel = metamodel\n    metamodel._parser_blueprint = lang_parser\n\n    if metamodel.debug:\n        \n        PMDOTExporter().exportFile(\n            lang_parser.parser_model,\n            \"{}_parser_model.dot\".format(metamodel.rootcls.__name__))\n\n    return lang_parser", "docstring": "Constructs parser and initializes metamodel from language description\ngiven in textX language.\n\nArgs:\nlanguage_def (str): A language description in textX.\nmetamodel (TextXMetaModel): A metamodel to initialize.\n\nReturns:\nParser for the new language.", "source": "juraj-google-style"}
{"code": "def __add__(self, r):\n    if not isinstance(r, TestResult):\n        raise TypeError('Operand %s of type %s is not a TestResult.' % (r, type(r)))\n    sum_result = TestResult()\n    for name in sum_result.__dict__:\n        r_value = getattr(r, name)\n        l_value = getattr(self, name)\n        if isinstance(r_value, list):\n            setattr(sum_result, name, l_value + r_value)\n    return sum_result", "docstring": "Overrides '+' operator for TestResult class.\n\nThe add operator merges two TestResult objects by concatenating all of\ntheir lists together.\n\nArgs:\nr: another instance of TestResult to be added\n\nReturns:\nA TestResult instance that's the sum of two TestResult instances.", "source": "github-repos"}
{"code": "def panel(self, panel_id):\n        \n        if not isinstance(panel_id, ObjectId):\n            panel_id = ObjectId(panel_id)\n        panel_obj = self.panel_collection.find_one({'_id': panel_id})\n        return panel_obj", "docstring": "Fetch a gene panel by '_id'.\n\nArgs:\npanel_id (str, ObjectId): str or ObjectId of document ObjectId\n\nReturns:\ndict: panel object or `None` if panel not found", "source": "juraj-google-style"}
{"code": "def get_labels(self, **query_params):\n    labels = self.get_labels_json(self.base_uri, query_params=query_params)\n    labels_list = []\n    for label_json in labels:\n        labels_list.append(self.create_label(label_json))\n    return labels_list", "docstring": "Get the labels attached to this board. Returns a label of Label\nobjects.\n\nReturns:\nlist(Label): The labels attached to this board", "source": "codesearchnet"}
{"code": "def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids,\n                           platform_restrictions=None):\n  \n  campaign_extension_setting_service = client.GetService(\n      'CampaignExtensionSettingService', 'v201809')\n\n  extension_feed_items = [{\n      CreateSitelinkFeedItem(feed_items, feed_item_id)\n  } for feed_item_id in feed_item_ids]\n\n  extension_setting = {\n      'extensions': extension_feed_items\n  }\n\n  if platform_restrictions:\n    extension_setting['platformRestrictions'] = platform_restrictions\n\n  campaign_extension_setting = {\n      'campaignId': campaign_feed['campaignId'],\n      'extensionType': 'SITELINK',\n      'extensionSetting': extension_setting\n  }\n\n  operation = {\n      'operand': campaign_extension_setting,\n      'operator': 'ADD'\n  }\n\n  campaign_extension_setting_service.mutate([operation])", "docstring": "Creates the extension setting for a list of Feed Items.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_items: the list of all Feed Items.\ncampaign_feed: the original Campaign Feed.\nfeed_item_ids: the Ids of the feed items for which extension settings should\nbe created.\nplatform_restrictions: an optional Platform Restriction for the Feed items.", "source": "juraj-google-style"}
{"code": "def get(self, center, target, date):\n    if ((center.index, target.index) in self.segments):\n        (pos, vel) = self.segments[(center.index, target.index)].compute_and_differentiate(date.jd)\n        sign = 1\n    else:\n        (pos, vel) = self.segments[(target.index, center.index)].compute_and_differentiate(date.jd)\n        sign = (- 1)\n    if (len(pos) == 3):\n        pv = np.concatenate((pos, (vel / S_PER_DAY)))\n    elif (len(pos) == 6):\n        pv = np.array(pos)\n    else:\n        raise JplError('Unknown state vector format')\n    return ((sign * pv) * 1000)", "docstring": "Retrieve the position and velocity of a target with respect to a center\n\nArgs:\ncenter (Target):\ntarget (Target):\ndate (Date):\nReturn:\nnumpy.array: length-6 array position and velocity (in m and m/s) of the\ntarget, with respect to the center", "source": "codesearchnet"}
{"code": "def variable_summaries(vars_, groups=None, scope='weights'):\n  \n  groups = groups or {r'all': r'.*'}\n  grouped = collections.defaultdict(list)\n  for var in vars_:\n    for name, pattern in groups.items():\n      if re.match(pattern, var.name):\n        name = re.sub(pattern, name, var.name)\n        grouped[name].append(var)\n  for name in groups:\n    if name not in grouped:\n      tf.logging.warn(\"No variables matching '{}' group.\".format(name))\n  summaries = []\n  \n  for name, vars_ in grouped.items():\n    vars_ = [tf.reshape(var, [-1]) for var in vars_]\n    vars_ = tf.concat(vars_, 0)\n    summaries.append(tf.summary.histogram(scope + '/' + name, vars_))\n  return tf.summary.merge(summaries)", "docstring": "Create histogram summaries for the provided variables.\n\nSummaries can be grouped via regexes matching variables names.\n\nArgs:\nvars_: List of variables to summarize.\ngroups: Mapping of name to regex for grouping summaries.\nscope: Name scope for this operation.\n\nReturns:\nSummary tensor.", "source": "juraj-google-style"}
{"code": "def _gen(self, optimized, splitstring):\n        \n        \n        \n        self.resolved = {}\n        \n        \n        for nt in self.grammar.grammar_nonterminals_map:\n            for i in self.grammar.grammar_nonterminals_map[nt]:\n                if self.grammar.grammar_rules[i][0] not in self.resolved\\\n                        and not isinstance(self.grammar.grammar_rules[i][1], (set, tuple)):\n                    if self.grammar.grammar_rules[i][1] != '@empty_set' \\\n                            and self.grammar.grammar_rules[i][1] in self.grammar.grammar_terminals:\n\n                        if splitstring:\n                            self.resolved[\n                                self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]\n                        else:\n                            if self.grammar.grammar_rules[i][1] == '&':\n                                self.resolved[self.grammar.grammar_rules[i][0]] = ' '\n                            else:\n                                self.resolved[\n                                    self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]\n                        \n                        \n                        if self._checkfinal(self.grammar.grammar_rules[i][0]):\n                            return self.resolved[self.grammar.grammar_rules[i][0]]\n                        if self.grammar.grammar_rules[i][0] not in self.bfs_queue:\n                            self.bfs_queue.append(self.grammar.grammar_rules[i][0])\n                    if self.grammar.grammar_rules[i][1] == '@empty_set':\n                        self.resolved[self.grammar.grammar_rules[i][0]] = ''\n                        \n                        self.bfs_queue.append(self.grammar.grammar_rules[i][0])\n                    if optimized and self._check_self_to_empty(\n                            self.grammar.grammar_rules[i][1]):\n                        self.resolved[self.grammar.grammar_rules[i][0]] = ''\n                        \n                        if self.grammar.grammar_rules[i][0] not in self.bfs_queue:\n                            self.bfs_queue.append(self.grammar.grammar_rules[i][0])\n\n        \n        \n        \n        change = 1\n        while change:\n            change = 0\n            if not change:\n                ret = self._check_self_nonterminals(optimized)\n                if ret == 1:\n                    change = 1\n                elif ret != 0:\n                    return ret\n            if not change:\n                while not change and len(self.bfs_queue) > 0:\n                    myntr = self.bfs_queue.pop()\n                    ret = self._check_self_replicate(myntr)\n                    if ret == 1:\n                        change = 1\n                    elif ret != 0:\n                        return ret\n                    if optimized and self._check_intemediate(\n                            myntr, self.maxstate):\n                        change = 1\n                        break", "docstring": "Generates a new random object generated from the nonterminal\nArgs:\noptimized (bool): mode of operation - if enabled not all\nCNF rules are included (mitigate O(n^3))\nsplitstring (bool): A boolean for enabling or disabling\nReturns:\nstr: The generated string", "source": "juraj-google-style"}
{"code": "def _case_helper(cond_fn, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, **cond_kwargs):\n    predicates, actions = _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name, allow_python_preds)\n    with ops.name_scope(name, 'case', [predicates]):\n        if default is None:\n            default, predicates, actions = _case_create_default_action(predicates, actions)\n        fn = default\n        for predicate, action in reversed(list(zip(predicates, actions))):\n            fn = functools.partial(cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs)\n        if exclusive:\n            with ops.control_dependencies([_assert_at_most_n_true(predicates, n=1, msg='Input error: exclusive=True')]):\n                return fn()\n        else:\n            return fn()", "docstring": "Implementation of case that allows for different cond functions.\n\nArgs:\ncond_fn: method that has signature and semantics of `cond` above.\npred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a\ncallable which returns a list of tensors.\ndefault: Optional callable that returns a list of tensors.\nexclusive: True iff at most one predicate is allowed to evaluate to `True`.\nname: A name for this operation (optional).\nallow_python_preds: if true, pred_fn_pairs may contain Python bools in\naddition to boolean Tensors\n**cond_kwargs: keyword arguments that will be passed to `cond_fn`.\n\nReturns:\nThe tensors returned by the first pair whose predicate evaluated to True, or\nthose returned by `default` if none does.\n\nRaises:\nTypeError: If `pred_fn_pairs` is not a list/dictionary.\nTypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\nTypeError: If `fns[i]` is not callable for any i, or `default` is not\ncallable.", "source": "github-repos"}
{"code": "def __init__(self, package, ad):\n        \n        super(SnippetClient, self).__init__(app_name=package, ad=ad)\n        self.package = package\n        self._ad = ad\n        self._adb = ad.adb\n        self._proc = None", "docstring": "Initializes a SnippetClient.\n\nArgs:\npackage: (str) The package name of the apk where the snippets are\ndefined.\nad: (AndroidDevice) the device object associated with this client.", "source": "juraj-google-style"}
{"code": "def BreachDepressions(dem, in_place=False, topology='D8'):\n    if (type(dem) is not rdarray):\n        raise Exception('A richdem.rdarray or numpy.ndarray is required!')\n    if (topology not in ['D8', 'D4']):\n        raise Exception('Unknown topology!')\n    if (not in_place):\n        dem = dem.copy()\n    _AddAnalysis(dem, 'BreachDepressions(dem)')\n    demw = dem.wrap()\n    if (topology == 'D8'):\n        _richdem.rdBreachDepressionsD8(demw)\n    elif (topology == 'D4'):\n        _richdem.rdBreachDepressionsD4(demw)\n    dem.copyFromWrapped(demw)\n    if (not in_place):\n        return dem", "docstring": "Breaches all depressions in a DEM.\n\nArgs:\ndem     (rdarray): An elevation model\nin_place (bool):   If True, the DEM is modified in place and there is\nno return; otherwise, a new, altered DEM is returned.\ntopology (string): A topology indicator\n\nReturns:\nDEM without depressions.", "source": "codesearchnet"}
{"code": "def Parse(self, value):\n    \n\n    value_line = value.split(' ')\n    if len(value_line) < 3:\n      raise TextFSMTemplateError('Expect at least 3 tokens on line.')\n\n    if not value_line[2].startswith('('):\n      \n      options = value_line[1]\n      for option in options.split(','):\n        self._AddOption(option)\n      \n      [option.OnCreateOptions() for option in self.options]\n\n      self.name = value_line[2]\n      self.regex = ' '.join(value_line[3:])\n    else:\n      \n      \n      self.name = value_line[1]\n      self.regex = ' '.join(value_line[2:])\n\n    if len(self.name) > self.max_name_len:\n      raise TextFSMTemplateError(\n          \"Invalid Value name '%s' or name too long.\" % self.name)\n\n    if (not re.match(r'^\\(.*\\)$', self.regex) or\n        self.regex.count('(') != self.regex.count(')')):\n      raise TextFSMTemplateError(\n          \"Value '%s' must be contained within a '()' pair.\" % self.regex)\n\n    self.template = re.sub(r'^\\(', '(?P<%s>' % self.name, self.regex)", "docstring": "Parse a 'Value' declaration.\n\nArgs:\nvalue: String line from a template file, must begin with 'Value '.\n\nRaises:\nTextFSMTemplateError: Value declaration contains an error.", "source": "juraj-google-style"}
{"code": "def allreduce(self, x, mesh_axes, reduction_fn_string):\n    \n    if not mesh_axes:\n      return x\n    x = x.to_laid_out_tensor()\n    if reduction_fn_string == \"SUM\":\n      group_assignment = self._create_group_assignment(mesh_axes)\n      group_size = len(group_assignment[0])\n      tf_in = x.one_slice\n      dtype = tf_in.dtype\n      if dtype == tf.float32:\n        cast_to_float32 = False\n      elif dtype == tf.bfloat16:\n        cast_to_float32 = (\n            group_size > self._allreduce_in_bfloat16_max_group_size)\n      else:\n        tf.logging.info(\"Casting %s to float32 for allreduce\" % tf_in.dtype)\n        cast_to_float32 = True\n      if cast_to_float32:\n        tf_in = tf.cast(tf_in, tf.float32)\n      tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)\n      if cast_to_float32:\n        tf_out = tf.cast(tf_out, dtype)\n      return self.LaidOutTensor([tf_out])\n    else:\n      for axis in mesh_axes:\n        x = self.allconcat(x, axis, 0, stack=True)\n        x = self.LaidOutTensor(\n            [mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])\n      return x", "docstring": "Grouped allreduce, (summed across the given dimensions).\n\nArgs:\nx: a LaidOutTensor\nmesh_axes: a list of integers\nreduction_fn_string: \"SUM\"\nReturns:\na LaidOutTensor\nRaises:\nValueError: if the reduction is not yet implemented.", "source": "juraj-google-style"}
{"code": "def do_ams_patch(endpoint, path, body, access_token):\n    \n    headers = {\"Content-Type\": json_acceptformat,\n               \"DataServiceVersion\": dsversion_min,\n               \"MaxDataServiceVersion\": dsversion_max,\n               \"Accept\": json_acceptformat,\n               \"Accept-Charset\" : charset,\n               \"Authorization\": \"Bearer \" + access_token,\n               \"x-ms-version\" : xmsversion}\n    response = requests.patch(endpoint, data=body, headers=headers, allow_redirects=False)\n    \n    \n    if response.status_code == 301:\n        redirected_url = ''.join([response.headers['location'], path])\n        response = requests.patch(redirected_url, data=body, headers=headers)\n    return response", "docstring": "Do a AMS PATCH request and return JSON.\nArgs:\nendpoint (str): Azure Media Services Initial Endpoint.\npath (str): Azure Media Services Endpoint Path.\nbody  (str): Azure Media Services Content Body.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Compute/',\n                        'locations/', location,\n                        '/publishers/', publisher,\n                        '/artifacttypes/vmimage/offers/', offer,\n                        '/skus/', sku,\n                        '/versions?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List available versions for a given publisher's sku.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location. E.g. westus.\npublisher (str): VM image publisher. E.g. MicrosoftWindowsServer.\noffer (str): VM image offer. E.g. WindowsServer.\nsku (str): VM image sku. E.g. 2016-Datacenter.\n\nReturns:\nHTTP response with JSON list of versions.", "source": "juraj-google-style"}
{"code": "def get_forced_variation(self, experiment_key, user_id):\n    if (not self.is_valid):\n        self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation'))\n        return None\n    if (not validator.is_non_empty_string(experiment_key)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n        return None\n    if (not isinstance(user_id, string_types)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n        return None\n    forced_variation = self.config.get_forced_variation(experiment_key, user_id)\n    return (forced_variation.key if forced_variation else None)", "docstring": "Gets the forced variation for a given user and experiment.\n\nArgs:\nexperiment_key: A string key identifying the experiment.\nuser_id: The user ID.\n\nReturns:\nThe forced variation key. None if no forced variation key.", "source": "codesearchnet"}
{"code": "def insert_before(self, value: Union[RawValue, Value],\n                      raw: bool = False) -> \"ArrayEntry\":\n        \n        return ArrayEntry(self.index, self.before, self.after.cons(self.value),\n                          self._cook_value(value, raw), self.parinst,\n                          self.schema_node, datetime.now())", "docstring": "Insert a new entry before the receiver.\n\nArgs:\nvalue: The value of the new entry.\nraw: Flag to be set if `value` is raw.\n\nReturns:\nAn instance node of the new inserted entry.", "source": "juraj-google-style"}
{"code": "def Artifacts(self, os_name=None, cpe=None, label=None):\n    hit = (lambda x: ((x[0] == x[1]) or (not x[0])))\n    seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]\n    return all(map(hit, seq))", "docstring": "Whether the conditions applies, modulo host data.\n\nArgs:\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nTrue if os_name, cpe or labels match. Empty values are ignored.", "source": "codesearchnet"}
{"code": "async def game(\n            self, short_name, *, id=None,\n            text=None, parse_mode=(), link_preview=True,\n            geo=None, period=60, contact=None, game=False, buttons=None\n    ):\n        \n        result = types.InputBotInlineResultGame(\n            id=id or '',\n            short_name=short_name,\n            send_message=await self._message(\n                text=text, parse_mode=parse_mode, link_preview=link_preview,\n                geo=geo, period=period,\n                contact=contact,\n                game=game,\n                buttons=buttons\n            )\n        )\n        if id is None:\n            result.id = hashlib.sha256(bytes(result)).hexdigest()\n\n        return result", "docstring": "Creates a new inline result of game type.\n\nArgs:\nshort_name (`str`):\nThe short name of the game to use.", "source": "juraj-google-style"}
{"code": "def Analyze(self, source_path, output_writer):\n    if (not os.path.exists(source_path)):\n        raise RuntimeError('No such source: {0:s}.'.format(source_path))\n    scan_context = source_scanner.SourceScannerContext()\n    scan_path_spec = None\n    scan_step = 0\n    scan_context.OpenSourcePath(source_path)\n    while True:\n        self._source_scanner.Scan(scan_context, auto_recurse=self._auto_recurse, scan_path_spec=scan_path_spec)\n        if (not scan_context.updated):\n            break\n        if (not self._auto_recurse):\n            output_writer.WriteScanContext(scan_context, scan_step=scan_step)\n        scan_step += 1\n        if (scan_context.source_type in [definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]):\n            break\n        for locked_scan_node in scan_context.locked_scan_nodes:\n            self._PromptUserForEncryptedVolumeCredential(scan_context, locked_scan_node, output_writer)\n        if (not self._auto_recurse):\n            scan_node = scan_context.GetUnscannedScanNode()\n            if (not scan_node):\n                return\n            scan_path_spec = scan_node.path_spec\n    if self._auto_recurse:\n        output_writer.WriteScanContext(scan_context)", "docstring": "Analyzes the source.\n\nArgs:\nsource_path (str): the source path.\noutput_writer (StdoutWriter): the output writer.\n\nRaises:\nRuntimeError: if the source path does not exists, or if the source path\nis not a file or directory, or if the format of or within the source\nfile is not supported.", "source": "codesearchnet"}
{"code": "def get_site_spd_dos(self, site):\n        \n        spd_dos = dict()\n        for orb, pdos in self.pdos[site].items():\n            orbital_type = _get_orb_type(orb)\n            if orbital_type in spd_dos:\n                spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)\n            else:\n                spd_dos[orbital_type] = pdos\n        return {orb: Dos(self.efermi, self.energies, densities)\n                for orb, densities in spd_dos.items()}", "docstring": "Get orbital projected Dos of a particular site\n\nArgs:\nsite: Site in Structure associated with CompleteDos.\n\nReturns:\ndict of {orbital: Dos}, e.g. {\"s\": Dos object, ...}", "source": "juraj-google-style"}
{"code": "def rot90(array, k=1, axes=(0, 1)):\n    array = convert_to_tensor(array)\n    if array.shape.rank < 2:\n        raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.shape.rank}')\n    if len(axes) != 2 or axes[0] == axes[1]:\n        raise ValueError(f'Invalid axes: {axes}. Axes must be a tuple of two different dimensions.')\n    k = k % 4\n    if k == 0:\n        return array\n    axes = tuple((axis if axis >= 0 else array.shape.rank + axis for axis in axes))\n    perm = [i for i in range(array.shape.rank) if i not in axes]\n    perm.extend(axes)\n    array = tf.transpose(array, perm)\n    shape = tf.shape(array)\n    non_rot_shape = shape[:-2]\n    h, w = (shape[-2], shape[-1])\n    array = tf.reshape(array, tf.concat([[-1], [h, w]], axis=0))\n    array = tf.reverse(array, axis=[2])\n    array = tf.transpose(array, [0, 2, 1])\n    if k % 2 == 1:\n        final_h, final_w = (w, h)\n    else:\n        final_h, final_w = (h, w)\n    if k > 1:\n        array = tf.reshape(array, tf.concat([[-1], [final_h, final_w]], axis=0))\n        for _ in range(k - 1):\n            array = tf.reverse(array, axis=[2])\n            array = tf.transpose(array, [0, 2, 1])\n    final_shape = tf.concat([non_rot_shape, [final_h, final_w]], axis=0)\n    array = tf.reshape(array, final_shape)\n    inv_perm = [0] * len(perm)\n    for i, p in enumerate(perm):\n        inv_perm[p] = i\n    array = tf.transpose(array, inv_perm)\n    return array", "docstring": "Rotate an array by 90 degrees in the specified plane.\n\nArgs:\narray: Input tensor\nk: Number of 90-degree rotations (default=1)\naxes: Tuple of two axes that define the plane of rotation.\nDefaults to (0, 1).\n\nReturns:\nRotated tensor with correct shape transformation", "source": "github-repos"}
{"code": "def update(self, id, newObj):\n        \n        newObj = self.validation(newObj)\n        for obj in self.model.db:\n            if obj[\"id\"] != id:\n                continue\n\n            newObj.pop(\"id\", None)\n            obj.update(newObj)\n            obj = self._cast_model(obj)\n            if not self._batch.enable.is_set():\n                self.model.save_db()\n            return obj\n\n        return None", "docstring": "Update a object\nArgs:\nid (int): Target Object ID\nnewObj (object): New object will be merged into original object\nReturns:\nObject: Updated object\nNone: If specified object id is not found\nMultipleInvalid: If input object is invaild", "source": "juraj-google-style"}
{"code": "def grappler_optimize(graph, fetches=None, config_proto=None):\n    if config_proto is None:\n        config_proto = config_pb2.ConfigProto()\n        config_proto.graph_options.rewrite_options.min_graph_nodes = -1\n    if fetches is not None:\n        for fetch in fetches:\n            graph.add_to_collection('train_op', fetch)\n    metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def())\n    return tf_optimizer.OptimizeGraph(config_proto, metagraph)", "docstring": "Tries to optimize the provided graph using grappler.\n\nArgs:\ngraph: A `tf.Graph` instance containing the graph to optimize.\nfetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).\nGrappler uses the 'train_op' collection to look for fetches, so if not\nprovided this collection should be non-empty.\nconfig_proto: An optional `tf.compat.v1.ConfigProto` to use when rewriting\nthe graph.\n\nReturns:\nA `tf.compat.v1.GraphDef` containing the rewritten graph.", "source": "github-repos"}
{"code": "def apply_inverse(self, y, in_place=False):\n        r\n        return cho_solve(self._factor, y, overwrite_b=in_place)", "docstring": "r\"\"\"\nApply the inverse of the covariance matrix to the input by solving\n\n.. math::\n\nK\\,x = y\n\nArgs:\ny (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or\nmatrix :math:`y`.\nin_place (Optional[bool]): Should the data in ``y`` be overwritten\nwith the result :math:`x`? (default: ``False``)", "source": "juraj-google-style"}
{"code": "def to_csv(self, filename: str, latexify_names: bool=False):\n    elements = set()\n    for entry in self.entries:\n        elements.update(entry.composition.elements)\n    elements = sorted(list(elements), key=(lambda a: a.X))\n    writer = csv.writer(open(filename, 'w'), delimiter=unicode2str(','), quotechar=unicode2str('\"'), quoting=csv.QUOTE_MINIMAL)\n    writer.writerow(((['Name'] + elements) + ['Energy']))\n    for entry in self.entries:\n        row = [(entry.name if (not latexify_names) else re.sub('([0-9]+)', '_{\\\\1}', entry.name))]\n        row.extend([entry.composition[el] for el in elements])\n        row.append(entry.energy)\n        writer.writerow(row)", "docstring": "Exports PDEntries to a csv\n\nArgs:\nfilename: Filename to write to.\nentries: PDEntries to export.\nlatexify_names: Format entry names to be LaTex compatible,\ne.g., Li_{2}O", "source": "codesearchnet"}
{"code": "def default(self, obj):\n    if isinstance(obj, decimal.Decimal):\n        obj = format(obj, 'f')\n        str_digit = text_type(obj)\n        return (str_digit.rstrip('0').rstrip('.') if ('.' in str_digit) else str_digit)\n    elif isinstance(obj, phonenumbers.PhoneNumber):\n        return phonenumbers.format_number(obj, phonenumbers.PhoneNumberFormat.E164)\n    elif isinstance(obj, pendulum.Pendulum):\n        return text_type(obj)\n    elif isinstance(obj, arrow.Arrow):\n        return text_type(obj)\n    elif isinstance(obj, (datetime.datetime, datetime.date)):\n        return obj.isoformat()\n    try:\n        return list(iter(obj))\n    except TypeError:\n        pass\n    return super(FleakerJSONEncoder, self).default(obj)", "docstring": "Encode individual objects into their JSON representation.\n\nThis method is used by :class:`flask.json.JSONEncoder` to encode\nindividual items in the JSON object.\n\nArgs:\nobj (object): Any Python object we wish to convert to JSON.\n\nReturns:\nstr: The stringified, valid JSON representation of our provided\nobject.", "source": "codesearchnet"}
{"code": "def index(self, entries):\n    if (not self.is_empty()):\n        raise ValueError('Cannot call index again on a non-empty index')\n    if (not isinstance(entries, list)):\n        queue = deque([])\n        for (key, minhash, size) in entries:\n            if (size <= 0):\n                raise ValueError('Set size must be positive')\n            queue.append((key, minhash, size))\n        entries = list(queue)\n    if (len(entries) == 0):\n        raise ValueError('entries is empty')\n    (sizes, counts) = np.array(sorted(Counter((e[2] for e in entries)).most_common())).T\n    partitions = optimal_partitions(sizes, counts, len(self.indexes))\n    for (i, (lower, upper)) in enumerate(partitions):\n        (self.lowers[i], self.uppers[i]) = (lower, upper)\n    entries.sort(key=(lambda e: e[2]))\n    curr_part = 0\n    for (key, minhash, size) in entries:\n        if (size > self.uppers[curr_part]):\n            curr_part += 1\n        for r in self.indexes[curr_part]:\n            self.indexes[curr_part][r].insert(key, minhash)", "docstring": "Index all sets given their keys, MinHashes, and sizes.\nIt can be called only once after the index is created.\n\nArgs:\nentries (`iterable` of `tuple`): An iterable of tuples, each must be\nin the form of `(key, minhash, size)`, where `key` is the unique\nidentifier of a set, `minhash` is the MinHash of the set,\nand `size` is the size or number of unique items in the set.\n\nNote:\n`size` must be positive.", "source": "codesearchnet"}
{"code": "def _parse_block_ref(cls, block_ref, deprecated=False):\n    if (deprecated and (block_ref is None)):\n        return None\n    if isinstance(block_ref, LocalId):\n        return block_ref\n    is_valid_deprecated = (deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref))\n    is_valid = cls.ALLOWED_ID_RE.match(block_ref)\n    if (is_valid or is_valid_deprecated):\n        return block_ref\n    else:\n        raise InvalidKeyError(cls, block_ref)", "docstring": "Given `block_ref`, tries to parse it into a valid block reference.\n\nReturns `block_ref` if it is valid.\n\nRaises:\nInvalidKeyError: if `block_ref` is invalid.", "source": "codesearchnet"}
{"code": "def set_card_simple(self, title, content):\n        \n        self.response.card.type = 'Simple'\n        self.response.card.title = title\n        self.response.card.content = content", "docstring": "Set response card as simple type.\n\ntitle and content cannot exceed 8,000 characters.\n\nArgs:\ntitle: str. Title of Simple or Standard type card.\ncontent: str. Content of Simple type card.", "source": "juraj-google-style"}
{"code": "def control(controllee: Union[('cirq.Gate', op_tree.OP_TREE)], control_qubits: Sequence['cirq.Qid']=None, default: Any=RaiseTypeErrorIfNotProvided) -> Any:\n    if (control_qubits is None):\n        control_qubits = []\n    controller = getattr(controllee, 'controlled_by', None)\n    result = (NotImplemented if (controller is None) else controller(*control_qubits))\n    if (result is not NotImplemented):\n        return result\n    if isinstance(controllee, collections.Iterable):\n        return op_tree.transform_op_tree(controllee, op_transformation=(lambda op: control(op, control_qubits)))\n    if (default is not RaiseTypeErrorIfNotProvided):\n        return default\n    if (controller is None):\n        raise TypeError(\"object of type '{}' has no controlled_by method.\".format(type(controllee)))\n    raise TypeError(\"object of type '{}' does have a controlled_by method, but it returned NotImplemented.\".format(type(controllee)))", "docstring": "Returns a Controlled version of the given value, if defined.\n\nControllees define how to be controlled by defining a method\ncontrolled_by(self, control_qubits). Note that the method may return\nNotImplemented to indicate a particular controlling can't be done.\n\nArgs:\ncontrollee: The gate, operation or iterable of operations to control.\ncontrol_qubits: A list of Qids that would control this controllee.\ndefault: Determines the fallback behavior when `controllee` doesn't\nhave a controlling defined. If `default` is not set and the\nfallback occurs, a TypeError is raised instead.\n\nReturns:\nIf `controllee` has a controlled_by method that returns something\nbesides NotImplemented, that result is returned. For an OP_TREE,\ntransformation is applied at the leaf. Otherwise, if a default value\nwas specified, the default value is returned.\n\nRaises:\nTypeError: `controllee` doesn't have a controlled_by method (or that\nmethod returned NotImplemented) and no `default` was specified.", "source": "codesearchnet"}
{"code": "def validate_composite_type_param(type_param, error_msg_prefix):\n    possible_classes = [type, TypeConstraint]\n    is_not_type_constraint = not is_typing_generic(type_param) and (not isinstance(type_param, tuple(possible_classes))) and (type_param is not None) and (getattr(type_param, '__module__', None) != 'typing')\n    if sys.version_info.major == 3 and sys.version_info.minor >= 10:\n        if isinstance(type_param, types.UnionType):\n            is_not_type_constraint = False\n    if is_not_type_constraint:\n        raise TypeError('%s must be a non-sequence, a type, or a TypeConstraint. %s is an instance of %s.' % (error_msg_prefix, type_param, type_param.__class__.__name__))", "docstring": "Determines if an object is a valid type parameter to a\n:class:`CompositeTypeHint`.\n\nImplements sanity checking to disallow things like::\n\nList[1, 2, 3] or Dict[5].\n\nArgs:\ntype_param: An object instance.\nerror_msg_prefix (:class:`str`): A string prefix used to format an error\nmessage in the case of an exception.\n\nRaises:\nTypeError: If the passed **type_param** is not a valid type\nparameter for a :class:`CompositeTypeHint`.", "source": "github-repos"}
{"code": "def Get(self, name, default=utils.NotAValue, context=None):\n    if (not self.initialized):\n        if (name not in self.constants):\n            raise RuntimeError((\"Error while retrieving %s: Configuration hasn't been initialized yet.\" % name))\n    if context:\n        if (isinstance(context, string_types) or (not isinstance(context, collections.Iterable))):\n            raise ValueError(('context should be a list, got %r' % context))\n    calc_context = context\n    cache_key = (name, tuple((context or ())))\n    if ((default is utils.NotAValue) and (cache_key in self.cache)):\n        return self.cache[cache_key]\n    if (context is None):\n        calc_context = self.context\n    type_info_obj = self.FindTypeInfo(name)\n    (_, return_value) = self._GetValue(name, context=calc_context, default=default)\n    if (return_value is default):\n        return default\n    try:\n        return_value = self.InterpolateValue(return_value, default_section=name.split('.')[0], type_info_obj=type_info_obj, context=calc_context)\n    except (lexer.ParseError, ValueError) as e:\n        if (default is not utils.NotAValue):\n            return default\n        raise ConfigFormatError(('While parsing %s: %s' % (name, e)))\n    try:\n        new_value = type_info_obj.Validate(return_value)\n        if (new_value is not None):\n            return_value = new_value\n    except ValueError:\n        if (default is not utils.NotAValue):\n            return default\n        raise\n    if (default is utils.NotAValue):\n        self.cache[cache_key] = return_value\n    return return_value", "docstring": "Get the value contained  by the named parameter.\n\nThis method applies interpolation/escaping of the named parameter and\nretrieves the interpolated value.\n\nArgs:\nname: The name of the parameter to retrieve. This should be in the format\nof \"Section.name\"\ndefault: If retrieving the value results in an error, return this default.\ncontext: A list of context strings to resolve the configuration. This is a\nset of roles the caller is current executing with. For example (client,\nwindows). If not specified we take the context from the current thread's\nTLS stack.\n\nReturns:\nThe value of the parameter.\nRaises:\nConfigFormatError: if verify=True and the config doesn't validate.\nRuntimeError: if a value is retrieved before the config is initialized.\nValueError: if a bad context is passed.", "source": "codesearchnet"}
{"code": "def utterances_from_dir(eaf_dir: Path, tier_prefixes: Tuple[(str, ...)]) -> List[Utterance]:\n    logger.info('EAF from directory: {}, searching with tier_prefixes {}'.format(eaf_dir, tier_prefixes))\n    utterances = []\n    for eaf_path in eaf_dir.glob('**/*.eaf'):\n        eaf_utterances = utterances_from_eaf(eaf_path, tier_prefixes)\n        utterances.extend(eaf_utterances)\n    return utterances", "docstring": "Returns the utterances found in ELAN files in a directory.\n\nRecursively explores the directory, gathering ELAN files and extracting\nutterances from them for tiers that start with the specified prefixes.\n\nArgs:\neaf_dir: A path to the directory to be searched\ntier_prefixes: Stings matching the start of ELAN tier names that are to\nbe extracted. For example, if you want to extract from tiers \"xv-Jane\"\nand \"xv-Mark\", then tier_prefixes = [\"xv\"] would do the job.\n\nReturns:\nA list of Utterance objects.", "source": "codesearchnet"}
{"code": "def loads(s, single=False, version=_default_version,\n          strict=False, errors='warn'):\n    \n    ms = deserialize(s, version=version, strict=strict, errors=errors)\n    if single:\n        return next(ms)\n    else:\n        return ms", "docstring": "Deserialize SimpleMRS string representations\n\nArgs:\ns (str): a SimpleMRS string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of Xmrs objects (unless *single* is `True`)", "source": "juraj-google-style"}
{"code": "def select_with_index(self, selector=IndexedElement, transform=identity):\n    if self.closed():\n        raise ValueError('Attempt to call select_with_index() on a closed Queryable.')\n    if (not is_callable(selector)):\n        raise TypeError('select_with_index() parameter selector={0} is not callable'.format(repr(selector)))\n    if (not is_callable(transform)):\n        raise TypeError('select_with_index() parameter item_selector={0} is not callable'.format(repr(selector)))\n    return self._create(itertools.starmap(selector, enumerate(imap(transform, iter(self)))))", "docstring": "Transforms each element of a sequence into a new form, incorporating\nthe index of the element.\n\nEach element is transformed through a selector function which accepts\nthe element value and its zero-based index in the source sequence. The\ngenerated sequence is lazily evaluated.\n\nNote: This method uses deferred execution.\n\nArgs:\nselector: A binary function mapping the index of a value in\nthe source sequence and the element value itself to the\ncorresponding value in the generated sequence. The two\npositional arguments of the selector function are the zero-\nbased index of the current element and the value of the current\nelement. The return value should be the corresponding value in\nthe result sequence. The default selector produces an IndexedElement\ncontaining the index and the element giving this function\nsimilar behaviour to the built-in enumerate().\n\nReturns:\nA Queryable whose elements are the result of invoking the selector\nfunction on each element of the source sequence\n\nRaises:\nValueError: If this Queryable has been closed.\nTypeError: If selector is not callable.", "source": "codesearchnet"}
{"code": "def _ParseMFTAttribute(self, parser_mediator, mft_entry, mft_attribute):\n    \n    if mft_entry.is_empty() or mft_entry.base_record_file_reference != 0:\n      return\n\n    if mft_attribute.attribute_type in [\n        self._MFT_ATTRIBUTE_STANDARD_INFORMATION,\n        self._MFT_ATTRIBUTE_FILE_NAME]:\n\n      file_attribute_flags = getattr(\n          mft_attribute, 'file_attribute_flags', None)\n      name = getattr(mft_attribute, 'name', None)\n      parent_file_reference = getattr(\n          mft_attribute, 'parent_file_reference', None)\n\n      event_data = NTFSFileStatEventData()\n      event_data.attribute_type = mft_attribute.attribute_type\n      event_data.file_attribute_flags = file_attribute_flags\n      event_data.file_reference = mft_entry.file_reference\n      event_data.is_allocated = mft_entry.is_allocated()\n      event_data.name = name\n      event_data.parent_file_reference = parent_file_reference\n\n      try:\n        creation_time = mft_attribute.get_creation_time_as_integer()\n      except OverflowError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to read the creation timestamp from MFT attribute: '\n            '0x{0:08x} with error: {1!s}').format(\n                mft_attribute.attribute_type, exception))\n        creation_time = None\n\n      if creation_time is not None:\n        date_time = self._GetDateTime(creation_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      try:\n        modification_time = mft_attribute.get_modification_time_as_integer()\n      except OverflowError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to read the modification timestamp from MFT attribute: '\n            '0x{0:08x} with error: {1!s}').format(\n                mft_attribute.attribute_type, exception))\n        modification_time = None\n\n      if modification_time is not None:\n        date_time = self._GetDateTime(modification_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      try:\n        access_time = mft_attribute.get_access_time_as_integer()\n      except OverflowError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to read the access timestamp from MFT attribute: '\n            '0x{0:08x} with error: {1!s}').format(\n                exception, mft_attribute.attribute_type))\n        access_time = None\n\n      if access_time is not None:\n        date_time = self._GetDateTime(access_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      try:\n        entry_modification_time = (\n            mft_attribute.get_entry_modification_time_as_integer())\n      except OverflowError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to read the entry modification timestamp from MFT '\n            'attribute: 0x{0:08x} with error: {1!s}').format(\n                mft_attribute.attribute_type, exception))\n        entry_modification_time = None\n\n      if entry_modification_time is not None:\n        date_time = self._GetDateTime(entry_modification_time)\n        event = 
time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    elif mft_attribute.attribute_type == self._MFT_ATTRIBUTE_OBJECT_ID:\n      display_name = '$MFT: {0:d}-{1:d}'.format(\n          mft_entry.file_reference & 0xffffffffffff,\n          mft_entry.file_reference >> 48)\n\n      if mft_attribute.droid_file_identifier:\n        try:\n          self._ParseDistributedTrackingIdentifier(\n              parser_mediator, mft_attribute.droid_file_identifier,\n              display_name)\n\n        except (TypeError, ValueError) as exception:\n          parser_mediator.ProduceExtractionWarning((\n              'unable to read droid file identifier from attribute: 0x{0:08x} '\n              'with error: {1!s}').format(\n                  mft_attribute.attribute_type, exception))\n\n      if mft_attribute.birth_droid_file_identifier:\n        try:\n          self._ParseDistributedTrackingIdentifier(\n              parser_mediator, mft_attribute.droid_file_identifier,\n              display_name)\n\n        except (TypeError, ValueError) as exception:\n          parser_mediator.ProduceExtractionWarning((\n              'unable to read birth droid file identifier from attribute: '\n              '0x{0:08x} with error: {1!s}').format(\n                  mft_attribute.attribute_type, exception))", "docstring": "Extract data from a NFTS $MFT attribute.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmft_entry (pyfsntfs.file_entry): MFT entry.\nmft_attribute (pyfsntfs.attribute): MFT attribute.", "source": "juraj-google-style"}
{"code": "def _ParseSourcePathOption(self, options):\n    \n    self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)\n    if not self._source_path:\n      raise errors.BadConfigOption('Missing source path.')\n\n    self._source_path = os.path.abspath(self._source_path)", "docstring": "Parses the source path option.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def add_outbound_connection(self, uri):\n        \n        LOGGER.debug(\"Adding connection to %s\", uri)\n        conn = OutboundConnection(\n            connections=self._connections,\n            endpoint=uri,\n            dispatcher=self._dispatcher,\n            zmq_identity=self._zmq_identity,\n            secured=self._secured,\n            server_public_key=self._server_public_key,\n            server_private_key=self._server_private_key,\n            future_callback_threadpool=self._future_callback_threadpool,\n            heartbeat=True,\n            connection_timeout=self._connection_timeout)\n\n        self.outbound_connections[uri] = conn\n        conn.start()\n\n        self._add_connection(conn, uri)\n\n        connect_message = ConnectionRequest(endpoint=self._public_endpoint)\n        conn.send(\n            validator_pb2.Message.NETWORK_CONNECT,\n            connect_message.SerializeToString(),\n            callback=partial(\n                self._connect_callback,\n                connection=conn,\n            ))\n\n        return conn", "docstring": "Adds an outbound connection to the network.\n\nArgs:\nuri (str): The zmq-style (e.g. tcp://hostname:port) uri\nto attempt to connect to.", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        res = False\n        if len(self) == len(other):\n            if np.all(self._z == other.z) and np.all(self._x == other.x):\n                res = True\n        return res", "docstring": "Return True if all Pauli terms are equal.\n\nArgs:\nother (Pauli): other pauli\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def imresize(img, size, return_scale=False, interpolation='bilinear'):\n    (h, w) = img.shape[:2]\n    resized_img = cv2.resize(img, size, interpolation=interp_codes[interpolation])\n    if (not return_scale):\n        return resized_img\n    else:\n        w_scale = (size[0] / w)\n        h_scale = (size[1] / h)\n        return (resized_img, w_scale, h_scale)", "docstring": "Resize image to a given size.\n\nArgs:\nimg (ndarray): The input image.\nsize (tuple): Target (w, h).\nreturn_scale (bool): Whether to return `w_scale` and `h_scale`.\ninterpolation (str): Interpolation method, accepted values are\n\"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\".\n\nReturns:\ntuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or\n`resized_img`.", "source": "codesearchnet"}
{"code": "def scan_meta_graph_def(meta_graph_def, op_denylist):\n    ops_in_metagraph = set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))\n    denylisted_ops = op_denylist & ops_in_metagraph\n    if denylisted_ops:\n        print('MetaGraph with tag set %s contains the following denylisted ops:' % meta_graph_def.meta_info_def.tags, denylisted_ops)\n    else:\n        print('MetaGraph with tag set %s does not contain the default denylisted ops:' % meta_graph_def.meta_info_def.tags, op_denylist)", "docstring": "Scans meta_graph_def and reports if there are ops on denylist.\n\nPrint ops if they are on denylist, or print success if no denylisted ops\nfound.\n\nArgs:\nmeta_graph_def: MetaGraphDef protocol buffer.\nop_denylist: set of ops to scan for.", "source": "github-repos"}
{"code": "def report_error(self, read_tuple_name, error_name, wrong='', message='', warning=False):\n    if ((not self.report_only_first) or (error_name not in self.reported_errors)):\n        print('\\t'.join([('error' if (warning == False) else 'warning'), read_tuple_name, error_name, wrong, message]))\n    self.reported_errors.add(error_name)\n    if warning:\n        self.warning_has_been_reported = True\n    else:\n        self.error_has_been_reported = True", "docstring": "Report an error.\n\nArgs:\nread_tuple_name (): Name of the read tuple.\nerror_name (): Name of the error.\nwrong (str): What is wrong.\nmessage (str): Additional msessage to be printed.\nwarning (bool): Warning (not an error).", "source": "codesearchnet"}
{"code": "def assign_device(cls, core):\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MAXIMAL, tile_assignment_dimensions=[1], tile_assignment_devices=[core]))", "docstring": "Returns an AssignDevice sharding attribute.\n\nThis causes an op to be computed in its entirety only on one core in\nthe XLA device.\nArgs:\ncore: The core to assign this Op to.", "source": "github-repos"}
{"code": "def get_url_distribution(self, params=None):\n        \n        params = params or {}\n        all_responses = {}\n        api_name = 'virustotal-url-distribution'\n\n        response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution')\n        self._extract_response_chunks(all_responses, response_chunks, api_name)\n\n        return all_responses", "docstring": "Retrieves a live feed with the latest URLs submitted to VT.\n\nArgs:\nresources: a dictionary with name and value for optional arguments\nReturns:\nA dict with the VT report.", "source": "juraj-google-style"}
{"code": "def func_load(code, defaults=None, closure=None, globs=None):\n    if isinstance(code, (tuple, list)):\n        code, defaults, closure = code\n        if isinstance(defaults, list):\n            defaults = tuple(defaults)\n\n    def ensure_value_to_cell(value):\n        \n\n        def dummy_fn():\n            value\n        cell_value = dummy_fn.__closure__[0]\n        if not isinstance(value, type(cell_value)):\n            return cell_value\n        return value\n    if closure is not None:\n        closure = tuple((ensure_value_to_cell(_) for _ in closure))\n    try:\n        raw_code = codecs.decode(code.encode('ascii'), 'base64')\n    except (UnicodeEncodeError, binascii.Error):\n        raw_code = code.encode('raw_unicode_escape')\n    code = marshal.loads(raw_code)\n    if globs is None:\n        globs = globals()\n    return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure)", "docstring": "Deserializes a user defined function.\n\nArgs:\ncode: bytecode of the function.\ndefaults: defaults of the function.\nclosure: closure of the function.\nglobs: dictionary of global objects.\n\nReturns:\nA function object.", "source": "github-repos"}
{"code": "def restore(self, state):\n        \n        self._clear()\n        self._parseUserInfo({'labels': state['labels']})\n        self._parseNodes(state['nodes'])\n        self._keep_version = state['keep_version']", "docstring": "Unserialize saved note data.\n\nArgs:\nstate (dict): Serialized state to load.", "source": "juraj-google-style"}
{"code": "def get_or_create_hosted_zone(client, zone_name):\n    zone_id = get_hosted_zone_by_name(client, zone_name)\n    if zone_id:\n        return zone_id\n    logger.debug('Zone %s does not exist, creating.', zone_name)\n    reference = uuid.uuid4().hex\n    response = client.create_hosted_zone(Name=zone_name, CallerReference=reference)\n    return parse_zone_id(response['HostedZone']['Id'])", "docstring": "Get the Id of an existing zone, or create it.\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\nstring: The Id of the Hosted Zone.", "source": "codesearchnet"}
{"code": "def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):\n    end = pos\n    if (category in ('snv', 'indel', 'cancer')):\n        end = snvend\n    elif (category == 'sv'):\n        end = svend\n        if (svend == pos):\n            if svlen:\n                end = (pos + svlen)\n        if (':' in alt):\n            match = BND_ALT_PATTERN.match(alt)\n            if match:\n                end = int(match.group(2))\n    return end", "docstring": "Return the end coordinate for a variant\n\nArgs:\npos(int)\nalt(str)\ncategory(str)\nsnvend(str)\nsvend(int)\nsvlen(int)\n\nReturns:\nend(int)", "source": "codesearchnet"}
{"code": "def __eq__(self, other):\n    return isinstance(other, ArgumentPlaceholder)", "docstring": "Tests for equality of two placeholder objects.\n\nArgs:\nother: Another placeholder object to compare to.\n\nThis method is used only for test code. All placeholder objects are\nequal to each other.", "source": "github-repos"}
{"code": "def __init__(self, endpoint_name, sagemaker_session=None):\n        \n        super(ChainerPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)", "docstring": "Initialize an ``ChainerPredictor``.\n\nArgs:\nendpoint_name (str): The name of the endpoint to perform inference on.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one\nusing the default AWS configuration chain.", "source": "juraj-google-style"}
{"code": "def GetStorageMediaImageTypeIndicators(cls, path_spec, resolver_context=None):\n    if ((cls._storage_media_image_remainder_list is None) or (cls._storage_media_image_store is None)):\n        (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE)\n        cls._storage_media_image_remainder_list = remainder_list\n        cls._storage_media_image_store = specification_store\n    if (cls._storage_media_image_scanner is None):\n        cls._storage_media_image_scanner = cls._GetSignatureScanner(cls._storage_media_image_store)\n    return cls._GetTypeIndicators(cls._storage_media_image_scanner, cls._storage_media_image_store, cls._storage_media_image_remainder_list, path_spec, resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported storage media image types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "codesearchnet"}
{"code": "def offset(self, num_to_skip):\n        \n        return self.__class__(\n            self._parent,\n            projection=self._projection,\n            field_filters=self._field_filters,\n            orders=self._orders,\n            limit=self._limit,\n            offset=num_to_skip,\n            start_at=self._start_at,\n            end_at=self._end_at,\n        )", "docstring": "Skip to an offset in a query.\n\nIf the current query already has specified an offset, this will\noverwrite it.\n\nArgs:\nnum_to_skip (int): The number of results to skip at the beginning\nof query results. (Must be non-negative.)\n\nReturns:\n~.firestore_v1beta1.query.Query: An offset query. Acts as a\ncopy of the current query, modified with the newly added\n\"offset\" field.", "source": "juraj-google-style"}
{"code": "def _ParseRecord(self, parser_mediator, text_file_object):\n    \n    try:\n      title = text_file_object.readline()\n    except UnicodeDecodeError:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to read and decode title')\n      return False\n\n    if not title:\n      return False\n\n    try:\n      url = text_file_object.readline()\n    except UnicodeDecodeError:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to read and decode url')\n      return False\n\n    try:\n      timestamp = text_file_object.readline()\n    except UnicodeDecodeError:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to read and decode timestamp')\n      return False\n\n    try:\n      popularity_index = text_file_object.readline()\n    except UnicodeDecodeError:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to read and decode popularity index')\n      return False\n\n    event_data = OperaGlobalHistoryEventData()\n\n    event_data.url = url.strip()\n\n    title = title.strip()\n    if title != event_data.url:\n      event_data.title = title\n\n    popularity_index = popularity_index.strip()\n    try:\n      event_data.popularity_index = int(popularity_index, 10)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to convert popularity index: {0:s}'.format(popularity_index))\n\n    if event_data.popularity_index < 0:\n      event_data.description = 'First and Only Visit'\n    else:\n      event_data.description = 'Last Visit'\n\n    timestamp = timestamp.strip()\n    try:\n      timestamp = int(timestamp, 10)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to convert timestamp: {0:s}'.format(timestamp))\n      timestamp = None\n\n    if timestamp is None:\n      date_time = dfdatetime_semantic_time.SemanticTime('Invalid')\n    else:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    return True", "docstring": "Parses an Opera global history record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntext_file_object (dfvfs.TextFile): text file.\n\nReturns:\nbool: True if the record was successfully parsed.", "source": "juraj-google-style"}
{"code": "def create_lb_with_nat_pool(access_token, subscription_id, resource_group, lb_name, public_ip_id, fe_start_port, fe_end_port, backend_port, location):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, '?api-version=', NETWORK_API])\n    lb_body = {'location': location}\n    frontendipcconfig = {'name': 'LoadBalancerFrontEnd'}\n    fipc_properties = {'publicIPAddress': {'id': public_ip_id}}\n    frontendipcconfig['properties'] = fipc_properties\n    properties = {'frontendIPConfigurations': [frontendipcconfig]}\n    properties['backendAddressPools'] = [{'name': 'bepool'}]\n    inbound_natpool = {'name': 'natpool'}\n    lbfe_id = (((((('/subscriptions/' + subscription_id) + '/resourceGroups/') + resource_group) + '/providers/Microsoft.Network/loadBalancers/') + lb_name) + '/frontendIPConfigurations/LoadBalancerFrontEnd')\n    ibnp_properties = {'frontendIPConfiguration': {'id': lbfe_id}}\n    ibnp_properties['protocol'] = 'tcp'\n    ibnp_properties['frontendPortRangeStart'] = fe_start_port\n    ibnp_properties['frontendPortRangeEnd'] = fe_end_port\n    ibnp_properties['backendPort'] = backend_port\n    inbound_natpool['properties'] = ibnp_properties\n    properties['inboundNatPools'] = [inbound_natpool]\n    lb_body['properties'] = properties\n    body = json.dumps(lb_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a load balancer with inbound NAT pools.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nlb_name (str): Name of the new load balancer.\npublic_ip_id (str): Public IP address resource id.\nfe_start_port (int): Start of front-end port range.\nfe_end_port (int): End of front-end port range.\nbackend_port (int): Back end port for VMs.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. Load Balancer JSON body.", "source": "codesearchnet"}
{"code": "def on_train_end(self, logs=None):", "docstring": "Called at the end of training.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently the output of the last call to `on_epoch_end()`\nis passed to this argument for this method but that may change in\nthe future.", "source": "github-repos"}
{"code": "def get_nondebug_quantized_model(self) -> bytes:\n    return self._get_quantized_model(is_debug=False)", "docstring": "Returns a non-instrumented quantized model.\n\nConvert the quantized model with the initialized converter and\nreturn bytes for nondebug model. The model will not be instrumented with\nnumeric verification operations.\n\nReturns:\nModel bytes corresponding to the model.\nRaises:\nValueError: if converter is not passed to the debugger.", "source": "github-repos"}
{"code": "def __format__(self, format_spec=None):\n        \n        if not format_spec:  \n            return str(self)\n        elif format_spec == 'url':\n            return self.to_url()\n        elif format_spec.startswith('url:'):\n            parts = format_spec.split(':')[1:]\n            site = parts[0]\n            if len(parts) > 1:\n                country = parts[1]\n            else:\n                country = 'us'\n            return self.to_url(site, country)\n        elif format_spec == 'urn':\n            return self.to_urn()\n        else:\n            raise ValueError('Unknown format_spec %r' % format_spec)", "docstring": "Extended pretty printing for ISBN strings.\n\nArgs:\nformat_spec (str): Extended format to use\n\nReturns:\n``str``: Human readable string representation of ``Isbn`` object\n\nRaises:\nValueError: Unknown value for ``format_spec``", "source": "juraj-google-style"}
{"code": "def CreateSitelinkFeedItem(feed_items, feed_item_id):\n    site_link_from_feed = feed_items[feed_item_id]\n    site_link_feed_item = {'sitelinkText': site_link_from_feed['text'], 'sitelinkLine2': site_link_from_feed['line2'], 'sitelinkLine3': site_link_from_feed['line3']}\n    if (('finalUrls' in site_link_from_feed) and site_link_from_feed['finalUrls']):\n        site_link_feed_item['sitelinkFinalUrls'] = {'urls': site_link_from_feed['finalUrls']}\n        if ('finalMobileUrls' in site_link_from_feed):\n            site_link_feed_item['sitelinkFinalMobileUrls'] = {'urls': site_link_from_feed['finalMobileUrls']}\n        site_link_feed_item['sitelinkTrackingUrlTemplate'] = site_link_from_feed['trackingUrlTemplate']\n    else:\n        site_link_feed_item['sitelinkUrl'] = site_link_from_feed['url']\n    return site_link_feed_item", "docstring": "Creates a Sitelink Feed Item.\n\nArgs:\nfeed_items: a list of all Feed Items.\nfeed_item_id: the Id of a specific Feed Item for which a Sitelink Feed Item\nshould be created.\n\nReturns:\nThe new Sitelink Feed Item.", "source": "codesearchnet"}
{"code": "async def is_try_or_pull_request(self):\n    tasks = [asyncio.ensure_future(link.is_try_or_pull_request()) for link in self.links]\n    tasks.insert(0, asyncio.ensure_future(is_try_or_pull_request(self.context, self.task)))\n    conditions = (await raise_future_exceptions(tasks))\n    return any(conditions)", "docstring": "Determine if any task in the chain is a try task.\n\nReturns:\nbool: True if a task is a try task.", "source": "codesearchnet"}
{"code": "def memory_write8(self, addr, data, zone=None):\n        \n        return self.memory_write(addr, data, zone, 8)", "docstring": "Writes bytes to memory of a target system.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of bytes to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of bytes written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "juraj-google-style"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], anti_aliasing: bool=True, anti_aliasing_sigma=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    requires_backends(self, 'scipy')\n    output_shape = (size['height'], size['width'])\n    image = to_channel_dimension_format(image, ChannelDimension.LAST)\n    image, output_shape = _preprocess_resize_output_shape(image, output_shape)\n    input_shape = image.shape\n    factors = np.divide(input_shape, output_shape)\n    ndi_mode = 'mirror'\n    cval = 0\n    order = 1\n    if anti_aliasing:\n        if anti_aliasing_sigma is None:\n            anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2)\n        else:\n            anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors)\n            if np.any(anti_aliasing_sigma < 0):\n                raise ValueError('Anti-aliasing standard deviation must be greater than or equal to zero')\n            elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)):\n                warnings.warn('Anti-aliasing standard deviation greater than zero but not down-sampling along all axes')\n        filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode)\n    else:\n        filtered = image\n    zoom_factors = [1 / f for f in factors]\n    out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True)\n    image = _clip_warp_output(image, out)\n    image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)\n    image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n    return image", "docstring": "Resize an image as per the original implementation.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nDictionary containing the height and width to resize the image to.\nanti_aliasing (`bool`, *optional*, defaults to `True`):\nWhether to apply anti-aliasing when downsampling the image.\nanti_aliasing_sigma (`float`, *optional*, defaults to `None`):\nStandard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated\nautomatically.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input\nimage.", "source": "github-repos"}
{"code": "def _detect(self):\n    results = []\n    for c in self.slither.contracts_derived:\n        ret = self.detect_uninitialized(c)\n        for (variable, functions) in ret:\n            info = '{}.{} ({}) is never initialized. It is used in:\\n'\n            info = info.format(variable.contract.name, variable.name, variable.source_mapping_str)\n            for f in functions:\n                info += '\\t- {} ({})\\n'.format(f.name, f.source_mapping_str)\n            source = [variable.source_mapping]\n            source += [f.source_mapping for f in functions]\n            json = self.generate_json_result(info)\n            self.add_variable_to_json(variable, json)\n            self.add_functions_to_json(functions, json)\n            results.append(json)\n    return results", "docstring": "Detect uninitialized state variables\n\nRecursively visit the calls\nReturns:\ndict: [contract name] = set(state variable uninitialized)", "source": "codesearchnet"}
{"code": "def add_children(self, children):\n        \n\n        self._children += [c for c in children if c not in self._children]", "docstring": "Adds new children nodes after filtering for duplicates\n\nArgs:\nchildren (list): list of OmniTree nodes to add as children", "source": "juraj-google-style"}
{"code": "def fail_steamid(channel):\n    gui = ui_embed.UI(channel, \"That SteamID doesn't exist.\", 'You can get your SteamID by going to your profile page and looking at the url, or you can set a custom ID by going to edit profile on your profile page.', modulename=modulename, colour=35071)\n    return gui", "docstring": "Creates an embed UI for invalid SteamIDs\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "codesearchnet"}
{"code": "def __init__(self,\n               power=0.,\n               validate_args=False,\n               name=\"power_transform\"):\n    \n    self._graph_parents = []\n    self._name = name\n    self._validate_args = validate_args\n    with self._name_scope(\"init\"):\n      power = tf.get_static_value(\n          tf.convert_to_tensor(value=power, name=\"power\"))\n    if power is None or power < 0:\n      raise ValueError(\"`power` must be a non-negative TF constant.\")\n    self._power = power\n    super(PowerTransform, self).__init__(\n        forward_min_event_ndims=0,\n        validate_args=validate_args,\n        name=name)", "docstring": "Instantiates the `PowerTransform` bijector.\n\nArgs:\npower: Python `float` scalar indicating the transform power, i.e.,\n`Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.\n\nRaises:\nValueError: if `power < 0` or is not known statically.", "source": "juraj-google-style"}
{"code": "def unkown_field(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `unkown_field`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `unkown_field`')\n    self._unkown_field = value", "docstring": "Corresponds to IDD Field `unkown_field` Empty field in data.\n\nArgs:\nvalue (str): value for IDD Field `unkown_field`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def sysmeta_add_preferred(sysmeta_pyxb, node_urn):\n    if (not has_replication_policy(sysmeta_pyxb)):\n        sysmeta_set_default_rp(sysmeta_pyxb)\n    rp_pyxb = sysmeta_pyxb.replicationPolicy\n    _add_node(rp_pyxb, 'pref', node_urn)\n    _remove_node(rp_pyxb, 'block', node_urn)", "docstring": "Add a remote Member Node to the list of preferred replication targets to this\nSystem Metadata object.\n\nAlso remove the target MN from the list of blocked Member Nodes if present.\n\nIf the target MN is already in the preferred list and not in the blocked list, this\nfunction is a no-op.\n\nArgs:\nsysmeta_pyxb : SystemMetadata PyXB object.\nSystem Metadata in which to add the preferred replication target.\n\nIf the System Metadata does not already have a Replication Policy, a default\nreplication policy which enables replication is added and populated with the\npreferred replication target.\n\nnode_urn : str\nNode URN of the remote MN that will be added. On the form\n``urn:node:MyMemberNode``.", "source": "codesearchnet"}
{"code": "def _SetYaraRules(self, yara_rules_string):\n    if (not yara_rules_string):\n        return\n    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance('yara')\n    analyzer_object.SetRules(yara_rules_string)\n    self._analyzers.append(analyzer_object)", "docstring": "Sets the Yara rules.\n\nArgs:\nyara_rules_string (str): unparsed Yara rule definitions.", "source": "codesearchnet"}
{"code": "def get_cn_dict(self, structure, n, use_weights=False):\n    siw = self.get_nn_info(structure, n)\n    cn_dict = {}\n    for i in siw:\n        site_element = i['site'].species_string\n        if (site_element not in cn_dict):\n            if use_weights:\n                cn_dict[site_element] = i['weight']\n            else:\n                cn_dict[site_element] = 1\n        elif use_weights:\n            cn_dict[site_element] += i['weight']\n        else:\n            cn_dict[site_element] += 1\n    return cn_dict", "docstring": "Get coordination number, CN, of each element bonded to site with index n in structure\n\nArgs:\nstructure (Structure): input structure\nn (integer): index of site for which to determine CN.\nuse_weights (boolean): flag indicating whether (True)\nto use weights for computing the coordination number\nor not (False, default: each coordinated site has equal\nweight).\n\nReturns:\ncn (dict): dictionary of CN of each element bonded to site", "source": "codesearchnet"}
{"code": "def _get_dependent_variables(input_ops, output_ops):\n    output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n    inbetween_ops = op_selector.get_backward_walk_ops(seed_ops=output_ops, stop_at_ts=input_ops, inclusive=False, only_differentiable=True)\n    var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n    var_names = (op.name for op in var_ops)\n    tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n    tf_vars = [v for v in tf_vars if v is not None]\n    return tf_vars", "docstring": "Finds variables involved in the subgraph between input_ops and output_ops.\n\nArgs:\ninput_ops: Flattened list of input ops\noutput_ops: Flattened list of output ops\n\nReturns:\nA list of variables", "source": "github-repos"}
{"code": "def parse_GDS(filepath):\n    \n    dataset_lines = []\n    subsets = {}\n    database = None\n    dataset_name = None\n    with utils.smart_open(filepath) as soft:\n        groupper = groupby(soft, lambda x: x.startswith(\"^\"))\n        for is_new_entry, group in groupper:\n            if is_new_entry:\n                entry_type, entry_name = __parse_entry(next(group))\n                logger.debug(\"%s: %s\" % (entry_type.upper(), entry_name))\n                if entry_type == \"SUBSET\":\n                    is_data, data_group = next(groupper)\n                    message = (\"The key is not False, probably there is an \"\n                               \"error in the SOFT file\")\n                    assert not is_data, message\n                    subset_metadata = parse_metadata(data_group)\n                    subsets[entry_name] = GDSSubset(name=entry_name,\n                                                    metadata=subset_metadata)\n                elif entry_type == \"DATABASE\":\n\n                    is_data, data_group = next(groupper)\n                    message = (\"The key is not False, probably there is an \"\n                               \"error in the SOFT file\")\n                    assert not is_data, message\n                    database_metadata = parse_metadata(data_group)\n                    database = GEODatabase(name=entry_name,\n                                           metadata=database_metadata)\n                elif entry_type == \"DATASET\":\n                    is_data, data_group = next(groupper)\n                    dataset_name = entry_name\n                    for line in data_group:\n                        dataset_lines.append(line.rstrip())\n                else:\n                    logger.error(\"Cannot recognize type %s\" % entry_type)\n\n    metadata = parse_metadata(dataset_lines)\n    columns = parse_GDS_columns(dataset_lines, subsets)\n    table = parse_table_data(dataset_lines)\n    return GDS(name=dataset_name, metadata=metadata, columns=columns,\n               table=table, subsets=subsets, database=database)", "docstring": "Parse GDS SOFT file.\n\nArgs:\nfilepath (:obj:`str`): Path to GDS SOFT file.\n\nReturns:\n:obj:`GEOparse.GDS`: A GDS object.", "source": "juraj-google-style"}
{"code": "def convert_squeeze(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting squeeze ...')\n    if (len(params['axes']) > 1):\n        raise AssertionError('Cannot convert squeeze by multiple dimensions')\n\n    def target_layer(x, axis=int(params['axes'][0])):\n        import tensorflow as tf\n        return tf.squeeze(x, axis=axis)\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert squeeze operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def script_dir(pyobject, follow_symlinks=True):\n    \n    if getattr(sys, 'frozen', False):  \n        path = abspath(sys.executable)\n    else:\n        path = inspect.getabsfile(pyobject)\n    if follow_symlinks:\n        path = realpath(path)\n    return dirname(path)", "docstring": "Get current script's directory\n\nArgs:\npyobject (Any): Any Python object in the script\nfollow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.\n\nReturns:\nstr: Current script's directory", "source": "juraj-google-style"}
{"code": "def __init__(self, expr, weld_type, df=None, column_name=None, index_type=None, index_name=None):\n        \n        self.expr = expr\n        self.weld_type = weld_type\n        self.dim = 1\n        self.df = df\n        self.column_name = column_name\n        self.index_type = index_type\n        self.index_name = index_name", "docstring": "Summary\n\nTODO: Implement an actual Index Object like how Pandas does\nArgs:\nexpr (TYPE): Description\nweld_type (TYPE): Description\ndf (None, optional): Description\ncolumn_name (None, optional): Description", "source": "juraj-google-style"}
{"code": "def visualize_decision(features, labels, true_w_b, candidate_w_bs, fname):\n    fig = figure.Figure(figsize=(6, 6))\n    canvas = backend_agg.FigureCanvasAgg(fig)\n    ax = fig.add_subplot(1, 1, 1)\n    ax.scatter(features[(:, 0)], features[(:, 1)], c=np.float32(labels[(:, 0)]), cmap=cm.get_cmap('binary'), edgecolors='k')\n\n    def plot_weights(w, b, **kwargs):\n        (w1, w2) = w\n        x1s = np.linspace((- 1), 1, 100)\n        x2s = ((- ((w1 * x1s) + b)) / w2)\n        ax.plot(x1s, x2s, **kwargs)\n    for (w, b) in candidate_w_bs:\n        plot_weights(w, b, alpha=(1.0 / np.sqrt(len(candidate_w_bs))), lw=1, color='blue')\n    if (true_w_b is not None):\n        plot_weights(*true_w_b, lw=4, color='green', label='true separator')\n    ax.set_xlim([(- 1.5), 1.5])\n    ax.set_ylim([(- 1.5), 1.5])\n    ax.legend()\n    canvas.print_figure(fname, format='png')\n    print('saved {}'.format(fname))", "docstring": "Utility method to visualize decision boundaries in R^2.\n\nArgs:\nfeatures: Input points, as a Numpy `array` of shape `[num_examples, 2]`.\nlabels: Numpy `float`-like array of shape `[num_examples, 1]` giving a\nlabel for each point.\ntrue_w_b: A `tuple` `(w, b)` where `w` is a Numpy array of\nshape `[2]` and `b` is a scalar `float`, interpreted as a\ndecision rule of the form `dot(features, w) + b > 0`.\ncandidate_w_bs: Python `iterable` containing tuples of the same form as\ntrue_w_b.\nfname: The filename to save the plot as a PNG image (Python `str`).", "source": "codesearchnet"}
{"code": "def get_percentile_to_value_dict(self, percentile_list):\n    result = {}\n    total = 0\n    percentile_list_index = 0\n    count_at_percentile = 0\n    percentile_list = list(set(percentile_list))\n    percentile_list.sort()\n    for index in range(self.counts_len):\n        total += self.get_count_at_index(index)\n        while True:\n            if (not count_at_percentile):\n                if (percentile_list_index == len(percentile_list)):\n                    return result\n                percentile = percentile_list[percentile_list_index]\n                percentile_list_index += 1\n                if (percentile > 100):\n                    return result\n                count_at_percentile = self.get_target_count_at_percentile(percentile)\n            if (total >= count_at_percentile):\n                value_at_index = self.get_value_from_index(index)\n                if percentile:\n                    result[percentile] = self.get_highest_equivalent_value(value_at_index)\n                else:\n                    result[percentile] = self.get_lowest_equivalent_value(value_at_index)\n                count_at_percentile = 0\n            else:\n                break\n    return result", "docstring": "A faster alternative to query values for a list of percentiles.\n\nArgs:\npercentile_list: a list of percentiles in any order, dups will be ignored\neach element in the list must be a float value in [0.0 .. 100.0]\nReturns:\na dict of percentile values indexed by the percentile", "source": "codesearchnet"}
{"code": "def find_clients(self, hosts):\n    clients = []\n    for host in hosts:\n        clients.append(self._get_client_by_hostname(host))\n    return [client for client in clients if (client is not None)]", "docstring": "Finds GRR clients given a list of hosts.\n\nArgs:\nhosts: List of hostname FQDNs\n\nReturns:\nList of GRR client objects.", "source": "codesearchnet"}
{"code": "def crud_handler(Model, name=None, **kwds):\n    \n\n    \n    from nautilus.network.events import combine_action_handlers\n    from . import update_handler, create_handler, delete_handler, read_handler\n\n    \n    return combine_action_handlers(\n        create_handler(Model, name=name),\n        read_handler(Model, name=name),\n        update_handler(Model, name=name),\n        delete_handler(Model, name=name),\n    )", "docstring": "This action handler factory reaturns an action handler that\nresponds to actions with CRUD types (following nautilus conventions)\nand performs the necessary mutation on the model's database.\n\nArgs:\nModel (nautilus.BaseModel): The model to delete when the action\nreceived.\n\nReturns:\nfunction(type, payload): The action handler for this model", "source": "juraj-google-style"}
{"code": "def _AssertGrayscaleImage(image):\n    return control_flow_ops.with_dependencies(_CheckGrayscaleImage(image, require_static=False), image)", "docstring": "Assert that we are working with a properly shaped grayscale image.\n\nPerforms the check statically if possible (i.e. if the shape\nis statically known). Otherwise adds a control dependency\nto an assert op that checks the dynamic shape.\n\nArgs:\nimage: >= 2-D Tensor of size [*, 1]\n\nRaises:\nValueError: if image.shape is not a [>= 2] vector or if\nlast dimension is not size 1.\n\nReturns:\nIf the shape of `image` could be verified statically, `image` is\nreturned unchanged, otherwise there will be a control dependency\nadded that asserts the correct dynamic shape.", "source": "github-repos"}
{"code": "def format_diff_pyxb(a_pyxb, b_pyxb):\n    \n    return '\\n'.join(\n        difflib.ndiff(\n            serialize_to_xml_str(a_pyxb).splitlines(),\n            serialize_to_xml_str(b_pyxb).splitlines(),\n        )\n    )", "docstring": "Create a diff between two PyXB objects.\n\nArgs:\na_pyxb: PyXB object\nb_pyxb: PyXB object\n\nReturns:\nstr : `Differ`-style delta", "source": "juraj-google-style"}
{"code": "def __init__(self, source_geo_def, target_geo_def):\n        \n\n        self.source_geo_def = source_geo_def\n        self.target_geo_def = target_geo_def", "docstring": "Initialize resampler with geolocation information.\n\nArgs:\nsource_geo_def (SwathDefinition, AreaDefinition):\nGeolocation definition for the data to be resampled\ntarget_geo_def (CoordinateDefinition, AreaDefinition):\nGeolocation definition for the area to resample data to.", "source": "juraj-google-style"}
{"code": "def delay_response(delay):\n    \n    delay = min(float(delay), 10)\n\n    time.sleep(delay)\n\n    return jsonify(\n        get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\")\n    )", "docstring": "Returns a delayed response (max of 10 seconds).\n---\ntags:\n- Dynamic data\nparameters:\n- in: path\nname: delay\ntype: int\nproduces:\n- application/json\nresponses:\n200:\ndescription: A delayed response.", "source": "juraj-google-style"}
{"code": "def restore(self, output):\n    pass", "docstring": "Create an accumulator based on 'output'.\n\nThis method creates a new accumulator with identical internal state to the\none used to create the data in 'output'. This means that if you do\n\noutput_data = combiner.extract(accumulator_1)\naccumulator_2 = combiner.restore(output_data)\n\nthen accumulator_1 and accumulator_2 will have identical internal state, and\ncomputations using either of them will be equivalent.\n\nArgs:\noutput: The data output from a previous computation. Should be in the same\nform as provided by 'extract_output'.\n\nReturns:\nA new accumulator.", "source": "github-repos"}
{"code": "def _GetAuthCookie(self, auth_token):\n\t\t\n\t\t\n\t\tcontinue_location = \"http:\n\t\targs = {\"continue\": continue_location, \"auth\": auth_token}\n\t\treq = self._CreateRequest(\"https:\n\t\ttry:\n\t\t\tresponse = self.opener.open(req)\n\t\texcept urllib2.HTTPError, e:\n\t\t\tresponse = e\n\t\tif (response.code != 302 or\n\t\t\t\tresponse.info()[\"location\"] != continue_location):\n\t\t\traise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)\n\t\tself.authenticated = True", "docstring": "Fetches authentication cookies for an authentication token.\n\nArgs:\nauth_token: The authentication token returned by ClientLogin.\n\nRaises:\nHTTPError: If there was an error fetching the authentication cookies.", "source": "juraj-google-style"}
{"code": "def __init__(self, fetches):\n    if isinstance(fetches, wrapt.ObjectProxy):\n        self._fetch_type = type(fetches.__wrapped__)\n    else:\n        self._fetch_type = type(fetches)\n    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]\n    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)", "docstring": "Creates a _ListFetchMapper.\n\nArgs:\nfetches: List, tuple, or namedtuple of fetches.", "source": "github-repos"}
{"code": "def mpim_close(self, *, channel: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"channel\": channel})\n        return self.api_call(\"mpim.close\", json=kwargs)", "docstring": "Closes a multiparty direct message channel.\n\nArgs:\nchannel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        if not isinstance(other, LocationDescriptor):\n            return False\n\n        nbr_of_sub_locations = self.nbr_of_sub_locations()\n\n        if nbr_of_sub_locations != other.nbr_of_sub_locations():\n            return False\n\n        for i in range(nbr_of_sub_locations):\n            if self._locations_list[i] != other._locations_list[i]:\n                return False\n\n        return True", "docstring": "Detect if another object is equal to this :class:`LocationDescriptor` object.\n\nArgs:\nother: object to test.", "source": "juraj-google-style"}
{"code": "def get_by_resource(self, resource_uri):\n    uri = (((self.URI + self.RESOURCES_PATH) + '/') + resource_uri)\n    return self._client.get(id_or_uri=uri)", "docstring": "Gets all the labels for the specified resource\n\nArgs:\nresource_uri: The resource URI\n\nReturns:\ndict: Resource Labels", "source": "codesearchnet"}
{"code": "def find(self, collection, query):\n        \n        obj = getattr(self.db, collection)\n        result = obj.find(query)\n        return result", "docstring": "Search a collection for the query provided. Just a raw interface to\nmongo to do any query you want.\n\nArgs:\ncollection: The db collection. See main class documentation.\nquery: A mongo find query.\nReturns:\npymongo Cursor object with the results.", "source": "juraj-google-style"}
{"code": "def process(self, batch, *args, **kwargs):\n        \n        if self.postprocessing is not None:\n            batch = self.postprocessing(batch)\n        return batch", "docstring": "Process a list of examples to create a batch.\n\nPostprocess the batch with user-provided Pipeline.\n\nArgs:\nbatch (list(object)): A list of object from a batch of examples.\nReturns:\nobject: Processed object given the input and custom\npostprocessing Pipeline.", "source": "juraj-google-style"}
{"code": "def Shell(device, *command):\n    if command:\n        return device.StreamingShell(' '.join(command))\n    else:\n        terminal_prompt = device.InteractiveShell()\n        print(terminal_prompt.decode('utf-8'))\n        while True:\n            cmd = input('> ')\n            if (not cmd):\n                continue\n            elif (cmd == 'exit'):\n                break\n            else:\n                stdout = device.InteractiveShell(cmd, strip_cmd=True, delim=terminal_prompt, strip_delim=True)\n                if stdout:\n                    if isinstance(stdout, bytes):\n                        stdout = stdout.decode('utf-8')\n                        print(stdout)\n        device.Close()", "docstring": "Runs a command on the device and prints the stdout.\n\nArgs:\ncommand: Command to run on the target.", "source": "codesearchnet"}
{"code": "def _summary_iterator(test_dir):\n    event_paths = sorted(glob.glob(os.path.join(test_dir, 'event*')))\n    return summary_iterator.summary_iterator(event_paths[-1])", "docstring": "Reads events from test_dir/events.\n\nArgs:\ntest_dir: Name of the test directory.\n\nReturns:\nA summary_iterator", "source": "github-repos"}
{"code": "def __init__(self, details, _class):\n\t\t\n\n\t\t\n\t\tif not isinstance(details, dict):\n\t\t\traise ValueError('details in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a dict')\n\n\t\t\n\t\tself.validation_failures = {}\n\n\t\t\n\t\tself._class = _class\n\n\t\t\n\t\tself._optional = False\n\n\t\t\n\t\tif '__optional__' in details:\n\n\t\t\t\n\t\t\tif isinstance(details['__optional__'], bool):\n\t\t\t\tself._optional = details['__optional__']\n\n\t\t\t\n\t\t\telse:\n\t\t\t\tsys.stderr.write('\"' + str(details['__optional__']) + '\" is not a valid value for __optional__, assuming false')\n\n\t\t\t\n\t\t\tdel details['__optional__']\n\n\t\t\n\t\tself._special = {}\n\n\t\t\n\t\tfor k in (tuple(details.keys())):\n\n\t\t\t\n\t\t\toMatch = _specialKey.match(k)\n\t\t\tif oMatch:\n\n\t\t\t\t\n\t\t\t\tself._special[oMatch.group(1)] = details[k]\n\t\t\t\tdel details[k]", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of values allowed for\nthe node\n_class {str} -- The class of the child\n\nRaises:\nValueError\n\nReturns:\n_BaseNode", "source": "juraj-google-style"}
{"code": "def _ScheduleTask(self, task):\n    if self._processing_profiler:\n        self._processing_profiler.StartTiming('schedule_task')\n    try:\n        self._task_queue.PushItem(task, block=False)\n        is_scheduled = True\n    except errors.QueueFull:\n        is_scheduled = False\n    if self._processing_profiler:\n        self._processing_profiler.StopTiming('schedule_task')\n    return is_scheduled", "docstring": "Schedules a task.\n\nArgs:\ntask (Task): task.\n\nReturns:\nbool: True if the task was scheduled.", "source": "codesearchnet"}
{"code": "def read_into(self, buffer, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1', write_offset=0) -> None:\n    if (type(buffer) is Buffer):\n        buffer = buffer.mglo\n    return self.mglo.read_into(buffer, viewport, components, attachment, alignment, dtype, write_offset)", "docstring": "Read the content of the framebuffer into a buffer.\n\nArgs:\nbuffer (bytearray): The buffer that will receive the pixels.\nviewport (tuple): The viewport.\ncomponents (int): The number of components to read.\n\nKeyword Args:\nattachment (int): The color attachment.\nalignment (int): The byte alignment of the pixels.\ndtype (str): Data type.\nwrite_offset (int): The write offset.", "source": "codesearchnet"}
{"code": "def get_graph(self, run_key, device_name, debug=False):\n    \n    return self.get_graphs(run_key, debug=debug).get(device_name, None)", "docstring": "Get the runtime GraphDef proto associated with a run key and a device.\n\nArgs:\nrun_key: A Session.run kay.\ndevice_name: Name of the device in question.\ndebug: Whether the debugger-decoratedgraph is to be retrieved.\n\nReturns:\nA `GraphDef` proto.", "source": "juraj-google-style"}
{"code": "def check_plugin(self, plugin):\n        \n        \n        vcf_section = self[plugin]\n        \n        try:\n            vcf_field = vcf_section['field']\n            if not  vcf_field in self.vcf_columns:\n                raise ValidateError(\n                        \"field has to be in {0}\\n\"\n                        \"Wrong field name in plugin: {1}\".format(\n                        self.vcf_columns, plugin\n                    ))\n            if vcf_field == 'INFO':\n                try:\n                    info_key = vcf_section['info_key']\n\n                    if info_key == 'CSQ':\n                        try:\n                            csq_key = vcf_section['csq_key']\n                        except KeyError:\n                            raise ValidateError(\n                        \"CSQ entrys has to refer to an csq field.\\n\"\n                        \"Refer with keyword 'csq_key'\\n\"\n                        \"csq_key is missing in section: {0}\".format(\n                            plugin\n                            )\n                        )\n\n\n                except KeyError:\n                    raise ValidateError(\n                        \"INFO entrys has to refer to an INFO field.\\n\"\n                        \"Refer with keyword 'info_key'\\n\"\n                        \"info_key is missing in section: {0}\".format(\n                            plugin\n                            )\n                        )\n        except KeyError:\n            raise ValidateError(\n                \"Vcf entrys have to refer to a field in the VCF with keyword\"\n                \" 'field'.\\nMissing keyword 'field' in plugin: {0}\".format(\n                  plugin\n                ))\n\n        try:\n            data_type = vcf_section['data_type']\n            if not data_type in self.data_types:\n                raise ValidateError(\n                    \"data_type has to be in {0}\\n\"\n                    \"Wrong data_type in plugin: {1}\".format(\n                        self.data_types, plugin)\n                    )\n        except KeyError:\n            raise ValidateError(\n                \"Vcf entrys have to refer to a data type in the VCF with \"\n                \"keyword 'data_type'.\\n\"\n                \"Missing data_type in plugin: {0}\".format(plugin)\n                )\n\n        \n        separators = vcf_section.get('separators', None)\n        if separators:\n            if len(separators) == 1:\n                self[plugin]['separators'] = list(separators)\n        else:\n            if data_type != 'flag':\n                raise ValidateError(\n                    \"If data_type != flag the separators have to be defined\"\n                    \"Missing separators in plugin: {0}\".format(plugin)\n                    )\n                \n        \n        record_rule = vcf_section.get('record_rule', None)\n        \n        if record_rule:\n            if not record_rule in ['min', 'max']:\n                raise ValidateError(\n                    \"Record rules have to be in {0}\\n\"\n                    \"Wrong record_rule in plugin: {1}\".format(\n                        ['min', 'max'], plugin)\n                )\n        else:\n            self.logger.info(\"Setting record rule to default: 'max'\")\n                \n        return True", "docstring": "Check if the section is in the proper format vcf format.\n\nArgs:\nvcf_section (dict): The information from a vcf section\n\nReturns:\nTrue is it is in the proper format", 
"source": "juraj-google-style"}
{"code": "def _emit_tensor_snapshot(self, tensor: _TensorTracker, timestamp: int, pid: int, tid: int, value: step_stats_pb2.NodeOutput) -> None:\n    desc = str(value.tensor_description).replace('\"', '')\n    snapshot = {'tensor_description': desc}\n    self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid, tid, tensor.object_id, snapshot)", "docstring": "Generate Chrome Trace snapshot event for a computed Tensor.\n\nArgs:\ntensor: A 'TensorTracker' object.\ntimestamp:  The timestamp of this snapshot as a long integer.\npid: The pid assigned for showing the device where this op ran.\ntid: The tid of the thread computing the tensor snapshot.\nvalue: A JSON-compliant snapshot of the object.", "source": "github-repos"}
{"code": "def sysprep(disk, distro, loader=None, backend='direct', **kwargs):\n    if (loader is None):\n        loader = PackageLoader('lago', 'templates')\n    sysprep_file = _render_template(distro, loader=loader, **kwargs)\n    cmd = ['virt-sysprep', '-a', disk]\n    cmd.extend(['--commands-from-file', sysprep_file])\n    env = os.environ.copy()\n    if ('LIBGUESTFS_BACKEND' not in env):\n        env['LIBGUESTFS_BACKEND'] = backend\n    ret = utils.run_command(cmd, env=env)\n    if ret:\n        raise RuntimeError(('Failed to bootstrap %s\\ncommand:%s\\nstdout:%s\\nstderr:%s' % (disk, ' '.join((('\"%s\"' % elem) for elem in cmd)), ret.out, ret.err)))", "docstring": "Run virt-sysprep on the ``disk``, commands are built from the distro\nspecific template and arguments passed in ``kwargs``. If no template is\navailable it will default to ``sysprep-base.j2``.\n\nArgs:\ndisk(str): path to disk\ndistro(str): distro to render template for\nloader(jinja2.BaseLoader): Jinja2 template loader, if not passed,\nwill search Lago's package.\nbackend(str): libguestfs backend to use\n**kwargs(dict): environment variables for Jinja2 template\n\nReturns:\nNone\n\nRaises:\nRuntimeError: On virt-sysprep none 0 exit code.", "source": "codesearchnet"}
{"code": "def save_output(results, output_directory=\"output\"):\n    \n\n    aggregate_reports = results[\"aggregate_reports\"]\n    forensic_reports = results[\"forensic_reports\"]\n\n    if os.path.exists(output_directory):\n        if not os.path.isdir(output_directory):\n            raise ValueError(\"{0} is not a directory\".format(output_directory))\n    else:\n        os.makedirs(output_directory)\n\n    with open(\"{0}\".format(os.path.join(output_directory, \"aggregate.json\")),\n              \"w\", newline=\"\\n\", encoding=\"utf-8\") as agg_json:\n        agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False,\n                                  indent=2))\n\n    with open(\"{0}\".format(os.path.join(output_directory, \"aggregate.csv\")),\n              \"w\", newline=\"\\n\", encoding=\"utf-8\") as agg_csv:\n        csv = parsed_aggregate_reports_to_csv(aggregate_reports)\n        agg_csv.write(csv)\n\n    with open(\"{0}\".format(os.path.join(output_directory, \"forensic.json\")),\n              \"w\", newline=\"\\n\", encoding=\"utf-8\") as for_json:\n        for_json.write(json.dumps(forensic_reports, ensure_ascii=False,\n                                  indent=2))\n\n    with open(\"{0}\".format(os.path.join(output_directory, \"forensic.csv\")),\n              \"w\", newline=\"\\n\", encoding=\"utf-8\") as for_csv:\n        csv = parsed_forensic_reports_to_csv(forensic_reports)\n        for_csv.write(csv)\n\n    samples_directory = os.path.join(output_directory, \"samples\")\n    if not os.path.exists(samples_directory):\n        os.makedirs(samples_directory)\n\n    sample_filenames = []\n    for forensic_report in forensic_reports:\n        sample = forensic_report[\"sample\"]\n        message_count = 0\n        parsed_sample = forensic_report[\"parsed_sample\"]\n        subject = parsed_sample[\"filename_safe_subject\"]\n        filename = subject\n\n        while filename in sample_filenames:\n            message_count += 1\n            filename = \"{0} ({1})\".format(subject, message_count)\n\n        sample_filenames.append(filename)\n\n        filename = \"{0}.eml\".format(filename)\n        path = os.path.join(samples_directory, filename)\n        with open(path, \"w\", newline=\"\\n\", encoding=\"utf-8\") as sample_file:\n            sample_file.write(sample)", "docstring": "Save report data in the given directory\n\nArgs:\nresults (OrderedDict): Parsing results\noutput_directory: The patch to the directory to save in", "source": "juraj-google-style"}
{"code": "def _ReadEventDataIntoEvent(self, event):\n    \n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      return\n\n    event_data_identifier = event.GetEventDataIdentifier()\n    if event_data_identifier:\n      lookup_key = event_data_identifier.CopyToString()\n      event_data = self._event_data[lookup_key]\n\n      for attribute_name, attribute_value in event_data.GetAttributes():\n        setattr(event, attribute_name, attribute_value)", "docstring": "Reads the data into the event.\n\nThis function is intended to offer backwards compatible event behavior.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def report(self, verbose=1):\n    lines = []\n    if (verbose >= 2):\n        lines.append(self._status_line(tense='past'))\n        if (verbose >= 3):\n            (unit, mag) = _choose_unit(self.total_time, self.unit, self._asciimode)\n            lines.append('    body took: {total:.{pr}{t}} {unit}'.format(total=(self.total_time / mag), t=self._precision_type, pr=self._precision, unit=unit))\n        lines.append('    time per loop: {}'.format(self._seconds_str()))\n    else:\n        line = ('Timed ' + self._seconds_str())\n        if self.label:\n            line += (' for ' + self.label)\n        lines.append(line)\n    text = '\\n'.join(lines)\n    return text", "docstring": "Creates a human readable report\n\nArgs:\nverbose (int): verbosity level. Either 1, 2, or 3.\n\nReturns:\nstr: the report\n\nSeeAlso:\ntimerit.Timerit.print\n\nExample:\n>>> import math\n>>> ti = Timerit(num=1).call(math.factorial, 5)\n>>> print(ti.report(verbose=1))\nTimed best=...s, mean=...s", "source": "codesearchnet"}
{"code": "def GetSortedEvents(self, time_range=None):\n    \n    filter_expression = None\n    if time_range:\n      filter_expression = []\n\n      if time_range.start_timestamp:\n        filter_expression.append(\n            '_timestamp >= {0:d}'.format(time_range.start_timestamp))\n\n      if time_range.end_timestamp:\n        filter_expression.append(\n            '_timestamp <= {0:d}'.format(time_range.end_timestamp))\n\n      filter_expression = ' AND '.join(filter_expression)\n\n    event_generator = self._GetAttributeContainers(\n        self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression,\n        order_by='_timestamp')\n\n    for event in event_generator:\n      if hasattr(event, 'event_data_row_identifier'):\n        event_data_identifier = identifiers.SQLTableIdentifier(\n            'event_data', event.event_data_row_identifier)\n        event.SetEventDataIdentifier(event_data_identifier)\n\n        del event.event_data_row_identifier\n\n      yield event", "docstring": "Retrieves the events in increasing chronological order.\n\nArgs:\ntime_range (Optional[TimeRange]): time range used to filter events\nthat fall in a specific period.\n\nYield:\nEventObject: event.", "source": "juraj-google-style"}
{"code": "class RandomNormal(RandomInitializer):\n\n    def __init__(self, mean=0.0, stddev=0.05, seed=None):\n        self.mean = mean\n        self.stddev = stddev\n        super().__init__(seed=seed)\n\n    def __call__(self, shape, dtype=None):\n        return random.normal(shape=shape, mean=self.mean, stddev=self.stddev, seed=self.seed, dtype=dtype)\n\n    def get_config(self):\n        base_config = super().get_config()\n        config = {'mean': self.mean, 'stddev': self.stddev}\n        return {**base_config, **config}", "docstring": "Random normal initializer.\n\nDraws samples from a normal distribution for given parameters.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = RandomNormal(mean=0.0, stddev=1.0)\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = RandomNormal(mean=0.0, stddev=1.0)\n>>> layer = Dense(3, kernel_initializer=initializer)\n\nArgs:\nmean: A python scalar or a scalar keras tensor. Mean of the random\nvalues to generate.\nstddev: A python scalar or a scalar keras tensor. Standard deviation of\nthe random values to generate.\nseed: A Python integer or instance of\n`keras.backend.SeedGenerator`.\nUsed to make the behavior of the initializer\ndeterministic. Note that an initializer seeded with an integer\nor `None` (unseeded) will produce the same random values\nacross multiple calls. To get different random values\nacross multiple calls, use as seed an instance\nof `keras.backend.SeedGenerator`.", "source": "github-repos"}
{"code": "def __init__(self, data_type, unit=None,\n                 analysis_period=None, metadata=None):\n        \n        assert hasattr(data_type, 'isDataType'), \\\n            'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))\n        if unit is None:\n            unit = data_type.units[0]\n        else:\n            data_type.is_unit_acceptable(unit)\n        if analysis_period is not None:\n            assert hasattr(analysis_period, 'isAnalysisPeriod'), \\\n                'analysis_period must be a Ladybug AnalysisPeriod. Got {}'.format(\n                    type(analysis_period))\n        if metadata is not None:\n            assert isinstance(metadata, dict), \\\n                'metadata must be a dictionary. Got {}'.format(type(metadata))\n\n        self._data_type = data_type\n        self._unit = unit\n        self._analysis_period = analysis_period\n        self._metadata = metadata or {}", "docstring": "Initiate Ladybug header for lists.\n\nArgs:\ndata_type: A DataType object. (e.g. Temperature)\nunit: data_type unit (Default: None)\nanalysis_period: A Ladybug analysis period (Defualt: None)\nmetadata: Optional dictionary of additional metadata,\ncontaining information such as 'source', 'city', or 'zone'.", "source": "juraj-google-style"}
{"code": "def JoinPath(self, path_segments):\n    \n    \n    \n    first_path_segment = None\n\n    if path_segments and platform.system() == 'Windows':\n      \n      first_path_segment = path_segments[0]\n      first_path_segment_length = len(first_path_segment)\n      first_path_segment_prefix = None\n\n      \n      if (first_path_segment_length >= 7 and\n          first_path_segment.startswith('\\\\\\\\.\\\\') and\n          first_path_segment[5:7] == ':\\\\'):\n        first_path_segment_prefix = first_path_segment[4:6]\n        first_path_segment = first_path_segment[7:]\n\n      \n      elif (first_path_segment_length >= 4 and\n            first_path_segment[:4] in ['\\\\\\\\.\\\\', '\\\\\\\\?\\\\']):\n        first_path_segment_prefix = first_path_segment[:4]\n        first_path_segment = first_path_segment[4:]\n\n      \n      elif first_path_segment_length >= 2 and first_path_segment[1] == ':':\n        first_path_segment_prefix = first_path_segment[:2]\n        first_path_segment = first_path_segment[2:]\n\n      \n      elif first_path_segment.startswith('\\\\\\\\'):\n        prefix, _, remainder = first_path_segment[2:].partition(\n            self.PATH_SEPARATOR)\n\n        first_path_segment_prefix = '\\\\\\\\{0:s}'.format(prefix)\n        first_path_segment = '\\\\{0:s}'.format(remainder)\n\n      if first_path_segment_prefix:\n        first_path_segment, _, remainder = first_path_segment.partition(\n            self.PATH_SEPARATOR)\n\n        if not remainder:\n          _ = path_segments.pop(0)\n        else:\n          path_segments[0] = remainder\n\n        first_path_segment = ''.join([\n            first_path_segment_prefix, first_path_segment])\n\n      else:\n        first_path_segment = None\n\n    \n    \n\n    \n    path_segments = [\n        segment.split(self.PATH_SEPARATOR) for segment in path_segments]\n\n    \n    path_segments = [\n        element for sublist in path_segments for element in sublist]\n\n    \n    path_segments = list(filter(None, path_segments))\n\n    if first_path_segment is None:\n      path = '{0:s}{1:s}'.format(\n          self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))\n    else:\n      path = first_path_segment\n      if path_segments:\n        path = '{0:s}{1:s}{2:s}'.format(\n            path, self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))\n\n    return path", "docstring": "Joins the path segments into a path.\n\nArgs:\npath_segments (list[str]): path segments.\n\nReturns:\nstr: joined path segments prefixed with the path separator.", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(ArchiveResponsePayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Archive response payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def from_config(cls, config):\n    config.pop('dtype', None)\n    return cls(**config)", "docstring": "Instantiates an initializer from a configuration dictionary.\n\nExample:\n\n```python\ninitializer = RandomUniform(-1, 1)\nconfig = initializer.get_config()\ninitializer = RandomUniform.from_config(config)\n```\n\nArgs:\nconfig: A Python dictionary.\nIt will typically be the output of `get_config`.\n\nReturns:\nAn Initializer instance.", "source": "github-repos"}
{"code": "def UploadFilePath(self, filepath, offset=0, amount=None):\n    \n    return self._UploadChunkStream(\n        self._streamer.StreamFilePath(filepath, offset=offset, amount=amount))", "docstring": "Uploads chunks of a file on a given path to the transfer store flow.\n\nArgs:\nfilepath: A path to the file to upload.\noffset: An integer offset at which the file upload should start on.\namount: An upper bound on number of bytes to stream. If it is `None` then\nthe whole file is uploaded.\n\nReturns:\nA `BlobImageDescriptor` object.", "source": "juraj-google-style"}
{"code": "def delete_folder(self, folder):\n    if (not is_valid_uuid(folder)):\n        raise StorageArgumentException('Invalid UUID for folder: {0}'.format(folder))\n    self._authenticated_request.to_endpoint('folder/{}/'.format(folder)).delete()", "docstring": "Delete a folder. It will recursively delete all the content.\n\nArgs:\nfolder_id (str): The UUID of the folder to be deleted.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: 403\nStorageNotFoundException: 404\nHTTPError: other non-20x error codes", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader: BinaryReader):\n        \n\n        self.Type = StateType(reader.ReadByte())\n\n        self.Key = reader.ReadVarBytes(max=100)\n        self.Field = reader.ReadVarString(max=32).decode('utf-8')\n        self.Value = reader.ReadVarBytes(max=65535)\n\n        if self.Type == StateType.Account:\n            self.CheckAccountState()\n        elif self.Type == StateType.Validator:\n            self.CheckValidatorState()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n    mask = input_ids.ne(padding_idx).int()\n    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n    return incremental_indices.long() + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. This is modified from fairseq's *utils.make_positions*.\n\nArgs:\ninput_ids (`torch.LongTensor`):\nIndices of input sequence tokens in the vocabulary.\n\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "async def fetch(self, method, url, params=None, headers=None, data=None):\n    logger.debug('Sending request %s %s:\\n%r', method, url, data)\n    for retry_num in range(MAX_RETRIES):\n        try:\n            async with self.fetch_raw(method, url, params=params, headers=headers, data=data) as res:\n                async with async_timeout.timeout(REQUEST_TIMEOUT):\n                    body = (await res.read())\n            logger.debug('Received response %d %s:\\n%r', res.status, res.reason, body)\n        except asyncio.TimeoutError:\n            error_msg = 'Request timed out'\n        except aiohttp.ServerDisconnectedError as err:\n            error_msg = 'Server disconnected error: {}'.format(err)\n        except (aiohttp.ClientError, ValueError) as err:\n            error_msg = 'Request connection error: {}'.format(err)\n        else:\n            break\n        logger.info('Request attempt %d failed: %s', retry_num, error_msg)\n    else:\n        logger.info('Request failed after %d attempts', MAX_RETRIES)\n        raise exceptions.NetworkError(error_msg)\n    if (res.status != 200):\n        logger.info('Request returned unexpected status: %d %s', res.status, res.reason)\n        raise exceptions.NetworkError('Request return unexpected status: {}: {}'.format(res.status, res.reason))\n    return FetchResponse(res.status, body)", "docstring": "Make an HTTP request.\n\nAutomatically uses configured HTTP proxy, and adds Google authorization\nheader and cookies.\n\nFailures will be retried MAX_RETRIES times before raising NetworkError.\n\nArgs:\nmethod (str): Request method.\nurl (str): Request URL.\nparams (dict): (optional) Request query string parameters.\nheaders (dict): (optional) Request headers.\ndata: (str): (optional) Request body data.\n\nReturns:\nFetchResponse: Response data.\n\nRaises:\nNetworkError: If the request fails.", "source": "codesearchnet"}
{"code": "def can_create(self):\n    if (self.data.get('key_name') and self.data.get('value_name') and self.data.get('value_type')):\n        return True\n    return False", "docstring": "If the key_name, value_name, and value_type has been provided returns that the\nRegistry Key can be created, otherwise returns that the Registry Key cannot be created.\n\nReturns:", "source": "codesearchnet"}
{"code": "def strace(device, trace_address, breakpoint_address):\n    jlink = pylink.JLink()\n    jlink.open()\n    jlink.power_on()\n    jlink.set_tif(pylink.JLinkInterfaces.SWD)\n    jlink.connect(device)\n    jlink.reset()\n    jlink.breakpoint_clear_all()\n    op = pylink.JLinkStraceOperation.TRACE_START\n    jlink.strace_clear_all()\n    jlink.strace_start()\n    bphandle = jlink.breakpoint_set(breakpoint_address, thumb=True)\n    trhandle = jlink.strace_code_fetch_event(op, address=trace_address)\n    jlink.restart()\n    time.sleep(1)\n    while True:\n        if jlink.halted():\n            break\n    while True:\n        instructions = jlink.strace_read(1)\n        if (len(instructions) == 0):\n            break\n        instruction = instructions[0]\n        print(jlink.disassemble_instruction(instruction))\n    jlink.power_off()\n    jlink.close()", "docstring": "Implements simple trace using the STrace API.\n\nArgs:\ndevice (str): the device to connect to\ntrace_address (int): address to begin tracing from\nbreakpoint_address (int): address to breakpoint at\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def resolve_class(classref):\n    if (classref is None):\n        return None\n    elif isinstance(classref, six.class_types):\n        return classref\n    elif isinstance(classref, six.string_types):\n        return import_class(classref)\n    else:\n        raise ValueError((\"Unable to resolve class for '%s'\" % classref))", "docstring": "Attempt to return a Python class for the input class reference.\n\nIf `classref` is a class or None, return it. If `classref` is a\npython classpath (e.g., \"foo.bar.MyClass\") import the class and return\nit.\n\nArgs:\nclassref: A fully-qualified Python path to class, or a Python class.\n\nReturns:\nA class.", "source": "codesearchnet"}
{"code": "def load_test_config_file(test_config_path, tb_filters=None):\n    configs = _load_config_file(test_config_path)\n    if tb_filters:\n        tbs = []\n        for tb in configs[keys.Config.key_testbed.value]:\n            if tb[keys.Config.key_testbed_name.value] in tb_filters:\n                tbs.append(tb)\n        if len(tbs) != len(tb_filters):\n            raise MoblyConfigError('Expect to find %d test bed configs, found %d. Check if you have the correct test bed names.' % (len(tb_filters), len(tbs)))\n        configs[keys.Config.key_testbed.value] = tbs\n    mobly_params = configs.get(keys.Config.key_mobly_params.value, {})\n    log_path = mobly_params.get(keys.Config.key_log_path.value, _DEFAULT_LOG_PATH)\n    if ENV_MOBLY_LOGPATH in os.environ:\n        log_path = os.environ[ENV_MOBLY_LOGPATH]\n    log_path = utils.abs_path(log_path)\n    _validate_test_config(configs)\n    _validate_testbed_configs(configs[keys.Config.key_testbed.value])\n    test_configs = []\n    for original_bed_config in configs[keys.Config.key_testbed.value]:\n        test_run_config = TestRunConfig()\n        test_run_config.testbed_name = original_bed_config[keys.Config.key_testbed_name.value]\n        test_run_config.test_bed_name = test_run_config.testbed_name\n        test_run_config.log_path = log_path\n        test_run_config.controller_configs = original_bed_config.get(keys.Config.key_testbed_controllers.value, {})\n        test_run_config.user_params = original_bed_config.get(keys.Config.key_testbed_test_params.value, {})\n        test_configs.append(test_run_config)\n    return test_configs", "docstring": "Processes the test configuration file provied by user.\n\nLoads the configuration file into a dict, unpacks each testbed\nconfig into its own dict, and validate the configuration in the\nprocess.\n\nArgs:\ntest_config_path: Path to the test configuration file.\ntb_filters: A subset of test bed names to be pulled from the config\nfile. If None, then all test beds will be selected.\n\nReturns:\nA list of test configuration dicts to be passed to\ntest_runner.TestRunner.", "source": "github-repos"}
{"code": "def add_figure(self, key, url, **kwargs):\n        \n        figure = self._check_metadata_for_file(key=key, url=url, **kwargs)\n\n        for dict_key in (\n            'caption',\n            'label',\n            'material',\n            'filename',\n            'url',\n            'original_url',\n        ):\n            if kwargs.get(dict_key) is not None:\n                figure[dict_key] = kwargs[dict_key]\n\n        if key_already_there(figure, self.record.get('figures', ())):\n            raise ValueError(\n                'There\\'s already a figure with the key %s.'\n                % figure['key']\n            )\n\n        self._append_to('figures', figure)\n        self.add_document", "docstring": "Add a figure.\n\nArgs:\nkey (string): document key\nurl (string): document url\nKeyword Args:\ncaption (string): simple description\nlabel (string):\nmaterial (string):\noriginal_url (string): original url\nfilename (string): current url\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def get_permissions(self):\n    user_role = (self.last_login_role() if self.last_login_role_key else self.role_set[0].role)\n    return user_role.get_permissions()", "docstring": "Permissions of the user.\n\nReturns:\nList of Permission objects.", "source": "codesearchnet"}
{"code": "def _prepare_init_params_from_job_description(cls, job_details):\n        \n        init_params = dict()\n\n        init_params['model_name'] = job_details['ModelName']\n        init_params['instance_count'] = job_details['TransformResources']['InstanceCount']\n        init_params['instance_type'] = job_details['TransformResources']['InstanceType']\n        init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId')\n        init_params['strategy'] = job_details.get('BatchStrategy')\n        init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith')\n        init_params['output_path'] = job_details['TransformOutput']['S3OutputPath']\n        init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId')\n        init_params['accept'] = job_details['TransformOutput'].get('Accept')\n        init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms')\n        init_params['max_payload'] = job_details.get('MaxPayloadInMB')\n        init_params['base_transform_job_name'] = job_details['TransformJobName']\n\n        return init_params", "docstring": "Convert the transform job description to init params that can be handled by the class constructor\n\nArgs:\njob_details (dict): the returned job details from a describe_transform_job API call.\n\nReturns:\ndict: The transformed init_params", "source": "juraj-google-style"}
{"code": "def run(main, argv=None, flags_parser=parse_flags_with_usage):\n    try:\n        args = _run_init((sys.argv if (argv is None) else argv), flags_parser)\n        while _init_callbacks:\n            callback = _init_callbacks.popleft()\n            callback()\n        try:\n            _run_main(main, args)\n        except UsageError as error:\n            usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode)\n        except:\n            if FLAGS.pdb_post_mortem:\n                traceback.print_exc()\n                pdb.post_mortem()\n            raise\n    except Exception as e:\n        _call_exception_handlers(e)\n        raise", "docstring": "Begins executing the program.\n\nArgs:\nmain: The main function to execute. It takes an single argument \"argv\",\nwhich is a list of command line arguments with parsed flags removed.\nIf it returns an integer, it is used as the process's exit code.\nargv: A non-empty list of the command line arguments including program name,\nsys.argv is used if None.\nflags_parser: Callable[[List[Text]], Any], the function used to parse flags.\nThe return value of this function is passed to `main` untouched.\nIt must guarantee FLAGS is parsed after this function is called.\n- Parses command line flags with the flag module.\n- If there are any errors, prints usage().\n- Calls main() with the remaining arguments.\n- If main() raises a UsageError, prints usage and the error message.", "source": "codesearchnet"}
{"code": "def convert_variable_to_constant(self, incoming_edge, tensor_data):\n    raise NotImplementedError", "docstring": "Converts a variable in this Convertible and its dependencies.\n\nThis method should make sure that a converted copy of itself is present in\nthe converted graph, and that all Convertibles depending on this one also go\nthrough the same process.\n\nArgs:\nincoming_edge: The graph edge into this Convertible that is being\nconverted to a constant.\ntensor_data: The tensor representing the constant.", "source": "github-repos"}
{"code": "def _get_original_composition_ratio(self, reaction):\n        \n        if self.c1_original == self.c2_original:\n            return 1\n        c1_coeff = reaction.get_coeff(self.c1_original) \\\n            if self.c1_original in reaction.reactants else 0\n        c2_coeff = reaction.get_coeff(self.c2_original) \\\n            if self.c2_original in reaction.reactants else 0\n        return c1_coeff * 1.0 / (c1_coeff + c2_coeff)", "docstring": "Returns the molar mixing ratio between the reactants with ORIGINAL (\ninstead of processed) compositions for a reaction.\n\nArgs:\nreaction (Reaction): Reaction object that contains the original\nreactant compositions.\n\nReturns:\nThe molar mixing ratio between the original reactant\ncompositions for a reaction.", "source": "juraj-google-style"}
{"code": "def getEstTraitCovar(self,term_i=None):\n        \n        assert self.P>1, 'Trait covars not defined for single trait analysis'\n        \n        if term_i==None:\n            RV=SP.zeros((self.P,self.P))\n            for term_i in range(self.n_terms): RV+=self.vd.getTerm(term_i).getTraitCovar().K()\n        else:\n            assert term_i<self.n_terms, 'Term index non valid'\n            RV = self.vd.getTerm(term_i).getTraitCovar().K()\n        return RV", "docstring": "Returns explicitly the estimated trait covariance matrix\n\nArgs:\nterm_i:     index of the term we are interested in", "source": "juraj-google-style"}
{"code": "def _gather_all_deps(self, args, kwargs):\n    depends = []\n    count = 0\n    for dep in args:\n        if isinstance(dep, Future):\n            if (self.tasks[dep.tid]['status'] not in FINAL_STATES):\n                count += 1\n            depends.extend([dep])\n    for key in kwargs:\n        dep = kwargs[key]\n        if isinstance(dep, Future):\n            if (self.tasks[dep.tid]['status'] not in FINAL_STATES):\n                count += 1\n            depends.extend([dep])\n    for dep in kwargs.get('inputs', []):\n        if isinstance(dep, Future):\n            if (self.tasks[dep.tid]['status'] not in FINAL_STATES):\n                count += 1\n            depends.extend([dep])\n    return (count, depends)", "docstring": "Count the number of unresolved futures on which a task depends.\n\nArgs:\n- args (List[args]) : The list of args list to the fn\n- kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn\n\nReturns:\n- count, [list of dependencies]", "source": "codesearchnet"}
{"code": "def write_csv_from_dict(filename, input_dict):\n    f = open(PATH_TO_DIR + '/data/' + filename, 'w')\n    for k, v in input_dict.items():\n        line = k\n        for item in v:\n            line += ',' + item\n        f.write(line + '\\n')\n    f.flush()\n    print('Wrote to file %s' % filename)\n    check_with_golden(filename)", "docstring": "Writes out a `.csv` file from an input dictionary.\n\nAfter writing out the file, it checks the new list against the golden\nto make sure golden file is up-to-date.\n\nArgs:\nfilename: String that is the output file name.\ninput_dict: Dictionary that is to be written out to a `.csv` file.", "source": "github-repos"}
{"code": "def Approve(self, request, global_params=None):\n    config = self.GetMethodConfig('Approve')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Approves or rejects a pending build. If approved, the returned LRO will be analogous to the LRO returned from a CreateBuild call. If rejected, the returned LRO will be immediately done.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBuildsApproveRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def forward(self, inputs, expert_size):\n    input_list = inputs.split(expert_size, dim=0)\n    output_list = []\n    for i in range(self.num_experts):\n        output_list.append(F.linear(input_list[i], self.weight[i]))\n    results = torch.cat(output_list, dim=0)\n    return results", "docstring": "Forward pass of the JetMoeParallelExperts module.\n\nArgs:\ninputs (Tensor):\nInput tensor.\nexpert_size:\nExpert size information.\n\nReturns:\nTensor: Output tensor.", "source": "github-repos"}
{"code": "def SMGetJobDictionaries(self, domain='kSMDomainSystemLaunchd'):\n    \n    cfstring_launchd = ctypes.c_void_p.in_dll(self.dll, domain)\n    return CFArray(self.dll.SMCopyAllJobDictionaries(cfstring_launchd))", "docstring": "Copy all Job Dictionaries from the ServiceManagement.\n\nArgs:\ndomain: The name of a constant in Foundation referencing the domain.\nWill copy all launchd services by default.\n\nReturns:\nA marshalled python list of dicts containing the job dictionaries.", "source": "juraj-google-style"}
{"code": "def decode_list(self, ids):\n    decoded_ids = []\n    for id_ in ids:\n        if (0 <= id_ < self._num_reserved_ids):\n            decoded_ids.append(RESERVED_TOKENS[int(id_)])\n        else:\n            decoded_ids.append((id_ - self._num_reserved_ids))\n    return [str(d) for d in decoded_ids]", "docstring": "Transform a sequence of int ids into a their string versions.\n\nThis method supports transforming individual input/output ids to their\nstring versions so that sequence to/from text conversions can be visualized\nin a human readable format.\n\nArgs:\nids: list of integers to be converted.\n\nReturns:\nstrs: list of human-readable string.", "source": "codesearchnet"}
{"code": "def has_course_mode(self, course_run_id, mode):\n        \n        course_modes = self.get_course_modes(course_run_id)\n        return any(course_mode for course_mode in course_modes if course_mode['slug'] == mode)", "docstring": "Query the Enrollment API to see whether a course run has a given course mode available.\n\nArguments:\ncourse_run_id (str): The string value of the course run's unique identifier\n\nReturns:\nbool: Whether the course run has the given mode avaialble for enrollment.", "source": "juraj-google-style"}
{"code": "def __call__(self, old, new):\n        \n        if not new or not self.precondition():\n            return\n        self.validate(old, new)", "docstring": "Validate the `new` translation against the `old` one.\n\nNo checks are needed for deleted translations\n\nArgs:\nold: The old translation.\nnew: The new translation.\nRaises:\nA ValidationError with an appropriate message.", "source": "juraj-google-style"}
{"code": "def merge_tree(dest: Any, src: Any, merge_fn: Optional[Callable[[KeyPath, Any, Any], Any]]=None, root_path: Optional[KeyPath]=None) -> Any:\n    if not root_path:\n        root_path = KeyPath()\n    if isinstance(dest, dict) and isinstance(src, dict):\n        return _merge_dict_into_dict(dest, src, merge_fn, root_path)\n    if isinstance(dest, list) and isinstance(src, dict):\n        return _merge_dict_into_list(dest, src, root_path)\n    if merge_fn:\n        return merge_fn(root_path, dest, src)\n    return src", "docstring": "Deep merge two (maybe) hierarchical values.\n\nArgs:\ndest: Destination value.\nsrc: Source value. When source value is a dict, it's considered as a\npatch (delta) to the destination when destination is a dict or list.\nFor other source types, it's considered as a new value that will replace\ndest completely.\nmerge_fn: A function to handle value merge that will be called for updated\nor added keys. If a branch is added/updated, the root of branch will be\npassed to merge_fn.\nthe signature of function is: (path, left_value, right_value) ->\nfinal_value\nIf a key is only present in src dict, old_value is MISSING_VALUE.\nIf a key is only present in dest dict, new_value is MISSING_VALUE.\nOtherwise both new_value and old_value are filled.\n\nIf final value is MISSING_VALUE, it will be removed from its parent\ncollection.\nroot_path: KeyPath of dest.\n\nReturns:\nMerged value.\n\nRaises:\nKeyError: Dict keys are not integers when merging into a list.", "source": "github-repos"}
{"code": "def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):\n    url = (Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name))\n    args = {}\n    if start_index:\n        args['startIndex'] = start_index\n    if max_results:\n        args['maxResults'] = max_results\n    if (page_token is not None):\n        args['pageToken'] = page_token\n    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Retrieves the contents of a table.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nstart_index: the index of the row at which to start retrieval.\nmax_results: an optional maximum number of rows to retrieve.\npage_token: an optional token to continue the retrieval.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def _ParseLogLine(self, parser_mediator, structure, key):\n    \n    time_elements_tuple = self._GetTimeElementsTuple(structure)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    self._last_month = time_elements_tuple[1]\n\n    \n    \n    if key == 'logline':\n      self._previous_structure = structure\n    else:\n      structure = self._previous_structure\n\n    event_data = MacAppFirewallLogEventData()\n    event_data.action = structure.action\n    event_data.agent = structure.agent\n    event_data.computer_name = structure.computer_name\n    \n    \n    event_data.process_name = structure.process_name.strip()\n    event_data.status = structure.status\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parse a single log line and produce an event object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): identifier of the structure of tokens.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def custom_licenses(self):\n    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n    result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE)\n    if (result < 0):\n        raise errors.JLinkException(result)\n    return ctypes.string_at(buf).decode()", "docstring": "Returns a string of the installed licenses the J-Link has.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nString of the contents of the custom licenses the J-Link has.", "source": "codesearchnet"}
{"code": "def cast_to_floatx(x):\n    if isinstance(x, (tensor_lib.Tensor, variables_module.Variable, sparse_tensor.SparseTensor)):\n        return math_ops.cast(x, dtype=floatx())\n    return numpy_compat.np_asarray(x, dtype=floatx())", "docstring": "Cast a Numpy array to the default Keras float type.\n\nArgs:\nx: Numpy array or TensorFlow tensor.\n\nReturns:\nThe same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor\nif `x` was a tensor), cast to its new type.\n\nExample:\n\n>>> tf.keras.backend.floatx()\n'float32'\n>>> arr = np.array([1.0, 2.0], dtype='float64')\n>>> arr.dtype\ndtype('float64')\n>>> new_arr = cast_to_floatx(arr)\n>>> new_arr\narray([1.,  2.], dtype=float32)\n>>> new_arr.dtype\ndtype('float32')", "source": "github-repos"}
{"code": "def _ResolveVars(value):\n    if isinstance(value, dict):\n        resolved_value = {}\n        for k, v in value.items():\n            resolved_value[k] = MessageValue._ResolveVars(v)\n        return resolved_value\n    if isinstance(value, list):\n        return [MessageValue._ResolveVars(v) for v in value]\n    if isinstance(value, stl.base.QualifierValue.Resolved):\n        return value.Generate()\n    if isinstance(value, stl.base.LocalVar):\n        if value.value is None:\n            raise ValueError(\"LocalVar '%s' does not have a value.\" % value.name)\n        return value.value\n    if isinstance(value, stl.base.Func):\n        return value.Run()\n    if isinstance(value, MessageValue):\n        return value._EncodeToString()\n    return value", "docstring": "Resolve any variables or run any functions in |value|.\n\nArgs:\nvalue: Value which may have variables or functions to resolve.\nReturns:\nResolved value.\nRaises:\nValueError: If a concrete value for |value| cannot be determined.", "source": "github-repos"}
{"code": "def from_file(cls, fp, format_=None, fps=None, **kwargs):\n    if (format_ is None):\n        text = fp.read()\n        fragment = text[:10000]\n        format_ = autodetect_format(fragment)\n        fp = io.StringIO(text)\n    impl = get_format_class(format_)\n    subs = cls()\n    subs.format = format_\n    subs.fps = fps\n    impl.from_file(subs, fp, format_, fps=fps, **kwargs)\n    return subs", "docstring": "Read subtitle file from file object.\n\nSee :meth:`SSAFile.load()` for full description.\n\nNote:\nThis is a low-level method. Usually, one of :meth:`SSAFile.load()`\nor :meth:`SSAFile.from_string()` is preferable.\n\nArguments:\nfp (file object): A file object, ie. :class:`io.TextIOBase` instance.\nNote that the file must be opened in text mode (as opposed to binary).\n\nReturns:\nSSAFile", "source": "codesearchnet"}
{"code": "def inverseHistogram(hist, bin_range):\n    data = (hist.astype(float) / np.min(hist[np.nonzero(hist)]))\n    new_data = np.empty(shape=np.sum(data, dtype=int))\n    i = 0\n    xvals = np.linspace(bin_range[0], bin_range[1], len(data))\n    for (d, x) in zip(data, xvals):\n        new_data[i:(i + d)] = x\n        i += int(d)\n    return new_data", "docstring": "sample data from given histogram and min, max values within range\n\nReturns:\nnp.array: data that would create the same histogram as given", "source": "codesearchnet"}
{"code": "def get_program_type_by_slug(self, slug):\n    return self._load_data(self.PROGRAM_TYPES_ENDPOINT, resource_id=slug, default=None)", "docstring": "Get a program type by its slug.\n\nArguments:\nslug (str): The slug to identify the program type.\n\nReturns:\ndict: A program type object.", "source": "codesearchnet"}
{"code": "def ParseReceiverData(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    data = {}\n    key_url = self._GetRowValue(query_hash, row, 'request_key')\n\n    data_dict = {}\n    description = 'MacKeeper Entry'\n    \n    \n    if key_url.endswith('plist'):\n      description = 'Configuration Definition'\n      data['text'] = 'Plist content added to cache.'\n\n    elif key_url.startswith('http:\n      description = 'MacKeeper Event'\n      try:\n        _, _, part = key_url.partition('?')\n        data['text'] = part.replace('&', ' ')\n      except UnicodeDecodeError:\n        data['text'] = 'N/A'\n\n    elif key_url.startswith('http:\n      description = 'Account Activity'\n      _, _, activity = key_url.partition('\n      if activity:\n        data['text'] = 'Action started: {0:s}'.format(activity)\n      else:\n        data['text'] = 'Unknown activity.'\n\n    elif key_url.startswith('http:\n      description = 'Chat '\n      try:\n        jquery = self._GetRowValue(query_hash, row, 'data')\n        jquery = codecs.decode(jquery, 'utf-8')\n      except UnicodeDecodeError:\n        jquery = ''\n\n      data_dict = self._ExtractJQuery(jquery)\n      data = self._ParseChatData(data_dict)\n\n      data['entry_type'] = data_dict.get('type', '')\n      if data['entry_type'] == 'comment':\n        description += 'Comment'\n      elif data['entry_type'] == 'outgoing':\n        description += 'Outgoing Message'\n      elif data['entry_type'] == 'incoming':\n        description += 'Incoming Message'\n      else:\n        \n        description += 'Entry'\n        data['text'] = ';'.join(self._DictToListOfStrings(data_dict))\n        if not data['text']:\n          data['text'] = 'No additional data.'\n\n    event_data = MacKeeperCacheEventData()\n    event_data.description = description\n    event_data.event_type = data.get('event_type', None)\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.record_id = data.get('id', None)\n    event_data.room = data.get('room', None)\n    event_data.text = data.get('text', None)\n    event_data.url = key_url\n    event_data.user_name = data.get('user', None)\n    event_data.user_sid = data.get('sid', None)\n\n    time_value = self._GetRowValue(query_hash, row, 'time_string')\n    if isinstance(time_value, py2to3.INTEGER_TYPES):\n      date_time = dfdatetime_java_time.JavaTime(timestamp=time_value)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_ADDED)\n\n    else:\n      try:\n        timestamp = timelib.Timestamp.FromTimeString(time_value)\n      except errors.TimestampError:\n        parser_mediator.ProduceExtractionWarning(\n            'Unable to parse time string: {0:s}'.format(time_value))\n        return\n\n      event = time_events.TimestampEvent(\n          timestamp, definitions.TIME_DESCRIPTION_ADDED)\n\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a single row from the receiver and cache response table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _build_recursive_hd_gather(input_tensors, devices, red_op):\n    num_devices = len(devices)\n    num_hops = int(math.log(num_devices, 2))\n    if num_devices != 2 ** num_hops:\n        raise ValueError('num_devices must be a power of 2')\n    chunks = input_tensors\n    for h in range(0, num_hops):\n        span = 2 ** h\n        group_size = span * 2\n        new_chunks = [[] for _ in devices]\n        for d in range(0, num_devices):\n            if d % group_size >= group_size / 2:\n                continue\n            left_dev = devices[d]\n            right_dev = devices[d + span]\n            left_split = array_ops.split(chunks[d], 2)\n            right_split = array_ops.split(chunks[d + span], 2)\n            with ops.device(left_dev):\n                new_chunks[d] = red_op(left_split[0], right_split[0])\n            with ops.device(right_dev):\n                new_chunks[d + span] = red_op(left_split[1], right_split[1])\n        chunks = new_chunks\n    return chunks", "docstring": "Construct the gather phase of recursive halving-doubling all-reduce.\n\nArgs:\ninput_tensors: list of `tf.Tensor` to be elementwise reduced.\ndevices: a list of strings naming the devices hosting input_tensors,\nwhich will also be used to host the (partial) reduction values.\nred_op: a binary elementwise reduction Op.\n\nReturns:\nlist of `tf.Tensor` which are the fully reduced tensor shards.\n\nRaises:\nValueError: num_devices not a power of 2, or tensor len not divisible\nby 2 the proper number of times.", "source": "github-repos"}
{"code": "def sheets_tab_create(config, auth, sheet_url_or_name, sheet_tab):\n    sheet_id, tab_id = sheets_tab_id(config, auth, sheet_url_or_name, sheet_tab)\n    if tab_id is None:\n        sheets_batch_update(config, auth, sheet_url_or_name, {'requests': [{'addSheet': {'properties': {'title': sheet_tab}}}]})", "docstring": "Create a tab in a sheet.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nurl_or_name - one of: URL, document title, or id\nsheet_tab - name of tab to get id for\n\nNo Return", "source": "github-repos"}
{"code": "def compress_file(filepath, compression='gz'):\n    if (compression not in ['gz', 'bz2']):\n        raise ValueError(\"Supported compression formats are 'gz' and 'bz2'.\")\n    from monty.io import zopen\n    if (not filepath.lower().endswith(('.%s' % compression))):\n        with open(filepath, 'rb') as f_in, zopen(('%s.%s' % (filepath, compression)), 'wb') as f_out:\n            f_out.writelines(f_in)\n        os.remove(filepath)", "docstring": "Compresses a file with the correct extension. Functions like standard\nUnix command line gzip and bzip2 in the sense that the original\nuncompressed files are not retained.\n\nArgs:\nfilepath (str): Path to file.\ncompression (str): A compression mode. Valid options are \"gz\" or\n\"bz2\". Defaults to \"gz\".", "source": "codesearchnet"}
{"code": "def sg_summary_image(tensor, prefix=None, name=None):\n    r\n    \n    prefix = '' if prefix is None else prefix + '/'\n    \n    name = prefix + _pretty_name(tensor) if name is None else prefix + name\n    \n    if not tf.get_variable_scope().reuse:\n        tf.summary.image(name + '-im', tensor)", "docstring": "r\"\"\"Register `tensor` to summary report as `image`\n\nArgs:\ntensor: A tensor to log as image\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _Matches(path, pattern_list):\n    return any((fnmatch.fnmatchcase(path, pattern) for pattern in pattern_list))", "docstring": "Returns true if path matches any patten found in pattern_list.\n\nArgs:\npath: A dot separated path to a package, class, method or variable\npattern_list: A list of wildcard patterns\n\nReturns:\nTrue if path matches any wildcard found in pattern_list.", "source": "codesearchnet"}
{"code": "def readSchedules(self, tableset):\n    self.setContext('readSchedules')\n    try:\n        req_table = binascii.hexlify(str(tableset).zfill(1))\n        req_str = (('01523102303037' + req_table) + '282903')\n        self.request(False)\n        req_crc = self.calc_crc16(req_str[2:].decode('hex'))\n        req_str += req_crc\n        self.m_serial_port.write(req_str.decode('hex'))\n        raw_ret = self.m_serial_port.getResponse(self.getContext())\n        self.serialPostEnd()\n        return_crc = self.calc_crc16(raw_ret[1:(- 2)])\n        if (tableset == ReadSchedules.Schedules_1_To_4):\n            unpacked_read = self.unpackStruct(raw_ret, self.m_schd_1_to_4)\n            self.convertData(unpacked_read, self.m_schd_1_to_4, self.m_kwh_precision)\n            if (str(return_crc) == str(self.m_schd_1_to_4['crc16'][MeterData.StringValue])):\n                ekm_log('Schedules 1 to 4 CRC success (06 return')\n                self.setContext('')\n                return True\n        elif (tableset == ReadSchedules.Schedules_5_To_6):\n            unpacked_read = self.unpackStruct(raw_ret, self.m_schd_5_to_6)\n            self.convertData(unpacked_read, self.m_schd_5_to_6, self.m_kwh_precision)\n            if (str(return_crc) == str(self.m_schd_5_to_6['crc16'][MeterData.StringValue])):\n                ekm_log('Schedules 5 to 8 CRC success (06 return)')\n                self.setContext('')\n                return True\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return False", "docstring": "Serial call to read schedule tariffs buffer\n\nArgs:\ntableset (int): :class:`~ekmmeters.ReadSchedules` buffer to return.\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"}
{"code": "def load_extra_data(cls, data):\n    try:\n        cls._extra_config.update(json.loads(data))\n    except ValueError as exception:\n        sys.stderr.write('Could convert to JSON. {0:s}'.format(exception))\n        exit((- 1))", "docstring": "Loads extra JSON configuration parameters from a data buffer.\n\nThe data buffer must represent a JSON object.\n\nArgs:\ndata: str, the buffer to load the JSON data from.", "source": "codesearchnet"}
{"code": "def aggregate_single_gradient(grad_and_vars, use_mean, check_inf_nan):\n    grads = [g for (g, _) in grad_and_vars]\n    grad = tf.add_n(grads)\n    if (use_mean and (len(grads) > 1)):\n        grad = tf.multiply(grad, (1.0 / len(grads)))\n    v = grad_and_vars[0][1]\n    if check_inf_nan:\n        has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))\n        return ((grad, v), has_nan_or_inf)\n    else:\n        return ((grad, v), None)", "docstring": "Calculate the average gradient for a shared variable across all towers.\n\nNote that this function provides a synchronization point across all towers.\n\nArgs:\ngrad_and_vars: A list or tuple of (gradient, variable) tuples. Each\n(gradient, variable) pair within the outer list represents the gradient\nof the variable calculated for a single tower, and the number of pairs\nequals the number of towers.\nuse_mean: if True, mean is taken, else sum of gradients is taken.\ncheck_inf_nan: check grads for nans and infs.\n\nReturns:\nThe tuple ([(average_gradient, variable),], has_nan_or_inf) where the\ngradient has been averaged across all towers. The variable is chosen from\nthe first tower. The has_nan_or_inf indicates the grads has nan or inf.", "source": "codesearchnet"}
{"code": "def model_from_path(model_path, fuzziness=False):\n    app_name = '.'.join(model_path.split('.')[:(- 1)])\n    model_name = model_path.split('.')[(- 1)]\n    if (not app_name):\n        return None\n    module = importlib.import_module(app_name)\n    try:\n        model = getattr(module, model_name)\n    except AttributeError:\n        try:\n            model = getattr(getattr(module, 'models'), model_name)\n        except AttributeError:\n            model = get_model(model_name, app_name, fuzziness=fuzziness)\n    return model", "docstring": "Find the model class for a given model path like 'project.app.model'\n\nArgs:\npath (str): dot-delimited model path, like 'project.app.model'\n\nReturns:\nDjango Model-based class", "source": "codesearchnet"}
{"code": "def _CreateRouteTripsFolder(self, parent, route, style_id=None, schedule=None):\n    if (not route.trips):\n        return None\n    trips = list(route.trips)\n    trips.sort(key=(lambda x: x.trip_id))\n    trips_folder = self._CreateFolder(parent, 'Trips', visible=False)\n    for trip in trips:\n        if (self.date_filter and (not trip.service_period.IsActiveOn(self.date_filter))):\n            continue\n        if trip.trip_headsign:\n            description = ('Headsign: %s' % trip.trip_headsign)\n        else:\n            description = None\n        coordinate_list = []\n        for (secs, stoptime, tp) in trip.GetTimeInterpolatedStops():\n            if (self.altitude_per_sec > 0):\n                coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat, ((secs - (3600 * 4)) * self.altitude_per_sec)))\n            else:\n                coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat))\n        placemark = self._CreatePlacemark(trips_folder, trip.trip_id, style_id=style_id, visible=False, description=description)\n        self._CreateLineString(placemark, coordinate_list)\n    return trips_folder", "docstring": "Create a KML Folder containing all the trips in the route.\n\nThe folder contains a placemark for each of these trips. If there are no\ntrips in the route, no folder is created and None is returned.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nroute: The transitfeed.Route instance.\nstyle_id: A style id string for the placemarks or None.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "codesearchnet"}
{"code": "def list_skus(access_token, subscription_id, location, publisher, offer):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List available VM image skus for a publisher offer.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location. E.g. westus.\npublisher (str): VM image publisher. E.g. MicrosoftWindowsServer.\noffer (str): VM image offer. E.g. WindowsServer.\n\nReturns:\nHTTP response with JSON list of skus.", "source": "codesearchnet"}
{"code": "def split(self, value, lengths, name=None):\n    return self._implementation.split(value, lengths, name=name)", "docstring": "Split the values of a `Tensor` into the TensorArray.\n\nArgs:\nvalue: (N+1)-D.  Tensor of type `dtype`.  The Tensor to split.\nlengths: 1-D.  int32 vector with the lengths to use when splitting `value`\nalong its first dimension.\nname: A name for the operation (optional).\n\nReturns:\nA new TensorArray object with flow that ensures the split occurs.\nUse this object for all subsequent operations.\n\nRaises:\nValueError: if the shape inference fails.", "source": "github-repos"}
{"code": "def JoinKeyPath(path_segments):\n  \n  \n  \n\n  \n  path_segments = [\n      segment.split(definitions.KEY_PATH_SEPARATOR)\n      for segment in path_segments]\n\n  \n  path_segments = [\n      element for sublist in path_segments for element in sublist]\n\n  \n  path_segments = filter(None, path_segments)\n\n  key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)\n  if not key_path.startswith('HKEY_'):\n    key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)\n  return key_path", "docstring": "Joins the path segments into key path.\n\nArgs:\npath_segments (list[str]): Windows Registry key path segments.\n\nReturns:\nstr: key path.", "source": "juraj-google-style"}
{"code": "def LoadConfig(configuration):\n    parser = ConfigParser()\n    configuration.log.debug('Attempting to parse configuration file: %s', configuration.config_file)\n    parser.read(configuration.config_file)\n    default = 'DEFAULT'\n    default_source = FixValue(parser.get(default, Config.OPT_SOURCE))\n    default_cache = FixValue(parser.get(default, Config.OPT_CACHE))\n    configuration.timestamp_dir = FixValue(parser.get(default, Config.OPT_TIMESTAMP_DIR))\n    if parser.has_option(default, Config.OPT_LOCKFILE):\n        configuration.lockfile = FixValue(parser.get(default, Config.OPT_LOCKFILE))\n    if not configuration.maps:\n        maplist = FixValue(parser.get(default, Config.OPT_MAPS))\n        if maplist:\n            configuration.maps = [m.strip() for m in maplist.split(',')]\n        else:\n            configuration.maps = []\n    for map_name in configuration.maps:\n        map_options = MapOptions()\n        source = default_source\n        cache = default_cache\n        if parser.has_section(map_name):\n            if parser.has_option(map_name, Config.OPT_SOURCE):\n                source = FixValue(parser.get(map_name, Config.OPT_SOURCE))\n            if parser.has_option(map_name, Config.OPT_CACHE):\n                cache = FixValue(parser.get(map_name, Config.OPT_CACHE))\n        map_options.source = Options(parser.items(default), source)\n        map_options.cache = Options(parser.items(default), cache)\n        if parser.has_section(map_name):\n            options = Options(parser.items(map_name), source)\n            map_options.source.update(options)\n            options = Options(parser.items(map_name), cache)\n            map_options.cache.update(options)\n        map_options.source['name'] = source\n        map_options.cache['name'] = cache\n        configuration.options[map_name] = map_options\n    configuration.log.info('Configured maps are: %s', ', '.join(configuration.maps))\n    configuration.log.debug('loaded configuration: %r', configuration)", "docstring": "Load the on-disk configuration file and merge it into config.\n\nArgs:\nconfiguration: a config.Config object\n\nRaises:\nerror.NoConfigFound: no configuration file was found", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_header_map = self._GetDataTypeMap('asl_file_header')\n\n    try:\n      file_header, _ = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse file header with error: {0!s}'.format(\n              exception))\n\n    if file_header.signature != self._FILE_SIGNATURE:\n      raise errors.UnableToParseFile('Invalid file signature.')\n\n    \n\n    file_size = file_object.get_size()\n\n    if file_header.first_log_entry_offset > 0:\n      last_log_entry_offset = 0\n      file_offset = file_header.first_log_entry_offset\n\n      while file_offset < file_size:\n        last_log_entry_offset = file_offset\n\n        try:\n          file_offset = self._ParseRecord(\n              parser_mediator, file_object, file_offset)\n        except errors.ParseError as exception:\n          parser_mediator.ProduceExtractionWarning(\n              'unable to parse record with error: {0!s}'.format(exception))\n          return\n\n        if file_offset == 0:\n          break\n\n      if last_log_entry_offset != file_header.last_log_entry_offset:\n        parser_mediator.ProduceExtractionWarning(\n            'last log entry offset does not match value in file header.')", "docstring": "Parses an ASL file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def _on_channel_open(self, channel):\n        \n        channel.add_on_close_callback(self._on_channel_close)\n        channel.add_on_cancel_callback(self._on_cancel)\n\n        channel.basic_qos(callback=self._on_qosok, **config.conf[\"qos\"])", "docstring": "Callback used when a channel is opened.\n\nThis registers all the channel callbacks.\n\nArgs:\nchannel (pika.channel.Channel): The channel that successfully opened.", "source": "juraj-google-style"}
{"code": "def GetSubkeyByIndex(self, index):\n    \n    if not self._registry_key and self._registry:\n      self._GetKeyFromRegistry()\n\n    subkeys = list(self._subkeys.values())\n\n    if index < 0 or index >= len(subkeys):\n      raise IndexError('Index out of bounds.')\n\n    return subkeys[index]", "docstring": "Retrieves a subkey by index.\n\nArgs:\nindex (int): index of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.\n\nRaises:\nIndexError: if the index is out of bounds.", "source": "juraj-google-style"}
{"code": "def partition(self, id_):\n    from ..orm import Partition as OrmPartition\n    from sqlalchemy import or_\n    from ..identity import PartialPartitionName\n    if isinstance(id_, PartitionIdentity):\n        id_ = id_.id_\n    elif isinstance(id_, PartialPartitionName):\n        id_ = id_.promote(self.bundle.identity.name)\n    session = self.bundle.dataset._database.session\n    q = session.query(OrmPartition).filter((OrmPartition.d_vid == self.bundle.dataset.vid)).filter(or_((OrmPartition.id == str(id_).encode('ascii')), (OrmPartition.vid == str(id_).encode('ascii'))))\n    try:\n        orm_partition = q.one()\n        return self.bundle.wrap_partition(orm_partition)\n    except NoResultFound:\n        orm_partition = None\n    if (not orm_partition):\n        q = session.query(OrmPartition).filter((OrmPartition.d_vid == self.bundle.dataset.vid)).filter((OrmPartition.name == str(id_).encode('ascii')))\n        try:\n            orm_partition = q.one()\n            return self.bundle.wrap_partition(orm_partition)\n        except NoResultFound:\n            orm_partition = None\n    return orm_partition", "docstring": "Get a partition by the id number.\n\nArguments:\nid_ -- a partition id value\n\nReturns:\nA partitions.Partition object\n\nThrows:\na Sqlalchemy exception if the partition either does not exist or\nis not unique\n\nBecause this method works on the bundle, the id_ ( without version information )\nis equivalent to the vid ( with version information )", "source": "codesearchnet"}
{"code": "def on_success(self, inv_plugin, emit_set_slot):\n    self.dirty = set()\n    self.apply(inv_plugin)\n    for changed_slot in self.dirty:\n        emit_set_slot(changed_slot)", "docstring": "Called when the click was successful\nand should be applied to the inventory.\n\nArgs:\ninv_plugin (InventoryPlugin): inventory plugin instance\nemit_set_slot (func): function to signal a slot change,\nshould be InventoryPlugin().emit_set_slot", "source": "codesearchnet"}
{"code": "def housekeeping(self, **kwargs):\n    path = ('/projects/%s/housekeeping' % self.get_id())\n    self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Start the housekeeping task.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabHousekeepingError: If the server failed to perform the\nrequest", "source": "codesearchnet"}
{"code": "def assignHolidayDate(self, holiday, month, day):\n        \n        holiday += 1\n        if (month > 12) or (month < 0) or (day > 31) or (day < 0) or (holiday < 1) or (holiday > Extents.Holidays):\n            ekm_log(\"Out of bounds: month \" + str(month) + \" day \" + str(day) + \" holiday \" + str(holiday))\n            return False\n\n        day_str = \"Holiday_\" + str(holiday) + \"_Day\"\n        mon_str = \"Holiday_\" + str(holiday) + \"_Month\"\n        if day_str not in self.m_holiday_date_params:\n            ekm_log(\"Incorrect index: \" + day_str)\n            return False\n        if mon_str not in self.m_holiday_date_params:\n            ekm_log(\"Incorrect index: \" + mon_str)\n            return False\n        self.m_holiday_date_params[day_str] = day\n        self.m_holiday_date_params[mon_str] = month\n        return True", "docstring": "Set a singe holiday day and month in object buffer.\n\nThere is no class style enum for holidays.\n\nArgs:\nholiday (int): 0-19 or range(Extents.Holidays).\nmonth (int): Month 1-12.\nday (int): Day 1-31\n\nReturns:\nbool: True on completion.", "source": "juraj-google-style"}
{"code": "def delete(self, **options):\n    fut = delete_async(self.key(), **options)\n    fut.get_result()", "docstring": "Permanently delete this blob from Blobstore.\n\nArgs:\n**options: Options for create_rpc().", "source": "codesearchnet"}
{"code": "def send_msg_to_webhook(self, message):\n        \n        payload = {\n            'content':message\n        }\n\n        header = {\n            'Content-Type':'application/json'\n        }\n\n        try:\n            request = requests.post(\n                self.api_url,\n                headers=header,\n                json=payload\n            )\n            request.raise_for_status()\n        except Exception as error_msg:  \n            warning_msg = (\n                'EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' +\n                '\\n\\texception={0}'.format(repr(error_msg)) +\n                '\\n\\tmessage={0}'.format(message)\n            )\n            warnings.warn(\n                warning_msg,\n                exceptions.WebhookFailedEmitWarning\n            )", "docstring": "separated Requests logic for easier testing\n\nArgs:\nmessage (str): actual logging string to be passed to REST endpoint\n\nTodo:\n* Requests.text/json return for better testing options", "source": "juraj-google-style"}
{"code": "def _decompose_and_get_unitary(val: Union[('cirq.Operation', 'cirq.Gate')]) -> np.ndarray:\n    from cirq.protocols.apply_unitary import apply_unitary, ApplyUnitaryArgs\n    from cirq.protocols.decompose import decompose_once, decompose_once_with_qubits\n    from cirq import Gate, LineQubit, Operation\n    if isinstance(val, Operation):\n        qubits = val.qubits\n        decomposed_val = decompose_once(val, default=None)\n    elif isinstance(val, Gate):\n        qubits = tuple(LineQubit.range(val.num_qubits()))\n        decomposed_val = decompose_once_with_qubits(val, qubits, default=None)\n    if (decomposed_val is not None):\n        n = len(qubits)\n        state = np.eye((1 << n), dtype=np.complex128)\n        state.shape = ((2,) * (2 * n))\n        buffer = np.zeros(state.shape, dtype=np.complex128)\n        qubit_map = {q: i for (i, q) in enumerate(qubits)}\n        result = state\n        for op in decomposed_val:\n            indices = [qubit_map[q] for q in op.qubits]\n            result = apply_unitary(unitary_value=op, args=ApplyUnitaryArgs(state, buffer, indices), default=None)\n            if (result is None):\n                return None\n            if (result is buffer):\n                buffer = state\n            state = result\n        if (result is not None):\n            return result.reshape(((1 << n), (1 << n)))", "docstring": "Try to decompose a cirq.Operation or cirq.Gate, and return its unitary\nif it exists.\n\nReturns:\nIf `val` can be decomposed into unitaries, calculate the resulting\nunitary and return it. If it doesn't exist, None is returned.", "source": "codesearchnet"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    date_time_values = self._CopyDateTimeFromString(time_string)\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n    microseconds = date_time_values.get('microseconds', None)\n    if (year > 9999):\n        raise ValueError('Unsupported year value: {0:d}.'.format(year))\n    timestamp = self._GetNumberOfSecondsFromElements(year, month, day_of_month, hours, minutes, seconds)\n    timestamp = (float(timestamp) / definitions.SECONDS_PER_DAY)\n    timestamp += self._DELPHI_TO_POSIX_BASE\n    if (microseconds is not None):\n        timestamp += (float(microseconds) / definitions.MICROSECONDS_PER_DAY)\n    self._normalized_timestamp = None\n    self._timestamp = timestamp\n    self.is_local_time = False", "docstring": "Copies a Delphi TDateTime timestamp from a string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the time string is invalid or not supported.", "source": "codesearchnet"}
{"code": "def set_local_interface(self, value=None, default=False, disable=False):\n        \n        return self._configure_mlag('local-interface', value, default, disable)", "docstring": "Configures the mlag local-interface value\n\nArgs:\nvalue (str): The value to configure the local-interface\ndefault (bool): Configures the local-interface using the\ndefault keyword\ndisable (bool): Negates the local-interface using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "juraj-google-style"}
{"code": "def match_date(date):\n    \n    date_pattern = re.compile(\"^(19|20)\\d\\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])\")\n    if re.match(date_pattern, date):\n        return True\n\n    return False", "docstring": "Check if a string is a valid date\n\nArgs:\ndate(str)\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def profile_operations(self, options):\n    opts = _build_options(options)\n    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()\n    try:\n        tfprof_node.ParseFromString(print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))\n    except message.DecodeError as e:\n        sys.stderr.write('Cannot parse returned proto: %s.\\n' % e)\n    return tfprof_node", "docstring": "Profile the statistics of the Operation types (e.g.\n\nMatMul, Conv2D).\n\nArgs:\noptions: A dict of options. See core/profiler/g3doc/options.md.\n\nReturns:\na MultiGraphNodeProto that records the results.", "source": "github-repos"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        tstream = BytearrayStream()\n\n        self.hashing_algorithm.write(tstream, kmip_version=kmip_version)\n        self.digest_value.write(tstream, kmip_version=kmip_version)\n        self.key_format_type.write(tstream, kmip_version=kmip_version)\n\n        self.length = tstream.length()\n        super(Digest, self).write(ostream, kmip_version=kmip_version)\n        ostream.write(tstream.buffer)", "docstring": "Write the data encoding the Digest object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def show(self, objtype, objid):\n        \n        url = self._object_url(objtype, int(objid))\n        return self._make_request(url, method=\"get\")", "docstring": "Query for a specific resource by ID\n\nArgs:\nobjtype (str): object type, e.g. 'device', 'interface'\nobjid (int): object ID (DeviceID, etc.)\nReturns:\nA dict with that object\nRaises:\nrequests.exceptions.HTTPError", "source": "juraj-google-style"}
{"code": "def wait_for_stop(self, timeout=None):\n    return self._stop_event.wait(timeout)", "docstring": "Wait till the Coordinator is told to stop.\n\nArgs:\ntimeout: Float.  Sleep for up to that many seconds waiting for\nshould_stop() to become True.\n\nReturns:\nTrue if the Coordinator is told stop, False if the timeout expired.", "source": "github-repos"}
{"code": "def extract_certs(certs_txt: str) -> List[crypto.X509]:\n    pattern = '-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'\n    certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)\n    certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]\n    return certs", "docstring": "Extracts pycrypto X509 objects from SSL certificates chain string.\n\nArgs:\ncerts_txt: SSL certificates chain string.\n\nReturns:\nresult: List of pycrypto X509 objects.", "source": "codesearchnet"}
{"code": "def __init__(self, file_handle, schema):\n    if not file_handle.writable():\n        raise ValueError('Output stream must be writable')\n    self._file_handle = file_handle\n    avro_schema = fastavro.parse_schema(get_avro_schema_from_table_schema(schema))\n    self._avro_writer = fastavro.write.Writer(self._file_handle, avro_schema)", "docstring": "Initialize an AvroRowWriter.\n\nArgs:\nfile_handle (io.IOBase): Output stream to write Avro records to.\nschema (Dict[Text, Any]): BigQuery table schema.", "source": "github-repos"}
{"code": "def _ParseFileEntry(self, knowledge_base, file_entry):\n    \n    if file_entry.link:\n      \n      _, _, time_zone = file_entry.link.partition('zoneinfo/')\n\n    else:\n      \n      file_object = file_entry.GetFileObject()\n\n      time_zone = None\n      try:\n        time_zone_file = tz.tzfile(file_object)\n        date_time = datetime.datetime(2017, 1, 1)\n        time_zone = time_zone_file.tzname(date_time)\n\n      except ValueError:\n        \n        logger.error('Unable to read time zone information file.')\n\n      finally:\n        file_object.close()\n\n    \n    if time_zone:\n      try:\n        knowledge_base.SetTimeZone(time_zone)\n      except ValueError:\n        \n        logger.error('Unable to set time zone in knowledge base.')", "docstring": "Parses artifact file system data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_entry (dfvfs.FileEntry): file entry that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def Merge(self, other):\n    if type(self) != type(other):\n        raise TypeError('Attempt to Merge() differently typed Maps: %r != %r' % (type(self), type(other)))\n    if other.GetModifyTimestamp() and self.GetModifyTimestamp():\n        if other.GetModifyTimestamp() < self.GetModifyTimestamp():\n            raise error.InvalidMerge('Attempt to Merge a map with an older modify time into a newer one: other: %s, self: %s' % (other.GetModifyTimestamp(), self.GetModifyTimestamp()))\n    if other.GetUpdateTimestamp() and self.GetUpdateTimestamp():\n        if other.GetUpdateTimestamp() < self.GetUpdateTimestamp():\n            raise error.InvalidMerge('Attempt to Merge a map with an older update time into a newer one: other: %s, self: %s' % (other.GetUpdateTimestamp(), self.GetUpdateTimestamp()))\n    self.log.info('merging from a map of %d entries', len(other))\n    merge_count = 0\n    for their_entry in other:\n        if their_entry not in self:\n            if self.Add(their_entry):\n                merge_count += 1\n    self.log.info('%d of %d entries were new or modified', merge_count, len(other))\n    if merge_count > 0:\n        self.SetModifyTimestamp(other.GetModifyTimestamp())\n    self.SetUpdateTimestamp(other.GetUpdateTimestamp())\n    return merge_count > 0", "docstring": "Update this Map based on another Map.\n\nWalk over other and for each entry, Add() it if it doesn't\nexist -- this will update changed entries as well as adding\nnew ones.\n\nArgs:\nother: A maps.Map instance.\n\nReturns:\nTrue if anything was added or modified, False if\nnothing changed.\n\nRaises:\nTypeError: Merging differently typed Maps.\nInvalidMerge: Attempt to Merge an older map into a newer one.", "source": "github-repos"}
{"code": "def ProcessNewBlock(self, block):\n    added = set()\n    changed = set()\n    deleted = set()\n    try:\n        for tx in block.FullTransactions:\n            for (index, output) in enumerate(tx.outputs):\n                state = self.CheckAddressState(output.ScriptHash)\n                if ((state & AddressState.InWallet) > 0):\n                    key = CoinReference(tx.Hash, index)\n                    if (key in self._coins.keys()):\n                        coin = self._coins[key]\n                        coin.State |= CoinState.Confirmed\n                        changed.add(coin)\n                    else:\n                        newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Confirmed, transaction=tx)\n                        self._coins[key] = newcoin\n                        added.add(newcoin)\n                    if ((state & AddressState.WatchOnly) > 0):\n                        self._coins[key].State |= CoinState.WatchOnly\n                        changed.add(self._coins[key])\n        for tx in block.FullTransactions:\n            for input in tx.inputs:\n                if (input in self._coins.keys()):\n                    if (self._coins[input].Output.AssetId == Blockchain.SystemShare().Hash):\n                        coin = self._coins[input]\n                        coin.State |= (CoinState.Spent | CoinState.Confirmed)\n                        changed.add(coin)\n                    else:\n                        deleted.add(self._coins[input])\n                        del self._coins[input]\n        for claimTx in [tx for tx in block.Transactions if (tx.Type == TransactionType.ClaimTransaction)]:\n            for ref in claimTx.Claims:\n                if (ref in self._coins.keys()):\n                    deleted.add(self._coins[ref])\n                    del self._coins[ref]\n        self._current_height += 1\n        self.OnProcessNewBlock(block, added, changed, deleted)\n        if (((len(added) + len(deleted)) + len(changed)) > 0):\n            self.BalanceChanged()\n    except Exception as e:\n        traceback.print_stack()\n        traceback.print_exc()\n        logger.error(('could not process %s ' % e))", "docstring": "Processes a block on the blockchain.  This should be done in a sequential order, ie block 4 should be\nonly processed after block 3.\n\nArgs:\nblock: (neo.Core.Block) a block on the blockchain.", "source": "codesearchnet"}
{"code": "def _GetStat(self):\n    stat_object = vfs_stat.VFSStat()\n    stat_object.size = self.path_spec.range_size\n    stat_object.type = stat_object.TYPE_FILE\n    return stat_object", "docstring": "Retrieves a stat object.\n\nReturns:\nVFSStat: a stat object.\n\nRaises:\nBackEndError: when the encoded stream is missing.", "source": "codesearchnet"}
{"code": "def from_celery(cls, worker_name, job_dict, celery_app):\n    if ((not isinstance(job_dict, dict)) or ('id' not in job_dict)):\n        raise JobStatInvalid('The job description is missing important fields.')\n    async_result = AsyncResult(id=job_dict['id'], app=celery_app)\n    a_info = (async_result.info if isinstance(async_result.info, dict) else None)\n    return JobStats(name=(a_info.get('name', '') if (a_info is not None) else ''), job_id=job_dict['id'], job_type=(a_info.get('type', '') if (a_info is not None) else ''), workflow_id=(a_info.get('workflow_id', '') if (a_info is not None) else ''), queue=(a_info.get('queue', '') if (a_info is not None) else ''), start_time=(a_info.get('start_time', None) if (a_info is not None) else None), arguments=(a_info.get('arguments', {}) if (a_info is not None) else {}), acknowledged=job_dict['acknowledged'], func_name=job_dict['type'], hostname=job_dict['hostname'], worker_name=worker_name, worker_pid=job_dict['worker_pid'], routing_key=job_dict['delivery_info']['routing_key'])", "docstring": "Create a JobStats object from the dictionary returned by celery.\n\nArgs:\nworker_name (str): The name of the worker this jobs runs on.\njob_dict (dict): The dictionary as returned by celery.\ncelery_app: Reference to a celery application object.\n\nReturns:\nJobStats: A fully initialized JobStats object.", "source": "codesearchnet"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_Onnx'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def initialize_schema(connection):\n  \n  cursor = connection.cursor()\n  cursor.execute(\"PRAGMA application_id={}\".format(_TENSORBOARD_APPLICATION_ID))\n  cursor.execute(\"PRAGMA user_version={}\".format(_TENSORBOARD_USER_VERSION))\n  with connection:\n    for statement in _SCHEMA_STATEMENTS:\n      lines = statement.strip('\\n').split('\\n')\n      message = lines[0] + ('...' if len(lines) > 1 else '')\n      logger.debug('Running DB init statement: %s', message)\n      cursor.execute(statement)", "docstring": "Initializes the TensorBoard sqlite schema using the given connection.\n\nArgs:\nconnection: A sqlite DB connection.", "source": "juraj-google-style"}
{"code": "def create_datasets():\n    if use_device:\n        datasets = []\n        for i in range(len(self.embedding_devices)):\n            datasets.append(dataset_ops.DatasetV2.from_tensor_slices({'feature': [[[i % self._num_cores_per_replica]]]}).repeat())\n        return datasets\n    else:\n        dataset = strategy.distribute_datasets_from_function(input_fn, options=distribute_lib.InputOptions(experimental_fetch_to_device=False))\n        return [dataset]", "docstring": "Creates either a per-replica dataset, or multiple per-devices ones.\n\nThis function explicitly creates per-device datasets because the strategy\ndoes not produce a distributed dataset in the model-parallel case; there\nis only one replica. Without this consideration, the embeddings would be\nread as [0, 0] instead of the expected [0, 1] since all the devices would\nreceive the same value.\n\nReturns:\nA list of one or more dataset(s).", "source": "github-repos"}
{"code": "def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean', **dim_kwargs):\n    new_scn = self.copy(datasets=dataset_ids)\n    for (src_area, ds_ids) in new_scn.iter_by_area():\n        if (src_area is None):\n            for ds_id in ds_ids:\n                new_scn.datasets[ds_id] = self[ds_id]\n            continue\n        if (boundary != 'exact'):\n            raise NotImplementedError(\"boundary modes appart from 'exact' are not implemented yet.\")\n        target_area = src_area.aggregate(**dim_kwargs)\n        resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)\n        for ds_id in ds_ids:\n            res = self[ds_id].coarsen(boundary=boundary, side=side, func=func, **dim_kwargs)\n            new_scn.datasets[ds_id] = getattr(res, func)()\n            new_scn.datasets[ds_id].attrs['area'] = target_area\n            new_scn.datasets[ds_id].attrs['resolution'] = resolution\n    return new_scn", "docstring": "Create an aggregated version of the Scene.\n\nArgs:\ndataset_ids (iterable): DatasetIDs to include in the returned\n`Scene`. Defaults to all datasets.\nfunc (string): Function to apply on each aggregation window. One of\n'mean', 'sum', 'min', 'max', 'median', 'argmin',\n'argmax', 'prod', 'std', 'var'.\n'mean' is the default.\nboundary: Not implemented.\nside: Not implemented.\ndim_kwargs: the size of the windows to aggregate.\n\nReturns:\nA new aggregated scene\n\nSee also:\nxarray.DataArray.coarsen\n\nExample:\n`scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by\napplying the `min` function.", "source": "codesearchnet"}
{"code": "def BuildDefaultValue(self, value_cls):\n    try:\n        return value_cls()\n    except Exception as e:\n        logging.exception(e)\n        raise DefaultValueError((\"Can't create default for value %s: %s\" % (value_cls.__name__, e)))", "docstring": "Renders default value of a given class.\n\nArgs:\nvalue_cls: Default value of this class will be rendered. This class has to\nbe (or to be a subclass of) a self.value_class (i.e. a class that this\nrenderer is capable of rendering).\n\nReturns:\nAn initialized default value.\n\nRaises:\nDefaultValueError: if something goes wrong.", "source": "codesearchnet"}
{"code": "def remove(self, key):\n    self.raise_error_if_not_open()\n    if (key in self._file):\n        del self._file[key]", "docstring": "Remove the data stored for the given key.\n\nArgs:\nkey (str): Key of the data to remove.\n\nNote:\nThe container has to be opened in advance.", "source": "codesearchnet"}
{"code": "def load(config):\n    \n\n    if config.sys_path:\n        logger.debug(\"Appending %s to sys.path.\", config.sys_path)\n        sys.path.append(config.sys_path)\n        logger.debug(\"sys.path is now %s\", sys.path)\n    if config.lookups:\n        for key, handler in config.lookups.items():\n            register_lookup_handler(key, handler)\n\n    return config", "docstring": "Loads a stacker configuration by modifying sys paths, loading lookups,\netc.\n\nArgs:\nconfig (:class:`Config`): the stacker config to load.\n\nReturns:\n:class:`Config`: the stacker config provided above.", "source": "juraj-google-style"}
{"code": "def parse_init_dat(infile):\n    \n\n    \n    init_dict = {}\n\n    log.debug('{}: reading file...'.format(infile))\n    with open(infile, 'r') as f:\n        \n        head = [next(f).strip() for x in range(2)]\n\n    summary = head[0].split()\n    difficulty = summary[1]\n\n    top_template_info = head[1].split()\n    top_template_pdbchain = top_template_info[3]\n    top_template_pdb = top_template_pdbchain[:4]\n    top_template_chain = top_template_pdbchain[4:]\n\n    init_dict['difficulty'] = difficulty\n    init_dict['top_template_pdb'] = top_template_pdb\n    init_dict['top_template_chain'] = top_template_chain\n\n    return init_dict", "docstring": "Parse the main init.dat file which contains the modeling results\n\nThe first line of the file init.dat contains stuff like::\n\n\"120 easy  40   8\"\n\nThe other lines look like this::\n\n\"     161   11.051   1  1guqA MUSTER\"\n\nand getting the first 10 gives you the top 10 templates used in modeling\n\nArgs:\ninfile (stt): Path to init.dat\n\nReturns:\ndict: Dictionary of parsed information", "source": "juraj-google-style"}
{"code": "def resolve_mode(self, name):\n        \n        if name not in settings.CODEMIRROR_MODES:\n            msg = (\"Given config name '{}' does not exists in \"\n                   \"'settings.CODEMIRROR_MODES'.\")\n            raise UnknowModeError(msg.format(name))\n\n        return settings.CODEMIRROR_MODES.get(name)", "docstring": "From given mode name, return mode file path from\n``settings.CODEMIRROR_MODES`` map.\n\nArguments:\nname (string): Mode name.\n\nRaises:\nKeyError: When given name does not exist in\n``settings.CODEMIRROR_MODES``.\n\nReturns:\nstring: Mode file path.", "source": "juraj-google-style"}
{"code": "class PatchTSMixerLinearHead(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig, distribution_output=None):\n        super().__init__()\n        self.head_aggregation = config.head_aggregation\n        self.output_range = config.output_range\n        if config.head_aggregation is None:\n            mul_factor = config.num_patches\n        else:\n            mul_factor = 1\n        self.distribution_output = distribution_output\n        if distribution_output is None:\n            self.projection = nn.Linear(config.d_model * config.num_input_channels * mul_factor, config.num_targets)\n        else:\n            self.projection = distribution_output.get_parameter_projection(config.d_model * config.num_input_channels * mul_factor)\n        if config.head_aggregation is None:\n            self.flatten = nn.Flatten(start_dim=-3)\n        else:\n            self.flatten = nn.Flatten(start_dim=-2)\n        self.dropout = nn.Dropout(config.head_dropout)\n\n    def forward(self, hidden_features):\n        \n        hidden_features = hidden_features.transpose(-1, -2)\n        if self.head_aggregation == 'use_last':\n            hidden_features = hidden_features[..., -1]\n        elif self.head_aggregation == 'max_pool':\n            hidden_features = hidden_features.max(dim=-1).values\n        elif self.head_aggregation == 'avg_pool':\n            hidden_features = hidden_features.mean(dim=-1)\n        if self.flatten:\n            hidden_features = self.flatten(hidden_features)\n        hidden_features = self.dropout(hidden_features)\n        hidden_features = self.projection(hidden_features)\n        if self.distribution_output is None and self.output_range is not None:\n            hidden_features = torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0]\n        return hidden_features", "docstring": "Linear head for Classification and Regression.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def field(*, validate: Optional[Callable[[_In], _OutT]]=None, **kwargs: Any) -> dataclasses.Field[_OutT]:\n    if validate is None:\n        return dataclasses.field(**kwargs)\n    else:\n        field_ = _Field(validate=validate, field_kwargs=kwargs)\n        return typing.cast(dataclasses.Field, field_)", "docstring": "Like `dataclasses.field`, but allow `validator`.\n\nArgs:\nvalidate: A callable `(x) -> x` called each time the variable is assigned.\n**kwargs: Kwargs forwarded to `dataclasses.field`\n\nReturns:\nThe field.", "source": "github-repos"}
{"code": "def for_new_graph(*args, **kwargs):\n  \n  graph = tf.Graph()\n  with graph.as_default():\n    return for_default_graph(*args, **kwargs)", "docstring": "Creates a Bookkeeper for a new graph.\n\nYou must use `m.g.as_default()` to put the graph in scope:\n\nm = Bookkeeper.for_new_graph()\nwith m.g.as_default():\n...\n\nArgs:\n*args: Arguments to pass into Bookkeeper's constructor.\n**kwargs: Arguments to pass into Bookkeeper's constructor.\nReturns:\nA new Bookkeeper.", "source": "juraj-google-style"}
{"code": "def progress(self):\n    return Progress(done=len(self._get_all_set_properties()), base=len(worker_mapping()))", "docstring": "Get progress.\n\nReturns:\nnamedtuple: :class:`Progress`.", "source": "codesearchnet"}
{"code": "def inspect_node(self, node_id):\n    url = self._url('/nodes/{0}', node_id)\n    return self._result(self._get(url), True)", "docstring": "Retrieve low-level information about a swarm node\n\nArgs:\nnode_id (string): ID of the node to be inspected.\n\nReturns:\nA dictionary containing data about this node.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def __init__(self, match=Match(), table_id=0xff, out_port=Port.OFPP_NONE):\n        \n        super().__init__()\n        self.match = match\n        self.table_id = table_id\n        self.out_port = out_port", "docstring": "Create a AggregateStatsRequest with the optional parameters below.\n\nArgs:\nmatch (~pyof.v0x01.common.flow_match.Match): Fields to match.\ntable_id (int): ID of table to read (from pyof_table_stats) 0xff\nfor all tables or 0xfe for emergency.\nout_port (int): Require matching entries to include this as an\noutput port. A value of OFPP_NONE indicates no restriction.", "source": "juraj-google-style"}
{"code": "def expected_counts(dataframe, rownames, colnames):\n    \n    cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)\n    row_counts = cont_table['All']\n    column_counts = cont_table.loc['All']\n    total_observations = cont_table['All']['All']\n\n    \n    for column in cont_table.columns:\n        for row in cont_table.index:\n            cont_table[column][row] = column_counts[column]*row_counts[row]/total_observations\n    return cont_table", "docstring": "Expected counts of the multivariate frequency distribution of the variables given the\nnull hypothesis of complete independence between variables.\nArgs:\nrownames: the column name or list of columns names that make the keys of the rows\ncolnames: the column name or list of columns names that make the keys of the columns", "source": "juraj-google-style"}
{"code": "class PerceiverAudioPostprocessor(nn.Module):\n\n    def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str='patches') -> None:\n        super().__init__()\n        if postproc_type not in ('patches',):\n            raise ValueError('Invalid postproc_type!')\n        self.classifier = nn.Linear(in_channels, config.samples_per_patch)\n\n    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:\n        logits = self.classifier(inputs)\n        return torch.reshape(logits, [inputs.shape[0], -1])", "docstring": "Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.\n\nArgs:\nconfig ([*PerceiverConfig*]):\nModel configuration.\nin_channels (`int`):\nNumber of channels in the input.\npostproc_type (`str`, *optional*, defaults to `\"patches\"`):\nPostprocessor type to use. Currently, only \"patches\" is supported.", "source": "github-repos"}
{"code": "def dataset(self, mode, hparams=None, global_step=None, **kwargs):\n    datasets = [p.dataset(mode, **kwargs) for p in self.problems]\n    datasets = [d.map((lambda x, i=j: self.normalize_example(dict(x, problem_id=tf.constant([i])), hparams))) for (j, d) in enumerate(datasets)]\n    if (mode is problem.DatasetSplit.TRAIN):\n        if (global_step is None):\n            global_step = tf.train.get_or_create_global_step()\n        pmf = get_schedule_distribution(self.schedule, global_step)\n        return get_multi_dataset(datasets, pmf)\n    elif self.only_eval_first_problem:\n        return datasets[0]\n    else:\n        datasets = [d.repeat() for d in datasets]\n        return tf.data.Dataset.zip(tuple(datasets)).flat_map((lambda *x: functools.reduce(tf.data.Dataset.concatenate, map(tf.data.Dataset.from_tensors, x))))", "docstring": "Returns a dataset containing examples from multiple problems.\n\nArgs:\nmode: A member of problem.DatasetSplit.\nhparams: A tf.HParams object, the model hparams.\nglobal_step: A scalar tensor used to compute the sampling distribution.\nIf global_step is None, we call tf.train.get_or_create_global_step by\ndefault.\n**kwargs: Keywords for problem.Problem.Dataset.\n\nReturns:\nA dataset containing examples from multiple problems.", "source": "codesearchnet"}
{"code": "def set_number_of_partitions(self, number_of_partitions):\n    if self._frozen:\n        if self._number_of_partitions != number_of_partitions:\n            raise ValueError(f\"Can't set number_of_partitions to {number_of_partitions} since it has been frozen to use {self._number_of_partitions}.\")\n    else:\n        self._number_of_partitions = number_of_partitions", "docstring": "Sets the number of partitions for the current policy.\n\nIf the policy has been frozen then shard_dimension must match the\nexisting setting.\n\nArgs:\nnumber_of_partitions: The number of partitions to use in the policy.\n\nRaises:\nValueError: If the policy has been frozen and shard_dimension\ndiffers from the frozen value.", "source": "github-repos"}
{"code": "def to_script(self, wf_name='wf'):\n    self._closed()\n    script = []\n    params = []\n    returns = []\n    for (name, typ) in self.wf_inputs.items():\n        params.append(\"{}='{}'\".format(name, typ))\n        returns.append(name)\n    script.append('{} = {}.add_inputs({})'.format(', '.join(returns), wf_name, ', '.join(params)))\n    returns = []\n    for (name, step) in self.wf_steps.items():\n        pyname = step.python_name\n        returns = ['{}_{}'.format(pyname, o) for o in step['out']]\n        params = ['{}={}'.format(name, python_name(param)) for (name, param) in step['in'].items()]\n        script.append('{} = {}.{}({})'.format(', '.join(returns), wf_name, pyname, ', '.join(params)))\n    params = []\n    for (name, details) in self.wf_outputs.items():\n        params.append('{}={}'.format(name, python_name(details['outputSource'])))\n    script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))\n    return '\\n'.join(script)", "docstring": "Generated and print the scriptcwl script for the currunt workflow.\n\nArgs:\nwf_name (str): string used for the WorkflowGenerator object in the\ngenerated script (default: ``wf``).", "source": "codesearchnet"}
{"code": "def safe_indicator(self, indicator, errors='strict'):\n    if (indicator is not None):\n        try:\n            indicator = quote(self.s(str(indicator), errors=errors), safe='~')\n        except KeyError:\n            indicator = quote(bytes(indicator), safe='~')\n    return indicator", "docstring": "Indicator encode value for safe HTTP request.\n\nArgs:\nindicator (string): Indicator to URL Encode\nerrors (string): The error handler type.\n\nReturns:\n(string): The urlencoded string", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(batch, seq_len, embed_dim)`.\nattention_mask (`torch.FloatTensor`):\nAttention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*, defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def __solve_for_scalar(expr, vars):\n    \n    var = solve(expr, vars).value\n    try:\n        scalar = repeated.getvalue(var)\n    except TypeError:\n        raise errors.EfilterTypeError(\n            root=expr, query=expr.source,\n            message=\"Wasn't expecting more than one value here. Got %r.\"\n            % (var,))\n\n    if isinstance(scalar, row_tuple.RowTuple):\n        try:\n            return scalar.get_singleton()\n        except ValueError:\n            raise errors.EfilterTypeError(\n                root=expr, query=expr.source,\n                message=\"Was expecting a scalar value here. Got %r.\"\n                % (scalar,))\n    else:\n        return scalar", "docstring": "Helper: solve 'expr' always returning a scalar (not IRepeated).\n\nIf the output of 'expr' is a single value or a single RowTuple with a single\ncolumn then return the value in that column. Otherwise raise.\n\nArguments:\nexpr: Expression to solve.\nvars: The scope.\n\nReturns:\nA scalar value (not an IRepeated).\n\nRaises:\nEfilterTypeError if it cannot get a scalar.", "source": "juraj-google-style"}
{"code": "def safe_rt(resource_type, lower=False):\n        \n        if resource_type is not None:\n            resource_type = resource_type.replace(' ', '_')\n            if lower:\n                resource_type = resource_type.lower()\n        return resource_type", "docstring": "Format the Resource Type.\n\nTakes Custom Indicator types with a space character and return a *safe* string.\n\n(e.g. *User Agent* is converted to User_Agent or user_agent.)\n\nArgs:\nresource_type (string): The resource type to format.\nlower (boolean): Return type in all lower case\n\nReturns:\n(string): The formatted resource type.", "source": "juraj-google-style"}
{"code": "def collection(self, collection_id):\n        \n        child_path = self._path + (collection_id,)\n        return self._client.collection(*child_path)", "docstring": "Create a sub-collection underneath the current document.\n\nArgs:\ncollection_id (str): The sub-collection identifier (sometimes\nreferred to as the \"kind\").\n\nReturns:\n~.firestore_v1beta1.collection.CollectionReference: The\nchild collection.", "source": "juraj-google-style"}
{"code": "def write(self, file_name):\n    try:\n        assert (file_name[(- 6):] == '.xhtml')\n    except (AssertionError, IndexError):\n        raise ValueError('filename must end with .xhtml')\n    with open(file_name, 'wb') as f:\n        f.write(self.content.encode('utf-8'))", "docstring": "Writes the chapter object to an xhtml file.\n\nArgs:\nfile_name (str): The full name of the xhtml file to save to.", "source": "codesearchnet"}
{"code": "def create_autoscale_setting(access_token, subscription_id, resource_group, setting_name, vmss_name, location, minval, maxval, default, autoscale_rules, notify=None):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/autoscaleSettings/', setting_name, '?api-version=', INSIGHTS_API])\n    autoscale_setting = {'location': location}\n    profile = {'name': 'Profile1'}\n    capacity = {'minimum': str(minval)}\n    capacity['maximum'] = str(maxval)\n    capacity['default'] = str(default)\n    profile['capacity'] = capacity\n    profile['rules'] = autoscale_rules\n    profiles = [profile]\n    properties = {'name': setting_name}\n    properties['profiles'] = profiles\n    properties['targetResourceUri'] = ((((('/subscriptions/' + subscription_id) + '/resourceGroups/') + resource_group) + '/providers/Microsoft.Compute/virtualMachineScaleSets/') + vmss_name)\n    properties['enabled'] = True\n    if (notify is not None):\n        notification = {'operation': 'Scale'}\n        email = {'sendToSubscriptionAdministrato': False}\n        email['sendToSubscriptionCoAdministrators'] = False\n        email['customEmails'] = [notify]\n        notification = {'email': email}\n        properties['notifications'] = [notification]\n    autoscale_setting['properties'] = properties\n    body = json.dumps(autoscale_setting)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a new autoscale setting for a scale set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nsetting_name (str): Name of the autoscale setting.\nvmss_name (str): Name of scale set to apply scale events to.\nlocation (str): Azure data center location. E.g. westus.\nminval (int): Minimum number of VMs.\nmaxval (int): Maximum number of VMs.\ndefault (int): Default VM number when no data available.\nautoscale_rules (list): List of outputs from create_autoscale_rule().\nnotify (str): Optional.\nReturns:\nHTTP response. JSON body of autoscale setting.", "source": "codesearchnet"}
{"code": "def isPortAvailable(port='/dev/ttyUSB0'):\n        \n        isPortAvailable = serial.tools.list_ports.grep(port)\n\n        try:\n            next(isPortAvailable)\n            available = True\n        except StopIteration:\n            available = False\n\n        return available", "docstring": "Checks whether specified port is available.\n\nSource code derived from @lqdev suggestion per #38\n\nArgs:\nport: Serial port location i.e. 'COM1'. Default is /dev/ttyUSB0\n\nReturns:\navailable: Boolean value indicating presence of port", "source": "juraj-google-style"}
{"code": "def export(bundle, force=False, force_restricted=False):\n    if (not ckan):\n        raise EnvironmentError(MISSING_CREDENTIALS_MSG)\n    try:\n        ckan.action.package_create(**_convert_bundle(bundle))\n    except ckanapi.ValidationError:\n        if force:\n            logger.warning('{} dataset already exported, but new export forced. Continue to export dataset stuff.'.format(bundle.dataset))\n        else:\n            raise\n    access = bundle.dataset.config.metadata.about.access\n    if ((access == 'restricted') and force_restricted):\n        access = 'private'\n    assert access, 'CKAN publishing requires access level.'\n    if (access in ('internal', 'controlled', 'restricted', 'census')):\n        raise UnpublishedAccessError('{} dataset can not be published because of {} access.'.format(bundle.dataset.vid, bundle.dataset.config.metadata.about.access))\n    elif (access == 'public'):\n        user_roles = [{'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}, {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}]\n    elif (access == 'registered'):\n        user_roles = [{'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}, {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}]\n    elif (access in ('private', 'licensed', 'test')):\n        user_roles = [{'user': 'visitor', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}, {'user': 'logged_in', 'domain_object': bundle.dataset.vid.lower(), 'roles': []}]\n        organization_users = ckan.action.organization_show(id=CKAN_CONFIG.organization)['users']\n        for user in organization_users:\n            (user_roles.append({'user': user['id'], 'domain_object': bundle.dataset.vid.lower(), 'roles': ['editor']}),)\n    for role in user_roles:\n        ckan.action.user_role_update(**role)\n    for partition in bundle.partitions:\n        ckan.action.resource_create(**_convert_partition(partition))\n    ckan.action.resource_create(**_convert_schema(bundle))\n    for (name, external) in six.iteritems(bundle.dataset.config.metadata.external_documentation):\n        ckan.action.resource_create(**_convert_external(bundle, name, external))", "docstring": "Exports bundle to ckan instance.\n\nArgs:\nbundle (ambry.bundle.Bundle):\nforce (bool, optional): if True, ignore existance error and continue to export.\nforce_restricted (bool, optional): if True, then export restricted bundles as private (for debugging\npurposes).\n\nRaises:\nEnvironmentError: if ckan credentials are missing or invalid.\nUnpublishedAccessError: if dataset has unpublished access - one from ('internal', 'test',\n'controlled', 'restricted', 'census').", "source": "codesearchnet"}
{"code": "def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats, stat_dtype):\n    if self._fused:\n        (batch_norm_op, mean, variance) = self._fused_batch_norm_op(input_batch, self._moving_mean, self._moving_variance, use_batch_stats)\n    else:\n        batch_norm_op = tf.nn.batch_normalization(input_batch, mean, variance, self._beta, self._gamma, self._eps, name='batch_norm')\n        if (input_batch.dtype.base_dtype != stat_dtype):\n            mean = tf.cast(mean, stat_dtype)\n            variance = tf.cast(variance, stat_dtype)\n    return (batch_norm_op, mean, variance)", "docstring": "Creates a batch normalization op.\n\nIt uses the tf.nn.batch_normalization op by default and the\ntf.nn.fused_batch_norm op to support fused batch normalization.\n\nArgs:\ninput_batch: A input Tensor of arbitrary dimension.\nmean: A mean tensor, of the same dtype as `input_batch`.\nvariance: A variance tensor, of the same dtype as `input_batch`.\nuse_batch_stats: A bool value that indicates whether the operation should\nuse the batch statistics.\nstat_dtype: TensorFlow datatype used for the moving mean and variance.\n\nReturns:\nA batch normalization operation.\nThe current mean tensor, of datatype `stat_dtype`.\nThe current variance tensor, of datatype `stat_dtype`.", "source": "codesearchnet"}
{"code": "def output(self, stream, value):\n        \n        if stream not in self.outputs:\n            raise ValueError(\"Stream is not an output of this operator.\")\n        e = self.expression(value)\n        e._stream = stream\n        return e", "docstring": "SPL output port assignment expression.\n\nArguments:\nstream(Stream): Output stream the assignment is for.\nvalue(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.\n\nReturns:\nExpression: Output assignment expression that is valid as a the context of this operator.", "source": "juraj-google-style"}
{"code": "def sentences(self, index = None):\n        \n        if index is None:\n            return self.select(Sentence,None,True,default_ignore_structure)\n        else:\n            if index < 0:\n                index = self.count(Sentence,None,True,default_ignore_structure) + index\n            for i,e in enumerate(self.select(Sentence,None,True,default_ignore_structure)):\n                if i == index:\n                    return e\n            raise IndexError", "docstring": "Returns a generator of Sentence elements found (recursively) under this element\n\nArguments:\nindex (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning a generator of all", "source": "juraj-google-style"}
{"code": "def start(self, input_data, output_data, transform_resources, **kwargs):\n        \n        self.transform_resources = transform_resources\n        self.input_data = input_data\n        self.output_data = output_data\n\n        image = self.primary_container['Image']\n        instance_type = transform_resources['InstanceType']\n        instance_count = 1\n\n        environment = self._get_container_environment(**kwargs)\n\n        \n        self.container = _SageMakerContainer(instance_type, instance_count, image, self.local_session)\n        self.container.serve(self.primary_container['ModelDataUrl'], environment)\n\n        serving_port = get_config_value('local.serving_port', self.local_session.config) or 8080\n        _wait_for_serving_container(serving_port)\n\n        \n        endpoint_url = 'http:\n        response, code = _perform_request(endpoint_url)\n        if code == 200:\n            execution_parameters = json.loads(response.read())\n            \n            for setting in ('BatchStrategy', 'MaxPayloadInMB'):\n                if setting not in kwargs and setting in execution_parameters:\n                    kwargs[setting] = execution_parameters[setting]\n\n        \n        kwargs.update(self._get_required_defaults(**kwargs))\n\n        self.start_time = datetime.datetime.now()\n        self.batch_strategy = kwargs['BatchStrategy']\n        if 'Environment' in kwargs:\n            self.environment = kwargs['Environment']\n\n        \n        self._perform_batch_inference(input_data, output_data, **kwargs)\n        self.end_time = datetime.datetime.now()\n        self.state = self._COMPLETED", "docstring": "Start the Local Transform Job\n\nArgs:\ninput_data (dict): Describes the dataset to be transformed and the location where it is stored.\noutput_data (dict): Identifies the location where to save the results from the transform job\ntransform_resources (dict): compute instances for the transform job. Currently only supports local or\nlocal_gpu\n**kwargs: additional arguments coming from the boto request object", "source": "juraj-google-style"}
{"code": "def add_state(self, state_name, initial_state, batch_size=None):\n    \n    state_shape = initial_state.get_shape().as_list()\n    full_shape = [batch_size] + state_shape\n    if not batch_size:\n      \n      \n      \n      \n      \n      shape_proto = self._as_shape_proto([0] + state_shape)\n      batch_size = 1\n    else:\n      shape_proto = self._as_shape_proto([batch_size] + state_shape)\n\n    \n    \n    \n    tiles = [batch_size] + ([1] * len(initial_state.get_shape()))\n    feed_op = tf.placeholder_with_default(\n        tf.tile(\n            tf.expand_dims(initial_state, [0]), tiles),\n        shape=full_shape,\n        name='%s_feed' % state_name)\n    s = {'feed_op': feed_op,\n         'feed_type': initial_state.dtype,\n         'feed_shape': shape_proto}\n    self._states[state_name] = s", "docstring": "Adds a state to the state saver.\n\nArgs:\nstate_name: The name of this state.\ninitial_state: The initial state vector. Only zeros are supported.\nbatch_size: The batch_size or None for unknown.", "source": "juraj-google-style"}
{"code": "def from_csv(cls, filename=None, text=None):\n    if ((filename is None) and (text is None)):\n        raise LegendError('You must provide a filename or CSV text.')\n    if (filename is not None):\n        with open(filename, 'r') as f:\n            text = f.read()\n    try:\n        f = StringIO(text)\n    except TypeError:\n        f = StringIO(unicode(text))\n    r = csv.DictReader(f, skipinitialspace=True)\n    (list_of_Decors, components) = ([], [])\n    kind = 'component'\n    for row in r:\n        (d, component) = ({}, {})\n        for (k, v) in row.items():\n            if (k in [None, '']):\n                continue\n            if (v in [None, '']):\n                if (k.lower() not in ['color', 'colour']):\n                    continue\n            if (k[:4].lower() == 'comp'):\n                prop = ' '.join(k.split()[1:])\n                if (v.lower() == 'true'):\n                    component[prop] = True\n                elif (v.lower() == 'false'):\n                    component[prop] = False\n                else:\n                    try:\n                        component[prop] = float(v)\n                    except ValueError:\n                        component[prop] = v.lower()\n            elif (k[:5].lower() == 'curve'):\n                prop = ' '.join(k.split()[1:])\n                component[prop] = v.lower()\n                kind = 'curve'\n            else:\n                try:\n                    d[k] = float(v)\n                except ValueError:\n                    d[k] = v.lower()\n        this_component = Component(component)\n        d[kind] = this_component\n        if (this_component in components):\n            with warnings.catch_warnings():\n                warnings.simplefilter('always')\n                w = 'This legend contains duplicate components.'\n                warnings.warn(w)\n        components.append(this_component)\n        list_of_Decors.append(Decor(d))\n    return cls(list_of_Decors)", "docstring": "Read CSV text and generate a Legend.\n\nArgs:\nstring (str): The CSV string.\n\nIn the first row, list the properties. Precede the properties of the\ncomponent with 'comp ' or 'component '. For example:\n\ncolour,  width, comp lithology, comp colour\n#FFFFFF, 0, ,\n#F7E9A6, 3, Sandstone, Grey\n#FF99CC, 2, Anhydrite,\n... etc\n\nNote:\nTo edit a legend, the easiest thing to do is probably this:\n\n- `legend.to_csv()`\n- Edit the legend, call it `new_legend`.\n- `legend = Legend.from_csv(text=new_legend)`", "source": "codesearchnet"}
{"code": "def list_metadata(self, resource):\n    self.metadata_service.set_auth(self._token_metadata)\n    return self.metadata_service.list(resource)", "docstring": "List all keys associated with the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on a failure.", "source": "codesearchnet"}
{"code": "def read_molden(inputfile, start_index=0, get_bonds=True):\n    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian\n    with open(inputfile, 'r') as f:\n        found = False\n        while (not found):\n            line = f.readline()\n            if ('[N_GEO]' in line):\n                found = True\n                number_of_molecules = int(f.readline().strip())\n        energies = []\n        found = False\n        while (not found):\n            line = f.readline()\n            if ('energy' in line):\n                found = True\n                for _ in range(number_of_molecules):\n                    energies.append(float(f.readline().strip()))\n        found = False\n        while (not found):\n            line = f.readline()\n            if ('[GEOMETRIES] (XYZ)' in line):\n                found = True\n                current_line = f.tell()\n                number_of_atoms = int(f.readline().strip())\n                f.seek(current_line)\n        cartesians = []\n        for energy in energies:\n            cartesian = Cartesian.read_xyz(f, start_index=start_index, get_bonds=get_bonds, nrows=number_of_atoms, engine='python')\n            cartesian.metadata['energy'] = energy\n            cartesians.append(cartesian)\n    return cartesians", "docstring": "Read a molden file.\n\nArgs:\ninputfile (str):\nstart_index (int):\n\nReturns:\nlist: A list containing :class:`~chemcoord.Cartesian` is returned.", "source": "codesearchnet"}
{"code": "def do_patch(endpoint, body, access_token):\n    \n    headers = {\"content-type\": \"application/json\", \"Authorization\": 'Bearer ' + access_token}\n    headers['User-Agent'] = get_user_agent()\n    return requests.patch(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP PATCH request and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\nbody (str): JSON body of information to patch.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _new_open_bin(self, remaining_rect):\n    factories_to_delete = set()\n    new_bin = None\n    for (key, binfac) in self._empty_bins.items():\n        a_rectangle_fits = False\n        for (_, rect) in remaining_rect.items():\n            if binfac.fits_inside(rect[0], rect[1]):\n                a_rectangle_fits = True\n                break\n        if (not a_rectangle_fits):\n            factories_to_delete.add(key)\n            continue\n        new_bin = binfac.new_bin()\n        if (new_bin is None):\n            continue\n        self._open_bins.append(new_bin)\n        if binfac.is_empty():\n            factories_to_delete.add(key)\n        break\n    for f in factories_to_delete:\n        del self._empty_bins[f]\n    return new_bin", "docstring": "Extract the next bin where at least one of the rectangles in\nrem\n\nArguments:\nremaining_rect (dict): rectangles not placed yet\n\nReturns:\nPackingAlgorithm: Initialized empty packing bin.\nNone: No bin big enough for the rectangle was found", "source": "codesearchnet"}
{"code": "def exchange(self, pubkey):\n        \n        try:\n            return self.priv.exchange(c_ec.ECDH(), pubkey.publ)\n        except ValueError as e:\n            raise s_exc.BadEccExchange(mesg=str(e))", "docstring": "Perform a ECDH key exchange with a public key.\n\nArgs:\npubkey (PubKey): A PubKey to perform the ECDH with.\n\nReturns:\nbytes: The ECDH bytes. This is deterministic for a given pubkey\nand private key.", "source": "juraj-google-style"}
{"code": "def upload_from_url(cls, url, store=None, filename=None):\n    if (store is None):\n        store = 'auto'\n    elif store:\n        store = '1'\n    else:\n        store = '0'\n    data = {'source_url': url, 'store': store}\n    if filename:\n        data['filename'] = filename\n    result = uploading_request('POST', 'from_url/', data=data)\n    if ('token' not in result):\n        raise APIError('could not find token in result: {0}'.format(result))\n    file_from_url = cls.FileFromUrl(result['token'])\n    return file_from_url", "docstring": "Uploads file from given url and returns ``FileFromUrl`` instance.\n\nArgs:\n- url (str): URL of file to upload to\n- store (Optional[bool]): Should the file be automatically stored\nupon upload. Defaults to None.\n- False - do not store file\n- True - store file (can result in error if autostore\nis disabled for project)\n- None - use project settings\n- filename (Optional[str]): Name of the uploaded file. If this not\nspecified the filename will be obtained from response headers\nor source URL. Defaults to None.\n\nReturns:\n``FileFromUrl`` instance", "source": "codesearchnet"}
{"code": "def _get_resource_view(self, resource_view):\n        \n        \n        if isinstance(resource_view, dict):\n            resource_view = ResourceView(resource_view, configuration=self.configuration)\n        if isinstance(resource_view, ResourceView):\n            return resource_view\n        raise HDXError('Type %s is not a valid resource view!' % type(resource_view).__name__)", "docstring": "Get resource view id\n\nArgs:\nresource_view (Union[ResourceView,Dict]): ResourceView metadata from a ResourceView object or dictionary\n\nReturns:\nResourceView: ResourceView object", "source": "juraj-google-style"}
{"code": "def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):\n    if isinstance(value, (int, float)):\n        return unitized(value, default_unit, base)\n    if (value is None):\n        return None\n    try:\n        if (value[(- 1)].lower() == 'b'):\n            value = value[:(- 1)]\n        unit = value[(- 1):].lower()\n        if unit.isdigit():\n            unit = default_unit\n        else:\n            value = value[:(- 1)]\n        return unitized(to_number(float, value), unit, base)\n    except (IndexError, TypeError, ValueError):\n        return None", "docstring": "Convert `value` to bytes, accepts notations such as \"4k\" to mean 4096 bytes\n\nArgs:\nvalue (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS\ndefault_unit (str | unicode | None): Default unit to use for unqualified values\nbase (int): Base to use (usually 1024)\n\nReturns:\n(int | None): Deduced bytesize value, if possible", "source": "codesearchnet"}
{"code": "def read_trailer(self):\n    _logger.debug('Reading chunked trailer.')\n    trailer_data_list = []\n    while True:\n        trailer_data = (yield from self._connection.readline())\n        trailer_data_list.append(trailer_data)\n        if (not trailer_data.strip()):\n            break\n    return b''.join(trailer_data_list)", "docstring": "Read the HTTP trailer fields.\n\nReturns:\nbytes: The trailer data.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def show(self, *args, **kwargs):\n    plt = self.get_pourbaix_plot(*args, **kwargs)\n    plt.show()", "docstring": "Shows the pourbaix plot\n\nArgs:\n*args: args to get_pourbaix_plot\n**kwargs: kwargs to get_pourbaix_plot\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def random_square_mask(shape, fraction):\n    mask = np.ones(shape)\n    patch_area = ((shape[0] * shape[1]) * fraction)\n    patch_dim = np.int(math.floor(math.sqrt(patch_area)))\n    if ((patch_area == 0) or (patch_dim == 0)):\n        return mask\n    x = np.random.randint((shape[0] - patch_dim))\n    y = np.random.randint((shape[1] - patch_dim))\n    mask[(x:(x + patch_dim), y:(y + patch_dim), :)] = 0\n    return mask", "docstring": "Create a numpy array with specified shape and masked fraction.\n\nArgs:\nshape: tuple, shape of the mask to create.\nfraction: float, fraction of the mask area to populate with `mask_scalar`.\n\nReturns:\nnumpy.array: A numpy array storing the mask.", "source": "codesearchnet"}
{"code": "def non_trainable_variables(self):\n    return tuple(self._flatten(predicate=_is_non_trainable_variable, expand_composites=True))", "docstring": "Sequence of non-trainable variables owned by this module and its submodules.\n\nNote: this method uses reflection to find variables on the current instance\nand submodules. For performance reasons you may wish to cache the result\nof calling this method if you don't expect the return value to change.\n\nReturns:\nA sequence of variables for the current module (sorted by attribute\nname) followed by variables from all submodules recursively (breadth\nfirst).", "source": "github-repos"}
{"code": "def Cancel(self, request, global_params=None):\n    config = self.GetMethodConfig('Cancel')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.\n\nArgs:\nrequest: (BigqueryJobsCancelRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(JobCancelResponse) The response message.", "source": "github-repos"}
{"code": "def download_and_extract(path, url, input_filename, target_filename):\n    logging.info(('Downloading and extracting data to: %s' % path))\n    input_file = find_file(path, input_filename)\n    target_file = find_file(path, target_filename)\n    if (input_file and target_file):\n        logging.info(('Already downloaded and extracted %s.' % url))\n        return (input_file, target_file)\n    compressed_file = download_from_url(path, url)\n    logging.info(('Extracting %s.' % compressed_file))\n    with tarfile.open(compressed_file, 'r:gz') as corpus_tar:\n        corpus_tar.extractall(path)\n    input_file = find_file(path, input_filename)\n    target_file = find_file(path, target_filename)\n    if (input_file and target_file):\n        return (input_file, target_file)\n    raise OSError(('Download/extraction failed for url %s to path %s' % (url, path)))", "docstring": "Extract files from downloaded compressed archive file.\n\nArgs:\npath: string directory where the files will be downloaded\nurl: url containing the compressed input and target files\ninput_filename: name of file containing data in source language\ntarget_filename: name of file containing data in target language\n\nReturns:\nFull paths to extracted input and target files.\n\nRaises:\nOSError: if the the download/extraction fails.", "source": "codesearchnet"}
{"code": "def expand_tile(units, axis):\n    \n    assert axis in (1, 2)\n    n_time_steps = tf.shape(units)[1]\n    repetitions = [1, 1, 1, 1]\n    repetitions[axis] = n_time_steps\n    return tf.tile(tf.expand_dims(units, axis), repetitions)", "docstring": "Expand and tile tensor along given axis\nArgs:\nunits: tf tensor with dimensions [batch_size, time_steps, n_input_features]\naxis: axis along which expand and tile. Must be 1 or 2", "source": "juraj-google-style"}
{"code": "def remove_location(self, location):\n    res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')\n    if (not res):\n        res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')\n    if (not res):\n        res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')\n    return res", "docstring": "Remove a location. If the location is already added, it is ignored.\n\nArgs:\nlocation (str): Location to remove\n\nReturns:\nbool: True if location removed or False if not", "source": "codesearchnet"}
{"code": "def _add_dns_records(self, conf, mgmts):\n        \n\n        nets = conf['nets']\n        dns_mgmt = mgmts[-1]\n        LOGGER.debug('Using network %s as main DNS server', dns_mgmt)\n        forward = conf['nets'][dns_mgmt].get('gw')\n        dns_records = {}\n        for net_name, net_spec in nets.iteritems():\n            dns_records.update(net_spec['mapping'].copy())\n            if net_name not in mgmts:\n                net_spec['dns_forward'] = forward\n\n        for mgmt in mgmts:\n            if nets[mgmt].get('dns_records'):\n                nets[mgmt]['dns_records'].update(dns_records)\n            else:\n                nets[mgmt]['dns_records'] = dns_records", "docstring": "Add DNS records dict('dns_records') to ``conf`` for each\nmanagement network. Add DNS forwarder IP('dns_forward') for each none\nmanagement network.\n\n\nArgs:\nconf(spec): spec\nmgmts(list): management networks names\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_answers_for_student(student_item):\n    submissions = sub_api.get_submissions(student_item)\n    if (not submissions):\n        return Answers()\n    latest_submission = submissions[0]\n    latest_answer_item = latest_submission.get('answer', {})\n    return Answers(latest_answer_item.get(ANSWER_LIST_KEY, []))", "docstring": "Retrieve answers from backend for a student and question\n\nArgs:\nstudent_item (dict): The location of the problem this submission is\nassociated with, as defined by a course, student, and item.\n\nReturns:\nAnswers: answers for the student", "source": "codesearchnet"}
{"code": "def _validate_recurse_directive_types(current_schema_type, field_schema_type, context):\n    type_hints = context['type_equivalence_hints'].get(field_schema_type)\n    type_hints_inverse = context['type_equivalence_hints_inverse'].get(field_schema_type)\n    allowed_current_types = {field_schema_type}\n    if (type_hints and isinstance(type_hints, GraphQLUnionType)):\n        allowed_current_types.update(type_hints.types)\n    if (type_hints_inverse and isinstance(type_hints_inverse, GraphQLUnionType)):\n        allowed_current_types.update(type_hints_inverse.types)\n    current_scope_is_allowed = (current_schema_type in allowed_current_types)\n    is_implemented_interface = (isinstance(field_schema_type, GraphQLInterfaceType) and isinstance(current_schema_type, GraphQLObjectType) and (field_schema_type in current_schema_type.interfaces))\n    if (not any((current_scope_is_allowed, is_implemented_interface))):\n        raise GraphQLCompilationError(u'Edges expanded with a @recurse directive must either be of the same type as their enclosing scope, a supertype of the enclosing scope, or be of an interface type that is implemented by the type of their enclosing scope. Enclosing scope type: {}, edge type: {}'.format(current_schema_type, field_schema_type))", "docstring": "Perform type checks on the enclosing type and the recursed type for a recurse directive.\n\nArgs:\ncurrent_schema_type: GraphQLType, the schema type at the current location\nfield_schema_type: GraphQLType, the schema type at the inner scope\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!", "source": "codesearchnet"}
{"code": "def output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)", "docstring": "Returns the shape of each component of an element of this iterator.\n\nReturns:\nA (nested) structure of `tf.TensorShape` objects corresponding to each\ncomponent of an element of this dataset.", "source": "github-repos"}
{"code": "def subcomponents(self, subcomponents):\n    for arg in self.args:\n        if (arg.__class__.__name__ == 'Function'):\n            subcomponents.append(arg.to_string())\n            if (arg.function_type == 'primary'):\n                arg.subcomponents(subcomponents)\n        else:\n            subcomponents.append(arg.to_string())\n    return subcomponents", "docstring": "Generate subcomponents of the BEL subject or object\n\nThese subcomponents are used for matching parts of a BEL\nsubject or Object in the Edgestore.\n\nArgs:\nAST\nsubcomponents:  Pass an empty list to start a new subcomponents request\n\nReturns:\nList[str]: subcomponents of BEL subject or object", "source": "codesearchnet"}
{"code": "def logistic(x: Union[float, np.ndarray],\n             k: float,\n             theta: float) -> Optional[float]:\n    r\n    \n    if x is None or k is None or theta is None:\n        return None\n    \n    return 1 / (1 + np.exp(-k * (x - theta)))", "docstring": "r\"\"\"\nStandard logistic function.\n\n.. math::\n\ny = \\frac {1} {1 + e^{-k (x - \\theta)}}\n\nArgs:\nx: :math:`x`\nk: :math:`k`\ntheta: :math:`\\theta`\n\nReturns:\n:math:`y`", "source": "juraj-google-style"}
{"code": "def create(self, document_data):\n    batch = self._client.batch()\n    batch.create(self, document_data)\n    write_results = batch.commit()\n    return _first_write_result(write_results)", "docstring": "Create the current document in the Firestore database.\n\nArgs:\ndocument_data (dict): Property names and values to use for\ncreating a document.\n\nReturns:\ngoogle.cloud.firestore_v1beta1.types.WriteResult: The\nwrite result corresponding to the committed document. A write\nresult contains an ``update_time`` field.\n\nRaises:\n~google.cloud.exceptions.Conflict: If the document already exists.", "source": "codesearchnet"}
{"code": "def getall(self):\n    matches = ROUTES_RE.findall(self.config)\n    routes = dict()\n    for match in matches:\n        ip_dest = match[0]\n        next_hop = match[1]\n        next_hop_ip = (None if (match[2] is '') else match[2])\n        distance = int(match[3])\n        data = {}\n        data['tag'] = (None if (match[4] is '') else int(match[4]))\n        data['route_name'] = (None if (match[5] is '') else match[5])\n        ip_dict = routes[ip_dest] = routes.get(ip_dest, {})\n        nh_dict = ip_dict[next_hop] = ip_dict.get(next_hop, {})\n        nhip_dict = nh_dict[next_hop_ip] = nh_dict.get(next_hop_ip, {})\n        nhip_dict[distance] = data\n    return routes", "docstring": "Return all ip routes configured on the switch as a resource dict\n\nReturns:\ndict: An dict object of static route entries in the form::\n\n{ ip_dest:\n{ next_hop:\n{ next_hop_ip:\n{ distance:\n{ 'tag': tag,\n'route_name': route_name\n}\n}\n}\n}\n}\n\nIf the ip address specified does not have any associated\nstatic routes, then None is returned.\n\nNotes:\nThe keys ip_dest, next_hop, next_hop_ip, and distance in\nthe returned dictionary are the values of those components\nof the ip route specification. If a route does not contain\na next_hop_ip, then that key value will be set as 'None'.", "source": "codesearchnet"}
{"code": "def transformer_latent_decoder(x, encoder_output, ed_attention_bias, hparams, name=None):\n    with tf.variable_scope(name, default_name='transformer_latent_dec'):\n        batch_size = common_layers.shape_list(x)[0]\n        compressed_img_len = (hparams.img_len \n        x = tf.reshape(x, [batch_size, compressed_img_len, (compressed_img_len * hparams.num_latents), hparams.hidden_size])\n        (decoder_input, _, _) = cia.prepare_decoder(x, hparams)\n        decoder_output = cia.transformer_decoder_layers(decoder_input, encoder_output, (hparams.num_latent_layers or hparams.num_hidden_layers), hparams, attention_type=hparams.latent_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name='decoder')\n        decoder_output = tf.reshape(decoder_output, [batch_size, ((compressed_img_len ** 2) * hparams.num_latents), hparams.hidden_size])\n        return decoder_output", "docstring": "Transformer decoder over latents using latent_attention_type.\n\nArgs:\nx: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the\nlatent length, which is\nheight * width * hparams.num_latents / (2**hparams.num_compress_steps).\nencoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].\ned_attention_bias: Tensor which broadcasts with shape [batch,\nhparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.\nhparams: HParams.\nname: string, variable scope.\n\nReturns:\nTensor of shape [batch, length_q, hparams.hidden_size].", "source": "codesearchnet"}
{"code": "def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):\n    if (not start_row):\n        start_row = 0\n    elif (start_row < 0):\n        if (self.length >= 0):\n            start_row += self.length\n        else:\n            raise Exception('Cannot use negative indices for table of unknown length')\n    schema = self.schema._bq_schema\n    name_parts = self._name_parts\n\n    def _retrieve_rows(page_token, count):\n        page_rows = []\n        if (max_rows and (count >= max_rows)):\n            page_token = None\n        else:\n            if (max_rows and (page_size > (max_rows - count))):\n                max_results = (max_rows - count)\n            else:\n                max_results = page_size\n            try:\n                if page_token:\n                    response = self._api.tabledata_list(name_parts, page_token=page_token, max_results=max_results)\n                else:\n                    response = self._api.tabledata_list(name_parts, start_index=start_row, max_results=max_results)\n            except Exception as e:\n                raise e\n            page_token = (response['pageToken'] if ('pageToken' in response) else None)\n            if ('rows' in response):\n                page_rows = response['rows']\n        rows = []\n        for row_dict in page_rows:\n            rows.append(_parser.Parser.parse_row(schema, row_dict))\n        return (rows, page_token)\n    return _retrieve_rows", "docstring": "Get a function that can retrieve a page of rows.\n\nThe function returned is a closure so that it can have a signature suitable for use\nby Iterator.\n\nArgs:\nstart_row: the row to start fetching from; default 0.\nmax_rows: the maximum number of rows to fetch (across all calls, not per-call). Default\nis None which means no limit.\npage_size: the maximum number of results to fetch per page; default 1024.\nReturns:\nA function that can be called repeatedly with a page token and running count, and that\nwill return an array of rows and a next page token; when the returned page token is None\nthe fetch is complete.", "source": "codesearchnet"}
{"code": "def add_key_value(self, key, value):\n        \n        key = self._metadata_map.get(key, key)\n        if key in ['dateAdded', 'lastModified']:\n            self._data[key] = self._utils.format_datetime(value, date_format='%Y-%m-%dT%H:%M:%SZ')\n        elif key == 'confidence':\n            self._data[key] = int(value)\n        elif key == 'rating':\n            self._data[key] = float(value)\n        elif key == 'unique_id':\n            self._unique_id = quote_plus(value)\n        else:\n            self._data[key] = value", "docstring": "Converts the value and adds it as a data field.\n\nArgs:\nkey:\nvalue:", "source": "juraj-google-style"}
{"code": "def has_attribute(self, attribute: str) -> bool:\n        \n        return any([\n            key_node.value == attribute for key_node, _ in self.yaml_node.value\n        ])", "docstring": "Whether the node has an attribute with the given name.\n\nUse only if is_mapping() returns True.\n\nArgs:\nattribute: The name of the attribute to check for.\n\nReturns:\nTrue iff the attribute is present.", "source": "juraj-google-style"}
{"code": "def _gal2idx(self, gal):\n    l = coordinates.Longitude(gal.l, wrap_angle=(180.0 * units.deg))\n    j = (self._inv_pix_scale * (l.deg - self._l_bounds[0])).astype('i4')\n    k = (self._inv_pix_scale * (gal.b.deg - self._b_bounds[0])).astype('i4')\n    idx = ((((j < 0) | (j >= self._shape[0])) | (k < 0)) | (k >= self._shape[1]))\n    if np.any(idx):\n        j[idx] = (- 1)\n        k[idx] = (- 1)\n    return (j, k, (~ idx))", "docstring": "Converts from Galactic coordinates to pixel indices.\n\nArgs:\ngal (:obj:`astropy.coordinates.SkyCoord`): Galactic coordinates. Must\nstore an array of coordinates (i.e., not be scalar).\n\nReturns:\n``j, k, mask`` - Pixel indices of the coordinates, as well as a mask\nof in-bounds coordinates. Outputs have the same shape as the input\ncoordinates.", "source": "codesearchnet"}
{"code": "def deleted(self, main_type, sub_type, deleted_since, owner=None, filters=None, params=None):\n        \n        params = params or {}\n\n        if filters and filters.filters:\n            params['filters'] = filters.filters_string\n        if owner:\n            params['owner'] = owner\n        if deleted_since:\n            params['deleteSince'] = deleted_since\n\n        if not sub_type:\n            url = '/v2/{}/deleted'.format(main_type)\n        else:\n            url = '/v2/{}/{}/deleted'.format(main_type, sub_type)\n\n        return self.tcex.session.get(url, params=params)", "docstring": "Args:\nowner:\nfilters:\nmain_type:\nsub_type:\ndeleted_since:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def raw_filter(self, filters):\n    return SearchResult(self, self._api.get(self._href, **{'filter[]': filters}))", "docstring": "Sends all filters to the API.\n\nNo fancy, just a wrapper. Any advanced functionality shall be implemented as another method.\n\nArgs:\nfilters: List of filters (strings)\n\nReturns: :py:class:`SearchResult`", "source": "codesearchnet"}
{"code": "def get_instances(serials):\n    _validate_device_existence(serials)\n    results = []\n    for s in serials:\n        results.append(AndroidDevice(s))\n    return results", "docstring": "Create AndroidDevice instances from a list of serials.\n\nArgs:\nserials: A list of android device serials.\n\nReturns:\nA list of AndroidDevice objects.", "source": "github-repos"}
{"code": "def Extract(self, components):\n    \n\n    rundll_index = -1\n    for index, component in enumerate(components):\n      if component.lower().endswith(\"rundll32.exe\"):\n        rundll_index = index\n\n    if rundll_index == -1:\n      return []\n\n    components = components[(rundll_index + 1):]\n\n    \n    \n    \n    last_component = components[-1].rsplit(\",\", 1)[0]\n\n    extracted_path = \" \".join(components[0:-1] + [last_component])\n    return [extracted_path]", "docstring": "Extracts interesting paths from a given path.\n\nArgs:\ncomponents: Source string represented as a list of components.\n\nReturns:\nA list of extracted paths (as strings).", "source": "juraj-google-style"}
{"code": "def company(self, **kwargs):\n        \n        path = self._get_path('company')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Search for companies by name.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(Certificate, self).read(istream, kmip_version=kmip_version)\n    tstream = BytearrayStream(istream.read(self.length))\n    self.certificate_type = CertificateType()\n    self.certificate_value = CertificateValue()\n    self.certificate_type.read(tstream, kmip_version=kmip_version)\n    self.certificate_value.read(tstream, kmip_version=kmip_version)\n    self.is_oversized(tstream)", "docstring": "Read the data encoding the Certificate object and decode it into its\nconstituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def deconv_elems_1d(x, factor, out_depth=None):\n  \n  out_depth = out_depth or x.get_shape().as_list()[-1]\n  x = tf.expand_dims(x, 1)  \n  x = layers().Conv2DTranspose(\n      filters=out_depth,\n      kernel_size=(1, factor),\n      strides=(1, factor),\n      padding=\"valid\",\n      data_format=\"channels_last\",\n  )(x)  \n  x = tf.squeeze(x, 1)  \n  return x", "docstring": "Increase the length and change the dimensionality.\n\nExpand/project each positions of dim depth of the input into\nfactor*tokens of dim out_depth\n\nArgs:\nx (tf.Tensor): shape [batch_size, length, depth]\nfactor (int): Multiplicative factor of each tokens.\nout_depth (int): Output depth (if None, keep depth constant)\n\nReturns:\ntf.Tensor: shape [batch_size, length*factor, out_depth]", "source": "juraj-google-style"}
{"code": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n    if input_length is None:\n        return None\n    assert padding in {'same', 'valid', 'full'}\n    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n    if padding == 'same':\n        output_length = input_length\n    elif padding == 'valid':\n        output_length = input_length - dilated_filter_size + 1\n    elif padding == 'full':\n        output_length = input_length + dilated_filter_size - 1\n    return (output_length + stride - 1)", "docstring": "Determines output length of a convolution given input length.\n\nArgs:\ninput_length: integer.\nfilter_size: integer.\npadding: one of \"same\", \"valid\", \"full\".\nstride: integer.\ndilation: dilation rate, integer.\n\nReturns:\nThe output length (integer).", "source": "github-repos"}
{"code": "def wind_direction(self, value=999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `wind_direction`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `wind_direction`')\n            if value > 360.0:\n                raise ValueError('value need to be smaller 360.0 '\n                                 'for field `wind_direction`')\n\n        self._wind_direction = value", "docstring": "Corresponds to IDD Field `wind_direction`\n\nArgs:\nvalue (float): value for IDD Field `wind_direction`\nUnit: degrees\nvalue >= 0.0\nvalue <= 360.0\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "class Sum(Metric):\n\n    def __init__(self, name='sum', dtype=None):\n        super().__init__(name=name, dtype=dtype)\n        self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total')\n\n    def update_state(self, values, sample_weight=None):\n        values, _ = reduce_to_samplewise_values(values, sample_weight, reduce_fn=ops.sum, dtype=self.dtype)\n        self.total.assign_add(ops.sum(values))\n\n    def reset_state(self):\n        self.total.assign(0)\n\n    def result(self):\n        return ops.cast(self.total, self.dtype)", "docstring": "Compute the (weighted) sum of the given values.\n\nFor example, if `values` is `[1, 3, 5, 7]` then their sum is 16.\nIf `sample_weight` was specified as `[1, 1, 0, 0]` then the sum would be 4.\n\nThis metric creates one variable, `total`.\nThis is ultimately returned as the sum value.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = metrics.Sum()\n>>> m.update_state([1, 3, 5, 7])\n>>> m.result()\n16.0\n\n>>> m = metrics.Sum()\n>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])\n>>> m.result()\n4.0", "source": "github-repos"}
{"code": "def to_grid_locator(latitude, longitude, precision='square'):\n    \n    if precision not in ('square', 'subsquare', 'extsquare'):\n        raise ValueError('Unsupported precision value %r' % precision)\n\n    if not -90 <= latitude <= 90:\n        raise ValueError('Invalid latitude value %r' % latitude)\n    if not -180 <= longitude <= 180:\n        raise ValueError('Invalid longitude value %r' % longitude)\n\n    latitude += 90.0\n    longitude += 180.0\n\n    locator = []\n\n    field = int(longitude / LONGITUDE_FIELD)\n    locator.append(chr(field + 65))\n    longitude -= field * LONGITUDE_FIELD\n\n    field = int(latitude / LATITUDE_FIELD)\n    locator.append(chr(field + 65))\n    latitude -= field * LATITUDE_FIELD\n\n    square = int(longitude / LONGITUDE_SQUARE)\n    locator.append(str(square))\n    longitude -= square * LONGITUDE_SQUARE\n\n    square = int(latitude / LATITUDE_SQUARE)\n    locator.append(str(square))\n    latitude -= square * LATITUDE_SQUARE\n\n    if precision in ('subsquare', 'extsquare'):\n        subsquare = int(longitude / LONGITUDE_SUBSQUARE)\n        locator.append(chr(subsquare + 97))\n        longitude -= subsquare * LONGITUDE_SUBSQUARE\n\n        subsquare = int(latitude / LATITUDE_SUBSQUARE)\n        locator.append(chr(subsquare + 97))\n        latitude -= subsquare * LATITUDE_SUBSQUARE\n\n    if precision == 'extsquare':\n        extsquare = int(longitude / LONGITUDE_EXTSQUARE)\n        locator.append(str(extsquare))\n\n        extsquare = int(latitude / LATITUDE_EXTSQUARE)\n        locator.append(str(extsquare))\n\n    return ''.join(locator)", "docstring": "Calculate Maidenhead locator from latitude and longitude.\n\nArgs:\nlatitude (float): Position's latitude\nlongitude (float): Position's longitude\nprecision (str): Precision with which generate locator string\n\nReturns:\nstr: Maidenhead locator for latitude and longitude\n\nRaise:\nValueError: Invalid precision identifier\nValueError: Invalid latitude or longitude value", "source": "juraj-google-style"}
{"code": "def __init__(self, pyof_class, items=None):\n        \n        self._pyof_class = pyof_class\n        super().__init__(items)", "docstring": "Create a FixedTypeList with the parameters follows.\n\nArgs:\npyof_class (:obj:`type`): Class of the items to be stored.\nitems (iterable, ``pyof_class``): Items to be stored.", "source": "juraj-google-style"}
{"code": "def run_build_model(self, num_runs=5, silent=False, force_rerun=False):\n    self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])\n    self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])\n    foldx_build_model = 'foldx --command=BuildModel --pdb={} --mutant-file={} --numberOfRuns={}'.format(self.repaired_pdb_outfile, op.basename(self.mutation_infile), num_runs)\n    ssbio.utils.command_runner(shell_command=foldx_build_model, force_rerun_flag=force_rerun, silent=silent, outfile_checker=self.mutation_ddG_avg_outfile, cwd=self.foldx_dir)", "docstring": "Run FoldX BuildModel command with a mutant file input.\n\nOriginal command::\n\nfoldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5\n\nArgs:\nnum_runs (int):\nsilent (bool): If FoldX output should be silenced from printing to the shell.\nforce_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists.", "source": "codesearchnet"}
{"code": "def replace(template, **replacements):\n    if not isinstance(template, str):\n        raise ValueError('Expected string template, got %s' % type(template))\n    for k in replacements:\n        replacements[k] = _convert_to_ast(replacements[k])\n    template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)\n    nodes = parser.parse(template_str, preamble_len=parser.STANDARD_PREAMBLE_LEN, single_node=False)\n    results = []\n    for node in nodes:\n        node = ReplaceTransformer(replacements).visit(node)\n        if isinstance(node, (list, tuple)):\n            results.extend(node)\n        else:\n            results.append(node)\n    results = [qual_names.resolve(r) for r in results]\n    return results", "docstring": "Replaces placeholders in a Python template.\n\nAST Name and Tuple nodes always receive the context that inferred from\nthe template. However, when replacing more complex nodes (that can potentially\ncontain Name children), then the caller is responsible for setting the\nappropriate context.\n\nArgs:\ntemplate: A string representing Python code. Any symbol name can be used\nthat appears in the template code can be used as placeholder.\n**replacements: A mapping from placeholder names to (lists of) AST nodes\nthat these placeholders will be replaced by. String values are also\nsupported as a shorthand for AST Name nodes with the respective ID.\n\nReturns:\nAn AST node or list of AST nodes with the replacements made. If the\ntemplate was a function, a list will be returned. If the template was a\nnode, the same node will be returned. If the template was a string, an\nAST node will be returned (a `Module` node in the case of a multi-line\nstring, an `Expr` node otherwise).\n\nRaises:\nValueError: if the arguments are incorrect.", "source": "github-repos"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\nArgs:\nrequest: (CloudbuildOperationsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def FromDictionary(cls, msg_dict):\n        \n\n        level = msg_dict.get('level')\n        msg = msg_dict.get('message')\n        now = msg_dict.get('now_time')\n        created = msg_dict.get('created_time')\n        count = msg_dict.get('count', 1)\n        msg_id = msg_dict.get('id', 0)\n\n        new_msg = ServiceMessage(level, msg, msg_id, created, now)\n        if count > 1:\n            new_msg.count = count\n\n        return new_msg", "docstring": "Create from a dictionary with kv pairs.\n\nArgs:\nmsg_dict (dict): A dictionary with information as created by to_dict()\n\nReturns:\nServiceMessage: the converted message", "source": "juraj-google-style"}
{"code": "def get_symmetrized_structure(self):\n    ds = self.get_symmetry_dataset()\n    sg = SpacegroupOperations(self.get_space_group_symbol(), self.get_space_group_number(), self.get_symmetry_operations())\n    return SymmetrizedStructure(self._structure, sg, ds['equivalent_atoms'], ds['wyckoffs'])", "docstring": "Get a symmetrized structure. A symmetrized structure is one where the\nsites have been grouped into symmetrically equivalent groups.\n\nReturns:\n:class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.", "source": "codesearchnet"}
{"code": "def score_cosine(self, term1, term2, **kwargs):\n\n        \n\n        t1_kde = self.kde(term1, **kwargs)\n        t2_kde = self.kde(term2, **kwargs)\n\n        return 1-distance.cosine(t1_kde, t2_kde)", "docstring": "Compute a weighting score based on the cosine distance between the\nkernel density estimates of two terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns: float", "source": "juraj-google-style"}
{"code": "def Open(self, path, ascii_codepage='cp1252'):\n    \n    path_specification = self._path_resolver.ResolvePath(path)\n    if path_specification is None:\n      return None\n\n    return self._OpenPathSpec(path_specification)", "docstring": "Opens the Windows Registry file specified by the path.\n\nArgs:\npath (str): path of the Windows Registry file.\nascii_codepage (Optional[str]): ASCII string codepage.\n\nReturns:\nWinRegistryFile: Windows Registry file or None.", "source": "juraj-google-style"}
{"code": "def _ScanFileSystemForWindowsDirectory(self, path_resolver):\n    result = False\n    for windows_path in self._WINDOWS_DIRECTORIES:\n        windows_path_spec = path_resolver.ResolvePath(windows_path)\n        result = (windows_path_spec is not None)\n        if result:\n            self._windows_directory = windows_path\n            break\n    return result", "docstring": "Scans a file system for a known Windows directory.\n\nArgs:\npath_resolver (WindowsPathResolver): Windows path resolver.\n\nReturns:\nbool: True if a known Windows directory was found.", "source": "codesearchnet"}
{"code": "def _encode_constraints(self, builder: expressions.Builder, element_definition: ElementDefinition) -> List[validation_pb2.SqlRequirement]:\n    result: List[validation_pb2.SqlRequirement] = []\n    constraints: List[Constraint] = cast(Any, element_definition).constraint\n    root_constraints: List[Constraint] = []\n    if isinstance(builder.return_type, _fhir_path_data_types.StructureDataType):\n        root_constraints = cast(Any, builder.return_type.root_element_definition).constraint\n    dedup_constraint_keys: Set[str] = set()\n    for constraint in itertools.chain(constraints, root_constraints):\n        constraint_key: str = cast(Any, constraint).key.value\n        if constraint_key in dedup_constraint_keys:\n            continue\n        dedup_constraint_keys.add(constraint_key)\n        if constraint_key in self._options.skip_keys:\n            continue\n        fhir_path_expression: str = cast(Any, constraint).expression.value\n        if '%resource' in fhir_path_expression or 'comparator' in fhir_path_expression:\n            continue\n        element_definition_path = self._abs_path_invocation(builder)\n        constraint_key_column_name: str = _key_to_sql_column_name(constraint_key)\n        column_name_base: str = _path_to_sql_column_name(element_definition_path)\n        column_name = f'{column_name_base}_{constraint_key_column_name}'\n        if column_name in self._requirement_column_names:\n            self._error_reporter.report_fhir_path_error(element_definition_path, fhir_path_expression, f'Duplicate FHIRPath requirement: {column_name}.')\n            continue\n        if cast(Any, constraint).severity.value == 0:\n            self._error_reporter.report_fhir_path_error(element_definition_path, fhir_path_expression, 'Constraint severity must be set.')\n            continue\n        if self._options.expr_replace_list:\n            for replacement in self._options.expr_replace_list.replacement:\n                if (not replacement.element_path or replacement.element_path == element_definition_path) and replacement.expression_to_replace == fhir_path_expression:\n                    fhir_path_expression = replacement.replacement_expression\n        struct_def = cast(_fhir_path_data_types.StructureDataType, builder.get_root_builder().return_type)\n        result_constraint = self._encode_fhir_path_constraint(struct_def, fhir_path_expression, builder)\n        if result_constraint is None:\n            continue\n        if any((node.return_type.url in ('http:\n            self._error_reporter.report_fhir_path_error(self._abs_path_invocation(builder), result_constraint.builder.fhir_path, 'Constraints involving Extension or Resource fields are not supported. 
Unable to enforce this constraint because it references a field with an unsupported \"Extension\" or \"Resource\" type field which is not included in the database schema.')\n            continue\n        type_ = validation_pb2.ValidationType.VALIDATION_TYPE_FHIR_PATH_CONSTRAINT\n        severity = cast(Any, constraint).severity\n        severity_value_field = severity.DESCRIPTOR.fields_by_name.get('value')\n        severity_str = codes.enum_value_descriptor_to_code_string(severity_value_field.enum_type.values_by_number[severity.value])\n        try:\n            validation_severity = validation_pb2.ValidationSeverity.Value(f'SEVERITY_{severity_str.upper()}')\n        except ValueError:\n            self._error_reporter.report_fhir_path_warning(element_definition_path, fhir_path_expression, f'Unknown validation severity conversion: {severity_str}.')\n            validation_severity = validation_pb2.ValidationSeverity.SEVERITY_WARNING\n        requirement = validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result_constraint.sql, fhir_path_sql_expression=result_constraint.fhir_path_sql, severity=validation_severity, type=type_, element_path=element_definition_path, description=cast(Any, constraint).human.value, fhir_path_key=constraint_key, fhir_path_expression=result_constraint.builder.fhir_path, fields_referenced_by_expression=sorted(result_constraint.builder.node.find_paths_referenced()))\n        self._requirement_column_names.add(column_name)\n        result.append(requirement)\n    return result", "docstring": "Returns a list of `SqlRequirement`s for FHIRPath constraints.\n\nArgs:\nbuilder: The builder containing the element to encode constraints for.\nelement_definition: Element definition passed from the parent.\n\nReturns:\nA list of `SqlRequirement`s expressing FHIRPath constraints defined on the\n`element_definition` and `builder` if applicable.", "source": "github-repos"}
{"code": "def create(self, uri, local_path):\n        \n        matches = self.schema_pattern.search(uri)\n\n        if not matches:\n            logger.error(\"Unknown uri schema: '%s'. Added schemas: %s\", uri, list(self.handlers.keys()))\n            return None\n\n        schema = matches.group(1)\n        url = matches.group(2)\n\n        return self.handlers[schema](url, local_path)", "docstring": "Create a project handler\n\nArgs:\nuri (str): schema://something formatted uri\nlocal_path (str): the project configs directory\n\nReturn:\nProjectHandler derived class instance", "source": "juraj-google-style"}
{"code": "def _SanitizedArgSpec(obj):\n    output_string = ''\n    unsanitized_arg_spec = tf_inspect.getargspec(obj)\n    for clean_attr in ('args', 'varargs', 'keywords'):\n        output_string += '%s=%s, ' % (clean_attr, getattr(unsanitized_arg_spec, clean_attr))\n    if unsanitized_arg_spec.defaults:\n        sanitized_defaults = []\n        for val in unsanitized_arg_spec.defaults:\n            str_val = str(val)\n            if ' at 0x' in str_val:\n                sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])\n            else:\n                sanitized_defaults.append(str_val)\n        output_string += 'defaults=%s, ' % sanitized_defaults\n    else:\n        output_string += 'defaults=None'\n    return output_string", "docstring": "Get an ArgSpec string that is free of addresses.\n\nWe have callables as function arg defaults. This results in addresses in\ngetargspec output. This function returns a sanitized string list of base\nclasses.\n\nArgs:\nobj: A python routine for us the create the sanitized arspec of.\n\nReturns:\nstring, a string representation of the argspec.", "source": "github-repos"}
{"code": "def timTuVi(cuc, ngaySinhAmLich):\n    \n    cungDan = 3  \n    cucBanDau = cuc\n    if cuc not in [2, 3, 4, 5, 6]:  \n        raise Exception(\"Số cục phải là 2, 3, 4, 5, 6\")\n    while cuc < ngaySinhAmLich:\n        cuc += cucBanDau\n        cungDan += 1  \n    saiLech = cuc - ngaySinhAmLich\n    if saiLech % 2 is 1:\n        saiLech = -saiLech  \n    return dichCung(cungDan, saiLech)", "docstring": "Tìm vị trí của sao Tử vi\n\nArgs:\ncuc (TYPE): Description\nngaySinhAmLich (TYPE): Description\n\nReturns:\nTYPE: Description\n\nRaises:\nException: Description", "source": "juraj-google-style"}
{"code": "def filter_lines(code, line_spec):\n    code_lines = code.splitlines()\n    line_specs = [line_denom.strip() for line_denom in line_spec.split(',')]\n    single_lines = set(map(int, filter((lambda line: ('-' not in line)), line_specs)))\n    line_ranges = set(filter((lambda line: ('-' in line)), line_specs))\n    for line_range in line_ranges:\n        (begin, end) = line_range.split('-')\n        if (not begin):\n            begin = 1\n        if (not end):\n            end = len(code_lines)\n        single_lines.update(range(int(begin), (int(end) + 1)))\n    keep_lines = []\n    for (line_number, line) in enumerate(code_lines, 1):\n        if (line_number in single_lines):\n            keep_lines.append(line)\n    return '\\n'.join(keep_lines)", "docstring": "Removes all lines not matching the line_spec.\n\nArgs:\ncode The code to filter\nline_spec The line specification. This should be a comma-separated\nstring of lines or line ranges, e.g. 1,2,5-12,15\nIf a line range starts with -, all lines up to this line are\nincluded.\nIf a line range ends with -, all lines from this line on are\nincluded.\nAll lines mentioned (ranges are inclusive) are used.\nReturns:\nOnly the specified lines.", "source": "codesearchnet"}
{"code": "def post_create_app(cls, app, **settings):\n    super(MarshmallowAwareApp, cls).post_create_app(app, **settings)\n    marsh.init_app(app)\n    return app", "docstring": "Automatically register and init the Flask Marshmallow extension.\n\nArgs:\napp (flask.Flask): The application instance in which to initialize\nFlask Marshmallow upon.\n\nKwargs:\nsettings (dict): The settings passed to this method from the\nparent app.\n\nReturns:\nflask.Flask: The Flask application that was passed in.", "source": "codesearchnet"}
{"code": "def _ln_rnn(x, gamma, beta):\n    r\n    \n    mean, variance = tf.nn.moments(x, axes=[len(x.get_shape()) - 1], keep_dims=True)\n\n    \n    x = (x - mean) / tf.sqrt(variance + tf.sg_eps)\n\n    \n    return gamma * x + beta", "docstring": "r\"\"\"Applies layer normalization.\nNormalizes the last dimension of the tensor `x`.\n\nArgs:\nx: A `Tensor`.\ngamma: A constant `Tensor`. Scale parameter. Default is 1.\nbeta: A constant `Tensor`. Offset parameter. Default is 0.\n\nReturns:\nA `Tensor` with the same shape as `x`.", "source": "juraj-google-style"}
{"code": "def batch_decode(self, sequences: Union[List[int], List[List[int]], 'np.ndarray', 'torch.Tensor', 'tf.Tensor'], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, **kwargs) -> List[str]:\n    return [self.decode(seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) for seq in sequences]", "docstring": "Convert a list of lists of token ids into a list of strings by calling decode.\n\nArgs:\nsequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):\nList of tokenized input ids. Can be obtained using the `__call__` method.\nskip_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not to remove special tokens in the decoding.\nclean_up_tokenization_spaces (`bool`, *optional*):\nWhether or not to clean up the tokenization spaces. If `None`, will default to\n`self.clean_up_tokenization_spaces`.\nkwargs (additional keyword arguments, *optional*):\nWill be passed to the underlying model specific decode method.\n\nReturns:\n`List[str]`: The list of decoded sentences.", "source": "github-repos"}
{"code": "def to_json(self, from_api: dict=None, from_json: dict=None, parents: dict={}) -> dict:\n    if from_api:\n        from_json = deepcopy(from_api)\n    for key, value in from_json.items():\n        if not isinstance(value, dict):\n            continue\n        if '$ref' in value:\n            ref = value['$ref']\n            parents.setdefault(ref, 0)\n            if parents[ref] < self.recursion_depth:\n                parents[ref] += 1\n                from_json[key] = self.to_json(from_api=self.api_document['schemas'][ref]['properties'], parents=parents)\n                parents[ref] -= 1\n            else:\n                from_json[key] = None\n        else:\n            self.to_json(from_json=value, parents=parents)\n    return from_json", "docstring": "Returns a Discovery API Document schema with all refrences extrapolated.\n\nRecursively crawls the discovery document reference tree to build document.\nLeverages recursion depth passed in constructor to stop if necessary.\n\nArgs:\nfrom_api: the api schema to extrapolate\nfrom_json: new object with references replaced, not passed by caller\nparents: used to track recursion depth for a specific schema branch\n\nReturns:\nA Discovery API Document schema object.", "source": "github-repos"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0) + [1]\n    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def price(self, valuation_date, market, model=None, pricing_context=None, name=None):\n    del model, pricing_context\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        discount_curve = market.discount_curve\n        reference_curve = market.reference_curve\n        libor_rate = rc.get_rate_index(market, self._start_date, rc.RateIndexType.LIBOR, dtype=self._dtype)\n        libor_rate = tf.repeat(tf.convert_to_tensor(libor_rate, dtype=self._dtype), self._num_cashflows)\n        discount_factors = discount_curve.get_discount_factor(self._payment_dates)\n        forward_rates = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fractions)\n        forward_rates = tf.where(self._daycount_fractions > 0.0, forward_rates, tf.zeros_like(forward_rates))\n        forward_rates = tf.where(self._coupon_end_dates < valuation_date, tf.constant(0.0, dtype=self._dtype), tf.where(self._coupon_start_dates < valuation_date, libor_rate, forward_rates))\n        coupon_rate = self._coupon_multiplier * (forward_rates + self._coupon_basis)\n        cashflow_pvs = self._notional * (self._daycount_fractions * coupon_rate * discount_factors)\n        return tf.math.reduce_sum(tf.reshape(cashflow_pvs, (self._batch_size, self._num_cashflows)), axis=1)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the cashflow stream.\nmodel: Reserved for future use.\npricing_context: Additional context relevant for pricing.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each stream\ncontract based on the input market data.", "source": "github-repos"}
{"code": "def Serialize(self, writer):\n        \n        super(SpentCoinState, self).Serialize(writer)\n\n        writer.WriteUInt256(self.TransactionHash)\n        writer.WriteUInt32(self.TransactionHeight)\n        writer.WriteVarInt(len(self.Items))\n\n        for item in self.Items:\n            writer.WriteUInt16(item.index)\n            writer.WriteUInt32(item.height)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def stringify_default(default: Any) -> str:\n    if isinstance(default, bool):\n        return f'`{default}`'\n    elif isinstance(default, enum.Enum):\n        return f'`{str(default)}`'\n    elif isinstance(default, int):\n        return str(default)\n    elif isinstance(default, float):\n        result = str(default)\n        return str(round(default, 2)) if len(result) > 6 else result\n    elif isinstance(default, str):\n        return str(default) if default.isnumeric() else f'`\"{default}\"`'\n    elif isinstance(default, type):\n        return f'`{default.__name__}`'\n    else:\n        return f'`{default}`'", "docstring": "Returns the string representation of a default value, as used in docstring: numbers are left as is, all other\nobjects are in backtiks.\n\nArgs:\ndefault (`Any`): The default value to process\n\nReturns:\n`str`: The string representation of that default.", "source": "github-repos"}
{"code": "def from_shape(cls, shape):\n        \n        \n        \n        if shape.__class__ is cls:\n            return shape\n\n        else:\n            error = linearization_error(shape.nodes)\n            if error < _ERROR_VAL:\n                linearized = cls(shape, error)\n                return linearized\n\n            else:\n                return shape", "docstring": "Try to linearize a curve (or an already linearized curve).\n\nArgs:\nshape (Union[SubdividedCurve, \\\n~bezier._geometric_intersection.Linearization]): A curve or an\nalready linearized curve.\n\nReturns:\nUnion[SubdividedCurve, \\\n~bezier._geometric_intersection.Linearization]: The\n(potentially linearized) curve.", "source": "juraj-google-style"}
{"code": "def db_dp010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_dp010`'.format(value))\n    self._db_dp010 = value", "docstring": "Corresponds to IDD Field `db_dp010`\nmean coincident dry-bulb temperature to\nDew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_dp010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def transition_complete(self, pipeline_key):\n    \n    def txn():\n      pipeline_record = db.get(pipeline_key)\n      if pipeline_record is None:\n        logging.warning(\n            'Tried to mark pipeline ID \"%s\" as complete but it does not exist.',\n            pipeline_key.name())\n        raise db.Rollback()\n      if pipeline_record.status not in (\n          _PipelineRecord.WAITING, _PipelineRecord.RUN):\n        logging.warning(\n            'Tried to mark pipeline ID \"%s\" as complete, found bad state: %s',\n            pipeline_key.name(), pipeline_record.status)\n        raise db.Rollback()\n\n      pipeline_record.status = _PipelineRecord.DONE\n      pipeline_record.finalized_time = self._gettime()\n      pipeline_record.put()\n\n    db.run_in_transaction(txn)", "docstring": "Marks the given pipeline as complete.\n\nDoes nothing if the pipeline is no longer in a state that can be completed.\n\nArgs:\npipeline_key: db.Key of the _PipelineRecord that has completed.", "source": "juraj-google-style"}
{"code": "def add_or_update(self, section, key, value):\n    updates = self.update(section, key, value)\n    if (updates == 0):\n        self.add(section, key, value)\n    return updates", "docstring": "Update the key or, if no previous value existed, add it.\n\nReturns:\nint: Number of updated lines.", "source": "codesearchnet"}
{"code": "def delete_many(self, keys, noreply=None):\n    if (not keys):\n        return True\n    if (noreply is None):\n        noreply = self.default_noreply\n    cmds = []\n    for key in keys:\n        cmds.append((((b'delete ' + self.check_key(key)) + (b' noreply' if noreply else b'')) + b'\\r\\n'))\n    self._misc_cmd(cmds, b'delete', noreply)\n    return True", "docstring": "A convenience function to delete multiple keys.\n\nArgs:\nkeys: list(str), the list of keys to delete.\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue. If an exception is raised then all, some or none of the keys\nmay have been deleted. Otherwise all the keys have been sent to\nmemcache for deletion and if noreply is False, they have been\nacknowledged by memcache.", "source": "codesearchnet"}
{"code": "def changed(dirname, filename='.md5', args=None, glob=None):\n    \n    root = Path(dirname)\n    if not root.exists():\n        \n        return True\n\n    cachefile = root / filename\n    current_digest = cachefile.open().read() if cachefile.exists() else \"\"\n    \n    _digest = digest(dirname, glob=glob)\n    if args and args.verbose:  \n        print(\"md5:\", _digest)\n    has_changed = current_digest != _digest\n\n    if has_changed:\n        with open(os.path.join(dirname, filename), 'w') as fp:\n            fp.write(_digest)\n\n    return has_changed", "docstring": "Has `glob` changed in `dirname`\n\nArgs:\ndirname: directory to measure\nfilename: filename to store checksum", "source": "juraj-google-style"}
{"code": "def __init__(self, client, base_path):\n        \n        self._client = client\n        self._base_path = base_path\n        self._queue_path = posixpath.join(self._base_path, 'queue', '')\n        self._counter_path = posixpath.join(self._queue_path, 'counter')\n        self._ensure_counter()\n        self._ensure_queue()", "docstring": "Initialise the class.\n\nArgs:\nclient (:class:`consulate.Consul`): A :class:`consulate.Consul` instance.\nbase_path (str): the base path to use in Consul.", "source": "juraj-google-style"}
{"code": "def save(self, force=False):\n        \n        from time import time\n\n        \n        \n        \n        from datetime import datetime\n        savefreq = TaskDB.get_option(\"savefreq\", 2, int)\n        \n        if self.lastsave is not None:\n            delta = (datetime.fromtimestamp(time()) -\n                     datetime.fromtimestamp(self.lastsave)) \n            elapsed = int(delta.total_seconds()/60)\n        else:\n            elapsed = savefreq + 1\n\n        if elapsed > savefreq or force:\n            if not writeable:\n                \n                \n                self.lastsave = time()\n                msg.std(\"Skipping database write to disk by setting.\", 2)\n                return\n\n            import json\n            try:\n                entities, compkeys = _json_clean(self.entities)\n                jdb = {\"entities\": entities,\n                       \"compkeys\": compkeys,\n                       \"uuids\": self.uuids}\n                with open(self.dbpath, 'w') as f:\n                    json.dump(jdb, f)\n            except: \n                from acorn.msg import err\n                import sys\n                raise\n                err(\"{}: {}\".format(*sys.exc_info()[0:2]))\n\n            self.lastsave = time()", "docstring": "Serializes the database file to disk.\n\nArgs:\nforce (bool): when True, the elapsed time since last save is ignored\nand the database is saved anyway (subject to global\n:data:`writeable` setting).", "source": "juraj-google-style"}
{"code": "def sg_argmax(tensor, opt):\n    r\n    opt += tf.sg_opt(axis=tensor.get_shape().ndims-1)\n    return tf.argmax(tensor, opt.axis, opt.name)", "docstring": "r\"\"\"Returns the indices of the maximum values along the specified axis.\n\nSee `tf.argmax()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis: Target axis. Default is the last one.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def all(self, data={}, **kwargs):\n    return super(Invoice, self).all(data, **kwargs)", "docstring": "Fetch all Invoice entities\n\nReturns:\nDictionary of Invoice data", "source": "codesearchnet"}
{"code": "def _format_variant(self, case_id, gemini_variant, individual_objs, index=0, add_all_info=False):\n    chrom = gemini_variant['chrom']\n    if (chrom.startswith('chr') or chrom.startswith('CHR')):\n        chrom = chrom[3:]\n    variant_dict = {'CHROM': chrom, 'POS': str(gemini_variant['start']), 'ID': gemini_variant['rs_ids'], 'REF': gemini_variant['ref'], 'ALT': gemini_variant['alt'], 'QUAL': gemini_variant['qual'], 'FILTER': gemini_variant['filter']}\n    variant = Variant(**variant_dict)\n    variant.update_variant_id(gemini_variant['variant_id'])\n    logger.debug('Creating a variant object of variant {0}'.format(variant.variant_id))\n    variant['index'] = index\n    self._add_most_severe_consequence(variant, gemini_variant)\n    self._add_impact_severity(variant, gemini_variant)\n    variant.start = int(gemini_variant['start'])\n    variant.stop = int(gemini_variant['end'])\n    if (self.variant_type == 'sv'):\n        variant.sv_type = gemini_variant['sub_type']\n        variant.stop = int(gemini_variant['end'])\n        self._add_sv_coordinates(variant)\n    else:\n        self._add_transcripts(variant, gemini_variant)\n        self._add_thousand_g(variant, gemini_variant)\n        self._add_exac(variant, gemini_variant)\n        self._add_gmaf(variant, gemini_variant)\n        if gemini_variant['cadd_scaled']:\n            variant.cadd_score = gemini_variant['cadd_scaled']\n        polyphen = gemini_variant['polyphen_pred']\n        if polyphen:\n            variant.add_severity('Polyphen', polyphen)\n        sift = gemini_variant['sift_pred']\n        if sift:\n            variant.add_severity('SIFT', sift)\n    self._add_hgnc_symbols(variant)\n    if (self.variant_type == 'snv'):\n        self._add_genes(variant)\n    self._add_consequences(variant)\n    if add_all_info:\n        self._add_genotypes(variant, gemini_variant, case_id, individual_objs)\n        if (self.variant_type == 'sv'):\n            self._add_genes(variant)\n    return variant", "docstring": "Make a puzzle variant from a gemini variant\n\nArgs:\ncase_id (str): related case id\ngemini_variant (GeminiQueryRow): The gemini variant\nindividual_objs (list(dict)): A list of Individuals\nindex(int): The index of the variant\n\nReturns:\nvariant (dict): A Variant object", "source": "codesearchnet"}
{"code": "def _ConvertDictToObject(cls, json_dict):\n    class_type = json_dict.get('__type__', None)\n    if (not class_type):\n        return json_dict\n    if (class_type == 'bytes'):\n        return binascii.a2b_qp(json_dict['stream'])\n    if (class_type == 'tuple'):\n        return tuple(cls._ConvertListToObject(json_dict['values']))\n    if (class_type == 'collections.Counter'):\n        return cls._ConvertDictToCollectionsCounter(json_dict)\n    if (class_type == 'AttributeContainer'):\n        container_type = json_dict.get('__container_type__', None)\n    elif (class_type == 'PathSpec'):\n        return cls._ConvertDictToPathSpec(json_dict)\n    else:\n        raise ValueError('Unsupported class type: {0:s}'.format(class_type))\n    container_class = containers_manager.AttributeContainersManager.GetAttributeContainer(container_type)\n    if (not container_class):\n        raise ValueError('Unsupported container type: {0:s}'.format(container_type))\n    container_object = container_class()\n    supported_attribute_names = container_object.GetAttributeNames()\n    for (attribute_name, attribute_value) in iter(json_dict.items()):\n        if ((container_type not in ('event', 'event_data')) and (attribute_name not in supported_attribute_names)):\n            if (attribute_name not in ('__container_type__', '__type__')):\n                logger.debug('[ConvertDictToObject] unsupported attribute name: {0:s}.{1:s}'.format(container_type, attribute_name))\n            continue\n        if isinstance(attribute_value, dict):\n            attribute_value = cls._ConvertDictToObject(attribute_value)\n        elif isinstance(attribute_value, list):\n            attribute_value = cls._ConvertListToObject(attribute_value)\n        setattr(container_object, attribute_name, attribute_value)\n    return container_object", "docstring": "Converts a JSON dict into an object.\n\nThe dictionary of the JSON serialized objects consists of:\n{\n'__type__': 'AttributeContainer'\n'__container_type__': ...\n...\n}\n\nHere '__type__' indicates the object base type. In this case\n'AttributeContainer'.\n\n'__container_type__' indicates the attribute container type.\n\nThe rest of the elements of the dictionary make up the attributes.\n\nArgs:\njson_dict (dict[str, object]): JSON serialized objects.\n\nReturns:\nAttributeContainer|dict|list|tuple: deserialized object.\n\nRaises:\nValueError: if the class type or container type is not supported.", "source": "codesearchnet"}
{"code": "def _deferred_pool_runner(has_chief, num_workers, initializer=None, share_gpu=True):\n    container = []\n\n    def get_or_create():\n        if not container:\n            cluster_spec = multi_worker_test_base.create_cluster_spec(has_chief=has_chief, num_workers=num_workers, num_ps=0, has_eval=False)\n            runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec, initializer=initializer, share_gpu=share_gpu)\n            container.append(runner)\n        return container[0]\n    return get_or_create", "docstring": "Returns a callable that returns the pool runner.\n\nIt creates the pool runner only upon first invocation. This avoids creating it\nwhen this file is imported.\n\nArgs:\nhas_chief: whether there should be a chief.\nnum_workers: the number of workers excluding the chief.\ninitializer: initializer of each process.\nshare_gpu: whether to share GPU between the workers.\n\nReturns:\nA callable that returns the runner.", "source": "github-repos"}
{"code": "def normalize_version(version):\n    \n    rv = []\n    for x in version.split(\".\"):\n        try:\n            rv.append(int(x))\n        except ValueError:\n            for y in re.split(\"([0-9]+)\", x):\n                if y == '':\n                    continue\n                try:\n                    rv.append(int(y))\n                except ValueError:\n                    rv.append(y)\n    return rv", "docstring": "Helper function to normalize version.\nReturns a comparable object.\nArgs:\nversion (str) version, e.g. \"0.1.0\"", "source": "juraj-google-style"}
{"code": "def __init__(self, use_resource_alias: bool=False, value_set_codes_table: Optional[str]=None, value_set_codes_definitions: Optional[fhir_package.FhirPackageManager]=None) -> None:\n    self._use_resource_alias = use_resource_alias\n    self._value_set_codes_table = value_set_codes_table\n    self._value_set_codes_definitions = value_set_codes_definitions", "docstring": "Creates a SparkSqlInterpreter.\n\nArgs:\nuse_resource_alias: Determines whether it is necessary to call the\nresource table directly through an alias.\nvalue_set_codes_table: The name of the database table containing value set\ncode definitions. Used when building SQL for memberOf expressions. If\ngiven, value set definitions needed for memberOf expressions will be\nretrieved from this table if they can not be found in\n`value_set_codes_definitions`. If neither this nor\n`value_set_codes_definitions` is given, no memberOf SQL will be\ngenerated.\nvalue_set_codes_definitions: A package manager containing value set\ndefinitions which can be used to build SQL for memberOf expressions.\nThese value set definitions can be consulted in favor of using an\nexternal `value_set_codes_table`. If neither this nor\n`value_set_codes_definitions` is given, no memberOf SQL will be\ngenerated.", "source": "github-repos"}
{"code": "def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):\n        \n        return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)", "docstring": "Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.\n\nArgs:\nfn_x: A callable returning the value $f(x)$ at $x$.\nx_init: Initial solution guess $x_0$.\nbase_value: Value $f(x')$ at $x = x'$.\ntarget_value: Value $f(x_0)$ at $x = x_0$.\nestimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.\n\nReturns:\nA solution $x$ to the problem as given by the solver.", "source": "juraj-google-style"}
{"code": "def list(self, *args, **kwargs):\n    return [self.prepare_model(n) for n in self.client.api.nodes(*args, **kwargs)]", "docstring": "List swarm nodes.\n\nArgs:\nfilters (dict): Filters to process on the nodes list. Valid\nfilters: ``id``, ``name``, ``membership`` and ``role``.\nDefault: ``None``\n\nReturns:\nA list of :py:class:`Node` objects.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> client.nodes.list(filters={'role': 'manager'})", "source": "codesearchnet"}
{"code": "def _MaybeNewName(self, name):\n    if not name:\n        return name\n    if name == self._old[:-1]:\n        return self._module_name\n    before, match, after = name.partition(self._old)\n    if match and (not before):\n        return self._new + after\n    else:\n        return name", "docstring": "Decides if a name should be replaced.\n\nArgs:\nname: A name for which a prefix should be changed.\n\nReturns:\nIf name is local to the module described by old_module_name the\nold_module_part will be replaced by new_module_name and returned,\notherwise node.name will be returned.", "source": "github-repos"}
{"code": "def __init__(self, state_regex, regex, actions, next_state, flags=re.I):\n    \n    self.state_regex = re.compile(\n        state_regex, re.DOTALL | re.M | re.S | re.U | flags)\n    self.regex = re.compile(regex, re.DOTALL | re.M | re.S | re.U | flags)\n    self.re_str = regex\n    self.actions = []\n    if actions:\n      self.actions = actions.split(',')\n\n    self.next_state = next_state", "docstring": "Initializes the token object.\n\nArgs:\n\nstate_regex: If this regular expression matches the current state this\nrule is considered.\nregex: A regular expression to try and match from the current point.\nactions: A command separated list of method names in the Lexer to call.\nnext_state: The next state we transition to if this Token matches.\nflags: re flags.", "source": "juraj-google-style"}
{"code": "def shared_s3_app_bucket(self, include_region=False):\n    if include_region:\n        shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data)\n    else:\n        shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data)\n    return shared_s3_app_bucket", "docstring": "Generate shared s3 application bucket name.\n\nArgs:\ninclude_region (bool): Include region in the name generation.", "source": "codesearchnet"}
{"code": "def checksum(self, path):\n    try:\n        return self._gcsIO().checksum(path)\n    except Exception as e:\n        raise BeamIOError('Checksum operation failed', {path: e})", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nArgs:\npath: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def MultiDeleteAttributes(self,\n                            subjects,\n                            attributes,\n                            start=None,\n                            end=None,\n                            sync=True):\n    \n    for subject in subjects:\n      self.DeleteAttributes(\n          subject, attributes, start=start, end=end, sync=sync)", "docstring": "Remove all specified attributes from a list of subjects.\n\nArgs:\nsubjects: The list of subjects that will have these attributes removed.\nattributes: A list of attributes.\nstart: A timestamp, attributes older than start will not be deleted.\nend: A timestamp, attributes newer than end will not be deleted.\nsync: If true we block until the operation completes.", "source": "juraj-google-style"}
{"code": "def _get_dump_file_path(dump_root, device_name, debug_node_name):\n    dump_root = os.path.join(dump_root, debug_data.device_name_to_device_path(device_name))\n    if '/' in debug_node_name:\n        dump_dir = os.path.join(dump_root, os.path.dirname(debug_node_name))\n        dump_file_name = re.sub(':', '_', os.path.basename(debug_node_name))\n    else:\n        dump_dir = dump_root\n        dump_file_name = re.sub(':', '_', debug_node_name)\n    now_microsec = int(round(time.time() * 1000 * 1000))\n    dump_file_name += '_%d' % now_microsec\n    return os.path.join(dump_dir, dump_file_name)", "docstring": "Get the file path of the dump file for a debug node.\n\nArgs:\ndump_root: (str) Root dump directory.\ndevice_name: (str) Name of the device that the debug node resides on.\ndebug_node_name: (str) Name of the debug node, e.g.,\ncross_entropy/Log:0:DebugIdentity.\n\nReturns:\n(str) Full path of the dump file.", "source": "github-repos"}
{"code": "def parse_table_name(bigquery_table):\n  \n\n  id_name = bigquery_table.split(':')\n  if len(id_name) != 2:\n    raise ValueError('Bigquery table name should be in the form '\n                     'project_id:dataset.table_name. Got %s' % bigquery_table)\n  return id_name[1]", "docstring": "Giving a string a:b.c, returns b.c.\n\nArgs:\nbigquery_table: full table name project_id:dataset:table\n\nReturns:\ndataset:table\n\nRaises:\nValueError: if a, b, or c contain the character ':'.", "source": "juraj-google-style"}
{"code": "def setSeasonSchedules(self, cmd_dict=None, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setSeasonSchedules\")\n\n        if not cmd_dict:\n            cmd_dict = self.m_seasons_sched_params\n\n        try:\n            if not self.request(False):\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_table = \"\"\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_1_Start_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_1_Start_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_1_Schedule\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_2_Start_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_2_Start_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_2_Schedule\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_3_Start_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_3_Start_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_3_Schedule\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_4_Start_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_4_Start_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Season_4_Schedule\"]).zfill(2))\n                    req_table += binascii.hexlify(str(0).zfill(24))\n                    req_str = \"015731023030383028\" + req_table + \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success(setSeasonSchedules): 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial command to set seasons table.\n\nIf no dictionary is passed, the meter object buffer is used.\n\nArgs:\ncmd_dict (dict): Optional dictionary of season schedules.\npassword (str): Optional password\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def stations(self, station, limit=10):\n    query = {'start': 1, 'S': (station + '?'), 'REQ0JourneyStopsB': limit}\n    rsp = requests.get('http:\n    return parse_stations(rsp.text)", "docstring": "Find stations for given queries\n\nArgs:\nstation (str): search query\nlimit (int): limit number of results", "source": "codesearchnet"}
{"code": "def _parse_batch_get(get_doc_response, reference_map, client):\n    result_type = get_doc_response.WhichOneof('result')\n    if (result_type == 'found'):\n        reference = _get_reference(get_doc_response.found.name, reference_map)\n        data = _helpers.decode_dict(get_doc_response.found.fields, client)\n        snapshot = DocumentSnapshot(reference, data, exists=True, read_time=get_doc_response.read_time, create_time=get_doc_response.found.create_time, update_time=get_doc_response.found.update_time)\n    elif (result_type == 'missing'):\n        snapshot = DocumentSnapshot(None, None, exists=False, read_time=get_doc_response.read_time, create_time=None, update_time=None)\n    else:\n        raise ValueError('`BatchGetDocumentsResponse.result` (a oneof) had a field other than `found` or `missing` set, or was unset')\n    return snapshot", "docstring": "Parse a `BatchGetDocumentsResponse` protobuf.\n\nArgs:\nget_doc_response (~google.cloud.proto.firestore.v1beta1.\\\nfirestore_pb2.BatchGetDocumentsResponse): A single response (from\na stream) containing the \"get\" response for a document.\nreference_map (Dict[str, .DocumentReference]): A mapping (produced\nby :func:`_reference_info`) of fully-qualified document paths to\ndocument references.\nclient (~.firestore_v1beta1.client.Client): A client that has\na document factory.\n\nReturns:\n[.DocumentSnapshot]: The retrieved snapshot.\n\nRaises:\nValueError: If the response has a ``result`` field (a oneof) other\nthan ``found`` or ``missing``.", "source": "codesearchnet"}
{"code": "def _replay(self, trial_id: int, dna: DNA, reward: Union[None, float, Tuple[float]]):\n    del trial_id\n    if reward is not None:\n        self._feedback(dna, reward)", "docstring": "Replay a single DNA from the history for state recovery.\n\nThe default implementation to call `DNAGenerator._feedback`. Subclasses that\nhave states and can be recovered from replaying the history should override\nthis method. See class `Sweeping` as an example.\n\nArgs:\ntrial_id: A zero-based integer as the trial ID for the DNA.\ndna: A historically proposed DNA.\nreward: The reward for the DNA. If None, the reward is not yet fed back\nto the optimizer.", "source": "github-repos"}
{"code": "def start(period: int) -> threading.Event:\n    global _heartbeat_timer\n    if _heartbeat_timer is not None:\n        logging.warning('A heartbeat thread is already running, skipping this one.')\n        return _heartbeat_timer\n    task_id = config.client_id()\n    num_tasks = config.num_clients()\n    if task_id == 0:\n        token = np.random.randint(0, pow(2, 16) - 1)\n        signal = np.full([num_tasks], token, dtype=np.int32)\n    else:\n        signal = np.zeros([num_tasks], dtype=np.int32)\n    logging.info('Initial heartbeat signal: %s', signal)\n    device = tf_device.DeviceSpec(job=config.job_name(), replica=0, task=task_id, device_type='CPU', device_index=0)\n    with ops.device(device):\n        signal = all_reduce(constant_op.constant(signal), group_size=num_tasks, group_key=0, instance_key=0, timeout=max(period - 10, 2)).numpy()\n    logging.info('Merged heartbeat signal %s', signal)\n    if task_id == 0:\n        if not np.all(signal == token):\n            logging.fatal('Merged heartbeat signal has value != %d', token)\n    else:\n        if len(set(signal)) != 1:\n            logging.fatal('Merged heartbeat signal has unequal elements')\n        token = signal[0]\n    _heartbeat_timer = threading.Event()\n\n    def stop_heartbeat():\n        logging.info('Stopping the heartbeat thread')\n        _heartbeat_timer.set()\n        time.sleep(max(period \n    atexit.register(stop_heartbeat)\n    thread = threading.Thread(target=_heartbeat, args=[period, _heartbeat_timer, token, num_tasks, task_id, device], daemon=True)\n    thread.start()\n    return _heartbeat_timer", "docstring": "Starts a persistent thread exchanging heartbeats between workers.\n\nArgs:\nperiod: Heartbeat interval in seconds. Heartbeat timeout is set to the\nlarger of `period` - 10 and 2s.\n\nReturns:\nA threading.Event object. Users can choose to call its set() method to shut\ndown the heartbeat service gracefully. This isn't necessary in most cases,\nbecause the heartbeat service automatically shuts down at successful program\nexit through atexit handlers. But in situations when atexit handlers are not\ninvoked, such as when multiprocessing processes exit in tests, users can\nmanually request a shutdown.", "source": "github-repos"}
{"code": "def get_by_index(self, index):\n        \n        if index >= len(self._datasets):\n            raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n\n        return self._datasets[index]", "docstring": "Return a dataset by its index.\n\nArgs:\nindex (int): The index of the dataset that should be returned.\n\nRaises:\nDataInvalidIndex: If the index does not represent a valid dataset.", "source": "juraj-google-style"}
{"code": "def adaptive_enc_mask(x_len, chunk_start_idx, left_window=0, right_window=0):\n    chunk_start_idx = torch.Tensor(chunk_start_idx).long()\n    start_pad = torch.nn.functional.pad(chunk_start_idx, (1, 0))\n    end_pad = torch.nn.functional.pad(chunk_start_idx, (0, 1), value=x_len)\n    seq_range = torch.arange(0, x_len).unsqueeze(-1)\n    idx = ((seq_range < end_pad) & (seq_range >= start_pad)).nonzero()[:, 1]\n    seq_range_expand = torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)\n    idx_left = idx - left_window\n    idx_left[idx_left < 0] = 0\n    boundary_left = start_pad[idx_left]\n    mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)\n    idx_right = idx + right_window\n    idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)\n    boundary_right = end_pad[idx_right]\n    mask_right = seq_range_expand < boundary_right.unsqueeze(-1)\n    return mask_left & mask_right", "docstring": "The function is very important for Transformer Transducer Streaming mode\nArgs:\nxs_len (int): sequence length\nchunk_start_idx (list): first idx of each chunk, such as [0,18,36,48]. It also supports adaptive chunk size [0,10,15,45]\nleft_window (int): how many left chunks can be seen\nright_window (int): how many right chunks can be seen. It is used for chunk overlap model.\nReturns:\nmask (torch.Tensor): a mask tensor for streaming model", "source": "github-repos"}
{"code": "def register_command(self, name, handler, validator):\n    self._commands[name] = (handler, validator)", "docstring": "Register a coroutine command handler.\n\nThis handler will be called whenever a command message is received\nfrom the client, whose operation key matches ``name``.  The handler\nwill be called as::\n\nresponse_payload = await handler(cmd_payload, context)\n\nIf the coroutine returns, it will be assumed to have completed\ncorrectly and its return value will be sent as the result of the\ncommand.  If the coroutine wishes to signal an error handling the\ncommand, it must raise a ServerCommandError exception that contains a\nstring reason code for the error.  This will generate an error\nresponse to the command.\n\nThe cmd_payload is first verified using the SchemaVerifier passed in\n``validator`` and handler is only called if verification succeeds. If\nverification fails, a failure response to the command is returned\nautomatically to the client.\n\nArgs:\nname (str): The unique command name that will be used to dispatch\nclient command messages to this handler.\nhandler (coroutine function): A coroutine function that will be\ncalled whenever this command is received.\nvalidator (SchemaVerifier): A validator object for checking the\ncommand payload before calling this handler.", "source": "codesearchnet"}
{"code": "class PromptDepthAnythingReassembleStage(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList()\n        for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):\n            self.layers.append(PromptDepthAnythingReassembleLayer(config, channels=channels, factor=factor))\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n        \n        out = []\n        for i, hidden_state in enumerate(hidden_states):\n            hidden_state = hidden_state[:, 1:]\n            batch_size, _, num_channels = hidden_state.shape\n            hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)\n            hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()\n            hidden_state = self.layers[i](hidden_state)\n            out.append(hidden_state)\n        return out", "docstring": "This class reassembles the hidden states of the backbone into image-like feature representations at various\nresolutions.\n\nThis happens in 3 stages:\n1. Take the patch embeddings and reshape them to image-like feature representations.\n2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.\n3. Resizing the spatial dimensions (height, width).\n\nArgs:\nconfig (`[PromptDepthAnythingConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def add_server(self, hostname, port, use_ssl, tls_ctx=None):\n        \n        if not use_ssl and tls_ctx:\n            raise ValueError(\"Cannot specify a TLS context and not use SSL!\")\n        server = ldap3.Server(\n            hostname,\n            port=port,\n            use_ssl=use_ssl,\n            tls=tls_ctx\n        )\n        self._server_pool.add(server)\n        return server", "docstring": "Add an additional server to the server pool and return the\nfreshly created server.\n\nArgs:\nhostname (str): Hostname of the server\nport (int): Port of the server\nuse_ssl (bool): True if SSL is to be used when connecting.\ntls_ctx (ldap3.Tls): An optional TLS context object to use\nwhen connecting.\n\nReturns:\nldap3.Server: The freshly created server object.", "source": "juraj-google-style"}
{"code": "def GetSourceStrings(cls, event):\n    formatter_object = cls.GetFormatterObject(event.data_type)\n    return formatter_object.GetSources(event)", "docstring": "Retrieves the formatted source strings for a specific event object.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nlist[str, str]: short and long version of the source of the event.", "source": "codesearchnet"}
{"code": "def _FormatIPToken(self, token_data):\n    data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])\n    return {'IPv4_Header': data}", "docstring": "Formats an IPv4 packet header token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_ip): AUT_IP token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def delete(self, json=None):\n        \n        return self._call('delete', url=self.endpoint, json=json)", "docstring": "Send a DELETE request and return the JSON decoded result.\n\nArgs:\njson (dict, optional): Object to encode and send in request.\n\nReturns:\nmixed: JSON decoded response data.", "source": "juraj-google-style"}
{"code": "def set_imu_callback(self, callback, data=None):\n    self.imu_callback = callback\n    self.imu_callback_data = data", "docstring": "Register a callback for incoming IMU data packets.\n\nThis method allows you to pass in a callbable which will be called on\nreceipt of each IMU data packet sent by this SK8 device. Set to `None`\nto disable it again.\n\nArgs:\ncallback: a callable with the following signature:\n(acc, gyro, mag, imu_index, seq, timestamp, data)\nwhere:\nacc, gyro, mag = sensor data ([x,y,z] in each case)\nimu_index = originating IMU number (int, 0-4)\nseq = packet sequence number (int, 0-255)\ntimestamp = value of time.time() when packet received\ndata = value of `data` parameter passed to this method\ndata: an optional arbitrary object that will be passed as a\nparameter to the callback", "source": "codesearchnet"}
{"code": "def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None):\n    if (st is None):\n        st = os.stat_result((33188, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n    mtime = time.localtime((st.st_mtime or time.time()))\n    date_time = mtime[0:6]\n    if (arcname is None):\n        raise ValueError('An arcname must be provided.')\n    zinfo = zipfile.ZipInfo(arcname, date_time)\n    zinfo.external_attr = ((st[0] & 65535) << 16)\n    if (compress_type is None):\n        zinfo.compress_type = self._compression\n    else:\n        zinfo.compress_type = compress_type\n    zinfo.file_size = 0\n    zinfo.compress_size = 0\n    zinfo.flag_bits = 8\n    zinfo.CRC = 134695760\n    zinfo.extra = struct.pack('<HHIIHH', 22613, 12, 0, 0, 0, 0)\n    return zinfo", "docstring": "Generate ZipInfo instance for the given name, compression and stat.\n\nArgs:\narcname: The name in the archive this should take.\ncompress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)\nst: An optional stat object to be used for setting headers.\n\nReturns:\nZipInfo instance.\n\nRaises:\nValueError: If arcname is not provided.", "source": "codesearchnet"}
{"code": "def authorization_url(self, **kwargs):\n    kwargs.setdefault('access_type', 'offline')\n    (url, state) = self.oauth2session.authorization_url(self.client_config['auth_uri'], **kwargs)\n    return (url, state)", "docstring": "Generates an authorization URL.\n\nThis is the first step in the OAuth 2.0 Authorization Flow. The user's\nbrowser should be redirected to the returned URL.\n\nThis method calls\n:meth:`requests_oauthlib.OAuth2Session.authorization_url`\nand specifies the client configuration's authorization URI (usually\nGoogle's authorization server) and specifies that \"offline\" access is\ndesired. This is required in order to obtain a refresh token.\n\nArgs:\nkwargs: Additional arguments passed through to\n:meth:`requests_oauthlib.OAuth2Session.authorization_url`\n\nReturns:\nTuple[str, str]: The generated authorization URL and state. The\nuser must visit the URL to complete the flow. The state is used\nwhen completing the flow to verify that the request originated\nfrom your application. If your application is using a different\n:class:`Flow` instance to obtain the token, you will need to\nspecify the ``state`` when constructing the :class:`Flow`.", "source": "codesearchnet"}
{"code": "def _concat(prefix, suffix, static=False):\n    if isinstance(prefix, tensor.Tensor):\n        p = prefix\n        p_static = tensor_util.constant_value(prefix)\n        if p.shape.ndims == 0:\n            p = array_ops.expand_dims(p, 0)\n        elif p.shape.ndims != 1:\n            raise ValueError('prefix tensor must be either a scalar or vector, but saw tensor: %s' % p)\n    else:\n        p = tensor_shape.TensorShape(prefix)\n        p_static = p.as_list() if p.ndims is not None else None\n        p = constant_op.constant(p.as_list(), dtype=dtypes.int32) if p.is_fully_defined() else None\n    if isinstance(suffix, tensor.Tensor):\n        s = suffix\n        s_static = tensor_util.constant_value(suffix)\n        if s.shape.ndims == 0:\n            s = array_ops.expand_dims(s, 0)\n        elif s.shape.ndims != 1:\n            raise ValueError('suffix tensor must be either a scalar or vector, but saw tensor: %s' % s)\n    else:\n        s = tensor_shape.TensorShape(suffix)\n        s_static = s.as_list() if s.ndims is not None else None\n        s = constant_op.constant(s.as_list(), dtype=dtypes.int32) if s.is_fully_defined() else None\n    if static:\n        shape = tensor_shape.TensorShape(p_static).concatenate(s_static)\n        shape = shape.as_list() if shape.ndims is not None else None\n    else:\n        if p is None or s is None:\n            raise ValueError('Provided a prefix or suffix of None: %s and %s' % (prefix, suffix))\n        shape = array_ops.concat((p, s), 0)\n    return shape", "docstring": "Concat that enables int, Tensor, or TensorShape values.\n\nThis function takes a size specification, which can be an integer, a\nTensorShape, or a Tensor, and converts it into a concatenated Tensor\n(if static = False) or a list of integers (if static = True).\n\nArgs:\nprefix: The prefix; usually the batch size (and/or time step size).\n(TensorShape, int, or Tensor.)\nsuffix: TensorShape, int, or Tensor.\nstatic: If `True`, return a python list with possibly unknown dimensions.\nOtherwise return a `Tensor`.\n\nReturns:\nshape: the concatenation of prefix and suffix.\n\nRaises:\nValueError: if `suffix` is not a scalar or vector (or TensorShape).\nValueError: if prefix or suffix was `None` and asked for dynamic\nTensors out.", "source": "github-repos"}
{"code": "def delete(self, key):\n        \n        dct = self\n        keys = key.split('.')\n        last_key = keys[-1]\n        for k in keys:\n            \n            \n            if k == last_key:\n                del dct[k]\n                break\n\n            \n            if isinstance(dct, DotDict):\n                dct = super(DotDict, dct).__getitem__(k)\n\n            \n            \n            else:\n                dct = dct.__getitem__(k)\n                if not isinstance(dct, (DotDict, dict)):\n                    raise KeyError(\n                        'Subkey \"{}\" in \"{}\" invalid for deletion'.format(k, key)\n                    )", "docstring": "Remove a value from the `DotDict`.\n\nThe `key` parameter can either be a regular string key,\ne.g. \"foo\", or it can be a string key with dot notation,\ne.g. \"foo.bar.baz\", to signify a nested element.\n\nIf the key does not exist in the `DotDict`, it will continue\nsilently.\n\nArgs:\nkey (str): The key to remove.", "source": "juraj-google-style"}
{"code": "def max_intensity(item_a, time_a, item_b, time_b, max_value):\n    \n    intensity_a = item_a.max_intensity(time_a)\n    intensity_b = item_b.max_intensity(time_b)\n    diff = np.sqrt((intensity_a - intensity_b) ** 2)\n    return np.minimum(diff, max_value) / float(max_value)", "docstring": "RMS difference in maximum intensity\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"}
{"code": "def filter_on_submodules(all_modules, submodules):\n    filtered_modules = []\n    for mod in all_modules:\n        for submodule in submodules:\n            for package in PACKAGES:\n                if package + submodule in mod.__name__:\n                    filtered_modules.append(mod)\n    return filtered_modules", "docstring": "Filters all the modules based on the modules flag.\n\nThe module flag has to be relative to the core package imported.\nFor example, if `module=keras.layers` then, this function will return\nall the modules in the submodule.\n\nArgs:\nall_modules: All the modules in the core package.\nsubmodules: Submodules to filter from all the modules.\n\nReturns:\nAll the modules in the submodule.", "source": "github-repos"}
{"code": "def get_transformed_output_time(self, window: 'BoundedWindow', input_timestamp: Timestamp) -> Timestamp:\n    return input_timestamp", "docstring": "Given input time and output window, returns output time for window.\n\nIf TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the\nWindowing, the output timestamp for the given window will be the earliest\nof the timestamps returned by get_transformed_output_time() for elements\nof the window.\n\nArguments:\nwindow: Output window of element.\ninput_timestamp: Input timestamp of element as a timeutil.Timestamp\nobject.\n\nReturns:\nTransformed timestamp.", "source": "github-repos"}
{"code": "def _TensorArrayScatterGrad(op: ops.Operation, flow):\n    handle = op.inputs[0]\n    indices = op.inputs[1]\n    dtype = op.get_attr('T')\n    grad_source = _GetGradSource(flow)\n    flow_out = array_ops.identity(op.outputs[0], 'flow_out')\n    with ops.control_dependencies([flow_out]):\n        flow = array_ops.identity(flow, 'write_barrier')\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    grad = g.gather(indices)\n    return [None, None, grad, flow]", "docstring": "Gradient for TensorArrayScatter.\n\nArgs:\nop: Forward TensorArrayScatter op.\nflow: Gradient `Tensor` flow to TensorArrayScatter.\n\nReturns:\nA grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.", "source": "github-repos"}
{"code": "def fetch(self, invoice_id, data={}, **kwargs):\n    return super(Invoice, self).fetch(invoice_id, data, **kwargs)", "docstring": "Fetch Invoice for given Id\n\nArgs:\ninvoice_id : Id for which invoice object has to be retrieved\n\nReturns:\nInvoice dict for given invoice Id", "source": "codesearchnet"}
{"code": "def split_once(self, horizontal: bool, position: int) -> None:\n        \n        cdata = self._as_cdata()\n        lib.TCOD_bsp_split_once(cdata, horizontal, position)\n        self._unpack_bsp_tree(cdata)", "docstring": "Split this partition into 2 sub-partitions.\n\nArgs:\nhorizontal (bool):\nposition (int):", "source": "juraj-google-style"}
{"code": "def _get_new_global_index(self, index_override):\n    if index_override is None:\n        global_index = self._next_global_index\n    else:\n        if index_override in self._used_global_indices:\n            raise ValueError('Index %d was already used by another call to add')\n        global_index = index_override\n    self._used_global_indices.add(global_index)\n    while self._next_global_index in self._used_global_indices:\n        self._next_global_index += 1\n    return global_index", "docstring": "Return the next unused argument index in order or use an override.\n\nArgs:\nindex_override: An index to use instead of the next available or None\nto use the next available.\n\nReturns:\nA valid global_index to use for the next hint argument.\n\nRaises:\nValueError: If the index_override is already used by another hint.", "source": "github-repos"}
{"code": "def restore_saved_local_scope(self, saved_variables, args_mapping, line_number):\n    restore_nodes = list()\n    for var in saved_variables:\n        if (var.RHS in args_mapping):\n            restore_nodes.append(RestoreNode(((var.RHS + ' = ') + args_mapping[var.RHS]), var.RHS, [var.LHS], line_number=line_number, path=self.filenames[(- 1)]))\n        else:\n            restore_nodes.append(RestoreNode(((var.RHS + ' = ') + var.LHS), var.RHS, [var.LHS], line_number=line_number, path=self.filenames[(- 1)]))\n    for (node, successor) in zip(restore_nodes, restore_nodes[1:]):\n        node.connect(successor)\n    if restore_nodes:\n        self.nodes[(- 1)].connect(restore_nodes[0])\n        self.nodes.extend(restore_nodes)\n    return restore_nodes", "docstring": "Restore the previously saved variables to their original values.\n\nArgs:\nsaved_variables(list[SavedVariable])\nargs_mapping(dict): A mapping of call argument to definition argument.\nline_number(int): Of the def of the function call about to be entered into.\n\nNote: We do not need connect_if_allowed because of the\npreceding call to save_local_scope.", "source": "codesearchnet"}
{"code": "def default_pass_manager_simulator(basis_gates):\n    \n    pass_manager = PassManager()\n\n    pass_manager.append(Unroller(basis_gates))\n\n    pass_manager.append([RemoveResetInZeroState(), Depth(), FixedPoint('depth')],\n                        do_while=lambda property_set: not property_set['depth_fixed_point'])\n\n    return pass_manager", "docstring": "The default pass manager without a coupling map.\n\nArgs:\nbasis_gates (list[str]): list of basis gate names to unroll to.\n\nReturns:\nPassManager: A passmanager that just unrolls, without any optimization.", "source": "juraj-google-style"}
{"code": "def convert(self, obj):\n        \n        if isinstance(obj, pobjects.SymmetricKey):\n            return self._build_core_key(obj, secrets.SymmetricKey)\n        elif isinstance(obj, secrets.SymmetricKey):\n            return self._build_pie_key(obj, pobjects.SymmetricKey)\n        elif isinstance(obj, pobjects.PublicKey):\n            return self._build_core_key(obj, secrets.PublicKey)\n        elif isinstance(obj, secrets.PublicKey):\n            return self._build_pie_key(obj, pobjects.PublicKey)\n        elif isinstance(obj, pobjects.PrivateKey):\n            return self._build_core_key(obj, secrets.PrivateKey)\n        elif isinstance(obj, secrets.PrivateKey):\n            return self._build_pie_key(obj, pobjects.PrivateKey)\n        elif isinstance(obj, pobjects.Certificate):\n            return self._build_core_certificate(obj)\n        elif isinstance(obj, secrets.Certificate):\n            return self._build_pie_certificate(obj)\n        elif isinstance(obj, pobjects.SecretData):\n            return self._build_core_secret_data(obj)\n        elif isinstance(obj, secrets.SecretData):\n            return self._build_pie_secret_data(obj)\n        elif isinstance(obj, pobjects.OpaqueObject):\n            return self._build_core_opaque_object(obj)\n        elif isinstance(obj, secrets.OpaqueObject):\n            return self._build_pie_opaque_object(obj)\n        else:\n            raise TypeError(\"object type unsupported and cannot be converted\")", "docstring": "Convert a Pie object into a core secret object and vice versa.\n\nArgs:\nobj (various): A Pie or core secret object to convert into the\nopposite object space. Required.\n\nRaises:\nTypeError: if the object type is unrecognized or unsupported.", "source": "juraj-google-style"}
{"code": "def info_qry(tickers, flds) -> str:\n    full_list = '\\n'.join(([f'tickers: {tickers[:8]}'] + [f'         {tickers[n:(n + 8)]}' for n in range(8, len(tickers), 8)]))\n    return f", "docstring": "Logging info for given tickers and fields\n\nArgs:\ntickers: tickers\nflds: fields\n\nReturns:\nstr\n\nExamples:\n>>> print(info_qry(\n...     tickers=['NVDA US Equity'], flds=['Name', 'Security_Name']\n... ))\ntickers: ['NVDA US Equity']\nfields:  ['Name', 'Security_Name']", "source": "codesearchnet"}
{"code": "def raise_io_error(self, errno, filename=None):\n        \n        raise IOError(errno, self._error_message(errno), filename)", "docstring": "Raises IOError.\nThe error message is constructed from the given error code and shall\nstart with the error in the real system.\n\nArgs:\nerrno: A numeric error code from the C variable errno.\nfilename: The name of the affected file, if any.", "source": "juraj-google-style"}
{"code": "def parse_coach_ec_df(infile):\n    \n\n    ec_df = pd.read_table(infile, delim_whitespace=True,\n                          names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage',\n                                 'c_score', 'ec_number', 'binding_residues'])\n\n    ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])\n    ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])\n\n    ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',\n                   'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]\n    ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')\n\n    return ec_df", "docstring": "Parse the EC.dat output file of COACH and return a dataframe of results\n\nEC.dat contains the predicted EC number and active residues.\nThe columns are: PDB_ID, TM-score, RMSD, Sequence identity,\nCoverage, Confidence score, EC number, and Active site residues\n\nArgs:\ninfile (str): Path to EC.dat\n\nReturns:\nDataFrame: Pandas DataFrame summarizing EC number predictions", "source": "juraj-google-style"}
{"code": "def file_md5( filename ):\n    \n    with zopen( filename, 'r' ) as f:\n        file_string = f.read()\n    try: \n        file_string = file_string.decode()\n    except AttributeError:\n        pass\n    return( md5sum( file_string ) )", "docstring": "Generate the md5 checksum for a file\n\nArgs:\nfilename (Str): The file to be checksummed.\n\nReturns:\n(Str): The hex checksum\n\nNotes:\nIf the file is gzipped, the md5 checksum returned is\nfor the uncompressed ASCII file.", "source": "juraj-google-style"}
{"code": "def underlying_variable(t):\n  \n  t = underlying_variable_ref(t)\n  assert t is not None\n  \n  if not hasattr(tf.get_default_graph(), \"var_index\"):\n    tf.get_default_graph().var_index = {}\n  var_index = tf.get_default_graph().var_index\n  for v in tf.global_variables()[len(var_index):]:\n    var_index[v.name] = v\n  return var_index[t.name]", "docstring": "Find the underlying tf.Variable object.\n\nArgs:\nt: a Tensor\n\nReturns:\ntf.Variable.", "source": "juraj-google-style"}
{"code": "def push(self, document=None):\n    if (self.document is None):\n        if (document is None):\n            doc = Document()\n        else:\n            doc = document\n    elif (document is None):\n        doc = self.document\n    else:\n        raise ValueError('Cannot push() a different document from existing session.document')\n    self.connect()\n    if (not self.connected):\n        raise IOError(\"Cannot push session document because we failed to connect to the server (to start the server, try the 'bokeh serve' command)\")\n    self._connection.push_doc(doc)\n    if (self._document is None):\n        self._attach_document(doc)", "docstring": "Push the given document to the server and record it as session.document.\n\nIf this is called more than once, the Document has to be the same (or None\nto mean \"session.document\").\n\n.. note::\nAutomatically calls :func:`~connect` before pushing.\n\nArgs:\ndocument (:class:`~bokeh.document.Document`, optional) :\nThe document which will be kept in sync with the server document.\nNone to use session.document or create a new document.", "source": "codesearchnet"}
{"code": "def get_mail_keys(message, complete=True):\n    if complete:\n        log.debug('Get all headers')\n        all_headers_keys = {i.lower() for i in message.keys()}\n        all_parts = ((ADDRESSES_HEADERS | OTHERS_PARTS) | all_headers_keys)\n    else:\n        log.debug('Get only mains headers')\n        all_parts = (ADDRESSES_HEADERS | OTHERS_PARTS)\n    log.debug('All parts to get: {}'.format(', '.join(all_parts)))\n    return all_parts", "docstring": "Given an email.message.Message, return a set with all email parts to get\n\nArgs:\nmessage (email.message.Message): email message object\ncomplete (bool): if True returns all email headers\n\nReturns:\nset with all email parts", "source": "codesearchnet"}
{"code": "def dumpfile(item, path):\n    with io.open(path, 'wb') as fd:\n        fd.write(en(item))", "docstring": "Dump an object to a file by path.\n\nArgs:\nitem (object): The object to serialize.\npath (str): The file path to save.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def supported_device(self, index=0):\n    if ((not util.is_natural(index)) or (index >= self.num_supported_devices())):\n        raise ValueError('Invalid index.')\n    info = structs.JLinkDeviceInfo()\n    result = self._dll.JLINKARM_DEVICE_GetInfo(index, ctypes.byref(info))\n    return info", "docstring": "Gets the device at the given ``index``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nindex (int): the index of the device whose information to get\n\nReturns:\nA ``JLinkDeviceInfo`` describing the requested device.\n\nRaises:\nValueError: if index is less than 0 or >= supported device count.", "source": "codesearchnet"}
{"code": "def WaitUntilDone(self, timeout=None):\n    utils.Poll(generator=self.GetState, condition=(lambda s: (s != self.__class__.STATE_RUNNING)), timeout=timeout)\n    self.target_file = self.target_file.Get()\n    return self", "docstring": "Wait until the operation is done.\n\nArgs:\ntimeout: timeout in seconds. None means default timeout (1 hour).\n0 means no timeout (wait forever).\nReturns:\nOperation object with refreshed target_file.\nRaises:\nPollTimeoutError: if timeout is reached.", "source": "codesearchnet"}
{"code": "def ExportClientsByKeywords(keywords, filename, token=None):\n  r\n  index = client_index.CreateClientIndex(token=token)\n  client_list = index.LookupClients(keywords)\n  logging.info(\"found %d clients\", len(client_list))\n  if not client_list:\n    return\n\n  writer = csv.DictWriter([\n      u\"client_id\",\n      u\"hostname\",\n      u\"last_seen\",\n      u\"os\",\n      u\"os_release\",\n      u\"os_version\",\n      u\"users\",\n      u\"ips\",\n      u\"macs\",\n  ])\n  writer.WriteHeader()\n\n  for client in aff4.FACTORY.MultiOpen(client_list, token=token):\n    s = client.Schema\n    writer.WriteRow({\n        u\"client_id\": client.urn.Basename(),\n        u\"hostname\": client.Get(s.HOSTNAME),\n        u\"os\": client.Get(s.SYSTEM),\n        u\"os_release\": client.Get(s.OS_RELEASE),\n        u\"os_version\": client.Get(s.OS_VERSION),\n        u\"ips\": client.Get(s.HOST_IPS),\n        u\"macs\": client.Get(s.MAC_ADDRESS),\n        u\"users\": \"\\n\".join(client.Get(s.USERNAMES, [])),\n        u\"last_seen\": client.Get(s.PING),\n    })\n\n  with io.open(filename, \"w\") as csv_out:\n    csv_out.write(writer.Content())", "docstring": "r\"\"\"A script to export clients summaries selected by a keyword search.\n\nThis script does a client search for machines matching all of keywords and\nwrites a .csv summary of the results to filename. Multi-value fields are '\\n'\nseparated.\n\nArgs:\nkeywords: a list of keywords to search for\nfilename: the name of the file to write to, will be replaced if already\npresent\ntoken: datastore token.", "source": "juraj-google-style"}
{"code": "def ravel(x):\n    if any_symbolic_tensors((x,)):\n        return Ravel().symbolic_call(x)\n    return backend.numpy.ravel(x)", "docstring": "Return a contiguous flattened tensor.\n\nA 1-D tensor, containing the elements of the input, is returned.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def properties_with_values(self, include_defaults=True):\n    return self.query_properties_with_values((lambda prop: prop.serialized), include_defaults)", "docstring": "Collect a dict mapping property names to their values.\n\nThis method *always* traverses the class hierarchy and includes\nproperties defined on any parent classes.\n\nNon-serializable properties are skipped and property values are in\n\"serialized\" format which may be slightly different from the values\nyou would normally read from the properties; the intent of this method\nis to return the information needed to losslessly reconstitute the\nobject instance.\n\nArgs:\ninclude_defaults (bool, optional) :\nWhether to include properties that haven't been explicitly set\nsince the object was created. (default: True)\n\nReturns:\ndict : mapping from property names to their values", "source": "codesearchnet"}
{"code": "def global_step(device=''):\n  \n  global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)\n  if global_step_ref:\n    return global_step_ref[0]\n  else:\n    collections = [\n        VARIABLES_TO_RESTORE,\n        tf.GraphKeys.GLOBAL_VARIABLES,\n        tf.GraphKeys.GLOBAL_STEP,\n    ]\n    \n    with tf.device(variable_device(device, 'global_step')):\n      return tf.get_variable('global_step', shape=[], dtype=tf.int64,\n                             initializer=tf.zeros_initializer(),\n                             trainable=False, collections=collections)", "docstring": "Returns the global step variable.\n\nArgs:\ndevice: Optional device to place the variable. It can be an string or a\nfunction that is called to get the device for the variable.\n\nReturns:\nthe tensor representing the global step variable.", "source": "juraj-google-style"}
{"code": "def retransmit(self, data):\n        \n\n        \n        \n        \n        if data[\"euuid\"] in self.event_uuids:\n            \n            self.event_uuids[data[\"euuid\"]] += 1\n\n            \n            \n            if (self.event_uuids[data[\"euuid\"]] > self.max_retries or\n                    data[\"cuuid\"] not in self.registry):\n                logger.warning(\"<%s> Retry limit exceeded. \"\n                               \"Timed out waiting for client for \"\n                               \"event: %s\" % (data[\"cuuid\"], data[\"euuid\"]))\n                logger.warning(\"<%s> Deleting event from currently processing \"\n                               \"event uuids\" % data[\"cuuid\"])\n                del self.event_uuids[data[\"euuid\"]]\n            else:\n                \n                logger.debug(\"<%s> Timed out waiting for response. Retry %s. \"\n                             \"Retransmitting message: \"\n                             \"%s\" % (data[\"cuuid\"],\n                                     pformat(self.event_uuids[data[\"euuid\"]]),\n                                     data[\"response\"]))\n\n                \n                host = self.registry[data[\"cuuid\"]][\"host\"]\n                port = self.registry[data[\"cuuid\"]][\"port\"]\n\n                \n                self.listener.send_datagram(data[\"response\"], (host, port))\n\n                \n                logger.debug(\"<%s> Scheduling to retry in %s \"\n                             \"seconds\" % (data[\"cuuid\"], str(self.timeout)))\n                self.listener.call_later(self.timeout, self.retransmit, data)", "docstring": "Processes messages that have been delivered from the listener.\n\nArgs:\ndata (dict): A dictionary containing the uuid, euuid, and message\nresponse. E.g. {\"cuuid\": x, \"euuid\": y, \"response\": z}.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def time_stats(self, **kwargs):\n    if ('time_stats' in self.attributes):\n        return self.attributes['time_stats']\n    path = ('%s/%s/time_stats' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get time stats for the object.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "codesearchnet"}
{"code": "def random_subsets(self, relative_sizes, by_duration=False, balance_labels=False, label_list_ids=None):\n    resulting_sets = {}\n    next_bigger_subset = self.corpus\n    for relative_size in reversed(relative_sizes):\n        generator = SubsetGenerator(next_bigger_subset, random_seed=self.random_seed)\n        if by_duration:\n            sv = generator.random_subset_by_duration(relative_size, balance_labels=balance_labels, label_list_ids=label_list_ids)\n        else:\n            sv = generator.random_subset(relative_size, balance_labels=balance_labels, label_list_ids=label_list_ids)\n        resulting_sets[relative_size] = sv\n    return resulting_sets", "docstring": "Create a bunch of subsets with the given sizes relative to the size or duration of the full corpus.\nBasically the same as calling ``random_subset`` or ``random_subset_by_duration`` multiple times\nwith different values. But this method makes sure that every subset contains only utterances,\nthat are also contained in the next bigger subset.\n\nArgs:\nrelative_sizes (list): A list of numbers between 0 and 1 indicating the sizes of the desired subsets,\nrelative to the full corpus.\nby_duration (bool): If True the size measure is the duration of all utterances in a subset/corpus.\nbalance_labels (bool): If True the labels contained in a subset are chosen to be balanced\nas far as possible.\nlabel_list_ids (list): List of label-list ids. If none is given, all label-lists are considered\nfor balancing. Otherwise only the ones that are in the list are considered.\n\nReturns:\ndict : A dictionary containing all subsets with the relative size as key.", "source": "codesearchnet"}
{"code": "def convert_pil_frames_to_video(videos: List[VideoInput]) -> List[Union['np.ndarray', 'torch.Tensor']]:\n    if not isinstance(videos[0], (list, tuple)):\n        return videos\n    video_converted = []\n    for video in videos:\n        video = [np.array(frame) for frame in video]\n        video = np.stack(video)\n        video_converted.append(video)\n    return video_converted", "docstring": "Given a batch of videos, converts each video to a 4D array. If video is already in array type,\nit is simply returned. We assume that all inputs in the list are in the same format, based on the type of the first element.\n\nArgs:\nvideos (`VideoInput`):\nVideo inputs to turn into a list of videos.", "source": "github-repos"}
{"code": "def get_string(self, byte_count=_MAX_INT):\n    return self.fdp.ConsumeString(byte_count)", "docstring": "Consume a string with given constraints based on a consumed bool.\n\nArgs:\nbyte_count: Byte count that defaults to _MAX_INT.\n\nReturns:\nConsumed string based on input bytes and constraints.", "source": "github-repos"}
{"code": "def resolve(self, reference_path_or_paths: Optional[Union[str, List[str]]]=None) -> Union[Tuple[symbolic.Symbolic, utils.KeyPath], List[Tuple[symbolic.Symbolic, utils.KeyPath]]]:\n    single_input = False\n    if reference_path_or_paths is None:\n        reference_paths = self.reference_paths\n    elif isinstance(reference_path_or_paths, str):\n        reference_paths = [utils.KeyPath.parse(reference_path_or_paths)]\n        single_input = True\n    elif isinstance(reference_path_or_paths, utils.KeyPath):\n        reference_paths = [reference_path_or_paths]\n        single_input = True\n    elif isinstance(reference_path_or_paths, list):\n        paths = []\n        for path in reference_path_or_paths:\n            if isinstance(path, str):\n                path = utils.KeyPath.parse(path)\n            elif not isinstance(path, utils.KeyPath):\n                raise ValueError(\"Argument 'reference_path_or_paths' must be None, a string, KeyPath object, a list of strings, or a list of KeyPath objects.\")\n            paths.append(path)\n        reference_paths = paths\n    else:\n        raise ValueError(\"Argument 'reference_path_or_paths' must be None, a string, KeyPath object, a list of strings, or a list of KeyPath objects.\")\n    resolved_paths = []\n    for reference_path in reference_paths:\n        parent = self.sym_parent\n        while parent is not None and (not reference_path.exists(parent)):\n            parent = getattr(parent, 'sym_parent', None)\n        if parent is None:\n            raise ValueError(f\"Cannot resolve '{reference_path}': parent not found.\")\n        resolved_paths.append((parent, parent.sym_path + reference_path))\n    return resolved_paths if not single_input else resolved_paths[0]", "docstring": "Resolve reference paths based on the location of this node.\n\nArgs:\nreference_path_or_paths: (Optional) a string or KeyPath as a reference\npath or a list of strings or KeyPath objects as a list of\nreference paths.\nIf this argument is not provided, prebound reference paths of this\nobject will be used.\n\nReturns:\nA tuple (or list of tuple) of (resolved parent, resolved full path)", "source": "github-repos"}
{"code": "def _populate_quantization_component_spec(quant_method: _QuantizationMethod) -> None:\n    updated_component_spec = dict()\n    if quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8 or quant_method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8:\n        updated_component_spec[_QuantizationComponent.COMPONENT_ACTIVATION] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_ACTIVATION, tensor_type=_TensorType.TENSORTYPE_INT_8)\n        updated_component_spec[_QuantizationComponent.COMPONENT_WEIGHT] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_WEIGHT, tensor_type=_TensorType.TENSORTYPE_INT_8)\n        updated_component_spec[_QuantizationComponent.COMPONENT_BIAS] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_BIAS, tensor_type=_TensorType.TENSORTYPE_INT_32)\n    elif quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8:\n        updated_component_spec[_QuantizationComponent.COMPONENT_WEIGHT] = _QuantizationComponentSpec(quantization_component=_QuantizationComponent.COMPONENT_WEIGHT, tensor_type=_TensorType.TENSORTYPE_INT_8)\n    if quant_method.quantization_component_specs:\n        for component_spec in quant_method.quantization_component_specs:\n            if component_spec.quantization_component in [_QuantizationComponent.COMPONENT_WEIGHT, _QuantizationComponent.COMPONENT_ACTIVATION]:\n                if component_spec.tensor_type != _TensorType.TENSORTYPE_INT_8:\n                    raise ValueError('Only int8 precision is supported for input operands.')\n            elif component_spec.tensor_type != _TensorType.TENSORTYPE_INT_32:\n                raise ValueError('Only int32 precision is supported for bias.')\n            updated_component_spec[component_spec.quantization_component] = component_spec\n    del quant_method.quantization_component_specs[:]\n    quant_method.quantization_component_specs.extend(updated_component_spec.values())\n    if (quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8 or quant_method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8) and len(quant_method.quantization_component_specs) != 3:\n        raise ValueError('Only 3 components are needed for', quant_method)\n    elif quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 and len(quant_method.quantization_component_specs) != 1:\n        raise ValueError('At least one component spec needs to be specified.')", "docstring": "Populates default values for QuantizationComponentSpec.\n\nArgs:\nquant_method: The quantization method to be updated.", "source": "github-repos"}
{"code": "def render_template(template_name, info, out_path=None):\n    env = Environment(loader=PackageLoader('iotile.build', 'config/templates'), trim_blocks=True, lstrip_blocks=True)\n    template = env.get_template(template_name)\n    result = template.render(info)\n    if (out_path is not None):\n        with open(out_path, 'wb') as outfile:\n            outfile.write(result.encode('utf-8'))\n    return result", "docstring": "Render a template using the variables in info.\n\nYou can optionally render to a file by passing out_path.\n\nArgs:\ntemplate_name (str): The name of the template to load.  This must\nbe a file in config/templates inside this package\nout_path (str): An optional path of where to save the output\nfile, otherwise it is just returned as a string.\ninfo (dict): A dictionary of variables passed into the template to\nperform substitutions.\n\nReturns:\nstring: The rendered template data.", "source": "codesearchnet"}
{"code": "def parse_device_list(device_list_str, key):\n    \n    clean_lines = new_str(device_list_str, 'utf-8').strip().split('\\n')\n    results = []\n    for line in clean_lines:\n        tokens = line.strip().split('\\t')\n        if len(tokens) == 2 and tokens[1] == key:\n            results.append(tokens[0])\n    return results", "docstring": "Parses a byte string representing a list of devices.\n\nThe string is generated by calling either adb or fastboot. The tokens in\neach string is tab-separated.\n\nArgs:\ndevice_list_str: Output of adb or fastboot.\nkey: The token that signifies a device in device_list_str.\n\nReturns:\nA list of android device serial numbers.", "source": "juraj-google-style"}
{"code": "def __init__(self, uploader, mode='wb'):\n    self._uploader = uploader\n    self.mode = mode\n    self._position = 0", "docstring": "Initializes the stream.\n\nArgs:\nuploader: (Uploader) Filesystem dependent implementation.\nmode: (string) Python mode attribute for this stream.", "source": "github-repos"}
{"code": "class SampleTSPredictionOutput(ModelOutput):\n    sequences: Optional[torch.FloatTensor] = None", "docstring": "Base class for time series model's predictions outputs that contains the sampled values from the chosen\ndistribution.\n\nArgs:\nsequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`):\nSampled values from the chosen distribution.", "source": "github-repos"}
{"code": "def fts_match_all(self, fts, inv):\n        \n        return all([self.fts_match(fts, s) for s in inv])", "docstring": "Return `True` if all segments in `inv` matches the features in fts\n\nArgs:\nfts (list): a collection of (value, feature) tuples\ninv (list): a collection of IPA segments represented as Unicode\nstrings\n\nReturns:\nbool: `True` if all segments in `inv` matches the features in `fts`", "source": "juraj-google-style"}
{"code": "def clean_for_storage(self, data):\n    data = self.data_to_unicode(data)\n    if isinstance(data, dict):\n        for k in dict(data).keys():\n            if (k == '_id'):\n                del data[k]\n                continue\n            if ('.' in k):\n                new_k = k.replace('.', '_')\n                data[new_k] = data[k]\n                del data[k]\n                k = new_k\n            if isinstance(data[k], dict):\n                data[k] = self.clean_for_storage(data[k])\n            elif isinstance(data[k], list):\n                data[k] = [self.clean_for_storage(item) for item in data[k]]\n    return data", "docstring": "Clean data in preparation for storage.\n\nDeletes items with key having a '.' or is '_id'. Also deletes those items\nwhose value is a dictionary or a list.\n\nArgs:\ndata: Sample data dictionary to be cleaned.\n\nReturns:\nCleaned data dictionary.", "source": "codesearchnet"}
{"code": "def resume(self, email, master_token, state=None, sync=True):\n    auth = APIAuth(self.OAUTH_SCOPES)\n    ret = auth.load(email, master_token, android_id=get_mac())\n    if ret:\n        self.load(auth, state, sync)\n    return ret", "docstring": "Authenticate to Google with the provided master token & sync.\n\nArgs:\nemail (str): The account to use.\nmaster_token (str): The master token.\nstate (dict): Serialized state to load.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "codesearchnet"}
{"code": "def merge(tup):\n    \n    if not all(tuple(ts.shape[1:] == tup[0].shape[1:] for ts in tup[1:])):\n        raise ValueError('Timeseries to merge must have compatible shapes')\n    indices = np.vstack(tuple(ts.tspan for ts in tup)).argsort()\n    return np.vstack((tup))[indices]", "docstring": "Merge several timeseries\nArguments:\ntup: sequence of Timeseries, with the same shape except for axis 0\nReturns:\nResulting merged timeseries which can have duplicate time points.", "source": "juraj-google-style"}
{"code": "def softsign(x):\n    return ops.softsign(x)", "docstring": "Softsign activation function.\n\nSoftsign is defined as: `softsign(x) = x / (abs(x) + 1)`.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def token_request(self, authorization_code):\n    if (not self._client.token_endpoint):\n        return None\n    request = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': self._redirect_uri}\n    logger.debug('making token request: %s', request)\n    client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic')\n    auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method, request)\n    resp = self._provider_configuration.requests_session.post(self._client.token_endpoint, data=request, headers=auth_header).json()\n    logger.debug('received token response: %s', json.dumps(resp))\n    if ('error' in resp):\n        token_resp = TokenErrorResponse(**resp)\n    else:\n        token_resp = AccessTokenResponse(**resp)\n        token_resp.verify(keyjar=self._client.keyjar)\n        if ('id_token' in resp):\n            token_resp['id_token_jwt'] = resp['id_token']\n    return token_resp", "docstring": "Makes a token request.  If the 'token_endpoint' is not configured in the provider metadata, no request will\nbe made.\n\nArgs:\nauthorization_code (str): authorization code issued to client after user authorization\n\nReturns:\nUnion[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token\nrequest was performed.", "source": "codesearchnet"}
{"code": "def instantiate(config):\n\n    \n\n    for handle, cfg in list(config[\"apps\"].items()):\n        if not cfg.get(\"enabled\", True):\n            continue\n        app = get_application(handle)\n        instances[app.handle] = app(cfg)", "docstring": "instantiate all registered vodka applications\n\nArgs:\nconfig (dict or MungeConfig): configuration object", "source": "juraj-google-style"}
{"code": "class MeanSquaredLogarithmicError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_squared_logarithmic_error', dtype=None):\n        super(MeanSquaredLogarithmicError, self).__init__(mean_squared_logarithmic_error, name, dtype=dtype)", "docstring": "Computes the mean squared logarithmic error between `y_true` and `y_pred`.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.MeanSquaredLogarithmicError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result().numpy()\n0.12011322\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result().numpy()\n0.24022643\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])\n```", "source": "github-repos"}
{"code": "def extract_lookups(value):\n    lookups = set()\n    if isinstance(value, basestring):\n        lookups = lookups.union(extract_lookups_from_string(value))\n    elif isinstance(value, list):\n        for v in value:\n            lookups = lookups.union(extract_lookups(v))\n    elif isinstance(value, dict):\n        for v in value.values():\n            lookups = lookups.union(extract_lookups(v))\n    return lookups", "docstring": "Recursively extracts any stack lookups within the data structure.\n\nArgs:\nvalue (one of str, list, dict): a structure that contains lookups to\noutput values\n\nReturns:\nlist: list of lookups if any", "source": "codesearchnet"}
{"code": "def get_identifier(identifier, module_globals, module_name):\n    \n    if isinstance(identifier, six.string_types):\n        fn = module_globals.get(identifier)\n        if fn is None:\n            raise ValueError('Unknown {}: {}'.format(module_name, identifier))\n        return fn\n    elif callable(identifier):\n        return identifier\n    else:\n        raise ValueError('Could not interpret identifier')", "docstring": "Helper utility to retrieve the callable function associated with a string identifier.\n\nArgs:\nidentifier: The identifier. Could be a string or function.\nmodule_globals: The global objects of the module.\nmodule_name: The module name\n\nReturns:\nThe callable associated with the identifier.", "source": "juraj-google-style"}
{"code": "def value_matrix(self):\n    if self.__value_matrix:\n        return self.__value_matrix\n    self.__value_matrix = [[value_dp.data for value_dp in value_dp_list] for value_dp_list in self.value_dp_matrix]\n    return self.__value_matrix", "docstring": "Converted rows of tabular data.\n\nReturns:\n|list| or |tuple|: Table rows.", "source": "codesearchnet"}
{"code": "def _is_test_class(obj):\n    return tf_inspect.isclass(obj) and 'TestCase' in (p.__name__ for p in tf_inspect.getmro(obj))", "docstring": "Check if arbitrary object is a test class (not a test object!).\n\nArgs:\nobj: An arbitrary object from within a module.\n\nReturns:\nTrue iff obj is a test class inheriting at some point from a module\nnamed \"TestCase\". This is because we write tests using different underlying\ntest libraries.", "source": "github-repos"}
{"code": "def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):\n    t2t_env = rl_utils.setup_env(hparams, batch_size=hparams.real_batch_size, max_num_noops=hparams.max_num_noops)\n    if (which_epoch_data is not None):\n        if (which_epoch_data == 'last'):\n            which_epoch_data = infer_last_epoch_num(data_dir)\n        assert isinstance(which_epoch_data, int), '{}'.format(type(which_epoch_data))\n        t2t_env.start_new_epoch(which_epoch_data, data_dir)\n    else:\n        t2t_env.start_new_epoch((- 999))\n    return t2t_env", "docstring": "Load T2TGymEnv with data from one epoch.\n\nArgs:\nhparams: hparams.\ndata_dir: data directory.\nwhich_epoch_data: data from which epoch to load.\n\nReturns:\nenv.", "source": "codesearchnet"}
{"code": "def _parse(json_str: str, primitive_cls: Type[Date], *, default_timezone: str) -> Date:\n    try:\n        dt = datetime.datetime.strptime(json_str, '%Y')\n        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.YEAR, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        dt = datetime.datetime.strptime(json_str, '%Y-%m')\n        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.MONTH, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        dt = datetime.datetime.strptime(json_str, '%Y-%m-%d')\n        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.DAY, primitive_cls)\n    except ValueError:\n        pass\n    raise fhir_errors.InvalidFhirError('Invalid Date.')", "docstring": "Parses the json_str into a Date FHIR primitive.\n\nArgs:\njson_str: The raw JSON string to parse.\nprimitive_cls: The FHIR primitive to parse into.\ndefault_timezone: The default timezone to use when parsing in the event that\nno timezone information is present.\n\nReturns:\nA FHIR primitive Date.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that no datetime format was\nable to properly parse the json_str.", "source": "github-repos"}
{"code": "def get_flux_biases_from_cache(cur, chains, system_name, chain_strength, max_age=3600):\n    select = '\\n        SELECT\\n            flux_bias\\n        FROM flux_bias_view WHERE\\n            chain_length = :chain_length AND\\n            nodes = :nodes AND\\n            chain_strength = :chain_strength AND\\n            system_name = :system_name AND\\n            insert_time >= :time_limit;\\n        '\n    encoded_data = {'chain_strength': _encode_real(chain_strength), 'system_name': system_name, 'time_limit': (datetime.datetime.now() + datetime.timedelta(seconds=(- max_age)))}\n    flux_biases = {}\n    for chain in chains:\n        encoded_data['chain_length'] = len(chain)\n        encoded_data['nodes'] = json.dumps(sorted(chain), separators=(',', ':'))\n        row = cur.execute(select, encoded_data).fetchone()\n        if (row is None):\n            raise MissingFluxBias\n        flux_bias = _decode_real(*row)\n        if (flux_bias == 0):\n            continue\n        flux_biases.update({v: flux_bias for v in chain})\n    return flux_biases", "docstring": "Determine the flux biases for all of the the given chains, system and chain strength.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\nchains (iterable):\nAn iterable of chains. Each chain is a collection of nodes. Chains in embedding act as\none node.\n\nsystem_name (str):\nThe unique name of a system.\n\nchain_strength (float):\nThe magnitude of the negative quadratic bias that induces the given chain in an Ising\nproblem.\n\nmax_age (int, optional, default=3600):\nThe maximum age (in seconds) for the flux_bias offsets.\n\nReturns:\ndict: A dict where the keys are the nodes in the chains and the values are the flux biases.", "source": "codesearchnet"}
{"code": "def _with_tensor_ranks_only(self) -> 'TypeSpec':\n\n    def relax(value):\n        if isinstance(value, TypeSpec):\n            return value._with_tensor_ranks_only()\n        elif isinstance(value, tensor_shape.TensorShape) and value.rank is not None:\n            return tensor_shape.TensorShape([None] * value.rank)\n        else:\n            return value\n    return self._deserialize(nest.map_structure(relax, self._serialize()))", "docstring": "Returns a TypeSpec compatible with `self`, with tensor shapes relaxed.\n\nReturns:\nA `TypeSpec` that is compatible with `self`, where any `TensorShape`\ninformation has been relaxed to include only tensor rank (and not\nthe dimension sizes for individual axes).", "source": "github-repos"}
{"code": "def delete_pipeline(self, pipeline_key):\n\t\t\n\t\tif pipeline_key:\n\t\t\turi = '/'.join([\n\t\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\t\tpipeline_key\n\t\t\t\t\t\t\t])\n\t\t\treturn self._req('delete', uri)\n\t\telse:\n\t\t\treturn requests.codes.bad_request, None", "docstring": "Deletes the pipeline specified by the key\nArgs:\nreturns \t(status code for the DELETE request, success message dict)\nexpect (200 , {'success': 'true'}) for successful execution}", "source": "juraj-google-style"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if self._file_data is None or self._current_offset >= self._size:\n      return b''\n\n    if size is None:\n      size = self._size\n    if self._current_offset + size > self._size:\n      size = self._size - self._current_offset\n\n    start_offset = self._current_offset\n    self._current_offset += size\n    return self._file_data[start_offset:self._current_offset]", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def operate(self, point):\n    affine_point = np.array([point[0], point[1], point[2], 1])\n    return np.dot(self.affine_matrix, affine_point)[0:3]", "docstring": "Apply the operation on a point.\n\nArgs:\npoint: Cartesian coordinate.\n\nReturns:\nCoordinates of point after operation.", "source": "codesearchnet"}
{"code": "def read_string(self, key, embedded=True):\n    data = None\n    if (key is not None):\n        key_type = self.variable_type(key)\n        data = self.db.read(key.strip())\n        if (data is not None):\n            try:\n                data = json.loads(data)\n                if embedded:\n                    data = self.read_embedded(data, key_type)\n                if (data is not None):\n                    data = u'{}'.format(data)\n            except ValueError as e:\n                err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)\n                self.tcex.log.error(err)\n    else:\n        self.tcex.log.warning(u'The key field was None.')\n    return data", "docstring": "Read method of CRUD operation for string data.\n\nArgs:\nkey (string): The variable to read from the DB.\nembedded (boolean): Resolve embedded variables.\n\nReturns:\n(string): Results retrieved from DB.", "source": "codesearchnet"}
{"code": "def unicode_convert(obj):\n    \n    try:\n        if isinstance(obj, dict):\n            return {unicode_convert(key): unicode_convert(value) for key, value in obj.items()}\n        elif isinstance(obj, list):\n            return [unicode_convert(element) for element in obj]\n        elif isinstance(obj, str):\n            return obj \n        elif isinstance(obj, six.text_type):\n            return obj.encode('utf-8')\n        elif isinstance(obj, six.integer_types):\n            return obj\n        else:\n            return obj\n    except:\n        return obj", "docstring": "Converts unicode objects to anscii.\n\nArgs:\nobj (object): The object to convert.\nReturns:\nThe object converted to anscii, if possible. For ``dict`` and ``list``, the object type is maintained.", "source": "juraj-google-style"}
{"code": "def extend(self, other):\n    orig_num_lines = self.num_lines()\n    self._lines.extend(other.lines)\n    for line_index in other.font_attr_segs:\n        self._font_attr_segs[orig_num_lines + line_index] = other.font_attr_segs[line_index]\n    for key in other.annotations:\n        if isinstance(key, int):\n            self._annotations[orig_num_lines + key] = other.annotations[key]\n        else:\n            self._annotations[key] = other.annotations[key]", "docstring": "Extend this instance of RichTextLines with another instance.\n\nThe extension takes effect on the text lines, the font attribute segments,\nas well as the annotations. The line indices in the font attribute\nsegments and the annotations are adjusted to account for the existing\nlines. If there are duplicate, non-line-index fields in the annotations,\nthe value from the input argument \"other\" will override that in this\ninstance.\n\nArgs:\nother: (RichTextLines) The other RichTextLines instance to be appended at\nthe end of this instance.", "source": "github-repos"}
{"code": "def inverse(self):\n    inverse_circ = self.copy(name=(self.name + '_dg'))\n    inverse_circ.data = []\n    for (inst, qargs, cargs) in reversed(self.data):\n        inverse_circ.data.append((inst.inverse(), qargs, cargs))\n    return inverse_circ", "docstring": "Invert this circuit.\n\nThis is done by recursively inverting all gates.\n\nReturns:\nQuantumCircuit: the inverted circuit\n\nRaises:\nQiskitError: if the circuit cannot be inverted.", "source": "codesearchnet"}
{"code": "def run_schedule(inputs: Dict[EventSetNode, EventSet], schedule: Schedule, verbose: int, check_execution: bool, force_garbage_collector_interval: Optional[float]=10) -> Dict[EventSetNode, EventSet]:\n    data = {**inputs}\n    gc_begin_time = time.time()\n    num_steps = len(schedule.steps)\n    for step_idx, step in enumerate(schedule.steps):\n        operator_def = step.op.definition\n        implementation_cls = implementation_lib.get_implementation_class(operator_def.key)\n        implementation = implementation_cls(step.op)\n        if verbose == 1:\n            print(f'    {step_idx + 1} / {num_steps}: {step.op.operator_key()}', file=sys.stderr, end='', flush=True)\n        elif verbose >= 2:\n            print('=============================', file=sys.stderr)\n            print(f'{step_idx + 1} / {num_steps}: Run {step.op}', file=sys.stderr, flush=True)\n        operator_inputs = {input_key: data[input_node] for input_key, input_node in step.op.inputs.items()}\n        if verbose >= 2:\n            print(f'Inputs:\\n{operator_inputs}\\n', file=sys.stderr, flush=True)\n        begin_time = time.perf_counter()\n        if check_execution:\n            operator_outputs = implementation.call(**operator_inputs)\n        else:\n            operator_outputs = implementation(**operator_inputs)\n        end_time = time.perf_counter()\n        if verbose == 1:\n            print(f' [{end_time - begin_time:.5f} s]', file=sys.stderr, flush=True)\n        elif verbose >= 2:\n            print(f'Outputs:\\n{operator_outputs}\\n', file=sys.stderr)\n            print(f'Duration: {end_time - begin_time} s', file=sys.stderr, flush=True)\n        for output_key, output_node in step.op.outputs.items():\n            output_evset = operator_outputs[output_key]\n            output_evset._internal_node = output_node\n            data[output_node] = output_evset\n        for node in step.released_nodes:\n            assert node in data\n            del data[node]\n        if force_garbage_collector_interval is not None and time.time() - gc_begin_time >= force_garbage_collector_interval:\n            begin_gc = time.time()\n            if verbose >= 2:\n                print('Garbage collection', file=sys.stderr, flush=True, end='')\n            gc.collect()\n            gc_begin_time = time.time()\n            if verbose >= 2:\n                print(f' [{gc_begin_time - begin_gc:.5f} s]', file=sys.stderr, flush=True)\n    return data", "docstring": "Evaluates a schedule on a dictionary of input\n[`EventSets`][temporian.EventSet].\n\nArgs:\ninputs: Mapping of EventSetNodes to materialized EventSets.\nschedule: Sequence of operators to apply on the data.\nverbose: If >0, prints details about the execution on the standard error\noutput. The larger the number, the more information is displayed.\ncheck_execution: If `True`, data of the intermediate results of the\noperators is checked against its expected structure and raises if\nit differs.\nforce_garbage_collector_interval: If set, triggers the garbage\ncollection every \"force_garbage_collector_interval\" seconds.", "source": "github-repos"}
{"code": "def add_outputs(self, *args, **kwargs):\n    if 'names' in kwargs:\n        return [self._outputs.add(arg, name=name) for arg, name in zip(args, kwargs['names'])]\n    else:\n        return [self._outputs.add(arg) for arg in args]", "docstring": "Add a sequence of outputs to the function invocation.\n\nArgs:\n*args: List of outputs to be converted (should be tf.Tensor).\n**kwargs: See\n\nReturns:\nWrapped outputs (identity standins that have additional metadata). These\nare also tf.Tensor's.", "source": "github-repos"}
{"code": "def add_ephemeral_listener(self, callback, event_type=None):\n        \n        listener_id = uuid4()\n        self.ephemeral_listeners.append(\n            {\n                'uid': listener_id,\n                'callback': callback,\n                'event_type': event_type\n            }\n        )\n        return listener_id", "docstring": "Add a callback handler for ephemeral events going to this room.\n\nArgs:\ncallback (func(room, event)): Callback called when an ephemeral event arrives.\nevent_type (str): The event_type to filter for.\nReturns:\nuuid.UUID: Unique id of the listener, can be used to identify the listener.", "source": "juraj-google-style"}
{"code": "def find_element_by_class(self, class_, update=False) -> Elements:\n    return self.find_element(by=By.CLASS, value=class_, update=update)", "docstring": "Finds an element by class.\n\nArgs:\nclass_: The class of the element to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nThe element if it was found.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelement = driver.find_element_by_class('foo')", "source": "codesearchnet"}
{"code": "def _remove_files(files):\n    logger.debug('Request for file removal (_remove_files()).')\n    for fn in files:\n        if os.path.exists(fn):\n            logger.debug((\"Removing '%s'.\" % fn))\n            os.remove(fn)", "docstring": "Remove all given files.\n\nArgs:\nfiles (list): List of filenames, which will be removed.", "source": "codesearchnet"}
{"code": "def outer_horizontal_border_top(self):\n    return u'{lm}{lv}{hz}{rv}'.format(lm=(' ' * self.margins.left), lv=self.border_style.top_left_corner, rv=self.border_style.top_right_corner, hz=self.outer_horizontals())", "docstring": "The complete outer top horizontal border section, including left and right margins.\n\nReturns:\nstr: The top menu border.", "source": "codesearchnet"}
{"code": "def _inchi_labels(mol):\n    obconv = ob.OBConversion()\n    obconv.SetOutFormat(str('inchi'))\n    obconv.AddOption(str('a'), ob.OBConversion.OUTOPTIONS)\n    obconv.AddOption(str('X'), ob.OBConversion.OUTOPTIONS, str('DoNotAddH'))\n    inchi_text = obconv.WriteString(mol)\n    match = re.search('InChI=(?P<inchi>.+)\\\\nAuxInfo=.+/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9,;\\\\(\\\\)]*)/)?', inchi_text)\n    inchi = match.group('inchi')\n    label_text = match.group('labels')\n    eq_atom_text = match.group('eq_atoms')\n    heavy_atom_labels = tuple([int(i) for i in label_text.replace(';', ',').split(',')])\n    eq_atoms = []\n    if (eq_atom_text is not None):\n        eq_tokens = re.findall('\\\\(((?:[0-9]+,)+[0-9]+)\\\\)', eq_atom_text.replace(';', ','))\n        eq_atoms = tuple([tuple([int(i) for i in t.split(',')]) for t in eq_tokens])\n    return (heavy_atom_labels, eq_atoms, inchi)", "docstring": "Get the inchi canonical labels of the heavy atoms in the molecule\n\nArgs:\nmol: The molecule. OpenBabel OBMol object\n\nReturns:\nThe label mappings. List of tuple of canonical label,\noriginal label\nList of equivalent atoms.", "source": "codesearchnet"}
{"code": "def __init__(self, rfc2579_date_time_tuple=None):\n    \n    super(RFC2579DateTime, self).__init__()\n    self._number_of_seconds = None\n    self._precision = definitions.PRECISION_100_MILLISECONDS\n    self.day_of_month = None\n    self.hours = None\n    self.deciseconds = None\n    self.minutes = None\n    self.month = None\n    self.seconds = None\n    self.year = None\n\n    if rfc2579_date_time_tuple:\n      if len(rfc2579_date_time_tuple) < 10:\n        raise ValueError(\n            'Invalid RFC2579 date-time tuple 10 elements required.')\n\n      if rfc2579_date_time_tuple[0] < 0 or rfc2579_date_time_tuple[0] > 65536:\n        raise ValueError('Year value out of bounds.')\n\n      if rfc2579_date_time_tuple[1] not in range(1, 13):\n        raise ValueError('Month value out of bounds.')\n\n      days_per_month = self._GetDaysPerMonth(\n          rfc2579_date_time_tuple[0], rfc2579_date_time_tuple[1])\n      if (rfc2579_date_time_tuple[2] < 1 or\n          rfc2579_date_time_tuple[2] > days_per_month):\n        raise ValueError('Day of month value out of bounds.')\n\n      if rfc2579_date_time_tuple[3] not in range(0, 24):\n        raise ValueError('Hours value out of bounds.')\n\n      if rfc2579_date_time_tuple[4] not in range(0, 60):\n        raise ValueError('Minutes value out of bounds.')\n\n      \n      if rfc2579_date_time_tuple[5] not in range(0, 60):\n        raise ValueError('Seconds value out of bounds.')\n\n      if rfc2579_date_time_tuple[6] < 0 or rfc2579_date_time_tuple[6] > 9:\n        raise ValueError('Deciseconds value out of bounds.')\n\n      if rfc2579_date_time_tuple[7] not in ('+', '-'):\n        raise ValueError('Direction from UTC value out of bounds.')\n\n      if rfc2579_date_time_tuple[8] not in range(0, 14):\n        raise ValueError('Hours from UTC value out of bounds.')\n\n      if rfc2579_date_time_tuple[9] not in range(0, 60):\n        raise ValueError('Minutes from UTC value out of bounds.')\n\n      time_zone_offset = (\n          (rfc2579_date_time_tuple[8] * 60) + rfc2579_date_time_tuple[9])\n\n      \n      \n      if rfc2579_date_time_tuple[7] != '-':\n        time_zone_offset = -time_zone_offset\n\n      self.year, self.month, self.day_of_month, self.hours, self.minutes = (\n          self._AdjustForTimeZoneOffset(\n              rfc2579_date_time_tuple[0], rfc2579_date_time_tuple[1],\n              rfc2579_date_time_tuple[2], rfc2579_date_time_tuple[3],\n              rfc2579_date_time_tuple[4], time_zone_offset))\n\n      self.deciseconds = rfc2579_date_time_tuple[6]\n      self.seconds = rfc2579_date_time_tuple[5]\n\n      self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n          self.year, self.month, self.day_of_month, self.hours, self.minutes,\n          self.seconds)", "docstring": "Initializes a RFC2579 date-time.\n\nArgs:\nrfc2579_date_time_tuple:\n(Optional[tuple[int, int, int, int, int, int, int]]):\nRFC2579 date-time time, contains year, month, day of month, hours,\nminutes, seconds and deciseconds.\n\nRaises:\nValueError: if the system time is invalid.", "source": "juraj-google-style"}
{"code": "def convert_fields(fields, field_values):\n    _convert_fields(fields, field_values, context=_ConversionContext.VALUE)", "docstring": "Type-checks and converts each field in `field_values` (in place).\n\nArgs:\nfields: A list of `ExtensionTypeField` objects.\nfield_values: A `dict` mapping field names to values.  Must contain an entry\nfor each field.  I.e., `set(field_values.keys())` must be equal to\n`set([f.name for f in fields])`.\n\nRaises:\nValueError: If the keys of `field_values` do not match the names of\nthe fields in `fields`.\nTypeError: If any value in `field_values` does not have the type indicated\nby the corresponding `ExtensionTypeField` object.", "source": "github-repos"}
{"code": "def _get_credentials(vcap_services, service_name=None):\n    service_name = (service_name or os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None))\n    services = vcap_services['streaming-analytics']\n    creds = None\n    for service in services:\n        if (service['name'] == service_name):\n            creds = service['credentials']\n            break\n    if (creds is None):\n        raise ValueError((('Streaming Analytics service ' + str(service_name)) + ' was not found in VCAP_SERVICES'))\n    return creds", "docstring": "Retrieves the credentials of the VCAP Service of the specified `service_name`.  If\n`service_name` is not specified, it takes the information from STREAMING_ANALYTICS_SERVICE_NAME environment\nvariable.\n\nArgs:\nvcap_services (dict): A dict representation of the VCAP Services information.\nservice_name (str): One of the service name stored in `vcap_services`\n\nReturns:\ndict: A dict representation of the credentials.\n\nRaises:\nValueError:  Cannot find `service_name` in `vcap_services`", "source": "codesearchnet"}
{"code": "def build_list(self, title=None, items=None):\n    list_card = _ListSelector(self._speech, display_text=self._display_text, title=title, items=items)\n    return list_card", "docstring": "Presents the user with a vertical list of multiple items.\n\nAllows the user to select a single item.\nSelection generates a user query containing the title of the list item\n\n*Note* Returns a completely new object,\nand does not modify the existing response object\nTherefore, to add items, must be assigned to new variable\nor call the method directly after initializing list\n\nexample usage:\n\nsimple = ask('I speak this text')\nmylist = simple.build_list('List Title')\nmylist.add_item('Item1', 'key1')\nmylist.add_item('Item2', 'key2')\n\nreturn mylist\n\nArguments:\ntitle {str} -- Title displayed at top of list card\n\nReturns:\n_ListSelector -- [_Response object exposing the add_item method]", "source": "codesearchnet"}
{"code": "def edit_distance_1(self, word):\n        \n        word = word.lower()\n        if self._check_if_should_check(word) is False:\n            return {word}\n        letters = self._word_frequency.letters\n        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n        deletes = [L + R[1:] for L, R in splits if R]\n        transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n        replaces = [L + c + R[1:] for L, R in splits if R for c in letters]\n        inserts = [L + c + R for L, R in splits for c in letters]\n        return set(deletes + transposes + replaces + inserts)", "docstring": "Compute all strings that are one edit away from `word` using only\nthe letters in the corpus\n\nArgs:\nword (str): The word for which to calculate the edit distance\nReturns:\nset: The set of strings that are edit distance one from the \\\nprovided word", "source": "juraj-google-style"}
{"code": "def DEFINE_alias(name, original_name, flag_values=FLAGS, module_name=None):\n    if (original_name not in flag_values):\n        raise UnrecognizedFlagError(original_name)\n    flag = flag_values[original_name]\n\n    class _Parser(ArgumentParser):\n        'The parser for the alias flag calls the original flag parser.'\n\n        def parse(self, argument):\n            flag.parse(argument)\n            return flag.value\n\n    class _FlagAlias(Flag):\n        'Overrides Flag class so alias value is copy of original flag value.'\n\n        @property\n        def value(self):\n            return flag.value\n\n        @value.setter\n        def value(self, value):\n            flag.value = value\n    help_msg = ('Alias for --%s.' % flag.name)\n    DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default, help_msg, boolean=flag.boolean), flag_values, module_name)", "docstring": "Defines an alias flag for an existing one.\n\nArgs:\nname: A string, name of the alias flag.\noriginal_name: A string, name of the original flag.\nflag_values: FlagValues object with which the flag will be registered.\nmodule_name: A string, the name of the module that defines this flag.\n\nRaises:\ngflags.FlagError:\nUnrecognizedFlagError: if the referenced flag doesn't exist.\nDuplicateFlagError: if the alias name has been used by some existing flag.", "source": "codesearchnet"}
{"code": "def __densify_border(self):\n    if isinstance(self._input_geom, MultiPolygon):\n        polygons = [polygon for polygon in self._input_geom]\n    else:\n        polygons = [self._input_geom]\n    points = []\n    for polygon in polygons:\n        if (len(polygon.interiors) == 0):\n            exterior = LineString(polygon.exterior)\n            points += self.__fixed_interpolation(exterior)\n        else:\n            exterior = LineString(polygon.exterior)\n            points += self.__fixed_interpolation(exterior)\n            for j in range(len(polygon.interiors)):\n                interior = LineString(polygon.interiors[j])\n                points += self.__fixed_interpolation(interior)\n    return points", "docstring": "Densify the border of a polygon.\n\nThe border is densified by a given factor (by default: 0.5).\n\nThe complexity of the polygon's geometry is evaluated in order\nto densify the borders of its interior rings as well.\n\nReturns:\nlist: a list of points where each point is represented by\na list of its reduced coordinates\n\nExample:\n[[X1, Y1], [X2, Y2], ..., [Xn, Yn]", "source": "codesearchnet"}
{"code": "def _FormatOtherFileToken(self, token_data):\n    \n    \n    \n    timestamp = token_data.microseconds + (\n        token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=timestamp)\n    date_time_string = date_time.CopyToDateTimeString()\n\n    return {\n        'string': token_data.name.rstrip('\\x00'),\n        'timestamp': date_time_string}", "docstring": "Formats an other file token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def get_examples(self, compact=False):\n        \n        \n        examples = copy.deepcopy(self._examples)\n        if not compact:\n            return examples\n\n        def make_compact(d):\n            \n            \n            if not isinstance(d, dict):\n                return\n            for key in d:\n                if isinstance(d[key], dict):\n                    inner_d = d[key]\n                    if len(inner_d) == 1 and '.tag' in inner_d:\n                        d[key] = inner_d['.tag']\n                    else:\n                        make_compact(inner_d)\n                if isinstance(d[key], list):\n                    for item in d[key]:\n                        make_compact(item)\n\n        for example in examples.values():\n            if (isinstance(example.value, dict) and\n                    len(example.value) == 1 and '.tag' in example.value):\n                \n                \n                example.value = example.value['.tag']\n            else:\n                make_compact(example.value)\n\n        return examples", "docstring": "Returns an OrderedDict mapping labels to Example objects.\n\nArgs:\ncompact (bool): If True, union members of void type are converted\nto their compact representation: no \".tag\" key or containing\ndict, just the tag as a string.", "source": "juraj-google-style"}
{"code": "def wait_all(jobs, timeout=None):\n    return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)", "docstring": "Return when at all of the specified jobs have completed or timeout expires.\n\nArgs:\njobs: a Job or list of Jobs to wait on.\ntimeout: a timeout in seconds to wait for. None (the default) means no timeout.\nReturns:\nA list of the jobs that have now completed or None if there were no jobs.", "source": "codesearchnet"}
{"code": "def create(self):\n    input_params = {'type': self.type, 'data': self.data, 'name': self.name, 'priority': self.priority, 'port': self.port, 'ttl': self.ttl, 'weight': self.weight, 'flags': self.flags, 'tags': self.tags}\n    data = self.get_data(('domains/%s/records' % self.domain), type=POST, params=input_params)\n    if data:\n        self.id = data['domain_record']['id']", "docstring": "Creates a new record for a domain.\n\nArgs:\ntype (str): The type of the DNS record (e.g. A, CNAME, TXT).\nname (str): The host name, alias, or service being defined by the\nrecord.\ndata (int): Variable data depending on record type.\npriority (int): The priority for SRV and MX records.\nport (int): The port for SRV records.\nttl (int): The time to live for the record, in seconds.\nweight (int): The weight for SRV records.\nflags (int): An unsigned integer between 0-255 used for CAA records.\ntags (string): The parameter tag for CAA records. Valid values are\n\"issue\", \"wildissue\", or \"iodef\"", "source": "codesearchnet"}
{"code": "def mark_all_as_done(self, **kwargs):\n        \n        result = self.gitlab.http_post('/todos/mark_as_done', **kwargs)\n        try:\n            return int(result)\n        except ValueError:\n            return 0", "docstring": "Mark all the todos as done.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTodoError: If the server failed to perform the request\n\nReturns:\nint: The number of todos maked done", "source": "juraj-google-style"}
{"code": "def get_group_key(self, devices):\n    with self._lock:\n        devices_key = ','.join(devices)\n        if devices_key not in self._known_groups:\n            self._known_groups[devices_key] = self._get_new_group_key(devices)\n        return self._known_groups[devices_key]", "docstring": "Returns a group key for the list of local devices.\n\nThe same group key is returned if the list of local devices is the same.\n\nArgs:\ndevices: a list of local canonical device strings in a collective group.\n\nReturns:\na group key.", "source": "github-repos"}
{"code": "def array_to_base64_png(array):\n  \n  \n  \n  array = np.array(array, dtype=np.float32)\n  if len(array.shape) != 2:\n    raise ValueError(\n        \"Expected rank-2 array; received rank-%d array.\" % len(array.shape))\n  if not np.size(array):\n    raise ValueError(\n        \"Cannot encode an empty array (size: %s) as image.\" % (array.shape,))\n\n  is_infinity = np.isinf(array)\n  is_positive = array > 0.0\n  is_positive_infinity = np.logical_and(is_infinity, is_positive)\n  is_negative_infinity = np.logical_and(is_infinity,\n                                        np.logical_not(is_positive))\n  is_nan = np.isnan(array)\n  finite_indices = np.where(np.logical_and(np.logical_not(is_infinity),\n                                           np.logical_not(is_nan)))\n  if np.size(finite_indices):\n    \n    minval = np.min(array[finite_indices])\n    maxval = np.max(array[finite_indices])\n    scaled = np.array((array - minval) / (maxval - minval) * 255,\n                      dtype=np.uint8)\n    rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1)\n  else:\n    rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8)\n\n  \n  rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB\n  rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB\n  rgb[is_nan] = NAN_RGB\n\n  image_encoded = base64.b64encode(encoder.encode_png(rgb))\n  return image_encoded", "docstring": "Convert an array into base64-enoded PNG image.\n\nArgs:\narray: A 2D np.ndarray or nested list of items.\n\nReturns:\nA base64-encoded string the image. The image is grayscale if the array is\n2D. The image is RGB color if the image is 3D with lsat dimension equal to\n3.\n\nRaises:\nValueError: If the input `array` is not rank-2, or if the rank-2 `array` is\nempty.", "source": "juraj-google-style"}
{"code": "def act_on_cloned_repo(self, path: Union[str, pathlib.Path],\n                           api) -> Optional[HookResult]:", "docstring": "Do something with a cloned repo.\n\nArgs:\npath: Path to the repo.\napi: An instance of :py:class:`repobee.github_api.GitHubAPI`.\n\nReturns:\noptionally returns a HookResult namedtuple for reporting the\noutcome of the hook. May also return None, in which case no\nreporting will be performed for the hook.", "source": "juraj-google-style"}
{"code": "def plot_efficiency(self, key='wall_time', what='good+bad', nmax=5, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax=ax)\n    lw = kwargs.pop('linewidth', 2.0)\n    msize = kwargs.pop('markersize', 10)\n    what = what.split('+')\n    timers = self.timers()\n    peff = self.pefficiency()\n    n = len(timers)\n    xx = np.arange(n)\n    ax.set_prop_cycle(color=['g', 'b', 'c', 'm', 'y', 'k'])\n    (lines, legend_entries) = ([], [])\n    if ('good' in what):\n        good = peff.good_sections(key=key, nmax=nmax)\n        for g in good:\n            yy = peff[g][key]\n            (line,) = ax.plot(xx, yy, '-->', linewidth=lw, markersize=msize)\n            lines.append(line)\n            legend_entries.append(g)\n    if ('bad' in what):\n        bad = peff.bad_sections(key=key, nmax=nmax)\n        for b in bad:\n            yy = peff[b][key]\n            (line,) = ax.plot(xx, yy, '-.<', linewidth=lw, markersize=msize)\n            lines.append(line)\n            legend_entries.append(b)\n    if ('total' not in legend_entries):\n        yy = peff['total'][key]\n        (total_line,) = ax.plot(xx, yy, 'r', linewidth=lw, markersize=msize)\n        lines.append(total_line)\n        legend_entries.append('total')\n    ax.legend(lines, legend_entries, loc='best', shadow=True)\n    ax.set_xlabel('Total_NCPUs')\n    ax.set_ylabel('Efficiency')\n    ax.grid(True)\n    labels = [('MPI=%d, OMP=%d' % (t.mpi_nprocs, t.omp_nthreads)) for t in timers]\n    ax.set_xticks(xx)\n    ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)\n    return fig", "docstring": "Plot the parallel efficiency\n\nArgs:\nkey: Parallel efficiency is computed using the wall_time.\nwhat: Specifies what to plot: `good` for sections with good parallel efficiency.\n`bad` for sections with bad efficiency. Options can be concatenated with `+`.\nnmax: Maximum number of entries in plot\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\n================  ====================================================\nkwargs            Meaning\n================  ====================================================\nlinewidth         matplotlib linewidth. Default: 2.0\nmarkersize        matplotlib markersize. Default: 10\n================  ====================================================\n\nReturns:\n`matplotlib` figure", "source": "codesearchnet"}
{"code": "def disable_plugin(self, name):\n    url = self._url('/plugins/{0}/disable', name)\n    res = self._post(url)\n    self._raise_for_status(res)\n    return True", "docstring": "Disable an installed plugin.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\n\nReturns:\n``True`` if successful", "source": "codesearchnet"}
{"code": "def add_vcenter(self, **kwargs):\n    config = ET.Element('config')\n    vcenter = ET.SubElement(config, 'vcenter', xmlns='urn:brocade.com:mgmt:brocade-vswitch')\n    id = ET.SubElement(vcenter, 'id')\n    id.text = kwargs.pop('id')\n    credentials = ET.SubElement(vcenter, 'credentials')\n    url = ET.SubElement(credentials, 'url')\n    url.text = kwargs.pop('url')\n    username = ET.SubElement(credentials, 'username')\n    username.text = kwargs.pop('username')\n    password = ET.SubElement(credentials, 'password')\n    password.text = kwargs.pop('password')\n    try:\n        self._callback(config)\n        return True\n    except Exception as error:\n        logging.error(error)\n        return False", "docstring": "Add vCenter on the switch\n\nArgs:\nid(str) : Name of an established vCenter\nurl (bool) : vCenter URL\nusername (str): Username of the vCenter\npassword (str): Password of the vCenter\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def _to_tensor(self, array, min_dim, return_tensors):\n    if return_tensors == 'pt':\n        array = torch.from_numpy(array)\n        return array.unsqueeze(1) if array.ndim < min_dim else array\n    return array", "docstring": "Convert numpy array to tensor and ensure proper dimensionality.\nArgs:\narray: The numpy array to convert\nmin_dim: The minimum number of dimensions the result should have\nreturn_tensors: The type of tensors to return (e.g., \"pt\" for PyTorch tensors)\nReturns:\nThe converted array or tensor with proper dimensions", "source": "github-repos"}
{"code": "def modify_model_backprop(model, backprop_modifier):\n    modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))\n    if (modified_model is not None):\n        return modified_model\n    model_path = os.path.join(tempfile.gettempdir(), (next(tempfile._get_candidate_names()) + '.h5'))\n    try:\n        model.save(model_path)\n        modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)\n        if (modifier_fn is None):\n            raise ValueError(\"'{}' modifier is not supported\".format(backprop_modifier))\n        modifier_fn(backprop_modifier)\n        with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):\n            modified_model = load_model(model_path)\n            _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model\n            return modified_model\n    finally:\n        os.remove(model_path)", "docstring": "Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior.\n\nArgs:\nmodel:  The `keras.models.Model` instance.\nbackprop_modifier: One of `{'guided', 'rectified'}`\n\nReturns:\nA copy of model with modified activations for backwards pass.", "source": "codesearchnet"}
{"code": "def remove(path):\n    if os.path.isdir(path):\n        return __rmtree(path)\n    else:\n        return __rmfile(path)", "docstring": "Delete a file or directory.\n\nArgs:\npath (str): Path to the file or directory that needs to be deleted.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "codesearchnet"}
{"code": "def _create_stage_submission_env_dependencies(temp_dir):\n    try:\n        local_dependency_file_path = os.path.join(temp_dir, SUBMISSION_ENV_DEPENDENCIES_FILE)\n        dependencies = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])\n        local_python_path = f'Python Path: {sys.executable}\\n'\n        with open(local_dependency_file_path, 'w') as f:\n            f.write(local_python_path + str(dependencies))\n        return [Stager._create_file_stage_to_artifact(local_dependency_file_path, SUBMISSION_ENV_DEPENDENCIES_FILE)]\n    except Exception as e:\n        _LOGGER.warning(\"Couldn't stage a list of installed dependencies in submission environment. Got exception: %s\", e)\n        return []", "docstring": "Create and stage a file with list of dependencies installed in the\nsubmission environment.\n\nThis list can be used at runtime to compare against the dependencies in the\nruntime environment. This allows runners to warn users about any potential\ndependency mismatches and help debug issues related to\nenvironment mismatches.\n\nArgs:\ntemp_dir: path to temporary location where the file should be\ndownloaded.\n\nReturns:\nA list of ArtifactInformation of local file path that will be staged to\nthe staging location.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n        \n        self.ReportErrorEvent = channel.unary_unary(\n            \"/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent\",\n            request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_report__errors__service__pb2.ReportErrorEventRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_report__errors__service__pb2.ReportErrorEventResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def get_sleep_timer(self):\n    resp = self.avTransport.GetRemainingSleepTimerDuration([('InstanceID', 0)])\n    if resp['RemainingSleepTimerDuration']:\n        times = resp['RemainingSleepTimerDuration'].split(':')\n        return (((int(times[0]) * 3600) + (int(times[1]) * 60)) + int(times[2]))\n    else:\n        return None", "docstring": "Retrieves remaining sleep time, if any\n\nReturns:\nint or NoneType: Number of seconds left in timer. If there is no\nsleep timer currently set it will return None.", "source": "codesearchnet"}
{"code": "def heightmap_has_land_on_border(hm: np.ndarray, waterlevel: float) -> bool:\n    \n    return bool(\n        lib.TCOD_heightmap_has_land_on_border(_heightmap_cdata(hm), waterlevel)\n    )", "docstring": "Returns True if the map edges are below ``waterlevel``, otherwise False.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nwaterLevel (float): The water level to use.\n\nReturns:\nbool: True if the map edges are below ``waterlevel``, otherwise False.", "source": "juraj-google-style"}
{"code": "def add_dr( self, dr ):\n         \n        this_bin = int( dr / self.dr ) \n        if this_bin > self.number_of_bins:\n            raise IndexError( 'dr is larger than rdf max_r' )\n        self.data[ this_bin ] += 1", "docstring": "Add an observed interatomic distance to the g(r) data at dr.\n\nArgs:\ndr (Float): the interatomic distance, dr.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def assert_child_key_has_value(self, parent, child, caller):\n        \n        assert parent, (\"parent parameter must be specified.\")\n        assert child, (\"child parameter must be specified.\")\n        self.assert_key_has_value(parent, caller)\n\n        try:\n            child_exists = child in self[parent]\n        except TypeError as err:\n            \n            raise ContextError(\n                f\"context['{parent}'] must be iterable and contain '{child}' \"\n                f\"for {caller}. {err}\") from err\n\n        if child_exists:\n            if self[parent][child] is None:\n                raise KeyInContextHasNoValueError(\n                    f\"context['{parent}']['{child}'] must have a value for \"\n                    f\"{caller}.\")\n        else:\n            raise KeyNotInContextError(\n                f\"context['{parent}']['{child}'] doesn't \"\n                f\"exist. It must exist for {caller}.\")", "docstring": "Assert that context contains key that has child which has a value.\n\nArgs:\nparent: parent key\nchild: validate this sub-key of parent exists AND isn't None.\ncaller: string. calling function name - this used to construct\nerror messages\n\nRaises:\nKeyNotInContextError: Key doesn't exist\nKeyInContextHasNoValueError: context[key] is None\nAssertionError: if key is None", "source": "juraj-google-style"}
{"code": "def list_media_services(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])\n    return do_get(endpoint, access_token)", "docstring": "List the media services in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def get_all_subclasses(asts):\n    hierarchy = {}\n    for ast in asts:\n        hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))\n\n    def filter_superclasses(superclasses):\n        return [superclass for superclass in superclasses if is_complete(superclass)]\n    hierarchy = {cls: filter_superclasses(superclasses) for cls, superclasses in hierarchy.items() if is_complete(cls)}\n    return utils.invert_dict(hierarchy)", "docstring": "Compute a class->subclasses mapping.\n\nArgs:\nasts: A list of ASTs.\n\nReturns:\nA dictionary, mapping instances of pytd.Type (types) to lists of\npytd.Class (the derived classes).", "source": "github-repos"}
{"code": "def format_tasks(tasks):\n    return [('%d : %s (%s)' % (task.key.id(), task.description, ('done' if task.done else ('created %s' % task.created)))) for task in tasks]", "docstring": "Converts a list of tasks to a list of string representations.\n\nArgs:\ntasks: A list of the tasks to convert.\nReturns:\nA list of string formatted tasks.", "source": "codesearchnet"}
{"code": "def double_width(k, v, p):\n    if isinstance(p, Conv2D) and k.key == 'filters':\n        return 2 * v\n    return v", "docstring": "A rebind rule for doubling the filters for Conv2D layers.\n\nArgs:\nk: A `pg.KeyPath` object representing the location of current node.\nv: The value of current node.\np: The parent of current node.\n\nReturns:\nThe output value for current node.", "source": "github-repos"}
{"code": "def __init__(self, site=None):\n        \n        super(FileFormatError, self).__init__()\n        self.site = site", "docstring": "Initialise a new ``FileFormatError`` object.\n\nArgs:\nsite (str): Remote site name to display in error message", "source": "juraj-google-style"}
{"code": "def call_for_each_replica(strategy, fn, args=None, kwargs=None):\n    if args is None:\n        args = ()\n    if kwargs is None:\n        kwargs = {}\n    if isinstance(fn, def_function.Function):\n        if fn._jit_compile and all([_is_gpu_device(d) for d in strategy.extended.worker_devices]):\n            return _call_for_each_replica(strategy, fn, args, kwargs)\n        if strategy not in _cfer_fn_cache:\n            _cfer_fn_cache[strategy] = weakref.WeakKeyDictionary()\n        wrapped = _cfer_fn_cache[strategy].get(fn)\n        if wrapped is None:\n\n            def wrapped_fn(*args, **kwargs):\n                return call_for_each_replica(strategy, fn.python_function, args, kwargs)\n            wrapped = fn._clone(python_function=wrapped_fn)\n            _cfer_fn_cache[strategy][fn] = wrapped\n        return wrapped(*args, **kwargs)\n    if context.executing_eagerly():\n        logging.log_first_n(logging.WARN, 'Using %s eagerly has significant overhead currently. We will be working on improving this in the future, but for now please wrap `call_for_each_replica` or `experimental_run` or `run` inside a tf.function to get the best performance.' % strategy.__class__.__name__, 5)\n    else:\n        fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())\n    return _call_for_each_replica(strategy, fn, args, kwargs)", "docstring": "Call `fn` on each worker devices(replica).\n\nIt's highly recommended to wrap the call to this function inside a\n`tf.function`, otherwise the performance is poor.\n\nArgs:\nstrategy: `tf.distribute.Strategy`.\nfn: function to call on each worker devices.\nargs: positional arguments to `fn`.\nkwargs: keyword arguments to `fn`.\n\nReturns:\nWrapped returned value of `fn` from all replicas.", "source": "github-repos"}
{"code": "def smash(self):\n    self._initialize_smash()\n    try:\n        stack_name = self._config.get('environment', {}).get('stack_name', None)\n        response = self._cloudFormation.describe_stacks(StackName=stack_name)\n        logging.debug('smash pre-flight returned: {}'.format(json.dumps(response, indent=4, default=json_util.default)))\n    except ClientError as wtf:\n        logging.warning('your stack is in another castle [0].')\n        return False\n    except Exception as wtf:\n        logging.error('failed to find intial status of smash candidate: {}'.format(wtf))\n        return False\n    response = self._cloudFormation.delete_stack(StackName=stack_name)\n    logging.info('delete started for stack: {}'.format(stack_name))\n    logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))\n    return self.poll_stack()", "docstring": "Smash the given stack\n\nArgs:\nNone\n\nReturns:\nTrue if True\n\nTodo:\nFigure out what could go wrong and take steps\nto hanlde problems.", "source": "codesearchnet"}
{"code": "def select_charset(self, charset):\n        \n        charsets = {'USA':0,\n                   'France':1,\n                   'Germany':2,\n                   'UK':3, \n                   'Denmark':4,\n                   'Sweden':5, \n                   'Italy':6, \n                   'Spain':7,\n                   'Japan':8, \n                   'Norway':9, \n                   'Denmark II':10, \n                   'Spain II':11, \n                   'Latin America':12, \n                   'South Korea':13, \n                   'Legal':64, \n                   }\n        if charset in charsets:\n            self.send(chr(27)+'R'+chr(charsets[charset]))\n        else:\n            raise RuntimeError('Invalid charset.')", "docstring": "Select international character set and changes codes in code table accordingly\n\nArgs:\ncharset: String. The character set we want.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid charset.", "source": "juraj-google-style"}
{"code": "def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):\n  \n\n  infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)\n  tf.logging.info('Loading examples')\n\n  all_examples = []\n  for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\\t')):\n    if i % 100000 == 0:\n      tf.logging.info('%d examples have been loaded....' % i)\n    ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}\n    all_examples.append(ex)\n\n  random.seed(1)\n  random.shuffle(all_examples)\n  n_train = int(len(all_examples) * prop_train)\n  n_val = n_train + int(len(all_examples) * prop_val)\n  train = all_examples[:n_train]\n  val = all_examples[n_train:n_val]\n  test = []\n  for e in all_examples[n_val:]:\n    if e['n_intervening'] == e['n_diff_intervening']:\n      test.append(e)\n\n  return all_examples, train, val, test", "docstring": "Loads exampls from the tsv file.\n\nArgs:\ntmp_dir: temp directory.\nprop_train: proportion of the train data\nprop_val: proportion of the validation data\n\nReturns:\nAll examples in the dataset pluse train, test, and development splits.", "source": "juraj-google-style"}
{"code": "def on_train_batch_begin(self, batch, logs=None):\n    self.on_batch_begin(batch, logs=logs)", "docstring": "Called at the beginning of a training batch in `fit` methods.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict, contains the return value of `model.train_step`. Typically,\nthe values of the `Model`'s metrics are returned.  Example:\n`{'loss': 0.2, 'accuracy': 0.7}`.", "source": "github-repos"}
{"code": "def _sd_of_runs(stats, mean, key='runs'):\n    num_runs = len(stats[key])\n    first = stats[key][0]\n    standard_deviation = {}\n    for stat_key in first:\n        if isinstance(first[stat_key], numbers.Number):\n            standard_deviation[stat_key] = math.sqrt((sum((((run[stat_key] - mean[stat_key]) ** 2) for run in stats[key])) / float(num_runs)))\n    return standard_deviation", "docstring": "Obtain the standard deviation of stats.\n\nArgs:\nstats: dict; A set of stats, structured as above.\nmean: dict; Mean for each key in stats.\nkey: str; Optional key to determine where list of runs is found in stats", "source": "codesearchnet"}
{"code": "def add(self, rid, data, raise_on_error=True):\n        \n        cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}\n        return self.ds.post(rid, cache_data, raise_on_error)", "docstring": "Write cache data to the data store.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): The record data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def sparse_categorical_accuracy(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true)\n    y_pred_rank = y_pred.shape.ndims\n    y_true_rank = y_true.shape.ndims\n    if y_true_rank is not None and y_pred_rank is not None and (len(backend.int_shape(y_true)) == len(backend.int_shape(y_pred))):\n        y_true = array_ops.squeeze(y_true, [-1])\n    y_pred = math_ops.argmax(y_pred, axis=-1)\n    if backend.dtype(y_pred) != backend.dtype(y_true):\n        y_pred = math_ops.cast(y_pred, backend.dtype(y_true))\n    return math_ops.cast(math_ops.equal(y_true, y_pred), backend.floatx())", "docstring": "Calculates how often predictions match integer labels.\n\nStandalone usage:\n>>> y_true = [2, 1]\n>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]\n>>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)\n>>> assert m.shape == (2,)\n>>> m.numpy()\narray([0., 1.], dtype=float32)\n\nYou can provide logits of classes as `y_pred`, since argmax of\nlogits and probabilities are same.\n\nArgs:\ny_true: Integer ground truth values.\ny_pred: The prediction values.\n\nReturns:\nSparse categorical accuracy values.", "source": "github-repos"}
{"code": "def shutdown(cluster_info, queues=['input']):\n\n    def _shutdown(iter):\n        host = util.get_ip_address()\n        executor_id = util.read_executor_id()\n        mgr = _get_manager(cluster_info, host, executor_id)\n        for node in cluster_info:\n            if ((node['host'] == host) and (node['executor_id'] == executor_id)):\n                tb_pid = node['tb_pid']\n                if (tb_pid != 0):\n                    logging.info('Stopping tensorboard (pid={0})'.format(tb_pid))\n                    subprocess.Popen(['kill', str(tb_pid)])\n        logging.info('Stopping all queues')\n        for q in queues:\n            try:\n                queue = mgr.get_queue(q)\n                logging.info('Feeding None into {0} queue'.format(q))\n                queue.put(None, block=True)\n            except (AttributeError, KeyError):\n                msg = \"Queue '{}' not found on this node, check for exceptions on other nodes.\".format(q)\n                raise Exception(msg)\n        logging.info(\"Setting mgr.state to 'stopped'\")\n        mgr.set('state', 'stopped')\n        return [True]\n    return _shutdown", "docstring": "Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues.\n\nArgs:\n:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc).\n:queues: *INTERNAL_USE*\n\nReturns:\nA nodeRDD.mapPartitions() function", "source": "codesearchnet"}
{"code": "def get_unique_directives(ast):\n    if (not ast.directives):\n        return dict()\n    result = dict()\n    for directive_obj in ast.directives:\n        directive_name = directive_obj.name.value\n        if (directive_name in ALLOWED_DUPLICATED_DIRECTIVES):\n            pass\n        elif (directive_name in result):\n            raise GraphQLCompilationError(u'Directive was unexpectedly applied twice in the same location: {} {}'.format(directive_name, ast.directives))\n        else:\n            result[directive_name] = directive_obj\n    return result", "docstring": "Return a dict of directive name to directive object for the given AST node.\n\nAny directives that are allowed to exist more than once on any AST node are ignored.\nFor any directives that can only exist up to once, we verify that they are not duplicated\nraising GraphQLCompilationError in case we find them more than once on the AST node.\n\nArgs:\nast: GraphQL AST node, obtained from the graphql library\n\nReturns:\ndict of string to directive object", "source": "codesearchnet"}
{"code": "def get_local_current_sample(ip):\n    \n    valid_ip_pat = re.compile(\n      \"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$\"\n    )\n    if not valid_ip_pat.match(ip):\n      raise ValueError(\"ip address invalid\")\n\n    url = \"http:\n    headers = { \"Content-Type\": \"application/json\" }\n\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Gets current sample from *local* Neurio device IP address.\n\nThis is a static method. It doesn't require a token to authenticate.\n\nNote, call get_user_information to determine local Neurio IP addresses.\n\nArgs:\nip (string): address of local Neurio device\n\nReturns:\ndictionary object containing current sample information", "source": "juraj-google-style"}
{"code": "def _PrintTSKPartitionIdentifiersOverview(self, volume_system, volume_identifiers):\n    header = 'The following partitions were found:\\n'\n    self._output_writer.Write(header)\n    column_names = ['Identifier', 'Offset (in bytes)', 'Size (in bytes)']\n    table_view = views.CLITabularTableView(column_names=column_names)\n    for volume_identifier in sorted(volume_identifiers):\n        volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n        if (not volume):\n            raise errors.SourceScannerError('Partition missing for identifier: {0:s}.'.format(volume_identifier))\n        volume_extent = volume.extents[0]\n        volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset)\n        volume_size = self._FormatHumanReadableSize(volume_extent.size)\n        table_view.AddRow([volume.identifier, volume_offset, volume_size])\n    self._output_writer.Write('\\n')\n    table_view.Write(self._output_writer)\n    self._output_writer.Write('\\n')", "docstring": "Prints an overview of TSK partition identifiers.\n\nArgs:\nvolume_system (dfvfs.TSKVolumeSystem): volume system.\nvolume_identifiers (list[str]): allowed volume identifiers.\n\nRaises:\nSourceScannerError: if a volume cannot be resolved from the volume\nidentifier.", "source": "codesearchnet"}
{"code": "def model_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)", "docstring": "Returns all variables in the MODEL_VARIABLES collection.\n\nArgs:\nscope: (Optional.) A string. If supplied, the resulting list is filtered to\ninclude only items whose `name` attribute matches `scope` using\n`re.match`. Items without a `name` attribute are never returned if a scope\nis supplied. The choice of `re.match` means that a `scope` without special\ntokens filters by prefix.\n\nReturns:\nA list of local Variable objects.", "source": "github-repos"}
{"code": "def add_stickiness(self):\n    stickiness_dict = {}\n    env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n    elbclient = env.client('elb')\n    elb_settings = self.properties['elb']\n    for listener in elb_settings.get('ports'):\n        if listener.get('stickiness'):\n            sticky_type = listener['stickiness']['type'].lower()\n            externalport = int(listener['loadbalancer'].split(':')[(- 1)])\n            policyname_tmp = '{0}-{1}-{2}-{3}'\n            if (sticky_type == 'app'):\n                cookiename = listener['stickiness']['cookie_name']\n                policy_key = cookiename.replace('.', '')\n                policyname = policyname_tmp.format(self.app, sticky_type, externalport, policy_key)\n                elbclient.create_app_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname, CookieName=cookiename)\n                stickiness_dict[externalport] = policyname\n            elif (sticky_type == 'elb'):\n                cookie_ttl = listener['stickiness'].get('cookie_ttl', None)\n                policyname = policyname_tmp.format(self.app, sticky_type, externalport, cookie_ttl)\n                if cookie_ttl:\n                    elbclient.create_lb_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname, CookieExpirationPeriod=cookie_ttl)\n                else:\n                    elbclient.create_lb_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname)\n                stickiness_dict[externalport] = policyname\n    return stickiness_dict", "docstring": "Adds stickiness policy to created ELB\n\nReturns:\ndict: A dict of stickiness policies and ports::\n\nexample:\n{\n80: \"$policy_name\"\n}", "source": "codesearchnet"}
{"code": "def check_and_update_resources(num_cpus, num_gpus, resources):\n    if (resources is None):\n        resources = {}\n    resources = resources.copy()\n    assert ('CPU' not in resources)\n    assert ('GPU' not in resources)\n    if (num_cpus is not None):\n        resources['CPU'] = num_cpus\n    if (num_gpus is not None):\n        resources['GPU'] = num_gpus\n    if ('CPU' not in resources):\n        resources['CPU'] = multiprocessing.cpu_count()\n    gpu_ids = ray.utils.get_cuda_visible_devices()\n    if (('GPU' in resources) and (gpu_ids is not None) and (resources['GPU'] > len(gpu_ids))):\n        raise Exception('Attempting to start raylet with {} GPUs, but CUDA_VISIBLE_DEVICES contains {}.'.format(resources['GPU'], gpu_ids))\n    if ('GPU' not in resources):\n        resources['GPU'] = _autodetect_num_gpus()\n        if (gpu_ids is not None):\n            resources['GPU'] = min(resources['GPU'], len(gpu_ids))\n    resources = {resource_label: resource_quantity for (resource_label, resource_quantity) in resources.items() if (resource_quantity != 0)}\n    for (_, resource_quantity) in resources.items():\n        assert (isinstance(resource_quantity, int) or isinstance(resource_quantity, float))\n        if (isinstance(resource_quantity, float) and (not resource_quantity.is_integer())):\n            raise ValueError('Resource quantities must all be whole numbers. Received {}.'.format(resources))\n        if (resource_quantity < 0):\n            raise ValueError('Resource quantities must be nonnegative. Received {}.'.format(resources))\n        if (resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY):\n            raise ValueError('Resource quantities must be at most {}.'.format(ray_constants.MAX_RESOURCE_QUANTITY))\n    return resources", "docstring": "Sanity check a resource dictionary and add sensible defaults.\n\nArgs:\nnum_cpus: The number of CPUs.\nnum_gpus: The number of GPUs.\nresources: A dictionary mapping resource names to resource quantities.\n\nReturns:\nA new resource dictionary.", "source": "codesearchnet"}
{"code": "def _GetSignatureMatchParserNames(self, file_object):\n    parser_names = []\n    scan_state = pysigscan.scan_state()\n    self._file_scanner.scan_file_object(scan_state, file_object)\n    for scan_result in iter(scan_state.scan_results):\n        format_specification = self._formats_with_signatures.GetSpecificationBySignature(scan_result.identifier)\n        if (format_specification.identifier not in parser_names):\n            parser_names.append(format_specification.identifier)\n    return parser_names", "docstring": "Determines if a file-like object matches one of the known signatures.\n\nArgs:\nfile_object (file): file-like object whose contents will be checked\nfor known signatures.\n\nReturns:\nlist[str]: parser names for which the contents of the file-like object\nmatches their known signatures.", "source": "codesearchnet"}
{"code": "def _GetSignatureScanner(cls, specification_store):\n    \n    signature_scanner = pysigscan.scanner()\n    signature_scanner.set_scan_buffer_size(cls._SCAN_BUFFER_SIZE)\n\n    for format_specification in specification_store.specifications:\n      for signature in format_specification.signatures:\n        pattern_offset = signature.offset\n\n        if pattern_offset is None:\n          signature_flags = pysigscan.signature_flags.NO_OFFSET\n        elif pattern_offset < 0:\n          pattern_offset *= -1\n          signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END\n        else:\n          signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START\n\n        signature_scanner.add_signature(\n            signature.identifier, pattern_offset, signature.pattern,\n            signature_flags)\n\n    return signature_scanner", "docstring": "Initializes a signature scanner based on a specification store.\n\nArgs:\nspecification_store (FormatSpecificationStore): specification store.\n\nReturns:\npysigscan.scanner: signature scanner.", "source": "juraj-google-style"}
{"code": "def Verify(self, written_keys):\n    self.log.debug('verification starting on %r', self.temp_cache_filename)\n    cache_data = self.GetMap(self.temp_cache_filename)\n    map_entry_count = len(cache_data)\n    self.log.debug('entry count: %d', map_entry_count)\n    if map_entry_count <= 0:\n        self.log.error('The files cache being verified \"%r\" is empty.', self.temp_cache_filename)\n        raise error.EmptyMap(self.temp_cache_filename + ' is empty')\n    cache_keys = set()\n    try:\n        while 1:\n            entry = cache_data.PopItem()\n            cache_keys.update(self._ExpectedKeysForEntry(entry))\n    except KeyError:\n        pass\n    missing_from_cache = written_keys - cache_keys\n    if missing_from_cache:\n        self.log.warning('verify failed: %d missing from the on-disk cache', len(missing_from_cache))\n        if len(missing_from_cache) < 1000:\n            self.log.debug('keys missing from the on-disk cache: %r', missing_from_cache)\n        else:\n            self.log.debug('More than 1000 keys missing from cache. Not printing.')\n        self._Rollback()\n        return False\n    missing_from_map = cache_keys - written_keys\n    if missing_from_map:\n        self.log.warning('verify failed: %d keys found, unexpected in the on-disk cache', len(missing_from_map))\n        if len(missing_from_map) < 1000:\n            self.log.debug('keys missing from map: %r', missing_from_map)\n        else:\n            self.log.debug('More than 1000 keys missing from map.  Not printing.')\n        self._Rollback()\n        return False\n    return True", "docstring": "Verify that the cache is correct.\n\nPerform some unit tests on the written data, such as reading it\nback and verifying that it parses and has the entries we expect.\n\nArgs:\nwritten_keys: a set of keys that should have been written to disk.\n\nReturns:\na boolean indicating success.\n\nRaises:\nEmptyMap: The cache being verified is empty.", "source": "github-repos"}
{"code": "def mementoweb_api_tags(url):\n    \n    memento_url = \"http:\n\n    r = requests.get(memento_url + url)\n\n    if r.status_code != 200:\n        return []\n\n    data = r.json().get(\"mementos\", {}).get(\"list\", [])\n\n    if not data:\n        return []\n\n    resources = (\n        TimeResource(\n            url=item.get(\"uri\", \"\"),\n            date=item.get(\"datetime\", \"\"),\n            val=item.get(\"datetime\", \"\").split(\"-\")[0],\n            source=\"MementoWeb.org\",\n        )\n        for item in data\n    )\n\n    \n    resource_dict = {\n        res.val: res\n        for res in resources\n    }\n\n    return sorted(resource_dict.values(), key=lambda x: x.val)", "docstring": "Parse list of :class:`TimeResource` objects based on the mementoweb.org.\n\nArgs:\nurl (str): Any url.\n\nReturns:\nlist: :class:`TimeResource` objects.", "source": "juraj-google-style"}
{"code": "def create_project(self, resource):\n    self.project_service.set_auth(self._token_project)\n    return self.project_service.create(resource)", "docstring": "Create the entity described by the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\n\nReturns:\n(intern.resource.boss.BossResource): Returns resource of type\nrequested on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def add_node(self, node_name):\n    graph = self.graph\n    if (node_name in graph):\n        raise KeyError(('node %s already exists' % node_name))\n    graph[node_name] = set()", "docstring": "Add a node if it does not exist yet, or error out.\n\nArgs:\nnode_name (str): The unique name of the node to add.\n\nRaises:\nKeyError: Raised if a node with the same name already exist in the\ngraph", "source": "codesearchnet"}
{"code": "def handle(self, message):\n    opcode = message['op']\n    if (opcode == 10):\n        self.on_hello(message)\n    elif (opcode == 11):\n        self.on_heartbeat(message)\n    elif (opcode == 0):\n        self.on_message(message)\n    else:\n        logger.debug('Not a message we handle: OPCODE {}'.format(opcode))\n    return", "docstring": "Dispatches messages to appropriate handler based on opcode\n\nArgs:\nmessage (dict): Full message from Discord websocket connection", "source": "codesearchnet"}
{"code": "def _ParseAnalysisPluginOptions(self, options):\n    \n    \n    analysis_plugin_info = self._analysis_manager.GetAllPluginInformation()\n    \n    analysis_plugin_names = {\n        name.lower() for name, _, _ in analysis_plugin_info}\n\n    analysis_plugins = self.ParseStringOption(options, 'analysis_plugins')\n    if not analysis_plugins:\n      return\n\n    \n    requested_plugin_names = {\n        name.strip().lower() for name in analysis_plugins.split(',')}\n\n    \n    difference = requested_plugin_names.difference(analysis_plugin_names)\n    if difference:\n      raise errors.BadConfigOption(\n          'Non-existent analysis plugins specified: {0:s}'.format(\n              ' '.join(difference)))\n\n    self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins)\n\n    for analysis_plugin in self._analysis_plugins:\n      helpers_manager.ArgumentHelperManager.ParseOptions(\n          options, analysis_plugin)", "docstring": "Parses the analysis plugin options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.", "source": "juraj-google-style"}
{"code": "def lookup(self, obj):\n    return self._registered_map[self.get_registered_name(obj)]", "docstring": "Looks up the registered object using the predicate.\n\nArgs:\nobj: Object to pass to each of the registered predicates to look up the\nregistered object.\nReturns:\nThe object registered with the first passing predicate.\nRaises:\nLookupError if the object does not match any of the predicate functions.", "source": "github-repos"}
{"code": "def rgstr_stamps_root(rgstr_stamps):\n    \n    rgstr_stamps = sanitize_rgstr_stamps(rgstr_stamps)\n    f.root.rgstr_stamps = rgstr_stamps\n    return rgstr_stamps", "docstring": "Register stamps with the root timer (see subdivision()).\n\nArgs:\nrgstr_stamps (list, tuple): Collection of identifiers, passed through\nset(), then each is passed through str().\n\nReturns:\nlist: Implemented registered stamp collection.", "source": "juraj-google-style"}
{"code": "def _mean_of_runs(stats, key='runs'):\n    \n\n    num_runs = len(stats[key])\n    first = stats[key][0]\n\n    mean = {}\n    for stat_key in first:\n        \n        if isinstance(first[stat_key], numbers.Number):\n            mean[stat_key] = sum(run[stat_key]\n                                 for run in stats[key]) / float(num_runs)\n\n    return mean", "docstring": "Obtain the mean of stats.\n\nArgs:\nstats: dict; A set of stats, structured as above.\nkey: str; Optional key to determine where list of runs is found in stats", "source": "juraj-google-style"}
{"code": "def decode_field(self, field, value):\n        \n        for decoder in _GetFieldCodecs(field, 'decoder'):\n            result = decoder(field, value)\n            value = result.value\n            if result.complete:\n                return value\n        if isinstance(field, messages.MessageField):\n            field_value = self.decode_message(\n                field.message_type, json.dumps(value))\n        elif isinstance(field, messages.EnumField):\n            value = GetCustomJsonEnumMapping(\n                field.type, json_name=value) or value\n            try:\n                field_value = super(\n                    _ProtoJsonApiTools, self).decode_field(field, value)\n            except messages.DecodeError:\n                if not isinstance(value, six.string_types):\n                    raise\n                field_value = None\n        else:\n            field_value = super(\n                _ProtoJsonApiTools, self).decode_field(field, value)\n        return field_value", "docstring": "Decode the given JSON value.\n\nArgs:\nfield: a messages.Field for the field we're decoding.\nvalue: a python value we'd like to decode.\n\nReturns:\nA value suitable for assignment to field.", "source": "juraj-google-style"}
{"code": "class MimiEncoderOutput(ModelOutput):\n    audio_codes: Optional[torch.LongTensor] = None\n    encoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None", "docstring": "Args:\naudio_codes (`torch.LongTensor`  of shape `(batch_size, num_quantizers, codes_length)`, *optional*):\nDiscret code embeddings computed using `model.encode`.\nencoder_past_key_values (`Cache`, *optional*):\nPre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.\nThis typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\nThe model will output the same cache format that is fed as input.\n\nIf `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't\nhave their past key value states given to this model).", "source": "github-repos"}
{"code": "def GetSubFileEntryByName(self, name, case_sensitive=True):\n    name_lower = name.lower()\n    matching_sub_file_entry = None\n    for sub_file_entry in self.sub_file_entries:\n        if (sub_file_entry.name == name):\n            return sub_file_entry\n        if ((not case_sensitive) and (sub_file_entry.name.lower() == name_lower)):\n            if (not matching_sub_file_entry):\n                matching_sub_file_entry = sub_file_entry\n    return matching_sub_file_entry", "docstring": "Retrieves a sub file entry by name.\n\nArgs:\nname (str): name of the file entry.\ncase_sensitive (Optional[bool]): True if the name is case sensitive.\n\nReturns:\nFileEntry: a file entry or None if not available.", "source": "codesearchnet"}
{"code": "def get_capture_handler_config_by_name(self, name):\n        \n        handler_confs = []\n        for address, stream_capturer in self._stream_capturers.iteritems():\n            handler_data = stream_capturer[0].dump_handler_config_data()\n            for h in handler_data:\n                if h['handler']['name'] == name:\n                    handler_confs.append(h)\n\n        return handler_confs", "docstring": "Return data for handlers of a given name.\n\nArgs:\nname:\nName of the capture handler(s) to return config data for.\n\nReturns:\nDictionary dump from the named capture handler as given by\nthe :func:`SocketStreamCapturer.dump_handler_config_data` method.", "source": "juraj-google-style"}
{"code": "def watch_key(self):\n    return _get_tensor_watch_key(self.node_name, self.output_slot, self.debug_op)", "docstring": "Watch key identities a debug watch on a tensor.\n\nReturns:\n(`str`) A watch key, in the form of `tensor_name`:`debug_op`.", "source": "github-repos"}
{"code": "def set_speech_text(self, text):\n        \n        self.response.outputSpeech.type = 'PlainText'\n        self.response.outputSpeech.text = text", "docstring": "Set response output speech as plain text type.\n\nArgs:\ntext: str. Response speech used when type is 'PlainText'. Cannot exceed\n8,000 characters.", "source": "juraj-google-style"}
{"code": "def get_all(self, key=None):\n        \n        key = self.definition.main_key if key is None else key\n        key = self.definition.key_synonyms.get(key, key)\n        entries = self._get_all(key)\n        if key in self.definition.scalar_nonunique_keys:\n            return set(entries)\n        return entries", "docstring": "Returns all data entries for a particular key. Default is the main key.\n\nArgs:\n\nkey (str): key whose values to return (default: main key)\n\nReturns:\n\nList of all data entries for the key", "source": "juraj-google-style"}
{"code": "def encode_dataset(dataset, vocabulary):\n\n    def encode(features):\n        return {k: vocabulary.encode_tf(v) for (k, v) in features.items()}\n    return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "docstring": "Encode from strings to token ids.\n\nArgs:\ndataset: a tf.data.Dataset with string values.\nvocabulary: a mesh_tensorflow.transformer.Vocabulary\nReturns:\na tf.data.Dataset with integer-vector values ending in EOS=1", "source": "codesearchnet"}
{"code": "def _spin_product(variables):\n    \n    multiplier, multiplicand, product, aux = variables\n\n    return BinaryQuadraticModel({multiplier: -.5,\n                                 multiplicand: -.5,\n                                 product: -.5,\n                                 aux: -1.},\n                                {(multiplier, multiplicand): .5,\n                                 (multiplier, product): .5,\n                                 (multiplier, aux): 1.,\n                                 (multiplicand, product): .5,\n                                 (multiplicand, aux): 1.,\n                                 (product, aux): 1.},\n                                2.,\n                                Vartype.SPIN)", "docstring": "Create a bqm with a gap of 2 that represents the product of two variables.\n\nNote that spin-product requires an auxiliary variable.\n\nArgs:\nvariables (list):\nmultiplier, multiplicand, product, aux\n\nReturns:\n:obj:`.BinaryQuadraticModel`", "source": "juraj-google-style"}
{"code": "def _extract_variable_parts(variable_key, variable):\n    (name, offset, partitioned) = (None, None, False)\n    if variable._save_slice_info:\n        name = variable_key[:variable_key.rfind('/')]\n        if (not variable._save_slice_info.full_name.endswith(name)):\n            raise RuntimeError('Unexpected handling of partitioned variable.')\n        offset = variable._save_slice_info.var_offset[0]\n        partitioned = True\n    return (partitioned, name, offset)", "docstring": "Matches a variable to individual parts.\n\nArgs:\nvariable_key: String identifier of the variable in the module scope.\nvariable: Variable tensor.\n\nReturns:\npartitioned: Whether the variable is partitioned.\nname: Name of the variable up to the partitioning.\noffset: Offset of the variable into the full variable.\n\nRaises:\nRuntimeError: In case of unexpected variable format.", "source": "codesearchnet"}
{"code": "def _on_response_message(self, sequence, topic, message):\n        \n\n        try:\n            conn_key = self._find_connection(topic)\n            context = self.conns.get_context(conn_key)\n        except ArgumentError:\n            self._logger.warn(\"Dropping message that does not correspond with a known connection, message=%s\", message)\n            return\n\n        if 'client' in message and message['client'] != self.name:\n            self._logger.debug(\"Dropping message that is for another client %s, we are %s\", message['client'], self.name)\n\n        if messages.DisconnectionResponse.matches(message):\n            self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None))\n        elif messages.OpenInterfaceResponse.matches(message):\n            self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))\n        elif messages.RPCResponse.matches(message):\n            rpc_message = messages.RPCResponse.verify(message)\n            self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None))\n        elif messages.ProgressNotification.matches(message):\n            progress_callback = context.get('progress_callback', None)\n            if progress_callback is not None:\n                progress_callback(message['done_count'], message['total_count'])\n        elif messages.ScriptResponse.matches(message):\n            if 'progress_callback' in context:\n                del context['progress_callback']\n\n            self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))\n        elif messages.DisconnectionNotification.matches(message):\n            try:\n                conn_key = self._find_connection(topic)\n                conn_id = self.conns.get_connection_id(conn_key)\n            except ArgumentError:\n                self._logger.warn(\"Dropping disconnect notification that does not correspond with a known connection, topic=%s\", topic)\n                return\n\n            self.conns.unexpected_disconnect(conn_key)\n            self._trigger_callback('on_disconnect', self.id, conn_id)\n        else:\n            self._logger.warn(\"Invalid response message received, message=%s\", message)", "docstring": "Process a response message received\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "juraj-google-style"}
{"code": "def load_feature_lists(self, feature_lists):\n    column_names = []\n    feature_ranges = []\n    running_feature_count = 0\n    for list_id in feature_lists:\n        feature_list_names = load_lines((self.features_dir + 'X_train_{}.names'.format(list_id)))\n        column_names.extend(feature_list_names)\n        start_index = running_feature_count\n        end_index = ((running_feature_count + len(feature_list_names)) - 1)\n        running_feature_count += len(feature_list_names)\n        feature_ranges.append([list_id, start_index, end_index])\n    X_train = np.hstack([load((self.features_dir + 'X_train_{}.pickle'.format(list_id))) for list_id in feature_lists])\n    X_test = np.hstack([load((self.features_dir + 'X_test_{}.pickle'.format(list_id))) for list_id in feature_lists])\n    df_train = pd.DataFrame(X_train, columns=column_names)\n    df_test = pd.DataFrame(X_test, columns=column_names)\n    return (df_train, df_test, feature_ranges)", "docstring": "Load pickled features for train and test sets, assuming they are saved\nin the `features` folder along with their column names.\n\nArgs:\nfeature_lists: A list containing the names of the feature lists to load.\n\nReturns:\nA tuple containing 3 items: train dataframe, test dataframe,\nand a list describing the index ranges for the feature lists.", "source": "codesearchnet"}
{"code": "def from_dict(self, graph_dict):\n        \n\n        self.reset_graph()\n        for new_node in graph_dict:\n            self.add_node(new_node)\n        for ind_node, dep_nodes in graph_dict.items():\n            if not isinstance(dep_nodes, collections.Iterable):\n                raise TypeError('%s: dict values must be lists' % ind_node)\n            for dep_node in dep_nodes:\n                self.add_edge(ind_node, dep_node)", "docstring": "Reset the graph and build it from the passed dictionary.\n\nThe dictionary takes the form of {node_name: [directed edges]}\n\nArgs:\ngraph_dict (dict): The dictionary used to create the graph.\n\nRaises:\nTypeError: Raised if the value of items in the dict are not lists.", "source": "juraj-google-style"}
{"code": "def _set_input_tensors(self, interpreter: _interpreter.Interpreter, tensor_data: Sequence[np.ndarray], initialize: bool) -> None:\n    input_details = interpreter.get_input_details()\n    if len(input_details) != len(tensor_data):\n        raise ValueError('Number of inputs provided ({}) does not match number of inputs to the model ({})'.format(len(tensor_data), len(input_details)))\n    if initialize:\n        for input_detail, tensor in zip(input_details, tensor_data):\n            interpreter.resize_tensor_input(input_detail['index'], tensor.shape)\n        interpreter.allocate_tensors()\n    for input_detail, tensor in zip(input_details, tensor_data):\n        if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:\n            quant_params = _get_quant_params(input_detail)\n            if quant_params:\n                scale, zero_point = quant_params\n                tensor = np.round(tensor / scale + zero_point).astype(np.int8)\n        interpreter.set_tensor(input_detail['index'], tensor)", "docstring": "Sets input tensors into TFLite model Interpreter.\n\nArgs:\ninterpreter: a tf.lite.Interpreter object with allocated tensors.\ntensor_data: a list of Numpy array data.\ninitialize: set to true when input is first set for the interpreter, to\nset input shapes and allocate tensors.\n\nRaises:\nValueError: when inputs can't be set, or size of provided inputs does not\nmatch size of model inputs.", "source": "github-repos"}
{"code": "def export(self, top=True):\n        \n        out = []\n        if top:\n            out.append(self._internal_name)\n        out.append(self._to_str(self.typical_or_extreme_period_name))\n        out.append(self._to_str(self.typical_or_extreme_period_type))\n        out.append(self._to_str(self.period_start_day))\n        out.append(self._to_str(self.period_end_day))\n        return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\ntop (bool):  if True appends `internal_name` before values.\nAll non list objects should be exported with value top=True,\nall list objects, that are embedded in as fields inlist objects\nshould be exported with `top`=False\n\nReturns:\nstr: The objects string representation", "source": "juraj-google-style"}
{"code": "async def find_deleted(self, seq_set: SequenceSet, selected: SelectedMailbox) -> Sequence[int]:\n    session_flags = selected.session_flags\n    return [msg.uid async for (_, msg) in self.find(seq_set, selected) if (Deleted in msg.get_flags(session_flags))]", "docstring": "Return all the active message UIDs that have the ``\\\\Deleted`` flag.\n\nArgs:\nseq_set: The sequence set of the possible messages.\nselected: The selected mailbox session.", "source": "codesearchnet"}
{"code": "def take_screenshot(self, destination, prefix='screenshot', all_displays=False):\n    filename = self.generate_filename(prefix, extension_name='png')\n    filename_no_extension, _ = os.path.splitext(filename)\n    device_path = os.path.join('/storage/emulated/0/', filename)\n    self.adb.shell(['screencap', '-p', '-a' if all_displays else '', device_path], timeout=TAKE_SCREENSHOT_TIMEOUT_SECOND)\n    utils.create_dir(destination)\n    if all_displays:\n        pic_paths = []\n        png_files = [device_path]\n        png_files = self.adb.shell('ls /storage/emulated/0/*.png').decode('utf-8').split('\\n')\n        for device_path in png_files:\n            if device_path.find(filename_no_extension) < 0:\n                continue\n            self.adb.pull([device_path, destination])\n            pic_paths.append(os.path.join(destination, os.path.basename(device_path)))\n            self.log.debug('Screenshot taken, saved on the host: %s', pic_paths[-1])\n            self.adb.shell(['rm', device_path])\n        return pic_paths\n    self.adb.pull([device_path, destination])\n    pic_path = os.path.join(destination, filename)\n    self.log.debug('Screenshot taken, saved on the host: %s', pic_path)\n    self.adb.shell(['rm', device_path])\n    return pic_path", "docstring": "Takes a screenshot of the device.\n\nArgs:\ndestination: string, full path to the directory to save in.\nprefix: string, prefix file name of the screenshot.\nall_displays: bool, if true will take a screenshot on all connnected\ndisplays, if false will take a screenshot on the default display.\n\nReturns:\nstring, full path to the screenshot file on the host, or\nlist[str], when all_displays is True, the full paths to the screenshot\nfiles on the host.", "source": "github-repos"}
{"code": "def extend(self, *bindings):\n    self._bindings.extend(self._preprocess(bindings))\n    return self", "docstring": "Append the given bindings to this keymap.\n\nArguments:\n*bindings (Binding): Bindings to be added.\nReturns:\nKeymap: self", "source": "codesearchnet"}
{"code": "def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]:\n    test_example_deps = {}\n    all_examples = []\n    for framework in ['flax', 'pytorch', 'tensorflow']:\n        test_files = list((PATH_TO_EXAMPLES / framework).glob('test_*.py'))\n        all_examples.extend(test_files)\n        examples = [f for f in (PATH_TO_EXAMPLES / framework).glob('**/*.py') if f.parent != PATH_TO_EXAMPLES / framework]\n        all_examples.extend(examples)\n        for test_file in test_files:\n            with open(test_file, 'r', encoding='utf-8') as f:\n                content = f.read()\n            test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content]\n            test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append(str(test_file.relative_to(PATH_TO_REPO)))\n    return (test_example_deps, all_examples)", "docstring": "The test examples do not import from the examples (which are just scripts, not modules) so we need some extra\ncare initializing the dependency map, which is the goal of this function. It initializes the dependency map for\nexample files by linking each example to the example test file for the example framework.\n\nReturns:\n`Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map which is a\ndict test example file to list of example files potentially tested by that test file, and the list of all\nexample files (to avoid recomputing it later).", "source": "github-repos"}
{"code": "def IsErrorSuppressedByNolint(category, linenum):\n  \n  return (_global_error_suppressions.get(category, False) or\n          linenum in _error_suppressions.get(category, set()) or\n          linenum in _error_suppressions.get(None, set()))", "docstring": "Returns true if the specified error category is suppressed on this line.\n\nConsults the global error_suppressions map populated by\nParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.\n\nArgs:\ncategory: str, the category of the error.\nlinenum: int, the current line number.\nReturns:\nbool, True iff the error should be suppressed due to a NOLINT comment or\nglobal suppression.", "source": "juraj-google-style"}
{"code": "def get_response(response: Dict[(str, Any)]) -> JSONRPCResponse:\n    if ('error' in response):\n        return ErrorResponse(**response)\n    return SuccessResponse(**response)", "docstring": "Converts a deserialized response into a JSONRPCResponse object.\n\nThe dictionary be either an error or success response, never a notification.\n\nArgs:\nresponse: Deserialized response dictionary. We can assume the response is valid\nJSON-RPC here, since it passed the jsonschema validation.", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if self._decoded_stream_size is None:\n      self._decoded_stream_size = self._GetDecodedStreamSize()\n\n    if self._decoded_stream_size < 0:\n      raise IOError('Invalid decoded stream size.')\n\n    if self._current_offset >= self._decoded_stream_size:\n      return b''\n\n    if self._realign_offset:\n      self._AlignDecodedDataOffset(self._current_offset)\n      self._realign_offset = False\n\n    if size is None:\n      size = self._decoded_stream_size\n    if self._current_offset + size > self._decoded_stream_size:\n      size = self._decoded_stream_size - self._current_offset\n\n    decoded_data = b''\n\n    if size == 0:\n      return decoded_data\n\n    while size > self._decoded_data_size:\n      decoded_data = b''.join([\n          decoded_data,\n          self._decoded_data[self._decoded_data_offset:]])\n\n      remaining_decoded_data_size = (\n          self._decoded_data_size - self._decoded_data_offset)\n\n      self._current_offset += remaining_decoded_data_size\n      size -= remaining_decoded_data_size\n\n      if self._current_offset >= self._decoded_stream_size:\n        break\n\n      read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)\n      self._decoded_data_offset = 0\n      if read_count == 0:\n        break\n\n    if size > 0:\n      slice_start_offset = self._decoded_data_offset\n      slice_end_offset = slice_start_offset + size\n\n      decoded_data = b''.join([\n          decoded_data,\n          self._decoded_data[slice_start_offset:slice_end_offset]])\n\n      self._decoded_data_offset += size\n      self._current_offset += size\n\n    return decoded_data", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def modified_files(root, tracked_only=False, commit=None):\n    \n    assert os.path.isabs(root), \"Root has to be absolute, got: %s\" % root\n\n    command = ['hg', 'status']\n    if commit:\n        command.append('--change=%s' % commit)\n\n    \n    status_lines = subprocess.check_output(command).decode('utf-8').split(\n        os.linesep)\n\n    modes = ['M', 'A']\n    if not tracked_only:\n        modes.append(r'\\?')\n    modes_str = '|'.join(modes)\n\n    modified_file_status = utils.filter_lines(\n        status_lines,\n        r'(?P<mode>%s) (?P<filename>.+)' % modes_str,\n        groups=('filename', 'mode'))\n\n    return dict((os.path.join(root, filename), mode)\n                for filename, mode in modified_file_status)", "docstring": "Returns a list of files that has been modified since the last commit.\n\nArgs:\nroot: the root of the repository, it has to be an absolute path.\ntracked_only: exclude untracked files when True.\ncommit: SHA1 of the commit. If None, it will get the modified files in the\nworking copy.\n\nReturns: a dictionary with the modified files as keys, and additional\ninformation as value. In this case it adds the status returned by\nhg status.", "source": "juraj-google-style"}
{"code": "def set_exception(self, exception):\n    if self.done():\n        raise RuntimeError('set_exception can only be called once.')\n    self._exception = exception\n    self._trigger()", "docstring": "Set the result of the future to the given exception.\n\nArgs:\nexception (:exc:`Exception`): The exception raised.", "source": "codesearchnet"}
{"code": "def process_update(x):\n    if callable(x):\n        update = lambda: process_update(x())\n        return update()\n    elif isinstance(x, ops.Operation):\n        update = x\n    elif hasattr(x, 'op'):\n        update = x.op\n    else:\n        update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n    reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n    update._unconditional_update = update not in reachable\n    return update", "docstring": "Standardize update ops.\n\nArgs:\nx: Tensor, op, or callable.\n\nReturns:\nAn update op.", "source": "github-repos"}
{"code": "def get_application_configurations(self, name=None):\n    if hasattr(self, 'applicationConfigurations'):\n        return self._get_elements(self.applicationConfigurations, 'applicationConfigurations', ApplicationConfiguration, None, name)", "docstring": "Retrieves application configurations for this instance.\n\nArgs:\nname (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a\nregular expression. If `name` is not supplied, then all application configurations are returned.\n\nReturns:\nlist(ApplicationConfiguration): A list of application configurations matching the given `name`.\n\n.. versionadded 1.12", "source": "codesearchnet"}
{"code": "def add_user(self, user_obj):\n    LOG.info('Adding user %s to the database', user_obj['email'])\n    if (not ('_id' in user_obj)):\n        user_obj['_id'] = user_obj['email']\n    try:\n        self.user_collection.insert_one(user_obj)\n        LOG.debug('User inserted')\n    except DuplicateKeyError as err:\n        raise IntegrityError('User {} already exists in database'.format(user_obj['email']))\n    return user_obj", "docstring": "Add a user object to the database\n\nArgs:\nuser_obj(scout.models.User): A dictionary with user information\n\nReturns:\nuser_info(dict): a copy of what was inserted", "source": "codesearchnet"}
{"code": "def wait_for_fresh_games(self, poll_interval=15.0):\n        \n        wait_until_game = self.read_wait_cell()\n        if not wait_until_game:\n            return\n        latest_game = self.latest_game_number\n        last_latest = latest_game\n        while latest_game < wait_until_game:\n            utils.dbg('Latest game {} not yet at required game {} '\n                      '(+{}, {:0.3f} games/sec)'.format(\n                          latest_game,\n                          wait_until_game,\n                          latest_game - last_latest,\n                          (latest_game - last_latest) / poll_interval\n                      ))\n            time.sleep(poll_interval)\n            last_latest = latest_game\n            latest_game = self.latest_game_number", "docstring": "Block caller until required new games have been played.\n\nArgs:\npoll_interval:  number of seconds to wait between checks\n\nIf the cell `table_state=metadata:wait_for_game_number` exists,\nthen block the caller, checking every `poll_interval` seconds,\nuntil `table_state=metadata:game_counter is at least the value\nin that cell.", "source": "juraj-google-style"}
{"code": "def remove_role(self, databaseName, roleName, collectionName=None):\n        \n        role = {\"databaseName\" : databaseName,\n                \"roleName\" : roleName}\n        \n        if collectionName:\n            role[\"collectionName\"] = collectionName\n        \n        if role in self.roles:\n            self.roles.remove(role)", "docstring": "Remove one role\n\nArgs:\ndatabaseName (str): Database Name\nroleName (RoleSpecs): role\n\nKeyword Args:\ncollectionName (str): Collection", "source": "juraj-google-style"}
{"code": "def _process_update(self, item, feed_item):\n    item['name'] = feed_item.get(FieldMap.CREATIVE_NAME, None)\n    self._associate_third_party_urls(feed_item, item)\n    self._associate_click_tags(feed_item, item)", "docstring": "Updates a creative based on the values from the feed.\n\nArgs:\nitem: Object representing the creative to be updated, this object is\nupdated directly.\nfeed_item: Feed item representing creative values from the Bulkdozer feed.", "source": "github-repos"}
{"code": "def pixelate(x, severity=1):\n    c = [0.6, 0.5, 0.4, 0.3, 0.25][(severity - 1)]\n    shape = x.shape\n    x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))\n    x = x.resize((int((shape[1] * c)), int((shape[0] * c))))\n    x = x.resize((shape[1], shape[0]))\n    return np.asarray(x)", "docstring": "Pixelate images.\n\nConduct pixelating corruptions to images by first shrinking the images and\nthen resizing to original size.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Applied pixelating\ncorruption.", "source": "codesearchnet"}
{"code": "def generate_proxy_api_files(output_files: list[str], proxy_module_root: str, output_dir: str):\n    for file in output_files:\n        file_dir = os.path.dirname(file)\n        if not os.path.isdir(file_dir):\n            os.makedirs(file_dir)\n        module = get_module(file_dir, output_dir)\n        content = f'from {proxy_module_root}.{module} import *'\n        with open(file, 'w') as f:\n            f.write(content)", "docstring": "Creates __init__.py files in proxy format for the Python API.\n\nArgs:\noutput_files: List of __init__.py file paths to create.\nproxy_module_root: Module root for proxy-import format. If specified, proxy\nfiles with content like `from proxy_module_root.proxy_module import *`\nwill be created to enable import resolution under TensorFlow.\noutput_dir: output API root directory.", "source": "github-repos"}
{"code": "def _prefix(self):\n    return self._checkpoint_prefix", "docstring": "A common prefix for all checkpoints saved with this manager.\n\nFor example, if `directory` (a constructor argument) were `\"/tmp/tf-model\"`,\n`prefix` would be `\"/tmp/tf-model/ckpt\"` and checkpoints would generally be\nnumbered `\"/tmp/tf-model/ckpt-1\"`, `\"/tmp/tf-model/ckpt-2\"`, and so on. Each\ncheckpoint has several associated files\n(e.g. `\"/tmp/tf-model/ckpt-2.index\"`).\n\nReturns:\nA string prefix.", "source": "github-repos"}
{"code": "def finish_operation(self, conn_or_internal_id, success, *args):\n    data = {'id': conn_or_internal_id, 'success': success, 'callback_args': args}\n    action = ConnectionAction('finish_operation', data, sync=False)\n    self._actions.put(action)", "docstring": "Finish an operation on a connection.\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id\nsuccess (bool): Whether the operation was successful\nfailure_reason (string): Optional reason why the operation failed\nresult (dict): Optional dictionary containing the results of the operation", "source": "codesearchnet"}
{"code": "def attach_bytes(key, the_bytes):\n    tf_v1.add_to_collection(_ATTACHMENT_COLLECTION_INTERNAL, module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))", "docstring": "Adds a ModuleAttachment to the current graph.\n\nArgs:\nkey: A string with the unique key of the attachment.\nthe_bytes: A bytes object with the serialized attachment.", "source": "codesearchnet"}
{"code": "def flat_transforms_to_matrices(transforms):\n    with ops.name_scope('flat_transforms_to_matrices'):\n        transforms = ops.convert_to_tensor(transforms, name='transforms')\n        if transforms.shape.ndims not in (1, 2):\n            raise ValueError('Transforms should be 1D or 2D, got: %s' % transforms)\n        transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))\n        num_transforms = array_ops.shape(transforms)[0]\n        return array_ops.reshape(array_ops.concat([transforms, array_ops.ones([num_transforms, 1])], axis=1), constant_op.constant([-1, 3, 3]))", "docstring": "Converts `tf.contrib.image` projective transforms to affine matrices.\n\nNote that the output matrices map output coordinates to input coordinates. For\nthe forward transformation matrix, call `tf.linalg.inv` on the result.\n\nArgs:\ntransforms: Vector of length 8, or batches of transforms with shape `(N,\n8)`.\n\nReturns:\n3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the\n*output coordinates* (in homogeneous coordinates) of each transform to the\ncorresponding *input coordinates*.\n\nRaises:\nValueError: If `transforms` have an invalid shape.", "source": "github-repos"}
{"code": "def classify_format(f):\n    (l0, l1) = _get_two_lines(f)\n    if loader.glove.check_valid(l0, l1):\n        return _glove\n    elif loader.word2vec_text.check_valid(l0, l1):\n        return _word2vec_text\n    elif loader.word2vec_bin.check_valid(l0, l1):\n        return _word2vec_bin\n    else:\n        raise OSError(b'Invalid format')", "docstring": "Determine the format of word embedding file by their content. This operation\nonly looks at the first two lines and does not check the sanity of input\nfile.\n\nArgs:\nf (Filelike):\n\nReturns:\nclass", "source": "codesearchnet"}
{"code": "def get_pose_error(target_pose, current_pose):\n    \n    error = np.zeros(6)\n\n    \n    target_pos = target_pose[:3, 3]\n    current_pos = current_pose[:3, 3]\n    pos_err = target_pos - current_pos\n\n    \n    r1 = current_pose[:3, 0]\n    r2 = current_pose[:3, 1]\n    r3 = current_pose[:3, 2]\n    r1d = target_pose[:3, 0]\n    r2d = target_pose[:3, 1]\n    r3d = target_pose[:3, 2]\n    rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d))\n\n    error[:3] = pos_err\n    error[3:] = rot_err\n    return error", "docstring": "Computes the error corresponding to target pose - current pose as a 6-dim vector.\nThe first 3 components correspond to translational error while the last 3 components\ncorrespond to the rotational error.\n\nArgs:\ntarget_pose: a 4x4 homogenous matrix for the target pose\ncurrent_pose: a 4x4 homogenous matrix for the current pose\n\nReturns:\nA 6-dim numpy array for the pose error.", "source": "juraj-google-style"}
{"code": "def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:\n    raise NotImplementedError", "docstring": "Returns a list of dictionaries, containing titles and text of the retrieved documents.\n\nArgs:\ndoc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):\nA tensor of document indices.", "source": "github-repos"}
{"code": "def _flatten_resource(self, resource: message.Message, select_expr: Mapping[str, python_compiled_expressions.PythonCompiledExpression]) -> Dict[str, Any]:\n    flat_resource = {}\n    for col_name, expr in select_expr.items():\n        messages = expr.evaluate(resource).messages\n        if len(messages) > 1:\n            flat_resource[col_name] = []\n            for msg in messages:\n                flat_resource[col_name].append(proto_utils.get_value_at_field(msg, 'value'))\n        elif len(messages) == 1:\n            flat_resource[col_name] = proto_utils.get_value_at_field(messages[0], 'value')\n        else:\n            flat_resource[col_name] = None\n    return flat_resource", "docstring": "Returns a dictionary representing a resource.\n\nEach key matches a column name from the view config select provided by the\nuser. The corresponding value is the value found in the resource or a list\nof matching values in the resource.\n\nArgs:\nresource: a singular resource from the bundle returned from the FHIR\nserver.\nselect_expr: a dictionary representing the column name and compiled fhir\npath for each select expression.", "source": "github-repos"}
{"code": "def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    call_type = self._GetRowValue(query_hash, row, 'type')\n    call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN')\n    duration = self._GetRowValue(query_hash, row, 'duration')\n    timestamp = self._GetRowValue(query_hash, row, 'date')\n\n    event_data = AndroidCallEventData()\n    event_data.call_type = call_type\n    event_data.duration = self._GetRowValue(query_hash, row, 'duration')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.number = self._GetRowValue(query_hash, row, 'number')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n\n    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, 'Call Started')\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if duration:\n      if isinstance(duration, py2to3.STRING_TYPES):\n        try:\n          duration = int(duration, 10)\n        except ValueError:\n          duration = 0\n\n      \n      timestamp += duration * 1000\n\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Call Ended')\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Call record row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def resize(img, size, interpolation=Image.BILINEAR):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    if (not (isinstance(size, int) or (isinstance(size, Iterable) and (len(size) == 2)))):\n        raise TypeError('Got inappropriate size arg: {}'.format(size))\n    if isinstance(size, int):\n        (w, h) = img.size\n        if (((w <= h) and (w == size)) or ((h <= w) and (h == size))):\n            return img\n        if (w < h):\n            ow = size\n            oh = int(((size * h) / w))\n            return img.resize((ow, oh), interpolation)\n        else:\n            oh = size\n            ow = int(((size * w) / h))\n            return img.resize((ow, oh), interpolation)\n    else:\n        return img.resize(size[::(- 1)], interpolation)", "docstring": "r\"\"\"Resize the input PIL Image to the given size.\n\nArgs:\nimg (PIL Image): Image to be resized.\nsize (sequence or int): Desired output size. If size is a sequence like\n(h, w), the output size will be matched to this. If size is an int,\nthe smaller edge of the image will be matched to this number maintaing\nthe aspect ratio. i.e, if height > width, then image will be rescaled to\n:math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`\ninterpolation (int, optional): Desired interpolation. Default is\n``PIL.Image.BILINEAR``\n\nReturns:\nPIL Image: Resized image.", "source": "codesearchnet"}
{"code": "def config(self, configlet=None, plane='sdr', **attributes):\n        \n        begin = time.time()\n        label = self._chain.target_device.config(configlet, plane, **attributes)\n        elapsed = time.time() - begin\n        if label:\n            self.emit_message(\"Configuration change last {:.0f}s. Label: {}\".format(elapsed, label),\n                              log_level=logging.INFO)\n        else:\n            self.emit_message(\"Configuration failed.\", log_level=logging.WARNING)\n\n        return label", "docstring": "Configure the device.\n\nThis method applies configuration to the device.\n\nArgs:\nconfiglet (text): The configuration template.\nplane (text): sdr or admin\nattributes (dict): The dictionary of attributes used in template.\n\nReturns:\nA string with commit label or None", "source": "juraj-google-style"}
{"code": "def pad(self, file, size=6):\n        \n        for element in self.splitter.split(file):\n            if _validate_payload_size(element, size):\n                yield element", "docstring": "Group together as many records as possible to fit in the specified size\n\nThis SingleRecordStrategy will not group any record and will return them one by one as\nlong as they are within the maximum size.\n\nArgs:\nfile (str): file path to read the records from.\nsize (int): maximum size in MB that each group of records will be fitted to.\npassing 0 means unlimited size.\n\nReturns:\ngenerator of records", "source": "juraj-google-style"}
{"code": "def restore_or_initialize(self):\n    if self._latest_checkpoint is not None:\n        self._checkpoint.restore(self._latest_checkpoint)\n        if self._checkpoint_interval is not None:\n            self._last_checkpoint_step = _evaluate(self._step_counter)\n        return self._latest_checkpoint\n    if self._init_fn is not None:\n        self._init_fn()\n        logging.info('Customized initialization is done through the passed `init_fn`.')\n    return None", "docstring": "Restore items in `checkpoint` from the latest checkpoint file.\n\nThis method will first try to restore from the most recent checkpoint in\n`directory`. If no checkpoints exist in `directory`, and `init_fn` is\nspecified, this method will call `init_fn` to do customized\ninitialization. This can be used to support initialization from pretrained\nmodels.\n\nNote that unlike `tf.train.Checkpoint.restore()`, this method doesn't return\na load status object that users can run assertions on\n(e.g. assert_consumed()). Thus to run assertions, users should directly use\n`tf.train.Checkpoint.restore()` method.\n\nReturns:\nThe restored checkpoint path if the latest checkpoint is found and\nrestored. Otherwise None.", "source": "github-repos"}
{"code": "def call(poly, args):\n    args = list(args)\n    if (len(args) < poly.dim):\n        args = (args + ([np.nan] * (poly.dim - len(args))))\n    elif (len(args) > poly.dim):\n        raise ValueError('too many arguments')\n    (x0, x1) = ([], [])\n    for (idx, arg) in enumerate(args):\n        if isinstance(arg, Poly):\n            poly_ = Poly({tuple(np.eye(poly.dim)[idx]): np.array(1)})\n            x0.append(poly_)\n            x1.append(arg)\n            args[idx] = np.nan\n    if x0:\n        poly = call(poly, args)\n        return substitute(poly, x0, x1)\n    masks = np.zeros(len(args), dtype=bool)\n    for (idx, arg) in enumerate(args):\n        if (np.ma.is_masked(arg) or np.any(np.isnan(arg))):\n            masks[idx] = True\n            args[idx] = 0\n    shape = np.array(args[np.argmax([np.prod(np.array(arg).shape) for arg in args])]).shape\n    args = np.array([(np.ones(shape, dtype=int) * arg) for arg in args])\n    A = {}\n    for key in poly.keys:\n        key_ = (np.array(key) * (1 - masks))\n        val = np.outer(poly.A[key], np.prod((args.T ** key_).T, axis=0))\n        val = np.reshape(val, (poly.shape + tuple(shape)))\n        val = np.where((val != val), 0, val)\n        mkey = tuple((np.array(key) * masks))\n        if (not (mkey in A)):\n            A[mkey] = val\n        else:\n            A[mkey] = (A[mkey] + val)\n    out = Poly(A, poly.dim, None, None)\n    if (out.keys and (not np.sum(out.keys))):\n        out = out.A[out.keys[0]]\n    elif (not out.keys):\n        out = np.zeros(out.shape, dtype=out.dtype)\n    return out", "docstring": "Evaluate a polynomial along specified axes.\n\nArgs:\npoly (Poly):\nInput polynomial.\nargs (numpy.ndarray):\nArgument to be evaluated. Masked values keeps the variable intact.\n\nReturns:\n(Poly, numpy.ndarray):\nIf masked values are used the Poly is returned. Else an numpy array\nmatching the polynomial's shape is returned.", "source": "codesearchnet"}
{"code": "def add_reciprocal_link(self, target, weight):\n    if (not isinstance(target, list)):\n        target_list = [target]\n    else:\n        target_list = target\n    for t in target_list:\n        self.add_link(t, weight)\n        t.add_link(self, weight)", "docstring": "Add links pointing in either direction between ``self`` and ``target``.\n\nThis creates a ``Link`` from ``self`` to ``target`` and a ``Link``\nfrom ``target`` to ``self`` of equal weight. If ``target`` is a list\nof ``Node`` 's, repeat this for each one.\n\nArgs:\ntarget (Node or list[Node]):\nweight (int or float):\n\nReturns: None\n\nExample:\n>>> node_1 = Node('One')\n>>> node_2 = Node('Two')\n>>> node_1.add_reciprocal_link(node_2, 5)\n>>> new_link_1 = node_1.link_list[0]\n>>> new_link_2 = node_2.link_list[0]\n>>> print(new_link_1)\nnode.Link instance pointing to node with value \"Two\" with weight 5\n>>> print(new_link_2)\nnode.Link instance pointing to node with value \"One\" with weight 5", "source": "codesearchnet"}
{"code": "def resolve_theme(self, name):\n    if (name not in settings.CODEMIRROR_THEMES):\n        msg = \"Given theme name '{}' does not exists in 'settings.CODEMIRROR_THEMES'.\"\n        raise UnknowThemeError(msg.format(name))\n    return settings.CODEMIRROR_THEMES.get(name)", "docstring": "From given theme name, return theme file path from\n``settings.CODEMIRROR_THEMES`` map.\n\nArguments:\nname (string): Theme name.\n\nRaises:\nKeyError: When given name does not exist in\n``settings.CODEMIRROR_THEMES``.\n\nReturns:\nstring: Theme file path.", "source": "codesearchnet"}
{"code": "def random_transform(self, x, seed=None):\n    params = self.get_random_transform(x.shape, seed)\n    return self.apply_transform(x, params)", "docstring": "Applies a random transformation to an image.\n\nArgs:\nx: 3D tensor, single image.\nseed: Random seed.\n\nReturns:\nA randomly transformed version of the input (same shape).", "source": "github-repos"}
{"code": "def _parse_email(self, val):\n    ret = {'type': None, 'value': None}\n    try:\n        ret['type'] = val[1]['type']\n    except (KeyError, ValueError, TypeError):\n        pass\n    ret['value'] = val[3].strip()\n    try:\n        self.vars['email'].append(ret)\n    except AttributeError:\n        self.vars['email'] = []\n        self.vars['email'].append(ret)", "docstring": "The function for parsing the vcard email addresses.\n\nArgs:\nval (:obj:`list`): The value to parse.", "source": "codesearchnet"}
{"code": "def prepare_config(config: Optional[config_pb2.ConfigProto]) -> config_pb2.ConfigProto:\n    allow_soft_placement = not force_gpu\n    if config is None:\n        config = context.context().config\n        config.allow_soft_placement = allow_soft_placement\n    elif not allow_soft_placement and config.allow_soft_placement:\n        config_copy = context.context().config\n        config = config_copy\n        config.allow_soft_placement = False\n    config.graph_options.optimizer_options.opt_level = -1\n    config.graph_options.rewrite_options.constant_folding = rewriter_config_pb2.RewriterConfig.OFF\n    config.graph_options.rewrite_options.pin_to_host_optimization = rewriter_config_pb2.RewriterConfig.OFF\n    return config", "docstring": "Returns a config for sessions.\n\nArgs:\nconfig: An optional config_pb2.ConfigProto to use to configure the\nsession.\n\nReturns:\nA config_pb2.ConfigProto object.", "source": "github-repos"}
{"code": "def gibbs_binding_energy(self, eads=False):\n        \n\n        n = self.get_unit_primitive_area\n        Nads = self.Nads_in_slab\n\n        BE = (self.energy - n * self.clean_entry.energy) / Nads - \\\n             sum([ads.energy_per_atom for ads in self.adsorbates])\n        return BE * Nads if eads else BE", "docstring": "Returns the adsorption energy or Gibb's binding energy\nof an adsorbate on a surface\nArgs:\neads (bool): Whether to calculate the adsorption energy\n(True) or the binding energy (False) which is just\nadsorption energy normalized by number of adsorbates.", "source": "juraj-google-style"}
{"code": "def predict_classes(self, x, batch_size=32, verbose=0):\n    warnings.warn('`model.predict_classes()` is deprecated and will be removed after 2021-01-01. Please use instead:* `np.argmax(model.predict(x), axis=-1)`,   if your model does multi-class classification   (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype(\"int32\")`,   if your model does binary classification   (e.g. if it uses a `sigmoid` last-layer activation).')\n    proba = self.predict(x, batch_size=batch_size, verbose=verbose)\n    if proba.shape[-1] > 1:\n        return proba.argmax(axis=-1)\n    else:\n        return (proba > 0.5).astype('int32')", "docstring": "Generate class predictions for the input samples.\n\nThe input samples are processed batch by batch.\n\nArgs:\nx: input data, as a Numpy array or list of Numpy arrays\n(if the model has multiple inputs).\nbatch_size: integer.\nverbose: verbosity mode, 0 or 1.\n\nReturns:\nA numpy array of class predictions.", "source": "github-repos"}
{"code": "def output(self, _filename):\n        \n\n        for c in self.contracts:\n            (name, inheritance, var, func_summaries, modif_summaries) = c.get_summary()\n            txt = \"\\nContract %s\"%name\n            txt += '\\nContract vars: '+str(var)\n            txt += '\\nInheritance:: '+str(inheritance)\n            table = PrettyTable([\"Function\",\n                                 \"Visibility\",\n                                 \"Modifiers\",\n                                 \"Read\",\n                                 \"Write\",\n                                 \"Internal Calls\",\n                                 \"External Calls\"])\n            for (_c_name, f_name, visi, modifiers, read, write, internal_calls, external_calls) in func_summaries:\n                read = self._convert(read)\n                write = self._convert(write)\n                internal_calls = self._convert(internal_calls)\n                external_calls = self._convert(external_calls)\n                table.add_row([f_name, visi, modifiers, read, write, internal_calls, external_calls])\n            txt += \"\\n \\n\"+str(table)\n            table = PrettyTable([\"Modifiers\",\n                                 \"Visibility\",\n                                 \"Read\",\n                                 \"Write\",\n                                 \"Internal Calls\",\n                                 \"External Calls\"])\n            for (_c_name, f_name, visi, _, read, write, internal_calls, external_calls) in modif_summaries:\n                read = self._convert(read)\n                write = self._convert(write)\n                internal_calls = self._convert(internal_calls)\n                external_calls = self._convert(external_calls)\n                table.add_row([f_name, visi, read, write, internal_calls, external_calls])\n            txt += \"\\n\\n\"+str(table)\n            txt += \"\\n\"\n            self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):\n    \n    candidate_hash = generate_password_hash(password, salt, N, r, p, buflen)\n\n    return safe_str_cmp(password_hash, candidate_hash)", "docstring": "Given a password, hash, salt this function verifies the password is equal to hash/salt.\n\nArgs:\n- ``password``: The password to perform check on.\n\nReturns:\n- ``bool``", "source": "juraj-google-style"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_HuggingFaceModelHandler_Tensor'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):\n    \n    if not isinstance(description, str):\n        raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n\n    if not isinstance(inputvalue, (int, long)):\n        raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))\n\n    if not isinstance(minvalue, (int, long, type(None))):\n        raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))\n\n    if not isinstance(maxvalue, (int, long, type(None))):\n        raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))\n\n    _checkNumerical(inputvalue, minvalue, maxvalue, description)", "docstring": "Check that the given integer is valid.\n\nArgs:\n* inputvalue (int or long): The integer to be checked\n* minvalue (int or long, or None): Minimum value of the integer\n* maxvalue (int or long, or None): Maximum value of the integer\n* description (string): Used in error messages for the checked inputvalue\n\nRaises:\nTypeError, ValueError\n\nNote: Can not use the function :func:`_checkString`, as that function uses this function internally.", "source": "juraj-google-style"}
{"code": "def int_shape(x):\n    try:\n        shape = x.shape\n        if not isinstance(shape, tuple):\n            shape = tuple(shape.as_list())\n        return shape\n    except ValueError:\n        return None", "docstring": "Returns the shape of tensor or variable as a tuple of int or None entries.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tuple of integers (or None entries).\n\nExamples:\n\n>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))\n>>> tf.keras.backend.int_shape(input)\n(2, 4, 5)\n>>> val = np.array([[1, 2], [3, 4]])\n>>> kvar = tf.keras.backend.variable(value=val)\n>>> tf.keras.backend.int_shape(kvar)\n(2, 2)", "source": "github-repos"}
{"code": "def parse_page(raw_page):\n  \n  ret = {\"title\": get_title(raw_page), \"id\": get_id(raw_page)}\n  if \":\" in ret[\"title\"]:\n    return None\n  ret[\"revisions\"] = get_revisions(raw_page)\n  return ret", "docstring": "Create a dictionary with title, id, and list of revisions.\n\nThe dictionary contains:\n\"title\": a string\n\"id\": an integer\n\"revisions\": a list of strings\n\nArgs:\nraw_page: a string\n\nReturns:\na dictionary, or None in the case of an error.", "source": "juraj-google-style"}
{"code": "def ToParameter(item: StackItem):\n        \n        if isinstance(item, Array) or isinstance(item, Struct):\n            items = item.GetArray()\n            output = [ContractParameter.ToParameter(subitem) for subitem in items]\n            return ContractParameter(type=ContractParameterType.Array, value=output)\n\n        elif isinstance(item, Boolean):\n            return ContractParameter(type=ContractParameterType.Boolean, value=item.GetBoolean())\n\n        elif isinstance(item, ByteArray):\n            return ContractParameter(type=ContractParameterType.ByteArray, value=item.GetByteArray())\n\n        elif isinstance(item, Integer):\n            return ContractParameter(type=ContractParameterType.Integer, value=str(item.GetBigInteger()))\n\n        elif isinstance(item, InteropInterface):\n            return ContractParameter(type=ContractParameterType.InteropInterface, value=item.GetInterface())", "docstring": "Convert a StackItem to a ContractParameter object\n\nArgs:\nitem (neo.VM.InteropService.StackItem) The item to convert to a ContractParameter object\n\nReturns:\nContractParameter", "source": "juraj-google-style"}
{"code": "def GetSitelinksFromFeed(client, feed):\n  \n  \n  feed_mappings = GetFeedMapping(client, feed, PLACEHOLDER_TYPE_SITELINKS)\n\n  feed_items = {}\n\n  for feed_item in GetFeedItems(client, feed):\n    site_link_from_feed = {}\n\n    for attribute_value in feed_item['attributeValues']:\n      if attribute_value['feedAttributeId'] in feed_mappings:\n        for field_id in feed_mappings[attribute_value['feedAttributeId']]:\n          if field_id == SITE_LINK_FIELDS['TEXT']:\n            site_link_from_feed['text'] = attribute_value['stringValue']\n          elif field_id == SITE_LINK_FIELDS['URL']:\n            site_link_from_feed['url'] = attribute_value['stringValue']\n          elif field_id == SITE_LINK_FIELDS['FINAL_URLS']:\n            site_link_from_feed['finalUrls'] = attribute_value['stringValues']\n          elif field_id == SITE_LINK_FIELDS['FINAL_MOBILE_URLS']:\n            site_link_from_feed['finalMobileUrls'] = attribute_value[\n                'stringValues']\n          elif field_id == SITE_LINK_FIELDS['TRACKING_URL_TEMPLATE']:\n            site_link_from_feed['trackingUrlTemplate'] = attribute_value[\n                'stringValue']\n          elif field_id == SITE_LINK_FIELDS['LINE2']:\n            site_link_from_feed['line2'] = attribute_value['stringValue']\n          elif field_id == SITE_LINK_FIELDS['LINE3']:\n            site_link_from_feed['line3'] = attribute_value['stringValue']\n          else:\n            print 'No applicable Site Link Field found for Id: %s' % field_id\n\n    feed_items[feed_item['feedItemId']] = site_link_from_feed\n\n  return feed_items", "docstring": "Gets the sitelinks from a feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed: the feed used to retrieve sitelinks.\n\nReturns:\nA dictionary mapping the feed item ID to SiteLinkFromFeed.", "source": "juraj-google-style"}
{"code": "def runTemplate(id, data={}):\n    conn = Qubole.agent()\n    path = (str(id) + '/run')\n    res = conn.post(Template.element_path(path), data)\n    cmdType = res['command_type']\n    cmdId = res['id']\n    cmdClass = eval(cmdType)\n    cmd = cmdClass.find(cmdId)\n    while (not Command.is_done(cmd.status)):\n        time.sleep(Qubole.poll_interval)\n        cmd = cmdClass.find(cmd.id)\n    return Template.getResult(cmdClass, cmd)", "docstring": "Run an existing Template and waits for the Result.\nPrints result to stdout.\n\nArgs:\n`id`: ID of the template to run\n`data`: json data containing the input_vars\n\nReturns:\nAn integer as status (0: success, 1: failure)", "source": "codesearchnet"}
{"code": "def to_service(self, service, version):\n    service_url = self._service_locator.get_service_url(service, version)\n    return self.__copy_and_set('service_url', self.__strip_trailing_slashes(service_url))", "docstring": "Sets the service name and version the request should target\n\nArgs:\nservice (str): The name of the service as displayed in the services.json file\nversion (str): The version of the service as displayed in the services.json file\n\nReturns:\nThe request builder instance in order to chain calls", "source": "codesearchnet"}
{"code": "def _variable_on_cpu(name, shape, initializer):\n    with tf.device('/cpu:0'):\n        dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32)\n        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n    return var", "docstring": "Helper to create a Variable stored on CPU memory.\n\nArgs:\nname: name of the variable\nshape: list of ints\ninitializer: initializer for Variable\n\nReturns:\nVariable Tensor", "source": "codesearchnet"}
{"code": "def create_domain(provider, context, **kwargs):\n    \n    session = get_session(provider.region)\n    client = session.client(\"route53\")\n    domain = kwargs.get(\"domain\")\n    if not domain:\n        logger.error(\"domain argument or BaseDomain variable not provided.\")\n        return False\n    zone_id = create_route53_zone(client, domain)\n    return {\"domain\": domain, \"zone_id\": zone_id}", "docstring": "Create a domain within route53.\n\nArgs:\nprovider (:class:`stacker.providers.base.BaseProvider`): provider\ninstance\ncontext (:class:`stacker.context.Context`): context instance\n\nReturns: boolean for whether or not the hook succeeded.", "source": "juraj-google-style"}
{"code": "def combine_last_two_dimensions(x):\n    x_shape = common_layers.shape_list(x)\n    (a, b) = x_shape[(- 2):]\n    return tf.reshape(x, (x_shape[:(- 2)] + [(a * b)]))", "docstring": "Reshape x so that the last two dimension become one.\n\nArgs:\nx: a Tensor with shape [..., a, b]\n\nReturns:\na Tensor with shape [..., ab]", "source": "codesearchnet"}
{"code": "def sinh(x):\n    if any_symbolic_tensors((x,)):\n        return Sinh().symbolic_call(x)\n    return backend.numpy.sinh(x)", "docstring": "Hyperbolic sine, element-wise.\n\nArguments:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"}
{"code": "def is_disconnected(self, node_id):\n        \n        conn = self._conns.get(node_id)\n        if conn is None:\n            return False\n        return conn.disconnected()", "docstring": "Check whether the node connection has been disconnected or failed.\n\nA disconnected node has either been closed or has failed. Connection\nfailures are usually transient and can be resumed in the next ready()\ncall, but there are cases where transient failures need to be caught\nand re-acted upon.\n\nArguments:\nnode_id (int): the id of the node to check\n\nReturns:\nbool: True iff the node exists and is disconnected", "source": "juraj-google-style"}
{"code": "def get_favorite_radio_shows(self, start=0, max_items=100):\n    message = 'The output type of this method will probably change in the future to use SoCo data structures'\n    warnings.warn(message, stacklevel=2)\n    return self.__get_favorites(RADIO_SHOWS, start, max_items)", "docstring": "Get favorite radio shows from Sonos' Radio app.\n\nReturns:\ndict: A dictionary containing the total number of favorites, the\nnumber of favorites returned, and the actual list of favorite radio\nshows, represented as a dictionary with `title` and `uri` keys.\n\nDepending on what you're building, you'll want to check to see if the\ntotal number of favorites is greater than the amount you\nrequested (`max_items`), if it is, use `start` to page through and\nget the entire list of favorites.", "source": "codesearchnet"}
{"code": "def fts_match_all(self, fts, inv):\n    return all([self.fts_match(fts, s) for s in inv])", "docstring": "Return `True` if all segments in `inv` matches the features in fts\n\nArgs:\nfts (list): a collection of (value, feature) tuples\ninv (list): a collection of IPA segments represented as Unicode\nstrings\n\nReturns:\nbool: `True` if all segments in `inv` matches the features in `fts`", "source": "codesearchnet"}
{"code": "def error(message):\n    fail = '\\x1b[91m'\n    end = '\\x1b[0m'\n    sys.exit(((fail + 'Error: {}'.format(message)) + end))", "docstring": "Throw an error with the given message and immediately quit.\n\nArgs:\nmessage(str): The message to display.", "source": "codesearchnet"}
{"code": "def __init__(self, axis=None):\n    super().__init__()\n    if axis is None:\n        axis = []\n    self._axis = axis", "docstring": "Initializes a Squeeze layer.\n\nArgs:\naxis: An optional list of ints. Defaults to []. If specified, only\nsqueezes the dimensions listed. The dimension index starts at 0. It is\nan error to squeeze a dimension that is not 1. Must be in the range\n[-rank(input), rank(input)). Must be specified if input is\na RaggedTensor.", "source": "github-repos"}
{"code": "def dispatch(self, msg):\n    logger.debug(f'Got message: {msg}')\n    futures = []\n    matched = False\n    for behaviour in (x for x in self.behaviours if x.match(msg)):\n        futures.append(self.submit(behaviour.enqueue(msg)))\n        logger.debug(f'Message enqueued to behaviour: {behaviour}')\n        self.traces.append(msg, category=str(behaviour))\n        matched = True\n    if (not matched):\n        logger.warning(f'No behaviour matched for message: {msg}')\n        self.traces.append(msg)\n    return futures", "docstring": "Dispatch the message to every behaviour that is waiting for\nit using their templates match.\n\nArgs:\nmsg (spade.message.Messagge): the message to dispatch.\n\nReturns:\nlist(asyncio.Future): a list of futures of the append of the message at each matched behaviour.", "source": "codesearchnet"}
{"code": "def _create_environment(config, outdir):\n  \n  if isinstance(config.env, str):\n    env = gym.make(config.env)\n  else:\n    env = config.env()\n  \n  \n  if not hasattr(env, 'spec'):\n    setattr(env, 'spec', getattr(env, 'spec', None))\n  if config.max_length:\n    env = tools.wrappers.LimitDuration(env, config.max_length)\n  env = gym.wrappers.Monitor(\n      env, outdir, lambda unused_episode_number: True)\n  if isinstance(env.action_space, gym.spaces.Box):\n    env = tools.wrappers.RangeNormalize(env)\n    env = tools.wrappers.ClipAction(env)\n  elif isinstance(env.action_space, gym.spaces.Discrete):\n    env = tools.wrappers.RangeNormalize(env, action=False)\n  else:\n    message = \"Unsupported action space '{}'\".format(type(env.action_space))\n    raise NotImplementedError(message)\n  env = tools.wrappers.ConvertTo32Bit(env)\n  env = tools.wrappers.CacheSpaces(env)\n  return env", "docstring": "Constructor for an instance of the environment.\n\nArgs:\nconfig: Object providing configurations via attributes.\noutdir: Directory to store videos in.\n\nRaises:\nNotImplementedError: For action spaces other than Box and Discrete.\n\nReturns:\nWrapped OpenAI Gym environment.", "source": "juraj-google-style"}
{"code": "def get_relevant_paths_and_versions(self, config: 'XLAConfigOptions'):\n    if self.ld_library_path is None:\n        self.ld_library_path = os.environ.get('LD_LIBRARY_PATH', None)\n    if config.host_compiler == HostCompiler.CLANG:\n        self.clang_path = _find_executable_or_die('clang', self.clang_path)\n        self.clang_major_version = self.clang_major_version or _get_clang_major_version(self.clang_path)\n        self.lld_path = self.lld_path or shutil.which('ld.lld')\n    elif config.host_compiler == HostCompiler.GCC:\n        self.gcc_path = _find_executable_or_die('gcc', self.gcc_path)\n        self.gcc_major_version = self.gcc_major_version or _get_gcc_major_version(self.gcc_path)\n    if config.backend == Backend.CUDA:\n        if config.cuda_compiler == CudaCompiler.CLANG:\n            self.clang_path = _find_executable_or_die('clang', self.clang_path)\n        if not self.cuda_compute_capabilities:\n            self.cuda_compute_capabilities = _get_cuda_compute_capabilities_or_die()", "docstring": "Gets paths and versions as needed by the config.\n\nArgs:\nconfig: XLAConfigOptions instance that determines what paths and versions\nto try to autoconfigure.", "source": "github-repos"}
{"code": "def run(self, resources):\n    hwman = resources['connection']\n    con = hwman.hwman.controller()\n    test_interface = con.test_interface()\n    try:\n        test_interface.synchronize_clock()\n        print(('Time currently set at %s' % test_interface.current_time_str()))\n    except:\n        raise ArgumentError('Error setting RTC time, check if controller actually has RTC or if iotile-support-lib-controller-3 is updated')", "docstring": "Sets the RTC timestamp to UTC.\n\nArgs:\nresources (dict): A dictionary containing the required resources that\nwe needed access to in order to perform this step.", "source": "codesearchnet"}
{"code": "def prefer_type(self, prefer, over):\n    self._write_lock.acquire()\n    try:\n        if self._preferred(preferred=over, over=prefer):\n            raise ValueError(('Type %r is already preferred over %r.' % (over, prefer)))\n        prefs = self._prefer_table.setdefault(prefer, set())\n        prefs.add(over)\n    finally:\n        self._write_lock.release()", "docstring": "Prefer one type over another type, all else being equivalent.\n\nWith abstract base classes (Python's abc module) it is possible for\na type to appear to be a subclass of another type without the supertype\nappearing in the subtype's MRO. As such, the supertype has no order\nwith respect to other supertypes, and this may lead to amguity if two\nimplementations are provided for unrelated abstract types.\n\nIn such cases, it is possible to disambiguate by explictly telling the\nfunction to prefer one type over the other.\n\nArguments:\nprefer: Preferred type (class).\nover: The type we don't like (class).\n\nRaises:\nValueError: In case of logical conflicts.", "source": "codesearchnet"}
{"code": "def from_stream(cls, stream):\n    fields = _magic_parser(stream, magic=cls.MAGIC)\n    if fields:\n        fields.pop('iter')\n        return cls(fields)\n    else:\n        return None", "docstring": "Read the first occurrence of ScfCycle from stream.\n\nReturns:\nNone if no `ScfCycle` entry is found.", "source": "codesearchnet"}
{"code": "def register(config_class, video_processor_class, exist_ok=False):\n    VIDEO_PROCESSOR_MAPPING.register(config_class, video_processor_class, exist_ok=exist_ok)", "docstring": "Register a new video processor for this class.\n\nArgs:\nconfig_class ([`PretrainedConfig`]):\nThe configuration corresponding to the model to register.\nvideo_processor_class ([`BaseVideoProcessor`]):\nThe video processor to register.", "source": "github-repos"}
{"code": "def step(self, action):\n    self._agent.act(action)\n    self._handle_command_buffer()\n    self._client.release()\n    self._client.acquire()\n    return self._get_single_state()", "docstring": "Supplies an action to the main agent and tells the environment to tick once.\nPrimary mode of interaction for single agent environments.\n\nArgs:\naction (np.ndarray): An action for the main agent to carry out on the next tick.\n\nReturns:\ntuple: The (state, reward, terminal, info) tuple for the agent. State is a dictionary\nfrom sensor enum (see :obj:`holodeck.sensors.Sensors`) to np.ndarray.\nReward is the float reward returned by the environment.\nTerminal is the bool terminal signal returned by the environment.\nInfo is any additional info, depending on the world. Defaults to None.", "source": "codesearchnet"}
{"code": "def _Inject(self, position, call):\n    self.EnsureGdbPosition(position[0], position[1], None)\n    self.ClearBreakpoints()\n    self._AddThreadSpecificBreakpoint(position)\n    gdb.parse_and_eval(('%s = 1' % GdbCache.PENDINGCALLS_TO_DO))\n    gdb.parse_and_eval(('%s = 1' % GdbCache.PENDINGBUSY))\n    try:\n        self.Continue(position)\n        if (not gdb.selected_thread().is_stopped()):\n            raise RuntimeError('Gdb is not acting as expected, is it being run in async mode?')\n    finally:\n        gdb.parse_and_eval(('%s = 0' % GdbCache.PENDINGBUSY))\n    self.Call(position, call)", "docstring": "Injects evaluation of 'call' in a safe location in the inferior.\n\nDue to the way these injected function calls work, gdb must not be killed\nuntil the call has returned. If that happens, the inferior will be sent\nSIGTRAP upon attempting to return from the dummy frame gdb constructs for\nus, and will most probably crash.\nArgs:\nposition: array of pid, tid, framedepth specifying the requested position.\ncall: Any expression gdb can evaluate. Usually a function call.\nRaises:\nRuntimeError: if gdb is not being run in synchronous exec mode.", "source": "codesearchnet"}
{"code": "def _construct_field_operator_expression_dict(expression_list):\n    between_operators = (u'<=', u'>=')\n    inverse_operator = {u'>=': u'<=', u'<=': u'>='}\n    local_field_to_expressions = {}\n    remaining_expression_list = deque([])\n    for expression in expression_list:\n        if all((isinstance(expression, BinaryComposition), (expression.operator in between_operators), (isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField)))):\n            if isinstance(expression.right, LocalField):\n                new_operator = inverse_operator[expression.operator]\n                new_expression = BinaryComposition(new_operator, expression.right, expression.left)\n            else:\n                new_expression = expression\n            field_name = new_expression.left.field_name\n            expressions_dict = local_field_to_expressions.setdefault(field_name, {})\n            expressions_dict.setdefault(new_expression.operator, []).append(new_expression)\n        else:\n            remaining_expression_list.append(expression)\n    return (local_field_to_expressions, remaining_expression_list)", "docstring": "Construct a mapping from local fields to specified operators, and corresponding expressions.\n\nArgs:\nexpression_list: list of expressions to analyze\n\nReturns:\nlocal_field_to_expressions:\ndict mapping local field names to \"operator -> list of BinaryComposition\" dictionaries,\nfor each BinaryComposition operator involving the LocalField\nremaining_expression_list:\nlist of remaining expressions that were *not*\nBinaryCompositions on a LocalField using any of the between operators", "source": "codesearchnet"}
{"code": "def declaration_path(decl):\n    if (not decl):\n        return []\n    if (not decl.cache.declaration_path):\n        result = [decl.name]\n        parent = decl.parent\n        while parent:\n            if parent.cache.declaration_path:\n                result.reverse()\n                decl.cache.declaration_path = (parent.cache.declaration_path + result)\n                return decl.cache.declaration_path\n            else:\n                result.append(parent.name)\n                parent = parent.parent\n        result.reverse()\n        decl.cache.declaration_path = result\n        return result\n    return decl.cache.declaration_path", "docstring": "Returns a list of parent declarations names.\n\nArgs:\ndecl (declaration_t): declaration for which declaration path\nshould be calculated.\n\nReturns:\nlist[(str | basestring)]: list of names, where first item is the top\nparent name and last item the inputted\ndeclaration name.", "source": "codesearchnet"}
{"code": "def get_wells(self, uwis=None):\n        \n        if uwis is None:\n            return Project(self.__list)\n        return Project([w for w in self if w.uwi in uwis])", "docstring": "Returns a new Project with only the wells named by UWI.\n\nArgs:\nuwis (list): list or tuple of UWI strings.\n\nReturns:\nproject.", "source": "juraj-google-style"}
{"code": "def _CopyFromDateTimeValues(self, date_time_values):\n    \n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n    microseconds = date_time_values.get('microseconds', 0)\n\n    precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(\n        self._precision)\n\n    fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(\n        microseconds)\n\n    self._normalized_timestamp = None\n    self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n    self._time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds)\n    self.fraction_of_second = fraction_of_second\n    self.is_local_time = False", "docstring": "Copies time elements from date and time values.\n\nArgs:\ndate_time_values  (dict[str, int]): date and time values, such as year,\nmonth, day of month, hours, minutes, seconds, microseconds.\n\nRaises:\nValueError: if no helper can be created for the current precision.", "source": "juraj-google-style"}
{"code": "def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size):\n    try:\n        byte_stream_size = len(byte_stream)\n    except Exception as exception:\n        raise errors.MappingError(exception)\n    if ((byte_stream_size - byte_offset) < data_type_size):\n        raise errors.ByteStreamTooSmallError('Byte stream too small requested: {0:d} available: {1:d}'.format(data_type_size, byte_stream_size))", "docstring": "Checks if the byte stream is large enough for the data type.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (int): offset into the byte stream where to start.\ndata_type_size (int): data type size.\n\nRaises:\nByteStreamTooSmallError: if the byte stream is too small.\nMappingError: if the size of the byte stream cannot be determined.", "source": "codesearchnet"}
{"code": "def splitEkmDate(dateint):\n    date_str = str(dateint)\n    dt = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])\n    if (len(date_str) != 14):\n        dt.yy = dt.mm = dt.dd = dt.weekday = dt.hh = dt.minutes = dt.ss = 0\n        return dt\n    dt.yy = int(date_str[0:2])\n    dt.mm = int(date_str[2:4])\n    dt.dd = int(date_str[4:6])\n    dt.weekday = int(date_str[6:8])\n    dt.hh = int(date_str[8:10])\n    dt.minutes = int(date_str[10:12])\n    dt.ss = int(date_str[12:14])\n    return dt", "docstring": "Break out a date from Omnimeter read.\n\nNote a corrupt date will raise an exception when you\nconvert it to int to hand to this method.\n\nArgs:\ndateint (int):  Omnimeter datetime as int.\n\nReturns:\ntuple: Named tuple which breaks out as followws:\n\n========== =====================\nyy         Last 2 digits of year\nmm         Month 1-12\ndd         Day 1-31\nweekday    Zero based weekday\nhh         Hour 0-23\nminutes    Minutes 0-59\nss         Seconds 0-59\n========== =====================", "source": "codesearchnet"}
{"code": "def render(self, data):\n        \n        renderers = {\n            \"text/csv\": self._render_as_csv,\n            \"text/html\": self._render_as_html,\n            None: self._render_as_html,\n        }\n        render = renderers[data.content_type]\n        return render(data)", "docstring": "Renders the reports based on data.content_type's value.\n\nArguments:\ndata (ReportViewRequestData): The report data. data.content_type\nis used to determine how the reports are rendered.\n\nReturns:\nHTTPResponse: The rendered version of the report.", "source": "juraj-google-style"}
{"code": "def get_hook(hook_name):\n    \n    if not pkg_resources.resource_exists(__name__, hook_name):\n        raise HookNotFoundError\n    return pkg_resources.resource_string(__name__, hook_name)", "docstring": "Returns the specified hook.\n\nArgs:\nhook_name (str)\n\nReturns:\nstr - (the content of) the hook\n\nRaises:\nHookNotFoundError", "source": "juraj-google-style"}
{"code": "def sheetNames(book=None):\n    \n    if book:\n        if not book.lower() in [x.lower() for x in bookNames()]:\n            return False\n    else:\n        book=activeBook()\n    if not book:\n        return False\n    poBook=PyOrigin.WorksheetPages(book)\n    if not len(poBook):\n        return None\n    return [x.GetName() for x in poBook.Layers()]", "docstring": "return sheet names of a book.\n\nArgs:\nbook (str, optional): If a book is given, pull names from\nthat book. Otherwise, try the active one\n\nReturns:\nlist of sheet names (typical case).\nNone if book has no sheets.\nFalse if book doesn't exlist.", "source": "juraj-google-style"}
{"code": "def snow_depth(self, value=999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `snow_depth`'.format(value))\n    self._snow_depth = value", "docstring": "Corresponds to IDD Field `snow_depth`\n\nArgs:\nvalue (float): value for IDD Field `snow_depth`\nUnit: cm\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def write(self, data):\n    block_remaining = (_BLOCK_SIZE - (self.__position % _BLOCK_SIZE))\n    if (block_remaining < _HEADER_LENGTH):\n        self.__writer.write(('\\x00' * block_remaining))\n        self.__position += block_remaining\n        block_remaining = _BLOCK_SIZE\n    if (block_remaining < (len(data) + _HEADER_LENGTH)):\n        first_chunk = data[:(block_remaining - _HEADER_LENGTH)]\n        self.__write_record(_RECORD_TYPE_FIRST, first_chunk)\n        data = data[len(first_chunk):]\n        while True:\n            block_remaining = (_BLOCK_SIZE - (self.__position % _BLOCK_SIZE))\n            if (block_remaining >= (len(data) + _HEADER_LENGTH)):\n                self.__write_record(_RECORD_TYPE_LAST, data)\n                break\n            else:\n                chunk = data[:(block_remaining - _HEADER_LENGTH)]\n                self.__write_record(_RECORD_TYPE_MIDDLE, chunk)\n                data = data[len(chunk):]\n    else:\n        self.__write_record(_RECORD_TYPE_FULL, data)", "docstring": "Write single record.\n\nArgs:\ndata: record data to write as string, byte array or byte sequence.", "source": "codesearchnet"}
{"code": "def license():\n    from os.path import join\n    with open(join(__path__[0], 'LICENSE.txt')) as lic:\n        print(lic.read())", "docstring": "Print the Bokeh license to the console.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def create_view(operations, operation):\n    \n    operations.execute(\"CREATE VIEW %s AS %s\" % (\n        operation.target.name,\n        operation.target.sqltext\n    ))", "docstring": "Implements ``CREATE VIEW``.\n\nArgs:\noperations: instance of ``alembic.operations.base.Operations``\noperation: instance of :class:`.ReversibleOp`\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):\n    types = [treefun(n) for n in neuron.neurites]\n    return CheckResult((types.count(NeuriteType.apical_dendrite) >= min_number))", "docstring": "Check if a neuron has apical dendrites\n\nArguments:\nneuron(Neuron): The neuron object to test\nmin_number: minimum number of apical dendrites required\ntreefun: Optional function to calculate the tree type of neuron's\nneurites\n\nReturns:\nCheckResult with result", "source": "codesearchnet"}
{"code": "def transcripts(self, build='37', hgnc_id=None):\n        \n        \n        query = {'build': build}\n        if hgnc_id:\n            query['hgnc_id'] = hgnc_id\n        \n        return self.transcript_collection.find(query)", "docstring": "Return all transcripts.\n\nIf a gene is specified return all transcripts for the gene\n\nArgs:\nbuild(str)\nhgnc_id(int)\n\nReturns:\niterable(transcript)", "source": "juraj-google-style"}
{"code": "def _bfd_multiplier(self, **kwargs):\n        \n        method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \\\n                      'bfd_interval_multiplier'\n        bfd_multiplier = getattr(self._rbridge, method_name)\n        config = bfd_multiplier(**kwargs)\n        if kwargs['delete']:\n            tag = 'multiplier'\n            config.find('.\n        return config", "docstring": "Return the BFD multiplier XML.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\nmin_tx (str): BFD transmit interval in milliseconds (300, 500, etc)\ndelete (bool): Remove the configuration if ``True``.\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def content(self):\n    as_text = (self.content_type in _content_types.UTF8_TYPES)\n    return self.get_data(as_text=as_text)", "docstring": "The request incoming data.\n\nIt automatic decodes from utf-8\n\nReturns:\n(obj): incoming data", "source": "codesearchnet"}
{"code": "def product_name(self):\n    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n    self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)\n    return ctypes.string_at(buf).decode()", "docstring": "Returns the product name of the connected J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nProduct name.", "source": "codesearchnet"}
{"code": "def chain_part_functions(fns: Sequence[PartFn], match_fns: Sequence[MatchFn] | None=None) -> PartFn:\n    return functools.partial(_chain_part_functions, _to_tuple_fns(fns, match_fns))", "docstring": "Chain the `fns` and execute them concurrently.\n\nSee file comment.\n\nArgs:\nfns: sequence of part functions to chain.\nmatch_fns: sequence of functions that return True if the part should be\nprocessed by the part function. When the part should not be processed, the\npart function will not be called and the part will be passed as is. When\nmatch_fns is not provided, all parts are processed by default.\n\nReturns:\nPart function that is a chain of the provided Sequence of functions.\n\nRaises:\nValueError: if the length of fns and match_fns is not the same (when\nmatch_fns is provided).", "source": "github-repos"}
{"code": "def get_structure_by_formula(self, formula, **kwargs):\n    structures = []\n    sql = ('select file, sg from data where formula=\"- %s -\"' % Composition(formula).hill_formula)\n    text = self.query(sql).split('\\n')\n    text.pop(0)\n    for l in text:\n        if l.strip():\n            (cod_id, sg) = l.split('\\t')\n            r = requests.get(('http:\n            try:\n                s = Structure.from_str(r.text, fmt='cif', **kwargs)\n                structures.append({'structure': s, 'cod_id': int(cod_id), 'sg': sg})\n            except Exception:\n                import warnings\n                warnings.warn(('\\nStructure.from_str failed while parsing CIF file:\\n%s' % r.text))\n                raise\n    return structures", "docstring": "Queries the COD for structures by formula. Requires mysql executable to\nbe in the path.\n\nArgs:\ncod_id (int): COD id.\nkwargs: All kwargs supported by\n:func:`pymatgen.core.structure.Structure.from_str`.\n\nReturns:\nA list of dict of the format\n[{\"structure\": Structure, \"cod_id\": cod_id, \"sg\": \"P n m a\"}]", "source": "codesearchnet"}
{"code": "def add_maps(self, parent, root_path=''):\n    for mapsource in self.map_folders[root_path]['maps']:\n        parent.append(self.get_network_link(mapsource))\n    for folder in self.map_folders[root_path]['folders']:\n        kml_folder_obj = kml_folder(folder)\n        parent.append(kml_folder_obj)\n        self.add_maps(parent=kml_folder_obj, root_path=F_SEP.join((root_path, folder)))", "docstring": "Recursively add maps in a folder hierarchy.\n\nArgs:\nparent (KMLElement): KMLElement to which we want to append child folders or maps respectively\nroot_path (str): path of 'parent'", "source": "codesearchnet"}
{"code": "def __init__(self, backend_wsgi_app, config_manager=None):\n    \n    if config_manager is None:\n      config_manager = api_config_manager.ApiConfigManager()\n    self.config_manager = config_manager\n\n    self._backend = backend_wsgi_app\n    self._dispatchers = []\n    for base_path in self._backend.base_paths:\n      self._add_dispatcher('%sexplorer/?$' % base_path,\n                           self.handle_api_explorer_request)\n      self._add_dispatcher('%sstatic/.*$' % base_path,\n                           self.handle_api_static_request)\n\n    \n    api_config_response = self.get_api_configs()\n    if api_config_response:\n      self.config_manager.process_api_config_response(api_config_response)\n    else:\n      raise api_exceptions.ApiConfigurationError('get_api_configs() returned no configs')", "docstring": "Constructor for EndpointsDispatcherMiddleware.\n\nArgs:\nbackend_wsgi_app: A WSGI server that serves the app's endpoints.\nconfig_manager: An ApiConfigManager instance that allows a caller to\nset up an existing configuration for testing.", "source": "juraj-google-style"}
{"code": "def es_get_class_defs(cls_def, cls_name):\n    rtn_dict = {key: value for (key, value) in cls_def.items() if key.startswith('kds_es')}\n    for key in rtn_dict:\n        del cls_def[key]\n    return rtn_dict", "docstring": "Reads through the class defs and gets the related es class\ndefintions\n\nArgs:\n-----\nclass_defs: RdfDataset of class definitions", "source": "codesearchnet"}
{"code": "def _buckets(data, bucket_count=None):\n    import tensorflow.compat.v1 as tf\n    if (bucket_count is None):\n        bucket_count = summary_v2.DEFAULT_BUCKET_COUNT\n    with tf.name_scope('buckets', values=[data, bucket_count]), tf.control_dependencies([tf.assert_scalar(bucket_count), tf.assert_type(bucket_count, tf.int32)]):\n        data = tf.reshape(data, shape=[(- 1)])\n        data = tf.cast(data, tf.float64)\n        is_empty = tf.equal(tf.size(input=data), 0)\n\n        def when_empty():\n            return tf.constant([], shape=(0, 3), dtype=tf.float64)\n\n        def when_nonempty():\n            min_ = tf.reduce_min(input_tensor=data)\n            max_ = tf.reduce_max(input_tensor=data)\n            range_ = (max_ - min_)\n            is_singular = tf.equal(range_, 0)\n\n            def when_nonsingular():\n                bucket_width = (range_ / tf.cast(bucket_count, tf.float64))\n                offsets = (data - min_)\n                bucket_indices = tf.cast(tf.floor((offsets / bucket_width)), dtype=tf.int32)\n                clamped_indices = tf.minimum(bucket_indices, (bucket_count - 1))\n                one_hots = tf.one_hot(clamped_indices, depth=bucket_count)\n                bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0), dtype=tf.float64)\n                edges = tf.linspace(min_, max_, (bucket_count + 1))\n                left_edges = edges[:(- 1)]\n                right_edges = edges[1:]\n                return tf.transpose(a=tf.stack([left_edges, right_edges, bucket_counts]))\n\n            def when_singular():\n                center = min_\n                bucket_starts = tf.stack([(center - 0.5)])\n                bucket_ends = tf.stack([(center + 0.5)])\n                bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])\n                return tf.transpose(a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))\n            return tf.cond(is_singular, when_singular, when_nonsingular)\n        return tf.cond(is_empty, when_empty, when_nonempty)", "docstring": "Create a TensorFlow op to group data into histogram buckets.\n\nArguments:\ndata: A `Tensor` of any shape. Must be castable to `float64`.\nbucket_count: Optional positive `int` or scalar `int32` `Tensor`.\nReturns:\nA `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is\na triple `[left_edge, right_edge, count]` for a single bucket.\nThe value of `k` is either `bucket_count` or `1` or `0`.", "source": "codesearchnet"}
{"code": "def line_on_device(\n        device: 'cirq.google.XmonDevice',\n        length: int,\n        method: LinePlacementStrategy = greedy.GreedySequenceSearchStrategy()\n) -> GridQubitLineTuple:\n    \n    return method.place_line(device, length)", "docstring": "Searches for linear sequence of qubits on device.\n\nArgs:\ndevice: Google Xmon device instance.\nlength: Desired number of qubits making up the line.\nmethod: Line placement method. Defaults to\ncirq.greedy.GreedySequenceSearchMethod.\n\nReturns:\nLine sequences search results.", "source": "juraj-google-style"}
{"code": "def ResolveSubjectDestination(subject, regexes):\n  \n  components = Components(subject)\n  if not components:\n    \n    return \"aff4\", \"\"\n  \n  path = utils.JoinPath(*[ConvertStringToFilename(x) for x in components])\n  for route in regexes:\n    m = route.match(path)\n    if m:\n      value = m.group(\"path\")\n      if value:\n        base = os.path.basename(value)\n        dirname = os.path.dirname(value)\n        return base, dirname\n  \n  return \"aff4\", \"\"", "docstring": "Returns the directory/filename where the subject will be stored.\n\nArgs:\nsubject: The subject.\nregexes: The list of regular expressions by priority.\n\nReturns:\nFile name and directory.", "source": "juraj-google-style"}
{"code": "def resize_annotation(self, annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, interpolation: 'F.InterpolationMode'=None):\n    interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST\n    ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)]\n    new_annotation = {}\n    new_annotation['size'] = target_size\n    for key, value in annotation.items():\n        if key == 'boxes':\n            boxes = value\n            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device)\n            new_annotation['boxes'] = scaled_boxes\n        elif key == 'area':\n            area = value\n            scaled_area = area * (ratio_width * ratio_height)\n            new_annotation['area'] = scaled_area\n        elif key == 'masks':\n            masks = value[:, None]\n            masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks]\n            masks = torch.stack(masks).to(torch.float32)\n            masks = masks[:, 0] > threshold\n            new_annotation['masks'] = masks\n        elif key == 'size':\n            new_annotation['size'] = target_size\n        else:\n            new_annotation[key] = value\n    return new_annotation", "docstring": "Resizes an annotation to a target size.\n\nArgs:\nannotation (`Dict[str, Any]`):\nThe annotation dictionary.\norig_size (`Tuple[int, int]`):\nThe original size of the input image.\ntarget_size (`Tuple[int, int]`):\nThe target size of the image, as returned by the preprocessing `resize` step.\nthreshold (`float`, *optional*, defaults to 0.5):\nThe threshold used to binarize the segmentation masks.\nresample (`InterpolationMode`, defaults to `InterpolationMode.NEAREST`):\nThe resampling filter to use when resizing the masks.", "source": "github-repos"}
{"code": "def range(self, location, distance):\n        \n        return (segment.range(location, distance) for segment in self)", "docstring": "Test whether locations are within a given range of ``location``.\n\nArgs:\nlocation (Point): Location to test range against\ndistance (float): Distance to test location is within\n\nReturns:\nlist of list of Point: Groups of points in range per segment", "source": "juraj-google-style"}
{"code": "def start_apppool(name):\n    ps_cmd = ['Start-WebAppPool', \"'{0}'\".format(name)]\n    cmd_ret = _srvmgr(ps_cmd)\n    return (cmd_ret['retcode'] == 0)", "docstring": "Start an IIS application pool.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the App Pool to start.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.start_apppool name='MyTestPool'", "source": "codesearchnet"}
{"code": "def __init__(self, shape=None, dtype=dtypes.float32):\n    self._shape = tensor_shape.as_shape(shape)\n    self._dtype = dtypes.as_dtype(dtype)", "docstring": "Constructs a type specification for a `tf.sparse.SparseTensor`.\n\nArgs:\nshape: The dense shape of the `SparseTensor`, or `None` to allow any dense\nshape.\ndtype: `tf.DType` of values in the `SparseTensor`.", "source": "github-repos"}
{"code": "def parse_request() -> Dict[(str, str)]:\n    in_lines = sys.stdin.readlines()\n    LOGGER.debug('Received request \"%s\"', in_lines)\n    request = {}\n    for line in in_lines:\n        if (not line.strip()):\n            continue\n        parts = line.split('=', 1)\n        assert (len(parts) == 2)\n        request[parts[0].strip()] = parts[1].strip()\n    return request", "docstring": "Parse the request of the git credential API from stdin.\n\nReturns:\nA dictionary with all key-value pairs of the request", "source": "codesearchnet"}
{"code": "def write_markdown_to_file(self, f):\n    \n    print(\"---\", file=f)\n    print(\"---\", file=f)\n    print(\"<!-- This file is machine generated: DO NOT EDIT! -->\", file=f)\n    print(\"\", file=f)\n    \n    \n    print(\"\n    if self._prefix:\n      print(self._prefix, file=f)\n    print(\"[TOC]\", file=f)\n    print(\"\", file=f)\n    if self._module is not None:\n      self._write_module_markdown_to_file(f, self._module)", "docstring": "Prints this library to file `f`.\n\nArgs:\nf: File to write to.\n\nReturns:\nDictionary of documented members.", "source": "juraj-google-style"}
{"code": "def register_model(self, *fields, **kw):\n    \n\n    index         = PonyWhooshIndex(pw=self)\n    index._kw     = kw\n    index._fields = fields\n\n    def inner(model):\n      \n\n      index._name = model._table_\n      if not index._name:\n        index._name  = model.__name__\n\n      self._entities[index._name]     = model\n      index._schema_attrs             = {}\n      index._primary_key_is_composite = model._pk_is_composite_\n      index._primary_key              = [f.name for f in model._pk_attrs_]\n      index._primary_key_type         = 'list'\n      type_attribute                  = {}\n\n      for field in model._attrs_:\n        if field.is_relation:\n          continue\n\n        assert hasattr(field, \"name\") and hasattr(field, \"py_type\")\n\n        fname = field.name\n        if hasattr(field.name, \"__name__\"):\n            fname = field.name.__name__\n\n        stored = kw.get(\"stored\", False)\n        if fname in index._primary_key:\n            kw[\"stored\"] = True\n        \n        ftype = field.py_type.__name__\n        if ftype in ['date', 'datetime', 'datetime.date']:\n            kw[\"stored\"] = stored\n            continue\n\n        fwhoosh = fwhoosh = whoosh.fields.TEXT(**kw)\n\n        if field == model._pk_:\n            index._primary_key_type = ftype\n            fwhoosh = whoosh.fields.ID(stored=True, unique=True)\n\n        if fname in index._fields:\n          if not field.is_string:\n            if ftype in ['int', 'float']:\n              fwhoosh = whoosh.fields.NUMERIC(**kw)\n            elif ftype == 'bool':\n              fwhoosh = whoosh.fields.BOOLEAN(stored=True)\n\n        type_attribute[fname]      = ftype\n        index._schema_attrs[fname] = fwhoosh\n        kw[\"stored\"]               = stored\n\n      index._schema = whoosh.fields.Schema(**index._schema_attrs)\n\n      self.register_index(index)\n\n      def _middle_save_(obj, status):\n        \n\n        writer   = index._whoosh.writer(timeout=self.writer_timeout)\n        dict_obj = obj.to_dict()\n\n        def dumps(v):\n          if sys.version_info[0] < 3:\n            if isinstance(v, int):\n              return unicode(v)\n            if isinstance(v, float):\n              return '%.9f' % v\n            return unicode(v)\n          else:\n            if isinstance(v, int):\n              return str(v)\n            if isinstance(v, float):\n              return int(float(v))\n            return str(v)\n\n        attrs = {}\n        if sys.version_info[0] < 3:\n          for k, v in dict_obj.iteritems():\n            if k in index._schema_attrs.keys():\n              attrs[k] = dumps(v)\n        else:\n          for k, v in dict_obj.items():\n            if k in list(index._schema_attrs.keys()):\n              attrs[k] = dumps(v)\n\n        if status == 'inserted':\n          writer.add_document(**attrs)\n        elif status == 'updated':\n          writer.update_document(**attrs)\n        elif status in set(['marked_to_delete', 'deleted', 'cancelled']):\n          writer.delete_by_term(primary, attrs[primary])\n\n        writer.commit()\n        return obj._after_save_\n\n      index._model       = model\n      model._after_save_ = _middle_save_\n      model._pw_index_   = index\n      model.search       =  model._pw_index_.search\n      return model\n    return inner", "docstring": "Registers a single model for fulltext search. 
This basically creates\na simple PonyWhoosh.Index for the model and calls self.register_index on it.\n\nArgs:\n*fields: all the fields indexed from the model.\n**kw: The options for each field, sortedby, stored ...", "source": "juraj-google-style"}
{"code": "def select(self, index_or_name: Union[int, str, List[str]]) -> Union[int, str]:\n    selected_name = index_or_name if isinstance(index_or_name, str) else None\n    index = -1\n    if isinstance(index_or_name, list):\n        for name in index_or_name:\n            index = self.indexof(name)\n            if index != -1:\n                selected_name = name\n                break\n    else:\n        index = self.indexof(index_or_name)\n    if index == -1:\n        raise ValueError(f'Tab not found: {index_or_name!r}')\n    self._sync_members(selected=index)\n    self._run_javascript(f\"\\n        const tabButtons = document.querySelectorAll('\n    return selected_name or index", "docstring": "Selects a tab identified by an index or name.\n\nArgs:\nindex_or_name: The index or name of the tab to select. If a list of names\nis provided, the first name in the list that is found will be selected.\n\nReturns:\nThe index (if the index was provided) or name of the selected tab.", "source": "github-repos"}
{"code": "def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):\n    \n    import matplotlib\n\n    \n    matplotlib.use('agg')\n\n    import matplotlib.pyplot as plt\n    from matplotlib import patches\n    import numpy as np\n\n    \n    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):\n        raise ValueError(\"Cannot handle feature of type \" + feature.geometry.type)\n\n    \n    dpi = 100\n\n    \n    \n    fig = plt.figure(frameon=False, dpi=dpi, )\n    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))\n    ax = plt.Axes(fig, [0., 0., 1., 1.])\n    ax.set_axis_off()\n    \n    ax.set_xlim([0, shape[1]])\n    \n    ax.set_ylim([0, shape[0]])\n    fig.add_axes(ax)\n    \n\n    \n    if feature.geometry.type == 'Polygon':\n        coords = [feature.geometry.coordinates]\n    else:\n        coords = feature.geometry.coordinates\n\n    for poly_coords in coords:\n        \n        \n        for i, outline in enumerate(poly_coords):\n            \n            \n            value = 0. if i == 0 else 1.\n\n            \n            outline = np.array(outline)\n            xs = lon_idx(outline[:, 0])\n            ys = lat_idx(outline[:, 1])\n\n            \n            poly = patches.Polygon(list(zip(xs, ys)),\n                                   facecolor=(value, value, value),\n                                   edgecolor='none',\n                                   antialiased=True)\n            ax.add_patch(poly)\n\n    \n    \n    fig.canvas.draw()\n    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n    \n    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]\n    \n\n    \n    assert data.shape[0] == shape[0]\n    assert data.shape[1] == shape[1]\n\n    \n    data = 1. - data.astype(float) / 255.  \n\n    \n    data = data[::-1, :]\n\n    \n    plt.close('all')\n\n    return data", "docstring": "Convert a GeoJSON polygon feature to a numpy array\n\nArgs:\nfeature (pygeoj.Feature): polygon feature to draw\nshape (tuple(int, int)): shape of 2D target numpy array to draw polygon in\nlat_idx (func): function converting a latitude to the (fractional) row index in the map\nlon_idx (func): function converting a longitude to the (fractional) column index in the map\n\nReturns:\nnp.array: mask, background is zero, foreground is one", "source": "juraj-google-style"}
{"code": "def set_headline(self, level, message, timestamp=None, now_reference=None):\n        \n\n        if self.headline is not None and self.headline.message == message:\n            self.headline.created = monotonic()\n            self.headline.count += 1\n            return\n\n        msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)\n        self.headline = msg_object\n        self._last_message_id += 1", "docstring": "Set the persistent headline message for this service.\n\nArgs:\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents\ntimestamp (float): An optional monotonic value in seconds for when the message was created\nnow_reference (float): If timestamp is not relative to monotonic() as called from this\nmodule then this should be now() as seen by whoever created the timestamp.", "source": "juraj-google-style"}
{"code": "def set_description(self, vrf_name, description=None, default=False,\n                        disable=False):\n        \n        cmds = self.command_builder('description', value=description,\n                                    default=default, disable=disable)\n        return self.configure_vrf(vrf_name, cmds)", "docstring": "Configures the VRF description\n\nArgs:\nvrf_name (str): The VRF name to configure\ndescription(str): The string to set the vrf description to\ndefault (bool): Configures the vrf description to its default value\ndisable (bool): Negates the vrf description\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "juraj-google-style"}
{"code": "def expected_error(self, expected: str) -> str:\n        \n\n        if self.finished:\n            return 'Expected {} but found end of source'.format(expected)\n        else:\n            return 'Expected {} but found {} at index {}'.format(expected, self.next_token(), self.position)", "docstring": "Generate a basic error to include the current state.\n\nA parser can supply only a representation of what it is expecting to\nthis method and the reader will provide the context, including the index\nto the error.\n\nArgs:\nexpected: A representation of what the parser is currently expecting\n\nReturns:\nA full error message", "source": "juraj-google-style"}
{"code": "def find_modules(module_path):\n    \n    if module_path.is_file():\n        if module_path.suffix == '.py':\n            yield module_path\n    elif module_path.is_dir():\n        pyfiles = glob.glob('{}*.py'.format(module_path), recursive=True)\n        yield from (Path(pyfile) for pyfile in pyfiles)", "docstring": "Find all modules in the module (possibly package) represented by `module_path`.\n\nArgs:\nmodule_path: A pathlib.Path to a Python package or module.\n\nReturns: An iterable of paths Python modules (i.e. *py files).", "source": "juraj-google-style"}
{"code": "def find_matching_model_files(check_all: bool=False):\n    module_diff_files = None\n    if not check_all:\n        module_diff_files = set()\n        repo = Repo(PATH_TO_REPO)\n        for modified_file_diff in repo.index.diff(None):\n            if modified_file_diff.a_path.startswith('src/transformers'):\n                module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))\n        for modified_file_diff in repo.index.diff(repo.refs.main.commit):\n            if modified_file_diff.a_path.startswith('src/transformers'):\n                module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))\n        if len(module_diff_files) == 0:\n            return None\n    modeling_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, 'modelsmodeling_**')\n    potential_files = glob.glob(modeling_glob_pattern)\n    image_processing_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, 'modelsimage_processing_*_fast.py')\n    potential_files += glob.glob(image_processing_glob_pattern)\n    exclude_substrings = ['modeling_tf_', 'modeling_flax_']\n    matching_files = []\n    for file_path in potential_files:\n        if os.path.isfile(file_path):\n            filename = os.path.basename(file_path)\n            is_excluded = any((exclude in filename for exclude in exclude_substrings))\n            if not is_excluded:\n                matching_files.append(file_path)\n    if not check_all:\n        matching_files = sorted([file for file in matching_files if file in module_diff_files])\n    print('    Checking auto_docstrings in the following files:' + '\\n    - ' + '\\n    - '.join(matching_files))\n    return matching_files", "docstring": "Find all model files in the transformers repo that should be checked for @auto_docstring,\nexcluding files with certain substrings.\nReturns:\nList of file paths.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\nobject_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def __init__(self, coder=coders.BytesCoder(), compression_type=CompressionTypes.AUTO, with_filename=False):\n    super().__init__()\n    source_from_file = partial(_create_tfrecordio_source, compression_type=compression_type, coder=coder)\n    self._read_all_files = ReadAllFiles(splittable=False, compression_type=compression_type, desired_bundle_size=0, min_bundle_size=0, source_from_file=source_from_file, with_filename=with_filename)", "docstring": "Initialize the ``ReadAllFromTFRecord`` transform.\n\nArgs:\ncoder: Coder used to decode each record.\ncompression_type: Used to handle compressed input files. Default value\nis CompressionTypes.AUTO, in which case the file_path's extension will\nbe used to detect the compression.\nwith_filename: If True, returns a Key Value with the key being the file\nname and the value being the actual data. If False, it only returns\nthe data.", "source": "github-repos"}
{"code": "def _set_textarea(el, value):\n    if isinstance(value, dict):\n        el.text = value['val']\n    elif (type(value) in [list, tuple]):\n        el.text = '\\n\\n'.join((('-- %s --\\n%s' % (item['source'], item['val'])) for item in value))\n    else:\n        el.text = value", "docstring": "Set content of given textarea element `el` to `value`.\n\nArgs:\nel (obj): Reference to textarea element you wish to set.\nvalue (obj/list): Value to which the `el` will be set.", "source": "codesearchnet"}
{"code": "def get_input(self, name, ds):\n    columns = self.inputs.get(name)\n    df = ds.get_dataframe()\n    for column in columns:\n        if (column not in df.columns):\n            df[column] = self.defaults.get(column)\n    return df[columns]", "docstring": "Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.\n\nArgs:\nname (str): The name of the input.\nds (openflow.DataSource): The DataSource that will feed the data.\n\nReturns:\npandas.DataFrame: The content of the input.", "source": "codesearchnet"}
{"code": "def zeros(shape, dtype=None):\n    return backend.numpy.zeros(shape, dtype=dtype)", "docstring": "Return a new tensor of given shape and type, filled with zeros.\n\nArgs:\nshape: Shape of the new tensor.\ndtype: Desired data type of the tensor.\n\nReturns:\nTensor of zeros with the given shape and dtype.", "source": "github-repos"}
{"code": "def try_storage(self, identifier, req, resp, resource, uri_kwargs):\n        \n        if identifier is None:\n            user = None\n\n        \n        \n        elif self.user_storage is not None:\n            user = self.user_storage.get_user(\n                self, identifier, req, resp, resource, uri_kwargs\n            )\n\n        \n        \n        \n        \n        elif self.user_storage is None and not self.only_with_storage:\n            user = {\n                'identified_with': self,\n                'identifier': identifier\n            }\n\n        else:  \n            \n            \n            user = None\n\n        return user", "docstring": "Try to find user in configured user storage object.\n\nArgs:\nidentifier: User identifier.\n\nReturns:\nuser object.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=None):\n        \n        super(QueryFunction, self).__init__(\n            QueryFunctionEnum, value, Tags.QUERY_FUNCTION)", "docstring": "Construct a QueryFunction object.\n\nArgs:\nvalue (QueryFunction enum): A QueryFunction enumeration value,\n(e.g., QueryFunction.QUERY_OPERATIONS). Optional, default to\nNone.", "source": "juraj-google-style"}
{"code": "def compare_modules(file_, imports):\n    \n    modules = parse_requirements(file_)\n\n    imports = [imports[i][\"name\"] for i in range(len(imports))]\n    modules = [modules[i][\"name\"] for i in range(len(modules))]\n    modules_not_imported = set(modules) - set(imports)\n\n    return modules_not_imported", "docstring": "Compare modules in a file to imported modules in a project.\n\nArgs:\nfile_ (str): File to parse for modules to be compared.\nimports (tuple): Modules being imported in the project.\n\nReturns:\ntuple: The modules not imported in the project, but do exist in the\nspecified file.", "source": "juraj-google-style"}
{"code": "def load_text_file(self, filename, encoding='utf-8', tokenizer=None):\n    with load_file(filename, encoding=encoding) as data:\n        self.load_text(data, tokenizer)", "docstring": "Load in a text file from which to generate a word frequency list\n\nArgs:\nfilename (str): The filepath to the text file to be loaded\nencoding (str): The encoding of the text file\ntokenizer (function): The function to use to tokenize a string", "source": "codesearchnet"}
{"code": "def _ReadFileEntry(self, file_object, file_offset):\n    \n    if self.file_format == 'bin-big-endian':\n      data_type_map = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY\n      file_entry_data_size = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE\n    elif self.file_format == 'bin-little-endian':\n      data_type_map = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY\n      file_entry_data_size = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE\n    elif self.file_format == 'odc':\n      data_type_map = self._CPIO_PORTABLE_ASCII_FILE_ENTRY\n      file_entry_data_size = self._CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE\n    elif self.file_format in ('crc', 'newc'):\n      data_type_map = self._CPIO_NEW_ASCII_FILE_ENTRY\n      file_entry_data_size = self._CPIO_NEW_ASCII_FILE_ENTRY_SIZE\n\n    file_entry = self._ReadStructure(\n        file_object, file_offset, file_entry_data_size, data_type_map,\n        'file entry')\n\n    file_offset += file_entry_data_size\n\n    if self.file_format in ('bin-big-endian', 'bin-little-endian'):\n      file_entry.modification_time = (\n          (file_entry.modification_time.upper << 16) |\n          file_entry.modification_time.lower)\n\n      file_entry.file_size = (\n          (file_entry.file_size.upper << 16) | file_entry.file_size.lower)\n\n    if self.file_format == 'odc':\n      for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:\n        value = getattr(file_entry, attribute_name, None)\n        try:\n          value = int(value, 8)\n        except ValueError:\n          raise errors.FileFormatError(\n              'Unable to convert attribute: {0:s} into an integer'.format(\n                  attribute_name))\n\n        value = setattr(file_entry, attribute_name, value)\n\n    elif self.file_format in ('crc', 'newc'):\n      for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:\n        value = getattr(file_entry, attribute_name, None)\n        try:\n          value = int(value, 16)\n        except ValueError:\n          raise errors.FileFormatError(\n              'Unable to convert attribute: {0:s} into an integer'.format(\n                  attribute_name))\n\n        value = setattr(file_entry, attribute_name, value)\n\n    path_data = file_object.read(file_entry.path_size)\n\n    file_offset += file_entry.path_size\n\n    \n    path = path_data.decode('ascii')\n    path, _, _ = path.partition('\\x00')\n\n    if self.file_format in ('bin-big-endian', 'bin-little-endian'):\n      padding_size = file_offset % 2\n      if padding_size > 0:\n        padding_size = 2 - padding_size\n\n    elif self.file_format == 'odc':\n      padding_size = 0\n\n    elif self.file_format in ('crc', 'newc'):\n      padding_size = file_offset % 4\n      if padding_size > 0:\n        padding_size = 4 - padding_size\n\n    file_offset += padding_size\n\n    archive_file_entry = CPIOArchiveFileEntry()\n\n    archive_file_entry.data_offset = file_offset\n    archive_file_entry.data_size = file_entry.file_size\n    archive_file_entry.group_identifier = file_entry.group_identifier\n    archive_file_entry.inode_number = file_entry.inode_number\n    archive_file_entry.modification_time = file_entry.modification_time\n    archive_file_entry.path = path\n    archive_file_entry.mode = file_entry.mode\n    archive_file_entry.size = (\n        file_entry_data_size + file_entry.path_size + padding_size +\n        file_entry.file_size)\n    archive_file_entry.user_identifier = file_entry.user_identifier\n\n    file_offset += file_entry.file_size\n\n    if self.file_format in 
('bin-big-endian', 'bin-little-endian'):\n      padding_size = file_offset % 2\n      if padding_size > 0:\n        padding_size = 2 - padding_size\n\n    elif self.file_format == 'odc':\n      padding_size = 0\n\n    elif self.file_format in ('crc', 'newc'):\n      padding_size = file_offset % 4\n      if padding_size > 0:\n        padding_size = 4 - padding_size\n\n    if padding_size > 0:\n      archive_file_entry.size += padding_size\n\n    return archive_file_entry", "docstring": "Reads a file entry.\n\nArgs:\nfile_object (FileIO): file-like object.\nfile_offset (int): offset of the data relative from the start of\nthe file-like object.\n\nReturns:\nCPIOArchiveFileEntry: a file entry.\n\nRaises:\nFileFormatError: if the file entry cannot be read.", "source": "juraj-google-style"}
{"code": "def random_masking(self, sequence: tf.Tensor, noise: tf.Tensor | None=None):\n    batch_size, seq_length, dim = shape_list(sequence)\n    len_keep = int(seq_length * (1 - self.config.mask_ratio))\n    if noise is None:\n        noise = tf.random.uniform(shape=(batch_size, seq_length), minval=0.0, maxval=1.0)\n    ids_shuffle = tf.argsort(noise, axis=1)\n    ids_restore = tf.argsort(ids_shuffle, axis=1)\n    ids_keep = ids_shuffle[:, :len_keep]\n    sequence_unmasked = tf.gather(sequence, axis=1, batch_dims=1, indices=ids_keep)\n    mask_keep = tf.zeros((batch_size, len_keep))\n    mask_remove = tf.ones((batch_size, seq_length - len_keep))\n    mask = tf.concat([mask_keep, mask_remove], axis=-1)\n    mask = tf.gather(mask, axis=1, batch_dims=1, indices=ids_restore)\n    return (sequence_unmasked, mask, ids_restore)", "docstring": "Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random\nnoise.\n\nArgs:\nsequence (`tf.Tensor` of shape `(batch_size, sequence_length, dim)`)\nnoise (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*) which is\nmainly used for testing purposes to control randomness and maintain the reproducibility", "source": "github-repos"}
{"code": "def _EvaluateExpression(frame, expression):\n    try:\n        code = compile(expression, '<watched_expression>', 'eval')\n    except (TypeError, ValueError) as e:\n        return (False, {'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': {'format': 'Invalid expression', 'parameters': [str(e)]}})\n    except SyntaxError as e:\n        return (False, {'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': {'format': 'Expression could not be compiled: $0', 'parameters': [e.msg]}})\n    try:\n        return (True, native.CallImmutable(frame, code))\n    except BaseException as e:\n        return (False, {'isError': True, 'refersTo': 'VARIABLE_VALUE', 'description': {'format': 'Exception occurred: $0', 'parameters': [str(e)]}})", "docstring": "Compiles and evaluates watched expression.\n\nArgs:\nframe: evaluation context.\nexpression: watched expression to compile and evaluate.\n\nReturns:\n(False, status) on error or (True, value) on success.", "source": "codesearchnet"}
{"code": "def _maxSizeCheck(cls, obj):\n    fail = False\n    size = 0\n    if isinstance(obj, numbers.Number):\n        if (obj > constants.MAX_FRAME_SIZE):\n            fail = True\n            size = obj\n    elif hasattr(obj, '__len__'):\n        size = len(obj)\n        fail = (size > constants.MAX_FRAME_SIZE)\n    if fail:\n        raise MaxSizeException(('Frame size %s > %s (MAX_FRAME_SIZE)' % (size, constants.MAX_FRAME_SIZE)))", "docstring": "Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE\n\nArgs:\nobj (numbers.Number or collection):\n\nRaises:\n:class:`fileseq.exceptions.MaxSizeException`:", "source": "codesearchnet"}
{"code": "def AddStopTimeObject(self, stoptime, schedule=None, problems=None):\n    \n    if schedule is None:\n      schedule = self._schedule\n    if schedule is None:\n      warnings.warn(\"No longer supported. _schedule attribute is used to get \"\n                    \"stop_times table\", DeprecationWarning)\n    if problems is None:\n      problems = schedule.problem_reporter\n\n    new_secs = stoptime.GetTimeSecs()\n    cursor = schedule._connection.cursor()\n    cursor.execute(\"SELECT max(stop_sequence), max(arrival_secs), \"\n                   \"max(departure_secs) FROM stop_times WHERE trip_id=?\",\n                   (self.trip_id,))\n    row = cursor.fetchone()\n    if row[0] is None:\n      \n      stoptime.stop_sequence = 1\n      if new_secs == None:\n        problems.OtherProblem(\n            'No time for first StopTime of trip_id \"%s\"' % (self.trip_id,))\n    else:\n      stoptime.stop_sequence = row[0] + 1\n      prev_secs = max(row[1], row[2])\n      if new_secs != None and new_secs < prev_secs:\n        problems.OtherProblem(\n            'out of order stop time for stop_id=%s trip_id=%s %s < %s' %\n            (util.EncodeUnicode(stoptime.stop_id),\n             util.EncodeUnicode(self.trip_id),\n             util.FormatSecondsSinceMidnight(new_secs),\n             util.FormatSecondsSinceMidnight(prev_secs)))\n    self._AddStopTimeObjectUnordered(stoptime, schedule)", "docstring": "Add a StopTime object to the end of this trip.\n\nArgs:\nstoptime: A StopTime object. Should not be reused in multiple trips.\nschedule: Schedule object containing this trip which must be\npassed to Trip.__init__ or here\nproblems: ProblemReporter object for validating the StopTime in its new\nhome\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _kl_beta_beta(d1, d2, name=None):\n\n    def delta(fn, is_property=True):\n        fn1 = getattr(d1, fn)\n        fn2 = getattr(d2, fn)\n        return fn2 - fn1 if is_property else fn2() - fn1()\n    with ops.name_scope(name, 'kl_beta_beta', values=[d1.concentration1, d1.concentration0, d1.total_concentration, d2.concentration1, d2.concentration0, d2.total_concentration]):\n        return delta('_log_normalization', is_property=False) - math_ops.digamma(d1.concentration1) * delta('concentration1') - math_ops.digamma(d1.concentration0) * delta('concentration0') + math_ops.digamma(d1.total_concentration) * delta('total_concentration')", "docstring": "Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.\n\nArgs:\nd1: instance of a Beta distribution object.\nd2: instance of a Beta distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_beta_beta\".\n\nReturns:\nBatchwise KL(d1 || d2)", "source": "github-repos"}
{"code": "def init(name, *args):\n    matcher = get(name)\n    if (not matcher):\n        raise ValueError('Cannot find matcher: {}'.format(name))\n    return matcher(*args)", "docstring": "Initializes a matcher instance passing variadic arguments to\nits constructor. Acts as a delegator proxy.\n\nArguments:\nname (str): matcher class name or alias to execute.\n*args (mixed): variadic argument\n\nReturns:\nmatcher: matcher instance.\n\nRaises:\nValueError: if matcher was not found.", "source": "codesearchnet"}
{"code": "def LogHttpAdminUIAccess(self, request, response):\n    event_id = self.GetNewEventId()\n    api_method = response.headers.get('X-API-Method', 'unknown')\n    api_reason = response.headers.get('X-GRR-Reason', 'none')\n    log_msg = ('%s API call [%s] by %s (reason: %s): %s [%d]' % (event_id, api_method, request.user, api_reason, request.full_path, response.status_code))\n    logging.info(log_msg)\n    if (response.headers.get('X-No-Log') != 'True'):\n        if data_store.RelationalDBEnabled():\n            entry = rdf_objects.APIAuditEntry.FromHttpRequestResponse(request, response)\n            data_store.REL_DB.WriteAPIAuditEntry(entry)", "docstring": "Log an http based api call.\n\nArgs:\nrequest: A WSGI request object.\nresponse: A WSGI response object.", "source": "codesearchnet"}
{"code": "def apply_to_structure(self, structure):\n    def_struct = structure.copy()\n    old_latt = def_struct.lattice.matrix\n    new_latt = np.transpose(np.dot(self, np.transpose(old_latt)))\n    def_struct.lattice = Lattice(new_latt)\n    return def_struct", "docstring": "Apply the deformation gradient to a structure.\n\nArgs:\nstructure (Structure object): the structure object to\nbe modified by the deformation", "source": "codesearchnet"}
{"code": "def _load_hdf5(self, filename, parent_level='CellpyData'):\n    if (not os.path.isfile(filename)):\n        self.logger.info(f'file does not exist: {filename}')\n        raise IOError\n    store = pd.HDFStore(filename)\n    required_keys = ['dfdata', 'dfsummary', 'info']\n    required_keys = [((('/' + parent_level) + '/') + _) for _ in required_keys]\n    for key in required_keys:\n        if (key not in store.keys()):\n            self.logger.info(f'This hdf-file is not good enough - at least one key is missing: {key}')\n            raise Exception(f'OH MY GOD! At least one crucial keyis missing {key}!')\n    self.logger.debug(f'Keys in current hdf5-file: {store.keys()}')\n    data = DataSet()\n    if (parent_level != 'CellpyData'):\n        self.logger.debug('Using non-default parent label for the hdf-store: {}'.format(parent_level))\n    infotable = store.select((parent_level + '/info'))\n    try:\n        data.cellpy_file_version = self._extract_from_dict(infotable, 'cellpy_file_version')\n    except Exception as e:\n        data.cellpy_file_version = 0\n        warnings.warn(f'Unhandled exception raised: {e}')\n    if (data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION):\n        raise WrongFileVersion\n    if (data.cellpy_file_version > CELLPY_FILE_VERSION):\n        raise WrongFileVersion\n    data.dfsummary = store.select((parent_level + '/dfsummary'))\n    data.dfdata = store.select((parent_level + '/dfdata'))\n    try:\n        data.step_table = store.select((parent_level + '/step_table'))\n    except Exception as e:\n        self.logging.debug('could not get step_table from cellpy-file')\n        data.step_table = pd.DataFrame()\n        warnings.warn(f'Unhandled exception raised: {e}')\n    try:\n        fidtable = store.select((parent_level + '/fidtable'))\n        fidtable_selected = True\n    except Exception as e:\n        self.logging.debug('could not get fid-table from cellpy-file')\n        fidtable = []\n        warnings.warn('no fidtable - you should update your hdf5-file')\n        fidtable_selected = False\n    self.logger.debug('  h5')\n    newtests = []\n    data = self._load_infotable(data, infotable, filename)\n    if fidtable_selected:\n        (data.raw_data_files, data.raw_data_files_length) = self._convert2fid_list(fidtable)\n    else:\n        data.raw_data_files = None\n        data.raw_data_files_length = None\n    newtests.append(data)\n    store.close()\n    return newtests", "docstring": "Load a cellpy-file.\n\nArgs:\nfilename (str): Name of the cellpy file.\nparent_level (str) (optional): name of the parent level\n(defaults to \"CellpyData\")\n\nReturns:\nloaded datasets (DataSet-object)", "source": "codesearchnet"}
{"code": "def user_activity_stats_by_date(self, username, date, grouped=None):\n    request_url = '{}/api/0/user/{}/activity/{}'.format(self.instance, username, date)\n    payload = {}\n    if (username is not None):\n        payload['username'] = username\n    if (date is not None):\n        payload['date'] = date\n    if (grouped is not None):\n        payload['grouped'] = grouped\n    return_value = self._call_api(request_url, params=payload)\n    return return_value['activities']", "docstring": "Retrieve activity information about a specific user on the specified date.\n\nParams:\nusername (string): filters the username of the user whose activity you are interested in.\ndate (string): filters by the date of interest, best provided in ISO format: YYYY-MM-DD\ngrouped (boolean): filters whether or not to group the commits\n\nReturns:\nlist: A list of activities done by a given user on some particular\ndate for all the projects for given Pagure instance.", "source": "codesearchnet"}
{"code": "def __init__(self, datastore, serializer=None):\n    \n    super(SerializerShimDatastore, self).__init__(datastore)\n\n    if serializer:\n      self.serializer = serializer\n\n    \n    test = { 'value': repr(self) }\n    errstr = 'Serializer error: serialized value does not match original'\n    assert self.serializer.loads(self.serializer.dumps(test)) == test, errstr", "docstring": "Initializes internals and tests the serializer.\n\nArgs:\ndatastore: a child datastore for the ShimDatastore superclass.\n\nserializer: a serializer object (responds to loads and dumps).", "source": "juraj-google-style"}
{"code": "def power(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Power().symbolic_call(x1, x2)\n    return backend.numpy.power(x1, x2)", "docstring": "First tensor elements raised to powers from second tensor, element-wise.\n\nArgs:\nx1: The bases.\nx2: The exponents.\n\nReturns:\nOutput tensor, the bases in `x1` raised to the exponents in `x2`.", "source": "github-repos"}
{"code": "def _ParseIdentifierMappingRecord(self, parser_mediator, table_name, esedb_record):\n    record_values = self._GetRecordValues(parser_mediator, table_name, esedb_record)\n    identifier = record_values.get('IdIndex', None)\n    if (identifier is None):\n        parser_mediator.ProduceExtractionWarning('IdIndex value missing from table: SruDbIdMapTable')\n        return (None, None)\n    identifier_type = record_values.get('IdType', None)\n    if (identifier_type not in self._SUPPORTED_IDENTIFIER_TYPES):\n        parser_mediator.ProduceExtractionWarning('unsupported IdType value: {0!s} in table: SruDbIdMapTable'.format(identifier_type))\n        return (None, None)\n    mapped_value = record_values.get('IdBlob', None)\n    if (mapped_value is None):\n        parser_mediator.ProduceExtractionWarning('IdBlob value missing from table: SruDbIdMapTable')\n        return (None, None)\n    if (identifier_type == 3):\n        try:\n            fwnt_identifier = pyfwnt.security_identifier()\n            fwnt_identifier.copy_from_byte_stream(mapped_value)\n            mapped_value = fwnt_identifier.get_string()\n        except IOError:\n            parser_mediator.ProduceExtractionWarning('unable to decode IdBlob value as Windows NT security identifier')\n            return (None, None)\n    else:\n        try:\n            mapped_value = mapped_value.decode('utf-16le').rstrip('\\x00')\n        except UnicodeDecodeError:\n            parser_mediator.ProduceExtractionWarning('unable to decode IdBlob value as UTF-16 little-endian string')\n            return (None, None)\n    return (identifier, mapped_value)", "docstring": "Extracts an identifier mapping from a SruDbIdMapTable record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntable_name (str): name of the table the record is stored in.\nesedb_record (pyesedb.record): record.\n\nReturns:\ntuple[int, str]: numeric identifier and its string representation or\nNone, None if no identifier mapping can be retrieved from the record.", "source": "codesearchnet"}
{"code": "def get_processors(processor_cat, prop_defs, data_attr=None):\n    \n    processor_defs = prop_defs.get(processor_cat,[])\n    processor_list = []\n    for processor in processor_defs:\n        proc_class = PropertyProcessor[processor['rdf_type'][0]]\n        processor_list.append(proc_class(processor.get('kds_params', [{}]),\n                                         data_attr))\n    return processor_list", "docstring": "reads the prop defs and adds applicable processors for the property\n\nArgs:\nprocessor_cat(str): The category of processors to retreive\nprop_defs: property defintions as defined by the rdf defintions\ndata_attr: the attr to manipulate during processing.\n\nReturns:\nlist: a list of processors", "source": "juraj-google-style"}
{"code": "def get_string(strings: Sequence[str],\n               prefix: str,\n               ignoreleadingcolon: bool = False,\n               precedingline: str = \"\") -> Optional[str]:\n    \n    s = get_what_follows(strings, prefix, precedingline=precedingline)\n    if ignoreleadingcolon:\n        f = s.find(\":\")\n        if f != -1:\n            s = s[f+1:].strip()\n    if len(s) == 0:\n        return None\n    return s", "docstring": "Find a string as per :func:`get_what_follows`.\n\nArgs:\nstrings: see :func:`get_what_follows`\nprefix: see :func:`get_what_follows`\nignoreleadingcolon: if ``True``, restrict the result to what comes\nafter its first colon (and whitespace-strip that)\nprecedingline: see :func:`get_what_follows`\n\nReturns:\nthe line fragment", "source": "juraj-google-style"}
{"code": "def usage(shorthelp=False, writeto_stdout=False, detailed_error=None,\n          exitcode=None):\n  \n  if writeto_stdout:\n    stdfile = sys.stdout\n  else:\n    stdfile = sys.stderr\n\n  doc = sys.modules['__main__'].__doc__\n  if not doc:\n    doc = '\\nUSAGE: %s [flags]\\n' % sys.argv[0]\n    doc = flags.text_wrap(doc, indent='       ', firstline_indent='')\n  else:\n    \n    num_specifiers = doc.count('%') - 2 * doc.count('%%')\n    try:\n      doc %= (sys.argv[0],) * num_specifiers\n    except (OverflowError, TypeError, ValueError):\n      \n      pass\n  if shorthelp:\n    flag_str = FLAGS.main_module_help()\n  else:\n    flag_str = FLAGS.get_help()\n  try:\n    stdfile.write(doc)\n    if flag_str:\n      stdfile.write('\\nflags:\\n')\n      stdfile.write(flag_str)\n    stdfile.write('\\n')\n    if detailed_error is not None:\n      stdfile.write('\\n%s\\n' % detailed_error)\n  except IOError as e:\n    \n    \n    if e.errno != errno.EPIPE:\n      raise\n  if exitcode is not None:\n    sys.exit(exitcode)", "docstring": "Writes __main__'s docstring to stderr with some help text.\n\nArgs:\nshorthelp: bool, if True, prints only flags from the main module,\nrather than all flags.\nwriteto_stdout: bool, if True, writes help message to stdout,\nrather than to stderr.\ndetailed_error: str, additional detail about why usage info was presented.\nexitcode: optional integer, if set, exits with this status code after\nwriting help.", "source": "juraj-google-style"}
{"code": "def GetObject(self, identifier):\n    \n    cache_value = self._values.get(identifier, None)\n    if not cache_value:\n      return None\n\n    return cache_value.vfs_object", "docstring": "Retrieves a cached object based on the identifier.\n\nThis method ignores the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nReturns:\nobject: cached VFS object or None if not cached.", "source": "juraj-google-style"}
{"code": "def get_priority(priority):\n    \n    if isinstance(priority, int):\n        if priority < 0 or priority > 100:\n            raise ValueError('priority must be between 0 and 100')\n        return priority\n    elif isinstance(priority, Priority):\n        return priority.value\n    elif isinstance(priority, str):\n        return Priority[priority.upper()].value\n    else:\n        raise TypeError('priority must be an integer or Priority enum value')", "docstring": "Get priority value.\n\nArgs:\npriority (int or str or :obj:`Priority`): Priority.\n\nReturns:\nint: The priority value.", "source": "juraj-google-style"}
{"code": "def _on_trace(self, sequence, topic, message):\n    try:\n        conn_key = self._find_connection(topic)\n        conn_id = self.conns.get_connection_id(conn_key)\n    except ArgumentError:\n        self._logger.warn('Dropping trace message that does not correspond with a known connection, topic=%s', topic)\n        return\n    try:\n        tracing = messages.TracingNotification.verify(message)\n        self._trigger_callback('on_trace', conn_id, tracing['trace'])\n    except Exception:\n        self._logger.exception('Error processing trace conn_id=%d', conn_id)", "docstring": "Process a trace received from a device.\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "codesearchnet"}
{"code": "def _build_colocation_attr_map(input_map, absolute_import_scope):\n  \n  colocation_attr_map = collections.defaultdict(_ConsistentValue)\n  used_outputs_of_imported_ops = collections.defaultdict(set)\n  \n  for imported_tensor_name, mapped_tensor in input_map.items():\n    imported_tensor_name = absolute_import_scope + \"/\" + imported_tensor_name\n    imported_op_name, imported_index = _split_tensor_name(imported_tensor_name)\n    key = tf.compat.as_bytes(\"loc:@\" + imported_op_name)\n    colocation_attr_map[key].Set(\n        mapped_tensor.op.colocation_groups(),\n        {\"reason\": \"input '%s' is substituted by '%s'\" % (\n            imported_tensor_name, mapped_tensor.name)})\n    used_outputs_of_imported_ops[imported_op_name].add(imported_index)\n  \n  \n  for imported_op_name, used_outputs in used_outputs_of_imported_ops.items():\n    imported_op = tf_v1.get_default_graph().get_operation_by_name(\n        imported_op_name)\n    unused_outputs = set(range(len(imported_op.outputs))) - used_outputs\n    if not unused_outputs: continue\n    key = tf.compat.as_bytes(\"loc:@\" + imported_op_name)\n    if imported_op.colocation_groups() != [key]:\n      \n      \n      raise ValueError(\n          \"Internal error: tensors from op '%s' are partially remapped in \"\n          \"import but op.colocation_groups=%s cannot be captured in a \"\n          \"simple rewrite rule.\" %\n          (imported_op_name, imported_op.colocation_groups()))\n    colocation_attr_map[key].Set(\n        [key],\n        {\"reason\": \"tensor '%s:%s' is not substituted by inputs\" % (\n            imported_op_name,\n            \",\".join(str(i) for i in sorted(unused_outputs)))})\n\n  return colocation_attr_map", "docstring": "Returns a dict mapping from pre-import to post-import colocation attrs.\n\nArgs:\ninput_map: as for fix_colocation_after_import.\nabsolute_import_scope: as for fix_colocation_after_import.\n\nReturns:\nA dict that maps bytes `\"loc:@\" + absolute_import_scope + \"/foo\"`\nto _ConsistentValues set to the lists of bytes `[\"loc:@...\", ...]`\naccording to the rewriting scheme of fix_colocation_after_import.\nIn case of an inconsistent rewriting, _ConsistentValue.has_error is true.", "source": "juraj-google-style"}
{"code": "def parse_GDS(filepath):\n    dataset_lines = []\n    subsets = {}\n    database = None\n    dataset_name = None\n    with utils.smart_open(filepath) as soft:\n        groupper = groupby(soft, (lambda x: x.startswith('^')))\n        for (is_new_entry, group) in groupper:\n            if is_new_entry:\n                (entry_type, entry_name) = __parse_entry(next(group))\n                logger.debug(('%s: %s' % (entry_type.upper(), entry_name)))\n                if (entry_type == 'SUBSET'):\n                    (is_data, data_group) = next(groupper)\n                    message = 'The key is not False, probably there is an error in the SOFT file'\n                    assert (not is_data), message\n                    subset_metadata = parse_metadata(data_group)\n                    subsets[entry_name] = GDSSubset(name=entry_name, metadata=subset_metadata)\n                elif (entry_type == 'DATABASE'):\n                    (is_data, data_group) = next(groupper)\n                    message = 'The key is not False, probably there is an error in the SOFT file'\n                    assert (not is_data), message\n                    database_metadata = parse_metadata(data_group)\n                    database = GEODatabase(name=entry_name, metadata=database_metadata)\n                elif (entry_type == 'DATASET'):\n                    (is_data, data_group) = next(groupper)\n                    dataset_name = entry_name\n                    for line in data_group:\n                        dataset_lines.append(line.rstrip())\n                else:\n                    logger.error(('Cannot recognize type %s' % entry_type))\n    metadata = parse_metadata(dataset_lines)\n    columns = parse_GDS_columns(dataset_lines, subsets)\n    table = parse_table_data(dataset_lines)\n    return GDS(name=dataset_name, metadata=metadata, columns=columns, table=table, subsets=subsets, database=database)", "docstring": "Parse GDS SOFT file.\n\nArgs:\nfilepath (:obj:`str`): Path to GDS SOFT file.\n\nReturns:\n:obj:`GEOparse.GDS`: A GDS object.", "source": "codesearchnet"}
{"code": "def random_shift(image, wsr=0.1, hsr=0.1):\n  \n  height, width, _ = common_layers.shape_list(image)\n  width_range, height_range = wsr*width, hsr*height\n  height_translations = tf.random_uniform((1,), -height_range, height_range)\n  width_translations = tf.random_uniform((1,), -width_range, width_range)\n  translations = tf.concat((height_translations, width_translations), axis=0)\n  return tf.contrib.image.translate(image, translations=translations)", "docstring": "Apply random horizontal and vertical shift to images.\n\nThis is the default data-augmentation strategy used on CIFAR in Glow.\n\nArgs:\nimage: a 3-D Tensor\nwsr: Width shift range, as a float fraction of the width.\nhsr: Height shift range, as a float fraction of the width.\nReturns:\nimages: images translated by the provided wsr and hsr.", "source": "juraj-google-style"}
{"code": "def get_2d_local_memory(x, query_shape, memory_flange):\n  \n  (_, height, width, depth_x) = common_layers.shape_list(x)\n  x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1])\n  \n  \n  paddings = [[0, 0], [memory_flange[0], memory_flange[0]],\n              [memory_flange[1], memory_flange[1]], [0, 0]]\n  padded_x = tf.pad(x, paddings)\n  padded_x.set_shape([None, height+2*memory_flange[0],\n                      width+2*memory_flange[1], depth_x])\n  x_outer_memory_blocks = _extract_blocks(padded_x,\n                                          memory_flange[0], memory_flange[1])\n  \n  \n\n  \n  \n  \n  x_left_blocks, x_right_blocks = _get_left_right_blocks(\n      x_outer_memory_blocks)\n  t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5])\n  \n  \n  \n  \n  x_top_center_blocks, x_bottom_center_blocks = (\n      map(t_hw_block, _get_left_right_blocks(\n          t_hw_block(x_outer_memory_blocks))))\n\n  \n  x_left_corner_blocks, x_right_corner_blocks = _split_along_width(\n      x_outer_memory_blocks)\n  \n  \n  \n  \n  \n\n  t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5])\n  x_top_left_corner_blocks, x_bottom_left_corner_blocks = (\n      map(t_hw, _split_along_width(t_hw(x_left_corner_blocks))))\n  x_top_right_corner_blocks, x_bottom_right_corner_blocks = (\n      map(t_hw, _split_along_width(t_hw(x_right_corner_blocks))))\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  x_top_memory = tf.concat(\n      [x_top_left_corner_blocks,\n       x_top_center_blocks,\n       x_top_right_corner_blocks], axis=4)\n  x_middle_memory = tf.concat(\n      [x_left_blocks, x_center_blocks, x_right_blocks], axis=4)\n  x_bottom_memory = tf.concat(\n      [x_bottom_left_corner_blocks,\n       x_bottom_center_blocks,\n       x_bottom_right_corner_blocks], axis=4)\n\n  \n  x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3)\n  return x", "docstring": "Stitches together the local 2d memory blocks.\n\nArgs:\nx: a [batch, height, width, depth tensor]\nquery_shape: 2-d integer list of query shape\nmemory_flange: 2-d integer list of memory flanges\n\nReturns:\nx: A [batch, num_h_blocks, num_w_blocks,\nquery_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]\ntensor.", "source": "juraj-google-style"}
{"code": "def get_ops_from_nodedef(node_def):\n    if not node_def.device:\n        node_def.device = '/cpu:0'\n    kernel_class = _pywrap_kernel_registry.TryFindKernelClass(node_def.SerializeToString())\n    op = str(node_def.op)\n    if kernel_class or op in OPS_WITHOUT_KERNEL_ALLOWLIST:\n        return (op, str(kernel_class.decode('utf-8')) if kernel_class else None)\n    else:\n        tf_logging.warning('Warning: no kernel found for op %s', op)\n        return None", "docstring": "Gets the op and kernel needed from the given NodeDef.\n\nArgs:\nnode_def: TF NodeDef to get op/kernel information.\n\nReturns:\nA tuple of (op_name, kernel_name). If the op is not in the allowlist of ops\nwithout kernel and there is no kernel found, then return None.", "source": "github-repos"}
{"code": "def __init__(self, filename, compression_type=None):\n    self._filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)", "docstring": "Initializes a `TFRecordWriter`.\n\nArgs:\nfilename: a string path indicating where to write the TFRecord data.\ncompression_type: (Optional.) a string indicating what type of compression\nto use when writing the file. See `tf.io.TFRecordCompressionType` for\nwhat types of compression are available. Defaults to `None`.", "source": "github-repos"}
{"code": "def read_full(fileobj, size):\n    \n\n    if size < 0:\n        raise ValueError(\"size must not be negative\")\n\n    data = fileobj.read(size)\n    if len(data) != size:\n        raise IOError\n    return data", "docstring": "Like fileobj.read but raises IOError if not all requested data is\nreturned.\n\nIf you want to distinguish IOError and the EOS case, better handle\nthe error yourself instead of using this.\n\nArgs:\nfileobj (fileobj)\nsize (int): amount of bytes to read\nRaises:\nIOError: In case read fails or not enough data is read", "source": "juraj-google-style"}
{"code": "def stat(self, path=None, client_kwargs=None, header=None):\n        \n        \n        stat = OrderedDict((\n            (\"st_mode\", 0), (\"st_ino\", 0), (\"st_dev\", 0), (\"st_nlink\", 0),\n            (\"st_uid\", 0), (\"st_gid\", 0), (\"st_size\", 0), (\"st_atime\", 0),\n            (\"st_mtime\", 0), (\"st_ctime\", 0)))\n\n        \n        header = self.head(path, client_kwargs, header)\n        for key, method in (\n                ('st_size', self._getsize_from_header),\n                ('st_ctime', self._getctime_from_header),\n                ('st_mtime', self._getmtime_from_header),):\n            try:\n                stat[key] = int(method(header))\n            except UnsupportedOperation:\n                continue\n\n        \n        if self.islink(path=path, header=header):\n            \n            stat['st_mode'] = S_IFLNK\n        elif ((not path or path[-1] == '/' or self.is_locator(path)) and not\n                stat['st_size']):\n            \n            stat['st_mode'] = S_IFDIR\n        else:\n            \n            stat['st_mode'] = S_IFREG\n\n        \n        sub = self._CHAR_FILTER.sub\n        for key, value in tuple(header.items()):\n            stat['st_' + sub('', key.lower())] = value\n\n        \n        stat_result = namedtuple('stat_result', tuple(stat))\n        stat_result.__name__ = 'os.stat_result'\n        stat_result.__module__ = 'pycosio'\n        return stat_result(**stat)", "docstring": "Get the status of an object.\n\nArgs:\npath (str): File path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\nos.stat_result: Stat result object", "source": "juraj-google-style"}
{"code": "def mtr_tr_dense(sz):\n    n = (2 ** sz)\n    hparams = mtf_bitransformer_base()\n    hparams.d_model = 1024\n    hparams.max_length = 256\n    hparams.batch_size = 128\n    hparams.d_ff = int((4096 * n))\n    hparams.d_kv = 128\n    hparams.encoder_num_heads = int((8 * n))\n    hparams.decoder_num_heads = int((8 * n))\n    hparams.learning_rate_decay_steps = 51400\n    hparams.layout = 'batch:batch;vocab:model;d_ff:model;heads:model'\n    hparams.mesh_shape = 'batch:32'\n    hparams.label_smoothing = 0.1\n    hparams.layer_prepostprocess_dropout = 0.1\n    hparams.attention_dropout = 0.1\n    hparams.relu_dropout = 0.1\n    return hparams", "docstring": "Series of machine translation models.\n\nAll models are trained on sequences of 256 tokens.\n\nYou can use the dataset translate_enfr_wmt32k_packed.\n154000 steps = 3 epochs.\n\nArgs:\nsz: an integer\n\nReturns:\na hparams", "source": "codesearchnet"}
{"code": "def insert(self, keys, values, name=None):\n    with ops.name_scope(name, '%s_lookup_table_insert' % self.name, [self.resource_handle, keys, values]):\n        keys = ops.convert_to_tensor(keys, self._key_dtype, name='keys')\n        values = ops.convert_to_tensor(values, self._value_dtype, name='values')\n        with ops.colocate_with(self.resource_handle):\n            op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys, values)\n    return op", "docstring": "Associates `keys` with `values`.\n\nArgs:\nkeys: Keys to insert. Can be a tensor of any shape. Must match the table's\nkey type.\nvalues: Values to be associated with keys. Must be a tensor of the same\nshape as `keys` and match the table's value type.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `keys` or `values` doesn't match the table data\ntypes.", "source": "github-repos"}
{"code": "def delete_node(self, node_name):\n        \n        graph = self.graph\n        if node_name not in graph:\n            raise KeyError('node %s does not exist' % node_name)\n        graph.pop(node_name)\n\n        for node, edges in graph.items():\n            if node_name in edges:\n                edges.remove(node_name)", "docstring": "Deletes this node and all edges referencing it.\n\nArgs:\nnode_name (str): The name of the node to delete.\n\nRaises:\nKeyError: Raised if the node does not exist in the graph.", "source": "juraj-google-style"}
{"code": "def default_local_init_op():\n    return control_flow_ops.group(variables.local_variables_initializer(), lookup_ops.tables_initializer(), resources.initialize_resources(resources.local_resources()))", "docstring": "Returns an op that groups the default local init ops.\n\nThis op is used during session initialization when a Scaffold is\ninitialized without specifying the local_init_op arg. It includes\n`tf.compat.v1.local_variables_initializer`,\n`tf.compat.v1.tables_initializer`, and also\ninitializes local session resources.\n\nReturns:\nThe default Scaffold local init op.", "source": "github-repos"}
{"code": "def IsEquivalent(self, other):\n    if (self.name and other.name):\n        return (self.name == other.name)\n    if self.name:\n        (self_family, self_version_tuple) = self._FAMILY_AND_VERSION_PER_NAME.get(self.name, self._DEFAULT_FAMILY_AND_VERSION)\n        return ((self_family == other.family) and (self_version_tuple == other.version_tuple))\n    if (self.family and self.version):\n        if other.name:\n            (other_family, other_version_tuple) = self._FAMILY_AND_VERSION_PER_NAME.get(other.name, self._DEFAULT_FAMILY_AND_VERSION)\n        else:\n            other_family = other.family\n            other_version_tuple = other.version_tuple\n        return ((self.family == other_family) and (self.version_tuple == other_version_tuple))\n    if self.family:\n        if other.name:\n            (other_family, _) = self._FAMILY_AND_VERSION_PER_NAME.get(other.name, self._DEFAULT_FAMILY_AND_VERSION)\n        else:\n            other_family = other.family\n        return (self.family == other_family)\n    return False", "docstring": "Determines if 2 operating system artifacts are equivalent.\n\nThis function compares the operating systems based in order of:\n* name derived from product\n* family and version\n* family\n\nArgs:\nother (OperatingSystemArtifact): operating system artifact attribute\ncontainer to compare with.\n\nReturns:\nbool: True if the operating systems are considered equivalent, False if\nthe most specific criteria do no match, or no criteria are available.", "source": "codesearchnet"}
{"code": "def load_values(self, dictionary, as_defaults=False, flat=False):\n    if flat:\n        separator = self.settings.str_path_separator\n        flat_dictionary = dictionary\n        dictionary = collections.OrderedDict()\n        for (k, v) in flat_dictionary.items():\n            k_parts = k.split(separator)\n            c = dictionary\n            for (i, kp) in enumerate(k_parts):\n                if (i >= (len(k_parts) - 1)):\n                    c[kp] = v\n                else:\n                    if (kp not in c):\n                        c[kp] = collections.OrderedDict()\n                    c = c[kp]\n    for (name, value) in dictionary.items():\n        if (name not in self):\n            if as_defaults:\n                if isinstance(value, dict):\n                    self[name] = self.create_section()\n                    self[name].load_values(value, as_defaults=as_defaults)\n                else:\n                    self[name] = self.create_item(name, default=value)\n            else:\n                pass\n            continue\n        resolution = self._get_item_or_section(name, handle_not_found=False)\n        if is_config_item(resolution):\n            if as_defaults:\n                resolution.default = value\n            else:\n                resolution.value = value\n        else:\n            resolution.load_values(value, as_defaults=as_defaults)", "docstring": "Import config values from a dictionary.\n\nWhen ``as_defaults`` is set to ``True``, the values\nimported will be set as defaults. This can be used to\ndeclare the sections and items of configuration.\nValues of sections and items in ``dictionary`` can be\ndictionaries as well as instances of :class:`.Item` and\n:class:`.Config`.\n\nArgs:\ndictionary:\nas_defaults: if ``True``, the imported values will be set as defaults.", "source": "codesearchnet"}
{"code": "def length(self, rows=None):\n    rows = (tf.range(self._capacity) if (rows is None) else rows)\n    return tf.gather(self._length, rows)", "docstring": "Tensor holding the current length of episodes.\n\nArgs:\nrows: Episodes to select length from, defaults to all.\n\nReturns:\nBatch tensor of sequence lengths.", "source": "codesearchnet"}
{"code": "def update(self, config_dict: dict[str, Any]):\n    for key, value in config_dict.items():\n        setattr(self, key, value)", "docstring": "Updates attributes of this class with attributes from `config_dict`.\n\nArgs:\nconfig_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.", "source": "github-repos"}
{"code": "def return_selected_form_items(form_info):\n        \n        selected_keys = []\n        selected_names = []\n        for chosen in form_info:\n            if chosen['choice']:\n                selected_keys.append(chosen['key'])\n                selected_names.append(chosen['name'])\n\n        return selected_keys, selected_names", "docstring": "It returns chosen keys list from a given form.\n\nArgs:\nform_info: serialized list of dict form data\nReturns:\nselected_keys(list): Chosen keys list\nselected_names(list): Chosen channels' or subscribers' names.", "source": "juraj-google-style"}
{"code": "def random_name(num_surnames=2):\n    \n    a = []\n\n    \n    if random.random() < _PROB_PREF:\n        a.append(_prefixes[random.randint(0, len(_prefixes) - 1)])\n\n    \n    a.append(_forenames[random.randint(0, len(_forenames) - 1)])\n\n    \n    for i in range(num_surnames):\n        a.append(_surnames[random.randint(0, len(_surnames) - 1)])\n\n    \n    if random.random() < _PROB_SUFF:\n        a.append(_suffixes[random.randint(0, len(_suffixes) - 1)])\n\n    return \" \".join(a)", "docstring": "Returns a random person name\n\nArguments:\nnum_surnames -- number of surnames", "source": "juraj-google-style"}
{"code": "def ring_position(self):\n    if (self.type != EventType.TABLET_PAD_RING):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_tablet_pad_get_ring_position(self._handle)", "docstring": "The current position of the ring, in degrees\ncounterclockwise from the northern-most point of the ring in\nthe tablet's current logical orientation.\n\nIf the source is\n:attr:`~libinput.constant.TabletPadRingAxisSource.FINGER`,\nlibinput sends a terminating event with a ring value of -1 when\nthe finger is lifted from the ring. A caller may use this information\nto e.g. determine if kinetic scrolling should be triggered.\n\nFor events not of type\n:attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property\nraises :exc:`AttributeError`.\n\nReturns:\nfloat: The current value of the the axis. -1 if the finger was\nlifted.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def agent_heartbeat(self, agent_id, metrics, run_states):\n    mutation = gql('\\n        mutation Heartbeat(\\n            $id: ID!,\\n            $metrics: JSONString,\\n            $runState: JSONString\\n        ) {\\n            agentHeartbeat(input: {\\n                id: $id,\\n                metrics: $metrics,\\n                runState: $runState\\n            }) {\\n                agent {\\n                    id\\n                }\\n                commands\\n            }\\n        }\\n        ')\n    try:\n        response = self.gql(mutation, variable_values={'id': agent_id, 'metrics': json.dumps(metrics), 'runState': json.dumps(run_states)})\n    except Exception as e:\n        message = ast.literal_eval(e.args[0])['message']\n        logger.error('Error communicating with W&B: %s', message)\n        return []\n    else:\n        return json.loads(response['agentHeartbeat']['commands'])", "docstring": "Notify server about agent state, receive commands.\n\nArgs:\nagent_id (str): agent_id\nmetrics (dict): system metrics\nrun_states (dict): run_id: state mapping\nReturns:\nList of commands to execute.", "source": "codesearchnet"}
{"code": "def flip(x, axis=None):\n    if any_symbolic_tensors((x,)):\n        return Flip(axis=axis).symbolic_call(x)\n    return backend.numpy.flip(x, axis=axis)", "docstring": "Reverse the order of elements in the tensor along the given axis.\n\nThe shape of the tensor is preserved, but the elements are reordered.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which to flip the tensor. The default,\n`axis=None`, will flip over all of the axes of the input tensor.\n\nReturns:\nOutput tensor with entries of `axis` reversed.", "source": "github-repos"}
{"code": "def get_parent(self, tree, alt=None):\n    parent = self.parent_db.get(tree.path)\n    if (not parent):\n        return alt\n    return list(parent)[0]", "docstring": "Get parent for given `tree` or `alt` if not found.\n\nArgs:\ntree (obj): :class:`.Tree` instance, which is already stored in DB.\nalt (obj, default None): Alternative value returned when `tree` is\nnot found.\n\nReturns:\nobj: :class:`.Tree` parent to given `tree`.", "source": "codesearchnet"}
{"code": "def _render_fluent_timestep(self, fluent_type: str, fluents: Sequence[Tuple[(str, np.array)]], fluent_variables: Sequence[Tuple[(str, List[str])]]) -> None:\n    for (fluent_pair, variable_list) in zip(fluents, fluent_variables):\n        (name, fluent) = fluent_pair\n        (_, variables) = variable_list\n        print(name)\n        fluent = fluent.flatten()\n        for (variable, value) in zip(variables, fluent):\n            print('- {}: {} = {}'.format(fluent_type, variable, value))\n    print()", "docstring": "Prints `fluents` of given `fluent_type` as list of instantiated variables\nwith corresponding values.\n\nArgs:\nfluent_type (str): Fluent type.\nfluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values).\nfluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args).", "source": "codesearchnet"}
{"code": "def handler(self, direction, verb, priority=10):\n\n    def parent_fn(func):\n\n        @functools.wraps(func)\n        def child_fn(msg):\n            func(msg)\n        self.register_event(direction, verb, child_fn, priority=priority)\n        return child_fn\n    return parent_fn", "docstring": "Register this function as an event handler.\n\nArgs:\ndirection (str): ``in``, ``out``, ``both``, ``raw``.\nverb (str): Event name.\npriority (int): Handler priority (lower priority executes first).\n\nExample:\nThese handlers print out a pretty raw log::\n\nreactor = girc.Reactor()\n\n@reactor.handler('in', 'raw', priority=1)\ndef handle_raw_in(event):\nprint(event['server'].name, ' ->', escape(event['data']))\n\n\n@reactor.handler('out', 'raw', priority=1)\ndef handle_raw_out(event):\nprint(event['server'].name, '<- ', escape(event['data']))", "source": "codesearchnet"}
{"code": "def from_file(cls, path):\n    with open(path, 'r', errors='replace') as f:\n        return cls(f.read())", "docstring": "Create a text from a file.\n\nArgs:\npath (str): The file path.", "source": "codesearchnet"}
{"code": "def start(self, host, nornir):\n        \n        self.host = host\n        self.nornir = nornir\n\n        try:\n            logger.debug(\"Host %r: running task %r\", self.host.name, self.name)\n            r = self.task(self, **self.params)\n            if not isinstance(r, Result):\n                r = Result(host=host, result=r)\n\n        except NornirSubTaskError as e:\n            tb = traceback.format_exc()\n            logger.error(\n                \"Host %r: task %r failed with traceback:\\n%s\",\n                self.host.name,\n                self.name,\n                tb,\n            )\n            r = Result(host, exception=e, result=str(e), failed=True)\n\n        except Exception as e:\n            tb = traceback.format_exc()\n            logger.error(\n                \"Host %r: task %r failed with traceback:\\n%s\",\n                self.host.name,\n                self.name,\n                tb,\n            )\n            r = Result(host, exception=e, result=tb, failed=True)\n\n        r.name = self.name\n        r.severity_level = logging.ERROR if r.failed else self.severity_level\n\n        self.results.insert(0, r)\n        return self.results", "docstring": "Run the task for the given host.\n\nArguments:\nhost (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right\nbefore calling the ``task``\nnornir(:obj:`nornir.core.Nornir`): Populated right before calling\nthe ``task``\n\nReturns:\nhost (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks", "source": "juraj-google-style"}
{"code": "def query(starttime, endtime, output=None, *filenames):\n    if (not output):\n        output = ((((filenames[0].replace('.pcap', '') + starttime.isoformat()) + '-') + endtime.isoformat()) + '.pcap')\n    else:\n        output = output\n    with open(output, 'w') as outfile:\n        for filename in filenames:\n            log.info(('pcap.query: processing %s...' % filename))\n            with open(filename, 'r') as stream:\n                for (header, packet) in stream:\n                    if (packet is not None):\n                        if ((header.timestamp >= starttime) and (header.timestamp <= endtime)):\n                            outfile.write(packet, header=header)", "docstring": "Given a time range and input file, query creates a new file with only\nthat subset of data. If no outfile name is given, the new file name is the\nold file name with the time range appended.\n\nArgs:\nstarttime:\nThe datetime of the beginning time range to be extracted from the files.\nendtime:\nThe datetime of the end of the time range to be extracted from the files.\noutput:\nOptional: The output file name. Defaults to\n[first filename in filenames][starttime]-[endtime].pcap\nfilenames:\nA tuple of one or more file names to extract data from.", "source": "codesearchnet"}
{"code": "def check_interactive_docker_worker(link):\n    errors = []\n    log.info('Checking for {} {} interactive docker-worker'.format(link.name, link.task_id))\n    try:\n        if link.task['payload']['features'].get('interactive'):\n            errors.append('{} is interactive: task.payload.features.interactive!'.format(link.name))\n        if link.task['payload']['env'].get('TASKCLUSTER_INTERACTIVE'):\n            errors.append('{} is interactive: task.payload.env.TASKCLUSTER_INTERACTIVE!'.format(link.name))\n    except KeyError:\n        errors.append('check_interactive_docker_worker: {} task definition is malformed!'.format(link.name))\n    return errors", "docstring": "Given a task, make sure the task was not defined as interactive.\n\n* ``task.payload.features.interactive`` must be absent or False.\n* ``task.payload.env.TASKCLUSTER_INTERACTIVE`` must be absent or False.\n\nArgs:\nlink (LinkOfTrust): the task link we're checking.\n\nReturns:\nlist: the list of error errors.  Success is an empty list.", "source": "codesearchnet"}
{"code": "def _expand_json(self, j):\n        \n        decompressed_json = copy.copy(j)\n        decompressed_json.pop('blob', None)  \n\n        \n        compressed_data = base64.b64decode(j['blob'])\n        original_json = zlib.decompress(compressed_data).decode('utf-8')\n\n        decompressed_json['users'] = json.loads(original_json)  \n\n        return decompressed_json", "docstring": "Decompress the BLOB portion of the usernotes.\n\nArguments:\nj: the JSON returned from the wiki page (dict)\n\nReturns a Dict with the 'blob' key removed and a 'users' key added", "source": "juraj-google-style"}
{"code": "def _assert_same_graph(original_item, item) -> None:\n    original_graph = getattr(original_item, 'graph', None)\n    graph = getattr(item, 'graph', None)\n    if original_graph and graph and (original_graph is not graph):\n        raise ValueError('%s must be from the same graph as %s (graphs are %s and %s).' % (item, original_item, graph, original_graph))", "docstring": "Fail if the 2 items are from different graphs.\n\nArgs:\noriginal_item: Original item to check against.\nitem: Item to check.\n\nRaises:\nValueError: if graphs do not match.", "source": "github-repos"}
{"code": "def list_vmss(access_token, subscription_id, resource_group):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets',\n                        '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VM Scale Sets in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body of a list of scale set model views.", "source": "juraj-google-style"}
{"code": "def get_user(self, identified_with, identifier, req, resp, resource, uri_kwargs):\n    stored_value = self.kv_store.get(self._get_storage_key(identified_with, identifier))\n    if (stored_value is not None):\n        user = self.serialization.loads(stored_value.decode())\n    else:\n        user = None\n    return user", "docstring": "Get user object for given identifier.\n\nArgs:\nidentified_with (object): authentication middleware used\nto identify the user.\nidentifier: middleware specifix user identifier (string or tuple\nin case of all built in authentication middleware classes).\n\nReturns:\ndict: user object stored in Redis if it exists, otherwise ``None``", "source": "codesearchnet"}
{"code": "def export_as_file(self, file_path, cv_source):\n    if os.path.exists(file_path):\n        raise exceptions.UserError('{} already exists'.format(file_path))\n    with open(file_path, 'wb') as f:\n        f.write(self.export_as_code(cv_source).encode('utf8'))", "docstring": "Export the ensemble as a single Python file and saves it to `file_path`.\n\nThis is EXPERIMENTAL as putting different modules together would probably wreak havoc\nespecially on modules that make heavy use of global variables.\n\nArgs:\nfile_path (str, unicode): Absolute/local path of place to save file in\n\ncv_source (str, unicode): String containing actual code for base learner\ncross-validation used to generate secondary meta-features.", "source": "codesearchnet"}
{"code": "def add_topic(self, topic):\n    if (topic in self._topics):\n        return Future().success(set(self._topics))\n    self._topics.add(topic)\n    return self.cluster.request_update()", "docstring": "Add a topic to the list of topics tracked via metadata.\n\nArguments:\ntopic (str): topic to track\n\nReturns:\nFuture: resolves after metadata request/response", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    if self.with_group_token:\n        group_token = self.group_token.expand(hidden_states.size(0), -1, -1)\n        if self.group_projector is not None:\n            group_token = group_token + self.group_projector(prev_group_token)\n    else:\n        group_token = None\n    x = hidden_states\n    cat_x = self.concat_x(x, group_token)\n    for layer in self.layers:\n        layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None)\n        cat_x = layer_out[0]\n    x, group_token = self.split_x(cat_x)\n    attention = None\n    if self.downsample is not None:\n        x, attention = self.downsample(x, group_token)\n    outputs = (x, group_token)\n    if output_attentions:\n        outputs = outputs + (attention,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the grouping tensors of Grouping block.", "source": "github-repos"}
{"code": "def get_tensor_layout(self, path):\n    raise NotImplementedError()", "docstring": "Retrieve the `TensorLayout` for the intermediate tensor.\n\nArgs:\npath: a string path for the corresponding tensor.\n\nreturn:\nThe `TensorLayout` for the intermediate tensor, which can be used\nby `backend.relayout()` to reshard the tensor. Could also return\nNone.", "source": "github-repos"}
{"code": "def __getitem__(self, id):\n        \n        if not isinstance(id, int):\n            raise TypeError(id)\n        return self._map[id]", "docstring": "Return the worksheet with the given id.\n\nArgs:\nid: numeric id of the worksheet\nReturns:\nWorkSheet: contained worksheet object\nRaises:\nTypeError: if ``id`` is not an ``int``\nKeyError: if the spreadsheet has no worksheet with the given ``id``", "source": "juraj-google-style"}
{"code": "def set_distribution(value):\n    global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)", "docstring": "Set the distribution as the global distribution setting.\n\nArgs:\nvalue: a `Distribution` instance.", "source": "github-repos"}
{"code": "def _ExtractExtensionInstallEvents(self, settings_dict, parser_mediator):\n    for (extension_id, extension) in sorted(settings_dict.items()):\n        install_time = extension.get('install_time', None)\n        if (not install_time):\n            parser_mediator.ProduceExtractionWarning('installation time missing for extension ID {0:s}'.format(extension_id))\n            continue\n        try:\n            install_time = int(install_time, 10)\n        except ValueError:\n            parser_mediator.ProduceExtractionWarning('unable to convert installation time for extension ID {0:s}'.format(extension_id))\n            continue\n        manifest = extension.get('manifest', None)\n        if (not manifest):\n            parser_mediator.ProduceExtractionWarning('manifest missing for extension ID {0:s}'.format(extension_id))\n            continue\n        event_data = ChromeExtensionInstallationEventData()\n        event_data.extension_id = extension_id\n        event_data.extension_name = manifest.get('name', None)\n        event_data.path = extension.get('path', None)\n        date_time = dfdatetime_webkit_time.WebKitTime(timestamp=install_time)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract extension installation events.\n\nArgs:\nsettings_dict (dict[str: object]): settings data from a Preferences file.\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.", "source": "codesearchnet"}
{"code": "def node_name(self):\n    return self._node_name", "docstring": "Name of the node from which the tensor value was dumped.\n\nReturns:\n(`str`) name of the node watched by the debug op.", "source": "github-repos"}
{"code": "def modify_site(name, sourcepath=None, apppool=None, preload=None):\n    site_path = 'IIS:\\\\Sites\\\\{0}'.format(name)\n    current_sites = list_sites()\n    if (name not in current_sites):\n        log.debug(\"Site '%s' not defined.\", name)\n        return False\n    ps_cmd = list()\n    if sourcepath:\n        ps_cmd.extend(['Set-ItemProperty', '-Path', \"'{0}'\".format(site_path), '-Name', 'PhysicalPath', '-Value', \"'{0}'\".format(sourcepath)])\n    if apppool:\n        if (apppool in list_apppools()):\n            log.debug('Utilizing pre-existing application pool: %s', apppool)\n        else:\n            log.debug('Application pool will be created: %s', apppool)\n            create_apppool(apppool)\n        if ps_cmd:\n            ps_cmd.append(';')\n        ps_cmd.extend(['Set-ItemProperty', '-Path', \"'{0}'\".format(site_path), '-Name', 'ApplicationPool', '-Value', \"'{0}'\".format(apppool)])\n    if preload:\n        ps_cmd.extend(['Set-ItemProperty', '-Path', \"'{0}'\".format(site_path), '-Name', 'applicationDefaults.preloadEnabled', '-Value', '{0};'.format(preload)])\n    cmd_ret = _srvmgr(ps_cmd)\n    if (cmd_ret['retcode'] != 0):\n        msg = 'Unable to modify site: {0}\\nError: {1}'.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n    log.debug('Site modified successfully: %s', name)\n    return True", "docstring": "Modify a basic website in IIS.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The IIS site name.\nsourcepath (str): The physical path of the IIS site.\napppool (str): The name of the IIS application pool.\npreload (bool): Whether preloading should be enabled\n\nReturns:\nbool: True if successful, otherwise False.\n\n.. note::\n\nIf an application pool is specified, and that application pool does not\nalready exist, it will be created.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.modify_site name='My Test Site' sourcepath='c:\\\\new_path' apppool='NewTestPool' preload=True", "source": "codesearchnet"}
{"code": "def aggregate_global_cache(self, global_tt_summary_cache):\n    agg_fn_map = self._parameters.get_signature_to_agg_fn_map()\n    signature_idx_map = self._signature_types()\n    aggregation_result = []\n    for signature, idx in sorted(signature_idx_map.items(), key=operator.itemgetter(1)):\n        if signature not in agg_fn_map:\n            raise RuntimeError('No aggregation function is defined for signature %s.' % signature)\n        signature_tensor = global_tt_summary_cache[:, :, idx]\n        agg_fn = agg_fn_map[signature]\n        agg_tensor = agg_fn(signature_tensor, axis=0)\n        aggregation_result.append(agg_tensor)\n    merged_signatures = array_ops_stack.stack(aggregation_result)\n    transposed_signatures = array_ops.transpose(merged_signatures)\n    return array_ops.expand_dims(transposed_signatures, axis=0)", "docstring": "Merges the given caches on tpu.\n\nArgs:\nglobal_tt_summary_cache: The global tensor tracer summary cache tensor\nwith shape (num_cores, num_traced_tensors, num_traced_signatures). First\ndimension corresponds to core_id, where global_tpu_cache_tensor[i]\ncorrespond to the local cache from core-i.\nReturns:\nAn aggregated tf.Tensor.\nRaises:\nRuntimeError: if there is no aggregate function defined for a signature.", "source": "github-repos"}
{"code": "def _log_every_n_to_logger(n, logger, level, message, *args):\n    logger = (logger or logging.getLogger())\n\n    def _gen():\n        while True:\n            for _ in range(n):\n                (yield False)\n            logger.log(level, message, *args)\n            (yield True)\n    gen = _gen()\n    return (lambda : six.next(gen))", "docstring": "Logs the given message every n calls to a logger.\n\nArgs:\nn: Number of calls before logging.\nlogger: The logger to which to log.\nlevel: The logging level (e.g. logging.INFO).\nmessage: A message to log\n*args: Any format args for the message.\nReturns:\nA method that logs and returns True every n calls.", "source": "codesearchnet"}
{"code": "def _qInstallMessageHandler(handler):\n\n    def messageOutputHandler(*args):\n        if (len(args) == 3):\n            (msgType, logContext, msg) = args\n        elif (len(args) == 2):\n            (msgType, msg) = args\n            logContext = None\n        else:\n            raise TypeError('handler expected 2 or 3 arguments, got {0}'.format(len(args)))\n        if isinstance(msg, bytes):\n            msg = msg.decode()\n        handler(msgType, logContext, msg)\n    passObject = (messageOutputHandler if handler else handler)\n    if (Qt.IsPySide or Qt.IsPyQt4):\n        return Qt._QtCore.qInstallMsgHandler(passObject)\n    elif (Qt.IsPySide2 or Qt.IsPyQt5):\n        return Qt._QtCore.qInstallMessageHandler(passObject)", "docstring": "Install a message handler that works in all bindings\n\nArgs:\nhandler: A function that takes 3 arguments, or None", "source": "codesearchnet"}
{"code": "def log_get(recipe_id=[], timezone='America/Los_Angeles', days=1):\n    body = {'resourceNames': ['projects/%s' % UI_PROJECT], 'filter': '       logName=\"projects/%s/logs/StarThinker\"        AND labels.version=\"%s\"        AND labels.layer=\"JOB\"     ' % (UI_PROJECT, LOG_VERSION), 'orderBy': 'timestamp desc', 'pageSize': 1000}\n    if recipe_id:\n        if isinstance(recipe_id, str):\n            recipe_id = [recipe_id]\n        body['filter'] += ' AND ( %s )' % ' OR '.join(('operation.id=\"%s\"' % r for r in recipe_id))\n    for entry in API_StackDriver(Configuration(service=UI_SERVICE, project=UI_PROJECT), 'service', iterate=True).entries().list(body=body).execute():\n        yield entry", "docstring": "Returns last actionable job run for a specific recipe or all recipes.\n\nPulls status entries from StackDriver in reverse order.  A single recipe may\nbe run multiple times for multiple tasks at different hours, do not\nassume a JOB_END means a recipe is complete.  Only way to ensure a recipe is\ncomplete\nis to compare all tasks run against all tasks in recipe ( not done by log\ncode).\n\nArgs: - recipe_id ( string or list ) - Optional, if provided returns a single\nrecord for a single job. - timezone ( string ) - The local timezone to cast\nall record times into.\n\nReturns:\n- ( iterator ) - Each log entry.", "source": "github-repos"}
{"code": "def __init__(self, parser, *, pytype_single_args=None, overrides=None):\n    self._parser = parser\n    self._overrides = overrides or []\n    self.pytype_single_args = pytype_single_args or {}", "docstring": "Initialize a parser.\n\nArgs:\nparser: An argparse.ArgumentParser or compatible object\npytype_single_args: Args passed to pytype\noverrides: Pytype args that the tool overrides (will be put into the tool\nargs, with the corresponding pytype opts getting their default values)", "source": "github-repos"}
{"code": "def optimizer(name):\n  \n  warn_msg = (\"Please update `registry.optimizer` callsite \"\n              \"(likely due to a `HParams.optimizer` value)\")\n  if name == \"SGD\":\n    name = \"sgd\"\n    tf.logging.warning(\"'SGD' optimizer now keyed by 'sgd'. %s\" % warn_msg)\n  elif name == \"RMSProp\":\n    name = \"rms_prop\"\n    tf.logging.warning(\n        \"'RMSProp' optimizer now keyed by 'rms_prop'. %s\" % warn_msg)\n  else:\n    snake_name = misc_utils.camelcase_to_snakecase(name)\n    if name != snake_name:\n      tf.logging.warning(\n          \"optimizer names now keyed by snake_case names. %s\" % warn_msg)\n      name = snake_name\n  return Registries.optimizers[name]", "docstring": "Get pre-registered optimizer keyed by name.\n\n`name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and\nUpperCamelCase -> snake_case conversions included for legacy support.\n\nArgs:\nname: name of optimizer used in registration. This should be a snake case\nidentifier, though others supported for legacy reasons.\n\nReturns:\noptimizer", "source": "juraj-google-style"}
{"code": "def _CountClientStatisticByLabel(self, day_buckets, extract_statistic_fn):\n    \n    counts = collections.defaultdict(int)\n    now = rdfvalue.RDFDatetime.Now()\n    for info in self.IterateAllClientsFullInfo(batch_size=db.MAX_COUNT):\n      if not info.metadata.ping:\n        continue\n      statistic_value = extract_statistic_fn(info)\n      for client_label in info.GetLabelsNames(owner=\"GRR\"):\n        for day_bucket in day_buckets:\n          time_boundary = now - rdfvalue.Duration.FromDays(day_bucket)\n          if info.metadata.ping > time_boundary:\n            \n            \n            counts[(statistic_value, client_label, day_bucket)] += 1\n    return dict(counts)", "docstring": "Returns client-activity metrics for a particular statistic.\n\nArgs:\nday_buckets: A set of n-day-active buckets.\nextract_statistic_fn: A function that extracts the statistic's value from\na ClientFullInfo object.", "source": "juraj-google-style"}
{"code": "def is_subgroup(self, supergroup):\n    warnings.warn('This is not fully functional. Only trivial subsets are tested right now. ')\n    return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)", "docstring": "True if this group is a subgroup of the supplied group.\n\nArgs:\nsupergroup (SymmetryGroup): Supergroup to test.\n\nReturns:\nTrue if this group is a subgroup of the supplied group.", "source": "codesearchnet"}
{"code": "def FromTimestampToLdap(self, ts):\n    if self.conf.get('ad'):\n        t = time.strftime('%Y%m%d%H%M%S.0Z', time.gmtime(ts))\n    else:\n        t = time.strftime('%Y%m%d%H%M%SZ', time.gmtime(ts))\n    return t", "docstring": "Transforms nss_cache internal timestamp into a LDAP timestamp.\n\nArgs:\nts: number of seconds since epoch\n\nReturns:\nLDAP format timestamp string.", "source": "github-repos"}
{"code": "def insert(self, index, value):\n    if (value in self):\n        raise ValueError\n    index = self._fix_neg_index(index)\n    self._dict[value] = index\n    for elem in self._list[index:]:\n        self._dict[elem] += 1\n    self._list.insert(index, value)", "docstring": "Insert value at index.\n\nArgs:\nindex (int): Index to insert value at\nvalue: Value to insert\nRaises:\nValueError: If value already in self\nIndexError: If start or end are out of range", "source": "codesearchnet"}
{"code": "def check_coordinates(chromosome, pos, coordinates):\n    \n    chrom_match = CHR_PATTERN.match(chromosome)\n    chrom = chrom_match.group(2)\n\n    if chrom != coordinates['chrom']:\n        return False\n    \n    if (pos >= coordinates['start'] and pos <= coordinates['end']):\n        return True\n\n    return False", "docstring": "Check if the variant is in the interval given by the coordinates\n\nArgs:\nchromosome(str): Variant chromosome\npos(int): Variant position\ncoordinates(dict): Dictionary with the region of interest", "source": "juraj-google-style"}
{"code": "def get_ip_address(domain):\n    \n    if \":\n        domain = \"http:\n\n    hostname = urlparse(domain).netloc\n\n    if not hostname:\n        raise ValueError(\"Can't parse hostname!\")\n\n    return socket.gethostbyname(hostname)", "docstring": "Get IP address for given `domain`. Try to do smart parsing.\n\nArgs:\ndomain (str): Domain or URL.\n\nReturns:\nstr: IP address.\n\nRaises:\nValueError: If can't parse the domain.", "source": "juraj-google-style"}
{"code": "def update(self, forecasts, observations):\n        \n        if len(observations.shape) == 1:\n            obs_cdfs = np.zeros((observations.size, self.thresholds.size))\n            for o, observation in enumerate(observations):\n                obs_cdfs[o, self.thresholds >= observation] = 1\n        else:\n            obs_cdfs = observations\n        self.errors[\"F_2\"] += np.sum(forecasts ** 2, axis=0)\n        self.errors[\"F_O\"] += np.sum(forecasts * obs_cdfs, axis=0)\n        self.errors[\"O_2\"] += np.sum(obs_cdfs ** 2, axis=0)\n        self.errors[\"O\"] += np.sum(obs_cdfs, axis=0)\n        self.num_forecasts += forecasts.shape[0]", "docstring": "Update the statistics with forecasts and observations.\n\nArgs:\nforecasts: The discrete Cumulative Distribution Functions of\nobservations:", "source": "juraj-google-style"}
{"code": "def get(self, personId):\n    check_type(personId, basestring, may_be_none=False)\n    json_data = self._session.get(((API_ENDPOINT + '/') + personId))\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Get a person's details, by ID.\n\nArgs:\npersonId(basestring): The ID of the person to be retrieved.\n\nReturns:\nPerson: A Person object with the details of the requested person.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def reformat_python_docstrings(top_dirs: List[str], correct_copyright_lines: List[str], show_only: bool=True, rewrite: bool=False, process_only_filenum: int=None) -> None:\n    filenum = 0\n    for top_dir in top_dirs:\n        for (dirpath, dirnames, filenames) in walk(top_dir):\n            for filename in filenames:\n                fullname = join(dirpath, filename)\n                extension = splitext(filename)[1]\n                if (extension != PYTHON_EXTENSION):\n                    continue\n                filenum += 1\n                if (process_only_filenum and (filenum != process_only_filenum)):\n                    continue\n                log.info('Processing file {}: {}', filenum, fullname)\n                proc = PythonProcessor(full_path=fullname, top_dir=top_dir, correct_copyright_lines=correct_copyright_lines)\n                if show_only:\n                    proc.show()\n                elif rewrite:\n                    proc.rewrite_file()", "docstring": "Walk a directory, finding Python files and rewriting them.\n\nArgs:\ntop_dirs: list of directories to descend into\ncorrect_copyright_lines:\nlist of lines (without newlines) representing the copyright\ndocstring block, including the transition lines of equals\nsymbols\nshow_only: show results (to stdout) only; don't rewrite\nrewrite: write the changes\nprocess_only_filenum: only process this file number (1-based index);\nfor debugging only", "source": "codesearchnet"}
{"code": "def chained(self, text=None, fore=None, back=None, style=None):\n        \n        self.data = ''.join((\n            self.data,\n            self.color(text=text, fore=fore, back=back, style=style),\n        ))\n        return self", "docstring": "Called by the various 'color' methods to colorize a single string.\nThe RESET_ALL code is appended to the string unless text is empty.\nRaises ValueError on invalid color names.\n\nArguments:\ntext  : String to colorize, or None for  BG/Style change.\nfore  : Name of fore color to use.\nback  : Name of back color to use.\nstyle : Name of style to use.", "source": "juraj-google-style"}
{"code": "def get_nltk_builder(languages):\n    \n    all_stemmers = []\n    all_stopwords_filters = []\n    all_word_characters = set()\n\n    for language in languages:\n        if language == \"en\":\n            \n            all_stemmers.append(lunr.stemmer.stemmer)\n            all_stopwords_filters.append(stop_word_filter)\n            all_word_characters.update({r\"\\w\"})\n        else:\n            stopwords, word_characters = _get_stopwords_and_word_characters(language)\n            all_stemmers.append(\n                Pipeline.registered_functions[\"stemmer-{}\".format(language)]\n            )\n            all_stopwords_filters.append(\n                generate_stop_word_filter(stopwords, language=language)\n            )\n            all_word_characters.update(word_characters)\n\n    builder = Builder()\n    multi_trimmer = generate_trimmer(\"\".join(sorted(all_word_characters)))\n    Pipeline.register_function(\n        multi_trimmer, \"lunr-multi-trimmer-{}\".format(\"-\".join(languages))\n    )\n    builder.pipeline.reset()\n\n    for fn in chain([multi_trimmer], all_stopwords_filters, all_stemmers):\n        builder.pipeline.add(fn)\n    for fn in all_stemmers:\n        builder.search_pipeline.add(fn)\n\n    return builder", "docstring": "Returns a builder with stemmers for all languages added to it.\n\nArgs:\nlanguages (list): A list of supported languages.", "source": "juraj-google-style"}
{"code": "def getModPath(self, *paths):\n        \n        dirn = self.getModDir()\n        return s_common.genpath(dirn, *paths)", "docstring": "Construct a path relative to this module's working directory.\n\nArgs:\n*paths: A list of path strings\n\nNotes:\nThis creates the module specific directory if it does not exist.\n\nReturns:\n(str): The full path (or None if no cortex dir is configured).", "source": "juraj-google-style"}
{"code": "def _randomize_direction(base_heading, sigma) -> int:\n    val = MissionWeather._gauss(base_heading, sigma)\n    val = MissionWeather._normalize_direction(val)\n    return val", "docstring": "Creates a variation in direction\n\nArgs:\nbase_heading: base direction\nsigma: sigma value for gaussian variation\n\nReturns: random direction", "source": "codesearchnet"}
{"code": "def get_slot(self, *args, **kwargs):\n    return self._opt.get_slot(*args, **kwargs)", "docstring": "Return a slot named \"name\" created for \"var\" by the Optimizer.\n\nThis simply wraps the get_slot() from the actual optimizer.\n\nArgs:\n*args: Arguments for get_slot().\n**kwargs: Keyword arguments for get_slot().\n\nReturns:\nThe `Variable` for the slot if it was created, `None` otherwise.", "source": "github-repos"}
{"code": "def verify(self, obj):\n        \n\n        if obj != self._literal:\n            raise ValidationError(\"Object is not equal to literal\",\n                                  reason='%s is not equal to %s' % (str(obj), str(self._literal)), object=obj)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def model_from_json(json_string, custom_objects=None):\n    from keras.src.saving import serialization_lib\n    model_config = json.loads(json_string)\n    return serialization_lib.deserialize_keras_object(model_config, custom_objects=custom_objects)", "docstring": "Parses a JSON model configuration string and returns a model instance.\n\nExample:\n\n>>> model = keras.Sequential([\n...     keras.layers.Dense(5, input_shape=(3,)),\n...     keras.layers.Softmax()])\n>>> config = model.to_json()\n>>> loaded_model = keras.models.model_from_json(config)\n\nArgs:\njson_string: JSON string encoding a model configuration.\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\n\nReturns:\nA Keras model instance (uncompiled).", "source": "github-repos"}
{"code": "def dump_orm_object_as_insert_sql(engine: Engine, obj: object, fileobj: TextIO) -> None:\n    insp = inspect(obj)\n    meta = MetaData(bind=engine)\n    table_name = insp.mapper.mapped_table.name\n    table = Table(table_name, meta, autoload=True)\n    query = select(table.columns)\n    for orm_pkcol in insp.mapper.primary_key:\n        core_pkcol = table.columns.get(orm_pkcol.name)\n        pkval = getattr(obj, orm_pkcol.name)\n        query = query.where((core_pkcol == pkval))\n    cursor = engine.execute(query)\n    row = cursor.fetchone()\n    row_dict = dict(row)\n    statement = table.insert(values=row_dict)\n    insert_str = get_literal_query(statement, bind=engine)\n    writeline_nl(fileobj, insert_str)", "docstring": "Takes a SQLAlchemy ORM object, and writes ``INSERT`` SQL to replicate it\nto the output file-like object.\n\nArgs:\nengine: SQLAlchemy :class:`Engine`\nobj: SQLAlchemy ORM object to write\nfileobj: file-like object to write to", "source": "codesearchnet"}
{"code": "def from_voigt(cls, voigt_input):\n    voigt_input = np.array(voigt_input)\n    rank = (sum(voigt_input.shape) \n    t = cls(np.zeros(([3] * rank)))\n    if (voigt_input.shape != t._vscale.shape):\n        raise ValueError('Invalid shape for voigt matrix')\n    voigt_input = (voigt_input / t._vscale)\n    this_voigt_map = t.get_voigt_dict(rank)\n    for ind in this_voigt_map:\n        t[ind] = voigt_input[this_voigt_map[ind]]\n    return cls(t)", "docstring": "Constructor based on the voigt notation vector or matrix.\n\nArgs:\nvoigt_input (array-like): voigt input for a given tensor", "source": "codesearchnet"}
{"code": "def _client_send(self, msg):\n        \n        try:\n            self._client.write(msg.encode(\"utf8\") + b'\\n')\n            self._client.flush()\n            self.log.debug('Snippet sent %s.', msg)\n        except socket.error as e:\n            raise Error(\n                self._ad,\n                'Encountered socket error \"%s\" sending RPC message \"%s\"' %\n                (e, msg))", "docstring": "Sends an Rpc message through the connection.\n\nArgs:\nmsg: string, the message to send.\n\nRaises:\nError: a socket error occurred during the send.", "source": "juraj-google-style"}
{"code": "def FindCoinsByVins(self, vins):\n    ret = []\n    for coin in self.GetCoins():\n        coinref = coin.Reference\n        for vin in vins:\n            if ((coinref.PrevIndex == vin.PrevIndex) and (coinref.PrevHash == vin.PrevHash)):\n                ret.append(coin)\n    return ret", "docstring": "Looks through the current collection of coins in a wallet\nand chooses coins that match the specified CoinReference objects.\n\nArgs:\nvins: A list of ``neo.Core.CoinReference`` objects.\n\nReturns:\nlist: A list of ``neo.Wallet.Coin`` objects.", "source": "codesearchnet"}
{"code": "def is_match(self, subject: Union[(Expression, FlatTerm)]) -> bool:\n    try:\n        next(self.match(subject))\n    except StopIteration:\n        return False\n    return True", "docstring": "Check if the given subject matches any pattern in the net.\n\nArgs:\nsubject:\nThe subject that is matched. Must be constant.\n\nReturns:\nTrue, if any pattern matches the subject.", "source": "codesearchnet"}
{"code": "def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=-1):\n    lr_lambda = partial(_get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles)\n    return LambdaLR(optimizer, lr_lambda, last_epoch)", "docstring": "Create a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\ninitial lr set in the optimizer.\n\nArgs:\noptimizer ([`~torch.optim.Optimizer`]):\nThe optimizer for which to schedule the learning rate.\nnum_warmup_steps (`int`):\nThe number of steps for the warmup phase.\nnum_training_steps (`int`):\nThe total number of training steps.\nnum_cycles (`float`, *optional*, defaults to 0.5):\nThe number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0\nfollowing a half-cosine).\nlast_epoch (`int`, *optional*, defaults to -1):\nThe index of the last epoch when resuming training.\n\nReturn:\n`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.", "source": "github-repos"}
{"code": "def json_compat_obj_encode(data_type, obj, caller_permissions=None, alias_validators=None,\n                           old_style=False, for_msgpack=False, should_redact=False):\n    \n    serializer = StoneToPythonPrimitiveSerializer(\n        caller_permissions, alias_validators, for_msgpack, old_style, should_redact)\n    return serializer.encode(data_type, obj)", "docstring": "Encodes an object into a JSON-compatible dict based on its type.\n\nArgs:\ndata_type (Validator): Validator for obj.\nobj (object): Object to be serialized.\ncaller_permissions (list): The list of raw-string caller permissions\nwith which to serialize.\n\nReturns:\nAn object that when passed to json.dumps() will produce a string\ngiving the JSON-encoded object.\n\nSee json_encode() for additional information about validation.", "source": "juraj-google-style"}
{"code": "def Run(self, request, global_params=None):\n    config = self.GetMethodConfig('Run')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Runs a `BuildTrigger` at a particular source revision.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsTriggersRunRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def call(self, batch_size: Optional[int], input_points: Optional[Tuple[tf.Tensor, tf.Tensor]], input_labels: tf.Tensor | None, input_boxes: tf.Tensor | None, input_masks: tf.Tensor | None) -> Tuple[tf.Tensor, tf.Tensor]:\n    sparse_embeddings = None\n    if input_points is not None:\n        batch_size, point_batch_size = shape_list(input_points)[:2]\n        if input_labels is None:\n            raise ValueError('If points are provided, labels must also be provided.')\n        point_embeddings = self._embed_points(input_points, input_labels, pad=input_boxes is None)\n        sparse_embeddings = tf.zeros((batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype)\n        sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2)\n    if input_boxes is not None:\n        batch_size = shape_list(input_boxes)[0]\n        box_embeddings = self._embed_boxes(input_boxes)\n        if sparse_embeddings is None:\n            sparse_embeddings = box_embeddings\n        else:\n            sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2)\n    if input_masks is not None:\n        dense_embeddings = self.mask_embed(input_masks)\n    else:\n        dense_embeddings = self.no_mask_embed[0]\n        dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1))\n        dense_embeddings = tf.tile(dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1]))\n    if sparse_embeddings is None:\n        sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype)\n    return (sparse_embeddings, dense_embeddings)", "docstring": "Embeds different types of prompts, returning both sparse and dense embeddings.\n\nArgs:\npoints (`tf.Tensor`, *optional*):\npoint coordinates and labels to embed.\nboxes (`tf.Tensor`, *optional*):\nboxes to embed\nmasks (`tf.Tensor`, *optional*):\nmasks to embed", "source": "github-repos"}
{"code": "def get(self, container_id):\n    resp = self.client.api.inspect_container(container_id)\n    return self.prepare_model(resp)", "docstring": "Get a container by name or ID.\n\nArgs:\ncontainer_id (str): Container name or ID.\n\nReturns:\nA :py:class:`Container` object.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the container does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def padFrameRange(frange, zfill):\n        \n        def _do_pad(match):\n            \n            result = list(match.groups())\n            result[1] = pad(result[1], zfill)\n            if result[4]:\n                result[4] = pad(result[4], zfill)\n            return ''.join((i for i in result if i))\n        return PAD_RE.sub(_do_pad, frange)", "docstring": "Return the zero-padded version of the frame range string.\n\nArgs:\nfrange (str): a frame range to test\nzfill (int):\n\nReturns:\nstr:", "source": "juraj-google-style"}
{"code": "def _FormatForCommand(token):\n    if not isinstance(token, str):\n        token = str(token)\n    if token.startswith('_'):\n        return token\n    return token.replace('_', '-')", "docstring": "Replaces underscores with hyphens, unless the token starts with a token.\n\nThis is because we typically prefer hyphens to underscores at the command\nline, but we reserve hyphens at the start of a token for flags. This becomes\nrelevant when --verbose is activated, so that things like __str__ don't get\ntransformed into --str--, which would get confused for a flag.\n\nArgs:\ntoken: The token to transform.\nReturns:\nThe transformed token.", "source": "github-repos"}
{"code": "def request(self, send_terminator = False):\n        \n        try:\n            retA = self.requestA()\n            retB = self.requestB()\n            if retA and retB:\n                self.makeAB()\n                self.calculateFields()\n                self.updateObservers()\n                return True\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        return False", "docstring": "Combined A and B read for V4 meter.\n\nArgs:\nsend_terminator (bool): Send termination string at end of read.\n\nReturns:\nbool: True on completion.", "source": "juraj-google-style"}
{"code": "def file_modify(filename, settings):\n    for (k, v) in settings.items():\n        if (k == 'mode'):\n            os.chmod(filename, v)\n        if (k == 'owners'):\n            os.chown(filename, v)", "docstring": "Modifies file access\n\nArgs:\nfilename (str): Filename.\nsettings (dict): Can be \"mode\" or \"owners\"", "source": "codesearchnet"}
{"code": "def Mint(self, wallet, mint_to_addr, attachment_args, invoke_attrs=None):\n    invoke_args = [self.ScriptHash.ToString(), 'mintTokens', []]\n    invoke_args = (invoke_args + attachment_args)\n    (tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True, from_addr=mint_to_addr, invoke_attrs=invoke_attrs)\n    return (tx, fee, results)", "docstring": "Call the \"mintTokens\" function of the smart contract.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\nmint_to_addr (str): public address of the account to mint the tokens to.\nattachment_args: (list): a list of arguments used to attach neo and/or gas to an invoke, eg ['--attach-gas=10.0','--attach-neo=3']\ninvoke_attrs: (list): a list of TransactionAttributes to be attached to the mint transaction\nReturns:\ntuple:\nInvocationTransaction: the transaction.\nint: the transaction fee.\nlist: the neo VM evaluation stack results.", "source": "codesearchnet"}
{"code": "def raster_dilation(rasterfile):\n    if is_string(rasterfile):\n        origin_raster = RasterUtilClass.read_raster(str(rasterfile))\n    elif isinstance(rasterfile, Raster):\n        origin_raster = rasterfile.data\n    elif isinstance(rasterfile, numpy.ndarray):\n        origin_raster = rasterfile\n    else:\n        return 'Your rasterfile has a wrong type. Type must be string or numpy.array or class Raster in pygeoc.'\n    min_value_raster = origin_raster.min()\n    dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))\n    add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster)\n    temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))\n    add_col = numpy.full(((origin_raster.shape[0] + 2), 1), min_value_raster)\n    expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))\n    for i in range(origin_raster.shape[0]):\n        for j in range(origin_raster.shape[1]):\n            max_pixel_value = min_value_raster\n            for k in range(3):\n                for l in range(3):\n                    if (expand_origin_raster[((i + k), (j + l))] >= max_pixel_value):\n                        max_pixel_value = expand_origin_raster[((i + k), (j + l))]\n                dilation_raster[(i, j)] = max_pixel_value\n    return dilation_raster", "docstring": "Dilate the raster image.\n\nFind the max pixel's value in 8-neighborhood. Then change the compute\npixel's value into the max pixel's value.\n\nArgs:\nrasterfile: input original raster image, type can be filename(string,\nlike \"test1.tif\"), rasterfile(class Raster) or numpy.ndarray.\n\nReturns:\ndilation_raster: raster image after dilation, type is numpy.ndarray.", "source": "codesearchnet"}
{"code": "def dom_processing(self, value):\n        \n        if value == self._defaults['domProcessing'] and 'domProcessing' in self._values:\n            del self._values['domProcessing']\n        else:\n            self._values['domProcessing'] = value", "docstring": "The dom_processing property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def __init__(self, app):\n        \n\n        self.app = app\n        self.user_manager = app.user_manager\n\n        \n        self.password_crypt_context = CryptContext(\n            schemes=self.user_manager.USER_PASSLIB_CRYPTCONTEXT_SCHEMES,\n            **self.user_manager.USER_PASSLIB_CRYPTCONTEXT_KEYWORDS)", "docstring": "Create a passlib CryptContext.\n\nArgs:\npassword_hash(str): The name of a valid passlib password hash.\nExamples: ``'bcrypt', 'pbkdf2_sha512', 'sha512_crypt' or 'argon2'``.\n\nExample:\n``password_manager = PasswordManager('bcrypt')``", "source": "juraj-google-style"}
{"code": "def picture_view(request, user_id, year=None):\n    try:\n        user = User.objects.get(id=user_id)\n    except User.DoesNotExist:\n        raise Http404\n    default_image_path = os.path.join(settings.PROJECT_ROOT, 'static/img/default_profile_pic.png')\n    if (user is None):\n        raise Http404\n    else:\n        if (year is None):\n            preferred = user.preferred_photo\n            if (preferred is None):\n                data = user.default_photo\n                if (data is None):\n                    image_buffer = io.open(default_image_path, mode='rb')\n                else:\n                    image_buffer = io.BytesIO(data)\n            else:\n                data = preferred.binary\n                if data:\n                    image_buffer = io.BytesIO(data)\n                else:\n                    image_buffer = io.open(default_image_path, mode='rb')\n        else:\n            grade_number = Grade.number_from_name(year)\n            if user.photos.filter(grade_number=grade_number).exists():\n                data = user.photos.filter(grade_number=grade_number).first().binary\n            else:\n                data = None\n            if data:\n                image_buffer = io.BytesIO(data)\n            else:\n                image_buffer = io.open(default_image_path, mode='rb')\n        response = HttpResponse(content_type='image/jpeg')\n        response['Content-Disposition'] = 'filename={}_{}.jpg'.format(user_id, (year or preferred))\n        try:\n            img = image_buffer.read()\n        except UnicodeDecodeError:\n            img = io.open(default_image_path, mode='rb').read()\n        image_buffer.close()\n        response.write(img)\n        return response", "docstring": "Displays a view of a user's picture.\n\nArgs:\nuser_id\nThe ID of the user whose picture is being fetched.\nyear\nThe user's picture from this year is fetched. If not\nspecified, use the preferred picture.", "source": "codesearchnet"}
{"code": "def call_later(self, delay, callback):\n        \n        if hasattr(self._connection.ioloop, \"call_later\"):\n            self._connection.ioloop.call_later(delay, callback)\n        else:\n            self._connection.ioloop.add_timeout(delay, callback)", "docstring": "Schedule a one-shot timeout given delay seconds.\n\nThis method is only useful for compatibility with older versions of pika.\n\nArgs:\ndelay (float): Non-negative number of seconds from now until\nexpiration\ncallback (method): The callback method, having the signature\n`callback()`", "source": "juraj-google-style"}
{"code": "def collect(val, collections, default_collections):\n    if collections is None:\n        collections = default_collections\n    for key in collections:\n        ops.add_to_collection(key, val)", "docstring": "Adds keys to a collection.\n\nArgs:\nval: The value to add per each key.\ncollections: A collection of keys to add.\ndefault_collections: Used if collections is None.", "source": "github-repos"}
{"code": "def __init__(self, user_assist_guid):\n    \n    key_path = self._KEY_PATH_FORMAT.format(user_assist_guid)\n    super(UserAssistWindowsRegistryKeyPathFilter, self).__init__(key_path)", "docstring": "Initializes Windows Registry key filter.\n\nArgs:\nuser_assist_guid (str): UserAssist GUID.", "source": "juraj-google-style"}
{"code": "def _GetTableNames(self, database):\n    table_names = []\n    for esedb_table in database.tables:\n        table_names.append(esedb_table.name)\n    return table_names", "docstring": "Retrieves the table names in a database.\n\nArgs:\ndatabase (pyesedb.file): ESE database.\n\nReturns:\nlist[str]: table names.", "source": "codesearchnet"}
{"code": "def filter_lines(lines, filter_regex, groups=None):\n    \n    pattern = re.compile(filter_regex)\n    for line in lines:\n        match = pattern.search(line)\n        if match:\n            if groups is None:\n                yield line\n            elif len(groups) == 1:\n                yield match.group(groups[0])\n            else:\n                matched_groups = match.groupdict()\n                yield tuple(matched_groups.get(group) for group in groups)", "docstring": "Filters out the lines not matching the pattern.\n\nArgs:\nlines: list[string]: lines to filter.\npattern: string: regular expression to filter out lines.\n\nReturns: list[string]: the list of filtered lines.", "source": "juraj-google-style"}
{"code": "def main(params=None):\n    \n    \n    parser = getParser()\n\n    if params != None:\n        args = parser.parse_args(params)\n    else:\n        args = parser.parse_args()\n\n    print(general.title(banner.text))\n\n    sayingHello =  + general.LICENSE_URL + \"\\n\"\n    print(general.info(sayingHello))\n\n    \n    urlDict = {}\n    if args.url !=None:\n        urlDict[str(args.url)] = None\n    elif args.platforms != None:\n        for p in args.platforms:\n            with open(args.config, \"r\") as iF:\n                lines = iF.read().splitlines()\n                for l in lines:\n                    platform = l.split('\\t')[0]\n                    url = l.split('\\t')[1]\n                    notFound = l.split('\\t')[2]\n                    if p == platform:\n                        urlDict[url] = notFound\n\n    if not os.path.exists(args.output_folder):\n        os.makedirs(args.output_folder)\n\n    \n    enumerateURL(urlDict, args.output_folder, startIndex = args.start_index, maxErrors = args.max_errors)", "docstring": "Main loop for the enumeration\n\nArgs:\n-----\nparams: A list with the parameters as grabbed by the terminal. It is\nNone when this is called by an entry_point.", "source": "juraj-google-style"}
{"code": "def _generate_key_map(entity_list, key, entity_class):\n    \n\n    key_map = {}\n    for obj in entity_list:\n      key_map[obj[key]] = entity_class(**obj)\n\n    return key_map", "docstring": "Helper method to generate map from key to entity object for given list of dicts.\n\nArgs:\nentity_list: List consisting of dict.\nkey: Key in each dict which will be key in the map.\nentity_class: Class representing the entity.\n\nReturns:\nMap mapping key to entity object.", "source": "juraj-google-style"}
{"code": "def type(self, value):\n        \n        if value == self._defaults['type'] and 'type' in self._values:\n            del self._values['type']\n        else:\n            self._values['type'] = value", "docstring": "The type property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def register(self, name):\n\n    def decorator(func):\n        'Inner decorator, not used directly.\\n\\n            Args:\\n                func: obj. Parameterless function to register.\\n\\n            Returns:\\n                func: decorated function.\\n            '\n        self.logic[name] = func\n\n        @wraps(func)\n        def wrapper():\n            'Wrapper, not used directly.'\n            raise RuntimeError('working outside of request context')\n        return wrapper\n    return decorator", "docstring": "Decorator for registering a named function in the sesion logic.\n\nArgs:\nname: str. Function name.\nfunc: obj. Parameterless function to register.\n\nThe following named functions must be registered:\n'LaunchRequest' - logic for launch request.\n'SessionEndedRequest': logic for session ended request.\n\nIn addition, all intents must be registered by their names specified\nin the intent schema.\n\nThe aliased decorators: @launch, @intent(name), and @session_ended exist\nas a convenience for registering specific functions.", "source": "codesearchnet"}
{"code": "def stop_standing_subprocess(proc):\n    import psutil\n    pid = proc.pid\n    logging.debug('Stopping standing subprocess %d', pid)\n    process = psutil.Process(pid)\n    failed = []\n    try:\n        children = process.children(recursive=True)\n    except AttributeError:\n        children = process.get_children(recursive=True)\n    for child in children:\n        try:\n            child.kill()\n            child.wait(timeout=10)\n        except psutil.NoSuchProcess:\n            pass\n        except:\n            failed.append(child.pid)\n            logging.exception('Failed to kill standing subprocess %d', child.pid)\n    try:\n        process.kill()\n        process.wait(timeout=10)\n    except psutil.NoSuchProcess:\n        pass\n    except:\n        failed.append(pid)\n        logging.exception('Failed to kill standing subprocess %d', pid)\n    if failed:\n        raise Error(('Failed to kill standing subprocesses: %s' % failed))\n    if proc.stdout:\n        proc.stdout.close()\n    if proc.stderr:\n        proc.stderr.close()\n    proc.wait()\n    logging.debug('Stopped standing subprocess %d', pid)", "docstring": "Stops a subprocess started by start_standing_subprocess.\n\nBefore killing the process, we check if the process is running, if it has\nterminated, Error is raised.\n\nCatches and ignores the PermissionError which only happens on Macs.\n\nArgs:\nproc: Subprocess to terminate.\n\nRaises:\nError: if the subprocess could not be stopped.", "source": "codesearchnet"}
{"code": "def order_verification(self, institute, case, user, link, variant):\n        \n        LOG.info(\"Creating event for ordering validation for variant\" \\\n                    \" {0}\".format(variant['display_name']))\n\n        updated_variant = self.variant_collection.find_one_and_update(\n            {'_id': variant['_id']},\n            {'$set': {'sanger_ordered': True}},\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='variant',\n            verb='sanger',\n            variant=variant,\n            subject=variant['display_name'],\n        )\n\n        LOG.info(\"Creating event for ordering sanger for case\" \\\n                    \" {0}\".format(case['display_name']))\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='case',\n            verb='sanger',\n            variant=variant,\n            subject=variant['display_name'],\n        )\n        return updated_variant", "docstring": "Create an event for a variant verification for a variant\nand an event for a variant verification for a case\n\nArguments:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (str): The url to be used in the event\nvariant (dict): A variant object\n\nReturns:\nupdated_variant(dict)", "source": "juraj-google-style"}
{"code": "def scalar_spec(value_spec: pg.typing.ValueSpec) -> pg.typing.ValueSpec:\n    return pg.typing.Union([value_spec, pg.typing.Callable([pg.typing.Int()], returns=value_spec)])", "docstring": "Returns the value spec for a schedule scalar.\n\nArgs:\nvalue_spec: a value spec for the schedule-based scalar type.\n\nReturns:\nA value spec for either the value itself or a callable that produces such\nvalue based on a step (integer).", "source": "github-repos"}
{"code": "def fermi_fourier_trans_inverse_4(qubits):\n    (yield (fswap(qubits[1], qubits[2]),))\n    (yield fermi_fourier_trans_2(qubits[0], qubits[1]))\n    (yield fermi_fourier_trans_2(qubits[2], qubits[3]))\n    (yield fswap(qubits[1], qubits[2]))\n    (yield fermi_fourier_trans_2(qubits[0], qubits[1]))\n    (yield cirq.S(qubits[2]))\n    (yield fermi_fourier_trans_2(qubits[2], qubits[3]))\n    (yield fswap(qubits[1], qubits[2]))", "docstring": "The reverse fermionic Fourier transformation implemented on 4 qubits\non a line, which maps the momentum picture to the position picture.\nUsing the fast Fourier transformation algorithm, the circuit can be\ndecomposed into 2-mode fermionic Fourier transformation, the fermionic\nSWAP gates, and single-qubit rotations.\n\nArgs:\nqubits: list of four qubits", "source": "codesearchnet"}
{"code": "def load_tiff_multipage(tiff_filename, dtype='float32'):\n    \n    if not os.path.isfile(tiff_filename):\n        raise RuntimeError('could not find file \"%s\"' % tiff_filename)\n\n    \n    data = tiff.imread(tiff_filename)\n\n    im = []\n\n    while True:\n\n        Xi = numpy.array(data, dtype=dtype)\n        if Xi.ndim == 2:\n            Xi = Xi[numpy.newaxis, ...]  \n        im.append(Xi)\n\n        try:\n            data.seek(data.tell()+1)\n        except EOFError:\n            break  \n\n    im = numpy.concatenate(im, axis=0)  \n    im = numpy.rollaxis(im, 1)\n    im = numpy.rollaxis(im, 2)\n\n    return im", "docstring": "Load a multipage tiff into a single variable in x,y,z format.\n\nArguments:\ntiff_filename:     Filename of source data\ndtype:             data type to use for the returned tensor\n\nReturns:\nArray containing contents from input tiff file in xyz order", "source": "juraj-google-style"}
{"code": "def read(self, queue, name=None):\n    if isinstance(queue, tensor_lib.Tensor):\n        queue_ref = queue\n    else:\n        queue_ref = queue.queue_ref\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_read_v2(self._reader_ref, queue_ref, name=name)\n    else:\n        old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)\n        return gen_io_ops.reader_read(self._reader_ref, old_queue_op, name=name)", "docstring": "Returns the next record (key, value) pair produced by a reader.\n\nWill dequeue a work unit from queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has\nfinished with the previous file).\n\nArgs:\nqueue: A Queue or a mutable string Tensor representing a handle\nto a Queue, with string work items.\nname: A name for the operation (optional).\n\nReturns:\nA tuple of Tensors (key, value).\nkey: A string scalar Tensor.\nvalue: A string scalar Tensor.", "source": "github-repos"}
{"code": "def upload_file(filename: str, config: Config, full_table_id: str, action: Action=Action.APPEND, service_account_email: Optional[str]=None) -> None:\n    if service_account_email:\n        auth_config = AuthConfig(service_account_email=service_account_email)\n    else:\n        auth_config = None\n    table_metadata = build_table_metadata(full_table_id)\n    credentials = get_credentials(auth_config)\n    bq_legacy_client = get_bq_legacy_client(table_metadata.project_id, credentials)\n    table_exists = get_table(bq_legacy_client, table_metadata) is not None\n    if table_exists and action == Action.REPLACE:\n        bq_legacy_client.delete_table(table_metadata.full_table_id)\n        table_exists = False\n    if not table_exists:\n        schema = generate_bigquery_schema(config)\n        create_table(bq_legacy_client, table_metadata, schema)\n    row_count = get_csv_row_count(filename)\n    columns = [column.bq_name for column in config]\n    with open(filename, 'r') as f:\n        csv = DictReader(f, fieldnames=columns)\n        buffer_: List[Row] = []\n        buffer_size = row_count \n        buffer = Buffer[Row](buffer_, buffer_size, lambda rows: upload_rows(bq_legacy_client, table_metadata, rows))\n        for row in csv:\n            buffer.push(row)\n        buffer.flush(force=True)", "docstring": "Upload a data file conforming to the given config to BigQuery.\n\nArgs:\n* filename: Local path to csv file to be uploaded\n* config: Config key from configs.CONFIGS dictionary\n* full_table_id: BigQuery table id\n* action: APPEND to table or REPLACE table\n* service_account_email: Email address of service account", "source": "github-repos"}
{"code": "def register_tensor_conversion_function_internal(base_type, conversion_func, priority=100):\n    base_types = base_type if isinstance(base_type, tuple) else (base_type,)\n    if any((not isinstance(x, type) for x in base_types)):\n        raise TypeError(f'Argument `base_type` must be a type or a tuple of types. Obtained: {base_type}')\n    del base_types\n    if not callable(conversion_func):\n        raise TypeError(f'Argument `conversion_func` must be callable. Received {conversion_func}.')\n    with _tensor_conversion_func_lock:\n        _tensor_conversion_func_registry[priority].append((base_type, conversion_func))\n        _tensor_conversion_func_cache.clear()", "docstring": "Internal version of register_tensor_conversion_function.\n\nSee docstring of `register_tensor_conversion_function` for details.\n\nThe internal version of the function allows registering conversions\nfor types in the _UNCONVERTIBLE_TYPES tuple.\n\nArgs:\nbase_type: The base type or tuple of base types for all objects that\n`conversion_func` accepts.\nconversion_func: A function that converts instances of `base_type` to\n`Tensor`.\npriority: Optional integer that indicates the priority for applying this\nconversion function. Conversion functions with smaller priority values run\nearlier than conversion functions with larger priority values. Defaults to\n100.\n\nRaises:\nTypeError: If the arguments do not have the appropriate type.", "source": "github-repos"}
{"code": "def LockRetryWrapper(self,\n                       subject,\n                       retrywrap_timeout=1,\n                       retrywrap_max_timeout=10,\n                       blocking=True,\n                       lease_time=None):\n    \n    timeout = 0\n    while timeout < retrywrap_max_timeout:\n      try:\n        return self.DBSubjectLock(subject, lease_time=lease_time)\n      except DBSubjectLockError:\n        if not blocking:\n          raise\n        stats_collector_instance.Get().IncrementCounter(\"datastore_retries\")\n        time.sleep(retrywrap_timeout)\n        timeout += retrywrap_timeout\n\n    raise DBSubjectLockError(\"Retry number exceeded.\")", "docstring": "Retry a DBSubjectLock until it succeeds.\n\nArgs:\nsubject: The subject which the lock applies to.\nretrywrap_timeout: How long to wait before retrying the lock.\nretrywrap_max_timeout: The maximum time to wait for a retry until we\nraise.\nblocking: If False, raise on first lock failure.\nlease_time: lock lease time in seconds.\n\nReturns:\nThe DBSubjectLock object\n\nRaises:\nDBSubjectLockError: If the maximum retry count has been reached.", "source": "juraj-google-style"}
{"code": "def run_user_main(wrapped_test_module):\n    tree = ast.parse(tf_inspect.getsource(wrapped_test_module))\n    target = ast.dump(ast.parse('if __name__ == \"__main__\": pass').body[0].test)\n    for expr in reversed(tree.body):\n        if isinstance(expr, ast.If) and ast.dump(expr.test) == target:\n            break\n    else:\n        raise NotImplementedError(f'Could not find `if __name__ == \"main\":` block in {wrapped_test_module.__name__}.')\n    new_ast = ast.Module(body=expr.body, type_ignores=[])\n    exec(compile(new_ast, '<ast>', 'exec'), globals(), wrapped_test_module.__dict__)", "docstring": "Runs the \"if __name__ == '__main__'\" at the bottom of a module.\n\nTensorFlow practice is to have a main if at the bottom of the module which\nmight call an API compat function before calling test.main().\n\nSince this is a statement, not a function, we can't cleanly reference it, but\nwe can inspect it from the user module and run it in the context of that\nmodule so all imports and variables are available to it.\n\nArgs:\nwrapped_test_module: The user-provided test code to run.\n\nRaises:\nNotImplementedError: If main block was not found in module. This should not\nbe caught, as it is likely an error on the user's part -- absltest is all\ntoo happy to report a successful status (and zero tests executed) if a\nuser forgets to end a class with \"test.main()\".", "source": "github-repos"}
{"code": "def get_reference_points(spatial_shapes, valid_ratios, device):\n    reference_points_list = []\n    for level, (height, width) in enumerate(spatial_shapes):\n        ref_y, ref_x = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing='ij')\n        ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)\n        ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)\n        ref = torch.stack((ref_x, ref_y), -1)\n        reference_points_list.append(ref)\n    reference_points = torch.cat(reference_points_list, 1)\n    reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n    return reference_points", "docstring": "Get reference points for each feature map. Used in decoder.\n\nArgs:\nspatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):\nSpatial shapes of each feature map.\nvalid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):\nValid ratios of each feature map.\ndevice (`torch.device`):\nDevice on which to create the tensors.\nReturns:\n`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`", "source": "github-repos"}
{"code": "def get(self):\n    if self.call_queue:\n        return self.apply((lambda df: df)).data\n    else:\n        return self.data.copy()", "docstring": "Flushes the call_queue and returns the data.\n\nNote: Since this object is a simple wrapper, just return the data.\n\nReturns:\nThe object that was `put`.", "source": "codesearchnet"}
{"code": "def wulff_from_chempot(self, delu_dict=None, delu_default=0, symprec=1e-05, no_clean=False, no_doped=False):\n    latt = SpacegroupAnalyzer(self.ucell_entry.structure).get_conventional_standard_structure().lattice\n    miller_list = self.all_slab_entries.keys()\n    e_surf_list = []\n    for hkl in miller_list:\n        gamma = self.get_stable_entry_at_u(hkl, delu_dict=delu_dict, delu_default=delu_default, no_clean=no_clean, no_doped=no_doped)[1]\n        e_surf_list.append(gamma)\n    return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)", "docstring": "Method to get the Wulff shape at a specific chemical potential.\n\nArgs:\ndelu_dict (Dict): Dictionary of the chemical potentials to be set as\nconstant. Note the key should be a sympy Symbol object of the\nformat: Symbol(\"delu_el\") where el is the name of the element.\ndelu_default (float): Default value for all unset chemical potentials\nsymprec (float): See WulffShape.\nno_doped (bool): Consider stability of clean slabs only.\nno_clean (bool): Consider stability of doped slabs only.\n\nReturns:\n(WulffShape): The WulffShape at u_ref and u_ads.", "source": "codesearchnet"}
{"code": "def destroy_walker(self, walker):\n        \n\n        if walker.buffered:\n            self._queue_walkers.remove(walker)\n        else:\n            self._virtual_walkers.remove(walker)", "docstring": "Destroy a previously created stream walker.\n\nArgs:\nwalker (StreamWalker): The walker to remove from internal updating\nlists.", "source": "juraj-google-style"}
{"code": "def get_service_credentials(pipeline_options):\n    return _Credentials.get_service_credentials(pipeline_options)", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nGet credentials to access Google services.\nArgs:\npipeline_options: Pipeline options, used in creating credentials\nlike impersonated credentials.\n\nReturns:\nA ``_ApitoolsCredentialsAdapter`` object or None if credentials\nnot found. Returned object is thread-safe.", "source": "github-repos"}
{"code": "def __init__(self, devpath):\n        \n        self._fd = None\n        self._devpath = None\n        self._open(devpath)", "docstring": "Instantiate an I2C object and open the i2c-dev device at the\nspecified path.\n\nArgs:\ndevpath (str): i2c-dev device path.\n\nReturns:\nI2C: I2C object.\n\nRaises:\nI2CError: if an I/O or OS error occurs.", "source": "juraj-google-style"}
{"code": "def is_valid_package_name(name, raise_error=False):\n    is_valid = PACKAGE_NAME_REGEX.match(name)\n    if (raise_error and (not is_valid)):\n        raise PackageRequestError(('Not a valid package name: %r' % name))\n    return is_valid", "docstring": "Test the validity of a package name string.\n\nArgs:\nname (str): Name to test.\nraise_error (bool): If True, raise an exception on failure\n\nReturns:\nbool.", "source": "codesearchnet"}
{"code": "def process_event(event):\n    \n    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:\n        print()\n\n    print(event)\n\n    if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and\n            event.args and not event.args['with_follow_on_turn']):\n        print()\n    if event.type == EventType.ON_DEVICE_ACTION:\n        for command, params in event.actions:\n            print('Do command', command, 'with params', str(params))", "docstring": "Pretty prints events.\n\nPrints all events that occur with two spaces between each new\nconversation and a single space between turns of a conversation.\n\nArgs:\nevent(event.Event): The current event to process.", "source": "juraj-google-style"}
{"code": "def retrieve_template(self):\n    links = self.retrieve_instance_links()\n    self.log.debug('Links is \\n%s', pformat(links))\n    self.pipeline_config['instance_links'].update(links)\n    jsondata = get_template(template_file='infrastructure/app_data.json.j2', appinfo=self.appinfo, pipeline_config=self.pipeline_config, formats=self.generated, run_as_user=DEFAULT_RUN_AS_USER)\n    self.log.debug('jsondata is %s', pformat(jsondata))\n    return jsondata", "docstring": "Sets the instance links with pipeline_configs and then renders template files\n\nReturns:\njsondata: A json objects containing templates", "source": "codesearchnet"}
{"code": "def value_report(self, address, zipcode, report_type=\"full\", format_type=\"json\"):\n        \n        query_params = {\n            \"report_type\": report_type,\n            \"format\": format_type,\n            \"address\": address,\n            \"zipcode\": zipcode\n        }\n\n        return self._api_client.fetch_synchronous(\"property/value_report\", query_params)", "docstring": "Call the value_report component\n\nValue Report only supports a single address.\n\nArgs:\n- address\n- zipcode\n\nKwargs:\n- report_type - \"full\" or \"summary\". Default is \"full\".\n- format_type - \"json\", \"pdf\", \"xlsx\" or \"all\". Default is \"json\".", "source": "juraj-google-style"}
{"code": "def get_enterprise_customer_or_404(enterprise_uuid):\n    EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer')\n    try:\n        enterprise_uuid = UUID(enterprise_uuid)\n        return EnterpriseCustomer.objects.get(uuid=enterprise_uuid)\n    except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):\n        LOGGER.error('Unable to find enterprise customer for UUID: [%s]', enterprise_uuid)\n        raise Http404", "docstring": "Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.\n\nArguments:\nenterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.\n\nReturns:\n(EnterpriseCustomer): The EnterpriseCustomer given the UUID.", "source": "codesearchnet"}
{"code": "def create(cls, application_namespace, application_data):\n    namespace = ApplicationNamespace(application_namespace)\n    data = ApplicationData(application_data)\n    return ApplicationSpecificInformation(application_namespace=namespace, application_data=data)", "docstring": "Construct an ApplicationSpecificInformation object from provided data\nand namespace values.\n\nArgs:\napplication_namespace (str): The name of the application namespace.\napplication_data (str): Application data related to the namespace.\n\nReturns:\nApplicationSpecificInformation: The newly created set of\napplication information.\n\nExample:\n>>> x = ApplicationSpecificInformation.create('namespace', 'data')\n>>> x.application_namespace.value\n'namespace'\n>>> x.application_data.value\n'data'", "source": "codesearchnet"}
{"code": "def set_quickchart_resource(self, resource):\n    if (isinstance(resource, int) and (not isinstance(resource, bool))):\n        resource = self.get_resources()[resource]\n    if (isinstance(resource, hdx.data.resource.Resource) or isinstance(resource, dict)):\n        res = resource.get('id')\n        if (res is None):\n            resource = resource['name']\n        else:\n            resource = res\n    elif (not isinstance(resource, str)):\n        raise hdx.data.hdxobject.HDXError(('Resource id cannot be found in type %s!' % type(resource).__name__))\n    if (is_valid_uuid(resource) is True):\n        search = 'id'\n    else:\n        search = 'name'\n    changed = False\n    for dataset_resource in self.resources:\n        if (dataset_resource[search] == resource):\n            dataset_resource.enable_dataset_preview()\n            self.preview_resource()\n            changed = True\n        else:\n            dataset_resource.disable_dataset_preview()\n    return changed", "docstring": "Set the resource that will be used for displaying QuickCharts in dataset preview\n\nArgs:\nresource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position\n\nReturns:\nbool: Returns True if resource for QuickCharts in dataset preview set or False if not", "source": "codesearchnet"}
{"code": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None):\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if target_sizes is not None:\n        if len(out_logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    prob = nn.functional.softmax(out_logits, -1)\n    scores, labels = prob[..., :-1].max(-1)\n    boxes = center_to_corners_format(out_bbox)\n    if target_sizes is not None:\n        if isinstance(target_sizes, List):\n            img_h = torch.Tensor([i[0] for i in target_sizes])\n            img_w = torch.Tensor([i[1] for i in target_sizes])\n        else:\n            img_h, img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n        boxes = boxes * scale_fct[:, None, :]\n    results = []\n    for s, l, b in zip(scores, labels, boxes):\n        score = s[s > threshold]\n        label = l[s > threshold]\n        box = b[s > threshold]\n        results.append({'scores': score, 'labels': label, 'boxes': box})\n    return results", "docstring": "Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\nbottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrObjectDetectionOutput`]):\nRaw outputs of the model.\nthreshold (`float`, *optional*):\nScore threshold to keep object detection predictions.\ntarget_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n`(height, width)` of each image in the batch. If unset, predictions will not be resized.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def setup(self, socket_type, complete_or_error_queue):\n        \n        try:\n            if self._secured:\n                if self._server_public_key is None or \\\n                        self._server_private_key is None:\n                    raise LocalConfigurationError(\n                        \"Attempting to start socket in secure mode, \"\n                        \"but complete server keys were not provided\")\n\n            self._event_loop = zmq.asyncio.ZMQEventLoop()\n            asyncio.set_event_loop(self._event_loop)\n            self._context = zmq.asyncio.Context()\n            self._socket = self._context.socket(socket_type)\n\n            self._socket.set(zmq.TCP_KEEPALIVE, 1)\n            self._socket.set(zmq.TCP_KEEPALIVE_IDLE, self._connection_timeout)\n            self._socket.set(zmq.TCP_KEEPALIVE_INTVL, self._heartbeat_interval)\n\n            if socket_type == zmq.DEALER:\n                self._socket.identity = \"{}-{}\".format(\n                    self._zmq_identity,\n                    hashlib.sha512(uuid.uuid4().hex.encode()\n                                   ).hexdigest()[:23]).encode('ascii')\n\n                if self._secured:\n                    \n\n                    public_key, secretkey = zmq.curve_keypair()\n                    self._socket.curve_publickey = public_key\n                    self._socket.curve_secretkey = secretkey\n                    self._socket.curve_serverkey = self._server_public_key\n\n                self._socket.connect(self._address)\n            elif socket_type == zmq.ROUTER:\n                if self._secured:\n                    auth = AsyncioAuthenticator(self._context)\n                    self._auth = auth\n                    auth.start()\n                    auth.configure_curve(domain='*',\n                                         location=zmq.auth.CURVE_ALLOW_ANY)\n\n                    self._socket.curve_secretkey = self._server_private_key\n                    self._socket.curve_publickey = self._server_public_key\n                    self._socket.curve_server = True\n\n                try:\n                    self._socket.bind(self._address)\n                except zmq.error.ZMQError as e:\n                    raise LocalConfigurationError(\n                        \"Can't bind to {}: {}\".format(self._address,\n                                                      str(e)))\n                else:\n                    LOGGER.info(\"Listening on %s\", self._address)\n\n            self._dispatcher.add_send_message(self._connection,\n                                              self.send_message)\n            self._dispatcher.add_send_last_message(self._connection,\n                                                   self.send_last_message)\n\n            asyncio.ensure_future(self._remove_expired_futures(),\n                                  loop=self._event_loop)\n\n            asyncio.ensure_future(self._receive_message(),\n                                  loop=self._event_loop)\n\n            asyncio.ensure_future(self._dispatch_message(),\n                                  loop=self._event_loop)\n\n            self._dispatcher_queue = asyncio.Queue()\n\n            if self._monitor:\n                self._monitor_fd = \"inproc:\n                    _generate_id()[0:5])\n                self._monitor_sock = self._socket.get_monitor_socket(\n                    zmq.EVENT_DISCONNECTED,\n                    addr=self._monitor_fd)\n                
asyncio.ensure_future(self._monitor_disconnects(),\n                                      loop=self._event_loop)\n\n        except Exception as e:\n            \n            \n            complete_or_error_queue.put_nowait(e)\n            self._close_sockets()\n            raise\n\n        if self._heartbeat:\n            asyncio.ensure_future(self._do_heartbeat(), loop=self._event_loop)\n\n        \n        complete_or_error_queue.put_nowait(_STARTUP_COMPLETE_SENTINEL)\n\n        asyncio.ensure_future(self._notify_started(), loop=self._event_loop)\n\n        self._event_loop.run_forever()\n        \n        \n        self._event_loop.close()\n        self._close_sockets()", "docstring": "Setup the asyncio event loop.\n\nArgs:\nsocket_type (int from zmq.*): One of zmq.DEALER or zmq.ROUTER\ncomplete_or_error_queue (queue.Queue): A way to propagate errors\nback to the calling thread. Needed since this function is\ndirectly used in Thread.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def enc(self, byts, asscd=None):\n    iv = os.urandom(16)\n    encryptor = AESGCM(self.ekey)\n    byts = encryptor.encrypt(iv, byts, asscd)\n    envl = {'iv': iv, 'data': byts, 'asscd': asscd}\n    return s_msgpack.en(envl)", "docstring": "Encrypt the given bytes and return an envelope dict in msgpack form.\n\nArgs:\nbyts (bytes): The message to be encrypted.\nasscd (bytes): Extra data that needs to be authenticated (but not encrypted).\n\nReturns:\nbytes: The encrypted message. This is a msgpacked dictionary\ncontaining the IV, ciphertext, and associated data.", "source": "codesearchnet"}
{"code": "def __init__(self, name: str, snap_type: str):\n        \n        self._type = snap_type\n        self._channel = SnapshotChannel()\n        Command.__init__(self, duration=0, name=name)\n        Instruction.__init__(self, self, self._channel, name=name)", "docstring": "Create new snapshot command.\n\nArgs:\nname (str): Snapshot name which is used to identify the snapshot in the output.\nsnap_type (str): Type of snapshot, e.g., “state” (take a snapshot of the quantum state).\nThe types of snapshots offered are defined in a separate specification\ndocument for simulators.", "source": "juraj-google-style"}
{"code": "def step(self, actions):\n    \n    if self._store_rollouts and \\\n        self._rollouts_by_epoch_and_split[self.current_epoch]:\n      raise ValueError(\n          \"Data for current epoch has already been loaded from disk.\"\n      )\n    (obs, unclipped_rewards, dones) = self._step(actions)\n    obs = self._preprocess_observations(obs)\n    (min_reward, max_reward) = self.reward_range\n    rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))\n    if self._store_rollouts:\n      unclipped_rewards = unclipped_rewards.astype(np.float64)\n      encoded_obs = self._encode_observations(obs)\n      for (rollout, frame, action) in zip(\n          self._current_batch_rollouts, self._current_batch_frames, actions\n      ):\n        rollout.append(frame._replace(action=action))\n\n      \n      self._current_batch_frames = [\n          Frame(*orud, action=None)\n          for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)\n      ]\n    return (obs, rewards, dones)", "docstring": "Makes a step in all environments.\n\nDoes any preprocessing and records frames.\n\nArgs:\nactions: Batch of actions.\n\nReturns:\n(obs, rewards, dones) - batches of observations, rewards and done flags\nrespectively.\n\nRaises:\nValueError: when the data for current epoch has already been loaded.", "source": "juraj-google-style"}
{"code": "def forward(self, image_embeddings: torch.Tensor, image_positional_embeddings: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, output_attentions: Optional[bool]=None, attention_similarity: Optional[torch.Tensor]=None, target_embedding: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n    batch_size, num_channels, height, width = image_embeddings.shape\n    point_batch_size = sparse_prompt_embeddings.shape[1]\n    output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n    output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)\n    if sparse_prompt_embeddings.sum().item() != 0:\n        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)\n    else:\n        tokens = output_tokens\n    point_embeddings = tokens.to(self.iou_token.weight.dtype)\n    image_embeddings = image_embeddings + dense_prompt_embeddings\n    image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0)\n    image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)\n    point_embedding, image_embeddings, attentions = self.transformer(point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, output_attentions=output_attentions)\n    iou_token_out = point_embedding[:, :, 0, :]\n    mask_tokens_out = point_embedding[:, :, 1:1 + self.num_mask_tokens, :]\n    image_embeddings = image_embeddings.transpose(2, 3).reshape(batch_size * point_batch_size, num_channels, height, width)\n    upscaled_embedding = self.upscale_conv1(image_embeddings)\n    upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))\n    upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))\n    hyper_in_list = []\n    for i in range(self.num_mask_tokens):\n        current_mlp = self.output_hypernetworks_mlps[i]\n        hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]\n    hyper_in = torch.stack(hyper_in_list, dim=2)\n    _, num_channels, height, width = upscaled_embedding.shape\n    upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width)\n    masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width)\n    iou_pred = self.iou_prediction_head(iou_token_out)\n    if multimask_output:\n        mask_slice = slice(1, None)\n    else:\n        mask_slice = slice(0, 1)\n    masks = masks[:, :, mask_slice, :, :]\n    iou_pred = iou_pred[:, :, mask_slice]\n    outputs = (masks, iou_pred)\n    if output_attentions:\n        outputs = outputs + (attentions,)\n    else:\n        outputs = outputs + (None,)\n    return outputs", "docstring": "Predict masks given image and prompt embeddings.\n\nArgs:\nimage_embeddings (`torch.Tensor`):\nthe embeddings from the image encoder\nimage_positional_embedding (`torch.Tensor`):\npositional encoding with the shape of image_embeddings\nsparse_prompt_embeddings (`torch.Tensor`):\nThe embeddings of the points and boxes\ndense_prompt_embeddings (`torch.Tensor`):\nthe embeddings of the mask inputs\nmultimask_output (bool):\nWhether to return multiple masks or a single mask.\noutput_attentions (bool, *optional*):\nWhether or not to return the attentions tensors of all attention layers.", "source": "github-repos"}
{"code": "def _add_remove_team_member(self, url, email_address=None, account_id=None):\n        \n\n        if not email_address and not account_id:\n            raise HSException(\"No email address or account_id specified\")\n\n        data = {}\n        if account_id is not None:\n            data = {\n                \"account_id\": account_id\n            }\n        else:\n            data = {\n                \"email_address\": email_address\n            }\n\n        request = self._get_request()\n        response = request.post(url, data)\n\n        return response", "docstring": "Add or Remove a team member\n\nWe use this function for two different tasks because they have the same\nAPI call\n\nArgs:\n\nemail_address (str):    Email address of the Account to add/remove\n\naccount_id (str):       ID of the Account to add/remove\n\nReturns:\nA Team object", "source": "juraj-google-style"}
{"code": "def _scope_vals(self, vals):\n    if isinstance(vals, (list, tuple)):\n        return vals\n    elif isinstance(vals, dict):\n        return vals.values()\n    else:\n        return [vals]", "docstring": "Return a list of values to pass to `name_scope()`.\n\nArgs:\nvals: A tensor, a list or tuple of tensors, or a dictionary.\n\nReturns:\nThe values in vals as a list.", "source": "github-repos"}
{"code": "def _profile_table(self, batch_id):\n    message = self._execute_command(batch_id, 'RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.PROFILE, '', batch_id.binary())\n    if (message is None):\n        return []\n    gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)\n    profile_events = []\n    for i in range(gcs_entries.EntriesLength()):\n        profile_table_message = ray.gcs_utils.ProfileTableData.GetRootAsProfileTableData(gcs_entries.Entries(i), 0)\n        component_type = decode(profile_table_message.ComponentType())\n        component_id = binary_to_hex(profile_table_message.ComponentId())\n        node_ip_address = decode(profile_table_message.NodeIpAddress(), allow_none=True)\n        for j in range(profile_table_message.ProfileEventsLength()):\n            profile_event_message = profile_table_message.ProfileEvents(j)\n            profile_event = {'event_type': decode(profile_event_message.EventType()), 'component_id': component_id, 'node_ip_address': node_ip_address, 'component_type': component_type, 'start_time': profile_event_message.StartTime(), 'end_time': profile_event_message.EndTime(), 'extra_data': json.loads(decode(profile_event_message.ExtraData()))}\n            profile_events.append(profile_event)\n    return profile_events", "docstring": "Get the profile events for a given batch of profile events.\n\nArgs:\nbatch_id: An identifier for a batch of profile events.\n\nReturns:\nA list of the profile events for the specified batch.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not self._file_object_set_in_init and not path_spec:\n      raise ValueError('Missing path specification.')\n\n    if not self._file_object_set_in_init:\n      if not path_spec.HasParent():\n        raise errors.PathSpecError(\n            'Unsupported path specification without parent.')\n\n      self._encryption_method = getattr(path_spec, 'encryption_method', None)\n\n      if self._encryption_method is None:\n        raise errors.PathSpecError(\n            'Path specification missing encryption method.')\n\n      self._file_object = resolver.Resolver.OpenFileObject(\n          path_spec.parent, resolver_context=self._resolver_context)\n\n    self._path_spec = path_spec", "docstring": "Opens the file-like object.\n\nArgs:\npath_spec (Optional[PathSpec]): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def search(self, search_space, valid_data, init_args=[], train_args=[], init_kwargs={}, train_kwargs={}, module_args={}, module_kwargs={}, max_search=None, shuffle=True, verbose=True, seed=None, **score_kwargs):\n    self._clear_state(seed)\n    self.search_space = search_space\n    n_models_scored = 0\n    for (bracket_index, bracket) in enumerate(self.hyperband_schedule):\n        (n_starting_configurations, _) = bracket[0]\n        configurations = list(self.config_generator(search_space, max_search=n_starting_configurations, rng=self.rng, shuffle=True))\n        for (band_index, (n_i, r_i)) in enumerate(bracket):\n            assert (len(configurations) <= n_i)\n            scored_configurations = []\n            for (i, configuration) in enumerate(configurations):\n                cur_model_index = n_models_scored\n                configuration['n_epochs'] = r_i\n                (score, model) = self._test_model_config(f'{band_index}_{i}', configuration, valid_data, init_args=init_args, train_args=train_args, init_kwargs=init_kwargs, train_kwargs=train_kwargs, module_args=module_args, module_kwargs=module_kwargs, verbose=verbose, **score_kwargs)\n                scored_configurations.append((score, cur_model_index, configuration))\n                n_models_scored += 1\n            scored_configurations.sort(key=(lambda x: x[0]), reverse=True)\n            if ((band_index + 1) < len(bracket)):\n                (n_to_keep, _) = bracket[(band_index + 1)]\n                configurations = [x[2] for x in scored_configurations][:n_to_keep]\n    print(('=' * 60))\n    print(f'[SUMMARY]')\n    print(f'Best model: [{self.best_index}]')\n    print(f'Best config: {self.best_config}')\n    print(f'Best score: {self.best_score}')\n    print(('=' * 60))\n    return self._load_best_model(clean_up=True)", "docstring": "Performs hyperband search according to the generated schedule.\n\nAt the beginning of each bracket, we generate a\nlist of random configurations and perform\nsuccessive halving on it; we repeat this process\nfor the number of brackets in the schedule.\n\nArgs:\ninit_args: (list) positional args for initializing the model\ntrain_args: (list) positional args for training the model\nvalid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\nX (data) and Y (labels) for the dev split\nsearch_space: see ModelTuner's config_generator() documentation\nmax_search: see ModelTuner's config_generator() documentation\nshuffle: see ModelTuner's config_generator() documentation\n\nReturns:\nbest_model: the highest performing trained model found by Hyperband\nbest_config: (dict) the config corresponding to the best model\n\nNote: Initialization is performed by ModelTuner instead of passing a\npre-initialized model so that tuning may be performed over all model\nparameters, including the network architecture (which is defined before\nthe train loop).", "source": "codesearchnet"}
{"code": "def get_serialization_context(self, driver_id):\n        \n        \n        \n        \n        \n        with self.lock:\n            if driver_id not in self.serialization_context_map:\n                _initialize_serialization(driver_id)\n            return self.serialization_context_map[driver_id]", "docstring": "Get the SerializationContext of the driver that this worker is processing.\n\nArgs:\ndriver_id: The ID of the driver that indicates which driver to get\nthe serialization context for.\n\nReturns:\nThe serialization context of the given driver.", "source": "juraj-google-style"}
{"code": "def _VerifyExplicitPaddings(self, tensor_in_sizes, filter_in_sizes, strides, padding, dilations=(1, 1), test_grappler_layout_optimizer=False, tol=1e-05, fp16_tol=0.001):\n    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)\n    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)\n    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])\n    dilations = list(dilations)\n    conv2d_result = nn_ops.conv2d(input_tensor, filter_tensor, [1] + list(strides) + [1], 'VALID', dilations=[1] + dilations + [1])\n    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))\n    self._VerifyValues(tensor_in_sizes, filter_in_sizes, strides, padding, expected, dilations, test_grappler_layout_optimizer=test_grappler_layout_optimizer, tol=tol, fp16_tol=fp16_tol)", "docstring": "Verifies Conv2D with explicit padding generates correct values.\n\nIt does this by comparing with Conv2D without explicit padding. This\nfunction assumes Conv2D without explicit padding works correctly.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\nstrides: [row_stride, col_stride] for the convolution;\npadding: Explicit padding amounts.\ndilations: Dilation values\ntest_grappler_layout_optimizer: If True, allow the Grappler layout\noptimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.\ntol: The absolute and relative tolerance for non-fp16 dtypes.\nfp16_tol: The absolute and relative tolerance for fp16.", "source": "github-repos"}
{"code": "def heightmap_normalize(hm: np.ndarray, mi: float=0.0, ma: float=1.0) -> None:\n    lib.TCOD_heightmap_normalize(_heightmap_cdata(hm), mi, ma)", "docstring": "Normalize heightmap values between ``mi`` and ``ma``.\n\nArgs:\nmi (float): The lowest value after normalization.\nma (float): The highest value after normalization.", "source": "codesearchnet"}
{"code": "def get_app_names(self):\n    app_names = set()\n    for name in self.apps:\n        app_names.add(name)\n    return app_names", "docstring": "Return application names.\n\nReturn the list of application names that are available in the\ndatabase.\n\nReturns:\nset of str.", "source": "codesearchnet"}
{"code": "def doit(self, classes=None, recursive=True, indices=None, max_terms=None, **kwargs):\n    return super().doit(classes, recursive, indices=indices, max_terms=max_terms, **kwargs)", "docstring": "Write out the indexed sum explicitly\n\nIf `classes` is None or :class:`IndexedSum` is in `classes`,\n(partially) write out the indexed sum in to an explicit sum of terms.\nIf `recursive` is True, write out each of the new sum's summands by\ncalling its :meth:`doit` method.\n\nArgs:\nclasses (None or list): see :meth:`.Expression.doit`\nrecursive (bool): see :meth:`.Expression.doit`\nindices (list): List of :class:`IdxSym` indices for which the sum\nshould be expanded. If `indices` is a subset of the indices\nover which the sum runs, it will be partially expanded. If not\ngiven, expand the sum completely\nmax_terms (int): Number of terms after which to truncate the sum.\nThis is particularly useful for infinite sums. If not given,\nexpand all terms of the sum. Cannot be combined with `indices`\nkwargs: keyword arguments for recursive calls to\n:meth:`doit`. See :meth:`.Expression.doit`", "source": "codesearchnet"}
{"code": "def delete_issue(self, issue_id, params=None):\n    return self._delete((self.API_URL + 'issue/{}'.format(issue_id)), params=params)", "docstring": "Deletes an individual issue.\n\nIf the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete\nan issue without deleting its sub-tasks.\n\nArgs:\nissue_id:\nparams:\n\nReturns:", "source": "codesearchnet"}
{"code": "def get_details(app='groupproject', env='dev', region='us-east-1'):\n    \n    url = '{host}/applications/{app}'.format(host=API_URL, app=app)\n\n    request = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n\n    if not request.ok:\n        raise SpinnakerAppNotFound('\"{0}\" not found.'.format(app))\n\n    app_details = request.json()\n\n    LOG.debug('App details: %s', app_details)\n    group = app_details['attributes'].get('repoProjectKey')\n    project = app_details['attributes'].get('repoSlug')\n    generated = gogoutils.Generator(group, project, env=env, region=region, formats=APP_FORMATS)\n\n    LOG.debug('Application details: %s', generated)\n    return generated", "docstring": "Extract details for Application.\n\nArgs:\napp (str): Application Name\nenv (str): Environment/account to get details from\n\nReturns:\ncollections.namedtuple with _group_, _policy_, _profile_, _role_,\n_user_.", "source": "juraj-google-style"}
{"code": "def _GetComparable(self, sub_comparable_string=''):\n    \n    string_parts = []\n\n    string_parts.append(getattr(self.parent, 'comparable', ''))\n    string_parts.append('type: {0:s}'.format(self.type_indicator))\n\n    if sub_comparable_string:\n      string_parts.append(', {0:s}'.format(sub_comparable_string))\n    string_parts.append('\\n')\n\n    return ''.join(string_parts)", "docstring": "Retrieves the comparable representation.\n\nThis is a convenience function for constructing comparables.\n\nArgs:\nsub_comparable_string (str): sub comparable string.\n\nReturns:\nstr: comparable representation of the path specification.", "source": "juraj-google-style"}
{"code": "def evaluate_cut(uncut_subsystem, cut, unpartitioned_ces):\n    \n    log.debug('Evaluating %s...', cut)\n\n    cut_subsystem = uncut_subsystem.apply_cut(cut)\n\n    if config.ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS:\n        mechanisms = unpartitioned_ces.mechanisms\n    else:\n        \n        \n        mechanisms = set(\n            unpartitioned_ces.mechanisms +\n            list(cut_subsystem.cut_mechanisms))\n\n    partitioned_ces = ces(cut_subsystem, mechanisms)\n\n    log.debug('Finished evaluating %s.', cut)\n\n    phi_ = ces_distance(unpartitioned_ces, partitioned_ces)\n\n    return SystemIrreducibilityAnalysis(\n        phi=phi_,\n        ces=unpartitioned_ces,\n        partitioned_ces=partitioned_ces,\n        subsystem=uncut_subsystem,\n        cut_subsystem=cut_subsystem)", "docstring": "Compute the system irreducibility for a given cut.\n\nArgs:\nuncut_subsystem (Subsystem): The subsystem without the cut applied.\ncut (Cut): The cut to evaluate.\nunpartitioned_ces (CauseEffectStructure): The cause-effect structure of\nthe uncut subsystem.\n\nReturns:\nSystemIrreducibilityAnalysis: The |SystemIrreducibilityAnalysis| for\nthat cut.", "source": "juraj-google-style"}
{"code": "def CreateFeedItemAddOperation(name, price, date, ad_customizer_feed):\n    feed_item = {'feedId': ad_customizer_feed['feedId'], 'attributeValues': [{'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'], 'stringValue': name}, {'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'], 'stringValue': price}, {'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'], 'stringValue': date}]}\n    operation = {'operator': 'ADD', 'operand': feed_item}\n    return operation", "docstring": "Creates a FeedItemOperation.\n\nThe generated FeedItemOperation will create a FeedItem with the specified\nvalues when sent to FeedItemService.mutate.\n\nArgs:\nname: the value for the name attribute of the FeedItem.\nprice: the value for the price attribute of the FeedItem.\ndate: the value for the date attribute of the FeedItem.\nad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems\nwith.\n\nReturns:\nA new FeedItemOperation for adding a FeedItem.", "source": "codesearchnet"}
{"code": "def get_site_t2g_eg_resolved_dos(self, site):\n        \n        t2g_dos = []\n        eg_dos = []\n        for s, atom_dos in self.pdos.items():\n            if s == site:\n                for orb, pdos in atom_dos.items():\n                    if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz):\n                        t2g_dos.append(pdos)\n                    elif orb in (Orbital.dx2, Orbital.dz2):\n                        eg_dos.append(pdos)\n        return {\"t2g\": Dos(self.efermi, self.energies,\n                           functools.reduce(add_densities, t2g_dos)),\n                \"e_g\": Dos(self.efermi, self.energies,\n                           functools.reduce(add_densities, eg_dos))}", "docstring": "Get the t2g, eg projected DOS for a particular site.\n\nArgs:\nsite: Site in Structure associated with CompleteDos.\n\nReturns:\nA dict {\"e_g\": Dos, \"t2g\": Dos} containing summed e_g and t2g DOS\nfor the site.", "source": "juraj-google-style"}
{"code": "def sequence_like(instance, args):\n    if _is_mutable_mapping(instance):\n        result = dict(zip(_tf_core_sorted(instance), args))\n        instance_type = type(instance)\n        if instance_type == _collections.defaultdict:\n            d = _collections.defaultdict(instance.default_factory)\n        else:\n            d = instance_type()\n        for key in instance:\n            d[key] = result[key]\n        return d\n    elif _is_mapping(instance):\n        result = dict(zip(_tf_core_sorted(instance), args))\n        instance_type = type(instance)\n        if not getattr(instance_type, '__supported_by_tf_nest__', False):\n            tf_logging.log_first_n(tf_logging.WARN, 'Mapping types may not work well with tf.nest. Prefer using MutableMapping for {}'.format(instance_type), 1)\n        try:\n            return instance_type(((key, result[key]) for key in instance))\n        except TypeError as err:\n            raise TypeError('Error creating an object of type {} like {}. Note that it must accept a single positional argument representing an iterable of key-value pairs, in addition to self. Cause: {}'.format(type(instance), instance, err))\n    elif _is_mapping_view(instance):\n        return list(args)\n    elif is_namedtuple(instance) or _is_attrs(instance):\n        if isinstance(instance, _wrapt.ObjectProxy):\n            instance_type = type(instance.__wrapped__)\n        else:\n            instance_type = type(instance)\n        return instance_type(*args)\n    elif _is_composite_tensor(instance):\n        assert len(args) == 1\n        spec = instance._type_spec\n        return spec._from_components(args[0])\n    elif _is_type_spec(instance):\n        assert len(args) == 1\n        return instance._from_components(args[0])\n    elif isinstance(instance, range):\n        return sequence_like(list(instance), args)\n    elif isinstance(instance, _wrapt.ObjectProxy):\n        return type(instance)(sequence_like(instance.__wrapped__, args))\n    elif isinstance(instance, CustomNestProtocol):\n        metadata = instance.__tf_flatten__()[0]\n        return instance.__tf_unflatten__(metadata, tuple(args))\n    else:\n        return type(instance)(args)", "docstring": "Converts the sequence `args` to the same type as `instance`.\n\nArgs:\ninstance: an instance of `tuple`, `list`, `namedtuple`, `dict`,\n`collections.OrderedDict`, or `composite_tensor.Composite_Tensor` or\n`type_spec.TypeSpec`.\nargs: items to be converted to the `instance` type.\n\nReturns:\n`args` with the type of `instance`.", "source": "github-repos"}
{"code": "def request_stop(self):\n    raise StopIteration('step_fn has requested the iterations to stop.')", "docstring": "Exit the training loop by causing `should_stop()` to return `True`.\n\nCauses `step_fn` to exit by raising an exception.\n\nRaises:\nStopIteration", "source": "github-repos"}
{"code": "def _preprocess_resize_output_shape(image, output_shape):\n    output_shape = tuple(output_shape)\n    output_ndim = len(output_shape)\n    input_shape = image.shape\n    if output_ndim > image.ndim:\n        input_shape += (1,) * (output_ndim - image.ndim)\n        image = np.reshape(image, input_shape)\n    elif output_ndim == image.ndim - 1:\n        output_shape = output_shape + (image.shape[-1],)\n    elif output_ndim < image.ndim:\n        raise ValueError('output_shape length cannot be smaller than the image number of dimensions')\n    return (image, output_shape)", "docstring": "Validate resize output shape according to input image.\n\nArgs:\nimage (`np.ndarray`):\nImage to be resized.\noutput_shape (`iterable`):\nSize of the generated output image `(rows, cols[, ...][, dim])`. If `dim` is not provided, the number of\nchannels is preserved.\n\nReturns\nimage (`np.ndarray`):\nThe input image, but with additional singleton dimensions appended in the case where `len(output_shape) >\ninput.ndim`.\noutput_shape (`Tuple`):\nThe output shape converted to tuple.\n\nRaises ------ ValueError:\nIf output_shape length is smaller than the image number of dimensions.\n\nNotes ----- The input image is reshaped if its number of dimensions is not equal to output_shape_length.", "source": "github-repos"}
{"code": "def keypoint_flip(bbox, d, rows, cols):\n    if (d == 0):\n        bbox = keypoint_vflip(bbox, rows, cols)\n    elif (d == 1):\n        bbox = keypoint_hflip(bbox, rows, cols)\n    elif (d == (- 1)):\n        bbox = keypoint_hflip(bbox, rows, cols)\n        bbox = keypoint_vflip(bbox, rows, cols)\n    else:\n        raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d))\n    return bbox", "docstring": "Flip a keypoint either vertically, horizontally or both depending on the value of `d`.\n\nRaises:\nValueError: if value of `d` is not -1, 0 or 1.", "source": "codesearchnet"}
{"code": "def argv(cls, name, short_name=None, type=None, help=None):\n    cls.__hierarchy.append(argv.Argv(name, short_name, type, help))", "docstring": "Set command line arguments as a source\n\nParses the command line arguments described by the parameters.\n\nArgs:\nname: the long name of the argument (foo)\nshort_name: the optional short name of the argument (f)\ntype: the optional type of the argument, defaults to bool\nhelp: the optional help text for the argument", "source": "codesearchnet"}
{"code": "def path_to_string(path):\n    if isinstance(path, os.PathLike):\n        return os.fspath(path)\n    return path", "docstring": "Convert `PathLike` objects to their string representation.\n\nIf given a non-string typed path object, converts it to its string\nrepresentation.\n\nIf the object passed to `path` is not among the above, then it is\nreturned unchanged. This allows e.g. passthrough of file objects\nthrough this function.\n\nArgs:\npath: `PathLike` object that represents a path\n\nReturns:\nA string representation of the path argument, if Python support exists.", "source": "github-repos"}
{"code": "def quarter_ellipsis_functions(xx, yy):\n    npxx = np.array(xx)\n    npyy = np.array(yy)\n    if np.any((npxx == npyy)):\n        raise RuntimeError('Invalid points for quarter_ellipsis_functions')\n    if (np.all((npxx < npyy)) or np.all((npxx > npyy))):\n        if (npxx[0] < npyy[0]):\n            p1 = npxx\n            p2 = npyy\n        else:\n            p1 = npyy\n            p2 = npxx\n        c_lower = np.array([p1[0], p2[1]])\n        c_upper = np.array([p2[0], p1[1]])\n        b2 = ((p2[1] - p1[1]) ** 2)\n    else:\n        if (npxx[0] < npyy[0]):\n            p1 = npxx\n            p2 = npyy\n        else:\n            p1 = npyy\n            p2 = npxx\n        c_lower = np.array([p2[0], p1[1]])\n        c_upper = np.array([p1[0], p2[1]])\n        b2 = ((p1[1] - p2[1]) ** 2)\n    b2overa2 = (b2 / ((p2[0] - p1[0]) ** 2))\n\n    def lower(x):\n        return (c_lower[1] - np.sqrt((b2 - (b2overa2 * ((x - c_lower[0]) ** 2)))))\n\n    def upper(x):\n        return (c_upper[1] + np.sqrt((b2 - (b2overa2 * ((x - c_upper[0]) ** 2)))))\n    return {'lower': lower, 'upper': upper}", "docstring": "Method that creates two quarter-ellipse functions based on points xx and yy. The ellipsis is supposed to\nbe aligned with the axes. The two ellipsis pass through the two points xx and yy.\n\nArgs:\nxx:\nFirst point\nyy:\nSecond point\n\nReturns:\nA dictionary with the lower and upper quarter ellipsis functions.", "source": "codesearchnet"}
{"code": "def _replace_oov(original_vocab, line):\n    return u' '.join([(word if (word in original_vocab) else u'UNK') for word in line.split()])", "docstring": "Replace out-of-vocab words with \"UNK\".\n\nThis maintains compatibility with published results.\n\nArgs:\noriginal_vocab: a set of strings (The standard vocabulary for the dataset)\nline: a unicode string - a space-delimited sequence of words.\n\nReturns:\na unicode string - a space-delimited sequence of words.", "source": "codesearchnet"}
{"code": "def GetRealPath(filename):\n  \n  if os.path.isabs(filename):                \n    return filename\n\n  if filename.startswith('./') or  filename.startswith('../'): \n    return os.path.abspath(filename)\n\n  path = os.getenv('PATH', '')\n  for directory in path.split(':'):\n    tryname = os.path.join(directory, filename)\n    if os.path.exists(tryname):\n      if not os.path.isabs(directory):  \n        return os.path.abspath(tryname)\n      return tryname\n  if os.path.exists(filename):\n    return os.path.abspath(filename)\n  return None", "docstring": "Given an executable filename, find in the PATH or find absolute path.\nArgs:\nfilename  An executable filename (string)\nReturns:\nAbsolute version of filename.\nNone if filename could not be found locally, absolutely, or in PATH", "source": "juraj-google-style"}
{"code": "def _fetch_events_files_on_disk(self):\n    all_files = tf.io.gfile.listdir(self._events_directory)\n    relevant_files = [file_name for file_name in all_files if _DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name)]\n    return sorted(relevant_files, key=self._obtain_file_index)", "docstring": "Obtains the names of debugger-related events files within the directory.\n\nReturns:\nThe names of the debugger-related events files written to disk. The names\nare sorted in increasing events file index.", "source": "codesearchnet"}
{"code": "def __init__(self, device, configs=None):\n        \n        self._device = device\n        self._configs = configs", "docstring": "Constructor of the class.\n\nThe constructor is the only place to pass in a config. If you need to\nchange the config later, you should unregister the service instance\nfrom `ServiceManager` and register again with the new config.\n\nArgs:\ndevice: the device object this service is associated with.\nconfig: optional configuration defined by the author of the service\nclass.", "source": "juraj-google-style"}
{"code": "def process_tag(self, tag_proc_name, tag):\n    tag_processor = self.tag_procs[tag_proc_name]\n    db_entry = (tag_processor.get_name(tag), tag_processor.get_entry_type(tag), tag_processor.get_filename(tag))\n    self.zeal_db.insert(*db_entry)\n    self.entry_count += 1", "docstring": "Process a tag with a tag processor and insert a DB entry.\n\nArgs:\ntag_proc_name: A string key that maps to the TagProcessor to use.\ntag: A BeautifulSoup Tag to process.", "source": "codesearchnet"}
{"code": "def protocol_version_to_kmip_version(value):\n    if (not isinstance(value, ProtocolVersion)):\n        return None\n    if (value.major == 1):\n        if (value.minor == 0):\n            return enums.KMIPVersion.KMIP_1_0\n        elif (value.minor == 1):\n            return enums.KMIPVersion.KMIP_1_1\n        elif (value.minor == 2):\n            return enums.KMIPVersion.KMIP_1_2\n        elif (value.minor == 3):\n            return enums.KMIPVersion.KMIP_1_3\n        elif (value.minor == 4):\n            return enums.KMIPVersion.KMIP_1_4\n        else:\n            return None\n    else:\n        return None", "docstring": "Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.\n\nArgs:\nvalue (ProtocolVersion): A ProtocolVersion struct to be converted into\na KMIPVersion enumeration.\n\nReturns:\nKMIPVersion: The enumeration equivalent of the struct. If the struct\ncannot be converted to a valid enumeration, None is returned.", "source": "codesearchnet"}
{"code": "def append_paulis(self, paulis=None, pauli_labels=None):\n        \n        return self.insert_paulis(None, paulis=paulis, pauli_labels=pauli_labels)", "docstring": "Append pauli at the end.\n\nArgs:\npaulis (Pauli): the to-be-inserted or appended pauli\npauli_labels (list[str]): the to-be-inserted or appended pauli label\n\nReturns:\nPauli: self", "source": "juraj-google-style"}
{"code": "def get_account_info(self):\n    request = self._get_request()\n    response = request.get(self.ACCOUNT_INFO_URL)\n    self.account.json_data = response['account']\n    return self.account", "docstring": "Get current account information\n\nThe information then will be saved in `self.account` so that you can\naccess the information like this:\n\n>>> hsclient = HSClient()\n>>> acct = hsclient.get_account_info()\n>>> print acct.email_address\n\nReturns:\nAn Account object", "source": "codesearchnet"}
{"code": "def set_mode(self, name, value=None, default=False, disable=False):\n    string = 'switchport mode'\n    command = self.command_builder(string, value=value, default=default, disable=disable)\n    return self.configure_interface(name, command)", "docstring": "Configures the switchport mode\n\nArgs:\nname (string): The interface identifier to create the logical\nlayer 2 switchport for.  The name must be the full interface\nname and not an abbreviated interface name (eg Ethernet1, not\nEt1)\n\nvalue (string): The value to set the mode to.  Accepted values\nfor this argument are access or trunk\n\ndefault (bool): Configures the mode parameter to its default\nvalue using the EOS CLI\n\ndisable (bool): Negate the mode parameter using the EOS CLI\n\nReturns:\nTrue if the create operation succeeds otherwise False.", "source": "codesearchnet"}
{"code": "def Run(self, conf, args):\n    try:\n        options, args = self.parser.parse_args(args)\n    except SystemExit as e:\n        return e.code\n    if options.maps:\n        self.log.info('Setting configured maps to %s', options.maps)\n        conf.maps = options.maps\n    if not options.incremental:\n        self.log.debug('performing FULL update of caches')\n    else:\n        self.log.debug('performing INCREMENTAL update of caches')\n    if options.delay:\n        self.log.info('Delaying %d seconds before executing', options.delay)\n        time.sleep(options.delay)\n    return self.UpdateMaps(conf, incremental=options.incremental, force_write=options.force_write, force_lock=options.force_lock)", "docstring": "Run the Update command.\n\nSee Command.Run() for full documentation on the Run() method.\n\nArgs:\nconf: a nss_cache.config.Config object\nargs: a list of arguments to be parsed by this command\n\nReturns:\n0 on success, nonzero on error", "source": "github-repos"}
{"code": "def _tag_sharding_attribute_for_dequeued_tensor(tensor, dims):\n    if dims is None:\n        return xla_sharding.replicate(tensor, assign_tuple_sharding=True)\n    elif np.prod(dims) == 1:\n        return xla_sharding.assign_device(tensor, 0, assign_tuple_sharding=True)\n    else:\n        tile_assignment = np.arange(np.prod(dims)).reshape(dims)\n        return xla_sharding.tile(tensor=tensor, tile_assignment=tile_assignment, assign_tuple_sharding=True)", "docstring": "Tags appropriate XLA sharding attribute to the dequeued tensor.\n\nThe sharding attribute of the dequeued tensor will be a tuple.\n\nArgs:\ntensor: The dequeued tensor on TPU.\ndims: A list of integer describes how the tensor is partitioned.\n\nReturns:\nThe same tensor with the xla_sharding attribute.", "source": "github-repos"}
{"code": "def stack(x, axis=0):\n    if any_symbolic_tensors((x,)):\n        return Stack(axis=axis).symbolic_call(x)\n    return backend.numpy.stack(x, axis=axis)", "docstring": "Join a sequence of tensors along a new axis.\n\nThe `axis` parameter specifies the index of the new axis in the\ndimensions of the result.\n\nArgs:\nx: A sequence of tensors.\naxis: Axis along which to stack. Defaults to `0`.\n\nReturns:\nThe stacked tensor.", "source": "github-repos"}
{"code": "def _AddPathSegments(self, path, ignore_list):\n    path_segments = path.split(self._path_segment_separator)\n    for (path_segment_index, path_segment) in enumerate(path_segments):\n        if (path_segment_index not in self.path_segments_per_index):\n            self.path_segments_per_index[path_segment_index] = {}\n        if (path_segment_index not in ignore_list):\n            path_segments = self.path_segments_per_index[path_segment_index]\n            if (path_segment not in path_segments):\n                path_segments[path_segment] = []\n            paths_per_segment_list = path_segments[path_segment]\n            paths_per_segment_list.append(path)", "docstring": "Adds the path segments to the table.\n\nArgs:\npath: a string containing the path.\nignore_list: a list of path segment indexes to ignore, where 0 is the\nindex of the first path segment relative from the root.", "source": "codesearchnet"}
{"code": "def labels_in_range(self, start, end, fully_included=False):\n    if fully_included:\n        intervals = self.label_tree.envelop(start, end)\n    else:\n        intervals = self.label_tree.overlap(start, end)\n    return [iv.data for iv in intervals]", "docstring": "Return a list of labels, that are within the given range.\nAlso labels that only overlap are included.\n\nArgs:\nstart(float): Start-time in seconds.\nend(float): End-time in seconds.\nfully_included(bool): If ``True``, only labels fully included\nin the range are returned. Otherwise\nalso overlapping ones are returned.\n(default ``False``)\n\nReturns:\nlist: List of labels in the range.\n\nExample:\n>>> ll = LabelList(labels=[\n>>>     Label('a', 3.2, 4.5),\n>>>     Label('b', 5.1, 8.9),\n>>>     Label('c', 7.2, 10.5),\n>>>     Label('d', 10.5, 14)\n>>>])\n>>> ll.labels_in_range(6.2, 10.1)\n[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]", "source": "codesearchnet"}
{"code": "def mel_spectrogram(self, sequence: np.ndarray):\n    mel_specs = []\n    for seq in sequence:\n        window = np.hanning(self.window_size + 1)[:-1]\n        mel_specs.append(spectrogram(waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters))\n    mel_specs = np.array(mel_specs)\n    return mel_specs", "docstring": "Generates MelSpectrogram.\n\nArgs:\nsequence (`numpy.ndarray`):\nThe sequence of which the mel-spectrogram will be computed.", "source": "github-repos"}
{"code": "def _parse_services(self, service_config: dict, service_name: str,\n                        service_list: dict) -> dict:\n        \n        for key, value in service_list['services'][service_name].items():\n            service_config[key] = value\n            if 'command' in key:\n                key = \"args\"\n                service_config['args'] = value\n                service_config.pop('command')\n            if 'ports' in key:\n                endpoint_spec = self._parse_ports(value)\n                service_config['endpoint_spec'] = endpoint_spec\n                service_config.pop('ports')\n            if 'volumes' in key:\n                volume_spec = self._parse_volumes(value)\n                service_config['mounts'] = volume_spec\n                service_config.pop('volumes')\n            if 'deploy' in key:\n                self._parse_deploy(value, service_config)\n                service_config.pop('deploy')\n            if 'networks' in key:\n                network_spec = self._parse_networks(service_list)\n                service_config['networks'] = network_spec\n            if 'logging' in key:\n                self._parse_logging(value, service_config)\n                service_config.pop('logging')\n            if 'environment' in key:\n                service_config['env'] = value\n                service_config.pop('environment')\n\n        \n        return service_config", "docstring": "Parse the docker compose file.\n\nArgs:\nservice_config (dict): Service configurations from the compose file\nservice_name (string): Name of the services\nservice_list (dict): Service configuration list\n\nReturns:\ndict, service specifications extracted from the compose file", "source": "juraj-google-style"}
{"code": "def _create_handler(self, config):\n    if (config is None):\n        raise ValueError('No handler config to create handler from.')\n    if ('name' not in config):\n        raise ValueError('Handler name is required.')\n    handler_name = config['name']\n    module_name = handler_name.rsplit('.', 1)[0]\n    class_name = handler_name.rsplit('.', 1)[(- 1)]\n    module = import_module(module_name)\n    handler_class = getattr(module, class_name)\n    instance = handler_class(**config)\n    return instance", "docstring": "Creates a handler from its config.\n\nParams:\nconfig:      handler config\nReturns:\nhandler instance", "source": "codesearchnet"}
{"code": "def download_items(cache_fn, start=None):\n    \n    with SqliteDict(cache_fn) as db:\n        last_id = db.get(\"last_id\", 0) if not start else start\n        _download_items(db, last_id)\n        db.commit()", "docstring": "Open the `cache_fn` as database and download all not-yet downloaded items.\n\nArgs:\ncache_fn (str): Path to the sqlite database. If not exists, it will be\ncreated.\nstart (int, default None): If set, start from this sysno.", "source": "juraj-google-style"}
{"code": "def _ParseEntryObjectOffsets(self, file_object, file_offset):\n    \n    entry_array_object = self._ParseEntryArrayObject(file_object, file_offset)\n\n    entry_object_offsets = list(entry_array_object.entry_object_offsets)\n    while entry_array_object.next_entry_array_offset != 0:\n      entry_array_object = self._ParseEntryArrayObject(\n          file_object, entry_array_object.next_entry_array_offset)\n      entry_object_offsets.extend(entry_array_object.entry_object_offsets)\n\n    return entry_object_offsets", "docstring": "Parses entry array objects for the offset of the entry objects.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the first entry array object relative to\nthe start of the file-like object.\n\nReturns:\nlist[int]: offsets of the entry objects.", "source": "juraj-google-style"}
{"code": "def create_heart(self, git_repo_url, max_commits=10, weeks_from_now=1):\n        \n        self.weeks_from_now = weeks_from_now\n        self.end_date = self.get_end_date()\n        try:\n            self.repository_name = git_repo_url.split('/')[-1][:-4]\n            self.git_repo_url = git_repo_url\n            self.max_commits = max_commits\n            self.do_commits()\n            self.do_commit_amends()\n        except IndexError as ie:\n            raise ErrorMessage(\n                \"Please provide the correct URL for the Repository\")\n        except Exception as e:\n            raise ErrorMessage(str(e))", "docstring": "Creates heart on the Summary.\nArgs:\ngit_repo_url: The url (ssh or https) of the Repository, used for cloning\nmax_commits: Maximum number of commits in a day\nweeks_from_now: The number of week from this week the Heart's Right center boundary will be.", "source": "juraj-google-style"}
{"code": "def __init__(self, network, scope='network-baseline', summary_labels=()):\n        \n        self.network = Network.from_spec(\n            spec=network,\n            kwargs=dict(summary_labels=summary_labels)\n        )\n        assert len(self.network.internals_spec()) == 0\n\n        self.linear = Linear(size=1, bias=0.0, scope='prediction', summary_labels=summary_labels)\n\n        super(NetworkBaseline, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Network baseline.\n\nArgs:\nnetwork_spec: Network specification dict", "source": "juraj-google-style"}
{"code": "def iterator_full_type_from_spec(element_spec):\n    args = fulltypes_for_flat_tensors(element_spec)\n    return full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ITERATOR, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=args)])])", "docstring": "Returns a FullTypeDef for an iterator for the elements.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing the\nelement type specification.\n\nReturns:\nA FullTypeDef for an iterator for the element tensor representation.", "source": "github-repos"}
{"code": "def get(self, center, target, date):\n        \n\n        if (center.index, target.index) in self.segments:\n            pos, vel = self.segments[center.index, target.index].compute_and_differentiate(date.jd)\n            sign = 1\n        else:\n            \n            \n            \n            pos, vel = self.segments[target.index, center.index].compute_and_differentiate(date.jd)\n            sign = -1\n\n        \n        if len(pos) == 3:\n            \n            \n            pv = np.concatenate((pos, vel / S_PER_DAY))\n        elif len(pos) == 6:\n            pv = np.array(pos)\n        else:\n            raise JplError(\"Unknown state vector format\")\n\n        return sign * pv * 1000", "docstring": "Retrieve the position and velocity of a target with respect to a center\n\nArgs:\ncenter (Target):\ntarget (Target):\ndate (Date):\nReturn:\nnumpy.array: length-6 array position and velocity (in m and m/s) of the\ntarget, with respect to the center", "source": "juraj-google-style"}
{"code": "def __sub__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.subtract, tf.float32)", "docstring": "Returns a TensorFluent for the subtraction arithmetic operator.\n\nArgs:\nself: The first operand.\nother: The second operand.\n\nReturns:\nA TensorFluent wrapping the operator's output.", "source": "juraj-google-style"}
{"code": "def combine(path1, path2):\n    \n    \n    if not path1:\n        return path2.lstrip()\n    return \"{}/{}\".format(path1.rstrip(\"/\"), path2.lstrip(\"/\"))", "docstring": "Join two paths together.\n\nThis is faster than :func:`~fs.path.join`, but only works when the\nsecond path is relative, and there are no back references in either\npath.\n\nArguments:\npath1 (str): A PyFilesytem path.\npath2 (str): A PyFilesytem path.\n\nReturns:\nstr: The joint path.\n\nExample:\n>>> combine(\"foo/bar\", \"baz\")\n'foo/bar/baz'", "source": "juraj-google-style"}
{"code": "def train(self, mode=True):\n    super().train(mode)\n    if mode:\n        mu.apply_leaf(self, mu.set_train_mode)\n    return self", "docstring": "r\"\"\"\nSets the module in training mode.\n\nThis has any effect only on certain modules. See documentations of\nparticular modules for details of their behaviors in training/evaluation\nmode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\netc.\n\nReturns:\nModule: self", "source": "codesearchnet"}
{"code": "def exp(x):\n    if any_symbolic_tensors((x,)):\n        return Exp().symbolic_call(x)\n    return backend.numpy.exp(x)", "docstring": "Calculate the exponential of all elements in the input tensor.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise exponential of `x`.", "source": "github-repos"}
{"code": "def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain):\n  \n  password = key_chain.GetCredential(path_spec, 'password')\n  if password:\n    bde_volume.set_password(password)\n\n  recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n  if recovery_password:\n    bde_volume.set_recovery_password(recovery_password)\n\n  startup_key = key_chain.GetCredential(path_spec, 'startup_key')\n  if startup_key:\n    bde_volume.read_startup_key(startup_key)\n\n  bde_volume.open_file_object(file_object)", "docstring": "Opens the BDE volume using the path specification.\n\nArgs:\nbde_volume (pybde.volume): BDE volume.\npath_spec (PathSpec): path specification.\nfile_object (FileIO): file-like object.\nkey_chain (KeyChain): key chain.", "source": "juraj-google-style"}
{"code": "def get_image_patches(self, image: np.array, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> List[np.array]:\n    if not isinstance(grid_pinpoints, list):\n        raise TypeError('grid_pinpoints must be a list of possible resolutions.')\n    possible_resolutions = grid_pinpoints\n    image_size = get_image_size(image, channel_dim=input_data_format)\n    best_resolution = select_best_resolution(image_size, possible_resolutions)\n    resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format)\n    padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)\n    patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)\n    patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches]\n    resized_original_image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format)\n    image_patches = [resized_original_image] + patches\n    return image_patches", "docstring": "Process an image with variable resolutions by dividing it into patches.\n\nArgs:\nimage (np.array):\nThe input image to be processed.\ngrid_pinpoints (List):\nA string representation of a list of possible resolutions.\nsize (`tuple`):\nSize to resize the original image to.\npatch_size (`int`):\nSize of the patches to divide the image into.\nresample (`PILImageResampling`):\nResampling filter to use if resizing the image.\ndata_format (`ChannelDimension` or `str`):\nThe channel dimension format for the output image.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\nList[np.array]: A list of NumPy arrays containing the processed image patches.", "source": "github-repos"}
{"code": "def create_creation_event(self):\n    event = self.create_audit_event(code='AUDIT_CREATE')\n    if self._meta.create_message:\n        event.body = self._meta.create_message['message']\n        event.code = self._meta.create_message['code']\n        event.meta = self.parse_meta(self._meta.create_message['meta'])\n    self.create_event_callback(event)\n    event.save()\n    return event", "docstring": "Parse the create message DSL to insert the data into the Event.\n\nReturns:\nfleaker.peewee.EventStorageMixin:\nA new Event instance with data put in it", "source": "codesearchnet"}
{"code": "def ordered_repr(obj: object, attrlist: Iterable[str],\n                 joiner: str = COMMA_SPACE) -> str:\n    \n    return \"<{classname}({kvp})>\".format(\n        classname=type(obj).__name__,\n        kvp=joiner.join(\"{}={}\".format(a, repr(getattr(obj, a)))\n                        for a in attrlist)\n    )", "docstring": "Shortcut to make :func:`repr` functions ordered.\nDefine your :func:`__repr__` like this:\n\n.. code-block:: python\n\ndef __repr__(self):\nreturn ordered_repr(self, [\"field1\", \"field2\", \"field3\"])\n\nArgs:\nobj: object to display\nattrlist: iterable of attribute names\njoiner: string with which to join the elements\n\nReturns:\nstring: :func:`repr`-style representation", "source": "juraj-google-style"}
{"code": "def init(library: typing.Union[str, types.ModuleType]) -> None:\n    \n    if isinstance(library, types.ModuleType):\n        library = library.__name__\n\n    if library not in manager._handlers:\n        raise ValueError(\"Possible values are <{}>, not <{}>\".format(manager._handlers.keys(),\n                                                                     library))\n\n    manager.init(library, asynclib)\n    asynclib.lib_name = library\n    asynclib._init = True", "docstring": "Must be called at some point after import and before your event loop\nis run.\n\nPopulates the asynclib instance of _AsyncLib with methods relevant to the\nasync library you are using.\n\nThe supported libraries at the moment are:\n- curio\n- trio\n\nArgs:\nlibrary (str or module): Either the module name as a string or the\nimported module itself. E.g. ``multio.init(curio)``.", "source": "juraj-google-style"}
{"code": "def generate_query_key(self, serializer):\n        \n        rewritten = []\n        last = len(self.field) - 1\n        s = serializer\n        field = None\n        for i, field_name in enumerate(self.field):\n            \n            \n            \n            \n            \n            fields = s.fields\n            if field_name not in fields:\n                fields = getattr(s, 'get_all_fields', lambda: {})()\n\n            if field_name == 'pk':\n                rewritten.append('pk')\n                continue\n\n            if field_name not in fields:\n                raise ValidationError(\n                    \"Invalid filter field: %s\" % field_name\n                )\n\n            field = fields[field_name]\n\n            \n            \n            model_field_name = field.source or field_name\n            model_field = get_model_field(s.get_model(), model_field_name)\n            if isinstance(model_field, RelatedObject):\n                model_field_name = model_field.field.related_query_name()\n\n            \n            \n            rewritten.append(model_field_name)\n\n            if i == last:\n                break\n\n            \n            s = getattr(field, 'serializer', None)\n            if isinstance(s, serializers.ListSerializer):\n                s = s.child\n            if not s:\n                raise ValidationError(\n                    \"Invalid nested filter field: %s\" % field_name\n                )\n\n        if self.operator:\n            rewritten.append(self.operator)\n\n        return ('__'.join(rewritten), field)", "docstring": "Get the key that can be passed to Django's filter method.\n\nTo account for serialier field name rewrites, this method\ntranslates serializer field names to model field names\nby inspecting `serializer`.\n\nFor example, a query like `filter{users.events}` would be\nreturned as `users__events`.\n\nArguments:\nserializer: A DRF serializer\n\nReturns:\nA filter key.", "source": "juraj-google-style"}
{"code": "def delete(self, wait_for_deletion=True):\n    if self.exists():\n        try:\n            self._api.objects_delete(self._bucket, self._key)\n        except Exception as e:\n            raise e\n        if wait_for_deletion:\n            for _ in range(_MAX_POLL_ATTEMPTS):\n                objects = Objects(self._bucket, prefix=self.key, delimiter='/', context=self._context)\n                if any(((o.key == self.key) for o in objects)):\n                    time.sleep(_POLLING_SLEEP)\n                    continue\n                break\n            else:\n                logging.error('Failed to see object deletion after %d attempts.', _MAX_POLL_ATTEMPTS)", "docstring": "Deletes this object from its bucket.\n\nArgs:\nwait_for_deletion: If True, we poll until this object no longer appears in\nobjects.list operations for this bucket before returning.\n\nRaises:\nException if there was an error deleting the object.", "source": "codesearchnet"}
{"code": "def _apply(self, ctx: ExtensionContext) -> AugmentedDict:\n        \n        node_key, node_value = ctx.node\n\n        def process(pattern: Pattern[str], _str: str) -> Any:\n            _match = pattern.match(_str)\n            if _match is None:\n                return _str\n            \n            \n            placeholder, varname = _match.group(1), _match.group(2)\n            varval = self.vars.get(varname, None)\n            if varval is None and self.fail_on_unset:\n                raise ExtensionError(\"Variable '{}' is unset.\".format(varname))\n\n            return _str.replace(placeholder, varval or self.default)\n\n        _pattern = re.compile(self.__pattern__)\n        node_key = process(_pattern, node_key)\n        node_value = process(_pattern, node_value)\n\n        return {node_key: node_value}", "docstring": "Replaces any {{var::*}} directives with it's actual variable value or a default.\n\nArgs:\nctx: The processing context.\n\nReturns:\nReturns the altered node key and value.", "source": "juraj-google-style"}
{"code": "def get_public_ip(access_token, subscription_id, resource_group, ip_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/', 'publicIPAddresses/', ip_name, '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about the named public ip address.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\npublic_ip_name (str): Name of the public ip address resource.\n\nReturns:\nHTTP response. Public IP address JSON body.", "source": "codesearchnet"}
{"code": "def set_settings(self, settings):\n        \n        for k, v in settings.items():\n            setattr(self, k, v)", "docstring": "Set every given settings as object attributes.\n\nArgs:\nsettings (dict): Dictionnary of settings.", "source": "juraj-google-style"}
{"code": "def MaxBipartiteMatching(self, graph):\n    self.g = nx.Graph(graph)\n    self.left = set((n for n, d in self.g.nodes(data=True) if not d['bipartite']))\n    self.right = set(self.g) - self.left\n    self.num_matched = 0\n    self.s = set()\n    self.t = set()\n    self.matches = {}\n    self.slack = {}\n    self.slackx = {}\n    self.prev = {}\n    self.labels = {}\n    for x in self.left:\n        self.labels[x] = max([val['weight'] for val in self.g[x].values()])\n    for y in self.right:\n        self.labels[y] = 0\n    while self.num_matched != len(self.left):\n        self._Augment()\n    ret = {}\n    for k in self.left:\n        ret[k] = self.matches[k]\n    return ret", "docstring": "Find a maximum matching for a bipartite graph.\n\nThis is O(n^3) implementation of the Hungarian method for complete bipartite\nmatching problems.\n\nArgs:\ngraph: A networkx graph object, assumed to be bipartite.\nReturns:\nA dictionary keyed on node names in left to node names in right.", "source": "github-repos"}
{"code": "def __init__(self, variables, name='ShardedVariable'):\n    super(ShardedVariableMixin, self).__init__()\n    self._variables = variables\n    self._name = name\n    if not isinstance(variables, Sequence) or not variables or any((not isinstance(v, variables_lib.Variable) for v in variables)):\n        raise TypeError(f'Argument `variables` should be a non-empty list of `variables.Variable`s. Received {variables}')\n    var_dtypes = {v.dtype for v in variables}\n    if len(var_dtypes) > 1:\n        raise ValueError(f'All elements in argument `variables` must have the same dtype. Received dtypes: {[v.dtype for v in variables]}')\n    first_var = variables[0]\n    self._dtype = first_var.dtype\n    higher_dim_shapes = {tuple(v.shape.as_list()[1:]) for v in variables}\n    if len(higher_dim_shapes) > 1:\n        raise ValueError(f'All elements in argument `variables` must have the same shapes except for the first axis. Received shapes: {[v.shape for v in variables]}')\n    first_dim = sum((int(v.shape.as_list()[0]) for v in variables))\n    self._shape = tensor_shape.TensorShape([first_dim] + first_var.shape.as_list()[1:])\n    for v in variables:\n        v._sharded_container = weakref.ref(self)\n    self._var_offsets = [[0 for _ in range(len(first_var.shape))] for _ in range(len(variables))]\n    for i in range(1, len(variables)):\n        self._var_offsets[i][0] += self._var_offsets[i - 1][0] + variables[i - 1].shape.as_list()[0]\n    save_slice_info = [v._get_save_slice_info() for v in variables]\n    if any((slice_info is not None for slice_info in save_slice_info)):\n        raise ValueError(f'`SaveSliceInfo` should not be set for all elements in argument `variables`. `ShardedVariable` will infer `SaveSliceInfo` according to the order of the elements `variables`. Received save slice info {save_slice_info}')\n    self._saving_variable = resource_variable_ops.UninitializedVariable(shape=self._shape, dtype=self._dtype, name=self._name, trainable=self._variables[0].trainable, synchronization=variables_lib.VariableSynchronization.NONE, aggregation=variables_lib.VariableAggregation.NONE)", "docstring": "Treats `variables` as shards of a larger Variable.\n\nExample:\n\n```\nvariables = [\ntf.Variable(..., shape=(10, 100), dtype=tf.float32),\ntf.Variable(..., shape=(15, 100), dtype=tf.float32),\ntf.Variable(..., shape=(5, 100), dtype=tf.float32)\n]\nsharded_variable = ShardedVariableMixin(variables)\nassert sharded_variable.shape.as_list() == [30, 100]\n```\n\nArgs:\nvariables: A list of `ResourceVariable`s that comprise this sharded\nvariable. Variables should not be shared between different\n`ShardedVariableMixin` objects.\nname: String. Name of this container. Defaults to \"ShardedVariable\".", "source": "github-repos"}
{"code": "def partial_tile(tensor, tile_assignment, use_sharding_op=False, unspecified_dims=None):\n    return Sharding.partial_tile(tile_assignment).apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])", "docstring": "Returns a tensor that has tiled sharding.\n\nArgs:\ntensor: A tf.Tensor to shard.\ntile_assignment: An np.ndarray describing the topology of the tiling and\nwhich device will compute which part of the topology. It must have one\nmore dimension than tensor, and the last dimension represents partially\nreplicated tiles.\nuse_sharding_op: If true, adds a sharding op to set the sharding.\nunspecified_dims: An optional list of dimensions unspecified.", "source": "github-repos"}
{"code": "def fit_transform(self, col):\n        \n\n        if self.anonymize:\n            col = self.anonymize_column(col)\n\n        self._fit(col)\n        return self.transform(col)", "docstring": "Prepare the transformer and return processed data.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def call_with_mapped_args(self, mapped_args: MappedArgs[FrameType]) -> _HasReturnT:", "docstring": "Calls this function with the given mapped arguments.\n\nArgs:\nmapped_args: The function arguments mapped to parameter names.\n\nReturns:\nAn object with information about the result of the function call, with a\nget_return_value() method that retrieves the return value.", "source": "github-repos"}
{"code": "def __init__(self, path: utils.KeyPath, target: 'Symbolic', field: Optional[pg_typing.Field], old_value: Any, new_value: Any):\n    self.path = path\n    self.target = target\n    self.field = field\n    self.old_value = old_value\n    self.new_value = new_value", "docstring": "Constructor.\n\nArgs:\npath: KeyPath of the field that is updated.\ntarget: Parent of updated field.\nfield: Specification of the updated field.\nold_value: Old value of the field.\nnew_value: New value of the field.", "source": "github-repos"}
{"code": "def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):\n    with tf.name_scope('loss', [logits, labels]):\n        (logits, labels) = _pad_tensors_to_same_length(logits, labels)\n        with tf.name_scope('smoothing_cross_entropy', [logits, labels]):\n            confidence = (1.0 - smoothing)\n            low_confidence = ((1.0 - confidence) / tf.to_float((vocab_size - 1)))\n            soft_targets = tf.one_hot(tf.cast(labels, tf.int32), depth=vocab_size, on_value=confidence, off_value=low_confidence)\n            xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=soft_targets)\n            normalizing_constant = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20)))))\n            xentropy -= normalizing_constant\n        weights = tf.to_float(tf.not_equal(labels, 0))\n        return ((xentropy * weights), weights)", "docstring": "Calculate cross entropy loss while ignoring padding.\n\nArgs:\nlogits: Tensor of size [batch_size, length_logits, vocab_size]\nlabels: Tensor of size [batch_size, length_labels]\nsmoothing: Label smoothing constant, used to determine the on and off values\nvocab_size: int size of the vocabulary\nReturns:\nReturns a float32 tensor with shape\n[batch_size, max(length_logits, length_labels)]", "source": "codesearchnet"}
{"code": "def select_by_value(self, value):\n        \n        self._selected_key = None\n        self._selected_item = None\n        for k in self.children:\n            item = self.children[k]\n            if item.get_text() == value:\n                item.attributes['selected'] = 'selected'\n                self._selected_key = k\n                self._selected_item = item\n            else:\n                if 'selected' in item.attributes:\n                    del item.attributes['selected']", "docstring": "Selects a DropDownItem by means of the contained text-\n\nArgs:\nvalue (str): Textual content of the DropDownItem that have to be selected.", "source": "juraj-google-style"}
{"code": "def __init__(self, maximum_number_of_cached_values):\n    \n    if maximum_number_of_cached_values <= 0:\n      raise ValueError(\n          'Invalid maximum number of cached objects value zero or less.')\n\n    super(ObjectsCache, self).__init__()\n    self._maximum_number_of_cached_values = maximum_number_of_cached_values\n    self._values = {}", "docstring": "Initializes the resolver objects cache object.\n\nArgs:\nmaximum_number_of_cached_values (int): maximum number of cached values.\n\nRaises:\nValueError: when the maximum number of cached objects is 0 or less.", "source": "juraj-google-style"}
{"code": "def get_storage(self, id_or_uri):\n        \n        uri = self.URI + \"/{}/storage\".format(extract_id_from_uri(id_or_uri))\n        return self._client.get(uri)", "docstring": "Get storage details of an OS Volume.\n\nArgs:\nid_or_uri: ID or URI of the OS Volume.\n\nReturns:\ndict: Storage details", "source": "juraj-google-style"}
{"code": "def process_exception_message(exception):\n    exception_message = str(exception)\n    for replace_char in ['\\t', '\\n', '\\\\n']:\n        exception_message = exception_message.replace(replace_char, ('' if (replace_char != '\\t') else ' '))\n    return exception_message.replace('section', 'alias')", "docstring": "Process an exception message.\n\nArgs:\nexception: The exception to process.\n\nReturns:\nA filtered string summarizing the exception.", "source": "codesearchnet"}
{"code": "def SetEncodedValue(env, name, value, encoding=None):\n    name = Encode(name, encoding=encoding)\n    if value is None:\n        env.pop(name, None)\n        return\n    env[name] = Encode(value, encoding=encoding)", "docstring": "Sets the value of name in env to an encoded value.\n\nArgs:\nenv: {str: str}, The env dict.\nname: str, The env var name.\nvalue: str or unicode, The value for name. If None then name is removed from\nenv.\nencoding: str, The encoding to use or None to try to infer it.", "source": "github-repos"}
{"code": "def Analyze(self, hashes):\n    hash_analyses = []\n    for digest in hashes:\n        json_response = self._QueryHash(digest)\n        hash_analysis = interface.HashAnalysis(digest, json_response)\n        hash_analyses.append(hash_analysis)\n    return hash_analyses", "docstring": "Looks up hashes in Viper using the Viper HTTP API.\n\nArgs:\nhashes (list[str]): hashes to look up.\n\nReturns:\nlist[HashAnalysis]: hash analysis.\n\nRaises:\nRuntimeError: If no host has been set for Viper.", "source": "codesearchnet"}
{"code": "def remove_block(self, block, index=\"-1\"):\n        \n        self[index][\"__blocks__\"].remove(block)\n        self[index][\"__names__\"].remove(block.raw())", "docstring": "Remove block element from scope\nArgs:\nblock (Block): Block object", "source": "juraj-google-style"}
{"code": "def _find_classes(self, dir):\n    if (sys.version_info >= (3, 5)):\n        classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n    else:\n        classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n    classes.sort()\n    class_to_idx = {classes[i]: i for i in range(len(classes))}\n    return (classes, class_to_idx)", "docstring": "Finds the class folders in a dataset.\n\nArgs:\ndir (string): Root directory path.\n\nReturns:\ntuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\nEnsures:\nNo class is a subdirectory of another.", "source": "codesearchnet"}
{"code": "def GetPluginObjectByName(cls, plugin_name):\n    plugin_class = cls._plugin_classes.get(plugin_name, None)\n    if plugin_class:\n        return plugin_class()\n    return None", "docstring": "Retrieves a specific plugin object by its name.\n\nArgs:\nplugin_name (str): name of the plugin.\n\nReturns:\nBasePlugin: a plugin object or None if not available.", "source": "codesearchnet"}
{"code": "def _SmallestColSize(self, text):\n    if (not text):\n        return 0\n    stripped = terminal.StripAnsiText(text)\n    return max((len(word) for word in stripped.split()))", "docstring": "Finds the largest indivisible word of a string.\n\n...and thus the smallest possible column width that can contain that\nword unsplit over rows.\n\nArgs:\ntext: A string of text potentially consisting of words.\n\nReturns:\nInteger size of the largest single word in the text.", "source": "codesearchnet"}
{"code": "def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor):\n    while True:\n        tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id])\n        running_tasks = set()\n        completed_tasks = set()\n        canceled_tasks = set()\n        fully_failed_tasks = set()\n        task_fail_count = dict()\n        message_task = None\n        task_dict = dict()\n        for t in tasks:\n            task_id = job_model.numeric_task_id(t.get_field('task-id'))\n            task_dict[task_id] = t\n            status = t.get_field('task-status')\n            if (status == 'FAILURE'):\n                task_fail_count[task_id] = (task_fail_count.get(task_id, 0) + 1)\n                if (task_fail_count[task_id] > retries):\n                    fully_failed_tasks.add(task_id)\n                    message_task = t\n            elif (status == 'CANCELED'):\n                canceled_tasks.add(task_id)\n                if (not message_task):\n                    message_task = t\n            elif (status == 'SUCCESS'):\n                completed_tasks.add(task_id)\n            elif (status == 'RUNNING'):\n                running_tasks.add(task_id)\n        retry_tasks = set(task_fail_count).difference(fully_failed_tasks).difference(running_tasks).difference(completed_tasks).difference(canceled_tasks)\n        if ((not retry_tasks) and (not running_tasks)):\n            if message_task:\n                return [provider.get_tasks_completion_messages([message_task])]\n            return []\n        for task_id in retry_tasks:\n            identifier = ('{}.{}'.format(job_id, task_id) if task_id else job_id)\n            print('  {} (attempt {}) failed. Retrying.'.format(identifier, task_fail_count[task_id]))\n            msg = task_dict[task_id].get_field('status-message')\n            print('  Failure message: {}'.format(msg))\n            _retry_task(provider, job_descriptor, task_id, (task_fail_count[task_id] + 1))\n        SLEEP_FUNCTION(poll_interval)", "docstring": "Wait for job and retry any tasks that fail.\n\nStops retrying an individual task when: it succeeds, is canceled, or has been\nretried \"retries\" times.\n\nThis function exits when there are no tasks running and there are no tasks\neligible to be retried.\n\nArgs:\nprovider: job service provider\njob_id: a single job ID (string) to wait for\npoll_interval: integer seconds to wait between iterations\nretries: number of retries\njob_descriptor: job descriptor used to originally submit job\n\nReturns:\nEmpty list if there was no error,\na list containing an error message from a failed task otherwise.", "source": "codesearchnet"}
{"code": "def display_required_items(msg_type):\n    print(('Configure a profile for: ' + msg_type))\n    print('You will need the following information:')\n    for (k, v) in CONFIG[msg_type]['settings'].items():\n        print(('   * ' + v))\n    print('Authorization/credentials required:')\n    for (k, v) in CONFIG[msg_type]['auth'].items():\n        print(('   * ' + v))", "docstring": "Display the required items needed to configure a profile for the given\nmessage type.\n\nArgs:\n:msg_type: (str) message type to create config entry.", "source": "codesearchnet"}
{"code": "def path_get(p: tcod.path.AStar, idx: int) -> Tuple[int, int]:\n    \n    x = ffi.new(\"int *\")\n    y = ffi.new(\"int *\")\n    lib.TCOD_path_get(p._path_c, idx, x, y)\n    return x[0], y[0]", "docstring": "Get a point on a path.\n\nArgs:\np (AStar): An AStar instance.\nidx (int): Should be in range: 0 <= inx < :any:`path_size`", "source": "juraj-google-style"}
{"code": "def _get_elements(self, site):\n        \n        try:\n            if isinstance(site.specie, Element):\n                return [site.specie]\n            return [Element(site.specie)]\n        except:\n            return site.species.elements", "docstring": "Get the list of elements for a Site\n\nArgs:\nsite (Site): Site to assess\nReturns:\n[Element]: List of elements", "source": "juraj-google-style"}
{"code": "def proportions_from_distribution(table, label, sample_size, column_name='Random Sample'):\n    proportions = sample_proportions(sample_size, table.column(label))\n    return table.with_column('Random Sample', proportions)", "docstring": "Adds a column named ``column_name`` containing the proportions of a random\ndraw using the distribution in ``label``.\n\nThis method uses ``np.random.multinomial`` to draw ``sample_size`` samples\nfrom the distribution in ``table.column(label)``, then divides by\n``sample_size`` to create the resulting column of proportions.\n\nArgs:\n``table``: An instance of ``Table``.\n\n``label``: Label of column in ``table``. This column must contain a\ndistribution (the values must sum to 1).\n\n``sample_size``: The size of the sample to draw from the distribution.\n\n``column_name``: The name of the new column that contains the sampled\nproportions. Defaults to ``'Random Sample'``.\n\nReturns:\nA copy of ``table`` with a column ``column_name`` containing the\nsampled proportions. The proportions will sum to 1.\n\nThrows:\n``ValueError``: If the ``label`` is not in the table, or if\n``table.column(label)`` does not sum to 1.", "source": "codesearchnet"}
{"code": "def FoldValue(self, value):\n    if ((value is False) and (self._data_type_definition.false_value is not None)):\n        return self._data_type_definition.false_value\n    if ((value is True) and (self._data_type_definition.true_value is not None)):\n        return self._data_type_definition.true_value\n    raise ValueError('No matching True and False values')", "docstring": "Folds the data type into a value.\n\nArgs:\nvalue (object): value.\n\nReturns:\nobject: folded value.\n\nRaises:\nValueError: if the data type definition cannot be folded into the value.", "source": "codesearchnet"}
{"code": "def fetch(self, url):\n    opener = self._urllib.build_opener()\n    opener.addheaders = self._requestHeaders.items()\n    response = opener.open(url)\n    headers = response.info()\n    raw = response.read()\n    raw = raw.decode('utf8')\n    if (not ('Content-Type' in headers)):\n        raise OEmbedError('Missing mime-type in response')\n    if ((headers['Content-Type'].find('application/xml') != (- 1)) or (headers['Content-Type'].find('text/xml') != (- 1))):\n        response = OEmbedResponse.newFromXML(raw)\n    elif ((headers['Content-Type'].find('application/json') != (- 1)) or (headers['Content-Type'].find('text/javascript') != (- 1)) or (headers['Content-Type'].find('text/json') != (- 1))):\n        response = OEmbedResponse.newFromJSON(raw)\n    else:\n        raise OEmbedError(('Invalid mime-type in response - %s' % headers['Content-Type']))\n    return response", "docstring": "Fetch url and create a response object according to the mime-type.\n\nArgs:\nurl: The url to fetch data from\n\nReturns:\nOEmbedResponse object according to data fetched", "source": "codesearchnet"}
{"code": "def aggr(array, op, initial_value, ty):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n\n    weld_template = \n    weld_obj.weld_code = weld_template % {\n        \"array\": array_var, \"ty\": ty, \"op\": op}\n    return weld_obj", "docstring": "Computes the aggregate of elements in the array.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array to aggregate\nop (str): Op string used to aggregate the array (+ / *)\ninitial_value (int): Initial value for aggregation\nty (WeldType): Type of each element in the input array\n\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def _list_samples(self, predicate=None):\n        \n        cursor = self.database[self.sample_collection].find(predicate, {'_id':0, 'md5':1})\n        return [item['md5'] for item in cursor]", "docstring": "List all samples that meet the predicate or all if predicate is not specified.\n\nArgs:\npredicate: Match samples against this predicate (or all if not specified)\n\nReturns:\nList of the md5s for the matching samples", "source": "juraj-google-style"}
{"code": "def _process_sum_prod(self, func, **kwargs):\n        \n        axis = kwargs.get(\"axis\", 0)\n        min_count = kwargs.get(\"min_count\", 0)\n\n        def sum_prod_builder(df, **kwargs):\n            return func(df, **kwargs)\n\n        if min_count <= 1:\n            return self._full_reduce(axis, sum_prod_builder)\n        else:\n            return self._full_axis_reduce(axis, sum_prod_builder)", "docstring": "Calculates the sum or product of the DataFrame.\n\nArgs:\nfunc: Pandas func to apply to DataFrame.\nignore_axis: Whether to ignore axis when raising TypeError\nReturn:\nA new QueryCompiler object with sum or prod of the object.", "source": "juraj-google-style"}
{"code": "def simple_layer_stack(include_encdec_attention,\n                       num_layers=6,\n                       d_ff=2048,\n                       num_heads=8,\n                       d_kv=128,\n                       dropout_rate=0.1):\n  \n  ret = []\n  for _ in xrange(num_layers):\n    ret.append(\n        transformer_layers.SelfAttention(\n            num_heads=num_heads,\n            key_value_size=d_kv,\n            attention_kwargs={\"dropout_rate\": dropout_rate}))\n    if include_encdec_attention:\n      ret.append(\n          transformer_layers.EncDecAttention(\n              num_heads=num_heads,\n              key_value_size=d_kv,\n              attention_kwargs={\"dropout_rate\": dropout_rate}))\n    ret.append(\n        transformer_layers.DenseReluDense(\n            hidden_size=d_ff,\n            dropout_rate=dropout_rate))\n  return transformer.LayerStack(ret)", "docstring": "Create a layer stack.\n\nArgs:\ninclude_encdec_attention: a boolean\nnum_layers: an integer\nd_ff: an integer\nnum_heads: an integer\nd_kv: an integer\ndropout_rate: a float\n\nReturns:\na LayerStack", "source": "juraj-google-style"}
{"code": "def invert(self) -> Rotation:\n    if self._rot_mats is not None:\n        return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)\n    elif self._quats is not None:\n        return Rotation(rot_mats=None, quats=invert_quat(self._quats), normalize_quats=False)\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Returns the inverse of the current Rotation.\n\nReturns:\nThe inverse of the current Rotation", "source": "github-repos"}
{"code": "def get_vcf_entry(variant_obj, case_id=None):\n    if (variant_obj['category'] == 'snv'):\n        var_type = 'TYPE'\n    else:\n        var_type = 'SVTYPE'\n    info_field = ';'.join([('END=' + str(variant_obj['end'])), ((var_type + '=') + variant_obj['sub_category'].upper())])\n    variant_string = '{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}'.format(variant_obj['chromosome'], variant_obj['position'], variant_obj['dbsnp_id'], variant_obj['reference'], variant_obj['alternative'], variant_obj['quality'], ';'.join(variant_obj['filters']), info_field)\n    if case_id:\n        variant_string += '\\tGT'\n        for sample in variant_obj['samples']:\n            variant_string += ('\\t' + sample['genotype_call'])\n    return variant_string", "docstring": "Get vcf entry from variant object\n\nArgs:\nvariant_obj(dict)\nReturns:\nvariant_string(str): string representing variant in vcf format", "source": "codesearchnet"}
{"code": "def int(name, default=None, allow_none=False, fallback=None):\n    value = read(name, default, allow_none, fallback=fallback)\n    if isinstance(value, builtins.str):\n        value = value.strip()\n    if ((value is None) and allow_none):\n        return None\n    else:\n        return builtins.int(value)", "docstring": "Get a string environment value or the default.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)", "source": "codesearchnet"}
{"code": "def to_representation(self, obj):\n    representation = {}\n    for (name, field) in self.fields.items():\n        if field.write_only:\n            continue\n        attribute = self.get_attribute(obj, (field.source or name))\n        if (attribute is None):\n            representation[name] = ([] if field.many else None)\n        elif field.many:\n            representation[name] = [field.to_representation(item) for item in attribute]\n        else:\n            representation[name] = field.to_representation(attribute)\n    return representation", "docstring": "Convert given internal object instance into representation dict.\n\nRepresentation dict may be later serialized to the content-type\nof choice in the resource HTTP method handler.\n\nThis loops over all fields and retrieves source keys/attributes as\nfield values with respect to optional field sources and converts each\none using ``field.to_representation()`` method.\n\nArgs:\nobj (object): internal object that needs to be represented\n\nReturns:\ndict: representation dictionary", "source": "codesearchnet"}
{"code": "def find_field_names(fields, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, pad_with_none=False):\n    fields = util.listify(fields)\n    model = get_model(model, app)\n    available_field_names = model._meta.get_all_field_names()\n    matched_fields = []\n    for field_name in fields:\n        match = fuzzy.extractOne(str(field_name), available_field_names)\n        if (match and (match[1] is not None) and (match[1] >= score_cutoff)):\n            matched_fields += [match[0]]\n        elif pad_with_none:\n            matched_fields += [None]\n    return matched_fields", "docstring": "Use fuzzy string matching to find similar model field names without consulting a synonyms list\n\nReturns:\nlist: A list model field names (strings) sorted from most likely to least likely.\n[] If no similar field names could be found in the indicated model\n[None] If none found and and `pad_with_none` set\n\nExamples:\n\n>>> find_field_names(['date_time', 'title_prefix', 'sales'], model='WikiItem')\n['date', 'model', 'net_sales']", "source": "codesearchnet"}
{"code": "def validate(item, namespace='accounts', version=2, context=None):\n    \n    if namespace == 'accounts':\n        if version == 2:\n            schema = v2.AccountSchema(strict=True, context=context)\n            return schema.load(item).data\n        elif version == 1:\n            return v1.AccountSchema(strict=True).load(item).data\n        raise InvalidSWAGDataException('Schema version is not supported. Version: {}'.format(version))\n    raise InvalidSWAGDataException('Namespace not supported. Namespace: {}'.format(namespace))", "docstring": "Validate item against version schema.\n\nArgs:\nitem: data object\nnamespace: backend namespace\nversion: schema version\ncontext: schema context object", "source": "juraj-google-style"}
{"code": "def get_job(self):\n    return Job(self.rest_client.make_request(self.job), self.rest_client)", "docstring": "Get the Streams job that owns this view.\n\nReturns:\nJob: Streams Job owning this view.", "source": "codesearchnet"}
{"code": "class JavaJarExpansionService(object):\n\n    def __init__(self, path_to_jar, extra_args=None, classpath=None, append_args=None):\n        if extra_args and append_args:\n            raise ValueError('Only one of extra_args or append_args may be provided')\n        self.path_to_jar = path_to_jar\n        self._extra_args = extra_args\n        self._classpath = classpath or []\n        self._service_count = 0\n        self._append_args = append_args or []\n\n    def is_existing_service(self):\n        return subprocess_server.is_service_endpoint(self.path_to_jar)\n\n    @staticmethod\n    def _expand_jars(jar):\n        if glob.glob(jar):\n            return glob.glob(jar)\n        elif isinstance(jar, str) and (jar.startswith('http:\n            return [subprocess_server.JavaJarServer.local_jar(jar)]\n        else:\n            try:\n                group_id, artifact_id, version = jar.split(':')\n            except ValueError:\n                logging.warning('Unable to parse %s into group:artifact:version.', jar)\n                return [jar]\n            path = subprocess_server.JavaJarServer.local_jar(subprocess_server.JavaJarServer.path_to_maven_jar(artifact_id, group_id, version))\n            return [path]\n\n    def _default_args(self):\n        \n        to_stage = ','.join([self.path_to_jar] + sum((JavaJarExpansionService._expand_jars(jar) for jar in self._classpath or []), []))\n        args = ['{{PORT}}', f'--filesToStage={to_stage}']\n        if subprocess_server.SubprocessServer._cache._live_owners:\n            args.append('--alsoStartLoopbackWorker')\n        return args\n\n    def __enter__(self):\n        if self._service_count == 0:\n            self.path_to_jar = subprocess_server.JavaJarServer.local_jar(self.path_to_jar)\n            if self._extra_args is None:\n                self._extra_args = self._default_args() + self._append_args\n            logging.info('Starting a JAR-based expansion service from JAR %s ' + ('and with classpath: %s' % self._classpath if self._classpath else ''), self.path_to_jar)\n            classpath_urls = [subprocess_server.JavaJarServer.local_jar(path) for jar in self._classpath for path in JavaJarExpansionService._expand_jars(jar)]\n            self._service_provider = subprocess_server.JavaJarServer(ExpansionAndArtifactRetrievalStub, self.path_to_jar, self._extra_args, classpath=classpath_urls)\n            self._service = self._service_provider.__enter__()\n        self._service_count += 1\n        return self._service\n\n    def __exit__(self, *args):\n        self._service_count -= 1\n        if self._service_count == 0:\n            self._service_provider.__exit__(*args)", "docstring": "An expansion service based on an Java Jar file.\n\nThis can be passed into an ExternalTransform as the expansion_service\nargument which will spawn a subprocess using this jar to expand the\ntransform.\n\nArgs:\npath_to_jar: the path to a locally available executable jar file to be used\nto start up the expansion service.\nextra_args: arguments to be provided when starting up the\nexpansion service using the jar file. These arguments will replace the\ndefault arguments.\nclasspath: Additional dependencies to be added to the classpath.\nappend_args: arguments to be provided when starting up the\nexpansion service using the jar file. These arguments will be appended to\nthe default arguments.", "source": "github-repos"}
{"code": "def assert_raises(expected_exception, extras=None, *args, **kwargs):\n    context = _AssertRaisesContext(expected_exception, extras=extras)\n    return context", "docstring": "Assert that an exception is raised when a function is called.\n\nIf no exception is raised, test fail. If an exception is raised but not\nof the expected type, the exception is let through.\n\nThis should only be used as a context manager:\nwith assert_raises(Exception):\nfunc()\n\nArgs:\nexpected_exception: An exception class that is expected to be\nraised.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def __init__(self, todo_tasklet, limit):\n    \n    self._todo_tasklet = todo_tasklet\n    self._limit = limit\n    \n    \n    self._queues = {}\n    self._running = []  \n    self._cache = {}", "docstring": "Init.\n\nArgs:\ntodo_tasklet: the tasklet that actually fires RPC and waits on a MultiRPC.\nIt should take a list of (future, arg) pairs and an \"options\" as\narguments. \"options\" are rpc options.\nlimit: max number of items to batch for each distinct value of \"options\".", "source": "juraj-google-style"}
{"code": "def render(self, program: moderngl.Program, mode=None, vertices=(- 1), first=0, instances=1):\n    vao = self.instance(program)\n    if (mode is None):\n        mode = self.mode\n    vao.render(mode, vertices=vertices, first=first, instances=instances)", "docstring": "Render the VAO.\n\nArgs:\nprogram: The ``moderngl.Program``\n\nKeyword Args:\nmode: Override the draw mode (``TRIANGLES`` etc)\nvertices (int): The number of vertices to transform\nfirst (int): The index of the first vertex to start with\ninstances (int): The number of instances", "source": "codesearchnet"}
{"code": "def MultiDelete(self, urns, token=None):\n    \n    urns = [rdfvalue.RDFURN(urn) for urn in urns]\n\n    if token is None:\n      token = data_store.default_token\n\n    for urn in urns:\n      if urn.Path() == \"/\":\n        raise ValueError(\"Can't delete root URN. Please enter a valid URN\")\n\n    deletion_pool = DeletionPool(token=token)\n    deletion_pool.MultiMarkForDeletion(urns)\n\n    marked_root_urns = deletion_pool.root_urns_for_deletion\n    marked_urns = deletion_pool.urns_for_deletion\n\n    logging.debug(u\"Found %d objects to remove when removing %s\",\n                  len(marked_urns), urns)\n\n    logging.debug(u\"Removing %d root objects when removing %s: %s\",\n                  len(marked_root_urns), urns, marked_root_urns)\n\n    pool = data_store.DB.GetMutationPool()\n    for root in marked_root_urns:\n      \n      \n      \n      self._DeleteChildFromIndex(root, mutation_pool=pool)\n\n    for urn_to_delete in marked_urns:\n      try:\n        self.intermediate_cache.ExpireObject(urn_to_delete.Path())\n      except KeyError:\n        pass\n\n    pool.DeleteSubjects(marked_urns)\n    pool.Flush()\n\n    \n    self.Flush()\n\n    logging.debug(\"Removed %d objects\", len(marked_urns))", "docstring": "Drop all the information about given objects.\n\nDANGEROUS! This recursively deletes all objects contained within the\nspecified URN.\n\nArgs:\nurns: Urns of objects to remove.\ntoken: The Security Token to use for opening this item.\n\nRaises:\nValueError: If one of the urns is too short. This is a safety check to\nensure the root is not removed.", "source": "juraj-google-style"}
{"code": "def get(cls, issue_id):\n        \n        res = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id)\n        return cls(res) if res else None", "docstring": "Returns the class object identified by `issue_id`\n\nArgs:\nissue_id (str): Unique EC2 Instance ID to load from database\n\nReturns:\nEC2 Instance object if found, else None", "source": "juraj-google-style"}
{"code": "def payments(self, virtual_account_id, data={}, **kwargs):\n        \n        url = \"{}/{}/payments\".format(self.base_url, virtual_account_id)\n        return self.get_url(url, data, **kwargs)", "docstring": "Fetch Payment for Virtual Account Id\n\nArgs:\nvirtual_account_id :\nId for which Virtual Account objects has to be retrieved\n\nReturns:\nPayment dict for given Virtual Account Id", "source": "juraj-google-style"}
{"code": "def convert_file_size_to_int(size: Union[int, str]):\n    if isinstance(size, int):\n        return size\n    if size.upper().endswith('GIB'):\n        return int(size[:-3]) * 2 ** 30\n    if size.upper().endswith('MIB'):\n        return int(size[:-3]) * 2 ** 20\n    if size.upper().endswith('KIB'):\n        return int(size[:-3]) * 2 ** 10\n    if size.upper().endswith('GB'):\n        int_size = int(size[:-2]) * 10 ** 9\n        return int_size \n    if size.upper().endswith('MB'):\n        int_size = int(size[:-2]) * 10 ** 6\n        return int_size \n    if size.upper().endswith('KB'):\n        int_size = int(size[:-2]) * 10 ** 3\n        return int_size \n    raise ValueError(\"`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.\")", "docstring": "Converts a size expressed as a string with digits an unit (like `\"5MB\"`) to an integer (in bytes).\n\nArgs:\nsize (`int` or `str`): The size to convert. Will be directly returned if an `int`.\n\nExample:\n```py\n>>> convert_file_size_to_int(\"1MiB\")\n1048576\n```", "source": "github-repos"}
{"code": "def _ProcessUnknownMessages(message, encoded_message):\n    if (not encoded_message):\n        return message\n    decoded_message = json.loads(six.ensure_str(encoded_message))\n    message_fields = ([x.name for x in message.all_fields()] + list(message.all_unrecognized_fields()))\n    missing_fields = [x for x in decoded_message.keys() if (x not in message_fields)]\n    for field_name in missing_fields:\n        message.set_unrecognized_field(field_name, decoded_message[field_name], messages.Variant.STRING)\n    return message", "docstring": "Store any remaining unknown fields as strings.\n\nProtoRPC currently ignores unknown values for which no type can be\ndetermined (and logs a \"No variant found\" message). For the purposes\nof reserializing, this is quite harmful (since it throws away\ninformation). Here we simply add those as unknown fields of type\nstring (so that they can easily be reserialized).\n\nArgs:\nmessage: Proto message we've decoded thus far.\nencoded_message: JSON string we're decoding.\n\nReturns:\nmessage, with any remaining unrecognized fields saved.", "source": "codesearchnet"}
{"code": "def GetColocationGroups(self):\n    return tf_item.TF_GetColocationGroups(self.tf_item)", "docstring": "Return a list of hard colocation constraints.\n\nAll the nodes in a colocation tuple must be placed on the same device for\nthe model to work.\n\nReturns:\nA list of colocation tuples.", "source": "github-repos"}
{"code": "def market_normal(self, session, after_open, before_close) -> Session:\n        \n        logger = logs.get_logger(self.market_normal)\n\n        if session not in self.exch: return SessNA\n        ss = self.exch[session]\n\n        s_time = shift_time(ss[0], int(after_open) + 1)\n        e_time = shift_time(ss[-1], -int(before_close))\n\n        request_cross = pd.Timestamp(s_time) >= pd.Timestamp(e_time)\n        session_cross = pd.Timestamp(ss[0]) >= pd.Timestamp(ss[1])\n        if request_cross and (not session_cross):\n            logger.warning(f'end time {e_time} is earlier than {s_time} ...')\n            return SessNA\n\n        return Session(s_time, e_time)", "docstring": "Time intervals between market\n\nArgs:\nsession: [allday, day, am, pm, night]\nafter_open: mins after open\nbefore_close: mins before close\n\nReturns:\nSession of start_time and end_time", "source": "juraj-google-style"}
{"code": "def Map(fn, *args, **kwargs):\n    if not callable(fn):\n        raise TypeError('Map can be used only with callable objects. Received %r instead.' % fn)\n    from apache_beam.transforms.util import fn_takes_side_inputs\n    if fn_takes_side_inputs(fn):\n        wrapper = lambda x, *args, **kwargs: [fn(x, *args, **kwargs)]\n    else:\n        wrapper = lambda x: [fn(x)]\n    label = 'Map(%s)' % ptransform.label_from_callable(fn)\n    if hasattr(fn, '__name__'):\n        wrapper.__name__ = fn.__name__\n    type_hints = get_type_hints(fn).with_defaults(typehints.decorators.IOTypeHints.from_callable(fn))\n    if type_hints.input_types is not None:\n        wrapper = with_input_types(*type_hints.input_types[0], **type_hints.input_types[1])(wrapper)\n    output_hint = type_hints.simple_output_type(label)\n    if output_hint:\n        wrapper = with_output_types(typehints.Iterable[_strip_output_annotations(output_hint)])(wrapper)\n    wrapper._argspec_fn = fn\n    pardo = FlatMap(wrapper, *args, **kwargs)\n    pardo.label = label\n    return pardo", "docstring": ":func:`Map` is like :func:`FlatMap` except its callable returns only a\nsingle element.\n\nArgs:\nfn (callable): a callable object.\n*args: positional arguments passed to the transform callable.\n**kwargs: keyword arguments passed to the transform callable.\n\nReturns:\n~apache_beam.pvalue.PCollection:\nA :class:`~apache_beam.pvalue.PCollection` containing the\n:func:`Map` outputs.\n\nRaises:\nTypeError: If the **fn** passed as argument is not a callable.\nTypical error is to pass a :class:`DoFn` instance which is supported only\nfor :class:`ParDo`.", "source": "github-repos"}
{"code": "def for_new_graph(*args, **kwargs):\n    graph = tf.Graph()\n    with graph.as_default():\n        return for_default_graph(*args, **kwargs)", "docstring": "Creates a Bookkeeper for a new graph.\n\nYou must use `m.g.as_default()` to put the graph in scope:\n\nm = Bookkeeper.for_new_graph()\nwith m.g.as_default():\n...\n\nArgs:\n*args: Arguments to pass into Bookkeeper's constructor.\n**kwargs: Arguments to pass into Bookkeeper's constructor.\nReturns:\nA new Bookkeeper.", "source": "codesearchnet"}
{"code": "def MROMerge(input_seqs):\n    seqs = [Dedup(s) for s in input_seqs]\n    try:\n        return MergeSequences(seqs)\n    except ValueError as e:\n        raise MROError(input_seqs) from e", "docstring": "Merge a sequence of MROs into a single resulting MRO.\n\nArgs:\ninput_seqs: A sequence of MRO sequences.\n\nReturns:\nA single resulting MRO.\n\nRaises:\nMROError: If we discovered an illegal inheritance.", "source": "github-repos"}
{"code": "def to_yaml(self, **kwargs):\n    raise RuntimeError('Method `model.to_yaml()` has been removed due to security risk of arbitrary code execution. Please use `model.to_json()` instead.')", "docstring": "Returns a yaml string containing the network configuration.\n\nNote: Since TF 2.6, this method is no longer supported and will raise a\nRuntimeError.\n\nTo load a network from a yaml save file, use\n`keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n\n`custom_objects` should be a dictionary mapping\nthe names of custom losses / layers / etc to the corresponding\nfunctions / classes.\n\nArgs:\n**kwargs: Additional keyword arguments\nto be passed to `yaml.dump()`.\n\nReturns:\nA YAML string.\n\nRaises:\nRuntimeError: announces that the method poses a security risk", "source": "github-repos"}
{"code": "def unravel_sections(section_data):\n    sections = []\n    for (type, subsection_list) in section_data.items():\n        for section in subsection_list:\n            section['sectionType'] = type\n            sections.append(section)\n    return sections", "docstring": "Unravels section type dictionary into flat list of sections with\nsection type set as an attribute.\n\nArgs:\nsection_data(dict): Data return from py:method::get_sections\n\nReturns:\nlist: Flat list of sections with ``sectionType`` set to\ntype (i.e. recitation, lecture, etc)", "source": "codesearchnet"}
{"code": "def create_all(cls, list_of_kwargs):\n        \n        try:\n            return cls.add_all([\n                cls.new(**kwargs) if kwargs is not None else None for kwargs in list_of_kwargs])\n        except:\n            cls.session.rollback()\n            raise", "docstring": "Batch method for creating a list of instances\n\nArgs:\nlist_of_kwargs(list of dicts): hereA list of dicts where\neach dict denotes the keyword args that you would pass\nto the create method separately\n\nExamples:\n\n>>> Customer.create_all([\n... {'name': 'Vicky', 'age': 34, 'user_id': 1},\n... {'name': 'Ron', 'age': 40, 'user_id': 1, 'gender': 'Male'}])", "source": "juraj-google-style"}
{"code": "def create_index(self, model, waiting_models):\n    bucket_name = model._get_bucket_name()\n    bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)\n    index_name = ('%s_%s' % (settings.DEFAULT_BUCKET_TYPE, bucket_name))\n    bucket = bucket_type.bucket(bucket_name)\n    try:\n        client.get_search_index(index_name)\n        if (not (bucket.get_property('search_index') == index_name)):\n            bucket.set_property('search_index', index_name)\n            print(('+ %s (%s) search index is created.' % (model.__name__, index_name)))\n    except RiakError:\n        try:\n            client.create_search_index(index_name, index_name, self.n_val)\n            bucket.set_property('search_index', index_name)\n            print(('+ %s (%s) search index is created.' % (model.__name__, index_name)))\n        except RiakError:\n            print(('+ %s (%s) search index checking operation is taken to queue.' % (model.__name__, index_name)))\n            waiting_models.append(model)", "docstring": "Creates search indexes.\n\nArgs:\nmodel: model to execute\nwaiting_models: if riak can't return response immediately, model is taken to queue.\nAfter first execution session, method is executed with waiting models and controlled.\nAnd be ensured that all given models are executed properly.\n\nReturns:", "source": "codesearchnet"}
{"code": "def create_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name, description, protocol='Tcp', source_range='*', destination_range='*', source_prefix='*', destination_prefix='*', access='Allow', priority=100, direction='Inbound'):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name, '/securityRules/', nsg_rule_name, '?api-version=', NETWORK_API])\n    properties = {'description': description}\n    properties['protocol'] = protocol\n    properties['sourcePortRange'] = source_range\n    properties['destinationPortRange'] = destination_range\n    properties['sourceAddressPrefix'] = source_prefix\n    properties['destinationAddressPrefix'] = destination_prefix\n    properties['access'] = access\n    properties['priority'] = priority\n    properties['direction'] = direction\n    ip_body = {'properties': properties}\n    body = json.dumps(ip_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create network security group rule.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnsg_name (str): Name of the Network Security Group.\nnsg_rule_name (str): Name of the new rule.\ndescription (str): Description.\nprotocol (str): Optional protocol. Default Tcp.\nsource_range (str): Optional source IP range. Default '*'.\ndestination_range (str): Destination IP range. Default *'.\nsource_prefix (str): Source DNS prefix. Default '*'.\ndestination_prefix (str): Destination prefix. Default '*'.\naccess (str): Allow or deny rule. Default Allow.\npriority: Relative priority. Default 100.\ndirection: Inbound or Outbound. Default Inbound.\n\nReturns:\nHTTP response. NSG JSON rule body.", "source": "codesearchnet"}
{"code": "def register(self, token, regexp):\n    self._tokens.append((token, re.compile(regexp)))", "docstring": "Register a token.\n\nArgs:\ntoken (Token): the token class to register\nregexp (str): the regexp for that token", "source": "codesearchnet"}
{"code": "def get_pattern_additional_cycles(self, patternnumber):\n        \n        _checkPatternNumber(patternnumber)\n\n        address = _calculateRegisterAddress('cycles', patternnumber)\n        return self.read_register(address)", "docstring": "Get the number of additional cycles for a given pattern.\n\nArgs:\npatternnumber (integer): 0-7\n\nReturns:\nThe number of additional cycles (int).", "source": "juraj-google-style"}
{"code": "def routerify(obj):\n    \n    router = Router()\n    for info in get_routing_attributes(obj):\n        router.add_route(*info)\n    obj.__growler_router = router\n    return router", "docstring": "Scan through attributes of object parameter looking for any which\nmatch a route signature.\nA router will be created and added to the object with parameter.\n\nArgs:\nobj (object): The object (with attributes) from which to\nsetup a router\n\nReturns:\nRouter: The router created from attributes in the object.", "source": "juraj-google-style"}
{"code": "def _tf_predict(model_dir, input_csvlines):\n    with tf.Graph().as_default(), tf.Session() as sess:\n        (input_alias_map, output_alias_map) = _tf_load_model(sess, model_dir)\n        csv_tensor_name = list(input_alias_map.values())[0]\n        results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: input_csvlines})\n    if (len(input_csvlines) == 1):\n        for (k, v) in six.iteritems(results):\n            if (not isinstance(v, (list, np.ndarray))):\n                results[k] = [v]\n    for (k, v) in six.iteritems(results):\n        if any((isinstance(x, bytes) for x in v)):\n            results[k] = [x.decode('utf-8') for x in v]\n    return results", "docstring": "Prediction with a tf savedmodel.\n\nArgs:\nmodel_dir: directory that contains a saved model\ninput_csvlines: list of csv strings\n\nReturns:\nDict in the form tensor_name:prediction_list. Note that the value is always\na list, even if there was only 1 row in input_csvlines.", "source": "codesearchnet"}
{"code": "def prune(self, cutoff: int = 2):\n        \n\n        \n        for node_pair in tqdm(list(permutations(self.nodes(), 2))):\n            paths = [\n                list(pairwise(path))\n                for path in nx.all_simple_paths(self, *node_pair, cutoff)\n            ]\n            if len(paths) > 1:\n                for path in paths:\n                    if len(path) == 1:\n                        self.delete_edge(*path[0])\n                        if any(self.degree(n) == 0 for n in path[0]):\n                            self.add_edge(*path[0])\n                        break", "docstring": "Prunes the CAG by removing redundant paths. If there are multiple\n(directed) paths between two nodes, this function removes all but the\nlongest paths. Subsequently, it restricts the graph to the largest\nconnected component.\n\nArgs:\ncutoff: The maximum path length to consider for finding redundant\npaths. Higher values of this parameter correspond to more\naggressive pruning.", "source": "juraj-google-style"}
{"code": "def str_to_mac(mac_string):\n    sp = mac_string.split(':')\n    mac_string = ''.join(sp)\n    return binascii.unhexlify(mac_string)", "docstring": "Convert a readable string to a MAC address\n\nArgs:\nmac_string (str): a readable string (e.g. '01:02:03:04:05:06')\nReturns:\nstr: a MAC address in hex form", "source": "codesearchnet"}
{"code": "def add(self, *dic):\n    dicList = list(flatten(dic))\n    for d in dicList:\n        di = []\n        for k in d:\n            di.append(Pair(k, IntegerSingle(d[k])))\n        dictSingle = DictSingle(di)\n        self._add([dictSingle], self.l)", "docstring": "add a config to StartCalendarInterval.\n\nArgs:\n*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.", "source": "codesearchnet"}
{"code": "def hill_climb(nsteps, start_node, get_next_node):\n    outputs = []\n    best_score = (- float('inf'))\n    for step in range(nsteps):\n        (next_node, score, output) = get_next_node(copy.deepcopy(start_node))\n        if (score > best_score):\n            start_node = copy.deepcopy(next_node)\n            best_score = score\n            outputs.append(output)\n    return (start_node, best_score, outputs)", "docstring": "Modular hill climbing algorithm.\n\nExample:\n>>> def get_next_node(node):\n...     a, b = random.sample(range(len(node)), 2)\n...     node[a], node[b] = node[b], node[a]\n...     plaintext = decrypt(node, ciphertext)\n...     score = lantern.score(plaintext, *fitness_functions)\n...     return node, score, Decryption(plaintext, ''.join(node), score)\n>>> final_node, best_score, outputs = hill_climb(10, \"ABC\", get_next_node)\n\nArgs:\nnsteps (int): The number of neighbours to visit\nstart_node: The starting node\nget_next_node (function): Function to return the next node\nthe score of the current node and any optional output from the current node\n\nReturns:\nThe highest node found, the score of this node and the outputs from the best nodes along the way", "source": "codesearchnet"}
{"code": "def get_input(self, name, ds):\n        \n        columns = self.inputs.get(name)\n        df = ds.get_dataframe()\n\n        \n        for column in columns:\n            if column not in df.columns:\n                df[column] = self.defaults.get(column)\n\n        return df[columns]", "docstring": "Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.\n\nArgs:\nname (str): The name of the input.\nds (openflow.DataSource): The DataSource that will feed the data.\n\nReturns:\npandas.DataFrame: The content of the input.", "source": "juraj-google-style"}
{"code": "def print_projects(projects=None):\n    \n    grouped_by = {}\n    if not projects:\n        print(\n            \"Your selection didn't include any projects for this experiment.\")\n        return\n\n    for name in projects:\n        prj = projects[name]\n\n        if prj.GROUP not in grouped_by:\n            grouped_by[prj.GROUP] = []\n\n        grouped_by[prj.GROUP].append(\"{name}/{group}\".format(\n            name=prj.NAME, group=prj.GROUP))\n\n    for name in grouped_by:\n        print(\"group: {0}\".format(name))\n        group_projects = sorted(grouped_by[name])\n        for prj in group_projects:\n            prj_cls = projects[prj]\n\n            version_str = None\n            if hasattr(prj_cls, 'versions'):\n                version_str = \", \".join(prj_cls.versions())\n\n            project_id = \"{0}/{1}\".format(prj_cls.NAME, prj_cls.GROUP)\n\n            project_str = \\\n                \"  name: {id:<32} version: {version:<24} source: {src}\".format(\n                    id=str(project_id),\n                    version=str(prj_cls.VERSION),\n                    src=str(prj_cls.SRC_FILE))\n            print(project_str)\n            if prj_cls.__doc__:\n                docstr = prj_cls.__doc__.strip(\"\\n \")\n                print(\"    description: {desc}\".format(desc=docstr))\n            if version_str:\n                print(\"    versions: {versions}\".format(versions=version_str))\n        print()", "docstring": "Print a list of projects registered for that experiment.\n\nArgs:\nexp: The experiment to print all projects for.", "source": "juraj-google-style"}
{"code": "def _is_of_type(self, path, st_flag, follow_symlinks=True):\n        \n        path = make_string_path(path)\n        if path is None:\n            raise TypeError\n        try:\n            obj = self.resolve(path, follow_symlinks)\n            if obj:\n                self.raise_for_filepath_ending_with_separator(\n                    path, obj, macos_handling=not follow_symlinks)\n                return S_IFMT(obj.st_mode) == st_flag\n        except (IOError, OSError):\n            return False\n        return False", "docstring": "Helper function to implement isdir(), islink(), etc.\n\nSee the stat(2) man page for valid stat.S_I* flag values\n\nArgs:\npath: Path to file to stat and test\nst_flag: The stat.S_I* flag checked for the file's st_mode\n\nReturns:\n(boolean) `True` if the st_flag is set in path's st_mode.\n\nRaises:\nTypeError: if path is None", "source": "juraj-google-style"}
{"code": "def apply_fixup_array(bin_view, fx_offset, fx_count, entry_size):\n    \n    fx_array = bin_view[fx_offset:fx_offset+(2 * fx_count)]\n    \n    fx_len = fx_count - 1\n    \n    sector_size = int(entry_size / fx_len)\n    index = 1\n    position = (sector_size * index) - 2\n    while (position <= entry_size):\n        if bin_view[position:position+2].tobytes() == fx_array[:2].tobytes():\n            \n            bin_view[position:position+2] = fx_array[index * 2:(index * 2) + 2]\n        else:\n            _MOD_LOGGER.error(\"Error applying the fixup array\")\n            raise FixUpError(f\"Signature {fx_array[:2].tobytes()} does not match {bin_view[position:position+2].tobytes()} at offset {position}.\")\n        index += 1\n        position = (sector_size * index) - 2\n    _MOD_LOGGER.info(\"Fix up array applied successfully.\")", "docstring": "This function reads the fixup array and apply the correct values\nto the underlying binary stream. This function changes the bin_view\nin memory.\n\nArgs:\nbin_view (memoryview of bytearray) - The binary stream\nfx_offset (int) - Offset to the fixup array\nfx_count (int) - Number of elements in the fixup array\nentry_size (int) - Size of the MFT entry", "source": "juraj-google-style"}
{"code": "def build_exon(exon_info, build='37'):\n    try:\n        chrom = exon_info['chrom']\n    except KeyError:\n        raise KeyError('Exons has to have a chromosome')\n    try:\n        start = int(exon_info['start'])\n    except KeyError:\n        raise KeyError('Exon has to have a start')\n    except TypeError:\n        raise TypeError('Exon start has to be integer')\n    try:\n        end = int(exon_info['end'])\n    except KeyError:\n        raise KeyError('Exon has to have a end')\n    except TypeError:\n        raise TypeError('Exon end has to be integer')\n    try:\n        rank = int(exon_info['rank'])\n    except KeyError:\n        raise KeyError('Exon has to have a rank')\n    except TypeError:\n        raise TypeError('Exon rank has to be integer')\n    try:\n        exon_id = exon_info['exon_id']\n    except KeyError:\n        raise KeyError('Exons has to have a id')\n    try:\n        transcript = exon_info['transcript']\n    except KeyError:\n        raise KeyError('Exons has to have a transcript')\n    try:\n        hgnc_id = int(exon_info['hgnc_id'])\n    except KeyError:\n        raise KeyError('Exons has to have a hgnc_id')\n    except TypeError:\n        raise TypeError('hgnc_id has to be integer')\n    exon_obj = Exon(exon_id=exon_id, chrom=chrom, start=start, end=end, rank=rank, transcript=transcript, hgnc_id=hgnc_id, build=build)\n    return exon_obj", "docstring": "Build a Exon object object\n\nArgs:\nexon_info(dict): Exon information\n\nReturns:\nexon_obj(Exon)\n\n\"exon_id\": str, # str(chrom-start-end)\n\"chrom\": str,\n\"start\": int,\n\"end\": int,\n\"transcript\": str, # ENST ID\n\"hgnc_id\": int,      # HGNC_id\n\"rank\": int, # Order of exon in transcript\n\"build\": str, # Genome build", "source": "codesearchnet"}
{"code": "def to_representation(self, instance):\n        \n        updated_course = copy.deepcopy(instance)\n        enterprise_customer_catalog = self.context['enterprise_customer_catalog']\n        updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(\n            updated_course['key']\n        )\n        for course_run in updated_course['course_runs']:\n            course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(\n                course_run['key']\n            )\n        return updated_course", "docstring": "Return the updated course data dictionary.\n\nArguments:\ninstance (dict): The course data.\n\nReturns:\ndict: The updated course data.", "source": "juraj-google-style"}
{"code": "def FormatType(self, level_name, class_problist):\n    \n    class_problist.sort()\n    output = []\n    for classname, problist in class_problist:\n      output.append('<h4 class=\"issueHeader\"><a name=\"%s%s\">%s</a></h4><ul>\\n' %\n                    (level_name, classname, UnCamelCase(classname)))\n      for e in problist.problems:\n        self.FormatException(e, output)\n      if problist.dropped_count:\n        output.append('<li>and %d more of this type.' %\n                      (problist.dropped_count))\n      output.append('</ul>\\n')\n    return ''.join(output)", "docstring": "Write the HTML dumping all problems of one type.\n\nArgs:\nlevel_name: string such as \"Error\" or \"Warning\"\nclass_problist: sequence of tuples (class name,\nBoundedProblemList object)\n\nReturns:\nHTML in a string", "source": "juraj-google-style"}
{"code": "def restore(self, restored_tensors, unused_restored_shapes):\n    with ops.control_dependencies([self._create_op]):\n        return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])", "docstring": "Restores the associated tree ensemble from 'restored_tensors'.\n\nArgs:\nrestored_tensors: the tensors that were loaded from a checkpoint.\nunused_restored_shapes: the shapes this object should conform to after\nrestore. Not meaningful for trees.\n\nReturns:\nThe operation that restores the state of the tree ensemble variable.", "source": "github-repos"}
{"code": "def evaluate_partition(self, direction, mechanism, purview, partition, repertoire=None):\n    if (repertoire is None):\n        repertoire = self.repertoire(direction, mechanism, purview)\n    partitioned_repertoire = self.partitioned_repertoire(direction, partition)\n    phi = repertoire_distance(direction, repertoire, partitioned_repertoire)\n    return (phi, partitioned_repertoire)", "docstring": "Return the |small_phi| of a mechanism over a purview for the given\npartition.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The nodes in the mechanism.\npurview (tuple[int]): The nodes in the purview.\npartition (Bipartition): The partition to evaluate.\n\nKeyword Args:\nrepertoire (np.array): The unpartitioned repertoire.\nIf not supplied, it will be computed.\n\nReturns:\ntuple[int, np.ndarray]: The distance between the unpartitioned and\npartitioned repertoires, and the partitioned repertoire.", "source": "codesearchnet"}
{"code": "def merge_leading_dims(array_or_tensor, n_dims=2):\n    tensor = tf.convert_to_tensor(array_or_tensor)\n    tensor_shape_static = tensor.get_shape()\n    if (tensor_shape_static.dims is None):\n        raise ValueError(\"Can't merge leading dimensions of tensor of unknown rank.\")\n    tensor_shape_list = tensor_shape_static.as_list()\n    if (n_dims > len(tensor_shape_list)):\n        return array_or_tensor\n    if tensor_shape_static.is_fully_defined():\n        new_shape = ([np.prod(tensor_shape_list[:n_dims])] + tensor_shape_list[n_dims:])\n        return tf.reshape(tensor, new_shape)\n    tensor_shape = tf.shape(tensor)\n    new_first_dim = tf.reduce_prod(tensor_shape[:n_dims], keepdims=True)\n    other_dims = tensor_shape[n_dims:]\n    new_size = tf.concat([new_first_dim, other_dims], 0)\n    result = tf.reshape(tensor, new_size)\n    if all(((value is not None) for value in tensor_shape_list[:n_dims])):\n        merged_leading_size = np.prod(tensor_shape_list[:n_dims])\n    else:\n        merged_leading_size = None\n    result.set_shape(([merged_leading_size] + tensor_shape_list[n_dims:]))\n    return result", "docstring": "Merge the first dimensions of a tensor.\n\nArgs:\narray_or_tensor: Tensor to have its first dimensions merged. Can also\nbe an array or numerical value, which will be converted to a tensor\nfor batch application, if needed.\nn_dims: Number of dimensions to merge.\n\nReturns:\nEither the input value converted to a Tensor, with the requested dimensions\nmerged, or the unmodified input value if the input has less than `n_dims`\ndimensions.\n\nRaises:\nValueError: If the rank of `array_or_tensor` is not well-defined.", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    \n    match_generator = self._VERIFICATION_GRAMMAR.scanString(lines, maxMatches=1)\n    return bool(list(match_generator))", "docstring": "Verifies that this is a bash history file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between\nparsers and other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def add(self, *value):\n        \n        flattenedValueList = list(flatten(value))\n        return self._add(flattenedValueList, self.value)", "docstring": "convert value and add to self.value\n\nSubclass must overwrite this method.\nSubclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value``\n\nArgs:\n*value: the value to be added", "source": "juraj-google-style"}
{"code": "def wait_for_prompt(self, timeout_s=None):\n    \n    with self._cond:\n      if self._prompt:\n        if timeout_s is None:\n          self._cond.wait(3600 * 24 * 365)\n        else:\n          self._cond.wait(timeout_s)\n      if self._response is None:\n        raise PromptUnansweredError\n      return self._response", "docstring": "Wait for the user to respond to the current prompt.\n\nArgs:\ntimeout_s: Seconds to wait before raising a PromptUnansweredError.\n\nReturns:\nA string response, or the empty string if text_input was False.\n\nRaises:\nPromptUnansweredError: Timed out waiting for the user to respond.", "source": "juraj-google-style"}
{"code": "def _calc_rms(mol1, mol2, clabel1, clabel2):\n        \n        obmol1 = BabelMolAdaptor(mol1).openbabel_mol\n        obmol2 = BabelMolAdaptor(mol2).openbabel_mol\n\n        cmol1 = ob.OBMol()\n        for i in clabel1:\n            oa1 = obmol1.GetAtom(i)\n            a1 = cmol1.NewAtom()\n            a1.SetAtomicNum(oa1.GetAtomicNum())\n            a1.SetVector(oa1.GetVector())\n        cmol2 = ob.OBMol()\n        for i in clabel2:\n            oa2 = obmol2.GetAtom(i)\n            a2 = cmol2.NewAtom()\n            a2.SetAtomicNum(oa2.GetAtomicNum())\n            a2.SetVector(oa2.GetVector())\n\n        aligner = ob.OBAlign(True, False)\n        aligner.SetRefMol(cmol1)\n        aligner.SetTargetMol(cmol2)\n        aligner.Align()\n        return aligner.GetRMSD()", "docstring": "Calculate the RMSD.\n\nArgs:\nmol1: The first molecule. OpenBabel OBMol or pymatgen Molecule\nobject\nmol2: The second molecule. OpenBabel OBMol or pymatgen Molecule\nobject\nclabel1: The atom indices that can reorder the first molecule to\nuniform atom order\nclabel1: The atom indices that can reorder the second molecule to\nuniform atom order\n\nReturns:\nThe RMSD.", "source": "juraj-google-style"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        config_dict = self.to_dict()\n        json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'\n        writer.write(json_string)", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (Union[str, os.PathLike]): Path to the JSON file in which this configuration instance's parameters will be saved.", "source": "github-repos"}
{"code": "def quota(self):\n    response = self._call(mm_calls.ClientState, self.uploader_id)\n    client_state = response.body.clientstate_response\n    return (client_state.total_track_count, client_state.locker_track_limit)", "docstring": "Get the uploaded track count and allowance.\n\nReturns:\ntuple: Number of uploaded tracks, number of tracks allowed.", "source": "codesearchnet"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets information about a snapshot.\n\nArgs:\nrequest: (DataflowProjectsSnapshotsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Snapshot) The response message.", "source": "github-repos"}
{"code": "def make_message(self, data):\n    data = self.codec.loads(data)\n    msg = Message(data.get('data'), *data.get('args', []), **data.get('kwargs', {}))\n    msg.meta.update(data.get('meta'))\n    self.trigger('make_message', data, msg)\n    return msg", "docstring": "Create a Message instance from data, data will be loaded\nvia munge according to the codec specified in the\ntransport_content_type attribute\n\nReturns:\n\nMessage: message object", "source": "codesearchnet"}
{"code": "def _replace_tensors_by_numpy_ndarrays(repr_ds_map: rd.RepresentativeDatasetMapping) -> None:\n    with session.Session() as sess:\n        for signature_def_key in repr_ds_map:\n            ds = repr_ds_map[signature_def_key]\n            repr_ds_map[signature_def_key] = rd.replace_tensors_by_numpy_ndarrays(ds, sess)", "docstring": "Replaces tf.Tensors by their evaluated numpy arrays.\n\nThis assumes that tf.Tensors in representative samples are created in the\ndefault Graph. It will raise an error if tensors are created in a different\ngraph.\n\nArgs:\nrepr_ds_map: SignatureDef key -> RepresentativeDataset mapping.", "source": "github-repos"}
{"code": "def calc_crc16(buf):\n        \n        crc_table = [0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,\n                     0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,\n                     0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,\n                     0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,\n                     0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,\n                     0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,\n                     0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,\n                     0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,\n                     0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,\n                     0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,\n                     0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,\n                     0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,\n                     0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,\n                     0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,\n                     0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,\n                     0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,\n                     0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,\n                     0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,\n                     0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,\n                     0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,\n                     0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,\n                     0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,\n                     0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,\n                     0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,\n                     0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,\n                     0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,\n                     0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,\n                     0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,\n                     0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,\n                     0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,\n                     0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,\n                     0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040]\n\n        crc = 0xffff\n        for c in buf:\n            index = (crc ^ ord(c)) & 0xff\n            crct = crc_table[index]\n            crc = (crc >> 8) ^ crct\n        crc = (crc << 8) | (crc >> 8)\n        crc &= 0x7F7F\n\n        return \"%04x\" % crc", "docstring": "Drop in pure python replacement for ekmcrc.c extension.\n\nArgs:\nbuf (bytes): String or byte array (implicit Python 2.7 cast)\n\nReturns:\nstr: 16 bit CRC per EKM Omnimeters formatted as hex string.", "source": "juraj-google-style"}
{"code": "def _get_course_content(course_id, course_url, sailthru_client, site_code, config):\n    \n    \n    cache_key = \"{}:{}\".format(site_code, course_url)\n    response = cache.get(cache_key)\n    if not response:\n        try:\n            sailthru_response = sailthru_client.api_get(\"content\", {\"id\": course_url})\n            if not sailthru_response.is_ok():\n                response = {}\n            else:\n                response = sailthru_response.json\n                cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))\n\n        except SailthruClientError:\n            response = {}\n\n        if not response:\n            logger.error('Could not get course data from Sailthru on enroll/purchase event. '\n                         'Calling Ecommerce Course API to get course info for enrollment confirmation email')\n            response = _get_course_content_from_ecommerce(course_id, site_code=site_code)\n            if response:\n                cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))\n\n    return response", "docstring": "Get course information using the Sailthru content api or from cache.\n\nIf there is an error, just return with an empty response.\n\nArguments:\ncourse_id (str): course key of the course\ncourse_url (str): LMS url for course info page.\nsailthru_client (object): SailthruClient\nsite_code (str): site code\nconfig (dict): config options\n\nReturns:\ncourse information from Sailthru", "source": "juraj-google-style"}
{"code": "def asdict_with_event(self):\n    event = threading.Event()\n    with self._lock:\n        self._update_events.add(event)\n    return (self._asdict(), event)", "docstring": "Get a dict representation of this object and an update event.\n\nReturns:\nstate: Dict representation of this object.\nupdate_event: An event that is guaranteed to be set if an update has been\ntriggered since the returned dict was generated.", "source": "codesearchnet"}
{"code": "def make_pool3d_tests(pool_op):\n\n    def f(options, expected_tf_failures=0):\n        \n        test_parameters = [{'ksize': [[1, 1, 1, 1, 1], [1, 2, 2, 2, 1], [1, 2, 3, 4, 1]], 'strides': [[1, 1, 1, 1, 1], [1, 2, 1, 2, 1], [1, 2, 2, 4, 1]], 'input_shape': [[1, 1, 1, 1, 1], [1, 16, 15, 14, 1], [3, 16, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NDHWC']}]\n\n        def build_graph(parameters):\n            input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])\n            out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])\n            return ([input_tensor], [out])\n\n        def build_inputs(parameters, sess, inputs, outputs):\n            input_values = create_tensor_data(tf.float32, parameters['input_shape'])\n            return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))\n        extra_convert_options = ExtraConvertOptions()\n        extra_convert_options.allow_custom_ops = True\n        make_zip_of_tests(options, test_parameters, build_graph, build_inputs, extra_convert_options, expected_tf_failures=expected_tf_failures)\n    return f", "docstring": "Make a set of tests to do pooling.\n\nArgs:\npool_op: TensorFlow pooling operation to test  i.e. `tf.nn.max_pool3d`.\n\nReturns:\nA function representing the true generator (after curried pool_op).", "source": "github-repos"}
{"code": "def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):\n    \n    text_view_entry_comment = extract_element_internationalized_comment(text_view)\n    if text_view_entry_comment is None:\n        return\n\n    if text_view.hasAttribute('usesAttributedText') and text_view.attributes['usesAttributedText'].value == 'YES':\n        add_string_pairs_from_attributed_ui_element(results, text_view, text_view_entry_comment)\n    else:\n        try:\n            text_view_entry_key = text_view.attributes['text'].value\n            results.append((text_view_entry_key, text_view_entry_comment + ' default text value'))\n        except KeyError:\n            pass\n    warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)", "docstring": "Adds string pairs from a textview element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\ntext_view(element): The textview element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)", "source": "juraj-google-style"}
{"code": "def acquire(self, uuid_path, subnet=None):\n    try:\n        with self._create_lock():\n            if subnet:\n                LOGGER.debug('Trying to acquire subnet {}'.format(subnet))\n                acquired_subnet = self._acquire_given_subnet(uuid_path, subnet)\n            else:\n                LOGGER.debug('Trying to acquire a free subnet')\n                acquired_subnet = self._acquire(uuid_path)\n            return acquired_subnet\n    except (utils.TimerException, IOError):\n        raise LagoSubnetLeaseLockException(self.path)", "docstring": "Lease a free subnet for the given uuid path.\nIf subnet is given, try to lease that subnet, otherwise try to lease a\nfree subnet.\n\nArgs:\nuuid_path (str): Path to the uuid file of a :class:`lago.Prefix`\nsubnet (str): A subnet to lease.\nReturns:\nnetaddr.IPAddress: An object which represents the subnet.\n\nRaises:\nLagoSubnetLeaseException:\n1. If this store is full\n2. If the requested subnet is already taken.\nLagoSubnetLeaseLockException:\nIf the lock to self.path can't be acquired.", "source": "codesearchnet"}
{"code": "def interpolate(features, hparams, decode_hp):\n    (inputs, targets) = (features['inputs'], features['targets'])\n    inputs = tf.unstack(inputs, axis=1)\n    targets = tf.unstack(targets, axis=1)\n    coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp)\n    (first_frame, last_frame) = (inputs[0], targets[(- 1)])\n    (first_top_z, first_level_eps) = frame_to_latents(first_frame, hparams)\n    (last_top_z, last_level_eps) = frame_to_latents(last_frame, hparams)\n    first_lats = (first_level_eps + [first_top_z])\n    last_lats = (last_level_eps + [last_top_z])\n    interp_lats = []\n    lat_iterator = enumerate(zip(first_lats, last_lats))\n    for (level_ind, (first_lat, last_lat)) in lat_iterator:\n        if (level_ind in decode_hp.level_interp):\n            if (decode_hp.channel_interp == 'all'):\n                interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs)\n            else:\n                interp_lat = glow_ops.linear_interpolate_rank(first_lat, last_lat, coeffs, decode_hp.rank_interp)\n        else:\n            interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1])\n        interp_lats.append(interp_lat)\n    level_eps_interp = interp_lats[:(hparams.n_levels - 1)]\n    z_top_interp = interp_lats[(- 1)]\n    images = latents_to_frames(z_top_interp, level_eps_interp, hparams)\n    return (images, first_frame, last_frame)", "docstring": "Interpolate between the first input frame and last target frame.\n\nArgs:\nfeatures: dict of tensors\nhparams: HParams, training hparams.\ndecode_hp: HParams, decode hparams.\nReturns:\nimages: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C)\nfirst_frame: image, 3-D Tensor, shape=(1, H, W, C)\nlast_frame: image, 3-D Tensor, shape=(1, H, W, C)", "source": "codesearchnet"}
{"code": "def write(self, obj: BioCDocument or BioCPassage or BioCSentence):\n        \n        if self.level == DOCUMENT and not isinstance(obj, BioCDocument):\n            raise ValueError\n        if self.level == PASSAGE and not isinstance(obj, BioCPassage):\n            raise ValueError\n        if self.level == SENTENCE and not isinstance(obj, BioCSentence):\n            raise ValueError\n        self.writer.write(BioCJSONEncoder().default(obj))", "docstring": "Encode and write a single object.\n\nArgs:\nobj: an instance of BioCDocument, BioCPassage, or BioCSentence\n\nReturns:", "source": "juraj-google-style"}
{"code": "def draw(vertexes, edges):\n    \n    \n    \n    \n    Xs = []  \n    Ys = []  \n\n    sug = _build_sugiyama_layout(vertexes, edges)\n\n    for vertex in sug.g.sV:\n        \n        Xs.append(vertex.view.xy[0] - vertex.view.w / 2.0)\n        Xs.append(vertex.view.xy[0] + vertex.view.w / 2.0)\n        Ys.append(vertex.view.xy[1])\n        Ys.append(vertex.view.xy[1] + vertex.view.h)\n\n    for edge in sug.g.sE:\n        for x, y in edge.view._pts:  \n            Xs.append(x)\n            Ys.append(y)\n\n    minx = min(Xs)\n    miny = min(Ys)\n    maxx = max(Xs)\n    maxy = max(Ys)\n\n    canvas_cols = int(math.ceil(math.ceil(maxx) - math.floor(minx))) + 1\n    canvas_lines = int(round(maxy - miny))\n\n    canvas = AsciiCanvas(canvas_cols, canvas_lines)\n\n    \n    for edge in sug.g.sE:\n        \n        assert len(edge.view._pts) > 1\n        for index in range(1, len(edge.view._pts)):\n            start = edge.view._pts[index - 1]\n            end = edge.view._pts[index]\n\n            start_x = int(round(start[0] - minx))\n            start_y = int(round(start[1] - miny))\n            end_x = int(round(end[0] - minx))\n            end_y = int(round(end[1] - miny))\n\n            assert start_x >= 0\n            assert start_y >= 0\n            assert end_x >= 0\n            assert end_y >= 0\n\n            canvas.line(start_x, start_y, end_x, end_y, \"*\")\n\n    for vertex in sug.g.sV:\n        \n        x = vertex.view.xy[0] - vertex.view.w / 2.0\n        y = vertex.view.xy[1]\n\n        canvas.box(\n            int(round(x - minx)),\n            int(round(y - miny)),\n            vertex.view.w,\n            vertex.view.h,\n        )\n\n        canvas.text(\n            int(round(x - minx)) + 1, int(round(y - miny)) + 1, vertex.data\n        )\n\n    canvas.draw()", "docstring": "Build a DAG and draw it in ASCII.\n\nArgs:\nvertexes (list): list of graph vertexes.\nedges (list): list of graph edges.", "source": "juraj-google-style"}
{"code": "def cardinal(self, to):\n    return sum((m.cardinal(to) for m in self.submodules))", "docstring": "Return the number of dependencies of this package to the given node.\n\nArgs:\nto (Package/Module): target node.\n\nReturns:\nint: number of dependencies.", "source": "codesearchnet"}
{"code": "def _ReadCharacterDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    return self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.CharacterDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, is_member=is_member, supported_size_values=(1, 2, 4))", "docstring": "Reads a character data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nCharacterDataTypeDefinition: character data type definition.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 unique_identifier=None):\n        \n        super(ActivateRequestPayload, self).__init__(\n            tag=enums.Tags.REQUEST_PAYLOAD)\n        self.unique_identifier = unique_identifier\n        self.validate()", "docstring": "Construct a ActivateRequestPayload object.\nArgs:\nunique_identifier (UniqueIdentifier): The UUID of a managed\ncryptographic object.", "source": "juraj-google-style"}
{"code": "def greater_equal(x, y):\n    return math_ops.greater_equal(x, y)", "docstring": "Element-wise truth value of (x >= y).\n\nArgs:\nx: Tensor or variable.\ny: Tensor or variable.\n\nReturns:\nA bool tensor.", "source": "github-repos"}
{"code": "def save(obj, filename, protocol=4):\n    with open(filename, 'wb') as f:\n        pickle.dump(obj, f, protocol=protocol)", "docstring": "Serialize an object to disk using pickle protocol.\n\nArgs:\nobj: The object to serialize.\nfilename: Path to the output file.\nprotocol: Version of the pickle protocol.", "source": "codesearchnet"}
{"code": "def xresnet50_2(pretrained=False, **kwargs):\n    \n    model = XResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n    if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet50']))\n    return model", "docstring": "Constructs a XResNet-50 model.\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"}
{"code": "def CheckHashes(hash_ids):\n  \n  return {\n      k: bool(v)\n      for k, v in data_store.REL_DB.ReadHashBlobReferences(hash_ids).items()\n  }", "docstring": "Checks if files with given hashes are present in the file store.\n\nArgs:\nhash_ids: A list of SHA256HashID objects.\n\nReturns:\nA dict where SHA256HashID objects are keys. Corresponding values\nmay be False (if hash id is not present) or True if it is not present.", "source": "juraj-google-style"}
{"code": "def is_expired(self):\n    expiration_time = (self.created_at + datetime.timedelta(days=1))\n    return (timezone.now() > expiration_time)", "docstring": "Determine if the confirmation has expired.\n\nReturns:\nbool:\n``True`` if the confirmation has expired and ``False``\notherwise.", "source": "codesearchnet"}
{"code": "def revnet_step(name, x, hparams, reverse=True):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    if hparams.coupling == \"additive\":\n      coupling_layer = functools.partial(\n          additive_coupling, name=\"additive\", reverse=reverse,\n          mid_channels=hparams.coupling_width,\n          activation=hparams.activation, dropout=hparams.coupling_dropout)\n    else:\n      coupling_layer = functools.partial(\n          affine_coupling, name=\"affine\", reverse=reverse,\n          mid_channels=hparams.coupling_width,\n          activation=hparams.activation, dropout=hparams.coupling_dropout)\n    ops = [\n        functools.partial(actnorm, name=\"actnorm\", reverse=reverse),\n        functools.partial(invertible_1x1_conv, name=\"invertible\",\n                          reverse=reverse), coupling_layer]\n\n    if reverse:\n      ops = ops[::-1]\n\n    objective = 0.0\n    for op in ops:\n      x, curr_obj = op(x=x)\n      objective += curr_obj\n    return x, objective", "docstring": "One step of glow generative flow.\n\nActnorm + invertible 1X1 conv + affine_coupling.\n\nArgs:\nname: used for variable scope.\nx: input\nhparams: coupling_width is the only hparam that is being used in\nthis function.\nreverse: forward or reverse pass.\nReturns:\nz: Output of one step of reversible flow.", "source": "juraj-google-style"}
{"code": "def from_json(cls, json):\n    params = dict(((str(k), v) for (k, v) in json.iteritems() if (k in cls._PARAMS)))\n    if (cls._OFFSET_PARAM in params):\n        params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])\n    return cls(**params)", "docstring": "Creates an instance of the InputReader for the given input shard's state.\n\nArgs:\njson: The InputReader state as a dict-like object.\n\nReturns:\nAn instance of the InputReader configured using the given JSON parameters.", "source": "codesearchnet"}
{"code": "def write_build_info(filename, key_value_list):\n    build_info = {}\n    if cuda_config:\n        build_info.update(cuda_config.config)\n    if tensorrt_config:\n        build_info.update(tensorrt_config.config)\n    for arg in key_value_list:\n        key, value = arg.split('=')\n        if value.lower() == 'true':\n            build_info[key] = True\n        elif value.lower() == 'false':\n            build_info[key] = False\n        else:\n            build_info[key] = value.format(**build_info)\n    sorted_build_info_pairs = sorted(build_info.items())\n    contents = '\\n\n    open(filename, 'w').write(contents)", "docstring": "Writes a Python that describes the build.\n\nArgs:\nfilename: filename to write to.\nkey_value_list: A list of \"key=value\" strings that will be added to the\nmodule's \"build_info\" dictionary as additional entries.", "source": "github-repos"}
{"code": "def _send_socket_request(self, xml_request):\n        \n        def to_variant(number):\n            buff = []\n            while number:\n                byte = number % 128\n                number = number \n                if number > 0:\n                    byte |= 0x80\n                buff.append(chr(byte))\n            return ''.join(buff)\n\n        def from_variant(stream):\n            used = 0\n            number = 0\n            q = 1\n            while True:\n                byte = ord(stream[used])\n                used += 1\n                number += q * (byte & 0x7F)\n                q *= 128\n                if byte&0x80==0:\n                    break\n            return (number, used)\n\n        def encode_fields(fields):\n            chunks = []\n            for field_id, message in fields.items():\n                chunks.append(to_variant((field_id << 3) | 2)) \n                chunks.append(to_variant(len(message)))\n                chunks.append(message)\n            return ''.join(chunks)\n\n        def decode_fields(stream):\n            fields = {}\n            offset = 0\n            stream_lenght = len(stream)\n            while offset<stream_lenght:\n                field_header, used = from_variant(stream[offset:])\n                offset += used\n                wire_type = field_header & 0x07\n                field_id = field_header >> 3\n                if wire_type==2:\n                    message_lenght, used = from_variant(stream[offset:])\n                    offset += used\n                    fields[field_id] = stream[offset:offset+message_lenght]\n                    offset += message_lenght\n                elif wire_type==0:\n                    fields[field_id], used = from_variant(stream[offset:])\n                    offset += used\n                elif wire_type==1:\n                    fields[field_id] = stream[offset:offset+8]\n                    offset += 8\n                elif wire_type==3:\n                    raise ConnectionError()\n                elif wire_type==4:\n                    raise ConnectionError()\n                elif wire_type==5:\n                    fields[field_id] = stream[offse:offset+4]\n                    offset += 4\n                else:\n                    raise ConnectionError()\n            return fields\n\n\n        def make_header(lenght):\n            result = []\n            result.append(chr((lenght & 0x000000FF)))\n            result.append(chr((lenght & 0x0000FF00) >> 8))\n            result.append(chr((lenght & 0x00FF0000) >> 16))\n            result.append(chr((lenght & 0xFF000000) >> 24))\n            return '\\t\\t\\x00\\x00' + ''.join(result)\n\n        def parse_header(header):\n            if len(header) == 8 and header[0] == '\\t' and header[1] == '\\t' and\\\n                    header[2] == '\\00' and header[3] == '\\00':\n                return ord(header[4]) | (ord(header[5]) << 8) |\\\n                        (ord(header[6]) << 16) | (ord(header[7]) << 24)\n            else:\n                raise ConnectionError()\n\n        def socket_send(data):\n            sent_bytes = 0\n            failures = 0\n            total_bytes = len(data)\n            while sent_bytes < total_bytes:\n                sent = self._connection.send(data[sent_bytes:])\n                if sent == 0:\n                    failures += 1\n                    if failures > 5:\n                        raise ConnectionError()\n                    continue\n                sent_bytes += sent\n\n        def 
socket_recieve(lenght):\n            total_recieved = 0\n            failures = 5\n            recieved_chunks = []\n            while total_recieved<lenght:\n                chunk = self._connection.recv(lenght-total_recieved)\n                if not chunk:\n                    failures += 1\n                    if failures > 5:\n                        raise ConnectionError()\n                    continue\n                recieved_chunks.append(chunk)\n                total_recieved += len(chunk)\n            return ''.join(recieved_chunks)\n\n        encoded_message = encode_fields({1: xml_request,\n                                         2: self._storage if self._storage else \"special:detect-storage\"})\n        header = make_header(len(encoded_message))\n\n        try: \n            socket_send(header+encoded_message)\n        except (ConnectionError, socket.error):\n            self._connection.close()\n            self._open_connection()\n            socket_send(header+encoded_message)\n\n        \n        header = socket_recieve(8)\n        lenght = parse_header(header)\n        encoded_response = socket_recieve(lenght)\n        response = decode_fields(encoded_response)\n        \n        \n        return response[1]", "docstring": "Send a request via protobuf.\n\nArgs:\nxml_request -- A fully formed xml request string for the CPS.\n\nReturns:\nThe raw xml response string.", "source": "juraj-google-style"}
{"code": "def checkTUN(self):\n    packet = self._TUN._tun.read(self._TUN._tun.mtu)\n    return packet", "docstring": "Checks the TUN adapter for data and returns any that is found.\n\nReturns:\npacket: Data read from the TUN adapter", "source": "codesearchnet"}
{"code": "def with_contextual_override(func: Callable[..., Any]) -> Callable[..., Any]:\n    with contextual_override() as current_context:\n        pass\n\n    def _func(*args, **kwargs) -> Any:\n        with contextual_override(**current_context):\n            return func(*args, **kwargs)\n    return _func", "docstring": "Wraps a user function with the access to the current contextual override.\n\nThe wrapped function can be called from another thread.\n\nArgs:\nfunc: The user function to be wrapped.\n\nReturns:\nA wrapper function that have the access to the current contextual override,\nwhich can be called from another thread.", "source": "github-repos"}
{"code": "def __init__(self, auth, api='/logs/search', **kwargs):\n        \n        self.api = api\n        self.log = auth.log\n        try:\n            self.url = '%s%s' % (auth.get_url(), self.api)\n        except AttributeError:\n            self.url = 'https:\n\n        try:\n            self.auth = auth.get_auth()\n        except AttributeError:\n            self.auth = auth", "docstring": "Search the logs.\n\nArgs:\nauth (Client): Authentication object\napi (str): Api endpath", "source": "juraj-google-style"}
{"code": "def read(cls, data):\n        \n        if isinstance(data, OrderedDict):\n            return cls(data)\n        elif isinstance(data, basestring)\\\n             and data.startswith((\"http:\n            return cls(request(data))\n        elif isinstance(data, basestring):\n            try:\n                json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n                return cls(json_dict)\n            except ValueError:\n                raise\n        else:\n            try:\n                json_dict = json.load(data, object_pairs_hook=OrderedDict)\n                return cls(json_dict)\n            except ValueError:\n                raise", "docstring": "Reads data from URL or OrderedDict.\nArgs:\ndata: can be a URL pointing to a JSONstat file, a JSON string\nor an OrderedDict.\n\nReturns:\nAn object of class Collection populated with data.", "source": "juraj-google-style"}
{"code": "def netmiko_send_config(\n    task: Task,\n    config_commands: Optional[List[str]] = None,\n    config_file: Optional[str] = None,\n    **kwargs: Any\n) -> Result:\n    \n    net_connect = task.host.get_connection(\"netmiko\", task.nornir.config)\n    net_connect.enable()\n    if config_commands:\n        result = net_connect.send_config_set(config_commands=config_commands, **kwargs)\n    elif config_file:\n        result = net_connect.send_config_from_file(config_file=config_file, **kwargs)\n    else:\n        raise ValueError(\"Must specify either config_commands or config_file\")\n\n    return Result(host=task.host, result=result, changed=True)", "docstring": "Execute Netmiko send_config_set method (or send_config_from_file)\n\nArguments:\nconfig_commands: Commands to configure on the remote network device.\nconfig_file: File to read configuration commands from.\nkwargs: Additional arguments to pass to method.\n\nReturns:\nResult object with the following attributes set:\n* result (``str``): string showing the CLI from the configuration changes.", "source": "juraj-google-style"}
{"code": "def __type_to_tag(self, type_: Type) -> str:\n        \n        if type_ in scalar_type_to_tag:\n            return scalar_type_to_tag[type_]\n\n        if is_generic_list(type_):\n            return 'tag:yaml.org,2002:seq'\n\n        if is_generic_dict(type_):\n            return 'tag:yaml.org,2002:map'\n\n        if type_ in self._registered_classes.values():\n            return '!{}'.format(type_.__name__)\n\n        raise RuntimeError((\n            'Unknown type {} in type_to_tag,'  \n            ' please report a YAtiML bug.').format(type_))", "docstring": "Convert a type to the corresponding YAML tag.\n\nArgs:\ntype_: The type to convert\n\nReturns:\nA string containing the YAML tag.", "source": "juraj-google-style"}
{"code": "def nhapDaiHan(self, cucSo, gioiTinh):\n    for cung in self.thapNhiCung:\n        khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)\n        cung.daiHan((cucSo + (khoangCach * 10)))\n    return self", "docstring": "Nhap dai han\n\nArgs:\ncucSo (TYPE): Description\ngioiTinh (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "codesearchnet"}
{"code": "def setup(logdir='log'):\n    logger = logging.getLogger()\n    logger.setLevel(logging.DEBUG)\n    logdir = os.path.normpath(logdir)\n    if (not os.path.exists(logdir)):\n        os.makedirs(logdir)\n    t = datetime.datetime.now()\n    logfile = '{year:04d}{mon:02d}{day:02d}-{hour:02d}{min:02d}{sec:02d}.log'.format(year=t.year, mon=t.month, day=t.day, hour=t.hour, min=t.minute, sec=t.second)\n    logfile = os.path.join(logdir, logfile)\n    filehandler = logging.handlers.RotatingFileHandler(filename=logfile, maxBytes=((10 * 1024) * 1024), backupCount=100)\n    filehandler.setLevel(logging.DEBUG)\n    fileformatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')\n    filehandler.setFormatter(fileformatter)\n    logger.addHandler(filehandler)\n    streamhandler = logging.StreamHandler()\n    streamhandler.setLevel(logging.WARNING)\n    streamformatter = logging.Formatter('%(levelname)s: %(message)s')\n    streamhandler.setFormatter(streamformatter)\n    logger.addHandler(streamhandler)", "docstring": "Set up dual logging to console and to logfile.\n\nWhen this function is called, it first creates the given directory. It then\ncreates a logfile and passes all log messages to come to it. The logfile\nname encodes the date and time when it was created, for example\n\"20181115-153559.txt\". All messages with a log level of at least \"WARNING\"\nare also forwarded to the console.\n\nArgs:\nlogdir: path of the directory where to store the log files. Both a\nrelative or an absolute path may be specified. If a relative path is\nspecified, it is interpreted relative to the working directory.\nIf no directory is given, the logs are written to a folder called\n\"log\" in the working directory.", "source": "codesearchnet"}
{"code": "def remove_sonos_playlist(self, sonos_playlist):\n        \n        object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)\n        return self.contentDirectory.DestroyObject([('ObjectID', object_id)])", "docstring": "Remove a Sonos playlist.\n\nArgs:\nsonos_playlist (DidlPlaylistContainer): Sonos playlist to remove\nor the item_id (str).\n\nReturns:\nbool: True if succesful, False otherwise\n\nRaises:\nSoCoUPnPException: If sonos_playlist does not point to a valid\nobject.", "source": "juraj-google-style"}
{"code": "def _ParseValueData(self, knowledge_base, value_data):\n    \n    if not isinstance(value_data, py2to3.UNICODE_TYPE):\n      raise errors.PreProcessFail(\n          'Unsupported Windows Registry value type: {0:s} for '\n          'artifact: {1:s}.'.format(\n              type(value_data), self.ARTIFACT_DEFINITION_NAME))\n\n    if not knowledge_base.GetHostname():\n      hostname_artifact = artifacts.HostnameArtifact(name=value_data)\n      knowledge_base.SetHostname(hostname_artifact)", "docstring": "Parses Windows Registry value data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nvalue_data (object): Windows Registry value data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def add_backend_policy(self, json_data):\n        \n        env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n        elbclient = env.client('elb')\n\n        \n        for job in json.loads(json_data)['job']:\n            for listener in job['listeners']:\n                instance_port = listener['internalPort']\n                backend_policy_list = listener['backendPolicies']\n                if backend_policy_list:\n                    LOG.info('Adding backend server policies: %s', backend_policy_list)\n                    elbclient.set_load_balancer_policies_for_backend_server(\n                        LoadBalancerName=self.app, InstancePort=instance_port, PolicyNames=backend_policy_list)", "docstring": "Attaches backend server policies to an ELB\n\nArgs:\njson_data (json): return data from ELB upsert", "source": "juraj-google-style"}
{"code": "def click_exists(self, timeout=0):\n        \n        e = self.get(timeout=timeout, raise_error=False)\n        if e is None:\n            return False\n        e.click()\n        return True", "docstring": "Wait element and perform click\n\nArgs:\ntimeout (float): timeout for wait\n\nReturns:\nbool: if successfully clicked", "source": "juraj-google-style"}
{"code": "def countriesdata(cls, use_live=True):\n        \n        \n        if cls._countriesdata is None:\n            countries = None\n            if use_live:\n                try:\n                    countries = hxl.data(cls._ochaurl)\n                except IOError:\n                    logger.exception('Download from OCHA feed failed! Falling back to stored file.')\n            if countries is None:\n                countries = hxl.data(\n                    script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',\n                                         Country), allow_local=True)\n            cls.set_countriesdata(countries)\n        return cls._countriesdata", "docstring": "Read countries data from OCHA countries feed (falling back to file)\n\nArgs:\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n\nReturns:\nList[Dict[Dict]]: Countries dictionaries", "source": "juraj-google-style"}
{"code": "def as_saver_def(self):\n    return self.saver_def", "docstring": "Generates a `SaverDef` representation of this saver.\n\nReturns:\nA `SaverDef` proto.", "source": "github-repos"}
{"code": "def is_mobile_number_portable_region(region_code):\n    \n    metadata = PhoneMetadata.metadata_for_region(region_code, None)\n    if metadata is None:\n        return False\n    return metadata.mobile_number_portable_region", "docstring": "Returns true if the supplied region supports mobile number portability.\nReturns false for invalid, unknown or regions that don't support mobile\nnumber portability.\n\nArguments:\nregion_code -- the region for which we want to know whether it supports mobile number\nportability or not.", "source": "juraj-google-style"}
{"code": "def update_with_zero_body(self, uri=None, timeout=-1, custom_headers=None):\n        \n        if not uri:\n            uri = self.data['uri']\n\n        logger.debug('Update with zero length body (uri = %s)' % uri)\n        resource_data = self._helper.do_put(uri, None, timeout, custom_headers)\n\n        return resource_data", "docstring": "Makes a PUT request to update a resource when no request body is required.\n\nArgs:\nuri: Allows to use a different URI other than resource URI\ntimeout: Timeout in seconds. Wait for task completion by default.\nThe timeout does not abort the operation in OneView; it just stops waiting for its completion.\ncustom_headers: Allows to set custom HTTP headers.\n\nReturns:\nA dict with updated resource data.", "source": "juraj-google-style"}
{"code": "def make_state_space_model(self, num_timesteps, param_vals=None, initial_state_prior=None, initial_step=0):\n    return self._make_state_space_model(num_timesteps=num_timesteps, param_map=self._canonicalize_param_vals_as_map(param_vals), initial_state_prior=initial_state_prior, initial_step=initial_step)", "docstring": "Instantiate this model as a Distribution over specified `num_timesteps`.\n\nArgs:\nnum_timesteps: Python `int` number of timesteps to model.\nparam_vals: a list of `Tensor` parameter values in order corresponding to\n`self.parameters`, or a dict mapping from parameter names to values.\ninitial_state_prior: an optional `Distribution` instance overriding the\ndefault prior on the model's initial state. This is used in forecasting\n(\"today's prior is yesterday's posterior\").\ninitial_step: optional `int` specifying the initial timestep to model.\nThis is relevant when the model contains time-varying components,\ne.g., holidays or seasonality.\n\nReturns:\ndist: a `LinearGaussianStateSpaceModel` Distribution object.", "source": "codesearchnet"}
{"code": "class AlphaDropout(Layer):\n\n    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n        super().__init__(**kwargs)\n        if not 0 <= rate <= 1:\n            raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. Received: rate={rate}')\n        self.rate = rate\n        self.seed = seed\n        self.noise_shape = noise_shape\n        if rate > 0:\n            self.seed_generator = backend.random.SeedGenerator(seed)\n        self.supports_masking = True\n        self._build_at_init()\n\n    def call(self, inputs, training=False):\n        if training and self.rate > 0:\n            noise_shape = self._get_concrete_noise_shape(inputs, self.noise_shape)\n            alpha = 1.6732632423543772\n            scale = 1.0507009873554805\n            alpha_p = -alpha * scale\n            kept_idx = ops.greater_equal(ops.random.uniform(noise_shape, seed=self.seed_generator), self.rate)\n            kept_idx = ops.cast(kept_idx, inputs.dtype)\n            a = ((1 - self.rate) * (1 + self.rate * alpha_p ** 2)) ** (-0.5)\n            b = -a * alpha_p * self.rate\n            x = inputs * kept_idx + alpha_p * (1 - kept_idx)\n            return a * x + b\n        return inputs\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def _get_concrete_noise_shape(self, inputs, noise_shape):\n        if noise_shape is None:\n            return ops.shape(inputs)\n        concrete_inputs_shape = ops.shape(inputs)\n        concrete_noise_shape = []\n        for i, value in enumerate(noise_shape):\n            concrete_noise_shape.append(concrete_inputs_shape[i] if value is None else value)\n        return concrete_noise_shape\n\n    def get_config(self):\n        base_config = super().get_config()\n        config = {'rate': self.rate, 'seed': self.seed, 'noise_shape': self.noise_shape}\n        return {**base_config, **config}", "docstring": "Applies Alpha Dropout to the input.\n\nAlpha Dropout is a `Dropout` that keeps mean and variance of inputs\nto their original values, in order to ensure the self-normalizing property\neven after this dropout.\nAlpha Dropout fits well to Scaled Exponential Linear Units (SELU) by\nrandomly setting activations to the negative saturation value.\n\nArgs:\nrate: Float between 0 and 1. The multiplicative noise will have\nstandard deviation `sqrt(rate / (1 - rate))`.\nnoise_shape: 1D integer tensor representing the shape of the\nbinary alpha dropout mask that will be multiplied with the input.\nFor instance, if your inputs have shape\n`(batch_size, timesteps, features)` and\nyou want the alpha dropout mask to be the same for all timesteps,\nyou can use `noise_shape=(batch_size, 1, features)`.\nseed: A Python integer to use as random seed.\n\nCall arguments:\ninputs: Input tensor (of any rank).\ntraining: Python boolean indicating whether the layer should behave in\ntraining mode (adding alpha dropout) or in inference mode\n(doing nothing).", "source": "github-repos"}
{"code": "def __init__(self, default: typing.Optional[float]=MISSING_VALUE, min_value: typing.Optional[float]=None, max_value: typing.Optional[float]=None, is_noneable: bool=False, frozen: bool=False):\n    super().__init__(float, default, min_value, max_value, is_noneable, frozen)", "docstring": "Constructor.\n\nArgs:\ndefault: (Optional) default value for this spec.\nmin_value: (Optional) minimum value of acceptable values.\nmax_value: (Optional) maximum value of acceptable values.\nis_noneable: If True, None is acceptable.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def _construct_forward_backward(self, num_doutputs):\n    trainable_outputs = [output for output in self._func_graph.outputs[:num_doutputs] if backprop_util.IsTrainable(output)]\n    signature = []\n    for t in trainable_outputs:\n        signature.append(tensor_lib.TensorSpec(*default_gradient.shape_and_dtype(t)))\n\n    def _backprop_function(*grad_ys):\n        with ops.device(None):\n            return gradients_util._GradientsHelper(trainable_outputs, self._func_graph.inputs, grad_ys=grad_ys, src_graph=self._func_graph)\n    with self._func_graph.as_default():\n        backwards_graph = func_graph_module.FuncGraph(_backward_name(self._func_graph.name))\n        func_graph_module.func_graph_from_py_func(name=backwards_graph.name, python_func=_backprop_function, args=[], kwargs={}, signature=signature, func_graph=backwards_graph)\n        backwards_graph_captures = backwards_graph.external_captures\n        captures_from_forward = [c for c in backwards_graph_captures if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph]\n        existing_outputs = object_identity.ObjectIdentitySet(self._func_graph.outputs)\n        for capture in captures_from_forward:\n            if capture not in existing_outputs:\n                existing_outputs.add(capture)\n                self._func_graph.outputs.append(capture)\n        forward_function, backward_function = _create_forward_backward_with_graph(self._attrs, self._func_graph, backwards_graph)\n        return (forward_function, backward_function)", "docstring": "Constructs a pair of forward and backward functions.\n\nArgs:\nnum_doutputs: The constructed backprop function will take output gradients\nfor the first `num_doutputs` outputs of the forward function. Defaults\nto the number of outputs for the inference function, but when\nhigher-order gradients are computed this will increase to include side\noutputs.\n\nReturns:\nA pair of (forward_function, backward_function):\nforward_function: A re-generated inference function (an\nAtomicFunction) to account for new side outputs, if any extra\nwere required when building the backward pass.\nbackward_function: A ConcreteFunction that Takes `num_doutputs`\narguments and returns gradients with respect to inputs of the forward\nfunction.", "source": "github-repos"}
{"code": "def _FormatValue(self, value, level=0):\n\n    def FormatDictItem(key_value):\n        'Formats single dictionary item.'\n        (key, value) = key_value\n        return ((self._FormatValue(key, (level + 1)) + ': ') + self._FormatValue(value, (level + 1)))\n\n    def LimitedEnumerate(items, formatter, level=0):\n        'Returns items in the specified enumerable enforcing threshold.'\n        count = 0\n        limit = (self.max_sublist_items if (level > 0) else self.max_list_items)\n        for item in items:\n            if (count == limit):\n                (yield '...')\n                break\n            (yield formatter(item))\n            count += 1\n\n    def FormatList(items, formatter, level=0):\n        'Formats a list using a custom item formatter enforcing threshold.'\n        return ', '.join(LimitedEnumerate(items, formatter, level=level))\n    if isinstance(value, _PRIMITIVE_TYPES):\n        return _TrimString(repr(value), self.max_value_len)\n    if isinstance(value, _DATE_TYPES):\n        return str(value)\n    if (level > self.max_depth):\n        return str(type(value))\n    if isinstance(value, dict):\n        return (('{' + FormatList(six.iteritems(value), FormatDictItem)) + '}')\n    if isinstance(value, _VECTOR_TYPES):\n        return _ListTypeFormatString(value).format(FormatList(value, (lambda item: self._FormatValue(item, (level + 1))), level=level))\n    if isinstance(value, types.FunctionType):\n        return ('function ' + value.__name__)\n    if (hasattr(value, '__dict__') and value.__dict__):\n        return self._FormatValue(value.__dict__, level)\n    return str(type(value))", "docstring": "Pretty-prints an object for a logger.\n\nThis function is very similar to the standard pprint. The main difference\nis that it enforces limits to make sure we never produce an extremely long\nstring or take too much time.\n\nArgs:\nvalue: Python object to print.\nlevel: current recursion level.\n\nReturns:\nFormatted string.", "source": "codesearchnet"}
{"code": "def catch(func, *args, **kwargs):\n    try:\n        func(*args, **kwargs)\n    except Exception as e:\n        return e", "docstring": "Call the supplied function with the supplied arguments,\ncatching and returning any exception that it throws.\n\nArguments:\nfunc: the function to run.\n*args: positional arguments to pass into the function.\n**kwargs: keyword arguments to pass into the function.\nReturns:\nIf the function throws an exception, return the exception.\nIf the function does not throw an exception, return None.", "source": "codesearchnet"}
{"code": "def cancel(self, invoice_id, **kwargs):\n        \n        url = \"{}/{}/cancel\".format(self.base_url, invoice_id)\n        return self.post_url(url, {}, **kwargs)", "docstring": "Cancel an unpaid Invoice with given ID via API\nIt can only be called on an invoice that is not in the paid state.\n\nArgs:\ninvoice_id : Id for cancel the invoice\nReturns:\nThe response for the API will be the invoice entity, similar to create/update API response, with status attribute's value as cancelled", "source": "juraj-google-style"}
{"code": "def _get_example(filepath: str, filename: str, tag: Tag, sdk: int) -> Example:\n    context_line = tag.context_line if tag.context_line <= tag.line_start else tag.context_line - (tag.line_finish - tag.line_start)\n    return Example(sdk=SdkEnum(sdk), tag=tag, filepath=filepath, status=STATUS_UNSPECIFIED, type=_get_object_type(filename, filepath), code=_get_content(filepath, tag.line_start, tag.line_finish), url_vcs=_get_url_vcs(filepath), context_line=context_line)", "docstring": "Return an Example by filepath and filename.\n\nArgs:\nfilepath: path of the example's file.\nfilename: name of the example's file.\ntag: tag of the example.\n\nReturns:\nParsed Example object.", "source": "github-repos"}
{"code": "def read(self, domain, type_name, search_command, body=None):\n        \n        return self._request(domain, type_name, search_command, 'GET', body)", "docstring": "Read entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\nbody (str): JSON body", "source": "juraj-google-style"}
{"code": "def _build(self, inputs, prev_state):\n    input_size = inputs.get_shape()[1]\n    weight_shape = (input_size, self._hidden_size)\n    u_shape = (self._hidden_size, self._hidden_size)\n    bias_shape = (self._hidden_size,)\n\n    def _get_variable(name, shape):\n        return tf.get_variable(name, shape, dtype=inputs.dtype, initializer=self._initializers.get(name), partitioner=self._partitioners.get(name), regularizer=self._regularizers.get(name))\n    pre_highway_wt = _get_variable(self.WT, weight_shape)\n    pre_highway_wh = _get_variable(self.WH, weight_shape)\n    state = prev_state\n    for layer_index in xrange(self._num_layers):\n        layer_str = str(layer_index)\n        layer_wt = _get_variable((self.WT + layer_str), u_shape)\n        layer_bt = _get_variable((self.BT + layer_str), bias_shape)\n        layer_wh = _get_variable((self.WH + layer_str), u_shape)\n        layer_bh = _get_variable((self.BH + layer_str), bias_shape)\n        linear_t = (tf.matmul(state, layer_wt) + layer_bt)\n        linear_h = (tf.matmul(state, layer_wh) + layer_bh)\n        if (layer_index == 0):\n            linear_t += tf.matmul(inputs, pre_highway_wt)\n            linear_h += tf.matmul(inputs, pre_highway_wh)\n        output_t = tf.sigmoid(linear_t)\n        output_h = tf.tanh(linear_h)\n        state = ((state * (1 - output_t)) + (output_h * output_t))\n    return (state, state)", "docstring": "Connects the highway core module into the graph.\n\nArgs:\ninputs: Tensor of size `[batch_size, input_size]`.\nprev_state: Tensor of size `[batch_size, hidden_size]`.\n\nReturns:\nA tuple (output, next_state) where `output` is a Tensor of size\n`[batch_size, hidden_size]` and `next_state` is a Tensor of size\n`[batch_size, hidden_size]`.\n\nRaises:\nValueError: If connecting the module into the graph any time after the\nfirst time, and the inferred size of the inputs does not match previous\ninvocations.", "source": "codesearchnet"}
{"code": "def get_block_details(self, block_ids):\n    if (not hasattr(block_ids, '__iter__')):\n        block_ids = [block_ids]\n    for _id in block_ids:\n        block_key = self._db.get_block(_id)[0]\n        block_data = self._db.get_all_field_value(block_key)\n        for key in block_data:\n            for char in ['[', '{']:\n                if (char in block_data[key]):\n                    block_data[key] = ast.literal_eval(str(block_data[key]))\n        (yield block_data)", "docstring": "Get details of scheduling or processing block\n\nArgs:\nblock_ids (list): List of block IDs", "source": "codesearchnet"}
{"code": "def weak_scaling(timing_stats, scaling_var, data_points):\n    timing_data = dict()\n    proc_counts = []\n    bench_means = []\n    bench_mins = []\n    bench_maxs = []\n    model_means = []\n    model_mins = []\n    model_maxs = []\n    for point in data_points:\n        size = point[0]\n        proc = point[1]\n        try:\n            model_data = timing_stats[size][proc]['model'][scaling_var]\n            bench_data = timing_stats[size][proc]['bench'][scaling_var]\n        except KeyError:\n            continue\n        proc_counts.append(proc)\n        model_means.append(model_data['mean'])\n        model_mins.append(model_data['min'])\n        model_maxs.append(model_data['max'])\n        bench_means.append(bench_data['mean'])\n        bench_mins.append(bench_data['min'])\n        bench_maxs.append(bench_data['max'])\n    timing_data['bench'] = dict(mins=bench_mins, means=bench_means, maxs=bench_maxs)\n    timing_data['model'] = dict(mins=model_mins, means=model_means, maxs=model_maxs)\n    timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts]\n    return timing_data", "docstring": "Generate data for plotting weak scaling.  The data points keep\na constant amount of work per processor for each data point.\n\nArgs:\ntiming_stats: the result of the generate_timing_stats function\nscaling_var: the variable to select from the timing_stats dictionary\n(can be provided in configurations via the 'scaling_var' key)\ndata_points: the list of size and processor counts to use as data\n(can be provided in configurations via the 'weak_scaling_points' key)\n\nReturns:\nA dict of the form:\n{'bench' : {'mins' : [], 'means' : [], 'maxs' : []},\n'model' : {'mins' : [], 'means' : [], 'maxs' : []},\n'proc_counts' : []}", "source": "codesearchnet"}
{"code": "def set_lock_config(self, device_label, volume=None, voice_level=None,\n                        auto_lock_enabled=None):\n        \n        response = None\n        data = {}\n        if volume:\n            data['volume'] = volume\n        if voice_level:\n            data['voiceLevel'] = voice_level\n        if auto_lock_enabled is not None:\n            data['autoLockEnabled'] = auto_lock_enabled\n        try:\n            response = requests.put(\n                urls.lockconfig(self._giid, device_label),\n                headers={\n                    'Content-Type': 'application/json',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                data=json.dumps(data))\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)", "docstring": "Set lock configuration\n\nArgs:\ndevice_label (str): device label of lock\nvolume (str): 'SILENCE', 'LOW' or 'HIGH'\nvoice_level (str): 'ESSENTIAL' or 'NORMAL'\nauto_lock_enabled (boolean): auto lock enabled", "source": "juraj-google-style"}
{"code": "def download_image(self, device_label, image_id, file_name):\n        \n        response = None\n        try:\n            response = requests.get(\n                urls.download_image(self._giid, device_label, image_id),\n                headers={\n                    'Cookie': 'vid={}'.format(self._vid)},\n                stream=True)\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        with open(file_name, 'wb') as image_file:\n            for chunk in response.iter_content(chunk_size=1024):\n                if chunk:\n                    image_file.write(chunk)", "docstring": "Download image taken by a smartcam\n\nArgs:\ndevice_label (str): device label of camera\nimage_id (str): image id from image series\nfile_name (str): path to file", "source": "juraj-google-style"}
{"code": "def get_eligible_features(examples, num_mutants):\n  \n  features_dict = (\n      get_numeric_features_to_observed_range(\n          examples))\n\n  features_dict.update(\n      get_categorical_features_to_sampling(\n          examples, num_mutants))\n\n  \n  \n  features_list = []\n  for k, v in sorted(features_dict.items()):\n    v['name'] = k\n    features_list.append(v)\n  return features_list", "docstring": "Returns a list of JSON objects for each feature in the examples.\n\nThis list is used to drive partial dependence plots in the plugin.\n\nArgs:\nexamples: Examples to examine to determine the eligible features.\nnum_mutants: The number of mutations to make over each feature.\n\nReturns:\nA list with a JSON object for each feature.\nNumeric features are represented as {name: observedMin: observedMax:}.\nCategorical features are repesented as {name: samples:[]}.", "source": "juraj-google-style"}
{"code": "def _parse_compound_info(self, line):\n    for (k, regexes) in six.iteritems(self.compound_regex):\n        for reg in regexes:\n            if self.compound_info[k]:\n                continue\n            m = re.search(reg, line, re.IGNORECASE)\n            if m:\n                self.compound_info[k] = m.group(1).strip()\n    self._get_other_names(line)", "docstring": "Parse and extract all compound data by looping through the dictionary of compound_info regexs\n\nupdates self.compound_info\n\nArgs:\nline (str): line of the msp file", "source": "codesearchnet"}
{"code": "def _make_intermediates_match(branch_graphs, branch_optionals):\n    new_branch_optionals = []\n    intermediates_size = max((len(o) for o in branch_optionals))\n    for i, branch_graph in enumerate(branch_graphs):\n        other_optionals = _create_none_optionals(branch_graph, intermediates_size - len(branch_optionals[i]))\n        new_branch_optionals.append(branch_optionals[i] + other_optionals)\n    return new_branch_optionals", "docstring": "Returns new optionals lists that have matching signatures.\n\nThis is done by mirroring each list in the other using none optionals.\nThere is no merging of like optionals.\n\nArgs:\nbranch_graphs: `list` of `FuncGraph`.\nbranch_optionals: `list` of `list`s of optional `Tensor`s from other\nbranch_graphs\n\nReturns:\nA `list` of `list`s of `Tensor`s for each branch_graph. Each list has the\nsame number of `Tensor`s, all of which will be optionals of the same\nshape/type.", "source": "github-repos"}
{"code": "def _check_for_definition(iface, cls, tag, defines):\n    attributes = (attr for attr in iface.__abstractmethods__ if hasattr(getattr(iface, attr), tag))\n    for attribute in attributes:\n        for node in cls.__mro__:\n            if (hasattr(node, attribute) and defines(getattr(node, attribute))):\n                return True\n    try:\n        attribute\n        return False\n    except NameError:\n        return True", "docstring": "Check for a valid definition of a value.\n\nArgs:\niface (Iface): An Iface specification.\ncls (type): Some type to check for a definition.\ntag (str): The name of the tag attribute used to mark the abstract\nmethods.\ndefines (callable): A callable that accepts an attribute and returns\nTrue if the attribute is a valid definition.\n\nReturns:\nbool: Whether or not the definition is found.", "source": "codesearchnet"}
{"code": "def inject_argument_info_in_traceback(fn, object_name=None):\n    if backend.backend() == 'tensorflow':\n        from tensorflow import errors as tf_errors\n    else:\n        tf_errors = None\n\n    @wraps(fn)\n    def error_handler(*args, **kwargs):\n        if not is_traceback_filtering_enabled():\n            return fn(*args, **kwargs)\n        signature = None\n        bound_signature = None\n        try:\n            return fn(*args, **kwargs)\n        except Exception as e:\n            if hasattr(e, '_keras_call_info_injected'):\n                raise e\n            signature = inspect.signature(fn)\n            try:\n                bound_signature = signature.bind(*args, **kwargs)\n            except TypeError:\n                raise e\n            arguments_context = []\n            for arg in list(signature.parameters.values()):\n                if arg.name in bound_signature.arguments:\n                    value = tree.map_structure(format_argument_value, bound_signature.arguments[arg.name])\n                else:\n                    value = arg.default\n                arguments_context.append(f'  • {arg.name}={value}')\n            if arguments_context:\n                arguments_context = '\\n'.join(arguments_context)\n                if tf_errors is not None and isinstance(e, tf_errors.OpError):\n                    message = e.message\n                elif e.args:\n                    message = e.args[0]\n                else:\n                    message = ''\n                display_name = f'{(object_name if object_name else fn.__name__)}'\n                message = f'Exception encountered when calling {display_name}.\\n\\n\\x1b[1m{message}\\x1b[0m\\n\\nArguments received by {display_name}:\\n{arguments_context}'\n                if tf_errors is not None and isinstance(e, tf_errors.OpError):\n                    new_e = e.__class__(e.node_def, e.op, message, e.error_code)\n                else:\n                    try:\n                        new_e = e.__class__(message)\n                    except TypeError:\n                        new_e = RuntimeError(message)\n                new_e._keras_call_info_injected = True\n            else:\n                new_e = e\n            raise new_e.with_traceback(e.__traceback__) from None\n        finally:\n            del signature\n            del bound_signature\n    return error_handler", "docstring": "Add information about call argument values to an error message.\n\nArguments:\nfn: Function to wrap. Exceptions raised by the this function will be\nre-raised with additional information added to the error message,\ndisplaying the values of the different arguments that the function\nwas called with.\nobject_name: String, display name of the class/function being called,\ne.g. `'layer \"layer_name\" (LayerClass)'`.\n\nReturns:\nA wrapped version of `fn`.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Any, Any]:\n    hidden_states_2, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)\n    hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.dropout, training=self.training)\n    hidden_states = hidden_states + hidden_states_2\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    cross_attn_weights = None\n    hidden_states = hidden_states if position_embeddings is None else hidden_states + position_embeddings\n    hidden_states_2, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list)\n    hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.dropout, training=self.training)\n    hidden_states = self.gateway(residual, hidden_states_2)\n    hidden_states_2 = self.activation_fn(self.fc1(hidden_states))\n    hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.activation_dropout, training=self.training)\n    hidden_states_2 = self.fc2(hidden_states_2)\n    hidden_states_2 = nn.functional.dropout(hidden_states_2, p=self.dropout, training=self.training)\n    hidden_states = hidden_states + hidden_states_2\n    hidden_states = self.final_layer_norm(hidden_states.clamp(min=-65504, max=65504))\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights, cross_attn_weights)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(seq_len, batch, embed_dim)`.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings that are added to the queries and keys in the self-attention layer.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\nencoder_hidden_states (`torch.FloatTensor`):\ncross attention input to the layer of shape `(seq_len, batch, embed_dim)`\nencoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def pseudo_with_symbol(self, symbol, allow_multi=False):\n    pseudos = self.select_symbols(symbol, ret_list=True)\n    if ((not pseudos) or ((len(pseudos) > 1) and (not allow_multi))):\n        raise ValueError(('Found %d occurrences of symbol %s' % (len(pseudos), symbol)))\n    if (not allow_multi):\n        return pseudos[0]\n    else:\n        return pseudos", "docstring": "Return the pseudo with the given chemical symbol.\n\nArgs:\nsymbols: String with the chemical symbol of the element\nallow_multi: By default, the method raises ValueError\nif multiple occurrences are found. Use allow_multi to prevent this.\n\nRaises:\nValueError if symbol is not found or multiple occurences are present and not allow_multi", "source": "codesearchnet"}
{"code": "def __init__(self, formatter, object_representer):\n        \n        super().__init__(formatter)\n        self.object_representer = object_representer\n        logger.debug('obj loader set')", "docstring": "Initialize formatter and object representer.\n\nArgs:\nformatter: Callable object/function that will format object loaded\nfrom in file. Formatter signature:\niterable = formatter(iterable)\nobject_representer: An ObjectRepresenter instance.", "source": "juraj-google-style"}
{"code": "def stop_server(self, grace=1.0):\n    self._server_lock.acquire()\n    try:\n        if not self._server_started:\n            raise ValueError('Server has not started running')\n        if self._stop_requested:\n            raise ValueError('Server has already stopped')\n        self._stop_requested = True\n        return self.server.stop(grace=grace)\n    finally:\n        self._server_lock.release()", "docstring": "Request server stopping.\n\nOnce stopped, server cannot be stopped or started again. This method is\nnon-blocking. Call `wait()` on the returned event to block until the server\nhas completely stopped.\n\nArgs:\ngrace: Grace period in seconds to be used when calling `server.stop()`.\n\nRaises:\nValueError: If server stop has already been requested, or if the server\nhas not started running yet.\n\nReturns:\nA threading.Event that will be set when the server has completely stopped.", "source": "github-repos"}
{"code": "def _AddEdge(self, start_node, end_node):\n    self.graph[start_node].outgoing.append(end_node)\n    if (end_node in self.graph):\n        self.graph[end_node].incoming.append(start_node)", "docstring": "Add a directed edge to the graph.\n\nAdd the end to the list of outgoing nodes of the start and the start to the\nlist of incoming nodes of the end node.\n\nArgs:\nstart_node: name of the start node\nend_node: name of the end node", "source": "codesearchnet"}
{"code": "def put(self, key, value):\n    \n    if value is None:\n      self.delete(key)\n    else:\n      self._collection(key)[key] = value", "docstring": "Stores the object `value` named by `key`.\n\nStores the object in the collection corresponding to ``key.path``.\n\nArgs:\nkey: Key naming `value`\nvalue: the object to store.", "source": "juraj-google-style"}
{"code": "def aggregate(self):\n    (_, indices, inverse) = np.unique(self.record.sample, axis=0, return_index=True, return_inverse=True)\n    order = np.argsort(indices)\n    indices = indices[order]\n    record = self.record[indices]\n    record.num_occurrences = 0\n    for (old_idx, new_idx) in enumerate(inverse):\n        new_idx = order[new_idx]\n        record[new_idx].num_occurrences += self.record[old_idx].num_occurrences\n    return type(self)(record, self.variables, copy.deepcopy(self.info), self.vartype)", "docstring": "Create a new SampleSet with repeated samples aggregated.\n\nReturns:\n:obj:`.SampleSet`\n\nNote:\n:attr:`.SampleSet.record.num_occurrences` are accumulated but no\nother fields are.", "source": "codesearchnet"}
{"code": "def get_task_scfcycles(self, nids=None, wslice=None, task_class=None, exclude_ok_tasks=False):\n        \n        select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]\n        tasks_cycles = []\n\n        for task in self.select_tasks(nids=nids, wslice=wslice):\n            \n            if task.status not in select_status or task.cycle_class is None:\n                continue\n            if task_class is not None and not task.isinstance(task_class):\n                continue\n            try:\n                cycle = task.cycle_class.from_file(task.output_file.path)\n                if cycle is not None:\n                    tasks_cycles.append((task, cycle))\n            except Exception:\n                \n                pass\n\n        return tasks_cycles", "docstring": "Return list of (taks, scfcycle) tuples for all the tasks in the flow with a SCF algorithm\ne.g. electronic GS-SCF iteration, DFPT-SCF iterations etc.\n\nArgs:\nnids: List of node identifiers.\nwslice: Slice object used to select works.\ntask_class: String or class used to select tasks. Ignored if None.\nexclude_ok_tasks: True if only running tasks should be considered.\n\nReturns:\nList of `ScfCycle` subclass instances.", "source": "juraj-google-style"}
{"code": "def get_country_by_name(self, country_name: str) -> typing.Optional['Country']:\n        \n        VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)\n        if country_name not in self._countries_by_name.keys():\n            for country in self.countries:\n\n                if country.country_name == country_name:\n                    self._countries_by_name[country_name] = country\n                    return country\n            raise ValueError(country_name)\n        else:\n            return self._countries_by_name[country_name]", "docstring": "Gets a country from its name\n\nArgs:\ncountry_name: country name\n\nReturns: Country", "source": "juraj-google-style"}
{"code": "def sampling_query(sql, fields=None, count=5, sampling=None):\n    if (sampling is None):\n        sampling = Sampling.default(count=count, fields=fields)\n    return sampling(sql)", "docstring": "Returns a sampling query for the SQL object.\n\nArgs:\nsql: the SQL object to sample\nfields: an optional list of field names to retrieve.\ncount: an optional count of rows to retrieve which is used if a specific\nsampling is not specified.\nsampling: an optional sampling strategy to apply to the table.\nReturns:\nA SQL query string for sampling the input sql.", "source": "codesearchnet"}
{"code": "async def update(\n        self,\n        service_id: str,\n        version: str,\n        *,\n        image: str = None,\n        rollback: bool = False\n    ) -> bool:\n        \n        if image is None and rollback is False:\n            raise ValueError(\"You need to specify an image.\")\n\n        inspect_service = await self.inspect(service_id)\n        spec = inspect_service[\"Spec\"]\n\n        if image is not None:\n            spec[\"TaskTemplate\"][\"ContainerSpec\"][\"Image\"] = image\n\n        params = {\"version\": version}\n        if rollback is True:\n            params[\"rollback\"] = \"previous\"\n\n        data = json.dumps(clean_map(spec))\n\n        await self.docker._query_json(\n            \"services/{service_id}/update\".format(service_id=service_id),\n            method=\"POST\",\n            data=data,\n            params=params,\n        )\n        return True", "docstring": "Update a service.\nIf rollback is True image will be ignored.\n\nArgs:\nservice_id: ID or name of the service.\nversion: Version of the service that you want to update.\nrollback: Rollback the service to the previous service spec.\n\nReturns:\nTrue if successful.", "source": "juraj-google-style"}
{"code": "def issue_closed(issue_key, server=None, username=None, password=None):\n    if (not issue_key):\n        return None\n    jira_ = _get_jira(server=server, username=username, password=password)\n    try:\n        ticket = jira_.issue(issue_key)\n    except jira.exceptions.JIRAError:\n        return None\n    return (ticket.fields().status.name == 'Closed')", "docstring": "Check if the issue is closed.\n\nissue_key\nThe JIRA iD of the ticket to close.\n\nReturns:\n\n- ``True``: the ticket exists and it is closed.\n- ``False``: the ticket exists and it has not been closed.\n- ``None``: the ticket does not exist.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' jira.issue_closed NE-123", "source": "codesearchnet"}
{"code": "def fn_with_code_in_docstring():\n    return True", "docstring": "This has code in the docstring.\n\n\n\nExample:\nx = fn_with_code_in_docstring()\nindentation_matters = True\n\n\n\nReturns:\nTrue.", "source": "github-repos"}
{"code": "def update_scores(self, scores: torch.FloatTensor, g_values: torch.FloatTensor) -> torch.FloatTensor:\n    _, _, depth = g_values.shape\n    probs = torch.softmax(scores, dim=1)\n    for i in range(depth):\n        g_values_at_depth = g_values[:, :, i]\n        g_mass_at_depth = (g_values_at_depth * probs).sum(axis=1, keepdims=True)\n        probs = probs * (1 + g_values_at_depth - g_mass_at_depth)\n    log_probs = torch.log(probs)\n    log_probs = torch.where(torch.isfinite(log_probs), log_probs, torch.finfo(log_probs.dtype).min)\n    return log_probs", "docstring": "Updates scores using the g values.\n\nWe assume that the scores are in the log space.\nArgs:\nscores (`torch.FloatTensor`): Scores (batch_size, vocab_size).\ng_values (`torch.FloatTensor`): G values (batch_size, vocab_size, depth).\n\nReturns:\nUpdated scores (batch_size, vocab_size).", "source": "github-repos"}
{"code": "def num_samples(self, dataset_split):\n    return {problem.DatasetSplit.TRAIN: 1000000, problem.DatasetSplit.EVAL: 10000, problem.DatasetSplit.TEST: 10000}[dataset_split]", "docstring": "Determine the dataset sized given a dataset_split.\n\nArgs:\ndataset_split: A problem.DatasetSplit.\n\nReturns:\nThe desired number of samples for this dataset_split.", "source": "codesearchnet"}
{"code": "def delete(self, interface, vrid):\n    vrrp_str = ('no vrrp %d' % vrid)\n    return self.configure_interface(interface, vrrp_str)", "docstring": "Deletes a vrrp instance from an interface\n\nNote:\nThis method will attempt to delete the vrrp from the node's\noperational config. If the vrrp does not exist on the\ninterface then this method will not perform any changes\nbut still return True\n\nArgs:\ninterface (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be deleted.\n\nReturns:\nTrue if the vrrp could be deleted otherwise False (see Node)", "source": "codesearchnet"}
{"code": "def assert_rank_in(x, ranks, data=None, summarize=None, message=None, name=None):\n    with ops.name_scope(name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):\n        if not isinstance(x, sparse_tensor.SparseTensor):\n            x = ops.convert_to_tensor(x, name='x')\n        ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])\n        message = _message_prefix(message)\n        if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):\n            name = ''\n        else:\n            name = x.name\n        if data is None:\n            data = [message, 'Tensor %s must have rank in' % name] + list(ranks) + ['Received shape: ', array_ops.shape(x)]\n        try:\n            assert_op = _assert_ranks_condition(x, ranks, _static_rank_in, _dynamic_rank_in, data, summarize)\n        except ValueError as e:\n            if e.args[0] == 'Static rank condition failed':\n                raise ValueError('%sTensor %s must have rank in %s.  Received rank %d, shape %s' % (message, name, tuple((r.item() for r in e.args[2])), e.args[1], x.get_shape()))\n            else:\n                raise\n    return assert_op", "docstring": "Assert `x` has rank in `ranks`.\n\nExample of adding a dependency to an operation:\n\n```python\nwith tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]):\noutput = tf.reduce_sum(x)\n```\n\nArgs:\nx:  Numeric `Tensor`.\nranks:  Iterable of scalar `Tensor` objects.\ndata:  The tensors to print out if the condition is False.  Defaults to\nerror message and first few entries of `x`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).\nDefaults to \"assert_rank_in\".\n\nReturns:\nOp raising `InvalidArgumentError` unless rank of `x` is in `ranks`.\nIf static checks determine `x` has matching rank, a `no_op` is returned.\n\nRaises:\nValueError:  If static checks determine `x` has mismatched rank.", "source": "github-repos"}
{"code": "def select_serial_number_row(self, serial_number):\n    sheet = self.table\n    col = self.db_sheet_cols.id\n    rows = (sheet.loc[(:, col)] == serial_number)\n    return sheet.loc[(rows, :)]", "docstring": "Select row for identification number serial_number\n\nArgs:\nserial_number: serial number\n\nReturns:\npandas.DataFrame", "source": "codesearchnet"}
{"code": "def add(self, arg, options=None):\n    fut = tasklets.Future(('%s.add(%s, %s)' % (self, arg, options)))\n    todo = self._queues.get(options)\n    if (todo is None):\n        utils.logging_debug('AutoBatcher(%s): creating new queue for %r', self._todo_tasklet.__name__, options)\n        if (not self._queues):\n            eventloop.add_idle(self._on_idle)\n        todo = self._queues[options] = []\n    todo.append((fut, arg))\n    if (len(todo) >= self._limit):\n        del self._queues[options]\n        self.run_queue(options, todo)\n    return fut", "docstring": "Adds an arg and gets back a future.\n\nArgs:\narg: one argument for _todo_tasklet.\noptions: rpc options.\n\nReturn:\nAn instance of future, representing the result of running\n_todo_tasklet without batching.", "source": "codesearchnet"}
{"code": "def check_config_attributes_being_used(config_class):\n    signature = dict(inspect.signature(config_class.__init__).parameters)\n    parameter_names = [x for x in list(signature.keys()) if x not in ['self', 'kwargs']]\n    parameter_defaults = [signature[param].default for param in parameter_names]\n    reversed_attribute_map = {}\n    if len(config_class.attribute_map) > 0:\n        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}\n    config_source_file = inspect.getsourcefile(config_class)\n    model_dir = os.path.dirname(config_source_file)\n    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith('modeling_')]\n    modeling_sources = []\n    for path in modeling_paths:\n        if os.path.isfile(path):\n            with open(path, encoding='utf8') as fp:\n                modeling_sources.append(fp.read())\n    unused_attributes = []\n    for config_param, default_value in zip(parameter_names, parameter_defaults):\n        attributes = [config_param]\n        if config_param in reversed_attribute_map:\n            attributes.append(reversed_attribute_map[config_param])\n        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):\n            unused_attributes.append(attributes[0])\n    return sorted(unused_attributes)", "docstring": "Check the arguments in `__init__` of `config_class` are used in the modeling files in the same directory\n\nArgs:\nconfig_class (`type`):\nThe configuration class for which the arguments in its `__init__` will be checked.", "source": "github-repos"}
{"code": "def create(cls, session, attributes=None, relationships=None):\n    resource_type = cls._resource_type()\n    resource_path = cls._resource_path()\n    url = session._build_url(resource_path)\n    json = build_request_body(resource_type, None, attributes=attributes, relationships=relationships)\n    process = cls._mk_one(session)\n    return session.post(url, CB.json(201, process), json=json)", "docstring": "Create a resource of the resource.\n\nThis should only be called from sub-classes\n\nArgs:\n\nsession(Session): The session to create the resource in.\n\nattributes(dict): Any attributes that are valid for the\ngiven resource type.\n\nrelationships(dict): Any relationships that are valid for the\ngiven resource type.\n\nReturns:\n\nResource: An instance of a resource.", "source": "codesearchnet"}
{"code": "def start(self, name: str, increment_count: bool = True) -> None:\n        \n        if not self._timing:\n            return\n        now = get_now_utc_pendulum()\n\n        \n        if self._stack:\n            last = self._stack[-1]\n            self._totaldurations[last] += now - self._starttimes[last]\n\n        \n        if name not in self._starttimes:\n            self._totaldurations[name] = datetime.timedelta()\n            self._count[name] = 0\n        self._starttimes[name] = now\n        if increment_count:\n            self._count[name] += 1\n        self._stack.append(name)", "docstring": "Start a named timer.\n\nArgs:\nname: name of the timer\nincrement_count: increment the start count for this timer", "source": "juraj-google-style"}
{"code": "def get(cls, keyval, key='id', user_id=None):\n    if (keyval is None):\n        return None\n    if ((key in cls.__table__.columns) and cls.__table__.columns[key].primary_key):\n        return cls.query.get(keyval)\n    else:\n        result = cls.query.filter((getattr(cls, key) == keyval))\n        return result.first()", "docstring": "Fetches a single instance which has value `keyval`\nfor the attribute `key`.\n\nArgs:\n\nkeyval: The value of the attribute.\n\nkey (str, optional):  The attribute to search by. By default,\nit is 'id'.\n\nReturns:\n\nA model instance if found. Else None.\n\nExamples:\n\n>>> User.get(35)\nuser35@i.com\n\n>>> User.get('user35@i.com', key='email')\nuser35@i.com", "source": "codesearchnet"}
{"code": "def safe_rt(resource_type, lower=False):\n    if (resource_type is not None):\n        resource_type = resource_type.replace(' ', '_')\n        if lower:\n            resource_type = resource_type.lower()\n    return resource_type", "docstring": "Format the Resource Type.\n\nTakes Custom Indicator types with a space character and return a *safe* string.\n\n(e.g. *User Agent* is converted to User_Agent or user_agent.)\n\nArgs:\nresource_type (string): The resource type to format.\nlower (boolean): Return type in all lower case\n\nReturns:\n(string): The formatted resource type.", "source": "codesearchnet"}
{"code": "def start(self, hostname=None, port=None, templates_path=None):\n    self.hostname = (hostname if hostname else 'localhost')\n    if port:\n        self.port = port\n    elif (not self.port):\n        self.port = unused_port(self.hostname)\n    if templates_path:\n        self.loaders.insert(0, jinja2.FileSystemLoader(templates_path))\n        self._set_loaders()\n    self.setup_routes()\n    self.runner = aioweb.AppRunner(self.app)\n    return self.agent.submit(start_server_in_loop(self.runner, self.hostname, self.port, self.agent))", "docstring": "Starts the web interface.\n\nArgs:\nhostname (str, optional): host name to listen from. (Default value = None)\nport (int, optional): port to listen from. (Default value = None)\ntemplates_path (str, optional): path to look for templates. (Default value = None)", "source": "codesearchnet"}
{"code": "def makefile(self):\n    return self.env.get_template('Makefile.j2').render(metadata=self.metadata, package=self.package)", "docstring": "Generate the documentation Makefile.\n\nReturns:\n(str): the contents of the `Makefile`.", "source": "codesearchnet"}
{"code": "def get_what_follows_raw(s: str, prefix: str, onlyatstart: bool=True, stripwhitespace: bool=True) -> Tuple[(bool, str)]:\n    prefixstart = s.find(prefix)\n    if (((prefixstart == 0) and onlyatstart) or ((prefixstart != (- 1)) and (not onlyatstart))):\n        resultstart = (prefixstart + len(prefix))\n        result = s[resultstart:]\n        if stripwhitespace:\n            result = result.strip()\n        return (True, result)\n    return (False, '')", "docstring": "Find the part of ``s`` that is after ``prefix``.\n\nArgs:\ns: string to analyse\nprefix: prefix to find\nonlyatstart: only accept the prefix if it is right at the start of\n``s``\nstripwhitespace: remove whitespace from the result\n\nReturns:\ntuple: ``(found, result)``", "source": "codesearchnet"}
{"code": "def prepend(self, key, value, expire=0, noreply=None):\n        \n        if noreply is None:\n            noreply = self.default_noreply\n        return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]", "docstring": "The memcached \"prepend\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: str, see class docs for details.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue.", "source": "juraj-google-style"}
{"code": "def tanh(x):\n    return ops.tanh(x)", "docstring": "Hyperbolic tangent activation function.\n\nIt is defined as:\n`tanh(x) = sinh(x) / cosh(x)`, i.e.\n`tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def disconnect_sync(self, connection_handle):\n        \n\n        self.bable.disconnect(connection_handle=connection_handle, sync=True)", "docstring": "Synchronously disconnect from whoever has connected to us\n\nArgs:\nconnection_handle (int): The handle of the connection we wish to disconnect.", "source": "juraj-google-style"}
{"code": "def predict_on_batch(self, x):\n    self._check_call_args('predict_on_batch')\n    if self._distribution_strategy and distribute_lib.in_cross_replica_context():\n        raise NotImplementedError('`predict_on_batch` is not supported for models distributed with tf.distribute.Strategy.')\n    inputs, _, _ = self._standardize_user_data(x, extract_tensors_from_dataset=True)\n    if self.run_eagerly or self._distribution_strategy:\n        inputs = training_utils_v1.cast_if_floating_dtype(inputs)\n        if isinstance(inputs, collections.abc.Sequence):\n            if len(inputs) == 1:\n                inputs = inputs[0]\n        return self(inputs)\n    self._make_predict_function()\n    outputs = self.predict_function(inputs)\n    if len(outputs) == 1:\n        return outputs[0]\n    return outputs", "docstring": "Returns predictions for a single batch of samples.\n\nArgs:\nx: Input data. It could be:\n- A Numpy array (or array-like), or a list of arrays\n(in case the model has multiple inputs).\n- A TensorFlow tensor, or a list of tensors\n(in case the model has multiple inputs).\n- A `tf.data` dataset.\n\nReturns:\nNumpy array(s) of predictions.\n\nRaises:\nValueError: In case of mismatch between given number of inputs and\nexpectations of the model.", "source": "github-repos"}
{"code": "def replace(self, pattern, replacement):\n    for (i, line) in enumerate(self):\n        if (pattern in line):\n            self[i] = line.replace(pattern, replacement)", "docstring": "Replace all instances of a pattern with a replacement.\n\nArgs:\npattern (str): Pattern to replace\nreplacement (str): Text to insert", "source": "codesearchnet"}
{"code": "def _insert_layers(self, layers, relevant_nodes=None):\n    layers = nest.flatten(layers)\n    tf_utils.assert_no_legacy_layers(layers)\n    node_to_depth = {}\n    for depth, nodes in self._nodes_by_depth.items():\n        node_to_depth.update({node: depth for node in nodes})\n    if not relevant_nodes:\n        relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])\n    network_nodes = set(relevant_nodes + list(node_to_depth.keys()))\n\n    def _get_min_depth(node):\n        \n        min_depth = 0\n        for layer, node_id, _, _ in node.iterate_inbound():\n            inbound_node = layer._inbound_nodes[node_id]\n            if inbound_node in node_to_depth:\n                min_depth = min(min_depth, node_to_depth[inbound_node])\n            elif inbound_node not in network_nodes:\n                continue\n            else:\n                return None\n        return min_depth - 1\n    unprocessed_nodes = copy.copy(relevant_nodes)\n    i = 0\n    while unprocessed_nodes:\n        i += 1\n        if i > 10000:\n            raise ValueError('Layers could not be added due to missing dependencies.')\n        node = unprocessed_nodes.pop(0)\n        depth = _get_min_depth(node)\n        if depth is None:\n            unprocessed_nodes.append(node)\n            continue\n        node_key = _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))\n        if node_key not in self._network_nodes:\n            node_to_depth[node] = depth\n            self._network_nodes.add(node_key)\n            self._nodes_by_depth[depth].append(node)\n    layer_set = set(self._self_tracked_trackables)\n    deferred_layers = []\n    for layer in layers:\n        if layer not in layer_set:\n            self._self_tracked_trackables.append(layer)\n            deferred_layers.append(layer)\n            self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n            layer_set.add(layer)\n    self._handle_deferred_layer_dependencies(deferred_layers)\n    self._compute_tensor_usage_count()", "docstring": "Inserts Layers into the Network after Network creation.\n\nThis is only valid for Keras Graph Networks.  Layers added via this function\nwill be included in the `call` computation and `get_config` of this Network.\nThey will not be added to the Network's outputs.\n\n\nArgs:\nlayers: Arbitrary nested structure of Layers. Layers must be reachable\nfrom one or more of the `keras.Input` Tensors that correspond to this\nNetwork's inputs.\nrelevant_nodes: Nodes from the Layers that should be considered part of\nthis Network. If `None`, all Nodes will be considered part of this\nNetwork.\n\nRaises:\nValueError: If the layers depend on `Input`s not found in this Model.", "source": "github-repos"}
{"code": "def CheckAltTokens(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  if Match(r'^\\s*\n    return\n\n  \n  \n  \n  \n  \n  \n  \n  \n  if line.find('') >= 0:\n    return\n\n  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):\n    error(filename, linenum, 'readability/alt_tokens', 2,\n          'Use operator %s instead of %s' % (\n              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))", "docstring": "Check alternative keywords being used in boolean expressions.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def parse_args(self, arglist=None):\n    args = self._parser.parse_args(args=arglist)\n    sub_cmd = args.loam_sub_name\n    if (sub_cmd is None):\n        for (opt, sct) in self._opt_bare.items():\n            self._conf[sct][opt] = getattr(args, opt, None)\n    else:\n        for (opt, sct) in self._opt_cmds[sub_cmd].items():\n            self._conf[sct][opt] = getattr(args, opt, None)\n    return args", "docstring": "Parse arguments and update options accordingly.\n\nArgs:\narglist (list of str): list of arguments to parse. If set to None,\n``sys.argv[1:]`` is used.\n\nReturns:\n:class:`Namespace`: the argument namespace returned by the\n:class:`argparse.ArgumentParser`.", "source": "codesearchnet"}
{"code": "def convert_to_rgb(video: np.array, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:\n    if not isinstance(video, np.ndarray):\n        raise ValueError(f'Video has to be a numpy array to convert to RGB format, but found {type(video)}')\n    if input_data_format is None:\n        input_data_format = infer_channel_dimension_format(video)\n    video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)\n    if video.shape[-3] == 3:\n        return video\n    if video.shape[-3] == 1:\n        return video.repeat(3, -3)\n    if not (video[..., 3, :, :] < 255).any():\n        return video\n    alpha = video[..., 3, :, :] / 255.0\n    video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., 3, :, :]\n    return video", "docstring": "Convert video to RGB by blending the transparency layer if it's in RGBA format, otherwise simply returns it.\n\nArgs:\nvideo (`np.array`):\nThe video to convert.\ndata_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the output video. If unset, will use the inferred format from the input.\ninput_data_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the input video. If unset, will use the inferred format from the input.", "source": "github-repos"}
{"code": "def compose(*coros):\n    coros = list(coros)\n\n    @asyncio.coroutine\n    def reducer(acc, coro):\n        return (yield from coro(acc))\n\n    @asyncio.coroutine\n    def wrapper(acc):\n        return (yield from reduce(reducer, coros, initializer=acc, right=True))\n    return wrapper", "docstring": "Creates a coroutine function based on the composition of the passed\ncoroutine functions.\n\nEach function consumes the yielded result of the coroutine that follows.\n\nComposing coroutine functions f(), g(), and h() would produce\nthe result of f(g(h())).\n\nArguments:\n*coros (coroutinefunction): variadic coroutine functions to compose.\n\nRaises:\nRuntimeError: if cannot execute a coroutine function.\n\nReturns:\ncoroutinefunction\n\nUsage::\n\nasync def sum_1(num):\nreturn num + 1\n\nasync def mul_2(num):\nreturn num * 2\n\ncoro = paco.compose(sum_1, mul_2, sum_1)\nawait coro(2)\n# => 7", "source": "codesearchnet"}
{"code": "def _ParseShVariables(self, lines):\n    paths = {}\n    for line in lines:\n        for entry in line:\n            if ('=' in entry):\n                (target, vals) = (entry.split('=', 1) + [''])[:2]\n                if vals:\n                    path_vals = vals.split(':')\n                else:\n                    path_vals = []\n                self._ExpandPath(target, path_vals, paths)\n            elif (entry not in self._SH_CONTINUATION):\n                break\n    return paths", "docstring": "Extract env_var and path values from sh derivative shells.\n\nIterates over each line, word by word searching for statements that set the\npath. These are either variables, or conditions that would allow a variable\nto be set later in the line (e.g. export).\n\nArgs:\nlines: A list of lines, each of which is a list of space separated words.\n\nReturns:\na dictionary of path names and values.", "source": "codesearchnet"}
{"code": "def _contains_nd(nodes, point):\n    r\n    min_vals = np.min(nodes, axis=1)\n    if not np.all(min_vals <= point):\n        return False\n\n    max_vals = np.max(nodes, axis=1)\n    if not np.all(point <= max_vals):\n        return False\n\n    return True", "docstring": "r\"\"\"Predicate indicating if a point is within a bounding box.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): A set of points.\npoint (numpy.ndarray): A 1D NumPy array representing a point\nin the same dimension as ``nodes``.\n\nReturns:\nbool: Indicating containment.", "source": "juraj-google-style"}
{"code": "def _apply_sparse_duplicate_indices(self, grad, var):\n    summed_values, unique_indices = _deduplicate_indexed_slices(values=grad.values, indices=grad.indices)\n    gradient_no_duplicate_indices = indexed_slices.IndexedSlices(indices=unique_indices, values=summed_values, dense_shape=grad.dense_shape)\n    return self._apply_sparse(gradient_no_duplicate_indices, var)", "docstring": "Add ops to apply sparse gradients to `var`, with repeated sparse indices.\n\nOptimizers which override this method must deal with IndexedSlices objects\nsuch as the following:\n\nIndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])\n\nThe correct interpretation is:\n\nIndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])\n\nMany optimizers deal incorrectly with repeated indices when updating based\non sparse gradients (e.g. summing squares rather than squaring the sum, or\napplying momentum terms multiple times). Adding first is always the correct\nbehavior, so this is enforced here by reconstructing the IndexedSlices to\nhave only unique indices, then calling _apply_sparse.\n\nOptimizers which deal correctly with repeated indices may instead override\nthis method to avoid the overhead of summing indices.\n\nArgs:\ngrad: `IndexedSlices`.\nvar: A `Variable` object.\n\nReturns:\nAn `Operation`.", "source": "github-repos"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    try:\n        structure = self._DPKG_LOG_LINE.parseString(line)\n    except pyparsing.ParseException as exception:\n        logger.debug('Unable to parse Debian dpkg.log file with error: {0!s}'.format(exception))\n        return False\n    return (('date_time' in structure) and ('body' in structure))", "docstring": "Verifies if a line from a text file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "codesearchnet"}
{"code": "def get_temp_dir(self) -> str:\n    if not self._tempdir:\n        self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())\n    return self._tempdir", "docstring": "Returns a unique temporary directory for the test to use.\n\nIf you call this method multiple times during in a test, it will return the\nsame folder. However, across different runs the directories will be\ndifferent. This will ensure that across different runs tests will not be\nable to pollute each others environment.\nIf you need multiple unique directories within a single test, you should\nuse tempfile.mkdtemp as follows:\ntempfile.mkdtemp(dir=self.get_temp_dir()):\n\nReturns:\nstring, the path to the unique temporary directory created for this test.", "source": "github-repos"}
{"code": "def traverse_levelorder(self, leaves=True, internal=True):\n    q = deque()\n    q.append(self)\n    while (len(q) != 0):\n        n = q.popleft()\n        if ((leaves and n.is_leaf()) or (internal and (not n.is_leaf()))):\n            (yield n)\n        q.extend(n.children)", "docstring": "Perform a levelorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"}
{"code": "def search(self, terms):\n    messages = self._connection.get(('search/%s' % urllib.quote_plus(terms)), key='messages')\n    if messages:\n        messages = [Message(self, message) for message in messages]\n    return messages", "docstring": "Search transcripts.\n\nArgs:\nterms (str): Terms for search\n\nReturns:\narray. Messages", "source": "codesearchnet"}
{"code": "def get_type(self, index):\n\n        \n        if index < 0 or index >= len(self._types):\n            raise ValueError(\"Index for getting order parameter type\"\n                             \" out-of-bounds!\")\n        return self._types[index]", "docstring": "Return type of order parameter at the index provided and\nrepresented by a short string.\n\nArgs:\nindex (int): index of order parameter for which type is\nto be returned.\nReturns:\nstr: OP type.", "source": "juraj-google-style"}
{"code": "def copy_default_config_to_user_directory(basename, clobber=False, dst_dir='~/.config/scriptabit'):\n    dst_dir = os.path.expanduser(dst_dir)\n    dst = os.path.join(dst_dir, basename)\n    src = resource_filename(Requirement.parse('scriptabit'), os.path.join('scriptabit', basename))\n    if (not os.path.exists(dst_dir)):\n        os.makedirs(dst_dir)\n    if (clobber or (not os.path.isfile(dst))):\n        shutil.copy(src, dst)", "docstring": "Copies the default configuration file into the user config directory.\n\nArgs:\nbasename (str): The base filename.\nclobber (bool): If True, the default will be written even if a user\nconfig already exists.\ndst_dir (str): The destination directory.", "source": "codesearchnet"}
{"code": "def turtle_to_texture(turtle_program, turn_amount=DEFAULT_TURN,\n                      initial_angle=DEFAULT_INITIAL_ANGLE, resolution=1):\n    \n    generator = branching_turtle_generator(\n        turtle_program, turn_amount, initial_angle, resolution)\n    return texture_from_generator(generator)", "docstring": "Makes a texture from a turtle program.\n\nArgs:\nturtle_program (str): a string representing the turtle program; see the\ndocstring of `branching_turtle_generator` for more details\nturn_amount (float): amount to turn in degrees\ninitial_angle (float): initial orientation of the turtle\nresolution (int): if provided, interpolation amount for visible lines\n\nReturns:\ntexture: A texture.", "source": "juraj-google-style"}
{"code": "def run_step(self, representer):\n    assert representer, 'ObjectRepresenter instance required to run ObjectRewriterStep.'\n    rewriter = ObjectRewriter(self.context.get_formatted_iterable, representer)\n    super().run_step(rewriter)", "docstring": "Do the object in-out rewrite.\n\nArgs:\nrepresenter: A pypyr.filesystem.ObjectRepresenter instance.", "source": "codesearchnet"}
{"code": "def p44(msg):\n    d = hex2bin(data(msg))\n    if (d[34] == '0'):\n        return None\n    p = bin2int(d[35:46])\n    return p", "docstring": "Static pressure.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: static pressure in hPa", "source": "codesearchnet"}
{"code": "def generate_timing_stats(file_list, var_list):\n    \n    timing_result = dict()\n    timing_summary = dict()\n    for file in file_list:\n        timing_result[file] = functions.parse_gptl(file, var_list)\n    for var in var_list:\n        var_time = []\n        for f, data in timing_result.items():\n            try:\n                var_time.append(data[var])\n            except:\n                continue\n        if len(var_time):\n            timing_summary[var] = {'mean': np.mean(var_time),\n                                   'max': np.max(var_time),\n                                   'min': np.min(var_time),\n                                   'std': np.std(var_time)}\n    return timing_summary", "docstring": "Parse all of the timing files, and generate some statistics\nabout the run.\n\nArgs:\nfile_list: A list of timing files to parse\nvar_list: A list of variables to look for in the timing file\n\nReturns:\nA dict containing values that have the form:\n[mean, min, max, mean, standard deviation]", "source": "juraj-google-style"}
{"code": "def dt_dt(sdat, tstart=None, tend=None):\n    tseries = sdat.tseries_between(tstart, tend)\n    time = tseries['t'].values\n    temp = tseries['Tmean'].values\n    dtdt = ((temp[1:] - temp[:(- 1)]) / (time[1:] - time[:(- 1)]))\n    return (dtdt, time[:(- 1)])", "docstring": "Derivative of temperature.\n\nCompute dT/dt as a function of time using an explicit Euler scheme.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): time at which the computation should start. Use the\nbeginning of the time series data if set to None.\ntend (float): time at which the computation should end. Use the\nend of the time series data if set to None.\nReturns:\ntuple of :class:`numpy.array`: derivative of temperature and time\narrays.", "source": "codesearchnet"}
{"code": "def GetUsedMemory(self):\n    try:\n        memory_info = self._process.memory_info()\n    except psutil.NoSuchProcess:\n        return None\n    memory_data = getattr(memory_info, 'data', 0)\n    memory_shared = getattr(memory_info, 'shared', 0)\n    return (memory_data + memory_shared)", "docstring": "Retrieves the amount of memory used by the process.\n\nReturns:\nint: amount of memory in bytes used by the process or None\nif not available.", "source": "codesearchnet"}
{"code": "def normal(key, shape, dtype=dtypes.float32):\n    key = tf_np.asarray(key, dtype=_RNG_KEY_DTYPE)\n    return tf_np.asarray(stateless_random_ops.stateless_random_normal(shape, seed=_key2seed(key), dtype=dtype))", "docstring": "Sample standard-normal random values.\n\nArgs:\nkey: the RNG key.\nshape: the shape of the result.\ndtype: the dtype of the result.\n\nReturns:\nRandom values in standard-normal distribution.", "source": "github-repos"}
{"code": "def plot_feature_correlation_heatmap(df, features, font_size=9, figsize=(15, 15), save_filename=None):\n    \n\n    features = features[:]\n    features += ['target']\n\n    mcorr = df[features].corr()\n    mask = np.zeros_like(mcorr, dtype=np.bool)\n    mask[np.triu_indices_from(mask)] = True\n\n    cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n    fig = plt.figure(figsize=figsize)\n    heatmap = sns.heatmap(\n        mcorr,\n        mask=mask,\n        cmap=cmap,\n        square=True,\n        annot=True,\n        fmt='0.2f',\n        annot_kws={'size': font_size},\n    )\n\n    heatmap.tick_params(axis='both', which='major', labelsize=font_size)\n    heatmap.tick_params(axis='both', which='minor', labelsize=font_size)\n\n    heatmap.set_xticklabels(features, rotation=90)\n    heatmap.set_yticklabels(reversed(features))\n\n    plt.show()\n\n    if save_filename is not None:\n        fig.savefig(save_filename, dpi=300)", "docstring": "Plot a correlation heatmap between every feature pair.\n\nArgs:\ndf: Pandas dataframe containing the target column (named 'target').\nfeatures: The list of features to include in the correlation plot.\nfont_size: Font size for heatmap cells and axis labels.\nfigsize: The size of the plot.\nsave_filename: (Optional) The path of the file to save a high-res version of the plot to.", "source": "juraj-google-style"}
{"code": "def verify_docker_image_sha(chain, link):\n    cot = link.cot\n    task = link.task\n    errors = []\n    if isinstance(task['payload'].get('image'), dict):\n        docker_image_task_id = task['extra']['chainOfTrust']['inputs']['docker-image']\n        log.debug('Verifying {} {} against docker-image {}'.format(link.name, link.task_id, docker_image_task_id))\n        if (docker_image_task_id != task['payload']['image']['taskId']):\n            errors.append(\"{} {} docker-image taskId isn't consistent!: {} vs {}\".format(link.name, link.task_id, docker_image_task_id, task['payload']['image']['taskId']))\n        else:\n            path = task['payload']['image']['path']\n            image_hash = cot['environment']['imageArtifactHash']\n            (alg, sha) = image_hash.split(':')\n            docker_image_link = chain.get_link(docker_image_task_id)\n            upstream_sha = docker_image_link.cot['artifacts'].get(path, {}).get(alg)\n            if (upstream_sha is None):\n                errors.append('{} {} docker-image docker sha {} is missing! {}'.format(link.name, link.task_id, alg, docker_image_link.cot['artifacts'][path]))\n            elif (upstream_sha != sha):\n                errors.append(\"{} {} docker-image docker sha doesn't match! {} {} vs {}\".format(link.name, link.task_id, alg, sha, upstream_sha))\n            else:\n                log.debug('Found matching docker-image sha {}'.format(upstream_sha))\n    else:\n        prebuilt_task_types = chain.context.config['prebuilt_docker_image_task_types']\n        if ((prebuilt_task_types != 'any') and (link.task_type not in prebuilt_task_types)):\n            errors.append('Task type {} not allowed to use a prebuilt docker image!'.format(link.task_type))\n    raise_on_errors(errors)", "docstring": "Verify that built docker shas match the artifact.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\nlink (LinkOfTrust): the task link we're checking.\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):\n    definition = self._artifacts_registry.GetDefinitionByName(group_name)\n    if (not definition):\n        return None\n    return self._BuildFindSpecsFromArtifact(definition, environment_variables)", "docstring": "Builds find specifications from a artifact group name.\n\nArgs:\ngroup_name (str): artifact group name.\nenvironment_variables (list[str]): environment variable attributes used to\ndynamically populate environment variables in file and registry\nartifacts.\n\nReturns:\nlist[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no\nartifact with the given name can be retrieved.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      \n      file_object.seek(0, os.SEEK_SET)\n\n      \n      \n      tar_file = tarfile.open(mode='r:', fileobj=file_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._tar_file = tar_file", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def from_json(cls, json_value: Any, *, value_spec: Optional[pg_typing.Dict]=None, allow_partial: bool=False, root_path: Optional[utils.KeyPath]=None, **kwargs) -> 'Dict':\n    return cls({k: base.from_json(v, root_path=utils.KeyPath(k, root_path), allow_partial=allow_partial, **kwargs) for k, v in json_value.items()}, value_spec=value_spec, root_path=root_path, allow_partial=allow_partial)", "docstring": "Class method that load an symbolic Dict from a JSON value.\n\nArgs:\njson_value: Input JSON value, only JSON dict is acceptable.\nvalue_spec: An optional value spec to apply.\nallow_partial: Whether to allow members of the dict to be partial.\nroot_path: KeyPath of loaded object in its object tree.\n**kwargs: Allow passing through keyword arguments that are not applicable.\n\nReturns:\nA schemaless symbolic dict. For example::\n\nd = Dict.from_json({\n'a': {\n'_type': '__main__.Foo',\n'f1': 1,\n'f2': {\n'f21': True\n}\n}\n})\n\nassert d.value_spec is None\n# Okay:\nd.b = 1\n\n# a.f2 is bound by class Foo's field 'f2' definition (assume it defines\n# a schema for the Dict field).\nassert d.a.f2.value_spec is not None\n\n# Not okay:\nd.a.f2.abc = 1", "source": "github-repos"}
{"code": "def get_account_name(config, auth, account):\n    account_id, advertiser_ids = parse_account(config, auth, account)\n    is_superuser, profile_id = get_profile_for_api(config, auth, account_id)\n    response = API_DCM(config, auth, internal=is_superuser).accounts().get(id=account_id, profileId=profile_id).execute()\n    return response['name']", "docstring": "Return the name of a DCM account given the account ID.\n\nArgs:\n* auth: (string) Either user or service.\n* account: (string) [account:advertiser@profile] token.\n\nReturns:\n* Profile ID.\n\nRaises:\n* If current credentials do not have a profile for this account.", "source": "github-repos"}
{"code": "def build_srpm(specfile, save_dir):\n    \n    logger.info('Starting rpmbuild to build: {0} SRPM.'.format(specfile))\n    if save_dir != get_default_save_path():\n        try:\n            msg = subprocess.Popen(\n                ['rpmbuild',\n                 '--define', '_sourcedir {0}'.format(save_dir),\n                 '--define', '_builddir {0}'.format(save_dir),\n                 '--define', '_srcrpmdir {0}'.format(save_dir),\n                 '--define', '_rpmdir {0}'.format(save_dir),\n                 '-bs', specfile], stdout=subprocess.PIPE).communicate(\n                 )[0].strip()\n        except OSError:\n            logger.error(\n                \"Rpmbuild failed for specfile: {0} and save_dir: {1}\".format(\n                    specfile, save_dir), exc_info=True)\n            msg = 'Rpmbuild failed. See log for more info.'\n        return msg\n    else:\n        if not os.path.exists(save_dir):\n            raise IOError(\"Specify folder to store a file (SAVE_DIR) \"\n                          \"or install rpmdevtools.\")\n        try:\n            msg = subprocess.Popen(\n                ['rpmbuild',\n                 '--define', '_sourcedir {0}'.format(save_dir + '/SOURCES'),\n                 '--define', '_builddir {0}'.format(save_dir + '/BUILD'),\n                 '--define', '_srcrpmdir {0}'.format(save_dir + '/SRPMS'),\n                 '--define', '_rpmdir {0}'.format(save_dir + '/RPMS'),\n                 '-bs', specfile], stdout=subprocess.PIPE).communicate(\n                )[0].strip()\n        except OSError:\n            logger.error(\"Rpmbuild failed for specfile: {0} and save_dir: \"\n                         \"{1}\".format(specfile, save_dir), exc_info=True)\n            msg = 'Rpmbuild failed. See log for more info.'\n        return msg", "docstring": "Builds a srpm from given specfile using rpmbuild.\nGenerated srpm is stored in directory specified by save_dir.\n\nArgs:\nspecfile: path to a specfile\nsave_dir: path to source and build tree", "source": "juraj-google-style"}
{"code": "def pop_parameter(key):\n    \n    names = key.split('/')\n    if len(names) > 1:\n        with parameter_scope(names[0]):\n            return pop_parameter('/'.join(names[1:]))\n    global current_scope\n    param = current_scope.get(key, None)\n    if param is not None:\n        del current_scope[key]\n    return param", "docstring": "Remove and get parameter by key.\n\nArgs:\nkey(str): Key of parameter.\n\nReturns: ~nnabla.Variable\nParameter if key found, otherwise None.", "source": "juraj-google-style"}
{"code": "def _collect_paths(element):\n    \n    output = []\n\n    \n    path = vectors.el_to_path_vector(element)\n    root = path[0]\n    params = element.params if element.params else None\n    match = root.find(element.getTagName(), params)\n\n    if len(match) == 1:\n        output.append(\n            PathCall(\"find\", 0, [element.getTagName(), params])\n        )\n\n    \n    output.extend(path_patterns.neighbours_pattern(element))\n\n    \n    \n    output.extend(path_patterns.predecesors_pattern(element, root))\n\n    index_backtrack = []\n    last_index_backtrack = []\n    params_backtrack = []\n    last_params_backtrack = []\n\n    \n    for el in reversed(path):\n        \n        if not el.parent:\n            continue\n\n        tag_name = el.getTagName()\n        match = el.parent.wfind(tag_name).childs\n        index = match.index(el)\n\n        index_backtrack.append(\n            PathCall(\"wfind\", index, [tag_name])\n        )\n        last_index_backtrack.append(\n            PathCall(\"wfind\", index - len(match), [tag_name])\n        )\n\n        \n        if el.params:\n            match = el.parent.wfind(tag_name, el.params).childs\n            index = match.index(el)\n\n            params_backtrack.append(\n                PathCall(\"wfind\", index, [tag_name, el.params])\n            )\n            last_params_backtrack.append(\n                PathCall(\"wfind\", index - len(match), [tag_name, el.params])\n            )\n        else:\n            params_backtrack.append(\n                PathCall(\"wfind\", index, [tag_name])\n            )\n            last_params_backtrack.append(\n                PathCall(\"wfind\", index - len(match), [tag_name])\n            )\n\n    output.extend([\n        Chained(reversed(params_backtrack)),\n        Chained(reversed(last_params_backtrack)),\n        Chained(reversed(index_backtrack)),\n        Chained(reversed(last_index_backtrack)),\n    ])\n\n    return output", "docstring": "Collect all possible path which leads to `element`.\n\nFunction returns standard path from root element to this, reverse path,\nwhich uses negative indexes for path, also some pattern matches, like\n\"this is element, which has neighbour with id 7\" and so on.\n\nArgs:\nelement (obj): HTMLElement instance.\n\nReturns:\nlist: List of :class:`.PathCall` and :class:`.Chained` objects.", "source": "juraj-google-style"}
{"code": "def _GetDelayImportTimestamps(self, pefile_object):\n    delay_import_timestamps = []\n    if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT')):\n        return delay_import_timestamps\n    for importdata in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT:\n        dll_name = importdata.dll\n        try:\n            dll_name = dll_name.decode('ascii')\n        except UnicodeDecodeError:\n            dll_name = dll_name.decode('ascii', errors='replace')\n        timestamp = getattr(importdata.struct, 'dwTimeStamp', 0)\n        delay_import_timestamps.append([dll_name, timestamp])\n    return delay_import_timestamps", "docstring": "Retrieves timestamps from delay import entries, if available.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\nlist[tuple[str, int]]: pairs where the first value is the name of the DLL\nbeing imported and the second is the timestamp of the entry.", "source": "codesearchnet"}
{"code": "def get_discount_curve(discount_curve_types: List[Union[curve_types_lib.RiskFreeCurve, curve_types_lib.RateIndexCurve]], market: pmd.ProcessedMarketData, mask: List[int]) -> rate_curve.RateCurve:\n    discount_curves = [market.yield_curve(curve_type) for curve_type in discount_curve_types]\n    discounts = []\n    dates = []\n    interpolation_method = None\n    interpolate_rates = None\n    for curve in discount_curves:\n        discount, date = curve.discount_factors_and_dates()\n        discounts.append(discount)\n        dates.append(date)\n        interpolation_method = curve.interpolation_method\n        interpolate_rates = curve.interpolate_rates\n    all_discounts = tf.stack(pad.pad_tensors(discounts), axis=0)\n    all_dates = pad.pad_date_tensors(dates)\n    all_dates = dateslib.DateTensor.stack(dates, axis=0)\n    prepare_discounts = tf.gather(all_discounts, mask)\n    prepare_dates = dateslib.dates_from_ordinals(tf.gather(all_dates.ordinal(), mask))\n    discount_curve = rate_curve.RateCurve(prepare_dates, prepare_discounts, market.date, interpolator=interpolation_method, interpolate_rates=interpolate_rates)\n    return discount_curve", "docstring": "Builds a batched discount curve.\n\nGiven a list of discount curve an integer mask, creates a discount curve\nobject to compute discount factors against the list of discount curves.\n\n#### Example\n```none\ncurve_types = [RiskFreeCurve(\"USD\"), RiskFreeCurve(\"AUD\")]\n# A mask to price a batch of 7 instruments with the corresponding discount\n# curves [\"USD\", \"AUD\", \"AUD\", \"AUD\" \"USD\", \"USD\", \"AUD\"].\nmask = [0, 1, 1, 1, 0, 0, 1]\nmarket = MarketDataDict(...)\nget_discount_curve(curve_types, market, mask)\n# Returns a RateCurve object that can compute a discount factors for a\n# batch of 7 dates.\n```\n\nArgs:\ndiscount_curve_types: A list of curve types.\nmarket: an instance of the processed market data.\nmask: An integer mask.\n\nReturns:\nAn instance of `RateCurve`.", "source": "github-repos"}
{"code": "def get(self):\n    with warnings.catch_warnings(record=False):\n        warnings.simplefilter('ignore')\n        return np.nanquantile(self._queue, self._q)", "docstring": "Calculates and returns the specified quantile of the current sliding\nwindow.\n\nReturns:\nfloat: The specified quantile of the values in the current sliding window.\nReturns NaN if the window is empty.", "source": "github-repos"}
{"code": "def get_position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=10000.0):\n    position = tf.to_float(tf.range(length))\n    num_timescales = (hidden_size // 2)\n    log_timescale_increment = (math.log((float(max_timescale) / float(min_timescale))) / (tf.to_float(num_timescales) - 1))\n    inv_timescales = (min_timescale * tf.exp((tf.to_float(tf.range(num_timescales)) * (- log_timescale_increment))))\n    scaled_time = (tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0))\n    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\n    return signal", "docstring": "Return positional encoding.\n\nCalculates the position encoding as a mix of sine and cosine functions with\ngeometrically increasing wavelengths.\nDefined and formalized in Attention is All You Need, section 3.5.\n\nArgs:\nlength: Sequence length.\nhidden_size: Size of the hidden (embedding) dimension; split evenly between\nthe sine and cosine components.\nmin_timescale: Minimum scale that will be applied at each position\nmax_timescale: Maximum scale that will be applied at each position\n\nReturns:\nTensor with shape [length, hidden_size]", "source": "codesearchnet"}
{"code": "def install_hook(self, hook_name, hook_content):\n    hook_path = os.path.join(self.path, '.git/hooks', hook_name)\n    with open(hook_path, 'w') as f:\n        f.write(hook_content)\n    os.chmod(hook_path, ((stat.S_IEXEC | stat.S_IREAD) | stat.S_IWRITE))", "docstring": "Install the repository hook for this repo.\n\nArgs:\nhook_name (str)\nhook_content (str)", "source": "codesearchnet"}
{"code": "def get_variable_name_from_bird(bird_conf):\n    # NOTE: the original raw-string pattern was lost during extraction; it is\n    # reconstructed here from the docstring: capture the name between the\n    # keyword 'define' and the equals sign.\n    bird_variable_pattern = re.compile(\n        r'''\n        ^\\s*\n        define\\s+\n        (?P<name>\\S+)\n        \\s*=\n        ''', re.VERBOSE\n    )\n\n    with open(bird_conf, 'r') as content:\n        for line in content.readlines():\n            variable_match = bird_variable_pattern.search(line)\n            if variable_match:\n                return variable_match.group('name')\n\n    return None", "docstring": "Return the variable name set in Bird configuration.\n\nThe variable name in Bird configuration is set with the keyword 'define',\nhere is an example:\n\ndefine ACAST_PS_ADVERTISE =\n\nand we extract the string between the word 'define' and the equals sign.\n\nArguments:\nbird_conf (str): The absolute file name path of Bird configuration.\n\nReturns:\nThe variable name as a string or None if it isn't found.", "source": "juraj-google-style"}
{"code": "def add_forwarding_rules(self, forwarding_rules):\n    rules_dict = [rule.__dict__ for rule in forwarding_rules]\n    return self.get_data(('load_balancers/%s/forwarding_rules/' % self.id), type=POST, params={'forwarding_rules': rules_dict})", "docstring": "Adds new forwarding rules to a LoadBalancer.\n\nArgs:\nforwarding_rules (:obj:`list`): A list of `ForwardingRules` objects", "source": "codesearchnet"}
{"code": "class XLMPoolerAnswerClass(nn.Module):\n\n    def __init__(self, config: XLMConfig):\n        super().__init__()\n        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n        self.activation = nn.Tanh()\n        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)\n\n    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n        \n        hsz = hidden_states.shape[-1]\n        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n        if start_positions is not None:\n            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)\n        if cls_index is not None:\n            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)\n            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)\n        else:\n            cls_token_state = hidden_states[:, -1, :]\n        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))\n        x = self.activation(x)\n        x = self.dense_1(x).squeeze(-1)\n        return x", "docstring": "Compute SQuAD 2.0 answer class from classification and start tokens hidden states.\n\nArgs:\nconfig ([`XLMConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model.", "source": "github-repos"}
{"code": "def _inplace_helper(x, i, v, op):\n    x = ops.convert_to_tensor(x)\n    v = ops.convert_to_tensor(v, x.dtype)\n    if i is None:\n        return array_ops.reshape(op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])), array_ops.shape(x))\n    i = math_ops.cast(i, dtypes.int32)\n    if i.get_shape().ndims == 0:\n        return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0))\n    return op(x, i, v)", "docstring": "Applies an inplace op on (x, i, v).\n\nop is one of gen_array_ops.alias_inplace_update,\ngen_array_ops.alias_inplace_add, or gen_array_ops.alias_inplace_sub.\n\nIf i is None, x and v must be the same shape. Computes\nx op v;\nIf i is a scalar, x has a rank 1 higher than v's. Computes\nx[i, :] op v;\nOtherwise, x and v must have the same rank. Computes\nx[i, :] op v;\n\nArgs:\nx: A Tensor.\ni: None, a scalar or a vector.\nv: A Tensor.\nop: alias_inplace_update, alias_inplace_add, or alias_inplace_sub.\n\nReturns:\nReturns x.", "source": "github-repos"}
{"code": "def signUserCsr(self, xcsr, signas, outp=None):\n    pkey = xcsr.get_pubkey()\n    name = xcsr.get_subject().CN\n    return self.genUserCert(name, csr=pkey, signas=signas, outp=outp)", "docstring": "Signs a user CSR with a CA keypair.\n\nArgs:\nxcsr (OpenSSL.crypto.X509Req): The certificate signing request.\nsignas (str): The CA keypair name to sign the CSR with.\noutp (synapse.lib.output.Output): The output buffer.\n\nExamples:\ncdir.signUserCsr(mycsr, 'myca')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the public key and certificate objects.", "source": "codesearchnet"}
{"code": "def attach(self, engine, log_handler, event_name):\n        \n        if event_name not in State.event_to_attr:\n            raise RuntimeError(\"Unknown event name '{}'\".format(event_name))\n\n        engine.add_event_handler(event_name, log_handler, self, event_name)", "docstring": "Attach the logger to the engine and execute `log_handler` function at `event_name` events.\n\nArgs:\nengine (Engine): engine object.\nlog_handler (callable): a logging handler to execute\nevent_name: event to attach the logging handler to. Valid events are from :class:`~ignite.engine.Events`\nor any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.", "source": "juraj-google-style"}
{"code": "def save_screenshot(self, filename, quietly=False):\n    imgData = self.take_screenshot()\n    try:\n        with open(filename, 'wb') as f:\n            f.write(b64decode(imgData.encode('ascii')))\n    except IOError as err:\n        if (not quietly):\n            raise err", "docstring": "Save the screenshot to local.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nfilename(str): The path to save the image.\nquietly(bool): If True, omit the IOError when\nfailed to save the image.\n\nReturns:\nWebElement Object.\n\nRaises:\nWebDriverException.\nIOError.", "source": "codesearchnet"}
{"code": "def _ParsePerformanceOptions(self, options):\n    self._buffer_size = getattr(options, 'buffer_size', 0)\n    if self._buffer_size:\n        try:\n            if (self._buffer_size[(- 1)].lower() == 'm'):\n                self._buffer_size = int(self._buffer_size[:(- 1)], 10)\n                self._buffer_size *= self._BYTES_IN_A_MIB\n            else:\n                self._buffer_size = int(self._buffer_size, 10)\n        except ValueError:\n            raise errors.BadConfigOption('Invalid buffer size: {0!s}.'.format(self._buffer_size))\n    self._queue_size = self.ParseNumericOption(options, 'queue_size')", "docstring": "Parses the performance options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):\n  \n  try:\n    result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)\n  except ValueError as e:\n    raise tokenizer.ParseError(str(e))\n  tokenizer.NextToken()\n  return result", "docstring": "Consumes an integer number from tokenizer.\n\nArgs:\ntokenizer: A tokenizer used to parse the number.\nis_signed: True if a signed integer must be parsed.\nis_long: True if a long integer must be parsed.\n\nReturns:\nThe integer parsed.\n\nRaises:\nParseError: If an integer with given characteristics couldn't be consumed.", "source": "juraj-google-style"}
{"code": "def get_sample_dataset(dataset_properties):\n    \n    kwargs = dataset_properties.copy()\n    data_type = kwargs.pop('type')\n    if data_type == 'multiclass':\n        try:\n            X, y = datasets.make_classification(random_state=8, **kwargs)\n            splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n        except Exception as e:\n            raise exceptions.UserError(repr(e))\n    elif data_type == 'iris':\n        X, y = datasets.load_iris(return_X_y=True)\n        splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n    elif data_type == 'mnist':\n        X, y = datasets.load_digits(return_X_y=True)\n        splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n    elif data_type == 'breast_cancer':\n        X, y = datasets.load_breast_cancer(return_X_y=True)\n        splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n    elif data_type == 'boston':\n        X, y = datasets.load_boston(return_X_y=True)\n        splits = model_selection.KFold(n_splits=2, random_state=8).split(X)\n    elif data_type == 'diabetes':\n        X, y = datasets.load_diabetes(return_X_y=True)\n        splits = model_selection.KFold(n_splits=2, random_state=8).split(X)\n    else:\n        raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type']))\n    return X, y, splits", "docstring": "Returns sample dataset\n\nArgs:\ndataset_properties (dict): Dictionary corresponding to the properties of the dataset\nused to verify the estimator and metric generators.\n\nReturns:\nX (array-like): Features array\n\ny (array-like): Labels array\n\nsplits (iterator): This is an iterator that returns train test splits for\ncross-validation purposes on ``X`` and ``y``.", "source": "juraj-google-style"}
{"code": "def getEntity(self, name):\n        \n        return lock_and_call(\n            lambda: Entity(self._impl.getEntity(name)),\n            self._lock\n        )", "docstring": "Get entity corresponding to the specified name (looks for it in all\ntypes of entities).\n\nArgs:\nname: Name of the entity.\n\nRaises:\nTypeError: if the specified entity does not exist.\n\nReturns:\nThe AMPL entity with the specified name.", "source": "juraj-google-style"}
{"code": "def user_picklist(i_info, command):\n    \n    valid_entry = False\n    awsc.get_all_aminames(i_info)\n    list_instances(i_info, \"\", True)\n    # NOTE: the first part of this prompt string was truncated during\n    # extraction (it contained a '#'); reconstructed approximately here.\n    msg_txt = (\"Enter {0}#{1} of instance to {3} (1-{4})\"\n               \" [{2}0 aborts{1}]: \".format(C_WARN, C_NORM, C_TI,\n                                            command, len(i_info)))\n    while not valid_entry:\n        entry_raw = obtain_input(msg_txt)\n        try:\n            entry_int = int(entry_raw)\n        except ValueError:\n            entry_int = 999\n        (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command)\n    return tar_idx", "docstring": "Display list of instances matching args and ask user to select target.\n\nInstance list displayed and user asked to enter the number corresponding\nto the desired target instance, or '0' to abort.\n\nArgs:\ni_info (dict): information on instances and details.\ncommand (str): command specified on the command line.\n\nReturns:\ntar_idx (int): the dictionary index number of the targeted instance.", "source": "juraj-google-style"}
{"code": "def convert_phase(component, subcomponent=SubComponent.UNSPECIFIED):\n    if component not in Component:\n        raise ValueError('Given component name not found')\n    if subcomponent not in SubComponent:\n        raise ValueError('Given subcomponent name not found')\n    if subcomponent != SubComponent.UNSPECIFIED and subcomponent.component != component:\n        raise ValueError(\"component and subcomponent name don't match\")\n\n    def report_error(error_data: converter_error_data_pb2.ConverterErrorData):\n        error_data.component = component.value\n        if not error_data.subcomponent:\n            error_data.subcomponent = subcomponent.name\n        tflite_metrics = metrics.TFLiteConverterMetrics()\n        tflite_metrics.set_converter_error(error_data)\n\n    def report_error_message(error_message: Text):\n        error_data = converter_error_data_pb2.ConverterErrorData()\n        error_data.error_message = error_message\n        report_error(error_data)\n\n    def actual_decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                return func(*args, **kwargs)\n            except ConverterError as converter_error:\n                if converter_error.errors:\n                    for error_data in converter_error.errors:\n                        report_error(error_data)\n                else:\n                    report_error_message(str(converter_error))\n                raise converter_error from None\n            except Exception as error:\n                report_error_message(str(error))\n                raise error from None\n        return wrapper\n    return actual_decorator", "docstring": "The decorator to identify converter component and subcomponent.\n\nArgs:\ncomponent: Converter component name.\nsubcomponent: Converter subcomponent name.\n\nReturns:\nForward the result from the wrapped function.\n\nRaises:\nValueError: if component and subcomponent name is not valid.", "source": "github-repos"}
{"code": "def dropout_no_scaling(x, keep_prob):\n    if (keep_prob == 1.0):\n        return x\n    mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)\n    return (x * cast_like(mask, x))", "docstring": "Like tf.nn.dropout, but does not scale up.  Works on integers also.\n\nArgs:\nx: a Tensor\nkeep_prob: a floating point number\n\nReturns:\nTensor of the same shape as x.", "source": "codesearchnet"}
{"code": "def stopService(self):\n    self._service.factory.stopTrying()\n    (yield self._service.factory.stopFactory())\n    (yield service.MultiService.stopService(self))", "docstring": "Gracefully stop the service.\n\nReturns:\ndefer.Deferred: a Deferred which is triggered when the service has\nfinished shutting down.", "source": "codesearchnet"}
{"code": "def patch_traces(self, project_id, traces):\n        \n        traces_pb = _traces_mapping_to_pb(traces)\n        self._gapic_api.patch_traces(project_id, traces_pb)", "docstring": "Sends new traces to Stackdriver Trace or updates existing traces.\n\nArgs:\nproject_id (Optional[str]): ID of the Cloud project where the trace\ndata is stored.\ntraces (dict): Required. The traces to be patched in the API call.", "source": "juraj-google-style"}
{"code": "def dependency_to_rpm(dep, runtime):\n    \n    logger.debug('Dependencies provided: {0} runtime: {1}.'.format(\n        dep, runtime))\n    converted = []\n    if not len(dep.specs):\n        converted.append(['Requires', dep.project_name])\n    else:\n        for ver_spec in dep.specs:\n            if ver_spec[0] == '!=':\n                converted.append(\n                    ['Conflicts', dep.project_name, '=', ver_spec[1]])\n            elif ver_spec[0] == '==':\n                converted.append(\n                    ['Requires', dep.project_name, '=', ver_spec[1]])\n            else:\n                converted.append(\n                    ['Requires', dep.project_name, ver_spec[0], ver_spec[1]])\n\n    if not runtime:\n        for conv in converted:\n            conv[0] = \"Build\" + conv[0]\n    logger.debug('Converted dependencies: {0}.'.format(converted))\n\n    return converted", "docstring": "Converts a dependency got by pkg_resources.Requirement.parse()\nto RPM format.\nArgs:\ndep - a dependency retrieved by pkg_resources.Requirement.parse()\nruntime - whether the returned dependency should be runtime (True)\nor build time (False)\nReturns:\nList of semi-SPECFILE dependencies (package names are not properly\nconverted yet).\nFor example: [['Requires', 'jinja2'],\n['Conflicts', 'jinja2', '=', '2.0.1']]", "source": "juraj-google-style"}
{"code": "def get_arg_value(node, arg_name, arg_pos=None):\n    if arg_name is not None:\n        for kw in node.keywords:\n            if kw.arg == arg_name:\n                return (True, kw.value)\n    if arg_pos is not None:\n        idx = 0\n        for arg in node.args:\n            if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):\n                continue\n            if idx == arg_pos:\n                return (True, arg)\n            idx += 1\n    return (False, None)", "docstring": "Get the value of an argument from a ast.Call node.\n\nThis function goes through the positional and keyword arguments to check\nwhether a given argument was used, and if so, returns its value (the node\nrepresenting its value).\n\nThis cannot introspect *args or **args, but it safely handles *args in\nPython3.5+.\n\nArgs:\nnode: The ast.Call node to extract arg values from.\narg_name: The name of the argument to extract.\narg_pos: The position of the argument (in case it's passed as a positional\nargument).\n\nReturns:\nA tuple (arg_present, arg_value) containing a boolean indicating whether\nthe argument is present, and its value in case it is.", "source": "github-repos"}
{"code": "def speed_difference(points):\n    \n    data = [0]\n    for before, after in pairwise(points):\n        data.append(before.vel - after.vel)\n    return data", "docstring": "Computes the speed difference between each adjacent point\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\n\nReturns:\n:obj:`list` of float: Speed difference between each pair of adjacent\npoints, starting with 0 for the first point", "source": "juraj-google-style"}
{"code": "def get_meas_los(self, user_lo_config):\n        \n        try:\n            _m_los = self.default_meas_los.copy()\n        except KeyError:\n            raise PulseError('Default measurement frequencies not exist.')\n\n        for channel, lo_freq in user_lo_config.meas_lo_dict().items():\n            _m_los[channel.index] = lo_freq\n\n        if _m_los == self.default_meas_los:\n            return None\n        return _m_los", "docstring": "Embed default meas LO frequencies from backend and format them to list object.\nIf configured lo frequency is the same as default, this method returns `None`.\n\nArgs:\nuser_lo_config (LoConfig): A dictionary of LOs to format.\n\nReturns:\nlist: A list of meas LOs.\n\nRaises:\nPulseError: when LO frequencies are missing.", "source": "juraj-google-style"}
{"code": "def _make_3d(field, twod):\n    \n    shp = list(field.shape)\n    if twod and 'X' in twod:\n        shp.insert(1, 1)\n    elif twod:\n        shp.insert(0, 1)\n    return field.reshape(shp)", "docstring": "Add a dimension to field if necessary.\n\nArgs:\nfield (numpy.array): the field that needs to be 3d.\ntwod (str): 'XZ', 'YZ' or None depending on what is relevant.\nReturns:\nnumpy.array: reshaped field.", "source": "juraj-google-style"}
{"code": "def generate_reciprocal_vectors_squared(a1, a2, a3, encut):\n    \n    for vec in genrecip(a1, a2, a3, encut):\n        yield np.dot(vec, vec)", "docstring": "Generate reciprocal vector magnitudes within the cutoff along the specified\nlattice vectors.\nArgs:\na1: Lattice vector a (in Bohrs)\na2: Lattice vector b (in Bohrs)\na3: Lattice vector c (in Bohrs)\nencut: Reciprocal vector energy cutoff\n\nReturns:\n[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2\ndetermined by a1, a2, a3 and whose magnitude is less than gcut^2.", "source": "juraj-google-style"}
{"code": "def load_from_files(files, globs: Optional[Dict[str, Any]]=None, set_up: Optional[Callable[[Any], None]]=None, tear_down: Optional[Callable[[Any], None]]=None) -> doctest.DocFileSuite:\n    if globs is None:\n        globs = {}\n    files = [os.fspath(f) for f in files]\n    globs['_print_if_not_none'] = _print_if_not_none\n    return doctest.DocFileSuite(*files, module_relative=False, parser=FencedCellParser(fence_label='python'), globs=globs, setUp=set_up, tearDown=tear_down, checker=FencedCellOutputChecker(), optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL | doctest.DONT_ACCEPT_BLANKLINE)", "docstring": "Creates a doctest suite from the files list.\n\nArgs:\nfiles: A list of file paths to test.\nglobs: The global namespace the tests are run in.\nset_up: Run before each test, receives the test as argument.\ntear_down: Run after each test, receives the test as argument.\n\nReturns:\nA DocFileSuite containing the tests.", "source": "github-repos"}
{"code": "def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):\n    protein_df = pd.read_feather(protein_feather).set_index('index')\n    from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE\n    aggregators = {'aa_count_bulk': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'], 'subseqs': ['metal_2_5D', 'metal_3D']}, 'aa_count_carb': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_chrg': {'residues': _aa_property_dict_one['Charged'], 'subseqs': ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_poschrg': {'residues': _aa_property_dict_one['Basic'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_negchrg': {'residues': _aa_property_dict_one['Acidic'], 'subseqs': ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']}, 'aa_count_tmstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'], 'subseqs': ['tm_2D', 'tm_3D']}, 'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'], 'subseqs': ['tm_2D', 'tm_3D']}, 'aa_count_dis': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'], 'subseqs': ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'dna_2_5D']}, 'aa_count_ord': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'], 'subseqs': ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D', 'dna_2_5D']}}\n    for (suffix, info) in aggregators.items():\n        agg_residues = info['residues']\n        for prefix in info['subseqs']:\n            to_add_idxes = []\n            for agg_res in agg_residues:\n                to_add_idx = ((prefix + '_aa_count_') + agg_res)\n                if (to_add_idx in protein_df.index):\n                    to_add_idxes.append(to_add_idx)\n            subseq_agged_col = protein_df.loc[to_add_idxes, :].sum()\n            protein_df.loc[((prefix + '_') + suffix)] = subseq_agged_col\n    if length_filter_pid:\n        keep_cols = protein_df.loc['aa_count_total'][(protein_df.loc['aa_count_total'] > (protein_df.at[('aa_count_total', 'K12')] * length_filter_pid))].index\n        protein_df = protein_df[keep_cols]\n    if copynum_scale:\n        if (not isinstance(copynum_df, pd.DataFrame)):\n            raise ValueError('Please supply copy numbers')\n        protein_id = op.basename(protein_feather).split('_protein')[0]\n        if (protein_id in copynum_df.index):\n            copynum = copynum_df.at[(protein_id, 'copynum')]\n            if (copynum > 0):\n                protein_df = (protein_df * copynum)\n    return protein_df", "docstring": "Load a feather of amino acid counts for a protein.\n\nArgs:\nprotein_feather (str): path to feather file\ncopynum_scale (bool): if counts should be multiplied by protein copy number\ncopynum_df (DataFrame): DataFrame of copy numbers\n\nReturns:\nDataFrame: of counts with some aggregated together", "source": "codesearchnet"}
{"code": "def getText(page, output = \"text\"):\n    \n    CheckParent(page)\n    dl = page.getDisplayList()\n    \n    formats = (\"text\", \"html\", \"json\", \"xml\", \"xhtml\", \"dict\", \"rawdict\")\n    \n    images = (0, 1, 1, 0, 1, 1, 1)\n    try:\n        f = formats.index(output.lower())\n    except:\n        f = 0\n    flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE\n    if images[f]:\n        flags |= TEXT_PRESERVE_IMAGES\n    tp = dl.getTextPage(flags)\n    t = tp._extractText(f)\n    del dl\n    del tp\n    return t", "docstring": "Extract a document page's text.\n\nArgs:\noutput: (str) text, html, dict, json, rawdict, xhtml or xml.\n\nReturns:\nthe output of TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or extractXML respectively. Default (and the fallback for misspelled choices) is \"text\".", "source": "juraj-google-style"}
{"code": "def getattr(self, c, attr, default=None, match_only=None):\n    matching_decor = self.get_decor(c, match_only=match_only)\n    try:\n        return getattr(matching_decor, attr)\n    except AttributeError:\n        return default", "docstring": "Get the attribute of a component.\n\nArgs:\nc (component): The component to look up.\nattr (str): The attribute to get.\ndefault (str): What to return in the event of no match.\nmatch_only (list of str): The component attributes to include in the\ncomparison. Default: All of them.\n\nReturns:\nobj. The specified attribute of the matching Decor in the Legend.", "source": "codesearchnet"}
{"code": "def FromString(val):\n        \n        \n\n        if isinstance(val, bytes):\n            val = val.decode('utf-8')\n\n        try:\n            return ContractParameterType[val]\n        except Exception as e:\n            \n            pass\n\n        \n        try:\n            if isinstance(val, (bytearray, bytes)):\n                int_val = int.from_bytes(val, 'little')\n            else:\n                int_val = int.from_bytes(binascii.unhexlify(val), 'little')\n        except (binascii.Error, TypeError) as e:\n            \n            int_val = int(val)\n\n        return ContractParameterType(int_val)", "docstring": "Create a ContractParameterType object from a str\n\nArgs:\nval (str): the value to be converted to a ContractParameterType.\nval can be hex encoded (b'07'), int (7), string int (\"7\"), or string literal (\"String\")\n\nReturns:\nContractParameterType", "source": "juraj-google-style"}
{"code": "def DeregisterHelper(cls, helper_class):\n    helper_name = helper_class.NAME.lower()\n    if (helper_name not in cls._helper_classes):\n        raise KeyError('Helper class not set for name: {0:s}.'.format(helper_class.NAME))\n    del cls._helper_classes[helper_name]", "docstring": "Deregisters a helper class.\n\nThe helper classes are identified based on their lower case name.\n\nArgs:\nhelper_class (type): class object of the argument helper.\n\nRaises:\nKeyError: if helper class is not set for the corresponding name.", "source": "codesearchnet"}
{"code": "def destroy_unit(self, unit):\n    if isinstance(unit, Unit):\n        unit = unit.name\n    else:\n        unit = str(unit)\n    self._single_request('Units.Delete', unitName=unit)\n    return True", "docstring": "Delete a unit from the cluster\n\nArgs:\nunit (str, Unit): The Unit, or name of the unit to delete\n\nReturns:\nTrue: The unit was deleted\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "codesearchnet"}
{"code": "def scatter(self, indices, value, name=None):\n    return self._implementation.scatter(indices, value, name=name)", "docstring": "Scatter the values of a `Tensor` in specific indices of a `TensorArray`.\n\nArgs:\nindices: A `1-D` `Tensor` taking values in `[0, max_value)`.  If the\n`TensorArray` is not dynamic, `max_value=size()`.\nvalue: (N+1)-D.  Tensor of type `dtype`.  The Tensor to unpack.\nname: A name for the operation (optional).\n\nReturns:\nA new TensorArray object with flow that ensures the scatter occurs.\nUse this object for all subsequent operations.\n\nRaises:\nValueError: if the shape inference fails.", "source": "github-repos"}
{"code": "def circuit_to_dag(circuit):\n    dagcircuit = DAGCircuit()\n    dagcircuit.name = circuit.name\n    for register in circuit.qregs:\n        dagcircuit.add_qreg(register)\n    for register in circuit.cregs:\n        dagcircuit.add_creg(register)\n    for (instruction, qargs, cargs) in circuit.data:\n        if (instruction.control is None):\n            control = None\n        else:\n            control = (instruction.control[0], instruction.control[1])\n        dagcircuit.apply_operation_back(instruction.copy(), qargs, cargs, control)\n    return dagcircuit", "docstring": "Build a ``DAGCircuit`` object from a ``QuantumCircuit``.\n\nArgs:\ncircuit (QuantumCircuit): the input circuit.\n\nReturn:\nDAGCircuit: the DAG representing the input circuit.", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    return {'node': [v.to_dict() for v in self.vertices], 'edge': [e.to_dict() for e in self.edges]}", "docstring": "Returns a simplified dictionary representing the Graph.\n\nReturns:\nA dictionary that can easily be serialized to JSON.", "source": "codesearchnet"}
{"code": "def _prepare_4d_attention_mask(attention_mask: Optional[torch.Tensor], sequence_length: int, dtype: torch.dtype, device: torch.device, is_causal: bool=True) -> Optional[torch.Tensor]:\n    min_value = torch.finfo(dtype).min if dtype.is_floating_point else torch.iinfo(dtype).min\n    if attention_mask is not None:\n        attention_mask = attention_mask.view(attention_mask.shape[0], 1, 1, -1)\n        attention_mask = attention_mask * min_value\n    if is_causal:\n        causal_mask = torch.triu(torch.ones((sequence_length, sequence_length), dtype=dtype, device=device) * min_value, diagonal=1)\n        causal_mask = causal_mask.view(1, 1, sequence_length, sequence_length)\n        if attention_mask is not None:\n            attention_mask = torch.minimum(attention_mask, causal_mask)\n        else:\n            attention_mask = causal_mask\n    return attention_mask", "docstring": "Creates 4D attention mask and combines causal and padding masks if needed.\n\nArgs:\nattention_mask: Optional tensor of shape (batch_size, seq_length) containing padding mask\nsequence_length: Length of the sequence\ndtype: Data type of the mask\ndevice: Device of the mask\nis_causal: Whether to apply causal masking\n\nReturns:\n4D attention mask of shape (batch_size, 1, seq_length, seq_length)", "source": "github-repos"}
{"code": "def set_weights(distribution_strategy, dist_model, weights):\n    assign_ops = []\n    for layer in dist_model.layers:\n        num_param = len(layer.weights)\n        layer_weights = weights[:num_param]\n        for sw, w in zip(layer.weights, layer_weights):\n            if ops.executing_eagerly_outside_functions():\n                sw.assign(w)\n            else:\n                assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))\n        weights = weights[num_param:]\n    if not ops.executing_eagerly_outside_functions():\n        backend.get_session(assign_ops).run(assign_ops)", "docstring": "Sets the weights of the replicated models.\n\nThe weights of the replicated models are set to the weights of the original\nmodel. The weights of the replicated model are Mirrored variables and hence\nwe need to use the `update` call within a DistributionStrategy scope.\n\nArgs:\ndistribution_strategy: DistributionStrategy used to distribute training\nand validation.\ndist_model: The replicated models on the different devices.\nweights: The weights of the original model.", "source": "github-repos"}
{"code": "def _upload_artifacts_to_path(self, mirror=False):\n        \n        if not os.listdir(self.artifact_path) or not self.artifact_path:\n            raise S3ArtifactNotFound\n\n        uploaded = False\n        if self.s3props.get(\"content_metadata\"):\n            LOG.info(\"Uploading in multiple parts to set metadata\")\n            uploaded = self.content_metadata_uploads(mirror=mirror)\n\n        if not uploaded:\n            cmd = self._get_upload_cmd(mirror=mirror)\n            result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)\n            LOG.debug(\"Upload Command Output: %s\", result.stdout)\n\n        LOG.info(\"Uploaded artifacts to %s bucket\", self.bucket)", "docstring": "Recursively upload directory contents to S3.\n\nArgs:\nmirror (bool): If true, uses a flat directory structure instead of nesting under a version.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    if position_ids is None:\n        if input_ids is not None:\n            position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids, past_key_values_length=past_key_values_length)\n        else:\n            position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def _count_condition(values, weights=None, metrics_collections=None, updates_collections=None):\n    check_ops.assert_type(values, dtypes.bool)\n    count = metric_variable([], dtypes.float32, name='count')\n    values = math_ops.cast(values, dtypes.float32)\n    if weights is not None:\n        with ops.control_dependencies((check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):\n            weights = math_ops.cast(weights, dtypes.float32)\n            values = math_ops.multiply(values, weights)\n    value_tensor = _aggregate_variable(count, metrics_collections)\n    update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))\n    if updates_collections:\n        ops.add_to_collections(updates_collections, update_op)\n    return (value_tensor, update_op)", "docstring": "Sums the weights of cases where the given values are True.\n\nIf `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\nArgs:\nvalues: A `bool` `Tensor` of arbitrary size.\nweights: Optional `Tensor` whose rank is either 0, or the same rank as\n`values`, and must be broadcastable to `values` (i.e., all dimensions must\nbe either `1`, or the same as the corresponding `values` dimension).\nmetrics_collections: An optional list of collections that the metric\nvalue variable should be added to.\nupdates_collections: An optional list of collections that the metric update\nops should be added to.\n\nReturns:\nvalue_tensor: A `Tensor` representing the current value of the metric.\nupdate_op: An operation that accumulates the error from a batch of data.\n\nRaises:\nValueError: If `weights` is not `None` and its shape doesn't match `values`,\nor if either `metrics_collections` or `updates_collections` are not a list\nor tuple.", "source": "github-repos"}
{"code": "def add_byte_counter(self, reader):\n\n    def update_bytes_read(record_size, is_record_size=False, **kwargs):\n        if is_record_size:\n            self.read_counter.add_bytes_read(record_size)\n    if isinstance(reader, observable.ObservableMixin):\n        reader.register_observer(update_bytes_read)", "docstring": "Adds byte counter observer to a side input reader.\n\nArgs:\nreader: A reader that should inherit from ObservableMixin to have\nbytes tracked.", "source": "github-repos"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if self._uncompressed_stream_size is None:\n      self._uncompressed_stream_size = self._GetUncompressedStreamSize()\n\n    if self._uncompressed_stream_size < 0:\n      raise IOError('Invalid uncompressed stream size.')\n\n    if self._current_offset >= self._uncompressed_stream_size:\n      return b''\n\n    if self._realign_offset:\n      self._AlignUncompressedDataOffset(self._current_offset)\n      self._realign_offset = False\n\n    if size is None:\n      size = self._uncompressed_stream_size\n    if self._current_offset + size > self._uncompressed_stream_size:\n      size = self._uncompressed_stream_size - self._current_offset\n\n    uncompressed_data = b''\n\n    if size == 0:\n      return uncompressed_data\n\n    while size > self._uncompressed_data_size:\n      uncompressed_data = b''.join([\n          uncompressed_data,\n          self._uncompressed_data[self._uncompressed_data_offset:]])\n\n      remaining_uncompressed_data_size = (\n          self._uncompressed_data_size - self._uncompressed_data_offset)\n\n      self._current_offset += remaining_uncompressed_data_size\n      size -= remaining_uncompressed_data_size\n\n      if self._current_offset >= self._uncompressed_stream_size:\n        break\n\n      read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)\n      self._uncompressed_data_offset = 0\n      if read_count == 0:\n        break\n\n    if size > 0:\n      slice_start_offset = self._uncompressed_data_offset\n      slice_end_offset = slice_start_offset + size\n\n      uncompressed_data = b''.join([\n          uncompressed_data,\n          self._uncompressed_data[slice_start_offset:slice_end_offset]])\n\n      self._uncompressed_data_offset += size\n      self._current_offset += size\n\n    return uncompressed_data", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def apply_to_structure(self, structure):\n        \n        def_struct = structure.copy()\n        old_latt = def_struct.lattice.matrix\n        new_latt = np.transpose(np.dot(self, np.transpose(old_latt)))\n        def_struct.lattice = Lattice(new_latt)\n        return def_struct", "docstring": "Apply the deformation gradient to a structure.\n\nArgs:\nstructure (Structure object): the structure object to\nbe modified by the deformation", "source": "juraj-google-style"}
{"code": "def _call_api(self, verb, url, **request_kwargs):\n    # NOTE: the URL literal was truncated during extraction; reconstructed here\n    # as the GitHub API (v3) host joined with the given path.\n    api = 'https://api.github.com{}'.format(url)\n    auth_headers = {'Authorization': 'token {}'.format(self.api_token)}\n    headers = {**auth_headers, **request_kwargs.pop('headers', {})}\n    return getattr(requests, verb)(api, headers=headers, **request_kwargs)", "docstring": "Perform a github API call\n\nArgs:\nverb (str): Can be \"post\", \"put\", or \"get\"\nurl (str): The base URL with a leading slash for Github API (v3)\nauth (str or HTTPBasicAuth): A Github API token or a HTTPBasicAuth object", "source": "codesearchnet"}
{"code": "def prepare_question_encoder(inputs, hparams):\n  \n  encoder_input = inputs\n  \n  encoder_padding = common_attention.embedding_to_padding(encoder_input)\n  ignore_padding = common_attention.attention_bias_ignore_padding(\n      encoder_padding)\n  encoder_self_attention_bias = ignore_padding\n  if hparams.pos == \"timing\":\n    encoder_input = common_attention.add_timing_signal_1d(encoder_input)\n  elif hparams.pos == \"emb\":\n    encoder_input = common_attention.add_positional_embedding(\n        encoder_input, hparams.max_length, \"inputs_positional_embedding\",\n        None)\n  return (encoder_input, encoder_self_attention_bias)", "docstring": "Prepare question encoder.\n\nArgs:\ninputs: a Tensor.\nhparams: run hyperparameters\n\nReturns:\nencoder_input: a Tensor, bottom of encoder stack\nencoder_self_attention_bias: a bias tensor for use in encoder self-attention", "source": "juraj-google-style"}
{"code": "def ExtractEvents(\n      self, parser_mediator, registry_key, codepage='cp1252', **kwargs):\n    \n    self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)\n\n    if registry_key.name == 'RecentDocs':\n      \n      \n      for subkey in registry_key.GetSubkeys():\n        self._ParseMRUListExKey(parser_mediator, subkey, codepage=codepage)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"}
{"code": "def easeOutBounce(n):\n    _checkRange(n)\n    if (n < (1 / 2.75)):\n        return ((7.5625 * n) * n)\n    elif (n < (2 / 2.75)):\n        n -= (1.5 / 2.75)\n        return (((7.5625 * n) * n) + 0.75)\n    elif (n < (2.5 / 2.75)):\n        n -= (2.25 / 2.75)\n        return (((7.5625 * n) * n) + 0.9375)\n    else:\n        n -= (2.65 / 2.75)\n        return (((7.5625 * n) * n) + 0.984375)", "docstring": "A bouncing tween function that hits the destination and then bounces to rest.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def _load_certificate(location):\n    if location.startswith('https://'):\n        _log.info('Downloading x509 certificate from %s', location)\n        with requests.Session() as session:\n            # NOTE: the arguments to mount() were truncated during extraction;\n            # a plain HTTPAdapter is assumed here.\n            session.mount('https://', requests.adapters.HTTPAdapter())\n            response = session.get(location, timeout=30)\n            response.raise_for_status()\n            return response.text\n    else:\n        _log.info('Loading local x509 certificate from %s', location)\n        with open(location, 'rb') as fd:\n            return fd.read().decode('ascii')", "docstring": "Load a certificate from the given location.\n\nArgs:\nlocation (str): The location to load. This can either be an HTTPS URL or an absolute file\npath. This is intended to be used with PEM-encoded certificates and therefore assumes\nASCII encoding.\n\nReturns:\nstr: The PEM-encoded certificate as a unicode string.\n\nRaises:\nrequests.exception.RequestException: Any exception requests could raise.\nIOError: If the location provided could not be opened and read.", "source": "codesearchnet"}
{"code": "def _schedule_shards(cls, spec, readers, queue_name, base_path, mr_state):\n    shard_states = []\n    for (shard_number, input_reader) in enumerate(readers):\n        shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)\n        shard_state.shard_description = str(input_reader)\n        shard_states.append(shard_state)\n    existing_shard_states = db.get((shard.key() for shard in shard_states))\n    existing_shard_keys = set((shard.key() for shard in existing_shard_states if (shard is not None)))\n    db.put((shard for shard in shard_states if (shard.key() not in existing_shard_keys)), config=util.create_datastore_write_config(spec))\n    writer_class = spec.mapper.output_writer_class()\n    writers = ([None] * len(readers))\n    if writer_class:\n        for (shard_number, shard_state) in enumerate(shard_states):\n            writers[shard_number] = writer_class.create(mr_state.mapreduce_spec, shard_state.shard_number, (shard_state.retries + 1), mr_state.writer_state)\n    for (shard_number, (input_reader, output_writer)) in enumerate(zip(readers, writers)):\n        shard_id = model.ShardState.shard_id_from_number(spec.mapreduce_id, shard_number)\n        task = MapperWorkerCallbackHandler._state_to_task(model.TransientShardState(base_path, spec, shard_id, 0, input_reader, input_reader, output_writer=output_writer, handler=spec.mapper.handler), shard_states[shard_number])\n        MapperWorkerCallbackHandler._add_task(task, spec, queue_name)", "docstring": "Prepares shard states and schedules their execution.\n\nEven though this method does not schedule shard task and save shard state\ntransactionally, it's safe for taskqueue to retry this logic because\nthe initial shard_state for each shard is the same from any retry.\nThis is an important yet reasonable assumption on model.ShardState.\n\nArgs:\nspec: mapreduce specification as MapreduceSpec.\nreaders: list of InputReaders describing shard splits.\nqueue_name: The queue to run this job on.\nbase_path: The base url path of mapreduce callbacks.\nmr_state: The MapReduceState of current job.", "source": "codesearchnet"}
{"code": "def compare_jsone_task_definition(parent_link, rebuilt_definitions):\n    diffs = []\n    for compare_definition in rebuilt_definitions['tasks']:\n        if ('taskId' in compare_definition):\n            del compare_definition['taskId']\n        compare_definition = remove_empty_keys(compare_definition)\n        runtime_definition = remove_empty_keys(parent_link.task)\n        diff = list(dictdiffer.diff(compare_definition, runtime_definition))\n        if diff:\n            diffs.append(pprint.pformat(diff))\n            continue\n        log.info('{}: Good.'.format(parent_link.name))\n        break\n    else:\n        error_msg = \"{} {}: the runtime task doesn't match any rebuilt definition!\\n{}\".format(parent_link.name, parent_link.task_id, pprint.pformat(diffs))\n        log.critical(error_msg)\n        raise CoTError(error_msg)", "docstring": "Compare the json-e rebuilt task definition vs the runtime definition.\n\nArgs:\nparent_link (LinkOfTrust): the parent link to test.\nrebuilt_definitions (dict): the rebuilt task definitions.\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def multisorted(items, *keys):\n    \n    if len(keys) == 0:\n        keys = [asc()]\n    for key in reversed(keys):\n        items = sorted(items, key=key.func, reverse=key.reverse)\n    return items", "docstring": "Sort by multiple attributes.\n\nArgs:\nitems: An iterable series to be sorted.\n*keys: Key objects which extract key values from the items.\nThe first key will be the most significant, and the\nlast key the least significant. If no key functions\nare provided, the items will be sorted in ascending\nnatural order.\nReturns:\nA list of items sorted according to keys.", "source": "juraj-google-style"}
{"code": "def process(self, elem: t.Any):\n    pass", "docstring": "Process is the operation that will be rate limited.\n\nResults will be yielded each time time the process method is called.\n\nArgs:\nelem: The individual element to process.\n\nReturns:\nOutput can be anything, output will be the output of the RateLimit\nPTransform.", "source": "github-repos"}
{"code": "def convert_videos_to_summaries(input_videos, output_videos, target_videos, tag, decode_hparams, display_ground_truth=False):\n    fps = decode_hparams.frames_per_second\n    border_percent = decode_hparams.border_percent\n    max_outputs = decode_hparams.max_display_outputs\n    target_steps = target_videos.shape[1]\n    all_summaries = []\n    input_videos = create_border(input_videos, color='blue', border_percent=border_percent)\n    target_videos = create_border(target_videos, color='red', border_percent=border_percent)\n    output_videos = create_border(output_videos, color='red', border_percent=border_percent)\n    all_input = np.concatenate((input_videos, target_videos), axis=1)\n    all_output = np.concatenate((input_videos, output_videos), axis=1)\n    (output_summ_vals, _) = common_video.py_gif_summary(('%s/output' % tag), all_output, max_outputs=max_outputs, fps=fps, return_summary_value=True)\n    all_summaries.extend(output_summ_vals)\n    if display_ground_truth:\n        (input_summ_vals, _) = common_video.py_gif_summary(('%s/input' % tag), all_input, max_outputs=max_outputs, fps=fps, return_summary_value=True)\n        all_summaries.extend(input_summ_vals)\n    iterable = zip(output_videos[:max_outputs, :target_steps], target_videos[:max_outputs])\n    for (ind, (input_video, output_video)) in enumerate(iterable):\n        (t, h, w, c) = input_video.shape\n        input_frames = np.reshape(input_video, ((t * h), w, c))\n        output_frames = np.reshape(output_video, ((t * h), w, c))\n        all_frames = np.concatenate((input_frames, output_frames), axis=1)\n        tag = ('input/output/%s_sample_%d' % (tag, ind))\n        frame_by_frame_summ = image_utils.image_to_tf_summary_value(all_frames, tag=tag)\n        all_summaries.append(frame_by_frame_summ)\n    return all_summaries", "docstring": "Converts input, output and target videos into video summaries.\n\nArgs:\ninput_videos: 5-D NumPy array, (NTHWC) conditioning frames.\noutput_videos: 5-D NumPy array, (NTHWC) model predictions.\ntarget_videos: 5-D NumPy array, (NTHWC) target frames.\ntag: tf summary tag.\ndecode_hparams: HParams.\ndisplay_ground_truth: Whether or not to display ground truth videos.\n\nReturns:\nsummaries: a list of tf frame-by-frame and video summaries.", "source": "codesearchnet"}
{"code": "def find(self, username):\n    filter = ['(uid={})'.format(username)]\n    results = self.client.search(filter)\n    if (len(results) < 1):\n        raise ldap_tools.exceptions.NoUserFound('User ({}) not found'.format(username))\n        return\n    elif (len(results) > 1):\n        raise ldap_tools.exceptions.TooManyResults('Multiple users found. Please narrow your search.')\n        return\n    else:\n        return results", "docstring": "Find user with given username.\n\nArgs:\nusername Username of the user to search for\n\nRaises:\nldap_tools.exceptions.NoUserFound: No users returned by LDAP\nldap_tools.exceptions.TooManyResults:\nMultiple users returned by LDAP", "source": "codesearchnet"}
{"code": "def _prepare_summary_table(rows):\n    if (not rows):\n        return []\n    key_field = 'job-name'\n    if (key_field not in rows[0]):\n        key_field = 'job-id'\n    grouped = collections.defaultdict((lambda : collections.defaultdict((lambda : []))))\n    for row in rows:\n        grouped[row.get(key_field, '')][row.get('status', '')] += [row]\n    new_rows = []\n    for job_key in sorted(grouped.keys()):\n        group = grouped.get(job_key, None)\n        canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE', 'CANCEL']\n        for status in (canonical_status + sorted(group.keys())):\n            if (status not in group):\n                continue\n            task_count = len(group[status])\n            del group[status]\n            if task_count:\n                summary_row = collections.OrderedDict()\n                summary_row[key_field] = job_key\n                summary_row['status'] = status\n                summary_row['task-count'] = task_count\n                new_rows.append(summary_row)\n    return new_rows", "docstring": "Create a new table that is a summary of the input rows.\n\nAll with the same (job-name or job-id, status) go together.\n\nArgs:\nrows: the input rows, a list of dictionaries.\nReturns:\nA new row set of summary information.", "source": "codesearchnet"}
{"code": "def on_test_begin(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_test_begin(logs)", "docstring": "Calls the `on_test_begin` methods of its callbacks.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "class BaseModelOutputWithCLSToken(ModelOutput):\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    cls_token_value: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for model's outputs, with potential hidden states and attentions.\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nSequence of hidden-states at the output of the last layer of the model.\ncls_token_value (`torch.FloatTensor` of shape `(batch_size, 1, hidden_size)`):\nClassification token at the output of the last layer of the model.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\nshape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\nplus the initial embedding outputs.", "source": "github-repos"}
{"code": "def __init__(self, session_root, watch_fn=None, thread_name_filter=None):\n    self._session_root = session_root\n    self._watch_fn = watch_fn\n    self._thread_name_filter = thread_name_filter\n    self._session_wrapper = None", "docstring": "Create a local debugger command-line interface (CLI) hook.\n\nArgs:\nsession_root: See doc of\n`dumping_wrapper.DumpingDebugWrapperSession.__init__`.\nwatch_fn: See doc of\n`dumping_wrapper.DumpingDebugWrapperSession.__init__`.\nthread_name_filter: Regular-expression white list for threads on which the\nwrapper session will be active. See doc of `BaseDebugWrapperSession` for\nmore details.", "source": "github-repos"}
{"code": "def rename(self, name):\n        \n        return self.client.api.rename(self.id, name)", "docstring": "Rename this container. Similar to the ``docker rename`` command.\n\nArgs:\nname (str): New name for the container\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "async def _get_person_json(self, id_, url_params=None):\n        \n        url = self.url_builder(\n            'person/{person_id}',\n            dict(person_id=id_),\n            url_params=url_params or OrderedDict(),\n        )\n        data = await self.get_data(url)\n        return data", "docstring": "Retrieve raw person JSON by ID.\n\nArguments:\nid_ (:py:class:`int`): The person's TMDb ID.\nurl_params (:py:class:`dict`): Any additional URL parameters.\n\nReturns:\n:py:class:`dict`: The JSON data.", "source": "juraj-google-style"}
{"code": "def get_evaluation_parameter(self, parameter_name, default_value=None):\n    if (('evaluation_parameters' in self._expectations_config) and (parameter_name in self._expectations_config['evaluation_parameters'])):\n        return self._expectations_config['evaluation_parameters'][parameter_name]\n    else:\n        return default_value", "docstring": "Get an evaluation parameter value that has been stored in meta.\n\nArgs:\nparameter_name (string): The name of the parameter to store.\ndefault_value (any): The default value to be returned if the parameter is not found.\n\nReturns:\nThe current value of the evaluation parameter.", "source": "codesearchnet"}
{"code": "def _send_data(self, data, start_offset, file_len):\n    headers = {}\n    end_offset = ((start_offset + len(data)) - 1)\n    if data:\n        headers['content-range'] = ('bytes %d-%d/%s' % (start_offset, end_offset, file_len))\n    else:\n        headers['content-range'] = ('bytes */%s' % file_len)\n    (status, response_headers, content) = self._api.put_object(self._path_with_token, payload=data, headers=headers)\n    if (file_len == '*'):\n        expected = 308\n    else:\n        expected = 200\n    errors.check_status(status, [expected], self._path, headers, response_headers, content, {'upload_path': self._path_with_token})", "docstring": "Send the block to the storage service.\n\nThis is a utility method that does not modify self.\n\nArgs:\ndata: data to send in str.\nstart_offset: start offset of the data in relation to the file.\nfile_len: an int if this is the last data to append to the file.\nOtherwise '*'.", "source": "codesearchnet"}
{"code": "def all_v2_summary_ops():\n    if context.executing_eagerly():\n        return None\n    return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)", "docstring": "Returns all V2-style summary ops defined in the current default graph.\n\nThis includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except\nfor `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but\ndoes *not* include TF 1.x tf.summary ops.\n\nReturns:\nList of summary ops, or None if called under eager execution.", "source": "github-repos"}
{"code": "def build_inception_graph(self):\n    image_str_tensor = tf.placeholder(tf.string, shape=[None])\n    image = tf.map_fn(_util.decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)\n    image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n    image = tf.subtract(image, 0.5)\n    image = tf.multiply(image, 2.0)\n    with slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):\n        (_, end_points) = _inceptionlib.inception_v3(image, is_training=False)\n    inception_embeddings = end_points['PreLogits']\n    inception_embeddings = tf.squeeze(inception_embeddings, [1, 2], name='SpatialSqueeze')\n    return (image_str_tensor, inception_embeddings)", "docstring": "Builds an inception graph and add the necessary input & output tensors.\n\nTo use other Inception models modify this file. Also preprocessing must be\nmodified accordingly.\n\nSee tensorflow/contrib/slim/python/slim/nets/inception_v3.py for\ndetails about InceptionV3.\n\nReturns:\ninput_jpeg: A placeholder for jpeg string batch that allows feeding the\nInception layer with image bytes for prediction.\ninception_embeddings: The embeddings tensor.", "source": "codesearchnet"}
{"code": "def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):\n        \n        \n        parsed_date = self._parse_date(dataset_date, date_format)\n        if dataset_end_date is None:\n            self.set_dataset_date_from_datetime(parsed_date)\n        else:\n            parsed_end_date = self._parse_date(dataset_end_date, date_format)\n            self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)", "docstring": "Set dataset date from string using specified format. If no format is supplied, the function will guess.\nFor unambiguous formats, this should be fine.\n\nArgs:\ndataset_date (str): Dataset date string\ndataset_end_date (Optional[str]): Dataset end date string\ndate_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def add(self, payload=None):\n        \n        try:\n            db = self._client[self.database]\n            col = db[WORKFLOW_DATA_COLLECTION_NAME]\n            return str(col.insert_one({\n                DataStoreDocumentSection.Meta:\n                    payload if isinstance(payload, dict) else {},\n                DataStoreDocumentSection.Data: {}\n            }).inserted_id)\n\n        except ConnectionFailure:\n            raise DataStoreNotConnected()", "docstring": "Adds a new document to the data store and returns its id.\n\nArgs:\npayload (dict): Dictionary of initial data that should be stored\nin the new document in the meta section.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.\n\nReturns:\nstr: The id of the newly created document.", "source": "juraj-google-style"}
{"code": "def _countIdentities(self, nodes):\n    return len([x for x in nodes if x.op == 'Identity'])", "docstring": "Count the number of \"Identity\" op types in the list of proto nodes.\n\nArgs:\nnodes: NodeDefs of the graph.\n\nReturns:\nThe number of nodes with op type \"Identity\" found.", "source": "github-repos"}
{"code": "def add_comment(self, app_id, record_id, field_id, message):\n    self._swimlane.request('post', 'app/{0}/record/{1}/{2}/comment'.format(app_id, record_id, field_id), json={'message': message, 'createdDate': pendulum.now().to_rfc3339_string()})", "docstring": "Directly add a comment to a record without retrieving the app or record first\n\nWarnings:\nDoes not perform any app, record, or field ID validation\n\nArgs:\napp_id (str): Full App ID string\nrecord_id (str): Full parent Record ID string\nfield_id (str): Full field ID to target reference field on parent Record string\nmessage (str): New comment message body", "source": "codesearchnet"}
{"code": "def load_user_config(vcs):\n    config_path = os.path.join(vcs.path, 'eci.yaml')\n    if (not os.path.exists(config_path)):\n        raise ConfigNotFoundError\n    with open(config_path, 'r') as f:\n        try:\n            config = yaml.safe_load(f)\n        except yaml.YAMLError:\n            raise ConfigFormatError\n    if (not isinstance(config, dict)):\n        raise ConfigFormatError\n    for (k, v) in _default_config.iteritems():\n        config.setdefault(k, v)\n    for (k, v) in _config_types.iteritems():\n        if (not isinstance(config[k], v)):\n            raise ConfigFormatError\n    return config", "docstring": "Load the user config\n\nArgs:\nvcs (easyci.vcs.base.Vcs) - the vcs object for the current project\n\nReturns:\ndict - the config\n\nRaises:\nConfigFormatError\nConfigNotFoundError", "source": "codesearchnet"}
{"code": "def should_collapse(self, value: Any, name: Optional[str], root_path: KeyPath, parent: Any, collapse_level: Optional[int]=1, uncollapse: Union[KeyPathSet, base.NodeFilter]=None) -> bool:\n    if collapse_level is None or collapse_level > 0:\n        return False\n    if callable(uncollapse):\n        return not uncollapse(root_path, value, parent)\n    if root_path in uncollapse:\n        return False\n    if name is not None and isinstance(value, (bool, int, float, str, type(None))):\n        return False\n    return True", "docstring": "Returns True if the object should be collapsed.\n\nArgs:\nvalue: The value to render.\nname: The referred field name of the value.\nroot_path: The root path of the value.\nparent: The parent of the value.\ncollapse_level: The level of collapsing. If 0, the object will be\ncollapsed (without showing its sub-nodes). If 1, the immediate sub-nodes\nwill be shown in collapsed form. If None, all sub-tree will be shown.\nuncollapse: Indivdual nodes to uncollapse. It can be a KeyPathSet or a\nfunction that takes (root_path, value, parent) and returns a KeyPathSet.\n\nReturns:\nTrue if the object should be collapsed.", "source": "github-repos"}
{"code": "def _peer_get_bfd(self, tx, rx, multiplier):\n    tx = self._callback(tx, handler='get_config')\n    rx = self._callback(rx, handler='get_config')\n    multiplier = self._callback(multiplier, handler='get_config')\n    tx = pynos.utilities.return_xml(str(tx))\n    rx = pynos.utilities.return_xml(str(rx))\n    multiplier = pynos.utilities.return_xml(str(multiplier))\n    config = pynos.utilities.merge_xml(tx, rx)\n    return pynos.utilities.merge_xml(config, multiplier)", "docstring": "Get and merge the `bfd` config from global BGP.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\ntx: XML document with the XML to get the transmit interval.\nrx: XML document with the XML to get the receive interval.\nmultiplier: XML document with the XML to get the interval\nmultiplier.\n\nReturns:\nMerged XML document.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def adjoint(self, name: str='adjoint') -> 'LinearOperator':\n    if self.is_self_adjoint is True:\n        return self\n    with self._name_scope(name):\n        return self._linop_adjoint()", "docstring": "Returns the adjoint of the current `LinearOperator`.\n\nGiven `A` representing this `LinearOperator`, return `A*`.\nNote that calling `self.adjoint()` and `self.H` are equivalent.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`LinearOperator` which represents the adjoint of this `LinearOperator`.", "source": "github-repos"}
{"code": "def slice_inputs(indices_dataset, inputs):\n    inputs = array_slicing.convert_to_sliceable(self._inputs, target_backend='tensorflow')\n    inputs = tree.lists_to_tuples(inputs)\n    dataset = tf.data.Dataset.zip((indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat()))\n\n    def grab_batch(i, data):\n\n        def grab_one(x):\n            if isinstance(x, array_slicing.TensorflowSparseWrapper):\n                return array_slicing.slice_tensorflow_sparse_wrapper(x, i)\n            if isinstance(x, (list, tuple, dict)):\n                return None\n            if tf.is_tensor(x):\n                return tf.gather(x, i, axis=0)\n            return x\n        return tree.traverse(grab_one, data)\n    dataset = dataset.map(grab_batch, num_parallel_calls=tf.data.AUTOTUNE)\n    options = tf.data.Options()\n    options.experimental_optimization.apply_default_optimizations = False\n    if self._shuffle:\n        options.experimental_external_state_policy = tf.data.experimental.ExternalStatePolicy.IGNORE\n    dataset = dataset.with_options(options)\n    return dataset", "docstring": "Slice inputs into a Dataset of batches.\n\nGiven a Dataset of batch indices and the unsliced inputs,\nthis step slices the inputs in a parallelized fashion\nand produces a dataset of input batches.\n\nArgs:\nindices_dataset: A Dataset of batched indices.\ninputs: A python data structure that contains the inputs,\ntargets, and possibly sample weights.\n\nReturns:\nA Dataset of input batches matching the batch indices.", "source": "github-repos"}
{"code": "def _make_signature_checker(api_signature, signature):\n    if not (isinstance(signature, dict) and all((isinstance(k, (str, int)) for k in signature))):\n        raise TypeError('signatures must be dictionaries mapping parameter names to type annotations.')\n    checkers = []\n    param_names = list(api_signature.parameters)\n    for param_name, param_type in signature.items():\n        if isinstance(param_name, int) and param_name < len(api_signature.parameters):\n            param_name = list(api_signature.parameters.values())[param_name].name\n        param = api_signature.parameters.get(param_name, None)\n        if param is None:\n            raise ValueError(f'signature includes annotation for unknown parameter {param_name!r}.')\n        if param.kind not in (tf_inspect.Parameter.POSITIONAL_ONLY, tf_inspect.Parameter.POSITIONAL_OR_KEYWORD):\n            raise ValueError(f\"Dispatch currently only supports type annotations for positional parameters; can't handle annotation for {param.kind!r} parameter {param_name}.\")\n        checker = make_type_checker(param_type)\n        index = param_names.index(param_name)\n        checkers.append((index, checker))\n    return _api_dispatcher.PySignatureChecker(checkers)", "docstring": "Builds a PySignatureChecker for the given type signature.\n\nArgs:\napi_signature: The `inspect.Signature` of the API whose signature is\nbeing checked.\nsignature: Dictionary mapping parameter names to type annotations.\n\nReturns:\nA `PySignatureChecker`.", "source": "github-repos"}
{"code": "def generate_surface_vectors(self, film_millers, substrate_millers):\n        \n        vector_sets = []\n\n        for f in film_millers:\n            film_slab = SlabGenerator(self.film, f, 20, 15,\n                                      primitive=False).get_slab()\n            film_vectors = reduce_vectors(film_slab.lattice.matrix[0],\n                                          film_slab.lattice.matrix[1])\n\n            for s in substrate_millers:\n                substrate_slab = SlabGenerator(self.substrate, s, 20, 15,\n                                               primitive=False).get_slab()\n                substrate_vectors = reduce_vectors(\n                    substrate_slab.lattice.matrix[0],\n                    substrate_slab.lattice.matrix[1])\n\n                vector_sets.append((film_vectors, substrate_vectors, f, s))\n\n        return vector_sets", "docstring": "Generates the film/substrate slab combinations for a set of given\nmiller indicies\n\nArgs:\nfilm_millers(array): all miller indices to generate slabs for\nfilm\nsubstrate_millers(array): all miller indicies to generate slabs\nfor substrate", "source": "juraj-google-style"}
{"code": "def convert_to_dataframe(ds: xr.Dataset) -> pd.DataFrame:\n    if len(ds.coords):\n        df = ds.to_dataframe().reset_index()\n    else:\n        ds = ds.compute().to_dict(data='list')\n        df = pd.DataFrame({k: [v['data']] for k, v in ds['data_vars'].items()})\n    return df", "docstring": "Convert xarray Dataset to pandas DataFrame.\n\nArgs:\nds (xr.Dataset): xarray Dataset to be converted.\n\nReturns:\npd.DataFrame: Pandas DataFrame containing the data from the xarray Dataset.", "source": "github-repos"}
{"code": "def RegisterHelper(cls, resolver_helper):\n    if (resolver_helper.type_indicator in cls._resolver_helpers):\n        raise KeyError('Resolver helper object already set for type indicator: {0!s}.'.format(resolver_helper.type_indicator))\n    cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper", "docstring": "Registers a path specification resolver helper.\n\nArgs:\nresolver_helper (ResolverHelper): resolver helper.\n\nRaises:\nKeyError: if resolver helper object is already set for the corresponding\ntype indicator.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0) + [1]\n    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nSet to True if the token list is already formatted with special tokens for the model\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def convert_structure_to_signature(structure, arg_names=None, signature_context=None):\n\n    def encode_arg(arg, path):\n        \n        if isinstance(arg, tensor_lib.Tensor):\n            user_specified_name = None\n            try:\n                user_specified_name = compat.as_str(arg.op.get_attr('_user_specified_name'))\n            except (ValueError, AttributeError):\n                pass\n            if path and user_specified_name and (user_specified_name != path[0]):\n                name = user_specified_name\n            else:\n                name = tensor_lib.sanitize_spec_name('_'.join((str(p) for p in path)))\n            return tensor_lib.TensorSpec(arg.shape, arg.dtype, name)\n        if isinstance(arg, resource_variable_ops.ResourceVariable):\n            return trace_type.from_value(arg, signature_context)\n        if isinstance(arg, composite_tensor.CompositeTensor):\n            return arg._type_spec\n        if isinstance(arg, (int, float, bool, str, type(None), dtypes.DType, tensor_lib.TensorSpec, type_spec.TypeSpec)):\n            return arg\n        return UnknownArgument()\n    flattened = nest.flatten_with_tuple_paths(structure)\n    if arg_names:\n        if len(arg_names) != len(structure):\n            raise ValueError(\"Passed in arg_names don't match actual signature (%s).\" % arg_names)\n        flattened = [((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened]\n    mapped = [encode_arg(arg, path) for path, arg in flattened]\n    return nest.pack_sequence_as(structure, mapped)", "docstring": "Convert a potentially nested structure to a signature.\n\nArgs:\nstructure: Structure to convert, where top level collection is a list or a\ntuple.\narg_names: Optional list of arguments that has equal number of elements as\n`structure` and is used for naming corresponding TensorSpecs.\nsignature_context: TraceType InternalTracingContext to generate alias_ids\nfor mutable objects, like ResourceVariables.\n\nReturns:\nIdentical structure that has TensorSpec objects instead of Tensors and\nUnknownArgument instead of any unsupported types.", "source": "github-repos"}
{"code": "def _extract_token_timestamps(self, generate_outputs, alignment_heads, time_precision=0.02, num_frames=None, num_input_ids=None):\n    cross_attentions = []\n    for i in range(self.config.decoder_layers):\n        cross_attentions.append(torch.cat([x[i] for x in generate_outputs.cross_attentions], dim=2))\n    weights = torch.stack([cross_attentions[l][:, h] for l, h in alignment_heads])\n    weights = weights.permute([1, 0, 2, 3])\n    weight_length = None\n    if 'beam_indices' in generate_outputs:\n        weight_length = (generate_outputs.beam_indices != -1).sum(-1).max()\n        beam_indices = generate_outputs.beam_indices[:, :weight_length]\n        if num_input_ids is not None and num_input_ids > 1:\n            weight_length += num_input_ids - 1\n            beam_indices_first_step_unrolled = torch.ones(beam_indices.shape[0], num_input_ids - 1, device=beam_indices.device, dtype=torch.long) * beam_indices[:, 0:1]\n            unrolled_beam_indices = torch.cat([beam_indices_first_step_unrolled, beam_indices], dim=-1)\n        else:\n            unrolled_beam_indices = beam_indices\n        unrolled_beam_indices = unrolled_beam_indices.masked_fill(unrolled_beam_indices == -1, 0)\n        weights = torch.stack([torch.index_select(weights[:, :, i, :], dim=0, index=unrolled_beam_indices[:, i]) for i in range(unrolled_beam_indices.shape[1])], dim=2)\n    input_length = weight_length or cross_attentions[0].shape[2]\n    batch_size = generate_outputs.sequences.shape[0]\n    timestamps = torch.zeros((batch_size, input_length + 1), dtype=torch.float32, device=generate_outputs.sequences.device)\n    if num_frames is not None:\n        if isinstance(num_frames, int):\n            weights = weights[..., :num_frames \n        elif isinstance(num_frames, (list, tuple, np.ndarray)) and len(np.unique(num_frames)) == 1:\n            weights = weights[..., :num_frames[0] \n        elif isinstance(num_frames, torch.Tensor) and len(torch.unique(num_frames)) == 1:\n            weights = weights[..., :num_frames[0] \n        else:\n            repeat_time = batch_size if isinstance(num_frames, int) else batch_size \n            num_frames = num_frames.cpu() if isinstance(num_frames, torch.Tensor) else num_frames\n            num_frames = np.repeat(num_frames, repeat_time)\n    if num_frames is None or isinstance(num_frames, int):\n        std = torch.std(weights, dim=-2, keepdim=True, unbiased=False)\n        mean = torch.mean(weights, dim=-2, keepdim=True)\n        weights = (weights - mean) / std\n        weights = _median_filter(weights, self.config.median_filter_width)\n        weights = weights.mean(dim=1)\n    for batch_idx in range(batch_size):\n        if num_frames is not None and isinstance(num_frames, (tuple, list, np.ndarray, torch.Tensor)):\n            matrix = weights[batch_idx, ..., :num_frames[batch_idx] \n            std = torch.std(matrix, dim=-2, keepdim=True, unbiased=False)\n            mean = torch.mean(matrix, dim=-2, keepdim=True)\n            matrix = (matrix - mean) / std\n            matrix = _median_filter(matrix, self.config.median_filter_width)\n            matrix = matrix.mean(dim=0)\n        else:\n            matrix = weights[batch_idx]\n        text_indices, time_indices = _dynamic_time_warping(-matrix.cpu().double().numpy())\n        jumps = np.pad(np.diff(text_indices), (1, 0), constant_values=1).astype(bool)\n        jump_times = time_indices[jumps] * time_precision\n        timestamps[batch_idx, 1:] = torch.tensor(jump_times)\n    return timestamps", 
"docstring": "Calculates token-level timestamps using the encoder-decoder cross-attentions and dynamic time-warping (DTW) to\nmap each output token to a position in the input audio. If `num_frames` is specified, the encoder-decoder\ncross-attentions will be cropped before applying DTW.\n\nReturns:\ntensor containing the timestamps in seconds for each predicted token", "source": "github-repos"}
{"code": "def run(self, input_dir, output_file_path):\n    \n    logging.info('Running defense %s', self.submission_id)\n    tmp_run_dir = self.temp_copy_extracted_submission()\n    output_dir = os.path.dirname(output_file_path)\n    output_filename = os.path.basename(output_file_path)\n    cmd = ['--network=none',\n           '-m=24g',\n           '--cpus=3.75',\n           '-v', '{0}:/input_images:ro'.format(input_dir),\n           '-v', '{0}:/output_data'.format(output_dir),\n           '-v', '{0}:/code'.format(tmp_run_dir),\n           '-w', '/code',\n           self.container_name,\n           './' + self.entry_point,\n           '/input_images',\n           '/output_data/' + output_filename]\n    elapsed_time_sec = self.run_with_time_limit(cmd)\n    sudo_remove_dirtree(tmp_run_dir)\n    return elapsed_time_sec", "docstring": "Runs defense inside Docker.\n\nArgs:\ninput_dir: directory with input (adversarial images).\noutput_file_path: path of the output file.\n\nReturns:\nhow long it took to run submission in seconds", "source": "juraj-google-style"}
{"code": "def validate(cls, job_config):\n    \n    if job_config.output_writer_cls != cls:\n      raise errors.BadWriterParamsError(\n          \"Expect output writer class %r, got %r.\" %\n          (cls, job_config.output_writer_cls))", "docstring": "Validates relevant parameters.\n\nThis method can validate fields which it deems relevant.\n\nArgs:\njob_config: an instance of map_job.JobConfig.\n\nRaises:\nerrors.BadWriterParamsError: required parameters are missing or invalid.", "source": "juraj-google-style"}
{"code": "def load_info(cat):\n    res = _load_yaml_(f'{PKG_PATH}/markets/{cat}.yml')\n    root = os.environ.get('BBG_ROOT', '').replace('\\\\', '/')\n    if (not root):\n        return res\n    for (cat, ovrd) in _load_yaml_(f'{root}/markets/{cat}.yml').items():\n        if isinstance(ovrd, dict):\n            if (cat in res):\n                res[cat].update(ovrd)\n            else:\n                res[cat] = ovrd\n        if (isinstance(ovrd, list) and isinstance(res[cat], list)):\n            res[cat] += ovrd\n    return res", "docstring": "Load parameters for assets\n\nArgs:\ncat: category\n\nReturns:\ndict\n\nExamples:\n>>> import pandas as pd\n>>>\n>>> assets = load_info(cat='assets')\n>>> all(cat in assets for cat in ['Equity', 'Index', 'Curncy', 'Corp'])\nTrue\n>>> os.environ['BBG_PATH'] = ''\n>>> exch = load_info(cat='exch')\n>>> pd.Series(exch['EquityUS']).allday\n[400, 2000]\n>>> test_root = f'{PKG_PATH}/tests'\n>>> os.environ['BBG_PATH'] = test_root\n>>> ovrd_exch = load_info(cat='exch')\n>>> # Somehow os.environ is not set properly in doctest environment\n>>> ovrd_exch.update(_load_yaml_(f'{test_root}/markets/exch.yml'))\n>>> pd.Series(ovrd_exch['EquityUS']).allday\n[300, 2100]", "source": "codesearchnet"}
{"code": "def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS):\n    handler = partial(_length_scalar_handler, scalar_factory)\n    return _bind_length_handlers(tids, handler, lns)", "docstring": "Binds a set of scalar handlers for an inclusive range of low-nibble values.\n\nArgs:\ntids (Sequence[int]): The Type IDs to bind to.\nscalar_factory (Callable): The factory for the scalar parsing function.\nThis function can itself return a function representing a thunk to defer the\nscalar parsing or a direct value.\nlns (Sequence[int]): The low-nibble lengths to bind to.", "source": "codesearchnet"}
{"code": "def auth_criteria(self):\n    auth = {}\n    for attr in dir(self):\n        if (attr != 'auth_criteria'):\n            attribute = getattr(self, attr)\n            if (isinstance(attribute, Callable) and hasattr(attribute, '_service_auth')):\n                auth[getattr(self, attr)._service_auth] = attribute\n    return auth", "docstring": "This attribute provides the mapping of services to their auth requirement\n\nReturns:\n(dict) : the mapping from services to their auth requirements.", "source": "codesearchnet"}
{"code": "def _run_graph_for_calibration(float_model_dir: str, signature_keys: Sequence[str], tags: Collection[str], representative_dataset: rd.RepresentativeDatasetOrMapping, force_graph_mode_calibration: bool) -> None:\n    try:\n        _validate_representative_dataset(representative_dataset, signature_keys)\n    except Exception as ex:\n        raise ValueError('Invalid representative dataset.') from ex\n    representative_dataset_map = representative_dataset\n    if not isinstance(representative_dataset, Mapping):\n        representative_dataset_map = {signature_keys[0]: representative_dataset}\n    try:\n        if context.executing_eagerly() and (not force_graph_mode_calibration):\n            logging.info('Calibration step is executed in eager mode.')\n            _run_graph_for_calibration_eager_mode(float_model_dir, tags, representative_dataset_map)\n        else:\n            logging.info('Calibration step is executed in graph mode.')\n            _run_graph_for_calibration_graph_mode(float_model_dir, tags, representative_dataset_map)\n    except Exception as ex:\n        raise ValueError('Failed to run graph for post-training quantization calibration.') from ex\n    logging.info('Calibration step complete.')", "docstring": "Runs the graph for calibration using representative datasets.\n\nArgs:\nfloat_model_dir: Path to the model to calibrate.\nsignature_keys: Sequence of keys identifying SignatureDef containing inputs\nand outputs.\ntags: Collection of tags identifying the MetaGraphDef within the SavedModel\nto analyze.\nrepresentative_dataset: An iterator that returns a dictionary of {input_key:\ninput_value} or a mapping from signature keys to such iterators. When\n`signature_keys` contains more than one signature key,\n`representative_datsaet` should be a mapping that maps each signature keys\nto the corresponding representative dataset.\nforce_graph_mode_calibration: If set to true, it forces calibration in graph\nmodel instead of eager mode when the context is in eager mode.\n\nRaises:\nValueError iff:\n* The representative dataset format is invalid.\n* It fails to run the functions using the representative datasets.", "source": "github-repos"}
{"code": "def _validate_cluster_spec(cluster_spec, task_type, task_id):\n    allowed_task_types = ('chief', 'worker', 'evaluator', 'ps', None)\n    cluster_spec = normalize_cluster_spec(cluster_spec)\n    if any((job not in allowed_task_types for job in cluster_spec.jobs)):\n        raise ValueError('Disallowed task type found in cluster spec. Allowed types are {} and the cluster spec is {}.'.format(allowed_task_types, cluster_spec))\n    if task_type not in allowed_task_types:\n        raise ValueError('Unrecognized task_type: {}, valid task types are: {}'.format(task_type, allowed_task_types))\n    if task_type and task_type not in cluster_spec.jobs and (task_type != 'evaluator'):\n        raise ValueError('`task_type` %r not found in cluster_spec.' % task_type)\n    if task_count(cluster_spec, 'chief') > 1:\n        raise ValueError(\"There must be at most one 'chief' job.\")\n    if task_count(cluster_spec, 'evaluator') > 1:\n        raise ValueError(\"There must be at most one 'evaluator' job.\")\n    if task_type in cluster_spec.jobs and task_id >= task_count(cluster_spec, task_type):\n        raise ValueError('The `task_id` %d exceeds the maximum id of %s.' % (task_id, task_type))", "docstring": "Validates `cluster_spec`.\n\nIt checks:\n1) task type is one of \"chief\", \"worker\", \"ps\", \"evaluator\", or not provided\n(None).\n2) whether there is such a task type as `task_type` in the `cluster_spec`. The\nonly exception is `evaluator`. In other words, it is still a valid\nconfiguration when `task_type` is `evaluator` but it doesn't appear in\n`cluster_spec`.\n3) whether there is at most one \"chief\" job.\n4) whether there is at most one \"evaluator\" job.\n5) whether the `task_id` is smaller than the number of tasks for that\nparticular `task_type`.\n\nArgs:\ncluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.\ntask_type: string indicating the type of the task.\ntask_id: the id of the `task_type` in this cluster.\n\nRaises:\nValueError: if `cluster_spec` fails any check.", "source": "github-repos"}
{"code": "def get_environment_details(zone, environment):\n    default_context = google.datalab.Context.default()\n    url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone, environment)))\n    return google.datalab.utils.Http.request(url, credentials=default_context.credentials)", "docstring": "Issues a request to Composer to get the environment details.\n\nArgs:\nzone: GCP zone of the composer environment\nenvironment: name of the Composer environment\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def __matches(s1, s2, ngrams_fn, n=3):\n    \n    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))\n    return ngrams1.intersection(ngrams2)", "docstring": "Returns the n-grams that match between two sequences\n\nSee also: SequenceMatcher.get_matching_blocks\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nset:", "source": "juraj-google-style"}
{"code": "def roll_to_business_day(self, date_tensor, roll_convention):\n    if roll_convention == constants.BusinessDayConvention.NONE:\n        return date_tensor\n    rolled_ordinals_table = self._compute_rolled_dates_table(roll_convention)\n    ordinals_with_offset = date_tensor.ordinal() - self._ordinal_offset + 1\n    rolled_ordinals = self._gather(rolled_ordinals_table, ordinals_with_offset)\n    with tf.control_dependencies(self._assert_ordinals_in_bounds(rolled_ordinals)):\n        return dt.from_ordinals(rolled_ordinals, validate=False)", "docstring": "Rolls the given dates to business dates according to given convention.\n\nArgs:\ndate_tensor: DateTensor of dates to roll from.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def __init__(self, flow_obj, parent_runner=None, runner_args=None,\n               token=None):\n    \n    self.token = token or flow_obj.token\n    self.parent_runner = parent_runner\n\n    \n    if parent_runner is not None:\n      self.queue_manager = parent_runner.queue_manager\n    else:\n      \n      self.queue_manager = queue_manager.QueueManager(token=self.token)\n      self.queue_manager.FreezeTimestamp()\n\n    self.queued_replies = []\n\n    self.outbound_lock = threading.Lock()\n    self.flow_obj = flow_obj\n\n    \n    if runner_args is not None:\n      self.runner_args = runner_args\n      self.session_id = self.GetNewSessionID()\n      self.flow_obj.urn = self.session_id\n\n      \n      self.context = self.InitializeContext(runner_args)\n      self.flow_obj.context = self.context\n      self.context.session_id = self.session_id\n\n    else:\n      \n      \n      \n      self.context = self.flow_obj.context\n\n      self.runner_args = self.flow_obj.runner_args\n\n    \n    self.flow_obj.urn = self.session_id = self.context.session_id\n\n    \n    \n    self.sent_replies = []", "docstring": "Constructor for the Flow Runner.\n\nArgs:\nflow_obj: The flow object this runner will run states for.\nparent_runner: The parent runner of this runner.\nrunner_args: A FlowRunnerArgs() instance containing initial values. If not\nspecified, we use the runner_args from the flow_obj.\ntoken: An instance of access_control.ACLToken security token.", "source": "juraj-google-style"}
{"code": "def _PrintCheckDependencyStatus(\n      self, dependency, result, status_message, verbose_output=True):\n    \n    if not result or dependency.is_optional:\n      if dependency.is_optional:\n        status_indicator = '[OPTIONAL]'\n      else:\n        status_indicator = '[FAILURE]'\n\n      print('{0:s}\\t{1:s}'.format(status_indicator, status_message))\n\n    elif verbose_output:\n      print('[OK]\\t\\t{0:s}'.format(status_message))", "docstring": "Prints the check dependency status.\n\nArgs:\ndependency (DependencyDefinition): dependency definition.\nresult (bool): True if the Python module is available and conforms to\nthe minimum required version, False otherwise.\nstatus_message (str): status message.\nverbose_output (Optional[bool]): True if output should be verbose.", "source": "juraj-google-style"}
{"code": "def switch_to_line_in(self, source=None):\n        \n        if source:\n            uid = source.uid\n        else:\n            uid = self.uid\n\n        self.avTransport.SetAVTransportURI([\n            ('InstanceID', 0),\n            ('CurrentURI', 'x-rincon-stream:{0}'.format(uid)),\n            ('CurrentURIMetaData', '')\n        ])", "docstring": "Switch the speaker's input to line-in.\n\nArgs:\nsource (SoCo): The speaker whose line-in should be played.\nDefault is line-in from the speaker itself.", "source": "juraj-google-style"}
{"code": "def set(self, key, samples, sampling_rate):\n    if (not np.issubdtype(samples.dtype, np.floating)):\n        raise ValueError('Samples are required as np.float32!')\n    if (len(samples.shape) > 1):\n        raise ValueError('Only single channel supported!')\n    self.raise_error_if_not_open()\n    if (key in self._file):\n        del self._file[key]\n    samples = (samples * MAX_INT16_VALUE).astype(np.int16)\n    dset = self._file.create_dataset(key, data=samples)\n    dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate", "docstring": "Set the samples and sampling-rate for the given key.\nExisting data will be overwritten.\nThe samples have to have ``np.float32`` datatype and values in\nthe range of -1.0 and 1.0.\n\nArgs:\nkey (str): A key to store the data for.\nsamples (numpy.ndarray): 1-D array of audio samples (np.float32).\nsampling_rate (int): The sampling-rate of the audio samples.\n\nNote:\nThe container has to be opened in advance.", "source": "codesearchnet"}
{"code": "def setValues(self, values):\n        \n        if isinstance(values, dict):\n            indices, values = list(zip(*values.items()))\n            indices = Utils.toTupleArray(indices)\n            if any(isinstance(value, basestring) for value in values):\n                values = list(map(str, values))\n                self._impl.setValuesTaStr(indices, values, len(values))\n            elif all(isinstance(value, Real) for value in values):\n                values = list(map(float, values))\n                self._impl.setValuesTaDbl(indices, values, len(values))\n            else:\n                raise TypeError\n        elif isinstance(values, (list, tuple)):\n            if any(isinstance(value, basestring) for value in values):\n                values = list(map(str, values))\n                self._impl.setValuesStr(values, len(values))\n            elif all(isinstance(value, Real) for value in values):\n                values = list(map(float, values))\n                self._impl.setValuesDbl(values, len(values))\n            else:\n                raise TypeError\n        else:\n            if np is not None and isinstance(values, np.ndarray):\n                self.setValues(DataFrame.fromNumpy(values).toList())\n                return\n            Entity.setValues(self, values)", "docstring": "Assign the values (string or float) to the parameter instances with the\nspecified indices, equivalent to the AMPL code:\n\n.. code-block:: ampl\n\nlet {i in indices} par[i] := values[i];\n\nArgs:\nvalues: list, dictionary or :class:`~amplpy.DataFrame` with the\nindices and the values to be set.\n\nRaises:\nTypeError: If called on a scalar parameter.", "source": "juraj-google-style"}
{"code": "def Print(x, data, message, **kwargs):  \n  \n  return PrintOperation(x, data, message, **kwargs).outputs[0]", "docstring": "Call tf.Print.\n\nArgs:\nx: a Tensor.\ndata: a list of Tensor\nmessage: a string\n**kwargs: keyword arguments to tf.Print\nReturns:\na Tensor which is identical in value to x", "source": "juraj-google-style"}
{"code": "def imrotate(img, angle, center=None, scale=1.0, border_value=0, auto_bound=False):\n    if ((center is not None) and auto_bound):\n        raise ValueError('`auto_bound` conflicts with `center`')\n    (h, w) = img.shape[:2]\n    if (center is None):\n        center = (((w - 1) * 0.5), ((h - 1) * 0.5))\n    assert isinstance(center, tuple)\n    matrix = cv2.getRotationMatrix2D(center, (- angle), scale)\n    if auto_bound:\n        cos = np.abs(matrix[(0, 0)])\n        sin = np.abs(matrix[(0, 1)])\n        new_w = ((h * sin) + (w * cos))\n        new_h = ((h * cos) + (w * sin))\n        matrix[(0, 2)] += ((new_w - w) * 0.5)\n        matrix[(1, 2)] += ((new_h - h) * 0.5)\n        w = int(np.round(new_w))\n        h = int(np.round(new_h))\n    rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)\n    return rotated", "docstring": "Rotate an image.\n\nArgs:\nimg (ndarray): Image to be rotated.\nangle (float): Rotation angle in degrees, positive values mean\nclockwise rotation.\ncenter (tuple): Center of the rotation in the source image, by default\nit is the center of the image.\nscale (float): Isotropic scale factor.\nborder_value (int): Border value.\nauto_bound (bool): Whether to adjust the image size to cover the whole\nrotated image.\n\nReturns:\nndarray: The rotated image.", "source": "codesearchnet"}
{"code": "def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False,\n                   deadline=None):\n    \n    if not isinstance(key, basestring):\n      raise TypeError('key must be a string; received %r' % key)\n    if not isinstance(for_cas, bool):\n      raise TypeError('for_cas must be a bool; received %r' % for_cas)\n    if namespace is None:\n      namespace = namespace_manager.get_namespace()\n    options = (for_cas, namespace, deadline)\n    batcher = self.memcache_get_batcher\n    if use_cache:\n      return batcher.add_once(key, options)\n    else:\n      return batcher.add(key, options)", "docstring": "An auto-batching wrapper for memcache.get() or .get_multi().\n\nArgs:\nkey: Key to set.  This must be a string; no prefix is applied.\nfor_cas: If True, request and store CAS ids on the Context.\nnamespace: Optional namespace.\ndeadline: Optional deadline for the RPC.\n\nReturns:\nA Future (!) whose return value is the value retrieved from\nmemcache, or None.", "source": "juraj-google-style"}
{"code": "def from_func_graph(name: Union[str, bytes], graph: func_graph_module.FuncGraph, attrs: Dict[str, attr_value_pb2.AttrValue], function_type: Optional[function_type_lib.FunctionType]=None, overwrite: bool=False) -> AtomicFunction:\n    if attrs and attributes_lib.IMPLEMENTS in attrs:\n        has_resource_vars = any((inp.dtype == dtypes.resource for inp in graph.inputs))\n        captured_inputs = graph.external_captures + graph.deferred_external_captures\n        assert not any((has_resource_vars, captured_inputs)), 'Function {name} has \"{attr}={value}\" attribute and thus can not depend on any tensors outside of its signature or modify variables. \\n\\nNote: variables are always captured and cause function re-tracing for every variable called.\\n  inputs: {inputs}\\n  captures: {captured}\\n\\nTo pass a variable to such function use  use variable.read_value().'.format(name=graph.name, attr=attributes_lib.IMPLEMENTS, value=attrs[attributes_lib.IMPLEMENTS], inputs=graph.inputs, captured=captured_inputs)\n    input_ops = set((arg.op for arg in graph.inputs))\n    operations = [op for op in graph.get_operations() if op not in input_ops]\n    graph_output_names = graph._output_names\n    if graph_output_names is not None and all((ops.tensor_id(t) in graph_output_names for t in graph.outputs)):\n        output_names = [compat.as_bytes(graph_output_names[ops.tensor_id(t)]) for t in graph.outputs]\n        if len(set(output_names)) != len(output_names):\n            output_names = []\n    else:\n        output_names = []\n    with graph._c_graph.get() as c_graph:\n        fn = pywrap_tf_session.TF_GraphToFunction_wrapper(c_graph, compat.as_str(name), False, [o._c_op for o in operations], [t._as_tf_output() for t in graph.inputs], [t._as_tf_output() for t in graph.outputs], output_names, [o._c_op for o in graph.control_outputs], [], None, compat.as_str(''))\n    attrs = attributes_lib.parse_func_attrs(attrs or {})\n    for attr_name, attr_value in attrs.items():\n        serialized = attr_value.SerializeToString()\n        pywrap_tf_session.TF_FunctionSetAttrValueProto(fn, compat.as_str(attr_name), serialized)\n    name = compat.as_bytes(name)\n    bound_context = context.context()\n    if overwrite and bound_context.has_function(name):\n        bound_context.remove_function(name)\n    bound_context.add_c_function(fn)\n    pywrap_tf_session.TF_DeleteFunction(fn)\n    call_options = CallOptions(collective_manager_ids_used=getattr(graph, 'collective_manager_ids_used', []), control_captures=graph.function_captures.control, is_stateful=any((op._is_stateful for op in operations)))\n    if not function_type:\n        function_type = function_type_utils.derive_from_graph(graph)\n    return AtomicFunction(name, bound_context, function_type, list(graph._functions.values()), call_options, cached_graph=graph)", "docstring": "Initializes an AtomicFunction from FuncGraph.\n\nArgs:\nname: str, the name for the created function.\ngraph: Graph, the graph containing the operations in the function\nattrs: dict mapping names of attributes to their AttrValue values\nfunction_type: known FunctionType to use, otherwise one is derived.\noverwrite: overwrites function definition in the current context if needed\n\nReturns:\nAn AtomicFunction instance.", "source": "github-repos"}
{"code": "def _create_produce_requests(self, collated):\n    requests = {}\n    for (node_id, batches) in six.iteritems(collated):\n        requests[node_id] = self._produce_request(node_id, self.config['acks'], self.config['request_timeout_ms'], batches)\n    return requests", "docstring": "Transfer the record batches into a list of produce requests on a\nper-node basis.\n\nArguments:\ncollated: {node_id: [RecordBatch]}\n\nReturns:\ndict: {node_id: ProduceRequest} (version depends on api_version)", "source": "codesearchnet"}
{"code": "def restore_site_properties(self, site_property='ff_map', filename=None):\n    if (not (self.control_params['filetype'] == 'pdb')):\n        raise ValueError()\n    filename = (filename or self.control_params['output'])\n    bma = BabelMolAdaptor.from_file(filename, 'pdb')\n    pbm = pb.Molecule(bma._obmol)\n    assert (len(pbm.residues) == sum([x['number'] for x in self.param_list]))\n    packed_mol = self.convert_obatoms_to_molecule(pbm.residues[0].atoms, residue_name=pbm.residues[0].name, site_property=site_property)\n    for resid in pbm.residues[1:]:\n        mol = self.convert_obatoms_to_molecule(resid.atoms, residue_name=resid.name, site_property=site_property)\n        for site in mol:\n            packed_mol.append(site.species, site.coords, properties=site.properties)\n    return packed_mol", "docstring": "Restore the site properties for the final packed molecule.\n\nArgs:\nsite_property (str):\nfilename (str): path to the final packed molecule.\n\nReturns:\nMolecule", "source": "codesearchnet"}
{"code": "def framebuffer(self, color_attachments=(), depth_attachment=None) -> 'Framebuffer':\n        \n\n        if type(color_attachments) is Texture or type(color_attachments) is Renderbuffer:\n            color_attachments = (color_attachments,)\n\n        ca_mglo = tuple(x.mglo for x in color_attachments)\n        da_mglo = None if depth_attachment is None else depth_attachment.mglo\n\n        res = Framebuffer.__new__(Framebuffer)\n        res.mglo, res._size, res._samples, res._glo = self.mglo.framebuffer(ca_mglo, da_mglo)\n        res._color_attachments = tuple(color_attachments)\n        res._depth_attachment = depth_attachment\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering.\nThe buffers for Framebuffer objects reference images from either Textures or Renderbuffers.\n\nArgs:\ncolor_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderbuffer` objects.\ndepth_attachment (Renderbuffer or Texture): The depth attachment.\n\nReturns:\n:py:class:`Framebuffer` object", "source": "juraj-google-style"}
{"code": "def extract_paths(self, paths, ignore_nopath):\n    try:\n        super().extract_paths(paths=paths, ignore_nopath=ignore_nopath)\n    except ExtractPathError as err:\n        LOGGER.debug('%s: failed extracting files: %s', self.vm.name(), err.message)\n        if self._has_guestfs:\n            self.extract_paths_dead(paths, ignore_nopath)\n        else:\n            raise", "docstring": "Extract the given paths from the domain\n\nAttempt to extract all files defined in ``paths`` with the method\ndefined in :func:`~lago.plugins.vm.VMProviderPlugin.extract_paths`,\nif it fails, and `guestfs` is available it will try extracting the\nfiles with guestfs.\n\nArgs:\npaths(list of tuples): files to extract in\n`[(src1, dst1), (src2, dst2)...]` format.\nignore_nopath(boolean): if True will ignore none existing paths.\n\nReturns:\nNone\n\nRaises:\n:exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing\npath was found on the VM, and `ignore_nopath` is False.\n:exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.", "source": "codesearchnet"}
{"code": "def get(self, key):\n        \n        lock.acquire()\n        try:\n            if key not in self:\n                return None\n\n            current_time = time.time()\n            if self[key].expire > current_time:\n                return self[key].value\n\n            \n            deletes = []\n            for k, val in self.items():\n                if val.expire <= current_time:\n                    deletes.append(k)\n            for k in deletes:\n                del self[k]\n\n            return None\n        finally:\n            lock.release()", "docstring": "Get an object from the cache\n\nArguments:\nkey (str): Cache key\n\nReturns:\nCached object", "source": "juraj-google-style"}
{"code": "def is_valid_isbn(isbn):\n    \n    length = len(isbn)\n\n    if length == 10:\n        return is_isbn10_valid(isbn)\n    elif length == 13:\n        return is_isbn13_valid(isbn)\n\n    return False", "docstring": "Validate given `isbn`. Wrapper for :func:`is_isbn10_valid`/\n:func:`is_isbn13_valid`.\n\nArgs:\nisbn (str/list): ISBN number as string or list of digits.\n\nNote:\nFunction doesn't require `isbn` type to be specified (it can be both\n10/13 isbn's versions).\n\nReturns:\nbool: ``True`` if ISBN is valid.", "source": "juraj-google-style"}
{"code": "def __init__(self, submission_id, submissions, storage_bucket):\n    \n    self.submission_id = submission_id\n    self.storage_bucket = storage_bucket\n    self.type = None\n    self.submission = None\n    if submission_id in submissions.attacks:\n      self.type = TYPE_NONTARGETED\n      self.submission = submissions.attacks[submission_id]\n    elif submission_id in submissions.targeted_attacks:\n      self.type = TYPE_TARGETED\n      self.submission = submissions.targeted_attacks[submission_id]\n    elif submission_id in submissions.defenses:\n      self.type = TYPE_DEFENSE\n      self.submission = submissions.defenses[submission_id]\n    else:\n      raise WorkerError(\n          'Submission with ID \"{0}\" not found'.format(submission_id))\n    self.submission_dir = None\n    self.extracted_submission_dir = None", "docstring": "Initializes ExecutableSubmission.\n\nArgs:\nsubmission_id: ID of the submissions\nsubmissions: instance of CompetitionSubmissions with all submissions\nstorage_bucket: storage bucket where all submissions are stored\n\nRaises:\nWorkerError: if submission was not found", "source": "juraj-google-style"}
{"code": "def prune_unused_nodes(meta_graph, signature_def):\n    graph = tf_v1.Graph()\n    with graph.as_default():\n        tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope='')\n        used_node_names = set()\n        for (_, tensor_def) in signature_def.outputs.items():\n            output_tensor = graph.get_tensor_by_name(tensor_def.name)\n            mark_backward(output_tensor, used_node_names)\n        node_filter_in_list = []\n        for node in meta_graph.graph_def.node:\n            if ((node.name in used_node_names) or (node.op == 'VarHandleOp')):\n                node_filter_in_list.append(node)\n        del meta_graph.graph_def.node[:]\n        meta_graph.graph_def.node.extend(node_filter_in_list)\n    del graph", "docstring": "Function to prune unused ops given a signature def.\n\nThis function does a graph traversal through from all outputs as\ndefined in the signature_def to collect all used nodes. Then, any\nnodes which are unused can be discarded. This is useful for graph which are\nexecuting eagerly or on TPUs.\n\nArgs:\nmeta_graph: The input/output MetaGraphDef for which we wish to prune.\nsignature_def: A SignatureDef which specifies the outputs from which we wish\nto start graph traversal.", "source": "codesearchnet"}
{"code": "def albedo(self, value=999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `albedo`'.format(value))\n    self._albedo = value", "docstring": "Corresponds to IDD Field `albedo`\n\nArgs:\nvalue (float): value for IDD Field `albedo`\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def write_auth(msg_type, profile_name, auth, cfg):\n    key_fmt = ((profile_name + '_') + msg_type)\n    pwd = []\n    for (k, v) in CONFIG[msg_type]['auth'].items():\n        pwd.append(auth[k])\n    if (len(pwd) > 1):\n        cfg.pwd[key_fmt] = ' :: '.join(pwd)\n    else:\n        cfg.pwd[key_fmt] = pwd[0]", "docstring": "Write the settings into the auth portion of the cfg.\n\nArgs:\n:msg_type: (str) message type to create config entry.\n:profile_name: (str) name of the profile entry\n:auth: (dict) auth parameters\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def set_global_step(self, new_global_step, name=None):\n    return gen_data_flow_ops.accumulator_set_global_step(self._accumulator_ref, math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64), name=name)", "docstring": "Sets the global time step of the accumulator.\n\nThe operation logs a warning if we attempt to set to a time step that is\nlower than the accumulator's own time step.\n\nArgs:\nnew_global_step: Value of new time step. Can be a variable or a constant\nname: Optional name for the operation.\n\nReturns:\nOperation that sets the accumulator's time step.", "source": "github-repos"}
{"code": "def _trigger(self):\n        \n        self._completed.set()\n        for callback in self._callbacks:\n            callback(self)", "docstring": "Trigger all callbacks registered to this Future.\n\nThis method is called internally by the batch once the batch\ncompletes.\n\nArgs:\nmessage_id (str): The message ID, as a string.", "source": "juraj-google-style"}
{"code": "def get_corner(self, time):\n    if (self.start_time <= time <= self.end_time):\n        diff = (time - self.start_time)\n        return (self.i[diff][(0, 0)], self.j[diff][(0, 0)])\n    else:\n        return ((- 1), (- 1))", "docstring": "Gets the corner array indices of the STObject at a given time that corresponds\nto the upper left corner of the bounding box for the STObject.\n\nArgs:\ntime: time at which the corner is being extracted.\n\nReturns:\ncorner index.", "source": "codesearchnet"}
{"code": "def decorate(self, name_or_func):\n    \n    if os.environ.get(\"SC2_NO_STOPWATCH\"):\n      return name_or_func if callable(name_or_func) else lambda func: func\n\n    def decorator(name, func):\n      @functools.wraps(func)\n      def _stopwatch(*args, **kwargs):\n        with self(name):\n          return func(*args, **kwargs)\n      return _stopwatch\n    if callable(name_or_func):\n      return decorator(name_or_func.__name__, name_or_func)\n    else:\n      return lambda func: decorator(name_or_func, func)", "docstring": "Decorate a function/method to check its timings.\n\nTo use the function's name:\n@sw.decorate\ndef func():\npass\n\nTo name it explicitly:\n@sw.decorate(\"name\")\ndef random_func_name():\npass\n\nArgs:\nname_or_func: the name or the function to decorate.\n\nReturns:\nIf a name is passed, returns this as a decorator, otherwise returns the\ndecorated function.", "source": "juraj-google-style"}
{"code": "def _PrintTSKPartitionIdentifiersOverview(\n      self, volume_system, volume_identifiers):\n    \n    header = 'The following partitions were found:\\n'\n    self._output_writer.Write(header)\n\n    column_names = ['Identifier', 'Offset (in bytes)', 'Size (in bytes)']\n    table_view = views.CLITabularTableView(column_names=column_names)\n\n    for volume_identifier in sorted(volume_identifiers):\n      volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n      if not volume:\n        raise errors.SourceScannerError(\n            'Partition missing for identifier: {0:s}.'.format(\n                volume_identifier))\n\n      volume_extent = volume.extents[0]\n      volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset)\n      volume_size = self._FormatHumanReadableSize(volume_extent.size)\n\n      table_view.AddRow([volume.identifier, volume_offset, volume_size])\n\n    self._output_writer.Write('\\n')\n    table_view.Write(self._output_writer)\n    self._output_writer.Write('\\n')", "docstring": "Prints an overview of TSK partition identifiers.\n\nArgs:\nvolume_system (dfvfs.TSKVolumeSystem): volume system.\nvolume_identifiers (list[str]): allowed volume identifiers.\n\nRaises:\nSourceScannerError: if a volume cannot be resolved from the volume\nidentifier.", "source": "juraj-google-style"}
{"code": "def crop(img, i, j, h, w):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    return img.crop((j, i, (j + w), (i + h)))", "docstring": "Crop the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be cropped.\ni (int): i in (i,j) i.e coordinates of the upper left corner.\nj (int): j in (i,j) i.e coordinates of the upper left corner.\nh (int): Height of the cropped image.\nw (int): Width of the cropped image.\n\nReturns:\nPIL Image: Cropped image.", "source": "codesearchnet"}
{"code": "def is_fit_to_structure(self, structure, tol=0.01):\n    return ((self - self.fit_to_structure(structure)) < tol).all()", "docstring": "Tests whether a tensor is invariant with respect to the\nsymmetry operations of a particular structure by testing\nwhether the residual of the symmetric portion is below a\ntolerance\n\nArgs:\nstructure (Structure): structure to be fit to\ntol (float): tolerance for symmetry testing", "source": "codesearchnet"}
{"code": "def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):\n    return convert_graphdef(input_data, input_tensors, output_tensors, *args, **kwargs)", "docstring": "Convert a TensorFlow GraphDef to TFLite.\n\nThis function is deprecated. Please use `tf.lite.TFLiteConverter` API instead.\nConversion can be customized by providing arguments that are forwarded to\n`build_model_flags` and `build_conversion_flags` (see documentation for\ndetails).\nArgs:\ninput_data: Input data (i.e. often `sess.graph_def`).\ninput_tensors: List of input tensors. Type and shape are computed using\n`foo.shape` and `foo.dtype`.\noutput_tensors: List of output tensors (only .name is used from this).\n*args: See `build_model_flags` and `build_conversion_flags`.\n**kwargs: See `build_model_flags` and `build_conversion_flags`.\n\nReturns:\nThe converted TensorFlow Lite model in a bytes array.\n\nRaises:\nDefined in `convert`.", "source": "github-repos"}
{"code": "def proc_val(key, val):\n        \n        float_keys = ('etot_conv_thr','forc_conv_thr','conv_thr','Hubbard_U','Hubbard_J0','defauss',\n                      'starting_magnetization',)\n\n        int_keys = ('nstep','iprint','nberrycyc','gdir','nppstr','ibrav','nat','ntyp','nbnd','nr1',\n                    'nr2','nr3','nr1s','nr2s','nr3s','nspin','nqx1','nqx2','nqx3','lda_plus_u_kind',\n                    'edir','report','esm_nfit','space_group','origin_choice','electron_maxstep',\n                    'mixing_ndim','mixing_fixed_ns','ortho_para','diago_cg_maxiter','diago_david_ndim',\n                    'nraise','bfgs_ndim','if_pos','nks','nk1','nk2','nk3','sk1','sk2','sk3','nconstr')\n\n        bool_keys = ('wf_collect','tstress','tprnfor','lkpoint_dir','tefield','dipfield','lelfield',\n                     'lorbm','lberry','lfcpopt','monopole','nosym','nosym_evc','noinv','no_t_rev',\n                     'force_symmorphic','use_all_frac','one_atom_occupations','starting_spin_angle',\n                     'noncolin','x_gamma_extrapolation','lda_plus_u','lspinorb','london',\n                     'ts_vdw_isolated','xdm','uniqueb','rhombohedral','realxz','block',\n                     'scf_must_converge','adaptive_thr','diago_full_acc','tqr','remove_rigid_rot',\n                     'refold_pos')\n\n        def smart_int_or_float(numstr):\n            if numstr.find(\".\") != -1 or numstr.lower().find(\"e\") != -1:\n                return float(numstr)\n            else:\n                return int(numstr)\n\n        try:\n            if key in bool_keys:\n                if val.lower() == \".true.\":\n                    return True\n                elif val.lower() == \".false.\":\n                    return False\n                else:\n                    raise ValueError(key + \" should be a boolean type!\")\n\n            if key in float_keys:\n                return float(re.search(r\"^-?\\d*\\.?\\d*d?-?\\d*\", val.lower()).group(0).replace(\"d\", \"e\"))\n\n            if key in int_keys:\n                return int(re.match(r\"^-?[0-9]+\", val).group(0))\n\n        except ValueError:\n            pass\n\n        try:\n            val = val.replace(\"d\",\"e\")\n            return smart_int_or_float(val)\n        except ValueError:\n            pass\n\n        if \"true\" in val.lower():\n            return True\n        if \"false\" in val.lower():\n            return False\n\n        m = re.match(r\"^[\\\"|'](.+)[\\\"|']$\", val)\n        if m:\n            return m.group(1)", "docstring": "Static helper method to convert PWINPUT parameters to proper type, e.g.,\nintegers, floats, etc.\n\nArgs:\nkey: PWINPUT parameter key\nval: Actual value of PWINPUT parameter.", "source": "juraj-google-style"}
{"code": "def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):\n    prefix = (prefix or cls.argument_prefix)\n    group.add_argument(('--%s-sources' % prefix), action='store', nargs='+', dest=('%s_sources' % prefix.replace('-', '_')), help=('%s source files to parse' % prefix))\n    if allow_filters:\n        group.add_argument(('--%s-source-filters' % prefix), action='store', nargs='+', dest=('%s_source_filters' % prefix.replace('-', '_')), help=('%s source files to ignore' % prefix))\n    if add_root_paths:\n        group.add_argument(('--%s-source-roots' % prefix), action='store', nargs='+', dest=('%s_source_roots' % prefix.replace('-', '_')), help=('%s source root directories allowing files to be referenced relatively to those' % prefix))", "docstring": "Subclasses may call this to add sources and source_filters arguments.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nallow_filters: bool,  Whether the extension wishes to expose a\nsource_filters argument.\nprefix: str, arguments have to be namespaced.", "source": "codesearchnet"}
{"code": "def _save_and_log_checkpoint(self, actor):\n        \n        actor_id = self._worker.actor_id\n        checkpoint_info = self._worker.actor_checkpoint_info[actor_id]\n        checkpoint_info.num_tasks_since_last_checkpoint += 1\n        now = int(1000 * time.time())\n        checkpoint_context = ray.actor.CheckpointContext(\n            actor_id, checkpoint_info.num_tasks_since_last_checkpoint,\n            now - checkpoint_info.last_checkpoint_timestamp)\n        \n        \n        if actor.should_checkpoint(checkpoint_context):\n            try:\n                now = int(1000 * time.time())\n                checkpoint_id = (self._worker.raylet_client.\n                                 prepare_actor_checkpoint(actor_id))\n                checkpoint_info.checkpoint_ids.append(checkpoint_id)\n                actor.save_checkpoint(actor_id, checkpoint_id)\n                if (len(checkpoint_info.checkpoint_ids) >\n                        ray._config.num_actor_checkpoints_to_keep()):\n                    actor.checkpoint_expired(\n                        actor_id,\n                        checkpoint_info.checkpoint_ids.pop(0),\n                    )\n                checkpoint_info.num_tasks_since_last_checkpoint = 0\n                checkpoint_info.last_checkpoint_timestamp = now\n            except Exception:\n                \n                traceback_str = ray.utils.format_error_message(\n                    traceback.format_exc())\n                ray.utils.push_error_to_driver(\n                    self._worker,\n                    ray_constants.CHECKPOINT_PUSH_ERROR,\n                    traceback_str,\n                    driver_id=self._worker.task_driver_id)", "docstring": "Save an actor checkpoint if necessary and log any errors.\n\nArgs:\nactor: The actor to checkpoint.\n\nReturns:\nThe result of the actor's user-defined `save_checkpoint` method.", "source": "juraj-google-style"}
{"code": "def assign(self, value, use_locking=False, name=None, read_value=True):\n    assign = state_ops.assign(self._variable, value, use_locking=use_locking, name=name)\n    if read_value:\n        return assign\n    return assign.op", "docstring": "Assigns a new value to the variable.\n\nThis is essentially a shortcut for `assign(self, value)`.\n\nArgs:\nvalue: A `Tensor`. The new value for this variable.\nuse_locking: If `True`, use locking during the assignment.\nname: The name of the operation to be created\nread_value: if True, will return something which evaluates to the new\nvalue of the variable; if False will return the assign op.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe assignment has completed.", "source": "github-repos"}
{"code": "def convert_persistent_value(self, shift, instruction):\n    command_dict = {'name': 'pv', 't0': (shift + instruction.start_time), 'ch': instruction.channels[0].name, 'val': instruction.command.value}\n    return self._qobj_model(**command_dict)", "docstring": "Return converted `PersistentValueInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (PersistentValueInstruction): persistent value instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "codesearchnet"}
{"code": "def create_chunker(self, chunk_size):\n        \n        rolling_hash = _rabinkarprh.RabinKarpHash(self.window_size, self._seed)\n        rolling_hash.set_threshold(1.0 / chunk_size)\n        return RabinKarpCDC._Chunker(rolling_hash)", "docstring": "Create a chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme with a\nspecific, expected chunk size.\n\nArgs:\nchunk_size (int): (Expected) target chunk size.\n\nReturns:\nBaseChunker: A chunker object.", "source": "juraj-google-style"}
{"code": "def open_model(self, model_path, audit=False):\n        \n        if audit:\n            self._add_entry(templates.FILE_OPEN_AUDIT\n                                     .format(model_path=model_path))\n        else:\n            self._add_entry(templates.FILE_OPEN\n                                     .format(model_path=model_path))", "docstring": "Append a open non-workshared model entry to the journal.\n\nThis instructs Revit to open a non-workshared model.\n\nArgs:\nmodel_path (str): full path to non-workshared model\naudit (bool): if True audits the model when opening", "source": "juraj-google-style"}
{"code": "def _field_to_json(field, row_value):\n    if (row_value is None):\n        return None\n    if (field.mode == 'REPEATED'):\n        return _repeated_field_to_json(field, row_value)\n    if (field.field_type == 'RECORD'):\n        return _record_field_to_json(field.fields, row_value)\n    return _scalar_field_to_json(field, row_value)", "docstring": "Convert a field into JSON-serializable values.\n\nArgs:\nfield ( \\\n:class:`~google.cloud.bigquery.schema.SchemaField`, \\\n):\nThe SchemaField to use for type conversion and field name.\n\nrow_value (Union[ \\\nSequence[list], \\\nany, \\\n]):\nRow data to be inserted. If the SchemaField's mode is\nREPEATED, assume this is a list. If not, the type\nis inferred from the SchemaField's field_type.\n\nReturns:\nany:\nA JSON-serializable object.", "source": "codesearchnet"}
{"code": "def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key, inputs):\n    interpreter = Interpreter(model_content=tflite_model)\n    signature_runner = interpreter.get_signature_runner(signature_key)\n    return signature_runner(**inputs)", "docstring": "Evaluates the model on the `inputs`.\n\nArgs:\ntflite_model: TensorFlow Lite model.\nsignature_key: Signature key.\ninputs: Map from input tensor names in the SignatureDef to tensor value.\n\nReturns:\nDictionary of outputs.\nKey is the output name in the SignatureDef 'signature_key'\nValue is the output value", "source": "github-repos"}
{"code": "def generate_typegraph(program: cfg.Program, var_table: dict[int, str], loader: jinja2.BaseLoader) -> str:\n    encoder = typegraph_serializer.TypegraphEncoder()\n    enc_prog = encoder.default(program)\n    return _generate_visualization(template_file=_TYPEGRAPH_TEMPLATE_NAME, loader=loader, program=json.dumps(enc_prog), query_table=enc_prog['queries'], var_table=var_table)", "docstring": "Generate the visualization webpage.\n\nArgs:\nprogram: cfg.Program. The instance of the program to visualize.\nvar_table: dict[int, str]. A mapping of cfg.Variable IDs to names.\nloader: A jinja2 loader\n\nReturns:\nstr. The rendered visualization page.", "source": "github-repos"}
{"code": "def get_by(self, field, value):\n    if (not field):\n        logger.exception(RESOURCE_CLIENT_INVALID_FIELD)\n        raise ValueError(RESOURCE_CLIENT_INVALID_FIELD)\n    filter = '\"{0}=\\'{1}\\'\"'.format(field, value)\n    results = self.get_all(filter=filter)\n    if ('.' not in field):\n        results = [item for item in results if (str(item.get(field, '')).lower() == value.lower())]\n    return results", "docstring": "Get the resource by passing a field and its value.\n\nNote:\nThis function uses get_all passing a filter.The search is case-insensitive.\n\nArgs:\nfield: Field name to filter.\nvalue: Value to filter.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def Serialize(self, writer):\n        \n        super(AccountState, self).Serialize(writer)\n        writer.WriteUInt160(self.ScriptHash)\n        writer.WriteBool(self.IsFrozen)\n        writer.WriteVarInt(len(self.Votes))\n        for vote in self.Votes:\n            writer.WriteBytes(vote)\n\n        blen = len(self.Balances)\n        writer.WriteVarInt(blen)\n\n        for key, fixed8 in self.Balances.items():\n            writer.WriteUInt256(key)\n            writer.WriteFixed8(fixed8)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def default(self, value):\n    if isinstance(value, messages.Enum):\n        return str(value)\n    if (six.PY3 and isinstance(value, bytes)):\n        return value.decode('utf8')\n    if isinstance(value, messages.Message):\n        result = {}\n        for field in value.all_fields():\n            item = value.get_assigned_value(field.name)\n            if (item not in (None, [], ())):\n                result[field.name] = self.__protojson_protocol.encode_field(field, item)\n        for unknown_key in value.all_unrecognized_fields():\n            (unrecognized_field, _) = value.get_unrecognized_field_info(unknown_key)\n            result[unknown_key] = unrecognized_field\n        return result\n    return super(MessageJSONEncoder, self).default(value)", "docstring": "Return dictionary instance from a message object.\n\nArgs:\nvalue: Value to get dictionary for.  If not encodable, will\ncall superclasses default method.", "source": "codesearchnet"}
{"code": "def FindNode(self, component_path):\n    node = self.state.component_tree\n    for component in component_path:\n        node = node[component]\n    return node", "docstring": "Find the node in the component_tree from component_path.\n\nArgs:\ncomponent_path: A list of components which reference a node in the\ncomponent tree. This allows us to resume processing in the tree.\n\nReturns:\nA node in the component_tree.", "source": "codesearchnet"}
{"code": "def GetTermSize(self):\n    return self._term_size", "docstring": "Returns the terminal (x, y) dimensions in characters.\n\nReturns:\n(x, y): A tuple of the terminal x and y dimensions.", "source": "github-repos"}
{"code": "def __init__(self, **kwargs):\n    \n    if kwargs:\n      raise ValueError('Unused keyword arguments: {0:s}.'.format(\n          ', '.join(kwargs)))\n\n    super(Decrypter, self).__init__()", "docstring": "Initializes a decrypter.\n\nArgs:\nkwargs (dict): keyword arguments depending on the decrypter.\n\nRaises:\nValueError: when there are unused keyword arguments.", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, experimenter=None, exp_type=None, data=b''):\n        \n        super().__init__(xid)\n        self.experimenter = experimenter\n        self.exp_type = exp_type\n        self.data = data", "docstring": "Create a ExperimenterHeader with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\nexperimenter (int): Vendor ID:\nMSB 0: low-order bytes are IEEE OUI.\nMSB != 0: defined by ONF.\nexp_type (int): Experimenter defined.", "source": "juraj-google-style"}
{"code": "def to_geojson(self, filename, proj, metadata=None):\n    if (metadata is None):\n        metadata = {}\n    json_obj = {'type': 'FeatureCollection', 'features': [], 'properties': {}}\n    json_obj['properties']['times'] = self.times.tolist()\n    json_obj['properties']['dx'] = self.dx\n    json_obj['properties']['step'] = self.step\n    json_obj['properties']['u'] = self.u.tolist()\n    json_obj['properties']['v'] = self.v.tolist()\n    for (k, v) in metadata.items():\n        json_obj['properties'][k] = v\n    for (t, time) in enumerate(self.times):\n        feature = {'type': 'Feature', 'geometry': {'type': 'Polygon'}, 'properties': {}}\n        boundary_coords = self.boundary_polygon(time)\n        lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True))\n        lonlat_list = lonlat.T.tolist()\n        if (len(lonlat_list) > 0):\n            lonlat_list.append(lonlat_list[0])\n        feature['geometry']['coordinates'] = [lonlat_list]\n        for attr in ['timesteps', 'masks', 'x', 'y', 'i', 'j']:\n            feature['properties'][attr] = getattr(self, attr)[t].tolist()\n        feature['properties']['attributes'] = {}\n        for (attr_name, steps) in self.attributes.items():\n            feature['properties']['attributes'][attr_name] = steps[t].tolist()\n        json_obj['features'].append(feature)\n    file_obj = open(filename, 'w')\n    json.dump(json_obj, file_obj, indent=1, sort_keys=True)\n    file_obj.close()\n    return", "docstring": "Output the data in the STObject to a geoJSON file.\n\nArgs:\nfilename: Name of the file\nproj: PyProj object for converting the x and y coordinates back to latitude and longitue values.\nmetadata: Metadata describing the object to be included in the top-level properties.", "source": "codesearchnet"}
{"code": "def convert_datetime_type(obj):\n    if (pd and (obj is pd.NaT)):\n        return np.nan\n    if (pd and isinstance(obj, pd.Period)):\n        return (obj.to_timestamp().value / (10 ** 6.0))\n    if (pd and isinstance(obj, _pd_timestamp)):\n        return (obj.value / (10 ** 6.0))\n    elif (pd and isinstance(obj, pd.Timedelta)):\n        return (obj.value / (10 ** 6.0))\n    elif isinstance(obj, dt.datetime):\n        diff = (obj.replace(tzinfo=None) - DT_EPOCH)\n        return (diff.total_seconds() * 1000.0)\n    elif isinstance(obj, dt.date):\n        return ((dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000)\n    elif isinstance(obj, np.datetime64):\n        epoch_delta = (obj - NP_EPOCH)\n        return (epoch_delta / NP_MS_DELTA)\n    elif isinstance(obj, dt.time):\n        return (((((obj.hour * 3600) + (obj.minute * 60)) + obj.second) * 1000) + (obj.microsecond / 1000.0))", "docstring": "Convert any recognized date, time, or datetime value to floating point\nmilliseconds since epoch.\n\nArg:\nobj (object) : the object to convert\n\nReturns:\nfloat : milliseconds", "source": "codesearchnet"}
{"code": "def delete_vmss_vms(access_token, subscription_id, resource_group, vmss_name, vm_ids):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '/delete?api-version=', COMP_API])\n    body = '{\"instanceIds\" : ' + vm_ids + '}'\n    return do_post(endpoint, body, access_token)", "docstring": "Delete a VM in a VM Scale Set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\nvm_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def _CopyDateTimeFromStringISO8601(self, time_string):\n    if (not time_string):\n        raise ValueError('Invalid time string.')\n    time_string_length = len(time_string)\n    (year, month, day_of_month) = self._CopyDateFromString(time_string)\n    if (time_string_length <= 10):\n        return {'year': year, 'month': month, 'day_of_month': day_of_month}\n    if (time_string[10] != 'T'):\n        raise ValueError('Invalid time string - missing as date and time separator.')\n    (hours, minutes, seconds, microseconds, time_zone_offset) = self._CopyTimeFromStringISO8601(time_string[11:])\n    if time_zone_offset:\n        (year, month, day_of_month, hours, minutes) = self._AdjustForTimeZoneOffset(year, month, day_of_month, hours, minutes, time_zone_offset)\n    date_time_values = {'year': year, 'month': month, 'day_of_month': day_of_month, 'hours': hours, 'minutes': minutes, 'seconds': seconds}\n    if (microseconds is not None):\n        date_time_values['microseconds'] = microseconds\n    return date_time_values", "docstring": "Copies a date and time from an ISO 8601 date and time string.\n\nArgs:\ntime_string (str): time value formatted as:\nhh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The fraction of second and\ntime zone offset are optional.\n\nReturns:\ntuple[int, int, int, int, int]: hours, minutes, seconds, microseconds,\ntime zone offset in minutes.\n\nRaises:\nValueError: if the time string is invalid or not supported.", "source": "codesearchnet"}
{"code": "def set_record(self, name, record_id, record):\n    if (name not in self._cache):\n        self._cache[name] = {}\n    self._cache[name][record_id] = record", "docstring": "Save a record into the cache.\n\nArgs:\nname (string): The name to save the model under.\nrecord_id (int): The record id.\nrecord (:class:`cinder_data.model.CinderModel`): The model", "source": "codesearchnet"}
{"code": "def sg_prod(tensor, opt):\n    r\n    return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the product of elements across axis of a tensor.\n\nSee `tf.reduce_prod()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def AddArg(self, argument):\n    self.args.append(argument)\n    if (len(self.args) > self.number_of_args):\n        raise errors.ParseError('Too many arguments for this expression.')\n    elif (len(self.args) == self.number_of_args):\n        return True\n    return False", "docstring": "Adds a new argument to this expression.\n\nArgs:\nargument (str): argument to add.\n\nReturns:\nTrue if the argument is the last argument, False otherwise.\n\nRaises:\nParseError: If there are too many arguments.", "source": "codesearchnet"}
{"code": "def __checkDecisionParameters(self, result, **values):\n\t\t\n\t\terror = []\n\n\t\tif not result:\n\t\t\terror.append('Function parameter (result array) should contain one or more header string!')\n\n\t\tif not values:\n\t\t\terror.append('Function parameter (values variables) should contain one or more variable')\n\n\t\tfor header in result:\n\t\t\tif not header in self.header:\n\t\t\t\terror.append('String (' + header + ') in result is not in header!')\n\n\t\tfor header in values:\n\t\t\tif not header in self.header:\n\t\t\t\terror.append('Variable (' + header + ') in values is not in header!')\n\t\t\telif not values[header].split():\n\t\t\t\terror.append('Variable (' + header + ') in values is empty string')\n\n\t\tif error:\n\t\t\treturn error", "docstring": "Checker of decision parameters, it will raise ValueError if finds something wrong.\n\nArgs:\nresult (array of str): See public decision methods\n**values (array of str): See public decision methods\n\nRaise:\nValueError: Result array none.\nValueError: Values dict none.\nValueError: Not find result key in header.\nValueError: Result value is empty.\n\nReturns:\nError array values", "source": "juraj-google-style"}
{"code": "def send_msg(self, address, args=[]):\n        \n        if not address.startswith('/'):\n            address = '/{}'.format(address)\n    \n        msg = osc_message_builder.OscMessageBuilder(address=address)\n    \n        for arg in args:\n            msg.add_arg(arg)\n        self.conn.send(msg.build())\n        return", "docstring": "Send multiple args into a single message to a given address.\n\nArgs:\naddress (str): OSC Address.\nargs (list): Arguments to be parsed in VVVV.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.Exchange = channel.unary_unary(\n        '/communicator_objects.UnityToExternal/Exchange',\n        request_serializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.SerializeToString,\n        response_deserializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def create_pipeline_field(self, pipeline_key, name, field_type, **kwargs):\n\t\t\n\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline_key,\n\t\t\t\t\t\tself.fields_suffix\n\t\t\t\t\t\t])\n\t\t\n\t\tcode, data = self._create_field(uri, name, field_type, **kwargs)\n\t\t\n\t\treturn code, data", "docstring": "Creates a pipeline field with the provided attributes.\nArgs:\npipeline_key\tspecifying the pipeline to add the field to\nname\t\t\trequired name string\nfield_type\t\trequired type string [TEXT_INPUT, DATE or PERSON]\nkwargs\t\t\t{}\nreturn\t\t\t(status code, field dict)", "source": "juraj-google-style"}
{"code": "def write(self, data):\n    \n    block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE\n\n    if block_remaining < _HEADER_LENGTH:\n      \n      self.__writer.write('\\x00' * block_remaining)\n      self.__position += block_remaining\n      block_remaining = _BLOCK_SIZE\n\n    if block_remaining < len(data) + _HEADER_LENGTH:\n      first_chunk = data[:block_remaining - _HEADER_LENGTH]\n      self.__write_record(_RECORD_TYPE_FIRST, first_chunk)\n      data = data[len(first_chunk):]\n\n      while True:\n        block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE\n        if block_remaining >= len(data) + _HEADER_LENGTH:\n          self.__write_record(_RECORD_TYPE_LAST, data)\n          break\n        else:\n          chunk = data[:block_remaining - _HEADER_LENGTH]\n          self.__write_record(_RECORD_TYPE_MIDDLE, chunk)\n          data = data[len(chunk):]\n    else:\n      self.__write_record(_RECORD_TYPE_FULL, data)", "docstring": "Write single record.\n\nArgs:\ndata: record data to write as string, byte array or byte sequence.", "source": "juraj-google-style"}
{"code": "def inv_attractor(dx, alpha: float=300, gamma: int=2):\n    return dx.div(1 + alpha * dx.pow(gamma))", "docstring": "Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center\nThis is the default one according to the accompanying paper.\n\nArgs:\ndx (`torch.Tensor`):\nThe difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.\nalpha (`float`, *optional*, defaults to 300):\nProportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction.\ngamma (`int`, *optional*, defaults to 2):\nExponential Attractor strength. Determines the \"region of influence\" and indirectly number of bin centers affected.\nLower gamma = farther reach.\n\nReturns:\ntorch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc", "source": "github-repos"}
{"code": "def convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False):\n    \n    return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]", "docstring": "Convert a list of bounding boxes from the format used by albumentations to a format, specified\nin `target_format`.\n\nArgs:\nbboxes (list): List of bounding box with coordinates in the format used by albumentations\ntarget_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'.\nrows (int): image height\ncols (int): image width\ncheck_validity (bool): check if all boxes are valid boxes", "source": "juraj-google-style"}
{"code": "def filter_by_analysis_period(self, analysis_period):\n        \n        _filtered_data = self.filter_by_months(analysis_period.months_int)\n        _filtered_data.header._analysis_period = analysis_period\n        return _filtered_data", "docstring": "Filter the Data Collection based on an analysis period.\n\nArgs:\nanalysis period: A Ladybug analysis period\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def is_datafile_valid(datafile):\n  \n\n  try:\n    datafile_json = json.loads(datafile)\n  except:\n    return False\n\n  try:\n    jsonschema.Draft4Validator(constants.JSON_SCHEMA).validate(datafile_json)\n  except:\n    return False\n\n  return True", "docstring": "Given a datafile determine if it is valid or not.\n\nArgs:\ndatafile: JSON string representing the project.\n\nReturns:\nBoolean depending upon whether datafile is valid or not.", "source": "juraj-google-style"}
{"code": "def Open(self, file_object):\n    file_object.seek(0, os.SEEK_SET)\n    signature_data = file_object.read(6)\n    self.file_format = None\n    if (len(signature_data) > 2):\n        if (signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN):\n            self.file_format = 'bin-big-endian'\n        elif (signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN):\n            self.file_format = 'bin-little-endian'\n        elif (signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII):\n            self.file_format = 'odc'\n        elif (signature_data == self._CPIO_SIGNATURE_NEW_ASCII):\n            self.file_format = 'newc'\n        elif (signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM):\n            self.file_format = 'crc'\n    if (self.file_format is None):\n        raise IOError('Unsupported CPIO format.')\n    self._file_object = file_object\n    self._file_size = file_object.get_size()\n    self._ReadFileEntries(self._file_object)", "docstring": "Opens the CPIO archive file.\n\nArgs:\nfile_object (FileIO): a file-like object.\n\nRaises:\nIOError: if the file format signature is not supported.\nOSError: if the file format signature is not supported.", "source": "codesearchnet"}
{"code": "async def _call_rpc(self, header):\n        \n\n        length, _, cmd, feature, address = struct.unpack(\"<BBBBB\", bytes(header))\n        rpc_id = (feature << 8) | cmd\n\n        payload = self.rpc_payload[:length]\n\n        self._logger.debug(\"Calling RPC %d:%04X with %s\", address, rpc_id, binascii.hexlify(payload))\n\n        exception = None\n        response = None\n\n        try:\n            response = await self.send_rpc(self.CLIENT_ID, str(self.device.iotile_id), address, rpc_id, bytes(payload), timeout=30.0)\n        except VALID_RPC_EXCEPTIONS as err:\n            exception = err\n        except Exception as err:\n            self._logger.exception(\"Error calling RPC %d:%04X\", address, rpc_id)\n            exception = err\n\n        status, response = pack_rpc_response(response, exception)\n        resp_header = struct.pack(\"<BBBB\", status, 0, 0, len(response))\n\n        await self._send_notification(self.ReceiveHeaderHandle, resp_header)\n\n        if len(response) > 0:\n            await self._send_notification(self.ReceivePayloadHandle, response)", "docstring": "Call an RPC given a header and possibly a previously sent payload\n\nArgs:\nheader (bytearray): The RPC header we should call", "source": "juraj-google-style"}
{"code": "def del_method(self, m):\n        \n        if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):\n            wrkey = ('function', id(m))\n        else:\n            f, obj = get_method_vars(m)\n            wrkey = (f, id(obj))\n        if wrkey in self:\n            del self[wrkey]", "docstring": "Remove an instance method or function if it exists\n\nArgs:\nm: The instance method or function to remove", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(GetResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_stream):\n        self._object_type = primitives.Enumeration(enum=enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)\n        self._object_type.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Parsed payload encoding is missing the object type field.')\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Parsed payload encoding is missing the unique identifier field.')\n    self.secret = self.secret_factory.create(self.object_type)\n    if self.is_tag_next(self._secret.tag, local_stream):\n        self._secret.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Parsed payload encoding is missing the secret field.')\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Get response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the object type, unique identifier, or\nsecret attributes are missing from the encoded payload.", "source": "codesearchnet"}
{"code": "def CacheFileObject(self, path_spec, file_object):\n    \n    self._file_object_cache.CacheObject(path_spec.comparable, file_object)", "docstring": "Caches a file-like object based on a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nfile_object (FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def _collect_tokens(self, node: dict) -> list:\n    tokens = [self._termination_char] if self._termination_char in node else []\n    for token, subtrie_head in node.items():\n        if token != self._termination_char:\n            subtokens = self._collect_tokens(subtrie_head)\n            tokens.extend([token + subtoken for subtoken in subtokens])\n    return tokens", "docstring": "Generates all tokens in the Trie starting from a given node.\n\nArgs:\nnode (dict): The node in the Trie from which tokens need to be generated.\n\nReturns:\nlist: List of tokens generated from the given node.", "source": "github-repos"}
{"code": "def _get_query_results(self, job_id, retry, project=None, timeout_ms=None, location=None):\n    extra_params = {'maxResults': 0}\n    if (project is None):\n        project = self.project\n    if (timeout_ms is not None):\n        extra_params['timeoutMs'] = timeout_ms\n    if (location is None):\n        location = self.location\n    if (location is not None):\n        extra_params['location'] = location\n    path = '/projects/{}/queries/{}'.format(project, job_id)\n    resource = self._call_api(retry, method='GET', path=path, query_params=extra_params)\n    return _QueryResults.from_api_repr(resource)", "docstring": "Get the query results object for a query job.\n\nArguments:\njob_id (str): Name of the query job.\nretry (google.api_core.retry.Retry):\n(Optional) How to retry the RPC.\nproject (str):\n(Optional) project ID for the query job (defaults to the\nproject of the client).\ntimeout_ms (int):\n(Optional) number of milliseconds the the API call should\nwait for the query to complete before the request times out.\nlocation (str): Location of the query job.\n\nReturns:\ngoogle.cloud.bigquery.query._QueryResults:\nA new ``_QueryResults`` instance.", "source": "codesearchnet"}
{"code": "def prune_intermediate_layers(node):\n    if not node.get('children'):\n        return\n    layer_blocks = [(i, child) for i, child in enumerate(node['children']) if is_layer_block(child)]\n    if len(layer_blocks) > 2:\n        to_remove = [i for i, _ in layer_blocks[1:-1]]\n        node['children'] = [child for i, child in enumerate(node['children']) if i not in to_remove]\n    for child in node['children']:\n        prune_intermediate_layers(child)", "docstring": "Recursively removes intermediate layers from the tree to improve readability.\nKeeps at least the first and last layers if many consecutive layers are present.\n\nArgs:\nnode (`dict`): The root or subnode to prune recursively.", "source": "github-repos"}
{"code": "def __init__(self, all_reduce_alg='nccl', num_packs=1):\n    self._all_reduce_alg = all_reduce_alg\n    self._num_packs = num_packs\n    self._simple_cross_replica_ops = ReductionToOneDevice()\n    super(AllReduceCrossDeviceOps, self).__init__()", "docstring": "Initializes the object.\n\nArgs:\nall_reduce_alg: the all-reduce algorithm to use, currently only \"nccl\" or\n\"hierarchical_copy\" are supported.\nnum_packs: a non-negative integer. The number of packs to split values\ninto. If zero, no packing will be done.", "source": "github-repos"}
{"code": "def read_named_csv(name, data_path=DATA_PATH, nrows=None, verbose=True):\n    \n    if os.path.isfile(name):\n        try:\n            return read_json(name)\n        except (IOError, UnicodeDecodeError, json.JSONDecodeError):\n            pass\n        try:\n            return read_csv(name, nrows=nrows)\n        except (IOError, pd.errors.ParserError):\n            pass\n        try:\n            return read_txt(name, nrows=nrows)\n        except (IOError, UnicodeDecodeError):\n            pass\n    data_path = expand_filepath(data_path)\n    if os.path.isfile(os.path.join(data_path, name)):\n        return read_csv(os.path.join(data_path, name), nrows=nrows)\n    if name in DATASET_NAME2FILENAME:\n        name = DATASET_NAME2FILENAME[name]\n        if name.lower().endswith('.txt') or name.lower().endswith('.txt.gz'):\n            return read_text(os.path.join(data_path, name), nrows=nrows)\n        else:\n            return read_csv(os.path.join(data_path, name), nrows=nrows)\n    try:\n        return read_csv(os.path.join(data_path, name + '.csv.gz'), nrows=nrows)\n    except IOError:\n        pass\n    try:\n        return read_csv(os.path.join(data_path, name + '.csv'), nrows=nrows)\n    except IOError:\n        pass\n    try:\n        return read_json(os.path.join(data_path, name + '.json'))\n    except IOError:\n        pass\n    try:\n        return read_txt(os.path.join(data_path, name + '.txt'), verbose=verbose)\n    except IOError:\n        pass\n\n    \n    \n    try:\n        return KeyedVectors.load_word2vec_format(os.path.join(BIGDATA_PATH, name + '.bin.gz'), binary=True)\n    except IOError:\n        pass\n    except ValueError:\n        pass\n    try:\n        return read_txt(os.path.join(BIGDATA_PATH, name + '.txt'), verbose=verbose)\n    except IOError:\n        pass", "docstring": "Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame\n\nTODO: should be called read_named_dataset\n\nArgs:\n`name` is assumed not to have an extension (like \".csv\"), alternative extensions are tried automatically.file", "source": "juraj-google-style"}
{"code": "def get_new_address(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_NEW_ADDRESS, id=id, endpoint=endpoint)", "docstring": "Create new address\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def ListPlugins(logdir):\n    plugins_dir = os.path.join(logdir, _PLUGINS_DIR)\n    try:\n        entries = tf.io.gfile.listdir(plugins_dir)\n    except tf.errors.NotFoundError:\n        return []\n    return [x.rstrip('/') for x in entries if (x.endswith('/') or _IsDirectory(plugins_dir, x))]", "docstring": "List all the plugins that have registered assets in logdir.\n\nIf the plugins_dir does not exist, it returns an empty list. This maintains\ncompatibility with old directories that have no plugins written.\n\nArgs:\nlogdir: A directory that was created by a TensorFlow events writer.\n\nReturns:\na list of plugin names, as strings", "source": "codesearchnet"}
{"code": "def resource_import(filename: str, *, module: Optional[epath.PathLike]=None) -> str:\n    path = epath.resource_path(module) if module else _static_path()\n    path = path.joinpath(filename)\n    content = path.read_text()\n    if path.suffix == '.css':\n        return f'<style>{content}</style>'\n    elif path.suffix == '.js':\n        return f'<script>{content}</script>'\n    else:\n        raise ValueError('')", "docstring": "Returns the `HTML` associated with the resource.\n\nArgs:\nfilename: Path to the `.css`, `.js` resource\nmodule: Python module name from which the filename is relative too.", "source": "github-repos"}
{"code": "def bytes_to_long(bytesdata: bytes) -> int:\n    \n    assert len(bytesdata) == 8\n    return sum((b << (k * 8) for k, b in enumerate(bytesdata)))", "docstring": "Converts an 8-byte sequence to a long integer.\n\nArgs:\nbytesdata: 8 consecutive bytes, as a ``bytes`` object, in\nlittle-endian format (least significant byte [LSB] first)\n\nReturns:\ninteger", "source": "juraj-google-style"}
{"code": "def open_required(func):\n\n    @functools.wraps(func)\n    def wrapper(self, *args, **kwargs):\n        'Wrapper function to check that the given ``JLink`` has been\\n            opened.\\n\\n            Args:\\n              self (JLink): the ``JLink`` instance\\n              args: list of arguments to pass to the wrapped function\\n              kwargs: key-word arguments dict to pass to the wrapped function\\n\\n            Returns:\\n              The return value of the wrapped function.\\n\\n            Raises:\\n              JLinkException: if the J-Link DLL is not open or the J-Link is\\n                  disconnected.\\n            '\n        if (not self.opened()):\n            raise errors.JLinkException('J-Link DLL is not open.')\n        elif (not self.connected()):\n            raise errors.JLinkException('J-Link connection has been lost.')\n        return func(self, *args, **kwargs)\n    return wrapper", "docstring": "Decorator to specify that the J-Link DLL must be opened, and a\nJ-Link connection must be established.\n\nArgs:\nfunc (function): function being decorated\n\nReturns:\nThe wrapper function.", "source": "codesearchnet"}
{"code": "def _config_session():\n    config = tf.ConfigProto()\n    config.gpu_options.allow_growth = True\n    config.gpu_options.visible_device_list = '0'\n    return tf.Session(config=config)", "docstring": "Configure session for particular device\n\nReturns:\ntensorflow.Session", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return token_ids_0 + [self.sep_token_id]\n    sep = [self.sep_token_id]\n    return token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def char_decode(self, sequences):\n    decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]\n    return decode_strs", "docstring": "Convert a list of lists of char token ids into a list of strings by calling char tokenizer.\n\nArgs:\nsequences (`torch.Tensor`):\nList of tokenized input ids.\nReturns:\n`List[str]`: The list of char decoded sentences.", "source": "github-repos"}
{"code": "def detect_content_type(self, path=None, payload=None, objectInput=None):\n    if objectInput:\n        message = 'Detection content type with file object is not stable.'\n        log.exception(message)\n        raise TikaAppError(message)\n    f = file_path(path, payload, objectInput)\n    switches = ['-d', f]\n    result = self._command_template(switches).lower()\n    return (result, path, f)", "docstring": "Return the content type of passed file or payload.\n\nArgs:\npath (string): Path of file to analyze\npayload (string): Payload base64 to analyze\nobjectInput (object): file object/standard input to analyze\n\nReturns:\ncontent type of file (string)", "source": "codesearchnet"}
{"code": "def update(self, data):\n    for (key, value) in data.items():\n        setattr(self, key, value)", "docstring": "Update the current memory record with the given data dict.\n\nArgs:\ndata (dict): Data dictionary to update the record attributes with.", "source": "codesearchnet"}
{"code": "def __init__(self, content=None, min=0, max=HUGE, name=None):\n    assert 0 <= min <= max <= HUGE, (min, max)\n    if content is not None:\n        content = tuple(map(tuple, content))\n        assert len(content), repr(content)\n        for alt in content:\n            assert len(alt), repr(alt)\n    self.content = content\n    self.min = min\n    self.max = max\n    self.name = name", "docstring": "Initializer.\n\nArgs:\ncontent: optional sequence of subsequences of patterns; if absent,\nmatches one node; if present, each subsequence is an alternative [*]\nmin: optional minimum number of times to match, default 0\nmax: optional maximum number of times to match, default HUGE\nname: optional name assigned to this match\n\n[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is\nequivalent to (a b c | d e | f g h); if content is None,\nthis is equivalent to '.' in regular expression terms.\nThe min and max parameters work as follows:\nmin=0, max=maxint: .*\nmin=1, max=maxint: .+\nmin=0, max=1: .?\nmin=1, max=1: .\nIf content is not None, replace the dot with the parenthesized\nlist of alternatives, e.g. (a b c | d e | f g h)*", "source": "github-repos"}
{"code": "def __init__(self, config=None, start=True):\n    config = config or DispatcherConfig()\n    if config.fault_tolerant_mode and (not config.work_dir):\n        raise ValueError('Cannot enable fault tolerant mode without configuring a work dir. Make sure to set `work_dir` in the `config` object passed to `DispatcherServer`.')\n    self._config = config\n    if isinstance(config, service_config_pb2.DispatcherConfig):\n        config_proto = config\n    else:\n        config_proto = service_config_pb2.DispatcherConfig(port=config.port, protocol=config.protocol, work_dir=config.work_dir, fault_tolerant_mode=config.fault_tolerant_mode, worker_addresses=config.worker_addresses, job_gc_check_interval_ms=config.job_gc_check_interval_ms, job_gc_timeout_ms=config.job_gc_timeout_ms, worker_timeout_ms=config.worker_timeout_ms, worker_max_concurrent_snapshots=config.worker_max_concurrent_snapshots)\n    self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer(config_proto.SerializeToString())\n    if start:\n        self._server.start()", "docstring": "Creates a new dispatch server.\n\nArgs:\nconfig: (Optional.) A `tf.data.experimental.service.DispatcherConfig`\nconfiguration. If `None`, the dispatcher will use default configuration\nvalues.\nstart: (Optional.) Boolean, indicating whether to start the server after\ncreating it. Defaults to True.", "source": "github-repos"}
{"code": "def _add_bound_method(self, bound_method, identify_observed):\n        \n\n        inst = bound_method.__self__\n        method_name = bound_method.__name__\n        key = self.make_key(bound_method)\n        if key not in self.observers:\n            self.observers[key] = ObserverBoundMethod(\n                inst, method_name, identify_observed, (key, self.observers))\n            return True\n        else:\n            return False", "docstring": "Add an bound method as an observer.\n\nArgs:\nbound_method: The bound method to add as an observer.\nidentify_observed: See the docstring for add_observer.\n\nReturns:\nTrue if the bound method is added, otherwise False.", "source": "juraj-google-style"}
{"code": "def sget_steptime(self, cycle, step, dataset_number=None):\n    dataset_number = self._validate_dataset_number(dataset_number)\n    if (dataset_number is None):\n        self._report_empty_dataset()\n        return\n    cycle_index_header = self.headers_normal.cycle_index_txt\n    step_time_header = self.headers_normal.step_time_txt\n    step_index_header = self.headers_normal.step_index_txt\n    test = self.datasets[dataset_number].dfdata\n    if isinstance(step, (list, tuple)):\n        warnings.warn(f'The varialbe step is a list.Should be an integer.{step}')\n        step = step[0]\n    c = test.loc[(((test[cycle_index_header] == cycle) & (test[step_index_header] == step)), :)]\n    if (not self.is_empty(c)):\n        t = c[step_time_header]\n        return t\n    else:\n        return None", "docstring": "Returns step time for cycle, step.\n\nConvinience function; same as issuing\ndfdata[(dfdata[cycle_index_header] == cycle) &\n(dfdata[step_index_header] == step)][step_time_header]\n\nArgs:\ncycle: cycle number\nstep: step number\ndataset_number: the dataset number (automatic selection if None)\n\nReturns:\npandas.Series or None if empty", "source": "codesearchnet"}
{"code": "def execute_command(self, command: str, read: bool=False) -> None:\n    if self.debug:\n        print(command)\n        return 'SIMULATING VALUE' if read else True\n    else:\n        print('\\nCOMMAND\\n', command, '\\n' + '-' * 40)\n        try:\n            cmd = subprocess.run(command, shell=True, capture_output=read, text=True, check=True)\n            if read:\n                return cmd.stdout.strip()\n            return True\n        except subprocess.CalledProcessError as e:\n            return False", "docstring": "Helper function that either executes or prints each command.\n\nArgs:\ncommand - a command line command, typically a gcloud command.\nread - if True, the commands output is passed back to the caller.\n\nReturns:\nBool - if command value is not required, simply indicate successor failure.\nString - if read is specified, the command output or error is returned.", "source": "github-repos"}
{"code": "def _parse_vars(self, tokens):\n    key_values = {}\n    for token in tokens:\n        if token.startswith('\n            break\n        else:\n            (k, v) = token.split('=', 1)\n            key = k.strip()\n            key_values[key] = v.strip()\n    return key_values", "docstring": "Given an iterable of tokens, returns variables and their values as a\ndictionary.\n\nFor example:\n['dtap=prod', 'comment=some comment']\nReturns:\n{'dtap': 'prod', 'comment': 'some comment'}", "source": "codesearchnet"}
{"code": "def pot_string_from_file(filename='feff.inp'):\n        \n        with zopen(filename, \"rt\") as f_object:\n            f = f_object.readlines()\n            ln = -1\n            pot_str = [\"POTENTIALS\\n\"]\n            pot_tag = -1\n            pot_data = 0\n            pot_data_over = 1\n\n            sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),\n                                re.compile('^[*]+.*[*]+$')]\n\n            for line in f:\n                if pot_data_over == 1:\n                    ln += 1\n                    if pot_tag == -1:\n                        pot_tag = line.find(\"POTENTIALS\")\n                        ln = 0\n                    if pot_tag >= 0 and ln > 0 and pot_data_over > 0:\n                        try:\n                            if len(sep_line_pattern[0].findall(line)) > 0 or \\\n                                            len(sep_line_pattern[1].findall(line)) > 0:\n                                pot_str.append(line)\n                            elif int(line.split()[0]) == pot_data:\n                                pot_data += 1\n                                pot_str.append(line.replace(\"\\r\", \"\"))\n                        except (ValueError, IndexError):\n                            if pot_data > 0:\n                                pot_data_over = 0\n\n        return ''.join(pot_str).rstrip('\\n')", "docstring": "Reads Potential parameters from a feff.inp or FEFFPOT file.\nThe lines are arranged as follows:\n\nipot   Z   element   lmax1   lmax2   stoichometry   spinph\n\nArgs:\nfilename: file name containing potential data.\n\nReturns:\nFEFFPOT string.", "source": "juraj-google-style"}
{"code": "def patch_toText(self, patches):\n    text = []\n    for patch in patches:\n        text.append(str(patch))\n    return ''.join(text)", "docstring": "Take a list of patches and return a textual representation.\n\nArgs:\npatches: Array of Patch objects.\n\nReturns:\nText representation of patches.", "source": "codesearchnet"}
{"code": "def list_attributes(self, name):\n        \n        result = self.client.service.getListAttributes(name, self.proxy_id)\n        if isinstance(result, list) and len(result) == 1:\n            return result[0]\n        return result", "docstring": "Look up the attributes of a list.\n\nArgs:\nname (str): The name of the list\n\nReturns:\ndict: attributes of the list", "source": "juraj-google-style"}
{"code": "def create_token(self,\n                     token_name,\n                     project_name,\n                     dataset_name,\n                     is_public):\n        \n        url = self.url() + '/nd/resource/dataset/{}'.format(\n            dataset_name) + '/project/{}'.format(project_name) + \\\n            '/token/{}/'.format(token_name)\n\n        json = {\n            \"token_name\": token_name,\n            \"public\": is_public\n        }\n\n        req = self.remote_utils.post_url(url, json=json)\n\n        if req.status_code is not 201:\n            raise RemoteDataUploadError('Cout not upload {}:'.format(req.text))\n        if req.content == \"\" or req.content == b'':\n            return True\n        else:\n            return False", "docstring": "Creates a token with the given parameters.\nArguments:\nproject_name (str): Project name\ndataset_name (str): Dataset name project is based on\ntoken_name (str): Token name\nis_public (int): 1 is public. 0 is not public\nReturns:\nbool: True if project created, false if not created.", "source": "juraj-google-style"}
{"code": "def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):\n    \n    if not scan_context:\n      raise ValueError('Invalid scan context.')\n\n    scan_context.updated = False\n\n    if scan_path_spec:\n      scan_node = scan_context.GetScanNode(scan_path_spec)\n\n    else:\n      scan_node = scan_context.GetUnscannedScanNode()\n\n    if scan_node:\n      self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)", "docstring": "Scans for supported formats.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nauto_recurse (Optional[bool]): True if the scan should automatically\nrecurse as far as possible.\nscan_path_spec (Optional[PathSpec]): path specification to indicate\nwhere the source scanner should continue scanning, where None\nindicates the scanner will start with the sources.\n\nRaises:\nValueError: if the scan context is invalid.", "source": "juraj-google-style"}
{"code": "def update_nanopubstore_start_dt(url: str, start_dt: str):\n    \n\n    hostname = urllib.parse.urlsplit(url)[1]\n\n    start_dates_doc = state_mgmt.get(start_dates_doc_key)\n    if not start_dates_doc:\n        start_dates_doc = {\n            \"_key\": start_dates_doc_key,\n            \"start_dates\": [{\"nanopubstore\": hostname, \"start_dt\": start_dt}],\n        }\n        state_mgmt.insert(start_dates_doc)\n    else:\n        for idx, start_date in enumerate(start_dates_doc[\"start_dates\"]):\n            if start_date[\"nanopubstore\"] == hostname:\n                start_dates_doc[\"start_dates\"][idx][\"start_dt\"] = start_dt\n                break\n        else:\n            start_dates_doc[\"start_dates\"].append(\n                {\"nanopubstore\": hostname, \"start_dt\": start_dt}\n            )\n\n        state_mgmt.replace(start_dates_doc)", "docstring": "Add nanopubstore start_dt to belapi.state_mgmt collection\n\nArgs:\nurl: url of nanopubstore\nstart_dt: datetime of last query against nanopubstore for new ID's", "source": "juraj-google-style"}
{"code": "def __init__(self, seed_fn, desc=None):  \n        \n        if desc is None:\n            desc = u'Query({})'.format(getattr(seed_fn, '__name__', ''))\n\n        self.seed_fn = seed_fn\n        self.transforms = []\n        self.desc_stack = []\n        self.desc = desc", "docstring": "Configure the `Query`.\n\nArgs:\nseed_fn (callable): Callable with no arguments that produces a list of values.\n\nKeyword Args:\ndesc (str): A description of the query, used in log messages.\nIf not provided, defaults to the name of the seed function.\n\nReturns:\nQuery", "source": "juraj-google-style"}
{"code": "def add_triple(self, p, o, auto_refresh=True):\n\n\t\t\n\n\t\tself.rdf.graph.add((self.uri, p, self._handle_object(o)))\n\n\t\t\n\t\tself._handle_triple_refresh(auto_refresh)", "docstring": "add triple by providing p,o, assumes s = subject\n\nArgs:\np (rdflib.term.URIRef): predicate\no (): object\nauto_refresh (bool): whether or not to update object-like self.rdf.triples\n\nReturns:\nNone: adds triple to self.rdf.graph", "source": "juraj-google-style"}
{"code": "def get_associated_resource(self, task):\n    if (not task):\n        raise HPOneViewUnknownType(MSG_INVALID_TASK)\n    if ((task['category'] != 'tasks') and (task['category'] != 'backups')):\n        raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE)\n    if (task['type'] == 'TaskResourceV2'):\n        resource_uri = task['associatedResource']['resourceUri']\n        if (resource_uri and resource_uri.startswith('/rest/appliance/support-dumps/')):\n            return (task, resource_uri)\n    elif (task['type'] == 'BACKUP'):\n        task = self._connection.get(task['taskUri'])\n        resource_uri = task['uri']\n    else:\n        raise HPOneViewInvalidResource((MSG_TASK_TYPE_UNRECONIZED % task['type']))\n    entity = {}\n    if resource_uri:\n        entity = self._connection.get(resource_uri)\n    return (task, entity)", "docstring": "Retrieve a resource associated with a task.\n\nArgs:\ntask: task dict\n\nReturns:\ntuple: task (updated), the entity found (dict)", "source": "codesearchnet"}
{"code": "def plot_vec(axis, step, var):\n    \n    xmesh, ymesh, vec1, vec2 = get_meshes_vec(step, var)\n    dipz = step.geom.nztot \n    if conf.field.shift:\n        vec1 = np.roll(vec1, conf.field.shift, axis=0)\n        vec2 = np.roll(vec2, conf.field.shift, axis=0)\n    if step.geom.spherical or conf.plot.ratio is None:\n        dipx = dipz\n    else:\n        dipx = step.geom.nytot if step.geom.twod_yz else step.geom.nxtot\n        dipx = int(dipx \n    axis.quiver(xmesh[::dipx, ::dipz], ymesh[::dipx, ::dipz],\n                vec1[::dipx, ::dipz], vec2[::dipx, ::dipz],\n                linewidths=1)", "docstring": "Plot vector field.\n\nArgs:\naxis (:class:`matplotlib.axes.Axes`): the axis handler of an\nexisting matplotlib figure where the vector field should\nbe plotted.\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): the vector field name.", "source": "juraj-google-style"}
{"code": "def set_session(self, headers=None):\n    if (headers is None):\n        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}\n    elif (not isinstance(headers, dict)):\n        raise TypeError('\"headers\" must be a dict object')\n    self.session = Session(self.proxy_pool)\n    self.session.headers.update(headers)", "docstring": "Init session with default or custom headers\n\nArgs:\nheaders: A dict of headers (default None, thus using the default\nheader to init the session)", "source": "codesearchnet"}
{"code": "def StringEscape(self, string, match, **_):\n    \n    precondition.AssertType(string, Text)\n    if match.group(1) in \"'\\\"rnbt\":\n      self.string += compatibility.UnescapeString(string)\n    else:\n      self.string += string", "docstring": "Escape backslashes found inside a string quote.\n\nBackslashes followed by anything other than ['\"rnbt] will just be included\nin the string.\n\nArgs:\nstring: The string that matched.\nmatch: The match object (m.group(1) is the escaped code)", "source": "juraj-google-style"}
{"code": "def _get_bundles_by_type(self, type):\n        \n        bundles = {}\n        bundle_definitions = self.config.get(type)\n        if bundle_definitions is None:\n            return bundles\n        \n        for bundle_name, paths in bundle_definitions.items():\n            bundle_files = []\n            \n            for path in paths:\n                \n                pattern = abspath = os.path.join(self.basedir, path)\n                \n                \n                \n                \n                \n                assetdir = os.path.dirname(abspath)\n                \n                \n                \n                fnames = [os.path.join(assetdir, fname)\n                          for fname in os.listdir(assetdir)]\n                expanded_fnames = fnmatch.filter(fnames, pattern)\n                bundle_files.extend(sorted(expanded_fnames))\n            bundles[bundle_name] = bundle_files\n\n        return bundles", "docstring": "Get a dictionary of bundles for requested type.\n\nArgs:\ntype: 'javascript' or 'css'", "source": "juraj-google-style"}
{"code": "def __init__(self, streaming_buffer, writer_spec=None):\n    \n    self._streaming_buffer = streaming_buffer\n    self._no_dup = False\n    if writer_spec:\n      self._no_dup = writer_spec.get(self._NO_DUPLICATE, False)\n    if self._no_dup:\n      \n      \n      \n      self._seg_index = int(streaming_buffer.name.rsplit(\"-\", 1)[1])\n      \n      \n      \n      \n      self._seg_valid_length = 0", "docstring": "Initialize a GoogleCloudStorageOutputWriter instance.\n\nArgs:\nstreaming_buffer: an instance of writable buffer from cloudstorage_api.\n\nwriter_spec: the specification for the writer.", "source": "juraj-google-style"}
{"code": "def __init__(self, specification_store, signature_identifiers):\n    \n    super(SignaturesFileEntryFilter, self).__init__()\n    self._file_scanner = None\n    self._signature_identifiers = []\n\n    self._file_scanner = self._GetScanner(\n        specification_store, signature_identifiers)", "docstring": "Initializes a signature-based file entry filter.\n\nArgs:\nspecification_store (FormatSpecificationStore): a specification store.\nsignature_identifiers (list[str]): signature identifiers.", "source": "juraj-google-style"}
{"code": "def calculate_sun_from_hoy(self, hoy, is_solar_time=False):\n        \n        datetime = DateTime.from_hoy(hoy, self.is_leap_year)\n        return self.calculate_sun_from_date_time(datetime, is_solar_time)", "docstring": "Get Sun data for an hour of the year.\n\nArgs:\ndatetime: Ladybug datetime\nis_solar_time: A boolean to indicate if the input hour is solar time\n(Default: False).\n\nReturns:\nA sun object for this particular time", "source": "juraj-google-style"}
{"code": "def preemphasis(signal, shift=1, cof=0.98):\n    \n\n    rolled_signal = np.roll(signal, shift)\n    return signal - cof * rolled_signal", "docstring": "preemphasising on the signal.\n\nArgs:\nsignal (array): The input signal.\nshift (int): The shift step.\ncof (float): The preemphasising coefficient. 0 equals to no filtering.\n\nReturns:\narray: The pre-emphasized signal.", "source": "juraj-google-style"}
{"code": "def __init__(self, operator, left, right):\n        \n        super(BinaryComposition, self).__init__(operator, left, right)\n        self.operator = operator\n        self.left = left\n        self.right = right\n        self.validate()", "docstring": "Construct an expression that connects two expressions with an operator.\n\nArgs:\noperator: unicode, specifying where the field was declared\nleft: Expression on the left side of the binary operator\nright: Expression on the right side of the binary operator\n\nReturns:\nnew BinaryComposition object", "source": "juraj-google-style"}
{"code": "async def update(self, service_id: str, version: str, *, image: str=None, rollback: bool=False) -> bool:\n    if ((image is None) and (rollback is False)):\n        raise ValueError('You need to specify an image.')\n    inspect_service = (await self.inspect(service_id))\n    spec = inspect_service['Spec']\n    if (image is not None):\n        spec['TaskTemplate']['ContainerSpec']['Image'] = image\n    params = {'version': version}\n    if (rollback is True):\n        params['rollback'] = 'previous'\n    data = json.dumps(clean_map(spec))\n    (await self.docker._query_json('services/{service_id}/update'.format(service_id=service_id), method='POST', data=data, params=params))\n    return True", "docstring": "Update a service.\nIf rollback is True image will be ignored.\n\nArgs:\nservice_id: ID or name of the service.\nversion: Version of the service that you want to update.\nrollback: Rollback the service to the previous service spec.\n\nReturns:\nTrue if successful.", "source": "codesearchnet"}
{"code": "def _generate_G_points(self, kpoint):\n        \n        gpoints = []\n        for i in range(2 * self._nbmax[2] + 1):\n            i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i\n            for j in range(2 * self._nbmax[1] + 1):\n                j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j\n                for k in range(2 * self._nbmax[0] + 1):\n                    k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k\n                    G = np.array([k1, j2, i3])\n                    v = kpoint + G\n                    g = np.linalg.norm(np.dot(v, self.b))\n                    E = g ** 2 / self._C\n                    if E < self.encut:\n                        gpoints.append(G)\n        return np.array(gpoints, dtype=np.float64)", "docstring": "Helper function to generate G-points based on nbmax.\n\nThis function iterates over possible G-point values and determines\nif the energy is less than G_{cut}. Valid values are appended to\nthe output array. This function should not be called outside of\ninitialization.\n\nArgs:\nkpoint (np.array): the array containing the current k-point value\n\nReturns:\na list containing valid G-points", "source": "juraj-google-style"}
{"code": "def Normalize(self, fraction=1.0):\n        \n        if self.log:\n            raise ValueError(\"Pmf is under a log transform\")\n\n        total = self.Total()\n        if total == 0.0:\n            raise ValueError('total probability is zero.')\n            logging.warning('Normalize: total probability is zero.')\n            return total\n\n        factor = float(fraction) / total\n        for x in self.d:\n            self.d[x] *= factor\n\n        return total", "docstring": "Normalizes this PMF so the sum of all probs is fraction.\n\nArgs:\nfraction: what the total should be after normalization\n\nReturns: the total probability before normalizing", "source": "juraj-google-style"}
{"code": "def make_pixel_mask(image: 'torch.Tensor', output_size: Tuple[int, int]) -> 'torch.Tensor':\n    input_height, input_width = image.shape[-2:]\n    batch_size = image.size(0)\n    mask = torch.zeros((batch_size, *output_size), dtype=torch.long)\n    mask[:input_height, :input_width] = 1\n    return mask", "docstring": "Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.\n\nArgs:\nimage (`np.ndarray`):\nImage to make the pixel mask for.\noutput_size (`Tuple[int, int]`):\nOutput size of the mask.", "source": "github-repos"}
{"code": "def get_audience(self, audience_id):\n    \n\n    audience = self.audience_id_map.get(audience_id)\n    if audience:\n      return audience\n\n    self.logger.error('Audience ID \"%s\" is not in datafile.' % audience_id)\n    self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE_ERROR)))", "docstring": "Get audience object for the provided audience ID.\n\nArgs:\naudience_id: ID of the audience.\n\nReturns:\nDict representing the audience.", "source": "juraj-google-style"}
{"code": "def get(self, branch='master', filename=''):\n        \n        file_contents = ''\n\n        if self.runway_dir:\n            file_contents = self.local_file(filename=filename)\n        else:\n            file_contents = self.remote_file(branch=branch, filename=filename)\n\n        return file_contents", "docstring": "Retrieve _filename_ from GitLab.\n\nArgs:\nbranch (str): Git Branch to find file.\nfilename (str): Name of file to retrieve relative to root of Git\nrepository, or _runway_dir_ if specified.\n\nReturns:\nstr: Contents of file.", "source": "juraj-google-style"}
{"code": "def load_strain(self, strain_id, strain_genome_file):\n    strain_gp = GEMPRO(gem_name=strain_id, genome_path=strain_genome_file, write_protein_fasta_files=False)\n    self.strains.append(strain_gp)\n    return self.strains.get_by_id(strain_id)", "docstring": "Load a strain as a new GEM-PRO by its ID and associated genome file. Stored in the ``strains`` attribute.\n\nArgs:\nstrain_id (str): Strain ID\nstrain_genome_file (str): Path to strain genome file", "source": "codesearchnet"}
{"code": "def SignFile(self, in_filename, out_filename=None):\n    \n    if out_filename is None:\n      out_filename = \"%s.signed\" % in_filename\n\n    args = [\n        \"-certs\", self.cert, \"-key\", self.key, \"-n\", self.application, \"-t\",\n        \"http:\n        \"-in\", in_filename, \"-out\", out_filename\n    ]\n\n    try:\n      output_log = io.StringIO()\n      ossl = pexpect.spawn(\"osslsigncode\", args)\n      \n      ossl.logfile_read = output_log\n      ossl.expect(\"Enter PEM pass phrase\")\n      ossl.sendline(self.password)\n      ossl.wait()\n    except pexpect.ExceptionPexpect:\n      output_log.seek(0)\n      logging.exception(output_log.read())\n      raise\n\n    if not os.path.exists(out_filename):\n      raise SigningError(\"Expected output %s not created\" % out_filename)\n\n    try:\n      subprocess.check_call([\"osslsigncode\", \"verify\", \"-in\", out_filename])\n    except subprocess.CalledProcessError:\n      logging.exception(\"Bad signature verification on %s\", out_filename)\n      raise SigningError(\"Bad signature verification on %s\" % out_filename)\n\n    return out_filename", "docstring": "Sign a file using osslsigncode.\n\nArgs:\nin_filename: file to read from\nout_filename: file to output to, if none we output to the same filename as\nthe input with a .signed suffix.\n\nReturns:\noutput filename string\nRaises:\npexpect.ExceptionPexpect: if the expect invocation of osslsigncode fails.\nSigningError: for signing failures.", "source": "juraj-google-style"}
{"code": "def save_checkpoint(model, filename, optimizer=None, meta=None):\n    if (meta is None):\n        meta = {}\n    elif (not isinstance(meta, dict)):\n        raise TypeError('meta must be a dict or None, but got {}'.format(type(meta)))\n    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())\n    mmcv.mkdir_or_exist(osp.dirname(filename))\n    if hasattr(model, 'module'):\n        model = model.module\n    checkpoint = {'meta': meta, 'state_dict': weights_to_cpu(model.state_dict())}\n    if (optimizer is not None):\n        checkpoint['optimizer'] = optimizer.state_dict()\n    torch.save(checkpoint, filename)", "docstring": "Save checkpoint to file.\n\nThe checkpoint will have 3 fields: ``meta``, ``state_dict`` and\n``optimizer``. By default ``meta`` will contain version and time info.\n\nArgs:\nmodel (Module): Module whose params are to be saved.\nfilename (str): Checkpoint filename.\noptimizer (:obj:`Optimizer`, optional): Optimizer to be saved.\nmeta (dict, optional): Metadata to be saved in checkpoint.", "source": "codesearchnet"}
{"code": "def _check_sensor_platform_consistency(self, sensor):\n    ref_sensor = SENSORS.get(self.platform, None)\n    if (ref_sensor and (not (sensor == ref_sensor))):\n        logger.error('Sensor-Platform mismatch: {} is not a payload of {}. Did you choose the correct reader?'.format(sensor, self.platform))", "docstring": "Make sure sensor and platform are consistent\n\nArgs:\nsensor (str) : Sensor name from YAML dataset definition\n\nRaises:\nValueError if they don't match", "source": "codesearchnet"}
{"code": "def read_config(config_filepath, logger=logging.getLogger('ProsperCommon')):\n    config_parser = configparser.ConfigParser(interpolation=ExtendedInterpolation(), allow_no_value=True, delimiters='=', inline_comment_prefixes='\n    logger.debug('config_filepath=%s', config_filepath)\n    with open(config_filepath, 'r') as filehandle:\n        config_parser.read_file(filehandle)\n    return config_parser", "docstring": "fetch and parse config file\n\nArgs:\nconfig_filepath (str): path to config file.  abspath > relpath\nlogger (:obj:`logging.Logger`): logger to catch error msgs", "source": "codesearchnet"}
{"code": "def Check(self, error, filename, linenum):\n    if Match('T(EST|est)', self.current_function):\n        base_trigger = self._TEST_TRIGGER\n    else:\n        base_trigger = self._NORMAL_TRIGGER\n    trigger = (base_trigger * (2 ** _VerboseLevel()))\n    if (self.lines_in_function > trigger):\n        error_level = int(math.log((self.lines_in_function / base_trigger), 2))\n        if (error_level > 5):\n            error_level = 5\n        error(filename, linenum, 'readability/fn_size', error_level, ('Small and focused functions are preferred: %s has %d non-comment lines (error triggered by exceeding %d lines).' % (self.current_function, self.lines_in_function, trigger)))", "docstring": "Report if too many lines in function body.\n\nArgs:\nerror: The function to call with any errors found.\nfilename: The name of the current file.\nlinenum: The number of the line to check.", "source": "codesearchnet"}
{"code": "def instance_default(self, obj):\n        \n        return self.property.themed_default(obj.__class__, self.name, obj.themed_values())", "docstring": "Get the default value that will be used for a specific instance.\n\nArgs:\nobj (HasProps) : The instance to get the default value for.\n\nReturns:\nobject", "source": "juraj-google-style"}
{"code": "def exists(self, path: str) -> bool:\n    raise NotImplementedError", "docstring": "Check if the provided path exists on the FileSystem.\n\nArgs:\npath: string path that needs to be checked.\n\nReturns: boolean flag indicating if path exists", "source": "github-repos"}
{"code": "def _eligible_features_from_example_handler(self, request):\n    features_list = inference_utils.get_eligible_features(self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)\n    return http_util.Respond(request, features_list, 'application/json')", "docstring": "Returns a list of JSON objects for each feature in the example.\n\nArgs:\nrequest: A request for features.\n\nReturns:\nA list with a JSON object for each feature.\nNumeric features are represented as {name: observedMin: observedMax:}.\nCategorical features are repesented as {name: samples:[]}.", "source": "codesearchnet"}
{"code": "def plot_stacked_hist(self, key=\"wall_time\", nmax=5, ax=None, **kwargs):\n        \n        ax, fig, plt = get_ax_fig_plt(ax=ax)\n\n        mpi_rank = \"0\"\n        timers = self.timers(mpi_rank=mpi_rank)\n        n = len(timers)\n\n        names, values = [], []\n        rest = np.zeros(n)\n\n        for idx, sname in enumerate(self.section_names(ordkey=key)):\n            sections = self.get_sections(sname)\n            svals = np.asarray([s.__dict__[key] for s in sections])\n            if idx < nmax:\n                names.append(sname)\n                values.append(svals)\n            else:\n                rest += svals\n\n        names.append(\"others (nmax=%d)\" % nmax)\n        values.append(rest)\n\n        \n        ind = np.arange(n) \n        width = 0.35       \n        colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']\n\n        bars = []\n        bottom = np.zeros(n)\n        for idx, vals in enumerate(values):\n            color = colors[idx]\n            bar = ax.bar(ind, vals, width, color=color, bottom=bottom)\n            bars.append(bar)\n            bottom += vals\n\n        ax.set_ylabel(key)\n        ax.set_title(\"Stacked histogram with the %d most important sections\" % nmax)\n\n        ticks = ind + width / 2.0\n        labels = [\"MPI=%d, OMP=%d\" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]\n        ax.set_xticks(ticks)\n        ax.set_xticklabels(labels, rotation=15)\n\n        \n        ax.legend([bar[0] for bar in bars], names, loc=\"best\")\n\n        return fig", "docstring": "Plot stacked histogram of the different timers.\n\nArgs:\nkey: Keyword used to extract data from the timers. Only the first `nmax`\nsections with largest value are show.\nmmax: Maximum nuber of sections to show. Other entries are grouped together\nin the `others` section.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n`matplotlib` figure", "source": "juraj-google-style"}
{"code": "def array(self):\n    url = '{}/{}'.format(__endpoint__, self.type.RESOURCE)\n    return RestClient.get(url, self.params)[self.type.RESOURCE]", "docstring": "Get all resources and return the result as an array\n\nReturns:\narray of str: Array of resources", "source": "codesearchnet"}
{"code": "def createCategoryFilter(self, retina_name, filter_name, body, ):\n        \n\n        resourcePath = '/classify/create_category_filter'\n        method = 'POST'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        queryParams['retina_name'] = retina_name\n        queryParams['filter_name'] = filter_name\n        postData = body\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n            \n        return category_filter.CategoryFilter(**response.json())", "docstring": "get filter for classifier\nArgs:\nfilter_name, str: A unique name for the filter. (required)\nbody, FilterTrainingObject: The list of positive and negative (optional) example items. (required)\nretina_name, str: The retina name (required)\nReturns: CategoryFilter", "source": "juraj-google-style"}
{"code": "class AlignVisionBlock(nn.Module):\n\n    def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):\n        super().__init__()\n        self.expand_ratio = expand_ratio\n        self.expand = True if self.expand_ratio != 1 else False\n        expand_in_dim = in_dim * expand_ratio\n        if self.expand:\n            self.expansion = AlignVisionExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride)\n        self.depthwise_conv = AlignVisionDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding)\n        self.squeeze_excite = AlignVisionSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand)\n        self.projection = AlignVisionFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip)\n\n    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:\n        embeddings = hidden_states\n        if self.expand_ratio != 1:\n            hidden_states = self.expansion(hidden_states)\n        hidden_states = self.depthwise_conv(hidden_states)\n        hidden_states = self.squeeze_excite(hidden_states)\n        hidden_states = self.projection(embeddings, hidden_states)\n        return hidden_states", "docstring": "This corresponds to the block module of original the EfficientNet vision encoder implementation.\n\nArgs:\nconfig ([`AlignVisionConfig`]):\nModel configuration class.\nin_dim (`int`):\nNumber of input channels.\nout_dim (`int`):\nNumber of output channels.\nstride (`int`):\nStride size to be used in convolution layers.\nexpand_ratio (`int`):\nExpand ratio to set the output dimensions for the expansion and squeeze-excite layers.\nkernel_size (`int`):\nKernel size for the depthwise convolution layer.\ndrop_rate (`float`):\nDropout rate to be used in the final phase of each block.\nid_skip (`bool`):\nWhether to apply dropout and sum the final hidden states with the input embeddings during the final phase\nof each block. Set to `True` for the first block of each stage.\nadjust_padding (`bool`):\nWhether to apply padding to only right and bottom side of the input kernel before the depthwise convolution\noperation, set to `True` for inputs with odd input sizes.", "source": "github-repos"}
{"code": "def trace_buffer_capacity(self):\n    cmd = enums.JLinkTraceCommand.GET_CONF_CAPACITY\n    data = ctypes.c_uint32(0)\n    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n    if (res == 1):\n        raise errors.JLinkException('Failed to get trace buffer size.')\n    return data.value", "docstring": "Retrieves the trace buffer's current capacity.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe current capacity of the trace buffer.  This is not necessarily\nthe maximum possible size the buffer could be configured with.", "source": "codesearchnet"}
{"code": "def filter(self, filter_fn=None, desc=None, **kwargs):\n    if ((filter_fn is not None) and kwargs):\n        raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.')\n    if ((filter_fn is None) and (not kwargs)):\n        raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().')\n    if (desc is None):\n        if (filter_fn is not None):\n            desc = getattr(filter_fn, '__name__', '')\n        elif kwargs:\n            desc = u', '.join([u'{}={!r}'.format(key, value) for (key, value) in kwargs.items()])\n    desc = u'filter({})'.format(desc)\n    if kwargs:\n\n        def filter_fn(elem):\n            return all(((getattr(elem, filter_key) == filter_value) for (filter_key, filter_value) in kwargs.items()))\n    return self.transform((lambda xs: (x for x in xs if filter_fn(x))), desc=desc)", "docstring": "Return a copy of this query, with some values removed.\n\nExample usages:\n\n.. code:: python\n\n# Returns a query that matches even numbers\nq.filter(filter_fn=lambda x: x % 2)\n\n# Returns a query that matches elements with el.description == \"foo\"\nq.filter(description=\"foo\")\n\nKeyword Args:\nfilter_fn (callable): If specified, a function that accepts one argument (the element)\nand returns a boolean indicating whether to include that element in the results.\n\nkwargs: Specify attribute values that an element must have to be included in the results.\n\ndesc (str): A description of the filter, for use in log messages.\nDefaults to the name of the filter function or attribute.\n\nRaises:\nTypeError: neither or both of `filter_fn` and `kwargs` are provided.", "source": "codesearchnet"}
{"code": "def format_ascii(sensor_graph):\n    cmdfile = CommandFile('Sensor Graph', '1.0')\n    cmdfile.add('set_online', False)\n    cmdfile.add('clear')\n    cmdfile.add('reset')\n    for node in sensor_graph.dump_nodes():\n        cmdfile.add('add_node', node)\n    for streamer in sensor_graph.streamers:\n        other = 255\n        if (streamer.with_other is not None):\n            other = streamer.with_other\n        args = [streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type, other]\n        cmdfile.add('add_streamer', *args)\n    for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())):\n        cmdfile.add('push_reading', stream, value)\n    cmdfile.add('persist')\n    cmdfile.add('set_online', True)\n    return cmdfile.dump()", "docstring": "Format this sensor graph as a loadable ascii file format.\n\nThis includes commands to reset and clear previously stored\nsensor graphs.\n\nNB. This format does not include any required configuration\nvariables that were specified in this sensor graph, so you\nshould also output tha information separately in, e.g.\nthe config format.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format\n\nReturns:\nstr: The ascii output lines concatenated as a single string", "source": "codesearchnet"}
{"code": "def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, data_type, use_gpu, grouped_conv=False, data_format='NHWC', dilations=None, tolerance=None):\n    input_size = 1\n    filter_size = 1\n    for s in tensor_in_sizes:\n        input_size *= s\n    for s in filter_in_sizes:\n        filter_size *= s\n    x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]\n    x1 = np.array(x1).reshape(tensor_in_sizes)\n    x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]\n    x2 = np.array(x2).reshape(filter_in_sizes)\n    strides = [1, stride, stride, 1]\n    if isinstance(padding, list):\n        padding = [(0, 0)] + padding + [(0, 0)]\n    np_result = _DepthwiseConv2dNumpy(x1, x2, strides, padding, 'NHWC', dilations)\n    ops.reset_default_graph()\n    graph = ops.get_default_graph()\n    with self.session(graph=graph, use_gpu=use_gpu) as sess:\n        tolerance = tolerance or {dtypes.float16: 0.04, dtypes.float32: 1e-05, dtypes.float64: 1e-12, dtypes.bfloat16: 0.01}[data_type]\n        t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)\n        t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)\n        if data_format == 'NCHW':\n            t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n            strides = [1, 1, stride, stride]\n            if isinstance(padding, list):\n                padding = [padding[0], padding[3], padding[1], padding[2]]\n        if dilations is None:\n            with sess.graph._kernel_label_map({'DepthwiseConv2dNative': 'cudnn_grouped_convolution'} if grouped_conv else {}):\n                conv_native = nn_ops.depthwise_conv2d_native(t1, t2, strides=strides, data_format=data_format, padding=padding)\n            if data_format == 'NCHW':\n                conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])\n            try:\n                native_result = self.evaluate(conv_native)\n            except errors.InvalidArgumentError as e:\n                if \"No OpKernel was registered to support Op 'DepthwiseConv2dNative'\" in e.message:\n                    tf_logging.warn('Skipping grouped convolution test')\n                    return\n                raise e\n        conv_interface = nn_impl.depthwise_conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format, dilations=dilations)\n        if data_format == 'NCHW':\n            conv_interface = array_ops.transpose(conv_interface, [0, 2, 3, 1])\n        interface_result = self.evaluate(conv_interface)\n    if dilations is None:\n        self.assertAllClose(native_result, np_result, atol=tolerance, rtol=tolerance)\n    self.assertAllClose(interface_result, np_result, atol=tolerance, rtol=tolerance)", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,\ninput_depth, depth_multiplier].\nstride: Stride.\npadding: Padding type.\ndata_type: The data type to use.\nuse_gpu: Whether to use GPU.\ngrouped_conv: Whether to use cuDNN 7's grouped convolution.\ndata_format: The data_format of the input. \"NHWC\" or \"NCHW\".\ndilations: A list of 2 elements, representing the dilations.\ntolerance: The absolute and relative tolarance when verifying the output.", "source": "github-repos"}
{"code": "def install_event_handlers(self, categories=None, handlers=None):\n        \n        if categories is not None and handlers is not None:\n            raise ValueError(\"categories and handlers are mutually exclusive!\")\n\n        from .events import get_event_handler_classes\n        if categories:\n            raise NotImplementedError()\n            handlers = [cls() for cls in get_event_handler_classes(categories=categories)]\n        else:\n            handlers = handlers or [cls() for cls in get_event_handler_classes()]\n\n        self._event_handlers = handlers", "docstring": "Install the `EventHandlers for this `Node`. If no argument is provided\nthe default list of handlers is installed.\n\nArgs:\ncategories: List of categories to install e.g. base + can_change_physics\nhandlers: explicit list of :class:`EventHandler` instances.\nThis is the most flexible way to install handlers.\n\n.. note::\n\ncategories and handlers are mutually exclusive.", "source": "juraj-google-style"}
{"code": "def remove_bond(self, idx1, idx2):\n    for obbond in ob.OBMolBondIter(self._obmol):\n        if (((obbond.GetBeginAtomIdx() == idx1) and (obbond.GetEndAtomIdx() == idx2)) or ((obbond.GetBeginAtomIdx() == idx2) and (obbond.GetEndAtomIdx() == idx1))):\n            self._obmol.DeleteBond(obbond)", "docstring": "Remove a bond from an openbabel molecule\n\nArgs:\nidx1: The atom index of one of the atoms participating the in bond\nidx2: The atom index of the other atom participating in the bond", "source": "codesearchnet"}
{"code": "def convert_videos_to_summaries(input_videos, output_videos, target_videos,\n                                tag, decode_hparams,\n                                display_ground_truth=False):\n  \n  fps = decode_hparams.frames_per_second\n  border_percent = decode_hparams.border_percent\n  max_outputs = decode_hparams.max_display_outputs\n  target_steps = target_videos.shape[1]\n  all_summaries = []\n  input_videos = create_border(\n      input_videos, color=\"blue\", border_percent=border_percent)\n  target_videos = create_border(\n      target_videos, color=\"red\", border_percent=border_percent)\n  output_videos = create_border(\n      output_videos, color=\"red\", border_percent=border_percent)\n\n  all_input = np.concatenate((input_videos, target_videos), axis=1)\n  all_output = np.concatenate((input_videos, output_videos), axis=1)\n  output_summ_vals, _ = common_video.py_gif_summary(\n      \"%s/output\" % tag, all_output, max_outputs=max_outputs, fps=fps,\n      return_summary_value=True)\n  all_summaries.extend(output_summ_vals)\n\n  \n  if display_ground_truth:\n    input_summ_vals, _ = common_video.py_gif_summary(\n        \"%s/input\" % tag, all_input, max_outputs=max_outputs, fps=fps,\n        return_summary_value=True)\n    all_summaries.extend(input_summ_vals)\n\n  \n  iterable = zip(output_videos[:max_outputs, :target_steps],\n                 target_videos[:max_outputs])\n  for ind, (input_video, output_video) in enumerate(iterable):\n    t, h, w, c = input_video.shape\n    \n    input_frames = np.reshape(input_video, (t*h, w, c))\n    output_frames = np.reshape(output_video, (t*h, w, c))\n\n    \n    all_frames = np.concatenate((input_frames, output_frames), axis=1)\n    tag = \"input/output/%s_sample_%d\" % (tag, ind)\n    frame_by_frame_summ = image_utils.image_to_tf_summary_value(\n        all_frames, tag=tag)\n    all_summaries.append(frame_by_frame_summ)\n  return all_summaries", "docstring": "Converts input, output and target videos into video summaries.\n\nArgs:\ninput_videos: 5-D NumPy array, (NTHWC) conditioning frames.\noutput_videos: 5-D NumPy array, (NTHWC) model predictions.\ntarget_videos: 5-D NumPy array, (NTHWC) target frames.\ntag: tf summary tag.\ndecode_hparams: HParams.\ndisplay_ground_truth: Whether or not to display ground truth videos.\nReturns:\nsummaries: a list of tf frame-by-frame and video summaries.", "source": "juraj-google-style"}
{"code": "def generate_json_schema(cls, schema, context=DEFAULT_DICT):\n    schema = cls._get_schema(schema)\n    return cls(context=context).dump(schema).data", "docstring": "Generate a JSON Schema from a Marshmallow schema.\n\nArgs:\nschema (marshmallow.Schema|str): The Marshmallow schema, or the\nPython path to one, to create the JSON schema for.\n\nKeyword Args:\nfile_pointer (file, optional): The path or pointer to the file\nto write this schema to. If not provided, the schema will be\ndumped to ``sys.stdout``.\n\nReturns:\ndict: The JSON schema in dictionary form.", "source": "codesearchnet"}
{"code": "def _clean_url(url):\n    if (url == 'default'):\n        url = DEFAULT_SERVER_HTTP_URL\n    if url.startswith('ws'):\n        raise ValueError('url should be the http or https URL for the server, not the websocket URL')\n    return url.rstrip('/')", "docstring": "Produce a canonical Bokeh server URL.\n\nArgs:\nurl (str)\nA URL to clean, or \"defatul\". If \"default\" then the\n``BOKEH_SERVER_HTTP_URL`` will be returned.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def encode(self, tf_graph_predictions):\n    \n    row = []\n    for col in self._header:\n      row.append(str(tf_graph_predictions[col]))\n\n    return ','.join(row)", "docstring": "Encodes the graph json prediction into csv.\n\nArgs:\ntf_graph_predictions: python dict.\n\nReturns:\ncsv string.", "source": "juraj-google-style"}
{"code": "def lookup_value(self, api_name, key):\n        \n        if api_name in self._cache:\n            return self._cache[api_name].get(key, None)\n        return None", "docstring": "Add the value of an API call to the cache.\n\nArgs:\napi_name: a string name of the API. Keys and values are segmented by api_name.\nkey: a string key for the specific call.", "source": "juraj-google-style"}
{"code": "def make_triple(sub, pred, obj):\n    \n    return \"{s} {p} {o} .\".format(s=sub, p=pred, o=obj)", "docstring": "Takes a subject predicate and object and joins them with a space\nin between\n\nArgs:\nsub -- Subject\npred -- Predicate\nobj  -- Object\nReturns\nstr", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A ConvBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def to_sql(self, view: views.View, limit: Optional[int]=None) -> str:\n    encoder = _spark_interpreter.SparkSqlInterpreter(value_set_codes_table='VALUESET_VIEW')\n    dataset = f'{self._fhir_dataset}'\n    sql_generator = runner_utils.RunnerSqlGenerator(view, encoder, dataset, self._snake_case_resource_tables)\n    sql_statement = sql_generator.build_sql_statement()\n    valuesets_clause = sql_generator.build_valueset_expression(self._value_set_codes_table)\n    if limit is not None and limit < 1:\n        raise ValueError('Query limits must be positive integers.')\n    limit_clause = '' if limit is None else f' LIMIT {limit}'\n    return f'{valuesets_clause}{sql_statement}{limit_clause}'", "docstring": "Returns the SQL used to run the given view in Spark.\n\nArgs:\nview: the view used to generate the SQL.\nlimit: optional limit to attach to the generated SQL.\n\nReturns:\nThe SQL used to run the given view.", "source": "github-repos"}
{"code": "def copy_workspace(self, uri, new_name):\n    payload = {'isPublic': True, 'newName': new_name}\n    return self._api.request('post', (((('/api/documents/' + uri['did']) + '/workspaces/') + uri['wvm']) + '/copy'), body=payload)", "docstring": "Copy the current workspace.\n\nArgs:\n- uri (dict): the uri of the workspace being copied. Needs to have a did and wid key.\n- new_name (str): the new name of the copied workspace.\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"}
{"code": "def compile_keywords(keywords):\n    mdt = []\n    cz_keywords = []\n    en_keywords = []\n    for keyword in keywords:\n        keyword = keyword_to_info(keyword.encode('utf-8'))\n        if (not keyword):\n            continue\n        cz_keywords.append({'uid': keyword['uid'], 'zahlavi': keyword['zahlavi'], 'zdroj': 'czenas'})\n        if keyword.get('mdt'):\n            mdt.append({'mdt': keyword['mdt'], 'mrf': keyword['mrf']})\n        angl_ekvivalent = keyword.get('angl_ekvivalent')\n        if angl_ekvivalent:\n            en_keywords.append({'zahlavi': angl_ekvivalent, 'zdroj': (keyword.get('zdroj_angl_ekvivalentu') or 'eczenas')})\n    return (mdt, cz_keywords, en_keywords)", "docstring": "Translate `keywords` to full keyword records as they are used in Aleph.\n\nReturns tuple with three lists, each of which is later used in different\npart of the MRC/MARC record.\n\nArgs:\nkeywords (list): List of keyword strings.\n\nReturns:\ntuple: (mdt_list, cz_keyword_list, en_keyword_list)", "source": "codesearchnet"}
{"code": "def GetVShadowStoreByPathSpec(self, path_spec):\n    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)\n    if (store_index is None):\n        return None\n    return self._vshadow_volume.get_store(store_index)", "docstring": "Retrieves a VSS store for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyvshadow.store: a VSS store or None if not available.", "source": "codesearchnet"}
{"code": "def add_string_parameters(self, string):\n    if isinstance(string, list):\n        for x in string:\n            self.add_string_parameters(x)\n        return\n    self._parameters.append((('{ \"value\": \"' + string) + '\" }'))", "docstring": "Add given string parameters to the internal list.\n\nArgs:\nstring (list of str or str): A string or list of strings to add to the parameters.", "source": "codesearchnet"}
{"code": "def file_digest(source):\n    hash_sha256 = hashlib.sha256()\n    should_close = False\n    if isinstance(source, six.string_types):\n        should_close = True\n        source = open(source, 'rb')\n    for chunk in iter((lambda : source.read(_BUFFER_SIZE)), b''):\n        hash_sha256.update(chunk)\n    if should_close:\n        source.close()\n    return hash_sha256.hexdigest()", "docstring": "Calculates SHA256 digest of a file.\n\nArgs:\nsource: either a file-like object or a path to file", "source": "codesearchnet"}
{"code": "def _new_open_bin(self, remaining_rect):\n        \n        factories_to_delete = set() \n        new_bin = None\n\n        for key, binfac in self._empty_bins.items():\n\n            \n            \n            a_rectangle_fits = False\n            for _, rect in remaining_rect.items():\n                if binfac.fits_inside(rect[0], rect[1]):\n                    a_rectangle_fits = True\n                    break\n\n            if not a_rectangle_fits:\n                factories_to_delete.add(key)\n                continue\n           \n            \n            new_bin = binfac.new_bin()\n            if new_bin is None:\n                continue\n            self._open_bins.append(new_bin)\n\n            \n            if binfac.is_empty():\n                factories_to_delete.add(key)\n       \n            break\n\n        \n        for f in factories_to_delete:\n            del self._empty_bins[f]\n\n        return new_bin", "docstring": "Extract the next bin where at least one of the rectangles in\nrem\n\nArguments:\nremaining_rect (dict): rectangles not placed yet\n\nReturns:\nPackingAlgorithm: Initialized empty packing bin.\nNone: No bin big enough for the rectangle was found", "source": "juraj-google-style"}
{"code": "def find_element(driver, elem_path, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):\n    wait = WebDriverWait(driver, timeout, poll_frequency)\n    return wait.until(EC.presence_of_element_located((by, elem_path)))", "docstring": "Find and return an element once located\n\nfind_element locates an element on the page, waiting\nfor up to timeout seconds. The element, when located,\nis returned. If not located, a TimeoutException is raised.\n\nArgs:\ndriver (selenium webdriver or element): A driver or element\nelem_path (str): String used to located the element\nby (selenium By): Selenium By reference\ntimeout (int): Selenium Wait timeout, in seconds\npoll_frequency (float): Selenium Wait polling frequency, in seconds\n\nReturns:\nelement: Selenium element\n\nRaises:\nTimeoutException: Raised when target element isn't located", "source": "codesearchnet"}
{"code": "def create_issues_report(self, timeout=(- 1)):\n    uri = '{}/issues/'.format(self.data['uri'])\n    return self._helper.create_report(uri, timeout)", "docstring": "Creates an unexpected zoning report for a SAN.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\nlist: A list of FCIssueResponse dict.", "source": "codesearchnet"}
{"code": "def save_features(self, train_features, test_features, feature_names, feature_list_id):\n        \n\n        self.save_feature_names(feature_names, feature_list_id)\n        self.save_feature_list(train_features, 'train', feature_list_id)\n        self.save_feature_list(test_features, 'test', feature_list_id)", "docstring": "Save features for the training and test sets to disk, along with their metadata.\n\nArgs:\ntrain_features: A NumPy array of features for the training set.\ntest_features: A NumPy array of features for the test set.\nfeature_names: A list containing the names of the feature columns.\nfeature_list_id: The name for this feature list.", "source": "juraj-google-style"}
{"code": "def channel_interpolate(layer1, n_channel1, layer2, n_channel2):\n\n    def inner(T):\n        batch_n = T(layer1).get_shape().as_list()[0]\n        arr1 = T(layer1)[(..., n_channel1)]\n        arr2 = T(layer2)[(..., n_channel2)]\n        weights = (np.arange(batch_n) / float((batch_n - 1)))\n        S = 0\n        for n in range(batch_n):\n            S += ((1 - weights[n]) * tf.reduce_mean(arr1[n]))\n            S += (weights[n] * tf.reduce_mean(arr2[n]))\n        return S\n    return inner", "docstring": "Interpolate between layer1, n_channel1 and layer2, n_channel2.\n\nOptimize for a convex combination of layer1, n_channel1 and\nlayer2, n_channel2, transitioning across the batch.\n\nArgs:\nlayer1: layer to optimize 100% at batch=0.\nn_channel1: neuron index to optimize 100% at batch=0.\nlayer2: layer to optimize 100% at batch=N.\nn_channel2: neuron index to optimize 100% at batch=N.\n\nReturns:\nObjective", "source": "codesearchnet"}
{"code": "def add_cohp_dict(self, cohp_dict, key_sort_func=None):\n    if key_sort_func:\n        keys = sorted(cohp_dict.keys(), key=key_sort_func)\n    else:\n        keys = cohp_dict.keys()\n    for label in keys:\n        self.add_cohp(label, cohp_dict[label])", "docstring": "Adds a dictionary of COHPs with an optional sorting function\nfor the keys.\n\nArgs:\ncohp_dict: dict of the form {label: Cohp}\n\nkey_sort_func: function used to sort the cohp_dict keys.", "source": "codesearchnet"}
{"code": "def returns(desc=None, printer=None, data=True):\n    \n\n    if data is False:\n        raise ArgumentError(\"Specifying non data return type in returns is no longer supported\")\n\n    def _returns(func):\n        annotated(func)\n        func.custom_returnvalue(printer, desc)\n        return func\n\n    return _returns", "docstring": "Specify how the return value of this function should be handled.\n\nArgs:\ndesc (str): A deprecated description of the return value\nprinter (callable): A callable function that can format this return value\ndata (bool): A deprecated parameter for specifying that this function\nreturns data.", "source": "juraj-google-style"}
{"code": "def recursive_copy(source, destination):\n    \n    if os.path.isdir(source):\n        copy_tree(source, destination)", "docstring": "A wrapper around distutils.dir_util.copy_tree but won't throw any exception when the source\ndirectory does not exist.\n\nArgs:\nsource (str): source path\ndestination (str): destination path", "source": "juraj-google-style"}
{"code": "def update_ports(self, ports, id_or_uri, timeout=-1):\n        \n        resources = merge_default_values(ports, {'type': 'port'})\n\n        uri = self._client.build_uri(id_or_uri) + \"/update-ports\"\n        return self._client.update(resources, uri, timeout)", "docstring": "Updates the interconnect ports.\n\nArgs:\nid_or_uri: Can be either the interconnect id or the interconnect uri.\nports (list): Ports to update.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: The interconnect.", "source": "juraj-google-style"}
{"code": "def google_maps_geoloc_link(data):\n    if isinstance(data, str):\n        lat_lon = ip_geoloc(data)\n        if (lat_lon is None):\n            return ''\n        (lat, lon) = lat_lon\n    else:\n        (lat, lon) = data\n    loc = ('%s,%s' % (lat, lon))\n    return ('https:", "docstring": "Get a link to google maps pointing on this IP's geolocation.\n\nArgs:\ndata (str/tuple): IP address or (latitude, longitude).\n\nReturns:\nstr: a link to google maps pointing on this IP's geolocation.", "source": "codesearchnet"}
{"code": "def put_content(self, url, content):\n        \n        cache_path = self._url_to_path(url)\n\n        \n        try:\n            dir = os.path.dirname(cache_path)\n            os.makedirs(dir)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise Error('Failed to create cache directories for ' % cache_path)\n\n        try:\n            with open(cache_path, 'wb') as f:\n                f.write(content)\n        except IOError:\n            raise Error('Failed to cache content as %s for %s' % (cache_path, url))", "docstring": "Stores the content of a resource into the disk cache.\n\nArgs:\nurl: The url of the resource\ncontent: The content of the resource\n\nRaises:\nCacheError: If the content cannot be put in cache", "source": "juraj-google-style"}
{"code": "def renew(self, requested_timeout=None):\n    if self._has_been_unsubscribed:\n        raise SoCoException('Cannot renew subscription once unsubscribed')\n    if (not self.is_subscribed):\n        raise SoCoException('Cannot renew subscription before subscribing')\n    if (self.time_left == 0):\n        raise SoCoException('Cannot renew subscription after expiry')\n    headers = {'SID': self.sid}\n    if (requested_timeout is None):\n        requested_timeout = self.requested_timeout\n    if (requested_timeout is not None):\n        headers['TIMEOUT'] = 'Second-{}'.format(requested_timeout)\n    response = requests.request('SUBSCRIBE', (self.service.base_url + self.service.event_subscription_url), headers=headers)\n    response.raise_for_status()\n    timeout = response.headers['timeout']\n    if (timeout.lower() == 'infinite'):\n        self.timeout = None\n    else:\n        self.timeout = int(timeout.lstrip('Second-'))\n    self._timestamp = time.time()\n    self.is_subscribed = True\n    log.info('Renewed subscription to %s, sid: %s', (self.service.base_url + self.service.event_subscription_url), self.sid)", "docstring": "Renew the event subscription.\n\nYou should not try to renew a subscription which has been\nunsubscribed, or once it has expired.\n\nArgs:\nrequested_timeout (int, optional): The period for which a renewal\nrequest should be made. If None (the default), use the timeout\nrequested on subscription.", "source": "codesearchnet"}
{"code": "def depth(script, iterations=3, viewpoint=(0, 0, 0), selected=False):\n    filter_xml = ''.join(['  <filter name=\"Depth Smooth\">\\n', '    <Param name=\"stepSmoothNum\" ', 'value=\"{:d}\" '.format(iterations), 'description=\"Smoothing steps\" ', 'type=\"RichInt\" ', '/>\\n', '    <Param name=\"viewPoint\" ', 'x=\"{}\" '.format(viewpoint[0]), 'y=\"{}\" '.format(viewpoint[1]), 'z=\"{}\" '.format(viewpoint[2]), 'description=\"Smoothing steps\" ', 'type=\"RichPoint3f\" ', '/>\\n', '    <Param name=\"Selected\" ', 'value=\"{}\" '.format(str(selected).lower()), 'description=\"Affect only selected faces\" ', 'type=\"RichBool\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "A laplacian smooth that is constrained to move vertices only along the\nview direction.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\niterations (int): The number of times that the whole algorithm (normal\nsmoothing + vertex fitting) is iterated.\nviewpoint (vector tuple or list): The position of the view point that\nis used to get the constraint direction.\nselected (bool): If selected the filter is performed only on the\nselected faces\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def format_search_results(self, search_results):\n    formatted_lines = []\n    for search_result in search_results:\n        lines = self._format_search_result(search_result)\n        formatted_lines.extend(lines)\n    return formatted_lines", "docstring": "Format search results.\n\nArgs:\nsearch_results (list of `ResourceSearchResult`): Search to format.\n\nReturns:\nList of 2-tuple: Text and color to print in.", "source": "codesearchnet"}
{"code": "def set_message(self, title, msg, typ, url=None):\n        \n        return self.user.send_notification(title=title,\n                                           message=msg,\n                                           typ=typ,\n                                           url=url)", "docstring": "Sets user notification message.\n\nArgs:\ntitle: Msg. title\nmsg:  Msg. text\ntyp: Msg. type\nurl: Additional URL (if exists)\n\nReturns:\nMessage ID.", "source": "juraj-google-style"}
{"code": "def _AssertValidators(self, validators):\n    for validator in sorted(validators, key=(lambda validator: validator.insertion_index)):\n        try:\n            validator.verify(self)\n        except exceptions.ValidationError as e:\n            message = validator.print_flags_with_values(self)\n            raise exceptions.IllegalFlagValueError(('%s: %s' % (message, str(e))))", "docstring": "Assert if all validators in the list are satisfied.\n\nAsserts validators in the order they were created.\nArgs:\nvalidators: Iterable(validators.Validator), validators to be\nverified\nRaises:\nAttributeError: if validators work with a non-existing flag.\nIllegalFlagValueError: if validation fails for at least one validator", "source": "codesearchnet"}
{"code": "def validate_to_schema(nanopub, schema) -> Tuple[(bool, List[Tuple[(str, str)]])]:\n    v = jsonschema.Draft4Validator(schema)\n    messages = []\n    errors = sorted(v.iter_errors(nanopub), key=(lambda e: e.path))\n    for error in errors:\n        for suberror in sorted(error.context, key=(lambda e: e.schema_path)):\n            print(list(suberror.schema_path), suberror.message, sep=', ')\n            messages.append(('ERROR', suberror.message))\n    is_valid = True\n    if errors:\n        is_valid = False\n    return (is_valid, messages)", "docstring": "Validate nanopub against jsonschema for nanopub\n\nArgs:\nnanopub (Mapping[str, Any]): nanopub dict\nschema (Mapping[str, Any]): nanopub schema\n\nReturns:\nTuple[bool, List[str]]:\nbool: Is valid?  Yes = True, No = False\nList[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)\ne.g. [('ERROR', \"'subject' is a required property\")]", "source": "codesearchnet"}
{"code": "def all_sum(tensors):\n    return _apply_all_reduce('sum', tensors)", "docstring": "Returns a list of tensors with the all-reduce sum across `tensors`.\n\nThe computation is done with an all-reduce operation, so if only some of the\nreturned tensors are evaluated then the computation will hang.\n\nArgs:\ntensors: The input tensors across which to sum; must be assigned\nto GPU devices.\n\nReturns:\nList of tensors, each with the sum of the input tensors, where tensor i has\nthe same device as `tensors[i]`.", "source": "github-repos"}
{"code": "def on_predict_begin(self, logs=None):", "docstring": "Called at the beginning of prediction.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def optimize_for_inference(input_graph_def: graph_pb2.GraphDef, input_node_names: Sequence[str], output_node_names: Sequence[str], placeholder_type_enum: int, toco_compatible: bool=False, placeholder_to_const_names=None) -> graph_pb2.GraphDef:\n    ensure_graph_is_valid(input_graph_def)\n    optimized_graph_def = input_graph_def\n    optimized_graph_def = convert_placeholder_to_const(optimized_graph_def, placeholder_to_const_names)\n    optimized_graph_def = strip_unused_lib.strip_unused(optimized_graph_def, input_node_names, output_node_names, placeholder_type_enum)\n    optimized_graph_def = graph_util.remove_training_nodes(optimized_graph_def, output_node_names)\n    optimized_graph_def = fuse_decomposed_batch_norm(optimized_graph_def)\n    optimized_graph_def = fold_batch_norms(optimized_graph_def)\n    if not toco_compatible:\n        optimized_graph_def = fuse_resize_and_conv(optimized_graph_def, output_node_names)\n    ensure_graph_is_valid(optimized_graph_def)\n    return optimized_graph_def", "docstring": "Applies a series of inference optimizations on the input graph.\n\nArgs:\ninput_graph_def: A GraphDef containing a training model.\ninput_node_names: A list of names of the nodes that are fed inputs during\ninference.\noutput_node_names: A list of names of the nodes that produce the final\nresults.\nplaceholder_type_enum: The AttrValue enum for the placeholder data type, or\na list that specifies one value per input node name.\ntoco_compatible: Boolean, if True, only runs optimizations that result in\nTOCO compatible graph operations (default=False).\nplaceholder_to_const_names: A list of names of the PlaceholderWithDefault\nnodes to be converted to Constant.\n\nReturns:\nAn optimized version of the input graph.", "source": "github-repos"}
{"code": "def _normalize_mlengine_job_id(job_id):\n    match = re.search('\\\\d|\\\\{{2}', job_id)\n    if (match and (match.start() == 0)):\n        job = 'z_{}'.format(job_id)\n    else:\n        job = job_id\n    tracker = 0\n    cleansed_job_id = ''\n    for m in re.finditer('\\\\{{2}.+?\\\\}{2}', job):\n        cleansed_job_id += re.sub('[^0-9a-zA-Z]+', '_', job[tracker:m.start()])\n        cleansed_job_id += job[m.start():m.end()]\n        tracker = m.end()\n    cleansed_job_id += re.sub('[^0-9a-zA-Z]+', '_', job[tracker:])\n    return cleansed_job_id", "docstring": "Replaces invalid MLEngine job_id characters with '_'.\n\nThis also adds a leading 'z' in case job_id starts with an invalid\ncharacter.\n\nArgs:\njob_id: A job_id str that may have invalid characters.\n\nReturns:\nA valid job_id representation.", "source": "codesearchnet"}
{"code": "def get_array_from_hist2D(hist: Hist, set_zero_to_NaN: bool=True, return_bin_edges: bool=False) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:\n    shape = (hist.GetYaxis().GetNbins(), hist.GetXaxis().GetNbins())\n    hist_array = np.array([hist.GetBinContent(x) for x in range(1, hist.GetNcells()) if ((not hist.IsBinUnderflow(x)) and (not hist.IsBinOverflow(x)))])\n    hist_array = hist_array.reshape(shape)\n    hist_array = hist_array.T\n    if set_zero_to_NaN:\n        hist_array[(hist_array == 0)] = np.nan\n    if return_bin_edges:\n        x_bin_edges = get_bin_edges_from_axis(hist.GetXaxis())\n        y_bin_edges = get_bin_edges_from_axis(hist.GetYaxis())\n        epsilon = 1e-09\n        x_range = np.arange(np.amin(x_bin_edges), (np.amax(x_bin_edges) + epsilon), hist.GetXaxis().GetBinWidth(1))\n        y_range = np.arange(np.amin(y_bin_edges), (np.amax(y_bin_edges) + epsilon), hist.GetYaxis().GetBinWidth(1))\n    else:\n        x_range = np.array([hist.GetXaxis().GetBinCenter(i) for i in range(1, (hist.GetXaxis().GetNbins() + 1))])\n        y_range = np.array([hist.GetYaxis().GetBinCenter(i) for i in range(1, (hist.GetYaxis().GetNbins() + 1))])\n    (X, Y) = np.meshgrid(x_range, y_range)\n    return (X, Y, hist_array)", "docstring": "Extract x, y, and bin values from a 2D ROOT histogram.\n\nConverts the histogram into a numpy array, and suitably processes it for a surface plot\nby removing 0s (which can cause problems when taking logs), and returning a set of (x, y) mesh\nvalues utilziing either the bin edges or bin centers.\n\nNote:\nThis is a different format than the 1D version!\n\nArgs:\nhist (ROOT.TH2): Histogram to be converted.\nset_zero_to_NaN: If true, set 0 in the array to NaN. Useful with matplotlib so that it will\nignore the values when plotting. See comments in this function for more details. Default: True.\nreturn_bin_edges: Return x and y using bin edges instead of bin centers.\nReturns:\nContains (x values, y values, numpy array of hist data) where (x, y) are values on a\ngrid (from np.meshgrid) using the selected bin values.", "source": "codesearchnet"}
{"code": "def __init__(self, metagraph, ignore_colocation=True, ignore_user_placement=False):\n    self._metagraph = metagraph\n    self._item_graph = meta_graph_pb2.MetaGraphDef()\n    self._item_graph.CopyFrom(metagraph)\n    self._ignore_colocation = ignore_colocation\n    self._ignore_user_placement = ignore_user_placement\n    self._tf_item = None\n    self._BuildTFItem()", "docstring": "Creates an Item.\n\nArgs:\nmetagraph: a TensorFlow metagraph.\nignore_colocation: if set, the tool will ignore all the colocation\nconstraints generated by TensorFlow.\nignore_user_placement: if set, all the placement annotations annotated in\nthe metagraph will be ignored.\nRaises:\nValueError: the metagraph is incomplete or invalid.", "source": "github-repos"}
{"code": "def register(self, token, regexp):\n        \n        self._tokens.append((token, re.compile(regexp)))", "docstring": "Register a token.\n\nArgs:\ntoken (Token): the token class to register\nregexp (str): the regexp for that token", "source": "juraj-google-style"}
{"code": "def __init__(self, value=None, length=0):\n        \n        super().__init__(value)\n        self.length = length\n        self._fmt = '!{}{}'.format(self.length, 's')", "docstring": "Create a Char with the optional parameters below.\n\nArgs:\nvalue: The character to be build.\nlength (int): Character size.", "source": "juraj-google-style"}
{"code": "def parse_vhdl_file(fname):\n  \n  with open(fname, 'rt') as fh:\n    text = fh.read()\n  return parse_vhdl(text)", "docstring": "Parse a named VHDL file\n\nArgs:\nfname(str): Name of file to parse\nReturns:\nParsed objects.", "source": "juraj-google-style"}
{"code": "def heightmap_rain_erosion(hm: np.ndarray, nbDrops: int, erosionCoef: float, sedimentationCoef: float, rnd: Optional[tcod.random.Random]=None) -> None:\n    lib.TCOD_heightmap_rain_erosion(_heightmap_cdata(hm), nbDrops, erosionCoef, sedimentationCoef, (rnd.random_c if rnd else ffi.NULL))", "docstring": "Simulate the effect of rain drops on the terrain, resulting in erosion.\n\n``nbDrops`` should be at least hm.size.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nnbDrops (int): Number of rain drops to simulate.\nerosionCoef (float): Amount of ground eroded on the drop's path.\nsedimentationCoef (float): Amount of ground deposited when the drops\nstops to flow.\nrnd (Optional[Random]): A tcod.Random instance, or None.", "source": "codesearchnet"}
{"code": "def read_html_file(data_dir, fileroot, encoding=None):\n    fname = os.path.join(data_dir, RAW_HTML_DIRNAME, (fileroot + RAW_HTML_EXT))\n    encodings = ((encoding,) if encoding else ('utf-8', 'iso-8859-1'))\n    for encoding in encodings:\n        try:\n            with io.open(fname, mode='rt', encoding=encoding) as f:\n                raw_html = f.read()\n            break\n        except (UnicodeDecodeError, UnicodeError):\n            raw_html = None\n    return ftfy.fix_encoding(raw_html).strip()", "docstring": "Read the HTML file corresponding to identifier ``fileroot``\nin the raw HTML directory below the root ``data_dir``.\n\nArgs:\ndata_dir (str)\nfileroot (str)\nencoding (str)\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def begin_scan(self, callback=None, interval=DEF_SCAN_INTERVAL, window=DEF_SCAN_WINDOW):\n    logger.debug('configuring scan parameters')\n    self.api.ble_cmd_gap_set_scan_parameters(interval, window, 1)\n    self._set_state(self._STATE_CONFIGURE_SCAN)\n    self.api.ble_cmd_gap_discover(1)\n    self._wait_for_state(self._STATE_CONFIGURE_SCAN)\n    logger.debug('starting async scan for devices')\n    self.scan_targets = None\n    self.scan_callback = callback\n    self._set_state(self._STATE_SCANNING)\n    return True", "docstring": "Begins a BLE scan and returns immediately.\n\nUsing this method you can begin a BLE scan and leave the dongle in scanning\nmode in the background. It will remain in scanning mode until you call the\n:meth:`end_scan` method or the :meth:`reset` method.\n\nArgs:\ncallback (callbable): a callback that will be called for each new device\ndiscovered by the scanning process. Will be passed a single argument,\na :class:`ScanResult` object. May be None if not needed.\ninterval (int): BLE scan interval, in units of 625us\nwindow (int): BLE scan window, in units of 625us\n\nReturns:\nTrue on success, False otherwise.", "source": "codesearchnet"}
{"code": "def split(input_file, file_1, file_2, no_in_first_file):\n    \n\n    \n    with open(input_file) as f:\n        feat_collection = geojson.load(f)\n\n    features = feat_collection['features']\n    feat_collection_1 = geojson.FeatureCollection(features[0:no_in_first_file])\n    feat_collection_2 = geojson.FeatureCollection(features[no_in_first_file:])\n\n    with open(file_1, 'w') as f:\n        geojson.dump(feat_collection_1, f)\n\n    with open(file_2, 'w') as f:\n        geojson.dump(feat_collection_2, f)", "docstring": "Split a geojson in two separate files.\n\nArgs:\ninput_file (str): Input filename.\nfile_1 (str): Output file name 1.\nfile_2 (str): Output file name 2.\nno_features (int): Number of features in input_file to go to file_1.\noutput_file (str): Output file name.", "source": "juraj-google-style"}
{"code": "def find_backend(line: str) -> Optional[str]:\n    if _re_test_backend.search(line) is None:\n        return None\n    backends = [b[0] for b in _re_backend.findall(line)]\n    backends.sort()\n    return '_and_'.join(backends)", "docstring": "Find one (or multiple) backend in a code line of the init.\n\nArgs:\nline (`str`): A code line of the main init.\n\nReturns:\nOptional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line\ncontains `if is_xxx_available() and `is_yyy_available()`) returns all backends joined on `_and_` (so\n`xxx_and_yyy` for instance).", "source": "github-repos"}
{"code": "def dump(config):\n    \n\n    return yaml.safe_dump(\n        config.to_primitive(),\n        default_flow_style=False,\n        encoding='utf-8',\n        allow_unicode=True)", "docstring": "Dumps a stacker Config object as yaml.\n\nArgs:\nconfig (:class:`Config`): the stacker Config object.\nstream (stream): an optional stream object to write to.\n\nReturns:\nstr: the yaml formatted stacker Config.", "source": "juraj-google-style"}
{"code": "def read_nmr_efg_tensor(self):\n    header_pattern = 'Electric field gradients \\\\(V/A\\\\^2\\\\)\\\\n-*\\\\n ion\\\\s+V_xx\\\\s+V_yy\\\\s+V_zz\\\\s+V_xy\\\\s+V_xz\\\\s+V_yz\\\\n-*\\\\n'\n    row_pattern = '\\\\d+\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)\\\\s+([-\\\\d\\\\.]+)'\n    footer_pattern = '-*\\\\n'\n    data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)\n    tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]\n    self.data['unsym_efg_tensor'] = tensors\n    return tensors", "docstring": "Parses the NMR Electric Field Gradient Raw Tensors\n\nReturns:\nA list of Electric Field Gradient Tensors in the order of Atoms from OUTCAR", "source": "codesearchnet"}
{"code": "def copy(self, **override_parameters_kwargs):\n    parameters = dict(self.parameters, **override_parameters_kwargs)\n    return type(self)(**parameters)", "docstring": "Creates a deep copy of the distribution.\n\nNote: the copy distribution may continue to depend on the original\ninitialization arguments.\n\nArgs:\n**override_parameters_kwargs: String/value dictionary of initialization\narguments to override with new values.\n\nReturns:\ndistribution: A new instance of `type(self)` initialized from the union\nof self.parameters and override_parameters_kwargs, i.e.,\n`dict(self.parameters, **override_parameters_kwargs)`.", "source": "github-repos"}
{"code": "def with_stack(cls, stack, severity, message, **kwargs):\n    stack = _dedup_opcodes(stack) if stack else None\n    opcode = stack[-1].current_opcode if stack else None\n    if opcode is None:\n        return cls(severity, message, **kwargs)\n    else:\n        return cls(severity, message, filename=opcode.code.filename, line=opcode.line, endline=opcode.endline, col=opcode.col, endcol=opcode.endcol, methodname=opcode.code.name, opcode_name=opcode.__class__.__name__, traceback=_make_traceback_str(stack), **kwargs)", "docstring": "Return an error using a stack for position information.\n\nArgs:\nstack: A list of state.Frame or state.SimpleFrame objects.\nseverity: The error level (error or warning), an integer.\nmessage: The error message string.\n**kwargs: Additional keyword args to pass onto the class ctor.\n\nReturns:\nAn Error object.", "source": "github-repos"}
{"code": "def exp(x):\n    return math_ops.exp(x)", "docstring": "Element-wise exponential.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def _GetDateTime(self, filetime):\n    \n    if filetime == 0:\n      return dfdatetime_semantic_time.SemanticTime('Not set')\n\n    return dfdatetime_filetime.Filetime(timestamp=filetime)", "docstring": "Retrieves the date and time from a FILETIME timestamp.\n\nArgs:\nfiletime (int): FILETIME timestamp.\n\nReturns:\ndfdatetime.DateTimeValues: date and time.", "source": "juraj-google-style"}
{"code": "def __init__(self, config_file=None, config_header=None):\n    \n    self.config_file = config_file or CONFIG\n    self.config_header = config_header\n    self.config = parser.Parser()\n    self.config.read(self.config_file)", "docstring": "Constructor.\n\nArgs:\nconfig_file: string, the location of the config file.\nconfig_header: string, the message to write at the top of the config.", "source": "juraj-google-style"}
{"code": "def GetEstimatedYear(self):\n    if self._preferred_year:\n        return self._preferred_year\n    if self._knowledge_base.year:\n        return self._knowledge_base.year\n    year = self._GetEarliestYearFromFileEntry()\n    if (not year):\n        year = self._GetLatestYearFromFileEntry()\n    if (not year):\n        year = timelib.GetCurrentYear()\n    return year", "docstring": "Retrieves an estimate of the year.\n\nThis function determines the year in the following manner:\n* see if the user provided a preferred year;\n* see if knowledge base defines a year e.g. derived from preprocessing;\n* determine the year based on the file entry metadata;\n* default to the current year;\n\nReturns:\nint: estimated year.", "source": "codesearchnet"}
{"code": "def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> List[np.array]:\n    patches = []\n    height, width = get_image_size(image, channel_dim=input_data_format)\n    for i in range(0, height, patch_size):\n        for j in range(0, width, patch_size):\n            if input_data_format == ChannelDimension.LAST:\n                patch = image[i:i + patch_size, j:j + patch_size]\n            else:\n                patch = image[:, i:i + patch_size, j:j + patch_size]\n            patches.append(patch)\n    return patches", "docstring": "Divides an image into patches of a specified size.\n\nArgs:\nimage (`np.array`):\nThe input image.\npatch_size (`int`):\nThe size of each patch.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\nlist: A list of np.array representing the patches.", "source": "github-repos"}
{"code": "def _GetActualMessage(self):\n    if six.PY2:\n        return self._actual.message\n    return self._actual.args[0] if self._actual.args else ''", "docstring": "Returns the \"message\" portion of an exception.\n\nMany Python 2 exceptions have a \"message\" attribute, so return that directly\nin Python 2. However, this attribute is never present in Python 3, so return\nthe first argument passed to the exception instance as the message.\n\nReturns:\nString", "source": "github-repos"}
{"code": "def load_b26_file(file_name):\n    \n    \n\n    assert os.path.exists(file_name)\n\n    with open(file_name, 'r') as infile:\n        data = yaml.safe_load(infile)\n    return data", "docstring": "loads a .b26 file into a dictionary\n\nArgs:\nfile_name:\n\nReturns: dictionary with keys instrument, scripts, probes", "source": "juraj-google-style"}
{"code": "def append_from_list(self, content, fill_title=False):\n        \n        row_index = 0\n        for row in content:\n            tr = TableRow()\n            column_index = 0\n            for item in row:\n                if row_index == 0 and fill_title:\n                    ti = TableTitle(item)\n                else:\n                    ti = TableItem(item)\n                tr.append(ti, str(column_index))\n                column_index = column_index + 1\n            self.append(tr, str(row_index))\n            row_index = row_index + 1", "docstring": "Appends rows created from the data contained in the provided\nlist of tuples of strings. The first tuple of the list can be\nset as table title.\n\nArgs:\ncontent (list): list of tuples of strings. Each tuple is a row.\nfill_title (bool): if true, the first tuple in the list will\nbe set as title.", "source": "juraj-google-style"}
{"code": "def struct_member_error(err, sid, name, offset, size):\n    (exception, msg) = STRUCT_ERROR_MAP[err]\n    struct_name = idc.GetStrucName(sid)\n    return exception('AddStructMember(struct=\"{}\", member=\"{}\", offset={}, size={}) failed: {}'.format(struct_name, name, offset, size, msg))", "docstring": "Create and format a struct member exception.\n\nArgs:\nerr: The error value returned from struct member creation\nsid: The struct id\nname: The member name\noffset: Memeber offset\nsize: Member size\n\nReturns:\nA ``SarkErrorAddStructMemeberFailed`` derivative exception, with an\ninformative message.", "source": "codesearchnet"}
{"code": "def validate_language_key(obj, key):\n    \n    backend = bigchaindb.config['database']['backend']\n\n    if backend == 'localmongodb':\n        data = obj.get(key, {})\n        if isinstance(data, dict):\n            validate_all_values_for_key_in_obj(data, 'language', validate_language)\n        elif isinstance(data, list):\n            validate_all_values_for_key_in_list(data, 'language', validate_language)", "docstring": "Validate all nested \"language\" key in `obj`.\n\nArgs:\nobj (dict): dictionary whose \"language\" key is to be validated.\n\nReturns:\nNone: validation successful\n\nRaises:\nValidationError: will raise exception in case language is not valid.", "source": "juraj-google-style"}
{"code": "def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):\n    params = _get_params(mapper_spec)\n    blob_key = params[cls.BLOB_KEY_PARAM]\n    zip_input = zipfile.ZipFile(_reader(blob_key))\n    zfiles = zip_input.infolist()\n    total_size = sum((x.file_size for x in zfiles))\n    num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)\n    size_per_shard = (total_size \n    shard_start_indexes = [0]\n    current_shard_size = 0\n    for (i, fileinfo) in enumerate(zfiles):\n        current_shard_size += fileinfo.file_size\n        if (current_shard_size >= size_per_shard):\n            shard_start_indexes.append((i + 1))\n            current_shard_size = 0\n    if (shard_start_indexes[(- 1)] != len(zfiles)):\n        shard_start_indexes.append(len(zfiles))\n    return [cls(blob_key, start_index, end_index, _reader) for (start_index, end_index) in zip(shard_start_indexes, shard_start_indexes[1:])]", "docstring": "Returns a list of input shard states for the input spec.\n\nArgs:\nmapper_spec: The MapperSpec for this InputReader. Must contain\n'blob_key' parameter with one blob key.\n_reader: a callable that returns a file-like object for reading blobs.\nUsed for dependency injection.\n\nReturns:\nA list of InputReaders spanning files within the zip.", "source": "codesearchnet"}
{"code": "def to_cache_timer(datetime_func):\n    \n    if datetime_func is None:\n        datetime_func = datetime.utcnow\n\n    def _timer():\n        \n        return (datetime_func() - datetime(1970, 1, 1)).total_seconds()\n\n    return _timer", "docstring": "Converts a datetime_func to a timestamp_func.\n\nArgs:\ndatetime_func (callable[[datatime]]): a func that returns the current\ntime\n\nReturns:\ntime_func (callable[[timestamp]): a func that returns the timestamp\nfrom the epoch", "source": "juraj-google-style"}
{"code": "def c_overturned(step):\n    \n    rbot, rtop = misc.get_rbounds(step)\n    cinit, rad = init_c_overturn(step)\n    radf = (rtop**3 + rbot**3 - rad**3)**(1 / 3)\n    return cinit, radf", "docstring": "Theoretical overturned concentration.\n\nThis compute the resulting composition profile if fractional\ncrystallization of a SMO is assumed and then a purely radial\noverturn happens.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the composition and the radial position\nat which it is evaluated.", "source": "juraj-google-style"}
{"code": "def _add_result(self, dict_entry, entry, dt, start_time):\n    time_entry = {}\n    time_entry['dt'] = dt\n    time_entry['start_time'] = start_time\n    dict_entry[entry] = time_entry", "docstring": "Adds a result to the dictionary.\n\nArgs:\ndict_entry: main dict to add entry\nentry: slot for this entry (likely an integer)\ndt: the timing for the entry\nstart_time: when the entry started unix time float", "source": "codesearchnet"}
{"code": "def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected):\n    total_size_1 = 1\n    total_size_2 = 1\n    for s in tensor_in_sizes:\n        total_size_1 *= s\n    for s in filter_in_sizes:\n        total_size_2 *= s\n    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)], dtype=np.float32).reshape(tensor_in_sizes)\n    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)], dtype=np.float32).reshape(filter_in_sizes)\n    with self.session() as sess:\n        t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)\n        t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)\n        with self.test_scope():\n            conv = nn_ops.depthwise_conv2d_native(t1, t2, strides=[1, stride, stride, 1], padding=padding)\n        value = sess.run(conv, {t1: x1, t2: x2})\n    print('value = ', value)\n    self.assertArrayNear(expected, np.ravel(value), 0.0001)\n    self.assertShapeEqual(value, conv)", "docstring": "Verifies the output values of the depthwise convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in\n[filter_rows, filter_cols, input_depth, depth_multiplier].\nstride: Stride.\npadding: Padding type.\nexpected: An array containing the expected operation outputs.", "source": "github-repos"}
{"code": "def get_all_instances(include_fastboot=False):\n    if include_fastboot:\n        serial_list = (list_adb_devices() + list_fastboot_devices())\n        return get_instances(serial_list)\n    return get_instances(list_adb_devices())", "docstring": "Create AndroidDevice instances for all attached android devices.\n\nArgs:\ninclude_fastboot: Whether to include devices in bootloader mode or not.\n\nReturns:\nA list of AndroidDevice objects each representing an android device\nattached to the computer.", "source": "codesearchnet"}
{"code": "def replace_in_list(stringlist: Iterable[str],\n                    replacedict: Dict[str, str]) -> List[str]:\n    \n    newlist = []\n    for fromstring in stringlist:\n        newlist.append(multiple_replace(fromstring, replacedict))\n    return newlist", "docstring": "Returns a list produced by applying :func:`multiple_replace` to every\nstring in ``stringlist``.\n\nArgs:\nstringlist: list of source strings\nreplacedict: dictionary mapping \"original\" to \"replacement\" strings\n\nReturns:\nlist of final strings", "source": "juraj-google-style"}
{"code": "def _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize):\n    assert_type(rank, dtypes.int32)\n    rank_static = tensor_util.constant_value(rank)\n    if rank_static is not None:\n        if rank_static.ndim != 0:\n            raise ValueError('Rank must be a scalar.')\n        x_rank_static = x.get_shape().ndims\n        if x_rank_static is not None:\n            if not static_condition(x_rank_static, rank_static):\n                raise ValueError('Static rank condition failed', x_rank_static, rank_static)\n            return control_flow_ops.no_op(name='static_checks_determined_all_ok')\n    condition = dynamic_condition(array_ops.rank(x), rank)\n    if rank_static is None:\n        this_data = ['Rank must be a scalar. Received rank: ', rank]\n        rank_check = assert_rank(rank, 0, data=this_data)\n        condition = control_flow_ops.with_dependencies([rank_check], condition)\n    return control_flow_assert.Assert(condition, data, summarize=summarize)", "docstring": "Assert `x` has a rank that satisfies a given condition.\n\nArgs:\nx:  Numeric `Tensor`.\nrank:  Scalar `Tensor`.\nstatic_condition:   A python function that takes `[actual_rank, given_rank]`\nand returns `True` if the condition is satisfied, `False` otherwise.\ndynamic_condition:  An `op` that takes [actual_rank, given_rank] and return\n`True` if the condition is satisfied, `False` otherwise.\ndata:  The tensors to print out if the condition is false.  Defaults to\nerror message and first few entries of `x`.\nsummarize: Print this many entries of each tensor.\n\nReturns:\nOp raising `InvalidArgumentError` if `x` fails dynamic_condition.\n\nRaises:\nValueError:  If static checks determine `x` fails static_condition.", "source": "github-repos"}
{"code": "def __init__(self, client, conv_states, user_list, sync_timestamp):\n        self._client = client  \n        self._conv_dict = {}  \n        self._sync_timestamp = sync_timestamp  \n        self._user_list = user_list  \n\n        \n        \n        for conv_state in conv_states:\n            self._add_conversation(conv_state.conversation, conv_state.event,\n                                   conv_state.event_continuation_token)\n\n        self._client.on_state_update.add_observer(self._on_state_update)\n        self._client.on_connect.add_observer(self._sync)\n        self._client.on_reconnect.add_observer(self._sync)\n\n        self.on_event = event.Event('ConversationList.on_event')\n        \n\n        self.on_typing = event.Event('ConversationList.on_typing')\n        \n\n        self.on_watermark_notification = event.Event(\n            'ConversationList.on_watermark_notification'\n        )", "docstring": ":class:`.Event` fired when an event occurs in any conversation.\n\nArgs:\nconv_event: :class:`ConversationEvent` that occurred.", "source": "juraj-google-style"}
{"code": "def confirm(question):\n    \n    if FORCE_YES:\n        return True\n\n    while True:\n        answer = input(question + ' <Yes|No>').lower()\n\n        if answer == 'yes' or answer == 'y':\n            confirmed = True\n            break\n        if answer == 'no' or answer == 'n':\n            confirmed = False\n            break\n\n    return confirmed", "docstring": "Ask the user if he really want something to happen.\n\nArgs:\nquestion(str): What can happen\n\nReturns:\n(boolean): Confirmed or not", "source": "juraj-google-style"}
{"code": "def get_domain_workgroup():\n    with salt.utils.winapi.Com():\n        conn = wmi.WMI()\n        for computer in conn.Win32_ComputerSystem():\n            if computer.PartOfDomain:\n                return {'Domain': computer.Domain}\n            else:\n                return {'Workgroup': computer.Workgroup}", "docstring": "Get the domain or workgroup the computer belongs to.\n\n.. versionadded:: 2015.5.7\n.. versionadded:: 2015.8.2\n\nReturns:\nstr: The name of the domain or workgroup\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.get_domain_workgroup", "source": "codesearchnet"}
{"code": "def bilinearly_sampled_image(texture, uv):\n    \n    h, w = tf.unstack(tf.shape(texture)[:2])\n    u, v = tf.split(uv, 2, axis=-1)\n    v = 1.0 - v  \n    u, v = u * tf.to_float(w) - 0.5, v * tf.to_float(h) - 0.5\n    u0, u1 = tf.floor(u), tf.ceil(u)\n    v0, v1 = tf.floor(v), tf.ceil(v)\n    uf, vf = u - u0, v - v0\n    u0, u1, v0, v1 = map(tf.to_int32, [u0, u1, v0, v1])\n\n    def sample(u, v):\n        vu = tf.concat([v % h, u % w], axis=-1)\n        return tf.gather_nd(texture, vu)\n\n    s00, s01 = sample(u0, v0), sample(u0, v1)\n    s10, s11 = sample(u1, v0), sample(u1, v1)\n    s0 = s00 * (1.0 - vf) + s01 * vf\n    s1 = s10 * (1.0 - vf) + s11 * vf\n    s = s0 * (1.0 - uf) + s1 * uf\n    return s", "docstring": "Build bilinear texture sampling graph.\n\nCoordinate transformation rules match OpenGL GL_REPEAT wrapping and GL_LINEAR\ninterpolation modes.\n\nArgs:\ntexture: [tex_h, tex_w, channel_n] tensor.\nuv: [frame_h, frame_h, 2] tensor with per-pixel UV coordinates in range [0..1]\n\nReturns:\n[frame_h, frame_h, channel_n] tensor with per-pixel sampled values.", "source": "juraj-google-style"}
{"code": "def RunScripts(self, script_dict):\n    metadata_types = ['%s-script-url', '%s-script']\n    metadata_keys = [(key % self.script_type) for key in metadata_types]\n    metadata_keys = [key for key in metadata_keys if script_dict.get(key)]\n    if (not metadata_keys):\n        self.logger.info('No %s scripts found in metadata.', self.script_type)\n    for metadata_key in metadata_keys:\n        metadata_script = script_dict.get(metadata_key)\n        self._MakeExecutable(metadata_script)\n        self._RunScript(metadata_key, metadata_script)", "docstring": "Run the metadata scripts; execute a URL script first if one is provided.\n\nArgs:\nscript_dict: a dictionary mapping metadata keys to script files.", "source": "codesearchnet"}
{"code": "def author_id_normalize_and_schema(uid, schema=None):\n\n    def _get_uid_normalized_in_schema(_uid, _schema):\n        (regex, template) = _RE_AUTHORS_UID[_schema]\n        match = regex.match(_uid)\n        if match:\n            return template.format(match.group('uid'))\n    if (idutils.is_orcid(uid) and (schema in (None, 'ORCID'))):\n        return (idutils.normalize_orcid(uid), 'ORCID')\n    if (schema and (schema not in _RE_AUTHORS_UID)):\n        raise UnknownUIDSchema(uid)\n    if schema:\n        normalized_uid = _get_uid_normalized_in_schema(uid, schema)\n        if normalized_uid:\n            return (normalized_uid, schema)\n        else:\n            raise SchemaUIDConflict(schema, uid)\n    (match_schema, normalized_uid) = (None, None)\n    for candidate_schema in _RE_AUTHORS_UID:\n        candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)\n        if candidate_uid:\n            if match_schema:\n                raise UnknownUIDSchema(uid)\n            match_schema = candidate_schema\n            normalized_uid = candidate_uid\n    if match_schema:\n        return (normalized_uid, match_schema)\n    raise UnknownUIDSchema(uid)", "docstring": "Detect and normalize an author UID schema.\n\nArgs:\nuid (string): a UID string\nschema (string): try to resolve to schema\n\nReturns:\nTuple[string, string]: a tuple (uid, schema) where:\n- uid: the UID normalized to comply with the id.json schema\n- schema: a schema of the UID or *None* if not recognised\n\nRaise:\nUnknownUIDSchema: if UID is too little to definitively guess the schema\nSchemaUIDConflict: if specified schema is not matching the given UID", "source": "codesearchnet"}
{"code": "def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:\n    with open(filename, 'r') as f:\n        config = yaml.load(f)\n    return config", "docstring": "Load an analysis configuration from a file.\n\nArgs:\nyaml: YAML object to use in loading the configuration.\nfilename: Filename of the YAML configuration file.\nReturns:\ndict-like object containing the loaded configuration", "source": "codesearchnet"}
{"code": "def is_within_strict_int_range(lower_bound: int, upper_bound: int) -> RuleChecker[Numeric]:\n\n    def _checker(value: Numeric) -> RuleOutput:\n        if lower_bound < value < upper_bound:\n            return None\n        else:\n            return 'Value is not within the strict range.'\n    return _checker", "docstring": "Checks if the provided numeric value IS strictly bounded by integers\ni.e. (lower_bound, upper_bound) with both bounds exclusive.\n\nArgs:\n* lower_bound: lowest integer value (exclusive)\n* upper_bound: highest integer value (exclusive)\n\nReturns:\n* None: if lower_bound < value < upper_bound\n* Error message, otherwise", "source": "github-repos"}
{"code": "def get_client_kwargs(self, path):\n        \n        container, obj = self.split_locator(path)\n        kwargs = dict(container=container)\n        if obj:\n            kwargs['obj'] = obj\n        return kwargs", "docstring": "Get base keyword arguments for client for a\nspecific path.\n\nArgs:\npath (str): Absolute path or URL.\n\nReturns:\ndict: client args", "source": "juraj-google-style"}
{"code": "def _ip_unnumbered_type(self, **kwargs):\n        \n        method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\\\n            'interface_type' % kwargs['int_type']\n        ip_unnumbered_type = getattr(self._interface, method_name)\n        config = ip_unnumbered_type(**kwargs)\n        if kwargs['delete']:\n            tag = 'ip-donor-interface-type'\n            config.find('.\n        return config", "docstring": "Return the `ip unnumbered` donor type XML.\n\nYou should not use this method.\nYou probably want `Interface.ip_unnumbered`.\n\nArgs:\nint_type (str): Type of interface. (gigabitethernet,\ntengigabitethernet etc).\ndelete (bool): Remove the configuration if ``True``.\nip_donor_interface_type (str): The donor interface type (loopback)\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def is_native_ion_gate(gate: ops.Gate) -> bool:\n    return isinstance(gate, (ops.XXPowGate, ops.MeasurementGate, ops.XPowGate, ops.YPowGate, ops.ZPowGate))", "docstring": "Check if a gate is a native ion gate.\n\nArgs:\ngate: Input gate.\n\nReturns:\nTrue if the gate is native to the ion, false otherwise.", "source": "codesearchnet"}
{"code": "def log_estimator_evaluation_result(self, eval_results):\n    if (not isinstance(eval_results, dict)):\n        tf.logging.warning('eval_results should be directory for logging. Got %s', type(eval_results))\n        return\n    global_step = eval_results[tf.GraphKeys.GLOBAL_STEP]\n    for key in sorted(eval_results):\n        if (key != tf.GraphKeys.GLOBAL_STEP):\n            self.log_metric(key, eval_results[key], global_step=global_step)", "docstring": "Log the evaluation result for a estimator.\n\nThe evaluate result is a directory that contains metrics defined in\nmodel_fn. It also contains a entry for global_step which contains the value\nof the global step when evaluation was performed.\n\nArgs:\neval_results: dict, the result of evaluate() from a estimator.", "source": "codesearchnet"}
{"code": "def dawsn(x, name=None):\n    with ops.name_scope(name, 'dawsn', [x]):\n        return gen_special_math_ops.dawsn(x)", "docstring": "Computes Dawson's integral of `x` element-wise.\n\nDawson's integral is defined as `exp(-x**2)` times the integral of\n`exp(t**2)` from `0` to `x`, with the domain of definition all real numbers.\n\nDawson's function is odd.\n>>> tf.math.special.dawsn([-1., -0.5, 0.5, 1.]).numpy()\narray([-0.5380795, -0.4244364, 0.4244364,  0.5380795], dtype=float32)\n\nThis implementation is based off of the Cephes math library.\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types:\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.dawsn\n@end_compatibility", "source": "github-repos"}
{"code": "def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None):\n    \n\n    try:\n        audio = EasyMP3(filename)\n        audio.tags = None\n        audio[\"artist\"] = artist\n        audio[\"title\"] = title\n        if year:\n            audio[\"date\"] = str(year)\n        if album:\n            audio[\"album\"] = album\n        if track_number:\n            audio[\"tracknumber\"] = track_number\n        if genre:\n            audio[\"genre\"] = genre\n        if url: \n            audio[\"website\"] = url\n        audio.save()\n\n        if artwork_url:\n\n            artwork_url = artwork_url.replace('https', 'http')\n\n            mime = 'image/jpeg'\n            if '.jpg' in artwork_url:\n                mime = 'image/jpeg'\n            if '.png' in artwork_url:\n                mime = 'image/png'\n\n            if '-large' in artwork_url:\n                new_artwork_url = artwork_url.replace('-large', '-t500x500')\n                try:\n                    image_data = requests.get(new_artwork_url).content\n                except Exception as e:\n                    \n                    image_data = requests.get(artwork_url).content\n            else:\n                image_data = requests.get(artwork_url).content\n\n            audio = MP3(filename, ID3=OldID3)\n            audio.tags.add(\n                APIC(\n                    encoding=3,  \n                    mime=mime,\n                    type=3,  \n                    desc='Cover',\n                    data=image_data\n                )\n            )\n            audio.save()\n\n        \n        if url:\n            audio = MP3(filename, ID3=OldID3)\n            audio.tags.add( WXXX( encoding=3, url=url ) )\n            audio.save()\n\n        return True\n\n    except Exception as e:\n        puts(colored.red(\"Problem tagging file: \") + colored.white(\"Is this file a WAV?\"))\n        return False", "docstring": "Attempt to put ID3 tags on a file.\n\nArgs:\nartist (str):\ntitle (str):\nyear (int):\ngenre (str):\nartwork_url (str):\nalbum (str):\ntrack_number (str):\nfilename (str):\nurl (str):", "source": "juraj-google-style"}
{"code": "def compose(self, *args, **kwargs):\n        \n        linebreak = kwargs.pop(\"linebreak\", \"\\n\")\n        \n        if len(args) > 0:\n            self.args = args\n        self._update(**kwargs)\n        \n        fkwargs = {}    \n        modtmpl = []    \n        \n        \n        for line in self:\n            cline = copy(line)\n            \n            for match in self._regex.findall(line):\n                search = \"[{}]\".format(\"|\".join(match))\n                name, indent, delim, qual, _ = match\n                if indent != \"\":\n                    indent = \" \"*int(indent)\n                delim = delim.replace(\"\\\\|\", \"|\")\n                \n                data = getattr(self, name, None)\n                \n                if data is None:\n                    cline = cline.replace(search, \"\")\n                    continue\n                elif delim.isdigit():\n                    fkwargs[name] = getattr(self, \"_fmt_\"+name)()\n                else:\n                    fkwargs[name] = linebreak.join([indent+k+delim+qual+v+qual for k, v in data.items()])\n                cline = cline.replace(search, \"{\"+name+\"}\")\n            modtmpl.append(cline)\n        modtmpl = \"\\n\".join(modtmpl)\n        print(modtmpl)\n        dct = self.get_kwargs()\n        dct.update(fkwargs)\n        return self._constructor(textobj=modtmpl.format(*self.args, **dct))", "docstring": "Generate a file from the current template and given arguments.\n\nWarning:\nMake certain to check the formatted editor for correctness!\n\nArgs:\nargs: Positional arguments to update the template\nkwargs: Keyword arguments to update the template\n\nReturns:\neditor: An editor containing the formatted template.", "source": "juraj-google-style"}
{"code": "def _AlignUncompressedDataOffset(self, uncompressed_data_offset):\n    \n    if self._zip_ext_file:\n      self._zip_ext_file.close()\n      self._zip_ext_file = None\n\n    try:\n      \n      \n      \n      self._zip_ext_file = self._zip_file.open(self._zip_info, 'r')\n    except zipfile.BadZipfile as exception:\n      raise IOError(\n          'Unable to open ZIP file with error: {0!s}'.format(exception))\n\n    self._uncompressed_data = b''\n    self._uncompressed_data_size = 0\n    self._uncompressed_data_offset = 0\n\n    while uncompressed_data_offset > 0:\n      self._ReadCompressedData(self._UNCOMPRESSED_DATA_BUFFER_SIZE)\n\n      if uncompressed_data_offset < self._uncompressed_data_size:\n        self._uncompressed_data_offset = uncompressed_data_offset\n        break\n\n      uncompressed_data_offset -= self._uncompressed_data_size", "docstring": "Aligns the compressed file with the uncompressed data offset.\n\nArgs:\nuncompressed_data_offset (int): uncompressed data offset.\n\nRaises:\nIOError: if the ZIP file could not be opened.\nOSError: if the ZIP file could not be opened.", "source": "juraj-google-style"}
{"code": "def _ParseLogline(self, parser_mediator, structure):\n    \n    \n    month, day_of_month, year, hours, minutes, seconds, milliseconds = (\n        structure.date_time)\n\n    time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds, milliseconds)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    event_data = SkyDriveOldLogEventData()\n    event_data.log_level = structure.log_level\n    event_data.offset = self.offset\n    event_data.source_code = structure.source_code\n    event_data.text = structure.text\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    self._last_date_time = date_time\n    self._last_event_data = event_data", "docstring": "Parse a logline and store appropriate attributes.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def _decode_linear_biases(linear_string, nodelist):\n    linear_bytes = base64.b64decode(linear_string)\n    return dict(zip(nodelist, struct.unpack(('<' + ('d' * (len(linear_bytes)", "docstring": "Inverse of _serialize_linear_biases.\n\nArgs:\nlinear_string (str): base 64 encoded string of little endian\n8 byte floats, one for each of the nodes in nodelist.\nnodelist (list): list of the form [node1, node2, ...].\n\nReturns:\ndict: linear biases in a dict.\n\nExamples:\n>>> _decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [1, 2, 3])\n{1: -1.0, 2: 1.0, 3: 0.0}\n>>> _decode_linear_biases('AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA', [3, 2, 1])\n{1: 0.0, 2: 1.0, 3: -1.0}", "source": "codesearchnet"}
{"code": "def connect(self, address) -> bytes:\n    stdout = self._exec_adb_cmd('connect', address, shell=False, timeout=None, stderr=None)\n    if PATTERN_ADB_CONNECT_SUCCESS.match(stdout.decode('utf-8')) is None:\n        raise AdbError(cmd=f'connect {address}', stdout=stdout, stderr='', ret_code=0)\n    return stdout", "docstring": "Executes the `adb connect` command with proper status checking.\n\nArgs:\naddress: string, the address of the Android instance to connect to.\n\nReturns:\nThe stdout content.\n\nRaises:\nAdbError: if the connection failed.", "source": "github-repos"}
{"code": "def getGridByCard(self, gssha_card_name):\n    with tmp_chdir(self.project_directory):\n        if (gssha_card_name not in (self.INPUT_MAPS + self.WMS_DATASETS)):\n            raise ValueError('Card {0} not found in valid grid cards ...'.format(gssha_card_name))\n        gssha_grid_card = self.getCard(gssha_card_name)\n        if (gssha_grid_card is None):\n            raise ValueError('{0} card not found ...'.format(gssha_card_name))\n        gssha_pro_card = self.getCard('\n        if (gssha_pro_card is None):\n            raise ValueError('\n        return GDALGrid(gssha_grid_card.value.strip('\"').strip(\"'\"), gssha_pro_card.value.strip('\"').strip(\"'\"))", "docstring": "Returns GDALGrid object of GSSHA grid\n\nParamters:\ngssha_card_name(str): Name of GSSHA project card for grid.\n\nReturns:\nGDALGrid", "source": "codesearchnet"}
{"code": "def parse_from_xml(root):\n    \n\n    \n    if root.tag != 'ubcpi':\n        raise UpdateFromXmlError(_('Every peer instruction tool must contain an \"ubcpi\" element.'))\n\n    display_name_el = root.find('display_name')\n    if display_name_el is None:\n        raise UpdateFromXmlError(_('Every peer instruction tool must contain a \"display_name\" element.'))\n    else:\n        display_name = _safe_get_text(display_name_el)\n\n    rationale_size_min = int(root.attrib['rationale_size_min']) if 'rationale_size_min' in root.attrib else None\n    rationale_size_max = int(root.attrib['rationale_size_max']) if 'rationale_size_max' in root.attrib else None\n\n    question_el = root.find('question')\n    if question_el is None:\n        raise UpdateFromXmlError(_('Every peer instruction must tool contain a \"question\" element.'))\n    else:\n        question = parse_question_xml(question_el)\n\n    options_el = root.find('options')\n    if options_el is None:\n        raise UpdateFromXmlError(_('Every peer instruction must tool contain a \"options\" element.'))\n    else:\n        options, correct_answer, correct_rationale = parse_options_xml(options_el)\n\n    seeds_el = root.find('seeds')\n    if seeds_el is None:\n        raise UpdateFromXmlError(_('Every peer instruction must tool contain a \"seeds\" element.'))\n    else:\n        seeds = parse_seeds_xml(seeds_el)\n\n    algo = unicode(root.attrib['algorithm']) if 'algorithm' in root.attrib else None\n    num_responses = unicode(root.attrib['num_responses']) if 'num_responses' in root.attrib else None\n\n    return {\n        'display_name': display_name,\n        'question_text': question,\n        'options': options,\n        'rationale_size': {'min': rationale_size_min, 'max': rationale_size_max},\n        'correct_answer': correct_answer,\n        'correct_rationale': correct_rationale,\n        'seeds': seeds,\n        'algo': {\"name\": algo, 'num_responses': num_responses}\n    }", "docstring": "Update the UBCPI XBlock's content from an XML definition.\n\nWe need to be strict about the XML we accept, to avoid setting\nthe XBlock to an invalid state (which will then be persisted).\n\nArgs:\nroot (lxml.etree.Element): The XML definition of the XBlock's content.\n\nReturns:\nA dictionary of all of the XBlock's content.\n\nRaises:\nUpdateFromXmlError: The XML definition is invalid", "source": "juraj-google-style"}
{"code": "def learn(self, features, labels):\n        \n        labels = np.ravel(labels)\n        self.__learn_labels(labels)\n        if len(labels) == 0:\n            return\n\n        labels = self.labels.transform(labels)\n        if self.feature_length > 0 and hasattr(self.clf, 'partial_fit'):\n            \n            self.clf = self.clf.partial_fit(features, labels)\n        else:\n            self.clf = self.clf.fit(features, labels)\n            self.feature_length = len(features[0])", "docstring": "Fits the classifier\n\nIf it's state is empty, the classifier is fitted, if not\nthe classifier is partially fitted.\nSee sklearn's SGDClassifier fit and partial_fit methods.\n\nArgs:\nfeatures (:obj:`list` of :obj:`list` of :obj:`float`)\nlabels (:obj:`list` of :obj:`str`): Labels for each set of features.\nNew features are learnt.", "source": "juraj-google-style"}
{"code": "def get_type_key(self, seen: set['BaseValue'] | None=None):\n    return self.get_default_type_key()", "docstring": "Build a key from the information used to perform type matching.\n\nGet a hashable object containing this value's type information. Type keys\nare only compared amongst themselves, so we don't care what the internals\nlook like, only that values with different types *always* have different\ntype keys and values with the same type preferably have the same type key.\n\nArgs:\nseen: The set of values seen before while computing the type key.\n\nReturns:\nA hashable object built from this value's type information.", "source": "github-repos"}
{"code": "def get_stored_metadata(self, temp_ver):\n        \n        with open(self._prefixed('%s.metadata' % temp_ver.name)) as f:\n            return json.load(f)", "docstring": "Retrieves the metadata for the given template version from the store\n\nArgs:\ntemp_ver (TemplateVersion): template version to retrieve the\nmetadata for\n\nReturns:\ndict: the metadata of the given template version", "source": "juraj-google-style"}
{"code": "def loss(logits, labels):\n    labels = tf.cast(labels, tf.int64)\n    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')\n    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n    tf.add_to_collection('losses', cross_entropy_mean)\n    return tf.add_n(tf.get_collection('losses'), name='total_loss')", "docstring": "Add L2Loss to all the trainable variables.\n\nAdd summary for \"Loss\" and \"Loss/avg\".\nArgs:\nlogits: Logits from inference().\nlabels: Labels from distorted_inputs or inputs(). 1-D tensor\nof shape [batch_size]\n\nReturns:\nLoss tensor of type float.", "source": "codesearchnet"}
{"code": "def DeregisterDefinition(self, artifact_definition):\n    \n    artifact_definition_name = artifact_definition.name.lower()\n    if artifact_definition_name not in self._artifact_definitions:\n      raise KeyError(\n          'Artifact definition not set for name: {0:s}.'.format(\n              artifact_definition.name))\n\n    del self._artifact_definitions[artifact_definition_name]", "docstring": "Deregisters an artifact definition.\n\nArtifact definitions are identified based on their lower case name.\n\nArgs:\nartifact_definition (ArtifactDefinition): an artifact definition.\n\nRaises:\nKeyError: if an artifact definition is not set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def cost(self, logits, target):\n    logits = tf.reshape(logits, [(self._num_steps * self._batch_size), (- 1)])\n    target = tf.reshape(target, [(self._num_steps * self._batch_size), (- 1)])\n    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target)\n    loss = tf.reduce_sum(xent)\n    return (loss / self._batch_size)", "docstring": "Returns cost.\n\nArgs:\nlogits: model output.\ntarget: target.\n\nReturns:\nCross-entropy loss for a sequence of logits. The loss will be averaged\nacross time steps if time_average_cost was enabled at construction time.", "source": "codesearchnet"}
{"code": "def trace(self, predicate):\n        \n        self._handler = predicate\n        if self.threading_support is None or self.threading_support:\n            self._threading_previous = getattr(threading, '_trace_hook', None)\n            threading.settrace(self)\n        self._previous = sys.gettrace()\n        sys.settrace(self)\n        return self", "docstring": "Starts tracing with the given callable.\n\nArgs:\npredicate (callable that accepts a single :obj:`hunter.Event` argument):\nReturn:\nself", "source": "juraj-google-style"}
{"code": "def get_dataclass(self, json_dataclass: type[T]) -> T:\n    if not mime_types.is_dataclass(self.mimetype):\n        raise ValueError('Part is not a dataclass.')\n    try:\n        return json_dataclass.from_json(self.text)\n    except AttributeError as e:\n        raise ValueError(f'{json_dataclass.__name__} is not a valid json dataclass') from e", "docstring": "Returns representation of the Part as a given dataclass.\n\nArgs:\njson_dataclass: A dataclass that can be converted to/from JSON.\n\nReturns:\nThe dataclass representation of the Part.", "source": "github-repos"}
{"code": "def ProcessFile(filename, vlevel, extra_check_functions=None):\n    _SetVerboseLevel(vlevel)\n    _BackupFilters()\n    if (not ProcessConfigOverrides(filename)):\n        _RestoreFilters()\n        return\n    lf_lines = []\n    crlf_lines = []\n    try:\n        if (filename == '-'):\n            lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\\n')\n        else:\n            lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\\n')\n        for linenum in range((len(lines) - 1)):\n            if lines[linenum].endswith('\\r'):\n                lines[linenum] = lines[linenum].rstrip('\\r')\n                crlf_lines.append((linenum + 1))\n            else:\n                lf_lines.append((linenum + 1))\n    except IOError:\n        _cpplint_state.PrintError((\"Skipping input '%s': Can't open for reading\\n\" % filename))\n        _RestoreFilters()\n        return\n    file_extension = filename[(filename.rfind('.') + 1):]\n    if ((filename != '-') and (file_extension not in GetAllExtensions())):\n        bazel_gen_files = set(['external/local_config_cc/libtool', 'external/local_config_cc/make_hashed_objlist.py', 'external/local_config_cc/wrapped_ar', 'external/local_config_cc/wrapped_clang', 'external/local_config_cc/xcrunwrapper.sh'])\n        if (not (filename in bazel_gen_files)):\n            _cpplint_state.PrintError(('Ignoring %s; not a valid file name (%s)\\n' % (filename, ', '.join(GetAllExtensions()))))\n    else:\n        ProcessFileData(filename, file_extension, lines, Error, extra_check_functions)\n        if (lf_lines and crlf_lines):\n            for linenum in crlf_lines:\n                Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\\\r (^M) found; better to use only \\\\n')\n    _RestoreFilters()", "docstring": "Does google-lint on a single file.\n\nArgs:\nfilename: The name of the file to parse.\n\nvlevel: The level of errors to report.  Every error of confidence\n>= verbose_level will be reported.  0 is a good default.\n\nextra_check_functions: An array of additional check functions that will be\nrun on each source line. Each function takes 4\narguments: filename, clean_lines, line, error", "source": "codesearchnet"}
{"code": "def visit_indexer(self, indexer: _evaluation.IndexerNode) -> _sql_data_types.Select:\n    collection_result = self.visit(indexer.collection)\n    index_result = self.visit(indexer.index)\n    indexed_collection = f'SELECT ROW_NUMBER() OVER() AS row_,\\n{collection_result.sql_alias}\\nFROM {collection_result.to_subquery()}'\n    sql_alias = f'indexed_{collection_result.sql_alias}'\n    return _sql_data_types.Select(select_part=_sql_data_types.Identifier(collection_result.sql_alias, collection_result.sql_data_type, _sql_alias=sql_alias), from_part=f'({indexed_collection}) AS inner_tbl', where_part=f'(inner_tbl.row_ - 1) = {index_result.as_operand()}')", "docstring": "Translates a FHIRPath indexer expression to Standard SQL.\n\nArgs:\nindexer: The `_Indexer` Expression node.\n\nReturns:\nA compiled Standard SQL expression.", "source": "github-repos"}
{"code": "def solve(ast, builtins_pytd, protocols_pytd):\n    builtins_pytd = transforms.RemoveMutableParameters(builtins_pytd)\n    builtins_pytd = visitors.LookupClasses(builtins_pytd)\n    protocols_pytd = visitors.LookupClasses(protocols_pytd)\n    ast = visitors.LookupClasses(ast, builtins_pytd)\n    return (TypeSolver(ast, builtins_pytd, protocols_pytd).solve(), extract_local(ast))", "docstring": "Solve the unknowns in a pytd AST using the standard Python builtins.\n\nArgs:\nast: A pytd.TypeDeclUnit, containing classes named ~unknownXX.\nbuiltins_pytd: A pytd for builtins.\nprotocols_pytd: A pytd for protocols.\n\nReturns:\nA tuple of (1) a dictionary (str->str) mapping unknown class names to known\nclass names and (2) a pytd.TypeDeclUnit of the complete classes in ast.", "source": "github-repos"}
{"code": "def generate_sentence(self, chain):\n\n    def weighted_choice(choices):\n        total_weight = sum((weight for (val, weight) in choices))\n        rand = random.uniform(0, total_weight)\n        upto = 0\n        for (val, weight) in choices:\n            if ((upto + weight) >= rand):\n                return val\n            upto += weight\n    sentence = list(random.choice(chain.startwords))\n    while (not (sentence[(- 1)][(- 1)] in ['.', '?', '!'])):\n        sentence.append(weighted_choice(chain.content[tuple(sentence[(- 2):])].items()))\n    return ' '.join(sentence)", "docstring": "!DEMO!\nDemo function that shows how to generate a simple sentence starting with\nuppercase letter without lenght limit.\n\nArgs:\nchain: MarkovChain that will be used to generate sentence", "source": "codesearchnet"}
{"code": "def process_event(self, event_name: str, data: dict):\n        \n        if (isinstance(self.opt.get(\"learning_rate\", None), float) and\n                isinstance(self.opt.get(\"learning_rate_decay\", None), float)):\n            pass\n        else:\n            if event_name == 'after_train_log':\n                if (self.get_learning_rate_variable() is not None) and ('learning_rate' not in data):\n                    data['learning_rate'] = float(K.get_value(self.get_learning_rate_variable()))\n                    \n                if (self.get_momentum_variable() is not None) and ('momentum' not in data):\n                    data['momentum'] = float(K.get_value(self.get_momentum_variable()))\n                    \n            else:\n                super().process_event(event_name, data)", "docstring": "Process event after epoch\nArgs:\nevent_name: whether event is send after epoch or batch.\nSet of values: ``\"after_epoch\", \"after_batch\"``\ndata: event data (dictionary)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def from_filenames(poscar_filenames, transformations=None, extend_collection=False):\n    tstructs = []\n    for filename in poscar_filenames:\n        with open(filename, 'r') as f:\n            tstructs.append(TransformedStructure.from_poscar_string(f.read(), []))\n    return StandardTransmuter(tstructs, transformations, extend_collection=extend_collection)", "docstring": "Convenient constructor to generates a POSCAR transmuter from a list of\nPOSCAR filenames.\n\nArgs:\nposcar_filenames: List of POSCAR filenames\ntransformations: New transformations to be applied to all\nstructures.\nextend_collection:\nSame meaning as in __init__.", "source": "codesearchnet"}
{"code": "def export(self, top=True):\n        \n        out = []\n        if top:\n            out.append(self._internal_name)\n        out.append(self._to_str(self.number_of_records_per_hour))\n        out.append(self._to_str(self.data_period_name_or_description))\n        out.append(self._to_str(self.data_period_start_day_of_week))\n        out.append(self._to_str(self.data_period_start_day))\n        out.append(self._to_str(self.data_period_end_day))\n        return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\ntop (bool):  if True appends `internal_name` before values.\nAll non list objects should be exported with value top=True,\nall list objects, that are embedded in as fields inlist objects\nshould be exported with `top`=False\n\nReturns:\nstr: The objects string representation", "source": "juraj-google-style"}
{"code": "def _consume_line(line_info, state):\n    _update_section_state(line_info, state)\n    if state.section.title is None:\n        if state.summary.permitted:\n            if line_info.remaining:\n                state.summary.lines.append(line_info.remaining)\n            elif state.summary.lines:\n                state.summary.permitted = False\n        else:\n            state.description.lines.append(line_info.remaining_raw)\n    else:\n        state.summary.permitted = False\n    if state.section.new and state.section.format == Formats.RST:\n        directive = _get_directive(line_info)\n        directive_tokens = directive.split()\n        if state.section.title == Sections.ARGS:\n            name = directive_tokens[-1]\n            arg = _get_or_create_arg_by_name(state, name, is_kwarg=directive_tokens[0] == 'key')\n            if len(directive_tokens) == 3:\n                arg.type.lines.append(directive_tokens[1])\n            state.current_arg = arg\n        elif state.section.title == Sections.TYPE:\n            name = directive_tokens[-1]\n            arg = _get_or_create_arg_by_name(state, name)\n            state.current_arg = arg\n    if state.section.format == Formats.NUMPY and _line_is_hyphens(line_info.remaining):\n        return\n    if state.section.title == Sections.ARGS:\n        if state.section.format == Formats.GOOGLE:\n            _consume_google_args_line(line_info, state)\n        elif state.section.format == Formats.RST:\n            state.current_arg.description.lines.append(line_info.remaining.strip())\n        elif state.section.format == Formats.NUMPY:\n            line_stripped = line_info.remaining.strip()\n            if _is_arg_name(line_stripped):\n                arg = _get_or_create_arg_by_name(state, line_stripped)\n                state.current_arg = arg\n            elif _line_is_numpy_parameter_type(line_info):\n                possible_args, type_data = line_stripped.split(':', 1)\n                arg_names = _as_arg_names(possible_args)\n                if arg_names:\n                    for arg_name in arg_names:\n                        arg = _get_or_create_arg_by_name(state, arg_name)\n                        arg.type.lines.append(type_data)\n                        state.current_arg = arg\n                elif state.current_arg:\n                    state.current_arg.description.lines.append(line_info.remaining.strip())\n                else:\n                    pass\n            elif state.current_arg:\n                state.current_arg.description.lines.append(line_info.remaining.strip())\n            else:\n                pass\n    elif state.section.title == Sections.RETURNS:\n        state.returns.lines.append(line_info.remaining.strip())\n    elif state.section.title == Sections.YIELDS:\n        state.yields.lines.append(line_info.remaining.strip())\n    elif state.section.title == Sections.RAISES:\n        state.raises.lines.append(line_info.remaining.strip())\n    elif state.section.title == Sections.TYPE:\n        if state.section.format == Formats.RST:\n            assert state.current_arg is not None\n            state.current_arg.type.lines.append(line_info.remaining.strip())\n        else:\n            pass", "docstring": "Consumes one line of text, updating the state accordingly.\n\nWhen _consume_line is called, part of the line may already have been processed\nfor header information.\n\nArgs:\nline_info: Information about the current and next line of the docstring.\nstate: The state of the docstring parser.", 
"source": "github-repos"}
{"code": "def output_waiting(self):\n    buf = array.array('I', [0])\n    try:\n        fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True)\n    except OSError as e:\n        raise SerialError(e.errno, ('Querying output waiting: ' + e.strerror))\n    return buf[0]", "docstring": "Query the number of bytes waiting to be written to the serial port.\n\nReturns:\nint: number of bytes waiting to be written.\n\nRaises:\nSerialError: if an I/O or OS error occurs.", "source": "codesearchnet"}
{"code": "async def get_headline(self, name):\n        \n\n        resp = await self.send_command(OPERATIONS.CMD_QUERY_HEADLINE, {'name': name},\n                                       MESSAGES.QueryHeadlineResponse, timeout=5.0)\n\n        if resp is not None:\n            resp = states.ServiceMessage.FromDictionary(resp)\n\n        return resp", "docstring": "Get stored messages for a service.\n\nArgs:\nname (string): The name of the service to get messages from.\n\nReturns:\nServiceMessage: the headline or None if no headline has been set", "source": "juraj-google-style"}
{"code": "def update_metadata(self, resource, keys_vals):\n        \n        self.metadata_service.set_auth(self._token_metadata)\n        self.metadata_service.update(resource, keys_vals)", "docstring": "Updates key-value pairs with the given resource.\n\nWill attempt to update all key-value pairs even if some fail.\nKeys must already exist.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys_vals (dictionary): Collection of key-value pairs to update on\nthe given resource.\n\nRaises:\nHTTPErrorList on failure.", "source": "juraj-google-style"}
{"code": "def __init__(self, storage_writer, knowledge_base, data_location=None):\n    \n    super(AnalysisMediator, self).__init__()\n    self._abort = False\n    self._data_location = data_location\n    self._event_filter_expression = None\n    self._knowledge_base = knowledge_base\n    self._mount_path = None\n    self._storage_writer = storage_writer\n    self._text_prepend = None\n\n    self.last_activity_timestamp = 0.0\n    self.number_of_produced_analysis_reports = 0\n    self.number_of_produced_event_tags = 0", "docstring": "Initializes an analysis plugin mediator.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\nknowledge_base (KnowledgeBase): contains information from the source\ndata needed for analysis.\ndata_location (Optional[str]): location of data files used during\nanalysis.", "source": "juraj-google-style"}
{"code": "def make_tensor_model_fn(model_fn: str) -> TensorInferenceFn:\n\n    def attr_fn(batch: Sequence[torch.Tensor], model: torch.nn.Module, device: str, inference_args: Optional[dict[str, Any]]=None, model_id: Optional[str]=None) -> Iterable[PredictionResult]:\n        with torch.no_grad():\n            batched_tensors = torch.stack(batch)\n            batched_tensors = _convert_to_device(batched_tensors, device)\n            pred_fn = getattr(model, model_fn)\n            predictions = pred_fn(batched_tensors, **inference_args)\n            return utils._convert_to_result(batch, predictions, model_id)\n    return attr_fn", "docstring": "Produces a TensorInferenceFn that uses a method of the model other that\nthe forward() method.\n\nArgs:\nmodel_fn: A string name of the method to be used. This is accessed through\ngetattr(model, model_fn)", "source": "github-repos"}
{"code": "def install_exception_handler(handler):\n  \n  if not isinstance(handler, ExceptionHandler):\n    raise TypeError('handler of type %s does not inherit from ExceptionHandler'\n                    % type(handler))\n  EXCEPTION_HANDLERS.append(handler)", "docstring": "Installs an exception handler.\n\nArgs:\nhandler: ExceptionHandler, the exception handler to install.\n\nRaises:\nTypeError: Raised when the handler was not of the correct type.\n\nAll installed exception handlers will be called if main() exits via\nan abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,\nFlagsError or UsageError.", "source": "juraj-google-style"}
{"code": "def _get_context_id(self, context):\n    if context in self._context_to_id:\n        return self._context_to_id[context]\n    graph_is_new = False\n    with self._context_lock:\n        if context not in self._context_to_id:\n            graph_is_new = True\n            context_id = _get_id()\n            self._context_to_id[context] = context_id\n    if graph_is_new:\n        self.get_writer().WriteDebuggedGraph(debug_event_pb2.DebuggedGraph(graph_id=context_id, graph_name=getattr(context, 'name', None), outer_context_id=self._get_outer_context_id(context)))\n    return self._context_to_id[context]", "docstring": "Get a unique ID for an op-construction context (e.g., a graph).\n\nIf the graph has been encountered before, reuse the same unique ID.\nWhen encountering a new context (graph), this methods writes a DebugEvent\nproto with the debugged_graph field to the proper DebugEvent file.\n\nArgs:\ncontext: A context to get the unique ID for. Must be hashable. E.g., a\nGraph object.\n\nReturns:\nA unique ID for the context.", "source": "github-repos"}
{"code": "def _process_using_meta_feature_generator(self, X, meta_feature_generator):\n        \n\n        all_learner_meta_features = []\n        for idx, base_learner in enumerate(self.base_learners):\n            single_learner_meta_features = getattr(base_learner,\n                                                   self.meta_feature_generators[idx])(X)\n\n            if len(single_learner_meta_features.shape) == 1:\n                single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)\n            all_learner_meta_features.append(single_learner_meta_features)\n\n        all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)\n\n        out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)\n\n        return out", "docstring": "Process using secondary learner meta-feature generator\n\nSince secondary learner meta-feature generator can be anything e.g. predict, predict_proba,\nthis internal method gives the ability to use any string. Just make sure secondary learner\nhas the method.\n\nArgs:\nX (array-like): Features array\n\nmeta_feature_generator (str, unicode): Method for use by secondary learner", "source": "juraj-google-style"}
{"code": "def retry_target(target, predicate, sleep_generator, deadline, on_error=None):\n    if (deadline is not None):\n        deadline_datetime = (datetime_helpers.utcnow() + datetime.timedelta(seconds=deadline))\n    else:\n        deadline_datetime = None\n    last_exc = None\n    for sleep in sleep_generator:\n        try:\n            return target()\n        except Exception as exc:\n            if (not predicate(exc)):\n                raise\n            last_exc = exc\n            if (on_error is not None):\n                on_error(exc)\n        now = datetime_helpers.utcnow()\n        if ((deadline_datetime is not None) and (deadline_datetime < now)):\n            six.raise_from(exceptions.RetryError('Deadline of {:.1f}s exceeded while calling {}'.format(deadline, target), last_exc), last_exc)\n        _LOGGER.debug('Retrying due to {}, sleeping {:.1f}s ...'.format(last_exc, sleep))\n        time.sleep(sleep)\n    raise ValueError('Sleep generator stopped yielding sleep values.')", "docstring": "Call a function and retry if it fails.\n\nThis is the lowest-level retry helper. Generally, you'll use the\nhigher-level retry helper :class:`Retry`.\n\nArgs:\ntarget(Callable): The function to call and retry. This must be a\nnullary function - apply arguments with `functools.partial`.\npredicate (Callable[Exception]): A callable used to determine if an\nexception raised by the target should be considered retryable.\nIt should return True to retry or False otherwise.\nsleep_generator (Iterable[float]): An infinite iterator that determines\nhow long to sleep between retries.\ndeadline (float): How long to keep retrying the target.\non_error (Callable): A function to call while processing a retryable\nexception.  Any error raised by this function will *not* be\ncaught.\n\nReturns:\nAny: the return value of the target function.\n\nRaises:\ngoogle.api_core.RetryError: If the deadline is exceeded while retrying.\nValueError: If the sleep generator stops yielding values.\nException: If the target raises a method that isn't retryable.", "source": "codesearchnet"}
{"code": "def __init__(self, cluster_resolver=None, communication_options=None, *, mesh=None):\n    self._validate_init_args(mesh, cluster_resolver)\n    if not mesh:\n        if not cluster_resolver:\n            cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()\n        dtensor_env_var = _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver)\n        _config_dtensor_env_var(dtensor_env_var)\n        mesh = _build_distributed_mesh(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME)\n    extended = dtensor_strategy_extended.DTensorStrategyExtended(container_strategy=self, mesh=mesh)\n    super().__init__(extended)\n    self._mesh = mesh\n    self._cluster_resolver = cluster_resolver", "docstring": "Creates the strategy.\n\nArgs:\ncluster_resolver: optional\n`tf.distribute.cluster_resolver.ClusterResolver`. In case neither `mesh`\nnor `cluster_resolver` are provided,\n`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\ncommunication_options: currently ignore.\nmesh: optional Dtensor global mesh for the computation. Note that either\n`mesh` or the `cluster_resolver` should be provided. and not both.", "source": "github-repos"}
{"code": "def _ragged_getitem(rt_input, key_list):\n    if not key_list:\n        return rt_input\n    row_key = key_list[0]\n    inner_keys = key_list[1:]\n    if row_key is Ellipsis:\n        expanded_key_list = _expand_ellipsis(key_list, rt_input.shape.ndims)\n        return _ragged_getitem(rt_input, expanded_key_list)\n    if row_key is array_ops.newaxis:\n        inner_rt = _ragged_getitem(rt_input, inner_keys)\n        nsplits = tensor_shape.dimension_at_index(inner_rt.row_splits.shape, 0)\n        if nsplits.value is not None:\n            nsplits = nsplits.value\n        else:\n            nsplits = array_ops.shape(inner_rt.row_splits, out_type=inner_rt.row_splits.dtype)[0]\n        return ragged_tensor.RaggedTensor.from_uniform_row_length(inner_rt, nsplits - 1, nrows=1, validate=False)\n    if isinstance(row_key, slice):\n        sliced_rt_input = _slice_ragged_row_dimension(rt_input, row_key)\n        if rt_input.uniform_row_length is not None:\n            sliced_rt_input = ragged_tensor.RaggedTensor.from_uniform_row_length(sliced_rt_input.values, rt_input.uniform_row_length, nrows=sliced_rt_input.nrows())\n        return _ragged_getitem_inner_dimensions(sliced_rt_input, inner_keys)\n    else:\n        starts = rt_input.row_splits[:-1]\n        limits = rt_input.row_splits[1:]\n        if context.executing_eagerly():\n            try:\n                if int(row_key) >= len(starts):\n                    raise IndexError('Row key {} out of bounds'.format(row_key))\n            except (TypeError, ValueError):\n                pass\n        row = rt_input.values[starts[row_key]:limits[row_key]]\n        return row.__getitem__(inner_keys)", "docstring": "Helper for indexing and slicing ragged tensors with __getitem__().\n\nExtracts the specified piece of the `rt_input`.  See\n`RaggedTensor.__getitem__` for examples and restrictions.\n\nArgs:\nrt_input: The `RaggedTensor` from which a piece should be returned.\nkey_list: The list of keys specifying which piece to return. Each key\ncorresponds with a separate dimension.\n\nReturns:\nThe indicated piece of rt_input.\n\nRaises:\nValueError: If `key_list` is not supported.\nTypeError: If any keys in `key_list` have an unsupported type.", "source": "github-repos"}
{"code": "def parsed_forensic_reports_to_csv(reports):\n    \n    fields = [\"feedback_type\", \"user_agent\", \"version\", \"original_envelope_id\",\n              \"original_mail_from\", \"original_rcpt_to\", \"arrival_date\",\n              \"arrival_date_utc\", \"subject\", \"message_id\",\n              \"authentication_results\", \"dkim_domain\", \"source_ip_address\",\n              \"source_country\", \"source_reverse_dns\", \"source_base_domain\",\n              \"delivery_result\", \"auth_failure\", \"reported_domain\",\n              \"authentication_mechanisms\", \"sample_headers_only\"]\n\n    if type(reports) == OrderedDict:\n        reports = [reports]\n    csv_file = StringIO()\n    csv_writer = DictWriter(csv_file, fieldnames=fields)\n    csv_writer.writeheader()\n    for report in reports:\n        row = report.copy()\n        row[\"source_ip_address\"] = report[\"source\"][\"ip_address\"]\n        row[\"source_reverse_dns\"] = report[\"source\"][\"reverse_dns\"]\n        row[\"source_base_domain\"] = report[\"source\"][\"base_domain\"]\n        row[\"source_country\"] = report[\"source\"][\"country\"]\n        del row[\"source\"]\n        row[\"subject\"] = report[\"parsed_sample\"][\"subject\"]\n        row[\"auth_failure\"] = \",\".join(report[\"auth_failure\"])\n        authentication_mechanisms = report[\"authentication_mechanisms\"]\n        row[\"authentication_mechanisms\"] = \",\".join(\n            authentication_mechanisms)\n        del row[\"sample\"]\n        del row[\"parsed_sample\"]\n        csv_writer.writerow(row)\n\n    return csv_file.getvalue()", "docstring": "Converts one or more parsed forensic reports to flat CSV format, including\nheaders\n\nArgs:\nreports: A parsed forensic report or list of parsed forensic reports\n\nReturns:\nstr: Parsed forensic report data in flat CSV format, including headers", "source": "juraj-google-style"}
{"code": "def GetRegistryFileMapping(self, registry_file):\n    \n    if not registry_file:\n      return ''\n\n    candidate_mappings = []\n    for mapping in self._REGISTRY_FILE_MAPPINGS_NT:\n      if not mapping.unique_key_paths:\n        continue\n\n      \n      match = True\n      for key_path in mapping.unique_key_paths:\n        registry_key = registry_file.GetKeyByPath(key_path)\n        if not registry_key:\n          match = False\n\n      if match:\n        candidate_mappings.append(mapping)\n\n    if not candidate_mappings:\n      return ''\n\n    if len(candidate_mappings) == 1:\n      return candidate_mappings[0].key_path_prefix\n\n    key_path_prefixes = frozenset([\n        mapping.key_path_prefix for mapping in candidate_mappings])\n\n    expected_key_path_prefixes = frozenset([\n        'HKEY_CURRENT_USER',\n        'HKEY_CURRENT_USER\\\\Software\\\\Classes'])\n\n    if key_path_prefixes == expected_key_path_prefixes:\n      return 'HKEY_CURRENT_USER'\n\n    raise RuntimeError('Unable to resolve Windows Registry file mapping.')", "docstring": "Determines the Registry file mapping based on the content of the file.\n\nArgs:\nregistry_file (WinRegistyFile): Windows Registry file.\n\nReturns:\nstr: key path prefix or an empty string.\n\nRaises:\nRuntimeError: if there are multiple matching mappings and\nthe correct mapping cannot be resolved.", "source": "juraj-google-style"}
{"code": "def move(self, to_project_id, **kwargs):\n        \n        path = '%s/%s/move' % (self.manager.path, self.get_id())\n        data = {'to_project_id': to_project_id}\n        server_data = self.manager.gitlab.http_post(path, post_data=data,\n                                                    **kwargs)\n        self._update_attrs(server_data)", "docstring": "Move the issue to another project.\n\nArgs:\nto_project_id(int): ID of the target project\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the issue could not be moved", "source": "juraj-google-style"}
{"code": "def hr_dp004(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `hr_dp004`'.format(value))\n    self._hr_dp004 = value", "docstring": "Corresponds to IDD Field `hr_dp004`\nhumidity ratio corresponding to\nDew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `hr_dp004`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx, extra_feed_dict):\n    if x.dtype.is_complex:\n        x_shape = tuple(x_shape) + (2,)\n    dy_factor = 2 if dy.dtype.is_complex else 1\n    x_size = _product(x_shape)\n    x_val_size = _product(x_shape[1:])\n    dy_size = _product(dy_shape) * dy_factor\n    jacobian = np.zeros((x_size, dy_size), dtype=x.dtype.real_dtype.as_numpy_dtype)\n    dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)\n    dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)\n    sess = ops.get_default_session()\n    for col in range(dy_size):\n        dy_data_flat[col] = 1\n        if isinstance(dx, indexed_slices.IndexedSlices):\n            backprop_indices, backprop_values = sess.run([dx.indices, dx.values], feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n            for i, v in zip(backprop_indices, backprop_values):\n                r_begin = i * x_val_size\n                r_end = r_begin + x_val_size\n                jacobian[r_begin:r_end, col] += v.flat\n        else:\n            assert isinstance(dx, tensor.Tensor), 'dx = ' + str(dx)\n            backprop = sess.run(dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n            jacobian[:, col] = backprop.ravel().view(jacobian.dtype)\n        dy_data_flat[col] = 0\n    if not dy_size:\n        backprop = sess.run(dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n        if backprop.shape != x_data.shape:\n            raise ValueError('Empty gradient has wrong shape: expected %s, got %s' % (x_data.shape, backprop.shape))\n        if np.any(backprop):\n            raise ValueError('Empty tensor with nonzero gradients')\n    logging.vlog(1, 'Theoretical Jacobian =\\n%s', jacobian)\n    return jacobian", "docstring": "Computes the theoretical Jacobian for dy/dx.\n\nComputes the theoretical Jacobian using the ops generated by\ncompute_gradient().\n\nArgs:\nx: the tensor \"x\".\nx_shape: the dimensions of x as a tuple or an array of ints.\nx_data: a numpy parray as the input data for x\ndy: the tensor \"dy\".\ndy_shape: the dimensions of dy as a tuple or an array of ints.\ndx: Tensor or IndexedSlices representing dx\nextra_feed_dict: dict that allows fixing specified tensor values\nduring the jacobian calculation.\n\nReturns:\nA 2-d numpy array representing the Jacobian for dy/dx. It has \"x_size\" rows\nand \"dy_size\" columns where \"x_size\" is the number of elements in x and\n\"dy_size\" is the number of elements in dy.\n\nRaises:\nValueError: If `dy` is empty but the gradient is nonzero.", "source": "github-repos"}
{"code": "def lowpass_filter(data: FLOATS_TYPE, sampling_freq_hz: float, cutoff_freq_hz: float, numtaps: int) -> FLOATS_TYPE:\n    coeffs = firwin(numtaps=numtaps, cutoff=normalized_frequency(cutoff_freq_hz, sampling_freq_hz), pass_zero=True)\n    filtered_data = lfilter(b=coeffs, a=1.0, x=data)\n    return filtered_data", "docstring": "Apply a low-pass filter to the data.\n\nArgs:\ndata: time series of the data\nsampling_freq_hz: sampling frequency :math:`f_s`, in Hz\n(or other consistent units)\ncutoff_freq_hz: filter cutoff frequency in Hz\n(or other consistent units)\nnumtaps: number of filter taps\n\nReturns:\nfiltered data\n\nNote: number of filter taps = filter order + 1", "source": "codesearchnet"}
{"code": "def __add__(self, other):\n        \n        if not all(np.equal(self.x, other.x)):\n            raise ValueError(\"X axis values are not compatible!\")\n        return self.__class__(self.x, self.y + other.y, *self._args,\n                              **self._kwargs)", "docstring": "Add two Spectrum object together. Checks that x scales are the same.\nOtherwise, a ValueError is thrown.\n\nArgs:\nother: Another Spectrum object\n\nReturns:\nSum of the two Spectrum objects", "source": "juraj-google-style"}
{"code": "def set_lock_state(self, code, device_label, state):\n    response = None\n    try:\n        response = requests.put(urls.set_lockstate(self._giid, device_label, state), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({'code': str(code)}))\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)\n    return json.loads(response.text)", "docstring": "Lock or unlock\n\nArgs:\ncode (str): Lock code\ndevice_label (str): device label of lock\nstate (str): 'lock' or 'unlock'", "source": "codesearchnet"}
{"code": "def _ConcatGradHelper(op: ops.Operation, grad, start_value_index, end_value_index, dim_index):\n\n    def _CreateDenseMaskAndBegin(sizes, concat_dim):\n        \n        shape_of_shape = array_ops.shape(sizes[0])\n        mask = array_ops.concat([array_ops.zeros(array_ops.expand_dims(concat_dim, 0), dtype=dtypes.int32), [1], array_ops.zeros(shape_of_shape - concat_dim - 1, dtype=dtypes.int32)], 0)\n        begin = array_ops.zeros(shape_of_shape, dtype=dtypes.int32)\n        return (mask, begin)\n\n    def _ExtractInputShapes(inputs):\n        \n        if context.executing_eagerly():\n            return array_ops.shape_n(inputs)\n        sizes = []\n        fully_known = True\n        for x in inputs:\n            input_shape = array_ops.shape(x)\n            if not isinstance(input_shape, tensor.Tensor) or input_shape.op.type != 'Const':\n                fully_known = False\n                break\n            sizes.append(input_shape)\n        if fully_known:\n            return sizes\n        else:\n            return array_ops.shape_n(inputs)\n    if len(op.inputs) == 2:\n        return grad + [None] if end_value_index <= dim_index else [None] + grad\n    concat_dim = op.inputs[dim_index]\n    input_values = op.inputs[start_value_index:end_value_index]\n    out_grads = []\n    if isinstance(grad, tensor.Tensor):\n        if context.executing_eagerly() or isinstance(concat_dim, ops.EagerTensor):\n            non_neg_concat_dim = concat_dim._numpy().item(0) % input_values[0]._rank()\n            sizes = pywrap_tfe.TFE_Py_TensorShapeSlice(input_values, non_neg_concat_dim)\n            out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)\n        else:\n            if constant_op.is_constant(concat_dim):\n                grad_context = control_flow_util.GetOutputContext(grad.op)\n                dim_context = control_flow_util.GetOutputContext(concat_dim.op)\n                if dim_context != grad_context:\n                    value = tensor_util.constant_value(concat_dim)\n                    concat_dim = constant_op.constant(value=value, dtype=concat_dim.dtype)\n            non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])\n            sizes = _ExtractInputShapes(input_values)\n            if len(sizes) > 16:\n                sizes = array_ops.squeeze(array_ops.slice(array_ops_stack.stack(sizes, axis=1), [non_neg_concat_dim, 0], [1, -1]))\n                out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)\n            else:\n                offset = gen_array_ops.concat_offset(non_neg_concat_dim, sizes)\n                for begin, size in zip(offset, sizes):\n                    out_grads.append(array_ops.slice(grad, begin, size))\n    elif isinstance(grad, indexed_slices_lib.IndexedSlices):\n        non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])\n        concat_dim_static = tensor_util.constant_value(concat_dim)\n        if concat_dim_static is None:\n            raise ValueError('Can only compute IndexedSlices gradient with statically-known concat_dim')\n        if concat_dim_static < 0:\n            rank = tensor_util.constant_value(array_ops.rank(input_values[0]))\n            if rank is None:\n                raise ValueError('Can only compute IndexedSlices gradient with negative concat_dim when first value rank is statically-known.')\n            concat_dim_static %= rank\n        sizes = [array_ops.shape(x) for x in input_values]\n        if concat_dim_static > 0:\n            mask, begin = 
_CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)\n            for size in sizes:\n                new_values = array_ops.slice(grad.values, begin, array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))\n                out_grads.append(indexed_slices_lib.IndexedSlices(new_values, grad.indices, size))\n                begin = math_ops.add(begin, size * mask)\n        else:\n            start = constant_op.constant(0, dtype=grad.indices.dtype)\n            for size in sizes:\n                size_concat_dim = array_ops.gather(size, non_neg_concat_dim)\n                if size_concat_dim.dtype != grad.indices.dtype:\n                    size_concat_dim = math_ops.cast(size_concat_dim, dtype=grad.indices.dtype)\n                end = start + size_concat_dim\n                indices_to_select = array_ops.squeeze(array_ops.where(math_ops.logical_and(grad.indices >= start, grad.indices < end)), axis=[1])\n                new_indices = array_ops.gather(grad.indices, indices_to_select) - start\n                new_values = array_ops.gather(grad.values, indices_to_select)\n                out_grads.append(indexed_slices_lib.IndexedSlices(new_values, new_indices, size))\n                start = end\n    else:\n        raise TypeError('Expected Tensor or IndexedSlices, got %s' % type(grad))\n    return out_grads + [None] if end_value_index <= dim_index else [None] + out_grads", "docstring": "Gradient for concat op.\n\nArgs:\nop: An operation.\ngrad: `Tensor` or `IndexedSlices` representing the gradients with respect to\neach output of the op.\nstart_value_index: An integer index of the first value in the op.inputs.\nend_value_index: An integer index of the last value in the op.inputs.\ndim_index: An integer index of concat_dim or axis parameter in op.inputs.\n\nReturns:\nTensors representing the partial gradients with respect to each input\nof the op.\n\nRaises:\nValueError: if concat_dim/axis is not statically known.", "source": "github-repos"}
{"code": "def delete_keyvault(access_token, subscription_id, rgname, vault_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API])\n    return do_delete(endpoint, access_token)", "docstring": "Deletes a key vault in the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nvault_name (str): Name of the new key vault.\n\nReturns:\nHTTP response. 200 OK.", "source": "codesearchnet"}
{"code": "def get_user_stats(self, users, lang=None, concepts=None, since=None, recalculate=True):\n    only_one_user = False\n    if (not isinstance(users, list)):\n        users = [users]\n        only_one_user = True\n    if recalculate:\n        if (lang is None):\n            raise ValueError('Recalculation without lang is not supported.')\n        time_start = time_lib()\n        concepts_to_recalculate = Concept.objects.get_concepts_to_recalculate(users, lang, concepts)\n        LOGGER.debug('user_stats - getting identifying concepts to recalculate: %ss', (time_lib() - time_start))\n        time_start = time_lib()\n        self.recalculate_concepts(concepts_to_recalculate, lang)\n        LOGGER.debug('user_stats - recalculating concepts: %ss', (time_lib() - time_start))\n    qs = self.prepare_related().filter(user__in=users, concept__active=True)\n    if (concepts is not None):\n        qs = qs.filter(concept__in=concepts)\n    if (lang is not None):\n        qs = qs.filter(concept__lang=lang)\n    if (since is not None):\n        qs = qs.filter(time__gte=since)\n    data = defaultdict((lambda : defaultdict((lambda : {}))))\n    for user_stat in qs:\n        data[user_stat.user_id][user_stat.concept.identifier][user_stat.stat] = user_stat.value\n    if only_one_user:\n        return data[(users[0].pk if (type(users[0]) == User) else users[0])]\n    return data", "docstring": "Finds all UserStats of given concepts and users.\nRecompute UserStats if necessary\n\nArgs:\nusers (Optional[list of users] or [user]): list of primary keys of user or users\nDefaults to None meaning all users.\nlang (string): use only concepts witch the lang. Defaults to None meaning all languages.\nconcepts (Optional[list of concepts]): list of primary keys of concepts or concepts\nDefaults to None meaning all concepts.\n\nReturns:\ndict: user_id  -> dict (concept_identifier - > (stat_name  -> value)) -- for more users\ndict: concept_identifier - > (stat_name  -> value) -- for one user", "source": "codesearchnet"}
{"code": "def info(self, channel_id):\n    resource = 'v1/channel.info?channel_id={}'.format(channel_id)\n    resp = self._rtm_client.get(resource)\n    if resp.is_fail():\n        raise RTMServiceError('Failed to get channel information', resp)\n    return resp.data['result']", "docstring": "Gets channel information by channel id\n\nArgs:\nchannel_id(int): the id of channel\n\nReturns:\nChannel\n\nThrows:\nRTMServiceError when request failed", "source": "codesearchnet"}
{"code": "def get_sample_window(self, type_tag, size):\n        \n        md5_list = self.data_store.get_sample_window(type_tag, size)\n        return self.store_sample_set(md5_list)", "docstring": "Get a sample from the DataStore.\nArgs:\ntype_tag: the type of samples ('pcap','exe','pdf')\nsize: the size of the window in MegaBytes (10 = 10MB)\nReturns:\nA sample_set handle which represents the newest samples within the size window", "source": "juraj-google-style"}
{"code": "def diff_parameters(old_params, new_params):\n    [changes, diff] = diff_dictionaries(old_params, new_params)\n    if (changes == 0):\n        return []\n    return diff", "docstring": "Compares the old vs. new parameters and returns a \"diff\"\n\nIf there are no changes, we return an empty list.\n\nArgs:\nold_params(dict): old paramters\nnew_params(dict): new parameters\n\nReturns:\nlist: A list of differences", "source": "codesearchnet"}
{"code": "async def update_example_status(example: Example, client: GRPCClient):\n    datasets: List[api_pb2.Dataset] = []\n    for emulator in example.tag.emulators:\n        dataset: Dataset = example.tag.datasets[emulator.topic.source_dataset]\n        datasets.append(api_pb2.Dataset(type=api_pb2.EmulatorType.Value(f'EMULATOR_TYPE_{emulator.type.upper()}'), options={'topic': emulator.topic.id}, dataset_path=dataset.file_name))\n    files: List[api_pb2.SnippetFile] = [api_pb2.SnippetFile(name=example.filepath, content=example.code, is_main=True)]\n    for file in example.tag.files:\n        files.append(api_pb2.SnippetFile(name=file.name, content=file.content, is_main=False))\n    pipeline_id = await client.run_code(example.code, example.sdk, example.tag.pipeline_options, datasets, files=files)\n    example.pipeline_id = pipeline_id\n    status = await client.check_status(pipeline_id)\n    while status in [STATUS_VALIDATING, STATUS_PREPARING, STATUS_COMPILING, STATUS_EXECUTING]:\n        await asyncio.sleep(Config.PAUSE_DELAY)\n        status = await client.check_status(pipeline_id)\n    example.status = status", "docstring": "Receive status for examples and update example.status and pipeline_id\n\nUse client to send requests to the backend:\n1. Start code processing.\n2. Ping the backend while status is STATUS_VALIDATING/\nSTATUS_PREPARING/STATUS_COMPILING/STATUS_EXECUTING\nUpdate example.status with resulting status.\n\nArgs:\nexample: beam example for processing and updating status and pipeline_id.\nclient: client to send requests to the server.", "source": "github-repos"}
{"code": "def _CallAndUpdateTrace(component, args, component_trace, treatment='class', target=None):\n    if not target:\n        target = component\n    filename, lineno = inspectutils.GetFileAndLine(component)\n    metadata = decorators.GetMetadata(component)\n    fn = component.__call__ if treatment == 'callable' else component\n    parse = _MakeParseFn(fn, metadata)\n    (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)\n    if inspectutils.IsCoroutineFunction(fn):\n        loop = asyncio.get_event_loop()\n        component = loop.run_until_complete(fn(*varargs, **kwargs))\n    else:\n        component = fn(*varargs, **kwargs)\n    if treatment == 'class':\n        action = trace.INSTANTIATED_CLASS\n    elif treatment == 'routine':\n        action = trace.CALLED_ROUTINE\n    else:\n        action = trace.CALLED_CALLABLE\n    component_trace.AddCalledComponent(component, target, consumed_args, filename, lineno, capacity, action=action)\n    return (component, remaining_args)", "docstring": "Call the component by consuming args from args, and update the FireTrace.\n\nThe component could be a class, a routine, or a callable object. This function\ncalls the component and adds the appropriate action to component_trace.\n\nArgs:\ncomponent: The component to call\nargs: Args for calling the component\ncomponent_trace: FireTrace object that contains action trace\ntreatment: Type of treatment used. Indicating whether we treat the component\nas a class, a routine, or a callable.\ntarget: Target in FireTrace element, default is None. If the value is None,\nthe component itself will be used as target.\nReturns:\ncomponent: The object that is the result of the callable call.\nremaining_args: The remaining args that haven't been consumed yet.", "source": "github-repos"}
{"code": "def ParseConversationRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = TangoAndroidConversationEventData()\n    event_data.conversation_identifier = self._GetRowValue(query_hash, row, 'conv_id')\n    date_time = dfdatetime_semantic_time.NotSet()\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a conversation row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "codesearchnet"}
{"code": "def _match_protocol_attribute(self, left, other_type, attribute, subst, view):\n    left_attribute, left_is_bound = self._get_attribute_for_protocol_matching(left.cls, attribute, instance=left, unbind=True)\n    if left_attribute is None:\n        if attribute == '__iter__':\n            left_attribute = self.ctx.convert.constant_to_var(pytd_utils.DummyMethod('__iter__', 'self'))\n        else:\n            _, left_attribute = self.ctx.attribute_handler.get_attribute(self._node, left, attribute)\n    assert left_attribute, f'Attr {attribute!r} not found on {left.full_name}'\n    protocol_attribute_var, _ = self._get_attribute_for_protocol_matching(other_type, attribute, instance=None, unbind=left_is_bound)\n    assert protocol_attribute_var\n    if any((abstract_utils.is_callable(v) for v in left_attribute.data)) and all((abstract_utils.is_callable(protocol_attribute) for protocol_attribute in protocol_attribute_var.data)) and (not isinstance(other_type, abstract.ParameterizedClass)):\n        return subst\n    subst = subst.copy()\n    left_type_params = {t.full_name: t for cls in left.cls.mro for t in cls.template}\n    for k, t in left_type_params.items():\n        if k not in subst:\n            subst[k] = left.get_instance_type_parameter(k)\n            self._type_params.seen.add(t)\n    new_substs = []\n    for new_view in abstract_utils.get_views([left_attribute], self._node):\n        new_view.update(view)\n        bad_matches = []\n        for protocol_attribute in protocol_attribute_var.data:\n            protocol_attribute_types = list(self._get_attribute_types(other_type, protocol_attribute))\n            for protocol_attribute_type in protocol_attribute_types:\n                match_result = self.match_var_against_type(left_attribute, protocol_attribute_type, subst, new_view)\n                if match_result is None:\n                    bad_matches.append((new_view[left_attribute].data, protocol_attribute))\n                    break\n                else:\n                    new_substs.append(match_result)\n            else:\n                break\n        else:\n            bad_left, bad_right = zip(*bad_matches)\n            self._protocol_error = error_types.ProtocolTypeError(left.cls, other_type, attribute, self.ctx.convert.merge_values(bad_left), self.ctx.convert.merge_values(bad_right))\n            return None\n    return self._merge_substs(subst, new_substs)", "docstring": "Checks whether left and other_type are compatible in the given attribute.\n\nArgs:\nleft: An instance of a type.\nother_type: A protocol.\nattribute: An attribute name.\nsubst: The current type parameter assignment.\nview: The current mapping of Variable to Value.\n\nReturns:\nA new type parameter assignment if the matching succeeded, None otherwise.", "source": "github-repos"}
{"code": "def _prevent_2nd_derivative(x):\n  \n  def grad(dy):\n    return array_ops.prevent_gradient(\n        dy, message=\"Second derivative is not implemented.\")\n\n  return tf.identity(x), grad", "docstring": "Disables computation of the second derivatives for a tensor.\n\nNB: you need to apply a non-identity function to the output tensor for the\nexception to be raised.\n\nArguments:\nx: A tensor.\n\nReturns:\nA tensor with the same value and the same derivative as x, but that raises\nLookupError when trying to compute the second derivatives.", "source": "juraj-google-style"}
{"code": "def update_in_hdx(self, update_resources=True, update_resources_by_name=True, remove_additional_resources=False, create_default_views=True, hxl_update=True):\n    loaded = False\n    if ('id' in self.data):\n        self._check_existing_object('dataset', 'id')\n        if self._dataset_load_from_hdx(self.data['id']):\n            loaded = True\n        else:\n            logger.warning(('Failed to load dataset with id %s' % self.data['id']))\n    if (not loaded):\n        self._check_existing_object('dataset', 'name')\n        if (not self._dataset_load_from_hdx(self.data['name'])):\n            raise HDXError('No existing dataset to update!')\n    self._dataset_merge_hdx_update(update_resources=update_resources, update_resources_by_name=update_resources_by_name, remove_additional_resources=remove_additional_resources, create_default_views=create_default_views, hxl_update=hxl_update)", "docstring": "Check if dataset exists in HDX and if so, update it\n\nArgs:\nupdate_resources (bool): Whether to update resources. Defaults to True.\nupdate_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.\nremove_additional_resources (bool): Remove additional resources found in dataset. Defaults to False.\ncreate_default_views (bool): Whether to call package_create_default_resource_views. Defaults to True.\nhxl_update (bool): Whether to call package_hxl_update. Defaults to True.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def extract(self, text: str) -> List[Extraction]:\n    doc = self._parser(text)\n    extractions = list()\n    for sent in doc.sents:\n        this_extraction = Extraction(value=sent.text, extractor_name=self.name, start_token=sent[0], end_token=sent[(- 1)], start_char=sent.text[0], end_char=sent.text[(- 1)])\n        extractions.append(this_extraction)\n    return extractions", "docstring": "Splits text by sentences.\n\nArgs:\ntext (str): Input text to be extracted.\n\nReturns:\nList[Extraction]: the list of extraction or the empty list if there are no matches.", "source": "codesearchnet"}
{"code": "def number_to_day(self, day_number):\n    return [calendar.day_name[6], calendar.day_name[0], calendar.day_name[1], calendar.day_name[2], calendar.day_name[3], calendar.day_name[4], calendar.day_name[5]][day_number]", "docstring": "Returns localized day name by its CRON number\n\nArgs:\nday_number: Number of a day\nReturns:\nDay corresponding to day_number\nRaises:\nIndexError: When day_number is not found", "source": "codesearchnet"}
{"code": "def length(text, maxval=None, encoding=None):\n    \n    maxval = maxval or 4351\n    try:\n        assert not isinstance(text, six.binary_type)\n    except AssertionError:\n        raise TypeError('helpers.length requires a unicode argument')\n    return sum(2 if ord(x) > maxval else 1 for x in unicodedata.normalize('NFC', text))", "docstring": "Count the length of a str the way Twitter does,\ndouble-counting \"wide\" characters (e.g. ideographs, emoji)\n\nArgs:\ntext (str): Text to count. Must be a unicode string in Python 2\nmaxval (int): The maximum encoding that will be counted as 1 character.\nDefaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF)\n\nReturns:\nint", "source": "juraj-google-style"}
{"code": "def titles(self, unique=False):\n        \n        if unique:\n            return tools.uniqued(title for _, title in self.iterfiles())\n        return [title for _, title in self.iterfiles()]", "docstring": "Return a list of all available spreadsheet titles.\n\nArgs:\nunique (bool): drop duplicates\nReturns:\nlist: list of title/name strings", "source": "juraj-google-style"}
{"code": "def _closeElements(childs, HTMLElement):\n    \n    out = []\n\n    \n    for e in childs:\n        if not e.isTag():\n            out.append(e)\n            continue\n\n        if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \\\n           and e.endtag is None:\n            e.childs = _closeElements(e.childs, HTMLElement)\n\n            out.append(e)\n            out.append(HTMLElement(\"</\" + e.getTagName() + \">\"))\n\n            \n            e.endtag = out[-1]\n            out[-1].openertag = e\n        else:\n            out.append(e)\n\n    return out", "docstring": "Create `endtags` to elements which looks like openers, but doesn't have\nproper :attr:`HTMLElement.endtag`.\n\nArgs:\nchilds (list): List of childs (:class:`HTMLElement` obj) - typically\nfrom :attr:`HTMLElement.childs` property.\n\nReturns:\nlist: List of closed elements.", "source": "juraj-google-style"}
{"code": "def transform_regex_replace(source, pattern, rewrite, name=None):\n    \n\n    with ops.name_scope(name, \"TransformRegexReplace\", [source]):\n        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)\n        if isinstance(source, tf.SparseTensor):\n            result = tf.SparseTensor(\n                indices=source.indices,\n                values=ops_module.transform_regex_replace(source.values, pattern, rewrite),\n                dense_shape=source.dense_shape\n            )\n        else:\n            result = ops_module.transform_regex_replace(source, pattern, rewrite)\n\n        return result", "docstring": "Replace all substrings from `needle` to corresponding strings in `haystack` with source.\n\nArgs:\nsource: `Tensor` or `SparseTensor` of any shape, source strings for replacing.\npattern: List of RE2 patterns to search in source\nrewrite: List of strings to replace with. Should have same length as `needle`.\nname: A name for the operation (optional).\nReturns:\n`Tensor` or `SparseTensor` of same shape and size as input.", "source": "juraj-google-style"}
{"code": "def channels_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"channel\": channel})\n        return self.api_call(\"channels.unarchive\", json=kwargs)", "docstring": "Unarchives a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'", "source": "juraj-google-style"}
{"code": "def flush(self, hard=False):\n        \n        if not self.servers:\n            return\n        if hard:\n            self.client.flush_all()\n            self.reset_stats()\n        else:\n            from uuid import uuid4\n            tag = uuid4().hex\n            if self.debug:\n                tag = \"flushed\" + tag\n            self.current = tag", "docstring": "Drop existing entries from the cache.\n\nArgs:\nhard (bool): If True, all current entries are flushed from the\nserver(s), which affects all users. If False, only the local\nprocess is affected.", "source": "juraj-google-style"}
{"code": "def _CanPlaceOnSingleLine(line):\n    token_types = [x.type for x in line.tokens]\n    if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED') and any((token_types[token_index - 1] == token.COMMA for token_index, token_type in enumerate(token_types[1:], start=1) if token_type == token.RPAR)):\n        return False\n    if style.Get('FORCE_MULTILINE_DICT') and token.LBRACE in token_types:\n        return False\n    indent_amt = style.Get('INDENT_WIDTH') * line.depth\n    last = line.last\n    last_index = -1\n    if last.is_pylint_comment or last.is_pytype_comment or last.is_copybara_comment:\n        last = last.previous_token\n        last_index = -2\n    if last is None:\n        return True\n    return last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and (not any((tok.is_comment for tok in line.tokens[:last_index])))", "docstring": "Determine if the logical line can go on a single line.\n\nArguments:\nline: (logical_line.LogicalLine) The line currently being formatted.\n\nReturns:\nTrue if the line can or should be added to a single line. False otherwise.", "source": "github-repos"}
{"code": "def nearest_neighbors(self, word, top_k=10):\n    \n    \n    point = self[word]\n    diff = self.vectors - point\n    distances = np.linalg.norm(diff, axis=1)\n    top_ids = distances.argsort()[1:top_k+1]\n    return [self.vocabulary.id_word[i] for i in top_ids]", "docstring": "Return the nearest k words to the given `word`.\n\nArgs:\nword (string): single word.\ntop_k (integer): decides how many neighbors to report.\n\nReturns:\nA list of words sorted by the distances. The closest is the first.\n\nNote:\nL2 metric is used to calculate distances.", "source": "juraj-google-style"}
{"code": "def _fulfillment_from_details(data, _depth=0):\n    if (_depth == 100):\n        raise ThresholdTooDeep()\n    if (data['type'] == 'ed25519-sha-256'):\n        public_key = base58.b58decode(data['public_key'])\n        return Ed25519Sha256(public_key=public_key)\n    if (data['type'] == 'threshold-sha-256'):\n        threshold = ThresholdSha256(data['threshold'])\n        for cond in data['subconditions']:\n            cond = _fulfillment_from_details(cond, (_depth + 1))\n            threshold.add_subfulfillment(cond)\n        return threshold\n    raise UnsupportedTypeError(data.get('type'))", "docstring": "Load a fulfillment for a signing spec dictionary\n\nArgs:\ndata: tx.output[].condition.details dictionary", "source": "codesearchnet"}
{"code": "def splay_health(health_target):\n    \n    HealthCheck = collections.namedtuple('HealthCheck', ['path', 'port', 'proto', 'target'])\n\n    proto, health_port_path = health_target.split(':')\n    port, *health_path = health_port_path.split('/')\n\n    if proto == 'TCP':\n        path = ''\n    elif not health_path:\n        path = '/healthcheck'\n    else:\n        path = '/{0}'.format('/'.join(health_path))\n\n    target = '{0}:{1}{2}'.format(proto, port, path)\n\n    health = HealthCheck(path, port, proto, target)\n    LOG.info(health)\n\n    return health", "docstring": "Set Health Check path, port, and protocol.\n\nArgs:\nhealth_target (str): The health target. ie ``HTTP:80``\nReturns:\nHealthCheck: A **collections.namedtuple** class with *path*, *port*,\n*proto*, and *target* attributes.", "source": "juraj-google-style"}
{"code": "def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms')\n    if phone_number:\n        phone_number = phone_number.replace(' ', '')\n    event_data = SkypeSMSEventData()\n    event_data.number = phone_number\n    event_data.query = query\n    event_data.text = self._GetRowValue(query_hash, row, 'msg_sms')\n    timestamp = self._GetRowValue(query_hash, row, 'time_sms')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an SMS.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "codesearchnet"}
{"code": "def add_child_url(self, url: str, inline: bool=False, link_type: Optional[LinkType]=None, post_data: Optional[str]=None, level: Optional[int]=None, replace: bool=False):\n    url_properties = URLProperties()\n    url_properties.level = ((self.url_record.level + 1) if (level is None) else level)\n    url_properties.inline_level = (((self.url_record.inline_level or 0) + 1) if inline else None)\n    url_properties.parent_url = self.url_record.url\n    url_properties.root_url = (self.url_record.root_url or self.url_record.url)\n    url_properties.link_type = link_type\n    url_data = URLData()\n    url_data.post_data = post_data\n    if replace:\n        self.app_session.factory['URLTable'].remove_many([url])\n    self.add_url(url, url_properties, url_data)", "docstring": "Add links scraped from the document with automatic values.\n\nArgs:\nurl: A full URL. (It can't be a relative path.)\ninline: Whether the URL is an embedded object.\nlink_type: Expected link type.\npost_data: URL encoded form data. The request will be made using\nPOST. (Don't use this to upload files.)\nlevel: The child depth of this URL.\nreplace: Whether to replace the existing entry in the database\ntable so it will be redownloaded again.\n\nThis function provides values automatically for:\n\n* ``inline``\n* ``level``\n* ``parent``: The referrering page.\n* ``root``\n\nSee also :meth:`add_url`.", "source": "codesearchnet"}
{"code": "def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):\n        \n        creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)\n        return cls(creds)", "docstring": "Return a spreadsheet collection making OAauth 2.0 credentials.\n\nArgs:\nsecrets (str): location of secrets file (default: ``%r``)\nstorage (str): location of storage file (default: ``%r``)\nscopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)\nno_webserver (bool): URL/code prompt instead of webbrowser auth\nReturns:\nSheets: new Sheets instance with OAauth 2.0 credentials", "source": "juraj-google-style"}
{"code": "def first_return_times(dts, c=None, d=0.0):\n    if (c is None):\n        c = dts.mean()\n    vmrt = distob.vectorize(analyses1.first_return_times)\n    all_intervals = vmrt(dts, c, d)\n    if hasattr(type(all_intervals), '__array_interface__'):\n        return np.ravel(all_intervals)\n    else:\n        return np.hstack([distob.gather(ilist) for ilist in all_intervals])", "docstring": "For an ensemble of time series, return the set of all time intervals\nbetween successive returns to value c for all instances in the ensemble.\nIf c is not given, the default is the mean across all times and across all\ntime series in the ensemble.\n\nArgs:\ndts (DistTimeseries)\n\nc (float): Optional target value (default is the ensemble mean value)\n\nd (float): Optional min distance from c to be attained between returns\n\nReturns:\narray of time intervals (Can take the mean of these to estimate the\nexpected first return time for the whole ensemble)", "source": "codesearchnet"}
{"code": "def load_terms(fo: IO, metadata: dict, forceupdate: bool):\n    \n\n    version = metadata[\"metadata\"][\"version\"]\n\n    \n    with timy.Timer(\"Load Terms\") as timer:\n        es = bel.db.elasticsearch.get_client()\n\n        es_version = version.replace(\"T\", \"\").replace(\"-\", \"\").replace(\":\", \"\")\n        index_prefix = f\"terms_{metadata['metadata']['namespace'].lower()}\"\n        index_name = f\"{index_prefix}_{es_version}\"\n\n        \n        if not elasticsearch.index_exists(es, index_name):\n            elasticsearch.create_terms_index(es, index_name)\n        elif forceupdate:  \n            index_name += \"_alt\"\n            elasticsearch.create_terms_index(es, index_name)\n        else:\n            return  \n\n        terms_iterator = terms_iterator_for_elasticsearch(fo, index_name)\n        elasticsearch.bulk_load_docs(es, terms_iterator)\n\n        \n        index_names = elasticsearch.get_all_index_names(es)\n        for name in index_names:\n            if name != index_name and index_prefix in name:\n                elasticsearch.delete_index(es, name)\n\n        \n        elasticsearch.add_index_alias(es, index_name, terms_alias)\n\n        log.info(\n            \"Load namespace terms\",\n            elapsed=timer.elapsed,\n            namespace=metadata[\"metadata\"][\"namespace\"],\n        )\n\n    \n    with timy.Timer(\"Load Term Equivalences\") as timer:\n        arango_client = arangodb.get_client()\n        belns_db = arangodb.get_belns_handle(arango_client)\n        arangodb.batch_load_docs(\n            belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate=\"update\"\n        )\n\n        log.info(\n            \"Loaded namespace equivalences\",\n            elapsed=timer.elapsed,\n            namespace=metadata[\"metadata\"][\"namespace\"],\n        )\n\n        \n        remove_old_equivalence_edges = f\n        remove_old_equivalence_nodes = f\n        arangodb.aql_query(belns_db, remove_old_equivalence_edges)\n        arangodb.aql_query(belns_db, remove_old_equivalence_nodes)\n\n    \n    metadata[\"_key\"] = f\"Namespace_{metadata['metadata']['namespace']}\"\n    try:\n        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)\n    except ArangoError as ae:\n        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)", "docstring": "Load terms into Elasticsearch and ArangoDB\n\nForceupdate will create a new index in Elasticsearch regardless of whether\nan index with the resource version already exists.\n\nArgs:\nfo: file obj - terminology file\nmetadata: dict containing the metadata for terminology\nforceupdate: force full update - e.g. don't leave Elasticsearch indexes\nalone if their version ID matches", "source": "juraj-google-style"}
{"code": "def plot(data, output_dir_path='.', width=10, height=8):\n    \n    if not isinstance(data, pd.DataFrame):\n        data = pd.DataFrame(data)\n    plot_accuracy(data, output_dir_path=output_dir_path,\n                  width=width, height=height)\n    plot_loss(data, output_dir_path, width=width, height=height)", "docstring": "Create two plots: 1) loss 2) accuracy.\nArgs:\ndata: Panda dataframe in *the* format.", "source": "juraj-google-style"}
{"code": "def from_str(format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False) -> 'PipelineDataFormat':\n    if format == 'json':\n        return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)\n    elif format == 'csv':\n        return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)\n    elif format == 'pipe':\n        return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)\n    else:\n        raise KeyError(f'Unknown reader {format} (Available reader are json/csv/pipe)')", "docstring": "Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`.\n\nArgs:\nformat (`str`):\nThe format of the desired pipeline. Acceptable values are `\"json\"`, `\"csv\"` or `\"pipe\"`.\noutput_path (`str`, *optional*):\nWhere to save the outgoing data.\ninput_path (`str`, *optional*):\nWhere to look for the input data.\ncolumn (`str`, *optional*):\nThe column to read.\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to overwrite the `output_path`.\n\nReturns:\n[`~pipelines.PipelineDataFormat`]: The proper data format.", "source": "github-repos"}
{"code": "def _ExtractInterfaceMetadata(self, metadata):\n    \n    interfaces = []\n    for network_interface in metadata:\n      mac_address = network_interface.get('mac')\n      interface = self.network_utils.GetNetworkInterface(mac_address)\n      ip_addresses = []\n      if interface:\n        ip_addresses.extend(network_interface.get('forwardedIps', []))\n        if self.ip_aliases:\n          ip_addresses.extend(network_interface.get('ipAliases', []))\n        if self.target_instance_ips:\n          ip_addresses.extend(network_interface.get('targetInstanceIps', []))\n        interfaces.append(NetworkDaemon.NetworkInterface(\n            interface, ip_addresses, network_interface.get('ip', [])))\n      else:\n        message = 'Network interface not found for MAC address: %s.'\n        self.logger.warning(message, mac_address)\n    return interfaces", "docstring": "Extracts network interface metadata.\n\nArgs:\nmetadata: dict, the metadata response with the new network interfaces.\n\nReturns:\nlist, a list of NetworkInterface objects.", "source": "juraj-google-style"}
{"code": "def get_next(self, protocol='http', format=False, policy='loop'):\n    if (not self.proxies[protocol]):\n        return None\n    if (policy == 'loop'):\n        idx = self.idx[protocol]\n        self.idx[protocol] = ((idx + 1) % len(self.proxies[protocol]))\n    elif (policy == 'random'):\n        idx = random.randint(0, (self.proxy_num(protocol) - 1))\n    else:\n        self.logger.error('Unsupported get_next policy: {}'.format(policy))\n        exit()\n    proxy = self.proxies[protocol][self.addr_list[protocol][idx]]\n    if (proxy.weight < random.random()):\n        return self.get_next(protocol, format, policy)\n    if format:\n        return proxy.format()\n    else:\n        return proxy", "docstring": "Get the next proxy\n\nArgs:\nprotocol (str): 'http' or 'https'. (default 'http')\nformat (bool): Whether to format the proxy. (default False)\npolicy (str): Either 'loop' or 'random', indicating the policy of\ngetting the next proxy. If set to 'loop', will return proxies\nin turn, otherwise will return a proxy randomly.\n\nReturns:\nProxy or dict: If format is true, then return the formatted proxy\nwhich is compatible with requests.Session parameters,\notherwise a Proxy object.", "source": "codesearchnet"}
{"code": "def det(x):\n    if any_symbolic_tensors((x,)):\n        return Det().symbolic_call(x)\n    return _det(x)", "docstring": "Computes the determinant of a square tensor.\n\nArgs:\nx: Input tensor of shape `(..., M, M)`.\n\nReturns:\nA tensor of shape `(...,)` representing the determinant of `x`.", "source": "github-repos"}
{"code": "def main(argv=None):\n    if (argv is None):\n        argv = sys.argv[1:]\n    args = _get_parser().parse_args(argv)\n    mand(args.module_seq)", "docstring": "Execute each module in the same interpreter.\n\nArgs:\nargv: Each item of argv will be treated as a separate\nmodule with potential arguments\neach item may be a string or a sequence of strings.\nIf a given argument is a string, then treat string as\nshell arguments and split accordingly.\nIf the given argument is a tuple or list, then assume\nthat the given arguments are already parsed.\nThe first item of each argument should be a module or module path", "source": "codesearchnet"}
{"code": "def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):\n    if (len_filter is not None):\n        (trX, trY) = len_filter.filter(trX, trY)\n    trY = standardize_targets(trY, cost=self.cost)\n    n = 0.0\n    t = time()\n    costs = []\n    for e in range(n_epochs):\n        epoch_costs = []\n        for (xmb, ymb) in self.iterator.iterXY(trX, trY):\n            c = self._train(xmb, ymb)\n            epoch_costs.append(c)\n            n += len(ymb)\n            if (self.verbose >= 2):\n                n_per_sec = (n / (time() - t))\n                n_left = (len(trY) - (n % len(trY)))\n                time_left = (n_left / n_per_sec)\n                sys.stdout.write(('\\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds' % (e, n, np.mean(epoch_costs[(- 250):]), time_left)))\n                sys.stdout.flush()\n        costs.extend(epoch_costs)\n        status = ('Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds' % (e, n, np.mean(epoch_costs[(- 250):]), (time() - t)))\n        if (self.verbose >= 2):\n            sys.stdout.write(('\\r' + status))\n            sys.stdout.flush()\n            sys.stdout.write('\\n')\n        elif (self.verbose == 1):\n            print(status)\n        if (path and ((e % snapshot_freq) == 0)):\n            save(self, '{0}.{1}'.format(path, e))\n    return costs", "docstring": "Train model on given training examples and return the list of costs after each minibatch is processed.\n\nArgs:\ntrX (list) -- Inputs\ntrY (list) -- Outputs\nbatch_size (int, optional) -- number of examples in a minibatch (default 64)\nn_epochs (int, optional)  -- number of epochs to train for (default 1)\nlen_filter (object, optional) -- object to filter training example by length (default LenFilter())\nsnapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)\npath (str, optional) -- prefix of path where model snapshots are saved.\nIf None, no snapshots are saved (default None)\n\nReturns:\nlist -- costs of model after processing each minibatch", "source": "codesearchnet"}
{"code": "def setup_test_logger(log_path, prefix=None, alias='latest', console_level=logging.INFO):\n    utils.create_dir(log_path)\n    _setup_test_logger(log_path, console_level, prefix)\n    logging.debug('Test output folder: \"%s\"', log_path)\n    if alias:\n        create_latest_log_alias(log_path, alias=alias)", "docstring": "Customizes the root logger for a test run.\n\nIn addition to configuring the Mobly logging handlers, this also sets two\nattributes on the `logging` module for the output directories:\n\nroot_output_path: path to the directory for the entire test run.\nlog_path: same as `root_output_path` outside of a test class run. In the\ncontext of a test class run, this is the output directory for files\nspecific to a test class.\n\nArgs:\nlog_path: string, the location of the report file.\nprefix: optional string, a prefix for each log line in terminal.\nalias: optional string, The name of the alias to use for the latest log\ndirectory. If a falsy value is provided, then the alias directory\nwill not be created, which is useful to save storage space when the\nstorage system (e.g. ZIP files) does not properly support\nshortcut/symlinks.\nconsole_level: optional logging level, log level threshold used for log\nmessages printed to the console. Logs with a level less severe than\nconsole_level will not be printed to the console.", "source": "github-repos"}
{"code": "def get_files(self, commit, paths, recursive=False):\n        \n        filtered_file_infos = []\n        for path in paths:\n            fi = self.inspect_file(commit, path)\n            if fi.file_type == proto.FILE:\n                filtered_file_infos.append(fi)\n            else:\n                filtered_file_infos += self.list_file(commit, path, recursive=recursive)\n\n        filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]\n\n        return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}", "docstring": "Returns the contents of a list of files at a specific Commit as a\ndictionary of file paths to data.\n\nParams:\n* commit: A tuple, string, or Commit object representing the commit.\n* paths: A list of paths to retrieve.\n* recursive: If True, will go into each directory in the list\nrecursively.", "source": "juraj-google-style"}
{"code": "def move_to(self, folder):\n    if isinstance(folder, Folder):\n        self.move_to(folder.id)\n    else:\n        self._move_to(folder)", "docstring": "Moves the email to the folder specified by the folder parameter.\n\nArgs:\nfolder: A string containing the folder ID the message should be moved to, or a Folder instance", "source": "codesearchnet"}
{"code": "def _split_generators(self, dl_manager):\n    \n    path = dl_manager.download_and_extract(_DOWNLOAD_URL)\n    return [\n        tfds.core.SplitGenerator(\n            name=tfds.Split.TEST,\n            num_shards=1,\n            gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})\n    ]", "docstring": "Return the test split of Cifar10.\n\nArgs:\ndl_manager: download manager object.\n\nReturns:\ntest split.", "source": "juraj-google-style"}
{"code": "def _parse_volumes(volume_values: dict) -> str:\n        \n        for v_values in volume_values:\n            for v_key, v_value in v_values.items():\n                if v_key == 'source':\n                    if v_value == '.':\n                        source = os.path.dirname(\n                            os.path.abspath(__file__))\n                    else:\n                        source = v_value\n                if v_key == 'target':\n                    target = v_value\n            volume_spec = [source + ':' + target]\n            return volume_spec", "docstring": "Parse volumes key.\n\nArgs:\nvolume_values (dict): volume configuration values\n\nReturns:\nstring, volume specification with mount source and container path", "source": "juraj-google-style"}
{"code": "def run(self):\n    if not self._test_run_infos:\n        raise Error('No tests to execute.')\n    self._test_run_metadata.set_start_point()\n    utils.create_dir(self._test_run_metadata.root_output_path)\n    summary_writer = records.TestSummaryWriter(self._test_run_metadata.summary_file_path)\n\n    def sigterm_handler(*args):\n        logging.warning('Test received a SIGTERM. Aborting all tests.')\n        raise signals.TestAbortAll('Test received a SIGTERM.')\n    signal.signal(signal.SIGTERM, sigterm_handler)\n    try:\n        for test_run_info in self._test_run_infos:\n            test_config = test_run_info.config.copy()\n            test_config.log_path = self._test_run_metadata.root_output_path\n            test_config.summary_writer = summary_writer\n            test_config.test_class_name_suffix = test_run_info.test_class_name_suffix\n            try:\n                self._run_test_class(config=test_config, test_class=test_run_info.test_class, tests=test_run_info.tests)\n            except signals.TestAbortAll as e:\n                logging.warning('Abort all subsequent test classes. Reason: %s', e)\n                raise\n    finally:\n        summary_writer.dump(self.results.summary_dict(), records.TestSummaryEntryType.SUMMARY)\n        self._test_run_metadata.set_end_point()\n        summary_lines = [f'Summary for test run {self._test_run_metadata.run_id}:', f'Total time elapsed {self._test_run_metadata.time_elapsed_sec}s', f'Artifacts are saved in \"{self._test_run_metadata.root_output_path}\"', f'Test summary saved in \"{self._test_run_metadata.summary_file_path}\"', f'Test results: {self.results.summary_str()}']\n        logging.info('\\n'.join(summary_lines))", "docstring": "Executes tests.\n\nThis will instantiate controller and test classes, execute tests, and\nprint a summary.\n\nThis meethod should usually be called within the runner's `mobly_logger`\ncontext. If you must use this method outside of the context, you should\nmake sure `self._test_run_metadata.generate_test_run_log_path` is called\nbefore each invocation of `run`.\n\nRaises:\nError: if no tests have previously been added to this runner using\nadd_test_class(...).", "source": "github-repos"}
{"code": "def from_json(cls, data):\n        \n        \n        required_keys = ('hum_type', 'hum_value')\n        optional_keys = {'barometric_pressure': 101325,\n                         'schedule': '', 'wet_bulb_range': ''}\n        for key in required_keys:\n            assert key in data, 'Required key \"{}\" is missing!'.format(key)\n        for key, val in optional_keys.items():\n            if key not in data:\n                data[key] = val\n\n        return cls(data['hum_type'], data['hum_value'], data['barometric_pressure'],\n                   data['schedule'], data['wet_bulb_range'])", "docstring": "Create a Humidity Condition from a dictionary.\n\nArgs:\ndata = {\n\"hum_type\": string,\n\"hum_value\": float,\n\"barometric_pressure\": float,\n\"schedule\": string,\n\"wet_bulb_range\": string}", "source": "juraj-google-style"}
{"code": "def non_fluent_size(self) -> Sequence[Sequence[int]]:\n    fluents = self.domain.non_fluents\n    ordering = self.domain.non_fluent_ordering\n    return self._fluent_size(fluents, ordering)", "docstring": "The size of each non-fluent in canonical order.\n\nReturns:\nSequence[Sequence[int]]: A tuple of tuple of integers\nrepresenting the shape and size of each non-fluent.", "source": "codesearchnet"}
{"code": "def Process(self, parser_mediator, plist_name, top_level, **kwargs):\n    super(MacUserPlugin, self).Process(parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)", "docstring": "Check if it is a valid MacOS system  account plist file name.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nplist_name (str): name of the plist.\ntop_level (dict[str, object]): plist top-level key.", "source": "codesearchnet"}
{"code": "def __init__(self, uri=None, method=None, headers=None):\n    \n    self.headers = headers or {}\n    self._body_parts = []\n    if method is not None:\n      self.method = method\n    if isinstance(uri, (str, unicode)):\n      uri = Uri.parse_uri(uri)\n    self.uri = uri or Uri()\n    self.headers['MIME-version'] = '1.0'\n    self.headers['Connection'] = 'close'", "docstring": "Construct an HTTP request.\n\nArgs:\nuri: The full path or partial path as a Uri object or a string.\nmethod: The HTTP method for the request, examples include 'GET', 'POST',\netc.\nheaders: dict of strings The HTTP headers to include in the request.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A DeBERTa sequence has the following format:\n\n- single sequence: [CLS] X [SEP]\n- pair of sequences: [CLS] A [SEP] B [SEP]\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def update_vmss(access_token, subscription_id, resource_group, vmss_name, body):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '?api-version=', COMP_API])\n    return do_put(endpoint, body, access_token)", "docstring": "Update a VMSS with a new JSON body. E.g. do a GET, change something, call this.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\nbody (dict): JSON body of the VM scale set.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def from_spec(cls, spec, name=None):\n    return cls(spec.shape, spec.dtype, name or spec.name)", "docstring": "Returns a `TensorSpec` with the same shape and dtype as `spec`.\n\n>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name=\"OriginalName\")\n>>> tf.TensorSpec.from_spec(spec, \"NewName\")\nTensorSpec(shape=(8, 3), dtype=tf.int32, name='NewName')\n\nArgs:\nspec: The `TypeSpec` used to create the new `TensorSpec`.\nname: The name for the new `TensorSpec`.  Defaults to `spec.name`.", "source": "github-repos"}
{"code": "def fit(self, col):\n        \n        dates = self.safe_datetime_cast(col)\n        self.default_val = dates.groupby(dates).count().index[0].timestamp() * 1e9", "docstring": "Prepare the transformer to convert data.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def filepattern(self, data_dir, mode, shard=None):\n    path = os.path.join(data_dir, self.dataset_filename())\n    shard_str = (('-%05d' % shard) if (shard is not None) else '')\n    if (mode == DatasetSplit.TRAIN):\n        suffix = 'train'\n    elif (mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]):\n        suffix = 'dev'\n    else:\n        assert (mode == DatasetSplit.TEST)\n        suffix = 'test'\n    return ('%s-%s%s*' % (path, suffix, shard_str))", "docstring": "Get filepattern for data files for mode.\n\nMatches mode to a suffix.\n* DatasetSplit.TRAIN: train\n* DatasetSplit.EVAL: dev\n* DatasetSplit.TEST: test\n* tf.estimator.ModeKeys.PREDICT: dev\n\nArgs:\ndata_dir: str, data directory.\nmode: DatasetSplit\nshard: int, if provided, will only read data from the specified shard.\n\nReturns:\nfilepattern str", "source": "codesearchnet"}
{"code": "def is_unitary(\n        matrix: np.ndarray,\n        *,\n        rtol: float = 1e-5,\n        atol: float = 1e-8) -> bool:\n    \n    return (matrix.shape[0] == matrix.shape[1] and\n            np.allclose(matrix.dot(np.conj(matrix.T)), np.eye(matrix.shape[0]),\n                        rtol=rtol,\n                        atol=atol))", "docstring": "Determines if a matrix is approximately unitary.\n\nA matrix is unitary if it's square and its adjoint is its inverse.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is unitary within the given tolerance.", "source": "juraj-google-style"}
{"code": "def set_column_sizes(self, values):\n    self.style['grid-template-columns'] = ' '.join(map((lambda value: (str(value) if str(value).endswith('%') else (str(value) + '%'))), values))", "docstring": "Sets the size value for each column\n\nArgs:\nvalues (iterable of int or str): values are treated as percentage.", "source": "codesearchnet"}
{"code": "def get_all_ad_units(inventory_service):\n    statement = ad_manager.StatementBuilder(version='v201811').OrderBy('id', ascending=True)\n    keep_iterating = True\n    total_results = 0\n    found_ad_units = []\n    while keep_iterating:\n        page = inventory_service.getAdUnitsByStatement(statement.ToStatement())\n        if (('results' in page) and len(page['results'])):\n            total_results = page['totalResultSetSize']\n            found_ad_units.extend(page['results'])\n        statement.offset += statement.limit\n        keep_iterating = (statement.offset < total_results)\n    return found_ad_units", "docstring": "Download all ad units.\n\nArgs:\ninventory_service: An instance of the InventoryService.\n\nReturns:\nA list containing all ad units.", "source": "codesearchnet"}
{"code": "def write_image(self, stream, image_format='svg', **kwargs):\n    plt = self.get_plot(**kwargs)\n    f = plt.gcf()\n    f.set_size_inches((12, 10))\n    plt.savefig(stream, format=image_format)", "docstring": "Writes the phase diagram to an image in a stream.\n\nArgs:\nstream:\nstream to write to. Can be a file stream or a StringIO stream.\nimage_format\nformat for image. Can be any of matplotlib supported formats.\nDefaults to svg for best results for vector graphics.\n\\\\*\\\\*kwargs: Pass through to get_plot functino.", "source": "codesearchnet"}
{"code": "def serialize(obj):\n    \n    LOGGER.debug('serialize(%s)', obj)\n\n    if isinstance(obj, datetime.date):\n        return simplejson.dumps(obj, default=encoders.as_date)\n\n    elif hasattr(obj, '__dict__'):\n        return simplejson.dumps(obj, default=encoders.as_object)\n\n    return simplejson.dumps(obj)", "docstring": "Serialize the given object into JSON.\n\nArgs:\nobj: the object to be serialized.\n\nReturns:\n(str): JSON representation of the given object.", "source": "juraj-google-style"}
{"code": "def add_plot_boundary(ax, padding=0.125):\n    nodes = np.asfortranarray(np.vstack([line.get_xydata() for line in ax.lines]).T)\n    (left, right, bottom, top) = _helpers.bbox(nodes)\n    center_x = (0.5 * (right + left))\n    delta_x = (right - left)\n    center_y = (0.5 * (top + bottom))\n    delta_y = (top - bottom)\n    multiplier = ((1.0 + padding) * 0.5)\n    ax.set_xlim((center_x - (multiplier * delta_x)), (center_x + (multiplier * delta_x)))\n    ax.set_ylim((center_y - (multiplier * delta_y)), (center_y + (multiplier * delta_y)))", "docstring": "Add a buffer of empty space around a plot boundary.\n\n.. note::\n\nThis only uses ``line`` data from the axis. It **could**\nuse ``patch`` data, but doesn't at this time.\n\nArgs:\nax (matplotlib.artist.Artist): A matplotlib axis.\npadding (Optional[float]): Amount (as a fraction of width and height)\nof padding to add around data. Defaults to ``0.125``.", "source": "codesearchnet"}
{"code": "def slice(self, start, size):\n        \n        return SeriesWeld(\n            grizzly_impl.slice(\n                self.expr,\n                start,\n                size,\n                self.weld_type\n            ),\n            self.weld_type,\n            self.df,\n            self.column_name\n        )", "docstring": "Summary\n\nArgs:\nstart (TYPE): Description\nsize (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def stop(self, consumer):\n        \n        stopped_workflows = []\n        for request in [r for r in consumer.controller.state.active_requests]:\n            job = AsyncResult(request.id)\n\n            workflow_id = job.result['workflow_id']\n            if workflow_id not in stopped_workflows:\n                client = Client(\n                    SignalConnection(**consumer.app.user_options['config'].signal,\n                                     auto_connect=True),\n                    request_key=workflow_id)\n                client.send(Request(action='stop_workflow'))\n\n                stopped_workflows.append(workflow_id)", "docstring": "This function is called when the worker received a request to terminate.\n\nUpon the termination of the worker, the workflows for all running jobs are\nstopped gracefully.\n\nArgs:\nconsumer (Consumer): Reference to the consumer object that handles messages\nfrom the broker.", "source": "juraj-google-style"}
{"code": "def HashIt(self):\n    while True:\n        interval = self._GetNextInterval()\n        if (interval is None):\n            break\n        self.file.seek(interval.start, os.SEEK_SET)\n        block = self.file.read((interval.end - interval.start))\n        if (len(block) != (interval.end - interval.start)):\n            raise RuntimeError('Short read on file.')\n        self._HashBlock(block, interval.start, interval.end)\n        self._AdjustIntervals(interval.start, interval.end)\n    results = []\n    for finger in self.fingers:\n        res = {}\n        leftover = finger.CurrentRange()\n        if leftover:\n            if ((len(finger.ranges) > 1) or (leftover.start != self.filelength) or (leftover.end != self.filelength)):\n                raise RuntimeError('Non-empty range remains.')\n        res.update(finger.metadata)\n        for hasher in finger.hashers:\n            res[hasher.name] = hasher.digest()\n        results.append(res)\n    self.fingers = []\n    return sorted(results, key=(lambda r: r['name']))", "docstring": "Finalizing function for the Fingerprint class.\n\nThis method applies all the different hash functions over the\npreviously specified different ranges of the input file, and\ncomputes the resulting hashes.\n\nAfter calling HashIt, the state of the object is reset to its\ninitial state, with no fingers defined.\n\nReturns:\nAn array of dicts, with each dict containing name of fingerprint\ntype, names of hashes and values, and additional, type-dependent\nkey / value pairs, such as an array of SignedData tuples for the\nPE/COFF fingerprint type.\n\nRaises:\nRuntimeError: when internal inconsistencies occur.", "source": "codesearchnet"}
{"code": "def __init__(self, option_strings, dest, help, metavar, flag_instance):  \n    \n    del dest\n    self._flag_instance = flag_instance\n    flag_names = [self._flag_instance.name]\n    if self._flag_instance.short_name:\n      flag_names.append(self._flag_instance.short_name)\n    self._flag_names = frozenset(flag_names)\n    super(_BooleanFlagAction, self).__init__(\n        option_strings=option_strings,\n        dest=argparse.SUPPRESS,\n        nargs=0,  \n        help=help,\n        metavar=metavar)", "docstring": "Initializes _BooleanFlagAction.\n\nArgs:\noption_strings: See argparse.Action.\ndest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.\nhelp: See argparse.Action.\nmetavar: See argparse.Action.\nflag_instance: absl.flags.Flag, the absl flag instance.", "source": "juraj-google-style"}
{"code": "def get_all_doctest_files() -> List[str]:\n    py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob('***.md')]\n    test_files_to_run = py_files + md_files\n    test_files_to_run = ['/'.join(Path(x).parts) for x in test_files_to_run]\n    test_files_to_run = [x for x in test_files_to_run if 'models/deprecated' not in x]\n    test_files_to_run = [x for x in test_files_to_run if x.startswith(('src/', 'docs/source/en/'))]\n    test_files_to_run = [x for x in test_files_to_run if not x.endswith(('__init__.py',))]\n    with open('utils/not_doctested.txt') as fp:\n        not_doctested = {x.split(' ')[0] for x in fp.read().strip().split('\\n')}\n    test_files_to_run = [x for x in test_files_to_run if x not in not_doctested]\n    return sorted(test_files_to_run)", "docstring": "Return the complete list of python and Markdown files on which we run doctest.\n\nAt this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`.\n\nReturns:\n`List[str]`: The complete list of Python and Markdown files on which we run doctest.", "source": "github-repos"}
{"code": "def get_master_port(real_launcher=False):\n    master_port_base = os.environ.get('DS_TEST_PORT', DEFAULT_MASTER_PORT)\n    if not real_launcher:\n        master_port_base = str(int(master_port_base) + 1)\n    return master_port_base", "docstring": "When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)\nthe issue is that once the port is tied it can't be used anywhere else outside of this process,\nsince torch.dist doesn't free the port until the process exits. Therefore for the sake of being\nable to run both emulated launcher and normal launcher tests we need 2 distinct ports.\n\nThis function will give the right port in the right context. For real launcher it'll give the\nbase port, for emulated launcher it'll give the base port + 1. In both cases a string is\nreturned.\n\nArgs:\n`real_launcher`: whether a real launcher is going to be used, or the emulated one", "source": "github-repos"}
{"code": "def mark_deprecated(replaced_by):\n\n    def decorator(fn):\n\n        @wraps(fn)\n        def wrapper(*args, **kw):\n            from peltak.core import shell\n            if shell.is_tty:\n                warnings.warn('This command is has been deprecated. Please use {new} instead.'.format(new=replaced_by))\n            return fn(*args, **kw)\n        return wrapper\n    return decorator", "docstring": "Mark command as deprecated.\n\nArgs:\nreplaced_by (str):\nThe command that deprecated this command and should be used instead.", "source": "codesearchnet"}
{"code": "def GetZipInfoByPathSpec(self, path_spec):\n    location = getattr(path_spec, 'location', None)\n    if (location is None):\n        raise errors.PathSpecError('Path specification missing location.')\n    if (not location.startswith(self.LOCATION_ROOT)):\n        raise errors.PathSpecError('Invalid location in path specification.')\n    if (len(location) > 1):\n        return self._zip_file.getinfo(location[1:])\n    return None", "docstring": "Retrieves the ZIP info for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nzipfile.ZipInfo: a ZIP info object or None if not available.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def screenshot(path=None):\n    \n    if not _rootinitialized:\n        raise TDLError('Initialize first with tdl.init')\n    if isinstance(path, str):\n        _lib.TCOD_sys_save_screenshot(_encodeString(path))\n    elif path is None: \n        filelist = _os.listdir('.')\n        n = 1\n        filename = 'screenshot%.3i.png' % n\n        while filename in filelist:\n            n += 1\n            filename = 'screenshot%.3i.png' % n\n        _lib.TCOD_sys_save_screenshot(_encodeString(filename))\n    else: \n        \n        tmpname = _os.tempnam()\n        _lib.TCOD_sys_save_screenshot(_encodeString(tmpname))\n        with tmpname as tmpfile:\n            path.write(tmpfile.read())\n        _os.remove(tmpname)", "docstring": "Capture the screen and save it as a png file.\n\nIf path is None then the image will be placed in the current\nfolder with the names:\n``screenshot001.png, screenshot002.png, ...``\n\nArgs:\npath (Optional[Text]): The file path to save the screenshot.", "source": "juraj-google-style"}
{"code": "def get_body(name):\n    \n\n    body = Pck()[name]\n    body.propagate = lambda date: get_orbit(name, date)\n    return body", "docstring": "Retrieve the Body structure of a JPL .bsp file object\n\nArgs:\nname (str)\nReturn:\n:py:class:`~beyond.constants.Body`", "source": "juraj-google-style"}
{"code": "def parse_verilog(text):\n  \n  lex = VerilogLexer\n\n  name = None\n  kind = None\n  saved_type = None\n  mode = 'input'\n  ptype = 'wire'\n\n  metacomments = []\n  parameters = []\n  param_items = []\n\n  generics = []\n  ports = collections.OrderedDict()\n  sections = []\n  port_param_index = 0\n  last_item = None\n  array_range_start_pos = 0\n\n  objects = []\n\n  for pos, action, groups in lex.run(text):\n    if action == 'metacomment':\n      if last_item is None:\n        metacomments.append(groups[0])\n      else:\n        last_item.desc = groups[0]\n\n    if action == 'section_meta':\n      sections.append((port_param_index, groups[0]))\n\n    elif action == 'module':\n      kind = 'module'\n      name = groups[0]\n      generics = []\n      ports = collections.OrderedDict()\n      param_items = []\n      sections = []\n      port_param_index = 0\n\n    elif action == 'parameter_start':\n      net_type, vec_range = groups\n\n      new_ptype = ''\n      if net_type is not None:\n        new_ptype += net_type\n\n      if vec_range is not None:\n        new_ptype += ' ' + vec_range\n\n      ptype = new_ptype\n\n    elif action == 'param_item':\n      generics.append(VerilogParameter(groups[0], 'in', ptype))\n\n    elif action == 'module_port_start':\n      new_mode, net_type, signed, vec_range = groups\n\n      new_ptype = ''\n      if net_type is not None:\n        new_ptype += net_type\n\n      if signed is not None:\n        new_ptype += ' ' + signed\n\n      if vec_range is not None:\n        new_ptype += ' ' + vec_range\n\n      \n      for i in param_items:\n        ports[i] = VerilogParameter(i, mode, ptype)\n\n      param_items = []\n      if len(ports) > 0:\n        last_item = next(reversed(ports))\n\n      \n      mode = new_mode\n      ptype = new_ptype\n\n    elif action == 'port_param':\n      ident = groups[0]\n\n      param_items.append(ident)\n      port_param_index += 1\n\n    elif action == 'end_module':\n      \n      for i in param_items:\n        ports[i] = VerilogParameter(i, mode, ptype)\n\n      vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments)\n      objects.append(vobj)\n      last_item = None\n      metacomments = []\n\n  return objects", "docstring": "Parse a text buffer of Verilog code\n\nArgs:\ntext (str): Source code to parse\nReturns:\nList of parsed objects.", "source": "juraj-google-style"}
{"code": "def recall(truth, recommend, k=None):\n    \n    if len(truth) == 0:\n        if len(recommend) == 0:\n            return 1.\n        return 0.\n\n    if k is None:\n        k = len(recommend)\n    return count_true_positive(truth, recommend[:k]) / float(truth.size)", "docstring": "Recall@k.\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\nk (int): Top-k items in `recommend` will be recommended.\n\nReturns:\nfloat: Recall@k.", "source": "juraj-google-style"}
{"code": "def from_response(self, response_data):\n        \n        return HSAccessTokenAuth(\n            response_data['access_token'],\n            response_data['token_type'], \n            response_data['refresh_token'],\n            response_data['expires_in'], \n            response_data.get('state') \n        )", "docstring": "Builds a new HSAccessTokenAuth straight from response data\n\nArgs:\nresponse_data (dict): Response data to use\n\nReturns:\nA HSAccessTokenAuth objet", "source": "juraj-google-style"}
{"code": "def LoadFromString(cls, yaml_doc):\n    \n    return cls(**googleads.common.LoadFromString(\n        yaml_doc, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,\n        cls._OPTIONAL_INIT_VALUES))", "docstring": "Creates an AdWordsClient with information stored in a yaml string.\n\nArgs:\nyaml_doc: The yaml string containing the cached AdWords data.\n\nReturns:\nAn AdWordsClient initialized with the values cached in the string.\n\nRaises:\nA GoogleAdsValueError if the given yaml string does not contain the\ninformation necessary to instantiate a client object - either a\nrequired key was missing or an OAuth2 key was missing.", "source": "juraj-google-style"}
{"code": "def compose_r(self, r: Rotation) -> Rotation:\n    r1 = self.get_rot_mats()\n    r2 = r.get_rot_mats()\n    new_rot_mats = rot_matmul(r1, r2)\n    return Rotation(rot_mats=new_rot_mats, quats=None)", "docstring": "Compose the rotation matrices of the current Rotation object with those of another.\n\nArgs:\nr:\nAn update rotation object\nReturns:\nAn updated rotation object", "source": "github-repos"}
{"code": "def update_thread(cls, session, conversation, thread):\n        \n        data = thread.to_api()\n        data['reload'] = True\n        return cls(\n            '/conversations/%s/threads/%d.json' % (\n                conversation.id, thread.id,\n            ),\n            data=data,\n            request_type=RequestPaginator.PUT,\n            singleton=True,\n            session=session,\n        )", "docstring": "Update a thread.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nconversation (helpscout.models.Conversation): The conversation\nthat the thread belongs to.\nthread (helpscout.models.Thread): The thread to be updated.\n\nReturns:\nhelpscout.models.Conversation: Conversation including freshly\nupdated thread.", "source": "juraj-google-style"}
{"code": "def _prune_hit(hit, model):\n    hit_id = hit['_id']\n    hit_index = hit['_index']\n    if model.objects.in_search_queryset(hit_id, index=hit_index):\n        logger.debug(\"%s with id=%s exists in the '%s' index queryset.\", model, hit_id, hit_index)\n        return None\n    else:\n        logger.debug(\"%s with id=%s does not exist in the '%s' index queryset and will be pruned.\", model, hit_id, hit_index)\n        return model(pk=hit_id)", "docstring": "Check whether a document should be pruned.\n\nThis method uses the SearchDocumentManagerMixin.in_search_queryset method\nto determine whether a 'hit' (search document) should be pruned from an index,\nand if so it returns the hit as a Django object(id=hit_id).\n\nArgs:\nhit: dict object the represents a document as returned from the scan_index\nfunction. (Contains object id and index.)\nmodel: the Django model (not object) from which the document was derived.\nUsed to get the correct model manager and bulk action.\n\nReturns:\nan object of type model, with id=hit_id. NB this is not the object\nitself, which by definition may not exist in the underlying database,\nbut a temporary object with the document id - which is enough to create\na 'delete' action.", "source": "codesearchnet"}
{"code": "def get_value_or_block_until_ready(self, side_input, task: TransformExecutor, block_until: Timestamp) -> Any:\n    with self._lock:\n        view = self._views[side_input]\n        if view.watermark and view.watermark.output_watermark >= block_until:\n            view.value = self._pvalue_to_value(side_input, view.elements)\n            return view.value\n        else:\n            view.blocked_tasks.append((task, block_until))\n            task.blocked = True", "docstring": "Returns the value of a view whose task is unblocked or blocks its task.\n\nIt gets the value of a view whose watermark has been updated and\nsurpasses a given value.\n\nArgs:\nside_input: ``_UnpickledSideInput`` value.\ntask: ``TransformExecutor`` task waiting on a side input.\nblock_until: Timestamp after which the task gets unblocked.\n\nReturns:\nThe ``SideInputMap`` value of a view when the tasks it blocks are\nunblocked. Otherwise, None.", "source": "github-repos"}
{"code": "def call(self, y_true, y_pred):\n    if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):\n        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n    ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())\n    return ag_fn(y_true, y_pred, **self._fn_kwargs)", "docstring": "Invokes the `LossFunctionWrapper` instance.\n\nArgs:\ny_true: Ground truth values.\ny_pred: The predicted values.\n\nReturns:\nLoss values per sample.", "source": "github-repos"}
{"code": "def on_value_event(self, event):\n    if self._dump_dir:\n        self._write_value_event(event)\n    else:\n        value = event.summary.value[0]\n        tensor_value = debug_data.load_tensor_from_event(event)\n        self._event_listener_servicer.debug_tensor_values[value.node_name].append(tensor_value)\n        items = event.summary.value[0].node_name.split(':')\n        node_name = items[0]\n        output_slot = int(items[1])\n        debug_op = items[2]\n        if (node_name, output_slot, debug_op) in self._event_listener_servicer.breakpoints:\n            return debug_service_pb2.EventReply()", "docstring": "Implementation of the tensor value-carrying Event proto callback.\n\nWrites the Event proto to the file system for testing. The path written to\nfollows the same pattern as the file:// debug URLs of tfdbg, i.e., the\nname scope of the op becomes the directory structure under the dump root\ndirectory.\n\nArgs:\nevent: The Event proto carrying a tensor value.\n\nReturns:\nIf the debug node belongs to the set of currently activated breakpoints,\na `EventReply` proto will be returned.", "source": "github-repos"}
{"code": "def embed_image_html(image):\n    \n    if image is None:\n        return None\n    elif isinstance(image, PIL.Image.Image):\n        pass\n    elif isinstance(image, np.ndarray):\n        image = PIL.Image.fromarray(image)\n    else:\n        raise ValueError('image must be a PIL.Image or a np.ndarray')\n\n    \n    fmt = image.format\n    if not fmt:\n        \n        fmt = 'jpeg'\n    else:\n        fmt = fmt.lower()\n\n    string_buf = StringIO()\n    image.save(string_buf, format=fmt)\n    data = string_buf.getvalue().encode('base64').replace('\\n', '')\n    return 'data:image/%s;base64,%s' % (fmt, data)", "docstring": "Returns an image embedded in HTML base64 format\n(Based on Caffe's web_demo)\nArguments:\nimage -- a PIL.Image or np.ndarray", "source": "juraj-google-style"}
{"code": "def _jit_get_rotation_matrix(axis, angle):\n    \n    axis = _jit_normalize(axis)\n    a = m.cos(angle / 2)\n    b, c, d = axis * m.sin(angle / 2)\n    rot_matrix = np.empty((3, 3))\n    rot_matrix[0, 0] = a**2 + b**2 - c**2 - d**2\n    rot_matrix[0, 1] = 2. * (b * c - a * d)\n    rot_matrix[0, 2] = 2. * (b * d + a * c)\n    rot_matrix[1, 0] = 2. * (b * c + a * d)\n    rot_matrix[1, 1] = a**2 + c**2 - b**2 - d**2\n    rot_matrix[1, 2] = 2. * (c * d - a * b)\n    rot_matrix[2, 0] = 2. * (b * d - a * c)\n    rot_matrix[2, 1] = 2. * (c * d + a * b)\n    rot_matrix[2, 2] = a**2 + d**2 - b**2 - c**2\n    return rot_matrix", "docstring": "Returns the rotation matrix.\n\nThis function returns a matrix for the counterclockwise rotation\naround the given axis.\nThe Input angle is in radians.\n\nArgs:\naxis (vector):\nangle (float):\n\nReturns:\nRotation matrix (np.array):", "source": "juraj-google-style"}
{"code": "def pauli_single(cls, num_qubits, index, pauli_label):\n    tmp = Pauli.from_label(pauli_label)\n    z = np.zeros(num_qubits, dtype=np.bool)\n    x = np.zeros(num_qubits, dtype=np.bool)\n    z[index] = tmp.z[0]\n    x[index] = tmp.x[0]\n    return cls(z, x)", "docstring": "Generate single qubit pauli at index with pauli_label with length num_qubits.\n\nArgs:\nnum_qubits (int): the length of pauli\nindex (int): the qubit index to insert the single qubii\npauli_label (str): pauli\n\nReturns:\nPauli: single qubit pauli", "source": "codesearchnet"}
{"code": "def convert_to_rgb(image: ImageInput) -> ImageInput:\n    requires_backends(convert_to_rgb, ['vision'])\n    if not isinstance(image, PIL.Image.Image):\n        return image\n    if image.mode == 'RGB':\n        return image\n    image = image.convert('RGB')\n    return image", "docstring": "Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image\nas is.\nArgs:\nimage (Image):\nThe image to convert.", "source": "github-repos"}
{"code": "def add_item(self, item):\n    if (not (isinstance(item.name, basestring) and isinstance(item.description, basestring))):\n        raise TypeError('Name and description should be strings, are of type {} and {}'.format(type(item.name), type(item.description)))\n    if (not isinstance(item.flag_type, FlagType)):\n        raise TypeError('Flag type should be of type FlagType, is of {}'.format(type(item.flag_type)))\n    if (item.name not in self._flags):\n        if (item.default is not None):\n            if (item.default is not False):\n                item.description = (item.description + ' (default: %(default)s)')\n            self._flags[item.name] = item\n        else:\n            self._flags[item.name] = item", "docstring": "Add single command line flag\n\nArguments:\nname (:obj:`str`): Name of flag used in command line\nflag_type (:py:class:`snap_plugin.v1.plugin.FlagType`):\nIndication if flag should store value or is simple bool flag\ndescription (:obj:`str`): Flag description used in command line\ndefault (:obj:`object`, optional): Optional default value for flag\n\nRaises:\nTypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError", "source": "codesearchnet"}
{"code": "def _ConvertHeaderToId(header):\n        \n        if not (header.startswith('<') or header.endswith('>')):\n            raise exceptions.BatchError(\n                'Invalid value for Content-ID: %s' % header)\n        if '+' not in header:\n            raise exceptions.BatchError(\n                'Invalid value for Content-ID: %s' % header)\n        _, request_id = header[1:-1].rsplit('+', 1)\n\n        return urllib_parse.unquote(request_id)", "docstring": "Convert a Content-ID header value to an id.\n\nPresumes the Content-ID header conforms to the format that\n_ConvertIdToHeader() returns.\n\nArgs:\nheader: A string indicating the Content-ID header value.\n\nReturns:\nThe extracted id value.\n\nRaises:\nBatchError if the header is not in the expected format.", "source": "juraj-google-style"}
{"code": "def ScanForVolumeSystem(self, source_path_spec):\n    \n    if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:\n      \n      \n      return None\n\n    if source_path_spec.IsVolumeSystemRoot():\n      return source_path_spec\n\n    if source_path_spec.type_indicator == (\n        definitions.TYPE_INDICATOR_APFS_CONTAINER):\n      \n      \n      \n      return None\n\n    try:\n      type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators(\n          source_path_spec, resolver_context=self._resolver_context)\n    except (IOError, RuntimeError) as exception:\n      raise errors.BackEndError((\n          'Unable to process source path specification with error: '\n          '{0!s}').format(exception))\n\n    if not type_indicators:\n      return None\n\n    if len(type_indicators) > 1:\n      raise errors.BackEndError(\n          'Unsupported source found more than one volume system types.')\n\n    if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and\n        source_path_spec.type_indicator in [\n            definitions.TYPE_INDICATOR_TSK_PARTITION]):\n      return None\n\n    if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS:\n      return path_spec_factory.Factory.NewPathSpec(\n          type_indicators[0], location='/', parent=source_path_spec)\n\n    return path_spec_factory.Factory.NewPathSpec(\n        type_indicators[0], parent=source_path_spec)", "docstring": "Scans the path specification for a supported volume system format.\n\nArgs:\nsource_path_spec (PathSpec): source path specification.\n\nReturns:\nPathSpec: volume system path specification or None if no supported volume\nsystem type was found.\n\nRaises:\nBackEndError: if the source cannot be scanned or more than one volume\nsystem type is found.", "source": "juraj-google-style"}
{"code": "def _Open(self, hostname, port):\n    try:\n        self._xmlrpc_server = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname, port), logRequests=False, allow_none=True)\n    except SocketServer.socket.error as exception:\n        logger.warning('Unable to bind a RPC server on {0:s}:{1:d} with error: {2!s}'.format(hostname, port, exception))\n        return False\n    self._xmlrpc_server.register_function(self._callback, self._RPC_FUNCTION_NAME)\n    return True", "docstring": "Opens the RPC communication channel for clients.\n\nArgs:\nhostname (str): hostname or IP address to connect to for requests.\nport (int): port to connect to for requests.\n\nReturns:\nbool: True if the communication channel was successfully opened.", "source": "codesearchnet"}
{"code": "def make_persister(self, to_persist):\n    if (not self.meta_data):\n        raise Exception(\"Root not set. Can't create persister.\")\n\n    def persister(c, broker):\n        if (c in to_persist):\n            self.dehydrate(c, broker)\n    return persister", "docstring": "Returns a function that hydrates components as they are evaluated. The\nfunction should be registered as an observer on a Broker just before\nexecution.\n\nArgs:\nto_persist (set): Set of components to persist. Skip everything\nelse.", "source": "codesearchnet"}
{"code": "def search(self, search_string):\n    updates = Updates()\n    found = updates.updates\n    if isinstance(search_string, six.string_types):\n        search_string = [search_string]\n    if isinstance(search_string, six.integer_types):\n        search_string = [six.text_type(search_string)]\n    for update in self._updates:\n        for find in search_string:\n            if (find == update.Identity.UpdateID):\n                found.Add(update)\n                continue\n            if (find in [('KB' + item) for item in update.KBArticleIDs]):\n                found.Add(update)\n                continue\n            if (find in [item for item in update.KBArticleIDs]):\n                found.Add(update)\n                continue\n            if (find in update.Title):\n                found.Add(update)\n                continue\n    return updates", "docstring": "Search for either a single update or a specific list of updates. GUIDs\nare searched first, then KB numbers, and finally Titles.\n\nArgs:\n\nsearch_string (str, list): The search string to use to find the\nupdate. This can be the GUID or KB of the update (preferred). It can\nalso be the full Title of the update or any part of the Title. A\npartial Title search is less specific and can return multiple\nresults.\n\nReturns:\nUpdates: An instance of Updates with the results of the search\n\nCode Example:\n\n.. code-block:: python\n\nimport salt.utils.win_update\nwua = salt.utils.win_update.WindowsUpdateAgent()\n\n# search for a single update and show its details\nupdates = wua.search('KB3194343')\nupdates.list()\n\n# search for a list of updates and show their details\nupdates = wua.search(['KB3195432', '12345678-abcd-1234-abcd-1234567890ab'])\nupdates.list()", "source": "codesearchnet"}
{"code": "def _update_install_json(self, install_json):\n        \n        updated = False\n        \n        install_json.setdefault('features', [])\n        for feature in self.features:\n            if feature not in install_json.get('features'):\n                install_json['features'].append(feature)\n                updated = True\n                \n                self.package_data['updates'].append(\n                    {'action': 'Updated Feature:', 'output': feature}\n                )\n\n        return install_json, updated", "docstring": "Write install.json file.\n\nArgs:\ninstall_json (dict): The contents of the install.json file.\n\nReturns:\ndict, bool: The contents of the install.json file and boolean value that is True if\nan update was made.", "source": "juraj-google-style"}
{"code": "def prompt_for_password(url, user=None, default_user=None):\n    if (user is None):\n        default_user = (default_user or getpass.getuser())\n        while (user is None):\n            user = compat.console_input('Enter username for {} [{}]: '.format(url, default_user))\n            if ((user.strip() == '') and default_user):\n                user = default_user\n    if user:\n        pw = getpass.getpass('Enter password for {}@{} (Ctrl+C to abort): '.format(user, url))\n        if (pw or (pw == '')):\n            return (user, pw)\n    return None", "docstring": "Prompt for username and password.\n\nIf a user name is passed, only prompt for a password.\nArgs:\nurl (str): hostname\nuser (str, optional):\nPass a valid name to skip prompting for a user name\ndefault_user (str, optional):\nPass a valid name that is used as default when prompting\nfor a user name\nRaises:\nKeyboardInterrupt if user hits Ctrl-C\nReturns:\n(username, password) or None", "source": "codesearchnet"}
{"code": "def healthy_services(self, role=None):\n    try:\n        query = self.rr.table(self.table)\n        if role:\n            query = query.get_all(role, index='role')\n        query = query.filter((lambda svc: (r.now().sub(svc['last_heartbeat']) < svc['ttl']))).order_by('load')\n        result = query.run()\n        return result\n    except r.ReqlNonExistenceError:\n        return []", "docstring": "Look up healthy services in the registry.\n\nA service is considered healthy if its 'last_heartbeat' was less than\n'ttl' seconds ago\n\nArgs:\nrole (str, optional): role name\n\nReturns:\nIf `role` is supplied, returns list of healthy services for the\ngiven role, otherwise returns list of all healthy services. May\nreturn an empty list.", "source": "codesearchnet"}
{"code": "def get_select_expressions(self) -> Tuple[column_expression_builder.ColumnExpressionBuilder, ...]:\n    return self._fields", "docstring": "Returns the fields used in the view and their corresponding expressions.\n\nReturns:\nAn immutable dictionary of selected field names and the expression\nused to populate them.", "source": "github-repos"}
{"code": "def _serialize_signature_def_map(signature_def_map: _SignatureDefMap) -> dict[str, bytes]:\n    signature_def_map_serialized = {}\n    for key, signature_def in signature_def_map.items():\n        signature_def_map_serialized[key] = signature_def.SerializeToString()\n    return signature_def_map_serialized", "docstring": "Serializes SignatureDef values in `signature_def_map`.\n\nArgs:\nsignature_def_map: Signature key -> SignatureDef mapping.\n\nReturns:\nSignature def map where the values (`SignatureDef`) are serialized.", "source": "github-repos"}
{"code": "def __add__(self, other):\n        \n        if not all(np.equal(self.frequencies, other.frequencies)):\n            raise ValueError(\"Frequencies of both DOS are not compatible!\")\n        densities = self.densities + other.densities\n        return PhononDos(self.frequencies, densities)", "docstring": "Adds two DOS together. Checks that frequency scales are the same.\nOtherwise, a ValueError is thrown.\n\nArgs:\nother: Another DOS object.\n\nReturns:\nSum of the two DOSs.", "source": "juraj-google-style"}
{"code": "def format(self, s, pretty=None, expand=None):\n    if (pretty is None):\n        pretty = self.format_pretty\n    if (expand is None):\n        expand = self.format_expand\n    formatter = ObjectStringFormatter(self, pretty=pretty, expand=expand)\n    return formatter.format(s)", "docstring": "Format a string.\n\nArgs:\ns (str): String to format, eg \"hello {name}\"\npretty (bool): If True, references to non-string attributes such as\nlists are converted to basic form, with characters such as\nbrackets and parenthesis removed. If None, defaults to the\nobject's 'format_pretty' attribute.\nexpand (`StringFormatType`): Expansion mode. If None, will default\nto the object's 'format_expand' attribute.\n\nReturns:\nThe formatting string.", "source": "codesearchnet"}
{"code": "def get_es_ids(obj, def_obj):\n    \n    try:\n        path = \"\"\n        for base in [def_obj.__class__] + list(def_obj.__class__.__bases__):\n\n            if hasattr(base, 'es_defs') and base.es_defs:\n                path = \"%s/%s/\" % (base.es_defs['kds_esIndex'][0],\n                                   base.es_defs['kds_esDocType'][0])\n                continue\n    except KeyError:\n        path = \"\"\n    if def_obj.subject.type == 'uri':\n        obj['uri'] = def_obj.subject.clean_uri\n        obj['id'] = path + make_es_id(obj['uri'])\n    elif def_obj.subject.type == 'bnode':\n        obj['id'] = path + def_obj.bnode_id()\n    else:\n        obj['id'] = path + make_es_id(str(obj['value']))\n    return obj", "docstring": "Returns the object updated with the 'id' and 'uri' fields for the\nelasticsearch document\n\nargs:\nobj: data object to update\ndef_obj: the class instance that has defintion values", "source": "juraj-google-style"}
{"code": "def filter_devices(ads, func):\n    \n    results = []\n    for ad in ads:\n        if func(ad):\n            results.append(ad)\n    return results", "docstring": "Finds the AndroidDevice instances from a list that match certain\nconditions.\n\nArgs:\nads: A list of AndroidDevice instances.\nfunc: A function that takes an AndroidDevice object and returns True\nif the device satisfies the filter condition.\n\nReturns:\nA list of AndroidDevice instances that satisfy the filter condition.", "source": "juraj-google-style"}
{"code": "def _clip(params, ids, max_norm):\n\n    def _rank(x):\n        \n        rank = ops.convert_to_tensor(x).get_shape().ndims\n        if rank:\n            return (rank, True)\n        else:\n            return (array_ops.rank(x), False)\n    if max_norm is None:\n        return params\n    ids_rank, ids_static = _rank(ids)\n    params_rank, params_static = _rank(params)\n    return clip_ops.clip_by_norm(params, max_norm, axes=list(range(ids_rank, params_rank)) if ids_static and params_static else math_ops.range(ids_rank, params_rank))", "docstring": "Helper function for _embedding_lookup_and_transform.\n\nThis function optionally clips embeddings to an l2-norm of max_norm.\n\nArgs:\nparams: A `Tensor` of embeddings retrieved by `gather`.\nids: The `ids` argument that was passed to `gather`.\nmax_norm: If not `None`, each embedding is clipped if its l2-norm is larger\nthan this value.\n\nReturns:\nA `Tensor` with the same type as `params`.", "source": "github-repos"}
{"code": "def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):\n    ksizes = [1] + ksizes + [1]\n    strides = [1] + strides + [1]\n    rates = [1] + rates + [1]\n    with self.session():\n        image_placeholder = array_ops.placeholder(dtypes.float32)\n        with self.test_scope():\n            out_tensor = array_ops.extract_image_patches(image_placeholder, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name='im2col')\n        feed_dict = {image_placeholder: image}\n        self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))", "docstring": "Tests input-output pairs for the ExtractImagePatches op.\n\nArgs:\nimage: Input tensor with shape: [batch, in_rows, in_cols, depth].\nksizes: Patch size specified as: [ksize_rows, ksize_cols].\nstrides: Output strides, specified as [stride_rows, stride_cols].\nrates: Atrous rates, specified as [rate_rows, rate_cols].\npadding: Padding type.\npatches: Expected output.", "source": "github-repos"}
{"code": "def get_failed_enrollment_message(cls, users, enrolled_in):\n    failed_emails = [user.email for user in users]\n    return ('error', _('The following learners could not be enrolled in {enrolled_in}: {user_list}').format(enrolled_in=enrolled_in, user_list=', '.join(failed_emails)))", "docstring": "Create message for the users who were not able to be enrolled in a course or program.\n\nArgs:\nusers: An iterable of users who were not successfully enrolled\nenrolled_in (str): A string identifier for the course or program with which enrollment was attempted\n\nReturns:\ntuple: A 2-tuple containing a message type and message text", "source": "codesearchnet"}
{"code": "def state_updates(self):\n    warnings.warn('`Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.')\n    state_updates = []\n    for layer in self.layers:\n        if getattr(layer, 'stateful', False):\n            if hasattr(layer, 'updates'):\n                state_updates += layer.updates\n    return state_updates", "docstring": "Deprecated, do NOT use!\n\nReturns the `updates` from all layers that are stateful.\n\nThis is useful for separating training updates and\nstate updates, e.g. when we need to update a layer's internal state\nduring prediction.\n\nReturns:\nA list of update ops.", "source": "github-repos"}
{"code": "def decode_conjure_enum_type(cls, obj, conjure_type):\n        \n        if not (isinstance(obj, str) or str(type(obj)) == \"<type 'unicode'>\"):\n            raise Exception(\n                'Expected to find str type but found {} instead'.format(\n                    type(obj)))\n\n        if obj in conjure_type.__members__:\n            return conjure_type[obj]\n\n        else:\n            return conjure_type[\"UNKNOWN\"]", "docstring": "Decodes json into a conjure enum type.\n\nArgs:\nobj: the json object to decode\nconjure_type: a class object which is the enum type\nwe're decoding into.\nReturns:\nAn instance of enum of type conjure_type.", "source": "juraj-google-style"}
{"code": "def init_c_overturn(step):\n    \n    rbot, rtop = misc.get_rbounds(step)\n    xieut = step.sdat.par['tracersin']['fe_eut']\n    k_fe = step.sdat.par['tracersin']['k_fe']\n    xi0l = step.sdat.par['tracersin']['fe_cont']\n    xi0s = k_fe * xi0l\n    xired = xi0l / xieut\n    rsup = (rtop**3 - xired**(1 / (1 - k_fe)) *\n            (rtop**3 - rbot**3))**(1 / 3)\n\n    def initprof(rpos):\n        \n        if rpos < rsup:\n            return xi0s * ((rtop**3 - rbot**3) /\n                           (rtop**3 - rpos**3))**(1 - k_fe)\n        return xieut\n\n    rad = np.linspace(rbot, rtop, 500)\n    initprof = np.vectorize(initprof)\n    return initprof(rad), rad", "docstring": "Initial concentration.\n\nThis compute the resulting composition profile if fractional\ncrystallization of a SMO is assumed.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the composition and the radial position\nat which it is evaluated.", "source": "juraj-google-style"}
{"code": "def fit(self, vecs, iter=20, seed=123):\n    assert (vecs.dtype == np.float32)\n    assert (vecs.ndim == 2)\n    (N, D) = vecs.shape\n    assert (self.Ks < N), 'the number of training vector should be more than Ks'\n    assert ((D % self.M) == 0), 'input dimension must be dividable by M'\n    self.Ds = int((D / self.M))\n    np.random.seed(seed)\n    if self.verbose:\n        print('iter: {}, seed: {}'.format(iter, seed))\n    self.codewords = np.zeros((self.M, self.Ks, self.Ds), dtype=np.float32)\n    for m in range(self.M):\n        if self.verbose:\n            print('Training the subspace: {} / {}'.format(m, self.M))\n        vecs_sub = vecs[(:, (m * self.Ds):((m + 1) * self.Ds))]\n        (self.codewords[m], _) = kmeans2(vecs_sub, self.Ks, iter=iter, minit='points')\n    return self", "docstring": "Given training vectors, run k-means for each sub-space and create\ncodewords for each sub-space.\n\nThis function should be run once first of all.\n\nArgs:\nvecs (np.ndarray): Training vectors with shape=(N, D) and dtype=np.float32.\niter (int): The number of iteration for k-means\nseed (int): The seed for random process\n\nReturns:\nobject: self", "source": "codesearchnet"}
{"code": "def isCaCert(self, name):\n    crtpath = self._getPathJoin('cas', ('%s.crt' % name))\n    return os.path.isfile(crtpath)", "docstring": "Checks if a CA certificate exists.\n\nArgs:\nname (str): The name of the CA keypair.\n\nExamples:\nCheck if the CA certificate for \"myca\" exists:\n\nexists = cdir.isCaCert('myca')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "codesearchnet"}
{"code": "def plot_neuron(ax, nrn, neurite_type=NeuriteType.all, plane='xy', soma_outline=True, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):\n    plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline, linewidth=linewidth, color=color, alpha=alpha)\n    for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):\n        plot_tree(ax, neurite, plane=plane, diameter_scale=diameter_scale, linewidth=linewidth, color=color, alpha=alpha)\n    ax.set_title(nrn.name)\n    ax.set_xlabel(plane[0])\n    ax.set_ylabel(plane[1])", "docstring": "Plots a 2D figure of the neuron, that contains a soma and the neurites\n\nArgs:\nax(matplotlib axes): on what to plot\nneurite_type(NeuriteType): an optional filter on the neurite type\nnrn(neuron): neuron to be plotted\nsoma_outline(bool): should the soma be drawn as an outline\nplane(str): Any pair of 'xyz'\ndiameter_scale(float): Scale factor multiplied with segment diameters before plotting\nlinewidth(float): all segments are plotted with this width, but only if diameter_scale=None\ncolor(str or None): Color of plotted values, None corresponds to default choice\nalpha(float): Transparency of plotted values", "source": "codesearchnet"}
{"code": "def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution':\n    new_subst = Substitution(self)\n    new_subst.try_add_variable(variable, replacement)\n    return new_subst", "docstring": "Try to create a new substitution with the given variable added.\n\nSee :meth:`try_add_variable` for a version of this method that modifies the substitution\nin place.\n\nArgs:\nvariable_name:\nThe name of the variable to add.\nreplacement:\nThe substitution for the variable.\n\nReturns:\nThe new substitution with the variable_name added or merged.\n\nRaises:\nValueError:\nif the variable cannot be merged because it conflicts with the existing\nsubstitution for the variable.", "source": "codesearchnet"}
{"code": "def log_transition(self, transition, from_state, instance, *args, **kwargs):\n        \n        logger = logging.getLogger('xworkflows.transitions')\n        try:\n            instance_repr = u(repr(instance), 'ignore')\n        except (UnicodeEncodeError, UnicodeDecodeError):\n            instance_repr = u(\"<bad repr>\")\n        logger.info(\n            u(\"%s performed transition %s.%s (%s -> %s)\"), instance_repr,\n            self.__class__.__name__, transition.name, from_state.name,\n            transition.target.name)", "docstring": "Log a transition.\n\nArgs:\ntransition (Transition): the name of the performed transition\nfrom_state (State): the source state\ninstance (object): the modified object\n\nKwargs:\nAny passed when calling the transition", "source": "juraj-google-style"}
{"code": "def compute_bleu_summaries(hook_args):\n  \n  decode_hparams = hook_args.decode_hparams\n\n  if not (decode_hparams.decode_reference and decode_hparams.decode_to_file):\n    return None\n\n  values = []\n  bleu = 100 * bleu_hook.bleu_wrapper(\n      decode_hparams.decode_reference, decode_hparams.decode_to_file)\n  values.append(tf.Summary.Value(tag=\"BLEU\", simple_value=bleu))\n  tf.logging.info(\"%s: BLEU = %6.2f\" % (decode_hparams.decode_to_file, bleu))\n  if hook_args.hparams.mlperf_mode:\n    current_step = decode_hparams.mlperf_decode_step\n    mlperf_log.transformer_print(\n        key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)\n    mlperf_log.transformer_print(\n        key=mlperf_log.EVAL_ACCURACY,\n        value={\n            \"epoch\": max(current_step \n                         0),\n            \"value\": bleu\n        })\n    mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)\n\n  if bleu >= decode_hparams.mlperf_threshold:\n    decode_hparams.set_hparam(\"mlperf_success\", True)\n\n  return values", "docstring": "Compute BLEU core summaries using the decoder output.\n\nArgs:\nhook_args: DecodeHookArgs namedtuple\nReturns:\nA list of tf.Summary values if hook_args.hparams contains the\nreference file and the translated file.", "source": "juraj-google-style"}
{"code": "def generate_md5_key(list_of_arguments):\n    \n    for arg in list_of_arguments:\n        if not isinstance(arg, string_types):\n            raise SyntaxError(\"Error in generate_md5_key: \"\n                              \"Argument: {0} is a {1}\".format(arg, type(arg)))\n\n    hash = hashlib.md5()\n    hash.update(' '.join(list_of_arguments).encode('utf-8'))\n    return hash.hexdigest()", "docstring": "Generate an md5-key from a list of arguments.\n\nArgs:\nlist_of_arguments: A list of strings\n\nReturns:\nA md5-key object generated from the list of strings.", "source": "juraj-google-style"}
{"code": "def get_kinds(start=None, end=None):\n  \n  q = Kind.query()\n  if start is not None and start != '':\n    q = q.filter(Kind.key >= Kind.key_for_kind(start))\n  if end is not None:\n    if end == '':\n      return []\n    q = q.filter(Kind.key < Kind.key_for_kind(end))\n\n  return [x.kind_name for x in q]", "docstring": "Return all kinds in the specified range, for the current namespace.\n\nArgs:\nstart: only return kinds >= start if start is not None.\nend: only return kinds < end if end is not None.\n\nReturns:\nA list of kind names between the (optional) start and end values.", "source": "juraj-google-style"}
{"code": "def get_pdf_from_html(html: str, header_html: str=None, footer_html: str=None, wkhtmltopdf_filename: str=_WKHTMLTOPDF_FILENAME, wkhtmltopdf_options: Dict[(str, Any)]=None, file_encoding: str='utf-8', debug_options: bool=False, debug_content: bool=False, debug_wkhtmltopdf_args: bool=True, fix_pdfkit_encoding_bug: bool=None, processor: str=_DEFAULT_PROCESSOR) -> bytes:\n    result = make_pdf_from_html(on_disk=False, html=html, header_html=header_html, footer_html=footer_html, wkhtmltopdf_filename=wkhtmltopdf_filename, wkhtmltopdf_options=wkhtmltopdf_options, file_encoding=file_encoding, debug_options=debug_options, debug_content=debug_content, debug_wkhtmltopdf_args=debug_wkhtmltopdf_args, fix_pdfkit_encoding_bug=fix_pdfkit_encoding_bug, processor=processor)\n    return result", "docstring": "Takes HTML and returns a PDF.\n\nSee the arguments to :func:`make_pdf_from_html` (except ``on_disk``).\n\nReturns:\nthe PDF binary as a ``bytes`` object", "source": "codesearchnet"}
{"code": "def checkpoint(self, tasks=None):\n    with self.checkpoint_lock:\n        checkpoint_queue = None\n        if tasks:\n            checkpoint_queue = tasks\n        else:\n            checkpoint_queue = self.tasks\n        checkpoint_dir = '{0}/checkpoint'.format(self.run_dir)\n        checkpoint_dfk = (checkpoint_dir + '/dfk.pkl')\n        checkpoint_tasks = (checkpoint_dir + '/tasks.pkl')\n        if (not os.path.exists(checkpoint_dir)):\n            try:\n                os.makedirs(checkpoint_dir)\n            except FileExistsError:\n                pass\n        with open(checkpoint_dfk, 'wb') as f:\n            state = {'rundir': self.run_dir, 'task_count': self.task_count}\n            pickle.dump(state, f)\n        count = 0\n        with open(checkpoint_tasks, 'ab') as f:\n            for task_id in checkpoint_queue:\n                if ((not self.tasks[task_id]['checkpoint']) and self.tasks[task_id]['app_fu'].done() and (self.tasks[task_id]['app_fu'].exception() is None)):\n                    hashsum = self.tasks[task_id]['hashsum']\n                    if (not hashsum):\n                        continue\n                    t = {'hash': hashsum, 'exception': None, 'result': None}\n                    try:\n                        r = self.memoizer.hash_lookup(hashsum).result()\n                    except Exception as e:\n                        t['exception'] = e\n                    else:\n                        t['result'] = r\n                    pickle.dump(t, f)\n                    count += 1\n                    self.tasks[task_id]['checkpoint'] = True\n                    logger.debug('Task {} checkpointed'.format(task_id))\n        self.checkpointed_tasks += count\n        if (count == 0):\n            if (self.checkpointed_tasks == 0):\n                logger.warn('No tasks checkpointed so far in this run. Please ensure caching is enabled')\n            else:\n                logger.debug('No tasks checkpointed in this pass.')\n        else:\n            logger.info('Done checkpointing {} tasks'.format(count))\n        return checkpoint_dir", "docstring": "Checkpoint the dfk incrementally to a checkpoint file.\n\nWhen called, every task that has been completed yet not\ncheckpointed is checkpointed to a file.\n\nKwargs:\n- tasks (List of task ids) : List of task ids to checkpoint. Default=None\nif set to None, we iterate over all tasks held by the DFK.\n\n.. note::\nCheckpointing only works if memoization is enabled\n\nReturns:\nCheckpoint dir if checkpoints were written successfully.\nBy default the checkpoints are written to the RUNDIR of the current\nrun under RUNDIR/checkpoints/{tasks.pkl, dfk.pkl}", "source": "codesearchnet"}
{"code": "async def checked_run(*cmd):\n  \n\n  \n  logging.info('Running: %s', expand_cmd_str(cmd))\n  with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n    p = await asyncio.create_subprocess_exec(\n        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)\n\n    \n    chunks = []\n    while True:\n      chunk = await p.stdout.read(16 * 1024)\n      if not chunk:\n        break\n      chunks.append(chunk)\n\n    \n    await p.wait()\n    stdout = b''.join(chunks).decode()[:-1]\n    if p.returncode:\n      raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n          p.returncode, expand_cmd_str(cmd), stdout))\n\n    return stdout", "docstring": "Run the given subprocess command in a coroutine.\n\nArgs:\n*cmd: the command to run and its arguments.\n\nReturns:\nThe output that the command wrote to stdout.\n\nRaises:\nRuntimeError: if the command returns a non-zero result.", "source": "juraj-google-style"}
{"code": "def make_movie(structures, output_filename='movie.mp4', zoom=1.0, fps=20, bitrate='10000k', quality=1, **kwargs):\n    vis = StructureVis(**kwargs)\n    vis.show_help = False\n    vis.redraw()\n    vis.zoom(zoom)\n    sigfig = int((math.floor(math.log10(len(structures))) + 1))\n    filename = (('image{0:0' + str(sigfig)) + 'd}.png')\n    for (i, s) in enumerate(structures):\n        vis.set_structure(s)\n        vis.write_image(filename.format(i), 3)\n    filename = (('image%0' + str(sigfig)) + 'd.png')\n    args = ['ffmpeg', '-y', '-i', filename, '-q:v', str(quality), '-r', str(fps), '-b:v', str(bitrate), output_filename]\n    subprocess.Popen(args)", "docstring": "Generate a movie from a sequence of structures using vtk and ffmpeg.\n\nArgs:\nstructures ([Structure]): sequence of structures\noutput_filename (str): filename for structure output. defaults to\nmovie.mp4\nzoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.\nfps (int): Frames per second for the movie. Defaults to 20.\nbitrate (str): Video bitate.  Defaults to \"10000k\" (fairly high\nquality).\nquality (int): A quality scale. Defaults to 1.\n\\\\*\\\\*kwargs: Any kwargs supported by StructureVis to modify the images\ngenerated.", "source": "codesearchnet"}
{"code": "def resolve(self, context, provider):\n        \n        resolve_variables(self.variables, context, provider)\n        self.blueprint.resolve_variables(self.variables)", "docstring": "Resolve the Stack variables.\n\nThis resolves the Stack variables and then prepares the Blueprint for\nrendering by passing the resolved variables to the Blueprint.\n\nArgs:\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of\nthe base provider", "source": "juraj-google-style"}
{"code": "def _PrintParsersCounter(self, parsers_counter, session_identifier=None):\n    \n    if not parsers_counter:\n      return\n\n    title = 'Events generated per parser'\n    if session_identifier:\n      title = '{0:s}: {1:s}'.format(title, session_identifier)\n\n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type,\n        column_names=['Parser (plugin) name', 'Number of events'],\n        title=title)\n\n    for key, value in sorted(parsers_counter.items()):\n      if key == 'total':\n        continue\n      table_view.AddRow([key, value])\n\n    table_view.AddRow(['Total', parsers_counter['total']])\n\n    table_view.Write(self._output_writer)", "docstring": "Prints the parsers counter\n\nArgs:\nparsers_counter (collections.Counter): number of events per parser or\nparser plugin.\nsession_identifier (Optional[str]): session identifier.", "source": "juraj-google-style"}
{"code": "def _ReSearch(self):\n    self.Search(*self._last_search_params)", "docstring": "Performs self.Search again with the previously used parameters.\n\nReturns:\nself.Search result.", "source": "github-repos"}
{"code": "def with_rank_at_most(self, rank):\n        \n        if self.ndims is not None and self.ndims > rank:\n            raise ValueError(\"Shape %s must have rank at most %d\" % (self, rank))\n        else:\n            return self", "docstring": "Returns a shape based on `self` with at most the given rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with at most the given\nrank.\n\nRaises:\nValueError: If `self` does not represent a shape with at most the given\n`rank`.", "source": "juraj-google-style"}
{"code": "def _init_step(pos, prev_step, func, psi_1, psi_2, quad_step):\n    phi_0 = pos.f\n    derphi_0 = pos.df\n    step = func(psi_1 * prev_step)\n    can_take = step.f > phi_0\n    result = _StepGuessResult(step=step, func_evals=1, can_take=can_take, may_terminate=tf.zeros_like(can_take))\n    if quad_step:\n        q_koef = step.f - phi_0 - step.x * derphi_0\n        quad_step_success = tf.logical_and(step.f <= phi_0, q_koef > 0.0)\n\n        def update_result_1():\n            new_x = tf.compat.v1.where(quad_step_success, -0.5 * tf.math.divide_no_nan(derphi_0 * step.x ** 2, q_koef), result.step.x)\n            return _StepGuessResult(step=func(new_x), func_evals=result.func_evals + 1, can_take=tf.math.logical_or(result.can_take, quad_step_success), may_terminate=tf.math.logical_or(result.may_terminate, quad_step_success))\n        result = tf.cond(tf.math.reduce_any(quad_step_success), update_result_1, lambda: result)\n\n    def update_result_2():\n        new_x = tf.compat.v1.where(can_take, result.step.x, psi_2 * prev_step)\n        return _StepGuessResult(step=func(new_x), func_evals=result.func_evals + 1, can_take=tf.ones_like(can_take), may_terminate=result.may_terminate)\n    result = tf.cond(tf.math.reduce_all(result.can_take), lambda: result, update_result_2)\n    return result", "docstring": "Finds initial step size for line seacrh at given point.\n\nCorresponds to I1-I2 in [HZ2006].\n\nArgs:\npos: ValueAndGradient for current point.\nprev_step: Step size at previous iteration.\nfunc: Callable taking real `Tensor` and returning ValueAndGradient,\ndescribes scalar function for line search.\npsi_1: Real scalar `Tensor`. Factor to multiply previous step to get right\npoint for quadratic interpolation.\npsi_2: Real scalar `Tesnor`. Factor to multiply previous step if qudratic\ninterpolation failed.\nquad_step: Boolean. Whether to try quadratic interpolation.\n\nReturns:\n_StepGuessResult namedtuple containing initial guess and additional data.", "source": "github-repos"}
{"code": "def _type_digest(self, config: bool) -> Dict[(str, Any)]:\n    res = {'base': self.yang_type()}\n    if (self.name is not None):\n        res['derived'] = self.name\n    return res", "docstring": "Return receiver's type digest.\n\nArgs:\nconfig: Specifies whether the type is on a configuration node.", "source": "codesearchnet"}
{"code": "def tomof(self, maxline=MAX_MOF_LINE):\n    mof = []\n    mof.append(_qualifiers_tomof(self.qualifiers, MOF_INDENT, maxline))\n    mof.append(u'class ')\n    mof.append(self.classname)\n    mof.append(u' ')\n    if (self.superclass is not None):\n        mof.append(u': ')\n        mof.append(self.superclass)\n        mof.append(u' ')\n    mof.append(u'{\\n')\n    for p in self.properties.itervalues():\n        mof.append(u'\\n')\n        mof.append(p.tomof(False, MOF_INDENT, maxline))\n    for m in self.methods.itervalues():\n        mof.append(u'\\n')\n        mof.append(m.tomof(MOF_INDENT, maxline))\n    mof.append(u'\\n};\\n')\n    return u''.join(mof)", "docstring": "Return a MOF string with the declaration of this CIM class.\n\nThe returned MOF string conforms to the ``classDeclaration``\nABNF rule defined in :term:`DSP0004`.\n\nThe order of properties, methods, parameters, and qualifiers is\npreserved.\n\nThe :attr:`~pywbem.CIMClass.path` attribute of this object will not be\nincluded in the returned MOF string.\n\nConsistent with that, class path information is not included in the\nreturned MOF string.\n\nReturns:\n\n:term:`unicode string`: MOF string.", "source": "codesearchnet"}
{"code": "def LearnToExecute(batch_size, max_length=1, max_nesting=1, token_by_char=True, mode=Mode.TRAIN_COMBINE, loss_threshold=0.1, min_tries=DEFAULT_MIN_CURRICULUM_EVAL_TRIES, task_type=TaskType.ALG_CTRL):\n    if (mode == Mode.TRAIN_COMBINE):\n        curriculum = CombineCurriculum(max_length, max_nesting, loss_threshold, min_tries=min_tries)\n    elif (mode == Mode.TRAIN_MIX):\n        curriculum = MixCurriculum(max_length, max_nesting, loss_threshold, min_tries=min_tries)\n    elif (mode == Mode.TRAIN_NAIVE):\n        curriculum = NaiveCurriculum(max_length, max_nesting, loss_threshold, min_tries=min_tries)\n    elif (mode == Mode.TEST):\n        curriculum = BaselineCurriculum(max_length, max_nesting, loss_threshold, min_tries=0)\n    else:\n        raise ValueError('Invalid mode.')\n    lte = LearnToExecuteState(batch_size, max_length, max_nesting, curriculum, token_by_char, task_type=task_type)\n    types_ = (tf.float32, tf.float32, tf.float32, tf.int64, tf.int64)\n    shapes_ = (tf.TensorShape([lte.num_steps, batch_size, lte.vocab_size]), tf.TensorShape([lte.num_steps_out, batch_size, lte.vocab_size]), tf.TensorShape([lte.num_steps_out, batch_size, lte.vocab_size]), tf.TensorShape([batch_size]), tf.TensorShape([batch_size]))\n    dataset = tf.data.Dataset.from_generator(lte.make_batch, types_, shapes_)\n    dataset.state = lte\n    return dataset", "docstring": "Factory method for LearnToExecute Dataset module.\n\nArgs:\nbatch_size: (int). The number of elements in a mini-batch.\nmax_length: (int). Maximum character length.\nmax_nesting: (int). Maximum level of statement nesting.\ntoken_by_char: (bool). Tokenize by character or words?\nmode: (string). Either 'train', 'test'.\nloss_threshold: (int) curriculum threshold for error below which increase\nthe task difficulty.\nmin_tries: (int) minimum update tries for curriculum difficulty level.\ntask_type: (string) defines the task by allowable ops (see TASK_TYPE_OPS).\n\nReturns:\ntf.Data.Dataset for LearnToExecute sample generator with the\nLearnToExecuteState monkey patched into the `state` attribute.\n\nRaises:\nValueError: in case of bad `mode`.", "source": "codesearchnet"}
{"code": "def __validate_path_parameters(self, field, path_parameters):\n    for param in path_parameters:\n        segment_list = param.split('.')\n        if (segment_list[0] != field.name):\n            raise TypeError((\"Subfield %r can't come from field %r.\" % (param, field.name)))\n        self.__validate_simple_subfield(field.name, field, segment_list[1:])", "docstring": "Verifies that all path parameters correspond to an existing subfield.\n\nArgs:\nfield: An instance of a subclass of messages.Field. Should be the root\nlevel property name in each path parameter in path_parameters. For\nexample, if the field is called 'foo', then each path parameter should\nbegin with 'foo.'.\npath_parameters: A list of Strings representing URI parameter variables.\n\nRaises:\nTypeError: If one of the path parameters does not start with field.name.", "source": "codesearchnet"}
{"code": "def execute(self, command, timeout=1):\n        \n        self.send(command)\n        return self.read_untill(timeout)", "docstring": "Execute rcon command on server and fetch result\nArgs:\ncommand --- executed command\ntimeout --- read timeout\n\nReturns: bytes response", "source": "juraj-google-style"}
{"code": "def token_network_register(self, registry_address: PaymentNetworkID, token_address: TokenAddress, channel_participant_deposit_limit: TokenAmount, token_network_deposit_limit: TokenAmount, retry_timeout: NetworkTimeout=DEFAULT_RETRY_TIMEOUT) -> TokenNetworkAddress:\n    if (not is_binary_address(registry_address)):\n        raise InvalidAddress('registry_address must be a valid address in binary')\n    if (not is_binary_address(token_address)):\n        raise InvalidAddress('token_address must be a valid address in binary')\n    if (token_address in self.get_tokens_list(registry_address)):\n        raise AlreadyRegisteredTokenAddress('Token already registered')\n    contracts_version = self.raiden.contract_manager.contracts_version\n    registry = self.raiden.chain.token_network_registry(registry_address)\n    try:\n        if (contracts_version == DEVELOPMENT_CONTRACT_VERSION):\n            return registry.add_token_with_limits(token_address=token_address, channel_participant_deposit_limit=channel_participant_deposit_limit, token_network_deposit_limit=token_network_deposit_limit)\n        else:\n            return registry.add_token_without_limits(token_address=token_address)\n    except RaidenRecoverableError as e:\n        if ('Token already registered' in str(e)):\n            raise AlreadyRegisteredTokenAddress('Token already registered')\n        raise\n    finally:\n        next_block = (self.raiden.get_block_number() + 1)\n        waiting.wait_for_block(self.raiden, next_block, retry_timeout)", "docstring": "Register the `token_address` in the blockchain. If the address is already\nregistered but the event has not been processed this function will block\nuntil the next block to make sure the event is processed.\n\nRaises:\nInvalidAddress: If the registry_address or token_address is not a valid address.\nAlreadyRegisteredTokenAddress: If the token is already registered.\nTransactionThrew: If the register transaction failed, this may\nhappen because the account has not enough balance to pay for the\ngas or this register call raced with another transaction and lost.", "source": "codesearchnet"}
{"code": "def get_output_batch_type(self, input_element_type) -> typing.Optional[typing.Union[TypeConstraint, type]]:\n    output_batch_type = None\n    if self._process_defined and self._process_yields_batches:\n        output_batch_type = self._get_element_type_from_return_annotation(self.process, input_element_type)\n    if self._process_batch_defined and (not self._process_batch_yields_elements):\n        process_batch_type = self._get_element_type_from_return_annotation(self.process_batch, self._get_input_batch_type_normalized(input_element_type))\n        if output_batch_type is not None and (not process_batch_type == output_batch_type):\n            raise TypeError(f'DoFn {self!r} yields batches from both process and process_batch, but they produce different types:\\n process: {output_batch_type}\\n process_batch: {process_batch_type!r}')\n        output_batch_type = process_batch_type\n    return output_batch_type", "docstring": "Determine the batch type produced by this DoFn's ``process_batch``\nimplementation and/or its ``process`` implementation with\n``@yields_batch``.\n\nThe default implementation of this method observes the return type\nannotations on ``process_batch`` and/or ``process``.  A Batched DoFn may\noverride this method if a dynamic approach is required.\n\nArgs:\ninput_element_type: The **element type** of the input PCollection this\nDoFn is being applied to.\n\nReturns:\n``None`` if this DoFn will never yield batches, else a Beam typehint or\na native Python typehint.", "source": "github-repos"}
{"code": "def load_file_to_str(path):\n    \n    \n    with open(path, 'rt') as f:\n        string = f.read().replace(linesep, '')\n    if not string:\n        raise LoadError('%s file is empty!' % path)\n    return string", "docstring": "Load file into a string removing newlines\n\nArgs:\npath (str): Path to file\n\nReturns:\nstr: String contents of file", "source": "juraj-google-style"}
{"code": "def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n    raise NotImplementedError", "docstring": "Save only the vocabulary of the tokenizer (vocabulary + added tokens).\n\nThis method won't save the configuration and special token mappings of the tokenizer. Use\n[`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.\n\nArgs:\nsave_directory (`str`):\nThe directory in which to save the vocabulary.\nfilename_prefix (`str`, *optional*):\nAn optional prefix to add to the named of the saved files.\n\nReturns:\n`Tuple(str)`: Paths to the files saved.", "source": "github-repos"}
{"code": "def _transform_variable_to_expression(expression, node, context):\n    variable_name = expression.variable_name\n    if (not variable_name.startswith(u'$')):\n        raise AssertionError(u'Unexpectedly received variable name {} that is not prefixed with \"$\"'.format(variable_name))\n    return bindparam(variable_name[1:])", "docstring": "Transform a Variable compiler expression into its SQLAlchemy expression representation.\n\nArgs:\nexpression: expression, Variable compiler expression.\nnode: SqlNode, the SqlNode the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression.", "source": "codesearchnet"}
{"code": "def from_array(array):\n    \n    try:\n        raw_data = blosc.pack_array(array)\n    except Exception as e:\n        raise ValueError(\"Could not compress data from array. {}\".format(e))\n\n    return raw_data", "docstring": "Export a numpy array to a blosc array.\n\nArguments:\narray: The numpy array to compress to blosc array\n\nReturns:\nBytes/String. A blosc compressed array", "source": "juraj-google-style"}
{"code": "def dprintx(passeditem, special=False):\n    \n    if DEBUGALL:\n        if special:\n            from pprint import pprint\n            pprint(passeditem)\n        else:\n            print(\"%s%s%s\" % (C_TI, passeditem, C_NORM))", "docstring": "Print Text if DEBUGALL set, optionally with PrettyPrint.\n\nArgs:\npasseditem (str): item to print\nspecial (bool): determines if item prints with PrettyPrint\nor regular print.", "source": "juraj-google-style"}
{"code": "def verified(context, collaborator, test, outpath=None):\n    \n    written_files = 0\n    collaborator = collaborator or 'cust000'\n    LOG.info('Exporting verified variants for cust {}'.format(collaborator))\n\n    adapter = context.obj['adapter']\n    verified_vars = adapter.verified(institute_id=collaborator)\n    LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))\n\n\n    if not verified_vars:\n        LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))\n        return None\n\n    document_lines = export_verified_variants(verified_vars)\n\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\n    document_name = '.'.join(['verified_variants', collaborator, today]) + '.xlsx'\n\n    \n    if test and document_lines:\n        written_files +=1\n        LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))\n        return written_files\n\n    \n    \n    if not outpath:\n        outpath = str(os.getcwd())\n    workbook = Workbook(os.path.join(outpath,document_name))\n    Report_Sheet = workbook.add_worksheet()\n\n    \n    row = 0\n    for col,field in enumerate(VERIFIED_VARIANTS_HEADER):\n        Report_Sheet.write(row,col,field)\n\n    \n    for row, line in enumerate(document_lines,1): \n        for col, field in enumerate(line): \n            Report_Sheet.write(row,col,field)\n    workbook.close()\n\n    if os.path.exists(os.path.join(outpath,document_name)):\n        LOG.info('Success. Verified variants file of {} lines was written to disk'. format(len(document_lines)))\n        written_files += 1\n\n    return written_files", "docstring": "Export variants which have been verified for an institute\nand write them to an excel file.\n\nArgs:\ncollaborator(str): institute id\ntest(bool): True if the function is called for testing purposes\noutpath(str): path to output file\n\nReturns:\nwritten_files(int): number of written or simulated files", "source": "juraj-google-style"}
{"code": "def alias_tags(tags_list, alias_map):\n\n    def _alias_dict(tags):\n        tags_ = [alias_map.get(t, t) for t in tags]\n        return list(set([t for t in tags_ if (t is not None)]))\n    tags_list_ = [_alias_dict(tags) for tags in tags_list]\n    return tags_list_", "docstring": "update tags to new values\n\nArgs:\ntags_list (list):\nalias_map (list): list of 2-tuples with regex, value\n\nReturns:\nlist: updated tags\n\nCommandLine:\npython -m utool.util_tags alias_tags --show\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_tags import *  # NOQA\n>>> import utool as ut\n>>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]\n>>> ut.build_alias_map()\n>>> result = alias_tags(tags_list, alias_map)\n>>> print(result)", "source": "codesearchnet"}
{"code": "def taubin(script, iterations=10, t_lambda=0.5, t_mu=(- 0.53), selected=False):\n    filter_xml = ''.join(['  <filter name=\"Taubin Smooth\">\\n', '    <Param name=\"lambda\" ', 'value=\"{}\" '.format(t_lambda), 'description=\"Lambda\" ', 'type=\"RichFloat\" ', '/>\\n', '    <Param name=\"mu\" ', 'value=\"{}\" '.format(t_mu), 'description=\"mu\" ', 'type=\"RichFloat\" ', '/>\\n', '    <Param name=\"stepSmoothNum\" ', 'value=\"{:d}\" '.format(iterations), 'description=\"Smoothing steps\" ', 'type=\"RichInt\" ', '/>\\n', '    <Param name=\"Selected\" ', 'value=\"{}\" '.format(str(selected).lower()), 'description=\"Affect only selected faces\" ', 'type=\"RichBool\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "The lambda & mu Taubin smoothing, it make two steps of smoothing, forth\nand back, for each iteration.\n\nBased on:\nGabriel Taubin\n\"A signal processing approach to fair surface design\"\nSiggraph 1995\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\niterations (int): The number of times that the taubin smoothing is\niterated. Usually it requires a larger number of iteration than the\nclassical laplacian.\nt_lambda (float): The lambda parameter of the Taubin Smoothing algorithm\nt_mu (float): The mu parameter of the Taubin Smoothing algorithm\nselected (bool): If selected the filter is performed only on the\nselected faces\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def get_channel_dimension_axis(image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]]=None) -> int:\n    if input_data_format is None:\n        input_data_format = infer_channel_dimension_format(image)\n    if input_data_format == ChannelDimension.FIRST:\n        return image.ndim - 3\n    elif input_data_format == ChannelDimension.LAST:\n        return image.ndim - 1\n    raise ValueError(f'Unsupported data format: {input_data_format}')", "docstring": "Returns the channel dimension axis of the image.\n\nArgs:\nimage (`np.ndarray`):\nThe image to get the channel dimension axis of.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the image. If `None`, will infer the channel dimension from the image.\n\nReturns:\nThe channel dimension axis of the image.", "source": "github-repos"}
{"code": "def CopyToDateTimeString(self):\n    if (self._timestamp is None):\n        return None\n    (number_of_days, hours, minutes, seconds) = self._GetTimeValues(int(self._timestamp))\n    (year, month, day_of_month) = self._GetDateValuesWithEpoch(number_of_days, self._EPOCH)\n    microseconds = int(((self._timestamp % 1) * definitions.MICROSECONDS_PER_SECOND))\n    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}'.format(year, month, day_of_month, hours, minutes, seconds, microseconds)", "docstring": "Copies the Cocoa timestamp to a date and time string.\n\nReturns:\nstr: date and time value formatted as: YYYY-MM-DD hh:mm:ss.###### or\nNone if the timestamp cannot be copied to a date and time string.", "source": "codesearchnet"}
{"code": "def look_up(self, **keys: Dict[InstanceName, ScalarValue]) -> \"ArrayEntry\":\n        \n        if not isinstance(self.schema_node, ListNode):\n            raise InstanceValueError(self.json_pointer(), \"lookup on non-list\")\n        try:\n            for i in range(len(self.value)):\n                en = self.value[i]\n                flag = True\n                for k in keys:\n                    if en[k] != keys[k]:\n                        flag = False\n                        break\n                if flag:\n                    return self._entry(i)\n            raise NonexistentInstance(self.json_pointer(), \"entry lookup failed\")\n        except KeyError:\n            raise NonexistentInstance(self.json_pointer(), \"entry lookup failed\") from None\n        except TypeError:\n            raise InstanceValueError(self.json_pointer(), \"lookup on non-list\") from None", "docstring": "Return the entry with matching keys.\n\nArgs:\nkeys: Keys and values specified as keyword arguments.\n\nRaises:\nInstanceValueError: If the receiver's value is not a YANG list.\nNonexistentInstance: If no entry with matching keys exists.", "source": "juraj-google-style"}
{"code": "def from_service_account_file(cls, filename, **kwargs):\n        \n        info, signer = _service_account_info.from_filename(\n            filename, require=['client_email', 'token_uri'])\n        return cls._from_signer_and_info(signer, info, **kwargs)", "docstring": "Creates a Credentials instance from a service account json file.\n\nArgs:\nfilename (str): The path to the service account json file.\nkwargs: Additional arguments to pass to the constructor.\n\nReturns:\ngoogle.auth.service_account.Credentials: The constructed\ncredentials.", "source": "juraj-google-style"}
{"code": "def _fdopen(self, *args, **kwargs):\n        \n        if not is_int_type(args[0]):\n            raise TypeError('an integer is required')\n        return FakeFileOpen(self.filesystem)(*args, **kwargs)", "docstring": "Redirector to open() builtin function.\n\nArgs:\n*args: Pass through args.\n**kwargs: Pass through kwargs.\n\nReturns:\nFile object corresponding to file_des.\n\nRaises:\nTypeError: if file descriptor is not an integer.", "source": "juraj-google-style"}
{"code": "def assertArrayNear(self, farray1, farray2, err, msg=None):\n    self.assertEqual(len(farray1), len(farray2), msg=msg)\n    for f1, f2 in zip(farray1, farray2):\n        self.assertNear(float(f1), float(f2), err, msg=msg)", "docstring": "Asserts that two float arrays are near each other.\n\nChecks that for all elements of farray1 and farray2\n|f1 - f2| < err.  Asserts a test failure if not.\n\nArgs:\nfarray1: a list of float values.\nfarray2: a list of float values.\nerr: a float value.\nmsg: Optional message to report on failure.", "source": "github-repos"}
{"code": "def set_window_size(self, width, height, window_handle='current'):\n        \n        self._execute(Command.SET_WINDOW_SIZE, {\n            'width': int(width),\n            'height': int(height),\n            'window_handle': window_handle})", "docstring": "Sets the width and height of the current window.\n\nSupport:\nWeb(WebView)\n\nArgs:\nwidth(int): the width in pixels.\nheight(int): the height in pixels.\nwindow_handle(str): Identifier of window_handle,\ndefault to 'current'.\n\nReturns:\nWebDriver Object.", "source": "juraj-google-style"}
{"code": "def matches_alias(self, alias: str) -> bool:\n    del self\n    del alias\n    return False", "docstring": "Indicates whether the expression will be selected as the given alias.\n\nIntended to be over-ridden by sub-classes which can safely implement it.\nGiven an expression and an alias, indicates whether the expression will be\nSELECT'd as the given alias. For example, an expression like `SELECT a.b`\nmatches the alias 'b', making it equivalent to the expression\n`SELECT a.b AS b`.\n\nArgs:\nalias: The alias to compare the expression against.\n\nReturns:\nTrue when the expression evaluates to the same name as the alias and False\notherwise.", "source": "github-repos"}
{"code": "def get_patched_request(requires, patchlist):\n    rules = {'': (True, True, True), '!': (False, False, False), '~': (False, False, True), '^': (True, True, True)}\n    requires = [(Requirement(x) if (not isinstance(x, Requirement)) else x) for x in requires]\n    appended = []\n    for patch in patchlist:\n        if (patch and (patch[0] in ('!', '~', '^'))):\n            ch = patch[0]\n            name = Requirement(patch[1:]).name\n        else:\n            ch = ''\n            name = Requirement(patch).name\n        rule = rules[ch]\n        replaced = (ch == '^')\n        for (i, req) in enumerate(requires):\n            if ((req is None) or (req.name != name)):\n                continue\n            if (not req.conflict):\n                replace = rule[0]\n            elif (not req.weak):\n                replace = rule[1]\n            else:\n                replace = rule[2]\n            if replace:\n                if replaced:\n                    requires[i] = None\n                else:\n                    requires[i] = Requirement(patch)\n                    replaced = True\n        if (not replaced):\n            appended.append(Requirement(patch))\n    result = ([x for x in requires if (x is not None)] + appended)\n    return result", "docstring": "Apply patch args to a request.\n\nFor example, consider:\n\n>>> print get_patched_request([\"foo-5\", \"bah-8.1\"], [\"foo-6\"])\n[\"foo-6\", \"bah-8.1\"]\n>>> print get_patched_request([\"foo-5\", \"bah-8.1\"], [\"^bah\"])\n[\"foo-5\"]\n\nThe following rules apply wrt how normal/conflict/weak patches override\n(note though that the new request is always added, even if it doesn't\noverride an existing request):\n\nPATCH  OVERRIDES: foo  !foo  ~foo\n-----  ---------- ---  ----  -----\nfoo               Y    Y     Y\n!foo              N    N     N\n~foo              N    N     Y\n^foo              Y    Y     Y\n\nArgs:\nrequires (list of str or `version.Requirement`): Request.\npatchlist (list of str): List of patch requests.\n\nReturns:\nList of `version.Requirement`: Patched request.", "source": "codesearchnet"}
{"code": "def __field_to_parameter_type(self, field):\n    variant = field.variant\n    if (variant == messages.Variant.MESSAGE):\n        raise TypeError(\"A message variant can't be used in a parameter.\")\n    custom_variant_map = {messages.Variant.SINT32: 'int32', messages.Variant.SINT64: 'int64', messages.Variant.BOOL: 'boolean', messages.Variant.ENUM: 'string'}\n    return (custom_variant_map.get(variant) or variant.name.lower())", "docstring": "Converts the field variant type into a string describing the parameter.\n\nArgs:\nfield: An instance of a subclass of messages.Field.\n\nReturns:\nA string corresponding to the variant enum of the field, with a few\nexceptions. In the case of signed ints, the 's' is dropped; for the BOOL\nvariant, 'boolean' is used; and for the ENUM variant, 'string' is used.\n\nRaises:\nTypeError: if the field variant is a message variant.", "source": "codesearchnet"}
{"code": "def copy_pkg(self, filename, id_=(- 1)):\n    self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE)", "docstring": "Copy a package to the distribution server.\n\nBundle-style packages must be zipped prior to copying.\n\nArgs:\nfilename: Full path to file to upload.\nid_: ID of Package object to associate with, or -1 for new\npackages (default).", "source": "codesearchnet"}
{"code": "def _Enter(tensor, frame_name, is_constant=False, parallel_iterations=10, use_ref=True, use_input_shape=True, name=None):\n    tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)\n    if isinstance(tensor, tensor_lib.Tensor):\n        if tensor.dtype._is_ref_dtype and use_ref:\n            result = gen_control_flow_ops.ref_enter(tensor, frame_name, is_constant, parallel_iterations, name=name)\n        else:\n            result = gen_control_flow_ops.enter(tensor, frame_name, is_constant, parallel_iterations, name=name)\n        if use_input_shape:\n            result.set_shape(tensor.get_shape())\n        return result\n    elif isinstance(tensor, composite_tensor.CompositeTensor):\n\n        def enter_component(t):\n            return _Enter(t, frame_name, is_constant, parallel_iterations, use_ref, use_input_shape)\n        return nest.map_structure(enter_component, tensor, expand_composites=True)\n    else:\n        raise TypeError(f\"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.\")", "docstring": "Creates or finds a child frame, and makes `tensor` available to it.\n\nThe unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `tensor` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations`\niterations are run in parallel in the child frame.\n\nArgs:\ntensor: The tensor to be made available to the child frame.\nframe_name: The name of the child frame.\nis_constant: If true, the output is constant within the child frame.\nparallel_iterations: The number of iterations allowed to run in parallel.\nuse_ref: If true, use ref_enter if tensor is of ref type.\nuse_input_shape: If true, set the result's shape based on tensor's shape.\nname: A name for this operation (optional).\n\nReturns:\nThe same tensor as `tensor`.\n\nRaises:\nValueError: If any tensor in `tensor` has a less specific shape\nthan its corresponding shape in `shape_invariant`.", "source": "github-repos"}
{"code": "def get_vep_info(vep_string, vep_header):\n    \n    \n    vep_annotations = [\n        dict(zip(vep_header, vep_annotation.split('|'))) \n        for vep_annotation in vep_string.split(',')\n    ]\n    \n    return vep_annotations", "docstring": "Make the vep annotations into a dictionaries\n\nA vep dictionary will have the vep column names as keys and\nthe vep annotations as values.\nThe dictionaries are stored in a list\n\nArgs:\nvep_string (string): A string with the CSQ annotation\nvep_header (list): A list with the vep header\n\nReturn:\nvep_annotations (list): A list of vep dicts", "source": "juraj-google-style"}
{"code": "def peek_step(self, val: ObjectValue, sn: 'DataNode') -> Tuple[(Value, 'DataNode')]:\n    cn = sn.get_data_child(self.name, self.namespace)\n    try:\n        return (val[cn.iname()], cn)\n    except (IndexError, KeyError, TypeError):\n        return (None, cn)", "docstring": "Return member value addressed by the receiver + its schema node.\n\nArgs:\nval: Current value (object).\nsn:  Current schema node.", "source": "codesearchnet"}
{"code": "def get_stats_for_node_def(graph, node, statistic_type) -> Any:\n    try:\n        stats_func = _stats_registry.lookup(node.op + ',' + statistic_type)\n        result = stats_func(graph, node)\n    except LookupError:\n        result = OpStats(statistic_type)\n    return result", "docstring": "Looks up the node's statistics function in the registry and calls it.\n\nThis function takes a Graph object and a NodeDef from a GraphDef, and if\nthere's an associated statistics method, calls it and returns a result. If no\nfunction has been registered for the particular node type, it returns an empty\nstatistics object.\n\nArgs:\ngraph: A Graph object that's been set up with the node's graph.\nnode: A NodeDef describing the operator.\nstatistic_type: A string identifying the statistic we're interested in.\n\nReturns:\nAn OpStats object containing information about resource usage.", "source": "github-repos"}
{"code": "def add_archive_as_dir(self, zip_file_obj):\n        \n        BalancedDiscStorage._check_interface(zip_file_obj)\n\n        file_hash = self._get_hash(zip_file_obj)\n        dir_path = self._create_dir_path(file_hash)\n        full_path = os.path.join(dir_path, file_hash)\n\n        if os.path.exists(full_path):\n            shutil.rmtree(full_path)\n\n        os.mkdir(full_path)\n\n        try:\n            self._unpack_zip(zip_file_obj, full_path)\n        except Exception:\n            shutil.rmtree(full_path)\n            raise\n\n        return PathAndHash(path=full_path, hash=file_hash)", "docstring": "Add archive to the storage and unpack it.\n\nArgs:\nzip_file_obj (file): Opened file-like object.\n\nReturns:\nobj: Path where the `zip_file_obj` was unpacked wrapped in \\\n:class:`.PathAndHash` structure.\n\nRaises:\nValueError: If there is too many files in .zip archive. \\\nSee :attr:`._max_zipfiles` for details.\nAssertionError: If the `zip_file_obj` is not file-like object.", "source": "juraj-google-style"}
{"code": "def fetch_token(self, **kwargs):\n    kwargs.setdefault('client_secret', self.client_config['client_secret'])\n    return self.oauth2session.fetch_token(self.client_config['token_uri'], **kwargs)", "docstring": "Completes the Authorization Flow and obtains an access token.\n\nThis is the final step in the OAuth 2.0 Authorization Flow. This is\ncalled after the user consents.\n\nThis method calls\n:meth:`requests_oauthlib.OAuth2Session.fetch_token`\nand specifies the client configuration's token URI (usually Google's\ntoken server).\n\nArgs:\nkwargs: Arguments passed through to\n:meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least\none of ``code`` or ``authorization_response`` must be\nspecified.\n\nReturns:\nMapping[str, str]: The obtained tokens. Typically, you will not use\nreturn value of this function and instead and use\n:meth:`credentials` to obtain a\n:class:`~google.auth.credentials.Credentials` instance.", "source": "codesearchnet"}
{"code": "def attention_bias_batch(batch_coordinates_q,\n                         batch_coordinates_k=None,\n                         condition_fn=None):\n  \n  if batch_coordinates_k is None:\n    batch_coordinates_k = batch_coordinates_q\n\n  \n  def to_float(bc):\n    bc = tf.squeeze(bc, 1)\n    bc = tf.to_float(bc)\n    return bc\n\n  \n  bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1)\n  bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0)\n  bias_batch = bc_h - bc_v\n  bias_batch = condition_fn(bias_batch)\n  bias_batch *= -1e9\n  return bias_batch", "docstring": "Generate a mask to prevent the batch to attend to each others.\n\nArgs:\nbatch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the\ncoordinates of the batches\nbatch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the\ncoordinates of the batches. If None, do self-attention.\ncondition_fn: Callable defining the attention mask.\n\nReturns:\nFloat-like Tensor of shape [length_q, length_k] containing either 0 or\n-infinity (-1e9).", "source": "juraj-google-style"}
{"code": "def hashed(field_name, percent, fields=None, count=0):\n    if (field_name is None):\n        raise Exception('Hash field must be specified')\n\n    def _hashed_sampling(sql):\n        projection = Sampling._create_projection(fields)\n        sql = ('SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % (projection, sql, field_name, percent))\n        if (count != 0):\n            sql = ('%s LIMIT %d' % (sql, count))\n        return sql\n    return _hashed_sampling", "docstring": "Provides a sampling strategy based on hashing and selecting a percentage of data.\n\nArgs:\nfield_name: the name of the field to hash.\npercent: the percentage of the resulting hashes to select.\nfields: an optional list of field names to retrieve.\ncount: optional maximum count of rows to pick.\nReturns:\nA sampling function that can be applied to get a hash-based sampling.", "source": "codesearchnet"}
{"code": "def parse(self, **global_args):\n        \n\n        if self.build_file not in ParseContext._parsed:\n            \n            \n            butcher_context = {}\n            for str_to_exec in self._strs_to_exec:\n                ast = compile(str_to_exec, '<string>', 'exec')\n                exec_function(ast, butcher_context)\n\n            with ParseContext.activate(self):\n                startdir = os.path.abspath(os.curdir)\n                try:\n                    os.chdir(self.build_file.path_on_disk)\n                    if self.build_file not in ParseContext._parsed:\n                        ParseContext._parsed.add(self.build_file)\n                        eval_globals = copy.copy(butcher_context)\n                        eval_globals.update(\n                            {'ROOT_DIR': self.build_file.path_on_disk,\n                             '__file__': 'bogus please fix this'})\n                        eval_globals.update(global_args)\n                        exec_function(self.build_file.code, eval_globals)\n                finally:\n                    os.chdir(startdir)", "docstring": "Entry point to parsing a BUILD file.\n\nArgs:\n**global_args: Variables to include in the parsing environment.", "source": "juraj-google-style"}
{"code": "def __init__(self, root_path, root_url, site_title, site_desc=None):\n\t\t\n\t\tself.root_path = root_path\n\t\tself.root_url = root_url\n\t\tself.site_title = site_title\n\t\tself.site_desc = site_desc\n\n\t\tself.cm = russell.content.ContentManager(root_url) \n\t\tself.pages = self.cm.pages\n\t\tself.posts = self.cm.posts\n\t\tself.tags = self.cm.tags\n\n\t\tself.asset_hash = {}\n\n\t\tself.jinja = jinja2.Environment(\n\t\t\tloader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),\n\t\t)\n\t\tself.jinja.globals.update({\n\t\t\t'a': make_link,\n\t\t\t'asset_hash': self.asset_hash,\n\t\t\t'asset_url': self.get_asset_url,\n\t\t\t'now': datetime.now(),\n\t\t\t'root_url': self.root_url,\n\t\t\t'site_description': self.site_desc,\n\t\t\t'site_title': self.site_title,\n\t\t\t'tags': self.tags,\n\t\t})", "docstring": "Constructor.\n\nArgs:\nroot_path (str): Full path to the directory which contains the posts,\npages, templates etc. directories.\nroot_url (str): The root URL of your website.\nsite_title (str): The title of your website.\nsite_desc (str): A subtitle or description of your website.", "source": "juraj-google-style"}
{"code": "def query(self, expr, **kwargs):\n    columns = self.columns\n\n    def query_builder(df, **kwargs):\n        df = df.copy()\n        df.index = pandas.RangeIndex(len(df))\n        df.columns = columns\n        df.query(expr, inplace=True, **kwargs)\n        df.columns = pandas.RangeIndex(len(df.columns))\n        return df\n    func = self._prepare_method(query_builder, **kwargs)\n    new_data = self._map_across_full_axis(1, func)\n    new_index = self.compute_index(0, new_data, True)\n    return self.__constructor__(new_data, new_index, self.columns, self.dtypes)", "docstring": "Query columns of the DataManager with a boolean expression.\n\nArgs:\nexpr: Boolean expression to query the columns with.\n\nReturns:\nDataManager containing the rows where the boolean expression is satisfied.", "source": "codesearchnet"}
{"code": "def read(self, nodes=None, **kwargs):\n    if (nodes is None):\n        required_nodes = (self.wishlist - set(self.datasets.keys()))\n        nodes = self.dep_tree.leaves(nodes=required_nodes)\n    return self._read_datasets(nodes, **kwargs)", "docstring": "Load datasets from the necessary reader.\n\nArgs:\nnodes (iterable): DependencyTree Node objects\n**kwargs: Keyword arguments to pass to the reader's `load` method.\n\nReturns:\nDatasetDict of loaded datasets", "source": "codesearchnet"}
{"code": "def create_resource(self, resource_type=None, uri=None):\n    if (resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]):\n        return resource_type(self, uri)\n    else:\n        raise TypeError('expecting Resource type, such as BasicContainer or NonRDFSource')", "docstring": "Convenience method for creating a new resource\n\nNote: A Resource is instantiated, but is not yet created.  Still requires resource.create().\n\nArgs:\nuri (rdflib.term.URIRef, str): uri of resource to create\nresource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer):  resource type to create\n\nReturns:\n(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type", "source": "codesearchnet"}
{"code": "def month_name_to_number(month, to_int=False):\n    \n    number = {\n        'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05',\n        'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10',\n        'Nov': '11', 'Dec': '12',\n    }.get(month)\n    return int(number) if to_int else number", "docstring": "Convert a month name (MMM) to its number (01-12).\n\nArgs:\nmonth (str): 3-letters string describing month.\nto_int (bool): cast number to int or not.\n\nReturns:\nstr/int: the month's number (between 01 and 12).", "source": "juraj-google-style"}
{"code": "def pad(self, images: 'torch.Tensor', size: int) -> 'torch.Tensor':\n    height, width = get_image_size(images, ChannelDimension.FIRST)\n    pad_height = (height \n    pad_width = (width \n    return F.pad(images, (0, 0, pad_width, pad_height), padding_mode='symmetric')", "docstring": "Pad an image to make the height and width divisible by `size`.\n\nArgs:\nimages (`torch.Tensor`):\nImages to pad.\nsize (`int`):\nThe size to make the height and width divisible by.\n\nReturns:\n`torch.Tensor`: The padded images.", "source": "github-repos"}
{"code": "def signature(cert, sig, body):\n    body = six.b(body)\n    sig = base64.decodestring(sig)\n    padder = padding.PKCS1v15()\n    public_key = cert.public_key()\n    try:\n        public_key.verify(sig, body, padder, hashes.SHA1())\n        return True\n    except InvalidSignature:\n        warnings.warn('Signature verification failed.')\n        return False", "docstring": "Validate data request signature.\n\nSee `validate.request` for additional info.\n\nArgs:\ncert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon\nsigning certificate.\nsig: str. Signature header value sent by request.\nbody: str. HTTPS request body.\n\nReturns:\nbool: True if valid, False otherwise.", "source": "codesearchnet"}
{"code": "def get_cost_per_kg(self, comp):\n        \n        comp = comp if isinstance(comp, Composition) else Composition(comp)\n        return self.get_cost_per_mol(comp) / (\n            comp.weight.to(\"kg\") * const.N_A)", "docstring": "Get best estimate of minimum cost/kg based on known data\n\nArgs:\ncomp:\nComposition as a pymatgen.core.structure.Composition\nReturns:\nfloat of cost/kg", "source": "juraj-google-style"}
{"code": "def Deserialize(self, reader):\n        \n        super(SpentCoinState, self).Deserialize(reader)\n\n        self.TransactionHash = reader.ReadUInt256()\n        self.TransactionHeight = reader.ReadUInt32()\n\n        count = reader.ReadVarInt()\n\n        items = [0] * count\n        for i in range(0, count):\n            index = reader.ReadUInt16()\n            height = reader.ReadUInt32()\n            items[i] = SpentCoinItem(index=index, height=height)\n\n        self.Items = items", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def WriteSignedBinaryBlobs(binary_urn, blobs, token=None):\n    if _ShouldUseLegacyDatastore():\n        aff4.FACTORY.Delete(binary_urn, token=token)\n        with data_store.DB.GetMutationPool() as mutation_pool:\n            with aff4.FACTORY.Create(binary_urn, collects.GRRSignedBlob, mode='w', mutation_pool=mutation_pool, token=token) as fd:\n                for blob in blobs:\n                    fd.Add(blob, mutation_pool=mutation_pool)\n    if data_store.RelationalDBEnabled():\n        blob_references = rdf_objects.BlobReferences()\n        current_offset = 0\n        for blob in blobs:\n            blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob.SerializeToString())\n            blob_references.items.Append(rdf_objects.BlobReference(offset=current_offset, size=len(blob.data), blob_id=blob_id))\n            current_offset += len(blob.data)\n        data_store.REL_DB.WriteSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn), blob_references)", "docstring": "Saves signed blobs to the datastore.\n\nIf a signed binary with the given URN already exists, its contents will get\noverwritten.\n\nArgs:\nbinary_urn: RDFURN that should serve as a unique identifier for the binary.\nblobs: An Iterable of signed blobs to write to the datastore.\ntoken: ACL token to use with the legacy (non-relational) datastore.", "source": "codesearchnet"}
{"code": "def initialize(self, prefix_name='default', *args, **kwargs):\n    if self.loaded:\n        raise WorkdirError(('Workdir %s already initialized' % self.path))\n    if (not os.path.exists(self.path)):\n        LOGGER.debug('Creating workdir %s', self.path)\n        os.makedirs(self.path)\n    self.prefixes[prefix_name] = self.prefix_class(self.join(prefix_name), *args, **kwargs)\n    self.prefixes[prefix_name].initialize()\n    if (self.current is None):\n        self._set_current(prefix_name)\n    self.load()\n    return self.prefixes[prefix_name]", "docstring": "Initializes a workdir by adding a new prefix to the workdir.\n\nArgs:\nprefix_name(str): Name of the new prefix to add\n*args: args to pass along to the prefix constructor\n*kwargs: kwargs to pass along to the prefix constructor\n\nReturns:\nThe newly created prefix\n\nRaises:\nPrefixAlreadyExists: if the prefix name already exists in the\nworkdir", "source": "codesearchnet"}
{"code": "def _get_dtype_from_nested_lists(list_or_tuple):\n    for elem in list_or_tuple:\n        if isinstance(elem, core.Tensor):\n            return elem.dtype.base_dtype\n        elif isinstance(elem, (list, tuple)):\n            maybe_dtype = _get_dtype_from_nested_lists(elem)\n            if maybe_dtype is not None:\n                return maybe_dtype\n    return None", "docstring": "Returns the dtype of any tensor-like object in `list_or_tuple`, if found.\n\nArgs:\nlist_or_tuple: A list or tuple representing an object that can be converted\nto a `tf.Tensor`.\n\nReturns:\nThe dtype of any tensor-like object in `list_or_tuple`, or `None` if no\nsuch object exists.", "source": "github-repos"}
{"code": "def make_value_from_datastore(self, value):\n    \n\n    if value is None:\n      return None\n    _json = json.loads(value, cls=JsonDecoder)\n    if self.data_type == dict:\n      return _json\n    return self.data_type.from_json(_json)", "docstring": "Convert value from datastore representation.\n\nArgs:\nvalue: datastore value.\n\nReturns:\nvalue to store in the model.", "source": "juraj-google-style"}
{"code": "def job_stories(self, raw=False, limit=None):\n    job_stories = self._get_stories('jobstories', limit)\n    if raw:\n        job_stories = [story.raw for story in job_stories]\n    return job_stories", "docstring": "Returns list of item ids of latest Job stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of Job stories.", "source": "codesearchnet"}
{"code": "def register(cls, config_class, model_class, exist_ok=False) -> None:\n    if hasattr(model_class, 'config_class') and model_class.config_class.__name__ != config_class.__name__:\n        raise ValueError(f'The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix one of those so they match!')\n    cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)", "docstring": "Register a new model for this class.\n\nArgs:\nconfig_class ([`PretrainedConfig`]):\nThe configuration corresponding to the model to register.\nmodel_class ([`PreTrainedModel`]):\nThe model to register.", "source": "github-repos"}
{"code": "def train_and_maybe_evaluate(hparams):\n    schema = taxi.read_schema(hparams.schema_file)\n    tf_transform_output = tft.TFTransformOutput(hparams.tf_transform_dir)\n    train_input = lambda: model.input_fn(hparams.train_files, tf_transform_output, batch_size=TRAIN_BATCH_SIZE)\n    eval_input = lambda: model.input_fn(hparams.eval_files, tf_transform_output, batch_size=EVAL_BATCH_SIZE)\n    train_spec = tf_estimator.TrainSpec(train_input, max_steps=hparams.train_steps)\n    serving_receiver_fn = lambda: model.example_serving_receiver_fn(tf_transform_output, schema)\n    exporter = tf_estimator.FinalExporter('chicago-taxi', serving_receiver_fn)\n    eval_spec = tf_estimator.EvalSpec(eval_input, steps=hparams.eval_steps, exporters=[exporter], name='chicago-taxi-eval')\n    run_config = tf_estimator.RunConfig(save_checkpoints_steps=999, keep_checkpoint_max=1)\n    serving_model_dir = os.path.join(hparams.output_dir, SERVING_MODEL_DIR)\n    run_config = run_config.replace(model_dir=serving_model_dir)\n    estimator = model.build_estimator(tf_transform_output, hidden_units=[max(2, int(FIRST_DNN_LAYER_SIZE * DNN_DECAY_FACTOR ** i)) for i in range(NUM_DNN_LAYERS)], config=run_config)\n    tf_estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n    return estimator", "docstring": "Run the training and evaluate using the high level API.\n\nArgs:\nhparams: Holds hyperparameters used to train the model as name/value pairs.\n\nReturns:\nThe estimator that was used for training (and maybe eval)", "source": "github-repos"}
{"code": "def SetAndLoadTagFile(self, tagging_file_path):\n    \n    tag_file = tagging_file.TaggingFile(tagging_file_path)\n    self._tagging_rules = tag_file.GetEventTaggingRules()", "docstring": "Sets the tag file to be used by the plugin.\n\nArgs:\ntagging_file_path (str): path of the tagging file.", "source": "juraj-google-style"}
{"code": "def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):\n    return self.client.api.get_archive(self.id, path, chunk_size)", "docstring": "Retrieve a file or folder from the container in the form of a tar\narchive.\n\nArgs:\npath (str): Path to the file or folder to retrieve\nchunk_size (int): The number of bytes returned by each iteration\nof the generator. If ``None``, data will be streamed as it is\nreceived. Default: 2 MB\n\nReturns:\n(tuple): First element is a raw tar data stream. Second element is\na dict containing ``stat`` information on the specified ``path``.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> f = open('./sh_bin.tar', 'wb')\n>>> bits, stat = container.get_archive('/bin/sh')\n>>> print(stat)\n{'name': 'sh', 'size': 1075464, 'mode': 493,\n'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}\n>>> for chunk in bits:\n...    f.write(chunk)\n>>> f.close()", "source": "codesearchnet"}
{"code": "def skip(reason, extras=None):\n    raise signals.TestSkip(reason, extras)", "docstring": "Skip a test.\n\nArgs:\nreason: The reason this test is skipped.\nextras: An optional field for extra information to be included in\ntest result.\n\nRaises:\nsignals.TestSkip: Mark a test as skipped.", "source": "github-repos"}
{"code": "def stop(self, timeout_s=None):\n    self._stopping.set()\n    with self._current_phase_thread_lock:\n        phase_thread = self._current_phase_thread\n        if (not phase_thread):\n            return\n    if phase_thread.is_alive():\n        phase_thread.kill()\n        _LOG.debug('Waiting for cancelled phase to exit: %s', phase_thread)\n        timeout = timeouts.PolledTimeout.from_seconds(timeout_s)\n        while (phase_thread.is_alive() and (not timeout.has_expired())):\n            time.sleep(0.1)\n        _LOG.debug('Cancelled phase %s exit', (\"didn't\" if phase_thread.is_alive() else 'did'))\n    self.test_state.stop_running_phase()", "docstring": "Stops execution of the current phase, if any.\n\nIt will raise a ThreadTerminationError, which will cause the test to stop\nexecuting and terminate with an ERROR state.\n\nArgs:\ntimeout_s: int or None, timeout in seconds to wait for the phase to stop.", "source": "codesearchnet"}
{"code": "def parse_gene_panel(path, institute='cust000', panel_id='test', panel_type='clinical', date=datetime.now(), version=1.0, display_name=None, genes=None):\n    LOG.info('Parsing gene panel %s', panel_id)\n    gene_panel = {}\n    gene_panel['path'] = path\n    gene_panel['type'] = panel_type\n    gene_panel['date'] = date\n    gene_panel['panel_id'] = panel_id\n    gene_panel['institute'] = institute\n    version = (version or 1.0)\n    gene_panel['version'] = float(version)\n    gene_panel['display_name'] = (display_name or panel_id)\n    if (not path):\n        panel_handle = genes\n    else:\n        panel_handle = get_file_handle(gene_panel['path'])\n    gene_panel['genes'] = parse_genes(gene_lines=panel_handle)\n    return gene_panel", "docstring": "Parse the panel info and return a gene panel\n\nArgs:\npath(str): Path to panel file\ninstitute(str): Name of institute that owns the panel\npanel_id(str): Panel id\ndate(datetime.datetime): Date of creation\nversion(float)\nfull_name(str): Option to have a long name\n\nReturns:\ngene_panel(dict)", "source": "codesearchnet"}
{"code": "def dot_product(p1, p2, o=(0, 0)):\n    \n    v1 = vector(o, p1)\n    v2 = vector(o, p2)\n    return v1[0] * v2[0] + v1[1] * v2[1]", "docstring": "Returns dot product\nArgs:\np1, p2: point (x, y)\no: origin", "source": "juraj-google-style"}
{"code": "def from_spec(cls, spec: Spec, _run_init: bool=True) -> Union[Self, type[Self]]:\n    if spec.type is None:\n        raise ValueError(f'Spec type not found in {spec}')\n    subspace = _spec_type_to_subspace(spec.type)\n    subclass: type[Self] = _KNOWN_SPECIFIABLE[subspace].get(spec.type, None)\n    if subclass is None:\n        raise ValueError(f\"Unknown spec type '{spec.type}' in {spec}\")\n    if spec.config is None:\n        return subclass\n    kwargs = {k: _specifiable_from_spec_helper(v, _run_init) for k, v in spec.config.items()}\n    if _run_init:\n        kwargs['_run_init'] = True\n    return subclass(**kwargs)", "docstring": "Generate a `Specifiable` subclass object based on a spec.\n\nArgs:\nspec: the specification of a `Specifiable` subclass object\n_run_init: whether to call `__init__` or not for the initial instantiation\n\nReturns:\nSelf: the `Specifiable` subclass object", "source": "github-repos"}
{"code": "def _distributed_apply(self, distribution, grads_and_vars, global_step=None, name=None):\n    reduced_grads = distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM, grads_and_vars)\n    var_list = [v for _, v in grads_and_vars]\n    grads_and_vars = zip(reduced_grads, var_list)\n    with ops.init_scope():\n        self._create_slots(var_list)\n\n    def update(v, g):\n        \n        assert v is not None\n        try:\n            g = indexed_slices.convert_to_tensor_or_indexed_slices(g)\n        except TypeError:\n            raise TypeError('Gradient must be convertible to a Tensor or IndexedSlices, or None: %s' % g)\n        if not isinstance(g, (tensor.Tensor, indexed_slices.IndexedSlices)):\n            raise TypeError('Gradient must be a Tensor, IndexedSlices, or None: %s' % g)\n        p = _get_processor(v)\n        if context.executing_eagerly() or (resource_variable_ops.is_resource_variable(v) and (not v._in_graph_mode)):\n            scope_name = v.name.split(':')[0]\n        else:\n            scope_name = v.op.name\n        with ops.name_scope('update_' + scope_name):\n            return p.update_op(self, g)\n    with ops.name_scope(name, self._name) as name:\n        self._prepare()\n        update_ops = [op for grad, var in grads_and_vars for op in distribution.extended.update(var, update, args=(grad,), group=False)]\n\n        def finish(self, update_ops):\n            return self._finish(update_ops, 'update')\n        non_slot_devices = distribution.extended.non_slot_devices(var_list)\n        finish_updates = distribution.extended.update_non_slot(non_slot_devices, finish, args=(self, update_ops), group=False)\n        if global_step is None:\n            apply_updates = distribution.group(finish_updates, name=name)\n        else:\n            with ops.control_dependencies(finish_updates):\n                apply_updates = distribution.extended.update(global_step, state_ops.assign_add, args=(1,), kwargs={'name': name})\n        if not context.executing_eagerly():\n            if isinstance(apply_updates, tensor.Tensor):\n                apply_updates = apply_updates.op\n            train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n            if apply_updates not in train_op:\n                train_op.append(apply_updates)\n        return apply_updates", "docstring": "A version of `apply_gradients` for cross-replica context.\n\nThis is a version of `apply_gradients()` for when you are using a\n`DistributionStrategy` and are in a cross-replica context. If in a\nreplica context, use `apply_gradients()` as normal.\n\nArgs:\ndistribution: A `DistributionStrategy` object.\ngrads_and_vars: List of (gradient, variable) pairs as returned by\n`compute_gradients()`, and then aggregated across replicas.\nglobal_step: Optional (mirrored) `Variable` to increment by one\nafter the variables have been updated.\nname: Optional name for the returned operation.  Default to the\nname passed to the `Optimizer` constructor.\n\nReturns:\nAn `Operation` that applies the specified gradients across all\nreplicas. If `global_step` was not None, that operation also\nincrements `global_step`", "source": "github-repos"}
{"code": "def find_call(self, path, method):\n    if (not path.endswith('/')):\n        path += '/'\n    path = path.split('/')[1:]\n    return self._recursive_route_match(self._routes, path, method, [])", "docstring": "Find callable for the specified URL path and HTTP method.\n\nArgs:\npath (:obj:`str`): URL path to match\nmethod (:obj:`str`): HTTP method\n\nNote:\nA trailing '/' is always assumed in the path.", "source": "codesearchnet"}
{"code": "def _GetRowValue(self, query_hash, row, value_name):\n    \n    keys_name_to_index_map = self._keys_per_query.get(query_hash, None)\n    if not keys_name_to_index_map:\n      keys_name_to_index_map = {\n          name: index for index, name in enumerate(row.keys())}\n      self._keys_per_query[query_hash] = keys_name_to_index_map\n\n    value_index = keys_name_to_index_map.get(value_name)\n\n    \n    \n    return row[value_index]", "docstring": "Retrieves a value from the row.\n\nArgs:\nquery_hash (int): hash of the query, that uniquely identifies the query\nthat produced the row.\nrow (sqlite3.Row): row.\nvalue_name (str): name of the value.\n\nReturns:\nobject: value.", "source": "juraj-google-style"}
{"code": "def setup(self, puller: bool=None, subscriptions: Dict[(str, Any)]={}):\n    if puller:\n        puller = self._zmq.socket(zmq.PULL)\n        (ip, port, host) = self.rslv('rcv')\n        puller.bind('tcp:\n        self.poll(puller)\n    if subscriptions:\n        for publisher in subscriptions:\n            self.add(publisher, subscriptions[publisher].get('slots'), subscriptions[publisher].get('buffer-length'))\n        logger.info('Listening to %s', {k: (1 if (subscriptions[k].get('slots') is None) else len(subscriptions[k].get('slots'))) for k in subscriptions})", "docstring": "Sets up this Node with the specified Interfaces before it is run.\n\nArgs:\npuller: Indication if a Puller Interface should be created.\nsubscriptions: Collection of the Subscriber Interfaces to be created and their Slots.", "source": "codesearchnet"}
{"code": "def _take_lease(self, lease, uuid_path, safe=True):\n        \n        if safe:\n            lease_taken_by = self._lease_valid(lease)\n            if lease_taken_by and lease_taken_by != uuid_path:\n                raise LagoSubnetLeaseTakenException(\n                    lease.subnet, lease_taken_by\n                )\n\n        with open(uuid_path) as f:\n            uuid = f.read()\n        with open(lease.path, 'wt') as f:\n            utils.json_dump((uuid_path, uuid), f)\n\n        LOGGER.debug(\n            'Assigned subnet lease {} to {}'.format(lease.path, uuid_path)\n        )", "docstring": "Persist the given lease to the store and make the prefix in uuid_path\nhis owner\n\nArgs:\nlease(lago.subnet_lease.Lease): Object representation of the lease\nuuid_path (str): Path to the prefix uuid\nsafe (bool): If true (the default), validate the the lease\nisn't taken.\n\nRaises:\nLagoSubnetLeaseException: If safe == True and the lease is already\ntaken.", "source": "juraj-google-style"}
{"code": "def recipe_iam(config, auth_write, role, email):\n    iam(config, {'auth': auth_write, 'role': role, 'email': email})", "docstring": "Sets project permissions for an email.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nrole (string) - projects/[project name]/roles/[role name]\nemail (string) - Email address to grant role to.", "source": "github-repos"}
{"code": "def create_from_binary(cls, ignore_signature_check, binary_view):\n        \n        sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \\\n        usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \\\n            cls._REPR.unpack(binary_view[:cls._REPR.size])\n\n        baad = None\n        if not ignore_signature_check:\n            if sig == b\"FILE\":\n                baad = False\n            elif sig == b\"BAAD\":\n                baad = True\n            else:\n                raise HeaderError(\"Entry has no valid signature.\", \"MFTHeader\")\n\n        if fx_offset < MFTHeader._REPR.size: \n            raise HeaderError(\"Fix up array begins within the header.\", \"MFTHeader\")\n        if first_attr_offset < cls._REPR.size: \n            raise HeaderError(\"First attribute offset points to inside of the header.\", \"MFTHeader\")\n        if entry_len > alloc_len: \n            raise HeaderError(\"Logical size of the MFT is bigger than MFT allocated size.\", \"MFTHeader\")\n\n        file_ref, file_seq = get_file_reference(base_record)\n        nw_obj = cls((baad, fx_offset, fx_count, lsn, seq_number, hard_link_count,\n            first_attr_offset, MftUsageFlags(usage_flags), entry_len, alloc_len,\n            file_ref, file_seq, next_attr_id, record_n))\n\n        return nw_obj", "docstring": "Creates a new object MFTHeader from a binary stream. The binary\nstream can be represented by a byte string, bytearray or a memoryview of the\nbytearray.\n\nArgs:\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\n\nReturns:\nMFTHeader: New object using hte binary stream as source", "source": "juraj-google-style"}
{"code": "def get_value(self):\n    try:\n        self.raw_value\n    except (AttributeError, KeyError) as err:\n        self._reraise_if_required(err)\n        default_value = self.default_value\n        if self.transform_default:\n            return self.transform(default_value)\n        return default_value\n    else:\n        value = {}\n        for (key, subsetting) in self.settings.items():\n            value[key] = subsetting.get_value()\n        return value", "docstring": "Return dictionary with values of subsettings.\n\nReturns:\ndict: values of subsettings.", "source": "codesearchnet"}
{"code": "def make_legacy_input_feature_spec(include_label=True):\n    result = {}\n    if include_label:\n        result['clicked'] = tf.io.FixedLenFeature(shape=[], dtype=tf.int64)\n    for name in _INTEGER_COLUMN_NAMES:\n        result[name] = tf.io.FixedLenFeature(shape=[], dtype=tf.int64, default_value=-1)\n    for name in _CATEGORICAL_COLUMN_NAMES:\n        result[name] = tf.io.FixedLenFeature(shape=[], dtype=tf.string, default_value='')\n    return result", "docstring": "Input schema definition.\n\nArgs:\ninclude_label: Indicates whether the label feature should be included.\nReturns:\nA `Schema` object.", "source": "github-repos"}
{"code": "def register(self, type_name: str, cls: Type[Any], override_existing: bool=False) -> None:\n    if type_name in self._type_to_cls_map and (not override_existing):\n        raise KeyError(f'Type {type_name!r} has already been registered with class {self._type_to_cls_map[type_name].__name__}.')\n    self._type_to_cls_map[type_name] = cls", "docstring": "Register a ``symbolic.Object`` class with a type name.\n\nArgs:\ntype_name: String identifier for the class, which will be used as the\nvalue of `_type` property when deciding which class to construct object\nwhen converting a JSON value to object.\ncls: Class to register.\noverride_existing: Whether allow to override existing value if type name\nis already registered.\n\nRaises:\nKeyError: If type_name is already registered and override_existing is set\nto False.", "source": "github-repos"}
{"code": "def __call__(self, input_ids: torch.LongTensor, z_threshold: float=3.0, return_dict: bool=False) -> Union[WatermarkDetectorOutput, np.array]:\n    if input_ids[0, 0] == self.bos_token_id:\n        input_ids = input_ids[:, 1:]\n    if input_ids.shape[-1] - self.processor.context_width < 1:\n        raise ValueError(f'Must have at least `1` token to score after the first min_prefix_len={self.processor.context_width} tokens required by the seeding scheme.')\n    num_tokens_scored, green_token_count = self._score_ngrams_in_passage(input_ids)\n    z_score = self._compute_z_score(green_token_count, num_tokens_scored)\n    prediction = z_score > z_threshold\n    if return_dict:\n        p_value = self._compute_pval(z_score)\n        confidence = 1 - p_value\n        return WatermarkDetectorOutput(num_tokens_scored=num_tokens_scored, num_green_tokens=green_token_count, green_fraction=green_token_count / num_tokens_scored, z_score=z_score, p_value=p_value, prediction=prediction, confidence=confidence)\n    return prediction", "docstring": "Args:\ninput_ids (`torch.LongTensor`):\nThe watermark generated text. It is advised to remove the prompt, which can affect the detection.\nz_threshold (`Dict`, *optional*, defaults to `3.0`):\nChanging this threshold will change the sensitivity of the detector. Higher z threshold gives less\nsensitivity and vice versa for lower z threshold.\nreturn_dict (`bool`,  *optional*, defaults to `False`):\nWhether to return `~generation.WatermarkDetectorOutput` or not. If not it will return boolean predictions,\nma\nReturn:\n[`~generation.WatermarkDetectorOutput`] or `np.array`: A [`~generation.WatermarkDetectorOutput`]\nif `return_dict=True` otherwise a `np.array`.", "source": "github-repos"}
{"code": "def get_profiles(adapter, vcf_file):\n    vcf = get_file_handle(vcf_file)\n    individuals = vcf.samples\n    profiles = {individual: [] for individual in individuals}\n    for profile_variant in adapter.profile_variants():\n        ref = profile_variant['ref']\n        alt = profile_variant['alt']\n        pos = profile_variant['pos']\n        end = (pos + 1)\n        chrom = profile_variant['chrom']\n        region = f'{chrom}:{pos}-{end}'\n        found_variant = False\n        for variant in vcf(region):\n            variant_id = get_variant_id(variant)\n            if (variant_id == profile_variant['_id']):\n                found_variant = True\n                for (i, individual) in enumerate(individuals):\n                    genotype = GENOTYPE_MAP[variant.gt_types[i]]\n                    if (genotype == 'hom_alt'):\n                        gt_str = f'{alt}{alt}'\n                    elif (genotype == 'het'):\n                        gt_str = f'{ref}{alt}'\n                    else:\n                        gt_str = f'{ref}{ref}'\n                    profiles[individual].append(gt_str)\n                break\n        if (not found_variant):\n            for individual in individuals:\n                profiles[individual].append(f'{ref}{ref}')\n    return profiles", "docstring": "Given a vcf, get a profile string for each sample in the vcf\nbased on the profile variants in the database\n\nArgs:\nadapter(MongoAdapter): Adapter to mongodb\nvcf_file(str): Path to vcf file\n\nReturns:\nprofiles (dict(str)): The profiles (given as strings) for each sample\nin vcf.", "source": "codesearchnet"}
{"code": "def _evolve(self, state, qargs=None):\n        \n        \n        if qargs is not None:\n            return SuperOp(self)._evolve(state, qargs)\n        \n        state = self._format_state(state, density_matrix=True)\n        if state.shape[0] != self._input_dim:\n            raise QiskitError(\n                \"QuantumChannel input dimension is not equal to state dimension.\"\n            )\n        return np.einsum('AB,AiBj->ij', state,\n                         np.reshape(self._data, self._bipartite_shape))", "docstring": "Evolve a quantum state by the QuantumChannel.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nDensityMatrix: the output quantum state as a density matrix.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"}
{"code": "def stop(self, drain_queue_and_join=True):\n    with self.start_stop_lock:\n        if not self.running:\n            return\n        self.running = False\n        if drain_queue_and_join:\n            while True:\n                try:\n                    value = self.future_queue.get(block=True, timeout=0.1)\n                    if isinstance(value, Exception):\n                        raise value\n                    inputs = value.get()\n                    self.future_queue.task_done()\n                    if inputs is not None:\n                        self.ready_queue.put(inputs)\n                except queue.Empty:\n                    break\n            self.run_thread.join()\n        self.run_thread = None\n        _SHARED_SEQUENCES[self.uid] = None", "docstring": "Stops running threads and wait for them to exit, if necessary.\n\nThis method is thread safe and is called from various threads. Note that\nthe `drain_queue_and_join` argument must be set correctly.\nIt is safe to call this method multiple times, extra calls are ignored.\n\nArgs:\ndrain_queue_and_join: set to True to drain the queue of pending\nitems and wait for the worker thread to complete. Set to False\nif invoked from a worker thread to avoid deadlocks. Note that\nsetting this to False means this enqueuer won't be reused.", "source": "github-repos"}
{"code": "def Validate(self, problems, validate_children=True):\n    self.ValidateRouteId(problems)\n    self.ValidateServicePeriod(problems)\n    self.ValidateDirectionId(problems)\n    self.ValidateTripId(problems)\n    self.ValidateShapeIdsExistInShapeList(problems)\n    self.ValidateRouteIdExistsInRouteList(problems)\n    self.ValidateServiceIdExistsInServiceList(problems)\n    self.ValidateBikesAllowed(problems)\n    self.ValidateWheelchairAccessible(problems)\n    if (self._schedule and validate_children):\n        self.ValidateChildren(problems)", "docstring": "Validate attributes of this object.\n\nCheck that this object has all required values set to a valid value without\nreference to the rest of the schedule. If the _schedule attribute is set\nthen check that references such as route_id and service_id are correct.\n\nArgs:\nproblems: A ProblemReporter object\nvalidate_children: if True and the _schedule attribute is set than call\nValidateChildren", "source": "codesearchnet"}
{"code": "def experimental_set_type(self, type_proto) -> None:\n    with self.graph._c_graph.get() as c_graph:\n        if type_proto.type_id not in (full_type_pb2.TFT_UNSET, full_type_pb2.TFT_PRODUCT):\n            raise ValueError('error setting the type of ', self.name, ': expected TFT_UNSET or TFT_PRODUCT, got ', type_proto.type_id)\n        with c_api_util.tf_buffer(type_proto.SerializeToString()) as serialized:\n            pywrap_tf_session.SetFullType(c_graph, self._c_op, serialized)", "docstring": "Sets the corresponding node's `experimental_type` field.\n\nSee the description of `NodeDef.experimental_type` for more info.\n\nArgs:\ntype_proto: A FullTypeDef proto message. The root type_if of this object\nmust be `TFT_PRODUCT`, even for ops which only have a singlre return\nvalue.", "source": "github-repos"}
{"code": "def volatility(self, strike: types.FloatTensor, expiry_dates: Optional[types.DateTensor]=None, expiry_times: Optional[types.FloatTensor]=None, term: Optional[types.Period]=None) -> types.FloatTensor:\n    pass", "docstring": "Returns the interpolated volatility on a specified set of expiries.\n\nArgs:\nstrike: The strikes for which the interpolation is desired.\nexpiry_dates: Optional input specifying the expiry dates for which\ninterpolation is desired. The user should supply either `expiry_dates`\nor `expiry_times` for interpolation.\nexpiry_times: Optional real `Tensor` containing the time to expiration\nfor which interpolation is desired. The user should supply either\n`expiry_dates` or `expiry_times` for interpolation.\nterm: Optional input specifying the term of the underlying rate for\nwhich the interpolation is desired. Relevant for interest rate implied\nvolatility data.\n\nReturns:\nA `Tensor` of the same shape as `expiry` with the interpolated volatility\nfrom the volatility surface.", "source": "github-repos"}
{"code": "def write_payload(payload=None, objectInput=None):\n    temp = tempfile.mkstemp()[1]\n    log.debug('Write payload in temp file {!r}'.format(temp))\n    with open(temp, 'wb') as f:\n        if payload:\n            payload = base64.b64decode(payload)\n        elif objectInput:\n            if six.PY3:\n                payload = objectInput.buffer.read()\n            elif six.PY2:\n                payload = objectInput.read()\n        f.write(payload)\n    return temp", "docstring": "This function writes a base64 payload or file object on disk.\n\nArgs:\npayload (string): payload in base64\nobjectInput (object): file object/standard input to analyze\n\nReturns:\nPath of file", "source": "codesearchnet"}
{"code": "def adaptive_set(\n        self,\n        reannealing_per=50,\n        thermostat=0.9,\n        t_min=0.001,\n        t_default=1.0\n    ):\n        \n        self.__reannealing_per = reannealing_per\n        self.__thermostat = thermostat\n        self.__t_min = t_min\n        self.__t_default = t_default", "docstring": "Init for Adaptive Simulated Annealing.\n\nArgs:\nreannealing_per:    How often will this model reanneals there per cycles.\nthermostat:         Thermostat.\nt_min:              The minimum temperature.\nt_default:          The default temperature.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, tx):\n        \n        inputs = [Input.from_dict(input_) for input_ in tx['inputs']]\n        outputs = [Output.from_dict(output) for output in tx['outputs']]\n        return cls(tx['operation'], tx['asset'], inputs, outputs,\n                   tx['metadata'], tx['version'], hash_id=tx['id'])", "docstring": "Transforms a Python dictionary to a Transaction object.\n\nArgs:\ntx_body (dict): The Transaction to be transformed.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Transaction`", "source": "juraj-google-style"}
{"code": "def writeTable(self, tableName):\n    lock_and_call((lambda : self._impl.writeTable(tableName)), self._lock)", "docstring": "Write the table corresponding to the specified name, equivalent to the\nAMPL statement\n\n.. code-block:: ampl\n\nwrite table tableName;\n\nArgs:\ntableName: Name of the table to be written.", "source": "codesearchnet"}
{"code": "def _get_offset_from_gcs(self):\n    headers = {'content-range': 'bytes */*'}\n    (status, response_headers, content) = self._api.put_object(self._path_with_token, headers=headers)\n    errors.check_status(status, [308], self._path, headers, response_headers, content, {'upload_path': self._path_with_token})\n    val = response_headers.get('range')\n    if (val is None):\n        return (- 1)\n    (_, offset) = val.rsplit('-', 1)\n    return int(offset)", "docstring": "Get the last offset that has been written to GCS.\n\nThis is a utility method that does not modify self.\n\nReturns:\nan int of the last offset written to GCS by this upload, inclusive.\n-1 means nothing has been written.", "source": "codesearchnet"}
{"code": "def autodiscover(self, message):\n        \n        \n        if message[\"version\"] in self.allowed_versions:\n            logger.debug(\"<%s> Client version matches server \"\n                         \"version.\" % message[\"cuuid\"])\n            response = serialize_data({\"method\": \"OHAI Client\",\n                                       \"version\": self.version,\n                                       \"server_name\": self.server_name},\n                                      self.compression,\n                                      encryption=False)\n        else:\n            logger.warning(\"<%s> Client version %s does not match allowed server \"\n                           \"versions %s\" % (message[\"cuuid\"],\n                                            message[\"version\"],\n                                            self.version))\n            response = serialize_data({\"method\": \"BYE REGISTER\"},\n                                      self.compression,\n                                      encryption=False)\n\n        return response", "docstring": "This function simply returns the server version number as a response\nto the client.\n\nArgs:\nmessage (dict): A dictionary of the autodiscover message from the\nclient.\n\nReturns:\nA JSON string of the \"OHAI Client\" server response with the server's\nversion number.\n\nExamples:\n>>> response\n'{\"method\": \"OHAI Client\", \"version\": \"1.0\"}'", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor):\n    image_outputs = self.vision_tower(pixel_values).last_hidden_state\n    return self.multi_modal_projector(image_outputs)", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def get_symmetric_wallace_tensor(self, tau):\n    wallace = self.get_wallace_tensor(tau)\n    return Tensor((0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1]))))", "docstring": "Gets the symmetrized wallace tensor for determining\nyield strength criteria.\n\nArgs:\ntau (3x3 array-like): stress at which to evaluate\nthe wallace tensor.", "source": "codesearchnet"}
{"code": "class Constant(Initializer):\n\n    def __init__(self, value=0):\n        self.value = value\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        \n        del kwargs\n        return constant_op.constant(self.value, dtype=_get_dtype(dtype), shape=shape)\n\n    def get_config(self):\n        return {'value': self.value}", "docstring": "Initializer that generates tensors with constant values.\n\nAlso available via the shortcut function `tf.keras.initializers.constant`.\n\nOnly scalar values are allowed.\nThe constant value provided must be convertible to the dtype requested\nwhen calling the initializer.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = tf.keras.initializers.Constant(3.)\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = tf.keras.initializers.Constant(3.)\n>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\nArgs:\nvalue: A Python scalar.", "source": "github-repos"}
{"code": "def concrete(self, other=None):\n        \n\n        new_system = self.clone()\n        if other:\n            new_system.applyFeatures(other, missing=\"other\")\n        soft_features = self.getValue(SoftFeatures.SOFT, [])\n        score = 0\n        for f in sorted(soft_features, key=lambda f: f.soft, reverse=True):\n            try:\n                new_system.applyFeatures(f, missing=\"other\")\n                score += f.soft\n            except:\n                pass\n        new_system.delValue(SoftFeatures.SOFT)\n        return new_system, score", "docstring": "Return copy and score after being applied other system and soft features.\n\nArgs:\n\n- other(system, optional): system to apply just before soft features.\n\nReturn(tuple): tuple of the resulting system and its score.", "source": "juraj-google-style"}
{"code": "def find_custom_args_with_details(file_content: str, custom_args_var_name: str) -> list[dict]:\n    escaped_variable_name = re.escape(custom_args_var_name)\n    regex_pattern = f'^\\\\s*({escaped_variable_name})\\\\s*=\\\\s*(r?\\\\\"\\\\\"\\\\\")(.*?)(\\\\\"\\\\\"\\\\\")'\n    flags = re.MULTILINE | re.DOTALL\n    match = re.search(regex_pattern, file_content, flags)\n    if match:\n        content = match.group(3).strip()\n        return content\n    return None", "docstring": "Find the given custom args variable in the file content and return its content.\n\nArgs:\nfile_content: The string content of the Python file.\ncustom_args_var_name: The name of the custom args variable.", "source": "github-repos"}
{"code": "def from_string(rxn_string):\n    (rct_str, prod_str) = rxn_string.split('->')\n\n    def get_comp_amt(comp_str):\n        return {Composition(m.group(2)): float((m.group(1) or 1)) for m in re.finditer('([\\\\d\\\\.]*(?:[eE]-?[\\\\d\\\\.]+)?)\\\\s*([A-Z][\\\\w\\\\.\\\\(\\\\)]*)', comp_str)}\n    return BalancedReaction(get_comp_amt(rct_str), get_comp_amt(prod_str))", "docstring": "Generates a balanced reaction from a string. The reaction must\nalready be balanced.\n\nArgs:\nrxn_string:\nThe reaction string. For example, \"4 Li + O2-> 2Li2O\"\n\nReturns:\nBalancedReaction", "source": "codesearchnet"}
{"code": "def list_devices(device_type=None):\n    return distribution_lib.list_devices(device_type)", "docstring": "Return all the available devices based on the device type.\n\nNote: in a distributed setting, global devices are returned.\n\nArgs:\ndevice_type: string, one of `\"cpu\"`, `\"gpu\"` or `\"tpu\"`.\nDefaults to `\"gpu\"` or `\"tpu\"` if available when\n`device_type` is not provided. Otherwise\nwill return the `\"cpu\"` devices.\n\nReturn:\nList of devices that are available for distribute computation.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:\n    batch_size = hidden_states[0].shape[0]\n    hidden_states = torch.cat(hidden_states, dim=0)\n    cls_token, hidden_states = (hidden_states[:, 0], hidden_states[:, 1:])\n    total_batch_size, sequence_length, num_channels = hidden_states.shape\n    hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels)\n    hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous()\n    if self.readout_type == 'project':\n        hidden_states = hidden_states.flatten(2).permute((0, 2, 1))\n        readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states)\n        hidden_states = torch.cat((hidden_states, readout), -1)\n    elif self.readout_type == 'add':\n        hidden_states = hidden_states + cls_token.unsqueeze(-1)\n    out = []\n    for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)):\n        if self.readout_type == 'project':\n            hidden_state = self.readout_projects[stage_idx](hidden_state)\n        hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width)\n        hidden_state = self.layers[stage_idx](hidden_state)\n        out.append(hidden_state)\n    return out", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def parse_case_data(config=None, ped=None, owner=None, vcf_snv=None, vcf_sv=None, vcf_cancer=None, vcf_str=None, peddy_ped=None, peddy_sex=None, peddy_check=None, delivery_report=None, multiqc=None):\n    config_data = (copy.deepcopy(config) or {})\n    if ('analysis_date' not in config_data):\n        config_data['analysis_date'] = datetime.datetime.now()\n    if ped:\n        (family_id, samples) = parse_ped(ped)\n        config_data['family'] = family_id\n        config_data['samples'] = samples\n    if ('owner' not in config_data):\n        if (not owner):\n            raise SyntaxError('Case has no owner')\n        else:\n            config_data['owner'] = owner\n    if ('gene_panels' in config_data):\n        config_data['gene_panels'] = [panel.strip() for panel in config_data['gene_panels']]\n        config_data['default_gene_panels'] = [panel.strip() for panel in config_data['default_gene_panels']]\n    config_data['peddy_ped'] = (peddy_ped or config_data.get('peddy_ped'))\n    config_data['peddy_sex_check'] = (peddy_sex or config_data.get('peddy_sex'))\n    config_data['peddy_ped_check'] = (peddy_check or config_data.get('peddy_check'))\n    add_peddy_information(config_data)\n    config_data['multiqc'] = (multiqc or config_data.get('multiqc'))\n    config_data['vcf_snv'] = (vcf_snv if vcf_snv else config_data.get('vcf_snv'))\n    config_data['vcf_sv'] = (vcf_sv if vcf_sv else config_data.get('vcf_sv'))\n    config_data['vcf_str'] = (vcf_str if vcf_str else config_data.get('vcf_str'))\n    log.debug('Config vcf_str set to {0}'.format(config_data['vcf_str']))\n    config_data['vcf_cancer'] = (vcf_cancer if vcf_cancer else config_data.get('vcf_cancer'))\n    config_data['delivery_report'] = (delivery_report if delivery_report else config_data.get('delivery_report'))\n    config_data['rank_model_version'] = config_data.get('rank_model_version')\n    config_data['rank_score_threshold'] = config_data.get('rank_score_threshold', 0)\n    config_data['track'] = config_data.get('track', 'rare')\n    if config_data['vcf_cancer']:\n        config_data['track'] = 'cancer'\n    return config_data", "docstring": "Parse all data necessary for loading a case into scout\n\nThis can be done either by providing a VCF file and other information\non the command line. Or all the information can be specified in a config file.\nPlease see Scout documentation for further instructions.\n\nArgs:\nconfig(dict): A yaml formatted config file\nped(iterable(str)): A ped formatted family file\nowner(str): The institute that owns a case\nvcf_snv(str): Path to a vcf file\nvcf_str(str): Path to a VCF file\nvcf_sv(str): Path to a vcf file\nvcf_cancer(str): Path to a vcf file\npeddy_ped(str): Path to a peddy ped\nmultiqc(str): Path to dir with multiqc information\n\nReturns:\nconfig_data(dict): Holds all the necessary information for loading\nScout", "source": "codesearchnet"}
{"code": "def switch_to_window(self, window_name):\n    data = {'name': window_name}\n    self._execute(Command.SWITCH_TO_WINDOW, data)", "docstring": "Switch to the given window.\n\nSupport:\nWeb(WebView)\n\nArgs:\nwindow_name(str): The window to change focus to.\n\nReturns:\nWebDriver Object.", "source": "codesearchnet"}
{"code": "def get(self, tx_id):\n        \n        pool = current_app.config['bigchain_pool']\n\n        with pool() as bigchain:\n            tx = bigchain.get_transaction(tx_id)\n\n        if not tx:\n            return make_error(404)\n\n        return tx.to_dict()", "docstring": "API endpoint to get details about a transaction.\n\nArgs:\ntx_id (str): the id of the transaction.\n\nReturn:\nA JSON string containing the data about the transaction.", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[np.ndarray], engine: TensorRTEngine, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    return self.inference_fn(batch, engine, inference_args)", "docstring": "Runs inferences on a batch of Tensors and returns an Iterable of\nTensorRT Predictions.\n\nArgs:\nbatch: A np.ndarray or a np.ndarray that represents a concatenation\nof multiple arrays as a batch.\nengine: A TensorRT engine.\ninference_args: Any additional arguments for an inference\nthat are not applicable to TensorRT.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def Images(self, run, tag):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.Images(tag)", "docstring": "Retrieve the image events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.ImageEvents`.", "source": "juraj-google-style"}
{"code": "def join(self, *args, **kwargs):\n        \n        super(ThreadReturn, self).join(*args, **kwargs)\n        return self._return", "docstring": "Joins the thread.\n\nArgs:\nself (ThreadReturn): the ``ThreadReturn`` instance\nargs: optional list of arguments\nkwargs: optional key-word arguments\n\nReturns:\nThe return value of the exited thread.", "source": "juraj-google-style"}
{"code": "def authenticate(self):\n    basic_auth = request.authorization\n    is_valid = False\n    user = None\n    if basic_auth:\n        (is_valid, user) = self.check_basic_auth(basic_auth.username, basic_auth.password)\n    else:\n        token = request.headers.get('Authorization', None)\n        param_token = request.args.get('access_token')\n        if (token or param_token):\n            if token:\n                token = token[6:]\n            else:\n                token = param_token\n            log.debug('Received token: %s', token)\n            (is_valid, user) = self.check_token_auth(token)\n    return (is_valid, user)", "docstring": "Authenticate user by any means and return either true or false.\n\nArgs:\n\nReturns:\ntuple (is_valid, username): True is valid user, False if not", "source": "codesearchnet"}
{"code": "def FindFileByName(self, file_name):\n    try:\n        return self._file_descriptors[file_name]\n    except KeyError:\n        pass\n    try:\n        file_proto = self._internal_db.FindFileByName(file_name)\n    except KeyError as error:\n        if self._descriptor_db:\n            file_proto = self._descriptor_db.FindFileByName(file_name)\n        else:\n            raise error\n    if (not file_proto):\n        raise KeyError(('Cannot find a file named %s' % file_name))\n    return self._ConvertFileProtoToFileDescriptor(file_proto)", "docstring": "Gets a FileDescriptor by file name.\n\nArgs:\nfile_name: The path to the file to get a descriptor for.\n\nReturns:\nA FileDescriptor for the named file.\n\nRaises:\nKeyError: if the file cannot be found in the pool.", "source": "codesearchnet"}
{"code": "def output_vars(self, transitive: bool=False) -> Set[str]:\n    output_vars = set()\n\n    def list_var_defs(k, v, p):\n        del k, p\n        if isinstance(v, SymbolDefinition):\n            output_vars.add(v.name)\n        if isinstance(v, Function):\n            return pg.TraverseAction.CONTINUE\n        return pg.TraverseAction.ENTER\n    pg.traverse(self.line(), list_var_defs)\n    if transitive:\n        parent_func = self.parent_func()\n        if parent_func is not None:\n            for i in range(self.line_number(), len(parent_func.body)):\n                line = parent_func.body[i]\n                line_input_vars = line.input_vars()\n                if output_vars & line_input_vars:\n                    output_vars.update(line.output_vars())\n    return output_vars", "docstring": "Returns the output context from this instruction.\n\nArgs:\ntransitive: If True, transitive output context will be included.\n\nReturns:\nA set of output variable names.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    file_offset = 0\n    try:\n        (timestamp, event_data) = self._ReadEntry(parser_mediator, file_object, file_offset)\n    except errors.ParseError as exception:\n        raise errors.UnableToParseFile('Unable to parse first utmp entry with error: {0!s}'.format(exception))\n    if (not event_data.username):\n        raise errors.UnableToParseFile('Unable to parse first utmp entry with error: missing username')\n    if (not timestamp):\n        raise errors.UnableToParseFile('Unable to parse first utmp entry with error: missing timestamp')\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    file_offset = file_object.tell()\n    file_size = file_object.get_size()\n    while (file_offset < file_size):\n        if parser_mediator.abort:\n            break\n        try:\n            (timestamp, event_data) = self._ReadEntry(parser_mediator, file_object, file_offset)\n        except errors.ParseError:\n            break\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        file_offset = file_object.tell()", "docstring": "Parses an utmp file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def _adjusted_script_code(self, script):\n        \n        script_code = ByteData()\n        if script[0] == len(script) - 1:\n            return script\n        script_code += VarInt(len(script))\n        script_code += script\n        return script_code", "docstring": "Checks if the script code pased in to the sighash function is already\nlength-prepended\nThis will break if there's a redeem script that's just a pushdata\nThat won't happen in practice\n\nArgs:\nscript (bytes): the spend script\nReturns:\n(bytes): the length-prepended script (if necessary)", "source": "juraj-google-style"}
{"code": "def _ParseFileEntry(self, knowledge_base, file_entry):\n    \n    if not file_entry or not file_entry.link:\n      raise errors.PreProcessFail(\n          'Unable to read: {0:s} with error: not a symbolic link'.format(\n              self.ARTIFACT_DEFINITION_NAME))\n\n    _, _, time_zone = file_entry.link.partition('zoneinfo/')\n    \n    if time_zone:\n      try:\n        knowledge_base.SetTimeZone(time_zone)\n      except ValueError:\n        \n        pass", "docstring": "Parses artifact file system data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_entry (dfvfs.FileEntry): file entry that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def _ExtractContentFromDataStream(\n      self, mediator, file_entry, data_stream_name):\n    \n    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n\n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('extracting')\n\n    self._event_extractor.ParseDataStream(\n        mediator, file_entry, data_stream_name)\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('extracting')\n\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING\n\n    self.last_activity_timestamp = time.time()", "docstring": "Extracts content from a data stream.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry to extract its content.\ndata_stream_name (str): name of the data stream whose content is to be\nextracted.", "source": "juraj-google-style"}
{"code": "def affine_coupling(name, x, mid_channels=512, activation='relu', reverse=False, dropout=0.0):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        x_shape = common_layers.shape_list(x)\n        (x1, x2) = tf.split(x, num_or_size_splits=2, axis=(- 1))\n        z1 = x1\n        log_scale_and_shift = conv_stack('nn', x1, mid_channels, x_shape[(- 1)], activation=activation, dropout=dropout)\n        shift = log_scale_and_shift[(:, :, :, 0::2)]\n        scale = tf.nn.sigmoid((log_scale_and_shift[(:, :, :, 1::2)] + 2.0))\n        if (not reverse):\n            z2 = ((x2 + shift) * scale)\n        else:\n            z2 = ((x2 / scale) - shift)\n        objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])\n        if reverse:\n            objective *= (- 1)\n        return (tf.concat([z1, z2], axis=3), objective)", "docstring": "Reversible affine coupling layer.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor.\nmid_channels: number of channels in the coupling layer.\nactivation: Can be either \"relu\" or \"gatu\".\nreverse: Forward or reverse operation.\ndropout: default, 0.0\nReturns:\noutput: x shifted and scaled by an affine transformation.\nobjective: log-determinant of the jacobian", "source": "codesearchnet"}
{"code": "def _Execute(self, http):\n        \n        message = mime_multipart.MIMEMultipart('mixed')\n        \n        setattr(message, '_write_headers', lambda self: None)\n\n        \n        for key in self.__request_response_handlers:\n            msg = mime_nonmultipart.MIMENonMultipart('application', 'http')\n            msg['Content-Transfer-Encoding'] = 'binary'\n            msg['Content-ID'] = self._ConvertIdToHeader(key)\n\n            body = self._SerializeRequest(\n                self.__request_response_handlers[key].request)\n            msg.set_payload(body)\n            message.attach(msg)\n\n        request = http_wrapper.Request(self.__batch_url, 'POST')\n        request.body = message.as_string()\n        request.headers['content-type'] = (\n            'multipart/mixed; boundary=\"%s\"') % message.get_boundary()\n\n        response = http_wrapper.MakeRequest(http, request)\n\n        if response.status_code >= 300:\n            raise exceptions.HttpError.FromResponse(response)\n\n        \n        header = 'content-type: %s\\r\\n\\r\\n' % response.info['content-type']\n\n        content = response.content\n        if isinstance(content, bytes) and self.__response_encoding:\n            content = response.content.decode(self.__response_encoding)\n\n        parser = email_parser.Parser()\n        mime_response = parser.parsestr(header + content)\n\n        if not mime_response.is_multipart():\n            raise exceptions.BatchError(\n                'Response not in multipart/mixed format.')\n\n        for part in mime_response.get_payload():\n            request_id = self._ConvertHeaderToId(part['Content-ID'])\n            response = self._DeserializeResponse(part.get_payload())\n\n            \n            \n            \n            self.__request_response_handlers[request_id] = (\n                self.__request_response_handlers[request_id]._replace(\n                    response=response))", "docstring": "Serialize batch request, send to server, process response.\n\nArgs:\nhttp: A httplib2.Http object to be used to make the request with.\n\nRaises:\nhttplib2.HttpLib2Error if a transport error has occured.\napiclient.errors.BatchError if the response is the wrong format.", "source": "juraj-google-style"}
{"code": "def GetResults(self):\n    result = analyzer_result.AnalyzerResult()\n    result.analyzer_name = self.NAME\n    result.attribute_name = self._ATTRIBUTE_NAME\n    rule_names = [match.rule for match in self._matches]\n    result.attribute_value = ','.join(rule_names)\n    return [result]", "docstring": "Retrieves results of the most recent analysis.\n\nReturns:\nlist[AnalyzerResult]: results.", "source": "codesearchnet"}
{"code": "def get_proj(prj_code):\n    if (prj_code in CUSTOM_PRJ):\n        proj = pyproj.Proj(CUSTOM_PRJ[prj_code])\n    else:\n        proj = pyproj.Proj(init=prj_code)\n    return proj", "docstring": "Helper method for handling projection codes that are unknown to pyproj\n\nArgs:\nprj_code (str): an epsg proj code\n\nReturns:\nprojection: a pyproj projection", "source": "codesearchnet"}
{"code": "def set_disk_timeout(timeout, power='ac', scheme=None):\n    return _set_powercfg_value(scheme=scheme, sub_group='SUB_DISK', setting_guid='DISKIDLE', power=power, value=timeout)", "docstring": "Set the disk timeout in minutes for the given power scheme\n\nArgs:\ntimeout (int):\nThe amount of time in minutes before the disk will timeout\n\npower (str):\nSet the value for AC or DC power. Default is ``ac``. Valid options\nare:\n\n- ``ac`` (AC Power)\n- ``dc`` (Battery)\n\nscheme (str):\nThe scheme to use, leave as ``None`` to use the current. Default is\n``None``. This can be the GUID or the Alias for the Scheme. Known\nAliases are:\n\n- ``SCHEME_BALANCED`` - Balanced\n- ``SCHEME_MAX`` - Power saver\n- ``SCHEME_MIN`` - High performance\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\n# Sets the disk timeout to 30 minutes on battery\nsalt '*' powercfg.set_disk_timeout 30 power=dc", "source": "codesearchnet"}
{"code": "def _process_new(self, feed_item):\n    if feed_item.get(FieldMap.AD_ACTIVE, None):\n        self._wait_all_creative_activation(feed_item)\n    campaign = self._campaign_dao.get(feed_item, required=True)\n    creative_assignments = []\n    placement_assignments = []\n    event_tag_assignments = []\n    self._process_assignments(feed_item, creative_assignments, placement_assignments, event_tag_assignments, campaign)\n    creative_rotation = {'creativeAssignments': creative_assignments}\n    self._setup_rotation_strategy(creative_rotation, feed_item)\n    delivery_schedule = {'impressionRatio': '1', 'priority': feed_item.get(FieldMap.AD_PRIORITY, None), 'hardCutoff': feed_item.get(FieldMap.AD_HARDCUTOFF, None)}\n    ad = {'active': feed_item.get(FieldMap.AD_ACTIVE, None), 'archived': feed_item.get(FieldMap.AD_ARCHIVED, None), 'campaignId': campaign['id'], 'creativeRotation': creative_rotation, 'deliverySchedule': delivery_schedule, 'endTime': feed_item.get(FieldMap.AD_END_DATE, None) if 'T' in feed_item.get(FieldMap.AD_END_DATE, None) else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_END_DATE, None), '23:59:59'), 'name': feed_item.get(FieldMap.AD_NAME, None), 'placementAssignments': placement_assignments, 'startTime': feed_item.get(FieldMap.AD_START_DATE, None) if 'T' in feed_item.get(FieldMap.AD_START_DATE, None) else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_START_DATE, None)), 'type': feed_item.get(FieldMap.AD_TYPE, 'AD_SERVING_STANDARD_AD'), 'eventTagOverrides': event_tag_assignments}\n    self._process_landing_page(ad, feed_item)\n    return ad", "docstring": "Creates a new ad DCM object from a feed item representing an ad from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item representing the ad from the Bulkdozer feed.\n\nReturns:\nAn ad object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def set_window_position(self, x, y, window_handle='current'):\n    self._execute(Command.SET_WINDOW_POSITION, {'x': int(x), 'y': int(y), 'window_handle': window_handle})", "docstring": "Sets the x,y position of the current window.\n\nSupport:\nWeb(WebView)\n\nArgs:\nx(int): the x-coordinate in pixels.\ny(int): the y-coordinate in pixels.\nwindow_handle(str): Identifier of window_handle,\ndefault to 'current'.\n\nReturns:\nWebDriver Object.", "source": "codesearchnet"}
{"code": "def ping(self, suffix='public_tokens/'):\n        \n        return self.remote_utils.ping(super(neuroRemote, self).url(), suffix)", "docstring": "Return the status-code of the API (estimated using the public-tokens\nlookup page).\n\nArguments:\nsuffix (str : 'public_tokens/'): The url endpoint to check\n\nReturns:\nint: status code", "source": "juraj-google-style"}
{"code": "def add_to_gitignore(line: str):\n    \n    if not line.endswith('\\n'):\n        line = f'{line}\\n'\n    if GIT_IGNORE.exists():\n        if line in GIT_IGNORE.read_text(encoding='utf8'):\n            return\n        previous_content = GIT_IGNORE.read_text(encoding='utf8')\n    else:\n        previous_content = ''\n    GIT_IGNORE.write_text(previous_content + line, encoding='utf8')", "docstring": "Adds a line to the .gitignore file of the repo\n\nArgs:\nline: line to add", "source": "juraj-google-style"}
{"code": "def normalize(array, min_value=0., max_value=1.):\n    \n    arr_min = np.min(array)\n    arr_max = np.max(array)\n    normalized = (array - arr_min) / (arr_max - arr_min + K.epsilon())\n    return (max_value - min_value) * normalized + min_value", "docstring": "Normalizes the numpy array to (min_value, max_value)\n\nArgs:\narray: The numpy array\nmin_value: The min value in normalized array (Default value = 0)\nmax_value: The max value in normalized array (Default value = 1)\n\nReturns:\nThe array normalized to range between (min_value, max_value)", "source": "juraj-google-style"}
{"code": "def data(self, resource_value, return_value=False):\n    if return_value:\n        self._request_entity = None\n        self._request.add_payload('returnValue', True)\n    self._request_uri = '{}/{}/data'.format(self._request_uri, resource_value)", "docstring": "Alias for metric_name method\n\n+--------------+------------------------------------+\n| HTTP Method  | API Endpoint URI's                 |\n+==============+====================================+\n| POST         | /v2/customMetrics/{id}|{name}/data |\n+--------------+------------------------------------+\n\nExample\n-------\n\nThe weight value is optional.\n\n.. code-block:: javascript\n\n{\n\"value\": 1,\n\"weight\": 1,\n}\n\n**Keyed Example**\n\nThe weight value is optional.\n\n.. code-block:: javascript\n\n{\n\"value\": 1,\n\"weight\": 1,\n\"name\": \"src1\"\n}\n\nArgs:\nresource_name (string): The metric name.", "source": "codesearchnet"}
{"code": "def complete(self):\n    return ((self.header is not None) and (self.metadata is not None) and (self.content is not None) and (self.header.get('num_buffers', 0) == len(self._buffers)))", "docstring": "Returns whether all required parts of a message are present.\n\nReturns:\nbool : True if the message is complete, False otherwise", "source": "codesearchnet"}
{"code": "def linear(m=1, b=0):\n    \n    def f(i):\n        return m * i + b\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a sequence of linear values.\n\n.. code-block:: none\n\nvalue = m * i + b\n\nArgs:\nm (float) : a slope for the linear driver\nx (float) : an offset for the linear driver", "source": "juraj-google-style"}
{"code": "def create_document(self, name='Test Document', owner_type=0, public=True):\n        \n\n        payload = {\n            'name': name,\n            'ownerType': owner_type,\n            'isPublic': public\n        }\n\n        return self._api.request('post', '/api/documents', body=payload)", "docstring": "Create a new document.\n\nArgs:\n- name (str, default='Test Document'): The doc name\n- owner_type (int, default=0): 0 for user, 1 for company, 2 for team\n- public (bool, default=False): Whether or not to make doc public\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def properties(self, var_or_nodeid, as_list=False):\n        \n        props = []\n        if var_or_nodeid in self._vars:\n            props = self._vars[var_or_nodeid]['props']\n        elif var_or_nodeid in self._eps:\n            var = self._eps[var_or_nodeid][3].get(IVARG_ROLE)\n            props = self._vars.get(var, {}).get('props', [])\n        else:\n            raise KeyError(var_or_nodeid)\n        if not as_list:\n            props = dict(props)\n        return props", "docstring": "Return a dictionary of variable properties for *var_or_nodeid*.\n\nArgs:\nvar_or_nodeid: if a variable, return the properties\nassociated with the variable; if a nodeid, return the\nproperties associated with the intrinsic variable of the\npredication given by the nodeid", "source": "juraj-google-style"}
{"code": "def __init__(self, extensions):\n    \n    super(ExtensionsFileEntryFilter, self).__init__()\n    self._extensions = extensions", "docstring": "Initializes an extensions-based file entry filter.\n\nAn extension is defined as \"pdf\" as in \"document.pdf\".\n\nArgs:\nextensions (list[str]): a list of extension strings.", "source": "juraj-google-style"}
{"code": "def perfcounters(infile):\n    \n    measurements = []\n    with open(infile, 'r') as in_file:\n        read_struct(in_file)\n        for region_struct in read_structs(in_file):\n            region = region_struct[\"1\"][1]\n            core_info = region_struct[\"Region Info\"]\n            measurements += \\\n                get_measurements(region, core_info, region_struct)\n\n            for table_struct in read_tables(in_file):\n                core_info = None\n                if \"Event\" in table_struct:\n                    offset = 1\n                    core_info = table_struct[\"Event\"][offset:]\n                    measurements += get_measurements(region, core_info,\n                                                     table_struct, offset)\n                elif \"Metric\" in table_struct:\n                    core_info = table_struct[\"Metric\"]\n                    measurements += get_measurements(region, core_info,\n                                                     table_struct)\n    return measurements", "docstring": "Get a complete list of all measurements.\n\nArgs:\ninfile: The filestream containing all likwid output.\n\nReturns:\nA list of all measurements extracted from likwid's file stream.", "source": "juraj-google-style"}
{"code": "def index_file(self, f, overwrite=False):\n        \n        if isinstance(f, six.string_types):\n            f = self.layout.get_file(f)\n\n        if f.path in self.file_index and not overwrite:\n            return\n\n        if 'suffix' not in f.entities:  \n            return\n\n        md = self._get_metadata(f.path)\n\n        for md_key, md_val in md.items():\n            if md_key not in self.key_index:\n                self.key_index[md_key] = {}\n            self.key_index[md_key][f.path] = md_val\n            self.file_index[f.path][md_key] = md_val", "docstring": "Index metadata for the specified file.\n\nArgs:\nf (BIDSFile, str): A BIDSFile or path to an indexed file.\noverwrite (bool): If True, forces reindexing of the file even if\nan entry already exists.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None) -> tf.Tensor:\n    if input_ids is None and inputs_embeds is None:\n        raise ValueError('You have to specify either input_ids or inputs_embeds')\n    if inputs_embeds is None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)\n    position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))\n    final_embeddings = inputs_embeds + position_embeds\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def GetRowCache(self, query):\n    query_hash = hash(query)\n    if (query_hash not in self._row_caches):\n        self._row_caches[query_hash] = set()\n    return self._row_caches[query_hash]", "docstring": "Retrieves the row cache for a specific query.\n\nThe row cache is a set that contains hashes of values in a row. The row\ncache is used to find duplicate row when a database and a database with\na WAL file is parsed.\n\nArgs:\nquery (str): query.\n\nReturns:\nset: hashes of the rows that have been parsed.", "source": "codesearchnet"}
{"code": "def get_states(self):\n    stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle)\n    return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity(num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range'))", "docstring": "Returns states of the tree ensemble.\n\nReturns:\nstamp_token, num_trees, num_finalized_trees, num_attempted_layers and\nrange of the nodes in the latest layer.", "source": "github-repos"}
{"code": "def __get_keywords(self):\n    txt = self.text\n    for line in txt:\n        for word in split_words(line):\n            (yield word)", "docstring": "Get all the keywords related of this page\n\nReturns:\nAn array of strings", "source": "codesearchnet"}
{"code": "def _CheckForRestartAndMaybePurge(self, event):\n    if (event.HasField('session_log') and (event.session_log.status == event_pb2.SessionLog.START)):\n        self._Purge(event, by_tags=False)", "docstring": "Check and discard expired events using SessionLog.START.\n\nCheck for a SessionLog.START event and purge all previously seen events\nwith larger steps, because they are out of date. Because of supervisor\nthreading, it is possible that this logic will cause the first few event\nmessages to be discarded since supervisor threading does not guarantee\nthat the START message is deterministically written first.\n\nThis method is preferred over _CheckForOutOfOrderStepAndMaybePurge which\ncan inadvertently discard events due to supervisor threading.\n\nArgs:\nevent: The event to use as reference. If the event is a START event, all\npreviously seen events with a greater event.step will be purged.", "source": "codesearchnet"}
{"code": "def data_struct_array(sample, **vectors):\n    if (not len(sample)):\n        sample = np.zeros((0, 0), dtype=np.int8)\n    else:\n        sample = np.asarray(sample, dtype=np.int8)\n        if (sample.ndim < 2):\n            sample = np.expand_dims(sample, 0)\n    (num_samples, num_variables) = sample.shape\n    if ('num_occurrences' not in vectors):\n        vectors['num_occurrences'] = ([1] * num_samples)\n    datavectors = {}\n    datatypes = [('sample', np.dtype(np.int8), (num_variables,))]\n    for (kwarg, vector) in vectors.items():\n        dtype = (float if (kwarg == 'energy') else None)\n        datavectors[kwarg] = vector = np.asarray(vector, dtype)\n        if ((len(vector.shape) < 1) or (vector.shape[0] != num_samples)):\n            msg = '{} and sample have a mismatched shape {}, {}. They must have the same size in the first axis.'.format(kwarg, vector.shape, sample.shape)\n            raise ValueError(msg)\n        datatypes.append((kwarg, vector.dtype, vector.shape[1:]))\n    if ('energy' not in datavectors):\n        raise TypeError('data_struct_array() needs keyword-only argument energy')\n    elif (datavectors['energy'].shape != (num_samples,)):\n        raise ValueError('energy should be a vector of length {}'.format(num_samples))\n    data = np.rec.array(np.zeros(num_samples, dtype=datatypes))\n    data['sample'] = sample\n    for (kwarg, vector) in datavectors.items():\n        data[kwarg] = vector\n    return data", "docstring": "Combine samples and per-sample data into a numpy structured array.\n\nArgs:\nsample (array_like):\nSamples, in any form that can be converted into a numpy array.\n\nenergy (array_like, required):\nRequired keyword argument. Energies, in any form that can be converted into a numpy\n1-dimensional array.\n\n**kwargs (array_like):\nOther per-sample data, in any form that can be converted into a numpy array.\n\nReturns:\n:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]", "source": "codesearchnet"}
{"code": "def from_fortran_src(cls, fortran_src: str, dir: str = \".\"):\n        \n        import tempfile\n        fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)\n        fp.writelines(fortran_src)\n        fp.close()\n        G = cls.from_fortran_file(fp.name, dir)\n        os.remove(fp.name)\n        return G", "docstring": "Create a GroundedFunctionNetwork instance from a string with raw\nFortran code.\n\nArgs:\nfortran_src: A string with Fortran source code.\ndir: (Optional) - the directory in which the temporary Fortran file\nwill be created (make sure you have write permission!) Defaults to\nthe current directory.\nReturns:\nA GroundedFunctionNetwork instance", "source": "juraj-google-style"}
{"code": "def created(cls, data=None):\n        \n        if cls.expose_status:  \n            cls.response.content_type = 'application/json'\n            cls.response._status_line = '201 Created'\n\n        return cls(201, data=data).to_json", "docstring": "Shortcut API for HTTP 201 `Created` response.\n\nArgs:\ndata (object): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "juraj-google-style"}
{"code": "def _generic_fit(fqdn, result, scorer, yP=None, *argl, **argd):\n    \n    out = None\n    if len(argl) > 0:\n        machine = argl[0]\n        out = {}\n        if hasattr(machine, \"best_score_\"):\n            out[\"score\"] = machine.best_score_\n            \n        \n        \n        \n        yL = _do_auto_predict(*argl[0:2])\n        yscore = scorer(fqdn, yL, yP, *argl, **argd)\n        if yscore is not None:\n            out.update(yscore)\n\n    return out", "docstring": "Performs the generic fit tests that are common to both classifier and\nregressor; uses `scorer` to score the predicted values given by the machine\nwhen tested against its training set.\n\nArgs:\nscorer (function): called on the result of `machine.predict(Xtrain,\nytrain)`.", "source": "juraj-google-style"}
{"code": "def default_metric_definitions(cls, toolkit):\n    if (toolkit is RLToolkit.COACH):\n        return [{'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'}]\n    elif (toolkit is RLToolkit.RAY):\n        float_regex = '[-+]?[0-9]*\\\\.?[0-9]+([eE][-+]?[0-9]+)?'\n        return [{'Name': 'episode_reward_mean', 'Regex': ('episode_reward_mean: (%s)' % float_regex)}, {'Name': 'episode_reward_max', 'Regex': ('episode_reward_max: (%s)' % float_regex)}]", "docstring": "Provides default metric definitions based on provided toolkit.\n\nArgs:\ntoolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.\n\nReturns:\nlist: metric definitions", "source": "codesearchnet"}
{"code": "def calculate_stress(self, strain):\n        \n        strain = np.array(strain)\n        if strain.shape == (6,):\n            strain = Strain.from_voigt(strain)\n        assert strain.shape == (3, 3), \"Strain must be 3x3 or voigt-notation\"\n        stress_matrix = self.einsum_sequence([strain]*(self.order - 1)) \\\n                / factorial(self.order - 1)\n        return Stress(stress_matrix)", "docstring": "Calculate's a given elastic tensor's contribution to the\nstress using Einstein summation\n\nArgs:\nstrain (3x3 array-like): matrix corresponding to strain", "source": "juraj-google-style"}
{"code": "def convert_response(check_response, project_id):\n    if ((not check_response) or (not check_response.checkErrors)):\n        return _IS_OK\n    theError = check_response.checkErrors[0]\n    error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)\n    if (error_tuple[1].find(u'{') == (- 1)):\n        return error_tuple\n    updated_msg = error_tuple[1].format(project_id=project_id, detail=(theError.detail or u''))\n    return (error_tuple[0], updated_msg, error_tuple[2])", "docstring": "Computes a http status code and message `CheckResponse`\n\nThe return value a tuple (code, message, api_key_is_bad) where\n\ncode: is the http status code\nmessage: is the message to return\napi_key_is_bad: indicates that a given api_key is bad\n\nArgs:\ncheck_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`):\nthe response from calling an api\n\nReturns:\ntuple(code, message, bool)", "source": "codesearchnet"}
{"code": "def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False):\n    raise NotImplementedError", "docstring": "Test the model on a single batch of samples.\n\nArgs:\nx: Input data. Must be array-like.\ny: Target data. Must be array-like.\nsample_weight: Optional array of the same length as x, containing\nweights to apply to the model's loss for each sample.\nIn the case of temporal data, you can pass a 2D array\nwith shape `(samples, sequence_length)`, to apply a different\nweight to every timestep of every sample.\nreturn_dict: If `True`, loss and metric results are returned as a\ndict, with each key being the name of the metric. If `False`,\nthey are returned as a list.\n\nReturns:\nA scalar loss value (when no metrics and `return_dict=False`),\na list of loss and metric values\n(if there are metrics and `return_dict=False`), or a dict of\nmetric and loss values (if `return_dict=True`).", "source": "github-repos"}
{"code": "def _clone_sequential_model(model, clone_function, input_tensors=None):\n    if not isinstance(model, Sequential):\n        raise ValueError(f'Expected `model` argument to be a `Sequential` model instance. Received: model={model}')\n    if not callable(clone_function):\n        raise ValueError(f'Expected `clone_function` argument to be a callable. Received: clone_function={clone_function}')\n    new_layers = [clone_function(layer) for layer in model.layers]\n    if isinstance(model._layers[0], InputLayer):\n        ref_input_layer = model._layers[0]\n        input_name = ref_input_layer.name\n        input_batch_shape = ref_input_layer.batch_shape\n        input_dtype = ref_input_layer._dtype\n    else:\n        input_name = None\n        input_dtype = None\n        input_batch_shape = None\n    if input_tensors is not None:\n        if isinstance(input_tensors, (list, tuple)):\n            if len(input_tensors) != 1:\n                raise ValueError('Argument `input_tensors` must contain a single tensor.')\n            input_tensors = input_tensors[0]\n        if not isinstance(input_tensors, backend.KerasTensor):\n            raise ValueError(f'Argument `input_tensors` must be a KerasTensor. Received invalid value: input_tensors={input_tensors}')\n        inputs = Input(tensor=input_tensors, name=input_name)\n        new_layers = [inputs] + new_layers\n    elif input_batch_shape is not None:\n        inputs = Input(batch_shape=input_batch_shape, dtype=input_dtype, name=input_name)\n        new_layers = [inputs] + new_layers\n    cloned_model = Sequential(new_layers, name=model.name, trainable=model.trainable)\n    if model.compiled:\n        compiled_config = model.get_compile_config()\n        cloned_model.compile_from_config(compiled_config)\n    return cloned_model", "docstring": "Clone a `Sequential` model instance.\n\nModel cloning is similar to calling a model on new inputs,\nexcept that it creates new layers (and thus new weights) instead\nof sharing the weights of the existing layers.\n\nArgs:\nmodel: Instance of `Sequential`.\ninput_tensors: optional list of input tensors\nto build the model upon. If not provided,\nplaceholders will be created.\nclone_function: callable to be applied on non-input layers in the model.\nBy default, it clones the layer (without copying the weights).\n\nReturns:\nAn instance of `Sequential` reproducing the behavior\nof the original model, on top of new inputs tensors,\nusing newly instantiated weights.", "source": "github-repos"}
{"code": "def _FormatSourceShort(self, event):\n    (source_short, _) = self._output_mediator.GetFormattedSources(event)\n    if (source_short is None):\n        data_type = getattr(event, 'data_type', 'UNKNOWN')\n        raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n    return source_short", "docstring": "Formats the short source.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: short source field.\n\nRaises:\nNoFormatterFound: If no event formatter can be found to match the data\ntype in the event.", "source": "codesearchnet"}
{"code": "def get_column_names(self, X):\n        \n        if isinstance(X, pd.DataFrame):\n            return X.columns\n\n        return range(X.shape[1])", "docstring": "Return iterable containing columns for the given array X.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\n\nReturns:\niterable: columns for the given matrix.", "source": "juraj-google-style"}
{"code": "def request(self, result_limit, result_start, filters=None, params=None):\n        \n        return self.tc_requests.request(\n            self.api_type,\n            self.api_sub_type,\n            result_limit,\n            result_start,\n            owner=self.owner,\n            filters=filters,\n            params=params,\n        )", "docstring": "Gets the Indicator/Group/Victim or Security Labels\nArgs:\nfilters:\nowner:\nresult_limit:\nresult_start:\nparams: parameters to pass in to get the objects\n\nReturns:", "source": "juraj-google-style"}
{"code": "def unit_is_related(self, location, worksheet):\n    same_worksheet = (worksheet == self.worksheet)\n    if isinstance(location, (tuple, list)):\n        return ((location[0] >= self.start[0]) and (location[0] < self.end[0]) and (location[1] >= self.start[1]) and (location[1] < self.end[1]) and same_worksheet)\n    else:\n        return same_worksheet", "docstring": "Checks for relationship between a unit location and this block.\n\nReturns:\nTrue if the location is related to this block.", "source": "codesearchnet"}
{"code": "def find_first_file_with_ext(base_paths, prefix, exts):\n    for base_path in base_paths:\n        for ext in exts:\n            filename = os.path.join(base_path, ('%s%s' % (prefix, ext)))\n            if (os.path.exists(filename) and os.path.isfile(filename)):\n                logger.debug('Found first file with relevant extension: %s', filename)\n                return (base_path, ext)\n    logger.debug('No files found for prefix %s, extensions %s', prefix, ', '.join(exts))\n    return (None, None)", "docstring": "Runs through the given list of file extensions and returns the first file with the given base\npath and extension combination that actually exists.\n\nArgs:\nbase_paths: The base paths in which to search for files.\nprefix: The filename prefix of the file for which to search.\nexts: An ordered list of file extensions for which to search.\n\nReturns:\nOn success, a 2-tuple containing the base path in which the file was found, and the extension of the file.\nOn failure, returns (None, None).", "source": "codesearchnet"}
{"code": "def pretokenized_t2t_dataset(dataset_name=gin.REQUIRED,\n                             text2self=False,\n                             data_dir=gin.REQUIRED,\n                             dataset_split=\"train\",\n                             batch_size=gin.REQUIRED,\n                             sequence_length=gin.REQUIRED,\n                             vocabulary=None):\n  \n  del vocabulary\n  filepattern = os.path.join(\n      data_dir, dataset_name + \"-\" + dataset_split + \"-*\")\n  filenames = tf.gfile.Glob(filepattern)\n  tf.logging.info(\"Found %s files matching %s\" % (len(filenames), filepattern))\n  if not filenames:\n    raise ValueError(\"No matching files found\")\n  dataset = pretokenized_tfrecord_dataset(\n      filenames=filenames,\n      text2self=text2self,\n      eos_included=True,\n      repeat=dataset_split == \"train\",\n      batch_size=batch_size,\n      sequence_length=sequence_length)\n  if dataset_split == \"train\":\n    dataset = dataset.shuffle(1000)\n  return dataset", "docstring": "Loads the Tensor2tensor dataset specified by dataset_name.\n\nArgs:\ndataset_name: TensorFlow Datasets dataset name.\ntext2self: a boolean\ndata_dir: string, data_dir for TensorFlow Datasets\ndataset_split: a string - \"train\" or \"dev\"\nbatch_size: an integer\nsequence_length: an integer\nvocabulary: ignored\n\nReturns:\nA tf.data.Dataset of batches", "source": "juraj-google-style"}
{"code": "def enable_argscope_for_function(func, log_shape=True):\n    assert callable(func), 'func should be a callable'\n\n    @wraps(func)\n    def wrapped_func(*args, **kwargs):\n        actual_args = copy.copy(get_arg_scope()[func.__name__])\n        actual_args.update(kwargs)\n        out_tensor = func(*args, **actual_args)\n        in_tensor = args[0]\n        ctx = get_current_tower_context()\n        name = (func.__name__ if ('name' not in kwargs) else kwargs['name'])\n        if log_shape:\n            if (('tower' not in ctx.ns_name.lower()) or ctx.is_main_training_tower):\n                if isinstance(out_tensor, tuple):\n                    out_tensor_descr = out_tensor[0]\n                else:\n                    out_tensor_descr = out_tensor\n                logger.info(('%20s: %20s -> %20s' % (name, in_tensor.shape.as_list(), out_tensor_descr.shape.as_list())))\n        return out_tensor\n    wrapped_func.symbolic_function = None\n    return wrapped_func", "docstring": "Decorator for function to support argscope\n\nExample:\n\n.. code-block:: python\n\nfrom mylib import myfunc\nmyfunc = enable_argscope_for_function(myfunc)\n\nArgs:\nfunc: A function mapping one or multiple tensors to one or multiple\ntensors.\nlog_shape (bool): Specify whether the first input resp. output tensor\nshape should be printed once.\n\nRemarks:\nIf the function ``func`` returns multiple input or output tensors,\nonly the first input/output tensor shape is displayed during logging.\n\nReturns:\nThe decorated function.", "source": "codesearchnet"}
{"code": "def console_set_default_background(\n    con: tcod.console.Console, col: Tuple[int, int, int]\n) -> None:\n    \n    lib.TCOD_console_set_default_background(_console(con), col)", "docstring": "Change the default background color for a console.\n\nArgs:\ncon (Console): Any Console instance.\ncol (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\n\n.. deprecated:: 8.5\nUse :any:`Console.default_bg` instead.", "source": "juraj-google-style"}
{"code": "def marginalize_out(node_indices, tpm):\n    \n    return tpm.sum(tuple(node_indices), keepdims=True) / (\n        np.array(tpm.shape)[list(node_indices)].prod())", "docstring": "Marginalize out nodes from a TPM.\n\nArgs:\nnode_indices (list[int]): The indices of nodes to be marginalized out.\ntpm (np.ndarray): The TPM to marginalize the node out of.\n\nReturns:\nnp.ndarray: A TPM with the same number of dimensions, with the nodes\nmarginalized out.", "source": "juraj-google-style"}
{"code": "def angle_to_name(angle, segments=8, abbr=False):\n    if (segments == 4):\n        string = COMPASS_NAMES[((int(((angle + 45) / 90)) % 4) * 2)]\n    elif (segments == 8):\n        string = COMPASS_NAMES[((int(((angle + 22.5) / 45)) % 8) * 2)]\n    elif (segments == 16):\n        string = COMPASS_NAMES[(int(((angle + 11.25) / 22.5)) % 16)]\n    else:\n        raise ValueError(('Segments parameter must be 4, 8 or 16 not %r' % segments))\n    if abbr:\n        return ''.join((i[0].capitalize() for i in string.split('-')))\n    else:\n        return string", "docstring": "Convert angle in to direction name.\n\nArgs:\nangle (float): Angle in degrees to convert to direction name\nsegments (int): Number of segments to split compass in to\nabbr (bool): Whether to return abbreviated direction string\n\nReturns:\nstr: Direction name for ``angle``", "source": "codesearchnet"}
{"code": "def verify_gmt_integrity(gmt):\n    set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt]\n    assert (len(set(set_ids)) == len(set_ids)), 'Set identifiers should be unique. set_ids: {}'.format(set_ids)", "docstring": "Make sure that set ids are unique.\n\nArgs:\ngmt (GMT object): list of dicts\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_optimizer_experimental_options(self):\n    rewrite_options = self.config.graph_options.rewrite_options\n    options = {}\n\n    def rewriter_toggle(option):\n        attr = getattr(rewrite_options, option)\n        if attr != 0:\n            options[option] = attr == rewriter_config_pb2.RewriterConfig.ON\n\n    def rewriter_bool(option):\n        options[option] = getattr(rewrite_options, option)\n    rewriter_toggle('layout_optimizer')\n    rewriter_toggle('constant_folding')\n    rewriter_toggle('shape_optimization')\n    rewriter_toggle('remapping')\n    rewriter_toggle('arithmetic_optimization')\n    rewriter_toggle('dependency_optimization')\n    rewriter_toggle('loop_optimization')\n    rewriter_toggle('function_optimization')\n    rewriter_toggle('debug_stripper')\n    rewriter_bool('disable_model_pruning')\n    rewriter_toggle('scoped_allocator_optimization')\n    rewriter_toggle('pin_to_host_optimization')\n    rewriter_toggle('implementation_selector')\n    rewriter_toggle('auto_mixed_precision')\n    rewriter_toggle('use_plugin_optimizers')\n    rewriter_bool('disable_meta_optimizer')\n    rewriter_toggle('auto_mixed_precision_onednn_bfloat16')\n    rewriter_toggle('auto_mixed_precision_mkl')\n    if rewrite_options.min_graph_nodes != 0:\n        options['min_graph_nodes'] = rewrite_options.min_graph_nodes\n    return options", "docstring": "Get experimental options for the optimizer.\n\nReturns:\nDictionary of current option values", "source": "github-repos"}
{"code": "def tan(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.tan, tf.float32)", "docstring": "Returns a TensorFluent for the tan function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the tan function.", "source": "codesearchnet"}
{"code": "def _populate(cls, as_of=None, delete=False):\n        \n        billing_cycle_helper = get_billing_cycle()\n        billing_cycles_exist = BillingCycle.objects.exists()\n\n        try:\n            current_billing_cycle = BillingCycle.objects.as_of(date=as_of)\n        except BillingCycle.DoesNotExist:\n            current_billing_cycle = None\n\n        \n        if not billing_cycles_exist:\n            delete = False\n\n        \n        if billing_cycles_exist and not current_billing_cycle:\n            raise CannotPopulateForDateOutsideExistingCycles()\n\n        \n        \n        omit_current = (current_billing_cycle and delete)\n\n        stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)\n        date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)\n        date_ranges = list(date_ranges)\n\n        beginning_date = date_ranges[0][0]\n\n        with db_transaction.atomic():\n\n            if delete:\n                \n                cls.objects.filter(start_date__gte=beginning_date).delete()\n\n            for start_date, end_date in date_ranges:\n                exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()\n                if exists:\n                    if delete:\n                        raise Exception(\n                            'It should not be possible to get here as future billing cycles have just been deleted'\n                        )\n                    else:\n                        \n                        pass\n                else:\n                    BillingCycle.objects.create(\n                        date_range=(start_date, end_date),\n                    )", "docstring": "Populate the table with billing cycles starting from `as_of`\n\nArgs:\nas_of (date): The date at which to begin the populating\ndelete (bool): Should future billing cycles be deleted?", "source": "juraj-google-style"}
{"code": "def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str], input_channel_dim: Optional[Union[ChannelDimension, str]]=None) -> np.ndarray:\n    if not isinstance(image, np.ndarray):\n        raise ValueError(f'Input image must be of type np.ndarray, got {type(image)}')\n    if input_channel_dim is None:\n        input_channel_dim = infer_channel_dimension_format(image)\n    target_channel_dim = ChannelDimension(channel_dim)\n    if input_channel_dim == target_channel_dim:\n        return image\n    if target_channel_dim == ChannelDimension.FIRST:\n        image = image.transpose((2, 0, 1))\n    elif target_channel_dim == ChannelDimension.LAST:\n        image = image.transpose((1, 2, 0))\n    else:\n        raise ValueError('Unsupported channel dimension format: {}'.format(channel_dim))\n    return image", "docstring": "Converts `image` to the channel dimension format specified by `channel_dim`.\n\nArgs:\nimage (`numpy.ndarray`):\nThe image to have its channel dimension set.\nchannel_dim (`ChannelDimension`):\nThe channel dimension format to use.\ninput_channel_dim (`ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input image.\n\nReturns:\n`np.ndarray`:\nThe image with the channel dimension set to `channel_dim`.", "source": "github-repos"}
{"code": "def constant(x: A) -> Callable[(..., A)]:\n\n    def constanted(*args, **kwargs):\n        return x\n    return constanted", "docstring": "Produce a function that always returns a supplied value.\n\nArgs:\nx: Any object.\n\nReturns:\nA function that accepts any number of positional and keyword arguments, discards them, and returns ``x``.", "source": "codesearchnet"}
{"code": "def add_hparam(self, name, value):\n    \n    \n    \n    \n    if getattr(self, name, None) is not None:\n      raise ValueError('Hyperparameter name is reserved: %s' % name)\n    if isinstance(value, (list, tuple)):\n      if not value:\n        raise ValueError(\n            'Multi-valued hyperparameters cannot be empty: %s' % name)\n      self._hparam_types[name] = (type(value[0]), True)\n    else:\n      self._hparam_types[name] = (type(value), False)\n    setattr(self, name, value)", "docstring": "Adds {name, value} pair to hyperparameters.\n\nArgs:\nname: Name of the hyperparameter.\nvalue: Value of the hyperparameter. Can be one of the following types:\nint, float, string, int list, float list, or string list.\n\nRaises:\nValueError: if one of the arguments is invalid.", "source": "juraj-google-style"}
{"code": "async def openurl(url, **opts):\n    if (url.find(':\n        newurl = alias(url)\n        if (newurl is None):\n            raise s_exc.BadUrl(f':\n        url = newurl\n    info = s_urlhelp.chopurl(url)\n    info.update(opts)\n    host = info.get('host')\n    port = info.get('port')\n    auth = None\n    user = info.get('user')\n    if (user is not None):\n        passwd = info.get('passwd')\n        auth = (user, {'passwd': passwd})\n    scheme = info.get('scheme')\n    if (scheme == 'cell'):\n        path = info.get('path')\n        name = info.get('name', '*')\n        host = info.get('host')\n        if host:\n            path = path.strip('/')\n            path = os.path.join(host, path)\n        if (':' in path):\n            (path, name) = path.split(':')\n        full = os.path.join(path, 'sock')\n        link = (await s_link.unixconnect(full))\n    elif (scheme == 'unix'):\n        (path, name) = info.get('path').split(':')\n        link = (await s_link.unixconnect(path))\n    else:\n        path = info.get('path')\n        name = info.get('name', path[1:])\n        sslctx = None\n        if (scheme == 'ssl'):\n            certpath = info.get('certdir')\n            certdir = s_certdir.CertDir(certpath)\n            sslctx = certdir.getClientSSLContext()\n        link = (await s_link.connect(host, port, ssl=sslctx))\n    prox = (await Proxy.anit(link, name))\n    prox.onfini(link)\n    try:\n        (await prox.handshake(auth=auth))\n    except Exception:\n        (await prox.fini())\n        raise\n    return prox", "docstring": "Open a URL to a remote telepath object.\n\nArgs:\nurl (str): A telepath URL.\n**opts (dict): Telepath connect options.\n\nReturns:\n(synapse.telepath.Proxy): A telepath proxy object.\n\nThe telepath proxy may then be used for sync or async calls:\n\nproxy = openurl(url)\nvalue = proxy.getFooThing()\n\n... or ...\n\nproxy = await openurl(url)\nvalu = await proxy.getFooThing()\n\n... or ...\n\nasync with await openurl(url) as proxy:\nvalu = await proxy.getFooThing()", "source": "codesearchnet"}
{"code": "def set_all_curriculums_to_lesson_num(self, lesson_num):\n    for (_, curriculum) in self.brains_to_curriculums.items():\n        curriculum.lesson_num = lesson_num", "docstring": "Sets all the curriculums in this meta curriculum to a specified\nlesson number.\n\nArgs:\nlesson_num (int): The lesson number which all the curriculums will\nbe set to.", "source": "codesearchnet"}
{"code": "def get_compile_flags():\n    flags = []\n    flags.append('-I%s' % get_include())\n    flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)\n    cxx_version_flag = None\n    if _CXX_VERSION == 201103:\n        cxx_version_flag = '--std=c++11'\n    elif _CXX_VERSION == 201402:\n        cxx_version_flag = '--std=c++14'\n    elif _CXX_VERSION == 201703:\n        cxx_version_flag = '--std=c++17'\n    elif _CXX_VERSION == 202002:\n        cxx_version_flag = '--std=c++20'\n    if cxx_version_flag:\n        flags.append(cxx_version_flag)\n    flags.append('-DEIGEN_MAX_ALIGN_BYTES=%d' % pywrap_tf_session.get_eigen_max_align_bytes())\n    return flags", "docstring": "Returns the compilation flags for compiling with TensorFlow.\n\nThe returned list of arguments can be passed to the compiler for compiling\nagainst TensorFlow headers. The result is platform dependent.\n\nFor example, on a typical Linux system with Python 3.7 the following command\nprints `['-I/usr/local/lib/python3.7/dist-packages/tensorflow/include',\n'-D_GLIBCXX_USE_CXX11_ABI=1', '-DEIGEN_MAX_ALIGN_BYTES=64']`\n\n>>> print(tf.sysconfig.get_compile_flags())\n\nReturns:\nA list of strings for the compiler flags.", "source": "github-repos"}
{"code": "def release_port(upnp, external_port):\n    mapping = upnp.getspecificportmapping(external_port, 'UDP')\n    if (mapping is None):\n        log.error('could not find a port mapping', external=external_port)\n        return False\n    else:\n        log.debug('found existing port mapping', mapping=mapping)\n    if upnp.deleteportmapping(external_port, 'UDP'):\n        log.info('successfully released port mapping', external=external_port)\n        return True\n    log.warning('could not release port mapping, check your router for stale mappings')\n    return False", "docstring": "Try to release the port mapping for `external_port`.\n\nArgs:\nexternal_port (int): the port that was previously forwarded to.\n\nReturns:\nsuccess (boolean): if the release was successful.", "source": "codesearchnet"}
{"code": "def extract_element_internationalized_comment(element):\n    element_entry_comment = get_element_attribute_or_empty(element, 'userLabel')\n    if (element_entry_comment == ''):\n        try:\n            element_entry_comment = element.getElementsByTagName('string')[0].firstChild.nodeValue\n        except Exception:\n            element_entry_comment = ''\n    if (not element_entry_comment.lower().startswith(JT_INTERNATIONALIZED_COMMENT_PREFIX)):\n        return None\n    else:\n        return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]", "docstring": "Extracts the xib element's comment, if the element has been internationalized.\n\nArgs:\nelement (element): The element from which to extract the comment.\n\nReturns:\nThe element's internationalized comment, None if it does not exist, or hasn't been internationalized (according\nto the JTLocalize definitions).", "source": "codesearchnet"}
{"code": "def xarrayfunc(func):\n\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        if any((isinstance(arg, xr.DataArray) for arg in args)):\n            newargs = []\n            for arg in args:\n                if isinstance(arg, xr.DataArray):\n                    newargs.append(arg.values)\n                else:\n                    newargs.append(arg)\n            return dc.full_like(args[0], func(*newargs, **kwargs))\n        else:\n            return func(*args, **kwargs)\n    return wrapper", "docstring": "Make a function compatible with xarray.DataArray.\n\nThis function is intended to be used as a decorator like::\n\n>>> @dc.xarrayfunc\n>>> def func(array):\n...     # do something\n...     return newarray\n>>>\n>>> result = func(array)\n\nArgs:\nfunc (function): Function to be wrapped. The first argument\nof the function must be an array to be processed.\n\nReturns:\nwrapper (function): Wrapped function.", "source": "codesearchnet"}
{"code": "def get_decoder_self_attention_bias(length):\n    with tf.name_scope('decoder_self_attention_bias'):\n        valid_locs = tf.matrix_band_part(tf.ones([length, length]), (- 1), 0)\n        valid_locs = tf.reshape(valid_locs, [1, 1, length, length])\n        decoder_bias = (_NEG_INF * (1.0 - valid_locs))\n    return decoder_bias", "docstring": "Calculate bias for decoder that maintains model's autoregressive property.\n\nCreates a tensor that masks out locations that correspond to illegal\nconnections, so prediction at position i cannot draw information from future\npositions.\n\nArgs:\nlength: int length of sequences in batch.\n\nReturns:\nfloat tensor of shape [1, 1, length, length]", "source": "codesearchnet"}
{"code": "def update_x(self, x, indices=None):\n    x = _make_np_bool(x)\n    if (indices is None):\n        if (len(self._x) != len(x)):\n            raise QiskitError('During updating whole x, you can not change the number of qubits.')\n        self._x = x\n    else:\n        if ((not isinstance(indices, list)) and (not isinstance(indices, np.ndarray))):\n            indices = [indices]\n        for (p, idx) in enumerate(indices):\n            self._x[idx] = x[p]\n    return self", "docstring": "Update partial or entire x.\n\nArgs:\nx (numpy.ndarray or list): to-be-updated x\nindices (numpy.ndarray or list or optional): to-be-updated qubit indices\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: when updating whole x, the number of qubits must be the same.", "source": "codesearchnet"}
{"code": "def set_stderrthreshold(s):\n  \n  if s in converter.ABSL_LEVELS:\n    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]\n  elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:\n    FLAGS.stderrthreshold = s\n  else:\n    raise ValueError(\n        'set_stderrthreshold only accepts integer absl logging level '\n        'from -3 to 1, or case-insensitive string values '\n        \"'debug', 'info', 'warning', 'error', and 'fatal'. \"\n        'But found \"{}\" ({}).'.format(s, type(s)))", "docstring": "Sets the stderr threshold to the value passed in.\n\nArgs:\ns: str|int, valid strings values are case-insensitive 'debug',\n'info', 'warning', 'error', and 'fatal'; valid integer values are\nlogging.DEBUG|INFO|WARNING|ERROR|FATAL.\n\nRaises:\nValueError: Raised when s is an invalid value.", "source": "juraj-google-style"}
{"code": "class XGBoostModelHandlerSciPy(XGBoostModelHandler[scipy.sparse.csr_matrix, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):\n\n    def run_inference(self, batch: Sequence[scipy.sparse.csr_matrix], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n        \n        return self._inference_fn(batch, model, inference_args)\n\n    def get_num_bytes(self, batch: Sequence[scipy.sparse.csr_matrix]) -> int:\n        \n        return sum((sys.getsizeof(element) for element in batch))", "docstring": "Implementation of the ModelHandler interface for XGBoost\nusing scipy matrices as input.\n\nExample Usage::\n\npcoll | RunInference(\nXGBoostModelHandlerSciPy(\nmodel_class=\"XGBoost Model Class\",\nmodel_state=\"my_model_state.json\")))\n\nArgs:\nmodel_class: class of the XGBoost model that defines the model\nstructure.\nmodel_state: path to a json file that contains the model's\nconfiguration.\ninference_fn: the inference function to use during RunInference.\ndefault=default_xgboost_inference_fn", "source": "github-repos"}
{"code": "def extract_model_metrics(model):\n    if getattr(model, '_compile_metrics', None):\n        return {m.name: m for m in model._compile_metric_functions}\n    return None", "docstring": "Convert metrics from a Keras model `compile` API to dictionary.\n\nThis is used for converting Keras models to SavedModels.\n\nArgs:\nmodel: A `tf.keras.Model` object.\n\nReturns:\nDictionary mapping metric names to metric instances. May return `None` if\nthe model does not contain any metrics.", "source": "github-repos"}
{"code": "def download_file_by_name(url, target_folder, file_name, mkdir=False):\n    __hdr__ = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}\n    if (not os.path.isdir(target_folder)):\n        if mkdir:\n            preparedir(target_folder)\n        else:\n            created = preparedir(target_folder, False)\n            if (not created):\n                raise ValueError(('Failed to find %s.' % target_folder))\n    file_path = os.path.join(target_folder, file_name)\n    if (sys.version_info < (3, 0)):\n        _download_py2(url, file_path, __hdr__)\n    else:\n        _download_py3(url, file_path, __hdr__)", "docstring": "Download a file to a directory.\n\nArgs:\nurl: A string to a valid URL.\ntarget_folder: Target folder for download (e.g. c:/ladybug)\nfile_name: File name (e.g. testPts.zip).\nmkdir: Set to True to create the directory if doesn't exist (Default: False)", "source": "codesearchnet"}
{"code": "def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):\n    \n    X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)\n\n    \n    tokenizer.build_vocab(X_train)\n\n    process_save(X_train, y_train, tokenizer, path.join(\n        proc_data_dir, 'train.bin'), train=True, **kwargs)\n    process_save(X_val, y_val, tokenizer, path.join(\n        proc_data_dir, 'val.bin'), **kwargs)\n    process_save(X_test, y_test, tokenizer, path.join(\n        proc_data_dir, 'test.bin'), **kwargs)", "docstring": "Setup data while splitting into a training, validation, and test set.\n\nArgs:\nX: text data,\ny: data labels,\ntokenizer: A Tokenizer instance\nproc_data_dir: Directory for the split and processed data", "source": "juraj-google-style"}
{"code": "def _map_seqprop_resnums_to_structprop_chain_index(self, resnums, seqprop=None, structprop=None, chain_id=None, use_representatives=False):\n    resnums = ssbio.utils.force_list(resnums)\n    if use_representatives:\n        seqprop = self.representative_sequence\n        structprop = self.representative_structure\n        chain_id = self.representative_chain\n        if (not structprop):\n            raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')\n    elif ((not seqprop) or (not structprop) or (not chain_id)):\n        raise ValueError('Please specify sequence, structure, and chain ID')\n    if self.representative_structure:\n        if (structprop.id == self.representative_structure.id):\n            full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')\n        else:\n            full_structure_id = '{}-{}'.format(structprop.id, chain_id)\n    else:\n        full_structure_id = '{}-{}'.format(structprop.id, chain_id)\n    aln_id = '{}_{}'.format(seqprop.id, full_structure_id)\n    access_key = '{}_chain_index'.format(aln_id)\n    if (access_key not in seqprop.letter_annotations):\n        raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))\n    chain_index_mapping = seqprop.letter_annotations[access_key]\n    resnum_to_chain_index = {}\n    for x in resnums:\n        ix = (chain_index_mapping[(x - 1)] - 1)\n        if np.isnan(ix):\n            log.warning('{}-{}, {}: no equivalent residue found in structure sequence'.format(structprop.id, chain_id, x))\n        else:\n            resnum_to_chain_index[int(x)] = int(ix)\n    return resnum_to_chain_index", "docstring": "Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide\na mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!\n\nArgs:\nresnums (int, list): Residue numbers in the sequence\nseqprop (SeqProp): SeqProp object\nstructprop (StructProp): StructProp object\nchain_id (str): Chain ID to map to index\nuse_representatives (bool): If representative sequence/structure/chain should be used in mapping\n\nReturns:\ndict: Mapping of resnums to indices", "source": "codesearchnet"}
{"code": "def cleanup(pin=None, assert_exists=False):\n    \n    if pin is None:\n        \n        for pin in list(_open):\n            cleanup(pin)\n        return\n    if not isinstance(pin, int):\n        raise TypeError(\"pin must be an int, got: {}\".format(pin))\n\n    state = _open.get(pin)\n    if state is None:\n        if assert_exists:\n            raise ValueError(\"pin {} was not setup\".format(pin))\n        return\n    state.value.close()\n    state.direction.close()\n    if os.path.exists(gpiopath(pin)):\n        log.debug(\"Unexporting pin {0}\".format(pin))\n        with _export_lock:\n            with open(pjoin(gpio_root, 'unexport'), 'w') as f:\n                _write(f, pin)\n\n    del _open[pin]", "docstring": "Cleanup the pin by closing and unexporting it.\n\nArgs:\npin (int, optional): either the pin to clean up or None (default).\nIf None, clean up all pins.\nassert_exists: if True, raise a ValueError if the pin was not\nsetup. Otherwise, this function is a NOOP.", "source": "juraj-google-style"}
{"code": "def get_name(principal):\n    if isinstance(principal, pywintypes.SIDType):\n        sid_obj = principal\n    else:\n        if (principal is None):\n            principal = 'S-1-0-0'\n        try:\n            sid_obj = win32security.ConvertStringSidToSid(principal)\n        except pywintypes.error:\n            try:\n                sid_obj = win32security.LookupAccountName(None, principal)[0]\n            except pywintypes.error:\n                sid_obj = principal\n    try:\n        return win32security.LookupAccountSid(None, sid_obj)[0]\n    except (pywintypes.error, TypeError) as exc:\n        message = 'Error resolving \"{0}\"'.format(principal)\n        if (type(exc) == pywintypes.error):\n            win_error = win32api.FormatMessage(exc.winerror).rstrip('\\n')\n            message = '{0}: {1}'.format(message, win_error)\n        log.exception(message)\n        raise CommandExecutionError(message, exc)", "docstring": "Gets the name from the specified principal.\n\nArgs:\n\nprincipal (str):\nFind the Normalized name based on this. Can be a PySID object, a SID\nstring, or a user name in any capitalization.\n\n.. note::\nSearching based on the user name can be slow on hosts connected\nto large Active Directory domains.\n\nReturns:\nstr: The name that corresponds to the passed principal\n\nUsage:\n\n.. code-block:: python\n\nsalt.utils.win_dacl.get_name('S-1-5-32-544')\nsalt.utils.win_dacl.get_name('adminisTrators')", "source": "codesearchnet"}
{"code": "def cholesky(self, name: str='cholesky') -> 'LinearOperator':\n    if not self._can_use_cholesky():\n        raise ValueError('Cannot take the Cholesky decomposition: Not a positive definite self adjoint matrix.')\n    with self._name_scope(name):\n        return self._linop_cholesky()", "docstring": "Returns a Cholesky factor as a `LinearOperator`.\n\nGiven `A` representing this `LinearOperator`, if `A` is positive definite\nself-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky\ndecomposition.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`LinearOperator` which represents the lower triangular matrix\nin the Cholesky decomposition.\n\nRaises:\nValueError: When the `LinearOperator` is not hinted to be positive\ndefinite and self adjoint.", "source": "github-repos"}
{"code": "def to_list(self):\n    if not isinstance(self.row_splits, ops.EagerTensor):\n        raise ValueError('to_list can only be used in eager mode.')\n    row_splits = self.row_splits.numpy().tolist()\n    values = self.values\n    if isinstance(values, RaggedTensor):\n        return [values[row_splits[i]:row_splits[i + 1]].to_list() for i in range(len(row_splits) - 1)]\n    else:\n        if hasattr(values, 'numpy'):\n            values_as_list = values.numpy().tolist()\n        elif hasattr(values, 'to_list'):\n            values_as_list = values.to_list()\n        else:\n            raise ValueError('values must be convertible to a list')\n        return [values_as_list[row_splits[i]:row_splits[i + 1]] for i in range(len(row_splits) - 1)]", "docstring": "Returns a nested Python `list` with the values for this `RaggedTensor`.\n\nRequires that `rt` was constructed in eager execution mode.\n\nReturns:\nA nested Python `list`.", "source": "github-repos"}
{"code": "def _get_events_data(object_key: str) -> List[dict]:\n    events_data = []\n    key = _keys.events_data(object_key)\n    for event_id in _get_events_list(object_key):\n        event_dict = literal_eval(DB.get_hash_value(key, event_id))\n        events_data.append(event_dict)\n    return events_data", "docstring": "Get the list of event data for the object with the specified key.\n\nArgs:\nobject_key (str): Key of an object in the database.", "source": "codesearchnet"}
{"code": "def get_arp_table(self, switch_ip, ip=None, mac=None, interf=None, arp_type=None):\n    node = natlas_node(switch_ip)\n    if (node.try_snmp_creds(self.config.snmp_creds) == 0):\n        return []\n    arp = node.get_arp_table()\n    if (arp == None):\n        return []\n    if ((((ip == None) & (mac == None)) & (interf == None)) & (arp_type == None)):\n        return arp\n    interf = (str(interf) if vlan else None)\n    ret = []\n    for a in arp:\n        if (ip != None):\n            if (re.match(ip, a.ip) == None):\n                continue\n        if (mac != None):\n            if (re.match(mac, a.mac) == None):\n                continue\n        if (interf != None):\n            if (re.match(interf, str(a.interf)) == None):\n                continue\n        if (arp_type != None):\n            if (re.match(arp_type, a.arp_type) == None):\n                continue\n        ret.append(a)\n    return ret", "docstring": "Get the ARP table from a switch.\n\nArgs:\nswitch_ip           IP address of the device\nip                  Filter results by IP (regex)\nmac                 Filter results by MAC (regex)\ninterf              Filter results by INTERFACE (regex)\narp_type            Filter results by ARP Type\n\nReturn:\nArray of natlas_arp objects", "source": "codesearchnet"}
{"code": "def get_int_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH, min_int=_MIN_INT, max_int=_MAX_INT):\n    length = self.get_int(min_length, max_length)\n    return self.fdp.ConsumeIntListInRange(length, min_int, max_int)", "docstring": "Consume a signed integer list with given constraints.\n\nArgs:\nmin_length: The minimum length of the list.\nmax_length: The maximum length of the list.\nmin_int: Minimum allowed integer.\nmax_int: Maximum allowed integer.\n\nReturns:\nConsumed integer list based on input bytes and constraints.", "source": "github-repos"}
{"code": "def match_regex(self, regex: Pattern, required: bool = False,\n                    meaning: str = \"\") -> str:\n        \n        mo = regex.match(self.input, self.offset)\n        if mo:\n            self.offset = mo.end()\n            return mo.group()\n        if required:\n            raise UnexpectedInput(self, meaning)", "docstring": "Parse input based on a regular expression .\n\nArgs:\nregex: Compiled regular expression object.\nrequired: Should the exception be raised on unexpected input?\nmeaning: Meaning of `regex` (for use in error messages).\n\nRaises:\nUnexpectedInput: If no syntactically correct keyword is found.", "source": "juraj-google-style"}
{"code": "def getline(self, lnum=None):\n        \n        return self._vim.current.buffer[lnum] if lnum else self._vim.current.line", "docstring": "Get a line from the current buffer.\n\nArgs:\nlnum (Optional[str]): Number of the line to get, current if ``None``.\n\nTodo:\n- Give this more behavior of Vim ``getline()``?\n- ``buffer[index]`` is zero-based, this is probably too confusing", "source": "juraj-google-style"}
{"code": "def get_config():\n    cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*'\n    try:\n        raw_config = _pshell(cmd, ignore_retcode=True)\n    except CommandExecutionError as exc:\n        if ('Current configuration does not exist' in exc.info['stderr']):\n            raise CommandExecutionError('Not Configured')\n        raise\n    config = dict()\n    if raw_config:\n        if ('ConfigurationName' in raw_config[0]):\n            config[raw_config[0]['ConfigurationName']] = {}\n        for item in raw_config:\n            config[item['ConfigurationName']][item['ResourceId']] = {}\n            for key in item:\n                if (key not in ['ConfigurationName', 'ResourceId']):\n                    config[item['ConfigurationName']][item['ResourceId']][key] = item[key]\n    return config", "docstring": "Get the current DSC Configuration\n\nReturns:\ndict: A dictionary representing the DSC Configuration on the machine\n\nRaises:\nCommandExecutionError: On failure\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dsc.get_config", "source": "codesearchnet"}
{"code": "def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):\n    grid_h = tf.range(grid_size, dtype=tf.float32)\n    grid_w = tf.range(grid_size, dtype=tf.float32)\n    grid = tf.meshgrid(grid_w, grid_h)\n    grid = tf.stack(grid, axis=0)\n    grid = tf.reshape(grid, [2, 1, grid_size, grid_size])\n    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n    if add_cls_token:\n        pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0)\n    return pos_embed", "docstring": "Create 2D sin/cos positional embeddings.\n\nArgs:\nembed_dim (`int`):\nEmbedding dimension.\ngrid_size (`int`):\nThe grid height and width.\nadd_cls_token (`bool`, *optional*, defaults to `False`):\nWhether or not to add a classification (CLS) token.\n\nReturns:\n(`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position\nembeddings (with or without classification token)", "source": "github-repos"}
{"code": "def remove_node(self, node):\n        \n        if node not in self.node_list:\n            return\n        self.node_list.remove(node)\n        \n        for n in self.node_list:\n            n.link_list = [link for link in n.link_list if\n                           link.target != node]", "docstring": "Remove a node from ``self.node_list`` and links pointing to it.\n\nIf ``node`` is not in the graph, do nothing.\n\nArgs:\nnode (Node): The node to be removed\n\nReturns: None\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> graph.remove_node(node_1)\n>>> len(graph.node_list)\n0", "source": "juraj-google-style"}
{"code": "def get_help_data(filepath):\n    try:\n        with open(filepath, 'r') as file:\n            return _json.load(file, object_pairs_hook=OrderedDict)\n    except Exception as e:\n        logger.error('Could not load file {}'.format(filepath))\n        logger.exception(e)\n        return {}", "docstring": "Get the json data from a help file\n\nArgs:\nfilepath (str): The file path for the help file\n\nReturns:\ndata: The json data from a help file", "source": "codesearchnet"}
{"code": "def _get_data_iterator_from_dataset(dataset, dataset_type_spec):\n    if dataset_type_spec is list:\n        if len(dataset) == 0:\n            raise ValueError('Received an empty list dataset. Please provide a non-empty list of arrays.')\n        expected_shape = None\n        for i, element in enumerate(dataset):\n            if not isinstance(element, np.ndarray):\n                raise ValueError(f'Expected a list of `numpy.ndarray` objects,Received: {type(element)} at index {i}.')\n            if expected_shape is None:\n                expected_shape = element.shape\n            elif element.shape[0] != expected_shape[0]:\n                raise ValueError(f'Received a list of NumPy arrays with different lengths.Mismatch found at index {i}, Expected shape={expected_shape} Received shape={np.array(element).shape}.Please provide a list of NumPy arrays of the same length.')\n        return iter(zip(*dataset))\n    elif dataset_type_spec is tuple:\n        if len(dataset) == 0:\n            raise ValueError('Received an empty list dataset.Please provide a non-empty tuple of arrays.')\n        expected_shape = None\n        for i, element in enumerate(dataset):\n            if not isinstance(element, np.ndarray):\n                raise ValueError(f'Expected a tuple of `numpy.ndarray` objects,Received: {type(element)} at index {i}.')\n            if expected_shape is None:\n                expected_shape = element.shape\n            elif element.shape[0] != expected_shape[0]:\n                raise ValueError(f'Received a tuple of NumPy arrays with different lengths.Mismatch found at index {i}, Expected shape={expected_shape} Received shape={np.array(element).shape}.Please provide a tuple of NumPy arrays of the same length.')\n        return iter(zip(*dataset))\n    elif dataset_type_spec is tf.data.Dataset:\n        if is_batched(dataset):\n            dataset = dataset.unbatch()\n        return iter(dataset)\n    elif is_torch_dataset(dataset):\n        return iter(dataset)\n    elif dataset_type_spec is np.ndarray:\n        return iter(dataset)\n    raise ValueError(f'Invalid dataset_type_spec: {dataset_type_spec}')", "docstring": "Get the iterator from a dataset.\n\nArgs:\ndataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,\nor a list/tuple of arrays.\ndataset_type_spec: The type of the dataset.\n\nReturns:\niterator: An `iterator` object.", "source": "github-repos"}
{"code": "def add(self, message):\n    if (not isinstance(message, ValidationMessage)):\n        raise TypeError('Argument must of type ValidationMessage')\n    self.messages.append(message)", "docstring": "Add a new validation message to this instance.\n\nArgs:\nmessage (ValidationMessage): A validation message to add to this instance's list of messages.", "source": "codesearchnet"}
{"code": "def to_json(self):\n    (d, ps) = self._to_json()\n    if (len(ps) == 0):\n        return {'name': d}\n    else:\n        return {'name': d, 'args': ps}", "docstring": "Convert to json serializable dictionary.\n\nReturns:\ndict: dictionary of descriptor", "source": "codesearchnet"}
{"code": "def get_labels(self, plt, label_fontsize=10):\n        \n\n        \n        if len(self.slab_regions) > 1:\n            label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0])/2\n            if abs(self.slab_regions[0][0]-self.slab_regions[0][1]) > \\\n                    abs(self.slab_regions[1][0]-self.slab_regions[1][1]):\n                label_in_bulk = self.slab_regions[0][1]/2\n            else:\n                label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2\n        else:\n            label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1])/2\n            if self.slab_regions[0][0] > 1-self.slab_regions[0][1]:\n                label_in_vac = self.slab_regions[0][0] / 2\n            else:\n                label_in_vac = (1 + self.slab_regions[0][1]) / 2\n\n        plt.plot([0, 1], [self.vacuum_locpot]*2, 'b--', zorder=-5, linewidth=1)\n        xy = [label_in_bulk, self.vacuum_locpot+self.ave_locpot*0.05]\n        plt.annotate(r\"$V_{vac}=%.2f$\" %(self.vacuum_locpot), xy=xy,\n                     xytext=xy, color='b', fontsize=label_fontsize)\n\n        \n        plt.plot([0, 1], [self.efermi]*2, 'g--',\n                 zorder=-5, linewidth=3)\n        xy = [label_in_bulk, self.efermi+self.ave_locpot*0.05]\n        plt.annotate(r\"$E_F=%.2f$\" %(self.efermi), xytext=xy,\n                     xy=xy, fontsize=label_fontsize, color='g')\n\n        \n        plt.plot([0, 1], [self.ave_bulk_p]*2, 'r--', linewidth=1., zorder=-1)\n        xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]\n        plt.annotate(r\"$V^{interior}_{slab}=%.2f$\" % (self.ave_bulk_p),\n                     xy=xy, xytext=xy, color='r', fontsize=label_fontsize)\n\n        \n        plt.plot([label_in_vac]*2, [self.efermi, self.vacuum_locpot],\n                 'k--', zorder=-5, linewidth=2)\n        xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]\n        plt.annotate(r\"$\\Phi=%.2f$\" %(self.work_function),\n                     xy=xy, xytext=xy, fontsize=label_fontsize)\n\n        return plt", "docstring": "Handles the optional labelling of the plot with relevant quantities\nArgs:\nplt (plt): Plot of the locpot vs c axis\nlabel_fontsize (float): Fontsize of labels\nReturns Labelled plt", "source": "juraj-google-style"}
{"code": "def conv3d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = nn_ops.conv3d(input_tensor, self.filters, strides=[1, 1, 2, 1, 1], dilations=[1, 1, 1, 1, 1], padding=padding, data_format='NDHWC')\n    if has_bias:\n        out = nn_ops.bias_add(out, self.bias)\n    if activation_fn is not None:\n        out = activation_fn(out)\n    return {'output': out}", "docstring": "Performs a 3D convolution operation.\n\nArgs:\ninput_tensor: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def execute(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=(- 1)):\n    from crianza import compiler\n    code = compiler.compile(parser.parse(source), optimize=optimize)\n    machine = Machine(code, output=output, input=input)\n    return machine.run(steps)", "docstring": "Compiles and runs program, returning the machine used to execute the\ncode.\n\nArgs:\noptimize: Whether to optimize the code after parsing it.\noutput: Stream which program can write output to.\ninput: Stream which program can read input from.\nsteps: An optional maximum number of instructions to execute on the\nvirtual machine.  Set to -1 for no limit.\n\nReturns:\nA Machine instance.", "source": "codesearchnet"}
{"code": "def stoichiometry( self ):\n        \n        return Counter( { label: number for label, number in zip( self.atoms, self.atom_numbers ) } )", "docstring": "Stoichiometry for this POSCAR, as a Counter.\ne.g. AB_2O_4 -> Counter( { 'A': 1, 'B': 2, O: 4 } )\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def sdk_version(self, value):\n        \n        if value == self._defaults['ai.internal.sdkVersion'] and 'ai.internal.sdkVersion' in self._values:\n            del self._values['ai.internal.sdkVersion']\n        else:\n            self._values['ai.internal.sdkVersion'] = value", "docstring": "The sdk_version property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def restore_from_checkpoint(self, session, inception_checkpoint_file, trained_checkpoint_file):\n    inception_exclude_scopes = ['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step', 'final_ops']\n    reader = tf.train.NewCheckpointReader(inception_checkpoint_file)\n    var_to_shape_map = reader.get_variable_to_shape_map()\n    all_vars = tf.contrib.slim.get_variables_to_restore(exclude=inception_exclude_scopes)\n    inception_vars = {var.op.name: var for var in all_vars if (var.op.name in var_to_shape_map)}\n    inception_saver = tf.train.Saver(inception_vars)\n    inception_saver.restore(session, inception_checkpoint_file)\n    trained_vars = tf.contrib.slim.get_variables_to_restore(exclude=(inception_exclude_scopes + inception_vars.keys()))\n    trained_saver = tf.train.Saver(trained_vars)\n    trained_saver.restore(session, trained_checkpoint_file)", "docstring": "To restore model variables from the checkpoint file.\n\nThe graph is assumed to consist of an inception model and other\nlayers including a softmax and a fully connected layer. The former is\npre-trained and the latter is trained using the pre-processed data. So\nwe restore this from two checkpoint files.\nArgs:\nsession: The session to be used for restoring from checkpoint.\ninception_checkpoint_file: Path to the checkpoint file for the Inception\ngraph.\ntrained_checkpoint_file: path to the trained checkpoint for the other\nlayers.", "source": "codesearchnet"}
{"code": "def __init__(self, encoder=None, encoder_config=None):\n    \n    if encoder and encoder_config:\n      raise ValueError(\"If encoder is provided, encoder_config must be None.\")\n    if encoder:\n      encoder_config = text_lib.TextEncoderConfig(\n          encoder_cls=type(encoder),\n          vocab_size=encoder.vocab_size)\n    elif encoder_config:\n      encoder = encoder_config.encoder\n\n    self._encoder = encoder\n    self._encoder_config = encoder_config", "docstring": "Constructs a Text FeatureConnector.\n\nArgs:\nencoder: `tfds.features.text.TextEncoder`, an encoder that can convert\ntext to integers. If None, the text will be utf-8 byte-encoded.\nencoder_config: `tfds.features.text.TextEncoderConfig`, needed if\nrestoring from a file with `load_metadata`.", "source": "juraj-google-style"}
{"code": "def profile_write(self, profile, outfile=None):\n    if (outfile is None):\n        outfile = '{}.json'.format(profile.get('profile_name').replace(' ', '_').lower())\n    fqpn = os.path.join(self.profile_dir, outfile)\n    if os.path.isfile(fqpn):\n        print('Append to File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn))\n        with open(fqpn, 'r+') as fh:\n            try:\n                data = json.load(fh, object_pairs_hook=OrderedDict)\n            except ValueError as e:\n                self.handle_error('Can not parse JSON data ({}).'.format(e))\n            data.append(profile)\n            fh.seek(0)\n            fh.write(json.dumps(data, indent=2, sort_keys=True))\n            fh.truncate()\n    else:\n        print('Create File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn))\n        with open(fqpn, 'w') as fh:\n            data = [profile]\n            fh.write(json.dumps(data, indent=2, sort_keys=True))", "docstring": "Write the profile to the output directory.\n\nArgs:\nprofile (dict): The dictionary containting the profile settings.\noutfile (str, optional): Defaults to None. The filename for the profile.", "source": "codesearchnet"}
{"code": "def _is_in_targets(self, site, targets):\n    elems = self._get_elements(site)\n    for elem in elems:\n        if (elem not in targets):\n            return False\n    return True", "docstring": "Test whether a site contains elements in the target list\n\nArgs:\nsite (Site): Site to assess\ntargets ([Element]) List of elements\nReturns:\n(boolean) Whether this site contains a certain list of elements", "source": "codesearchnet"}
{"code": "def __init__(self, model=None, env=None, options=None):\n        \n        self.event = Event.create(__name__)\n        self.options = options\n        self.model = {} if not isinstance(model, dict) else model\n        self.data = PipelineData()\n        self.data.env_list[0].update([] if env is None else env)\n        self.logger = Logger.get_logger(__name__)\n        self.variables = {}", "docstring": "Initializing pipeline with definition (loaded from a yaml file).\n\nArgs:\nmodel (dict): if you have a model defined in your pipeline definition (yaml)\nenv (dict): the env as defined (if) per matrix\noptions (dict): command line options for spline", "source": "juraj-google-style"}
{"code": "def __init__(self, x: int, y: int=1, **kwargs):\n    self.z = x + y + sum(kwargs.values())", "docstring": "Class A.\n\nArgs:\nx: The first integer.\ny: The second integer.\n**kwargs: Other arguments.", "source": "github-repos"}
{"code": "def capture_by_value(self, graph: Any, tensor: core.Tensor, name: Optional[str]=None) -> core.Tensor:\n    if isinstance(tensor, core.Value):\n        if name is None:\n            name = str(pywrap_tfe.TFE_Py_UID())\n        if tensor.dtype in dtypes.TF_VALUE_DTYPES and functools.reduce(lambda a, b: a * b, tensor.shape, 1) <= _EAGER_CONST_THRESHOLD:\n            graph_const = self.by_val_internal.get(id(tensor))\n            if graph_const is None:\n                graph_const = tensor._capture_as_const(name)\n                if graph_const is None:\n                    graph_const = self._create_placeholder_helper(graph, tensor, name)\n                self.add_or_replace(key=id(tensor), external=tensor, internal=graph_const, is_by_ref=False)\n                graph.inputs.append(graph_const)\n            graph_const._record_tape(tensor)\n            return graph_const\n        return self._create_placeholder_helper(graph, tensor, name)\n    if tensor.graph is not graph:\n        graph._validate_in_scope(tensor)\n        if name is None:\n            assert tensor.op is not None, (tensor.__class__, dir(tensor), tensor.__class__.__name__)\n            name = tensor.op.name\n        return graph._capture_helper(tensor, name)\n    return tensor", "docstring": "Captures `tensor` if it's external to this graph.\n\nIf `tensor` is from a different graph, returns a placeholder for it.\n`tensor` and the placeholder will appear in self.captures, and the\nplaceholder will appear in self.inputs.  Multiple calls to this method with\nthe same `tensor` argument will return the same placeholder. If `tensor` is\nfrom this graph, returns `tensor`.\n\nArgs:\ngraph: The FuncGraph that captures this tensor.\ntensor: Tensor. May be from this FuncGraph or a different graph.\nname: Optional name if a placeholder is created.\n\nReturns:\nTensor from this FuncGraph.\n\nRaises:\nInaccessibleTensorError: if any tensors are accessed in a manner that\nbypasses the mechanisms required for the data dependencies to be correctly\nwired.", "source": "github-repos"}
{"code": "def __init__(self, version: str = None, api_url: str = None) -> None:\n        \n\n        bel_versions = bel_specification.get_bel_versions()\n\n        \n        if not version:\n            self.version = config[\"bel\"][\"lang\"][\"default_bel_version\"]\n        else:\n            self.version = version\n\n        if self.version not in bel_versions:\n            log.warning(\n                f\"Cannot validate with invalid version: {self.version} in BEL Versions: {bel_versions}\"\n            )\n\n        if not api_url:\n            self.api_url = config[\"bel_api\"][\"servers\"][\"api_url\"]\n        else:\n            self.api_url = api_url\n\n        \n        \n        self.validation_messages = []\n\n        \n        self.spec = bel_specification.get_specification(self.version)\n\n        \n\n        \n        \n        try:\n            parser_fn = self.spec[\"admin\"][\"parser_fn\"]\n\n            parser_name = os.path.basename(parser_fn).replace(\".py\", \"\")\n            module_spec = importlib.util.spec_from_file_location(parser_name, parser_fn)\n            imported_parser = importlib.util.module_from_spec(module_spec)\n            module_spec.loader.exec_module(imported_parser)\n            self.parser = imported_parser.BELParser()\n        except Exception as e:\n            \n            raise bel_ex.NoParserFound(f\"Version: {self.version} Msg: {e}\")", "docstring": "Initialize BEL object used for validating/processing/etc BEL statements\n\nArgs:\nversion (str): BEL Version, defaults to config['bel']['lang']['default_bel_version']\napi_url (str): BEL API endpoint,  defaults to config['bel_api']['servers']['api_url']", "source": "juraj-google-style"}
{"code": "def CompileReport(self, mediator):\n    \n    \n    \n    path_specs_per_labels_counter = collections.Counter()\n    tags = []\n    while self._ContinueReportCompilation():\n      try:\n        self._LogProgressUpdateIfReasonable()\n        hash_analysis = self.hash_analysis_queue.get(\n            timeout=self._analysis_queue_timeout)\n      except Queue.Empty:\n        \n        \n        continue\n      pathspecs, labels, new_tags = self._HandleHashAnalysis(\n          hash_analysis)\n\n      tags.extend(new_tags)\n      for label in labels:\n        path_specs_per_labels_counter[label] += len(pathspecs)\n\n    self._analyzer.SignalAbort()\n\n    lines_of_text = ['{0:s} hash tagging results'.format(self.NAME)]\n    for label, count in sorted(path_specs_per_labels_counter.items()):\n      line_of_text = (\n          '{0:d} path specifications tagged with label: {1:s}'.format(\n              count, label))\n      lines_of_text.append(line_of_text)\n    lines_of_text.append('')\n    report_text = '\\n'.join(lines_of_text)\n\n    for event_tag in tags:\n      mediator.ProduceEventTag(event_tag)\n\n    return reports.AnalysisReport(\n        plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: report.", "source": "juraj-google-style"}
{"code": "def watch(static_root, watch_paths=None, on_reload=None, host='localhost', port=5555, server_base_path='/', watcher_interval=1.0, recursive=True, open_browser=True, open_browser_delay=1.0):\n    server = httpwatcher.HttpWatcherServer(static_root, watch_paths=watch_paths, on_reload=on_reload, host=host, port=port, server_base_path=server_base_path, watcher_interval=watcher_interval, recursive=recursive, open_browser=open_browser, open_browser_delay=open_browser_delay)\n    server.listen()\n    try:\n        tornado.ioloop.IOLoop.current().start()\n    except KeyboardInterrupt:\n        server.shutdown()", "docstring": "Initialises an HttpWatcherServer to watch the given path for changes. Watches until the IO loop\nis terminated, or a keyboard interrupt is intercepted.\n\nArgs:\nstatic_root: The path whose contents are to be served and watched.\nwatch_paths: The paths to be watched for changes. If not supplied, this defaults to the static root.\non_reload: An optional callback to pass to the watcher server that will be executed just before the\nserver triggers a reload in connected clients.\nhost: The host to which to bind our server.\nport: The port to which to bind our server.\nserver_base_path: If the content is to be served from a non-standard base path, specify it here.\nwatcher_interval: The maximum refresh rate of the watcher server.\nrecursive: Whether to monitor the watch path recursively.\nopen_browser: Whether or not to automatically attempt to open the user's browser at the root URL of\nthe project (default: True).\nopen_browser_delay: The number of seconds to wait before attempting to open the user's browser.", "source": "codesearchnet"}
{"code": "def most_visited_pages_stats():\n    \n    stats = {'more_than_10': [], 'less_than_10': {}}\n\n    counter = Counter(list(RequestLog.objects.values_list('url', flat=True)))\n    most_visited_pages = counter.most_common()\n    bounds = (10000, 1000, 100, 10)\n    subsets = [[] for _ in bounds]\n\n    for u, c in most_visited_pages:\n        if url_is_ignored(u):\n            continue\n        if c >= bounds[0]:\n            subsets[0].append([u, c])\n        elif c < bounds[-1]:\n            subsets[-1].append([u, c])\n        else:\n            for i, bound in enumerate(bounds[:-1]):\n                if bound > c >= bounds[i+1]:\n                    subsets[i+1].append([u, c])\n                    break\n\n    stats['more_than_10'] = [\n        {'bound': bound, 'subset': subset}\n        for bound, subset in zip(bounds[:-1], subsets[:-1])]\n\n    for subset in subsets[:-1]:\n        for uc in subset:\n            if url_is_project(uc[0]):\n                if url_is_asset(uc[0]):\n                    uc.append(ASSET)\n                else:\n                    uc.append(PROJECT)\n            else:\n                if url_is_asset(uc[0]):\n                    uc.append(OLD_ASSET)\n                elif url_is_common_asset(uc[0]):\n                    uc.append(COMMON_ASSET)\n                elif url_is_old_project(uc[0]):\n                    uc.append(OLD_PROJECT)\n                elif url_is_false_negative(uc[0]):\n                    uc.append(FALSE_NEGATIVE)\n                else:\n                    uc.append(SUSPICIOUS)\n\n    occurrences = {name: {'distinct': 0, 'total': 0}\n                   for name in set(URL_TYPE.keys()) - {IGNORED}}\n\n    for u, c in subsets[-1]:\n        if url_is_project(u):\n            if url_is_asset(u):\n                occurrences[ASSET]['distinct'] += 1\n                occurrences[ASSET]['total'] += c\n            else:\n                occurrences[PROJECT]['distinct'] += 1\n                occurrences[PROJECT]['total'] += c\n        else:\n            if url_is_asset(u):\n                occurrences[OLD_ASSET]['distinct'] += 1\n                occurrences[OLD_ASSET]['total'] += c\n            elif url_is_common_asset(u):\n                occurrences[COMMON_ASSET]['distinct'] += 1\n                occurrences[COMMON_ASSET]['total'] += c\n            elif url_is_old_project(u):\n                occurrences[OLD_PROJECT]['distinct'] += 1\n                occurrences[OLD_PROJECT]['total'] += c\n            elif url_is_false_negative(u):\n                occurrences[FALSE_NEGATIVE]['distinct'] += 1\n                occurrences[FALSE_NEGATIVE]['total'] += c\n            else:\n                occurrences[SUSPICIOUS]['distinct'] += 1\n                occurrences[SUSPICIOUS]['total'] += c\n\n    stats['less_than_10'] = occurrences\n\n    return stats", "docstring": "Get stats for most visited pages.\n\nArgs:\nlogs (list): logs data to use.\n\nReturns:\ndict: more_than_10 and less_than_10: list of dict (bound + url list).", "source": "juraj-google-style"}
{"code": "def save_json(dictionary, path, pretty=False, sortkeys=False):\n    \n    \n    with open(path, 'w') as f:\n        if pretty:\n            indent = 2\n            separators = (',', ': ')\n        else:\n            indent = None\n            separators = (', ', ': ')\n        json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)", "docstring": "Save dictionary to JSON file preserving order if it is an OrderedDict\n\nArgs:\ndictionary (Dict): Python dictionary to save\npath (str): Path to JSON file\npretty (bool): Whether to pretty print. Defaults to False.\nsortkeys (bool): Whether to sort dictionary keys. Defaults to False.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def set_of_vars(lovs):\n    return set((var for pvars in lovs for svars in pvars for var in svars))", "docstring": "Build set of variables from list.\n\nArgs:\nlovs: nested lists of variables such as the one produced by\n:func:`list_of_vars`.\nReturns:\nset of str: flattened set of all the variables present in the\nnested lists.", "source": "codesearchnet"}
{"code": "def __init__(self, daily_req_limit=None, dup_interval=None):\n    \n    self.daily_req_limit = daily_req_limit\n    self.dup_interval = dup_interval", "docstring": "Create flow throttler object.\n\nArgs:\ndaily_req_limit: Number of flows allow per user per client. Integer.\ndup_interval: rdfvalue.Duration time during which duplicate flows will be\nblocked.", "source": "juraj-google-style"}
{"code": "def __init__(self, exprs):\n    self.exprs = exprs", "docstring": "Initialize a conjunction.\n\nArgs:\nexprs: A set. The subterms.", "source": "github-repos"}
{"code": "def build(self):\n    if self.colour:\n        embed = discord.Embed(title=self.title, type='rich', description=self.description, colour=self.colour)\n    else:\n        embed = discord.Embed(title=self.title, type='rich', description=self.description)\n    if self.thumbnail:\n        embed.set_thumbnail(url=self.thumbnail)\n    if self.image:\n        embed.set_image(url=self.image)\n    embed.set_author(name='Modis', url='https:\n    for pack in self.datapacks:\n        embed.add_field(name=pack[0], value=pack[1], inline=pack[2])\n    return embed", "docstring": "Builds Discord embed GUI\n\nReturns:\ndiscord.Embed: Built GUI", "source": "codesearchnet"}
{"code": "def ensure_dir_path(self, path, relative=False):\n    if (not relative):\n        rel_path = self.relpath(path)\n    else:\n        rel_path = path\n    if self.is_locator(rel_path, relative=True):\n        path = path.rstrip('/')\n    elif rel_path:\n        path = (path.rstrip('/') + '/')\n    return path", "docstring": "Ensure the path is a dir path.\n\nShould end with '/' except for schemes and locators.\n\nArgs:\npath (str): Path or URL.\nrelative (bool): Path is relative to current root.\n\nReturns:\npath: dir path", "source": "codesearchnet"}
{"code": "def fields_to_string(fields, values={}):\n    items = [repr(field['name']) + ':' + repr(values.get(field['name'], field.get('default', ''))) + ',' + ('  \n    return '{\\n  %s\\n}' % '\\n  '.join(items) if items else '{}'", "docstring": "Converts fields to a dictionary of parameters as a string.\n\nUsed to generate input blocks in generated code.\n\nFor example:\n{\n'auth':'user', # authentication to use for call.\n'name':'value', # parameter to pass to function.\n}\n\nArgs:\n- fields: (list) Contains {\"field\":{...}} objects to be rendered as imputs.\n- values: (dict) Default values to use for each field.\n\nReturns:\nString representing python code to be written to a generated file.", "source": "github-repos"}
{"code": "def impulse_noise(x, severity=1):\n  \n  c = [.03, .06, .09, 0.17, 0.27][severity - 1]\n  x = tfds.core.lazy_imports.skimage.util.random_noise(\n      np.array(x) / 255., mode='s&p', amount=c)\n  x_clip = np.clip(x, 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Impulse noise corruption to images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Added impulse noise.", "source": "juraj-google-style"}
{"code": "def repr(self, changed_widgets=None):\n        \n        if changed_widgets is None:\n            changed_widgets = {}\n        local_changed_widgets = {}\n        _innerHTML = self.innerHTML(local_changed_widgets)\n\n        if self._ischanged() or ( len(local_changed_widgets) > 0 ):\n            self._backup_repr = ''.join(('<', self.type, ' ', self._repr_attributes, '>', \n                                        _innerHTML, '</', self.type, '>'))\n            \n            \n        if self._ischanged():\n            \n            \n            changed_widgets[self] = self._backup_repr\n            self._set_updated()\n        else:\n            changed_widgets.update(local_changed_widgets)\n        return self._backup_repr", "docstring": "It is used to automatically represent the object to HTML format\npacks all the attributes, children and so on.\n\nArgs:\nchanged_widgets (dict): A dictionary containing a collection of tags that have to be updated.\nThe tag that have to be updated is the key, and the value is its textual repr.", "source": "juraj-google-style"}
{"code": "def diff_bisectSplit(self, text1, text2, x, y, deadline):\n    text1a = text1[:x]\n    text2a = text2[:y]\n    text1b = text1[x:]\n    text2b = text2[y:]\n    diffs = self.diff_main(text1a, text2a, False, deadline)\n    diffsb = self.diff_main(text1b, text2b, False, deadline)\n    return (diffs + diffsb)", "docstring": "Given the location of the 'middle snake', split the diff in two parts\nand recurse.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\nx: Index of split point in text1.\ny: Index of split point in text2.\ndeadline: Time at which to bail if not yet complete.\n\nReturns:\nArray of diff tuples.", "source": "codesearchnet"}
{"code": "def modify_model_backprop(model, backprop_modifier):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    \n    modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))\n    if modified_model is not None:\n        return modified_model\n\n    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')\n    try:\n        \n        model.save(model_path)\n\n        \n        modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)\n        if modifier_fn is None:\n            raise ValueError(\"'{}' modifier is not supported\".format(backprop_modifier))\n        modifier_fn(backprop_modifier)\n\n        \n        with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):\n            \n            modified_model = load_model(model_path)\n\n            \n            _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model\n            return modified_model\n    finally:\n        os.remove(model_path)", "docstring": "Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior.\n\nArgs:\nmodel:  The `keras.models.Model` instance.\nbackprop_modifier: One of `{'guided', 'rectified'}`\n\nReturns:\nA copy of model with modified activations for backwards pass.", "source": "juraj-google-style"}
{"code": "def get_facets(qhull_data, joggle=False):\n    if joggle:\n        return ConvexHull(qhull_data, qhull_options='QJ i').simplices\n    else:\n        return ConvexHull(qhull_data, qhull_options='Qt i').simplices", "docstring": "Get the simplex facets for the Convex hull.\n\nArgs:\nqhull_data (np.ndarray): The data from which to construct the convex\nhull as a Nxd array (N being number of data points and d being the\ndimension)\njoggle (boolean): Whether to joggle the input to avoid precision\nerrors.\n\nReturns:\nList of simplices of the Convex Hull.", "source": "codesearchnet"}
{"code": "def pull_reply(self, param=None, must=[APIKEY]):\n        \n        param = {} if param is None else param\n        r = self.verify_param(param, must)\n        if not r.is_succ():\n            return r\n        h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp[SMS_REPLY] if SMS_REPLY in rsp else None, VERSION_V2:rsp}[self.version()])\n        return self.path('pull_reply.json').post(param, h, r)", "docstring": "获取回复短信\n\n参数名 类型 是否必须 描述 示例\napikey String 是 用户唯一标识 9b11127a9701975c734b8aee81ee3526\npage_size Integer 否 每页个数，最大100个，默认20个 20\n\nArgs:\nparam:\nResults:\nResult", "source": "juraj-google-style"}
{"code": "def survey_basis(self, keys=None, alias=None, step=None):\n    if (keys is None):\n        keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)]\n    else:\n        keys = utils.flatten_list(keys)\n    (starts, stops, steps) = ([], [], [])\n    for k in keys:\n        d = self.get_curve(k, alias=alias)\n        if (keys and (d is None)):\n            continue\n        try:\n            starts.append(d.basis[0])\n            stops.append(d.basis[(- 1)])\n            steps.append((d.basis[1] - d.basis[0]))\n        except Exception as e:\n            pass\n    if (starts and stops and steps):\n        step = (step or min(steps))\n        return np.arange(min(starts), (max(stops) + 1e-09), step)\n    else:\n        return None", "docstring": "Look at the basis of all the curves in ``well.data`` and return a\nbasis with the minimum start, maximum depth, and minimum step.\n\nArgs:\nkeys (list): List of strings: the keys of the data items to\nsurvey, if not all of them.\nalias (dict): a dictionary mapping mnemonics to lists of mnemonics.\nstep (float): a new step, if you want to change it.\n\nReturns:\nndarray. The most complete common basis.", "source": "codesearchnet"}
{"code": "def get_url_preview(self, url, ts=None):\n        \n        params = {'url': url}\n        if ts:\n            params['ts'] = ts\n        return self._send(\n            \"GET\", \"\",\n            query_params=params,\n            api_path=\"/_matrix/media/r0/preview_url\"\n        )", "docstring": "Get preview for URL.\n\nArgs:\nurl (str): URL to get a preview\nts (double): The preferred point in time to return\na preview for. The server may return a newer\nversion if it does not have the requested\nversion available.", "source": "juraj-google-style"}
{"code": "def __write_to_fil_heavy(self, filename_out, *args, **kwargs):\n        \n\n        \n        chunk_dim = self.__get_chunk_dimensions()\n        blob_dim = self.__get_blob_dimensions(chunk_dim)\n        n_blobs = self.container.calc_n_blobs(blob_dim)\n\n        \n        n_bytes  = self.header[b'nbits'] / 8\n        with open(filename_out, \"wb\") as fileh:\n            fileh.write(generate_sigproc_header(self)) \n\n        logger.info('Using %i n_blobs to write the data.'% n_blobs)\n        for ii in range(0, n_blobs):\n            logger.info('Reading %i of %i' % (ii + 1, n_blobs))\n\n            bob = self.container.read_blob(blob_dim,n_blob=ii)\n\n            \n            with open(filename_out, \"a\") as fileh:\n                j = bob\n                if n_bytes == 4:\n                    np.float32(j.ravel()).tofile(fileh)\n                elif n_bytes == 2:\n                    np.int16(j.ravel()).tofile(fileh)\n                elif n_bytes == 1:\n                    np.int8(j.ravel()).tofile(fileh)", "docstring": "Write data to .fil file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "juraj-google-style"}
{"code": "def traverse_preorder(self, leaves=True, internal=True):\n    for node in self.root.traverse_preorder(leaves=leaves, internal=internal):\n        (yield node)", "docstring": "Perform a preorder traversal of the ``Node`` objects in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"}
{"code": "def leap_days_between(start_date, end_date):\n\n    def leap_days_since_year_0(date_tensor):\n        year = date_tensor.year()\n        month = date_tensor.month()\n        leap_years_since_0 = year \n        needs_adjustment = is_leap_year(year) & (month <= 2)\n        return leap_years_since_0 - tf.where(needs_adjustment, 1, 0)\n    return leap_days_since_year_0(end_date) - leap_days_since_year_0(start_date)", "docstring": "Calculates number of leap days (29 Feb) between two dates.\n\n'start_date' is included and 'end_date' is excluded from the period.\n\nFor example, for dates `2019-12-24` and `2024-3-10` the result is\n2: there is 29 Feb 2020 and 29 Feb 2024 between 24 Dec 2019 (inclusive) and\n10 Mar 2024 (exclusive).\n\nIf `end_date` is earlier than `start_date`, the result will be negative or\nzero.\n\nArgs:\nstart_date: DateTensor.\nend_date: DateTensor compatible with `start_date`.\n\nReturns:\nTensor of type 'int32'.", "source": "github-repos"}
{"code": "def _QueryHash(self, nsrl_socket, digest):\n    try:\n        query = 'QUERY {0:s}\\n'.format(digest).encode('ascii')\n    except UnicodeDecodeError:\n        logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))\n        return False\n    response = None\n    try:\n        nsrl_socket.sendall(query)\n        response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)\n    except socket.error as exception:\n        logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(exception))\n    if (not response):\n        return False\n    response = response.strip()\n    return (response == b'OK 1')", "docstring": "Queries nsrlsvr for a specific hash.\n\nArgs:\nnsrl_socket (socket._socketobject): socket of connection to nsrlsvr.\ndigest (str): hash to look up.\n\nReturns:\nbool: True if the hash was found, False if not or None on error.", "source": "codesearchnet"}
{"code": "def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n    variable_key = _var_key(variable)\n    slot_dict = self._slots.get(variable_key, {})\n    slot_variable = slot_dict.get(slot_name, None)\n    if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack or self._distribution_strategy):\n        initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n        slot_variable = self.add_slot(var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape())\n    if slot_variable is not None:\n        slot_variable_position.restore(slot_variable)\n    else:\n        self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)", "docstring": "Restore a slot variable's value, possibly creating it.\n\nCalled when a variable which has an associated slot variable is created or\nrestored. When executing eagerly, we create the slot variable with a\nrestoring initializer.\n\nNo new variables are created when graph building. Instead,\n_restore_slot_variable catches these after normal creation and adds restore\nops to the graph. This method is nonetheless important when graph building\nfor the case when a slot variable has already been created but `variable`\nhas just been added to a dependency graph (causing us to realize that the\nslot variable needs to be restored).\n\nArgs:\nslot_variable_position: A `trackable._CheckpointPosition` object\nindicating the slot variable `Trackable` object to be restored.\nslot_name: The name of this `Optimizer`'s slot to restore into.\nvariable: The variable object this slot is being created for.", "source": "github-repos"}
{"code": "def MapByteStream(self, byte_stream, byte_offset=0, **unused_kwargs):\n    \n    return byte_stream[byte_offset:byte_offset + self.byte_size]", "docstring": "Maps the data type on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\n\nReturns:\nobject: mapped value.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None):\n    pixel_values = pixel_values.type(self.visual.dtype)\n    image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)\n    return image_embeds", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\nimage_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):\nThe temporal, height and width of feature shape of each image in LLM.", "source": "github-repos"}
{"code": "def nic_s(msg):\n    tc = typecode(msg)\n    if (tc != 31):\n        raise RuntimeError(('%s: Not a status operation message, expecting TC = 31' % msg))\n    msgbin = common.hex2bin(msg)\n    nic_s = int(msgbin[75])\n    return nic_s", "docstring": "Obtain NIC supplement bit, TC=31 message\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: NICs number (0 or 1)", "source": "codesearchnet"}
{"code": "def get_identity_broadcaster(cls, nvals, dtype=None):\n    return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))", "docstring": "Create an identity broadcaster.\n\nTODO(martinz): an identity broadcaster can be far more efficient than a\ngeneric broadcaster. Add an optimized implementation.\nArgs:\nnvals: the number of values for the broadcaster.\ndtype: the dtype of the broadcaster, or None to use the dtype of nvals.\n\nReturns:\nan identity broadcaster from [0....nvals-1] to [0...nvals-1]", "source": "github-repos"}
{"code": "def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None):\n    length = (- 1)\n    if (category in ('snv', 'indel', 'cancer')):\n        if (ref_len == alt_len):\n            length = alt_len\n        else:\n            length = abs((ref_len - alt_len))\n    elif (category == 'sv'):\n        if (svtype == 'bnd'):\n            length = int(100000000000.0)\n        elif svlen:\n            length = abs(int(svlen))\n        elif end:\n            if (end != pos):\n                length = (end - pos)\n    return length", "docstring": "Return the length of a variant\n\nArgs:\nalt_len(int)\nref_len(int)\ncategory(str)\nsvtype(str)\nsvlen(int)", "source": "codesearchnet"}
{"code": "def load(self, path):\n    path = os.path.expandvars(os.path.expanduser(path))\n    gdg = cgaddag.gdg_load(path.encode('ascii'))\n    if (not gdg):\n        errno = ctypes.c_int.in_dll(ctypes.pythonapi, 'errno').value\n        raise OSError(errno, os.strerror(errno), path)\n    self.__del__()\n    self.gdg = gdg.contents", "docstring": "Load a GADDAG from file, replacing the words currently in this GADDAG.\n\nArgs:\npath: path to saved GADDAG to be loaded.", "source": "codesearchnet"}
{"code": "def _ValidateCacheEntryHeader(self, cache_entry_header):\n    \n    return (\n        cache_entry_header.request_size > 0 and\n        cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH and\n        cache_entry_header.major_format_version == 1 and\n        cache_entry_header.last_fetched_time > 0 and\n        cache_entry_header.fetch_count > 0)", "docstring": "Determines whether the values in the cache entry header are valid.\n\nArgs:\ncache_entry_header (firefox_cache1_entry_header): cache entry header.\n\nReturns:\nbool: True if the cache entry header is valid.", "source": "juraj-google-style"}
{"code": "def get_canonical_path(resource_key, pk=None):\n        \n\n        if resource_key not in resource_map:\n            \n            return None\n\n        base_path = get_script_prefix() + resource_map[resource_key]['path']\n        if pk:\n            return '%s/%s/' % (base_path, pk)\n        else:\n            return base_path", "docstring": "Return canonical resource path.\n\nArguments:\nresource_key - Canonical resource key\ni.e. Serializer.get_resource_key().\npk - (Optional) Object's primary key for a single-resource URL.\nReturns: Absolute URL as string.", "source": "juraj-google-style"}
{"code": "def RemoveObject(self, identifier):\n    if (identifier not in self._values):\n        raise KeyError('Missing cached object for identifier: {0:s}'.format(identifier))\n    del self._values[identifier]", "docstring": "Removes a cached object based on the identifier.\n\nThis method ignores the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nRaises:\nKeyError: if the VFS object is not found in the cache.", "source": "codesearchnet"}
{"code": "def register_actor(name, actor_handle):\n    if (not isinstance(name, str)):\n        raise TypeError('The name argument must be a string.')\n    if (not isinstance(actor_handle, ray.actor.ActorHandle)):\n        raise TypeError('The actor_handle argument must be an ActorHandle object.')\n    actor_name = _calculate_key(name)\n    pickled_state = pickle.dumps(actor_handle)\n    already_exists = _internal_kv_put(actor_name, pickled_state)\n    if already_exists:\n        actor_handle._ray_new_actor_handles.pop()\n        raise ValueError('Error: the actor with name={} already exists'.format(name))", "docstring": "Register a named actor under a string key.\n\nArgs:\nname: The name of the named actor.\nactor_handle: The actor object to be associated with this name", "source": "codesearchnet"}
{"code": "def label(self, input_grid):\n    marked = self.find_local_maxima(input_grid)\n    marked = np.where((marked >= 0), 1, 0)\n    markers = splabel(marked)[0]\n    return markers", "docstring": "Labels input grid using enhanced watershed algorithm.\n\nArgs:\ninput_grid (numpy.ndarray): Grid to be labeled.\n\nReturns:\nArray of labeled pixels", "source": "codesearchnet"}
{"code": "def read_geojson(filename):\n    \n    json_file = open(filename)\n    data = json.load(json_file)\n    json_file.close()\n    times = data[\"properties\"][\"times\"]\n    main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])\n    attribute_data = dict()\n    for feature in data[\"features\"]:\n        for main_name in main_data.keys():\n            main_data[main_name].append(np.array(feature[\"properties\"][main_name]))\n        for k, v in feature[\"properties\"][\"attributes\"].items():\n            if k not in attribute_data.keys():\n                attribute_data[k] = [np.array(v)]\n            else:\n                attribute_data[k].append(np.array(v))\n    kwargs = {}\n    for kw in [\"dx\", \"step\", \"u\", \"v\"]:\n        if kw in data[\"properties\"].keys():\n            kwargs[kw] = data[\"properties\"][kw]\n    sto = STObject(main_data[\"timesteps\"], main_data[\"masks\"], main_data[\"x\"], main_data[\"y\"],\n                   main_data[\"i\"], main_data[\"j\"], times[0], times[-1], **kwargs)\n    for k, v in attribute_data.items():\n        sto.attributes[k] = v\n    return sto", "docstring": "Reads a geojson file containing an STObject and initializes a new STObject from the information in the file.\n\nArgs:\nfilename: Name of the geojson file\n\nReturns:\nan STObject", "source": "juraj-google-style"}
{"code": "def minute(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `minute`'.format(value))\n        if (value < 0):\n            raise ValueError('value need to be greater or equal 0 for field `minute`')\n        if (value > 60):\n            raise ValueError('value need to be smaller 60 for field `minute`')\n    self._minute = value", "docstring": "Corresponds to IDD Field `minute`\n\nArgs:\nvalue (int): value for IDD Field `minute`\nvalue >= 0\nvalue <= 60\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def AddProcessingOptions(self, argument_group):\n    \n    argument_helper_names = ['temporary_directory', 'zeromq']\n    if self._CanEnforceProcessMemoryLimit():\n      argument_helper_names.append('process_resources')\n    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(\n        argument_group, names=argument_helper_names)\n\n    argument_group.add_argument(\n        '--worker-memory-limit', '--worker_memory_limit',\n        dest='worker_memory_limit', action='store', type=int,\n        metavar='SIZE', help=(\n            'Maximum amount of memory (data segment and shared memory) '\n            'a worker process is allowed to consume in bytes, where 0 '\n            'represents no limit. The default limit is 2147483648 (2 GiB). '\n            'If a worker process exceeds this limit is is killed by the main '\n            '(foreman) process.'))", "docstring": "Adds processing options to the argument group\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def load_config(paths=DEFAULT_CONFIG_PATHS):\n    config = Config()\n    for path in paths:\n        if os.path.isfile(path):\n            config.load_pyfile(path)\n    return config", "docstring": "Attempt to load config from paths, in order.\n\nArgs:\npaths (List[string]): list of paths to python files\n\nReturn:\nConfig: loaded config", "source": "codesearchnet"}
{"code": "def dummy_inputs(self):\n    batch_size = 2\n    num_visual_features = 10\n    input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)\n    visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))\n    visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))\n    return {'input_ids': input_ids, 'visual_feats': visual_feats, 'visual_pos': visual_pos}", "docstring": "Dummy inputs to build the network.\n\nReturns:\ntf.Tensor with dummy inputs", "source": "github-repos"}
{"code": "def configure_profile(msg_type, profile_name, data, auth):\n    \n    with jsonconfig.Config(\"messages\", indent=4) as cfg:\n        write_data(msg_type, profile_name, data, cfg)\n        write_auth(msg_type, profile_name, auth, cfg)\n\n    print(\"[+] Configuration entry for <\" + profile_name + \"> created.\")\n    print(\"[+] Configuration file location: \" + cfg.filename)", "docstring": "Create the profile entry.\n\nArgs:\n:msg_type: (str) message type to create config entry.\n:profile_name: (str) name of the profile entry\n:data: (dict) dict values for the 'settings'\n:auth: (dict) auth parameters", "source": "juraj-google-style"}
{"code": "def set_api_url(self, api_url=\"https:\n        \n        old_api_url = self._api_url\n        old_lang = self._lang\n        self._lang = lang.lower()\n        self._api_url = api_url.format(lang=self._lang)\n        try:\n            self._get_site_info()\n            self.__supported_languages = None  \n        except MediaWikiException:\n            \n            self._api_url = old_api_url\n            self._lang = old_lang\n            raise MediaWikiAPIURLError(api_url)\n        self.clear_memoized()", "docstring": "Set the API URL and language\n\nArgs:\napi_url (str): API URL to use\nlang (str): Language of the API URL\nRaises:\n:py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \\\nurl is not a valid MediaWiki site", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(JSONOutputModule, self).__init__(output_mediator)\n    self._event_counter = 0", "docstring": "Initializes the output module object.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def fit(self, x_train, y_train, x_valid=None, y_valid=None, epochs=1, batch_size=32, verbose=1, callbacks=None, shuffle=True):\n    p = IndexTransformer(initial_vocab=self.initial_vocab, use_char=self.use_char)\n    p.fit(x_train, y_train)\n    embeddings = filter_embeddings(self.embeddings, p._word_vocab.vocab, self.word_embedding_dim)\n    model = BiLSTMCRF(char_vocab_size=p.char_vocab_size, word_vocab_size=p.word_vocab_size, num_labels=p.label_size, word_embedding_dim=self.word_embedding_dim, char_embedding_dim=self.char_embedding_dim, word_lstm_size=self.word_lstm_size, char_lstm_size=self.char_lstm_size, fc_dim=self.fc_dim, dropout=self.dropout, embeddings=embeddings, use_char=self.use_char, use_crf=self.use_crf)\n    (model, loss) = model.build()\n    model.compile(loss=loss, optimizer=self.optimizer)\n    trainer = Trainer(model, preprocessor=p)\n    trainer.train(x_train, y_train, x_valid, y_valid, epochs=epochs, batch_size=batch_size, verbose=verbose, callbacks=callbacks, shuffle=shuffle)\n    self.p = p\n    self.model = model", "docstring": "Fit the model for a fixed number of epochs.\n\nArgs:\nx_train: list of training data.\ny_train: list of training target (label) data.\nx_valid: list of validation data.\ny_valid: list of validation target (label) data.\nbatch_size: Integer.\nNumber of samples per gradient update.\nIf unspecified, `batch_size` will default to 32.\nepochs: Integer. Number of epochs to train the model.\nverbose: Integer. 0, 1, or 2. Verbosity mode.\n0 = silent, 1 = progress bar, 2 = one line per epoch.\ncallbacks: List of `keras.callbacks.Callback` instances.\nList of callbacks to apply during training.\nshuffle: Boolean (whether to shuffle the training data\nbefore each epoch). `shuffle` will default to True.", "source": "codesearchnet"}
{"code": "def add(self, other):\n        \n        if not isinstance(other, Chi):\n            other = Chi(other)\n        if self.dim != other.dim:\n            raise QiskitError(\"other QuantumChannel dimensions are not equal\")\n        return Chi(self._data + other.data, self._input_dims,\n                   self._output_dims)", "docstring": "Return the QuantumChannel self + other.\n\nArgs:\nother (QuantumChannel): a quantum channel.\n\nReturns:\nChi: the linear addition self + other as a Chi object.\n\nRaises:\nQiskitError: if other is not a QuantumChannel subclass, or\nhas incompatible dimensions.", "source": "juraj-google-style"}
{"code": "def upload(self, params={}):\n    if (self.upload_token is not None):\n        status = self.check()\n        if (status['status'] != 4):\n            return self.commit()\n        else:\n            self.new_slice()\n            while (self.slice_task_id != 0):\n                self.upload_slice()\n            return self.commit()\n    else:\n        self.create(self.prepare_video_params(**params))\n        self.create_file()\n        self.new_slice()\n        while (self.slice_task_id != 0):\n            self.upload_slice()\n        return self.commit()", "docstring": "start uploading the file until upload is complete or error.\nThis is the main method to used, If you do not care about\nstate of process.\n\nArgs:\nparams: a dict object describe video info, eg title,\ntags, description, category.\nall video params see the doc of prepare_video_params.\n\nReturns:\nreturn video_id if upload successfully", "source": "codesearchnet"}
{"code": "def import_object_from_path(path, object):\n    with open(path) as f:\n        return import_object_from_string_code(f.read(), object)", "docstring": "Used to import an object from an absolute path.\n\nThis function takes an absolute path and imports it as a Python module.\nIt then returns the object with name `object` from the imported module.\n\nArgs:\npath (string): Absolute file path of .py file to import\n\nobject (string): Name of object to extract from imported module", "source": "codesearchnet"}
{"code": "def _protobuf_value_type(value):\n    if value.HasField('number_value'):\n        return api_pb2.DATA_TYPE_FLOAT64\n    if value.HasField('string_value'):\n        return api_pb2.DATA_TYPE_STRING\n    if value.HasField('bool_value'):\n        return api_pb2.DATA_TYPE_BOOL\n    return None", "docstring": "Returns the type of the google.protobuf.Value message as an api.DataType.\n\nReturns None if the type of 'value' is not one of the types supported in\napi_pb2.DataType.\n\nArgs:\nvalue: google.protobuf.Value message.", "source": "codesearchnet"}
{"code": "def log_every_n_seconds(level, msg, n_seconds, *args):\n    should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)\n    log_if(level, msg, should_log, *args)", "docstring": "Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call.\n\nLogs the first call, logs subsequent calls if 'n' seconds have elapsed since\nthe last logging call from the same call site (file + line). Not thread-safe.\n\nArgs:\nlevel: int, the absl logging level at which to log.\nmsg: str, the message to be logged.\nn_seconds: float or int, seconds which should elapse before logging again.\n*args: The args to be substitued into the msg.", "source": "codesearchnet"}
{"code": "def Name(self):\n    name = ''\n    if self.Version:\n        name = self.Version.UserAgent\n    return name", "docstring": "Get the peer name.\n\nReturns:\nstr:", "source": "codesearchnet"}
{"code": "def infer_namespace(ac):\n    namespaces = infer_namespaces(ac)\n    if (not namespaces):\n        return None\n    if (len(namespaces) > 1):\n        raise BioutilsError('Multiple namespaces possible for {}'.format(ac))\n    return namespaces[0]", "docstring": "Infer the single namespace of the given accession\n\nThis function is convenience wrapper around infer_namespaces().\nReturns:\n* None if no namespaces are inferred\n* The (single) namespace if only one namespace is inferred\n* Raises an exception if more than one namespace is inferred\n\n>>> infer_namespace(\"ENST00000530893.6\")\n'ensembl'\n\n>>> infer_namespace(\"NM_01234.5\")\n'refseq'\n\n>>> infer_namespace(\"A2BC19\")\n'uniprot'\n\nN.B. The following test is disabled because Python 2 and Python 3\nhandle doctest exceptions differently. :-(\nX>>> infer_namespace(\"P12345\")\nTraceback (most recent call last):\n...\nbioutils.exceptions.BioutilsError: Multiple namespaces possible for P12345\n\n>>> infer_namespace(\"BOGUS99\") is None\nTrue", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n    residual = hidden_states\n    hidden_states = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_value, attention_mask=attention_mask)\n    self_attn_weights = None\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (past_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n`(batch, sequence_length)` where padding elements are indicated by 0.\npast_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\ncache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\nIndices depicting the position of the input sequence tokens in the sequence.", "source": "github-repos"}
{"code": "def hardware_version(self):\n        \n        version = self._dll.JLINKARM_GetHardwareVersion()\n        major = version / 10000 % 100\n        minor = version / 100 % 100\n        return '%d.%02d' % (major, minor)", "docstring": "Returns the hardware version of the connected J-Link as a\nmajor.minor string.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nHardware version string.", "source": "juraj-google-style"}
{"code": "def __init__(self, directory, loader_factory, path_filter=lambda x: True):\n    \n    if directory is None:\n      raise ValueError('A directory is required')\n    if loader_factory is None:\n      raise ValueError('A loader factory is required')\n    self._directory = directory\n    self._path = None\n    self._loader_factory = loader_factory\n    self._loader = None\n    self._path_filter = path_filter\n    self._ooo_writes_detected = False\n    \n    self._finalized_sizes = {}", "docstring": "Constructs a new DirectoryWatcher.\n\nArgs:\ndirectory: The directory to load files from.\nloader_factory: A factory for creating loaders. The factory should take a\npath and return an object that has a Load method returning an\niterator that will yield all events that have not been yielded yet.\npath_filter: If specified, only paths matching this filter are loaded.\n\nRaises:\nValueError: If path_provider or loader_factory are None.", "source": "juraj-google-style"}
{"code": "def info(self, **kwargs):\n    path = self._get_series_id_season_number_episode_number_path('info')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the primary information about a TV episode by combination of a\nseason and episode number.\n\nArgs:\nlanguage: (optional) ISO 639 code.\nappend_to_response: (optional) Comma separated, any TV series\nmethod.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def forward(self, evoformer_output_dict, aatype, mask=None, _offload_inference=False):\n    s = evoformer_output_dict['single']\n    if mask is None:\n        mask = s.new_ones(s.shape[:-1])\n    s = self.layer_norm_s(s)\n    z = self.layer_norm_z(evoformer_output_dict['pair'])\n    z_reference_list = None\n    if _offload_inference:\n        assert sys.getrefcount(evoformer_output_dict['pair']) == 2\n        evoformer_output_dict['pair'] = evoformer_output_dict['pair'].cpu()\n        z_reference_list = [z]\n        z = None\n    s_initial = s\n    s = self.linear_in(s)\n    rigids = Rigid.identity(s.shape[:-1], s.dtype, s.device, self.training, fmt='quat')\n    outputs = []\n    for i in range(self.config.num_blocks):\n        s = s + self.ipa(s, z, rigids, mask, _offload_inference=_offload_inference, _z_reference_list=z_reference_list)\n        s = self.ipa_dropout(s)\n        s = self.layer_norm_ipa(s)\n        s = self.transition(s)\n        rigids = rigids.compose_q_update_vec(self.bb_update(s))\n        backb_to_global = Rigid(Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None), rigids.get_trans())\n        backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor)\n        unnormalized_angles, angles = self.angle_resnet(s, s_initial)\n        all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype)\n        pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)\n        scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor)\n        preds = {'frames': scaled_rigids.to_tensor_7(), 'sidechain_frames': all_frames_to_global.to_tensor_4x4(), 'unnormalized_angles': unnormalized_angles, 'angles': angles, 'positions': pred_xyz, 'states': s}\n        outputs.append(preds)\n        rigids = rigids.stop_rot_gradient()\n    del z, z_reference_list\n    if _offload_inference:\n        evoformer_output_dict['pair'] = evoformer_output_dict['pair'].to(s.device)\n    outputs = dict_multimap(torch.stack, outputs)\n    outputs['single'] = s\n    return outputs", "docstring": "Args:\nevoformer_output_dict:\nDictionary containing:\n\"single\":\n[*, N_res, C_s] single representation\n\"pair\":\n[*, N_res, N_res, C_z] pair representation\naatype:\n[*, N_res] amino acid indices\nmask:\nOptional [*, N_res] sequence mask\nReturns:\nA dictionary of outputs", "source": "github-repos"}
{"code": "def atmospheric_station_pressure(self, value=999999):\n        \n        if value is not None:\n            try:\n                value = int(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type int '\n                    'for field `atmospheric_station_pressure`'.format(value))\n            if value <= 31000:\n                raise ValueError('value need to be greater 31000 '\n                                 'for field `atmospheric_station_pressure`')\n            if value >= 120000:\n                raise ValueError('value need to be smaller 120000 '\n                                 'for field `atmospheric_station_pressure`')\n\n        self._atmospheric_station_pressure = value", "docstring": "Corresponds to IDD Field `atmospheric_station_pressure`\n\nArgs:\nvalue (int): value for IDD Field `atmospheric_station_pressure`\nUnit: Pa\nvalue > 31000\nvalue < 120000\nMissing value: 999999\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def update_ip_info(self, since_days=10, save=False, force=False):\n        \n        \n        try:\n            last_check = IPInfoCheck.objects.get(\n                ip_address=self.client_ip_address)\n\n            \n            since_last = datetime.date.today() - last_check.date\n            if since_last <= datetime.timedelta(days=since_days):\n                if not self.ip_info or (\n                        self.ip_info != last_check.ip_info and force):\n                    self.ip_info = last_check.ip_info\n                    self.save()\n                    return True\n                elif save:\n                    self.save()\n                return False\n\n            \n            ip_info, created = IPInfo.get_or_create_from_ip(\n                self.client_ip_address)\n\n            \n            last_check.date = datetime.date.today()\n            last_check.save()\n\n            \n            if created:\n                last_check.ip_info = ip_info\n                self.ip_info = ip_info\n                self.save()\n                return True\n            elif save:\n                self.save()\n\n            return False\n\n        except IPInfoCheck.DoesNotExist:\n            \n            self.ip_info = IPInfoCheck.check_ip(self.client_ip_address)\n            self.save()\n\n            return True", "docstring": "Update the IP info.\n\nArgs:\nsince_days (int): if checked less than this number of days ago,\ndon't check again (default to 10 days).\nsave (bool): whether to save anyway or not.\nforce (bool): whether to update ip_info to last checked one.\n\nReturns:\nbool: check was run. IPInfo might not have been updated.", "source": "juraj-google-style"}
{"code": "def _fix_fdef_in_place(fdef, functions, shared_name_suffix, new_gradient_op_types):\n    orig_name = fdef.signature.name\n    contains_unsaved_custom_gradients = False\n    for node_def in fdef.node_def:\n        fix_node_def(node_def, functions, shared_name_suffix)\n        op_type = _get_gradient_op_type(node_def)\n        if op_type is not None:\n            if op_type in new_gradient_op_types:\n                node_def.attr['_gradient_op_type'].s = compat.as_bytes(new_gradient_op_types[op_type])\n            else:\n                contains_unsaved_custom_gradients = True\n    if contains_unsaved_custom_gradients:\n        logging.warning('Importing a function (%s) with ops with unsaved custom gradients. Will likely fail if a gradient is requested.', fdef.signature.name)\n    fdef.signature.name = _clean_function_name(fdef.signature.name)\n    return orig_name", "docstring": "Fixes a FunctionDef proto to be loaded in current context.\n\nIn particular, when loading a function library into an eager context, one\nmust rename the functions to avoid conflicts with existent functions.\n\nArgs:\nfdef: FunctionDef proto to fix. It is mutated in-place.\nfunctions: map from function name to a ConcreteFunction instance.\nshared_name_suffix: A unique string for this load which helps to avoid\n`shared_name` collisions across loads. Two functions from the same load\nusing the same `shared_name` still need to share, but functions from\ndifferent loads with the same `shared_name` should not.\nnew_gradient_op_types: map from old gradient op type to newly generated op\ntype.\n\nReturns:\norig_name: original value of fdef.signature.name", "source": "github-repos"}
{"code": "def UsesArtifact(self, artifacts):\n    \n    \n    \n    \n    if isinstance(artifacts, string_types):\n      return artifacts in self.artifacts\n    else:\n      return any(True for artifact in artifacts if artifact in self.artifacts)", "docstring": "Determines if the check uses the specified artifact.\n\nArgs:\nartifacts: Either a single artifact name, or a list of artifact names\n\nReturns:\nTrue if the check uses a specific artifact.", "source": "juraj-google-style"}
{"code": "def _get_account_xml(soco):\n    device = (soco or discovery.any_soco())\n    log.debug('Fetching account data from %s', device)\n    settings_url = 'http:\n    result = requests.get(settings_url).content\n    log.debug('Account data: %s', result)\n    return result", "docstring": "Fetch the account data from a Sonos device.\n\nArgs:\nsoco (SoCo): a SoCo instance to query. If soco is `None`, a\nrandom device will be used.\n\nReturns:\nstr: a byte string containing the account data xml", "source": "codesearchnet"}
{"code": "def GetTimeOfDay(self):\n    normalized_timestamp = self._GetNormalizedTimestamp()\n    if (normalized_timestamp is None):\n        return (None, None, None)\n    (_, hours, minutes, seconds) = self._GetTimeValues(normalized_timestamp)\n    return (hours, minutes, seconds)", "docstring": "Retrieves the time of day represented by the date and time values.\n\nReturns:\ntuple[int, int, int]: hours, minutes, seconds or (None, None, None)\nif the date and time values do not represent a time of day.", "source": "codesearchnet"}
{"code": "def make_sgf(move_history, result_string, ruleset='Chinese', komi=7.5, white_name=PROGRAM_IDENTIFIER, black_name=PROGRAM_IDENTIFIER, comments=[]):\n    boardsize = go.N\n    game_moves = ''.join((translate_sgf_move(*z) for z in itertools.zip_longest(move_history, comments)))\n    result = result_string\n    return SGF_TEMPLATE.format(**locals())", "docstring": "Turn a game into SGF.\n\nDoesn't handle handicap games or positions with incomplete history.\n\nArgs:\nmove_history: iterable of PlayerMoves\nresult_string: \"B+R\", \"W+0.5\", etc.\ncomments: iterable of string/None. Will be zipped with move_history.", "source": "codesearchnet"}
{"code": "def _error_and_gradient(self, x):\n        \n        coords = x.reshape((self.m, self.n))\n        d = squareform(pdist(coords))\n        diff = self.D - d\n        error = self._error(diff)\n        gradient = self._gradient(diff, d, coords)\n        return error, gradient.ravel()", "docstring": "Compute the error and the gradient.\n\nThis is the function optimized by :obj:`scipy.optimize.minimize`.\n\nArgs:\nx (`array-like`): [`m` * `n`, ] matrix.\n\nReturns:\n`tuple`: containing:\n\n- Error (`float`)\n- Gradient (`np.array`) [`m`, `n`]", "source": "juraj-google-style"}
{"code": "def copy_pkg(self, filename, _):\n        \n        basename = os.path.basename(filename)\n        self._copy(filename, os.path.join(self.connection[\"mount_point\"],\n                                          \"Packages\", basename))", "docstring": "Copy a package to the repo's Package subdirectory.\n\nArgs:\nfilename: Path for file to copy.\n_: Ignored. Used for compatibility with JDS repos.", "source": "juraj-google-style"}
{"code": "def recipe_twitter(config, auth_read, auth_write, recipe_name, twitter_secret, recipe_slug, twitter_key):\n    dataset(config, {'description': 'Create a dataset where data will be combined and transfored for upload.', 'auth': auth_write, 'dataset': recipe_slug})\n    sheets(config, {'description': 'Read mapping of hash tags to line item toggles from sheets.', 'auth': auth_read, 'template': {'sheet': 'https:\n    twitter(config, {'description': 'Read trends from Twitter and place into BigQuery.', 'auth': auth_write, 'secret': twitter_secret, 'key': twitter_key, 'trends': {'places': {'single_cell': True, 'bigquery': {'dataset': recipe_slug, 'query': 'SELECT DISTINCT WOEID FROM {dataset}.Twitter_Triggers', 'legacy': False, 'parameters': {'dataset': recipe_slug}}}}, 'out': {'bigquery': {'dataset': recipe_slug, 'table': 'Twitter_Trends_Place'}}})\n    google_api(config, {'description': 'Combine sheet and twitter data into API operations for each line item.  Match all possibilities and PAUSE if no trigger match.', 'auth': auth_write, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.lineItems.patch', 'kwargs_remote': {'bigquery': {'dataset': recipe_slug, 'query': \"\\n           SELECT\\n             CAST(S.Advertiser_Id AS STRING) advertiserId,\\n             CAST(S.Line_Item_Id AS STRING) AS lineItemId,\\n             STRUCT(\\n               IF(LOGICAL_OR(T.Name is NULL), 'ENTITY_STATUS_ACTIVE', 'ENTITY_STATUS_PAUSED') AS entityStatus\\n             ) AS body,\\n             'entityStatus' AS updateMask,\\n           FROM `{dataset}.Twitter_Triggers` AS S\\n           LEFT JOIN `{dataset}.Twitter_Trends_Place` As T\\n           ON S.WOEID=T.WOEID AND REPLACE(LOWER(S.Hashtag), '", "docstring": "Adjusts line item settings based on Twitter hashtags and locations specified in\na sheet.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\nrecipe_name (string) - Name of sheet where Line Item settings will be read from.\ntwitter_secret (string) - Twitter API secret token.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.\ntwitter_key (string) - Twitter API key token.", "source": "github-repos"}
{"code": "def get(self, name, default=None):\n        \n        option = self._options.get(name, None)\n        if option is None:\n\n            return default\n\n        return option.__get__(self)", "docstring": "Fetch an option from the dictionary.\n\nArgs:\nname (str): The name of the option.\ndefault: The value to return if the name is missing.\n\nReturns:\nany: The value stored by the option.\n\nThis method resolves the option to its value rather than returning\nthe option object itself. Use the 'options()' method or this object's\niter to get the raw options.", "source": "juraj-google-style"}
{"code": "def parse_psqs(psqs_results_file):\n    \n\n    \n\n    psqs_results = pd.read_csv(psqs_results_file, sep='\\t', header=None)\n    psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb'))\n    psqs_results = psqs_results.rename(columns = {1:'psqs_local', 2:'psqs_burial', 3:'psqs_contact', 4:'psqs_total'}).drop(0, axis=1)\n    psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x)==4 else np.nan)\n    psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x)>4 else np.nan)\n    psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)]\n\n    return psqs_results", "docstring": "Parse a PSQS result file and returns a Pandas DataFrame of the results\n\nArgs:\npsqs_results_file: Path to psqs results file\n\nReturns:\nPandas DataFrame: Summary of PSQS results", "source": "juraj-google-style"}
{"code": "def clear(self, rows=None):\n    \n    rows = tf.range(self._capacity) if rows is None else rows\n    assert rows.shape.ndims == 1\n    return tf.scatter_update(self._length, rows, tf.zeros_like(rows))", "docstring": "Reset episodes in the memory.\n\nInternally, this only sets their lengths to zero. The memory entries will\nbe overridden by future calls to append() or replace().\n\nArgs:\nrows: Episodes to clear, defaults to all.\n\nReturns:\nOperation.", "source": "juraj-google-style"}
{"code": "def _convert_reward(self, reward):\n    if (not np.isfinite(reward).all()):\n        raise ValueError('Infinite reward encountered.')\n    return np.array(reward, dtype=np.float32)", "docstring": "Convert the reward to 32 bits.\n\nArgs:\nreward: Numpy reward.\n\nRaises:\nValueError: Rewards contain infinite values.\n\nReturns:\nNumpy reward with 32-bit data type.", "source": "codesearchnet"}
{"code": "def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):\n    m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites])\n    mask = ((m_projs - np.amax(m_projs)) >= (- height))\n    surf_sites = [slab.sites[n] for n in np.where(mask)[0]]\n    if xy_tol:\n        surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]\n        surf_sites.reverse()\n        (unique_sites, unique_perp_fracs) = ([], [])\n        for site in surf_sites:\n            this_perp = (site.coords - np.dot(site.coords, self.mvec))\n            this_perp_frac = slab.lattice.get_fractional_coords(this_perp)\n            if (not in_coord_list_pbc(unique_perp_fracs, this_perp_frac)):\n                unique_sites.append(site)\n                unique_perp_fracs.append(this_perp_frac)\n        surf_sites = unique_sites\n    return surf_sites", "docstring": "This method finds surface sites by determining which sites are within\na threshold value in height from the topmost site in a list of sites\n\nArgs:\nsite_list (list): list of sites from which to select surface sites\nheight (float): threshold in angstroms of distance from topmost\nsite in slab along the slab c-vector to include in surface\nsite determination\nxy_tol (float): if supplied, will remove any sites which are\nwithin a certain distance in the miller plane.\n\nReturns:\nlist of sites selected to be within a threshold of the highest", "source": "codesearchnet"}
{"code": "def map_batch_parallel(input_list, batch_size, item_mapper=None, batch_mapper=None, flatten=True, n_jobs=(- 1), **kwargs):\n    if ((item_mapper is None) and (batch_mapper is None)):\n        raise ValueError('You should specify either batch_mapper or item_mapper.')\n    if (batch_mapper is None):\n        batch_mapper = _default_batch_mapper\n    batches = split_into_batches(input_list, batch_size, batch_storage_dir='')\n    all_batch_results = Parallel(n_jobs=n_jobs, **kwargs)((delayed(batch_mapper)(batch['data'], item_mapper) for batch in progressbar(batches, desc='Batches', total=len(batches), file=sys.stdout)))\n    if flatten:\n        final_result = []\n        for batch_result in all_batch_results:\n            final_result.extend(batch_result)\n    else:\n        final_result = all_batch_results\n    return final_result", "docstring": "Split the data into batches and process each batch in its own thread.\n\nArgs:\ninput_list: An input object that has a list-like interface (indexing and slicing).\nitem_mapper: (optional) A function to apply to each item in the batch.\nbatch_mapper: (optional) A function to apply to each batch. Either item_mapper or batch_mapper must be set.\nflatten: Whether to unwrap individual batch results or keep them grouped by batch.\nn_jobs: The number of parallel processing jobs. -1 will use the number of CPUs on the system.\nbatch_size: The maximum number of input items in each batch. -1 will store all data as a single batch.\n**kwargs: Additional keyword arguments to joblib.Parallel.\n\nReturns:\nA list representing the combined output from the mapper function called on all input items of each batch.", "source": "codesearchnet"}
{"code": "def difference(self, second_iterable, selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call difference() on a closed Queryable.')\n    if (not is_iterable(second_iterable)):\n        raise TypeError('Cannot compute difference() with second_iterableof non-iterable {0}'.format(str(type(second_iterable))[7:(- 2)]))\n    if (not is_callable(selector)):\n        raise TypeError('difference() parameter selector={0} is not callable'.format(repr(selector)))\n    return self._create(self._generate_difference_result(second_iterable, selector))", "docstring": "Returns those elements which are in the source sequence which are not\nin the second_iterable.\n\nThis method is equivalent to the Except() LINQ operator, renamed to a\nvalid Python identifier.\n\nNote: This method uses deferred execution, but as soon as execution\ncommences the entirety of the second_iterable is consumed;\ntherefore, although the source sequence may be infinite the\nsecond_iterable must be finite.\n\nArgs:\nsecond_iterable: Elements from this sequence are excluded from the\nreturned sequence. This sequence will be consumed in its\nentirety, so must be finite.\n\nselector: A optional single argument function with selects from the\nelements of both sequences the values which will be\ncompared for equality. If omitted the identity function will\nbe used.\n\nReturns:\nA sequence containing all elements in the source sequence except\nthose which are also members of the second sequence.\n\nRaises:\nValueError: If the Queryable has been closed.\nTypeError: If the second_iterable is not in fact iterable.\nTypeError: If the selector is not callable.", "source": "codesearchnet"}
{"code": "def transformer_revnet_encoder(encoder_input,\n                               encoder_self_attention_bias,\n                               hparams,\n                               name=\"encoder\"):\n  \n\n  def f(x, side_input):\n    \n    encoder_self_attention_bias = side_input[0]\n\n    old_hid_size = hparams.hidden_size\n    hparams.hidden_size = old_hid_size \n\n    with tf.variable_scope(\"self_attention\"):\n      y = common_attention.multihead_attention(\n          common_layers.layer_preprocess(\n              x, hparams), None, encoder_self_attention_bias,\n          hparams.attention_key_channels or hparams.hidden_size,\n          hparams.attention_value_channels or hparams.hidden_size,\n          hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n      y = common_layers.layer_postprocess(x, y, hparams)\n    hparams.hidden_size = old_hid_size\n    return y\n\n  def g(x):\n    \n    old_hid_size = hparams.hidden_size\n    hparams.hidden_size = old_hid_size \n\n    with tf.variable_scope(\"ffn\"):\n      y = transformer.transformer_ffn_layer(\n          common_layers.layer_preprocess(x, hparams), hparams)\n      y = common_layers.layer_postprocess(x, y, hparams)\n    hparams.hidden_size = old_hid_size\n    return y\n\n  x1, x2 = tf.split(encoder_input, 2, axis=-1)\n\n  with tf.variable_scope(name):\n    y1, y2 = tf.contrib.layers.rev_block(\n        x1,\n        x2,\n        f,\n        g,\n        num_layers=hparams.num_hidden_layers,\n        f_side_input=[encoder_self_attention_bias],\n        is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)\n    y = tf.concat([y1, y2], axis=-1)\n\n  return common_layers.layer_preprocess(y, hparams)", "docstring": "A stack of transformer layers.\n\nArgs:\nencoder_input: a Tensor\nencoder_self_attention_bias: bias Tensor for self-attention\n(see common_attention.attention_bias())\nhparams: hyperparameters for model\nname: a string\n\nReturns:\ny: a Tensors", "source": "juraj-google-style"}
{"code": "def right_shift_blockwise(x, query_shape, name=None):\n  \n  with tf.variable_scope(\n      name, default_name=\"right_shift_blockwise\", values=[x]):\n    x_list_shape = x.get_shape().as_list()\n    x_shape = common_layers.shape_list(x)\n    \n    x = tf.expand_dims(x, axis=1)\n    x = pad_to_multiple_2d(x, query_shape)\n    padded_x_shape = common_layers.shape_list(x)\n    \n    x_indices = gather_indices_2d(x, query_shape, query_shape)\n    x_new = get_shifted_center_blocks(x, x_indices)\n\n    \n    output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)\n    \n    output = tf.squeeze(output, axis=1)\n    \n    output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1])\n    output.set_shape(x_list_shape)\n    return output", "docstring": "Right shifts once in every block.\n\nArgs:\nx: a tensor of shape [batch, height, width, depth]\nquery_shape: A 2d tuple of ints\nname: a string\n\nReturns:\noutput: a tensor of the same shape as x", "source": "juraj-google-style"}
{"code": "def __init__(self, variant, building):\n        \n        self.variant = variant\n        self.building = building", "docstring": "Create a package variant.\n\nArgs:\nvariant (`Variant`): Package variant.\nbuilding (bool): True if a build is occurring.", "source": "juraj-google-style"}
{"code": "def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')):\n    \n\n    tuple_list = [ ]\n\n    if not key_vals_dict: return tuple_list\n\n    vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())])\n\n    for k, vs in key_vals_dict.items():\n        try:\n            tuple_list.extend([k + tuple(v) + (fill, )*(vlen - len(v)) for v in vs])\n        except TypeError:\n            \n            tuple_list.extend([(k, ) + tuple(v) + (fill, )*(vlen - len(v)) for v in vs])\n\n\n    return tuple_list", "docstring": "Convert ``key_vals_dict`` to `tuple_list``.\n\nArgs:\nkey_vals_dict (dict): The first parameter.\nfill: a value to fill missing data\n\nReturns:\nA list of tuples", "source": "juraj-google-style"}
{"code": "def to_string(cls, error_code):\n        \n        if error_code == cls.ZONE_NOT_FOUND_ERROR:\n            return 'Zone not found'\n        return super(JLinkReadErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\ncls (JLinkReadErrors): the ``JLinkReadErrors`` class\nerror_code (int): error code to convert\n\nReturns:\nAn error string corresponding to the error code.\n\nRaises:\nValueError: if the error code is invalid.", "source": "juraj-google-style"}
{"code": "def DeleteCampaignFeed(client, campaign_feed):\n    campaign_feed_service = client.GetService('CampaignFeedService', 'v201809')\n    operation = {'operand': campaign_feed, 'operator': 'REMOVE'}\n    campaign_feed_service.mutate([operation])", "docstring": "Deletes a campaign feed.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_feed: the campaign feed to delete.", "source": "codesearchnet"}
{"code": "def open(self, host, port=23):\n    self._telnet_client.open(host, port)\n    config_str = self._telnet_client.cmd('MN?')\n    if config_str.startswith('MN='):\n        config_str = config_str[len('MN='):]\n    self.properties = dict(zip(['model', 'max_freq', 'max_atten'], config_str.split('-', 2)))\n    self.max_atten = float(self.properties['max_atten'])", "docstring": "Opens a telnet connection to the desired AttenuatorDevice and\nqueries basic information.\n\nArgs:\nhost: A valid hostname (IP address or DNS-resolvable name) to an\nMC-DAT attenuator instrument.\nport: An optional port number (defaults to telnet default 23)", "source": "codesearchnet"}
{"code": "def sample(self, count=5, fields=None, sampling=None, use_cache=True, dialect=None, billing_tier=None):\n    return Query.sampling_query(self._sql, self._context, count=count, fields=fields, sampling=sampling, udfs=self._udfs, data_sources=self._data_sources).results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)", "docstring": "Retrieves a sampling of rows for the query.\n\nArgs:\ncount: an optional count of rows to retrieve which is used if a specific\nsampling is not specified (default 5).\nfields: the list of fields to sample (default None implies all).\nsampling: an optional sampling strategy to apply to the table.\nuse_cache: whether to use cached results or not (default True).\ndialect : {'legacy', 'standard'}, default 'legacy'\n'legacy' : Use BigQuery's legacy SQL dialect.\n'standard' : Use BigQuery's standard SQL (beta), which is\ncompliant with the SQL 2011 standard.\nbilling_tier: Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge). If unspecified, this\nwill be set to your project default. This can also be used to override your\nproject-wide default billing tier on a per-query basis.\nReturns:\nA QueryResultsTable containing a sampling of the result set.\nRaises:\nException if the query could not be executed or query response was malformed.", "source": "codesearchnet"}
{"code": "def review_score(self, reviewer, product):\n    return self._g.retrieve_review(reviewer, product).score", "docstring": "Find a review score from a given reviewer to a product.\n\nArgs:\nreviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`.\nproduct: Product i.e. an instance of :class:`ria.bipartite.Product`.\n\nReturns:\nA review object representing the review from the reviewer to the product.", "source": "codesearchnet"}
{"code": "def get_request_header(self):\n    if (self._client_id is not None):\n        self._request_header.client_identifier.resource = self._client_id\n    return self._request_header", "docstring": "Return ``request_header`` for use when constructing requests.\n\nReturns:\nPopulated request header.", "source": "codesearchnet"}
{"code": "def fit_texture(layer):\n    \n    x, y = layer\n    x = (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x))\n    y = (y - np.nanmin(y)) / (np.nanmax(y) - np.nanmin(y))\n    return x, y", "docstring": "Fits a layer into a texture by scaling each axis to (0, 1).\n\nDoes not preserve aspect ratio (TODO: make this an option).\n\nArgs:\nlayer (layer): the layer to scale\n\nReturns:\ntexture: A texture.", "source": "juraj-google-style"}
{"code": "def pnl_search(self, asset_manager_id, pnl_type, business_date, **kwargs):\n    self.logger.info(('Retrieving Pnls - Asset Manager: %s - Business Date: %s' % (asset_manager_id, business_date)))\n    url = ('%s/pnls/%s' % (self.endpoint, asset_manager_id))\n    search_params = {'pnl_type': pnl_type, 'business_date': business_date.isoformat()}\n    for (param_key, param_val) in kwargs.items():\n        if (not param_val):\n            continue\n        search_params[param_key] = (','.join(param_val) if isinstance(param_val, list) else param_val)\n    response = self.session.get(url, params=search_params)\n    if response.ok:\n        json_body = response.json()\n        results = json_body.get('items')\n        next_hash_key = json_body.get('next_hash_key')\n        next_range_key = json_body.get('next_range_key')\n        pnls = [json_to_pnl(pnl_json) for pnl_json in results]\n        self.logger.info('Retrieved %s Pnl records.', len(pnls))\n        return (next_hash_key, next_range_key, pnls)\n    else:\n        self.logger.error(response.text)\n        response.raise_for_status()", "docstring": "Search pnl records.\n\nArgs:\nasset_manager_id (int): id of asset manager owning the pnl records\npnl_type (str): either \"Position\" or \"Transaction\nbusiness_date (date): date of the pnl records to return\nbook_ids (list): book id filter on pnl records\nasset_ids (list): asset id filter on pnl records\ntransaction_ids (list): transactino id filter on pnl records\nnext_hash_key (str): continuation hash key for paging the results\nnext_range_key (str): continuation range key for paging the results\npage_size (int): the number of results to return", "source": "codesearchnet"}
{"code": "def ParseMany(text):\n    precondition.AssertType(text, Text)\n    if compatibility.PY2:\n        text = text.encode('utf-8')\n    return list(yaml.safe_load_all(text))", "docstring": "Parses many YAML documents into a list of Python objects.\n\nArgs:\ntext: A YAML source with multiple documents embedded.\n\nReturns:\nA list of Python data structures corresponding to the YAML documents.", "source": "codesearchnet"}
{"code": "def from_proto(saver_def, import_scope=None):\n    return Saver(saver_def=saver_def, name=import_scope)", "docstring": "Returns a `Saver` object created from `saver_def`.\n\nArgs:\nsaver_def: a `SaverDef` protocol buffer.\nimport_scope: Optional `string`. Name scope to use.\n\nReturns:\nA `Saver` built from saver_def.", "source": "github-repos"}
{"code": "def plot_tree3d(ax, tree, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):\n    segs = [(s[0][COLS.XYZ], s[1][COLS.XYZ]) for s in iter_segments(tree)]\n    linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)\n    color = _get_color(color, tree.type)\n    collection = Line3DCollection(segs, color=color, linewidth=linewidth, alpha=alpha)\n    ax.add_collection3d(collection)\n    _update_3d_datalim(ax, tree)", "docstring": "Generates a figure of the tree in 3d.\n\nIf the tree contains one single point the plot will be empty \\\nsince no segments can be constructed.\n\nArgs:\nax(matplotlib axes): on what to plot\ntree(neurom.core.Tree or neurom.core.Neurite): plotted tree\ndiameter_scale(float): Scale factor multiplied with segment diameters before plotting\nlinewidth(float): all segments are plotted with this width, but only if diameter_scale=None\ncolor(str or None): Color of plotted values, None corresponds to default choice\nalpha(float): Transparency of plotted values", "source": "codesearchnet"}
{"code": "def get_dependencies(self):\n    all_deps = OrderedSet()\n    for (key, _) in list(self.__config.items()):\n        if (key in self.__cli):\n            continue\n        if key.endswith('sources'):\n            all_deps |= self.get_sources(key[:((len('sources') * (- 1)) - 1)])\n    for (key, _) in list(self.__cli.items()):\n        if key.endswith('sources'):\n            all_deps |= self.get_sources(key[:((len('sources') * (- 1)) - 1)])\n    if (self.conf_file is not None):\n        all_deps.add(self.conf_file)\n    all_deps.add(self.get_path('sitemap', rel_to_cwd=True))\n    cwd = os.getcwd()\n    return [os.path.relpath(fname, cwd) for fname in all_deps if fname]", "docstring": "Retrieve the set of all dependencies for a given configuration.\n\nReturns:\nutils.utils.OrderedSet: The set of all dependencies for the\ntracked configuration.", "source": "codesearchnet"}
{"code": "def patch_deepCopy(self, patches):\n    \n    patchesCopy = []\n    for patch in patches:\n      patchCopy = patch_obj()\n      \n      patchCopy.diffs = patch.diffs[:]\n      patchCopy.start1 = patch.start1\n      patchCopy.start2 = patch.start2\n      patchCopy.length1 = patch.length1\n      patchCopy.length2 = patch.length2\n      patchesCopy.append(patchCopy)\n    return patchesCopy", "docstring": "Given an array of patches, return another array that is identical.\n\nArgs:\npatches: Array of Patch objects.\n\nReturns:\nArray of Patch objects.", "source": "juraj-google-style"}
{"code": "def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True, role=settings.DEFAULT_ASSISTANT_ROLE):\n    name = os.path.splitext(os.path.basename(source))[0]\n    yaml_checker.check(source, y)\n    assistant = yaml_assistant.YamlAssistant(name, y, source, superassistant, fully_loaded=fully_loaded, role=role)\n    return assistant", "docstring": "Constructs instance of YamlAssistant loaded from given structure y, loaded\nfrom source file source.\n\nArgs:\nsource: path to assistant source file\ny: loaded yaml structure\nsuperassistant: superassistant of this assistant\nReturns:\nYamlAssistant instance constructed from y with source file source\nRaises:\nYamlError: if the assistant is malformed", "source": "codesearchnet"}
{"code": "def notify(self, new_issues, existing_issues, fixed_issues):\n        \n        if len(new_issues + existing_issues + fixed_issues) > 0:\n            maxlen = max(len(x['properties']['source']) for x in (new_issues + existing_issues + fixed_issues)) + 2\n            text_tmpl = get_template('domain_hijacking.txt')\n            html_tmpl = get_template('domain_hijacking.html')\n            issues_text = text_tmpl.render(\n                new_issues=new_issues,\n                existing_issues=existing_issues,\n                fixed_issues=fixed_issues,\n                maxlen=maxlen\n            )\n            issues_html = html_tmpl.render(\n                new_issues=new_issues,\n                existing_issues=existing_issues,\n                fixed_issues=fixed_issues,\n                maxlen=maxlen\n            )\n\n            try:\n                send_notification(\n                    subsystem=self.name,\n                    recipients=[NotificationContact('email', addr) for addr in self.recipients],\n                    subject=self.subject,\n                    body_html=issues_html,\n                    body_text=issues_text\n                )\n            except Exception as ex:\n                self.log.exception('Failed sending notification email: {}'.format(ex))", "docstring": "Send notifications (email, slack, etc.) for any issues that are currently open or has just been closed\n\nArgs:\nnew_issues (`list` of :obj:`DomainHijackIssue`): List of newly discovered issues\nexisting_issues (`list` of :obj:`DomainHijackIssue`): List of existing open issues\nfixed_issues (`list` of `dict`): List of fixed issues\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _GetNumberOfSeconds(self, fat_date_time):\n    \n    day_of_month = (fat_date_time & 0x1f)\n    month = ((fat_date_time >> 5) & 0x0f)\n    year = (fat_date_time >> 9) & 0x7f\n\n    days_per_month = self._GetDaysPerMonth(year, month)\n    if day_of_month < 1 or day_of_month > days_per_month:\n      raise ValueError('Day of month value out of bounds.')\n\n    number_of_days = self._GetDayOfYear(1980 + year, month, day_of_month)\n    number_of_days -= 1\n    for past_year in range(0, year):\n      number_of_days += self._GetNumberOfDaysInYear(past_year)\n\n    fat_date_time >>= 16\n\n    seconds = (fat_date_time & 0x1f) * 2\n    minutes = (fat_date_time >> 5) & 0x3f\n    hours = (fat_date_time >> 11) & 0x1f\n\n    if hours not in range(0, 24):\n      raise ValueError('Hours value out of bounds.')\n\n    if minutes not in range(0, 60):\n      raise ValueError('Minutes value out of bounds.')\n\n    if seconds not in range(0, 60):\n      raise ValueError('Seconds value out of bounds.')\n\n    number_of_seconds = (((hours * 60) + minutes) * 60) + seconds\n    number_of_seconds += number_of_days * definitions.SECONDS_PER_DAY\n    return number_of_seconds", "docstring": "Retrieves the number of seconds from a FAT date time.\n\nArgs:\nfat_date_time (int): FAT date time.\n\nReturns:\nint: number of seconds since January 1, 1980 00:00:00.\n\nRaises:\nValueError: if the month, day of month, hours, minutes or seconds\nvalue is out of bounds.", "source": "juraj-google-style"}
{"code": "def __init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint=None):\n    super(AutomountUpdater, self).__init__(map_name, timestamp_dir, cache_options, automount_mountpoint)\n    self.local_master = False\n    if self.OPT_LOCAL_MASTER in cache_options:\n        if cache_options[self.OPT_LOCAL_MASTER] == 'yes':\n            self.local_master = True", "docstring": "Initialize automount-specific updater options.\n\nArgs:\nmap_name: A string representing the type of the map we are an Updater for.\ntimestamp_dir: A string with the directory containing our timestamp files.\ncache_options: A dict containing the options for any caches we create.\nautomount_mountpoint: An optional string containing automount path info.", "source": "github-repos"}
{"code": "def get_versions(self):\n\n\t\t\n\n\t\t\n\t\tversions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri)\n\n\t\t\n\t\tversions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)\n\n\t\t\n\t\tfor version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):\n\n\t\t\t\n\t\t\tversion_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython()\n\n\t\t\t\n\t\t\tself._affix_version(version_uri, version_label)", "docstring": "retrieves all versions of an object, and stores them at self.versions\n\nArgs:\nNone\n\nReturns:\nNone: appends instances", "source": "juraj-google-style"}
{"code": "def parse_ids(chrom, pos, ref, alt, case_id, variant_type):\n    \n    ids = {}\n    pos = str(pos)\n\n    ids['simple_id'] = parse_simple_id(chrom, pos, ref, alt)\n    ids['variant_id'] = parse_variant_id(chrom, pos, ref, alt, variant_type)\n    ids['display_name'] = parse_display_name(chrom, pos, ref, alt, variant_type)\n    ids['document_id'] = parse_document_id(chrom, pos, ref, alt, variant_type, case_id)\n\n    return ids", "docstring": "Construct the necessary ids for a variant\n\nArgs:\nchrom(str): Variant chromosome\npos(int): Variant position\nref(str): Variant reference\nalt(str): Variant alternative\ncase_id(str): Unique case id\nvariant_type(str): 'clinical' or 'research'\n\nReturns:\nids(dict): Dictionary with the relevant ids", "source": "juraj-google-style"}
{"code": "def all(self, data={}, **kwargs):\n    return super(VirtualAccount, self).all(data, **kwargs)", "docstring": "Fetch all Virtual Account entities\n\nReturns:\nDictionary of Virtual Account data", "source": "codesearchnet"}
{"code": "def _parse_banners(self):\n    motd_value = login_value = None\n    matches = re.findall('^banner\\\\s+(login|motd)\\\\s?$\\n(.*?)$\\nEOF$\\n', self.config, (re.DOTALL | re.M))\n    for match in matches:\n        if (match[0].strip() == 'motd'):\n            motd_value = match[1]\n        elif (match[0].strip() == 'login'):\n            login_value = match[1]\n    return dict(banner_motd=motd_value, banner_login=login_value)", "docstring": "Parses the global config and returns the value for both motd\nand login banners.\n\nReturns:\ndict: The configure value for modtd and login banners. If the\nbanner is not set it will return a value of None for that\nkey. The returned dict object is intendd to be merged\ninto the resource dict", "source": "codesearchnet"}
{"code": "def _list_like_func(self, func, axis, *args, **kwargs):\n        \n        func_prepared = self._prepare_method(\n            lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))\n        )\n        new_data = self._map_across_full_axis(axis, func_prepared)\n        \n        new_index = (\n            [f if isinstance(f, string_types) else f.__name__ for f in func]\n            if axis == 0\n            else self.index\n        )\n        new_columns = (\n            [f if isinstance(f, string_types) else f.__name__ for f in func]\n            if axis == 1\n            else self.columns\n        )\n        return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Apply list-like function across given axis.\n\nArgs:\nfunc: The function to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "juraj-google-style"}
{"code": "def load_settings(path, setttings_only = True):\n        \n\n\n        \n        if not os.path.exists(path):\n            print(path)\n            raise AttributeError('Path given does not exist!')\n\n        tag = '_'.join(os.path.basename(os.path.dirname(os.path.abspath(path) + '/')).split('_')[3:])\n\n        search_str = os.path.abspath(path)+'/*'+tag +'.b26'\n        fname = glob.glob(search_str)\n        if len(fname)>1:\n            print(('warning more than one .b26 file found, loading ', fname[0]))\n        elif len(fname) == 0:\n            print(('no .b26 file found in folder {:s},  check path !'.format(search_str)))\n            return\n        fname = fname[0]\n        fname = Script.check_filename(fname)\n        settings = load_b26_file(fname)['scripts']\n\n        if len(list(settings.keys())) == 1 and setttings_only:\n            settings = settings[list(settings.keys())[0]]['settings']\n\n        return settings", "docstring": "loads the settings that has been save with Script.save_b26.\nArgs:\npath: path to folder saved by Script.save_b26\nsetttings_only: if true returns only the settings if the .b26 file contains only a single script\nReturns:\na dictionary with the settings", "source": "juraj-google-style"}
{"code": "def _ParseKey(self, knowledge_base, registry_key, value_name):\n    \n    try:\n      registry_value = registry_key.GetValueByName(value_name)\n    except IOError as exception:\n      raise errors.PreProcessFail((\n          'Unable to retrieve Windows Registry key: {0:s} value: {1:s} '\n          'with error: {2!s}').format(\n              registry_key.path, value_name, exception))\n\n    if registry_value:\n      value_object = registry_value.GetDataAsObject()\n      if value_object:\n        self._ParseValueData(knowledge_base, value_object)", "docstring": "Parses a Windows Registry key for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\nvalue_name (str): name of the Windows Registry value.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def approx_eq(val: Any, other: Any, *, atol: Union[(int, float)]=1e-08) -> bool:\n    approx_eq_getter = getattr(val, '_approx_eq_', None)\n    if (approx_eq_getter is not None):\n        result = approx_eq_getter(other, atol)\n        if (result is not NotImplemented):\n            return result\n    other_approx_eq_getter = getattr(other, '_approx_eq_', None)\n    if (other_approx_eq_getter is not None):\n        result = other_approx_eq_getter(val, atol)\n        if (result is not NotImplemented):\n            return result\n    if isinstance(val, (int, float)):\n        if (not isinstance(other, (int, float))):\n            return False\n        return _isclose(val, other, atol=atol)\n    if isinstance(val, complex):\n        if (not isinstance(other, complex)):\n            return False\n        return _isclose(val, other, atol=atol)\n    result = _approx_eq_iterables(val, other, atol=atol)\n    if (result is NotImplemented):\n        return (val == other)\n    return result", "docstring": "Approximately compares two objects.\n\nIf `val` implements SupportsApproxEquality protocol then it is invoked and\ntakes precedence over all other checks:\n- For primitive numeric types `int` and `float` approximate equality is\ndelegated to math.isclose().\n- For complex primitive type the real and imaginary parts are treated\nindependently and compared using math.isclose().\n- For `val` and `other` both iterable of the same length, consecutive\nelements are compared recursively. Types of `val` and `other` does not\nnecessarily needs to match each other. They just need to be iterable and\nhave the same structure.\n\nArgs:\nval: Source object for approximate comparison.\nother: Target object for approximate comparison.\natol: The minimum absolute tolerance. See np.isclose() documentation for\ndetails. Defaults to 1e-8 which matches np.isclose() default\nabsolute tolerance.\n\nReturns:\nTrue if objects are approximately equal, False otherwise.", "source": "codesearchnet"}
{"code": "def is_http_running_on(port):\n  \n  try:\n    conn = httplib.HTTPConnection('127.0.0.1:' + str(port))\n    conn.connect()\n    conn.close()\n    return True\n  except Exception:\n    return False", "docstring": "Check if an http server runs on a given port.\n\nArgs:\nThe port to check.\nReturns:\nTrue if it is used by an http server. False otherwise.", "source": "juraj-google-style"}
{"code": "def stacked_highway_cnn(units: tf.Tensor, n_hidden_list: List, filter_width=3, use_batch_norm=False, use_dilation=False, training_ph=None):\n    for (n_layer, n_hidden) in enumerate(n_hidden_list):\n        input_units = units\n        if (input_units.get_shape().as_list()[(- 1)] != n_hidden):\n            input_units = tf.layers.dense(input_units, n_hidden)\n        if use_dilation:\n            dilation_rate = (2 ** n_layer)\n        else:\n            dilation_rate = 1\n        units = tf.layers.conv1d(units, n_hidden, filter_width, padding='same', dilation_rate=dilation_rate, kernel_initializer=INITIALIZER())\n        if use_batch_norm:\n            units = tf.layers.batch_normalization(units, training=training_ph)\n        sigmoid_gate = tf.layers.dense(input_units, 1, activation=tf.sigmoid, kernel_initializer=INITIALIZER())\n        input_units = ((sigmoid_gate * input_units) + ((1 - sigmoid_gate) * units))\n        input_units = tf.nn.relu(input_units)\n    units = input_units\n    return units", "docstring": "Highway convolutional network. Skip connection with gating\nmechanism.\n\nArgs:\nunits: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\nn_hidden_list: list with number of hidden units at the output of each layer\nfilter_width: width of the kernel in tokens\nuse_batch_norm: whether to use batch normalization between layers\nuse_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...\ntraining_ph: boolean placeholder determining whether is training phase now or not.\nIt is used only for batch normalization to determine whether to use\ncurrent batch average (std) or memory stored average (std)\nReturns:\nunits: tensor at the output of the last convolutional layer\nwith dimensionality [None, n_tokens, n_hidden_list[-1]]", "source": "codesearchnet"}
{"code": "def _parse_normalization(normalization):\n    parsed_normalization = None\n    if isinstance(normalization, dict):\n        if (len(normalization.keys()) == 1):\n            items = list(normalization.items())[0]\n            if (len(items) == 2):\n                if (items[1] and isinstance(items[1], dict)):\n                    parsed_normalization = items\n                else:\n                    parsed_normalization = items[0]\n    elif isinstance(normalization, STR_TYPE):\n        parsed_normalization = normalization\n    return parsed_normalization", "docstring": "Parse a normalization item.\n\nTransform dicts into a tuple containing the normalization\noptions. If a string is found, the actual value is used.\n\nArgs:\nnormalization: Normalization to parse.\n\nReturns:\nTuple or string containing the parsed normalization.", "source": "codesearchnet"}
{"code": "def module_entry(yfile):\n    ytxt = yfile.read()\n    mp = ModuleParser(ytxt)\n    mst = mp.statement()\n    submod = (mst.keyword == 'submodule')\n    import_only = True\n    rev = ''\n    features = []\n    includes = []\n    rec = {}\n    for sst in mst.substatements:\n        if ((not rev) and (sst.keyword == 'revision')):\n            rev = sst.argument\n        elif (import_only and (sst.keyword in data_kws)):\n            import_only = False\n        elif (sst.keyword == 'feature'):\n            features.append(sst.argument)\n        elif submod:\n            continue\n        elif (sst.keyword == 'namespace'):\n            rec['namespace'] = sst.argument\n        elif (sst.keyword == 'include'):\n            rd = sst.find1('revision-date')\n            includes.append((sst.argument, (rd.argument if rd else None)))\n    rec['import-only'] = import_only\n    rec['features'] = features\n    if submod:\n        rec['revision'] = rev\n        submodmap[mst.argument] = rec\n    else:\n        rec['includes'] = includes\n        modmap[(mst.argument, rev)] = rec", "docstring": "Add entry for one file containing YANG module text.\n\nArgs:\nyfile (file): File containing a YANG module or submodule.", "source": "codesearchnet"}
{"code": "def __init__(self, name, display_name='', description='', default=False):\n        \n        self._name = name\n        self._display_name = display_name\n        self._description = description\n        self._default = default", "docstring": "Attribute constructor.\n\nArgs:\nname (str): Attribute name.\ndisplay_name (str): Attribute display name.\ndescription (str): Attribute description.\ndefault (bool): Whether the attribute is a default\nattribute of the corresponding datasets.", "source": "juraj-google-style"}
{"code": "def set_window_size(self, width, height, window_handle='current'):\n    self._execute(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height), 'window_handle': window_handle})", "docstring": "Sets the width and height of the current window.\n\nSupport:\nWeb(WebView)\n\nArgs:\nwidth(int): the width in pixels.\nheight(int): the height in pixels.\nwindow_handle(str): Identifier of window_handle,\ndefault to 'current'.\n\nReturns:\nWebDriver Object.", "source": "codesearchnet"}
{"code": "def id_in_cluster(cluster_spec, task_type, task_id):\n    _validate_cluster_spec(cluster_spec, task_type, task_id)\n    cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n    if task_type == 'chief':\n        return 0\n    if task_type == 'worker':\n        return task_id + len(cluster_spec.get('chief', []))\n    if task_type == 'evaluator':\n        return task_id\n    raise ValueError('There is no id for task_type %r' % task_type)", "docstring": "Returns a unique id for the task in the `task_type`'s cluster.\n\nIt returns an id ranging from [0, `worker_count(task_type, task_id)`).\n\nNote: this function assumes that \"evaluate\" job is in its own cluster or its\nown partition of a cluster.\n\nArgs:\ncluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.\ntask_type: string indicating the type of the task.\ntask_id: the id of the `task_type` in this cluster.\n\nReturns:\nan int indicating the unique id.\n\nThrows:\nValueError: if `task_type` is not \"chief\", \"worker\" or \"evaluator\".", "source": "github-repos"}
{"code": "def _make_actor_method_executor(self, method_name, method, actor_imported):\n\n    def actor_method_executor(dummy_return_id, actor, *args):\n        self._worker.actor_task_counter += 1\n        try:\n            if is_class_method(method):\n                method_returns = method(*args)\n            else:\n                method_returns = method(actor, *args)\n        except Exception as e:\n            if (isinstance(actor, ray.actor.Checkpointable) and (self._worker.actor_task_counter != 1)):\n                self._save_and_log_checkpoint(actor)\n            raise e\n        else:\n            if isinstance(actor, ray.actor.Checkpointable):\n                if (self._worker.actor_task_counter == 1):\n                    if actor_imported:\n                        self._restore_and_log_checkpoint(actor)\n                else:\n                    self._save_and_log_checkpoint(actor)\n            return method_returns\n    return actor_method_executor", "docstring": "Make an executor that wraps a user-defined actor method.\n\nThe wrapped method updates the worker's internal state and performs any\nnecessary checkpointing operations.\n\nArgs:\nmethod_name (str): The name of the actor method.\nmethod (instancemethod): The actor method to wrap. This should be a\nmethod defined on the actor class and should therefore take an\ninstance of the actor as the first argument.\nactor_imported (bool): Whether the actor has been imported.\nCheckpointing operations will not be run if this is set to\nFalse.\n\nReturns:\nA function that executes the given actor method on the worker's\nstored instance of the actor. The function also updates the\nworker's internal state to record the executed method.", "source": "codesearchnet"}
{"code": "def read_serializable_array(self, class_name, max_size=sys.maxsize):\n        \n        module = '.'.join(class_name.split('.')[:-1])\n        class_name = class_name.split('.')[-1]\n        class_attr = getattr(importlib.import_module(module), class_name)\n        length = self.read_var_int(max_size=max_size)\n        items = []\n        try:\n            for _ in range(0, length):\n                item = class_attr()\n                item.Deserialize(self)\n                items.append(item)\n        except Exception as e:\n            raise SDKException(ErrorCode.param_err(\"Couldn't deserialize %s\" % e))\n        return items", "docstring": "Deserialize a stream into the object specific by `class_name`.\n\nArgs:\nclass_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'\nmax_size (int): (Optional) maximum number of bytes to read.\n\nReturns:\nlist: list of `class_name` objects deserialized from the stream.", "source": "juraj-google-style"}
{"code": "def unserialize_data(data, compression=False, encryption=False):\n    try:\n        if encryption:\n            data = encryption.decrypt(data)\n    except Exception as err:\n        logger.error(('Decryption Error: ' + str(err)))\n        message = False\n    try:\n        if compression:\n            data = binascii.a2b_base64(data)\n            data = zlib.decompress(data)\n            message = json.loads(data)\n    except Exception as err:\n        logger.error(('Decompression Error: ' + str(err)))\n        message = False\n    decoded_message = data.decode()\n    if ((not encryption) and (not compression)):\n        message = json.loads(decoded_message)\n    return message", "docstring": "Unserializes the packet data and converts it from json format to normal\nPython datatypes.\n\nIf you choose to enable encryption and/or compression when serializing\ndata, you MUST enable the same options when unserializing data.\n\nArgs:\ndata (str): The raw, serialized packet data delivered from the transport\nprotocol.\ncompression (boolean): True or False value on whether or not to\nuncompress the serialized data.\nencryption (rsa.encryption): An encryption instance used to decrypt the\nmessage if encryption is desired.\n\nReturns:\nThe message unserialized in normal Python datatypes.", "source": "codesearchnet"}
{"code": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, XLMProphetNetSeq2SeqModelOutput]:\n    use_cache = use_cache if use_cache is not None else self.config.use_cache\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    if encoder_outputs is None:\n        encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, return_dict=return_dict)\n    if not return_dict:\n        return decoder_outputs + encoder_outputs\n    return XLMProphetNetSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, decoder_attentions=decoder_outputs.attentions, decoder_ngram_attentions=decoder_outputs.ngram_attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, XLMProphetNetModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"patrickvonplaten/xprophetnet-large-uncased-standalone\")\n>>> model = XLMProphetNetModel.from_pretrained(\"patrickvonplaten/xprophetnet-large-uncased-standalone\")\n\n>>> input_ids = tokenizer(\n...     \"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\"\n... ).input_ids  # Batch size 1\n>>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids  # Batch size 1\n>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\n\n>>> last_hidden_states = outputs.last_hidden_state  # main stream hidden states\n>>> last_hidden_states_ngram = outputs.last_hidden_state_ngram  # predict hidden states\n```", "source": "github-repos"}
{"code": "def bootstrap(score_objs, n_boot=1000):\n    all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)\n    return all_samples.sum(axis=1)", "docstring": "Given a set of DistributedROC or DistributedReliability objects, this function performs a\nbootstrap resampling of the objects and returns n_boot aggregations of them.\n\nArgs:\nscore_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method\nn_boot (int): Number of bootstrap samples\n\nReturns:\nAn array of DistributedROC or DistributedReliability", "source": "codesearchnet"}
{"code": "def __init__(self, client_id, client_secret):\n        \n        self.box_request = BoxRestRequest(client_id, client_secret)\n        self.client_id = client_id\n        self.client_secret = client_secret", "docstring": "Constructor\n\nArgs:\nclient_id (str): Client ID provided by Box.\n\nclient_secret (str): Client Secret provided by Box.", "source": "juraj-google-style"}
{"code": "def delete_resource_view(self, resource_view):\n        \n        \n        if isinstance(resource_view, str):\n            if is_valid_uuid(resource_view) is False:\n                raise HDXError('%s is not a valid resource view id!' % resource_view)\n            resource_view = ResourceView({'id': resource_view}, configuration=self.configuration)\n        else:\n            resource_view = self._get_resource_view(resource_view)\n            if 'id' not in resource_view:\n                found = False\n                title = resource_view.get('title')\n                for rv in self.get_resource_views():\n                    if resource_view['title'] == rv['title']:\n                        resource_view = rv\n                        found = True\n                        break\n                if not found:\n                    raise HDXError('No resource views have title %s in this resource!' % title)\n        resource_view.delete_from_hdx()", "docstring": "Delete a resource view from the resource and HDX\n\nArgs:\nresource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def remove_vtep(self, name, vtep, vlan=None):\n        \n        if not vlan:\n            cmd = 'vxlan flood vtep remove {}'.format(vtep)\n        else:\n            cmd = 'vxlan vlan {} flood vtep remove {}'.format(vlan, vtep)\n        return self.configure_interface(name, cmd)", "docstring": "Removes a VTEP endpoint from the global or local flood list\n\nEosVersion:\n4.13.7M\n\nArgs:\nname (str): The name of the interface to configure\nvtep (str): The IP address of the remote VTEP endpoint to add\nvlan (str): The VLAN ID associated with this VTEP.  If the VLAN\nkeyword is used, then the VTEP is configured as a local flood\nendpoing\n\nReturns:\nTrue if the command completes successfully", "source": "juraj-google-style"}
{"code": "def load_index(self, filename, reindex=False):\n    self._reset_index()\n    with open(filename, 'r') as fobj:\n        data = json.load(fobj)\n    for (path, file) in data.items():\n        (ents, domains) = (file['entities'], file['domains'])\n        (root, f) = (dirname(path), basename(path))\n        if reindex:\n            self._index_file(root, f, domains)\n        else:\n            f = self._make_file_object(root, f)\n            tags = {k: Tag(self.entities[k], v) for (k, v) in ents.items()}\n            f.tags = tags\n            self.files[f.path] = f\n            for (ent, val) in f.entities.items():\n                self.entities[ent].add_file(f.path, val)", "docstring": "Load the Layout's index from a plaintext file.\n\nArgs:\nfilename (str): Path to the plaintext index file.\nreindex (bool): If True, discards entity values provided in the\nloaded index and instead re-indexes every file in the loaded\nindex against the entities defined in the config. Default is\nFalse, in which case it is assumed that all entity definitions\nin the loaded index are correct and do not need any further\nvalidation.\n\nNote: At the moment, directory-specific config files aren't serialized.\nThis means reconstructed indexes will only work properly in cases\nwhere there aren't multiple layout specs within a project.", "source": "codesearchnet"}
{"code": "def delete_snl(self, snl_ids):\n        \n        try:\n            payload = {\"ids\": json.dumps(snl_ids)}\n            response = self.session.post(\n                \"{}/snl/delete\".format(self.preamble), data=payload)\n\n            if response.status_code in [200, 400]:\n                resp = json.loads(response.text, cls=MontyDecoder)\n                if resp[\"valid_response\"]:\n                    if resp.get(\"warning\"):\n                        warnings.warn(resp[\"warning\"])\n                    return resp\n                else:\n                    raise MPRestError(resp[\"error\"])\n\n            raise MPRestError(\"REST error with status code {} and error {}\"\n                              .format(response.status_code, response.text))\n\n        except Exception as ex:\n            raise MPRestError(str(ex))", "docstring": "Delete earlier submitted SNLs.\n\n.. note::\n\nAs of now, this MP REST feature is open only to a select group of\nusers. Opening up submissions to all users is being planned for\nthe future.\n\nArgs:\nsnl_ids: List of SNL ids.\n\nRaises:\nMPRestError", "source": "juraj-google-style"}
{"code": "def save(self, branch, commit_message, **kwargs):\n    self.branch = branch\n    self.commit_message = commit_message\n    self.file_path = self.file_path.replace('/', '%2F')\n    super(ProjectFile, self).save(**kwargs)", "docstring": "Save the changes made to the file to the server.\n\nThe object is updated to match what the server returns.\n\nArgs:\nbranch (str): Branch in which the file will be updated\ncommit_message (str): Message to send with the commit\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def parse(self, filepath, content):\n    try:\n        parsed = yaml.load(content)\n    except yaml.YAMLError as exc:\n        msg = 'No YAML object could be decoded from file: {}\\n{}'\n        raise SettingsBackendError(msg.format(filepath, exc))\n    return parsed", "docstring": "Parse opened settings content using YAML parser.\n\nArgs:\nfilepath (str): Settings object, depends from backend\ncontent (str): Settings content from opened file, depends from\nbackend.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If parser can not decode\na valid YAML object.\n\nReturns:\ndict: Dictionnary containing parsed setting elements.", "source": "codesearchnet"}
{"code": "def find_centroid_alleles(alleles, bp=28, t=0.025):\n    centroid_alleles = set()\n    len_allele = group_alleles_by_size(alleles)\n    for (length, seqs) in len_allele.items():\n        if (len(seqs) == 1):\n            centroid_alleles.add(seqs[0])\n            continue\n        seq_arr = seq_int_arr(seqs)\n        starts_ends_idxs = group_alleles_by_start_end_Xbp(seq_arr, bp=bp)\n        for (k, idxs) in starts_ends_idxs.items():\n            if (len(idxs) == 1):\n                centroid_alleles.add(seqs[idxs[0]])\n                continue\n            seq_arr_subset = seq_arr[idxs]\n            dists = pdist(seq_arr_subset, 'hamming')\n            cl = allele_clusters(dists, t=t)\n            dm_sq = squareform(dists)\n            for (cl_key, cl_idxs) in cl.items():\n                if ((len(cl_idxs) == 1) or (len(cl_idxs) == 2)):\n                    centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[0]]))\n                    continue\n                dm_sub = dm_subset(dm_sq, cl_idxs)\n                min_idx = min_row_dist_sum_idx(dm_sub)\n                centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[min_idx]))\n    return centroid_alleles", "docstring": "Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering\n\nWorkflow for finding centroid alleles:\n\n- grouping by size (e.g. 100bp, 101bp, 103bp, etc)\n- then grouped by `bp` nucleotides at ends matching\n- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)\n- tree cutting at threshold `t`\n- select allele with minimum distance to other alleles in cluster as centroid\n\nArgs:\nalleles (iterable): collection of allele nucleotide sequences\nbp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)\nt (float): cluster generation (tree cutting) distance threshold for size grouped alleles\n\nReturns:\nset of str: centroid alleles", "source": "codesearchnet"}
{"code": "def _get_reference_classnames(self, classname, namespace, resultclass_name, role):\n    self._validate_namespace(namespace)\n    result_classes = self._classnamedict(resultclass_name, namespace)\n    rtn_classnames_set = set()\n    role = (role.lower() if role else role)\n    for cl in self._get_association_classes(namespace):\n        for prop in six.itervalues(cl.properties):\n            if ((prop.type == 'reference') and self._ref_prop_matches(prop, classname, cl.classname, result_classes, role)):\n                rtn_classnames_set.add(cl.classname)\n    return list(rtn_classnames_set)", "docstring": "Get list of classnames that are references for which this classname\nis a target filtered by the result_class and role parameters if they\nare none.\nThis is a common method used by all of the other reference and\nassociator methods to create a list of reference classnames\n\nReturns:\nlist of classnames that satisfy the criteria.", "source": "codesearchnet"}
{"code": "def __init__(self, error_name, error_id, error_msg, token_value):\n    self.error_name = error_name\n    self.error_id = error_id\n    self.error_msg = error_msg\n    self._token_value = token_value", "docstring": "Create a LexerError that matches |token_value|.\n\nArgs:\nerror_name: A short, human readable name for the error,\nusing lowercase-with-dashes-format.\nerror_id: An integer to identify a specific error:\n100s: Lexer errors.\n200s: Low level parsing errors.\n300s: High level parsing errors.\nerror_msg: A message to display with this error that describes\nclearly what caused the error.\ntoken_value: A string to match against the token that the lexer\nfailed at (or None to match against every token).\n\nReturns:\nLexerError that matches against |token_value|.", "source": "github-repos"}
{"code": "def nuc_p(msg):\n    \n    tc = typecode(msg)\n\n    if typecode(msg) < 5 or typecode(msg) > 22:\n        raise RuntimeError(\n            \"%s: Not a surface position message (5<TC<8), \\\n            airborne position message (8<TC<19), \\\n            or airborne position with GNSS height (20<TC<22)\" % msg\n        )\n\n    try:\n        NUCp = uncertainty.TC_NUCp_lookup[tc]\n        HPL = uncertainty.NUCp[NUCp]['HPL']\n        RCu = uncertainty.NUCp[NUCp]['RCu']\n        RCv = uncertainty.NUCp[NUCp]['RCv']\n    except KeyError:\n        HPL, RCu, RCv = uncertainty.NA, uncertainty.NA, uncertainty.NA\n\n\n    if tc in [20, 21]:\n        RCv = uncertainty.NA\n\n    return HPL, RCu, RCv", "docstring": "Calculate NUCp, Navigation Uncertainty Category - Position (ADS-B version 1)\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string,\n\nReturns:\nint: Horizontal Protection Limit\nint: 95% Containment Radius - Horizontal (meters)\nint: 95% Containment Radius - Vertical (meters)", "source": "juraj-google-style"}
{"code": "def _update_in_hdx(self, object_type, id_field_name, file_to_upload=None, **kwargs):\n    self._check_load_existing_object(object_type, id_field_name)\n    self._merge_hdx_update(object_type, id_field_name, file_to_upload, **kwargs)", "docstring": "Helper method to check if HDX object exists in HDX and if so, update it\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\nfile_to_upload (Optional[str]): File to upload to HDX\n**kwargs: See below\noperation (string): Operation to perform eg. patch. Defaults to update.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. The special tokens depend on calling set_lang.\n\nAn NLLB sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def print_stack_events(self):\n    first_token = '7be7981bd6287dd8112305e8f3822a6f'\n    keep_going = True\n    next_token = first_token\n    current_request_token = None\n    rows = []\n    try:\n        while (keep_going and next_token):\n            if (next_token == first_token):\n                response = self._cf_client.describe_stack_events(StackName=self._stack_name)\n            else:\n                response = self._cf_client.describe_stack_events(StackName=self._stack_name, NextToken=next_token)\n            next_token = response.get('NextToken', None)\n            for event in response['StackEvents']:\n                row = []\n                event_time = event.get('Timestamp')\n                request_token = event.get('ClientRequestToken', 'unknown')\n                if (current_request_token is None):\n                    current_request_token = request_token\n                elif (current_request_token != request_token):\n                    keep_going = False\n                    break\n                row.append(event_time.strftime('%x %X'))\n                row.append(event.get('LogicalResourceId'))\n                row.append(event.get('ResourceStatus'))\n                row.append(event.get('ResourceStatusReason', ''))\n                rows.append(row)\n        if (len(rows) > 0):\n            print('\\nEvents for the current upsert:')\n            print(tabulate(rows, headers=['Time', 'Logical ID', 'Status', 'Message']))\n            return True\n        else:\n            print('\\nNo stack events found\\n')\n    except Exception as wtf:\n        print(wtf)\n    return False", "docstring": "List events from the given stack\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def ssh(container, cmd='', user='root', password='root'):\n    \n    ip = get_ip(container)\n    ssh_cmd = 'sshpass -p \\'%s\\' ssh -A -t -o StrictHostKeyChecking=no \\'%s\\'@%s' % (password, user, ip)\n    local('ssh -A -t -o StrictHostKeyChecking=no -i \"%s\" %s@%s %s %s' % (\n        env.key_filename, env.user, env.host, ssh_cmd, cmd))", "docstring": "SSH into a running container, using the host as a jump host. This requires\nthe container to have a running sshd process.\n\nArgs:\n* container: Container name or ID\n* cmd='': Command to run in the container\n* user='root': SSH username\n* password='root': SSH password", "source": "juraj-google-style"}
{"code": "def targets(self):\n    return self._targets", "docstring": "Return the unique names of ops to run.\n\nReturns:\nA list of strings.", "source": "github-repos"}
{"code": "def switch_to_window(page_class, webdriver):\n        \n        window_list = list(webdriver.window_handles)\n        original_window = webdriver.current_window_handle\n        for window_handle in window_list:\n            webdriver.switch_to_window(window_handle)\n            try:\n                return PageFactory.create_page(page_class, webdriver)\n            except:\n                pass\n\n        webdriver.switch_to_window(original_window)\n        raise WindowNotFoundError(\n            u(\"Window {0} not found.\").format(page_class.__class__.__name__))", "docstring": "Utility method for switching between windows.  It will search through currently open\nwindows, then switch to the window matching the provided PageObject class.\n\nArgs:\npage_class (PageObject): Page class to search for/instantiate.\nwebdriver (WebDriver): Selenium webdriver.\n\nUsage::\n\nWebUtils.switch_to_window(DetailsPopUpPage, driver) # switches to the pop up window.", "source": "juraj-google-style"}
{"code": "def set(self, key, value, **kwargs):\n        \n        path = '%s/%s' % (self.path, key.replace('/', '%2F'))\n        data = {'value': value}\n        server_data = self.gitlab.http_put(path, post_data=data, **kwargs)\n        return self._obj_cls(self, server_data)", "docstring": "Create or update the object.\n\nArgs:\nkey (str): The key of the object to create/update\nvalue (str): The value to set for the object\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabSetError: If an error occured\n\nReturns:\nobj: The created/updated attribute", "source": "juraj-google-style"}
{"code": "def write_fasta_file(self, outfile, force_rerun=False):\n        \n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n            SeqIO.write(self, outfile, \"fasta\")\n\n        \n        self.sequence_path = outfile", "docstring": "Write a FASTA file for the protein sequence, ``seq`` will now load directly from this file.\n\nArgs:\noutfile (str): Path to new FASTA file to be written to\nforce_rerun (bool): If an existing file should be overwritten", "source": "juraj-google-style"}
{"code": "def client(self):\n    if self.proxy:\n        proxyhandler = urllib.ProxyHandler({'http': self.proxy})\n        opener = urllib.build_opener(proxyhandler)\n        urllib.install_opener(opener)\n        transport = ProxyTransport()\n    if (not hasattr(self, '_client')):\n        transport = None\n        if self.pypi:\n            if self.proxy:\n                logger.info('Using provided proxy: {0}.'.format(self.proxy))\n            self._client = xmlrpclib.ServerProxy(settings.PYPI_URL, transport=transport)\n            self._client_set = True\n        else:\n            self._client = None\n    return self._client", "docstring": "XMLRPC client for PyPI. Always returns the same instance.\n\nIf the package is provided as a path to compressed source file,\nPyPI will not be used and the client will not be instantiated.\n\nReturns:\nXMLRPC client for PyPI or None.", "source": "codesearchnet"}
{"code": "def psq2(d1, d2):\n    \n    d1, d2 = flatten(d1), flatten(d2)\n\n    def f(p):\n        return sum((p ** 2) * np.nan_to_num(np.log(p * len(p))))\n\n    return abs(f(d1) - f(d2))", "docstring": "Compute the PSQ2 measure.\n\nArgs:\nd1 (np.ndarray): The first distribution.\nd2 (np.ndarray): The second distribution.", "source": "juraj-google-style"}
{"code": "def myRank(grade, badFormat, year, length):\n    return int((sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1))", "docstring": "rank of candidateNumber in year\n\nArguments:\ngrade {int} -- a weighted average for a specific candidate number and year\nbadFormat {dict} -- candNumber : [results for candidate]\nyear {int} -- year you are in\nlength {int} -- length of each row in badFormat divided by 2\n\n\n\nReturns:\nint -- rank of candidateNumber in year", "source": "codesearchnet"}
{"code": "def conversations_replies(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"channel\": channel, \"ts\": ts})\n        return self.api_call(\"conversations.replies\", http_verb=\"GET\", params=kwargs)", "docstring": "Retrieve a thread of messages posted to a conversation\n\nArgs:\nchannel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'\nts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'", "source": "juraj-google-style"}
{"code": "def __init__(self, scope, parent, name, result, args=None, paren=False):\n        \n        CodeExpression.__init__(self, scope, parent, name, result, paren)\n        self.arguments = args or ()", "docstring": "Constructor for operators.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the operator in the program.\nresult (str): The return type of the operator in the program.\n\nKwargs:\nargs (tuple): Initial tuple of arguments.\nparen (bool): Whether the expression is enclosed in parentheses.", "source": "juraj-google-style"}
{"code": "def NewFromJSON(data):\n        \n        if data.get('shakes', None):\n            shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]\n        else:\n            shakes = None\n\n        return User(\n            id=data.get('id', None),\n            name=data.get('name', None),\n            profile_image_url=data.get('profile_image_url', None),\n            about=data.get('about', None),\n            website=data.get('website', None),\n            shakes=shakes)", "docstring": "Create a new User instance from a JSON dict.\n\nArgs:\ndata (dict): JSON dictionary representing a user.\n\nReturns:\nA User instance.", "source": "juraj-google-style"}
{"code": "def get_backend(self, name=None, **kwargs):\n    backends = self.backends(name, **kwargs)\n    if (len(backends) > 1):\n        raise QiskitBackendNotFoundError('More than one backend matches the criteria')\n    elif (not backends):\n        raise QiskitBackendNotFoundError('No backend matches the criteria')\n    return backends[0]", "docstring": "Return a single backend matching the specified filtering.\n\nArgs:\nname (str): name of the backend.\n**kwargs (dict): dict used for filtering.\n\nReturns:\nBaseBackend: a backend matching the filtering.\n\nRaises:\nQiskitBackendNotFoundError: if no backend could be found or\nmore than one backend matches.", "source": "codesearchnet"}
{"code": "def decompress_dir(path):\n    \n    for parent, subdirs, files in os.walk(path):\n        for f in files:\n            decompress_file(os.path.join(parent, f))", "docstring": "Recursively decompresses all files in a directory.\n\nArgs:\npath (str): Path to parent directory.", "source": "juraj-google-style"}
{"code": "def validate_all_values_for_key(obj, key, validation_fun):\n    \n    for vkey, value in obj.items():\n        if vkey == key:\n            validation_fun(value)\n        elif isinstance(value, dict):\n            validate_all_values_for_key(value, key, validation_fun)", "docstring": "Validate value for all (nested) occurrence  of `key` in `obj`\nusing `validation_fun`.\n\nArgs:\nobj (dict): dictionary object.\nkey (str): key whose value is to be validated.\nvalidation_fun (function): function used to validate the value\nof `key`.\n\nRaises:\nValidationError: `validation_fun` will raise this error on failure", "source": "juraj-google-style"}
{"code": "def be2le_state_by_state(tpm):\n    le = np.empty(tpm.shape)\n    N = tpm.shape[0]\n    n = int(log2(N))\n    for i in range(N):\n        le[(i, :)] = tpm[(be2le(i, n), :)]\n    return le", "docstring": "Convert a state-by-state TPM from big-endian to little-endian or vice\nversa.\n\nArgs:\ntpm (np.ndarray): A state-by-state TPM.\n\nReturns:\nnp.ndarray: The state-by-state TPM in the other indexing format.\n\nExample:\n>>> tpm = np.arange(16).reshape([4, 4])\n>>> be2le_state_by_state(tpm)\narray([[ 0.,  1.,  2.,  3.],\n[ 8.,  9., 10., 11.],\n[ 4.,  5.,  6.,  7.],\n[12., 13., 14., 15.]])", "source": "codesearchnet"}
{"code": "def generate_orders(events, sell_delay=5, sep=','):\n    sell_delay = (float(unicode(sell_delay)) or 1)\n    for (i, (t, row)) in enumerate(events.iterrows()):\n        for (sym, event) in row.to_dict().iteritems():\n            if (event and (not np.isnan(event))):\n                if (event > 0):\n                    sell_event_i = min((i + sell_delay), (len(events) - 1))\n                    sell_event_t = events.index[sell_event_i]\n                    sell_event = events[sym][sell_event_i]\n                    if np.isnan(sell_event):\n                        events[sym][sell_event_t] = (- 1)\n                    else:\n                        events[sym][sell_event_t] += (- 1)\n                order = (t.year, t.month, t.day, sym, ('Buy' if (event > 0) else 'Sell'), (abs(event) * 100))\n                if isinstance(sep, basestring):\n                    (yield sep.join(order))\n                (yield order)", "docstring": "Generate CSV orders based on events indicated in a DataFrame\n\nArguments:\nevents (pandas.DataFrame): Table of NaNs or 1's, one column for each symbol.\n1 indicates a BUY event. -1 a SELL event. nan or 0 is a nonevent.\nsell_delay (float): Number of days to wait before selling back the shares bought\nsep (str or None): if sep is None, orders will be returns as tuples of `int`s, `float`s, and `str`s\notherwise the separator will be used to join the order parameters into the yielded str\n\nReturns:\ngenerator of str: yielded CSV rows in the format (yr, mo, day, symbol, Buy/Sell, shares)", "source": "codesearchnet"}
{"code": "def __query_node(self, ip, host):\n    host = util.shorten_host_name(host, self.config.host_domains)\n    (node, node_updated) = self.__get_known_node(ip, host)\n    if (node == None):\n        node = natlas_node()\n        node.name = host\n        node.ip = [ip]\n        state = NODE_NEW\n    else:\n        if (node.snmpobj.success == 1):\n            return (node, NODE_KNOWN)\n        if (node_updated == 1):\n            state = NODE_NEWIP\n        else:\n            state = NODE_KNOWN\n        node.name = host\n    if (ip == 'UNKNOWN'):\n        return (node, state)\n    if ((ip == '0.0.0.0') | (ip == '')):\n        return (node, state)\n    if (node.try_snmp_creds(self.config.snmp_creds) == 0):\n        return (node, state)\n    node.name = node.get_system_name(self.config.host_domains)\n    if (node.name != host):\n        if (state == NODE_NEW):\n            (node2, node_updated2) = self.__get_known_node(ip, host)\n            if ((node2 != None) & (node_updated2 == 0)):\n                return (node, NODE_KNOWN)\n            if (node_updated2 == 1):\n                state = NODE_NEWIP\n    if ((node.name == None) | (node.name == '')):\n        node.name = node.get_ipaddr()\n    node.opts.get_serial = True\n    node.query_node()\n    return (node, state)", "docstring": "Query this node.\nReturn node details and if we already knew about it or if this is a new node.\nDon't save the node to the known list, just return info about it.\n\nArgs:\nip:                 IP Address of the node.\nhost:               Hostname of this known (if known from CDP/LLDP)\n\nReturns:\nnatlas_node:        Node of this object\nint:                NODE_NEW   = Newly discovered node\nNODE_NEWIP = Already knew about this node but not by this IP\nNODE_KNOWN = Already knew about this node", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = DetaFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DetaFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def locate_module(module_id: str, module_type: str=None):\n    entry_point = None\n    if module_type:\n        entry_point = ('ehforwarderbot.%s' % module_type)\n    module_id = module_id.split('\n    if entry_point:\n        for i in pkg_resources.iter_entry_points(entry_point):\n            if (i.name == module_id):\n                return i.load()\n    return pydoc.locate(module_id)", "docstring": "Locate module by module ID\n\nArgs:\nmodule_id: Module ID\nmodule_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'``", "source": "codesearchnet"}
{"code": "def change_t(self, t):\n        \n        t = super().change_t(t)\n        self.__now_cycles += 1\n        if self.__now_cycles % self.__reannealing_per == 0:\n            t = t * self.__thermostat\n            \n            if t < self.__t_min:\n                t = self.__t_default\n        return t", "docstring": "Change temperature.\n\nOverride.\n\nArgs:\nt:    Now temperature.\n\nReturns:\nNext temperature.", "source": "juraj-google-style"}
{"code": "def set_parameter_vector(self, vector, include_frozen=False):\n        \n        v = self.parameter_vector\n        if include_frozen:\n            v[:] = vector\n        else:\n            v[self.unfrozen_mask] = vector\n        self.parameter_vector = v\n        self.dirty = True", "docstring": "Set the parameter values to the given vector\n\nArgs:\nvector (array[vector_size] or array[full_size]): The target\nparameter vector. This must be in the same order as\n``parameter_names`` and it should only include frozen\nparameters if ``include_frozen`` is ``True``.\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "juraj-google-style"}
{"code": "def _pearson_correlation(self, imgs_to_decode):\n    (x, y) = (imgs_to_decode.astype(float), self.feature_images.astype(float))\n    return self._xy_corr(x, y)", "docstring": "Decode images using Pearson's r.\n\nComputes the correlation between each input image and each feature\nimage across voxels.\n\nArgs:\nimgs_to_decode: An ndarray of images to decode, with voxels in rows\nand images in columns.\n\nReturns:\nAn n_features x n_images 2D array, with each cell representing the\npearson correlation between the i'th feature and the j'th image\nacross all voxels.", "source": "codesearchnet"}
{"code": "def process(self, tensor):\n        \n        for processor in self.preprocessors:\n            tensor = processor.process(tensor=tensor)\n        return tensor", "docstring": "Process state.\n\nArgs:\ntensor: tensor to process\n\nReturns: processed state", "source": "juraj-google-style"}
{"code": "def substitute(self, var_map, cont=False, tag=None):\n        \n        return self.apply(substitute, var_map=var_map, cont=cont, tag=tag)", "docstring": "Substitute sub-expressions both on the lhs and rhs\n\nArgs:\nvar_map (dict): Dictionary with entries of the form\n``{expr: substitution}``", "source": "juraj-google-style"}
{"code": "def to_sql(self, view: views.View, limit: Optional[int]=None) -> str:\n    sql_generator = self._build_sql_generator(view)\n    sql_statement = sql_generator.build_sql_statement()\n    view_table_name = f'{self._value_set_codes_table.project}.{self._value_set_codes_table.dataset_id}.{self._value_set_codes_table.table_id}'\n    valuesets_clause = sql_generator.build_valueset_expression(view_table_name)\n    if limit is not None and limit < 1:\n        raise ValueError('Query limits must be positive integers.')\n    limit_clause = '' if limit is None else f' LIMIT {limit}'\n    return f'{valuesets_clause}{sql_statement}{limit_clause}'", "docstring": "Returns the SQL used to run the given view in BigQuery.\n\nArgs:\nview: the view used to generate the SQL.\nlimit: optional limit to attach to the generated SQL.\n\nReturns:\nThe SQL used to run the given view.", "source": "github-repos"}
{"code": "def etm_supported(self):\n        \n        res = self._dll.JLINKARM_ETM_IsPresent()\n        if (res == 1):\n            return True\n\n        \n        \n        \n        info = ctypes.c_uint32(0)\n        index = enums.JLinkROMTable.ETM\n        res = self._dll.JLINKARM_GetDebugInfo(index, ctypes.byref(info))\n        if (res == 1):\n            return False\n\n        return True", "docstring": "Returns if the CPU core supports ETM.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``True`` if the CPU has the ETM unit, otherwise ``False``.", "source": "juraj-google-style"}
{"code": "def pred(scores: jax.Array, rows: jax.Array, cols: jax.Array, N: int) -> jax.Array:\n    r: jax.Array = 2 * jax.ops.segment_sum(scores.take(cols), rows, N) - scores.sum()\n    return r > 0", "docstring": "Predicts the target output from the learned scores and input entries.\n\nArgs:\nscores (jax.Array): Contribution scores of features.\nrows (jax.Array): Row indices of True values in the input.\ncols (jax.Array): Column indices of True values in the input.\nN (int): The number of input entries.\n\nReturns:\nres (jax.Array): A prediction of the target.", "source": "github-repos"}
{"code": "def convert_source_tokens_to_target_tokens(self, input_ids, source_tokenizer, destination_tokenizer):\n    text = source_tokenizer.batch_decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n    dest_ids = destination_tokenizer(text, add_special_tokens=True, return_tensors='pt')['input_ids']\n    return dest_ids.to(input_ids.device)", "docstring": "Convert token IDs from one tokenizer to another.\nArgs:\ninput_ids: The input token IDs.\nsource_tokenizer: The source tokenizer.\ndestination_tokenizer: The destination tokenizer.\nReturns:\nThe converted token IDs.", "source": "github-repos"}
{"code": "def _CreatePerformanceTarget(client, campaign_group_id):\n    cgpt_service = client.GetService('CampaignGroupPerformanceTargetService', version='v201809')\n    operations = [{'operator': 'ADD', 'operand': {'campaignGroupId': campaign_group_id, 'performanceTarget': {'efficiencyTargetType': 'CPC_LESS_THAN_OR_EQUAL_TO', 'efficiencyTargetValue': 3000000, 'spendTargetType': 'MAXIMUM', 'spendTarget': {'microAmount': 500000000}, 'volumeGoalType': 'MAXIMIZE_CLICKS', 'volumeTargetValue': 3000, 'startDate': datetime.datetime.now().strftime('%Y%m%d'), 'endDate': (datetime.datetime.now() + datetime.timedelta(90)).strftime('%Y%m%d')}}}]\n    cgpt = cgpt_service.mutate(operations)['value'][0]\n    print(('Campaign performance target with ID \"%d\" was added for campaign group ID \"%d\".' % (cgpt['id'], cgpt['campaignGroupId'])))", "docstring": "Creates a performance target for the campaign group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_group_id: an integer ID for the campaign group.", "source": "codesearchnet"}
{"code": "def create_proxy_api_files(output_files, proxy_module_root, output_dir):\n    for file_path in output_files:\n        module = get_module(os.path.dirname(file_path), output_dir)\n        if not os.path.isdir(os.path.dirname(file_path)):\n            os.makedirs(os.path.dirname(file_path))\n        contents = f'from {proxy_module_root}.{module} import *'\n        with open(file_path, 'w') as fp:\n            fp.write(contents)", "docstring": "Creates __init__.py files in proxy format for the Python API.\n\nArgs:\noutput_files: List of __init__.py file paths to create.\nproxy_module_root: Module root for proxy-import format. If specified, proxy\nfiles with content like `from proxy_module_root.proxy_module import *`\nwill be created to enable import resolution under TensorFlow.\noutput_dir: output API root directory.", "source": "github-repos"}
{"code": "def __init__(self, to_track: Dict):\n    self.to_track = to_track\n    self._seen: Set[str] = set()", "docstring": "This class \"tracks\" a python dictionary by keeping track of which item is accessed.\n\nArgs:\nto_track (Dict): The dictionary we wish to track", "source": "github-repos"}
{"code": "def major_complex(network, state):\n    log.info('Calculating major complex...')\n    result = complexes(network, state)\n    if result:\n        result = max(result)\n    else:\n        empty_subsystem = Subsystem(network, state, ())\n        result = _null_sia(empty_subsystem)\n    log.info('Finished calculating major complex.')\n    return result", "docstring": "Return the major complex of the network.\n\nArgs:\nnetwork (Network): The |Network| of interest.\nstate (tuple[int]): The state of the network (a binary tuple).\n\nReturns:\nSystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with\nmaximal |big_phi|.", "source": "codesearchnet"}
{"code": "def show_available_noise_curves(return_curves=True, print_curves=False):\n    if ((return_curves is False) and (print_curves is False)):\n        raise ValueError(('Both return curves and print_curves are False.' + ' You will not see the options'))\n    cfd = os.path.dirname(os.path.abspath(__file__))\n    curves = [curve.split('.')[0] for curve in os.listdir((cfd + '/noise_curves/'))]\n    if print_curves:\n        for f in curves:\n            print(f)\n    if return_curves:\n        return curves\n    return", "docstring": "List available sensitivity curves\n\nThis function lists the available sensitivity curve strings in noise_curves folder.\n\nArgs:\nreturn_curves (bool, optional): If True, return a list of curve options.\nprint_curves (bool, optional): If True, print each curve option.\n\nReturns:\n(optional list of str): List of curve options.\n\nRaises:\nValueError: Both args are False.", "source": "codesearchnet"}
{"code": "def run_processor(\n        processorClass,\n        ocrd_tool=None,\n        mets_url=None,\n        resolver=None,\n        workspace=None,\n        page_id=None,\n        log_level=None,\n        input_file_grp=None,\n        output_file_grp=None,\n        parameter=None,\n        working_dir=None,\n): \n    \n    workspace = _get_workspace(\n        workspace,\n        resolver,\n        mets_url,\n        working_dir\n    )\n    if parameter is not None:\n        if not ':\n            fname = os.path.abspath(parameter)\n        else:\n            fname = workspace.download_url(parameter)\n        with open(fname, 'r') as param_json_file:\n            parameter = json.load(param_json_file)\n    else:\n        parameter = {}\n    log.debug(\"Running processor %s\", processorClass)\n    processor = processorClass(\n        workspace,\n        ocrd_tool=ocrd_tool,\n        page_id=page_id,\n        input_file_grp=input_file_grp,\n        output_file_grp=output_file_grp,\n        parameter=parameter\n    )\n    ocrd_tool = processor.ocrd_tool\n    name = '%s v%s' % (ocrd_tool['executable'], processor.version)\n    otherrole = ocrd_tool['steps'][0]\n    log.debug(\"Processor instance %s (%s doing %s)\", processor, name, otherrole)\n    processor.process()\n    workspace.mets.add_agent(\n        name=name,\n        _type='OTHER',\n        othertype='SOFTWARE',\n        role='OTHER',\n        otherrole=otherrole\n    )\n    workspace.save_mets()\n    return processor", "docstring": "Create a workspace for mets_url and run processor through it\n\nArgs:\nparameter (string): URL to the parameter", "source": "juraj-google-style"}
{"code": "def set_status(self, trial, status):\n        \n        trial.status = status\n        if status in [Trial.TERMINATED, Trial.ERROR]:\n            self.try_checkpoint_metadata(trial)", "docstring": "Sets status and checkpoints metadata if needed.\n\nOnly checkpoints metadata if trial status is a terminal condition.\nPENDING, PAUSED, and RUNNING switches have checkpoints taken care of\nin the TrialRunner.\n\nArgs:\ntrial (Trial): Trial to checkpoint.\nstatus (Trial.status): Status to set trial to.", "source": "juraj-google-style"}
{"code": "def unnest_collection(collection, df_list):\n    \n    for item in collection['link']['item']:\n        if item['class'] == 'dataset':\n            df_list.append(Dataset.read(item['href']).write('dataframe'))\n        elif item['class'] == 'collection':\n            nested_collection = request(item['href'])\n            unnest_collection(nested_collection, df_list)", "docstring": "Unnest collection structure extracting all its datasets and converting \\\nthem to Pandas Dataframes.\n\nArgs:\ncollection (OrderedDict): data in JSON-stat format, previously \\\ndeserialized to a python object by \\\njson.load() or json.loads(),\ndf_list (list): list variable which will contain the converted \\\ndatasets.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def retransmit(self, data):\n        \n\n        \n        \n        if data[\"method\"] == \"REGISTER\":\n            if not self.registered and self.register_retries < self.max_retries:\n                logger.debug(\"<%s> Timeout exceeded. \" % str(self.cuuid) + \\\n                              \"Retransmitting REGISTER request.\")\n                self.register_retries += 1\n                self.register(data[\"address\"], retry=False)\n            else:\n                logger.debug(\"<%s> No need to retransmit.\" % str(self.cuuid))\n\n        if data[\"method\"] == \"EVENT\":\n            if data[\"euuid\"] in self.event_uuids:\n                \n                self.event_uuids[data[\"euuid\"]][\"retry\"] += 1\n\n                if self.event_uuids[data[\"euuid\"]][\"retry\"] > self.max_retries:\n                    logger.debug(\"<%s> Max retries exceeded. Timed out waiting \"\n                                  \"for server for event: %s\" % (data[\"cuuid\"],\n                                                                data[\"euuid\"]))\n                    logger.debug(\"<%s> <euuid:%s> Deleting event from currently \"\n                                  \"processing event uuids\" % (data[\"cuuid\"],\n                                                              str(data[\"euuid\"])))\n                    del self.event_uuids[data[\"euuid\"]]\n                else:\n                    \n                    self.listener.send_datagram(\n                        serialize_data(data, self.compression,\n                                       self.encryption, self.server_key),\n                        self.server)\n\n                    \n                    logger.debug(\"<%s> <euuid:%s> Scheduling to retry in %s \"\n                                  \"seconds\" % (data[\"cuuid\"],\n                                               str(data[\"euuid\"]),\n                                               str(self.timeout)))\n                    self.listener.call_later(\n                        self.timeout, self.retransmit, data)\n            else:\n                logger.debug(\"<%s> <euuid:%s> No need to \"\n                              \"retransmit.\" % (str(self.cuuid),\n                                               str(data[\"euuid\"])))", "docstring": "Processes messages that have been delivered from the transport\nprotocol.\n\nArgs:\ndata (dict): A dictionary containing the packet data to resend.\n\nReturns:\nNone\n\nExamples:\n>>> data\n{'method': 'REGISTER', 'address': ('192.168.0.20', 40080)}", "source": "juraj-google-style"}
{"code": "class DPTFeatureFusionLayer(nn.Module):\n\n    def __init__(self, config, align_corners=True):\n        super().__init__()\n        self.align_corners = align_corners\n        self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)\n        self.residual_layer1 = DPTPreActResidualLayer(config)\n        self.residual_layer2 = DPTPreActResidualLayer(config)\n\n    def forward(self, hidden_state, residual=None):\n        if residual is not None:\n            if hidden_state.shape != residual.shape:\n                residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False)\n            hidden_state = hidden_state + self.residual_layer1(residual)\n        hidden_state = self.residual_layer2(hidden_state)\n        hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=self.align_corners)\n        hidden_state = self.projection(hidden_state)\n        return hidden_state", "docstring": "Feature fusion layer, merges feature maps from different stages.\n\nArgs:\nconfig (`[DPTConfig]`):\nModel configuration class defining the model architecture.\nalign_corners (`bool`, *optional*, defaults to `True`):\nThe align_corner setting for bilinear upsample.", "source": "github-repos"}
{"code": "def proto_refactor_files(dest_dir, namespace, namespace_path):\n    \n    for dn, dns, fns in os.walk(dest_dir):\n        for fn in fns:\n            fn = os.path.join(dn, fn)\n            if fnmatch.fnmatch(fn, '*.proto'):\n                data = proto_refactor(fn, namespace, namespace_path)\n                with open(fn, 'w') as f:\n                    f.write(data)", "docstring": "This method runs the refactoring on all the Protobuf files in the\nDropsonde repo.\n\nArgs:\ndest_dir (str): directory where the Protobuf files lives.\nnamespace (str): the desired package name (i.e. \"dropsonde.py2\")\nnamespace_path (str): the desired path corresponding to the package\nname (i.e. \"dropsonde/py2\")", "source": "juraj-google-style"}
{"code": "def _create(self, monomer, mon_vector):\n        \n        while self.length != (self.n_units-1):\n            if self.linear_chain:\n                move_direction = np.array(mon_vector) / np.linalg.norm(mon_vector)\n            else:\n                move_direction = self._next_move_direction()\n            self._add_monomer(monomer.copy(), mon_vector, move_direction)", "docstring": "create the polymer from the monomer\n\nArgs:\nmonomer (Molecule)\nmon_vector (numpy.array): molecule vector that starts from the\nstart atom index to the end atom index", "source": "juraj-google-style"}
{"code": "def save_as(self, filename: str) -> None:\n    lib.TCOD_image_save(self.image_c, filename.encode('utf-8'))", "docstring": "Save the Image to a 32-bit .bmp or .png file.\n\nArgs:\nfilename (Text): File path to same this Image.", "source": "codesearchnet"}
{"code": "def start(self):\n    if (not self.started):\n        self.started = True\n        self.executor = ThreadPoolExecutor(max_workers=32)\n        self.poller = self.executor.submit(self.poll_events)\n    else:\n        raise IllegalStateError('Dispatcher is already started.')", "docstring": "Starts the event dispatcher.\n\nInitiates executor and start polling events.\n\nRaises:\nIllegalStateError: Can't start a dispatcher again when it's already\nrunning.", "source": "codesearchnet"}
{"code": "def report_fhir_path_warning(self, element_path: str, fhir_path_constraint: str, msg: str) -> None:", "docstring": "Reports a FHIRPath constraint warning during validation and/or encoding.\n\nArgs:\nelement_path: The path to the field that the constraint is defined on.\nfhir_path_constraint: The FHIRPath constraint expression.\nmsg: The warning message produced.", "source": "github-repos"}
{"code": "def color_gen_map(colors: Iterable[Tuple[(int, int, int)]], indexes: Iterable[int]) -> List[Color]:\n    ccolors = ffi.new('TCOD_color_t[]', colors)\n    cindexes = ffi.new('int[]', indexes)\n    cres = ffi.new('TCOD_color_t[]', (max(indexes) + 1))\n    lib.TCOD_color_gen_map(cres, len(ccolors), ccolors, cindexes)\n    return [Color._new_from_cdata(cdata) for cdata in cres]", "docstring": "Return a smoothly defined scale of colors.\n\nIf ``indexes`` is [0, 3, 9] for example, the first color from ``colors``\nwill be returned at 0, the 2nd will be at 3, and the 3rd will be at 9.\nAll in-betweens will be filled with a gradient.\n\nArgs:\ncolors (Iterable[Union[Tuple[int, int, int], Sequence[int]]]):\nArray of colors to be sampled.\nindexes (Iterable[int]): A list of indexes.\n\nReturns:\nList[Color]: A list of Color instances.\n\nExample:\n>>> tcod.color_gen_map([(0, 0, 0), (255, 128, 0)], [0, 5])\n[Color(0, 0, 0), Color(51, 25, 0), Color(102, 51, 0), \\\nColor(153, 76, 0), Color(204, 102, 0), Color(255, 128, 0)]", "source": "codesearchnet"}
{"code": "def from_node(cls, node):\n    if (not isinstance(node, aioxmpp.stanza.Message)):\n        raise AttributeError('node must be a aioxmpp.stanza.Message instance')\n    msg = cls()\n    msg._to = node.to\n    msg._sender = node.from_\n    if (None in node.body):\n        msg.body = node.body[None]\n    else:\n        for key in node.body.keys():\n            msg.body = node.body[key]\n            break\n    for data in node.xep0004_data:\n        if (data.title == SPADE_X_METADATA):\n            for field in data.fields:\n                if (field.var != '_thread_node'):\n                    msg.set_metadata(field.var, field.values[0])\n                else:\n                    msg.thread = field.values[0]\n    return msg", "docstring": "Creates a new spade.message.Message from an aixoxmpp.stanza.Message\n\nArgs:\nnode (aioxmpp.stanza.Message): an aioxmpp Message\n\nReturns:\nspade.message.Message: a new spade Message", "source": "codesearchnet"}
{"code": "def _render_fluent_timestep(self,\n            fluent_type: str,\n            fluents: Sequence[Tuple[str, np.array]],\n            fluent_variables: Sequence[Tuple[str, List[str]]]) -> None:\n        \n        for fluent_pair, variable_list in zip(fluents, fluent_variables):\n            name, fluent = fluent_pair\n            _, variables = variable_list\n            print(name)\n            fluent = fluent.flatten()\n            for variable, value in zip(variables, fluent):\n                print('- {}: {} = {}'.format(fluent_type, variable, value))\n        print()", "docstring": "Prints `fluents` of given `fluent_type` as list of instantiated variables\nwith corresponding values.\n\nArgs:\nfluent_type (str): Fluent type.\nfluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values).\nfluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args).", "source": "juraj-google-style"}
{"code": "def console_print_ex(\n    con: tcod.console.Console,\n    x: int,\n    y: int,\n    flag: int,\n    alignment: int,\n    fmt: str,\n) -> None:\n    \n    lib.TCOD_console_printf_ex(_console(con), x, y, flag, alignment, _fmt(fmt))", "docstring": "Print a string on a console using a blend mode and alignment mode.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\n\n.. deprecated:: 8.5\nUse :any:`Console.print_` instead.", "source": "juraj-google-style"}
{"code": "def bulkWrite(self, endpoint, buffer, timeout = 100):\n        r\n        return self.dev.write(endpoint, buffer, timeout)", "docstring": "r\"\"\"Perform a bulk write request to the endpoint specified.\n\nArguments:\nendpoint: endpoint number.\nbuffer: sequence data buffer to write.\nThis parameter can be any sequence type.\ntimeout: operation timeout in milliseconds. (default: 100)\nReturns the number of bytes written.", "source": "juraj-google-style"}
{"code": "def _recursive_remove_blank_dirs(self, path):\n        \n        path = os.path.abspath(path)\n\n        \n        if path == self.path or len(path) <= len(self.path):\n            return\n\n        \n        if not os.path.exists(path):\n            return self._recursive_remove_blank_dirs(\n                os.path.dirname(path)\n            )\n\n        \n        if os.listdir(path):\n            return\n\n        \n        shutil.rmtree(path)\n\n        \n        return self._recursive_remove_blank_dirs(\n            os.path.dirname(path)\n        )", "docstring": "Make sure, that blank directories are removed from the storage.\n\nArgs:\npath (str): Path which you suspect that is blank.", "source": "juraj-google-style"}
{"code": "def triangle_area(point1, point2, point3):\n    \n\n    \n    a = point_distance(point1, point2)\n    b = point_distance(point1, point3)\n    c = point_distance(point2, point3)\n\n    \n    s = (a + b + c) / 2.0\n\n    \n    return math.sqrt(s * (s - a) * (s - b) * (s - c))", "docstring": "Uses Heron's formula to find the area of a triangle\nbased on the coordinates of three points.\n\nArgs:\npoint1: list or tuple, the x y coordinate of point one.\n\npoint2: list or tuple, the x y coordinate of point two.\n\npoint3: list or tuple, the x y coordinate of point three.\n\nReturns:\nThe area of a triangle as a floating point number.\n\nRequires:\nThe math module, point_distance().", "source": "juraj-google-style"}
{"code": "def grad_dot(dy, x1, x2):\n  \n  if len(numpy.shape(x1)) == 1:\n    dy = numpy.atleast_2d(dy)\n  elif len(numpy.shape(x2)) == 1:\n    dy = numpy.transpose(numpy.atleast_2d(dy))\n    x2 = numpy.transpose(numpy.atleast_2d(x2))\n  x2_t = numpy.transpose(numpy.atleast_2d(\n      numpy.sum(x2, axis=tuple(numpy.arange(numpy.ndim(x2) - 2)))))\n  dy_x2 = numpy.sum(dy, axis=tuple(-numpy.arange(numpy.ndim(x2) - 2) - 2))\n  return numpy.reshape(numpy.dot(dy_x2, x2_t), numpy.shape(x1))", "docstring": "Gradient of NumPy dot product w.r.t. to the left hand side.\n\nArgs:\ndy: The gradient with respect to the output.\nx1: The left hand side of the `numpy.dot` function.\nx2: The right hand side\n\nReturns:\nThe gradient with respect to `x1` i.e. `x2.dot(dy.T)` with all the\nbroadcasting involved.", "source": "juraj-google-style"}
{"code": "def _publish_internal(self, push_messages):\n    import requests\n    response = requests.post(((self.host + self.api_url) + '/push/send'), data=json.dumps([pm.get_payload() for pm in push_messages]), headers={'accept': 'application/json', 'accept-encoding': 'gzip, deflate', 'content-type': 'application/json'})\n    try:\n        response_data = response.json()\n    except ValueError:\n        response.raise_for_status()\n        raise PushServerError('Invalid server response', response)\n    if ('errors' in response_data):\n        raise PushServerError('Request failed', response, response_data=response_data, errors=response_data['errors'])\n    if ('data' not in response_data):\n        raise PushServerError('Invalid server response', response, response_data=response_data)\n    response.raise_for_status()\n    if (len(push_messages) != len(response_data['data'])):\n        raise PushServerError(('Mismatched response length. Expected %d %s but only received %d' % (len(push_messages), ('receipt' if (len(push_messages) == 1) else 'receipts'), len(response_data['data']))), response, response_data=response_data)\n    receipts = []\n    for (i, receipt) in enumerate(response_data['data']):\n        receipts.append(PushResponse(push_message=push_messages[i], status=receipt.get('status', PushResponse.ERROR_STATUS), message=receipt.get('message', ''), details=receipt.get('details', None)))\n    return receipts", "docstring": "Send push notifications\n\nThe server will validate any type of syntax errors and the client will\nraise the proper exceptions for the user to handle.\n\nEach notification is of the form:\n{\n'to': 'ExponentPushToken[xxx]',\n'body': 'This text gets display in the notification',\n'badge': 1,\n'data': {'any': 'json object'},\n}\n\nArgs:\npush_messages: An array of PushMessage objects.", "source": "codesearchnet"}
{"code": "def pubsub_pop_message(self, deadline=None):\n        \n        if not self.subscribed:\n            excep = ClientError(\"you must subscribe before using \"\n                                \"pubsub_pop_message\")\n            raise tornado.gen.Return(excep)\n        reply = None\n        try:\n            reply = self._reply_list.pop(0)\n            raise tornado.gen.Return(reply)\n        except IndexError:\n            pass\n        if deadline is not None:\n            td = timedelta(seconds=deadline)\n            yield self._condition.wait(timeout=td)\n        else:\n            yield self._condition.wait()\n        try:\n            reply = self._reply_list.pop(0)\n        except IndexError:\n            pass\n        raise tornado.gen.Return(reply)", "docstring": "Pops a message for a subscribed client.\n\nArgs:\ndeadline (int): max number of seconds to wait (None => no timeout)\n\nReturns:\nFuture with the popped message as result (or None if timeout\nor ConnectionError object in case of connection errors\nor ClientError object if you are not subscribed)", "source": "juraj-google-style"}
{"code": "def _prepare_for_training(self, job_name=None):\n    if (job_name is not None):\n        self._current_job_name = job_name\n    else:\n        if self.base_job_name:\n            base_name = self.base_job_name\n        elif isinstance(self, sagemaker.algorithm.AlgorithmEstimator):\n            base_name = self.algorithm_arn.split('/')[(- 1)]\n        else:\n            base_name = base_name_from_image(self.train_image())\n        self._current_job_name = name_from_base(base_name)\n    if (self.output_path is None):\n        local_code = get_config_value('local.local_code', self.sagemaker_session.config)\n        if (self.sagemaker_session.local_mode and local_code):\n            self.output_path = ''\n        else:\n            self.output_path = 's3:", "docstring": "Set any values in the estimator that need to be set before training.\n\nArgs:\n* job_name (str): Name of the training job to be created. If not specified, one is generated,\nusing the base name given to the constructor if applicable.", "source": "codesearchnet"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        self.Type = TransactionType.StateTransaction\n\n        self.Descriptors = reader.ReadSerializableArray('neo.Core.State.StateDescriptor.StateDescriptor')", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):\n\nRaises:\nException: If the transaction type is incorrect or if there are no claims.", "source": "juraj-google-style"}
{"code": "def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True, **kwargs) -> BatchEncoding:\n    is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1\n    if is_batched_numpy and len(raw_speech.shape) > 2:\n        raise ValueError(f'Only mono-channel audio is supported for input to {self}')\n    is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list)))\n    if is_batched and (not isinstance(raw_speech[0], np.ndarray)):\n        raw_speech = [np.asarray(speech) for speech in raw_speech]\n    elif not is_batched and (not isinstance(raw_speech, np.ndarray)):\n        raw_speech = np.asarray(raw_speech)\n    if not is_batched:\n        raw_speech = [raw_speech]\n    if self.do_normalize:\n        raw_speech = [(x - np.mean(x)) / np.sqrt(np.var(x) + 1e-05) for x in raw_speech]\n    encoded_inputs = BatchEncoding({'input_values': raw_speech})\n    padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=self.return_attention_mask, return_tensors=return_tensors, verbose=verbose)\n    return padded_inputs", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\nsequences.\n\nArgs:\nraw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):\nThe sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float\nvalues, a list of numpy array or a list of list of float values. Must be mono channel audio, not\nstereo, i.e. single float per timestep.\n\npadding_side (`str`, *optional*):\nThe side on which the model should have padding applied. Should be selected between ['right', 'left'].\nDefault value is picked from the class attribute of the same name.", "source": "github-repos"}
{"code": "def run(self, args):\n        \n        jlink = self.create_jlink(args)\n        if args.list:\n            print('Built-in Licenses: %s' % ', '.join(jlink.licenses.split(',')))\n            print('Custom Licenses: %s' % ', '.join(jlink.custom_licenses.split(',')))\n        elif args.add is not None:\n            if jlink.add_license(args.add):\n                print('Successfully added license.')\n            else:\n                print('License already exists.')\n        elif args.erase:\n            if jlink.erase_licenses():\n                print('Successfully erased all custom licenses.')\n            else:\n                print('Failed to erase custom licenses.')", "docstring": "Runs the license command.\n\nArgs:\nself (LicenseCommand): the ``LicenseCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def use_gradient(grad_f):\n    grad_f_name = register_to_random_name(grad_f)\n\n    def function_wrapper(f):\n\n        def inner(*inputs):\n            state = {'out_value': None}\n            out = f(*inputs)\n\n            def store_out(out_value):\n                'Store the value of out to a python variable.'\n                state['out_value'] = out_value\n            store_name = ('store_' + f.__name__)\n            store = tf.py_func(store_out, [out], (), stateful=True, name=store_name)\n\n            def mock_f(*inputs):\n                'Mimic f by retrieving the stored value of out.'\n                return state['out_value']\n            with tf.control_dependencies([store]):\n                with gradient_override_map({'PyFunc': grad_f_name}):\n                    mock_name = ('mock_' + f.__name__)\n                    mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True, name=mock_name)\n                    mock_out.set_shape(out.get_shape())\n            return mock_out\n        return inner\n    return function_wrapper", "docstring": "Decorator for easily setting custom gradients for TensorFlow functions.\n\n* DO NOT use this function if you need to serialize your graph.\n* This function will cause the decorated function to run slower.\n\nExample:\n\ndef _foo_grad(op, grad): ...\n\n@use_gradient(_foo_grad)\ndef foo(x1, x2, x3): ...\n\nArgs:\ngrad_f: function to use as gradient.\n\nReturns:\nA decorator to apply to the function you wish to override the gradient of.", "source": "codesearchnet"}
{"code": "def MergeBaseClass(cls, base):\n    bases = tuple((b for b in cls.bases if b != base))\n    bases += tuple((b for b in base.bases if b not in bases))\n    method_names = [m.name for m in cls.methods]\n    methods = cls.methods + tuple((m for m in base.methods if m.name not in method_names))\n    constant_names = [c.name for c in cls.constants]\n    constants = cls.constants + tuple((c for c in base.constants if c.name not in constant_names))\n    class_names = [c.name for c in cls.classes]\n    classes = cls.classes + tuple((c for c in base.classes if c.name not in class_names))\n    decorators = cls.decorators or base.decorators\n    if cls.slots:\n        slots = cls.slots + tuple((s for s in base.slots or () if s not in cls.slots))\n    else:\n        slots = base.slots\n    return pytd.Class(name=cls.name, keywords=cls.keywords or base.keywords, bases=bases, methods=methods, constants=constants, classes=classes, decorators=decorators, slots=slots, template=cls.template or base.template)", "docstring": "Merge a base class into a subclass.\n\nArguments:\ncls: The subclass to merge values into. pytd.Class.\nbase: The superclass whose values will be merged. pytd.Class.\n\nReturns:\na pytd.Class of the two merged classes.", "source": "github-repos"}
{"code": "def check_mailfy(self, query, kwargs={}):\n        \n        import re\n        import requests\n\n        s = requests.Session()\n\n        \n        r1 = s.get(\"https:\n        csrf_token = re.findall(\"csrf_token\", r1.text)[0]\n\n        \n        r2 = s.post(\n            'https:\n            data={\"email\": query},\n            headers={\"X-CSRFToken\": csrf_token}\n        )\n\n        if '{\"email\": [{\"message\": \"Another account is using' in r2.text:\n            return r2.text\n        else:\n            return None", "docstring": "Verifying a mailfy query in this platform.\n\nThis might be redefined in any class inheriting from Platform. The only\ncondition is that any of this should return a dictionary as defined.\n\nArgs:\n-----\nquery: The element to be searched.\nkwargs: Dictionary with extra parameters. Just in case.\n\nReturn:\n-------\nReturns the collected data if exists or None if not.", "source": "juraj-google-style"}
{"code": "def get_frame(self, index=None, onset=None):\n        \n        if onset:\n            index = int(onset * self.fps)\n\n        return super(VideoStim, self).get_frame(index)", "docstring": "Overrides the default behavior by giving access to the onset\nargument.\n\nArgs:\nindex (int): Positional index of the desired frame.\nonset (float): Onset (in seconds) of the desired frame.", "source": "juraj-google-style"}
{"code": "def is_process_running(process_name):\n    \n    is_running = False\n\n    \n    if os.path.isfile('/usr/bin/pgrep'):\n        dev_null = open(os.devnull, 'wb')\n        returncode = subprocess.call(['/usr/bin/pgrep', process_name],\n                                     stdout=dev_null)\n        is_running = bool(returncode == 0)\n\n    return is_running", "docstring": "Check if a process with the given name is running.\n\nArgs:\n(str): Process name, e.g. \"Sublime Text\"\n\nReturns:\n(bool): True if the process is running", "source": "juraj-google-style"}
{"code": "def get_variant_genotypes(self, variant):\n        \n        if not self.has_index:\n            raise NotImplementedError(\"Not implemented when IMPUTE2 file is \"\n                                      \"not indexed (see genipe)\")\n\n        \n        try:\n            impute2_chrom = CHROM_STR_TO_INT[variant.chrom.name]\n        except KeyError:\n            raise ValueError(\n                \"Invalid chromosome ('{}') for IMPUTE2.\".format(variant.chrom)\n            )\n\n        variant_info = self._impute2_index[\n            (self._impute2_index.chrom == impute2_chrom) &\n            (self._impute2_index.pos == variant.pos)\n        ]\n\n        if variant_info.shape[0] == 0:\n            logging.variant_not_found(variant)\n            return []\n\n        elif variant_info.shape[0] == 1:\n            return self._get_biallelic_variant(variant, variant_info)\n\n        else:\n            return self._get_multialleic_variant(variant, variant_info)", "docstring": "Get the genotypes from a well formed variant instance.\n\nArgs:\nmarker (Variant): A Variant instance.\n\nReturns:\nA list of Genotypes instance containing a pointer to the variant as\nwell as a vector of encoded genotypes.", "source": "juraj-google-style"}
{"code": "def post_warning(self, name, message):\n    self.post_command(OPERATIONS.CMD_POST_MESSAGE, _create_message(name, states.WARNING_LEVEL, message))", "docstring": "Asynchronously post a user facing warning message about a service.\n\nArgs:\nname (string): The name of the service\nmessage (string): The user facing warning message that will be stored\nfor the service and can be queried later.", "source": "codesearchnet"}
{"code": "def list(self, container_or_share_name, container=None, account=None):\n        \n        key = self.storage_client.storage_accounts.list_keys(self.resource_group_name, account).keys[0].value\n        if container:\n            bs = BlockBlobService(account_name=account, account_key=key)\n            container_list = []\n            for i in bs.list_blobs(container_or_share_name).items:\n                container_list.append(i.name)\n            return container_list\n        elif not container:\n            fs = FileService(account_name=account, account_key=key)\n            container_list = []\n            for i in fs.list_directories_and_files(container_or_share_name).items:\n                container_list.append(i.name)\n            return container_list\n        else:\n            raise ValueError(\"You have to pass a value for container param\")", "docstring": "List the blobs/files inside a container/share_name.\nArgs:\ncontainer_or_share_name(str): Name of the container/share_name where we want to list the blobs/files.\ncontainer(bool): flag to know it you are listing files or blobs.\naccount(str): The name of the storage account.", "source": "juraj-google-style"}
{"code": "def step1_get_authorize_url(self, redirect_uri=None, state=None):\n    if (redirect_uri is not None):\n        logger.warning('The redirect_uri parameter for OAuth2WebServerFlow.step1_get_authorize_url is deprecated. Please move to passing the redirect_uri in via the constructor.')\n        self.redirect_uri = redirect_uri\n    if (self.redirect_uri is None):\n        raise ValueError('The value of redirect_uri must not be None.')\n    query_params = {'client_id': self.client_id, 'redirect_uri': self.redirect_uri, 'scope': self.scope}\n    if (state is not None):\n        query_params['state'] = state\n    if (self.login_hint is not None):\n        query_params['login_hint'] = self.login_hint\n    if self._pkce:\n        if (not self.code_verifier):\n            self.code_verifier = _pkce.code_verifier()\n        challenge = _pkce.code_challenge(self.code_verifier)\n        query_params['code_challenge'] = challenge\n        query_params['code_challenge_method'] = 'S256'\n    query_params.update(self.params)\n    return _helpers.update_query_params(self.auth_uri, query_params)", "docstring": "Returns a URI to redirect to the provider.\n\nArgs:\nredirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'\nfor a non-web-based application, or a URI that\nhandles the callback from the authorization server.\nThis parameter is deprecated, please move to passing\nthe redirect_uri in via the constructor.\nstate: string, Opaque state string which is passed through the\nOAuth2 flow and returned to the client as a query parameter\nin the callback.\n\nReturns:\nA URI as a string to redirect the user to begin the authorization\nflow.", "source": "codesearchnet"}
{"code": "def setup_service(api_name, api_version, credentials=None):\n  \n  if not credentials:\n    credentials = oauth2client.client.GoogleCredentials.get_application_default(\n    )\n  return apiclient.discovery.build(\n      api_name, api_version, credentials=credentials)", "docstring": "Configures genomics API client.\n\nArgs:\napi_name: Name of the Google API (for example: \"genomics\")\napi_version: Version of the API (for example: \"v2alpha1\")\ncredentials: Credentials to be used for the gcloud API calls.\n\nReturns:\nA configured Google Genomics API client with appropriate credentials.", "source": "juraj-google-style"}
{"code": "def flatten(self, max_value: int) -> FrozenSet[int]:\n        \n        return frozenset(self.iter(max_value))", "docstring": "Return a set of all values contained in the sequence set.\n\nArgs:\nmax_value: The maximum value, in place of any ``*``.", "source": "juraj-google-style"}
{"code": "def recoverURL(self, url):\n        \n        \n        self.setUserAgent()\n\n        \n        if \"https:\n            self.setProxy(protocol = \"https\")\n        else:\n            self.setProxy(protocol = \"http\")\n\n        \n        if \".onion\" in url:\n            try:\n                \n                pass\n            except:\n                \n                \n                pass\n            url = url.replace(\".onion\", \".onion.cab\")\n\n        \n        try:\n            recurso = self.br.open(url)\n        except:\n            \n            return None\n\n        html = recurso.read()\n\n        return html", "docstring": "Public method to recover a resource.\n\nArgs:\n-----\nurl: The URL to be collected.\n\nReturns:\n--------\nReturns a resource that has to be read, for instance, with html = self.br.read()", "source": "juraj-google-style"}
{"code": "def print_type(self, t, literal=False) -> str:", "docstring": "Returns a string of the type of t.\n\nFor example, if t is `0`, then this method returns \"int\" with literal=False\nor `Literal[0]` with literal=True.\n\nArgs:\nt: An abstract value.\nliteral: Whether to print literals literally.", "source": "github-repos"}
{"code": "def render_table(data, headers=None):\n    \n    builder = HtmlBuilder()\n    builder._render_objects(data, headers, datatype='dict')\n    return builder._to_html()", "docstring": "Return a dictionary list formatted as a HTML table.\n\nArgs:\ndata: a list of dictionaries, one per row.\nheaders: the keys in the dictionary to use as table columns, in order.", "source": "juraj-google-style"}
{"code": "def run_benchmarks(benchmark_suite, verbose=True):\n\n    def run(benchmark: BenchmarkFactoryFn, size: int):\n        benchmark_instance_callable = benchmark(size)\n        start = time.time()\n        _ = benchmark_instance_callable()\n        return time.time() - start\n    cost_series = collections.defaultdict(list)\n    size_series = collections.defaultdict(list)\n    for benchmark_config in benchmark_suite:\n        name = str(benchmark_config)\n        num_runs = benchmark_config.num_runs\n        if isinstance(benchmark_config, LinearRegressionBenchmarkConfig):\n            size = benchmark_config.starting_point\n            step = benchmark_config.increment\n        else:\n            assert isinstance(benchmark_config, BenchmarkConfig)\n            size = benchmark_config.size\n            step = 0\n        for run_id in range(num_runs):\n            gc.collect()\n            time_cost = run(benchmark_config.benchmark, size)\n            cost_series[name].append(time_cost)\n            size_series[name].append(size)\n            if verbose:\n                per_element_cost = time_cost / size\n                print('%s: run %d of %d, per element time cost: %g sec' % (name, run_id + 1, num_runs, per_element_cost))\n            size += step\n        if verbose:\n            print('')\n    if verbose:\n        pad_length = max([len(str(bc)) for bc in benchmark_suite])\n        for benchmark_config in benchmark_suite:\n            name = str(benchmark_config)\n            if isinstance(benchmark_config, LinearRegressionBenchmarkConfig):\n                from scipy import stats\n                print()\n                gradient, intercept, r_value, p_value, std_err = stats.linregress(size_series[name], cost_series[name])\n                print('Fixed cost  ', intercept)\n                print('Per-element ', gradient)\n                print('R^2         ', r_value ** 2)\n            else:\n                assert isinstance(benchmark_config, BenchmarkConfig)\n                per_element_median_cost = numpy.median(cost_series[name]) / benchmark_config.size\n                std = numpy.std(cost_series[name]) / benchmark_config.size\n                print('%s: p. element median time cost: %g sec, relative std: %.2f%%' % (name.ljust(pad_length, ' '), per_element_median_cost, std * 100 / per_element_median_cost))\n    return (size_series, cost_series)", "docstring": "Runs benchmarks, and collects execution times.\n\nA simple instrumentation to run a callable several times, collect and print\nits execution times.\n\nArgs:\nbenchmark_suite: A list of BenchmarkConfig.\nverbose: bool, whether to print benchmark results to stdout.\n\nReturns:\nA dictionary of the form string -> list of floats. Keys of the dictionary\nare benchmark names, values are execution times in seconds for each run.", "source": "github-repos"}
{"code": "def erfinv(x):\n    if any_symbolic_tensors((x,)):\n        return Erfinv().symbolic_call(x)\n    x = backend.convert_to_tensor(x)\n    return backend.math.erfinv(x)", "docstring": "Computes the inverse error function of `x`, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same dtype as `x`.\n\nExample:\n\n>>> x = np.array([-0.5, -0.2, -0.1, 0.0, 0.3])\n>>> keras.ops.erfinv(x)\narray([-0.47694, -0.17914, -0.08886,  0. ,  0.27246], dtype=float32)", "source": "github-repos"}
{"code": "def closest_point(a, b, p):\n    ap = [(p[0] - a[0]), (p[1] - a[1])]\n    ab = [(b[0] - a[0]), (b[1] - a[1])]\n    mag = float(((ab[0] ** 2) + (ab[1] ** 2)))\n    proj = dot(ap, ab)\n    if (mag == 0):\n        dist = 0\n    else:\n        dist = (proj / mag)\n    if (dist < 0):\n        return [a[0], a[1]]\n    elif (dist > 1):\n        return [b[0], b[1]]\n    else:\n        return [(a[0] + (ab[0] * dist)), (a[1] + (ab[1] * dist))]", "docstring": "Finds closest point in a line segment\n\nArgs:\na ([float, float]): x and y coordinates. Line start\nb ([float, float]): x and y coordinates. Line end\np ([float, float]): x and y coordinates. Point to find in the segment\nReturns:\n(float, float): x and y coordinates of the closest point", "source": "codesearchnet"}
{"code": "def getfileversion(self):\n    (status, major_v, minor_v, release, info) = _C.Hgetfileversion(self._id)\n    _checkErr('getfileversion', status, 'cannot get file version')\n    return (major_v, minor_v, release, info)", "docstring": "Get file version info.\n\nArgs:\nno argument\nReturns:\n4-element tuple with the following components:\n-major version number (int)\n-minor version number (int)\n-complete library version number (int)\n-additional information (string)\n\nC library equivalent : Hgetlibversion", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(ClaimTransaction, self).__init__(*args, **kwargs)\n\n        self.Type = TransactionType.ClaimTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def add_ensembl_info(genes, ensembl_lines):\n    \n    \n    LOG.info(\"Adding ensembl coordinates\")\n    \n    if isinstance(ensembl_lines, DataFrame):\n        ensembl_genes = parse_ensembl_gene_request(ensembl_lines)\n    else:\n        ensembl_genes = parse_ensembl_genes(ensembl_lines)\n\n    for ensembl_gene in ensembl_genes:\n        gene_obj = genes.get(ensembl_gene['hgnc_id'])\n        if not gene_obj:\n            continue\n        gene_obj['chromosome'] = ensembl_gene['chrom']\n        gene_obj['start'] = ensembl_gene['gene_start']\n        gene_obj['end'] = ensembl_gene['gene_end']\n        \n        \n        gene_obj['ensembl_gene_id'] = ensembl_gene['ensembl_gene_id']", "docstring": "Add the coordinates from ensembl\n\nArgs:\ngenes(dict): Dictionary with all genes\nensembl_lines(iteable): Iteable with raw ensembl info", "source": "juraj-google-style"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--server', dest='server', type=str, action='store',\n        default=cls._DEFAULT_SERVER, metavar='HOSTNAME',\n        help='The hostname or server IP address of the server.')\n    argument_group.add_argument(\n        '--port', dest='port', type=int, action='store',\n        default=cls._DEFAULT_PORT, metavar='PORT',\n        help='The port number of the server.')", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def _clean_url(url):\n    \n    if url == 'default':\n        url = DEFAULT_SERVER_HTTP_URL\n\n    if url.startswith(\"ws\"):\n        raise ValueError(\"url should be the http or https URL for the server, not the websocket URL\")\n\n    return url.rstrip(\"/\")", "docstring": "Produce a canonical Bokeh server URL.\n\nArgs:\nurl (str)\nA URL to clean, or \"defatul\". If \"default\" then the\n``BOKEH_SERVER_HTTP_URL`` will be returned.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "async def skip(self, query=\"1\"):\n        \n\n        if not self.state == 'ready':\n            logger.debug(\"Trying to skip from wrong state '{}'\".format(self.state))\n            return\n\n        if query == \"\":\n            query = \"1\"\n        elif query == \"all\":\n            query = str(len(self.queue) + 1)\n\n        try:\n            num = int(query)\n        except TypeError:\n            self.statuslog.error(\"Skip argument must be a number\")\n        except ValueError:\n            self.statuslog.error(\"Skip argument must be a number\")\n        else:\n            self.statuslog.info(\"Skipping\")\n\n            for i in range(num - 1):\n                if len(self.queue) > 0:\n                    self.prev_queue.append(self.queue.pop(0))\n\n            try:\n                self.streamer.stop()\n            except Exception as e:\n                logger.exception(e)", "docstring": "The skip command\n\nArgs:\nquery (str): The number of items to skip", "source": "juraj-google-style"}
{"code": "def rotate(self, vecs):\n        \n        assert vecs.dtype == np.float32\n        assert vecs.ndim in [1, 2]\n\n        if vecs.ndim == 2:\n            return vecs @ self.R\n        elif vecs.ndim == 1:\n            return (vecs.reshape(1, -1) @ self.R).reshape(-1)", "docstring": "Rotate input vector(s) by the rotation matrix.`\n\nArgs:\nvecs (np.ndarray): Input vector(s) with dtype=np.float32.\nThe shape can be a single vector (D, ) or several vectors (N, D)\n\nReturns:\nnp.ndarray: Rotated vectors with the same shape and dtype to the input vecs.", "source": "juraj-google-style"}
{"code": "def commit(self, sourcedir, targetdir, abs_config, abs_sourcedir,\n               abs_targetdir):\n        \n        config_path, config_filename = os.path.split(abs_config)\n\n        if not os.path.exists(config_path):\n            os.makedirs(config_path)\n        if not os.path.exists(abs_sourcedir):\n            os.makedirs(abs_sourcedir)\n        if not os.path.exists(abs_targetdir):\n            os.makedirs(abs_targetdir)\n\n        \n        self.backend_engine.dump({\n            'SOURCES_PATH': sourcedir,\n            'TARGET_PATH': targetdir,\n            \"LIBRARY_PATHS\": [],\n            \"OUTPUT_STYLES\": \"nested\",\n            \"SOURCE_COMMENTS\": False,\n            \"EXCLUDES\": []\n        }, abs_config, indent=4)", "docstring": "Commit project structure and configuration file\n\nArgs:\nsourcedir (string): Source directory path.\ntargetdir (string): Compiled files target directory path.\nabs_config (string): Configuration file absolute path.\nabs_sourcedir (string): ``sourcedir`` expanded as absolute path.\nabs_targetdir (string): ``targetdir`` expanded as absolute path.", "source": "juraj-google-style"}
{"code": "def abspath(self, path):\n        \n        if not path.startswith(os.path.sep) or path.startswith('~'):\n            path = os.path.expanduser(os.path.join(self.base_path, path))\n        return path", "docstring": "Transform the path to an absolute path\n\nArgs:\npath (string): The path to transform to an absolute path\n\nReturns:\nstring: The absolute path to the file", "source": "juraj-google-style"}
{"code": "def LockedWrite(self, cache_data):\n        \n        if isinstance(cache_data, six.text_type):\n            cache_data = cache_data.encode(encoding=self._encoding)\n\n        with self._thread_lock:\n            if not self._EnsureFileExists():\n                return False\n            with self._process_lock_getter() as acquired_plock:\n                if not acquired_plock:\n                    return False\n                with open(self._filename, 'wb') as f:\n                    f.write(cache_data)\n                return True", "docstring": "Acquire an interprocess lock and write a string.\n\nThis method safely acquires the locks then writes a string\nto the cache file. If the string is written successfully\nthe function will return True, if the write fails for any\nreason it will return False.\n\nArgs:\ncache_data: string or bytes to write.\n\nReturns:\nbool: success", "source": "juraj-google-style"}
{"code": "def __clean__(struct: Union[dict, list]) -> Union[dict, list]:\n    if isinstance(struct, dict):\n        for key, value in struct.items():\n            if isinstance(value, bytes):\n                struct[key] = base64.standard_b64encode(value).decode('ascii')\n            elif isinstance(value, date):\n                struct[key] = str(value)\n            else:\n                API.__clean__(value)\n    elif isinstance(struct, list):\n        for index, value in enumerate(struct):\n            if isinstance(value, bytes):\n                struct[index] = base64.standard_b64encode(value).decode('ascii')\n            elif isinstance(value, date):\n                struct[index] = str(value)\n            else:\n                API.__clean__(value)\n    return struct", "docstring": "Helper to recursively clean up JSON data for API call.\n\nConverts bytes -> base64.\nConverts date -> str (yyyy-mm-dd).\nTODO: Add Converts datetime, time -> string.\n\nArgs:\nstruct: The kwargs being cleaned up.\n\nReturns:\nstruct: The kwargs with replacments.", "source": "github-repos"}
{"code": "def get_public_tokens(self):\n    r = self.remote_utils.get_url((self.url() + 'public_tokens/'))\n    return r.json()", "docstring": "Get a list of public tokens available on this server.\n\nArguments:\nNone\n\nReturns:\nstr[]: list of public tokens", "source": "codesearchnet"}
{"code": "def setOutputHandler(self, outputhandler):\n        \n        class OutputHandlerInternal(amplpython.OutputHandler):\n            def output(self, kind, msg):\n                outputhandler.output(kind, msg)\n\n        self._outputhandler = outputhandler\n        self._outputhandler_internal = OutputHandlerInternal()\n        lock_and_call(\n            lambda: self._impl.setOutputHandler(\n                self._outputhandler_internal\n            ),\n            self._lock\n        )", "docstring": "Sets a new output handler.\n\nArgs:\noutputhandler: The function handling the AMPL output derived from\ninterpreting user commands.", "source": "juraj-google-style"}
{"code": "def add_scalar_value(self, value_buf):\n    self.__container_node.add_child(_Node(value_buf))\n    self.current_container_length += len(value_buf)", "docstring": "Add a node to the tree containing a scalar value.\n\nArgs:\nvalue_buf (bytearray): bytearray containing the scalar value.", "source": "codesearchnet"}
{"code": "def _move_bee(self, bee, new_values):\n    score = np.nan_to_num(new_values[0])\n    if (bee.score > score):\n        bee.failed_trials += 1\n    else:\n        bee.values = new_values[1]\n        bee.score = score\n        bee.error = new_values[2]\n        bee.failed_trials = 0\n        self._logger.log('debug', 'Bee assigned to new merged position')", "docstring": "Moves a bee to a new position if new fitness score is better than\nthe bee's current fitness score\n\nArgs:\nbee (EmployerBee): bee to move\nnew_values (tuple): (new score, new values, new fitness function\nreturn value)", "source": "codesearchnet"}
{"code": "def merge(profile, branch, merge_into):\n    data = merges.merge(profile, branch, merge_into)\n    return data", "docstring": "Merge a branch into another branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch\nThe name of the branch to merge.\n\nmerge_into\nThe name of the branch you want to merge into.\n\nReturns:\nA dict wtih data about the merge.", "source": "codesearchnet"}
{"code": "def set_brightness(self, brightness):\n        \n        if not 25 <= brightness <= 255:\n            raise ValueError(\"The brightness needs to be between 25 and 255.\")\n\n        payload = self.generate_payload(SET, {self.DPS_INDEX_BRIGHTNESS: brightness})\n        data = self._send_receive(payload)\n        return data", "docstring": "Set the brightness value of an rgb bulb.\n\nArgs:\nbrightness(int): Value for the brightness (25-255).", "source": "juraj-google-style"}
{"code": "def _accept(random_sample: float, cost_diff: float,\n            temp: float) -> Tuple[bool, float]:\n    \n    exponent = -cost_diff / temp\n    if exponent >= 0.0:\n        return True, 1.0\n    else:\n        probability = math.exp(exponent)\n    return probability > random_sample, probability", "docstring": "Calculates probability and draws if solution should be accepted.\n\nBased on exp(-Delta*E/T) formula.\n\nArgs:\nrandom_sample: Uniformly distributed random number in the range [0, 1).\ncost_diff: Cost difference between new and previous solutions.\ntemp: Current temperature.\n\nReturns:\nTuple of boolean and float, with boolean equal to True if solution is\naccepted, and False otherwise. The float value is acceptance\nprobability.", "source": "juraj-google-style"}
{"code": "def __pipeline_image(image, options):\n    \n    results = []\n\n    \n\n    \n    if 'resolutions' in options:\n        resolutions = options['resolutions']  \n        for res in resolutions:\n            img_rs = resize(image, res)  \n\n            \n            \n            results.append(img_rs)\n\n    \n    if 'wmark-img' in options:\n        wtrmk_path = options['wmark-img']\n        if wtrmk_path:\n            if len(results) == 0:\n                image = watermark_image(image, wtrmk_path)  \n            else:\n                for i in range(0, len(results)):\n                    results[i] = watermark_image(\n                        results[i], wtrmk_path)  \n\n    if 'wmark-txt' in options:\n        wtrmk_txt = options['wmark-txt']\n        if wtrmk_txt:\n            if len(results) == 0:\n                image = watermark_text(image, wtrmk_txt)  \n            else:\n                for i in range(0, len(results)):\n                    results[i] = watermark_text(results[i],\n                                                wtrmk_txt)  \n\n    \n    if len(results) == 0:\n        results.append(image)\n    \n    return results", "docstring": "Sends an image through a processing pipeline.\nApplies all (relevant) provided options to a given image.\nArgs:\nimage: An instance of a PIL Image.\noptions: Options to apply to the image (i.e. resolutions).\nReturns:\nA list containing instances of PIL Images. This list will always be length\n1 if no options exist that require multiple copies to be created for a single\nimage (i.e resolutions).", "source": "juraj-google-style"}
{"code": "def get_subset_in_chemsys(self, chemsys: List[str]):\n        \n        chemsys = set(chemsys)\n        if not chemsys.issubset(self.chemsys):\n            raise ValueError(\"%s is not a subset of %s\" % (chemsys,\n                                                           self.chemsys))\n        subset = set()\n        for e in self.entries:\n            elements = [sp.symbol for sp in e.composition.keys()]\n            if chemsys.issuperset(elements):\n                subset.add(e)\n        return EntrySet(subset)", "docstring": "Returns an EntrySet containing only the set of entries belonging to\na particular chemical system (in this definition, it includes all sub\nsystems). For example, if the entries are from the\nLi-Fe-P-O system, and chemsys=[\"Li\", \"O\"], only the Li, O,\nand Li-O entries are returned.\n\nArgs:\nchemsys: Chemical system specified as list of elements. E.g.,\n[\"Li\", \"O\"]\n\nReturns:\nEntrySet", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    temporary_directory = getattr(options, 'temporary_directory', None)\n    if temporary_directory and not os.path.isdir(temporary_directory):\n      raise errors.BadConfigOption(\n          'No such temporary directory: {0:s}'.format(temporary_directory))\n\n    setattr(configuration_object, '_temporary_directory', temporary_directory)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._private_key_unique_identifier:\n        self._private_key_unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The CreateKeyPair response payload is missing the private key unique identifier field.')\n    if self._public_key_unique_identifier:\n        self._public_key_unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The CreateKeyPair response payload is missing the public key unique identifier field.')\n    if self._private_key_template_attribute:\n        self._private_key_template_attribute.write(local_buffer, kmip_version=kmip_version)\n    if self._public_key_template_attribute:\n        self._public_key_template_attribute.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(CreateKeyPairResponsePayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the CreateKeyPair response payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidField: Raised if the private key unique identifier or the\npublic key unique identifier is not defined.", "source": "codesearchnet"}
{"code": "def hist(self, lumping=None, summary=False, sort=True, plot=True, legend=None, ax=None):\n    comps = []\n    labels = []\n    entries = defaultdict(int)\n    for i in self:\n        if lumping:\n            k = i.primary[lumping]\n        elif summary:\n            k = i.primary.summary()\n        else:\n            k = i.primary\n        comps.append(i.primary)\n        labels.append(i.primary.summary())\n        entries[k] += i.thickness\n    if sort:\n        allitems = sorted(entries.items(), key=(lambda i: i[1]), reverse=True)\n        (ents, counts) = zip(*allitems)\n    else:\n        (ents, counts) = (tuple(entries.keys()), tuple(entries.values()))\n    if plot:\n        if (ax is None):\n            (fig, ax) = plt.subplots()\n            return_ax = False\n        else:\n            return_ax = True\n        ind = np.arange(len(ents))\n        bars = ax.bar(ind, counts, align='center')\n        ax.set_xticks(ind)\n        ax.set_xticklabels(labels)\n        if legend:\n            colours = [legend.get_colour(c) for c in comps]\n            for (b, c) in zip(bars, colours):\n                b.set_color(c)\n        ax.set_ylabel('Thickness [m]')\n    else:\n        bars = []\n    if (plot and return_ax):\n        return (counts, ents, ax)\n    return (counts, ents, bars)", "docstring": "Plots a histogram and returns the data for it.\n\nArgs:\nlumping (str): If given, the bins will be lumped based on this\nattribute of the primary components of the intervals\nencountered.\nsummary (bool): If True, the summaries of the components are\nreturned as the bins. Otherwise, the default behaviour is to\nreturn the Components themselves.\nsort (bool): If True (default), the histogram is sorted by value,\nstarting with the largest.\nplot (bool): If True (default), produce a bar plot.\nlegend (Legend): The legend with which to colour the bars.\nax (axis): An axis object, which will be returned if provided.\nIf you don't provide one, it will be created but not returned.\n\nReturns:\nTuple: A tuple of tuples of entities and counts.\n\nTODO:\nDeal with numeric properties, so I can histogram 'Vp' values, say.", "source": "codesearchnet"}
{"code": "def url_to_filename(url, index='index.html', alt_char=False):\n    \n    assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))\n    url_split_result = urllib.parse.urlsplit(url)\n\n    filename = url_split_result.path.split('/')[-1]\n\n    if not filename:\n        filename = index\n\n    if url_split_result.query:\n        if alt_char:\n            query_delim = '@'\n        else:\n            query_delim = '?'\n\n        filename = '{0}{1}{2}'.format(\n            filename, query_delim, url_split_result.query\n        )\n\n    return filename", "docstring": "Return a filename from a URL.\n\nArgs:\nurl (str): The URL.\nindex (str): If a filename could not be derived from the URL path,\nuse index instead. For example, ``/images/`` will return\n``index.html``.\nalt_char (bool): If True, the character for the query deliminator\nwill be ``@`` intead of ``?``.\n\nThis function does not include the directories and does not sanitize\nthe filename.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def GetIapKey(key_id):\n    global _KEY_CACHE\n    key = _KEY_CACHE.get(key_id)\n    if (not key):\n        resp = requests.get('https:\n        if (resp.status_code != 200):\n            raise KeysCanNotBeFetchedError('Unable to fetch IAP keys: {} / {} / {}'.format(resp.status_code, resp.headers, resp.text))\n        _KEY_CACHE = resp.json()\n        key = _KEY_CACHE.get(key_id)\n        if (not key):\n            raise KeyNotFoundError('Key {!r} not found'.format(key_id))\n    return key", "docstring": "Retrieves a public key from the list published by Identity-Aware Proxy.\n\nThe key file is re-fetched if necessary.\n\nArgs:\nkey_id: Key id.\n\nReturns:\nString with a key.\n\nRaises:\nKeyNotFoundError: if the key is not found in the key file.\nKeysCanNotBeFetchedError: if the key file can't be fetched.", "source": "codesearchnet"}
{"code": "def token_accuracy(labels, outputs):\n  \n  weights = tf.to_float(tf.not_equal(labels, 0))\n  return tf.metrics.accuracy(labels, outputs, weights=weights)", "docstring": "Compute tokenwise (elementwise) accuracy.\n\nArgs:\nlabels: ground-truth labels, shape=(batch, seq_length)\noutputs: predicted tokens, shape=(batch, seq_length)\nReturns:\nTwo ops, one for getting the current average accuracy and another for\nupdating the running average estimate.", "source": "juraj-google-style"}
{"code": "def write(self, value, *labels):\n    raise NotImplementedError", "docstring": "Writes the value to the given cache.\n\nArgs:\nvalue: An encodable (with corresponding PCoder) value\n*labels: List of labels for PCollection instance", "source": "github-repos"}
{"code": "def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):\n    if dilation_rate is None:\n        dilation_rate = [1] * num_spatial_dims\n    elif len(dilation_rate) != num_spatial_dims:\n        raise ValueError(f'`len(dilation_rate)` should be {num_spatial_dims}. Received: dilation_rate={dilation_rate} of length {len(dilation_rate)}')\n    dilation_rate = np.array(dilation_rate, dtype=np.int32)\n    if np.any(dilation_rate < 1):\n        raise ValueError(f'all values of `dilation_rate` must be positive. Received: dilation_rate={dilation_rate}')\n    if strides is None:\n        strides = [1] * num_spatial_dims\n    elif len(strides) != num_spatial_dims:\n        raise ValueError(f'`len(strides)` should be {num_spatial_dims}. Received: strides={strides} of length {len(strides)}')\n    strides = np.array(strides, dtype=np.int32)\n    if np.any(strides < 1):\n        raise ValueError(f'all values of `strides` must be positive. Received: strides={strides}')\n    if np.any(strides > 1) and np.any(dilation_rate > 1):\n        raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. Received: strides={strides} and dilation_rate={dilation_rate}')\n    return (strides, dilation_rate)", "docstring": "Helper function for verifying strides and dilation_rate arguments.\n\nThis is used by `convolution` and `pool`.\n\nArgs:\nnum_spatial_dims: int\nstrides: Optional.  List of N ints >= 1.  Defaults to `[1]*N`.  If any value\nof strides is > 1, then all values of dilation_rate must be 1.\ndilation_rate: Optional.  List of N ints >= 1.  Defaults to `[1]*N`.  If any\nvalue of dilation_rate is > 1, then all values of strides must be 1.\n\nReturns:\nNormalized (strides, dilation_rate) as int32 numpy arrays of shape\n[num_spatial_dims].\n\nRaises:\nValueError: if the parameters are invalid.", "source": "github-repos"}
{"code": "def check_url(url):\n        \n        request = urllib2.Request(url)\n        try:\n            response = urlopen(request)\n            return True, response.code\n        except urllib2.HTTPError as e:\n            return False, e.code", "docstring": "Check if resource at URL is fetchable. (by trying to fetch it and checking for 200 status.\n\nArgs:\nurl (str): Url to check.\n\nReturns:\nReturns a tuple of {True/False, response code}", "source": "juraj-google-style"}
{"code": "def build_counter_list(counter_list):\n    counters = []\n    index = 0\n    for (obj, instance, counter_name) in counter_list:\n        try:\n            counter = Counter.build_counter(obj, instance, index, counter_name)\n            index += 1\n            counters.append(counter)\n        except CommandExecutionError as exc:\n            log.debug(exc.strerror)\n            continue\n    return counters", "docstring": "r'''\nCreate a list of Counter objects to be used in the pdh query\n\nArgs:\ncounter_list (list):\nA list of tuples containing counter information. Each tuple should\ncontain the object, instance, and counter name. For example, to\nget the ``% Processor Time`` counter for all Processors on the\nsystem (``\\Processor(*)\\% Processor Time``) you would pass a tuple\nlike this:\n\n```\ncounter_list = [('Processor', '*', '% Processor Time')]\n```\n\nIf there is no ``instance`` for the counter, pass ``None``\n\nMultiple counters can be passed like so:\n\n```\ncounter_list = [('Processor', '*', '% Processor Time'),\n('System', None, 'Context Switches/sec')]\n```\n\n.. note::\nInvalid counters are ignored\n\nReturns:\nlist: A list of Counter objects", "source": "codesearchnet"}
{"code": "def _fillBorder(self, image, color):\n    height, width, depth = image.shape\n    if depth != color.shape[0]:\n        raise ValueError('Image (%d) and color (%d) depths must match.' % (depth, color.shape[0]))\n    image[0:height, 0, 0:depth] = color\n    image[0:height, width - 1, 0:depth] = color\n    image[0, 0:width, 0:depth] = color\n    image[height - 1, 0:width, 0:depth] = color\n    return image", "docstring": "Fill the border of the image.\n\nArgs:\nimage: Numpy array of shape [height, width, depth].\ncolor: Numpy color of shape [depth] and either contents RGB/RGBA.\n\nReturns:\nimage of original shape with border filled with \"color\".\n\nRaises:\nValueError: Depths of image and color don\"t match.", "source": "github-repos"}
{"code": "def rebin(d, n_x, n_y=None):\n    \n\n    if d.ndim == 2:\n        if n_y is None:\n            n_y = 1\n        if n_x is None:\n            n_x = 1\n        d = d[:int(d.shape[0] \n        d = d.reshape((d.shape[0] \n        d = d.mean(axis=3)\n        d = d.mean(axis=1)\n    elif d.ndim == 1:\n        d = d[:int(d.shape[0] \n        d = d.reshape((d.shape[0] \n        d = d.mean(axis=1)\n    else:\n        raise RuntimeError(\"Only NDIM <= 2 supported\")\n    return d", "docstring": "Rebin data by averaging bins together\n\nArgs:\nd (np.array): data\nn_x (int): number of bins in x dir to rebin into one\nn_y (int): number of bins in y dir to rebin into one\n\nReturns:\nd: rebinned data with shape (n_x, n_y)", "source": "juraj-google-style"}
{"code": "def diagflat(x, k=0):\n    if any_symbolic_tensors((x,)):\n        return Diagflat(k=k).symbolic_call(x)\n    return backend.numpy.diagflat(x, k=k)", "docstring": "Create a two-dimensional array with the flattened input on\nthe k-th diagonal.\n\nArgs:\nx: Input tensor to be flattened and placed on the diagonal.\nk: The diagonal to place the flattened input. Defaults to `0`.\nUse `k > 0` for diagonals above the main diagonal,\nand `k < 0` for diagonals below the main diagonal.\n\nReturns:\nA 2-D tensor with the flattened input on the specified diagonal.", "source": "github-repos"}
{"code": "def delete_note(self, note_id):\n    (note, status) = self.trash_note(note_id)\n    if (status == (- 1)):\n        return (note, status)\n    params = ('/i/%s' % str(note_id))\n    request = Request(url=(DATA_URL + params), method='DELETE')\n    request.add_header(self.header, self.get_token())\n    try:\n        response = urllib2.urlopen(request)\n    except IOError as e:\n        return (e, (- 1))\n    except HTTPError as e:\n        if (e.code == 401):\n            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n        else:\n            return (e, (- 1))\n    return ({}, 0)", "docstring": "Method to permanently delete a note\n\nArguments:\n- note_id (string): key of the note to trash\n\nReturns:\nA tuple `(note, status)`\n\n- note (dict): an empty dict or an error message\n- status (int): 0 on success and -1 otherwise", "source": "codesearchnet"}
{"code": "def _resize_volumes(self, x, depth_factor, height_factor, width_factor, data_format):\n    if data_format == 'channels_first':\n        output = ops.repeat(x, depth_factor, axis=2)\n        output = ops.repeat(output, height_factor, axis=3)\n        output = ops.repeat(output, width_factor, axis=4)\n        return output\n    elif data_format == 'channels_last':\n        output = ops.repeat(x, depth_factor, axis=1)\n        output = ops.repeat(output, height_factor, axis=2)\n        output = ops.repeat(output, width_factor, axis=3)\n        return output\n    else:\n        raise ValueError(f'Invalid data_format: {data_format}')", "docstring": "Resizes the volume contained in a 5D tensor.\n\nArgs:\nx: Tensor or variable to resize.\ndepth_factor: Positive integer.\nheight_factor: Positive integer.\nwidth_factor: Positive integer.\ndata_format: One of `\"channels_first\"`, `\"channels_last\"`.\n\nReturns:\nResized tensor.", "source": "github-repos"}
{"code": "def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):\n    batch_size = array_ops.shape(boxes)[0]\n    new_slice = array_ops.slice(boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4])\n    iou = _bbox_overlap(new_slice, box_slice)\n    box_slice_after_suppression = array_ops.expand_dims(math_ops.cast(math_ops.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), 2) * box_slice\n    return (boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1)", "docstring": "Suppress boxes between different tiles.\n\nArgs:\nboxes: a tensor of shape [batch_size, num_boxes_with_padding, 4]\nbox_slice: a tensor of shape [batch_size, tile_size, 4]\niou_threshold: a scalar tensor\ninner_idx: a scalar tensor representing the tile index of the tile\nthat is used to supress box_slice\ntile_size: an integer representing the number of boxes in a tile\n\nReturns:\nboxes: unchanged boxes as input\nbox_slice_after_suppression: box_slice after suppression\niou_threshold: unchanged", "source": "github-repos"}
{"code": "def AddDirectory(self, path):\n    \n    if self.file_system.FileEntryExistsByPath(path):\n      raise ValueError('Path: {0:s} already set.'.format(path))\n\n    self._AddParentDirectories(path)\n    self.file_system.AddFileEntry(\n        path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)", "docstring": "Adds a directory to the fake file system.\n\nNote that this function will create parent directories if needed.\n\nArgs:\npath (str): path of the directory within the fake file system.\n\nRaises:\nValueError: if the path is already set.", "source": "juraj-google-style"}
{"code": "def sudo_remove_dirtree(dir_name):\n  \n  try:\n    subprocess.check_output(['sudo', 'rm', '-rf', dir_name])\n  except subprocess.CalledProcessError as e:\n    raise WorkerError('Can''t remove directory {0}'.format(dir_name), e)", "docstring": "Removes directory tree as a superuser.\n\nArgs:\ndir_name: name of the directory to remove.\n\nThis function is necessary to cleanup directories created from inside a\nDocker, since they usually written as a root, thus have to be removed as a\nroot.", "source": "juraj-google-style"}
{"code": "def editTemplate(id, data):\n    conn = Qubole.agent()\n    return conn.put(Template.element_path(id), data)", "docstring": "Edit an existing template.\n\nArgs:\n`id`:   ID of the template to edit\n`data`: json data to be updated\nReturns:\nDictionary containing the updated details of the template.", "source": "codesearchnet"}
{"code": "def from_config(cls, gitlab_id=None, config_files=None):\n        \n        config = gitlab.config.GitlabConfigParser(gitlab_id=gitlab_id,\n                                                  config_files=config_files)\n        return cls(config.url, private_token=config.private_token,\n                   oauth_token=config.oauth_token,\n                   ssl_verify=config.ssl_verify, timeout=config.timeout,\n                   http_username=config.http_username,\n                   http_password=config.http_password,\n                   api_version=config.api_version,\n                   per_page=config.per_page)", "docstring": "Create a Gitlab connection from configuration files.\n\nArgs:\ngitlab_id (str): ID of the configuration section.\nconfig_files list[str]: List of paths to configuration files.\n\nReturns:\n(gitlab.Gitlab): A Gitlab connection.\n\nRaises:\ngitlab.config.GitlabDataError: If the configuration is not correct.", "source": "juraj-google-style"}
{"code": "def _align_output_features_output_indices(out_features: Optional[list[str]], out_indices: Optional[Union[list[int], tuple[int]]], stage_names: list[str]):\n    if out_indices is None and out_features is None:\n        out_indices = [len(stage_names) - 1]\n        out_features = [stage_names[-1]]\n    elif out_indices is None and out_features is not None:\n        out_indices = [stage_names.index(layer) for layer in out_features]\n    elif out_features is None and out_indices is not None:\n        out_features = [stage_names[idx] for idx in out_indices]\n    return (out_features, out_indices)", "docstring": "Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.\n\nThe logic is as follows:\n- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the\n`out_indices`.\n- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the\n`out_features`.\n- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.\n- `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.\n\nArgs:\nout_features (`List[str]`): The names of the features for the backbone to output.\nout_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.\nstage_names (`List[str]`): The names of the stages of the backbone.", "source": "github-repos"}
{"code": "def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'):\n  \n  def _train(iter):\n    \n    mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())\n    try:\n      queue = mgr.get_queue(qname)\n      equeue = mgr.get_queue('error')\n    except (AttributeError, KeyError):\n      msg = \"Queue '{}' not found on this node, check for exceptions on other nodes.\".format(qname)\n      raise Exception(msg)\n\n    state = str(mgr.get('state'))\n    logging.info(\"mgr.state={0}\".format(state))\n    terminating = state == \"'terminating'\"\n    if terminating:\n      logging.info(\"mgr is terminating, skipping partition\")\n      count = sum(1 for item in iter)\n      logging.info(\"Skipped {0} items from partition\".format(count))\n    else:\n      logging.info(\"Feeding partition {0} into {1} queue {2}\".format(iter, qname, queue))\n      count = 0\n      for item in iter:\n        count += 1\n        queue.put(item, block=True)\n\n      \n      joinThr = Thread(target=queue.join)\n      joinThr.start()\n      timeout = feed_timeout\n      while (joinThr.isAlive()):\n        if (not equeue.empty()):\n          e_str = equeue.get()\n          equeue.task_done()\n          raise Exception(\"exception in worker:\\n\" + e_str)\n        time.sleep(1)\n        timeout -= 1\n        if timeout <= 0:\n          raise Exception(\"Timeout while feeding partition\")\n\n      logging.info(\"Processed {0} items in partition\".format(count))\n\n    \n    if not terminating:\n      state = str(mgr.get('state'))\n      terminating = state == \"'terminating'\"\n      if terminating:\n        try:\n          logging.info(\"TFSparkNode: requesting stop\")\n          client = reservation.Client(cluster_meta['server_addr'])\n          client.request_stop()\n          client.close()\n        except Exception as e:\n          \n          logging.debug(\"Error while requesting stop: {0}\".format(e))\n\n    return [terminating]\n\n  return _train", "docstring": "Feeds Spark partitions into the shared multiprocessing.Queue.\n\nArgs:\n:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)\n:cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc)\n:feed_timeout: number of seconds after which data feeding times out (600 sec default)\n:qname: *INTERNAL_USE*\n\nReturns:\nA dataRDD.mapPartitions() function", "source": "juraj-google-style"}
{"code": "def str(name, default=None, allow_none=False, fallback=None):\n    \n    value = read(name, default, allow_none, fallback=fallback)\n    if value is None and allow_none:\n        return None\n    else:\n        return builtins.str(value).strip()", "docstring": "Get a string based environment value or the default.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)", "source": "juraj-google-style"}
{"code": "def GetUserById(self, local_id):\n    user = self.rpc_helper.GetAccountInfoById(local_id)\n    return GitkitUser.FromApiResponse(user)", "docstring": "Gets user info by id.\n\nArgs:\nlocal_id: string, the user id at Gitkit server.\n\nReturns:\nGitkitUser, containing the user info.", "source": "codesearchnet"}
{"code": "def get(self, page=0, size=10):\n    dash_list = r_db.zrevrange(config.DASH_ID_KEY, 0, (- 1), True)\n    id_list = dash_list[(page * size):((page * size) + size)]\n    dash_meta = []\n    data = []\n    if id_list:\n        dash_meta = r_db.hmget(config.DASH_META_KEY, [i[0] for i in id_list])\n        data = [json.loads(i) for i in dash_meta]\n    return build_response(dict(data=data, code=200))", "docstring": "Get dashboard meta info from in page `page` and page size is `size`.\n\nArgs:\npage: page number.\nsize: size number.\n\nReturns:\nlist of dict containing the dash_id and accordingly meta info.\nmaybe empty list [] when page * size > total dashes in db. that's reasonable.", "source": "codesearchnet"}
{"code": "def get_converter(in_type, out_type, *args, **kwargs):\n    \n    convs = pliers.converters.__all__\n\n    \n    \n    out_type = listify(out_type)[::-1]\n    default_convs = config.get_option('default_converters')\n\n    for ot in out_type:\n        conv_str = '%s->%s' % (in_type.__name__, ot.__name__)\n        if conv_str in default_convs:\n            convs = list(default_convs[conv_str]) + convs\n\n    for name in convs:\n        cls = getattr(pliers.converters, name)\n        if not issubclass(cls, Converter):\n            continue\n\n        available = cls.available if issubclass(\n            cls, EnvironmentKeyMixin) else True\n        if cls._input_type == in_type and cls._output_type in out_type \\\n                and available:\n            conv = cls(*args, **kwargs)\n            return conv\n\n    return None", "docstring": "Scans the list of available Converters and returns an instantiation\nof the first one whose input and output types match those passed in.\n\nArgs:\nin_type (type): The type of input the converter must have.\nout_type (type): The type of output the converter must have.\nargs, kwargs: Optional positional and keyword arguments to pass onto\nmatching Converter's initializer.", "source": "juraj-google-style"}
{"code": "def compute_matches(self, args: list[types.Arg], match_all_views: bool, keep_all_views: bool=False, alias_map: datatypes.UnionFind | None=None) -> list[GoodMatch]:\n    matches = None\n    has_self = args and args[0].name == 'self'\n    for arg in args:\n        match_result = self.compute_one_match(arg.value, arg.typ, arg.name, match_all_views, keep_all_views, alias_map)\n        if not match_result.success:\n            if matches:\n                self._error_subst = matches[0].subst\n                bad_param = self._get_bad_type(arg.name, arg.typ)\n            else:\n                bad_param = match_result.bad_matches[0].expected\n            raise error_types.MatchError(bad_param)\n        if keep_all_views or any((m.subst for m in match_result.good_matches)):\n            typ = cast(abstract.BaseValue, arg.typ)\n            matches = self._merge_matches(arg.name, typ, matches, match_result.good_matches, keep_all_views, has_self)\n    return matches if matches else [GoodMatch.default()]", "docstring": "Compute information about type parameters using one-way unification.\n\nGiven the arguments of a function call, try to find substitutions that match\nthem against their expected types.\n\nArgs:\nargs: A sequence of function arguments.\nmatch_all_views: If True, every possible match must succeed for the\noverall match to be considered a success. Otherwise, the overall match\nsucceeds as long as at least one possible match succeeds.\nkeep_all_views: If True, avoid optimizations that discard views.\nalias_map: Optionally, a datatypes.UnionFind, which stores all the type\nrenaming information, mapping of type parameter name to its\nrepresentative.\n\nReturns:\nA sequence of GoodMatch results containing the computed substitutions.\nRaises:\nMatchError: if any of the arguments does not match its expected type.", "source": "github-repos"}
{"code": "def __get_conn(self, flag_force_new=False, filename=None):\n        \n        flag_open_new = flag_force_new or not self._conn_is_open()\n\n        if flag_open_new:\n            if filename is None:\n                filename = self.filename\n            \n            conn = self._get_conn(filename)\n            self._conn = conn\n        else:\n            conn = self._conn\n        return conn", "docstring": "Returns connection to database. Tries to return existing connection, unless flag_force_new\n\nArgs:\nflag_force_new:\nfilename:\n\nReturns: sqlite3.Connection object\n\n**Note** this is a private method because you can get a connection to any file, so it has to\nbe used in the right moment", "source": "juraj-google-style"}
{"code": "def make_same_degree(nodes1, nodes2):\n    (_, num_nodes1) = nodes1.shape\n    (_, num_nodes2) = nodes2.shape\n    for _ in six.moves.xrange((num_nodes2 - num_nodes1)):\n        nodes1 = _curve_helpers.elevate_nodes(nodes1)\n    for _ in six.moves.xrange((num_nodes1 - num_nodes2)):\n        nodes2 = _curve_helpers.elevate_nodes(nodes2)\n    return (nodes1, nodes2)", "docstring": "Degree-elevate a curve so two curves have matching degree.\n\nArgs:\nnodes1 (numpy.ndarray): Set of control points for a\nB |eacute| zier curve.\nnodes2 (numpy.ndarray): Set of control points for a\nB |eacute| zier curve.\n\nReturns:\nTuple[numpy.ndarray, numpy.ndarray]: The potentially degree-elevated\nnodes passed in.", "source": "codesearchnet"}
{"code": "def method_not_allowed(cls, errors=None):\n    if cls.expose_status:\n        cls.response.content_type = 'application/json'\n        cls.response._status_line = '405 Method Not Allowed'\n    return cls(405, None, errors).to_json", "docstring": "Shortcut API for HTTP 405 `Method not allowed` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "codesearchnet"}
{"code": "def events(config):\n    \n    celery_app = create_app(config)\n\n    for event in event_stream(celery_app, filter_by_prefix='task'):\n        try:\n            yield create_event_model(event)\n        except JobEventTypeUnsupported:\n            pass", "docstring": "Return a generator that yields workflow events.\n\nFor every workflow event that is sent from celery this generator yields an event\nobject.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings are retrieved.\n\nReturns:\ngenerator: A generator that returns workflow events.", "source": "juraj-google-style"}
{"code": "def leaves(self, nodes=None, unique=True):\n        \n        if nodes is None:\n            return super(DependencyTree, self).leaves(unique=unique)\n\n        res = list()\n        for child_id in nodes:\n            for sub_child in self._all_nodes[child_id].leaves(unique=unique):\n                if not unique or sub_child not in res:\n                    res.append(sub_child)\n        return res", "docstring": "Get the leaves of the tree starting at this root.\n\nArgs:\nnodes (iterable): limit leaves for these node names\nunique: only include individual leaf nodes once\n\nReturns:\nlist of leaf nodes", "source": "juraj-google-style"}
{"code": "def random_restore(\n    rnd: Optional[tcod.random.Random], backup: tcod.random.Random\n) -> None:\n    \n    lib.TCOD_random_restore(rnd.random_c if rnd else ffi.NULL, backup.random_c)", "docstring": "Restore a random number generator from a backed up copy.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\nbackup (Random): The Random instance which was used as a backup.\n\n.. deprecated:: 8.4\nYou can use the standard library copy and pickle modules to save a\nrandom state.", "source": "juraj-google-style"}
{"code": "def _trigger(self):\n    self._completed.set()\n    for callback in self._callbacks:\n        callback(self)", "docstring": "Trigger all callbacks registered to this Future.\n\nThis method is called internally by the batch once the batch\ncompletes.\n\nArgs:\nmessage_id (str): The message ID, as a string.", "source": "codesearchnet"}
{"code": "def select_inputs(self, address, nfees, ntokens, min_confirmations=6):\n    unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']\n    unspents = [u for u in unspents if (u not in self._spents.queue)]\n    if (len(unspents) == 0):\n        raise Exception('No spendable outputs found')\n    fees = [u for u in unspents if (u['amount'] == self.fee)][:nfees]\n    tokens = [u for u in unspents if (u['amount'] == self.token)][:ntokens]\n    if ((len(fees) != nfees) or (len(tokens) != ntokens)):\n        raise SpoolFundsError('Not enough outputs to spend. Refill your wallet')\n    if (self._spents.qsize() > (self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens))):\n        [self._spents.get() for i in range((((self._spents.qsize() + nfees) + ntokens) - self.SPENTS_QUEUE_MAXSIZE))]\n    [self._spents.put(fee) for fee in fees]\n    [self._spents.put(token) for token in tokens]\n    return (fees + tokens)", "docstring": "Selects the inputs for the spool transaction.\n\nArgs:\naddress (str): bitcoin address to select inputs for\nnfees (int): number of fees\nntokens (int): number of tokens\nmin_confirmations (Optional[int]): minimum number of required\nconfirmations; defaults to 6", "source": "codesearchnet"}
{"code": "def _FormatSourceShort(self, event):\n    \n    source_short, _ = self._output_mediator.GetFormattedSources(event)\n    if source_short is None:\n      data_type = getattr(event, 'data_type', 'UNKNOWN')\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    return source_short", "docstring": "Formats the short source.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: short source field.\n\nRaises:\nNoFormatterFound: If no event formatter can be found to match the data\ntype in the event.", "source": "juraj-google-style"}
{"code": "def insert_tag(tag, before, root):\n    \n    if not before:\n        root.childs.append(tag)\n        tag.parent = root\n        return\n\n    if type(before) in [tuple, list]:\n        before = first(before)\n\n    \n    if not hasattr(before, \"parent\"):\n        raise ValueError(\"Input must be double-linked!\")\n\n    \n    parent = before.parent\n    parent.childs.insert(\n        parent.childs.index(before),\n        tag\n    )\n    tag.parent = parent", "docstring": "Insert `tag` before `before` tag if present. If not, insert it into `root`.\n\nArgs:\ntag (obj): HTMLElement instance.\nbefore (obj): HTMLElement instance.\nroot (obj): HTMLElement instance.", "source": "juraj-google-style"}
{"code": "def Add(self, other):\n    if (len(self.data) != len(other.data)):\n        raise RuntimeError('Can only add series of identical lengths.')\n    for i in range(len(self.data)):\n        if (self.data[i][1] != other.data[i][1]):\n            raise RuntimeError('Timestamp mismatch.')\n        if ((self.data[i][0] is None) and (other.data[i][0] is None)):\n            continue\n        self.data[i][0] = ((self.data[i][0] or 0) + (other.data[i][0] or 0))", "docstring": "Add other to self pointwise.\n\nRequires that both self and other are of the same length, and contain\nidentical timestamps. Typically this means that Normalize has been called\non both with identical time parameters.\n\nArgs:\nother: The sequence to add to self.\n\nRaises:\nRuntimeError: other does not contain the same timestamps as self.", "source": "codesearchnet"}
{"code": "def top(self, **kwargs):\n    return self.client.api.top(self.id, **kwargs)", "docstring": "Display the running processes of the container.\n\nArgs:\nps_args (str): An optional arguments passed to ps (e.g. ``aux``)\n\nReturns:\n(str): The output of the top\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def forward(self, inputs, expert_size):\n    input_list = inputs.split(expert_size, dim=0)\n    output_list = []\n    for i in range(self.num_experts):\n        output_list.append(F.linear(input_list[i], self.weight[i]))\n    results = torch.cat(output_list, dim=0)\n    return results", "docstring": "Forward pass of the GraniteMoeParallelExperts module.\n\nArgs:\ninputs (Tensor):\nInput tensor.\nexpert_size:\nExpert size information.\n\nReturns:\nTensor: Output tensor.", "source": "github-repos"}
{"code": "def as_allocate_quota_request(self, timer=datetime.utcnow):\n    if (not self.service_name):\n        raise ValueError(u'the service name must be set')\n    if (not self.operation_id):\n        raise ValueError(u'the operation id must be set')\n    if (not self.operation_name):\n        raise ValueError(u'the operation name must be set')\n    op = super(Info, self).as_operation(timer=timer)\n    labels = {}\n    if self.client_ip:\n        labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip\n    if self.referer:\n        labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer\n    qop = sc_messages.QuotaOperation(operationId=op.operationId, methodName=op.operationName, consumerId=op.consumerId, quotaMode=sc_messages.QuotaOperation.QuotaModeValueValuesEnum.BEST_EFFORT)\n    qop.labels = encoding.PyValueToMessage(sc_messages.QuotaOperation.LabelsValue, labels)\n    quota_info = (self.quota_info if self.quota_info else {})\n    qop.quotaMetrics = [sc_messages.MetricValueSet(metricName=name, metricValues=[sc_messages.MetricValue(int64Value=cost)]) for (name, cost) in quota_info.items()]\n    allocate_quota_request = sc_messages.AllocateQuotaRequest(allocateOperation=qop)\n    if self.config_id:\n        allocate_quota_request.serviceConfigId = self.config_id\n    return sc_messages.ServicecontrolServicesAllocateQuotaRequest(serviceName=self.service_name, allocateQuotaRequest=allocate_quota_request)", "docstring": "Makes a `ServicecontrolServicesAllocateQuotaRequest` from this instance\n\nReturns:\na ``ServicecontrolServicesAllocateQuotaRequest``\n\nRaises:\nValueError: if the fields in this instance are insufficient to\nto create a valid ``ServicecontrolServicesAllocateQuotaRequest``", "source": "codesearchnet"}
{"code": "def _PrintParsersCounter(self, parsers_counter, session_identifier=None):\n    if (not parsers_counter):\n        return\n    title = 'Events generated per parser'\n    if session_identifier:\n        title = '{0:s}: {1:s}'.format(title, session_identifier)\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, column_names=['Parser (plugin) name', 'Number of events'], title=title)\n    for (key, value) in sorted(parsers_counter.items()):\n        if (key == 'total'):\n            continue\n        table_view.AddRow([key, value])\n    table_view.AddRow(['Total', parsers_counter['total']])\n    table_view.Write(self._output_writer)", "docstring": "Prints the parsers counter\n\nArgs:\nparsers_counter (collections.Counter): number of events per parser or\nparser plugin.\nsession_identifier (Optional[str]): session identifier.", "source": "codesearchnet"}
{"code": "def iso_date(d) -> str:\n    if isinstance(d, datetime):\n        return d.isoformat()\n    elif isinstance(d, date):\n        return datetime.combine(d, datetime.min.time()).isoformat()\n    else:\n        try:\n            datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')\n            return d\n        except ValueError:\n            try:\n                datetime.strptime(d, '%Y-%m-%d')\n                return (d + 'T00:00:00')\n            except ValueError:\n                pass\n    raise ISODateError('Can not convert value to ISO format for kg')", "docstring": "Return iso format of a date\n\nArgs:\nd:\nReturns: str", "source": "codesearchnet"}
{"code": "def Convert(self, metadata, grr_message, token=None):\n    \n    return self.BatchConvert([(metadata, grr_message)], token=token)", "docstring": "Converts GrrMessage into a set of RDFValues.\n\nArgs:\nmetadata: ExportedMetadata to be used for conversion.\ngrr_message: GrrMessage to be converted.\ntoken: Security token.\n\nReturns:\nList or generator with resulting RDFValues.", "source": "juraj-google-style"}
{"code": "def _elevate_nodes(nodes):\n    (dimension, num_nodes) = np.shape(nodes)\n    new_nodes = np.empty((dimension, (num_nodes + 1)), order='F')\n    multipliers = np.arange(1, num_nodes, dtype=_FLOAT64)[(np.newaxis, :)]\n    denominator = float(num_nodes)\n    new_nodes[(:, 1:(- 1))] = ((multipliers * nodes[(:, :(- 1))]) + ((denominator - multipliers) * nodes[(:, 1:)]))\n    new_nodes /= denominator\n    new_nodes[(:, 0)] = nodes[(:, 0)]\n    new_nodes[(:, (- 1))] = nodes[(:, (- 1))]\n    return new_nodes", "docstring": "r\"\"\"Degree-elevate a B |eacute| zier curves.\n\nDoes this by converting the current nodes :math:`v_0, \\ldots, v_n`\nto new nodes :math:`w_0, \\ldots, w_{n + 1}` where\n\n.. math::\n\n\\begin{align*}\nw_0 &= v_0 \\\\\nw_j &= \\frac{j}{n + 1} v_{j - 1} + \\frac{n + 1 - j}{n + 1} v_j \\\\\nw_{n + 1} &= v_n\n\\end{align*}\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes defining a curve.\n\nReturns:\nnumpy.ndarray: The nodes of the degree-elevated curve.", "source": "codesearchnet"}
{"code": "def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:\n    return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and\ndecoder model configuration.\n\nReturns:\n[`EncoderDecoderConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def _merge_beam_dim(tensor):\n  \n  shape = common_layers.shape_list(tensor)\n  shape[0] *= shape[1]  \n  shape.pop(1)  \n  return tf.reshape(tensor, shape)", "docstring": "Reshapes first two dimensions in to single dimension.\n\nArgs:\ntensor: Tensor to reshape of shape [A, B, ...]\n\nReturns:\nReshaped tensor of shape [A*B, ...]", "source": "juraj-google-style"}
{"code": "def _add_dependency(self, dependency, var_name=None):\n    if (var_name is None):\n        var_name = next(self.temp_var_names)\n    if ((dependency, var_name) not in self.dependencies):\n        self.dependencies.append((dependency, var_name))\n    return var_name", "docstring": "Adds the given dependency and returns the variable name to use to access it. If `var_name`\nis not given then a random one will be created.\n\nArgs:\ndependency (str):\nvar_name (str, optional):\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def parse_criteria(criteria_string):\n    toks = criteria_string.split()\n\n    def parse_sym(sym):\n        if (sym == '*'):\n            return [el.symbol for el in Element]\n        else:\n            m = re.match('\\\\{(.*)\\\\}', sym)\n            if m:\n                return [s.strip() for s in m.group(1).split(',')]\n            else:\n                return [sym]\n\n    def parse_tok(t):\n        if re.match('\\\\w+-\\\\d+', t):\n            return {'task_id': t}\n        elif ('-' in t):\n            elements = [parse_sym(sym) for sym in t.split('-')]\n            chemsyss = []\n            for cs in itertools.product(*elements):\n                if (len(set(cs)) == len(cs)):\n                    cs = [Element(s).symbol for s in cs]\n                    chemsyss.append('-'.join(sorted(cs)))\n            return {'chemsys': {'$in': chemsyss}}\n        else:\n            all_formulas = set()\n            explicit_els = []\n            wild_card_els = []\n            for sym in re.findall('(\\\\*[\\\\.\\\\d]*|\\\\{.*\\\\}[\\\\.\\\\d]*|[A-Z][a-z]*)[\\\\.\\\\d]*', t):\n                if (('*' in sym) or ('{' in sym)):\n                    wild_card_els.append(sym)\n                else:\n                    m = re.match('([A-Z][a-z]*)[\\\\.\\\\d]*', sym)\n                    explicit_els.append(m.group(1))\n            nelements = (len(wild_card_els) + len(set(explicit_els)))\n            parts = re.split('(\\\\*|\\\\{.*\\\\})', t)\n            parts = [parse_sym(s) for s in parts if (s != '')]\n            for f in itertools.product(*parts):\n                c = Composition(''.join(f))\n                if (len(c) == nelements):\n                    for e in c.keys():\n                        Element(e.symbol)\n                    all_formulas.add(c.reduced_formula)\n            return {'pretty_formula': {'$in': list(all_formulas)}}\n    if (len(toks) == 1):\n        return parse_tok(toks[0])\n    else:\n        return {'$or': list(map(parse_tok, toks))}", "docstring": "Parses a powerful and simple string criteria and generates a proper\nmongo syntax criteria.\n\nArgs:\ncriteria_string (str): A string representing a search criteria.\nAlso supports wild cards. E.g.,\nsomething like \"*2O\" gets converted to\n{'pretty_formula': {'$in': [u'B2O', u'Xe2O', u\"Li2O\", ...]}}\n\nOther syntax examples:\nmp-1234: Interpreted as a Materials ID.\nFe2O3 or *2O3: Interpreted as reduced formulas.\nLi-Fe-O or *-Fe-O: Interpreted as chemical systems.\n\nYou can mix and match with spaces, which are interpreted as\n\"OR\". E.g., \"mp-1234 FeO\" means query for all compounds with\nreduced formula FeO or with materials_id mp-1234.\n\nReturns:\nA mongo query dict.", "source": "codesearchnet"}
{"code": "def Calls(self, conditions=None):\n    results = set()\n    if (conditions is None):\n        conditions = [None]\n    for condition in conditions:\n        for c in self.Match(*condition):\n            results.update(self._registry.get(c, []))\n    return results", "docstring": "Find the methods that evaluate data that meets this condition.\n\nArgs:\nconditions: A tuple of (artifact, os_name, cpe, label)\n\nReturns:\nA list of methods that evaluate the data.", "source": "codesearchnet"}
{"code": "def update_fitness(objective_function, particle):\n    fitness = objective_function(particle.position)\n    best_fitness = particle.best_fitness\n    cmp = comparator(fitness)\n    if ((best_fitness is None) or cmp(fitness, best_fitness)):\n        best_position = particle.position\n        return particle._replace(fitness=fitness, best_fitness=fitness, best_position=best_position)\n    else:\n        return particle._replace(fitness=fitness)", "docstring": "Calculates and updates the fitness and best_fitness of a particle.\n\nFitness is calculated using the 'problem.fitness' function.\n\nArgs:\nproblem: The optimization problem encapsulating the fitness function\nand optimization type.\nparticle: cipy.algorithms.pso.Particle: Particle to update the fitness\nfor.\n\nReturns:\ncipy.algorithms.pso.Particle: A new particle with the updated fitness.", "source": "codesearchnet"}
{"code": "def sg_gpus():\n    global _gpus\n    if (_gpus is None):\n        local_device_protos = device_lib.list_local_devices()\n        _gpus = len([x.name for x in local_device_protos if (x.device_type == 'GPU')])\n    return max(_gpus, 1)", "docstring": "r\"\"\" Gets current available GPU nums\n\nReturns:\nA integer : total # of GPUs available", "source": "codesearchnet"}
{"code": "def with_inverse(points, noise):\n    n_points = (len(points) / 2)\n    break_point = n_points\n    points_part = copy.deepcopy(points)\n    points_part = list(reversed(points_part))\n    part = kalman_filter(points_part, noise)\n    total = kalman_filter(points, noise)\n    result = (list(reversed(part))[:break_point] + total[break_point:])\n    result[break_point] = point_mean(part[break_point], total[break_point])\n    return result", "docstring": "Smooths a set of points\n\nIt smooths them twice, once in given order, another one in the reverse order.\nThe the first half of the results will be taken from the reverse order and\nthe second half from the normal order.\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nnoise (float): Expected noise, the higher it is the more the path will\nbe smoothed.\nReturns:\n:obj:`list` of :obj:`Point`", "source": "codesearchnet"}
{"code": "def emit(self, event, *args, **kwargs):\n    listeners = self._listeners[event]\n    listeners = itertools.chain(listeners, self._once[event])\n    self._once[event] = []\n    for listener in listeners:\n        self._loop.call_soon(functools.partial(self._dispatch, event, listener, *args, **kwargs))\n    return self", "docstring": "Call each listener for the event with the given arguments.\n\nArgs:\nevent (str): The event to trigger listeners on.\n*args: Any number of positional arguments.\n**kwargs: Any number of keyword arguments.\n\nThis method passes all arguments other than the event name directly\nto the listeners. If a listener raises an exception for any reason the\n'listener-error', or current value of LISTENER_ERROR_EVENT, is emitted.\nListeners to this event are given the event name, listener object, and\nthe exception raised. If an error listener fails it does so silently.\n\nAll event listeners are fired in a deferred way so this method returns\nimmediately. The calling coro must yield at some point for the event\nto propagate to the listeners.", "source": "codesearchnet"}
{"code": "def repr_result(obj: Any, elements: List[str],\n                with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:\n    \n    if with_addr:\n        return \"<{qualname}({elements}) at {addr}>\".format(\n            qualname=obj.__class__.__qualname__,\n            elements=joiner.join(elements),\n            addr=hex(id(obj)))\n    else:\n        return \"{qualname}({elements})\".format(\n            qualname=obj.__class__.__qualname__,\n            elements=joiner.join(elements))", "docstring": "Internal function to make a :func:`repr`-style representation of an object.\n\nArgs:\nobj: object to display\nelements: list of object ``attribute=value`` strings\nwith_addr: include the memory address of ``obj``\njoiner: string with which to join the elements\n\nReturns:\nstring: :func:`repr`-style representation", "source": "juraj-google-style"}
{"code": "def endpoint_from_production_variants(self, name, production_variants, tags=None, kms_key=None, wait=True):\n    if (not _deployment_entity_exists((lambda : self.sagemaker_client.describe_endpoint_config(EndpointConfigName=name)))):\n        config_options = {'EndpointConfigName': name, 'ProductionVariants': production_variants}\n        if tags:\n            config_options['Tags'] = tags\n        if kms_key:\n            config_options['KmsKeyId'] = kms_key\n        self.sagemaker_client.create_endpoint_config(**config_options)\n    return self.create_endpoint(endpoint_name=name, config_name=name, tags=tags, wait=wait)", "docstring": "Create an SageMaker ``Endpoint`` from a list of production variants.\n\nArgs:\nname (str): The name of the ``Endpoint`` to create.\nproduction_variants (list[dict[str, str]]): The list of production variants to deploy.\ntags (list[dict[str, str]]): A list of key-value pairs for tagging the endpoint (default: None).\nkms_key (str): The KMS key that is used to encrypt the data on the storage volume attached\nto the instance hosting the endpoint.\nwait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True).\n\nReturns:\nstr: The name of the created ``Endpoint``.", "source": "codesearchnet"}
{"code": "def _render_our_module_key_flags(self, module, output_lines, prefix=''):\n    \n    key_flags = self.get_key_flags_for_module(module)\n    if key_flags:\n      self._render_module_flags(module, key_flags, output_lines, prefix)", "docstring": "Returns a help string for the key flags of a given module.\n\nArgs:\nmodule: module|str, the module to render key flags for.\noutput_lines: [str], a list of strings.  The generated help message\nlines will be appended to this list.\nprefix: str, a string that is prepended to each generated help line.", "source": "juraj-google-style"}
{"code": "def generate_output_events(self, source, key, val, line='2', hr=True, show_name=False, colorize=True):\n    output = generate_output(line=line, short=(HR_RDAP[source][key]['_short'] if hr else key), name=(HR_RDAP[source][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)\n    if (val is not None):\n        count = 0\n        for item in val:\n            try:\n                action = item['action']\n            except KeyError:\n                action = None\n            try:\n                timestamp = item['timestamp']\n            except KeyError:\n                timestamp = None\n            try:\n                actor = item['actor']\n            except KeyError:\n                actor = None\n            if (count > 0):\n                output += generate_output(line=str((int(line) + 1)), is_parent=True, colorize=colorize)\n            output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['action']['_short'] if hr else 'action'), name=(HR_RDAP_COMMON[key]['action']['_name'] if (hr and show_name) else None), value=action, colorize=colorize)\n            output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['timestamp']['_short'] if hr else 'timestamp'), name=(HR_RDAP_COMMON[key]['timestamp']['_name'] if (hr and show_name) else None), value=timestamp, colorize=colorize)\n            output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['actor']['_short'] if hr else 'actor'), name=(HR_RDAP_COMMON[key]['actor']['_name'] if (hr and show_name) else None), value=actor, colorize=colorize)\n            count += 1\n    return output", "docstring": "The function for generating CLI output RDAP events results.\n\nArgs:\nsource (:obj:`str`): The parent key 'network' or 'objects'\n(required).\nkey (:obj:`str`): The event key 'events' or 'events_actor'\n(required).\nval (:obj:`dict`): The event dictionary (required).\nline (:obj:`str`): The line number (0-4). Determines indentation.\nDefaults to '0'.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def new_product(self, name):\n    n = self._product_cls(self, name, summary_cls=self._summary_cls)\n    self.graph.add_node(n)\n    self.products.append(n)\n    return n", "docstring": "Create a new product.\n\nArgs:\nname: name of the new product.\n\nReturns:\nA new product instance.", "source": "codesearchnet"}
{"code": "def emit(self, record):\n    \n    try:\n      message = self.format(record)\n      log_record = LogRecord(\n          record.levelno, record.name, os.path.basename(record.pathname),\n          record.lineno, int(record.created * 1000), message,\n      )\n      self._test_record.add_log_record(log_record)\n      self._notify_update()\n    except Exception:  \n      self.handleError(record)", "docstring": "Save a logging.LogRecord to our test record.\n\nLogs carry useful metadata such as the logger name and level information.\nWe capture this in a structured format in the test record to enable\nfiltering by client applications.\n\nArgs:\nrecord: A logging.LogRecord to record.", "source": "juraj-google-style"}
{"code": "def is_custom_device(device_name):\n    return context().is_custom_device(device_name)", "docstring": "Calls TFE_IsCustomDevice.\n\nEnables using C extensions specifying a custom device from Python. See the\nexperimental eager C API in tensorflow/c/eager/c_api_experimental.h for\ndetails.\n\nArgs:\ndevice_name: A string indicating the name to check whether it is a\nregistered custom device.\n\nReturns:\nA boolean.", "source": "github-repos"}
{"code": "def read_excel(filename, dataset_class=dataset.pandas_dataset.PandasDataset, expectations_config=None, autoinspect_func=None, *args, **kwargs):\n    df = pd.read_excel(filename, *args, **kwargs)\n    if isinstance(df, dict):\n        for key in df:\n            df[key] = _convert_to_dataset_class(df[key], dataset_class, expectations_config, autoinspect_func)\n    else:\n        df = _convert_to_dataset_class(df, dataset_class, expectations_config, autoinspect_func)\n    return df", "docstring": "Read a file using Pandas read_excel and return a great_expectations dataset.\n\nArgs:\nfilename (string): path to file to read\ndataset_class (Dataset class): class to which to convert resulting Pandas df\nexpectations_config (string): path to great_expectations config file\n\nReturns:\ngreat_expectations dataset or ordered dict of great_expectations datasets,\nif multiple worksheets are imported", "source": "codesearchnet"}
{"code": "def get_energy_tersoff(structure, gulp_cmd='gulp'):\n    \n    gio = GulpIO()\n    gc = GulpCaller(gulp_cmd)\n    gin = gio.tersoff_input(structure)\n    gout = gc.run(gin)\n    return gio.get_energy(gout)", "docstring": "Compute the energy of a structure using Tersoff potential.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\ngulp_cmd: GULP command if not in standard place", "source": "juraj-google-style"}
{"code": "def run(self, input_dir, output_dir, epsilon):\n    \n    logging.info('Running attack %s', self.submission_id)\n    tmp_run_dir = self.temp_copy_extracted_submission()\n    cmd = ['--network=none',\n           '-m=24g',\n           '--cpus=3.75',\n           '-v', '{0}:/input_images:ro'.format(input_dir),\n           '-v', '{0}:/output_images'.format(output_dir),\n           '-v', '{0}:/code'.format(tmp_run_dir),\n           '-w', '/code',\n           self.container_name,\n           './' + self.entry_point,\n           '/input_images',\n           '/output_images',\n           str(epsilon)]\n    elapsed_time_sec = self.run_with_time_limit(cmd)\n    sudo_remove_dirtree(tmp_run_dir)\n    return elapsed_time_sec", "docstring": "Runs attack inside Docker.\n\nArgs:\ninput_dir: directory with input (dataset).\noutput_dir: directory where output (adversarial images) should be written.\nepsilon: maximum allowed size of adversarial perturbation,\nshould be in range [0, 255].\n\nReturns:\nhow long it took to run submission in seconds", "source": "juraj-google-style"}
{"code": "def fn(x: tuple[int, str]):\n    return x", "docstring": "Test function\n\nArgs:\nx: The input\n\n\nReturns:\nThe output", "source": "github-repos"}
{"code": "def MergeMessage(self, source, destination, replace_message_field=False, replace_repeated_field=False):\n    tree = _FieldMaskTree(self)\n    tree.MergeMessage(source, destination, replace_message_field, replace_repeated_field)", "docstring": "Merges fields specified in FieldMask from source to destination.\n\nArgs:\nsource: Source message.\ndestination: The destination message to be merged into.\nreplace_message_field: Replace message field if True. Merge message\nfield if False.\nreplace_repeated_field: Replace repeated field if True. Append\nelements of repeated field if False.", "source": "codesearchnet"}
{"code": "def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:\n    height, width = image_size\n    raw_size = None\n    if max_size is not None:\n        min_original_size = float(min((height, width)))\n        max_original_size = float(max((height, width)))\n        if max_original_size / min_original_size * size > max_size:\n            raw_size = max_size * min_original_size / max_original_size\n            size = int(round(raw_size))\n    if height <= width and height == size or (width <= height and width == size):\n        oh, ow = (height, width)\n    elif width < height:\n        ow = size\n        if max_size is not None and raw_size is not None:\n            oh = int(raw_size * height / width)\n        else:\n            oh = int(size * height / width)\n    else:\n        oh = size\n        if max_size is not None and raw_size is not None:\n            ow = int(raw_size * width / height)\n        else:\n            ow = int(size * width / height)\n    return (oh, ow)", "docstring": "Computes the output image size given the input image size and the desired output size.\n\nArgs:\nimage_size (`Tuple[int, int]`):\nThe input image size.\nsize (`int`):\nThe desired output size.\nmax_size (`int`, *optional*):\nThe maximum allowed output size.", "source": "github-repos"}
{"code": "def gunzip_file(infile, outfile=None, outdir=None, delete_original=False, force_rerun_flag=False):\n    if (not outfile):\n        outfile = infile.replace('.gz', '')\n    if (not outdir):\n        outdir = ''\n    else:\n        outdir = op.dirname(infile)\n    outfile = op.join(outdir, op.basename(outfile))\n    if force_rerun(flag=force_rerun_flag, outfile=outfile):\n        gz = gzip.open(infile, 'rb')\n        decoded = gz.read()\n        with open(outfile, 'wb') as new_file:\n            new_file.write(decoded)\n        gz.close()\n        log.debug('{}: file unzipped'.format(outfile))\n    else:\n        log.debug('{}: file already unzipped'.format(outfile))\n    if delete_original:\n        os.remove(infile)\n    return outfile", "docstring": "Decompress a gzip file and optionally set output values.\n\nArgs:\ninfile: Path to .gz file\noutfile: Name of output file\noutdir: Path to output directory\ndelete_original: If original .gz file should be deleted\nforce_rerun_flag: If file should be decompressed if outfile already exists\n\nReturns:\nstr: Path to decompressed file", "source": "codesearchnet"}
{"code": "def subtree(self, root_path: Union[int, str, KeyPath]) -> Optional['KeyPathSet']:\n    root_path = KeyPath.from_value(root_path)\n    if not root_path:\n        return self\n    root = self._trie\n    for key in root_path.keys:\n        if key not in root:\n            return None\n        root = root[key]\n    ret = KeyPathSet()\n    ret._trie = root\n    return ret", "docstring": "Returns the relative paths of the sub-tree rooted at the given path.\n\nArgs:\nroot_path: A KeyPath for the root of the sub-tree.\n\nReturns:\nA KeyPathSet that contains all the child paths of the given root path.\nPlease note that the returned value share the same trie as the current\nvalue. So addition/removal of paths in the returned value will also\naffect the current value. If there is no child path under the given root\npath, None will be returned.", "source": "github-repos"}
{"code": "def get_min_max_value(self) -> tuple[float, float]:\n    return (self._statistics.min_max_statistics.global_min, self._statistics.min_max_statistics.global_max)", "docstring": "Calculates the global min and max values.\n\nReturns:\n(min_value, max_value): Min and max calculated using MinMax", "source": "github-repos"}
{"code": "def _ConvertInteger(value):\n  \n  if isinstance(value, float) and not value.is_integer():\n    raise ParseError('Couldn\\'t parse integer: {0}.'.format(value))\n\n  if isinstance(value, six.text_type) and value.find(' ') != -1:\n    raise ParseError('Couldn\\'t parse integer: \"{0}\".'.format(value))\n\n  return int(value)", "docstring": "Convert an integer.\n\nArgs:\nvalue: A scalar value to convert.\n\nReturns:\nThe integer value.\n\nRaises:\nParseError: If an integer couldn't be consumed.", "source": "juraj-google-style"}
{"code": "def asset(self, asset_id, asset_type, action='GET'):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if asset_type == 'PHONE':\n            return self.tc_requests.victim_phone_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        if asset_type == 'EMAIL':\n            return self.tc_requests.victim_email_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        if asset_type == 'NETWORK':\n            return self.tc_requests.victim_network_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        if asset_type == 'SOCIAL':\n            return self.tc_requests.victim_social_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        if asset_type == 'WEB':\n            return self.tc_requests.victim_web_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        self._tcex.handle_error(\n            925, ['asset_type', 'asset', 'asset_type', 'asset_type', asset_type]\n        )\n        return None", "docstring": "Gets a asset of a Victim\n\nValid asset_type:\n+ PHONE\n+ EMAIL\n+ NETWORK\n+ SOCIAL\n+ WEB\n\nArgs:\nasset_type:\nasset_id:\naction:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _embedding_lookup_for_sparse_tensor(inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n    inp_rank = inp.shape.rank\n    if not feature.output_shape and feature.max_sequence_length > 0 and (inp_rank is None or inp_rank == 2):\n        batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)\n        sparse_shape = array_ops_stack.stack([batch_size, feature.max_sequence_length], axis=0)\n        truncated_inp = sparse_ops.sparse_slice(inp, start=[0, 0], size=sparse_shape)\n        dense_output_shape = array_ops_stack.stack([batch_size, feature.max_sequence_length, feature.table.dim], axis=0)\n        return array_ops.scatter_nd(truncated_inp.indices, array_ops.gather(table.read_value(), truncated_inp.values), dense_output_shape)\n    else:\n        if feature.max_sequence_length > 0:\n            logging.warning('max_sequence_length setting will be ignored because the rank of the input tensor is %d which is not 2.', inp_rank)\n        if not feature.validate_weights_and_indices and inp_rank is not None and (inp_rank <= 2):\n            return embedding_ops.embedding_lookup_sparse_v2(table, inp, sp_weights=weight, combiner=feature.table.combiner)\n        else:\n            return embedding_ops.safe_embedding_lookup_sparse_v2(table, inp, sparse_weights=weight, combiner=feature.table.combiner)", "docstring": "Embedding lookup for sparse tensor based on its feature config.\n\nArgs:\ninp: a single SparseTensor input.\nweight: None or SparseTensor which has the same shape of the input.\ntable: a table variable.\nfeature: a feature config.\n\nReturns:\nEmbedding lookup result.", "source": "github-repos"}
{"code": "def MakeHistFromList(t, name=''):\n    \n    hist = Hist(name=name)\n    [hist.Incr(x) for x in t]\n    return hist", "docstring": "Makes a histogram from an unsorted sequence of values.\n\nArgs:\nt: sequence of numbers\nname: string name for this histogram\n\nReturns:\nHist object", "source": "juraj-google-style"}
{"code": "def kill_all_processes(self, check_alive=True, allow_graceful=False):\n    if (ray_constants.PROCESS_TYPE_RAYLET in self.all_processes):\n        self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive, allow_graceful=allow_graceful)\n    for process_type in list(self.all_processes.keys()):\n        self._kill_process_type(process_type, check_alive=check_alive, allow_graceful=allow_graceful)", "docstring": "Kill all of the processes.\n\nNote that This is slower than necessary because it calls kill, wait,\nkill, wait, ... instead of kill, kill, ..., wait, wait, ...\n\nArgs:\ncheck_alive (bool): Raise an exception if any of the processes were\nalready dead.", "source": "codesearchnet"}
{"code": "def do_reset_ids(concatenated_meta_df, data_df, concat_direction):\n    \n    if concat_direction == \"horiz\":\n\n        \n        assert concatenated_meta_df.index.equals(data_df.columns), (\n            \"cids in concatenated_meta_df do not agree with cids in data_df.\")\n\n        \n        reset_ids_in_meta_df(concatenated_meta_df)\n\n        \n        \n        data_df.columns = pd.Index(concatenated_meta_df.index.values)\n\n    elif concat_direction == \"vert\":\n\n        \n        assert concatenated_meta_df.index.equals(data_df.index), (\n            \"rids in concatenated_meta_df do not agree with rids in data_df.\")\n\n        \n        reset_ids_in_meta_df(concatenated_meta_df)\n\n        \n        \n        data_df.index = pd.Index(concatenated_meta_df.index.values)", "docstring": "Reset ids in concatenated metadata and data dfs to unique integers and\nsave the old ids in a metadata column.\n\nNote that the dataframes are modified in-place.\n\nArgs:\nconcatenated_meta_df (pandas df)\ndata_df (pandas df)\nconcat_direction (string): 'horiz' or 'vert'\n\nReturns:\nNone (dfs modified in-place)", "source": "juraj-google-style"}
{"code": "def _manual_repartition(self, axis, repartition_func, **kwargs):\n        \n        func = self._prepare_method(repartition_func, **kwargs)\n        return self.data.manual_shuffle(axis, func)", "docstring": "This method applies all manual partitioning functions.\n\nArgs:\naxis: The axis to shuffle data along.\nrepartition_func: The function used to repartition data.\n\nReturns:\nA `BaseFrameManager` object.", "source": "juraj-google-style"}
{"code": "def __init__(self, source_dict, url, path, token):\n        \n        self._vault_url = url\n        self._path = path\n        self._token = token\n        super(VaultLoader, self).__init__(source_dict)", "docstring": "Initializer.\n\nArgs:\nsource_dict: used to initialize the class. Use constructors to read from Vault.\nurl: Vault url\npath: Vault path where secrets are stored\nvault_token: token (must have access to vault path)", "source": "juraj-google-style"}
{"code": "def run_parallel(self, para_func):\n    if self.timer:\n        start_timer = time.time()\n    with mp.Pool(self.num_processors) as pool:\n        print('start pool with {} processors: {} total processes.\\n'.format(self.num_processors, len(self.args)))\n        results = [pool.apply_async(para_func, arg) for arg in self.args]\n        out = [r.get() for r in results]\n        out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()}\n    if self.timer:\n        print('SNR calculation time:', (time.time() - start_timer))\n    return out", "docstring": "Run parallel calulation\n\nThis will run the parallel calculation on self.num_processors.\n\nArgs:\npara_func (obj): Function object to be used in parallel.\n\nReturns:\n(dict): Dictionary with parallel results.", "source": "codesearchnet"}
{"code": "def _encode_fhir_path_builder(self, builder: expressions.Builder) -> Optional[str]:\n    try:\n        sql_expression = self._bq_interpreter.encode(builder)\n    except Exception as e:\n        self._error_reporter.report_fhir_path_error(self._abs_path_invocation(builder), str(builder), self._error_message_for_exception(e))\n        return None\n    return sql_expression", "docstring": "Returns a Standard SQL translation of the constraint `fhir_path_expression`.\n\nIf an error is encountered during encoding, the associated error reporter\nwill be notified, and this method will return `None`.\n\nArgs:\nbuilder: Builder containing the information to be encoded to Standard SQL.\n\nReturns:\nA Standard SQL encoding of the constraint `fhir_path_expression` upon\nsuccessful completion. The SQL will evaluate to a single boolean\nindicating whether the constraint is satisfied.", "source": "github-repos"}
{"code": "def _read_metrics(repo, metrics, branch):\n    res = {}\n    for (out, typ, xpath) in metrics:\n        assert (out.scheme == 'local')\n        if (not typ):\n            typ = os.path.splitext(out.path.lower())[1].replace('.', '')\n        if out.use_cache:\n            open_fun = open\n            path = repo.cache.local.get(out.checksum)\n        else:\n            open_fun = repo.tree.open\n            path = out.path\n        try:\n            with open_fun(path) as fd:\n                metric = _read_metric(fd, typ=typ, xpath=xpath, rel_path=out.rel_path, branch=branch)\n        except IOError as e:\n            if (e.errno == errno.ENOENT):\n                logger.warning(NO_METRICS_FILE_AT_REFERENCE_WARNING.format(out.rel_path, branch))\n                metric = None\n            else:\n                raise\n        if (not metric):\n            continue\n        res[out.rel_path] = metric\n    return res", "docstring": "Read the content of each metric file and format it.\n\nArgs:\nmetrics (list): List of metric tuples\nbranch (str): Branch to look up for metrics.\n\nReturns:\nA dict mapping keys with metrics path name and content.\nFor example:\n\n{'metric.csv': (\"value_mse  deviation_mse   data_set\\n\"\n\"0.421601   0.173461        train\\n\"\n\"0.67528    0.289545        testing\\n\"\n\"0.671502   0.297848        validation\\n\")}", "source": "codesearchnet"}
{"code": "def check_response_code(response, expected_response_code):\n    if (response.status_code == expected_response_code):\n        pass\n    elif (response.status_code == RATE_LIMIT_RESPONSE_CODE):\n        raise RateLimitError(response)\n    else:\n        raise ApiError(response)", "docstring": "Check response code against the expected code; raise ApiError.\n\nChecks the requests.response.status_code against the provided expected\nresponse code (erc), and raises a ApiError if they do not match.\n\nArgs:\nresponse(requests.response): The response object returned by a request\nusing the requests package.\nexpected_response_code(int): The expected response code (HTTP response\ncode).\n\nRaises:\nApiError: If the requests.response.status_code does not match the\nprovided expected response code (erc).", "source": "codesearchnet"}
{"code": "async def send_message(\n        self, request: str, response_expected: bool, **kwargs: Any\n    ) -> Response:\n        \n        with async_timeout.timeout(self.timeout):\n            async with self.session.post(\n                self.endpoint, data=request, ssl=self.ssl\n            ) as response:\n                response_text = await response.text()\n                return Response(response_text, raw=response)", "docstring": "Transport the message to the server and return the response.\n\nArgs:\nrequest: The JSON-RPC request string.\nresponse_expected: Whether the request expects a response.\n\nReturns:\nA Response object.", "source": "juraj-google-style"}
{"code": "def _handle_failure_and_recovery(self, e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name):\n    if on_failure_fn:\n        on_failure_fn(e)\n    with self._cluster_update_lock:\n        self._cluster_due_for_update_or_finish.set()\n        self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)\n        if self._error_from_recovery:\n            try:\n                raise self._error_from_recovery\n            finally:\n                self._error_from_recovery = None\n        logging.info('Worker %s has been recovered.', worker_device_name)\n    if on_recovery_fn:\n        logging.info('Worker %s calling on_recovery_fn', worker_device_name)\n        with self.wait_on_failure(on_recovery_fn=on_recovery_fn, on_transient_failure_fn=on_transient_failure_fn, worker_device_name=worker_device_name):\n            on_recovery_fn()", "docstring": "Call failure fn, wait for cluster to recover, then call recovery fn.\n\nArgs:\ne: the Exception thrown during closure execution.\non_failure_fn: an optional function to run if preemption happens.\non_transient_failure_fn: an optional function to run if transient failure\nhappens.\non_recovery_fn: an optional function to run when a worker is recovered\nfrom preemption.\nworker_device_name: the device name of the worker instance that is passing\nthrough the failure.", "source": "github-repos"}
{"code": "def _GetDaysPerMonth(self, year, month):\n    \n    if month not in range(1, 13):\n      raise ValueError('Month value out of bounds.')\n\n    days_per_month = self._DAYS_PER_MONTH[month - 1]\n    if month == 2 and self._IsLeapYear(year):\n      days_per_month += 1\n\n    return days_per_month", "docstring": "Retrieves the number of days in a month of a specific year.\n\nArgs:\nyear (int): year e.g. 1970.\nmonth (int): month, where 1 represents January.\n\nReturns:\nint: number of days in the month.\n\nRaises:\nValueError: if the month value is out of bounds.", "source": "juraj-google-style"}
{"code": "def merge_json_fhir_string_into_proto(raw_json: str, target: message.Message, *, validate: bool=True, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> None:\n    json_value = load_json(raw_json)\n    merge_json_fhir_object_into_proto(json_value, target, validate=validate, default_timezone=default_timezone)", "docstring": "Merges the provided raw_json string into a target Message.\n\nArgs:\nraw_json: The JSON to parse and merge into target.\ntarget: The Message instance to merge raw_json into.\nvalidate: A Boolean value indicating if validation should be performed on\nthe resultant Message. Validation takes the form of ensuring that basic\nchecks such as cardinality guarantees, required field adherence, etc. are\nmet. Defaults to True.\ndefault_timezone: A string specifying the timezone string to use for time-\nlike FHIR data during parsing. Defaults to 'Z' for UTC.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that validation fails after\nparsing.", "source": "github-repos"}
{"code": "def parse_cscore(infile):\n    \n\n    cscore_dict = {}\n\n    with open(infile, 'r') as f:\n        for ll in f.readlines():\n            \n            if ll.lower().startswith('model1'):\n                l = ll.split()\n\n                cscore = l[1]\n                tmscore_full = l[2].split('+-')\n                tmscore = tmscore_full[0]\n                tmscore_err = tmscore_full[1]\n                rmsd_full = l[3].split('+-')\n                rmsd = rmsd_full[0]\n                rmsd_err = rmsd_full[1]\n\n                cscore_dict['c_score'] = float(cscore)\n                cscore_dict['tm_score'] = float(tmscore)\n                cscore_dict['tm_score_err'] = float(tmscore_err)\n                cscore_dict['rmsd'] = float(rmsd)\n                cscore_dict['rmsd_err'] = float(rmsd_err)\n\n    return cscore_dict", "docstring": "Parse the cscore file to return a dictionary of scores.\n\nArgs:\ninfile (str): Path to cscore\n\nReturns:\ndict: Dictionary of scores", "source": "juraj-google-style"}
{"code": "def process_attributes_of_node(attrs, node_name, class_type):\n    attrs['_nodes'] = {}\n    attrs['_linked_models'] = defaultdict(list)\n    attrs['_debug_linked_models'] = defaultdict(list)\n    attrs['_lazy_linked_models'] = defaultdict(list)\n    attrs['_fields'] = {}\n    attrs['_uniques'] = []\n    for (key, attr) in list(attrs.items()):\n        if (hasattr(attr, '__base__') and (getattr(attr.__base__, '_TYPE', '') in ['Node', 'ListNode'])):\n            attrs['_nodes'][key] = attrs[key]\n        else:\n            attr_type = getattr(attr, '_TYPE', '')\n            if (attr_type == 'Model'):\n                attrs[('%s_id' % key)] = ''\n                lnk_mdl_ins = attrs[key]\n                lnk = {'null': (lnk_mdl_ins.null or (class_type == 'ListNode')), 'link_source': True, 'mdl': lnk_mdl_ins.__class__, 'o2o': lnk_mdl_ins._is_one_to_one, 'm2m': (class_type == 'ListNode'), 'reverse': lnk_mdl_ins.reverse_name, 'verbose': lnk_mdl_ins.verbose_name, 'field': key, 'is_set': False}\n                attrs['_linked_models'][attr.__class__.__name__].append(lnk)\n                debug_lnk = lnk.copy()\n                debug_lnk['lnksrc'] = 'process_attributes_of_node'\n                attrs['_debug_linked_models'][attr.__class__.__name__].append(debug_lnk)\n            elif (attr_type == 'Field'):\n                attr.name = key\n                attrs['_fields'][key] = attr\n                if attr.unique:\n                    attrs['_uniques'].append(key)\n            elif (attr_type == 'Link'):\n                attrs[('%s_id' % key)] = ''\n                lzy_lnk = attrs[key]\n                attrs['_lazy_linked_models'][key].append({'from': node_name, 'to': lzy_lnk.link_to, 'o2o': lzy_lnk.one_to_one, 'verbose': lzy_lnk.verbose_name, 'reverse': lzy_lnk.reverse_name, 'field': key})", "docstring": "prepare the model fields, nodes and relations\n\nArgs:\nnode_name (str): name of the node we are currently processing\nattrs (dict): attribute dict\nclass_type (str): Type of class.\nCan be one of these: 'ListNode', 'Model', 'Node'", "source": "codesearchnet"}
{"code": "def serialize(self):\n    lines = []\n    for criterion in self.filter_criteria:\n        lines.append(criterion.name())\n        lines.append(criterion.serialize())\n    return '\\n'.join(lines)", "docstring": "Return a string representing the subview with all of its filter criteria.\n\nReturns:\nstr: String with subview definition.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.ListEntityTypes = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/ListEntityTypes',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.ListEntityTypesResponse.FromString,\n        )\n    self.GetEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/GetEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.GetEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,\n        )\n    self.CreateEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/CreateEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.CreateEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,\n        )\n    self.UpdateEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/UpdateEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.UpdateEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.EntityType.FromString,\n        )\n    self.DeleteEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/DeleteEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.DeleteEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.BatchUpdateEntityTypes = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntityTypes',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntityTypesRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.BatchDeleteEntityTypes = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntityTypes',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntityTypesRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.BatchCreateEntities = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchCreateEntities',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchCreateEntitiesRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.BatchUpdateEntities = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchUpdateEntities',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchUpdateEntitiesRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.BatchDeleteEntities = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.EntityTypes/BatchDeleteEntities',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_entity__type__pb2.BatchDeleteEntitiesRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def analyze(fqdn, result, argl, argd):\n    package = fqdn.split('.')[0]\n    if (package not in _methods):\n        _load_methods(package)\n    if ((_methods[package] is not None) and (fqdn in _methods[package])):\n        return _methods[package][fqdn](fqdn, result, *argl, **argd)", "docstring": "Analyzes the result from calling the method with the specified FQDN.\n\nArgs:\nfqdn (str): full-qualified name of the method that was called.\nresult: result of calling the method with `fqdn`.\nargl (tuple): positional arguments passed to the method call.\nargd (dict): keyword arguments passed to the method call.", "source": "codesearchnet"}
{"code": "def create_tensorboard_process(self):\n    port = 6006\n    for _ in range(100):\n        p = subprocess.Popen(['tensorboard', '--logdir', self.logdir, '--host', 'localhost', '--port', str(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        self.event.wait(5)\n        if p.poll():\n            port += 1\n        else:\n            return (port, p)\n    raise OSError('No available ports to start TensorBoard. Attempted all ports between 6006 and 6105')", "docstring": "Create a TensorBoard process.\n\nReturns:\ntuple: A tuple containing:\nint: The port number.\nprocess: The TensorBoard process.\n\nRaises:\nOSError: If no ports between 6006 and 6105 are available for starting TensorBoard.", "source": "codesearchnet"}
{"code": "def filter_children(self, ctype: ContentType = None) -> List[SchemaNode]:\n        \n        if ctype is None:\n            ctype = self.content_type()\n        return [c for c in self.children if\n                not isinstance(c, (RpcActionNode, NotificationNode)) and\n                c.content_type().value & ctype.value != 0]", "docstring": "Return receiver's children based on content type.\n\nArgs:\nctype: Content type.", "source": "juraj-google-style"}
{"code": "def _child(details):\n\t\n\n\t\n\tif isinstance(details, list):\n\n\t\t\n\t\treturn OptionsNode(details)\n\n\t\n\telif isinstance(details, dict):\n\n\t\t\n\t\tif '__array__' in details:\n\t\t\treturn ArrayNode(details)\n\n\t\t\n\t\telif '__hash__' in details:\n\t\t\treturn HashNode(details)\n\n\t\t\n\t\telif '__type__' in details:\n\n\t\t\t\n\t\t\tif isinstance(details['__type__'], (dict,list)):\n\t\t\t\treturn _child(details['__type__'])\n\n\t\t\t\n\t\t\telse:\n\t\t\t\treturn Node(details)\n\n\t\t\n\t\telse:\n\t\t\treturn Parent(details)\n\n\t\n\telif isinstance(details, basestring):\n\n\t\t\n\t\treturn Node(details)\n\n\t\n\telse:\n\t\traise TypeError('details')", "docstring": "Child\n\nA private function to figure out the child node type\n\nArguments:\ndetails {dict} -- A dictionary describing a data point\n\nReturns:\n_NodeInterface", "source": "juraj-google-style"}
{"code": "def reformat_to_pretty_xml(doc_xml):\n    \n    assert isinstance(doc_xml, str)\n    dom_obj = xml.dom.minidom.parseString(doc_xml)\n    pretty_xml = dom_obj.toprettyxml(indent='  ')\n    \n    return re.sub(r'^\\s*$\\n', r'', pretty_xml, flags=re.MULTILINE)", "docstring": "Pretty print XML doc.\n\nArgs:\ndoc_xml : str\nWell formed XML doc\n\nReturns:\nstr: Pretty printed XML doc", "source": "juraj-google-style"}
{"code": "def filter_moving_count(self: EventSetOrNode, window_length: Duration) -> EventSetOrNode:\n    from temporian.core.operators.filter_moving_count import filter_moving_count\n    return filter_moving_count(self, window_length=window_length)", "docstring": "Filters out events such that no more than one output event is within\na tailing time window of `window_length`.\n\nFiltering is applied in chronological order: An event received at time t\nis filtered out if there is a non-filtered out event in\n(t-window_length, t].\n\nThis operator is different from `(evset.moving_count(window_length)\n== 0).filter()`. In `filter_moving_count` a filtered event does not\nblock following events.\n\nUsage example:\n```python\n>>> a = tp.event_set(timestamps=[1, 2, 3])\n>>> b = a.filter_moving_count(window_length=1.5)\n>>> b\nindexes: []\nfeatures: []\nevents:\n(2 events):\ntimestamps: [1. 3.]\n...\n\n```\n\nReturns:\nEventSet without features with the filtered events.", "source": "github-repos"}
{"code": "def _pool(inputs, initial_value, reduce_fn, pool_size, strides=None, padding='valid'):\n    if padding not in ('same', 'valid'):\n        raise ValueError(f\"Invalid padding '{padding}', must be 'same' or 'valid'.\")\n    padding = padding.upper()\n    return np.array(lax.reduce_window(inputs, initial_value, reduce_fn, pool_size, strides, padding))", "docstring": "Helper function to define pooling functions.\n\nArgs:\ninputs: input data of shape `N+2`.\ninitial_value: the initial value for the reduction.\nreduce_fn: a reduce function of the form `(T, T) -> T`.\npool_size: a sequence of `N` integers, representing the window size to\nreduce over.\nstrides: a sequence of `N` integers, representing the inter-window\nstrides (default: `(1, ..., 1)`).\npadding: either the string `same` or `valid`.\n\nReturns:\nThe output of the reduction for each window slice.", "source": "github-repos"}
{"code": "def parse_gene_panel(path, institute='cust000', panel_id='test', panel_type='clinical', date=datetime.now(), \n                     version=1.0, display_name=None, genes = None):\n    \n    LOG.info(\"Parsing gene panel %s\", panel_id)\n    gene_panel = {}\n\n    gene_panel['path'] = path\n    gene_panel['type'] = panel_type\n    gene_panel['date'] = date\n    gene_panel['panel_id'] = panel_id\n    gene_panel['institute'] = institute\n    version = version or 1.0\n    gene_panel['version'] = float(version)\n    gene_panel['display_name'] = display_name or panel_id\n\n    if not path:\n        panel_handle = genes\n    else:\n        panel_handle = get_file_handle(gene_panel['path'])\n    gene_panel['genes'] = parse_genes(gene_lines=panel_handle)\n\n    return gene_panel", "docstring": "Parse the panel info and return a gene panel\n\nArgs:\npath(str): Path to panel file\ninstitute(str): Name of institute that owns the panel\npanel_id(str): Panel id\ndate(datetime.datetime): Date of creation\nversion(float)\ndisplay_name(str): Option to have a long name\n\nReturns:\ngene_panel(dict)", "source": "juraj-google-style"}
{"code": "def patch(self, payload, append_to_arrays=True):\n    if (not isinstance(payload, dict)):\n        raise ValueError(\"The 'payload' parameter must be provided a dictionary object.\")\n    payload = self.__class__.set_id_in_fkeys(payload)\n    if append_to_arrays:\n        for key in payload:\n            val = payload[key]\n            if (type(val) == list):\n                val.extend(getattr(self, key))\n                payload[key] = list(set(val))\n    payload = self.check_boolean_fields(payload)\n    payload = self.__class__.add_model_name_to_payload(payload)\n    self.debug_logger.debug('PATCHING payload {}'.format(json.dumps(payload, indent=4)))\n    res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)\n    self.write_response_html_to_file(res, 'bob.html')\n    res.raise_for_status()\n    json_res = res.json()\n    self.debug_logger.debug('Success')\n    self.attrs = json_res\n    return json_res", "docstring": "Patches current record and updates the current instance's 'attrs'\nattribute to reflect the new changes.\n\nArgs:\npayload - hash. This will be JSON-formatted prior to sending the request.\n\nReturns:\n`dict`. The JSON formatted response.\n\nRaises:\n`requests.exceptions.HTTPError`: The status code is not ok.", "source": "codesearchnet"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        tstream = BytearrayStream()\n\n        self.application_namespace.write(tstream, kmip_version=kmip_version)\n        self.application_data.write(tstream, kmip_version=kmip_version)\n\n        self.length = tstream.length()\n        super(ApplicationSpecificInformation, self).write(\n            ostream,\n            kmip_version=kmip_version\n        )\n        ostream.write(tstream.buffer)", "docstring": "Write the data encoding the ApplicationSpecificInformation object to a\nstream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def add_annotation(self, state_or_vector, text, **kwargs):\n    if (isinstance(state_or_vector, (list, np.ndarray, tuple)) and (len(state_or_vector) == 3)):\n        vec = state_or_vector\n    else:\n        raise Exception(('Position needs to be specified by a qubit ' + 'state or a 3D vector.'))\n    self.annotations.append({'position': vec, 'text': text, 'opts': kwargs})", "docstring": "Add a text or LaTeX annotation to Bloch sphere,\nparametrized by a qubit state or a vector.\n\nArgs:\nstate_or_vector (array_like):\nPosition for the annotation.\nQobj of a qubit or a vector of 3 elements.\ntext (str):\nAnnotation text.\nYou can use LaTeX, but remember to use raw string\ne.g. r\"$\\\\langle x \\\\rangle$\"\nor escape backslashes\ne.g. \"$\\\\\\\\langle x \\\\\\\\rangle$\".\n**kwargs:\nOptions as for mplot3d.axes3d.text, including:\nfontsize, color, horizontalalignment, verticalalignment.\nRaises:\nException: If input not array_like or tuple.", "source": "codesearchnet"}
{"code": "def put(self, resource_json: Dict[str, Any], parent_bundle: Optional[Dict[str, Any]]=None) -> None:\n    if parent_bundle is None:\n        self.resources_by_uri[resource_json['url']] = resource_json\n    else:\n        self.resources_by_uri[resource_json['url']] = parent_bundle", "docstring": "Puts the given resource into this collection.\n\nAdds the resource represented by `resource_json` found inside\n`parent_bundle` into this collection for subsequent lookup via the Get\nmethod. `parent_bundle` may be None if `resource_json` is not located inside\na bundle.\n\nArgs:\nresource_json: The JSON object representing the resource.\nparent_bundle: The bundle `resource_json` is located inside, if any.", "source": "github-repos"}
{"code": "def extract_images(self, f):\n        \n        print('Extracting', f.name)\n        with gzip.GzipFile(fileobj=f) as bytestream:\n            magic = self._read32(bytestream)\n            if magic != 2051:\n                raise ValueError('Invalid magic number %d in MNIST image file: %s' %\n                                 (magic, f.name))\n            num_images = self._read32(bytestream)\n            rows = self._read32(bytestream)\n            cols = self._read32(bytestream)\n            buf = bytestream.read(rows * cols * num_images)\n            data = np.frombuffer(buf, dtype=np.uint8)\n            data = data.reshape(num_images, rows, cols, 1)\n            return data", "docstring": "Extract the images into a 4D uint8 numpy array [index, y, x, depth].\nArgs:\nf: A file object that can be passed into a gzip reader.\nReturns:\ndata: A 4D unit8 numpy array [index, y, x, depth].\nRaises:\nValueError: If the bytestream does not start with 2051.", "source": "juraj-google-style"}
{"code": "def get_stream_action_type(stream_arn):\n    stream_type_map = {'kinesis': awacs.kinesis.Action, 'dynamodb': awacs.dynamodb.Action}\n    stream_type = stream_arn.split(':')[2]\n    try:\n        return stream_type_map[stream_type]\n    except KeyError:\n        raise ValueError((\"Invalid stream type '%s' in arn '%s'\" % (stream_type, stream_arn)))", "docstring": "Returns the awacs Action for a stream type given an arn\n\nArgs:\nstream_arn (str): The Arn of the stream.\n\nReturns:\n:class:`awacs.aws.Action`: The appropriate stream type awacs Action\nclass\n\nRaises:\nValueError: If the stream type doesn't match kinesis or dynamodb.", "source": "codesearchnet"}
{"code": "def __fill_buffer(self, size=0):\n    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)\n    self.__buffer = fetch_data(self.__blob_key, self.__position, ((self.__position + read_size) - 1))\n    self.__buffer_position = 0\n    self.__eof = (len(self.__buffer) < read_size)", "docstring": "Fills the internal buffer.\n\nArgs:\nsize: Number of bytes to read. Will be clamped to\n[self.__buffer_size, MAX_BLOB_FETCH_SIZE].", "source": "codesearchnet"}
{"code": "def _peek(self, chars=1):\n        \n        line = self._socket.recv(chars, socket.MSG_PEEK)\n        logger.debug('Server sent (peek): ' + line.rstrip())\n        return line", "docstring": "Peek at the data in the server response.\n\nPeeking should only be done when the response can be predicted.\nMake sure that the socket will not block by requesting too\nmuch data from it while peeking.\n\nArgs:\nchars -- the number of characters to peek.", "source": "juraj-google-style"}
{"code": "def subgroup_tile(cls, tile_assignment, subgroup_modes):\n    if not isinstance(tile_assignment, _np.ndarray):\n        raise TypeError('SubgroupTile assignment must be of type np.ndarray')\n    if not isinstance(subgroup_modes, list):\n        raise TypeError('subgroup_modes in subgroup manual must be of type list')\n    if len(tile_assignment.shape) < len(subgroup_modes):\n        raise TypeError('SubgroupTile assignment must have rank larger than length of subgroup_modes')\n    for sharding_type in subgroup_modes:\n        if sharding_type not in [xla_data_pb2.OpSharding.REPLICATED, xla_data_pb2.OpSharding.MANUAL]:\n            raise TypeError('Each sharding_type in subgroup_modes in subgroup manual must be of type xla_data_pb2.OpSharding.REPLICATED or xla_data_pb2.OpSharding.MANUAL')\n    dims = list(tile_assignment.shape)\n    flattened_devices = tile_assignment.reshape(-1, order='C')\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices), last_tile_dims=list(subgroup_modes)))", "docstring": "Returns a subgroup manual sharding attribute.\n\nThis is similar to tile(), but tile_assignment has one or more dimension\nthan the tensor, and subgroup_modes define the sharding types in the last\ndimensions of tile_assignment.\n\nArgs:\ntile_assignment: An np.ndarray describing the topology of the tiling and\nwhich device will compute which part of the topology.\nsubgroup_modes: sharding types for the dimension more than the tensor\nshape rank.\n\nRaises:\nTypeError: tile_assignment was not of np.array type or subgroup_modes\nhas unsupported sharding type.", "source": "github-repos"}
{"code": "def all_tokens(self, delimiter=' '):\n    tokens = set()\n    for label in self:\n        tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))\n    return tokens", "docstring": "Return a list of all tokens occurring in the label-list.\n\nArgs:\ndelimiter (str): The delimiter used to split labels into tokens\n(see :meth:`audiomate.annotations.Label.tokenized`).\n\nReturns:\n:class:`set`: A set of distinct tokens.", "source": "codesearchnet"}
{"code": "def initialize(self):\n    return self._initializer", "docstring": "Initialize underlying iterators.\n\nReturns:\nA list of any initializer ops that should be run.", "source": "github-repos"}
{"code": "def residual_block_v1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):\n    if backend.image_data_format() == 'channels_last':\n        bn_axis = 3\n    else:\n        bn_axis = 1\n    if conv_shortcut:\n        shortcut = layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(x)\n        shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(shortcut)\n    else:\n        shortcut = x\n    x = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)\n    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x)\n    x = layers.Activation('relu', name=name + '_1_relu')(x)\n    x = layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv')(x)\n    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_2_bn')(x)\n    x = layers.Activation('relu', name=name + '_2_relu')(x)\n    x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)\n    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_3_bn')(x)\n    x = layers.Add(name=name + '_add')([shortcut, x])\n    x = layers.Activation('relu', name=name + '_out')(x)\n    return x", "docstring": "A residual block for ResNet*_v1.\n\nArgs:\nx: Input tensor.\nfilters: No of filters in the bottleneck layer.\nkernel_size: Kernel size of the bottleneck layer. Defaults to `3`.\nstride: Stride of the first layer. Defaults to `1`.\nconv_shortcut: Use convolution shortcut if `True`, otherwise\nuse identity shortcut. Defaults to `True`\nname(optional): Name of the block\n\nReturns:\nOutput tensor for the residual block.", "source": "github-repos"}
{"code": "def reorder_resources(self, resource_ids, hxl_update=True):\n    dataset_id = self.data.get('id')\n    if (not dataset_id):\n        raise HDXError('Dataset has no id! It must be read, created or updated first.')\n    data = {'id': dataset_id, 'order': resource_ids}\n    self._write_to_hdx('reorder', data, 'package_id')\n    if hxl_update:\n        self.hxl_update()", "docstring": "Reorder resources in dataset according to provided list.\nIf only some resource ids are supplied then these are\nassumed to be first and the other resources will stay in\ntheir original order.\n\nArgs:\nresource_ids (List[str]): List of resource ids\nhxl_update (bool): Whether to call package_hxl_update. Defaults to True.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def merge_and_fit(self, segment):\n        \n        self.points = sort_segment_points(self.points, segment.points)\n        return self", "docstring": "Merges another segment with this one, ordering the points based on a\ndistance heuristic\n\nArgs:\nsegment (:obj:`Segment`): Segment to merge with\nReturns:\n:obj:`Segment`: self", "source": "juraj-google-style"}
{"code": "def remove_item(self, item):\n        \n        for idx, _item in enumerate(self.items):\n            if item == _item:\n                del self.items[idx]\n                return True\n        return False", "docstring": "Remove the specified item from the menu.\n\nArgs:\nitem (MenuItem): the item to be removed.\n\nReturns:\nbool: True if the item was removed; False otherwise.", "source": "juraj-google-style"}
{"code": "def __expand_meta_datas(meta_datas, meta_datas_expanded):\n    if isinstance(meta_datas, dict):\n        meta_datas_expanded.append(meta_datas)\n    elif isinstance(meta_datas, list):\n        for meta_data in meta_datas:\n            __expand_meta_datas(meta_data, meta_datas_expanded)", "docstring": "expand meta_datas to one level\n\nArgs:\nmeta_datas (dict/list): maybe in nested format\n\nReturns:\nlist: expanded list in one level\n\nExamples:\n>>> meta_datas = [\n[\ndict1,\ndict2\n],\ndict3\n]\n>>> meta_datas_expanded = []\n>>> __expand_meta_datas(meta_datas, meta_datas_expanded)\n>>> print(meta_datas_expanded)\n[dict1, dict2, dict3]", "source": "codesearchnet"}
{"code": "def UpdateChainAndProcess(self, parser_mediator, registry_key, **kwargs):\n    parser_mediator.AppendToParserChain(self)\n    try:\n        self.Process(parser_mediator, registry_key, **kwargs)\n    finally:\n        parser_mediator.PopFromParserChain()", "docstring": "Updates the parser chain and processes a Windows Registry key or value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nRaises:\nValueError: If the Windows Registry key is not set.", "source": "codesearchnet"}
{"code": "def draw(self, filename, color=True):\n        \n        verify_dependencies(['pgv'])\n        if not hasattr(self, '_results'):\n            raise RuntimeError(\"Graph cannot be drawn before it is executed. \"\n                               \"Try calling run() first.\")\n\n        g = pgv.AGraph(directed=True)\n        g.node_attr['colorscheme'] = 'set312'\n\n        for elem in self._results:\n            if not hasattr(elem, 'history'):\n                continue\n            log = elem.history\n\n            while log:\n                \n                source_from = log.parent[6] if log.parent else ''\n                s_node = hash((source_from, log[2]))\n                s_color = stim_list.index(log[2])\n                s_color = s_color % 12 + 1\n\n                t_node = hash((log[6], log[7]))\n                t_style = 'filled,' if color else ''\n                t_style += 'dotted' if log.implicit else ''\n                if log[6].endswith('Extractor'):\n                    t_color = '\n                elif log[6].endswith('Filter'):\n                    t_color = '\n                else:\n                    t_color = '\n\n                r_node = hash((log[6], log[5]))\n                r_color = stim_list.index(log[5])\n                r_color = r_color % 12 + 1\n\n                \n                if color:\n                    g.add_node(s_node, label=log[2], shape='ellipse',\n                               style='filled', fillcolor=s_color)\n                    g.add_node(t_node, label=log[6], shape='box',\n                               style=t_style, fillcolor=t_color)\n                    g.add_node(r_node, label=log[5], shape='ellipse',\n                               style='filled', fillcolor=r_color)\n                else:\n                    g.add_node(s_node, label=log[2], shape='ellipse')\n                    g.add_node(t_node, label=log[6], shape='box',\n                               style=t_style)\n                    g.add_node(r_node, label=log[5], shape='ellipse')\n\n                \n                g.add_edge(s_node, t_node, style=t_style)\n                g.add_edge(t_node, r_node, style=t_style)\n                log = log.parent\n\n        g.draw(filename, prog='dot')", "docstring": "Render a plot of the graph via pygraphviz.\n\nArgs:\nfilename (str): Path to save the generated image to.\ncolor (bool): If True, will color graph nodes based on their type,\notherwise will draw a black-and-white graph.", "source": "juraj-google-style"}
{"code": "def update_script(self, script_body):\n        \n        uri = \"{}/script\".format(self.data['uri'])\n\n        return self._helper.update(script_body, uri=uri)", "docstring": "Updates the configuration script of the enclosure-group with the specified URI.\n\nArgs:\nid_or_uri: Resource id or resource uri.\nscript_body:  Configuration script.\n\nReturns:\ndict: Updated enclosure group.", "source": "juraj-google-style"}
{"code": "def map(self, map_fn: Callable[..., _Tout], *trees: Tree[_Tin], is_leaf: Optional[LeafFn]=None) -> Tree[_Tout]:\n    return self.backend.map(map_fn, *trees, is_leaf=is_leaf)", "docstring": "Same as `tree.map_structure`.\n\nArgs:\nmap_fn: Worker function\n*trees: Nested input to pass to the `map_fn`\nis_leaf: Don't recurse into leaf if `is_leaf(node)` is `True`\n\nReturns:\nThe nested structure after `map_fn` has been applied.", "source": "github-repos"}
{"code": "def fit_effective_mass(distances, energies, parabolic=True):\n    if parabolic:\n        fit = np.polyfit(distances, energies, 2)\n        c = (2 * fit[0])\n    else:\n\n        def f(x, alpha, d):\n            top = (np.sqrt(((((4 * alpha) * d) * (x ** 2)) + 1)) - 1)\n            bot = (2 * alpha)\n            return (top / bot)\n        bounds = ((1e-08, (- np.inf)), (np.inf, np.inf))\n        (popt, _) = curve_fit(f, distances, energies, p0=[1.0, 1.0], bounds=bounds)\n        c = (2 * popt[1])\n    eff_mass = (((angstrom_to_bohr ** 2) / eV_to_hartree) / c)\n    return eff_mass", "docstring": "Fit the effective masses using either a parabolic or nonparabolic fit.\n\nArgs:\ndistances (:obj:`numpy.ndarray`): The x-distances between k-points in\nreciprocal Angstroms, normalised to the band extrema.\nenergies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the\neigenvalue of the band extrema.\nparabolic (:obj:`bool`, optional): Use a parabolic fit of the band\nedges. If ``False`` then nonparabolic fitting will be attempted.\nDefaults to ``True``.\n\nReturns:\nfloat: The effective mass in units of electron rest mass, :math:`m_0`.", "source": "codesearchnet"}
{"code": "def _GetAttributeContainerByIndex(self, container_type, index):\n    sequence_number = (index + 1)\n    query = 'SELECT _data FROM {0:s} WHERE rowid = {1:d}'.format(container_type, sequence_number)\n    try:\n        self._cursor.execute(query)\n    except sqlite3.OperationalError as exception:\n        raise IOError('Unable to query storage file with error: {0!s}'.format(exception))\n    row = self._cursor.fetchone()\n    if row:\n        identifier = identifiers.SQLTableIdentifier(container_type, sequence_number)\n        if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB):\n            serialized_data = zlib.decompress(row[0])\n        else:\n            serialized_data = row[0]\n        if self._storage_profiler:\n            self._storage_profiler.Sample('read', container_type, len(serialized_data), len(row[0]))\n        attribute_container = self._DeserializeAttributeContainer(container_type, serialized_data)\n        attribute_container.SetIdentifier(identifier)\n        return attribute_container\n    count = self._CountStoredAttributeContainers(container_type)\n    index -= count\n    serialized_data = self._GetSerializedAttributeContainerByIndex(container_type, index)\n    attribute_container = self._DeserializeAttributeContainer(container_type, serialized_data)\n    if attribute_container:\n        identifier = identifiers.SQLTableIdentifier(container_type, sequence_number)\n        attribute_container.SetIdentifier(identifier)\n    return attribute_container", "docstring": "Retrieves a specific attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nindex (int): attribute container index.\n\nReturns:\nAttributeContainer: attribute container or None if not available.\n\nRaises:\nIOError: when there is an error querying the storage file.\nOSError: when there is an error querying the storage file.", "source": "codesearchnet"}
{"code": "def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch, message_type, message_format, sanitize, force_disordered=False):\n    if force_disordered:\n        assert not sanitize\n        extra_fields = test_example_pb2.ExtraFields()\n        extra_fields.string_value = 'IGNORE ME'\n        extra_fields.bool_value = False\n        extra_msg = extra_fields.SerializeToString()\n        batch = [extra_msg + msg for msg in batch]\n    batch = np.array(batch, dtype=object)\n    batch = np.reshape(batch, batch_shape)\n    field_names = [f.name for f in fields]\n    output_types = [f.dtype for f in fields]\n    with self.cached_session() as sess:\n        sizes, vtensor = self._decode_module.decode_proto(batch, message_type=message_type, field_names=field_names, output_types=output_types, message_format=message_format, sanitize=sanitize)\n        vlist = sess.run([sizes] + vtensor)\n        sizes = vlist[0]\n        value_tensors = vlist[1:]\n        self.assertTrue(np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))\n        self.assertEqual(len(sizes.flat), len(case_sizes))\n        self.assertTrue(np.all(sizes.flat == np.array(case_sizes, dtype=np.int32)))\n        field_dict = dict(zip(field_names, value_tensors))\n        self._compareProtos(batch_shape, sizes, fields, field_dict)", "docstring": "Run decode tests on a batch of messages.\n\nArgs:\nfields: list of test_example_pb2.FieldSpec (types and expected values)\ncase_sizes: expected sizes array\nbatch_shape: the shape of the input tensor of serialized messages\nbatch: list of serialized messages\nmessage_type: descriptor name for messages\nmessage_format: format of messages, 'text' or 'binary'\nsanitize: whether to sanitize binary protobuf inputs\nforce_disordered: whether to force fields encoded out of order.", "source": "github-repos"}
{"code": "def ReadManyFromPath(filepath):\n  \n  with io.open(filepath, mode=\"r\", encoding=\"utf-8\") as filedesc:\n    return ReadManyFromFile(filedesc)", "docstring": "Reads a Python object stored in a specified YAML file.\n\nArgs:\nfilepath: A filepath to the YAML file.\n\nReturns:\nA Python data structure corresponding to the YAML in the given file.", "source": "juraj-google-style"}
{"code": "def console_print(con: tcod.console.Console, x: int, y: int, fmt: str) -> None:\n    \n    lib.TCOD_console_printf(_console(con), x, y, _fmt(fmt))", "docstring": "Print a color formatted string on a console.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\nfmt (AnyStr): A unicode or bytes string optionaly using color codes.\n\n.. deprecated:: 8.5\nUse :any:`Console.print_` instead.", "source": "juraj-google-style"}
{"code": "def set_clbit(self, clbit, element):\n        \n        self.clbit_layer[self.cregs.index(clbit)] = element", "docstring": "Sets the clbit to the element\nArgs:\nclbit (cbit): Element of self.cregs.\nelement (DrawElement): Element to set in the clbit", "source": "juraj-google-style"}
{"code": "def auto_shard_dataset(dataset, num_shards, index, num_replicas_in_sync=None):\n    if isinstance(dataset, distribute_types.DistributedDatasetInterface):\n        return dataset.auto_shard(num_shards, index)\n    if dataset.options().experimental_distribute.auto_shard_policy != AutoShardPolicy.OFF:\n        if num_replicas_in_sync is None:\n            num_replicas_in_sync = 1\n        if isinstance(dataset, data_types.DatasetV1):\n            return distribute._AutoShardDatasetV1(dataset, num_shards, index, num_replicas_in_sync)\n        else:\n            return distribute._AutoShardDataset(dataset, num_shards, index, num_replicas_in_sync)\n    else:\n        return dataset", "docstring": "Shard the input pipeline by sharding the underlying list of files.\n\nArgs:\ndataset: A `tf.data.Dataset` instance, typically the result of a bunch of\ndataset transformations.\nnum_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\nshards operating in parallel. Same usage as in `tf.data.Dataset.shard`.\nindex: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\nSame usage as in `tf.data.Dataset.shard`.\nnum_replicas_in_sync: An integer representing the total number of replicas\nacross all workers. This is used in the rewrite when sharding by data.\n\nReturns:\nA modified `Dataset` obtained by updating the pipeline sharded by the\nfiles. The input dataset will be returned if we cannot automatically\ndetermine a good way to shard the input dataset.", "source": "github-repos"}
{"code": "def _maybe_download_corpora(tmp_dir, dataset_split):\n  \n  cnn_filename = \"cnn_stories.tgz\"\n  cnn_finalpath = os.path.join(tmp_dir, \"cnn/stories/\")\n  dailymail_filename = \"dailymail_stories.tgz\"\n  dailymail_finalpath = os.path.join(tmp_dir, \"dailymail/stories/\")\n  if not tf.gfile.Exists(cnn_finalpath):\n    cnn_file = generator_utils.maybe_download_from_drive(\n        tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n    with tarfile.open(cnn_file, \"r:gz\") as cnn_tar:\n      cnn_tar.extractall(tmp_dir)\n  if not tf.gfile.Exists(dailymail_finalpath):\n    dailymail_file = generator_utils.maybe_download_from_drive(\n        tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)\n    with tarfile.open(dailymail_file, \"r:gz\") as dailymail_tar:\n      dailymail_tar.extractall(tmp_dir)\n\n  cnn_files = tf.gfile.Glob(cnn_finalpath + \"*\")\n  dailymail_files = tf.gfile.Glob(dailymail_finalpath + \"*\")\n  all_files = cnn_files + dailymail_files\n\n  if dataset_split == problem.DatasetSplit.TRAIN:\n    urls_path = generator_utils.maybe_download(tmp_dir, \"all_train.txt\",\n                                               _TRAIN_URLS)\n  elif dataset_split == problem.DatasetSplit.EVAL:\n    urls_path = generator_utils.maybe_download(tmp_dir, \"all_val.txt\",\n                                               _DEV_URLS)\n  else:\n    urls_path = generator_utils.maybe_download(tmp_dir, \"all_test.txt\",\n                                               _TEST_URLS)\n\n  return all_files, urls_path", "docstring": "Download corpora if necessary and unzip them.\n\nArgs:\ntmp_dir: directory containing dataset.\ndataset_split: whether we're in train/dev/test mode.\n\nReturns:\nList of all files generated and path to file containing\ntrain/dev/test split info.", "source": "juraj-google-style"}
{"code": "def AddAnalysisReport(self, analysis_report):\n    \n    self._RaiseIfNotWritable()\n\n    analysis_report = self._PrepareAttributeContainer(analysis_report)\n\n    self.analysis_reports.append(analysis_report)", "docstring": "Adds an analysis report.\n\nArgs:\nanalysis_report (AnalysisReport): analysis report.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def CompileFilter(self, filter_expression):\n    filter_parser = pfilter.BaseParser(filter_expression).Parse()\n    matcher = filter_parser.Compile(pfilter.PlasoAttributeFilterImplementation)\n    self._filter_expression = filter_expression\n    self._matcher = matcher", "docstring": "Compiles the filter expression.\n\nThe filter expression contains an object filter expression.\n\nArgs:\nfilter_expression (str): filter expression.\n\nRaises:\nParseError: if the filter expression cannot be parsed.", "source": "codesearchnet"}
{"code": "def history(self, image):\n    res = self._get(self._url('/images/{0}/history', image))\n    return self._result(res, True)", "docstring": "Show the history of an image.\n\nArgs:\nimage (str): The image to show history for\n\nReturns:\n(str): The history of the image\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def run(self, data, max_epochs=1):\n        \n\n        self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})\n\n        try:\n            self._logger.info(\"Engine run starting with max_epochs={}.\".format(max_epochs))\n            start_time = time.time()\n            self._fire_event(Events.STARTED)\n            while self.state.epoch < max_epochs and not self.should_terminate:\n                self.state.epoch += 1\n                self._fire_event(Events.EPOCH_STARTED)\n                hours, mins, secs = self._run_once_on_dataset()\n                self._logger.info(\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\", self.state.epoch, hours, mins, secs)\n                if self.should_terminate:\n                    break\n                self._fire_event(Events.EPOCH_COMPLETED)\n\n            self._fire_event(Events.COMPLETED)\n            time_taken = time.time() - start_time\n            hours, mins, secs = _to_hours_mins_secs(time_taken)\n            self._logger.info(\"Engine run complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n\n        except BaseException as e:\n            self._logger.error(\"Engine run is terminating due to exception: %s.\", str(e))\n            self._handle_exception(e)\n\n        return self.state", "docstring": "Runs the process_function over the passed data.\n\nArgs:\ndata (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).\nmax_epochs (int, optional): max epochs to run for (default: 1).\n\nReturns:\nState: output state.", "source": "juraj-google-style"}
{"code": "def union_of_bboxes(height, width, bboxes, erosion_rate=0.0, to_int=False):\n    \n    x1, y1 = width, height\n    x2, y2 = 0, 0\n    for b in bboxes:\n        w, h = b[2] - b[0], b[3] - b[1]\n        lim_x1, lim_y1 = b[0] + erosion_rate * w, b[1] + erosion_rate * h\n        lim_x2, lim_y2 = b[2] - erosion_rate * w, b[3] - erosion_rate * h\n        x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])\n        x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])\n    return x1, y1, x2, y2", "docstring": "Calculate union of bounding boxes.\n\nArgs:\nheight (float): Height of image or space.\nwidth (float): Width of image or space.\nbboxes (list): List like bounding boxes. Format is `[x_min, y_min, x_max, y_max]`.\nerosion_rate (float): How much each bounding box can be shrinked, useful for erosive cropping.\nSet this in range [0, 1]. 0 will not be erosive at all, 1.0 can make any bbox to lose its volume.", "source": "juraj-google-style"}
{"code": "def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None) -> None:\n    if to_proto and (not callable(to_proto)):\n        raise TypeError('to_proto must be callable.')\n    if from_proto and (not callable(from_proto)):\n        raise TypeError('from_proto must be callable.')\n    _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name)", "docstring": "Registers `to_proto` and `from_proto` functions for collection_name.\n\n`to_proto` function converts a Python object to the corresponding protocol\nbuffer, and returns the protocol buffer.\n\n`from_proto` function converts protocol buffer into a Python object, and\nreturns the object..\n\nArgs:\ncollection_name: Name of the collection.\nproto_type: Protobuf type, such as `saver_pb2.SaverDef`,\n`variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..\nto_proto: Function that implements Python object to protobuf conversion.\nfrom_proto: Function that implements protobuf to Python object conversion.", "source": "github-repos"}
{"code": "def _check_root_tag(self, root):\n    supported = self.supported_tags()\n    if (root.tag in supported):\n        return\n    error = 'Document root element ({0}) not one of ({1})'\n    raise UnsupportedRootElementError(message=error.format(root.tag, supported), expected=supported, found=root.tag)", "docstring": "Check that the XML element tree has a supported root element.\n\nArgs:\nroot (etree.Element)\n\nRaises:\nUnsupportedRootElementError", "source": "codesearchnet"}
{"code": "def get_client(self, name):\n        \n        mech = self.get(name)\n        return mech if isinstance(mech, ClientMechanism) else None", "docstring": "Like :meth:`.get`, but only mechanisms inheriting\n:class:`ClientMechanism` will be returned.\n\nArgs:\nname: The SASL mechanism name.\n\nReturns:\nThe mechanism object or ``None``", "source": "juraj-google-style"}
{"code": "def check_mailfy(self, query, kwargs={}):\n        \n        data = self.launchQueryForMode(query=query, mode=\"mailfy\")\n        if self._somethingFound(data, mode=\"mailfy\"):\n            return data\n        return None", "docstring": "Verifying a mailfy query in this platform.\n\nThis might be redefined in any class inheriting from Platform. The only\ncondition is that any of this should return a dictionary as defined.\n\nArgs:\n-----\nquery: The element to be searched.\nkwargs: Dictionary with extra parameters. Just in case.\n\nReturn:\n-------\nReturns the collected data if exists or None if not.", "source": "juraj-google-style"}
{"code": "def _run(broker, graph=None, root=None, context=None, inventory=None):\n    if (not root):\n        context = (context or HostContext)\n        broker[context] = context()\n        return dr.run(graph, broker=broker)\n    if os.path.isdir(root):\n        return process_dir(broker, root, graph, context, inventory=inventory)\n    else:\n        with extract(root) as ex:\n            return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)", "docstring": "run is a general interface that is meant for stand alone scripts to use\nwhen executing insights components.\n\nArgs:\nroot (str): None will causes a host collection in which command and\nfile specs are run. A directory or archive path will cause\ncollection from the directory or archive, and only file type specs\nor those that depend on `insights.core.context.HostArchiveContext`\nwill execute.\ncomponent (function or class): The component to execute. Will only execute\nthe component and its dependency graph. If None, all components with\nmet dependencies will execute.\n\nReturns:\nbroker: object containing the result of the evaluation.", "source": "codesearchnet"}
{"code": "def get_params_and_defaults(param_list, db):\n    \n    return [[p, d] for p, d in db.get_all_values_of_all_params().items()]", "docstring": "Deduce [parameter, default] pairs from simulations available in the db.\n\nArgs:\nparam_list (list): List of parameters to query for.\ndb (DatabaseManager): Database where to query for defaults.", "source": "juraj-google-style"}
{"code": "def _dedup_strings(device_strs):\n    new_device_strs = []\n    for device_str, vals in itertools.groupby(device_strs):\n        num = len(list(vals))\n        if num == 1:\n            new_device_strs.append(device_str)\n        else:\n            new_device_strs.append('%s (x%d)' % (device_str, num))\n    return new_device_strs", "docstring": "Groups together consecutive identical strings.\n\nFor example, given:\n['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']\nThis function returns:\n['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']\n\nArgs:\ndevice_strs: A list of strings, each representing a device.\n\nReturns:\nA copy of the input, but identical consecutive strings are merged into a\nsingle string.", "source": "github-repos"}
{"code": "def add_skip_connection(self, u, v, connection_type):\n        \n        if connection_type not in [self.CONCAT_CONNECT, self.ADD_CONNECT]:\n            raise ValueError(\n                \"connection_type should be NetworkDescriptor.CONCAT_CONNECT \"\n                \"or NetworkDescriptor.ADD_CONNECT.\"\n            )\n        self.skip_connections.append((u, v, connection_type))", "docstring": "Add a skip-connection to the descriptor.\nArgs:\nu: Number of convolutional layers before the starting point.\nv: Number of convolutional layers before the ending point.\nconnection_type: Must be either CONCAT_CONNECT or ADD_CONNECT.", "source": "juraj-google-style"}
{"code": "def _send(self, line):\n        \n        if not line.endswith('\\r\\n'):\n            if line.endswith('\\n'):\n                logger.debug('Fixing bare LF before sending data to socket')\n                line = line[0:-1] + '\\r\\n'\n            else:\n                logger.debug(\n                    'Fixing missing CRLF before sending data to socket')\n                line = line + '\\r\\n'\n        logger.debug('Client sent: ' + line.rstrip())\n        self._socket.send(line)", "docstring": "Write a line of data to the server.\n\nArgs:\nline -- A single line of data to write to the socket.", "source": "juraj-google-style"}
{"code": "def get_paths(self, key):\n        \n        final_paths = []\n\n        if key in self.__cli:\n            paths = self.__cli[key] or []\n            from_conf = False\n        else:\n            paths = self.__config.get(key) or []\n            from_conf = True\n\n        for path in flatten_list(paths):\n            final_path = self.__abspath(path, from_conf)\n            if final_path:\n                final_paths.append(final_path)\n\n        return final_paths", "docstring": "Same as `ConfigParser.get_path` for a list of paths.\n\nArgs:\nkey: str, the key to lookup the paths with\n\nReturns:\nlist: The paths.", "source": "juraj-google-style"}
{"code": "class PerceiverMultimodalPostprocessor(nn.Module):\n\n    def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool=False):\n        super().__init__()\n        self.modalities = nn.ModuleDict(modalities)\n        self.input_is_dict = input_is_dict\n\n    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> Mapping[str, torch.Tensor]:\n        if not self.input_is_dict:\n            if modality_sizes is None:\n                raise ValueError('Modality sizes should be specified if input is not a dictionary.')\n            inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)\n        outputs = {modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None) for modality, postprocessor in self.modalities.items()}\n        return outputs", "docstring": "Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single\npostprocessor.\n\nArgs:\nmodalities (`Mapping[str, PostprocessorType]`):\nDictionary mapping modality name to postprocessor class for that modality.\ninput_is_dict (`bool`, *optional*, defaults to `False`):\nIf True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If\nFalse, input is a tensor which is sliced up during postprocessing by *modality_sizes*.", "source": "github-repos"}
{"code": "def __init__(self, filename, events=None):\n        \n        self.filename = os.path.abspath(filename)\n        self.stat = os.stat(self.filename)\n        self.start_datetime, self.end_datetime = None, None\n\n        self._events = []\n        self._events_by_baseclass = collections.defaultdict(list)\n\n        if events is not None:\n            for ev in events:\n                self.append(ev)", "docstring": "List of ABINIT events.\n\nArgs:\nfilename: Name of the file\nevents: List of Event objects", "source": "juraj-google-style"}
{"code": "def auto_forward(auto=True):\n    \n    global __auto_forward_state\n    prev = __auto_forward_state\n    __auto_forward_state = auto\n    yield\n    __auto_forward_state = prev", "docstring": "Context for dynamic graph execution mode.\n\nArgs:\nauto (bool): Whether forward computation is executed during a\ncomputation graph construction.\n\nReturns: bool", "source": "juraj-google-style"}
{"code": "def get_value(self, tau):\n    tau = np.asarray(tau)\n    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients\n    k = get_kernel_value(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, tau.flatten())\n    return np.asarray(k).reshape(tau.shape)", "docstring": "Compute the value of the term for an array of lags\n\nArgs:\ntau (array[...]): An array of lags where the term should be\nevaluated.\n\nReturns:\nThe value of the term for each ``tau``. This will have the same\nshape as ``tau``.", "source": "codesearchnet"}
{"code": "def _ParseHeader(self, format_type, value_data):\n    data_type_map_name = self._HEADER_DATA_TYPE_MAP_NAMES.get(format_type, None)\n    if (not data_type_map_name):\n        raise errors.ParseError('Unsupported format type: {0:d}'.format(format_type))\n    data_type_map = self._GetDataTypeMap(data_type_map_name)\n    try:\n        header = self._ReadStructureFromByteStream(value_data, 0, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse header value with error: {0!s}'.format(exception))\n    header_data_size = data_type_map.GetByteSize()\n    if (format_type == self._FORMAT_TYPE_10):\n        header_data_size = header.signature\n    cache_header = AppCompatCacheHeader()\n    cache_header.header_size = header_data_size\n    cache_header.number_of_cached_entries = getattr(header, 'number_of_cached_entries', 0)\n    return cache_header", "docstring": "Parses the header.\n\nArgs:\nformat_type (int): format type.\nvalue_data (bytes): value data.\n\nReturns:\nAppCompatCacheHeader: header.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "codesearchnet"}
{"code": "def seek(self, offset: int, whence: Literal[0, 1, 2]=0) -> int:", "docstring": "Changes the current position of the file.\n\nArgs:\noffset: Offset from the position to a reference point.\nwhence: The reference point, with 0 meaning the beginning of the file,\n1 meaning the current position, or 2 meaning the end of the file.\n\nReturns:\nThe position from the beginning of the file.", "source": "github-repos"}
{"code": "def clear_signature_defs(tflite_model):\n    model = tflite_model\n    if not isinstance(tflite_model, bytearray):\n        model = bytearray(tflite_model)\n    return signature_def_util.ClearSignatureDefs(model)", "docstring": "Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer.\n\nArgs:\ntflite_model: TFLite model buffer to remove signature_defs.\n\nReturns:\nbuffer: A TFLite model binary identical to model buffer with\nno SignatureDef metadata.\n\nRaises:\nValueError:\ntflite_model buffer does not contain a valid TFLite model.", "source": "github-repos"}
{"code": "def restore(self, state):\n        \n\n        self.storage.restore(state.get('storage'))\n\n        dump_walker = state.get('dump_walker')\n        if dump_walker is not None:\n            dump_walker = self.storage.restore_walker(dump_walker)\n\n        self.dump_walker = dump_walker\n        self.next_id = state.get('next_id', 1)", "docstring": "Restore the state of this subsystem from a prior call to dump().\n\nCalling restore must be properly sequenced with calls to other\nsubsystems that include stream walkers so that their walkers are\nproperly restored.\n\nArgs:\nstate (dict): The results of a prior call to dump().", "source": "juraj-google-style"}
{"code": "def delete_user_role(self, user, role):\n    self.project_service.set_auth(self._token_project)\n    self.project_service.delete_user_role(user, role)", "docstring": "Remove role from given user.\n\nArgs:\nuser (string): User name.\nrole (string): Role to remove.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def _DefaultValueConstructorForField(field):\n    if _IsMapField(field):\n        return _GetInitializeDefaultForMap(field)\n    if (field.label == _FieldDescriptor.LABEL_REPEATED):\n        if (field.has_default_value and (field.default_value != [])):\n            raise ValueError(('Repeated field default value not empty list: %s' % field.default_value))\n        if (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):\n            message_type = field.message_type\n\n            def MakeRepeatedMessageDefault(message):\n                return containers.RepeatedCompositeFieldContainer(message._listener_for_children, field.message_type)\n            return MakeRepeatedMessageDefault\n        else:\n            type_checker = type_checkers.GetTypeChecker(field)\n\n            def MakeRepeatedScalarDefault(message):\n                return containers.RepeatedScalarFieldContainer(message._listener_for_children, type_checker)\n            return MakeRepeatedScalarDefault\n    if (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):\n        message_type = field.message_type\n\n        def MakeSubMessageDefault(message):\n            result = message_type._concrete_class()\n            result._SetListener((_OneofListener(message, field) if (field.containing_oneof is not None) else message._listener_for_children))\n            return result\n        return MakeSubMessageDefault\n\n    def MakeScalarDefault(message):\n        return field.default_value\n    return MakeScalarDefault", "docstring": "Returns a function which returns a default value for a field.\n\nArgs:\nfield: FieldDescriptor object for this field.\n\nThe returned function has one argument:\nmessage: Message instance containing this field, or a weakref proxy\nof same.\n\nThat function in turn returns a default value for this field.  The default\nvalue may refer back to |message| via a weak reference.", "source": "codesearchnet"}
{"code": "def _DocPackageFromTop(self, packages, showprivate=False, showinh=False):\n        \n        appIndex = ''\n        if not isinstance(packages, list):\n            packages = [packages]\n\n        if os.path.exists('content'):\n            shutil.rmtree('content')\n        os.makedirs('content')\n\n        appIndex += r % ('API Index')\n\n        \n        for i in range(len(packages)):\n            \n            package = packages[i]\n            try:\n                name = package.__displayname__\n            except AttributeError:\n                name = package.__name__\n            \n            path = 'content/%s' % package.__name__\n            if os.path.exists(path):\n                shutil.rmtree(path)\n            os.makedirs(path)\n\n            \n            \n            \n            meta = 'About %s\\n%s\\n' % (name, '='*len('About ' + name))\n            author = getattr(package, \"__author__\", None)\n            license = getattr(package, \"__license__\", None)\n            copyright = getattr(package, \"__copyright__\", None)\n            version = getattr(package, \"__version__\", None)\n            if author: meta += '\\n* Author: %s' % author\n            if license: meta += '\\n* License: %s' % license\n            if copyright: meta += '\\n* Copyright: %s' % copyright\n            if version: meta += '\\n* Version: %s' % version\n            about = '%s/%s' % (path, 'index.rst')\n\n            this_toc = r % (name)\n\n            this_toc += self._MakePackagePages(package, showprivate=showprivate, showinh=showinh)\n            this_toc = this_toc.replace('%s/' % path, '')\n\n            with open(about, 'w') as f:\n                f.write('%s\\n\\n' % meta)\n                if package.__doc__:\n                    f.write(package.__doc__)\n                f.write(this_toc)\n\n            appIndex += '\\n   %s' % about\n\n        \n        return appIndex", "docstring": "Generates all of the documentation for given packages and\nappends new tocrees to the index. All documentation pages will be under the\nset relative path.\n\nArgs:\npackages (list(module)): A package or list of packages that contain submodules to document\nshowprivate (bool): A flag for whether or not to display private members\n\nReturns:\nstr: The new content to append to the index", "source": "juraj-google-style"}
{"code": "def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):\n\n    def _print_tensor(tensor_name, num_elements, tensor, output_tensor):\n        \n        if self._parameters.is_brief_mode():\n            if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:\n                raise ValueError('Tensor %s with name %s is not in the tensorname_to_cache_idx' % (tensor, tensor_name))\n            msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]\n        else:\n            msg = '\"%s\"' % tensor_name\n        if self._parameters.trace_dir:\n            output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME + self._get_outfile_suffix())\n            output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n        else:\n            output_stream = sys.stderr\n        return logging_ops.print_v2(msg, array_ops.shape(output_tensor), '@', self._replica_id, '\\n', output_tensor, '\\n', summarize=num_elements, output_stream=output_stream)\n\n    def _show_part_tensor(tensor):\n        \n        return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE, tensor, tensor)\n\n    def _show_full_tensor(tensor):\n        \n        return _print_tensor(tensor_name, -1, tensor, tensor)\n    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_PART_TENSOR:\n        return _show_part_tensor\n    if self._parameters.trace_mode in (tensor_tracer_flags.TRACE_MODE_NAN_INF, tensor_tracer_flags.TRACE_MODE_NORM, tensor_tracer_flags.TRACE_MODE_FULL_TENSOR, tensor_tracer_flags.TRACE_MODE_MAX_ABS, tensor_tracer_flags.TRACE_MODE_SUMMARY, tensor_tracer_flags.TRACE_MODE_HISTORY):\n        return _show_full_tensor\n    raise RuntimeError('Full tensor support is not available with trace mode %s' % self._parameters.trace_mode)", "docstring": "Makes the tensor tracing function called by outside compilation.\n\nArgs:\ntensor_name: name of the tensor being traced.\ntensor_trace_order: TensorTraceOrder object holding tensorname to id map.\nReturns:\nA function to be passed as the first argument to outside compilation.\n\nRaises:\nRuntimeError: If the trace mode is invalid.", "source": "github-repos"}
{"code": "def delete_metadata(self, resource, keys):\n    self.metadata_service.set_auth(self._token_metadata)\n    self.metadata_service.delete(resource, keys)", "docstring": "Deletes the given key-value pairs associated with the given resource.\n\nWill attempt to delete all key-value pairs even if some fail.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys (list)\n\nRaises:\nHTTPErrorList on failure.", "source": "codesearchnet"}
{"code": "def returns_collection(return_type: FhirPathDataType) -> bool:\n    return return_type and return_type.returns_collection()", "docstring": "Indicates if return_type will evaluate to a collection.\n\nArgs:\nreturn_type: The data type to describe.\n\nReturns:\nTrue in the following circumstances\n- `return_type` represents an element with cardinality greater than one.\n- `return_type` represents an element with a cardinality less than or\nequal to one, but that element is a child of a collection and will\nevaluate to a collection. For example, the path Patient.name.use will\nreturn a collection, despite 'use' being a scalar, because it is a child\nof the collection, 'name.'\nFalse if `return_type` represents a scalar element whose parents are all\nalso scalars.", "source": "github-repos"}
{"code": "def create(cls, endpoint_name, json_body, original_response):\n        \n\n        if endpoint_name == \"property/value_report\":\n            return ValueReportResponse(endpoint_name, json_body, original_response)\n\n        if endpoint_name == \"property/rental_report\":\n            return RentalReportResponse(endpoint_name, json_body, original_response)\n\n        prefix = endpoint_name.split(\"/\")[0]\n\n        if prefix == \"block\":\n            return BlockResponse(endpoint_name, json_body, original_response)\n\n        if prefix == \"zip\":\n            return ZipCodeResponse(endpoint_name, json_body, original_response)\n\n        if prefix == \"msa\":\n            return MsaResponse(endpoint_name, json_body, original_response)\n\n        return PropertyResponse(endpoint_name, json_body, original_response)", "docstring": "Factory for creating the correct type of Response based on the data.\nArgs:\nendpoint_name (str) - The endpoint of the request, such as \"property/value\"\njson_body - The response body in json format.\noriginal_response (response object) - server response returned from an http request.", "source": "juraj-google-style"}
{"code": "def podcasts(self, *, device_id=None):\n    if (device_id is None):\n        device_id = self.device_id\n    podcast_list = []\n    for chunk in self.podcasts_iter(device_id=device_id, page_size=49995):\n        podcast_list.extend(chunk)\n    return podcast_list", "docstring": "Get a listing of subsribed podcast series.\n\nParamaters:\ndevice_id (str, Optional): A mobile device ID.\nDefault: Use ``device_id`` of the :class:`MobileClient` instance.\n\nReturns:\nlist: Podcast series dict.", "source": "codesearchnet"}
{"code": "def index_file(self, f, overwrite=False):\n    if isinstance(f, six.string_types):\n        f = self.layout.get_file(f)\n    if ((f.path in self.file_index) and (not overwrite)):\n        return\n    if ('suffix' not in f.entities):\n        return\n    md = self._get_metadata(f.path)\n    for (md_key, md_val) in md.items():\n        if (md_key not in self.key_index):\n            self.key_index[md_key] = {}\n        self.key_index[md_key][f.path] = md_val\n        self.file_index[f.path][md_key] = md_val", "docstring": "Index metadata for the specified file.\n\nArgs:\nf (BIDSFile, str): A BIDSFile or path to an indexed file.\noverwrite (bool): If True, forces reindexing of the file even if\nan entry already exists.", "source": "codesearchnet"}
{"code": "def GetEventFormatter(self, event):\n    data_type = getattr(event, 'data_type', None)\n    if (not data_type):\n        return None\n    return formatters_manager.FormattersManager.GetFormatterObject(event.data_type)", "docstring": "Retrieves the event formatter for a specific event type.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nEventFormatter: event formatter or None.", "source": "codesearchnet"}
{"code": "def getCard(self, name):\n        \n        cards = self.projectCards\n\n        for card in cards:\n            if card.name.upper() == name.upper():\n                return card\n\n        return None", "docstring": "Retrieve card object for given card name.\n\nArgs:\nname (str): Name of card to be retrieved.\n\nReturns:\n:class:`.ProjectCard` or None: Project card object. Will return None if the card is not available.", "source": "juraj-google-style"}
{"code": "def add_payload(self, key, val, append=False):\n        \n        if append:\n            self._payload.setdefault(key, []).append(val)\n        else:\n            self._payload[key] = val", "docstring": "Add a key value pair to payload for this request.\n\nArgs:\nkey (str): The payload key.\nval (str): The payload value.\nappend (bool, default:False): Indicate whether the value should be appended or\noverwritten.", "source": "juraj-google-style"}
{"code": "def assert_broadcastable(weights, values):\n    with ops.name_scope(None, 'assert_broadcastable', (weights, values)) as scope:\n        with ops.name_scope(None, 'weights', (weights,)) as weights_scope:\n            weights = ops.convert_to_tensor(weights, name=weights_scope)\n            weights_shape = array_ops.shape(weights, name='shape')\n            weights_rank = array_ops.rank(weights, name='rank')\n        weights_rank_static = tensor_util.constant_value(weights_rank)\n        with ops.name_scope(None, 'values', (values,)) as values_scope:\n            values = ops.convert_to_tensor(values, name=values_scope)\n            values_shape = array_ops.shape(values, name='shape')\n            values_rank = array_ops.rank(values, name='rank')\n        values_rank_static = tensor_util.constant_value(values_rank)\n        if weights_rank_static is not None and values_rank_static is not None:\n            if weights_rank_static == 0:\n                return control_flow_ops.no_op(name='static_scalar_check_success')\n            if weights_rank_static != values_rank_static:\n                raise ValueError(f'{_ASSERT_BROADCASTABLE_ERROR_PREFIX} values.rank={values_rank_static}. weights.rank={weights_rank_static}. values.shape={values.shape}. weights.shape={weights.shape}. Received weights={weights}, values={values}')\n            weights_shape_static = tensor_util.constant_value(weights_shape)\n            values_shape_static = tensor_util.constant_value(values_shape)\n            if weights_shape_static is not None and values_shape_static is not None:\n                ndims = len(values_shape_static)\n                assert ndims == len(weights_shape_static)\n                for i in range(ndims):\n                    if weights_shape_static[i] not in (1, values_shape_static[i]):\n                        raise ValueError(f'{_ASSERT_BROADCASTABLE_ERROR_PREFIX} Mismatch at dim {i}. values.shape={values_shape_static}, weights.shape={weights_shape_static}. Received weights={weights}, values={values}')\n                return control_flow_ops.no_op(name='static_dims_check_success')\n        is_scalar = math_ops.equal(0, weights_rank, name='is_scalar')\n        data = (_ASSERT_BROADCASTABLE_ERROR_PREFIX, 'weights.shape=', weights.name, weights_shape, 'values.shape=', values.name, values_shape, 'is_scalar=', is_scalar)\n        is_valid_shape = cond.cond(is_scalar, lambda: is_scalar, lambda: _has_valid_nonscalar_shape(weights_rank, weights_shape, values_rank, values_shape), name='is_valid_shape')\n        return control_flow_assert.Assert(is_valid_shape, data, name=scope)", "docstring": "Asserts `weights` can be broadcast to `values`.\n\nIn `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We\nlet weights be either scalar, or the same rank as the target values, with each\ndimension either 1, or the same as the corresponding values dimension.\n\nArgs:\nweights: `Tensor` of weights.\nvalues: `Tensor` of values to which weights are applied.\n\nReturns:\n`Operation` raising `InvalidArgumentError` if `weights` has incorrect shape.\n`no_op` if static checks determine `weights` has correct shape.\n\nRaises:\nValueError:  If static checks determine `weights` has incorrect shape.", "source": "github-repos"}
{"code": "def _get_parser_call_method(self, parser_to_method):\n\n    def inner_call(args=None, instance=None):\n        'Allows to call the method invoked from the command line or\\n            provided argument.\\n\\n            Args:\\n                args: list of arguments to parse, defaults to command line\\n                arguments\\n                instance: an instance of the decorated class. If instance is\\n                None, the default, and __init__ is decorated the object will be\\n                instantiated on the fly from the command line arguments\\n            '\n        parser = self._cls.parser\n        namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))\n        if (instance is None):\n            if ('__init__' not in parser_to_method):\n                raise ParseThisError(\"'__init__' method is not decorated. Please provide an instance to '{}.parser.call' or decorate the '__init___' method with 'create_parser'\".format(self._cls.__name__))\n            instance = _call_method_from_namespace(self._cls, '__init__', namespace)\n        method_name = parser_to_method[namespace.method]\n        return _call_method_from_namespace(instance, method_name, namespace)\n    return inner_call", "docstring": "Return the parser special method 'call' that handles sub-command\ncalling.\n\nArgs:\nparser_to_method: mapping of the parser registered name\nto the method it is linked to", "source": "codesearchnet"}
{"code": "def forward(self, x):\n    embeddings = self.embedding_convPxP(x).flatten(2)\n    embeddings = nn.functional.pad(embeddings, (1, 0))\n    embeddings = embeddings.permute(0, 2, 1)\n    batch_size, sequence_length, embedding_dim = embeddings.shape\n    embeddings = embeddings + self.positional_encoding_1d(batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype)\n    for i in range(4):\n        embeddings = self.transformer_encoder[i](embeddings)\n    return embeddings", "docstring": "Forward pass\n\nArgs:\nx (torch.Tensor - NCHW): Input feature tensor\n\nReturns:\ntorch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)", "source": "github-repos"}
{"code": "def _parent_info(self):\n    parent_doc = self.parent\n    if (parent_doc is None):\n        parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join((self._client._database_string, 'documents'))\n    else:\n        parent_path = parent_doc._document_path\n    expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id))\n    return (parent_path, expected_prefix)", "docstring": "Get fully-qualified parent path and prefix for this collection.\n\nReturns:\nTuple[str, str]: Pair of\n\n* the fully-qualified (with database and project) path to the\nparent of this collection (will either be the database path\nor a document path).\n* the prefix to a document in this collection.", "source": "codesearchnet"}
{"code": "def build(X_df=None, y_df=None):\n    if (X_df is None):\n        (X_df, _) = load_data()\n    if (y_df is None):\n        (_, y_df) = load_data()\n    features = get_contrib_features()\n    mapper_X = ballet.feature.make_mapper(features)\n    X = mapper_X.fit_transform(X_df)\n    encoder_y = get_target_encoder()\n    y = encoder_y.fit_transform(y_df)\n    return {'X_df': X_df, 'features': features, 'mapper_X': mapper_X, 'X': X, 'y_df': y_df, 'encoder_y': encoder_y, 'y': y}", "docstring": "Build features and target\n\nArgs:\nX_df (DataFrame): raw variables\ny_df (DataFrame): raw target\n\nReturns:\ndict with keys X_df, features, mapper_X, X, y_df, encoder_y, y", "source": "codesearchnet"}
{"code": "def matmul_without_tf32(a, b, *args, **kwargs):\n    if config.tensor_float_32_execution_enabled() and a.dtype == 'float32':\n        a = math_ops.cast(a, 'float64')\n        b = math_ops.cast(b, 'float64')\n        ret = math_ops.matmul(a, b, *args, **kwargs)\n        return math_ops.cast(ret, a.dtype)\n    elif config.tensor_float_32_execution_enabled() and a.dtype == 'complex64':\n        a = math_ops.cast(a, 'complex128')\n        b = math_ops.cast(b, 'complex128')\n        ret = math_ops.matmul(a, b, *args, **kwargs)\n        return math_ops.cast(ret, a.dtype)\n    else:\n        return math_ops.matmul(a, b, *args, **kwargs)", "docstring": "Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.\n\nThis effectively runs matmul without TensorFloat-32. It should only be used in\ntests when verifying some other op or functions works correctly, e.g. to test\n`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In\nsuch cases, the matmul itself is not being tested so it's OK to run it with\nhigher precision.\n\nIf a matmul itself is being tested, or some other op which uses matmul, use\n`run_without_tensor_float_32` instead.\n\nThis also casts complex64 inputs to complex128, since TensorFloat-32 can also\nbe used with complex64\n\nArgs:\na: First input to tf.linalg.matmul\nb: Second input to tf.linalg.matmul\nargs: Other positional arguments to tf.linalg.matmul\n**kwargs: Other keyword arguments to tf.linalg.matmul\n\nReturns:\nA tensor with the same type as `a`.", "source": "github-repos"}
{"code": "def output_sector_csv(self, csv_path, file_dict_key, out_path):\n    csv_file = (csv_path + '{0}_{1}_{2}_{3}.csv'.format(file_dict_key, self.ensemble_name, self.member, self.run_date.strftime(self.date_format)))\n    if exists(csv_file):\n        csv_data = pd.read_csv(csv_file)\n        if (self.inds is None):\n            lon_obj = csv_data.loc[(:, 'Centroid_Lon')]\n            lat_obj = csv_data.loc[(:, 'Centroid_Lat')]\n            self.inds = np.where(((((self.ne_lat >= lat_obj) & (self.sw_lat <= lat_obj)) & (self.ne_lon >= lon_obj)) & (self.sw_lon <= lon_obj)))[0]\n        if (np.shape(self.inds)[0] > 0):\n            csv_data = csv_data.reindex(np.array(self.inds))\n            sector_csv_filename = (out_path + '{0}_{1}_{2}_{3}.csv'.format(file_dict_key, self.ensemble_name, self.member, self.run_date.strftime(self.date_format)))\n            print(('Output sector csv file ' + sector_csv_filename))\n            csv_data.to_csv(sector_csv_filename, na_rep='nan', float_format='%0.5f', index=False)\n            os.chmod(sector_csv_filename, 438)\n        else:\n            print('No {0} {1} sector data found'.format(self.member, self.run_date.strftime('%Y%m%d')))\n    else:\n        print('No {0} {1} csv file found'.format(self.member, self.run_date.strftime('%Y%m%d')))\n    return", "docstring": "Segment forecast tracks to only output data contined within a\nregion in the CONUS, as defined by the mapfile.\n\nArgs:\ncsv_path(str): Path to the full CONUS csv file.\nfile_dict_key(str): Dictionary key for the csv files,\ncurrently either 'track_step' or 'track_total'\nout_path (str): Path to output new segmented csv files.\nReturns:\nSegmented forecast tracks in a csv file.", "source": "codesearchnet"}
{"code": "def slice_element_urls(element_definition: ElementDefinition) -> List[str]:\n    result: List[str] = []\n    if proto_utils.field_is_set(element_definition, 'type'):\n        type_refs: List[StructureDefinition] = proto_utils.get_value_at_field(element_definition, 'type')\n        profile_lists = [cast(Any, t).profile for t in type_refs]\n        urls = [cast(Any, profile).value for profile in itertools.chain.from_iterable(profile_lists)]\n        result.extend(urls)\n    return result", "docstring": "Returns the list of profile urls for the given slice element.\n\nArgs:\nelement_definition: The `ElementDefinition` whose profile urls we are\nretrieving.\n\nReturns:\nA list of strings representing the element's profile urls.", "source": "github-repos"}
{"code": "def _validate_alias_command_level(alias, command):\n    alias_collision_table = AliasManager.build_collision_table([alias])\n    if (not alias_collision_table):\n        return\n    command_collision_table = AliasManager.build_collision_table([command])\n    alias_collision_levels = alias_collision_table.get(alias.split()[0], [])\n    command_collision_levels = command_collision_table.get(command.split()[0], [])\n    if (set(alias_collision_levels) & set(command_collision_levels)):\n        raise CLIError(COMMAND_LVL_ERROR.format(alias, command))", "docstring": "Make sure that if the alias is a reserved command, the command that the alias points to\nin the command tree does not conflict in levels.\n\ne.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.\nHowever, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.\n\nArgs:\nalias: The name of the alias.\ncommand: The command that the alias points to.", "source": "codesearchnet"}
{"code": "def from_versions(cls, versions):\n        \n        range = cls(None)\n        range.bounds = []\n        for version in dedup(sorted(versions)):\n            lower = _LowerBound(version, True)\n            upper = _UpperBound(version, True)\n            bound = _Bound(lower, upper)\n            range.bounds.append(bound)\n        return range", "docstring": "Create a range from a list of versions.\n\nThis method creates a range that contains only the given versions and\nno other. Typically the range looks like (for eg) \"==3|==4|==5.1\".\n\nArgs:\nversions: List of Version objects.\n\nReturns:\n`VersionRange` object.", "source": "juraj-google-style"}
{"code": "def _check_sensor_platform_consistency(self, sensor):\n        \n        ref_sensor = SENSORS.get(self.platform, None)\n        if ref_sensor and not sensor == ref_sensor:\n            logger.error('Sensor-Platform mismatch: {} is not a payload '\n                         'of {}. Did you choose the correct reader?'\n                         .format(sensor, self.platform))", "docstring": "Make sure sensor and platform are consistent\n\nArgs:\nsensor (str) : Sensor name from YAML dataset definition\n\nRaises:\nValueError if they don't match", "source": "juraj-google-style"}
{"code": "def compute(self, batch_values, accumulator=None):\n    pass", "docstring": "Compute a step in this computation, returning a new accumulator.\n\nThis method computes a step of the computation described by this Combiner.\nIf an accumulator is passed, the data in that accumulator is also used; so\ncompute(batch_values) results in f(batch_values), while\ncompute(batch_values, accumulator) results in\nmerge(f(batch_values), accumulator).\n\nArgs:\nbatch_values: A list of ndarrays representing the values of the inputs for\nthis step of the computation.\naccumulator: the current accumulator. Can be None.\n\nReturns:\nAn accumulator that includes the passed batch of inputs.", "source": "github-repos"}
{"code": "def pretty_print_config_to_json(self, configs):\n    \n    descriptor = self.get_directory_list_doc(configs)\n    return json.dumps(descriptor, sort_keys=True, indent=2,\n                      separators=(',', ': '))", "docstring": "JSON string description of a protorpc.remote.Service in a discovery doc.\n\nArgs:\nconfigs: Either a single dict or a list of dicts containing the service\nconfigurations to list.\n\nReturns:\nstring, The directory list document as a JSON string.", "source": "juraj-google-style"}
{"code": "def isclose(x1, x2, rtol=1e-05, atol=1e-08, equal_nan=False):\n    if any_symbolic_tensors((x1, x2)):\n        return Isclose(equal_nan=equal_nan).symbolic_call(x1, x2, rtol, atol)\n    return backend.numpy.isclose(x1, x2, rtol, atol, equal_nan)", "docstring": "Return whether two tensors are element-wise almost equal.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\nrtol: Relative tolerance.\natol: Absolute tolerance.\nequal_nan: If `True`, element-wise NaNs are considered equal.\n\nReturns:\nOutput boolean tensor.", "source": "github-repos"}
{"code": "def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding):\n    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n    with self.cached_session(use_gpu=False):\n        t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n        t2 = constant_op.constant(x2, shape=filter_in_sizes)\n        strides = [1] + conv_strides + [1]\n        conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)\n        os.environ['TF_USE_DEEP_CONV2D'] = '0'\n        values_expect = self.evaluate([conv])\n        os.environ['TF_USE_DEEP_CONV2D'] = '1'\n        values_test = self.evaluate([conv])\n        self.assertAllClose(values_expect, values_test, rtol=1e-05, atol=1e-05)", "docstring": "Verifies that DeepConv2D and Conv2D produce the same values.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in\n[kernel_rows, kernel_cols, input_depth, output_depth].\nconv_strides: [row_stride, col_stride] for the convolution;\npadding: Padding type.", "source": "github-repos"}
{"code": "def trigger(self, event_name, *args, **kwargs):\n\t\t\n\t\tev = Event(event_name, self)\n\t\tev.trigger(*args, **kwargs)\n\t\treturn ev", "docstring": "Trigger an event on this context.\n\nParams:\nevent_name (string):\nEvent name to trigger\n\nArgs and kwargs are passed to each handler - see the bubbler.Event class\nfor more information.\n\nReturns:\nbubbler.Event:\nEvent instance after execution of all handlers", "source": "juraj-google-style"}
{"code": "def jsonRender(self, def_buf):\n    try:\n        ret_dict = SerialBlock()\n        ret_dict[Field.Meter_Address] = self.getMeterAddress()\n        for fld in def_buf:\n            compare_fld = fld.upper()\n            if ((not ('RESERVED' in compare_fld)) and (not ('CRC' in compare_fld))):\n                ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n        return ''\n    return json.dumps(ret_dict, indent=4)", "docstring": "Translate the passed serial block into string only JSON.\n\nArgs:\ndef_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.\n\nReturns:\nstr: JSON rendering of meter record.", "source": "codesearchnet"}
{"code": "def get_ethernet_networks(self):\n        \n        network_uris = self.data.get('networkUris')\n        networks = []\n        if network_uris:\n            for uri in network_uris:\n                networks.append(self._ethernet_networks.get_by_uri(uri))\n        return networks", "docstring": "Gets a list of associated ethernet networks of an uplink set.\n\nArgs:\nid_or_uri: Can be either the uplink set id or the uplink set uri.\n\nReturns:\nlist: Associated ethernet networks.", "source": "juraj-google-style"}
{"code": "def ndtr(x, name='ndtr'):\n    with tf.name_scope(name):\n        x = tf.convert_to_tensor(value=x, name='x')\n        if (dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]):\n            raise TypeError(('x.dtype=%s is not handled, see docstring for supported types.' % x.dtype))\n        return _ndtr(x)", "docstring": "Normal distribution function.\n\nReturns the area under the Gaussian probability density function, integrated\nfrom minus infinity to x:\n\n```\n1       / x\nndtr(x)  = ----------  |    exp(-0.5 t**2) dt\nsqrt(2 pi)  /-inf\n\n= 0.5 (1 + erf(x / sqrt(2)))\n= 0.5 erfc(x / sqrt(2))\n```\n\nArgs:\nx: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"ndtr\").\n\nReturns:\nndtr: `Tensor` with `dtype=x.dtype`.\n\nRaises:\nTypeError: if `x` is not floating-type.", "source": "codesearchnet"}
{"code": "def convert_item_to_command_line_arg(self, action, key, value):\n        \n        args = []\n\n        if action is None:\n            command_line_key = \\\n                self.get_command_line_key_for_unknown_config_file_setting(key)\n        else:\n            command_line_key = action.option_strings[-1]\n\n        \n        if action is not None and isinstance(action, ACTION_TYPES_THAT_DONT_NEED_A_VALUE):\n            if value.lower() in (\"true\", \"yes\", \"1\"):\n                args.append( command_line_key )\n            elif value.lower() in (\"false\", \"no\", \"0\"):\n                \n                pass\n            else:\n                self.error(\"Unexpected value for %s: '%s'. Expecting 'true', \"\n                           \"'false', 'yes', 'no', '1' or '0'\" % (key, value))\n        elif isinstance(value, list):\n            if action is None or isinstance(action, argparse._AppendAction):\n                for list_elem in value:\n                    args.append( command_line_key )\n                    args.append( str(list_elem) )\n            elif (isinstance(action, argparse._StoreAction) and action.nargs in ('+', '*')) or (\n                isinstance(action.nargs, int) and action.nargs > 1):\n                args.append( command_line_key )\n                for list_elem in value:\n                    args.append( str(list_elem) )\n            else:\n                self.error((\"%s can't be set to a list '%s' unless its action type is changed \"\n                            \"to 'append' or nargs is set to '*', '+', or > 1\") % (key, value))\n        elif isinstance(value, str):\n            args.append( command_line_key )\n            args.append( value )\n        else:\n            raise ValueError(\"Unexpected value type %s for value: %s\" % (\n                type(value), value))\n\n        return args", "docstring": "Converts a config file or env var key + value to a list of\ncommandline args to append to the commandline.\n\nArgs:\naction: The argparse Action object for this setting, or None if this\nconfig file setting doesn't correspond to any defined\nconfigargparse arg.\nkey: string (config file key or env var name)\nvalue: parsed value of type string or list", "source": "juraj-google-style"}
{"code": "def run(self, text):\n        \n        for pp in self.pre_processors:\n            text = pp.run(text)\n        return text", "docstring": "Run each substitution on ``text``.\n\nArgs:\ntext (string): the input text.\n\nReturns:\nstring: text after all substitutions have been sequentially\napplied.", "source": "juraj-google-style"}
{"code": "def right_margin(self, margin):\n        \n        if margin >=1 and margin <=255:\n            self.send(chr(27)+'Q'+chr(margin))\n        else:\n            raise RuntimeError('Invalid margin parameter in function rightMargin')", "docstring": "Specify the right margin.\n\nArgs:\nmargin: The right margin, in character width, must be less than the media's width.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid margin parameter", "source": "juraj-google-style"}
{"code": "def updateAccount(self, subject, person, vendorSpecific=None):\n        \n        response = self.updateAccountResponse(subject, person, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: updateAccountResponse()\n\nArgs:\nsubject:\nperson:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_box_field(self, box_key, field_key = None):\n\t\t\n\t\t\n\t\tself._raise_unimplemented_error()\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.fields_suffix\n\t\t\t\t\t\t])\n\t\tif field_key:\n\t\t\turi = '/'.join([uri, field_key])\n\n\t\treturn self._req('get', uri)", "docstring": "Gets one/all field in a box\nArgs:\nbox_key \t\tkey for pipeline\nfield_key \t\t\tkey for field (default: None i.e. ALL)\nreturns\t\t\t\tstatus code, field dict or list thereof", "source": "juraj-google-style"}
{"code": "def _compute_enlarge_labels(self, locator, base_index):\n    base_index_type = type(base_index)\n    locator_as_index = base_index_type(locator)\n    nan_labels = locator_as_index.difference(base_index)\n    common_labels = locator_as_index.intersection(base_index)\n    if (len(common_labels) == 0):\n        raise KeyError('None of [{labels}] are in the [{base_index_name}]'.format(labels=list(locator_as_index), base_index_name=base_index))\n    return nan_labels", "docstring": "Helper for _enlarge_axis, compute common labels and extra labels.\n\nReturns:\nnan_labels: The labels needs to be added", "source": "codesearchnet"}
{"code": "def get(self, url):\n        \n\n        self._driver.get(url)\n\n        if self.bot_diary:\n            self.bot_diary.add_auto_entry(\n                \"I went on\",\n                target=url,\n                take_screenshot=True\n            )\n\n        if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:\n            self.init_javascript_error_interception()\n\n        return True", "docstring": "Navigate to a specific url\n\nThis specific implementation inject a javascript\nscript to intercept the javascript error\n\nConfigurable with the \"proxy_driver:intercept_javascript_error\" config\n\nArgs:\nurl (str): the url to navigate to\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def delete(self, id, **kwargs):\n        \n        if id is None:\n            path = self.path\n        else:\n            if not isinstance(id, int):\n                id = id.replace('/', '%2F')\n            path = '%s/%s' % (self.path, id)\n        self.gitlab.http_delete(path, **kwargs)", "docstring": "Delete an object on the server.\n\nArgs:\nid: ID of the object to delete\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def add_property_orders(query_proto, *orders):\n  \n  for order in orders:\n    proto = query_proto.order.add()\n    if order[0] == '-':\n      order = order[1:]\n      proto.direction = query_pb2.PropertyOrder.DESCENDING\n    else:\n      proto.direction = query_pb2.PropertyOrder.ASCENDING\n    proto.property.name = order", "docstring": "Add ordering constraint for the given datastore.Query proto message.\n\nArgs:\nquery_proto: datastore.Query proto message.\norders: list of propertype name string, default to ascending\norder and set descending if prefixed by '-'.\n\nUsage:\n>>> add_property_orders(query_proto, 'foo')  # sort by foo asc\n>>> add_property_orders(query_proto, '-bar')  # sort by bar desc", "source": "juraj-google-style"}
{"code": "def _send_notification(self, handle, value):\n        \n\n        value_len = len(value)\n        value = bytes(value)\n\n        payload = struct.pack(\"<BHB%ds\" % value_len, 0xFF, handle, value_len, value)\n\n        response = self._send_command(2, 5, payload)\n        result, = unpack(\"<H\", response.payload)\n        if result != 0:\n            return False, {'reason': 'Error code from BLED112 notifying a value', 'code': result, 'handle': handle, 'value': value}\n\n        return True, None", "docstring": "Send a notification to all connected clients on a characteristic\n\nArgs:\nhandle (int): The handle we wish to notify on\nvalue (bytearray): The value we wish to send", "source": "juraj-google-style"}
{"code": "def egress(self, envelope, http_headers, operation, binding_options):\n    \n    if self._logger.isEnabledFor(logging.INFO):\n      service_name = operation.binding.wsdl.services.keys()[0]\n      self._logger.info(_REQUEST_LOG_LINE, service_name, operation.name,\n                        binding_options['address'])\n\n    if self._logger.isEnabledFor(logging.DEBUG):\n      http_headers_safe = http_headers.copy()\n      if self._AUTHORIZATION_HEADER in http_headers_safe:\n        http_headers_safe[self._AUTHORIZATION_HEADER] = self._REDACTED\n\n      request_string = etree.tostring(envelope, pretty_print=True)\n      safe_request = self._DEVELOPER_TOKEN_SUB.sub(\n          self._REDACTED, request_string.decode('utf-8'))\n      self._logger.debug(\n          _REQUEST_XML_LOG_LINE, http_headers_safe, safe_request)\n\n    return envelope, http_headers", "docstring": "Overrides the egress function ror request logging.\n\nArgs:\nenvelope: An Element with the SOAP request data.\nhttp_headers: A dict of the current http headers.\noperation: The SoapOperation instance.\nbinding_options: An options dict for the SOAP binding.\n\nReturns:\nA tuple of the envelope and headers.", "source": "juraj-google-style"}
{"code": "def produce(self, **kwargs):\n    produce_args = self._produce_params.copy()\n    produce_args.update(kwargs)\n    if self._class:\n        return getattr(self.instance, self.produce_method)(**produce_args)\n    produce_args.update(self._hyperparameters)\n    return self.primitive(**produce_args)", "docstring": "Call the primitive function, or the predict method of the primitive.\n\nThe given keyword arguments will be passed directly to the primitive,\nif it is a simple function, or to the `produce` method of the\nprimitive instance specified in the JSON annotation, if it is a class.\n\nIf any of the arguments expected by the fit method had been given\nduring the MLBlock initialization, they will be passed as well.\n\nReturns:\nThe output of the call to the primitive function or primitive\nproduce method.", "source": "codesearchnet"}
{"code": "def sort_dependencies(self, image, dependencies=None):\n        \n        if dependencies is None:\n            dependencies = OrderedDict()  \n\n        if image in dependencies:\n            return\n\n        requires = self.ymldefs[image].get('requires', [])\n\n        for dep in requires:\n            self.sort_dependencies(dep, dependencies)\n\n        dependencies[image] = None\n        return dependencies.keys()", "docstring": "Topologically sort the docker commands by their requirements\n\nNote:\nCircular \"requires\" dependencies are assumed to have already been checked in\nget_external_base_image, they are not checked here\n\nArgs:\nimage (str): process this docker image's dependencies\ndependencies (OrderedDict): running cache of sorted dependencies (ordered dict)\n\nReturns:\nList[str]: list of dependencies a topologically-sorted build order", "source": "juraj-google-style"}
{"code": "def Feed(self, size=512):\n    data = self.file_object.read(size)\n    Lexer.Feed(self, data)\n    return len(data)", "docstring": "Feed data into the buffer.\n\nArgs:\nsize: optional data size to read form the file-like object.", "source": "codesearchnet"}
{"code": "def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:\n    VALID_STR.validate(group_name, 'get_group_by_name')\n    for group in self.groups:\n        if (group.group_name == group_name):\n            return group\n    return None", "docstring": "Gets a group from its name\n\nArgs:\ngroup_name:\n\nReturns: Group", "source": "codesearchnet"}
{"code": "def euler_angles_1q(unitary_matrix):\n    if (unitary_matrix.shape != (2, 2)):\n        raise QiskitError('euler_angles_1q: expected 2x2 matrix')\n    phase = (la.det(unitary_matrix) ** ((- 1.0) / 2.0))\n    U = (phase * unitary_matrix)\n    if (abs(U[(0, 0)]) > _CUTOFF_PRECISION):\n        theta = (2 * math.acos(abs(U[(0, 0)])))\n    else:\n        theta = (2 * math.asin(abs(U[(1, 0)])))\n    phase11 = 0.0\n    phase10 = 0.0\n    if (abs(math.cos((theta / 2.0))) > _CUTOFF_PRECISION):\n        phase11 = (U[(1, 1)] / math.cos((theta / 2.0)))\n    if (abs(math.sin((theta / 2.0))) > _CUTOFF_PRECISION):\n        phase10 = (U[(1, 0)] / math.sin((theta / 2.0)))\n    phiplambda = (2 * math.atan2(np.imag(phase11), np.real(phase11)))\n    phimlambda = (2 * math.atan2(np.imag(phase10), np.real(phase10)))\n    phi = 0.0\n    if ((abs(U[(0, 0)]) > _CUTOFF_PRECISION) and (abs(U[(1, 0)]) > _CUTOFF_PRECISION)):\n        phi = ((phiplambda + phimlambda) / 2.0)\n        lamb = ((phiplambda - phimlambda) / 2.0)\n    elif (abs(U[(0, 0)]) < _CUTOFF_PRECISION):\n        lamb = (- phimlambda)\n    else:\n        lamb = phiplambda\n    Rzphi = np.array([[np.exp((((- 1j) * phi) / 2.0)), 0], [0, np.exp(((1j * phi) / 2.0))]], dtype=complex)\n    Rytheta = np.array([[np.cos((theta / 2.0)), (- np.sin((theta / 2.0)))], [np.sin((theta / 2.0)), np.cos((theta / 2.0))]], dtype=complex)\n    Rzlambda = np.array([[np.exp((((- 1j) * lamb) / 2.0)), 0], [0, np.exp(((1j * lamb) / 2.0))]], dtype=complex)\n    V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))\n    if (la.norm((V - U)) > _CUTOFF_PRECISION):\n        raise QiskitError('euler_angles_1q: incorrect result')\n    return (theta, phi, lamb)", "docstring": "Compute Euler angles for a single-qubit gate.\n\nFind angles (theta, phi, lambda) such that\nunitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)\n\nArgs:\nunitary_matrix (ndarray): 2x2 unitary matrix\n\nReturns:\ntuple: (theta, phi, lambda) Euler angles of SU(2)\n\nRaises:\nQiskitError: if unitary_matrix not 2x2, or failure", "source": "codesearchnet"}
{"code": "def getPaddingNum(chars):\n        \n        match = PRINTF_SYNTAX_PADDING_RE.match(chars)\n        if match:\n            return int(match.group(1))\n\n        try:\n            return sum([PAD_MAP[char] for char in chars])\n        except KeyError:\n            msg = \"Detected an unsupported padding character: \\\"{}\\\".\"\n            msg += \" Supported padding characters: {} or printf syntax padding\"\n            msg += \" %<int>d\"\n            raise ValueError(msg.format(char, str(PAD_MAP.keys())))", "docstring": "Given a supported group of padding characters, return the amount of padding.\n\nArgs:\nchars (str): a supported group of padding characters\n\nReturns:\nint:\n\nRaises:\nValueError: if unsupported padding character is detected", "source": "juraj-google-style"}
{"code": "def __init__(self, table, info):\n    \n    self._table = table\n    self._info = info", "docstring": "Initializes a TableMetadata instance.\n\nArgs:\ntable: the Table object this belongs to.\ninfo: The BigQuery information about this table as a Python dictionary.", "source": "juraj-google-style"}
{"code": "def start_test(self, pipeline):\n    \n    global _TEST_MODE, _TEST_ROOT_PIPELINE_KEY\n    self.start(pipeline, return_task=True)\n    _TEST_MODE = True\n    _TEST_ROOT_PIPELINE_KEY = pipeline._pipeline_key\n    try:\n      self.evaluate_test(pipeline, root=True)\n    finally:\n      _TEST_MODE = False", "docstring": "Starts a pipeline in the test mode.\n\nArgs:\npipeline: The Pipeline instance to test.", "source": "juraj-google-style"}
{"code": "def createAndStartSwarm(client, clientInfo='', clientKey='', params='', minimumWorkers=None, maximumWorkers=None, alreadyRunning=False):\n    if (minimumWorkers is None):\n        minimumWorkers = Configuration.getInt('nupic.hypersearch.minWorkersPerSwarm')\n    if (maximumWorkers is None):\n        maximumWorkers = Configuration.getInt('nupic.hypersearch.maxWorkersPerSwarm')\n    return ClientJobsDAO.get().jobInsert(client=client, cmdLine='$HYPERSEARCH', clientInfo=clientInfo, clientKey=clientKey, alreadyRunning=alreadyRunning, params=params, minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers, jobType=ClientJobsDAO.JOB_TYPE_HS)", "docstring": "Create and start a swarm job.\n\nArgs:\nclient - A string identifying the calling client. There is a small limit\nfor the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.\nclientInfo - JSON encoded dict of client specific information.\nclientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.\nparams - JSON encoded dict of the parameters for the job. This can be\nfetched out of the database by the worker processes based on the jobID.\nminimumWorkers - The minimum workers to allocate to the swarm. Set to None\nto use the default.\nmaximumWorkers - The maximum workers to allocate to the swarm. Set to None\nto use the swarm default. Set to 0 to use the maximum scheduler value.\nalreadyRunning - Insert a job record for an already running process. Used\nfor testing.", "source": "codesearchnet"}
{"code": "def wrap_rich_text_lines(inp, cols):\n    new_line_indices = []\n    if not isinstance(inp, RichTextLines):\n        raise ValueError('Invalid type of input screen_output')\n    if not isinstance(cols, int):\n        raise ValueError('Invalid type of input cols')\n    out = RichTextLines([])\n    row_counter = 0\n    for i, line in enumerate(inp.lines):\n        new_line_indices.append(out.num_lines())\n        if i in inp.annotations:\n            out.annotations[row_counter] = inp.annotations[i]\n        if len(line) <= cols:\n            out.lines.append(line)\n            if i in inp.font_attr_segs:\n                out.font_attr_segs[row_counter] = inp.font_attr_segs[i]\n            row_counter += 1\n        else:\n            wlines = []\n            osegs = []\n            if i in inp.font_attr_segs:\n                osegs = inp.font_attr_segs[i]\n            idx = 0\n            while idx < len(line):\n                if idx + cols > len(line):\n                    rlim = len(line)\n                else:\n                    rlim = idx + cols\n                wlines.append(line[idx:rlim])\n                for seg in osegs:\n                    if seg[0] < rlim and seg[1] >= idx:\n                        if seg[0] >= idx:\n                            lb = seg[0] - idx\n                        else:\n                            lb = 0\n                        if seg[1] < rlim:\n                            rb = seg[1] - idx\n                        else:\n                            rb = rlim - idx\n                        if rb > lb:\n                            wseg = (lb, rb, seg[2])\n                            if row_counter not in out.font_attr_segs:\n                                out.font_attr_segs[row_counter] = [wseg]\n                            else:\n                                out.font_attr_segs[row_counter].append(wseg)\n                idx += cols\n                row_counter += 1\n            out.lines.extend(wlines)\n    for key in inp.annotations:\n        if not isinstance(key, int):\n            out.annotations[key] = inp.annotations[key]\n    return (out, new_line_indices)", "docstring": "Wrap RichTextLines according to maximum number of columns.\n\nProduces a new RichTextLines object with the text lines, font_attr_segs and\nannotations properly wrapped. This ought to be used sparingly, as in most\ncases, command handlers producing RichTextLines outputs should know the\nscreen/panel width via the screen_info kwarg and should produce properly\nlength-limited lines in the output accordingly.\n\nArgs:\ninp: Input RichTextLines object.\ncols: Number of columns, as an int.\n\nReturns:\n1) A new instance of RichTextLines, with line lengths limited to cols.\n2) A list of new (wrapped) line index. For example, if the original input\nconsists of three lines and only the second line is wrapped, and it's\nwrapped into two lines, this return value will be: [0, 1, 3].\nRaises:\nValueError: If inputs have invalid types.", "source": "github-repos"}
{"code": "def obtain_all_bond_lengths(sp1, sp2, default_bl=None):\n    \n    if isinstance(sp1, Element):\n        sp1 = sp1.symbol\n    if isinstance(sp2, Element):\n        sp2 = sp2.symbol\n    syms = tuple(sorted([sp1, sp2]))\n    if syms in bond_lengths:\n        return bond_lengths[syms].copy()\n    elif default_bl is not None:\n        return {1: default_bl}\n    else:\n        raise ValueError(\"No bond data for elements {} - {}\".format(*syms))", "docstring": "Obtain bond lengths for all bond orders from bond length database\n\nArgs:\nsp1 (Specie): First specie.\nsp2 (Specie): Second specie.\ndefault_bl: If a particular type of bond does not exist, use this\nbond length as a default value (bond order = 1).\nIf None, a ValueError will be thrown.\n\nReturn:\nA dict mapping bond order to bond length in angstrom", "source": "juraj-google-style"}
{"code": "def _compute_full_path(self, fn_parent_ref, fn_parent_seq):\n    names = []\n    root_id = 5\n    (index, seq) = (fn_parent_ref, fn_parent_seq)\n    is_orphan = False\n    while (index != root_id):\n        try:\n            parent_entry = self[index]\n            if (seq != parent_entry.header.seq_number):\n                is_orphan = True\n                break\n            else:\n                parent_fn_attr = parent_entry.get_main_filename_attr()\n                (index, seq) = (parent_fn_attr.content.parent_ref, parent_fn_attr.content.parent_seq)\n                names.append(parent_fn_attr.content.name)\n        except ValueError as e:\n            is_orphan = True\n            break\n    return (is_orphan, '\\\\'.join(reversed(names)))", "docstring": "Based on the parent reference and sequence, computes the full path.\n\nThe majority of the files in a filesystem has a very small amount of\nparent directories. By definition, a filesystem is expected to have\nmuch smaller amount of directories than files. As such we use a function\nwith the minimal amount of arguments to find a parent, that way we can\ncache the results easily and speed up the overall code.\n\nArgs:\nfn_parent_ref (int): Parent reference number\nfn_parent_seq (int): Parent sequence number\n\nReturns:\ntuple(bool, str): A tuple where the first element is a boolean that\nis ``True`` if the the file is orphan and ``False`` if not. The\nsecond element is a string with the full path without the file name", "source": "codesearchnet"}
{"code": "def apply_and_name(self, aggregator):\n    reduced_df = self._apply(aggregator)\n    if (len(self.names) != len(reduced_df.columns)):\n        raise IndexError('ColumnFunction creates more columns than it has names for.')\n    reduced_df.columns = self.names\n    return reduced_df", "docstring": "Fetches the row-aggregated input columns for this ColumnFunction.\n\nArgs:\naggregator (Aggregator)\n\nReturns:\npd.DataFrame: The dataframe has columns with names self.names\nthat were created by this ColumnFunction,\nand is indexed by the index that was passed to\naggregator.aggregate(index).", "source": "codesearchnet"}
{"code": "def delete_qubits(self, indices):\n        \n        if not isinstance(indices, list):\n            indices = [indices]\n\n        self._z = np.delete(self._z, indices)\n        self._x = np.delete(self._x, indices)\n\n        return self", "docstring": "Delete pauli at the indices.\n\nArgs:\nindices(list[int]): the indices of to-be-deleted paulis\n\nReturns:\nPauli: self", "source": "juraj-google-style"}
{"code": "def find(self, **kwargs):\n        \n\n        if len(kwargs) != 1:\n            raise ValueError(\"One and only one keyword argument accepted\")\n\n        key = list(kwargs.keys())[0]\n        value = list(kwargs.values())[0]\n        ret = None\n        for row in self.values():\n            if row[key] == value:\n                ret = row\n                break\n        return ret", "docstring": "Finds row matching specific field value\n\nArgs:\n**kwargs: (**only one argument accepted**) fielname=value, e.g., formula=\"OH\"\n\nReturns: list element or None", "source": "juraj-google-style"}
{"code": "def _load_from_cache_if_available(self, key):\n    \n    if key in self._cache:\n      entity = self._cache[key]  \n      if entity is None or entity._key == key:\n        \n        \n        raise tasklets.Return(entity)", "docstring": "Returns a cached Model instance given the entity key if available.\n\nArgs:\nkey: Key instance.\n\nReturns:\nA Model instance if the key exists in the cache.", "source": "juraj-google-style"}
{"code": "def browse(self, max_lines=None, headers=None):\n    \n    if self.path.startswith('gs:\n      lines = CsvFile._read_gcs_lines(self.path, max_lines)\n    else:\n      lines = CsvFile._read_local_lines(self.path, max_lines)\n    if len(lines) == 0:\n      return pd.DataFrame(columns=headers)\n    columns_size = len(next(csv.reader([lines[0]], delimiter=self._delimiter)))\n    if headers is None:\n      headers = ['col' + newstr(e) for e in range(columns_size)]\n    if len(headers) != columns_size:\n      raise Exception('Number of columns in CSV do not match number of headers')\n    buf = StringIO()\n    for line in lines:\n      buf.write(line)\n      buf.write('\\n')\n    buf.seek(0)\n    df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)\n    for key, col in df.iteritems():\n      if self._is_probably_categorical(col):\n        df[key] = df[key].astype('category')\n    return df", "docstring": "Try reading specified number of lines from the CSV object.\nArgs:\nmax_lines: max number of lines to read. If None, the whole file is read\nheaders: a list of strings as column names. If None, it will use \"col0, col1...\"\nReturns:\nA pandas DataFrame with the schema inferred from the data.\nRaises:\nException if the csv object cannot be read or not enough lines to read, or the\nheaders size does not match columns size.", "source": "juraj-google-style"}
{"code": "def get_diff_coeff(hvec, n=1):\n    \n    hvec = np.array(hvec, dtype=np.float)\n    acc = len(hvec)\n    exp = np.column_stack([np.arange(acc)]*acc)\n    a = np.vstack([hvec] * acc) ** exp\n    b = np.zeros(acc)\n    b[n] = factorial(n)\n    return np.linalg.solve(a, b)", "docstring": "Helper function to find difference coefficients of an\nderivative on an arbitrary mesh.\n\nArgs:\nhvec (1D array-like): sampling stencil\nn (int): degree of derivative to find", "source": "juraj-google-style"}
{"code": "def _GetDataTypeMap(self, name):\n    \n    data_type_map = self._data_type_maps.get(name, None)\n    if not data_type_map:\n      data_type_map = self._fabric.CreateDataTypeMap(name)\n      self._data_type_maps[name] = data_type_map\n\n    return data_type_map", "docstring": "Retrieves a data type map defined by the definition file.\n\nThe data type maps are cached for reuse.\n\nArgs:\nname (str): name of the data type as defined by the definition file.\n\nReturns:\ndtfabric.DataTypeMap: data type map which contains a data type definition,\nsuch as a structure, that can be mapped onto binary data.", "source": "juraj-google-style"}
{"code": "def prefix(self: EventSetOrNode, prefix: str) -> EventSetOrNode:\n    from temporian.core.operators.prefix import prefix as _prefix\n    return _prefix(self, prefix=prefix)", "docstring": "Adds a prefix to the names of the features in an\n[`EventSet`][temporian.EventSet].\n\nUsage example:\n```python\n>>> a = tp.event_set(\n...    timestamps=[0, 1],\n...    features={\"f1\": [0, 2], \"f2\": [5, 6]}\n... )\n>>> b = a * 5\n\n>>> # Prefix before glue to avoid duplicated names\n>>> c = tp.glue(a.prefix(\"original_\"), b.prefix(\"result_\"))\n>>> c\nindexes: ...\n'original_f1': [0 2]\n'original_f2': [5 6]\n'result_f1': [ 0 10]\n'result_f2': [25 30]\n...\n\n```\n\nArgs:\nprefix: Prefix to add in front of the feature names.\n\nReturns:\nPrefixed EventSet.", "source": "github-repos"}
{"code": "def check_errors(self, is_global=False):\n    \n    errors = self.global_errors if is_global else self.errors\n    if errors:\n      print('dfTimewolf encountered one or more errors:')\n      for error, critical in errors:\n        print('{0:s}  {1:s}'.format('CRITICAL: ' if critical else '', error))\n        if critical:\n          print('Critical error found. Aborting.')\n          sys.exit(-1)", "docstring": "Checks for errors and exits if any of them are critical.\n\nArgs:\nis_global: If True, check the global_errors attribute. If false, check the\nerror attribute.", "source": "juraj-google-style"}
{"code": "def get_config(model_type: str, feature: str) -> OnnxConfig:\n    return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]", "docstring": "Gets the OnnxConfig for a model_type and feature combination.\n\nArgs:\nmodel_type (`str`):\nThe model type to retrieve the config for.\nfeature (`str`):\nThe feature to retrieve the config for.\n\nReturns:\n`OnnxConfig`: config for the combination", "source": "github-repos"}
{"code": "def move_all_files_from_subfolders_to_top(folder_path, delete_subfolders=False, copy=False):\n    \n    for item in os.listdir(folder_path):\n        sub_path = os.path.join(folder_path, item)\n\n        if os.path.isdir(sub_path):\n\n            for sub_item in os.listdir(sub_path):\n                src = os.path.join(sub_path, sub_item)\n                target = os.path.join(folder_path, sub_item)\n\n                if copy:\n                    if os.path.isfile(src):\n                        shutil.copy(src, target)\n                    else:\n                        shutil.copytree(src, target)\n                else:\n                    shutil.move(src, target)\n\n            if delete_subfolders:\n                shutil.rmtree(sub_path)", "docstring": "Move all files/folder from all subfolders of `folder_path` on top into `folder_path`.\n\nArgs:\nfolder_path (str): Path of the folder.\ndelete_subfolders (bool): If True the subfolders are deleted after all items are moved out of it.\ncopy (bool): If True copies the files instead of moving. (default False)", "source": "juraj-google-style"}
{"code": "def __init__(self, zone, environment):\n    \n    self._zone = zone\n    self._environment = environment\n    self._gcs_dag_location = None", "docstring": "Initializes an instance of a Composer object.\n\nArgs:\nzone: Zone in which Composer environment has been created.\nenvironment: Name of the Composer environment.", "source": "juraj-google-style"}
{"code": "def to_proto(self, export_scope=None):\n    if export_scope is None:\n        return self.saver_def\n    if not (self.saver_def.filename_tensor_name.startswith(export_scope) and self.saver_def.save_tensor_name.startswith(export_scope) and self.saver_def.restore_op_name.startswith(export_scope)):\n        return None\n    saver_def = saver_pb2.SaverDef()\n    saver_def.CopyFrom(self.saver_def)\n    saver_def.filename_tensor_name = ops.strip_name_scope(saver_def.filename_tensor_name, export_scope)\n    saver_def.save_tensor_name = ops.strip_name_scope(saver_def.save_tensor_name, export_scope)\n    saver_def.restore_op_name = ops.strip_name_scope(saver_def.restore_op_name, export_scope)\n    return saver_def", "docstring": "Converts this `Saver` to a `SaverDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `SaverDef` protocol buffer.", "source": "github-repos"}
{"code": "def add_keyword(self, keyword, schema=None, source=None):\n    keyword_dict = self._sourced_dict(source, value=keyword)\n    if (schema is not None):\n        keyword_dict['schema'] = schema\n    self._append_to('keywords', keyword_dict)", "docstring": "Add a keyword.\n\nArgs:\nkeyword(str): keyword to add.\nschema(str): schema to which the keyword belongs.\nsource(str): source for the keyword.", "source": "codesearchnet"}
{"code": "def create_group(self, name):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.create_group(name)", "docstring": "Create a new group.\n\nArgs:\nname (string): Name of the group to create.\n\nReturns:\n(bool): True on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def is_collection(return_type: FhirPathDataType) -> bool:\n    return return_type and return_type.cardinality == Cardinality.COLLECTION", "docstring": "Indicates if the return type represents a collection.\n\nArgs:\nreturn_type: The data type to describe.\n\nReturns:\nTrue if `return_type` represents an element with cardinality greater than\none. False otherwise.", "source": "github-repos"}
{"code": "def timestamp_ids(self, time_precision=0.02):\n    return self.convert_tokens_to_ids(['<|%.2f|>' % (i * time_precision) for i in range(1500 + 1)])", "docstring": "Compute the timestamp token ids for a given precision and save to least-recently used (LRU) cache.\n\nArgs:\ntime_precision (`float`, *optional*, defaults to 0.02):\nThe time ratio to convert from token to time.", "source": "github-repos"}
{"code": "def console_map_string_to_font(s: str, fontCharX: int, fontCharY: int) -> None:\n    \n    lib.TCOD_console_map_string_to_font_utf(_unicode(s), fontCharX, fontCharY)", "docstring": "Remap a string of codes to a contiguous set of tiles.\n\nArgs:\ns (AnyStr): A string of character codes to map to new values.\nThe null character `'\\\\x00'` will prematurely end this\nfunction.\nfontCharX (int): The starting X tile coordinate on the loaded tileset.\n0 is the leftmost tile.\nfontCharY (int): The starting Y tile coordinate on the loaded tileset.\n0 is the topmost tile.", "source": "juraj-google-style"}
{"code": "def get(self, *, txid, headers=None):\n        \n        block_list = self.transport.forward_request(\n            method='GET',\n            path=self.path,\n            params={'transaction_id': txid},\n            headers=headers,\n        )\n        return block_list[0] if len(block_list) else None", "docstring": "Get the block that contains the given transaction id (``txid``)\nelse return ``None``\n\nArgs:\ntxid (str): Transaction id.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\n:obj:`list` of :obj:`int`: List of block heights.", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[numpy.ndarray], model: BaseEstimator, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    predictions = self._model_inference_fn(model, batch, inference_args)\n    return utils._convert_to_result(batch, predictions, model_id=self._model_uri)", "docstring": "Runs inferences on a batch of numpy arrays.\n\nArgs:\nbatch: A sequence of examples as numpy arrays. They should\nbe single examples.\nmodel: A numpy model or pipeline. Must implement predict(X).\nWhere the parameter X is a numpy array.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.\n\nArgs:\nrequest: (BigqueryJobsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Job) The response message.", "source": "github-repos"}
{"code": "def contact(self, id):\n        \n        try:\n            json = self.skype.conn(\"POST\", \"{0}/users/batch/profiles\".format(SkypeConnection.API_USER),\n                                   json={\"usernames\": [id]}, auth=SkypeConnection.Auth.SkypeToken).json()\n            contact = SkypeContact.fromRaw(self.skype, json[0])\n            if contact.id not in self.contactIds:\n                self.contactIds.append(contact.id)\n            return self.merge(contact)\n        except SkypeApiException as e:\n            if len(e.args) >= 2 and getattr(e.args[1], \"status_code\", None) == 403:\n                \n                return None\n            raise", "docstring": "Retrieve all details for a specific contact, including fields such as birthday and mood.\n\nArgs:\nid (str): user identifier to lookup\n\nReturns:\nSkypeContact: resulting contact object", "source": "juraj-google-style"}
{"code": "def get_stored_variation(self, experiment, user_profile):\n    user_id = user_profile.user_id\n    variation_id = user_profile.get_variation_for_experiment(experiment.id)\n    if variation_id:\n        variation = self.config.get_variation_from_id(experiment.key, variation_id)\n        if variation:\n            self.logger.info(('Found a stored decision. User \"%s\" is in variation \"%s\" of experiment \"%s\".' % (user_id, variation.key, experiment.key)))\n            return variation\n    return None", "docstring": "Determine if the user has a stored variation available for the given experiment and return that.\n\nArgs:\nexperiment: Object representing the experiment for which user is to be bucketed.\nuser_profile: UserProfile object representing the user's profile.\n\nReturns:\nVariation if available. None otherwise.", "source": "codesearchnet"}
{"code": "def plot_waves(self, ax=None, fontsize=12, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax)\n    ax.grid(True)\n    ax.set_xlabel('r [Bohr]')\n    ax.set_ylabel('$r\\\\phi,\\\\, r\\\\tilde\\\\phi\\\\, [Bohr]^{-\\\\frac{1}{2}}$')\n    for (state, rfunc) in self.pseudo_partial_waves.items():\n        ax.plot(rfunc.mesh, (rfunc.mesh * rfunc.values), lw=2, label=('PS-WAVE: ' + state))\n    for (state, rfunc) in self.ae_partial_waves.items():\n        ax.plot(rfunc.mesh, (rfunc.mesh * rfunc.values), lw=2, label=('AE-WAVE: ' + state))\n    ax.legend(loc='best', shadow=True, fontsize=fontsize)\n    return fig", "docstring": "Plot the AE and the pseudo partial waves.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nfontsize: fontsize for legends and titles\n\nReturns: `matplotlib` figure", "source": "codesearchnet"}
{"code": "def get_all_dataset_names(configuration=None, **kwargs):\n        \n        \n        dataset = Dataset(configuration=configuration)\n        dataset['id'] = 'all dataset names'  \n        return dataset._write_to_hdx('list', kwargs, 'id')", "docstring": "Get all dataset names in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n**kwargs: See below\nlimit (int): Number of rows to return. Defaults to all dataset names.\noffset (int): Offset in the complete result for where the set of returned dataset names should begin\n\nReturns:\nList[str]: list of all dataset names in HDX", "source": "juraj-google-style"}
{"code": "def get(self, group=None, backend=None):\n    from .options import Store, Options\n    keywords = {}\n    groups = (Options._option_groups if (group is None) else [group])\n    backend = (backend if backend else Store.current_backend)\n    for group in groups:\n        optsobj = Store.lookup_options(backend, self._obj, group)\n        keywords = dict(keywords, **optsobj.kwargs)\n    return Options(**keywords)", "docstring": "Returns the corresponding Options object.\n\nArgs:\ngroup: The options group. Flattens across groups if None.\nbackend: Current backend if None otherwise chosen backend.\n\nReturns:\nOptions object associated with the object containing the\napplied option keywords.", "source": "codesearchnet"}
{"code": "def line_id(self, lat):\n        \n        if self.grid == 'WAC':\n            line = np.rint(1.0 + self.LINE_PROJECTION_OFFSET -\n                           self.A_AXIS_RADIUS * np.pi * lat / (self.MAP_SCALE * 1e-3 * 180))\n        else:\n            line = np.rint(float(self.LINE_PROJECTION_OFFSET) - float(self.MAP_RESOLUTION)\n                           * (lat - float(self.CENTER_LATITUDE))) + 1\n        return self._control_line(line)", "docstring": "Return the corresponding line\n\nArgs:\nlat (int): latitude in degree\n\nReturns:\nCorreponding line", "source": "juraj-google-style"}
{"code": "def pose_inv(pose):\n    pose_inv = np.zeros((4, 4))\n    pose_inv[(:3, :3)] = pose[(:3, :3)].T\n    pose_inv[(:3, 3)] = (- pose_inv[(:3, :3)].dot(pose[(:3, 3)]))\n    pose_inv[(3, 3)] = 1.0\n    return pose_inv", "docstring": "Computes the inverse of a homogenous matrix corresponding to the pose of some\nframe B in frame A. The inverse is the pose of frame A in frame B.\n\nArgs:\npose: numpy array of shape (4,4) for the pose to inverse\n\nReturns:\nnumpy array of shape (4,4) for the inverse pose", "source": "codesearchnet"}
{"code": "def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n    input_shape = inputs_embeds.size()[:-1]\n    sequence_length = input_shape[1]\n    position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)\n    return position_ids.unsqueeze(0).expand(input_shape)", "docstring": "We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\nArgs:\ninputs_embeds: torch.Tensor\n\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def CopyFromStringTuple(self, time_elements_tuple):\n    \n    if len(time_elements_tuple) < 7:\n      raise ValueError((\n          'Invalid time elements tuple at least 7 elements required,'\n          'got: {0:d}').format(len(time_elements_tuple)))\n\n    super(TimeElementsWithFractionOfSecond, self).CopyFromStringTuple(\n        time_elements_tuple)\n\n    try:\n      fraction_of_second = decimal.Decimal(time_elements_tuple[6])\n    except (TypeError, ValueError):\n      raise ValueError('Invalid fraction of second value: {0!s}'.format(\n          time_elements_tuple[6]))\n\n    if fraction_of_second < 0.0 or fraction_of_second >= 1.0:\n      raise ValueError('Fraction of second value: {0:f} out of bounds.'.format(\n          fraction_of_second))\n\n    self.fraction_of_second = fraction_of_second", "docstring": "Copies time elements from string-based time elements tuple.\n\nArgs:\ntime_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]):\ntime elements, contains year, month, day of month, hours, minutes,\nseconds and fraction of seconds.\n\nRaises:\nValueError: if the time elements tuple is invalid.", "source": "juraj-google-style"}
{"code": "def process_sequence(sequence, rules=None, skip_non_vietnamese=True):\n    result = ''\n    raw = result\n    result_parts = []\n    if (rules is None):\n        rules = get_telex_definition()\n    accepted_chars = _accepted_chars(rules)\n    for key in sequence:\n        if (key not in accepted_chars):\n            result_parts.append(result)\n            result_parts.append(key)\n            result = ''\n            raw = ''\n        else:\n            (result, raw) = process_key(string=result, key=key, fallback_sequence=raw, rules=rules, skip_non_vietnamese=skip_non_vietnamese)\n    result_parts.append(result)\n    return ''.join(result_parts)", "docstring": "\\\nConvert a key sequence into a Vietnamese string with diacritical marks.\n\nArgs:\nrules (optional): see docstring for process_key().\nskip_non_vietnamese (optional): see docstring for process_key().\n\nIt even supports continous key sequences connected by separators.\ni.e. process_sequence('con meof.ddieen') should work.", "source": "codesearchnet"}
{"code": "def add_mixin(self, mixin):\n        \n        raw = mixin.tokens[0][0].raw()\n        if raw in self._mixins:\n            self._mixins[raw].append(mixin)\n        else:\n            self._mixins[raw] = [mixin]", "docstring": "Add mixin to scope\nArgs:\nmixin (Mixin): Mixin object", "source": "juraj-google-style"}
{"code": "def describe(self, **kwargs):\n    description = {'label': self.label, 'details': inspect.cleandoc(self.details), 'type': ('list of {}'.format(self.type) if self.many else self.type), 'spec': self.spec, 'read_only': self.read_only, 'write_only': self.write_only, 'allow_null': self.allow_null}\n    description.update(kwargs)\n    return description", "docstring": "Describe this field instance for purpose of self-documentation.\n\nArgs:\nkwargs (dict): dictionary of additional description items for\nextending default description\n\nReturns:\ndict: dictionary of description items\n\nSuggested way for overriding description fields or extending it with\nadditional items is calling super class method with new/overriden\nfields passed as keyword arguments like following:\n\n.. code-block:: python\n\nclass DummyField(BaseField):\ndef description(self, **kwargs):\nsuper().describe(is_dummy=True, **kwargs)", "source": "codesearchnet"}
{"code": "def create_streaming_endpoint(access_token, name, description='New Streaming Endpoint', scale_units='1'):\n    path = '/StreamingEndpoints'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = (((((('{ \\t\\t\"Id\":null, \\t\\t\"Name\":\"' + name) + '\", \\t\\t\"Description\":\"') + description) + '\", \\t\\t\"Created\":\"0001-01-01T00:00:00\", \\t\\t\"LastModified\":\"0001-01-01T00:00:00\", \\t\\t\"State\":null, \\t\\t\"HostName\":null, \\t\\t\"ScaleUnits\":\"') + scale_units) + '\", \\t\\t\"CrossSiteAccessPolicies\":{ \\t\\t\\t\"ClientAccessPolicy\":\"<access-policy><cross-domain-access><policy><allow-from http-request-headers=\\\\\"*\\\\\"><domain uri=\\\\\"http:\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Streaming Endpoint.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nname (str): A Media Service Streaming Endpoint Name.\ndescription (str): A Media Service Streaming Endpoint Description.\nscale_units (str): A Media Service Scale Units Number.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def NewRow(self, value=''):\n    newrow = self.row_class()\n    newrow.row = (self.size + 1)\n    newrow.table = self\n    headers = self._Header()\n    for header in headers:\n        newrow[header] = value\n    return newrow", "docstring": "Fetches a new, empty row, with headers populated.\n\nArgs:\nvalue: Initial value to set each row entry to.\n\nReturns:\nA Row() object.", "source": "codesearchnet"}
{"code": "def recipe_video(config, auth_read, sheet, tab, project, dataset, table):\n    sheets(config, {'__comment__': 'Copy the tamplate sheet to the users sheet.  If it already exists, nothing happens.', 'auth': auth_read, 'template': {'sheet': 'https:\n    video(config, {'__comment__': 'Read video effects and values from sheet and/or bigquery.', 'auth': auth_read, 'sheets': {'sheet': sheet, 'tab': tab}, 'bigquery': {'project': project, 'dataset': dataset, 'table': table}})", "docstring": "Add images, text, and audio to videos.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nsheet (string) - Name or URL of sheet.\ntab (string) - Name of sheet tab.\nproject (string) - Google Cloud Project Identifier.\ndataset (string) - Name of dataset.\ntable (string) - Name of table.", "source": "github-repos"}
{"code": "def encrypt(self, mesg):\n    seqn = next(self._tx_sn)\n    rv = self._tx_tinh.enc(s_msgpack.en((seqn, mesg)))\n    return rv", "docstring": "Wrap a message with a sequence number and encrypt it.\n\nArgs:\nmesg: The mesg to encrypt.\n\nReturns:\nbytes: The encrypted message.", "source": "codesearchnet"}
{"code": "def unpack(self, buff=None, offset=0):\n        \n        band_type = UBInt16(enum_ref=MeterBandType)\n        band_type.unpack(buff, offset)\n        self.__class__ = MeterBandType(band_type.value).find_class()\n\n        length = UBInt16()\n        length.unpack(buff, offset=offset+2)\n\n        super().unpack(buff[:offset+length.value], offset)", "docstring": "Unpack *buff* into this object.\n\nThis method will convert a binary data into a readable value according\nto the attribute format.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def list2str(self, l: List, joiner: str) -> str:\n    result = str()\n    for item in l:\n        if isinstance(item, list):\n            result = ((result + self.list2str(item, joiner)) + joiner)\n        elif isinstance(item, dict):\n            result = ((result + self.dict2str(item, joiner)) + joiner)\n        elif item:\n            result = ((result + str(item)) + joiner)\n    return result", "docstring": "Convert list to str as input for tokenizer\n\nArgs:\nl (list): list for converting\njoiner (str): join the elements using this string to separate them.\n\nReturns: the value of the list as a string", "source": "codesearchnet"}
{"code": "def ndtri(p, name=\"ndtri\"):\n  \n\n  with tf.name_scope(name):\n    p = tf.convert_to_tensor(value=p, name=\"p\")\n    if dtype_util.as_numpy_dtype(p.dtype) not in [np.float32, np.float64]:\n      raise TypeError(\n          \"p.dtype=%s is not handled, see docstring for supported types.\"\n          % p.dtype)\n    return _ndtri(p)", "docstring": "The inverse of the CDF of the Normal distribution function.\n\nReturns x such that the area under the pdf from minus infinity to x is equal\nto p.\n\nA piece-wise rational approximation is done for the function.\nThis is a port of the implementation in netlib.\n\nArgs:\np: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"ndtri\").\n\nReturns:\nx: `Tensor` with `dtype=p.dtype`.\n\nRaises:\nTypeError: if `p` is not floating-type.", "source": "juraj-google-style"}
{"code": "def __update_cleanup_paths(new_path):\n    cleanup_dirs = settings.CFG['cleanup_paths'].value\n    cleanup_dirs = set(cleanup_dirs)\n    cleanup_dirs.add(new_path)\n    cleanup_dirs = list(cleanup_dirs)\n    settings.CFG['cleanup_paths'] = cleanup_dirs", "docstring": "Add the new path to the list of paths to clean up afterwards.\n\nArgs:\nnew_path: Path to the directory that need to be cleaned up.", "source": "codesearchnet"}
{"code": "def pretty_print_fhir_to_json_string_for_analytics(fhir_proto: message.Message, *, indent_size: int=2) -> str:\n    printer = _json_printer.JsonPrinter.pretty_printer_for_analytics(_PRIMITIVE_HANDLER, indent_size=indent_size)\n    return printer.print(fhir_proto)", "docstring": "Returns an Analytic FHIR JSON representation with spaces and newlines.\n\nArgs:\nfhir_proto: The proto to serialize into a \"pretty\" JSON string.\nindent_size: An integer denoting the size of space indentation for lexical\nscoping. Defaults to 2.\n\nReturns:\nAn Analytic FHIR JSON representation with spaces and newlines.", "source": "github-repos"}
{"code": "def split_sequence_columns_v2(feature_columns):\n    sequence_columns = []\n    non_sequence_columns = []\n    for column in feature_columns:\n        if not isinstance(column, (_TPUEmbeddingColumnV2, _TPUSharedEmbeddingColumnV2)):\n            raise TypeError(f'column must be a _TPUEmbeddingColumnV2 or _TPUSharedEmbeddingColumnV2 but got {type(column)} instead.')\n        if column.is_sequence_column():\n            sequence_columns.append(column)\n        else:\n            non_sequence_columns.append(column)\n    return (sequence_columns, non_sequence_columns)", "docstring": "Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.\n\nFor use in a TPUEstimator model_fn function. E.g.\n\ndef model_fn(features):\nsequence_columns, feature_columns = (\ntf.tpu.feature_column.split_sequence_columns(feature_columns))\ninput = tf.feature_column.input_layer(\nfeatures=features, feature_columns=feature_columns)\nsequence_features, sequence_lengths = (\ntf.contrib.feature_column.sequence_input_layer(\nfeatures=features, feature_columns=sequence_columns))\n\nArgs:\nfeature_columns: A list of _TPUEmbeddingColumns to split.\n\nReturns:\nTwo lists of _TPUEmbeddingColumns, the first is the sequence columns and the\nsecond is the non-sequence columns.", "source": "github-repos"}
{"code": "async def reclaim_task(context, task):\n    while True:\n        log.debug(('waiting %s seconds before reclaiming...' % context.config['reclaim_interval']))\n        (await asyncio.sleep(context.config['reclaim_interval']))\n        if (task != context.task):\n            return\n        log.debug('Reclaiming task...')\n        try:\n            context.reclaim_task = (await context.temp_queue.reclaimTask(get_task_id(context.claim_task), get_run_id(context.claim_task)))\n            clean_response = deepcopy(context.reclaim_task)\n            clean_response['credentials'] = '{********}'\n            log.debug('Reclaim task response:\\n{}'.format(pprint.pformat(clean_response)))\n        except taskcluster.exceptions.TaskclusterRestFailure as exc:\n            if (exc.status_code == 409):\n                log.debug('409: not reclaiming task.')\n                if (context.proc and (task == context.task)):\n                    message = 'Killing task after receiving 409 status in reclaim_task'\n                    log.warning(message)\n                    (await context.proc.stop())\n                    raise ScriptWorkerTaskException(message, exit_code=context.config['invalid_reclaim_status'])\n                break\n            else:\n                raise", "docstring": "Try to reclaim a task from the queue.\n\nThis is a keepalive / heartbeat.  Without it the job will expire and\npotentially be re-queued.  Since this is run async from the task, the\ntask may complete before we run, in which case we'll get a 409 the next\ntime we reclaim.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context\n\nRaises:\ntaskcluster.exceptions.TaskclusterRestFailure: on non-409 status_code\nfrom taskcluster.aio.Queue.reclaimTask()", "source": "codesearchnet"}
{"code": "def __init__(self, cell):\n    self._cell = cell", "docstring": "Creates a new StringGaugeCell.\n\nArgs:\ncell: A c pointer of TFE_MonitoringStringGaugeCell.", "source": "github-repos"}
{"code": "def checkUser(self, user):\n        \n        return not self.conn(\"POST\", \"{0}/GetCredentialType.srf\".format(SkypeConnection.API_MSACC),\n                             json={\"username\": user}).json().get(\"IfExistsResult\")", "docstring": "Query a username or email address to see if a corresponding Microsoft account exists.\n\nArgs:\nuser (str): username or email address of an account\n\nReturns:\nbool: whether the account exists", "source": "juraj-google-style"}
{"code": "def head(self, n=10):\n    r = self.__repr__().split('\\n')\n    print('\\n'.join(r[:n]), end=' ')", "docstring": "Display the top of the file.\n\nArgs:\nn (int): Number of lines to display", "source": "codesearchnet"}
{"code": "def mesh_split(tensor, device_mesh, tensor_split_dims_mapping, use_sharding_op=False, manual_mesh_dims=None, unspecified_dims=None):\n    sharding = mesh_split_sharding(device_mesh, tensor_split_dims_mapping, manual_mesh_dims)\n    return sharding.apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])", "docstring": "Returns a tensor that is split along multiple dimensions in a device mesh.\n\nArgs:\ntensor: A tf.Tensor to split.\ndevice_mesh: An np.ndarray describing the topology of the device mesh and\neach element is the ID of the device in the topology.\ntensor_split_dims_mapping: A list of integers that map each tensor axis to\nthe device mesh axis along which it is sharded. Its length is the tensor\nrank, and tensor_split_dims_mapping[i] is device mesh axis for tensor\ndimension i. Use -1 for tensor dimensions that are not sharded.\nuse_sharding_op: If true, adds a sharding op to set the sharding.\nmanual_mesh_dims: An optional list of mesh dims for manual subgroups.\nunspecified_dims: An optional list of dimensions unspecified.\n\nRaises:\nValueError: The number of tensor split dimensions is larger than device mesh\nrank.", "source": "github-repos"}
{"code": "def categorical(logits, num_samples, dtype=None, seed=None, name=None):\n    with ops.name_scope(name, 'categorical', [logits]):\n        return multinomial_categorical_impl(logits, num_samples, dtype, seed)", "docstring": "Draws samples from a categorical distribution.\n\nExample:\n\n```python\n# samples has shape [1, 5], where each value is either 0 or 1 with equal\n# probability.\nsamples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5)\n```\n\nArgs:\nlogits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice\n`[i, :]` represents the unnormalized log-probabilities for all classes.\nnum_samples: 0-D.  Number of independent samples to draw for each row slice.\ndtype: The integer type of the output: `int32` or `int64`. Defaults to\n`int64`.\nseed: A Python integer. Used to create a random seed for the distribution.\nSee `tf.random.set_seed` for behavior.\nname: Optional name for the operation.\n\nReturns:\nThe drawn samples of shape `[batch_size, num_samples]`.", "source": "github-repos"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    max_size = kwargs.pop('max_size', None)\n    size = get_size_dict(size, max_size=max_size, default_to_square=False)\n    if 'shortest_edge' in size and 'longest_edge' in size:\n        size, max_size = (size['shortest_edge'], size['longest_edge'])\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n        max_size = None\n    else:\n        raise ValueError(f\"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.\")\n    size = get_maskformer_resize_output_image_size(image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, input_data_format=input_data_format)\n    image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n    return image", "docstring": "Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an\nint, smaller edge of the image will be matched to this number.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nThe size of the output image.\nsize_divisor (`int`, *optional*, defaults to 0):\nIf `size_divisor` is given, the output image size will be divisible by the number.\nresample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):\nResampling filter to use when resizing the image.\ndata_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def AddVSSProcessingOptions(self, argument_group):\n    \n    argument_group.add_argument(\n        '--no_vss', '--no-vss', dest='no_vss', action='store_true',\n        default=False, help=(\n            'Do not scan for Volume Shadow Snapshots (VSS). This means that '\n            'Volume Shadow Snapshots (VSS) are not processed.'))\n\n    argument_group.add_argument(\n        '--vss_only', '--vss-only', dest='vss_only', action='store_true',\n        default=False, help=(\n            'Do not process the current volume if Volume Shadow Snapshots '\n            '(VSS) have been selected.'))\n\n    argument_group.add_argument(\n        '--vss_stores', '--vss-stores', dest='vss_stores', action='store',\n        type=str, default=None, help=(\n            'Define Volume Shadow Snapshots (VSS) (or stores that need to be '\n            'processed. A range of stores can be defined as: \"3..5\". '\n            'Multiple stores can be defined as: \"1,3,5\" (a list of comma '\n            'separated values). Ranges and lists can also be combined as: '\n            '\"1,3..5\". The first store is 1. All stores can be defined as: '\n            '\"all\".'))", "docstring": "Adds the VSS processing options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def imrescale(img, scale, return_scale=False, interpolation='bilinear'):\n    (h, w) = img.shape[:2]\n    if isinstance(scale, (float, int)):\n        if (scale <= 0):\n            raise ValueError('Invalid scale {}, must be positive.'.format(scale))\n        scale_factor = scale\n    elif isinstance(scale, tuple):\n        max_long_edge = max(scale)\n        max_short_edge = min(scale)\n        scale_factor = min((max_long_edge / max(h, w)), (max_short_edge / min(h, w)))\n    else:\n        raise TypeError('Scale must be a number or tuple of int, but got {}'.format(type(scale)))\n    new_size = _scale_size((w, h), scale_factor)\n    rescaled_img = imresize(img, new_size, interpolation=interpolation)\n    if return_scale:\n        return (rescaled_img, scale_factor)\n    else:\n        return rescaled_img", "docstring": "Resize image while keeping the aspect ratio.\n\nArgs:\nimg (ndarray): The input image.\nscale (float or tuple[int]): The scaling factor or maximum size.\nIf it is a float number, then the image will be rescaled by this\nfactor, else if it is a tuple of 2 integers, then the image will\nbe rescaled as large as possible within the scale.\nreturn_scale (bool): Whether to return the scaling factor besides the\nrescaled image.\ninterpolation (str): Same as :func:`resize`.\n\nReturns:\nndarray: The rescaled image.", "source": "codesearchnet"}
{"code": "def setData(self, index, value, role=DTYPE_CHANGE_ROLE):\n    if ((role != DTYPE_CHANGE_ROLE) or (not index.isValid())):\n        return False\n    if (not self.editable()):\n        return False\n    self.layoutAboutToBeChanged.emit()\n    dtype = SupportedDtypes.dtype(value)\n    currentDtype = np.dtype(index.data(role=DTYPE_ROLE))\n    if (dtype is not None):\n        if (dtype != currentDtype):\n            columnName = self._dataFrame.columns[index.row()]\n            try:\n                if (dtype == np.dtype('<M8[ns]')):\n                    if (currentDtype in SupportedDtypes.boolTypes()):\n                        raise Exception(\"Can't convert a boolean value into a datetime value.\")\n                    self._dataFrame[columnName] = self._dataFrame[columnName].apply(pandas.to_datetime)\n                else:\n                    self._dataFrame[columnName] = self._dataFrame[columnName].astype(dtype)\n                self.dtypeChanged.emit(index.row(), dtype)\n                self.layoutChanged.emit()\n                return True\n            except Exception:\n                message = ('Could not change datatype %s of column %s to datatype %s' % (currentDtype, columnName, dtype))\n                self.changeFailed.emit(message, index, dtype)\n                raise\n    return False", "docstring": "Updates the datatype of a column.\n\nThe model must be initated with a dataframe already, since valid\nindexes are necessary. The `value` is a translated description of the\ndata type. The translations can be found at\n`qtpandas.translation.DTypeTranslator`.\n\nIf a datatype can not be converted, e.g. datetime to integer, a\n`NotImplementedError` will be raised.\n\nArgs:\nindex (QtCore.QModelIndex): The index of the column to be changed.\nvalue (str): The description of the new datatype, e.g.\n`positive kleine ganze Zahl (16 Bit)`.\nrole (Qt.ItemDataRole, optional): The role, which accesses and\nchanges data. Defaults to `DTYPE_CHANGE_ROLE`.\n\nRaises:\nNotImplementedError: If an error during conversion occured.\n\nReturns:\nbool: `True` if the datatype could be changed, `False` if not or if\nthe new datatype equals the old one.", "source": "codesearchnet"}
{"code": "def drug_matches_criteria(drug: Drug, **criteria: Dict[str, bool]) -> bool:\n    \n    for attribute, value in criteria.items():\n        if getattr(drug, attribute) != value:\n            return False\n    return True", "docstring": "Determines whether a drug, passed as an instance of :class:`.Drug`, matches\nthe specified criteria.\n\nArgs:\ndrug: a :class:`.Drug` instance\ncriteria: ``name=value`` pairs to match against the attributes of\nthe :class:`Drug` class. For example, you can include keyword\narguments like ``antidepressant=True``.", "source": "juraj-google-style"}
{"code": "def verify_cot_signatures(chain):\n    \n    for link in chain.links:\n        unsigned_path = link.get_artifact_full_path('public/chain-of-trust.json')\n        ed25519_signature_path = link.get_artifact_full_path('public/chain-of-trust.json.sig')\n        verify_link_ed25519_cot_signature(chain, link, unsigned_path, ed25519_signature_path)", "docstring": "Verify the signatures of the chain of trust artifacts populated in ``download_cot``.\n\nPopulate each link.cot with the chain of trust json body.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def dvds_upcoming(self, **kwargs):\n        \n        path = self._get_path('dvds_upcoming')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the upcoming movies from the API.\n\nArgs:\npage_limit (optional): number of movies to show per page, default=16\npage (optional): results page number, default=1\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def post_attention(self, token, x):\n    \n    with tf.control_dependencies([\n        self.previous_segment.assign(token[0]),\n        self.previous_vals.assign(token[1]),\n        self.previous_bias.assign(token[2]),\n        ]):\n      return tf.identity(x)", "docstring": "Called after self-attention. The memory can be updated here.\n\nArgs:\ntoken: Data returned by pre_attention, which can be used to carry over\nstate related to the current memory operation.\nx: a Tensor of data after self-attention and feed-forward\nReturns:\na (possibly modified) version of the input x", "source": "juraj-google-style"}
{"code": "def swo_set_host_buffer_size(self, buf_size):\n        \n        buf = ctypes.c_uint32(buf_size)\n        res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_HOST,\n                                             ctypes.byref(buf))\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return None", "docstring": "Sets the size of the buffer used by the host to collect SWO data.\n\nArgs:\nself (JLink): the ``JLink`` instance\nbuf_size (int): the new size of the host buffer\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def register(self, command: str, handler: Any):\n    if (not command.startswith('/')):\n        command = f'/{command}'\n    LOG.info('Registering %s to %s', command, handler)\n    self._routes[command].append(handler)", "docstring": "Register a new handler for a specific slash command\n\nArgs:\ncommand: Slash command\nhandler: Callback", "source": "codesearchnet"}
{"code": "def _ProcessMetadataFile(self, mediator, file_entry):\n    \n    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n\n    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)\n    for data_stream in file_entry.data_streams:\n      if self._abort:\n        break\n      self.last_activity_timestamp = time.time()\n\n      self._event_extractor.ParseMetadataFile(\n          mediator, file_entry, data_stream.name)", "docstring": "Processes a metadata file.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry of the metadata file.", "source": "juraj-google-style"}
{"code": "def render_root_node_with_subs(root_node, subs):\n    \n    def rec(node, acc):\n        if isinstance(node, e_nodes.EndOfStreamNode):\n            pass  \n        elif isinstance(node, e_nodes.OpenStartElementNode):\n            acc.append(\"<\")\n            acc.append(node.tag_name())\n            for child in node.children():\n                if isinstance(child, e_nodes.AttributeNode):\n                    acc.append(\" \")\n                    acc.append(validate_name(child.attribute_name().string()))\n                    acc.append(\"=\\\"\")\n                    \n                    \n                    rec(child.attribute_value(), acc)\n                    acc.append(\"\\\"\")\n            acc.append(\">\")\n            for child in node.children():\n                rec(child, acc)\n            acc.append(\"</\")\n            acc.append(validate_name(node.tag_name()))\n            acc.append(\">\\n\")\n        elif isinstance(node, e_nodes.CloseStartElementNode):\n            pass  \n        elif isinstance(node, e_nodes.CloseEmptyElementNode):\n            pass  \n        elif isinstance(node, e_nodes.CloseElementNode):\n            pass  \n        elif isinstance(node, e_nodes.ValueNode):\n            acc.append(escape_value(node.children()[0].string()))\n        elif isinstance(node, e_nodes.AttributeNode):\n            pass  \n        elif isinstance(node, e_nodes.CDataSectionNode):\n            acc.append(\"<![CDATA[\")\n            \n            acc.append(escape_value(node.cdata()))\n            acc.append(\"]]>\")\n        elif isinstance(node, e_nodes.EntityReferenceNode):\n            acc.append(escape_value(node.entity_reference()))\n        elif isinstance(node, e_nodes.ProcessingInstructionTargetNode):\n            acc.append(escape_value(node.processing_instruction_target()))\n        elif isinstance(node, e_nodes.ProcessingInstructionDataNode):\n            acc.append(escape_value(node.string()))\n        elif isinstance(node, e_nodes.TemplateInstanceNode):\n            raise UnexpectedElementException(\"TemplateInstanceNode\")\n        elif isinstance(node, e_nodes.NormalSubstitutionNode):\n            sub = subs[node.index()]\n\n            if isinstance(sub, e_nodes.BXmlTypeNode):\n                sub = render_root_node(sub.root())\n            else:\n                sub = escape_value(sub.string())\n\n            acc.append(sub)\n        elif isinstance(node, e_nodes.ConditionalSubstitutionNode):\n            sub = subs[node.index()]\n\n            if isinstance(sub, e_nodes.BXmlTypeNode):\n                sub = render_root_node(sub.root())\n            else:\n                sub = escape_value(sub.string())\n\n            acc.append(sub)\n        elif isinstance(node, e_nodes.StreamStartNode):\n            pass  \n\n    acc = []\n    for c in root_node.template().children():\n        rec(c, acc)\n    return \"\".join(acc)", "docstring": "render the given root node using the given substitutions into XML.\n\nArgs:\nroot_node (e_nodes.RootNode): the node to render.\nsubs (list[str]): the substitutions that maybe included in the XML.\n\nReturns:\nstr: the rendered XML document.", "source": "juraj-google-style"}
{"code": "def abs(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.abs, tf.float32)", "docstring": "Returns a TensorFluent for the abs function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the abs function.", "source": "codesearchnet"}
{"code": "def APFSUnlockVolume(fsapfs_volume, path_spec, key_chain):\n  \n  is_locked = fsapfs_volume.is_locked()\n  if is_locked:\n    password = key_chain.GetCredential(path_spec, 'password')\n    if password:\n      fsapfs_volume.set_password(password)\n\n    recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n    if recovery_password:\n      fsapfs_volume.set_recovery_password(recovery_password)\n\n    is_locked = not fsapfs_volume.unlock()\n\n  return not is_locked", "docstring": "Unlocks an APFS volume using the path specification.\n\nArgs:\nfsapfs_volume (pyapfs.volume): APFS volume.\npath_spec (PathSpec): path specification.\nkey_chain (KeyChain): key chain.\n\nReturns:\nbool: True if the volume is unlocked, False otherwise.", "source": "juraj-google-style"}
{"code": "def download(self, streamed=False, action=None, chunk_size=1024, **kwargs):\n    path = ('/projects/%s/export/download' % self.project_id)\n    result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)\n    return utils.response_content(result, streamed, action, chunk_size)", "docstring": "Download the archive of a project export.\n\nArgs:\nstreamed (bool): If True the data will be processed by chunks of\n`chunk_size` and each chunk is passed to `action` for\nreatment\naction (callable): Callable responsible of dealing with chunk of\ndata\nchunk_size (int): Size of each chunk\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\nstr: The blob content if streamed is False, None otherwise", "source": "codesearchnet"}
{"code": "def committed(self, partition):\n    assert (self.config['api_version'] >= (0, 8, 1)), 'Requires >= Kafka 0.8.1'\n    assert (self.config['group_id'] is not None), 'Requires group_id'\n    if (not isinstance(partition, TopicPartition)):\n        raise TypeError('partition must be a TopicPartition namedtuple')\n    if self._subscription.is_assigned(partition):\n        committed = self._subscription.assignment[partition].committed\n        if (committed is None):\n            self._coordinator.refresh_committed_offsets_if_needed()\n            committed = self._subscription.assignment[partition].committed\n    else:\n        commit_map = self._coordinator.fetch_committed_offsets([partition])\n        if (partition in commit_map):\n            committed = commit_map[partition].offset\n        else:\n            committed = None\n    return committed", "docstring": "Get the last committed offset for the given partition.\n\nThis offset will be used as the position for the consumer\nin the event of a failure.\n\nThis call may block to do a remote call if the partition in question\nisn't assigned to this consumer or if the consumer hasn't yet\ninitialized its cache of committed offsets.\n\nArguments:\npartition (TopicPartition): The partition to check.\n\nReturns:\nThe last committed offset, or None if there was no prior commit.", "source": "codesearchnet"}
{"code": "def consume_socket_output(frames, demux=False):\n    if (demux is False):\n        return six.binary_type().join(frames)\n    out = [None, None]\n    for frame in frames:\n        assert (frame != (None, None))\n        if (frame[0] is not None):\n            if (out[0] is None):\n                out[0] = frame[0]\n            else:\n                out[0] += frame[0]\n        elif (out[1] is None):\n            out[1] = frame[1]\n        else:\n            out[1] += frame[1]\n    return tuple(out)", "docstring": "Iterate through frames read from the socket and return the result.\n\nArgs:\n\ndemux (bool):\nIf False, stdout and stderr are multiplexed, and the result is the\nconcatenation of all the frames. If True, the streams are\ndemultiplexed, and the result is a 2-tuple where each item is the\nconcatenation of frames belonging to the same stream.", "source": "codesearchnet"}
{"code": "def hide_tool(self, context_name, tool_name):\n        \n        data = self._context(context_name)\n        hidden_tools = data[\"hidden_tools\"]\n        if tool_name not in hidden_tools:\n            self._validate_tool(context_name, tool_name)\n            hidden_tools.add(tool_name)\n            self._flush_tools()", "docstring": "Hide a tool so that it is not exposed in the suite.\n\nArgs:\ncontext_name (str): Context containing the tool.\ntool_name (str): Name of tool to hide.", "source": "juraj-google-style"}
{"code": "def peek_record(self, model_class, record_id):\n        \n        if self._cache:\n            return self._cache.get_record(model_class.__name__, record_id)\n        else:\n            return None", "docstring": "Return an instance of the model_class from the cache if it is present.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nrecord_id (int): The id of the record requested.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: An instance of model_class or None.", "source": "juraj-google-style"}
{"code": "def get_help_commands(server_prefix):\n    datapacks = []\n    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n    for module_name in os.listdir('{}/../'.format(_dir)):\n        if ((not module_name.startswith('_')) and (not module_name.startswith('!'))):\n            help_command = '`{}help {}`'.format(server_prefix, module_name)\n            datapacks.append((module_name, help_command, True))\n    return datapacks", "docstring": "Get the help commands for all modules\n\nArgs:\nserver_prefix: The server command prefix\n\nReturns:\ndatapacks (list): A list of datapacks for the help commands for all the modules", "source": "codesearchnet"}
{"code": "def _GetNextLogCountPerToken(token):\n    \n    global _log_counter_per_token  \n    _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)\n    return _log_counter_per_token[token]", "docstring": "Wrapper for _log_counter_per_token.\n\nArgs:\ntoken: The token for which to look up the count.\n\nReturns:\nThe number of times this function has been called with\n*token* as an argument (starting at 0)", "source": "juraj-google-style"}
{"code": "def _prepare_grid(self, times, grid_step):\n    grid = tf.range(0.0, times[-1], grid_step, dtype=self._dtype)\n    all_times = tf.concat([grid, times], axis=0)\n    mask = tf.concat([tf.zeros_like(grid, dtype=tf.bool), tf.ones_like(times, dtype=tf.bool)], axis=0)\n    perm = tf.argsort(all_times, stable=True)\n    all_times = tf.gather(all_times, perm)\n    mask = tf.gather(mask, perm)\n    return (all_times, mask)", "docstring": "Prepares grid of times for path generation.\n\nArgs:\ntimes:  Rank 1 `Tensor` of increasing positive real values. The times at\nwhich the path points are to be evaluated.\ngrid_step: Rank 0 real `Tensor`. Maximal distance between points in\nresulting grid.\n\nReturns:\nTuple `(all_times, mask)`.\n`all_times` is 1-D real `Tensor` containing all points from 'times` and\nwhose intervals are at most `grid_step`.\n`mask` is a boolean 1-D tensor of the same shape as 'all_times', showing\nwhich elements of 'all_times' correspond to values from `times`.\nGuarantees that times[0]=0 and grid_step[0]=False.\n'all_times` is sorted ascending and may contain duplicates.", "source": "github-repos"}
{"code": "def update_score_summary(sender, **kwargs):\n    score = kwargs['instance']\n    try:\n        score_summary = ScoreSummary.objects.get(student_item=score.student_item)\n        score_summary.latest = score\n        if score.reset:\n            score_summary.highest = score\n        elif (score.to_float() > score_summary.highest.to_float()):\n            score_summary.highest = score\n        score_summary.save()\n    except ScoreSummary.DoesNotExist:\n        ScoreSummary.objects.create(student_item=score.student_item, highest=score, latest=score)\n    except DatabaseError as err:\n        logger.exception(u'Error while updating score summary for student item {}'.format(score.student_item))", "docstring": "Listen for new Scores and update the relevant ScoreSummary.\n\nArgs:\nsender: not used\n\nKwargs:\ninstance (Score): The score model whose save triggered this receiver.", "source": "codesearchnet"}
{"code": "def set_size(self, width, height):\n    if (width is not None):\n        try:\n            width = to_pix(int(width))\n        except ValueError:\n            pass\n        self.style['width'] = width\n    if (height is not None):\n        try:\n            height = to_pix(int(height))\n        except ValueError:\n            pass\n        self.style['height'] = height", "docstring": "Set the widget size.\n\nArgs:\nwidth (int or str): An optional width for the widget (es. width=10 or width='10px' or width='10%').\nheight (int or str): An optional height for the widget (es. height=10 or height='10px' or height='10%').", "source": "codesearchnet"}
{"code": "def __init__(self, params=None, connection_string=None):\n        \n\n        if params is None and connection_string is None:\n            raise RuntimeError(\"Please provide either 'params' or 'connection_string'\")\n\n        if params is not None and connection_string is not None:\n            raise RuntimeError(\"Please provide only on of 'params' or 'connection_string'\")\n\n        if params is not None:\n            \n            \n            connection_string_no_pw = self.get_connection_string(params=params, hide_password=True)\n            config.logger.info(\"Client connecting to: \" + connection_string_no_pw)\n\n            \n            connection_string = self.get_connection_string(params=params, hide_password=False)\n\n        else:\n            \n            config.logger.info(\"Client connecting to: \" + connection_string)\n\n        \n        self.engine = sa.create_engine(connection_string)\n\n        \n        if connection_string.startswith('sqlite:\n\n            def on_connect(conn, _):\n                conn.execute('pragma foreign_keys=ON')\n\n            from sqlalchemy import event\n            event.listen(self.engine, 'connect', on_connect)\n\n        \n        self.session_maker = orm.sessionmaker(bind=self.get_engine())", "docstring": "Instantiate a client object\n\nA client can be configured either from a parameters dictionary ``params`` or directly\nfrom an :mod:`sqlalchemy` connection string ``connection_string``. Exactly one of the two\nmust be provided.\n\nArgs:\nparams (dict): database configuration, as defined in :mod:`ozelot.config`\nconnection_string (str): :mod:`sqlalchemy` connection string", "source": "juraj-google-style"}
{"code": "def create(cls, tx_signers, recipients, metadata=None, asset=None):\n    (inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata)\n    return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)", "docstring": "A simple way to generate a `CREATE` transaction.\n\nNote:\nThis method currently supports the following Cryptoconditions\nuse cases:\n- Ed25519\n- ThresholdSha256\n\nAdditionally, it provides support for the following BigchainDB\nuse cases:\n- Multiple inputs and outputs.\n\nArgs:\ntx_signers (:obj:`list` of :obj:`str`): A list of keys that\nrepresent the signers of the CREATE Transaction.\nrecipients (:obj:`list` of :obj:`tuple`): A list of\n([keys],amount) that represent the recipients of this\nTransaction.\nmetadata (dict): The metadata to be stored along with the\nTransaction.\nasset (dict): The metadata associated with the asset that will\nbe created in this Transaction.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Transaction`", "source": "codesearchnet"}
{"code": "def list_tags():\n    codes = _AutoCodes()\n    grouped = set([(k, '/{0}'.format(k), codes[k], codes['/{0}'.format(k)]) for k in codes if (not k.startswith('/'))])\n    found = [c for r in grouped for c in r[:2]]\n    missing = set([(('', r[0], None, r[1]) if r[0].startswith('/') else (r[0], '', r[1], None)) for r in _AutoCodes().items() if (r[0] not in found)])\n    grouped |= missing\n    payload = sorted([i for i in grouped if (i[2] is None)], key=(lambda x: x[3]))\n    grouped -= set(payload)\n    payload.extend(sorted([i for i in grouped if (i[2] < 10)], key=(lambda x: x[2])))\n    grouped -= set(payload)\n    payload.extend(sorted([i for i in grouped if i[0].startswith('auto')], key=(lambda x: x[2])))\n    grouped -= set(payload)\n    payload.extend(sorted([i for i in grouped if (not i[0].startswith('hi'))], key=(lambda x: x[2])))\n    grouped -= set(payload)\n    payload.extend(sorted(grouped, key=(lambda x: x[2])))\n    return tuple(payload)", "docstring": "Lists the available tags.\n\nReturns:\nTuple of tuples. Child tuples are four items: ('opening tag', 'closing tag', main ansi value, closing ansi value).", "source": "codesearchnet"}
{"code": "def join_pretty_tensors(tensors, output, join_function=None, name='join'):\n    if (not tensors):\n        raise ValueError('pretty_tensors must be a non-empty sequence.')\n    with output.g.name_scope(name):\n        if (join_function is None):\n            last_dim = (len(tensors[0].shape) - 1)\n            return output.with_tensor(tf.concat(tensors, last_dim))\n        else:\n            return output.with_tensor(join_function(tensors))", "docstring": "Joins the list of pretty_tensors and sets head of output_pretty_tensor.\n\nArgs:\ntensors: A sequence of Layers or SequentialLayerBuilders to join.\noutput: A pretty_tensor to set the head with the result.\njoin_function: A function to join the tensors, defaults to concat on the\nlast dimension.\nname: A name that is used for the name_scope\nReturns:\nThe result of calling with_tensor on output\nRaises:\nValueError: if pretty_tensors is None or empty.", "source": "codesearchnet"}
{"code": "def setup(docker_mount=None, force=False):\n    \n\n    if not is_ubuntu() and not is_boot2docker():\n        raise Exception('Head In The Clouds Docker is only supported on Ubuntu')\n\n    \n    if os.path.exists('dot_dockercfg') and not fabric.contrib.files.exists('~/.dockercfg'):\n        put('dot_dockercfg', '~/.dockercfg')\n\n    if not fabric.contrib.files.exists('~/.ssh/id_rsa'):\n        fab.run('ssh-keygen -t rsa -N \"\" -f ~/.ssh/id_rsa')\n\n    if docker_is_installed() and not force:\n        return\n\n    for attempt in range(3):\n        sudo('wget -qO- https:\n        sudo('sh -c \"echo deb http:\n        with settings(warn_only=True):\n            sudo('apt-get update')\n            failed = sudo('apt-get install -y lxc-docker sshpass curl').failed\n            if not failed:\n                break\n\n    if docker_mount:\n        create_docker_mount(docker_mount)", "docstring": "Prepare a vanilla server by installing docker, curl, and sshpass. If a file called ``dot_dockercfg``\nexists in the current working directory, it is uploaded as ``~/.dockercfg``.\n\nArgs:\n* docker_mount=None: Partition that will be mounted as /var/lib/docker", "source": "juraj-google-style"}
{"code": "def label_durations(self, label_list_ids=None):\n        \n        duration = collections.defaultdict(int)\n\n        for utterance in self.utterances.values():\n            for label_value, utt_count in utterance.label_total_duration(label_list_ids=label_list_ids).items():\n                duration[label_value] += utt_count\n\n        return duration", "docstring": "Return a dictionary containing the total duration, every label-value in this corpus is occurring.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists with an id contained in this list\nare considered.\n\nReturns:\ndict: A dictionary containing the total duration with the label-value as key.", "source": "juraj-google-style"}
{"code": "def ReadClientFullInfo(self, client_id):\n    \n    result = self.MultiReadClientFullInfo([client_id])\n    try:\n      return result[client_id]\n    except KeyError:\n      raise UnknownClientError(client_id)", "docstring": "Reads full client information for a single client.\n\nArgs:\nclient_id: A GRR client id string, e.g. \"C.ea3b2b71840d6fa7\".\n\nReturns:\nA `ClientFullInfo` instance for given client.\n\nRaises:\nUnknownClientError: if no client with such id was found.", "source": "juraj-google-style"}
{"code": "def convert_x_www_form_urlencoded_to_dict(post_data):\n    if isinstance(post_data, str):\n        converted_dict = {}\n        for k_v in post_data.split('&'):\n            try:\n                (key, value) = k_v.split('=')\n            except ValueError:\n                raise Exception('Invalid x_www_form_urlencoded data format: {}'.format(post_data))\n            converted_dict[key] = unquote(value)\n        return converted_dict\n    else:\n        return post_data", "docstring": "convert x_www_form_urlencoded data to dict\n\nArgs:\npost_data (str): a=1&b=2\n\nReturns:\ndict: {\"a\":1, \"b\":2}", "source": "codesearchnet"}
{"code": "def _read_config(filename):\n    parser = configparser.RawConfigParser()\n    if (filename and (not parser.read(filename))):\n        sys.stderr.write((\"Unable to open configuration file %s. Use --config='' to disable this warning.\\n\" % filename))\n    config = {}\n    for (section, defaults) in BASE_CONFIG.items():\n        if (section == 'patterns'):\n            continue\n        for (name, descr) in defaults.items():\n            (kind, default) = descr\n            if ((section in parser.sections()) and (name in parser.options(section))):\n                if (kind == 'int'):\n                    value = parser.getint(section, name)\n                elif (kind == 'float'):\n                    value = parser.getfloat(section, name)\n                elif (kind == 'bool'):\n                    value = parser.getboolean(section, name)\n                else:\n                    value = parser.get(section, name)\n            else:\n                value = default\n            config[name] = value\n    if ('patterns' in parser.sections()):\n        patterns = [parser.get('patterns', opt) for opt in parser.options('patterns')]\n    else:\n        patterns = DEFAULT_PATTERNS\n    config['patterns'] = patterns\n    return config", "docstring": "Read configuration from the given file.\n\nParsing is performed through the configparser library.\n\nReturns:\ndict: a flattened dict of (option_name, value), using defaults.", "source": "codesearchnet"}
{"code": "def remove_site(self):\n    params = dict(oxd_id=self.oxd_id)\n    logger.debug('Sending command `remove_site` with params %s', params)\n    response = self.msgr.request('remove_site', **params)\n    logger.debug('Received response: %s', response)\n    if (response['status'] == 'error'):\n        raise OxdServerError(response['data'])\n    return response['data']['oxd_id']", "docstring": "Cleans up the data for the site.\n\nReturns:\noxd_id if the process was completed without error\n\nRaises:\nOxdServerError if there was an issue with the operation", "source": "codesearchnet"}
{"code": "def plot_brillouin_zone_from_kpath(kpath, ax=None, **kwargs):\n    \n    lines = [[kpath.kpath['kpoints'][k] for k in p]\n             for p in kpath.kpath['path']]\n    return plot_brillouin_zone(bz_lattice=kpath.prim_rec, lines=lines, ax=ax,\n                               labels=kpath.kpath['kpoints'], **kwargs)", "docstring": "Gives the plot (as a matplotlib object) of the symmetry line path in\nthe Brillouin Zone.\n\nArgs:\nkpath (HighSymmKpath): a HighSymmKPath object\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n**kwargs: provided by add_fig_kwargs decorator\n\nReturns:\nmatplotlib figure", "source": "juraj-google-style"}
{"code": "def get_model(servoid):\n    data = []\n    data.append(9)\n    data.append(servoid)\n    data.append(EEP_READ_REQ)\n    data.append(MODEL_NO1_EEP)\n    data.append(BYTE1)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(12)\n        return (ord(rxdata[9]) & 255)\n    except:\n        raise HerkulexError('could not communicate with motors')", "docstring": "Get the servo model\n\nThis function gets the model of the herkules servo, provided its id\n\nArgs:\nservoid(int): the id of the servo\n\nReturns:\nint:  an integer corresponding to the model number\n0x06 for DRS-602\n0x04 for DRS-402\n0x02 for DRS-202", "source": "codesearchnet"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes a `WorkerPool`.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsWorkerPoolsDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def delete_vmss(access_token, subscription_id, resource_group, vmss_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a virtual machine scale set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def contains(self, value, equality_comparer=operator.eq):\n    if self.closed():\n        raise ValueError('Attempt to call contains() on a closed Queryable.')\n    if (not is_callable(equality_comparer)):\n        raise TypeError('contains() parameter equality_comparer={0} is not callable'.format(repr(equality_comparer)))\n    if (equality_comparer is operator.eq):\n        return (value in self._iterable)\n    for item in self:\n        if equality_comparer(value, item):\n            return True\n    return False", "docstring": "Determines whether the sequence contains a particular value.\n\nExecution is immediate. Depending on the type of the sequence, all or\nnone of the sequence may be consumed by this operation.\n\nNote: This method uses immediate execution.\n\nArgs:\nvalue: The value to test for membership of the sequence\n\nReturns:\nTrue if value is in the sequence, otherwise False.\n\nRaises:\nValueError: If the Queryable has been closed.", "source": "codesearchnet"}
{"code": "def sg_summary_gradient(tensor, gradient, prefix=None, name=None):\n    prefix = ('' if (prefix is None) else (prefix + '/'))\n    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))\n    _scalar((name + '/grad'), tf.reduce_mean(tf.abs(gradient)))\n    _histogram((name + '/grad-h'), tf.abs(gradient))", "docstring": "r\"\"\"Register `tensor` to summary report as `gradient`\n\nArgs:\ntensor: A `Tensor` to log as gradient\ngradient: A 0-D `Tensor`. A gradient to log\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def multi_label_train_test_split(y, test_size=0.2):\n    \n    if test_size <= 0 or test_size >= 1:\n        raise ValueError(\"`test_size` should be between 0 and 1\")\n\n    \n    frac = Fraction(test_size).limit_denominator()\n    test_folds, total_folds = frac.numerator, frac.denominator\n    logger.warn('Inferring test_size as {}/{}. Generating {} folds. The algorithm might fail if denominator is large.'\n                .format(test_folds, total_folds, total_folds))\n\n    folds = equal_distribution_folds(y, folds=total_folds)\n    test_indices = np.concatenate(folds[:test_folds])\n    train_indices = np.concatenate(folds[test_folds:])\n    return train_indices, test_indices", "docstring": "Creates a test split with roughly the same multi-label distribution in `y`.\n\nArgs:\ny: The multi-label outputs.\ntest_size: The test size in [0, 1]\n\nReturns:\nThe train and test indices.", "source": "juraj-google-style"}
{"code": "def set_icon_file(self, filename, rel='icon'):\n    (mimetype, encoding) = mimetypes.guess_type(filename)\n    self.add_child('favicon', ('<link rel=\"%s\" href=\"%s\" type=\"%s\" />' % (rel, filename, mimetype)))", "docstring": "Allows to define an icon for the App\n\nArgs:\nfilename (str): the resource file name (ie. \"/res:myicon.png\")\nrel (str): leave it unchanged (standard \"icon\")", "source": "codesearchnet"}
{"code": "def get_policy(observations, hparams, action_space):\n    if (not isinstance(action_space, gym.spaces.Discrete)):\n        raise ValueError('Expecting discrete action space.')\n    obs_shape = common_layers.shape_list(observations)\n    (frame_height, frame_width) = obs_shape[2:4]\n    if (hparams.policy_problem_name == 'dummy_policy_problem_ttt'):\n        tf.logging.info('Using DummyPolicyProblemTTT for the policy.')\n        policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()\n    else:\n        tf.logging.info('Using DummyPolicyProblem for the policy.')\n        policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)\n    trainer_lib.add_problem_hparams(hparams, policy_problem)\n    hparams.force_full_predict = True\n    model = registry.model(hparams.policy_network)(hparams, tf.estimator.ModeKeys.TRAIN)\n    try:\n        num_target_frames = hparams.video_num_target_frames\n    except AttributeError:\n        num_target_frames = 1\n    features = {'inputs': observations, 'input_action': tf.zeros((obs_shape[:2] + [1]), dtype=tf.int32), 'input_reward': tf.zeros((obs_shape[:2] + [1]), dtype=tf.int32), 'targets': tf.zeros(((obs_shape[:1] + [num_target_frames]) + obs_shape[2:])), 'target_action': tf.zeros((obs_shape[:1] + [num_target_frames, 1]), dtype=tf.int32), 'target_reward': tf.zeros((obs_shape[:1] + [num_target_frames, 1]), dtype=tf.int32), 'target_policy': tf.zeros(((obs_shape[:1] + [num_target_frames]) + [action_space.n])), 'target_value': tf.zeros((obs_shape[:1] + [num_target_frames]))}\n    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n        t2t_model.create_dummy_vars()\n        (targets, _) = model(features)\n    return (targets['target_policy'][(:, 0, :)], targets['target_value'][(:, 0)])", "docstring": "Get a policy network.\n\nArgs:\nobservations: observations\nhparams: parameters\naction_space: action space\n\nReturns:\nTuple (action logits, value).", "source": "codesearchnet"}
{"code": "def __init__(self, api_login, api_key):\n        \n        self.login = api_login\n        self.key = api_key\n        self.api_url = self.api_base_url.format(api_version=self.api_version)", "docstring": "Initializes OpenLoad instance with given parameters and formats api base url.\n\nArgs:\napi_login (str): API Login found in openload.co\napi_key (str): API Key found in openload.co\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def match_any(patterns, name):\n    if (not patterns):\n        return True\n    return any((match(pattern, name) for pattern in patterns))", "docstring": "Test if a name matches any of a list of patterns.\n\nWill return `True` if ``patterns`` is an empty list.\n\nArguments:\npatterns (list): A list of wildcard pattern, e.g ``[\"*.py\",\n\"*.pyc\"]``\nname (str): A filename.\n\nReturns:\nbool: `True` if the name matches at least one of the patterns.", "source": "codesearchnet"}
{"code": "def cas(self, key, value, cas, expire=0, noreply=False):\n    return self._store_cmd(b'cas', {key: value}, expire, noreply, cas)[key]", "docstring": "The memcached \"cas\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: str, see class docs for details.\ncas: int or str that only contains the characters '0'-'9'.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, False to wait for the reply (the default).\n\nReturns:\nIf noreply is True, always returns True. Otherwise returns None if\nthe key didn't exist, False if it existed but had a different cas\nvalue and True if it existed and was changed.", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._object_type:\n        self._object_type.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The Create request payload is missing the object type field.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self._template_attribute:\n            self._template_attribute.write(local_buffer, kmip_version=kmip_version)\n        else:\n            raise exceptions.InvalidField('The Create request payload is missing the template attribute field.')\n    elif self._template_attribute:\n        attributes = objects.convert_template_attribute_to_attributes(self._template_attribute)\n        attributes.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The Create request payload is missing the template attribute field.')\n    self.length = local_buffer.length()\n    super(CreateRequestPayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the Create request payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidField: Raised if the object type attribute or template\nattribute is not defined.", "source": "codesearchnet"}
{"code": "def __init__(self, name, description, *labels):\n    super(Counter, self).__init__('Counter', _counter_methods, len(labels), name, description, *labels)", "docstring": "Creates a new Counter.\n\nArgs:\nname: name of the new metric.\ndescription: description of the new metric.\n*labels: The label list of the new metric.", "source": "github-repos"}
{"code": "def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding):\n    return gen_nn_ops.max_pool_grad_grad(orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding)", "docstring": "Max Pooling Second-Order Gradient.\n\nArgs:\norig_input: A float Tensor. The original input tensor.\norig_output: A float Tensor. The original output tensor.\ngrad: A float Tensor.\nThe 4D (batch x out_rows x out_cols x depth) output backprop.\nwindow_rows: integer. Kernel size along rows dimension.\nwindow_cols: integer. Kernel size along cols dimension.\nrow_stride: integer. Stride along rows dimension\ncol_stride: integer. Stride along cols dimension\npadding: PoolingOpDef.Padding.  Padding type.\n\nReturns:\nA Tensor.", "source": "github-repos"}
{"code": "def in_port(self):\n    in_port = self.match.get_field(OxmOfbMatchField.OFPXMT_OFB_IN_PORT)\n    return int.from_bytes(in_port, 'big')", "docstring": "Retrieve the 'in_port' that generated the PacketIn.\n\nThis method will look for the OXM_TLV with type OFPXMT_OFB_IN_PORT on\nthe `oxm_match_fields` field from `match` field and return its value,\nif the OXM exists.\n\nReturns:\nThe integer number of the 'in_port' that generated the PacketIn if\nit exists. Otherwise return None.", "source": "codesearchnet"}
{"code": "def _poll_server_till_success(max_attempts, sleep_per_poll_sec, debug_server_url, dump_dir, server, gpu_memory_fraction=1.0):\n    poll_count = 0\n    config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction))\n    with session.Session(config=config) as sess:\n        for poll_count in range(max_attempts):\n            server.clear_data()\n            print('Polling: poll_count = %d' % poll_count)\n            x_init_name = 'x_init_%d' % poll_count\n            x_init = constant_op.constant([42.0], shape=[1], name=x_init_name)\n            x = variables.Variable(x_init, name=x_init_name)\n            run_options = config_pb2.RunOptions()\n            debug_utils.add_debug_tensor_watch(run_options, x_init_name, 0, debug_urls=[debug_server_url])\n            try:\n                sess.run(x.initializer, options=run_options)\n            except errors.FailedPreconditionError:\n                pass\n            if dump_dir:\n                if os.path.isdir(dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0:\n                    file_io.delete_recursively(dump_dir)\n                    print('Poll succeeded.')\n                    return True\n                else:\n                    print('Poll failed. Sleeping for %f s' % sleep_per_poll_sec)\n                    time.sleep(sleep_per_poll_sec)\n            elif server.debug_tensor_values:\n                print('Poll succeeded.')\n                return True\n            else:\n                print('Poll failed. Sleeping for %f s' % sleep_per_poll_sec)\n                time.sleep(sleep_per_poll_sec)\n        return False", "docstring": "Poll server until success or exceeding max polling count.\n\nArgs:\nmax_attempts: (int) How many times to poll at maximum\nsleep_per_poll_sec: (float) How many seconds to sleep for after each\nunsuccessful poll.\ndebug_server_url: (str) gRPC URL to the debug server.\ndump_dir: (str) Dump directory to look for files in. If None, will directly\ncheck data from the server object.\nserver: The server object.\ngpu_memory_fraction: (float) Fraction of GPU memory to be\nallocated for the Session used in server polling.\n\nReturns:\n(bool) Whether the polling succeeded within max_polls attempts.", "source": "github-repos"}
{"code": "def get_session(region, profile=None):\n    \n    if profile is None:\n        logger.debug(\"No AWS profile explicitly provided. \"\n                     \"Falling back to default.\")\n        profile = default_profile\n\n    logger.debug(\"Building session using profile \\\"%s\\\" in region \\\"%s\\\"\"\n                 % (profile, region))\n\n    session = boto3.Session(region_name=region, profile_name=profile)\n    c = session._session.get_component('credential_provider')\n    provider = c.get_provider('assume-role')\n    provider.cache = credential_cache\n    provider._prompter = ui.getpass\n    return session", "docstring": "Creates a boto3 session with a cache\n\nArgs:\nregion (str): The region for the session\nprofile (str): The profile for the session\n\nReturns:\n:class:`boto3.session.Session`: A boto3 session with\ncredential caching", "source": "juraj-google-style"}
{"code": "def __init__(self, action_type=None, nw_tos=None):\n        \n        super().__init__(action_type, length=8)\n        self.nw_tos = nw_tos", "docstring": "Create an ActionNWTos with the optional parameters below.\n\nArgs:\naction_type (:class:`~pyof.v0x01.common.action.ActionType`):\n:attr:`~ActionType.OFPAT_SET_NW_SRC` or\n:attr:`~ActionType.OFPAT_SET_NW_DST`.\nnw_tos (int): IP ToS (DSCP field, 6 bits).", "source": "juraj-google-style"}
{"code": "def wait_until(what, times=(- 1)):\n    while times:\n        logger.info('Waiting times left %d', times)\n        try:\n            if (what() is True):\n                return True\n        except:\n            logger.exception('Wait failed')\n        else:\n            logger.warning('Trial[%d] failed', times)\n        times -= 1\n        time.sleep(1)\n    return False", "docstring": "Wait until `what` return True\n\nArgs:\nwhat (Callable[bool]): Call `wait()` again and again until it returns True\ntimes (int): Maximum times of trials before giving up\n\nReturns:\nTrue if success, False if times threshold reached", "source": "codesearchnet"}
{"code": "def _sendPostDict(post_dict):\n    \n    downer = Downloader()\n    downer.headers[\"Referer\"] = settings.EDEPOSIT_EXPORT_REFERER\n    data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict)\n    rheaders = downer.response_headers\n\n    error_msg = rheaders.get(\"aleph-info\", \"\").lower().strip()\n    if \"aleph-info\" in rheaders and error_msg.startswith(\"error\"):\n        raise ExportRejectedException(\n            \"Export request was rejected by import webform: %s\" %\n            rheaders[\"aleph-info\"]\n        )\n\n    return data", "docstring": "Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`.\n\nArgs:\npost_dict (dict): dictionary from :class:`PostData.get_POST_data()`\n\nReturns:\nstr: Reponse from webform.", "source": "juraj-google-style"}
{"code": "def Incr(self, x, term=1):\n        \n        self.d[x] = self.d.get(x, 0) + term", "docstring": "Increments the freq/prob associated with the value x.\n\nArgs:\nx: number value\nterm: how much to increment by", "source": "juraj-google-style"}
{"code": "def get_properties(self):\n    names = inspect.getmembers(self, predicate=(lambda x: (not inspect.ismethod(x))))\n    return [x[0] for x in names if ((not x[0].startswith('_')) and (x[0] not in self._ignored_properties))]", "docstring": "Get a list of all of the public data properties of this class.\n\nReturns:\nlist of str: A list of all of the public properties in this class.", "source": "codesearchnet"}
{"code": "def check_params_sync(model_params, original_params):\n    for mp, op in zip(model_params, original_params):\n        if isinstance(mp, DTensor):\n            mp = mp.to_local()\n        if isinstance(op, DTensor):\n            op = op.to_local()\n        if not torch.allclose(mp.data, op.data, rtol=0, atol=0):\n            raise RuntimeError(f'Parameters out of sync: model param {mp.data} != original param {op.data}')\n    return True", "docstring": "Check if original_params are being updated in sync with model parameters.\n\nArgs:\nmodel_params: Iterator of model parameters after update\noriginal_params: List of original parameters before DDP wrapping", "source": "github-repos"}
{"code": "def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):\n    gdf = gpd.read_file(src_polygons)\n    geom_coords = gdf['geometry']\n    lines = []\n    row_ids = []\n    for (i_row, pol) in tqdm(enumerate(geom_coords), total=len(geom_coords)):\n        boundary = pol.boundary\n        if (boundary.type == 'MultiLineString'):\n            for line in boundary:\n                lines.append(line)\n                row_ids.append(i_row)\n        else:\n            lines.append(boundary)\n            row_ids.append(i_row)\n    gdf_lines = gdf.drop('geometry', axis=1).iloc[(row_ids, :)]\n    gdf_lines['Coordinates'] = lines\n    gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)\n    if (crs is not None):\n        gdf_lines = gdf_lines.to_crs(crs)\n    if add_allone_col:\n        gdf_lines['ALLONE'] = 1\n    Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)\n    gdf_lines.to_file(dst_lines)\n    return 0", "docstring": "Convert polygons to lines.\n\nArguments:\nsrc_polygons {path to geopandas-readable file} -- Filename of the the polygon vector dataset to be\nconverted to lines.\ndst_lines {[type]} -- Filename where to write the line vector dataset to.\n\nKeyword Arguments:\ncrs {dict or str} -- Output projection parameters as string or in dictionary format.\nThis will reproject the data when a crs is given (not {None}) (default: {None}).\nadd_allone_col {bool} -- Add an additional attribute column with all ones.\nThis is useful, e.g. in case you want to use the lines with gdal_proximity afterwards (default: {True}).\n\nReturns:\nint -- Exit code 0 if successeful.", "source": "codesearchnet"}
{"code": "def make_call_types(f, globals_d):\n    arg_spec = getargspec(f)\n    args = [k for k in arg_spec.args if (k != 'self')]\n    defaults = {}\n    if arg_spec.defaults:\n        default_args = args[(- len(arg_spec.defaults)):]\n        for (a, default) in zip(default_args, arg_spec.defaults):\n            defaults[a] = default\n    if (not getattr(f, '__annotations__', None)):\n        annotations = make_annotations(f, globals_d)\n    else:\n        annotations = f.__annotations__\n    call_types = OrderedDict()\n    for a in args:\n        anno = anno_with_default(annotations[a], defaults.get(a, NO_DEFAULT))\n        assert isinstance(anno, Anno), ('Argument %r has type %r which is not an Anno' % (a, anno))\n        call_types[a] = anno\n    return_type = anno_with_default(annotations.get('return', None))\n    if (return_type is Any):\n        return_type = Anno('Any return value', Any, 'return')\n    assert ((return_type is None) or isinstance(return_type, Anno)), ('Return has type %r which is not an Anno' % (return_type,))\n    return (call_types, return_type)", "docstring": "Make a call_types dictionary that describes what arguments to pass to f\n\nArgs:\nf: The function to inspect for argument names (without self)\nglobals_d: A dictionary of globals to lookup annotation definitions in", "source": "codesearchnet"}
{"code": "def get_profiles(self, cmd):\n    if cmd not in self._views:\n        raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))\n    return self._views[cmd]", "docstring": "Returns profiling results for each step at which `cmd` was run.\n\nArgs:\ncmd: string, profiling command used in an `add_auto_profiling` call.\n\nReturns:\ndict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which\nthe profiling command was run. Values are the outputs of profiling.\nFor \"code\" and \"op\" commands this will be a `MultiGraphNodeProto`, for\n\"scope\" and \"graph\" commands this will be a `GraphNodeProto.\n\nRaises:\nValueError: if `cmd` was never run (either because no session.run call was\nmade or because there was no `add_auto_profiling` call with the specified\n`cmd`.", "source": "github-repos"}
{"code": "def guess_dir_structure(dir):\n        \n        subdir = os.listdir(dir)[0]\n        \n        if subdir.startswith('n') and \\\n                os.path.isdir(os.path.join(dir, subdir)):\n            dir_structure = 'train'\n        else:\n            dir_structure = 'original'\n        logger.info(\n            \"[ILSVRC12] Assuming directory {} has '{}' structure.\".format(\n                dir, dir_structure))\n        return dir_structure", "docstring": "Return the directory structure of \"dir\".\n\nArgs:\ndir(str): something like '/path/to/imagenet/val'\n\nReturns:\neither 'train' or 'original'", "source": "juraj-google-style"}
{"code": "def synchronize_clock(self, offset):\n    self.time_offset = (offset - self.uptime)\n    self.is_utc = True\n    if self.has_rtc:\n        self.stored_offset = self.time_offset", "docstring": "Persistently synchronize the clock to UTC time.\n\nArgs:\noffset (int): The number of seconds since 1/1/2000 00:00Z", "source": "codesearchnet"}
{"code": "def get_members(cls, session, team_or_id):\n    if isinstance(team_or_id, Person):\n        team_or_id = team_or_id.id\n    return cls(('/teams/%d/members.json' % team_or_id), session=session, out_type=User)", "docstring": "List the members for the team.\n\nArgs:\nteam_or_id (helpscout.models.Person or int): Team or the ID of\nthe team to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Users): Users\niterator.", "source": "codesearchnet"}
{"code": "def get_pending_file_rename():\n    vnames = ('PendingFileRenameOperations', 'PendingFileRenameOperations2')\n    key = 'SYSTEM\\\\CurrentControlSet\\\\Control\\\\Session Manager'\n    for vname in vnames:\n        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)\n        if reg_ret['success']:\n            log.debug('Found key: %s', key)\n            if (reg_ret['vdata'] and (reg_ret['vdata'] != '(value not set)')):\n                return True\n        else:\n            log.debug('Unable to access key: %s', key)\n    return False", "docstring": "Determine whether there are pending file rename operations that require a\nreboot.\n\n.. versionadded:: 2016.11.0\n\nReturns:\nbool: ``True`` if there are pending file rename operations, otherwise\n``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' system.get_pending_file_rename", "source": "codesearchnet"}
{"code": "def describe(o):\n    from inspect import getmodule\n    from acorn.logging.decoration import _fqdn\n    fqdn = _fqdn(o, False)\n    if (fqdn is None):\n        return json_describe(o, str(type(o)))\n    package = fqdn.split('.')[0]\n    global _package_desc\n    if (package not in _package_desc):\n        from acorn.config import descriptors\n        spack = descriptors(package)\n        if (spack is None):\n            _package_desc[package] = None\n            return json_describe(o, fqdn)\n        else:\n            _package_desc[package] = spack\n    if (_package_desc[package] is None):\n        return json_describe(o, fqdn)\n    elif (fqdn in _package_desc[package]):\n        return json_describe(o, fqdn, _package_desc[package][fqdn])\n    else:\n        return json_describe(o, fqdn)", "docstring": "Describes the object using developer-specified attributes specific to\neach main object type.\n\nReturns:\ndict: keys are specific attributes tailored to the specific object type,\nthough `fqdn` is common to all descriptions; values are the corresponding\nattribute values which are *simple* types that can easily be serialized to\nJSON.", "source": "codesearchnet"}
{"code": "def cert_chain(certs):\n    if (len(certs) < 2):\n        warnings.warn('Certificate chain contains < 3 certificates.')\n        return False\n    cert = certs[0]\n    today = datetime.datetime.today()\n    if (not (today > cert.not_valid_before)):\n        warnings.warn('Certificate Not Before date is invalid.')\n        return False\n    if (not (today < cert.not_valid_after)):\n        warnings.warn('Certificate Not After date is invalid.')\n        return False\n    oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME\n    ext = cert.extensions.get_extension_for_oid(oid_san)\n    sans = ext.value.get_values_for_type(x509.DNSName)\n    if (not ('echo-api.amazon.com' in sans)):\n        return False\n    for i in range((len(certs) - 1)):\n        if (not (certs[i].issuer == certs[(i + 1)].subject)):\n            return False\n    return True", "docstring": "Validate PEM-encoded X.509 certificate chain.\n\nSee `validate.request` for additional info.\n\nArgs:\ncerts: list. The certificate chain as a list of\ncryptography.hazmat.backends.openssl.x509._Certificate certificates.\nSee `validate.retrieve` to create certs obj.\n\nReturns:\nbool: True if valid, False otherwise.", "source": "codesearchnet"}
{"code": "def cds_identifier_validator(record, result):\n    record_external_identifiers = get_value(record, 'external_system_identifiers', [])\n    result_external_identifiers = get_value(result, '_source.external_system_identifiers', [])\n    record_external_identifiers = {external_id['value'] for external_id in record_external_identifiers if (external_id['schema'] == 'CDS')}\n    result_external_identifiers = {external_id['value'] for external_id in result_external_identifiers if (external_id['schema'] == 'CDS')}\n    return bool((record_external_identifiers & result_external_identifiers))", "docstring": "Ensure that the two records have the same CDS identifier.\n\nThis is needed because the search is done only for\n``external_system_identifiers.value``, which might cause false positives in\ncase the matched record has an identifier with the same ``value`` but\n``schema`` different from CDS.\n\nArgs:\nrecord (dict): the given record we are trying to match with similar ones in INSPIRE.\nresult (dict): possible match returned by the ES query that needs to be validated.\n\nReturns:\nbool: validation decision.", "source": "codesearchnet"}
{"code": "def __init__(self, handle, dtype, session):\n    self._handle = compat.as_str_any(handle)\n    self._resource_handle = None\n    self._dtype = dtype\n    self._session = session\n    self._auto_gc_enabled = True", "docstring": "Constructs a new tensor handle.\n\nA tensor handle for a persistent tensor is a python string\nthat has the form of \"tensor_name;unique_id;device_name\".\n\nArgs:\nhandle: A tensor handle.\ndtype: The data type of the tensor represented by `handle`.\nsession: The session in which the tensor is produced.", "source": "github-repos"}
{"code": "def _create_min_max_boundaries(max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):\n    bucket_boundaries = []\n    x = min_boundary\n    while (x < max_length):\n        bucket_boundaries.append(x)\n        x = max((x + 1), int((x * boundary_scale)))\n    buckets_min = ([0] + bucket_boundaries)\n    buckets_max = (bucket_boundaries + [(max_length + 1)])\n    return (buckets_min, buckets_max)", "docstring": "Create min and max boundary lists up to max_length.\n\nFor example, when max_length=24, min_boundary=4 and boundary_scale=2, the\nreturned values will be:\nbuckets_min = [0, 4, 8, 16, 24]\nbuckets_max = [4, 8, 16, 24, 25]\n\nArgs:\nmax_length: The maximum length of example in dataset.\nmin_boundary: Minimum length in boundary.\nboundary_scale: Amount to scale consecutive boundaries in the list.\n\nReturns:\nmin and max boundary lists", "source": "codesearchnet"}
{"code": "def reaction_formula(reaction, compound_formula):\n\n    def multiply_formula(compound_list):\n        for (compound, count) in compound_list:\n            (yield (count * compound_formula[compound.name]))\n    for (compound, _) in reaction.compounds:\n        if (compound.name not in compound_formula):\n            return None\n    else:\n        left_form = reduce(operator.or_, multiply_formula(reaction.left), Formula())\n        right_form = reduce(operator.or_, multiply_formula(reaction.right), Formula())\n    return (left_form, right_form)", "docstring": "Calculate formula compositions for both sides of the specified reaction.\n\nIf the compounds in the reaction all have formula, then calculate and\nreturn the chemical compositions for both sides, otherwise return `None`.\n\nArgs:\nreaction: :class:`psamm.reaction.Reaction`.\ncompound_formula: a map from compound id to formula.", "source": "codesearchnet"}
{"code": "def reduce(x, op='sum'):\n    import warnings\n    warnings.warn('Deprecated API. Use ``sum`` or ``mean`` instead.', DeprecationWarning)\n    from .function_bases import reduce_sum, reduce_mean\n    if (op == 'sum'):\n        return reduce_sum(x)\n    elif (op == 'mean'):\n        return reduce_mean(x)\n    raise ValueError()", "docstring": "Reduction function with given operation.\n\nArgs:\nx (Variable): An input.\nop (str): 'sum' or 'mean'.\n\nNote:\nThis is deprecated. Use ``mean`` or ``sum`` instead.", "source": "codesearchnet"}
{"code": "def inspect(lines):\n    labels = set()\n    count = 0\n    exp = re.compile('>.*?<([\\\\w ]+)>')\n    valid = False\n    for line in lines:\n        if line.startswith('M  END\\n'):\n            valid = True\n        elif line.startswith('$$$$'):\n            count += 1\n            valid = False\n        else:\n            result = exp.match(line)\n            if result:\n                labels.add(result.group(1))\n    if valid:\n        count += 1\n    return (list(labels), count)", "docstring": "Inspect SDFile list of string\n\nReturns:\ntuple: (data label list, number of records)", "source": "codesearchnet"}
{"code": "def GetFormattedEventObject(cls, event):\n    \n    time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)\n\n    lines_of_text = [\n        '+-' * 40,\n        '[Timestamp]:',\n        '  {0:s}'.format(time_string)]\n\n    pathspec = getattr(event, 'pathspec', None)\n    if pathspec:\n      lines_of_text.append('[Pathspec]:')\n      attribute_string = pathspec.comparable.replace('\\n', '\\n  ')\n      attribute_string = '  {0:s}\\n'.format(attribute_string)\n      lines_of_text.append(attribute_string)\n\n    \n\n    lines_of_text.append('[Reserved attributes]:')\n    out_additional = ['[Additional attributes]:']\n\n    for attribute_name, attribute_value in sorted(event.GetAttributes()):\n      if attribute_name not in definitions.RESERVED_VARIABLE_NAMES:\n        attribute_string = '  {{{0!s}}} {1!s}'.format(\n            attribute_name, attribute_value)\n        out_additional.append(attribute_string)\n\n      elif attribute_name not in ('pathspec', 'tag'):\n        attribute_string = '  {{{0!s}}} {1!s}'.format(\n            attribute_name, attribute_value)\n        lines_of_text.append(attribute_string)\n\n    lines_of_text.append('')\n    out_additional.append('')\n\n    lines_of_text.extend(out_additional)\n    return '\\n'.join(lines_of_text)", "docstring": "Retrieves a string representation of the event.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: string representation of the event.", "source": "juraj-google-style"}
{"code": "def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):\n    graph = self.as_graph(depth=depth)\n    graph.print(format=format, output=output, **kwargs)", "docstring": "Print the graph for self's nodes.\n\nArgs:\nformat (str): output format (csv, json or text).\noutput (file): file descriptor on which to write.\ndepth (int): depth of the graph.", "source": "codesearchnet"}
{"code": "def san_managers(self):\n    if (not self.__san_managers):\n        self.__san_managers = SanManagers(self.__connection)\n    return self.__san_managers", "docstring": "Gets the SanManagers API client.\n\nReturns:\nSanManagers:", "source": "codesearchnet"}
{"code": "def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'name': name})\n    return self.api_call('usergroups.create', json=kwargs)", "docstring": "Create a User Group\n\nArgs:\nname (str): A name for the User Group. Must be unique among User Groups.\ne.g. 'My Test Team'", "source": "codesearchnet"}
{"code": "def convert(self, calibration_input_fn=None):\n    assert not self._converted\n    device_requested = array_ops.zeros([]).device\n    if 'gpu' not in device_requested.lower():\n        raise ValueError(f'Specified device is not a GPU: {device_requested}')\n    if 'gpu:0' not in device_requested.lower():\n        self._device = device_requested\n        logging.info(f'Placing imported graph from `{self._input_saved_model_dir}` on device: {self._device}')\n    if self._need_calibration and (not calibration_input_fn):\n        raise ValueError('Should specify calibration_input_fn because INT8 calibration is needed')\n    if not self._need_calibration and calibration_input_fn:\n        raise ValueError('Should not specify calibration_input_fn because INT8 calibration is not needed')\n    self._saved_model = load.load(self._input_saved_model_dir, self._input_saved_model_tags)\n    func = self._saved_model.signatures[self._input_saved_model_signature_key]\n    if self.freeze:\n        frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)\n    else:\n        inlined_graph_def = _apply_inlining(func)\n        _annotate_variable_ops(func, inlined_graph_def)\n        frozen_func = _construct_function_from_graph_def(func, inlined_graph_def)\n    frozen_graph_def = frozen_func.graph.as_graph_def()\n    logging.info('Clearing prior device assignments in loaded saved model')\n    for node in frozen_graph_def.node:\n        node.device = ''\n    if self._device is None:\n        grappler_meta_graph_def = saver.export_meta_graph(graph_def=frozen_graph_def, graph=frozen_func.graph)\n    else:\n        with ops.Graph().as_default() as graph, ops.device(self._device):\n            importer.import_graph_def(frozen_graph_def, name='')\n            grappler_meta_graph_def = saver.export_meta_graph(graph_def=graph.as_graph_def(), graph=graph)\n    fetch_collection = meta_graph_pb2.CollectionDef()\n    for array in frozen_func.inputs + frozen_func.outputs:\n        fetch_collection.node_list.value.append(array.name)\n    grappler_meta_graph_def.collection_def['train_op'].CopyFrom(fetch_collection)\n    self._converted_graph_def = self._run_conversion(grappler_meta_graph_def)\n    self._converted_func = _construct_function_from_graph_def(func, self._converted_graph_def, frozen_func)\n    if self._need_calibration:\n        if not self._need_trt_profiles():\n            self._execute_calibration(calibration_input_fn)\n        else:\n            self._calibration_input_fn = calibration_input_fn\n    self._converted = True\n    graphviz_path = os.environ.get('TF_TRT_EXPORT_GRAPH_VIZ_PATH', default=None)\n    if graphviz_path is not None:\n        try:\n            trt_utils.draw_graphdef_as_graphviz(graphdef=self._converted_func.graph.as_graph_def(add_shapes=True), dot_output_filename=graphviz_path)\n        except Exception as e:\n            logging.error(f'An Exception occurred during the export of the graph visualization: {e}')\n    return self._converted_func", "docstring": "Convert the input SavedModel in 2.0 format.\n\nArgs:\ncalibration_input_fn: a generator function that yields input data as a\nlist or tuple or dict, which will be used to execute the converted\nsignature for calibration. All the returned input data should have the\nsame shape. 
Example: `def input_fn(): yield input1, input2, input3`\n\nIf dynamic_shape_mode==False, (or if the graph has static input shapes)\nthen we run calibration and build the calibrated engine during\nconversion.\n\nIf dynamic_shape_mode==True (and the graph has any unknown input\nshape), then the reference to calibration_input_fn is stored, and the\ncalibration is actually performed when we build the engine (see\nbuild()).\n\nRaises:\nValueError: if the input combination is invalid.\n\nReturns:\nThe TF-TRT converted Function.", "source": "github-repos"}
{"code": "def visualize_computed_pcoll(pcoll_name: str, pcoll: beam.pvalue.PCollection, max_n: int, max_duration_secs: float, dynamic_plotting_interval: Optional[int]=None, include_window_info: bool=False, display_facets: bool=False) -> None:\n    pipeline = ie.current_env().user_pipeline(pcoll.pipeline)\n    rm = ie.current_env().get_recording_manager(pipeline, create_if_absent=True)\n    stream = rm.read(pcoll_name, pcoll, max_n=max_n, max_duration_secs=max_duration_secs)\n    if stream:\n        visualize(stream, dynamic_plotting_interval=dynamic_plotting_interval, include_window_info=include_window_info, display_facets=display_facets, element_type=pcoll.element_type)", "docstring": "A simple visualize alternative.\n\nWhen the pcoll_name and pcoll pair identifies a watched and computed\nPCollection in the current interactive environment without ambiguity, an\nElementStream can be built directly from cache. Returns immediately, the\nvisualization is asynchronous, but guaranteed to end in the near future.\n\nArgs:\npcoll_name: the variable name of the PCollection.\npcoll: the PCollection to be visualized.\nmax_n: the maximum number of elements to visualize.\nmax_duration_secs: max duration of elements to read in seconds.\ndynamic_plotting_interval: the interval in seconds between visualization\nupdates if provided; otherwise, no dynamic plotting.\ninclude_window_info: whether to include windowing info in the elements.\ndisplay_facets: whether to display the facets widgets.", "source": "github-repos"}
{"code": "def _batch_transpose(mat):\n  \n  n = distribution_util.prefer_static_rank(mat)\n  perm = tf.range(n)\n  perm = tf.concat([perm[:-2], [perm[-1], perm[-2]]], axis=0)\n  return tf.transpose(a=mat, perm=perm)", "docstring": "Transpose a possibly batched matrix.\n\nArgs:\nmat: A `tf.Tensor` of shape `[..., n, m]`.\n\nReturns:\nA tensor of shape `[..., m, n]` with matching batch dimensions.", "source": "juraj-google-style"}
{"code": "def is_profile_supported(self, conformance_clause, authentication_suite):\n        \n        return (self.is_conformance_clause_supported(conformance_clause) and\n                self.is_authentication_suite_supported(authentication_suite))", "docstring": "Check if a profile is supported by the client.\n\nArgs:\nconformance_clause (ConformanceClause):\nauthentication_suite (AuthenticationSuite):\n\nReturns:\nbool: True if the profile is supported, False otherwise.\n\nExample:\n>>> client.is_profile_supported(\n... ConformanceClause.DISCOVER_VERSIONS,\n... AuthenticationSuite.BASIC)\nTrue", "source": "juraj-google-style"}
{"code": "def identity(shape: Tuple[int, ...], dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True, fmt: str='quat') -> Rigid:\n    return Rigid(Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt), identity_trans(shape, dtype, device, requires_grad))", "docstring": "Constructs an identity transformation.\n\nArgs:\nshape:\nThe desired shape\ndtype:\nThe dtype of both internal tensors\ndevice:\nThe device of both internal tensors\nrequires_grad:\nWhether grad should be enabled for the internal tensors\nReturns:\nThe identity transformation", "source": "github-repos"}
{"code": "def get_defaults(path):\n    defaults = {}\n    if os.path.isfile(path):\n        with open(path) as f:\n            for line in f:\n                line = line.strip()\n                if (('=' not in line) or line.startswith('\n                    continue\n                (k, v) = line.split('=', 1)\n                v = v.strip('\"').strip(\"'\")\n                defaults[k] = v\n        return defaults\n    else:\n        return {}", "docstring": "Reads file for configuration defaults.\n\nArguments:\n- path (str) Absolute filepath (usually ~/.licenser)\n\nReturns:\n- (dict) Defaults for name, email, license, .txt extension", "source": "codesearchnet"}
{"code": "def fit(self, y):\n    self.ndim_ = y.ndim\n    return self", "docstring": "Fit the transformer to a target y.\n\nReturns:\nTargetReshaper\nA reference to the current instance of TargetReshaper.", "source": "github-repos"}
{"code": "def get_input_arrays(self):\n    if self._has_valid_tensors():\n        return [_get_tensor_name(tensor) for tensor in self._input_tensors]\n    else:\n        return [name for name, _ in self._input_arrays_with_shape]", "docstring": "Returns a list of the names of the input tensors.\n\nReturns:\nList of strings.", "source": "github-repos"}
{"code": "def CheckSchema(self, database):\n    schema_match = False\n    if self.SCHEMAS:\n        for schema in self.SCHEMAS:\n            if (database and (database.schema == schema)):\n                schema_match = True\n    return schema_match", "docstring": "Checks the schema of a database with that defined in the plugin.\n\nArgs:\ndatabase (SQLiteDatabase): database.\n\nReturns:\nbool: True if the schema of the database matches that defined by\nthe plugin, or False if the schemas do not match or no schema\nis defined by the plugin.", "source": "codesearchnet"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteVarBytes(self.Script)\n        writer.WriteVarBytes(self.ParameterList)\n        writer.WriteByte(self.ReturnType)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def compile_into_spirv(raw, stage, filepath, language='glsl', optimization='size', suppress_warnings=False, warnings_as_errors=False):\n    stage = stages_mapping[stage]\n    lang = languages_mapping[language]\n    opt = opt_mapping[optimization]\n    options = lib.shaderc_compile_options_initialize()\n    lib.shaderc_compile_options_set_source_language(options, lang)\n    lib.shaderc_compile_options_set_optimization_level(options, opt)\n    lib.shaderc_compile_options_set_target_env(options, lib.shaderc_target_env_vulkan, 0)\n    lib.shaderc_compile_options_set_auto_bind_uniforms(options, False)\n    lib.shaderc_compile_options_set_include_callbacks(options, lib.resolve_callback, lib.release_callback, ffi.NULL)\n    if suppress_warnings:\n        lib.shaderc_compile_options_set_suppress_warnings(options)\n    if warnings_as_errors:\n        lib.shaderc_compile_options_set_warnings_as_errors(options)\n    compiler = lib.shaderc_compiler_initialize()\n    result = lib.shaderc_compile_into_spv(compiler, raw, len(raw), stage, str.encode(filepath), b'main', options)\n    status = lib.shaderc_result_get_compilation_status(result)\n    if (status != lib.shaderc_compilation_status_success):\n        msg = _get_log(result)\n        lib.shaderc_compile_options_release(options)\n        lib.shaderc_result_release(result)\n        lib.shaderc_compiler_release(compiler)\n        raise CompilationError(msg)\n    length = lib.shaderc_result_get_length(result)\n    output_pointer = lib.shaderc_result_get_bytes(result)\n    tmp = bytearray(length)\n    ffi.memmove(tmp, output_pointer, length)\n    spirv = bytes(tmp)\n    lib.shaderc_compile_options_release(options)\n    lib.shaderc_result_release(result)\n    lib.shaderc_compiler_release(compiler)\n    return spirv", "docstring": "Compile shader code into Spir-V binary.\n\nThis function uses shaderc to compile your glsl or hlsl code into Spir-V\ncode. You can refer to the shaderc documentation.\n\nArgs:\nraw (bytes): glsl or hlsl code (bytes format, not str)\nstage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',\n'frag', 'comp']\nfilepath (str): Absolute path of the file (needed for #include)\nlanguage (str): 'glsl' or 'hlsl'\noptimization (str): 'zero' (no optimization) or 'size' (reduce size)\nsuppress_warnings (bool): True to suppress warnings\nwarnings_as_errors (bool): Turn warnings into errors\n\nReturns:\nbytes: Compiled Spir-V binary.\n\nRaises:\nCompilationError: If compilation fails.", "source": "codesearchnet"}
{"code": "def get_integer_index(\n    miller_index: bool, round_dp: int = 4, verbose: bool = True\n) -> Tuple[int, int, int]:\n    \n    miller_index = np.asarray(miller_index)\n\n    \n    \n    miller_index /= min([m for m in miller_index if m != 0])\n    miller_index /= np.max(np.abs(miller_index))\n\n    \n    md = [Fraction(n).limit_denominator(12).denominator for n in miller_index]\n    miller_index *= reduce(lambda x, y: x * y, md)\n    int_miller_index = np.int_(np.round(miller_index, 1))\n    miller_index /= np.abs(reduce(gcd, int_miller_index))\n\n    \n    miller_index = np.array([round(h, round_dp) for h in miller_index])\n\n    \n    int_miller_index = np.int_(np.round(miller_index, 1))\n    if np.any(np.abs(miller_index - int_miller_index) > 1e-6) and verbose:\n        warnings.warn(\"Non-integer encountered in Miller index\")\n    else:\n        miller_index = int_miller_index\n\n    \n    miller_index += 0  \n\n    def n_minus(index):\n        return len([h for h in index if h < 0])\n\n    if n_minus(miller_index) > n_minus(miller_index * -1):\n        miller_index *= -1\n\n    \n    \n    if (\n        sum(miller_index != 0) == 2\n        and n_minus(miller_index) == 1\n        and abs(min(miller_index)) > max(miller_index)\n    ):\n        miller_index *= -1\n\n    return tuple(miller_index)", "docstring": "Attempt to convert a vector of floats to whole numbers.\n\nArgs:\nmiller_index (list of float): A list miller indexes.\nround_dp (int, optional): The number of decimal places to round the\nmiller index to.\nverbose (bool, optional): Whether to print warnings.\n\nReturns:\n(tuple): The Miller index.", "source": "juraj-google-style"}
{"code": "def target_batch_encode_plus(self, answer: List[str], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n    return self._target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Prepare answer strings for the model.\n\nArgs:\nanswer `List[str]`:\nCorresponding answer supervision to the queries for training the model.", "source": "github-repos"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Instantiates a `LearningRateSchedule` from its config.\n\nArgs:\nconfig: Output of `get_config()`.\n\nReturns:\nA `LearningRateSchedule` instance.", "source": "github-repos"}
{"code": "def _process_landing_page(self, item, feed_item):\n    if feed_item.get(FieldMap.AD_LANDING_PAGE_ID, ''):\n        landing_page = self._landing_page_dao.get(feed_item, required=True)\n        item['clickThroughUrl'] = {'landingPageId': landing_page['id']}\n    if feed_item.get(FieldMap.AD_URL_SUFFIX, ''):\n        item['clickThroughUrlSuffixProperties'] = {'overrideInheritedSuffix': True, 'clickThroughUrlSuffix': feed_item.get(FieldMap.AD_URL_SUFFIX, '')}\n    else:\n        item['clickThroughUrlSuffixProperties'] = {'overrideInheritedSuffix': False}", "docstring": "Configures ad landing page.\n\nArgs:\nitem: DCM ad object to update.\nfeed_item: Feed item representing the ad from the Bulkdozer feed", "source": "github-repos"}
{"code": "def timed_display(msg):\n    \n    def print_header(msg, newline=True):\n        \n        if sys.stdout.isatty():\n            print('\\r', end=Style.BRIGHT + Fore.BLUE)\n        print(' {} '.format(msg).center(_ncols(), '='),\n              end='\\n{}'.format(Style.RESET_ALL)\n              if newline else Style.RESET_ALL)\n        sys.stdout.flush()\n\n    def print_message(msg):\n        \n        if sys.stdout.isatty():\n            print('\\r', end='')\n            msg = msg.ljust(_ncols())\n        print(msg, end='')\n        sys.stdout.flush()\n\n    start = time.time()\n    print_header(msg)\n    with hidden_cursor():\n        try:\n            yield print_message\n        finally:\n            delta = time.time() - start\n            print_header('completed in {:.2f}s'.format(delta), False)", "docstring": "A timed block to run tasks with titles and success/failure messages.\n\nArgs:\nmsg: The header message to print at the beginning of the timed block.", "source": "juraj-google-style"}
{"code": "def _from_dataframe(dataframe, default_type='STRING'):\n    type_mapping = {'i': 'INTEGER', 'b': 'BOOLEAN', 'f': 'FLOAT', 'O': 'STRING', 'S': 'STRING', 'U': 'STRING', 'M': 'TIMESTAMP'}\n    fields = []\n    for (column_name, dtype) in dataframe.dtypes.iteritems():\n        fields.append({'name': column_name, 'type': type_mapping.get(dtype.kind, default_type)})\n    return fields", "docstring": "Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set\nthe types of the columns in the dataframe, they may be of a type that forces coercion to\nSTRING, so even though the fields in the dataframe themselves may be numeric, the type in the\nderived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed\ncorrectly.\n\nArgs:\ndataframe: The DataFrame.\ndefault_type : The default big query type in case the type of the column does not exist in\nthe schema. Defaults to 'STRING'.\nReturns:\nA list of dictionaries containing field 'name' and 'type' entries, suitable for use in a\nBigQuery Tables resource schema.", "source": "codesearchnet"}
{"code": "def __init__(self, my_api_key):\n\t\t\n\t\tsuper(self.__class__, self).__init__(my_api_key)\n\n\t\tself.sort_by_postfix = '?sortBy='\n\t\tself.boxes_suffix = 'boxes'\n\t\tself.stages_suffix = 'stages'\n\t\tself.pipelines_suffix = 'pipelines'\n\t\tself.search_suffix = 'search?query='\n\t\tself.snippets_suffix = 'snippets'\n\t\tself.fields_suffix = 'fields'\n\t\tself.newsfeed_suffix = 'newsfeed'\n\t\tself.threads_suffix = 'threads'\n\t\tself.comments_suffix = 'comments'\n\t\tself.files_suffix = 'files'\n\t\tself.file_contents_suffix = 'contents'\n\t\tself.file_link_suffix = 'link'\n\t\tself.reminders_suffix = 'reminders'\n\t\tself.detail_level_suffix = '?detailLevel='\n\n\t\tif DEBUG:\n\t\t\tprint((self.api_uri))", "docstring": "Initializes an instance of the class with an api key\nAllows multiple instances with distinct keys.\nArgs:\nmy_api_key\tapi key for this instance", "source": "juraj-google-style"}
{"code": "def get_submission_variants(form_fields):\n    \n\n    clinvars = []\n\n    \n    \n    if 'all_vars' in form_fields:\n        for field, value in form_fields.items():\n            if field.startswith('local_id'):\n                clinvars.append(form_fields[field].replace('local_id@',''))\n    else:\n        clinvars = [form_fields['main_var']] \n\n    return clinvars", "docstring": "Extracts a list of variant ids from the clinvar submission form in blueprints/variants/clinvar.html (creation of a new clinvar submission).\n\nArgs:\nform_fields(dict): it's the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER\n\nReturns:\nclinvars: A list of variant IDs", "source": "juraj-google-style"}
{"code": "def modified_lu(q):\n    q = q.assemble()\n    (m, b) = (q.shape[0], q.shape[1])\n    S = np.zeros(b)\n    q_work = np.copy(q)\n    for i in range(b):\n        S[i] = ((- 1) * np.sign(q_work[(i, i)]))\n        q_work[(i, i)] -= S[i]\n        q_work[((i + 1):m, i)] /= q_work[(i, i)]\n        q_work[((i + 1):m, (i + 1):b)] -= np.outer(q_work[((i + 1):m, i)], q_work[(i, (i + 1):b)])\n    L = np.tril(q_work)\n    for i in range(b):\n        L[(i, i)] = 1\n    U = np.triu(q_work)[(:b, :)]\n    return (ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S)", "docstring": "Perform a modified LU decomposition of a matrix.\n\nThis takes a matrix q with orthonormal columns, returns l, u, s such that\nq - s = l * u.\n\nArgs:\nq: A two dimensional orthonormal matrix q.\n\nReturns:\nA tuple of a lower triangular matrix l, an upper triangular matrix u,\nand a a vector representing a diagonal matrix s such that\nq - s = l * u.", "source": "codesearchnet"}
{"code": "def is_orthogonal(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:\n    return ((matrix.shape[0] == matrix.shape[1]) and np.all((np.imag(matrix) == 0)) and np.allclose(matrix.dot(matrix.T), np.eye(matrix.shape[0]), rtol=rtol, atol=atol))", "docstring": "Determines if a matrix is approximately orthogonal.\n\nA matrix is orthogonal if it's square and real and its transpose is its\ninverse.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is orthogonal within the given tolerance.", "source": "codesearchnet"}
{"code": "def insecure_channel(target, options=None, *, loop=None, executor=None,\n                    standalone_pool_for_streaming=False):\n    \n    return Channel(_grpc.insecure_channel(target, options), loop, executor, standalone_pool_for_streaming)", "docstring": "Creates an insecure Channel to a server.\n\nArgs:\ntarget: The server address\noptions: An optional list of key-value pairs (channel args in gRPC runtime)\nto configure the channel.\n\nReturns:\nA Channel object.", "source": "juraj-google-style"}
{"code": "def _convert_variables_to_tensors(self):\n    return self", "docstring": "Converts ResourceVariable components to Tensors.\n\nOverride this method to explicitly convert ResourceVariables embedded in the\nCompositeTensor to Tensors. By default, it returns the CompositeTensor\nunchanged.\n\nReturns:\nA CompositeTensor with all its ResourceVariable components converted to\nTensors.", "source": "github-repos"}
{"code": "def recipe_dbm_to_storage(config, auth_read, dbm_report_id, auth_write, dbm_report_name, dbm_bucket, dbm_path):\n    dbm(config, {'auth': auth_read, 'report': {'report_id': dbm_report_id, 'name': dbm_report_name}, 'out': {'storage': {'auth': auth_write, 'bucket': dbm_bucket, 'path': dbm_path}}})", "docstring": "Move existing DV360 report into a Storage bucket.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\ndbm_report_id (integer) - DV360 report ID given in UI, not needed if name used.\nauth_write (authentication) - Credentials used for writing data.\ndbm_report_name (string) - Name of report, not needed if ID used.\ndbm_bucket (string) - Google cloud bucket.\ndbm_path (string) - Path and filename to write to.", "source": "github-repos"}
{"code": "def __init__( self, lattice, hamiltonian ):\n        \n        expected_hamiltonian_values = [ 'nearest-neighbour' ]\n        if hamiltonian not in expected_hamiltonian_values:\n            raise ValueError( hamiltonian )\n        self.site_energies = lattice.site_energies\n        self.nn_energy = lattice.nn_energy\n        self.cn_energy = lattice.cn_energies\n        self.connected_site_pairs = lattice.connected_site_pairs()\n        self.max_coordination_per_site = lattice.max_site_coordination_numbers()\n        self.site_specific_coordination_per_site = lattice.site_specific_coordination_numbers()\n        if hamiltonian == 'nearest-neighbour':\n            self.generate_nearest_neighbour_lookup_table()", "docstring": "Initialise a LookupTable object instance.\n\nArgs:\nlattice (lattice_mc.Lattice): The lattice object, used to define the allowed jumps.\nhamiltonian (Str): The model Hamiltonian used to define the jump energies.\nAllowed values = `nearest-neigbour`\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def to_pandas(self, is_transposed=False):\n    if is_transposed:\n        return self.transpose().to_pandas(False).T\n    else:\n        retrieved_objects = [[obj.to_pandas() for obj in part] for part in self.partitions]\n        if all((isinstance(part, pandas.Series) for row in retrieved_objects for part in row)):\n            axis = 0\n        elif all((isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row)):\n            axis = 1\n        else:\n            ErrorMessage.catch_bugs_and_request_email(True)\n        df_rows = [pandas.concat([part for part in row], axis=axis) for row in retrieved_objects if (not all((part.empty for part in row)))]\n        if (len(df_rows) == 0):\n            return pandas.DataFrame()\n        else:\n            return pandas.concat(df_rows)", "docstring": "Convert this object into a Pandas DataFrame from the partitions.\n\nArgs:\nis_transposed: A flag for telling this object that the external\nrepresentation is transposed, but not the internal.\n\nReturns:\nA Pandas DataFrame", "source": "codesearchnet"}
{"code": "def is_supported(cls, file=None, request=None, response=None,\n                     url_info=None):\n        \n        tests = (\n            (response, cls.is_response),\n            (file, cls.is_file),\n            (request, cls.is_request),\n            (url_info, cls.is_url)\n        )\n\n        for instance, method in tests:\n            if instance:\n                try:\n                    result = method(instance)\n                except NotImplementedError:\n                    pass\n                else:\n                    if result:\n                        return True\n                    elif result is VeryFalse:\n                        return VeryFalse", "docstring": "Given the hints, return whether the document is supported.\n\nArgs:\nfile: A file object containing the document.\nrequest (:class:`.http.request.Request`): An HTTP request.\nresponse (:class:`.http.request.Response`): An HTTP response.\nurl_info (:class:`.url.URLInfo`): A URLInfo.\n\nReturns:\nbool: If True, the reader should be able to read it.", "source": "juraj-google-style"}
{"code": "def _strip_variable_names(self, summaries):\n    result = set()\n    for s in summaries:\n        if '/' not in s.tag:\n            result.add(s)\n        else:\n            split_tag = s.tag.split('/')\n            if 'variable' in split_tag[0]:\n                result.add(s._replace(tag=split_tag[-1]))\n            else:\n                result.add(s)\n    return result", "docstring": "Remove `variable_n` from summary tag\n\n`variable_n` tag names are added with random numbers. Removing them\nensures deterministic tag names.\n\nArgs:\nsummaries: A `set` of `_ObservedSummary` values.\n\nReturns:\nA new `set` of `_ObservedSummary` values with layer prefixes\nremoved.", "source": "github-repos"}
{"code": "def case_to_clinVars(self, case_id):\n    query = dict(case_id=case_id, csv_type='variant')\n    clinvar_objs = list(self.clinvar_collection.find(query))\n    submitted_vars = {}\n    for clinvar in clinvar_objs:\n        submitted_vars[clinvar.get('local_id')] = clinvar\n    return submitted_vars", "docstring": "Get all variants included in clinvar submissions for a case\n\nArgs:\ncase_id(str): a case _id\n\nReturns:\nsubmission_variants(dict): keys are variant ids and values are variant submission objects", "source": "codesearchnet"}
{"code": "def serialize_cert_to_pem(cert_obj):\n    \n    return cert_obj.public_bytes(\n        encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM\n    )", "docstring": "Serialize certificate to PEM.\n\nThe certificate can be also be a Certificate Signing Request (CSR).\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nbytes: PEM encoded certificate", "source": "juraj-google-style"}
{"code": "def _get_metric_histogram(histogram_proto):\n    ret = dict()\n    ret['min'] = histogram_proto.min\n    ret['max'] = histogram_proto.max\n    ret['num'] = histogram_proto.num\n    ret['sum'] = histogram_proto.sum\n    bucket_limits = histogram_proto.bucket_limit\n    bucket_vals = histogram_proto.bucket\n    ret['histogram'] = {}\n    bucket_limits.insert(0, 0)\n    for lb, ub, val in zip(bucket_limits[:-1], bucket_limits[1:], bucket_vals):\n        ret['histogram'][lb, ub] = val\n    return ret", "docstring": "Convert a histogram proto into a dict.\n\nArgs:\nhistogram_proto: a proto containing a Sampler metric's result histogram.\n\nReturns:\nA dict containing summary statistics and the raw histogram values.", "source": "github-repos"}
{"code": "def ParseDownloadsRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = FirefoxDownloadEventData()\n    event_data.full_path = self._GetRowValue(query_hash, row, 'target')\n    event_data.mime_type = self._GetRowValue(query_hash, row, 'mimeType')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.received_bytes = self._GetRowValue(query_hash, row, 'currBytes')\n    event_data.referrer = self._GetRowValue(query_hash, row, 'referrer')\n    event_data.temporary_location = self._GetRowValue(query_hash, row, 'tempPath')\n    event_data.total_bytes = self._GetRowValue(query_hash, row, 'maxBytes')\n    event_data.url = self._GetRowValue(query_hash, row, 'source')\n    timestamp = self._GetRowValue(query_hash, row, 'startTime')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'endTime')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_END)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a downloads row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def create(self, validated_data):\n    email = validated_data.pop('email')\n    password = validated_data.pop('password')\n    user = get_user_model()(**validated_data)\n    user.set_password(password)\n    user.email = email\n    email_query = models.EmailAddress.objects.filter(email=email)\n    if email_query.exists():\n        existing_email = email_query.get()\n        existing_email.send_duplicate_notification()\n    else:\n        user.save()\n        email_instance = models.EmailAddress.objects.create(email=email, user=user)\n        email_instance.send_confirmation()\n        signals.user_registered.send(sender=self.__class__, user=user)\n    return user", "docstring": "Create a new user from the data passed to the serializer.\n\nIf the provided email has not been verified yet, the user is\ncreated and a verification email is sent to the address.\nOtherwise we send a notification to the email address that\nsomeone attempted to register with an email that's already been\nverified.\n\nArgs:\nvalidated_data (dict):\nThe data passed to the serializer after it has been\nvalidated.\n\nReturns:\nA new user created from the provided data.", "source": "codesearchnet"}
{"code": "def from_pymatgen_molecule(cls, molecule):\n    new = cls(atoms=[el.value for el in molecule.species], coords=molecule.cart_coords)\n    return new._to_numeric()", "docstring": "Create an instance of the own class from a pymatgen molecule\n\nArgs:\nmolecule (:class:`pymatgen.core.structure.Molecule`):\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    return transformation_cache.get(self, state_manager)", "docstring": "Returns dense `Tensor` representing numeric feature.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.\n\nReturns:\nDense `Tensor` created within `transform_feature`.", "source": "github-repos"}
{"code": "def apply(self, dataset, flat=False, expanded=None, ranges={}, all_values=False):\n    dimension = self.dimension\n    if (expanded is None):\n        expanded = (not ((dataset.interface.gridded and (dimension in dataset.kdims)) or (dataset.interface.multi and dataset.interface.isscalar(dataset, dimension))))\n    if isinstance(dataset, Graph):\n        if ((dimension in dataset.kdims) and all_values):\n            dimension = dataset.nodes.kdims[2]\n        dataset = (dataset if (dimension in dataset) else dataset.nodes)\n    data = dataset.dimension_values(dimension, expanded=expanded, flat=flat)\n    for o in self.ops:\n        args = o['args']\n        fn_args = [data]\n        for arg in args:\n            if isinstance(arg, dim):\n                arg = arg.apply(dataset, flat, expanded, ranges, all_values)\n            fn_args.append(arg)\n        args = tuple((fn_args[::(- 1)] if o['reverse'] else fn_args))\n        eldim = dataset.get_dimension(dimension)\n        drange = ranges.get(eldim.name, {})\n        drange = drange.get('combined', drange)\n        kwargs = o['kwargs']\n        if ((o['fn'] is norm) and (drange != {}) and (not (('min' in kwargs) and ('max' in kwargs)))):\n            data = o['fn'](data, *drange)\n        else:\n            data = o['fn'](*args, **kwargs)\n    return data", "docstring": "Evaluates the transform on the supplied dataset.\n\nArgs:\ndataset: Dataset object to evaluate the expression on\nflat: Whether to flatten the returned array\nexpanded: Whether to use the expanded expand values\nranges: Dictionary for ranges for normalization\nall_values: Whether to evaluate on all values\nWhether to evaluate on all available values, for some\nelement types, such as Graphs, this may include values\nnot included in the referenced column\n\nReturns:\nvalues: NumPy array computed by evaluating the expression", "source": "codesearchnet"}
{"code": "def extend(self, trajectory):\n        \n        if self.time_step != trajectory.time_step:\n            raise ValueError('Trajectory not extended: Time steps of trajectories is incompatible')\n\n        if len(self.species) != len(trajectory.species) and self.species != trajectory.species:\n            raise ValueError('Trajectory not extended: species in trajectory do not match')\n\n        self.to_positions()\n        trajectory.to_positions()\n\n        self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)\n        self.lattice, self.constant_lattice = self._combine_attribute(self.lattice, trajectory.lattice,\n                                                                      self.frac_coords.shape[0],\n                                                                      trajectory.frac_coords.shape[0])\n        self.site_properties = self._combine_attribute(self.site_properties, trajectory.site_properties,\n                                                       self.frac_coords.shape[0], trajectory.frac_coords.shape[0])", "docstring": "Concatenate another trajectory\n\nArgs:\ntrajectory (Trajectory): Trajectory to add", "source": "juraj-google-style"}
{"code": "def create_order(self, debtor, is_vat_included=True, due_date=None, heading='', text_line1='', text_line2='', debtor_data=None, delivery_data=None, products=None, project=None, other_reference='', model=models.Order, **extra):\n    debtor_data = (debtor_data or {})\n    delivery_data = (delivery_data or {})\n    delivery_date = delivery_data.get('date', datetime.datetime.now())\n    our_reference = extra.get('our_reference', debtor.our_reference)\n    currency = extra.get('currency', debtor.currency)\n    layout = extra.get('layout', debtor.layout)\n    term_of_payment = extra.get('term_of_payment', debtor.term_of_payment)\n    date = extra.get('date', datetime.datetime.now())\n    order_input = {'debtor': debtor, 'number': extra.get('number', 1), 'project': project}\n    for dd in ['name', 'address', 'postal_code', 'city', 'country', 'ean']:\n        order_input[('debtor_%s' % dd)] = debtor_data.get(dd, getattr(debtor, dd))\n    for dd in ['address', 'postal_code', 'city', 'country']:\n        order_input[('delivery_%s' % dd)] = delivery_data.get(dd, getattr(debtor, dd))\n    order_input.update({'delivery_date': (delivery_date or datetime.datetime.now()), 'heading': heading, 'text_line1': text_line1, 'text_line2': text_line2, 'is_archived': extra.get('is_archived', 0), 'is_sent': extra.get('is_sent', 0), 'net_amount': extra.get('net_amount', 0), 'vat_amount': extra.get('vat_amount', 0), 'gross_amount': extra.get('gross_amount', 0), 'margin': extra.get('margin', 0), 'margin_as_percent': extra.get('margin_as_percent', 0), 'date': date, 'our_reference': our_reference, 'other_reference': other_reference, 'currency': currency, 'exchange_rate': extra.get('exchange_rate', 1.0), 'is_vat_included': is_vat_included, 'layout': layout, 'due_date': (due_date or datetime.datetime.now()), 'term_of_payment': term_of_payment})\n    order_input.update(extra)\n    order = self.create(model, **order_input)\n    if products:\n        for product in products:\n            self.create_orderline(order, product)\n    return order", "docstring": "Create a new Order.\n\nArgs:\ndebtor (Debtor): the debtor of the order\ndebtor_data (mapping): map of debtor data {'postal_code: .., 'city': .., 'ean': ..}\ndefaults to values on debitor instance for missing values\ndelivery_data (mapping): map of delivery data {'address': ..., 'postal_code': ...}\ndefaults to values on debitor instance for missing values\ndue_date (datetime): due date\nheading (string): heading to be displayed in the order pdf\ntext_line1 (string): first order description line\ntext_line2 (string): second order description line\nother_reference (string): custom string to be used for identification\nextra (mapping): mapping of extra values to be passed in to the server call\nReturns:\nOrder instance", "source": "codesearchnet"}
{"code": "def _IsIdentifier(cls, string):\n    \n    return (\n        string and not string[0].isdigit() and\n        all(character.isalnum() or character == '_' for character in string))", "docstring": "Checks if a string contains an identifier.\n\nArgs:\nstring (str): string to check.\n\nReturns:\nbool: True if the string contains an identifier, False otherwise.", "source": "juraj-google-style"}
{"code": "def _load_yaml_(file_name):\n    \n    if not os.path.exists(file_name): return dict()\n\n    with open(file_name, 'r', encoding='utf-8') as fp:\n        return YAML().load(stream=fp)", "docstring": "Load assets infomation from file\n\nArgs:\nfile_name: file name\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def window_reverse(windows, window_size, height, width):\n    num_channels = windows.shape[-1]\n    windows = windows.view(-1, height \n    windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)\n    return windows", "docstring": "Merges windows to produce higher resolution features.\nArgs:\nwindows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):\nInput windows\nwindow_size (`int`):\nWindow size\nheight (`int`):\nHeight of the resized audio\nwidth (`int`):\nWidth of the resized audio", "source": "github-repos"}
{"code": "def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):\n    use_auth_token = kwargs.pop('use_auth_token', None)\n    if use_auth_token is not None:\n        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n        if kwargs.get('token', None) is not None:\n            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n        kwargs['token'] = use_auth_token\n    if os.path.isfile(save_directory):\n        raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n    os.makedirs(save_directory, exist_ok=True)\n    if push_to_hub:\n        commit_message = kwargs.pop('commit_message', None)\n        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n        repo_id = self._create_repo(repo_id, **kwargs)\n        files_timestamps = self._get_files_timestamps(save_directory)\n    if self._auto_class is not None:\n        custom_object_save(self, save_directory, config=self)\n    output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME)\n    self.to_json_file(output_image_processor_file)\n    logger.info(f'Image processor saved in {output_image_processor_file}')\n    if push_to_hub:\n        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))\n    return [output_image_processor_file]", "docstring": "Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the\n[`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method.\n\nArgs:\nsave_directory (`str` or `os.PathLike`):\nDirectory where the image processor JSON file will be saved (will be created if it does not exist).\npush_to_hub (`bool`, *optional*, defaults to `False`):\nWhether or not to push your model to the Hugging Face model hub after saving it. You can specify the\nrepository you want to push to with `repo_id` (will default to the name of `save_directory` in your\nnamespace).\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.", "source": "github-repos"}
{"code": "def format_script(sensor_graph):\n    \n\n    records = []\n\n    records.append(SetGraphOnlineRecord(False, address=8))\n    records.append(ClearDataRecord(address=8))\n    records.append(ResetGraphRecord(address=8))\n\n    for node in sensor_graph.nodes:\n        records.append(AddNodeRecord(str(node), address=8))\n\n    for streamer in sensor_graph.streamers:\n        records.append(AddStreamerRecord(streamer, address=8))\n\n    for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()):\n        records.append(SetConstantRecord(stream, value, address=8))\n\n    records.append(PersistGraphRecord(address=8))\n\n    records.append(ClearConfigVariablesRecord())\n    for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):\n        for config_id in sorted(sensor_graph.config_database[slot]):\n            config_type, value = sensor_graph.config_database[slot][config_id]\n            byte_value = _convert_to_bytes(config_type, value)\n\n            records.append(SetConfigRecord(slot, config_id, byte_value))\n\n    \n    app_tag = sensor_graph.metadata_database.get('app_tag')\n    app_version = sensor_graph.metadata_database.get('app_version')\n\n    if app_tag is not None:\n        records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version))\n\n    script = UpdateScript(records)\n    return script.encode()", "docstring": "Create a binary script containing this sensor graph.\n\nThis function produces a repeatable script by applying a known sorting\norder to all constants and config variables when iterating over those\ndictionaries.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format\n\nReturns:\nbytearray: The binary script data.", "source": "juraj-google-style"}
{"code": "def CheckDataVisiblity(self, value):\n    \n    if not self.data_visibility_policy:\n      return None\n\n    visible, reason = self.data_visibility_policy.IsDataVisible(\n        DetermineType(value))\n\n    if visible:\n      return None\n\n    return {\n        'status': {\n            'isError': True,\n            'refersTo': 'VARIABLE_NAME',\n            'description': {\n                'format': reason\n            }\n        }\n    }", "docstring": "Returns a status object if the given name is not visible.\n\nArgs:\nvalue: The value to check.  The actual value here is not important but the\nvalue's metadata (e.g. package and type) will be checked.\n\nReturns:\nNone if the value is visible.  A variable structure with an error status\nif the value should not be visible.", "source": "juraj-google-style"}
{"code": "def remove_service(self, service):\n        \n\n        url = self._url('/services/{0}', service)\n        resp = self._delete(url)\n        self._raise_for_status(resp)\n        return True", "docstring": "Stop and remove a service.\n\nArgs:\nservice (str): Service name or ID\n\nReturns:\n``True`` if successful.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def tables_get(self, table_name):\n    \n    url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)\n    return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a table.\n\nArgs:\ntable_name: a tuple representing the full name of the table.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def _execute_command(self, key, *args):\n        \n        client = self.redis_clients[key.redis_shard_hash() % len(\n            self.redis_clients)]\n        return client.execute_command(*args)", "docstring": "Execute a Redis command on the appropriate Redis shard based on key.\n\nArgs:\nkey: The object ID or the task ID that the query is about.\nargs: The command to run.\n\nReturns:\nThe value returned by the Redis command.", "source": "juraj-google-style"}
{"code": "def sanity_check_states(states_spec):\n    \n    \n    states = copy.deepcopy(states_spec)\n\n    \n    is_unique = ('shape' in states)\n    if is_unique:\n        states = dict(state=states)\n\n    \n    for name, state in states.items():\n        \n        if isinstance(state['shape'], int):\n            state['shape'] = (state['shape'],)\n\n        \n        if 'type' not in state:\n            state['type'] = 'float'\n\n    return states, is_unique", "docstring": "Sanity checks a states dict, used to define the state space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\nstates_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.", "source": "juraj-google-style"}
{"code": "def inside_function() -> bool:\n    return get_default_graph().building_function", "docstring": "Indicates whether the caller code is executing inside a `tf.function`.\n\nReturns:\nBoolean, True if the caller code is executing inside a `tf.function`\nrather than eagerly.\n\nExample:\n\n>>> tf.inside_function()\nFalse\n>>> @tf.function\n... def f():\n...   print(tf.inside_function())\n>>> f()\nTrue", "source": "github-repos"}
{"code": "def key_exists(self, namespace, key):\n    return ((namespace in self.__data) and (key in self.__data[namespace]))", "docstring": "Checks a namespace for the existence of a specific key\n\nArgs:\nnamespace (str): Namespace to check in\nkey (str): Name of the key to check for\n\nReturns:\n`True` if key exists in the namespace, else `False`", "source": "codesearchnet"}
{"code": "def __init__(self, reactants, products):\n        \n        self._input_reactants = reactants\n        self._input_products = products\n        self._all_comp = reactants + products\n\n        els = set()\n        for c in self.all_comp:\n            els.update(c.elements)\n        els = sorted(els)\n\n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n        rp_mat = np.array([[c[el] for el in els] for c in self._all_comp])\n        f_mat = np.concatenate([np.zeros((len(rp_mat), 1)), rp_mat], axis=1)\n        f_mat[len(reactants), 0] = 1  \n        b = np.zeros(len(els) + 1)\n        b[0] = 1\n        coeffs, res, _, s = np.linalg.lstsq(f_mat.T, b, rcond=None)\n\n        \n        \n        \n        \n        if sum(np.abs(s) > 1e-12) == len(f_mat):\n            if res.size > 0 and res[0] > self.TOLERANCE ** 2:\n                raise ReactionError(\"Reaction cannot be balanced.\")\n            else:\n                ok = True\n        else:\n            \n            ok = False\n            n_constr = len(rp_mat) - np.linalg.matrix_rank(rp_mat)\n            f_mat = np.concatenate([np.zeros((len(rp_mat), n_constr)),\n                                    rp_mat], axis=1)\n            b = np.zeros(f_mat.shape[1])\n            b[:n_constr] = 1\n\n            \n            for inds in itertools.combinations(range(len(reactants),\n                                                     len(f_mat)),\n                                               n_constr):\n                f_mat[:, :n_constr] = 0\n                for j, i in enumerate(inds):\n                    f_mat[i, j] = 1\n                \n                coeffs, res, _, s = np.linalg.lstsq(f_mat.T, b, rcond=None)\n                if sum(np.abs(s) > 1e-12) == len(self._all_comp) and \\\n                        (res.size == 0 or res[0] < self.TOLERANCE ** 2):\n                    ok = True\n                    break\n\n        if not ok:\n            r_mat = np.array([[c[el] for el in els] for c in reactants])\n            reactants_underdetermined = (\n                np.linalg.lstsq(r_mat.T, np.zeros(len(els)), rcond=None)[2]\n                != len(reactants))\n            if reactants_underdetermined:\n                raise ReactionError(\"Reaction cannot be balanced. \"\n                                    \"Reactants are underdetermined.\")\n            raise ReactionError(\"Reaction cannot be balanced. \"\n                                \"Unknown error, please report.\")\n\n        self._els = els\n        self._coeffs = coeffs", "docstring": "Reactants and products to be specified as list of\npymatgen.core.structure.Composition.  e.g., [comp1, comp2]\n\nArgs:\nreactants ([Composition]): List of reactants.\nproducts ([Composition]): List of products.", "source": "juraj-google-style"}
{"code": "def validate(self):\n    if (self.value is not None):\n        if (not isinstance(self.value, six.integer_types)):\n            raise TypeError('expected (one of): {0}, observed: {1}'.format(six.integer_types, type(self.value)))\n        elif (self.value > LongInteger.MAX):\n            raise ValueError('long integer value greater than accepted max')\n        elif (self.value < LongInteger.MIN):\n            raise ValueError('long integer value less than accepted min')", "docstring": "Verify that the value of the LongInteger is valid.\n\nRaises:\nTypeError: if the value is not of type int or long\nValueError: if the value cannot be represented by a signed 64-bit\ninteger", "source": "codesearchnet"}
{"code": "def run_coroutine(self, cor, *args, **kwargs):\n    if self.stopping:\n        raise LoopStoppingError(('Could not launch coroutine because loop is shutting down: %s' % cor))\n    self.start()\n    cor = _instaniate_coroutine(cor, args, kwargs)\n    if self.inside_loop():\n        raise InternalError('BackgroundEventLoop.run_coroutine called from inside event loop, would have deadlocked.')\n    future = self.launch_coroutine(cor)\n    return future.result()", "docstring": "Run a coroutine to completion and return its result.\n\nThis method may only be called outside of the event loop.\nAttempting to call it from inside the event loop would deadlock\nand will raise InternalError instead.\n\nArgs:\ncor (coroutine): The coroutine that we wish to run in the\nbackground and wait until it finishes.\n\nReturns:\nobject: Whatever the coroutine cor returns.", "source": "codesearchnet"}
{"code": "def _check_for_definition(iface, cls, tag, defines):\n    \n    attributes = (\n        attr\n        for attr in iface.__abstractmethods__\n        if hasattr(getattr(iface, attr), tag)\n    )\n    for attribute in attributes:\n\n        for node in cls.__mro__:\n\n            if hasattr(node, attribute) and defines(getattr(node, attribute)):\n\n                return True\n\n    try:\n\n        attribute\n        return False\n\n    except NameError:\n\n        \n        \n        return True", "docstring": "Check for a valid definition of a value.\n\nArgs:\niface (Iface): An Iface specification.\ncls (type): Some type to check for a definition.\ntag (str): The name of the tag attribute used to mark the abstract\nmethods.\ndefines (callable): A callable that accepts an attribute and returns\nTrue if the attribute is a valid definition.\n\nReturns:\nbool: Whether or not the definition is found.", "source": "juraj-google-style"}
{"code": "def setup(self, *args):\n        \n        self.setup_formatters(*args)\n        if self.columns:\n            self.print_header()\n        elif self.border and not self.csv:\n            self.print_line(self.make_horizontal_border())", "docstring": "Do preparations before printing the first row\n\nArgs:\n*args: first row cells", "source": "juraj-google-style"}
{"code": "def _AssignTimestamps(pcoll, timestamp: Union[str, dict[str, str]], language: Optional[str]=None):\n    timestamp_fn = _as_callable_for_pcoll(pcoll, timestamp, 'timestamp', language)\n    T = TypeVar('T')\n    return pcoll | beam.Map(lambda x: TimestampedValue(x, timestamp_fn(x))).with_input_types(T).with_output_types(T)", "docstring": "Assigns a new timestamp each element of its input.\n\nThis can be useful when reading records that have the timestamp embedded\nin them, for example with various file types or other sources that by default\nset all timestamps to the infinite past.\n\nNote that the timestamp should only be set forward, as setting it backwards\nmay not cause it to hold back an already advanced watermark and the data\ncould become droppably late.\n\nArgs:\ntimestamp: A field, callable, or expression giving the new timestamp.\nlanguage: The language of the timestamp expression.\nerror_handling: Whether and how to handle errors during timestamp\nevaluation.", "source": "github-repos"}
{"code": "def get_category(self, column):\n    result = pd.Series(index=column.index)\n    for (category, stats) in self.probability_map.items():\n        (start, end) = stats[0]\n        result[((start < column) & (column < end))] = category\n    return result", "docstring": "Returns categories for the specified numeric values\n\nArgs:\ncolumn(pandas.Series): Values to transform into categories\n\nReturns:\npandas.Series", "source": "codesearchnet"}
{"code": "def GetRpcServer(options):\n    rpc_server_class = HttpRpcServer\n\n    def GetUserCredentials():\n        'Prompts the user for a username and password.'\n        global global_status\n        st = global_status\n        global_status = None\n        email = options.email\n        if (email is None):\n            email = GetEmail(('Email (login for uploading to %s)' % options.server))\n        password = getpass.getpass(('Password for %s: ' % email))\n        global_status = st\n        return (email, password)\n    host = (options.host or options.server).lower()\n    if ((host == 'localhost') or host.startswith('localhost:')):\n        email = options.email\n        if (email is None):\n            email = 'test@example.com'\n            logging.info(('Using debug user %s.  Override with --email' % email))\n        server = rpc_server_class(options.server, (lambda : (email, 'password')), host_override=options.host, extra_headers={'Cookie': ('dev_appserver_login=\"%s:False\"' % email)}, save_cookies=options.save_cookies)\n        server.authenticated = True\n        return server\n    return rpc_server_class(options.server, GetUserCredentials, host_override=options.host, save_cookies=options.save_cookies)", "docstring": "Returns an instance of an AbstractRpcServer.\n\nReturns:\nA new AbstractRpcServer, on which RPC calls can be made.", "source": "codesearchnet"}
{"code": "def reduce_mat(mat, mag, r_matrix):\n        \n        max_j = abs(int(round(np.linalg.det(mat) / mag)))\n        reduced = False\n        for h in range(3):\n            k = h + 1 if h + 1 < 3 else abs(2 - h)\n            l = h + 2 if h + 2 < 3 else abs(1 - h)\n            j = np.arange(-max_j, max_j + 1)\n            for j1, j2 in itertools.product(j, repeat=2):\n                temp = mat[h] + j1 * mat[k] + j2 * mat[l]\n                if all([np.round(x, 5).is_integer() for x in list(temp / mag)]):\n                    mat_copy = mat.copy()\n                    mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp])\n                    new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T))\n                    if all([np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))]):\n                        reduced = True\n                        mat[h] = np.array([int(round(ele / mag)) for ele in temp])\n                        break\n            if reduced:\n                break\n\n        if not reduced:\n            warnings.warn(\"Matrix reduction not performed, may lead to non-primitive gb cell.\")\n        return mat", "docstring": "Reduce integer array mat's determinant mag times by linear combination\nof its row vectors, so that the new array after rotation (r_matrix) is\nstill an integer array\n\nArgs:\nmat (3 by 3 array): input matrix\nmag (integer): reduce times for the determinant\nr_matrix (3 by 3 array): rotation matrix\nReturn:\nthe reduced integer array", "source": "juraj-google-style"}
{"code": "def get(self, catID, includeRelationships=False):\n        \n        url = '%(base_url)s/record/%(catID)s' % {\n            'base_url': self.base_url, 'catID': catID\n        }\n        r = self.gbdx_connection.get(url)\n        r.raise_for_status()\n        return r.json()", "docstring": "Retrieves the strip footprint WKT string given a cat ID.\n\nArgs:\ncatID (str): The source catalog ID from the platform catalog.\nincludeRelationships (bool): whether to include graph links to related objects.  Default False.\n\nReturns:\nrecord (dict): A dict object identical to the json representation of the catalog record", "source": "juraj-google-style"}
{"code": "def enable_logging(log_level):\n    root_logger = logging.getLogger()\n    root_logger.setLevel(logging.DEBUG)\n    logfile_handler = logging.StreamHandler(_LOGFILE_STREAM)\n    logfile_handler.setLevel(logging.DEBUG)\n    logfile_handler.setFormatter(logging.Formatter('%(levelname)s [%(asctime)s][%(name)s] %(message)s'))\n    root_logger.addHandler(logfile_handler)\n    if (signal.getsignal(signal.SIGTERM) == signal.SIG_DFL):\n        signal.signal(signal.SIGTERM, _logfile_sigterm_handler)\n    if log_level:\n        handler = logging.StreamHandler()\n        handler.setFormatter(_LogColorFormatter())\n        root_logger.setLevel(log_level)\n        root_logger.addHandler(handler)", "docstring": "Configure the root logger and a logfile handler.\n\nArgs:\nlog_level: The logging level to set the logger handler.", "source": "codesearchnet"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in self._SUPPORTED_KEYS:\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    self._ParseLogLine(parser_mediator, key, structure)", "docstring": "Parses a log record structure and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def section(self, regex, config='running_config'):\n        \n        if config in ['running_config', 'startup_config']:\n            config = getattr(self, config)\n        match = re.search(regex, config, re.M)\n        if not match:\n            raise TypeError('config section not found')\n        block_start, line_end = match.regs[0]\n\n        match = re.search(r'^[^\\s]', config[line_end:], re.M)\n        if not match:\n            raise TypeError('could not find end block')\n        _, block_end = match.regs[0]\n\n        block_end = line_end + block_end\n        return config[block_start:block_end]", "docstring": "Returns a section of the config\n\nArgs:\nregex (str): A valid regular expression used to select sections\nof configuration to return\nconfig (str): The configuration to return.  Valid values for config\nare \"running_config\" or \"startup_config\".  The default value\nis \"running_config\"\n\nReturns:\nThe configuration section as a string object.", "source": "juraj-google-style"}
{"code": "def get_string(self, sort_keys=False, pretty=False):\n        \n        keys = self.keys()\n        if sort_keys:\n            keys = sorted(keys)\n        lines = []\n        for k in keys:\n            if k == \"MAGMOM\" and isinstance(self[k], list):\n                value = []\n\n                if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \\\n                        (self.get(\"LSORBIT\") or self.get(\"LNONCOLLINEAR\")):\n                    value.append(\" \".join(str(i) for j in self[k] for i in j))\n                elif self.get(\"LSORBIT\") or self.get(\"LNONCOLLINEAR\"):\n                    for m, g in itertools.groupby(self[k]):\n                        value.append(\"3*{}*{}\".format(len(tuple(g)), m))\n                else:\n                    \n                    \n                    for m, g in itertools.groupby(self[k], lambda x: float(x)):\n                        value.append(\"{}*{}\".format(len(tuple(g)), m))\n\n                lines.append([k, \" \".join(value)])\n            elif isinstance(self[k], list):\n                lines.append([k, \" \".join([str(i) for i in self[k]])])\n            else:\n                lines.append([k, self[k]])\n\n        if pretty:\n            return str(tabulate([[l[0], \"=\", l[1]] for l in lines],\n                                tablefmt=\"plain\"))\n        else:\n            return str_delimited(lines, None, \" = \") + \"\\n\"", "docstring": "Returns a string representation of the INCAR.  The reason why this\nmethod is different from the __str__ method is to provide options for\npretty printing.\n\nArgs:\nsort_keys (bool): Set to True to sort the INCAR parameters\nalphabetically. Defaults to False.\npretty (bool): Set to True for pretty aligned output. Defaults\nto False.", "source": "juraj-google-style"}
{"code": "async def setup_swiss_points(self, match_win: float = None, match_tie: float = None, game_win: float = None, game_tie: float = None, bye: float = None):\n        \n        params = {}\n        if match_win is not None:\n            params['pts_for_match_win'] = match_win\n        if match_win is not None:\n            params['pts_for_match_tie'] = match_tie\n        if match_win is not None:\n            params['pts_for_game_win'] = game_win\n        if match_win is not None:\n            params['pts_for_game_tie'] = game_tie\n        if match_win is not None:\n            params['pts_for_bye'] = bye\n        assert_or_raise(len(params) > 0, ValueError, 'At least one of the points must be given')\n        await self.update(**params)", "docstring": "|methcoro|\n\nArgs:\nmatch_win\nmatch_tie\ngame_win\ngame_tie\nbye\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def __init__(self, custom_op_registerers=None, **kwargs):\n    self._custom_op_registerers = custom_op_registerers or []\n    super(InterpreterWithCustomOps, self).__init__(**kwargs)", "docstring": "Constructor.\n\nArgs:\ncustom_op_registerers: List of str (symbol names) or functions that take a\npointer to a MutableOpResolver and register a custom op. When passing\nfunctions, use a pybind function that takes a uintptr_t that can be\nrecast as a pointer to a MutableOpResolver.\n**kwargs: Additional arguments passed to Interpreter.\n\nRaises:\nValueError: If the interpreter was unable to create.", "source": "github-repos"}
{"code": "def signature(self, name, file_name, file_type, file_text, **kwargs):\n    group_obj = Signature(name, file_name, file_type, file_text, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Signature data to Batch object.\n\nValid file_types:\n+ Snort ®\n+ Suricata\n+ YARA\n+ ClamAV ®\n+ OpenIOC\n+ CybOX ™\n+ Bro\n+ Regex\n+ SPL - Splunk ® Search Processing Language\n\nArgs:\nname (str): The name for this Group.\nfile_name (str): The name for the attached signature for this Group.\nfile_type (str): The signature type for this Group.\nfile_text (str): The signature content for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Signature.", "source": "codesearchnet"}
{"code": "def get_dofn_specs(dofn: 'DoFn') -> tuple[set[StateSpec], set[TimerSpec]]:\n    from apache_beam.runners.common import MethodWrapper\n    from apache_beam.transforms.core import _DoFnParam\n    from apache_beam.transforms.core import _StateDoFnParam\n    from apache_beam.transforms.core import _TimerDoFnParam\n    all_state_specs = set()\n    all_timer_specs = set()\n    for method_name in dir(dofn):\n        if not isinstance(getattr(dofn, method_name, None), types.MethodType):\n            continue\n        method = MethodWrapper(dofn, method_name)\n        param_ids = [d.param_id for d in method.defaults if isinstance(d, _DoFnParam)]\n        if len(param_ids) != len(set(param_ids)):\n            raise ValueError('DoFn %r has duplicate %s method parameters: %s.' % (dofn, method_name, param_ids))\n        for d in method.defaults:\n            if isinstance(d, _StateDoFnParam):\n                all_state_specs.add(d.state_spec)\n            elif isinstance(d, _TimerDoFnParam):\n                all_timer_specs.add(d.timer_spec)\n    return (all_state_specs, all_timer_specs)", "docstring": "Gets the state and timer specs for a DoFn, if any.\n\nArgs:\ndofn (apache_beam.transforms.core.DoFn): The DoFn instance to introspect for\ntimer and state specs.", "source": "github-repos"}
{"code": "def update(self, *names: str) -> 'ListTree':\n        \n        for name in names:\n            parts = name.split(self._delimiter)\n            self._root.add(*parts)\n        return self", "docstring": "Add all the mailbox names to the tree, filling in any missing nodes.\n\nArgs:\nnames: The names of the mailboxes.", "source": "juraj-google-style"}
{"code": "def _create_variable(self, next_creator, **kwargs):\n    if kwargs.pop('per_worker_variable', False):\n        logging.info('Creating per worker variable')\n        return self._create_per_worker_variable(next_creator, **kwargs)\n    var_creator = self._create_var_creator(next_creator, **kwargs)\n    if 'colocate_with' in kwargs:\n        colocate_with = kwargs['colocate_with']\n        with ops.device(None):\n            with ops.colocate_with(colocate_with):\n                var = var_creator(**kwargs)\n                logging.debug('Creating variable (name:%s, shape:%r) that colocates with %s', var.name, var.shape, kwargs['colocate_with'].name)\n                return var\n    if self._variable_partitioner is None:\n        return self._create_variable_round_robin(var_creator, **kwargs)\n    name = kwargs.get('name', None)\n    dtype = kwargs.get('dtype', None)\n    shape = kwargs.get('shape', None)\n    initial_value = kwargs.get('initial_value', None)\n    if initial_value is None:\n        v = next_creator(**kwargs)\n        if not isinstance(v, resource_variable_ops.UninitializedVariable):\n            raise ValueError('It looks like you are using `ParameterServerStrategy` with a `variable_partitioner`, and trying to create a variable without specifying `initial_value`. This is not allowed. Please specify the `initial_value`.')\n        elif shape is None or dtype is None:\n            raise ValueError('It looks like you are trying to load a `SavedModel` using `tf.saved_model.load` within a `ParameterServerStrategy` scope, but the `SavedModel` is missing shape or dtype information.')\n        else:\n\n            def initializer(shape, dtype, **kwargs):\n                if 'partition_shape' in kwargs:\n                    shape = kwargs['partition_shape']\n                return array_ops.zeros(shape, dtype)\n            initial_value = functools.partial(initializer, shape=shape, dtype=dtype)\n    init_from_fn = callable(initial_value)\n    if init_from_fn and (shape is None or dtype is None):\n        init_from_fn = False\n        initial_value = initial_value()\n    if not init_from_fn:\n        initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n        dtype = initial_value.dtype\n        shape = initial_value.shape\n    else:\n        shape = tensor_shape.as_shape(shape)\n    if shape.rank == 0:\n        return self._create_variable_round_robin(var_creator, **kwargs)\n    num_partitions = self._variable_partitioner(shape=shape, dtype=dtype)\n    if not num_partitions or num_partitions[0] == 0 or any((v != 1 for v in num_partitions[1:])):\n        raise ValueError('variable_partitioner must return a list/tuple whose elements are 1 besides the first element (non-zero), got: %r' % num_partitions)\n    if num_partitions[0] == 1:\n        return self._create_variable_round_robin(var_creator, **kwargs)\n    num_partitions = min(num_partitions[0], shape[0])\n    base = shape[0] \n    extra = shape[0] % num_partitions\n    offsets = []\n    for i in range(num_partitions):\n        if i == 0:\n            offsets.append(0)\n        else:\n            prev_shard_size = base + (1 if i - 1 < extra else 0)\n            offsets.append(offsets[i - 1] + prev_shard_size)\n    offsets.append(shape[0])\n\n    def init_shard_fn(shard_index):\n        if not init_from_fn:\n            logging.log_if(logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)\n            return 
initial_value[offsets[shard_index]:offsets[shard_index + 1]]\n        partition_shape = (offsets[shard_index + 1] - offsets[shard_index],) + shape[1:]\n        partition_offset = (offsets[shard_index],) + (0,) * len(shape[1:])\n        arg_spec = tf_inspect.getfullargspec(initial_value)\n        if 'shard_info' not in arg_spec.args and 'shard_info' not in arg_spec.kwonlyargs:\n            try:\n                value = initial_value(partition_shape=partition_shape, partition_offset=partition_offset)\n            except (TypeError, ValueError):\n                value = initial_value()\n            if value.shape == partition_shape:\n                return value\n            else:\n                logging.log_if(logging.WARN, _INEFFICIENT_INIT_WARNING % name, shard_index == 0 and shape.num_elements() > _LARGE_VARIABLE_NUM_ELEMENTS)\n                return value[offsets[shard_index]:offsets[shard_index + 1]]\n        else:\n            return initial_value(shard_info=trackable.ShardInfo(shape=tensor_shape.as_shape(partition_shape), offset=partition_offset))\n    var_list = []\n    for i in range(num_partitions):\n        kwargs['shape'] = (offsets[i + 1] - offsets[i],) + shape[1:]\n        kwargs['initial_value'] = lambda: init_shard_fn(i)\n        if name is not None:\n            kwargs['name'] = '{}/part_{}'.format(name, i)\n        var_list.append(self._create_variable_round_robin(var_creator, **kwargs))\n    result = sharded_variable.ShardedVariable(var_list)\n    return result", "docstring": "Implements StrategyExtendedV2._create_variable.\n\nCreates a `Variable` or a `ShardedVariable`. A `ShardedVariable` will be\ncreated if satisfying all the following criteria:\n1. `self._variable_partitioner` results in more than one partition on the\nfirst axis.\n2. variable's rank is greater than 0.\n3. variable is not colocated with another variable.\nOtherwise a `Variable` will be created.\n\nArgs:\nnext_creator: See `variable_scope.variable_creator_scope`; the next\ncreator in the chain.\n**kwargs: Passed through to the next creator.\n\nReturns:\nA `Variable` or `ShardedVariable`.", "source": "github-repos"}
{"code": "def _StartMonitoringProcess(self, process):\n    if (process is None):\n        raise ValueError('Missing process.')\n    pid = process.pid\n    if (pid in self._process_information_per_pid):\n        raise KeyError('Already monitoring process (PID: {0:d}).'.format(pid))\n    if (pid in self._rpc_clients_per_pid):\n        raise KeyError('RPC client (PID: {0:d}) already exists'.format(pid))\n    rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()\n    rpc_port = process.rpc_port.value\n    time_waited_for_process = 0.0\n    while (not rpc_port):\n        time.sleep(0.1)\n        rpc_port = process.rpc_port.value\n        time_waited_for_process += 0.1\n        if (time_waited_for_process >= self._RPC_SERVER_TIMEOUT):\n            raise IOError('RPC client unable to determine server (PID: {0:d}) port.'.format(pid))\n    hostname = 'localhost'\n    if (not rpc_client.Open(hostname, rpc_port)):\n        raise IOError('RPC client unable to connect to server (PID: {0:d}) http:\n    self._rpc_clients_per_pid[pid] = rpc_client\n    self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)", "docstring": "Starts monitoring a process.\n\nArgs:\nprocess (MultiProcessBaseProcess): process.\n\nRaises:\nIOError: if the RPC client cannot connect to the server.\nKeyError: if the process is not registered with the engine or\nif the process is already being monitored.\nOSError: if the RPC client cannot connect to the server.\nValueError: if the process is missing.", "source": "codesearchnet"}
{"code": "def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):\n    \n    if len(y_true) <= batch_size * window_size:\n        raise ValueError(\"Window size (%s) larger than y_true (len=%s).\"\n                         % (batch_size, len(y_true)))\n\n    num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)\n\n    anomalies_indices = []\n\n    for i in range(num_windows + 1):\n        prev_index = i * batch_size\n        curr_index = (window_size * batch_size) + (i * batch_size)\n\n        if i == num_windows + 1:\n            curr_index = len(y_true)\n\n        window_smoothed_errors = smoothed_errors[prev_index:curr_index]\n        window_y_true = y_true[prev_index:curr_index]\n\n        epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)\n\n        window_anom_indices = get_anomalies(\n            window_smoothed_errors,\n            window_y_true,\n            sd_threshold,\n            i,\n            anomalies_indices,\n            error_buffer\n        )\n\n        \n        \n        \n\n        \n        mu = np.mean(window_smoothed_errors)\n        smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]\n        epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)\n        inv_anom_indices = get_anomalies(\n            smoothed_errors_inv,\n            window_y_true,\n            sd_inv,\n            i,\n            anomalies_indices,\n            len(y_true)\n        )\n\n        anomalies_indices = list(set(anomalies_indices + inv_anom_indices))\n\n        anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])\n\n    \n    anomalies_indices = sorted(list(set(anomalies_indices)))\n    anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]\n    anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]\n\n    \n    anomalies_scores = []\n    for e_seq in anomaly_sequences:\n        denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)\n        score = max([\n            abs(smoothed_errors[x] - epsilon) / denominator\n            for x in range(e_seq[0], e_seq[1])\n        ])\n\n        anomalies_scores.append(score)\n\n    return anomaly_sequences, anomalies_scores", "docstring": "Extracts anomalies from the errors.\nArgs:\ny_true ():\nsmoothed_errors ():\nwindow_size (int):\nbatch_size (int):\nerror_buffer (int):\nReturns:", "source": "juraj-google-style"}
{"code": "def call(self, command, *args):\n        \n        return self.rpc.call(str(command), *args)", "docstring": "Passes an arbitrary command to the coin daemon.\n\nArgs:\ncommand (str): command to be sent to the coin daemon", "source": "juraj-google-style"}
{"code": "def random_int_generator(maxrange):\n    try:\n        return random.randint(0, maxrange)\n    except:\n        (line, filename, synerror) = trace()\n        raise ArcRestHelperError({'function': 'random_int_generator', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        pass", "docstring": "Generates a random integer from 0 to `maxrange`, inclusive.\n\nArgs:\nmaxrange (int): The upper range of integers to randomly choose.\n\nReturns:\nint: The randomly generated integer from :py:func:`random.randint`.\n\nExamples:\n>>> arcresthelper.common.random_int_generator(15)\n9", "source": "codesearchnet"}
{"code": "def plot_zt_dop(self, temps='all', output='average', relaxation_time=1e-14):\n    import matplotlib.pyplot as plt\n    if (output == 'average'):\n        zt = self._bz.get_zt(relaxation_time=relaxation_time, output='average')\n    elif (output == 'eigs'):\n        zt = self._bz.get_zt(relaxation_time=relaxation_time, output='eigs')\n    tlist = (sorted(zt['n'].keys()) if (temps == 'all') else temps)\n    plt.figure(figsize=(22, 14))\n    for (i, dt) in enumerate(['n', 'p']):\n        plt.subplot((121 + i))\n        for temp in tlist:\n            if (output == 'eigs'):\n                for xyz in range(3):\n                    plt.semilogx(self._bz.doping[dt], zip(*zt[dt][temp])[xyz], marker='s', label=(((str(xyz) + ' ') + str(temp)) + ' K'))\n            elif (output == 'average'):\n                plt.semilogx(self._bz.doping[dt], zt[dt][temp], marker='s', label=(str(temp) + ' K'))\n        plt.title((dt + '-type'), fontsize=20)\n        if (i == 0):\n            plt.ylabel('zT', fontsize=30.0)\n        plt.xlabel('Doping concentration ($cm^{-3}$)', fontsize=30.0)\n        p = ('lower right' if (i == 0) else '')\n        plt.legend(loc=p, fontsize=15)\n        plt.grid()\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n    plt.tight_layout()\n    return plt", "docstring": "Plot the figure of merit zT in function of doping levels for different\ntemperatures.\n\nArgs:\ntemps: the default 'all' plots all the temperatures in the analyzer.\nSpecify a list of temperatures if you want to plot only some.\noutput: with 'average' you get an average of the three directions\nwith 'eigs' you get all the three directions.\nrelaxation_time: specify a constant relaxation time value\n\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def handle_unsubscribe(self, request, path):\n        \n        \n        ret = []\n        if path:\n            \n            name = path[0]\n            child = self.children[name]\n            ret += child.handle_unsubscribe(request, path[1:])\n            if not child.children and not child.update_requests \\\n                    and not child.delta_requests:\n                del self.children[name]\n        else:\n            \n            if request in self.update_requests:\n                self.update_requests.remove(request)\n            else:\n                self.delta_requests.remove(request)\n            ret.append(request.return_response())\n        return ret", "docstring": "Remove from the notifier list and send a return\n\nArgs:\nrequest (Subscribe): The original subscribe request\npath (list): The relative path from ourself\n\nReturns:\nlist: [(callback, Response)] that need to be called", "source": "juraj-google-style"}
{"code": "def query_dict_to_string(query):\n        \n\n        query_params = []\n\n        for key, value in query.items():\n            query_params.append(key + \"=\" + value)\n\n        return \"&\".join(query_params)", "docstring": "Convert an OrderedDict to a query string.\n\nArgs:\nquery (obj): The key value object with query params.\n\nReturns:\nstr: The query string.\n\nNote:\nThis method does the same as urllib.parse.urlencode except\nthat it doesn't actually encode the values.", "source": "juraj-google-style"}
{"code": "def from_lasio_curve(cls, curve, depth=None, basis=None, start=None, stop=None, step=0.1524, run=(- 1), null=(- 999.25), service_company=None, date=None):\n    data = curve.data\n    unit = curve.unit\n    if (depth is not None):\n        d = np.diff(depth)\n        if (not np.allclose((d - np.mean(d)), np.zeros_like(d))):\n            m = 'Irregular sampling in depth is not supported. '\n            m += 'Interpolating to regular basis.'\n            warnings.warn(m)\n            step = np.nanmedian(d)\n            (start, stop) = (depth[0], (depth[(- 1)] + 1e-05))\n            basis = np.arange(start, stop, step)\n            data = np.interp(basis, depth, data)\n        else:\n            step = np.nanmedian(d)\n            start = depth[0]\n    if (start is None):\n        if (basis is not None):\n            start = basis[0]\n            step = (basis[1] - basis[0])\n        else:\n            raise CurveError('You must provide a basis or a start depth.')\n    if (step == 0):\n        if (stop is None):\n            raise CurveError('You must provide a step or a stop depth.')\n        else:\n            step = ((stop - start) / (curve.data.shape[0] - 1))\n    params = {}\n    params['mnemonic'] = curve.mnemonic\n    params['description'] = curve.descr\n    params['start'] = start\n    params['step'] = step\n    params['units'] = unit\n    params['run'] = run\n    params['null'] = null\n    params['service_company'] = service_company\n    params['date'] = date\n    params['code'] = curve.API_code\n    return cls(data, params=params)", "docstring": "Makes a curve object from a lasio curve object and either a depth\nbasis or start and step information.\n\nArgs:\ncurve (ndarray)\ndepth (ndarray)\nbasis (ndarray)\nstart (float)\nstop (float)\nstep (float): default: 0.1524\nrun (int): default: -1\nnull (float): default: -999.25\nservice_company (str): Optional.\ndata (str): Optional.\n\nReturns:\nCurve. An instance of the class.", "source": "codesearchnet"}
{"code": "def value_to_pytd_type(self, node, v, seen, view):\n    if isinstance(v, (abstract.Empty, typing_overlay.Never)):\n        return pytd.NothingType()\n    elif isinstance(v, abstract.TYPE_VARIABLE_INSTANCES):\n        return self._type_variable_to_pytd_type(node, v, seen, view)\n    elif isinstance(v, (typing_overlay.TypeVar, typing_overlay.ParamSpec)):\n        return pytd.NamedType('builtins.type')\n    elif isinstance(v, dataclass_overlay.FieldInstance):\n        if not v.default:\n            return pytd.AnythingType()\n        return pytd_utils.JoinTypes((self.value_to_pytd_type(node, d, seen, view) for d in v.default.data))\n    elif isinstance(v, attr_overlay.AttribInstance):\n        ret = self.value_to_pytd_type(node, v.typ, seen, view)\n        md = metadata.to_pytd(v.to_metadata())\n        return pytd.Annotated(ret, (\"'pytype_metadata'\", md))\n    elif isinstance(v, special_builtins.PropertyInstance):\n        return pytd.NamedType('builtins.property')\n    elif isinstance(v, typed_dict.TypedDict):\n        return pytd.NamedType(v.props.name)\n    elif isinstance(v, abstract.FUNCTION_TYPES):\n        try:\n            signatures = function.get_signatures(v)\n        except NotImplementedError:\n            return pytd.NamedType('typing.Callable')\n        if len(signatures) == 1:\n            val = self.signature_to_callable(signatures[0])\n            if not isinstance(v, abstract.PYTD_FUNCTION_TYPES) or not val.formal:\n                return self.value_instance_to_pytd_type(node, val, None, seen, view)\n        return pytd.NamedType('typing.Callable')\n    elif isinstance(v, (abstract.ClassMethod, abstract.StaticMethod)):\n        return self.value_to_pytd_type(node, v.method, seen, view)\n    elif isinstance(v, (special_builtins.IsInstance, special_builtins.ClassMethodCallable)):\n        return pytd.NamedType('typing.Callable')\n    elif isinstance(v, abstract.Class):\n        param = self.value_instance_to_pytd_type(node, v, None, seen, view)\n        return pytd.GenericType(base_type=pytd.NamedType('builtins.type'), parameters=(param,))\n    elif isinstance(v, abstract.Module):\n        return pytd.Alias(v.name, pytd.Module(v.name, module_name=v.full_name))\n    elif self._output_mode >= Converter.OutputMode.LITERAL and isinstance(v, abstract.ConcreteValue) and isinstance(v.pyval, (int, str, bytes)):\n        return pytd.Literal(repr(v.pyval))\n    elif isinstance(v, abstract.SimpleValue):\n        ret = self.value_instance_to_pytd_type(node, v.cls, v, seen=seen, view=view)\n        ret.Visit(visitors.FillInLocalPointers({'builtins': self.ctx.loader.builtins}))\n        return ret\n    elif isinstance(v, abstract.Union):\n        return pytd_utils.JoinTypes((self.value_to_pytd_type(node, o, seen, view) for o in v.options))\n    elif isinstance(v, special_builtins.SuperInstance):\n        return pytd.NamedType('builtins.super')\n    elif isinstance(v, abstract.TypeParameter):\n        if self._detailed:\n            return pytd.NamedType('typing.TypeVar')\n        else:\n            return pytd.AnythingType()\n    elif isinstance(v, abstract.ParamSpec):\n        if self._detailed:\n            return pytd.NamedType('typing.ParamSpec')\n        else:\n            return pytd.AnythingType()\n    elif isinstance(v, abstract.Unsolvable):\n        return pytd.AnythingType()\n    elif isinstance(v, abstract.Unknown):\n        return pytd.NamedType(v.class_name)\n    elif isinstance(v, abstract.BuildClass):\n        return pytd.NamedType('typing.Callable')\n    elif 
isinstance(v, abstract.FinalAnnotation):\n        param = self.value_to_pytd_type(node, v.annotation, seen, view)\n        return pytd.GenericType(base_type=pytd.NamedType('typing.Final'), parameters=(param,))\n    elif isinstance(v, abstract.SequenceLength):\n        return pytd.Annotated(base_type=pytd.NamedType('SequenceLength'), annotations=(str(v.length), str(v.splat)))\n    elif isinstance(v, abstract.Concatenate):\n        return pytd.NamedType('typing.Concatenate')\n    elif isinstance(v, function.ParamSpecMatch):\n        return pytd.AnythingType()\n    elif isinstance(v, abstract.ParamSpecArgs):\n        return pytd.AnythingType()\n    else:\n        raise NotImplementedError(v.__class__.__name__)", "docstring": "Get a PyTD type representing this object, as seen at a node.\n\nArgs:\nnode: The node from which we want to observe this object.\nv: The object.\nseen: The set of values seen before while computing the type.\nview: A Variable -> binding map.\n\nReturns:\nA PyTD type.", "source": "github-repos"}
{"code": "def _unverified_decode(token):\n    \n    token = _helpers.to_bytes(token)\n\n    if token.count(b'.') != 2:\n        raise ValueError(\n            'Wrong number of segments in token: {0}'.format(token))\n\n    encoded_header, encoded_payload, signature = token.split(b'.')\n    signed_section = encoded_header + b'.' + encoded_payload\n    signature = _helpers.padded_urlsafe_b64decode(signature)\n\n    \n    header = _decode_jwt_segment(encoded_header)\n    payload = _decode_jwt_segment(encoded_payload)\n\n    return header, payload, signed_section, signature", "docstring": "Decodes a token and does no verification.\n\nArgs:\ntoken (Union[str, bytes]): The encoded JWT.\n\nReturns:\nTuple[str, str, str, str]: header, payload, signed_section, and\nsignature.\n\nRaises:\nValueError: if there are an incorrect amount of segments in the token.", "source": "juraj-google-style"}
{"code": "def find_container_traits(cls_or_string):\n    if utils.is_str(cls_or_string):\n        if (not templates.is_instantiation(cls_or_string)):\n            return None\n        name = templates.name(cls_or_string)\n        if name.startswith('std::'):\n            name = name[len('std::'):]\n        if name.startswith('std::tr1::'):\n            name = name[len('std::tr1::'):]\n        for cls_traits in all_container_traits:\n            if (cls_traits.name() == name):\n                return cls_traits\n    else:\n        if isinstance(cls_or_string, class_declaration.class_types):\n            if (cls_or_string.cache.container_traits is not None):\n                return cls_or_string.cache.container_traits\n        for cls_traits in all_container_traits:\n            if cls_traits.is_my_case(cls_or_string):\n                if isinstance(cls_or_string, class_declaration.class_types):\n                    cls_or_string.cache.container_traits = cls_traits\n                return cls_traits", "docstring": "Find the container traits type of a declaration.\n\nArgs:\ncls_or_string (str | declarations.declaration_t): a string\n\nReturns:\ndeclarations.container_traits: a container traits", "source": "codesearchnet"}
{"code": "def parse_vasprun( self ):\n                    \n        self.vasprun_filename = match_filename( 'vasprun.xml' )\n        if not self.vasprun_filename:\n            raise FileNotFoundError( 'Could not find vasprun.xml or vasprun.xml.gz file' )\n        try:\n            self.vasprun = Vasprun( self.vasprun_filename, parse_potcar_file=False )\n        except ET.ParseError:\n            self.vasprun = None\n        except:\n            raise", "docstring": "Read in `vasprun.xml` as a pymatgen Vasprun object.\n\nArgs:\nNone\n\nReturns:\nNone\n\nNone:\nIf the vasprun.xml is not well formed this method will catch the ParseError\nand set self.vasprun = None.", "source": "juraj-google-style"}
{"code": "def normalize_digits_only(number, keep_non_digits=False):\n    \n    number = unicod(number)\n    number_length = len(number)\n    normalized_digits = U_EMPTY_STRING\n    for ii in range(number_length):\n        d = unicode_digit(number[ii], -1)\n        if d != -1:\n            normalized_digits += unicod(d)\n        elif keep_non_digits:\n            normalized_digits += number[ii]\n    return normalized_digits", "docstring": "Normalizes a string of characters representing a phone number.\n\nThis converts wide-ascii and arabic-indic numerals to European numerals,\nand strips punctuation and alpha characters (optional).\n\nArguments:\nnumber -- a string representing a phone number\nkeep_non_digits -- whether to keep non-digits\n\nReturns the normalized string version of the phone number.", "source": "juraj-google-style"}
{"code": "def sparse_intersection_indices_and_values(x1, x2):\n    ones1 = tf.sparse.map_values(ones_like_int8, x1)\n    ones2 = tf.sparse.map_values(ones_like_int8, x2)\n    intersection_extra_dim = tf.sets.intersection(tf.sparse.expand_dims(ones1, axis=-1), tf.sparse.expand_dims(ones2, axis=-1))\n\n    def empty_intersection():\n        return (tf.zeros((0, x1.shape.rank), dtype=tf.int64), tf.zeros((0,), dtype=x1.values.dtype), tf.zeros((0,), dtype=x2.values.dtype))\n\n    def non_empty_intersection():\n        intersection = tf.sparse.reshape(intersection_extra_dim, x1.dense_shape)\n        zeros1 = tf.sparse.map_values(zeros_like_int8, x1)\n        zeros2 = tf.sparse.map_values(zeros_like_int8, x2)\n        mask1 = tf.sparse.add(zeros1, intersection)\n        mask2 = tf.sparse.add(zeros2, intersection)\n        return (intersection.indices, tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool)).values, tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool)).values)\n    return tf.cond(tf.equal(tf.size(intersection_extra_dim), 0), empty_intersection, non_empty_intersection)", "docstring": "Compute the indices for the intersection of two `tf.SparseTensor`s and\nmodify the values for these indices.\n\nArgs:\nx1: the first `tf.SparseTensor`.\nx2: the second `tf.SparseTensor`.\nReturns: A tuple containing:\n- the indices for the intersection\n- `x1` values for the intersection indices (some values were removed)\n- `x2` values for the intersection indices (some values were removed)", "source": "github-repos"}
{"code": "def parse(self, filename):\n    with io.open(filename, 'r', encoding='utf-8') as _:\n        lines = _.readlines()\n    all_source_files = set()\n    source_map = {}\n    lineno = 0\n    root = None\n    index = None\n    cur_level = (- 1)\n    parent_queue = []\n    for line in lines:\n        try:\n            (level, line) = dedent(line)\n            if line.startswith('\n                lineno += 1\n                continue\n            elif line.startswith('\\\\\n                line = line[1:]\n        except IndentError as exc:\n            error('bad-indent', 'Invalid indentation', filename=filename, lineno=lineno, column=exc.column)\n        if (not line):\n            lineno += 1\n            continue\n        source_file = dequote(line)\n        if (not source_file):\n            lineno += 1\n            continue\n        if (source_file in all_source_files):\n            error('sitemap-duplicate', 'Filename listed twice', filename=filename, lineno=lineno, column=((level * 8) + 1))\n        all_source_files.add(source_file)\n        source_map[source_file] = (lineno, ((level * 8) + 1))\n        page = OrderedDict()\n        if ((root is not None) and (level == 0)):\n            error('sitemap-error', 'Sitemaps only support one root', filename=filename, lineno=lineno, column=0)\n        if (root is None):\n            root = page\n            index = source_file\n        else:\n            lvl_diff = (cur_level - level)\n            while (lvl_diff >= 0):\n                parent_queue.pop()\n                lvl_diff -= 1\n            parent_queue[(- 1)][source_file] = page\n        parent_queue.append(page)\n        cur_level = level\n        lineno += 1\n    return Sitemap(root, filename, index, source_map)", "docstring": "Parse a sitemap file.\n\nArgs:\nfilename: str, the path to the sitemap file.\n\nReturns:\nSitemap: the generated sitemap.", "source": "codesearchnet"}
{"code": "def port_create_gre(br, port, id, remote):\n    \n    if not 0 <= id < 2**32:\n        return False\n    elif not __salt__['dig.check_ip'](remote):\n        return False\n    elif not bridge_exists(br):\n        return False\n    elif port in port_list(br):\n        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)\n        result = __salt__['cmd.run_all'](cmd)\n        return _retcode_to_bool(result['retcode'])\n    else:\n        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \\\n              'options:key={3}'.format(br, port, remote, id)\n        result = __salt__['cmd.run_all'](cmd)\n        return _retcode_to_bool(result['retcode'])", "docstring": "Generic Routing Encapsulation - creates GRE tunnel between endpoints.\n\nArgs:\nbr: A string - bridge name.\nport: A string - port name.\nid: An integer - unsigned 32-bit number, tunnel's key.\nremote: A string - remote endpoint's IP address.\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10", "source": "juraj-google-style"}
{"code": "def from_proto(context_def, import_scope=None):\n    ret = WhileContext(context_def=context_def, import_scope=import_scope)\n    ret.Enter()\n    for nested_def in context_def.nested_contexts:\n        from_control_flow_context_def(nested_def, import_scope=import_scope)\n    ret.Exit()\n    return ret", "docstring": "Returns a `WhileContext` object created from `context_def`.\n\nArgs:\ncontext_def: A `WhileContextDef` protocol buffer.\nimport_scope: Optional `string`. Name scope to add.\n\nReturns:\nA `WhileContext` Python object.", "source": "github-repos"}
{"code": "def _get_parameter_conversion_entry(parameter_config):\n    entry = _PARAM_CONVERSION_MAP.get(parameter_config.get('type'))\n    if ((entry is None) and ('enum' in parameter_config)):\n        entry = _PARAM_CONVERSION_MAP['enum']\n    return entry", "docstring": "Get information needed to convert the given parameter to its API type.\n\nArgs:\nparameter_config: The dictionary containing information specific to the\nparameter in question. This is retrieved from request.parameters in the\nmethod config.\n\nReturns:\nThe entry from _PARAM_CONVERSION_MAP with functions/information needed to\nvalidate and convert the given parameter from a string to the type expected\nby the API.", "source": "codesearchnet"}
{"code": "def _execute_primitives(self, commands):\n    for p in commands:\n        if (self._scanchain and self._scanchain._debug):\n            print('  Executing', p)\n        p.execute(self)", "docstring": "Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises.\n\nArgs:\ncommands: A list of Executable Primitives to be run in order.", "source": "codesearchnet"}
{"code": "def _write_log(self, version_key, meta_data, index_fields):\n        \n        meta_data = meta_data or {}\n        meta_data.update({\n            'version_key': version_key,\n            'timestamp': time.time(),\n        })\n        obj = log_bucket.new(data=meta_data)\n        obj.add_index('version_key_bin', version_key)\n        obj.add_index('timestamp_int', int(meta_data['timestamp']))\n        for field, index_type in index_fields:\n            obj.add_index('%s_%s' % (field, index_type), meta_data.get(field, \"\"))\n        obj.store()", "docstring": "Creates a log entry for current object,\nArgs:\nversion_key(str): Version_bucket key from _write_version().\nmeta_data (dict): JSON serializable meta data for logging of save operation.\n{'lorem': 'ipsum', 'dolar': 5}\nindex_fields (list): Tuple list for secondary indexing keys in riak (with 'bin' or 'int').\n[('lorem','bin'),('dolar','int')]\n\nReturns:", "source": "juraj-google-style"}
{"code": "def add_document(self, key, url, **kwargs):\n        \n        document = self._check_metadata_for_file(key=key, url=url, **kwargs)\n\n        for dict_key in (\n            'description',\n            'fulltext',\n            'hidden',\n            'material',\n            'original_url',\n            'url',\n            'filename',\n        ):\n            if kwargs.get(dict_key):\n                document[dict_key] = kwargs[dict_key]\n\n        if key_already_there(document, self.record.get('documents', ())):\n            raise ValueError(\n                'There\\'s already a document with the key %s.'\n                % document['key']\n            )\n\n        self._append_to('documents', document)", "docstring": "Adds document to record\nArgs:\nkey (string): document key\nurl (string): document url\nKeyword Args:\ndescription (string): simple description\nfulltext (bool): mark if this is a full text\nhidden (bool): is document should be hidden\nmaterial (string):\noriginal_url (string): original url\nfilename (string): current url\n\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def module_help(self, module):\n    helplist = []\n    self._render_our_module_key_flags(module, helplist)\n    return '\\n'.join(helplist)", "docstring": "Describes the key flags of a module.\n\nArgs:\nmodule: module|str, the module to describe the key flags for.\n\nReturns:\nstr, describing the key flags of a module.", "source": "codesearchnet"}
{"code": "def _get_named_attributes(self):\n    for (cls, instance) in zip(self.get_class_attributes(), self._get_instance_attributes()):\n        (attr_name, cls_value) = cls\n        instance_value = instance[1]\n        (yield (attr_name, instance_value, cls_value))", "docstring": "Return generator for attribute's name, instance and class values.\n\nAdd attribute name to meth:`_get_attributes` for a better debugging\nmessage, so user can find the error easier.\n\nReturns:\ngenerator: Tuple with attribute's name, instance and class values.", "source": "codesearchnet"}
{"code": "def check_initialized(self):\n    for (name, field) in self.__by_name.items():\n        value = getattr(self, name)\n        if (value is None):\n            if field.required:\n                raise ValidationError(('Message %s is missing required field %s' % (type(self).__name__, name)))\n        else:\n            try:\n                if (isinstance(field, MessageField) and issubclass(field.message_type, Message)):\n                    if field.repeated:\n                        for item in value:\n                            item_message_value = field.value_to_message(item)\n                            item_message_value.check_initialized()\n                    else:\n                        message_value = field.value_to_message(value)\n                        message_value.check_initialized()\n            except ValidationError as err:\n                if (not hasattr(err, 'message_name')):\n                    err.message_name = type(self).__name__\n                raise", "docstring": "Check class for initialization status.\n\nCheck that all required fields are initialized\n\nRaises:\nValidationError: If message is not initialized.", "source": "codesearchnet"}
{"code": "def get_cudnn_version():\n    key = 'cudnn_ver'\n    cmds = cmds_all[PLATFORM.lower()][key]\n    out, err = run_shell_cmd(cmds[0])\n    if err and FLAGS.debug:\n        print('Error in finding `cudnn.h`:\\n %s' % str(err))\n    if len(out.split(b' ')) > 1:\n        cmd = cmds[0] + ' | ' + cmds[1]\n        out_re, err_re = run_shell_cmd(cmd)\n        if err_re and FLAGS.debug:\n            print('Error in detecting cuDNN version:\\n %s' % str(err_re))\n        return out_re.strip(b'\\n')\n    else:\n        return", "docstring": "Retrieves the version of cuDNN library detected.\n\nReturns:\nString that is the version of cuDNN library detected.\ne.g. '7.5.0'", "source": "github-repos"}
{"code": "def GetSources(self, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    file_system_type = getattr(event, 'file_system_type', 'UNKNOWN')\n    timestamp_desc = getattr(event, 'timestamp_desc', 'Time')\n    source_long = '{0:s} {1:s}'.format(file_system_type, timestamp_desc)\n\n    return self.SOURCE_SHORT, source_long", "docstring": "Determines the the short and long source for an event object.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): short and long source string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def _is_op_stateful(op):\n    if op.type == 'GlobalIterId':\n        return False\n    if op.type == 'UpdateFdoWithGlobalMinibatchStatistics':\n        return False\n    if op.type == 'CollectiveGatherV2' and op.get_attr('is_stateless'):\n        return False\n    if op.type == 'CollectiveAllToAllV2' and op.get_attr('is_stateless'):\n        return False\n    return op._is_stateful", "docstring": "Check whether an op is stateful.\n\nThis helper function handles two special cases to make the stateful analysis\nconsistent with the mlir side effect analysis.\n1. GlobalIterIdOp should be stateless.\n2. CollectiveGatherV2 with attribute is_stateless to be True should be\nstateless.\n\nArgs:\nop: Operation\n\nReturns:\nBoolean indicates whether the operation is stateless or not.", "source": "github-repos"}
{"code": "def _CreateWindowsPathResolver(\n      self, file_system, mount_point, environment_variables):\n    \n    if environment_variables is None:\n      environment_variables = []\n\n    path_resolver = windows_path_resolver.WindowsPathResolver(\n        file_system, mount_point)\n\n    for environment_variable in environment_variables:\n      name = environment_variable.name.lower()\n      if name not in ('systemroot', 'userprofile'):\n        continue\n\n      path_resolver.SetEnvironmentVariable(\n          environment_variable.name, environment_variable.value)\n\n    return path_resolver", "docstring": "Create a Windows path resolver and sets the environment variables.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system.\nmount_point (dfvfs.PathSpec): mount point path specification.\nenvironment_variables (list[EnvironmentVariableArtifact]): environment\nvariables.\n\nReturns:\ndfvfs.WindowsPathResolver: Windows path resolver.", "source": "juraj-google-style"}
{"code": "def _info_from_string(info_string):\n    try:\n        json_value = json.loads(info_string)\n    except ValueError:\n        raise ValueError(('invalid JSON: %r' % (info_string,)))\n    if (not isinstance(json_value, dict)):\n        raise ValueError(('not a JSON object: %r' % (json_value,)))\n    if (json_value.get('version') != version.VERSION):\n        raise ValueError(('incompatible version: %r' % (json_value,)))\n    expected_keys = frozenset(_TENSORBOARD_INFO_FIELDS)\n    actual_keys = frozenset(json_value)\n    if (expected_keys != actual_keys):\n        raise ValueError(('bad keys on TensorBoardInfo (missing: %s; extraneous: %s)' % ((expected_keys - actual_keys), (actual_keys - expected_keys))))\n    for key in _TENSORBOARD_INFO_FIELDS:\n        field_type = _TENSORBOARD_INFO_FIELDS[key]\n        if (not isinstance(json_value[key], field_type.serialized_type)):\n            raise ValueError(('expected %r of type %s, but found: %r' % (key, field_type.serialized_type, json_value[key])))\n        json_value[key] = field_type.deserialize(json_value[key])\n    return TensorBoardInfo(**json_value)", "docstring": "Parse a `TensorBoardInfo` object from its string representation.\n\nArgs:\ninfo_string: A string representation of a `TensorBoardInfo`, as\nproduced by a previous call to `_info_to_string`.\n\nReturns:\nA `TensorBoardInfo` value.\n\nRaises:\nValueError: If the provided string is not valid JSON, or if it does\nnot represent a JSON object with a \"version\" field whose value is\n`tensorboard.version.VERSION`, or if it has the wrong set of\nfields, or if at least one field is of invalid type.", "source": "codesearchnet"}
{"code": "class IntGELU(nn.Module):\n\n    def __init__(self, quant_mode=True, force_dequant='none'):\n        super().__init__()\n        self.quant_mode = quant_mode\n        if force_dequant in ['nonlinear', 'gelu']:\n            logger.info('Force dequantize gelu')\n            self.quant_mode = False\n        if not self.quant_mode:\n            self.activation_fn = nn.GELU()\n        self.k = 1.4142\n        self.const = 14\n        self.coeff = [-0.2888, -1.769, 1]\n        self.coeff[2] /= self.coeff[0]\n\n    def int_erf(self, x_int, scaling_factor):\n        b_int = torch.floor(self.coeff[1] / scaling_factor)\n        c_int = torch.floor(self.coeff[2] / scaling_factor ** 2)\n        sign = torch.sign(x_int)\n        abs_int = torch.min(torch.abs(x_int), -b_int)\n        y_int = sign * ((abs_int + b_int) ** 2 + c_int)\n        scaling_factor = scaling_factor ** 2 * self.coeff[0]\n        y_int = floor_ste.apply(y_int / 2 ** self.const)\n        scaling_factor = scaling_factor * 2 ** self.const\n        return (y_int, scaling_factor)\n\n    def forward(self, x, scaling_factor=None):\n        if not self.quant_mode:\n            return (self.activation_fn(x), None)\n        x_int = x / scaling_factor\n        sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)\n        shift_int = 1.0 \n        x_int = x_int * (sigmoid_int + shift_int)\n        scaling_factor = scaling_factor * sigmoid_scaling_factor / 2\n        return (x_int * scaling_factor, scaling_factor)", "docstring": "Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.\n\nArgs:\nquant_mode (`bool`, *optional*, defaults to `False`):\nWhether or not the layer is quantized.\nforce_dequant (`str`, *optional*, defaults to `\"none\"`):\nForce dequantize the layer if either \"gelu\" or \"nonlinear\" is given.", "source": "github-repos"}
{"code": "def UpdateNumberOfEvents(self, number_of_consumed_events, number_of_produced_events):\n    consumed_events_delta = 0\n    if (number_of_consumed_events is not None):\n        if (number_of_consumed_events < self.number_of_consumed_events):\n            raise ValueError('Number of consumed events smaller than previous update.')\n        consumed_events_delta = (number_of_consumed_events - self.number_of_consumed_events)\n        self.number_of_consumed_events = number_of_consumed_events\n        self.number_of_consumed_events_delta = consumed_events_delta\n    produced_events_delta = 0\n    if (number_of_produced_events is not None):\n        if (number_of_produced_events < self.number_of_produced_events):\n            raise ValueError('Number of produced events smaller than previous update.')\n        produced_events_delta = (number_of_produced_events - self.number_of_produced_events)\n        self.number_of_produced_events = number_of_produced_events\n        self.number_of_produced_events_delta = produced_events_delta\n    return ((consumed_events_delta > 0) or (produced_events_delta > 0))", "docstring": "Updates the number of events.\n\nArgs:\nnumber_of_consumed_events (int): total number of events consumed by\nthe process.\nnumber_of_produced_events (int): total number of events produced by\nthe process.\n\nReturns:\nbool: True if either number of events has increased.\n\nRaises:\nValueError: if the consumed or produced number of events is smaller\nthan the value of the previous update.", "source": "codesearchnet"}
{"code": "def download_file_maybe_extract(url, directory, filename=None, extension=None, check_files=[]):\n    if (filename is None):\n        filename = _get_filename_from_url(url)\n    filepath = os.path.join(directory, filename)\n    check_files = [os.path.join(directory, f) for f in check_files]\n    if ((len(check_files) > 0) and _check_download(*check_files)):\n        return filepath\n    if (not os.path.isdir(directory)):\n        os.makedirs(directory)\n    logger.info('Downloading {}'.format(filename))\n    if ('drive.google.com' in url):\n        _download_file_from_drive(filepath, url)\n    else:\n        with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:\n            urllib.request.urlretrieve(url, filename=filepath, reporthook=_reporthook(t))\n    _maybe_extract(compressed_filename=filepath, directory=directory, extension=extension)\n    if (not _check_download(*check_files)):\n        raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')\n    return filepath", "docstring": "Download the file at ``url`` to ``directory``. Extract to ``directory`` if tar or zip.\n\nArgs:\nurl (str): Url of file.\ndirectory (str): Directory to download to.\nfilename (str, optional): Name of the file to download; Otherwise, a filename is extracted\nfrom the url.\nextension (str, optional): Extension of the file; Otherwise, attempts to extract extension\nfrom the filename.\ncheck_files (list of str): Check if these files exist, ensuring the download succeeded.\nIf these files exist before the download, the download is skipped.\n\nReturns:\n(str): Filename of download file.\n\nRaises:\nValueError: Error if one of the ``check_files`` are not found following the download.", "source": "codesearchnet"}
{"code": "def wait(self, container, timeout=None, condition=None):\n    url = self._url('/containers/{0}/wait', container)\n    params = {}\n    if (condition is not None):\n        if utils.version_lt(self._version, '1.30'):\n            raise errors.InvalidVersion('wait condition is not supported for API version < 1.30')\n        params['condition'] = condition\n    res = self._post(url, timeout=timeout, params=params)\n    return self._result(res, True)", "docstring": "Block until a container stops, then return its exit code. Similar to\nthe ``docker wait`` command.\n\nArgs:\ncontainer (str or dict): The container to wait on. If a dict, the\n``Id`` key is used.\ntimeout (int): Request timeout\ncondition (str): Wait until a container state reaches the given\ncondition, either ``not-running`` (default), ``next-exit``,\nor ``removed``\n\nReturns:\n(dict): The API's response as a Python dictionary, including\nthe container's exit code under the ``StatusCode`` attribute.\n\nRaises:\n:py:class:`requests.exceptions.ReadTimeout`\nIf the timeout is exceeded.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def GetPresetByName(self, name):\n    \n    name = name.lower()\n    return self._definitions.get(name, None)", "docstring": "Retrieves a specific preset definition by name.\n\nArgs:\nname (str): name of the preset.\n\nReturns:\nParserPreset: a parser preset or None if not available.", "source": "juraj-google-style"}
{"code": "def send_status(status: 'EFBStatus'):\n    \n    global middlewares, master\n    if status is None:\n        return\n\n    s: 'Optional[EFBStatus]' = status\n\n    \n    for i in middlewares:\n        s = i.process_status(cast('EFBStatus', s))\n        if s is None:\n            return\n\n    status = cast('EFBStatus', s)\n\n    status.verify()\n\n    status.destination_channel.send_status(status)", "docstring": "Deliver a message to the destination channel.\n\nArgs:\nstatus (EFBStatus): The status", "source": "juraj-google-style"}
{"code": "def exists(self):\n    session = client.get_client().create_session()\n    ret = (self._base_query(session).count() > 0)\n    session.close()\n    return ret", "docstring": "Check if a target exists\n\nThis function is called by :mod:`luigi` to check if a task output exists. By default,\n:mod:`luigi` considers a task as complete if all it targets (outputs) exist.\n\nReturns:\nbool: ``True`` if target exists, ``False`` otherwise", "source": "codesearchnet"}
{"code": "def atol_for_validation(self) -> float:\n    return 0.0001", "docstring": "What absolute tolerance value to use during model conversion validation.\n\nReturns:\nFloat absolute tolerance value.", "source": "github-repos"}
{"code": "def program_to_text(program):\n\n    def label(node):\n        return '<%d>%s' % (node.id, node.name)\n    s = io.StringIO()\n    seen = set()\n    for node in cfg_utils.order_nodes(program.cfg_nodes):\n        seen.add(node)\n        s.write(f'{label(node)}\\n')\n        s.write(f'  From: {', '.join((label(n) for n in node.incoming))}\\n')\n        s.write(f'  To: {', '.join((label(n) for n in node.outgoing))}\\n')\n        s.write('\\n')\n        variables = {value.variable for value in node.bindings}\n        for var in sorted(variables, key=lambda v: v.id):\n            s.write('  %s\\n' % _pretty_variable(var).replace('\\n', '\\n  '))\n        s.write('\\n')\n    return s.getvalue()", "docstring": "Generate a text (CFG nodes + assignments) version of a program.\n\nFor debugging only.\n\nArgs:\nprogram: An instance of cfg.Program\n\nReturns:\nA string representing all of the data for this program.", "source": "github-repos"}
{"code": "def group_id(self, resource_id):\n    if (self._name != 'group'):\n        self._request_uri = '{}/{}'.format(self._api_uri, resource_id)", "docstring": "Update the request URI to include the Group ID for specific group retrieval.\n\nArgs:\nresource_id (string): The group id.", "source": "codesearchnet"}
{"code": "def recreate_function(saved_function, concrete_functions):\n    function_spec = _deserialize_function_spec_as_nonmethod(saved_function.function_spec)\n\n    def restored_function_body(*args, **kwargs):\n        \n        if not saved_function.concrete_functions:\n            raise ValueError('Found zero restored functions for caller function.')\n        inputs = (args, kwargs)\n        for allow_conversion in [False, True]:\n            for function_name in saved_function.concrete_functions:\n                function = concrete_functions[function_name]\n                if any([inp is None for inp in function.captured_inputs]):\n                    raise ValueError('Looks like you are trying to run a loaded non-Keras model that was trained using tf.distribute.experimental.ParameterServerStrategy with variable partitioning, which is not currently supported. Try using Keras to define your model if possible.')\n                if _concrete_function_callable_with(function, inputs, allow_conversion):\n                    return _call_concrete_function(function, inputs)\n        signature_descriptions = []\n\n        def _pretty_format_positional(positional):\n            return 'Positional arguments ({} total):\\n    * {}'.format(len(positional), '\\n    * '.join((pprint.pformat(a) for a in positional)))\n        for index, function_name in enumerate(saved_function.concrete_functions):\n            concrete_function = concrete_functions[function_name]\n            positional, keyword = concrete_function.structured_input_signature\n            signature_descriptions.append('Option {}:\\n  {}\\n  Keyword arguments: {}'.format(index + 1, _pretty_format_positional(positional), keyword))\n        raise ValueError(f'Could not find matching concrete function to call loaded from the SavedModel. Got:\\n  {_pretty_format_positional(args)}\\n  Keyword arguments: {kwargs}\\n\\n Expected these arguments to match one of the following {len(saved_function.concrete_functions)} option(s):\\n\\n{(chr(10) + chr(10)).join(signature_descriptions)}')\n    concrete_function_objects = []\n    for concrete_function_name in saved_function.concrete_functions:\n        concrete_function_objects.append(concrete_functions[concrete_function_name])\n    for cf in concrete_function_objects:\n        set_preinitialized_function_spec(cf, function_spec)\n    restored_function = RestoredFunction(restored_function_body, restored_function_body.__name__, function_spec, concrete_function_objects)\n    return tf_decorator.make_decorator(restored_function_body, restored_function, decorator_argspec=function_spec.fullargspec)", "docstring": "Creates a `Function` from a `SavedFunction`.\n\nArgs:\nsaved_function: `SavedFunction` proto.\nconcrete_functions: map from function name to `ConcreteFunction`. As a side\neffect of this function, the `FunctionSpec` from `saved_function` is added\nto each `ConcreteFunction` in this map.\n\nReturns:\nA `Function`.", "source": "github-repos"}
{"code": "def uninstall(path, restart=False):\n    cmd = ['wusa.exe', '/uninstall', '/quiet']\n    kb = os.path.splitext(os.path.basename(path))[0]\n    if os.path.exists(path):\n        cmd.append(path)\n    else:\n        cmd.append('/kb:{0}'.format((kb[2:] if kb.lower().startswith('kb') else kb)))\n    if restart:\n        cmd.append('/forcerestart')\n    else:\n        cmd.append('/norestart')\n    ret_code = __salt__['cmd.retcode'](cmd, ignore_retcode=True)\n    errors = {(- 2145116156): '{0} does not support uninstall'.format(kb), 2359303: '{0} not installed'.format(kb), 87: 'Unknown error. Try specifying an .msu file'}\n    if (ret_code in errors):\n        raise CommandExecutionError(errors[ret_code])\n    elif ret_code:\n        raise CommandExecutionError('Unknown error: {0}'.format(ret_code))\n    return True", "docstring": "Uninstall a specific KB.\n\nArgs:\n\npath (str):\nThe full path to the msu file to uninstall. This can also be just\nthe name of the KB to uninstall\n\nrestart (bool):\n``True`` to force a restart if required by the installation. Adds\nthe ``/forcerestart`` switch to the ``wusa.exe`` command. ``False``\nwill add the ``/norestart`` switch instead. Default is ``False``\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nRaises:\nCommandExecutionError: If an error is encountered\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' wusa.uninstall KB123456\n\n# or\n\nsalt '*' wusa.uninstall C:/temp/KB123456.msu", "source": "codesearchnet"}
{"code": "def __getitem__(cls, args):\n        \n        \n        type_, bound, keyfunc = cls._get_args(args)\n        keyfunc_name = cls._get_fullname(keyfunc)\n        identity = cls._identity\n        BaseClass, MetaClass = cls._get_bases(type_)\n        instantiate = cls._instantiate\n\n        @six.add_metaclass(MetaClass)  \n        class _BoundedSubclass(BaseClass):  \n            \n\n            def __new__(cls, __value, *args, **kwargs):\n                \n                \n                instance = instantiate(\n                    BaseClass, type_, __value, *args, **kwargs\n                )\n                cmp_val = keyfunc(instance)\n                if bound.start is not None or bound.stop is not None:\n                    if bound.start is not None and cmp_val < bound.start:\n                        if keyfunc is not identity:\n                            raise ValueError(\n                                \"The value of {}({}) [{}] is below the minimum\"\n                                \" allowed value of {}.\".format(\n                                    keyfunc_name,\n                                    repr(__value),\n                                    repr(cmp_val),\n                                    bound.start,\n                                )\n                            )\n                        raise ValueError(\n                            \"The value {} is below the minimum allowed value \"\n                            \"of {}.\".format(repr(__value), bound.start)\n                        )\n                    if bound.stop is not None and cmp_val > bound.stop:\n                        if keyfunc is not identity:\n                            raise ValueError(\n                                \"The value of {}({}) [{}] is above the maximum\"\n                                \" allowed value of {}.\".format(\n                                    keyfunc_name,\n                                    repr(__value),\n                                    repr(cmp_val),\n                                    bound.stop,\n                                )\n                            )\n                        raise ValueError(\n                            \"The value {} is above the maximum allowed value \"\n                            \"of {}.\".format(repr(__value), bound.stop)\n                        )\n                elif not cmp_val:\n                    raise ValueError(\n                        \"{}({}) is False\".format(keyfunc_name, repr(instance))\n                    )\n                return instance\n\n        _BoundedSubclass.__type__ = type_\n        _BoundedSubclass.__class_repr__ = cls._get_class_repr(\n            type_, bound, keyfunc, keyfunc_name\n        )\n        return _BoundedSubclass", "docstring": "Create a new subclass of a type bounded by the arguments.\n\nIf a callable is passed as the third argument of the slice, it will be\nused as the comparison function for the boundaries.\n\nArgs:\nargs: A tuple with two or three parameters: a type, a slice\nrepresenting the minimum and maximum lengths allowed for values\nof that type and, optionally, a function to use on values\nbefore comparing against the bounds.", "source": "juraj-google-style"}
{"code": "def apply_range_set(self, hist: Hist) -> None:\n    axis = self.axis(hist)\n    assert (not isinstance(self.min_val, float))\n    assert (not isinstance(self.max_val, float))\n    min_val = self.min_val(axis)\n    max_val = self.max_val(axis)\n    self.axis(hist).SetRange(min_val, max_val)", "docstring": "Apply the associated range set to the axis of a given hist.\n\nNote:\nThe min and max values should be bins, not user ranges! For more, see the binning\nexplanation in ``apply_func_to_find_bin(...)``.\n\nArgs:\nhist: Histogram to which the axis range restriction should be applied.\nReturns:\nNone. The range is set on the axis.", "source": "codesearchnet"}
{"code": "def RunStateMethod(self,\n                     method_name,\n                     request=None,\n                     responses=None,\n                     event=None,\n                     direct_response=None):\n    \n    client_id = None\n    try:\n      self.context.current_state = method_name\n      if request and responses:\n        client_id = request.client_id or self.runner_args.client_id\n        logging.debug(\"%s Running %s with %d responses from %s\",\n                      self.session_id, method_name, len(responses), client_id)\n\n      else:\n        logging.debug(\"%s Running state method %s\", self.session_id,\n                      method_name)\n\n      \n      self.hunt_obj.HeartBeat()\n      try:\n        method = getattr(self.hunt_obj, method_name)\n      except AttributeError:\n        raise flow_runner.FlowRunnerError(\n            \"Flow %s has no state method %s\" %\n            (self.hunt_obj.__class__.__name__, method_name))\n\n      if direct_response:\n        method(direct_response)\n      elif method_name == \"Start\":\n        method()\n      else:\n        \n        responses = flow_responses.Responses.FromLegacyResponses(\n            request=request, responses=responses)\n\n        if responses.status:\n          self.SaveResourceUsage(request.client_id, responses.status)\n\n        stats_collector_instance.Get().IncrementCounter(\"grr_worker_states_run\")\n\n        method(responses)\n\n    \n    \n    except Exception as e:  \n\n      \n      stats_collector_instance.Get().IncrementCounter(\"grr_flow_errors\")\n\n      stats_collector_instance.Get().IncrementCounter(\n          \"flow_errors\", fields=[self.hunt_obj.Name()])\n      logging.exception(\"Hunt %s raised %s.\", self.session_id, e)\n\n      self.Error(traceback.format_exc(), client_id=client_id)\n\n    finally:\n      if event:\n        event.set()", "docstring": "Completes the request by calling the state method.\n\nArgs:\nmethod_name: The name of the state method to call.\nrequest: A RequestState protobuf.\nresponses: A list of GrrMessages responding to the request.\nevent: A threading.Event() instance to signal completion of this request.\ndirect_response: A flow.Responses() object can be provided to avoid\ncreation of one.", "source": "juraj-google-style"}
{"code": "def add(self, resource, provider_uri_or_id, timeout=(- 1)):\n    uri = (self._provider_client.build_uri(provider_uri_or_id) + '/device-managers')\n    return self._client.create(resource=resource, uri=uri, timeout=timeout)", "docstring": "Adds a Device Manager under the specified provider.\n\nArgs:\nresource (dict): Object to add.\nprovider_uri_or_id: ID or URI of provider.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Added SAN Manager.", "source": "codesearchnet"}
{"code": "def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):\n    if outdir:\n        outdir_set = True\n    else:\n        outdir_set = False\n    counter = 0\n    for g in tqdm(self.genes):\n        if (g.id not in input_dict):\n            continue\n        if (not outdir_set):\n            outdir = g.protein.structure_dir\n            if (not outdir):\n                raise ValueError('Output directory must be specified')\n        for (hid, hdict) in input_dict[g.id].items():\n            if (('model_file' not in hdict) or ('file_type' not in hdict)):\n                raise KeyError('\"model_file\" and \"file_type\" must be keys in the manual input dictionary.')\n            new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'], file_type=hdict['file_type'], is_experimental=False)\n            if clean:\n                new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun), hdict['file_type'])\n            else:\n                copy_to = op.join(outdir, op.basename(hdict['model_file']))\n                if ssbio.utils.force_rerun(force_rerun, copy_to):\n                    log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))\n                    shutil.copy2(hdict['model_file'], outdir)\n                    new_homology.load_structure_path(copy_to, hdict['file_type'])\n                else:\n                    log.debug('{}: homology model already copied to directory'.format(copy_to))\n                    new_homology.load_structure_path(copy_to, hdict['file_type'])\n            new_homology.update(hdict)\n            log.debug('{}: updated homology model information and copied model file.'.format(g.id))\n        counter += 1\n    log.info('Updated homology model information for {} genes.'.format(counter))", "docstring": "Copy homology models to the GEM-PRO project.\n\nRequires an input of a dictionary formatted like so::\n\n{\nmodel_gene: {\nhomology_model_id1: {\n'model_file': '/path/to/homology/model.pdb',\n'file_type': 'pdb'\n'additional_info': info_value\n},\nhomology_model_id2: {\n'model_file': '/path/to/homology/model.pdb'\n'file_type': 'pdb'\n}\n}\n}\n\nArgs:\ninput_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information\noutdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories\nwere not created initially\nclean (bool): If homology files should be cleaned and saved as a new PDB file\nforce_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory", "source": "codesearchnet"}
{"code": "def remove_tag(self, tag):\n        \n        \n        return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')", "docstring": "Remove a tag\n\nArgs:\ntag (str): Tag to remove\n\nReturns:\nbool: True if tag removed or False if not", "source": "juraj-google-style"}
{"code": "def expectation(self, function):\n    return self._expectation(function)", "docstring": "Returns an estimate of the expectation value of the given function.\n\nArgs:\nfunction: Mapping from a 2D tensor of bitstrings to a possibly nested\nstructure.  The structure must have atomic elements all of which are\nfloat tensors with the same batch size as the input bitstrings.", "source": "github-repos"}
{"code": "def kmeans_pp(data, k, centers=None):\n    \n    \n    \n    genes, cells = data.shape\n    if sparse.issparse(data) and not sparse.isspmatrix_csc(data):\n        data = sparse.csc_matrix(data)\n    num_known_centers = 0\n    if centers is None:\n        centers = np.zeros((genes, k))\n    else:\n        num_known_centers = centers.shape[1]\n        centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1)\n    distances = np.zeros((cells, k))\n    distances[:] = np.inf\n    if num_known_centers == 0:\n        init = np.random.randint(0, cells)\n        if sparse.issparse(data):\n            centers[:,0] = data[:, init].toarray().flatten()\n        else:\n            centers[:,0] = data[:, init]\n        num_known_centers+=1\n    available_cells = list(range(cells))\n    for c in range(num_known_centers, k):\n        c2 = c-1\n        \n        \n        if sparse.issparse(data):\n            lls = poisson_ll(data, centers[:,c2:c2+1]).flatten()\n            distances[:,c2] = 1 + lls.max() - lls\n            distances[:,c2] /= distances[:,c2].max()\n        else:\n            for cell in range(cells):\n                distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2])\n        \n        \n        min_distances = np.min(distances, 1)\n        min_distances = min_distances**2\n        min_distances = min_distances[available_cells]\n        \n        min_dist = np.random.choice(available_cells,\n                p=min_distances/min_distances.sum())\n        available_cells.pop(available_cells.index(min_dist))\n        if sparse.issparse(data):\n            centers[:,c] = data[:, min_dist].toarray().flatten()\n        else:\n            centers[:,c] = data[:, min_dist]\n    lls = poisson_ll(data, centers)\n    new_assignments = np.argmax(lls, 1)\n    centers[centers==0.0] = eps\n    return centers, new_assignments", "docstring": "Generates kmeans++ initial centers.\n\nArgs:\ndata (array): A 2d array- genes x cells\nk (int): Number of clusters\ncenters (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k).\n\nReturns:\ncenters - a genes x k array of cluster means.\nassignments - a cells x 1 array of cluster assignments", "source": "juraj-google-style"}
{"code": "def channels_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"channel\": channel, \"thread_ts\": thread_ts})\n        return self.api_call(\"channels.replies\", http_verb=\"GET\", params=kwargs)", "docstring": "Retrieve a thread of messages posted to a channel\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nthread_ts (str): The timestamp of an existing message with 0 or more replies.\ne.g. '1234567890.123456'", "source": "juraj-google-style"}
{"code": "def CopyAttributesFromSessionCompletion(self, session_completion):\n    \n    if self.identifier != session_completion.identifier:\n      raise ValueError('Session identifier mismatch.')\n\n    self.aborted = session_completion.aborted\n\n    if session_completion.analysis_reports_counter:\n      self.analysis_reports_counter = (\n          session_completion.analysis_reports_counter)\n\n    self.completion_time = session_completion.timestamp\n\n    if session_completion.event_labels_counter:\n      self.event_labels_counter = session_completion.event_labels_counter\n\n    if session_completion.parsers_counter:\n      self.parsers_counter = session_completion.parsers_counter", "docstring": "Copies attributes from a session completion.\n\nArgs:\nsession_completion (SessionCompletion): session completion attribute\ncontainer.\n\nRaises:\nValueError: if the identifier of the session completion does not match\nthat of the session.", "source": "juraj-google-style"}
{"code": "def set_maximum(self, q_data, marked, center, bin_lower, foothills):\n    as_bin = []\n    as_glob = []\n    marked_so_far = []\n    will_be_considered_again = False\n    as_bin.append(center)\n    center_data = q_data[center]\n    while (len(as_bin) > 0):\n        p = as_bin.pop((- 1))\n        if (marked[p] != self.UNMARKED):\n            continue\n        marked[p] = q_data[center]\n        marked_so_far.append(p)\n        for (index, val) in np.ndenumerate(marked[((p[0] - 1):(p[0] + 2), (p[1] - 1):(p[1] + 2))]):\n            if (val == self.UNMARKED):\n                pixel = (((index[0] - 1) + p[0]), ((index[1] - 1) + p[1]))\n                p_data = q_data[pixel]\n                if ((not will_be_considered_again) and (p_data >= 0) and (p_data < center_data)):\n                    will_be_considered_again = True\n                if ((p_data >= bin_lower) and (np.abs((center_data - p_data)) <= self.delta)):\n                    as_bin.append(pixel)\n                elif (p_data >= 0):\n                    as_glob.append(pixel)\n    if (bin_lower == 0):\n        will_be_considered_again = False\n    big_enough = (len(marked_so_far) >= self.max_size)\n    if big_enough:\n        foothills.append((center, as_glob))\n    elif will_be_considered_again:\n        for m in marked_so_far:\n            marked[m] = self.UNMARKED\n        del as_bin[:]\n        del as_glob[:]\n        del marked_so_far[:]\n    return (big_enough or (not will_be_considered_again))", "docstring": "Grow a region at a certain bin level and check if the region has reached the maximum size.\n\nArgs:\nq_data: Quantized data array\nmarked: Array marking points that are objects\ncenter: Coordinates of the center pixel of the region being grown\nbin_lower: Intensity level of lower bin being evaluated\nfoothills: List of points that are associated with a center but fall outside the the size or\nintensity criteria\nReturns:\nTrue if the object is finished growing and False if the object should be grown again at the next\nthreshold level.", "source": "codesearchnet"}
{"code": "def load_bmp(path):\n    surface = object.__new__(Surface)\n    surface._ptr = check_ptr_err(lib.SDL_LoadBMP_RW(lib.SDL_RWFromFile(path, 'rb'), 1))\n    return surface", "docstring": "Load a surface from a file.\n\nArgs:\npath (str): Path to the BMP file to load.\n\nReturns:\nSurface: A surface containing the pixels loaded from the file.\n\nRaises:\nSDLError: If the file cannot be loaded.", "source": "codesearchnet"}
{"code": "def _to_values_def(self, export_scope=None):\n    values_def = control_flow_pb2.ValuesDef()\n    values_def.values.extend([ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])\n    for k, v in self._external_values.items():\n        k = ops.strip_name_scope(k, export_scope)\n        values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)\n    return values_def", "docstring": "Converts the values to a `ValuesDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `ValuesDef` protocol buffer.", "source": "github-repos"}
{"code": "def pickle_load(cls, filepath):\n    if os.path.isdir(filepath):\n        for (dirpath, dirnames, filenames) in os.walk(filepath):\n            fnames = [f for f in filenames if (f == cls.PICKLE_FNAME)]\n            if fnames:\n                if (len(fnames) == 1):\n                    filepath = os.path.join(dirpath, fnames[0])\n                    break\n                else:\n                    err_msg = ('Found multiple databases:\\n %s' % str(fnames))\n                    raise RuntimeError(err_msg)\n        else:\n            err_msg = ('Cannot find %s inside directory %s' % (cls.PICKLE_FNAME, filepath))\n            raise ValueError(err_msg)\n    with open(filepath, 'rb') as fh:\n        new = pickle.load(fh)\n    from .flows import Flow\n    (flow_workdirs, new.flows) = (new.flows, [])\n    for flow in map(Flow.pickle_load, flow_workdirs):\n        new.add_flow(flow)\n    return new", "docstring": "Loads the object from a pickle file.\n\nArgs:\nfilepath: Filename or directory name. It filepath is a directory, we\nscan the directory tree starting from filepath and we\nread the first pickle database. Raise RuntimeError if multiple\ndatabases are found.", "source": "codesearchnet"}
{"code": "def _check_self_to_empty(self, stateid):\n        \n        x_term = stateid.rfind('@')\n        y_term = stateid.rfind('A')\n        if y_term > x_term:\n            x_term = y_term\n        ids = stateid[x_term + 1:].split(',')\n        if len(ids) < 2:\n            return 0\n        if ids[0] == ids[1]:\n            \n            return 1\n        return 0", "docstring": "Because of the optimization, the rule for empty states is missing\nA check takes place live\nArgs:\nstateid (int): The state identifier\nReturns:\nbool: A true or false response", "source": "juraj-google-style"}
{"code": "def GetArchiveTypeIndicators(cls, path_spec, resolver_context=None):\n    \n    if (cls._archive_remainder_list is None or\n        cls._archive_store is None):\n      specification_store, remainder_list = cls._GetSpecificationStore(\n          definitions.FORMAT_CATEGORY_ARCHIVE)\n      cls._archive_remainder_list = remainder_list\n      cls._archive_store = specification_store\n\n    if cls._archive_scanner is None:\n      cls._archive_scanner = cls._GetSignatureScanner(cls._archive_store)\n\n    return cls._GetTypeIndicators(\n        cls._archive_scanner, cls._archive_store,\n        cls._archive_remainder_list, path_spec,\n        resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported archive types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "juraj-google-style"}
{"code": "def check_url(url):\n    request = urllib2.Request(url)\n    try:\n        response = urlopen(request)\n        return (True, response.code)\n    except urllib2.HTTPError as e:\n        return (False, e.code)", "docstring": "Check if resource at URL is fetchable. (by trying to fetch it and checking for 200 status.\n\nArgs:\nurl (str): Url to check.\n\nReturns:\nReturns a tuple of {True/False, response code}", "source": "codesearchnet"}
{"code": "def retrieve_info_for_model(model_type, frameworks: Optional[List[str]]=None):\n    if model_type not in auto_module.MODEL_NAMES_MAPPING:\n        raise ValueError(f'{model_type} is not a valid model type.')\n    model_name = auto_module.MODEL_NAMES_MAPPING[model_type]\n    config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]\n    if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES:\n        tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]\n        tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1]\n    else:\n        tokenizer_class = None\n    image_processor_classes = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)\n    if isinstance(image_processor_classes, tuple):\n        image_processor_class, image_processor_fast_class = image_processor_classes\n    else:\n        image_processor_class = image_processor_classes\n        image_processor_fast_class = None\n    feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)\n    processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)\n    model_files = get_model_files(model_type, frameworks=frameworks)\n    model_camel_cased = config_class.replace('Config', '')\n    available_frameworks = []\n    for fname in model_files['model_files']:\n        if 'modeling_tf' in str(fname):\n            available_frameworks.append('tf')\n        elif 'modeling_flax' in str(fname):\n            available_frameworks.append('flax')\n        elif 'modeling' in str(fname):\n            available_frameworks.append('pt')\n    if frameworks is None:\n        frameworks = get_default_frameworks()\n    frameworks = [f for f in frameworks if f in available_frameworks]\n    model_classes = retrieve_model_classes(model_type, frameworks=frameworks)\n    model_upper_cased = model_camel_cased.upper()\n    model_patterns = ModelPatterns(model_name, checkpoint=find_base_model_checkpoint(model_type, model_files=model_files), model_type=model_type, model_camel_cased=model_camel_cased, model_lower_cased=model_files['module_name'], model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, image_processor_class=image_processor_class, image_processor_fast_class=image_processor_fast_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class)\n    return {'frameworks': frameworks, 'model_classes': model_classes, 'model_files': model_files, 'model_patterns': model_patterns}", "docstring": "Retrieves all the information from a given model_type.\n\nArgs:\nmodel_type (`str`): A valid model type (like \"bert\" or \"gpt2\")\nframeworks (`List[str]`, *optional*):\nIf passed, will only keep the info corresponding to the passed frameworks.\n\nReturns:\n`Dict`: A dictionary with the following keys:\n- **frameworks** (`List[str]`): The list of frameworks that back this model type.\n- **model_classes** (`Dict[str, List[str]]`): The model classes implemented for that model type.\n- **model_files** (`Dict[str, Union[Path, List[Path]]]`): The files associated with that model type.\n- **model_patterns** (`ModelPatterns`): The various patterns for the model.", "source": "github-repos"}
{"code": "def percent_point(self, U):\n        \n        self.check_fit()\n\n        if not 0 < U < 1:\n            raise ValueError('cdf value must be in [0,1]')\n\n        return scipy.optimize.brentq(self.cumulative_distribution, -1000.0, 1000.0, args=(U))", "docstring": "Given a cdf value, returns a value in original space.\n\nArgs:\nU: `int` or `float` cdf value in [0,1]\n\nReturns:\nfloat: value in original space", "source": "juraj-google-style"}
{"code": "def get_surveys(self):\n        \n        payload = { \n            'Request': 'getSurveys',\n            'Format': 'JSON'\n            }\n        r = self._session.get(QUALTRICS_URL, params=payload)\n        output = r.json()\n        return output['Result']['Surveys']", "docstring": "Gets all surveys in account\n\nArgs:\nNone\n\nReturns:\nlist: a list of all surveys", "source": "juraj-google-style"}
{"code": "def eval_algorithm(closing, low, high):\n        \n        if high - low == 0: \n            return 100 * (closing - low)\n        else:\n            return 100 * (closing - low) / (high - low)", "docstring": "Evaluates the SO algorithm\n\nArgs:\nclosing: Float of current closing price.\nlow: Float of lowest low closing price throughout some duration.\nhigh: Float of highest high closing price throughout some duration.\n\nReturns:\nFloat SO between 0 and 100.", "source": "juraj-google-style"}
{"code": "def build_from_token_counts(self, token_counts, min_count, num_iterations=4):\n        \n        self._init_alphabet_from_tokens(six.iterkeys(token_counts))\n\n        \n        \n        self._init_subtokens_from_list(list(self._alphabet))\n\n        \n        \n        \n        if min_count < 1:\n            min_count = 1\n        for i in xrange(num_iterations):\n\n            \n            \n            subtoken_counts = collections.defaultdict(int)\n            for token, count in six.iteritems(token_counts):\n                escaped_token = _escape_token(token, self._alphabet)\n                subtokens = self._escaped_token_to_subtoken_strings(escaped_token)\n                start = 0\n                for subtoken in subtokens:\n                    for end in xrange(start + 1, len(escaped_token) + 1):\n                        new_subtoken = escaped_token[start:end]\n                        subtoken_counts[new_subtoken] += count\n                    start += len(subtoken)\n\n            \n            len_to_subtoken_strings = []\n            for subtoken_string, count in six.iteritems(subtoken_counts):\n                lsub = len(subtoken_string)\n                if count >= min_count:\n                    while len(len_to_subtoken_strings) <= lsub:\n                        len_to_subtoken_strings.append(set())\n                    len_to_subtoken_strings[lsub].add(subtoken_string)\n\n            \n            \n            \n            new_subtoken_strings = []\n            for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1):\n                subtoken_strings = len_to_subtoken_strings[lsub]\n                for subtoken_string in subtoken_strings:\n                    count = subtoken_counts[subtoken_string]\n                    if count >= min_count:\n                        \n                        \n                        if subtoken_string not in self._alphabet:\n                            new_subtoken_strings.append((count, subtoken_string))\n                        for l in xrange(1, lsub):\n                            subtoken_counts[subtoken_string[:l]] -= count\n\n            \n            \n            new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) for a in self._alphabet)\n            new_subtoken_strings.sort(reverse=True)\n\n            \n            self._init_subtokens_from_list([subtoken for _, subtoken in new_subtoken_strings])", "docstring": "Train a SubwordTextTokenizer based on a dictionary of word counts.\n\nArgs:\ntoken_counts: a dictionary of Unicode strings to int.\nmin_count: an integer - discard subtokens with lower counts.\nnum_iterations: an integer; how many iterations of refinement.", "source": "juraj-google-style"}
{"code": "def register(self, task_json=None, json_filename=None):\n    if ((not task_json) and (not json_filename)):\n        raise Exception(\"Both task json and filename can't be none.\")\n    if (task_json and json_filename):\n        raise Exception(\"Both task json and filename can't be provided.\")\n    if json_filename:\n        task_json = json.load(open(json_filename, 'r'))\n    r = self.gbdx_connection.post(self._base_url, json=task_json)\n    raise_for_status(r)\n    return r.text", "docstring": "Registers a new GBDX task.\n\nArgs:\ntask_json (dict): Dictionary representing task definition.\njson_filename (str): A full path of a file with json representing the task definition.\nOnly one out of task_json and json_filename should be provided.\nReturns:\nResponse (str).", "source": "codesearchnet"}
{"code": "def FromDBInstance(db_token):\n        \n        hash_ar = bytearray(binascii.unhexlify(db_token.ContractHash))\n        hash_ar.reverse()\n        hash = UInt160(data=hash_ar)\n        token = NEP5Token(script=None)\n        token.SetScriptHash(hash)\n        token.name = db_token.Name\n        token.symbol = db_token.Symbol\n        token.decimals = db_token.Decimals\n        return token", "docstring": "Get a NEP5Token instance from a database token.\n\nArgs:\ndb_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):\n\nReturns:\nNEP5Token: self.", "source": "juraj-google-style"}
{"code": "def post_process(self, outputs, target_sizes):\n    logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if len(out_logits) != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    if target_sizes.shape[1] != 2:\n        raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n    prob = out_logits.sigmoid()\n    topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    img_h, img_w = target_sizes.unbind(1)\n    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n    boxes = boxes * scale_fct[:, None, :]\n    results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n    return results", "docstring": "Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,\ntop_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`YolosObjectDetectionOutput`]):\nRaw outputs of the model.\ntarget_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\nTensor containing the size (height, width) of each image of the batch. For evaluation, this must be the\noriginal image size (before any data augmentation). For visualization, this should be the image size\nafter data augment, but before padding.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def dump_table_as_insert_sql(engine: Engine, table_name: str, fileobj: TextIO, wheredict: Dict[(str, Any)]=None, include_ddl: bool=False, multirow: bool=False) -> None:\n    log.info('dump_data_as_insert_sql: table_name={}', table_name)\n    writelines_nl(fileobj, [SEP1, sql_comment('Data for table: {}'.format(table_name)), SEP2, sql_comment('Filters: {}'.format(wheredict))])\n    dialect = engine.dialect\n    if (not dialect.supports_multivalues_insert):\n        multirow = False\n    if multirow:\n        log.warning('dump_data_as_insert_sql: multirow parameter substitution not working yet')\n        multirow = False\n    meta = MetaData(bind=engine)\n    log.debug('... retrieving schema')\n    table = Table(table_name, meta, autoload=True)\n    if include_ddl:\n        log.debug('... producing DDL')\n        dump_ddl(table.metadata, dialect_name=engine.dialect.name, fileobj=fileobj)\n    log.debug('... fetching records')\n    query = select(table.columns)\n    if wheredict:\n        for (k, v) in wheredict.items():\n            col = table.columns.get(k)\n            query = query.where((col == v))\n    cursor = engine.execute(query)\n    if multirow:\n        row_dict_list = []\n        for r in cursor:\n            row_dict_list.append(dict(r))\n        if row_dict_list:\n            statement = table.insert().values(row_dict_list)\n            insert_str = get_literal_query(statement, bind=engine)\n            writeline_nl(fileobj, insert_str)\n        else:\n            writeline_nl(fileobj, sql_comment('No data!'))\n    else:\n        found_one = False\n        for r in cursor:\n            found_one = True\n            row_dict = dict(r)\n            statement = table.insert(values=row_dict)\n            insert_str = get_literal_query(statement, bind=engine)\n            writeline_nl(fileobj, insert_str)\n        if (not found_one):\n            writeline_nl(fileobj, sql_comment('No data!'))\n    writeline_nl(fileobj, SEP2)\n    log.debug('... done')", "docstring": "Reads a table from the database, and writes SQL to replicate the table's\ndata to the output ``fileobj``.\n\nArgs:\nengine: SQLAlchemy :class:`Engine`\ntable_name: name of the table\nfileobj: file-like object to write to\nwheredict: optional dictionary of ``{column_name: value}`` to use as\n``WHERE`` filters\ninclude_ddl: if ``True``, include the DDL to create the table as well\nmultirow: write multi-row ``INSERT`` statements", "source": "codesearchnet"}
{"code": "def from_dict(cls, d):\n        \n        labels_dict = d['labels_dict']\n        projections = {}\n        structure = None\n        if isinstance(list(d['bands'].values())[0], dict):\n            eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data'])\n                         for k in d['bands']}\n        else:\n            eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}\n        if 'structure' in d:\n            structure = Structure.from_dict(d['structure'])\n        if d.get('projections'):\n            projections = {Spin(int(spin)): np.array(v)\n                           for spin, v in d[\"projections\"].items()}\n\n        return BandStructure(\n            d['kpoints'], eigenvals,\n            Lattice(d['lattice_rec']['matrix']), d['efermi'],\n            labels_dict, structure=structure, projections=projections)", "docstring": "Create from dict.\n\nArgs:\nA dict with all data for a band structure object.\n\nReturns:\nA BandStructure object", "source": "juraj-google-style"}
{"code": "def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs):\n    image_processor_dict = image_processor_dict.copy()\n    return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n    if 'size' in kwargs and 'size' in image_processor_dict:\n        image_processor_dict['size'] = kwargs.pop('size')\n    if 'crop_size' in kwargs and 'crop_size' in image_processor_dict:\n        image_processor_dict['crop_size'] = kwargs.pop('crop_size')\n    image_processor = cls(**image_processor_dict)\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(image_processor, key):\n            setattr(image_processor, key, value)\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    logger.info(f'Image processor {image_processor}')\n    if return_unused_kwargs:\n        return (image_processor, kwargs)\n    else:\n        return image_processor", "docstring": "Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of parameters.\n\nArgs:\nimage_processor_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the image processor object. Such a dictionary can be\nretrieved from a pretrained checkpoint by leveraging the\n[`~image_processing_utils.ImageProcessingMixin.to_dict`] method.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the image processor object.\n\nReturns:\n[`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those\nparameters.", "source": "github-repos"}
{"code": "def document(self, name, file_name, owner=None, **kwargs):\n        \n        return Document(self.tcex, name, file_name, owner=owner, **kwargs)", "docstring": "Create the Document TI object.\n\nArgs:\nowner:\nname:\nfile_name:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def evaluate_forward(distribution, x_data, parameters=None, cache=None):\n    assert (len(x_data) == len(distribution)), ('distribution %s is not of length %d' % (distribution, len(x_data)))\n    assert hasattr(distribution, '_cdf'), 'distribution require the `_cdf` method to function.'\n    cache = (cache if (cache is not None) else {})\n    parameters = load_parameters(distribution, '_cdf', parameters=parameters, cache=cache)\n    cache[distribution] = x_data\n    out = numpy.zeros(x_data.shape)\n    out[:] = distribution._cdf(x_data, **parameters)\n    return out", "docstring": "Evaluate forward Rosenblatt transformation.\n\nArgs:\ndistribution (Dist):\nDistribution to evaluate.\nx_data (numpy.ndarray):\nLocations for where evaluate forward transformation at.\nparameters (:py:data:typing.Any):\nCollection of parameters to override the default ones in the\ndistribution.\ncache (:py:data:typing.Any):\nA collection of previous calculations in case the same distribution\nturns up on more than one occasion.\n\nReturns:\nThe cumulative distribution values of ``distribution`` at location\n``x_data`` using parameters ``parameters``.", "source": "codesearchnet"}
{"code": "def read(self, length=-1):\n        \n        if 0 <= length < len(self):\n            newpos = self.pos + length\n            data = self.buf[self.pos:newpos]\n            self.pos = newpos\n            self.__discard()\n            return data\n\n        data = self.buf[self.pos:]\n        self.clear()\n        return data", "docstring": "Reads from the FIFO.\n\nReads as much data as possible from the FIFO up to the specified\nlength. If the length argument is negative or ommited all data\ncurrently available in the FIFO will be read. If there is no data\navailable in the FIFO an empty string is returned.\n\nArgs:\nlength: The amount of data to read from the FIFO. Defaults to -1.", "source": "juraj-google-style"}
{"code": "def make_sine_surface(dims=DEFAULT_DIMS, offset=0.5, scale=1.0):\n    gradients = (((np.array(make_gradients(dims)) - offset) * scale) * np.pi)\n    return np.sin(np.linalg.norm(gradients, axis=0))", "docstring": "Makes a surface from the 3D sine function.\n\nArgs:\ndims (pair): the dimensions of the surface to create\noffset (float): an offset applied to the function\nscale (float): a scale applied to the sine frequency\n\nReturns:\nsurface: A surface.", "source": "codesearchnet"}
{"code": "def cast(self, dtype: tf.DType) -> 'TensorFluent':\n        \n        if self.dtype == dtype:\n            return self\n        t = tf.cast(self.tensor, dtype)\n        scope = self.scope.as_list()\n        batch = self.batch\n        return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the cast operation with given `dtype`.\n\nArgs:\ndtype: The output's data type.\n\nReturns:\nA TensorFluent wrapping the cast operation.", "source": "juraj-google-style"}
{"code": "def get_keys(data_list, leading_columns=LEADING_COLUMNS):\n    \n    all_keys = set().union(*(list(d.keys()) for d in data_list))\n\n    leading_keys = []\n\n    for key in leading_columns:\n        if key not in all_keys:\n            continue\n        leading_keys.append(key)\n        all_keys.remove(key)\n\n    return leading_keys + sorted(all_keys)", "docstring": "Gets all possible keys from a list of dicts, sorting by leading_columns first\n\nArgs:\ndata_list: list of dicts to pull keys from\nleading_columns: list of keys to put first in the result\n\nReturns:\nlist of keys to be included as columns in excel worksheet", "source": "juraj-google-style"}
{"code": "def _interpretPayload(functioncode, payload):\n    r\n    raise NotImplementedError()\n    output = ''\n    output += 'Modbus payload decoder\\n'\n    output += 'Input payload (length {} characters): {!r} \\n'.format(len(payload), payload)\n    output += 'Function code: {} (dec).\\n'.format(functioncode)\n    \n    if len(payload) == 4:\n        FourbyteMessageFirstHalfValue = _twoByteStringToNum(payload[0:2])\n        FourbyteMessageSecondHalfValue = _twoByteStringToNum(payload[2:4])\n\n\n    return output", "docstring": "r\"\"\"Generate a human readable description of a Modbus payload.\n\nArgs:\n* functioncode (int): Function code\n* payload (str): The payload that should be interpreted. It should be a byte string.\n\nReturns:\nA descriptive string.\n\nFor example, the payload ``'\\x10\\x01\\x00\\x01'`` for functioncode 3 should give something like::\n\nTODO: Update", "source": "juraj-google-style"}
{"code": "def _push(project):\n    repo = project.repo\n    remote_name = project.get('project', 'remote')\n    remote = repo.remote(remote_name)\n    result = _call_remote_push(remote)\n    failures = lfilter(complement(did_git_push_succeed), result)\n    if failures:\n        for push_info in failures:\n            logger.error('Failed to push ref {from_ref} to {to_ref}'.format(from_ref=push_info.local_ref.name, to_ref=push_info.remote_ref.name))\n        raise BalletError('Push failed')", "docstring": "Push default branch and project template branch to remote\n\nWith default config (i.e. remote and branch names), equivalent to::\n\n$ git push origin master:master project-template:project-template\n\nRaises:\nballet.exc.BalletError: Push failed in some way", "source": "codesearchnet"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _get_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.ones(shape, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\nsupported. If not specified, `tf.keras.backend.floatx()` is used,\nwhich default to `float32` unless you configured it otherwise\n(via `tf.keras.backend.set_floatx(float_dtype)`).\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "async def rewind(self, query='1'):\n    if (not (self.state == 'ready')):\n        logger.debug(\"Trying to rewind from wrong state '{}'\".format(self.state))\n        return\n    if (query == ''):\n        query = '1'\n    try:\n        num = int(query)\n    except TypeError:\n        self.statuslog.error('Rewind argument must be a number')\n    except ValueError:\n        self.statuslog.error('Rewind argument must be a number')\n    else:\n        if (len(self.prev_queue) == 0):\n            self.statuslog.error('No songs to rewind')\n            return\n        if (num < 0):\n            self.statuslog.error('Rewind must be postitive or 0')\n            return\n        elif (num > len(self.prev_queue)):\n            self.statuslog.warning('Rewinding to start')\n        else:\n            self.statuslog.info('Rewinding')\n        for i in range((num + 1)):\n            if (len(self.prev_queue) > 0):\n                self.queue.insert(0, self.prev_queue.pop())\n        try:\n            self.streamer.stop()\n        except Exception as e:\n            logger.exception(e)", "docstring": "The rewind command\n\nArgs:\nquery (str): The number of items to skip", "source": "codesearchnet"}
{"code": "def _publish_actor_class_to_key(self, key, actor_class_info):\n        \n        \n        \n        self._worker.redis_client.hmset(key, actor_class_info)\n        self._worker.redis_client.rpush(\"Exports\", key)", "docstring": "Push an actor class definition to Redis.\n\nThe is factored out as a separate function because it is also called\non cached actor class definitions when a worker connects for the first\ntime.\n\nArgs:\nkey: The key to store the actor class info at.\nactor_class_info: Information about the actor class.", "source": "juraj-google-style"}
{"code": "def push_file(self, local_source, remote_dir):\n        \n        remote_dest = remote_dir + '/' + os.path.basename(local_source)\n\n        try:\n            self.makedirs(remote_dir, exist_ok=True)\n        except IOError as e:\n            logger.exception(\"Pushing {0} to {1} failed\".format(local_source, remote_dir))\n            if e.errno == 2:\n                raise BadScriptPath(e, self.hostname)\n            elif e.errno == 13:\n                raise BadPermsScriptPath(e, self.hostname)\n            else:\n                logger.exception(\"File push failed due to SFTP client failure\")\n                raise FileCopyException(e, self.hostname)\n        try:\n            self.sftp_client.put(local_source, remote_dest, confirm=True)\n            \n            self.sftp_client.chmod(remote_dest, 0o777)\n        except Exception as e:\n            logger.exception(\"File push from local source {} to remote destination {} failed\".format(\n                local_source, remote_dest))\n            raise FileCopyException(e, self.hostname)\n\n        return remote_dest", "docstring": "Transport a local file to a directory on a remote machine\n\nArgs:\n- local_source (string): Path\n- remote_dir (string): Remote path\n\nReturns:\n- str: Path to copied file on remote machine\n\nRaises:\n- BadScriptPath : if script path on the remote side is bad\n- BadPermsScriptPath : You do not have perms to make the channel script dir\n- FileCopyException : FileCopy failed.", "source": "juraj-google-style"}
{"code": "def matches(self, msg_seq: int, msg: MessageInterface) -> bool:\n        \n        return all(crit.matches(msg_seq, msg) for crit in self.all_criteria)", "docstring": "The message matches if all the defined search key criteria match.\n\nArgs:\nmsg_seq: The message sequence ID.\nmsg: The message object.", "source": "juraj-google-style"}
{"code": "def is_gpu(self):\n    return (self._device.get_info(cl.device_info.TYPE) == cl.device_type.GPU)", "docstring": "Check if the device associated with this environment is a GPU.\n\nReturns:\nboolean: True if the device is an GPU, false otherwise.", "source": "codesearchnet"}
{"code": "def _empty_resource_attributes(self):\n\n\t\t\n\n\t\tself.status_code = 404\n\t\tself.headers = {}\n\t\tself.exists = False\n\n\t\t\n\t\tself.rdf = self._build_rdf()\n\n\t\t\n\t\tif type(self) == NonRDFSource:\n\t\t\tself.binary.empty()", "docstring": "small method to empty values if resource is removed or absent\n\nArgs:\nNone\n\nReturn:\nNone: empties selected resource attributes", "source": "juraj-google-style"}
{"code": "def get(self, request, *args, **kwargs):\n    context = self.get_context_data(**kwargs)\n    context.update(self.extra_context)\n    context['crumbs'] = self.get_crumbs()\n    context['title'] = self.title\n    context['suit'] = ('suit' in settings.INSTALLED_APPS)\n    if ((context.get('dashboard_grid', None) is None) and self.grid):\n        context['dashboard_grid'] = self.grid\n    return self.render_to_response(context)", "docstring": "Django view get function.\n\nAdd items of extra_context, crumbs and grid to context.\n\nArgs:\nrequest (): Django's request object.\n*args (): request args.\n**kwargs (): request kwargs.\n\nReturns:\nresponse: render to response with context.", "source": "codesearchnet"}
{"code": "def normal_var(data, mean):\n    \n    if not isinstance(data, np.ndarray):\n        data = np.array(data)\n\n    cumm = [0.0]\n    cumm.extend(np.cumsum(np.power(np.abs(data - mean), 2)))\n\n    def cost(s, t):\n        \n        dist = float(t - s)\n        diff = cumm[t] - cumm[s]\n        return dist * np.log(diff/dist)\n\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nNormal distribution with changing variance\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nvariance (float): variance\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "juraj-google-style"}
{"code": "def dump(self, destination, with_defaults=False):\n        \n        if isinstance(destination, six.string_types):\n            with open(destination, 'w', encoding='utf-8') as f:\n                self._rw.dump_config_to_file(self._config, f, with_defaults=with_defaults)\n        else:\n            self._rw.dump_config_to_file(self._config, destination, with_defaults=with_defaults)", "docstring": "Write configuration values to the specified destination.\n\nArgs:\ndestination:\nwith_defaults (bool): if ``True``, values of items with no custom values will be included in the output\nif they have a default value set.", "source": "juraj-google-style"}
{"code": "def parse(self, text):\n    tokens = self.lex(text)\n    parser = Parser(tokens)\n    return parser.parse()", "docstring": "Parse self.text.\n\nArgs:\ntext (str): the text to lex\n\nReturns:\nobject: a node representing the current rule.", "source": "codesearchnet"}
{"code": "def run_query(query: str) -> None:\n    try:\n        result = parse_query(query)\n    except Exception as e:\n        result = f'ERROR: {type(e).__name__}: {e.__str__()}.'\n        return result\n    return filter_records(convert_to_dataframe(result), query)", "docstring": "Run a query and display the result.\n\nArgs:\nquery (str): The query to be executed.", "source": "github-repos"}
{"code": "def to_weld_type(weld_type, dim):\n    \n    for i in xrange(dim):\n        weld_type = WeldVec(weld_type)\n    return weld_type", "docstring": "Summary\n\nArgs:\nweld_type (TYPE): Description\ndim (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def get_firmware(self):\n    firmware_uri = self._helper.build_subresource_uri(self.data['uri'], subresource_path=self.FIRMWARE_PATH)\n    return self._helper.do_get(firmware_uri)", "docstring": "Gets the installed firmware for a logical interconnect.\n\nReturns:\ndict: LIFirmware.", "source": "codesearchnet"}
{"code": "def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):\n    assert (method in ('equal', 'cosine', 'random')), 'Invalid method'\n    wind_eq = melodist.distribute_equally(wind_daily)\n    if (method == 'equal'):\n        wind_disagg = wind_eq\n    elif (method == 'cosine'):\n        assert (None not in (a, b, t_shift))\n        wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)\n    elif (method == 'random'):\n        wind_disagg = (wind_eq * ((- np.log(np.random.rand(len(wind_eq)))) ** 0.3))\n    return wind_disagg", "docstring": "general function for windspeed disaggregation\n\nArgs:\nwind_daily: daily values\nmethod: keyword specifying the disaggregation method to be used\na: parameter a for the cosine function\nb: parameter b for the cosine function\nt_shift: parameter t_shift for the cosine function\n\nReturns:\nDisaggregated hourly values of windspeed.", "source": "codesearchnet"}
{"code": "def _ParseFValue(self, registry_key):\n    \n    registry_value = registry_key.GetValueByName('F')\n    if not registry_value:\n      raise errors.ParseError(\n          'missing value: \"F\" in Windows Registry key: {0:s}.'.format(\n              registry_key.name))\n\n    f_value_map = self._GetDataTypeMap('f_value')\n\n    try:\n      return self._ReadStructureFromByteStream(\n          registry_value.data, 0, f_value_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(exception)", "docstring": "Parses an F value.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nReturns:\nf_value: F value stored in the Windows Registry key.\n\nRaises:\nParseError: if the Windows Registry key does not contain an F value or\nF value cannot be parsed.", "source": "juraj-google-style"}
{"code": "def SetLookupHash(self, lookup_hash):\n    \n    if lookup_hash not in self.SUPPORTED_HASHES:\n      raise ValueError('Unsupported lookup hash: {0!s}'.format(lookup_hash))\n\n    self.lookup_hash = lookup_hash", "docstring": "Sets the hash to query.\n\nArgs:\nlookup_hash (str): name of the hash attribute to look up.\n\nRaises:\nValueError: if the lookup hash is not supported.", "source": "juraj-google-style"}
{"code": "def _full_pred_succ_maps(self, pred_map, succ_map, input_circuit, wire_map):\n    full_pred_map = {}\n    full_succ_map = {}\n    for w in input_circuit.input_map:\n        if (w in wire_map):\n            full_pred_map[wire_map[w]] = pred_map[wire_map[w]]\n            full_succ_map[wire_map[w]] = succ_map[wire_map[w]]\n        else:\n            full_succ_map[w] = self.output_map[w]\n            full_pred_map[w] = self._multi_graph.predecessors(self.output_map[w])[0]\n            if (len(list(self._multi_graph.predecessors(self.output_map[w]))) != 1):\n                raise DAGCircuitError(('too many predecessors for %s[%d] output node' % (w[0], w[1])))\n    return (full_pred_map, full_succ_map)", "docstring": "Map all wires of the input circuit.\n\nMap all wires of the input circuit to predecessor and\nsuccessor nodes in self, keyed on wires in self.\n\nArgs:\npred_map (dict): comes from _make_pred_succ_maps\nsucc_map (dict): comes from _make_pred_succ_maps\ninput_circuit (DAGCircuit): the input circuit\nwire_map (dict): the map from wires of input_circuit to wires of self\n\nReturns:\ntuple: full_pred_map, full_succ_map (dict, dict)\n\nRaises:\nDAGCircuitError: if more than one predecessor for output nodes", "source": "codesearchnet"}
{"code": "def __live_receivers(signal):\n    with __lock:\n        __purge()\n        receivers = [funcref() for funcref in __receivers[signal]]\n    return receivers", "docstring": "Return all signal handlers that are currently still alive for the\ninput `signal`.\n\nArgs:\nsignal: A signal name.\n\nReturns:\nA list of callable receivers for the input signal.", "source": "codesearchnet"}
{"code": "def push(self, targets, jobs=None, remote=None, show_checksums=False):\n    return self.repo.cache.local.push(targets, jobs=jobs, remote=self._get_cloud(remote, 'push'), show_checksums=show_checksums)", "docstring": "Push data items in a cloud-agnostic way.\n\nArgs:\ntargets (list): list of targets to push to the cloud.\njobs (int): number of jobs that can be running simultaneously.\nremote (dvc.remote.base.RemoteBase): optional remote to push to.\nBy default remote from core.remote config option is used.\nshow_checksums (bool): show checksums instead of file names in\ninformation messages.", "source": "codesearchnet"}
{"code": "def load(path, compile=True, options=None):\n    metadata = saved_metadata_pb2.SavedMetadata()\n    meta_graph_def = loader_impl.parse_saved_model(path).meta_graphs[0]\n    object_graph_def = meta_graph_def.object_graph_def\n    path_to_metadata_pb = os.path.join(path, constants.SAVED_METADATA_PATH)\n    if gfile.Exists(path_to_metadata_pb):\n        try:\n            with gfile.GFile(path_to_metadata_pb, 'rb') as f:\n                file_content = f.read()\n            metadata.ParseFromString(file_content)\n        except message.DecodeError as e:\n            raise IOError('Cannot parse keras metadata {}: {}.'.format(path_to_metadata_pb, str(e)))\n    else:\n        logging.warning('SavedModel saved prior to TF 2.5 detected when loading Keras model. Please ensure that you are saving the model with model.save() or tf.keras.models.save_model(), *NOT* tf.saved_model.save(). To confirm, there should be a file named \"keras_metadata.pb\" in the SavedModel directory.')\n        _read_legacy_metadata(object_graph_def, metadata)\n    if not metadata.nodes:\n        return tf_load.load(path, options=options)\n    keras_loader = KerasObjectLoader(metadata, object_graph_def)\n    keras_loader.load_layers(compile=compile)\n    nodes_to_load = {'root': None}\n    for node_id, loaded_node in keras_loader.loaded_nodes.items():\n        nodes_to_load[keras_loader.get_path(node_id)] = loaded_node\n    loaded = tf_load.load_partial(path, nodes_to_load, options=options)\n    keras_loader.finalize_objects()\n    keras_loader.del_tracking()\n    model = loaded['root']\n    if isinstance(model, training_lib.Model) and compile:\n        training_config = model._serialized_attributes['metadata'].get('training_config', None)\n        if training_config is not None:\n            model.compile(**saving_utils.compile_args_from_training_config(training_config), from_serialized=True)\n            saving_utils.try_build_compiled_arguments(model)\n            if isinstance(model.optimizer, optimizer_v2.OptimizerV2):\n                if model.optimizer.get_slot_names():\n                    logging.warning('Your optimizer uses slots. Slots cannot be restored from saved_model, as a result, your model is starting with  a new initialized optimizer.')\n        else:\n            logging.warning('No training configuration found in save file, so the model was *not* compiled. Compile it manually.')\n    if not context.executing_eagerly():\n        sess = backend.get_session()\n        sess.run(ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS))\n    return model", "docstring": "Loads Keras objects from a SavedModel.\n\nAny Keras layer or model saved to the SavedModel will be loaded back\nas Keras objects. Other objects are loaded as regular trackable objects (same\nas `tf.saved_model.load`).\n\nCurrently, Keras saving/loading only retains the Keras object's weights,\nlosses, and call function.\n\nThe loaded model can be re-compiled, but the original optimizer, compiled loss\nfunctions, and metrics are not retained. This is temporary, and `model.save`\nwill soon be able to serialize compiled models.\n\nArgs:\npath: Path to SavedModel.\ncompile: If true, compile the model after loading it.\noptions: Optional `tf.saved_model.LoadOptions` object that specifies\noptions for loading from SavedModel.\n\n\nReturns:\nObject loaded from SavedModel.", "source": "github-repos"}
{"code": "def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills):\n    hills = []\n    for foot in foothills:\n        center = foot[0]\n        hills[:] = foot[1][:]\n        while (len(hills) > 0):\n            pt = hills.pop((- 1))\n            marked[pt] = self.GLOBBED\n            for (s_index, val) in np.ndenumerate(marked[((pt[0] - 1):(pt[0] + 2), (pt[1] - 1):(pt[1] + 2))]):\n                index = (((s_index[0] - 1) + pt[0]), ((s_index[1] - 1) + pt[1]))\n                if (val == self.UNMARKED):\n                    if ((q_data[index] >= 0) and (q_data[index] < bin_lower) and ((q_data[index] <= q_data[pt]) or self.is_closest(index, center, centers, bin_num))):\n                        hills.append(index)\n    del foothills[:]", "docstring": "Mark points determined to be foothills as globbed, so that they are not included in\nfuture searches. Also searches neighboring points to foothill points to determine\nif they should also be considered foothills.\n\nArgs:\nq_data: Quantized data\nmarked: Marked\nbin_num: Current bin being searched\nbin_lower: Next bin being searched\ncenters: dictionary of local maxima considered to be object centers\nfoothills: List of foothill points being removed.", "source": "codesearchnet"}
{"code": "def source(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `source`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `source`')\n    self._source = value", "docstring": "Corresponds to IDD Field `source`\n\nArgs:\nvalue (str): value for IDD Field `source`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def set_user_info(self, nick, user='*', real='*'):\n    if self.connected:\n        raise Exception(\"Can't set user info now, we're already connected!\")\n    if (not self.connected):\n        self.nick = nick\n    self.connect_info['user'] = {'nick': nick, 'user': user, 'real': real}", "docstring": "Sets user info for this server, to be used before connection.\n\nArgs:\nnick (str): Nickname to use.\nuser (str): Username to use.\nreal (str): Realname to use.", "source": "codesearchnet"}
{"code": "def validate_id_pool(self, id_or_uri, ids_pools):\n    uri = ((self._client.build_uri(id_or_uri) + '/validate?idList=') + '&idList='.join(ids_pools))\n    return self._client.get(uri)", "docstring": "Validates an ID pool.\n\nArgs:\nid_or_uri:\nID or URI of range.\nids_pools (list):\nList of Id Pools.\n\nReturns:\ndict: A dict containing a list with IDs.", "source": "codesearchnet"}
{"code": "def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):\n        \n        \n        super(Dataset, self).update_from_json(path)\n        self.separate_resources()", "docstring": "Update dataset metadata with static metadata from JSON file\n\nArgs:\npath (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(MinerTransaction, self).__init__(*args, **kwargs)\n        self.Type = TransactionType.MinerTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def IsActiveOn(self, date, date_object=None):\n    if (date in self.date_exceptions):\n        (exception_type, _) = self.date_exceptions[date]\n        if (exception_type == self._EXCEPTION_TYPE_ADD):\n            return True\n        else:\n            return False\n    if (self.start_date and self.end_date and (self.start_date <= date) and (date <= self.end_date)):\n        if (date_object is None):\n            date_object = util.DateStringToDateObject(date)\n        return self.day_of_week[date_object.weekday()]\n    return False", "docstring": "Test if this service period is active on a date.\n\nArgs:\ndate: a string of form \"YYYYMMDD\"\ndate_object: a date object representing the same date as date.\nThis parameter is optional, and present only for performance\nreasons.\nIf the caller constructs the date string from a date object\nthat date object can be passed directly, thus avoiding the\ncostly conversion from string to date object.\n\nReturns:\nTrue iff this service is active on date.", "source": "codesearchnet"}
{"code": "def get_task_states(self, job_configs):\n    if self._context_handle:\n        job_names, task_nums = zip(*job_configs)\n        return pywrap_tfe.TFE_GetTaskStates(self._context_handle, job_names, task_nums)\n    else:\n        raise ValueError('Context is not initialized.')", "docstring": "Get task states from the Coordination Service.\n\nArgs:\njob_configs: A list of tuples of job name and task number.\n\nReturns:\nA list of TF_Status.", "source": "github-repos"}
{"code": "def getById(self, Id):\n        \n        \n        csvsource = CSVSource(self.source, self.factory, self.key())\n        try:\n            for item in csvsource.items():\n                if Id == item.getId():\n                    return item\n        except StopIteration:\n            return None", "docstring": "Returns ICachableItem that matches id\n\nArgs:\nid: String that identifies the item to return whose key matches", "source": "juraj-google-style"}
{"code": "def shuffle(self, func, lengths, **kwargs):\n    num_splits = len(lengths)\n    kwargs['manual_partition'] = True\n    kwargs['_lengths'] = lengths\n    args = [self.axis, func, num_splits, kwargs, False]\n    args.extend(self.list_of_blocks)\n    return self._wrap_partitions(self.deploy_axis_func(*args))", "docstring": "Shuffle the order of the data in this axis based on the `lengths`.\n\nExtends `BaseFrameAxisPartition.shuffle`.\n\nArgs:\nfunc: The function to apply before splitting.\nlengths: The list of partition lengths to split the result into.\n\nReturns:\nA list of RemotePartition objects split by `lengths`.", "source": "codesearchnet"}
{"code": "def create_graph_from_data(self, data):\n        \n        warnings.warn(\"An exhaustive search of the causal structure of CGNN without\"\n                      \" skeleton is super-exponential in the number of variables.\")\n\n        \n        nb_vars = len(list(data.columns))\n        data = scale(data.values).astype('float32')\n\n        candidates = [np.reshape(np.array(i), (nb_vars, nb_vars)) for i in itertools.product([0, 1], repeat=nb_vars*nb_vars)\n                      if (np.trace(np.reshape(np.array(i), (nb_vars, nb_vars))) == 0\n                          and nx.is_directed_acyclic_graph(nx.DiGraph(np.reshape(np.array(i), (nb_vars, nb_vars)))))]\n\n        warnings.warn(\"A total of {} graphs will be evaluated.\".format(len(candidates)))\n        scores = [parallel_graph_evaluation(data, i, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu,\n                                            nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs,\n                                            test_epochs=self.test_epochs, verbose=self.verbose) for i in candidates]\n        final_candidate = candidates[scores.index(min(scores))]\n        output = np.zeros(final_candidate.shape)\n\n        \n        for (i, j), x in np.ndenumerate(final_candidate):\n            if x > 0:\n                cand = final_candidate\n                cand[i, j] = 0\n                output[i, j] = min(scores) - scores[candidates.index(cand)]\n\n        return nx.DiGraph(candidates[output],\n                          {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Use CGNN to create a graph from scratch. All the possible structures\nare tested, which leads to a super exponential complexity. It would be\npreferable to start from a graph skeleton for large graphs.\n\nArgs:\ndata (pandas.DataFrame): Observational data on which causal\ndiscovery has to be performed.\nReturns:\nnetworkx.DiGraph: Solution given by CGNN.", "source": "juraj-google-style"}
{"code": "def get_snapshots(self):\n    data = self.get_data(('volumes/%s/snapshots/' % self.id))\n    snapshots = list()\n    for jsond in data[u'snapshots']:\n        snapshot = Snapshot(**jsond)\n        snapshot.token = self.token\n        snapshots.append(snapshot)\n    return snapshots", "docstring": "Retrieve the list of snapshots that have been created from a volume.\n\nArgs:", "source": "codesearchnet"}
{"code": "def log(self, logger=None, label=None, eager=False):\n    if self.closed():\n        raise ValueError('Attempt to call log() on a closed Queryable.')\n    if (logger is None):\n        return self\n    if (label is None):\n        label = repr(self)\n    if eager:\n        return self._create(self._eager_log_result(logger, label))\n    return self._create(self._generate_lazy_log_result(logger, label))", "docstring": "Log query result consumption details to a logger.\n\nArgs:\nlogger: Any object which supports a debug() method which accepts a\nstr, such as a Python standard library logger object from the\nlogging module.  If logger is not provided or is None, this\nmethod has no logging side effects.\n\nlabel: An optional label which will be inserted into each line of\nlogging output produced by this particular use of log\n\neager: An optional boolean which controls how the query result will\nbe consumed.  If True, the sequence will be consumed and logged\nin its entirety. If False (the default) the sequence will be\nevaluated and logged lazily as it consumed.\n\nWarning: Use of eager=True requires use of sufficient memory to\nhold the entire sequence which is obviously not possible with\ninfinite sequences.  Use with care!\n\nReturns:\nA queryable over the unaltered source sequence.\n\nRaises:\nAttributeError: If logger does not support a debug() method.\nValueError: If the Queryable has been closed.", "source": "codesearchnet"}
{"code": "def square(duration: int, amp: complex, period: float=None, phase: float=0, name: str=None) -> SamplePulse:\n    if (period is None):\n        period = duration\n    return _sampled_square_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates square wave `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "codesearchnet"}
{"code": "def download(path='.', url=None, unpack=False):\n    \n\n    if url is None:\n        url = 'https:\n    if os.path.exists(path) and os.path.isdir(path):\n        basename = os.path.basename(url).split('?')[0]\n        filename = os.path.join(path, basename)\n    else:\n        filename = path\n\n    f = open(filename, 'wb')\n\n    u = urlopen(url)\n    file_size = int(u.headers[\"Content-Length\"][0])\n    print(\"Downloading the latest Neurosynth files: {0} bytes: {1}\".format(\n        url, file_size))\n\n    bytes_dl = 0\n    block_size = 8192\n    while True:\n        buffer = u.read(block_size)\n        if not buffer:\n            break\n        bytes_dl += len(buffer)\n        f.write(buffer)\n        p = float(bytes_dl) / file_size\n        status = r\"{0}  [{1:.2%}]\".format(bytes_dl, p)\n        status = status + chr(8) * (len(status) + 1)\n        sys.stdout.write(status)\n\n    f.close()\n\n    if unpack:\n        import tarfile\n        tarfile.open(filename, 'r:gz').extractall(os.path.dirname(filename))", "docstring": "Download the latest data files.\nArgs:\npath (str): Location to save the retrieved data files. Defaults to\ncurrent directory.\nunpack (bool): If True, unzips the data file post-download.", "source": "juraj-google-style"}
{"code": "def qrandom(n):\n  \n  import quantumrandom\n  return np.concatenate([\n    quantumrandom.get_data(data_type='uint16', array_length=1024)\n    for i in range(int(np.ceil(n/1024.0)))\n  ])[:n]", "docstring": "Creates an array of n true random numbers obtained from the quantum random\nnumber generator at qrng.anu.edu.au\n\nThis function requires the package quantumrandom and an internet connection.\n\nArgs:\nn (int):\nlength of the random array\n\nReturn:\narray of ints:\narray of truly random unsigned 16 bit int values", "source": "juraj-google-style"}
{"code": "def save_feature_names(self, feature_names, feature_list_id):\n        \n\n        save_lines(feature_names, self.features_dir + 'X_train_{}.names'.format(feature_list_id))", "docstring": "Save the names of the features for the given feature list to a metadata file.\nExample: `save_feature_names(['num_employees', 'stock_price'], 'company')`.\n\nArgs:\nfeature_names: A list containing the names of the features, matching the column order.\nfeature_list_id: The name for this feature list.", "source": "juraj-google-style"}
{"code": "def make_gym_env(name, rl_env_max_episode_steps=(- 1), maxskip_env=False, rendered_env=False, rendered_env_resize_to=None, sticky_actions=False):\n    env = gym.make(name)\n    return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env, rendered_env_resize_to, sticky_actions)", "docstring": "Create a gym env optionally with a time limit and maxskip wrapper.\n\nNOTE: The returned env may already be wrapped with TimeLimit!\n\nArgs:\nname: `str` - base name of the gym env to make.\nrl_env_max_episode_steps: `int` or None - Using any value < 0 returns the\nenv as-in, otherwise we impose the requested timelimit. Setting this to\nNone returns a wrapped env that doesn't have a step limit.\nmaxskip_env: whether to also use MaxAndSkip wrapper before time limit.\nrendered_env: whether to force render for observations. Use this for\nenvironments that are not natively rendering the scene for observations.\nrendered_env_resize_to: a list of [height, width] to change the original\nresolution of the native environment render.\nsticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.\n\nReturns:\nAn instance of `gym.Env` or `gym.Wrapper`.", "source": "codesearchnet"}
{"code": "def from_dict(cls, config_dict, **kwargs):\n    config = cls(**config_dict)\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(config, key):\n            setattr(config, key, value)\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    return config", "docstring": "Constructs a CacheConfig instance from a dictionary of parameters.\nArgs:\nconfig_dict (Dict[str, Any]): Dictionary containing configuration parameters.\n**kwargs: Additional keyword arguments to override dictionary values.\n\nReturns:\nCacheConfig: Instance of CacheConfig constructed from the dictionary.", "source": "github-repos"}
{"code": "def __init__(self, access_token=None, rate_limit=True):\n        \n        super(Search, self).__init__()\n        self.session = self.get_session(access_token=access_token,\n                                        rate_limit=rate_limit)\n        self._ignore_codes = []\n        if rate_limit:\n            self._ignore_codes.append(429)", "docstring": "Construct a Search object.\n\nArgs:\naccess_token (str): A valid Companies House API. If an\naccess token isn't specified then looks for *CompaniesHouseKey*\nor COMPANIES_HOUSE_KEY environment variables. Defaults to None.", "source": "juraj-google-style"}
{"code": "def __init__(self, quant_debug_model_path: Optional[str]=None, quant_debug_model_content: Optional[bytes]=None, float_model_path: Optional[str]=None, float_model_content: Optional[bytes]=None, debug_dataset: Optional[Callable[[], Iterable[Sequence[np.ndarray]]]]=None, debug_options: Optional[QuantizationDebugOptions]=None, converter: Optional[TFLiteConverter]=None) -> None:\n    self._data_gen = debug_dataset\n    self._debug_options = debug_options or QuantizationDebugOptions()\n    self.converter = None\n    self.calibrated_model = None\n    self.float_model = None\n    self._float_interpreter = None\n    if converter is not None:\n        if self._debug_options.model_debug_metrics:\n            old_optimizations = converter.optimizations\n            self.converter = self._set_converter_options_for_float(converter)\n            self.float_model = self.converter.convert()\n            converter.optimizations = old_optimizations\n        self.converter = self._set_converter_options_for_calibration(converter)\n        self.calibrated_model = self.converter.convert()\n        self._init_from_converter(self._debug_options, self.converter, self.calibrated_model, float_model=self.float_model)\n    else:\n        self._quant_interpreter = _interpreter.Interpreter(quant_debug_model_path, quant_debug_model_content, experimental_preserve_all_tensors=self._debug_options.layer_direct_compare_metrics is not None)\n        if self._debug_options.model_debug_metrics:\n            self._float_interpreter = _interpreter.Interpreter(float_model_path, float_model_content)\n    self._initialize_stats()", "docstring": "Runs the TFLite debugging model with given debug options.\n\nArgs:\nquant_debug_model_path: Path to the quantized debug TFLite model file.\nquant_debug_model_content: Content of the quantized debug TFLite model.\nfloat_model_path: Path to float TFLite model file.\nfloat_model_content: Content of the float TFLite model.\ndebug_dataset: a factory function that returns dataset generator which is\nused to generate input samples (list of np.ndarray) for the model. The\ngenerated elements must have same types and shape as inputs to the\nmodel.\ndebug_options: Debug options to debug the given model.\nconverter: Optional, use converter instead of quantized model.\n\nRaises:\nValueError: If the debugger was unable to be created.\n\nAttributes:\nlayer_statistics: results of error metrics for each NumericVerify op\nresults. in {layer_name: {metric_name: metric}} format.\nmodel_statistics: results of error metrics for difference between float\nand quantized models. in {metric_name: metric} format.", "source": "github-repos"}
{"code": "def _has_no_variables(sess: session.Session) -> bool:\n    for op in sess.graph.get_operations():\n        if op.type.startswith('Variable') or op.type.endswith('VariableOp'):\n            return False\n    return True", "docstring": "Determines if the graph has any variables.\n\nArgs:\nsess: TensorFlow Session.\n\nReturns:\nBool.", "source": "github-repos"}
{"code": "def resolve(node, source_info, graphs, definition_factory=Definition):\n    visitor = TreeAnnotator(source_info, graphs, definition_factory)\n    node = visitor.visit(node)\n    return node", "docstring": "Resolves reaching definitions for each symbol.\n\nArgs:\nnode: ast.AST\nsource_info: transformer.SourceInfo\ngraphs: Dict[ast.FunctionDef, cfg.Graph]\ndefinition_factory: Callable[[], Definition]\nReturns:\nast.AST", "source": "github-repos"}
{"code": "def from_file(filename, file_format='xyz'):\n    mols = list(pb.readfile(str(file_format), str(filename)))\n    return BabelMolAdaptor(mols[0].OBMol)", "docstring": "Uses OpenBabel to read a molecule from a file in all supported formats.\n\nArgs:\nfilename: Filename of input file\nfile_format: String specifying any OpenBabel supported formats.\n\nReturns:\nBabelMolAdaptor object", "source": "codesearchnet"}
{"code": "def send_eager_tracebacks(destinations, origin_stack, send_source=True):\n    _send_call_tracebacks(destinations, origin_stack, is_eager_execution=True, send_source=send_source)", "docstring": "Send the tracebacks of an eager execution call to debug server(s).\n\nArgs:\ndestinations: gRPC destination addresses, a `str` or a `list` of `str`s,\ne.g., \"localhost:4242\". If a `list`, gRPC requests containing the same\norigin_stack: The traceback of the eager operation invocation.\nsend_source: Whether the source files involved in the op tracebacks but\noutside the TensorFlow library are to be sent.", "source": "github-repos"}
{"code": "def _finalize_outputs(cls, mapreduce_spec, mapreduce_state):\n    if (mapreduce_spec.mapper.output_writer_class() and (mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS)):\n        mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)", "docstring": "Finalize outputs.\n\nArgs:\nmapreduce_spec: an instance of MapreduceSpec.\nmapreduce_state: an instance of MapreduceState.", "source": "codesearchnet"}
{"code": "def init_class_and_forward_node(self, node, cls, container=None, extra_key=None):\n    cls_key = cls.expr if cls.is_late_annotation() and (not cls.resolved) else cls\n    cache = self._instance_cache[cls_key]\n    key = (self.current_opcode, extra_key)\n    status = instance = cache.get(key)\n    if not instance or isinstance(instance, _InitClassState):\n        clsvar = cls.to_variable(node)\n        instantiate_directly = any((v is _InitClassState.INSTANTIATING for v in cache.values()))\n        cache[key] = _InitClassState.INSTANTIATING\n        node, instance = self._instantiate_var(node, clsvar, container, instantiate_directly)\n        if instantiate_directly or status is _InitClassState.INITIALIZING:\n            self._mark_maybe_missing_members(instance.data)\n        else:\n            cache[key] = _InitClassState.INITIALIZING\n            node = self.call_init(node, instance)\n        cache[key] = instance\n    return (node, instance)", "docstring": "Instantiate a class, and also call __init__.\n\nCalling __init__ can be expensive, so this method caches its created\ninstances. If you don't need __init__ called, use cls.instantiate instead.\n\nArgs:\nnode: The current node.\ncls: The class to instantiate.\ncontainer: Optionally, a container to pass to the class's instantiate()\nmethod, so that type parameters in the container's template are\ninstantiated to TypeParameterInstance.\nextra_key: Optionally, extra information about the location at which the\ninstantion occurs. By default, this method keys on the current opcode\nand the class, which sometimes isn't enough to disambiguate callers that\nshouldn't get back the same cached instance.\n\nReturns:\nA tuple of node and instance variable.", "source": "github-repos"}
{"code": "def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):\n    arguments = dict(time=self.global_timestep, variables=self.get_variables(), arguments=dict(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals, update=tf.constant(value=True)), fn_reference=self.fn_reference, fn_loss=self.fn_loss)\n    if (self.global_model is not None):\n        arguments['global_variables'] = self.global_model.get_variables()\n    return arguments", "docstring": "Returns the optimizer arguments including the time, the list of variables to optimize,\nand various functions which the optimizer might require to perform an update step.\n\nArgs:\nstates (dict): Dict of state tensors.\ninternals (dict): Dict of prior internal state tensors.\nactions (dict): Dict of action tensors.\nterminal: 1D boolean is-terminal tensor.\nreward: 1D (float) rewards tensor.\nnext_states (dict): Dict of successor state tensors.\nnext_internals (dict): Dict of posterior internal state tensors.\n\nReturns:\nOptimizer arguments as dict to be used as **kwargs to the optimizer.", "source": "codesearchnet"}
{"code": "def create_sample(question: Union[str, List[str]], context: Union[str, List[str]]) -> Union[SquadExample, List[SquadExample]]:\n    if isinstance(question, list):\n        return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]\n    else:\n        return SquadExample(None, question, context, None, None, None)", "docstring": "QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulate all the\nlogic for converting question(s) and context(s) to [`SquadExample`].\n\nWe currently support extractive question answering.\n\nArguments:\nquestion (`str` or `List[str]`): The question(s) asked.\ncontext (`str` or `List[str]`): The context(s) in which we will look for the answer.\n\nReturns:\nOne or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.", "source": "github-repos"}
{"code": "def info(name):\n    \n    try:\n        groupObj = _get_group_object(name)\n        gr_name = groupObj.Name\n        gr_mem = [_get_username(x) for x in groupObj.members()]\n    except pywintypes.com_error as exc:\n        msg = 'Failed to access group {0}. {1}'.format(\n            name, win32api.FormatMessage(exc.excepinfo[5]))\n        log.debug(msg)\n        return False\n\n    if not gr_name:\n        return False\n\n    return {'name': gr_name,\n            'passwd': None,\n            'gid': None,\n            'members': gr_mem}", "docstring": "Return information about a group\n\nArgs:\n\nname (str):\nThe name of the group for which to get information\n\nReturns:\ndict: A dictionary of information about the group\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.info foo", "source": "juraj-google-style"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    prefix_ones = [1] * len(self.prefix_tokens)\n    suffix_ones = [1] * len(self.suffix_tokens)\n    if token_ids_1 is None:\n        return prefix_ones + [0] * len(token_ids_0) + suffix_ones\n    return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def _pre_commit(self, transaction, *args, **kwargs):\n    transaction._clean_up()\n    transaction._begin(retry_id=self.retry_id)\n    self.current_id = transaction._id\n    if (self.retry_id is None):\n        self.retry_id = self.current_id\n    try:\n        return self.to_wrap(transaction, *args, **kwargs)\n    except:\n        transaction._rollback()\n        raise", "docstring": "Begin transaction and call the wrapped callable.\n\nIf the callable raises an exception, the transaction will be rolled\nback. If not, the transaction will be \"ready\" for ``Commit`` (i.e.\nit will have staged writes).\n\nArgs:\ntransaction (~.firestore_v1beta1.transaction.Transaction): A\ntransaction to execute the callable within.\nargs (Tuple[Any, ...]): The extra positional arguments to pass\nalong to the wrapped callable.\nkwargs (Dict[str, Any]): The extra keyword arguments to pass\nalong to the wrapped callable.\n\nReturns:\nAny: result of the wrapped callable.\n\nRaises:\nException: Any failure caused by ``to_wrap``.", "source": "codesearchnet"}
{"code": "def from_pickle(cls, path):\n    with open(os.path.expanduser(path), 'rb') as pickle:\n        return cPickle.Unpickler(pickle).load()", "docstring": "Load all objects from pickle file and return as dict.\n\nThe dict returned will have keys named the same as the\nJSSObject classes contained, and the values will be\nJSSObjectLists of all full objects of that class (for example,\nthe equivalent of my_jss.Computer().retrieve_all()).\n\nThis method can potentially take a very long time!\n\nPickling is Python's method for serializing/deserializing\nPython objects. This allows you to save a fully functional\nJSSObject to disk, and then load it later, without having to\nretrieve it from the JSS.\n\nArgs:\npath: String file path to the file you wish to load from.\nPath will have ~ expanded prior to opening.", "source": "codesearchnet"}
{"code": "def _create_variables_and_slots(self) -> Dict[str, Dict[str, tf_variables.Variable]]:\n    variables = {}\n    for stacked_table_name, tables in self._stacked_table_to_tables.items():\n        variables[stacked_table_name] = self._create_variables(tables, stacked_table_name=stacked_table_name)\n    return variables", "docstring": "Create variables for TPU embeddings.\n\nReturns:\nA dict of dicts. The outer dict is keyed by the table names and the inner\ndicts are keyed by 'parameters' and the slot variable names.", "source": "github-repos"}
{"code": "def bulk_write(self, metrics):\n    actions = []\n    index = self.get_index()\n    for metric in metrics:\n        actions.append({'index': {'_index': index, '_type': self.doc_type}})\n        actions.append(metric)\n    try:\n        self.client.bulk(actions)\n    except TransportError as exc:\n        logger.warning('bulk_write metrics %r failure %r', metrics, exc)", "docstring": "Write multiple metrics to elasticsearch in one request\n\nArgs:\nmetrics (list): data with mappings to send to elasticsearch", "source": "codesearchnet"}
{"code": "def register_add_grad(left_type, right_type, add_grad_function):\n    key = (left_type, right_type)\n    if (key in grad_adders):\n        raise ValueError(('Types %s already mapped to %s' % (key, grad_adders[key])))\n    grad_adders[key] = add_grad_function", "docstring": "Register a new gradient adder supporting the given types.\n\nGradient adders are used to add (in the sense of arithmetic addition)\nintermediate adjoint and tangent variables.\nTODO: Link to the document explaining the overall terminology and mechanics.\n\nArgs:\nleft_type: A Python type object. The data type of the left operand\nsupported by the adder.\nright_type: A Python type object. The data type of the right operand\nsupported by the adder.\nadd_grad_function: A binary function that takes two arguments, left and\nright, of the types left_type and right_type respectively, and returns\ntheir sum. For example, the gradient adder for Numpy objects is np.add.\n\nRaises:\nValueError: If the given type pair was already registered.", "source": "codesearchnet"}
{"code": "def _full_axis_reduce(self, axis, func, alternate_index=None):\n        \n        result = self.data.map_across_full_axis(axis, func)\n        if axis == 0:\n            columns = alternate_index if alternate_index is not None else self.columns\n            return self.__constructor__(result, index=[\"__reduced__\"], columns=columns)\n        else:\n            index = alternate_index if alternate_index is not None else self.index\n            return self.__constructor__(result, index=index, columns=[\"__reduced__\"])", "docstring": "Applies map that reduce Manager to series but require knowledge of full axis.\n\nArgs:\nfunc: Function to reduce the Manager by. This function takes in a Manager.\naxis: axis to apply the function to.\nalternate_index: If the resulting series should have an index\ndifferent from the current query_compiler's index or columns.\n\nReturn:\nPandas series containing the reduced data.", "source": "juraj-google-style"}
{"code": "def __init__(self, data_store, subject, lease_time=None):\n    \n    self.subject = utils.SmartStr(subject)\n    self.store = data_store\n    \n    self.expires = None\n    self.locked = False\n    if lease_time is None:\n      raise ValueError(\"Trying to lock without a lease time.\")\n    self._Acquire(lease_time)\n    self.lease_time = lease_time", "docstring": "Obtain the subject lock for lease_time seconds.\n\nThis is never called directly but produced from the\nDataStore.LockedSubject() factory.\n\nArgs:\ndata_store: A data_store handler.\nsubject: The name of a subject to lock.\nlease_time: The minimum length of time the lock will remain valid in\nseconds. Note this will be converted to usec for storage.\n\nRaises:\nValueError: No lease time was provided.", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(SignatureVerifyResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Parsed payload encoding is missing the unique identifier field.')\n    if self.is_tag_next(enums.Tags.VALIDITY_INDICATOR, local_stream):\n        self._validity_indicator = primitives.Enumeration(enums.ValidityIndicator, tag=enums.Tags.VALIDITY_INDICATOR)\n        self._validity_indicator.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Parsed payload encoding is missing the validity indicator field.')\n    if self.is_tag_next(enums.Tags.DATA, local_stream):\n        self._data = primitives.ByteString(tag=enums.Tags.DATA)\n        self._data.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.CORRELATION_VALUE, local_stream):\n        self._correlation_value = primitives.ByteString(tag=enums.Tags.CORRELATION_VALUE)\n        self._correlation_value.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the SignatureVerify response payload and decode\nit into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def __init__(self, *, object_hook=None, parse_float=None, parse_int=None,\n                 parse_constant=None, strict=True, object_pairs_hook=None):\n        \n        try:\n            super().__init__(\n                object_hook=self.object_hook,\n                parse_float=parse_float,\n                parse_int=parse_int,\n                parse_constant=parse_constant,\n                strict=strict,\n                object_pairs_hook=object_pairs_hook\n            )\n        except Exception:\n            log.exception('Failed loading JSON data')", "docstring": "Initialize the class, overriding the object hook\n\nArgs:\nobject_hook:\nparse_float:\nparse_int:\nparse_constant:\nstrict:\nobject_pairs_hook:", "source": "juraj-google-style"}
{"code": "def _output_dir(self, ext, is_instance=False, interpolatable=False, autohinted=False, is_variable=False):\n    assert (not (is_variable and any([is_instance, interpolatable])))\n    if is_variable:\n        dir_prefix = 'variable_'\n    elif is_instance:\n        dir_prefix = 'instance_'\n    else:\n        dir_prefix = 'master_'\n    dir_suffix = ('_interpolatable' if interpolatable else '')\n    output_dir = ((dir_prefix + ext) + dir_suffix)\n    if autohinted:\n        output_dir = os.path.join('autohinted', output_dir)\n    return output_dir", "docstring": "Generate an output directory.\n\nArgs:\next: extension string.\nis_instance: The output is instance font or not.\ninterpolatable: The output is interpolatable or not.\nautohinted: The output is autohinted or not.\nis_variable: The output is variable font or not.\nReturn:\noutput directory string.", "source": "codesearchnet"}
{"code": "def add_send_last_message(self, connection, send_last_message):\n        \n        self._send_last_message[connection] = send_last_message\n        LOGGER.debug(\"Added send_last_message function \"\n                     \"for connection %s\", connection)", "docstring": "Adds a send_last_message function to the Dispatcher's\ndictionary of functions indexed by connection.\n\nArgs:\nconnection (str): A locally unique identifier\nprovided by the receiver of messages.\nsend_last_message (fn): The method that should be called\nby the dispatcher to respond to messages which\narrive via connection, when the connection should be closed\nafter the message has been sent.", "source": "juraj-google-style"}
{"code": "def print(self, format=TEXT, output=sys.stdout, **kwargs):\n        \n        if format is None:\n            format = TEXT\n        if format == TEXT:\n            print(self._to_text(**kwargs), file=output)\n        elif format == CSV:\n            print(self._to_csv(**kwargs), file=output)\n        elif format == JSON:\n            print(self._to_json(**kwargs), file=output)", "docstring": "Print the object in a file or on standard output by default.\n\nArgs:\nformat (str): output format (csv, json or text).\noutput (file):\ndescriptor to an opened file (default to standard output).\n**kwargs (): additional arguments.", "source": "juraj-google-style"}
{"code": "def stop_rot_gradient(self) -> Rigid:\n    return self.apply_rot_fn(lambda r: r.detach())", "docstring": "Detaches the underlying rotation object\n\nReturns:\nA transformation object with detached rotations", "source": "github-repos"}
{"code": "def _Initialize(self, http, url):\n    self.EnsureUninitialized()\n    if (self.http is None):\n        self.__http = (http or http_wrapper.GetHttp())\n    self.__url = url", "docstring": "Initialize this download by setting self.http and self.url.\n\nWe want the user to be able to override self.http by having set\nthe value in the constructor; in that case, we ignore the provided\nhttp.\n\nArgs:\nhttp: An httplib2.Http instance or None.\nurl: The url for this transfer.\n\nReturns:\nNone. Initializes self.", "source": "codesearchnet"}
{"code": "def Compile(self, filter_implementation):\n    \n    self.attribute = self.swap_source.get(self.attribute, self.attribute)\n    arguments = [self.attribute]\n    op_str = self.operator.lower()\n    operator = filter_implementation.OPS.get(op_str, None)\n\n    if not operator:\n      raise errors.ParseError('Unknown operator {0:s} provided.'.format(\n          self.operator))\n\n    \n    \n    \n    if self.attribute == 'timestamp':\n      args = []\n      for argument in self.args:\n        args.append(DateCompareObject(argument))\n      self.args = args\n\n    for argument in self.args:\n      if isinstance(argument, DateCompareObject):\n        if 'Less' in str(operator):\n          TimeRangeCache.SetUpperTimestamp(argument.data)\n        else:\n          TimeRangeCache.SetLowerTimestamp(argument.data)\n    arguments.extend(self.args)\n    expander = filter_implementation.FILTERS['ValueExpander']\n    ops = operator(arguments=arguments, value_expander=expander)\n    if not self.bool_value:\n      if hasattr(ops, 'FlipBool'):\n        ops.FlipBool()\n\n    return ops", "docstring": "Compiles the filter implementation.\n\nArgs:\nfilter_implementation: a filter object (instance of objectfilter.TODO).\n\nReturns:\nA filter operator (instance of TODO).\n\nRaises:\nParserError: if an unknown operator is provided.", "source": "juraj-google-style"}
{"code": "def _client_receive(self):\n    try:\n        response = self._client.readline()\n        self.log.debug('Snippet received: %s', response)\n        return response\n    except socket.error as e:\n        raise Error(self._ad, ('Encountered socket error reading RPC response \"%s\"' % e))", "docstring": "Receives the server's response of an Rpc message.\n\nReturns:\nRaw byte string of the response.\n\nRaises:\nError: a socket error occurred during the read.", "source": "codesearchnet"}
{"code": "def getServerSSLContext(self, hostname=None):\n        \n        sslctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n        if hostname is None:\n            hostname = socket.gethostname()\n        certfile = self.getHostCertPath(hostname)\n        if certfile is None:\n            raise s_exc.NoCertKey('Missing .crt for %s' % hostname)\n        keyfile = self.getHostKeyPath(hostname)\n        if keyfile is None:\n            raise s_exc.NoCertKey('Missing .key for %s' % hostname)\n\n        sslctx.load_cert_chain(certfile, keyfile)\n\n        return sslctx", "docstring": "Returns an ssl.SSLContext appropriate to listen on a socket\n\nArgs:\nhostname:  if None, the value from socket.gethostname is used to find the key in the servers directory.\nThis name should match the not-suffixed part of two files ending in .key and .crt in the hosts subdirectory", "source": "juraj-google-style"}
{"code": "def start(self, host, nornir):\n    self.host = host\n    self.nornir = nornir\n    try:\n        logger.debug('Host %r: running task %r', self.host.name, self.name)\n        r = self.task(self, **self.params)\n        if (not isinstance(r, Result)):\n            r = Result(host=host, result=r)\n    except NornirSubTaskError as e:\n        tb = traceback.format_exc()\n        logger.error('Host %r: task %r failed with traceback:\\n%s', self.host.name, self.name, tb)\n        r = Result(host, exception=e, result=str(e), failed=True)\n    except Exception as e:\n        tb = traceback.format_exc()\n        logger.error('Host %r: task %r failed with traceback:\\n%s', self.host.name, self.name, tb)\n        r = Result(host, exception=e, result=tb, failed=True)\n    r.name = self.name\n    r.severity_level = (logging.ERROR if r.failed else self.severity_level)\n    self.results.insert(0, r)\n    return self.results", "docstring": "Run the task for the given host.\n\nArguments:\nhost (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right\nbefore calling the ``task``\nnornir(:obj:`nornir.core.Nornir`): Populated right before calling\nthe ``task``\n\nReturns:\nhost (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks", "source": "codesearchnet"}
{"code": "def fork(self, command: Command) \\\n            -> Tuple['SelectedMailbox', Iterable[Response]]:\n        \n        frozen = _Frozen(self)\n        cls = type(self)\n        copy = cls(self._guid, self._readonly, self._permanent_flags,\n                   self._session_flags, self._selected_set, self._lookup,\n                   _mod_sequence=self._mod_sequence,\n                   _prev=frozen, _messages=self._messages)\n        if self._prev is not None:\n            with_uid: bool = getattr(command, 'uid', False)\n            untagged = self._compare(self._prev, frozen, with_uid)\n        else:\n            untagged = []\n        return copy, untagged", "docstring": "Compares the state of the current object to that of the last fork,\nreturning the untagged responses that reflect any changes. A new copy\nof the object is also returned, ready for the next command.\n\nArgs:\ncommand: The command that was finished.", "source": "juraj-google-style"}
{"code": "def pluralize(singular):\n    if (singular in UNCOUNTABLES):\n        return singular\n    for i in IRREGULAR:\n        if (i[0] == singular):\n            return i[1]\n    for i in PLURALIZE_PATTERNS:\n        if re.search(i[0], singular):\n            return re.sub(i[0], i[1], singular)", "docstring": "Convert singular word to its plural form.\n\nArgs:\nsingular: A word in its singular form.\n\nReturns:\nThe word in its plural form.", "source": "codesearchnet"}
{"code": "def add_prefix(self, prefix, flags, prf):\n    self._req(('prefix add %s %s %s' % (prefix, flags, prf)))\n    time.sleep(1)\n    self._req('netdataregister')", "docstring": "Add network prefix.\n\nArgs:\nprefix (str): network prefix.\nflags (str): network prefix flags, please refer thread documentation for details\nprf (str): network prf, please refer thread documentation for details", "source": "codesearchnet"}
{"code": "def resolve_symbols(self, database, link_resolver, page=None):\n        \n\n        page = page or self.root\n\n        if page.ast is None and not page.generated:\n            with io.open(page.source_file, 'r', encoding='utf-8') as _:\n                page.ast = cmark.hotdoc_to_ast(_.read(), self)\n\n        page.resolve_symbols(self, database, link_resolver)\n        self.__update_dep_map(page, page.symbols)\n\n        for pagename in page.subpages:\n            cpage = self.__all_pages[pagename]\n            self.resolve_symbols(database, link_resolver, page=cpage)", "docstring": "Will call resolve_symbols on all the stale subpages of the tree.\nArgs:\npage: hotdoc.core.tree.Page, the page to resolve symbols in,\nwill recurse on potential subpages.", "source": "juraj-google-style"}
{"code": "def remove_codeblock_syntax_sentinals(code_text):\n    r\n    flags = re.MULTILINE | re.DOTALL\n    code_text_ = code_text\n    code_text_ = re.sub(r'^ *\n    code_text_ = re.sub(r'^ *\n    code_text_ = re.sub(r'^ *\n    code_text_ = code_text_.rstrip()\n    return code_text_", "docstring": "r\"\"\"\nRemoves template comments and vim sentinals\n\nArgs:\ncode_text (str):\n\nReturns:\nstr: code_text_", "source": "juraj-google-style"}
{"code": "async def init(\n        self,\n        *,\n        advertise_addr: str = None,\n        listen_addr: str = \"0.0.0.0:2377\",\n        force_new_cluster: bool = False,\n        swarm_spec: Mapping = None\n    ) -> str:\n        \n\n        data = {\n            \"AdvertiseAddr\": advertise_addr,\n            \"ListenAddr\": listen_addr,\n            \"ForceNewCluster\": force_new_cluster,\n            \"Spec\": swarm_spec,\n        }\n\n        response = await self.docker._query_json(\"swarm/init\", method=\"POST\", data=data)\n\n        return response", "docstring": "Initialize a new swarm.\n\nArgs:\nListenAddr: listen address used for inter-manager communication\nAdvertiseAddr: address advertised to other nodes.\nForceNewCluster: Force creation of a new swarm.\nSwarmSpec: User modifiable swarm configuration.\n\nReturns:\nid of the swarm node", "source": "juraj-google-style"}
{"code": "def path_set_md5(url):\n    (scheme, netloc, path, query_string, fragment) = urlsplit(url)\n    path += '.md5'\n    return urlunsplit((scheme, netloc, path, query_string, fragment))", "docstring": "Given a file URL, return a md5 query of the file\n\nArgs:\nurl: a given URL\nReturns:\nURL of the md5 file", "source": "codesearchnet"}
{"code": "def near_reduce(self, coords_set, threshold=1e-4):\n        \n        unique_coords = []\n        coords_set = [self.slab.lattice.get_fractional_coords(coords)\n                      for coords in coords_set]\n        for coord in coords_set:\n            if not in_coord_list_pbc(unique_coords, coord, threshold):\n                unique_coords += [coord]\n        return [self.slab.lattice.get_cartesian_coords(coords)\n                for coords in unique_coords]", "docstring": "Prunes coordinate set for coordinates that are within\nthreshold\n\nArgs:\ncoords_set (Nx3 array-like): list or array of coordinates\nthreshold (float): threshold value for distance", "source": "juraj-google-style"}
{"code": "async def addNodeTag(self, iden, tag, valu=(None, None)):\n        \n        buid = s_common.uhex(iden)\n\n        parts = tag.split('.')\n        self._reqUserAllowed('tag:add', *parts)\n\n        async with await self.cell.snap(user=self.user) as snap:\n            with s_provenance.claim('coreapi', meth='tag:add', user=snap.user.iden):\n\n                node = await snap.getNodeByBuid(buid)\n                if node is None:\n                    raise s_exc.NoSuchIden(iden=iden)\n\n                await node.addTag(tag, valu=valu)\n                return node.pack()", "docstring": "Add a tag to a node specified by iden.\n\nArgs:\niden (str): A hex encoded node BUID.\ntag (str):  A tag string.\nvalu (tuple):  A time interval tuple or (None, None).", "source": "juraj-google-style"}
{"code": "def validate_element(self, value):\n    if isinstance(value, bytes):\n        try:\n            six.text_type(value, 'UTF-8')\n        except UnicodeDecodeError as err:\n            try:\n                _ = self.name\n            except AttributeError:\n                validation_error = ValidationError(('Field encountered non-UTF-8 string %r: %s' % (value, err)))\n            else:\n                validation_error = ValidationError(('Field %s encountered non-UTF-8 string %r: %s' % (self.name, value, err)))\n                validation_error.field_name = self.name\n            raise validation_error\n    else:\n        return super(StringField, self).validate_element(value)\n    return value", "docstring": "Validate StringField allowing for str and unicode.\n\nRaises:\nValidationError if a str value is not UTF-8.", "source": "codesearchnet"}
{"code": "def set_control_scheme(self, agent_name, control_scheme):\n    if (agent_name not in self.agents):\n        print(('No such agent %s' % agent_name))\n    else:\n        self.agents[agent_name].set_control_scheme(control_scheme)", "docstring": "Set the control scheme for a specific agent.\n\nArgs:\nagent_name (str): The name of the agent to set the control scheme for.\ncontrol_scheme (int): A control scheme value (see :obj:`holodeck.agents.ControlSchemes`)", "source": "codesearchnet"}
{"code": "def dvds_top_rentals(self, **kwargs):\n        \n        path = self._get_path('dvds_top_rentals')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the current opening movies from the API.\n\nArgs:\nlimit (optional): limits the number of movies returned, default=10\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def learn(self, grad_arr):\n        \n        if grad_arr.ndim > 3:\n            grad_arr = grad_arr.reshape((\n                grad_arr.shape[0],\n                grad_arr.shape[1],\n                -1\n            ))\n            grad_arr = grad_arr[:, -1]\n        elif grad_arr.ndim == 3:\n            grad_arr = grad_arr[:, -1]\n\n        delta_arr, _, grads_list = self.__lstm_model.hidden_back_propagate(grad_arr)\n        grads_list.insert(0, None)\n        grads_list.insert(0, None)\n\n        self.__lstm_model.optimize(\n            grads_list,\n            self.__learning_rate,\n            1\n        )\n\n        return delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\ngrad_arr:   `np.ndarray` of gradients.\n\nReturns:\n`np.ndarray` of delta or gradients.", "source": "juraj-google-style"}
{"code": "async def send_command(self, command):\n        \n        command = \"{}\\r\\n\".format(command).encode(\"ascii\", errors=\"backslashreplace\")\n\n        self.write(command)\n\n        \n        await self.drain()", "docstring": "Sends the given command to the server.\n\nArgs:\ncommand (str): Command to send to the server.\n\nRaises:\nConnectionResetError: If the connection with the server is lost.\n(Shouldn't it raise BrokenPipeError too ?)", "source": "juraj-google-style"}
{"code": "def random(cls, num_qubits, seed=None):\n        \n        if seed is not None:\n            np.random.seed(seed)\n        z = np.random.randint(2, size=num_qubits).astype(np.bool)\n        x = np.random.randint(2, size=num_qubits).astype(np.bool)\n        return cls(z, x)", "docstring": "Return a random Pauli on number of qubits.\n\nArgs:\nnum_qubits (int): the number of qubits\nseed (int): Optional. To set a random seed.\nReturns:\nPauli: the random pauli", "source": "juraj-google-style"}
{"code": "def ReadByte(self, do_ord=True):\n    try:\n        if do_ord:\n            return ord(self.stream.read(1))\n        return self.stream.read(1)\n    except Exception as e:\n        logger.error('ord expected character but got none')\n    return 0", "docstring": "Read a single byte.\n\nArgs:\ndo_ord (bool): (default True) convert the byte to an ordinal first.\n\nReturns:\nbytes: a single byte if successful. 0 (int) if an exception occurred.", "source": "codesearchnet"}
{"code": "def quaternion_from_euler(angles, order='yzy'):\n    \n    angles = np.asarray(angles, dtype=float)\n    quat = quaternion_from_axis_rotation(angles[0], order[0])\\\n        * (quaternion_from_axis_rotation(angles[1], order[1])\n           * quaternion_from_axis_rotation(angles[2], order[2]))\n    quat.normalize(inplace=True)\n    return quat", "docstring": "Generate a quaternion from a set of Euler angles.\n\nArgs:\nangles (array_like): Array of Euler angles.\norder (str): Order of Euler rotations.  'yzy' is default.\n\nReturns:\nQuaternion: Quaternion representation of Euler rotation.", "source": "juraj-google-style"}
{"code": "def merge_layouts(layouts):\n    layout = layouts[0].clone()\n    for l in layouts[1:]:\n        layout.files.update(l.files)\n        layout.domains.update(l.domains)\n        for (k, v) in l.entities.items():\n            if (k not in layout.entities):\n                layout.entities[k] = v\n            else:\n                layout.entities[k].files.update(v.files)\n    return layout", "docstring": "Utility function for merging multiple layouts.\n\nArgs:\nlayouts (list): A list of BIDSLayout instances to merge.\nReturns:\nA BIDSLayout containing merged files and entities.\nNotes:\nLayouts will be merged in the order of the elements in the list. I.e.,\nthe first Layout will be updated with all values in the 2nd Layout,\nthen the result will be updated with values from the 3rd Layout, etc.\nThis means that order matters: in the event of entity or filename\nconflicts, later layouts will take precedence.", "source": "codesearchnet"}
{"code": "def aoi(self, **kwargs):\n    g = self._parse_geoms(**kwargs)\n    if (g is None):\n        return self\n    else:\n        return self[g]", "docstring": "Subsets the Image by the given bounds\n\nArgs:\nbbox (list): optional. A bounding box array [minx, miny, maxx, maxy]\nwkt (str): optional. A WKT geometry string\ngeojson (str): optional. A GeoJSON geometry dictionary\n\nReturns:\nimage: an image instance of the same type", "source": "codesearchnet"}
{"code": "def SetCodepage(self, codepage):\n    try:\n        codecs.getencoder(codepage)\n        self._codepage = codepage\n    except LookupError:\n        raise ValueError('Unsupported codepage: {0:s}'.format(codepage))", "docstring": "Sets the codepage.\n\nArgs:\ncodepage (str): codepage.\n\nRaises:\nValueError: if the codepage is not supported.", "source": "codesearchnet"}
{"code": "def _partitioner(shape, dtype):\n    if axis >= len(shape):\n        raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n    dtype = dtypes.as_dtype(dtype)\n    if dtype.base_dtype == dtypes.string:\n        bytes_per_element = bytes_per_string_element\n    else:\n        bytes_per_element = dtype.size\n    total_size_bytes = shape.num_elements() * bytes_per_element\n    partitions = total_size_bytes / min_slice_size\n    partitions_list = [1] * len(shape)\n    partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n    return partitions_list", "docstring": "Partitioner that partitions list for a variable of given shape and type.\n\nEx: Consider partitioning a variable of type float32 with\nshape=[1024, 1024].\nIf `max_partitions` >= 16, this function would return\n[(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].\nIf `max_partitions` < 16, this function would return\n[`max_partitions`, 1].\n\nArgs:\nshape: Shape of the variable.\ndtype: Type of the variable.\n\nReturns:\nList of partitions for each axis (currently only one axis can be\npartitioned).\n\nRaises:\nValueError: If axis to partition along does not exist for the variable.", "source": "github-repos"}
{"code": "def handle_import_error(caught_exc, name):\n    for template in TEMPLATES:\n        expected_msg = template.format(name)\n        if (caught_exc.args == (expected_msg,)):\n            return\n    raise caught_exc", "docstring": "Allow or re-raise an import error.\n\nThis is to distinguish between expected and unexpected import errors.\nIf the module is not found, it simply means the Cython / Fortran speedups\nwere not built with the package. If the error message is different, e.g.\n``... undefined symbol: __curve_intersection_MOD_all_intersections``, then\nthe import error **should** be raised.\n\nArgs:\ncaught_exc (ImportError): An exception caught when trying to import\na Cython module.\nname (str): The name of the module. For example, for the module\n``bezier._curve_speedup``, the name is ``\"_curve_speedup\"``.\n\nRaises:\nImportError: If the error message is different than the basic\n\"missing module\" error message.", "source": "codesearchnet"}
{"code": "def code_memory_read(self, addr, num_bytes):\n    buf_size = num_bytes\n    buf = (ctypes.c_uint8 * buf_size)()\n    res = self._dll.JLINKARM_ReadCodeMem(addr, buf_size, buf)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return list(buf)[:res]", "docstring": "Reads bytes from code memory.\n\nNote:\nThis is similar to calling ``memory_read`` or ``memory_read8``,\nexcept that this uses a cache and reads ahead.  This should be used\nin instances where you want to read a small amount of bytes at a\ntime, and expect to always read ahead.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): starting address from which to read\nnum_bytes (int): number of bytes to read\n\nReturns:\nA list of bytes read from the target.\n\nRaises:\nJLinkException: if memory could not be read.", "source": "codesearchnet"}
{"code": "def _update_replica(self, update_fn, value, **kwargs):\n    if self._policy:\n        return self._policy._update_replica(self, update_fn, value, **kwargs)\n    raise NotImplementedError(f'DistributedVariable._update_replica requires a valid VariablePolicy. Please set the policy via the `var_policy` argument in the constructor, or override this method in sub-classes which support cross-replica accesses. Type name is {type(self)}')", "docstring": "Applies updates in one replica.\n\nArgs:\nupdate_fn: A callable to update the variable. It should has the same\nsignature as `Variable.assign()`.\nvalue: value to be passed to `update_fn`.\n**kwargs: remaining arguments to `update_fn`.\n\nReturns:\nUpdated variable or `tf.Operation`.", "source": "github-repos"}
{"code": "def overlapping(self, variant_obj):\n    category = ('snv' if (variant_obj['category'] == 'sv') else 'sv')\n    query = {'$and': [{'case_id': variant_obj['case_id']}, {'category': category}, {'hgnc_ids': {'$in': variant_obj['hgnc_ids']}}]}\n    sort_key = [('rank_score', pymongo.DESCENDING)]\n    variants = self.variant_collection.find(query).sort(sort_key).limit(30)\n    return variants", "docstring": "Return overlapping variants.\n\nLook at the genes that a variant overlaps to.\nThen return all variants that overlap these genes.\n\nIf variant_obj is sv it will return the overlapping snvs and oposite\nThere is a problem when SVs are huge since there are to many overlapping variants.\n\nArgs:\nvariant_obj(dict)\n\nReturns:\nvariants(iterable(dict))", "source": "codesearchnet"}
{"code": "def get_job(self, job_resource_name: str) -> Dict:\n    return self.service.projects().programs().jobs().get(name=job_resource_name).execute()", "docstring": "Returns metadata about a previously created job.\n\nSee get_job_result if you want the results of the job and not just\nmetadata about the job.\n\nParams:\njob_resource_name: A string of the form\n`projects/project_id/programs/program_id/jobs/job_id`.\n\nReturns:\nA dictionary containing the metadata.", "source": "codesearchnet"}
{"code": "def filter_out_spontaneous_genes(genes, custom_spont_id=None):\n    \n    new_genes = DictList()\n    for gene in genes:\n        if not is_spontaneous(gene, custom_id=custom_spont_id):\n            new_genes.append(gene)\n\n    return new_genes", "docstring": "Return the DictList of genes that are not spontaneous in a model.\n\nArgs:\ngenes (DictList): Genes DictList\ncustom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nDictList: genes excluding ones that are spontaneous", "source": "juraj-google-style"}
{"code": "def atan(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.atan2, tf.float32)", "docstring": "Returns a TensorFluent for the arctan function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the arctan function.", "source": "juraj-google-style"}
{"code": "def _GetArchiveTypes(self, mediator, path_spec):\n    \n    try:\n      type_indicators = analyzer.Analyzer.GetArchiveTypeIndicators(\n          path_spec, resolver_context=mediator.resolver_context)\n    except IOError as exception:\n      type_indicators = []\n\n      warning_message = (\n          'analyzer failed to determine archive type indicators '\n          'with error: {0!s}').format(exception)\n      mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)\n\n    return type_indicators", "docstring": "Determines if a data stream contains an archive such as: TAR or ZIP.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\npath_spec (dfvfs.PathSpec): path specification of the data stream.\n\nReturns:\nlist[str]: dfVFS archive type indicators found in the data stream.", "source": "juraj-google-style"}
{"code": "def generate_defect_structure(self, supercell=(1, 1, 1)):\n        \n        defect_structure = self.bulk_structure.copy()\n        defect_structure.make_supercell(supercell)\n\n        \n        \n        defect_properties = self.site.properties.copy()\n        if ('velocities' in self.bulk_structure.site_properties) and \\\n            'velocities' not in defect_properties:\n            if all( vel == self.bulk_structure.site_properties['velocities'][0]\n                    for vel in self.bulk_structure.site_properties['velocities']):\n                defect_properties['velocities'] = self.bulk_structure.site_properties['velocities'][0]\n            else:\n                raise ValueError(\"No velocity property specified for defect site and \"\n                                 \"bulk_structure velocities are not homogeneous. Please specify this \"\n                                 \"property within the initialized defect_site object.\")\n\n        \n        site_properties_for_fake_struct = {prop: [val] for prop,val in defect_properties.items()}\n        struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,\n                                             [self.site.specie],\n                                             [self.site.frac_coords],\n                                             to_unit_cell=True,\n                                             site_properties = site_properties_for_fake_struct)\n        struct_for_defect_site.make_supercell(supercell)\n        defect_site = struct_for_defect_site[0]\n\n        defect_structure.append(self.site.specie.symbol, defect_site.coords, coords_are_cartesian=True,\n                                properties = defect_site.properties)\n        defect_structure.set_charge(self.charge)\n        return defect_structure", "docstring": "Returns Defective Interstitial structure, decorated with charge\nArgs:\nsupercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix", "source": "juraj-google-style"}
{"code": "def matmul_and_same_scale(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')\n    if self.same_scale_op == 'concatenate':\n        ones = array_ops.ones_like(out)\n        out = array_ops.concat([out, ones], 0)\n    elif self.same_scale_op == 'gather':\n        out = array_ops.gather(out, indices=[0], axis=0)\n    elif self.same_scale_op == 'max_pool':\n        out = nn_ops.max_pool(out, ksize=3, strides=1, padding='SAME')\n    elif self.same_scale_op == 'pad':\n        paddings = array_ops.ones((array_ops.rank(out), 2), dtype=dtypes.int32)\n        out = array_ops.pad(out, paddings, 'CONSTANT')\n    elif self.same_scale_op == 'reshape':\n        out = array_ops.reshape(out, [-1])\n    elif self.same_scale_op == 'select':\n        rng = np.random.default_rng(seed=1234)\n        condition = ops.convert_to_tensor(rng.uniform(low=0.0, high=1.0, size=out.shape) < 0.5)\n        ones = array_ops.ones_like(out)\n        out = math_ops.select(condition, out, ones)\n    elif self.same_scale_op == 'slice':\n        begin = array_ops.zeros(array_ops.rank(out), dtype=dtypes.int32)\n        size = array_ops.ones(array_ops.rank(out), dtype=dtypes.int32)\n        out = array_ops.slice(out, begin, size)\n    elif self.same_scale_op == 'transpose':\n        out = array_ops.transpose(out)\n    else:\n        raise NotImplementedError('{} is not implemented for integration test.'.format(self.same_scale_op))\n    return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\ninput_tensor: Input tensor to matmul with the filter.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    if vision_feature_select_strategy not in ['default', 'full']:\n        raise ValueError(f'Unexpected select feature strategy: {self.config.vision_feature_select_strategy}')\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n    image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs)\n    if isinstance(vision_feature_layer, int):\n        selected_image_feature = image_outputs.hidden_states[vision_feature_layer]\n        if vision_feature_select_strategy == 'default':\n            selected_image_feature = selected_image_feature[:, 1:]\n    else:\n        hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        if vision_feature_select_strategy == 'default':\n            hs_pool = [hs[:, 1:] for hs in hs_pool]\n        selected_image_feature = torch.cat(hs_pool, dim=-1)\n    image_features = self.multi_modal_projector(selected_image_feature)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):\nThe tensors corresponding to the input images.\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`, *optional*):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def get_slices(lines, clean_lines):\n    indices = np.zeros(len(lines))\n    for i in range(len(lines) - 1):\n        j = i + 1\n        while not clean_lines[j] and j < len(lines) - 1:\n            j += 1\n        if len(clean_lines[i]) < 200 and len(clean_lines[i]) > 3 and (len(clean_lines[j]) < 200) and (len(clean_lines[j]) > 3) and (not clean_lines[i].startswith('[MISSING_PAGE')) and (clean_lines[i] == clean_lines[j] or ratio(clean_lines[i], clean_lines[j]) > 0.9):\n            indices[i:j] = 1\n    ids = np.where(indices)[0]\n    slices = []\n    if len(ids) == 0:\n        return slices\n    j0 = 0\n    for j, x in enumerate(np.diff(ids) > 3):\n        if x:\n            slices.append((ids[j0], ids[j] + 2))\n            j0 = j + 1\n    slices.append((ids[j0], ids[-1] + 2))\n    return [sli for sli in slices if sli[1] - sli[0] > 15]", "docstring": "Get slices of text based on specific criteria within the lines.\n\nThis function identifies and returns slices of text from the input lines based on certain conditions.\n\nThese conditions were chosen by the Nougat authors:\n- The slice is less than 200 characters long.\n- The slice is more than 3 characters long.\n- The slice does not start with \"[MISSING_PAGE\".\n- The slice is either the same as the next slice or the ratio of the two in terms of Levensthein distance is\ngreater than 0.9.\n\nArgs:\nlines (`List[str]`):\nThe list of lines containing the text.\nclean_lines (`List[str]`):\nA cleaned version of the text (without numbers).\n\nReturns:\n`List[tuple]`: A list of tuples representing the start and end indices of text slices.", "source": "github-repos"}
{"code": "def extract_attribute_array(self, data_array, var_name):\n        \n        if var_name not in self.attributes.keys():\n            self.attributes[var_name] = []\n        for t in range(self.times.size):\n            self.attributes[var_name].append(data_array[self.i[t], self.j[t]])", "docstring": "Extracts data from a 2D array that has the same dimensions as the grid used to identify the object.\n\nArgs:\ndata_array: 2D numpy array", "source": "juraj-google-style"}
{"code": "def update_phase(self, environment, data, prediction, user, item, correct, time, answer_id, **kwargs):\n    pass", "docstring": "After the prediction update the environment and persist some\ninformation for the predictive model.\n\nArgs:\nenvironment (proso.models.environment.Environment):\nenvironment where all the important data are persist\ndata (object):\ndata from the prepare phase\nuser (int):\nidentifier of the user answering the question\nitem (int):\nidentifier of the question item\ncorrect (bool):\ncorretness of the answer", "source": "codesearchnet"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False, vslvm_logical_volume=None):\n    \n    if not is_virtual and vslvm_logical_volume is None:\n      vslvm_logical_volume = file_system.GetLVMLogicalVolumeByPathSpec(\n          path_spec)\n    if not is_virtual and vslvm_logical_volume is None:\n      raise errors.BackEndError(\n          'Missing vslvm logical volume in non-virtual file entry.')\n\n    super(LVMFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._name = None\n    self._vslvm_logical_volume = vslvm_logical_volume\n\n    if self._is_virtual:\n      self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY\n    else:\n      self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\nvslvm_logical_volume (Optional[pyvslvm.logical_volume]): a LVM logical\nvolume.", "source": "juraj-google-style"}
{"code": "def _extract_storage_api_response_error(message):\n  \n  try:\n    if len(message) == 3:\n      \n      data = json.loads(message[2])\n      return data['error']['errors'][0]['message']\n  except Exception:\n    pass\n  return message", "docstring": "A helper function to extract user-friendly error messages from service exceptions.\n\nArgs:\nmessage: An error message from an exception. If this is from our HTTP client code, it\nwill actually be a tuple.\n\nReturns:\nA modified version of the message that is less cryptic.", "source": "juraj-google-style"}
{"code": "def get_logfile_name(tags):\n  \n  if not os.path.exists(sd.LOG_DIR):\n    os.mkdir(sd.LOG_DIR)\n  filename = \"log\"\n  for tag in tags:\n    filename += \"_{}\".format(tag)\n  filename += \".txt\"\n  filename = os.path.join(sd.LOG_DIR,filename)\n  return filename", "docstring": "Formulates a log file name that incorporates the provided tags.\n\nThe log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.\n\nArgs:\ntags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag\nwill be added in the same order as provided.", "source": "juraj-google-style"}
{"code": "def _list_to_complex_array(complex_list):\n    \n    arr = np.asarray(complex_list, dtype=np.complex_)\n    if not arr.shape[-1] == 2:\n        raise QiskitError('Inner most nested list is not of length 2.')\n\n    return arr[..., 0] + 1j*arr[..., 1]", "docstring": "Convert nested list of shape (..., 2) to complex numpy array with shape (...)\n\nArgs:\ncomplex_list (list): List to convert.\n\nReturns:\nnp.ndarray: Complex numpy aray\n\nRaises:\nQiskitError: If inner most array of input nested list is not of length 2.", "source": "juraj-google-style"}
{"code": "def _TabbedContinuationAlignPadding(spaces, align_style, tab_width):\n    if align_style in ('FIXED', 'VALIGN-RIGHT'):\n        if spaces > 0:\n            return '\\t' * int((spaces + tab_width - 1) / tab_width)\n        return ''\n    return ' ' * spaces", "docstring": "Build padding string for continuation alignment in tabbed indentation.\n\nArguments:\nspaces: (int) The number of spaces to place before the token for alignment.\nalign_style: (str) The alignment style for continuation lines.\ntab_width: (int) Number of columns of each tab character.\n\nReturns:\nA padding string for alignment with style specified by align_style option.", "source": "github-repos"}
{"code": "def ReadVFS(pathspec, offset, length, progress_callback=None):\n  \n  fd = VFSOpen(pathspec, progress_callback=progress_callback)\n  fd.Seek(offset)\n  return fd.Read(length)", "docstring": "Read from the VFS and return the contents.\n\nArgs:\npathspec: path to read from\noffset: number of bytes to skip\nlength: number of bytes to read\nprogress_callback: A callback to indicate that the open call is still\nworking but needs more time.\n\nReturns:\nVFS file contents", "source": "juraj-google-style"}
{"code": "def clear_operations_touching(self,\n                                  qubits: Iterable[ops.Qid],\n                                  moment_indices: Iterable[int]):\n        \n        qubits = frozenset(qubits)\n        for k in moment_indices:\n            if 0 <= k < len(self._moments):\n                self._moments[k] = self._moments[k].without_operations_touching(\n                    qubits)", "docstring": "Clears operations that are touching given qubits at given moments.\n\nArgs:\nqubits: The qubits to check for operations on.\nmoment_indices: The indices of moments to check for operations\nwithin.", "source": "juraj-google-style"}
{"code": "def read_stream(self, start_offset=0, byte_count=None):\n    try:\n        return self._api.object_download(self._bucket, self._key, start_offset=start_offset, byte_count=byte_count)\n    except Exception as e:\n        raise e", "docstring": "Reads the content of this object as text.\n\nArgs:\nstart_offset: the start offset of bytes to read.\nbyte_count: the number of bytes to read. If None, it reads to the end.\nReturns:\nThe text content within the object.\nRaises:\nException if there was an error requesting the object's content.", "source": "codesearchnet"}
{"code": "def get_local_aws_session():\n    if (not all((app_config.aws_api.access_key, app_config.aws_api.secret_key))):\n        return boto3.session.Session()\n    else:\n        session_args = [app_config.aws_api.access_key, app_config.aws_api.secret_key]\n        if app_config.aws_api.session_token:\n            session_args.append(app_config.aws_api.session_token)\n        return boto3.session.Session(*session_args)", "docstring": "Returns a session for the local instance, not for a remote account\n\nReturns:\n:obj:`boto3:boto3.session.Session`", "source": "codesearchnet"}
{"code": "def maybe_download_image_dataset(image_ids, target_dir):\n  \n\n  tf.gfile.MakeDirs(target_dir)\n\n  num_images = len(image_ids)\n\n  for i, image_id in enumerate(image_ids):\n\n    destination = os.path.join(target_dir, \"%s.jpg\" % i)\n    tmp_destination = \"%s.temp\" % destination\n\n    source_url = (\"http:\n                  \"section_image_download/%s\" % image_id)\n\n    if tf.gfile.Exists(destination):\n      tf.logging.info(\"Image with ID already present, \"\n                      \"skipping download (%s of %s).\" % (\n                          i+1, num_images\n                      ))\n      continue\n\n    tf.logging.info(\"Downloading image with id %s (%s of %s)\" % (\n        image_id, i+1, num_images\n    ))\n\n    response = requests.get(source_url, stream=True)\n\n    response.raise_for_status()\n\n    with tf.gfile.Open(tmp_destination, \"w\") as f:\n      for block in response.iter_content(1024):\n        f.write(block)\n\n    tf.gfile.Rename(tmp_destination, destination)", "docstring": "Download a set of images from api.brain-map.org to `target_dir`.\n\nArgs:\nimage_ids: list, a list of image ids.\ntarget_dir: str, a directory to which to download the images.", "source": "juraj-google-style"}
{"code": "def ExpectedEnginesToBuild(self, run_params):\n    if run_params.dynamic_shape:\n        return ['TRTEngineOp_000']\n    else:\n        return ['TRTEngineOp_000', 'TRTEngineOp_001']", "docstring": "Check that the expected engine is built.\n\nArgs:\nrun_params: the run parameters.\n\nReturns:\nthe expected engines to build.\n\nThe squeeze op is not converted by TensorRT in implicit batch mode.\nBecause of this we have two TRTEngineOp in the graphs: one for the\nsubgraph before 'squeeze(q,0)', and another one for the rest of the ops\nafter the 'squeeze(q,0)'.\n\nIn explicit batch mode the whole graph is converted using a single engine.", "source": "github-repos"}
{"code": "def as_data_frame(self, **kwargs):\n    try:\n        import pandas as pd\n    except ImportError:\n        raise ImportError(\"What are you doing trying to export a Layout as a pandas DataFrame when you don't have pandas installed? Eh? Eh?\")\n    if kwargs:\n        files = self.get(return_type='obj', **kwargs)\n    else:\n        files = self.files.values()\n    data = pd.DataFrame.from_records([f.entities for f in files])\n    data.insert(0, 'path', [f.path for f in files])\n    return data", "docstring": "Return information for all Files tracked in the Layout as a pandas\nDataFrame.\n\nArgs:\nkwargs: Optional keyword arguments passed on to get(). This allows\none to easily select only a subset of files for export.\nReturns:\nA pandas DataFrame, where each row is a file, and each column is\na tracked entity. NaNs are injected whenever a file has no\nvalue for a given attribute.", "source": "codesearchnet"}
{"code": "def _container_handler(ion_type, ctx):\n    \n    transition = None\n    first = True\n    at_top = ctx.depth == 0\n    while True:\n        data_event, self = (yield transition)\n        if data_event is not None and data_event.type is ReadEventType.SKIP:\n            yield ctx.read_data_transition(ctx.remaining, self, skip=True)\n\n        if ctx.queue.position == ctx.limit:\n            \n            \n            yield Transition(\n                IonEvent(IonEventType.CONTAINER_END, ion_type, depth=ctx.depth-1),\n                ctx.whence\n            )\n\n        if ion_type is IonType.STRUCT:\n            \n            self_handler = _create_delegate_handler(self)\n            (field_sid, _), _ = yield ctx.immediate_transition(\n                _var_uint_field_handler(self_handler, ctx)\n            )\n            field_name = SymbolToken(None, field_sid)\n        else:\n            field_name = None\n\n        expects_ivm = first and at_top\n        transition = ctx.immediate_transition(\n            _start_type_handler(field_name, self, ctx, expects_ivm, at_top=at_top)\n        )\n        first = False", "docstring": "Handler for the body of a container (or the top-level stream).\n\nArgs:\nion_type (Optional[IonType]): The type of the container or ``None`` for the top-level.\nctx (_HandlerContext): The context for the container.", "source": "juraj-google-style"}
{"code": "def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream):\n    \n    display_name = mediator.GetDisplayName()\n    data_stream_name = getattr(data_stream, 'name', '') or ''\n    logger.debug((\n        '[ProcessFileEntryDataStream] processing data stream: \"{0:s}\" of '\n        'file entry: {1:s}').format(data_stream_name, display_name))\n\n    mediator.ClearEventAttributes()\n\n    if data_stream and self._analyzers:\n      \n      \n      self._AnalyzeDataStream(mediator, file_entry, data_stream.name)\n\n    self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream)\n\n    \n    \n    if not data_stream:\n      return\n\n    \n    skip_content_extraction = self._CanSkipContentExtraction(file_entry)\n    if skip_content_extraction:\n      display_name = mediator.GetDisplayName()\n      logger.debug(\n          'Skipping content extraction of: {0:s}'.format(display_name))\n      self.processing_status = definitions.STATUS_INDICATOR_IDLE\n      return\n\n    path_spec = copy.deepcopy(file_entry.path_spec)\n    if data_stream and not data_stream.IsDefault():\n      path_spec.data_stream = data_stream.name\n\n    archive_types = []\n    compressed_stream_types = []\n\n    if self._process_compressed_streams:\n      compressed_stream_types = self._GetCompressedStreamTypes(\n          mediator, path_spec)\n\n    if not compressed_stream_types:\n      archive_types = self._GetArchiveTypes(mediator, path_spec)\n\n    if archive_types:\n      if self._process_archives:\n        self._ProcessArchiveTypes(mediator, path_spec, archive_types)\n\n      if dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types:\n        \n        self._ExtractContentFromDataStream(\n            mediator, file_entry, data_stream.name)\n\n    elif compressed_stream_types:\n      self._ProcessCompressedStreamTypes(\n          mediator, path_spec, compressed_stream_types)\n\n    else:\n      self._ExtractContentFromDataStream(\n          mediator, file_entry, data_stream.name)", "docstring": "Processes a specific data stream of a file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry containing the data stream.\ndata_stream (dfvfs.DataStream): data stream or None if the file entry\nhas no data stream.", "source": "juraj-google-style"}
{"code": "def get_lower_bound(self):\n    lower_bounds = []\n    for distribution in self.distribs.values():\n        lower_bound = distribution.percent_point((distribution.mean / 10000))\n        if (not pd.isnull(lower_bound)):\n            lower_bounds.append(lower_bound)\n    return min(lower_bounds)", "docstring": "Compute the lower bound to integrate cumulative density.\n\nReturns:\nfloat: lower bound for cumulative density integral.", "source": "codesearchnet"}
{"code": "def iter_archive(self, resource):\n    if isinstance(resource, six.string_types):\n        resource = resource_lib.Resource(path=resource)\n    return extractor.iter_archive(resource.path, resource.extract_method)", "docstring": "Returns iterator over files within archive.\n\n**Important Note**: caller should read files as they are yielded.\nReading out of order is slow.\n\nArgs:\nresource: path to archive or `tfds.download.Resource`.\n\nReturns:\nGenerator yielding tuple (path_within_archive, file_obj).", "source": "codesearchnet"}
{"code": "def snake_case_to_headless_camel_case(snake_string):\n  \n  return ''.join([snake_string.split('_')[0]] +\n                 list(sub_string.capitalize()\n                      for sub_string in snake_string.split('_')[1:]))", "docstring": "Convert snake_case to headlessCamelCase.\n\nArgs:\nsnake_string: The string to be converted.\nReturns:\nThe input string converted to headlessCamelCase.", "source": "juraj-google-style"}
{"code": "def __call__(self, stream, content_type=CONTENT_TYPE_NPY):\n        \n        try:\n            if content_type == CONTENT_TYPE_CSV:\n                return np.genfromtxt(codecs.getreader('utf-8')(stream), delimiter=',', dtype=self.dtype)\n            elif content_type == CONTENT_TYPE_JSON:\n                return np.array(json.load(codecs.getreader('utf-8')(stream)), dtype=self.dtype)\n            elif content_type == CONTENT_TYPE_NPY:\n                return np.load(BytesIO(stream.read()))\n        finally:\n            stream.close()", "docstring": "Decode from serialized data into a Numpy array.\n\nArgs:\nstream (stream): The response stream to be deserialized.\ncontent_type (str): The content type of the response. Can accept CSV, JSON, or NPY data.\n\nReturns:\nobject: Body of the response deserialized into a Numpy array.", "source": "juraj-google-style"}
{"code": "def info_gen(self, code, message, compressed=False):\n        \n        if \"COMPRESS=GZIP\" in message:\n            return self.__info_gzip_gen()\n        if compressed:\n            return self.__info_yenczlib_gen()\n        return self.__info_plain_gen()", "docstring": "Dispatcher for the info generators.\n\nDetermines which __info_*_gen() should be used based on the supplied\nparameters.\n\nArgs:\ncode: The status code for the command response.\nmessage: The status message for the command reponse.\ncompressed: Force decompression. Useful for xz* commands.\n\nReturns:\nAn info generator.", "source": "juraj-google-style"}
{"code": "def concretize(self):\n        \n        dfa = DFA(self.alphabet)\n        for state in self.states:\n            for arc in state.arcs:\n                for char in arc.guard:\n                    dfa.add_arc(arc.src_state, arc.dst_state, char)\n\n        for i in xrange(len(self.states)):\n            if self.states[i].final:\n                dfa[i].final = True\n        return dfa", "docstring": "Transforms the SFA into a DFA\nArgs:\nNone\nReturns:\nDFA: The generated DFA", "source": "juraj-google-style"}
{"code": "def add_file(self, filename, file_content):\n    self._group_data['fileName'] = filename\n    self._file_content = file_content", "docstring": "Add a file for Document and Report types.\n\nExample::\n\ndocument = tcex.batch.group('Document', 'My Document')\ndocument.add_file('my_file.txt', 'my contents')\n\nArgs:\nfilename (str): The name of the file.\nfile_content (bytes|method|str): The contents of the file or callback to get contents.", "source": "codesearchnet"}
{"code": "def mockenv_context(*remove, **update):\n    env = os.environ\n    update = update or {}\n    remove = remove or []\n    stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n    update_after = {k: env[k] for k in stomped}\n    remove_after = frozenset((k for k in update if k not in env))\n    try:\n        env.update(update)\n        [env.pop(k, None) for k in remove]\n        yield\n    finally:\n        env.update(update_after)\n        [env.pop(k) for k in remove_after]", "docstring": "Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv\n\nThe `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.\n\nArgs:\nremove: Environment variables to remove.\nupdate: Dictionary of environment variables and values to add/update.", "source": "github-repos"}
{"code": "async def ping(self, conversation_id: uuid.UUID=None) -> float:\n    cmd = convo.Ping(conversation_id=(conversation_id or uuid.uuid4()))\n    result = (await self.dispatcher.start_conversation(cmd))\n    return (await result)", "docstring": "Send a message to the remote server to check liveness.\n\nReturns:\nThe round-trip time to receive a Pong message in fractional seconds\n\nExamples:\n\n>>> async with connect() as conn:\n>>>     print(\"Sending a PING to the server\")\n>>>     time_secs = await conn.ping()\n>>>     print(\"Received a PONG after {} secs\".format(time_secs))", "source": "codesearchnet"}
{"code": "def create_inputs(inspecs):\n    \n    ret = []\n    for i in inspecs:\n        v = nn.Variable(i.shape, need_grad=i.need_grad)\n        v.d = i.init(v.shape)\n        ret.append(v)\n    return ret", "docstring": "Create input :obj:`nnabla.Variable` from :obj:`Inspec`.\n\nArgs:\ninspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.\n\nReturns:\n:obj:`list` of :obj:`nnabla.Variable`: Input variables.", "source": "juraj-google-style"}
{"code": "def infeed_dequeue_tuple(dtypes, shapes, name=None):\n    for dtype in dtypes:\n        if dtype not in _SUPPORTED_INFEED_DTYPES:\n            raise TypeError('{} is not a supported TPU infeed type. Supported types are: {}'.format(dtype, list(_SUPPORTED_INFEED_DTYPES)))\n    return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)", "docstring": "A placeholder op for values fed into the TPU simultaneously as a tuple.\n\nArgs:\ndtypes: A list of `tf.DType`s that has length `>= 1`. The element types of\neach element in `outputs`.\nshapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). The\nshapes of each tensor in `outputs`.\nname: A name for the operation (optional).\n\nReturns:\nA list of `Tensor` objects of type `dtypes`.\nA list of tensors that will be provided using the infeed mechanism.\n\nRaises:\nTypeError: If a type in 'dtypes` is not a supported infeed type.", "source": "github-repos"}
{"code": "def _list_objects(self, client_kwargs, path, max_request_entries):\n        \n        kwargs = dict()\n        if max_request_entries:\n            kwargs['max_keys'] = max_request_entries\n\n        bucket = self._get_bucket(client_kwargs)\n\n        while True:\n            with _handle_oss_error():\n                response = bucket.list_objects(prefix=path, **kwargs)\n\n            if not response.object_list:\n                \n                \n                raise _ObjectNotFoundError('Not found: %s' % path)\n\n            for obj in response.object_list:\n                yield obj.key, self._model_to_dict(obj, ('key',))\n\n            \n            if response.next_marker:\n                client_kwargs['marker'] = response.next_marker\n            else:\n                \n                break", "docstring": "Lists objects.\n\nargs:\nclient_kwargs (dict): Client arguments.\npath (str): Path relative to current locator.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ngenerator of tuple: object name str, object header dict", "source": "juraj-google-style"}
{"code": "def _CreateStyleForRoute(self, doc, route):\n    style_id = ('route_%s' % route.route_id)\n    style = ET.SubElement(doc, 'Style', {'id': style_id})\n    linestyle = ET.SubElement(style, 'LineStyle')\n    width = ET.SubElement(linestyle, 'width')\n    type_to_width = {0: '3', 1: '3', 2: '5', 3: '1'}\n    width.text = type_to_width.get(route.route_type, '1')\n    if route.route_color:\n        color = ET.SubElement(linestyle, 'color')\n        red = route.route_color[0:2].lower()\n        green = route.route_color[2:4].lower()\n        blue = route.route_color[4:6].lower()\n        color.text = ('ff%s%s%s' % (blue, green, red))\n    return style_id", "docstring": "Create a KML Style element for the route.\n\nThe style sets the line colour if the route colour is specified. The\nline thickness is set depending on the vehicle type.\n\nArgs:\ndoc: The KML Document ElementTree.Element instance.\nroute: The transitfeed.Route to create the style for.\n\nReturns:\nThe id of the style as a string.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n    super(_Merge, self).__init__(**kwargs)\n    self.supports_masking = True", "docstring": "Initializes a Merge layer.\n\nArgs:\n**kwargs: standard layer keyword arguments.", "source": "github-repos"}
{"code": "def _find_matching_instance(cache_key):\n    infos = get_all()\n    candidates = [info for info in infos if (info.cache_key == cache_key)]\n    for candidate in sorted(candidates, key=(lambda x: x.port)):\n        return candidate\n    return None", "docstring": "Find a running TensorBoard instance compatible with the cache key.\n\nReturns:\nA `TensorBoardInfo` object, or `None` if none matches the cache key.", "source": "codesearchnet"}
{"code": "def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str='default') -> OnnxConfig:\n    decoder_config.encoder_hidden_size = encoder_config.hidden_size\n    return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)", "docstring": "Returns ONNX decoder config for `VisionEncoderDecoder` model.\n\nArgs:\nencoder_config (`PretrainedConfig`):\nThe encoder model's configuration to use when exporting to ONNX.\ndecoder_config (`PretrainedConfig`):\nThe decoder model's configuration to use when exporting to ONNX\nfeature (`str`, *optional*):\nThe type of feature to export the model with.\n\nReturns:\n[`VisionEncoderDecoderDecoderOnnxConfig`]: An instance of the ONNX configuration object.", "source": "github-repos"}
{"code": "def set_address(self, name, value=None, default=False, disable=False):\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('ip address', value=value, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the interface IP address\n\nArgs:\nname (string): The interface identifier to apply the interface\nconfig to\n\nvalue (string): The IP address and mask to set the interface to.\nThe value should be in the format of A.B.C.D/E\n\ndefault (bool): Configures the address parameter to its default\nvalue using the EOS CLI default command\n\ndisable (bool): Negates the address parameter value using the\nEOS CLI no command\n\nReturns:\nTrue if the operation succeeds otherwise False.", "source": "codesearchnet"}
{"code": "def verify(self, message, signature):\n        \n        message = _helpers._to_bytes(message, encoding='utf-8')\n        signature = _helpers._to_bytes(signature, encoding='utf-8')\n        try:\n            crypto.verify(self._pubkey, signature, message, 'sha256')\n            return True\n        except crypto.Error:\n            return False", "docstring": "Verifies a message against a signature.\n\nArgs:\nmessage: string or bytes, The message to verify. If string, will be\nencoded to bytes as utf-8.\nsignature: string or bytes, The signature on the message. If string,\nwill be encoded to bytes as utf-8.\n\nReturns:\nTrue if message was signed by the private key associated with the\npublic key that this object was constructed with.", "source": "juraj-google-style"}
{"code": "def __init__(self, identifier, configuration):\n    \n    super(SampleFileProfiler, self).__init__()\n    self._identifier = identifier\n    self._path = configuration.directory\n    self._profile_measurements = {}\n    self._sample_file = None\n    self._start_time = None", "docstring": "Initializes a sample file profiler.\n\nSample files are gzip compressed UTF-8 encoded CSV files.\n\nArgs:\nidentifier (str): identifier of the profiling session used to create\nthe sample filename.\nconfiguration (ProfilingConfiguration): profiling configuration.", "source": "juraj-google-style"}
{"code": "def get_mode_group(self, group):\n\t\t\n\n\t\thmodegroup = self._libinput.libinput_device_tablet_pad_get_mode_group(\n\t\t\tself._handle, group)\n\t\tif hmodegroup:\n\t\t\treturn TabletPadModeGroup(hmodegroup, self._libinput)\n\t\treturn None", "docstring": "While a reference is kept by the caller, the returned mode group\nwill compare equal with mode group returned by each subsequent call of\nthis method with the same index and mode group returned from\n:attr:`~libinput.event.TabletPadEvent.mode_group`, provided\nthe event was generated by this mode group.\n\nArgs:\ngroup (int): A mode group index.\nReturns:\n~libinput.define.TabletPadModeGroup: The mode group with the given\nindex or :obj:`None` if an invalid index is given.", "source": "juraj-google-style"}
{"code": "def _run(self, num_iters):\n    graph = ops.Graph()\n    with graph.as_default():\n        init, output = self._build_graph()\n    with session_lib.Session(graph=graph) as session:\n        init.run()\n        _ = session.run(output)\n        start_time = time.time()\n        for _ in range(num_iters):\n            _ = session.run(output)\n        duration = time.time() - start_time\n    print('%f secs per enqueue-dequeue' % (duration / num_iters))\n    self.report_benchmark(name='fifo_queue', iters=num_iters, wall_time=duration / num_iters)\n    return duration", "docstring": "Benchmarks enqueueing and dequeueing from a FIFOQueue.\n\nArgs:\nnum_iters: The number of iterations to run.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def __add__(self, other):\n        \n        \n        copy = self.__class__(self.copy())\n        return copy.merge(other)", "docstring": "Add other in self and return new dict\n\nArgs:\nother: dict to add in self\n\nReturns: Merged dict\n\nExample:\n\n>>> from ww import d\n>>> current_dict = d({1: 1, 2: 2, 3: 3})\n>>> to_merge_dict = {3: 4, 4: 5}\n>>> current_dict + to_merge_dict\n{1: 1, 2: 2, 3: 4, 4: 5}", "source": "juraj-google-style"}
{"code": "def master_key_from_entropy(passphrase='', strength=128):\n    if ((strength % 32) != 0):\n        raise ValueError('strength must be a multiple of 32')\n    if ((strength < 128) or (strength > 256)):\n        raise ValueError('strength should be >= 128 and <= 256')\n    entropy = rand_bytes((strength \n    m = Mnemonic(language='english')\n    n = m.to_mnemonic(entropy)\n    return (HDPrivateKey.master_key_from_seed(Mnemonic.to_seed(n, passphrase)), n)", "docstring": "Generates a master key from system entropy.\n\nArgs:\nstrength (int): Amount of entropy desired. This should be\na multiple of 32 between 128 and 256.\npassphrase (str): An optional passphrase for the generated\nmnemonic string.\n\nReturns:\nHDPrivateKey, str:\na tuple consisting of the master\nprivate key and a mnemonic string from which the seed\ncan be recovered.", "source": "codesearchnet"}
{"code": "def denyMapIdentity(self, subject, vendorSpecific=None):\n        \n        response = self.denyMapIdentityResponse(subject, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: denyMapIdentityResponse()\n\nArgs:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def GetProcessedTaskByIdentifier(self, task_identifier):\n    with self._lock:\n        task = self._tasks_processing.get(task_identifier, None)\n        if (not task):\n            task = self._tasks_queued.get(task_identifier, None)\n        if (not task):\n            task = self._tasks_abandoned.get(task_identifier, None)\n        if (not task):\n            raise KeyError('Status of task {0:s} is unknown'.format(task_identifier))\n    return task", "docstring": "Retrieves a task that has been processed.\n\nArgs:\ntask_identifier (str): unique identifier of the task.\n\nReturns:\nTask: a task that has been processed.\n\nRaises:\nKeyError: if the task was not processing, queued or abandoned.", "source": "codesearchnet"}
{"code": "def compute_ssim(image1, image2, gaussian_kernel_sigma=1.5,\n                 gaussian_kernel_width=11):\n    \n    gaussian_kernel_1d = get_gaussian_kernel(\n        gaussian_kernel_width, gaussian_kernel_sigma)\n    return SSIM(image1, gaussian_kernel_1d).ssim_value(image2)", "docstring": "Computes SSIM.\n\nArgs:\nim1: First PIL Image object to compare.\nim2: Second PIL Image object to compare.\n\nReturns:\nSSIM float value.", "source": "juraj-google-style"}
{"code": "def __init__(self, endpoint_name, sagemaker_session=None):\n        \n        super(SKLearnPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)", "docstring": "Initialize an ``SKLearnPredictor``.\n\nArgs:\nendpoint_name (str): The name of the endpoint to perform inference on.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one\nusing the default AWS configuration chain.", "source": "juraj-google-style"}
{"code": "def convert_debug_info_func(saved_debug_info):\n\n    def f(original_nodes):\n        \n        del original_nodes\n        return saved_debug_info\n    return f", "docstring": "Returns a method to retrieve the `GraphDebugInfo` from the original graph.\n\nArgs:\nsaved_debug_info: The `GraphDebugInfo` containing all the debug info.\n\nReturns:\nA function which retrieves the stack traces from the original graph and\nconverts them to a `GraphDebugInfo` for a given set of nodes.", "source": "github-repos"}
{"code": "def __handle_variable(self, shell_entry, output):\n    if ('variable' in shell_entry):\n        variable_name = shell_entry['variable']\n        self.pipeline.variables[variable_name] = '\\n'.join(output)", "docstring": "Saving output for configured variable name.\n\nArgs:\nshell_entry(dict): shell based configuration (shell, docker container or Python).\noutput: list of strings representing output of last shell", "source": "codesearchnet"}
{"code": "def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None, session=None):\n    self._session = session\n    session.run(variables.global_variables_initializer())\n    for op in ops.get_default_graph().get_collection(VAR_ASSIGN_COLLECTION):\n        session.run(op)\n    super(_FunctionConverterDataInGraph, self).__init__(func, lower_control_flow, aggressive_inlining, variable_names_allowlist, variable_names_denylist)", "docstring": "Creates the conversion data for the given function.\n\nArgs:\nfunc: ConcreteFunction.\nlower_control_flow: Boolean indicating whether or not to lower control\nflow ops such as If and While.\naggressive_inlining: Boolean indicating whether or not to do aggressive\nfunction inlining (might be unsafe if function has stateful ops, not\nproperly connected to control outputs).\nvariable_names_allowlist: The set of variable names to convert (by\ndefault, all variables are converted).\nvariable_names_denylist: The set of variable names to omit converting to\nconstants.\nsession: Session object.", "source": "github-repos"}
{"code": "def func_callsig(func, with_name=True):\n    \n    import inspect\n    argspec = inspect.getargspec(func)\n    (args, varargs, varkw, defaults) = argspec\n    callsig = inspect.formatargspec(*argspec[0:3])\n    if with_name:\n        callsig = get_callable_name(func) + callsig\n    return callsig", "docstring": "String of function call signature\n\nArgs:\nfunc (function): live python function\n\nReturns:\nstr: callsig\n\nCommandLine:\npython -m utool.util_str --exec-func_callsig\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> func = func_str\n>>> callsig = func_callsig(func)\n>>> result = str(callsig)\n>>> print(result)\nfunc_str(func, args, kwargs, type_aliases, packed, packkw, truncate)", "source": "juraj-google-style"}
{"code": "def build_recursive_localize_env(destination, inputs):\n  \n  export_input_dirs = '\\n'.join([\n      'export {0}={1}/{2}'.format(var.name, destination.rstrip('/'),\n                                  var.docker_path.rstrip('/'))\n      for var in inputs\n      if var.recursive and var.docker_path\n  ])\n  return export_input_dirs", "docstring": "Return a multi-line string with export statements for the variables.\n\nArguments:\ndestination: Folder where the data will be put.\nFor example /mnt/data\ninputs: a list of InputFileParam\n\nReturns:\na multi-line string with a shell script that sets environment variables\ncorresponding to the inputs.", "source": "juraj-google-style"}
{"code": "def to_title_caps(underscore_case):\n    r\n    words = underscore_case.split('_')\n    words2 = [\n        word[0].upper() + word[1:]\n        for count, word in enumerate(words)\n    ]\n    title_str = ' '.join(words2)\n    return title_str", "docstring": "r\"\"\"\nArgs:\nunderscore_case (?):\n\nReturns:\nstr: title_str\n\nCommandLine:\npython -m utool.util_str --exec-to_title_caps\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> underscore_case = 'the_foo_bar_func'\n>>> title_str = to_title_caps(underscore_case)\n>>> result = ('title_str = %s' % (str(title_str),))\n>>> print(result)\ntitle_str = The Foo Bar Func", "source": "juraj-google-style"}
{"code": "def parse(input_string, prefix=''):\n  \n\n  tree = parser.parse(input_string)\n  visitor = ChatlVisitor(prefix)\n\n  visit_parse_tree(tree, visitor)\n\n  return visitor.parsed", "docstring": "Parses the given DSL string and returns parsed results.\n\nArgs:\ninput_string (str): DSL string\nprefix (str): Optional prefix to add to every element name, useful to namespace things\n\nReturns:\ndict: Parsed content", "source": "juraj-google-style"}
{"code": "def list_workflows(config):\n    \n    workflows = []\n    for path in config.workflows:\n        filenames = glob.glob(os.path.join(os.path.abspath(path), '*.py'))\n\n        for filename in filenames:\n            module_name = os.path.splitext(os.path.basename(filename))[0]\n            workflow = Workflow()\n            try:\n                workflow.load(module_name, validate_arguments=False, strict_dag=True)\n                workflows.append(workflow)\n            except DirectedAcyclicGraphInvalid as e:\n                raise WorkflowDefinitionError(workflow_name=module_name,\n                                              graph_name=e.graph_name)\n            except WorkflowImportError:\n                continue\n\n    return workflows", "docstring": "List all available workflows.\n\nReturns a list of all workflows that are available from the paths specified\nin the config. A workflow is defined as a Python file with at least one DAG.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings are retrieved.\n\nReturns:\nlist: A list of workflows.", "source": "juraj-google-style"}
{"code": "def detect_format(program, attributes) -> str:\n    \n\n    def fmt(attr):\n        \n\n        return attr.array_length * attr.dimension, attr.shape\n\n    return ' '.join('%d%s' % fmt(program[a]) for a in attributes)", "docstring": "Detect format for vertex attributes.\nThe format returned does not contain padding.\n\nArgs:\nprogram (Program): The program.\nattributes (list): A list of attribute names.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def on_heartbeat(self, message):\n    logger.info('Got a heartbeat')\n    logger.info('Heartbeat message: {}'.format(message))\n    self.heartbeat_thread.update_sequence(message['d'])\n    return", "docstring": "Runs on a heartbeat event from websocket connection\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"", "source": "codesearchnet"}
{"code": "def Pack(cls, obj, version):\n    \n    if isinstance(obj, ServiceQuery):\n      return str(obj)\n    return obj", "docstring": "Pack the given object using AdWords-specific logic.\n\nArgs:\nobj: an object to be packed for SOAP using AdWords-specific logic, if\napplicable.\nversion: the version of the current API, e.g. 'v201809'\n\nReturns:\nThe given object packed with AdWords-specific logic for SOAP, if\napplicable. Otherwise, returns the given object unmodified.", "source": "juraj-google-style"}
{"code": "def get_attribute(self, attribute: str) -> 'Node':\n    matches = [value_node for (key_node, value_node) in self.yaml_node.value if (key_node.value == attribute)]\n    if (len(matches) != 1):\n        raise SeasoningError('Attribute not found, or found multiple times: {}'.format(matches))\n    return Node(matches[0])", "docstring": "Returns the node representing the given attribute's value.\n\nUse only if is_mapping() returns true.\n\nArgs:\nattribute: The name of the attribute to retrieve.\n\nRaises:\nKeyError: If the attribute does not exist.\n\nReturns:\nA node representing the value.", "source": "codesearchnet"}
{"code": "def __build_config_block(self, config_block_node):\n    node_lists = []\n    for line_node in config_block_node:\n        if isinstance(line_node, pegnode.ConfigLine):\n            node_lists.append(self.__build_config(line_node))\n        elif isinstance(line_node, pegnode.OptionLine):\n            node_lists.append(self.__build_option(line_node))\n        elif isinstance(line_node, pegnode.ServerLine):\n            node_lists.append(self.__build_server(line_node))\n        elif isinstance(line_node, pegnode.BindLine):\n            node_lists.append(self.__build_bind(line_node))\n        elif isinstance(line_node, pegnode.AclLine):\n            node_lists.append(self.__build_acl(line_node))\n        elif isinstance(line_node, pegnode.BackendLine):\n            node_lists.append(self.__build_usebackend(line_node))\n        elif isinstance(line_node, pegnode.UserLine):\n            node_lists.append(self.__build_user(line_node))\n        elif isinstance(line_node, pegnode.GroupLine):\n            node_lists.append(self.__build_group(line_node))\n        else:\n            pass\n    return node_lists", "docstring": "parse `config_block` in each section\n\nArgs:\nconfig_block_node (TreeNode): Description\n\nReturns:\n[line_node1, line_node2, ...]", "source": "codesearchnet"}
{"code": "def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False):\n    if os.path.isdir(filepath):\n        for (dirpath, dirnames, filenames) in os.walk(filepath):\n            fnames = [f for f in filenames if (f == cls.PICKLE_FNAME)]\n            if fnames:\n                if (len(fnames) == 1):\n                    filepath = os.path.join(dirpath, fnames[0])\n                    break\n                else:\n                    err_msg = ('Found multiple databases:\\n %s' % str(fnames))\n                    raise RuntimeError(err_msg)\n        else:\n            err_msg = ('Cannot find %s inside directory %s' % (cls.PICKLE_FNAME, filepath))\n            raise ValueError(err_msg)\n    if (remove_lock and os.path.exists((filepath + '.lock'))):\n        try:\n            os.remove((filepath + '.lock'))\n        except:\n            pass\n    with FileLock(filepath):\n        with open(filepath, 'rb') as fh:\n            flow = pmg_pickle_load(fh)\n    if (flow.VERSION != cls.VERSION):\n        msg = ('File flow version %s != latest version %s\\n.Regenerate the flow to solve the problem ' % (flow.VERSION, cls.VERSION))\n        warnings.warn(msg)\n    flow.set_spectator_mode(spectator_mode)\n    flow.check_status()\n    return flow", "docstring": "Loads the object from a pickle file and performs initial setup.\n\nArgs:\nfilepath: Filename or directory name. It filepath is a directory, we\nscan the directory tree starting from filepath and we\nread the first pickle database. Raise RuntimeError if multiple\ndatabases are found.\nspectator_mode: If True, the nodes of the flow are not connected by signals.\nThis option is usually used when we want to read a flow\nin read-only mode and we want to avoid callbacks that can change the flow.\nremove_lock:\nTrue to remove the file lock if any (use it carefully).", "source": "codesearchnet"}
{"code": "def _get_attrs_items(obj):\n    attrs = getattr(obj.__class__, '__attrs_attrs__')\n    attr_names = (a.name for a in attrs)\n    return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]", "docstring": "Returns a list of (name, value) pairs from an attrs instance.\n\nTODO(b/268078256): check if this comment is valid, and if so, ensure it's\nhandled in the function below.\nThe list will be sorted by name.\n\nArgs:\nobj: an object.\n\nReturns:\nA list of (attr_name, attr_value) pairs, sorted by attr_name.", "source": "github-repos"}
{"code": "def FormatArtifacts(self, artifacts):\n    \n    artifact_definitions = [artifact.AsDict() for artifact in artifacts]\n    json_data = json.dumps(artifact_definitions)\n    return json_data", "docstring": "Formats artifacts to desired output format.\n\nArgs:\nartifacts (list[ArtifactDefinition]): artifact definitions.\n\nReturns:\nstr: formatted string of artifact definition.", "source": "juraj-google-style"}
{"code": "def dist_point_line(p, l1, l2):\n    \n    cross_prod = np.cross(l2 - l1, p - l1)\n    return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)", "docstring": "compute the orthogonal distance between from the line that goes through\nthe points l1, l2 and the point p\n\nArgs:\np, l1, l2 : iterable\npoint\nindices 0, 1, 2 corresponding to cartesian coordinates", "source": "juraj-google-style"}
{"code": "def plot_real_feature(df, feature_name, bins=50, figsize=(15, 15)):\n    ix_negative_target = df[(df.target == 0)].index\n    ix_positive_target = df[(df.target == 1)].index\n    plt.figure(figsize=figsize)\n    ax_overall_dist = plt.subplot2grid((3, 2), (0, 0), colspan=2)\n    ax_target_conditional_dist = plt.subplot2grid((3, 2), (1, 0), colspan=2)\n    ax_botplot = plt.subplot2grid((3, 2), (2, 0))\n    ax_violin_plot = plt.subplot2grid((3, 2), (2, 1))\n    ax_overall_dist.set_title('Distribution of {}'.format(feature_name), fontsize=16)\n    sns.distplot(df[feature_name], bins=50, ax=ax_overall_dist)\n    sns.distplot(df.loc[ix_positive_target][feature_name], bins=bins, ax=ax_target_conditional_dist, label='Positive Target')\n    sns.distplot(df.loc[ix_negative_target][feature_name], bins=bins, ax=ax_target_conditional_dist, label='Negative Target')\n    ax_target_conditional_dist.legend(loc='upper right', prop={'size': 14})\n    sns.boxplot(y=feature_name, x='target', data=df, ax=ax_botplot)\n    sns.violinplot(y=feature_name, x='target', data=df, ax=ax_violin_plot)\n    plt.show()", "docstring": "Plot the distribution of a real-valued feature conditioned by the target.\n\nExamples:\n`plot_real_feature(X, 'emb_mean_euclidean')`\n\nArgs:\ndf: Pandas dataframe containing the target column (named 'target').\nfeature_name: The name of the feature to plot.\nbins: The number of histogram bins for the distribution plot.\nfigsize: The size of the plotted figure.", "source": "codesearchnet"}
{"code": "def _PrintExtractionStatusUpdateLinear(self, processing_status):\n    \n    for worker_status in processing_status.workers_status:\n      status_line = (\n          '{0:s} (PID: {1:d}) - events produced: {2:d} - file: {3:s} '\n          '- running: {4!s}\\n').format(\n              worker_status.identifier, worker_status.pid,\n              worker_status.number_of_produced_events,\n              worker_status.display_name,\n              worker_status.status not in definitions.ERROR_STATUS_INDICATORS)\n      self._output_writer.Write(status_line)", "docstring": "Prints an extraction status update in linear mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"}
{"code": "def sign_adaptation(control: FloatNest, output: FloatTensor, set_point: FloatTensor, adaptation_rate: FloatTensor=0.01) -> FloatNest:\n\n    def _get_new_control(control, output, set_point):\n        new_control = mcmc_util.choose((output > set_point), (control * (1.0 + adaptation_rate)), (control / (1.0 + adaptation_rate)))\n        return new_control\n    output = maybe_broadcast_structure(output, control)\n    set_point = maybe_broadcast_structure(set_point, control)\n    return tf.nest.map_structure(_get_new_control, control, output, set_point)", "docstring": "A function to do simple sign-based control of a variable.\n\n```\ncontrol = control * (1. + adaptation_rate) ** sign(output - set_point)\n```\n\nArgs:\ncontrol: The control variable.\noutput: The output variable.\nset_point: The set point for `output`. This function will adjust `control`\nso that `output` matches `set_point`.\nadaptation_rate: Adaptation rate.\n\nReturns:\ncontrol: New control.", "source": "codesearchnet"}
{"code": "def principal_direction_extent(points):\n    points = np.copy(points)\n    points -= np.mean(points, axis=0)\n    (_, eigv) = pca(points)\n    extent = np.zeros(3)\n    for i in range(eigv.shape[1]):\n        scalar_projs = np.sort(np.array([np.dot(p, eigv[(:, i)]) for p in points]))\n        extent[i] = scalar_projs[(- 1)]\n        if (scalar_projs[0] < 0.0):\n            extent -= scalar_projs[0]\n    return extent", "docstring": "Calculate the extent of a set of 3D points.\n\nThe extent is defined as the maximum distance between\nthe projections on the principal directions of the covariance matrix\nof the points.\n\nParameter:\npoints : a 2D numpy array of points\n\nReturns:\nextents : the extents for each of the eigenvectors of the cov matrix\neigs : eigenvalues of the covariance matrix\neigv : respective eigenvectors of the covariance matrix", "source": "codesearchnet"}
{"code": "def get_gdns_publisher(config, metrics, **kwargs):\n    \n    builder = gdns_publisher.GDNSPublisherBuilder(\n        config, metrics, **kwargs)\n    return builder.build_publisher()", "docstring": "Get a GDNSPublisher client.\n\nA factory function that validates configuration and returns a\npublisher client (:interface:`gordon.interfaces.IMessageHandler`)\nprovider.\n\nArgs:\nconfig (dict): Google Cloud DNS API related configuration.\nmetrics (obj): :interface:`IMetricRelay` implementation.\nkwargs (dict): Additional keyword arguments to pass to the\npublisher.\nReturns:\nA :class:`GDNSPublisher` instance.", "source": "juraj-google-style"}
{"code": "def not_implemented(cls, errors=None):\n    if cls.expose_status:\n        cls.response.content_type = 'application/json'\n        cls.response._status_line = '501 Not Implemented'\n    return cls(501, None, errors).to_json", "docstring": "Shortcut API for HTTP 501 `Not Implemented` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "codesearchnet"}
{"code": "def word_probability(self, word, total_words=None):\n    if (total_words is None):\n        total_words = self._word_frequency.total_words\n    return (self._word_frequency.dictionary[word] / total_words)", "docstring": "Calculate the probability of the `word` being the desired, correct\nword\n\nArgs:\nword (str): The word for which the word probability is \\\ncalculated\ntotal_words (int): The total number of words to use in the \\\ncalculation; use the default for using the whole word \\\nfrequency\nReturns:\nfloat: The probability that the word is the correct word", "source": "codesearchnet"}
{"code": "def _restore_volume(self, fade):\n        \n        self.device.mute = self.mute\n\n        \n        \n        \n        \n        if self.volume == 100:\n            fixed_vol = self.device.renderingControl.GetOutputFixed(\n                [('InstanceID', 0)])['CurrentFixed']\n        else:\n            fixed_vol = False\n\n        \n        if not fixed_vol:\n            self.device.bass = self.bass\n            self.device.treble = self.treble\n            self.device.loudness = self.loudness\n\n            if fade:\n                \n                \n                self.device.volume = 0\n                self.device.ramp_to_volume(self.volume)\n            else:\n                \n                self.device.volume = self.volume", "docstring": "Reinstate volume.\n\nArgs:\nfade (bool): Whether volume should be faded up on restore.", "source": "juraj-google-style"}
{"code": "def eval(self, amplstatements, **kwargs):\n    if (self._langext is not None):\n        amplstatements = self._langext.translate(amplstatements, **kwargs)\n    lock_and_call((lambda : self._impl.eval(amplstatements)), self._lock)\n    self._errorhandler_wrapper.check()", "docstring": "Parses AMPL code and evaluates it as a possibly empty sequence of AMPL\ndeclarations and statements.\n\nAs a side effect, it invalidates all entities (as the passed statements\ncan contain any arbitrary command); the lists of entities will be\nre-populated lazily (at first access)\n\nThe output of interpreting the statements is passed to the current\nOutputHandler (see getOutputHandler and\nsetOutputHandler).\n\nBy default, errors and warnings are printed on stdout.\nThis behavior can be changed reassigning an ErrorHandler\nusing setErrorHandler.\n\nArgs:\namplstatements: A collection of AMPL statements and declarations to\nbe passed to the interpreter.\n\nRaises:\nRuntimeError: if the input is not a complete AMPL statement (e.g.\nif it does not end with semicolon) or if the underlying\ninterpreter is not running.", "source": "codesearchnet"}
{"code": "def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.0):\n    d = CellpyData()\n    if (not outdir):\n        outdir = prms.Paths['cellpydatadir']\n    if (not outfile):\n        outfile = (os.path.basename(filename).split('.')[0] + '.h5')\n        outfile = os.path.join(outdir, outfile)\n    print('filename:', filename)\n    print('outfile:', outfile)\n    print('outdir:', outdir)\n    print('mass:', mass, 'mg')\n    d.from_raw(filename)\n    d.set_mass(mass)\n    d.make_step_table()\n    d.make_summary()\n    d.save(filename=outfile)\n    d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True)\n    return outfile", "docstring": "Load a raw data file and save it as cellpy-file.\n\nArgs:\nmass (float): active material mass [mg].\noutdir (path): optional, path to directory for saving the hdf5-file.\noutfile (str): optional, name of hdf5-file.\nfilename (str): name of the resfile.\n\nReturns:\nout_file_name (str): name of saved file.", "source": "codesearchnet"}
{"code": "def dag_to_circuit(dag):\n    \n    qregs = collections.OrderedDict()\n    for qreg in dag.qregs.values():\n        qreg_tmp = QuantumRegister(qreg.size, name=qreg.name)\n        qregs[qreg.name] = qreg_tmp\n    cregs = collections.OrderedDict()\n    for creg in dag.cregs.values():\n        creg_tmp = ClassicalRegister(creg.size, name=creg.name)\n        cregs[creg.name] = creg_tmp\n\n    name = dag.name or None\n    circuit = QuantumCircuit(*qregs.values(), *cregs.values(), name=name)\n\n    for node in dag.topological_op_nodes():\n        qubits = []\n        for qubit in node.qargs:\n            qubits.append(qregs[qubit[0].name][qubit[1]])\n\n        clbits = []\n        for clbit in node.cargs:\n            clbits.append(cregs[clbit[0].name][clbit[1]])\n\n        \n        if node.condition is None:\n            control = None\n        else:\n            control = (node.condition[0], node.condition[1])\n\n        inst = node.op.copy()\n        inst.control = control\n        circuit.append(inst, qubits, clbits)\n    return circuit", "docstring": "Build a ``QuantumCircuit`` object from a ``DAGCircuit``.\n\nArgs:\ndag (DAGCircuit): the input dag.\n\nReturn:\nQuantumCircuit: the circuit representing the input dag.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor:\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        input_shape = shape_list(input_ids)\n    else:\n        input_shape = shape_list(inputs_embeds)[:-1]\n    seq_length = input_shape[1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0)\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0)\n        position_ids = tf.broadcast_to(position_ids, shape=input_shape)\n        if self.reset_position_index_per_cell:\n            col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)\n            row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)\n            full_index = ProductIndexMap(col_index, row_index)\n            first_position_per_segment = reduce_min(position_ids, full_index)[0]\n            first_position = gather(first_position_per_segment, full_index)\n            position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0)\n            position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    position_embeddings = tf.gather(self.position_embeddings, indices=position_ids)\n    final_embeddings = inputs_embeds + position_embeddings\n    for i in range(self.number_of_token_type_embeddings):\n        name = f'token_type_embeddings_{i}'\n        final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i])\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def get_countries(is_legacy_xml=False):\n    \n\n    \n    countries = {}\n\n    \n    if sys.platform == 'win32' and getattr(sys, 'frozen', False):\n\n        data_dir = path.dirname(sys.executable)  \n\n    else:\n\n        data_dir = path.dirname(__file__)\n\n    if is_legacy_xml:\n\n        log.debug('Opening country code legacy XML: {0}'.format(\n                str(data_dir) + '/data/iso_3166-1_list_en.xml'))\n\n        \n        f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml', 'r',\n                    encoding='ISO-8859-1')\n\n        \n        data = f.read()\n\n        \n        if not data:  \n\n            return {}\n\n        \n        dom = parseString(data)\n\n        \n        entries = dom.getElementsByTagName('ISO_3166-1_Entry')\n\n        \n        for entry in entries:\n\n            \n            code = entry.getElementsByTagName(\n                'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data\n            name = entry.getElementsByTagName(\n                'ISO_3166-1_Country_name')[0].firstChild.data\n\n            \n            countries[code] = name.title()\n\n    else:\n\n        log.debug('Opening country code CSV: {0}'.format(\n                str(data_dir) + '/data/iso_3166-1_list_en.xml'))\n\n        \n        f = io.open(str(data_dir) + '/data/iso_3166-1.csv', 'r',\n                    encoding='utf-8')\n\n        \n        csv_reader = csv.reader(f, delimiter=',', quotechar='\"')\n\n        \n        for row in csv_reader:\n\n            \n            code = row[0]\n            name = row[1]\n\n            \n            countries[code] = name\n\n    return countries", "docstring": "The function to generate a dictionary containing ISO_3166-1 country codes\nto names.\n\nArgs:\nis_legacy_xml (:obj:`bool`): Whether to use the older country code\nlist (iso_3166-1_list_en.xml).\n\nReturns:\ndict: A mapping of country codes as the keys to the country names as\nthe values.", "source": "juraj-google-style"}
{"code": "def get_variant_by_name(self, name):\n        \n        try:\n            geno = self.df.loc[:, name].values\n            info = self.map_info.loc[name, :]\n\n        except KeyError:\n            \n            \n            logging.variant_name_not_found(name)\n            return []\n\n        else:\n            return [Genotypes(\n                Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),\n                geno,\n                reference=info.a2,\n                coded=info.a1,\n                multiallelic=False,\n            )]", "docstring": "Get the genotypes for a given variant (by name).\n\nArgs:\nname (str): The name of the variant to retrieve the genotypes.\n\nReturns:\nlist: A list of Genotypes. This is a list in order to keep the same\nbehaviour as the other functions.", "source": "juraj-google-style"}
{"code": "def write_seq_as_temp_fasta(seq):\n    sr = ssbio.protein.sequence.utils.cast_to_seq_record(seq, id='tempfasta')\n    return write_fasta_file(seq_records=sr, outname='temp', outdir=tempfile.gettempdir(), force_rerun=True)", "docstring": "Write a sequence as a temporary FASTA file\n\nArgs:\nseq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object\n\nReturns:\nstr: Path to temporary FASTA file (located in system temporary files directory)", "source": "codesearchnet"}
{"code": "def get_parts_of_url_path(url):\n    \n    parsed = urlparse(url)\n    path = unquote(parsed.path).lstrip('/')\n    parts = path.split('/')\n    return parts", "docstring": "Given a url, take out the path part and split it by '/'.\n\nArgs:\nurl (str): the url slice\n\nreturns\nlist: parts after the domain name of the URL", "source": "juraj-google-style"}
{"code": "def as_dataframe(self, pattern='*', max_rows=None):\n    data = []\n    for (i, group) in enumerate(self.list(pattern)):\n        if ((max_rows is not None) and (i >= max_rows)):\n            break\n        parent = self._group_dict.get(group.parent_id)\n        parent_display_name = ('' if (parent is None) else parent.display_name)\n        data.append([group.id, group.display_name, group.parent_id, parent_display_name, group.is_cluster, group.filter])\n    return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)", "docstring": "Creates a pandas dataframe from the groups that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the groups. This can\ninclude Unix shell-style wildcards. E.g. ``\"Production *\"``,\n``\"*-backend\"``.\nmax_rows: The maximum number of groups to return. If None, return all.\n\nReturns:\nA pandas dataframe containing matching groups.", "source": "codesearchnet"}
{"code": "def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):\n    \n    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')\n    if os.path.exists(pkg_info_file):\n        return\n\n    with open('RELEASE_NOTES', 'wb') as releasenotes_fd:\n        releasenotes_fd.write(\n            get_releasenotes(\n                project_dir=project_dir,\n                bugtracker_url=bugtracker_url,\n            ).encode('utf-8') + b'\\n'\n        )", "docstring": "Creates the release notes file, if not in a package.\n\nArgs:\nproject_dir(str): Path to the git repo of the project.\nbugtracker_url(str): Url to the bug tracker for the issues.\n\nReturns:\nNone\n\nRaises:\nRuntimeError: If the release notes could not be retrieved", "source": "juraj-google-style"}
{"code": "def post_create_app(cls, app, **settings):\n    register_errorhandler = settings.pop('register_errorhandler', True)\n    if register_errorhandler:\n        AppException.register_errorhandler(app)\n    return app", "docstring": "Register the errorhandler for the AppException to the passed in\nApp.\n\nArgs:\napp (fleaker.base.BaseApplication): A Flask application that\nextends the Fleaker Base Application, such that the hooks are\nimplemented.\n\nKwargs:\nregister_errorhandler (bool): A boolean indicating if we want to\nautomatically register an errorhandler for the\n:class:`AppException` exception class after we create this App.\nPass ``False`` to prevent registration. Default is ``True``.\n\n\nReturns:\nfleaker.base.BaseApplication: Returns the app it was given.", "source": "codesearchnet"}
{"code": "def _AggregatedGrads(grads, op, gradient_uid, loop_state, aggregation_method=None):\n    if aggregation_method is None:\n        aggregation_method = AggregationMethod.DEFAULT\n    valid_aggregation_methods = [AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE, AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]\n    if aggregation_method not in valid_aggregation_methods:\n        raise ValueError(f'Invalid `aggregation_method` specified {aggregation_method}. Accepted values are {valid_aggregation_methods}.')\n    out_grads = _GetGrads(grads, op)\n    for i, out_grad in enumerate(out_grads):\n        if loop_state:\n            if isinstance(out_grad, (tensor_lib.Tensor, indexed_slices.IndexedSlices)):\n                assert control_flow_util.IsLoopSwitch(op)\n                continue\n        if isinstance(out_grad, collections_abc.Sequence) and (not all((isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for g in out_grad if g is not None))):\n            raise TypeError(f'Invalid gradient {out_grad} [index = {i}]. Gradients have to be either all Tensors or all IndexedSlices')\n        if out_grad:\n            if len(out_grad) < 2:\n                used = 'nop'\n                out_grads[i] = out_grad[0]\n            elif all((isinstance(g, tensor_lib.Tensor) for g in out_grad if g is not None)):\n                tensor_shape = _AccumulatorShape(out_grad)\n                if aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE, AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]:\n                    used = 'tree'\n                    with ops.name_scope(op.name + '_gradient_sum'):\n                        running_sum = out_grad[0]\n                        for grad in out_grad[1:]:\n                            running_sum = math_ops.add_n([running_sum, grad])\n                        out_grads[i] = running_sum\n                else:\n                    used = 'add_n'\n                    out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)\n                logging.vlog(2, '  _AggregatedGrads %d x %s using %s', len(out_grad), tensor_shape, used)\n            else:\n                out_grads[i] = backprop_util.AggregateIndexedSlicesGradients(out_grad)\n        else:\n            out_grads[i] = None\n    return out_grads", "docstring": "Get the aggregated gradients for op.\n\nArgs:\ngrads: The map of memoized gradients.\nop: The op to get gradients for.\ngradient_uid: A unique identifier within the graph indicating\nwhich invocation of gradients is being executed. Used to cluster\nops for compilation.\nloop_state: An object for maintaining the state of the while loops in the\ngraph. It is of type ControlFlowState. None if the graph\ncontains no while loops.\naggregation_method: Specifies the method used to combine gradient terms.\nAccepted values are constants defined in the class `AggregationMethod`.\n\nReturns:\nA list of gradients, one per each output of `op`. If the gradients\nfor a particular output is a list, this function aggregates it\nbefore returning.\n\nRaises:\nTypeError: if the incoming grads are not Tensors or IndexedSlices.\nValueError: if the arguments are invalid.", "source": "github-repos"}
{"code": "def url(self, endpoint=''):\n        \n        if not endpoint.startswith('/'):\n            endpoint = \"/\" + endpoint\n        return self.protocol + \":", "docstring": "Get the base URL of the Remote.\n\nArguments:\nNone\nReturns:\n`str` base URL", "source": "juraj-google-style"}
{"code": "def _sort_course_modes(self, modes):\n        \n        def slug_weight(mode):\n            \n            sorting_slugs = COURSE_MODE_SORT_ORDER\n            sorting_slugs_size = len(sorting_slugs)\n            if mode['slug'] in sorting_slugs:\n                return sorting_slugs_size - sorting_slugs.index(mode['slug'])\n            return 0\n        \n        return sorted(modes, key=slug_weight, reverse=True)", "docstring": "Sort the course mode dictionaries by slug according to the COURSE_MODE_SORT_ORDER constant.\n\nArguments:\nmodes (list): A list of course mode dictionaries.\nReturns:\nlist: A list with the course modes dictionaries sorted by slug.", "source": "juraj-google-style"}
{"code": "def sg_reuse(tensor, **opt):\n    opt = tf.sg_opt(opt)\n    assert hasattr(tensor, '_sugar'), 'cannot reuse this node.'\n    assert (opt.input is not None), 'input is mandatory.'\n    (nodes, prev) = ([tensor], tensor._sugar.prev)\n    while (prev is not None):\n        nodes = ([prev] + nodes)\n        prev = (prev._sugar.prev if hasattr(prev, '_sugar') else None)\n    out = opt.input\n    for node in nodes[1:]:\n        if node._sugar.is_layer:\n            fn = tf.sg_layer_func(node._sugar.func)\n            if node._sugar.arg.scope_name:\n                with tf.variable_scope(node._sugar.arg.scope_name):\n                    out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))\n            else:\n                out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))\n        else:\n            out = node._sugar.func(out, node._sugar.arg)\n    return out", "docstring": "r\"\"\" Reconstruct computational graph of `tensor` so all the parameters\ncan be reused and replace its input tensor with `opt.input`.\n\nArgs:\ntensor: A `Tensor` (automatically given by chaining).\n**opt:\ninput: A `Tensor` that will replace the original input tensor.\n\nReturns:\nReconstructed tensor nodes.", "source": "codesearchnet"}
{"code": "def _GetExpectedFractionalMaxPoolResult(self, input_tensor, row_seq, col_seq, overlapping):\n    input_shape = input_tensor.shape\n    output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1, input_shape[3])\n    output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)\n    for batch in range(input_shape[0]):\n        for channel in range(input_shape[3]):\n            two_dim_slice = input_tensor[batch, :, :, channel]\n            tmp = self._MaxPoolAlongRows(two_dim_slice, row_seq, overlapping)\n            output_tensor[batch, :, :, channel] = self._MaxPoolAlongCols(tmp, col_seq, overlapping)\n    return output_tensor", "docstring": "Get expected fractional max pool result.\n\nrow_seq and col_seq together defines the fractional pooling region.\n\nArgs:\ninput_tensor: Original input tensor, assuming it is a 4-D tensor, with\ndimension as [batch, height/row, width/column, channels/depth].\nrow_seq: Cumulative pooling sequence along row.\ncol_seq: Cumulative pooling sequence along column.\noverlapping: Use overlapping when doing pooling.\n\nReturns:\nA 4-D tensor that is the result of max pooling on input_tensor based on\npooling region defined by row_seq and col_seq, conditioned on whether or\nnot overlapping is used.", "source": "github-repos"}
{"code": "def authenticate(self):\n        \n        basic_auth = request.authorization\n        is_valid = False\n        user = None\n        if basic_auth:\n            is_valid, user = self.check_basic_auth(\n                basic_auth.username, basic_auth.password\n            )\n        else:  \n            token = request.headers.get('Authorization', None)\n            param_token = request.args.get('access_token')\n            if token or param_token:\n                if token:\n                    \n                    \n                    token = token[6:]\n                else:\n                    \n                    token = param_token\n                log.debug('Received token: %s', token)\n\n                is_valid, user = self.check_token_auth(token)\n        return (is_valid, user)", "docstring": "Authenticate user by any means and return either true or false.\n\nArgs:\n\nReturns:\ntuple (is_valid, username): True is valid user, False if not", "source": "juraj-google-style"}
{"code": "def __init__(self, name: str, object_id: int, timestamp: int, pid: int, allocator: str, num_bytes: int) -> None:\n    self._name = name\n    self._pid = pid\n    self._object_id = object_id\n    self._create_time = timestamp\n    self._allocator = allocator\n    self._num_bytes = num_bytes\n    self._ref_times = []\n    self._unref_times = []", "docstring": "Creates an object to track tensor references.\n\nThis class is not thread safe and is intended only for internal use by\nthe 'Timeline' class in this file.\n\nArgs:\nname:  The name of the Tensor as a string.\nobject_id:  Chrome Trace object identifier assigned for this Tensor.\ntimestamp:  The creation timestamp of this event as a long integer.\npid:  Process identifier of the associated device, as an integer.\nallocator:  Name of the allocator used to create the Tensor.\nnum_bytes:  Number of bytes allocated (long integer).\n\nReturns:\nA 'TensorTracker' object.", "source": "github-repos"}
{"code": "def get_broadcast_shape(*tensors):\n  \n  \n  s_shape = tensors[0].shape\n  for t in tensors[1:]:\n    s_shape = tf.broadcast_static_shape(s_shape, t.shape)\n  if tensorshape_util.is_fully_defined(s_shape):\n    return tensorshape_util.as_list(s_shape)\n\n  \n  d_shape = tf.shape(input=tensors[0])\n  for t in tensors[1:]:\n    d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t))\n  return d_shape", "docstring": "Get broadcast shape as a Python list of integers (preferred) or `Tensor`.\n\nArgs:\n*tensors:  One or more `Tensor` objects (already converted!).\n\nReturns:\nbroadcast shape:  Python list (if shapes determined statically), otherwise\nan `int32` `Tensor`.", "source": "juraj-google-style"}
{"code": "def get_timestamp(self, cycle=None, dataset_number=None,\n                      in_minutes=False, full=True):\n        \n\n        dataset_number = self._validate_dataset_number(dataset_number)\n        if dataset_number is None:\n            self._report_empty_dataset()\n            return\n        cycle_index_header = self.headers_normal.cycle_index_txt\n        timestamp_header = self.headers_normal.test_time_txt\n\n        v = pd.Series()\n        test = self.datasets[dataset_number].dfdata\n        if cycle:\n            c = test[(test[cycle_index_header] == cycle)]\n            if not self.is_empty(c):\n                v = c[timestamp_header]\n\n        else:\n            if not full:\n                self.logger.debug(\"getting timestapm for all cycles\")\n                v = []\n                no_cycles = np.amax(test[cycle_index_header])\n                for j in range(1, no_cycles + 1):\n                    txt = \"Cycle  %i:  \" % j\n                    self.logger.debug(txt)\n                    c = test[(test[cycle_index_header] == j)]\n                    v.append(c[timestamp_header])\n            else:\n                self.logger.debug(\"returning full timestamp col\")\n                v = test[timestamp_header]\n                if in_minutes and v is not None:\n                    v /= 60.0\n        if in_minutes and v is not None:\n            v /= 60.0\n        return v", "docstring": "Returns timestamps (in sec or minutes (if in_minutes==True)).\n\nArgs:\ncycle: cycle number (all if None)\ndataset_number: first dataset if None\nin_minutes: return values in minutes instead of seconds if True\nfull: valid only for cycle=None (i.e. all cycles), returns the full\npandas.Series if True, else a list of pandas.Series\n\nReturns:\npandas.Series (or list of pandas.Series if cycle=None og full=False)", "source": "juraj-google-style"}
{"code": "def __init__(self, **kwargs):\n        \n        try:\n            arguments = Adapter(CollectorStage.schema_complete().validate(kwargs))\n            self.stage = arguments.stage\n            self.status = arguments.status\n            self.events = arguments.events\n        except SchemaError as exception:\n            Logger.get_logger(__name__).error(exception)\n            raise RuntimeError(str(exception))", "docstring": "Initializing and validating fields.\n\nArgs:\nkwargs (dict): application command line options.\n\nRaises:\nRuntimeError: when validation of parameters has failed.", "source": "juraj-google-style"}
{"code": "def dotcase(text, acronyms=None):\n    \n    words, _case, _sep = case_parse.parse_case(text, acronyms)\n    return '.'.join([w.lower() for w in words])", "docstring": "Return text in dot.case style.\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> dotcase(\"hello world\")\n'hello.world'\n>>> dotcase(\"helloHTMLWorld\", True, [\"HTML\"])\n'hello.html.world'", "source": "juraj-google-style"}
{"code": "def get_aws_session(account):\n    \n    from cloud_inquisitor.config import dbconfig\n    from cloud_inquisitor.plugins.types.accounts import AWSAccount\n\n    if not isinstance(account, AWSAccount):\n        raise InquisitorError('Non AWSAccount passed to get_aws_session, got {}'.format(account.__class__.__name__))\n\n    \n    session = get_local_aws_session()\n    if session.get_credentials().method in ['iam-role', 'env', 'explicit']:\n        sts = session.client('sts')\n    else:\n        \n        \n        temp_sts = session.client('sts')\n\n        audit_sts_role = temp_sts.assume_role(\n            RoleArn=app_config.aws_api.instance_role_arn,\n            RoleSessionName='inquisitor'\n        )\n        sts = boto3.session.Session(\n            audit_sts_role['Credentials']['AccessKeyId'],\n            audit_sts_role['Credentials']['SecretAccessKey'],\n            audit_sts_role['Credentials']['SessionToken']\n        ).client('sts')\n\n    role = sts.assume_role(\n        RoleArn='arn:aws:iam::{}:role/{}'.format(\n            account.account_number,\n            dbconfig.get('role_name', default='cinq_role')\n        ),\n        RoleSessionName='inquisitor'\n    )\n\n    sess = boto3.session.Session(\n        role['Credentials']['AccessKeyId'],\n        role['Credentials']['SecretAccessKey'],\n        role['Credentials']['SessionToken']\n    )\n\n    return sess", "docstring": "Function to return a boto3 Session based on the account passed in the first argument.\n\nArgs:\naccount (:obj:`Account`): Account to create the session object for\n\nReturns:\n:obj:`boto3:boto3.session.Session`", "source": "juraj-google-style"}
{"code": "def resnet50(pretrained=False, **kwargs):\n    \n    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n    return model", "docstring": "Constructs a ResNet-50 model.\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"}
{"code": "def reverse_transform(self, column):\n        \n        self.check_data_type()\n\n        return pd.DataFrame({self.col_name: np.log(column[self.col_name])})", "docstring": "Applies the natural logarithm function to turn positive values into real ranged values.\n\nArgs:\ncolumn (pandas.DataFrame): Data to transform.\n\nReturns:\npd.DataFrame", "source": "juraj-google-style"}
{"code": "def _get_table_names(statement):\n    parts = statement.to_unicode().split()\n    tables = set()\n    for (i, token) in enumerate(parts):\n        if ((token.lower() == 'from') or token.lower().endswith('join')):\n            tables.add(parts[(i + 1)].rstrip(';'))\n    return list(tables)", "docstring": "Returns table names found in the query.\n\nNOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well.\n\nArgs:\nstatement (sqlparse.sql.Statement): parsed by sqlparse sql statement.\n\nReturns:\nlist of str", "source": "codesearchnet"}
{"code": "def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):\n    assert ((len(x) > 1) and (len(y) > 1)), 'Not enough data objects to train on (minimum is at least two, you have (x: {0}) and (y: {1}))'.format(len(x), len(y))\n    sets = namedtuple('Datasets', ['train', 'test'])\n    (x_train, x_test, y_train, y_test) = train_test_split(x, y, random_state=seed, shuffle=False)\n    x = sets(x_train, x_test)\n    y = sets(y_train, y_test)\n    if ((model_type == 'random_forest') or (model_type == 'rf')):\n        model = rf.RandomForest(x, y, random_state=seed, **kwargs)\n    elif ((model_type == 'deep_neural_network') or (model_type == 'dnn')):\n        model = dnn.DeepNeuralNetwork(x, y, **kwargs)\n    else:\n        raise ValueError('Invalid model type kwarg')\n    return model", "docstring": "Initializes a machine learning model\n\nArgs:\nx: Pandas DataFrame, X axis of features\ny: Pandas Series, Y axis of targets\nmodel_type: Machine Learning model to use\nValid values: 'random_forest'\nseed: Random state to use when splitting sets and creating the model\n**kwargs: Scikit Learn's RandomForestClassifier kwargs\n\nReturns:\nTrained model instance of model_type", "source": "codesearchnet"}
{"code": "def emit(self, value):\n    \n    if not self._tstate.output_writer:\n      logging.error(\"emit is called, but no output writer is set.\")\n      return\n    self._tstate.output_writer.write(value)", "docstring": "Emits a value to output writer.\n\nArgs:\nvalue: a value of type expected by the output writer.", "source": "juraj-google-style"}
{"code": "def _set_read_only_resource_inputs_attr(op, branch_graphs):\n    read_only_indices = set(range(len(op.inputs) - 1))\n    for branch_graph in branch_graphs:\n        assert len(branch_graph.inputs) == len(op.inputs) - 1, 'should never happen'\n        if not read_only_indices:\n            break\n        branch_read_only_indices = acd.get_read_only_resource_input_indices_graph(branch_graph)\n        read_only_indices = read_only_indices.intersection(branch_read_only_indices)\n    read_only_indices = [i + 1 for i in read_only_indices]\n    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, sorted(read_only_indices))", "docstring": "Sets the list of resource inputs which are read-only.\n\nThis is used by AutomaticControlDependencies.\n\nArgs:\nop: If or Case Operation.\nbranch_graphs: List of branch FuncGraphs.", "source": "github-repos"}
{"code": "def get_javascript_error(self, return_type='string'):\n    if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:\n        js_errors = self._driver.execute_script('return window.jsErrors; window.jsErrors = [];')\n        if (not js_errors):\n            js_errors = []\n        if (return_type == 'list'):\n            if len(js_errors):\n                return js_errors\n            else:\n                return []\n        elif len(js_errors):\n            return os.linesep.join(js_errors)\n        else:\n            return self.no_javascript_error_string\n    elif (return_type == 'list'):\n        return []\n    else:\n        return self.no_javascript_error_string", "docstring": "Return the gathered javascript error\n\nArgs:\nreturn_type: 'string' | 'list'; default: 'string'", "source": "codesearchnet"}
{"code": "def inflate_nd_checker(identifier, definition):\n    if isinstance(definition, bool):\n        return Checker(name=identifier, passes=definition)\n    elif isinstance(definition, dict):\n        return Checker(definition.pop('name', identifier), **definition)\n    else:\n        raise ValueError(('%s type is not supported for no-data checkers, use bool or dict' % type(definition)))", "docstring": "Inflate a no-data checker from a basic definition.\n\nArgs:\nidentifier (str): the no-data checker identifier / name.\ndefinition (bool/dict): a boolean acting as \"passes\" or a full\ndict definition with \"passes\" and \"allow_failure\".\n\nReturns:\nChecker: a checker instance.\n\nRaises:\nValueError: when the definition type is not bool or dict.", "source": "codesearchnet"}
{"code": "def get_all_threads(self, expand=False):\n    if (not expand):\n        return self._request_threads(self._url.catalog())\n    thread_ids = self.get_all_thread_ids()\n    threads = [self.get_thread(id, raise_404=False) for id in thread_ids]\n    return filter(None, threads)", "docstring": "Return every thread on this board.\n\nIf not expanded, result is same as get_threads run across all board pages,\nwith last 3-5 replies included.\n\nUses the catalog when not expanding, and uses the flat thread ID listing\nat /{board}/threads.json when expanding for more efficient resource usage.\n\nIf expanded, all data of all threads is returned with no omitted posts.\n\nArgs:\nexpand (bool): Whether to download every single post of every thread.\nIf enabled, this option can be very slow and bandwidth-intensive.\n\nReturns:\nlist of :mod:`basc_py4chan.Thread`: List of Thread objects representing every thread on this board.", "source": "codesearchnet"}
{"code": "def init(self, force_deploy=False, client=None):\n        \n        _force_deploy = self.provider_conf.force_deploy\n        self.provider_conf.force_deploy = _force_deploy or force_deploy\n        self._provider_conf = self.provider_conf.to_dict()\n        r = api.Resources(self._provider_conf, client=client)\n        r.launch()\n        roles = r.get_roles()\n        networks = r.get_networks()\n\n        return (_to_enos_roles(roles),\n                _to_enos_networks(networks))", "docstring": "Reserve and deploys the nodes according to the resources section\n\nIn comparison to the vagrant provider, networks must be characterized\nas in the networks key.\n\nArgs:\nforce_deploy (bool): True iff the environment must be redeployed\nRaises:\nMissingNetworkError: If one network is missing in comparison to\nwhat is claimed.\nNotEnoughNodesError: If the `min` constraints can't be met.", "source": "juraj-google-style"}
{"code": "def identity(self):\n    return self._implementation.identity()", "docstring": "Returns a TensorArray with the same content and properties.\n\nReturns:\nA new TensorArray object with flow that ensures the control dependencies\nfrom the contexts will become control dependencies for writes, reads, etc.\nUse this object for all subsequent operations.", "source": "github-repos"}
{"code": "def __init__(self, name, description, *labels):\n    super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels), name, description, *labels)", "docstring": "Creates a new IntGauge.\n\nArgs:\nname: name of the new metric.\ndescription: description of the new metric.\n*labels: The label list of the new metric.", "source": "github-repos"}
{"code": "def _ExtractRequestSummaryFields(document):\n    headers = document.childAtPath('Header/RequestHeader')\n    body = document.childAtPath('Body')\n    summary_fields = {'methodName': body.getChildren()[0].name}\n    client_customer_id = headers.getChild('clientCustomerId')\n    if (client_customer_id is not None):\n        summary_fields['clientCustomerId'] = client_customer_id.text\n    network_code = headers.getChild('networkCode')\n    if (network_code is not None):\n        summary_fields['networkCode'] = network_code.text\n    return summary_fields", "docstring": "Extract logging fields from the request's suds.sax.element.Element.\n\nArgs:\ndocument: A suds.sax.element.Element instance containing the API request.\n\nReturns:\nA dict mapping logging field names to their corresponding value.", "source": "codesearchnet"}
{"code": "def remove_node(self, node):\n    if (node not in self.node_list):\n        return\n    self.node_list.remove(node)\n    for n in self.node_list:\n        n.link_list = [link for link in n.link_list if (link.target != node)]", "docstring": "Remove a node from ``self.node_list`` and links pointing to it.\n\nIf ``node`` is not in the graph, do nothing.\n\nArgs:\nnode (Node): The node to be removed\n\nReturns: None\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> graph.remove_node(node_1)\n>>> len(graph.node_list)\n0", "source": "codesearchnet"}
{"code": "def _prefix_from_prefix_int(self, prefixlen):\n    if (not isinstance(prefixlen, (int, long))):\n        raise NetmaskValueError(('%r is not an integer' % prefixlen))\n    prefixlen = int(prefixlen)\n    if (not (0 <= prefixlen <= self._max_prefixlen)):\n        raise NetmaskValueError(('%d is not a valid prefix length' % prefixlen))\n    return prefixlen", "docstring": "Validate and return a prefix length integer.\n\nArgs:\nprefixlen: An integer containing the prefix length.\n\nReturns:\nThe input, possibly converted from long to int.\n\nRaises:\nNetmaskValueError: If the input is not an integer, or out of range.", "source": "codesearchnet"}
{"code": "def matches(self, other, **kwargs):\n        \n        from pymatgen.analysis.structure_matcher import StructureMatcher\n        m = StructureMatcher(**kwargs)\n        return m.fit(Structure.from_sites(self), Structure.from_sites(other))", "docstring": "Check whether this structure is similar to another structure.\nBasically a convenience method to call structure matching fitting.\n\nArgs:\nother (IStructure/Structure): Another structure.\n**kwargs: Same **kwargs as in\n:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.\n\nReturns:\n(bool) True is the structures are similar under some affine\ntransformation.", "source": "juraj-google-style"}
{"code": "def are_equal_elements(a_el, b_el):\n    \n    if a_el.tagName != b_el.tagName:\n        return False\n    if sorted(a_el.attributes.items()) != sorted(b_el.attributes.items()):\n        return False\n    if len(a_el.childNodes) != len(b_el.childNodes):\n        return False\n    for a_child_el, b_child_el in zip(a_el.childNodes, b_el.childNodes):\n        if a_child_el.nodeType != b_child_el.nodeType:\n            return False\n        if (\n            a_child_el.nodeType == a_child_el.TEXT_NODE\n            and a_child_el.data != b_child_el.data\n        ):\n            return False\n        if a_child_el.nodeType == a_child_el.ELEMENT_NODE and not are_equal_elements(\n            a_child_el, b_child_el\n        ):\n            return False\n    return True", "docstring": "Normalize and compare ElementTrees for equality.\n\nArgs:\na_el: ElementTree\nb_el: ElementTree\nElementTrees to compare for equality.\n\nReturns:\nbool: ``True`` if the ElementTrees are semantically equivalent.", "source": "juraj-google-style"}
{"code": "def build_constraint(cls, fhir_path_expression: str, key: str='key-1', severity: codes_pb2.ConstraintSeverityCode.Value=codes_pb2.ConstraintSeverityCode.ERROR) -> datatypes_pb2.ElementDefinition.Constraint:\n    return datatypes_pb2.ElementDefinition.Constraint(key=datatypes_pb2.Id(value=key), expression=datatypes_pb2.String(value=fhir_path_expression), severity=datatypes_pb2.ElementDefinition.Constraint.SeverityCode(value=severity))", "docstring": "Returns an `ElementDefinition.Constraint` for a FHIRPath expression.\n\nArgs:\nfhir_path_expression: The raw FHIRPath expression.\nkey: The FHIRPath constraint unique identifier. Defaults to 'key-1'.\nseverity: The constraint severity.  Defaults to ERROR.\n\nReturns:\nAn instance of `ElementDefinition.Constraint` capturing the raw underlying\n`fhir_path_expression`.", "source": "github-repos"}
{"code": "def _do_pass(self, pass_, dag, options):\n    if (not options['ignore_requires']):\n        for required_pass in pass_.requires:\n            dag = self._do_pass(required_pass, dag, options)\n    if (pass_ not in self.valid_passes):\n        if pass_.is_transformation_pass:\n            pass_.property_set = self.fenced_property_set\n            new_dag = pass_.run(dag)\n            if (not isinstance(new_dag, DAGCircuit)):\n                raise TranspilerError(('Transformation passes should return a transformed dag.The pass %s is returning a %s' % (type(pass_).__name__, type(new_dag))))\n            dag = new_dag\n        elif pass_.is_analysis_pass:\n            pass_.property_set = self.property_set\n            pass_.run(FencedDAGCircuit(dag))\n        else:\n            raise TranspilerError('I dont know how to handle this type of pass')\n        self._update_valid_passes(pass_, options['ignore_preserves'])\n    return dag", "docstring": "Do a pass and its \"requires\".\n\nArgs:\npass_ (BasePass): Pass to do.\ndag (DAGCircuit): The dag on which the pass is ran.\noptions (dict): PassManager options.\nReturns:\nDAGCircuit: The transformed dag in case of a transformation pass.\nThe same input dag in case of an analysis pass.\nRaises:\nTranspilerError: If the pass is not a proper pass instance.", "source": "codesearchnet"}
{"code": "def _query(self, key_pos: int, src: Any, use_inferred: bool) -> Any:\n    if key_pos == len(self._keys):\n        return src\n    key = self.keys[key_pos]\n    if hasattr(src, 'sym_hasattr'):\n        if src.sym_hasattr(key):\n            if use_inferred:\n                v = src.sym_inferred(key)\n            else:\n                v = src.sym_getattr(key)\n            return self._query(key_pos + 1, v, use_inferred)\n    elif hasattr(src, '__getitem__'):\n        if isinstance(key, int):\n            if not hasattr(src, '__len__'):\n                raise KeyError(f\"Cannot query index ({key}) on object ({src!r}): '__len__' does not exist.\")\n            if key < len(src):\n                return self._query(key_pos + 1, src[key], use_inferred)\n        else:\n            if not hasattr(src, '__contains__'):\n                raise KeyError(f\"Cannot query key ({key!r}) on object ({src!r}): '__contains__' does not exist.\")\n            if key in src:\n                return self._query(key_pos + 1, src[key], use_inferred)\n    else:\n        raise KeyError(f\"Cannot query sub-key {key!r} of object ({src!r}): '__getitem__' does not exist. (path={KeyPath(self.keys[:key_pos])})\")\n    raise KeyError(f'Path {KeyPath(self._keys[:key_pos + 1])!r} does not exist: key {key!r} is absent from innermost value {src!r}.')", "docstring": "Query the value of current path up to key_pos from an object.\n\nArgs:\nkey_pos: Start position in self._keys.\nsrc: Source value to query.\nuse_inferred: If True, infer `pg.Inferential` values. Otherwise returns\ntheir symbolic form. Applicable only for symbolic values.\n\nReturns:\nValue from src if path exists.\n\nRaises:\nKeyError: Path doesn't exist in src.", "source": "github-repos"}
{"code": "def start(self, request: Request) -> Response:\n        \n        if self._session_state != SessionState.ready:\n            raise RuntimeError('Session already started')\n\n        assert not self._request\n        self._request = request\n        _logger.debug(__('Client fetch request {0}.', request))\n\n        connection = yield from self._acquire_request_connection(request)\n        full_url = connection.proxied and not connection.tunneled\n\n        self._stream = stream = self._stream_factory(connection)\n\n        yield from self._stream.reconnect()\n\n        request.address = connection.address\n\n        self.event_dispatcher.notify(self.Event.begin_request, request)\n        write_callback = functools.partial(self.event_dispatcher.notify, self.Event.request_data)\n        stream.data_event_dispatcher.add_write_listener(write_callback)\n\n        yield from stream.write_request(request, full_url=full_url)\n\n        if request.body:\n            assert 'Content-Length' in request.fields\n            length = int(request.fields['Content-Length'])\n            yield from stream.write_body(request.body, length=length)\n\n        stream.data_event_dispatcher.remove_write_listener(write_callback)\n        self.event_dispatcher.notify(self.Event.end_request, request)\n\n        read_callback = functools.partial(self.event_dispatcher.notify, self.Event.response_data)\n        stream.data_event_dispatcher.add_read_listener(read_callback)\n\n        self._response = response = yield from stream.read_response()\n        response.request = request\n\n        self.event_dispatcher.notify(self.Event.begin_response, response)\n\n        self._session_state = SessionState.request_sent\n\n        return response", "docstring": "Begin a HTTP request\n\nArgs:\nrequest: Request information.\n\nReturns:\nA response populated with the HTTP headers.\n\nOnce the headers are received, call :meth:`download`.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def true_events(network, previous_state, current_state, next_state, indices=None, major_complex=None):\n    if major_complex:\n        nodes = major_complex.subsystem.node_indices\n    elif indices:\n        nodes = indices\n    else:\n        major_complex = compute.major_complex(network, current_state)\n        nodes = major_complex.subsystem.node_indices\n    return events(network, previous_state, current_state, next_state, nodes)", "docstring": "Return all mechanisms that have true causes and true effects within the\ncomplex.\n\nArgs:\nnetwork (Network): The network to analyze.\nprevious_state (tuple[int]): The state of the network at ``t - 1``.\ncurrent_state (tuple[int]): The state of the network at ``t``.\nnext_state (tuple[int]): The state of the network at ``t + 1``.\n\nKeyword Args:\nindices (tuple[int]): The indices of the major complex.\nmajor_complex (AcSystemIrreducibilityAnalysis): The major complex. If\n``major_complex`` is given then ``indices`` is ignored.\n\nReturns:\ntuple[Event]: List of true events in the major complex.", "source": "codesearchnet"}
{"code": "def Search(self, artifact, os_name=None, cpe=None, label=None):\n    hit = (lambda x: ((x[0] == x[1]) or (not x[0])))\n    seq = [(artifact, self.artifact), (os_name, self.os_name), (cpe, self.cpe), (label, self.label)]\n    return all(map(hit, seq))", "docstring": "Whether the condition contains the specified values.\n\nArgs:\nartifact: A string identifier for the artifact.\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nTrue if the values match the non-empty query attributes.\nEmpty query attributes are ignored in the comparison.", "source": "codesearchnet"}
{"code": "def __init__(self, parent, isolated=True, function_name=None):\n    self.parent = parent\n    self.isolated = isolated\n    self.function_name = function_name\n    self.isolated_names = set()\n    self.read = set()\n    self.modified = set()\n    self.deleted = set()\n    self.bound = set()\n    self.globals = set()\n    self.nonlocals = set()\n    self.annotations = set()\n    self.params = weakref.WeakValueDictionary()\n    self.is_final = False", "docstring": "Create a new scope.\n\nArgs:\nparent: A Scope or None.\nisolated: Whether the scope is isolated, that is, whether variables\nmodified in this scope should be considered modified in the parent\nscope.\nfunction_name: Name of the function owning this scope.", "source": "github-repos"}
{"code": "def _is_tensor(x):\n    return isinstance(x, (tensor_lib.Tensor, variables.Variable))", "docstring": "Returns `True` if `x` is a symbolic tensor-like object.\n\nArgs:\nx: A python object to check.\n\nReturns:\n`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.", "source": "github-repos"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor':\n    class_queries_logits = outputs.class_queries_logits\n    masks_queries_logits = outputs.masks_queries_logits\n    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n    masks_probs = masks_queries_logits.sigmoid()\n    segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n    batch_size = class_queries_logits.shape[0]\n    if target_sizes is not None:\n        if batch_size != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        semantic_segmentation = []\n        for idx in range(batch_size):\n            resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = segmentation.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports\nPyTorch.\n\nArgs:\noutputs ([`MaskFormerForInstanceSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple[int, int]]`, *optional*):\nList of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested\nfinal size (height, width) of each prediction. If left to None, predictions will not be resized.\nReturns:\n`List[torch.Tensor]`:\nA list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\ncorresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each\n`torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def dew_point_temperature(self, value=99.9):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `dew_point_temperature`'.format(value))\n            if value <= -70.0:\n                raise ValueError('value need to be greater -70.0 '\n                                 'for field `dew_point_temperature`')\n            if value >= 70.0:\n                raise ValueError('value need to be smaller 70.0 '\n                                 'for field `dew_point_temperature`')\n\n        self._dew_point_temperature = value", "docstring": "Corresponds to IDD Field `dew_point_temperature`\n\nArgs:\nvalue (float): value for IDD Field `dew_point_temperature`\nUnit: C\nvalue > -70.0\nvalue < 70.0\nMissing value: 99.9\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "async def enqueue(self, query, queue_index=None, stop_current=False, shuffle=False):\n    if ((query is None) or (query == '')):\n        return\n    self.statuslog.info('Parsing {}'.format(query))\n    self.logger.debug('Enqueueing from query')\n    indexnum = None\n    if (queue_index is not None):\n        try:\n            indexnum = (int(queue_index) - 1)\n        except TypeError:\n            self.statuslog.error('Play index argument must be a number')\n            return\n        except ValueError:\n            self.statuslog.error('Play index argument must be a number')\n            return\n    if (not self.vready):\n        self.parse_query(query, indexnum, stop_current, shuffle)\n    else:\n        parse_thread = threading.Thread(target=self.parse_query, args=[query, indexnum, stop_current, shuffle])\n        parse_thread.start()", "docstring": "Queues songs based on either a YouTube search or a link\n\nArgs:\nquery (str): Either a search term or a link\nqueue_index (str): The queue index to enqueue at (None for end)\nstop_current (bool): Whether to stop the current song after the songs are queued\nshuffle (bool): Whether to shuffle the added songs", "source": "codesearchnet"}
{"code": "def get_clusters_interfaces(clusters, extra_cond=(lambda nic: True)):\n    interfaces = {}\n    for cluster in clusters:\n        nics = get_cluster_interfaces(cluster, extra_cond=extra_cond)\n        interfaces.setdefault(cluster, nics)\n    return interfaces", "docstring": "Returns for each cluster the available cluster interfaces\n\nArgs:\nclusters (str): list of the clusters\nextra_cond (lambda): extra predicate to filter network card retrieved\nfrom the API. E.g lambda nic: not nic['mounted'] will retrieve all the\nusable network cards that are not mounted by default.\n\nReturns:\ndict of cluster with their associated nic names\n\nExamples:\n.. code-block:: python\n\n# pseudo code\nactual = get_clusters_interfaces([\"paravance\"])\nexpected = {\"paravance\": [\"eth0\", \"eth1\"]}\nassertDictEquals(expected, actual)", "source": "codesearchnet"}
{"code": "def visualize_instance_html(self, exp, label, div_name, exp_object_name,\n                                text=True, opacity=True):\n        \n        if not text:\n            return u''\n        text = (self.indexed_string.raw_string()\n                .encode('utf-8', 'xmlcharrefreplace').decode('utf-8'))\n        text = re.sub(r'[<>&]', '|', text)\n        exp = [(self.indexed_string.word(x[0]),\n                self.indexed_string.string_position(x[0]),\n                x[1]) for x in exp]\n        all_occurrences = list(itertools.chain.from_iterable(\n            [itertools.product([x[0]], x[1], [x[2]]) for x in exp]))\n        all_occurrences = [(x[0], int(x[1]), x[2]) for x in all_occurrences]\n        ret =  % (exp_object_name, json.dumps(all_occurrences), label,\n                   json.dumps(text), div_name, json.dumps(opacity))\n        return ret", "docstring": "Adds text with highlighted words to visualization.\n\nArgs:\nexp: list of tuples [(id, weight), (id,weight)]\nlabel: label id (integer)\ndiv_name: name of div object to be used for rendering(in js)\nexp_object_name: name of js explanation object\ntext: if False, return empty\nopacity: if True, fade colors according to weight", "source": "juraj-google-style"}
{"code": "def remove_indirect_links(g, alg='aracne', **kwargs):\n    alg = {'aracne': aracne, 'nd': network_deconvolution, 'clr': clr}[alg]\n    mat = np.array(nx.adjacency_matrix(g).todense())\n    return nx.relabel_nodes(nx.DiGraph(alg(mat, **kwargs)), {idx: i for (idx, i) in enumerate(list(g.nodes()))})", "docstring": "Apply deconvolution to a networkx graph.\n\nArgs:\ng (networkx.Graph): Graph to apply deconvolution to\nalg (str): Algorithm to use ('aracne', 'clr', 'nd')\nkwargs (dict): extra options for algorithms\n\nReturns:\nnetworkx.Graph: graph with undirected links removed.", "source": "codesearchnet"}
{"code": "def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):\n    new_image_features = []\n    feature_lens = []\n    for image_idx, image_feature in enumerate(image_features):\n        if image_feature.shape[0] > 1:\n            base_image_feature = image_feature[0]\n            image_feature = image_feature[1:]\n            height = width = self.config.vision_config.image_size \n            num_patch_height, num_patch_width = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size)\n            if np.prod(image_feature.shape) % (num_patch_height * num_patch_width * height * width) != 0 and vision_feature_select_strategy == 'default':\n                logger.warning_once('Image feature shape does not line up with the provided patch size. You may be using the `default` vision_feature_select_strategy with a visual encoder that does not have CLS.')\n            image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)\n            image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()\n            image_feature = image_feature.flatten(1, 2).flatten(2, 3)\n            image_feature = unpad_image(image_feature, image_sizes[image_idx])\n            if image_newline is not None:\n                image_feature = torch.cat((image_feature, image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device, image_feature.dtype)), dim=-1)\n            image_feature = image_feature.flatten(1, 2).transpose(0, 1)\n            image_feature = torch.cat((base_image_feature, image_feature), dim=0)\n        else:\n            image_feature = image_feature[0]\n            if image_newline is not None:\n                image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)\n        new_image_features.append(image_feature)\n        feature_lens.append(image_feature.size(0))\n    feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device)\n    return (new_image_features, feature_lens)", "docstring": "Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.\n\nArgs:\nimage_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)\nList of image feature tensor, each contains all the visual feature of all patches.\nimage_sizes (`torch.Tensor` of shape `(num_images, 2)`)\nActual image size of each images (H, W).\nvision_feature_select_strategy (`str`)\nThe feature selection strategy used to select the vision feature from the vision backbone.\nimage_newline (`torch.Tensor` of shape `(embed_dim)`)\nNew line embedding vector.\nReturns:\nimage_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)\nfeature_lens (`List[int]`)\ntoken length of each image in image_features", "source": "github-repos"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates a `BuildTrigger` by its project ID and trigger ID. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BuildTrigger) The response message.", "source": "github-repos"}
{"code": "def get_image_and_mask(self, label, positive_only=True, hide_rest=False, num_features=5, min_weight=0.0):\n    if (label not in self.local_exp):\n        raise KeyError('Label not in explanation')\n    segments = self.segments\n    image = self.image\n    exp = self.local_exp[label]\n    mask = np.zeros(segments.shape, segments.dtype)\n    if hide_rest:\n        temp = np.zeros(self.image.shape)\n    else:\n        temp = self.image.copy()\n    if positive_only:\n        fs = [x[0] for x in exp if ((x[1] > 0) and (x[1] > min_weight))][:num_features]\n        for f in fs:\n            temp[(segments == f)] = image[(segments == f)].copy()\n            mask[(segments == f)] = 1\n        return (temp, mask)\n    else:\n        for (f, w) in exp[:num_features]:\n            if (np.abs(w) < min_weight):\n                continue\n            c = (0 if (w < 0) else 1)\n            mask[(segments == f)] = (1 if (w < 0) else 2)\n            temp[(segments == f)] = image[(segments == f)].copy()\n            temp[((segments == f), c)] = np.max(image)\n            for cp in [0, 1, 2]:\n                if (c == cp):\n                    continue\n        return (temp, mask)", "docstring": "Init function.\n\nArgs:\nlabel: label to explain\npositive_only: if True, only take superpixels that contribute to\nthe prediction of the label. Otherwise, use the top\nnum_features superpixels, which can be positive or negative\ntowards the label\nhide_rest: if True, make the non-explanation part of the return\nimage gray\nnum_features: number of superpixels to include in explanation\nmin_weight: TODO\n\nReturns:\n(image, mask), where image is a 3d numpy array and mask is a 2d\nnumpy array that can be used with\nskimage.segmentation.mark_boundaries", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is not None:\n        return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n    return [0] * len(token_ids_0) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def sample_observed_state(self, s: pd.Series) -> Dict:\n        \n\n        return {\n            n[0]: {\n                i.name: np.random.normal(s[n[0]] * i.mean, i.stdev)\n                for i in n[1][\"indicators\"].values()\n            }\n            for n in self.nodes(data=True)\n        }", "docstring": "Sample observed state vector. This is the implementation of the\nemission function.\n\nArgs:\ns: Latent state vector.\n\nReturns:\nObserved state vector.", "source": "juraj-google-style"}
{"code": "def get_mailcap_entry(self, url):\n        \n\n        for parser in mime_parsers.parsers:\n            if parser.pattern.match(url):\n                \n                \n                \n                \n                try:\n                    modified_url, content_type = parser.get_mimetype(url)\n                except Exception as e:\n                    \n                    \n                    _logger.warning('parser %s raised an exception', parser)\n                    _logger.exception(e)\n                    raise exceptions.MailcapEntryNotFound()\n                if not content_type:\n                    _logger.info('Content type could not be determined')\n                    raise exceptions.MailcapEntryNotFound()\n                elif content_type == 'text/html':\n                    _logger.info('Content type text/html, deferring to browser')\n                    raise exceptions.MailcapEntryNotFound()\n\n                command, entry = mailcap.findmatch(\n                    self._mailcap_dict, content_type, filename=modified_url)\n                if not entry:\n                    _logger.info('Could not find a valid mailcap entry')\n                    raise exceptions.MailcapEntryNotFound()\n\n                return command, entry\n\n        \n        raise exceptions.MailcapEntryNotFound()", "docstring": "Search through the mime handlers list and attempt to find the\nappropriate command to open the provided url with.\n\nWill raise a MailcapEntryNotFound exception if no valid command exists.\n\nParams:\nurl (text): URL that will be checked\n\nReturns:\ncommand (text): The string of the command that should be executed\nin a subprocess to open the resource.\nentry (dict): The full mailcap entry for the corresponding command", "source": "juraj-google-style"}
{"code": "def export(self, name=None):\n    with ops.name_scope(name, '%s_Export' % self.name, [self.resource_handle]):\n        exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n    exported_values.set_shape(exported_keys.get_shape().concatenate(self._value_shape))\n    return (exported_keys, exported_values)", "docstring": "Returns tensors of all keys and values in the table.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA pair of tensors with the first tensor containing all keys and the\nsecond tensors containing all values in the table.", "source": "github-repos"}
{"code": "def Logger(name, debug=False, facility=None):\n  \n  logger = logging.getLogger(name)\n  logger.handlers = []\n  logger.addHandler(logging.NullHandler())\n  logger.propagate = False\n  logger.setLevel(logging.DEBUG)\n  formatter = logging.Formatter(name + ': %(levelname)s %(message)s')\n\n  if debug:\n    \n    console_handler = logging.StreamHandler()\n    console_handler.setLevel(logging.DEBUG)\n    console_handler.setFormatter(formatter)\n    logger.addHandler(console_handler)\n\n  if facility:\n    \n    syslog_handler = logging.handlers.SysLogHandler(\n        address=constants.SYSLOG_SOCKET, facility=facility)\n    syslog_handler.setLevel(logging.INFO)\n    syslog_handler.setFormatter(formatter)\n    logger.addHandler(syslog_handler)\n\n  return logger", "docstring": "Get a logging object with handlers for sending logs to SysLog.\n\nArgs:\nname: string, the name of the logger which will be added to log entries.\ndebug: bool, True if debug output should write to the console.\nfacility: int, an encoding of the SysLog handler's facility and priority.\n\nReturns:\nlogging object, an object for logging entries.", "source": "juraj-google-style"}
{"code": "def strip_hidden(key_tuples, visibilities):\n    \n    result = []\n    for key_tuple in key_tuples:\n        if len(key_tuple) != len(visibilities):\n            raise ValueError(\n                \"length of key tuple {} is not equal to length of visibilities {}\".format(\n                    key_tuple, visibilities\n                )\n            )\n        filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible)\n        result.append(filtered_tuple)\n    return result", "docstring": "Filter each tuple according to visibility.\n\nArgs:\nkey_tuples: A sequence of tuples of equal length (i.e. rectangular)\nvisibilities: A sequence of booleans equal in length to the tuples contained in key_tuples.\n\nReturns:\nA sequence equal in length to key_tuples where the items are tuples with a length corresponding\nto the number of items in visibility which are True.", "source": "juraj-google-style"}
{"code": "def find_paths_referenced(self) -> Collection[str]:\n    _, paths = self._find_paths_referenced()\n    return set(paths)", "docstring": "Finds paths for any elements referenced in this expression.\n\nFor example, given the expression 'a.b.where(c > d.e).f' returns paths\n{'a', 'a.b', 'a.b.c', 'a.b.d', 'a.b.d.e', 'a.b.f'}\n\nReturns:\nA collections of paths referenced in the expression.", "source": "github-repos"}
{"code": "def debug_watch_keys(self, node_name, device_name=None):\n    try:\n        device_name = self._infer_device_name(device_name, node_name)\n    except ValueError:\n        return []\n    if node_name not in self._debug_watches[device_name]:\n        return []\n    watch_keys = []\n    for watched_slot in self._debug_watches[device_name][node_name]:\n        debug_ops = self._debug_watches[device_name][node_name][watched_slot]\n        for debug_op in debug_ops:\n            watch_keys.append(_get_tensor_watch_key(node_name, watched_slot, debug_op))\n    return watch_keys", "docstring": "Get all tensor watch keys of given node according to partition graphs.\n\nArgs:\nnode_name: (`str`) name of the node.\ndevice_name: (`str`) name of the device. If there is only one device or if\nnode_name exists on only one device, this argument is optional.\n\nReturns:\n(`list` of `str`) all debug tensor watch keys. Returns an empty list if\nthe node name does not correspond to any debug watch keys.\n\nRaises:\n`LookupError`: If debug watch information has not been loaded from\npartition graphs yet.", "source": "github-repos"}
{"code": "def _log_score(score):\n    \n    logger.info(\n        \"Score of ({}/{}) set for submission {}\"\n        .format(score.points_earned, score.points_possible, score.submission.uuid)\n    )", "docstring": "Log the creation of a score.\n\nArgs:\nscore (Score): The score model.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def instantiate(self, substitutions):\n        \n        param_dict = self.substitute_params(substitutions)\n        pkg, ident = self.name.rsplit(\".\", 1)\n        pkg = \"malcolm.modules.%s\" % pkg\n        try:\n            ob = importlib.import_module(pkg)\n        except ImportError as e:\n            raise_with_traceback(\n                ImportError(\"\\n%s:%d:\\n%s\" % (\n                    self.filename, self.lineno, e)))\n        try:\n            ob = getattr(ob, ident)\n        except AttributeError:\n            raise_with_traceback(\n                ImportError(\"\\n%s:%d:\\nPackage %r has no ident %r\" % (\n                    self.filename, self.lineno, pkg, ident)))\n        try:\n            model = MethodModel.from_callable(ob, returns=False)\n            args = model.validate(param_dict)\n            ret = ob(**args)\n        except Exception as e:\n            sourcefile = inspect.getsourcefile(ob)\n            lineno = inspect.getsourcelines(ob)[1]\n            raise_with_traceback(\n                YamlError(\"\\n%s:%d:\\n%s:%d:\\n%s\" % (\n                    self.filename, self.lineno, sourcefile, lineno, e)))\n        else:\n            return ret", "docstring": "Keep recursing down from base using dotted name, then call it with\nself.params and args\n\nArgs:\nsubstitutions (dict): Substitutions to make to self.param_dict\n\nReturns:\nThe found object called with (*args, map_from_d)\n\nE.g. if ob is malcolm.parts, and name is \"ca.CADoublePart\", then the\nobject will be malcolm.parts.ca.CADoublePart", "source": "juraj-google-style"}
{"code": "def nonzero_monies(self):\n    return [copy.copy(m) for m in self._money_obs if (m.amount != 0)]", "docstring": "Get a list of the underlying ``Money`` instances that are not zero\n\nReturns:\n([Money]): A list of zero or more money instances. Currencies will be unique.", "source": "codesearchnet"}
{"code": "def has_chosen(state, correct, msgs):\n    \n    if not issubclass(type(correct), int):\n        raise InstructorError(\n            \"Inside `has_chosen()`, the argument `correct` should be an integer.\"\n        )\n\n    student_process = state.student_process\n    if not isDefinedInProcess(MC_VAR_NAME, student_process):\n        raise InstructorError(\"Option not available in the student process\")\n    else:\n        selected_option = getOptionFromProcess(student_process, MC_VAR_NAME)\n        if not issubclass(type(selected_option), int):\n            raise InstructorError(\"selected_option should be an integer\")\n\n        if selected_option < 1 or correct < 1:\n            raise InstructorError(\n                \"selected_option and correct should be greater than zero\"\n            )\n\n        if selected_option > len(msgs) or correct > len(msgs):\n            raise InstructorError(\"there are not enough feedback messages defined\")\n\n        feedback_msg = msgs[selected_option - 1]\n\n        state.reporter.success_msg = msgs[correct - 1]\n\n        state.do_test(EqualTest(selected_option, correct, feedback_msg))", "docstring": "Test multiple choice exercise.\n\nTest for a MultipleChoiceExercise. The correct answer (as an integer) and feedback messages\nare passed to this function.\n\nArgs:\ncorrect (int): the index of the correct answer (should be an instruction). Starts at 1.\nmsgs (list(str)): a list containing all feedback messages belonging to each choice of the\nstudent. The list should have the same length as the number of options.", "source": "juraj-google-style"}
{"code": "def _initialize_splittable_and_unsplittable_dims(self, default_splittability, exception_dims_iterable=None):\n    default_dims = set()\n    exception_dims = set()\n    if exception_dims_iterable:\n        exception_dims.update(exception_dims_iterable)\n    for t in itertools.chain(self.inputs, self.outputs):\n        for dim_name in t.shape.dimension_names:\n            if (dim_name not in exception_dims):\n                default_dims.add(dim_name)\n    if (default_splittability == 'splittable'):\n        return (frozenset(default_dims), frozenset(exception_dims))\n    elif (default_splittability == 'unsplittable'):\n        return (frozenset(exception_dims), frozenset(default_dims))\n    else:\n        raise ValueError('default_splittability should be either \"splittable\" or \"unsplittable\" but was {}'.format(default_splittability))", "docstring": "Initializer for splittable_dims and unsplittable_dims.\n\nHelper method to categorize all dimensions in the input/output tensors as\neither splittable or unsplittable.\n\nArgs:\ndefault_splittability: a string which is either \"splittable\" or\n\"unsplittable\".\nexception_dims_iterable: an optional iterable of names of dimensions\nwhich are exceptions to the default splittability.\n\nReturns:\nsplittable_dims and unsplittable_dims, two frozensets of names of\ndimensions (strings)\n\nRaises:\nValueError: default_splittability is not one of \"splittable\" or\n\"unsplittable\".", "source": "codesearchnet"}
{"code": "def _source_file_paths_outside_tensorflow_py_library(code_defs, id_to_string):\n    file_ids = set()\n    for code_def in code_defs:\n        for trace in code_def.traces:\n            file_ids.add(trace.file_id)\n    non_tf_files = (id_to_string[file_id] for file_id in file_ids)\n    non_tf_files = (f for f in non_tf_files if not source_utils.guess_is_tensorflow_py_library(f) and gfile.Exists(f))\n    return non_tf_files", "docstring": "Extract source file paths outside TensorFlow Python library.\n\nArgs:\ncode_defs: An iterable of `CodeDef` protos, i.e., an iterable of stack\ntraces.\nid_to_string: A proto map from integer ids to strings.\n\nReturns:\nAn iterable of source file paths outside the TensorFlow Python library.", "source": "github-repos"}
{"code": "def ParseAccountInformation(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    display_name = self._GetRowValue(query_hash, row, 'given_displayname')\n    fullname = self._GetRowValue(query_hash, row, 'fullname')\n\n    \n    \n    username = '{0!s} <{1!s}>'.format(fullname, display_name)\n\n    event_data = SkypeAccountEventData()\n    event_data.country = self._GetRowValue(query_hash, row, 'country')\n    event_data.display_name = display_name\n    event_data.email = self._GetRowValue(query_hash, row, 'emails')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.username = username\n\n    timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed')\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, 'Authenticate Request')\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Last Online')\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Mood Event')\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent')\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Last Used')\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses account information.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row with account information.", "source": "juraj-google-style"}
{"code": "def __delitem__(self, keyword):\n        \n        status = False\n        if keyword:\n            if not self.case_sensitive:\n                keyword = keyword.lower()\n            current_dict = self.keyword_trie_dict\n            character_trie_list = []\n            for letter in keyword:\n                if letter in current_dict:\n                    character_trie_list.append((letter, current_dict))\n                    current_dict = current_dict[letter]\n                else:\n                    \n                    current_dict = None\n                    break\n            \n            if current_dict and self._keyword in current_dict:\n                \n                character_trie_list.append((self._keyword, current_dict))\n                character_trie_list.reverse()\n\n                for key_to_remove, dict_pointer in character_trie_list:\n                    if len(dict_pointer.keys()) == 1:\n                        dict_pointer.pop(key_to_remove)\n                    else:\n                        \n                        \n                        dict_pointer.pop(key_to_remove)\n                        break\n                \n                status = True\n                self._terms_in_trie -= 1\n        return status", "docstring": "To remove keyword from the dictionary\npass the keyword and the clean name it maps to.\n\nArgs:\nkeyword : string\nkeyword that you want to remove if it's present\n\nExamples:\n>>> keyword_processor.add_keyword('Big Apple')\n>>> del keyword_processor['Big Apple']", "source": "juraj-google-style"}
{"code": "class Poisson(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='poisson', dtype=None):\n        super().__init__(fn=poisson, name=name, dtype=dtype)\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the Poisson metric between `y_true` and `y_pred`.\n\nFormula:\n\n```python\nmetric = y_pred - y_true * log(y_pred)\n```\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExamples:\n\n>>> m = keras.metrics.Poisson()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result()\n0.49999997\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result()\n0.99999994\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(optimizer='sgd',\nloss='mse',\nmetrics=[keras.metrics.Poisson()])\n```", "source": "github-repos"}
{"code": "def imshow(img, win_name='', wait_time=0):\n    cv2.imshow(win_name, imread(img))\n    cv2.waitKey(wait_time)", "docstring": "Show an image.\n\nArgs:\nimg (str or ndarray): The image to be displayed.\nwin_name (str): The window name.\nwait_time (int): Value of waitKey param.", "source": "codesearchnet"}
{"code": "class CLIPEncoder(nn.Module):\n\n    def __init__(self, config: CLIPConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    @can_return_tuple\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`CLIPEncoderLayer`].\n\nArgs:\nconfig: CLIPConfig", "source": "github-repos"}
{"code": "def _load_config(self, client_secrets_file, client_id, client_secret):\n    if (client_id and client_secret):\n        (self.client_id, self.client_secret) = (client_id, client_secret)\n        return\n    if client_secrets_file:\n        self._load_client_secrets(client_secrets_file)\n        return\n    if ('GOOGLE_OAUTH2_CLIENT_SECRETS_FILE' in self.app.config):\n        self._load_client_secrets(self.app.config['GOOGLE_OAUTH2_CLIENT_SECRETS_FILE'])\n        return\n    try:\n        (self.client_id, self.client_secret) = (self.app.config['GOOGLE_OAUTH2_CLIENT_ID'], self.app.config['GOOGLE_OAUTH2_CLIENT_SECRET'])\n    except KeyError:\n        raise ValueError('OAuth2 configuration could not be found. Either specify the client_secrets_file or client_id and client_secret or set the app configuration variables GOOGLE_OAUTH2_CLIENT_SECRETS_FILE or GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET.')", "docstring": "Loads oauth2 configuration in order of priority.\n\nPriority:\n1. Config passed to the constructor or init_app.\n2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app\nconfig.\n3. Config passed via the GOOGLE_OAUTH2_CLIENT_ID and\nGOOGLE_OAUTH2_CLIENT_SECRET app config.\n\nRaises:\nValueError if no config could be found.", "source": "codesearchnet"}
{"code": "def ClaimNotificationsForCollection(cls, token=None, start_time=None, lease_time=200, collection=None):\n\n    class CollectionFilter(object):\n\n        def __init__(self, collection):\n            self.collection = collection\n\n        def FilterRecord(self, notification):\n            if (self.collection is None):\n                self.collection = notification.result_collection_urn\n            return (self.collection != notification.result_collection_urn)\n    f = CollectionFilter(collection)\n    results = []\n    with aff4.FACTORY.OpenWithLock(RESULT_NOTIFICATION_QUEUE, aff4_type=HuntResultQueue, lease_time=300, blocking=True, blocking_sleep_interval=15, blocking_lock_timeout=600, token=token) as queue:\n        for record in queue.ClaimRecords(record_filter=f.FilterRecord, start_time=start_time, timeout=lease_time, limit=100000):\n            results.append(record)\n    return (f.collection, results)", "docstring": "Return unclaimed hunt result notifications for collection.\n\nArgs:\ntoken: The security token to perform database operations with.\nstart_time: If set, an RDFDateTime indicating at what point to start\nclaiming notifications. Only notifications with a timestamp after this\npoint will be claimed.\nlease_time: How long to claim the notifications for.\ncollection: The urn of the collection to find notifications for. If unset,\nthe earliest (unclaimed) notification will determine the collection.\n\nReturns:\nA pair (collection, results) where collection is the collection\nthat notifications were retrieved for and results is a list of\nRecord objects which identify GrrMessage within the result\ncollection.", "source": "codesearchnet"}
{"code": "def create_win_salt_restart_task():\n    cmd = 'cmd'\n    args = '/c ping -n 3 127.0.0.1 && net stop salt-minion && net start salt-minion'\n    return __salt__['task.create_task'](name='restart-salt-minion', user_name='System', force=True, action_type='Execute', cmd=cmd, arguments=args, trigger_type='Once', start_date='1975-01-01', start_time='01:00')", "docstring": "Create a task in Windows task scheduler to enable restarting the salt-minion\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' service.create_win_salt_restart_task()", "source": "codesearchnet"}
{"code": "def init(dvc_dir):\n    config_file = os.path.join(dvc_dir, Config.CONFIG)\n    open(config_file, 'w+').close()\n    return Config(dvc_dir)", "docstring": "Initializes dvc config.\n\nArgs:\ndvc_dir (str): path to .dvc directory.\n\nReturns:\ndvc.config.Config: config object.", "source": "codesearchnet"}
{"code": "def build_nccl_all_reduce(input_tensors, red_op, un_op=None):\n    if red_op == math_ops.add:\n        output_tensors = nccl_ops.all_sum(input_tensors)\n    else:\n        raise ValueError('red_op not supported by NCCL all-reduce: ', red_op)\n    if un_op:\n        un_op_wrapped = []\n        for t in output_tensors:\n            with ops.colocate_with(t):\n                un_op_wrapped.append(un_op(t))\n        output_tensors = un_op_wrapped\n    return output_tensors", "docstring": "Build a subgraph that does one full all-reduce, using NCCL.\n\nArgs:\ninput_tensors: list of `tf.Tensor` of same-shape and type values to\nbe reduced.\nred_op: binary elementwise reduction operator. Must be one of\n{tf.add}\nun_op: optional unary elementwise Op to apply to fully-reduce values.\n\nReturns:\nlist of `tf.Tensor` of reduced values.\n\nRaises:\nValueError: red_op not supported.", "source": "github-repos"}
{"code": "class PatchTSMixerGatedAttention(nn.Module):\n\n    def __init__(self, in_size: int, out_size: int):\n        super().__init__()\n        self.attn_layer = nn.Linear(in_size, out_size)\n        self.attn_softmax = nn.Softmax(dim=-1)\n\n    def forward(self, inputs):\n        attn_weight = self.attn_softmax(self.attn_layer(inputs))\n        inputs = inputs * attn_weight\n        return inputs", "docstring": "Module that applies gated attention to input data.\n\nArgs:\nin_size (`int`): The input size.\nout_size (`int`): The output size.", "source": "github-repos"}
{"code": "def validate_format(self, **kwargs):\n        \n        args = dict(\n            dict_type=self._dict,\n            allow_no_value=self._allow_no_value,\n            inline_comment_prefixes=self._inline_comment_prefixes,\n            strict=self._strict,\n            empty_lines_in_values=self._empty_lines_in_values\n        )\n        args.update(kwargs)\n        parser = ConfigParser(**args)\n        updated_cfg = str(self)\n        parser.read_string(updated_cfg)", "docstring": "Call ConfigParser to validate config\n\nArgs:\nkwargs: are passed to :class:`configparser.ConfigParser`", "source": "juraj-google-style"}
{"code": "def get_project_details(self, project_id):\n    if (not is_valid_uuid(project_id)):\n        raise StorageArgumentException('Invalid UUID for project_id: {0}'.format(project_id))\n    return self._authenticated_request.to_endpoint('project/{}/'.format(project_id)).return_body().get()", "docstring": "Get information on a given project\n\nArgs:\nproject_id (str): The UUID of the requested project.\n\nReturns:\nA dictionary describing the project::\n\n{\nu'collab_id': 2271,\nu'created_by': u'303447',\nu'created_on': u'2017-03-10T12:50:06.077891Z',\nu'description': u'',\nu'entity_type': u'project',\nu'modified_by': u'303447',\nu'modified_on': u'2017-03-10T12:50:06.077946Z',\nu'name': u'2271',\nu'uuid': u'3abd8742-d069-44cf-a66b-2370df74a682'\n}\n\nRaises:\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def _resource_apply_dense(self, grad, handle):\n    raise NotImplementedError()", "docstring": "Add ops to apply dense gradients to the variable `handle`.\n\nArgs:\ngrad: a `Tensor` representing the gradient.\nhandle: a `Tensor` of dtype `resource` which points to the variable\nto be updated.\n\nReturns:\nAn `Operation` which updates the value of the variable.", "source": "github-repos"}
{"code": "def add_output(self, output):\n        \n        if not isinstance(output, Output):\n            raise TypeError('`output` must be an Output instance or None')\n        self.outputs.append(output)", "docstring": "Adds an output to a Transaction's list of outputs.\n\nArgs:\noutput (:class:`~bigchaindb.common.transaction.\nOutput`): An Output to be added to the\nTransaction.", "source": "juraj-google-style"}
{"code": "def is_bit_mask(enumeration, potential_mask):\n    if (not isinstance(potential_mask, six.integer_types)):\n        return False\n    mask_enumerations = (CryptographicUsageMask, ProtectionStorageMask, StorageStatusMask)\n    if (enumeration not in mask_enumerations):\n        return False\n    mask = 0\n    for value in [e.value for e in enumeration]:\n        if ((value & potential_mask) == value):\n            mask |= value\n    if (mask != potential_mask):\n        return False\n    return True", "docstring": "A utility function that checks if the provided value is a composite bit\nmask of enumeration values in the specified enumeration class.\n\nArgs:\nenumeration (class): One of the mask enumeration classes found in this\nfile. These include:\n* Cryptographic Usage Mask\n* Protection Storage Mask\n* Storage Status Mask\npotential_mask (int): A potential bit mask composed of enumeration\nvalues belonging to the enumeration class.\n\nReturns:\nTrue: if the potential mask is a valid bit mask of the mask enumeration\nFalse: otherwise", "source": "codesearchnet"}
{"code": "def filter(self, field_name, operand, value):\n    if (operand not in self._FILTER_OPERANDS):\n        raise ValueError('Operand must be one of {}'.format(', '.join(self._FILTER_OPERANDS)))\n    record_stub = record_factory(self._app)\n    field = record_stub.get_field(field_name)\n    self._raw['filters'].append({'fieldId': field.id, 'filterType': operand, 'value': field.get_report(value)})", "docstring": "Adds a filter to report\n\nNotes:\nAll filters are currently AND'ed together\n\nArgs:\nfield_name (str): Target field name to filter on\noperand (str): Operand used in comparison. See `swimlane.core.search` for options\nvalue: Target value used in comparision", "source": "codesearchnet"}
{"code": "def CreateDataTypeMap(self, definition_name):\n    data_type_definition = self._definitions_registry.GetDefinitionByName(definition_name)\n    if (not data_type_definition):\n        return None\n    return DataTypeMapFactory.CreateDataTypeMapByType(data_type_definition)", "docstring": "Creates a specific data type map by name.\n\nArgs:\ndefinition_name (str): name of the data type definition.\n\nReturns:\nDataTypeMap: data type map or None if the date type definition\nis not available.", "source": "codesearchnet"}
{"code": "def lex_index(n, k, lst):\n    \n    if len(lst) != k:\n        raise VisualizationError(\"list should have length k\")\n    comb = list(map(lambda x: n - 1 - x, lst))\n    dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])\n    return int(dualm)", "docstring": "Return  the lex index of a combination..\n\nArgs:\nn (int): the total number of options .\nk (int): The number of elements.\nlst (list): list\n\nReturns:\nint: returns int index for lex order\n\nRaises:\nVisualizationError: if length of list is not equal to k", "source": "juraj-google-style"}
{"code": "def get_site_orbital_dos(self, site, orbital):\n        \n        return Dos(self.efermi, self.energies, self.pdos[site][orbital])", "docstring": "Get the Dos for a particular orbital of a particular site.\n\nArgs:\nsite: Site in Structure associated with CompleteDos.\norbital: Orbital in the site.\n\nReturns:\nDos containing densities for orbital of site.", "source": "juraj-google-style"}
{"code": "def _init_profile_batch(self, profile_batch):\n    profile_batch_error_message = 'profile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: {}'.format(profile_batch)\n    if isinstance(profile_batch, str):\n        profile_batch = str(profile_batch).split(',')\n        profile_batch = nest.map_structure(int, profile_batch)\n    if isinstance(profile_batch, int):\n        self._start_batch = profile_batch\n        self._stop_batch = profile_batch\n    elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:\n        self._start_batch, self._stop_batch = profile_batch\n    else:\n        raise ValueError(profile_batch_error_message)\n    if self._start_batch < 0 or self._stop_batch < self._start_batch:\n        raise ValueError(profile_batch_error_message)\n    self._profiler_started = False\n    if self._start_batch > 0:\n        self._start_profiler(logdir='')\n        self._stop_profiler(save=False)\n    self._is_tracing = False\n    self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)", "docstring": "Validate profile_batch value and set the range of batches to profile.\nSets values of _start_batch and _stop_batch attributes,\nspecifying the start and stop batch to profile.\nSetting `profile_batch=0` disables profiling.\n\nArgs:\nprofile_batch: The range of batches to profile. Should be a non-negative\ninteger or a comma separated string of pair of positive integers. A pair\nof positive integers signify a range of batches to profile.\n\nRaises:\nValueError: If profile_batch is not an integer or a comma separated pair\nof positive integers.", "source": "github-repos"}
{"code": "def get_creator_by_name(name):\n    \n    return {'docker(container)': Container.creator,\n            'shell': Bash.creator, 'docker(image)': Image.creator,\n            'python': Script.creator, 'packer': Packer.creator,\n            'ansible(simple)': Ansible.creator}[name]", "docstring": "Get creator function by name.\n\nArgs:\nname (str): name of the creator function.\n\nReturns:\nfunction: creater function.", "source": "juraj-google-style"}
{"code": "def email_address(self, address, owner=None, **kwargs):\n        \n        return EmailAddress(self.tcex, address, owner=owner, **kwargs)", "docstring": "Create the Email Address TI object.\n\nArgs:\nowner:\naddress:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def camel_to_snake(name):\n    s1 = re.sub('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', name)\n    return re.sub('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1).lower()", "docstring": "Converts CamelCase to snake_case.\n\nArgs:\nname (string): The name to convert from CamelCase to snake_case.\n\nReturns:\nstring: Converted string.", "source": "codesearchnet"}
{"code": "def __edit_distance_alt(self, words):\n    words = [x.lower() for x in words]\n    return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]", "docstring": "Compute all strings that are 1 edits away from all the words using\nonly the letters in the corpus\n\nArgs:\nwords (list): The words for which to calculate the edit distance\nReturns:\nset: The set of strings that are edit distance two from the \\\nprovided words", "source": "codesearchnet"}
{"code": "def get_audio_features(self, input_features: torch.FloatTensor, feature_attention_mask: Optional[torch.LongTensor]=None, audio_feature_lengths: Optional[torch.LongTensor]=None):\n    if feature_attention_mask is not None:\n        audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)\n        input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0)\n    else:\n        audio_feature_lengths = None\n    audio_feat_lengths, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths(audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1))\n    feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)\n    audio_outputs = self.audio_tower(input_features, feature_lens=feature_lens, aftercnn_lens=audio_feat_lengths)\n    audio_features = audio_outputs.last_hidden_state\n    if audio_features.shape[0] != sum(audio_output_lengths.tolist()):\n        raise ValueError('length of audio_features should match audio_output_lengths')\n    return audio_features", "docstring": "Encodes audios into continuous embeddings that can be forwarded to the language model.\n\nArgs:\ninput_features (`torch.FloatTensor`):\nThe tensors corresponding to the input audios.\nfeature_attention_mask (`torch.LongTensor`, *optional*):\nMask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:\naudio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):\nThe length of feature shape of each audio in LLM.", "source": "github-repos"}
{"code": "def ReadTimestamp(filename):\n    if not os.path.exists(filename):\n        return None\n    try:\n        timestamp_file = open(filename, 'r')\n        timestamp_string = timestamp_file.read().strip()\n    except IOError as e:\n        logging.warning('error opening timestamp file: %s', e)\n        timestamp_string = None\n    else:\n        timestamp_file.close()\n    logging.debug('read timestamp %s from file %r', timestamp_string, filename)\n    if timestamp_string is not None:\n        try:\n            timestamp = time.strptime(timestamp_string + ' UTC', '%Y-%m-%dT%H:%M:%SZ %Z')\n        except ValueError as e:\n            logging.error('cannot parse timestamp file %r: %s', filename, e)\n            timestamp = None\n    else:\n        timestamp = None\n    logging.debug('Timestamp is: %r', timestamp)\n    now = time.gmtime()\n    logging.debug('      Now is: %r', now)\n    if timestamp > now:\n        logging.warning('timestamp %r (%r) from %r is in the future, now is %r', timestamp_string, time.mktime(timestamp), filename, time.mktime(now))\n        if time.mktime(timestamp) - time.mktime(now) >= 60 * 60:\n            logging.info('Resetting timestamp to now.')\n            timestamp = now\n    return timestamp", "docstring": "Return a timestamp from a file.\n\nThe timestamp file format is a single line, containing a string in the\nISO-8601 format YYYY-MM-DDThh:mm:ssZ (i.e. UTC time).  We do not support\nall ISO-8601 formats for reasons of convenience in the code.\n\nTimestamps internal to nss_cache deliberately do not carry milliseconds.\n\nArgs:\nfilename:  A String naming the file to read from.\n\nReturns:\nA time.struct_time, or None if the timestamp file doesn't\nexist or has errors.", "source": "github-repos"}
{"code": "def get_contact(self, response=None, nir=None, handle=None, retry_count=3, dt_format=None):\n    if (response or (nir == 'krnic')):\n        contact_response = response\n    else:\n        contact_response = self._net.get_http_raw(url=str(NIR_WHOIS[nir]['url']).format(handle), retry_count=retry_count, headers=NIR_WHOIS[nir]['request_headers'], request_type=NIR_WHOIS[nir]['request_type'])\n    return self.parse_fields(response=contact_response, fields_dict=NIR_WHOIS[nir]['contact_fields'], dt_format=dt_format, hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta']), is_contact=True)", "docstring": "The function for retrieving and parsing NIR whois data based on\nNIR_WHOIS contact_fields.\n\nArgs:\nresponse (:obj:`str`): Optional response object, this bypasses the\nlookup.\nnir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required\nif response is None.\nhandle (:obj:`str`): For NIRs that have separate contact queries\n(JPNIC), this is the contact handle to use in the query.\nDefaults to None.\nretry_count (:obj:`int`): The number of times to retry in case\nsocket errors, timeouts, connection resets, etc. are\nencountered. Defaults to 3.\ndt_format (:obj:`str`): The format of datetime fields if known.\nDefaults to None.\n\nReturns:\ndict: Mapping of the fields provided in contact_fields, to their\nparsed results.", "source": "codesearchnet"}
{"code": "def rmdir(path, dir_fd=None):\n    system = get_instance(path)\n    system.remove(system.ensure_dir_path(path))", "docstring": "Remove a directory.\n\nEquivalent to \"os.rmdir\".\n\nArgs:\npath (path-like object): Path or URL.\ndir_fd: directory descriptors;\nsee the os.rmdir() description for how it is interpreted.\nNot supported on cloud storage objects.", "source": "codesearchnet"}
{"code": "def set_viewbox(self, x, y, w, h):\n    self.attributes['viewBox'] = ('%s %s %s %s' % (x, y, w, h))\n    self.attributes['preserveAspectRatio'] = 'none'", "docstring": "Sets the origin and size of the viewbox, describing a virtual view area.\n\nArgs:\nx (int): x coordinate of the viewbox origin\ny (int): y coordinate of the viewbox origin\nw (int): width of the viewbox\nh (int): height of the viewbox", "source": "codesearchnet"}
{"code": "def to_grid_locator(latitude, longitude, precision='square'):\n    if (precision not in ('square', 'subsquare', 'extsquare')):\n        raise ValueError(('Unsupported precision value %r' % precision))\n    if (not ((- 90) <= latitude <= 90)):\n        raise ValueError(('Invalid latitude value %r' % latitude))\n    if (not ((- 180) <= longitude <= 180)):\n        raise ValueError(('Invalid longitude value %r' % longitude))\n    latitude += 90.0\n    longitude += 180.0\n    locator = []\n    field = int((longitude / LONGITUDE_FIELD))\n    locator.append(chr((field + 65)))\n    longitude -= (field * LONGITUDE_FIELD)\n    field = int((latitude / LATITUDE_FIELD))\n    locator.append(chr((field + 65)))\n    latitude -= (field * LATITUDE_FIELD)\n    square = int((longitude / LONGITUDE_SQUARE))\n    locator.append(str(square))\n    longitude -= (square * LONGITUDE_SQUARE)\n    square = int((latitude / LATITUDE_SQUARE))\n    locator.append(str(square))\n    latitude -= (square * LATITUDE_SQUARE)\n    if (precision in ('subsquare', 'extsquare')):\n        subsquare = int((longitude / LONGITUDE_SUBSQUARE))\n        locator.append(chr((subsquare + 97)))\n        longitude -= (subsquare * LONGITUDE_SUBSQUARE)\n        subsquare = int((latitude / LATITUDE_SUBSQUARE))\n        locator.append(chr((subsquare + 97)))\n        latitude -= (subsquare * LATITUDE_SUBSQUARE)\n    if (precision == 'extsquare'):\n        extsquare = int((longitude / LONGITUDE_EXTSQUARE))\n        locator.append(str(extsquare))\n        extsquare = int((latitude / LATITUDE_EXTSQUARE))\n        locator.append(str(extsquare))\n    return ''.join(locator)", "docstring": "Calculate Maidenhead locator from latitude and longitude.\n\nArgs:\nlatitude (float): Position's latitude\nlongitude (float): Position's longitude\nprecision (str): Precision with which generate locator string\n\nReturns:\nstr: Maidenhead locator for latitude and longitude\n\nRaise:\nValueError: Invalid precision identifier\nValueError: Invalid latitude or longitude value", "source": "codesearchnet"}
{"code": "def update_user_attributes(self, user, claims):\n        \n\n        required_fields = [field.name for field in user._meta.fields if field.blank is False]\n\n        for field, claim in settings.CLAIM_MAPPING.items():\n            if hasattr(user, field):\n                if claim in claims:\n                    setattr(user, field, claims[claim])\n                    logger.debug(\"Attribute '{}' for user '{}' was set to '{}'.\".format(field, user, claims[claim]))\n                else:\n                    if field in required_fields:\n                        msg = \"Claim not found in access token: '{}'. Check ADFS claims mapping.\"\n                        raise ImproperlyConfigured(msg.format(claim))\n                    else:\n                        msg = \"Claim '{}' for user field '{}' was not found in the access token for user '{}'. \" \\\n                              \"Field is not required and will be left empty\".format(claim, field, user)\n                        logger.warning(msg)\n            else:\n                msg = \"User model has no field named '{}'. Check ADFS claims mapping.\"\n                raise ImproperlyConfigured(msg.format(field))", "docstring": "Updates user attributes based on the CLAIM_MAPPING setting.\n\nArgs:\nuser (django.contrib.auth.models.User): User model instance\nclaims (dict): claims from the access token", "source": "juraj-google-style"}
{"code": "def is_extension_type(tensor):\n    return isinstance(tensor, composite_tensor.CompositeTensor)", "docstring": "Returns whether a tensor is of an ExtensionType.\n\ngithub.com/tensorflow/community/pull/269\nCurrently it works by checking if `tensor` is a `CompositeTensor` instance,\nbut this will be changed to use an appropriate extensiontype protocol\ncheck once ExtensionType is made public.\n\nArgs:\ntensor: An object to test\n\nReturns:\nTrue if the tensor is an extension type object, false if not.", "source": "github-repos"}
{"code": "def iaf_hparams(hidden_size=512, filter_size=4096):\n  \n  hparams = common_hparams.basic_params1()\n\n  \n  hparams.hidden_size = hidden_size\n  hparams.add_hparam(\"attention_key_channels\", None)\n  hparams.add_hparam(\"attention_value_channels\", None)\n  hparams.add_hparam(\"num_heads\", 4)\n  hparams.add_hparam(\"attention_dropout\", 0.1)\n  hparams.add_hparam(\"shared_rel\", False)\n  hparams.add_hparam(\"block_width\", 1)\n  hparams.add_hparam(\"block_length\", 1)\n  hparams.add_hparam(\"q_filter_width\", 1)\n  hparams.add_hparam(\"kv_filter_width\", 1)\n\n  \n  hparams.layer_preprocess_sequence = \"n\"\n  hparams.layer_prepostprocess_dropout = 0.1\n  hparams.norm_type = \"layer\"\n  hparams.norm_epsilon = 1e-06\n  hparams.layer_prepostprocess_dropout_broadcast_dims = \"\"\n  hparams.layer_postprocess_sequence = \"da\"\n\n  \n  hparams.add_hparam(\"filter_size\", filter_size)\n  hparams.add_hparam(\"ffn_layer\", \"conv_hidden_relu\")\n  hparams.add_hparam(\"relu_dropout\", 0.1)\n  return hparams", "docstring": "Create hyperpameters for inverse autoregressive flows.\n\nArgs:\nhidden_size: Width of attention layers and neural network output layer.\nfilter_size: Hidden layer width for neural network.\n\nReturns:\nhparams: Hyperpameters with basic presets for inverse autoregressive flows.", "source": "juraj-google-style"}
{"code": "def setup_logging(verbosity, formats=None):\n    \n    if formats is None:\n        formats = {}\n\n    log_level = logging.INFO\n\n    log_format = formats.get(\"info\", INFO_FORMAT)\n\n    if sys.stdout.isatty():\n        log_format = formats.get(\"color\", COLOR_FORMAT)\n\n    if verbosity > 0:\n        log_level = logging.DEBUG\n        log_format = formats.get(\"debug\", DEBUG_FORMAT)\n\n    if verbosity < 2:\n        logging.getLogger(\"botocore\").setLevel(logging.CRITICAL)\n\n    hdlr = logging.StreamHandler()\n    hdlr.setFormatter(ColorFormatter(log_format, ISO_8601))\n    logging.root.addHandler(hdlr)\n    logging.root.setLevel(log_level)", "docstring": "Configure a proper logger based on verbosity and optional log formats.\n\nArgs:\nverbosity (int): 0, 1, 2\nformats (dict): Optional, looks for `info`, `color`, and `debug` keys\nwhich may override the associated default log formats.", "source": "juraj-google-style"}
{"code": "def metadata_extractor(self):\n    if (not hasattr(self, '_local_file')):\n        raise AttributeError('local_file attribute must be set before calling metadata_extractor')\n    if (not hasattr(self, '_metadata_extractor')):\n        if self.local_file.endswith('.whl'):\n            logger.info('Getting metadata from wheel using WheelMetadataExtractor.')\n            extractor_cls = metadata_extractors.WheelMetadataExtractor\n        else:\n            logger.info('Getting metadata from setup.py using SetupPyMetadataExtractor.')\n            extractor_cls = metadata_extractors.SetupPyMetadataExtractor\n        base_python_version = (self.base_python_version or self.template_base_py_ver)\n        self._metadata_extractor = extractor_cls(self.local_file, self.name, self.name_convertor, self.version, self.rpm_name, self.venv, base_python_version)\n    return self._metadata_extractor", "docstring": "Returns an instance of proper MetadataExtractor subclass.\nAlways returns the same instance.\n\nReturns:\nThe proper MetadataExtractor subclass according to local file\nsuffix.", "source": "codesearchnet"}
{"code": "def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):\n    import shlex\n    if isinstance(command, (list, tuple)):\n        raise ValueError('command tuple not supported yet')\n    args = shlex.split(command, posix=(not WIN32))\n    if (verbose is True):\n        verbose = 2\n    if (verbout is None):\n        verbout = (verbose >= 1)\n    if (verbose >= 2):\n        print('+=== START CMD2 ===')\n        print('Command:')\n        print(command)\n        if verbout:\n            print('----')\n            print('Stdout:')\n    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell, universal_newlines=True)\n    if detatch:\n        info = {'proc': proc}\n    else:\n        write_fn = sys.stdout.write\n        flush_fn = sys.stdout.flush\n        logged_out = []\n        for line in _run_process(proc):\n            line_ = (line if six.PY2 else line)\n            if (len(line_) > 0):\n                if verbout:\n                    write_fn(line_)\n                    flush_fn()\n                logged_out.append(line)\n        try:\n            from utool import util_str\n            out = ''.join(logged_out)\n        except UnicodeDecodeError:\n            from utool import util_str\n            logged_out = util_str.ensure_unicode_strlist(logged_out)\n            out = ''.join(logged_out)\n        (out_, err) = proc.communicate()\n        ret = proc.wait()\n        info = {'out': out, 'err': err, 'ret': ret}\n    if (verbose >= 2):\n        print('L___ END CMD2 ___')\n    return info", "docstring": "Trying to clean up cmd\n\nArgs:\ncommand (str): string command\nshell (bool): if True, process is run in shell\ndetatch (bool): if True, process is run in background\nverbose (int): verbosity mode\nverbout (bool): if True, `command` writes to stdout in realtime.\ndefaults to True iff verbose > 0\n\nReturns:\ndict: info - information about command status", "source": "codesearchnet"}
{"code": "def GetVSSStoreIdentifiers(self, volume_system, volume_identifiers):\n    \n    print_header = True\n    while True:\n      if print_header:\n        self._PrintVSSStoreIdentifiersOverview(\n            volume_system, volume_identifiers)\n\n        print_header = False\n\n      self._output_writer.Write('\\n')\n\n      lines = self._textwrapper.wrap(self._USER_PROMPT_VSS)\n      self._output_writer.Write('\\n'.join(lines))\n      self._output_writer.Write('\\n\\nVSS identifier(s): ')\n\n      try:\n        selected_volumes = self._ReadSelectedVolumes(\n            volume_system, prefix='vss')\n        if (not selected_volumes or\n            not set(selected_volumes).difference(volume_identifiers)):\n          break\n      except ValueError:\n        pass\n\n      self._output_writer.Write('\\n')\n\n      lines = self._textwrapper.wrap(\n          'Unsupported VSS identifier(s), please try again or abort with '\n          'Ctrl^C.')\n      self._output_writer.Write('\\n'.join(lines))\n      self._output_writer.Write('\\n\\n')\n\n    return selected_volumes", "docstring": "Retrieves VSS store identifiers.\n\nThis method can be used to prompt the user to provide VSS store identifiers.\n\nArgs:\nvolume_system (VShadowVolumeSystem): volume system.\nvolume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix or None.", "source": "juraj-google-style"}
{"code": "def gen_public_api(output_dir: str, output_package: str, root_init_template: str, api_version: int, compat_api_versions: Sequence[int], compat_init_templates: Sequence[str], use_lazy_loading: bool, file_prefixes_to_strip: Sequence[str], mapping_files: Sequence[str], packages_to_ignore: Sequence[str], module_prefix: str, root_file_name: str, output_files: Set[str]):\n    public_api = get_public_api(mapping_files, file_prefixes_to_strip, packages_to_ignore, output_package, module_prefix)\n    root_entrypoints_by_module = public_api.v2_entrypoints_by_module\n    root_generated_imports_by_module = public_api.v2_generated_imports_by_module\n    if api_version == 1:\n        root_entrypoints_by_module = public_api.v1_entrypoints_by_module\n        root_generated_imports_by_module = public_api.v1_generated_imports_by_module\n    for compat_version in compat_api_versions:\n        compat_package = f'{output_package}.compat'\n        compat_version_package = f'{compat_package}.v{compat_version}'\n        public_api.v2_generated_imports_by_module[compat_package].add(compat_version_package)\n        public_api.v1_generated_imports_by_module[compat_package].add(compat_version_package)\n    _gen_init_files(output_dir, output_package, api_version, root_entrypoints_by_module, root_generated_imports_by_module, public_api.docs_by_module, root_init_template, file_prefixes_to_strip, use_lazy_loading, module_prefix, output_files, root_file_name=root_file_name)\n    for compat_index, compat_version in enumerate(compat_api_versions):\n        compat_output_dir = os.path.join(output_dir, 'compat', f'v{compat_version}')\n        os.makedirs(compat_output_dir, exist_ok=True)\n        compat_version = int(compat_version)\n        compat_entrypoints_by_module = public_api.v2_entrypoints_by_module\n        compat_generated_imports_by_module = public_api.v2_generated_imports_by_module\n        if compat_version == 1:\n            compat_entrypoints_by_module = public_api.v1_entrypoints_by_module\n            compat_generated_imports_by_module = public_api.v1_generated_imports_by_module\n        _gen_init_files(compat_output_dir, output_package, compat_version, compat_entrypoints_by_module, compat_generated_imports_by_module, public_api.docs_by_module, compat_init_templates[compat_index] if compat_init_templates else '', file_prefixes_to_strip, use_lazy_loading, module_prefix, output_files, subpackage_rewrite=f'{output_package}.compat.v{compat_version}')\n        for nested_compat_index, nested_compat_version in enumerate(compat_api_versions):\n            nested_compat_version = int(nested_compat_version)\n            nested_compat_output_dir = os.path.join(compat_output_dir, 'compat', f'v{nested_compat_version}')\n            nested_compat_entrypoints_by_module = public_api.v2_entrypoints_by_module\n            nested_compat_generated_imports_by_module = public_api.v2_generated_imports_by_module\n            if nested_compat_version == 1:\n                nested_compat_entrypoints_by_module = public_api.v1_entrypoints_by_module\n                nested_compat_generated_imports_by_module = public_api.v1_generated_imports_by_module\n            os.makedirs(nested_compat_output_dir, exist_ok=True)\n            gen_nested_compat_files(nested_compat_output_dir, output_package, nested_compat_version, nested_compat_entrypoints_by_module, nested_compat_generated_imports_by_module, public_api.docs_by_module, compat_init_templates[nested_compat_index] if compat_init_templates else '', file_prefixes_to_strip, 
use_lazy_loading, compat_api_versions, module_prefix, output_files)", "docstring": "Generates the public API for tensorflow.\n\nArgs:\noutput_dir: The directory to output the files to.\noutput_package: The package to use for the imports.\nroot_init_template: The template for the root init file.\napi_version: The version of the API to generate.\ncompat_api_versions: The versions of the compat APIs to generate.\ncompat_init_templates: The templates for the compat init files.\nuse_lazy_loading: Whether to use lazy loading or not.\nfile_prefixes_to_strip: The prefixes to strip from the file names of the\nimports.\nmapping_files: The mapping files created by the API Extractor.\npackages_to_ignore: A list of python packages that should be ignored when\nsearching for tf_exports.\nmodule_prefix: A prefix to add to the non-generated imports.\nroot_file_name: The file name that should be generated for the top level\nAPI.\noutput_files: List of files expected to generate.", "source": "github-repos"}
{"code": "def get_info(ads):\n    infos = []\n    for ad in ads:\n        device_info = ad.device_info\n        user_added_info = {k: str(v) for k, v in device_info['user_added_info'].items()}\n        device_info['user_added_info'] = user_added_info\n        infos.append(device_info)\n    return infos", "docstring": "Get information on a list of AndroidDevice objects.\n\nArgs:\nads: A list of AndroidDevice objects.\n\nReturns:\nA list of dict, each representing info for an AndroidDevice objects.\nEverything in this dict should be yaml serializable.", "source": "github-repos"}
{"code": "def spawn_program(self, name, arguments=[], timeout=30, exclusive=False):\n    logger.debug('Spawning program for interaction ...')\n    if exclusive:\n        kill_longrunning(self.config)\n    return RunningProgram(self, name, arguments, timeout)", "docstring": "Spawns a program in the working directory.\n\nThis method allows the interaction with the running program,\nbased on the returned RunningProgram object.\n\nArgs:\nname (str):        The name of the program to be executed.\narguments (tuple): Command-line arguments for the program.\ntimeout (int):     The timeout for execution.\nexclusive (bool):  Prevent parallel validation runs on the\ntest machines, e.g. when doing performance\nmeasurements for submitted code.\n\nReturns:\nRunningProgram: An object representing the running program.", "source": "codesearchnet"}
{"code": "def IsWalletTransaction(self, tx):\n    for (key, contract) in self._contracts.items():\n        for output in tx.outputs:\n            if (output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes()):\n                return True\n        for script in tx.scripts:\n            if script.VerificationScript:\n                if (bytes(contract.Script) == script.VerificationScript):\n                    return True\n    for watch_script_hash in self._watch_only:\n        for output in tx.outputs:\n            if (output.ScriptHash == watch_script_hash):\n                return True\n        for script in tx.scripts:\n            if (Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash):\n                return True\n    return False", "docstring": "Verifies if a transaction belongs to the wallet.\n\nArgs:\ntx (TransactionOutput):an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.\n\nReturns:\nbool: True, if transaction belongs to wallet. False, if not.", "source": "codesearchnet"}
{"code": "def testSaveAndLoadSingleVariable(self, shard_config):\n    strategy = self._create_strategy(shard_config[0])\n    with strategy.scope():\n        var = variables_lib.Variable([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n    model_dir = self.get_temp_dir()\n    save.save(var, model_dir)\n    strategy2 = self._create_strategy(shard_config[1])\n    with strategy2.scope():\n        loaded = load.load(model_dir)\n    if shard_config[1] > 1:\n        loaded = array_ops.concat(loaded.variables, axis=0)\n    self.assertLen(loaded.numpy(), 6)\n    if shard_config[0] > 1:\n        var = array_ops.concat(var.variables, axis=0)\n    self.assertAllClose(var.numpy(), loaded.numpy())", "docstring": "Test saving and loading ShardedVariable with different numbers of shards.\n\nLoading tf.Variables into multiple Shards is not yet supported\n\nArgs:\nshard_config: The number of shards to use before and after loading. For\nexample, [2, 1] means to create and save the variable with 2 shards and\nload it into 1 shard (i.e., a regular tf.Variable).", "source": "github-repos"}
{"code": "def _buffer_incomplete_responses(raw_output, buf):\n    if raw_output:\n        if buf:\n            raw_output = b''.join([buf, raw_output])\n            buf = None\n        if (b'\\n' not in raw_output):\n            buf = raw_output\n            raw_output = None\n        elif (not raw_output.endswith(b'\\n')):\n            remainder_offset = (raw_output.rindex(b'\\n') + 1)\n            buf = raw_output[remainder_offset:]\n            raw_output = raw_output[:remainder_offset]\n    return (raw_output, buf)", "docstring": "It is possible for some of gdb's output to be read before it completely finished its response.\nIn that case, a partial mi response was read, which cannot be parsed into structured data.\nWe want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's\noutput if the output did not end in a newline.\n\nArgs:\nraw_output: Contents of the gdb mi output\nbuf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to\ngdb's next output.\n\nReturns:\n(raw_output, buf)", "source": "codesearchnet"}
{"code": "def activate_vacation(self, endtime: datetime, temperature: float):\n        \n        data = {\n            \"endtime\": endtime.strftime(\"%Y_%m_%d %H:%M\"),\n            \"temperature\": temperature,\n        }\n        return self._restCall(\"home/heating/activateVacation\", json.dumps(data))", "docstring": "activates the vatation mode until the given time\n\nArgs:\nendtime(datetime): the time when the vatation mode should automatically be disabled\ntemperature(float): the settemperature during the vacation mode", "source": "juraj-google-style"}
{"code": "def Key(self):\n    return getattr(self, self._KEY)", "docstring": "Return unique identifier for this MapEntry object.\n\nReturns:\nA str which contains the name of the attribute to be used as an index\nvalue for a maps.MapEntry instance in a maps.Map.", "source": "github-repos"}
{"code": "def WriteBlobsWithUnknownHashes(self, blobs_data):\n    blobs_ids = [rdf_objects.BlobID.FromBlobData(d) for d in blobs_data]\n    self.WriteBlobs(dict(zip(blobs_ids, blobs_data)))\n    return blobs_ids", "docstring": "Calculates hash ids and writes contents of given data blobs.\n\nArgs:\nblobs_data: An iterable of bytes.\n\nReturns:\nA list of rdf_objects.BlobID objects with each blob id corresponding\nto an element in the original blobs_data argument.", "source": "codesearchnet"}
{"code": "def encode_structure(nested_structure):\n    return _map_structure(nested_structure, _get_encoders())", "docstring": "Encodes nested structures composed of encodable types into a proto.\n\nArgs:\nnested_structure: Structure to encode.\n\nReturns:\nEncoded proto.\n\nRaises:\nNotEncodableError: For values for which there are no encoders.", "source": "github-repos"}
{"code": "def has_nrows(\n    state,\n    incorrect_msg=\"Your query returned a table with {{n_stu}} row{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} row{{'s' if n_sol > 1 else ''}}.\",\n):\n    \n\n    \n    has_result(state)\n\n    \n    n_stu = len(next(iter(state.student_result.values())))\n    n_sol = len(next(iter(state.solution_result.values())))\n\n    if n_stu != n_sol:\n        _msg = state.build_message(\n            incorrect_msg, fmt_kwargs={\"n_stu\": n_stu, \"n_sol\": n_sol}\n        )\n        state.do_test(_msg)\n\n    return state", "docstring": "Test whether the student and solution query results have equal numbers of rows.\n\nArgs:\nincorrect_msg: If specified, this overrides the automatically generated feedback message\nin case the number of rows in the student and solution query don't match.", "source": "juraj-google-style"}
{"code": "def delete(self, dash_id):\n    removed_info = dict(time_modified=r_db.zscore(config.DASH_ID_KEY, dash_id), meta=r_db.hget(config.DASH_META_KEY, dash_id), content=r_db.hget(config.DASH_CONTENT_KEY, dash_id))\n    r_db.zrem(config.DASH_ID_KEY, dash_id)\n    r_db.hdel(config.DASH_META_KEY, dash_id)\n    r_db.hdel(config.DASH_CONTENT_KEY, dash_id)\n    return {'removed_info': removed_info}", "docstring": "Delete a dash meta and content, return updated dash content.\n\nActually, just remove it to a specfied place in database.\n\nArgs:\ndash_id: dashboard id.\n\nReturns:\nRedirect to home page.", "source": "codesearchnet"}
{"code": "def insert_arguments_into_sql_query(compilation_result, arguments):\n    \n    if compilation_result.language != SQL_LANGUAGE:\n        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))\n    base_query = compilation_result.query\n    return base_query.params(**arguments)", "docstring": "Insert the arguments into the compiled SQL query to form a complete query.\n\nArgs:\ncompilation_result: CompilationResult, compilation result from the GraphQL compiler.\narguments: Dict[str, Any], parameter name -> value, for every parameter the query expects.\n\nReturns:\nSQLAlchemy Selectable, a executable SQL query with parameters bound.", "source": "juraj-google-style"}
{"code": "def no_company_with_insufficient_companies_house_data(value):\n    for (prefix, name) in company_types_with_insufficient_companies_house_data:\n        if value.upper().startswith(prefix):\n            raise ValidationError(MESSAGE_INSUFFICIENT_DATA, params={'name': name})", "docstring": "Confirms that the company number is not for for a company that\nCompanies House does not hold information on.\n\nArgs:\nvalue (string): The company number to check.\n\nRaises:\ndjango.forms.ValidationError", "source": "codesearchnet"}
{"code": "def universal_transformer_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None):\n\n    def add_vanilla_transformer_layer(x, num_layers, name):\n        'Passes the input through num_layers of vanilla transformer layers.\\n\\n    Args:\\n     x: input\\n     num_layers: number of layers\\n     name: string, prefix of layer names\\n\\n    Returns:\\n       output of vanilla_transformer_layer\\n    '\n        if hparams.add_position_timing_signal:\n            x = common_attention.add_timing_signal_1d(x)\n        for layer in range(num_layers):\n            with tf.variable_scope((name + ('layer_%d' % layer))):\n                x = ffn_unit(attention_unit(x))\n        return x\n    with tf.variable_scope(('universal_transformer_%s' % hparams.recurrence_type)):\n        if (hparams.mix_with_transformer and ('before_ut' in hparams.mix_with_transformer)):\n            x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers, 'before_ut_')\n        if (hparams.recurrence_type == 'act'):\n            (output, extra_output) = universal_transformer_act(x, hparams, ffn_unit, attention_unit)\n        else:\n            (ut_function, initializer) = get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover)\n            (output, _, extra_output) = tf.foldl(ut_function, tf.range(hparams.num_rec_steps), initializer=initializer)\n            if ((hparams.recurrence_type == 'lstm') and hparams.get('use_memory_as_final_state', False)):\n                output = extra_output\n        if (hparams.mix_with_transformer and ('after_ut' in hparams.mix_with_transformer)):\n            output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers, 'after_ut_')\n        return (output, extra_output)", "docstring": "Core function applying the universal transformer layer.\n\nArgs:\nx: input\nhparams: model hyper-parameters\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\n\nReturns:\nthe output tensor,  extra output (can be memory, ponder time, etc.)\n\nRaises:\nValueError: Unknown recurrence type", "source": "codesearchnet"}
{"code": "def process_request(self, request, credential=None):\n    self._client_identity = [None, None]\n    header = request.request_header\n    self._set_protocol_version(header.protocol_version)\n    max_response_size = None\n    if header.maximum_response_size:\n        max_response_size = header.maximum_response_size.value\n    now = int(time.time())\n    if header.time_stamp:\n        then = header.time_stamp.value\n        if ((now >= then) and ((now - then) < 60)):\n            self._logger.info('Received request at time: {0}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(then))))\n        elif (now < then):\n            self._logger.warning('Received request with future timestamp. Received timestamp: {0}, Current timestamp: {1}'.format(then, now))\n            raise exceptions.InvalidMessage('Future request rejected by server.')\n        else:\n            self._logger.warning('Received request with old timestamp. Possible replay attack. Received timestamp: {0}, Current timestamp: {1}'.format(then, now))\n            raise exceptions.InvalidMessage('Stale request rejected by server.')\n    else:\n        self._logger.info('Received request at time: {0}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now))))\n    self.is_asynchronous = False\n    if (header.asynchronous_indicator is not None):\n        self.is_asynchronous = header.asynchronous_indicator.value\n    if self.is_asynchronous:\n        raise exceptions.InvalidMessage('Asynchronous operations are not supported.')\n    if header.authentication:\n        if header.authentication.credentials:\n            auth_credentials = header.authentication.credentials[0]\n        else:\n            auth_credentials = None\n    else:\n        auth_credentials = None\n    self._verify_credential(auth_credentials, credential)\n    batch_error_option = enums.BatchErrorContinuationOption.STOP\n    if (header.batch_error_cont_option is not None):\n        batch_error_option = header.batch_error_cont_option.value\n    if (batch_error_option == enums.BatchErrorContinuationOption.UNDO):\n        raise exceptions.InvalidMessage('Undo option for batch handling is not supported.')\n    batch_order_option = False\n    if header.batch_order_option:\n        batch_order_option = header.batch_order_option.value\n    response_batch = self._process_batch(request.batch_items, batch_error_option, batch_order_option)\n    response = self._build_response(header.protocol_version, response_batch)\n    return (response, max_response_size, header.protocol_version)", "docstring": "Process a KMIP request message.\n\nThis routine is the main driver of the KmipEngine. It breaks apart and\nprocesses the request header, handles any message errors that may\nresult, and then passes the set of request batch items on for\nprocessing. This routine is thread-safe, allowing multiple client\nconnections to use the same KmipEngine.\n\nArgs:\nrequest (RequestMessage): The request message containing the batch\nitems to be processed.\ncredential (string): Identifying information about the client\nobtained from the client certificate. Optional, defaults to\nNone.\n\nReturns:\nResponseMessage: The response containing all of the results from\nthe request batch items.", "source": "codesearchnet"}
{"code": "def latest_file(path_name, keyword='', ext='', **kwargs) -> str:\n    \n    files = all_files(\n        path_name=path_name, keyword=keyword, ext=ext, full_path=True\n    )\n\n    if not files:\n        from xbbg.io import logs\n\n        logger = logs.get_logger(latest_file, level=kwargs.pop('log', 'warning'))\n        logger.debug(f'file is not found in folder: {path_name}')\n        return ''\n\n    modified_time = [os.path.getmtime(f) for f in files]\n    files = [f for (dt, f) in sorted(zip(modified_time, files))]\n\n    return files[-1]", "docstring": "Latest modified file in folder\n\nArgs:\npath_name: full path name\nkeyword: keyword to search\next: file extension\n\nReturns:\nstr: latest file name", "source": "juraj-google-style"}
{"code": "def compare(self, value, expectation, regex_expr=False):\n    return compare(value, expectation, regex_expr=regex_expr)", "docstring": "Compares two values with regular expression matching support.\n\nArguments:\nvalue (mixed): value to compare.\nexpectation (mixed): value to match.\nregex_expr (bool, optional): enables string based regex matching.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def _create_inbound_stream(self, config=None):\n    if (config is None):\n        raise ValueError('No stream config to create stream from.')\n    name = self._get_stream_name(config)\n    stream_handlers = self._get_stream_handlers(config, name)\n    stream_input = config.get('input', None)\n    if (stream_input is None):\n        raise cfg.AitConfigMissing(\"inbound stream {}'s input\".format(name))\n    if (type(stream_input[0]) is int):\n        return PortInputStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})\n    else:\n        return ZMQStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})", "docstring": "Creates an inbound stream from its config.\n\nParams:\nconfig:       stream configuration as read by ait.config\nReturns:\nstream:       a Stream\nRaises:\nValueError:   if any of the required config values are missing", "source": "codesearchnet"}
{"code": "def failure_message(description, options):\n    \n\n    message = \"expected to find {}\".format(description)\n\n    if options[\"count\"] is not None:\n        message += \" {count} {times}\".format(\n            count=options[\"count\"],\n            times=declension(\"time\", \"times\", options[\"count\"]))\n    elif options[\"between\"] is not None:\n        between = options[\"between\"]\n        if between:\n            first, last = between[0], between[-1]\n        else:\n            first, last = None, None\n\n        message += \" between {first} and {last} times\".format(\n            first=first,\n            last=last)\n    elif options[\"maximum\"] is not None:\n        message += \" at most {maximum} {times}\".format(\n            maximum=options[\"maximum\"],\n            times=declension(\"time\", \"times\", options[\"maximum\"]))\n    elif options[\"minimum\"] is not None:\n        message += \" at least {minimum} {times}\".format(\n            minimum=options[\"minimum\"],\n            times=declension(\"time\", \"times\", options[\"minimum\"]))\n\n    return message", "docstring": "Returns a expectation failure message for the given query description.\n\nArgs:\ndescription (str): A description of the failed query.\noptions (Dict[str, Any]): The query options.\n\nReturns:\nstr: A message describing the failure.", "source": "juraj-google-style"}
{"code": "def to_dataframe(self, start_row=0, max_rows=None):\n    \n    fetcher = self._get_row_fetcher(start_row=start_row,\n                                    max_rows=max_rows,\n                                    page_size=self._MAX_PAGE_SIZE)\n    count = 0\n    page_token = None\n\n    \n    \n    df_list = []\n    df = None\n\n    while True:\n      page_rows, page_token = fetcher(page_token, count)\n      if len(page_rows):\n        count += len(page_rows)\n        df_list.append(pandas.DataFrame.from_records(page_rows))\n      if not page_token:\n        break\n    if df_list:\n      df = pandas.concat(df_list, ignore_index=True, copy=False)\n\n    \n    ordered_fields = [field.name for field in self.schema]\n    return df[ordered_fields] if df is not None else pandas.DataFrame()", "docstring": "Exports the table to a Pandas dataframe.\n\nArgs:\nstart_row: the row of the table at which to start the export (default 0)\nmax_rows: an upper limit on the number of rows to export (default None)\nReturns:\nA Pandas dataframe containing the table data.", "source": "juraj-google-style"}
{"code": "def copy_remote_file(web_file, destination):\n    \n    size = 0\n    dir_name = os.path.dirname(destination)\n    if not os.path.exists(dir_name):\n        os.makedirs(dir_name)\n\n    with open(destination, 'wb') as file_:\n        chunk_size = 8 * 1024\n        for chunk in web_file.iter_content(chunk_size=chunk_size):\n            if chunk:\n                file_.write(chunk)\n                size += len(chunk)\n    return size", "docstring": "Check if exist the destination path, and copy the online resource\nfile to local.\n\nArgs:\n:web_file: reference to online file resource to take.\n:destination: path to store the file.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.CompleteQuery = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.Completion/CompleteQuery\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_completion__service__pb2.CompleteQueryRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_completion__service__pb2.CompleteQueryResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _FormatUsername(self, event):\n    \n    username = self._output_mediator.GetUsername(event)\n    return self._FormatField(username)", "docstring": "Formats the username.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted username field.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.GetRequiredPlugins = channel.unary_unary(\n        '/pulumirpc.LanguageRuntime/GetRequiredPlugins',\n        request_serializer=language__pb2.GetRequiredPluginsRequest.SerializeToString,\n        response_deserializer=language__pb2.GetRequiredPluginsResponse.FromString,\n        )\n    self.Run = channel.unary_unary(\n        '/pulumirpc.LanguageRuntime/Run',\n        request_serializer=language__pb2.RunRequest.SerializeToString,\n        response_deserializer=language__pb2.RunResponse.FromString,\n        )\n    self.GetPluginInfo = channel.unary_unary(\n        '/pulumirpc.LanguageRuntime/GetPluginInfo',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=plugin__pb2.PluginInfo.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def NCHWToNHWC(input_tensor: Union[tensor_lib.Tensor, list[int]]) -> Union[tensor_lib.Tensor, list[int]]:\n    new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}\n    if isinstance(input_tensor, tensor_lib.Tensor):\n        ndims = input_tensor.shape.ndims\n        return array_ops.transpose(input_tensor, new_axes[ndims])\n    else:\n        ndims = len(input_tensor)\n        return [input_tensor[a] for a in new_axes[ndims]]", "docstring": "Converts the input from the NCHW format to NHWC.\n\nArgs:\ninput_tensor: a 4- or 5-D tensor, or an array representing shape\n\nReturns:\nconverted tensor or shape array", "source": "github-repos"}
{"code": "def untar(file_path, extract_folder=None):\n    \n    file_path = Path(file_path)\n    if extract_folder is None:\n        extract_folder = file_path.parent\n    extract_folder = Path(extract_folder)\n    tar = tarfile.open(file_path)\n    tar.extractall(extract_folder)\n    tar.close()", "docstring": "Simple tar archive extractor\n\nArgs:\nfile_path: path to the tar file to be extracted\nextract_folder: folder to which the files will be extracted", "source": "juraj-google-style"}
{"code": "def _get_native_delegate_pointer(self):\n    return self._delegate_ptr", "docstring": "Returns the native TfLiteDelegate pointer.\n\nIt is not safe to copy this pointer because it needs to be freed.\n\nReturns:\nTfLiteDelegate *", "source": "github-repos"}
{"code": "def make_serializable(json):\n    \n    new_dict = dict()\n    for key, value in iteritems(json):\n        if is_valid_json(value):\n            new_dict[key] = value\n\n    return new_dict", "docstring": "This function ensures that the dictionary is JSON serializable. If not,\nkeys with non-serializable values are removed from the return value.\n\nArgs:\njson (dict): Dictionary to convert to serializable\n\nReturns:\nnew_dict (dict): New dictionary with non JSON serializable values removed", "source": "juraj-google-style"}
{"code": "def resolve_type(self, name: str | pytd_node.Node) -> pytd.Type:\n    if isinstance(name, (pytd.GenericType, pytd.AnythingType)):\n        return name\n    if isinstance(name, pytd.NamedType):\n        name = name.name\n    assert isinstance(name, str), f'Expected str, got {name}'\n    if name == 'nothing':\n        return pytd.NothingType()\n    base_type = self.type_map.get(name)\n    if base_type is None:\n        module, dot, tail = name.partition('.')\n        full_name = self.module_path_map.get(module, module) + dot + tail\n        base_type = pytd.NamedType(full_name)\n    return base_type", "docstring": "Return the fully resolved name for an alias.\n\nArgs:\nname: The name of the type or alias.\n\nReturns:\nA pytd.NamedType with the fully resolved and qualified name.", "source": "github-repos"}
{"code": "def _handle_changed_fields(self, old_data):\n        \n        for link in self.get_links(is_set=False):\n            fld_id = un_camel_id(link['field'])\n            if not old_data or old_data.get(fld_id) != self._data[fld_id]:\n                \n                if self._data[fld_id]:  \n                    linked_mdl = getattr(self, link['field'])\n                    self._add_back_link(linked_mdl, link)", "docstring": "Looks for changed relation fields between new and old data (before/after save).\nCreates back_link references for updated fields.\n\nArgs:\nold_data: Object's data before save.", "source": "juraj-google-style"}
{"code": "def GetDisplayName(self, file_entry=None):\n    if (file_entry is None):\n        file_entry = self._file_entry\n    if (file_entry is None):\n        raise ValueError('Missing file entry')\n    path_spec = getattr(file_entry, 'path_spec', None)\n    relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(path_spec, mount_path=self._mount_path)\n    if (not relative_path):\n        return file_entry.name\n    return self.GetDisplayNameForPathSpec(path_spec)", "docstring": "Retrieves the display name for a file entry.\n\nArgs:\nfile_entry (Optional[dfvfs.FileEntry]): file entry object, where None\nwill return the display name of self._file_entry.\n\nReturns:\nstr: human readable string that describes the path to the file entry.\n\nRaises:\nValueError: if the file entry is missing.", "source": "codesearchnet"}
{"code": "def _visualize(self, x_label, y_labels, ticks, overlay, draw, annotate, width=6, height=4):\n    for label in y_labels:\n        if (not all((isinstance(x, numbers.Real) for x in self[label]))):\n            raise ValueError(\"The column '{0}' contains non-numerical values. A plot cannot be drawn for this column.\".format(label))\n    n = len(y_labels)\n    colors = list(itertools.islice(itertools.cycle(self.chart_colors), n))\n    if (overlay and (n > 1)):\n        (_, axis) = plt.subplots(figsize=(width, height))\n        if (x_label is not None):\n            axis.set_xlabel(x_label)\n        for (label, color) in zip(y_labels, colors):\n            draw(axis, label, color)\n        if (ticks is not None):\n            annotate(axis, ticks)\n        axis.legend(y_labels, loc=2, bbox_to_anchor=(1.05, 1))\n        type(self).plots.append(axis)\n    else:\n        (fig, axes) = plt.subplots(n, 1, figsize=(width, (height * n)))\n        if (not isinstance(axes, collections.Iterable)):\n            axes = [axes]\n        for (axis, y_label, color) in zip(axes, y_labels, colors):\n            draw(axis, y_label, color)\n            axis.set_ylabel(y_label, fontsize=16)\n            if (x_label is not None):\n                axis.set_xlabel(x_label, fontsize=16)\n            if (ticks is not None):\n                annotate(axis, ticks)\n            type(self).plots.append(axis)", "docstring": "Generic visualization that overlays or separates the draw function.\n\nRaises:\nValueError: The Table contains non-numerical values in columns\nother than `column_for_categories`", "source": "codesearchnet"}
{"code": "def expandEntitiesFromEmail(e):\n    email = {}\n    email['type'] = 'i3visio.email'\n    email['value'] = e\n    email['attributes'] = []\n    alias = {}\n    alias['type'] = 'i3visio.alias'\n    alias['value'] = e.split('@')[0]\n    alias['attributes'] = []\n    domain = {}\n    domain['type'] = 'i3visio.domain'\n    domain['value'] = e.split('@')[1]\n    domain['attributes'] = []\n    return [email, alias, domain]", "docstring": "Method that receives an email an creates linked entities\n\nArgs:\n-----\ne:   Email to verify.\n\nReturns:\n--------\nThree different values: email, alias and domain in a list.", "source": "codesearchnet"}
{"code": "def ProduceExtractionWarning(self, message, path_spec=None):\n    \n    if not self._storage_writer:\n      raise RuntimeError('Storage writer not set.')\n\n    if not path_spec and self._file_entry:\n      path_spec = self._file_entry.path_spec\n\n    parser_chain = self.GetParserChain()\n    warning = warnings.ExtractionWarning(\n        message=message, parser_chain=parser_chain, path_spec=path_spec)\n    self._storage_writer.AddWarning(warning)\n    self._number_of_warnings += 1\n\n    self.last_activity_timestamp = time.time()", "docstring": "Produces an extraction warning.\n\nArgs:\nmessage (str): message of the warning.\npath_spec (Optional[dfvfs.PathSpec]): path specification, where None\nwill use the path specification of current file entry set in\nthe mediator.\n\nRaises:\nRuntimeError: when storage writer is not set.", "source": "juraj-google-style"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Creates a quantizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same quantizer from the config\ndictionary.\n\nThis method is used by Keras `model_to_estimator`, saving and\nloading models to HDF5 formats, Keras model cloning, some visualization\nutilities, and exporting models to and from JSON.\n\nArgs:\nconfig: A Python dictionary, typically the output of get_config.\n\nReturns:\nA quantizer instance.", "source": "github-repos"}
{"code": "def onWith(self, evnt, func):\n        \n        self.on(evnt, func)\n        \n        \n        try:\n            yield self\n        finally:\n            self.off(evnt, func)", "docstring": "A context manager which can be used to add a callback and remove it when\nusing a ``with`` statement.\n\nArgs:\nevnt (str):         An event name\nfunc (function):    A callback function to receive event tufo", "source": "juraj-google-style"}
{"code": "def invert(self) -> Rigid:\n    rot_inv = self._rots.invert()\n    trn_inv = rot_inv.apply(self._trans)\n    return Rigid(rot_inv, -1 * trn_inv)", "docstring": "Inverts the transformation.\n\nReturns:\nThe inverse transformation.", "source": "github-repos"}
{"code": "def _add_strings_to_commastring(self, field, strings):\n        \n        \n        allstringsadded = True\n        for string in strings:\n            if not self._add_string_to_commastring(field, string):\n                allstringsadded = False\n        return allstringsadded", "docstring": "Add a list of strings to a comma separated list of strings\n\nArgs:\nfield (str): Field containing comma separated list\nstrings (List[str]): list of strings to add\n\nReturns:\nbool: True if all strings added or False if any already present.", "source": "juraj-google-style"}
{"code": "def write_record(cls, file_handle, value):\n    encoded_length = struct.pack(b'<Q', len(value))\n    file_handle.write(b''.join([encoded_length, struct.pack(b'<I', cls._masked_crc32c(encoded_length)), value, struct.pack(b'<I', cls._masked_crc32c(value))]))", "docstring": "Encode a value as a TFRecord.\n\nArgs:\nfile_handle: The file to write to.\nvalue: A bytes object representing content of the record.", "source": "github-repos"}
{"code": "def from_bytes(cls, bt):\n        \n        log.debug(\"Parsing email from bytes\")\n        if six.PY2:\n            raise MailParserEnvironmentError(\n                \"Parsing from bytes is valid only for Python 3.x version\")\n        message = email.message_from_bytes(bt)\n        return cls(message)", "docstring": "Init a new object from bytes.\n\nArgs:\nbt (bytes-like object): raw email as bytes-like object\n\nReturns:\nInstance of MailParser", "source": "juraj-google-style"}
{"code": "def add_router(self, path, router):\n    if (self.strict_router_check and (not isinstance(router, Router))):\n        raise TypeError(('Expected object of type Router, found %r' % type(router)))\n    log.info('{} Adding router {} on path {}', id(self), router, path)\n    self.middleware.add(path=path, func=router, method_mask=HTTPMethod.ALL)", "docstring": "Adds a router to the list of routers\n\nArgs:\npath (str or regex): The path on which the router binds\nrouter (growler.Router): The router which will respond to\nrequests\n\nRaises:\nTypeError: If `strict_router_check` attribute is True and\nthe router is not an instance of growler.Router.", "source": "codesearchnet"}
{"code": "def init(deb1, deb2=False):\n    global DEBUG\n    global DEBUGALL\n    DEBUG = deb1\n    DEBUGALL = deb2", "docstring": "Initialize DEBUG and DEBUGALL.\n\nAllows other modules to set DEBUG and DEBUGALL, so their\ncall to dprint or dprintx generate output.\n\nArgs:\ndeb1 (bool): value of DEBUG to set\ndeb2 (bool): optional - value of DEBUGALL to set,\ndefaults to False.", "source": "codesearchnet"}
{"code": "def round(x):\n    return math_ops.round(x)", "docstring": "Element-wise rounding to the closest integer.\n\nIn case of tie, the rounding mode used is \"half to even\".\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def GetPluginObjects(cls, plugin_names):\n    plugin_objects = {}\n    for (plugin_name, plugin_class) in iter(cls._plugin_classes.items()):\n        if (plugin_name not in plugin_names):\n            continue\n        plugin_objects[plugin_name] = plugin_class()\n    return plugin_objects", "docstring": "Retrieves the plugin objects.\n\nArgs:\nplugin_names (list[str]): names of plugins that should be retrieved.\n\nReturns:\ndict[str, AnalysisPlugin]: analysis plugins per name.", "source": "codesearchnet"}
{"code": "def from_dict(event_dict):\n    \n    return SnippetEvent(\n        callback_id=event_dict['callbackId'],\n        name=event_dict['name'],\n        creation_time=event_dict['time'],\n        data=event_dict['data'])", "docstring": "Create a SnippetEvent object from a dictionary.\n\nArgs:\nevent_dict: a dictionary representing an event.\n\nReturns:\nA SnippetEvent object.", "source": "juraj-google-style"}
{"code": "def _ReadSequenceDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE\n    else:\n        supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE\n    return self._ReadElementSequenceDataTypeDefinition(definitions_registry, definition_values, data_types.SequenceDefinition, definition_name, supported_definition_values)", "docstring": "Reads a sequence data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nSequenceDefinition: sequence data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def __init__(self, conf):\n    if not isinstance(conf, dict):\n        raise RuntimeError('Source constructor not passed a dictionary')\n    self.conf = conf\n    self.log = logging.getLogger(__name__)", "docstring": "Initialise the Source object.\n\nArgs:\nconf: A dictionary of key/value pairs.\n\nRaises:\nRuntimeError: object wasn't initialised with a dict", "source": "github-repos"}
{"code": "def CreateCustomizerFeed(client, feed_name):\n  \n  \n  ad_customizer_feed_service = client.GetService('AdCustomizerFeedService',\n                                                 'v201809')\n  customizer_feed = {\n      'feedName': feed_name,\n      'feedAttributes': [\n          {'type': 'STRING', 'name': 'Name'},\n          {'type': 'STRING', 'name': 'Price'},\n          {'type': 'DATE_TIME', 'name': 'Date'}\n      ]\n  }\n\n  feed_service_operation = {\n      'operator': 'ADD',\n      'operand': customizer_feed\n  }\n\n  response = ad_customizer_feed_service.mutate([feed_service_operation])\n\n  if response and 'value' in response:\n    feed = response['value'][0]\n    feed_data = {\n        'feedId': feed['feedId'],\n        'nameId': feed['feedAttributes'][0]['id'],\n        'priceId': feed['feedAttributes'][1]['id'],\n        'dateId': feed['feedAttributes'][2]['id']\n    }\n    print ('Feed with name \"%s\" and ID %s was added with:\\n'\n           '\\tName attribute ID %s and price attribute ID %s and date attribute'\n           'ID %s') % (feed['feedName'], feed['feedId'], feed_data['nameId'],\n                       feed_data['priceId'], feed_data['dateId'])\n    return feed\n  else:\n    raise errors.GoogleAdsError('No feeds were added')", "docstring": "Creates a new AdCustomizerFeed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_name: the name for the new AdCustomizerFeed.\n\nReturns:\nThe new AdCustomizerFeed.", "source": "juraj-google-style"}
{"code": "def _merge_doc(original, to_merge):\n    if (not original):\n        return (to_merge or '')\n    if (not to_merge):\n        return (original or '')\n    sections = []\n    for name in ('usage', 'arguments', 'options'):\n        sections.append(_merge_section(_get_section(name, original), _get_section(name, to_merge)))\n    return format_usage('\\n\\n'.join((s for s in sections)).rstrip())", "docstring": "Merge two usage strings together.\n\nArgs:\noriginal: The source of headers and initial section lines.\nto_merge: The source for the additional section lines to append.\n\nReturns:\nA new usage string that contains information from both usage strings.", "source": "codesearchnet"}
{"code": "def check_file(path: str) -> str:\n    if os.path.isfile(path):\n        return path\n    else:\n        raise argparse.ArgumentTypeError(f\"'{path}' is not found.\")", "docstring": "Check if a given filepath exists or not.\n\nArgs:\npath (str): Model path\n\nRaises:\nFileNotFoundError: Raise if given path does not exist.\n\nReturns:\nstr: A model path.", "source": "github-repos"}
{"code": "def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor:\n    text_features = self.groupvit.get_text_features(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    return text_features", "docstring": "Returns:\ntext_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\nthe projection layer to the pooled output of [`TFGroupViTTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import CLIPTokenizer, TFGroupViTModel\n\n>>> model = TFGroupViTModel.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n>>> tokenizer = CLIPTokenizer.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"tf\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.\n\nArgs:\nrequest: (BigqueryTablesGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Table) The response message.", "source": "github-repos"}
{"code": "def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs):\n    return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs)", "docstring": "Create the Email TI object.\n\nArgs:\nowner:\nto:\nfrom_addr:\nname:\nsubject:\nheader:\nbody:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def write_input(self, output_dir=\".\", make_dir_if_not_present=True):\n        \n        if make_dir_if_not_present and not os.path.exists(output_dir):\n            os.makedirs(output_dir)\n        for k, v in self.items():\n            with zopen(os.path.join(output_dir, k), \"wt\") as f:\n                f.write(v.__str__())", "docstring": "Write VASP input to a directory.\n\nArgs:\noutput_dir (str): Directory to write to. Defaults to current\ndirectory (\".\").\nmake_dir_if_not_present (bool): Create the directory if not\npresent. Defaults to True.", "source": "juraj-google-style"}
{"code": "def validate_items(self):\n    logger.debug(fmt('Validating {}', self))\n    from python_jsonschema_objects import classbuilder\n    if (self.__itemtype__ is None):\n        return\n    type_checks = self.__itemtype__\n    if (not isinstance(type_checks, (tuple, list))):\n        type_checks = ([type_checks] * len(self.data))\n    elif (len(type_checks) > len(self.data)):\n        raise ValidationError('{1} does not have sufficient elements to validate against {0}'.format(self.__itemtype__, self.data))\n    typed_elems = []\n    for (elem, typ) in zip(self.data, type_checks):\n        if isinstance(typ, dict):\n            for (param, paramval) in six.iteritems(typ):\n                validator = registry(param)\n                if (validator is not None):\n                    validator(paramval, elem, typ)\n            typed_elems.append(elem)\n        elif util.safe_issubclass(typ, classbuilder.LiteralValue):\n            val = typ(elem)\n            val.validate()\n            typed_elems.append(val)\n        elif util.safe_issubclass(typ, classbuilder.ProtocolBase):\n            if (not isinstance(elem, typ)):\n                try:\n                    if isinstance(elem, (six.string_types, six.integer_types, float)):\n                        val = typ(elem)\n                    else:\n                        val = typ(**util.coerce_for_expansion(elem))\n                except TypeError as e:\n                    raise ValidationError(\"'{0}' is not a valid value for '{1}': {2}\".format(elem, typ, e))\n            else:\n                val = elem\n            val.validate()\n            typed_elems.append(val)\n        elif util.safe_issubclass(typ, ArrayWrapper):\n            val = typ(elem)\n            val.validate()\n            typed_elems.append(val)\n        elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)):\n            try:\n                if isinstance(elem, (six.string_types, six.integer_types, float)):\n                    val = typ(elem)\n                else:\n                    val = typ(**util.coerce_for_expansion(elem))\n            except TypeError as e:\n                raise ValidationError(\"'{0}' is not a valid value for '{1}': {2}\".format(elem, typ, e))\n            else:\n                val.validate()\n                typed_elems.append(val)\n    self._dirty = False\n    self._typed = typed_elems\n    return typed_elems", "docstring": "Validates the items in the backing array, including\nperforming type validation.\n\nSets the _typed property and clears the dirty flag as a side effect\n\nReturns:\nThe typed array", "source": "codesearchnet"}
{"code": "def remove_user_from_template(self, template_id, account_id=None, email_address=None):\n    return self._add_remove_user_template(self.TEMPLATE_REMOVE_USER_URL, template_id, account_id, email_address)", "docstring": "Removes the specified Account's access to the specified Template\n\nArgs:\n\ntemplate_id (str):      The id of the template to remove the account's access from.\n\naccount_id (str):       The id of the account to remove access from the template. The account id prevails if both account_id and email_address are provided.\n\nemail_address (str):    The email address of the account to remove access from.\n\nReturns:\nAn Template object", "source": "codesearchnet"}
{"code": "def distinct_values_of(self, field, count_deleted=False):\n        \n        solr_params = \"facet=true&facet.field=%s&rows=0\" % field\n        result = self.riak_http_search_query(self.index_name, solr_params, count_deleted)\n        facet_fields = result['facet_counts']['facet_fields'][field]\n        keys = facet_fields[0::2]\n        vals = facet_fields[1::2]\n\n        return dict(zip(keys, vals))", "docstring": "Uses riak http search query endpoint for advanced SOLR queries.\n\nArgs:\nfield (str): facet field\ncount_deleted (bool): ignore deleted or not\n\nReturns:\n(dict): pairs of field values and number of counts", "source": "juraj-google-style"}
{"code": "def get_linenumbers(functions, module, searchstr='def {}(image):\\n'):\n    lines = inspect.getsourcelines(module)[0]\n    line_numbers = {}\n    for function in functions:\n        try:\n            line_numbers[function] = (lines.index(searchstr.format(function)) + 1)\n        except ValueError:\n            print('Can not find `{}`'.format(searchstr.format(function)))\n            line_numbers[function] = 0\n    return line_numbers", "docstring": "Returns a dictionary which maps function names to line numbers.\n\nArgs:\nfunctions: a list of function names\nmodule:    the module to look the functions up\nsearchstr: the string to search for\nReturns:\nA dictionary with functions as keys and their line numbers as values.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, location=None, parent=None, volume_index=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(APFSContainerPathSpec, self).__init__(parent=parent, **kwargs)\n    self.location = location\n    self.volume_index = volume_index", "docstring": "Initializes a path specification.\n\nNote that an APFS container path specification must have a parent.\n\nArgs:\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\nvolume_index (Optional[int]): index of the volume within the container.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def extend(self, trajectory):\n    if (self.time_step != trajectory.time_step):\n        raise ValueError('Trajectory not extended: Time steps of trajectories is incompatible')\n    if ((len(self.species) != len(trajectory.species)) and (self.species != trajectory.species)):\n        raise ValueError('Trajectory not extended: species in trajectory do not match')\n    self.to_positions()\n    trajectory.to_positions()\n    self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)\n    (self.lattice, self.constant_lattice) = self._combine_attribute(self.lattice, trajectory.lattice, self.frac_coords.shape[0], trajectory.frac_coords.shape[0])\n    self.site_properties = self._combine_attribute(self.site_properties, trajectory.site_properties, self.frac_coords.shape[0], trajectory.frac_coords.shape[0])", "docstring": "Concatenate another trajectory\n\nArgs:\ntrajectory (Trajectory): Trajectory to add", "source": "codesearchnet"}
{"code": "def clone(self, name=None):\n    if (name is None):\n        name = (self.module_name + '_clone')\n    return MLP(name=name, output_sizes=self.output_sizes, activation=self.activation, activate_final=self.activate_final, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, use_bias=self.use_bias, use_dropout=self.use_dropout)", "docstring": "Creates a new MLP with the same structure.\n\nArgs:\nname: Optional string specifying the name of the new module. The default\nname is constructed by appending \"_clone\" to the original name.\n\nReturns:\nA cloned `MLP` module.", "source": "codesearchnet"}
{"code": "def getOSName(self):\n    _system = platform.system()\n    if (_system in [self.__class__.OS_WINDOWS, self.__class__.OS_MAC, self.__class__.OS_LINUX]):\n        if (_system == self.__class__.OS_LINUX):\n            _dist = platform.linux_distribution()[0]\n            if (_dist.lower() == self.__class__.OS_UBUNTU.lower()):\n                return self.__class__.OS_UBUNTU\n            elif (_dist.lower() == self.__class__.OS_DEBIAN.lower()):\n                return self.__class__.OS_DEBIAN\n            elif (_dist.lower() == self.__class__.OS_CENTOS.lower()):\n                return self.__class__.OS_CENTOS\n            elif (_dist.lower() == self.__class__.OS_REDHAT.lower()):\n                return self.__class__.OS_REDHAT\n            elif (_dist.lower() == self.__class__.OS_KALI.lower()):\n                return self.__class__.OS_KALI\n        return _system\n    else:\n        return None", "docstring": "Get the OS name. If OS is linux, returns the Linux distribution name\n\nReturns:\nstr: OS name", "source": "codesearchnet"}
{"code": "def _prefix_exists_in_gcs(gcs_prefix, credentials=None):\n    gcs_service = _get_storage_service(credentials)\n    (bucket_name, prefix) = gcs_prefix[len('gs:\n    request = gcs_service.objects().list(bucket=bucket_name, prefix=prefix, maxResults=1)\n    response = request.execute()\n    return response.get('items', None)", "docstring": "Check whether there is a GCS object whose name starts with the prefix.\n\nSince GCS doesn't actually have folders, this is how we check instead.\n\nArgs:\ngcs_prefix: The path; should start with 'gs://'.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nTrue if the prefix matches at least one object in GCS.\n\nRaises:\nerrors.HttpError: if it can't talk to the server", "source": "codesearchnet"}
{"code": "def with_extrapolation(points, noise, n_points):\n    n_points = 10\n    return kalman_filter((extrapolate_points(points, n_points) + points), noise)[n_points:]", "docstring": "Smooths a set of points, but it extrapolates some points at the beginning\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nnoise (float): Expected noise, the higher it is the more the path will\nbe smoothed.\nReturns:\n:obj:`list` of :obj:`Point`", "source": "codesearchnet"}
{"code": "def _produce_request(self, node_id, acks, timeout, batches):\n    produce_records_by_partition = collections.defaultdict(dict)\n    for batch in batches:\n        topic = batch.topic_partition.topic\n        partition = batch.topic_partition.partition\n        buf = batch.records.buffer()\n        produce_records_by_partition[topic][partition] = buf\n    kwargs = {}\n    if (self.config['api_version'] >= (0, 11)):\n        version = 3\n        kwargs = dict(transactional_id=None)\n    elif (self.config['api_version'] >= (0, 10)):\n        version = 2\n    elif (self.config['api_version'] == (0, 9)):\n        version = 1\n    else:\n        version = 0\n    return ProduceRequest[version](required_acks=acks, timeout=timeout, topics=[(topic, list(partition_info.items())) for (topic, partition_info) in six.iteritems(produce_records_by_partition)], **kwargs)", "docstring": "Create a produce request from the given record batches.\n\nReturns:\nProduceRequest (version depends on api_version)", "source": "codesearchnet"}
{"code": "def _apply_conv(self, inputs, w):\n    \n    outputs = tf.nn.convolution(inputs, w, strides=self._stride,\n                                padding=self._conv_op_padding,\n                                dilation_rate=self._rate,\n                                data_format=self._data_format)\n    return outputs", "docstring": "Apply a convolution operation on `inputs` using variable `w`.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\nw: A weight matrix of the same type as `inputs`.\n\nReturns:\noutputs: The result of the convolution operation on `inputs`.", "source": "juraj-google-style"}
{"code": "def install(path, capture_error=False):  \n    \n    cmd = '%s -m pip install -U . ' % _process.python_executable()\n\n    if has_requirements(path):\n        cmd += '-r requirements.txt'\n\n    logger.info('Installing module with the following command:\\n%s', cmd)\n\n    _process.check_error(shlex.split(cmd), _errors.InstallModuleError, cwd=path, capture_error=capture_error)", "docstring": "Install a Python module in the executing Python environment.\nArgs:\npath (str):  Real path location of the Python module.\ncapture_error (bool): Default false. If True, the running process captures the\nstderr, and appends it to the returned Exception message in case of errors.", "source": "juraj-google-style"}
{"code": "def indicator_arrays(tc_entity_array):\n    type_dict = {}\n    for ea in tc_entity_array:\n        type_dict.setdefault(ea['type'], []).append(ea['value'])\n    return type_dict", "docstring": "Convert TCEntityArray to Indicator Type dictionary.\n\nArgs:\ntc_entity_array (dictionary): The TCEntityArray to convert.\n\nReturns:\n(dictionary): Dictionary containing arrays of indicators for each indicator type.", "source": "codesearchnet"}
{"code": "def _update_token(self, request):\n    self._source_credentials.refresh(request)\n    body = {'delegates': self._delegates, 'scope': self._target_scopes, 'lifetime': (str(self._lifetime) + 's')}\n    headers = {'Content-Type': 'application/json'}\n    self._source_credentials.apply(headers)\n    (self.token, self.expiry) = _make_iam_token_request(request=request, principal=self._target_principal, headers=headers, body=body)", "docstring": "Updates credentials with a new access_token representing\nthe impersonated account.\n\nArgs:\nrequest (google.auth.transport.requests.Request): Request object\nto use for refreshing credentials.", "source": "codesearchnet"}
{"code": "def register(self, message, host):\n    cuuid = message['cuuid']\n    if (len(self.registry) > self.registration_limit):\n        logger.warning(('<%s> Registration limit exceeded' % cuuid))\n        response = serialize_data({'method': 'BYE REGISTER'}, self.compression, encryption=False)\n        return response\n    data = {'host': host[0], 'port': host[1], 'time': datetime.now()}\n    return_msg = {'method': 'OK REGISTER'}\n    if (('encryption' in message) and self.encryption):\n        data['encryption'] = PublicKey(message['encryption'][0], message['encryption'][1])\n        self.encrypted_hosts[host] = cuuid\n        return_msg['encryption'] = [self.encryption.n, self.encryption.e]\n    if (cuuid in self.registry):\n        for key in data:\n            self.registry[cuuid][key] = data[key]\n    else:\n        self.registry[cuuid] = data\n        self.registry[cuuid]['authenticated'] = False\n    response = serialize_data(return_msg, self.compression, encryption=False)\n    logger.debug(('<%s> Registry entries:' % cuuid))\n    for (key, value) in self.registry.items():\n        logger.debug(('<%s> %s %s' % (str(cuuid), str(key), pformat(value))))\n    return response", "docstring": "This function will register a particular client in the server's\nregistry dictionary.\n\nAny clients that are registered will be able to send and recieve events\nto and from the server.\n\nArgs:\nmessage (dict): The client message from the client who wants to\nregister.\nhost (tuple): The (address, port) tuple of the client that is\nregistering.\n\nReturns:\nA server response with an \"OK REGISTER\" if the registration was\nsuccessful or a \"BYE REGISTER\" if unsuccessful.", "source": "codesearchnet"}
{"code": "def __init__(self, parent, *args, **kwargs):\n        \n        self.parent = parent\n        super().__init__(*args, **kwargs)", "docstring": "A simple landing view, template may be overwriten to customize.\n\nArgs:\nparent (Group): ``Group`` host of ``self``.", "source": "juraj-google-style"}
{"code": "def image(cam):\n    (yield marv.set_header(title=cam.topic))\n    msg = (yield marv.pull(cam))\n    if (msg is None):\n        return\n    pytype = get_message_type(cam)\n    rosmsg = pytype()\n    rosmsg.deserialize(msg.data)\n    name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])\n    imgfile = (yield marv.make_file(name))\n    img = imgmsg_to_cv2(rosmsg, 'rgb8')\n    cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))\n    (yield marv.push(imgfile))", "docstring": "Extract first image of input stream to jpg file.\n\nArgs:\ncam: Input stream of raw rosbag messages.\n\nReturns:\nFile instance for first image of input stream.", "source": "codesearchnet"}
{"code": "def report_progress(stream=None):\n    if (stream is None):\n        stream = sys.stderr\n    for reporter in _reporters:\n        reporter(stream)", "docstring": "Report progress from any currently installed reporters.\n\nArgs:\nstream: The text stream (default: sys.stderr) to which\nprogress will be reported.", "source": "codesearchnet"}
{"code": "def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:\n    max_rel_dist = int(2 * max(q_size, k_size) - 1)\n    rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')\n    rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n    relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n    return rel_pos_resized[relative_coords.long()]", "docstring": "Get relative positional embeddings according to the relative positions of\nquery and key sizes.\n\nArgs:\nq_size (int):\nsize of the query.\nk_size (int):\nsize of key k.\nrel_pos (`torch.Tensor`):\nrelative position embeddings (L, channel).\n\nReturns:\nExtracted positional embeddings according to relative positions.", "source": "github-repos"}
{"code": "def rotate(self, matrix, tol=0.001):\n    matrix = SquareTensor(matrix)\n    if (not matrix.is_rotation(tol)):\n        raise ValueError('Rotation matrix is not valid.')\n    sop = SymmOp.from_rotation_and_translation(matrix, [0.0, 0.0, 0.0])\n    return self.transform(sop)", "docstring": "Applies a rotation directly, and tests input matrix to ensure a valid\nrotation.\n\nArgs:\nmatrix (3x3 array-like): rotation matrix to be applied to tensor\ntol (float): tolerance for testing rotation matrix validity", "source": "codesearchnet"}
{"code": "def parse_GDS_columns(lines, subsets):\n    data = []\n    index = []\n    for line in lines:\n        line = line.rstrip()\n        if line.startswith('\n            tmp = __parse_entry(line)\n            data.append(tmp[1])\n            index.append(tmp[0])\n    df = DataFrame(data, index=index, columns=['description'])\n    subset_ids = defaultdict(dict)\n    for (subsetname, subset) in iteritems(subsets):\n        for expid in subset.metadata['sample_id'][0].split(','):\n            try:\n                subset_type = subset.get_type()\n                subset_ids[subset_type][expid] = subset.metadata['description'][0]\n            except Exception as err:\n                logger.error(('Error processing subsets: %s for subset %s' % (subset.get_type(), subsetname)))\n    return df.join(DataFrame(subset_ids))", "docstring": "Parse list of line with columns description from SOFT file of GDS.\n\nArgs:\nlines (:obj:`Iterable`): Iterator over the lines.\nsubsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use.\n\nReturns:\n:obj:`pandas.DataFrame`: Columns description.", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(GetAttributeListRequestPayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the GetAttributeList request payload to a\nstream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def set_encapsulation(self, name, vid, default=False, disable=False):\n    if ('.' not in name):\n        raise NotImplementedError('parameter encapsulation can only be set on subinterfaces')\n    if (name[0:2] not in ['Et', 'Po']):\n        raise NotImplementedError('parameter encapsulation can only be set on Ethernet and Port-Channel subinterfaces')\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('encapsulation dot1q vlan', str(vid), default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the subinterface encapsulation value\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\nvid (int): The vlan id number\ndefault (boolean): Specifies to default the subinterface\nencapsulation\ndisable (boolean): Specifies to disable the subinterface\nencapsulation\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def frag2text(endpoint, stype, selector, clean=False, raw=False, verbose=False):\n    try:\n        return main(endpoint, stype, selector, clean, raw, verbose)\n    except StandardError as err:\n        return err", "docstring": "returns Markdown text of selected fragment.\n\nArgs:\nendpoint: URL, file, or HTML string\nstype: { 'css' | 'xpath' }\nselector: CSS selector or XPath expression\nReturns:\nMarkdown text\nOptions:\nclean: cleans fragment (lxml.html.clean defaults)\nraw: returns raw HTML fragment\nverbose: show http status, encoding, headers", "source": "codesearchnet"}
{"code": "def indexed_slices_intersection_indices_and_values(x1, x2):\n    dim_0 = x1.dense_shape[0]\n    x1_indices_expanded = tf.expand_dims(x1.indices, axis=1)\n    x2_indices_expanded = tf.expand_dims(x2.indices, axis=1)\n    x1_indices_count = x1_indices_expanded.shape[0]\n    x2_indices_count = x2_indices_expanded.shape[0]\n    x1_indices_one_hot = tf.scatter_nd(x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,))\n    x2_indices_one_hot = tf.scatter_nd(x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,))\n    intersection_indices = tf.squeeze(tf.where(tf.math.logical_and(x1_indices_one_hot, x2_indices_one_hot)), axis=-1)\n    intersection_indices_count = tf.shape(intersection_indices)[0]\n\n    def empty_intersection():\n        return (intersection_indices, tf.zeros((0,) + x1.values.shape[1:], x1.dtype), tf.zeros((0,) + x2.values.shape[1:], x2.dtype))\n\n    def non_empty_intersection():\n\n        def values_for_intersection(indices_expanded, indices_count, values):\n            indices_indices = tf.scatter_nd(indices_expanded, tf.range(indices_count), (dim_0,))\n            to_intersection_indices = tf.gather(indices_indices, intersection_indices)\n            return tf.gather(values, to_intersection_indices)\n        x1_values_for_intersection = tf.cond(tf.equal(x1_indices_count, intersection_indices_count), lambda: x1.values, lambda: values_for_intersection(x1_indices_expanded, x1_indices_count, x1.values))\n        x2_values_for_intersection = tf.cond(tf.equal(x2_indices_count, intersection_indices_count), lambda: x2.values, lambda: values_for_intersection(x2_indices_expanded, x2_indices_count, x2.values))\n        return (intersection_indices, x1_values_for_intersection, x2_values_for_intersection)\n    return tf.cond(tf.equal(intersection_indices_count, 0), empty_intersection, non_empty_intersection)", "docstring": "Compute the indices for the intersection of two `tf.IndexedSlices` and\nmodify the values for these indices.\n\nArgs:\nx1: the first `tf.IndexedSlices`.\nx2: the second `tf.IndexedSlices`.\nReturns: A tuple containing:\n- the indices for the intersection\n- `x1` values for the intersection indices (some values were removed)\n- `x2` values for the intersection indices (some values were removed)", "source": "github-repos"}
{"code": "def start_task(self, task_type_str, current_task_index=None):\n    assert (task_type_str in self._task_dict), 'Task type has not been started yet: {}'.format(task_type_str)\n    if (current_task_index is not None):\n        self._task_dict[task_type_str]['task_idx'] = current_task_index\n    else:\n        self._task_dict[task_type_str]['task_idx'] += 1\n    self._log_progress_if_interval_elapsed()", "docstring": "Call when processing is about to start on a single task of the given task\ntype, typically at the top inside of the loop that processes the tasks.\n\nArgs:\ntask_type_str (str):\nThe name of the task, used as a dict key and printed in the progress\nupdates.\n\ncurrent_task_index (int):\nIf the task processing loop may skip or repeat tasks, the index of the\ncurrent task must be provided here. This parameter can normally be left\nunset.", "source": "codesearchnet"}
{"code": "def _GetImportTimestamps(self, pefile_object):\n    import_timestamps = []\n    if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT')):\n        return import_timestamps\n    for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT:\n        dll_name = getattr(importdata, 'dll', '')\n        try:\n            dll_name = dll_name.decode('ascii')\n        except UnicodeDecodeError:\n            dll_name = dll_name.decode('ascii', errors='replace')\n        if (not dll_name):\n            dll_name = '<NO DLL NAME>'\n        timestamp = getattr(importdata.struct, 'TimeDateStamp', 0)\n        if timestamp:\n            import_timestamps.append([dll_name, timestamp])\n    return import_timestamps", "docstring": "Retrieves timestamps from the import directory, if available.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\nlist[int]: import timestamps.", "source": "codesearchnet"}
{"code": "def check_grads(self, f, args, atol=None, rtol=None, delta=None):\n    if delta is None:\n        dtype = np_utils.result_type(*args)\n        epsilon = onp.finfo(dtype).eps\n        delta = epsilon ** (1.0 / 3.0)\n    theoretical, numerical = gradient_checker_v2.compute_gradient(to_tf_fn(f), args, delta=delta)\n    self.assertAllClose(theoretical, numerical, check_dtypes=False, atol=atol, rtol=rtol)", "docstring": "Check gradients against finite differences.\n\nArgs:\nf: function to check at ``f(*args)``.\nargs: a list or tuple of argument values.\natol: absolute tolerance for gradient equality.\nrtol: relative tolerance for gradient equality.\ndelta: step size used for finite differences.", "source": "github-repos"}
{"code": "def add_user_to_template(self, template_id, account_id=None, email_address=None):\n    return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address)", "docstring": "Gives the specified Account access to the specified Template\n\nArgs:\n\ntemplate_id (str):      The id of the template to give the account access to\n\naccount_id (str):       The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided.\n\nemail_address (str):    The email address of the account to give access to.\n\nReturns:\nA Template object", "source": "codesearchnet"}
{"code": "def AddCustomJsonEnumMapping(enum_type, python_name, json_name, package=None):\n    if (not issubclass(enum_type, messages.Enum)):\n        raise exceptions.TypecheckError(('Cannot set JSON enum mapping for non-enum \"%s\"' % enum_type))\n    if (python_name not in enum_type.names()):\n        raise exceptions.InvalidDataError(('Enum value %s not a value for type %s' % (python_name, enum_type)))\n    field_mappings = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})\n    _CheckForExistingMappings('enum', enum_type, python_name, json_name)\n    field_mappings[python_name] = json_name", "docstring": "Add a custom wire encoding for a given enum value.\n\nThis is primarily used in generated code, to handle enum values\nwhich happen to be Python keywords.\n\nArgs:\nenum_type: (messages.Enum) An enum type\npython_name: (basestring) Python name for this value.\njson_name: (basestring) JSON name to be used on the wire.\npackage: (NoneType, optional) No effect, exists for legacy compatibility.", "source": "codesearchnet"}
{"code": "def discard_observer(self, observer):\n    discarded = False\n    key = self.make_key(observer)\n    if (key in self.observers):\n        del self.observers[key]\n        discarded = True\n    return discarded", "docstring": "Un-register an observer.\n\nArgs:\nobserver: The observer to un-register.\n\nReturns true if an observer was removed, otherwise False.", "source": "codesearchnet"}
{"code": "def persist(id_obj, filename=None, suffix=None):\n    if (suffix is None):\n        suffix = '.pickle'\n    if hasattr(id_obj, 'id'):\n        ident = id_obj.id\n    else:\n        ident = str(id(id_obj))\n    if (filename is None):\n        filename = '{obj_id}{suffix}'.format(obj_id=ident, suffix=suffix)\n    with open(filename, 'wb') as obj_file:\n        dill.dump(id_obj, obj_file)\n    return os.path.abspath(filename)", "docstring": "Persist an object in the filesystem.\n\nThis will generate a pickled version of the given obj in the filename path.\nObjects shall provide an id() method to be able to use this persistence API.\nIf not, we will use the id() builtin of python to generate an identifier\nfor you.\n\nThe file will be created, if it does not exist.\nIf the file already exists, we will overwrite it.\n\nArgs:\nid_obj (Any): An identifiable object you want to persist in the\nfilesystem.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A RemBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added\ntoken_ids_1 (`List[int]`, *optional*, defaults to `None`):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def _join_index_objects(self, axis, other_index, how, sort=True):\n        \n        if isinstance(other_index, list):\n            joined_obj = self.columns if not axis else self.index\n            \n            for obj in other_index:\n                joined_obj = joined_obj.join(obj, how=how)\n\n            return joined_obj\n        if not axis:\n            return self.columns.join(other_index, how=how, sort=sort)\n        else:\n            return self.index.join(other_index, how=how, sort=sort)", "docstring": "Joins a pair of index objects (columns or rows) by a given strategy.\n\nArgs:\naxis: The axis index object to join (0 for columns, 1 for index).\nother_index: The other_index to join on.\nhow: The type of join to join to make (e.g. right, left).\n\nReturns:\nJoined indices.", "source": "juraj-google-style"}
{"code": "def type_parameter(self, unknown: _UnknownType, base_class: pytd.Class, item: pytd.TemplateItem) -> StrictType:\n    assert is_unknown(unknown)\n    name = unknown.name + '.' + base_class.name + '.' + item.type_param.name\n    return StrictType(name)", "docstring": "This generates the type parameter when matching against a generic type.\n\nFor example, when we match ~unknown1 against list[T], we need an additional\ntype to model the T in \"~unknown1[T]\". This type would have the name\n\"~unknown1.list.T\".\n\nArgs:\nunknown: An unknown type. This is the type that's matched against\nbase_class[T].\nbase_class: The base class of the generic we're matching the unknown\nagainst. E.g. \"list\".\nitem: The actual type parameter. (\"T\" in the examples above).\n\nReturns:\nA type (pytd.Node) to represent this type parameter.", "source": "github-repos"}
{"code": "def check(self, read_tuple_name):\n        \n\n        parts = read_tuple_name.split(\"__\")\n\n        if len(parts[0]) != self.prefix_width or len(parts[1]) != self.read_tuple_id_width:\n            return False\n\n        segments = parts[2][1:-1].split(\"),(\")\n        for segment in segments:\n            int_widths = list(map(len, segment.split(\",\")))\n            if self.genome_id_width != int_widths[0]:\n                return False\n            if self.chr_id_width != int_widths[1]:\n                return False\n            if self.coor_width != int_widths[3] or self.coor_width != int_widths[4]:\n                return False\n\n        return True", "docstring": "Check if the given read tuple name satisfies this profile.\n\nArgs:\nread_tuple_name (str): Read tuple name.", "source": "juraj-google-style"}
{"code": "def plot_projectors(self, ax=None, fontsize=12, **kwargs):\n        \n        ax, fig, plt = get_ax_fig_plt(ax)\n        title = kwargs.pop(\"title\", \"Projectors\")\n        ax.grid(True)\n        ax.set_xlabel('r [Bohr]')\n        ax.set_ylabel(r\"$r\\tilde p\\, [Bohr]^{-\\frac{1}{2}}$\")\n\n        \n        \n\n        for state, rfunc in self.projector_functions.items():\n            ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label=\"TPROJ: \" + state)\n\n        ax.legend(loc=\"best\", shadow=True, fontsize=fontsize)\n\n        return fig", "docstring": "Plot the PAW projectors.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns: `matplotlib` figure", "source": "juraj-google-style"}
{"code": "def _ExtractPath(response, pathspec_attribute=None):\n  \n  path_specification = response\n\n  if pathspec_attribute is not None:\n    if response.HasField(pathspec_attribute):\n      path_specification = response.Get(pathspec_attribute)\n\n  if path_specification.HasField(\"pathspec\"):\n    path_specification = path_specification.pathspec\n\n  if path_specification.HasField(\"path\"):\n    path_specification = path_specification.path\n\n  if isinstance(path_specification, Text):\n    return path_specification\n  return None", "docstring": "Returns the path from a client action response as a string.\n\nArgs:\nresponse: A client action response.\npathspec_attribute: Specifies the field which stores the pathspec.\n\nReturns:\nThe path as a string or None if no path is found.", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if self._username:\n        self._username.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Username/password credential struct missing the username.')\n    if self._password:\n        self._password.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(UsernamePasswordCredential, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the UsernamePasswordCredential struct to a\nstream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the username is not defined.", "source": "codesearchnet"}
{"code": "def predict_proba(self, a, b, **kwargs):\n        \n        a = scale(a).reshape((-1, 1))\n        b = scale(b).reshape((-1, 1))\n\n        return self.anm_score(b, a) - self.anm_score(a, b)", "docstring": "Prediction method for pairwise causal inference using the ANM model.\n\nArgs:\na (numpy.ndarray): Variable 1\nb (numpy.ndarray): Variable 2\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "juraj-google-style"}
{"code": "def _unescape_token(escaped_token):\n    \n\n    def match(m):\n        if m.group(1) is None:\n            return u\"_\" if m.group(0) == u\"\\\\u\" else u\"\\\\\"\n\n        try:\n            return six.unichr(int(m.group(1)))\n        except (ValueError, OverflowError):\n            return \"\"\n\n    trimmed = escaped_token[:-1] if escaped_token.endswith(\"_\") else escaped_token\n    return _UNESCAPE_REGEX.sub(match, trimmed)", "docstring": "Inverse of _escape_token().\nArgs:\nescaped_token: a unicode string\nReturns:\ntoken: a unicode string", "source": "juraj-google-style"}
{"code": "def prepare_headers(self, http_headers, soap_action):\n        \n\n        headers = {'Content-Type': 'text/xml; charset=\"utf-8\"'}\n        if soap_action is not None:\n            headers.update({'SOAPACTION': '\"{}\"'.format(soap_action)})\n        if http_headers is not None:\n            headers.update(http_headers)\n        return headers", "docstring": "Prepare the http headers for sending.\n\nAdd the SOAPACTION header to the others.\n\nArgs:\nhttp_headers (dict): A dict in the form {'Header': 'Value,..}\ncontaining http headers to use for the http request.\nsoap_action (str): The value of the SOAPACTION header.\n\nReturns:\ndict: headers including the SOAPACTION header.", "source": "juraj-google-style"}
{"code": "def parse(f, encoding='utf-8'):\n    \n    if hasattr(f, 'read'):\n        for event in _parse(f):\n            yield event\n    else:\n        with io.open(f, encoding=encoding) as fh:\n            for event in _parse(fh):\n                yield event", "docstring": "Parse the TDL file *f* and yield the interpreted contents.\n\nIf *f* is a filename, the file is opened and closed when the\ngenerator has finished, otherwise *f* is an open file object and\nwill not be closed when the generator has finished.\n\nArgs:\nf (str, file): a filename or open file object\nencoding (str): the encoding of the file (default: `\"utf-8\"`;\nignored if *f* is an open file)", "source": "juraj-google-style"}
{"code": "def _sym_missing(self) -> typing.Dict[Union[str, int], Any]:\n    missing = dict()\n    if self._value_spec and self._value_spec.schema:\n        matched_keys, _ = self._value_spec.schema.resolve(self.keys())\n        for key_spec, keys in matched_keys.items():\n            field = self._value_spec.schema[key_spec]\n            assert keys or isinstance(key_spec, pg_typing.NonConstKey), key_spec\n            if keys:\n                for key in keys:\n                    v = self.sym_getattr(key)\n                    if utils.MISSING_VALUE == v:\n                        missing[key] = field.value.default\n                    elif isinstance(v, base.Symbolic):\n                        missing_child = v.sym_missing(flatten=False)\n                        if missing_child:\n                            missing[key] = missing_child\n    else:\n        for k, v in self.sym_items():\n            if isinstance(v, base.Symbolic):\n                missing_child = v.sym_missing(flatten=False)\n                if missing_child:\n                    missing[k] = missing_child\n    return missing", "docstring": "Returns missing values.\n\nReturns:\nA dict of key to MISSING_VALUE.", "source": "github-repos"}
{"code": "def set_submission_objects(form_fields):\n    \n    variant_ids = get_submission_variants(form_fields) \n\n    \n    variant_objs = get_objects_from_form(variant_ids, form_fields, 'variant')\n\n    \n    casedata_objs = get_objects_from_form(variant_ids, form_fields, 'casedata')\n\n    return (variant_objs, casedata_objs)", "docstring": "Creates a list of submission objects (variant and case-data) from the clinvar submission form in blueprints/variants/clinvar.html.\n\nArgs:\nform_fields(dict): it's the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER\n\nReturns:\nsubmission_objects(list): a list of variant and case-data submission objects, ready to be included in the clinvar database collection", "source": "juraj-google-style"}
{"code": "def to_list(x):\n    if isinstance(x, list):\n        return x\n    return [x]", "docstring": "Normalizes a list/tensor into a list.\n\nIf a tensor is passed, we return\na list of size 1 containing the tensor.\n\nArgs:\nx: target object to be normalized.\n\nReturns:\nA list.", "source": "github-repos"}
{"code": "def AddTimeZoneOption(self, argument_group):\n    argument_group.add_argument('-z', '--zone', '--timezone', dest='timezone', action='store', type=str, default=None, help='explicitly define the timezone. Typically the timezone is determined automatically where possible otherwise it will default to UTC. Use \"-z list\" to see a list of available timezones.')", "docstring": "Adds the time zone option to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def add_deploy(state, deploy_func, *args, **kwargs):\n    \n\n    frameinfo = get_caller_frameinfo()\n    kwargs['frameinfo'] = frameinfo\n\n    for host in state.inventory:\n        deploy_func(state, host, *args, **kwargs)", "docstring": "Prepare & add an deploy to pyinfra.state by executing it on all hosts.\n\nArgs:\nstate (``pyinfra.api.State`` obj): the deploy state to add the operation\ndeploy_func (function): the operation function from one of the modules,\nie ``server.user``\nargs/kwargs: passed to the operation function", "source": "juraj-google-style"}
{"code": "def _GenDiscoveryDocCallback(args, discovery_func=_GenDiscoveryDoc):\n  \n  discovery_paths = discovery_func(args.service, args.output,\n                                   hostname=args.hostname,\n                                   application_path=args.application)\n  for discovery_path in discovery_paths:\n    print 'API discovery document written to %s' % discovery_path", "docstring": "Generate discovery docs to files.\n\nArgs:\nargs: An argparse.Namespace object to extract parameters from\ndiscovery_func: A function that generates discovery docs and stores them to\nfiles, accepting a list of service names, a discovery doc format, and an\noutput directory.", "source": "juraj-google-style"}
{"code": "def _retry_from_retry_config(retry_params, retry_codes):\n    exception_classes = [_exception_class_for_grpc_status_name(code) for code in retry_codes]\n    return retry.Retry(retry.if_exception_type(*exception_classes), initial=(retry_params['initial_retry_delay_millis'] / _MILLIS_PER_SECOND), maximum=(retry_params['max_retry_delay_millis'] / _MILLIS_PER_SECOND), multiplier=retry_params['retry_delay_multiplier'], deadline=(retry_params['total_timeout_millis'] / _MILLIS_PER_SECOND))", "docstring": "Creates a Retry object given a gapic retry configuration.\n\nArgs:\nretry_params (dict): The retry parameter values, for example::\n\n{\n\"initial_retry_delay_millis\": 1000,\n\"retry_delay_multiplier\": 2.5,\n\"max_retry_delay_millis\": 120000,\n\"initial_rpc_timeout_millis\": 120000,\n\"rpc_timeout_multiplier\": 1.0,\n\"max_rpc_timeout_millis\": 120000,\n\"total_timeout_millis\": 600000\n}\n\nretry_codes (sequence[str]): The list of retryable gRPC error code\nnames.\n\nReturns:\ngoogle.api_core.retry.Retry: The default retry object for the method.", "source": "codesearchnet"}
{"code": "def split(input_layer, split_dim=0, num_splits=2):\n  \n  shape = input_layer.shape\n  _check_split_dims(num_splits, split_dim, shape)\n  splits = tf.split(\n      value=input_layer, num_or_size_splits=num_splits, axis=split_dim)\n  return input_layer.with_sequence(splits)", "docstring": "Splits this Tensor along the split_dim into num_splits Equal chunks.\n\nExamples:\n\n* `[1, 2, 3, 4] -> [1, 2], [3, 4]`\n* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`\n\nArgs:\ninput_layer: The chainable object, supplied.\nsplit_dim: The dimension to split along. Defaults to batch.\nnum_splits: The number of splits.\nReturns:\nA list of PrettyTensors.\nRaises:\nValueError: If split_dim is out of range or isn't divided evenly by\nnum_splits.", "source": "juraj-google-style"}
{"code": "def normalize_tuple(value, n, name):\n    if isinstance(value, int):\n        return (value,) * n\n    else:\n        try:\n            value_tuple = tuple(value)\n        except TypeError:\n            raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value))\n        if len(value_tuple) != n:\n            raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value))\n        for single_value in value_tuple:\n            try:\n                int(single_value)\n            except (ValueError, TypeError):\n                raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value) + ' including element ' + str(single_value) + ' of type' + ' ' + str(type(single_value)))\n        return value_tuple", "docstring": "Transforms a single integer or iterable of integers into an integer tuple.\n\nArgs:\nvalue: The value to validate and convert. Could an int, or any iterable of\nints.\nn: The size of the tuple to be returned.\nname: The name of the argument being validated, e.g. \"strides\" or\n\"kernel_size\". This is only used to format error messages.\n\nReturns:\nA tuple of n integers.\n\nRaises:\nValueError: If something else than an int/long or iterable thereof was\npassed.", "source": "github-repos"}
{"code": "def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr):\n    ip = sequence.address\n    next_ip = None\n    while ip:\n        try:\n            instr = sequence.fetch(ip)\n        except ReilSequenceInvalidAddressError:\n            assert (split_address(ip)[1] == 0)\n            next_ip = ip\n            break\n        try:\n            target_addr = sequence.get_next_address(ip)\n        except ReilSequenceInvalidAddressError:\n            target_addr = next_addr\n        next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current)\n        try:\n            ip = (next_ip if next_ip else sequence.get_next_address(ip))\n        except ReilSequenceInvalidAddressError:\n            break\n    return next_ip", "docstring": "Process a REIL sequence.\n\nArgs:\nsequence (ReilSequence): A REIL sequence to process.\navoid (list): List of address to avoid.\ninitial_state: Initial state.\nexecution_state: Execution state queue.\ntrace_current (list): Current trace.\nnext_addr: Address of the next instruction following the current one.\n\nReturns:\nReturns the next instruction to execute in case there is one, otherwise returns None.", "source": "codesearchnet"}
{"code": "def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):\n    logger.info('Starting analysis plugins.')\n    for analysis_plugin in analysis_plugins.values():\n        self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin\n        process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writer)\n        if (not process):\n            logger.error('Unable to create analysis process: {0:s}'.format(analysis_plugin.NAME))\n    logger.info('Analysis plugins running')", "docstring": "Starts the analysis processes.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\nanalysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that\nshould be run and their names.", "source": "codesearchnet"}
{"code": "def dimensions(self, selection='all', label=False):\n    if (label in ['name', True]):\n        label = 'short'\n    elif (label == 'label'):\n        label = 'long'\n    elif label:\n        raise ValueError(\"label needs to be one of True, False, 'name' or 'label'\")\n    lambdas = {'k': ((lambda x: x.kdims), {'full_breadth': False}), 'v': ((lambda x: x.vdims), {}), 'c': ((lambda x: x.cdims), {})}\n    aliases = {'key': 'k', 'value': 'v', 'constant': 'c'}\n    if (selection in ['all', 'ranges']):\n        groups = [d for d in self._dim_groups if (d != 'cdims')]\n        dims = [dim for group in groups for dim in getattr(self, group)]\n    elif isinstance(selection, list):\n        dims = [dim for group in selection for dim in getattr(self, ('%sdims' % aliases.get(group)))]\n    elif (aliases.get(selection) in lambdas):\n        selection = aliases.get(selection, selection)\n        (lmbd, kwargs) = lambdas[selection]\n        key_traversal = self.traverse(lmbd, **kwargs)\n        dims = [dim for keydims in key_traversal for dim in keydims]\n    else:\n        raise KeyError((\"Invalid selection %r, valid selections include'all', 'value' and 'key' dimensions\" % repr(selection)))\n    return [((dim.label if (label == 'long') else dim.name) if label else dim) for dim in dims]", "docstring": "Lists the available dimensions on the object\n\nProvides convenient access to Dimensions on nested Dimensioned\nobjects. Dimensions can be selected by their type, i.e. 'key'\nor 'value' dimensions. By default 'all' dimensions are\nreturned.\n\nArgs:\nselection: Type of dimensions to return\nThe type of dimension, i.e. one of 'key', 'value',\n'constant' or 'all'.\nlabel: Whether to return the name, label or Dimension\nWhether to return the Dimension objects (False),\nthe Dimension names (True/'name') or labels ('label').\n\nReturns:\nList of Dimension objects or their names or labels", "source": "codesearchnet"}
{"code": "def mixture(val: Any, default: Any=RaiseTypeErrorIfNotProvided) -> Sequence[Tuple[(float, Any)]]:\n    getter = getattr(val, '_mixture_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if (result is not NotImplemented):\n        return result\n    if (default is not RaiseTypeErrorIfNotProvided):\n        return default\n    if (getter is None):\n        raise TypeError(\"object of type '{}' has no _mixture_ method.\".format(type(val)))\n    raise TypeError(\"object of type '{}' does have a _mixture_ method, but it returned NotImplemented.\".format(type(val)))", "docstring": "Return a sequence of tuples representing a probabilistic combination.\n\nA mixture is described by an iterable of tuples of the form\n\n(probability of object, object)\n\nThe probability components of the tuples must sum to 1.0 and be between\n0 and 1 (inclusive).\n\nArgs:\nval: The value whose mixture is being computed.\ndefault: A default value if val does not support mixture.\n\nReturns:\nAn iterable of tuples of size 2. The first element of the tuple is a\nprobability (between 0 and 1) and the second is the object that occurs\nwith that probability in the mixture. The probabilities will sum to 1.0.", "source": "codesearchnet"}
{"code": "def process(self, element):\n    text_line = element.strip()\n    if not text_line:\n        self.empty_line_counter.inc(1)\n    words = re.findall(\"[\\\\w\\\\']+\", text_line, re.UNICODE)\n    for w in words:\n        self.words_counter.inc()\n        self.word_lengths_counter.inc(len(w))\n        self.word_lengths_dist.update(len(w))\n    return words", "docstring": "Returns an iterator over the words of this element.\n\nThe element is a line of text.  If the line is blank, note that, too.\n\nArgs:\nelement: the element being processed\n\nReturns:\nThe processed element.", "source": "github-repos"}
{"code": "def GetEntries(\n      self, parser_mediator, cookie_data=None, url=None, **kwargs):\n    \n    fields = cookie_data.split('.')\n    number_of_fields = len(fields)\n\n    if number_of_fields != 1:\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported number of fields: {0:d} in cookie: {1:s}'.format(\n              number_of_fields, self.COOKIE_NAME))\n      return\n\n    try:\n      \n      last_visit_posix_time = int(fields[0], 10) / 10000000\n    except ValueError:\n      last_visit_posix_time = None\n\n    if last_visit_posix_time is not None:\n      date_time = dfdatetime_posix_time.PosixTime(\n          timestamp=last_visit_posix_time)\n      timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED\n    else:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n\n    event_data = GoogleAnalyticsEventData('utmt')\n    event_data.cookie_name = self.COOKIE_NAME\n    event_data.url = url\n\n    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts event objects from the cookie.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ncookie_data (bytes): cookie data.\nurl (str): URL or path where the cookie got set.", "source": "juraj-google-style"}
{"code": "def remove_server(self, name):\n        \n        cmd = self.command_builder('no ntp server', value=name)\n        return self.configure(cmd)", "docstring": "Remove an NTP server entry from the node config\n\nArgs:\nname (string): The IP address or FQDN of the NTP server.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "juraj-google-style"}
{"code": "def value(self):\n    if self.isenum():\n        if isinstance(self._value, self.enum_ref):\n            return self._value.value\n        return self._value\n    elif self.is_bitmask():\n        return self._value.bitmask\n    else:\n        return self._value", "docstring": "Return this type's value.\n\nReturns:\nobject: The value of an enum, bitmask, etc.", "source": "codesearchnet"}
{"code": "def _evolve_subsystem(self, state, qargs):\n        \n        mat = np.reshape(self.data, self._shape)\n        \n        \n        state_size = len(state)\n        state_dims = self._automatic_dims(None, state_size)\n        if self.input_dims() != len(qargs) * (2,):\n            raise QiskitError(\n                \"Operator input dimensions are not compatible with state subsystem dimensions.\"\n            )\n        if state.ndim == 1:\n            \n            tensor = np.reshape(state, state_dims)\n            indices = [len(state_dims) - 1 - qubit for qubit in qargs]\n            tensor = self._einsum_matmul(tensor, mat, indices)\n            return np.reshape(tensor, state_size)\n        \n        tensor = np.reshape(state, 2 * state_dims)\n        indices = [len(state_dims) - 1 - qubit for qubit in qargs]\n        right_shift = len(state_dims)\n        \n        tensor = self._einsum_matmul(tensor, mat, indices)\n        \n        \n        \n        tensor = self._einsum_matmul(\n            tensor, np.conj(mat), indices, shift=right_shift)\n        return np.reshape(tensor, [state_size, state_size])", "docstring": "Evolve a quantum state by the operator.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nQuantumState: the output quantum state.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"}
{"code": "def run(app: web.Application):\n    host = app['config']['host']\n    port = app['config']['port']\n    web.run_app(app, host=host, port=port)", "docstring": "Runs the application in an async context.\nThis function will block indefinitely until the application is shut down.\n\nArgs:\napp (web.Application):\nThe Aiohttp Application as created by `create_app()`", "source": "codesearchnet"}
{"code": "def from_conv_part_data(conv_part_data, self_user_id):\n    user_id = UserID(chat_id=conv_part_data.id.chat_id, gaia_id=conv_part_data.id.gaia_id)\n    return User(user_id, conv_part_data.fallback_name, None, None, [], ((self_user_id == user_id) or (self_user_id is None)))", "docstring": "Construct user from ``ConversationParticipantData`` message.\n\nArgs:\nconv_part_id: ``ConversationParticipantData`` message.\nself_user_id (~hangups.user.UserID or None): The ID of the current\nuser. If ``None``, assume ``conv_part_id`` is the current user.\n\nReturns:\n:class:`~hangups.user.User` object.", "source": "codesearchnet"}
{"code": "def _validate_connection(self, action, uuid, key):\n    if (uuid not in self._connections):\n        self._logger.warn('Received message for device with no connection 0x%X', uuid)\n        return None\n    data = self._connections[uuid]\n    if (key != data['key']):\n        self._logger.warn('Received message for device with incorrect key, uuid=0x%X', uuid)\n        return None\n    return data['connection_id']", "docstring": "Validate that a message received for a device has the right key\n\nIf this action is valid the corresponding internal connection id to\nbe used with the DeviceManager is returned, otherwise None is returned\nand an invalid message status is published.\n\nArgs:\nslug (string): The slug for the device we're trying to connect to\nuuid (int): The uuid corresponding to the slug\nkey (string): The key passed in when this device was first connected\nto\n\nReturns:\nint: if the action is allowed, otherwise None", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n    if not isinstance(hidden_states, (tuple, list)):\n        raise TypeError('hidden_states should be a tuple or list of tensors')\n    if len(hidden_states) != len(self.config.neck_hidden_sizes):\n        raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n    if self.reassemble_stage is not None:\n        hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n    features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n    output = self.fusion_stage(features)\n    return output", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def update_object(self, ref, payload, return_fields=None):\n    query_params = self._build_query_params(return_fields=return_fields)\n    opts = self._get_request_options(data=payload)\n    url = self._construct_url(ref, query_params)\n    self._log_request('put', url, opts)\n    r = self.session.put(url, **opts)\n    self._validate_authorized(r)\n    if (r.status_code != requests.codes.ok):\n        self._check_service_availability('update', r, ref)\n        raise ib_ex.InfobloxCannotUpdateObject(response=jsonutils.loads(r.content), ref=ref, content=r.content, code=r.status_code)\n    return self._parse_reply(r)", "docstring": "Update an Infoblox object\n\nArgs:\nref      (str): Infoblox object reference\npayload (dict): Payload with data to send\nReturns:\nThe object reference of the updated object\nRaises:\nInfobloxException", "source": "codesearchnet"}
{"code": "def _init_log(level=logging.DEBUG):\n    \n    log = logging.getLogger(__file__)\n    log.setLevel(level)\n    handler = logging.StreamHandler(sys.stdout)\n    handler.setLevel(level)\n    formatter = logging.Formatter('%(asctime)s: %(message)s',\n                                  '%Y/%m/%d-%H:%M:%S')\n    handler.setFormatter(formatter)\n    log.addHandler(handler)\n    return log", "docstring": "Initialise the logging object.\nArgs:\nlevel (int): Logging level.\nReturns:\nLogger: Python logging object.", "source": "juraj-google-style"}
{"code": "def get_formatter(self):\n    if (not self.fmt):\n        self.fmt = '%(asctime)s.%(msecs)03d {host} {progname} (%(process)d): %(message)s'.format(host=self.hostname, progname=self.progname)\n    if (not self.datefmt):\n        self.datefmt = '%Y-%m-%dT%H:%M:%S'\n    return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)", "docstring": "Create a fully configured `logging.Formatter`\n\nExample of formatted log message:\n2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello\n\nReturns:\n(obj): Instance of `logging.Formatter`", "source": "codesearchnet"}
{"code": "def config_get(config, *path, default=None):\n    \n    o = object()\n    result = get_in(config, path, default=o)\n    if result is not o:\n        return result\n    else:\n        return default", "docstring": "Get a configuration option following a path through the config\n\nExample usage:\n\n>>> config_get(config,\n'problem', 'problem_type_details', 'scorer',\ndefault='accuracy')\n\nArgs:\nconfig (dict): config dict\n*path (list[str]): List of config sections and options to follow.\ndefault (default=None): A default value to return in the case that\nthe option does not exist.", "source": "juraj-google-style"}
{"code": "def xmoe2_v1():\n    hparams = xmoe2_dense(0)\n    moe.set_default_moe_hparams(hparams)\n    hparams.decoder_layers = (['local_att', 'local_att', 'drd', 'att', 'drd', 'local_att', 'local_att', 'hmoe'] * 4)[:(- 1)]\n    hparams.d_ff = 2048\n    hparams.d_kv = 128\n    hparams.moe_hidden_size = 32768\n    hparams.mesh_shape = 'b0:4;b1:8'\n    hparams.layout = 'outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0'\n    hparams.outer_batch_size = 4\n    hparams.moe_num_experts = [8, 4]\n    hparams.num_heads = 4\n    return hparams", "docstring": "Model incorporating mixture-of-experts and local-attention.\n\n~6B parameters\n\n32 experts in 3 hierarchichal moe layers.\n\nReturns:\na hparams", "source": "codesearchnet"}
{"code": "def __init__(self, name, data=b'', data_type=definitions.REG_NONE, offset=0):\n    \n    super(FakeWinRegistryValue, self).__init__()\n    self._data = data\n    self._data_type = data_type\n    self._data_size = len(data)\n    self._name = name\n    self._offset = offset", "docstring": "Initializes a Windows Registry value.\n\nArgs:\nname (str): name of the Windows Registry value.\ndata (Optional[bytes]): value data.\ndata_type (Optional[int]): value data type.\noffset (Optional[int]): offset of the value within the Windows Registry\nfile.", "source": "juraj-google-style"}
{"code": "def depth_soil_specific_heat(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_soil_specific_heat`'.format(value))\n\n        self._depth_soil_specific_heat = value", "docstring": "Corresponds to IDD Field `depth_soil_specific_heat`\n\nArgs:\nvalue (float): value for IDD Field `depth_soil_specific_heat`\nUnit: J/kg-K,\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None):\n    dtype = standardize_dtype(dtype)\n    return ops.zeros(shape, dtype=dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes\nare supported. If not specified, `keras.backend.floatx()`\nis used, which default to `float32` unless you configured it\notherwise (via `keras.backend.set_floatx(float_dtype)`).", "source": "github-repos"}
{"code": "def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):\n    \n\n    date_parser = date_parser or (lambda x: x)\n    dotted_ext, dotted_output_ext = None, None\n    if ext != None and output_ext != None:\n        dotted_ext = ('' if ext.startswith('.') else '.') + ext\n        dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext\n    table = {}\n    for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity):\n        file_path = file_properties['path']\n        if output_ext and (dotted_output_ext + '.') in file_path:\n            continue\n        df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)\n        df = flatten_dataframe(df, verbosity=verbosity)\n        if dotted_ext != None and dotted_output_ext != None:\n            df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)\n    return table", "docstring": "Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict\n\nArguments:\npath (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from\next (str): file name extension (to filter files by)\ndate_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used\n\nReturns:\ndict of DataFrame: { file_path: flattened_data_frame }", "source": "juraj-google-style"}
{"code": "def set_kernel_process_name(name):\n  \n  if not isinstance(name, bytes):\n    name = name.encode('ascii', 'replace')\n  try:\n    \n    with open('/proc/self/comm', 'wb') as proc_comm:\n      proc_comm.write(name[:15])\n  except EnvironmentError:\n    try:\n      import ctypes\n    except ImportError:\n      return  \n    try:\n      libc = ctypes.CDLL('libc.so.6')\n    except EnvironmentError:\n      return  \n    pr_set_name = ctypes.c_ulong(15)  \n    zero = ctypes.c_ulong(0)\n    try:\n      libc.prctl(pr_set_name, name, zero, zero, zero)\n      \n    except AttributeError:\n      return", "docstring": "Changes the Kernel's /proc/self/status process name on Linux.\n\nThe kernel name is NOT what will be shown by the ps or top command.\nIt is a 15 character string stored in the kernel's process table that\nis included in the kernel log when a process is OOM killed.\nThe first 15 bytes of name are used.  Non-ASCII unicode is replaced with '?'.\n\nDoes nothing if /proc/self/comm cannot be written or prctl() fails.\n\nArgs:\nname: bytes|unicode, the Linux kernel's command name to set.", "source": "juraj-google-style"}
{"code": "def paint(self):\n    snippet = {'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity), 'fill-extrusion-color': VectorStyle.get_style_value(self.color), 'fill-extrusion-base': VectorStyle.get_style_value(self.base), 'fill-extrusion-height': VectorStyle.get_style_value(self.height)}\n    if self.translate:\n        snippet['fill-extrusion-translate'] = self.translate\n    return snippet", "docstring": "Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry\n\nReturns:\nA dict that can be converted to a mapbox-gl javascript paint snippet", "source": "codesearchnet"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    raise NotImplementedError", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. If not provided will return tensor\nof `tf.float32`.\n**kwargs: Additional keyword arguments. Accepted values:\n`partition_shape` and `partition_offset`. Used when creating a single\npartition in a partitioned variable. `partition_shape` is the shape of\nthe partition (i.e. the shape of the returned tensor) and\n`partition_offset` is a tuple of `int` specifying the offset of this\npartition w.r.t each axis. For example, a tensor of shape `(30, 100)`\ncan be partitioned into two partitions: `p0` of shape `(10, 100)` and\n`p1` of shape `(20, 100)`; if the initializer is called with\n`partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should\nreturn the value for `p1`.", "source": "github-repos"}
{"code": "def make_file_extension_assertion(extension):\n    \n    def file_extension_assertion(file_path):\n        base, ext = os.path.splitext(file_path)\n        if ext.lower() != extension:\n            raise argparse.ArgumentTypeError('File must have ' + extension + ' extension')\n        return file_path\n    return file_extension_assertion", "docstring": "Function factory for file extension argparse assertion\nArgs:\nextension (string): the file extension to assert\n\nReturns:\nstring: the supplied extension, if assertion is successful.", "source": "juraj-google-style"}
{"code": "def _handle_start_dag(self, request):\n    dag_name = self._queue_dag(name=request.payload['name'], data=request.payload['data'])\n    return Response(success=(dag_name is not None), uid=request.uid, payload={'dag_name': dag_name})", "docstring": "The handler for the start_dag request.\n\nThe start_dag request creates a new dag and adds it to the queue.\n\nArgs:\nrequest (Request): Reference to a request object containing the\nincoming request. The payload has to contain the\nfollowing fields:\n'name': the name of the dag that should be started\n'data': the data that is passed onto the start tasks\n\nReturns:\nResponse: A response object containing the following fields:\n- dag_name: The name of the started dag.", "source": "codesearchnet"}
{"code": "def _get_update(self, variant):\n        \n        update = {\n                '$inc': {\n                    'homozygote': variant.get('homozygote', 0),\n                    'hemizygote': variant.get('hemizygote', 0),\n                    'observations': 1\n                },\n                '$set': {\n                    'chrom': variant.get('chrom'),\n                    'start': variant.get('pos'),\n                    'end': variant.get('end'),\n                    'ref': variant.get('ref'),\n                    'alt': variant.get('alt'),\n                }\n             }\n        if variant.get('case_id'):\n            update['$push'] = {\n                                'families': {\n                                '$each': [variant.get('case_id')],\n                                '$slice': -50\n                                }\n                            }\n        return update", "docstring": "Convert a variant to a proper update\n\nArgs:\nvariant(dict)\n\nReturns:\nupdate(dict)", "source": "juraj-google-style"}
{"code": "def RemapOperator(opcode_name):\n    old_name_to_new_name = {'CONVOLUTION': 'CONV_2D', 'DEPTHWISE_CONVOLUTION': 'DEPTHWISE_CONV_2D', 'AVERAGE_POOL': 'AVERAGE_POOL_2D', 'MAX_POOL': 'MAX_POOL_2D', 'L2_POOL': 'L2_POOL_2D', 'SIGMOID': 'LOGISTIC', 'L2NORM': 'L2_NORMALIZATION', 'LOCAL_RESPONSE_NORM': 'LOCAL_RESPONSE_NORMALIZATION', 'Basic_RNN': 'RNN'}\n    return old_name_to_new_name[opcode_name] if opcode_name in old_name_to_new_name else opcode_name", "docstring": "Go from old schema op name to new schema op name.\n\nArgs:\nopcode_name: String representing the ops (see :schema.fbs).\nReturns:\nConverted opcode_name from V1 to V2.", "source": "github-repos"}
{"code": "def find_field(item_list, cond, comparator, target_field):\n    for item in item_list:\n        if comparator(item, cond) and target_field in item:\n            return item[target_field]\n    return None", "docstring": "Finds the value of a field in a dict object that satisfies certain\nconditions.\n\nArgs:\nitem_list: A list of dict objects.\ncond: A param that defines the condition.\ncomparator: A function that checks if an dict satisfies the condition.\ntarget_field: Name of the field whose value to be returned if an item\nsatisfies the condition.\n\nReturns:\nTarget value or None if no item satisfies the condition.", "source": "github-repos"}
{"code": "def where(self, cond, other, **kwargs):\n        \n\n        assert isinstance(\n            cond, type(self)\n        ), \"Must have the same DataManager subclass to perform this operation\"\n        if isinstance(other, type(self)):\n            \n            \n            \n            \n            \n            def where_builder_first_pass(cond, other, **kwargs):\n                return cond.where(cond, other, **kwargs)\n\n            def where_builder_second_pass(df, new_other, **kwargs):\n                return df.where(new_other.eq(True), new_other, **kwargs)\n\n            first_pass = cond._inter_manager_operations(\n                other, \"left\", where_builder_first_pass\n            )\n            final_pass = self._inter_manager_operations(\n                first_pass, \"left\", where_builder_second_pass\n            )\n            return self.__constructor__(final_pass.data, self.index, self.columns)\n        else:\n            axis = kwargs.get(\"axis\", 0)\n            \n            \n            if isinstance(other, pandas.Series):\n                other.index = pandas.RangeIndex(len(other.index))\n\n            def where_builder_series(df, cond):\n                if axis == 0:\n                    df.index = pandas.RangeIndex(len(df.index))\n                    cond.index = pandas.RangeIndex(len(cond.index))\n                else:\n                    df.columns = pandas.RangeIndex(len(df.columns))\n                    cond.columns = pandas.RangeIndex(len(cond.columns))\n                return df.where(cond, other, **kwargs)\n\n            reindexed_self, reindexed_cond, a = self.copartition(\n                axis, cond, \"left\", False\n            )\n            \n            reindexed_cond = reindexed_cond[0]\n            new_data = reindexed_self.inter_data_operation(\n                axis, lambda l, r: where_builder_series(l, r), reindexed_cond\n            )\n            return self.__constructor__(new_data, self.index, self.columns)", "docstring": "Gets values from this manager where cond is true else from other.\n\nArgs:\ncond: Condition on which to evaluate values.\n\nReturns:\nNew DataManager with updated data and index.", "source": "juraj-google-style"}
{"code": "def loss(logits, labels):\n  \n  \n  labels = tf.cast(labels, tf.int64)\n  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n      labels=labels, logits=logits, name='cross_entropy_per_example')\n  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n  tf.add_to_collection('losses', cross_entropy_mean)\n\n  \n  \n  return tf.add_n(tf.get_collection('losses'), name='total_loss')", "docstring": "Add L2Loss to all the trainable variables.\n\nAdd summary for \"Loss\" and \"Loss/avg\".\nArgs:\nlogits: Logits from inference().\nlabels: Labels from distorted_inputs or inputs(). 1-D tensor\nof shape [batch_size]\n\nReturns:\nLoss tensor of type float.", "source": "juraj-google-style"}
{"code": "def set_topics(self, topics):\n    if set(topics).difference(self._topics):\n        future = self.cluster.request_update()\n    else:\n        future = Future().success(set(topics))\n    self._topics = set(topics)\n    return future", "docstring": "Set specific topics to track for metadata.\n\nArguments:\ntopics (list of str): topics to check for metadata\n\nReturns:\nFuture: resolves after metadata request/response", "source": "codesearchnet"}
{"code": "def filter_alias_create_namespace(namespace):\n\n    def filter_string(s):\n        return ' '.join(s.strip().split())\n    namespace.alias_name = filter_string(namespace.alias_name)\n    namespace.alias_command = filter_string(namespace.alias_command)\n    return namespace", "docstring": "Filter alias name and alias command inside alias create namespace to appropriate strings.\n\nArgs\nnamespace: The alias create namespace.\n\nReturns:\nFiltered namespace where excessive whitespaces are removed in strings.", "source": "codesearchnet"}
{"code": "def of_cte(cls, header: Optional[ContentTransferEncodingHeader]) \\\n            -> 'MessageDecoder':\n        \n        if header is None:\n            return _NoopDecoder()\n        hdr_str = str(header).lower()\n        custom = cls.registry.get(hdr_str)\n        if custom is not None:\n            return custom\n        elif hdr_str in ('7bit', '8bit'):\n            return _NoopDecoder()\n        elif hdr_str == 'quoted-printable':\n            return _QuotedPrintableDecoder()\n        elif hdr_str == 'base64':\n            return _Base64Decoder()\n        else:\n            raise NotImplementedError(hdr_str)", "docstring": "Return a decoder from the CTE header value.\n\nThere is built-in support for ``7bit``, ``8bit``, ``quoted-printable``,\nand ``base64`` CTE header values. Decoders can be added or overridden\nwith the :attr:`.registry` dictionary.\n\nArgs:\nheader: The CTE header value.", "source": "juraj-google-style"}
{"code": "def learn(self, iter_n=500, k_step=10):\n        \n        generative_model, discriminative_model = self.__GAN.train(\n            self.__true_sampler,\n            self.__generative_model,\n            self.__discriminative_model,\n            iter_n=iter_n,\n            k_step=k_step\n        )\n        self.__generative_model = generative_model\n        self.__discriminative_model = discriminative_model", "docstring": "Learning.\n\nArgs:\niter_n:     The number of training iterations.\nk_step:     The number of learning of the `discriminator`.", "source": "juraj-google-style"}
{"code": "def save_users(users, path=settings.LOGIN_FILE):\n    \n    with open(path, \"w\") as fh:\n        for username, data in users.items():\n            pass_line = username + \":\" + \":\".join([\n                data[\"pass_hash\"],\n                data[\"uid\"],\n                data[\"gid\"],\n                data[\"full_name\"],\n                data[\"home\"],\n                data[\"shell\"]\n            ])\n\n            fh.write(pass_line + \"\\n\")", "docstring": "Save dictionary with user data to passwd file (default\n:attr:`ftp.settings.LOGIN_FILE`).\n\nArgs:\nusers (dict): dictionary with user data. For details look at dict\nreturned from :func:`load_users`.\npath (str, default settings.LOGIN_FILE): path of the file, where the\ndata will be stored (default :attr:`ftp.settings.LOGIN_FILE`).", "source": "juraj-google-style"}
{"code": "def memoizedmethod(method):\n    \n    method_name = method.__name__\n\n    @wraps(method)\n    def patched(self, *args, **kwargs):\n        \n        \n        try:\n            return self._cache[method_name]\n\n        \n        except KeyError:\n            result = self._cache[method_name] = method(\n                self, *args, **kwargs)\n            return result\n\n    return patched", "docstring": "Decorator that caches method result.\n\nArgs:\nmethod (function): Method\n\nReturns:\nfunction: Memoized method.\n\nNotes:\nTarget method class needs as \"_cache\" attribute (dict).\n\nIt is the case of \"ObjectIOBase\" and all its subclasses.", "source": "juraj-google-style"}
{"code": "def __init__(self, num_workers, *unused_args, **unused_kwargs):\n    super().__init__(*unused_args, **unused_kwargs)\n    self._num_workers = num_workers\n    self._successful_ops = util.MovingSum(window_ms=1000, bucket_ms=1000)\n    self._first_instant = datetime.datetime.now()\n    self._throttled_secs = Metrics.counter(RampupThrottlingFn, 'cumulativeThrottlingSeconds')", "docstring": "Initializes a ramp-up throttler transform.\n\nArgs:\nnum_workers: A hint for the expected number of workers, used to derive\nthe local rate limit.", "source": "github-repos"}
{"code": "def __call__(self, fn):\n        \n\n        def output(app, *args, **kwargs):\n            \n            data = fn(app, *args, **kwargs)\n            attr = getattr(app, self.attribute)\n            if isinstance(data, list) and isinstance(attr, list):\n                getattr(app, self.attribute).extend(data)\n            elif isinstance(attr, list):\n                getattr(app, self.attribute).append(data)\n            else:\n                setattr(app, self.attribute, data)\n            return data\n\n        return output", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def get_file(profile, branch, file_path):\n    branch_sha = get_branch_sha(profile, branch)\n    tree = get_files_in_branch(profile, branch_sha)\n    match = None\n    for item in tree:\n        if (item.get('path') == file_path):\n            match = item\n            break\n    file_sha = match.get('sha')\n    blob = blobs.get_blob(profile, file_sha)\n    content = blob.get('content')\n    decoded_content = b64decode(content)\n    return decoded_content.decode('utf-8')", "docstring": "Get a file from a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch\nThe name of a branch.\n\nfile_path\nThe path of the file to fetch.\n\nReturns:\nThe (UTF-8 encoded) content of the file, as a string.", "source": "codesearchnet"}
{"code": "def refs(self, type='all', **kwargs):\n        \n        path = '%s/%s/refs' % (self.manager.path, self.get_id())\n        data = {'type': type}\n        return self.manager.gitlab.http_get(path, query_data=data, **kwargs)", "docstring": "List the references the commit is pushed to.\n\nArgs:\ntype (str): The scope of references ('branch', 'tag' or 'all')\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the references could not be retrieved\n\nReturns:\nlist: The references the commit is pushed to.", "source": "juraj-google-style"}
{"code": "def sin(duration: int, amp: complex, freq: float=None, phase: float=0, name: str=None) -> SamplePulse:\n    if (freq is None):\n        freq = (1 / duration)\n    return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name)", "docstring": "Generates sine wave `SamplePulse`.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude.\nfreq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "codesearchnet"}
{"code": "def solve2x2(lhs, rhs):\n    if (np.abs(lhs[(1, 0)]) > np.abs(lhs[(0, 0)])):\n        ratio = (lhs[(0, 0)] / lhs[(1, 0)])\n        denominator = (lhs[(0, 1)] - (ratio * lhs[(1, 1)]))\n        if (denominator == 0.0):\n            return (True, None, None)\n        y_val = ((rhs[0] - (ratio * rhs[1])) / denominator)\n        x_val = ((rhs[1] - (lhs[(1, 1)] * y_val)) / lhs[(1, 0)])\n        return (False, x_val, y_val)\n    else:\n        if (lhs[(0, 0)] == 0.0):\n            return (True, None, None)\n        ratio = (lhs[(1, 0)] / lhs[(0, 0)])\n        denominator = (lhs[(1, 1)] - (ratio * lhs[(0, 1)]))\n        if (denominator == 0.0):\n            return (True, None, None)\n        y_val = ((rhs[1] - (ratio * rhs[0])) / denominator)\n        x_val = ((rhs[0] - (lhs[(0, 1)] * y_val)) / lhs[(0, 0)])\n        return (False, x_val, y_val)", "docstring": "Solve a square 2 x 2 system via LU factorization.\n\nThis is meant to be a stand-in for LAPACK's ``dgesv``, which just wraps\ntwo calls to ``dgetrf`` and ``dgetrs``. We wrap for two reasons:\n\n* We seek to avoid exceptions as part of the control flow (which is\nwhat :func`numpy.linalg.solve` does).\n* We seek to avoid excessive type- and size-checking, since this\nspecial case is already known.\n\nArgs:\nlhs (numpy.ndarray) A ``2 x 2`` array of real numbers.\nrhs (numpy.ndarray) A 1D array of 2 real numbers.\n\nReturns:\nTuple[bool, float, float]: A triple of\n\n* A flag indicating if ``lhs`` is a singular matrix.\n* The first component of the solution.\n* The second component of the solution.", "source": "codesearchnet"}
{"code": "def create_application_configuration(self, name, properties, description=None):\n        \n        if not hasattr(self, 'applicationConfigurations'):\n            raise NotImplementedError()\n\n        cv = ApplicationConfiguration._props(name, properties, description)\n\n        res = self.rest_client.session.post(self.applicationConfigurations,\n            headers = {'Accept' : 'application/json'},\n            json=cv)\n        _handle_http_errors(res)\n        return ApplicationConfiguration(res.json(), self.rest_client)", "docstring": "Create an application configuration.\n\nArgs:\nname (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a\n.. versionadded 1.12", "source": "juraj-google-style"}
{"code": "def export_analytics_data_to_csv(data, output_folder, result_info_key, identifier_keys):\n    workbook = create_excel_workbook(data, result_info_key, identifier_keys)\n    suffix = '.csv'\n    if (not os.path.exists(output_folder)):\n        os.makedirs(output_folder)\n    for worksheet in workbook.worksheets:\n        file_name = utilities.convert_title_to_snake_case(worksheet.title)\n        file_path = os.path.join(output_folder, (file_name + suffix))\n        mode = 'w'\n        if (sys.version_info[0] < 3):\n            mode = 'wb'\n        with io.open(file_path, mode) as output_file:\n            csv_writer = csv.writer(output_file)\n            for row in worksheet.rows:\n                csv_writer.writerow([cell.value for cell in row])\n    print('Saved CSV files to {}'.format(output_folder))", "docstring": "Creates CSV files containing data returned by the Analytics API.\nCreates one file per requested endpoint and saves it into the\nspecified output_folder\n\nArgs:\ndata: Analytics API data as a list of dicts\noutput_folder: Path to a folder to save the CSV files into", "source": "codesearchnet"}
{"code": "def get_tensor_num_entries(self, tensor_name, partial_layout=None, mesh_dimension_to_size=None):\n    shape = self.get_tensor_shape(tensor_name)\n    num_entries = 1\n    for dim in shape.dims:\n        num_entries = (num_entries * dim.value)\n    if (not partial_layout):\n        return num_entries\n    for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):\n        if (mtf_dimension_name not in partial_layout):\n            continue\n        mesh_dimension_name = partial_layout[mtf_dimension_name]\n        mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]\n        num_entries = int(math.ceil((num_entries / mesh_dimension_size)))\n    return num_entries", "docstring": "The number of entries in a tensor.\n\nIf partial_layout is specified, then mesh_dimension_to_size must also be. In\nthis case, the number of entries on a single device is returned.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\npartial_layout: an optional {string: string}, from MTF dimension name to\nmesh dimension name.\nmesh_dimension_to_size: an optional {string: int}, from mesh dimension\nname to size.\n\nReturns:\nan integer", "source": "codesearchnet"}
{"code": "def validate_read(self, address):\n    if (not any((address.startswith(ns) for ns in self._read_list))):\n        raise AuthorizationException(address=address)", "docstring": "Raises an exception if the address is not allowed to be read in\nthis context, based on txn inputs.\n\nArgs:\naddress (str): An address to be validated.\n\nReturns:\nNone\n\nRaises:\nAuthorizationException", "source": "codesearchnet"}
{"code": "def _ReadMemberFooter(self, file_object):\n    \n    file_offset = file_object.get_offset()\n    member_footer = self._ReadStructure(\n        file_object, file_offset, self._MEMBER_FOOTER_SIZE,\n        self._MEMBER_FOOTER, 'member footer')\n\n    self.uncompressed_data_size = member_footer.uncompressed_data_size", "docstring": "Reads a member footer.\n\nArgs:\nfile_object (FileIO): file-like object to read from.\n\nRaises:\nFileFormatError: if the member footer cannot be read.", "source": "juraj-google-style"}
{"code": "def retrieve(self, block_height, headers=None):\n    path = (self.path + block_height)\n    return self.transport.forward_request(method='GET', path=path, headers=None)", "docstring": "Retrieves the block with the given ``block_height``.\n\nArgs:\nblock_height (str): height of the block to retrieve.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: The block with the given ``block_height``.", "source": "codesearchnet"}
{"code": "def GetArtifactPathDependencies(rdf_artifact):\n    deps = set()\n    for source in rdf_artifact.sources:\n        for (arg, value) in iteritems(source.attributes):\n            paths = []\n            if (arg in ['path', 'query']):\n                paths.append(value)\n            if (arg == 'key_value_pairs'):\n                paths.extend([x['key'] for x in value])\n            if (arg in ['keys', 'paths', 'path_list', 'content_regex_list']):\n                paths.extend(value)\n            for path in paths:\n                for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):\n                    deps.add(match.group()[2:(- 2)])\n    deps.update(GetArtifactParserDependencies(rdf_artifact))\n    return deps", "docstring": "Return a set of knowledgebase path dependencies.\n\nArgs:\nrdf_artifact: RDF artifact object.\n\nReturns:\nA set of strings for the required kb objects e.g.\n[\"users.appdata\", \"systemroot\"]", "source": "codesearchnet"}
{"code": "def _coords2idx(self, coords):\n        \n\n        x = self._coords2vec(coords)\n        idx = self._kd.query(x, p=self._metric_p,\n                             distance_upper_bound=self._max_pix_scale)\n        return idx[1]", "docstring": "Converts from sky coordinates to pixel indices.\n\nArgs:\ncoords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates.\n\nReturns:\nPixel indices of the coordinates, with the same shape as the input\ncoordinates. Pixels which are outside the map are given an index\nequal to the number of pixels in the map.", "source": "juraj-google-style"}
{"code": "def indentjoin(strlist, indent='\\n    ', suffix=''):\n    indent_ = indent\n    strlist = list(strlist)\n    if (len(strlist) == 0):\n        return ''\n    return (indent_ + indent_.join([(six.text_type(str_) + suffix) for str_ in strlist]))", "docstring": "r\"\"\"\nConvineince indentjoin\n\nsimilar to '\\n    '.join(strlist) but indent is also prefixed\n\nArgs:\nstrlist (?):\nindent  (str):\nsuffix  (str):\n\nReturns:\nstr: joined list", "source": "codesearchnet"}
{"code": "def init(self, address, hard_reset=False):\n        \n        self.address = address\n\n        if hard_reset:\n            \n            \n            \n            \n            pass\n\n        \n        for i in range(Dongle.PORT_RETRIES):\n            try:\n                logger.debug('Setting up BGAPI, attempt {}/{}'.format(i + 1, Dongle.PORT_RETRIES))\n                self.api = BlueGigaAPI(port=self.address, callbacks=self, baud=Dongle.BAUDRATE, timeout=DEF_TIMEOUT)\n                self.api.start_daemon()\n                break\n            except serial.serialutil.SerialException as e:\n                logger.debug('Failed to init BlueGigaAPI: {}, attempt {}/{}'.format(e, i + 1, Dongle.PORT_RETRIES))\n                time.sleep(0.1)\n\n        if self.api is None:\n            return False\n\n        time.sleep(0.5) \n        self.get_supported_connections()\n        logger.info('Dongle supports {} connections'.format(self.supported_connections))\n        if self.supported_connections == -1:\n            logger.error('Failed to retrieve number of supported connections from the dongle! (try reinserting it)')\n            return False\n        \n        self.conn_state = {x: self._STATE_IDLE for x in range(self.supported_connections)}\n        self.reset()\n\n        self._cbthread = threading.Thread(target=self._cbthreadfunc)\n        self._cbthread.setDaemon(True)\n        self._cbthread_q = Queue()\n        self._cbthread.start()\n        return True", "docstring": "Open the serial connection to a dongle at the supplied address.\n\nArgs:\naddress (str): the serial port address of the BLED112 dongle, e.g. 'COM5'\nhard_reset (bool): not currently used\n\nReturns:\nTrue if a connection with the dongle was established, False otherwise.", "source": "juraj-google-style"}
{"code": "def __call__(self, fn):\n        \n\n        def loop(app, *args, **kwargs):\n            \n\n            \n            r = []\n            arg_data = app.tcex.playbook.read(getattr(app.args, self.arg))\n            arg_type = app.tcex.playbook.variable_type(getattr(app.args, self.arg))\n            if not isinstance(arg_data, list):\n                arg_data = [arg_data]\n\n            if not arg_data:\n                app.tcex.exit(1, 'No data retrieved for arg ({}).'.format(self.arg))\n\n            for s in arg_data:\n                if s is None and self.default is not None:\n                    \n                    s = self.default\n                    app.tcex.log.debug(\n                        'a null input was provided, using default value \"{}\" instead.'.format(s)\n                    )\n\n                if self.fail_on is not None:\n                    if s in self.fail_on:\n                        app.tcex.playbook.exit(\n                            1,\n                            'Arg value for IterateOnArg matched fail_on value ({}).'.format(\n                                self.fail_on\n                            ),\n                        )\n\n                \n                if (\n                    arg_type not in ['Binary', 'BinaryArray']\n                    and app.tcex.log.getEffectiveLevel() == 10\n                ):\n                    log_string = str(s)\n                    if len(log_string) > 100:\n                        log_string = '{} ...'.format(log_string[:100])\n                    app.tcex.log.debug('input value: {}'.format(log_string))\n\n                \n                args_list = list(args)\n                try:\n                    args_list[0] = s\n                except IndexError:\n                    args_list.append(s)\n                args = tuple(args_list)\n                r.append(fn(app, *args, **kwargs))\n            return r\n\n        return loop", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def create(self, batch_outs):\n    raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Creates the initial results from the first batch outputs.\n\nArgs:\nbatch_outs: A list of batch-level outputs.", "source": "github-repos"}
{"code": "def fill_slot(self, filler_pipeline_key, slot, value):\n    \n    if not isinstance(filler_pipeline_key, db.Key):\n      filler_pipeline_key = db.Key(filler_pipeline_key)\n\n    if _TEST_MODE:\n      slot._set_value_test(filler_pipeline_key, value)\n    else:\n      encoded_value = json.dumps(value,\n                                       sort_keys=True,\n                                       cls=mr_util.JsonEncoder)\n      value_text = None\n      value_blob = None\n      if len(encoded_value) <= _MAX_JSON_SIZE:\n        value_text = db.Text(encoded_value)\n      else:\n        \n        value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())\n\n      def txn():\n        slot_record = db.get(slot.key)\n        if slot_record is None:\n          raise UnexpectedPipelineError(\n              'Tried to fill missing slot \"%s\" '\n              'by pipeline ID \"%s\" with value: %r'\n              % (slot.key, filler_pipeline_key.name(), value))\n        \n        \n        \n        \n        \n        slot_record.filler = filler_pipeline_key\n        slot_record.value_text = value_text\n        slot_record.value_blob = value_blob\n        slot_record.status = _SlotRecord.FILLED\n        slot_record.fill_time = self._gettime()\n        slot_record.put()\n        task = taskqueue.Task(\n            url=self.barrier_handler_path,\n            params=dict(\n                slot_key=slot.key,\n                use_barrier_indexes=True),\n            headers={'X-Ae-Slot-Key': slot.key,\n                     'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})\n        task.add(queue_name=self.queue_name, transactional=True)\n      db.run_in_transaction_options(\n          db.create_transaction_options(propagation=db.ALLOWED), txn)\n\n    self.session_filled_output_names.add(slot.name)", "docstring": "Fills a slot, enqueueing a task to trigger pending barriers.\n\nArgs:\nfiller_pipeline_key: db.Key or stringified key of the _PipelineRecord\nthat filled this slot.\nslot: The Slot instance to fill.\nvalue: The serializable value to assign.\n\nRaises:\nUnexpectedPipelineError if the _SlotRecord for the 'slot' could not\nbe found in the Datastore.", "source": "juraj-google-style"}
{"code": "def search(self, *arg, **kw):\n    output = {'cant_results': 0, 'matched_terms': defaultdict(set), 'results': {}, 'runtime': 0}\n    indexes = self.indexes()\n    models = kw.get('models', list(self._entities.values()))\n    if (sys.version_info[0] < 3):\n        models = [(self._entities.get(model, None) if (isinstance(model, str) or isinstance(model, unicode)) else model) for model in models]\n        models = filter((lambda x: (x is not None)), models)\n    else:\n        models = [(self._entities.get(model, None) if (isinstance(model, str) or isinstance(model, str)) else model) for model in models]\n        models = [x for x in models if (x is not None)]\n    if ((models == []) or (not models)):\n        models = list(self._entities.values())\n    if self.debug:\n        print('SEARCHING ON MODELS -> ', models)\n    indexes = [m._pw_index_ for m in models if hasattr(m, '_pw_index_')]\n    if (indexes == []):\n        return output\n    (runtime, cant) = (0, 0)\n    ma = defaultdict(set)\n    for index in indexes:\n        res = index.search(*arg, **kw)\n        runtime += res['runtime']\n        cant += res['cant_results']\n        if (res['cant_results'] > 0):\n            output['results'][index._name] = {'items': res['results'], 'matched_terms': res['matched_terms']}\n            for (k, ts) in list(res['matched_terms'].items()):\n                for t in ts:\n                    ma[k].add(t)\n    output['cant_results'] = cant\n    output['matched_terms'] = {k: list(v) for (k, v) in list(ma.items())}\n    output['runtime'] = runtime\n    return output", "docstring": "A full search function. This allows you to search expression\nusing the following arguments.\n\nArg:\nquery (str): The search string expression.\n\nOptional Args:\n- include_entity (bool): include in each result the entity values associated of the fields stored.\n- add_wildcards (bool): set it if you want to consider matches that have prefix or suffixes the query.\n- something (bool): set `add_willcards` in case of none results for the query.\n- fields (list): specified the fields names that you want to consider.\n- except_fields (list): specified the fields names to not consider in the search.\n- models (list): a list of name of model to search or even the models from the database.\n\nReturns:\n(dict): A python dictionary with the results.", "source": "codesearchnet"}
{"code": "def Run(self, force=False):\n    \n    if not self.locked:\n      raise aff4.LockError(\"CronJob must be locked for Run() to be called.\")\n\n    self.KillOldFlows()\n\n    \n    current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN)\n    if current_flow_urn:\n      current_flow = aff4.FACTORY.Open(current_flow_urn, token=self.token)\n      runner = current_flow.GetRunner()\n      if not runner.IsRunning():\n        if runner.context.state == rdf_flow_runner.FlowContext.State.ERROR:\n          self.Set(\n              self.Schema.LAST_RUN_STATUS,\n              rdf_cronjobs.CronJobRunStatus(\n                  status=rdf_cronjobs.CronJobRunStatus.Status.ERROR))\n          stats_collector_instance.Get().IncrementCounter(\n              \"cron_job_failure\", fields=[self.urn.Basename()])\n        else:\n          self.Set(\n              self.Schema.LAST_RUN_STATUS,\n              rdf_cronjobs.CronJobRunStatus(\n                  status=rdf_cronjobs.CronJobRunStatus.Status.OK))\n\n          start_time = self.Get(self.Schema.LAST_RUN_TIME)\n          elapsed = time.time() - start_time.AsSecondsSinceEpoch()\n          stats_collector_instance.Get().RecordEvent(\n              \"cron_job_latency\", elapsed, fields=[self.urn.Basename()])\n\n        self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)\n        self.Flush()\n\n    if not force and not self.DueToRun():\n      return\n\n    \n    cron_args = self.Get(self.Schema.CRON_ARGS)\n    cron_args.flow_runner_args.base_session_id = self.urn\n\n    flow_urn = flow.StartAFF4Flow(\n        runner_args=cron_args.flow_runner_args,\n        args=cron_args.flow_args,\n        token=self.token,\n        sync=False)\n\n    self.Set(self.Schema.CURRENT_FLOW_URN, flow_urn)\n    self.Set(self.Schema.LAST_RUN_TIME, rdfvalue.RDFDatetime.Now())\n    self.Flush()", "docstring": "Do the actual work of the Cron.\n\nWill first check if DueToRun is True.\n\nCronJob object must be locked (i.e. opened via OpenWithLock) for Run() to be\ncalled.\n\nArgs:\nforce: If True, the job will run no matter what (i.e. even if DueToRun()\nreturns False).\n\nRaises:\nLockError: if the object is not locked.", "source": "juraj-google-style"}
{"code": "def parse(self, key, value):\n    if (value is not None):\n        try:\n            return self._parser(value)\n        except Exception:\n            raise ParsingError('Error parsing {}'.format(key))\n    elif (self._default is not SENTINAL):\n        return self._default\n    else:\n        raise KeyError(key)", "docstring": "Parse the environment value for a given key against the schema.\n\nArgs:\nkey: The name of the environment variable.\nvalue: The value to be parsed.", "source": "codesearchnet"}
{"code": "def score_intersect(self, term1, term2, **kwargs):\n\n        \n\n        t1_kde = self.kde(term1, **kwargs)\n        t2_kde = self.kde(term2, **kwargs)\n\n        \n        overlap = np.minimum(t1_kde, t2_kde)\n        return np.trapz(overlap)", "docstring": "Compute the geometric area of the overlap between the kernel density\nestimates of two terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns: float", "source": "juraj-google-style"}
{"code": "def loopUntil(self, condition=None, timeout: float=0) -> Iterator[object]:\n    endTime = (time.time() + timeout)\n    while True:\n        test = (condition and condition())\n        if test:\n            (yield test)\n            return\n        elif (timeout and (time.time() > endTime)):\n            (yield False)\n            return\n        else:\n            (yield test)\n        self.waitOnUpdate(((endTime - time.time()) if timeout else 0))", "docstring": "Iterate until condition is met, with optional timeout in seconds.\nThe yielded value is that of the condition or False when timed out.\n\nArgs:\ncondition: Predicate function that is tested after every network\nupdate.\ntimeout: Maximum time in seconds to wait.\nIf 0 then no timeout is used.", "source": "codesearchnet"}
{"code": "def merge(self, decision_point: pg.geno.DecisionPoint, parent_decisions: List[Union[int, List[int], float, None]], global_state: pg.geno.AttributeDict, step: int) -> Union[int, List[int], float]:", "docstring": "Implementation of point-wise decision making.\n\nArgs:\ndecision_point: Decision point for recombination.\nparent_decisions: A list of parent's decisions. Each item should be an\nint as an active single-choice decision, a list of int as active multi-\nchoice decisions, a float as an active float decision, or None for\ninactive decision point (whose parent space is not chosen).\nglobal_state: An optional keyword argument as the global state. Subclass\ncan omit.\nstep: An optional keyword argument as the current step. Subclass can omit.\n\nReturns:\nAn int, list of int or float as the decision made for the decision point.", "source": "github-repos"}
{"code": "def ctc_unique_labels(labels, name=None):\n    with ops.name_scope(name, 'ctc_unique_labels', [labels]):\n        labels = ops.convert_to_tensor(labels, name='labels')\n\n        def _unique(x):\n            u = array_ops.unique(x)\n            y = array_ops.pad(u.y, [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])\n            y = math_ops.cast(y, dtypes.int64)\n            return [y, u.idx]\n        return map_fn.map_fn(_unique, labels, dtype=[dtypes.int64, dtypes.int32])", "docstring": "Get unique labels and indices for batched labels for `tf.nn.ctc_loss`.\n\nFor use with `tf.nn.ctc_loss` optional argument `unique`: This op can be\nused to preprocess labels in input pipeline to for better speed/memory use\ncomputing the ctc loss on TPU.\n\nExample:\nctc_unique_labels([[3, 4, 4, 3]]) ->\nunique labels padded with 0: [[3, 4, 0, 0]]\nindices of original labels in unique: [0, 1, 1, 0]\n\nArgs:\nlabels: tensor of shape [batch_size, max_label_length] padded with 0.\nname: A name for this `Op`. Defaults to \"ctc_unique_labels\".\n\nReturns:\ntuple of\n- unique labels, tensor of shape `[batch_size, max_label_length]`\n- indices into unique labels, shape `[batch_size, max_label_length]`", "source": "github-repos"}
{"code": "def port_add(br, port, may_exist=False, internal=False):\n    param_may_exist = _param_may_exist(may_exist)\n    cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)\n    if internal:\n        cmd += ' -- set interface {0} type=internal'.format(port)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Creates on bridge a new port named port.\n\nReturns:\nTrue on success, else False.\n\nArgs:\nbr: A string - bridge name\nport: A string - port name\nmay_exist: Bool, if False - attempting to create a port that exists returns False.\ninternal: A boolean to create an internal interface if one does not exist.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_add br0 8080", "source": "codesearchnet"}
{"code": "def to_batched_tensor_list(element_spec, element):\n    return _to_tensor_list_helper(lambda state, spec, component: state + spec._to_batched_tensor_list(component), element_spec, element)", "docstring": "Returns a tensor list representation of the element.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\nelement: The element to convert to tensor list representation.\n\nReturns:\nA tensor list representation of `element`.\n\nRaises:\nValueError: If `element_spec` and `element` do not have the same number of\nelements or if the two structures are not nested in the same way or the\nrank of any of the tensors in the tensor list representation is 0.\nTypeError: If `element_spec` and `element` differ in the type of sequence\nin any of their substructures.", "source": "github-repos"}
{"code": "def detect_shadowing_definitions(self, contract):\n    result = []\n    for function in (contract.functions + contract.modifiers):\n        if (function.contract != contract):\n            continue\n        for variable in function.variables:\n            overshadowed = []\n            for scope_contract in ([contract] + contract.inheritance):\n                for scope_function in scope_contract.functions:\n                    if ((variable.name == scope_function.name) and (scope_function.contract == scope_contract)):\n                        overshadowed.append((self.OVERSHADOWED_FUNCTION, scope_contract.name, scope_function))\n                for scope_modifier in scope_contract.modifiers:\n                    if ((variable.name == scope_modifier.name) and (scope_modifier.contract == scope_contract)):\n                        overshadowed.append((self.OVERSHADOWED_MODIFIER, scope_contract.name, scope_modifier))\n                for scope_event in scope_contract.events:\n                    if ((variable.name == scope_event.name) and (scope_event.contract == scope_contract)):\n                        overshadowed.append((self.OVERSHADOWED_EVENT, scope_contract.name, scope_event))\n                for scope_state_variable in scope_contract.variables:\n                    if ((variable.name == scope_state_variable.name) and (scope_state_variable.contract == scope_contract)):\n                        overshadowed.append((self.OVERSHADOWED_STATE_VARIABLE, scope_contract.name, scope_state_variable))\n            if overshadowed:\n                result.append((contract.name, function.name, variable, overshadowed))\n    return result", "docstring": "Detects if functions, access modifiers, events, state variables, and local variables are named after\nreserved keywords. Any such definitions are returned in a list.\n\nReturns:\nlist of tuple: (type, contract name, definition)", "source": "codesearchnet"}
{"code": "def _validate_state_root(self, state_root):\n        \n        if self._state_root_regex.fullmatch(state_root) is None:\n            LOGGER.debug('Invalid state root: %s', state_root)\n            raise _ResponseFailed(self._status.INVALID_ROOT)", "docstring": "Validates a state root, raising a ResponseFailed error if invalid.\n\nArgs:\nstate_root (str): The state_root to validate\n\nRaises:\nResponseFailed: The state_root was invalid, and a status of\nINVALID_ROOT will be sent with the response.", "source": "juraj-google-style"}
{"code": "def _build_graph(self):\n    q = data_flow_ops.FIFOQueue(1, 'float')\n    init = q.enqueue(1.0)\n    x = q.dequeue()\n    q_inc = q.enqueue(x + 1)\n    return (init, q_inc)", "docstring": "Builds a graph that enqueues and dequeues a single float.\n\nReturns:\nA tuple with the graph init tensor and graph output tensor.", "source": "github-repos"}
{"code": "def frag2text(endpoint, stype, selector,\n              clean=False, raw=False, verbose=False):\n    \n    try:\n        return main(endpoint, stype, selector, clean, raw, verbose)\n    except StandardError as err:\n        return err", "docstring": "returns Markdown text of selected fragment.\n\nArgs:\nendpoint: URL, file, or HTML string\nstype: { 'css' | 'xpath' }\nselector: CSS selector or XPath expression\nReturns:\nMarkdown text\nOptions:\nclean: cleans fragment (lxml.html.clean defaults)\nraw: returns raw HTML fragment\nverbose: show http status, encoding, headers", "source": "juraj-google-style"}
{"code": "def parse_content_type(headers: MutableMapping) -> Tuple[(Optional[str], str)]:\n    content_type = headers.get('content-type')\n    if (not content_type):\n        return (None, 'utf-8')\n    else:\n        (type_, parameters) = cgi.parse_header(content_type)\n        encoding = parameters.get('charset', 'utf-8')\n        return (type_, encoding)", "docstring": "Find content-type and encoding of the response\n\nArgs:\nheaders: Response headers\n\nReturns:\n:py:class:`tuple` (content-type, encoding)", "source": "codesearchnet"}
{"code": "def from_function(cls, function):\n    module_name = function.__module__\n    function_name = function.__name__\n    class_name = ''\n    function_source_hasher = hashlib.sha1()\n    try:\n        source = inspect.getsource(function)\n        if (sys.version_info[0] >= 3):\n            source = source.encode()\n        function_source_hasher.update(source)\n        function_source_hash = function_source_hasher.digest()\n    except (IOError, OSError, TypeError):\n        function_source_hash = b''\n    return cls(module_name, function_name, class_name, function_source_hash)", "docstring": "Create a FunctionDescriptor from a function instance.\n\nThis function is used to create the function descriptor from\na python function. If a function is a class function, it should\nnot be used by this function.\n\nArgs:\ncls: Current class which is required argument for classmethod.\nfunction: the python function used to create the function\ndescriptor.\n\nReturns:\nThe FunctionDescriptor instance created according to the function.", "source": "codesearchnet"}
{"code": "def put(self, type: Type[T], item: T) -> None:\n        \n        LOGGER.info(\"Getting SinkHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n        try:\n            handlers = self._put_types[type]\n        except KeyError:\n            try:\n                LOGGER.info(\"Building new SinkHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n                handlers = self._put_handlers(type)\n            except NoConversionError:\n                handlers = None\n            self._get_types[type] = handlers\n\n        LOGGER.info(\"Creating new PipelineContext\")\n        context = self._new_context()\n\n        LOGGER.info(\"Sending item \\\"{item}\\\" to SourceHandlers\".format(item=item))\n        if handlers is not None:\n            for handler in handlers:\n                handler.put(item, context)", "docstring": "Puts an objects into the data pipeline. The object may be transformed into a new type for insertion if necessary.\n\nArgs:\nitem: The object to be inserted into the data pipeline.", "source": "juraj-google-style"}
{"code": "def _get(self, url, params=None):\n    if (not params):\n        params = {}\n    params.update({'login': self.login, 'key': self.key})\n    response_json = requests.get((self.api_url + url), params).json()\n    return self._process_response(response_json)", "docstring": "Used by every other method, it makes a GET request with the given params.\n\nArgs:\nurl (str): relative path of a specific service (account_info, ...).\nparams (:obj:`dict`, optional): contains parameters to be sent in the GET request.\n\nReturns:\ndict: results of the response of the GET request.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 top_probs=5):\n        \n        self.top_probs = top_probs\n\n        self._sess = None\n        self._tf_input_var = None\n        self._tf_predict_var = None\n        self._model_name = None\n        self._latest_ckpt_name = None\n        self._latest_ckpt_time = None", "docstring": "Create a new instance of this model.\n\n`BaseModel` is an interface and should only be instantiated via a\nsubclass.\n\nArgs:\ntop_probs (int): Number of classes to display per result. For\ninstance, VGG16 has 1000 classes, we don't want to display a\nvisualization for every single possibility.  Defaults to 5.", "source": "juraj-google-style"}
{"code": "def invoice_access(request, access_code):\n    invoices = commerce.Invoice.objects.filter(user__attendee__access_code=access_code).order_by('-issue_time')\n    if (not invoices):\n        raise Http404()\n    unpaid = invoices.filter(status=commerce.Invoice.STATUS_UNPAID)\n    paid = invoices.filter(status=commerce.Invoice.STATUS_PAID)\n    if unpaid:\n        invoice = unpaid[0]\n    elif paid:\n        invoice = paid[0]\n    else:\n        invoice = invoices[0]\n    return redirect('invoice', invoice.id, access_code)", "docstring": "Redirects to an invoice for the attendee that matches the given access\ncode, if any.\n\nIf the attendee has multiple invoices, we use the following tie-break:\n\n- If there's an unpaid invoice, show that, otherwise\n- If there's a paid invoice, show the most recent one, otherwise\n- Show the most recent invoid of all\n\nArguments:\n\naccess_code (castable to int): The access code for the user whose\ninvoice you want to see.\n\nReturns:\nredirect:\nRedirect to the selected invoice for that user.\n\nRaises:\nHttp404: If the user has no invoices.", "source": "codesearchnet"}
{"code": "def _req(self, req):\n    logger.debug('DUT> %s', req)\n    (self._log and self.pause())\n    times = 3\n    res = None\n    while times:\n        times = (times - 1)\n        try:\n            self._sendline(req)\n            self._expect(req)\n            line = None\n            res = []\n            while True:\n                line = self._readline()\n                logger.debug('Got line %s', line)\n                if (line == 'Done'):\n                    break\n                if line:\n                    res.append(line)\n            break\n        except:\n            logger.exception('Failed to send command')\n            self.close()\n            self._init()\n    (self._log and self.resume())\n    return res", "docstring": "Send command and wait for response.\n\nThe command will be repeated 3 times at most in case data loss of serial port.\n\nArgs:\nreq (str): Command to send, please do not include new line in the end.\n\nReturns:\n[str]: The output lines", "source": "codesearchnet"}
{"code": "def _get_snpeff_transcript(self, transcript_info):\n        \n        transcript = Transcript(\n                hgnc_symbol = transcript_info.get('Gene_Name'),\n                transcript_id = transcript_info.get('Feature'),\n                ensembl_id = transcript_info.get('Gene_ID'),\n                biotype = transcript_info.get('Transcript_BioType'),\n                consequence = transcript_info.get('Annotation'),\n                exon = transcript_info.get('Rank'),\n                HGVSc = transcript_info.get('HGVS.c'),\n                HGVSp = transcript_info.get('HGVS.p')\n            )\n        return transcript", "docstring": "Create a transcript based on the snpeff annotation\n\nArgs:\ntranscript_info (dict): A dict with snpeff info\n\nReturns:\ntranscript (puzzle.models.Transcript): A Transcripts", "source": "juraj-google-style"}
{"code": "def from_index_amount(cls, matrixpos, amt):\n    f = np.identity(3)\n    f[matrixpos] += amt\n    return cls(f)", "docstring": "Factory method for constructing a Deformation object\nfrom a matrix position and amount\n\nArgs:\nmatrixpos (tuple): tuple corresponding the matrix position to\nhave a perturbation added\namt (float): amount to add to the identity matrix at position\nmatrixpos", "source": "codesearchnet"}
{"code": "def add_criterion(self, name, priority, and_or, search_type, value):\n    criterion = SearchCriteria(name, priority, and_or, search_type, value)\n    self.criteria.append(criterion)", "docstring": "Add a search criteria object to a smart group.\n\nArgs:\nname: String Criteria type name (e.g. \"Application Title\")\npriority: Int or Str number priority of criterion.\nand_or: Str, either \"and\" or \"or\".\nsearch_type: String Criteria search type. (e.g. \"is\", \"is\nnot\", \"member of\", etc). Construct a SmartGroup with the\ncriteria of interest in the web interface to determine\nwhat range of values are available.\nvalue: String value to search for/against.", "source": "codesearchnet"}
{"code": "def _get_error_generator(type, obj, schema_dir=None, version=DEFAULT_VER, default='core'):\n    if (schema_dir is None):\n        schema_dir = os.path.abspath((((os.path.dirname(__file__) + '/schemas-') + version) + '/'))\n    try:\n        schema_path = find_schema(schema_dir, type)\n        schema = load_schema(schema_path)\n    except (KeyError, TypeError):\n        try:\n            schema_path = find_schema(schema_dir, default)\n            schema = load_schema(schema_path)\n        except (KeyError, TypeError):\n            if (schema_dir is not None):\n                return None\n            raise SchemaInvalidError(\"Cannot locate a schema for the object's type, nor the base schema ({}.json).\".format(default))\n    if ((type == 'observed-data') and (schema_dir is None)):\n        schema['allOf'][1]['properties']['objects'] = {'objects': {'type': 'object', 'minProperties': 1}}\n    validator = load_validator(schema_path, schema)\n    try:\n        error_gen = validator.iter_errors(obj)\n    except schema_exceptions.RefResolutionError:\n        raise SchemaInvalidError('Invalid JSON schema: a JSON reference failed to resolve')\n    return error_gen", "docstring": "Get a generator for validating against the schema for the given object type.\n\nArgs:\ntype (str): The object type to find the schema for.\nobj: The object to be validated.\nschema_dir (str): The path in which to search for schemas.\nversion (str): The version of the STIX specification to validate\nagainst. Only used to find base schemas when schema_dir is None.\ndefault (str): If the schema for the given type cannot be found, use\nthe one with this name instead.\n\nReturns:\nA generator for errors found when validating the object against the\nappropriate schema, or None if schema_dir is None and the schema\ncannot be found.", "source": "codesearchnet"}
{"code": "def distance2bbox(points, distance: torch.Tensor, reg_scale: float) -> torch.Tensor:\n    reg_scale = abs(reg_scale)\n    top_left_x = points[..., 0] - (0.5 * reg_scale + distance[..., 0]) * (points[..., 2] / reg_scale)\n    top_left_y = points[..., 1] - (0.5 * reg_scale + distance[..., 1]) * (points[..., 3] / reg_scale)\n    bottom_right_x = points[..., 0] + (0.5 * reg_scale + distance[..., 2]) * (points[..., 2] / reg_scale)\n    bottom_right_y = points[..., 1] + (0.5 * reg_scale + distance[..., 3]) * (points[..., 3] / reg_scale)\n    bboxes = torch.stack([top_left_x, top_left_y, bottom_right_x, bottom_right_y], -1)\n    return corners_to_center_format(bboxes)", "docstring": "Decodes edge-distances into bounding box coordinates.\n\nArgs:\npoints (`torch.Tensor`):\n(batch_size, num_boxes, 4) or (num_boxes, 4) format, representing [x_center, y_center, width, height]\ndistance (`torch.Tensor`):\n(batch_size, num_boxes, 4) or (num_boxes, 4), representing distances from the point to the left, top, right, and bottom boundaries.\nreg_scale (`float`):\nControls the curvature of the Weighting Function.\nReturns:\n`torch.Tensor`: Bounding boxes in (batch_size, num_boxes, 4) or (num_boxes, 4) format, representing [x_center, y_center, width, height]", "source": "github-repos"}
{"code": "def has_platform(self, platform):\n        \n        if platform and not isinstance(platform, dict):\n            parts = platform.split('/')\n            if len(parts) > 3 or len(parts) < 1:\n                raise InvalidArgument(\n                    '\"{0}\" is not a valid platform descriptor'.format(platform)\n                )\n            platform = {'os': parts[0]}\n            if len(parts) > 2:\n                platform['variant'] = parts[2]\n            if len(parts) > 1:\n                platform['architecture'] = parts[1]\n        return normalize_platform(\n            platform, self.client.version()\n        ) in self.attrs['Platforms']", "docstring": "Check whether the given platform identifier is available for this\ndigest.\n\nArgs:\nplatform (str or dict): A string using the ``os[/arch[/variant]]``\nformat, or a platform dictionary.\n\nReturns:\n(bool): ``True`` if the platform is recognized as available,\n``False`` otherwise.\n\nRaises:\n:py:class:`docker.errors.InvalidArgument`\nIf the platform argument is not a valid descriptor.", "source": "juraj-google-style"}
{"code": "def get_template_object(template_file=''):\n    \n    jinja_template_paths_obj = []\n\n    if TEMPLATES_PATH:\n        external_templates = pathlib.Path(TEMPLATES_PATH).expanduser().resolve()\n        assert os.path.isdir(external_templates), 'External template path \"{0}\" not found'.format(external_templates)\n        jinja_template_paths_obj.append(external_templates)\n\n    jinja_template_paths_obj.append(LOCAL_TEMPLATES)\n    jinja_template_paths = [str(path) for path in jinja_template_paths_obj]\n\n    jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader(jinja_template_paths))\n\n    try:\n        template = jinjaenv.get_template(template_file)\n    except jinja2.TemplateNotFound:\n        message = 'Unable to find template \"{template_file}\" in paths {paths}'.format(\n            template_file=template_file, paths=jinjaenv.loader.searchpath)\n        LOG.error(message)\n        raise ForemastTemplateNotFound(message)\n\n    return template", "docstring": "Retrieve template.\n\nArgs:\ntemplate_file (str): Name of template file.\n\nReturns:\njinja2.Template: Template ready to render.\n\nRaises:\nAssertionError: Configured path for templates does not exist.\n:obj:`foremast.exceptions.ForemastTemplateNotFound`: Requested template\nis not available.", "source": "juraj-google-style"}
{"code": "def load(path: str) -> Callable[..., Dict[str, EventSetNode]]:\n    g = _load_graph(path)\n    inputs = g.named_inputs\n    assert inputs is not None\n    input_names = list(inputs.keys())\n\n    @compile\n    def fn(*args: EventSetNode, **kwargs: EventSetNode) -> Dict[str, EventSetNode]:\n        kwargs = _kwargs_from_args_and_kwargs(input_names, args, kwargs)\n        return g.apply_on_inputs(named_inputs=kwargs)\n    fn.__signature__ = inspect.signature(fn).replace(parameters=[inspect.Parameter(name=k, annotation=EventSetNode, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD) for k in inputs])\n    return fn", "docstring": "Loads a compiled Temporian function from a file.\n\nThe loaded function receives the same positional and keyword arguments and\napplies the same operator graph to its inputs as when it was saved.\n\nArgs:\npath: The path to load the function from.\n\nReturns:\nThe loaded function.", "source": "github-repos"}
{"code": "def encipher_shift(plaintext, plain_vocab, shift):\n  \n  ciphertext = []\n  cipher = ShiftEncryptionLayer(plain_vocab, shift)\n\n  for _, sentence in enumerate(plaintext):\n    cipher_sentence = []\n    for _, character in enumerate(sentence):\n      encrypted_char = cipher.encrypt_character(character)\n      cipher_sentence.append(encrypted_char)\n    ciphertext.append(cipher_sentence)\n\n  return ciphertext", "docstring": "Encrypt plain text with a single shift layer.\n\nArgs:\nplaintext (list of list of Strings): a list of plain text to encrypt.\nplain_vocab (list of Integer): unique vocabularies being used.\nshift (Integer): number of shift, shift to the right if shift is positive.\nReturns:\nciphertext (list of Strings): encrypted plain text.", "source": "juraj-google-style"}
{"code": "def clustering_factory(clf):\n    \n    required_methods = ['fit', 'fit_predict']\n\n    for method in required_methods:\n        if not hasattr(clf, method):\n            raise TypeError('\"{}\" is not in clf. Did you '\n                            'pass a clusterer instance?'.format(method))\n\n    additional_methods = {\n        'plot_silhouette': plot_silhouette,\n        'plot_elbow_curve': plot_elbow_curve\n    }\n\n    for key, fn in six.iteritems(additional_methods):\n        if hasattr(clf, key):\n            warnings.warn('\"{}\" method already in clf. '\n                          'Overriding anyway. This may '\n                          'result in unintended behavior.'.format(key))\n        setattr(clf, key, types.MethodType(fn, clf))\n    return clf", "docstring": "Embeds scikit-plot plotting methods in an sklearn clusterer instance.\n\nArgs:\nclf: Scikit-learn clusterer instance\n\nReturns:\nThe same scikit-learn clusterer instance passed in **clf** with\nembedded scikit-plot instance methods.\n\nRaises:\nValueError: If **clf** does not contain the instance methods necessary\nfor scikit-plot instance methods.", "source": "juraj-google-style"}
{"code": "def http_download(url, target_path):\n    \n    r = requests.get(url, stream=True)\n    with open(target_path, 'wb') as f:\n        \n        for chunk in r.iter_content(chunk_size=1024):\n            if chunk:\n                f.write(chunk)\n    return target_path", "docstring": "Download file to local\nArgs:\n- url(string): url request path\n- target_path(string): download destination", "source": "juraj-google-style"}
{"code": "def convert_sbml_model(model):\n    \n    biomass_reactions = set()\n    for reaction in model.reactions:\n        \n        if reaction.id not in model.limits:\n            lower, upper = parse_flux_bounds(reaction)\n            if lower is not None or upper is not None:\n                model.limits[reaction.id] = reaction.id, lower, upper\n\n        \n        objective = parse_objective_coefficient(reaction)\n        if objective is not None and objective != 0:\n            biomass_reactions.add(reaction.id)\n\n    if len(biomass_reactions) == 1:\n        model.biomass_reaction = next(iter(biomass_reactions))\n\n    \n    convert_model_entries(model)\n\n    \n    if model.extracellular_compartment is None:\n        extracellular = detect_extracellular_compartment(model)\n        model.extracellular_compartment = extracellular\n\n    \n    convert_exchange_to_compounds(model)", "docstring": "Convert raw SBML model to extended model.\n\nArgs:\nmodel: :class:`NativeModel` obtained from :class:`SBMLReader`.", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding, expected):\n    for data_format in GetTestConfigs():\n        self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding, data_format, expected)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Function to be called, co.MaxPool, co.AvgPool,\nor the Lua version.\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\nexpected: An array containing the expected operation outputs.", "source": "github-repos"}
{"code": "def is_monotonic(neurite, tol):\n    for node in neurite.iter_sections():\n        sec = node.points\n        for point_id in range((len(sec) - 1)):\n            if (sec[(point_id + 1)][COLS.R] > (sec[point_id][COLS.R] + tol)):\n                return False\n        if ((node.parent is not None) and (sec[0][COLS.R] > (node.parent.points[(- 1)][COLS.R] + tol))):\n            return False\n    return True", "docstring": "Check if neurite tree is monotonic\n\nIf each child has smaller or equal diameters from its parent\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): tolerance\n\nReturns:\nTrue if neurite monotonic", "source": "codesearchnet"}
{"code": "def ParseCall(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    guid = self._GetRowValue(query_hash, row, 'guid')\n    is_incoming = self._GetRowValue(query_hash, row, 'is_incoming')\n    videostatus = self._GetRowValue(query_hash, row, 'videostatus')\n    try:\n        aux = guid\n        if aux:\n            aux_list = aux.split('-')\n            src_aux = aux_list[0]\n            dst_aux = aux_list[1]\n        else:\n            src_aux = 'Unknown [no GUID]'\n            dst_aux = 'Unknown [no GUID]'\n    except IndexError:\n        src_aux = 'Unknown [{0:s}]'.format(guid)\n        dst_aux = 'Unknown [{0:s}]'.format(guid)\n    if (is_incoming == '0'):\n        user_start_call = True\n        source = src_aux\n        ip_address = self._GetRowValue(query_hash, row, 'ip_address')\n        if ip_address:\n            destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address)\n        else:\n            destination = dst_aux\n    else:\n        user_start_call = False\n        source = src_aux\n        destination = dst_aux\n    call_identifier = self._GetRowValue(query_hash, row, 'id')\n    event_data = SkypeCallEventData()\n    event_data.dst_call = destination\n    event_data.offset = call_identifier\n    event_data.query = query\n    event_data.src_call = source\n    event_data.user_start_call = user_start_call\n    event_data.video_conference = (videostatus == '3')\n    timestamp = self._GetRowValue(query_hash, row, 'try_call')\n    event_data.call_type = 'WAITING'\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    try:\n        timestamp = self._GetRowValue(query_hash, row, 'accept_call')\n        timestamp = int(timestamp)\n    except (ValueError, TypeError):\n        timestamp = None\n    if timestamp:\n        event_data.call_type = 'ACCEPTED'\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        try:\n            call_duration = self._GetRowValue(query_hash, row, 'call_duration')\n            call_duration = int(call_duration)\n        except (ValueError, TypeError):\n            parser_mediator.ProduceExtractionWarning('unable to determine when call: {0:s} was finished.'.format(call_identifier))\n            call_duration = None\n        if call_duration:\n            timestamp += call_duration\n            event_data.call_type = 'FINISHED'\n            date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a call.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.\nquery (Optional[str]): query.", "source": "codesearchnet"}
{"code": "def wait_for(\n        self, timeout=10000, interval=1000,\n        asserter=lambda x: x):\n        \n        if not callable(asserter):\n            raise TypeError('Asserter must be callable.')\n        @retry(\n            retry_on_exception=lambda ex: isinstance(ex, WebDriverException),\n            stop_max_delay=timeout,\n            wait_fixed=interval\n        )\n        def _wait_for(driver):\n            asserter(driver)\n            return driver\n\n        return _wait_for(self)", "docstring": "Wait for driver till satisfy the given condition\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\ntimeout(int): How long we should be retrying stuff.\ninterval(int): How long between retries.\nasserter(callable): The asserter func to determine the result.\n\nReturns:\nReturn the driver.\n\nRaises:\nWebDriverException.", "source": "juraj-google-style"}
{"code": "def to(self, fmt=None, filename=None):\n    from pymatgen.io.xyz import XYZ\n    from pymatgen.io.gaussian import GaussianInput\n    from pymatgen.io.babel import BabelMolAdaptor\n    fmt = ('' if (fmt is None) else fmt.lower())\n    fname = os.path.basename((filename or ''))\n    if ((fmt == 'xyz') or fnmatch(fname.lower(), '*.xyz*')):\n        writer = XYZ(self)\n    elif any([((fmt == r) or fnmatch(fname.lower(), '*.{}*'.format(r))) for r in ['gjf', 'g03', 'g09', 'com', 'inp']]):\n        writer = GaussianInput(self)\n    elif ((fmt == 'json') or fnmatch(fname, '*.json*') or fnmatch(fname, '*.mson*')):\n        if filename:\n            with zopen(filename, 'wt', encoding='utf8') as f:\n                return json.dump(self.as_dict(), f)\n        else:\n            return json.dumps(self.as_dict())\n    elif ((fmt == 'yaml') or fnmatch(fname, '*.yaml*')):\n        import ruamel.yaml as yaml\n        if filename:\n            with zopen(fname, 'wt', encoding='utf8') as f:\n                return yaml.safe_dump(self.as_dict(), f)\n        else:\n            return yaml.safe_dump(self.as_dict())\n    else:\n        m = re.search('\\\\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)', fname.lower())\n        if ((not fmt) and m):\n            fmt = m.group(1)\n        writer = BabelMolAdaptor(self)\n        return writer.write_file(filename, file_format=fmt)\n    if filename:\n        writer.write_file(filename)\n    else:\n        return str(writer)", "docstring": "Outputs the molecule to a file or string.\n\nArgs:\nfmt (str): Format to output to. Defaults to JSON unless filename\nis provided. If fmt is specifies, it overrides whatever the\nfilename is. Options include \"xyz\", \"gjf\", \"g03\", \"json\". If\nyou have OpenBabel installed, any of the formats supported by\nOpenBabel. Non-case sensitive.\nfilename (str): If provided, output will be written to a file. If\nfmt is not specified, the format is determined from the\nfilename. Defaults is None, i.e. string output.\n\nReturns:\n(str) if filename is None. None otherwise.", "source": "codesearchnet"}
{"code": "def usufyToCsvExport(d, fPath):\n    from pyexcel_io import get_data\n    try:\n        oldData = {'OSRFramework': get_data(fPath)}\n    except:\n        oldData = {'OSRFramework': []}\n    tabularData = _generateTabularData(d, oldData)\n    from pyexcel_io import save_data\n    save_data(fPath, tabularData['OSRFramework'])", "docstring": "Workaround to export to a CSV file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"}
{"code": "def GetParserAndPluginNames(cls, parser_filter_expression=None):\n    \n    parser_and_plugin_names = []\n    for parser_name, parser_class in cls.GetParsers(\n        parser_filter_expression=parser_filter_expression):\n      parser_and_plugin_names.append(parser_name)\n\n      if parser_class.SupportsPlugins():\n        for plugin_name, _ in parser_class.GetPlugins():\n          parser_and_plugin_names.append(\n              '{0:s}/{1:s}'.format(parser_name, plugin_name))\n\n    return parser_and_plugin_names", "docstring": "Retrieves the parser and parser plugin names.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\nlist[str]: parser and parser plugin names.", "source": "juraj-google-style"}
{"code": "def get(self, statediag, dfaaccepted):\n        \n\n        newstatediag = {}\n\n        newstate = PDAState()\n        newstate.id = 'AI,I'  \n        newstate.type = 1\n        newstate.sym = '@wrapping'\n        transitions = {}\n        transitions[(0, 0)] = [0]\n        newstate.trans = transitions\n        i = 0\n        newstatediag[i] = newstate\n        \n        \n        for stateid in statediag:\n            state = statediag[stateid]\n            \n            if state.type == 2:\n                for state2id in dfaaccepted:\n                    \n                    if state.id[1] == state2id:\n                        \n                        state.trans['AI,I'] = ['@wrapping']\n                        \n                        break\n            i = i + 1\n            newstatediag[i] = state\n        return newstatediag", "docstring": "# - Remove all the POP (type - 2) transitions to state 0,non DFA accepted\n# for symbol @closing\n# - Generate the accepted transitions\n- Replace DFA accepted States with a push - pop symbol and two extra states\nArgs:\nstatediag (list): The states of the PDA\ndfaaccepted (list):The list of DFA accepted states\nReturns:\nlist: A cleaned, smaller list of DFA states", "source": "juraj-google-style"}
{"code": "def _ParseInternetPasswordRecord(self, parser_mediator, record):\n    \n    key = record.get('_key_', None)\n    if not key or not key.startswith(b'ssgp'):\n      raise errors.ParseError((\n          'Unsupported Internet password record key value does not start '\n          'with: \"ssgp\".'))\n\n    protocol_string = codecs.decode('{0:08x}'.format(record['ptcl']), 'hex')\n    protocol_string = codecs.decode(protocol_string, 'utf-8')\n\n    event_data = KeychainInternetRecordEventData()\n    event_data.account_name = self._ParseBinaryDataAsString(\n        parser_mediator, record['acct'])\n    event_data.comments = self._ParseBinaryDataAsString(\n        parser_mediator, record['crtr'])\n    event_data.entry_name = self._ParseBinaryDataAsString(\n        parser_mediator, record['PrintName'])\n    event_data.protocol = self._PROTOCOL_TRANSLATION_DICT.get(\n        protocol_string, protocol_string)\n    ssgp_hash = codecs.encode(key[4:], 'hex')\n    event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')\n    event_data.text_description = self._ParseBinaryDataAsString(\n        parser_mediator, record['desc'])\n    event_data.type_protocol = self._ParseBinaryDataAsString(\n        parser_mediator, record['atyp'])\n    event_data.where = self._ParseBinaryDataAsString(\n        parser_mediator, record['srvr'])\n\n    date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])\n    if date_time:\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])\n    if date_time:\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts the information from an Internet password record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord (dict[str, object]): database record.\n\nRaises:\nParseError: if Internet password record cannot be parsed.", "source": "juraj-google-style"}
{"code": "def custom_getter(self, activation_dtype=tf.bfloat16):\n    \n    def getter_fn(getter, *args, **kwargs):\n      requested_dtype = kwargs[\"dtype\"]\n      if requested_dtype in (tf.bfloat16, tf.float32):\n        kwargs[\"dtype\"] = tf.bfloat16\n        kwargs[\"initializer\"] = _EncodingInitializer(\n            kwargs[\"initializer\"], self)\n        ret = self._decode_with_identity_gradient(getter(*args, **kwargs))\n        return tf.cast(ret, activation_dtype)\n      return getter(*args, **kwargs)\n    return getter_fn", "docstring": "A custom getter that uses the encoding for bfloat16 and float32 vars.\n\nWhen a bfloat16 or float32 variable is requsted, an encoded float16\nvaraible is created, which is then decoded and cast to a bfloat16\nactivation.\n\nArgs:\nactivation_dtype: a dtype to which to convert the decoded value.\n\nReturns:\na function.", "source": "juraj-google-style"}
{"code": "def is_initialised( self ):\n        \n        if not self.lattice:\n            raise AttributeError('Running a simulation needs the lattice to be initialised')\n        if not self.atoms:\n            raise AttributeError('Running a simulation needs the atoms to be initialised')\n        if not self.number_of_jumps and not self.for_time:\n            raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set')", "docstring": "Check whether the simulation has been initialised.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _collect_unused(self, start: GridQubit, used: Set[GridQubit]) -> Set[GridQubit]:\n\n    def collect(n: GridQubit, visited: Set[GridQubit]):\n        visited.add(n)\n        for m in self._c_adj[n]:\n            if ((m not in used) and (m not in visited)):\n                collect(m, visited)\n    visited = set()\n    collect(start, visited)\n    return visited", "docstring": "Lists all the qubits that are reachable from given qubit.\n\nArgs:\nstart: The first qubit for which connectivity should be calculated.\nMight be a member of used set.\nused: Already used qubits, which cannot be used during the\ncollection.\n\nReturns:\nSet of qubits that are reachable from starting qubit without\ntraversing any of the used qubits.", "source": "codesearchnet"}
{"code": "def _ReadPaddingDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if (not is_member):\n        error_message = 'data type only supported as member'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object = self._ReadDataTypeDefinition(definitions_registry, definition_values, data_types.PaddingDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING)\n    alignment_size = definition_values.get('alignment_size', None)\n    if (not alignment_size):\n        error_message = 'missing alignment_size'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    try:\n        int(alignment_size)\n    except ValueError:\n        error_message = 'unuspported alignment size attribute: {0!s}'.format(alignment_size)\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    if (alignment_size not in (2, 4, 8, 16)):\n        error_message = 'unuspported alignment size value: {0!s}'.format(alignment_size)\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object.alignment_size = alignment_size\n    return definition_object", "docstring": "Reads a padding data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nPaddingtDefinition: padding definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def _virtual_molecule(self, mol, ilabels, eq_atoms):\n        \n        vmol = ob.OBMol()\n\n        non_unique_atoms = set([a for g in eq_atoms for a in g])\n        all_atoms = set(range(1, len(ilabels) + 1))\n        unique_atom_labels = sorted(all_atoms - non_unique_atoms)\n\n        \n        for i in unique_atom_labels:\n            orig_idx = ilabels[i-1]\n            oa1 = mol.GetAtom(orig_idx)\n            a1 = vmol.NewAtom()\n            a1.SetAtomicNum(oa1.GetAtomicNum())\n            a1.SetVector(oa1.GetVector())\n\n        \n        if vmol.NumAtoms() < 3:\n            for symm in eq_atoms:\n                c1x, c1y, c1z = self._group_centroid(mol, ilabels, symm)\n                min_distance = float(\"inf\")\n                for i in range(1, vmol.NumAtoms()+1):\n                    va = vmol.GetAtom(i)\n                    distance = math.sqrt((c1x - va.x())**2 + (c1y - va.y())**2\n                                         + (c1z - va.z())**2)\n                    if distance < min_distance:\n                        min_distance = distance\n                if min_distance > 0.2:\n                    a1 = vmol.NewAtom()\n                    a1.SetAtomicNum(9)\n                    a1.SetVector(c1x, c1y, c1z)\n\n        return vmol", "docstring": "Create a virtual molecule by unique atoms, the centriods of the\nequivalent atoms\n\nArgs:\nmol: The molecule. OpenBabel OBMol object\nilables: inchi label map\neq_atoms: equivalent atom labels\nfarthest_group_idx: The equivalent atom group index in which\nthere is the farthest atom to the centroid\n\nReturn:\nThe virtual molecule", "source": "juraj-google-style"}
{"code": "def expand(self, pcoll: beam.PCollection[Union[beam.Row, NamedTuple]]) -> beam.PCollection[common_types.InstanceDictType]:\n    return pcoll | beam.Map(lambda x: x._asdict())", "docstring": "Args:\npcoll: A PCollection of NamedTuples or Rows.\nReturns:\nA PCollection of dictionaries.", "source": "github-repos"}
{"code": "def get_encoder_from_vocab(vocab_filepath):\n  \n  if not tf.gfile.Exists(vocab_filepath):\n    raise ValueError(\"Vocab file does not exist: {}.\".format(vocab_filepath))\n\n  tf.logging.info(\"Found vocab file: %s\", vocab_filepath)\n  encoder = text_encoder.SubwordTextEncoder(vocab_filepath)\n  return encoder", "docstring": "Get encoder from vocab file.\n\nIf vocab is not found in output dir, it will be copied there by\ncopy_vocab_to_output_dir to clarify the vocab used to generate the data.\n\nArgs:\nvocab_filepath: path to vocab, either local or cns\n\nReturns:\nA SubwordTextEncoder vocabulary object. None if the output_parallel_text\nis set.", "source": "juraj-google-style"}
{"code": "def sunset(self, date=None, zenith=None):\n    return (segment.sunset(date, zenith) for segment in self)", "docstring": "Calculate sunset times for locations.\n\nArgs:\ndate (datetime.date): Calculate rise or set for given date\nzenith (str): Calculate sunset events, or start of twilight times\n\nReturns:\nlist of list of datetime.datetime: The time for the sunset for each\npoint in each segment", "source": "codesearchnet"}
{"code": "def directional_poisson_ratio(self, n, m, tol=1e-8):\n        \n        n, m = get_uvec(n), get_uvec(m)\n        if not np.abs(np.dot(n, m)) < tol:\n            raise ValueError(\"n and m must be orthogonal\")\n        v = self.compliance_tensor.einsum_sequence([n]*2 + [m]*2)\n        v *= -1 / self.compliance_tensor.einsum_sequence([n]*4)\n        return v", "docstring": "Calculates the poisson ratio for a specific direction\nrelative to a second, orthogonal direction\n\nArgs:\nn (3-d vector): principal direction\nm (3-d vector): secondary direction orthogonal to n\ntol (float): tolerance for testing of orthogonality", "source": "juraj-google-style"}
{"code": "def service_messages(self, short_name):\n    if (short_name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=short_name)\n    return list(self.services[short_name]['state'].messages)", "docstring": "Get the messages stored for a service.\n\nArgs:\nshort_name (string): The short name of the service to get messages for\n\nReturns:\nlist(ServiceMessage): A list of the ServiceMessages stored for this service", "source": "codesearchnet"}
{"code": "def collapse_addresses(addresses):\n    \n    i = 0\n    addrs = []\n    ips = []\n    nets = []\n\n    \n    for ip in addresses:\n        if isinstance(ip, _BaseAddress):\n            if ips and ips[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                                 ip, ips[-1]))\n            ips.append(ip)\n        elif ip._prefixlen == ip._max_prefixlen:\n            if ips and ips[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                                 ip, ips[-1]))\n            try:\n                ips.append(ip.ip)\n            except AttributeError:\n                ips.append(ip.network_address)\n        else:\n            if nets and nets[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                                 ip, nets[-1]))\n            nets.append(ip)\n\n    \n    ips = sorted(set(ips))\n    nets = sorted(set(nets))\n\n    while i < len(ips):\n        (first, last) = _find_address_range(ips[i:])\n        i = ips.index(last) + 1\n        addrs.extend(summarize_address_range(first, last))\n\n    return iter(_collapse_addresses_recursive(sorted(\n        addrs + nets, key=_BaseNetwork._get_networks_key)))", "docstring": "Collapse a list of IP objects.\n\nExample:\ncollapse_addresses([IPv4Network('192.0.2.0/25'),\nIPv4Network('192.0.2.128/25')]) ->\n[IPv4Network('192.0.2.0/24')]\n\nArgs:\naddresses: An iterator of IPv4Network or IPv6Network objects.\n\nReturns:\nAn iterator of the collapsed IPv(4|6)Network objects.\n\nRaises:\nTypeError: If passed a list of mixed version objects.", "source": "juraj-google-style"}
{"code": "def is_flat(neurite, tol, method='tolerance'):\n    ext = principal_direction_extent(neurite.points[(:, COLS.XYZ)])\n    assert (method in ('tolerance', 'ratio')), \"Method must be one of 'tolerance', 'ratio'\"\n    if (method == 'ratio'):\n        sorted_ext = np.sort(ext)\n        return ((sorted_ext[0] / sorted_ext[1]) < float(tol))\n    return any((ext < float(tol)))", "docstring": "Check if neurite is flat using the given method\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): tolerance\nmethod(string): the method of flatness estimation:\n'tolerance' returns true if any extent of the tree is smaller\nthan the given tolerance\n'ratio' returns true if the ratio of the smallest directions\nis smaller than tol. e.g. [1,2,3] -> 1/2 < tol\n\nReturns:\nTrue if neurite is flat", "source": "codesearchnet"}
{"code": "def save(self, filename):\n        \n        with open(filename, 'w') as outfile:\n            json.dump(self.to_json(), outfile)", "docstring": "Writes the JSON representation of this graph to the provided\nfilename, such that the graph can be easily reconstructed using\nGraph(spec=filename).\n\nArgs:\nfilename (str): Path at which to write out the json file.", "source": "juraj-google-style"}
{"code": "def rename_libtensorflow(srcs_dir: str, version: str):\n    major_version = version.split('.')[0]\n    if is_macos():\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_cc.{}.dylib'.format(version)), os.path.join(srcs_dir, 'libtensorflow_cc.{}.dylib'.format(major_version)))\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_framework.{}.dylib'.format(version)), os.path.join(srcs_dir, 'libtensorflow_framework.{}.dylib'.format(major_version)))\n    else:\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_cc.so.{}'.format(version)), os.path.join(srcs_dir, 'libtensorflow_cc.so.{}'.format(major_version)))\n        shutil.move(os.path.join(srcs_dir, 'libtensorflow_framework.so.{}'.format(version)), os.path.join(srcs_dir, 'libtensorflow_framework.so.{}'.format(major_version)))", "docstring": "Update libtensorflow_cc file name.\n\nBazel sets full TF version in name but libtensorflow_cc must contain only\nmajor. Update accordingly to the platform:\ne.g. libtensorflow_cc.so.2.15.0 -> libtensorflow_cc.2\n\nArgs:\nsrcs_dir: target directory with files.\nversion: Major version to be set.", "source": "github-repos"}
{"code": "def url(request, json_list, nested, url_name='show_{}', ignore_get=None):\n    if (not ignore_get):\n        ignore_get = []\n    if isinstance(url_name, str):\n        url_string = str(url_name)\n        url_name = (lambda x: url_string.format(x))\n    urls = cache.get('proso_urls')\n    if (urls is None):\n        urls = {}\n    else:\n        urls = json_lib.loads(urls)\n    cache_updated = False\n    pass_string = pass_get_parameters_string(request, ignore_get)\n    for json in json_list:\n        if (('object_type' not in json) or ('id' not in json)):\n            continue\n        key = ('show_%s_%s' % (json['object_type'], json['id']))\n        if (key in urls):\n            json['url'] = urls[key]\n        else:\n            cache_updated = True\n            json['url'] = reverse(url_name(json['object_type']), kwargs={'id': json['id']})\n            urls[key] = json['url']\n        json['url'] = append_get_parameters(json['url'], pass_string)\n    if cache_updated:\n        cache.set('proso_urls', json_lib.dumps(urls), CACHE_EXPIRATION)", "docstring": "Enrich the given list of objects, so they have URL.\n\nArgs:\nrequest (django.http.request.HttpRequest): request which is currently processed\njson_list (list): list of dicts (JSON objects to be enriched)\nurl_name (str|fun): pattern to create a url name taking object_type\nignore_get (list): list of GET parameters which are ignored when the URL is generated\n\nReturns:\nlist: list of dicts (enriched JSON objects)", "source": "codesearchnet"}
{"code": "def getKeywordsForText(self, retina_name, body, ):\n        \n\n        resourcePath = '/text/keywords'\n        method = 'POST'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        queryParams['retina_name'] = retina_name\n        postData = body\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n        return response.json()", "docstring": "Get a list of keywords from the text\nArgs:\nretina_name, str: The retina name (required)\nbody, str: The text to be evaluated (required)\nReturns: Array[str]", "source": "juraj-google-style"}
{"code": "def _handle_uniqueness(self):\n\n    def _getattr(u):\n        try:\n            return self._field_values[u]\n        except KeyError:\n            return getattr(self, u)\n    if self._uniques:\n        for u in self._uniques:\n            val = _getattr(u)\n            changed_fields = self.changed_fields(from_db=True)\n            if (self.exist and (not ((u in changed_fields) if (not callable(val)) else ((str(u) + '_id') in changed_fields)))):\n                if (val and (self.objects.filter(**{u: val}).count() > 1)):\n                    raise IntegrityError(('Unique mismatch: %s for %s already exists for value: %s' % (u, self.__class__.__name__, val)))\n            elif (val and self.objects.filter(**{u: val}).count()):\n                raise IntegrityError(('Unique mismatch: %s for %s already exists for value: %s' % (u, self.__class__.__name__, val)))\n    if self.Meta.unique_together:\n        changed_fields = self.changed_fields(from_db=True)\n        for uniques in self.Meta.unique_together:\n            vals = dict([(u, _getattr(u)) for u in uniques])\n            if self.exist:\n                query_is_changed = []\n                for uni in vals.keys():\n                    if callable(vals[uni]):\n                        is_changed = ((str(uni) + '_id') in changed_fields)\n                        query_is_changed.append(is_changed)\n                    else:\n                        is_changed = (uni in changed_fields)\n                        query_is_changed.append(is_changed)\n                is_unique_changed = any(query_is_changed)\n                if (not is_unique_changed):\n                    if (self.objects.filter(**vals).count() > 1):\n                        raise IntegrityError(('Unique together mismatch: %s combination already exists for %s' % (vals, self.__class__.__name__)))\n                elif self.objects.filter(**vals).count():\n                    raise IntegrityError(('Unique together mismatch: %s combination already exists for %s' % (vals, self.__class__.__name__)))\n            elif self.objects.filter(**vals).count():\n                raise IntegrityError(('Unique together mismatch: %s combination already exists for %s' % (vals, self.__class__.__name__)))", "docstring": "Checks marked as unique and unique_together fields of the Model at each\ncreation and update, and if it violates the uniqueness raises IntegrityError.\n\nFirst, looks at the fields which marked as \"unique\". If Model's unique fields\ndid not change, it means that there is still a record at db with same unique\nfield values. So, it must be checked that if more than one result violates the\nuniqueness. If it is, raise an IntegrityError. Otherwise, when marked as unique\nfields in the list of changed fields, it must be checked that if exists any\nviolation instead of more than one. And, if it is, again raise an IntegrityError.\n\nThen, looks at the fields which marked as \"unique_together\" with the same logic.\n\nRaises:\nIntegrityError if unique and unique_together checks does not pass", "source": "codesearchnet"}
{"code": "def from_json_file(cls, json_file: Union[str, os.PathLike]) -> 'PretrainedConfig':\n    config_dict = cls._dict_from_json_file(json_file)\n    return cls(**config_dict)", "docstring": "Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.\n\nArgs:\njson_file (`str` or `os.PathLike`):\nPath to the JSON file containing the parameters.\n\nReturns:\n[`PretrainedConfig`]: The configuration object instantiated from that JSON file.", "source": "github-repos"}
{"code": "def __set_mutation_type(self, hgvs_string):\n        \n        self.__set_lost_stop_status(hgvs_string)\n        self.__set_lost_start_status(hgvs_string)\n        self.__set_missense_status(hgvs_string)  \n        self.__set_indel_status()  \n        self.__set_frame_shift_status()  \n        self.__set_premature_stop_codon_status(hgvs_string)", "docstring": "Interpret the mutation type (missense, etc.) and set appropriate flags.\n\nArgs:\nhgvs_string (str): hgvs syntax with \"p.\" removed", "source": "juraj-google-style"}
{"code": "def _lob_end_handler_factory(ion_type, action, validate=(lambda c, ctx, action_res: None)):\n    assert ((ion_type is IonType.BLOB) or (ion_type is IonType.CLOB))\n\n    @coroutine\n    def lob_end_handler(c, ctx):\n        val = ctx.value\n        prev = c\n        action_res = None\n        if ((c != _CLOSE_BRACE) and (c not in _WHITESPACE)):\n            action_res = action(c, ctx, prev, action_res, True)\n        (c, self) = (yield)\n        trans = ctx.immediate_transition(self)\n        while True:\n            if (c in _WHITESPACE):\n                if (prev == _CLOSE_BRACE):\n                    _illegal_character(c, ctx.set_ion_type(ion_type), 'Expected }.')\n            elif (c == _CLOSE_BRACE):\n                if (prev == _CLOSE_BRACE):\n                    validate(c, ctx, action_res)\n                    break\n            else:\n                action_res = action(c, ctx, prev, action_res, False)\n            prev = c\n            (c, _) = (yield trans)\n        ctx.set_self_delimiting(True)\n        (yield ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ion_type, _parse_lob(ion_type, val)))\n    return lob_end_handler", "docstring": "Generates handlers for the end of blob or clob values.\n\nArgs:\nion_type (IonType): The type of this lob (either blob or clob).\naction (callable): Called for each non-whitespace, non-closing brace character encountered before the end of\nthe lob. Accepts the current character's ordinal, the current context, the previous character's ordinal,\nthe result of the previous call to ``action`` (if any), and True if this is the first call to ``action``.\nReturns any state that will be needed by subsequent calls to ``action``. For blobs, this should validate\nthe character is valid base64; for clobs, this should ensure there are no illegal characters (e.g. comments)\nbetween the end of the data and the end of the clob.\nvalidate (Optional[callable]): Called once the second closing brace has been found. Accepts the current\ncharacter's ordinal, the current context, and the result of the last call to ``action``; raises an error\nif this is not a valid lob value.", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    structure = self.LOG_LINE\n\n    try:\n      parsed_structure = structure.parseString(line)\n    except pyparsing.ParseException:\n      logger.debug('Not a XChat scrollback log file')\n      return False\n\n    try:\n      int(parsed_structure.timestamp, 10)\n    except ValueError:\n      logger.debug('Not a XChat scrollback log file, invalid timestamp string')\n      return False\n\n    return True", "docstring": "Verify that this file is a XChat scrollback log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line was successfully parsed.", "source": "juraj-google-style"}
{"code": "def _get_mpr_view(self, connection, table):\n        \n        logger.debug(\n            'Looking for view of the table.\\n    table: {}'.format(table.vid))\n        view = self.get_view_name(table)\n        view_exists = self._relation_exists(connection, view)\n        if view_exists:\n            logger.debug(\n                'View of the table exists.\\n    table: {}, view: {}'\n                .format(table.vid, view))\n            return view\n        raise MissingViewError('sqlite database does not have view for {} table.'\n                               .format(table.vid))", "docstring": "Finds and returns view name in the sqlite db represented by given connection.\n\nArgs:\nconnection: connection to sqlite db where to look for partition table.\ntable (orm.Table):\n\nRaises:\nMissingViewError: if database does not have partition table.\n\nReturns:\nstr: database table storing partition data.", "source": "juraj-google-style"}
{"code": "def __save__(script_name, benchbuild, experiment, projects):\n    \n    from jinja2 import Environment, PackageLoader\n\n    logs_dir = os.path.dirname(CFG['slurm']['logs'].value)\n    node_command = str(benchbuild[\"-E\", experiment.name, \"$_project\"])\n    env = Environment(\n        trim_blocks=True,\n        lstrip_blocks=True,\n        loader=PackageLoader('benchbuild', 'utils/templates'))\n    template = env.get_template('slurm.sh.inc')\n\n    with open(script_name, 'w') as slurm2:\n        slurm2.write(\n            template.render(\n                config=[\"export \" + x for x in repr(CFG).split('\\n')],\n                clean_lockdir=str(CFG[\"slurm\"][\"node_dir\"]),\n                clean_lockfile=str(CFG[\"slurm\"][\"node_dir\"]) + \\\n                    \".clean-in-progress.lock\",\n                cpus=int(CFG['slurm']['cpus_per_task']),\n                exclusive=bool(CFG['slurm']['exclusive']),\n                lockfile=str(CFG['slurm'][\"node_dir\"]) + \".lock\",\n                log=local.path(logs_dir) / str(experiment.id),\n                max_running=int(CFG['slurm']['max_running']),\n                name=experiment.name,\n                nice=int(CFG['slurm']['nice']),\n                nice_clean=int(CFG[\"slurm\"][\"nice_clean\"]),\n                node_command=node_command,\n                no_multithreading=not CFG['slurm']['multithread'],\n                ntasks=1,\n                prefix=str(CFG[\"slurm\"][\"node_dir\"]),\n                projects=projects,\n                slurm_account=str(CFG[\"slurm\"][\"account\"]),\n                slurm_partition=str(CFG[\"slurm\"][\"partition\"]),\n                timelimit=str(CFG['slurm']['timelimit']),\n            )\n        )\n\n    chmod(\"+x\", script_name)\n    if not __verify__(script_name):\n        LOG.error(\"SLURM script failed verification.\")\n    print(\"SLURM script written to {0}\".format(script_name))\n    return script_name", "docstring": "Dump a bash script that can be given to SLURM.\n\nArgs:\nscript_name (str): name of the bash script.\ncommands (list(benchbuild.utils.cmd)):\nList of plumbum commands to write to the bash script.\n**kwargs: Dictionary with all environment variable bindings we should\nmap in the bash script.", "source": "juraj-google-style"}
{"code": "def plot_histograms(self, freq=None, title=None,\n                        figsize=(10, 10), **kwargs):\n        \n        if title is None:\n            title = self._get_default_plot_title(\n                freq, 'Return Histogram Matrix')\n\n        plt.figure()\n        ser = self._get_series(freq).to_returns().dropna()\n        ser.hist(figsize=figsize, **kwargs)\n        return plt.suptitle(title)", "docstring": "Wrapper around pandas' hist.\n\nArgs:\n* freq (str): Data frequency used for display purposes.\nRefer to pandas docs for valid freq strings.\n* figsize ((x,y)): figure size\n* title (str): Title if default not appropriate\n* kwargs: passed to pandas' hist method", "source": "juraj-google-style"}
{"code": "def get_failed_enrollment_message(cls, users, enrolled_in):\n        \n        failed_emails = [user.email for user in users]\n        return (\n            'error',\n            _(\n                'The following learners could not be enrolled in {enrolled_in}: {user_list}'\n            ).format(\n                enrolled_in=enrolled_in,\n                user_list=', '.join(failed_emails),\n            )\n        )", "docstring": "Create message for the users who were not able to be enrolled in a course or program.\n\nArgs:\nusers: An iterable of users who were not successfully enrolled\nenrolled_in (str): A string identifier for the course or program with which enrollment was attempted\n\nReturns:\ntuple: A 2-tuple containing a message type and message text", "source": "juraj-google-style"}
{"code": "def stop(pid):\n    \n    if psutil.pid_exists(pid):\n      try:\n        p = psutil.Process(pid)\n        p.kill()\n      except Exception:\n        pass", "docstring": "Shut down a specific process.\n\nArgs:\npid: the pid of the process to shutdown.", "source": "juraj-google-style"}
{"code": "def __init__(self, selenium):\n        \n        self.selenium = selenium\n        self.window_manager = WindowManager(selenium)\n        \n        self.browser = self.window_manager.windows[0]", "docstring": "Create FoxPuppet object.\n\nArgs:\nselenium:\n(:py:class:`~selenium.webdriver.remote.webdriver.WebDriver`):\nFirefox WebDriver object.", "source": "juraj-google-style"}
{"code": "def OpenFileObject(cls, path_spec_object, resolver_context=None):\n    if (not isinstance(path_spec_object, path_spec.PathSpec)):\n        raise TypeError('Unsupported path specification type.')\n    if (resolver_context is None):\n        resolver_context = cls._resolver_context\n    if (path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT):\n        if path_spec_object.HasParent():\n            raise errors.PathSpecError('Unsupported mount path specification with parent.')\n        mount_point = getattr(path_spec_object, 'identifier', None)\n        if (not mount_point):\n            raise errors.PathSpecError('Unsupported path specification without mount point identifier.')\n        path_spec_object = mount_manager.MountPointManager.GetMountPoint(mount_point)\n        if (not path_spec_object):\n            raise errors.MountPointError('No such mount point: {0:s}'.format(mount_point))\n    file_object = resolver_context.GetFileObject(path_spec_object)\n    if (not file_object):\n        resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)\n        file_object = resolver_helper.NewFileObject(resolver_context)\n    file_object.open(path_spec=path_spec_object)\n    return file_object", "docstring": "Opens a file-like object defined by path specification.\n\nArgs:\npath_spec_object (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built in context which is not multi process safe.\n\nReturns:\nFileIO: file-like object or None if the path specification could not\nbe resolved.\n\nRaises:\nPathSpecError: if the path specification is incorrect.\nTypeError: if the path specification type is unsupported.", "source": "codesearchnet"}
{"code": "def sqrt(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.sqrt, tf.float32)", "docstring": "Returns a TensorFluent for the sqrt function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the sqrt function.", "source": "codesearchnet"}
{"code": "def __init__(self, decode_module, encode_module, methodName='runTest'):\n    super(EncodeProtoOpTestBase, self).__init__(methodName)\n    self._decode_module = decode_module\n    self._encode_module = encode_module", "docstring": "EncodeProtoOpTestBase initializer.\n\nArgs:\ndecode_module: a module containing the `decode_proto_op` method\nencode_module: a module containing  the `encode_proto_op` method\nmethodName: the name of the test method (same as for test.TestCase)", "source": "github-repos"}
{"code": "def sync_firmware(self):\n        \n        serial_no = self.serial_number\n\n        if self.firmware_newer():\n            \n            \n            \n            try:\n                \n                \n                \n                self.invalidate_firmware()\n                self.update_firmware()\n            except errors.JLinkException as e:\n                pass\n\n            res = self.open(serial_no=serial_no)\n\n            if self.firmware_newer():\n                raise errors.JLinkException('Failed to sync firmware version.')\n\n            return res\n\n        elif self.firmware_outdated():\n            \n            \n            try:\n                \n                \n                \n                self.update_firmware()\n            except errors.JLinkException as e:\n                pass\n\n            if self.firmware_outdated():\n                raise errors.JLinkException('Failed to sync firmware version.')\n\n            return self.open(serial_no=serial_no)\n\n        return None", "docstring": "Syncs the emulator's firmware version and the DLL's firmware.\n\nThis method is useful for ensuring that the firmware running on the\nJ-Link matches the firmware supported by the DLL.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def strace_clear_all(self):\n    data = 0\n    res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.TRACE_EVENT_CLR_ALL, data)\n    if (res < 0):\n        raise errors.JLinkException('Failed to clear all STRACE events.')\n    return None", "docstring": "Clears all STRACE events.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def parse_args(argv):\n    parser = make_parser()\n    args = parser.parse_args(argv)\n    t = args.tool_args\n    kythe_args = kythe.Args(corpus=t.kythe_corpus, root=t.kythe_root, path=t.kythe_path, skip_stdlib=t.skip_stdlib)\n    return (args.all_args, kythe_args, args.pytype_opts)", "docstring": "Parse command line args.\n\nArguments:\nargv: Raw command line args, typically sys.argv[1:]\n\nReturns:\nA tuple of (\nparsed_args: argparse.Namespace,\nkythe_args: kythe.Args,\npytype_options: pytype.config.Options)", "source": "github-repos"}
{"code": "def predict(self, x_test):\n        \n        if self.model:\n            lengths = map(len, x_test)\n            x_test = self.p.transform(x_test)\n            y_pred = self.model.predict(x_test)\n            y_pred = self.p.inverse_transform(y_pred, lengths)\n            return y_pred \n        else:\n            raise OSError('Could not find a model. Call load(dir_path).')", "docstring": "Returns the prediction of the model on the given test data.\n\nArgs:\nx_test : array-like, shape = (n_samples, sent_length)\nTest samples.\n\nReturns:\ny_pred : array-like, shape = (n_smaples, sent_length)\nPrediction labels for x.", "source": "juraj-google-style"}
{"code": "def is_running(process):\n\t\n\n\tif os.name == 'nt':\n\t\tprocess_list = get_cmd_out(['tasklist', '/v'])\n\t\treturn process in process_list\n\n\telse:\n\t\tprocess_list = get_cmd_out('ps axw | awk \\'{print $5}\\'')\n\n\t\tfor i in process_list.split('\\n'):\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\tif not i == 'COMMAND' or i.startswith('['):\n\t\t\t\tif i == process:\n\t\t\t\t\treturn True\n\n\t\t\t\telif os.path.basename(i) == process:\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\treturn True\n\n\treturn False", "docstring": "Check if process is running.\n\nCheck if the given process name is running or not.\n\nNote:\nOn a Linux system, kernel threads (like\t``kthreadd`` etc.)\nare excluded.\n\nArgs:\nprocess (str): The name of the process.\n\nReturns:\nbool: Is the process running?", "source": "juraj-google-style"}
{"code": "def mark_all_as_done(self, **kwargs):\n    result = self.gitlab.http_post('/todos/mark_as_done', **kwargs)\n    try:\n        return int(result)\n    except ValueError:\n        return 0", "docstring": "Mark all the todos as done.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTodoError: If the server failed to perform the request\n\nReturns:\nint: The number of todos maked done", "source": "codesearchnet"}
{"code": "def creationlog(base, package, stackdepth=_def_stackdepth):\n\n    @staticmethod\n    def wrapnew(cls, *argl, **argd):\n        global _atdepth_new, _cstack_new, streamlining\n        origstream = None\n        if (not (decorating or streamlining)):\n            (entry, _atdepth_new) = _pre_create(cls, _atdepth_new, stackdepth, *argl, **argd)\n            _cstack_new.append(cls)\n            fqdn = cls.__fqdn__\n            if ((fqdn in _streamlines) and _streamlines[fqdn]):\n                msg.std('Streamlining {}.'.format(fqdn), 2)\n                origstream = streamlining\n                streamlining = True\n        try:\n            if six.PY2:\n                result = base.__old__(cls, *argl, **argd)\n            elif (base.__old__ is object.__new__):\n                result = base.__old__(cls)\n            else:\n                result = base.__old__(cls, *argl, **argd)\n        except TypeError:\n            import sys\n            (xcls, xerr) = sys.exc_info()[0:2]\n            referral = xerr.args[0].split()[(- 1)]\n            if ('.__new__()' in referral):\n                t = eval(referral.split('.')[0])\n                result = t.__new__(cls, *argl, **argd)\n            else:\n                raise\n                result = None\n        if ((result is not None) and hasattr(cls, '__init__')):\n            try:\n                cls.__init__(result, *argl, **argd)\n            except:\n                print(cls, argl, argd)\n                raise\n        else:\n            msg.err('Object initialize failed for {}.'.format(base.__name__))\n        if (origstream is not None):\n            streamlining = origstream\n        if (not (decorating or streamlining)):\n            _cstack_new.pop()\n            if (len(_cstack_new) == 0):\n                _atdepth_new = False\n            _post_create(_atdepth_new, entry, result)\n        return result\n    return wrapnew", "docstring": "Decorator for wrapping the creation of class instances that are being logged\nby acorn.\n\nArgs:\nbase: base class used to call __new__ for the construction.\npackage (str): name of (global) package the class belongs to.\nstackdepth (int): if the calling stack is less than this depth, than\ninclude the entry in the log; otherwise ignore it.", "source": "codesearchnet"}
{"code": "def points_random_3d(count, range_x=((- 10.0), 10.0), range_y=((- 10.0), 10.0), range_z=((- 10.0), 10.0), seed=None) -> VAO:\n    random.seed(seed)\n\n    def gen():\n        for _ in range(count):\n            (yield random.uniform(*range_x))\n            (yield random.uniform(*range_y))\n            (yield random.uniform(*range_z))\n    data = numpy.fromiter(gen(), count=(count * 3), dtype=numpy.float32)\n    vao = VAO('geometry:points_random_3d', mode=moderngl.POINTS)\n    vao.buffer(data, '3f', ['in_position'])\n    return vao", "docstring": "Generates random positions inside a confied box.\n\nArgs:\ncount (int): Number of points to generate\n\nKeyword Args:\nrange_x (tuple): min-max range for x axis: Example (-10.0. 10.0)\nrange_y (tuple): min-max range for y axis: Example (-10.0. 10.0)\nrange_z (tuple): min-max range for z axis: Example (-10.0. 10.0)\nseed (int): The random seed\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance", "source": "codesearchnet"}
{"code": "def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None):\n    unexpected_keys = set()\n    saved_keys = set()\n    mismatched_keys = set()\n    model_keys = set()\n    model_layer_map = {}\n    for i, k in enumerate(model.weights):\n        layer_name = k.name\n        if _prefix is not None and layer_name.startswith(_prefix):\n            layer_name = layer_name[len(_prefix):]\n            layer_name = layer_name.lstrip('/')\n        if not ('model.' in layer_name or len(layer_name.split('/')) == 1):\n            layer_name = '/'.join(layer_name.split('/')[1:])\n        model_keys.add(layer_name)\n        model_layer_map[layer_name] = i\n    for shard_file in shard_files:\n        saved_weight_names_set, unexpected_keys_set, mismatched_keys_set = load_tf_shard(model, model_layer_map, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix)\n        saved_keys.update(saved_weight_names_set)\n        unexpected_keys.update(unexpected_keys_set)\n        mismatched_keys.update(mismatched_keys_set)\n        gc.collect()\n    missing_keys = model_keys - saved_keys\n    if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):\n        error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}'\n        if len(missing_keys) > 0:\n            str_missing_keys = ','.join([f'\"{k}\"' for k in missing_keys])\n            error_message += f'\\nMissing key(s): {str_missing_keys}.'\n        if len(unexpected_keys) > 0:\n            str_unexpected_keys = ','.join([f'\"{k}\"' for k in unexpected_keys])\n            error_message += f'\\nMissing key(s): {str_unexpected_keys}.'\n        raise RuntimeError(error_message)\n    return (missing_keys, unexpected_keys, mismatched_keys)", "docstring": "This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load\nthe TF weights from the shard file accordingly to their names and shapes.\n\nThis load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being\nloaded in the model.\n\nArgs:\nmodel (`keras.models.Model`): The model in which to load the checkpoint.\nshard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names.\nignore_mismatched_sizes`bool`, *optional`, defaults to `True`):\nWhether or not to ignore the mismatch between the sizes\nstrict (`bool`, *optional*, defaults to `True`):\nWhether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.\n\nReturns:\nThree lists, one for the missing layers, another one for the unexpected layers, and a last one for the\nmismatched layers.", "source": "github-repos"}
{"code": "def conversation(self, name=None, **kwargs):\n    convo = Conversation(self, **kwargs)\n    super().conversation(name, convo)\n    return convo", "docstring": "Make a new conversation.\n\nArguments:\nname: The key for the dictionary the conversation will be stored as\nin conversations. If None the conversation will be stored as a\nlist instead. Mixing both types results in an error.\n**kwargs: Keyword arguments to pass into the new conversation.\nThese accept the same arguments as Cleverbot.\n\nReturns:\nThe new conversation.", "source": "codesearchnet"}
{"code": "def method(*args, **kwargs):\n    assert (len(args) == 0)\n    assert (len(kwargs) == 1)\n    assert ('num_return_vals' in kwargs)\n    num_return_vals = kwargs['num_return_vals']\n\n    def annotate_method(method):\n        method.__ray_num_return_vals__ = num_return_vals\n        return method\n    return annotate_method", "docstring": "Annotate an actor method.\n\n.. code-block:: python\n\n@ray.remote\nclass Foo(object):\n@ray.method(num_return_vals=2)\ndef bar(self):\nreturn 1, 2\n\nf = Foo.remote()\n\n_, _ = f.bar.remote()\n\nArgs:\nnum_return_vals: The number of object IDs that should be returned by\ninvocations of this actor method.", "source": "codesearchnet"}
{"code": "def GetContract(self, script_hash):\n        \n        if script_hash.ToBytes() in self._contracts.keys():\n            return self._contracts[script_hash.ToBytes()]\n        return None", "docstring": "Get contract for specified script_hash.\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20).\n\nReturns:\nContract: if a contract was found matching the provided script hash, otherwise None", "source": "juraj-google-style"}
{"code": "def __init__(self, resolution, **kwargs):\n    \n    super(CelebaHQConfig, self).__init__(\n        name=\"%d\" % resolution,\n        description=(\"CelebaHQ images in %d x %d resolution\" %\n                     (resolution, resolution)),\n        **kwargs)\n    self.resolution = resolution\n    self.file_name = \"data%dx%d.tar\" % (resolution, resolution)", "docstring": "BuilderConfig for SQUAD.\n\nArgs:\nresolution: Resolution of the image. Values supported: powers of 2 up to\n1024.\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def range(*args, prefix: str):\n    return [NamedQubit((prefix + str(i))) for i in range(*args)]", "docstring": "Returns a range of NamedQubits.\n\nThe range returned starts with the prefix, and followed by a qubit for\neach number in the range, e.g.:\n\nNamedQubit.range(3, prefix=\"a\") -> [\"a1\", \"a2\", \"a3]\nNamedQubit.range(2, 4, prefix=\"a\") -> [\"a2\", \"a3]\n\nArgs:\n*args: Args to be passed to Python's standard range function.\nprefix: A prefix for constructed NamedQubits.\n\nReturns:\nA list of NamedQubits.", "source": "codesearchnet"}
{"code": "def args_to_kwargs(base_type, removed_method=False, removed_args=None):\n\n    def wrap(func):\n        if removed_method:\n            return func\n        removed_arg_names = removed_args if removed_args is not None else []\n        base_arg_spec = getfullargspec(unwrap(getattr(base_type, func.__name__)))\n        base_arg_names = base_arg_spec.args\n        all_possible_base_arg_names = base_arg_names + base_arg_spec.kwonlyargs\n        beam_arg_names = getfullargspec(func).args\n        if (not_found := (set(beam_arg_names) - set(all_possible_base_arg_names) - set(removed_arg_names))):\n            raise TypeError(f'Beam definition of {func.__name__} has arguments that are not found in the base version of the function: {not_found}')\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            if len(args) > len(base_arg_names):\n                raise TypeError(f'{func.__name__} got too many positioned arguments.')\n            for name, value in zip(base_arg_names, args):\n                if name in kwargs:\n                    raise TypeError(\"%s() got multiple values for argument '%s'\" % (func.__name__, name))\n                kwargs[name] = value\n            if removed_args:\n                for name in removed_args:\n                    if name not in kwargs:\n                        kwargs[name] = None\n            return func(**kwargs)\n        return wrapper\n    return wrap", "docstring": "Convert all args to kwargs before calling the decorated function.\n\nWhen applied to a function, this decorator creates a new function\nthat always calls the wrapped function with *only* keyword arguments. It\ninspects the argspec for the identically-named method on `base_type` to\ndetermine the name to use for arguments that are converted to keyword\narguments.\n\nFor internal use only. No backwards compatibility guarantees.\n\nArgs:\nbase_type: The pandas type of the method that this is trying to replicate.\nremoved_method: Whether this method has been removed in the running\nPandas version.\nremoved_args: If not empty, which arguments have been dropped in the\nrunning Pandas version.", "source": "github-repos"}
{"code": "def tap_hold(self, x, y, duration=1.0):\n        \n        data = {'x': x, 'y': y, 'duration': duration}\n        return self.http.post('/wda/touchAndHold', data=data)", "docstring": "Tap and hold for a moment\n\nArgs:\n- x, y(int): position\n- duration(float): seconds of hold time\n\n[[FBRoute POST:@\"/wda/touchAndHold\"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],", "source": "juraj-google-style"}
{"code": "def _get_object_by_name(self, object_endpoint, object_name, timeout=None):\n        \n        timeout = timeout or self._timeout\n        resp = self._get(self._u(object_endpoint, object_name),\n                         session=self._session, timeout=timeout)\n        resp.raise_for_status()\n        return resp.json()", "docstring": "generic function to get object (metadata, tag, ) by name from SignalFx.\n\nArgs:\nobject_endpoint (string): API endpoint suffix (e.g. 'v2/tag')\nobject_name (string): name of the object (e.g. 'jvm.cpu.load')\n\nReturns:\ndictionary of response", "source": "juraj-google-style"}
{"code": "def count_divisors(n):\n\n    \n\n    if not isinstance(n, int):\n        raise TypeError(\"Expecting a strictly positive integer\")\n    if n <= 0:\n        raise ValueError(\"Expecting a strictly positive integer\")\n\n    number_of_divisors = 1\n    remain = n\n\n    for p in prime_generator():\n        if p > n:\n            return number_of_divisors\n\n        exponent = 1\n        while remain % p == 0:\n            remain = remain \n            exponent += 1\n        number_of_divisors *= exponent\n\n        if remain == 1:\n            return number_of_divisors", "docstring": "Count the number of divisors of an integer n\n\nArgs:\nn (int): strictly positive integer\n\nReturns:\nThe number of distinct divisors of n\n\nRaises:\nTypeError: if n is not an integer\nValueError: if n is negative", "source": "juraj-google-style"}
{"code": "def _block_orth(self, p1, p2):\n    if p1.shape.as_list() != p2.shape.as_list():\n        raise ValueError(f'The dimension of the matrices must be the same. Received p1.shape={p1.shape} and p2.shape={p2.shape}.')\n    n = p1.shape.as_list()[0]\n    kernel2x2 = {}\n    eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n    kernel2x2[0, 0] = math_ops.matmul(p1, p2)\n    kernel2x2[0, 1] = math_ops.matmul(p1, eye - p2)\n    kernel2x2[1, 0] = math_ops.matmul(eye - p1, p2)\n    kernel2x2[1, 1] = math_ops.matmul(eye - p1, eye - p2)\n    return kernel2x2", "docstring": "Construct a 2 x 2 kernel.\n\nUsed to construct orthgonal kernel.\n\nArgs:\np1: A symmetric projection matrix.\np2: A symmetric projection matrix.\n\nReturns:\nA 2 x 2 kernel [[p1p2,         p1(1-p2)],\n[(1-p1)p2, (1-p1)(1-p2)]].\nRaises:\nValueError: If the dimensions of p1 and p2 are different.", "source": "github-repos"}
{"code": "def __str__(self):\n        \n        name = self.__class__.__name__\n        return '%s(Type %d, Address %d)' % (name, self.Type, self.Addr)", "docstring": "Returns a string representation of the data event.\n\nArgs:\nself (JLinkDataEvent): the ``JLinkDataEvent`` instance\n\nReturns:\nA string representation of the data event.", "source": "juraj-google-style"}
{"code": "def add_workflow_definitions(sbi_config: dict):\n    registered_workflows = []\n    for i in range(len(sbi_config['processing_blocks'])):\n        workflow_config = sbi_config['processing_blocks'][i]['workflow']\n        workflow_name = '{}:{}'.format(workflow_config['id'], workflow_config['version'])\n        if (workflow_name in registered_workflows):\n            continue\n        workflow_definition = dict(id=workflow_config['id'], version=workflow_config['version'], stages=[])\n        key = 'workflow_definitions:{}:{}'.format(workflow_config['id'], workflow_config['version'])\n        DB.save_dict(key, workflow_definition, hierarchical=False)\n        registered_workflows.append(workflow_name)", "docstring": "Add any missing SBI workflow definitions as placeholders.\n\nThis is a utility function used in testing and adds mock / test workflow\ndefinitions to the database for workflows defined in the specified\nSBI config.\n\nArgs:\nsbi_config (dict): SBI configuration dictionary.", "source": "codesearchnet"}
{"code": "def _read_ipv4_options(self, size=None):\n    counter = 0\n    optkind = list()\n    options = dict()\n    while (counter < size):\n        kind = self._read_unpack(1)\n        opts = IPv4_OPT.get(kind)\n        if (opts is None):\n            len_ = (size - counter)\n            counter = size\n            options['Unknown'] = self._read_fileng(len_)\n            break\n        dscp = OPT_TYPE.get(kind)\n        desc = dscp.name\n        if opts[0]:\n            byte = self._read_unpack(1)\n            if byte:\n                data = process_opt[opts[2]](self, byte, kind)\n            else:\n                data = dict(kind=kind, type=self._read_opt_type(kind), length=2, flag=True)\n        else:\n            byte = 1\n            data = dict(kind=kind, type=self._read_opt_type(kind), length=1)\n        counter += byte\n        if (dscp in optkind):\n            if isinstance(options[desc], tuple):\n                options[desc] += (Info(data),)\n            else:\n                options[desc] = (Info(options[desc]), Info(data))\n        else:\n            optkind.append(dscp)\n            options[desc] = data\n        if (not kind):\n            break\n    if (counter < size):\n        len_ = (size - counter)\n        self._read_binary(len_)\n    return (tuple(optkind), options)", "docstring": "Read IPv4 option list.\n\nPositional arguments:\n* size -- int, buffer size\n\nReturns:\n* tuple -- IPv4 option list\n* dict -- extracted IPv4 option", "source": "codesearchnet"}
{"code": "def find_duplicates_in_array(array):\n    duplicates = []\n    non_duplicates = []\n    if (len(array) != len(set(array))):\n        for item in array:\n            if (item not in non_duplicates):\n                non_duplicates.append(item)\n            elif ((item in non_duplicates) and (item not in duplicates)):\n                duplicates.append(item)\n    return duplicates", "docstring": "Runs through the array and returns the elements that contain\nmore than one duplicate\n\nArgs:\narray: The array to check for duplicates.\n\nReturns:\nArray of the elements that are duplicates. Returns empty list if\nthere are no duplicates.", "source": "codesearchnet"}
{"code": "def get_figure(new_fig=True, subplot='111', params=None):\n    \n    _get_plt()\n\n    if new_fig:\n        fig = plt.figure()\n    else:\n        fig = plt.gcf()\n\n    params = dict_if_none(params)\n\n    if isinstance(subplot, (tuple, list)):\n        ax = fig.add_subplot(*subplot, **params)\n    else:\n        ax = fig.add_subplot(subplot, **params)\n\n    return fig, ax", "docstring": "Function to be used for viewing - plotting,\nto initialize the matplotlib figure - axes.\n\nArgs:\nnew_fig(bool): Defines if a new figure will be created, if false current figure is used\nsubplot (tuple or matplolib subplot specifier string): Create axes with these parameters\nparams (dict): extra options passed to add_subplot()\n\nReturns:\nMatplotlib Figure and Axes", "source": "juraj-google-style"}
{"code": "def summary(self, line_length=160, detailed=True, print_fn=None):\n    if not self._converted:\n        raise RuntimeError(f'Impossible to call `{self.__class__.__name__}.summary()` before calling {self.__class__.__name__}.convert()`.')\n    if line_length < 160:\n        raise ValueError(f'Invalid `line_length` value has been received: {line_length}. Minimum: 160.')\n    if print_fn is None:\n        print_fn = print\n    columns = [('TRTEngineOP Name', 0.2), ('Device', 0.09), ('\n    positions = [int(line_length * p) for _, p in columns]\n    positions = np.cumsum(positions).tolist()\n    headers = [h for h, _ in columns]\n    _print_row(headers, positions, print_fn=print_fn)\n    print_fn('=' * line_length)\n    n_engines = 0\n    n_ops_converted = 0\n    n_ops_not_converted = 0\n    graphdef = self._converted_func.graph.as_graph_def(add_shapes=True)\n    trtengineops_dict = dict()\n    for node in graphdef.node:\n        if node.op != 'TRTEngineOp':\n            n_ops_not_converted += 1\n            continue\n        else:\n            trtengineops_dict[node.name] = node\n            n_engines += 1\n    for name, node in sorted(trtengineops_dict.items()):\n        node_device = node.device.split('/')[-1]\n        in_shapes = trt_utils.get_node_io_shapes(node, 'input_shapes')\n        out_shapes = trt_utils.get_node_io_shapes(node, '_output_shapes')\n        in_dtypes = trt_utils.get_trtengineop_io_dtypes(node, 'InT')\n        out_dtypes = trt_utils.get_trtengineop_io_dtypes(node, 'OutT')\n        in_nodes_count = trt_utils.get_trtengineop_io_nodes_count(node, 'InT')\n        out_nodes_count = trt_utils.get_trtengineop_io_nodes_count(node, 'OutT')\n        node_count, converted_ops_dict = trt_utils.get_trtengineop_node_op_count(graphdef, name)\n        n_ops_converted += node_count\n        if n_engines != 1:\n            print_fn(f'\\n{'-' * 40}\\n')\n        _print_row(fields=[name, node_device, node_count, in_nodes_count, out_nodes_count, in_dtypes, out_dtypes, in_shapes, out_shapes], positions=positions, print_fn=print_fn)\n        if detailed:\n            print_fn()\n            for key, value in sorted(dict(converted_ops_dict).items()):\n                print_fn(f'\\t- {key}: {value}x')\n    print_fn(f'\\n{'=' * line_length}')\n    print_fn(f'[*] Total number of TensorRT engines: {n_engines}')\n    total_ops = n_ops_not_converted + n_ops_converted\n    conversion_ratio = n_ops_converted / total_ops * 100\n    print_fn(f'[*] % of OPs Converted: {conversion_ratio:.2f}% [{n_ops_converted}/{total_ops}]\\n')", "docstring": "This method describes the results of the conversion by TF-TRT.\n\nIt includes information such as the name of the engine, the number of nodes\nper engine, the input and output dtype, along with the input shape of each\nTRTEngineOp.\n\nArgs:\nline_length: Default line length when printing on the console. Minimum 160\ncharacters long.\ndetailed: Whether or not to show the nodes inside each TRTEngineOp.\nprint_fn: Print function to use. Defaults to `print`. It will be called on\neach line of the summary. You can set it to a custom function in order\nto capture the string summary.\n\nRaises:\nRuntimeError: if the graph is not converted.", "source": "github-repos"}
{"code": "def MsgUser(msg):\n    msg_tested_versions = ['xp', 'vista', '2008', '2003']\n    msg_args = ['/c', '%SystemRoot%\\\\System32\\\\msg.exe', '*', '/TIME:0']\n    host_version = platform.platform().lower()\n    if (not msg):\n        return ('Command not ran.', 'Empty message.', (- 1))\n    else:\n        msg_args.extend([msg])\n    for version in msg_tested_versions:\n        if (host_version.find(version) != (- 1)):\n            res = client_utils_common.Execute('cmd', msg_args, time_limit=(- 1), bypass_whitelist=True)\n            return res\n    return ('', 'Command not available for this version.', (- 1))", "docstring": "Sends a message to a user.\n\nArgs:\nmsg: Message to be displaied to user.\n\nReturns:\nres which is a tuple of (stdout, stderr, exit_status, time_taken).", "source": "codesearchnet"}
{"code": "def top_kth_iterative(x, k):\n\n    def next_x(cur_x, _):\n        top_x = tf.reduce_max(cur_x, axis=(- 1), keep_dims=True)\n        return (cur_x * to_float((cur_x < top_x)))\n    fin_x = tf.foldl(next_x, tf.range((k - 1)), initializer=tf.stop_gradient(x), parallel_iterations=2, back_prop=False)\n    return tf.stop_gradient(tf.reduce_max(fin_x, axis=(- 1), keep_dims=True))", "docstring": "Compute the k-th top element of x on the last axis iteratively.\n\nThis assumes values in x are non-negative, rescale if needed.\nIt is often faster than tf.nn.top_k for small k, especially if k < 30.\nNote: this does not support back-propagation, it stops gradients!\n\nArgs:\nx: a Tensor of non-negative numbers of type float.\nk: a python integer.\n\nReturns:\na float tensor of the same shape as x but with 1 on the last axis\nthat contains the k-th largest number in x.", "source": "codesearchnet"}
{"code": "def vectorial_decomp(self, symbols):\n    try:\n        symbols = [s.vec for s in symbols]\n        N = sum(map((lambda s: len(s)), symbols))\n        symbols_ = Vector(N)\n        i = 0\n        for v in symbols:\n            for s in v:\n                symbols_[i] = s\n                i += 1\n        symbols = symbols_\n    except TypeError:\n        pass\n    return self.mba.vectorial_decomp(symbols, self.vec)", "docstring": "Compute the vectorial decomposition of the expression according to the given symbols.\n\nsymbols is a list that represents the input of the resulting\napplication. They are considerated as a flatten vector of bits.\n\nArgs:\nsymbols: TODO\n\nReturns:\nAn :class:`pytanque.App` object\n\nExample:\n>>> mba = MBA(4)\n>>> x = mba.var('x')\n>>> y = mba.var('y')\n>>> e = x^y^6\n>>> e.vectorial_decomp([x,y])\nApp NL = Vec([\n0,\n0,\n0,\n0\n])\nAffApp matrix = Mat([\n[1, 0, 0, 0, 1, 0, 0, 0]\n[0, 1, 0, 0, 0, 1, 0, 0]\n[0, 0, 1, 0, 0, 0, 1, 0]\n[0, 0, 0, 1, 0, 0, 0, 1]\n])\nAffApp cst = Vec([\n0,\n1,\n1,\n0\n])", "source": "codesearchnet"}
{"code": "def request(self, request_method, api_method, *args, **kwargs):\n    url = self._build_url(api_method)\n    resp = requests.request(request_method, url, *args, **kwargs)\n    try:\n        rv = resp.json()\n    except ValueError:\n        raise RequestFailedError(resp, 'not a json body')\n    if (not resp.ok):\n        raise RequestFailedError(resp, rv.get('error'))\n    return rv", "docstring": "Perform a request.\n\nArgs:\nrequest_method: HTTP method for this request.\napi_method: API method name for this request.\n*args: Extra arguments to pass to the request.\n**kwargs: Extra keyword arguments to pass to the request.\n\nReturns:\nA dict contains the request response data.\n\nRaises:\nRequestFailedError: Raises when BearyChat's OpenAPI responses\nwith status code != 2xx", "source": "codesearchnet"}
{"code": "def must_exist(*components):\n    _path = path(*components)\n    if (not exists(_path)):\n        raise File404(_path)\n    return _path", "docstring": "Ensure path exists.\n\nArguments:\n*components (str[]): Path components.\n\nReturns:\nstr: File path.\n\nRaises:\nFile404: If path does not exist.", "source": "codesearchnet"}
{"code": "def unwrap(tensor):\n  \n  while isinstance(tensor, (PrettyTensor, Loss)):\n    tensor = tensor.tensor\n  return tensor", "docstring": "Returns the underlying tensor if tensor is wrapped or tensor.\n\nArgs:\ntensor: The tensor to unwrap.\nReturns:\nTensor or if it is a pretty tensor, the unwrapped version.\nRaises:\nValueError: if tensor holds a sequence.", "source": "juraj-google-style"}
{"code": "def submit_job(self, job_config=None):\n        \n        job_id = self._delegator._submit_bundle(self, job_config)\n        return self._instance.get_job(job_id)", "docstring": "Submit this Streams Application Bundle (sab file) to\nits associated instance.\n\nArgs:\njob_config(JobConfig): a job configuration overlay\n\nReturns:\nJob: Resulting job instance.", "source": "juraj-google-style"}
{"code": "def log_cdf_laplace(x, name='log_cdf_laplace'):\n    with tf.name_scope(name):\n        x = tf.convert_to_tensor(value=x, name='x')\n        lower_solution = ((- np.log(2.0)) + x)\n        safe_exp_neg_x = tf.exp((- tf.abs(x)))\n        upper_solution = tf.math.log1p(((- 0.5) * safe_exp_neg_x))\n        return tf.where((x < 0.0), lower_solution, upper_solution)", "docstring": "Log Laplace distribution function.\n\nThis function calculates `Log[L(x)]`, where `L(x)` is the cumulative\ndistribution function of the Laplace distribution, i.e.\n\n```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```\n\nFor numerical accuracy, `L(x)` is computed in different ways depending on `x`,\n\n```\nx <= 0:\nLog[L(x)] = Log[0.5] + x, which is exact\n\n0 < x:\nLog[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact\n```\n\nArgs:\nx: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"log_ndtr\").\n\nReturns:\n`Tensor` with `dtype=x.dtype`.\n\nRaises:\nTypeError: if `x.dtype` is not handled.", "source": "codesearchnet"}
{"code": "def extract_annotation(self, node, var, name, stack, allowed_type_params: set[str] | None=None):\n    try:\n        typ = abstract_utils.get_atomic_value(var)\n    except abstract_utils.ConversionError:\n        self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, None, name)\n        return self.ctx.convert.unsolvable\n    typ = self._process_one_annotation(node, typ, name, stack)\n    if not typ:\n        return self.ctx.convert.unsolvable\n    if typ.formal and allowed_type_params is not None:\n        allowed_type_params = allowed_type_params | self.get_callable_type_parameter_names(typ)\n        if self.ctx.vm.frame.func and (isinstance(self.ctx.vm.frame.func.data, abstract.BoundFunction) or self.ctx.vm.frame.func.data.is_class_builder):\n            allowed_type_params.add('typing.Self')\n        illegal_params = []\n        for x in self.get_type_parameters(typ):\n            if not allowed_type_params.intersection([x.name, x.full_name]):\n                illegal_params.append(x.name)\n        if illegal_params:\n            self._log_illegal_params(illegal_params, stack, typ, name)\n            return self.ctx.convert.unsolvable\n    return typ", "docstring": "Returns an annotation extracted from 'var'.\n\nArgs:\nnode: The current node.\nvar: The variable to extract from.\nname: The annotated name.\nstack: The frame stack.\nallowed_type_params: Type parameters that are allowed to appear in the\nannotation. 'None' means all are allowed. If non-None, the result of\ncalling get_callable_type_parameter_names on the extracted annotation is\nalso added to the allowed set.", "source": "github-repos"}
{"code": "def merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None):\n    if (not isinstance(left, DataFrame)):\n        raise ValueError('can not merge DataFrame with instance of type {}'.format(type(right)))\n    return left.merge(right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate)", "docstring": "Database style join, where common columns in \"on\" are merged.\n\nArgs:\nleft: DataFrame.\nright: DataFrame.\nhow: What type of join to use.\non: The common column name(s) to join on. If None, and left_on and\nright_on  are also None, will default to all commonly named\ncolumns.\nleft_on: The column(s) on the left to use for the join.\nright_on: The column(s) on the right to use for the join.\nleft_index: Use the index from the left as the join keys.\nright_index: Use the index from the right as the join keys.\nsort: Sort the join keys lexicographically in the result.\nsuffixes: Add this suffix to the common names not in the \"on\".\ncopy: Does nothing in our implementation\nindicator: Adds a column named _merge to the DataFrame with\nmetadata from the merge about each row.\nvalidate: Checks if merge is a specific type.\n\nReturns:\nA merged Dataframe", "source": "codesearchnet"}
{"code": "def slice_hidden(x, hidden_size, num_blocks):\n    (batch_size, latent_dim, _) = common_layers.shape_list(x)\n    block_dim = (hidden_size \n    x_sliced = tf.reshape(x, shape=[batch_size, latent_dim, num_blocks, block_dim])\n    return x_sliced", "docstring": "Slice encoder hidden state under num_blocks.\n\nArgs:\nx: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].\nhidden_size: Dimension of the latent space.\nnum_blocks: Number of blocks in DVQ.\n\nReturns:\nSliced states of shape [batch_size, latent_dim, num_blocks, block_dim].", "source": "codesearchnet"}
{"code": "def readuntil(self, token, size=0):\n    self.__append()\n    i = self.buf.find(token, self.pos)\n    if (i < 0):\n        index = max((len(token) - 1), size)\n        newpos = max((len(self.buf) - index), self.pos)\n        data = self.buf[self.pos:newpos]\n        self.pos = newpos\n        self.__discard()\n        return (False, data)\n    newpos = (i + len(token))\n    data = self.buf[self.pos:newpos]\n    self.pos = newpos\n    self.__discard()\n    return (True, data)", "docstring": "Reads data from the FIFO until a token is encountered.\n\nIf no token is encountered as much data is read from the FIFO as\npossible keeping in mind that the FIFO must retain enough data to\nperform matches for the token across writes.\n\nArgs:\ntoken: The token to read until.\nsize: The minimum amount of data that should be left in the FIFO.\nThis is only used if it is greater than the length of the\ntoken.  When ommited this value will default to the length of\nthe token.\n\nReturns: A tuple of (found, data) where found is a boolean indicating\nwhether the token was found, and data is all the data that could be\nread from the FIFO.\n\nNote: When a token is found the token is also read from the buffer and\nreturned in the data.", "source": "codesearchnet"}
{"code": "def decorator(wrapped_decorator):\n\n    def helper(_func=None, **options):\n\n        def outer_wrapper(func):\n\n            @wrapping(func)\n            def inner_wrapper(*args, **kwds):\n                return wrapped_decorator(func, args, kwds, **options)\n            return inner_wrapper\n        if (_func is None):\n            return outer_wrapper\n        if options:\n            raise TypeError('positional arguments not supported')\n        return outer_wrapper(_func)\n    helper.wrapped_decorator = wrapped_decorator\n    return helper", "docstring": "Converts a function into a decorator that optionally accepts keyword\narguments in its declaration.\n\nExample usage:\n@utils.decorator\ndef decorator(func, args, kwds, op1=None):\n... apply op1 ...\nreturn func(*args, **kwds)\n\n# Form (1), vanilla\n@decorator\nfoo(...)\n...\n\n# Form (2), with options\n@decorator(op1=5)\nfoo(...)\n...\n\nArgs:\nwrapped_decorator: A function that accepts positional args (func, args,\nkwds) and any additional supported keyword arguments.\n\nReturns:\nA decorator with an additional 'wrapped_decorator' property that is set to\nthe original function.", "source": "codesearchnet"}
{"code": "def run_inference(examples, serving_bundle):\n    batch_size = 64\n    if (serving_bundle.estimator and serving_bundle.feature_spec):\n        preds = serving_bundle.estimator.predict((lambda : tf.data.Dataset.from_tensor_slices(tf.parse_example([ex.SerializeToString() for ex in examples], serving_bundle.feature_spec)).batch(batch_size)))\n        if serving_bundle.use_predict:\n            preds_key = serving_bundle.predict_output_tensor\n        elif (serving_bundle.model_type == 'regression'):\n            preds_key = 'predictions'\n        else:\n            preds_key = 'probabilities'\n        values = []\n        for pred in preds:\n            values.append(pred[preds_key])\n        return common_utils.convert_prediction_values(values, serving_bundle)\n    elif serving_bundle.custom_predict_fn:\n        values = serving_bundle.custom_predict_fn(examples)\n        return common_utils.convert_prediction_values(values, serving_bundle)\n    else:\n        return platform_utils.call_servo(examples, serving_bundle)", "docstring": "Run inference on examples given model information\n\nArgs:\nexamples: A list of examples that matches the model spec.\nserving_bundle: A `ServingBundle` object that contains the information to\nmake the inference request.\n\nReturns:\nA ClassificationResponse or RegressionResponse proto.", "source": "codesearchnet"}
{"code": "def matmul(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Matmul().symbolic_call(x1, x2)\n    return backend.numpy.matmul(x1, x2)", "docstring": "Matrix product of two tensors.\n\n- If both tensors are 1-dimensional, the dot product (scalar) is returned.\n- If either tensor is N-D, N > 2, it is treated as a stack of matrices\nresiding in the last two indexes and broadcast accordingly.\n- If the first tensor is 1-D, it is promoted to a matrix by prepending\na 1 to its dimensions. After matrix multiplication the prepended\n1 is removed.\n- If the second tensor is 1-D, it is promoted to a matrix by appending a 1\nto its dimensions. After matrix multiplication the appended 1 is removed.\n\nArgs:\nx1: First tensor.\nx2: Second tensor.\n\nReturns:\nOutput tensor, matrix product of the inputs.", "source": "github-repos"}
{"code": "def add_time_step(self, **create_time_step_kwargs):\n    \n    ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)\n    assert isinstance(ts, time_step.TimeStep)\n    self._time_steps.append(ts)", "docstring": "Creates a time-step and appends it to the list.\n\nArgs:\n**create_time_step_kwargs: Forwarded to\ntime_step.TimeStep.create_time_step.", "source": "juraj-google-style"}
{"code": "def local_set_state(self, device, state, id_override=None, type_override=None):\n    if ALLOW_LOCAL_CONTROL:\n        if (device.local_id() is not None):\n            hub = HUBS.get(device.hub_id())\n            if ((hub is None) or (hub['token'] is None)):\n                return self.set_device_state(device, state, id_override, type_override)\n        else:\n            return self.set_device_state(device, state, id_override, type_override)\n        _LOGGER.info('Setting local state')\n        local_id = (id_override or device.local_id().split('.')[0])\n        object_type = (type_override or device.object_type())\n        LOCAL_API_HEADERS['Authorization'] = ('Bearer ' + hub['token'])\n        url_string = 'https:\n        try:\n            arequest = requests.put(url_string, data=json.dumps(state), headers=LOCAL_API_HEADERS, verify=False, timeout=3)\n        except requests.exceptions.RequestException:\n            _LOGGER.error('Error sending local control request. Sending request online')\n            return self.set_device_state(device, state, id_override, type_override)\n        response_json = arequest.json()\n        _LOGGER.debug('%s', response_json)\n        temp_state = device.json_state\n        for (key, value) in response_json['data']['last_reading'].items():\n            temp_state['last_reading'][key] = value\n        return temp_state\n    else:\n        return self.set_device_state(device, state, id_override, type_override)", "docstring": "Set device state via local API, and fall back to online API.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nstate (Dict): The state being requested.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\nresponse_json (Dict): The API's response in dictionary format", "source": "codesearchnet"}
{"code": "def get_elements_between_bands(self, band_i, band_j):\n    if ((band_i < 1) or (band_i > self.nb_bands) or (band_j < 1) or (band_j > self.nb_bands)):\n        raise ValueError('Band index out of bounds')\n    return self.data[(:, (band_i - 1), (band_j - 1), :)]", "docstring": "Method returning a numpy array with elements\n\n[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]\n\nbetween bands band_i and band_j (vasp 1-based indexing) for all kpoints.\n\nArgs:\nband_i (Integer): Index of band i\nband_j (Integer): Index of band j\n\nReturns:\na numpy list of elements for each kpoint", "source": "codesearchnet"}
{"code": "def _get_anchor(module_to_name, fullname):\n  \n  if not _anchor_re.match(fullname):\n    raise ValueError(\"'%s' is not a valid anchor\" % fullname)\n  anchor = fullname\n  for module_name in module_to_name.values():\n    if fullname.startswith(module_name + \".\"):\n      rest = fullname[len(module_name)+1:]\n      \n      if len(anchor) > len(rest):\n        anchor = rest\n  return anchor", "docstring": "Turn a full member name into an anchor.\n\nArgs:\nmodule_to_name: Dictionary mapping modules to short names.\nfullname: Fully qualified name of symbol.\n\nReturns:\nHTML anchor string.  The longest module name prefix of fullname is\nremoved to make the anchor.\n\nRaises:\nValueError: If fullname uses characters invalid in an anchor.", "source": "juraj-google-style"}
{"code": "def decode_image_tokens(self, image_tokens: torch.Tensor):\n    decoded_image = self.model.vqmodel.decode(image_tokens)\n    decoded_image = decoded_image.permute(0, 2, 3, 1)\n    return decoded_image", "docstring": "Decodes generated image tokens from language model to continuous pixel values\nwith VQGAN module via upsampling.\nArgs:\nimage_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def _bits_in_condition(self, cond):\n        \n        all_bits = []\n        if cond is not None:\n            all_bits.extend([(cond[0], j) for j in range(self.cregs[cond[0].name].size)])\n        return all_bits", "docstring": "Return a list of bits in the given condition.\n\nArgs:\ncond (tuple or None): optional condition (ClassicalRegister, int)\n\nReturns:\nlist[(ClassicalRegister, idx)]: list of bits", "source": "juraj-google-style"}
{"code": "def gaussian_pdf(std=10.0, mean=0.0):\n    norm_const = 1.0\n\n    def pdf(x):\n        return ((norm_const * np.exp(((- 0.5) * (((x - mean) / std) ** 2)))) * np.sin(((np.pi / 180.0) * x)))\n    norm_dev = quad(pdf, 0.0, 180.0)[0]\n    norm_const /= norm_dev\n    return pdf", "docstring": "Gaussian PDF for orientation averaging.\n\nArgs:\nstd: The standard deviation in degrees of the Gaussian PDF\nmean: The mean in degrees of the Gaussian PDF.  This should be a number\nin the interval [0, 180)\n\nReturns:\npdf(x), a function that returns the value of the spherical Jacobian-\nnormalized Gaussian PDF with the given STD at x (degrees). It is\nnormalized for the interval [0, 180].", "source": "codesearchnet"}
{"code": "def _add_tag(self, tag):\n    tags = self.data.get('tags', None)\n    if tags:\n        if (tag in [x['name'] for x in tags]):\n            return False\n    else:\n        tags = list()\n    tags.append({'name': tag})\n    self.data['tags'] = tags\n    return True", "docstring": "Add a tag\n\nArgs:\ntag (str): Tag to add\n\nReturns:\nbool: True if tag added or False if tag already present", "source": "codesearchnet"}
{"code": "def fuzzUsufy(fDomains=None, fFuzzStruct=None):\n    if (fFuzzStruct == None):\n        fuzzingStructures = ['http:\n    else:\n        try:\n            fuzzingStructures = fFuzzStruct.read().splitlines()\n        except:\n            print(('Usufy could NOT open the following file: ' + fFuzzStruct))\n    res = {}\n    lines = fDomains.read().splitlines()\n    for l in lines:\n        domain = l.split()[0]\n        print((('Performing tests for' + domain) + '...'))\n        nick = l.split()[1]\n        possibleURL = []\n        for struct in fuzzingStructures:\n            urlToTry = struct.replace('<DOMAIN>', domain)\n            test = urlToTry.replace('<USERNAME>', nick.lower())\n            print((('Processing ' + test) + '...'))\n            i3Browser = browser.Browser()\n            try:\n                html = i3Browser.recoverURL(test)\n                if (nick in html):\n                    possibleURL.append(test)\n                    print(general.success('\\tPossible usufy found!!!\\n'))\n            except:\n                print('The resource could not be downloaded.')\n        res[domain] = possibleURL\n    print(json.dumps(res, indent=2))\n    return res", "docstring": "Method to guess the usufy path against a list of domains or subdomains.\n\nArgs:\n-----\nfDomains: A list to strings containing the domains and (optionally) a\nnick.\nfFuzzStruct: A list to strings containing the transforms to be\nperformed.\n\nReturns:\n--------\ndict: A dictionary of the form of `{\"domain\": \"url\"}`.", "source": "codesearchnet"}
{"code": "def update(self, force=False):\n        \n\n        \n        if self.is_404 and not force:\n            return 0\n\n        if self._last_modified:\n            headers = {'If-Modified-Since': self._last_modified}\n        else:\n            headers = None\n\n        \n        try:\n            res = self._board._requests_session.get(self._api_url, headers=headers)\n        except:\n            \n            return 0\n\n        \n        if res.status_code == 304:\n            return 0\n\n        \n        elif res.status_code == 404:\n            self.is_404 = True\n            \n            self._board._thread_cache.pop(self.id, None)\n            return 0\n\n        elif res.status_code == 200:\n            \n            if self.is_404:\n                self.is_404 = False\n                self._board._thread_cache[self.id] = self\n\n            \n            self.want_update = False\n            self.omitted_images = 0\n            self.omitted_posts = 0\n\n            self._last_modified = res.headers['Last-Modified']\n            posts = res.json()['posts']\n\n            original_post_count = len(self.replies)\n            self.topic = Post(self, posts[0])\n\n            if self.last_reply_id and not force:\n                self.replies.extend(Post(self, p) for p in posts if p['no'] > self.last_reply_id)\n            else:\n                self.replies[:] = [Post(self, p) for p in posts[1:]]\n\n            new_post_count = len(self.replies)\n            post_count_delta = new_post_count - original_post_count\n            if not post_count_delta:\n                return 0\n\n            self.last_reply_id = self.replies[-1].post_number\n\n            return post_count_delta\n\n        else:\n            res.raise_for_status()", "docstring": "Fetch new posts from the server.\n\nArguments:\nforce (bool): Force a thread update, even if thread has 404'd.\n\nReturns:\nint: How many new posts have been fetched.", "source": "juraj-google-style"}
{"code": "def construct_graph(sakefile, settings):\n    verbose = settings['verbose']\n    sprint = settings['sprint']\n    G = nx.DiGraph()\n    sprint('Going to construct Graph', level='verbose')\n    for target in sakefile:\n        if (target == 'all'):\n            continue\n        if ('formula' not in sakefile[target]):\n            for atomtarget in sakefile[target]:\n                if (atomtarget == 'help'):\n                    continue\n                sprint(\"Adding '{}'\".format(atomtarget), level='verbose')\n                data_dict = sakefile[target][atomtarget]\n                data_dict['parent'] = target\n                G.add_node(atomtarget, **data_dict)\n        else:\n            sprint(\"Adding '{}'\".format(target), level='verbose')\n            G.add_node(target, **sakefile[target])\n    sprint('Nodes are built\\nBuilding connections', level='verbose')\n    for node in G.nodes(data=True):\n        sprint('checking node {} for dependencies'.format(node[0]), level='verbose')\n        for (k, v) in node[1].items():\n            if (v is None):\n                node[1][k] = []\n        if ('output' in node[1]):\n            for (index, out) in enumerate(node[1]['output']):\n                node[1]['output'][index] = clean_path(node[1]['output'][index])\n        if ('dependencies' not in node[1]):\n            continue\n        sprint('it has dependencies', level='verbose')\n        connects = []\n        for (index, dep) in enumerate(node[1]['dependencies']):\n            dep = os.path.normpath(dep)\n            shrt = 'dependencies'\n            node[1]['dependencies'][index] = clean_path(node[1][shrt][index])\n    for node in G.nodes(data=True):\n        connects = []\n        if ('dependencies' not in node[1]):\n            continue\n        for dep in node[1]['dependencies']:\n            matches = check_for_dep_in_outputs(dep, verbose, G)\n            if (not matches):\n                continue\n            for match in matches:\n                sprint('Appending {} to matches'.format(match), level='verbose')\n                connects.append(match)\n        if connects:\n            for connect in connects:\n                G.add_edge(connect, node[0])\n    return G", "docstring": "Takes the sakefile dictionary and builds a NetworkX graph\n\nArgs:\nA dictionary that is the parsed Sakefile (from sake.py)\nThe settings dictionary\n\nReturns:\nA NetworkX graph", "source": "codesearchnet"}
{"code": "def should_drop(self):\n    if self._drop_if_none and self.value is None:\n        return True\n    if self._drop_if_default and self.value == self._default:\n        return True\n    return False", "docstring": "Return True if the item should be dropped, or False if it should not\nbe dropped. This depends on the drop_if_none, and drop_if_default calls.\n\nReturns:\nTrue or False; depending on whether the item should be dropped or kept.", "source": "github-repos"}
{"code": "def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):\n    units = (units if (units != None) else {})\n    return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0, flags={}, units=units, parens_as_neg=parens_as_neg)", "docstring": "Performs a first step conversion of the cell to check\nit's type or try to convert if a valid conversion exists.\nThis version of conversion doesn't flag changes nor store\ncell units.\n\nArgs:\nunits: The dictionary holder for cell units.\nparens_as_neg: Converts numerics surrounded by parens to\nnegative values", "source": "codesearchnet"}
{"code": "def enrich_json_objects_by_object_type(request, value):\n    \n    time_start_globally = time()\n    if isinstance(value, list):\n        json = [x.to_json() if hasattr(x, \"to_json\") else x for x in value]\n    else:\n        if isinstance(value, dict):\n            json = value\n        else:\n            json = value.to_json()\n    objects, nested = _collect_json_objects(json, by='object_type')\n    for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER():\n        if len(enricher_info['object_types']) > 0:\n            enricher_objects = flatten([objects.get(object_type, []) for object_type in enricher_info['object_types']])\n            enricher_nested = any([nested.get(object_type, False) for object_type in enricher_info['object_types']])\n        else:\n            enricher_objects = flatten(objects.values())\n            enricher_nested = any(nested.values())\n        if len(enricher_objects) > 0:\n            time_start = time()\n            enricher_info['enricher'](request, enricher_objects, enricher_nested)\n            LOGGER.debug('enrichment \"{}\" took {} seconds'.format(enricher_info['enricher_name'], time() - time_start))\n            if not enricher_info['pure']:\n                \n                \n                objects, nested = _collect_json_objects(json, by='object_type')\n    LOGGER.debug('The whole enrichment of json objects by their object_type took {} seconds.'.format(time() - time_start_globally))\n    return json", "docstring": "Take the given value and start enrichment by object_type. The va\n\nArgs:\nrequest (django.http.request.HttpRequest): request which is currently processed\nvalue (dict|list|django.db.models.Model):\nin case of django.db.models.Model object (or list of these\nobjects), to_json method is invoked\n\nReturns:\ndict|list", "source": "juraj-google-style"}
{"code": "def handle_malformed_config(error: MalformedConfigError) -> ResponseReturnValue:\n    return (DQMResponse(name='MalformedConfigError', description=str(error), code=400), 400)", "docstring": "DQM Malformed Config Response.\n\nArgs:\n* error: Config error\n\nReturns:\n* DQMResponse for the error with a 400 status code", "source": "github-repos"}
{"code": "def start_range(self, line, membership):\n    last = self._transitions[-1] if self._transitions else -1\n    if line < last:\n        raise ValueError('Line number less than previous start_range() call.')\n    previous = len(self._transitions) % 2 == 1\n    if membership == previous:\n        return\n    elif line == last:\n        self._transitions.pop()\n    else:\n        self._transitions.append(line)", "docstring": "Start a range of lines that are either included/excluded from the set.\n\nArgs:\nline: A line number.\nmembership: If True, lines >= line are included in the set (starting a\nrange), otherwise they are excluded (ending a range).\n\nRaises:\nValueError: if line is less than that of a previous call to start_range().", "source": "github-repos"}
{"code": "def history(self, condition: Optional[Callable[['Origin'], bool]]=None) -> List['Origin']:\n    condition = condition or (lambda o: True)\n    current = self\n    history = []\n    while current is not None:\n        if condition(current):\n            history.append(current)\n        current = getattr(current.source, 'sym_origin', None)\n    history.reverse()\n    return history", "docstring": "Returns a history of origins with an optional filter.\n\nArgs:\ncondition: An optional callable object with signature\n(origin) -> should_list. If None, all origins will be listed.\n\nReturns:\nA list of filtered origin from the earliest (root) to the most recent.", "source": "github-repos"}
{"code": "def connect(self, db_uri, debug=False):\n    kwargs = {'echo': debug, 'convert_unicode': True}\n    if ('mysql' in db_uri):\n        kwargs['pool_recycle'] = 3600\n    elif (':\n        logger.debug('detected sqlite path URI: {}'.format(db_uri))\n        db_path = os.path.abspath(os.path.expanduser(db_uri))\n        db_uri = 'sqlite:\n    self.engine = create_engine(db_uri, **kwargs)\n    logger.debug('connection established successfully')\n    BASE.metadata.bind = self.engine\n    self.session = scoped_session(sessionmaker(bind=self.engine))\n    self.query = self.session.query\n    return self", "docstring": "Configure connection to a SQL database.\n\nArgs:\ndb_uri (str): path/URI to the database to connect to\ndebug (Optional[bool]): whether to output logging information", "source": "codesearchnet"}
{"code": "def write_config(config, config_path=CONFIG_PATH):\n    \n    if not os.path.exists(config_path):\n        os.makedirs(os.path.dirname(config_path))\n    with open(config_path, 'w', encoding='utf-8') as f:\n        config.write(f)", "docstring": "Write the config to the output path.\nCreates the necessary directories if they aren't there.\n\nArgs:\nconfig (configparser.ConfigParser): A ConfigParser.", "source": "juraj-google-style"}
{"code": "def cylindrical_vert(script, radius=1.0, inside=True):\n    if inside:\n        function = 'sqrt(x^2+y^2)<={}'.format(radius)\n    else:\n        function = 'sqrt(x^2+y^2)>={}'.format(radius)\n    vert_function(script, function=function)\n    return None", "docstring": "Select all vertices within a cylindrical radius\n\nArgs:\nradius (float): radius of the sphere\ncenter_pt (3 coordinate tuple or list): center point of the sphere\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    return encoded_stream_file_entry.EncodedStreamFileEntry(\n        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nEncodedStreamFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def __call__(self, *x_batch, **kwargs) -> Union[List, np.ndarray]:\n        \n        with self.graph.as_default():\n            K.set_session(self.sess)\n            return self._net.predict_on_batch(x_batch, **kwargs)", "docstring": "Predicts answers on batch elements.\n\nArgs:\ninstance: a batch to predict answers on", "source": "juraj-google-style"}
{"code": "def json_to_entity(tc_data, value_fields, resource_type, resource_type_parent):\n    if (not isinstance(tc_data, list)):\n        tc_data = [tc_data]\n    entity_array = []\n    for d in tc_data:\n        entity = {'id': d.get('id'), 'webLink': d.get('webLink')}\n        values = []\n        if ('summary' in d):\n            values.append(d.get('summary'))\n        else:\n            for field in value_fields:\n                if (d.get(field) is not None):\n                    values.append(d.get(field))\n        entity['value'] = ' : '.join(values)\n        if (d.get('type') is not None):\n            entity['type'] = d.get('type')\n        else:\n            entity['type'] = resource_type\n        if (resource_type_parent in ['Indicator']):\n            entity['confidence'] = d.get('confidence')\n            entity['rating'] = d.get('rating')\n            entity['threatAssessConfidence'] = d.get('threatAssessConfidence')\n            entity['threatAssessRating'] = d.get('threatAssessRating')\n            entity['dateLastModified'] = d.get('lastModified')\n        if (resource_type_parent in ['Indicator', 'Group']):\n            if ('owner' in d):\n                entity['ownerName'] = d['owner']['name']\n            else:\n                entity['ownerName'] = d.get('ownerName')\n            entity['dateAdded'] = d.get('dateAdded')\n        if (resource_type_parent in ['Victim']):\n            entity['ownerName'] = d.get('org')\n        entity_array.append(entity)\n    return entity_array", "docstring": "Convert ThreatConnect JSON response to a TCEntityArray.\n\n.. Attention:: This method is subject to frequent changes.\n\nArgs:\ntc_data (dictionary): Array of data returned from TC API call.\nvalue_fields (list): Field names that contain the \"value\" data.\nresource_type (string): The resource type of the tc_data provided.\nresource_type_parent (string): The resource parent type of the tc_data provided.\n\nReturns:\n(list): A list representing a TCEntityArray.", "source": "codesearchnet"}
{"code": "def model_call_event(self) -> asyncio.Event:\n    return self._model_call_event", "docstring": "Returns an event that is set when the wrapped processor has all parts.\n\nThe event is set when the wrapped processor has all the input parts and\nis about to start generating the output.\n\nThe event starts in a cleared state when the first part of the input\nstream is yielded. It is also cleared at the end of the wrappedprocessor,\nwhen all the output parts have been yielded.\n\nIts default value is unset and this event is set only for a short time\nduring the call.\n\nReturns:\nAn event that is set when the model call is started, that is when all the\ninput parts have been sent to the wrapped processor.", "source": "github-repos"}
{"code": "def determine_git_ref(self, config):\n    ref_config_keys = 0\n    for i in ['commit', 'tag', 'branch']:\n        if config.get(i):\n            ref_config_keys += 1\n    if (ref_config_keys > 1):\n        raise ImportError(\"Fetching remote git sources failed: conflicting revisions (e.g. 'commit', 'tag', 'branch') specified for a package source\")\n    if config.get('commit'):\n        ref = config['commit']\n    elif config.get('tag'):\n        ref = config['tag']\n    else:\n        ref = self.git_ls_remote(config['uri'], self.determine_git_ls_remote_ref(config))\n    if ((sys.version_info[0] > 2) and isinstance(ref, bytes)):\n        return ref.decode()\n    return ref", "docstring": "Determine the ref to be used for 'git checkout'.\n\nArgs:\nconfig (dict): git config dictionary\n\nReturns:\nstr: A commit id or tag name", "source": "codesearchnet"}
{"code": "def get_ip_reports(self, ips):\n    api_name = 'virustotal-ip-address-reports'\n    (all_responses, ips) = self._bulk_cache_lookup(api_name, ips)\n    responses = self._request_reports('ip', ips, 'ip-address/report')\n    for (ip, response) in zip(ips, responses):\n        if self._cache:\n            self._cache.cache_value(api_name, ip, response)\n        all_responses[ip] = response\n    return all_responses", "docstring": "Retrieves the most recent VT info for a set of ips.\n\nArgs:\nips: list of IPs.\nReturns:\nA dict with the IP as key and the VT report as value.", "source": "codesearchnet"}
{"code": "def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):\n    if (len(a_aln_seq) != len(b_aln_seq)):\n        raise ValueError('Sequence lengths not equal - was an alignment run?')\n    if (not a_seq_id):\n        a_seq_id = 'a_seq'\n    if (not b_seq_id):\n        b_seq_id = 'b_seq'\n    a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)\n    b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)\n    a_idx = 1\n    b_idx = 1\n    appender = []\n    for (i, (a, b)) in enumerate(zip(a_aln_seq, b_aln_seq)):\n        to_append = {}\n        if ((a == b) and (a != '-') and (b != '-')):\n            aa_flag = 'match'\n        elif ((a != b) and (a == '-') and (b != '-')):\n            aa_flag = 'insertion'\n        elif ((a != b) and (a != '-') and (b == '-')):\n            aa_flag = 'deletion'\n        elif ((a != b) and (a != '-') and (b == 'X')):\n            aa_flag = 'unresolved'\n        elif ((a != b) and (b != '-') and (a == 'X')):\n            aa_flag = 'unresolved'\n        elif ((a != b) and (a != '-') and (b != '-')):\n            aa_flag = 'mutation'\n        to_append['id_a'] = a_seq_id\n        to_append['id_b'] = b_seq_id\n        to_append['type'] = aa_flag\n        if ((aa_flag == 'match') or (aa_flag == 'unresolved') or (aa_flag == 'mutation')):\n            to_append['id_a_aa'] = a\n            to_append['id_a_pos'] = int(a_idx)\n            to_append['id_b_aa'] = b\n            to_append['id_b_pos'] = int(b_idx)\n            a_idx += 1\n            b_idx += 1\n        if (aa_flag == 'deletion'):\n            to_append['id_a_aa'] = a\n            to_append['id_a_pos'] = int(a_idx)\n            a_idx += 1\n        if (aa_flag == 'insertion'):\n            to_append['id_b_aa'] = b\n            to_append['id_b_pos'] = int(b_idx)\n            b_idx += 1\n        appender.append(to_append)\n    cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']\n    alignment_df = pd.DataFrame.from_records(appender, columns=cols)\n    alignment_df = alignment_df.fillna(value=np.nan)\n    return alignment_df", "docstring": "Summarize two alignment strings in a dataframe.\n\nArgs:\na_aln_seq (str): Aligned sequence string\nb_aln_seq (str): Aligned sequence string\na_seq_id (str): Optional ID of a_seq\nb_seq_id (str): Optional ID of b_aln_seq\n\nReturns:\nDataFrame: a per-residue level annotation of the alignment", "source": "codesearchnet"}
{"code": "def find_gaps(self, index=False):\n        \n        return self.__find_incongruities(op=operator.lt, index=index)", "docstring": "Finds gaps in a striplog.\n\nArgs:\nindex (bool): If True, returns indices of intervals with\ngaps after them.\n\nReturns:\nStriplog: A striplog of all the gaps. A sort of anti-striplog.", "source": "juraj-google-style"}
{"code": "def write(data, file_name, worksheet_names=None):\n    if re.search(XML_EXT_REGEX, file_name):\n        return write_xml(data, file_name, worksheet_names=worksheet_names)\n    elif re.search(XLSX_EXT_REGEX, file_name):\n        return write_xlsx(data, file_name, worksheet_names=worksheet_names)\n    elif re.search(XLS_EXT_REGEX, file_name):\n        return write_xls(data, file_name, worksheet_names=worksheet_names)\n    elif re.search(CSV_EXT_REGEX, file_name):\n        return write_csv(data, file_name)\n    else:\n        return write_csv(data, file_name)", "docstring": "Writes 2D tables to file.\n\nArgs:\ndata: 2D list of tables/worksheets.\nfile_name: Name of the output file (determines type).\nworksheet_names: A list of worksheet names (optional).", "source": "codesearchnet"}
{"code": "def _terminate_all(self, sig=None):\n    sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)\n    for (task_type, task_id), p in self._processes.items():\n        if p.exitcode is not None:\n            logging.info('%s-%d has already exited. Not terminating.', task_type, task_id)\n            continue\n        try:\n            os.kill(p.pid, sig)\n            self._terminated.add((task_type, task_id))\n            logging.info('%s-%d terminated with signal %r.', task_type, task_id, sig)\n        except ProcessLookupError:\n            logging.info('Attempting to kill %s-%d but it does not exist.', task_type, task_id)", "docstring": "Terminates all subprocesses.\n\nThe caller is required to hold self._process_lock.\n\nArgs:\nsig: the signal used to terminate the process. The default is SIGKILL.", "source": "github-repos"}
{"code": "def poll_stack(self):\n        \n        logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))\n        time.sleep(POLL_INTERVAL)\n        completed_states = [\n            'CREATE_COMPLETE',\n            'UPDATE_COMPLETE',\n            'DELETE_COMPLETE'\n        ]\n        stack_name = self._config.get('environment', {}).get('stack_name', None)\n        while True:\n            try:\n                response = self._cloudFormation.describe_stacks(StackName=stack_name)\n                stack = response['Stacks'][0]\n                current_status = stack['StackStatus']\n                logging.info('current status of {}: {}'.format(stack_name, current_status))\n                if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):\n                    if current_status in completed_states:\n                        return True\n                    else:\n                        return False\n\n                time.sleep(POLL_INTERVAL)\n            except ClientError as wtf:\n                if str(wtf).find('does not exist') == -1:\n                    logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))\n                    traceback.print_exc(file=sys.stdout)\n                    return False\n                else:\n                    logging.info('{} is gone'.format(stack_name))\n                    return True\n            except Exception as wtf:\n                logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))\n                traceback.print_exc(file=sys.stdout)\n                return False", "docstring": "Spin in a loop while the Cloud Formation process either fails or succeeds\n\nArgs:\nNone\n\nReturns:\nGood or bad; True or False", "source": "juraj-google-style"}
{"code": "def _GetClientIdFromQueue(q):\n  \n  split = q.Split()\n  if not split or len(split) < 2:\n    return None\n\n  \n  split = [s.lower() for s in split]\n\n  str_client_id, tasks_marker = split\n\n  if not str_client_id.startswith(\"c.\") or tasks_marker != \"tasks\":\n    return None\n\n  \n  str_client_id = \"C\" + str_client_id[1:]\n\n  return str_client_id", "docstring": "Returns q's client id, if q is a client task queue, otherwise None.\n\nArgs:\nq: rdfvalue.RDFURN\n\nReturns:\nstring or None", "source": "juraj-google-style"}
{"code": "def _ReadAttributeValueString(self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset):\n    if (attribute_value_offset == 0):\n        return None\n    data_type_map = self._GetDataTypeMap('keychain_string')\n    file_offset = ((record_offset + attribute_values_data_offset) + attribute_value_offset)\n    attribute_value_offset -= (attribute_values_data_offset + 1)\n    attribute_value_data = attribute_values_data[attribute_value_offset:]\n    try:\n        string_attribute_value = self._ReadStructureFromByteStream(attribute_value_data, file_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map string attribute value data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    return string_attribute_value.string", "docstring": "Reads a string attribute value.\n\nArgs:\nattribute_values_data (bytes): attribute values data.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\nattribute_values_data_offset (int): offset of the attribute values data\nrelative to the start of the record.\nattribute_value_offset (int): offset of the attribute relative to\nthe start of the record.\n\nReturns:\nstr: string value or None if attribute value offset is not set.\n\nRaises:\nParseError: if the attribute value cannot be read.", "source": "codesearchnet"}
{"code": "def get_occupation(self, atom_index, orbital):\n    orbital_index = self.orbitals.index(orbital)\n    return {spin: np.sum((d[(:, :, atom_index, orbital_index)] * self.weights[(:, None)])) for (spin, d) in self.data.items()}", "docstring": "Returns the occupation for a particular orbital of a particular atom.\n\nArgs:\natom_num (int): Index of atom in the PROCAR. It should be noted\nthat VASP uses 1-based indexing for atoms, but this is\nconverted to 0-based indexing in this parser to be\nconsistent with representation of structures in pymatgen.\norbital (str): An orbital. If it is a single character, e.g., s,\np, d or f, the sum of all s-type, p-type, d-type or f-type\norbitals occupations are returned respectively. If it is a\nspecific orbital, e.g., px, dxy, etc., only the occupation\nof that orbital is returned.\n\nReturns:\nSum occupation of orbital of atom.", "source": "codesearchnet"}
{"code": "def getUserCaPath(self, name):\n    cert = self.getUserCert(name)\n    if (cert is None):\n        return None\n    return self._getCaPath(cert)", "docstring": "Gets the path to the CA certificate that issued a given user keypair.\n\nArgs:\nname (str): The name of the user keypair.\n\nExamples:\nGet the path to the CA cert which issue the cert for \"myuser\":\n\nmypath = cdir.getUserCaPath('myuser')\n\nReturns:\nstr: The path if exists.", "source": "codesearchnet"}
{"code": "def _configure_tls_parameters(parameters):\n    \n    cert = config.conf[\"tls\"][\"certfile\"]\n    key = config.conf[\"tls\"][\"keyfile\"]\n    if cert and key:\n        _log.info(\n            \"Authenticating with server using x509 (certfile: %s, keyfile: %s)\",\n            cert,\n            key,\n        )\n        parameters.credentials = pika.credentials.ExternalCredentials()\n    else:\n        cert, key = None, None\n\n    if SSLOptions is None:\n        parameters.ssl = True\n        parameters.ssl_options = {\n            \"keyfile\": key,\n            \"certfile\": cert,\n            \"ca_certs\": config.conf[\"tls\"][\"ca_cert\"],\n            \"cert_reqs\": ssl.CERT_REQUIRED,\n            \"ssl_version\": ssl.PROTOCOL_TLSv1_2,\n        }\n    else:\n        ssl_context = ssl.create_default_context()\n        if config.conf[\"tls\"][\"ca_cert\"]:\n            try:\n                ssl_context.load_verify_locations(cafile=config.conf[\"tls\"][\"ca_cert\"])\n            except ssl.SSLError as e:\n                raise ConfigurationException(\n                    'The \"ca_cert\" setting in the \"tls\" section is invalid ({})'.format(\n                        e\n                    )\n                )\n        ssl_context.options |= ssl.OP_NO_SSLv2\n        ssl_context.options |= ssl.OP_NO_SSLv3\n        ssl_context.options |= ssl.OP_NO_TLSv1\n        ssl_context.options |= ssl.OP_NO_TLSv1_1\n        ssl_context.verify_mode = ssl.CERT_REQUIRED\n        ssl_context.check_hostname = True\n        if cert and key:\n            try:\n                ssl_context.load_cert_chain(cert, key)\n            except ssl.SSLError as e:\n                raise ConfigurationException(\n                    'The \"keyfile\" setting in the \"tls\" section is invalid ({})'.format(\n                        e\n                    )\n                )\n        parameters.ssl_options = SSLOptions(\n            ssl_context, server_hostname=parameters.host\n        )", "docstring": "Configure the pika connection parameters for TLS based on the configuration.\n\nThis modifies the object provided to it. This accounts for whether or not\nthe new API based on the standard library's SSLContext is available for\npika.\n\nArgs:\nparameters (pika.ConnectionParameters): The connection parameters to apply\nTLS connection settings to.", "source": "juraj-google-style"}
{"code": "def new(self, injection_site_fn):\n    return _InjectionContext(injection_site_fn, binding_stack=[], scope_id=scoping.UNSCOPED, is_scope_usable_from_scope_fn=self._is_scope_usable_from_scope_fn)", "docstring": "Creates a _InjectionContext.\n\nArgs:\ninjection_site_fn: the initial function being injected into\nReturns:\na new empty _InjectionContext in the default scope", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    values_dict = {}\n\n    if registry_key.number_of_values > 0:\n      for registry_value in registry_key.GetValues():\n        value_name = registry_value.name or '(default)'\n\n        if registry_value.DataIsString():\n          value_string = '[{0:s}] {1:s}'.format(\n              registry_value.data_type_string, registry_value.GetDataAsObject())\n\n        elif registry_value.DataIsInteger():\n          value_string = '[{0:s}] {1:d}'.format(\n              registry_value.data_type_string, registry_value.GetDataAsObject())\n\n        elif registry_value.DataIsMultiString():\n          value_string = '[{0:s}] {1:s}'.format(\n              registry_value.data_type_string, ''.join(\n                  registry_value.GetDataAsObject()))\n\n        else:\n          value_string = '[{0:s}]'.format(registry_value.data_type_string)\n\n        values_dict[value_name] = value_string\n\n    \n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n    event_data.urls = self.URLS\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if registry_key.number_of_subkeys == 0:\n      error_string = 'Key: {0:s} missing subkeys.'.format(registry_key.path)\n      parser_mediator.ProduceExtractionWarning(error_string)\n      return\n\n    for zone_key in registry_key.GetSubkeys():\n      \n      \n      \n      path = '{0:s}\\\\{1:s}'.format(\n          registry_key.path, self._ZONE_NAMES[zone_key.name])\n\n      values_dict = {}\n\n      \n      \n      for value in zone_key.GetValues():\n        \n        if not value.name:\n          continue\n\n        if value.DataIsString():\n          value_string = value.GetDataAsObject()\n\n        elif value.DataIsInteger():\n          value_integer = value.GetDataAsObject()\n          if value.name in self._KNOWN_PERMISSIONS_VALUE_NAMES:\n            value_string = self._CONTROL_VALUES_PERMISSIONS.get(\n                value_integer, 'UNKNOWN')\n          elif value.name == '1A00':\n            value_string = self._CONTROL_VALUES_1A00.get(\n                value_integer, 'UNKNOWN')\n          elif value.name == '1C00':\n            value_string = self._CONTROL_VALUES_1C00.get(\n                value_integer, 'UNKNOWN')\n          elif value.name == '1E05':\n            value_string = self._CONTROL_VALUES_SAFETY.get(\n                value_integer, 'UNKNOWN')\n          else:\n            value_string = '{0:d}'.format(value_integer)\n\n        else:\n          value_string = '[{0:s}]'.format(value.data_type_string)\n\n        if len(value.name) == 4 and value.name != 'Icon':\n          value_description = self._FEATURE_CONTROLS.get(value.name, 'UNKNOWN')\n        else:\n          value_description = self._FEATURE_CONTROLS.get(value.name, '')\n\n        if value_description:\n          feature_control = '[{0:s}] {1:s}'.format(\n              value.name, value_description)\n        else:\n          feature_control = '[{0:s}]'.format(value.name)\n\n        values_dict[feature_control] = value_string\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = path\n      event_data.offset = zone_key.offset\n      event_data.regvalue = values_dict\n      event_data.urls = 
self.URLS\n\n      event = time_events.DateTimeValuesEvent(\n          zone_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def generate_support_dump(self, information, timeout=(- 1)):\n    uri = '{}/support-dumps'.format(self.data['uri'])\n    return self._helper.create(information, uri=uri, timeout=timeout)", "docstring": "Generates a support dump for the logical enclosure with the specified ID. A logical enclosure support dump\nincludes content for logical interconnects associated with that logical enclosure. By default, it also contains\nappliance support dump content.\n\nArgs:\ninformation (dict): Information to generate support dump.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Support dump.", "source": "codesearchnet"}
{"code": "def bind(self, extension: Extension) -> 'DictMentor':\n        \n        if not Extension.is_valid_extension(extension):\n            raise ValueError(\"Cannot bind extension due to missing interface requirements\")\n        self._extensions.append(extension)\n\n        return self", "docstring": "Add any predefined or custom extension.\n\nArgs:\nextension: Extension to add to the processor.\n\nReturns:\nThe DictMentor itself for chaining.", "source": "juraj-google-style"}
{"code": "def describe(self, version_name):\n    \n    version_yaml = yaml.safe_dump(self.get_version_details(version_name),\n                                  default_flow_style=False)\n    print(version_yaml)", "docstring": "Print information of a specified model.\n\nArgs:\nversion: the name of the version in short form, such as \"v1\".", "source": "juraj-google-style"}
{"code": "def split(s, posix=True):\n    if isinstance(s, six.binary_type):\n        s = s.decode('utf-8')\n    return shlex.split(s, posix=posix)", "docstring": "Split the string s using shell-like syntax.\n\nArgs:\ns (str): String to split\nposix (bool): Use posix split\n\nReturns:\nlist of str: List of string parts", "source": "codesearchnet"}
{"code": "def oxide_type(structure, relative_cutoff=1.1, return_nbonds=False):\n    \n\n    ox_obj = OxideType(structure, relative_cutoff)\n    if return_nbonds:\n        return ox_obj.oxide_type, ox_obj.nbonds\n    else:\n        return ox_obj.oxide_type", "docstring": "Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide\n\nArgs:\nstructure (Structure): Input structure.\nrelative_cutoff (float): Relative_cutoff * act. cutoff stipulates the\nmax distance two O atoms must be from each other.\nreturn_nbonds (bool): Should number of bonds be requested?", "source": "juraj-google-style"}
{"code": "def while_loop(self, context, step_method):\n    logger.debug('starting')\n    context['whileCounter'] = 0\n    if ((self.stop is None) and (self.max is None)):\n        logger.error(f'while decorator missing both max and stop.')\n        raise PipelineDefinitionError('the while decorator must have either max or stop, or both. But not neither.')\n    error_on_max = context.get_formatted_as_type(self.error_on_max, out_type=bool)\n    sleep = context.get_formatted_as_type(self.sleep, out_type=float)\n    if (self.max is None):\n        max = None\n        logger.info(f'while decorator will loop until {self.stop} evaluates to True at {sleep}s intervals.')\n    else:\n        max = context.get_formatted_as_type(self.max, out_type=int)\n        if (max < 1):\n            logger.info(f'max {self.max} is {max}. while only runs when max > 0.')\n            logger.debug('done')\n            return\n        if (self.stop is None):\n            logger.info(f'while decorator will loop {max} times at {sleep}s intervals.')\n        else:\n            logger.info(f'while decorator will loop {max} times, or until {self.stop} evaluates to True at {sleep}s intervals.')\n    if (not poll.while_until_true(interval=sleep, max_attempts=max)(self.exec_iteration)(context=context, step_method=step_method)):\n        if error_on_max:\n            logger.error(f'exhausted {max} iterations of while loop, and errorOnMax is True.')\n            if (self.stop and max):\n                raise LoopMaxExhaustedError(f'while loop reached {max} and {self.stop} never evaluated to True.')\n            else:\n                raise LoopMaxExhaustedError(f'while loop reached {max}.')\n        elif (self.stop and max):\n            logger.info(f'while decorator looped {max} times, and {self.stop} never evaluated to True.')\n        logger.debug('while loop done')\n    else:\n        logger.info(f'while loop done, stop condition {self.stop} evaluated True.')\n    logger.debug('done')", "docstring": "Run step inside a while loop.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate - after method execution will contain the new\nupdated context.\nstep_method: (method/function) This is the method/function that\nwill execute on every loop iteration. Signature is:\nfunction(context)", "source": "codesearchnet"}
{"code": "def generate_output(line='0', short=None, name=None, value=None, is_parent=False, colorize=True):\n    output = '{0}{1}{2}{3}{4}{5}{6}{7}\\n'.format((LINES['{0}{1}'.format(line, ('C' if colorize else ''))] if (line in LINES.keys()) else ''), (COLOR_DEPTH[line] if (colorize and (line in COLOR_DEPTH)) else ''), ANSI['b'], (short if (short is not None) else (name if (name is not None) else '')), ('' if ((name is None) or (short is None)) else ' ({0})'.format(name)), ('' if ((name is None) and (short is None)) else ': '), (ANSI['end'] if colorize else ''), ('' if is_parent else value))\n    return output", "docstring": "The function for formatting CLI output results.\n\nArgs:\nline (:obj:`str`): The line number (0-4). Determines indentation.\nDefaults to '0'.\nshort (:obj:`str`): The optional abbreviated name for a field.\nSee hr.py for values.\nname (:obj:`str`): The optional name for a field. See hr.py for values.\nvalue (:obj:`str`): The field data (required).\nis_parent (:obj:`bool`): Set to True if the field value has sub-items\n(dicts/lists). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI colors.\nDefaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def run_using_threadpool(fn_to_execute, inputs, pool_size):\n    if not hasattr(threading.current_thread(), '_children'):\n        threading.current_thread()._children = weakref.WeakKeyDictionary()\n    pool = ThreadPool(min(pool_size, len(inputs)))\n    try:\n        old_level = logging.getLogger().level\n        return pool.map(fn_to_execute, inputs)\n    finally:\n        pool.terminate()\n        logging.getLogger().setLevel(old_level)", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nRuns the given function on given inputs using a thread pool.\n\nArgs:\nfn_to_execute: Function to execute\ninputs: Inputs on which given function will be executed in parallel.\npool_size: Size of thread pool.\nReturns:\nResults retrieved after executing the given function on given inputs.", "source": "github-repos"}
{"code": "def _GetPlistRootKey(self, file_entry):\n    file_object = file_entry.GetFileObject()\n    try:\n        plist_file = plist.PlistFile()\n        plist_file.Read(file_object)\n    except IOError as exception:\n        location = getattr(file_entry.path_spec, 'location', '')\n        raise errors.PreProcessFail('Unable to read plist file: {0:s} with error: {1!s}'.format(location, exception))\n    finally:\n        file_object.close()\n    return plist_file.root_key", "docstring": "Retrieves the root key of a plist file.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry of the plist.\n\nReturns:\ndict[str, object]: plist root key.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "codesearchnet"}
{"code": "def cli_print(msg, color='', end=None, file=sys.stdout, logger=_LOG):\n    if logger:\n        logger.debug('-> {}'.format(msg))\n    if CLI_QUIET:\n        return\n    if (end is None):\n        end = _linesep_for_file(file)\n    file.write('{color}{msg}{reset}{end}'.format(color=color, msg=msg, reset=colorama.Style.RESET_ALL, end=end))", "docstring": "Print the message to file and also log it.\n\nThis function is intended as a 'tee' mechanism to enable the CLI interface as\na first-class citizen, while ensuring that everything the operator sees also\nhas an analogous logging entry in the test record for later inspection.\n\nArgs:\nmsg: The message to print/log.\ncolor: Optional colorama color string to be applied to the message. You can\nconcatenate colorama color strings together in order to get any set of\neffects you want.\nend: A custom line-ending string to print instead of newline.\nfile: A file object to which the baracketed text will be written. Intended\nfor use with CLI output file objects like sys.stdout.\nlogger: A logger to use, or None to disable logging.", "source": "codesearchnet"}
{"code": "def get_uid(prefix=''):\n    object_name_uids = global_state.get_global_attribute('object_name_uids', default=collections.defaultdict(int), set_to_default=True)\n    object_name_uids[prefix] += 1\n    return object_name_uids[prefix]", "docstring": "Associates a string prefix with an integer counter.\n\nArgs:\nprefix: String prefix to index.\n\nReturns:\nUnique integer ID.\n\nExample:\n\n>>> get_uid('dense')\n1\n>>> get_uid('dense')\n2", "source": "github-repos"}
{"code": "def generate_hdate(date: str, subtract_year: str) -> str:\n    try:\n        input_date = datetime.datetime.strptime(date, '%Y-%m-%d')\n        if input_date.month == 2 and input_date.day == 29:\n            input_date = input_date - datetime.timedelta(days=1)\n        subtract_year = int(subtract_year)\n    except (ValueError, TypeError):\n        logger.error('Invalid input.')\n        raise\n    hdate = input_date - relativedelta(years=subtract_year)\n    return hdate.strftime('%Y-%m-%d')", "docstring": "Generate a historical date by subtracting a specified number of years from the given date.\nIf input date is leap day (Feb 29), return Feb 28 even if target hdate is also a leap year.\nThis is expected in ECMWF API.\n\nArgs:\ndate (str): The input date in the format 'YYYY-MM-DD'.\nsubtract_year (str): The number of years to subtract.\n\nReturns:\nstr: The historical date in the format 'YYYY-MM-DD'.", "source": "github-repos"}
{"code": "def add(self, value, date=None, return_value=False, key=None):\n        \n        data = {}\n        if self._metric_id is None:\n            self.tcex.handle_error(715, [self._metric_name])\n\n        body = {'value': value}\n        if date is not None:\n            body['date'] = self.tcex.utils.format_datetime(date, date_format='%Y-%m-%dT%H:%M:%SZ')\n        if key is not None:\n            body['name'] = key\n        self.tcex.log.debug('metric data: {}'.format(body))\n        params = {}\n        if return_value:\n            params = {'returnValue': 'true'}\n        url = '/v2/customMetrics/{}/data'.format(self._metric_id)\n        r = self.tcex.session.post(url, json=body, params=params)\n        if r.status_code == 200 and 'application/json' in r.headers.get('content-type', ''):\n            data = r.json()\n        elif r.status_code == 204:\n            pass\n        else:\n            self.tcex.handle_error(710, [r.status_code, r.text])\n\n        return data", "docstring": "Add metrics data to collection.\n\nArgs:\nvalue (str): The value of the metric.\ndate (str, optional): The optional date of the metric.\nreturn_value (bool, default:False): Tell the API to return the updates metric value.\nkey (str, optional): The key value for keyed metrics.\n\nReturn:\ndict: If return_value is True a dict with the current value for the time period\nis returned.", "source": "juraj-google-style"}
{"code": "def html_to_xhtml(html_unicode_string):\n    \n    try:\n        assert isinstance(html_unicode_string, basestring)\n    except AssertionError:\n        raise TypeError\n    root = BeautifulSoup(html_unicode_string, 'html.parser')\n    \n    try:\n        assert root.html is not None\n    except AssertionError:\n        raise ValueError(''.join(['html_unicode_string cannot be a fragment.',\n                         'string is the following: %s', unicode(root)]))\n    \n    root.html['xmlns'] = 'http:\n    unicode_string = unicode(root.prettify(encoding='utf-8', formatter='html'), encoding='utf-8')\n    \n    for tag in constants.SINGLETON_TAG_LIST:\n        unicode_string = unicode_string.replace(\n                '<' + tag + '/>',\n                '<' + tag + ' />')\n    return unicode_string", "docstring": "Converts html to xhtml\n\nArgs:\nhtml_unicode_string: A (possible unicode) string representing HTML.\n\nReturns:\nA (possibly unicode) string representing XHTML.\n\nRaises:\nTypeError: Raised if input_string isn't a unicode string or string.", "source": "juraj-google-style"}
{"code": "def cumprod(x, axis=0):\n    return math_ops.cumprod(x, axis=axis)", "docstring": "Cumulative product of the values in a tensor, alongside the specified axis.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to compute the product.\n\nReturns:\nA tensor of the cumulative product of values of `x` along `axis`.", "source": "github-repos"}
{"code": "def revert_to(self):\n\n\t\t\n\n\t\t\n\t\tresponse = self.resource.repo.api.http_request('PATCH', self.uri)\n\n\t\t\n\t\tif response.status_code == 204:\n\t\t\tlogger.debug('reverting to previous version of resource, %s' % self.uri)\n\n\t\t\t\n\t\t\tself._current_resource.refresh()\n\n\t\telse:\n\t\t\traise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri))", "docstring": "method to revert resource to this version by issuing PATCH\n\nArgs:\nNone\n\nReturns:\nNone: sends PATCH request, and refreshes parent resource", "source": "juraj-google-style"}
{"code": "def _zeros_slot(self, var, slot_name, op_name):\n    named_slots = self._slot_dict(slot_name)\n    if _var_key(var) not in named_slots:\n        new_slot_variable = slot_creator.create_zeros_slot(var, op_name, copy_xla_sharding=True)\n        self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n        named_slots[_var_key(var)] = new_slot_variable\n    return named_slots[_var_key(var)]", "docstring": "Find or create a slot initialized with 0.0.\n\nArgs:\nvar: A `Variable` object.\nslot_name: Name for the slot.\nop_name: Name to use when scoping the Variable that\nneeds to be created for the slot.\n\nReturns:\nA `Variable` object.", "source": "github-repos"}
{"code": "def set(self, *args, **kwargs):\n        \n        if args:\n            for arg in args:\n                if arg is not None:\n                    for name in self.__slots__:\n                        self._set(name, getattr(arg, name, UNSET))\n        for name in kwargs:\n            self._set(name, kwargs.get(name, UNSET))", "docstring": "Conveniently set one or more fields at a time.\n\nArgs:\n*args: Optionally set from other objects, available fields from the passed object are used in order\n**kwargs: Set from given key/value pairs (only names defined in __slots__ are used)", "source": "juraj-google-style"}
{"code": "def asdim(dimension):\n    if isinstance(dimension, Dimension):\n        return dimension\n    elif isinstance(dimension, (tuple, dict, basestring)):\n        return Dimension(dimension)\n    else:\n        raise ValueError('%s type could not be interpreted as Dimension. Dimensions must be declared as a string, tuple, dictionary or Dimension type.')", "docstring": "Convert the input to a Dimension.\n\nArgs:\ndimension: tuple, dict or string type to convert to Dimension\n\nReturns:\nA Dimension object constructed from the dimension spec. No\ncopy is performed if the input is already a Dimension.", "source": "codesearchnet"}
{"code": "def ajax(cls, url, param={}, method='get'):\n    param = urllib.parse.urlencode(param)\n    if (method.lower() == 'get'):\n        req = urllib.request.Request(((url + '?') + param))\n    elif (method.lower() == 'post'):\n        param = param.encode('utf-8')\n        req = urllib.request.Request(url, data=param)\n    else:\n        raise Exception(\"invalid method '{}' (GET/POST)\".format(method))\n    rsp = urllib.request.urlopen(req)\n    if rsp:\n        rsp_json = rsp.read().decode('utf-8')\n        rsp_dict = json.loads(rsp_json)\n        return rsp_dict\n    return None", "docstring": "Get info by ajax\n\nArgs:\nurl: string\nReturns:\ndict: json decoded into a dict", "source": "codesearchnet"}
{"code": "def WMITimeStrToRDFDatetime(self, timestr):\n    offset_minutes = timestr[21:]\n    year = timestr[:4]\n    month = timestr[4:6]\n    day = timestr[6:8]\n    hours = timestr[8:10]\n    minutes = timestr[10:12]\n    seconds = timestr[12:14]\n    microseconds = timestr[15:21]\n    unix_seconds = calendar.timegm(tuple(map(int, [year, month, day, hours, minutes, seconds])))\n    unix_seconds -= (int(offset_minutes) * 60)\n    return rdfvalue.RDFDatetime(((unix_seconds * 1000000.0) + int(microseconds)))", "docstring": "Return RDFDatetime from string like 20140825162259.000000-420.\n\nArgs:\ntimestr: WMI time string\n\nReturns:\nrdfvalue.RDFDatetime\n\nWe have some timezone manipulation work to do here because the UTC offset is\nin minutes rather than +-HHMM", "source": "codesearchnet"}
{"code": "def copy_course_videos(source_course_id, destination_course_id):\n    if (source_course_id == destination_course_id):\n        return\n    course_videos = CourseVideo.objects.select_related('video', 'video_image').filter(course_id=six.text_type(source_course_id))\n    for course_video in course_videos:\n        (destination_course_video, __) = CourseVideo.objects.get_or_create(video=course_video.video, course_id=destination_course_id)\n        if hasattr(course_video, 'video_image'):\n            VideoImage.create_or_update(course_video=destination_course_video, file_name=course_video.video_image.image.name)", "docstring": "Adds the destination_course_id to the videos taken from the source_course_id\n\nArgs:\nsource_course_id: The original course_id\ndestination_course_id: The new course_id where the videos will be copied", "source": "codesearchnet"}
{"code": "def set_property(property_map, name, value, exclude_from_indexes=None):\n    set_value(property_map[name], value, exclude_from_indexes)", "docstring": "Set property value in the given datastore.Property proto message.\n\nArgs:\nproperty_map: a string->datastore.Value protobuf map.\nname: name of the property.\nvalue: python object or datastore.Value.\nexclude_from_indexes: if the value should be exclude from indexes. None\nleaves indexing as is (defaults to False if value is not a Value message).\n\nUsage:\n>>> set_property(property_proto, 'foo', u'a')\n\nRaises:\nTypeError: if the given value type is not supported.", "source": "codesearchnet"}
{"code": "def interpolate(features, hparams, decode_hp):\n  \n  inputs, targets = features[\"inputs\"], features[\"targets\"]\n  inputs = tf.unstack(inputs, axis=1)\n  targets = tf.unstack(targets, axis=1)\n  coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp)\n\n  \n  first_frame, last_frame = inputs[0], targets[-1]\n  first_top_z, first_level_eps = frame_to_latents(first_frame, hparams)\n  last_top_z, last_level_eps = frame_to_latents(last_frame, hparams)\n\n  \n  first_lats = first_level_eps + [first_top_z]\n  last_lats = last_level_eps + [last_top_z]\n  interp_lats = []\n  lat_iterator = enumerate(zip(first_lats, last_lats))\n  for level_ind, (first_lat, last_lat) in lat_iterator:\n    if level_ind in decode_hp.level_interp:\n      if decode_hp.channel_interp == \"all\":\n        interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs)\n      else:\n        interp_lat = glow_ops.linear_interpolate_rank(\n            first_lat, last_lat, coeffs, decode_hp.rank_interp)\n    else:\n      interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1])\n    interp_lats.append(interp_lat)\n\n  level_eps_interp = interp_lats[:hparams.n_levels-1]\n  z_top_interp = interp_lats[-1]\n  images = latents_to_frames(z_top_interp, level_eps_interp, hparams)\n  return images, first_frame, last_frame", "docstring": "Interpolate between the first input frame and last target frame.\n\nArgs:\nfeatures: dict of tensors\nhparams: HParams, training hparams.\ndecode_hp: HParams, decode hparams.\nReturns:\nimages: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C)\nfirst_frame: image, 3-D Tensor, shape=(1, H, W, C)\nlast_frame: image, 3-D Tensor, shape=(1, H, W, C)", "source": "juraj-google-style"}
{"code": "def dbmax50years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmax50years`'.format(value))\n\n        self._dbmax50years = value", "docstring": "Corresponds to IDD Field `dbmax50years`\n50-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax50years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, rr, table='services'):\n        \n        self.rr = rr\n        self.table = table\n        self._ensure_table()", "docstring": "Initialize the service registry.\n\nCreates the database table if it does not exist.\n\nArgs:\nrr (doublethink.Rethinker): a doublethink.Rethinker, which must\nhave `dbname` set", "source": "juraj-google-style"}
{"code": "def PrepareMergeTaskStorage(self, task):\n    \n    if task.identifier not in self._task_storage_writers:\n      raise IOError('Storage writer for task: {0:s} does not exist.'.format(\n          task.identifier))", "docstring": "Prepares a task storage for merging.\n\nArgs:\ntask (Task): task.\n\nRaises:\nIOError: if the task storage does not exist.\nOSError: if the task storage does not exist.", "source": "juraj-google-style"}
{"code": "def dump_artifact(obj, path, filename=None):\n    p_sha1 = None\n    if (not os.path.exists(path)):\n        os.makedirs(path, mode=448)\n    else:\n        p_sha1 = hashlib.sha1()\n        p_sha1.update(obj.encode(encoding='UTF-8'))\n    if (filename is None):\n        (fd, fn) = tempfile.mkstemp(dir=path)\n    else:\n        fn = os.path.join(path, filename)\n    if os.path.exists(fn):\n        c_sha1 = hashlib.sha1()\n        with open(fn) as f:\n            contents = f.read()\n        c_sha1.update(contents.encode(encoding='UTF-8'))\n    if ((not os.path.exists(fn)) or (p_sha1.hexdigest() != c_sha1.hexdigest())):\n        lock_fp = os.path.join(path, '.artifact_write_lock')\n        lock_fd = os.open(lock_fp, (os.O_RDWR | os.O_CREAT), (stat.S_IRUSR | stat.S_IWUSR))\n        fcntl.lockf(lock_fd, fcntl.LOCK_EX)\n        try:\n            with open(fn, 'w') as f:\n                os.chmod(fn, stat.S_IRUSR)\n                f.write(str(obj))\n        finally:\n            fcntl.lockf(lock_fd, fcntl.LOCK_UN)\n            os.close(lock_fd)\n            os.remove(lock_fp)\n    return fn", "docstring": "Write the artifact to disk at the specified path\n\nArgs:\nobj (string): The string object to be dumped to disk in the specified\npath.  The artifact filename will be automatically created\n\npath (string): The full path to the artifacts data directory.\n\nfilename (string, optional): The name of file to write the artifact to.\nIf the filename is not provided, then one will be generated.\n\nReturns:\nstring: The full path filename for the artifact that was generated", "source": "codesearchnet"}
{"code": "def process(i: int, sentence: str, sep_indices: typing.Set[int], scale: int) -> str:\n    feature = get_feature(sentence[i - 3] if i > 2 else INVALID, sentence[i - 2] if i > 1 else INVALID, sentence[i - 1], sentence[i] if i < len(sentence) else INVALID, sentence[i + 1] if i + 1 < len(sentence) else INVALID, sentence[i + 2] if i + 2 < len(sentence) else INVALID)\n    positive = i in sep_indices\n    line = '\\t'.join(['%d' % scale if positive else '%d' % -scale] + feature)\n    return line", "docstring": "Outputs an encoded line of features from the given index.\n\nArgs:\ni (int): index\nsentence (str): A sentence\nsep_indices (typing.Set[int]): A set of separator indices.\nscale (int): A weight scale for the entries.", "source": "github-repos"}
{"code": "def plot_rb_data(xdata, ydatas, yavg, yerr, fit, survival_prob, ax=None, show_plt=True):\n    if (not HAS_MATPLOTLIB):\n        raise ImportError('The function plot_rb_data needs matplotlib. Run \"pip install matplotlib\" before.')\n    if (ax is None):\n        plt.figure()\n        ax = plt.gca()\n    for ydata in ydatas:\n        ax.plot(xdata, ydata, color='gray', linestyle='none', marker='x')\n    ax.errorbar(xdata, yavg, yerr=yerr, color='r', linestyle='--', linewidth=3)\n    ax.plot(xdata, survival_prob(xdata, *fit), color='blue', linestyle='-', linewidth=2)\n    ax.tick_params(labelsize=14)\n    ax.set_xlabel('Clifford Length', fontsize=16)\n    ax.set_ylabel('Z', fontsize=16)\n    ax.grid(True)\n    if show_plt:\n        plt.show()", "docstring": "Plot randomized benchmarking data.\n\nArgs:\nxdata (list): list of subsequence lengths\nydatas (list): list of lists of survival probabilities for each\nsequence\nyavg (list): mean of the survival probabilities at each sequence\nlength\nyerr (list): error of the survival\nfit (list): fit parameters\nsurvival_prob (callable): function that computes survival probability\nax (Axes or None): plot axis (if passed in)\nshow_plt (bool): display the plot.\n\nRaises:\nImportError: If matplotlib is not installed.", "source": "codesearchnet"}
{"code": "def update_all(cls, *criterion, **kwargs):\n    try:\n        r = cls.query.filter(*criterion).update(kwargs, 'fetch')\n        cls.session.commit()\n        return r\n    except:\n        cls.session.rollback()\n        raise", "docstring": "Batch method for updating all instances obeying the criterion\n\nArgs:\n*criterion: SQLAlchemy query criterion for filtering what\ninstances to update\n**kwargs: The parameters to be updated\n\nExamples:\n\n>>> User.update_all(active=True)\n\n>>> Customer.update_all(Customer.country=='India', active=True)\n\nThe second example sets active=True for all customers with\ncountry India.", "source": "codesearchnet"}
{"code": "def push_stack(stack, substack, op_id):\n    if ((substack is not None) and (not isinstance(substack, Stack))):\n        raise ValueError(('Substack should be type tangent.Stack or None, instead found %s' % type(substack)))\n    if __debug__:\n        stack.append((substack, op_id))\n    else:\n        stack.append(substack)", "docstring": "Proxy of push, where we know we're pushing a stack onto a stack.\n\nUsed when differentiating call trees,where sub-functions get their own stack.\nSee push() for more.\n\nArgs:\nstack: The stack object, which must support appending values.\nsubstack: The stack to append.\nop_id: A unique variable that is also passed into the corresponding pop.\nAllows optimization passes to track pairs of pushes and pops.\n\nRaises:\nValueError: If a non-stack value for `substack` is passed.", "source": "codesearchnet"}
{"code": "def input(self):\n    return self._get_node_attribute_at_index(0, 'input_tensors', 'input')", "docstring": "Retrieves the input tensor(s) of a symbolic operation.\n\nOnly returns the tensor(s) corresponding to the *first time*\nthe operation was called.\n\nReturns:\nInput tensor or list of input tensors.", "source": "github-repos"}
{"code": "def _show_status_for_work(self, work):\n    \n    work_count = len(work.work)\n    work_completed = {}\n    work_completed_count = 0\n    for v in itervalues(work.work):\n      if v['is_completed']:\n        work_completed_count += 1\n        worker_id = v['claimed_worker_id']\n        if worker_id not in work_completed:\n          work_completed[worker_id] = {\n              'completed_count': 0,\n              'last_update': 0.0,\n          }\n        work_completed[worker_id]['completed_count'] += 1\n        work_completed[worker_id]['last_update'] = max(\n            work_completed[worker_id]['last_update'],\n            v['claimed_worker_start_time'])\n    print('Completed {0}/{1} work'.format(work_completed_count,\n                                          work_count))\n    for k in sorted(iterkeys(work_completed)):\n      last_update_time = time.strftime(\n          '%Y-%m-%d %H:%M:%S',\n          time.localtime(work_completed[k]['last_update']))\n      print('Worker {0}: completed {1}   last claimed work at {2}'.format(\n          k, work_completed[k]['completed_count'], last_update_time))", "docstring": "Shows status for given work pieces.\n\nArgs:\nwork: instance of either AttackWorkPieces or DefenseWorkPieces", "source": "juraj-google-style"}
{"code": "def _convert_values_to_tf_tensors(sample: rd.RepresentativeSample) -> Mapping[str, core.Tensor]:\n    tensor_mapping = {}\n    for name, tensorlike_value in sample.items():\n        if isinstance(tensorlike_value, core.Tensor):\n            tensor_value = tensorlike_value\n        else:\n            tensor_value = tensor_conversion.convert_to_tensor_v2_with_dispatch(tensorlike_value)\n        tensor_mapping[name] = tensor_value\n    return tensor_mapping", "docstring": "Converts TensorLike values of `sample` to Tensors.\n\nCreates a copy of `sample`, where each value is converted to Tensors\nunless it is already a Tensor.\nThe values are not converted in-place (i.e. `sample` is not mutated).\n\nArgs:\nsample: A representative sample, which is a map of {name -> tensorlike\nvalue}.\n\nReturns:\nConverted map of {name -> tensor}.", "source": "github-repos"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Creates a regularizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same regularizer from the config\ndictionary.\n\nThis method is used by saving and loading models to HDF5 formats,\nKeras model cloning, some visualization utilities,\nand exporting models to and from JSON.\n\nArgs:\nconfig: A Python dictionary, typically the output of get_config.\n\nReturns:\nA regularizer instance.", "source": "github-repos"}
{"code": "def _get_pdf_filenames_at(source_directory):\n    \n    if not os.path.isdir(source_directory):\n        raise ValueError(\"%s is not a directory!\" % source_directory)\n    return [os.path.join(source_directory, filename)\n            for filename in os.listdir(source_directory)\n            if filename.endswith(PDF_EXTENSION)]", "docstring": "Find all PDF files in the specified directory.\n\nArgs:\nsource_directory (str): The source directory.\n\nReturns:\nlist(str): Filepaths to all PDF files in the specified directory.\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def get_user_shakes(self):\n    endpoint = '/api/shakes'\n    data = self._make_request(verb='GET', endpoint=endpoint)\n    shakes = [Shake.NewFromJSON(shk) for shk in data['shakes']]\n    return shakes", "docstring": "Get a list of Shake objects for the currently authenticated user.\n\nReturns:\nA list of Shake objects.", "source": "codesearchnet"}
{"code": "def download_sifts_xml(pdb_id, outdir='', force_rerun=False):\n    \n    baseURL = 'ftp:\n    filename = '{}.xml.gz'.format(pdb_id.lower())\n\n    outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        response = urlopen(baseURL + filename)\n        with open(outfile, 'wb') as f:\n            f.write(gzip.decompress(response.read()))\n\n    return outfile", "docstring": "Download the SIFTS file for a PDB ID.\n\nArgs:\npdb_id (str): PDB ID\noutdir (str): Output directory, current working directory if not specified.\nforce_rerun (bool): If the file should be downloaded again even if it exists\n\nReturns:\nstr: Path to downloaded file", "source": "juraj-google-style"}
{"code": "def Open(self, path_spec):\n    \n    self._file_system = resolver.Resolver.OpenFileSystem(path_spec)\n    if self._file_system is None:\n      raise errors.VolumeSystemError('Unable to resolve path specification.')\n\n    type_indicator = self._file_system.type_indicator\n    if type_indicator != definitions.TYPE_INDICATOR_TSK_PARTITION:\n      raise errors.VolumeSystemError('Unsupported type indicator.')", "docstring": "Opens a volume defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nRaises:\nVolumeSystemError: if the TSK partition virtual file system could not\nbe resolved.", "source": "juraj-google-style"}
{"code": "def __mul__(self, right: torch.Tensor) -> Rotation:\n    if not isinstance(right, torch.Tensor):\n        raise TypeError('The other multiplicand must be a Tensor')\n    if self._rot_mats is not None:\n        rot_mats = self._rot_mats * right[..., None, None]\n        return Rotation(rot_mats=rot_mats, quats=None)\n    elif self._quats is not None:\n        quats = self._quats * right[..., None]\n        return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. mask the Rotation.\n\nArgs:\nright:\nThe tensor multiplicand\nReturns:\nThe product", "source": "github-repos"}
{"code": "def mcast_ip(ip_addr, return_tuple=True):\n    \n    regex_mcast_ip = __re.compile(\"^(((2[2-3][4-9])|(23[0-3]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9])))$\")\n    if return_tuple:\n        while not regex_mcast_ip.match(ip_addr):\n            print(\"Not a good multicast IP.\")\n            print(\"Please try again.\")\n            ip_addr = input(\"Please enter a multicast IP address in the following format x.x.x.x: \")\n        return ip_addr\n    elif not return_tuple:\n        if not regex_mcast_ip.match(ip_addr):\n            return False\n        else:\n            return True", "docstring": "Function to check if a address is multicast\nArgs:\nip_addr: Multicast IP address in the following format 239.1.1.1\nreturn_tuple: Set to True it returns a IP, set to False returns True or False\n\nReturns: see return_tuple for return options", "source": "juraj-google-style"}
{"code": "def execute(self, triple_map, output, **kwargs):\n    subjects = []\n    logical_src_iterator = str(triple_map.logicalSource.iterator)\n    json_object = kwargs.get('obj', self.source)\n    if (logical_src_iterator == '.'):\n        results = [None]\n    else:\n        json_path_exp = jsonpath_ng.parse(logical_src_iterator)\n        results = [r.value for r in json_path_exp.find(json_object)][0]\n    for row in results:\n        subject = self.generate_term(term_map=triple_map.subjectMap, **kwargs)\n        for pred_obj_map in triple_map.predicateObjectMap:\n            predicate = pred_obj_map.predicate\n            if (pred_obj_map.template is not None):\n                output.add((subject, predicate, self.generate_term(term_map=pred_obj_map, **kwargs)))\n            if (pred_obj_map.parentTriplesMap is not None):\n                self.__handle_parents__(output, parent_map=pred_obj_map.parentTriplesMap, subject=subject, predicate=predicate, obj=row, **kwargs)\n            if (pred_obj_map.reference is not None):\n                ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))\n                found_objects = [r.value for r in ref_exp.find(row)]\n                for obj in found_objects:\n                    if rdflib.term._is_valid_uri(obj):\n                        rdf_obj = rdflib.URIRef(str(obj))\n                    else:\n                        rdf_obj = rdflib.Literal(str(obj))\n                    output.add((subject, predicate, rdf_obj))\n            if (pred_obj_map.constant is not None):\n                output.add((subject, predicate, pred_obj_map.constant))\n        subjects.append(subject)\n    return subjects", "docstring": "Method executes mapping between JSON source and\noutput RDF\n\nArgs:\n\n-----\ntriple_map: SimpleNamespace", "source": "codesearchnet"}
{"code": "def is_variable_initialized(ref, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.is_variable_initialized(ref=ref, name=name)\n    return ref.is_initialized(name=name)", "docstring": "Checks whether a tensor has been initialized.\n\nOutputs boolean scalar indicating whether the tensor has been initialized.\n\nArgs:\nref: A mutable `Tensor`.\nShould be from a `Variable` node. May be uninitialized.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `bool`.", "source": "github-repos"}
{"code": "def install_local(self):\n    folder = self._get_local_folder()\n    installed = self.installed_dir()\n    self._check_module(installed.parent)\n    installed.symlink_to(folder.resolve())", "docstring": "Make a symlink in install folder to a local NApp.\n\nRaises:\nFileNotFoundError: If NApp is not found.", "source": "codesearchnet"}
{"code": "def monitorTUN(self):\n    packet = self.checkTUN()\n    if packet:\n        try:\n            ret = self._faraday.send(packet)\n            return ret\n        except AttributeError as error:\n            print('AttributeError')", "docstring": "Monitors the TUN adapter and sends data over serial port.\n\nReturns:\nret: Number of bytes sent over serial port", "source": "codesearchnet"}
{"code": "def detect_arbitrary_send(self, contract):\n        \n        ret = []\n        for f in [f for f in contract.functions if f.contract == contract]:\n            nodes = self.arbitrary_send(f)\n            if nodes:\n                ret.append((f, nodes))\n        return ret", "docstring": "Detect arbitrary send\nArgs:\ncontract (Contract)\nReturns:\nlist((Function), (list (Node)))", "source": "juraj-google-style"}
{"code": "def write_schema_to_file(cls, schema, file_pointer=stdout, folder=MISSING, context=DEFAULT_DICT):\n    schema = cls._get_schema(schema)\n    json_schema = cls.generate_json_schema(schema, context=context)\n    if folder:\n        schema_filename = getattr(schema.Meta, 'json_schema_filename', '.'.join([schema.__class__.__name__, 'json']))\n        json_path = os.path.join(folder, schema_filename)\n        file_pointer = open(json_path, 'w')\n    json.dump(json_schema, file_pointer, indent=2)\n    return json_schema", "docstring": "Given a Marshmallow schema, create a JSON Schema for it.\n\nArgs:\nschema (marshmallow.Schema|str): The Marshmallow schema, or the\nPython path to one, to create the JSON schema for.\n\nKeyword Args:\nfile_pointer (file, optional): The pointer to the file to write\nthis schema to. If not provided, the schema will be dumped to\n``sys.stdout``.\nfolder (str, optional): The folder in which to save the JSON\nschema. The name of the schema file can be optionally\ncontrolled my the schema's ``Meta.json_schema_filename``. If\nthat attribute is not set, the class's name will be used for\nthe filename. If writing the schema to a specific file is\ndesired, please pass in a ``file_pointer``.\ncontext (dict, optional): The Marshmallow context to be pushed to\nthe schema generates the JSONSchema.\n\nReturns:\ndict: The JSON schema in dictionary form.", "source": "codesearchnet"}
{"code": "def add_arc(self, src, dst, char):\n        \n        \n        \n        \n        \n        \n        \n        for s_idx in [src, dst]:\n            if s_idx >= len(self.states):\n                for i in range(len(self.states), s_idx + 1):\n                    self.states.append(DFAState(i))\n        for arc in self.states[src].arcs:\n            if arc.ilabel == self.isyms.__getitem__(char) or char == EPSILON:\n                self.nfa = True\n                break\n        self.states[src].arcs.append(\n            DFAArc(src, dst, self.isyms.__getitem__(char)))", "docstring": "Adds a new Arc\nArgs:\nsrc (int): The source state identifier\ndst (int): The destination state identifier\nchar (str): The character for the transition\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_victim_social_asset(self, main_type, sub_type, unique_id, asset_id, params=None):\n        \n        params = params or {}\n\n        return self.victim_social_asset(main_type, sub_type, unique_id, asset_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nasset_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def insert_top(self, node):\n    \n    if not isinstance(node, grammar.STATEMENTS):\n      raise ValueError\n    self.to_insert_top.append(node)", "docstring": "Insert statements at the top of the function body.\n\nNote that multiple calls to `insert_top` will result in the statements\nbeing prepended in that order; this is different behavior from `prepend`.\n\nArgs:\nnode: The statement to prepend.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "juraj-google-style"}
{"code": "def get_height_rect(\n        self, x: int, y: int, width: int, height: int, string: str\n    ) -> int:\n        \n        string_ = string.encode(\"utf-8\")\n        return int(\n            lib.get_height_rect(\n                self.console_c, x, y, width, height, string_, len(string_)\n            )\n        )", "docstring": "Return the height of this text word-wrapped into this rectangle.\n\nArgs:\nx (int): The x coordinate from the left.\ny (int): The y coordinate from the top.\nwidth (int): Maximum width to render the text.\nheight (int): Maximum lines to render the text.\nstring (str): A Unicode string.\n\nReturns:\nint: The number of lines of text once word-wrapped.", "source": "juraj-google-style"}
{"code": "class _ConfusionMatrixConditionCount(Metric):\n\n    def __init__(self, confusion_matrix_cond, thresholds=None, name=None, dtype=None):\n        super().__init__(name=name, dtype=dtype)\n        self._confusion_matrix_cond = confusion_matrix_cond\n        self.init_thresholds = thresholds\n        self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=0.5)\n        self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds)\n        self.accumulator = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='accumulator')\n\n    def update_state(self, y_true, y_pred, sample_weight=None):\n        \n        return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)\n\n    def result(self):\n        if len(self.thresholds) == 1:\n            result = self.accumulator[0]\n        else:\n            result = self.accumulator\n        return backend.convert_to_tensor(result)\n\n    def get_config(self):\n        config = {'thresholds': self.init_thresholds}\n        base_config = super().get_config()\n        return {**base_config, **config}", "docstring": "Calculates the number of the given confusion matrix condition.\n\nArgs:\nconfusion_matrix_cond: One of `metrics_utils.ConfusionMatrix`\nconditions.\nthresholds: (Optional) Defaults to `0.5`. A float value or a python list\n/ tuple of float threshold values in `[0, 1]`. A threshold is\ncompared with prediction values to determine the truth value of\npredictions (i.e., above the threshold is `True`, below is `False`).\nOne metric value is generated for each threshold value.\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.", "source": "github-repos"}
{"code": "def body(self, body):\n        \n        self._request.body = body\n        self.add_matcher(matcher('BodyMatcher', body))", "docstring": "Defines the body data to match.\n\n``body`` argument can be a ``str``, ``binary`` or a regular expression.\n\nArguments:\nbody (str|binary|regex): body data to match.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def _index_to_ansi_values(self, index):\n    if (self.__class__.__name__[0] == 'F'):\n        if (index < 8):\n            index += ANSI_FG_LO_BASE\n        else:\n            index += (ANSI_FG_HI_BASE - 8)\n    elif (index < 8):\n        index += ANSI_BG_LO_BASE\n    else:\n        index += (ANSI_BG_HI_BASE - 8)\n    return [str(index)]", "docstring": "Converts an palette index to the corresponding ANSI color.\n\nArguments:\nindex   - an int (from 0-15)\nReturns:\nindex as str in a list for compatibility with values.", "source": "codesearchnet"}
{"code": "def _calculate_hash(files, root):\n    file_hash = hashlib.md5()\n    for fname in sorted(files):\n        f = os.path.join(root, fname)\n        file_hash.update((fname + '\\x00').encode())\n        with open(f, 'rb') as fd:\n            for chunk in iter((lambda : fd.read(4096)), ''):\n                if (not chunk):\n                    break\n                file_hash.update(chunk)\n            file_hash.update('\\x00'.encode())\n    return file_hash.hexdigest()", "docstring": "Returns a hash of all of the given files at the given root.\n\nArgs:\nfiles (list[str]): file names to include in the hash calculation,\nrelative to ``root``.\nroot (str): base directory to analyze files in.\n\nReturns:\nstr: A hash of the hashes of the given files.", "source": "codesearchnet"}
{"code": "def _UpdateProcessingStatus(self, pid, process_status, used_memory):\n    \n    self._RaiseIfNotRegistered(pid)\n\n    if not process_status:\n      return\n\n    process = self._processes_per_pid[pid]\n\n    status_indicator = process_status.get('processing_status', None)\n\n    self._RaiseIfNotMonitored(pid)\n\n    display_name = process_status.get('display_name', '')\n\n    number_of_consumed_event_tags = process_status.get(\n        'number_of_consumed_event_tags', None)\n    number_of_produced_event_tags = process_status.get(\n        'number_of_produced_event_tags', None)\n\n    number_of_consumed_events = process_status.get(\n        'number_of_consumed_events', None)\n    number_of_produced_events = process_status.get(\n        'number_of_produced_events', None)\n\n    number_of_consumed_reports = process_status.get(\n        'number_of_consumed_reports', None)\n    number_of_produced_reports = process_status.get(\n        'number_of_produced_reports', None)\n\n    number_of_consumed_sources = process_status.get(\n        'number_of_consumed_sources', None)\n    number_of_produced_sources = process_status.get(\n        'number_of_produced_sources', None)\n\n    number_of_consumed_warnings = process_status.get(\n        'number_of_consumed_warnings', None)\n    number_of_produced_warnings = process_status.get(\n        'number_of_produced_warnings', None)\n\n    if status_indicator != definitions.STATUS_INDICATOR_IDLE:\n      last_activity_timestamp = process_status.get(\n          'last_activity_timestamp', 0.0)\n\n      if last_activity_timestamp:\n        last_activity_timestamp += self._PROCESS_WORKER_TIMEOUT\n\n        current_timestamp = time.time()\n        if current_timestamp > last_activity_timestamp:\n          logger.error((\n              'Process {0:s} (PID: {1:d}) has not reported activity within '\n              'the timeout period.').format(process.name, pid))\n          status_indicator = definitions.STATUS_INDICATOR_NOT_RESPONDING\n\n    self._processing_status.UpdateWorkerStatus(\n        process.name, status_indicator, pid, used_memory, display_name,\n        number_of_consumed_sources, number_of_produced_sources,\n        number_of_consumed_events, number_of_produced_events,\n        number_of_consumed_event_tags, number_of_produced_event_tags,\n        number_of_consumed_reports, number_of_produced_reports,\n        number_of_consumed_warnings, number_of_produced_warnings)", "docstring": "Updates the processing status.\n\nArgs:\npid (int): process identifier (PID) of the worker process.\nprocess_status (dict[str, object]): status values received from\nthe worker process.\nused_memory (int): size of used memory in bytes.\n\nRaises:\nKeyError: if the process is not registered with the engine.", "source": "juraj-google-style"}
{"code": "def _ReadRecordAttributeValueOffset(\n      self, file_object, file_offset, number_of_attribute_values):\n    \n    offsets_data_size = number_of_attribute_values * 4\n\n    offsets_data = file_object.read(offsets_data_size)\n\n    context = dtfabric_data_maps.DataTypeMapContext(values={\n        'number_of_attribute_values': number_of_attribute_values})\n\n    data_type_map = self._GetDataTypeMap(\n        'keychain_record_attribute_value_offsets')\n\n    try:\n      attribute_value_offsets = self._ReadStructureFromByteStream(\n          offsets_data, file_offset, data_type_map, context=context)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to map record attribute value offsets data at offset: '\n          '0x{0:08x} with error: {1!s}').format(file_offset, exception))\n\n    return attribute_value_offsets", "docstring": "Reads the record attribute value offsets.\n\nArgs:\nfile_object (file): file-like object.\nfile_offset (int): offset of the record attribute values offsets relative\nto the start of the file.\nnumber_of_attribute_values (int): number of attribute values.\n\nReturns:\nkeychain_record_attribute_value_offsets: record attribute value offsets.\n\nRaises:\nParseError: if the record attribute value offsets cannot be read.", "source": "juraj-google-style"}
{"code": "def update_x(self, x, indices=None):\n        \n        x = _make_np_bool(x)\n        if indices is None:\n            if len(self._x) != len(x):\n                raise QiskitError(\"During updating whole x, you can not change \"\n                                  \"the number of qubits.\")\n            self._x = x\n        else:\n            if not isinstance(indices, list) and not isinstance(indices, np.ndarray):\n                indices = [indices]\n            for p, idx in enumerate(indices):\n                self._x[idx] = x[p]\n\n        return self", "docstring": "Update partial or entire x.\n\nArgs:\nx (numpy.ndarray or list): to-be-updated x\nindices (numpy.ndarray or list or optional): to-be-updated qubit indices\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: when updating whole x, the number of qubits must be the same.", "source": "juraj-google-style"}
{"code": "def json_to_key_value(json_data, key_field, value_field=None, array=False):\n        \n        if not isinstance(json_data, list):\n            json_data = [json_data]\n\n        key_value_array = []\n        for d in json_data:\n            if d.get(key_field) is not None and value_field is None:\n                \n                key = key_field\n                value = d.get(key_field)\n            elif d.get(key_field) is not None and d.get(value_field) is not None:\n                \n                key = d.get(key_field)\n                value = d.get(value_field)\n            else:\n                continue\n\n            key_value_array.append({'key': key, 'value': value})\n\n        if len(key_value_array) == 1 and not array:\n            return key_value_array[0]\n\n        return key_value_array", "docstring": "Convert JSON data to a KeyValue/KeyValueArray.\n\nArgs:\njson_data (dictionary|list): Array/List of JSON data.\nkey_field (string): Field name for the key.\nvalue_field (string): Field name for the value or use the value of the key field.\narray (boolean): Always return array even if only on result.\n\nReturns:\n(dictionary|list): A dictionary or list representing a KeyValue or KeyValueArray.", "source": "juraj-google-style"}
{"code": "def instantiate(self, cls=None):\n        \n        if cls is None:\n            cls = self.cls\n        if cls is None:\n            raise TypeError(\"cls must a class\")\n        return cls.create(*self.args, **self.kwargs)", "docstring": "Return an instantiated Expression as\n``cls.create(*self.args, **self.kwargs)``\n\nArgs:\ncls (class): The class of the instantiated expression. If not\ngiven, ``self.cls`` will be used.", "source": "juraj-google-style"}
{"code": "def test_torch_export(self, config=None, inputs_dict=None, tolerance=0.0001):\n    if not self.test_torch_exportable:\n        self.skipTest(reason='test_torch_exportable=False for this model.')\n\n    def recursively_check(eager_outputs, exported_outputs):\n        is_tested = False\n        if isinstance(eager_outputs, torch.Tensor):\n            torch.testing.assert_close(eager_outputs, exported_outputs, atol=tolerance, rtol=tolerance)\n            return True\n        elif isinstance(eager_outputs, (tuple, list)):\n            for eager_output, exported_output in zip(eager_outputs, exported_outputs):\n                is_tested = is_tested or recursively_check(eager_output, exported_output)\n            return is_tested\n        elif isinstance(eager_outputs, dict):\n            for key in eager_outputs:\n                is_tested = is_tested or recursively_check(eager_outputs[key], exported_outputs[key])\n            return is_tested\n        return is_tested\n    default_config, default_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n    config = config or default_config\n    inputs_dict = inputs_dict or default_inputs_dict\n    for model_class in self.all_model_classes:\n        if model_class.__name__.endswith('ForPreTraining'):\n            continue\n        with self.subTest(model_class.__name__):\n            model = model_class(config).eval().to(torch_device)\n            exported_model = torch.export.export(model, args=(), kwargs=inputs_dict, strict=getattr(self, 'test_torch_exportable_strictly', True))\n            with torch.no_grad():\n                torch.manual_seed(1234)\n                eager_outputs = model(**inputs_dict)\n                torch.manual_seed(1234)\n                exported_outputs = exported_model.module().forward(**inputs_dict)\n            is_tested = recursively_check(eager_outputs, exported_outputs)\n            self.assertTrue(is_tested, msg=f'No outputs were compared for {model_class.__name__}')", "docstring": "Test if model can be exported with torch.export.export()\n\nArgs:\nconfig (PretrainedConfig):\nConfig to use for the model, if None, use default config from model_tester\ninputs_dict (dict):\nInputs to use for the model, if None, use default inputs from model_tester\ntolerance (float):\n`atol` for torch.allclose(), defined in signature for test overriding", "source": "github-repos"}
{"code": "def label_total_duration(self, label_list_ids=None):\n        \n        duration = collections.defaultdict(float)\n\n        for label_list in self.label_lists.values():\n            if label_list_ids is None or label_list.idx in label_list_ids:\n                for label_value, label_duration in label_list.label_total_duration().items():\n                    duration[label_value] += label_duration\n\n        return duration", "docstring": "Return a dictionary containing the number of seconds,\nevery label-value is occurring in this utterance.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists\nwith an id contained in this\nlist are considered.\n\nReturns:\ndict: A dictionary containing the number of seconds\nwith the label-value as key.", "source": "juraj-google-style"}
{"code": "def retry(func):\n\n    def retried_func(*args, **kwargs):\n        max_tries = 3\n        tries = 0\n        while True:\n            try:\n                resp = func(*args, **kwargs)\n            except requests.exceptions.ConnectionError as exc:\n                exc.msg = 'Connection error for session; exiting'\n                raise exc\n            except requests.exceptions.HTTPError as exc:\n                exc.msg = 'HTTP error for session; exiting'\n                raise exc\n            if ((resp.status_code != 200) and (tries < max_tries)):\n                logger.warning('retrying request; current status code: {}'.format(resp.status_code))\n                tries += 1\n                time.sleep((tries ** 2))\n                continue\n            break\n        if (resp.status_code != 200):\n            error_message = resp.json()['error']['message']\n            logger.error('HTTP Error code: {}: {}'.format(resp.status_code, error_message))\n            logger.error('Rule payload: {}'.format(kwargs['rule_payload']))\n            raise requests.exceptions.HTTPError\n        return resp\n    return retried_func", "docstring": "Decorator to handle API retries and exceptions. Defaults to three retries.\n\nArgs:\nfunc (function): function for decoration\n\nReturns:\ndecorated function", "source": "codesearchnet"}
{"code": "def GetNewSessionID(self):\n    base = self.runner_args.base_session_id\n    if (base is None):\n        base = (self.runner_args.client_id or aff4.ROOT_URN)\n        base = base.Add('flows')\n    return rdfvalue.SessionID(base=base, queue=self.runner_args.queue)", "docstring": "Returns a random session ID for this flow based on the runner args.\n\nReturns:\nA formatted session id URN.", "source": "codesearchnet"}
{"code": "def get_browser(browser_name, capabilities=None, **options):\n    \n\n    if browser_name == \"chrome\":\n        return webdriver.Chrome(desired_capabilities=capabilities, **options)\n    if browser_name == \"edge\":\n        return webdriver.Edge(capabilities=capabilities, **options)\n    if browser_name in [\"ff\", \"firefox\"]:\n        return webdriver.Firefox(capabilities=capabilities, **options)\n    if browser_name in [\"ie\", \"internet_explorer\"]:\n        return webdriver.Ie(capabilities=capabilities, **options)\n    if browser_name == \"phantomjs\":\n        return webdriver.PhantomJS(desired_capabilities=capabilities, **options)\n    if browser_name == \"remote\":\n        return webdriver.Remote(desired_capabilities=capabilities, **options)\n    if browser_name == \"safari\":\n        return webdriver.Safari(desired_capabilities=capabilities, **options)\n\n    raise ValueError(\"unsupported browser: {}\".format(repr(browser_name)))", "docstring": "Returns an instance of the given browser with the given capabilities.\n\nArgs:\nbrowser_name (str): The name of the desired browser.\ncapabilities (Dict[str, str | bool], optional): The desired capabilities of the browser.\nDefaults to None.\noptions: Arbitrary keyword arguments for the browser-specific subclass of\n:class:`webdriver.Remote`.\n\nReturns:\nWebDriver: An instance of the desired browser.", "source": "juraj-google-style"}
{"code": "def c_overturned(step):\n    (rbot, rtop) = misc.get_rbounds(step)\n    (cinit, rad) = init_c_overturn(step)\n    radf = ((((rtop ** 3) + (rbot ** 3)) - (rad ** 3)) ** (1 / 3))\n    return (cinit, radf)", "docstring": "Theoretical overturned concentration.\n\nThis compute the resulting composition profile if fractional\ncrystallization of a SMO is assumed and then a purely radial\noverturn happens.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the composition and the radial position\nat which it is evaluated.", "source": "codesearchnet"}
{"code": "def read_counts(node):\n    cfg.forward(node, cfg.ReachingDefinitions())\n    rc = ReadCounts()\n    rc.visit(node)\n    return rc.n_read", "docstring": "Check how many times a variable definition was used.\n\nArgs:\nnode: An AST to analyze.\n\nReturns:\nA dictionary from assignment nodes to the number of times the assigned to\nvariable was used.", "source": "codesearchnet"}
{"code": "def is_alias_command(subcommands, args):\n    \n    if not args:\n        return False\n\n    for subcommand in subcommands:\n        if args[:2] == ['alias', subcommand]:\n            return True\n\n    return False", "docstring": "Check if the user is invoking one of the comments in 'subcommands' in the  from az alias .\n\nArgs:\nsubcommands: The list of subcommands to check through.\nargs: The CLI arguments to process.\n\nReturns:\nTrue if the user is invoking 'az alias {command}'.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, default_name=None, values=None) -> None:\n    if not (default_name is None or isinstance(default_name, str)):\n        raise TypeError('`default_name` type (%s) is not a string type. You likely meant to pass this into the `values` kwarg.' % type(default_name))\n    self._name = default_name if name is None else name\n    self._default_name = default_name\n    self._values = values", "docstring": "Initialize the context manager.\n\nArgs:\nname: The name argument that is passed to the op function.\ndefault_name: The default name to use if the `name` argument is `None`.\nvalues: The list of `Tensor` arguments that are passed to the op function.\n\nRaises:\nTypeError: if `default_name` is passed in but not a string.", "source": "github-repos"}
{"code": "def load_primitive(name):\n    for base_path in get_primitives_paths():\n        parts = name.split('.')\n        number_of_parts = len(parts)\n        for folder_parts in range(number_of_parts):\n            folder = os.path.join(base_path, *parts[:folder_parts])\n            filename = ('.'.join(parts[folder_parts:]) + '.json')\n            json_path = os.path.join(folder, filename)\n            if os.path.isfile(json_path):\n                with open(json_path, 'r') as json_file:\n                    LOGGER.debug('Loading primitive %s from %s', name, json_path)\n                    return json.load(json_file)\n    raise ValueError('Unknown primitive: {}'.format(name))", "docstring": "Locate and load the JSON annotation of the given primitive.\n\nAll the paths found in PRIMTIVE_PATHS will be scanned to find a JSON file\nwith the given name, and as soon as a JSON with the given name is found it\nis returned.\n\nArgs:\nname (str): name of the primitive to look for. The name should\ncorrespond to the primitive, not to the filename, as the\n`.json` extension will be added dynamically.\n\nReturns:\ndict:\nThe content of the JSON annotation file loaded into a dict.\n\nRaises:\nValueError: A `ValueError` will be raised if the primitive cannot be\nfound.", "source": "codesearchnet"}
{"code": "def get_model_proto(iterator) -> model_pb2.ModelProto:\n    if isinstance(iterator, iterator_ops.OwnedIterator):\n        iterator_resource = iterator._iterator_resource\n    elif isinstance(iterator, dataset_ops.NumpyIterator):\n        iterator_resource = iterator._iterator._iterator_resource\n    else:\n        raise ValueError('Only supports `tf.data.Iterator`-typed `iterator`.')\n    if not context.executing_eagerly():\n        raise ValueError(f'{get_model_proto.__name__} is not supported in graph mode.')\n    model_proto_string_tensor = ged_ops.iterator_get_model_proto(iterator_resource)\n    model_proto_bytes = model_proto_string_tensor.numpy()\n    return model_pb2.ModelProto.FromString(model_proto_bytes)", "docstring": "Gets the analytical model inside of `iterator` as `model_pb2.ModelProto`.\n\nArgs:\niterator: An `iterator_ops.OwnedIterator` or `dataset_ops.NumpyIterator`\n\nReturns:\nThe model inside of this iterator as a model proto.\n\nRaises:\nNotFoundError: If this iterator's autotune is not enabled.", "source": "github-repos"}
{"code": "def set_total_channel_deposit(self, registry_address: PaymentNetworkID, token_address: TokenAddress, partner_address: Address, total_deposit: TokenAmount, retry_timeout: NetworkTimeout=DEFAULT_RETRY_TIMEOUT):\n    chain_state = views.state_from_raiden(self.raiden)\n    token_addresses = views.get_token_identifiers(chain_state, registry_address)\n    channel_state = views.get_channelstate_for(chain_state=chain_state, payment_network_id=registry_address, token_address=token_address, partner_address=partner_address)\n    if (not is_binary_address(token_address)):\n        raise InvalidAddress('Expected binary address format for token in channel deposit')\n    if (not is_binary_address(partner_address)):\n        raise InvalidAddress('Expected binary address format for partner in channel deposit')\n    if (token_address not in token_addresses):\n        raise UnknownTokenAddress('Unknown token address')\n    if (channel_state is None):\n        raise InvalidAddress('No channel with partner_address for the given token')\n    if (self.raiden.config['environment_type'] == Environment.PRODUCTION):\n        per_token_network_deposit_limit = RED_EYES_PER_TOKEN_NETWORK_LIMIT\n    else:\n        per_token_network_deposit_limit = UINT256_MAX\n    token = self.raiden.chain.token(token_address)\n    token_network_registry = self.raiden.chain.token_network_registry(registry_address)\n    token_network_address = token_network_registry.get_token_network(token_address)\n    token_network_proxy = self.raiden.chain.token_network(token_network_address)\n    channel_proxy = self.raiden.chain.payment_channel(canonical_identifier=channel_state.canonical_identifier)\n    if (total_deposit == 0):\n        raise DepositMismatch('Attempted to deposit with total deposit being 0')\n    addendum = (total_deposit - channel_state.our_state.contract_balance)\n    total_network_balance = token.balance_of(registry_address)\n    if ((total_network_balance + addendum) > per_token_network_deposit_limit):\n        raise DepositOverLimit(f'The deposit of {addendum} will exceed the token network limit of {per_token_network_deposit_limit}')\n    balance = token.balance_of(self.raiden.address)\n    functions = token_network_proxy.proxy.contract.functions\n    deposit_limit = functions.channel_participant_deposit_limit().call()\n    if (total_deposit > deposit_limit):\n        raise DepositOverLimit(f'The additional deposit of {addendum} will exceed the channel participant limit of {deposit_limit}')\n    if (not (balance >= addendum)):\n        msg = 'Not enough balance to deposit. {} Available={} Needed={}'.format(pex(token_address), balance, addendum)\n        raise InsufficientFunds(msg)\n    channel_proxy.set_total_deposit(total_deposit=total_deposit, block_identifier=views.state_from_raiden(self.raiden).block_hash)\n    target_address = self.raiden.address\n    waiting.wait_for_participant_newbalance(raiden=self.raiden, payment_network_id=registry_address, token_address=token_address, partner_address=partner_address, target_address=target_address, target_balance=total_deposit, retry_timeout=retry_timeout)", "docstring": "Set the `total_deposit` in the channel with the peer at `partner_address` and the\ngiven `token_address` in order to be able to do transfers.\n\nRaises:\nInvalidAddress: If either token_address or partner_address is not\n20 bytes long.\nTransactionThrew: May happen for multiple reasons:\n- If the token approval fails, e.g. 
the token may validate if\naccount has enough balance for the allowance.\n- The deposit failed, e.g. the allowance did not set the token\naside for use and the user spent it before deposit was called.\n- The channel was closed/settled between the allowance call and\nthe deposit call.\nAddressWithoutCode: The channel was settled during the deposit\nexecution.\nDepositOverLimit: The total deposit amount is higher than the limit.", "source": "codesearchnet"}
{"code": "def create_balanced_geojson(input_file, classes, output_file='balanced.geojson', samples_per_class=None):\n    if (not output_file.endswith('.geojson')):\n        output_file += '.geojson'\n    with open(input_file) as f:\n        data = geojson.load(f)\n    sorted_classes = {clss: [] for clss in classes}\n    for feat in data['features']:\n        try:\n            sorted_classes[feat['properties']['class_name']].append(feat)\n        except KeyError:\n            continue\n    if (not samples_per_class):\n        smallest_class = min(sorted_classes, key=(lambda clss: len(sorted_classes[clss])))\n        samples_per_class = len(sorted_classes[smallest_class])\n    try:\n        samps = [random.sample(feats, samples_per_class) for feats in sorted_classes.values()]\n        final = [feat for sample in samps for feat in sample]\n    except ValueError:\n        raise Exception('Insufficient features in at least one class. Set samples_per_class to None to use maximum amount of features.')\n    np.random.shuffle(final)\n    data['features'] = final\n    with open(output_file, 'wb') as f:\n        geojson.dump(data, f)", "docstring": "Create a geojson comprised of balanced classes from the class_name property in\ninput_file. Randomly selects polygons from all classes.\n\nArgs:\ninput_file (str): File name\nclasses (list[str]): Classes in input_file to include in the balanced output file.\nMust exactly match the 'class_name' property in the features of input_file.\noutput_file (str): Name under which to save the balanced output file. Defualts to\nbalanced.geojson.\nsamples_per_class (int or None): Number of features to select per class in\ninput_file. If None will use the smallest class size. Defaults to None.", "source": "codesearchnet"}
{"code": "def _ctc_state_trans(label_seq):\n    with ops.name_scope('ctc_state_trans'):\n        label_seq = ops.convert_to_tensor(label_seq, name='label_seq')\n        batch_size = _get_dim(label_seq, 0)\n        num_labels = _get_dim(label_seq, 1)\n        num_label_states = num_labels + 1\n        num_states = 2 * num_label_states\n        label_states = math_ops.range(num_label_states)\n        blank_states = label_states + num_label_states\n        start_to_label = [[1, 0]]\n        blank_to_label = array_ops_stack.stack([label_states[1:], blank_states[:-1]], 1)\n        label_to_blank = array_ops_stack.stack([blank_states, label_states], 1)\n        indices = array_ops.concat([start_to_label, blank_to_label, label_to_blank], 0)\n        values = array_ops.ones([_get_dim(indices, 0)])\n        trans = array_ops.scatter_nd(indices, values, shape=[num_states, num_states])\n        trans += linalg_ops.eye(num_states)\n        batch_idx = array_ops.zeros_like(label_states[2:])\n        indices = array_ops_stack.stack([batch_idx, label_states[2:], label_states[1:-1]], 1)\n        indices = array_ops.tile(array_ops.expand_dims(indices, 0), [batch_size, 1, 1])\n        batch_idx = array_ops.expand_dims(math_ops.range(batch_size), 1) * [1, 0, 0]\n        indices += array_ops.expand_dims(batch_idx, 1)\n        repeats = math_ops.equal(label_seq[:, :-1], label_seq[:, 1:])\n        values = 1.0 - math_ops.cast(repeats, dtypes.float32)\n        batched_shape = [batch_size, num_states, num_states]\n        label_to_label = array_ops.scatter_nd(indices, values, batched_shape)\n        return array_ops.expand_dims(trans, 0) + label_to_label", "docstring": "Computes CTC alignment model transition matrix.\n\nArgs:\nlabel_seq: tensor of shape [batch_size, max_seq_length]\n\nReturns:\ntensor of shape [batch_size, states, states] with a state transition matrix\ncomputed for each sequence of the batch.", "source": "github-repos"}
{"code": "def get_clinvar_id(self, submission_id):\n        \n        submission_obj = self.clinvar_submission_collection.find_one({'_id': ObjectId(submission_id)})\n        clinvar_subm_id = submission_obj.get('clinvar_subm_id') \n        return clinvar_subm_id", "docstring": "Returns the official Clinvar submission ID for a submission object\n\nArgs:\nsubmission_id(str): submission_id(str) : id of the submission\n\nReturns:\nclinvar_subm_id(str): a string with a format: SUB[0-9]. It is obtained from clinvar portal when starting a new submission", "source": "juraj-google-style"}
{"code": "def _fetch_certs(request, certs_url):\n    \n    response = request(certs_url, method='GET')\n\n    if response.status != http_client.OK:\n        raise exceptions.TransportError(\n            'Could not fetch certificates at {}'.format(certs_url))\n\n    return json.loads(response.data.decode('utf-8'))", "docstring": "Fetches certificates.\n\nGoogle-style cerificate endpoints return JSON in the format of\n``{'key id': 'x509 certificate'}``.\n\nArgs:\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\ncerts_url (str): The certificate endpoint URL.\n\nReturns:\nMapping[str, str]: A mapping of public key ID to x.509 certificate\ndata.", "source": "juraj-google-style"}
{"code": "def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):\n    \n    dist = np.sqrt(np.linspace(0., 1., resolution))\n    if ccw:\n        direction = 1.\n    else:\n        direction = -1.\n    angle = dist * spirals * np.pi * 2. * direction\n    spiral_texture = (\n        (np.cos(angle) * dist / 2.) + 0.5,\n        (np.sin(angle) * dist / 2.) + 0.5\n    )\n    return spiral_texture", "docstring": "Makes a texture consisting of a spiral from the origin.\n\nArgs:\nspirals (float): the number of rotations to make\nccw (bool): make spirals counter-clockwise (default is clockwise)\noffset (float): if non-zero, spirals start offset by this amount\nresolution (int): number of midpoints along the spiral\n\nReturns:\nA texture.", "source": "juraj-google-style"}
{"code": "def load_config(self):\n        \n        logger.debug('loading config file: %s', self.config_file)\n        if os.path.exists(self.config_file):\n            with open(self.config_file) as file_handle:\n                return json.load(file_handle)\n        else:\n            logger.error('configuration file is required for eventify')\n        logger.error('unable to load configuration for service')\n        raise EventifyConfigError(\n            'Configuration is required! Missing: %s' % self.config_file\n        )", "docstring": "Load configuration for the service\n\nArgs:\nconfig_file: Configuration file path", "source": "juraj-google-style"}
{"code": "def set_forced_variation(self, experiment_key, user_id, variation_key):\n    \n    experiment = self.get_experiment_from_key(experiment_key)\n    if not experiment:\n      \n      return False\n\n    experiment_id = experiment.id\n    if variation_key is None:\n      if user_id in self.forced_variation_map:\n        experiment_to_variation_map = self.forced_variation_map.get(user_id)\n        if experiment_id in experiment_to_variation_map:\n          del(self.forced_variation_map[user_id][experiment_id])\n          self.logger.debug('Variation mapped to experiment \"%s\" has been removed for user \"%s\".' % (\n            experiment_key,\n            user_id\n          ))\n        else:\n          self.logger.debug('Nothing to remove. Variation mapped to experiment \"%s\" for user \"%s\" does not exist.' % (\n            experiment_key,\n            user_id\n          ))\n      else:\n        self.logger.debug('Nothing to remove. User \"%s\" does not exist in the forced variation map.' % user_id)\n      return True\n\n    if not validator.is_non_empty_string(variation_key):\n      self.logger.debug('Variation key is invalid.')\n      return False\n\n    forced_variation = self.get_variation_from_key(experiment_key, variation_key)\n    if not forced_variation:\n      \n      return False\n\n    variation_id = forced_variation.id\n\n    if user_id not in self.forced_variation_map:\n      self.forced_variation_map[user_id] = {experiment_id: variation_id}\n    else:\n      self.forced_variation_map[user_id][experiment_id] = variation_id\n\n    self.logger.debug('Set variation \"%s\" for experiment \"%s\" and user \"%s\" in the forced variation map.' % (\n      variation_id,\n      experiment_id,\n      user_id\n    ))\n    return True", "docstring": "Sets users to a map of experiments to forced variations.\n\nArgs:\nexperiment_key: Key for experiment.\nuser_id: The user ID.\nvariation_key: Key for variation. If None, then clear the existing experiment-to-variation mapping.\n\nReturns:\nA boolean value that indicates if the set completed successfully.", "source": "juraj-google-style"}
{"code": "def diff_prettyHtml(self, diffs):\n    \n    html = []\n    for (op, data) in diffs:\n      text = (data.replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\")\n                 .replace(\">\", \"&gt;\").replace(\"\\n\", \"&para;<br>\"))\n      if op == self.DIFF_INSERT:\n        html.append(\"<ins style=\\\"background:\n      elif op == self.DIFF_DELETE:\n        html.append(\"<del style=\\\"background:\n      elif op == self.DIFF_EQUAL:\n        html.append(\"<span>%s</span>\" % text)\n    return \"\".join(html)", "docstring": "Convert a diff array into a pretty HTML report.\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nHTML representation.", "source": "juraj-google-style"}
{"code": "def sendto(self, transport, addr):\n    msg = (bytes(self) + b'\\r\\n')\n    logger.debug('%s:%s < %s', *(addr + (self,)))\n    transport.sendto(msg, addr)", "docstring": "Send request to a given address via given transport.\n\nArgs:\ntransport (asyncio.DatagramTransport):\nWrite transport to send the message on.\naddr (Tuple[str, int]):\nIP address and port pair to send the message to.", "source": "codesearchnet"}
{"code": "def compress(item_list, flag_list):\n    \n    assert len(item_list) == len(flag_list), (\n        'lists should correspond. len(item_list)=%r len(flag_list)=%r' %\n        (len(item_list), len(flag_list)))\n    filtered_items = list(util_iter.iter_compress(item_list, flag_list))\n    return filtered_items", "docstring": "like np.compress but for lists\n\nReturns items in item list where the corresponding item in flag list is\nTrue\n\nArgs:\nitem_list (list): list of items to mask\nflag_list (list): list of booleans used as a mask\n\nReturns:\nlist : filtered_items - masked items", "source": "juraj-google-style"}
{"code": "def parse_value(self, text: str) -> Optional[bool]:\n        \n        if text == \"true\":\n            return True\n        if text == \"false\":\n            return False", "docstring": "Parse boolean value.\n\nArgs:\ntext: String representation of the value.", "source": "juraj-google-style"}
{"code": "class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(AveragePooling1D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Average Pooling layer for 1D inputs.\n\nArgs:\npool_size: An integer or tuple/list of a single integer,\nrepresenting the size of the pooling window.\nstrides: An integer or tuple/list of a single integer, specifying the\nstrides of the pooling operation.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string, one of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, length, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, length)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def reply_all(self, reply_comment):\n        \n        payload = '{ \"Comment\": \"' + reply_comment + '\"}'\n        endpoint = 'https:\n\n        self._make_api_call('post', endpoint, data=payload)", "docstring": "Replies to everyone on the email, including those on the CC line.\n\nWith great power, comes great responsibility.\n\nArgs:\nreply_comment: The string comment to send to everyone on the email.", "source": "juraj-google-style"}
{"code": "def AddLabels(self, labels):\n    for label in labels:\n        if (not self._VALID_LABEL_REGEX.match(label)):\n            raise ValueError('Unsupported label: \"{0:s}\". A label must only consist of alphanumeric characters or underscores.'.format(label))\n    for label in labels:\n        if (label not in self.labels):\n            self.labels.append(label)", "docstring": "Adds labels to the event tag.\n\nArgs:\nlabels (list[str]): labels.\n\nRaises:\nValueError: if a label is malformed.", "source": "codesearchnet"}
{"code": "def __init__(self, xid=None, reason=None, desc=None):\n        \n        super().__init__(xid)\n        self.reason = reason\n        self.desc = desc", "docstring": "Assign parameters to object attributes.\n\nArgs:\nxid (int): Header's xid.\nreason (~pyof.v0x04.asynchronous.port_status.PortReason):\nAddition, deletion or modification.\ndesc (~pyof.v0x04.common.port.Port): Port description.", "source": "juraj-google-style"}
{"code": "def _restore_volume(self, fade):\n    self.device.mute = self.mute\n    if (self.volume == 100):\n        fixed_vol = self.device.renderingControl.GetOutputFixed([('InstanceID', 0)])['CurrentFixed']\n    else:\n        fixed_vol = False\n    if (not fixed_vol):\n        self.device.bass = self.bass\n        self.device.treble = self.treble\n        self.device.loudness = self.loudness\n        if fade:\n            self.device.volume = 0\n            self.device.ramp_to_volume(self.volume)\n        else:\n            self.device.volume = self.volume", "docstring": "Reinstate volume.\n\nArgs:\nfade (bool): Whether volume should be faded up on restore.", "source": "codesearchnet"}
{"code": "def run_server(self, blocking=True):\n    self._server_lock.acquire()\n    try:\n        if self._stop_requested:\n            raise ValueError('Server has already stopped')\n        if self._server_started:\n            raise ValueError('Server has already started running')\n        no_max_message_sizes = [('grpc.max_receive_message_length', -1), ('grpc.max_send_message_length', -1)]\n        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=no_max_message_sizes)\n        debug_service_pb2_grpc.add_EventListenerServicer_to_server(self, self.server)\n        self.server.add_insecure_port('[::]:%d' % self._server_port)\n        self.server.start()\n        self._server_started = True\n    finally:\n        self._server_lock.release()\n    if blocking:\n        while not self._stop_requested:\n            time.sleep(1.0)", "docstring": "Start running the server.\n\nArgs:\nblocking: If `True`, block until `stop_server()` is invoked.\n\nRaises:\nValueError: If server stop has already been requested, or if the server\nhas already started running.", "source": "github-repos"}
{"code": "def reminders_add(self, *, text: str, time: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"text\": text, \"time\": time})\n        return self.api_call(\"reminders.add\", json=kwargs)", "docstring": "Creates a reminder.\n\nArgs:\ntext (str): The content of the reminder. e.g. 'eat a banana'\ntime (str): When this reminder should happen:\nthe Unix timestamp (up to five years from now e.g. '1602288000'),\nthe number of seconds until the reminder (if within 24 hours),\nor a natural language description (Ex. 'in 15 minutes' or 'every Thursday')", "source": "juraj-google-style"}
{"code": "def snakecase(string):\n    \n\n    string = re.sub(r\"[\\-\\.\\s]\", '_', str(string))\n    if not string:\n        return string\n    return lowercase(string[0]) + re.sub(r\"[A-Z]\", lambda matched: '_' + lowercase(matched.group(0)), string[1:])", "docstring": "Convert string into snake case.\nJoin punctuation with underscore\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Snake cased string.", "source": "juraj-google-style"}
{"code": "def _ParseUSNChangeJournal(self, parser_mediator, usn_change_journal):\n    \n    if not usn_change_journal:\n      return\n\n    usn_record_map = self._GetDataTypeMap('usn_record_v2')\n\n    usn_record_data = usn_change_journal.read_usn_record()\n    while usn_record_data:\n      current_offset = usn_change_journal.get_offset()\n\n      try:\n        usn_record = self._ReadStructureFromByteStream(\n            usn_record_data, current_offset, usn_record_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError((\n            'Unable to parse USN record at offset: 0x{0:08x} with error: '\n            '{1!s}').format(current_offset, exception))\n\n      \n      name_offset = usn_record.name_offset - 60\n      utf16_stream = usn_record.name[name_offset:usn_record.name_size]\n\n      try:\n        name_string = utf16_stream.decode('utf-16-le')\n      except (UnicodeDecodeError, UnicodeEncodeError) as exception:\n        name_string = utf16_stream.decode('utf-16-le', errors='replace')\n        parser_mediator.ProduceExtractionWarning((\n            'unable to decode USN record name string with error: '\n            '{0:s}. Characters that cannot be decoded will be replaced '\n            'with \"?\" or \"\\\\ufffd\".').format(exception))\n\n      event_data = NTFSUSNChangeEventData()\n      event_data.file_attribute_flags = usn_record.file_attribute_flags\n      event_data.file_reference = usn_record.file_reference\n      event_data.filename = name_string\n      event_data.offset = current_offset\n      event_data.parent_file_reference = usn_record.parent_file_reference\n      event_data.update_reason_flags = usn_record.update_reason_flags\n      event_data.update_sequence_number = usn_record.update_sequence_number\n      event_data.update_source_flags = usn_record.update_source_flags\n\n      if not usn_record.update_date_time:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      else:\n        date_time = dfdatetime_filetime.Filetime(\n            timestamp=usn_record.update_date_time)\n\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      usn_record_data = usn_change_journal.read_usn_record()", "docstring": "Parses an USN change journal.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nusn_change_journal (pyfsntsfs.usn_change_journal): USN change journal.\n\nRaises:\nParseError: if an USN change journal record cannot be parsed.", "source": "juraj-google-style"}
{"code": "def wait_for_healthy(self, timeout_s=1200, interval=30):\n    timeout = time.time() + timeout_s\n    while self.health() != 'HEALTHY':\n        logging.warning('Waiting for TPU \"%s\" with state \"%s\" and health \"%s\" to become healthy', self.name(), self.state(), self.health())\n        if time.time() + interval > timeout:\n            raise RuntimeError('Timed out waiting for TPU \"%s\" to become healthy' % self.name())\n        time.sleep(interval)\n    logging.warning('TPU \"%s\" is healthy.', self.name())", "docstring": "Wait for TPU to become healthy or raise error if timeout reached.\n\nArgs:\ntimeout_s (int): The timeout in seconds for waiting TPU to become healthy.\ninterval (int): The interval in seconds to poll the TPU for health.\n\nRaises:\nRuntimeError: If the TPU doesn't become healthy by the timeout.", "source": "github-repos"}
{"code": "def managed_sans(self):\n    if (not self.__managed_sans):\n        self.__managed_sans = ManagedSANs(self.__connection)\n    return self.__managed_sans", "docstring": "Gets the Managed SANs API client.\n\nReturns:\nManagedSANs:", "source": "codesearchnet"}
{"code": "def deprecated(replacement=None, message=None):\n\n    def wrap(old):\n\n        def wrapped(*args, **kwargs):\n            msg = ('%s is deprecated' % old.__name__)\n            if (replacement is not None):\n                if isinstance(replacement, property):\n                    r = replacement.fget\n                elif isinstance(replacement, (classmethod, staticmethod)):\n                    r = replacement.__func__\n                else:\n                    r = replacement\n                msg += ('; use %s in %s instead.' % (r.__name__, r.__module__))\n            if (message is not None):\n                msg += ('\\n' + message)\n            warnings.simplefilter('default')\n            warnings.warn(msg, DeprecationWarning, stacklevel=2)\n            return old(*args, **kwargs)\n        return wrapped\n    return wrap", "docstring": "Decorator to mark classes or functions as deprecated,\nwith a possible replacement.\n\nArgs:\nreplacement (callable): A replacement class or method.\nmessage (str): A warning message to be displayed.\n\nReturns:\nOriginal function, but with a warning to use the updated class.", "source": "codesearchnet"}
{"code": "def from_environment_variables(cls):\n    ip = os.environ.get('ONEVIEWSDK_IP', '')\n    image_streamer_ip = os.environ.get('ONEVIEWSDK_IMAGE_STREAMER_IP', '')\n    api_version = int(os.environ.get('ONEVIEWSDK_API_VERSION', OneViewClient.DEFAULT_API_VERSION))\n    ssl_certificate = os.environ.get('ONEVIEWSDK_SSL_CERTIFICATE', '')\n    username = os.environ.get('ONEVIEWSDK_USERNAME', '')\n    auth_login_domain = os.environ.get('ONEVIEWSDK_AUTH_LOGIN_DOMAIN', '')\n    password = os.environ.get('ONEVIEWSDK_PASSWORD', '')\n    proxy = os.environ.get('ONEVIEWSDK_PROXY', '')\n    sessionID = os.environ.get('ONEVIEWSDK_SESSIONID', '')\n    timeout = os.environ.get('ONEVIEWSDK_CONNECTION_TIMEOUT')\n    config = dict(ip=ip, image_streamer_ip=image_streamer_ip, api_version=api_version, ssl_certificate=ssl_certificate, credentials=dict(userName=username, authLoginDomain=auth_login_domain, password=password, sessionID=sessionID), proxy=proxy, timeout=timeout)\n    return cls(config)", "docstring": "Construct OneViewClient using environment variables.\n\nAllowed variables: ONEVIEWSDK_IP (required), ONEVIEWSDK_USERNAME (required), ONEVIEWSDK_PASSWORD (required),\nONEVIEWSDK_AUTH_LOGIN_DOMAIN, ONEVIEWSDK_API_VERSION, ONEVIEWSDK_IMAGE_STREAMER_IP, ONEVIEWSDK_SESSIONID, ONEVIEWSDK_SSL_CERTIFICATE,\nONEVIEWSDK_CONNECTION_TIMEOUT and ONEVIEWSDK_PROXY.\n\nReturns:\nOneViewClient:", "source": "codesearchnet"}
{"code": "def from_event(cls, ion_event):\n        \n        if ion_event.value is not None:\n            args, kwargs = cls._to_constructor_args(ion_event.value)\n        else:\n            \n            \n            args, kwargs = (), {}\n        value = cls(*args, **kwargs)\n        value.ion_event = ion_event\n        value.ion_type = ion_event.ion_type\n        value.ion_annotations = ion_event.annotations\n        return value", "docstring": "Constructs the given native extension from the properties of an event.\n\nArgs:\nion_event (IonEvent): The event to construct the native value from.", "source": "juraj-google-style"}
{"code": "def Register(self, name, constructor):\n    precondition.AssertType(name, Text)\n    if (name in self._constructors):\n        message = \"Duplicated constructors %r and %r for name '%s'\"\n        message %= (constructor, self._constructors[name], name)\n        raise ValueError(message)\n    self._constructors[name] = constructor", "docstring": "Registers a new constructor in the factory.\n\nArgs:\nname: A name associated with given constructor.\nconstructor: A constructor function that creates instances.\n\nRaises:\nValueError: If there already is a constructor associated with given name.", "source": "codesearchnet"}
{"code": "def num_rewards(self):\n    if (not self.is_reward_range_finite):\n        tf.logging.error('Infinite reward range, `num_rewards returning None`')\n        return None\n    if (not self.is_processed_rewards_discrete):\n        tf.logging.error('Processed rewards are not discrete, `num_rewards` returning None')\n        return None\n    (min_reward, max_reward) = self.reward_range\n    return ((max_reward - min_reward) + 1)", "docstring": "Returns the number of distinct rewards.\n\nReturns:\nReturns None if the reward range is infinite or the processed rewards\naren't discrete, otherwise returns the number of distinct rewards.", "source": "codesearchnet"}
{"code": "def rename(script, label='blank', layer_num=None):\n    \n    filter_xml = ''.join([\n        '  <filter name=\"Rename Current Mesh\">\\n',\n        '    <Param name=\"newName\" ',\n        'value=\"{}\" '.format(label),\n        'description=\"New Label\" ',\n        'type=\"RichString\" ',\n        '/>\\n',\n        '  </filter>\\n'])\n    if isinstance(script, mlx.FilterScript):\n        if (layer_num is None) or (layer_num == script.current_layer()):\n            util.write_filter(script, filter_xml)\n            script.layer_stack[script.current_layer()] = label\n        else:\n            cur_layer = script.current_layer()\n            change(script, layer_num)\n            util.write_filter(script, filter_xml)\n            change(script, cur_layer)\n            script.layer_stack[layer_num] = label\n    else:\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Rename layer label\n\nCan be useful for outputting mlp files, as the output file names use\nthe labels.\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlabel (str): new label for the mesh layer\nlayer_num (int): layer number to rename. Default is the\ncurrent layer. Not supported on the file base API.\n\nLayer stack:\nRenames a layer\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def set_trunk_groups(self, vid, value=None, default=False, disable=False):\n    if default:\n        return self.configure_vlan(vid, 'default trunk group')\n    if disable:\n        return self.configure_vlan(vid, 'no trunk group')\n    current_value = self.get(vid)['trunk_groups']\n    failure = False\n    value = make_iterable(value)\n    for name in set(value).difference(current_value):\n        if (not self.add_trunk_group(vid, name)):\n            failure = True\n    for name in set(current_value).difference(value):\n        if (not self.remove_trunk_group(vid, name)):\n            failure = True\n    return (not failure)", "docstring": "Configures the list of trunk groups support on a vlan\n\nThis method handles configuring the vlan trunk group value to default\nif the default flag is set to True.  If the default flag is set\nto False, then this method will calculate the set of trunk\ngroup names to be added and to be removed.\n\nEosVersion:\n4.13.7M\n\nArgs:\nvid (str): The VLAN ID to configure\nvalue (str): The list of trunk groups that should be configured\nfor this vlan id.\ndefault (bool): Configures the trunk group value to default if\nthis value is true\ndisable (bool): Negates the trunk group value if set to true\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "codesearchnet"}
{"code": "async def addFeedData(self, name, items, seqn=None):\n        \n        async with await self.snap() as snap:\n            snap.strict = False\n            return await snap.addFeedData(name, items, seqn=seqn)", "docstring": "Add data using a feed/parser function.\n\nArgs:\nname (str): The name of the feed record format.\nitems (list): A list of items to ingest.\nseqn ((str,int)): An (iden, offs) tuple for this feed chunk.\n\nReturns:\n(int): The next expected offset (or None) if seqn is None.", "source": "juraj-google-style"}
{"code": "def jobs(self):\n    return list(self._cluster_spec.keys())", "docstring": "Returns a list of job names in this cluster.\n\nReturns:\nA list of strings, corresponding to the names of jobs in this cluster.", "source": "github-repos"}
{"code": "def partial_run_setup(self, fetches, feeds=None):\n\n    def _feed_fn(feed):\n        for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:\n            if isinstance(feed, tensor_type):\n                return feed_fn(feed)\n        raise TypeError(f'Feed argument {feed} has invalid type \"{type(feed).__name__}\"')\n    if self._closed:\n        raise RuntimeError('Attempted to use a closed Session.')\n    if self.graph.version == 0:\n        raise RuntimeError('The Session graph is empty. Add operations to the graph before calling run().')\n    if feeds is None:\n        feeds = []\n    feed_list = []\n    is_list_feed = isinstance(feeds, (list, tuple))\n    if not is_list_feed:\n        feeds = [feeds]\n    for feed in feeds:\n        for subfeed in _feed_fn(feed):\n            try:\n                subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True, allow_operation=False)\n                feed_list.append(subfeed_t._as_tf_output())\n            except Exception as e:\n                e.message = f'Cannot interpret argument `feed` key as Tensor: {e.message}'\n                e.args = (e.message,)\n                raise e\n    fetch_handler = _FetchHandler(self._graph, fetches, {})\n\n    def _setup_fn(session, feed_list, fetch_list, target_list):\n        self._extend_graph()\n        return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list, fetch_list, target_list)\n    final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]\n    final_targets = [op._c_op for op in fetch_handler.targets()]\n    return self._do_call(_setup_fn, self._session, feed_list, final_fetches, final_targets)", "docstring": "Sets up a graph with feeds and fetches for partial run.\n\nNOTE: This function is deprecated and we do not expect adding new\nfunctionality to it. Please do not have your code depending on this\nfunction.\n\nThis is EXPERIMENTAL and subject to change.\n\nNote that contrary to `run`, `feeds` only specifies the graph elements.\nThe tensors will be supplied by the subsequent `partial_run` calls.\n\nArgs:\nfetches: A single graph element, or a list of graph elements.\nfeeds: A single graph element, or a list of graph elements.\n\nReturns:\nA handle for partial run.\n\nRaises:\nRuntimeError: If this `Session` is in an invalid state (e.g. has been\nclosed).\nTypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.\ntf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.", "source": "github-repos"}
{"code": "def resolve(self, file_path, follow_symlinks=True, allow_fd=False):\n    if isinstance(file_path, int):\n        if (allow_fd and (sys.version_info >= (3, 3))):\n            return self.get_open_file(file_path).get_object()\n        raise TypeError('path should be string, bytes or os.PathLike (if supported), not int')\n    if follow_symlinks:\n        file_path = make_string_path(file_path)\n        return self.get_object_from_normpath(self.resolve_path(file_path))\n    return self.lresolve(file_path)", "docstring": "Search for the specified filesystem object, resolving all links.\n\nArgs:\nfile_path: Specifies the target FakeFile object to retrieve.\nfollow_symlinks: If `False`, the link itself is resolved,\notherwise the object linked to.\nallow_fd: If `True`, `file_path` may be an open file descriptor\n\nReturns:\nThe FakeFile object corresponding to `file_path`.\n\nRaises:\nIOError: if the object is not found.", "source": "codesearchnet"}
{"code": "def shape(self):\n    if self._dense_shape is None:\n        return tensor_shape.TensorShape(None)\n    return tensor_util.constant_value_as_shape(self._dense_shape)", "docstring": "Gets the `tf.TensorShape` representing the shape of the dense tensor.\n\nReturns:\nA `tf.TensorShape` object.", "source": "github-repos"}
{"code": "def mtf_transformer_paper_tr(size):\n    n = (2 ** size)\n    hparams = mtf_transformer_base()\n    hparams.label_smoothing = 0.1\n    hparams.batch_size = 128\n    hparams.d_model = 1024\n    hparams.d_ff = int((4096 * n))\n    hparams.num_heads = int((8 * n))\n    hparams.shared_embedding_and_softmax_weights = False\n    hparams.learning_rate_decay_steps = 51400\n    return hparams", "docstring": "Config for translation experiments.\n\nTrain these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)\n\nThe size parameter is an integer that controls the number of heads and the\nsize of the size of the feedforward hidden layers.  Increasing size by 1\ndoubles each of these.\n\nArgs:\nsize: an integer\nReturns:\na hparams object", "source": "codesearchnet"}
{"code": "def _starts_with_drive_letter(self, file_path):\n        \n        colon = self._matching_string(file_path, ':')\n        return (self.is_windows_fs and len(file_path) >= 2 and\n                file_path[:1].isalpha and (file_path[1:2]) == colon)", "docstring": "Return True if file_path starts with a drive letter.\n\nArgs:\nfile_path: the full path to be examined.\n\nReturns:\n`True` if drive letter support is enabled in the filesystem and\nthe path starts with a drive letter.", "source": "juraj-google-style"}
{"code": "def create_db(file_pth):\n    conn = sqlite3.connect(file_pth)\n    c = conn.cursor()\n    c.execute('DROP TABLE IF EXISTS library_spectra_source')\n    c.execute('CREATE TABLE library_spectra_source (\\n                          id integer PRIMARY KEY,\\n                          name text NOT NULL,\\n                          created_at date,\\n                          parsing_software text\\n                          )')\n    c.execute('DROP TABLE IF EXISTS metab_compound')\n    c.execute('CREATE TABLE metab_compound (\\n                  inchikey_id text PRIMARY KEY,\\n                  name text,\\n                  pubchem_id text,\\n                  chemspider_id text,\\n                  other_names text,\\n                  exact_mass real,\\n                  molecular_formula text,\\n                  molecular_weight real,\\n                  compound_class text,\\n                  smiles text,\\n                  created_at date,\\n                  updated_at date\\n\\n                                           )')\n    c.execute('DROP TABLE IF EXISTS library_spectra_meta')\n    c.execute('CREATE TABLE library_spectra_meta (\\n                                   id integer PRIMARY KEY,\\n                                   name text,\\n                                   collision_energy text,\\n                                   ms_level real,\\n                                   accession text NOT NULL,\\n                                   resolution text,\\n                                   polarity integer,\\n                                   fragmentation_type text,\\n                                   precursor_mz real,\\n                                   precursor_type text,\\n                                   instrument_type text,\\n                                   instrument text,\\n                                   copyright text,\\n                                   column text,\\n                                   mass_accuracy real,\\n                                   mass_error real,\\n                                   origin text,\\n                                   splash text,\\n                                   retention_index real, \\n                                   retention_time real,\\n                                   library_spectra_source_id integer NOT NULL,\\n                                   inchikey_id text NOT NULL,\\n                                   FOREIGN KEY(library_spectra_source_id) REFERENCES library_spectra_source(id),\\n                                   FOREIGN KEY(inchikey_id) REFERENCES metab_compound(inchikey_id)\\n                                   )')\n    c.execute('DROP TABLE IF EXISTS library_spectra')\n    c.execute('CREATE TABLE library_spectra (\\n                                          id integer PRIMARY KEY,\\n                                          mz real NOT NULL,\\n                                          i real NOT NULL,\\n                                          other text,\\n                                          library_spectra_meta_id integer NOT NULL,\\n                                          FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id)\\n                                          )')\n    c.execute('DROP TABLE IF EXISTS library_spectra_annotation')\n    c.execute('CREATE TABLE library_spectra_annotation (\\n                                          id integer PRIMARY KEY,\\n                                          mz real,\\n                      
                    tentative_formula text,\\n                                          mass_error real,\\n                                          library_spectra_meta_id integer NOT NULL,\\n                                          FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id)\\n                                          )')", "docstring": "Create an empty SQLite database for library spectra.\n\nExample:\n>>> from msp2db.db import create_db\n>>> db_pth = 'library.db'\n>>> create_db(file_pth=db_pth)\n\nArgs:\nfile_pth (str): File path for SQLite database", "source": "codesearchnet"}
{"code": "def __init__(self, ctx, name, member_map, ast):\n    super().__init__(ctx, name, member_map, ast)\n    self.real_module = ctx.convert.constant_to_value(ast, subst=datatypes.AliasingDict(), node=ctx.root_node)", "docstring": "Initialize the overlay.\n\nArgs:\nctx: Instance of context.Context.\nname: A string containing the name of the underlying module.\nmember_map: Dict of str to abstract.BaseValues that provide type\ninformation not available in the underlying module.\nast: An pytd.TypeDeclUnit containing the AST for the underlying module.\nUsed to access type information for members of the module that are not\nexplicitly provided by the overlay.", "source": "github-repos"}
{"code": "def get_signed_url(self, file_id):\n    if (not is_valid_uuid(file_id)):\n        raise StorageArgumentException('Invalid UUID for file_id: {0}'.format(file_id))\n    return self._authenticated_request.to_endpoint('file/{}/content/secure_link/'.format(file_id)).return_body().get()['signed_url']", "docstring": "Get a signed unauthenticated URL.\n\nIt can be used to download the file content without the need for a\ntoken. The signed URL expires after 5 seconds.\n\nArgs:\nfile_id (str): The UUID of the file to get the link for.\n\nReturns:\nThe signed url as a string\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def create_filter(condition: Callable[[ProcessorPart], bool]) -> PartProcessor:\n\n    async def filter_with_condition(part: ProcessorPart) -> AsyncIterable[ProcessorPart]:\n        if condition(part):\n            yield part\n    return _PartProcessorWrapper(filter_with_condition)", "docstring": "Creates a processor that filters parts based on `condition`.\n\nArgs:\ncondition: a part is returned by this processor iff `condition(part)=True`\n\nReturns:\na processor filtering the input stream", "source": "github-repos"}
{"code": "def _container_start_handler_factory(ion_type, before_yield=(lambda c, ctx: None)):\n    assert ion_type.is_container\n\n    @coroutine\n    def container_start_handler(c, ctx):\n        before_yield(c, ctx)\n        (yield)\n        (yield ctx.event_transition(IonEvent, IonEventType.CONTAINER_START, ion_type, value=None))\n    return container_start_handler", "docstring": "Generates handlers for tokens that begin with container start characters.\n\nArgs:\nion_type (IonType): The type of this container.\nbefore_yield (Optional[callable]): Called at initialization. Accepts the first character's ordinal and the\ncurrent context; performs any necessary initialization actions.", "source": "codesearchnet"}
{"code": "def __init__(self, windowfn, trigger=None, accumulation_mode=None, timestamp_combiner=None, allowed_lateness=0):\n    if isinstance(windowfn, Windowing):\n        windowing = windowfn\n        windowfn = windowing.windowfn\n        trigger = trigger or windowing.triggerfn\n        accumulation_mode = accumulation_mode or windowing.accumulation_mode\n        timestamp_combiner = timestamp_combiner or windowing.timestamp_combiner\n    self.windowing = Windowing(windowfn, trigger, accumulation_mode, timestamp_combiner, allowed_lateness)\n    super().__init__(self.WindowIntoFn(self.windowing))", "docstring": "Initializes a WindowInto transform.\n\nArgs:\nwindowfn (Windowing, WindowFn): Function to be used for windowing.\ntrigger: (optional) Trigger used for windowing, or None for default.\naccumulation_mode: (optional) Accumulation mode used for windowing,\nrequired for non-trivial triggers.\ntimestamp_combiner: (optional) Timestamp combniner used for windowing,\nor None for default.", "source": "github-repos"}
{"code": "def _ScanFileSystem(self, scan_node, base_path_specs):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.ScannerError('Invalid or missing file system scan node.')\n\n    base_path_specs.append(scan_node.path_spec)", "docstring": "Scans a file system scan node for file systems.\n\nArgs:\nscan_node (SourceScanNode): file system scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the scan node is invalid.", "source": "juraj-google-style"}
{"code": "def list_to_tuple(structure):\n\n    def sequence_fn(instance, args):\n        if isinstance(instance, list):\n            return tuple(args)\n        return nest_util.sequence_like(instance, args)\n    return nest_util.pack_sequence_as(nest_util.Modality.CORE, structure, flatten(structure), False, sequence_fn=sequence_fn)", "docstring": "Replace all lists with tuples.\n\nThe fork of nest that tf.data uses treats lists as atoms, while\ntf.nest treats them as structures to recurse into. Keras has chosen to adopt\nthe latter convention, and must therefore deeply replace all lists with tuples\nbefore passing structures to Dataset.from_generator.\n\nArgs:\nstructure: A nested structure to be remapped.\n\nReturns:\nstructure mapped to replace all lists with tuples.", "source": "github-repos"}
{"code": "def select_one(self, selector):\n        \n        result = list(self.select(selector))\n        if len(result) > 1:\n            raise ValueError(\"Found more than one model matching %s: %r\" % (selector, result))\n        if len(result) == 0:\n            return None\n        return result[0]", "docstring": "Query this document for objects that match the given selector.\nRaises an error if more than one object is found.  Returns\nsingle matching object, or None if nothing is found\n\nArgs:\nselector (JSON-like query dictionary) : you can query by type or by\nname, e.g. ``{\"type\": HoverTool}``, ``{\"name\": \"mycircle\"}``\n\nReturns:\nModel or None", "source": "juraj-google-style"}
{"code": "def activate(self, user):\n        \n        org_user = self.organization.add_user(user, **self.activation_kwargs())\n        self.invitee = user\n        self.save()\n        return org_user", "docstring": "Updates the `invitee` value and saves the instance\n\nProvided as a way of extending the behavior.\n\nArgs:\nuser: the newly created user\n\nReturns:\nthe linking organization user", "source": "juraj-google-style"}
{"code": "def get_value_for_datastore(self, model_instance):\n    \n    value = super(JsonProperty, self).get_value_for_datastore(model_instance)\n    if not value:\n      return None\n    json_value = value\n    if not isinstance(value, dict):\n      json_value = value.to_json()\n    if not json_value:\n      return None\n    return datastore_types.Text(json.dumps(\n        json_value, sort_keys=True, cls=JsonEncoder))", "docstring": "Gets value for datastore.\n\nArgs:\nmodel_instance: instance of the model class.\n\nReturns:\ndatastore-compatible value.", "source": "juraj-google-style"}
{"code": "def tokenize(self, data, *args, **kwargs):\n        \n        self.lexer.input(data)\n        tokens = list()\n        while True:\n            token = self.lexer.token()\n            if not token:\n                break\n            tokens.append(token)\n        return tokens", "docstring": "Invoke the lexer on an input string an return the list of tokens.\nThis is relatively inefficient and should only be used for\ntesting/debugging as it slurps up all tokens into one list.\nArgs:\ndata: The input to be tokenized.\nReturns:\nA list of LexTokens", "source": "juraj-google-style"}
{"code": "def transform_to_mods_mono(marc_xml, uuid, url):\n    marc_xml = _read_content_or_path(marc_xml)\n    transformed = xslt_transformation(marc_xml, _absolute_template_path('MARC21slim2MODS3-4-NDK.xsl'))\n    return _apply_postprocessing(marc_xml=marc_xml, xml=transformed, func=mods_postprocessor.postprocess_monograph, uuid=uuid, url=url)", "docstring": "Convert `marc_xml` to MODS data format.\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "codesearchnet"}
{"code": "def clear_list(self, **kwargs):\n    path = self._get_id_path('clear')\n    kwargs.update({'session_id': self.session_id})\n    payload = {}\n    response = self._POST(path, kwargs, payload)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Clears all of the items within a list. This is an irreversible action\nand should be treated with caution.\n\nA valid session id is required.\n\nArgs:\nconfirm: True (do it) | False (don't do it)\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def __init__(self, min_length=None, max_length=None, empty=True):\n        \n        super(StringTypeChecker, self).__init__(\n            iter_type=str, min_length=min_length, max_length=max_length, empty=empty\n        )", "docstring": "Initialization method.\n\nArgs:\nmin_length (int): minimum length of the string (included).\nmax_length (int): maximum length of the string (included).\nempty (bool): whether empty string is allowed.", "source": "juraj-google-style"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        \n        binary = \"{0:b}\".format(abs(self.value))\n        binary = (\"0\" * (64 - (len(binary) % 64))) + binary\n\n        \n        if self.value < 0:\n            binary = binary.replace('1', 'i')\n            binary = binary.replace('0', '1')\n            binary = binary.replace('i', '0')\n\n            pivot = binary.rfind('0')\n            binary = binary[0:pivot] + '1' + ('0' * len(binary[pivot + 1:]))\n\n        \n        hexadecimal = b''\n        for i in range(0, len(binary), 8):\n            byte = binary[i:i + 8]\n            byte = int(byte, 2)\n            hexadecimal += struct.pack('!B', byte)\n\n        self.length = len(hexadecimal)\n        super(BigInteger, self).write(ostream, kmip_version=kmip_version)\n        ostream.write(hexadecimal)", "docstring": "Write the encoding of the BigInteger to the output stream.\n\nArgs:\nostream (Stream): A buffer to contain the encoded bytes of a\nBigInteger object. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def listen_forever(self, timeout_ms=30000, exception_handler=None,\n                       bad_sync_timeout=5):\n        \n        _bad_sync_timeout = bad_sync_timeout\n        self.should_listen = True\n        while (self.should_listen):\n            try:\n                self._sync(timeout_ms)\n                _bad_sync_timeout = bad_sync_timeout\n            \n            except MatrixRequestError as e:\n                logger.warning(\"A MatrixRequestError occured during sync.\")\n                if e.code >= 500:\n                    logger.warning(\"Problem occured serverside. Waiting %i seconds\",\n                                   bad_sync_timeout)\n                    sleep(bad_sync_timeout)\n                    _bad_sync_timeout = min(_bad_sync_timeout * 2,\n                                            self.bad_sync_timeout_limit)\n                elif exception_handler is not None:\n                    exception_handler(e)\n                else:\n                    raise\n            except Exception as e:\n                logger.exception(\"Exception thrown during sync\")\n                if exception_handler is not None:\n                    exception_handler(e)\n                else:\n                    raise", "docstring": "Keep listening for events forever.\n\nArgs:\ntimeout_ms (int): How long to poll the Home Server for before\nretrying.\nexception_handler (func(exception)): Optional exception handler\nfunction which can be used to handle exceptions in the caller\nthread.\nbad_sync_timeout (int): Base time to wait after an error before\nretrying. Will be increased according to exponential backoff.", "source": "juraj-google-style"}
{"code": "def write_updates_to_csv(self, updates):\n    with open(self._csv_file_name, 'w') as csvfile:\n        csvwriter = self.csv_writer(csvfile)\n        csvwriter.writerow(CSV_COLUMN_HEADERS)\n        for update in updates:\n            row = [update.name, update.current_version, update.new_version, update.prelease]\n            csvwriter.writerow(row)", "docstring": "Given a list of updates, write the updates out to the provided CSV\nfile.\n\nArgs:\nupdates (list): List of Update objects.", "source": "codesearchnet"}
{"code": "def convert_shape(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting shape ...')\n\n    def target_layer(x):\n        import tensorflow as tf\n        return tf.shape(x)\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert shape operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def lengths_to_area_mask(feature_length, length, max_area_size):\n    paddings = tf.cast(tf.expand_dims(tf.logical_not(tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)\n    (_, _, area_sum, _, _) = compute_area_features(paddings, max_area_width=max_area_size)\n    mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])\n    return mask", "docstring": "Generates a non-padding mask for areas based on lengths.\n\nArgs:\nfeature_length: a tensor of [batch_size]\nlength: the length of the batch\nmax_area_size: the maximum area size considered\nReturns:\nmask: a tensor in shape of [batch_size, num_areas]", "source": "codesearchnet"}
{"code": "def single_node_env(args):\n  \n  \n  if isinstance(args, list):\n      sys.argv = args\n  elif args.argv:\n      sys.argv = args.argv\n\n  \n  num_gpus = args.num_gpus if 'num_gpus' in args else 1\n  util.single_node_env(num_gpus)", "docstring": "Sets up environment for a single-node TF session.\n\nArgs:\n:args: command line arguments as either argparse args or argv list", "source": "juraj-google-style"}
{"code": "def __init__(self, message, color, exc=None):\n        \n        super(Status, self).__init__()\n        self.msg = message\n        self.color = color\n        self.exc = exc", "docstring": "Initialize the exception.\n\nArgs:\nmessage: A six character status message to display on the terminal.\ncolor: An ANSI color code value to use while displaying the\nmessage.\nexc: An exception that caused the non-standard status message. If\nexc is supplied, it will be raised after the status message is\ndisplayed.", "source": "juraj-google-style"}
{"code": "def _binary_2d_label_to_2d_sparse_value(labels):\n    indices = []\n    values = []\n    batch = 0\n    for row in labels:\n        label = 0\n        xi = 0\n        for x in row:\n            if x == 1:\n                indices.append([batch, xi])\n                values.append(label)\n                xi += 1\n            else:\n                assert x == 0\n            label += 1\n        batch += 1\n    shape = [len(labels), len(labels[0])]\n    return sparse_tensor.SparseTensorValue(np.array(indices, np.int64), np.array(values, np.int64), np.array(shape, np.int64))", "docstring": "Convert dense 2D binary indicator to sparse ID.\n\nOnly 1 values in `labels` are included in result.\n\nArgs:\nlabels: Dense 2D binary indicator, shape [batch_size, num_classes].\n\nReturns:\n`SparseTensorValue` of shape [batch_size, num_classes], where num_classes\nis the number of `1` values in each row of `labels`. Values are indices\nof `1` values along the last dimension of `labels`.", "source": "github-repos"}
{"code": "def parse_original_feature_from_example(example, feature_name):\n    feature = get_example_features(example)[feature_name]\n    feature_type = feature.WhichOneof('kind')\n    original_value = proto_value_for_feature(example, feature_name)\n    return OriginalFeatureList(feature_name, original_value, feature_type)", "docstring": "Returns an `OriginalFeatureList` for the specified feature_name.\n\nArgs:\nexample: An example.\nfeature_name: A string feature name.\n\nReturns:\nA filled in `OriginalFeatureList` object representing the feature.", "source": "codesearchnet"}
{"code": "def _deconstruct_single_qubit_matrix_into_gate_turns(mat: np.ndarray) -> Tuple[(float, float, float)]:\n    (pre_phase, rotation, post_phase) = linalg.deconstruct_single_qubit_matrix_into_angles(mat)\n    tau = (2 * np.pi)\n    xy_turn = (rotation / tau)\n    xy_phase_turn = (0.25 - (pre_phase / tau))\n    total_z_turn = ((post_phase + pre_phase) / tau)\n    return (_signed_mod_1(xy_turn), _signed_mod_1(xy_phase_turn), _signed_mod_1(total_z_turn))", "docstring": "Breaks down a 2x2 unitary into gate parameters.\n\nArgs:\nmat: The 2x2 unitary matrix to break down.\n\nReturns:\nA tuple containing the amount to rotate around an XY axis, the phase of\nthat axis, and the amount to phase around Z. All results will be in\nfractions of a whole turn, with values canonicalized into the range\n[-0.5, 0.5).", "source": "codesearchnet"}
{"code": "def expand_groups(grp):\n    \n    p = re.compile(r\"(?P<name>.+)\\[(?P<start>\\d+)-(?P<end>\\d+)\\]\")\n    m = p.match(grp)\n    if m is not None:\n        s = int(m.group('start'))\n        e = int(m.group('end'))\n        n = m.group('name')\n        return list(map(lambda x: n + str(x), range(s, e + 1)))\n    else:\n        return [grp]", "docstring": "Expand group names.\n\nArgs:\ngrp (string): group names to expand\n\nReturns:\nlist of groups\n\nExamples:\n\n* grp[1-3] will be expanded to [grp1, grp2, grp3]\n* grp1 will be expanded to [grp1]", "source": "juraj-google-style"}
{"code": "def setup_callbacks(self, callbacks, monitors):\n        \n        assert isinstance(callbacks, list), callbacks\n        assert isinstance(monitors, list), monitors\n        describe_trainable_vars()   \n\n        self.register_callback(MaintainStepCounter())\n        for cb in callbacks:\n            self.register_callback(cb)\n        for cb in self._callbacks:\n            assert not isinstance(cb, MonitorBase), \"Monitor cannot be pre-registered for now!\"\n        registered_monitors = []\n        for m in monitors:\n            if self.register_callback(m):\n                registered_monitors.append(m)\n        self.monitors = Monitors(registered_monitors)\n        self.register_callback(self.monitors)   \n\n        \n        logger.info(\"Setup callbacks graph ...\")\n        self._callbacks = Callbacks(self._callbacks)\n        self._callbacks.setup_graph(weakref.proxy(self))", "docstring": "Setup callbacks and monitors. Must be called after the main graph is built.\n\nArgs:\ncallbacks ([Callback]):\nmonitors ([MonitorBase]):", "source": "juraj-google-style"}
{"code": "def __init__(self, location, field_type):\n        \n        super(OutputContextField, self).__init__(location, field_type)\n        self.location = location\n        self.field_type = field_type\n        self.validate()", "docstring": "Construct a new OutputContextField object for the field at the given location.\n\nArgs:\nlocation: Location, specifying where the field was declared. The Location\nmust point to a property, and that property's value is output as the result.\nfield_type: GraphQL type object, specifying the type of the field being output\n\nReturns:\nnew OutputContextField object", "source": "juraj-google-style"}
{"code": "def language_from_str(language_def, metamodel):\n    if (type(language_def) is not text):\n        raise TextXError('textX accepts only unicode strings.')\n    if metamodel.debug:\n        metamodel.dprint('*** PARSING LANGUAGE DEFINITION ***')\n    if (metamodel.debug in textX_parsers):\n        parser = textX_parsers[metamodel.debug]\n    else:\n        parser = ParserPython(textx_model, comment_def=comment, ignore_case=False, reduce_tree=False, memoization=metamodel.memoization, debug=metamodel.debug, file=metamodel.file)\n        textX_parsers[metamodel.debug] = parser\n    try:\n        parse_tree = parser.parse(language_def)\n    except NoMatch as e:\n        (line, col) = parser.pos_to_linecol(e.position)\n        raise TextXSyntaxError(text(e), line, col)\n    lang_parser = visit_parse_tree(parse_tree, TextXVisitor(parser, metamodel))\n    metamodel.validate()\n    lang_parser.metamodel = metamodel\n    metamodel._parser_blueprint = lang_parser\n    if metamodel.debug:\n        PMDOTExporter().exportFile(lang_parser.parser_model, '{}_parser_model.dot'.format(metamodel.rootcls.__name__))\n    return lang_parser", "docstring": "Constructs parser and initializes metamodel from language description\ngiven in textX language.\n\nArgs:\nlanguage_def (str): A language description in textX.\nmetamodel (TextXMetaModel): A metamodel to initialize.\n\nReturns:\nParser for the new language.", "source": "codesearchnet"}
{"code": "def _preprocess_grad(grad, body_graph_output, while_op_input, while_op_output):\n    if not _is_trainable(body_graph_output):\n        return None\n    if while_op_output.dtype in (dtypes.resource, dtypes.variant) and default_gradient.supports_default_grad(while_op_input) and (grad is None):\n        return _zeros_like(while_op_input, while_op_output)\n    if isinstance(grad, indexed_slices.IndexedSlices):\n        return ops.convert_to_tensor(grad)\n    return grad", "docstring": "Returns the initial gradient to be used for a given output tensor.\n\nArgs:\ngrad: the original gradient Tensor passed to the gradient function.\nbody_graph_output: the corresponding Tensor in the body graph.\nwhile_op_input: the corresponding Tensor input of the While op.\nwhile_op_output: the corresponding Tensor output of the While op.\n\nReturns:\nA Tensor or None.", "source": "github-repos"}
{"code": "def _expand_value_set_url_using_service(self, value_set_url: str, value_set_version: Optional[str], terminology_service_url: str, auth: Optional[Union[Tuple[str, str], str]]) -> value_set_pb2.ValueSet:\n    params = {'url': value_set_url}\n    if value_set_version is not None:\n        params['valueSetVersion'] = value_set_version\n    session_ = self.create_session()\n    session_.headers.update({'Accept': 'application/json'})\n    if auth is not None:\n        if isinstance(auth, tuple) and len(auth) == 2:\n            logging.debug('Using Basic auth for auth')\n            session_.auth = auth\n        else:\n            logging.debug('Using Bearer token for auth')\n            session_.headers['Authorization'] = auth\n    logging.info('Expanding value set url: %s version: %s using terminology service: %s', value_set_url, value_set_version, terminology_service_url)\n    with session_ as session:\n\n        def request_func(offset: int) -> requests.Response:\n            return session.get(terminology_service_url, params={'offset': offset, **params})\n        expanded_value_set = _paginate_expand_value_set_request(request_func, value_set_url, value_set_version)\n    logging.info('Retrieved %d codes for value set url: %s version: %s using terminology service: %s', len(expanded_value_set.expansion.contains), value_set_url, value_set_version, terminology_service_url)\n    return expanded_value_set", "docstring": "Expands the value set using the requested terminology service.\n\nRequests an expansion of the value set from the terminology\nserver at `terminology_service_url` for the given URL and version.\n\nArgs:\nvalue_set_url: The url of the value set to expand.\nvalue_set_version: The version of the value set to retrieve or None for\nthe latest version.\nterminology_service_url: The url of the terminology service to use when\nexpanding `value_set_url`.\nauth: A tuple of (user_name, password) to use when performing basic auth\nwith the terminology service or a singular token added to the\nAuthorization header or None if no authentication is required.\n\nReturns:\nThe current definition of the value set from the server with its expanded\ncodes present.", "source": "github-repos"}
{"code": "def __init__(self, name) -> None:\n    if not isinstance(name, str):\n        raise ValueError('name for name_scope must be a string.')\n    self._name = name\n    self._exit_fns = []", "docstring": "Initialize the context manager.\n\nArgs:\nname: The prefix to use on all names created within the name scope.\n\nRaises:\nValueError: If name is not a string.", "source": "github-repos"}
{"code": "def _map_captures_to_created_tensors(original_captures, tensor_map, function):\n    export_captures = []\n    for exterior, interior in original_captures:\n        mapped_resource = tensor_map.get(exterior, None)\n        if mapped_resource is None:\n            _raise_untracked_capture_error(function.name, exterior, interior)\n        export_captures.append(mapped_resource)\n    return export_captures", "docstring": "Maps eager tensors captured by a function to Graph resources for export.\n\nArgs:\noriginal_captures: A dictionary mapping from tensors captured by the\nfunction to interior placeholders for those tensors (inside the function\nbody).\ntensor_map: A dictionary mapping from resource tensors owned by the eager\ncontext to resource tensors in the exported graph.\nfunction: Function with the original captures. Only used when raising the\nAssertionError.\n\nReturns:\nA list of stand-in tensors which belong to the exported graph, corresponding\nto the function's captures.\n\nRaises:\nAssertionError: If the function references a resource which is not part of\n`tensor_map`.", "source": "github-repos"}
{"code": "def from_json(cls, data):\n    assert ('data_type' in data), 'Required keyword \"data_type\" is missing!'\n    keys = ('data_type', 'unit', 'analysis_period', 'metadata')\n    for key in keys:\n        if (key not in data):\n            data[key] = None\n    data_type = DataTypeBase.from_json(data['data_type'])\n    ap = AnalysisPeriod.from_json(data['analysis_period'])\n    return cls(data_type, data['unit'], ap, data['metadata'])", "docstring": "Create a header from a dictionary.\n\nArgs:\ndata: {\n\"data_type\": {}, //Type of data (e.g. Temperature)\n\"unit\": string,\n\"analysis_period\": {} // A Ladybug AnalysisPeriod\n\"metadata\": {}, // A dictionary of metadata\n}", "source": "codesearchnet"}
{"code": "def actnorm_3d(name, x, logscale_factor=3.):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    x = tf.unstack(x, axis=1)\n    x_normed = []\n    for ind, x_step in enumerate(x):\n      x_step, _ = actnorm(\"actnorm_%d\" % ind, x_step,\n                          logscale_factor=logscale_factor)\n      x_normed.append(x_step)\n    return tf.stack(x_normed, axis=1), None", "docstring": "Applies actnorm to each time-step independently.\n\nThere are a total of 2*n_channels*n_steps parameters learnt.\n\nArgs:\nname: variable scope.\nx: 5-D Tensor, (NTHWC)\nlogscale_factor: Increases the learning rate of the scale by\nlogscale_factor.\nReturns:\nx: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.", "source": "juraj-google-style"}
{"code": "def run_config(self, project, run=None, entity=None):\n    query = gql('\\n        query Model($name: String!, $entity: String!, $run: String!) {\\n            model(name: $name, entityName: $entity) {\\n                bucket(name: $run) {\\n                    config\\n                    commit\\n                    patch\\n                    files(names: [\"wandb-metadata.json\"]) {\\n                        edges {\\n                            node {\\n                                url\\n                            }\\n                        }\\n                    }\\n                }\\n            }\\n        }\\n        ')\n    response = self.gql(query, variable_values={'name': project, 'run': run, 'entity': entity})\n    if (response['model'] == None):\n        raise ValueError('Run {}/{}/{} not found'.format(entity, project, run))\n    run = response['model']['bucket']\n    commit = run['commit']\n    patch = run['patch']\n    config = json.loads((run['config'] or '{}'))\n    if (len(run['files']['edges']) > 0):\n        url = run['files']['edges'][0]['node']['url']\n        res = requests.get(url)\n        res.raise_for_status()\n        metadata = res.json()\n    else:\n        metadata = {}\n    return (commit, config, patch, metadata)", "docstring": "Get the relevant configs for a run\n\nArgs:\nproject (str): The project to download, (can include bucket)\nrun (str, optional): The run to download\nentity (str, optional): The entity to scope this project to.", "source": "codesearchnet"}
{"code": "def release(self, force=False):\n        \n        \n        if not self.islocked:\n            return\n\n        if self.owned_by_self or force:\n            os.remove(self.path)\n        else:\n            raise UnableToReleaseLockError(self)", "docstring": "Release lock.\n\nTo release a lock, we must already own the lock.\n\nArguments:\nforce (bool, optional): If true, ignore any existing lock owner.\n\nRaises:\nUnableToReleaseLockError: If the lock is claimed by another\nprocess (not raised if force option is used).", "source": "juraj-google-style"}
{"code": "def eps(self, nodeids=None):\n        \n        if nodeids is None: nodeids = self._nodeids\n        _eps = self._eps\n        return [_eps[nodeid] for nodeid in nodeids]", "docstring": "Return the EPs with the given *nodeid*, or all EPs.\n\nArgs:\nnodeids: an iterable of nodeids of EPs to return; if\n`None`, return all EPs", "source": "juraj-google-style"}
{"code": "def emit(signal, *args, **kwargs):\n    \n    if signal not in __receivers:\n        return\n\n    receivers = __live_receivers(signal)\n\n    for func in receivers:\n        func(*args, **kwargs)", "docstring": "Emit a signal by serially calling each registered signal receiver for\nthe `signal`.\n\nNote:\nThe receiver must accept the *args and/or **kwargs that have been\npassed to it. There expected parameters are not dictated by\nmixbox.\n\nArgs:\nsignal: A signal identifier or name.\n*args: A variable-length argument list to pass to the receiver.\n**kwargs: Keyword-arguments to pass to the receiver.", "source": "juraj-google-style"}
{"code": "def distance_similarity(a, b, p, T=CLOSE_DISTANCE_THRESHOLD):\n    d = distance_to_line(a, b, p)\n    r = ((((- 1) / float(T)) * abs(d)) + 1)\n    return (r if (r > 0) else 0)", "docstring": "Computes the distance similarity between a line segment\nand a point\n\nArgs:\na ([float, float]): x and y coordinates. Line start\nb ([float, float]): x and y coordinates. Line end\np ([float, float]): x and y coordinates. Point to compute the distance\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "codesearchnet"}
{"code": "def tables_list(self, dataset_name, max_results=0, page_token=None):\n    url = (Api._ENDPOINT + (Api._TABLES_PATH % (dataset_name.project_id, dataset_name.dataset_id, '', '')))\n    args = {}\n    if (max_results != 0):\n        args['maxResults'] = max_results\n    if (page_token is not None):\n        args['pageToken'] = page_token\n    return google.datalab.utils.Http.request(url, args=args, credentials=self.credentials)", "docstring": "Issues a request to retrieve a list of tables.\n\nArgs:\ndataset_name: the name of the dataset to enumerate.\nmax_results: an optional maximum number of tables to retrieve.\npage_token: an optional token to continue the retrieval.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def keras_model_to_graph_def(keras_layer):\n    input_to_layer = {}\n    model_name_to_output = {}\n    g = GraphDef()\n    prev_node_name = None\n    for (name_scope, layer) in _walk_layers(keras_layer):\n        if _is_model(layer):\n            (input_to_layer, model_name_to_output, prev_node_name) = _update_dicts(name_scope, layer, input_to_layer, model_name_to_output, prev_node_name)\n            continue\n        layer_config = layer.get('config')\n        node_name = _scoped_name(name_scope, layer_config.get('name'))\n        node_def = g.node.add()\n        node_def.name = node_name\n        if (layer.get('class_name') is not None):\n            keras_cls_name = layer.get('class_name').encode('ascii')\n            node_def.attr['keras_class'].s = keras_cls_name\n        if (layer_config.get('dtype') is not None):\n            tf_dtype = dtypes.as_dtype(layer_config.get('dtype'))\n            node_def.attr['dtype'].type = tf_dtype.as_datatype_enum\n        if (layer.get('inbound_nodes') is not None):\n            for maybe_inbound_node in layer.get('inbound_nodes'):\n                inbound_nodes = _norm_to_list_of_layers(maybe_inbound_node)\n                for [name, size, index, _] in inbound_nodes:\n                    inbound_name = _scoped_name(name_scope, name)\n                    inbound_node_names = model_name_to_output.get(inbound_name, [inbound_name])\n                    node_def.input.append(inbound_node_names[index])\n        elif (prev_node_name is not None):\n            node_def.input.append(prev_node_name)\n        if (node_name in input_to_layer):\n            node_def.input.append(input_to_layer.get(node_name))\n        prev_node_name = node_def.name\n    return g", "docstring": "Returns a GraphDef representation of the Keras model in a dict form.\n\nNote that it only supports models that implemented to_json().\n\nArgs:\nkeras_layer: A dict from Keras model.to_json().\n\nReturns:\nA GraphDef representation of the layers in the model.", "source": "codesearchnet"}
{"code": "def copy_to(self, new_key, bucket=None):\n    if (bucket is None):\n        bucket = self._bucket\n    try:\n        new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)\n    except Exception as e:\n        raise e\n    return Item(bucket, new_key, new_info, context=self._context)", "docstring": "Copies this item to the specified new key.\n\nArgs:\nnew_key: the new key to copy this item to.\nbucket: the bucket of the new item; if None (the default) use the same bucket.\nReturns:\nAn Item corresponding to new key.\nRaises:\nException if there was an error copying the item.", "source": "codesearchnet"}
{"code": "def get_payments(self):\n    query = '\\n          query {\\n            user {\\n              payments {\\n                nmrAmount\\n                round {\\n                  number\\n                  openTime\\n                  resolveTime\\n                  resolvedGeneral\\n                  resolvedStaking\\n                }\\n                tournament\\n                usdAmount\\n              }\\n            }\\n          }\\n        '\n    data = self.raw_query(query, authorization=True)['data']\n    payments = data['user']['payments']\n    for p in payments:\n        utils.replace(p['round'], 'openTime', utils.parse_datetime_string)\n        utils.replace(p['round'], 'resolveTime', utils.parse_datetime_string)\n        utils.replace(p, 'usdAmount', utils.parse_float_string)\n        utils.replace(p, 'nmrAmount', utils.parse_float_string)\n    return payments", "docstring": "Get all your payments.\n\nReturns:\nlist of dicts: payments\n\nFor each payout in the list, a dict contains the following items:\n\n* nmrAmount (`decimal.Decimal`)\n* usdAmount (`decimal.Decimal`)\n* tournament (`str`)\n* round (`dict`)\n* number (`int`)\n* openTime (`datetime`)\n* resolveTime (`datetime`)\n* resolvedGeneral (`bool`)\n* resolvedStaking (`bool`)\n\nExample:\n>>> api = NumerAPI(secret_key=\"..\", public_id=\"..\")\n>>> api.get_payments()\n[{'nmrAmount': Decimal('0.00'),\n'round': {'number': 84,\n'openTime': datetime.datetime(2017, 12, 2, 18, 0, tzinfo=tzutc()),\n'resolveTime': datetime.datetime(2018, 1, 1, 18, 0, tzinfo=tzutc()),\n'resolvedGeneral': True,\n'resolvedStaking': True},\n'tournament': 'staking',\n'usdAmount': Decimal('17.44')},\n...\n]", "source": "codesearchnet"}
{"code": "def split_heads(self, x):\n    with tf.name_scope('split_heads'):\n        batch_size = tf.shape(x)[0]\n        length = tf.shape(x)[1]\n        depth = (self.hidden_size \n        x = tf.reshape(x, [batch_size, length, self.num_heads, depth])\n        return tf.transpose(x, [0, 2, 1, 3])", "docstring": "Split x into different heads, and transpose the resulting value.\n\nThe tensor is transposed to insure the inner dimensions hold the correct\nvalues during the matrix multiplication.\n\nArgs:\nx: A tensor with shape [batch_size, length, hidden_size]\n\nReturns:\nA tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Retrieves table data from a specified set of rows. Requires the READER dataset role.\n\nArgs:\nrequest: (BigqueryTabledataListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(TableDataList) The response message.", "source": "github-repos"}
{"code": "def render(self, bindings):\n        \n        out = []\n        binding = False\n        for segment in self.segments:\n            if segment.kind == _BINDING:\n                if segment.literal not in bindings:\n                    raise ValidationException(\n                        ('rendering error: value for key \\'{}\\' '\n                         'not provided').format(segment.literal))\n                out.extend(PathTemplate(bindings[segment.literal]).segments)\n                binding = True\n            elif segment.kind == _END_BINDING:\n                binding = False\n            else:\n                if binding:\n                    continue\n                out.append(segment)\n        path = _format(out)\n        self.match(path)\n        return path", "docstring": "Renders a string from a path template using the provided bindings.\n\nArgs:\nbindings (dict): A dictionary of var names to binding strings.\n\nReturns:\nstr: The rendered instantiation of this path template.\n\nRaises:\nValidationError: If a key isn't provided or if a sub-template can't\nbe parsed.", "source": "juraj-google-style"}
{"code": "def assert_lessthan(arr_test, arr_max, msg=''):\n    r\n    if util_arg.NO_ASSERTS:\n        return\n    arr1 = np.array(arr_test)\n    arr2 = np.array(arr_max)\n    error = arr_max - arr_test\n    passed = error >= 0\n    if not np.all(passed):\n        failed_xs = np.where(np.logical_not(passed))\n        failed_error = error.take(failed_xs)\n        failed_arr_test = arr1.take(failed_xs)\n        failed_arr_target = arr2.take(failed_xs)\n\n        msg_list = [\n            'FAILED ASSERT LESSTHAN',\n            msg,\n            '  * failed_xs = %r' % (failed_xs,),\n            '  * failed_error = %r' % (failed_error,),\n            '  * failed_arr_test   = %r' % (failed_arr_test,),\n            '  * failed_arr_target = %r' % (failed_arr_target,),\n        ]\n        msg = '\\n'.join(msg_list)\n        raise AssertionError(msg)\n    return error", "docstring": "r\"\"\"\nArgs:\narr_test (ndarray or list):\narr_target (ndarray or list):\nthresh (scalar or ndarray or list):", "source": "juraj-google-style"}
{"code": "def _init_net_specs(conf):\n        \n        for net_name, net_spec in conf.get('nets', {}).items():\n            net_spec['name'] = net_name\n            net_spec['mapping'] = {}\n            net_spec.setdefault('type', 'nat')\n\n        return conf", "docstring": "Given a configuration specification, initializes all the net\ndefinitions in it so they can be used comfortably\n\nArgs:\nconf (dict): Configuration specification\n\nReturns:\ndict: the adapted new conf", "source": "juraj-google-style"}
{"code": "def ResourcePath(package_name, filepath):\n    if (not getattr(sys, 'frozen', None)):\n        target = _GetPkgResources(package_name, filepath)\n        if (target and os.access(target, os.R_OK)):\n            return target\n    target = os.path.join(sys.prefix, filepath)\n    if (target and os.access(target, os.R_OK)):\n        return target\n    return None", "docstring": "Computes a path to the specified package resource.\n\nArgs:\npackage_name: A name of the package where the resource is located.\nfilepath: A path to the resource relative to the package location.\n\nReturns:\nA path to the resource or `None` if the resource cannot be found.", "source": "codesearchnet"}
{"code": "def resolution(self, indicator=None):\n        \n        self._request_entity = 'dnsResolution'\n        self._request_uri = '{}/dnsResolutions'.format(self._request_uri)\n        if indicator is not None:\n            self._request_uri = '{}/{}/dnsResolutions'.format(self._api_uri, indicator)", "docstring": "Update the URI to retrieve host resolutions for the provided indicator.\n\nArgs:\nindicator (string): The indicator to retrieve resolutions.", "source": "juraj-google-style"}
{"code": "def repay_funding(self, amount, currency):\n    params = {'amount': amount, 'currency': currency}\n    return self._send_message('post', '/funding/repay', data=json.dumps(params))", "docstring": "Repay funding. Repays the older funding records first.\n\nArgs:\namount (int): Amount of currency to repay\ncurrency (str): The currency, example USD\n\nReturns:\nNot specified by cbpro.", "source": "codesearchnet"}
{"code": "def get_usedby_and_readonly(self, id):\n        \n        uri = self.URI + \"/\" + id + \"/usedby/readonly\"\n        return self._client.get(uri)", "docstring": "Gets the build plans details os teh selected plan script as per the selected attributes.\n\nArgs:\nid: ID of the Plan Script.\n\nReturns:\narray of build plans", "source": "juraj-google-style"}
{"code": "def __init__(self, value, method=Method.PREFIX):\n        \n        self.value = value\n        self.method = method", "docstring": "Init method.\n\nArgs:\nvalue (str): value to match.\nmethod (const): Method constant, matching method.", "source": "juraj-google-style"}
{"code": "def getTokensForText(self, body, POStags=None):\n        \n        return self._text.getTokensForText(self._retina, body, POStags)", "docstring": "Get tokenized input text\nArgs:\nbody, str: The text to be tokenized (required)\nPOStags, str: Specify desired POS types (optional)\nReturns:\nlist of str\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def macro_tpm(self, micro_tpm, check_independence=True):\n    if (not is_state_by_state(micro_tpm)):\n        micro_tpm = convert.state_by_node2state_by_state(micro_tpm)\n    macro_tpm = self.macro_tpm_sbs(micro_tpm)\n    if check_independence:\n        validate.conditionally_independent(macro_tpm)\n    return convert.state_by_state2state_by_node(macro_tpm)", "docstring": "Create a coarse-grained macro TPM.\n\nArgs:\nmicro_tpm (nd.array): The TPM of the micro-system.\ncheck_independence (bool): Whether to check that the macro TPM is\nconditionally independent.\n\nRaises:\nConditionallyDependentError: If ``check_independence`` is ``True``\nand the macro TPM is not conditionally independent.\n\nReturns:\nnp.ndarray: The state-by-node TPM of the macro-system.", "source": "codesearchnet"}
{"code": "def quarter_boundaries(quarter):\n    \n    year, quarter = quarter.split('Q')\n    year = int(year)\n    quarter = int(quarter)\n    first_month_of_quarter = 3 * quarter - 2\n    last_month_of_quarter = 3 * quarter\n    first_day = date(year, first_month_of_quarter, 1)\n    last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])\n    return first_day, last_day", "docstring": "Returns first and last day of a quarter\n\nArgs:\nquarter (str) quarter, in format '2015Q1'\n\nReturns: (tuple) datetime.dates for the first and last days of the quarter", "source": "juraj-google-style"}
{"code": "def handle_error(self, item_session: ItemSession, error: BaseException) -> Actions:\n    if ((not self._ssl_verification) and isinstance(error, SSLVerificationError)):\n        self._statistics.increment_error(ProtocolError())\n    else:\n        self._statistics.increment_error(error)\n    self._waiter.increment()\n    action = self.consult_error_hook(item_session, error)\n    if (action == Actions.RETRY):\n        item_session.set_status(Status.error)\n    elif (action == Actions.FINISH):\n        item_session.set_status(Status.done)\n    elif (action == Actions.STOP):\n        raise HookStop('Script requested immediate stop.')\n    elif (self._ssl_verification and isinstance(error, SSLVerificationError)):\n        raise\n    elif (isinstance(error, ConnectionRefused) and (not self.retry_connrefused)):\n        item_session.set_status(Status.skipped)\n    elif (isinstance(error, DNSNotFound) and (not self.retry_dns_error)):\n        item_session.set_status(Status.skipped)\n    else:\n        item_session.set_status(Status.error)\n    return action", "docstring": "Process an error.\n\nReturns:\nA value from :class:`.hook.Actions`.", "source": "codesearchnet"}
{"code": "def _ParseInfo2Record(self, parser_mediator, file_object, record_offset, record_size):\n    record_data = self._ReadData(file_object, record_offset, record_size)\n    record_map = self._GetDataTypeMap('recycler_info2_file_entry')\n    try:\n        record = self._ReadStructureFromByteStream(record_data, record_offset, record_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map record data at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))\n    codepage = (parser_mediator.codepage or 'ascii')\n    ascii_filename = record.original_filename.split(b'\\x00')[0]\n    try:\n        ascii_filename = ascii_filename.decode(codepage)\n    except UnicodeDecodeError:\n        ascii_filename = ascii_filename.decode(codepage, errors='replace')\n        parser_mediator.ProduceExtractionWarning('unable to decode original filename.')\n    unicode_filename = None\n    if (record_size > 280):\n        record_offset += 280\n        utf16_string_map = self._GetDataTypeMap('recycler_info2_file_entry_utf16le_string')\n        try:\n            unicode_filename = self._ReadStructureFromByteStream(record_data[280:], record_offset, utf16_string_map)\n        except (ValueError, errors.ParseError) as exception:\n            raise errors.ParseError('Unable to map record data at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))\n        unicode_filename = unicode_filename.rstrip('\\x00')\n    if (record.deletion_time == 0):\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n        date_time = dfdatetime_filetime.Filetime(timestamp=record.deletion_time)\n    event_data = WinRecycleBinEventData()\n    event_data.drive_number = record.drive_number\n    event_data.original_filename = (unicode_filename or ascii_filename)\n    event_data.file_size = record.original_file_size\n    event_data.offset = record_offset\n    event_data.record_index = record.index\n    if (ascii_filename != unicode_filename):\n        event_data.short_filename = ascii_filename\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_DELETED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an INFO-2 record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\nrecord_offset (int): record offset.\nrecord_size (int): record size.\n\nRaises:\nParseError: if the record cannot be read.", "source": "codesearchnet"}
{"code": "def reply(self, status=200, new_response=False, **kw):\n    res = (Response(**kw) if new_response else self._response)\n    res.status((status or res._status))\n    res.mock = self\n    self._response = res\n    return res", "docstring": "Defines the mock response.\n\nArguments:\nstatus (int, optional): response status code. Defaults to ``200``.\n**kw (dict): optional keyword arguments passed to ``pook.Response``\nconstructor.\n\nReturns:\npook.Response: mock response definition instance.", "source": "codesearchnet"}
{"code": "def terminate(self, task_type, task_id):\n    with self._process_lock:\n        p = self._processes.get((task_type, task_id), None)\n        if p is None:\n            raise ValueError('{}-{} does not exist'.format(task_type, task_id))\n        self._terminated.add((task_type, task_id))\n        self._parent_to_sub_queue.put('terminate {} {}'.format(task_type, task_id))\n        p.join()", "docstring": "Terminates the process with `task_type` and `task_id`.\n\nIf auto_retart=True, the terminated task will be restarted unless the chief\nhas already exited with zero exit code.\n\nArgs:\ntask_type: the task type.\ntask_id: the task id.", "source": "github-repos"}
{"code": "def print_serial_number_info(self, serial_number, print_to_screen=True):\n    r = self.select_serial_number_row(serial_number)\n    if r.empty:\n        warnings.warn('missing serial number')\n        return\n    txt1 = (80 * '=')\n    txt1 += '\\n'\n    txt1 += f\n    txt1 = (80 * '-')\n    txt1 += '\\n'\n    txt2 = ''\n    for (label, value) in zip(r.columns, r.values[0]):\n        if (label in self.headers):\n            txt1 += f\n        else:\n            txt2 += f\n    if print_to_screen:\n        print(txt1)\n        print((80 * '-'))\n        print(txt2)\n        print((80 * '='))\n        return\n    else:\n        return txt1", "docstring": "Print information about the run.\n\nArgs:\nserial_number: serial number.\nprint_to_screen: runs the print statement if True,\nreturns txt if not.\n\nReturns:\ntxt if print_to_screen is False, else None.", "source": "codesearchnet"}
{"code": "def configure_app(app, config=None, config_obj=None):\n    \n    app.config.from_object(config_obj or BaseConfig)\n    if config is not None:\n        app.config.from_pyfile(config)", "docstring": "Configure application instance.\n\nArgs:\napp (Flask): initialized Flask app instance\nconfig (Optional[path]): path to a Python module config file\nconfig_obj (Optional[class]): Python config object", "source": "juraj-google-style"}
{"code": "def GetStream(data=None):\n        \n        if len(__mstreams_available__) == 0:\n            if data:\n                mstream = MemoryStream(data)\n                mstream.seek(0)\n            else:\n                mstream = MemoryStream()\n            __mstreams__.append(mstream)\n            return mstream\n\n        mstream = __mstreams_available__.pop()\n\n        if data is not None and len(data):\n            mstream.Cleanup()\n            mstream.write(data)\n\n        mstream.seek(0)\n\n        return mstream", "docstring": "Get a MemoryStream instance.\n\nArgs:\ndata (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nMemoryStream: instance.", "source": "juraj-google-style"}
{"code": "def _create_or_validate_filenames_dataset(filenames, name=None):\n    if isinstance(filenames, data_types.DatasetV2):\n        element_type = dataset_ops.get_legacy_output_types(filenames)\n        if element_type != dtypes.string:\n            raise TypeError(f'The `filenames` argument must contain `tf.string` elements. Got a dataset of `{element_type!r}` elements.')\n        element_shape = dataset_ops.get_legacy_output_shapes(filenames)\n        if not element_shape.is_compatible_with(tensor_shape.TensorShape([])):\n            raise TypeError(f'The `filenames` argument must contain `tf.string` elements of shape [] (i.e. scalars). Got a dataset of element shape {element_shape!r}.')\n    else:\n        filenames = nest.map_structure(_normalise_fspath, filenames)\n        filenames = ops.convert_to_tensor(filenames, dtype_hint=dtypes.string)\n        if filenames.dtype != dtypes.string:\n            raise TypeError(f'The `filenames` argument must contain `tf.string` elements. Got `{filenames.dtype!r}` elements.')\n        filenames = array_ops.reshape(filenames, [-1], name='flat_filenames')\n        filenames = from_tensor_slices_op._TensorSliceDataset(filenames, is_files=True, name=name)\n    return filenames", "docstring": "Creates (or validates) a dataset of filenames.\n\nArgs:\nfilenames: Either a list or dataset of filenames. If it is a list, it is\nconvert to a dataset. If it is a dataset, its type and shape is validated.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA dataset of filenames.", "source": "github-repos"}
{"code": "def onehot_encode(dataset, char_indices, maxlen):\n    X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))\n    for (i, sentence) in enumerate(dataset):\n        for (t, char) in enumerate(sentence):\n            X[(i, t, char_indices[char])] = 1\n    return X", "docstring": "One hot encode the tokens\n\nArgs:\ndataset  list of lists of tokens\nchar_indices  dictionary of {key=character, value=index to use encoding vector}\nmaxlen  int  Length of each sample\nReturn:\nnp array of shape (samples, tokens, encoding length)", "source": "codesearchnet"}
{"code": "def _ConditionalFormatMessages(self, event_values):\n    \n    \n    \n    string_pieces = []\n    for map_index, attribute_name in enumerate(self._format_string_pieces_map):\n      if not attribute_name or attribute_name in event_values:\n        if attribute_name:\n          attribute = event_values.get(attribute_name, None)\n          \n          \n          \n          \n          if (not isinstance(attribute, (bool, float)) and\n              not isinstance(attribute, py2to3.INTEGER_TYPES) and\n              not attribute):\n            continue\n\n        string_pieces.append(self.FORMAT_STRING_PIECES[map_index])\n\n    format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)\n\n    string_pieces = []\n    for map_index, attribute_name in enumerate(\n        self._format_string_short_pieces_map):\n      if not attribute_name or event_values.get(attribute_name, None):\n        string_pieces.append(self.FORMAT_STRING_SHORT_PIECES[map_index])\n    short_format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)\n\n    return self._FormatMessages(\n        format_string, short_format_string, event_values)", "docstring": "Determines the conditional formatted message strings.\n\nArgs:\nevent_values (dict[str, object]): event values.\n\nReturns:\ntuple(str, str): formatted message string and short message string.", "source": "juraj-google-style"}
{"code": "def add_property_orders(query_proto, *orders):\n    for order in orders:\n        proto = query_proto.order.add()\n        if (order[0] == '-'):\n            order = order[1:]\n            proto.direction = query_pb2.PropertyOrder.DESCENDING\n        else:\n            proto.direction = query_pb2.PropertyOrder.ASCENDING\n        proto.property.name = order", "docstring": "Add ordering constraint for the given datastore.Query proto message.\n\nArgs:\nquery_proto: datastore.Query proto message.\norders: list of propertype name string, default to ascending\norder and set descending if prefixed by '-'.\n\nUsage:\n>>> add_property_orders(query_proto, 'foo')  # sort by foo asc\n>>> add_property_orders(query_proto, '-bar')  # sort by bar desc", "source": "codesearchnet"}
{"code": "def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:\n    try:\n        from pycocotools import mask as coco_mask\n    except ImportError:\n        raise ImportError('Pycocotools is not installed in your environment.')\n    masks = []\n    for polygons in segmentations:\n        rles = coco_mask.frPyObjects(polygons, height, width)\n        mask = coco_mask.decode(rles)\n        if len(mask.shape) < 3:\n            mask = mask[..., None]\n        mask = np.asarray(mask, dtype=np.uint8)\n        mask = np.any(mask, axis=2)\n        masks.append(mask)\n    if masks:\n        masks = np.stack(masks, axis=0)\n    else:\n        masks = np.zeros((0, height, width), dtype=np.uint8)\n    return masks", "docstring": "Convert a COCO polygon annotation to a mask.\n\nArgs:\nsegmentations (`List[List[float]]`):\nList of polygons, each polygon represented by a list of x-y coordinates.\nheight (`int`):\nHeight of the mask.\nwidth (`int`):\nWidth of the mask.", "source": "github-repos"}
{"code": "def _get_backend_instance(self, backend_cls):\n        \n        \n        try:\n            backend_instance = backend_cls(provider=self)\n        except Exception as err:\n            raise QiskitError('Backend %s could not be instantiated: %s' %\n                              (backend_cls, err))\n\n        return backend_instance", "docstring": "Return an instance of a backend from its class.\n\nArgs:\nbackend_cls (class): Backend class.\nReturns:\nBaseBackend: a backend instance.\nRaises:\nQiskitError: if the backend could not be instantiated.", "source": "juraj-google-style"}
{"code": "def model_fn(hparams, seed):\n  \n  rng = random.Random(seed)\n\n  model = tf.keras.models.Sequential()\n  model.add(tf.keras.layers.Input(INPUT_SHAPE))\n  model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,)))  \n\n  \n  conv_filters = 8\n  for _ in xrange(hparams[HP_CONV_LAYERS]):\n    model.add(tf.keras.layers.Conv2D(\n        filters=conv_filters,\n        kernel_size=hparams[HP_CONV_KERNEL_SIZE],\n        padding=\"same\",\n        activation=\"relu\",\n    ))\n    model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding=\"same\"))\n    conv_filters *= 2\n\n  model.add(tf.keras.layers.Flatten())\n  model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))\n\n  \n  dense_neurons = 32\n  for _ in xrange(hparams[HP_DENSE_LAYERS]):\n    model.add(tf.keras.layers.Dense(dense_neurons, activation=\"relu\"))\n    dense_neurons *= 2\n\n  \n  model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation=\"softmax\"))\n\n  model.compile(\n      loss=\"sparse_categorical_crossentropy\",\n      optimizer=hparams[HP_OPTIMIZER],\n      metrics=[\"accuracy\"],\n  )\n  return model", "docstring": "Create a Keras model with the given hyperparameters.\n\nArgs:\nhparams: A dict mapping hyperparameters in `HPARAMS` to values.\nseed: A hashable object to be used as a random seed (e.g., to\nconstruct dropout layers in the model).\n\nReturns:\nA compiled Keras model.", "source": "juraj-google-style"}
{"code": "def guess_strategy_type(file_name_or_ext):\n    if ('.' not in file_name_or_ext):\n        ext = file_name_or_ext\n    else:\n        (name, ext) = os.path.splitext(file_name_or_ext)\n    ext = ext.lstrip('.')\n    file_type_map = get_file_type_map()\n    return file_type_map.get(ext, None)", "docstring": "Guess strategy type to use for file by extension.\n\nArgs:\nfile_name_or_ext: Either a file name with an extension or just\nan extension\n\nReturns:\nStrategy: Type corresponding to extension or None if there's no\ncorresponding strategy type", "source": "codesearchnet"}
{"code": "def start_listener_thread(self, timeout_ms: int = 30000, exception_handler: Callable = None):\n        \n        assert not self.should_listen and self.sync_thread is None, 'Already running'\n        self.should_listen = True\n        self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms, exception_handler)\n        self.sync_thread.name = f'GMatrixClient.listen_forever user_id:{self.user_id}'", "docstring": "Start a listener greenlet to listen for events in the background.\nArgs:\ntimeout_ms: How long to poll the Home Server for before retrying.\nexception_handler: Optional exception handler function which can\nbe used to handle exceptions in the caller thread.", "source": "juraj-google-style"}
{"code": "def normalized_energy_at_conditions(self, pH, V):\n        \n        return self.energy_at_conditions(pH, V) * self.normalization_factor", "docstring": "Energy at an electrochemical condition, compatible with\nnumpy arrays for pH/V input\n\nArgs:\npH (float): pH at condition\nV (float): applied potential at condition\n\nReturns:\nenergy normalized by number of non-O/H atoms at condition", "source": "juraj-google-style"}
{"code": "def _verifyStackFrames(self, stack_frames):\n    self.assertTrue([frame for frame in stack_frames if frame[0] == _current_file_full_path])", "docstring": "Verify the correctness of the stack frames.\n\nCurrently, it simply asserts that the current file is found in the stack\nframes.\nTODO(cais): Perhaps implement a stricter check later.\n\nArgs:\nstack_frames: The stack frames to verify.", "source": "github-repos"}
{"code": "def parse_file(path, format=None, encoding='utf-8', force_types=True):\n    try:\n        with open(path, 'rb') as f:\n            return parse(f, format, encoding, force_types)\n    except EnvironmentError as e:\n        raise AnyMarkupError(e, traceback.format_exc())", "docstring": "A convenience wrapper of parse, which accepts path of file to parse.\n\nArgs:\npath: path to file to parse\nformat: explicitly override the guessed `inp` markup format\nencoding: file encoding, defaults to utf-8\nforce_types:\nif `True`, integers, floats, booleans and none/null\nare recognized and returned as proper types instead of strings;\nif `False`, everything is converted to strings\nif `None`, backend return value is used\nReturns:\nparsed `inp` (dict or list) containing unicode values\nRaises:\nAnyMarkupError if a problem occurs while parsing", "source": "codesearchnet"}
{"code": "def create_new(cls, mapreduce_id, shard_number):\n    \n    shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)\n    state = cls(key_name=shard_id,\n                mapreduce_id=mapreduce_id)\n    return state", "docstring": "Create new shard state.\n\nArgs:\nmapreduce_id: unique mapreduce id as string.\nshard_number: shard number for which to create shard state.\n\nReturns:\nnew instance of ShardState ready to put into datastore.", "source": "juraj-google-style"}
{"code": "def _redistribute_builder(self, afi='ipv4', source=None):\n    if (source == 'connected'):\n        return getattr(self._rbridge, 'rbridge_id_router_router_bgp_address_family_{0}_{0}_unicast_default_vrf_af_{0}_uc_and_vrf_cmds_call_point_holder_redistribute_connected_redistribute_connected'.format(afi))\n    else:\n        raise AttributeError('Invalid source.')", "docstring": "Build BGP redistribute method.\n\nDo not use this method directly.  You probably want ``redistribute``.\n\nArgs:\nsource (str): Source for redistributing. (connected)\nafi (str): Address family to configure. (ipv4, ipv6)\n\nReturns:\nMethod to redistribute desired source.\n\nRaises:\nKeyError: if `source` is not specified.\n\nExamples:\n>>> import pynos.device\n>>> conn = ('10.24.39.203', '22')\n>>> auth = ('admin', 'password')\n>>> with pynos.device.Device(conn=conn, auth=auth) as dev:\n...     output = dev.bgp._redistribute_builder(source='connected',\n...     afi='ipv4')\n...     dev.bgp._redistribute_builder(source='hodor',\n...     afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL\nTraceback (most recent call last):\nAttributeError", "source": "codesearchnet"}
{"code": "def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, xpaths: Optional[List[List[int]]]=None, node_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n    return self._encode_plus(text=text, xpaths=xpaths, text_pair=text_pair, node_labels=node_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,\n`__call__` should be used instead.\n\nArgs:\ntext (`str`, `List[str]`, `List[List[str]]`):\nThe first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.\ntext_pair (`List[str]` or `List[int]`, *optional*):\nOptional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a\nlist of list of strings (nodes of a batch of examples).", "source": "github-repos"}
{"code": "def get_rows_fieldnames_from_query(\n        session: Union[Session, Engine, Connection],\n        query: Query) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:\n    \n    \n    \n    \n    result = session.execute(query)  \n    fieldnames = result.keys()\n    \n    \n    rows = result.fetchall()\n    return rows, fieldnames", "docstring": "Returns results and column names from a query.\n\nArgs:\nsession: SQLAlchemy :class:`Session`, :class:`Engine`, or\n:class:`Connection` object\nquery: SQLAlchemy :class:`Query`\n\nReturns:\n``(rows, fieldnames)`` where ``rows`` is the usual set of results and\n``fieldnames`` are the name of the result columns/fields.", "source": "juraj-google-style"}
{"code": "def ConvertToWireFormat(self, value):\n    \n    output = _SerializeEntries(\n        (python_format, wire_format, value.type_descriptor)\n        for (python_format, wire_format) in value.wrapped_list)\n    return b\"\", b\"\", output", "docstring": "Convert to the wire format.\n\nArgs:\nvalue: is of type RepeatedFieldHelper.\n\nReturns:\nA wire format representation of the value.", "source": "juraj-google-style"}
{"code": "def _scalar_field_to_json(field, row_value):\n    \n    converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)\n    if converter is None:  \n        return row_value\n    return converter(row_value)", "docstring": "Maps a field and value to a JSON-safe value.\n\nArgs:\nfield ( \\\n:class:`~google.cloud.bigquery.schema.SchemaField`, \\\n):\nThe SchemaField to use for type conversion and field name.\nrow_value (any):\nValue to be converted, based on the field's type.\n\nReturns:\nany:\nA JSON-serializable object.", "source": "juraj-google-style"}
{"code": "class AriaProjectorMLP(nn.Module):\n\n    def __init__(self, in_features, hidden_features, output_dim):\n        super().__init__()\n        self.linear_in = nn.Linear(in_features, hidden_features, bias=False)\n        self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)\n        self.act = ACT2FN['gelu_new']\n\n    def forward(self, hidden_states):\n        hidden_states = self.act(self.linear_in(hidden_states))\n        hidden_states = self.linear_out(hidden_states)\n        return hidden_states", "docstring": "Feed-Forward Network module for the Aria Projector.\n\nArgs:\nin_features (`int`):\nInput embedding dimension.\nhidden_features (`int`):\nHidden dimension of the feed-forward network.\noutput_dim (`int`):\nOutput dimension.", "source": "github-repos"}
{"code": "def Var(self, mu=None):\n    if (mu is None):\n        mu = self.Mean()\n    var = 0.0\n    for (x, p) in self.d.iteritems():\n        var += (p * ((x - mu) ** 2))\n    return var", "docstring": "Computes the variance of a PMF.\n\nArgs:\nmu: the point around which the variance is computed;\nif omitted, computes the mean\n\nReturns:\nfloat variance", "source": "codesearchnet"}
{"code": "def __init__(self, all_batch_items=None, commit_count=None):\n    self._all_batch_items = all_batch_items\n    self._commit_count = commit_count\n    self.mutations = []", "docstring": "Fake ``google.cloud.datastore.batch.Batch`` object.\n\nArgs:\nall_batch_items: (list) If set, will append all entities/keys added to\nthis batch.\ncommit_count: (list of int) If set, will increment commit_count[0] on\neach ``commit``.", "source": "github-repos"}
{"code": "def get_execution_host_info():\n    host = os.environ.get('HOSTNAME', None)\n    cluster = os.environ.get('SGE_O_HOST', None)\n    if (host is None):\n        try:\n            import socket\n            host = (host or socket.gethostname())\n        except:\n            pass\n    return ((host or 'unknown'), (cluster or 'unknown'))", "docstring": "Tries to return a tuple describing the execution host.\nDoesn't work for all queueing systems\n\nReturns:\n(HOSTNAME, CLUSTER_NAME)", "source": "codesearchnet"}
{"code": "def app(environ, start_response):\n    from wsgi import container\n    'Add Environ To Service Container\\n    Add the environ to the service container. The environ is generated by the\\n    the WSGI server above and used by a service provider to manipulate the\\n    incoming requests\\n    '\n    container.bind('Environ', environ)\n    'Execute All Service Providers That Require The WSGI Server\\n    Run all service provider boot methods if the wsgi attribute is true.\\n    '\n    try:\n        for provider in container.make('WSGIProviders'):\n            container.resolve(provider.boot)\n    except Exception as e:\n        container.make('ExceptionHandler').load_exception(e)\n    \"We Are Ready For Launch\\n    If we have a solid response and not redirecting then we need to return\\n    a 200 status code along with the data. If we don't, then we'll have\\n    to return a 302 redirection to where ever the user would like go\\n    to next.\\n    \"\n    start_response(container.make('Request').get_status_code(), container.make('Request').get_and_reset_headers())\n    'Final Step\\n    This will take the data variable from the Service Container and return\\n    it to the WSGI server.\\n    '\n    return iter([bytes(container.make('Response'), 'utf-8')])", "docstring": "The WSGI Application Server.\n\nArguments:\nenviron {dict} -- The WSGI environ dictionary\nstart_response {WSGI callable}\n\nReturns:\nWSGI Response", "source": "codesearchnet"}
{"code": "def __init__(self, forecast_io):\n        \n        if forecast_io.has_currently():\n            self.currently = forecast_io.get_currently()\n            for item in self.currently.keys():\n                setattr(self, item, self.currently[item])", "docstring": "Construct a new 'FIOCurrently' object.\nRecieves an ForecastIO object and gets the currently weather conditions\nif they are available in the object.\n\nArgs:\nforecast_io (ForecastIO): The ForecastIO object", "source": "juraj-google-style"}
{"code": "def _handle_create(self, response, ignore_tombstone, auto_refresh):\n\n\t\t\n\n\t\t\n\t\tif response.status_code == 201:\n\t\t\t\n\t\t\tself.uri = self.repo.parse_uri(response.text)\n\t\t\t\n\t\t\tif auto_refresh:\n\t\t\t\tself.refresh()\n\t\t\telif auto_refresh == None:\n\t\t\t\tif self.repo.default_auto_refresh:\n\t\t\t\t\tself.refresh()\n\t\t\t\n\t\t\tif hasattr(self,'_post_create'):\n\t\t\t\tself._post_create(auto_refresh=auto_refresh)\n\n\t\t\n\t\telif response.status_code == 404:\n\t\t\traise Exception('HTTP 404, for this POST request target location does not exist')\n\n\t\t\n\t\telif response.status_code == 409:\n\t\t\traise Exception('HTTP 409, resource already exists')\n\n\t\t\n\t\telif response.status_code == 410:\n\t\t\tif ignore_tombstone:\n\t\t\t\tresponse = self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)\n\t\t\t\tif response.status_code == 204:\n\t\t\t\t\tlogger.debug('tombstone removed, retrying create')\n\t\t\t\t\tself.create()\n\t\t\t\telse:\n\t\t\t\t\traise Exception('HTTP %s, Could not remove tombstone for %s' % (response.status_code, self.uri))\n\t\t\telse:\n\t\t\t\traise Exception('tombstone for %s detected, aborting' % self.uri)\n\n\t\t\n\t\telif response.status_code == 415:\n\t\t\traise Exception('HTTP 415, unsupported media type')\n\n\t\t\n\t\telse:\n\t\t\traise Exception('HTTP %s, unknown error creating resource' % response.status_code)\n\n\t\t\n\t\treturn self", "docstring": "Handles response from self.create()\n\nArgs:\nresponse (requests.models.Response): response object from self.create()\nignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry", "source": "juraj-google-style"}
{"code": "def text_pb(tag, data, description=None):\n  \n  try:\n    tensor = tensor_util.make_tensor_proto(data, dtype=np.object)\n  except TypeError as e:\n    raise TypeError('tensor must be of type string', e)\n  summary_metadata = metadata.create_summary_metadata(\n      display_name=None, description=description)\n  summary = summary_pb2.Summary()\n  summary.value.add(tag=tag,\n                    metadata=summary_metadata,\n                    tensor=tensor)\n  return summary", "docstring": "Create a text tf.Summary protobuf.\n\nArguments:\ntag: String tag for the summary.\ndata: A Python bytestring (of type bytes), a Unicode string, or a numpy data\narray of those types.\ndescription: Optional long-form description for this summary, as a `str`.\nMarkdown is supported. Defaults to empty.\n\nRaises:\nTypeError: If the type of the data is unsupported.\n\nReturns:\nA `tf.Summary` protobuf object.", "source": "juraj-google-style"}
{"code": "def ProtoFromDataFrames(self, dataframes):\n    \n    datasets = []\n    for dataframe in dataframes:\n      table = dataframe['table']\n      table_entries = {}\n      for col in table:\n        table_entries[col] = self.NdarrayToEntry(table[col])\n      datasets.append({\n          'entries': table_entries,\n          'size': len(table),\n          'name': dataframe['name']\n      })\n    return self.GetDatasetsProto(datasets)", "docstring": "Creates a feature statistics proto from a set of pandas dataframes.\nArgs:\ndataframes: A list of dicts describing tables for each dataset for the\nproto. Each entry contains a 'table' field of the dataframe of the\ndata\nand a 'name' field to identify the dataset in the proto.\nReturns:\nThe feature statistics proto for the provided tables.", "source": "juraj-google-style"}
{"code": "def list_container_instance_groups_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.ContainerInstance/ContainerGroups', '?api-version=', CONTAINER_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container groups in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON list of container groups and their properties.", "source": "codesearchnet"}
{"code": "def set_soft_device_placement(enabled):\n    context.context().soft_device_placement = enabled", "docstring": "Enable or disable soft device placement.\n\nIf enabled, ops can be placed on different devices than the device explicitly\nassigned by the user. This potentially has a large performance cost due to an\nincrease in data communication between devices.\n\nSome cases where soft_device_placement would modify device assignment are:\n1. no GPU/TPU implementation for the OP\n2. no GPU devices are known or registered\n3. need to co-locate with reftype input(s) which are from CPU\n4. an OP can not be compiled by XLA.  Common for TPU which always requires\nthe XLA compiler.\n\nFor TPUs, if this option is true, a feature called automatic outside\ncompilation is enabled. Automatic outside compilation will move uncompilable\nops within a TPU program to instead run on the host. This can be used when\nencountering compilation failures due to unsupported ops.\n\nNote: by default soft device placement is enabled when running in eager mode\n(for convenience) and disabled in graph mode (for performance).\n\nArgs:\nenabled: A boolean indicating whether to enable soft placement.", "source": "github-repos"}
{"code": "def get_all_dependencies(metadata: MetaData,\n                         extra_dependencies: List[TableDependency] = None,\n                         sort: bool = True) \\\n        -> List[TableDependency]:\n    \n    extra_dependencies = extra_dependencies or []  \n    for td in extra_dependencies:\n        td.set_metadata_if_none(metadata)\n    dependencies = set([td.sqla_tuple() for td in extra_dependencies])\n\n    tables = list(metadata.tables.values())  \n\n    for table in tables:\n        for fkc in table.foreign_key_constraints:\n            if fkc.use_alter is True:\n                \n                continue\n\n            dependent_on = fkc.referred_table\n            if dependent_on is not table:\n                dependencies.add((dependent_on, table))\n\n        if hasattr(table, \"_extra_dependencies\"):\n            \n            dependencies.update(\n                (parent, table) for parent in table._extra_dependencies\n            )\n\n    dependencies = [\n        TableDependency(parent_table=parent, child_table=child)\n        for parent, child in dependencies\n    ]\n    if sort:\n        dependencies.sort(key=lambda td_: (td_.parent_tablename,\n                                           td_.child_tablename))\n    return dependencies", "docstring": "Describes how the tables found in the metadata depend on each other.\n(If table B contains a foreign key to table A, for example, then B depends\non A.)\n\nArgs:\nmetadata: the metadata to inspect\nextra_dependencies: additional table dependencies to specify manually\nsort: sort into alphabetical order of (parent, child) table names?\n\nReturns:\na list of :class:`TableDependency` objects\n\nSee :func:`sort_tables_and_constraints` for method.", "source": "juraj-google-style"}
{"code": "def __check_no_missing_attributes(self, node: yaml.Node, mapping: CommentedMap) -> None:\n    logger.debug('Checking presence of required attributes')\n    for (name, type_, required) in class_subobjects(self.class_):\n        if (required and (name not in mapping)):\n            raise RecognitionError('{}{}Missing attribute {} needed for constructing a {}'.format(node.start_mark, os.linesep, name, self.class_.__name__))\n        if ((name in mapping) and (not self.__type_matches(mapping[name], type_))):\n            raise RecognitionError('{}{}Attribute {} has incorrect type {}, expecting a {}'.format(node.start_mark, os.linesep, name, type(mapping[name]), type_))", "docstring": "Checks that all required attributes are present.\n\nAlso checks that they're of the correct type.\n\nArgs:\nmapping: The mapping with subobjects of this object.\n\nRaises:\nRecognitionError: if an attribute is missing or the type \\\nis incorrect.", "source": "codesearchnet"}
{"code": "def get_partstudio_tessellatededges(self, did, wid, eid):\n        \n\n        return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges')", "docstring": "Gets the tessellation of the edges of all parts in a part studio.\n\nArgs:\n- did (str): Document ID\n- wid (str): Workspace ID\n- eid (str): Element ID\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def checksum(path):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.checksum(path)", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nThis operation returns checksum metadata as stored in the underlying\nFileSystem. It should not read any file data. Checksum type and format are\nFileSystem dependent and are not compatible between FileSystems.\n\nArgs:\npath: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def run(argv=None, save_main_session=True):\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    engine_handler = KeyedModelHandler(TensorRTEngineHandlerNumPy(min_batch_size=1, max_batch_size=1, engine_path=known_args.engine_path))\n    with beam.Pipeline(options=pipeline_options) as p:\n        filename_value_pair = p | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input) | 'ReadImageData' >> beam.Map(lambda image_name: read_image(image_file_name=image_name, path_to_dir=known_args.images_dir)) | 'AttachImageSizeToKey' >> beam.Map(attach_im_size_to_key) | 'PreprocessImages' >> beam.MapTuple(lambda file_name, data: (file_name, preprocess_image(data)))\n        predictions = filename_value_pair | 'TensorRTRunInference' >> RunInference(engine_handler) | 'ProcessOutput' >> beam.ParDo(PostProcessor())\n        _ = predictions | 'WriteOutputToGCS' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)", "docstring": "Args:\nargv: Command line arguments defined for this example.", "source": "github-repos"}
{"code": "def ToRequest(self):\n    param = {}\n    if self.email:\n        param['email'] = self.email\n    if self.user_id:\n        param['localId'] = self.user_id\n    if self.name:\n        param['displayName'] = self.name\n    if self.photo_url:\n        param['photoUrl'] = self.photo_url\n    if (self.email_verified is not None):\n        param['emailVerified'] = self.email_verified\n    if self.password_hash:\n        param['passwordHash'] = base64.urlsafe_b64encode(self.password_hash)\n    if self.salt:\n        param['salt'] = base64.urlsafe_b64encode(self.salt)\n    if self.provider_info:\n        param['providerUserInfo'] = self.provider_info\n    return param", "docstring": "Converts to gitkit api request parameter dict.\n\nReturns:\nDict, containing non-empty user attributes.", "source": "codesearchnet"}
{"code": "def validlocations(configuration=None):\n    if (Locations._validlocations is None):\n        if (configuration is None):\n            configuration = Configuration.read()\n        Locations._validlocations = configuration.call_remoteckan('group_list', {'all_fields': True})\n    return Locations._validlocations", "docstring": "Read valid locations from HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nList[Dict]: A list of valid locations", "source": "codesearchnet"}
{"code": "def multiple(layer: int, limit: int) -> Set[str]:\n\t\n\treturn {str(x).zfill(2) for x in [2**x for x in range(limit)] if x % 2**(layer - 1) == 0}", "docstring": "Returns a set of strings to be used as Slots with Pabianas default Clock.\n\nArgs:\nlayer: The layer in the hierarchy this Area is placed in.\nTechnically, the number specifies how many of the Clocks signals are relevant to the Area.\nBetween 1 and limit.\nlimit: The number of layers of the hierarchy.", "source": "juraj-google-style"}
{"code": "def _apply_same_padding(inputs, kernel_size, strides, data_format, operation_type, dilation_rate=1):\n    spatial_shape = inputs.shape[2:]\n    num_spatial_dims = len(spatial_shape)\n    padding = []\n    if operation_type != 'pooling':\n        dilation_rate = standardize_tuple(dilation_rate, num_spatial_dims, 'dilation_rate')\n    for i in range(num_spatial_dims):\n        dil = 1 if operation_type == 'pooling' else dilation_rate[i]\n        pad = _compute_padding_length(spatial_shape[i], kernel_size[i], strides[i], dil)\n        padding.append(pad)\n    if all((left == right for left, right in padding)):\n        return (inputs, [left for left, _ in padding])\n    flattened_padding = []\n    for pad in reversed(padding):\n        flattened_padding.extend(pad)\n    mode = 'replicate' if operation_type == 'pooling' else 'constant'\n    return (tnn.pad(inputs, pad=tuple(flattened_padding), mode=mode), 0)", "docstring": "Apply same padding to the input tensor.\n\nThis function will evaluate if the padding value is compatible with torch\nfunctions. To avoid calling `pad()` as much as possible, which may cause\nperformance or memory issues, when compatible, it does not apply the padding\nto the tensor, but returns the input tensor and the padding value to pass to\nthe torch functions. If not compatible, it returns the padded tensor and 0\nas the padding value.\n\nReturns:\ntensor: A padded tensor or the inputs.\npadding: The padding value, ready to pass to the torch functions.", "source": "github-repos"}
{"code": "def validate_language_key(obj, key):\n    backend = bigchaindb.config['database']['backend']\n    if (backend == 'localmongodb'):\n        data = obj.get(key, {})\n        if isinstance(data, dict):\n            validate_all_values_for_key_in_obj(data, 'language', validate_language)\n        elif isinstance(data, list):\n            validate_all_values_for_key_in_list(data, 'language', validate_language)", "docstring": "Validate all nested \"language\" key in `obj`.\n\nArgs:\nobj (dict): dictionary whose \"language\" key is to be validated.\n\nReturns:\nNone: validation successful\n\nRaises:\nValidationError: will raise exception in case language is not valid.", "source": "codesearchnet"}
{"code": "def variables_initializer(var_list, name='init'):\n    if var_list and (not context.executing_eagerly()):\n        return control_flow_ops.group(*[v.initializer for v in var_list], name=name)\n    return control_flow_ops.no_op(name=name)", "docstring": "Returns an Op that initializes a list of variables.\n\nAfter you launch the graph in a session, you can run the returned Op to\ninitialize all the variables in `var_list`. This Op runs all the\ninitializers of the variables in `var_list` in parallel.\n\nCalling `initialize_variables()` is equivalent to passing the list of\ninitializers to `Group()`.\n\nIf `var_list` is empty, however, the function still returns an Op that can\nbe run. That Op just has no effect.\n\n@compatibility(TF2)\nIn TF2, variables are initialized immediately when they are created. There is\nno longer a need to run variable initializers before using them.\n@end_compatibility\n\nArgs:\nvar_list: List of `Variable` objects to initialize.\nname: Optional name for the returned operation.\n\nReturns:\nAn Op that run the initializers of all the specified variables.", "source": "github-repos"}
{"code": "def pyxb_to_dict(rp_pyxb):\n    return {'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')), 'num': _get_as_int(rp_pyxb), 'block': _get_as_set(rp_pyxb, 'block'), 'pref': _get_as_set(rp_pyxb, 'pref')}", "docstring": "Convert ReplicationPolicy PyXB object to a normalized dict.\n\nArgs:\nrp_pyxb: ReplicationPolicy to convert.\n\nReturns:\ndict : Replication Policy as normalized dict.\n\nExample::\n\n{\n'allowed': True,\n'num': 3,\n'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},\n'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},\n}", "source": "codesearchnet"}
{"code": "def sd(line, cell=None):\n    parser = google.datalab.utils.commands.CommandParser(prog='%sd', description='Execute various Stackdriver related operations. Use \"%sd <stackdriver_product> -h\" for help on a specific Stackdriver product.')\n    _create_monitoring_subparser(parser)\n    return google.datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the stackdriver cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the storage line.\nReturns:\nThe results of executing the cell.", "source": "codesearchnet"}
{"code": "def greater_equal(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return GreaterEqual().symbolic_call(x1, x2)\n    return backend.numpy.greater_equal(x1, x2)", "docstring": "Return the truth value of `x1 >= x2` element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, element-wise comparison of `x1` and `x2`.", "source": "github-repos"}
{"code": "def is_abstract(x: Any) -> bool:\n    return utils.is_partial(x) or is_pure_symbolic(x)", "docstring": "Returns if the input value is abstract.\n\nExample::\n\n@pg.symbolize\nclass Foo:\ndef __init__(self, x):\npass\n\nclass Bar(pg.PureSymbolic):\npass\n\nassert not pg.is_abstract(1)\nassert not pg.is_abstract(Foo(1))\nassert pg.is_abstract(Foo.partial())\nassert pg.is_abstract(Bar())\nassert pg.is_abstract(Foo(Bar()))\nassert pg.is_abstract(Foo(pg.oneof([1, 2])))\n\nArgs:\nx: Value to query against.\n\nReturns:\nTrue if value itself is partial/PureSymbolic or its child and nested\nchild fields contain partial/PureSymbolic values.", "source": "github-repos"}
{"code": "def calculate_row_format(columns, keys=None):\n    \n    row_format = ''\n    if keys is None:\n        keys = columns.keys()\n    else:\n        keys = [key for key in keys if key in columns]\n\n    for key in keys:\n        if len(row_format) > 0:\n            row_format += \"|\"\n        row_format += \"%%(%s)-%ds\" % (key, columns[key])\n\n    return '|' + row_format + '|'", "docstring": "Calculate row format.\n\nArgs:\ncolumns (dict): the keys are the column name and the value the max length.\nkeys (list): optional list of keys to order columns as well as to filter for them.\n\nReturns:\nstr: format for table row", "source": "juraj-google-style"}
{"code": "def search_stack_for_localvar(varname):\n    \n    curr_frame = inspect.currentframe()\n    print(' * Searching parent frames for: ' + six.text_type(varname))\n    frame_no = 0\n    while curr_frame.f_back is not None:\n        if varname in curr_frame.f_locals.keys():\n            print(' * Found in frame: ' + six.text_type(frame_no))\n            return curr_frame.f_locals[varname]\n        frame_no += 1\n        curr_frame = curr_frame.f_back\n    print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')\n    return None", "docstring": "Finds a local varable somewhere in the stack and returns the value\n\nArgs:\nvarname (str): variable name\n\nReturns:\nNone if varname is not found else its value", "source": "juraj-google-style"}
{"code": "def get_flag_value(self, wanted_flag_name):\n    tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)\n    if not tensor_tracer_flags:\n        return (False, None)\n    pos = 0\n    while True:\n        match, has_value = TTParameters.match_next_flag(tensor_tracer_flags, pos)\n        if not match:\n            return (False, None)\n        flag_name = match.group(1)\n        if has_value:\n            flag_value = match.group(2)\n        else:\n            flag_value = None\n        if flag_name == wanted_flag_name:\n            return (True, flag_value)\n        pos = match.end()\n    raise RuntimeError('Invalid tensor tracer flag. Could not recognize %s.' % flag_name)", "docstring": "Returns the value of a TensorTracer flags.\n\nArgs:\nwanted_flag_name: the name of the flag we are looking for.\n\nReturns:\nA pair where the first element indicates if the flag is\nfound and the second element is the value of the flag.\n\nRaises:\nRuntimeError: If supposedly deadcode is reached.", "source": "github-repos"}
{"code": "def underline(self, action):\n        \n        if action == 'off':\n            action = '0'\n            self.send(chr(27)+chr(45)+action)\n        else:\n            self.send(chr(27)+chr(45)+action)", "docstring": "Enable/cancel underline printing\n\nArgs:\naction -- Enable or disable underline printing. Options are '1' - '4' and 'cancel'\nReturns:\nNone\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def is_union(declaration):\n    if (not is_class(declaration)):\n        return False\n    decl = class_traits.get_declaration(declaration)\n    return (decl.class_type == class_declaration.CLASS_TYPES.UNION)", "docstring": "Returns True if declaration represents a C++ union\n\nArgs:\ndeclaration (declaration_t): the declaration to be checked.\n\nReturns:\nbool: True if declaration represents a C++ union", "source": "codesearchnet"}
{"code": "def text(self, tag, textdata, step=None):\n    \n    if step is None:\n      step = self._step\n    else:\n      self._step = step\n    smd = SummaryMetadata(\n        plugin_data=SummaryMetadata.PluginData(plugin_name='text'))\n    if isinstance(textdata, (str, bytes)):\n      tensor = tf.make_tensor_proto(\n          values=[textdata.encode(encoding='utf_8')], shape=(1,))\n    else:\n      textdata = onp.array(textdata)  \n      datashape = onp.shape(textdata)\n      if len(datashape) == 1:\n        tensor = tf.make_tensor_proto(\n            values=[td.encode(encoding='utf_8') for td in textdata],\n            shape=(datashape[0],))\n      elif len(datashape) == 2:\n        tensor = tf.make_tensor_proto(\n            values=[\n                td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)\n            ],\n            shape=(datashape[0], datashape[1]))\n    summary = Summary(\n        value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])\n    self.add_summary(summary, step)", "docstring": "Saves a text summary.\n\nArgs:\ntag: str: label for this data\ntextdata: string, or 1D/2D list/numpy array of strings\nstep: int: training step\nNote: markdown formatting is rendered by tensorboard.", "source": "juraj-google-style"}
{"code": "def get(cls, sha1=''):\n    with conf.within_proj_dir():\n        cmd = 'git show -s --format=\"%H||%an||%ae||%s||%b||%P\" {}'.format(sha1)\n        result = shell.run(cmd, capture=True, never_pretend=True).stdout\n    (sha1, name, email, title, desc, parents) = result.split('||')\n    return CommitDetails(sha1=sha1, author=Author(name, email), title=title, desc=desc, parents_sha1=parents.split())", "docstring": "Return details about a given commit.\n\nArgs:\nsha1 (str):\nThe sha1 of the commit to query. If not given, it will return\nthe details for the latest commit.\n\nReturns:\nCommitDetails: Commit details. You can use the instance of the\nclass to query git tree further.", "source": "codesearchnet"}
{"code": "def resolve_identifier(self, name, expected_type=None):\n    name = str(name)\n    if (name in self._known_identifiers):\n        obj = self._known_identifiers[name]\n        if ((expected_type is not None) and (not isinstance(obj, expected_type))):\n            raise UnresolvedIdentifierError(u'Identifier resolved to an object of an unexpected type', name=name, expected_type=expected_type.__name__, resolved_type=obj.__class__.__name__)\n        return obj\n    if (self.parent is not None):\n        try:\n            return self.parent.resolve_identifier(name)\n        except UnresolvedIdentifierError:\n            pass\n    raise UnresolvedIdentifierError(u'Could not resolve identifier', name=name, scope=self.name)", "docstring": "Resolve an identifier to an object.\n\nThere is a single namespace for identifiers so the user also should\npass an expected type that will be checked against what the identifier\nactually resolves to so that there are no surprises.\n\nArgs:\nname (str): The name that we want to resolve\nexpected_type (type): The type of object that we expect to receive.\nThis is an optional parameter.  If None is passed, no type checking\nis performed.\n\nReturns:\nobject: The resolved object", "source": "codesearchnet"}
{"code": "def export_verified_variants(aggregate_variants, unique_callers):\n    document_lines = []\n    for variant in aggregate_variants:\n        samples = []\n        for sample in variant['samples']:\n            line = []\n            line.append(variant['institute'])\n            line.append(variant['_id'])\n            line.append(variant['category'])\n            line.append(variant['variant_type'])\n            line.append(variant['display_name'][:30])\n            case_name = variant['case_obj']['display_name']\n            local_link = '/'.join(['', variant['institute'], case_name, variant['_id']])\n            line.append(local_link)\n            line.append(variant.get('validation'))\n            line.append(case_name)\n            case_individual = next((ind for ind in variant['case_obj']['individuals'] if (ind['individual_id'] == sample['sample_id'])))\n            if (case_individual['phenotype'] == 2):\n                line.append(' '.join([sample.get('display_name'), '(A)']))\n            else:\n                line.append(sample.get('display_name'))\n            line.append(''.join(['chr', variant['chromosome'], ':', str(variant['position'])]))\n            line.append('>'.join([variant.get('reference')[:10], variant.get('alternative')[:10]]))\n            genes = []\n            prot_effect = []\n            funct_anno = []\n            for gene in variant.get('genes'):\n                genes.append(gene.get('hgnc_symbol', ''))\n                funct_anno.append(gene.get('functional_annotation'))\n                for transcript in gene.get('transcripts'):\n                    if (transcript.get('is_canonical') and transcript.get('protein_sequence_name')):\n                        prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))\n            line.append(','.join(prot_effect))\n            line.append(','.join(funct_anno))\n            line.append(','.join(genes))\n            line.append(variant.get('rank_score'))\n            line.append(variant.get('cadd_score'))\n            line.append(sample.get('genotype_call'))\n            line.append(sample['allele_depths'][0])\n            line.append(sample['allele_depths'][1])\n            line.append(sample['genotype_quality'])\n            for caller in unique_callers:\n                if variant.get(caller):\n                    line.append(variant.get(caller))\n                else:\n                    line.append('-')\n            document_lines.append(line)\n    return document_lines", "docstring": "Create the lines for an excel file with verified variants for\nan institute\n\nArgs:\naggregate_variants(list): a list of variants with aggregates case data\nunique_callers(set): a unique list of available callers\n\nReturns:\ndocument_lines(list): list of lines to include in the document", "source": "codesearchnet"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new `BitbucketServerConfig`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBitbucketServerConfigsCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def __init__(self, binary_id, cause=None):\n    \n    super(UnknownSignedBinaryError, self).__init__(binary_id, cause=cause)\n\n    self.binary_id = binary_id\n    self.message = (\"Signed binary of type %s and path %s was not found\" %\n                    (self.binary_id.binary_type, self.binary_id.path))", "docstring": "Initializes UnknownSignedBinaryError.\n\nArgs:\nbinary_id: rdf_objects.SignedBinaryID for the signed binary.\ncause: A lower-level Exception raised by the database driver, which might\nhave more details about the error.", "source": "juraj-google-style"}
{"code": "def __init__(self, name=None, eid=None):\n        \n        if None not in (name, eid):\n            raise TypeError(\"Provide only a `name` or an `eid`.\")\n\n        self._eid = eid or _get_enum(name)\n        self._comments = EnumComments(self._eid)", "docstring": "Get an existing enum.\n\nOnly provide one of `name` and `eid`.\n\nArgs:\nname: Name of the enum\neid: Enum ID", "source": "juraj-google-style"}
{"code": "def tables_insert(self, table_name, schema=None, query=None, friendly_name=None, description=None):\n    url = (Api._ENDPOINT + (Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', '')))\n    data = {'kind': 'bigquery\n    if schema:\n        data['schema'] = {'fields': schema}\n    if query:\n        data['view'] = {'query': query}\n    if friendly_name:\n        data['friendlyName'] = friendly_name\n    if description:\n        data['description'] = description\n    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)", "docstring": "Issues a request to create a table or view in the specified dataset with the specified id.\nA schema must be provided to create a Table, or a query must be provided to create a View.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nschema: the schema, if this is a Table creation.\nquery: the query, if this is a View creation.\nfriendly_name: an optional friendly name.\ndescription: an optional description.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def offTagDel(self, name, func):\n    if ('*' in name):\n        self.ontagdelglobs.rem(name, func)\n        return\n    cblist = self.ontagdels.get(name)\n    if (cblist is None):\n        return\n    try:\n        cblist.remove(func)\n    except ValueError:\n        pass", "docstring": "Unregister a callback for tag deletion.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "codesearchnet"}
{"code": "def __init__(self, line: Optional[Text] = None):\n        \n\n        self.line = line or self.default_separator\n        super(Separator, self).__init__(self.line, None, \"-\")", "docstring": "Create a separator in a list.\n\nArgs:\nline: Text to be displayed in the list, by default uses `---`.", "source": "juraj-google-style"}
{"code": "def netmiko_file_transfer(task: Task, source_file: str, dest_file: str, **kwargs: Any) -> Result:\n    net_connect = task.host.get_connection('netmiko', task.nornir.config)\n    kwargs.setdefault('direction', 'put')\n    scp_result = file_transfer(net_connect, source_file=source_file, dest_file=dest_file, **kwargs)\n    if (kwargs.get('disable_md5') is True):\n        file_valid = scp_result['file_exists']\n    else:\n        file_valid = (scp_result['file_exists'] and scp_result['file_verified'])\n    return Result(host=task.host, result=file_valid, changed=scp_result['file_transferred'])", "docstring": "Execute Netmiko file_transfer method\n\nArguments:\nsource_file: Source file.\ndest_file: Destination file.\nkwargs: Additional arguments to pass to file_transfer\n\nReturns:\nResult object with the following attributes set:\n* result (``bool``): file exists and MD5 is valid\n* changed (``bool``): the destination file was changed", "source": "codesearchnet"}
{"code": "def _ExtractJQuery(self, jquery_raw):\n    \n    data_part = ''\n    if not jquery_raw:\n      return {}\n\n    if '[' in jquery_raw:\n      _, _, first_part = jquery_raw.partition('[')\n      data_part, _, _ = first_part.partition(']')\n    elif jquery_raw.startswith('\n      _, _, first_part = jquery_raw.partition('{')\n      data_part = '{{{0:s}'.format(first_part)\n    elif '({' in jquery_raw:\n      _, _, first_part = jquery_raw.partition('(')\n      data_part, _, _ = first_part.rpartition(')')\n\n    if not data_part:\n      return {}\n\n    try:\n      data_dict = json.loads(data_part)\n    except ValueError:\n      return {}\n\n    return data_dict", "docstring": "Extracts values from a JQuery string.\n\nArgs:\njquery_raw (str): JQuery string.\n\nReturns:\ndict[str, str]: extracted values.", "source": "juraj-google-style"}
{"code": "def build_tab_completion_table(alias_table):\n    \n    alias_commands = [t[1] for t in filter_aliases(alias_table)]\n    tab_completion_table = defaultdict(list)\n    for alias_command in alias_commands:\n        for reserved_command in azext_alias.cached_reserved_commands:\n            \n            if reserved_command == alias_command or reserved_command.startswith(alias_command + ' ') \\\n                    and '' not in tab_completion_table[alias_command]:\n                tab_completion_table[alias_command].append('')\n            elif ' {} '.format(alias_command) in reserved_command or reserved_command.endswith(' ' + alias_command):\n                \n                index = reserved_command.index(alias_command)\n                parent_command = reserved_command[:index - 1]\n                if parent_command not in tab_completion_table[alias_command]:\n                    tab_completion_table[alias_command].append(parent_command)\n\n    with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'w') as f:\n        f.write(json.dumps(tab_completion_table))\n\n    return tab_completion_table", "docstring": "Build a dictionary where the keys are all the alias commands (without positional argument placeholders)\nand the values are all the parent commands of the keys. After that, write the table into a file.\nThe purpose of the dictionary is to validate the alias tab completion state.\n\nFor example:\n{\n\"group\": [\"\", \"ad\"],\n\"dns\": [\"network\"]\n}\n\nArgs:\nalias_table: The alias table.\n\nReturns:\nThe tab completion table.", "source": "juraj-google-style"}
{"code": "def iter(self, max_value: int) -> Iterator[int]:\n        \n        return chain.from_iterable(\n            (self._get_range(elem, max_value) for elem in self.sequences))", "docstring": "Iterates through the sequence numbers contained in the set, bounded\nby the given maximum value (in place of any ``*``).\n\nArgs:\nmax_value: The maximum value of the set.", "source": "juraj-google-style"}
{"code": "def isClientCert(self, name):\n        \n        crtpath = self._getPathJoin('users', '%s.p12' % name)\n        return os.path.isfile(crtpath)", "docstring": "Checks if a user client certificate (PKCS12) exists.\n\nArgs:\nname (str): The name of the user keypair.\n\nExamples:\nCheck if the client certificate \"myuser\" exists:\n\nexists = cdir.isClientCert('myuser')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "juraj-google-style"}
{"code": "def Send(self, command_id, data=b'', size=0):\n        \n        if data:\n            if not isinstance(data, bytes):\n                data = data.encode('utf8')\n            size = len(data)\n\n        if not self._CanAddToSendBuffer(len(data)):\n            self._Flush()\n        buf = struct.pack(b'<2I', self.id_to_wire[command_id], size) + data\n        self.send_buffer[self.send_idx:self.send_idx + len(buf)] = buf\n        self.send_idx += len(buf)", "docstring": "Send/buffer FileSync packets.\n\nPackets are buffered and only flushed when this connection is read from. All\nmessages have a response from the device, so this will always get flushed.\n\nArgs:\ncommand_id: Command to send.\ndata: Optional data to send, must set data or size.\nsize: Optionally override size from len(data).", "source": "juraj-google-style"}
{"code": "def filename(self, fname, timestep=None, suffix='', force_legacy=False):\n        \n        if timestep is not None:\n            fname += '{:05d}'.format(timestep)\n        fname += suffix\n        if not force_legacy and self.hdf5:\n            fpath = self.hdf5 / fname\n        else:\n            fpath = self.par['ioin']['output_file_stem'] + '_' + fname\n            fpath = self.path / fpath\n        return fpath", "docstring": "Return name of StagYY output file.\n\nArgs:\nfname (str): name stem.\ntimestep (int): snapshot number, set to None if this is not\nrelevant.\nsuffix (str): optional suffix of file name.\nforce_legacy (bool): force returning the legacy output path.\nReturns:\n:class:`pathlib.Path`: the path of the output file constructed\nwith the provided segments.", "source": "juraj-google-style"}
{"code": "def resolves_for(self, node):\n        \n\n        self.node = node\n        self.actual_styles = node.style(*self.expected_styles.keys())\n\n        return all(\n            toregex(value).search(self.actual_styles[style])\n            for style, value in iter(self.expected_styles.items()))", "docstring": "Resolves this query relative to the given node.\n\nArgs:\nnode (node.Base): The node to be evaluated.\n\nReturns:\nint: The number of matches found.", "source": "juraj-google-style"}
{"code": "def DeletePendingNotification(self, timestamp):\n    \n    shown_notifications = self.Get(self.Schema.SHOWN_NOTIFICATIONS)\n    if not shown_notifications:\n      shown_notifications = self.Schema.SHOWN_NOTIFICATIONS()\n\n    pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)\n    if not pending:\n      return\n\n    \n    \n    delete_count = 0\n    for idx in reversed(range(0, len(pending))):\n      if pending[idx].timestamp == timestamp:\n        shown_notifications.Append(pending[idx])\n        pending.Pop(idx)\n        delete_count += 1\n\n    if delete_count > 1:\n      raise UniqueKeyError(\"Multiple notifications at %s\" % timestamp)\n\n    self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)\n    self.Set(self.Schema.SHOWN_NOTIFICATIONS, shown_notifications)", "docstring": "Deletes the pending notification with the given timestamp.\n\nArgs:\ntimestamp: The timestamp of the notification. Assumed to be unique.\n\nRaises:\nUniqueKeyError: Raised if multiple notifications have the timestamp.", "source": "juraj-google-style"}
{"code": "def remove_volume(self, name, force=False):\n    params = {}\n    if force:\n        if utils.version_lt(self._version, '1.25'):\n            raise errors.InvalidVersion('force removal was introduced in API 1.25')\n        params = {'force': force}\n    url = self._url('/volumes/{0}', name, params=params)\n    resp = self._delete(url)\n    self._raise_for_status(resp)", "docstring": "Remove a volume. Similar to the ``docker volume rm`` command.\n\nArgs:\nname (str): The volume's name\nforce (bool): Force removal of volumes that were already removed\nout of band by the volume driver plugin.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf volume failed to remove.", "source": "codesearchnet"}
{"code": "def avg_grads(tower_grads):\n  \n  average_grads = []\n  for grad_and_vars in zip(*tower_grads):\n    \n    \n    grads = []\n    for g, _ in grad_and_vars:\n      \n      expanded_g = tf.expand_dims(g, 0)\n\n      \n      grads.append(expanded_g)\n\n    \n    grad = tf.concat(0, grads)\n    grad = tf.reduce_mean(grad, 0)\n\n    \n    \n    \n    v = grad_and_vars[0][1]\n    grad_and_var = (grad, v)\n    average_grads.append(grad_and_var)\n  return average_grads", "docstring": "Calculate the average gradient for each shared variable across all towers.\n\nNote that this function provides a synchronization point across all towers.\n\nArgs:\ntower_grads: List of lists of (gradient, variable) tuples. The outer list\nis over individual gradients. The inner list is over the gradient\ncalculation for each tower.\nReturns:\nList of pairs of (gradient, variable) where the gradient has been averaged\nacross all towers.", "source": "juraj-google-style"}
{"code": "def broadcast_dynamic_shape(shape_x, shape_y):\n    if not isinstance(shape_x, RaggedTensorDynamicShape):\n        raise TypeError('shape_x must be a RaggedTensorDynamicShape')\n    if not isinstance(shape_y, RaggedTensorDynamicShape):\n        raise TypeError('shape_y must be a RaggedTensorDynamicShape')\n    if shape_x.rank is None or shape_y.rank is None:\n        raise ValueError('Unable to broadcast: unknown rank')\n    broadcast_rank = max(shape_x.rank, shape_y.rank)\n    shape_x = shape_x.broadcast_to_rank(broadcast_rank)\n    shape_y = shape_y.broadcast_to_rank(broadcast_rank)\n    for axis in range(broadcast_rank):\n        shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))\n        shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis))\n    return shape_x", "docstring": "Returns the shape formed by broadcasting two shapes to be compatible.\n\nArgs:\nshape_x: A `RaggedTensorDynamicShape`\nshape_y: A `RaggedTensorDynamicShape`\n\nReturns:\nA `RaggedTensorDynamicShape`.\nRaises:\nValueError: If `shape_x` and `shape_y` are not broadcast-compatible.", "source": "github-repos"}
{"code": "def remove_chain(self, name):\n        \n\n        if name in self.chains:\n            delattr(self.chains, name)\n        else:\n            raise ValueError(\"Chain with this name not found\")", "docstring": "Remove chain from current shelve file\n\nArgs:\nname: chain name", "source": "juraj-google-style"}
{"code": "def add_completions(\n    replace_list: list, belstr: str, replace_span: Span, completion_text: str\n) -> List[Mapping[str, Any]]:\n    \n\n    completions = []\n\n    for r in replace_list:\n\n        \n        \n        \n        \n\n        if len(belstr) > 0:\n            belstr_end = len(belstr) - 1\n        else:\n            belstr_end = 0\n\n        log.debug(\n            f'Replace list {r}  Replace_span {replace_span}  BELstr: {belstr} Len: {belstr_end} Test1 {r[\"type\"] == \"Function\"}  Test2 {replace_span[1] + 1 == len(belstr)}'\n        )\n\n        \n        if (\n            r[\"type\"] == \"Function\"\n            and replace_span[0] > 0\n            and belstr[replace_span[0] - 1] == \",\"\n        ):\n            log.debug(\"prior char is a comma\")\n            replacement = (\n                belstr[0 : replace_span[0]]\n                + \" \"\n                + f\"{r['replacement']}()\"\n                + belstr[replace_span[1] + 1 :]\n            )\n            cursor_loc = len(\n                belstr[0 : replace_span[0]] + \" \" + f\"{r['replacement']}()\"\n            )\n        \n        elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == \",\":\n            log.debug(\"prior char is a comma\")\n            replacement = (\n                belstr[0 : replace_span[0]]\n                + \" \"\n                + r[\"replacement\"]\n                + belstr[replace_span[1] + 1 :]\n            )\n            cursor_loc = len(belstr[0 : replace_span[0]] + \" \" + r[\"replacement\"])\n        \n        elif r[\"type\"] == \"Function\" and replace_span[1] >= belstr_end:\n            replacement = belstr[0 : replace_span[0]] + f\"{r['replacement']}()\"\n            cursor_loc = len(replacement) - 1  \n            log.debug(f\"Replacement: {replacement}\")\n        \n        else:\n            replacement = (\n                belstr[0 : replace_span[0]]\n                + r[\"replacement\"]\n                + belstr[replace_span[1] + 1 :]\n            )\n            cursor_loc = len(\n                belstr[0 : replace_span[0]] + r[\"replacement\"]\n            )  \n\n        completions.append(\n            {\n                \"replacement\": replacement,\n                \"cursor_loc\": cursor_loc,\n                \"highlight\": r[\"highlight\"],\n                \"label\": r[\"label\"],\n            }\n        )\n\n    return completions", "docstring": "Create completions to return given replacement list\n\nArgs:\nreplace_list: list of completion replacement values\nbelstr: BEL String\nreplace_span: start, stop of belstr to replace\ncompletion_text: text to use for completion - used for creating highlight\nReturns:\n[{\n\"replacement\": replacement,\n\"cursor_loc\": cursor_loc,\n\"highlight\": highlight,\n\"label\": label,\n}]", "source": "juraj-google-style"}
{"code": "def to_b58check(self, testnet=False):\n    version = (self.TESTNET_VERSION if testnet else self.MAINNET_VERSION)\n    return base58.b58encode_check((bytes([version]) + bytes(self)))", "docstring": "Generates a Base58Check encoding of this private key.\n\nReturns:\nstr: A Base58Check encoded string representing the key.", "source": "codesearchnet"}
{"code": "def get_package_hashes(filename):\n    \n    log.debug('Getting package hashes')\n    filename = os.path.abspath(filename)\n    with open(filename, 'rb') as f:\n        data = f.read()\n\n    _hash = hashlib.sha256(data).hexdigest()\n    log.debug('Hash for file %s: %s', filename, _hash)\n    return _hash", "docstring": "Provides hash of given filename.\n\nArgs:\n\nfilename (str): Name of file to hash\n\nReturns:\n\n(str): sha256 hash", "source": "juraj-google-style"}
{"code": "def core_name(self):\n        \n        buf_size = self.MAX_BUF_SIZE\n        buf = (ctypes.c_char * buf_size)()\n        self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size)\n        return ctypes.string_at(buf).decode()", "docstring": "Returns the name of the target ARM core.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe target core's name.", "source": "juraj-google-style"}
{"code": "def bind_sockets(address, port):\n    ss = netutil.bind_sockets(port=(port or 0), address=address)\n    assert len(ss)\n    ports = {s.getsockname()[1] for s in ss}\n    assert (len(ports) == 1), 'Multiple ports assigned??'\n    actual_port = ports.pop()\n    if port:\n        assert (actual_port == port)\n    return (ss, actual_port)", "docstring": "Bind a socket to a port on an address.\n\nArgs:\naddress (str) :\nAn address to bind a port on, e.g. ``\"localhost\"``\n\nport (int) :\nA port number to bind.\n\nPass 0 to have the OS automatically choose a free port.\n\nThis function returns a 2-tuple with the new socket as the first element,\nand the port that was bound as the second. (Useful when passing 0 as a port\nnumber to bind any free port.)\n\nReturns:\n(socket, port)", "source": "codesearchnet"}
{"code": "def add_key_value(self, key, value):\n    key = self._metadata_map.get(key, key)\n    if (key in ['dateAdded', 'eventDate', 'firstSeen', 'publishDate']):\n        self._group_data[key] = self._utils.format_datetime(value, date_format='%Y-%m-%dT%H:%M:%SZ')\n    elif (key == 'file_content'):\n        pass\n    else:\n        self._group_data[key] = value", "docstring": "Add custom field to Group object.\n\n.. note:: The key must be the exact name required by the batch schema.\n\nExample::\n\ndocument = tcex.batch.group('Document', 'My Document')\ndocument.add_key_value('fileName', 'something.pdf')\n\nArgs:\nkey (str): The field key to add to the JSON batch data.\nvalue (str): The field value to add to the JSON batch data.", "source": "codesearchnet"}
{"code": "def copy_docstring(source_class):\n\n    def decorator(method):\n        'Decorator implementation.\\n\\n        Args:\\n            method (Callable): The method to copy the docstring to.\\n\\n        Returns:\\n            Callable: the same method passed in with an updated docstring.\\n\\n        Raises:\\n            ValueError: if the method already has a docstring.\\n        '\n        if method.__doc__:\n            raise ValueError('Method already has a docstring.')\n        source_method = getattr(source_class, method.__name__)\n        method.__doc__ = source_method.__doc__\n        return method\n    return decorator", "docstring": "Decorator that copies a method's docstring from another class.\n\nArgs:\nsource_class (type): The class that has the documented method.\n\nReturns:\nCallable: A decorator that will copy the docstring of the same\nnamed method in the source class to the decorated method.", "source": "codesearchnet"}
{"code": "def permut2expr(self, P):\n    if (len(P) > (1 << self.nbits)):\n        raise ValueError(('P must not contain more than %d elements' % (1 << self.nbits)))\n    X = self.var('X')\n    ret = super(MBA, self).permut2expr(P, X.vec)\n    return (self.from_vec(ret), X)", "docstring": "Convert a substitution table into an arybo application\n\nArgs:\nP: list of integers. The list must not contain more than 2**nbits elements.\n\nReturns:\nA tuple containing an :class:`MBAVariable` object with the result\nand the symbolic input variable used in this object. A typical use\ncase is to feed these into vectorial_decomp.\n\nExample:\n>>> mba = MBA(4)\n>>> P = [i^7 for i in range(16)]\n>>> E,X = mba.permut2expr(P)\n>>> E.vectorial_decomp([X])\nApp NL = Vec([\n0,\n0,\n0,\n0\n])\nAffApp matrix = Mat([\n[1, 0, 0, 0]\n[0, 1, 0, 0]\n[0, 0, 1, 0]\n[0, 0, 0, 1]\n])\nAffApp cst = Vec([\n1,\n1,\n1,\n0\n])", "source": "codesearchnet"}
{"code": "def object_table(self, object_id=None):\n        \n        self._check_connected()\n        if object_id is not None:\n            \n            return self._object_table(object_id)\n        else:\n            \n            object_keys = self._keys(ray.gcs_utils.TablePrefix_OBJECT_string +\n                                     \"*\")\n            object_ids_binary = {\n                key[len(ray.gcs_utils.TablePrefix_OBJECT_string):]\n                for key in object_keys\n            }\n\n            results = {}\n            for object_id_binary in object_ids_binary:\n                results[binary_to_object_id(object_id_binary)] = (\n                    self._object_table(binary_to_object_id(object_id_binary)))\n            return results", "docstring": "Fetch and parse the object table info for one or more object IDs.\n\nArgs:\nobject_id: An object ID to fetch information about. If this is\nNone, then the entire object table is fetched.\n\nReturns:\nInformation from the object table.", "source": "juraj-google-style"}
{"code": "def depth_october_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_october_average_ground_temperature`'.format(value))\n\n        self._depth_october_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_october_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_october_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def addFileHandler(self,filename='', dr='',lvl=1):\n        \n        fname = self.name\n        if filename != '':\n            fname = filename\n        if '.' not in fname:\n            fname+='.log'\n        fh = logging.FileHandler(os.path.join(dr,fname))\n        fh.setLevel(lvl)\n        frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n        fFrmt = logging.Formatter(frmtString)\n        fh.setFormatter(fFrmt)\n        self.addHandler(fh)", "docstring": "This function will add a file handler to a log with the provided level.\n\nArgs:\nlvl (int): The severity level of messages printed to the file with\nthe file handler, default = 1.", "source": "juraj-google-style"}
{"code": "def model_spec(ops, matrix):\n    return api.ModelSpec(matrix=np.array(matrix), ops=[INPUT] + ops + [OUTPUT])", "docstring": "NASBench model spec that is parameterized by ops and their connections.\n\nArgs:\nops: a list of allowed ops except the INPUT and OUTPUT layer.\nmatrix: the adjacency matrix for the connectivity of each layers, which\nshould be an upper triangle matrix.\n\nReturns:\nA NASBench spec.", "source": "github-repos"}
{"code": "def normalize(self, image, mean, std, rescale=False):\n    self._ensure_format_supported(image)\n    if isinstance(image, PIL.Image.Image):\n        image = self.to_numpy_array(image, rescale=True)\n    elif rescale:\n        if isinstance(image, np.ndarray):\n            image = self.rescale(image.astype(np.float32), 1 / 255.0)\n        elif is_torch_tensor(image):\n            image = self.rescale(image.float(), 1 / 255.0)\n    if isinstance(image, np.ndarray):\n        if not isinstance(mean, np.ndarray):\n            mean = np.array(mean).astype(image.dtype)\n        if not isinstance(std, np.ndarray):\n            std = np.array(std).astype(image.dtype)\n    elif is_torch_tensor(image):\n        import torch\n        if not isinstance(mean, torch.Tensor):\n            if isinstance(mean, np.ndarray):\n                mean = torch.from_numpy(mean)\n            else:\n                mean = torch.tensor(mean)\n        if not isinstance(std, torch.Tensor):\n            if isinstance(std, np.ndarray):\n                std = torch.from_numpy(std)\n            else:\n                std = torch.tensor(std)\n    if image.ndim == 3 and image.shape[0] in [1, 3]:\n        return (image - mean[:, None, None]) / std[:, None, None]\n    else:\n        return (image - mean) / std", "docstring": "Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array\nif it's a PIL Image.\n\nArgs:\nimage (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\nThe image to normalize.\nmean (`List[float]` or `np.ndarray` or `torch.Tensor`):\nThe mean (per channel) to use for normalization.\nstd (`List[float]` or `np.ndarray` or `torch.Tensor`):\nThe standard deviation (per channel) to use for normalization.\nrescale (`bool`, *optional*, defaults to `False`):\nWhether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will\nhappen automatically.", "source": "github-repos"}
{"code": "def cancelPnL(self, account, modelCode: str = ''):\n        \n        key = (account, modelCode)\n        reqId = self.wrapper.pnlKey2ReqId.pop(key, None)\n        if reqId:\n            self.client.cancelPnL(reqId)\n            self.wrapper.pnls.pop(reqId, None)\n        else:\n            self._logger.error(\n                'cancelPnL: No subscription for '\n                f'account {account}, modelCode {modelCode}')", "docstring": "Cancel PnL subscription.\n\nArgs:\naccount: Cancel for this account.\nmodelCode: If specified, cancel for this account model.", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, output_module):\n    \n    if not isinstance(output_module, timesketch_out.TimesketchOutputModule):\n      raise errors.BadConfigObject(\n          'Output module is not an instance of TimesketchOutputModule')\n\n    document_type = cls._ParseStringOption(\n        options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)\n    output_module.SetDocumentType(document_type)\n\n    flush_interval = cls._ParseNumericOption(\n        options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)\n    output_module.SetFlushInterval(flush_interval)\n\n    index = cls._ParseStringOption(\n        options, 'index', default_value=cls._DEFAULT_UUID)\n    output_module.SetIndexName(index)\n\n    name = cls._ParseStringOption(\n        options, 'timeline_name', default_value=cls._DEFAULT_NAME)\n    output_module.SetTimelineName(name)\n\n    username = cls._ParseStringOption(\n        options, 'username', default_value=cls._DEFAULT_USERNAME)\n    output_module.SetTimelineOwner(username)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (TimesketchOutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"}
{"code": "def load_wav_file(filename):\n    with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n        wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])\n        wav_loader = io_ops.read_file(wav_filename_placeholder)\n        wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n        return sess.run(wav_decoder, feed_dict={wav_filename_placeholder: filename}).audio.flatten()", "docstring": "Loads an audio file and returns a float PCM-encoded array of samples.\n\nArgs:\nfilename: Path to the .wav file to load.\n\nReturns:\nNumpy array holding the sample data as floats between -1.0 and 1.0.", "source": "github-repos"}
{"code": "def loss(logits, labels, batch_size=None):\n    if (not batch_size):\n        batch_size = FLAGS.batch_size\n    sparse_labels = tf.reshape(labels, [batch_size, 1])\n    indices = tf.reshape(tf.range(batch_size), [batch_size, 1])\n    concated = tf.concat(axis=1, values=[indices, sparse_labels])\n    num_classes = logits[0].get_shape()[(- 1)].value\n    dense_labels = tf.sparse_to_dense(concated, [batch_size, num_classes], 1.0, 0.0)\n    slim.losses.cross_entropy_loss(logits[0], dense_labels, label_smoothing=0.1, weight=1.0)\n    slim.losses.cross_entropy_loss(logits[1], dense_labels, label_smoothing=0.1, weight=0.4, scope='aux_loss')", "docstring": "Adds all losses for the model.\n\nNote the final loss is not returned. Instead, the list of losses are collected\nby slim.losses. The losses are accumulated in tower_loss() and summed to\ncalculate the total loss.\n\nArgs:\nlogits: List of logits from inference(). Each entry is a 2-D float Tensor.\nlabels: Labels from distorted_inputs or inputs(). 1-D tensor\nof shape [batch_size]\nbatch_size: integer", "source": "codesearchnet"}
{"code": "def get_query_info(sql, con, partition_column):\n    \n    engine = create_engine(con)\n    if is_table(engine, sql):\n        table_metadata = get_table_metadata(engine, sql)\n        query = build_query_from_table(sql)\n        cols = get_table_columns(table_metadata)\n    else:\n        check_query(sql)\n        query = sql.replace(\";\", \"\")\n        cols = get_query_columns(engine, query)\n    \n    \n    cols_names = list(cols.keys())\n    return cols_names, query", "docstring": "Return a columns name list and the query string\n\nArgs:\nsql: SQL query or table name\ncon: database connection or url string\npartition_column: column used to share the data between the workers\n\nReturns:\nColumns name list and query string", "source": "juraj-google-style"}
{"code": "def every_match(self, callback, **kwargs):\n    if (len(kwargs) == 0):\n        raise ArgumentError('You must specify at least one message field to wait on')\n    spec = MessageSpec(**kwargs)\n    responder = self._add_waiter(spec, callback)\n    return (spec, responder)", "docstring": "Invoke callback every time a matching message is received.\n\nThe callback will be invoked directly inside process_message so that\nyou can guarantee that it has been called by the time process_message\nhas returned.\n\nThe callback can be removed by a call to remove_waiter(), passing the\nhandle object returned by this call to identify it.\n\nArgs:\ncallback (callable): A callable function that will be called as\ncallback(message) whenever a matching message is received.\n\nReturns:\nobject: An opaque handle that can be passed to remove_waiter().\n\nThis handle is the only way to remove this callback if you no\nlonger want it to be called.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n    residual = hidden_states\n    hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    second_residual = hidden_states\n    cross_attn_weights = None\n    hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = second_residual + hidden_states\n    hidden_states = self.encoder_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights, cross_attn_weights)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(seq_len, batch, embed_dim)`.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings that are added to the queries and keys in the self-attention layer.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\nencoder_hidden_states (`torch.FloatTensor`):\ncross attention input to the layer of shape `(seq_len, batch, embed_dim)`\nencoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def attach(self, engine, start=Events.STARTED, pause=Events.COMPLETED, resume=None, step=None):\n    engine.add_event_handler(start, self.reset)\n    engine.add_event_handler(pause, self.pause)\n    if (resume is not None):\n        engine.add_event_handler(resume, self.resume)\n    if (step is not None):\n        engine.add_event_handler(step, self.step)\n    return self", "docstring": "Register callbacks to control the timer.\n\nArgs:\nengine (Engine):\nEngine that this timer will be attached to.\nstart (Events):\nEvent which should start (reset) the timer.\npause (Events):\nEvent which should pause the timer.\nresume (Events, optional):\nEvent which should resume the timer.\nstep (Events, optional):\nEvent which should call the `step` method of the counter.\n\nReturns:\nself (Timer)", "source": "codesearchnet"}
{"code": "def parse_iso8601_str(string):\n    datetime_obj = datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')\n    return int(calendar.timegm(datetime_obj.utctimetuple()))", "docstring": "Parse a fixed ISO8601 datetime string.\n\n.. Note:: This function only parses dates in the format\n``%Y-%m-%dT%H:%M:%SZ``. You must use a library like ``dateutils``\nto properly parse dates and times.\n\nReturns:\nfloat: A UNIX timestamp.", "source": "codesearchnet"}
{"code": "def _any_overlap_or_contiguous(self, test_overlap: bool) -> bool:\n    for i in range(len(self.intervals)):\n        for j in range((i + 1), len(self.intervals)):\n            first = self.intervals[i]\n            second = self.intervals[j]\n            if test_overlap:\n                test = first.overlaps(second)\n            else:\n                test = first.contiguous(second)\n            if test:\n                return True\n    return False", "docstring": "Do any of the intervals overlap?\n\nArgs:\ntest_overlap: if ``True``, test for overlapping intervals; if\n``False``, test for contiguous intervals.", "source": "codesearchnet"}
{"code": "def _bigbird_block_rand_mask(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1):\n    if from_seq_length \n        raise ValueError('Error the number of blocks needs to be same!')\n    rand_attn = np.zeros((from_seq_length \n    if not self.training:\n        return rand_attn\n    middle_seq = np.arange(1, to_seq_length \n    last = to_seq_length \n    if last_idx > 2 * to_block_size:\n        last = last_idx \n    r = num_rand_blocks\n    for i in range(1, from_seq_length \n        start = i - 2\n        end = i\n        if i == 1:\n            rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]\n        elif i == 2:\n            rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]\n        elif i == from_seq_length \n            rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n        elif i == from_seq_length \n            rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]\n        elif start > last:\n            start = last\n            rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n        elif end + 1 == last:\n            rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]\n        else:\n            rand_attn[i - 1, :] = np.random.permutation(np.concatenate((middle_seq[:start], middle_seq[end + 1:last])))[:r]\n    return rand_attn", "docstring": "Create adjacency list of random attention.\n\nArgs:\nfrom_seq_length: int. length of from sequence.\nto_seq_length: int. length of to sequence.\nfrom_block_size: int. size of block in from sequence.\nto_block_size: int. size of block in to sequence.\nnum_rand_blocks: int. Number of random chunks per row.\nlast_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,\nif positive then num_rand_blocks blocks chosen only up to last_idx.\n\nReturns:\nadjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks", "source": "github-repos"}
{"code": "def _verify_docker_image_size(self, image_name):\n    \n    shell_call(['docker', 'pull', image_name])\n    try:\n      image_size = subprocess.check_output(\n          ['docker', 'inspect', '--format={{.Size}}', image_name]).strip()\n      image_size = int(image_size)\n    except (ValueError, subprocess.CalledProcessError) as e:\n      logging.error('Failed to determine docker image size: %s', e)\n      return False\n    logging.info('Size of docker image %s is %d', image_name, image_size)\n    if image_size > MAX_DOCKER_IMAGE_SIZE:\n      logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)\n    return image_size <= MAX_DOCKER_IMAGE_SIZE", "docstring": "Verifies size of Docker image.\n\nArgs:\nimage_name: name of the Docker image.\n\nReturns:\nTrue if image size is within the limits, False otherwise.", "source": "juraj-google-style"}
{"code": "def make_layer_stack(layers=gin.REQUIRED, num_layers=6):\n  \n  return LayerStack([cls() for cls in layers] * num_layers)", "docstring": "Configurable layer stack.\n\nArgs:\nlayers: a list of subclasses of TransformerLayer\nnum_layers: an integer\nReturns:\na LayerStack", "source": "juraj-google-style"}
{"code": "def GetPresetsInformation(cls):\n    parser_presets_information = []\n    for preset_definition in ParsersManager.GetPresets():\n        preset_information_tuple = (preset_definition.name, ', '.join(preset_definition.parsers))\n        parser_presets_information.append(preset_information_tuple)\n    return parser_presets_information", "docstring": "Retrieves the presets information.\n\nReturns:\nlist[tuple]: containing:\n\nstr: preset name\nstr: comma separated parser names that are defined by the preset", "source": "codesearchnet"}
{"code": "class UnivNetModelOutput(ModelOutput):\n    waveforms: Optional[torch.FloatTensor] = None\n    waveform_lengths: Optional[torch.FloatTensor] = None", "docstring": "Output class for the [`UnivNetModel`], which includes the generated audio waveforms and the original unpadded\nlengths of those waveforms (so that the padding can be removed by [`UnivNetModel.batch_decode`]).\n\nArgs:\nwaveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nBatched 1D (mono-channel) output audio waveforms.\nwaveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`):\nThe batched length in samples of each unpadded waveform in `waveforms`.", "source": "github-repos"}
{"code": "def EnsureGdbPosition(self, pid, tid, frame_depth):\n    \n    position = [pid, tid, frame_depth]\n    if not pid:\n      return\n    if not self.IsAttached():\n      try:\n        self.Attach(position)\n      except gdb.error as exc:\n        raise PositionUnavailableException(exc.message)\n    if gdb.selected_inferior().pid != pid:\n      self.Detach()\n      try:\n        self.Attach(position)\n      except gdb.error as exc:\n        raise PositionUnavailableException(exc.message)\n\n    if tid:\n      tstate_head = GdbCache.INTERP_HEAD['tstate_head']\n      for tstate in self._IterateChainedList(tstate_head, 'next'):\n        if tid == tstate['thread_id']:\n          self.selected_tstate = tstate\n          break\n      else:\n        raise PositionUnavailableException('Thread %s does not exist.' %\n                                           str(tid))\n      stack_head = self.selected_tstate['frame']\n      if frame_depth is not None:\n        frames = list(self._IterateChainedList(stack_head, 'f_back'))\n        frames.reverse()\n        try:\n          self.selected_frame = frames[frame_depth]\n        except IndexError:\n          raise PositionUnavailableException('Stack is not %s frames deep' %\n                                             str(frame_depth + 1))", "docstring": "Make sure our position matches the request.\n\nArgs:\npid: The process ID of the target process\ntid: The python thread ident of the target thread\nframe_depth: The 'depth' of the requested frame in the frame stack\nRaises:\nPositionUnavailableException: If the requested process, thread or frame\ncan't be found or accessed.", "source": "juraj-google-style"}
{"code": "def _kernel(kernel_spec):\n    if isinstance(kernel_spec, tf.compat.integral_types):\n        return [kernel_spec, kernel_spec]\n    elif (len(kernel_spec) == 1):\n        return [kernel_spec[0], kernel_spec[0]]\n    else:\n        assert (len(kernel_spec) == 2)\n        return kernel_spec", "docstring": "Expands the kernel spec into a length 2 list.\n\nArgs:\nkernel_spec: An integer or a length 1 or 2 sequence that is expanded to a\nlist.\nReturns:\nA length 2 list.", "source": "codesearchnet"}
{"code": "def connect_to(self, vertex, weight=1):\n        \n        for edge in self.edges_out:\n            if vertex == edge.vertex_in:\n                return edge\n        return Edge(self, vertex, weight)", "docstring": "Connect this vertex to another one.\n\nArgs:\nvertex (Vertex): vertex to connect to.\nweight (int): weight of the edge.\n\nReturns:\nEdge: the newly created edge.", "source": "juraj-google-style"}
{"code": "def from_storage(source, source_format='csv', csv_options=None, ignore_unknown_values=False, max_bad_records=0, compressed=False, schema=None):\n    result = FederatedTable()\n    if (source_format == 'csv'):\n        result._bq_source_format = 'CSV'\n        if (csv_options is None):\n            csv_options = _csv_options.CSVOptions()\n    elif (source_format == 'json'):\n        if csv_options:\n            raise Exception('CSV options are not support for JSON tables')\n        result._bq_source_format = 'NEWLINE_DELIMITED_JSON'\n    else:\n        raise Exception(('Invalid source format %s' % source_format))\n    result._source = (source if isinstance(source, list) else [source])\n    result._source_format = source_format\n    result._csv_options = csv_options\n    result._ignore_unknown_values = ignore_unknown_values\n    result._max_bad_records = max_bad_records\n    result._compressed = compressed\n    result._schema = schema\n    return result", "docstring": "Create an external table for a GCS object.\n\nArgs:\nsource: the URL of the source objects(s). Can include a wildcard '*' at the end of the item\nname. Can be a single source or a list.\nsource_format: the format of the data, 'csv' or 'json'; default 'csv'.\ncsv_options: For CSV files, the options such as quote character and delimiter.\nignore_unknown_values: If True, accept rows that contain values that do not match the schema;\nthe unknown values are ignored (default False).\nmax_bad_records: The maximum number of bad records that are allowed (and ignored) before\nreturning an 'invalid' error in the Job result (default 0).\ncompressed: whether the data is GZ compressed or not (default False). Note that compressed\ndata can be used as a federated table but cannot be loaded into a BQ Table.\nschema: the schema of the data. This is required for this table to be used as a federated\ntable or to be loaded using a Table object that itself has no schema (default None).", "source": "codesearchnet"}
{"code": "def padded_urlsafe_b64decode(value):\n    \n    b64string = to_bytes(value)\n    padded = b64string + b'=' * (-len(b64string) % 4)\n    return base64.urlsafe_b64decode(padded)", "docstring": "Decodes base64 strings lacking padding characters.\n\nGoogle infrastructure tends to omit the base64 padding characters.\n\nArgs:\nvalue (Union[str, bytes]): The encoded value.\n\nReturns:\nbytes: The decoded value", "source": "juraj-google-style"}
{"code": "def _write_cache(step, event_file_suffix=None, **kwargs):\n    file_suffix = _TT_EVENT_FILE_SUFFIX\n    if event_file_suffix is not None:\n        file_suffix = string_ops.string_join([file_suffix, event_file_suffix], separator='.')\n    summary_write_ops = []\n    summary_writer = summary.create_file_writer_v2(self._parameters.trace_dir, filename_suffix=file_suffix, max_queue=_TT_SUMMARY_MAX_QUEUE)\n    graph.add_to_collection(TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n    step_value = step[0]\n    dt = step_value.dtype\n    if dt.__ne__(dtypes.int64) and dt.__ne__(dtypes.uint64) and dt.__ne__(dtypes.float64):\n        step_value = math_ops.cast(step_value, dtypes.int64)\n    with summary_writer.as_default():\n        summary_metadata = summary_pb2.SummaryMetadata(plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n        for key, value in kwargs.items():\n            if not self._parameters.collect_summary_per_core:\n                if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n                    value = self.aggregate_global_cache(value)\n            with ops.control_dependencies([summary_writer.init()]):\n                summary_write_ops.append(summary.write(_TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag, value, metadata=summary_metadata, step=step_value))\n    return control_flow_ops.group(summary_write_ops)", "docstring": "Writes the given caches as tensor summary.\n\nArgs:\nstep: Step tensor with dimension [num_cores].\nevent_file_suffix: Event filename suffix tensor.\n**kwargs: The dictionary of tensors that needs to be written as\nsummaries. Key and value pairs within kwargs correspond to the tag\nname, and tensor content that will be written using summary.write.\nThe trace_modes that use this function are:\n- summary: In summary mode, kwargs includes a single (tag, content)\npair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache\nvariable. The dimension of the signature_cache is:\nnum_cores x num_traced_tensors x num_signatures.\n- full_tensor_summary: kwargs will include all traced tensors. Tag\nand content correspond to the name of the tensor, and its actual\ncontent.\nReturns:\nA tf.Operation that needs to be executed for the host call dependencies.", "source": "github-repos"}
{"code": "def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) -> Tuple[(bool, str, dict)]:\n    if (not self._url_filter):\n        return (True, 'nofilters', None)\n    test_info = self._url_filter.test_info(url_info, url_record)\n    verdict = test_info['verdict']\n    if verdict:\n        reason = 'filters'\n    elif (is_redirect and self.is_only_span_hosts_failed(test_info)):\n        verdict = True\n        reason = 'redirect'\n    else:\n        reason = 'filters'\n    return (verdict, reason, test_info)", "docstring": "Consult the URL filter.\n\nArgs:\nurl_record: The URL record.\nis_redirect: Whether the request is a redirect and it is\ndesired that it spans hosts.\n\nReturns\ntuple:\n\n1. bool: The verdict\n2. str: A short reason string: nofilters, filters, redirect\n3. dict: The result from :func:`DemuxURLFilter.test_info`", "source": "codesearchnet"}
{"code": "def path_to_text(self, path):\n        \n        rsrcmgr = PDFResourceManager()\n        retstr = StringIO()\n        codec = 'utf-8'\n        laparams = LAParams()\n        device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n        fp = open(path, 'rb')\n        interpreter = PDFPageInterpreter(rsrcmgr, device)\n        password = \"\"\n        maxpages = 0\n        caching = True\n        pagenos = set()\n\n        pages_data = PDFPage.get_pages(\n            fp,\n            pagenos,\n            maxpages=maxpages,\n            password=password,\n            caching=caching,\n            check_extractable=True\n        )\n\n        for page in pages_data:\n            interpreter.process_page(page)\n\n        text = retstr.getvalue()\n        text = text.replace(\"\\n\", \"\")\n\n        fp.close()\n        device.close()\n        retstr.close()\n        return text", "docstring": "Transform local PDF file to string.\n\nArgs:\npath:   path to PDF file.\n\nReturns:\nstring.", "source": "juraj-google-style"}
{"code": "def paragraphs(self, index = None):\n        \n        if index is None:\n            return self.select(Paragraph,None,True,default_ignore_structure)\n        else:\n            if index < 0:\n                index = self.count(Paragraph,None,True,default_ignore_structure) + index\n            for i,e in enumerate(self.select(Paragraph,None,True,default_ignore_structure)):\n                if i == index:\n                    return e\n            raise IndexError", "docstring": "Returns a generator of Paragraph elements found (recursively) under this element.\n\nArguments:\nindex (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the generator of all", "source": "juraj-google-style"}
{"code": "def _callEventWaitAndGet(self, callback_id, event_name, timeout):\n    timeout_ms = int(timeout * 1000)\n    return self._event_client.eventWaitAndGet(callback_id, event_name, timeout_ms)", "docstring": "Calls snippet lib's eventWaitAndGet.\n\nOverride this method to use this class with various snippet lib\nimplementations.\n\nArgs:\ncallback_id: The callback identifier.\nevent_name: The callback name.\ntimeout: The number of seconds to wait for the event.\n\nReturns:\nThe event dictionary.", "source": "github-repos"}
{"code": "def format(self, record):\n        \n        \n        if record.levelno >= logging.ERROR:\n            color = colorama.Fore.RED\n        elif record.levelno >= logging.WARNING:\n            color = colorama.Fore.YELLOW\n        elif record.levelno >= logging.INFO:\n            color = colorama.Fore.RESET\n        else:\n            color = colorama.Fore.CYAN\n        format_template = (\n            '{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s')\n        if sys.stdout.isatty():\n            self._fmt = format_template.format(\n                colorama.Style.BRIGHT,\n                color,\n                colorama.Fore.RESET,\n                colorama.Style.RESET_ALL\n            )\n        else:\n            self._fmt = format_template.format(*[''] * 4)\n        if six.PY3:\n            self._style._fmt = self._fmt  \n        return super(_LogColorFormatter, self).format(record)", "docstring": "Format the log record with timestamps and level based colors.\n\nArgs:\nrecord: The log record to format.\n\nReturns:\nThe formatted log record.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    data = file_object.read(self._HEADER_READ_SIZE)\n    if (not data.startswith(b'<?xml')):\n        raise errors.UnableToParseFile('Not an Android usage history file [not XML]')\n    (_, _, data) = data.partition(b'\\n')\n    if (not data.startswith(b'<usage-history')):\n        raise errors.UnableToParseFile('Not an Android usage history file [wrong XML root key]')\n    file_object.seek(0, os.SEEK_SET)\n    xml = ElementTree.parse(file_object)\n    root_node = xml.getroot()\n    for application_node in root_node:\n        package_name = application_node.get('name', None)\n        for part_node in application_node.iter():\n            if (part_node.tag != 'comp'):\n                continue\n            last_resume_time = part_node.get('lrt', None)\n            if (last_resume_time is None):\n                parser_mediator.ProduceExtractionWarning('missing last resume time.')\n                continue\n            try:\n                last_resume_time = int(last_resume_time, 10)\n            except ValueError:\n                parser_mediator.ProduceExtractionWarning('unsupported last resume time: {0:s}.'.format(last_resume_time))\n                continue\n            event_data = AndroidAppUsageEventData()\n            event_data.component = part_node.get('name', None)\n            event_data.package = package_name\n            date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RESUME)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Android usage-history file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def get_qubit_los(self, user_lo_config):\n    try:\n        _q_los = self.default_qubit_los.copy()\n    except KeyError:\n        raise PulseError('Qubit default frequencies not exist.')\n    for (channel, lo_freq) in user_lo_config.qubit_lo_dict().items():\n        _q_los[channel.index] = lo_freq\n    if (_q_los == self.default_qubit_los):\n        return None\n    return _q_los", "docstring": "Embed default qubit LO frequencies from backend and format them to list object.\nIf configured lo frequency is the same as default, this method returns `None`.\n\nArgs:\nuser_lo_config (LoConfig): A dictionary of LOs to format.\n\nReturns:\nlist: A list of qubit LOs.\n\nRaises:\nPulseError: when LO frequencies are missing.", "source": "codesearchnet"}
{"code": "def set_category(self, category):\n        \n        pcategory = self.find(\"general/category\")\n        pcategory.clear()\n        name = ElementTree.SubElement(pcategory, \"name\")\n        if isinstance(category, Category):\n            id_ = ElementTree.SubElement(pcategory, \"id\")\n            id_.text = category.id\n            name.text = category.name\n        elif isinstance(category, basestring):\n            name.text = category", "docstring": "Set the policy's category.\n\nArgs:\ncategory: A category object.", "source": "juraj-google-style"}
{"code": "def roll_to_business_day(self, date_tensor, roll_convention):\n    pass", "docstring": "Rolls the given dates to business dates according to given convention.\n\nArgs:\ndate_tensor: DateTensor of dates to roll from.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def incomplete_size(self, name=None):\n    if name is None:\n        name = '%s_BarrierIncompleteSize' % self._name\n    return gen_data_flow_ops.barrier_incomplete_size(self._barrier_ref, name=name)", "docstring": "Compute the number of incomplete elements in the given barrier.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA single-element tensor containing the number of incomplete elements in\nthe given barrier.", "source": "github-repos"}
{"code": "def least_squares_effective_mass( cartesian_k_points, eigenvalues ):\n    \n    if not points_are_in_a_straight_line( cartesian_k_points ):\n        raise ValueError( 'k-points are not collinear' )\n    dk = cartesian_k_points - cartesian_k_points[0]\n    mod_dk = np.linalg.norm( dk, axis = 1 )\n    delta_e = eigenvalues - eigenvalues[0]\n    effective_mass = 1.0 / ( np.polyfit( mod_dk, eigenvalues, 2 )[0] * ev_to_hartree * 2.0 )\n    return effective_mass", "docstring": "Calculate the effective mass using a least squares quadratic fit.\n\nArgs:\ncartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points\neigenvalues (np.array):        Energy eigenvalues at each k-point to be used in the fit.\n\nReturns:\n(float): The fitted effective mass\n\nNotes:\nIf the k-points do not sit on a straight line a ValueError will be raised.", "source": "juraj-google-style"}
{"code": "def buckingham_input(self, structure, keywords, library=None,\n                         uc=True, valence_dict=None):\n        \n        gin = self.keyword_line(*keywords)\n        gin += self.structure_lines(structure, symm_flg=not uc)\n        if not library:\n            gin += self.buckingham_potential(structure, valence_dict)\n        else:\n            gin += self.library_line(library)\n        return gin", "docstring": "Gets a GULP input for an oxide structure and buckingham potential\nfrom library.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\nkeywords: GULP first line keywords.\nlibrary (Default=None): File containing the species and potential.\nuc (Default=True): Unit Cell Flag.\nvalence_dict: {El: valence}", "source": "juraj-google-style"}
{"code": "def __init__(self, baseplate, token, actor_urn, *args, **kwargs):\n        \n\n        self.baseplate = baseplate\n        self.rest_baseurl = 'https:\n        self.token = token\n        self.headers = {\"Authorization\": \"Bot {}\".format(token),\n                        \"User-Agent\": \"Legobot\",\n                        \"Content-Type\": \"application/json\"}\n        self.actor_urn = actor_urn\n        self.ws = None\n        threading.Thread.__init__(self)", "docstring": "Initialize DiscoBot\n\nArgs:\nbaseplate (Legobot.Lego): The parent Pykka actor.\nTypically passed in from Legobot.Connectors.Discord.Discord\ntoken (string): Discord bot token\nactor_urn (string): URN of Pykka actor launching DiscoBot\n*args: Variable length argument list.\n**kwargs: Arbitrary keyword arguments.", "source": "juraj-google-style"}
{"code": "def get_threads(self, page=1):\n    url = self._url.page_url(page)\n    return self._request_threads(url)", "docstring": "Returns all threads on a certain page.\n\nGets a list of Thread objects for every thread on the given page. If a thread is\nalready in our cache, the cached version is returned and thread.want_update is\nset to True on the specific thread object.\n\nPages on 4chan are indexed from 1 onwards.\n\nArgs:\npage (int): Page to request threads for. Defaults to the first page.\n\nReturns:\nlist of :mod:`basc_py4chan.Thread`: List of Thread objects representing the threads on the given page.", "source": "codesearchnet"}
{"code": "def weights_multi_problem(labels, taskid=-1):\n  \n  taskid = check_nonnegative(taskid)\n  past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)\n  \n  past_taskid *= to_float(tf.not_equal(labels, taskid))\n  non_taskid = to_float(labels)\n  return to_float(tf.not_equal(past_taskid * non_taskid, 0))", "docstring": "Assign weight 1.0 to only the \"targets\" portion of the labels.\n\nWeight 1.0 is assigned to all labels past the taskid.\n\nArgs:\nlabels: A Tensor of int32s.\ntaskid: an int32 representing the task id for a problem.\n\nReturns:\nA Tensor of floats.\n\nRaises:\nValueError: The Task ID must be valid.", "source": "juraj-google-style"}
{"code": "def _patch_expand_path(self, settings, name, value):\n    if os.path.isabs(value):\n        return os.path.normpath(value)\n    value = os.path.expanduser(value)\n    if ((not os.path.isabs(value)) and self.projectdir):\n        value = os.path.join(self.projectdir, value)\n    return os.path.normpath(value)", "docstring": "Patch a path to expand home directory and make absolute path.\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (str): Path to patch.\n\nReturns:\nstr: Patched path to an absolute path.", "source": "codesearchnet"}
{"code": "def from_yang(self, text: str) -> ScalarValue:\n    res = self.parse_value(text)\n    if (res is None):\n        raise InvalidArgument(text)\n    return res", "docstring": "Parse value specified in a YANG module.\n\nArgs:\ntext: String representation of the value.\n\nRaises:\nInvalidArgument: If the receiver type cannot parse the text.", "source": "codesearchnet"}
{"code": "def print_solution(model, solver):\n    model_proto = model.Proto()\n    response_proto = solver.ResponseProto()\n    variables_in_objective_map = {}\n    maximization = False\n    if model_proto.HasField('objective'):\n        objective = model_proto.objective\n        for i in range(len(objective.vars)):\n            variables_in_objective_map[objective.vars[i]] = objective.coeffs[i]\n        if (objective.scaling_factor < 0.0):\n            maximization = True\n    variable_assignments = []\n    variables_in_objective = []\n    num_vars = len(model_proto.variables)\n    for var_index in range(num_vars):\n        if (not model_proto.variables[var_index].name):\n            continue\n        variable_name = model_proto.variables[var_index].name\n        if (var_index in variables_in_objective_map):\n            coefficient = variables_in_objective_map[var_index]\n            if coefficient:\n                if maximization:\n                    coefficient *= (- 1)\n                if (coefficient < 0):\n                    variables_in_objective.append(' - {} * {}'.format((- coefficient), variable_name))\n                elif (coefficient > 0):\n                    variables_in_objective.append(' + {} * {}'.format(coefficient, variable_name))\n        variable_assignments.append('  {} = {}\\n'.format(variable_name, response_proto.solution[var_index]))\n    print(''.join(variable_assignments), end='')\n    if (variables_in_objective and (variables_in_objective[0][1] == '+')):\n        variables_in_objective[0] = variables_in_objective[0][2:]\n    print('{}:{}'.format(('Maximize' if maximization else 'Minimize'), ''.join(variables_in_objective)))\n    print('Objective value: {}\\n'.format(solver.ObjectiveValue()))", "docstring": "Prints the solution associated with solver.\n\nIf solver has already had Solve() called on it, prints the solution. This\nincludes each variable and its assignment, along with the objective function\nand its optimal value.\nIf solver has not had Solve() called on it, or there is no feasible solution,\nthis will probably crash.\n\nArgs:\nmodel: A pywrapcp.CpModel object.\nsolver: A pywrapcp.CpSolver object.\n\nReturns:\nNothing, but prints the solution associated with solver.", "source": "codesearchnet"}
{"code": "def check_status(self, **kwargs):\n    for work in self:\n        work.check_status()\n    if kwargs.pop('show', False):\n        self.show_status(**kwargs)", "docstring": "Check the status of the works in self.\n\nArgs:\nshow: True to show the status of the flow.\nkwargs: keyword arguments passed to show_status", "source": "codesearchnet"}
{"code": "def RegisterRecordType(cls, record_class):\n    record_type = record_class.MatchType()\n    if (record_type not in UpdateRecord.KNOWN_CLASSES):\n        UpdateRecord.KNOWN_CLASSES[record_type] = []\n    UpdateRecord.KNOWN_CLASSES[record_type].append(record_class)", "docstring": "Register a known record type in KNOWN_CLASSES.\n\nArgs:\nrecord_class (UpdateRecord): An update record subclass.", "source": "codesearchnet"}
{"code": "def run_from_cli(self, args):\n    if args['--dump-config']:\n        self._config.print_config()\n    else:\n        (stdout, stderr) = self.lint(args['<path>'])\n        self.print_results(stdout, stderr)", "docstring": "Read arguments, run and print results.\n\nArgs:\nargs (dict): Arguments parsed by docopt.", "source": "codesearchnet"}
{"code": "def ParsePartitionsTable(\n      self, parser_mediator, database=None, table=None, **unused_kwargs):\n    \n    if database is None:\n      raise ValueError('Missing database value.')\n\n    if table is None:\n      raise ValueError('Missing table value.')\n\n    for esedb_record in table.records:\n      if parser_mediator.abort:\n        break\n\n      record_values = self._GetRecordValues(\n          parser_mediator, table.name, esedb_record)\n\n      event_data = MsieWebCachePartitionsEventData()\n      event_data.directory = record_values.get('Directory', None)\n      event_data.partition_identifier = record_values.get('PartitionId', None)\n      event_data.partition_type = record_values.get('PartitionType', None)\n      event_data.table_identifier = record_values.get('TableId', None)\n\n      timestamp = record_values.get('LastScavengeTime', None)\n      if timestamp:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'Last Scavenge Time')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the Partitions table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.\n\nRaises:\nValueError: if the database or table value is missing.", "source": "juraj-google-style"}
{"code": "def get_db_row(db, start, size):\n    \n    type_ = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte]\n    data = client.db_read(db, start, type_, size)\n    \n    return data", "docstring": "Here you see and example of readying out a part of a DB\n\nArgs:\ndb (int): The db to use\nstart (int): The index of where to start in db data\nsize (int): The size of the db data to read", "source": "juraj-google-style"}
{"code": "def mkdirs(path):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.mkdirs(path)", "docstring": "Recursively create directories for the provided path.\n\nArgs:\npath: string path of the directory structure that should be created\n\nRaises:\nIOError: if leaf directory already exists.", "source": "github-repos"}
{"code": "def node_run(input_file, coords_only, bc_settings, bc_grid_weights):\n    log = logging.getLogger('pyspark')\n    log.setLevel(logging.INFO)\n    if (len(log.handlers) == 0):\n        log.addHandler(logging.StreamHandler(sys.stdout))\n    precision = bc_settings.value['precision']\n    imager = oskar.Imager(precision)\n    for (key, value) in bc_settings.value['imager'].items():\n        setattr(imager, key, value)\n    grid_size = imager.plane_size\n    grid_weights = None\n    ms_han = oskar.MeasurementSet.open(input_file)\n    if coords_only:\n        if (imager.weighting == 'Uniform'):\n            grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)\n        log.info('Reading coordinates from %s', input_file)\n        imager.coords_only = True\n        process_input_data(ms_han, imager, None, grid_weights)\n        imager.coords_only = False\n        return (grid_weights, imager.num_w_planes)\n    grid_data = numpy.zeros([grid_size, grid_size], dtype=('c8' if (precision == 'single') else 'c16'))\n    log.info('Reading visibilities from %s', input_file)\n    if bc_settings.value['combine']:\n        if (imager.weighting == 'Uniform'):\n            grid_weights = bc_grid_weights.value\n        grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)\n        log.info('Returning gridded visibilities to RDD')\n        return (grid_data, grid_norm)\n    else:\n        if (imager.weighting == 'Uniform'):\n            grid_weights = numpy.zeros([grid_size, grid_size], dtype=precision)\n        if ((imager.weighting == 'Uniform') or (imager.algorithm == 'W-projection')):\n            imager.coords_only = True\n            process_input_data(ms_han, imager, None, grid_weights)\n            imager.coords_only = False\n        grid_norm = process_input_data(ms_han, imager, grid_data, grid_weights)\n        output_file = (splitext(input_file)[0] + '.fits')\n        save_image(imager, grid_data, grid_norm, output_file)\n        log.info('Finished. Output file is %s', output_file)\n        return 0", "docstring": "Main function to process visibility data on Spark cluster nodes.\n\nArgs:\ninput_file (str):\nRDD element containing filename to process.\ncoords_only (boolean):\nIf true, read only baseline coordinates to define the weights grid.\nbc_settings (pyspark.broadcast.Broadcast):\nSpark broadcast variable containing pipeline settings dictionary.\nbc_grid_weights (pyspark.broadcast.Broadcast):\nSpark broadcast variable containing weights grid. May be None.\n\nReturns:\ntuple: Output RDD element.", "source": "codesearchnet"}
{"code": "def configure_stream(level='WARNING'):\n    root_logger = logging.getLogger()\n    root_logger.setLevel(level)\n    template = '[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s'\n    formatter = logging.Formatter(template)\n    console = logging.StreamHandler()\n    console.setLevel(level)\n    console.setFormatter(formatter)\n    root_logger.addHandler(console)\n    return root_logger", "docstring": "Configure root logger using a standard stream handler.\n\nArgs:\nlevel (string, optional): lowest level to log to the console\n\nReturns:\nlogging.RootLogger: root logger instance with attached handler", "source": "codesearchnet"}
{"code": "def eval_image(image, height, width, scope=None):\n  \n  with tf.name_scope(values=[image, height, width], name=scope,\n                     default_name='eval_image'):\n    \n    \n    image = tf.image.central_crop(image, central_fraction=0.875)\n\n    \n    image = tf.expand_dims(image, 0)\n    image = tf.image.resize_bilinear(image, [height, width],\n                                     align_corners=False)\n    image = tf.squeeze(image, [0])\n    return image", "docstring": "Prepare one image for evaluation.\n\nArgs:\nimage: 3-D float Tensor\nheight: integer\nwidth: integer\nscope: Optional scope for name_scope.\nReturns:\n3-D float Tensor of prepared image.", "source": "juraj-google-style"}
{"code": "def serialize_cert_to_pem(cert_obj):\n    return cert_obj.public_bytes(encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM)", "docstring": "Serialize certificate to PEM.\n\nThe certificate can be also be a Certificate Signing Request (CSR).\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nbytes: PEM encoded certificate", "source": "codesearchnet"}
{"code": "def get_summary_dict(self, include_msd_t=False, include_mscd_t=False):\n        \n        d = {\n            \"D\": self.diffusivity,\n            \"D_sigma\": self.diffusivity_std_dev,\n            \"D_charge\": self.chg_diffusivity,\n            \"D_charge_sigma\": self.chg_diffusivity_std_dev,\n            \"S\": self.conductivity,\n            \"S_sigma\": self.conductivity_std_dev,\n            \"S_charge\": self.chg_conductivity,\n            \"D_components\": self.diffusivity_components.tolist(),\n            \"S_components\": self.conductivity_components.tolist(),\n            \"D_components_sigma\": self.diffusivity_components_std_dev.tolist(),\n            \"S_components_sigma\": self.conductivity_components_std_dev.tolist(),\n            \"specie\": str(self.specie),\n            \"step_skip\": self.step_skip,\n            \"time_step\": self.time_step,\n            \"temperature\": self.temperature,\n            \"max_framework_displacement\": self.max_framework_displacement,\n            \"Haven_ratio\": self.haven_ratio\n        }\n        if include_msd_t:\n            d[\"msd\"] = self.msd.tolist()\n            d[\"msd_components\"] = self.msd_components.tolist()\n            d[\"dt\"] = self.dt.tolist()\n        if include_mscd_t:\n            d[\"mscd\"] = self.mscd.tolist()\n        return d", "docstring": "Provides a summary of diffusion information.\n\nArgs:\ninclude_msd_t (bool): Whether to include mean square displace and\ntime data with the data.\ninclude_msd_t (bool): Whether to include mean square charge displace and\ntime data with the data.\n\nReturns:\n(dict) of diffusion and conductivity data.", "source": "juraj-google-style"}
{"code": "def _get_summary_signatures(self):\n    signatures = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES)\n    supported_signatures = self._supported_signatures()\n    tt_signatures = []\n    for signature in signatures:\n        signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature)\n        if signature in supported_signatures:\n            tt_signatures.append(signature)\n        elif signature_with_prefix in supported_signatures:\n            tt_signatures.append(signature_with_prefix)\n        else:\n            logging.warning('Unknown signature:%s. Supported signatures: %s' % (signature, supported_signatures))\n    if not tt_signatures:\n        return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1}\n    else:\n        return {signature: idx for idx, signature in enumerate(tt_signatures)}", "docstring": "Verifies and returns the summary signatures.\n\nReturns:\nA dictionary of the signature identifiers {signature: index} that will be\ncomputed when trace_mode is summary.", "source": "github-repos"}
{"code": "def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):\n  \n  \n  return list(filter(None, key_path.split(path_separator)))", "docstring": "Splits the key path into path segments.\n\nArgs:\nkey_path (str): key path.\npath_separator (Optional[str]): path separator.\n\nReturns:\nlist[str]: key path segments without the root path segment, which is an\nempty string.", "source": "juraj-google-style"}
{"code": "def get_cache_key(**kwargs):\n    key = '__'.join(['{}:{}'.format(item, value) for (item, value) in iteritems(kwargs)])\n    return hashlib.md5(key.encode('utf-8')).hexdigest()", "docstring": "Get MD5 encoded cache key for given arguments.\n\nHere is the format of key before MD5 encryption.\nkey1:value1__key2:value2 ...\n\nExample:\n>>> get_cache_key(site_domain=\"example.com\", resource=\"enterprise\")\n# Here is key format for above call\n# \"site_domain:example.com__resource:enterprise\"\na54349175618ff1659dee0978e3149ca\n\nArguments:\n**kwargs: Key word arguments that need to be present in cache key.\n\nReturns:\nAn MD5 encoded key uniquely identified by the key word arguments.", "source": "codesearchnet"}
{"code": "def _dispatch_command(self, command):\n    if command in self.CLI_EXIT_COMMANDS:\n        return debugger_cli_common.EXPLICIT_USER_EXIT\n    try:\n        prefix, args, output_file_path = self._parse_command(command)\n    except SyntaxError as e:\n        print(str(e))\n        return\n    if self._command_handler_registry.is_registered(prefix):\n        try:\n            screen_output = self._command_handler_registry.dispatch_command(prefix, args, screen_info=None)\n        except debugger_cli_common.CommandLineExit as e:\n            return e.exit_token\n    else:\n        screen_output = debugger_cli_common.RichTextLines([self.ERROR_MESSAGE_PREFIX + 'Invalid command prefix \"%s\"' % prefix])\n    self._display_output(screen_output)\n    if output_file_path:\n        try:\n            screen_output.write_to_file(output_file_path)\n            print('Wrote output to %s' % output_file_path)\n        except Exception:\n            print('Failed to write output to %s' % output_file_path)", "docstring": "Dispatch user command.\n\nArgs:\ncommand: (str) Command to dispatch.\n\nReturns:\nAn exit token object. None value means that the UI loop should not exit.\nA non-None value means the UI loop should exit.", "source": "github-repos"}
{"code": "def get_proposed_feature(project):\n    \n    change_collector = ChangeCollector(project)\n    collected_changes = change_collector.collect_changes()\n    try:\n        new_feature_info = one_or_raise(collected_changes.new_feature_info)\n        importer, _, _ = new_feature_info\n    except ValueError:\n        raise BalletError('Too many features collected')\n    module = importer()\n    feature = _get_contrib_feature_from_module(module)\n    return feature", "docstring": "Get the proposed feature\n\nThe path of the proposed feature is determined by diffing the project\nagainst a comparison branch, such as master. The feature is then imported\nfrom that path and returned.\n\nArgs:\nproject (ballet.project.Project): project info\n\nRaises:\nballet.exc.BalletError: more than one feature collected", "source": "juraj-google-style"}
{"code": "def __init__(self, *dic):\n        \n        super().__init__()\n        self.value = [ArraySingle()]\n        self.l = self.value[0].value", "docstring": "init\n\nArgs:\n*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.", "source": "juraj-google-style"}
{"code": "def line_iter(xo: int, yo: int, xd: int, yd: int) -> Iterator[Tuple[int, int]]:\n    \n    data = ffi.new(\"TCOD_bresenham_data_t *\")\n    lib.TCOD_line_init_mt(xo, yo, xd, yd, data)\n    x = ffi.new(\"int *\")\n    y = ffi.new(\"int *\")\n    yield xo, yo\n    while not lib.TCOD_line_step_mt(x, y, data):\n        yield (x[0], y[0])", "docstring": "returns an Iterable\n\nThis Iterable does not include the origin point.\n\nArgs:\nxo (int): X starting point.\nyo (int): Y starting point.\nxd (int): X destination point.\nyd (int): Y destination point.\n\nReturns:\nIterable[Tuple[int,int]]: An Iterable of (x,y) points.", "source": "juraj-google-style"}
{"code": "def fingerprint(self):\n    return gen_dataset_ops.dataset_fingerprint(self._variant_tensor)", "docstring": "Computes the fingerprint of this `Dataset`.\n\nIf two datasets have the same fingerprint, it is guaranteed that they\nwould produce identical elements as long as the content of the upstream\ninput files does not change and they produce data deterministically.\n\nHowever, two datasets producing identical values does not always mean they\nwould have the same fingerprint due to different graph constructs.\n\nIn other words, if two datasets have different fingerprints, they could\nstill produce identical values.\n\nReturns:\nA scalar `tf.Tensor` of type `tf.uint64`.", "source": "github-repos"}
{"code": "def parse_frequencies(variant, transcripts):\n    frequencies = {}\n    thousand_genomes_keys = ['1000GAF']\n    thousand_genomes_max_keys = ['1000G_MAX_AF']\n    exac_keys = ['EXACAF']\n    exac_max_keys = ['ExAC_MAX_AF', 'EXAC_MAX_AF']\n    gnomad_keys = ['GNOMADAF', 'GNOMAD_AF']\n    gnomad_max_keys = ['GNOMADAF_POPMAX', 'GNOMADAF_MAX']\n    for test_key in thousand_genomes_keys:\n        thousand_g = parse_frequency(variant, test_key)\n        if thousand_g:\n            frequencies['thousand_g'] = thousand_g\n            break\n    for test_key in thousand_genomes_max_keys:\n        thousand_g_max = parse_frequency(variant, test_key)\n        if thousand_g_max:\n            frequencies['thousand_g_max'] = thousand_g_max\n            break\n    for test_key in exac_keys:\n        exac = parse_frequency(variant, test_key)\n        if exac:\n            frequencies['exac'] = exac\n            break\n    for test_key in exac_max_keys:\n        exac_max = parse_frequency(variant, test_key)\n        if exac_max:\n            frequencies['exac_max'] = exac_max\n            break\n    for test_key in gnomad_keys:\n        gnomad = parse_frequency(variant, test_key)\n        if gnomad:\n            frequencies['gnomad'] = gnomad\n            break\n    for test_key in gnomad_max_keys:\n        gnomad_max = parse_frequency(variant, test_key)\n        if gnomad_max:\n            frequencies['gnomad_max'] = gnomad_max\n            break\n    if (not frequencies):\n        for transcript in transcripts:\n            exac = transcript.get('exac_maf')\n            exac_max = transcript.get('exac_max')\n            thousand_g = transcript.get('thousand_g_maf')\n            thousandg_max = transcript.get('thousandg_max')\n            gnomad = transcript.get('gnomad_maf')\n            gnomad_max = transcript.get('gnomad_max')\n            if exac:\n                frequencies['exac'] = exac\n            if exac_max:\n                frequencies['exac_max'] = exac_max\n            if thousand_g:\n                frequencies['thousand_g'] = thousand_g\n            if thousandg_max:\n                frequencies['thousand_g_max'] = thousandg_max\n            if gnomad:\n                frequencies['gnomad'] = gnomad\n            if gnomad_max:\n                frequencies['gnomad_max'] = gnomad_max\n    thousand_g_left = parse_frequency(variant, 'left_1000GAF')\n    if thousand_g_left:\n        frequencies['thousand_g_left'] = thousand_g_left\n    thousand_g_right = parse_frequency(variant, 'right_1000GAF')\n    if thousand_g_right:\n        frequencies['thousand_g_right'] = thousand_g_right\n    return frequencies", "docstring": "Add the frequencies to a variant\n\nFrequencies are parsed either directly from keys in info fieds or from the\ntranscripts is they are annotated there.\n\nArgs:\nvariant(cyvcf2.Variant): A parsed vcf variant\ntranscripts(iterable(dict)): Parsed transcripts\n\nReturns:\nfrequencies(dict): A dictionary with the relevant frequencies", "source": "codesearchnet"}
{"code": "def list_groups(self, filtr=None):\n        \n        return self.service.list_groups(\n            filtr, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Get the groups the logged in user is a member of.\n\nOptionally filter by 'member' or 'maintainer'.\n\nArgs:\nfiltr (optional[string|None]): ['member'|'maintainer'] or defaults to None.\nReturns:\n(list[string]): List of group names.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None, pos=None, end=None):\n    query = {}\n    if chromosome:\n        query['chrom'] = chromosome\n    if end_chromosome:\n        query['end_chrom'] = end_chromosome\n    if sv_type:\n        query['sv_type'] = sv_type\n    if pos:\n        if (not ('$and' in query)):\n            query['$and'] = []\n        query['$and'].append({'pos_left': {'$lte': pos}})\n        query['$and'].append({'pos_right': {'$gte': pos}})\n    if end:\n        if (not ('$and' in query)):\n            query['$and'] = []\n        query['$and'].append({'end_left': {'$lte': end}})\n        query['$and'].append({'end_right': {'$gte': end}})\n    LOG.info('Find all sv variants {}'.format(query))\n    return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])", "docstring": "Return all structural variants in the database\n\nArgs:\nchromosome (str)\nend_chromosome (str)\nsv_type (str)\npos (int): Left position of SV\nend (int): Right position of SV\n\nReturns:\nvariants (Iterable(Variant))", "source": "codesearchnet"}
{"code": "def is_ready(self, node_id, metadata_priority=True):\n    if (not self._can_send_request(node_id)):\n        return False\n    if metadata_priority:\n        if self._metadata_refresh_in_progress:\n            return False\n        if (self.cluster.ttl() == 0):\n            return False\n    return True", "docstring": "Check whether a node is ready to send more requests.\n\nIn addition to connection-level checks, this method also is used to\nblock additional requests from being sent during a metadata refresh.\n\nArguments:\nnode_id (int): id of the node to check\nmetadata_priority (bool): Mark node as not-ready if a metadata\nrefresh is required. Default: True\n\nReturns:\nbool: True if the node is ready and metadata is not refreshing", "source": "codesearchnet"}
{"code": "def evpn_instance_rd_auto(self, **kwargs):\n    config = ET.Element('config')\n    rbridge_id = ET.SubElement(config, 'rbridge-id', xmlns='urn:brocade.com:mgmt:brocade-rbridge')\n    rbridge_id_key = ET.SubElement(rbridge_id, 'rbridge-id')\n    rbridge_id_key.text = kwargs.pop('rbridge_id')\n    evpn_instance = ET.SubElement(rbridge_id, 'evpn-instance', xmlns='urn:brocade.com:mgmt:brocade-bgp')\n    instance_name_key = ET.SubElement(evpn_instance, 'instance-name')\n    instance_name_key.text = kwargs.pop('instance_name')\n    route_distinguisher = ET.SubElement(evpn_instance, 'route-distinguisher')\n    ET.SubElement(route_distinguisher, 'auto')\n    callback = kwargs.pop('callback', self._callback)\n    return callback(config)", "docstring": "Add RD auto under EVPN instance.\n\nArgs:\nrbridge_id: Rbrdige id .\ninstance_name: EVPN instance name.\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone\nExamples:\n>>> import pynos.device\n>>> switches = ['10.24.39.211', '10.24.39.203']\n>>> auth = ('admin', 'password')\n>>> for switch in switches:\n...     conn = (switch, '22')\n...     with pynos.device.Device(conn=conn, auth=auth) as dev:\n...         output=dev.interface.evpn_instance_rd_auto(\n...         evpn_instance_name='100',\n...         rbridge_id='1')", "source": "codesearchnet"}
{"code": "def patch_apply(self, patches, text):\n    \n    if not patches:\n      return (text, [])\n\n    \n    patches = self.patch_deepCopy(patches)\n\n    nullPadding = self.patch_addPadding(patches)\n    text = nullPadding + text + nullPadding\n    self.patch_splitMax(patches)\n\n    \n    \n    \n    \n    delta = 0\n    results = []\n    for patch in patches:\n      expected_loc = patch.start2 + delta\n      text1 = self.diff_text1(patch.diffs)\n      end_loc = -1\n      if len(text1) > self.Match_MaxBits:\n        \n        \n        start_loc = self.match_main(text, text1[:self.Match_MaxBits],\n                                    expected_loc)\n        if start_loc != -1:\n          end_loc = self.match_main(text, text1[-self.Match_MaxBits:],\n              expected_loc + len(text1) - self.Match_MaxBits)\n          if end_loc == -1 or start_loc >= end_loc:\n            \n            start_loc = -1\n      else:\n        start_loc = self.match_main(text, text1, expected_loc)\n      if start_loc == -1:\n        \n        results.append(False)\n        \n        delta -= patch.length2 - patch.length1\n      else:\n        \n        results.append(True)\n        delta = start_loc - expected_loc\n        if end_loc == -1:\n          text2 = text[start_loc : start_loc + len(text1)]\n        else:\n          text2 = text[start_loc : end_loc + self.Match_MaxBits]\n        if text1 == text2:\n          \n          text = (text[:start_loc] + self.diff_text2(patch.diffs) +\n                      text[start_loc + len(text1):])\n        else:\n          \n          \n          diffs = self.diff_main(text1, text2, False)\n          if (len(text1) > self.Match_MaxBits and\n              self.diff_levenshtein(diffs) / float(len(text1)) >\n              self.Patch_DeleteThreshold):\n            \n            results[-1] = False\n          else:\n            self.diff_cleanupSemanticLossless(diffs)\n            index1 = 0\n            for (op, data) in patch.diffs:\n              if op != self.DIFF_EQUAL:\n                index2 = self.diff_xIndex(diffs, index1)\n              if op == self.DIFF_INSERT:  \n                text = text[:start_loc + index2] + data + text[start_loc +\n                                                               index2:]\n              elif op == self.DIFF_DELETE:  \n                text = text[:start_loc + index2] + text[start_loc +\n                    self.diff_xIndex(diffs, index1 + len(data)):]\n              if op != self.DIFF_DELETE:\n                index1 += len(data)\n    \n    text = text[len(nullPadding):-len(nullPadding)]\n    return (text, results)", "docstring": "Merge a set of patches onto the text.  Return a patched text, as well\nas a list of true/false values indicating which patches were applied.\n\nArgs:\npatches: Array of Patch objects.\ntext: Old text.\n\nReturns:\nTwo element Array, containing the new text and an array of boolean values.", "source": "juraj-google-style"}
{"code": "def sample_from_likelihood(self, n_timesteps=10):\n    self.latent_state_sequences = lmap((lambda A: ltake(n_timesteps, iterate((lambda s: pd.Series((A @ s.values), index=s.index)), self.s0))), self.transition_matrix_collection)\n    self.observed_state_sequences = [[self.sample_observed_state(s) for s in latent_state_sequence] for latent_state_sequence in self.latent_state_sequences]", "docstring": "Sample a collection of observed state sequences from the likelihood\nmodel given a collection of transition matrices.\n\nArgs:\nn_timesteps: The number of timesteps for the sequences.", "source": "codesearchnet"}
{"code": "def merge_level_and_latent_dist(level_dist, latent_dist, merge_std='prev_level'):\n    (level_mean, level_std) = (level_dist.loc, level_dist.scale)\n    (latent_mean, latent_std) = (latent_dist.loc, latent_dist.scale)\n    new_mean = (level_mean + latent_mean)\n    if (merge_std == 'normal'):\n        z_shape = common_layers.shape_list(latent_mean)\n        log_scale = tf.get_variable('merge_std', shape=z_shape, dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=False)\n        scale = tf.exp((log_scale * 3.0))\n    elif (merge_std == 'prev_level'):\n        scale = level_std\n    elif (merge_std == 'prev_step'):\n        scale = latent_std\n    return tfp.distributions.Normal(loc=new_mean, scale=scale)", "docstring": "Merge level_dist and latent_dist.\n\nnew_dist ~ N(level_dist.mean + latent_dis.mean, std) where std is determined\naccording to merge_std.\n\nArgs:\nlevel_dist: instance of tfp.distributions.Normal\nlatent_dist: instance of tfp.distributions.Normal\nmerge_std: can be \"prev_level\", \"prev_step\" or \"normal\".\nReturns:\nmerged_dist: instance of tfp.distributions.Normal", "source": "codesearchnet"}
{"code": "def if_true(self, predicate: Callable[..., bool]):\n    return Conditional(predicate, self, None)", "docstring": "Conditionally applies current operation when predicate returns True.\n\nArgs:\npredicate: The predicate that takes the outputs from the previous\noperation as input, with optional keyword arguments `global_state` and\n`step`. Returns True if current operation needs to be enabled.\nOtherwise no operation will be performed.\n\nReturns:\nA conditional operation.", "source": "github-repos"}
{"code": "def start_logging(self, region, name):\n    ct = self.session.client('cloudtrail', region_name=region)\n    ct.start_logging(Name=name)\n    auditlog(event='cloudtrail.start_logging', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    self.log.info('Enabled logging for {} ({})'.format(name, region))", "docstring": "Turn on logging for a CloudTrail Trail\n\nArgs:\nregion (`str`): Name of the AWS region\nname (`str`): Name of the CloudTrail Trail\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def get_min_muO2(self, min_voltage=None, max_voltage=None):\n    data = []\n    for pair in self._select_in_voltage_range(min_voltage, max_voltage):\n        if (pair.muO2_discharge is not None):\n            data.extend([d['chempot'] for d in pair.muO2_discharge])\n        if (pair.muO2_charge is not None):\n            data.extend([d['chempot'] for d in pair.muO2_discharge])\n    return (min(data) if (len(data) > 0) else None)", "docstring": "Minimum critical oxygen chemical potential along path.\n\nArgs:\nmin_voltage: The minimum allowable voltage for a given step\nmax_voltage: The maximum allowable voltage allowable for a given\nstep\n\nReturns:\nMinimum critical oxygen chemical of all compounds along the\ninsertion path (a subset of the path can be chosen by the optional\narguments).", "source": "codesearchnet"}
{"code": "def applies_to(self, transition, from_state=None):\n        \n        if '*' in self.names:\n            return True\n        elif self.kind in (HOOK_BEFORE, HOOK_AFTER, HOOK_CHECK):\n            return self._match_transition(transition)\n        elif self.kind == HOOK_ON_ENTER:\n            return self._match_state(transition.target)\n        elif from_state is None:\n            \n            \n            return any(self._match_state(src) for src in transition.source)\n        else:\n            return self._match_state(from_state)", "docstring": "Whether this hook applies to the given transition/state.\n\nArgs:\ntransition (Transition): the transition to check\nfrom_state (State or None): the state to check. If absent, the check\nis 'might this hook apply to the related transition, given a\nvalid source state'.", "source": "juraj-google-style"}
{"code": "def create_model_table(self, model):\n        \n        try:\n            return db_model_factory(self.Base, model, self.models)\n        except Exception as exc:\n            raise ModelError(\n                model.name,\n                message=\"failed to create in-memory table.\",\n                orig_exc=exc,\n                context=self.error_context\n            )", "docstring": "Creates the table for the given model.\n\nArgs:\nmodel: A StatikModel instance.\n\nReturns:\nA SQLAlchemy model instance for the table corresponding to this\nparticular model.", "source": "juraj-google-style"}
{"code": "def import_class(classpath):\n    \n    modname, classname = classpath.rsplit(\".\", 1)\n    module = importlib.import_module(modname)\n    klass  = getattr(module, classname)\n    return klass", "docstring": "Import the class referred to by the fully qualified class path.\n\nArgs:\nclasspath: A full \"foo.bar.MyClass\" path to a class definition.\n\nReturns:\nThe class referred to by the classpath.\n\nRaises:\nImportError: If an error occurs while importing the module.\nAttributeError: IF the class does not exist in the imported module.", "source": "juraj-google-style"}
{"code": "def _strip_layer_names(self, summaries, model_type):\n    result = set()\n    for s in summaries:\n        if '/' not in s.tag:\n            raise ValueError(f'tag has no layer name: {s.tag!r}')\n        start_from = 2 if 'subclass' in model_type else 1\n        new_tag = '/'.join(s.tag.split('/')[start_from:])\n        result.add(s._replace(tag=new_tag))\n    return result", "docstring": "Deduplicate summary names modulo layer prefix.\n\nThis removes the first slash-component of each tag name: for\ninstance, \"foo/bar/baz\" becomes \"bar/baz\".\n\nArgs:\nsummaries: A `set` of `_ObservedSummary` values.\nmodel_type: The model type currently being tested.\n\nReturns:\nA new `set` of `_ObservedSummary` values with layer prefixes\nremoved.", "source": "github-repos"}
{"code": "def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):\n    return json.dumps(self.to_dict(drop_null, camel), indent=indent, sort_keys=sort_keys)", "docstring": "Serialize self as JSON\n\nArgs:\ndrop_null: bool, default True. Remove 'empty' attributes. See\nto_dict.\ncamel: bool, default True. Convert keys to camelCase.\nindent: int, default None. See json built-in.\nsort_keys: bool, default False. See json built-in.\n\nReturn:\nstr: object params.", "source": "codesearchnet"}
{"code": "def edit_distance_matrix(train_x, train_y=None):\n    \n    if train_y is None:\n        ret = np.zeros((train_x.shape[0], train_x.shape[0]))\n        for x_index, x in enumerate(train_x):\n            for y_index, y in enumerate(train_x):\n                if x_index == y_index:\n                    ret[x_index][y_index] = 0\n                elif x_index < y_index:\n                    ret[x_index][y_index] = edit_distance(x, y)\n                else:\n                    ret[x_index][y_index] = ret[y_index][x_index]\n        return ret\n    ret = np.zeros((train_x.shape[0], train_y.shape[0]))\n    for x_index, x in enumerate(train_x):\n        for y_index, y in enumerate(train_y):\n            ret[x_index][y_index] = edit_distance(x, y)\n    return ret", "docstring": "Calculate the edit distance.\nArgs:\ntrain_x: A list of neural architectures.\ntrain_y: A list of neural architectures.\nReturns:\nAn edit-distance matrix.", "source": "juraj-google-style"}
{"code": "def setOption(self, name, value):\n        \n        if isinstance(value, bool):\n            lock_and_call(\n                lambda: self._impl.setBoolOption(name, value),\n                self._lock\n            )\n        elif isinstance(value, int):\n            lock_and_call(\n                lambda: self._impl.setIntOption(name, value),\n                self._lock\n            )\n        elif isinstance(value, float):\n            lock_and_call(\n                lambda: self._impl.setDblOption(name, value),\n                self._lock\n            )\n        elif isinstance(value, basestring):\n            lock_and_call(\n                lambda: self._impl.setOption(name, value),\n                self._lock\n            )\n        else:\n            raise TypeError", "docstring": "Set an AMPL option to a specified value.\n\nArgs:\nname: Name of the option to be set (alphanumeric without spaces).\n\nvalue: The value the option must be set to.\n\nRaises:\nInvalidArgumet: if the option name is not valid.\n\nTypeError: if the value has an invalid type.", "source": "juraj-google-style"}
{"code": "def _process_tabs(self, tabs, current_tab, group_current_tab):\n    for t in tabs:\n        t.current_tab = current_tab\n        t.group_current_tab = group_current_tab\n    tabs = list(filter((lambda t: t.tab_visible), tabs))\n    tabs.sort(key=(lambda t: t.weight))\n    return tabs", "docstring": "Process and prepare tabs.\n\nThis includes steps like updating references to the current tab,\nfiltering out hidden tabs, sorting tabs etc...\n\nArgs:\ntabs:\nThe list of tabs to process.\ncurrent_tab:\nThe reference to the currently loaded tab.\ngroup_current_tab:\nThe reference to the active tab in the current tab group. For\nparent tabs, this is different than for the current tab group.\n\nReturns:\nProcessed list of tabs. Note that the method may have side effects.", "source": "codesearchnet"}
{"code": "def merge_dicts(dicts, op=operator.add):\n    a = None\n    for b in dicts:\n        if (a is None):\n            a = b.copy()\n        else:\n            a = dict(((a.items() + b.items()) + [(k, op(a[k], b[k])) for k in (set(b) & set(a))]))\n    return a", "docstring": "Merge a list of dictionaries.\n\nArgs:\ndicts (list): a list of dictionary objects\nop (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.\n\nReturns:\ndict: the merged dictionary", "source": "codesearchnet"}
{"code": "def decr(self, key, value, noreply=False):\n    key = self.check_key(key)\n    cmd = (((b'decr ' + key) + b' ') + six.text_type(value).encode('ascii'))\n    if noreply:\n        cmd += b' noreply'\n    cmd += b'\\r\\n'\n    results = self._misc_cmd([cmd], b'decr', noreply)\n    if noreply:\n        return None\n    if (results[0] == b'NOT_FOUND'):\n        return None\n    return int(results[0])", "docstring": "The memcached \"decr\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: int, the amount by which to increment the value.\nnoreply: optional bool, False to wait for the reply (the default).\n\nReturns:\nIf noreply is True, always returns None. Otherwise returns the new\nvalue of the key, or None if the key wasn't found.", "source": "codesearchnet"}
{"code": "def GetPathSegmentAndSuffix(self, base_path, path):\n    if ((path is None) or (base_path is None) or (not path.startswith(base_path))):\n        return (None, None)\n    path_index = len(base_path)\n    if (base_path and (not base_path.endswith(self.PATH_SEPARATOR))):\n        path_index += 1\n    if (path_index == len(path)):\n        return ('', '')\n    (path_segment, _, suffix) = path[path_index:].partition(self.PATH_SEPARATOR)\n    return (path_segment, suffix)", "docstring": "Determines the path segment and suffix of the path.\n\nNone is returned if the path does not start with the base path and\nan empty string if the path exactly matches the base path.\n\nArgs:\nbase_path (str): base path.\npath (str): path.\n\nReturns:\ntuple[str, str]: path segment and suffix string.", "source": "codesearchnet"}
{"code": "def movie_credits(self, **kwargs):\n    path = self._get_id_path('movie_credits')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the movie credits for a specific person id.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any person method.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def deployment_plans(self):\n    if (not self.__deployment_plans):\n        self.__deployment_plans = DeploymentPlans(self.__connection)\n    return self.__deployment_plans", "docstring": "Gets the Deployment Plans API client.\n\nReturns:\nDeploymentPlans:", "source": "codesearchnet"}
{"code": "def two_qubit_matrix_to_ion_operations(q0: ops.Qid,\n                                       q1: ops.Qid,\n                                       mat: np.ndarray,\n                                       atol: float = 1e-8\n                                       ) -> List[ops.Operation]:\n    \n    kak = linalg.kak_decomposition(mat, atol=atol)\n    operations = _kak_decomposition_to_operations(q0,\n        q1, kak, atol)\n    return _cleanup_operations(operations)", "docstring": "Decomposes a two-qubit operation into MS/single-qubit rotation gates.\n\nArgs:\nq0: The first qubit being operated on.\nq1: The other qubit being operated on.\nmat: Defines the operation to apply to the pair of qubits.\ntolerance: A limit on the amount of error introduced by the\nconstruction.\n\nReturns:\nA list of operations implementing the matrix.", "source": "juraj-google-style"}
{"code": "def post(self, **kwargs):\n    if (self._url is None):\n        raise NoWebsiteLoadedError('request submission requires a loaded website')\n    data = kwargs.get('data', {})\n    for i in self.soup('form').select('input[name]'):\n        if (i.get('name') not in data):\n            data[i.get('name')] = i.get('value', '')\n    kwargs['data'] = data\n    response = self.session.post(self._url, **kwargs)\n    self._url = response.url\n    self._response = response\n    return response", "docstring": "Send a POST request to the currently loaded website's URL.\n\nThe browser will automatically fill out the form. If `data` dict has\nbeen passed into ``kwargs``, the contained input values will override\nthe automatically filled out values.\n\nReturns:\n`Response` object of a successful request.\n\nRaises:\nNoWebsiteLoadedError: If no website is currently loaded.", "source": "codesearchnet"}
{"code": "def WriteFileHash(self, path, hash_value):\n    \n    string = '{0:s}\\t{1:s}\\n'.format(hash_value, path)\n\n    encoded_string = self._EncodeString(string)\n    self._file_object.write(encoded_string)", "docstring": "Writes the file path and hash to file.\n\nArgs:\npath (str): path of the file.\nhash_value (str): message digest hash calculated over the file data.", "source": "juraj-google-style"}
{"code": "def FromFile(cls, path, actions_dict, resources_dict, file_format='yaml', name=None):\n    format_map = {'yaml': cls._process_yaml}\n    format_handler = format_map.get(file_format)\n    if (format_handler is None):\n        raise ArgumentError('Unknown file format or file extension', file_format=file_format, known_formats=[x for x in format_map if (format_map[x] is not None)])\n    recipe_info = format_handler(path)\n    if (name is None):\n        (name, _ext) = os.path.splitext(os.path.basename(path))\n    try:\n        recipe_info = RecipeSchema.verify(recipe_info)\n    except ValidationError as exc:\n        raise RecipeFileInvalid('Recipe file does not match expected schema', file=path, error_message=exc.msg, **exc.params)\n    description = recipe_info.get('description')\n    try:\n        resources = cls._parse_resource_declarations(recipe_info.get('resources', []), resources_dict)\n        defaults = cls._parse_variable_defaults(recipe_info.get('defaults', []))\n        steps = []\n        for (i, action) in enumerate(recipe_info.get('actions', [])):\n            action_name = action.pop('name')\n            if (action_name is None):\n                raise RecipeFileInvalid('Action is missing required name parameter', parameters=action, path=path)\n            action_class = actions_dict.get(action_name)\n            if (action_class is None):\n                raise UnknownRecipeActionType('Unknown step specified in recipe', action=action_name, step=(i + 1), path=path)\n            step_resources = cls._parse_resource_usage(action, declarations=resources)\n            (fixed_files, _variable_files) = cls._parse_file_usage(action_class, action)\n            step = RecipeStep(action_class, action, step_resources, fixed_files)\n            steps.append(step)\n        return RecipeObject(name, description, steps, resources, defaults, path)\n    except RecipeFileInvalid as exc:\n        cls._future_raise(RecipeFileInvalid, RecipeFileInvalid(exc.msg, recipe=name, **exc.params), sys.exc_info()[2])", "docstring": "Create a RecipeObject from a file.\n\nThe file should be a specially constructed yaml file that describes\nthe recipe as well as the actions that it performs.\n\nArgs:\npath (str): The path to the recipe file that we wish to load\nactions_dict (dict): A dictionary of named RecipeActionObject\ntypes that is used to look up all of the steps listed in\nthe recipe file.\nresources_dict (dict): A dictionary of named RecipeResource types\nthat is used to look up all of the shared resources listed in\nthe recipe file.\nfile_format (str): The file format of the recipe file.  Currently\nwe only support yaml.\nname (str): The name of this recipe if we created it originally from an\narchive.", "source": "codesearchnet"}
{"code": "def delete_endpoint(self, endpoint_name=None):\n    endpoint_name = (endpoint_name or self.best_training_job())\n    self.sagemaker_session.delete_endpoint(endpoint_name)", "docstring": "Delete an Amazon SageMaker endpoint.\n\nIf an endpoint name is not specified, this defaults to looking for an endpoint that\nshares a name with the best training job for deletion.\n\nArgs:\nendpoint_name (str): Name of the endpoint to delete", "source": "codesearchnet"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    tstream = BytearrayStream()\n    self.certificate_type.write(tstream, kmip_version=kmip_version)\n    self.certificate_value.write(tstream, kmip_version=kmip_version)\n    self.length = tstream.length()\n    super(Certificate, self).write(ostream, kmip_version=kmip_version)\n    ostream.write(tstream.buffer)", "docstring": "Write the data encoding the Certificate object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def cluster_nodes(self, tol=0.2):\n        \n        lattice = self.structure.lattice\n        vf_coords = self.extrema_coords\n\n        if len(vf_coords) == 0:\n            if self.extrema_type is None:\n                logger.warning(\n                    \"Please run ChargeDensityAnalyzer.get_local_extrema first!\")\n                return\n            new_f_coords = []\n            self._update_extrema(new_f_coords, self.extrema_type)\n            return new_f_coords\n\n        \n        \n        dist_matrix = np.array(lattice.get_all_distances(vf_coords, vf_coords))\n        dist_matrix = (dist_matrix + dist_matrix.T) / 2\n\n        for i in range(len(dist_matrix)):\n            dist_matrix[i, i] = 0\n        condensed_m = squareform(dist_matrix)\n        z = linkage(condensed_m)\n        cn = fcluster(z, tol, criterion=\"distance\")\n        merged_fcoords = []\n\n        for n in set(cn):\n            frac_coords = []\n            for i, j in enumerate(np.where(cn == n)[0]):\n                if i == 0:\n                    frac_coords.append(self.extrema_coords[j])\n                else:\n                    f_coords = self.extrema_coords[j]\n                    \n                    d, image = lattice.get_distance_and_image(frac_coords[0],\n                                                              f_coords)\n                    frac_coords.append(f_coords + image)\n            merged_fcoords.append(np.average(frac_coords, axis=0))\n\n        merged_fcoords = [f - np.floor(f) for f in merged_fcoords]\n        merged_fcoords = [f * (np.abs(f - 1) > 1E-15) for f in merged_fcoords]\n        \n        \n        \n        self._update_extrema(merged_fcoords, extrema_type=self.extrema_type)\n        logger.debug(\n            \"{} vertices after combination.\".format(len(self.extrema_coords)))", "docstring": "Cluster nodes that are too close together using a tol.\n\nArgs:\ntol (float): A distance tolerance. PBC is taken into account.", "source": "juraj-google-style"}
{"code": "def value(self, new_value):\n    if ((self.unit != units.Undefined) and (new_value.unit != self.unit)):\n        raise AttributeError(('%s must be in %s' % (self.__class__, self.unit)))\n    self._value = new_value", "docstring": "Set the value of this measurement.\n\nRaises:\nAttributeError: if the new value isn't of the correct units.", "source": "codesearchnet"}
{"code": "def __init__(self, filename):\n        \n        self.root_values = {}\n        self.tree = []\n        \n        if filename is not None and os.path.dirname(filename) == '':\n            self.original_filename = os.path.join(os.getcwd(), filename)\n        else:\n            self.original_filename = filename\n\n        if filename is not None:\n            self.sax_parse(filename)", "docstring": "A container for a SAX SVG light tree objects document.\n\nThis class provides functions for extracting SVG data into Path objects.\n\nArgs:\nfilename (str): The filename of the SVG file", "source": "juraj-google-style"}
{"code": "def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=(- 1), task_class=ScfTask, work_class=Work, remove=False):\n    if (not isinstance(inputs, (list, tuple))):\n        inputs = [inputs]\n    flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol, remove=remove)\n    work = work_class()\n    for inp in inputs:\n        work.register(inp, task_class=task_class)\n    flow.register_work(work)\n    return flow.allocate()", "docstring": "Construct a simple flow from a list of inputs. The flow contains a single Work with\ntasks whose class is given by task_class.\n\n.. warning::\n\nDon't use this interface if you have dependencies among the tasks.\n\nArgs:\nworkdir: String specifying the directory where the works will be produced.\ninputs: List of inputs.\nmanager: :class:`TaskManager` object responsible for the submission of the jobs.\nIf manager is None, the object is initialized from the yaml file\nlocated either in the working directory or in the user configuration dir.\npickle_protocol: Pickle protocol version used for saving the status of the object.\n-1 denotes the latest version supported by the python interpreter.\ntask_class: The class of the :class:`Task`.\nwork_class: The class of the :class:`Work`.\nremove: attempt to remove working directory `workdir` if directory already exists.", "source": "codesearchnet"}
{"code": "def _add_main_menu(output, node_name=None, enable_list_tensors=True, enable_node_info=True, enable_print_tensor=True, enable_list_inputs=True, enable_list_outputs=True):\n    menu = debugger_cli_common.Menu()\n    menu.append(debugger_cli_common.MenuItem('list_tensors', 'list_tensors', enabled=enable_list_tensors))\n    if node_name:\n        menu.append(debugger_cli_common.MenuItem('node_info', 'node_info -a -d -t %s' % node_name, enabled=enable_node_info))\n        menu.append(debugger_cli_common.MenuItem('print_tensor', 'print_tensor %s' % node_name, enabled=enable_print_tensor))\n        menu.append(debugger_cli_common.MenuItem('list_inputs', 'list_inputs -c -r %s' % node_name, enabled=enable_list_inputs))\n        menu.append(debugger_cli_common.MenuItem('list_outputs', 'list_outputs -c -r %s' % node_name, enabled=enable_list_outputs))\n    else:\n        menu.append(debugger_cli_common.MenuItem('node_info', None, enabled=False))\n        menu.append(debugger_cli_common.MenuItem('print_tensor', None, enabled=False))\n        menu.append(debugger_cli_common.MenuItem('list_inputs', None, enabled=False))\n        menu.append(debugger_cli_common.MenuItem('list_outputs', None, enabled=False))\n    menu.append(debugger_cli_common.MenuItem('run_info', 'run_info'))\n    menu.append(debugger_cli_common.MenuItem('help', 'help'))\n    output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu", "docstring": "Generate main menu for the screen output from a command.\n\nArgs:\noutput: (debugger_cli_common.RichTextLines) the output object to modify.\nnode_name: (str or None) name of the node involved (if any). If None,\nthe menu items node_info, list_inputs and list_outputs will be\nautomatically disabled, overriding the values of arguments\nenable_node_info, enable_list_inputs and enable_list_outputs.\nenable_list_tensors: (bool) whether the list_tensor menu item will be\nenabled.\nenable_node_info: (bool) whether the node_info item will be enabled.\nenable_print_tensor: (bool) whether the print_tensor item will be enabled.\nenable_list_inputs: (bool) whether the item list_inputs will be enabled.\nenable_list_outputs: (bool) whether the item list_outputs will be enabled.", "source": "github-repos"}
{"code": "def parse_rfc3339_utc_string(rfc3339_utc_string):\n  \n\n  \n  \n  \n  \n  \n  \n  \n  m = re.match(r'(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}).?(\\d*)Z',\n               rfc3339_utc_string)\n\n  \n  \n  \n  \n  \n  \n  if not m:\n    return None\n\n  groups = m.groups()\n  if len(groups[6]) not in (0, 3, 6, 9):\n    return None\n\n  \n  \n  \n  \n  g = [int(val) for val in groups[:6]]\n\n  fraction = groups[6]\n  if not fraction:\n    micros = 0\n  elif len(fraction) == 3:\n    micros = int(fraction) * 1000\n  elif len(fraction) == 6:\n    micros = int(fraction)\n  elif len(fraction) == 9:\n    \n    micros = int(round(int(fraction) / 1000))\n  else:\n    assert False, 'Fraction length not 0, 6, or 9: {}'.len(fraction)\n\n  try:\n    return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)\n  except ValueError as e:\n    assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(\n        rfc3339_utc_string, e)", "docstring": "Converts a datestamp from RFC3339 UTC to a datetime.\n\nArgs:\nrfc3339_utc_string: a datetime string in RFC3339 UTC \"Zulu\" format\n\nReturns:\nA datetime.", "source": "juraj-google-style"}
{"code": "def ParseAutofillRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = ChromeAutofillEventData()\n    event_data.field_name = self._GetRowValue(query_hash, row, 'name')\n    event_data.value = self._GetRowValue(query_hash, row, 'value')\n    event_data.usage_count = self._GetRowValue(query_hash, row, 'count')\n    event_data.query = query\n\n    \n    timestamp = self._GetRowValue(query_hash, row, 'date_created')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    \n    \n    if event_data.usage_count > 1:\n      timestamp = self._GetRowValue(query_hash, row, 'date_last_used')\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_USED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an autofill entry row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _save_representative_dataset(representative_dataset: repr_dataset.RepresentativeDatasetOrMapping, signature_def_map: _SignatureDefMap) -> Mapping[str, _RepresentativeDatasetFile]:\n    if isinstance(representative_dataset, Mapping):\n        if set(signature_def_map.keys()) != set(representative_dataset.keys()):\n            raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_def_map.keys())}, representative dataset map: {set(representative_dataset.keys())}.')\n        representative_dataset_map = representative_dataset\n    elif len(signature_def_map.keys()) > 1:\n        raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. Please provide a map of {{signature_key -> dataset}} with more than one signature key.')\n    else:\n        representative_dataset_map = {list(signature_def_map.keys())[0]: representative_dataset}\n    path_map = {}\n    expected_input_key_map = {}\n    for signature_key, signature_def in signature_def_map.items():\n        _, path_map[signature_key] = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)\n        expected_input_key_map[signature_key] = signature_def.inputs.keys()\n    return repr_dataset.TfRecordRepresentativeDatasetSaver(path_map=path_map, expected_input_key_map=expected_input_key_map).save(representative_dataset_map)", "docstring": "Saves the representative dataset to temporary TFRecord files.\n\nArgs:\nrepresentative_dataset: Representative dataset used for the calibration\nstep. Representative datasets should exist for each signature def key in\n`signature_def_keys`.\nsignature_def_map: Signature def key -> SignatureDef mapping.\n\nReturns:\nA map from signature key to the saved representative dataset file.", "source": "github-repos"}
{"code": "def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:\n    f1_total = 0.0\n    for (ground_truth, prediction) in zip(y_true, y_predicted):\n        prediction_tokens = normalize_answer(prediction).split()\n        f1s = []\n        for gt in ground_truth:\n            gt_tokens = normalize_answer(gt).split()\n            if ((len(gt_tokens) == 0) or (len(prediction_tokens) == 0)):\n                f1s.append(float((gt_tokens == prediction_tokens)))\n                continue\n            common = (Counter(prediction_tokens) & Counter(gt_tokens))\n            num_same = sum(common.values())\n            if (num_same == 0):\n                f1s.append(0.0)\n                continue\n            precision = ((1.0 * num_same) / len(prediction_tokens))\n            recall = ((1.0 * num_same) / len(gt_tokens))\n            f1 = (((2 * precision) * recall) / (precision + recall))\n            f1s.append(f1)\n        f1_total += max(f1s)\n    return (((100 * f1_total) / len(y_true)) if (len(y_true) > 0) else 0)", "docstring": "Calculates F-1 score between y_true and y_predicted\nF-1 score uses the best matching y_true answer\n\nThe same as in SQuAD-v2.0\n\nArgs:\ny_true: list of correct answers (correct answers are represented by list of strings)\ny_predicted: list of predicted answers\n\nReturns:\nF-1 score : float", "source": "codesearchnet"}
{"code": "def rename_style(self, old_name, new_name):\n        \n        if old_name not in self.styles:\n            raise KeyError(\"Style %r not found\" % old_name)\n        if new_name in self.styles:\n            raise ValueError(\"There is already a style called %r\" % new_name)\n        if not is_valid_field_content(new_name):\n            raise ValueError(\"%r is not a valid name\" % new_name)\n\n        self.styles[new_name] = self.styles[old_name]\n        del self.styles[old_name]\n\n        for line in self:\n            \n            if line.style == old_name:\n                line.style = new_name", "docstring": "Rename a style, including references to it.\n\nArguments:\nold_name (str): Style to be renamed.\nnew_name (str): New name for the style (must be unused).\n\nRaises:\nKeyError: No style named old_name.\nValueError: new_name is not a legal name (cannot use commas)\nor new_name is taken.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, metadata: Optional[List[torch.LongTensor]], decode: Optional[bool]=False, get_preds: Optional[bool]=False) -> List[torch.Tensor]:\n    batch_size = hidden_states.shape[0]\n    music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size)\n    loss, metrics = self.forward_tokens(music_tokens=music_tokens, music_tokens_conds=music_tokens_conds, metadata=metadata, get_preds=get_preds)\n    if decode:\n        dequantised_states = self.decode([music_tokens, *music_tokens_conds])\n    else:\n        dequantised_states = None\n    return (dequantised_states, loss, metrics)", "docstring": "Encode the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens`\nfunction. The loss is the sum of the `encoder` loss and the `decoder` loss.\n\nArgs:\nhidden_states (`torch.Tensor`):\nHidden states which should be raw audio\nmetadata (`List[torch.LongTensor]`, *optional*):\nList containing the metadata conditioning tensor with the lyric and the metadata tokens.\ndecode (`bool`, *optional*, defaults to `False`):\nWhether or not to decode the encoded to tokens.\nget_preds (`bool`, *optional*, defaults to `False`):\nWhether or not to return the actual predictions of the model.", "source": "github-repos"}
{"code": "def get_dihedral_degrees(self, indices, start_row=0):\n    coords = ['x', 'y', 'z']\n    if isinstance(indices, pd.DataFrame):\n        i_pos = self.loc[(indices.index, coords)].values\n        b_pos = self.loc[(indices.loc[(:, 'b')], coords)].values\n        a_pos = self.loc[(indices.loc[(:, 'a')], coords)].values\n        d_pos = self.loc[(indices.loc[(:, 'd')], coords)].values\n    else:\n        indices = np.array(indices)\n        if (len(indices.shape) == 1):\n            indices = indices[(None, :)]\n        i_pos = self.loc[(indices[(:, 0)], coords)].values\n        b_pos = self.loc[(indices[(:, 1)], coords)].values\n        a_pos = self.loc[(indices[(:, 2)], coords)].values\n        d_pos = self.loc[(indices[(:, 3)], coords)].values\n    IB = (b_pos - i_pos)\n    BA = (a_pos - b_pos)\n    AD = (d_pos - a_pos)\n    N1 = np.cross(IB, BA, axis=1)\n    N2 = np.cross(BA, AD, axis=1)\n    (n1, n2) = [(v / np.linalg.norm(v, axis=1)[(:, None)]) for v in (N1, N2)]\n    dot_product = np.sum((n1 * n2), axis=1)\n    dot_product[(dot_product > 1)] = 1\n    dot_product[(dot_product < (- 1))] = (- 1)\n    dihedrals = np.degrees(np.arccos(dot_product))\n    where_to_modify = (np.sum((BA * np.cross(n1, n2, axis=1)), axis=1) > 0)\n    where_to_modify = np.nonzero(where_to_modify)[0]\n    length = (indices.shape[0] - start_row)\n    sign = np.full(length, 1, dtype='float64')\n    to_add = np.full(length, 0, dtype='float64')\n    sign[where_to_modify] = (- 1)\n    to_add[where_to_modify] = 360\n    dihedrals = (to_add + (sign * dihedrals))\n    return dihedrals", "docstring": "Return the dihedrals between given atoms.\n\nCalculates the dihedral angle in degrees between the atoms with\nindices ``i, b, a, d``.\nThe indices can be given in three ways:\n\n* As simple list ``[i, b, a, d]``\n* As list of lists: ``[[i1, b1, a1, d1], [i2, b2, a2, d2]...]``\n* As :class:`pandas.DataFrame` where ``i`` is taken from the index and\n``b``, ``a`` and ``d``from the respective columns\n``'b'``, ``'a'`` and ``'d'``.\n\nArgs:\nindices (list):\n\nReturns:\n:class:`numpy.ndarray`: Vector of angles in degrees.", "source": "codesearchnet"}
{"code": "def _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    mean, var = nn.moments(x, reduction_axes, None, None, False)\n    target_shape = []\n    for axis in range(ndim(x)):\n        if axis in reduction_axes:\n            target_shape.append(1)\n        else:\n            target_shape.append(array_ops.shape(x)[axis])\n    target_shape = array_ops_stack.stack(target_shape)\n    broadcast_mean = array_ops.reshape(mean, target_shape)\n    broadcast_var = array_ops.reshape(var, target_shape)\n    if gamma is None:\n        broadcast_gamma = None\n    else:\n        broadcast_gamma = array_ops.reshape(gamma, target_shape)\n    if beta is None:\n        broadcast_beta = None\n    else:\n        broadcast_beta = array_ops.reshape(beta, target_shape)\n    normed = nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon)\n    return (normed, mean, var)", "docstring": "Non-fused, broadcast version of `normalize_batch_in_training`.\n\nArgs:\nx: Input tensor or variable.\ngamma: Tensor by which to scale the input.\nbeta: Tensor with which to center the input.\nreduction_axes: iterable of integers,\naxes over which to normalize.\nepsilon: Fuzz factor.\n\nReturns:\nA tuple length of 3, `(normalized_tensor, mean, variance)`.", "source": "github-repos"}
{"code": "def dump_credibilities(self, output):\n        \n        for p in self.products:\n            json.dump({\n                \"product_id\": p.name,\n                \"credibility\": self.credibility(p)\n            }, output)\n            output.write(\"\\n\")", "docstring": "Dump credibilities of all products.\n\nArgs:\noutput: a writable object.", "source": "juraj-google-style"}
{"code": "def conversations_invite(self, *, channel: str, users: List[str], **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'channel': channel, 'users': users})\n    return self.api_call('conversations.invite', json=kwargs)", "docstring": "Invites users to a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nusers (list): An list of user id's to invite. e.g. ['U2345678901', 'U3456789012']", "source": "codesearchnet"}
{"code": "def SetUseSSL(self, use_ssl):\n    self._use_ssl = use_ssl\n    logger.debug('Elasticsearch use_ssl: {0!s}'.format(use_ssl))", "docstring": "Sets the use of ssl.\n\nArgs:\nuse_ssl (bool): enforces use of ssl.", "source": "codesearchnet"}
{"code": "def extend(self, table, keys=None):\n    if keys:\n        for k in keys:\n            if (k not in self._Header()):\n                raise IndexError(\"Unknown key: '%s'\", k)\n    extend_with = []\n    for column in table.header:\n        if (column not in self.header):\n            extend_with.append(column)\n    if (not extend_with):\n        return\n    for column in extend_with:\n        self.AddColumn(column)\n    if (not keys):\n        for (row1, row2) in zip(self, table):\n            for column in extend_with:\n                row1[column] = row2[column]\n        return\n    for row1 in self:\n        for row2 in table:\n            for k in keys:\n                if (row1[k] != row2[k]):\n                    break\n            else:\n                for column in extend_with:\n                    row1[column] = row2[column]\n                break", "docstring": "Extends all rows in the texttable.\n\nThe rows are extended with the new columns from the table.\n\nArgs:\ntable: A texttable, the table to extend this table by.\nkeys: A set, the set of columns to use as the key. If None, the\nrow index is used.\n\nRaises:\nIndexError: If key is not a valid column name.", "source": "codesearchnet"}
{"code": "def ordered_dump(data, Dumper=yaml.Dumper, **kws):\n\n    class OrderedDumper(Dumper):\n        pass\n\n    def _dict_representer(dumper, data):\n        return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())\n    OrderedDumper.add_representer(OrderedDict, _dict_representer)\n    return yaml.dump(data, None, OrderedDumper, **kws)", "docstring": "Expand PyYAML's built-in dumper to support parsing OrderedDict. Return\na string as parse result of the original data structure, which includes\nOrderedDict.\n\nArgs:\ndata: the data structure to be dumped(parsed) which is supposed to\ncontain OrderedDict.\nDumper: the yaml serializer to be expanded and used.\nkws: extra key-value arguments to be passed to yaml.dump.", "source": "codesearchnet"}
{"code": "def add_snmp_community(self, **kwargs):\n        \n        community = kwargs.pop('community')\n        callback = kwargs.pop('callback', self._callback)\n\n        config = ET.Element('config')\n        snmp_server = ET.SubElement(config, 'snmp-server',\n                                    xmlns=(\"urn:brocade.com:mgmt:\"\n                                           \"brocade-snmp\"))\n        community_el = ET.SubElement(snmp_server, 'community')\n        community_name = ET.SubElement(community_el, 'community')\n        community_name.text = community\n\n        return callback(config)", "docstring": "Add SNMP Community to NOS device.\n\nArgs:\ncommunity (str): Community string to be added to device.\ncallback (function): A function executed upon completion of the\nmethod.  The only parameter passed to `callback` will be the\n``ElementTree`` `config`.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nKeyError: if `community` is not defined.", "source": "juraj-google-style"}
{"code": "async def remember(request, user_id):\n    \n    auth_policy = request.get(POLICY_KEY)\n    if auth_policy is None:\n        raise RuntimeError('auth_middleware not installed')\n\n    return await auth_policy.remember(request, user_id)", "docstring": "Called to store and remember the userid for a request\n\nArgs:\nrequest: aiohttp Request object.\nuser_id: String representing the user_id to remember\n\nRaises:\nRuntimeError: Middleware is not installed", "source": "juraj-google-style"}
{"code": "def _validate_alias_command_level(alias, command):\n    \n    alias_collision_table = AliasManager.build_collision_table([alias])\n\n    \n    if not alias_collision_table:\n        return\n\n    command_collision_table = AliasManager.build_collision_table([command])\n    alias_collision_levels = alias_collision_table.get(alias.split()[0], [])\n    command_collision_levels = command_collision_table.get(command.split()[0], [])\n\n    \n    if set(alias_collision_levels) & set(command_collision_levels):\n        raise CLIError(COMMAND_LVL_ERROR.format(alias, command))", "docstring": "Make sure that if the alias is a reserved command, the command that the alias points to\nin the command tree does not conflict in levels.\n\ne.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.\nHowever, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.\n\nArgs:\nalias: The name of the alias.\ncommand: The command that the alias points to.", "source": "juraj-google-style"}
{"code": "def db_wb020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_wb020`'.format(value))\n    self._db_wb020 = value", "docstring": "Corresponds to IDD Field `db_wb020`\nmean coincident dry-bulb temperature to\nWet-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_wb020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def area_of_a_triangle_in_cartesian_space(a, b, c):\n    return (0.5 * np.linalg.norm(np.cross((b - a), (c - a))))", "docstring": "Returns the area of a triangle defined by three points in Cartesian space.\n\nArgs:\na (np.array): Cartesian coordinates of point A.\nb (np.array): Cartesian coordinates of point B.\nc (np.array): Cartesian coordinates of point C.\n\nReturns:\n(float): the area of the triangle.", "source": "codesearchnet"}
{"code": "def start(self, attempts=5, timeout=2):\n        \n\n        if not self.alive():\n            with LogTask('Create network %s' % self.name()):\n                net = self.libvirt_con.networkCreateXML(self._libvirt_xml())\n                if net is None:\n                    raise RuntimeError(\n                        'failed to create network, XML: %s' %\n                        (self._libvirt_xml())\n                    )\n                for _ in range(attempts):\n                    if net.isActive():\n                        return\n                    LOGGER.debug(\n                        'waiting for network %s to become active', net.name()\n                    )\n                    time.sleep(timeout)\n                raise RuntimeError(\n                    'failed to verify network %s is active' % net.name()\n                )", "docstring": "Start the network, will check if the network is active ``attempts``\ntimes, waiting ``timeout`` between each attempt.\n\nArgs:\nattempts (int): number of attempts to check the network is active\ntimeout  (int): timeout for each attempt\n\nReturns:\nNone\n\nRaises:\nRuntimeError: if network creation failed, or failed to verify it is\nactive.", "source": "juraj-google-style"}
{"code": "def _parse_normalization(normalization):\n        \n        parsed_normalization = None\n\n        if isinstance(normalization, dict):\n            if len(normalization.keys()) == 1:\n                items = list(normalization.items())[0]\n                if len(items) == 2: \n                    \n                    if items[1] and isinstance(items[1], dict):\n                        parsed_normalization = items\n                    else:\n                        parsed_normalization = items[0]\n        elif isinstance(normalization, STR_TYPE):\n            parsed_normalization = normalization\n\n        return parsed_normalization", "docstring": "Parse a normalization item.\n\nTransform dicts into a tuple containing the normalization\noptions. If a string is found, the actual value is used.\n\nArgs:\nnormalization: Normalization to parse.\n\nReturns:\nTuple or string containing the parsed normalization.", "source": "juraj-google-style"}
{"code": "def synctree(src, dst, onexist=None):\n    src = pathlib.Path(src).resolve()\n    dst = pathlib.Path(dst).resolve()\n    if (not src.is_dir()):\n        raise ValueError\n    if (dst.exists() and (not dst.is_dir())):\n        raise ValueError\n    if (onexist is None):\n\n        def onexist():\n            pass\n    _synctree(src, dst, onexist)", "docstring": "Recursively sync files at directory src to dst\n\nThis is more or less equivalent to::\n\ncp -n -R ${src}/ ${dst}/\n\nIf a file at the same path exists in src and dst, it is NOT overwritten\nin dst. Pass ``onexist`` in order to raise an error on such conditions.\n\nArgs:\nsrc (path-like): source directory\ndst (path-like): destination directory, does not need to exist\nonexist (callable): function to call if file exists at destination,\ntakes the full path to destination file as only argument", "source": "codesearchnet"}
{"code": "def __init__(self, message_type, message_text):\n        \n        if message_type not in self.TYPES:\n            raise TypeError(\"Unknown message_type: \" + message_type)\n        if not isinstance(message_text, six.text_type):\n            raise TypeError(\"Message text must be unicode\")\n        self.type = message_type\n        self.text = message_text", "docstring": "Create a new message.\n\nArgs:\nmessage_type (unicode): The type associated with this message. Must be included in `TYPES`.\nmessage_text (unicode): The textual message.", "source": "juraj-google-style"}
{"code": "def metadata(self):\n    if (self._metadata is None):\n        try:\n            with open(self.paths.metadata()) as metadata_fd:\n                self._metadata = json.load(metadata_fd)\n        except IOError:\n            self._metadata = {}\n    return self._metadata", "docstring": "Retrieve the metadata info for this prefix\n\nReturns:\ndict: metadata info", "source": "codesearchnet"}
{"code": "def get_block_containing_tx(self, txid):\n        \n        blocks = list(backend.query.get_block_with_transaction(self.connection, txid))\n        if len(blocks) > 1:\n            logger.critical('Transaction id %s exists in multiple blocks', txid)\n\n        return [block['height'] for block in blocks]", "docstring": "Retrieve the list of blocks (block ids) containing a\ntransaction with transaction id `txid`\n\nArgs:\ntxid (str): transaction id of the transaction to query\n\nReturns:\nBlock id list (list(int))", "source": "juraj-google-style"}
{"code": "def _read_template(template):\n    template = _read_content_or_path(template)\n    file_obj = StringIO.StringIO(template)\n    return ET.parse(file_obj)", "docstring": "Read XSLT template.\n\nArgs:\ntemplate (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\n\nReturns:\nobj: Required XML parsed with ``lxml.etree``.", "source": "codesearchnet"}
{"code": "def probability_density(self, X):\n        \n        self.check_fit()\n\n        \n        covariance = self.covariance * np.identity(self.covariance.shape[0])\n        return stats.multivariate_normal.pdf(X, cov=covariance)", "docstring": "Compute probability density function for given copula family.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`\n\nReturns:\nnp.array: Probability density for the input values.", "source": "juraj-google-style"}
{"code": "def __init__(self, *, dtype=np.complex64):\n        \n        if dtype not in {np.complex64, np.complex128}:\n            raise ValueError(\n                'dtype must be complex64 or complex128 but was {}'.format(\n                    dtype))\n        self._dtype = dtype", "docstring": "A sparse matrix simulator.\n\nArgs:\ndtype: The `numpy.dtype` used by the simulation. One of\n`numpy.complex64` or `numpy.complex128`", "source": "juraj-google-style"}
{"code": "def datasets_insert(self, dataset_name, friendly_name=None, description=None):\n    \n    url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))\n    data = {\n        'kind': 'bigquery\n        'datasetReference': {\n            'projectId': dataset_name.project_id,\n            'datasetId': dataset_name.dataset_id\n        },\n    }\n    if friendly_name:\n      data['friendlyName'] = friendly_name\n    if description:\n      data['description'] = description\n    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)", "docstring": "Issues a request to create a dataset.\n\nArgs:\ndataset_name: the name of the dataset to create.\nfriendly_name: (optional) the friendly name for the dataset\ndescription: (optional) a description for the dataset\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def get_flux_biases(sampler, embedding, chain_strength, num_reads=1000, max_age=3600):\n    if (not isinstance(sampler, dimod.Sampler)):\n        raise TypeError('input sampler should be DWaveSampler')\n    system_name = sampler.properties.get('chip_id', str(sampler.__class__))\n    try:\n        with cache_connect() as cur:\n            fbo = get_flux_biases_from_cache(cur, embedding.values(), system_name, chain_strength=chain_strength, max_age=max_age)\n        return fbo\n    except MissingFluxBias:\n        pass\n    try:\n        import dwave.drivers as drivers\n    except ImportError:\n        msg = \"dwave-drivers not found, cannot calculate flux biases. dwave-drivers can be installed with 'pip install dwave-drivers --extra-index-url https:\n        raise RuntimeError(msg)\n    fbo = drivers.oneshot_flux_bias(sampler, embedding.values(), num_reads=num_reads, chain_strength=chain_strength)\n    with cache_connect() as cur:\n        for chain in embedding.values():\n            v = next(iter(chain))\n            flux_bias = fbo.get(v, 0.0)\n            insert_flux_bias(cur, chain, system_name, flux_bias, chain_strength)\n    return fbo", "docstring": "Get the flux bias offsets for sampler and embedding.\n\nArgs:\nsampler (:obj:`.DWaveSampler`):\nA D-Wave sampler.\n\nembedding (dict[hashable, iterable]):\nMapping from a source graph to the specified sampler’s graph (the target graph). The\nkeys of embedding should be nodes in the source graph, the values should be an iterable\nof nodes in the target graph.\n\nchain_strength (number):\nDesired chain coupling strength. This is the magnitude of couplings between qubits\nin a chain.\n\nnum_reads (int, optional, default=1000):\nThe number of reads per system call if new flux biases need to be calculated.\n\nmax_age (int, optional, default=3600):\nThe maximum age (in seconds) allowed for previously calculated flux bias offsets.\n\nReturns:\ndict: A dict where the keys are the nodes in the chains and the values are the flux biases.", "source": "codesearchnet"}
{"code": "def _flag_is_registered(self, flag_obj):\n    \n    flag_dict = self._flags()\n    \n    name = flag_obj.name\n    if flag_dict.get(name, None) == flag_obj:\n      return True\n    \n    short_name = flag_obj.short_name\n    if (short_name is not None and\n        flag_dict.get(short_name, None) == flag_obj):\n      return True\n    return False", "docstring": "Checks whether a Flag object is registered under long name or short name.\n\nArgs:\nflag_obj: Flag, the Flag instance to check for.\n\nReturns:\nbool, True iff flag_obj is registered under long name or short name.", "source": "juraj-google-style"}
{"code": "def format_unitary(mat, decimals=None):\n    num_basis = len(mat)\n    mat_complex = np.zeros((num_basis, num_basis), dtype=complex)\n    for (i, vec) in enumerate(mat):\n        mat_complex[i] = format_statevector(vec, decimals)\n    return mat_complex", "docstring": "Format unitary coming from the backend to present to the Qiskit user.\n\nArgs:\nmat (list[list]): a list of list of [re, im] complex numbers\ndecimals (int): the number of decimals in the statevector.\nIf None, no rounding is done.\n\nReturns:\nlist[list[complex]]: a matrix of complex numbers", "source": "codesearchnet"}
{"code": "def map_arg(**maps):\n\n    def deco(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            if six.PY2:\n                argmap = inspect.getcallargs(func, *args, **kwargs)\n            else:\n                sig = inspect.signature(func)\n                argmap = sig.bind_partial(*args, **kwargs).arguments\n            for (k, map_func) in six.iteritems(maps):\n                if (k in argmap):\n                    argmap[k] = map_func(argmap[k])\n            return func(**argmap)\n        return wrapper\n    return deco", "docstring": "Apply a mapping on certain argument before calling the original function.\n\nArgs:\nmaps (dict): {argument_name: map_func}", "source": "codesearchnet"}
{"code": "def readSettings(self):\n    success = (self.readHolidayDates() and self.readMonthTariffs(ReadMonths.kWh) and self.readMonthTariffs(ReadMonths.kWhReverse) and self.readSchedules(ReadSchedules.Schedules_1_To_4) and self.readSchedules(ReadSchedules.Schedules_5_To_6))\n    return success", "docstring": "Recommended call to read all meter settings at once.\n\nReturns:\nbool: True if all subsequent serial calls completed with ACK.", "source": "codesearchnet"}
{"code": "def quantize_saved_model(src_saved_model_path: str, dst_saved_model_path: str, config: qc.QuantizationConfig) -> None:\n    print('=== User-provided QuantizationConfig ===')\n    print(config)\n    config = qc.QuantizationConfig.FromString(pywrap_quantization.populate_default_configs(config.SerializeToString()))\n    config = qc.QuantizationConfig.FromString(pywrap_quantization.expand_preset_configs(config.SerializeToString()))\n    print('=== Updated QuantizationConfig ===')\n    print(config)\n    if not (_has_quantization_method(config.specs, 'static_range_ptq') and len(config.calibration_options.representative_datasets) == 1) and (not _has_quantization_method(config.specs, 'weight_only_ptq')):\n        raise ValueError('`quantize_saved_model` currently only supports static-range PTQ with a single signature or weight-only quantization.')\n    signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, signature_keys=None, tags=set(config.tf_saved_model.tags))\n    signature_def_map_serialized = _serialize_signature_def_map(signature_def_map)\n    if _has_quantization_method(config.specs, 'static_range_ptq'):\n        pywrap_quantization.static_range_ptq(src_saved_model_path, dst_saved_model_path, quantization_config_serialized=config.SerializeToString(), signature_keys=list(signature_def_map.keys()), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary())\n    elif _has_quantization_method(config.specs, 'weight_only_ptq'):\n        pywrap_quantization.weight_only_ptq(src_saved_model_path, dst_saved_model_path, quantization_config_serialized=config.SerializeToString(), signature_keys=list(signature_def_map.keys()), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary())", "docstring": "Quantizes a saved model.\n\nArgs:\nsrc_saved_model_path: Path to the directory for the source SavedModel.\ndst_saved_model_path: Path to the directory for the destination SavedModel.\nconfig: Quantization configuration.\n\nRaises:\nValueError: When `config` was not configured for static-range PTQ\nsingle representative dataset.", "source": "github-repos"}
{"code": "def login(self, email, password, android_id):\n    self._email = email\n    self._android_id = android_id\n    res = gpsoauth.perform_master_login(self._email, password, self._android_id)\n    if ('Token' not in res):\n        raise exception.LoginException(res.get('Error'), res.get('ErrorDetail'))\n    self._master_token = res['Token']\n    self.refresh()\n    return True", "docstring": "Authenticate to Google with the provided credentials.\n\nArgs:\nemail (str): The account to use.\npassword (str): The account password.\nandroid_id (str): An identifier for this client.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "codesearchnet"}
{"code": "def save_qasm(self, file_path: Union[(str, bytes, int)], header: Optional[str]=None, precision: int=10, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> None:\n    self._to_qasm_output(header, precision, qubit_order).save(file_path)", "docstring": "Save a QASM file equivalent to the circuit.\n\nArgs:\nfile_path: The location of the file where the qasm will be written.\nheader: A multi-line string that is placed in a comment at the top\nof the QASM. Defaults to a cirq version specifier.\nprecision: Number of digits to use when representing numbers.\nqubit_order: Determines how qubits are ordered in the QASM\nregister.", "source": "codesearchnet"}
{"code": "def _ascending_sort(values, axis, return_argsort=False):\n    dtype = values.dtype\n    if dtype.is_unsigned:\n        offset = dtype.max\n        values_or_indices = _descending_sort(offset - values, axis, return_argsort)\n        return values_or_indices if return_argsort else offset - values_or_indices\n    elif dtype.is_integer:\n        values_or_indices = _descending_sort(-values - 1, axis, return_argsort)\n        return values_or_indices if return_argsort else -values_or_indices - 1\n    else:\n        values_or_indices = _descending_sort(-values, axis, return_argsort)\n        return values_or_indices if return_argsort else -values_or_indices", "docstring": "Sorts values in ascending order.\n\nArgs:\nvalues: Tensor of numeric values.\naxis: Index of the axis which values should be sorted along.\nreturn_argsort: If False, return the sorted values. If True, return the\nindices that would sort the values.\n\nReturns:\nThe sorted values.", "source": "github-repos"}
{"code": "def write_message(self, message, timeout):\n    \n    with self._writer_lock:\n      self._transport.write(message.header, timeout.remaining_ms)\n\n      \n      \n      \n      \n      if timeout.has_expired():\n        _LOG.warning('Timed out between AdbMessage header and data, sending '\n                     'data anyway with 10ms timeout')\n        timeout = timeouts.PolledTimeout.from_millis(10)\n      self._transport.write(message.data, timeout.remaining_ms)", "docstring": "Send the given message over this transport.\n\nArgs:\nmessage: The AdbMessage to send.\ntimeout: Use this timeout for the entire write operation, it should be an\ninstance of timeouts.PolledTimeout.", "source": "juraj-google-style"}
{"code": "def from_pkcs12(cls, key, email, scopes, subject=None, passphrase=PKCS12_PASSPHRASE):\n    key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey()\n    return cls(key=key, email=email, scopes=scopes, subject=subject)", "docstring": "Alternate constructor intended for using .p12 files.\n\nArgs:\nkey (dict) - Parsed JSON with service account credentials.\nemail (str) - Service account email.\nscopes (Union[str, collections.Iterable[str]]) -\nList of permissions that the application requests.\nsubject (str) - The email address of the user for which\nthe application is requesting delegated access.\npassphrase (str) - Passphrase of private key file.\nGoogle generates .p12 files secured with fixed 'notasecret'\npassphrase, so if you didn't change it it's fine to omit\nthis parameter.\n\nReturns:\nServiceAccount", "source": "codesearchnet"}
{"code": "def CallDhclient(interfaces, logger, dhclient_script=None):\n    logger.info('Enabling the Ethernet interfaces %s.', interfaces)\n    dhclient_command = ['dhclient']\n    if (dhclient_script and os.path.exists(dhclient_script)):\n        dhclient_command += ['-sf', dhclient_script]\n    try:\n        subprocess.check_call(((dhclient_command + ['-x']) + interfaces))\n        subprocess.check_call((dhclient_command + interfaces))\n    except subprocess.CalledProcessError:\n        logger.warning('Could not enable interfaces %s.', interfaces)", "docstring": "Configure the network interfaces using dhclient.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.\ndhclient_script: string, the path to a dhclient script used by dhclient.", "source": "codesearchnet"}
{"code": "class SpQRConfig(QuantizationConfigMixin):\n\n    def __init__(self, bits: int=3, beta1: int=16, beta2: int=16, shapes: Optional[Dict[str, int]]=None, modules_to_not_convert: Optional[List[str]]=None, **kwargs):\n        if shapes is None:\n            shapes = {}\n        self.shapes = shapes\n        self.quant_method = QuantizationMethod.SPQR\n        self.bits = bits\n        self.beta1 = beta1\n        self.beta2 = beta2\n        self.modules_to_not_convert = modules_to_not_convert\n        self.post_init()\n\n    def post_init(self):\n        \n        if not isinstance(self.bits, int):\n            raise TypeError('bits must be an int')\n        if not isinstance(self.beta1, int):\n            raise TypeError('beta1 must be an int')\n        if not isinstance(self.beta2, int):\n            raise TypeError('beta2 must be an int')\n        if self.bits != 3:\n            raise ValueError('SpQR currently only supports bits = 3')\n        if self.beta1 != 16:\n            raise ValueError('SpQR currently only supports beta1 = 16')\n        if self.beta2 != 16:\n            raise ValueError('SpQR currently only supports beta2 = 16')\n        if not isinstance(self.shapes, dict):\n            raise TypeError('shapes must be a dict')", "docstring": "This is a wrapper class about `spqr` parameters. Refer to the original publication for more details.\n\nArgs:\nbits (`int`, *optional*, defaults to 3):\nSpecifies the bit count for the weights and first order zero-points and scales.\nCurrently only bits = 3 is supported.\nbeta1 (`int`, *optional*, defaults to 16):\nSpQR tile width. Currently only beta1 = 16 is supported.\nbeta2 (`int`, *optional*, defaults to 16):\nSpQR tile height. Currently only beta2 = 16 is supported.\nshapes (`Optional`, *optional*):\nA dictionary holding the shape of each object. We need this because it's impossible\nto deduce the exact size of the parameters just from bits, beta1, beta2.\nmodules_to_not_convert (`Optional[List[str]]`, *optional*):\nOptionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized.\nDefaults to None.\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional parameters from which to initialize the configuration object.", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A REALM sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def unlock_kinetis(jlink):\n    \n    if not jlink.connected():\n        raise ValueError('No target to unlock.')\n\n    method = UNLOCK_METHODS.get(jlink.tif, None)\n    if method is None:\n        raise NotImplementedError('Unsupported target interface for unlock.')\n\n    return method(jlink)", "docstring": "Unlock for Freescale Kinetis K40 or K60 device.\n\nArgs:\njlink (JLink): an instance of a J-Link that is connected to a target.\n\nReturns:\n``True`` if the device was successfully unlocked, otherwise ``False``.\n\nRaises:\nValueError: if the J-Link is not connected to a target.", "source": "juraj-google-style"}
{"code": "def get(cls, session, team_id):\n    return cls(('/teams/%d.json' % team_id), singleton=True, session=session)", "docstring": "Return a specific team.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nteam_id (int): The ID of the team to get.\n\nReturns:\nhelpscout.models.Person: A person singleton representing the team,\nif existing. Otherwise ``None``.", "source": "codesearchnet"}
{"code": "def icao(msg):\n    DF = df(msg)\n    if (DF in (11, 17, 18)):\n        addr = msg[2:8]\n    elif (DF in (0, 4, 5, 16, 20, 21)):\n        c0 = bin2int(crc(msg, encode=True))\n        c1 = hex2int(msg[(- 6):])\n        addr = ('%06X' % (c0 ^ c1))\n    else:\n        addr = None\n    return addr", "docstring": "Calculate the ICAO address from an Mode-S message\nwith DF4, DF5, DF20, DF21\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nString: ICAO address in 6 bytes hexadecimal string", "source": "codesearchnet"}
{"code": "def load_keys():\n    consumer_key = os.environ.get('CONSUMER_KEY')\n    consumer_secret = os.environ.get('CONSUMER_SECRET')\n    access_token = os.environ.get('ACCESS_TOKEN')\n    access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')\n    return (consumer_key, consumer_secret, access_token, access_token_secret)", "docstring": "Loads Twitter keys.\n\nReturns:\ntuple: consumer_key, consumer_secret, access_token, access_token_secret", "source": "codesearchnet"}
{"code": "def _process_intersects_filter_directive(filter_operation_info, location, context, parameters):\n    filtered_field_type = filter_operation_info.field_type\n    filtered_field_name = filter_operation_info.field_name\n    argument_inferred_type = strip_non_null_from_type(filtered_field_type)\n    if (not isinstance(argument_inferred_type, GraphQLList)):\n        raise GraphQLCompilationError(u'Cannot apply \"intersects\" to non-list type {}'.format(filtered_field_type))\n    (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)\n    filter_predicate = expressions.BinaryComposition(u'intersects', expressions.LocalField(filtered_field_name), argument_expression)\n    if (non_existence_expression is not None):\n        filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)\n    return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks if the directive arg and the field intersect.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nparameters: list of 1 element, specifying the collection in which the value must exist;\nif the collection is optional and missing, the check will return True\n\nReturns:\na Filter basic block that performs the intersects check", "source": "codesearchnet"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):\n    if (kmip_version < enums.KMIPVersion.KMIP_1_3):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the CapabilityInformation object.'.format(kmip_version.value))\n    super(CapabilityInformation, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.STREAMING_CAPABILITY, local_buffer):\n        streaming_capability = primitives.Boolean(tag=enums.Tags.STREAMING_CAPABILITY)\n        streaming_capability.read(local_buffer, kmip_version=kmip_version)\n        self._streaming_capability = streaming_capability\n    if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CAPABILITY, local_buffer):\n        asynchronous_capability = primitives.Boolean(tag=enums.Tags.ASYNCHRONOUS_CAPABILITY)\n        asynchronous_capability.read(local_buffer, kmip_version=kmip_version)\n        self._asynchronous_capability = asynchronous_capability\n    if self.is_tag_next(enums.Tags.ATTESTATION_CAPABILITY, local_buffer):\n        attestation_capability = primitives.Boolean(tag=enums.Tags.ATTESTATION_CAPABILITY)\n        attestation_capability.read(local_buffer, kmip_version=kmip_version)\n        self._attestation_capability = attestation_capability\n    if (kmip_version >= enums.KMIPVersion.KMIP_1_4):\n        if self.is_tag_next(enums.Tags.BATCH_UNDO_CAPABILITY, local_buffer):\n            batch_undo_capability = primitives.Boolean(tag=enums.Tags.BATCH_UNDO_CAPABILITY)\n            batch_undo_capability.read(local_buffer, kmip_version=kmip_version)\n            self._batch_continue_capability = batch_undo_capability\n        if self.is_tag_next(enums.Tags.BATCH_CONTINUE_CAPABILITY, local_buffer):\n            batch_continue_capability = primitives.Boolean(tag=enums.Tags.BATCH_CONTINUE_CAPABILITY)\n            batch_continue_capability.read(local_buffer, kmip_version=kmip_version)\n            self._batch_continue_capability = batch_continue_capability\n    if self.is_tag_next(enums.Tags.UNWRAP_MODE, local_buffer):\n        unwrap_mode = primitives.Enumeration(enums.UnwrapMode, tag=enums.Tags.UNWRAP_MODE)\n        unwrap_mode.read(local_buffer, kmip_version=kmip_version)\n        self._unwrap_mode = unwrap_mode\n    if self.is_tag_next(enums.Tags.DESTROY_ACTION, local_buffer):\n        destroy_action = primitives.Enumeration(enums.DestroyAction, tag=enums.Tags.DESTROY_ACTION)\n        destroy_action.read(local_buffer, kmip_version=kmip_version)\n        self._destroy_action = destroy_action\n    if self.is_tag_next(enums.Tags.SHREDDING_ALGORITHM, local_buffer):\n        shredding_algorithm = primitives.Enumeration(enums.ShreddingAlgorithm, tag=enums.Tags.SHREDDING_ALGORITHM)\n        shredding_algorithm.read(local_buffer, kmip_version=kmip_version)\n        self._shredding_algorithm = shredding_algorithm\n    if self.is_tag_next(enums.Tags.RNG_MODE, local_buffer):\n        rng_mode = primitives.Enumeration(enums.RNGMode, tag=enums.Tags.RNG_MODE)\n        rng_mode.read(local_buffer, kmip_version=kmip_version)\n        self._rng_mode = rng_mode\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the CapabilityInformation structure and decode\nit into its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which 
the object will be decoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the CapabilityInformation structure.", "source": "codesearchnet"}
{"code": "def from_string(cls, prjs):\n        \n        def parse(v):\n            try:\n                return int(v)\n            except ValueError:\n                pass\n            try:\n                return float(v)\n            except ValueError:\n                return v\n\n        parts = [o.lstrip('+') for o in prjs.strip().split()]\n\n        items = map(\n            lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),\n            (p.split('=') for p in parts))\n\n        return cls({k: v for k, v in items if '+'+k in PROJ4_PARAMS.keys()})", "docstring": "Turn a PROJ.4 string into a mapping of parameters. Bare parameters\nlike \"+no_defs\" are given a value of ``True``. All keys are checked\nagainst the ``all_proj_keys`` list.\n\nArgs:\nprjs (str): A PROJ4 string.", "source": "juraj-google-style"}
{"code": "def get(url, max_backoff=32, verbose=False, **kwargs):\n    sleep_seconds = 1\n    while (sleep_seconds <= max_backoff):\n        try:\n            response = requests.get(url, **{**{'timeout': 30}, **kwargs})\n            if (400 <= response.status_code < 500):\n                return None\n            if (200 <= response.status_code < 400):\n                return response\n        except RequestException as e:\n            if verbose:\n                print(str(e))\n        time.sleep(sleep_seconds)\n        sleep_seconds *= 2\n    return None", "docstring": "Adding retries to requests.get with exponential backoff.\n\nArgs:\nurl (str): The URL to fetch\nmax_backoff (int): The number of seconds to sleep at maximums\nverbose (bool): Whether to print exceptions.\n\nReturns:\nResponse: For successful requests return requests' response. `None` otherwise.", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _build_encryption_key_information(self, value):\n        \n        if value is None:\n            return None\n        if not isinstance(value, dict):\n            raise TypeError(\"Encryption key information must be a dictionary.\")\n\n        cryptographic_parameters = value.get('cryptographic_parameters')\n        if cryptographic_parameters:\n            cryptographic_parameters = self._build_cryptographic_parameters(\n                cryptographic_parameters\n            )\n        encryption_key_information = cobjects.EncryptionKeyInformation(\n            unique_identifier=value.get('unique_identifier'),\n            cryptographic_parameters=cryptographic_parameters\n        )\n        return encryption_key_information", "docstring": "Build an EncryptionKeyInformation struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nEncryptionKeyInformation struct.\n\nReturns:\nEncryptionKeyInformation: an EncryptionKeyInformation struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "juraj-google-style"}
{"code": "def call(self, inputs):\n    \n    del inputs  \n    latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size),\n                                            sample_shape=1,\n                                            name=\"latent_code\")\n    state = self.lstm.zero_state(1, dtype=tf.float32)\n    t = 0\n    productions = []\n    stack = [self.grammar.start_symbol]\n    while stack:\n      symbol = stack.pop()\n      net, state = self.lstm(latent_code, state)\n      logits = (self.output_layer(net) +\n                self.grammar.mask(symbol, on_value=0., off_value=-1e9))\n      production = ed.OneHotCategorical(logits=logits,\n                                        name=\"production_\" + str(t))\n      _, rhs = self.grammar.production_rules[tf.argmax(\n          input=production, axis=-1)]\n      for symbol in rhs:\n        if symbol in self.grammar.nonterminal_symbols:\n          stack.append(symbol)\n      productions.append(production)\n      t += 1\n    return tf.stack(productions, axis=1)", "docstring": "Runs the model forward to generate a sequence of productions.\n\nArgs:\ninputs: Unused.\n\nReturns:\nproductions: Tensor of shape [1, num_productions, num_production_rules].\nSlices along the `num_productions` dimension represent one-hot vectors.", "source": "juraj-google-style"}
{"code": "def variable(dims=1):\n    \n    if dims == 1:\n        return Poly({(1,): 1}, dim=1, shape=())\n    return Poly({\n        tuple(indices): indices for indices in numpy.eye(dims, dtype=int)\n    }, dim=dims, shape=(dims,))", "docstring": "Simple constructor to create single variables to create polynomials.\n\nArgs:\ndims (int):\nNumber of dimensions in the array.\n\nReturns:\n(Poly):\nPolynomial array with unit components in each dimension.\n\nExamples:\n>>> print(variable())\nq0\n>>> print(variable(3))\n[q0, q1, q2]", "source": "juraj-google-style"}
{"code": "def AddItem(self, item, f=(lambda x: x)):\n    with self._mutex:\n        if ((len(self.items) < self._max_size) or (self._max_size == 0)):\n            self.items.append(f(item))\n        else:\n            r = self._random.randint(0, self._num_items_seen)\n            if (r < self._max_size):\n                self.items.pop(r)\n                self.items.append(f(item))\n            elif self.always_keep_last:\n                self.items[(- 1)] = f(item)\n        self._num_items_seen += 1", "docstring": "Add an item to the ReservoirBucket, replacing an old item if necessary.\n\nThe new item is guaranteed to be added to the bucket, and to be the last\nelement in the bucket. If the bucket has reached capacity, then an old item\nwill be replaced. With probability (_max_size/_num_items_seen) a random item\nin the bucket will be popped out and the new item will be appended\nto the end. With probability (1 - _max_size/_num_items_seen)\nthe last item in the bucket will be replaced.\n\nSince the O(n) replacements occur with O(1/_num_items_seen) likelihood,\nthe amortized runtime is O(1).\n\nArgs:\nitem: The item to add to the bucket.\nf: A function to transform item before addition, if it will be kept in\nthe reservoir.", "source": "codesearchnet"}
{"code": "def product_name(self):\n        \n        buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n        self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)\n        return ctypes.string_at(buf).decode()", "docstring": "Returns the product name of the connected J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nProduct name.", "source": "juraj-google-style"}
{"code": "def install(self, connection, partition, table_name=None, columns=None, materialize=False,\n                logger=None):\n        \n\n        partition.localize()\n\n        self._add_partition(connection, partition)\n        fdw_table = partition.vid\n        view_table = '{}_v'.format(fdw_table)\n\n        if materialize:\n            with connection.cursor() as cursor:\n                view_exists = self._relation_exists(connection, view_table)\n                if view_exists:\n                    logger.debug(\n                        'Materialized view of the partition already exists.\\n    partition: {}, view: {}'\n                        .format(partition.name, view_table))\n                else:\n                    query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'\\\n                        .format(view_table, fdw_table)\n                    logger.debug(\n                        'Creating new materialized view of the partition.'\n                        '\\n    partition: {}, view: {}, query: {}'\n                        .format(partition.name, view_table, query))\n                    cursor.execute(query)\n                    cursor.execute('COMMIT;')\n\n        final_table = view_table if materialize else fdw_table\n\n        with connection.cursor() as cursor:\n            view_q = \"CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} \".format(partition.vid, final_table)\n            cursor.execute(view_q)\n            cursor.execute('COMMIT;')\n\n        return partition.vid", "docstring": "Creates FDW or materialize view for given partition.\n\nArgs:\nconnection: connection to postgresql\npartition (orm.Partition):\nmaterialize (boolean): if True, create read-only table. If False create virtual table.\n\nReturns:\nstr: name of the created table.", "source": "juraj-google-style"}
{"code": "def retryable(a_func, retry_options, **kwargs):\n    delay_mult = retry_options.backoff_settings.retry_delay_multiplier\n    max_delay_millis = retry_options.backoff_settings.max_retry_delay_millis\n    has_timeout_settings = _has_timeout_settings(retry_options.backoff_settings)\n    if has_timeout_settings:\n        timeout_mult = retry_options.backoff_settings.rpc_timeout_multiplier\n        max_timeout = (retry_options.backoff_settings.max_rpc_timeout_millis / _MILLIS_PER_SECOND)\n        total_timeout = (retry_options.backoff_settings.total_timeout_millis / _MILLIS_PER_SECOND)\n\n    def inner(*args):\n        'Equivalent to ``a_func``, but retries upon transient failure.\\n\\n        Retrying is done through an exponential backoff algorithm configured\\n        by the options in ``retry``.\\n        '\n        delay = retry_options.backoff_settings.initial_retry_delay_millis\n        exc = errors.RetryError('Retry total timeout exceeded before anyresponse was received')\n        if has_timeout_settings:\n            timeout = (retry_options.backoff_settings.initial_rpc_timeout_millis / _MILLIS_PER_SECOND)\n            now = time.time()\n            deadline = (now + total_timeout)\n        else:\n            timeout = None\n            deadline = None\n        while ((deadline is None) or (now < deadline)):\n            try:\n                to_call = add_timeout_arg(a_func, timeout, **kwargs)\n                return to_call(*args)\n            except Exception as exception:\n                code = config.exc_to_code(exception)\n                if (code not in retry_options.retry_codes):\n                    raise errors.RetryError('Exception occurred in retry method that was not classified as transient', exception)\n                exc = errors.RetryError('Retry total timeout exceeded with exception', exception)\n                to_sleep = random.uniform(0, (delay * 2))\n                time.sleep((to_sleep / _MILLIS_PER_SECOND))\n                delay = min((delay * delay_mult), max_delay_millis)\n                if has_timeout_settings:\n                    now = time.time()\n                    timeout = min((timeout * timeout_mult), max_timeout, (deadline - now))\n        raise exc\n    return inner", "docstring": "Creates a function equivalent to a_func, but that retries on certain\nexceptions.\n\nArgs:\na_func (callable): A callable.\nretry_options (RetryOptions): Configures the exceptions upon which the\ncallable should retry, and the parameters to the exponential backoff\nretry algorithm.\nkwargs: Addtional arguments passed through to the callable.\n\nReturns:\nCallable: A function that will retry on exception.", "source": "codesearchnet"}
{"code": "def sub_annotations_for_parameterized_class(self, cls: abstract.ParameterizedClass, annotations: dict[str, abstract.BaseValue]) -> dict[str, abstract.BaseValue]:\n    formal_type_parameters = cls.get_formal_type_parameters()\n\n    def get_type_parameter_subst(annotation: abstract.BaseValue) -> abstract.BaseValue | None:\n        for name in (f'{cls.full_name}.{annotation.name}', f'{cls.name}.{annotation.name}'):\n            if name in formal_type_parameters:\n                return formal_type_parameters[name]\n        return annotation\n    return {name: self._do_sub_one_annotation(self.ctx.root_node, annot, get_type_parameter_subst) for name, annot in annotations.items()}", "docstring": "Apply type parameter substitutions to a dictionary of annotations.\n\nArgs:\ncls: ParameterizedClass that defines type parameter substitutions.\nannotations: A dictionary of annotations to which type parameter\nsubstition should be applied.\n\nReturns:\nAnnotations with type parameters substituted.", "source": "github-repos"}
{"code": "def get_cso_dataframe(self):\n    assert (self.jco is not None)\n    assert (self.pst is not None)\n    weights = self.pst.observation_data.loc[(self.jco.to_dataframe().index, 'weight')].copy().values\n    cso = (np.diag(np.sqrt(self.qhalfx.x.dot(self.qhalfx.x.T))) / float((self.pst.npar - 1)))\n    cso_df = pd.DataFrame.from_dict({'obnme': self.jco.to_dataframe().index, 'cso': cso})\n    cso_df.index = cso_df['obnme']\n    cso_df.drop('obnme', axis=1, inplace=True)\n    return cso_df", "docstring": "get a dataframe of composite observation sensitivity, as returned by PEST in the\nseo file.\n\nNote that this formulation deviates slightly from the PEST documentation in that the\nvalues are divided by (npar-1) rather than by (npar).\n\nThe equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)\nReturns:\ncso : pandas.DataFrame", "source": "codesearchnet"}
{"code": "def get_compliance_preview(self):\n    uri = '{}/compliance-preview'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Gets the preview of manual and automatic updates required to make the server profile\nconsistent with its template.\n\nReturns:\ndict: Server profile compliance preview.", "source": "codesearchnet"}
{"code": "def save_image(figure, filename):\n    path = os.path.join(IMAGES_DIR, filename)\n    figure.savefig(path, bbox_inches='tight')\n    plt.close(figure)", "docstring": "Save an image to the docs images directory.\n\nArgs:\nfilename (str): The name of the file (not containing\ndirectory info).", "source": "codesearchnet"}
{"code": "def ParseCacheEntry(self, file_object, block_offset):\n    \n    cache_entry_map = self._GetDataTypeMap('chrome_cache_entry')\n\n    try:\n      cache_entry, _ = self._ReadStructureFromFileObject(\n          file_object, block_offset, cache_entry_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Unable to parse cache entry at offset: 0x{0:08x} with error: '\n          '{1!s}').format(block_offset, exception))\n\n    cache_entry_object = CacheEntry()\n\n    cache_entry_object.hash = cache_entry.hash\n    cache_entry_object.next = CacheAddress(cache_entry.next_address)\n    cache_entry_object.rankings_node = CacheAddress(\n        cache_entry.rankings_node_address)\n    cache_entry_object.creation_time = cache_entry.creation_time\n\n    byte_array = cache_entry.key\n    byte_string = bytes(bytearray(byte_array))\n    cache_entry_object.key, _, _ = byte_string.partition(b'\\x00')\n\n    try:\n      cache_entry_object.original_url = cache_entry_object.key.decode('ascii')\n    except UnicodeDecodeError as exception:\n      raise errors.ParseError(\n          'Unable to decode original URL in key with error: {0!s}'.format(\n              exception))\n\n    return cache_entry_object", "docstring": "Parses a cache entry.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to read from.\nblock_offset (int): block offset of the cache entry.\n\nReturns:\nCacheEntry: cache entry.\n\nRaises:\nParseError: if the cache entry cannot be read.", "source": "juraj-google-style"}
{"code": "def _on_pass(self, record):\n    msg = record.details\n    if msg:\n        logging.info(msg)\n    self.on_pass(record)", "docstring": "Proxy function to guarantee the base implementation of on_pass is\ncalled.\n\nArgs:\nrecord: records.TestResultRecord, a copy of the test record for\nthis test, containing all information of the test execution\nincluding exception objects.", "source": "github-repos"}
{"code": "def default(fields=None, count=5):\n    projection = Sampling._create_projection(fields)\n    return (lambda sql: ('SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count)))", "docstring": "Provides a simple default sampling strategy which limits the result set by a count.\n\nArgs:\nfields: an optional list of field names to retrieve.\ncount: optional number of rows to limit the sampled results to.\nReturns:\nA sampling function that can be applied to get a random sampling.", "source": "codesearchnet"}
{"code": "def hardware_status(self):\n        \n        stat = structs.JLinkHardwareStatus()\n        res = self._dll.JLINKARM_GetHWStatus(ctypes.byref(stat))\n        if res == 1:\n            raise errors.JLinkException('Error in reading hardware status.')\n        return stat", "docstring": "Retrieves and returns the hardware status.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA ``JLinkHardwareStatus`` describing the J-Link hardware.", "source": "juraj-google-style"}
{"code": "def __init__(self, tcex):\n        \n        self.tcex = tcex\n\n        \n        self._request = self.tcex.request(self.tcex.session)\n        \n        self._request.content_type = 'application/json'\n\n        \n        self._api_branch = None\n        self._api_branch_base = None\n        self._api_entity = None\n        self._api_uri = None\n        self._case_preference = 'sensitive'\n        self._custom = False\n        self._http_method = 'GET'\n        self._filters = []\n        self._filter_or = False\n        self._name = None\n        self._parsable = False\n        self._paginate = True\n        self._paginate_count = 0\n        self._parent = None\n        self._request_entity = None\n        self._request_uri = None\n        self._result_count = None\n        self._result_limit = 500\n        self._result_start = 0\n        self._stream = False\n        self._status_codes = {}\n        self._value_fields = []\n        self.owner = self.tcex.args.api_default_org", "docstring": "Initialize the Class properties.\n\nArgs:\ntcex (object): Instance of TcEx.", "source": "juraj-google-style"}
{"code": "def get_subdomain(url):\n    if (url not in URLHelper.__cache):\n        URLHelper.__cache[url] = urlparse(url)\n    return '.'.join(URLHelper.__cache[url].netloc.split('.')[:(- 2)])", "docstring": "Get the subdomain of the given URL.\n\nArgs:\nurl (str): The URL to get the subdomain from.\n\nReturns:\nstr: The subdomain(s)", "source": "codesearchnet"}
{"code": "def parse_done(self, buf: memoryview) -> Tuple[(bool, memoryview)]:\n    match = self._pattern.match(buf)\n    if (not match):\n        raise NotParseable(buf)\n    done = (match.group(1).upper() == self.continuation)\n    buf = buf[match.end(0):]\n    return (done, buf)", "docstring": "Parse the continuation line sent by the client to end the ``IDLE``\ncommand.\n\nArgs:\nbuf: The continuation line to parse.", "source": "codesearchnet"}
{"code": "def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):\n\n        \n\n        \n        terms = np.array(self.terms[term])[:, np.newaxis]\n\n        \n        kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)\n\n        \n        x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]\n        scores = kde.score_samples(x_axis)\n\n        \n        return np.exp(scores) * (len(self.tokens) / samples)", "docstring": "Estimate the kernel density of the instances of term in the text.\n\nArgs:\nterm (str): A stemmed term.\nbandwidth (int): The kernel bandwidth.\nsamples (int): The number of evenly-spaced sample points.\nkernel (str): The kernel function.\n\nReturns:\nnp.array: The density estimate.", "source": "juraj-google-style"}
{"code": "def _pad_for_batching(self, pixel_values: List[torch.Tensor], image_sizes: List[List[int]]):\n    max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]))\n    pixel_values = [torch.nn.functional.pad(image, pad=(0, max_shape[1] - size[1], 0, max_shape[0] - size[0])) for image, size in zip(pixel_values, image_sizes)]\n    return torch.stack(pixel_values)", "docstring": "Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.\nArgs:\npixel_values (`List[torch.Tensor]`):\nAn array of pixel values of each images of shape (`batch_size`, `channels`, `height`, `width`)\nimage_sizes (`List[List[int]]`):\nA list of sizes for each image in `pixel_values` in (height, width) format.\nReturns:\nList[`torch.Tensor`]: The padded images.", "source": "github-repos"}
{"code": "def to_json(self, include_body=False):\n        \n\n        message = {\n            'emailId': self.email_id,\n            'timestamp': isoformat(self.timestamp),\n            'subsystem': self.subsystem,\n            'subject': self.subject,\n            'sender': self.sender,\n            'recipients': self.recipients,\n            'uuid': self.uuid,\n            'messageHtml': None,\n            'messageText': None\n        }\n\n        if include_body:\n            message['messageHtml'] = self.message_html\n            message['messageText'] = self.message_text\n\n        return message", "docstring": "Exports the object to a JSON friendly dict\n\nArgs:\ninclude_body (bool): Include the body of the message in the output\n\nReturns:\nDict representation of object type", "source": "juraj-google-style"}
{"code": "def export_default_probes(path, module_name = '', raise_errors = False):\n    \n\n    raise NotImplementedError\n\n\n    import b26_toolkit.b26_toolkit.instruments as instruments\n    from pylabcontrol.core import Probe\n\n    for name, obj in inspect.getmembers(instruments):\n\n        if inspect.isclass(obj):\n\n            try:\n                instrument = obj()\n                print(('--- created ', obj.__name__, ' -- '))\n                for probe_name, probe_info in instrument._PROBES.items():\n                    probe = Probe(instrument, probe_name, info = probe_info)\n                    filename = os.path.join(path, '{:s}.b26'.format(instrument.name))\n                    probe.save(filename)\n            except:\n                print(('failed to create probe file for: {:s}'.format(obj.__name__)))\n                print(('failed to create probe file for: {:s}'.format(obj.__name__)))", "docstring": "NOT IMPLEMENTED YET\ntries to instantiate all the instruments that are imported in /instruments/__init__.py\nand the probes of each instrument that could be instantiated into a .b26 file in the folder path\nArgs:\npath: target path for .b26 files", "source": "juraj-google-style"}
{"code": "def _google_section(line_info):\n    colon_index = line_info.remaining.find(':')\n    possible_title = line_info.remaining[:colon_index]\n    return _section_from_possible_title(possible_title)", "docstring": "Checks whether the current line is the start of a new Google-style section.\n\nThis docstring is a Google-style docstring. Google-style sections look like\nthis:\n\nSection Name:\nsection body goes here\n\nArgs:\nline_info: Information about the current line.\nReturns:\nA Section type if one matches, or None if no section type matches.", "source": "github-repos"}
{"code": "def get(self, group=None, backend=None):\n        \n        from .options import Store, Options\n        keywords = {}\n        groups = Options._option_groups if group is None else [group]\n        backend = backend if backend else Store.current_backend\n        for group in groups:\n            optsobj = Store.lookup_options(backend, self._obj, group)\n            keywords = dict(keywords, **optsobj.kwargs)\n        return Options(**keywords)", "docstring": "Returns the corresponding Options object.\n\nArgs:\ngroup: The options group. Flattens across groups if None.\nbackend: Current backend if None otherwise chosen backend.\n\nReturns:\nOptions object associated with the object containing the\napplied option keywords.", "source": "juraj-google-style"}
{"code": "def _as_document(self, dataset):\n        \n        assert isinstance(dataset, Dataset)\n\n        doc = super(self.__class__, self)._as_document(dataset)\n\n        \n        \n        doc['keywords'] = doc['keywords'].replace('-', '_')\n        doc['doc'] = doc['doc'].replace('-', '_')\n        doc['title'] = doc['title'].replace('-', '_')\n        return doc", "docstring": "Converts dataset to document indexed by to FTS index.\n\nArgs:\ndataset (orm.Dataset): dataset to convert.\n\nReturns:\ndict with structure matches to BaseDatasetIndex._schema.", "source": "juraj-google-style"}
{"code": "def conv_output_shape(input_shape, kernel_shape, strides, padding):\n    dims = range(len(kernel_shape))\n    output_shape = [conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d]) for d in dims]\n    output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d] for d in dims])\n    return output_shape", "docstring": "Return the output shape of an N-D convolution.\n\nForces dimensions where input is empty (size 0) to remain empty.\n\nArgs:\ninput_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the\ninput.\nkernel_shape: tuple of size N, spatial shape of the convolutional kernel /\nreceptive field.\nstrides: tuple of size N, strides along each spatial dimension.\npadding: type of padding, string `\"same\"` or `\"valid\"`.\n`\"valid\"` means no padding. `\"same\"` results in padding evenly to\nthe left/right or up/down of the input such that output has the same\nheight/width dimension as the input.\n\nReturns:\ntuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.", "source": "github-repos"}
{"code": "def process_fixed_issues(self, volumes, existing_issues):\n    fixed_issues = []\n    for (issue_id, issue) in list(existing_issues.items()):\n        if (issue_id not in volumes):\n            fixed_issues.append(issue)\n    return fixed_issues", "docstring": "Provided a list of volumes and existing issues, returns a list of fixed issues to be deleted\n\nArgs:\nvolumes (`dict`): A dictionary keyed on the issue id, with the :obj:`Volume` object as the value\nexisting_issues (`dict`): A dictionary keyed on the issue id, with the :obj:`EBSVolumeAuditIssue` object as\nthe value\n\nReturns:\n:obj:`list` of :obj:`EBSVolumeAuditIssue`", "source": "codesearchnet"}
{"code": "def security(self, domains):\n    api_name = 'opendns-security'\n    fmt_url_path = u'security/name/{0}.json'\n    return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Calls security end point and adds an 'is_suspicious' key to each response.\n\nArgs:\ndomains: An enumerable of strings\nReturns:\nA dict of {domain: security_result}", "source": "codesearchnet"}
{"code": "def _ParseMRUListEntryValue(\n      self, parser_mediator, registry_key, entry_index, entry_letter, **kwargs):\n    \n    value_string = ''\n\n    value = registry_key.GetValueByName('{0:s}'.format(entry_letter))\n    if value is None:\n      parser_mediator.ProduceExtractionWarning(\n          'missing MRUList value: {0:s} in key: {1:s}.'.format(\n              entry_letter, registry_key.path))\n\n    elif value.DataIsString():\n      value_string = value.GetDataAsObject()\n\n    elif value.DataIsBinaryData():\n      logger.debug((\n          '[{0:s}] Non-string MRUList entry value: {1:s} parsed as string '\n          'in key: {2:s}.').format(self.NAME, entry_letter, registry_key.path))\n\n      utf16le_string_map = self._GetDataTypeMap('utf16le_string')\n\n      try:\n        value_string = self._ReadStructureFromByteStream(\n            value.data, 0, utf16le_string_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse MRUList entry value: {0:s} with error: '\n            '{1!s}').format(entry_letter, exception))\n\n      value_string = value_string.rstrip('\\x00')\n\n    return value_string", "docstring": "Parses the MRUList entry value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\nthe MRUList value.\nentry_index (int): MRUList entry index.\nentry_letter (str): character value representing the entry.\n\nReturns:\nstr: MRUList entry value.", "source": "juraj-google-style"}
{"code": "def __init__(self, ppp_config_dir=None, enhancement_config_file=None):\n        \n        self.ppp_config_dir = ppp_config_dir or get_environ_config_dir()\n        self.enhancement_config_file = enhancement_config_file\n        \n        if self.enhancement_config_file is None:\n            \n            \n            config_fn = os.path.join(\"enhancements\", \"generic.yaml\")\n            self.enhancement_config_file = config_search_paths(config_fn, self.ppp_config_dir)\n\n        if not self.enhancement_config_file:\n            \n            self.enhancement_tree = None\n        else:\n            if not isinstance(self.enhancement_config_file, (list, tuple)):\n                self.enhancement_config_file = [self.enhancement_config_file]\n\n            self.enhancement_tree = EnhancementDecisionTree(*self.enhancement_config_file)\n\n        self.sensor_enhancement_configs = []", "docstring": "Initialize an Enhancer instance.\n\nArgs:\nppp_config_dir: Points to the base configuration directory\nenhancement_config_file: The enhancement configuration to apply, False to leave as is.", "source": "juraj-google-style"}
{"code": "def scalar_pb(tag, data, description=None):\n    arr = np.array(data)\n    if (arr.shape != ()):\n        raise ValueError(('Expected scalar shape for tensor, got shape: %s.' % arr.shape))\n    if (arr.dtype.kind not in ('b', 'i', 'u', 'f')):\n        raise ValueError(('Cast %s to float is not supported' % arr.dtype.name))\n    tensor_proto = tensor_util.make_tensor_proto(arr.astype(np.float32))\n    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)\n    summary = summary_pb2.Summary()\n    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor_proto)\n    return summary", "docstring": "Create a scalar summary_pb2.Summary protobuf.\n\nArguments:\ntag: String tag for the summary.\ndata: A 0-dimensional `np.array` or a compatible python number type.\ndescription: Optional long-form description for this summary, as a\n`str`. Markdown is supported. Defaults to empty.\n\nRaises:\nValueError: If the type or shape of the data is unsupported.\n\nReturns:\nA `summary_pb2.Summary` protobuf object.", "source": "codesearchnet"}
{"code": "def connect(self, wire_char, where, label=None):\n        \n\n        if 'top' in where and self.top_connector:\n            self.top_connect = self.top_connector[wire_char]\n\n        if 'bot' in where and self.bot_connector:\n            self.bot_connect = self.bot_connector[wire_char]\n\n        if label:\n            self.top_format = self.top_format[:-1] + (label if label else \"\")", "docstring": "Connects boxes and elements using wire_char and setting proper connectors.\nArgs:\nwire_char (char): For example '║' or '│'.\nwhere (list[\"top\", \"bot\"]): Where the connector should be set.\nlabel (string): Some connectors have a label (see cu1, for example).", "source": "juraj-google-style"}
{"code": "def _get_params(self, validator_parameter, name_prefix):\n    \n    params_validator = self.request.get(validator_parameter)\n\n    user_params = {}\n    for key in self.request.arguments():\n      if key.startswith(name_prefix):\n        values = self.request.get_all(key)\n        adjusted_key = key[len(name_prefix):]\n        if len(values) == 1:\n          user_params[adjusted_key] = values[0]\n        else:\n          user_params[adjusted_key] = values\n\n    if params_validator:\n      resolved_validator = util.for_name(params_validator)\n      resolved_validator(user_params)\n\n    return user_params", "docstring": "Retrieves additional user-supplied params for the job and validates them.\n\nArgs:\nvalidator_parameter: name of the request parameter which supplies\nvalidator for this parameter set.\nname_prefix: common prefix for all parameter names in the request.\n\nRaises:\nAny exception raised by the 'params_validator' request parameter if\nthe params fail to validate.\n\nReturns:\nThe user parameters.", "source": "juraj-google-style"}
{"code": "def CopyFromDict(self, attributes):\n    for (attribute_name, attribute_value) in attributes.items():\n        if (attribute_name[0] == '_'):\n            continue\n        setattr(self, attribute_name, attribute_value)", "docstring": "Copies the attribute container from a dictionary.\n\nArgs:\nattributes (dict[str, object]): attribute values per name.", "source": "codesearchnet"}
{"code": "def new_scope(self, new_scope={}):\n        \n        old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope)\n        yield\n        self.scopes = old_scopes", "docstring": "Add a new innermost scope for the duration of the with block.\n\nArgs:\nnew_scope (dict-like): The scope to add.", "source": "juraj-google-style"}
{"code": "def __init__(self, atlas_name, root_dir, reference_gempro, reference_genome_path=None, description=None):\n        \n        Object.__init__(self, id=atlas_name, description=description)\n\n        \n        self._root_dir = None\n        self.root_dir = root_dir\n\n        self.strains = DictList()\n        self.df_orthology_matrix = pd.DataFrame()\n        \n        \n        self._orthology_matrix_has_sequences = False\n\n        \n        \n        self.reference_gempro = reference_gempro\n        if not reference_genome_path and not self.reference_gempro.genome_path:\n            self.reference_gempro.genome_path = self.reference_gempro.write_representative_sequences_file(outname=self.reference_gempro.id)\n        else:\n            self.reference_gempro.genome_path = reference_genome_path\n            \n\n        \n        self._empty_reference_gempro = None\n        if self.reference_gempro.model:\n            \n            self._empty_reference_gempro = GEMPRO(gem_name='Copied reference GEM-PRO', gem=self.reference_gempro.model.copy())\n            \n            for x in self._empty_reference_gempro.genes:\n                x.reset_protein()\n        else:\n            \n            strain_genes = [x.id for x in self.reference_gempro.genes]\n            if len(strain_genes) == 0:\n                raise ValueError('GEM-PRO has no genes, unable to run multi-strain analysis')\n            self._empty_reference_gempro = GEMPRO(gem_name='Copied reference GEM-PRO', genes_list=strain_genes)", "docstring": "Prepare a GEM-PRO model for ATLAS analysis\n\nArgs:\natlas_name (str): Name of your ATLAS project\nroot_dir (str): Path to where the folder named after ``atlas_name`` will be created.\nreference_gempro (GEMPRO): GEM-PRO model to use as the reference genome\nreference_genome_path (str): Path to reference genome FASTA file\ndescription (str): Optional string to describe your project", "source": "juraj-google-style"}
{"code": "def reset_internal_states(self, record=None):\n    self._record = None\n    self._count = 0\n    self._record = record", "docstring": "Resets the internal state of the recorder.\n\nArgs:\nrecord: records.TestResultRecord, the test record for a test.", "source": "github-repos"}
{"code": "def expected_value(self):\n    alpha = (self.__success + self.__default_alpha)\n    beta = (self.__failure + self.__default_beta)\n    try:\n        expected_value = (alpha / (alpha + beta))\n    except ZeroDivisionError:\n        expected_value = 0.0\n    return expected_value", "docstring": "Compute expected value.\n\nReturns:\nExpected value.", "source": "codesearchnet"}
{"code": "def delta_stoichiometry( reactants, products ):\n     \n    totals = Counter()\n    for r in reactants:\n        totals.update( ( r * -1.0 ).stoichiometry )\n    for p in products:\n        totals.update( p.stoichiometry )\n    to_return = {}\n    for c in totals:\n        if totals[c] != 0:\n            to_return[c] = totals[c]\n    return to_return", "docstring": "Calculate the change in stoichiometry for reactants --> products.\n\nArgs:\nreactants (list(vasppy.Calculation): A list of vasppy.Calculation objects. The initial state.\nproducts  (list(vasppy.Calculation): A list of vasppy.Calculation objects. The final state.\n\nReturns:\n(Counter): The change in stoichiometry.", "source": "juraj-google-style"}
{"code": "def __init__(self, key, secret):\n    \n    self.__key = key\n    self.__secret = secret\n\n    if self.__key is None or self.__secret is None:\n      raise ValueError(\"Key and secret must be set.\")", "docstring": "Handles token authentication for Neurio Client.\n\nArgs:\nkey (string): your Neurio API key\nsecret (string): your Neurio API secret", "source": "juraj-google-style"}
{"code": "def peek(quantity, min_type=EventType.firstevent, max_type=EventType.lastevent):\n    return _peep(quantity, lib.SDL_PEEKEVENT, min_type, max_type)", "docstring": "Return events at the front of the event queue, within the specified minimum and maximum type,\nand do not remove them from the queue.\n\nArgs:\nquantity (int): The maximum number of events to return.\nmin_type (int): The minimum value for the event type of the returned events.\nmax_type (int): The maximum value for the event type of the returned events.\n\nReturns:\nList[Event]: Events from the front of the event queue.\n\nRaises:\nSDLError: If there was an error retrieving the events.", "source": "codesearchnet"}
{"code": "def get_pending_users_queryset(self, search_keyword, customer_uuid):\n        \n        queryset = PendingEnterpriseCustomerUser.objects.filter(\n            enterprise_customer__uuid=customer_uuid\n        )\n\n        if search_keyword is not None:\n            queryset = queryset.filter(user_email__icontains=search_keyword)\n\n        return queryset", "docstring": "Get the list of PendingEnterpriseCustomerUsers we want to render.\n\nArgs:\nsearch_keyword (str): The keyword to search for in pending users' email addresses.\ncustomer_uuid (str): A unique identifier to filter down to only pending users\nlinked to a particular EnterpriseCustomer.", "source": "juraj-google-style"}
{"code": "def _compose_custom_getters(getter_a, getter_b):\n    if (not getter_a):\n        return getter_b\n    if (not getter_b):\n        return getter_a\n\n    def getter_fn(getter, *args, **kwargs):\n        return getter_b(functools.partial(getter_a, getter), *args, **kwargs)\n    return getter_fn", "docstring": "Compose two custom getters.\n\nExample use:\ntf.get_variable_scope().set_custom_getter(\ncompose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))\n\nThis composes getters in the same way as creating a new variable scope with\nthe new_getter, but it does not actually create a new variable scope.\n\nArgs:\ngetter_a: a custom getter - generally from the existing variable scope.\ngetter_b: a custom getter\n\nReturns:\na custom getter", "source": "codesearchnet"}
{"code": "def download(url: str, filename: str,\n             skip_cert_verify: bool = True) -> None:\n    \n    log.info(\"Downloading from {} to {}\", url, filename)\n\n    \n    \n    \n    \n    \n    \n    \n    \n\n    ctx = ssl.create_default_context()  \n    if skip_cert_verify:\n        log.debug(\"Skipping SSL certificate check for \" + url)\n        ctx.check_hostname = False\n        ctx.verify_mode = ssl.CERT_NONE\n    with urllib.request.urlopen(url, context=ctx) as u, open(filename,\n                                                             'wb') as f:  \n        f.write(u.read())", "docstring": "Downloads a URL to a file.\n\nArgs:\nurl: URL to download from\nfilename: file to save to\nskip_cert_verify: skip SSL certificate check?", "source": "juraj-google-style"}
{"code": "def is_valid(container, path):\n    \n    try:\n        tmp_hash_path = container.filename + \".hash\"\n        with open(tmp_hash_path, 'r') as tmp_file:\n            tmp_hash = tmp_file.readline()\n    except IOError:\n        LOG.info(\"No .hash-file in the tmp-directory.\")\n\n    container_hash_path = local.path(path) / \"gentoo.tar.bz2.hash\"\n    if container_hash_path.exists():\n        with open(container_hash_path, 'r') as hash_file:\n            container_hash = hash_file.readline()\n            return container_hash == tmp_hash\n    return False", "docstring": "Checks if a container exists and is unpacked.\n\nArgs:\npath: The location where the container is expected.\n\nReturns:\nTrue if the container is valid, False if the container needs to\nunpacked or if the path does not exist yet.", "source": "juraj-google-style"}
{"code": "def generate_meta_features(path, base_learner_id):\n    \n    with functions.DBContextManager(path) as session:\n        base_learner = session.query(models.BaseLearner).filter_by(id=base_learner_id).first()\n        if not base_learner:\n            raise exceptions.UserError('Base learner {} '\n                                       'does not exist'.format(base_learner_id))\n\n        base_learner.job_id = get_current_job().id\n        base_learner.job_status = 'started'\n\n        session.add(base_learner)\n        session.commit()\n\n        try:\n            est = base_learner.return_estimator()\n            extraction = session.query(models.Extraction).first()\n            X, y = extraction.return_train_dataset()\n            return_splits_iterable = functions.import_object_from_string_code(\n                extraction.meta_feature_generation['source'],\n                'return_splits_iterable'\n            )\n\n            meta_features_list = []\n            trues_list = []\n            for train_index, test_index in return_splits_iterable(X, y):\n                X_train, X_test = X[train_index], X[test_index]\n                y_train, y_test = y[train_index], y[test_index]\n                est = est.fit(X_train, y_train)\n                meta_features_list.append(\n                    getattr(est, base_learner.base_learner_origin.\n                            meta_feature_generator)(X_test)\n                )\n                trues_list.append(y_test)\n            meta_features = np.concatenate(meta_features_list, axis=0)\n            y_true = np.concatenate(trues_list)\n\n            for key in base_learner.base_learner_origin.metric_generators:\n                metric_generator = functions.import_object_from_string_code(\n                    base_learner.base_learner_origin.metric_generators[key],\n                    'metric_generator'\n                )\n                base_learner.individual_score[key] = metric_generator(y_true, meta_features)\n\n            meta_features_path = base_learner.meta_features_path(path)\n\n            if not os.path.exists(os.path.dirname(meta_features_path)):\n                os.makedirs(os.path.dirname(meta_features_path))\n\n            np.save(meta_features_path, meta_features, allow_pickle=False)\n            base_learner.job_status = 'finished'\n            base_learner.meta_features_exists = True\n            session.add(base_learner)\n            session.commit()\n\n        except:\n            session.rollback()\n            base_learner.job_status = 'errored'\n            base_learner.description['error_type'] = repr(sys.exc_info()[0])\n            base_learner.description['error_value'] = repr(sys.exc_info()[1])\n            base_learner.description['error_traceback'] = \\\n                traceback.format_exception(*sys.exc_info())\n            session.add(base_learner)\n            session.commit()\n            raise", "docstring": "Generates meta-features for specified base learner\n\nAfter generation of meta-features, the file is saved into the meta-features folder\n\nArgs:\npath (str): Path to Xcessiv notebook\n\nbase_learner_id (str): Base learner ID", "source": "juraj-google-style"}
{"code": "def use(plugin):\n    log.debug('register new plugin: {}'.format(plugin))\n    if inspect.isfunction(plugin):\n        return plugin(Engine)\n    if (plugin and hasattr(plugin, 'register')):\n        return plugin.register(Engine)\n    raise ValueError('invalid plugin: must be a function or implement register() method')", "docstring": "Register plugin in grappa.\n\n`plugin` argument can be a function or a object that implement `register`\nmethod, which should accept one argument: `grappa.Engine` instance.\n\nArguments:\nplugin (function|module): grappa plugin object to register.\n\nRaises:\nValueError: if `plugin` is not a valid interface.\n\nExample::\n\nimport grappa\n\nclass MyOperator(grappa.Operator):\npass\n\ndef my_plugin(engine):\nengine.register(MyOperator)\n\ngrappa.use(my_plugin)", "source": "codesearchnet"}
{"code": "def rvs(self, size=1):\n    return np.random.multivariate_normal(self.mean, self.cov, size)", "docstring": "Convenience method to sample from this distribution.\n\nArgs:\nsize (int or tuple): Shape of return value. Each element is drawn\nindependently from this distribution.", "source": "codesearchnet"}
{"code": "def _base_query(self, session):\n        \n        return session.query(ORMTargetMarker) \\\n            .filter(ORMTargetMarker.name == self.name) \\\n            .filter(ORMTargetMarker.params == self.params)", "docstring": "Base query for a target.\n\nArgs:\nsession: database session to query in", "source": "juraj-google-style"}
{"code": "def visualize_conv_weights(filters, name):\n    with tf.name_scope(('visualize_w_' + name)):\n        filters = tf.transpose(filters, (3, 2, 0, 1))\n        filters = tf.unstack(filters)\n        filters = tf.concat(filters, 1)\n        filters = tf.unstack(filters)\n        filters = tf.concat(filters, 1)\n        filters = tf.expand_dims(filters, 0)\n        filters = tf.expand_dims(filters, (- 1))\n    tf.summary.image(('visualize_w_' + name), filters)", "docstring": "Visualize use weights in convolution filters.\n\nArgs:\nfilters: tensor containing the weights [H,W,Cin,Cout]\nname: label for tensorboard\n\nReturns:\nimage of all weight", "source": "codesearchnet"}
{"code": "def merged(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':\n        \n        slots = [Timeslot(slot.interval, slot.channel) for slot in self.timeslots]\n        slots.extend([Timeslot(slot.interval, slot.channel) for slot in timeslots.timeslots])\n        return TimeslotCollection(*slots)", "docstring": "Return a new TimeslotCollection merged with a specified `timeslots`\n\nArgs:\ntimeslots: TimeslotCollection to be merged", "source": "juraj-google-style"}
{"code": "def find_files(base_dir, extensions, exclude_dirs=list()):\n    \n    result = []\n    for root, dir_names, file_names in os.walk(base_dir):\n        for filename in file_names:\n            candidate = os.path.join(root, filename)\n            if should_include_file_in_search(candidate, extensions, exclude_dirs):\n                result.append(candidate)\n    return result", "docstring": "Find all files matching the given extensions.\n\nArgs:\nbase_dir (str): Path of base directory to search in.\nextensions (list): A list of file extensions to search for.\nexclude_dirs (list): A list of directories to exclude from search.\n\nReturns:\nlist of paths that match the search", "source": "juraj-google-style"}
{"code": "def delete_record(self, record):\n    self.children.remove(record.resource)\n    record.delete()", "docstring": "Remove a DNSRecord\n\nArgs:\nrecord (:obj:`DNSRecord`): :obj:`DNSRecord` to remove\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def mme_add(store, user_obj, case_obj, add_gender, add_features, add_disorders, genes_only, mme_base_url, mme_accepts, mme_token):\n    if ((not mme_base_url) or (not mme_accepts) or (not mme_token)):\n        return 'Please check that Matchmaker connection parameters are valid'\n    url = ''.join([mme_base_url, '/patient/add'])\n    features = []\n    disorders = []\n    g_features = []\n    contact_info = {'name': user_obj['name'], 'href': ''.join(['mailto:', user_obj['email']]), 'institution': 'Scout software user, Science For Life Laboratory, Stockholm, Sweden'}\n    if add_features:\n        features = hpo_terms(case_obj)\n    if add_disorders:\n        disorders = omim_terms(case_obj)\n    server_responses = []\n    submitted_info = {'contact': contact_info, 'sex': add_gender, 'features': features, 'disorders': disorders, 'genes_only': genes_only, 'patient_id': []}\n    for individual in case_obj.get('individuals'):\n        if (not (individual['phenotype'] in [2, 'affected'])):\n            continue\n        patient = {'contact': contact_info, 'id': '.'.join([case_obj['_id'], individual.get('individual_id')]), 'label': '.'.join([case_obj['display_name'], individual.get('display_name')]), 'features': features, 'disorders': disorders}\n        if add_gender:\n            if (individual['sex'] == '1'):\n                patient['sex'] = 'MALE'\n            else:\n                patient['sex'] = 'FEMALE'\n        if case_obj.get('suspects'):\n            g_features = genomic_features(store, case_obj, individual.get('display_name'), genes_only)\n            patient['genomicFeatures'] = g_features\n        resp = matchmaker_request(url=url, token=mme_token, method='POST', content_type=mme_accepts, accept='application/json', data={'patient': patient})\n        server_responses.append({'patient': patient, 'message': resp.get('message'), 'status_code': resp.get('status_code')})\n    submitted_info['server_responses'] = server_responses\n    return submitted_info", "docstring": "Add a patient to MatchMaker server\n\nArgs:\nstore(adapter.MongoAdapter)\nuser_obj(dict) a scout user object (to be added as matchmaker contact)\ncase_obj(dict) a scout case object\nadd_gender(bool) if True case gender will be included in matchmaker\nadd_features(bool) if True HPO features will be included in matchmaker\nadd_disorders(bool) if True OMIM diagnoses will be included in matchmaker\ngenes_only(bool) if True only genes and not variants will be shared\nmme_base_url(str) base url of the MME server\nmme_accepts(str) request content accepted by MME server\nmme_token(str) auth token of the MME server\n\nReturns:\nsubmitted_info(dict) info submitted to MatchMaker and its responses", "source": "codesearchnet"}
{"code": "def _generate_entry(self, vm):\n        \n        return \\\n            '{name} ' \\\n            'ansible_host={ip} ' \\\n            'ansible_ssh_private_key_file={key}'.format(\n                name=vm.name(),\n                ip=vm.ip(),\n                key=self.prefix.paths.ssh_id_rsa()\n            )", "docstring": "Generate host entry for the given VM\nArgs:\nvm (lago.plugins.vm.VMPlugin): The VM for which the entry\nshould be created for.\n\nReturns:\nstr: An entry for vm", "source": "juraj-google-style"}
{"code": "def _UpdateAndMigrateUnmerged(self, not_merged_stops, zone_map, merge_map, schedule):\n    for (stop, migrated_stop) in not_merged_stops:\n        if (stop.zone_id in zone_map):\n            migrated_stop.zone_id = zone_map[stop.zone_id]\n        else:\n            migrated_stop.zone_id = self.feed_merger.GenerateId(stop.zone_id)\n            zone_map[stop.zone_id] = migrated_stop.zone_id\n        if stop.parent_station:\n            parent_original = schedule.GetStop(stop.parent_station)\n            migrated_stop.parent_station = merge_map[parent_original].stop_id\n        self.feed_merger.merged_schedule.AddStopObject(migrated_stop)", "docstring": "Correct references in migrated unmerged stops and add to merged_schedule.\n\nFor stops migrated from one of the input feeds to the output feed update the\nparent_station and zone_id references to point to objects in the output\nfeed. Then add the migrated stop to the new schedule.\n\nArgs:\nnot_merged_stops: list of stops from one input feed that have not been\nmerged\nzone_map: map from zone_id in the input feed to zone_id in the output feed\nmerge_map: map from Stop objects in the input feed to Stop objects in\nthe output feed\nschedule: the input Schedule object", "source": "codesearchnet"}
{"code": "def _FormatDateTime(self, event):\n    \n    try:\n      datetime_object = datetime.datetime(\n          1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)\n      datetime_object += datetime.timedelta(microseconds=event.timestamp)\n      datetime_object.astimezone(self._output_mediator.timezone)\n\n      return datetime_object.replace(tzinfo=None)\n\n    except (OverflowError, ValueError) as exception:\n      self._ReportEventError(event, (\n          'unable to copy timestamp: {0!s} to a human readable date and time '\n          'with error: {1!s}. Defaulting to: \"ERROR\"').format(\n              event.timestamp, exception))\n      return 'ERROR'", "docstring": "Formats the date to a datetime object without timezone information.\n\nNote: timezone information must be removed due to lack of support\nby xlsxwriter and Excel.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ndatetime.datetime|str: date and time value or a string containing\n\"ERROR\" on OverflowError.", "source": "juraj-google-style"}
{"code": "def cuts_connections(self, a, b):\n        \n        n = max(self.indices) + 1\n        return self.cut_matrix(n)[np.ix_(a, b)].any()", "docstring": "Check if this cut severs any connections from ``a`` to ``b``.\n\nArgs:\na (tuple[int]): A set of nodes.\nb (tuple[int]): A set of nodes.", "source": "juraj-google-style"}
{"code": "def __init__(self, address, ap, data):\n        \n        super(WriteRequest, self).__init__(address=address, ap=ap, data=data)", "docstring": "Initializes the base class.\n\nArgs:\nself (WriteRequest): the ``WriteRequest`` instance\naddress (int): the register index\nap (bool): ``True`` if this request is to an Access Port Access\nRegister, otherwise ``False`` for a Debug Port Access Register\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def fetch_committed_offsets(self, partitions):\n        \n        if not partitions:\n            return {}\n\n        while True:\n            self.ensure_coordinator_ready()\n\n            \n            future = self._send_offset_fetch_request(partitions)\n            self._client.poll(future=future)\n\n            if future.succeeded():\n                return future.value\n\n            if not future.retriable():\n                raise future.exception \n\n            time.sleep(self.config['retry_backoff_ms'] / 1000)", "docstring": "Fetch the current committed offsets for specified partitions\n\nArguments:\npartitions (list of TopicPartition): partitions to fetch\n\nReturns:\ndict: {TopicPartition: OffsetAndMetadata}", "source": "juraj-google-style"}
{"code": "def weeks(value: Union[int, float]) -> Duration:\n    return float(value * 60 * 60 * 24 * 7)", "docstring": "Converts input value from number of weeks to a `Duration` in seconds.\n\n```python\n>>> a = tp.event_set(\n...    # Dates are converted to unix timestamps\n...    timestamps=[\"2020-01-01\", \"2020-01-07\", \"2020-01-31\"],\n...    features={\"f1\": [1, 5, -5]}\n... )\n\n>>> a.moving_sum(window_length=tp.duration.weeks(2))\nindexes: ...\ntimestamps: ['2020-01-01T00:00:00' '2020-01-07T00:00:00'\n'2020-01-31T00:00:00']\n'f1': [ 1 6 -5]\n...\n\n```\n\nArgs:\nvalue: Number of weeks.\n\nReturns:\nEquivalent number of seconds.", "source": "github-repos"}
{"code": "def delete(self, paths):\n    results = s3io.S3IO(options=self._options).delete_paths(paths)\n    exceptions = {path: error for path, error in results.items() if error is not None}\n    if exceptions:\n        raise BeamIOError('Delete operation failed', exceptions)", "docstring": "Deletes files or directories at the provided paths.\nDirectories will be deleted recursively.\n\nArgs:\npaths: list of paths that give the file objects to be deleted", "source": "github-repos"}
{"code": "def GetSshkeyMap(self, since=None):\n    return SshkeyUpdateGetter().GetUpdates(self, self.conf['sshkey_url'], since)", "docstring": "Return the sshkey map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of sshkey.SshkeyMap", "source": "github-repos"}
{"code": "def TSKVolumeGetBytesPerSector(tsk_volume):\n  \n  \n  \n  \n  if hasattr(tsk_volume, 'info') and tsk_volume.info is not None:\n    block_size = getattr(tsk_volume.info, 'block_size', 512)\n  else:\n    block_size = 512\n\n  return block_size", "docstring": "Retrieves the number of bytes per sector from a TSK volume object.\n\nArgs:\ntsk_volume (pytsk3.Volume_Info): TSK volume information.\n\nReturns:\nint: number of bytes per sector or 512 by default.", "source": "juraj-google-style"}
{"code": "def remove_comp_items(self, context_word, comp_items):\n    if context_word not in self._comp_dict:\n        raise KeyError('Context word \"%s\" has not been registered' % context_word)\n    for item in comp_items:\n        self._comp_dict[context_word].remove(item)", "docstring": "Remove a list of completion items from a completion context.\n\nArgs:\ncontext_word: A single completion word as a string. The removal will\nalso apply to all other context words of the same context.\ncomp_items: Completion items to remove.\n\nRaises:\nKeyError: if the context word has not been registered.", "source": "github-repos"}
{"code": "def _make_pr_entry(self, step, wall_time, data_array, thresholds):\n    \n    \n    \n    true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]\n    false_positives = [\n        int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]\n    tp_index = metadata.TRUE_POSITIVES_INDEX\n    fp_index = metadata.FALSE_POSITIVES_INDEX\n    positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0)\n    end_index_inclusive = len(positives) - 1\n    while end_index_inclusive > 0 and positives[end_index_inclusive] == 0:\n      end_index_inclusive -= 1\n    end_index = end_index_inclusive + 1\n\n    return {\n        'wall_time': wall_time,\n        'step': step,\n        'precision': data_array[metadata.PRECISION_INDEX, :end_index].tolist(),\n        'recall': data_array[metadata.RECALL_INDEX, :end_index].tolist(),\n        'true_positives': true_positives[:end_index],\n        'false_positives': false_positives[:end_index],\n        'true_negatives':\n            [int(v) for v in\n             data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]],\n        'false_negatives':\n            [int(v) for v in\n             data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]],\n        'thresholds': thresholds[:end_index],\n    }", "docstring": "Creates an entry for PR curve data. Each entry corresponds to 1 step.\n\nArgs:\nstep: The step.\nwall_time: The wall time.\ndata_array: A numpy array of PR curve data stored in the summary format.\nthresholds: An array of floating point thresholds.\n\nReturns:\nA PR curve entry.", "source": "juraj-google-style"}
{"code": "def __init__(self, graph, title=\"GraphViewer\", handler=None, padding=PADDING):\n        \n        title = self._make_unique_title(title)\n\n        idaapi.GraphViewer.__init__(self, title)\n\n        self._graph = graph\n\n        if handler is None:\n            handler = self.DEFAULT_HANDLER\n\n        \n        \n        \n        if not isinstance(handler, BasicNodeHandler):\n            raise TypeError(\"Node handler must inherit from `BasicNodeHandler`.\")\n\n        self._default_handler = handler\n        self._padding = padding", "docstring": "Initialize the graph viewer.\n\nTo avoid bizarre IDA errors (crashing when creating 2 graphs with the same title,)\na counter is appended to the title (similar to \"Hex View-1\".)\n\nArgs:\ngraph: A NetworkX graph to display.\ntitle: The graph title.\nhandler: The default node handler to use when accessing node data.", "source": "juraj-google-style"}
{"code": "def swd_write(self, output, value, nbits):\n        \n        pDir = binpacker.pack(output, nbits)\n        pIn = binpacker.pack(value, nbits)\n        bitpos = self._dll.JLINK_SWD_StoreRaw(pDir, pIn, nbits)\n        if bitpos < 0:\n            raise errors.JLinkException(bitpos)\n\n        return bitpos", "docstring": "Writes bytes over SWD (Serial Wire Debug).\n\nArgs:\nself (JLink): the ``JLink`` instance\noutput (int): the output buffer offset to write to\nvalue (int): the value to write to the output buffer\nnbits (int): the number of bits needed to represent the ``output`` and\n``value``\n\nReturns:\nThe bit position of the response in the input buffer.", "source": "juraj-google-style"}
{"code": "def PrivateKeyFromWIF(wif):\n        \n        if wif is None or len(wif) is not 52:\n            raise ValueError('Please provide a wif with a length of 52 bytes (LEN: {0:d})'.format(len(wif)))\n\n        data = base58.b58decode(wif)\n\n        length = len(data)\n\n        if length is not 38 or data[0] is not 0x80 or data[33] is not 0x01:\n            raise ValueError(\"Invalid format!\")\n\n        checksum = Crypto.Hash256(data[0:34])[0:4]\n\n        if checksum != data[34:]:\n            raise ValueError(\"Invalid WIF Checksum!\")\n\n        return data[1:33]", "docstring": "Get the private key from a WIF key\n\nArgs:\nwif (str): The wif key\n\nReturns:\nbytes: The private key", "source": "juraj-google-style"}
{"code": "def indent_xml(elem, level=0, more_sibs=False):\n    \n    i = \"\\n\"\n    pad = \"    \"\n    if level:\n        i += (level - 1) * pad\n    num_kids = len(elem)\n    if num_kids:\n        if not elem.text or not elem.text.strip():\n            elem.text = i + pad\n            if level:\n                elem.text += pad\n        count = 0\n        for kid in elem:\n            if kid.tag == \"data\":\n                kid.text = \"*DATA*\"\n            indent_xml(kid, level + 1, count < num_kids - 1)\n            count += 1\n        if not elem.tail or not elem.tail.strip():\n            elem.tail = i\n            if more_sibs:\n                elem.tail += pad\n    else:\n        if level and (not elem.tail or not elem.tail.strip()):\n            elem.tail = i\n            if more_sibs:\n                elem.tail += pad", "docstring": "Indent an xml element object to prepare for pretty printing.\n\nTo avoid changing the contents of the original Element, it is\nrecommended that a copy is made to send to this function.\n\nArgs:\nelem: Element to indent.\nlevel: Int indent level (default is 0)\nmore_sibs: Bool, whether to anticipate further siblings.", "source": "juraj-google-style"}
{"code": "def create_from_binary(cls, binary_view):\n    nw_obj = cls()\n    offset = 0\n    previous_dr_offset = 0\n    header_size = cls._INFO.size\n    while (binary_view[offset] != 0):\n        header = cls._INFO.unpack(binary_view[offset:(offset + header_size)])[0]\n        length_len = (header & 15)\n        length_offset = ((header & 240) >> 4)\n        temp_len = ((offset + header_size) + length_len)\n        dr_length = int.from_bytes(binary_view[(offset + header_size):temp_len], 'little', signed=False)\n        if length_offset:\n            dr_offset = (int.from_bytes(binary_view[temp_len:(temp_len + length_offset)], 'little', signed=True) + previous_dr_offset)\n            previous_dr_offset = dr_offset\n        else:\n            dr_offset = None\n        offset += ((header_size + length_len) + length_offset)\n        nw_obj.data_runs.append((dr_length, dr_offset))\n    _MOD_LOGGER.debug('DataRuns object created successfully')\n    return nw_obj", "docstring": "Creates a new object DataRuns from a binary stream. The binary\nstream can be represented by a byte string, bytearray or a memoryview of the\nbytearray.\n\nArgs:\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\n\nReturns:\nDataRuns: New object using hte binary stream as source", "source": "codesearchnet"}
{"code": "def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict]=None):\n    output_dict = {}\n    if state_dict_keys is not None:\n        old_text = '\\n'.join(state_dict_keys)\n        new_text = old_text\n        for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():\n            if replacement is None:\n                new_text = re.sub(pattern, '', new_text)\n                continue\n            new_text = re.sub(pattern, replacement, new_text)\n        output_dict = dict(zip(old_text.split('\\n'), new_text.split('\\n')))\n    return output_dict", "docstring": "Converts old keys to new keys using the mapping and dynamically removes the 'ijepa.' prefix if necessary.\n\nArgs:\nstate_dict_keys (dict): The keys from the state_dict to convert.\n\nReturns:\ndict: A mapping from old keys to new keys.", "source": "github-repos"}
{"code": "def terminate_session(self, token):\n        \n\n        url = self.rest_url + \"/session/%s\" % token\n        response = self._delete(url)\n\n        \n        \n        if not response.ok:\n            return None\n\n        \n        return True", "docstring": "Terminates the session token, effectively logging out the user\nfrom all crowd-enabled services.\n\nArgs:\ntoken: The session token.\n\nReturns:\nTrue: If session terminated\n\nNone: If session termination failed", "source": "juraj-google-style"}
{"code": "def write_file(self, filename='HEADER'):\n    with open(filename, 'w') as f:\n        f.write((str(self) + '\\n'))", "docstring": "Writes Header into filename on disk.\n\nArgs:\nfilename: Filename and path for file to be written to disk", "source": "codesearchnet"}
{"code": "def update_plot_limits(ax, white_space):\n    if hasattr(ax, 'zz_dataLim'):\n        bounds = ax.xy_dataLim.bounds\n        ax.set_xlim((bounds[0] - white_space), ((bounds[0] + bounds[2]) + white_space))\n        ax.set_ylim((bounds[1] - white_space), ((bounds[1] + bounds[3]) + white_space))\n        bounds = ax.zz_dataLim.bounds\n        ax.set_zlim((bounds[0] - white_space), ((bounds[0] + bounds[2]) + white_space))\n    else:\n        bounds = ax.dataLim.bounds\n        assert (not any(map(np.isinf, bounds))), 'Cannot set bounds if dataLim has infinite elements'\n        ax.set_xlim((bounds[0] - white_space), ((bounds[0] + bounds[2]) + white_space))\n        ax.set_ylim((bounds[1] - white_space), ((bounds[1] + bounds[3]) + white_space))", "docstring": "Sets the limit options of a matplotlib plot.\n\nArgs:\nax: matplotlib axes\nwhite_space(float): whitespace added to surround the tight limit of the data\n\nNote: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d", "source": "codesearchnet"}
{"code": "def clusters_sites_obj(clusters):\n    \n    result = {}\n    all_clusters = get_all_clusters_sites()\n    clusters_sites = {c: s for (c, s) in all_clusters.items()\n                        if c in clusters}\n    for cluster, site in clusters_sites.items():\n\n        \n        result.update({cluster: get_site_obj(site)})\n    return result", "docstring": "Get all the corresponding sites of the passed clusters.\n\nArgs:\nclusters(list): list of string uid of sites (e.g 'rennes')\n\nReturn:\ndict corresponding to the mapping cluster uid to python-grid5000 site", "source": "juraj-google-style"}
{"code": "def _SkipField(tokenizer):\n  \n  if tokenizer.TryConsume('['):\n    \n    tokenizer.ConsumeIdentifier()\n    while tokenizer.TryConsume('.'):\n      tokenizer.ConsumeIdentifier()\n    tokenizer.Consume(']')\n  else:\n    tokenizer.ConsumeIdentifier()\n\n  _SkipFieldContents(tokenizer)\n\n  \n  \n  if not tokenizer.TryConsume(','):\n    tokenizer.TryConsume(';')", "docstring": "Skips over a complete field (name and value/message).\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.", "source": "juraj-google-style"}
{"code": "def remove(self):\n    if (not self._is_item):\n        raise TypeError(\"Should be called on an item, not ListNode's itself.\")\n    self.container.node_stack.remove(self)", "docstring": "Removes an item from ListNode.\n\nRaises:\nTypeError: If it's called on container ListNode (intstead of ListNode's item)\n\nNote:\nParent object should be explicitly saved.", "source": "codesearchnet"}
{"code": "def autogen_argparse_block(extra_args=[]):\n    \n    \n    \n    \n\n    grouped_args = []\n    \n    for argtup in __REGISTERED_ARGS__:\n        argstr_list, type_, default, help_ = argtup\n        argstr_set = set(argstr_list)\n        \n        \n        found = False\n        for index, (keyset, vals) in enumerate(grouped_args):\n            if len(keyset.intersection(argstr_set)) > 0:\n                \n                keyset.update(argstr_set)\n                vals.append(argtup)\n                found = True\n                break\n        if not found:\n            new_keyset = argstr_set\n            new_vals = [argtup]\n            grouped_args.append((new_keyset, new_vals))\n        \n    \n    multi_groups = []\n    for keyset, vals in grouped_args:\n        if len(vals) > 1:\n            multi_groups.append(vals)\n    if len(multi_groups) > 0:\n        import utool as ut\n        print('Following arg was specified multiple times')\n        print(ut.repr4(multi_groups, newlines=2))", "docstring": "SHOULD TURN ANY REGISTERED ARGS INTO A A NEW PARSING CONFIG\nFILE FOR BETTER --help COMMANDS\n\nimport utool as ut\n__REGISTERED_ARGS__ = ut.util_arg.__REGISTERED_ARGS__\n\nArgs:\nextra_args (list): (default = [])\n\nCommandLine:\npython -m utool.util_arg --test-autogen_argparse_block\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> import utool as ut\n>>> extra_args = []\n>>> result = ut.autogen_argparse_block(extra_args)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def remove_keywords_from_dict(self, keyword_dict):\n    for (clean_name, keywords) in keyword_dict.items():\n        if (not isinstance(keywords, list)):\n            raise AttributeError('Value of key {} should be a list'.format(clean_name))\n        for keyword in keywords:\n            self.remove_keyword(keyword)", "docstring": "To remove keywords from a dictionary\n\nArgs:\nkeyword_dict (dict): A dictionary with `str` key and (list `str`) as value\n\nExamples:\n>>> keyword_dict = {\n\"java\": [\"java_2e\", \"java programing\"],\n\"product management\": [\"PM\", \"product manager\"]\n}\n>>> keyword_processor.remove_keywords_from_dict(keyword_dict)\n\nRaises:\nAttributeError: If value for a key in `keyword_dict` is not a list.", "source": "codesearchnet"}
{"code": "def add_presence_listener(self, callback):\n        \n        listener_uid = uuid4()\n        self.presence_listeners[listener_uid] = callback\n        return listener_uid", "docstring": "Add a presence listener that will send a callback when the client receives\na presence update.\n\nArgs:\ncallback (func(roomchunk)): Callback called when a presence update arrives.\n\nReturns:\nuuid.UUID: Unique id of the listener, can be used to identify the listener.", "source": "juraj-google-style"}
{"code": "def _load_audio_list(self, path):\n    result = {}\n    for entry in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=4):\n        for i in range(len(entry)):\n            if (entry[i] == '\\\\N'):\n                entry[i] = None\n        if (len(entry) < 4):\n            entry.extend(([None] * (4 - len(entry))))\n        if ((not self.include_empty_licence) and (entry[2] is None)):\n            continue\n        if ((self.include_licenses is not None) and (entry[2] not in self.include_licenses)):\n            continue\n        result[entry[0]] = entry[1:]\n    return result", "docstring": "Load and filter the audio list.\n\nArgs:\npath (str): Path to the audio list file.\n\nReturns:\ndict: Dictionary of filtered sentences (id : username, license, attribution-url)", "source": "codesearchnet"}
{"code": "def create_object(self, obj_type, payload, return_fields=None):\n        \n        self._validate_obj_type_or_die(obj_type)\n\n        query_params = self._build_query_params(return_fields=return_fields)\n\n        url = self._construct_url(obj_type, query_params)\n        opts = self._get_request_options(data=payload)\n        self._log_request('post', url, opts)\n        if(self.session.cookies):\n            \n            \n            self.session.auth = None\n        r = self.session.post(url, **opts)\n\n        self._validate_authorized(r)\n\n        if r.status_code != requests.codes.CREATED:\n            response = utils.safe_json_load(r.content)\n            already_assigned = 'is assigned to another network view'\n            if response and already_assigned in response.get('text'):\n                exception = ib_ex.InfobloxMemberAlreadyAssigned\n            else:\n                exception = ib_ex.InfobloxCannotCreateObject\n            raise exception(\n                response=response,\n                obj_type=obj_type,\n                content=r.content,\n                args=payload,\n                code=r.status_code)\n\n        return self._parse_reply(r)", "docstring": "Create an Infoblox object of type 'obj_type'\n\nArgs:\nobj_type        (str): Infoblox object type,\ne.g. 'network', 'range', etc.\npayload       (dict): Payload with data to send\nreturn_fields (list): List of fields to be returned\nReturns:\nThe object reference of the newly create object\nRaises:\nInfobloxException", "source": "juraj-google-style"}
{"code": "def __init__(self, in_features: int, out_features: int, kernel_size: int=3, padding: int=1):\n    super().__init__()\n    self.layers = [nn.Conv2d(in_features, out_features, kernel_size=kernel_size, padding=padding, bias=False), nn.GroupNorm(32, out_features), nn.ReLU(inplace=True)]\n    for i, layer in enumerate(self.layers):\n        self.add_module(str(i), layer)", "docstring": "A basic module that executes conv - norm - in sequence used in MaskFormer.\n\nArgs:\nin_features (`int`):\nThe number of input features (channels).\nout_features (`int`):\nThe number of outputs features (channels).", "source": "github-repos"}
{"code": "def _GenerateStatsTable(self, feed_merger):\n    \n    rows = []\n    rows.append('<tr><th class=\"header\"/><th class=\"header\">Merged</th>'\n                '<th class=\"header\">Copied from old feed</th>'\n                '<th class=\"header\">Copied from new feed</th></tr>')\n    for merger in feed_merger.GetMergerList():\n      stats = merger.GetMergeStats()\n      if stats is None:\n        continue\n      merged, not_merged_a, not_merged_b = stats\n      rows.append('<tr><th class=\"header\">%s</th>'\n                  '<td class=\"header\">%d</td>'\n                  '<td class=\"header\">%d</td>'\n                  '<td class=\"header\">%d</td></tr>' %\n                  (merger.DATASET_NAME, merged, not_merged_a, not_merged_b))\n    return '<table>%s</table>' % '\\n'.join(rows)", "docstring": "Generate an HTML table of merge statistics.\n\nArgs:\nfeed_merger: The FeedMerger instance.\n\nReturns:\nThe generated HTML as a string.", "source": "juraj-google-style"}
{"code": "def verify_state(global_state_db, blockstore, bind_component, scheduler_type):\n    state_view_factory = StateViewFactory(global_state_db)\n    (start_block, prev_state_root) = search_for_present_state_root(blockstore, state_view_factory)\n    if (start_block is None):\n        LOGGER.info(\"Skipping state verification: chain head's state root is present\")\n        return\n    LOGGER.info('Recomputing missing state from block %s with %s scheduler', start_block, scheduler_type)\n    component_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10, name='Component')\n    component_dispatcher = Dispatcher()\n    component_service = Interconnect(bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10)\n    context_manager = ContextManager(global_state_db)\n    transaction_executor = TransactionExecutor(service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[])\n    component_service.set_check_connections(transaction_executor.check_connections)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST, tp_state_handlers.TpReceiptAddDataHandler(context_manager), component_thread_pool)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_EVENT_ADD_REQUEST, tp_state_handlers.TpEventAddHandler(context_manager), component_thread_pool)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_STATE_DELETE_REQUEST, tp_state_handlers.TpStateDeleteHandler(context_manager), component_thread_pool)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), component_thread_pool)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), component_thread_pool)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_REGISTER_REQUEST, processor_handlers.ProcessorRegisterHandler(transaction_executor.processor_manager), component_thread_pool)\n    component_dispatcher.add_handler(validator_pb2.Message.TP_UNREGISTER_REQUEST, processor_handlers.ProcessorUnRegisterHandler(transaction_executor.processor_manager), component_thread_pool)\n    component_dispatcher.start()\n    component_service.start()\n    process_blocks(initial_state_root=prev_state_root, blocks=blockstore.get_block_iter(start_block=start_block, reverse=False), transaction_executor=transaction_executor, context_manager=context_manager, state_view_factory=state_view_factory)\n    component_dispatcher.stop()\n    component_service.stop()\n    component_thread_pool.shutdown(wait=True)\n    transaction_executor.stop()\n    context_manager.stop()", "docstring": "Verify the state root hash of all blocks is in state and if not,\nreconstruct the missing state. Assumes that there are no \"holes\" in\nstate, ie starting from genesis, state is present for all blocks up to some\npoint and then not at all. If persist is False, this recomputes state in\nmemory for all blocks in the blockstore and verifies the state root\nhashes.\n\nRaises:\nInvalidChainError: The chain in the blockstore is not valid.\nExecutionError: An unrecoverable error was encountered during batch\nexecution.", "source": "codesearchnet"}
{"code": "def build_query_string(self, data):\n        \n        query = []\n        keys_to_be_removed = []\n        for key, value in data.items():\n            if key not in ['version', 'restApi', 'resourcePath']:\n                if not key == 'method':\n                    if key == 'points':\n                        value = ','.join(str(val) for val in value)\n                        keys_to_be_removed.append(key)\n                    query.append('{0}={1}'.format(key, value))\n                    keys_to_be_removed.append(key)\n                keys_to_be_removed.append(key)\n        querystring = '&'.join(query)\n        data['query'] = '{0}?{1}'.format(data['method'], querystring)\n        for k in list(set(keys_to_be_removed)):\n            del data[k]\n        return data", "docstring": "This method occurs after dumping the data into the class.\n\nArgs:\ndata (dict): dictionary of all the query values\n\nReturns:\ndata (dict): ordered dict of all the values", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(KeyWrappingData, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):\n        self._wrapping_method = primitives.Enumeration(enum=enums.WrappingMethod, tag=enums.Tags.WRAPPING_METHOD)\n        self._wrapping_method.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid struct missing the wrapping method attribute.')\n    if self.is_tag_next(enums.Tags.ENCRYPTION_KEY_INFORMATION, local_stream):\n        self._encryption_key_information = EncryptionKeyInformation()\n        self._encryption_key_information.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.MAC_SIGNATURE_KEY_INFORMATION, local_stream):\n        self._mac_signature_key_information = MACSignatureKeyInformation()\n        self._mac_signature_key_information.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.MAC_SIGNATURE, local_stream):\n        self._mac_signature = primitives.ByteString(tag=enums.Tags.MAC_SIGNATURE)\n        self._mac_signature.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream):\n        self._iv_counter_nonce = primitives.ByteString(tag=enums.Tags.IV_COUNTER_NONCE)\n        self._iv_counter_nonce.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):\n        self._encoding_option = primitives.Enumeration(enum=enums.EncodingOption, tag=enums.Tags.ENCODING_OPTION)\n        self._encoding_option.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the KeyWrappingData struct and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def qualNormGaussian(data, qualitative):\n    \n    genes, cells = data.shape\n    clusters = qualitative.shape[1]\n    output = np.zeros((genes, clusters))\n    missing_indices = []\n    qual_indices = []\n    for i in range(genes):\n        if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:\n            missing_indices.append(i)\n            continue\n        qual_indices.append(i)\n        threshold = (qualitative[i,:].max() - qualitative[i,:].min())/2.0\n        kmeans = KMeans(n_clusters = 2).fit(data[i,:].reshape((1, cells)))\n        assignments = kmeans.labels_\n        means = kmeans.cluster_centers_\n        high_mean = means.max()\n        low_mean = means.min()\n        for k in range(clusters):\n            if qualitative[i,k]>threshold:\n                output[i,k] = high_mean\n            else:\n                output[i,k] = low_mean\n    if missing_indices:\n        \n        M_init = output[qual_indices, :]\n        kmeans = KMeans(n_clusters = 2, init = M_init, max_iter = 1).fit(data[qual_indices, :])\n        assignments = kmeans.labels_\n        \n        for ind in missing_indices:\n            for k in range(clusters):\n                output[ind, k] = np.mean(data[ind, assignments==k])\n    \n    return output", "docstring": "Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.\n\nArgs:\ndata (array): 2d array of genes x cells\nqualitative (array): 2d array of numerical data - genes x clusters\n\nReturns:\nArray of starting positions for state estimation or\nclustering, with shape genes x clusters", "source": "juraj-google-style"}
{"code": "def _compile_aggregation_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:\n    etype = expr.etype\n    args = expr.args\n    typed_var_list = args[:(- 1)]\n    vars_list = [var for (_, (var, _)) in typed_var_list]\n    expr = args[(- 1)]\n    x = self._compile_expression(expr, scope)\n    etype2aggr = {'sum': x.sum, 'prod': x.prod, 'avg': x.avg, 'maximum': x.maximum, 'minimum': x.minimum, 'exists': x.exists, 'forall': x.forall}\n    if (etype[1] not in etype2aggr):\n        raise ValueError('Invalid aggregation expression {}.'.format(expr))\n    aggr = etype2aggr[etype[1]]\n    fluent = aggr(vars_list=vars_list)\n    return fluent", "docstring": "Compile an aggregation expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "codesearchnet"}
{"code": "def __sub__(self, other: Union[None, int, str, 'KeyPath']) -> 'KeyPath':\n    if other is None:\n        return self\n    if isinstance(other, str):\n        other = KeyPath.parse(other)\n    elif isinstance(other, int):\n        other = KeyPath(other)\n    if not isinstance(other, KeyPath):\n        raise TypeError(f'Cannot subtract KeyPath({self}) by {other!r}.')\n    max_len = max(len(self), len(other))\n    for pos in range(max_len):\n        if pos >= len(self):\n            raise ValueError(f'KeyPath subtraction failed: left path {self!r} is an ancestor of right path {other!r}.')\n        if pos >= len(other):\n            return KeyPath(self.keys[pos:])\n        if self.keys[pos] != other.keys[pos]:\n            raise ValueError(f'KeyPath subtraction failed: left path {self!r} and right path {other!r} are in different subtree.')\n    return KeyPath()", "docstring": "Finds the relative path of this path to the other.\n\nExample::\n\npath1 = pg.KeyPath.parse('a.b.c.d')\npath2 = pg.KeyPath.parse('a.b')\nassert path1 - path2 == 'c.d'\n\nArgs:\nother: Object to subtract, which can be None, int (as a depth-1 KeyPath),\nstring (parsed as a KeyPath) or a KeyPath object.\n\nReturns:\nRelative path of this path to the other.\n\nRaises:\nValueError: This path is an ancestor node of the other path,\nor these two paths are in different branch.", "source": "github-repos"}
{"code": "def delete(self, messageId):\n        \n        check_type(messageId, basestring, may_be_none=False)\n\n        \n        self._session.delete(API_ENDPOINT + '/' + messageId)", "docstring": "Delete a message.\n\nArgs:\nmessageId(basestring): The ID of the message to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"}
{"code": "def _should_recover(self, exception):\n    exception = _maybe_wrap_exception(exception)\n    if isinstance(exception, _RETRYABLE_STREAM_ERRORS):\n        _LOGGER.info('Observed recoverable stream error %s', exception)\n        return True\n    _LOGGER.info('Observed non-recoverable stream error %s', exception)\n    return False", "docstring": "Determine if an error on the RPC stream should be recovered.\n\nIf the exception is one of the retryable exceptions, this will signal\nto the consumer thread that it should \"recover\" from the failure.\n\nThis will cause the stream to exit when it returns :data:`False`.\n\nReturns:\nbool: Indicates if the caller should recover or shut down.\nWill be :data:`True` if the ``exception`` is \"acceptable\", i.e.\nin a list of retryable / idempotent exceptions.", "source": "codesearchnet"}
{"code": "def _flush_range(self, buffer, start, end):\n        \n        \n        with self._size_lock:\n            if not self._size_synched:\n                self._size_synched = True\n                try:\n                    self._size = self.raw._size\n                except (ObjectNotFoundError, UnsupportedOperation):\n                    self._size = 0\n\n        \n        \n        \n        while start > self._size:\n            sleep(self._FLUSH_WAIT)\n\n        \n        self._raw_flush(buffer, start, end)", "docstring": "Flush a buffer to a range of the file.\n\nMeant to be used asynchronously, used to provides parallel flushing of\nfile parts when applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.\nstart (int): Start of buffer position to flush.\nend (int): End of buffer position to flush.", "source": "juraj-google-style"}
{"code": "def get_rect(self):\n    if self.handle:\n        (left, top, right, bottom) = win32gui.GetWindowRect(self.handle)\n        return RECT(left, top, right, bottom)\n    else:\n        desktop = win32gui.GetDesktopWindow()\n        (left, top, right, bottom) = win32gui.GetWindowRect(desktop)\n        return RECT(left, top, right, bottom)", "docstring": "Get rectangle of app or desktop resolution\n\nReturns:\nRECT(left, top, right, bottom)", "source": "codesearchnet"}
{"code": "def glob(*args):\n    if ((len(args) is 1) and isinstance(args[0], list)):\n        args = args[0]\n    matches = []\n    for pattern in args:\n        for item in glob2.glob(pattern):\n            if (not os.path.isdir(item)):\n                matches.append(item)\n    return matches", "docstring": "Returns list of paths matching one or more wildcard patterns.\n\nArgs:\ninclude_dirs: Include directories in the output", "source": "codesearchnet"}
{"code": "def forward(self, outputs, targets):\n    outputs_without_aux = {k: v for k, v in outputs.items() if 'auxiliary_outputs' not in k}\n    indices = self.matcher(outputs_without_aux, targets)\n    num_boxes = sum((len(t['class_labels']) for t in targets))\n    num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n    num_boxes = torch.clamp(num_boxes, min=1).item()\n    losses = {}\n    for loss in self.losses:\n        l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes)\n        l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict}\n        losses.update(l_dict)\n    if 'auxiliary_outputs' in outputs:\n        for i, auxiliary_outputs in enumerate(outputs['auxiliary_outputs']):\n            indices = self.matcher(auxiliary_outputs, targets)\n            for loss in self.losses:\n                if loss == 'masks':\n                    continue\n                l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)\n                l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict}\n                l_dict = {k + f'_aux_{i}': v for k, v in l_dict.items()}\n                losses.update(l_dict)\n    if 'dn_auxiliary_outputs' in outputs:\n        if 'denoising_meta_values' not in outputs:\n            raise ValueError(\"The output must have the 'denoising_meta_values` key. Please, ensure that 'outputs' includes a 'denoising_meta_values' entry.\")\n        indices = self.get_cdn_matched_indices(outputs['denoising_meta_values'], targets)\n        num_boxes = num_boxes * outputs['denoising_meta_values']['dn_num_group']\n        for i, auxiliary_outputs in enumerate(outputs['dn_auxiliary_outputs']):\n            for loss in self.losses:\n                if loss == 'masks':\n                    continue\n                kwargs = {}\n                l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes, **kwargs)\n                l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict}\n                l_dict = {k + f'_dn_{i}': v for k, v in l_dict.items()}\n                losses.update(l_dict)\n    return losses", "docstring": "This performs the loss computation.\n\nArgs:\noutputs (`dict`, *optional*):\nDictionary of tensors, see the output specification of the model for the format.\ntargets (`List[dict]`, *optional*):\nList of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the\nlosses applied, see each loss' doc.", "source": "github-repos"}
{"code": "def render(self, width: int, height: int) -> List[str]:\n        \n        if width == 0 or height == 0:\n            return [''] * height\n\n        out_chars = [[' '] * width for _ in range(height)]\n\n        mid_x = int((width - 1) * self.horizontal_alignment)\n        mid_y = (height - 1) \n\n        \n        if self.left:\n            out_chars[mid_y][:mid_x + 1] = self.left * (mid_x + 1)\n        if self.right:\n            out_chars[mid_y][mid_x:] = self.right * (width - mid_x)\n\n        \n        if self.top:\n            for y in range(mid_y + 1):\n                out_chars[y][mid_x] = self.top\n        if self.bottom:\n            for y in range(mid_y, height):\n                out_chars[y][mid_x] = self.bottom\n\n        \n        mid = self.content or self.center\n        if self.content or self.center:\n            content_lines = mid.split('\\n')\n            y = mid_y - (len(content_lines) - 1) \n            for dy, content_line in enumerate(content_lines):\n                s = int((len(content_line) - 1) * self.horizontal_alignment)\n                x = mid_x - s\n                for dx, c in enumerate(content_line):\n                    out_chars[y + dy][x + dx] = c\n\n        return [''.join(line) for line in out_chars]", "docstring": "Returns a list of text lines representing the block's contents.\n\nArgs:\nwidth: The width of the output text. Must be at least as large as\nthe block's minimum width.\nheight: The height of the output text. Must be at least as large as\nthe block's minimum height.\n\nReturns:\nText pre-split into lines.", "source": "juraj-google-style"}
{"code": "def _parse_source_interface(self, config):\n        \n        match = re.search(r'vxlan source-interface ([^\\s]+)', config)\n        value = match.group(1) if match else self.DEFAULT_SRC_INTF\n        return dict(source_interface=value)", "docstring": "Parses the conf block and returns the vxlan source-interface value\n\nParses the provided configuration block and returns the value of\nvxlan source-interface.  If the value is not configured, this method\nwill return DEFAULT_SRC_INTF instead.\n\nArgs:\nconfig (str): The Vxlan config block to scan\n\nReturn:\ndict: A dict object intended to be merged into the resource dict", "source": "juraj-google-style"}
{"code": "def message_tc(self, message, max_length=255):\n        \n        if os.access(self.default_args.tc_out_path, os.W_OK):\n            message_file = '{}/message.tc'.format(self.default_args.tc_out_path)\n        else:\n            message_file = 'message.tc'\n\n        message = '{}\\n'.format(message)\n        if max_length - len(message) > 0:\n            with open(message_file, 'a') as mh:\n                mh.write(message)\n        elif max_length > 0:\n            with open(message_file, 'a') as mh:\n                mh.write(message[:max_length])\n        max_length -= len(message)", "docstring": "Write data to message_tc file in TcEX specified directory.\n\nThis method is used to set and exit message in the ThreatConnect Platform.\nThreatConnect only supports files of max_message_length.  Any data exceeding\nthis limit will be truncated by this method.\n\nArgs:\nmessage (string): The message to add to message_tc file", "source": "juraj-google-style"}
{"code": "def run_task_tests(self, task, torch_dtype='float32'):\n    if task not in self.pipeline_model_mapping:\n        self.skipTest(f'{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: `{task}` is not in `self.pipeline_model_mapping` for `{self.__class__.__name__}`.')\n    model_architectures = self.pipeline_model_mapping[task]\n    if not isinstance(model_architectures, tuple):\n        model_architectures = (model_architectures,)\n    at_least_one_model_is_tested = False\n    for model_architecture in model_architectures:\n        model_arch_name = model_architecture.__name__\n        model_type = model_architecture.config_class.model_type\n        for _prefix in ['Flax', 'TF']:\n            if model_arch_name.startswith(_prefix):\n                model_arch_name = model_arch_name[len(_prefix):]\n                break\n        if model_arch_name not in tiny_model_summary:\n            continue\n        tokenizer_names = tiny_model_summary[model_arch_name]['tokenizer_classes']\n        image_processor_names = []\n        feature_extractor_names = []\n        processor_classes = tiny_model_summary[model_arch_name]['processor_classes']\n        for cls_name in processor_classes:\n            if 'ImageProcessor' in cls_name:\n                image_processor_names.append(cls_name)\n            elif 'FeatureExtractor' in cls_name:\n                feature_extractor_names.append(cls_name)\n        processor_names = PROCESSOR_MAPPING_NAMES.get(model_type, None)\n        if not isinstance(processor_names, (list, tuple)):\n            processor_names = [processor_names]\n        commit = None\n        if model_arch_name in tiny_model_summary and 'sha' in tiny_model_summary[model_arch_name]:\n            commit = tiny_model_summary[model_arch_name]['sha']\n        repo_name = f'tiny-random-{model_arch_name}'\n        if TRANSFORMERS_TINY_MODEL_PATH != 'hf-internal-testing':\n            repo_name = model_arch_name\n        self.run_model_pipeline_tests(task, repo_name, model_architecture, tokenizer_names=tokenizer_names, image_processor_names=image_processor_names, feature_extractor_names=feature_extractor_names, processor_names=processor_names, commit=commit, torch_dtype=torch_dtype)\n        at_least_one_model_is_tested = True\n    if task in task_to_pipeline_and_spec_mapping:\n        pipeline, hub_spec = task_to_pipeline_and_spec_mapping[task]\n        compare_pipeline_args_to_hub_spec(pipeline, hub_spec)\n    if not at_least_one_model_is_tested:\n        self.skipTest(f'{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not find any model architecture in the tiny models JSON file for `{task}`.')", "docstring": "Run pipeline tests for a specific `task`\n\nArgs:\ntask (`str`):\nA task name. This should be a key in the mapping `pipeline_test_mapping`.\ntorch_dtype (`str`, `optional`, defaults to `'float32'`):\nThe torch dtype to use for the model. Can be used for FP16/other precision inference.", "source": "github-repos"}
{"code": "def parse_source(info):\n    if ('extractor_key' in info):\n        source = info['extractor_key']\n        lower_source = source.lower()\n        for key in SOURCE_TO_NAME:\n            lower_key = key.lower()\n            if (lower_source == lower_key):\n                source = SOURCE_TO_NAME[lower_key]\n        if (source != 'Generic'):\n            return source\n    if (('url' in info) and (info['url'] is not None)):\n        p = urlparse(info['url'])\n        if (p and p.netloc):\n            return p.netloc\n    return 'Unknown'", "docstring": "Parses the source info from an info dict generated by youtube-dl\n\nArgs:\ninfo (dict): The info dict to parse\n\nReturns:\nsource (str): The source of this song", "source": "codesearchnet"}
{"code": "def AddEvent(self, event):\n    \n    self._RaiseIfNotWritable()\n\n    self._storage_file.AddEvent(event)\n    self.number_of_events += 1\n\n    self._UpdateCounters(event)", "docstring": "Adds an event.\n\nArgs:\nevent (EventObject): an event.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def _create_per_replica(value_list, strategy):\n    always_wrap = _always_wrap(strategy)\n    per_replicas = distribute_utils.regroup(value_list, always_wrap=always_wrap)\n    return per_replicas", "docstring": "Creates a PerReplica.\n\nFor strategies other than OneDeviceStrategy, it creates a PerReplica whose\ntype spec is set to the element spec of the dataset. This helps avoid\nretracing for partial batches. Retracing is problematic for multi client when\ndifferent client retraces different time, since retracing changes the\ncollective keys in the tf.function, and causes mismatches among clients.\n\nFor single client strategies, this simply calls distribute_utils.regroup().\n\nArgs:\nvalue_list: a list of values, one for each replica.\nstrategy: the `tf.distribute.Strategy`.\n\nReturns:\na structure of PerReplica.", "source": "github-repos"}
{"code": "def update(self, **kwargs):\n        \n        kwargs = self._preprocess_params(kwargs)\n        kwargs = self.preprocess_kwargs_before_update(kwargs)\n        for key, value in kwargs.iteritems():\n            cls = type(self)\n            if not hasattr(cls, key) or isinstance(getattr(cls, key), property):\n                continue\n            if key not in self._no_overwrite_:\n                setattr(self, key, value)\n            if isinstance(getattr(self, key), OrderingList):\n                getattr(self, key).reorder()\n            elif isinstance(getattr(cls, key), AssociationProxyInstance):\n                target_name = getattr(cls, key).target_collection\n                target_rel = getattr(self, target_name)\n                if isinstance(target_rel, OrderingList):\n                    target_rel.reorder()\n        try:\n            self.session.commit()\n            return self\n        except Exception as e:\n            self.session.rollback()\n            raise e", "docstring": "Updates an instance.\n\nArgs:\n**kwargs  :  Arbitrary keyword arguments. Column names are\nkeywords and their new values are the values.\n\nExamples:\n\n>>> customer.update(email=\"newemail@x.com\", name=\"new\")", "source": "juraj-google-style"}
{"code": "def __init__(self, short_name, long_name, preregistered, int_id=None, max_messages=5):\n        \n\n        self.short_name = short_name\n        self.long_name = long_name\n        self.preregistered = preregistered\n        self.last_heartbeat = monotonic()\n        self.num_heartbeats = 0\n        self.id = int_id\n        self._state = UNKNOWN\n        self.messages = deque(maxlen=max_messages)\n        self.headline = None\n        self._last_message_id = 0", "docstring": "Constructor.\n\nArgs:\nshort_name (string): A unique short name for the service\nlong_name (string): A user friendly name for the service\npreregistered (bool): Whether this is an expected preregistered\nservice\nint_id (int): An internal numeric id for this service\nmax_messages (int): The maximum number of messages to keep", "source": "juraj-google-style"}
{"code": "def wmo(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `wmo`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `wmo`')\n    self._wmo = value", "docstring": "Corresponds to IDD Field `wmo` usually a 6 digit field. Used as\nalpha in EnergyPlus.\n\nArgs:\nvalue (str): value for IDD Field `wmo`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def IsEquivalent(self, other):\n    \n    if self.name and other.name:\n      return self.name == other.name\n\n    if self.name:\n      self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get(\n          self.name, self._DEFAULT_FAMILY_AND_VERSION)\n      return (\n          self_family == other.family and\n          self_version_tuple == other.version_tuple)\n\n    if self.family and self.version:\n      if other.name:\n        other_family, other_version_tuple = (\n            self._FAMILY_AND_VERSION_PER_NAME.get(\n                other.name, self._DEFAULT_FAMILY_AND_VERSION))\n      else:\n        other_family = other.family\n        other_version_tuple = other.version_tuple\n\n      return (\n          self.family == other_family and\n          self.version_tuple == other_version_tuple)\n\n    if self.family:\n      if other.name:\n        other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get(\n            other.name, self._DEFAULT_FAMILY_AND_VERSION)\n      else:\n        other_family = other.family\n\n      return self.family == other_family\n\n    return False", "docstring": "Determines if 2 operating system artifacts are equivalent.\n\nThis function compares the operating systems based in order of:\n* name derived from product\n* family and version\n* family\n\nArgs:\nother (OperatingSystemArtifact): operating system artifact attribute\ncontainer to compare with.\n\nReturns:\nbool: True if the operating systems are considered equivalent, False if\nthe most specific criteria do no match, or no criteria are available.", "source": "juraj-google-style"}
{"code": "def write_tree_newick(self, filename, hide_rooted_prefix=False):\n        \n        if not isinstance(filename, str):\n            raise TypeError(\"filename must be a str\")\n        treestr = self.newick()\n        if hide_rooted_prefix:\n            if treestr.startswith('[&R]'):\n                treestr = treestr[4:].strip()\n            else:\n                warn(\"Specified hide_rooted_prefix, but tree was not rooted\")\n        if filename.lower().endswith('.gz'): \n            f = gopen(expanduser(filename),'wb',9); f.write(treestr.encode()); f.close()\n        else: \n            f = open(expanduser(filename),'w'); f.write(treestr); f.close()", "docstring": "Write this ``Tree`` to a Newick file\n\nArgs:\n``filename`` (``str``): Path to desired output file (plain-text or gzipped)", "source": "juraj-google-style"}
{"code": "def decode(self, encoded):\n    encoded = super().decode(encoded)\n    if (encoded.numel() > 1):\n        raise ValueError('``decode`` decodes one label at a time, use ``batch_decode`` instead.')\n    return self.itos[encoded.squeeze().item()]", "docstring": "Decodes ``encoded`` label.\n\nArgs:\nencoded (torch.Tensor): Encoded label.\n\nReturns:\nobject: Label decoded from ``encoded``.", "source": "codesearchnet"}
{"code": "def rotated_printing(self, action):\n        \n        if action=='rotate':\n            action='1'\n        elif action=='cancel':\n            action='0'\n        else:\n            raise RuntimeError('Invalid action.')\n        self.send(chr(27)+chr(105)+chr(76)+action)", "docstring": "Calling this function applies the desired action to the printing orientation\nof the printer.\n\nArgs:\naction: The desired printing orientation. 'rotate' enables rotated printing. 'normal' disables rotated printing.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"}
{"code": "def add_electrode(self, electrode, label=None):\n    if (not label):\n        label = 'Electrode {}'.format((len(self._electrodes) + 1))\n    self._electrodes[label] = electrode", "docstring": "Add an electrode to the plot.\n\nArgs:\nelectrode: An electrode. All electrodes satisfying the\nAbstractElectrode interface should work.\nlabel: A label for the electrode. If None, defaults to a counting\nsystem, i.e. 'Electrode 1', 'Electrode 2', ...", "source": "codesearchnet"}
{"code": "def _process_counter_example(self, mma, w_string):\n        \n        diff = len(w_string)\n        same = 0\n        membership_answer = self._membership_query(w_string)\n        while True:\n            i = (same + diff) / 2\n            access_string = self._run_in_hypothesis(mma, w_string, i)\n            if membership_answer != self._membership_query(access_string + w_string[i:]):\n                diff = i\n            else:\n                same = i\n            if diff - same == 1:\n                break\n        exp = w_string[diff:]\n        self.observation_table.em_vector.append(exp)\n        for row in self.observation_table.sm_vector + self.observation_table.smi_vector:\n            self._fill_table_entry(row, exp)\n        return 0", "docstring": "Process a counterexample in the Rivest-Schapire way.\nArgs:\nmma (DFA): The hypothesis automaton\nw_string (str): The examined string to be consumed\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _ParsePropertiesXMLFile(self, xml_data):\n    \n    xml_root = ElementTree.fromstring(xml_data)\n\n    properties = {}\n    for xml_element in xml_root.iter():\n      if not xml_element.text:\n        continue\n\n      \n      \n      _, _, name = xml_element.tag.partition('}')\n\n      \n      if name == 'lpstr':\n        continue\n\n      property_name = self._PROPERTY_NAMES.get(name, None)\n      if not property_name:\n        property_name = self._FormatPropertyName(name)\n\n      properties[property_name] = xml_element.text\n\n    return properties", "docstring": "Parses a properties XML file.\n\nArgs:\nxml_data (bytes): data of a _rels/.rels XML file.\n\nReturns:\ndict[str, object]: properties.\n\nRaises:\nzipfile.BadZipfile: if the properties XML file cannot be read.", "source": "juraj-google-style"}
{"code": "def audio(self, audio, sample_rate, name=None, subdir=''):\n    from chainerui.report.audio_report import check_available\n    if (not check_available()):\n        return\n    from chainerui.report.audio_report import report as _audio\n    col_name = self.get_col_name(name, 'audio')\n    (out_dir, rel_out_dir) = self.get_subdir(subdir)\n    (filename, _) = _audio(audio, sample_rate, out_dir, col_name)\n    self.audios[col_name] = os.path.join(rel_out_dir, filename)\n    self.count += 1", "docstring": "Summary audio to listen on web browser.\n\nArgs:\naudio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \\\n:class:`chainer.Variable`): sampled wave array.\nsample_rate (int): sampling rate.\nname (str): name of image. set as column name. when not setting,\nassigned ``'audio'`` + sequential number.\nsubdir (str): sub-directory path of output.", "source": "codesearchnet"}
{"code": "def energy(self, sample_like, dtype=np.float):\n        \n        energy, = self.energies(sample_like, dtype=dtype)\n        return energy", "docstring": "The energy of the given sample.\n\nArgs:\nsample_like (samples_like):\nA raw sample. `sample_like` is an extension of\nNumPy's array_like structure. See :func:`.as_samples`.\n\ndtype (:class:`numpy.dtype`, optional):\nThe data type of the returned energies. Defaults to float.\n\nReturns:\nThe energy.", "source": "juraj-google-style"}
{"code": "def get_labels_encoder(self, data_dir):\n    label_filepath = os.path.join(data_dir, self.vocab_filename)\n    return text_encoder.TokenTextEncoder(label_filepath)", "docstring": "Builds encoder for the given class labels.\n\nArgs:\ndata_dir: data directory\n\nReturns:\nAn encoder for class labels.", "source": "codesearchnet"}
{"code": "def lookup_subclass(cls, d):\n        \n        try:\n            typeid = d[\"typeid\"]\n        except KeyError:\n            raise FieldError(\"typeid not present in keys %s\" % list(d))\n\n        subclass = cls._subcls_lookup.get(typeid, None)\n        if not subclass:\n            raise FieldError(\"'%s' not a valid typeid\" % typeid)\n        else:\n            return subclass", "docstring": "Look up a class based on a serialized dictionary containing a typeid\n\nArgs:\nd (dict): Dictionary with key \"typeid\"\n\nReturns:\nSerializable subclass", "source": "juraj-google-style"}
{"code": "def pose2mat(pose):\n    \n    homo_pose_mat = np.zeros((4, 4), dtype=np.float32)\n    homo_pose_mat[:3, :3] = quat2mat(pose[1])\n    homo_pose_mat[:3, 3] = np.array(pose[0], dtype=np.float32)\n    homo_pose_mat[3, 3] = 1.\n    return homo_pose_mat", "docstring": "Converts pose to homogeneous matrix.\n\nArgs:\npose: a (pos, orn) tuple where pos is vec3 float cartesian, and\norn is vec4 float quaternion.\n\nReturns:\n4x4 homogeneous matrix", "source": "juraj-google-style"}
{"code": "def get_bytes(obj):\n    try:\n        obj = obj.read(_NUM_SIGNATURE_BYTES)\n    except AttributeError:\n        pass\n    kind = type(obj)\n    if (kind is bytearray):\n        return signature(obj)\n    if (kind is str):\n        return get_signature_bytes(obj)\n    if (kind is bytes):\n        return signature(obj)\n    if (kind is memoryview):\n        return signature(obj).tolist()\n    raise TypeError(('Unsupported type as file input: %s' % kind))", "docstring": "Infers the input type and reads the first 262 bytes,\nreturning a sliced bytearray.\n\nArgs:\nobj: path to readable, file, bytes or bytearray.\n\nReturns:\nFirst 262 bytes of the file content as bytearray type.\n\nRaises:\nTypeError: if obj is not a supported type.", "source": "codesearchnet"}
{"code": "def wait(self, timeout=None, raise_error=True):\n        \n        return self.get(timeout=timeout, raise_error=raise_error)", "docstring": "alias of get\nArgs:\ntimeout (float): timeout seconds\nraise_error (bool): default true, whether to raise error if element not found\n\nRaises:\nWDAElementNotFoundError", "source": "juraj-google-style"}
{"code": "def get_country_name_from_m49(cls, m49, use_live=True, exception=None):\n        \n        \n        iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)\n        if iso3 is not None:\n            return cls.get_country_name_from_iso3(iso3, exception=exception)\n        return None", "docstring": "Get country name from M49 code\n\nArgs:\nm49 (int): M49 numeric code for which to get country name\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: Country name", "source": "juraj-google-style"}
{"code": "def verify(value, msg):\n    return (bool(value) and converts_to_proto(value, msg) and successfuly_encodes(msg) and special_typechecking(value, msg))", "docstring": "C-style validator\n\nKeyword arguments:\nvalue -- dictionary to validate (required)\nmsg -- the protobuf schema to validate against (required)\n\nReturns:\nTrue: If valid input\nFalse: If invalid input", "source": "codesearchnet"}
{"code": "def _string_to_int(x, vocab):\n  \n\n  def _map_to_int(x):\n    \n    table = lookup.index_table_from_tensor(\n        vocab,\n        default_value=len(vocab))\n    return table.lookup(x)\n\n  return _map_to_int(x)", "docstring": "Given a vocabulary and a string tensor `x`, maps `x` into an int tensor.\nArgs:\nx: A `Column` representing a string value.\nvocab: list of strings.\n\nReturns:\nA `Column` where each string value is mapped to an integer representing\nits index in the vocab. Out of vocab values are mapped to len(vocab).", "source": "juraj-google-style"}
{"code": "def match_rules_context_multi(tree, rules, parent_context={}):\n    \n    all_contexts = []\n    for template, match_rules in rules.items():\n        context = parent_context.copy()\n        if match_template(tree, template, context):\n            child_contextss = []\n            if not match_rules:\n                all_contexts += [context]\n            else:\n                for key, child_rules in match_rules.items():\n                    child_contextss.append(match_rules_context_multi(context[key], child_rules, context))\n                all_contexts += cross_context(child_contextss)    \n    return all_contexts", "docstring": "Recursively matches a Tree structure with rules and returns context\n\nArgs:\ntree (Tree): Parsed tree structure\nrules (dict): See match_rules\nparent_context (dict): Context of parent call\nReturns:\ndict: Context matched dictionary of matched rules or\nNone if no match", "source": "juraj-google-style"}
{"code": "def _shuffle_single(fname, extra_fn=None):\n  \n  records = read_records(fname)\n  random.shuffle(records)\n  if extra_fn is not None:\n    records = extra_fn(records)\n  out_fname = fname.replace(UNSHUFFLED_SUFFIX, \"\")\n  write_records(records, out_fname)\n  tf.gfile.Remove(fname)", "docstring": "Shuffle a single file of records.\n\nArgs:\nfname: a string\nextra_fn: an optional function from list of TFRecords to list of TFRecords\nto be called after shuffling.", "source": "juraj-google-style"}
{"code": "def _ParseTriggerEndTime(self, parser_mediator, trigger):\n    time_elements_tuple = (trigger.end_date.year, trigger.end_date.month, trigger.end_date.day_of_month, 0, 0, 0)\n    date_time = None\n    if (time_elements_tuple != (0, 0, 0, 0, 0, 0)):\n        try:\n            date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n            date_time.is_local_time = True\n            date_time._precision = dfdatetime_definitions.PRECISION_1_DAY\n        except ValueError:\n            parser_mediator.ProduceExtractionWarning('invalid trigger end time: {0!s}'.format(time_elements_tuple))\n    return date_time", "docstring": "Parses the end time from a trigger.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntrigger (job_trigger): a trigger.\n\nReturns:\ndfdatetime.DateTimeValues: last run date and time or None if not\navailable.", "source": "codesearchnet"}
{"code": "def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37', get_compounds=True):\n    has_changed = False\n    compounds = variant_obj.get('compounds', [])\n    if (compounds and get_compounds):\n        if ('not_loaded' not in compounds[0]):\n            new_compounds = store.update_variant_compounds(variant_obj)\n            variant_obj['compounds'] = new_compounds\n            has_changed = True\n        variant_obj['compounds'] = sorted(variant_obj['compounds'], key=(lambda compound: (- compound['combined_score'])))\n    variant_genes = variant_obj.get('genes')\n    if (variant_genes is not None):\n        for gene_obj in variant_genes:\n            if (not gene_obj['hgnc_id']):\n                continue\n            if (gene_obj.get('hgnc_symbol') is None):\n                hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build)\n                if (not hgnc_gene):\n                    continue\n                has_changed = True\n                gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']\n    if (update and has_changed):\n        variant_obj = store.update_variant(variant_obj)\n    variant_obj['comments'] = store.events(institute_obj, case=case_obj, variant_id=variant_obj['variant_id'], comments=True)\n    if variant_genes:\n        variant_obj.update(get_predictions(variant_genes))\n        if (variant_obj.get('category') == 'cancer'):\n            variant_obj.update(get_variant_info(variant_genes))\n    for compound_obj in compounds:\n        compound_obj.update(get_predictions(compound_obj.get('genes', [])))\n    if isinstance(variant_obj.get('acmg_classification'), int):\n        acmg_code = ACMG_MAP[variant_obj['acmg_classification']]\n        variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code]\n    variant_length = variant_obj.get('length')\n    variant_obj['length'] = {100000000000: 'inf', (- 1): 'n.d.'}.get(variant_length, variant_length)\n    if (not ('end_chrom' in variant_obj)):\n        variant_obj['end_chrom'] = variant_obj['chromosome']\n    return variant_obj", "docstring": "Parse information about variants.\n\n- Adds information about compounds\n- Updates the information about compounds if necessary and 'update=True'\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_obj(scout.models.Institute)\ncase_obj(scout.models.Case)\nvariant_obj(scout.models.Variant)\nupdate(bool): If variant should be updated in database\ngenome_build(str)", "source": "codesearchnet"}
{"code": "def _call_method(self, method, req, resp_class):\n    \n    payload = req.SerializeToString()\n    headers = {\n        'Content-Type': 'application/x-protobuf',\n        'Content-Length': str(len(payload)),\n        'X-Goog-Api-Format-Version': '2'\n        }\n    response, content = self._http.request(\n        '%s:%s' % (self._url, method),\n        method='POST', body=payload, headers=headers)\n    if response.status != 200:\n      raise _make_rpc_error(method, response, content)\n    resp = resp_class()\n    resp.ParseFromString(content)\n    return resp", "docstring": "_call_method call the given RPC method over HTTP.\n\nIt uses the given protobuf message request as the payload and\nreturns the deserialized protobuf message response.\n\nArgs:\nmethod: RPC method name to be called.\nreq: protobuf message for the RPC request.\nresp_class: protobuf message class for the RPC response.\n\nReturns:\nDeserialized resp_class protobuf message instance.\n\nRaises:\nRPCError: The rpc method call failed.", "source": "juraj-google-style"}
{"code": "def call_boxes(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n\n    def _is_valid_text_input(t):\n        if isinstance(t, str):\n            return True\n        elif isinstance(t, (list, tuple)):\n            if len(t) == 0:\n                return True\n            elif isinstance(t[0], str):\n                return True\n            elif isinstance(t[0], (list, tuple)):\n                return len(t[0]) == 0 or isinstance(t[0][0], str)\n            else:\n                return False\n        else:\n            return False\n    if text_pair is not None:\n        if not _is_valid_text_input(text):\n            raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')\n        if not isinstance(text_pair, (list, tuple)):\n            raise ValueError('words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    elif not isinstance(text, (list, tuple)):\n        raise ValueError('Words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    if text_pair is not None:\n        is_batched = isinstance(text, (list, tuple))\n    else:\n        is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))\n    words = text if text_pair is None else text_pair\n    if boxes is None:\n        raise ValueError('You must provide corresponding bounding boxes')\n    if is_batched:\n        if len(words) != len(boxes):\n            raise ValueError('You must provide words and boxes for an equal amount of examples')\n        for words_example, boxes_example in zip(words, boxes):\n            if len(words_example) != len(boxes_example):\n                raise ValueError('You must provide as many words as there are bounding boxes')\n    elif len(words) != len(boxes):\n        raise ValueError('You must provide as many words as there are bounding boxes')\n    if is_batched:\n        if text_pair is not None and len(text) != len(text_pair):\n            raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')\n        batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n        is_pair = bool(text_pair is not None)\n        return self.batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n    else:\n        return self.encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\nsequences with word-level normalized bounding boxes and optional labels.\n\nArgs:\ntext (`str`, `List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings\n(words of a single example or questions of a batch of examples) or a list of list of strings (batch of\nwords).\ntext_pair (`List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence should be a list of strings\n(pretokenized string).\nboxes (`List[List[int]]`, `List[List[List[int]]]`):\nWord-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.\nword_labels (`List[int]`, `List[List[int]]`, *optional*):\nWord-level integer labels (for token classification tasks such as FUNSD, CORD).", "source": "github-repos"}
{"code": "def getVersionListCount(self, orgresource):\n        \n\n        url = nurls['getVersionListCount']\n\n        data = {'userid': self.user_id,\n                'useridx': self.useridx,\n                'orgresource': orgresource,\n                }\n\n        r = self.session.post(url = url, data = data)\n        j = json.loads(r.text)\n\n        if j['message'] != 'success':\n            print \"[*] Error getVersionListCount: \" + j['message']\n            return False\n        else:\n            return int(j['resultvalue']['count'])", "docstring": "GetVersionListCount\n\nArgs:\norgresource: File path\n\nReturns:\nInteger number: # of version list\nFalse: Failed to get property", "source": "juraj-google-style"}
{"code": "def _URange(s):\n  \n  a = s.split(\"..\")\n  if len(a) == 1:\n    return [_UInt(a[0])]\n  if len(a) == 2:\n    lo = _UInt(a[0])\n    hi = _UInt(a[1])\n    if lo < hi:\n      return range(lo, hi + 1)\n  raise InputError(\"invalid Unicode range %s\" % (s,))", "docstring": "Converts string to Unicode range.\n\n'0001..0003' => [1, 2, 3].\n'0001' => [1].\n\nArgs:\ns: string to convert\n\nReturns:\nUnicode range\n\nRaises:\nInputError: the string is not a valid Unicode range.", "source": "juraj-google-style"}
{"code": "def text(cls, text, *, resize=None, single_use=None, selective=None):\n    return cls(types.KeyboardButton(text), resize=resize, single_use=single_use, selective=selective)", "docstring": "Creates a new button with the given text.\n\nArgs:\nresize (`bool`):\nIf present, the entire keyboard will be reconfigured to\nbe resized and be smaller if there are not many buttons.\n\nsingle_use (`bool`):\nIf present, the entire keyboard will be reconfigured to\nbe usable only once before it hides itself.\n\nselective (`bool`):\nIf present, the entire keyboard will be reconfigured to\nbe \"selective\". The keyboard will be shown only to specific\nusers. It will target users that are @mentioned in the text\nof the message or to the sender of the message you reply to.", "source": "codesearchnet"}
{"code": "def _get_table(name):\n  \n  \n  item = google.datalab.utils.commands.get_notebook_item(name)\n  if isinstance(item, bigquery.Table):\n    return item\n  \n  try:\n    return _existing_table_cache[name]\n  except KeyError:\n    table = bigquery.Table(name)\n    if table.exists():\n      _existing_table_cache[name] = table\n      return table\n  return None", "docstring": "Given a variable or table name, get a Table if it exists.\n\nArgs:\nname: the name of the Table or a variable referencing the Table.\nReturns:\nThe Table, if found.", "source": "juraj-google-style"}
{"code": "def persist_upstream_diagram(self, filepath):\n    assert isinstance(filepath, str), 'Step {} error, filepath must be str. Got {} instead'.format(self.name, type(filepath))\n    persist_as_png(self.upstream_structure, filepath)", "docstring": "Creates upstream steps diagram and persists it to disk as png file.\n\nPydot graph is created and persisted to disk as png file under the filepath directory.\n\nArgs:\nfilepath (str): filepath to which the png with steps visualization should\nbe persisted", "source": "codesearchnet"}
{"code": "def markdown_cell(markdown):\n    r\n    import utool as ut\n    markdown_header = ut.codeblock(\n        \n    )\n    markdown_footer = ut.codeblock(\n        \n    )\n    return (markdown_header + '\\n' +\n            ut.indent(repr_single_for_md(markdown), ' ' * 2) +\n            '\\n' + markdown_footer)", "docstring": "r\"\"\"\nArgs:\nmarkdown (str):\n\nReturns:\nstr: json formatted ipython notebook markdown cell\n\nCommandLine:\npython -m ibeis.templates.generate_notebook --exec-markdown_cell\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from ibeis.templates.generate_notebook import *  # NOQA\n>>> markdown = '# Title'\n>>> result = markdown_cell(markdown)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def _get_short_description(self):\n    if (self.description is None):\n        return None\n    lines = [x for x in self.description.split('\\n')]\n    if (len(lines) == 1):\n        return lines[0]\n    elif ((len(lines) >= 3) and (lines[1] == '')):\n        return lines[0]\n    return None", "docstring": "Return the first line of a multiline description\n\nReturns:\nstring: The short description, otherwise None", "source": "codesearchnet"}
{"code": "def sign(allocate_quota_request):\n    if (not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest)):\n        raise ValueError(u'Invalid request')\n    op = allocate_quota_request.allocateOperation\n    if ((op is None) or (op.methodName is None) or (op.consumerId is None)):\n        logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request)\n        raise ValueError(u'allocate_quota request must be initialized with an operation')\n    md5 = hashlib.md5()\n    md5.update(op.methodName.encode('utf-8'))\n    md5.update(b'\\x00')\n    md5.update(op.consumerId.encode('utf-8'))\n    if op.labels:\n        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n    for value_set in op.quotaMetrics:\n        md5.update(b'\\x00')\n        md5.update(value_set.metricName.encode('utf-8'))\n        for mv in value_set.metricValues:\n            metric_value.update_hash(md5, mv)\n    md5.update(b'\\x00')\n    return md5.digest()", "docstring": "Obtains a signature for an operation in a `AllocateQuotaRequest`\n\nArgs:\nop (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an\noperation used in a `AllocateQuotaRequest`\n\nReturns:\nstring: a secure hash generated from the operation", "source": "codesearchnet"}
{"code": "def define_grid(self, matrix):\n        \n        self.style['grid-template-areas'] = ''.join(\"'%s'\"%(' '.join(x)) for x in matrix)", "docstring": "Populates the Table with a list of tuples of strings.\n\nArgs:\nmatrix (list): list of iterables of strings (lists or something else).\nItems in the matrix have to correspond to a key for the children.", "source": "juraj-google-style"}
{"code": "def WinChmod(filename, acl_list, user=None):\n    if (user is None):\n        user = win32api.GetUserName()\n    if (not os.path.exists(filename)):\n        raise RuntimeError(('filename %s does not exist' % filename))\n    acl_bitmask = 0\n    for acl in acl_list:\n        acl_bitmask |= getattr(ntsecuritycon, acl)\n    dacl = win32security.ACL()\n    (win_user, _, _) = win32security.LookupAccountName('', user)\n    dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user)\n    security_descriptor = win32security.GetFileSecurity(filename, win32security.DACL_SECURITY_INFORMATION)\n    security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl, DACL_DEFAULT)\n    win32security.SetFileSecurity(filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor)", "docstring": "Provide chmod-like functionality for windows.\n\nDoco links:\ngoo.gl/n7YR1\ngoo.gl/rDv81\ngoo.gl/hDobb\n\nArgs:\nfilename: target filename for acl\n\nacl_list: list of ntsecuritycon acl strings to be applied with bitwise OR.\ne.g. [\"FILE_GENERIC_READ\", \"FILE_GENERIC_WRITE\"]\n\nuser: username string. If not specified we use the user we are running as.\n\nRaises:\nAttributeError: if a bad permission is passed\nRuntimeError: if filename doesn't exist", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.GetStepNames = channel.unary_unary(\n        '/gauge.messages.lspService/GetStepNames',\n        request_serializer=messages__pb2.StepNamesRequest.SerializeToString,\n        response_deserializer=messages__pb2.StepNamesResponse.FromString,\n        )\n    self.CacheFile = channel.unary_unary(\n        '/gauge.messages.lspService/CacheFile',\n        request_serializer=messages__pb2.CacheFileRequest.SerializeToString,\n        response_deserializer=lsp__pb2.Empty.FromString,\n        )\n    self.GetStepPositions = channel.unary_unary(\n        '/gauge.messages.lspService/GetStepPositions',\n        request_serializer=messages__pb2.StepPositionsRequest.SerializeToString,\n        response_deserializer=messages__pb2.StepPositionsResponse.FromString,\n        )\n    self.GetImplementationFiles = channel.unary_unary(\n        '/gauge.messages.lspService/GetImplementationFiles',\n        request_serializer=lsp__pb2.Empty.SerializeToString,\n        response_deserializer=messages__pb2.ImplementationFileListResponse.FromString,\n        )\n    self.ImplementStub = channel.unary_unary(\n        '/gauge.messages.lspService/ImplementStub',\n        request_serializer=messages__pb2.StubImplementationCodeRequest.SerializeToString,\n        response_deserializer=messages__pb2.FileDiff.FromString,\n        )\n    self.ValidateStep = channel.unary_unary(\n        '/gauge.messages.lspService/ValidateStep',\n        request_serializer=messages__pb2.StepValidateRequest.SerializeToString,\n        response_deserializer=messages__pb2.StepValidateResponse.FromString,\n        )\n    self.Refactor = channel.unary_unary(\n        '/gauge.messages.lspService/Refactor',\n        request_serializer=messages__pb2.RefactorRequest.SerializeToString,\n        response_deserializer=messages__pb2.RefactorResponse.FromString,\n        )\n    self.GetStepName = channel.unary_unary(\n        '/gauge.messages.lspService/GetStepName',\n        request_serializer=messages__pb2.StepNameRequest.SerializeToString,\n        response_deserializer=messages__pb2.StepNameResponse.FromString,\n        )\n    self.GetGlobPatterns = channel.unary_unary(\n        '/gauge.messages.lspService/GetGlobPatterns',\n        request_serializer=lsp__pb2.Empty.SerializeToString,\n        response_deserializer=messages__pb2.ImplementationFileGlobPatternResponse.FromString,\n        )\n    self.KillProcess = channel.unary_unary(\n        '/gauge.messages.lspService/KillProcess',\n        request_serializer=messages__pb2.KillProcessRequest.SerializeToString,\n        response_deserializer=lsp__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _get_genes(self, variant):\n        \n        transcripts = variant['transcripts']\n        ensembl_ids = [transcript['ensembl_id'] for transcript in\n                       transcripts if transcript['ensembl_id']]\n        hgnc_symbols = [transcript['hgnc_symbol'] for transcript in\n                        transcripts if transcript['hgnc_symbol']]\n        genes = get_gene_info(ensembl_ids, hgnc_symbols)\n        return genes", "docstring": "Add the genes for a variant\n\nGet the hgnc symbols from all transcripts and add them\nto the variant.\n\nArgs:\nvariant (dict): A variant dictionary\n\nReturns:\ngenes (list): A list of Genes", "source": "juraj-google-style"}
{"code": "def cummax(self, axis=None, skipna=True, *args, **kwargs):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        if axis:\n            self._validate_dtypes()\n        return self.__constructor__(\n            query_compiler=self._query_compiler.cummax(\n                axis=axis, skipna=skipna, **kwargs\n            )\n        )", "docstring": "Perform a cumulative maximum across the DataFrame.\n\nArgs:\naxis (int): The axis to take maximum on.\nskipna (bool): True to skip NA values, false otherwise.\n\nReturns:\nThe cumulative maximum of the DataFrame.", "source": "juraj-google-style"}
{"code": "def delete_variants(self, case_id, variant_type, category=None):\n        \n        category = category or ''\n        LOG.info(\"Deleting old {0} {1} variants for case {2}\".format(\n                    variant_type, category, case_id))\n        query = {'case_id': case_id, 'variant_type': variant_type}\n        if category:\n            query['category'] = category\n        result = self.variant_collection.delete_many(query)\n        LOG.info(\"{0} variants deleted\".format(result.deleted_count))", "docstring": "Delete variants of one type for a case\n\nThis is used when a case is reanalyzed\n\nArgs:\ncase_id(str): The case id\nvariant_type(str): 'research' or 'clinical'\ncategory(str): 'snv', 'sv' or 'cancer'", "source": "juraj-google-style"}
{"code": "def unstack(x, num=None, axis=0):\n    if any_symbolic_tensors((x,)):\n        return Unstack(num, axis).symbolic_call(x)\n    return backend.core.unstack(x, num=num, axis=axis)", "docstring": "Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.\n\nArgs:\nx: The input tensor.\nnum: The length of the dimension axis. Automatically inferred\nif `None`.\naxis: The axis along which to unpack.\n\nReturns:\nA list of tensors unpacked along the given axis.\n\nExample:\n\n>>> x = keras.ops.array([[1, 2], [3, 4]])\n>>> keras.ops.unstack(x, axis=0)\n[array([1, 2]), array([3, 4])]", "source": "github-repos"}
{"code": "def tmpdir(suffix='', prefix='tmp', dir=None):\n    tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n    (yield tmp)\n    shutil.rmtree(tmp)", "docstring": "Create a temporary directory with a context manager. The file is deleted when the context exits.\n\nThe prefix, suffix, and dir arguments are the same as for mkstemp().\n\nArgs:\nsuffix (str):  If suffix is specified, the file name will end with that suffix, otherwise there will be no\nsuffix.\nprefix (str):  If prefix is specified, the file name will begin with that prefix; otherwise,\na default prefix is used.\ndir (str):  If dir is specified, the file will be created in that directory; otherwise, a default directory is\nused.\nReturns:\nstr: path to the directory", "source": "codesearchnet"}
{"code": "def _get_next_partition(self) -> tuple[int, float]:\n    rank = self._working_tensor_shape.rank\n    if rank is None or rank == 0:\n        return (0, math.inf)\n    num_elems = self._working_tensor_shape.num_elements()\n\n    def num_partitions(axis: int) -> float:\n        axis_len = self._working_tensor_shape.dims[axis].value\n        slice_elems = num_elems \n        bytes_per_slice = slice_elems * self._dtype_size\n        slices_per_shard = self._shard_size_remaining \n        if slices_per_shard == 0:\n            return math.inf\n        return math.ceil(axis_len / slices_per_shard)\n    min_parts = num_partitions(0)\n    min_axis = 0\n    for axis in range(1, rank):\n        parts_along_axis = num_partitions(axis)\n        part_size = num_elems * self._dtype_size / parts_along_axis\n        if parts_along_axis < min_parts and part_size <= self._shard_size_remaining:\n            min_axis, min_parts = (axis, int(parts_along_axis))\n    return (min_axis, math.ceil(int(self._working_tensor_shape[min_axis]) / min_parts))", "docstring": "Gets tensor partition with size closest to shard_size_remaining.\n\nReturns:\nA tuple containing the axis and size of the next partition.", "source": "github-repos"}
{"code": "def trajectory(self):\n    traj = np.zeros((2, self.times.size))\n    for (t, time) in enumerate(self.times):\n        traj[(:, t)] = self.center_of_mass(time)\n    return traj", "docstring": "Calculates the center of mass for each time step and outputs an array\n\nReturns:", "source": "codesearchnet"}
{"code": "def infer_element_type(elements):\n    element_type = typehints.Union[[trivial_inference.instance_to_type(e) for e in elements]]\n    return element_type", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nInfer a Beam type for a list of elements.\n\nArgs:\nelements (List[Any]): A list of elements for which the type should be\ninferred.\n\nReturns:\nA Beam type encompassing all elements.", "source": "github-repos"}
{"code": "def apply_grad_zmat_tensor(grad_C, construction_table, cart_dist):\n    if (construction_table.index != cart_dist.index).any():\n        message = 'construction_table and cart_dist must use the same index'\n        raise ValueError(message)\n    X_dist = cart_dist.loc[(:, ['x', 'y', 'z'])].values.T\n    C_dist = np.tensordot(grad_C, X_dist, axes=([3, 2], [0, 1])).T\n    if (C_dist.dtype == np.dtype('i8')):\n        C_dist = C_dist.astype('f8')\n    try:\n        C_dist[(:, [1, 2])] = np.rad2deg(C_dist[(:, [1, 2])])\n    except AttributeError:\n        C_dist[(:, [1, 2])] = sympy.deg(C_dist[(:, [1, 2])])\n    from chemcoord.internal_coordinates.zmat_class_main import Zmat\n    cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']\n    dtypes = ['O', 'i8', 'f8', 'i8', 'f8', 'i8', 'f8']\n    new = pd.DataFrame(data=np.zeros((len(construction_table), 7)), index=cart_dist.index, columns=cols, dtype='f8')\n    new = new.astype(dict(zip(cols, dtypes)))\n    new.loc[(:, ['b', 'a', 'd'])] = construction_table\n    new.loc[(:, 'atom')] = cart_dist.loc[(:, 'atom')]\n    new.loc[(:, ['bond', 'angle', 'dihedral'])] = C_dist\n    return Zmat(new, _metadata={'last_valid_cartesian': cart_dist})", "docstring": "Apply the gradient for transformation to Zmatrix space onto cart_dist.\n\nArgs:\ngrad_C (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.\nThe mathematical details of the index layout is explained in\n:meth:`~chemcoord.Cartesian.get_grad_zmat()`.\nconstruction_table (pandas.DataFrame): Explained in\n:meth:`~chemcoord.Cartesian.get_construction_table()`.\ncart_dist (:class:`~chemcoord.Cartesian`):\nDistortions in cartesian space.\n\nReturns:\n:class:`Zmat`: Distortions in Zmatrix space.", "source": "codesearchnet"}
{"code": "def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool:\n    folder = Path(repo.working_dir)\n    with checkout_commit(repo, branching_point):\n        with open(folder / filename, 'r', encoding='utf-8') as f:\n            old_content = f.read()\n    with open(folder / filename, 'r', encoding='utf-8') as f:\n        new_content = f.read()\n    old_content_clean = clean_code(old_content)\n    new_content_clean = clean_code(new_content)\n    return old_content_clean == new_content_clean", "docstring": "Check if the diff is only in docstrings (or comments and whitespace) in a filename.\n\nArgs:\nrepo (`git.Repo`): A git repository (for instance the Transformers repo).\nbranching_point (`str`): The commit reference of where to compare for the diff.\nfilename (`str`): The filename where we want to know if the diff isonly in docstrings/comments.\n\nReturns:\n`bool`: Whether the diff is docstring/comments only or not.", "source": "github-repos"}
{"code": "def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):\n    if hgnc_id:\n        gene_obj = adapter.hgnc_gene(hgnc_id)\n        if (not gene_obj):\n            ValueError('Gene {} does not exist in database'.format(hgnc_id))\n        chrom = gene_obj['chromosome']\n        start = gene_obj['start']\n        end = gene_obj['end']\n    case_obj = adapter.case(case_id=case_id)\n    if (not case_obj):\n        raise ValueError('Case {} does not exist in database'.format(case_id))\n    log.info('Load clinical SNV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))\n    adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='snv', chrom=chrom, start=start, end=end)\n    vcf_sv_file = case_obj['vcf_files'].get('vcf_sv')\n    if vcf_sv_file:\n        log.info('Load clinical SV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))\n        adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='sv', chrom=chrom, start=start, end=end)\n    vcf_str_file = case_obj['vcf_files'].get('vcf_str')\n    if vcf_str_file:\n        log.info('Load clinical STR variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))\n        adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='str', chrom=chrom, start=start, end=end)\n    if case_obj['is_research']:\n        log.info('Load research SNV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))\n        adapter.load_variants(case_obj=case_obj, variant_type='research', category='snv', chrom=chrom, start=start, end=end)\n        vcf_sv_research = case_obj['vcf_files'].get('vcf_sv_research')\n        if vcf_sv_research:\n            log.info('Load research SV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))\n            adapter.load_variants(case_obj=case_obj, variant_type='research', category='sv', chrom=chrom, start=start, end=end)", "docstring": "Load all variants in a region defined by a HGNC id\n\nArgs:\nadapter (MongoAdapter)\ncase_id (str): Case id\nhgnc_id (int): If all variants from a gene should be uploaded\nchrom (str): If variants from coordinates should be uploaded\nstart (int): Start position for region\nend (int): Stop position for region", "source": "codesearchnet"}
{"code": "def _create_conversion_trie(strict):\n    \n    t = pygtrie.CharTrie()\n\n    for beta, uni in _map.BETACODE_MAP.items():\n        if strict:\n            t[beta] = uni\n        else:\n            \n            \n            \n            \n            diacritics = beta[1:]\n\n            perms = itertools.permutations(diacritics)\n            for perm in perms:\n                perm_str = beta[0] + ''.join(perm)\n                t[perm_str.lower()] = uni\n                t[perm_str.upper()] = uni\n\n    return t", "docstring": "Create the trie for betacode conversion.\n\nArgs:\ntext: The beta code text to convert. All of this text must be betacode.\nstrict: Flag to allow for flexible diacritic order on input.\n\nReturns:\nThe trie for conversion.", "source": "juraj-google-style"}
{"code": "def _create_checkable_action(self, text, conf_name, editorstack_method):\n        \n        def toogle(checked):\n            self.switch_to_plugin()\n            self._toggle_checkable_action(checked, editorstack_method,\n                                          conf_name)\n        action = create_action(self, text, toggled=toogle)\n        action.setChecked(CONF.get('editor', conf_name))\n        return action", "docstring": "Helper function to create a checkable action.\n\nArgs:\ntext (str): Text to be displayed in the action.\nconf_name (str): configuration setting associated with the action\neditorstack_method (str): name of EditorStack class that will be\nused to update the changes in each editorstack.", "source": "juraj-google-style"}
{"code": "def auto_docstring(obj=None, *, custom_intro=None, custom_args=None, checkpoint=None):\n\n    def auto_docstring_decorator(obj):\n        if len(obj.__qualname__.split('.')) > 1:\n            return auto_method_docstring(obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint)\n        else:\n            return auto_class_docstring(obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint)\n    if obj:\n        return auto_docstring_decorator(obj)\n    return auto_docstring_decorator", "docstring": "Automatically generates docstrings for classes and methods in the Transformers library.\n\nThis decorator can be used in the following forms:\n@auto_docstring\ndef my_function(...):\n...\nor\n@auto_docstring()\ndef my_function(...):\n...\nor\n@auto_docstring(custom_intro=\"Custom intro\", ...)\ndef my_function(...):\n...\n\nArgs:\ncustom_intro (str, optional): Custom introduction text to add to the docstring. This will replace the default\nintroduction text generated by the decorator before the Args section.\ncheckpoint (str, optional): Checkpoint name to use in the docstring. This should be automatically inferred from the\nmodel configuration class, but can be overridden if needed.", "source": "github-repos"}
{"code": "def get_conversion_factor(self, new_unit):\n        \n        uo_base, ofactor = self.as_base_units\n        un_base, nfactor = Unit(new_unit).as_base_units\n        units_new = sorted(un_base.items(),\n                           key=lambda d: _UNAME2UTYPE[d[0]])\n        units_old = sorted(uo_base.items(),\n                           key=lambda d: _UNAME2UTYPE[d[0]])\n        factor = ofactor / nfactor\n        for uo, un in zip(units_old, units_new):\n            if uo[1] != un[1]:\n                raise UnitError(\"Units %s and %s are not compatible!\" % (uo, un))\n            c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]\n            factor *= (c[uo[0]] / c[un[0]]) ** uo[1]\n        return factor", "docstring": "Returns a conversion factor between this unit and a new unit.\nCompound units are supported, but must have the same powers in each\nunit type.\n\nArgs:\nnew_unit: The new unit.", "source": "juraj-google-style"}
{"code": "def adversary(self, name, owner=None, **kwargs):\n    return Adversary(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Adversary TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def get_recipe(self, recipe_name):\n        \n        if recipe_name.endswith('.yaml'):\n            recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)\n        else:\n            recipe = self._recipes.get(recipe_name)\n        if recipe is None:\n            raise RecipeNotFoundError(\"Could not find recipe\", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])\n\n        return recipe", "docstring": "Get a recipe by name.\n\nArgs:\nrecipe_name (str): The name of the recipe to fetch. Can be either the\nyaml file name or the name of the recipe.", "source": "juraj-google-style"}
{"code": "def __init__(self, temperature=1.0, max_fine_history_length=512, max_fine_input_length=1024, n_fine_codebooks=8, **kwargs):\n    super().__init__(temperature=temperature)\n    self.max_fine_history_length = max_fine_history_length\n    self.max_fine_input_length = max_fine_input_length\n    self.n_fine_codebooks = n_fine_codebooks", "docstring": "Class that holds a generation configuration for [`BarkFineModel`].\n\n[`BarkFineModel`] is an autoencoder model, so should not usually be used for generation. However, under the\nhood, it uses `temperature` when used by [`BarkModel`]\n\nThis configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the\ndocumentation from [`GenerationConfig`] for more information.\n\nArgs:\ntemperature (`float`, *optional*):\nThe value used to modulate the next token probabilities.\nmax_fine_history_length (`int`, *optional*, defaults to 512):\nMax length of the fine history vector.\nmax_fine_input_length (`int`, *optional*, defaults to 1024):\nMax length of fine input vector.\nn_fine_codebooks (`int`, *optional*, defaults to 8):\nNumber of codebooks used.", "source": "github-repos"}
{"code": "def handle_app_update(self, task_id, future, memo_cbk=False):\n    if (not self.tasks[task_id]['app_fu'].done()):\n        logger.error('Internal consistency error: app_fu is not done for task {}'.format(task_id))\n    if (not (self.tasks[task_id]['app_fu'] == future)):\n        logger.error('Internal consistency error: callback future is not the app_fu in task structure, for task {}'.format(task_id))\n    if (not memo_cbk):\n        self.memoizer.update_memo(task_id, self.tasks[task_id], future)\n        if (self.checkpoint_mode == 'task_exit'):\n            self.checkpoint(tasks=[task_id])\n    if (self.tasks[task_id]['app_fu'] and self.tasks[task_id]['app_fu'].done() and (self.tasks[task_id]['app_fu'].exception() is None) and (self.tasks[task_id]['executor'] != 'data_manager') and (self.tasks[task_id]['func_name'] != '_ftp_stage_in') and (self.tasks[task_id]['func_name'] != '_http_stage_in')):\n        for dfu in self.tasks[task_id]['app_fu'].outputs:\n            f = dfu.file_obj\n            if (isinstance(f, File) and f.is_remote()):\n                self.data_manager.stage_out(f, self.tasks[task_id]['executor'])\n    return", "docstring": "This function is called as a callback when an AppFuture\nis in its final state.\n\nIt will trigger post-app processing such as checkpointing\nand stageout.\n\nArgs:\ntask_id (string) : Task id\nfuture (Future) : The relevant app future (which should be\nconsistent with the task structure 'app_fu' entry\n\nKWargs:\nmemo_cbk(Bool) : Indicates that the call is coming from a memo update,\nthat does not require additional memo updates.", "source": "codesearchnet"}
{"code": "def handle(self, message, connection):\n        \n\n        handler = self._handlers.get((message.msgtype, message.revision))\n\n        if handler is None:\n            handler = self._handlers.get(message.msgtype)\n\n        if handler is None:\n            raise ProtocolError(\"%s not expected on server\" % message)\n\n        try:\n            work = yield handler(message, connection)\n        except Exception as e:\n            log.error(\"error handling message %r: %r\", message, e)\n            log.debug(\"  message header %r content %r\", message.header, message.content, exc_info=1)\n            work = connection.error(message, repr(e))\n        raise gen.Return(work)", "docstring": "Delegate a received message to the appropriate handler.\n\nArgs:\nmessage (Message) :\nThe message that was receive that needs to be handled\n\nconnection (ServerConnection) :\nThe connection that received this message\n\nRaises:\nProtocolError", "source": "juraj-google-style"}
{"code": "def _set_typeahead(cls, el, value):\n    PlaceholderHandler.reset_placeholder_dropdown(el)\n    if ((not value) and (not el.value)):\n        DropdownHandler.set_dropdown_glyph(el.id, 'glyphicon-alert')\n        return\n    if (len(value) == 1):\n        source = value[0]['source'].strip()\n        dropdown_el = DropdownHandler.set_dropdown_glyph(el.id, 'glyphicon-eye-open')\n        dropdown_content = \"<span class='gray_text'>&nbsp;(%s)</span>\"\n        if source:\n            dropdown_el.html = (dropdown_content % source[::(- 1)])\n        el.value = value[0]['val']\n        return\n    parent_id = el.parent.id\n    if ('typeahead' not in parent_id.lower()):\n        parent_id = el.parent.parent.id\n    if (parent_id in cls._set_by_typeahead):\n        window.destroy_typeahead_tag(('\n    window.make_typeahead_tag(('\n    DropdownHandler.set_dropdown_glyph(el.id, 'glyphicon-menu-down')\n    PlaceholderHandler.set_placeholder_dropdown(el)\n    cls._set_by_typeahead.add(parent_id)", "docstring": "Convert given `el` to typeahead input and set it to `value`.\n\nThis method also sets the dropdown icons and descriptors.\n\nArgs:\nel (obj): Element reference to the input you want to convert to\ntypeahead.\nvalue (list): List of dicts with two keys: ``source`` and ``val``.", "source": "codesearchnet"}
{"code": "def describe_file(module):\n    descriptor = FileDescriptor()\n    descriptor.package = util.get_package_for_module(module)\n    if (not descriptor.package):\n        descriptor.package = None\n    message_descriptors = []\n    enum_descriptors = []\n    for name in sorted(dir(module)):\n        value = getattr(module, name)\n        if isinstance(value, type):\n            if issubclass(value, messages.Message):\n                message_descriptors.append(describe_message(value))\n            elif issubclass(value, messages.Enum):\n                enum_descriptors.append(describe_enum(value))\n    if message_descriptors:\n        descriptor.message_types = message_descriptors\n    if enum_descriptors:\n        descriptor.enum_types = enum_descriptors\n    return descriptor", "docstring": "Build a file from a specified Python module.\n\nArgs:\nmodule: Python module to describe.\n\nReturns:\nInitialized FileDescriptor instance describing the module.", "source": "codesearchnet"}
{"code": "def _translate_name(name):\n        \n        underscored = inflection.underscore(name)\n        dasherized = inflection.dasherize(underscored)\n        words = dasherized.split('-')\n        last_word = words.pop()\n        words.append(inflection.pluralize(last_word))\n        return '-'.join(words)", "docstring": "Translate the class name to the API endpoint.\n\nFor example, Car would become cars, FastCar would become fast-cars.\n\nArgs:\nname (string): Camel case name (singular)\n\nReturns:\nstring: A pluraised, dasherized string.", "source": "juraj-google-style"}
{"code": "def intrusion_sets(self, name, owner=None, **kwargs):\n    return IntrusionSet(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Intrustion Set TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _ReadSupportedOS(self, definition_values, definition_object, name):\n    supported_os = definition_values.get('supported_os', [])\n    if (not isinstance(supported_os, list)):\n        raise errors.FormatError('Invalid supported_os type: {0!s}'.format(type(supported_os)))\n    undefined_supported_os = set(supported_os).difference(self.supported_os)\n    if undefined_supported_os:\n        error_string = 'Artifact definition: {0:s} undefined supported operating system: {1:s}.'.format(name, ', '.join(undefined_supported_os))\n        raise errors.FormatError(error_string)\n    definition_object.supported_os = supported_os", "docstring": "Reads the optional artifact or source type supported OS.\n\nArgs:\ndefinition_values (dict[str, object]): artifact definition values.\ndefinition_object (ArtifactDefinition|SourceType): the definition object.\nname (str): name of the artifact definition.\n\nRaises:\nFormatError: if there are undefined supported operating systems.", "source": "codesearchnet"}
{"code": "def ddot(L, R, left=None, out=None):\n    r\n    L = asarray(L, float)\n    R = asarray(R, float)\n    if left is None:\n        ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2\n        if not ok:\n            msg = \"Wrong array layout. One array should have\"\n            msg += \" ndim=1 and the other one ndim=2.\"\n            raise ValueError(msg)\n        left = L.ndim == 1\n    if left:\n        if out is None:\n            out = copy(R)\n        L = L.reshape(list(L.shape) + [1] * (R.ndim - 1))\n        return multiply(L, R, out=out)\n    else:\n        if out is None:\n            out = copy(L)\n        return multiply(L, R, out=out)", "docstring": "r\"\"\"Dot product of a matrix and a diagonal one.\n\nArgs:\nL (array_like): Left matrix.\nR (array_like): Right matrix.\nout (:class:`numpy.ndarray`, optional): copy result to.\n\nReturns:\n:class:`numpy.ndarray`: Resulting matrix.", "source": "juraj-google-style"}
{"code": "def load_module_functions(module):\n    module_functions = {}\n    for (name, item) in vars(module).items():\n        if validator.is_function(item):\n            module_functions[name] = item\n    return module_functions", "docstring": "load python module functions.\n\nArgs:\nmodule: python module\n\nReturns:\ndict: functions mapping for specified python module\n\n{\n\"func1_name\": func1,\n\"func2_name\": func2\n}", "source": "codesearchnet"}
{"code": "def ExpandGlobs(path, opts = None):\n  \n  precondition.AssertType(path, Text)\n  if not path:\n    raise ValueError(\"Path is empty\")\n\n  if not _IsAbsolutePath(path, opts):\n    raise ValueError(\"Path '%s' is not absolute\" % path)\n\n  if opts is not None and opts.pathtype == rdf_paths.PathSpec.PathType.REGISTRY:\n    \n    root_dir, tail = path.replace(\"\\\\\", \"/\").lstrip(\"/\").split(\"/\", 1)\n    components = list(ParsePath(tail, opts=opts))\n  else:\n    drive, tail = os.path.splitdrive(path)\n    root_dir = os.path.join(drive, os.path.sep).upper()\n    components = list(ParsePath(tail[1:], opts=opts))\n\n  return _ExpandComponents(root_dir, components)", "docstring": "Performs glob expansion on a given path.\n\nPath can contain regular glob elements (such as `**`, `*`, `?`, `[a-z]`). For\nexample, having files `foo`, `bar`, `baz` glob expansion of `ba?` will yield\n`bar` and `baz`.\n\nArgs:\npath: A path to expand.\nopts: A `PathOpts` object.\n\nReturns:\nGenerator over all possible glob expansions of a given path.\n\nRaises:\nValueError: If given path is empty or relative.", "source": "juraj-google-style"}
{"code": "def create_sys_dsn(driver: str, **kw) -> bool:\n    \n    attributes = []  \n    for attr in kw.keys():\n        attributes.append(\"%s=%s\" % (attr, kw[attr]))\n    return bool(\n        ctypes.windll.ODBCCP32.SQLConfigDataSource(0, ODBC_ADD_SYS_DSN,\n                                                   driver,\n                                                   nul.join(attributes))\n    )", "docstring": "(Windows only.)\nCreate a system ODBC data source name (DSN).\n\nArgs:\ndriver: ODBC driver name\nkw: Driver attributes\n\nReturns:\nbool: was the DSN created?", "source": "juraj-google-style"}
{"code": "def __call__(self, artist, genres, lyrics='', return_tensors='pt') -> BatchEncoding:\n    input_ids = [0, 0, 0]\n    artist = [artist] * len(self.version)\n    genres = [genres] * len(self.version)\n    artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)\n    artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)\n    attention_masks = [-INFINITY] * len(full_tokens[-1])\n    input_ids = [self.convert_to_tensors([input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors) for i in range(len(self.version))]\n    return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})", "docstring": "Convert the raw string to a list of token ids\n\nArgs:\nartist (`str`):\nName of the artist.\ngenres (`str`):\nList of genres that will be mixed to condition the audio\nlyrics (`str`, *optional*, defaults to `\"\"`):\nLyrics used to condition the generation", "source": "github-repos"}
{"code": "def get_license_from_url(url):\n    if (not url):\n        return\n    split_url = urlsplit(url, scheme='http')\n    if (split_url.netloc.lower() == 'creativecommons.org'):\n        if ('publicdomain' in split_url.path):\n            match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path)\n            if (match is None):\n                license = ['public domain']\n            else:\n                license = ['CC0']\n                license.extend((part for part in match.groups() if part))\n        else:\n            license = ['CC']\n            match = _RE_LICENSE_URL.match(split_url.path)\n            license.extend((part.upper() for part in match.groups() if part))\n    elif (split_url.netloc == 'arxiv.org'):\n        license = ['arXiv']\n        match = _RE_LICENSE_URL.match(split_url.path)\n        license.extend((part for part in match.groups() if part))\n    else:\n        raise ValueError('Unknown license URL')\n    return u' '.join(license)", "docstring": "Get the license abbreviation from an URL.\n\nArgs:\nurl(str): canonical url of the license.\n\nReturns:\nstr: the corresponding license abbreviation.\n\nRaises:\nValueError: when the url is not recognized", "source": "codesearchnet"}
{"code": "def load_default_traditional_chinese_parser() -> Parser:\n    with open(os.path.join(MODEL_DIR, 'zh-hant.json'), encoding='utf-8') as f:\n        model = json.load(f)\n    return Parser(model)", "docstring": "Loads a parser equipped with the default Traditional Chinese model.\n\nReturns:\nA parser (:obj:`budoux.Parser`).", "source": "github-repos"}
{"code": "def filter_segs(self, segs):\n        \n        def whole_seg(seg):\n            m = self.seg_regex.match(seg)\n            if m and m.group(0) == seg:\n                return True\n            else:\n                return False\n        return list(filter(whole_seg, segs))", "docstring": "Given list of strings, return only those which are valid segments.\n\nArgs:\nsegs (list): list of unicode values\n\nReturns:\nlist: values in `segs` that are valid segments (according to the\ndefinititions of bases and diacritics/modifiers known to the\nobject", "source": "juraj-google-style"}
{"code": "def snake_to_camel(name):\n    ret = ''.join((x.title() for x in name.split('_')))\n    ret = (ret[0].lower() + ret[1:])\n    return ret", "docstring": "Takes a snake_field_name and returns a camelCaseFieldName\n\nArgs:\nname (str): E.g. snake_field_name or SNAKE_FIELD_NAME\n\nReturns:\nstr: camelCase converted name. E.g. capsFieldName", "source": "codesearchnet"}
{"code": "def get_referenced_object_as_list(prev_obj, obj, dot_separated_name, desired_type=None):\n    res = get_referenced_object(prev_obj, obj, dot_separated_name, desired_type)\n    if (res is None):\n        return []\n    elif (type(res) is list):\n        return res\n    else:\n        return [res]", "docstring": "Same as get_referenced_object, but always returns a list.\n\nArgs:\nprev_obj: see get_referenced_object\nobj: see get_referenced_object\ndot_separated_name: see get_referenced_object\ndesired_type: see get_referenced_object\n\nReturns:\nsame as get_referenced_object, but always returns a list", "source": "codesearchnet"}
{"code": "def market_if_touched_replace(self, accountID, orderID, **kwargs):\n        \n        return self.replace(\n            accountID,\n            orderID,\n            order=MarketIfTouchedOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to replace a pending MarketIfTouched Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the MarketIfTouched Order to replace\nkwargs : The arguments to create a MarketIfTouchedOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def split_instance_route(self, route: 'InstanceRoute') -> Optional[Tuple[('InstanceRoute', 'InstanceRoute')]]:\n    sroute = []\n    sn = self\n    while sn:\n        sroute.append(sn.iname())\n        sn = sn.data_parent()\n    i = 0\n    while True:\n        if (not sroute):\n            break\n        inst = sroute.pop()\n        if (inst != route[i].iname()):\n            return None\n        while True:\n            i += 1\n            if ((i >= len(route)) or isinstance(route[i], MemberName)):\n                break\n        if (not sroute):\n            return (InstanceRoute(route[:i]), InstanceRoute(route[i:]))\n        if (i >= len(route)):\n            return None", "docstring": "Split `route` into the part up to receiver and the rest.\n\nArgs:\nroute: Absolute instance route (the receiver should correspond to an\ninstance node on this route).\n\nReturns:\nA tuple consisting of\n- the part of `route` from the root up to and including the\ninstance whose schema node is the receiver, and\n- the rest of `route`.\n``None`` is returned if the receiver is not on the route.", "source": "codesearchnet"}
{"code": "def build_frontend(self, frontend_node):\n        \n        proxy_name = frontend_node.frontend_header.proxy_name.text\n        service_address_node = frontend_node.frontend_header.service_address\n\n        \n        config_block_lines = self.__build_config_block(\n            frontend_node.config_block)\n\n        \n        host, port = '', ''\n        if isinstance(service_address_node, pegnode.ServiceAddress):\n            host = service_address_node.host.text\n            port = service_address_node.port.text\n        else:\n            \n            \n            for line in config_block_lines:\n                if isinstance(line, config.Bind):\n                    host, port = line.host, line.port\n                    break\n            else:\n                raise Exception(\n                    'Not specify host and port in `frontend` definition')\n        return config.Frontend(\n            name=proxy_name, host=host, port=port,\n            config_block=config_block_lines)", "docstring": "parse `frontend` sections, and return a config.Frontend\n\nArgs:\nfrontend_node (TreeNode): Description\n\nRaises:\nException: Description\n\nReturns:\nconfig.Frontend: an object", "source": "juraj-google-style"}
{"code": "def query(self, s):\n        \n        s1 = np.sort([self.order[token] for token in s if token in self.order])\n        logging.debug(\"{} original tokens and {} tokens after applying \"\n            \"frequency order.\".format(len(s), len(s1)))\n        prefix = self._get_prefix(s1)\n        candidates = set([i for p1, token in enumerate(prefix)\n                for i, p2 in self.index[token]\n                if self.position_filter_func(s1, self.sets[i], p1, p2,\n                    self.similarity_threshold)])\n        logging.debug(\"{} candidates found.\".format(len(candidates)))\n        results = deque([])\n        for i in candidates:\n            s2 = self.sets[i]\n            sim = self.similarity_func(s1, s2)\n            if sim < self.similarity_threshold:\n                continue\n            results.append((i, sim))\n        logging.debug(\"{} verified sets found.\".format(len(results)))\n        return list(results)", "docstring": "Query the search index for sets similar to the query set.\n\nArgs:\ns (Iterable): the query set.\n\nReturns (list): a list of tuples `(index, similarity)` where the index\nis the index of the matching sets in the original list of sets.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 coupling_map,\n                 initial_layout=None,\n                 trials=20,\n                 seed=None):\n        \n        super().__init__()\n        self.coupling_map = coupling_map\n        self.initial_layout = initial_layout\n        self.trials = trials\n        self.seed = seed\n\n        self.requires.append(BarrierBeforeFinalMeasurements())", "docstring": "Maps a DAGCircuit onto a `coupling_map` using swap gates.\nArgs:\ncoupling_map (CouplingMap): Directed graph represented a coupling map.\ninitial_layout (Layout): initial layout of qubits in mapping\ntrials (int): the number of attempts the randomized algorithm makes.\nseed (int): initial seed.", "source": "juraj-google-style"}
{"code": "def typecheck(fn):\n    is_compiled = False\n    if hasattr(fn, '__wrapped__'):\n        signature_fn = fn.__wrapped__\n        if hasattr(signature_fn, 'is_tp_compiled'):\n            is_compiled = getattr(signature_fn, 'is_tp_compiled')\n    else:\n        signature_fn = fn\n    signature = inspect.signature(signature_fn)\n\n    @wraps(fn)\n    def wrapper(*args, **kwargs):\n        try:\n            all_args = signature.bind(*args, **kwargs)\n            for arg_key, arg_value in all_args.arguments.items():\n                trace = _Trace().add_context(f'When checking the argument \"{arg_key}\" of function \"{fn.__name__}\".')\n                if arg_key not in signature.parameters:\n                    raise ValueError(f'Unexpected argument \"{arg_key}\"')\n                param = signature.parameters[arg_key]\n                if param.kind in [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]:\n                    _check_annotation(trace, is_compiled, arg_value, param.annotation)\n                elif param.kind is inspect.Parameter.VAR_POSITIONAL:\n                    _check_annotation_list_or_set_or_uniform_tuple(trace, is_compiled, arg_value, [param.annotation])\n                elif param.kind is inspect.Parameter.VAR_KEYWORD:\n                    for sub_key, sub_value in arg_value.items():\n                        _check_annotation(_Trace().add_context(f'When checking the key \"{sub_key}\" of argument \"{arg_key}\" of function \"{fn.__name__}\".'), is_compiled, sub_value, param.annotation)\n        except ValueError as e:\n            if _ERROR_RAISES_EXCEPTION:\n                e.__traceback__ = None\n                raise e\n            else:\n                logging.warning('%s', str(e))\n        output = fn(*args, **kwargs)\n        try:\n            trace = _Trace().add_context(f'When checking the returned value of function \"{fn.__name__}\".')\n            _check_annotation(trace, is_compiled, output, signature.return_annotation)\n        except ValueError as e:\n            if _ERROR_RAISES_EXCEPTION:\n                e.__traceback__ = None\n                raise e\n            else:\n                logging.warning('%s', str(e))\n        return output\n    setattr(wrapper, '_typecheck', True)\n    return wrapper", "docstring": "Annotation that check the arguments and outputs of a function at runtime.\n\n@typecheck checks, at runtime, that the type hints of the arguments and output\nof a function are satisfied.\n\nUsage example:\n```python\n@typecheck\ndef f(a, b: int, c: str = \"aze\") -> List[str]:\nreturn [\"hello\", \"world\"]\n\nf(1, 2, \"a\") # Ok\nf(1, 2, 3) # Fails\n```\n\nIf combined with @compile, @typecheck should be applied after @compile (i.e.\nplace @compile just below @typecheck in the code).\n\nThis code only support what is required by Temporian API.\n\nDoes not support typing.GenericTypeAlias e.g. list[int]. Use List[int]\ninstead.\n\nArgs:\nfn: Function to instrument.\n\nReturns:\nInstrumented function.", "source": "github-repos"}
{"code": "def __call__(self, func):\n        \n        if not hasattr(func, \"parser\"):\n            _LOG.debug(\"Creating parser for '%s'%s\", func.__name__,\n                       \"/%s\" % self._name if self._name else \"\")\n            (func_args, _, _, defaults) = getargspec(func)\n            self._types, func_args = _check_types(func.__name__, self._types,\n                                                  func_args, defaults)\n            args_and_defaults = _get_args_and_defaults(func_args, defaults)\n            parser = _get_arg_parser(func, self._types, args_and_defaults,\n                                     self._delimiter_chars)\n            parser.get_name = lambda: self._name\n            func.parser = parser\n            func.parser.call = _get_parser_call_method(func)\n\n        @wraps(func)\n        def decorated(*args, **kwargs):\n            return func(*args, **kwargs)\n        return decorated", "docstring": "Add an argument parser attribute `parser` to the decorated function.\n\nArgs:\nfunc: the function for which we want to create an argument parser", "source": "juraj-google-style"}
{"code": "def call(self, inputs, state):\n    _check_rnn_cell_input_dtypes([inputs, state])\n    sigmoid = math_ops.sigmoid\n    one = constant_op.constant(1, dtype=dtypes.int32)\n    if self._state_is_tuple:\n        c, h = state\n    else:\n        c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)\n    gate_inputs = math_ops.matmul(array_ops.concat([inputs, h], 1), self._kernel)\n    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n    i, j, f, o = array_ops.split(value=gate_inputs, num_or_size_splits=4, axis=one)\n    forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\n    add = math_ops.add\n    multiply = math_ops.multiply\n    new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j)))\n    new_h = multiply(self._activation(new_c), sigmoid(o))\n    if self._state_is_tuple:\n        new_state = LSTMStateTuple(new_c, new_h)\n    else:\n        new_state = array_ops.concat([new_c, new_h], 1)\n    return (new_h, new_state)", "docstring": "Long short-term memory cell (LSTM).\n\nArgs:\ninputs: `2-D` tensor with shape `[batch_size, input_size]`.\nstate: An `LSTMStateTuple` of state tensors, each shaped `[batch_size,\nnum_units]`, if `state_is_tuple` has been set to `True`.  Otherwise, a\n`Tensor` shaped `[batch_size, 2 * num_units]`.\n\nReturns:\nA pair containing the new hidden state, and the new state (either a\n`LSTMStateTuple` or a concatenated state, depending on\n`state_is_tuple`).", "source": "github-repos"}
{"code": "def _num_image_tokens(image_size: Tuple[int, int], patch_size: Tuple[int, int]) -> int:\n    height, width = image_size\n    patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)\n    num_width_tokens = (width - 1) \n    num_height_tokens = (height - 1) \n    return (num_height_tokens, num_width_tokens)", "docstring": "Calculate the number of image tokens given the image size and patch size.\n\nArgs:\nimage_size (`Tuple[int, int]`):\nThe size of the image as `(height, width)`.\npatch_size (`Tuple[int, int]`):\nThe patch size as `(height, width)`.\n\nReturns:\n`int`: The number of image tokens.", "source": "github-repos"}
{"code": "def get_events_for_blocks(self, blocks, subscriptions):\n        \n\n        events = []\n        for blkw in blocks:\n            events.extend(self.get_events_for_block(blkw, subscriptions))\n        return events", "docstring": "Get a list of events associated with all the blocks.\n\nArgs:\nblocks (list of BlockWrapper): The blocks to search for events that\nmatch each subscription.\nsubscriptions (list of EventSubscriptions): EventFilter and\nevent type to filter events.\n\nReturns (list of Events): The Events associated which each block id.\n\nRaises:\nKeyError A receipt is missing from the receipt store.", "source": "juraj-google-style"}
{"code": "def _retrieve_endpoint(self, endpoint_id: str, location: str, is_private: bool) -> aiplatform.Endpoint:\n    if is_private:\n        endpoint: aiplatform.Endpoint = aiplatform.PrivateEndpoint(endpoint_name=endpoint_id, location=location)\n        LOGGER.debug('Treating endpoint %s as private', endpoint_id)\n    else:\n        endpoint = aiplatform.Endpoint(endpoint_name=endpoint_id, location=location)\n        LOGGER.debug('Treating endpoint %s as public', endpoint_id)\n    try:\n        mod_list = endpoint.list_models()\n    except Exception as e:\n        raise ValueError('Failed to contact endpoint %s, got exception: %s', endpoint_id, e)\n    if len(mod_list) == 0:\n        raise ValueError('Endpoint %s has no models deployed to it.', endpoint_id)\n    return endpoint", "docstring": "Retrieves an AI Platform endpoint and queries it for liveness/deployed\nmodels.\n\nArgs:\nendpoint_id: the numerical ID of the Vertex AI endpoint to retrieve.\nis_private: a boolean indicating if the Vertex AI endpoint is a private\nendpoint\nReturns:\nAn aiplatform.Endpoint object\nRaises:\nValueError: if endpoint is inactive or has no models deployed to it.", "source": "github-repos"}
{"code": "def build_request_relationship(type, ids):\n    if (ids is None):\n        return {'data': None}\n    elif isinstance(ids, str):\n        return {'data': {'id': ids, 'type': type}}\n    else:\n        return {'data': [{'id': id, 'type': type} for id in ids]}", "docstring": "Build a relationship list.\n\nA relationship list is used to update relationships between two\nresources. Setting sensors on a label, for example, uses this\nfunction to construct the list of sensor ids to pass to the Helium\nAPI.\n\nArgs:\n\ntype(string): The resource type for the ids in the relationship\nids([uuid] or uuid): Just one or a list of resource uuids to use\nin the relationship\n\nReturns:\n\nA ready to use relationship JSON object.", "source": "codesearchnet"}
{"code": "def unbind(self, devices_to_unbind):\n        \n        if self.entity_api_key == \"\":\n            return {'status': 'failure', 'response': 'No API key found in request'}\n        url = self.base_url + \"api/0.1.0/subscribe/unbind\"\n        headers = {\"apikey\": self.entity_api_key}\n        data = {\n            \"exchange\": \"amq.topic\",\n            \"keys\": devices_to_unbind,\n            \"queue\": self.entity_id\n        }\n\n        with self.no_ssl_verification():\n            r = requests.delete(url, json=data, headers=headers)\n            print(r)\n        response = dict()\n        if \"No API key\" in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"failure\"\n            r = json.loads(r.content.decode(\"utf-8\"))['message']\n        elif 'unbind' in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"success\"\n            r = r.content.decode(\"utf-8\")\n        else:\n            response[\"status\"] = \"failure\"\n            r = r.content.decode(\"utf-8\")\n        response[\"response\"] = str(r)\n        return response", "docstring": "This function allows an entity to unbound devices that are already bound.\n\nArgs:\ndevices_to_unbind (list): an array of devices that are to be unbound ( stop listening)\nExample unbind([\"test10\",\"testDemo105\"])", "source": "juraj-google-style"}
{"code": "def _add_new_tf_operations(self, compute_devices=True) -> list['Operation']:\n    self._check_not_finalized()\n    new_ops = [self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in self.new_operations()]\n    for op in new_ops:\n        new_control_inputs = self._control_dependencies_for_inputs(op.inputs)\n        op._add_control_inputs(new_control_inputs)\n        op._control_flow_post_processing()\n    return new_ops", "docstring": "Creates `Operations` in this graph for any new TF_Operations.\n\nThis is useful for when TF_Operations are indirectly created by the C API\noutside of the Operation constructor (e.g. by TF_ImportGraphDef,\nTF_FinishWhile). This ensures there are corresponding Operations for all\nTF_Operations in the underlying TF_Graph.\n\nArgs:\ncompute_devices: (Optional.) If True, device functions will be executed to\ncompute the device properties of each new Operation.\n\nReturns:\nA list of the new `Operation` objects.", "source": "github-repos"}
{"code": "def get_coordinate_offset(self, other_reading):\n        \n        my_x, my_y = self.reference_source_point\n        other_x, other_y = other_reading.reference_source_point\n        return my_x - other_x, my_y - other_y", "docstring": "Calculates the offsets between readings' coordinate systems.\n\nArgs:\nother_reading: ossos.astrom.SourceReading\nThe reading to compare coordinate systems with.\n\nReturns:\n(offset_x, offset_y):\nThe x and y offsets between this reading and the other reading's\ncoordinate systems.", "source": "juraj-google-style"}
{"code": "def sparse_slice(sp_input, start, size, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    start = ops.convert_to_tensor(start, dtypes.int64)\n    size = ops.convert_to_tensor(size, dtypes.int64)\n    with ops.name_scope(name, 'SparseSlice', [sp_input]) as name:\n        output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name=name)\n        return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)", "docstring": "Slice a `SparseTensor` based on the `start` and `size`.\n\nFor example, if the input is\n\ninput_tensor = shape = [2, 7]\n[    a   d e  ]\n[b c          ]\n\nGraphically the output tensors are:\n\nsparse.slice([0, 0], [2, 4]) = shape = [2, 4]\n[    a  ]\n[b c    ]\n\nsparse.slice([0, 4], [2, 3]) = shape = [2, 3]\n[ d e  ]\n[      ]\n\nArgs:\nsp_input: The `SparseTensor` to split.\nstart: 1-D. tensor represents the start of the slice.\nsize: 1-D. tensor represents the size of the slice.\nname: A name for the operation (optional).\n\nReturns:\nA `SparseTensor` objects resulting from splicing.\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"}
{"code": "def tan(x):\n    if any_symbolic_tensors((x,)):\n        return Tan().symbolic_call(x)\n    return backend.numpy.tan(x)", "docstring": "Compute tangent, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"}
{"code": "def get_msd_plot(self, plt=None, mode=\"specie\"):\n        \n        from pymatgen.util.plotting import pretty_plot\n        plt = pretty_plot(12, 8, plt=plt)\n        if np.max(self.dt) > 100000:\n            plot_dt = self.dt / 1000\n            unit = 'ps'\n        else:\n            plot_dt = self.dt\n            unit = 'fs'\n\n        if mode == \"species\":\n            for sp in sorted(self.structure.composition.keys()):\n                indices = [i for i, site in enumerate(self.structure) if\n                           site.specie == sp]\n                sd = np.average(self.sq_disp_ions[indices, :], axis=0)\n                plt.plot(plot_dt, sd, label=sp.__str__())\n            plt.legend(loc=2, prop={\"size\": 20})\n        elif mode == \"sites\":\n            for i, site in enumerate(self.structure):\n                sd = self.sq_disp_ions[i, :]\n                plt.plot(plot_dt, sd, label=\"%s - %d\" % (\n                    site.specie.__str__(), i))\n            plt.legend(loc=2, prop={\"size\": 20})\n        elif mode == \"mscd\":\n            plt.plot(plot_dt, self.mscd, 'r')\n            plt.legend([\"Overall\"], loc=2, prop={\"size\": 20})\n        else:\n            \n            plt.plot(plot_dt, self.msd, 'k')\n            plt.plot(plot_dt, self.msd_components[:, 0], 'r')\n            plt.plot(plot_dt, self.msd_components[:, 1], 'g')\n            plt.plot(plot_dt, self.msd_components[:, 2], 'b')\n            plt.legend([\"Overall\", \"a\", \"b\", \"c\"], loc=2, prop={\"size\": 20})\n\n        plt.xlabel(\"Timestep ({})\".format(unit))\n        if mode == \"mscd\":\n            plt.ylabel(\"MSCD ($\\\\AA^2$)\")\n        else:\n            plt.ylabel(\"MSD ($\\\\AA^2$)\")\n        plt.tight_layout()\n        return plt", "docstring": "Get the plot of the smoothed msd vs time graph. Useful for\nchecking convergence. This can be written to an image file.\n\nArgs:\nplt: A plot object. Defaults to None, which means one will be\ngenerated.\nmode (str): Determines type of msd plot. By \"species\", \"sites\",\nor direction (default). If mode = \"mscd\", the smoothed mscd vs.\ntime will be plotted.", "source": "juraj-google-style"}
{"code": "def mark_streamer(self, index):\n    self._logger.debug('Marking streamer %d manually', index)\n    if (index >= len(self.streamers)):\n        raise ArgumentError('Invalid streamer index', index=index, num_streamers=len(self.streamers))\n    self._manually_triggered_streamers.add(index)", "docstring": "Manually mark a streamer that should trigger.\n\nThe next time check_streamers is called, the given streamer will be\nmanually marked that it should trigger, which will cause it to trigger\nunless it has no data.\n\nArgs:\nindex (int): The index of the streamer that we should mark as\nmanually triggered.\n\nRaises:\nArgumentError: If the streamer index is invalid.", "source": "codesearchnet"}
{"code": "def __init__(self, lower=True, num_norm=True,\n                 use_char=True, initial_vocab=None):\n        \n        self._num_norm = num_norm\n        self._use_char = use_char\n        self._word_vocab = Vocabulary(lower=lower)\n        self._char_vocab = Vocabulary(lower=False)\n        self._label_vocab = Vocabulary(lower=False, unk_token=False)\n\n        if initial_vocab:\n            self._word_vocab.add_documents([initial_vocab])\n            self._char_vocab.add_documents(initial_vocab)", "docstring": "Create a preprocessor object.\n\nArgs:\nlower: boolean. Whether to convert the texts to lowercase.\nuse_char: boolean. Whether to use char feature.\nnum_norm: boolean. Whether to normalize text.\ninitial_vocab: Iterable. Initial vocabulary for expanding word_vocab.", "source": "juraj-google-style"}
{"code": "def get_correct_answer(question, default=None, required=False,\n                       answer=None, is_answer_correct=None):\n    u\n    while 1:\n        if default is None:\n            msg = u' - No Default Available'\n        else:\n            msg = (u'\\n[DEFAULT] -> {}\\nPress Enter To '\n                   u'Use Default'.format(default))\n        prompt = question + msg + u'\\n--> '\n        if answer is None:\n            answer = six.moves.input(prompt)\n        if answer == '' and required and default is not None:\n            print(u'You have to enter a value\\n\\n')\n            six.moves.input(u'Press enter to continue')\n            print(u'\\n\\n')\n            answer = None\n            continue\n        if answer == u'' and default is not None:\n            answer = default\n        _ans = ask_yes_no(u'You entered {}, is this '\n                          u'correct?'.format(answer),\n                          answer=is_answer_correct)\n        if _ans:\n            return answer\n        else:\n            answer = None", "docstring": "u\"\"\"Ask user a question and confirm answer\n\nArgs:\n\nquestion (str): Question to ask user\n\ndefault (str): Default answer if no input from user\n\nrequired (str): Require user to input answer\n\nanswer (str): Used for testing\n\nis_answer_correct (str): Used for testing", "source": "juraj-google-style"}
{"code": "def _check_required_fields(self, object_type, ignore_fields):\n        \n        \n        for field in self.configuration[object_type]['required_fields']:\n            if field not in self.data and field not in ignore_fields:\n                raise HDXError('Field %s is missing in %s!' % (field, object_type))", "docstring": "Helper method to check that metadata for HDX object is complete\n\nArgs:\nignore_fields (List[str]): Any fields to ignore in the check\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def as_check_request(self, timer=datetime.utcnow):\n    if (not self.service_name):\n        raise ValueError(u'the service name must be set')\n    if (not self.operation_id):\n        raise ValueError(u'the operation id must be set')\n    if (not self.operation_name):\n        raise ValueError(u'the operation name must be set')\n    op = super(Info, self).as_operation(timer=timer)\n    labels = {}\n    if self.android_cert_fingerprint:\n        labels[_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT.label_name] = self.android_cert_fingerprint\n    if self.android_package_name:\n        labels[_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME.label_name] = self.android_package_name\n    if self.client_ip:\n        labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip\n    if self.ios_bundle_id:\n        labels[_KNOWN_LABELS.SCC_IOS_BUNDLE_ID.label_name] = self.ios_bundle_id\n    if self.referer:\n        labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer\n    labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT\n    labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT\n    op.labels = encoding.PyValueToMessage(sc_messages.Operation.LabelsValue, labels)\n    check_request = sc_messages.CheckRequest(operation=op)\n    return sc_messages.ServicecontrolServicesCheckRequest(serviceName=self.service_name, checkRequest=check_request)", "docstring": "Makes a `ServicecontrolServicesCheckRequest` from this instance\n\nReturns:\na ``ServicecontrolServicesCheckRequest``\n\nRaises:\nValueError: if the fields in this instance are insufficient to\nto create a valid ``ServicecontrolServicesCheckRequest``", "source": "codesearchnet"}
{"code": "def GetBalance(self, wallet, address, as_string=False):\n    addr = PromptUtils.parse_param(address, wallet)\n    if isinstance(addr, UInt160):\n        addr = addr.Data\n    sb = ScriptBuilder()\n    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'balanceOf', [addr])\n    (tx, fee, results, num_ops, engine_success) = test_invoke(sb.ToArray(), wallet, [])\n    if engine_success:\n        try:\n            val = results[0].GetBigInteger()\n            precision_divisor = pow(10, self.decimals)\n            balance = (Decimal(val) / Decimal(precision_divisor))\n            if as_string:\n                formatter_str = ('.%sf' % self.decimals)\n                balance_str = format(balance, formatter_str)\n                return balance_str\n            return balance\n        except Exception as e:\n            logger.error(('could not get balance: %s ' % e))\n            traceback.print_stack()\n    else:\n        addr_str = Crypto.ToAddress(UInt160(data=addr))\n        logger.error(f'Could not get balance of address {addr_str} for token contract {self.ScriptHash}. VM execution failed. Make sure the contract exists on the network and that it adheres to the NEP-5 standard')\n    return 0", "docstring": "Get the token balance.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\naddress (str): public address of the account to get the token balance of.\nas_string (bool): whether the return value should be a string. Default is False, returning an integer.\n\nReturns:\nint/str: token balance value as int (default), token balanace as string if `as_string` is set to True. 0 if balance retrieval failed.", "source": "codesearchnet"}
{"code": "def save_project_id(project_id):\n    try:\n        subprocess.call(['gcloud', 'config', 'set', 'project', project_id])\n    except:\n        config_file = os.path.join(get_config_dir(), 'config.json')\n        config = {}\n        if os.path.exists(config_file):\n            with open(config_file) as f:\n                config = json.loads(f.read())\n        config['project_id'] = project_id\n        with open(config_file, 'w') as f:\n            f.write(json.dumps(config))", "docstring": "Save project id to config file.\n\nArgs:\nproject_id: the project_id to save.", "source": "codesearchnet"}
{"code": "def maybe_get_common_dtype(arg_list):\n    if all(((a is None) for a in arg_list)):\n        return None\n    return dtype_util.common_dtype(arg_list, tf.float32)", "docstring": "Return common dtype of arg_list, or None.\n\nArgs:\narg_list: an iterable of items which are either `None` or have a `dtype`\nproperty.\n\nReturns:\ndtype: The common dtype of items in `arg_list`, or `None` if the list is\nempty or all items are `None`.", "source": "codesearchnet"}
{"code": "def log_every_n(level, msg, n, *args):\n    count = _get_next_log_count_per_token(get_absl_logger().findCaller())\n    log_if(level, msg, (not (count % n)), *args)", "docstring": "Logs 'msg % args' at level 'level' once per 'n' times.\n\nLogs the 1st call, (N+1)st call, (2N+1)st call,  etc.\nNot threadsafe.\n\nArgs:\nlevel: int, the absl logging level at which to log.\nmsg: str, the message to be logged.\nn: int, the number of times this should be called before it is logged.\n*args: The args to be substitued into the msg.", "source": "codesearchnet"}
{"code": "def create_host_call(model_dir):\n    graph = tf.get_default_graph()\n    summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)\n    gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])\n    summary_kwargs = collections.OrderedDict()\n    for t in summaries:\n        if (t.op.type not in ['ScalarSummary']):\n            tf.logging.warn(('Ignoring unsupported tf.Summary type %s' % t.op.type))\n            continue\n        name = t.op.name\n        tensor = t.op.inputs[1]\n        if (t.op.type == 'ScalarSummary'):\n            assert tensor.shape.is_compatible_with([])\n            if (tensor.dtype == tf.int64):\n                tensor = tf.to_int32(tensor)\n            summary_kwargs[('ScalarSummary' + name)] = tf.reshape(tensor, [1])\n        elif (t.op.type == 'ImageSummary'):\n            if (tensor.dtype != tf.float32):\n                tf.logging.warn(('Currently T2T on TPU only supports ImageSummary of tf.float32-type Tensors. Skipping Tensor %s with dtype %s...' % (tensor.name, tensor.dtype)))\n                continue\n            summary_kwargs[('ImageSummary' + name)] = tensor\n    if (not summary_kwargs):\n        return None\n    summary_kwargs['global_step'] = gs_t\n    log_info(('summary_kwargs %s' % str(summary_kwargs)))\n\n    def host_call_fn(**kwargs):\n        'Training host call. Creates summaries for training metrics.\\n\\n    Args:\\n      **kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must\\n        contain key \"global_step\" with value of current global_step Tensor.\\n\\n    Returns:\\n      List of summary ops to run on the CPU host.\\n    '\n        gs = tf.to_int64(kwargs.pop('global_step')[0])\n        with tf.contrib.summary.create_file_writer(model_dir).as_default():\n            with tf.contrib.summary.always_record_summaries():\n                for (name, value) in sorted(six.iteritems(kwargs)):\n                    if name.startswith('ScalarSummary'):\n                        name = name[len('ScalarSummary'):]\n                        tf.contrib.summary.scalar(name, tf.reduce_mean(tf.to_float(value)), step=gs)\n                    elif name.startswith('ImageSummary'):\n                        name = name[len('ImageSummary'):]\n                        tf.contrib.summary.image(name, value, step=gs)\n                return tf.contrib.summary.all_summary_ops()\n    return (host_call_fn, summary_kwargs)", "docstring": "Construct a host_call writing scalar summaries.\n\nArgs:\nmodel_dir: String containing path to train\n\nReturns:\n(fn, args) Pair to be called by TPUEstimator as the host_call.", "source": "codesearchnet"}
{"code": "def get_default_assets_zip_provider():\n    path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), 'webfiles.zip')\n    if (not os.path.exists(path)):\n        logger.warning('webfiles.zip static assets not found: %s', path)\n        return None\n    return (lambda : open(path, 'rb'))", "docstring": "Opens stock TensorBoard web assets collection.\n\nReturns:\nReturns function that returns a newly opened file handle to zip file\ncontaining static assets for stock TensorBoard, or None if webfiles.zip\ncould not be found. The value the callback returns must be closed. The\npaths inside the zip file are considered absolute paths on the web server.", "source": "codesearchnet"}
{"code": "def _get_newsfeeds(self, uri, detail_level = None):\n\t\t\n\t\tif detail_level:\n\t\t\tif detail_level not in ['ALL', 'CONDENSED']:\n\t\t\t\treturn requests.codes.bad_request, {'success' : 'False', \n\t\t\t\t\t\t\t\t\t\t\t\t'error': 'detailLevel needs to be provided and field_type needs to be \\'ALL\\' or \\'CONDENSED\\''}\n\t\t\turi +=  self.detail_level_suffix + detail_level\n\t\treturn self._req('get', uri)", "docstring": "General purpose function to get newsfeeds\nArgs:\nuri \t\t\turi for the feed base\ndetail_level \targuments for req str ['ALL', 'CONDENSED']\nreturn \t\t\tlist of feed dicts parse at your convenience", "source": "juraj-google-style"}
{"code": "def xarrayfunc(func):\n    \n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        if any(isinstance(arg, xr.DataArray) for arg in args):\n            newargs = []\n            for arg in args:\n                if isinstance(arg, xr.DataArray):\n                    newargs.append(arg.values)\n                else:\n                    newargs.append(arg)\n\n            return dc.full_like(args[0], func(*newargs, **kwargs))\n        else:\n            return func(*args, **kwargs)\n\n    return wrapper", "docstring": "Make a function compatible with xarray.DataArray.\n\nThis function is intended to be used as a decorator like::\n\n>>> @dc.xarrayfunc\n>>> def func(array):\n...     # do something\n...     return newarray\n>>>\n>>> result = func(array)\n\nArgs:\nfunc (function): Function to be wrapped. The first argument\nof the function must be an array to be processed.\n\nReturns:\nwrapper (function): Wrapped function.", "source": "juraj-google-style"}
{"code": "def save(self, output_saved_model_dir, save_gpu_specific_engines=True, options=None):\n    assert self._converted\n    if trt_utils.is_experimental_feature_activated('remove_native_segments'):\n        logging.info(\"'remove_native_segments' experimental feature is enabled during saving of converted SavedModel.\")\n        self._converted_func = _remove_native_segments(self._converted_func)\n        self._converted_graph_def = self._converted_func.graph.as_graph_def()\n    if self._need_calibration and (not self._calibrated):\n        raise RuntimeError('A model that requires INT8 calibration has to be built before saving it. Call build() to build and calibrate the TensorRT engines.')\n    engine_asset_dir = tempfile.mkdtemp()\n    resource_map = {}\n\n    def _serialize_and_track_engine(node):\n        \n        canonical_engine_name = _get_canonical_engine_name(node.name)\n        if canonical_engine_name in resource_map:\n            return\n        filename = os.path.join(engine_asset_dir, 'trt-serialized-engine.' + canonical_engine_name)\n        try:\n            gen_trt_ops.serialize_trt_resource(resource_name=canonical_engine_name, filename=filename, delete_resource=True, save_gpu_specific_engines=save_gpu_specific_engines)\n        except errors.NotFoundError:\n            logging.info('Could not find %s in TF-TRT cache. This can happen if build() is not called, which means TensorRT engines will be built and cached at runtime.', canonical_engine_name)\n            return\n        resource_map[canonical_engine_name] = _TRTEngineResource(canonical_engine_name, filename, self._conversion_params.maximum_cached_engines)\n    self._for_each_trt_node(self._converted_graph_def, _serialize_and_track_engine)\n    trackable = autotrackable.AutoTrackable() if self.freeze else self._saved_model\n    trackable.trt_engine_resources = resource_map\n    if not self._conversion_params.allow_build_at_runtime:\n\n        def _reset_allow_build_at_runtime(node):\n            node.attr['_allow_build_at_runtime'].b = False\n        self._for_each_trt_node(self._converted_graph_def, _reset_allow_build_at_runtime)\n        reset_converted_func = wrap_function.function_from_graph_def(self._converted_graph_def, [tensor.name for tensor in self._converted_func.inputs], [tensor.name for tensor in self._converted_func.outputs])\n        reset_converted_func.graph.structured_outputs = nest.pack_sequence_as(self._converted_func.graph.structured_outputs, reset_converted_func.graph.structured_outputs)\n        reset_converted_func.graph.structured_input_signature = self._converted_func.structured_input_signature\n        self._converted_func = reset_converted_func\n    signatures = {self._input_saved_model_signature_key: self._converted_func}\n    save.save(trackable, output_saved_model_dir, signatures, options=options)", "docstring": "Save the converted SavedModel.\n\nArgs:\noutput_saved_model_dir: directory to saved the converted SavedModel.\nsave_gpu_specific_engines: whether to save TRT engines that have been\nbuilt. When True, all engines are saved and when False, the engines\nare not saved and will be rebuilt at inference time. By using\nsave_gpu_specific_engines=False after doing INT8 calibration, inference\ncan be done on different GPUs than the GPU that the model was calibrated\nand saved on.\noptions: `tf.saved_model.SaveOptions` object for configuring save options.\nRaises:\nRuntimeError: if the needed calibration hasn't been done.", "source": "github-repos"}
{"code": "def _sendline(self, line):\n        \n        logging.info('%s: sending line', self.port)\n        \n        self._lines = []\n        try:\n            self._read()\n        except socket.error:\n            logging.debug('%s: Nothing cleared', self.port)\n\n        print 'sending [%s]' % line\n        self._write(line + '\\r\\n')\n\n        \n        time.sleep(0.1)", "docstring": "Send exactly one line to the device\n\nArgs:\nline str: data send to device", "source": "juraj-google-style"}
{"code": "def average(self, var):\n    return self._averages.get(var.ref(), None)", "docstring": "Returns the `Variable` holding the average of `var`.\n\nArgs:\nvar: A `Variable` object.\n\nReturns:\nA `Variable` object or `None` if the moving average of `var`\nis not maintained.", "source": "github-repos"}
{"code": "def get_vasp_input(self, vasp_input_set=MPRelaxSet, **kwargs):\n        \n        d = vasp_input_set(self.final_structure, **kwargs).get_vasp_input()\n        d[\"transformations.json\"] = json.dumps(self.as_dict())\n        return d", "docstring": "Returns VASP input as a dict of vasp objects.\n\nArgs:\nvasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set\nto create vasp input files from structures", "source": "juraj-google-style"}
{"code": "def _project_single_observable(self, **kwargs: Dict[(str, Any)]) -> Hist:\n    assert isinstance(self.output_attribute_name, str)\n    (output_hist, projection_name, projection_name_args) = self._project_observable(input_key='single_observable', input_observable=self.observable_to_project_from, **kwargs)\n    output_hist_args = projection_name_args\n    output_hist_args.update({'output_hist': output_hist, 'projection_name': projection_name})\n    output_hist = self.output_hist(**output_hist_args)\n    if (not hasattr(self.output_observable, self.output_attribute_name)):\n        raise ValueError(f'Attempted to assign hist to non-existent attribute {self.output_attribute_name} of object {self.output_observable}. Check the attribute name!')\n    setattr(self.output_observable, self.output_attribute_name, output_hist)\n    return output_hist", "docstring": "Driver function for projecting and storing a single observable.\n\nArgs:\nkwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)\nReturns:\nThe projected histogram. The histogram is also stored in the output specified by ``output_observable``.", "source": "codesearchnet"}
{"code": "def _new_convolution(self, use_bias):\n    \n    def clean_dict(input_dict):\n      if input_dict and not use_bias:\n        cleaned_dict = input_dict.copy()\n        cleaned_dict.pop(\"b\", None)\n        return cleaned_dict\n      return input_dict\n    return self._conv_class(\n        output_channels=4*self._output_channels,\n        kernel_shape=self._kernel_shape,\n        stride=self._stride,\n        rate=self._rate,\n        padding=self._padding,\n        use_bias=use_bias,\n        initializers=clean_dict(self._initializers),\n        partitioners=clean_dict(self._partitioners),\n        regularizers=clean_dict(self._regularizers),\n        name=\"conv\")", "docstring": "Returns new convolution.\n\nArgs:\nuse_bias: Use bias in convolutions. If False, clean_dict removes bias\nentries from initializers, partitioners and regularizers passed to\nthe constructor of the convolution.", "source": "juraj-google-style"}
{"code": "class BasicRNNCell(LayerRNNCell):\n\n    def __init__(self, num_units, activation=None, reuse=None, name=None, dtype=None, **kwargs):\n        warnings.warn('`tf.nn.rnn_cell.BasicRNNCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.SimpleRNNCell`, and will be replaced by that in Tensorflow 2.0.')\n        super(BasicRNNCell, self).__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs)\n        _check_supported_dtypes(self.dtype)\n        if context.executing_eagerly() and tf_config.list_logical_devices('GPU'):\n            logging.warning('%s: Note that this cell is not optimized for performance. Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better performance on GPU.', self)\n        self.input_spec = input_spec.InputSpec(ndim=2)\n        self._num_units = num_units\n        if activation:\n            self._activation = activations.get(activation)\n        else:\n            self._activation = math_ops.tanh\n\n    @property\n    def state_size(self):\n        return self._num_units\n\n    @property\n    def output_size(self):\n        return self._num_units\n\n    @tf_utils.shape_type_conversion\n    def build(self, inputs_shape):\n        if inputs_shape[-1] is None:\n            raise ValueError('Expected inputs.shape[-1] to be known, saw shape: %s' % str(inputs_shape))\n        _check_supported_dtypes(self.dtype)\n        input_depth = inputs_shape[-1]\n        self._kernel = self.add_variable(_WEIGHTS_VARIABLE_NAME, shape=[input_depth + self._num_units, self._num_units])\n        self._bias = self.add_variable(_BIAS_VARIABLE_NAME, shape=[self._num_units], initializer=init_ops.zeros_initializer(dtype=self.dtype))\n        self.built = True\n\n    def call(self, inputs, state):\n        \n        _check_rnn_cell_input_dtypes([inputs, state])\n        gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), self._kernel)\n        gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n        output = self._activation(gate_inputs)\n        return (output, output)\n\n    def get_config(self):\n        config = {'num_units': self._num_units, 'activation': activations.serialize(self._activation), 'reuse': self._reuse}\n        base_config = super(BasicRNNCell, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "The most basic RNN cell.\n\nNote that this cell is not optimized for performance. Please use\n`tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.\n\nArgs:\nnum_units: int, The number of units in the RNN cell.\nactivation: Nonlinearity to use.  Default: `tanh`. It could also be string\nthat is within Keras activation function names.\nreuse: (optional) Python boolean describing whether to reuse variables in an\nexisting scope.  If not `True`, and the existing scope already has the\ngiven variables, an error is raised.\nname: String, the name of the layer. Layers with the same name will share\nweights, but to avoid mistakes we require reuse=True in such cases.\ndtype: Default dtype of the layer (default of `None` means use the type of\nthe first input). Required when `build` is called before `call`.\n**kwargs: Dict, keyword named properties for common layer attributes, like\n`trainable` etc when constructing the cell from configs of get_config().", "source": "github-repos"}
{"code": "def restore_captures(concrete_function, inputs):\n    bound_inputs = [get_tensor_from_node(obj) for obj in inputs]\n    bound_variables = [obj for obj in inputs if isinstance(obj, (variables_lib.Variable, resource_variable_ops.BaseResourceVariable))]\n    captured_inputs_list = []\n    concrete_function.set_variables(bound_variables)\n    if bound_inputs:\n        for bound_input, internal_capture in zip(bound_inputs, concrete_function.inputs[-len(bound_inputs):]):\n            if hasattr(bound_input, '__tf_experimental_restore_capture__'):\n                captured_inputs_list.append(bound_input.__tf_experimental_restore_capture__(concrete_function, internal_capture))\n            else:\n                captured_inputs_list.append(bound_input)\n                concrete_function.graph.replace_capture(bound_input, internal_capture)\n                if internal_capture.dtype == dtypes.resource:\n                    if resource_variable_ops.is_resource_variable(bound_input):\n                        try:\n                            handle = bound_input.handle\n                        except ValueError:\n                            pass\n                        else:\n                            handle_data_util.copy_handle_data(handle, internal_capture)\n                    else:\n                        handle_data_util.copy_handle_data(bound_input, internal_capture)\n                concrete_function.graph.capture(bound_input)\n    if any([inp is None for inp in captured_inputs_list]):\n        warnings.warn(\"Trying to load ShardedVariables using tf.saved_model.load. This won't work if using a tf.distribute.Strategy, and may use excess memory if not using a Strategy. Ignore this warning if using tf.keras.models.load_model.\")\n    concrete_function.set_external_captures(captured_inputs_list)\n    if concrete_function.function_type:\n        concrete_function._function_type = function_type_lib.FunctionType(concrete_function.function_type.parameters.values(), concrete_function.graph.function_captures.capture_types, return_annotation=concrete_function.function_type.output)", "docstring": "Restore captures for the concrete function.\n\nUsed at deserialization time.  For functions that are being deserialized,\nsaved model restores objects that tensors were captured from, but functions\nonly know about their tensors -- object information is destroyed by tracing.\nThis additional logic extracts the tensors which the function originally\ncaptured.\n\nArgs:\nconcrete_function: the concrete function for which to restore captures\ninputs: a list tensors or other Python objects (such as variables) which\ncontain tensors that were originally captured by the function", "source": "github-repos"}
{"code": "def builder_from_source(source, filename, system_includes, nonsystem_includes, quiet=False):\n    return ASTBuilder(tokenize.get_tokens(source), filename, system_includes, nonsystem_includes, quiet=quiet)", "docstring": "Utility method that returns an ASTBuilder from source code.\n\nArgs:\nsource: 'C++ source code'\nfilename: 'file1'\n\nReturns:\nASTBuilder", "source": "codesearchnet"}
{"code": "def __init__(self, on_ui_exit=None, config=None):\n    self._on_ui_exit = on_ui_exit\n    self._command_handler_registry = debugger_cli_common.CommandHandlerRegistry()\n    self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()\n    self._tab_completion_registry.register_tab_comp_context([''], self.CLI_EXIT_COMMANDS + [debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] + debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)\n    self._config = config or cli_config.CLIConfig()\n    self._config_argparser = argparse.ArgumentParser(description='config command', usage=argparse.SUPPRESS)\n    subparsers = self._config_argparser.add_subparsers()\n    set_parser = subparsers.add_parser('set')\n    set_parser.add_argument('property_name', type=str)\n    set_parser.add_argument('property_value', type=str)\n    set_parser = subparsers.add_parser('show')\n    self.register_command_handler('config', self._config_command_handler, self._config_argparser.format_help(), prefix_aliases=['cfg'])", "docstring": "Constructor of the base class.\n\nArgs:\non_ui_exit: (`Callable`) the callback to be called when the UI exits.\nconfig: An instance of `cli_config.CLIConfig()` carrying user-facing\nconfigurations.", "source": "github-repos"}
{"code": "def random_set_distribution(\n    rnd: Optional[tcod.random.Random], dist: int\n) -> None:\n    \n    lib.TCOD_random_set_distribution(rnd.random_c if rnd else ffi.NULL, dist)", "docstring": "Change the distribution mode of a random number generator.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\ndist (int): The distribution mode to use.  Should be DISTRIBUTION_*.", "source": "juraj-google-style"}
{"code": "def set_circular(self, circular: bool, chain: List[Table] = None) -> None:\n        \n        self.circular = circular\n        self.circular_chain = chain or []", "docstring": "Mark this table as circular (or not).\n\nArgs:\ncircular: is it circular?\nchain: if it's circular, this should be the list of tables\nparticipating in the circular chain", "source": "juraj-google-style"}
{"code": "def time_estimate(self, duration, **kwargs):\n        \n        path = '%s/%s/time_estimate' % (self.manager.path, self.get_id())\n        data = {'duration': duration}\n        return self.manager.gitlab.http_post(path, post_data=data, **kwargs)", "docstring": "Set an estimated time of work for the object.\n\nArgs:\nduration (str): Duration in human format (e.g. 3h30)\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "juraj-google-style"}
{"code": "def __init__(\n        self,\n        size,\n        weights=None,\n        bias=True,\n        l2_regularization=0.0,\n        l1_regularization=0.0,\n        trainable=True,\n        named_tensors=None,\n        scope='linear',\n        summary_labels=()\n    ):\n        \n        self.size = size\n        self.weights_init = weights\n        self.bias_init = bias\n        self.l2_regularization = l2_regularization\n        self.l1_regularization = l1_regularization\n        self.trainable = trainable\n        super(Linear, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Linear layer.\n\nArgs:\nsize: Layer size.\nweights: Weight initialization, random if None.\nbias: Bias initialization, random if True, no bias added if False.\nl2_regularization: L2 regularization weight.\nl1_regularization: L1 regularization weight.", "source": "juraj-google-style"}
{"code": "def get_typecast_value(self, value, type):\n    if (type == entities.Variable.Type.BOOLEAN):\n        return (value == 'true')\n    elif (type == entities.Variable.Type.INTEGER):\n        return int(value)\n    elif (type == entities.Variable.Type.DOUBLE):\n        return float(value)\n    else:\n        return value", "docstring": "Helper method to determine actual value based on type of feature variable.\n\nArgs:\nvalue: Value in string form as it was parsed from datafile.\ntype: Type denoting the feature flag type.\n\nReturn:\nValue type-casted based on type of feature variable.", "source": "codesearchnet"}
{"code": "def batch_shape(self):\n    return tensor_shape.as_shape(self._batch_shape())", "docstring": "Shape of a single sample from a single event index as a `TensorShape`.\n\nMay be partially defined or unknown.\n\nThe batch dimensions are indexes into independent, non-identical\nparameterizations of this distribution.\n\nReturns:\nbatch_shape: `TensorShape`, possibly unknown.", "source": "github-repos"}
{"code": "def _build_ragged_tensor_from_value_ranges(starts, limits, step, values):\n    if step is None:\n        step = 1\n    step = ops.convert_to_tensor(step, name='step')\n    if step.dtype.is_integer:\n        step = math_ops.cast(step, starts.dtype)\n    else:\n        raise TypeError('slice strides must be integers or None')\n    value_indices = ragged_math_ops.range(starts, limits, step, row_splits_dtype=starts.dtype)\n    if isinstance(values, ragged_tensor.RaggedTensor):\n        gathered_values = ragged_gather_ops.gather(params=values, indices=value_indices.values)\n    else:\n        gathered_values = array_ops.gather(params=values, indices=value_indices.values)\n    return value_indices.with_values(gathered_values)", "docstring": "Returns a `RaggedTensor` containing the specified sequences of values.\n\nReturns a RaggedTensor `output` where:\n\n```python\noutput.shape[0] = starts.shape[0]\noutput[i] = values[starts[i]:limits[i]:step]\n```\n\nRequires that `starts.shape == limits.shape` and\n`0 <= starts[i] <= limits[i] <= values.shape[0]`.\n\nArgs:\nstarts: 1D integer Tensor specifying the start indices for the sequences of\nvalues to include.\nlimits: 1D integer Tensor specifying the limit indices for the sequences of\nvalues to include.\nstep: Integer value specifying the step size for strided slices.\nvalues: The set of values to select from.\n\nReturns:\nA `RaggedTensor`.\n\nRaises:\nValueError: Until the prerequisite ops are checked in.", "source": "github-repos"}
{"code": "async def getProvStack(self, iden: str):\n    return self.cell.provstor.getProvStack(s_common.uhex(iden))", "docstring": "Return the providence stack associated with the given iden.\n\nArgs:\niden (str):  the iden from splice\n\nNote: the iden appears on each splice entry as the 'prov' property", "source": "codesearchnet"}
{"code": "def _get_path_params(match):\n    result = {}\n    for (var_name, value) in match.groupdict().iteritems():\n        actual_var_name = ApiConfigManager._from_safe_path_param_name(var_name)\n        result[actual_var_name] = urllib.unquote_plus(value)\n    return result", "docstring": "Gets path parameters from a regular expression match.\n\nArgs:\nmatch: A regular expression Match object for a path.\n\nReturns:\nA dictionary containing the variable names converted from base64.", "source": "codesearchnet"}
{"code": "def infer_shapes(nlp: Pipeline, framework: str) -> tuple[list[str], list[str], dict, BatchEncoding]:\n\n    def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):\n        if isinstance(tensor, (tuple, list)):\n            return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]\n        else:\n            axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: 'batch'}\n            if is_input:\n                if len(tensor.shape) == 2:\n                    axes[1] = 'sequence'\n                else:\n                    raise ValueError(f'Unable to infer tensor axes ({len(tensor.shape)})')\n            else:\n                seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]\n                axes.update(dict.fromkeys(seq_axes, 'sequence'))\n        print(f'Found {('input' if is_input else 'output')} {name} with shape: {axes}')\n        return axes\n    tokens = nlp.tokenizer('This is a sample output', return_tensors=framework)\n    seq_len = tokens.input_ids.shape[-1]\n    outputs = nlp.model(**tokens) if framework == 'pt' else nlp.model(tokens)\n    if isinstance(outputs, ModelOutput):\n        outputs = outputs.to_tuple()\n    if not isinstance(outputs, (list, tuple)):\n        outputs = (outputs,)\n    input_vars = list(tokens.keys())\n    input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}\n    outputs_flat = []\n    for output in outputs:\n        if isinstance(output, (tuple, list)):\n            outputs_flat.extend(output)\n        else:\n            outputs_flat.append(output)\n    output_names = [f'output_{i}' for i in range(len(outputs_flat))]\n    output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}\n    dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)\n    return (input_vars, output_names, dynamic_axes, tokens)", "docstring": "Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model\n\nArgs:\nnlp: The pipeline object holding the model to be exported\nframework: The framework identifier to dispatch to the correct inference scheme (pt/tf)\n\nReturns:\n\n- List of the inferred input variable names\n- List of the inferred output variable names\n- Dictionary with input/output variables names as key and shape tensor as value\n- a BatchEncoding reference which was used to infer all the above information", "source": "github-repos"}
{"code": "def GetVolumeByIdentifier(self, volume_identifier):\n    if (not self._is_parsed):\n        self._Parse()\n        self._is_parsed = True\n    return self._volumes[volume_identifier]", "docstring": "Retrieves a specific volume based on the identifier.\n\nArgs:\nvolume_identifier (str): identifier of the volume within\nthe volume system.\n\nReturns:\nVolume: a volume.", "source": "codesearchnet"}
{"code": "def pixel_shuffle(self, vision_features: torch.Tensor, scale_factor: float=0.5):\n    batch_size, width, height, channels = vision_features.size()\n    if height % scale_factor != 0 or width % scale_factor != 0:\n        raise ValueError('Height and width must be divisible by scale_factor for proper downsampling.')\n    vision_features = vision_features.view(batch_size, width, int(height * scale_factor), int(channels / scale_factor))\n    vision_features = vision_features.permute(0, 2, 1, 3).contiguous()\n    vision_features = vision_features.view(batch_size, int(height * scale_factor), int(width * scale_factor), int(channels / scale_factor ** 2))\n    vision_features = vision_features.permute(0, 2, 1, 3).contiguous()\n    return vision_features", "docstring": "Perform pixel shuffle downsampling on vision features.\n\nArgs:\nvision_features (`torch.Tensor`):\nInput tensor of shape (batch_size, width, height, channels).\nscale_factor (`float`, *optional*, defaults to `0.5`):\nFactor by which to downsample. Default is 0.5, which halves the dimensions.\n\nReturns:\nvision_features (`torch.Tensor`):\nDownsampled tensor of shape (batch_size, height*scale_factor, width*scale_factor, channels/(scale_factor^2)).", "source": "github-repos"}
{"code": "def _merge_precomputed_encodings(self, other, validate=True):\n    if self is other or (self._row_splits is other._row_splits and self._row_lengths is other._row_lengths and (self._value_rowids is other._value_rowids) and (self._nrows is other._nrows) and (self._nvals is other._nvals) and (self._uniform_row_length is other._uniform_row_length)):\n        return self\n    nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, 'nrows', validate)\n    nvals, _ = _merge_tensors(self._nvals, other._nvals, 'nvals', validate)\n    uniform_row_length, uniform_row_length_validated = _merge_tensors(self._uniform_row_length, other._uniform_row_length, 'uniform_row_length', validate)\n    if uniform_row_length_validated and nrows_validated:\n        validate = False\n    row_splits, row_splits_validated = _merge_tensors(self._row_splits, other._row_splits, 'row_splits', validate)\n    if row_splits_validated:\n        validate = False\n    row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths, other._row_lengths, 'row_lengths', validate)\n    if row_lengths_validated:\n        validate = False\n    value_rowids, value_rowids_validated = _merge_tensors(self._value_rowids, other._value_rowids, 'value_rowids', validate)\n    if value_rowids_validated and nrows_validated:\n        validate = False\n    if row_splits is self._row_splits and row_lengths is self._row_lengths and (value_rowids is self._value_rowids) and (nrows is self._nrows) and (uniform_row_length is self._uniform_row_length):\n        return self\n    if row_splits is other._row_splits and row_lengths is other._row_lengths and (value_rowids is other._value_rowids) and (nrows is other._nrows) and (uniform_row_length is other._uniform_row_length):\n        return other\n    return RowPartition(row_splits=row_splits, row_lengths=row_lengths, value_rowids=value_rowids, nrows=nrows, uniform_row_length=uniform_row_length, nvals=nvals, internal=_row_partition_factory_key)", "docstring": "Returns a RowPartition that merges encodings from `self` and `other`.\n\nRequires that `self` and `other` describe the same partition.\n\nArgs:\nother: A `RowPartition` that encodes the same partition as `self`.\nvalidate: If true, then add runtime checks to verify that `self` and\n`other` encode the same row partition.\n\nReturns:\nA `RowPartition`.", "source": "github-repos"}
{"code": "def disassemble(self, annotate=False, blocks=False):\n    ops = disassemble(self.co_code, self.internals)\n    if annotate:\n        ops = [self.annotate_op(op) for op in ops]\n    if blocks:\n        return blocks_from_ops(ops)\n    else:\n        return ops", "docstring": "Disassemble the bytecode of this code object into a series of\nopcodes and labels. Can also annotate the opcodes and group\nthe opcodes into blocks based on the labels.\n\nArguments:\nannotate(bool): Whether to annotate the operations.\nblocks(bool): Whether to group the operations into blocks.\n\nReturns:\nlist: A list of :class:`Op` (or :class:`AnnotatedOp`) instances\nand labels.", "source": "codesearchnet"}
{"code": "def get_connection_id_by_endpoint(self, endpoint):\n    with self._connections_lock:\n        for connection_id in self._connections:\n            connection_info = self._connections[connection_id]\n            if (connection_info.uri == endpoint):\n                return connection_id\n        raise KeyError()", "docstring": "Returns the connection id associated with a publically\nreachable endpoint or raises KeyError if the endpoint is not\nfound.\n\nArgs:\nendpoint (str): A zmq-style uri which identifies a publically\nreachable endpoint.", "source": "codesearchnet"}
{"code": "class CategoricalHinge(MeanMetricWrapper):\n\n    def __init__(self, name='categorical_hinge', dtype=None):\n        super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)", "docstring": "Computes the categorical hinge metric between `y_true` and `y_pred`.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.CategoricalHinge()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])\n>>> m.result().numpy()\n1.4000001\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],\n...                sample_weight=[1, 0])\n>>> m.result().numpy()\n1.2\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[tf.keras.metrics.CategoricalHinge()])\n```", "source": "github-repos"}
{"code": "def __init__(self, position=(0., 0., 0.), rotation=(0., 0., 0.), scale=1., orientation0=(1., 0., 0.),\n                 **kwargs):\n        \n        super(Physical, self).__init__(**kwargs)\n\n        self.orientation0 = np.array(orientation0, dtype=np.float32)\n        self.rotation = coordinates.RotationEulerDegrees(*rotation)\n        self.position = coordinates.Translation(*position)\n        if hasattr(scale, '__iter__'):\n            if 0 in scale:\n                raise ValueError(\"Scale can not be set to 0\")\n            self.scale = coordinates.Scale(*scale)\n        else:\n            if scale is 0:\n                raise ValueError(\"Scale can not be set to 0\")\n            self.scale = coordinates.Scale(scale)\n\n        self._model_matrix = np.identity(4, dtype=np.float32)\n        self._normal_matrix = np.identity(4, dtype=np.float32)\n        self._view_matrix = np.identity(4, dtype=np.float32)", "docstring": "XYZ Position, Scale and XYZEuler Rotation Class.\n\nArgs:\nposition: (x, y, z) translation values.\nrotation: (x, y, z) rotation values\nscale (float): uniform scale factor. 1 = no scaling.", "source": "juraj-google-style"}
{"code": "def erfinv(x, name=None):\n    with ops.name_scope(name, 'erfinv', [x]):\n        return gen_math_ops.erfinv(x)", "docstring": "Compute inverse error function.\n\nGiven `x`, compute the inverse error function of `x`. This function\nis the inverse of `tf.math.erf`.\n\nArgs:\nx: `Tensor` with type `float` or `double`.\nname: A name for the operation (optional).\nReturns:\nInverse error function of `x`.", "source": "github-repos"}
{"code": "def multinomial_sample(x, vocab_size=None, sampling_method='random', temperature=1.0):\n    vocab_size = (vocab_size or common_layers.shape_list(x)[(- 1)])\n    if ((sampling_method == 'random') and (temperature > 0.0)):\n        samples = tf.multinomial((tf.reshape(x, [(- 1), vocab_size]) / temperature), 1)\n    else:\n        samples = tf.argmax(x, axis=(- 1))\n    reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:(- 1)])\n    return reshaped_samples", "docstring": "Multinomial sampling from a n-dimensional tensor.\n\nArgs:\nx: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.\nvocab_size: Number of classes in multinomial distribution.\nsampling_method: String, \"random\" or otherwise deterministic.\ntemperature: Positive float.\n\nReturns:\nTensor of shape [...].", "source": "codesearchnet"}
{"code": "def task_table(self, task_id=None):\n    self._check_connected()\n    if (task_id is not None):\n        task_id = ray.TaskID(hex_to_binary(task_id))\n        return self._task_table(task_id)\n    else:\n        task_table_keys = self._keys((ray.gcs_utils.TablePrefix_RAYLET_TASK_string + '*'))\n        task_ids_binary = [key[len(ray.gcs_utils.TablePrefix_RAYLET_TASK_string):] for key in task_table_keys]\n        results = {}\n        for task_id_binary in task_ids_binary:\n            results[binary_to_hex(task_id_binary)] = self._task_table(ray.TaskID(task_id_binary))\n        return results", "docstring": "Fetch and parse the task table information for one or more task IDs.\n\nArgs:\ntask_id: A hex string of the task ID to fetch information about. If\nthis is None, then the task object table is fetched.\n\nReturns:\nInformation from the task table.", "source": "codesearchnet"}
{"code": "def _search_step(self, state):\n    (new_seq, new_log_probs, new_cache) = self._grow_alive_seq(state)\n    alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_cache)\n    finished_state = self._get_new_finished_state(state, new_seq, new_log_probs)\n    new_state = {_StateKeys.CUR_INDEX: (state[_StateKeys.CUR_INDEX] + 1)}\n    new_state.update(alive_state)\n    new_state.update(finished_state)\n    return [new_state]", "docstring": "Beam search loop body.\n\nGrow alive sequences by a single ID. Sequences that have reached the EOS\ntoken are marked as finished. The alive and finished sequences with the\nhighest log probabilities and scores are returned.\n\nA sequence's finished score is calculating by dividing the log probability\nby the length normalization factor. Without length normalization, the\nsearch is more likely to return shorter sequences.\n\nArgs:\nstate: A dictionary with the current loop state.\n\nReturns:\nnew state dictionary.", "source": "codesearchnet"}
{"code": "def get_location_from_HDX_code(code, locations=None, configuration=None):\n        \n        \n        if locations is None:\n            locations = Locations.validlocations(configuration)\n        for locdict in locations:\n            if code.upper() == locdict['name'].upper():\n                return locdict['title']\n        return None", "docstring": "Get location from HDX location code\n\nArgs:\ncode (str): code for which to get location name\nlocations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[str]: location name", "source": "juraj-google-style"}
{"code": "def from_obj(cls, cls_obj):\n    if (not cls_obj):\n        return None\n    typekey = cls.objkey(cls_obj)\n    klass = cls.entity_class(typekey)\n    return klass.from_obj(cls_obj)", "docstring": "Parse the generateDS object and return an Entity instance.\n\nThis will attempt to extract type information from the input\nobject and pass it to entity_class to resolve the correct class\nfor the type.\n\nArgs:\ncls_obj: A generateDS object.\n\nReturns:\nAn Entity instance.", "source": "codesearchnet"}
{"code": "def upgrade(self, remote=None):\n        \n        if self.enabled:\n            raise errors.DockerError(\n                'Plugin must be disabled before upgrading.'\n            )\n\n        if remote is None:\n            remote = self.name\n        privileges = self.client.api.plugin_privileges(remote)\n        for d in self.client.api.upgrade_plugin(self.name, remote, privileges):\n            yield d\n        self._reload()", "docstring": "Upgrade the plugin.\n\nArgs:\nremote (string): Remote reference to upgrade to. The\n``:latest`` tag is optional and is the default if omitted.\nDefault: this plugin's name.\n\nReturns:\nA generator streaming the decoded API logs", "source": "juraj-google-style"}
{"code": "def parse_view(query):\n    try:\n        idx = query.lower().index('where')\n        query = query[:idx]\n    except ValueError:\n        pass\n    if (not query.endswith(';')):\n        query = query.strip()\n        query += ';'\n    result = _view_stmt.parseString(query)\n    return View(result)", "docstring": "Parses asql query to view object.\n\nArgs:\nquery (str): asql query\n\nReturns:\nView instance: parsed view.", "source": "codesearchnet"}
{"code": "def _RunOsLoginControl(self, params):\n    \n    try:\n      return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params)\n    except OSError as e:\n      if e.errno == errno.ENOENT:\n        return None\n      else:\n        raise", "docstring": "Run the OS Login control script.\n\nArgs:\nparams: list, the params to pass to the script\n\nReturns:\nint, the return code from the call, or None if the script is not found.", "source": "juraj-google-style"}
{"code": "def combine_metadata(*metadata_objects, **kwargs):\n    average_times = kwargs.get('average_times', True)\n    shared_keys = None\n    info_dicts = []\n    for metadata_object in metadata_objects:\n        if isinstance(metadata_object, dict):\n            metadata_dict = metadata_object\n        elif hasattr(metadata_object, 'attrs'):\n            metadata_dict = metadata_object.attrs\n        else:\n            continue\n        info_dicts.append(metadata_dict)\n        if (shared_keys is None):\n            shared_keys = set(metadata_dict.keys())\n        else:\n            shared_keys &= set(metadata_dict.keys())\n    shared_info = {}\n    for k in shared_keys:\n        values = [nfo[k] for nfo in info_dicts]\n        any_arrays = any([isinstance(val, np.ndarray) for val in values])\n        if any_arrays:\n            if all((np.all((val == values[0])) for val in values[1:])):\n                shared_info[k] = values[0]\n        elif (('time' in k) and isinstance(values[0], datetime) and average_times):\n            shared_info[k] = average_datetimes(values)\n        elif all(((val == values[0]) for val in values[1:])):\n            shared_info[k] = values[0]\n    return shared_info", "docstring": "Combine the metadata of two or more Datasets.\n\nIf any keys are not equal or do not exist in all provided dictionaries\nthen they are not included in the returned dictionary.\nBy default any keys with the word 'time' in them and consisting\nof datetime objects will be averaged. This is to handle cases where\ndata were observed at almost the same time but not exactly.\n\nArgs:\n*metadata_objects: MetadataObject or dict objects to combine\naverage_times (bool): Average any keys with 'time' in the name\n\nReturns:\ndict: the combined metadata", "source": "codesearchnet"}
{"code": "async def _overlap(items, overlap_attr, client=None, get_method=None):\n    overlap = set.intersection(*(getattr(item, overlap_attr) for item in items))\n    if ((client is None) or (get_method is None)):\n        return overlap\n    results = []\n    for item in overlap:\n        result = (await getattr(client, get_method)(id_=item.id_))\n        results.append(result)\n    return results", "docstring": "Generic overlap implementation.\n\nArguments:\nitem (:py:class:`collections.abc.Sequence`): The objects to\nfind overlaps for.\noverlap_attr (:py:class:`str`): The attribute of the items to use\nas input for the overlap.\nclient (:py:class:`~.TMDbClient`, optional): The TMDb client\nto extract additional information about the overlap.\nget_method (:py:class:`str`, optional): The method of the\nclient to use for extracting additional information.\n\nReturns:\n:py:class:`list`: The relevant result objects.", "source": "codesearchnet"}
{"code": "class _TextEmbeddingHandler(_EmbeddingHandler):\n\n    def _validate_column_data(self, batch):\n        if not isinstance(batch[0], (str, bytes)):\n            raise TypeError(f'Embeddings can only be generated on dict[str, str].Got dict[str, {type(batch[0])}] instead.')\n\n    def get_metrics_namespace(self) -> str:\n        return self._underlying.get_metrics_namespace() or 'BeamML_TextEmbeddingHandler'", "docstring": "A ModelHandler intended to be work on list[dict[str, str]] inputs.\n\nThe inputs to the model handler are expected to be a list of dicts.\n\nFor example, if the original mode is used with RunInference to take a\nPCollection[E] to a PCollection[P], this ModelHandler would take a\nPCollection[dict[str, E]] to a PCollection[dict[str, P]].\n\n_TextEmbeddingHandler will accept an EmbeddingsManager instance, which\ncontains the details of the model to be loaded and the inference_fn to be\nused. The purpose of _TextEmbeddingHandler is to generate embeddings for\ntext inputs using the EmbeddingsManager instance.\n\nIf the input is not a text column, a RuntimeError will be raised.\n\nThis is an internal class and offers no backwards compatibility guarantees.\n\nArgs:\nembeddings_manager: An EmbeddingsManager instance.", "source": "github-repos"}
{"code": "def expectation(self, observable: Union[tf.Tensor, hamiltonian.Hamiltonian]):\n    raise NotImplementedError()", "docstring": "Take the expectation value of an observable against this dataset.\n\nArgs:\nobservable: Hermitian operator to measure.  If `tf.Tensor`, it is of type\n`tf.string` with shape [1], result of  calling `tfq.convert_to_tensor`\non a list of `cirq.PauliSum`, `[op]`.  Otherwise, a Hamiltonian.\n\nReturns:\nScalar `tf.Tensor` which is the expectation value of `observable` against\nthis quantum data source.", "source": "github-repos"}
{"code": "def _compute_nfps_real(counts, sizes):\n    \n    nfps = np.zeros((len(sizes), len(sizes)))\n    \n    \n    for l in range(len(sizes)):\n        for u in range(l, len(sizes)):\n            nfps[l, u] = _compute_nfp_real(l, u, counts, sizes)\n    return nfps", "docstring": "Computes the matrix of expected false positives for all possible\nsub-intervals of the complete domain of set sizes.\n\nArgs:\ncounts: the complete distribution of set sizes.\nsizes: the complete domain of set sizes.\n\nReturn (np.array): the 2-D array of expected number of false positives\nfor every pair of [l, u] interval, where l is axis-0 and u is\naxis-1.", "source": "juraj-google-style"}
{"code": "def from_version(cls, version, op=None):\n        \n        lower = None\n        upper = None\n\n        if op is None:\n            lower = _LowerBound(version, True)\n            upper = _UpperBound(version.next(), False)\n        elif op in (\"eq\", \"==\"):\n            lower = _LowerBound(version, True)\n            upper = _UpperBound(version, True)\n        elif op in (\"gt\", \">\"):\n            lower = _LowerBound(version, False)\n        elif op in (\"gte\", \">=\"):\n            lower = _LowerBound(version, True)\n        elif op in (\"lt\", \"<\"):\n            upper = _UpperBound(version, False)\n        elif op in (\"lte\", \"<=\"):\n            upper = _UpperBound(version, True)\n        else:\n            raise VersionError(\"Unknown bound operation '%s'\" % op)\n\n        bound = _Bound(lower, upper)\n        range = cls(None)\n        range.bounds = [bound]\n        return range", "docstring": "Create a range from a version.\n\nArgs:\nversion: Version object. This is used as the upper/lower bound of\nthe range.\nop: Operation as a string. One of 'gt'/'>', 'gte'/'>=', lt'/'<',\n'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created\nthat contains the version superset.\n\nReturns:\n`VersionRange` object.", "source": "juraj-google-style"}
{"code": "def non_fluent_variables(self) -> FluentParamsList:\n    fluents = self.domain.non_fluents\n    ordering = self.domain.non_fluent_ordering\n    return self._fluent_params(fluents, ordering)", "docstring": "Returns the instantiated non-fluents in canonical order.\n\nReturns:\nSequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name\nand a list of instantiated fluents represented as strings.", "source": "codesearchnet"}
{"code": "def omega(self, structure, n, u):\n    l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)\n    l0 *= 1e-10\n    weight = (float(structure.composition.weight) * 1.66054e-27)\n    vol = (structure.volume * 1e-30)\n    vel = (((1000000000.0 * self[0].einsum_sequence([n, u, n, u])) / (weight / vol)) ** 0.5)\n    return (vel / l0)", "docstring": "Finds directional frequency contribution to the heat\ncapacity from direction and polarization\n\nArgs:\nstructure (Structure): Structure to be used in directional heat\ncapacity determination\nn (3x1 array-like): direction for Cv determination\nu (3x1 array-like): polarization direction, note that\nno attempt for verification of eigenvectors is made", "source": "codesearchnet"}
{"code": "def create_header(cls, request_id=None):\n        \n        header = {\n            'msgid'   : bkserial.make_id(),\n            'msgtype' : cls.msgtype\n        }\n        if request_id is not None:\n            header['reqid'] = request_id\n        return header", "docstring": "Return a message header fragment dict.\n\nArgs:\nrequest_id (str or None) :\nMessage ID of the message this message replies to\n\nReturns:\ndict : a message header", "source": "juraj-google-style"}
{"code": "def tensor_not_equals(self, other):\n    if other is None:\n        return True\n    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():\n        self, other = override_binary_operator.maybe_promote_tensors(self, other)\n        return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)\n    else:\n        return self is not other", "docstring": "The operation invoked by the `Tensor.__ne__` operator.\n\nCompares two tensors element-wise for inequality if they are\nbroadcast-compatible; or returns True if they are not broadcast-compatible.\n(Note that this behavior differs from `tf.math.not_equal`, which raises an\nexception if the two tensors are not broadcast-compatible.)\n\nPurpose in the API:\n\nThis method is exposed in TensorFlow's API so that library developers\ncan register dispatching for `Tensor.__ne__` to allow it to handle\ncustom composite tensors & other custom objects.\n\nThe API symbol is not intended to be called by users directly and does\nappear in TensorFlow's generated documentation.\n\nArgs:\nself: The left-hand side of the `!=` operator.\nother: The right-hand side of the `!=` operator.\n\nReturns:\nThe result of the elementwise `!=` operation, or `True` if the arguments\nare not broadcast-compatible.", "source": "github-repos"}
{"code": "def logical_downlinks(self):\n    if (not self.__logical_downlinks):\n        self.__logical_downlinks = LogicalDownlinks(self.__connection)\n    return self.__logical_downlinks", "docstring": "Gets the LogicalDownlinks API client.\n\nReturns:\nLogicalDownlinks:", "source": "codesearchnet"}
{"code": "def _remove_native_segments(input_func):\n    input_graph_def = input_func.graph.as_graph_def()\n    nodes_deleted = 0\n    for func_id in reversed(range(len(input_graph_def.library.function))):\n        f = input_graph_def.library.function[func_id]\n        if 'native_segment' in f.signature.name:\n            nodes_deleted += 1\n            while context.context().has_function(f.signature.name):\n                context.context().remove_function(f.signature.name)\n            del input_graph_def.library.function[func_id]\n    logging.info(f'Found and deleted native segments from {nodes_deleted} TRTEngineOp nodes.')\n    for node in input_graph_def.node:\n        if node.op == 'TRTEngineOp':\n            del node.attr['segment_func']\n    for func in input_graph_def.library.function:\n        for node in func.node_def:\n            if node.op == 'TRTEngineOp':\n                del node.attr['segment_func']\n    new_func = _construct_function_from_graph_def(input_func, input_graph_def)\n    return new_func", "docstring": "Remove native segments from the input TF-TRT Converted Function.\n\nArgs:\ninput_func: provide the concrete function with native segment nodes. The\ntransformed output func will not contain any native segment nodes. All the\nTRTEngineOp references will be deleted and reset to default empty func.", "source": "github-repos"}
{"code": "def calculate_keys_by_mapreduce_state(cls, mapreduce_state):\n    if (mapreduce_state is None):\n        return []\n    keys = []\n    for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):\n        shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)\n        keys.append(cls.get_key_by_shard_id(shard_id))\n    return keys", "docstring": "Calculate all shard states keys for given mapreduce.\n\nArgs:\nmapreduce_state: MapreduceState instance\n\nReturns:\nA list of keys for shard states, sorted by shard id.\nThe corresponding shard states may not exist.", "source": "codesearchnet"}
{"code": "def try_storage(self, identifier, req, resp, resource, uri_kwargs):\n    if (identifier is None):\n        user = None\n    elif (self.user_storage is not None):\n        user = self.user_storage.get_user(self, identifier, req, resp, resource, uri_kwargs)\n    elif ((self.user_storage is None) and (not self.only_with_storage)):\n        user = {'identified_with': self, 'identifier': identifier}\n    else:\n        user = None\n    return user", "docstring": "Try to find user in configured user storage object.\n\nArgs:\nidentifier: User identifier.\n\nReturns:\nuser object.", "source": "codesearchnet"}
{"code": "def eval_adiabatic_limit(YABFGN, Ytilde, P0):\n    (Y, A, B, F, G, N) = YABFGN\n    Klim = ((P0 * (B - ((A * Ytilde) * A))) * P0).expand().simplify_scalar()\n    Hlim = (((Klim - Klim.dag()) / 2) / I).expand().simplify_scalar()\n    Ldlim = ((P0 * (G - ((A * Ytilde) * F))) * P0).expand().simplify_scalar()\n    dN = (identity_matrix(N.shape[0]) + ((F.H * Ytilde) * F))\n    Nlim = (((P0 * N) * dN) * P0).expand().simplify_scalar()\n    return SLH(Nlim.dag(), Ldlim.dag(), Hlim.dag())", "docstring": "Compute the limiting SLH model for the adiabatic approximation\n\nArgs:\nYABFGN: The tuple (Y, A, B, F, G, N)\nas returned by prepare_adiabatic_limit.\nYtilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.\nP0: The projector onto the null-space of Y.\n\nReturns:\nSLH: Limiting SLH model", "source": "codesearchnet"}
{"code": "def run(self, variables=None, overrides=None):\n    old_dir = os.getcwd()\n    try:\n        os.chdir(self.run_directory)\n        initialized_steps = self.prepare(variables)\n        owned_resources = {}\n        try:\n            print(('Running in %s' % self.run_directory))\n            (initialized_resources, owned_resources) = self._prepare_resources(variables, overrides)\n            for (i, (step, decl)) in enumerate(zip(initialized_steps, self.steps)):\n                print(('===> Step %d: %s\\t Description: %s' % ((i + 1), self.steps[i][0].__name__, self.steps[i][1].get('description', ''))))\n                (runtime, out) = _run_step(step, decl, initialized_resources)\n                print(('======> Time Elapsed: %.2f seconds' % runtime))\n                if (out is not None):\n                    print(out[1])\n        finally:\n            self._cleanup_resources(owned_resources)\n    finally:\n        os.chdir(old_dir)", "docstring": "Initialize and run this recipe.\n\nBy default all necessary shared resources are created and destroyed in\nthis function unless you pass them preinitizlied in overrides, in\nwhich case they are used as is.  The overrides parameter is designed\nto allow testability of iotile-ship recipes by inspecting the shared\nresources after the recipe has finished to ensure that it was properly\nset up.\n\nArgs:\nvariables (dict): An optional dictionary of variable assignments.\nThere must be a single assignment for all free variables that\ndo not have a default value, otherwise the recipe will not\nrun.\noverrides (dict): An optional dictionary of shared resource\nobjects that should be used instead of creating that resource\nand destroying it inside this function.", "source": "codesearchnet"}
{"code": "def __init__(self, env, past_indices, flatten):\n    \n    if 0 not in past_indices:\n      raise KeyError('Past indices should include 0 for the current frame.')\n    self._env = env\n    self._past_indices = past_indices\n    self._step = 0\n    self._buffer = None\n    self._capacity = max(past_indices) + 1\n    self._flatten = flatten", "docstring": "Augment the observation with past observations.\n\nImplemented as a Numpy ring buffer holding the necessary past observations.\n\nArgs:\nenv: OpenAI Gym environment to wrap.\npast_indices: List of non-negative integers indicating the time offsets\nfrom the current time step of observations to include.\nflatten: Concatenate the past observations rather than stacking them.\n\nRaises:\nKeyError: The current observation is not included in the indices.", "source": "juraj-google-style"}
{"code": "def observe_reward_value(self, state_key, action_key):\n        \n        reward_value = 0.0\n        if state_key in self.__state_action_list_dict:\n            if action_key in self.__state_action_list_dict[state_key]:\n                reward_value = 1.0\n\n        return reward_value", "docstring": "Compute the reward value.\n\nArgs:\nstate_key:              The key of state.\naction_key:             The key of action.\n\nReturns:\nReward value.", "source": "juraj-google-style"}
{"code": "def add_file_handler(logger,level,tags):\n  \n  f_formatter = logging.Formatter('%(asctime)s:%(name)s:\\t%(message)s')\n  filename = get_logfile_name(tags)\n  handler = logging.FileHandler(filename=filename,mode=\"a\")\n  handler.setLevel(level)\n  handler.setFormatter(f_formatter)\n  logger.addHandler(handler)", "docstring": "Creates and Adds a file handler (`logging.FileHandler` instance) to the specified logger.\n\nArgs:\nlogger: The `logging.Logger` instance to add the new file handler to.\nlevel: `str`. The logging level for which the handler accepts messages, i.e. `logging.INFO`.\ntags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag\nwill be added in the same order as provided.", "source": "juraj-google-style"}
{"code": "def from_file(cls, vert, frag, **kwargs):\n        \n        vert_program = open(vert).read()\n        frag_program = open(frag).read()\n        return cls(vert=vert_program, frag=frag_program, **kwargs)", "docstring": "Reads the shader programs, given the vert and frag filenames\n\nArguments:\n- vert (str): The filename of the vertex shader program (ex: 'vertshader.vert')\n- frag (str): The filename of the fragment shader program (ex: 'fragshader.frag')\n\nReturns:\n- shader (Shader): The Shader using these files.", "source": "juraj-google-style"}
{"code": "def build_request_relationship(type, ids):\n    \n    if ids is None:\n        return {\n            'data': None\n        }\n    elif isinstance(ids, str):\n        return {\n            'data': {'id': ids, 'type': type}\n        }\n    else:\n        return {\n            \"data\": [{\"id\": id, \"type\": type} for id in ids]\n        }", "docstring": "Build a relationship list.\n\nA relationship list is used to update relationships between two\nresources. Setting sensors on a label, for example, uses this\nfunction to construct the list of sensor ids to pass to the Helium\nAPI.\n\nArgs:\n\ntype(string): The resource type for the ids in the relationship\nids([uuid] or uuid): Just one or a list of resource uuids to use\nin the relationship\n\nReturns:\n\nA ready to use relationship JSON object.", "source": "juraj-google-style"}
{"code": "def converted_call(f, args, kwargs, caller_fn_scope=None, options=None):\n    logging.log(1, 'Converted call: %s\\n    args: %s\\n    kwargs: %s\\n', f, args, kwargs)\n    if options is None:\n        if caller_fn_scope is None:\n            raise ValueError('either caller_fn_scope or options must have a value')\n        options = caller_fn_scope.callopts\n    if conversion.is_in_allowlist_cache(f, options):\n        logging.log(2, 'Allowlisted %s: from cache', f)\n        return _call_unconverted(f, args, kwargs, options, False)\n    if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n        logging.log(2, 'Allowlisted: %s: AutoGraph is disabled in context', f)\n        return _call_unconverted(f, args, kwargs, options, False)\n    if is_autograph_artifact(f):\n        logging.log(2, 'Permanently allowed: %s: AutoGraph artifact', f)\n        return _call_unconverted(f, args, kwargs, options)\n    if isinstance(f, functools.partial):\n        new_kwargs = {}\n        if f.keywords is not None:\n            new_kwargs = f.keywords.copy()\n        if kwargs is not None:\n            new_kwargs.update(kwargs)\n        new_args = f.args + args\n        logging.log(3, 'Forwarding call of partial %s with\\n%s\\n%s\\n', f, new_args, new_kwargs)\n        return converted_call(f.func, new_args, new_kwargs, caller_fn_scope=caller_fn_scope, options=options)\n    if inspect_utils.isbuiltin(f):\n        if f is eval:\n            return py_builtins.eval_in_original_context(f, args, caller_fn_scope)\n        if f is super:\n            return py_builtins.super_in_original_context(f, args, caller_fn_scope)\n        if f is globals:\n            return py_builtins.globals_in_original_context(caller_fn_scope)\n        if f is locals:\n            return py_builtins.locals_in_original_context(caller_fn_scope)\n        if kwargs:\n            return py_builtins.overload_of(f)(*args, **kwargs)\n        else:\n            return py_builtins.overload_of(f)(*args)\n    if conversion.is_unsupported(f):\n        return _call_unconverted(f, args, kwargs, options)\n    if not options.user_requested and conversion.is_allowlisted(f):\n        return _call_unconverted(f, args, kwargs, options)\n    if not options.internal_convert_user_code:\n        return _call_unconverted(f, args, kwargs, options)\n    try:\n        if inspect.ismethod(f) or inspect.isfunction(f):\n            target_entity = f\n            effective_args = args\n            f_self = getattr(f, '__self__', None)\n            if f_self is not None:\n                if isinstance(f_self, tf_method_target.TfMethodTarget):\n                    f_self = f_self.target\n                effective_args = (f_self,) + effective_args\n        elif hasattr(f, '__class__') and hasattr(f.__class__, '__call__'):\n            target_entity = f.__class__.__call__\n            effective_args = (f,) + args\n        else:\n            target_entity = f\n            raise NotImplementedError('unknown callable type \"%s\"' % type(f))\n    except Exception as e:\n        logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)\n        if is_autograph_strict_conversion_mode():\n            raise\n        return _fall_back_unconverted(f, args, kwargs, options, e)\n    if not hasattr(target_entity, '__code__'):\n        logging.log(2, 'Permanently allowed: %s: native binding', target_entity)\n        return _call_unconverted(f, args, kwargs, options)\n    elif hasattr(target_entity.__code__, 'co_filename') and 
target_entity.__code__.co_filename == '<string>':\n        logging.log(2, 'Permanently allowed: %s: dynamic code (exec?)', target_entity)\n        return _call_unconverted(f, args, kwargs, options)\n    try:\n        program_ctx = converter.ProgramContext(options=options)\n        converted_f = _convert_actual(target_entity, program_ctx)\n        if logging.has_verbosity(2):\n            _log_callargs(converted_f, effective_args, kwargs)\n    except Exception as e:\n        logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)\n        if is_autograph_strict_conversion_mode():\n            raise\n        return _fall_back_unconverted(f, args, kwargs, options, e)\n    with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():\n        try:\n            if kwargs is not None:\n                result = converted_f(*effective_args, **kwargs)\n            else:\n                result = converted_f(*effective_args)\n        except Exception as e:\n            _attach_error_metadata(e, converted_f)\n            raise\n    return result", "docstring": "Converts a function call inline.\n\nFor internal use only.\n\nNote: The argument list is optimized for readability of generated code, which\nmay look like this:\n\nag__.converted_call(f, (arg1, arg2), None, fscope)\nag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope)\nag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope)\n\nArgs:\nf: The function to convert.\nargs: Tuple, the original positional arguments of f\nkwargs: Optional[Dict], the original keyword arguments of f\ncaller_fn_scope: Optional[function_wrappers.FunctionScope], the function\nscope of the converted function in which this call was originally made.\noptions: Optional[converter.ConversionOptions], conversion options. If not\nspecified, the value of caller_fn_scope.callopts is used. Either options\nor caller_fn_scope must be present.\n\nReturns:\nAny, the result of executing a possibly-converted `f` with the given\narguments.", "source": "github-repos"}
{"code": "def upper_diag_self_prodx(list_):\n    return [(item1, item2) for (n1, item1) in enumerate(list_) for (n2, item2) in enumerate(list_) if (n1 < n2)]", "docstring": "upper diagnoal of cartesian product of self and self.\nWeird name. fixme\n\nArgs:\nlist_ (list):\n\nReturns:\nlist:\n\nCommandLine:\npython -m utool.util_alg --exec-upper_diag_self_prodx\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_alg import *  # NOQA\n>>> list_ = [1, 2, 3]\n>>> result = upper_diag_self_prodx(list_)\n>>> print(result)\n[(1, 2), (1, 3), (2, 3)]", "source": "codesearchnet"}
{"code": "def get_marginal_distribution(self, index_points=None):\n    with self._name_scope('get_marginal_distribution'):\n        index_points = self._get_index_points(index_points)\n        covariance = self._compute_covariance(index_points)\n        loc = self._mean_fn(index_points)\n        if self._is_univariate_marginal(index_points):\n            scale = tf.sqrt(covariance)\n            loc = tf.squeeze(loc, axis=(- 1))\n            return normal.Normal(loc=loc, scale=scale, validate_args=self._validate_args, allow_nan_stats=self._allow_nan_stats, name='marginal_distribution')\n        else:\n            scale = tf.linalg.LinearOperatorLowerTriangular(tf.linalg.cholesky(_add_diagonal_shift(covariance, self.jitter)), is_non_singular=True, name='GaussianProcessScaleLinearOperator')\n            return mvn_linear_operator.MultivariateNormalLinearOperator(loc=loc, scale=scale, validate_args=self._validate_args, allow_nan_stats=self._allow_nan_stats, name='marginal_distribution')", "docstring": "Compute the marginal of this GP over function values at `index_points`.\n\nArgs:\nindex_points: `float` `Tensor` representing finite (batch of) vector(s) of\npoints in the index set over which the GP is defined. Shape has the form\n`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature\ndimensions and must equal `kernel.feature_ndims` and `e` is the number\n(size) of index points in each batch. Ultimately this distribution\ncorresponds to a `e`-dimensional multivariate normal. The batch shape\nmust be broadcastable with `kernel.batch_shape` and any batch dims\nyielded by `mean_fn`.\n\nReturns:\nmarginal: a `Normal` or `MultivariateNormalLinearOperator` distribution,\naccording to whether `index_points` consists of one or many index\npoints, respectively.", "source": "codesearchnet"}
{"code": "def downstream(self, node):\n    graph = self.graph\n    if (node not in graph):\n        raise KeyError(('node %s is not in graph' % node))\n    return list(graph[node])", "docstring": "Returns a list of all nodes this node has edges towards.\n\nArgs:\nnode (str): The node whose downstream nodes you want to find.\n\nReturns:\nlist: A list of nodes that are immediately downstream from the\nnode.", "source": "codesearchnet"}
{"code": "def __find_variant(self, value):\n    if isinstance(value, bool):\n        return messages.Variant.BOOL\n    elif isinstance(value, six.integer_types):\n        return messages.Variant.INT64\n    elif isinstance(value, float):\n        return messages.Variant.DOUBLE\n    elif isinstance(value, six.string_types):\n        return messages.Variant.STRING\n    elif isinstance(value, (list, tuple)):\n        variant_priority = [None, messages.Variant.INT64, messages.Variant.DOUBLE, messages.Variant.STRING]\n        chosen_priority = 0\n        for v in value:\n            variant = self.__find_variant(v)\n            try:\n                priority = variant_priority.index(variant)\n            except IndexError:\n                priority = (- 1)\n            if (priority > chosen_priority):\n                chosen_priority = priority\n        return variant_priority[chosen_priority]\n    return None", "docstring": "Find the messages.Variant type that describes this value.\n\nArgs:\nvalue: The value whose variant type is being determined.\n\nReturns:\nThe messages.Variant value that best describes value's type,\nor None if it's a type we don't know how to handle.", "source": "codesearchnet"}
{"code": "def add_common_arguments(self, parser, has_device=False):\n        \n        if has_device:\n            parser.add_argument('-t', '--tif', required=True,\n                                type=str.lower, choices=['jtag', 'swd'],\n                                help='target interface (JTAG | SWD)')\n            parser.add_argument('-d', '--device', required=True,\n                                help='specify the target device name')\n\n        group = parser.add_mutually_exclusive_group(required=False)\n        group.add_argument('-s', '--serial', dest='serial_no',\n                           help='specify the J-Link serial number')\n        group.add_argument('-i', '--ip_addr', dest='ip_addr',\n                           help='J-Link IP address')\n\n        return None", "docstring": "Adds common arguments to the given parser.\n\nCommon arguments for a J-Link command are the target interface, and\nJ-Link serial number or IP address.\n\nArgs:\nself (Command): the ``Command`` instance\nparser (argparse.ArgumentParser): the parser to add the arguments to\nhas_device (bool): boolean indicating if it has the device argument\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def normalize(self, image: 'torch.Tensor', mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], **kwargs) -> 'torch.Tensor':\n    return F.normalize(image, mean, std)", "docstring": "Normalize an image. image = (image - image_mean) / image_std.\n\nArgs:\nimage (`torch.Tensor`):\nImage to normalize.\nmean (`torch.Tensor`, `float` or `Iterable[float]`):\nImage mean to use for normalization.\nstd (`torch.Tensor`, `float` or `Iterable[float]`):\nImage standard deviation to use for normalization.\n\nReturns:\n`torch.Tensor`: The normalized image.", "source": "github-repos"}
{"code": "def StreamMemory(self, process, offset=0, amount=None):\n    reader = MemoryReader(process, offset=offset)\n    return self.Stream(reader, amount=amount)", "docstring": "Streams chunks of memory of a given process starting at given offset.\n\nArgs:\nprocess: A platform-specific `Process` instance.\noffset: An integer offset at which the memory stream should start on.\namount: An upper bound on number of bytes to read.\n\nReturns:\nGenerator over `Chunk` instances.", "source": "codesearchnet"}
{"code": "def _evolve(self, state, qargs=None):\n        \n        state = self._format_state(state)\n        if qargs is None:\n            if state.shape[0] != self._input_dim:\n                raise QiskitError(\n                    \"Operator input dimension is not equal to state dimension.\"\n                )\n            if state.ndim == 1:\n                \n                return np.dot(self.data, state)\n            \n            return np.dot(\n                np.dot(self.data, state), np.transpose(np.conj(self.data)))\n        \n        return self._evolve_subsystem(state, qargs)", "docstring": "Evolve a quantum state by the operator.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nQuantumState: the output quantum state.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"}
{"code": "def _parse_bro_header(self, logfile):\n    _line = next(logfile)\n    while (not _line.startswith('\n        _line = next(logfile)\n    _field_names = _line.strip().split(self.delimiter)[1:]\n    _line = next(logfile)\n    _field_types = _line.strip().split(self.delimiter)[1:]\n    return (_field_names, _field_types)", "docstring": "This method tries to parse the Bro log header section.\n\nNote: My googling is failing me on the documentation on the format,\nso just making a lot of assumptions and skipping some shit.\nAssumption 1: The delimeter is a tab.\nAssumption 2: Types are either time, string, int or float\nAssumption 3: The header always ends with #fields and #types as\nthe last two lines.\n\nFormat example:\n#separator \\x09\n#set_separator\t,\n#empty_field\t(empty)\n#unset_field\t-\n#path\thttpheader_recon\n#fields\tts\torigin\tuseragent\theader_events_json\n#types\ttime\tstring\tstring\tstring\n\nArgs:\nlogfile: The Bro log file.\n\nReturns:\nA tuple of 2 lists. One for field names and other for field types.", "source": "codesearchnet"}
{"code": "def stat_float_times(cls, newvalue=None):\n    if (newvalue is not None):\n        cls._stat_float_times = bool(newvalue)\n    return cls._stat_float_times", "docstring": "Determine whether a file's time stamps are reported as floats\nor ints.\n\nCalling without arguments returns the current value.\nThe value is shared by all instances of FakeOsModule.\n\nArgs:\nnewvalue: If `True`, mtime, ctime, atime are reported as floats.\nOtherwise, they are returned as ints (rounding down).", "source": "codesearchnet"}
{"code": "def get_class_that_defined_method(fun):\n    \n    if inspect.ismethod(fun):\n        for cls in inspect.getmro(fun.__self__.__class__):\n            if cls.__dict__.get(fun.__name__) is fun:\n                return cls\n        fun = fun.__func__  \n    if inspect.isfunction(fun):\n        cls = getattr(inspect.getmodule(fun),\n                      fun.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0], None)\n        if isinstance(cls, type):\n            return cls\n    return getattr(fun, '__objclass__', None)", "docstring": "Tries to find the class that defined the specified method. Will not work for nested classes\n(locals).\n\nArgs:\nfun: Function / Method\n\nReturns:\nReturns the class which defines the given method / function.", "source": "juraj-google-style"}
{"code": "def write_eval_records(bt_table, game_data, last_game):\n    eval_num = last_game\n    GAMES_PER_COMMIT = 2000\n    for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):\n        assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), \"Prev row doesn't exists\"\n        assert (bt_table.read_row(EVAL_PREFIX.format((eval_num + 1))) is None), 'Row already exists'\n        rows = []\n        for (i, metadata) in enumerate(games):\n            eval_num += 1\n            row_name = EVAL_PREFIX.format(eval_num)\n            row = bt_table.row(row_name)\n            for (column, value) in metadata:\n                row.set_cell(METADATA, column, value)\n            rows.append(row)\n            if ((i < 5) or ((i + 5) > len(games))):\n                print('\\t', i, row_name, metadata[6][1])\n        if (eval_num == (last_game + len(games))):\n            test = input(\"Commit ('y'/'yes' required): \")\n            if (test.lower() not in ('y', 'yes')):\n                break\n        game_num_update = bt_table.row(TABLE_STATE)\n        game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)\n        print(TABLE_STATE, eval_num)\n        response = bt_table.mutate_rows(rows)\n        any_bad = False\n        for (i, status) in enumerate(response):\n            if (status.code is not 0):\n                print('Row number {} failed to write {}'.format(i, status))\n                any_bad = True\n        if any_bad:\n            break\n        game_num_update.commit()", "docstring": "Write all eval_records to eval_table\n\nIn addition to writing new rows table_state must be updated in\nrow `table_state` columns `metadata:eval_game_counter`\n\nArgs:\nbt_table: bigtable table to add rows to.\ngame_data:  metadata pairs (column name, value) for each eval record.\nlast_game:  last_game in metadata:table_state", "source": "codesearchnet"}
{"code": "def remove_item(name, system_wide=False):\n\n\t\n\n\tdesktop_env = system.get_name()\n\n\tif desktop_env == 'windows':\n\t\timport winreg\n\t\tif system_wide:\n\t\t\tstartup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n\n\t\telse:\n\t\t\tstartup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n\n\t\tfor startup_file in os.path.listdir(start_dir):\n\t\t\tif startup_file == name or startup_file.split('.')[0] == name:\n\t\t\t\tos.remove(os.path.join(startup_dir, startup_file))\n\n\telif desktop_env == 'mac':\n\t\tsp.Popen(['launchctl', 'remove', name])\n\t\t\n\t\t\n\n\telse:\n\t\t\n\n\t\tif desktop_env == 'unknown':\n\t\t\t\n\t\t\tif system_wide:\n\t\t\t\tlogin_file = '/etc/profile'\n\t\t\telse:\n\t\t\t\tlogin_file = os.path.expanduser('~/.profile')\n\n\t\t\twith open(login_file) as f:\n\t\t\t\tlogin_file_contents = f.read()\n\n\t\t\tfinal_login_file_contents = ''\n\n\t\t\tfor line in login_file_contents.split('\\n'):\n\t\t\t\tif line.split(' ')[0] != name:\n\t\t\t\t\tfinal_login_file_contents += line\n\n\t\t\twith open(login_file, 'w') as f:\n\t\t\t\tf.write(final_login_file_contents)\n\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdesktop_file_name = name + '.desktop'\n\n\t\t\t\tstartup_file = os.path.join(directories.get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name)\n\n\t\t\t\tif not os.path.isfile(startup_file):\n\t\t\t\t\tfor possible_startup_file in os.listdir(directories.get_config_dir('autostart', system_wide=system_wide)[0]):\n\t\t\t\t\t\tpossible_startup_file_parsed = desktopfile.parse(possible_startup_file)\n\n\t\t\t\t\t\tif possible_startup_file_parsed['Name'] == name:\n\t\t\t\t\t\t\tstartup_file = possible_startup_file\n\n\t\t\t\tos.remove(startup_file)\n\n\t\t\texcept IndexError:\n\t\t\t\tpass", "docstring": "Removes a program from startup.\n\nRemoves a program from startup.\n\nArgs:\nname        (str) : The name of the program (as known to the system) to remove. See :func:``list_items``.\nsystem_wide (bool): Remove it from system-wide startup.\n\nNote:\n``system_wide`` requires superuser/admin privileges.", "source": "juraj-google-style"}
{"code": "def emit_code_from_ir(sql_query_tree, compiler_metadata):\n    context = CompilationContext(query_path_to_selectable=dict(), query_path_to_location_info=sql_query_tree.query_path_to_location_info, query_path_to_output_fields=sql_query_tree.query_path_to_output_fields, query_path_to_filters=sql_query_tree.query_path_to_filters, query_path_to_node=sql_query_tree.query_path_to_node, compiler_metadata=compiler_metadata)\n    return _query_tree_to_query(sql_query_tree.root, context)", "docstring": "Return a SQLAlchemy Query from a passed SqlQueryTree.\n\nArgs:\nsql_query_tree: SqlQueryTree, tree representation of the query to emit.\ncompiler_metadata: SqlMetadata, SQLAlchemy specific metadata.\n\nReturns:\nSQLAlchemy Query", "source": "codesearchnet"}
{"code": "def _read_tags(self):\n        \n        tags = self._config.get('tags', {})\n        logging.info('Tags:')\n        for tag_name in tags.keys():\n            tag = {}\n            tag['Key'] = tag_name\n            tag['Value'] = tags[tag_name]\n            self._tags.append(tag)\n            logging.info('{} = {}'.format(tag_name, tags[tag_name]))\n\n        logging.debug(json.dumps(\n            self._tags,\n            indent=2,\n            sort_keys=True\n        ))\n        return True", "docstring": "Fill in the _tags dict from the tags file.\n\nArgs:\nNone\n\nReturns:\nTrue\n\nTodo:\nFigure what could go wrong and at least acknowledge the\nthe fact that Murphy was an optimist.", "source": "juraj-google-style"}
{"code": "def run_console(self, authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE, authorization_code_message=_DEFAULT_AUTH_CODE_MESSAGE, **kwargs):\n    kwargs.setdefault('prompt', 'consent')\n    self.redirect_uri = self._OOB_REDIRECT_URI\n    (auth_url, _) = self.authorization_url(**kwargs)\n    print(authorization_prompt_message.format(url=auth_url))\n    code = input(authorization_code_message)\n    self.fetch_token(code=code)\n    return self.credentials", "docstring": "Run the flow using the console strategy.\n\nThe console strategy instructs the user to open the authorization URL\nin their browser. Once the authorization is complete the authorization\nserver will give the user a code. The user then must copy & paste this\ncode into the application. The code is then exchanged for a token.\n\nArgs:\nauthorization_prompt_message (str): The message to display to tell\nthe user to navigate to the authorization URL.\nauthorization_code_message (str): The message to display when\nprompting the user for the authorization code.\nkwargs: Additional keyword arguments passed through to\n:meth:`authorization_url`.\n\nReturns:\ngoogle.oauth2.credentials.Credentials: The OAuth 2.0 credentials\nfor the user.", "source": "codesearchnet"}
{"code": "def get_missing_simulations(self, param_list, runs=None):\n    params_to_simulate = []\n    if (runs is not None):\n        next_runs = self.db.get_next_rngruns()\n        available_params = [r['params'] for r in self.db.get_results()]\n        for param_comb in param_list:\n            needed_runs = runs\n            for (i, p) in enumerate(available_params):\n                if (param_comb == {k: p[k] for k in p.keys() if (k != 'RngRun')}):\n                    needed_runs -= 1\n            new_param_combs = []\n            for needed_run in range(needed_runs):\n                new_param = deepcopy(param_comb)\n                new_param['RngRun'] = next(next_runs)\n                new_param_combs += [new_param]\n            params_to_simulate += new_param_combs\n    else:\n        for param_comb in param_list:\n            if (not self.db.get_results(param_comb)):\n                params_to_simulate += [param_comb]\n    return params_to_simulate", "docstring": "Return a list of the simulations among the required ones that are not\navailable in the database.\n\nArgs:\nparam_list (list): a list of dictionaries containing all the\nparameters combinations.\nruns (int): an integer representing how many repetitions are wanted\nfor each parameter combination, None if the dictionaries in\nparam_list already feature the desired RngRun value.", "source": "codesearchnet"}
{"code": "def add_variable_from_reference(self, reference_variable, name=None, initializer='zeros'):\n    name = name or 'var'\n    if hasattr(reference_variable, 'path'):\n        name = reference_variable.path.replace('/', '_') + '_' + name\n    else:\n        name = str(reference_variable.name).replace('/', '_').replace(':', '_') + '_' + name\n    return self.add_variable(shape=reference_variable.shape, initializer=initializer, dtype=reference_variable.dtype, name=name, layout=getattr(reference_variable, '_layout', None))", "docstring": "Add an optimizer variable from the model variable.\n\nCreate an optimizer variable based on the information of model variable.\nFor example, in SGD optimizer momemtum, for each model variable, a\ncorresponding momemtum variable is created of the same shape and dtype.\n\nArgs:\nreference_variable: `keras.Variable`. The corresponding model\nvariable to the optimizer variable to be created.\nname: Optional string. The name prefix of the optimizer variable to\nbe created. If not provided, it will be set to `\"var\"`. The\nvariable name will follow the pattern\n`{variable_name}_{reference_variable.name}`,\ne.g., `momemtum/dense_1`. Defaults to `None`.\ninitializer: Initializer object to use to populate the initial\nvariable value, or string name of a built-in initializer\n(e.g. `\"random_normal\"`). If unspecified, defaults to\n`\"zeros\"`.\n\nReturns:\nAn optimizer variable, in the format of `keras.Variable`.", "source": "github-repos"}
{"code": "def compute_distance(a, b):\n    \n\n    \n    if not a:\n        return len(b)\n    if not b:\n        return len(a)\n    if a == b or str.lower(a) == str.lower(b):\n        return 0\n\n    \n    a = str.lower(a)\n    b = str.lower(b)\n\n    \n    vector_1 = [-1] * (len(b) + 1)\n    vector_2 = [-1] * (len(b) + 1)\n\n    \n    for i in range(len(vector_1)):\n        vector_1[i] = i\n\n    \n    for i in range(len(a)):\n        vector_2[0] = i + 1\n\n        for j in range(len(b)):\n            penalty = 0 if a[i] == b[j] else compute_qwerty_distance(a[i], b[j])\n            vector_2[j + 1] = min(vector_2[j] + 1, vector_1[j + 1] + 1, vector_1[j] + penalty)\n\n        for j in range(len(vector_1)):\n            vector_1[j] = vector_2[j]\n\n    return vector_2[len(b)]", "docstring": "Computes a modified Levenshtein distance between two strings, comparing the\nlowercase versions of each string and accounting for QWERTY distance.\n\nArguments:\n- a (str) String to compare to 'b'\n- b (str) String to compare to 'a'\n\nReturns:\n- (int) Number representing closeness of 'a' and 'b' (lower is better)", "source": "juraj-google-style"}
{"code": "def delete_additional_charge(self, recurring_billing_id):\n    fmt = 'recurringBillItems/{}'.format(recurring_billing_id)\n    return self.client._delete((self.url + fmt), headers=self.get_headers())", "docstring": "Remove an extra charge from an invoice.\n\nArgs:\nrecurring_billing_id: Identifier of the additional charge.\n\nReturns:", "source": "codesearchnet"}
{"code": "def aggregate_variables(agg_funcs: t.List[t.Dict[str, str]], ds: xr.Dataset, time_fields: t.List[str], coords_to_squeeze: t.List[str]) -> xr.Dataset:\n    agg_dataset = xr.Dataset(coords=ds.coords, attrs=ds.attrs)\n    if len(time_fields):\n        agg_dataset = agg_dataset.groupby(ds['time'].dt.strftime(timestamp_formats[time_fields[0]]))\n        agg_dataset = apply_aggregation(agg_dataset, 'avg', None)\n        agg_dataset = agg_dataset.rename({'strftime': time_fields[0]})\n    agg_dataset = apply_aggregation(agg_dataset, 'avg', coords_to_squeeze)\n    for agg_func in agg_funcs:\n        variable, function = (agg_func['var'], agg_func['func'])\n        grouped_ds = ds[variable]\n        dims = [value for value in coords_to_squeeze if value in ds[variable].coords] if coords_to_squeeze else None\n        if len(time_fields):\n            groups = grouped_ds.groupby(ds['time'].dt.strftime(timestamp_formats[time_fields[0]]))\n            grouped_ds = apply_aggregation(groups, function, None)\n            grouped_ds = grouped_ds.rename({'strftime': time_fields[0]})\n        agg_dim_ds = apply_aggregation(grouped_ds, function, dims)\n        agg_dataset = agg_dataset.assign({f'{function}_{variable}': agg_dim_ds})\n    return agg_dataset", "docstring": "Aggregate variables in an xarray dataset based on aggregation functions.\n\nArgs:\nagg_funcs (List[Dict[str, str]]): List of dictionaries specifying aggregation functions for variables.\nds (xr.Dataset): The input xarray dataset.\ntime_fields (List[str]): List of time fields to consider for time-based grouping.\ncoords_to_squeeze (List[str]): List of coordinates to be squeezed during aggregation.\n\nReturns:\nxr.Dataset: The aggregated xarray dataset.", "source": "github-repos"}
{"code": "def loads(s, single=False, version=_default_version, strict=False, errors='warn'):\n    ms = deserialize(s, version=version, strict=strict, errors=errors)\n    if single:\n        return next(ms)\n    else:\n        return ms", "docstring": "Deserialize SimpleMRS string representations\n\nArgs:\ns (str): a SimpleMRS string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of Xmrs objects (unless *single* is `True`)", "source": "codesearchnet"}
{"code": "def path(self, value):\n\t\t\n\t\tif not value.endswith('/'):\n\t\t\tself._path = '{v}/'.format(v=value)\n\t\telse:\n\t\t\tself._path = value", "docstring": "Setter for 'path' property\n\nArgs:\nvalue (str): Absolute path to scan", "source": "juraj-google-style"}
{"code": "def get_schema_node(self, path: SchemaPath) -> Optional[SchemaNode]:\n        \n        return self.schema.get_schema_descendant(\n            self.schema_data.path2route(path))", "docstring": "Return the schema node addressed by a schema path.\n\nArgs:\npath: Schema path.\n\nReturns:\nSchema node if found in the schema, or ``None``.\n\nRaises:\nInvalidSchemaPath: If the schema path is invalid.", "source": "juraj-google-style"}
{"code": "def get_keyvault(access_token, subscription_id, rgname, vault_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.KeyVault/vaults/', vault_name,\n                        '?api-version=', KEYVAULT_API])\n    return do_get(endpoint, access_token)", "docstring": "Gets details about the named key vault.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nvault_name (str): Name of the key vault.\n\nReturns:\nHTTP response. JSON body of key vault properties.", "source": "juraj-google-style"}
{"code": "def chunk_constant_value(node: node_def_pb2.NodeDef, size: int):\n    if node.op == _CONST_OP:\n        tensor_proto = node.attr['value'].tensor\n        if tensor_proto.tensor_content:\n            b = tensor_proto.tensor_content\n        else:\n            b = tensor_util.MakeNdarray(tensor_proto).tobytes()\n        kept_attributes = {key: getattr(tensor_proto, key) for key in _KEEP_TENSOR_PROTO_FIELDS}\n        tensor_proto.Clear()\n        for field, val in kept_attributes.items():\n            if isinstance(val, message.Message):\n                getattr(tensor_proto, field).MergeFrom(val)\n            else:\n                setattr(tensor_proto, field, val)\n        return b\n    else:\n        attributes_and_sizes = ', '.join([f'{key}: {util.format_bytes(val.ByteSize())}' for key, val in node.attr.items()])\n        raise ValueError(f'Unable to split GraphDef because at least one of the nodes individually exceeds the max size of {util.format_bytes(constants.max_size())}. Currently only Const nodes can be further split.\\nNode info:\\n\\tsize: {util.format_bytes(size)}\\n\\tname: {node.name}\\n\\top: {node.op}\\n\\tinputs: {node.input}\\n\\top: {node.op}\\n\\tdevice: {node.device}\\n\\tattr (and sizes): {attributes_and_sizes}')", "docstring": "Extracts and clears the constant value from a NodeDef.\n\nArgs:\nnode: NodeDef with const value to extract.\nsize: Size of NodeDef (for error reporting).\n\nReturns:\nBytes representation of the Constant tensor content.", "source": "github-repos"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if self._nonce_id:\n        self._nonce_id.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Nonce struct is missing the nonce ID.')\n    if self._nonce_value:\n        self._nonce_value.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Nonce struct is missing the nonce value.')\n    self.length = local_stream.length()\n    super(Nonce, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Nonce struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the nonce ID or nonce value is not defined.", "source": "codesearchnet"}
{"code": "def __type_matches(self, obj: Any, type_: Type) -> bool:\n        \n        if is_generic_union(type_):\n            for t in generic_type_args(type_):\n                if self.__type_matches(obj, t):\n                    return True\n            return False\n        elif is_generic_list(type_):\n            if not isinstance(obj, list):\n                return False\n            for item in obj:\n                if not self.__type_matches(item, generic_type_args(type_)[0]):\n                    return False\n            return True\n        elif is_generic_dict(type_):\n            if not isinstance(obj, OrderedDict):\n                return False\n            for key, value in obj:\n                if not isinstance(key, generic_type_args(type_)[0]):\n                    return False\n                if not self.__type_matches(value, generic_type_args(type_)[1]):\n                    return False\n            return True\n        else:\n            return isinstance(obj, type_)", "docstring": "Checks that the object matches the given type.\n\nLike isinstance(), but will work with union types using Union, \\\nDict and List.\n\nArgs:\nobj: The object to check\ntype_: The type to check against\n\nReturns:\nTrue iff obj is of type type_", "source": "juraj-google-style"}
{"code": "def conjugate(x):\n    if any_symbolic_tensors((x,)):\n        return Conjugate().symbolic_call(x)\n    return backend.numpy.conjugate(x)", "docstring": "Returns the complex conjugate, element-wise.\n\nThe complex conjugate of a complex number is obtained by changing the sign\nof its imaginary part.\n\n`keras.ops.conj` is a shorthand for this function.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe complex conjugate of each element in `x`.", "source": "github-repos"}
{"code": "def WriteFileEntry(self, path):\n    \n    string = '{0:s}\\n'.format(path)\n\n    encoded_string = self._EncodeString(string)\n    self._file_object.write(encoded_string)", "docstring": "Writes the file path to file.\n\nArgs:\npath (str): path of the file.", "source": "juraj-google-style"}
{"code": "def _sort_dump_data_by(self, data, sort_by, reverse):\n    if sort_by == SORT_TENSORS_BY_TIMESTAMP:\n        return sorted(data, reverse=reverse, key=lambda x: x.timestamp)\n    elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:\n        return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)\n    elif sort_by == SORT_TENSORS_BY_OP_TYPE:\n        return sorted(data, reverse=reverse, key=lambda x: self._debug_dump.node_op_type(x.node_name))\n    elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:\n        return sorted(data, reverse=reverse, key=lambda x: '%s:%d' % (x.node_name, x.output_slot))\n    else:\n        raise ValueError('Unsupported key to sort tensors by: %s' % sort_by)", "docstring": "Sort a list of DebugTensorDatum in specified order.\n\nArgs:\ndata: (list of DebugTensorDatum) the data to be sorted.\nsort_by: The field to sort data by.\nreverse: (bool) Whether to use reversed (descending) order.\n\nReturns:\n(list of DebugTensorDatum) in sorted order.\n\nRaises:\nValueError: given an invalid value of sort_by.", "source": "github-repos"}
{"code": "def speed_info(self):\n    speed_info = structs.JLinkSpeedInfo()\n    self._dll.JLINKARM_GetSpeedInfo(ctypes.byref(speed_info))\n    return speed_info", "docstring": "Retrieves information about supported target interface speeds.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe ``JLinkSpeedInfo`` instance describing the supported target\ninterface speeds.", "source": "codesearchnet"}
{"code": "def build_transcript(transcript, build='37'):\n    \n    \n    \n    transcript_id = transcript['transcript_id']\n    transcript_obj = dict(\n        transcript_id = transcript_id\n    )\n    \n    \n    transcript_obj['hgnc_id'] = transcript['hgnc_id']\n\n    if transcript.get('protein_id'):\n        transcript_obj['protein_id'] = transcript['protein_id']\n    \n    if transcript.get('sift_prediction'):\n        transcript_obj['sift_prediction'] = transcript['sift_prediction']\n    \n    if transcript.get('polyphen_prediction'):\n        transcript_obj['polyphen_prediction'] = transcript['polyphen_prediction']\n\n\n    if transcript.get('swiss_prot'):\n        transcript_obj['swiss_prot'] = transcript['swiss_prot']\n    \n    if transcript.get('pfam_domain'):\n        transcript_obj['pfam_domain'] = transcript.get('pfam_domain')\n    \n    if transcript.get('prosite_profile'):\n        transcript_obj['prosite_profile'] = transcript.get('prosite_profile')\n    \n    if transcript.get('smart_domain'):\n        transcript_obj['smart_domain'] = transcript.get('smart_domain')\n\n    if transcript.get('biotype'):\n        transcript_obj['biotype'] = transcript.get('biotype')\n\n    if transcript.get('functional_annotations'):\n        transcript_obj['functional_annotations'] = transcript['functional_annotations']\n    \n    if transcript.get('region_annotations'):\n        transcript_obj['region_annotations'] = transcript['region_annotations']\n\n    if transcript.get('exon'):\n        transcript_obj['exon'] = transcript.get('exon')\n    \n    if transcript.get('intron'):\n        transcript_obj['intron'] = transcript.get('intron')\n    \n    if transcript.get('strand'):\n        transcript_obj['strand'] = transcript.get('strand')\n\n    if transcript.get('coding_sequence_name'):\n        transcript_obj['coding_sequence_name'] = transcript['coding_sequence_name']\n    \n    if transcript.get('protein_sequence_name'):\n        transcript_obj['protein_sequence_name'] = transcript['protein_sequence_name']\n\n    transcript_obj['is_canonical'] = transcript.get('is_canonical', False)\n\n    return transcript_obj", "docstring": "Build a transcript object\n\nThese represents the transcripts that are parsed from the VCF, not\nthe transcript definitions that are collected from ensembl.\n\nArgs:\ntranscript(dict): Parsed transcript information\n\nReturns:\ntranscript_obj(dict)", "source": "juraj-google-style"}
{"code": "def dinf_downslope_direction(a):\n        \n        taud, d = DinfUtil.check_orthogonal(a)\n        if d != -1:\n            down = [d]\n            return down\n        else:\n            if a < FlowModelConst.ne:  \n                down = [1, 2]\n            elif a < FlowModelConst.n:  \n                down = [2, 3]\n            elif a < FlowModelConst.nw:  \n                down = [3, 4]\n            elif a < FlowModelConst.w:  \n                down = [4, 5]\n            elif a < FlowModelConst.sw:  \n                down = [5, 6]\n            elif a < FlowModelConst.s:  \n                down = [6, 7]\n            elif a < FlowModelConst.se:  \n                down = [7, 8]\n            else:  \n                down = [8, 1]\n            return down", "docstring": "Get the downslope directions of an dinf direction value\nArgs:\na: Dinf value\n\nReturns:\ndownslope directions", "source": "juraj-google-style"}
{"code": "def information_matrix(qhbm: inference.QHBM, modular_hamiltonian: models.Hamiltonian, modular_hamiltonian_copy: models.Hamiltonian, config):\n\n    def ebm_block():\n        samples = qhbm.e_inference.sample(config.training.num_samples)\n        with tf.GradientTape() as tape:\n            tape.watch(modular_hamiltonian.energy.trainable_variables[0])\n            energies = modular_hamiltonian.energy(samples)\n        energy_jac = tape.jacobian(energies, modular_hamiltonian.energy.trainable_variables[0])\n        avg_energy_grad = tf.reduce_mean(energy_jac, axis=0)\n        centered_energy_jac = energy_jac - avg_energy_grad\n        return tf.matmul(centered_energy_jac, centered_energy_jac, transpose_a=True) / config.training.num_samples\n\n    def cross_block():\n        shift = tf.constant(0.5)\n        scale = tf.constant(np.pi / 2)\n        circuit_values = tf.identity(modular_hamiltonian.circuit.trainable_variables[0])\n\n        def grad(indices, updates):\n            modular_hamiltonian.circuit.trainable_variables[0].assign(tf.tensor_scatter_nd_add(circuit_values, indices=indices, updates=updates))\n            with tf.GradientTape() as tape:\n                tape.watch(modular_hamiltonian_copy.energy.trainable_variables[0])\n                expectation = qhbm.expectation(modular_hamiltonian_copy)\n            return tape.gradient(expectation, modular_hamiltonian_copy.energy.trainable_variables[0])\n\n        def row(i):\n            return scale * (grad([[i]], [-shift]) - grad([[i]], [shift]))\n        indices = tf.range(tf.shape(modular_hamiltonian.circuit.trainable_variables[0])[0])\n        block = tf.map_fn(fn=row, elems=indices, fn_output_signature=tf.float32)\n        modular_hamiltonian.circuit.trainable_variables[0].assign(circuit_values)\n        return block\n\n    def qnn_block():\n        shift = tf.constant(0.5)\n        scale = tf.constant(np.pi / 2)\n        circuit_values = tf.identity(modular_hamiltonian.circuit.trainable_variables[0])\n\n        def grad(indices, updates):\n            modular_hamiltonian.circuit.trainable_variables[0].assign(tf.tensor_scatter_nd_add(circuit_values, indices=indices, updates=updates))\n            with tf.GradientTape() as tape:\n                tape.watch(modular_hamiltonian_copy.circuit.trainable_variables[0])\n                expectation = qhbm.expectation(modular_hamiltonian_copy)\n            return tape.jacobian(expectation, modular_hamiltonian_copy.circuit.trainable_variables[0])\n\n        def row(i):\n            return scale * (grad([[i]], [-shift]) - grad([[i]], [shift]))\n        indices = tf.range(tf.shape(modular_hamiltonian.circuit.trainable_variables[0])[0])\n        block = tf.map_fn(fn=row, elems=indices, fn_output_signature=tf.float32)\n        modular_hamiltonian.circuit.trainable_variables[0].assign(circuit_values)\n        return block\n    block_ebm = ebm_block()\n    block_cross = tf.squeeze(cross_block())\n    block_qnn = tf.squeeze(qnn_block())\n    block_upper = tf.concat([block_ebm, tf.transpose(block_cross)], 1)\n    block_lower = tf.concat([block_cross, block_qnn], 1)\n    im = tf.concat([block_upper, block_lower], 0)\n    return (im + tf.transpose(im)) / 2.0", "docstring": "Estimates the Bogoliubov-Kubo-Mori information matrix.\nArgs:\nqhbm: Hamiltonian inference.\nmodular_hamiltonian: qhbm model. exp(-modular_hamiltonian)/Z(modular_hamiltonian) = rho.\nmodular_hamiltonian_copy: copy of modular_hamiltonian.\nconfig: config dict.\nReturns:\nThe BKM information matrix. 
This is tr[d_j rho d_k modular_hamiltonian] element-wise\ni.e.\nthe Hilbert-Schmidt inner product of a mixture coords tangent vector and\nan exponential coords tangent vector.", "source": "github-repos"}
{"code": "def __init__(self, graph, run_metadata):\n    self._graph = graph\n    if not run_metadata:\n        raise ValueError('No RunMetadata passed for profile analysis.')\n    self._run_metadata = run_metadata\n    self._arg_parsers = {}\n    ap = argparse.ArgumentParser(description='List nodes profile information.', usage=argparse.SUPPRESS)\n    ap.add_argument('-d', '--%s' % _DEVICE_NAME_FILTER_FLAG, dest=_DEVICE_NAME_FILTER_FLAG, type=str, default='', help='filter device name by regex.')\n    ap.add_argument('-n', '--%s' % _NODE_NAME_FILTER_FLAG, dest=_NODE_NAME_FILTER_FLAG, type=str, default='', help='filter node name by regex.')\n    ap.add_argument('-t', '--%s' % _OP_TYPE_FILTER_FLAG, dest=_OP_TYPE_FILTER_FLAG, type=str, default='', help='filter op type by regex.')\n    ap.add_argument('-f', '--file_path_filter', dest='file_path_filter', type=str, default='', help=\"filter by file name at the top position of node's creation stack that does not belong to TensorFlow library.\")\n    ap.add_argument('--min_lineno', dest='min_lineno', type=int, default=-1, help='(Inclusive) lower bound for 1-based line number in source file. If <= 0, has no effect.')\n    ap.add_argument('--max_lineno', dest='max_lineno', type=int, default=-1, help='(Exclusive) upper bound for 1-based line number in source file. If <= 0, has no effect.')\n    ap.add_argument('-e', '--execution_time', dest='execution_time', type=str, default='', help='Filter by execution time interval (includes compute plus pre- and post -processing time). Supported units are s, ms and us (default). E.g. -e >100s, -e <100, -e [100us,1000ms]')\n    ap.add_argument('-o', '--op_time', dest='op_time', type=str, default='', help='Filter by op time interval (only includes compute time). Supported units are s, ms and us (default). E.g. 
-e >100s, -e <100, -e [100us,1000ms]')\n    ap.add_argument('-s', '--sort_by', dest='sort_by', type=str, default=SORT_OPS_BY_START_TIME, help='the field to sort the data by: (%s)' % ' | '.join([SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]))\n    ap.add_argument('-r', '--reverse', dest='reverse', action='store_true', help='sort the data in reverse (descending) order')\n    ap.add_argument('--time_unit', dest='time_unit', type=str, default=cli_shared.TIME_UNIT_US, help='Time unit (' + ' | '.join(cli_shared.TIME_UNITS) + ')')\n    self._arg_parsers['list_profile'] = ap\n    ap = argparse.ArgumentParser(description='Print a Python source file with line-level profile information', usage=argparse.SUPPRESS)\n    ap.add_argument('source_file_path', type=str, help='Path to the source_file_path')\n    ap.add_argument('--cost_type', type=str, choices=['exec_time', 'op_time'], default='exec_time', help='Type of cost to display')\n    ap.add_argument('--time_unit', dest='time_unit', type=str, default=cli_shared.TIME_UNIT_US, help='Time unit (' + ' | '.join(cli_shared.TIME_UNITS) + ')')\n    ap.add_argument('-d', '--%s' % _DEVICE_NAME_FILTER_FLAG, dest=_DEVICE_NAME_FILTER_FLAG, type=str, default='', help='Filter device name by regex.')\n    ap.add_argument('-n', '--%s' % _NODE_NAME_FILTER_FLAG, dest=_NODE_NAME_FILTER_FLAG, type=str, default='', help='Filter node name by regex.')\n    ap.add_argument('-t', '--%s' % _OP_TYPE_FILTER_FLAG, dest=_OP_TYPE_FILTER_FLAG, type=str, default='', help='Filter op type by regex.')\n    ap.add_argument('--init_line', dest='init_line', type=int, default=0, help='The 1-based line number to scroll to initially.')\n    self._arg_parsers['print_source'] = ap", "docstring": "ProfileAnalyzer constructor.\n\nArgs:\ngraph: (tf.Graph) Python graph object.\nrun_metadata: A `RunMetadata` protobuf object.\n\nRaises:\nValueError: If run_metadata is None.", "source": "github-repos"}
{"code": "def __init__(self,\n               host=None,\n               port=None,\n               user=None,\n               password=None,\n               database=None):\n    \n\n    \n    warnings.filterwarnings(\"error\", category=MySQLdb.Warning)\n\n    for message in [\n        \n        \".*Duplicate entry.*\",\n        \n        \".*Table '.*' already exists\",\n        \n        \".*Duplicate key name.*\",\n        \n        \n        \n        \n        \".*Invalid.*character string.*\",\n    ]:\n      warnings.filterwarnings(\n          \"ignore\", category=MySQLdb.Warning, message=message)\n\n    self._connect_args = dict(\n        host=host or config.CONFIG[\"Mysql.host\"],\n        port=port or config.CONFIG[\"Mysql.port\"],\n        user=user or config.CONFIG[\"Mysql.username\"],\n        password=password or config.CONFIG[\"Mysql.password\"],\n        database=database or config.CONFIG[\"Mysql.database\"])\n\n    client_key_path = config.CONFIG[\"Mysql.client_key_path\"]\n    if client_key_path:\n      logging.debug(\"Client key file configured, trying to use SSL.\")\n      self._connect_args[\"client_key_path\"] = client_key_path\n      self._connect_args[\"client_cert_path\"] = config.CONFIG[\n          \"Mysql.client_cert_path\"]\n      self._connect_args[\"ca_cert_path\"] = config.CONFIG[\"Mysql.ca_cert_path\"]\n\n    _SetupDatabase(**self._connect_args)\n\n    max_pool_size = config.CONFIG.Get(\"Mysql.conn_pool_max\", 10)\n    self.pool = mysql_pool.Pool(self._Connect, max_size=max_pool_size)\n\n    self.handler_thread = None\n    self.handler_stop = True\n\n    self.flow_processing_request_handler_thread = None\n    self.flow_processing_request_handler_stop = None\n    self.flow_processing_request_handler_pool = (\n        threadpool.ThreadPool.Factory(\n            \"flow_processing_pool\", min_threads=2, max_threads=50))\n    self.flow_processing_request_handler_pool.Start()", "docstring": "Creates a datastore implementation.\n\nArgs:\nhost: Passed to MySQLdb.Connect when creating a new connection.\nport: Passed to MySQLdb.Connect when creating a new connection.\nuser: Passed to MySQLdb.Connect when creating a new connection.\npassword: Passed to MySQLdb.Connect when creating a new connection.\ndatabase: Passed to MySQLdb.Connect when creating a new connection.", "source": "juraj-google-style"}
{"code": "def copy(self, source_file_names, destination_file_names):\n    err_msg = 'source_file_names and destination_file_names should be equal in length'\n    assert len(source_file_names) == len(destination_file_names), err_msg\n\n    def _copy_path(source, destination):\n        \n        try:\n            if os.path.exists(destination):\n                if os.path.isdir(destination):\n                    shutil.rmtree(destination)\n                else:\n                    os.remove(destination)\n            if os.path.isdir(source):\n                shutil.copytree(source, destination)\n            else:\n                shutil.copy2(source, destination)\n        except OSError as err:\n            raise IOError(err)\n    exceptions = {}\n    for source, destination in zip(source_file_names, destination_file_names):\n        try:\n            _copy_path(source, destination)\n        except Exception as e:\n            exceptions[source, destination] = e\n    if exceptions:\n        raise BeamIOError('Copy operation failed', exceptions)", "docstring": "Recursively copy the file tree from the source to the destination\n\nArgs:\nsource_file_names: list of source file objects that needs to be copied\ndestination_file_names: list of destination of the new object\n\nRaises:\n``BeamIOError``: if any of the copy operations fail", "source": "github-repos"}
{"code": "def rename_edges(self, old_node_name, new_node_name):\n    graph = self.graph\n    for (node, edges) in graph.items():\n        if (node == old_node_name):\n            graph[new_node_name] = copy(edges)\n            del graph[old_node_name]\n        elif (old_node_name in edges):\n            edges.remove(old_node_name)\n            edges.add(new_node_name)", "docstring": "Change references to a node in existing edges.\n\nArgs:\nold_node_name (str): The old name for the node.\nnew_node_name (str): The new name for the node.", "source": "codesearchnet"}
{"code": "def _make_query_from_terms(self, terms, limit=None):\n        \n        expanded_terms = self._expand_terms(terms)\n        terms_used = 0\n\n        if expanded_terms['doc']:\n            \n            query_parts = [\"SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score\"]\n        if expanded_terms['doc'] and expanded_terms['keywords']:\n            query_parts = [\"SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) \"\n                           \" +  ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))\"\n                           ' as score']\n        else:\n            \n            query_parts = ['SELECT vid, dataset_vid, 1 as score']\n\n        query_parts.append('FROM partition_index')\n        query_params = {}\n        where_count = 0\n\n        if expanded_terms['doc']:\n            query_parts.append('WHERE doc @@ to_tsquery(:doc)')\n            query_params['doc'] = self.backend._and_join(expanded_terms['doc'])\n            where_count += 1\n            terms_used += 1\n\n        if expanded_terms['keywords']:\n            query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])\n\n            kw_q = \"to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)\"\n\n            query_parts.append((\"AND \" if where_count else \"WHERE \") + kw_q)\n\n            where_count += 1\n            terms_used += 1\n\n        if expanded_terms['from']:\n\n            query_parts.append((\"AND \" if where_count else \"WHERE \") + ' from_year >= :from_year')\n\n            query_params['from_year'] = expanded_terms['from']\n            where_count += 1\n            terms_used += 1\n\n        if expanded_terms['to']:\n\n            query_parts.append((\"AND \" if where_count else \"WHERE \") + ' to_year <= :to_year')\n\n            query_params['to_year'] = expanded_terms['to']\n            where_count += 1\n            terms_used += 1\n\n        query_parts.append('ORDER BY score DESC')\n\n        if limit:\n            query_parts.append('LIMIT :limit')\n            query_params['limit'] = limit\n\n        if not terms_used:\n            logger.debug('No terms used; not creating query')\n            return None, None\n\n        query_parts.append(';')\n        deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\\\n            .format(terms, query_parts, query_params)\n        logger.debug(deb_msg)\n\n        return text('\\n'.join(query_parts)), query_params", "docstring": "Creates a query for partition from decomposed search terms.\n\nArgs:\nterms (dict or unicode or string):\n\nReturns:\ntuple of (TextClause, dict): First element is FTS query, second is\nparameters of the query. Element of the execution of the query is\ntuple of three elements: (vid, dataset_vid, score).", "source": "juraj-google-style"}
{"code": "def depth_december_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_december_average_ground_temperature`'.format(value))\n    self._depth_december_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_december_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_december_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def list_hierarchy(class_name, bases):\n    \n\n    class_list = [Uri(class_name)]\n    for base in bases:\n        if base.__name__ not in IGNORE_CLASSES:\n            class_list.append(Uri(base.__name__))\n    return list([i for i in set(class_list)])", "docstring": "Creates a list of the class hierarchy\n\nArgs:\n-----\nclass_name: name of the current class\nbases: list/tuple of bases for the current class", "source": "juraj-google-style"}
{"code": "def set_spacing(self, space):\n    self.figure.spacing = space\n    if ('subplots_adjust_kwargs' not in self.figure.__dict__):\n        self.figure.subplots_adjust_kwargs = {}\n    if (space == 'wide'):\n        self.figure.subplots_adjust_kwargs['hspace'] = 0.3\n        self.figure.subplots_adjust_kwargs['wspace'] = 0.3\n    else:\n        self.figure.subplots_adjust_kwargs['hspace'] = 0.0\n        self.figure.subplots_adjust_kwargs['wspace'] = 0.0\n    return", "docstring": "Set the figure spacing.\n\nSets whether in general there is space between subplots.\nIf all axes are shared, this can be `tight`. Default in code is `wide`.\n\nThe main difference is the tick labels extend to the ends if space==`wide`.\nIf space==`tight`, the edge tick labels are cut off for clearity.\n\nArgs:\nspace (str): Sets spacing for subplots. Either `wide` or `tight`.", "source": "codesearchnet"}
{"code": "def filter_string(self, word):\n        \n        segs = [m.group(0) for m in self.seg_regex.finditer(word)]\n        return ''.join(segs)", "docstring": "Return a string like the input but containing only legal IPA segments\n\nArgs:\nword (unicode): input string to be filtered\n\nReturns:\nunicode: string identical to `word` but with invalid IPA segments\nabsent", "source": "juraj-google-style"}
{"code": "def Add(self, rdf_value, mutation_pool=None):\n    self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool)", "docstring": "Adds an rdf value to the queue.\n\nAdds an rdf value to the queue. Does not require that the queue be locked.\n\nArgs:\nrdf_value: The rdf value to add to the queue.\n\nmutation_pool: A MutationPool object to write to.\n\nRaises:\nValueError: rdf_value has unexpected type.", "source": "codesearchnet"}
{"code": "def _send_file(self, method, path, data, filename):\n        \n        with open(filename, 'r') as f:\n            return self._make_request(method, path, data=data, files=[f, ])", "docstring": "Make a multipart/form-encoded request.\n\nArgs:\n`method`: The method of the request (POST or PUT).\n`path`: The path to the resource.\n`data`: The JSON-encoded data.\n`filename`: The filename of the file to send.\nReturns:\nThe content of the response.\nRaises:\nAn exception depending on the HTTP status code of the response.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListDatabases = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString,\n        )\n        self.CreateDatabase = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.GetDatabase = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.FromString,\n        )\n        self.UpdateDatabaseDdl = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.DropDatabase = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.GetDatabaseDdl = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString,\n        )\n        self.SetIamPolicy = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy\",\n            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,\n        )\n        self.GetIamPolicy = channel.unary_unary(\n            \"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy\",\n            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,\n        )\n        self.TestIamPermissions = channel.unary_unary(\n            
\"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions\",\n            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _cursor_pb(cursor_pair):\n    if (cursor_pair is not None):\n        (data, before) = cursor_pair\n        value_pbs = [_helpers.encode_value(value) for value in data]\n        return query_pb2.Cursor(values=value_pbs, before=before)", "docstring": "Convert a cursor pair to a protobuf.\n\nIf ``cursor_pair`` is :data:`None`, just returns :data:`None`.\n\nArgs:\ncursor_pair (Optional[Tuple[list, bool]]): Two-tuple of\n\n* a list of field values.\n* a ``before`` flag\n\nReturns:\nOptional[google.cloud.firestore_v1beta1.types.Cursor]: A\nprotobuf cursor corresponding to the values.", "source": "codesearchnet"}
{"code": "def _NodeDef(op_type, name, attrs=None) -> node_def_pb2.NodeDef:\n    node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type), name=compat.as_bytes(name))\n    if attrs:\n        for k, v in attrs.items():\n            node_def.attr[k].CopyFrom(v)\n    return node_def", "docstring": "Create a NodeDef proto.\n\nArgs:\nop_type: Value for the \"op\" attribute of the NodeDef proto.\nname: Value for the \"name\" attribute of the NodeDef proto.\nattrs: Dictionary where the key is the attribute name (a string)\nand the value is the respective \"attr\" attribute of the NodeDef proto (an\nAttrValue).\n\nReturns:\nA node_def_pb2.NodeDef protocol buffer.", "source": "github-repos"}
{"code": "def parse_json_file(self, json_file: Union[str, os.PathLike], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:\n    with open(Path(json_file), encoding='utf-8') as open_json_file:\n        data = json.loads(open_json_file.read())\n    outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)\n    return tuple(outputs)", "docstring": "Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the\ndataclass types.\n\nArgs:\njson_file (`str` or `os.PathLike`):\nFile name of the json file to parse\nallow_extra_keys (`bool`, *optional*, defaults to `False`):\nDefaults to False. If False, will raise an exception if the json file contains keys that are not\nparsed.\n\nReturns:\nTuple consisting of:\n\n- the dataclass instances in the same order as they were passed to the initializer.", "source": "github-repos"}
{"code": "def list_deployment_operations(access_token, subscription_id, rg_name, deployment_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rg_name,\n                        '/providers/Microsoft.Resources/deployments/', deployment_name,\n                        '/operations',\n                        '?api-version=', BASE_API])\n    return do_get(endpoint, access_token)", "docstring": "List all operations involved in a given deployment.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrg_name (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def fixed_point(is_zero, plus, minus, f, x):\n\n    @memo_Y\n    def _fixed_point(fixed_point_fun):\n\n        def __fixed_point(collected, new):\n            diff = minus(new, collected)\n            if is_zero(diff):\n                return collected\n            return fixed_point_fun(plus(collected, diff), f(diff))\n        return __fixed_point\n    return _fixed_point(x, f(x))", "docstring": "Get the least fixed point when it can be computed piecewise.\n\n.. testsetup::\n\nfrom proso.func import fixed_point\n\n.. doctest::\n\n>>> sorted(fixed_point(\n...    is_zero=lambda xs: len(xs) == 0,\n...    plus=lambda xs, ys: xs + ys,\n...    minus=lambda xs, ys: [x for x in xs if x not in ys],\n...    f=lambda xs: [x + 1 for x in xs if x < 10],\n...    x=[0, 5, 8]\n... ))\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nArgs:\nis_zero: function returning True if the given value is zero\nplus: function taking two values and returning their addition\nminus: function taking two values and returning ther difference\nf: function computing the expected value\nx: initial value\n\nReturns:\nThe least fixed point.", "source": "codesearchnet"}
{"code": "async def get_matches(self, state: MatchState = MatchState.all_):\n        \n        matches = await self.connection('GET',\n                                        'tournaments/{}/matches'.format(self._tournament_id),\n                                        state=state.value,\n                                        participant_id=self._id)\n        \n        ms = []\n        for m in matches:\n            ms.append(await self._tournament.get_match(m['match']['id']))\n        return ms", "docstring": "Return the matches of the given state\n\n|methcoro|\n\nArgs:\nstate: see :class:`MatchState`\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def _StopOps(from_ops: list[ops.Operation], stop_gradient_ops: list[ops.Operation], pending_count, xs_set):\n    stop_ops = set()\n    for op in from_ops:\n        is_stop_op = True\n        for inp in _NonEagerInputs(op, xs_set):\n            if pending_count[inp.op] > 0:\n                is_stop_op = False\n                break\n        if is_stop_op:\n            stop_ops.add(op)\n    stop_ops.update((op for op in stop_gradient_ops))\n    return stop_ops", "docstring": "The set of ops that terminate the gradient computation.\n\nThis computes the frontier of the forward graph *before* which backprop\nshould stop. Operations in the returned set will not be differentiated.\nThis set is defined as the subset of `from_ops` containing ops that have\nno predecessor in `from_ops`. `pending_count` is the result of\n`_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`\niff pending_count[op] > 0.\n\nIn addition, none of `stop_gradient_ops` will be differentiated.\n\nArgs:\nfrom_ops: list of Operations.\nstop_gradient_ops: list of Operations never to backprop through.\npending_count: mapping from operation to number of backprop inputs.\nxs_set: ObjectIdentitySet of Tensors.\n\nReturns:\nThe set of operations.", "source": "github-repos"}
{"code": "def add_jpeg_decoding(module_spec):\n    (input_height, input_width) = hub.get_expected_image_size(module_spec)\n    input_depth = hub.get_num_image_channels(module_spec)\n    jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n    decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n    decoded_image_as_float = tf.image.convert_image_dtype(decoded_image, tf.float32)\n    decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n    resize_shape = tf.stack([input_height, input_width])\n    resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n    resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int)\n    return (jpeg_data, resized_image)", "docstring": "Adds operations that perform JPEG decoding and resizing to the graph..\n\nArgs:\nmodule_spec: The hub.ModuleSpec for the image module being used.\n\nReturns:\nTensors for the node to feed JPEG data into, and the output of the\npreprocessing steps.", "source": "codesearchnet"}
{"code": "def transform_normalize_unicode(source, form, name=None):\n    \n\n    with ops.name_scope(name, \"TransformNormalizeUnicode\", [source]):\n        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)\n        if isinstance(source, tf.SparseTensor):\n            result = tf.SparseTensor(\n                indices=source.indices,\n                values=ops_module.transform_normalize_unicode(source.values, form),\n                dense_shape=source.dense_shape\n            )\n        else:\n            result = ops_module.transform_normalize_unicode(source, form)\n\n        return result", "docstring": "Normalize unicode strings tensor.\n\nArgs:\nsource: `Tensor` or `SparseTensor` of any shape, strings to normalize.\nform: Scalar value, name of normalization algorithm.\nOne of `\"NFD\"`, `\"NFC\"`, `\"NFKD\"`, `\"NFKC\"`.\nname: A name for the operation (optional).\nReturns:\n`Tensor` or `SparseTensor` of same shape and size as input.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    if token_ids_1 is None:\n        return len(token_ids_0 + sep) * [0]\n    return len(token_ids_0 + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does\nnot make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _bulk_cache_lookup(self, api_name, keys):\n        \n        if self._cache:\n            responses = self._cache.bulk_lookup(api_name, keys)\n            missing_keys = [key for key in keys if key not in responses.keys()]\n            return (responses, missing_keys)\n\n        return ({}, keys)", "docstring": "Performes a bulk cache lookup and returns a tuple with the results\nfound and the keys missing in the cache. If cached is not configured\nit will return an empty dictionary of found results and the initial\nlist of keys.\n\nArgs:\napi_name: a string name of the API.\nkeys: an enumerable of string keys.\nReturns:\nA tuple: (responses found, missing keys).", "source": "juraj-google-style"}
{"code": "def __init__(self, type, document, old_index, new_index):\n        \n        \n        self.type = type\n        self.document = document\n        self.old_index = old_index\n        self.new_index = new_index", "docstring": "DocumentChange\n\nArgs:\ntype (ChangeType):\ndocument (document.DocumentSnapshot):\nold_index (int):\nnew_index (int):", "source": "juraj-google-style"}
{"code": "def makesubatoffset(self, bitoffset, *, _offsetideal=None):\n    if (_offsetideal is None):\n        _offsetideal = bitoffset\n    if (bitoffset is 0):\n        return self\n    newpromise = TDOPromise(self._chain, (self._bitstart + bitoffset), self._bitlength, _parent=self, bitstartselective=(self._bitstartselective + _offsetideal))\n    self._addsub(newpromise, 0)\n    return newpromise", "docstring": "Create a copy of this promise with an offset, and use it as this promise's child.\n\nIf this promise's primitive is being merged with another\nprimitive, a new subpromise may be required to keep track of\nthe new offset of data coming from the new primitive.\n\n\nArgs:\nbitoffset: An integer offset of the data in the new primitive.\n_offsetideal: integer offset of the data if terms of bits actually used for promises. Used to calculate the start index to read if the associated primitive has arbitrary TDO control.\n\nReturns:\nA TDOPromise registered with this promise, and with the\ncorrect offset.", "source": "codesearchnet"}
{"code": "def split(self, n):\n    \n    new_range_filters = []\n    name = self.start[0]\n    prop_cls = self.prop.__class__\n    if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:\n      splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](\n          self.start[2], self.end[2], n,\n          self.start[1] == \">=\", self.end[1] == \"<=\")\n      start_filter = (name, \">=\", splitpoints[0])\n      for p in splitpoints[1:]:\n        end_filter = (name, \"<\", p)\n        new_range_filters.append([start_filter, end_filter])\n        start_filter = (name, \">=\", p)\n    else:\n      splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](\n          self.start[2], self.end[2], n)\n      start_filter = self.start\n      for p in splitpoints:\n        end_filter = (name, \"<\", p)\n        new_range_filters.append([start_filter, end_filter])\n        start_filter = (name, \">=\", p)\n      new_range_filters.append([start_filter, self.end])\n\n    for f in new_range_filters:\n      f.extend(self._equality_filters)\n\n    return [self.__class__(f, self.model_class_path) for f in new_range_filters]", "docstring": "Evenly split this range into contiguous, non overlapping subranges.\n\nArgs:\nn: number of splits.\n\nReturns:\na list of contiguous, non overlapping sub PropertyRanges. Maybe less than\nn when not enough subranges.", "source": "juraj-google-style"}
{"code": "def is_valid(value, valid_values):\n        \n\n        valid = False\n\n        if isinstance(valid_values, type) and type(value) is valid_values:\n            valid = True\n        elif isinstance(valid_values, type) and valid_values == float and type(value) == int:\n            \n            valid = True\n        elif isinstance(value, dict) and isinstance(valid_values, dict):\n            \n            \n            assert set(value.keys()) & set(valid_values.keys()) == set(value.keys()) \n            \n            for k ,v in value.items():\n                valid = Parameter.is_valid(v, valid_values[k])\n                if valid ==False:\n                    break\n\n        elif isinstance(value, dict) and valid_values == Parameter:\n            valid = True\n\n        elif isinstance(valid_values, list) and value in valid_values:\n            valid = True\n\n        return valid", "docstring": "check is the value is valid\nArgs:\nvalue: value to be tested\nvalid_values: allowed valid values (type or list of values)\n\nReturns:", "source": "juraj-google-style"}
{"code": "def delete_case(self, case):\n    mongo_case = self.case(case)\n    if (not mongo_case):\n        raise CaseError('Tried to delete case {0} but could not find case'.format(case.get('case_id')))\n    LOG.info('Removing case {0} from database'.format(mongo_case.get('case_id')))\n    self.db.case.delete_one({'_id': mongo_case['_id']})\n    return", "docstring": "Delete case from the database\n\nDelete a case from the database\n\nArgs:\ncase (dict): A case dictionary", "source": "codesearchnet"}
{"code": "def preprocess_histories(self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: int, codebook_size: int, history_prompt: Optional[Dict[str, torch.Tensor]]=None):\n    if history_prompt is not None:\n        x_semantic_history = torch.repeat_interleave(history_prompt['semantic_prompt'][None], batch_size, dim=0)\n        x_coarse_history = history_prompt['coarse_prompt'].clone()\n        if codebook_size is not None:\n            for n in range(1, x_coarse_history.shape[0]):\n                x_coarse_history[n, :] += codebook_size * n\n        x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1)\n        x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size\n        x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0)\n        max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))\n        n_semantic_hist_provided = min([max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio))])\n        n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))\n        x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int()\n        x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int()\n        x_coarse_history = x_coarse_history[:, :-2]\n    else:\n        x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device)\n        x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device)\n    return (x_semantic_history, x_coarse_history)", "docstring": "Preprocess the optional `Bark` speaker prompts before `self.generate`.\n\nArgs:\nmax_coarse_history (`int`):\nMaximum size of coarse tokens used.\nsemantic_to_coarse_ratio (`int`):\nRatio of semantic to coarse frequency\nbatch_size (`int`):\nBatch size, i.e the number of samples.\nsemantic_generation_config (`BarkSemanticGenerationConfig`):\nGeneration config indicating how to generate the semantic tokens.\ncodebook_size (`int`):\nCodebook channel size, i.e. the size of the output vocabulary per codebook channel.\nhistory_prompt (`Optional[Dict[str,torch.Tensor]]`):\nOptional `Bark` speaker prompt.\nReturns: Returns:\n`tuple(torch.FloatTensor)`:\n- **x_semantic_history** (`torch.FloatTensor` -- Processed semantic speaker prompt.\n- **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt.", "source": "github-repos"}
{"code": "def get_subassistants(self):\n    if (not hasattr(self, '_subassistants')):\n        self._subassistants = []\n        if ('get_subassistant_classes' in vars(type(self))):\n            for a in self.get_subassistant_classes():\n                self._subassistants.append(a())\n    return self._subassistants", "docstring": "Return list of instantiated subassistants.\n\nUsually, this needs not be overriden in subclasses, you should just override\nget_subassistant_classes\n\nReturns:\nlist of instantiated subassistants", "source": "codesearchnet"}
{"code": "def method(cache_name, key_prefix=None):\n    \n    def decorator(func):\n        if (func.__name__ in ['cause_repertoire', 'effect_repertoire'] and\n                not config.CACHE_REPERTOIRES):\n            return func\n\n        @wraps(func)\n        def wrapper(obj, *args, **kwargs):\n            cache = getattr(obj, cache_name)\n\n            \n            key = cache.key(*args, _prefix=key_prefix, **kwargs)\n\n            \n            value = cache.get(key)\n            if value is None:  \n                value = func(obj, *args, **kwargs)\n                cache.set(key, value)\n            return value\n\n        return wrapper\n    return decorator", "docstring": "Caching decorator for object-level method caches.\n\nCache key generation is delegated to the cache.\n\nArgs:\ncache_name (str): The name of the (already-instantiated) cache\non the decorated object which should be used to store results\nof this method.\n*key_prefix: A constant to use as part of the cache key in addition\nto the method arguments.", "source": "juraj-google-style"}
{"code": "def diff_commonSuffix(self, text1, text2):\n    \n    \n    if not text1 or not text2 or text1[-1] != text2[-1]:\n      return 0\n    \n    \n    pointermin = 0\n    pointermax = min(len(text1), len(text2))\n    pointermid = pointermax\n    pointerend = 0\n    while pointermin < pointermid:\n      if (text1[-pointermid:len(text1) - pointerend] ==\n          text2[-pointermid:len(text2) - pointerend]):\n        pointermin = pointermid\n        pointerend = pointermin\n      else:\n        pointermax = pointermid\n      pointermid = (pointermax - pointermin) \n    return pointermid", "docstring": "Determine the common suffix of two strings.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nThe number of characters common to the end of each string.", "source": "juraj-google-style"}
{"code": "def PrintExtractionSummary(self, processing_status):\n    \n    if not processing_status:\n      self._output_writer.Write(\n          'WARNING: missing processing status information.\\n')\n\n    elif not processing_status.aborted:\n      if processing_status.error_path_specs:\n        self._output_writer.Write('Processing completed with errors.\\n')\n      else:\n        self._output_writer.Write('Processing completed.\\n')\n\n      number_of_warnings = (\n          processing_status.foreman_status.number_of_produced_warnings)\n      if number_of_warnings:\n        output_text = '\\n'.join([\n            '',\n            ('Number of warnings generated while extracting events: '\n             '{0:d}.').format(number_of_warnings),\n            '',\n            'Use pinfo to inspect warnings in more detail.',\n            ''])\n        self._output_writer.Write(output_text)\n\n      if processing_status.error_path_specs:\n        output_text = '\\n'.join([\n            '',\n            'Path specifications that could not be processed:',\n            ''])\n        self._output_writer.Write(output_text)\n        for path_spec in processing_status.error_path_specs:\n          self._output_writer.Write(path_spec.comparable)\n          self._output_writer.Write('\\n')\n\n    self._output_writer.Write('\\n')", "docstring": "Prints a summary of the extraction.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"}
{"code": "def WaitUntilValid(self, timeout=None):\n    \n\n    return utils.Poll(\n        generator=self.Get,\n        condition=lambda f: f.data.is_valid,\n        timeout=timeout)", "docstring": "Wait until the approval is valid (i.e. - approved).\n\nArgs:\ntimeout: timeout in seconds. None means default timeout (1 hour).\n0 means no timeout (wait forever).\nReturns:\nOperation object with refreshed target_file.\nRaises:\nPollTimeoutError: if timeout is reached.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent=None):\n        \n        super(CSVImportDialog, self).__init__(parent)\n        self._modal = True\n        self._windowTitle = 'Import CSV'\n        self._encodingKey = None\n        self._filename = None\n        self._delimiter = None\n        self._header = None\n        \n        self._initUI()", "docstring": "Constructs the object with the given parent.\n\nArgs:\nparent (QObject, optional): Causes the objected to be owned\nby `parent` instead of Qt. Defaults to `None`.", "source": "juraj-google-style"}
{"code": "def initialize_dual(neural_net_params_object, init_dual_file=None, random_init_variance=0.01, init_nu=200.0):\n    lambda_pos = []\n    lambda_neg = []\n    lambda_quad = []\n    lambda_lu = []\n    if (init_dual_file is None):\n        for i in range(0, (neural_net_params_object.num_hidden_layers + 1)):\n            initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)\n            lambda_pos.append(tf.get_variable(('lambda_pos_' + str(i)), initializer=initializer, dtype=tf.float32))\n            initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)\n            lambda_neg.append(tf.get_variable(('lambda_neg_' + str(i)), initializer=initializer, dtype=tf.float32))\n            initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)\n            lambda_quad.append(tf.get_variable(('lambda_quad_' + str(i)), initializer=initializer, dtype=tf.float32))\n            initializer = np.random.uniform(0, random_init_variance, size=(neural_net_params_object.sizes[i], 1)).astype(np.float32)\n            lambda_lu.append(tf.get_variable(('lambda_lu_' + str(i)), initializer=initializer, dtype=tf.float32))\n        nu = tf.get_variable('nu', initializer=init_nu)\n    else:\n        dual_var_init_val = np.load(init_dual_file).item()\n        for i in range(0, (neural_net_params_object.num_hidden_layers + 1)):\n            lambda_pos.append(tf.get_variable(('lambda_pos_' + str(i)), initializer=dual_var_init_val['lambda_pos'][i], dtype=tf.float32))\n            lambda_neg.append(tf.get_variable(('lambda_neg_' + str(i)), initializer=dual_var_init_val['lambda_neg'][i], dtype=tf.float32))\n            lambda_quad.append(tf.get_variable(('lambda_quad_' + str(i)), initializer=dual_var_init_val['lambda_quad'][i], dtype=tf.float32))\n            lambda_lu.append(tf.get_variable(('lambda_lu_' + str(i)), initializer=dual_var_init_val['lambda_lu'][i], dtype=tf.float32))\n        nu = tf.get_variable('nu', initializer=(1.0 * dual_var_init_val['nu']))\n    dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg, 'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}\n    return dual_var", "docstring": "Function to initialize the dual variables of the class.\n\nArgs:\nneural_net_params_object: Object with the neural net weights, biases\nand types\ninit_dual_file: Path to file containing dual variables, if the path\nis empty, perform random initialization\nExpects numpy dictionary with\nlambda_pos_0, lambda_pos_1, ..\nlambda_neg_0, lambda_neg_1, ..\nlambda_quad_0, lambda_quad_1, ..\nlambda_lu_0, lambda_lu_1, ..\nrandom_init_variance: variance for random initialization\ninit_nu: Value to initialize nu variable with\n\nReturns:\ndual_var: dual variables initialized appropriately.", "source": "codesearchnet"}
{"code": "def generate_page(self, path, template, **kwargs):\n    directory = None\n    if kwargs.get('page'):\n        directory = kwargs['page'].dir\n    path = self._get_dist_path(path, directory=directory)\n    if (not path.endswith('.html')):\n        path = (path + '.html')\n    if (not os.path.isdir(os.path.dirname(path))):\n        os.makedirs(os.path.dirname(path))\n    html = self._get_template(template).render(**kwargs)\n    with open(path, 'w+') as file:\n        file.write(html)", "docstring": "Generate the HTML for a single page. You usually don't need to call this\nmethod manually, it is used by a lot of other, more end-user friendly\nmethods.\n\nArgs:\npath (str): Where to place the page relative to the root URL. Usually\nsomething like \"index\", \"about-me\", \"projects/example\", etc.\ntemplate (str): Which jinja template to use to render the page.\n**kwargs: Kwargs will be passed on to the jinja template. Also, if\nthe `page` kwarg is passed, its directory attribute will be\nprepended to the path.", "source": "codesearchnet"}
{"code": "def RemoveConnectedPeer(self, peer):\n        \n        if peer in self.Peers:\n            self.Peers.remove(peer)", "docstring": "Remove a connected peer from the known peers list.\n\nArgs:\npeer (NeoNode): instance.", "source": "juraj-google-style"}
{"code": "def parse_saved_model_with_debug_info(export_dir):\n    saved_model = parse_saved_model(export_dir)\n    debug_info_path = file_io.join(path_helpers.get_debug_dir(export_dir), constants.DEBUG_INFO_FILENAME_PB)\n    debug_info = graph_debug_info_pb2.GraphDebugInfo()\n    if file_io.file_exists(debug_info_path):\n        with file_io.FileIO(debug_info_path, 'rb') as debug_file:\n            try:\n                debug_info.ParseFromString(debug_file.read())\n            except message.DecodeError as e:\n                raise IOError(f'Cannot parse file {debug_info_path}: {e}.')\n    return (saved_model, debug_info)", "docstring": "Reads the savedmodel as well as the graph debug info.\n\nArgs:\nexport_dir: Directory containing the SavedModel and GraphDebugInfo files.\n\nReturns:\n`SavedModel` and `GraphDebugInfo` protocol buffers.\n\nRaises:\nIOError: If the saved model file does not exist, or cannot be successfully\nparsed. Missing graph debug info file is fine.", "source": "github-repos"}
{"code": "def get_chip(self, coordinates, catid, chip_type='PAN', chip_format='TIF', filename='chip.tif'):\n\n    def t2s1(t):\n        return str(t).strip('(,)').replace(',', '')\n\n    def t2s2(t):\n        return str(t).strip('(,)').replace(' ', '')\n    if (len(coordinates) != 4):\n        print('Wrong coordinate entry')\n        return False\n    (W, S, E, N) = coordinates\n    box = ((W, S), (W, N), (E, N), (E, S), (W, S))\n    box_wkt = (('POLYGON ((' + ','.join([t2s1(corner) for corner in box])) + '))')\n    results = self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=box_wkt)\n    description = self.describe_images(results)\n    (pan_id, ms_id, num_bands) = (None, None, 0)\n    for (catid, images) in description.items():\n        for (partnum, part) in images['parts'].items():\n            if ('PAN' in part.keys()):\n                pan_id = part['PAN']['id']\n                bucket = part['PAN']['bucket']\n            if ('WORLDVIEW_8_BAND' in part.keys()):\n                ms_id = part['WORLDVIEW_8_BAND']['id']\n                num_bands = 8\n                bucket = part['WORLDVIEW_8_BAND']['bucket']\n            elif ('RGBN' in part.keys()):\n                ms_id = part['RGBN']['id']\n                num_bands = 4\n                bucket = part['RGBN']['bucket']\n    band_str = ''\n    if (chip_type == 'PAN'):\n        band_str = (pan_id + '?bands=0')\n    elif (chip_type == 'MS'):\n        band_str = (ms_id + '?')\n    elif (chip_type == 'PS'):\n        if (num_bands == 8):\n            band_str = ((ms_id + '?bands=4,2,1&panId=') + pan_id)\n        elif (num_bands == 4):\n            band_str = ((ms_id + '?bands=0,1,2&panId=') + pan_id)\n    location_str = '&upperLeft={}&lowerRight={}'.format(t2s2((W, N)), t2s2((E, S)))\n    service_url = (('https:\n    url = ((service_url + band_str) + location_str)\n    url += ((('&format=' + chip_format) + '&token=') + self.gbdx_connection.access_token)\n    r = requests.get(url)\n    if (r.status_code == 200):\n        with open(filename, 'wb') as f:\n            f.write(r.content)\n            return True\n    else:\n        print('Cannot download chip')\n        return False", "docstring": "Downloads a native resolution, orthorectified chip in tif format\nfrom a user-specified catalog id.\n\nArgs:\ncoordinates (list): Rectangle coordinates in order West, South, East, North.\nWest and East are longitudes, North and South are latitudes.\nThe maximum chip size is (2048 pix)x(2048 pix)\ncatid (str): The image catalog id.\nchip_type (str): 'PAN' (panchromatic), 'MS' (multispectral), 'PS' (pansharpened).\n'MS' is 4 or 8 bands depending on sensor.\nchip_format (str): 'TIF' or 'PNG'\nfilename (str): Where to save chip.\n\nReturns:\nTrue if chip is successfully downloaded; else False.", "source": "codesearchnet"}
{"code": "def Parse(filename, global_env):\n    parser = StlParser(filename=filename, global_env=global_env)\n    with open(filename) as data:\n        return parser.parse(data.read())", "docstring": "Parse a state transition spec of |filename| and fill |module_dict|.\n\nArgs:\nfilename: A state transition spec file.\nglobal_env: Dictionary to store global STL state. It has one field:\nglobal_env['modules']: Dictionary of stl.module.Module by name.", "source": "github-repos"}
{"code": "def merge(self, other_roc):\n    if ((other_roc.thresholds.size == self.thresholds.size) and np.all((other_roc.thresholds == self.thresholds))):\n        self.contingency_tables += other_roc.contingency_tables\n    else:\n        print('Input table thresholds do not match.')", "docstring": "Ingest the values of another DistributedROC object into this one and update the statistics inplace.\n\nArgs:\nother_roc: another DistributedROC object.", "source": "codesearchnet"}
{"code": "def _CanMergeLineIntoIfStatement(lines, limit):\n    if len(lines[1].tokens) == 1 and lines[1].last.is_multiline_string:\n        return True\n    if lines[0].lineno != lines[1].lineno:\n        return False\n    if lines[1].last.total_length >= limit:\n        return False\n    return style.Get('JOIN_MULTIPLE_LINES')", "docstring": "Determine if we can merge a short if-then statement into one line.\n\nTwo lines of an if-then statement can be merged if they were that way in the\noriginal source, fit on the line without going over the column limit, and are\nconsidered \"simple\" statements --- typically statements like 'pass',\n'continue', and 'break'.\n\nArguments:\nlines: (list of LogicalLine) The lines we are wanting to merge.\nlimit: (int) The amount of space remaining on the line.\n\nReturns:\nTrue if the lines can be merged, False otherwise.", "source": "github-repos"}
{"code": "def run_gpu_only(func: _F) -> _F:\n    if tf_inspect.isclass(func):\n        raise ValueError('`run_gpu_only` only supports test methods.')\n\n    def decorated(self: 'TensorFlowTestCase', *args, **kwargs):\n        if not is_gpu_available():\n            self.skipTest('Test requires GPU')\n        return func(self, *args, **kwargs)\n    return decorated", "docstring": "Execute the decorated test only if a GPU is available.\n\nThis function is intended to be applied to tests that require the presence\nof a GPU. If a GPU is absent, it will simply be skipped.\n\nArgs:\nfunc: function to be annotated.\n\nReturns:\nReturns a function that will conditionally skip the decorated test method.", "source": "github-repos"}
{"code": "def download_mmcif_header(pdb_id, outdir='', force_rerun=False):\n    \n    \n    \n\n    pdb_id = pdb_id.lower()\n    file_type = 'cif'\n    folder = 'header'\n    outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        download_link = 'http:\n        urlretrieve(download_link, outfile)\n        log.debug('{}: saved header file'.format(outfile))\n    else:\n        log.debug('{}: header file already saved'.format(outfile))\n\n    return outfile", "docstring": "Download a mmCIF header file from the RCSB PDB by ID.\n\nArgs:\npdb_id: PDB ID\noutdir: Optional output directory, default is current working directory\nforce_rerun: If the file should be downloaded again even if it exists\n\nReturns:\nstr: Path to outfile", "source": "juraj-google-style"}
{"code": "def refresh(self, id_or_uri, timeout=-1):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/refresh\"\n        return self._client.update_with_zero_body(uri, timeout=timeout)", "docstring": "The Refresh action reclaims the top-of-rack switches in a logical switch.\n\nArgs:\nid_or_uri:\nCan be either the Logical Switch ID or URI\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: The Logical Switch", "source": "juraj-google-style"}
{"code": "def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):\n    img = _imsave_before(img, channel_first, auto_scale)\n    if auto_scale:\n        img = upscale_pixel_intensity(img, as_uint16)\n    img = check_type_and_cast_if_necessary(img, as_uint16)\n    bitdepth = (8 if (img.dtype == np.uint8) else 16)\n    grayscale = (True if ((len(img.shape) == 2) or ((len(img.shape) == 3) and (img.shape[(- 1)] == 1))) else False)\n    writer = png.Writer(img.shape[1], img.shape[0], greyscale=grayscale, bitdepth=bitdepth)\n    writer.write(open(path, 'wb'), img.reshape(img.shape[0], (- 1)))", "docstring": "Save image by pypng module.\n\nArgs:\npath (str): output filename\nimg (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.\nchannel_first:\nThis argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).\nDefault value is False, which means the img shape is (height, width, channel)\nas_uint16 (bool):\nIf True, save image as uint16.\nauto_scale (bool) :\nWhether upscale pixel values or not.\nIf you want to save float image, this argument must be True.\nIn pypng backend, all below are supported.\n- float ([0, 1]) to uint8 ([0, 255])  (if img.dtype==float and upscale==True and as_uint16==False)\n- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)\n- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)", "source": "codesearchnet"}
{"code": "def split(input_layer, split_dim=0, num_splits=2):\n    shape = input_layer.shape\n    _check_split_dims(num_splits, split_dim, shape)\n    splits = tf.split(value=input_layer, num_or_size_splits=num_splits, axis=split_dim)\n    return input_layer.with_sequence(splits)", "docstring": "Splits this Tensor along the split_dim into num_splits Equal chunks.\n\nExamples:\n\n* `[1, 2, 3, 4] -> [1, 2], [3, 4]`\n* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`\n\nArgs:\ninput_layer: The chainable object, supplied.\nsplit_dim: The dimension to split along. Defaults to batch.\nnum_splits: The number of splits.\nReturns:\nA list of PrettyTensors.\nRaises:\nValueError: If split_dim is out of range or isn't divided evenly by\nnum_splits.", "source": "codesearchnet"}
{"code": "def send(self, **kwargs):\n    \n    assert len(kwargs) == 1, \"Must make a single request.\"\n    res = self.send_req(sc_pb.Request(**kwargs))\n    return getattr(res, list(kwargs.keys())[0])", "docstring": "Create and send a specific request, and return the response.\n\nFor example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing\n\nArgs:\n**kwargs: A single kwarg with the name and value to fill in to Request.\n\nReturns:\nThe Response corresponding to your request.", "source": "juraj-google-style"}
{"code": "def epsilon():\n    return _EPSILON", "docstring": "Returns the value of the fuzz factor used in numeric expressions.\n\nReturns:\nA float.\n\nExample:\n>>> tf.keras.backend.epsilon()\n1e-07", "source": "github-repos"}
{"code": "def _get_filters(nodes, context):\n    \n    filters = []\n    for node in nodes:\n        for filter_block in sql_context_helpers.get_filters(node, context):\n            filter_sql_expression = _transform_filter_to_sql(filter_block, node, context)\n            filters.append(filter_sql_expression)\n    return filters", "docstring": "Get filters to apply to a list of SqlNodes.\n\nArgs:\nnodes: List[SqlNode], the SqlNodes to get filters for.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nList[Expression], list of SQLAlchemy expressions.", "source": "juraj-google-style"}
{"code": "def get_executor():\n    return context().executor", "docstring": "Get the Executor of the current thread.\n\nReturns:\nThe Executor of the current thread.", "source": "github-repos"}
{"code": "def join(self, other):\n        \n        if self.contains(other):\n            return True\n\n        if other.contains(self):\n            self.x = other.x\n            self.y = other.y\n            self.width = other.width\n            self.height = other.height\n            return True\n\n        if not self.intersects(other, edges=True):\n            return False\n\n        \n        if  self.left == other.left and self.width == other.width:\n            y_min = min(self.bottom, other.bottom)\n            y_max = max(self.top, other.top)  \n            self.y = y_min\n            self.height = y_max-y_min\n            return True\n\n        \n        if  self.bottom == other.bottom and self.height == other.height:\n            x_min = min(self.left, other.left)\n            x_max = max(self.right, other.right)\n            self.x = x_min\n            self.width = x_max-x_min\n            return True\n\n        return False", "docstring": "Try to join a rectangle to this one, if the result is also a rectangle\nand the operation is successful and this rectangle is modified to the union.\n\nArguments:\nother (Rectangle): Rectangle to join\n\nReturns:\nbool: True when successfully joined, False otherwise", "source": "juraj-google-style"}
{"code": "def _maybe_select_class_id(labels, predictions_idx, selected_id=None):\n    if selected_id is None:\n        return (labels, predictions_idx)\n    return (_select_class_id(labels, selected_id), _select_class_id(predictions_idx, selected_id))", "docstring": "If class ID is specified, filter all other classes.\n\nArgs:\nlabels: `int64` `Tensor` or `SparseTensor` with shape\n[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\ntarget classes for the associated prediction. Commonly, N=1 and `labels`\nhas shape [batch_size, num_labels]. [D1, ... DN] must match\n`predictions_idx`.\npredictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]\nwhere N >= 1. Commonly, N=1 and `predictions_idx` has shape\n[batch size, k].\nselected_id: Int id to select.\n\nReturns:\nTuple of `labels` and `predictions_idx`, possibly with classes removed.", "source": "github-repos"}
{"code": "def add(self, coro, *args, **kw):\n        \n        \n        if asyncio.iscoroutinefunction(coro):\n            coro = coro(*args, **kw)\n\n        \n        if not asyncio.iscoroutine(coro):\n            raise TypeError('paco: coro must be a coroutine object')\n\n        \n        index = max(len(self.pool), 0)\n        task = Task(index, coro)\n\n        \n        self.pool.append(task)\n\n        return coro", "docstring": "Adds a new coroutine function with optional variadic argumetns.\n\nArguments:\ncoro (coroutine function): coroutine to execute.\n*args (mixed): optional variadic arguments\n\nRaises:\nTypeError: if the coro object is not a valid coroutine\n\nReturns:\nfuture: coroutine wrapped future", "source": "juraj-google-style"}
{"code": "def get(self, node_id):\n        \n        return \\\n            self._nodes[_node.Root.ID].get(node_id) or \\\n            self._nodes[_node.Root.ID].get(self._sid_map.get(node_id))", "docstring": "Get a note with the given ID.\n\nArgs:\nnode_id (str): The note ID.\n\nReturns:\ngkeepapi.node.TopLevelNode: The Note or None if not found.", "source": "juraj-google-style"}
{"code": "def run_resume_status(self, entity, project_name, name):\n    query = gql('\\n        query Model($project: String!, $entity: String, $name: String!) {\\n            model(name: $project, entityName: $entity) {\\n                id\\n                name\\n                entity {\\n                    id\\n                    name\\n                }\\n\\n                bucket(name: $name, missingOk: true) {\\n                    id\\n                    name\\n                    logLineCount\\n                    historyLineCount\\n                    eventsLineCount\\n                    historyTail\\n                    eventsTail\\n                }\\n            }\\n        }\\n        ')\n    response = self.gql(query, variable_values={'entity': entity, 'project': project_name, 'name': name})\n    if (('model' not in response) or ('bucket' not in response['model'])):\n        return None\n    project = response['model']\n    self.set_setting('project', project_name)\n    if ('entity' in project):\n        self.set_setting('entity', project['entity']['name'])\n    return project['bucket']", "docstring": "Check if a run exists and get resume information.\n\nArgs:\nentity (str, optional): The entity to scope this project to.\nproject_name (str): The project to download, (can include bucket)\nrun (str, optional): The run to download", "source": "codesearchnet"}
{"code": "def create_checklist(self, checklist_json):\n    return trolly.checklist.Checklist(trello_client=self, checklist_id=checklist_json['id'], name=checklist_json['name'], data=checklist_json)", "docstring": "Create a Checklist object from JSON object\n\nReturns:\nChecklist: The checklist from the given `checklist_json`.", "source": "codesearchnet"}
{"code": "def reverse_axis(self, axis_to_reverse):\n    if (axis_to_reverse.lower() == 'x'):\n        self.general.reverse_x_axis = True\n    if (axis_to_reverse.lower() == 'y'):\n        self.general.reverse_y_axis = True\n    if ((axis_to_reverse.lower() != 'x') or (axis_to_reverse.lower() != 'y')):\n        raise ValueError('Axis for reversing needs to be either x or y.')\n    return", "docstring": "Reverse an axis in all figure plots.\n\nThis will reverse the tick marks on an axis for each plot in the figure.\nIt can be overridden in SinglePlot class.\n\nArgs:\naxis_to_reverse (str): Axis to reverse. Supports `x` and `y`.\n\nRaises:\nValueError: The string representing the axis to reverse is not `x` or `y`.", "source": "codesearchnet"}
{"code": "def create_unbroadcast_axis(shape, broadcast_shape):\n    return tuple(((- (1 + i)) for i in range(len(broadcast_shape)) if ((i >= len(shape)) or (broadcast_shape[(- (1 + i))] > shape[(- (1 + i))]))))", "docstring": "Creates the reduction axis for unbroadcasting.\n\nArgs:\nshape: A list. The shape after the broadcast operation.\nbroadcast_shape: A list. The original shape the array being unbroadcast\nhad.\nReturns:\nA list. The axes along which the array needs to be reduced. These axes will\nbe distributed evenly into the original shape.", "source": "codesearchnet"}
{"code": "def array_to_base64_png(array):\n    array = np.array(array, dtype=np.float32)\n    if (len(array.shape) != 2):\n        raise ValueError(('Expected rank-2 array; received rank-%d array.' % len(array.shape)))\n    if (not np.size(array)):\n        raise ValueError(('Cannot encode an empty array (size: %s) as image.' % (array.shape,)))\n    is_infinity = np.isinf(array)\n    is_positive = (array > 0.0)\n    is_positive_infinity = np.logical_and(is_infinity, is_positive)\n    is_negative_infinity = np.logical_and(is_infinity, np.logical_not(is_positive))\n    is_nan = np.isnan(array)\n    finite_indices = np.where(np.logical_and(np.logical_not(is_infinity), np.logical_not(is_nan)))\n    if np.size(finite_indices):\n        minval = np.min(array[finite_indices])\n        maxval = np.max(array[finite_indices])\n        scaled = np.array((((array - minval) / (maxval - minval)) * 255), dtype=np.uint8)\n        rgb = np.repeat(np.expand_dims(scaled, (- 1)), IMAGE_COLOR_CHANNELS, axis=(- 1))\n    else:\n        rgb = np.zeros((array.shape + (IMAGE_COLOR_CHANNELS,)), dtype=np.uint8)\n    rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB\n    rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB\n    rgb[is_nan] = NAN_RGB\n    image_encoded = base64.b64encode(encoder.encode_png(rgb))\n    return image_encoded", "docstring": "Convert an array into base64-enoded PNG image.\n\nArgs:\narray: A 2D np.ndarray or nested list of items.\n\nReturns:\nA base64-encoded string the image. The image is grayscale if the array is\n2D. The image is RGB color if the image is 3D with lsat dimension equal to\n3.\n\nRaises:\nValueError: If the input `array` is not rank-2, or if the rank-2 `array` is\nempty.", "source": "codesearchnet"}
{"code": "def stats(self, container, decode=None, stream=True):\n    url = self._url('/containers/{0}/stats', container)\n    if stream:\n        return self._stream_helper(self._get(url, stream=True), decode=decode)\n    else:\n        if decode:\n            raise errors.InvalidArgument('decode is only available in conjuction with stream=True')\n        return self._result(self._get(url, params={'stream': False}), json=True)", "docstring": "Stream statistics for a specific container. Similar to the\n``docker stats`` command.\n\nArgs:\ncontainer (str): The container to stream statistics from\ndecode (bool): If set to true, stream will be decoded into dicts\non the fly. Only applicable if ``stream`` is True.\nFalse by default.\nstream (bool): If set to false, only the current stats will be\nreturned instead of a stream. True by default.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def clear(self, rows=None):\n    rows = (tf.range(self._capacity) if (rows is None) else rows)\n    assert (rows.shape.ndims == 1)\n    return tf.scatter_update(self._length, rows, tf.zeros_like(rows))", "docstring": "Reset episodes in the memory.\n\nInternally, this only sets their lengths to zero. The memory entries will\nbe overridden by future calls to append() or replace().\n\nArgs:\nrows: Episodes to clear, defaults to all.\n\nReturns:\nOperation.", "source": "codesearchnet"}
{"code": "def compare_names(first, second):\n    first = name_to_vector(first)\n    second = name_to_vector(second)\n    zipped = zip(first, second)\n    if (not zipped):\n        return 0\n    similarity_factor = 0\n    for (fitem, _) in zipped:\n        if (fitem in second):\n            similarity_factor += 1\n    return ((float(similarity_factor) / len(zipped)) * 100)", "docstring": "Compare two names in complicated, but more error prone way.\n\nAlgorithm is using vector comparison.\n\nExample:\n>>> compare_names(\"Franta Putšálek\", \"ing. Franta Putšálek\")\n100.0\n>>> compare_names(\"F. Putšálek\", \"ing. Franta Putšálek\")\n50.0\n\nArgs:\nfirst (str): Fisst name as string.\nsecond (str): Second name as string.\n\nReturns:\nfloat: Percentage of the similarity.", "source": "codesearchnet"}
{"code": "def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):\n    super(InverseTimeDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.decay_rate = decay_rate\n    self.staircase = staircase\n    self.name = name", "docstring": "Applies inverse time decay to the initial learning rate.\n\nArgs:\ninitial_learning_rate: A scalar `float32` or `float64` `Tensor` or a\nPython number.  The initial learning rate.\ndecay_steps: How often to apply decay.\ndecay_rate: A Python number.  The decay rate.\nstaircase: Whether to apply decay in a discrete staircase, as opposed to\ncontinuous, fashion.\nname: String.  Optional name of the operation.  Defaults to\n'InverseTimeDecay'.", "source": "github-repos"}
{"code": "def compute_values(edge_compatibility, v):\n  \n\n  \n  \n  \n  \n  all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)\n\n  \n  \n  output = tf.reduce_sum(all_edge_values, axis=1)  \n  return output", "docstring": "Compute values. If edge compatibilities is just adjacency, we get ggnn.\n\nArgs:\nedge_compatibility: A tensor of shape [batch, num_transforms, length, depth]\nv: A tensor of shape [batch, num_transforms, length, depth]\n\nReturns:\noutput: A [batch, length, depth] tensor", "source": "juraj-google-style"}
{"code": "def save(**kwargs):\n    ret = {'comment': [], 'result': True}\n    beacons = list_(return_yaml=False, include_pillar=False, **kwargs)\n    sfn = os.path.join(os.path.dirname(__opts__['conf_file']), os.path.dirname(__opts__['default_include']), 'beacons.conf')\n    if beacons:\n        tmp = {'beacons': beacons}\n        yaml_out = salt.utils.yaml.safe_dump(tmp, default_flow_style=False)\n    else:\n        yaml_out = ''\n    try:\n        with salt.utils.files.fopen(sfn, 'w+') as fp_:\n            fp_.write(yaml_out)\n        ret['comment'] = 'Beacons saved to {0}.'.format(sfn)\n    except (IOError, OSError):\n        ret['comment'] = 'Unable to write to beacons file at {0}. Check permissions.'.format(sfn)\n        ret['result'] = False\n    return ret", "docstring": "Save all configured beacons to the minion config.\n\nReturns:\ndict: Boolean and status message on success or failure of save.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' beacons.save", "source": "codesearchnet"}
{"code": "def formatted(self, func):\n    other = EscapedString.__new__(EscapedString)\n    other.strings = []\n    for (is_literal, value) in self.strings:\n        if (not is_literal):\n            value = func(value)\n        other.strings.append((is_literal, value))\n    return other", "docstring": "Return the string with non-literal parts formatted.\n\nArgs:\nfunc (callable): Callable that translates a string into a\nformatted string.\n\nReturns:\n`EscapedString` object.", "source": "codesearchnet"}
{"code": "def __init__(self, options=None):\n    \n    super(ExportConverter, self).__init__()\n    self.options = options or ExportOptions()", "docstring": "Constructor.\n\nArgs:\noptions: ExportOptions value, which contains settings that may or or may\nnot affect this converter's behavior.", "source": "juraj-google-style"}
{"code": "def get(self, **params):\n    if self._use_cache:\n        r = requests.get(self.url, params=params)\n    else:\n        with requests_cache.disabled():\n            r = requests.get(self.url, params=params)\n    r.raise_for_status()\n    return r", "docstring": "Performs get request to the biomart service.\n\nArgs:\n**params (dict of str: any): Arbitrary keyword arguments, which\nare added as parameters to the get request to biomart.\n\nReturns:\nrequests.models.Response: Response from biomart for the request.", "source": "codesearchnet"}
{"code": "def hwvtep_add_rbridgeid(self, **kwargs):\n    name = kwargs.pop('name')\n    id = kwargs.pop('rb_range')\n    ip_args = dict(name=name, rb_add=id)\n    method_name = 'overlay_gateway_attach_rbridge_id_rb_add'\n    method_class = self._brocade_tunnels\n    gw_attr = getattr(method_class, method_name)\n    config = gw_attr(**ip_args)\n    output = self._callback(config)\n    return output", "docstring": "Add a range of rbridge-ids\n\nArgs:\nname  (str): gateway-name\nvlan (str): rbridge-ids range\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def run_change_point_analysis(test_config_container: TestConfigContainer, big_query_metrics_fetcher: MetricsFetcher, change_point_config: ChangePointConfig=ChangePointConfig(), save_alert_metadata: bool=False):\n    logging.info('Running change point analysis for test ID :%s on metric: % s' % (test_config_container.test_id, test_config_container.metric_name))\n    test_name = test_config_container.test_name\n    min_runs_between_change_points = change_point_config.min_runs_between_change_points\n    num_runs_in_change_point_window = change_point_config.num_runs_in_change_point_window\n    metric_container = big_query_metrics_fetcher.fetch_metric_data(test_config=test_config_container)\n    metric_container.sort_by_timestamp()\n    metric_values = metric_container.values\n    timestamps = metric_container.timestamps\n    change_point_index = find_latest_change_point_index(metric_values=metric_values)\n    if not change_point_index:\n        logging.info('Change point is not detected for the test ID %s' % test_config_container.test_id)\n        return False\n    latest_change_point_run = len(timestamps) - 1 - change_point_index\n    if not is_change_point_in_valid_window(num_runs_in_change_point_window, latest_change_point_run):\n        logging.info('Performance regression/improvement found for the test ID: %s. on metric %s. Since the change point run %s lies outside the num_runs_in_change_point_window distance: %s, alert is not raised.' % (test_config_container.test_id, test_config_container.metric_name, latest_change_point_run + 1, num_runs_in_change_point_window))\n        return False\n    is_valid_change_point = True\n    last_reported_issue_number = None\n    issue_metadata_table_name = f'{test_config_container.metrics_table}_{test_config_container.metric_name}'\n    if test_config_container.test_name:\n        issue_metadata_table_name = f'{issue_metadata_table_name}_{test_config_container.test_name}'\n    existing_issue_data = get_existing_issues_data(table_name=issue_metadata_table_name)\n    if existing_issue_data is not None:\n        existing_issue_timestamps = existing_issue_data[constants._CHANGE_POINT_TIMESTAMP_LABEL].tolist()\n        last_reported_issue_number = existing_issue_data[constants._ISSUE_NUMBER].tolist()[0]\n        if not isinstance(last_reported_issue_number, int):\n            last_reported_issue_number = last_reported_issue_number.item()\n        is_valid_change_point = is_sibling_change_point(previous_change_point_timestamps=existing_issue_timestamps, change_point_index=change_point_index, timestamps=timestamps, min_runs_between_change_points=min_runs_between_change_points, test_id=test_config_container.test_id)\n    if is_valid_change_point and save_alert_metadata:\n        issue_number, issue_url = create_performance_alert(test_config_container=test_config_container, metric_container=metric_container, change_point_index=change_point_index, existing_issue_number=last_reported_issue_number)\n        issue_metadata = GitHubIssueMetaData(issue_timestamp=pd.Timestamp(datetime.now().replace(tzinfo=timezone.utc)), test_id=test_config_container.test_id.replace('.', '_'), test_name=test_name or uuid.uuid4().hex, metric_name=test_config_container.metric_name, change_point=metric_values[change_point_index], issue_number=issue_number, issue_url=issue_url, change_point_timestamp=timestamps[change_point_index])\n        publish_issue_metadata_to_big_query(issue_metadata=issue_metadata, table_name=issue_metadata_table_name, 
project=test_config_container.project)\n    return is_valid_change_point", "docstring": "Args:\ntest_config_container: TestConfigContainer containing test metadata for\nfetching data and running change point analysis.\nbig_query_metrics_fetcher: BigQuery metrics fetcher used to fetch data for\nchange point analysis.\nchange_point_config: ChangePointConfig containing parameters to run\nchange point analysis.\nsave_alert_metadata: bool indicating if issue metadata\nshould be published to BigQuery table.\nReturns:\nbool indicating if a change point is observed and alerted on GitHub.", "source": "github-repos"}
{"code": "def GetFeeds(client):\n    feed_service = client.GetService('FeedService', 'v201809')\n    feeds = []\n    more_pages = True\n    selector = {'fields': ['Id', 'Name', 'Attributes'], 'predicates': [{'field': 'Origin', 'operator': 'EQUALS', 'values': ['USER']}, {'field': 'FeedStatus', 'operator': 'EQUALS', 'values': ['ENABLED']}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}\n    while more_pages:\n        page = feed_service.get(selector)\n        if ('entries' in page):\n            feeds.extend(page['entries'])\n        selector['paging']['startIndex'] += PAGE_SIZE\n        more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))\n    return feeds", "docstring": "Returns a list of all enabled Feeds.\n\nArgs:\nclient: an AdWordsClient instance.\n\nReturns:\nA list containing all enabled Feeds.", "source": "codesearchnet"}
{"code": "def national_significant_number(numobj):\n    \n    \n    \n    national_number = U_EMPTY_STRING\n    if numobj.italian_leading_zero:\n        num_zeros = numobj.number_of_leading_zeros\n        if num_zeros is None:\n            num_zeros = 1\n        if num_zeros > 0:\n            national_number = U_ZERO * num_zeros\n    national_number += str(numobj.national_number)\n    return national_number", "docstring": "Gets the national significant number of a phone number.\n\nNote that a national significant number doesn't contain a national prefix\nor any formatting.\n\nArguments:\nnumobj -- The PhoneNumber object for which the national significant number\nis needed.\n\nReturns the national significant number of the PhoneNumber object passed\nin.", "source": "juraj-google-style"}
{"code": "def get_interpolated_value(self, energy, integrated=False):\n    inter = {}\n    for spin in self.cohp:\n        if (not integrated):\n            inter[spin] = get_linear_interpolated_value(self.energies, self.cohp[spin], energy)\n        elif (self.icohp is not None):\n            inter[spin] = get_linear_interpolated_value(self.energies, self.icohp[spin], energy)\n        else:\n            raise ValueError('ICOHP is empty.')\n    return inter", "docstring": "Returns the COHP for a particular energy.\n\nArgs:\nenergy: Energy to return the COHP value for.", "source": "codesearchnet"}
{"code": "def GetMessage(self, log_source, lcid, message_identifier):\n    event_log_provider_key = self._GetEventLogProviderKey(log_source)\n    if (not event_log_provider_key):\n        return None\n    generator = self._GetMessageFileKeys(event_log_provider_key)\n    if (not generator):\n        return None\n    message_string = None\n    for message_file_key in generator:\n        message_string = self._GetMessage(message_file_key, lcid, message_identifier)\n        if message_string:\n            break\n    if (self._string_format == 'wrc'):\n        message_string = self._ReformatMessageString(message_string)\n    return message_string", "docstring": "Retrieves a specific message for a specific Event Log source.\n\nArgs:\nlog_source (str): Event Log source.\nlcid (int): language code identifier (LCID).\nmessage_identifier (int): message identifier.\n\nReturns:\nstr: message string or None if not available.", "source": "codesearchnet"}
{"code": "def _GetSocket(self):\n    try:\n        return socket.create_connection((self._host, self._port), self._SOCKET_TIMEOUT)\n    except socket.error as exception:\n        logger.error('Unable to connect to nsrlsvr with error: {0!s}.'.format(exception))", "docstring": "Establishes a connection to an nsrlsvr instance.\n\nReturns:\nsocket._socketobject: socket connected to an nsrlsvr instance or None if\na connection cannot be established.", "source": "codesearchnet"}
{"code": "def create_database_view(self, view: views.View, view_name: str) -> None:\n    view_sql = f'CREATE OR REPLACE VIEW {self._view_dataset}.{view_name} AS\\n{self.to_sql(view)}'\n    self._engine.execute(view_sql).fetchall()", "docstring": "Creates a Spark view with the given name in the runner's view_dataset.\n\nArgs:\nview: the FHIR view that creates\nview_name: the view name passed to the CREATE OR REPLACE VIEW statement.", "source": "github-repos"}
{"code": "def to_dataframe(self, view: views.View, limit: Optional[int]=None) -> pandas.DataFrame:\n    df = pandas.read_sql_query(sql=self.to_sql(view, limit=limit), con=self._engine.raw_connection())\n    return runner_utils.clean_dataframe(df, view.get_select_columns_to_return_type())", "docstring": "Returns a Pandas dataframe of the results.\n\nArgs:\nview: the view that defines the query to run.\nlimit: optional limit of the number of items to return.\n\nReturns:\npandas.DataFrame: dataframe of the view contents.\n\nRaises:\nValueError propagated from the Spark client if pandas is not installed.", "source": "github-repos"}
{"code": "def diff_linesToChars(self, text1, text2):\n    lineArray = []\n    lineHash = {}\n    lineArray.append('')\n\n    def diff_linesToCharsMunge(text):\n        'Split a text into an array of strings.  Reduce the texts to a string\\n      of hashes where each Unicode character represents one line.\\n      Modifies linearray and linehash through being a closure.\\n\\n      Args:\\n        text: String to encode.\\n\\n      Returns:\\n        Encoded string.\\n      '\n        chars = []\n        lineStart = 0\n        lineEnd = (- 1)\n        while (lineEnd < (len(text) - 1)):\n            lineEnd = text.find('\\n', lineStart)\n            if (lineEnd == (- 1)):\n                lineEnd = (len(text) - 1)\n            line = text[lineStart:(lineEnd + 1)]\n            if (line in lineHash):\n                chars.append(chr(lineHash[line]))\n            else:\n                if (len(lineArray) == maxLines):\n                    line = text[lineStart:]\n                    lineEnd = len(text)\n                lineArray.append(line)\n                lineHash[line] = (len(lineArray) - 1)\n                chars.append(chr((len(lineArray) - 1)))\n            lineStart = (lineEnd + 1)\n        return ''.join(chars)\n    maxLines = 666666\n    chars1 = diff_linesToCharsMunge(text1)\n    maxLines = 1114111\n    chars2 = diff_linesToCharsMunge(text2)\n    return (chars1, chars2, lineArray)", "docstring": "Split two texts into an array of strings.  Reduce the texts to a string\nof hashes where each Unicode character represents one line.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nThree element tuple, containing the encoded text1, the encoded text2 and\nthe array of unique strings.  The zeroth element of the array of unique\nstrings is intentionally blank.", "source": "codesearchnet"}
{"code": "def add(self, aspect, ifpresent='error'):\n    if isinstance(aspect, contextualize):\n        self.contextualize.update(aspect)\n        return True\n    classification = [(network, self.networks), (system, self.systems), (ansible, self.ansible_hosts), (deploy, self.deploys), (configure, self.configures)]\n    aspect_list = [l for (t, l) in classification if isinstance(aspect, t)]\n    assert (len(aspect_list) == 1), 'Unexpected aspect for RADL.'\n    aspect_list = aspect_list[0]\n    old_aspect = [a for a in aspect_list if (a.getId() == aspect.getId())]\n    if old_aspect:\n        if (ifpresent == 'error'):\n            raise Exception('Aspect with the same id was found.')\n        elif (ifpresent == 'replace'):\n            for (i, elem) in enumerate(aspect_list):\n                if (elem.getId() == old_aspect[0].getId()):\n                    del aspect_list[i]\n                    break\n            aspect_list.append(aspect)\n            return True\n        elif (ifpresent == 'ignore'):\n            return False\n        else:\n            raise ValueError\n    else:\n        aspect_list.append(aspect)\n        return True", "docstring": "Add a network, ansible_host, system, deploy, configure or contextualize.\n\nArgs:\n- aspect(network, system, deploy, configure or contextualize): thing to add.\n- ifpresent(str): if it has been defined, do:\n\n- ``\"ignore\"``: not add the aspect.\n- ``\"replace\"``: replace by the old defined.\n- ``\"error\"``: raise an error.\n\nReturn(bool): True if aspect was added.", "source": "codesearchnet"}
{"code": "def get_airport_stats(self, iata, page=1, limit=100):\n    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)\n    return self._fr24.get_airport_stats(url)", "docstring": "Retrieve the performance statistics at an airport\n\nGiven the IATA code of an airport, this method returns the performance statistics for the airport.\n\nArgs:\niata (str): The IATA code for an airport, e.g. HYD\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA list of dicts with the data; one dict for each row of data from flightradar24\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_airport_stats('HYD')\nf.get_airport_stats('HYD',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def get(self, language: str=None, default: str=None) -> str:\n    language = (language or settings.LANGUAGE_CODE)\n    value = super().get(language, default)\n    return (value if (value is not None) else default)", "docstring": "Gets the underlying value in the specified or\nprimary language.\n\nArguments:\nlanguage:\nThe language to get the value in.\n\nReturns:\nThe value in the current language, or\nthe primary language in case no language\nwas specified.", "source": "codesearchnet"}
{"code": "def add_dos(self, label, dos):\n        \n\n        densities = dos.get_smeared_densities(self.sigma) if self.sigma \\\n            else dos.densities\n        self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}", "docstring": "Adds a dos for plotting.\n\nArgs:\nlabel:\nlabel for the DOS. Must be unique.\ndos:\nPhononDos object", "source": "juraj-google-style"}
{"code": "def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None, flag_values=_flagvalues.FLAGS, **args):\n    parser = _argument_parser.IntegerParser(lower_bound, upper_bound)\n    serializer = _argument_parser.ArgumentSerializer()\n    DEFINE(parser, name, default, help, flag_values, serializer, **args)\n    _register_bounds_validator_if_needed(parser, name, flag_values=flag_values)", "docstring": "Registers a flag whose value must be an integer.\n\nIf lower_bound, or upper_bound are set, then this flag must be\nwithin the given range.\n\nArgs:\nname: str, the flag name.\ndefault: int|str|None, the default value of the flag.\nhelp: str, the help message.\nlower_bound: int, min value of the flag.\nupper_bound: int, max value of the flag.\nflag_values: FlagValues, the FlagValues instance with which the flag will\nbe registered. This should almost never need to be overridden.\n**args: dict, the extra keyword args that are passed to DEFINE.", "source": "codesearchnet"}
{"code": "def set_fig_size(self, width, height=None):\n    self.figure.figure_width = width\n    self.figure.figure_height = height\n    return", "docstring": "Set the figure size in inches.\n\nSets the figure size with a call to fig.set_size_inches.\nDefault in code is 8 inches for each.\n\nArgs:\nwidth (float): Dimensions for figure width in inches.\nheight (float, optional): Dimensions for figure height in inches. Default is None.", "source": "codesearchnet"}
{"code": "def id_pools_vmac_ranges(self):\n    if (not self.__id_pools_vmac_ranges):\n        self.__id_pools_vmac_ranges = IdPoolsRanges('vmac', self.__connection)\n    return self.__id_pools_vmac_ranges", "docstring": "Gets the IdPoolsRanges API Client for VMAC Ranges.\n\nReturns:\nIdPoolsRanges:", "source": "codesearchnet"}
{"code": "def plot_summaries(self, show=False, save=True, figure_type=None):\n        \n\n        if not figure_type:\n            figure_type = self.default_figure_type\n\n        if not figure_type in self.default_figure_types:\n            logger.debug(\"unknown figure type selected\")\n            figure_type = self.default_figure_type\n\n        color_list, symbol_list = self._create_colors_markers_list()\n        summary_df = self.summary_df\n        selected_summaries = self.selected_summaries\n        batch_dir = self.batch_dir\n        batch_name = self.name\n        fig, ax = plot_summary_figure(self.info_df, summary_df, color_list,\n                                      symbol_list, selected_summaries,\n                                      batch_dir, batch_name, show=show,\n                                      save=save, figure_type=figure_type)\n        self.figure[figure_type] = fig\n        self.axes[figure_type] = ax", "docstring": "Plot summary graphs.\n\nArgs:\nshow: shows the figure if True.\nsave: saves the figure if True.\nfigure_type: optional, figure type to create.", "source": "juraj-google-style"}
{"code": "def get_record_schema_from_dict_table_schema(schema_name: str, table_schema: Dict[str, Any], namespace: str='apache_beam.io.gcp.bigquery') -> Dict[str, Any]:\n    avro_fields = [table_field_to_avro_field(field, '.'.join((namespace, schema_name))) for field in table_schema['fields']]\n    return {'type': 'record', 'name': schema_name, 'fields': avro_fields, 'doc': 'Translated Avro Schema for {}'.format(schema_name), 'namespace': namespace}", "docstring": "Convert a table schema into an Avro schema.\n\nArgs:\nschema_name (str): The name of the record.\ntable_schema (Dict[str, Any]): A BigQuery table schema in dict form.\nnamespace (str): The namespace of the Avro schema.\n\nReturns:\nDict[str, Any]: The schema as an Avro RecordSchema.", "source": "github-repos"}
{"code": "def get_action(self, action_id):\n    return Action.get_object(api_token=self.token, action_id=action_id)", "docstring": "Returns a specific Action by its ID.\n\nArgs:\naction_id (int): id of action", "source": "codesearchnet"}
{"code": "def collect_filtered_models(discard, *input_values):\n    ids = set([])\n    collected = []\n    queued = []\n\n    def queue_one(obj):\n        if ((obj.id not in ids) and (not (callable(discard) and discard(obj)))):\n            queued.append(obj)\n    for value in input_values:\n        _visit_value_and_its_immediate_references(value, queue_one)\n    while queued:\n        obj = queued.pop(0)\n        if (obj.id not in ids):\n            ids.add(obj.id)\n            collected.append(obj)\n            _visit_immediate_value_references(obj, queue_one)\n    return collected", "docstring": "Collect a duplicate-free list of all other Bokeh models referred to by\nthis model, or by any of its references, etc, unless filtered-out by the\nprovided callable.\n\nIterate over ``input_values`` and descend through their structure\ncollecting all nested ``Models`` on the go.\n\nArgs:\n*discard (Callable[[Model], bool])\na callable which accepts a *Model* instance as its single argument\nand returns a boolean stating whether to discard the instance. The\nlatter means that the instance will not be added to collected\nmodels nor will its references be explored.\n\n*input_values (Model)\nBokeh models to collect other models from\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def load_with_classes(filename, classes):\n    \n\n    ok = False\n    for class_ in classes:\n        obj = class_()\n        try:\n            obj.load(filename)\n            ok = True\n        \n        \n        \n        \n        \n        \n        except FileNotFoundError:\n            raise\n        except Exception as e:  \n            \n            if a99.logging_level == logging.DEBUG:\n                a99.get_python_logger().exception(\"Error trying with class \\\"{0!s}\\\"\".format(\n                                              class_.__name__))\n            pass\n        if ok:\n            break\n    if ok:\n        return obj\n    return None", "docstring": "Attempts to load file by trial-and-error using a given list of classes.\n\nArguments:\nfilename -- full path to file\nclasses -- list of classes having a load() method\n\nReturns: DataFile object if loaded successfully, or None if not.\n\nNote: it will stop at the first successful load.\n\nAttention: this is not good if there is a bug in any of the file readers,\nbecause *all exceptions will be silenced!*", "source": "juraj-google-style"}
{"code": "def clown_strike_ioc(self, ioc):\n        \n        r = requests.get('http:\n        self._output(r.text)", "docstring": "Performs Clown Strike lookup on an IoC.\n\nArgs:\nioc - An IoC.", "source": "juraj-google-style"}
{"code": "def add_input(self, mutable_accumulator, element, *args, **kwargs):\n    raise NotImplementedError(str(self))", "docstring": "Return result of folding element into accumulator.\n\nCombineFn implementors must override add_input.\n\nArgs:\nmutable_accumulator: the current accumulator,\nmay be modified and returned for efficiency\nelement: the element to add, should not be mutated\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"}
{"code": "def needs_reboot():\n    with salt.utils.winapi.Com():\n        obj_sys = win32com.client.Dispatch('Microsoft.Update.SystemInfo')\n    return salt.utils.data.is_true(obj_sys.RebootRequired)", "docstring": "Determines if the system needs to be rebooted.\n\nReturns:\n\nbool: True if the system requires a reboot, False if not\n\nCLI Examples:\n\n.. code-block:: bash\n\nimport salt.utils.win_update\n\nsalt.utils.win_update.needs_reboot()", "source": "codesearchnet"}
{"code": "def on_pass(self, record):", "docstring": "A function that is executed upon a test passing.\n\nImplementation is optional.\n\nArgs:\nrecord: records.TestResultRecord, a copy of the test record for\nthis test, containing all information of the test execution\nincluding exception objects.", "source": "github-repos"}
{"code": "def get_app_state():\n    if (not hasattr(g, 'app_state')):\n        model = get_model()\n        g.app_state = {'app_title': APP_TITLE, 'model_name': type(model).__name__, 'latest_ckpt_name': model.latest_ckpt_name, 'latest_ckpt_time': model.latest_ckpt_time}\n    return g.app_state", "docstring": "Get current status of application in context\n\nReturns:\n:obj:`dict` of application status", "source": "codesearchnet"}
{"code": "def list_tokens(self):\n        \n        url = self.url() + \"/nd/resource/public/token/\"\n        req = self.remote_utils.get_url(url)\n\n        if req.status_code is not 200:\n            raise RemoteDataNotFoundError('Coud not find {}'.format(req.text))\n        else:\n            return req.json()", "docstring": "Lists a set of tokens that are public in Neurodata.\nArguments:\nReturns:\ndict: Public tokens found in Neurodata", "source": "juraj-google-style"}
{"code": "def wrap_or_copy(cls, func, **options):\n    \n    if isinstance(func, openhtf.PhaseGroup):\n      raise PhaseWrapError('Cannot wrap PhaseGroup <%s> as a phase.' % (\n          func.name or 'Unnamed'))\n    if isinstance(func, cls):\n      \n      \n      retval = mutablerecords.CopyRecord(func)\n    else:\n      retval = cls(func)\n    retval.options.update(**options)\n    return retval", "docstring": "Return a new PhaseDescriptor from the given function or instance.\n\nWe want to return a new copy so that you can reuse a phase with different\noptions, plugs, measurements, etc.\n\nArgs:\nfunc: A phase function or PhaseDescriptor instance.\n**options: Options to update on the result.\n\nRaises:\nPhaseWrapError: if func is a openhtf.PhaseGroup.\n\nReturns:\nA new PhaseDescriptor object.", "source": "juraj-google-style"}
{"code": "def _local_var_name(splittable_dimensions, assignment):\n  \n  assignment_string = []\n  for splittable in sorted(splittable_dimensions):\n    if splittable in assignment:\n      assignment_string.append(\"{}:{}\".format(splittable,\n                                              assignment[splittable]))\n    else:\n      assignment_string.append(\"{}\".format(splittable))\n  return \"y_(\" + \",\".join(assignment_string) + \")\"", "docstring": "Name for a local variable.\n\nArgs:\nsplittable_dimensions: frozenset of names of splittable dimensions.\nassignment: dict from names of splittable dimensions to names of mesh\ndimensions.\n\nReturns:\nA string, the variable name.", "source": "juraj-google-style"}
{"code": "def project(self, term, **kwargs):\n    params = kwargs\n    baseuri = ((self._BASE_URI + 'projects/') + term)\n    res = self.session.get(baseuri, params=params)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for a project by id.\n\nArgs:\nterm (str): Term to search for.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "codesearchnet"}
{"code": "def get_test_data(train_samples, test_samples, input_shape, num_classes, random_seed=None):\n    if random_seed is not None:\n        np.random.seed(random_seed)\n    num_sample = train_samples + test_samples\n    templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)\n    y = np.random.randint(0, num_classes, size=(num_sample,))\n    x = np.zeros((num_sample,) + input_shape, dtype=np.float32)\n    for i in range(num_sample):\n        x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1.0, size=input_shape)\n    return ((x[:train_samples], y[:train_samples]), (x[train_samples:], y[train_samples:]))", "docstring": "Generates test data to train a model on.\n\nArgs:\ntrain_samples: Integer, how many training samples to generate.\ntest_samples: Integer, how many test samples to generate.\ninput_shape: Tuple of integers, shape of the inputs.\nnum_classes: Integer, number of classes for the data and targets.\nrandom_seed: Integer, random seed used by numpy to generate data.\n\nReturns:\nA tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.", "source": "github-repos"}
{"code": "def transform_wrap_with(source, left, right, name=None):\n    with ops.name_scope(name, 'TransformWrapWith', [source]):\n        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)\n        if isinstance(source, tf.SparseTensor):\n            result = tf.SparseTensor(indices=source.indices, values=ops_module.transform_wrap_with(source.values, left, right), dense_shape=source.dense_shape)\n        else:\n            result = ops_module.transform_wrap_with(source, left, right)\n        return result", "docstring": "Wrap source strings with \"left\" and \"right\" strings\n\nArgs:\nsource: `Tensor` or `SparseTensor` of any shape, strings to replace digits.\nleft: Scalar string to add in the beginning\nright: Scalar string to add in the ending\nname: A name for the operation (optional).\nReturns:\n`SparseTensor` of same shape and size as input.", "source": "codesearchnet"}
{"code": "class AriaTextMoELayer(nn.Module):\n\n    def __init__(self, config: AriaTextConfig):\n        super().__init__()\n        self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False)\n        self.experts = AriaGroupedExpertsMLP(config)\n        self.shared_experts = AriaSharedExpertsMLP(config)\n        self.config = config\n\n    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n        \n        original_shape = hidden_states.shape\n        hidden_states = hidden_states.view(-1, hidden_states.size(-1))\n        logits = self.router(hidden_states)\n        top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1)\n        scores = nn.functional.softmax(top_logits, dim=-1)\n        original_dtype = top_indices.dtype\n        tokens_per_expert = torch.histc(top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1).to(original_dtype)\n        indices = top_indices\n        flatten_indices = indices.view(-1)\n        sorted_indices = torch.argsort(flatten_indices)\n        permuted_tokens = hidden_states.index_select(0, sorted_indices \n        expert_output = self.experts(permuted_tokens, tokens_per_expert)\n        unpermuted_tokens = torch.zeros((scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device)\n        unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)\n        unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))\n        output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape)\n        shared_expert_output = self.shared_experts(hidden_states.view(original_shape))\n        return output + shared_expert_output", "docstring": "Aria Text Mixture of Experts (MoE) Layer.\n\nThis layer applies a gating mechanism to route input tokens to different experts.\n\nArgs:\nconfig (`AriaTextConfig`):\nConfiguration object for the text component of the model.", "source": "github-repos"}
{"code": "def configure_tpu_version(self, version, restart_type='always'):\n\n    def configure_worker(worker):\n        \n        ip_address = worker['ipAddress']\n        url = (_VERSION_SWITCHER_ENDPOINT + '/{}?restartType={}').format(ip_address, version, restart_type)\n        req = urllib.request.Request(url, data=b'')\n        try:\n            urllib.request.urlopen(req)\n        except urllib.error.HTTPError as e:\n            status_code = e.code\n            if status_code == 404:\n                raise Exception('Tensorflow version {} is not available on Cloud TPU, try a previous nightly version or refer to https:\n            else:\n                raise Exception('Failed to configure worker {}'.format(ip_address))\n    workers = self.network_endpoints()\n    with futures.ThreadPoolExecutor(max_workers=len(workers)) as executor:\n        results = executor.map(configure_worker, workers)\n        for result in results:\n            if result:\n                result.result()", "docstring": "Configure TPU software version.\n\nArgs:\nversion (string): Version of software to configure the TPU with.\nrestart_type (string): Restart behaviour when switching versions,\ndefaults to always restart. Options are 'always', 'ifNeeded'.", "source": "github-repos"}
{"code": "def keep_only_update_source_in_field(field, root, head, update):\n    update_sources = {source.lower() for source in get_value(thaw(update), '.'.join([field, 'source']), [])}\n    if (len(update_sources) != 1):\n        return (root, head, update)\n    source = update_sources.pop()\n    if (field in root):\n        root = root.set(field, remove_elements_with_source(source, root[field]))\n    if (field in head):\n        head = head.set(field, remove_elements_with_source(source, head[field]))\n    return (root, head, update)", "docstring": "Remove elements from root and head where ``source`` matches the update.\n\nThis is useful if the update needs to overwrite all elements with the same\nsource.\n\n.. note::\nIf the update doesn't contain exactly one source in ``field``, the\nrecords are returned with no modifications.\n\nArgs:\nfield (str): the field to filter out.\nroot (pmap): the root record, whose ``field`` will be cleaned.\nhead (pmap): the head record, whose ``field`` will be cleaned.\nupdate (pmap): the update record, from which the ``source`` is read.\n\nReturns:\ntuple: ``(root, head, update)`` with some elements filtered out from\n``root`` and ``head``.", "source": "codesearchnet"}
{"code": "def convert_sigmoid(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting sigmoid ...')\n\n    if names == 'short':\n        tf_name = 'SIGM' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    sigmoid = keras.layers.Activation('sigmoid', name=tf_name)\n    layers[scope_name] = sigmoid(layers[inputs[0]])", "docstring": "Convert sigmoid layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def predict_signature_def(inputs, outputs):\n    if inputs is None or not inputs:\n        raise ValueError('Prediction `inputs` cannot be None or empty.')\n    if outputs is None or not outputs:\n        raise ValueError('Prediction `outputs` cannot be None or empty.')\n    signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n    signature_outputs = {key: utils.build_tensor_info(tensor) for key, tensor in outputs.items()}\n    signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.PREDICT_METHOD_NAME)\n    return signature_def", "docstring": "Creates prediction signature from given inputs and outputs.\n\nThis function produces signatures intended for use with the TensorFlow Serving\nPredict API (tensorflow_serving/apis/prediction_service.proto). This API\nimposes no constraints on the input and output types.\n\nArgs:\ninputs: dict of string to `Tensor`.\noutputs: dict of string to `Tensor`.\n\nReturns:\nA prediction-flavored signature_def.\n\nRaises:\nValueError: If inputs or outputs is `None`.", "source": "github-repos"}
{"code": "def template_files(path, exts=None):\n    \n    if not os.path.isabs(path):\n        _path = os.path.join(determine_path(), path)\n    if not (os.path.exists(_path) and os.path.isdir(_path)):\n        return []\n    if not exts:\n        exts = []\n    files = os.listdir(_path)\n    files = [f for f in files if os.path.splitext(f)[-1] in exts]\n    files = [os.path.join(path, f) for f in files]\n    return files", "docstring": "Return a list of filenames found at @path.\n\nThe list of filenames can be filtered by extensions.\n\nArguments:\npath: Existing filepath we want to list.\nexts: List of extensions to filter by.\n\nReturns:\nA list of filenames found in the path.", "source": "juraj-google-style"}
{"code": "def merge_requests(self, **kwargs):\n        \n        path = '%s/%s/merge_requests' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge requests related to the commit.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the references could not be retrieved\n\nReturns:\nlist: The merge requests related to the commit.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:\n    residual = hidden_states\n    global_residual = global_hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states)\n    if self.stagger_blocks_this_layer:\n        hidden_states, attention_mask = self.pad_local_tokens(hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size)\n    hidden_states, global_hidden_states, attn_weights = self.self_attn(token_hidden_states=hidden_states, global_hidden_states=global_hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n    if self.stagger_blocks_this_layer:\n        hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)\n    global_hidden_states = global_residual + global_hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    global_residual = global_hidden_states\n    global_hidden_states = self.final_layer_norm(global_hidden_states)\n    global_hidden_states = self.activation_fn(self.fc1(global_hidden_states))\n    global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.activation_dropout, training=self.training)\n    global_hidden_states = self.fc2(global_hidden_states)\n    global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)\n    global_hidden_states = global_residual + global_hidden_states\n    outputs = (hidden_states, global_hidden_states)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*\nglobal_hidden_states (`torch.FloatTensor`): global token hidden states\n*(seq_len, num_global_tokens, embed_dim)*\nattention_mask (`torch.FloatTensor`): attention mask of size\n*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _parse_schema(schema, method):\n    if (method and schema.get('readOnly', False)):\n        return _READONLY_PROPERTY\n    if ('allOf' in schema):\n        schema_ = copy.deepcopy(schema['allOf'][0])\n        for x in schema['allOf'][1:]:\n            _dict_merge(schema_, x)\n        return _parse_schema(schema_, method)\n    if ('oneOf' in schema):\n        return _parse_schema(schema['oneOf'][0], method)\n    if ('enum' in schema):\n        return schema['enum'][0]\n    schema_type = schema.get('type', 'object')\n    if (schema_type == 'array'):\n        if ('oneOf' in schema['items']):\n            return [_parse_schema(x, method) for x in schema['items']['oneOf']]\n        return [_parse_schema(schema['items'], method)]\n    if (schema_type == 'object'):\n        if (method and all((v.get('readOnly', False) for v in schema['properties'].values()))):\n            return _READONLY_PROPERTY\n        results = []\n        for (name, prop) in schema.get('properties', {}).items():\n            result = _parse_schema(prop, method)\n            if (result != _READONLY_PROPERTY):\n                results.append((name, result))\n        return collections.OrderedDict(results)\n    if ((schema_type, schema.get('format')) in _TYPE_MAPPING):\n        return _TYPE_MAPPING[(schema_type, schema.get('format'))]\n    return _TYPE_MAPPING[(schema_type, None)]", "docstring": "Convert a Schema Object to a Python object.\n\nArgs:\nschema: An ``OrderedDict`` representing the schema object.", "source": "codesearchnet"}
{"code": "def plot_job_history(jobs, interval='year'):\n    \n    def get_date(job):\n        \n        return datetime.datetime.strptime(job.creation_date(),\n                                          '%Y-%m-%dT%H:%M:%S.%fZ')\n\n    current_time = datetime.datetime.now()\n\n    if interval == 'year':\n        bins = [(current_time - datetime.timedelta(days=k*365/12))\n                for k in range(12)]\n    elif interval == 'month':\n        bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]\n    elif interval == 'week':\n        bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]\n\n    binned_jobs = [0]*len(bins)\n\n    if interval == 'year':\n        for job in jobs:\n            for ind, dat in enumerate(bins):\n                date = get_date(job)\n                if date.month == dat.month:\n                    binned_jobs[ind] += 1\n                    break\n            else:\n                continue\n    else:\n        for job in jobs:\n            for ind, dat in enumerate(bins):\n                date = get_date(job)\n                if date.day == dat.day and date.month == dat.month:\n                    binned_jobs[ind] += 1\n                    break\n            else:\n                continue\n\n    nz_bins = []\n    nz_idx = []\n    for ind, val in enumerate(binned_jobs):\n        if val != 0:\n            nz_idx.append(ind)\n            nz_bins.append(val)\n\n    total_jobs = sum(binned_jobs)\n\n    colors = ['\n              '\n\n    if interval == 'year':\n        labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]\n    else:\n        labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]\n    fig, ax = plt.subplots(1, 1, figsize=(5, 5))  \n    ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14},\n           rotatelabels=True, counterclock=False)\n    ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))\n    ax.text(0, 0, total_jobs, horizontalalignment='center',\n            verticalalignment='center', fontsize=26)\n    fig.tight_layout()\n    return fig", "docstring": "Plots the job history of the user from the given list of jobs.\n\nArgs:\njobs (list): A list of jobs with type IBMQjob.\ninterval (str): Interval over which to examine.\n\nReturns:\nfig: A Matplotlib figure instance.", "source": "juraj-google-style"}
{"code": "def save_statement(self, statement):\n        \n        response = self.lrs.save_statement(statement)\n\n        if not response:\n            raise ClientError('EnterpriseXAPIClient request failed.')", "docstring": "Save xAPI statement.\n\nArguments:\nstatement (EnterpriseStatement): xAPI Statement to send to the LRS.\n\nRaises:\nClientError: If xAPI statement fails to save.", "source": "juraj-google-style"}
{"code": "def load_models_using_filepattern(\n            self, filename_pattern, model, glob_args, is_main_model=False,\n            encoding='utf-8', add_to_local_models=True):\n        \n        if (model):\n            self.update_model_in_repo_based_on_filename(model)\n        filenames = glob.glob(filename_pattern, **glob_args)\n        if len(filenames) == 0:\n            raise IOError(\n                errno.ENOENT, os.strerror(errno.ENOENT), filename_pattern)\n        loaded_models = []\n        for filename in filenames:\n            the_metamodel = MetaModelProvider.get_metamodel(model, filename)\n            loaded_models.append(\n                self.load_model(the_metamodel, filename, is_main_model,\n                                encoding=encoding,\n                                add_to_local_models=add_to_local_models))\n        return loaded_models", "docstring": "add a new model to all relevant objects\n\nArgs:\nfilename_pattern: models to be loaded\nmodel: model holding the loaded models in its _tx_model_repository\nfield (may be None).\nglob_args: arguments passed to the glob.glob function.\n\nReturns:\nthe list of loaded models", "source": "juraj-google-style"}
{"code": "def get_entity_details(self, entity_id):\n    if (not is_valid_uuid(entity_id)):\n        raise StorageArgumentException('Invalid UUID for entity_id: {0}'.format(entity_id))\n    return self._authenticated_request.to_endpoint('entity/{}/'.format(entity_id)).return_body().get()", "docstring": "Get generic entity by UUID.\n\nArgs:\nentity_id (str): The UUID of the requested entity.\n\nReturns:\nA dictionary describing the entity::\n\n{\nu'collab_id': 2271,\nu'created_by': u'303447',\nu'created_on': u'2017-03-10T12:50:06.077891Z',\nu'description': u'',\nu'entity_type': u'project',\nu'modified_by': u'303447',\nu'modified_on': u'2017-03-10T12:50:06.077946Z',\nu'name': u'2271',\nu'uuid': u'3abd8742-d069-44cf-a66b-2370df74a682'\n}\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def build_fhir_path_ast(input_str: str) -> Expression:\n    error_listener = _FhirPathErrorListener()\n    lexer = FhirPathLexer(antlr4.InputStream(input_str))\n    lexer.removeErrorListeners()\n    lexer.addErrorListener(error_listener)\n    token_stream = antlr4.CommonTokenStream(lexer)\n    parser = FhirPathParser(token_stream)\n    parser.removeErrorListeners()\n    parser.addErrorListener(error_listener)\n    cst_visitor = _FhirPathCstVisitor()\n    cst = parser.expression()\n    if error_listener.errors:\n        raise ValueError('\\n'.join(error_listener.errors))\n    ast = cst_visitor.visit(cst)\n    return ast", "docstring": "Given a FHIRPath query, constructs an AST and returns the root node.\n\nArgs:\ninput_str: The FHIRPath string to translate.\n\nReturns:\nA FHIRPath `Expression` instance, representing the root AST node.\n\nRaises:\nValueError: In the event that the provided `input_str` was syntactically\ninvalid FHIRPath that failed during lexing/parsing.", "source": "github-repos"}
{"code": "def role(self, value):\n        \n        if value == self._defaults['ai.cloud.role'] and 'ai.cloud.role' in self._values:\n            del self._values['ai.cloud.role']\n        else:\n            self._values['ai.cloud.role'] = value", "docstring": "The role property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def create_sas_locator(access_token, asset_id, accesspolicy_id):\n    path = '/Locators'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = (((('{ \\t\\t\"AccessPolicyId\":\"' + accesspolicy_id) + '\", \\t\\t\"AssetId\":\"') + asset_id) + '\", \\t\\t\"Type\":1 \\t}')\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service SAS Locator.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nasset_id (str): Media Service Asset ID.\naccesspolicy_id (str): Media Service Access Policy ID.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def get_tensor_details(self, subgraph_index=0):\n    tensor_details = []\n    num_subgraphs = self._interpreter.NumSubgraphs()\n    if subgraph_index < 0 or subgraph_index >= num_subgraphs:\n        raise ValueError(f'subgraph_index is out of range: {subgraph_index} for the model, which has {num_subgraphs} subgraphs.')\n    for idx in range(self._interpreter.NumTensors(subgraph_index)):\n        try:\n            tensor_details.append(self._get_tensor_details(idx, subgraph_index))\n        except ValueError:\n            pass\n    return tensor_details", "docstring": "Gets tensor details for every tensor with valid tensor details from a subgraph.\n\nTensors where required information about the tensor is not found are not\nadded to the list. This includes temporary tensors without a name.\n\nArgs:\nsubgraph_index: Index of the subgraph to fetch the tensor.\n\nReturns:\nA list of dictionaries containing tensor information.", "source": "github-repos"}
{"code": "def create_new_tf_function(func_graph):\n    transform.apply_func_graph_transforms(func_graph)\n    func = atomic_function.from_func_graph(func_graph.name, func_graph, {})\n    func_graph.outer_graph._add_function_recursive(func)\n    return func_graph.name", "docstring": "Converts func_graph to a TF_Function and adds it to the current graph.\n\nArgs:\nfunc_graph: FuncGraph\n\nReturns:\nThe name of the new TF_Function.", "source": "github-repos"}
{"code": "def set_timeout(self, network_timeout):\n    if (network_timeout == self._network_timeout):\n        return\n    self._network_timeout = network_timeout\n    self._disconnect()", "docstring": "Set the timeout for existing and future Clients.\n\nClose all current connections. This will cause future operations to\ncreate new Clients with the network_timeout passed through\nsocketTimeoutMS optional parameter.\n\nArgs:\nnetwork_timeout: The new value in milliseconds for the timeout.", "source": "codesearchnet"}
{"code": "def get_dummies(self, columns, **kwargs):\n        \n        cls = type(self)\n        \n        \n        if columns is None:\n            columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]\n            \n            \n            if len(columns) == 0:\n                return self.copy()\n        elif not is_list_like(columns):\n            columns = [columns]\n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        def set_columns(df, columns):\n            df.columns = columns\n            return df\n\n        set_cols = self.columns\n        columns_applied = self._map_across_full_axis(\n            1, lambda df: set_columns(df, set_cols)\n        )\n        \n        \n        \n        if len(columns) == len(self.columns):\n\n            def get_dummies_builder(df):\n                if df is not None:\n                    if not df.empty:\n                        return pandas.get_dummies(df, **kwargs)\n                    else:\n                        return pandas.DataFrame([])\n\n            func = self._prepare_method(lambda df: get_dummies_builder(df))\n            new_data = columns_applied.map_across_full_axis(0, func)\n            untouched_data = None\n        else:\n\n            def get_dummies_builder(df, internal_indices=[]):\n                return pandas.get_dummies(\n                    df.iloc[:, internal_indices], columns=None, **kwargs\n                )\n\n            numeric_indices = list(self.columns.get_indexer_for(columns))\n            new_data = columns_applied.apply_func_to_select_indices_along_full_axis(\n                0, get_dummies_builder, numeric_indices, keep_remaining=False\n            )\n            untouched_data = self.drop(columns=columns)\n        \n        \n        \n        final_columns = self.compute_index(1, new_data, False)\n        \n        \n        \n        if len(columns) != len(self.columns):\n            new_data = untouched_data.data.concat(1, new_data)\n            final_columns = untouched_data.columns.append(pandas.Index(final_columns))\n        return cls(new_data, self.index, final_columns)", "docstring": "Convert categorical variables to dummy variables for certain columns.\n\nArgs:\ncolumns: The columns to convert.\n\nReturns:\nA new QueryCompiler.", "source": "juraj-google-style"}
{"code": "def __init__( self, sites, cell_lengths ):\n        \n        self.cell_lengths = cell_lengths\n        self.sites = sites\n        self.number_of_sites = len( self.sites )\n        self.site_labels = set( [ site.label for site in self.sites ] )\n        self.site_populations = Counter( [ site.label for site in self.sites ] )\n        self.enforce_periodic_boundary_conditions()\n        self.initialise_site_lookup_table()\n        self.nn_energy = False\n        self.cn_energies = False\n        self.site_energies = False\n        self.jump_lookup_table = False\n        for site in self.sites:\n            site.p_neighbours = [ self.site_with_id( i ) for i in site.neighbours ]\n        self.reset()", "docstring": "Initialise a Lattice instance.\n\nArgs:\nsites (List(Site)): List of sites contained in the lattice.\ncell_lengths (np.array(x,y,z)): Vector of cell lengths for the simulation cell.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def qry_create(options):\n    qry_string = filt_end = param_str = ''\n    filt_st = 'Filters=['\n    param_str_default = 'All'\n    if options.id:\n        qry_string += (\"InstanceIds=['%s']\" % options.id)\n        param_str += (\"id: '%s'\" % options.id)\n        param_str_default = ''\n    if options.instname:\n        (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str)\n        filt_end = ']'\n        param_str_default = ''\n        qry_string += (filt_st + (\"{'Name': 'tag:Name', 'Values': ['%s']}\" % options.instname))\n        param_str += (\"name: '%s'\" % options.instname)\n    if options.inst_state:\n        (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st)\n        qry_string += (\"{'Name': 'instance-state-name','Values': ['%s']}\" % options.inst_state)\n        param_str += (\"state: '%s'\" % options.inst_state)\n        filt_end = ']'\n        param_str_default = ''\n    qry_string += filt_end\n    param_str += param_str_default\n    debg.dprintx('\\nQuery String')\n    debg.dprintx(qry_string, True)\n    debg.dprint('param_str: ', param_str)\n    return (qry_string, param_str)", "docstring": "Create query from the args specified and command chosen.\n\nCreates a query string that incorporates the args in the options\nobject, and creates the title for the 'list' function.\n\nArgs:\noptions (object): contains args and data from parser\nReturns:\nqry_string (str): the query to be used against the aws ec2 client.\nparam_str (str): the title to display before the list.", "source": "codesearchnet"}
{"code": "def load_embeddings(lang='en', task='embeddings', type='cw', normalize=False):\n    src_dir = ('_'.join((type, task)) if type else task)\n    p = locate_resource(src_dir, lang)\n    e = Embedding.load(p)\n    if (type == 'cw'):\n        e.apply_expansion(CaseExpander)\n        e.apply_expansion(DigitExpander)\n    if (type == 'sgns'):\n        e.apply_expansion(CaseExpander)\n    if (type == 'ue'):\n        e.apply_expansion(CaseExpander)\n    if normalize:\n        e.normalize_words(inplace=True)\n    return e", "docstring": "Return a word embeddings object for `lang` and of type `type`\n\nArgs:\nlang (string): language code.\ntask (string): parameters that define task.\ntype (string): skipgram, cw, cbow ...\nnoramlized (boolean): returns noramlized word embeddings vectors.", "source": "codesearchnet"}
{"code": "def ValidateKey(cls, key_path):\n    \n    for prefix in cls.VALID_PREFIXES:\n      if key_path.startswith(prefix):\n        return\n\n    \n    if key_path.startswith('HKEY_CURRENT_USER\\\\'):\n      raise errors.FormatError(\n          'HKEY_CURRENT_USER\\\\ is not supported instead use: '\n          'HKEY_USERS\\\\%%users.sid%%\\\\')\n\n    raise errors.FormatError(\n        'Unupported Registry key path: {0:s}'.format(key_path))", "docstring": "Validates this key against supported key names.\n\nArgs:\nkey_path (str): path of a Windows Registry key.\n\nRaises:\nFormatError: when key is not supported.", "source": "juraj-google-style"}
{"code": "def _sign_input(cls, input_, message, key_pairs):\n        \n        if isinstance(input_.fulfillment, Ed25519Sha256):\n            return cls._sign_simple_signature_fulfillment(input_, message,\n                                                          key_pairs)\n        elif isinstance(input_.fulfillment, ThresholdSha256):\n            return cls._sign_threshold_signature_fulfillment(input_, message,\n                                                             key_pairs)\n        else:\n            raise ValueError(\"Fulfillment couldn't be matched to \"\n                             'Cryptocondition fulfillment type.')", "docstring": "Signs a single Input.\n\nNote:\nThis method works only for the following Cryptoconditions\ncurrently:\n- Ed25519Fulfillment\n- ThresholdSha256.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The Input to be signed.\nmessage (str): The message to be signed\nkey_pairs (dict): The keys to sign the Transaction with.", "source": "juraj-google-style"}
{"code": "def ExportNEP2(self, passphrase):\n        \n        if len(passphrase) < 2:\n            raise ValueError(\"Passphrase must have a minimum of 2 characters\")\n\n        \n        address_hash_tmp = hashlib.sha256(self.GetAddress().encode(\"utf-8\")).digest()\n        address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest()\n        address_hash = address_hash_tmp2[:4]\n\n        \n        pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')\n        derived = scrypt.hash(pwd_normalized, address_hash,\n                              N=SCRYPT_ITERATIONS,\n                              r=SCRYPT_BLOCKSIZE,\n                              p=SCRYPT_PARALLEL_FACTOR,\n                              buflen=SCRYPT_KEY_LEN_BYTES)\n\n        \n        derived1 = derived[:32]\n        derived2 = derived[32:]\n\n        \n        xor_ed = xor_bytes(bytes(self.PrivateKey), derived1)\n        cipher = AES.new(derived2, AES.MODE_ECB)\n        encrypted = cipher.encrypt(xor_ed)\n\n        \n        assembled = bytearray()\n        assembled.extend(NEP_HEADER)\n        assembled.extend(NEP_FLAG)\n        assembled.extend(address_hash)\n        assembled.extend(encrypted)\n\n        \n        encrypted_key_nep2 = base58.b58encode_check(bytes(assembled))\n        return encrypted_key_nep2.decode(\"utf-8\")", "docstring": "Export the encrypted private key in NEP-2 format.\n\nArgs:\npassphrase (str): The password to encrypt the private key with, as unicode string\n\nReturns:\nstr: The NEP-2 encrypted private key", "source": "juraj-google-style"}
{"code": "def decrypt(key, ciphertext, shift_function=shift_case_english):\n    return [shift_function(key, symbol) for symbol in ciphertext]", "docstring": "Decrypt Shift enciphered ``ciphertext`` using ``key``.\n\nExamples:\n>>> ''.join(decrypt(3, \"KHOOR\"))\nHELLO\n\n>> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)\n[0xde, 0xad, 0xbe, 0xef]\n\nArgs:\nkey (int): The shift to use\nciphertext (iterable): The symbols to decrypt\nshift_function (function (shift, symbol)): Shift function to apply to symbols in the ciphertext\n\nReturns:\nDecrypted ciphertext, list of plaintext symbols", "source": "codesearchnet"}
{"code": "def _find_suite_class():\n    test_suites = _find_suite_classes_in_module(sys.modules['__main__'])\n    if len(test_suites) == 0:\n        logging.debug('No suite class found in the __main__ module, trying to find it in the module of the caller of suite_runner.run_suite_class method.')\n        stacks = inspect.stack()\n        if len(stacks) < 2:\n            logging.debug('Failed to get the caller stack of run_suite_class. Got stacks: %s', stacks)\n        else:\n            run_suite_class_caller_frame_info = inspect.stack()[2]\n            caller_frame = run_suite_class_caller_frame_info.frame\n            module = inspect.getmodule(caller_frame)\n            if module is None:\n                logging.debug('Failed to find module for frame %s', caller_frame)\n            else:\n                test_suites = _find_suite_classes_in_module(module)\n    if len(test_suites) != 1:\n        logging.error('Expected 1 test class per file, found %s.', [t.__name__ for t in test_suites])\n        sys.exit(1)\n    return test_suites[0]", "docstring": "Finds the test suite class.\n\nFirst search for test suite classes in the __main__ module. If no test suite\nclass is found, search in the module that is calling\n`suite_runner.run_suite_class`.\n\nWalk through module members and find the subclass of BaseSuite. Only\none subclass is allowed.\n\nReturns:\nThe test suite class in the test module.", "source": "github-repos"}
{"code": "def __init__(self, key_path_prefix, windows_path, unique_key_paths):\n    \n    super(WinRegistryFileMapping, self).__init__()\n    self.key_path_prefix = key_path_prefix\n    self.unique_key_paths = unique_key_paths\n    self.windows_path = windows_path", "docstring": "Initializes the Windows Registry file mapping.\n\nArgs:\nkey_path_prefix (str): Windows Registry key path prefix.\nwindows_path (str): Windows path to the Windows Registry file, such as:\nC:\\\\Windows\\\\System32\\\\config\\\\SYSTEM\nunique_key_paths (list[str]): key paths unique to the Windows Registry\nfile.", "source": "juraj-google-style"}
{"code": "def peek_record(self, model_class, record_id):\n    if self._cache:\n        return self._cache.get_record(model_class.__name__, record_id)\n    else:\n        return None", "docstring": "Return an instance of the model_class from the cache if it is present.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nrecord_id (int): The id of the record requested.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: An instance of model_class or None.", "source": "codesearchnet"}
{"code": "def run(self, dag):\n        \n\n        q_gate_list = ['cx', 'cy', 'cz', 'h', 'x', 'y', 'z']\n\n        \n        cancellation_sets = defaultdict(lambda: [])\n\n        for wire in dag.wires:\n            wire_name = \"{0}[{1}]\".format(str(wire[0].name), str(wire[1]))\n            wire_commutation_set = self.property_set['commutation_set'][wire_name]\n\n            for com_set_idx, com_set in enumerate(wire_commutation_set):\n                if com_set[0].type in ['in', 'out']:\n                    continue\n                for node in com_set:\n                    num_qargs = len(node.qargs)\n                    if num_qargs == 1 and node.name in q_gate_list:\n                        cancellation_sets[(node.name, wire_name, com_set_idx)].append(node)\n                    if num_qargs == 1 and node.name in ['u1', 'rz', 't', 's']:\n                        cancellation_sets[('z_rotation', wire_name, com_set_idx)].append(node)\n                    elif num_qargs == 2 and node.qargs[0] == wire:\n                        second_op_name = \"{0}[{1}]\".format(str(node.qargs[1][0].name),\n                                                           str(node.qargs[1][1]))\n                        q2_key = (node.name, wire_name, second_op_name,\n                                  self.property_set['commutation_set'][(node, second_op_name)])\n                        cancellation_sets[q2_key].append(node)\n\n        for cancel_set_key in cancellation_sets:\n            set_len = len(cancellation_sets[cancel_set_key])\n            if ((set_len) > 1 and cancel_set_key[0] in q_gate_list):\n                gates_to_cancel = cancellation_sets[cancel_set_key]\n                for c_node in gates_to_cancel[:(set_len \n                    dag.remove_op_node(c_node)\n\n            elif((set_len) > 1 and cancel_set_key[0] == 'z_rotation'):\n                run = cancellation_sets[cancel_set_key]\n                run_qarg = run[0].qargs[0]\n                total_angle = 0.0  \n                for current_node in run:\n                    if (current_node.condition is not None\n                            or len(current_node.qargs) != 1\n                            or current_node.qargs[0] != run_qarg):\n                        raise TranspilerError(\"internal error\")\n\n                    if current_node.name in ['u1', 'rz']:\n                        current_angle = float(current_node.op.params[0])\n                    elif current_node.name == 't':\n                        current_angle = sympy.pi / 4\n                    elif current_node.name == 's':\n                        current_angle = sympy.pi / 2\n\n                    \n                    total_angle = current_angle + total_angle\n\n                \n                new_op = U1Gate(total_angle)\n                new_qarg = (QuantumRegister(1, 'q'), 0)\n                new_dag = DAGCircuit()\n                new_dag.add_qreg(new_qarg[0])\n                new_dag.apply_operation_back(new_op, [new_qarg])\n                dag.substitute_node_with_dag(run[0], new_dag)\n\n                \n                for current_node in run[1:]:\n                    dag.remove_op_node(current_node)\n\n        return dag", "docstring": "Run the CommutativeCancellation pass on a dag\n\nArgs:\ndag (DAGCircuit): the DAG to be optimized.\n\nReturns:\nDAGCircuit: the optimized DAG.\n\nRaises:\nTranspilerError: when the 1 qubit rotation gates are not found", "source": "juraj-google-style"}
{"code": "def __init__(self, variable):\n    if not isinstance(variable, variables.Variable):\n        raise ValueError('variable must be of type tf.ResourceVariable, but got: %s' % variable)\n    if not variable.dtype.is_floating:\n        raise ValueError('variable must be a floating point variable but has type: %s' % variable.dtype.name)\n    self._variable = variable\n    self._op = 'delegate'", "docstring": "Creates an AutoCastVariable instance.\n\nArgs:\nvariable: A floating-point resource variable to wrap.\n\nRaises:\nValueError: If `variable` is not a floating-point resource variable", "source": "github-repos"}
{"code": "def handle_closed_task(self, task_name, record):\n    if (task_name not in self.tasks):\n        return\n    if self.main_failed:\n        self.mark_parent_tasks_as_failed(self.cur_task)\n    if self.tasks[task_name].failed:\n        record.msg = ColorFormatter.colored('red', END_TASK_ON_ERROR_MSG)\n    else:\n        record.msg = ColorFormatter.colored('green', END_TASK_MSG)\n    record.msg += (' (in %s)' % self.tasks[task_name].elapsed_time())\n    if (self.should_show_by_depth() or self.tasks[task_name].force_show):\n        if self.tasks[task_name].force_show:\n            self.handle_error()\n        self.pretty_emit(record, is_header=True)\n    self.close_children_tasks(task_name)\n    self.tasks.pop(task_name)", "docstring": "Do everything needed when a task is closed\n\nParams:\ntask_name (str): name of the task that is finishing\nrecord (logging.LogRecord): log record with all the info\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, filename, flag):\n        \n        super(LMDBNoLockDatabase, self).__init__()\n\n        create = bool(flag == 'c')\n\n        if flag == 'n':\n            if os.path.isfile(filename):\n                os.remove(filename)\n            create = True\n\n        self._lmdb = lmdb.Environment(\n            path=filename,\n            map_size=1024**4,\n            map_async=True,\n            writemap=True,\n            readahead=False,\n            subdir=False,\n            create=create,\n            lock=True)", "docstring": "Constructor for the LMDBNoLockDatabase class.\n\nArgs:\nfilename (str): The filename of the database file.\nflag (str): a flag indicating the mode for opening the database.\nRefer to the documentation for anydbm.open().", "source": "juraj-google-style"}
{"code": "def _countdown(self, waitTime=0, printString='Waiting %*d seconds...', verbose=True):\n    if (waitTime <= 0):\n        waitTime = self.__retryDelay\n    for remaining in range(waitTime, 0, (- 1)):\n        _vPrint(verbose, ('\\r' + (printString % (len(str(waitTime)), remaining))), end='', flush=True)\n        time.sleep(1)\n    if verbose:\n        _vPrint(verbose, ('\\r' + (printString % (len(str(waitTime)), 0))))", "docstring": "Makes a pretty countdown.\n\nArgs:\ngitquery (str): The query or endpoint itself.\nExamples:\nquery: 'query { viewer { login } }'\nendpoint: '/user'\nprintString (Optional[str]): A counter message to display.\nDefaults to 'Waiting %*d seconds...'\nverbose (Optional[bool]): If False, all extra printouts will be\nsuppressed. Defaults to True.", "source": "codesearchnet"}
{"code": "def cancel(self, force=False):\n        \n        return self.rest_client._sc._delegator._cancel_job(self, force)", "docstring": "Cancel this job.\n\nArgs:\nforce (bool, optional): Forcefully cancel this job.\n\nReturns:\nbool: True if the job was cancelled, otherwise False if an error occurred.", "source": "juraj-google-style"}
{"code": "def speak(self, message):\n        \n        campfire = self.get_campfire()\n        if not isinstance(message, Message):\n            message = Message(campfire, message)\n\n        result = self._connection.post(\n            \"room/%s/speak\" % self.id,\n            {\"message\": message.get_data()},\n            parse_data=True,\n            key=\"message\"\n        )\n\n        if result[\"success\"]:\n            return Message(campfire, result[\"data\"])\n        return result[\"success\"]", "docstring": "Post a message.\n\nArgs:\nmessage (:class:`Message` or string): Message\n\nReturns:\nbool. Success", "source": "juraj-google-style"}
{"code": "def attribute(self, stream, name):\n        \n        if stream not in self._inputs:\n            raise ValueError(\"Stream is not an input of this operator.\")\n        if len(self._inputs) == 1:\n            return Expression('attribute', name)\n        else:\n            iport = self._op().inputPorts[self._inputs.index(stream)]\n            return Expression('attribute', iport._alias + '.' + name)", "docstring": "Expression for an input attribute.\n\nAn input attribute is an attribute on one of the input\nports of the operator invocation. `stream` must have been\nused to declare this invocation.\n\nArgs:\nstream(Stream): Stream the attribute is from.\nname(str): Name of the attribute.\n\nReturns:\nExpression: Expression representing the input attribute.", "source": "juraj-google-style"}
{"code": "def UpdateTaskAsPendingMerge(self, task):\n    \n    with self._lock:\n      is_abandoned = task.identifier in self._tasks_abandoned\n      is_processing = task.identifier in self._tasks_processing\n      is_queued = task.identifier in self._tasks_queued\n\n      if not is_queued and not is_processing and not is_abandoned:\n        raise KeyError('Status of task {0:s} is unknown.'.format(\n            task.identifier))\n\n      if is_abandoned and task.has_retry:\n        raise KeyError('Will not merge a task {0:s} with retry task.'.format(\n            task.identifier))\n\n      if is_queued:\n        logger.debug('Task {0:s} was queued, now merging.'.format(\n            task.identifier))\n        del self._tasks_queued[task.identifier]\n\n      if is_processing:\n        logger.debug('Task {0:s} was processing, now merging.'.format(\n            task.identifier))\n        del self._tasks_processing[task.identifier]\n\n      if is_abandoned:\n        logger.debug('Task {0:s} was abandoned, now merging.'.format(\n            task.identifier))\n        del self._tasks_abandoned[task.identifier]\n\n      self._tasks_pending_merge.PushTask(task)\n\n      self.SampleTaskStatus(task, 'pending_merge')\n\n      task.UpdateProcessingTime()\n      self._UpdateLatestProcessingTime(task)", "docstring": "Updates the task manager to reflect the task is ready to be merged.\n\nArgs:\ntask (Task): task.\n\nRaises:\nKeyError: if the task was not queued, processing or abandoned, or\nthe task was abandoned and has a retry task.", "source": "juraj-google-style"}
{"code": "def indices(self):\n    return self._indices", "docstring": "The indices of non-zero values in the represented dense tensor.\n\nReturns:\nA 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the\nnumber of non-zero values in the tensor, and `ndims` is the rank.", "source": "github-repos"}
{"code": "def __init__(self, rate=None, burst_size=None, experimenter=None):\n        \n        super().__init__(MeterBandType.OFPMBT_EXPERIMENTER, rate, burst_size)\n        self.experimenter = experimenter", "docstring": "Create a MeterBandExperimenter with the optional parameters below.\n\nArgs:\nrate (int): Rate for remarking packets.\nburst_size (int): Size of bursts.\nexperimenter (int): Experimenter ID which takes the same form as in\n:class:`.ExperimenterHeader`.", "source": "juraj-google-style"}
{"code": "def process(self, element):\n    return re.findall(\"[\\\\w\\\\']+\", element, re.UNICODE)", "docstring": "Returns an iterator over the words of this element.\n\nThe element is a line of text.  If the line is blank, note that, too.\n\nArgs:\nelement: the element being processed\n\nReturns:\nThe processed element.", "source": "github-repos"}
{"code": "def sandbox_call(func: Callable[..., Any], *args, timeout: Optional[float]=None, **kwargs) -> Any:\n\n    def _call(q, *args, **kwargs):\n\n        def _run():\n            r = func(*args, **kwargs)\n            try:\n                return pickle.dumps(r)\n            except BaseException as e:\n                raise errors.SerializationError(f'Cannot serialize sandbox result: {r}', e) from e\n        try:\n            q.put(_run())\n        except Exception as e:\n            q.put(e)\n    q = multiprocessing.Queue()\n    p = multiprocessing.Process(target=_call, args=tuple([q] + list(args)), kwargs=kwargs)\n    try:\n        p.start()\n        x = q.get(timeout=timeout)\n    except queue.Empty as e:\n        if p.is_alive():\n            p.kill()\n        raise TimeoutError(f'Execution time exceed {timeout} seconds.') from e\n    finally:\n        q.close()\n    if isinstance(x, Exception):\n        raise x\n    try:\n        return pickle.loads(x)\n    except Exception as e:\n        raise errors.SerializationError('Cannot deserialize the output from sandbox.', e) from e", "docstring": "Calls a function with sandboxing.\n\nArgs:\nfunc: Function to call.\n*args: Positional arguments for `func`\ntimeout: Execution timeout in seconds. If None, wait `func` to complete.\n**kwargs: Keyword arguments for `func`.\n\nReturns:\nReturn value from `func`.\n\nRaises:\nTimeoutError: If the execution time exceeds the timeout.\nException: Exception raised from `func`.", "source": "github-repos"}
{"code": "def delete(self, url, callback, json=None):\n        \n        return self.adapter.delete(url, callback, json=json)", "docstring": "Delete a URL.\n\nArgs:\n\nurl(string): URL for the request\n\ncallback(func): The response callback function\n\nKeyword Args:\n\njson(dict): JSON body for the request\n\nReturns:\n\nThe result of the callback handling the resopnse from the\nexecuted request", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, cipher_mode=None, encryption_method=None,\n      initialization_vector=None, key=None, parent=None, **kwargs):\n    \n    if not encryption_method or not parent:\n      raise ValueError('Missing encryption method or parent value.')\n\n    super(EncryptedStreamPathSpec, self).__init__(parent=parent, **kwargs)\n    self.cipher_mode = cipher_mode\n    self.encryption_method = encryption_method\n    self.initialization_vector = initialization_vector\n    self.key = key", "docstring": "Initializes a path specification.\n\nNote that the encrypted stream path specification must have a parent.\n\nArgs:\ncipher_mode (Optional[str]): cipher mode.\nencryption_method (Optional[str]): method used to the encrypt the data.\ninitialization_vector (Optional[bytes]):  initialization vector.\nkey (Optional[bytes]): key.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when encryption method or parent are not set.", "source": "juraj-google-style"}
{"code": "def on_session_init(self, request):\n    return framework.OnSessionInitResponse(framework.OnSessionInitAction.PROCEED)", "docstring": "Overrides on-session-init callback.\n\nArgs:\nrequest: An instance of `OnSessionInitRequest`.\n\nReturns:\nAn instance of `OnSessionInitResponse`.", "source": "github-repos"}
{"code": "def on_moved(self, event):\n        \n        if not self._event_error:\n            \n            \n            pathtools_options = {\n                'included_patterns': self.patterns,\n                'excluded_patterns': self.ignore_patterns,\n                'case_sensitive': self.case_sensitive,\n            }\n            \n            \n            if match_path(event.dest_path, **pathtools_options):\n                self.logger.info(u\"Change detected from a move on: %s\",\n                                 event.dest_path)\n                self.compile_dependencies(event.dest_path)", "docstring": "Called when a file or a directory is moved or renamed.\n\nMany editors don't directly change a file, instead they make a\ntransitional file like ``*.part`` then move it to the final filename.\n\nArgs:\nevent: Watchdog event, either ``watchdog.events.DirMovedEvent`` or\n``watchdog.events.FileModifiedEvent``.", "source": "juraj-google-style"}
{"code": "def has_auth_params(self, scheme):\n        \n        for k, v in iteritems(self.schemes[scheme][u'params']):\n            if not v: return False\n        return True", "docstring": "Check whether all information required for a given auth scheme have\nbeen supplied.\n\nArgs:\nscheme (str): Name of the authentication scheme to check. One of\nGem-Identify, Gem-Device, Gem-Application\n\nReturns:\nTrue if all required parameters for the specified scheme are present\nor False otherwise.", "source": "juraj-google-style"}
{"code": "def dp010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dp010`'.format(value))\n    self._dp010 = value", "docstring": "Corresponds to IDD Field `dp010`\nDew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `dp010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def hdfs_path(ctx, path):\n    HADOOP_SCHEMES = ['adl:\n    if any((path.startswith(scheme) for scheme in HADOOP_SCHEMES)):\n        return path\n    elif path.startswith('/'):\n        return (ctx.defaultFS + path)\n    elif (ctx.defaultFS.startswith('hdfs:\n        return '{0}/user/{1}/{2}'.format(ctx.defaultFS, getpass.getuser(), path)\n    elif ctx.defaultFS.startswith('file:\n        return '{0}/{1}/{2}'.format(ctx.defaultFS, ctx.working_dir[1:], path)\n    else:\n        logging.warn('Unknown scheme {0} with relative path: {1}'.format(ctx.defaultFS, path))\n        return '{0}/{1}'.format(ctx.defaultFS, path)", "docstring": "Convenience function to create a Tensorflow-compatible absolute HDFS path from relative paths\n\nArgs:\n:ctx: TFNodeContext containing the metadata specific to this node in the cluster.\n:path: path to convert\n\nReturns:\nAn absolute path prefixed with the correct filesystem scheme.", "source": "codesearchnet"}
{"code": "def get_structures(self, chemsys_formula_id, final=True):\n    prop = ('final_structure' if final else 'initial_structure')\n    data = self.get_data(chemsys_formula_id, prop=prop)\n    return [d[prop] for d in data]", "docstring": "Get a list of Structures corresponding to a chemical system, formula,\nor materials_id.\n\nArgs:\nchemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),\nor formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).\nfinal (bool): Whether to get the final structure, or the initial\n(pre-relaxation) structure. Defaults to True.\n\nReturns:\nList of Structure objects.", "source": "codesearchnet"}
{"code": "def set_soft_device_placement(enabled):\n    context().soft_device_placement = enabled", "docstring": "Set if soft device placements should be allowed.\n\nArgs:\nenabled: Whether to enable soft device placement.", "source": "github-repos"}
{"code": "def halt(self):\n        \n        res = int(self._dll.JLINKARM_Halt())\n        if res == 0:\n            time.sleep(1)\n            return True\n        return False", "docstring": "Halts the CPU Core.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``True`` if halted, ``False`` otherwise.", "source": "juraj-google-style"}
{"code": "def write(self, originalPrefix, newPrefix=None):\n    numSpaces = max(2, (25 - len(self.name)))\n    if (self.value is None):\n        line = ('%s\\n' % self.name)\n    elif (self.name == 'WMS'):\n        line = ('%s %s\\n' % (self.name, self.value))\n    elif (newPrefix is None):\n        line = ('%s%s%s\\n' % (self.name, (' ' * numSpaces), self.value))\n    elif (originalPrefix in self.value):\n        line = ('%s%s%s\\n' % (self.name, (' ' * numSpaces), self.value.replace(originalPrefix, newPrefix)))\n    else:\n        line = ('%s%s%s\\n' % (self.name, (' ' * numSpaces), self.value))\n    return line", "docstring": "Write project card to string.\n\nArgs:\noriginalPrefix (str): Original name to give to files that follow the project naming convention\n(e.g: prefix.gag).\nnewPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None.\n\nReturns:\nstr: Card and value as they would be written to the project file.", "source": "codesearchnet"}
{"code": "def diff_commonSuffix(self, text1, text2):\n    if ((not text1) or (not text2) or (text1[(- 1)] != text2[(- 1)])):\n        return 0\n    pointermin = 0\n    pointermax = min(len(text1), len(text2))\n    pointermid = pointermax\n    pointerend = 0\n    while (pointermin < pointermid):\n        if (text1[(- pointermid):(len(text1) - pointerend)] == text2[(- pointermid):(len(text2) - pointerend)]):\n            pointermin = pointermid\n            pointerend = pointermin\n        else:\n            pointermax = pointermid\n        pointermid = (((pointermax - pointermin) \n    return pointermid", "docstring": "Determine the common suffix of two strings.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nThe number of characters common to the end of each string.", "source": "codesearchnet"}
{"code": "def connection_required(func):\n\n    @functools.wraps(func)\n    def wrapper(self, *args, **kwargs):\n        \"Wrapper function to check that the given ``JLink`` has been\\n            connected to a target.\\n\\n            Args:\\n              self (JLink): the ``JLink`` instance\\n              args: list of arguments to pass to the wrapped function\\n              kwargs: key-word arguments dict to pass to the wrapped function\\n\\n            Returns:\\n              The return value of the wrapped function.\\n\\n            Raises:\\n              JLinkException: if the JLink's target is not connected.\\n            \"\n        if (not self.target_connected()):\n            raise errors.JLinkException('Target is not connected.')\n        return func(self, *args, **kwargs)\n    return wrapper", "docstring": "Decorator to specify that a target connection is required in order\nfor the given method to be used.\n\nArgs:\nfunc (function): function being decorated\n\nReturns:\nThe wrapper function.", "source": "codesearchnet"}
{"code": "def add_it(workbench, file_list, labels):\n    md5s = []\n    for filename in file_list:\n        if (filename != '.DS_Store'):\n            with open(filename, 'rb') as pe_file:\n                base_name = os.path.basename(filename)\n                md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')\n                workbench.add_node(md5, md5[:6], labels)\n                md5s.append(md5)\n    return md5s", "docstring": "Add the given file_list to workbench as samples, also add them as nodes.\n\nArgs:\nworkbench: Instance of Workbench Client.\nfile_list: list of files.\nlabels: labels for the nodes.\n\nReturns:\nA list of md5s.", "source": "codesearchnet"}
{"code": "def camelcase(string):\n    string = re.sub('^[\\\\-_\\\\.]', '', str(string))\n    if (not string):\n        return string\n    return (lowercase(string[0]) + re.sub('[\\\\-_\\\\.\\\\s]([a-z])', (lambda matched: uppercase(matched.group(1))), string[1:]))", "docstring": "Convert string into camel case.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Camel case string.", "source": "codesearchnet"}
{"code": "def _ParseStorageMediaOptions(self, options):\n    \n    self._ParseStorageMediaImageOptions(options)\n    self._ParseVSSProcessingOptions(options)\n    self._ParseCredentialOptions(options)\n    self._ParseSourcePathOption(options)", "docstring": "Parses the storage media options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def end_of(self, event_id, import_options=True):\n        \n        event_id = str(event_id)\n        if event_id in DatePickerDictionary.items:\n            linked_picker = DatePickerDictionary.items[event_id]\n            self.config['linked_to'] = linked_picker.config['id']\n            if import_options:\n                backup_moment_format = self.config['options']['format']\n                self.config['options'].update(linked_picker.config['options'])\n                self.config['options'].update(self.options_param)\n                if self.format_param or 'format' in self.options_param:\n                    self.config['options']['format'] = backup_moment_format\n                else:\n                    self.format = linked_picker.format\n            \n            \n            self.config['options']['useCurrent'] = False\n            self._link_to(linked_picker)\n        else:\n            raise KeyError(\n                'start-date not specified for event_id \"%s\"' % event_id)\n        return self", "docstring": "Set Date-Picker as the end-date of a date-range.\n\nArgs:\n- event_id (string): User-defined unique id for linking two fields\n- import_options (bool): inherit options from start-date input,\ndefault: TRUE", "source": "juraj-google-style"}
{"code": "def recipe_cm_user_editor(config, recipe_name):\n    drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:", "docstring": "A tool for rapidly bulk editing Campaign Manager profiles, roles, and sub\naccounts.\n\nArgs:\nrecipe_name (string) - Name of document to deploy to.", "source": "github-repos"}
{"code": "def sync_firmware(self):\n    serial_no = self.serial_number\n    if self.firmware_newer():\n        try:\n            self.invalidate_firmware()\n            self.update_firmware()\n        except errors.JLinkException as e:\n            pass\n        res = self.open(serial_no=serial_no)\n        if self.firmware_newer():\n            raise errors.JLinkException('Failed to sync firmware version.')\n        return res\n    elif self.firmware_outdated():\n        try:\n            self.update_firmware()\n        except errors.JLinkException as e:\n            pass\n        if self.firmware_outdated():\n            raise errors.JLinkException('Failed to sync firmware version.')\n        return self.open(serial_no=serial_no)\n    return None", "docstring": "Syncs the emulator's firmware version and the DLL's firmware.\n\nThis method is useful for ensuring that the firmware running on the\nJ-Link matches the firmware supported by the DLL.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def _get_sliced_variables(var_list):\n  \n  unsliced_variables = []\n  sliced_variables = collections.defaultdict(lambda: [])\n  for var in var_list:\n    if var._save_slice_info:\n      sliced_variables[var._save_slice_info.full_name].append(var)\n    else:\n      unsliced_variables.append(var)\n  return unsliced_variables, sliced_variables", "docstring": "Separates the sliced (partitioned) and unsliced variables in var_list.\n\nArgs:\nvar_list: a list of variables.\n\nReturns:\nA list of unsliced variables in var_list, and a dict mapping names to parts\nfor the sliced variables in var_list.", "source": "juraj-google-style"}
{"code": "def list_apppools():\n    ret = dict()\n    ps_cmd = []\n    ps_cmd.append(\"Get-ChildItem -Path 'IIS:\\\\AppPools' | Select-Object Name, State\")\n    ps_cmd.append(\", @{ Name = 'Applications'; Expression = { $AppPool = $_.Name;\")\n    ps_cmd.append(\"$AppPath = 'machine/webroot/apphost';\")\n    ps_cmd.append(\"$FilterBase = '/system.applicationHost/sites/site/application';\")\n    ps_cmd.append('$FilterBase += \"[@applicationPool = \\'$($AppPool)\\' and @path\";')\n    ps_cmd.append('$FilterRoot = \"$($FilterBase) = \\'/\\']/parent::*\";')\n    ps_cmd.append('$FilterNonRoot = \"$($FilterBase) != \\'/\\']\";')\n    ps_cmd.append('Get-WebConfigurationProperty -Filter $FilterRoot -PsPath $AppPath -Name Name')\n    ps_cmd.append('| ForEach-Object { $_.Value };')\n    ps_cmd.append('Get-WebConfigurationProperty -Filter $FilterNonRoot -PsPath $AppPath -Name Path')\n    ps_cmd.append(\"| ForEach-Object { $_.Value } | Where-Object { $_ -ne '/' }\")\n    ps_cmd.append('} }')\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n    for item in items:\n        applications = list()\n        if isinstance(item['Applications'], dict):\n            if ('value' in item['Applications']):\n                applications += item['Applications']['value']\n        else:\n            applications.append(item['Applications'])\n        ret[item['name']] = {'state': item['state'], 'applications': applications}\n    if (not ret):\n        log.warning('No application pools found in output: %s', cmd_ret['stdout'])\n    return ret", "docstring": "List all configured IIS application pools.\n\nReturns:\ndict: A dictionary of IIS application pools and their details.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_apppools", "source": "codesearchnet"}
{"code": "def adb_call(args=None, shell=False, timeout=None, stderr=None) -> bytes:\n    return self._exec_adb_cmd(name, args, shell=shell, timeout=timeout, stderr=stderr)", "docstring": "Wrapper for an ADB command.\n\nArgs:\nargs: string or list of strings, arguments to the adb command.\nSee subprocess.Proc() documentation.\nshell: bool, True to run this command through the system shell,\nFalse to invoke it directly. See subprocess.Proc() docs.\ntimeout: float, the number of seconds to wait before timing out.\nIf not specified, no timeout takes effect.\nstderr: a Byte stream, like io.BytesIO, stderr of the command\nwill be written to this object if provided.\n\nReturns:\nThe output of the adb command run if exit code is 0.", "source": "github-repos"}
{"code": "def _WrapEndMarker(tree):\n    if isinstance(tree, pytree.Leaf) and tree.type == token.ENDMARKER:\n        return pytree.Node(pygram.python_symbols.file_input, [tree])\n    return tree", "docstring": "Wrap a single ENDMARKER token in a \"file_input\" node.\n\nArguments:\ntree: (pytree.Node) The root node of the parsed tree.\n\nReturns:\nThe root node of the parsed tree. If the tree is a single ENDMARKER node,\nthen that node is wrapped in a \"file_input\" node. That will ensure we don't\nskip comments attached to that node.", "source": "github-repos"}
{"code": "def __init__(self, allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None):\n    self._tf_cluster = None\n    self._generate_timeline = not disable_timeline\n    if devices is None:\n        self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement, disable_detailed_stats)\n    else:\n        devices_serialized = [device.SerializeToString() for device in devices]\n        self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized)", "docstring": "Creates a Cluster.\n\nArgs:\nallow_soft_placement: If True, TF will automatically fix illegal\nplacements instead of erroring out if the placement isn't legal.\ndisable_detailed_stats: If True, detailed statistics will not be\navailable.\ndisable_timeline: If True, the timeline information will not be reported.\ndevices: A list of devices of type device_properties_pb2.NamedDevice.\nIf None, a device list will be created based on the spec of\nthe local machine.", "source": "github-repos"}
{"code": "def update_script_from_item(self, item):\n        \n\n        script, path_to_script, script_item = item.get_script()\n\n        \n        \n        dictator = list(script_item.to_dict().values())[0]  \n\n        for instrument in list(script.instruments.keys()):\n            \n            script.instruments[instrument]['settings'] = dictator[instrument]['settings']\n            \n            del dictator[instrument]\n\n\n        for sub_script_name in list(script.scripts.keys()):\n            sub_script_item = script_item.get_subscript(sub_script_name)\n            self.update_script_from_item(sub_script_item)\n            del dictator[sub_script_name]\n\n        script.update(dictator)\n        \n        script.data_path = self.gui_settings['data_folder']", "docstring": "updates the script based on the information provided in item\n\nArgs:\nscript: script to be updated\nitem: B26QTreeItem that contains the new settings of the script", "source": "juraj-google-style"}
{"code": "def extract_lookups_from_string(value):\n    lookups = set()\n    for match in LOOKUP_REGEX.finditer(value):\n        groupdict = match.groupdict()\n        raw = match.groups()[0]\n        lookup_type = groupdict['type']\n        lookup_input = groupdict['input']\n        lookups.add(Lookup(lookup_type, lookup_input, raw))\n    return lookups", "docstring": "Extract any lookups within a string.\n\nArgs:\nvalue (str): string value we're extracting lookups from\n\nReturns:\nlist: list of :class:`stacker.lookups.Lookup` if any", "source": "codesearchnet"}
{"code": "def p45(msg):\n    d = hex2bin(data(msg))\n    if (d[26] == '0'):\n        return None\n    p = bin2int(d[27:38])\n    return p", "docstring": "Average static pressure.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: static pressure in hPa", "source": "codesearchnet"}
{"code": "def add_country_locations(self, countries, locations=None, use_live=True):\n    allcountriesadded = True\n    for country in countries:\n        if (not self.add_country_location(country, locations=locations, use_live=use_live)):\n            allcountriesadded = False\n    return allcountriesadded", "docstring": "Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country\nnames, converted to iso 3 codes. If any country is already added, it is ignored.\n\nArgs:\ncountries (List[str]): list of countries to add\nlocations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.\nuse_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True.\n\nReturns:\nbool: True if all countries added or False if any already present.", "source": "codesearchnet"}
{"code": "def _set_bearer_user_vars_local(token, allowed_client_ids, scopes):\n    result = urlfetch.fetch(('%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token}))))\n    if (result.status_code != 200):\n        try:\n            error_description = json.loads(result.content)['error_description']\n        except (ValueError, KeyError):\n            error_description = ''\n        _logger.error('Token info endpoint returned status %s: %s', result.status_code, error_description)\n        return\n    token_info = json.loads(result.content)\n    if ('email' not in token_info):\n        _logger.warning(\"Oauth token doesn't include an email address.\")\n        return\n    if (token_info.get('email_verified') != 'true'):\n        _logger.warning(\"Oauth token email isn't verified.\")\n        return\n    client_id = token_info.get('azp')\n    if ((list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK) and (client_id not in allowed_client_ids)):\n        _logger.warning('Client ID is not allowed: %s', client_id)\n        return\n    (_, sufficient_scopes) = _process_scopes(scopes)\n    authorized_scopes = token_info.get('scope', '').split(' ')\n    if (not _are_scopes_sufficient(authorized_scopes, sufficient_scopes)):\n        _logger.warning(\"Oauth token scopes don't match any acceptable scopes.\")\n        return\n    os.environ[_ENV_AUTH_EMAIL] = token_info['email']\n    os.environ[_ENV_AUTH_DOMAIN] = ''\n    _logger.debug('Local dev returning user from token.')", "docstring": "Validate the oauth bearer token on the dev server.\n\nSince the functions in the oauth module return only example results in local\ndevelopment, this hits the tokeninfo endpoint and attempts to validate the\ntoken.  If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we\ncan get the user from the token.\n\nArgs:\ntoken: String with the oauth token to validate.\nallowed_client_ids: List of client IDs that are acceptable.\nscopes: List of acceptable scopes.", "source": "codesearchnet"}
{"code": "def StartTiming(self, profile_name):\n    \n    if profile_name not in self._profile_measurements:\n      self._profile_measurements[profile_name] = CPUTimeMeasurement()\n\n    self._profile_measurements[profile_name].SampleStart()", "docstring": "Starts timing CPU time.\n\nArgs:\nprofile_name (str): name of the profile to sample.", "source": "juraj-google-style"}
{"code": "def recipe_cm_campaign_audit(config, recipe_name):\n    drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:", "docstring": "A tool for rapidly bulk checking Campaign Manager campaigns\n\nArgs:\nrecipe_name (string) - Name of document to deploy to.", "source": "github-repos"}
{"code": "def start_site(name):\n    ps_cmd = ['Start-WebSite', \"'{0}'\".format(name)]\n    cmd_ret = _srvmgr(ps_cmd)\n    return (cmd_ret['retcode'] == 0)", "docstring": "Start a Web Site in IIS.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the website to start.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.start_site name='My Test Site'", "source": "codesearchnet"}
{"code": "def master_key_from_seed(seed):\n        \n        S = get_bytes(seed)\n        I = hmac.new(b\"Bitcoin seed\", S, hashlib.sha512).digest()\n        Il, Ir = I[:32], I[32:]\n        parse_Il = int.from_bytes(Il, 'big')\n        if parse_Il == 0 or parse_Il >= bitcoin_curve.n:\n            raise ValueError(\"Bad seed, resulting in invalid key!\")\n\n        return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0)", "docstring": "Generates a master key from a provided seed.\n\nArgs:\nseed (bytes or str): a string of bytes or a hex string\n\nReturns:\nHDPrivateKey: the master private key.", "source": "juraj-google-style"}
{"code": "def GetFeedMapping(client, feed, placeholder_type):\n  \n  feed_mapping_service = client.GetService('FeedMappingService', 'v201809')\n\n  attribute_mappings = {}\n  more_pages = True\n\n  selector = {\n      'fields': ['FeedMappingId', 'AttributeFieldMappings'],\n      'predicates': [\n          {\n              'field': 'FeedId',\n              'operator': 'EQUALS',\n              'values': [feed['id']]\n          },\n          {\n              'field': 'PlaceholderType',\n              'operator': 'EQUALS',\n              'values': [placeholder_type]\n          }\n      ],\n      'paging': {\n          'startIndex': 0,\n          'numberResults': PAGE_SIZE\n      }\n  }\n\n  while more_pages:\n    page = feed_mapping_service.get(selector)\n\n    if 'entries' in page:\n      \n      \n      for feed_mapping in page['entries']:\n        for attribute_mapping in feed_mapping['attributeFieldMappings']:\n          \n          \n          if attribute_mapping['feedAttributeId'] in attribute_mappings:\n            attribute_mappings[attribute_mapping['feedAttributeId']].append(\n                attribute_mapping['fieldId'])\n          else:\n            attribute_mappings[attribute_mapping['feedAttributeId']] = [\n                attribute_mapping['fieldId']]\n\n    selector['paging']['startIndex'] += PAGE_SIZE\n    more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries'])\n\n  return attribute_mappings", "docstring": "Gets the Feed Mapping for a given Feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed: the Feed we are retrieving the Feed Mapping for.\nplaceholder_type: the Placeholder Type we are looking for.\nReturns:\nA dictionary containing the Feed Mapping.", "source": "juraj-google-style"}
{"code": "def cost(self, logits, target):\n    \n    logits = tf.reshape(logits, [self._num_steps * self._batch_size, -1])\n    target = tf.reshape(target, [self._num_steps * self._batch_size, -1])\n    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target)\n    loss = tf.reduce_sum(xent)\n\n    return loss / self._batch_size", "docstring": "Returns cost.\n\nArgs:\nlogits: model output.\ntarget: target.\n\nReturns:\nCross-entropy loss for a sequence of logits. The loss will be averaged\nacross time steps if time_average_cost was enabled at construction time.", "source": "juraj-google-style"}
{"code": "def transactional(func, args, kwds, **options):\n  \n  return transactional_async.wrapped_decorator(\n      func, args, kwds, **options).get_result()", "docstring": "Decorator to make a function automatically run in a transaction.\n\nArgs:\n**ctx_options: Transaction options (see transaction(), but propagation\ndefault to TransactionOptions.ALLOWED).\n\nThis supports two forms:\n\n(1) Vanilla:\n@transactional\ndef callback(arg):\n...\n\n(2) With options:\n@transactional(retries=1)\ndef callback(arg):\n...", "source": "juraj-google-style"}
{"code": "def read_nmr_efg(self):\n    header_pattern = '^\\\\s+NMR quadrupolar parameters\\\\s+$\\\\n^\\\\s+Cq : quadrupolar parameter\\\\s+Cq=e[*]Q[*]V_zz/h$\\\\n^\\\\s+eta: asymmetry parameters\\\\s+\\\\(V_yy - V_xx\\\\)/ V_zz$\\\\n^\\\\s+Q  : nuclear electric quadrupole moment in mb \\\\(millibarn\\\\)$\\\\n^-{50,}$\\\\n^\\\\s+ion\\\\s+Cq\\\\(MHz\\\\)\\\\s+eta\\\\s+Q \\\\(mb\\\\)\\\\s+$\\\\n^-{50,}\\\\s*$\\\\n'\n    row_pattern = '\\\\d+\\\\s+(?P<cq>[-]?\\\\d+\\\\.\\\\d+)\\\\s+(?P<eta>[-]?\\\\d+\\\\.\\\\d+)\\\\s+(?P<nuclear_quadrupole_moment>[-]?\\\\d+\\\\.\\\\d+)'\n    footer_pattern = '-{50,}\\\\s*$'\n    self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float, last_one_only=True, attribute_name='efg')", "docstring": "Parse the NMR Electric Field Gradient interpretted values.\n\nReturns:\nElectric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.\nEach dict key/value pair corresponds to a component of the tensors.", "source": "codesearchnet"}
{"code": "def check_or(state, *tests):\n    success = False\n    first_feedback = None\n    for test in iter_tests(tests):\n        try:\n            multi(state, test)\n            success = True\n        except TestFail as e:\n            if (not first_feedback):\n                first_feedback = e.feedback\n        if success:\n            return state\n    state.report(first_feedback)", "docstring": "Test whether at least one SCT passes.\n\nIf all of the tests fail, the feedback of the first test will be presented to the student.\n\nArgs:\nstate: State instance describing student and solution code, can be omitted if used with Ex()\ntests: one or more sub-SCTs to run\n\n:Example:\nThe SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::\n\nEx().check_or(\nhas_code('SELECT'),\nhas_code('WHERE')\n)\n\nThe SCT below checks that a SELECT statement has at least a WHERE c or LIMIT clause.. ::\n\nEx().check_node('SelectStmt', 0).check_or(\ncheck_edge('where_clause'),\ncheck_edge('limit_clause')\n)", "source": "codesearchnet"}
{"code": "def factory(attr_type, data):\n        \n\n        constructors = {\n            MFT_ATTR_STANDARD_INFORMATION: MftAttrStandardInformation,\n            MFT_ATTR_ATTRIBUTE_LIST: MftAttrAttributeList,\n            MFT_ATTR_FILENAME: MftAttrFilename,\n            MFT_ATTR_OBJECT_ID: MftAttrObjectId,\n            MFT_ATTR_SECURITY_DESCRIPTOR: MftAttrSecurityDescriptor,\n            MFT_ATTR_VOLUME_NAME: MftAttrVolumeName,\n            MFT_ATTR_VOLUME_INFO: MftAttrVolumeInfo,\n            MFT_ATTR_DATA: MftAttrData,\n            MFT_ATTR_INDEX_ROOT: MftAttrIndexRoot,\n            MFT_ATTR_INDEX_ALLOCATION: MftAttrIndexAllocation,\n            MFT_ATTR_BITMAP: MftAttrBitmap,\n            MFT_ATTR_REPARSE_POINT: MftAttrReparsePoint,\n            MFT_ATTR_LOGGED_TOOLSTREAM: MftAttrLoggedToolstream,\n        }\n\n        if attr_type not in constructors:\n            return None\n\n        return constructors[attr_type](data)", "docstring": "Returns Initialized attribute object based on attr_type \\\n(eg. :class:`MftAttrStandardInformation`)\n\nArgs:\nattr_type (uint): Attribute type number (eg. 0x10 - \\\n$STANDARD_INFORMATION)\ndata (byte array): Data to initialize attribute object with.", "source": "juraj-google-style"}
{"code": "def get_nested_streams(dmap):\n    return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})", "docstring": "Recurses supplied DynamicMap to find all streams\n\nArgs:\ndmap: DynamicMap to recurse to look for streams\n\nReturns:\nList of streams that were found", "source": "codesearchnet"}
{"code": "def sites_at_edges(self):\n    min_x = min([s.r[0] for s in self.sites])\n    max_x = max([s.r[0] for s in self.sites])\n    min_y = min([s.r[1] for s in self.sites])\n    max_y = max([s.r[1] for s in self.sites])\n    min_z = min([s.r[2] for s in self.sites])\n    max_z = max([s.r[2] for s in self.sites])\n    x_max = [s for s in self.sites if (s.r[0] == min_x)]\n    x_min = [s for s in self.sites if (s.r[0] == max_x)]\n    y_max = [s for s in self.sites if (s.r[1] == min_y)]\n    y_min = [s for s in self.sites if (s.r[1] == max_y)]\n    z_max = [s for s in self.sites if (s.r[2] == min_z)]\n    z_min = [s for s in self.sites if (s.r[2] == max_z)]\n    return (x_max, x_min, y_max, y_min, z_max, z_min)", "docstring": "Finds the six sites with the maximum and minimum coordinates along x, y, and z.\n\nArgs:\nNone\n\nReturns:\n(List(List)): In the order [ +x, -x, +y, -y, +z, -z ]", "source": "codesearchnet"}
{"code": "def get_matching_files(filename):\n    return get_matching_files_v2(filename)", "docstring": "Returns a list of files that match the given pattern(s).\n\nArgs:\nfilename: string or iterable of strings. The glob pattern(s).\n\nReturns:\nA list of strings containing filenames that match the given pattern(s).\n\nRaises:\n*  errors.OpError: If there are filesystem / directory listing errors.\n*  errors.NotFoundError: If pattern to be matched is an invalid directory.", "source": "github-repos"}
{"code": "def handler_for_name(fq_name):\n  \n  resolved_name = for_name(fq_name)\n  if isinstance(resolved_name, (type, types.ClassType)):\n    \n    return resolved_name()\n  elif isinstance(resolved_name, types.MethodType):\n    \n    return getattr(resolved_name.im_class(), resolved_name.__name__)\n  else:\n    return resolved_name", "docstring": "Resolves and instantiates handler by fully qualified name.\n\nFirst resolves the name using for_name call. Then if it resolves to a class,\ninstantiates a class, if it resolves to a method - instantiates the class and\nbinds method to the instance.\n\nArgs:\nfq_name: fully qualified name of something to find.\n\nReturns:\nhandler instance which is ready to be called.", "source": "juraj-google-style"}
{"code": "def _set_details(self, content):\n        \n        try:\n            self.details = str(content)\n        except UnicodeEncodeError:\n            if sys.version_info < (3, 0):\n                \n                self.details = unicode(content)\n            else:\n                \n                \n                logging.error(\n                    'Unable to decode \"%s\" in Py3, encoding in utf-8.',\n                    content)\n                self.details = content.encode('utf-8')", "docstring": "Sets the `details` field.\n\nArgs:\ncontent: the content to extract details from.", "source": "juraj-google-style"}
{"code": "def get_mapping(self, superset, subset):\n    if self._supercell:\n        raise ValueError('cannot compute mapping to supercell')\n    if self._primitive_cell:\n        raise ValueError('cannot compute mapping with primitive cell option')\n    if (len(subset) > len(superset)):\n        raise ValueError('subset is larger than superset')\n    (superset, subset, _, _) = self._preprocess(superset, subset, True)\n    match = self._strict_match(superset, subset, 1, break_on_match=False)\n    if ((match is None) or (match[0] > self.stol)):\n        return None\n    return match[4]", "docstring": "Calculate the mapping from superset to subset.\n\nArgs:\nsuperset (Structure): Structure containing at least the sites in\nsubset (within the structure matching tolerance)\nsubset (Structure): Structure containing some of the sites in\nsuperset (within the structure matching tolerance)\n\nReturns:\nnumpy array such that superset.sites[mapping] is within matching\ntolerance of subset.sites or None if no such mapping is possible", "source": "codesearchnet"}
{"code": "def safe_group_name(group_name, group_max_length=100, ellipsis=True):\n    ellipsis_value = ''\n    if ellipsis:\n        ellipsis_value = ' ...'\n    if ((group_name is not None) and (len(group_name) > group_max_length)):\n        group_name_array = group_name.split(' ')\n        group_name = ''\n        for word in group_name_array:\n            word = u'{}'.format(word)\n            if (((len(group_name) + len(word)) + len(ellipsis_value)) >= group_max_length):\n                group_name = '{}{}'.format(group_name, ellipsis_value)\n                group_name = group_name.lstrip(' ')\n                break\n            group_name += ' {}'.format(word)\n    return group_name", "docstring": "Truncate group name to match limit breaking on space and optionally add an ellipsis.\n\n.. note:: Currently the ThreatConnect group name limit is 100 characters.\n\nArgs:\ngroup_name (string): The raw group name to be truncated.\ngroup_max_length (int): The max length of the group name.\nellipsis (boolean): If true the truncated name will have '...' appended.\n\nReturns:\n(string): The truncated group name with optional ellipsis.", "source": "codesearchnet"}
{"code": "def format_h1(s, format=\"text\", indents=0):\n    \n\n    _CHAR = \"=\"\n    if format.startswith(\"text\"):\n        return format_underline(s, _CHAR, indents)\n    elif format.startswith(\"markdown\"):\n        return [\"\n    elif format.startswith(\"rest\"):\n        return format_underline(s, _CHAR, 0)", "docstring": "Encloses string in format text\n\nArgs:\ns: string\nformat: string starting with \"text\", \"markdown\", or \"rest\"\nindents: number of leading intenting spaces\n\nReturns: list\n\n>>> print(\"\\\\n\".join(format_h2(\"Header 1\", indents=10)))\nHeader 1\n--------\n\n>>> print(\"\\\\n\".join(format_h2(\"Header 1\", \"markdown\", 0)))\n## Header 1", "source": "juraj-google-style"}
{"code": "def Parse(self, parser_mediator):\n    \n    file_entry = parser_mediator.GetFileEntry()\n    if not file_entry:\n      raise errors.UnableToParseFile('Invalid file entry')\n\n    parser_mediator.AppendToParserChain(self)\n    try:\n      self.ParseFileEntry(parser_mediator, file_entry)\n    finally:\n      parser_mediator.PopFromParserChain()", "docstring": "Parsers the file entry and extracts event objects.\n\nArgs:\nparser_mediator (ParserMediator): a parser mediator.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def intent(self, user: str=None, token: Optional[str]=None) -> 'IntentAPI':\n    if self.is_real_user:\n        raise ValueError(\"Can't get child intent of real user\")\n    if token:\n        return IntentAPI(user, self.real_user(user, token), self.bot_intent(), self.state_store, self.intent_log)\n    return IntentAPI(user, self.user(user), self.bot_intent(), self.state_store, self.intent_log)", "docstring": "Get the intent API for a specific user.\n\nArgs:\nuser: The Matrix ID of the user whose intent API to get.\n\nReturns:\nThe IntentAPI for the given user.", "source": "codesearchnet"}
{"code": "def islink(self, path=None, header=None):\n        \n        if header is None:\n            header = self._head(self.get_client_kwargs(path))\n\n        for key in ('x-oss-object-type', 'type'):\n            try:\n                return header.pop(key) == 'Symlink'\n            except KeyError:\n                continue\n        return False", "docstring": "Returns True if object is a symbolic link.\n\nArgs:\npath (str): File path or URL.\nheader (dict): Object header.\n\nReturns:\nbool: True if object is Symlink.", "source": "juraj-google-style"}
{"code": "def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting lrelu ...')\n\n    if names == 'short':\n        tf_name = 'lRELU' + random_string(3)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    leakyrelu = \\\n        keras.layers.LeakyReLU(alpha=params['alpha'], name=tf_name)\n    layers[scope_name] = leakyrelu(layers[inputs[0]])", "docstring": "Convert leaky relu layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def __init__(self, buckets):\n    self.buckets = buckets", "docstring": "Creates a new Buckets.\n\nArgs:\nbuckets: A c pointer of TFE_MonitoringBuckets.", "source": "github-repos"}
{"code": "def Parse(text, message):\n  \n  if not isinstance(text, six.text_type): text = text.decode('utf-8')\n  try:\n    if sys.version_info < (2, 7):\n      \n      js = json.loads(text)\n    else:\n      js = json.loads(text, object_pairs_hook=_DuplicateChecker)\n  except ValueError as e:\n    raise ParseError('Failed to load JSON: {0}.'.format(str(e)))\n  _ConvertMessage(js, message)\n  return message", "docstring": "Parses a JSON representation of a protocol message into a message.\n\nArgs:\ntext: Message JSON representation.\nmessage: A protocol beffer message to merge into.\n\nReturns:\nThe same message passed as argument.\n\nRaises::\nParseError: On JSON parsing problems.", "source": "juraj-google-style"}
{"code": "def get_parent_dir_for_name(module_name):\n    name_split = module_name.split('.')\n    if not name_split:\n        return None\n    try:\n        spec = importlib.util.find_spec(name_split[0])\n    except ValueError:\n        return None\n    if not spec or not spec.origin:\n        return None\n    base_path = os.path.dirname(spec.origin)\n    return os.path.join(base_path, *name_split[1:-1])", "docstring": "Get parent directory for module with the given name.\n\nArgs:\nmodule_name: Module name, e.g. tf_keras.api._v2.keras.\n\nReturns:\nPath to the parent directory if module is found and None otherwise.\nGiven example above, it should return:\n/root_path/tf_keras/api/_v2.", "source": "github-repos"}
{"code": "def require_fresh_games(self, number_fresh):\n        \n        latest = self.latest_game_number\n        table_state = self.bt_table.row(TABLE_STATE)\n        table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh))\n        table_state.commit()\n        print(\"== Setting wait cell to \", int(latest + number_fresh), flush=True)", "docstring": "Require a given number of fresh games to be played.\n\nArgs:\nnumber_fresh:  integer, number of new fresh games needed\n\nIncrements the cell `table_state=metadata:wait_for_game_number`\nby the given number of games.  This will cause\n`self.wait_for_fresh_games()` to block until the game\ncounter has reached this number.", "source": "juraj-google-style"}
{"code": "def resolve_import(name, is_from, is_star):\n    \n    \n    \n    if name.startswith('.') or is_builtin(name):\n        return None\n    ret = _resolve_import(name)\n    if ret is None and is_from and not is_star:\n        package, _ = name.rsplit('.', 1)\n        ret = _resolve_import(package)\n    return ret", "docstring": "Use python to resolve an import.\n\nArgs:\nname: The fully qualified module name.\n\nReturns:\nThe path to the module source file or None.", "source": "juraj-google-style"}
{"code": "def correct_dihedral(self, construction_table, use_lookup=None):\n    if (use_lookup is None):\n        use_lookup = settings['defaults']['use_lookup']\n    problem_index = self.check_dihedral(construction_table)\n    bond_dict = self._give_val_sorted_bond_dict(use_lookup=use_lookup)\n    c_table = construction_table.copy()\n    for i in problem_index:\n        loc_i = c_table.index.get_loc(i)\n        (b, a, problem_d) = c_table.loc[(i, ['b', 'a', 'd'])]\n        try:\n            c_table.loc[(i, 'd')] = ((bond_dict[a] - {b, a, problem_d}) - set(c_table.index[loc_i:]))[0]\n        except IndexError:\n            visited = (set(c_table.index[loc_i:]) | {b, a, problem_d})\n            tmp_bond_dict = OrderedDict([(j, (bond_dict[j] - visited)) for j in bond_dict[problem_d]])\n            found = False\n            while (tmp_bond_dict and (not found)):\n                new_tmp_bond_dict = OrderedDict()\n                for new_d in tmp_bond_dict:\n                    if (new_d in visited):\n                        continue\n                    angle = self.get_angle_degrees([b, a, new_d])[0]\n                    if (5 < angle < 175):\n                        found = True\n                        c_table.loc[(i, 'd')] = new_d\n                    else:\n                        visited.add(new_d)\n                        for j in tmp_bond_dict[new_d]:\n                            new_tmp_bond_dict[j] = (bond_dict[j] - visited)\n                tmp_bond_dict = new_tmp_bond_dict\n            if (not found):\n                other_atoms = c_table.index[:loc_i].difference({b, a})\n                molecule = self.get_distance_to(origin=i, sort=True, other_atoms=other_atoms)\n                k = 0\n                while ((not found) and (k < len(molecule))):\n                    new_d = molecule.index[k]\n                    angle = self.get_angle_degrees([b, a, new_d])[0]\n                    if (5 < angle < 175):\n                        found = True\n                        c_table.loc[(i, 'd')] = new_d\n                    k = (k + 1)\n                if (not found):\n                    message = 'The atom with index {} has no possibility to get nonlinear reference atoms'.format\n                    raise UndefinedCoordinateSystem(message(i))\n    return c_table", "docstring": "Reindexe the dihedral defining atom if linear reference is used.\n\nUses :meth:`~Cartesian.check_dihedral` to obtain the problematic\nindices.\n\nArgs:\nconstruction_table (pd.DataFrame):\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\npd.DataFrame: Appropiately renamed construction table.", "source": "codesearchnet"}
{"code": "def reduce_sum(tensors):\n    return _apply_reduce('sum', tensors)", "docstring": "Returns a tensor with the reduce sum across `tensors`.\n\nThe computation is done with a reduce operation, so only one tensor is\nreturned.\n\nArgs:\ntensors: The input tensors across which to sum; must be assigned\nto GPU devices.\n\nReturns:\nA tensor containing the sum of the input tensors.\n\nRaises:\nLookupError: If context is not currently using a GPU device.", "source": "github-repos"}
{"code": "def __init__(self, code_page=None, time_zone=None):\n    \n    super(SystemConfigurationArtifact, self).__init__()\n    self.code_page = code_page\n    self.hostname = None\n    self.keyboard_layout = None\n    self.operating_system = None\n    self.operating_system_product = None\n    self.operating_system_version = None\n    self.time_zone = time_zone\n    self.user_accounts = []", "docstring": "Initializes a system configuration artifact.\n\nArgs:\ncode_page (Optional[str]): system code page.\ntime_zone (Optional[str]): system time zone.", "source": "juraj-google-style"}
{"code": "def roc_auc(logits, labels, weights_fn=None):\n  \n  del weights_fn\n  with tf.variable_scope(\"roc_auc\", values=[logits, labels]):\n    predictions = tf.argmax(logits, axis=-1)\n    _, auc = tf.metrics.auc(labels, predictions, curve=\"ROC\")\n    return auc, tf.constant(1.0)", "docstring": "Calculate ROC AUC.\n\nRequires binary classes.\n\nArgs:\nlogits: Tensor of size [batch_size, 1, 1, num_classes]\nlabels: Tensor of size [batch_size, 1, 1, num_classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\nROC AUC (scalar), weights", "source": "juraj-google-style"}
{"code": "def trainable_weights(self):\n    if self.trainable:\n        children_weights = self._gather_children_attribute('trainable_variables')\n        return self._dedup_weights(self._trainable_weights + children_weights)\n    else:\n        return []", "docstring": "List of all trainable weights tracked by this layer.\n\nTrainable weights are updated via gradient descent during training.\n\nReturns:\nA list of trainable variables.", "source": "github-repos"}
{"code": "def route(self, method, pattern):\n        \n        def decorator(callback):\n            self._router.add(method, pattern, callback)\n            return callback\n        return decorator", "docstring": "Decorator to add route for a request with any HTTP method.\n\nArguments:\nmethod (str): HTTP method name, e.g. GET, POST, etc.\npattern (str): Routing pattern the path must match.\n\nReturns:\nfunction: Decorator function to add route.", "source": "juraj-google-style"}
{"code": "def append(self, node, dirty=True):\n        \n        self._children[node.id] = node\n        node.parent = self\n        if dirty:\n            self.touch()\n\n        return node", "docstring": "Add a new child node.\n\nArgs:\nnode (gkeepapi.Node): Node to add.\ndirty (bool): Whether this node should be marked dirty.", "source": "juraj-google-style"}
{"code": "def device(self) -> str:\n    return pywrap_tf_session.TF_OperationDevice(self._c_op)", "docstring": "The name of the device to which this op has been assigned, if any.\n\nReturns:\nThe string name of the device to which this op has been\nassigned, or an empty string if it has not been assigned to a\ndevice.", "source": "github-repos"}
{"code": "def sphere_selector_using_residues(self, radius, force_rerun=False):\n        \n        log.debug('{}: running sphere selector...'.format(self.id))\n\n        if not self.sphgen_path or not self.bindingsite_path:\n            return ValueError('Please run sphgen and binding_site_mol2')\n\n        selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id))\n\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):\n            cmd = \"sphere_selector {} {} {}\".format(self.sphgen_path, self.bindingsite_path, radius)\n            rename = \"mv selected_spheres.sph {}\".format(selsph)\n\n            os.system(cmd)\n            os.system(rename)\n\n        if ssbio.utils.is_non_zero_file(selsph):\n            self.sphsel_path = selsph\n            log.debug('{}: successful sphere selection'.format(self.sphsel_path))\n        else:\n            log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path))", "docstring": "Select spheres based on binding site residues\n\nArgs:\nradius (int, float): Radius around binding residues to dock to\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"}
{"code": "def make_json_serializable(doc: Dict):\n    for (k, v) in doc.items():\n        if isinstance(v, datetime.date):\n            doc[k] = v.strftime('%Y-%m-%d')\n        elif isinstance(v, datetime.datetime):\n            doc[k] = v.isoformat()", "docstring": "Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.\nThis method modifies the given document in place.\n\nArgs:\ndoc: A Python Dictionary, typically a CDR object.\n\nReturns: None", "source": "codesearchnet"}
{"code": "def set_session(self, headers=None):\n        \n        if headers is None:\n            headers = {\n                'User-Agent':\n                ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'\n                 ' AppleWebKit/537.36 (KHTML, like Gecko) '\n                 'Chrome/48.0.2564.116 Safari/537.36')\n            }\n        elif not isinstance(headers, dict):\n            raise TypeError('\"headers\" must be a dict object')\n\n        self.session = Session(self.proxy_pool)\n        self.session.headers.update(headers)", "docstring": "Init session with default or custom headers\n\nArgs:\nheaders: A dict of headers (default None, thus using the default\nheader to init the session)", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An MBART sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def delete(self, *, auto_commit=False):\n        \n        try:\n            db.session.delete(self.resource)\n            if auto_commit:\n                db.session.commit()\n        except SQLAlchemyError:\n            self.log.exception('Failed deleting resource: {}'.format(self.id))\n            db.session.rollback()", "docstring": "Removes a resource from the database\n\nArgs:\nauto_commit (bool): Automatically commit the transaction. Default: `False`\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n    residual = hidden_states\n    hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    second_residual = hidden_states\n    cross_attn_weights = None\n    hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = second_residual + hidden_states\n    hidden_states = self.encoder_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights, cross_attn_weights)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(seq_len, batch, embed_dim)`.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings that are added to the queries and keys in the self-attention layer.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\nencoder_hidden_states (`torch.FloatTensor`):\ncross attention input to the layer of shape `(seq_len, batch, embed_dim)`\nencoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def update(self, data=None, timeout=-1, force=''):\n        \n        uri = self.data['uri']\n\n        resource = deepcopy(self.data)\n        resource.update(data)\n\n        \n        if resource.get('serverHardwareUri') is None:\n            resource.pop('enclosureBay', None)\n            resource.pop('enclosureUri', None)\n\n        self.data = self._helper.update(resource, uri, force, timeout)\n\n        return self", "docstring": "Updates server profile template.\n\nArgs:\ndata: Data to update the resource.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nforce: Force the update operation.\n\nReturns:\nA dict with the updated resource data.", "source": "juraj-google-style"}
{"code": "def ConsumeString(self):\n    the_bytes = self.ConsumeByteString()\n    try:\n        return six.text_type(the_bytes, 'utf-8')\n    except UnicodeDecodeError as e:\n        raise self._StringParseError(e)", "docstring": "Consumes a string value.\n\nReturns:\nThe string parsed.\n\nRaises:\nParseError: If a string value couldn't be consumed.", "source": "codesearchnet"}
{"code": "def check_query(query):\n    q = query.lower()\n    if ('select ' not in q):\n        raise InvalidQuery('SELECT word not found in the query: {0}'.format(query))\n    if (' from ' not in q):\n        raise InvalidQuery('FROM word not found in the query: {0}'.format(query))", "docstring": "Check query sanity\n\nArgs:\nquery: query string\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def start_test(self, idempotence_key=None, base_path='', **kwargs):\n    \n    if not idempotence_key:\n      idempotence_key = uuid.uuid4().hex\n    pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)\n    context = _PipelineContext('', 'default', base_path)\n    future = PipelineFuture(self.output_names, force_strict=True)\n    self._set_values_internal(\n        context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)\n    context.start_test(self)", "docstring": "Starts this pipeline in test fashion.\n\nArgs:\nidempotence_key: Dummy idempotence_key to use for this root pipeline.\nbase_path: Dummy base URL path to use for this root pipeline.\nkwargs: Ignored keyword arguments usually passed to start().", "source": "juraj-google-style"}
{"code": "def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):\n    dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)\n    aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1)\n    return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask", "docstring": "Calculates aggregation loss in the case of answer supervision.\n\nArgs:\nlogits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):\nLogits per aggregation operation.\naggregate_mask (`tf.Tensor` of shape `(batch_size, )`):\nA mask set to 1 for examples that should use aggregation functions\n\nReturns:\naggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer\nsupervision) per example.", "source": "github-repos"}
{"code": "def process_exists(self, task_type, task_id):\n    return self.get_process_exit_code(task_type, task_id) is None", "docstring": "Returns whether the subprocess still exists given the task type and id.\n\nArgs:\ntask_type: The task type.\ntask_id: The task id.\n\nReturns:\nBoolean; whether the subprocess still exists. If the subprocess has\nexited, this returns False.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n    x = self.dense(hidden_states).squeeze(-1)\n    if p_mask is not None:\n        if get_parameter_dtype(self) == torch.float16:\n            x = x * (1 - p_mask) - 65500 * p_mask\n        else:\n            x = x * (1 - p_mask) - 1e+30 * p_mask\n    return x", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):\nThe final hidden states of the model.\np_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):\nMask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token\nshould be masked.\n\nReturns:\n`torch.FloatTensor`: The start logits for SQuAD.", "source": "github-repos"}
{"code": "def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generator, max_subtoken_length=None, reserved_tokens=None):\n    if (data_dir and vocab_filename):\n        vocab_filepath = os.path.join(data_dir, vocab_filename)\n        if tf.gfile.Exists(vocab_filepath):\n            tf.logging.info('Found vocab file: %s', vocab_filepath)\n            return text_encoder.SubwordTextEncoder(vocab_filepath)\n    else:\n        vocab_filepath = None\n    tf.logging.info('Generating vocab file: %s', vocab_filepath)\n    vocab = text_encoder.SubwordTextEncoder.build_from_generator(generator, vocab_size, max_subtoken_length=max_subtoken_length, reserved_tokens=reserved_tokens)\n    if vocab_filepath:\n        tf.gfile.MakeDirs(data_dir)\n        vocab.store_to_file(vocab_filepath)\n    return vocab", "docstring": "Inner implementation for vocab generators.\n\nArgs:\ndata_dir: The base directory where data and vocab files are stored. If None,\nthen do not save the vocab even if it doesn't exist.\nvocab_filename: relative filename where vocab file is stored\nvocab_size: target size of the vocabulary constructed by SubwordTextEncoder\ngenerator: a generator that produces tokens from the vocabulary\nmax_subtoken_length: an optional integer.  Set this to a finite value to\navoid quadratic costs during vocab building.\nreserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`\nshould be a prefix of `reserved_tokens`. If `None`, defaults to\n`RESERVED_TOKENS`.\n\nReturns:\nA SubwordTextEncoder vocabulary object.", "source": "codesearchnet"}
{"code": "def from_esri_code(code):\n    \n    \n    code = str(code)\n    proj4 = utils.crscode_to_string(\"esri\", code, \"proj4\")\n    crs = from_proj4(proj4)\n    return crs", "docstring": "Load crs object from esri code, via spatialreference.org.\nParses based on the proj4 representation.\n\nArguments:\n\n- *code*: The ESRI code as an integer.\n\nReturns:\n\n- A CS instance of the indicated type.", "source": "juraj-google-style"}
{"code": "def _set_per_output_metric_attributes(self, metrics_dict, output_index):\n    updated_metrics_dict = collections.OrderedDict()\n    for metric_name, metric_fn in metrics_dict.items():\n        metric_name = self._add_unique_metric_name(metric_name, metric_fn, output_index)\n        metric_fn._name = metric_name\n        updated_metrics_dict[metric_name] = metric_fn\n        self._compile_metric_functions.append(metric_fn)\n    return updated_metrics_dict", "docstring": "Sets the metric attributes on the model for the given output.\n\nArgs:\nmetrics_dict: A dict with metric names as keys and metric fns as values.\noutput_index: The index of the model output for which the metric\nattributes are added.\n\nReturns:\nMetrics dict updated with unique metric names as keys.", "source": "github-repos"}
{"code": "def logical_switches(self):\n    if (not self.__logical_switches):\n        self.__logical_switches = LogicalSwitches(self.__connection)\n    return self.__logical_switches", "docstring": "Gets the LogicalSwitches API client.\n\nReturns:\nLogicalSwitches:", "source": "codesearchnet"}
{"code": "def IsBlockInNameSpace(nesting_state, is_forward_declaration):\n  \n  if is_forward_declaration:\n    return len(nesting_state.stack) >= 1 and (\n      isinstance(nesting_state.stack[-1], _NamespaceInfo))\n\n\n  return (len(nesting_state.stack) > 1 and\n          nesting_state.stack[-1].check_namespace_indentation and\n          isinstance(nesting_state.stack[-2], _NamespaceInfo))", "docstring": "Checks that the new block is directly in a namespace.\n\nArgs:\nnesting_state: The _NestingState object that contains info about our state.\nis_forward_declaration: If the class is a forward declared class.\nReturns:\nWhether or not the new block is directly in a namespace.", "source": "juraj-google-style"}
{"code": "def _sub_entity_map(self, assignments, item, campaign):\n    for assignment in assignments:\n        placement = self._placement_dao.get(assignment, required=True)\n        event_tag = self._event_tag_dao.get(assignment, required=True)\n        creative = self._creative_dao.get(assignment, required=True)\n        landing_page = None\n        if assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT':\n            landing_page = self._landing_page_dao.get(assignment, required=True)\n        if landing_page:\n            assignment[FieldMap.AD_LANDING_PAGE_ID] = landing_page['id']\n        if item:\n            assignment[FieldMap.AD_ID] = item['id']\n            assignment[FieldMap.AD_NAME] = item['name']\n        if campaign:\n            assignment[FieldMap.CAMPAIGN_ID] = campaign['id']\n            assignment[FieldMap.CAMPAIGN_NAME] = campaign['name']\n        if placement:\n            assignment[FieldMap.PLACEMENT_ID] = placement['id']\n            assignment[FieldMap.PLACEMENT_NAME] = placement['name']\n        if creative:\n            assignment[FieldMap.CREATIVE_ID] = creative['id']\n            assignment[FieldMap.CREATIVE_NAME] = creative['name']\n        if event_tag:\n            assignment[FieldMap.EVENT_TAG_ID] = event_tag['id']\n            assignment[FieldMap.EVENT_TAG_NAME] = event_tag['name']", "docstring": "Maps ids and names of sub entities so they can be updated in the Bulkdozer feed.\n\nWhen Bulkdozer is done processing an item, it writes back the updated names\nand ids of related objects, this method makes sure those are updated in the\nad feed.\n\nArgs:\nassignments: List of child feeds to map.\nitem: The DCM ad object that was updated or created.\ncampaign: The campaign object associated with the ad.", "source": "github-repos"}
{"code": "def createLabel(self, name):\n        \n        if self.findLabel(name):\n            raise exception.LabelException('Label exists')\n        node = _node.Label()\n        node.name = name\n        self._labels[node.id] = node \n        return node", "docstring": "Create a new label.\n\nArgs:\nname (str): Label name.\n\nReturns:\ngkeepapi.node.Label: The new label.\n\nRaises:\nLabelException: If the label exists.", "source": "juraj-google-style"}
{"code": "def slice_hidden(self, x):\n    \n    x_sliced = tf.reshape(\n        x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])\n    return x_sliced", "docstring": "Slice encoder hidden state into block_dim.\n\nArgs:\nx: Encoder hidden state of shape [-1, hidden_size].\n\nReturns:\nSliced states of shape [-1, num_blocks, block_dim].", "source": "juraj-google-style"}
{"code": "def load(self, email, master_token, android_id):\n    self._email = email\n    self._android_id = android_id\n    self._master_token = master_token\n    self.refresh()\n    return True", "docstring": "Authenticate to Google with the provided master token.\n\nArgs:\nemail (str): The account to use.\nmaster_token (str): The master token.\nandroid_id (str): An identifier for this client.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "codesearchnet"}
{"code": "def __type_check_attributes(self, node: yaml.Node, mapping: CommentedMap, argspec: inspect.FullArgSpec) -> None:\n    logger.debug('Checking for extraneous attributes')\n    logger.debug('Constructor arguments: {}, mapping: {}'.format(argspec.args, list(mapping.keys())))\n    for (key, value) in mapping.items():\n        if (not isinstance(key, str)):\n            raise RecognitionError('{}{}YAtiML only supports strings for mapping keys'.format(node.start_mark, os.linesep))\n        if ((key not in argspec.args) and ('yatiml_extra' not in argspec.args)):\n            raise RecognitionError('{}{}Found additional attributes and {} does not support those'.format(node.start_mark, os.linesep, self.class_.__name__))\n        if ((key in argspec.args) and (not self.__type_matches(value, argspec.annotations[key]))):\n            raise RecognitionError('{}{}Expected attribute {} to be of type {} but it is a(n) {}'.format(node.start_mark, os.linesep, key, argspec.annotations[key], type(value)))", "docstring": "Ensure all attributes have a matching constructor argument.\n\nThis checks that there is a constructor argument with a \\\nmatching type for each existing attribute.\n\nIf the class has a yatiml_extra attribute, then extra \\\nattributes are okay and no error will be raised if they exist.\n\nArgs:\nnode: The node we're processing\nmapping: The mapping with constructed subobjects\nconstructor_attrs: The attributes of the constructor, \\\nincluding self and yatiml_extra, if applicable", "source": "codesearchnet"}
{"code": "def convert_positional_argument(self, index, arg_value):\n        \n\n        \n        if self._has_self:\n            if index == 0:\n                return arg_value\n\n            index -= 1\n\n        arg_name = self.arg_names[index]\n        return self.convert_argument(arg_name, arg_value)", "docstring": "Convert and validate a positional argument.\n\nArgs:\nindex (int): The positional index of the argument\narg_value (object): The value to convert and validate\n\nReturns:\nobject: The converted value.", "source": "juraj-google-style"}
{"code": "def scan_devices(self, subnet, timeout=None):\n        \n\n        \n        \n        max_range = {\n            16: 256,\n            24: 256,\n            25: 128,\n            27: 32,\n            28: 16,\n            29: 8,\n            30: 4,\n            31: 2\n        }\n\n        \n        if \"/\" not in subnet:\n            mask = int(24)\n            network = subnet\n        else:\n            network, mask = subnet.split(\"/\")\n            mask = int(mask)\n\n        if mask not in max_range:\n            raise RuntimeError(\"Cannot determine the subnet mask!\")\n\n        \n        \n        network = network.rpartition(\".\")[0]\n\n        if mask == 16:\n            \n            \n\n            \n            for i in range(0, 1):\n                network = network.rpartition(\".\")[0]\n\n        \n        \n        \n        if mask == 16:\n            for seq1 in range(0, max_range[mask]):\n                for seq2 in range(0, max_range[mask]):\n                    ipaddr = \"{0}.{1}.{2}\".format(network, seq1, seq2)\n                    thd = threading.Thread(\n                        target=self.__raw_scan, args=(ipaddr, timeout)\n                    )\n                    thd.start()\n        else:\n            for seq1 in range(0, max_range[mask]):\n                ipaddr = \"{0}.{1}\".format(network, seq1)\n                thd = threading.Thread(\n                    target=self.__raw_scan, args=(ipaddr, timeout)\n                )\n                thd.start()\n\n        return self.amcrest_ips", "docstring": "Scan cameras in a range of ips\n\nParams:\nsubnet - subnet, i.e: 192.168.1.0/24\nif mask not used, assuming mask 24\n\ntimeout_sec - timeout in sec\n\nReturns:", "source": "juraj-google-style"}
{"code": "def add_value(self, field_name: str, value: object=None, json_path: str=None, json_path_extraction: str=None, keep_empty: bool=False) -> None:\n\n    def validate(v):\n        if (v is not None):\n            if isinstance(v, str):\n                if ((v.strip() != '') or keep_empty):\n                    return True\n                else:\n                    return False\n            else:\n                return True\n        return False\n    self.validate_field(field_name)\n    if (field_name not in self._kg):\n        self._kg[field_name] = []\n    if json_path:\n        self._add_doc_value(field_name, json_path)\n    if validate(value):\n        if (not isinstance(value, list)):\n            value = [value]\n        all_valid = True\n        invalid = []\n        for a_value in value:\n            if isinstance(a_value, Extraction):\n                valid = self._add_single_value(field_name, a_value.value, provenance_path=str(json_path_extraction), keep_empty=keep_empty)\n            elif isinstance(a_value, Segment):\n                valid = self._add_single_value(field_name, a_value.value, provenance_path=a_value.json_path, keep_empty=keep_empty)\n            else:\n                valid = self._add_single_value(field_name, a_value, provenance_path=json_path_extraction, reference_type='constant', keep_empty=keep_empty)\n            all_valid = (all_valid and valid)\n            if (not valid):\n                invalid.append(((field_name + ':') + str(a_value)))\n        if (not all_valid):\n            print(('Some kg value type invalid according to schema:' + json.dumps(invalid)))\n    if (len(self._kg[field_name]) == 0):\n        self._kg.pop(field_name)", "docstring": "Add a value to knowledge graph.\nInput can either be a value or a json_path. If the input is json_path, the helper function _add_doc_value is\ncalled.\nIf the input is a value, then it is handled\n\nArgs:\nfield_name: str, the field name in the knowledge graph\nvalue: the value to be added to the knowledge graph\njson_path: str, if json_path is provided, then get the value at this path in the doc\njson_path_extraction: str,\ndiscard_empty: bool,\nReturns:", "source": "codesearchnet"}
{"code": "def __init__(self, command_sequence, sess, dump_root=None):\n    local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(self, sess, dump_root=dump_root)\n    self._command_sequence = command_sequence\n    self._command_pointer = 0\n    self.observers = {'debug_dumps': [], 'tf_errors': [], 'run_start_cli_run_numbers': [], 'run_end_cli_run_numbers': [], 'print_feed_responses': [], 'profiler_py_graphs': [], 'profiler_run_metadata': []}", "docstring": "Constructor of the for-test subclass.\n\nArgs:\ncommand_sequence: (list of list of str) A list of command arguments,\nincluding the command prefix, each element of the list is such as:\n[\"run\", \"-n\"],\n[\"print_feed\", \"input:0\"].\nsess: See the doc string of LocalCLIDebugWrapperSession.__init__.\ndump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.", "source": "github-repos"}
{"code": "def token_network_connect(self, registry_address: PaymentNetworkID, token_address: TokenAddress, funds: TokenAmount, initial_channel_target: int=3, joinable_funds_target: float=0.4) -> None:\n    if (not is_binary_address(registry_address)):\n        raise InvalidAddress('registry_address must be a valid address in binary')\n    if (not is_binary_address(token_address)):\n        raise InvalidAddress('token_address must be a valid address in binary')\n    token_network_identifier = views.get_token_network_identifier_by_token_address(chain_state=views.state_from_raiden(self.raiden), payment_network_id=registry_address, token_address=token_address)\n    connection_manager = self.raiden.connection_manager_for_token_network(token_network_identifier)\n    (has_enough_reserve, estimated_required_reserve) = has_enough_gas_reserve(raiden=self.raiden, channels_to_open=initial_channel_target)\n    if (not has_enough_reserve):\n        raise InsufficientGasReserve(f'The account balance is below the estimated amount necessary to finish the lifecycles of all active channels. A balance of at least {estimated_required_reserve} wei is required.')\n    connection_manager.connect(funds=funds, initial_channel_target=initial_channel_target, joinable_funds_target=joinable_funds_target)", "docstring": "Automatically maintain channels open for the given token network.\n\nArgs:\ntoken_address: the ERC20 token network to connect to.\nfunds: the amount of funds that can be used by the ConnectionMananger.\ninitial_channel_target: number of channels to open proactively.\njoinable_funds_target: fraction of the funds that will be used to join\nchannels opened by other participants.", "source": "codesearchnet"}
{"code": "def tdist95conf_level(df):\n    \n    df = int(round(df))\n    highest_table_df = len(_T_DIST_95_CONF_LEVELS)\n    if df >= 200:\n        return 1.960\n    if df >= 100:\n        return 1.984\n    if df >= 80:\n        return 1.990\n    if df >= 60:\n        return 2.000\n    if df >= 50:\n        return 2.009\n    if df >= 40:\n        return 2.021\n    if df >= highest_table_df:\n        return _T_DIST_95_CONF_LEVELS[highest_table_df - 1]\n    return _T_DIST_95_CONF_LEVELS[df]", "docstring": "Approximate the 95% confidence interval for Student's T distribution.\n\nGiven the degrees of freedom, returns an approximation to the 95%\nconfidence interval for the Student's T distribution.\n\nArgs:\ndf: An integer, the number of degrees of freedom.\n\nReturns:\nA float.", "source": "juraj-google-style"}
{"code": "def download_decompress(url: str, download_path: [Path, str], extract_paths=None):\n    file_name = Path(urlparse(url).path).name\n    download_path = Path(download_path)\n    if (extract_paths is None):\n        extract_paths = [download_path]\n    elif isinstance(extract_paths, list):\n        extract_paths = [Path(path) for path in extract_paths]\n    else:\n        extract_paths = [Path(extract_paths)]\n    cache_dir = os.getenv('DP_CACHE_DIR')\n    extracted = False\n    if cache_dir:\n        cache_dir = Path(cache_dir)\n        url_hash = md5(url.encode('utf8')).hexdigest()[:15]\n        arch_file_path = (cache_dir / url_hash)\n        extracted_path = (cache_dir / (url_hash + '_extracted'))\n        extracted = extracted_path.exists()\n        if ((not extracted) and (not arch_file_path.exists())):\n            simple_download(url, arch_file_path)\n    else:\n        arch_file_path = (download_path / file_name)\n        simple_download(url, arch_file_path)\n        extracted_path = extract_paths.pop()\n    if (not extracted):\n        log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path))\n        extracted_path.mkdir(parents=True, exist_ok=True)\n        if file_name.endswith('.tar.gz'):\n            untar(arch_file_path, extracted_path)\n        elif file_name.endswith('.gz'):\n            ungzip(arch_file_path, (extracted_path / Path(file_name).with_suffix('').name))\n        elif file_name.endswith('.zip'):\n            with zipfile.ZipFile(arch_file_path, 'r') as zip_ref:\n                zip_ref.extractall(extracted_path)\n        else:\n            raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')\n        if (not cache_dir):\n            arch_file_path.unlink()\n    for extract_path in extract_paths:\n        for src in extracted_path.iterdir():\n            dest = (extract_path / src.name)\n            if src.is_dir():\n                copytree(src, dest)\n            else:\n                extract_path.mkdir(parents=True, exist_ok=True)\n                shutil.copy(str(src), str(dest))", "docstring": "Download and extract .tar.gz or .gz file to one or several target locations.\nThe archive is deleted if extraction was successful.\n\nArgs:\nurl: URL for file downloading\ndownload_path: path to the directory where downloaded file will be stored\nuntil the end of extraction\nextract_paths: path or list of paths where contents of archive will be extracted", "source": "codesearchnet"}
{"code": "def connection_made(self, transport):\n    self.transport = transport\n    self.transport.sendto(self.message)\n    self.transport.close()", "docstring": "Create connection, use to send message and close.\n\nArgs:\ntransport (asyncio.DatagramTransport): Transport used for sending.", "source": "codesearchnet"}
{"code": "def _DefaultValueConstructorForField(field):\n  \n\n  if _IsMapField(field):\n    return _GetInitializeDefaultForMap(field)\n\n  if field.label == _FieldDescriptor.LABEL_REPEATED:\n    if field.has_default_value and field.default_value != []:\n      raise ValueError('Repeated field default value not empty list: %s' % (\n          field.default_value))\n    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:\n      \n      \n      message_type = field.message_type\n      def MakeRepeatedMessageDefault(message):\n        return containers.RepeatedCompositeFieldContainer(\n            message._listener_for_children, field.message_type)\n      return MakeRepeatedMessageDefault\n    else:\n      type_checker = type_checkers.GetTypeChecker(field)\n      def MakeRepeatedScalarDefault(message):\n        return containers.RepeatedScalarFieldContainer(\n            message._listener_for_children, type_checker)\n      return MakeRepeatedScalarDefault\n\n  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:\n    \n    message_type = field.message_type\n    def MakeSubMessageDefault(message):\n      result = message_type._concrete_class()\n      result._SetListener(\n          _OneofListener(message, field)\n          if field.containing_oneof is not None\n          else message._listener_for_children)\n      return result\n    return MakeSubMessageDefault\n\n  def MakeScalarDefault(message):\n    \n    \n    return field.default_value\n  return MakeScalarDefault", "docstring": "Returns a function which returns a default value for a field.\n\nArgs:\nfield: FieldDescriptor object for this field.\n\nThe returned function has one argument:\nmessage: Message instance containing this field, or a weakref proxy\nof same.\n\nThat function in turn returns a default value for this field.  The default\nvalue may refer back to |message| via a weak reference.", "source": "juraj-google-style"}
{"code": "def allow_nan_stats(self):\n    return self._allow_nan_stats", "docstring": "Python `bool` describing behavior when a stat is undefined.\n\nStats return +/- infinity when it makes sense. E.g., the variance of a\nCauchy distribution is infinity. However, sometimes the statistic is\nundefined, e.g., if a distribution's pdf does not achieve a maximum within\nthe support of the distribution, the mode is undefined. If the mean is\nundefined, then by definition the variance is undefined. E.g. the mean for\nStudent's T for df = 1 is undefined (no clear way to say it is either + or -\ninfinity), so the variance = E[(X - mean)**2] is also undefined.\n\nReturns:\nallow_nan_stats: Python `bool`.", "source": "github-repos"}
{"code": "def is_point(self):\n        \n\n        if self.childCount() == 2:\n                if self.child(0).valid_values == float and self.child(1).valid_values == float:\n                    return True\n        else:\n            return False", "docstring": "figures out if item is a point, that is if it has two subelements of type float\nArgs:\nself:\n\nReturns: if item is a point (True) or not (False)", "source": "juraj-google-style"}
{"code": "def _flatten_obs(self, obs_dict, verbose=False):\n    ob_lst = []\n    for key in obs_dict:\n        if (key in self.keys):\n            if verbose:\n                print('adding key: {}'.format(key))\n            ob_lst.append(obs_dict[key])\n    return np.concatenate(ob_lst)", "docstring": "Filters keys of interest out and concatenate the information.\n\nArgs:\nobs_dict: ordered dictionary of observations", "source": "codesearchnet"}
{"code": "def __init__(self, options):\n        \n\n        self.__options = options\n        self.count_total = 0\n        self.items_queued = OrderedDict()\n        self.items_in_progress = OrderedDict()\n        self.items_finished = OrderedDict()\n        self.items_cancelled = OrderedDict()\n        self.items_errored = OrderedDict()", "docstring": "Constructs a Queue instance.\n\nArgs:\noptions (:class:`nyawc.Options`): The options to use.", "source": "juraj-google-style"}
{"code": "def _ip_assigned(self):\n    output = []\n    cmd = ['/sbin/ip', 'address', 'show', 'dev', self.config['interface'], 'to', self.ip_with_prefixlen]\n    if self.ip_check_disabled:\n        self.log.info('checking for IP assignment on interface %s is disabled', self.config['interface'])\n        return True\n    self.log.debug('running %s', ' '.join(cmd))\n    try:\n        output = subprocess.check_output(cmd, universal_newlines=True, timeout=1)\n    except subprocess.CalledProcessError as error:\n        self.log.error('error checking IP-PREFIX %s: %s', cmd, error.output)\n        return True\n    except subprocess.TimeoutExpired:\n        self.log.error('timeout running %s', ' '.join(cmd))\n        return True\n    except ValueError as error:\n        self.log.error('running %s raised ValueError exception:%s', ' '.join(cmd), error)\n        return True\n    else:\n        if (self.ip_with_prefixlen in output):\n            msg = '{i} assigned to loopback interface'.format(i=self.ip_with_prefixlen)\n            self.log.debug(msg)\n            return True\n        else:\n            msg = \"{i} isn't assigned to {d} interface\".format(i=self.ip_with_prefixlen, d=self.config['interface'])\n            self.log.warning(msg)\n            return False\n    self.log.debug(\"I shouldn't land here!, it is a BUG\")\n    return False", "docstring": "Check if IP prefix is assigned to loopback interface.\n\nReturns:\nTrue if IP prefix found assigned otherwise False.", "source": "codesearchnet"}
{"code": "def from_value(cls, value):\n    return cls(value.shape, dtype=value.dtype, trainable=value.trainable)", "docstring": "Creates a `VariableSpec` from the given `Variable`.\n\n`value`'s shape, dtype, and trainable attributes will be used to create\nthe new `VariableSpec`.\n\nExample:\n\n>>> v = tf.Variable([1., 2., 3.])\n>>> VariableSpec.from_value(v)\nVariableSpec(shape=(3,), dtype=tf.float32, trainable=True, alias_id=None)\n\nArgs:\nvalue: A Variable.\n\nReturns:\nA `VariableSpec` created from `value`.", "source": "github-repos"}
{"code": "def _new_import(self, import_name):\n        \n\n        \n        assert self.root_path is not None, \\\n            '\"import\" statement can not be used if meta-model is ' \\\n            'loaded from string.'\n\n        \n        \n        current_namespace = self._namespace_stack[-1]\n        if '.' in current_namespace:\n            root_namespace = current_namespace.rsplit('.', 1)[0]\n            import_name = \"%s.%s\" % (root_namespace, import_name)\n\n        import_file_name = \"%s.tx\" % os.path.join(self.root_path,\n                                                  *import_name.split(\".\"))\n\n        if import_name not in self.namespaces:\n            self._enter_namespace(import_name)\n            if self.debug:\n                self.dprint(\"*** IMPORTING FILE: %s\" % import_file_name)\n            metamodel_from_file(import_file_name, metamodel=self)\n            self._leave_namespace()\n\n        \n        \n        \n        self._imported_namespaces[current_namespace].append(\n            self.namespaces[import_name])", "docstring": "Starts a new import.\nArgs:\nimport_name(str): A relative import in the dot syntax\n(e.g. \"first.second.expressions\")", "source": "juraj-google-style"}
{"code": "def delete_datastore(self):\n    (success, result) = self._read_from_hdx('datastore', self.data['id'], 'resource_id', self.actions()['datastore_delete'], force=True)\n    if (not success):\n        logger.debug(result)", "docstring": "Delete a resource from the HDX datastore\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, scopes=None, service_account_name='default', **kwds):\n        \n        \n        \n        \n        \n        \n        self.__service_account_name = service_account_name\n        cached_scopes = None\n        cache_filename = kwds.get('cache_filename')\n        if cache_filename:\n            cached_scopes = self._CheckCacheFileForMatch(\n                cache_filename, scopes)\n\n        scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)\n\n        if cache_filename and not cached_scopes:\n            self._WriteCacheFile(cache_filename, scopes)\n\n        \n        \n        \n        \n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore')\n            super(GceAssertionCredentials, self).__init__(scope=scopes, **kwds)", "docstring": "Initializes the credentials instance.\n\nArgs:\nscopes: The scopes to get. If None, whatever scopes that are\navailable to the instance are used.\nservice_account_name: The service account to retrieve the scopes\nfrom.\n**kwds: Additional keyword args.", "source": "juraj-google-style"}
{"code": "def evaluate(self, instance, step, extra):\n        \n        \n        chain = step.chain[1:]\n        if self.strict and not chain:\n            raise TypeError(\n                \"A ContainerAttribute in 'strict' mode can only be used \"\n                \"within a SubFactory.\")\n\n        return self.function(instance, chain)", "docstring": "Evaluate the current ContainerAttribute.\n\nArgs:\nobj (LazyStub): a lazy stub of the object being constructed, if\nneeded.\ncontainers (list of LazyStub): a list of lazy stubs of factories\nbeing evaluated in a chain, each item being a future field of\nnext one.", "source": "juraj-google-style"}
{"code": "def decrease_exponent_to(self, new_exp):\n    if (new_exp > self.exponent):\n        raise ValueError(('New exponent %i should be more negative thanold exponent %i' % (new_exp, self.exponent)))\n    factor = pow(self.BASE, (self.exponent - new_exp))\n    new_enc = ((self.encoding * factor) % self.public_key.n)\n    return self.__class__(self.public_key, new_enc, new_exp)", "docstring": "Return an `EncodedNumber` with same value but lower exponent.\n\nIf we multiply the encoded value by :attr:`BASE` and decrement\n:attr:`exponent`, then the decoded value does not change. Thus\nwe can almost arbitrarily ratchet down the exponent of an\n:class:`EncodedNumber` - we only run into trouble when the encoded\ninteger overflows. There may not be a warning if this happens.\n\nThis is necessary when adding :class:`EncodedNumber` instances,\nand can also be useful to hide information about the precision\nof numbers - e.g. a protocol can fix the exponent of all\ntransmitted :class:`EncodedNumber` to some lower bound(s).\n\nArgs:\nnew_exp (int): the desired exponent.\n\nReturns:\nEncodedNumber: Instance with the same value and desired\nexponent.\n\nRaises:\nValueError: You tried to increase the exponent, which can't be\ndone without decryption.", "source": "codesearchnet"}
{"code": "def num_samples(self, dataset_split):\n    \n    return {\n        problem.DatasetSplit.TRAIN: 1000000,\n        problem.DatasetSplit.EVAL: 10000,\n        problem.DatasetSplit.TEST: 10000\n    }[dataset_split]", "docstring": "Determine the dataset sized given a dataset_split.\n\nArgs:\ndataset_split: A problem.DatasetSplit.\n\nReturns:\nThe desired number of samples for this dataset_split.", "source": "juraj-google-style"}
{"code": "def get_stream(self, error_callback=None, live=True):\n    self.join()\n    return Stream(self, error_callback=error_callback, live=live)", "docstring": "Get room stream to listen for messages.\n\nKwargs:\nerror_callback (func): Callback to call when an error occurred (parameters: exception)\nlive (bool): If True, issue a live stream, otherwise an offline stream\n\nReturns:\n:class:`Stream`. Stream", "source": "codesearchnet"}
{"code": "def get_filtered_vts_list(self, vts, vt_filter):\n    if (not vt_filter):\n        raise RequiredArgument('vt_filter: A valid filter is required.')\n    filters = self.parse_filters(vt_filter)\n    if (not filters):\n        return None\n    _vts_aux = vts.copy()\n    for (_element, _oper, _filter_val) in filters:\n        for vt_id in _vts_aux.copy():\n            if (not _vts_aux[vt_id].get(_element)):\n                _vts_aux.pop(vt_id)\n                continue\n            _elem_val = _vts_aux[vt_id].get(_element)\n            _val = self.format_filter_value(_element, _elem_val)\n            if self.filter_operator[_oper](_val, _filter_val):\n                continue\n            else:\n                _vts_aux.pop(vt_id)\n    return _vts_aux", "docstring": "Gets a collection of vulnerability test from the vts dictionary,\nwhich match the filter.\n\nArguments:\nvt_filter (string): Filter to apply to the vts collection.\nvts (dictionary): The complete vts collection.\n\nReturns:\nDictionary with filtered vulnerability tests.", "source": "codesearchnet"}
{"code": "def get_go_server(settings=None):\n    if (not settings):\n        settings = get_settings()\n    return gocd.Server(settings.get('server'), user=settings.get('user'), password=settings.get('password'))", "docstring": "Returns a `gocd.Server` configured by the `settings`\nobject.\n\nArgs:\nsettings: a `gocd_cli.settings.Settings` object.\nDefault: if falsey calls `get_settings`.\n\nReturns:\ngocd.Server: a configured gocd.Server instance", "source": "codesearchnet"}
{"code": "def _GetConfigValue(self, config_parser, section_name, value_name):\n    \n    try:\n      return config_parser.get(section_name, value_name)\n    except configparser.NoOptionError:\n      return None", "docstring": "Retrieves a value from the config parser.\n\nArgs:\nconfig_parser (ConfigParser): configuration parser.\nsection_name (str): name of the section that contains the value.\nvalue_name (str): name of the value.\n\nReturns:\nobject: configuration value or None if the value does not exists.", "source": "juraj-google-style"}
{"code": "def vector(p1, p2):\n    \n    return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])", "docstring": "compute vector between two 3D points\n\nArgs:\np1, p2: indexable objects with\nindices 0, 1, 2 corresponding to 3D cartesian coordinates.\n\nReturns:\n3-vector from p1 - p2", "source": "juraj-google-style"}
{"code": "def is_alive(self) -> bool:\n    return self._thread.is_alive()", "docstring": "Returns whether the thread is alive.\n\nThis method returns True just before the run() method starts\nuntil just after the run() method terminates.\n\nReturns:\nTrue if the thread is alive, otherwise False.", "source": "github-repos"}
{"code": "def _render_objects(self, items, attributes=None, datatype='object'):\n    if (not items):\n        return\n    if (datatype == 'chartdata'):\n        if (not attributes):\n            attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]\n        items = items['rows']\n        indices = {attributes[i]: i for i in range(0, len(attributes))}\n    num_segments = len(self._segments)\n    self._segments.append('<table>')\n    first = True\n    for o in items:\n        if first:\n            first = False\n            if ((datatype == 'dict') and (not attributes)):\n                attributes = list(o.keys())\n            if (attributes is not None):\n                self._segments.append('<tr>')\n                for attr in attributes:\n                    self._segments.append(('<th>%s</th>' % attr))\n                self._segments.append('</tr>')\n        self._segments.append('<tr>')\n        if (attributes is None):\n            self._segments.append(('<td>%s</td>' % HtmlBuilder._format(o)))\n        else:\n            for attr in attributes:\n                if (datatype == 'dict'):\n                    self._segments.append(('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True)))\n                elif (datatype == 'chartdata'):\n                    self._segments.append(('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'], nbsp=True)))\n                else:\n                    self._segments.append(('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr), nbsp=True)))\n        self._segments.append('</tr>')\n    self._segments.append('</table>')\n    if first:\n        self._segments = self._segments[:num_segments]", "docstring": "Renders an HTML table with the specified list of objects.\n\nArgs:\nitems: the iterable collection of objects to render.\nattributes: the optional list of properties or keys to render.\ndatatype: the type of data; one of 'object' for Python objects, 'dict' for a list\nof dictionaries, or 'chartdata' for Google chart data.", "source": "codesearchnet"}
{"code": "def _send_offset_commit_request(self, offsets):\n        \n        assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'\n        assert all(map(lambda k: isinstance(k, TopicPartition), offsets))\n        assert all(map(lambda v: isinstance(v, OffsetAndMetadata),\n                       offsets.values()))\n        if not offsets:\n            log.debug('No offsets to commit')\n            return Future().success(None)\n\n        node_id = self.coordinator()\n        if node_id is None:\n            return Future().failure(Errors.GroupCoordinatorNotAvailableError)\n\n\n        \n        offset_data = collections.defaultdict(dict)\n        for tp, offset in six.iteritems(offsets):\n            offset_data[tp.topic][tp.partition] = offset\n\n        if self._subscription.partitions_auto_assigned():\n            generation = self.generation()\n        else:\n            generation = Generation.NO_GENERATION\n\n        \n        \n        \n        if self.config['api_version'] >= (0, 9) and generation is None:\n            return Future().failure(Errors.CommitFailedError())\n\n        if self.config['api_version'] >= (0, 9):\n            request = OffsetCommitRequest[2](\n                self.group_id,\n                generation.generation_id,\n                generation.member_id,\n                OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,\n                [(\n                    topic, [(\n                        partition,\n                        offset.offset,\n                        offset.metadata\n                    ) for partition, offset in six.iteritems(partitions)]\n                ) for topic, partitions in six.iteritems(offset_data)]\n            )\n        elif self.config['api_version'] >= (0, 8, 2):\n            request = OffsetCommitRequest[1](\n                self.group_id, -1, '',\n                [(\n                    topic, [(\n                        partition,\n                        offset.offset,\n                        -1,\n                        offset.metadata\n                    ) for partition, offset in six.iteritems(partitions)]\n                ) for topic, partitions in six.iteritems(offset_data)]\n            )\n        elif self.config['api_version'] >= (0, 8, 1):\n            request = OffsetCommitRequest[0](\n                self.group_id,\n                [(\n                    topic, [(\n                        partition,\n                        offset.offset,\n                        offset.metadata\n                    ) for partition, offset in six.iteritems(partitions)]\n                ) for topic, partitions in six.iteritems(offset_data)]\n            )\n\n        log.debug(\"Sending offset-commit request with %s for group %s to %s\",\n                  offsets, self.group_id, node_id)\n\n        future = Future()\n        _f = self._client.send(node_id, request)\n        _f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())\n        _f.add_errback(self._failed_request, node_id, request, future)\n        return future", "docstring": "Commit offsets for the specified list of topics and partitions.\n\nThis is a non-blocking call which returns a request future that can be\npolled in the case of a synchronous commit or ignored in the\nasynchronous case.\n\nArguments:\noffsets (dict of {TopicPartition: OffsetAndMetadata}): what should\nbe committed\n\nReturns:\nFuture: indicating whether the commit was successful or not", "source": "juraj-google-style"}
{"code": "def transition_scope(self,\n        state: Sequence[tf.Tensor],\n        action: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:\n        \n        scope = {}\n        scope.update(self.non_fluents_scope())\n        scope.update(self.state_scope(state))\n        scope.update(self.action_scope(action))\n        return scope", "docstring": "Returns the complete transition fluent scope\nfor the current `state` and `action` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def command(self, server_id, command, *args):\n        \n        server = self._storage[server_id]\n        try:\n            if args:\n                result = getattr(server, command)(*args)\n            else:\n                result = getattr(server, command)()\n        except AttributeError:\n            raise ValueError(\"Cannot issue the command %r to server %s\"\n                             % (command, server_id))\n        self._storage[server_id] = server\n        return result", "docstring": "run command\nArgs:\nserver_id - server identity\ncommand - command which apply to server", "source": "juraj-google-style"}
{"code": "def __nonzero__(self):\n    self._disallow_bool_casting()", "docstring": "Dummy method to prevent a tensor from being used as a Python `bool`.\n\nThis is the Python 2.x counterpart to `__bool__()` above.\n\nRaises:\n`TypeError`.", "source": "github-repos"}
{"code": "def prettyprint_cfg_node(node, decorate_after_node=0, full=False):\n    if node.id <= decorate_after_node:\n        return repr(node) + f' [{len(node.bindings)} bindings]'\n    if full:\n        name = lambda x: getattr(x, 'name', str(x))\n    else:\n        name = str\n    bindings = collections.defaultdict(list)\n    for b in node.bindings:\n        bindings[b.variable.id].append(name(b.data))\n    b = ', '.join(['%d:%s' % (k, '|'.join(v)) for k, v in sorted(bindings.items())])\n    return repr(node) + ' [' + b + ']'", "docstring": "A reasonably compact representation of all the bindings at a node.\n\nArgs:\nnode: The node to prettyprint.\ndecorate_after_node: Don't print bindings unless node_id > this.\nfull: Print the full string representation of a binding's data\n\nReturns:\nA prettyprinted node.", "source": "github-repos"}
{"code": "def _read_output(self, stream, callback, output_file):\n    if (((callback is None) and (output_file is None)) or stream.closed):\n        return False\n    line = stream.readline()\n    if line:\n        if (callback is not None):\n            callback(line.decode(), self._data, self._store, self._signal, self._context)\n        if (output_file is not None):\n            output_file.write(line)\n        return True\n    else:\n        return False", "docstring": "Read the output of the process, executed the callback and save the output.\n\nArgs:\nstream: A file object pointing to the output stream that should be read.\ncallback(callable, None): A callback function that is called for each new\nline of output.\noutput_file: A file object to which the full output is written.\n\nReturns:\nbool: True if a line was read from the output, otherwise False.", "source": "codesearchnet"}
{"code": "def merge_nodes(self, n1: str, n2: str, same_polarity: bool = True):\n        \n\n        for p in self.predecessors(n1):\n            for st in self[p][n1][\"InfluenceStatements\"]:\n                if not same_polarity:\n                    st.obj_delta[\"polarity\"] = -st.obj_delta[\"polarity\"]\n                st.obj.db_refs[\"UN\"][0] = (n2, st.obj.db_refs[\"UN\"][0][1])\n\n            if not self.has_edge(p, n2):\n                self.add_edge(p, n2)\n                self[p][n2][\"InfluenceStatements\"] = self[p][n1][\n                    \"InfluenceStatements\"\n                ]\n\n            else:\n                self[p][n2][\"InfluenceStatements\"] += self[p][n1][\n                    \"InfluenceStatements\"\n                ]\n\n        for s in self.successors(n1):\n            for st in self.edges[n1, s][\"InfluenceStatements\"]:\n                if not same_polarity:\n                    st.subj_delta[\"polarity\"] = -st.subj_delta[\"polarity\"]\n                st.subj.db_refs[\"UN\"][0] = (n2, st.subj.db_refs[\"UN\"][0][1])\n\n            if not self.has_edge(n2, s):\n                self.add_edge(n2, s)\n                self[n2][s][\"InfluenceStatements\"] = self[n1][s][\n                    \"InfluenceStatements\"\n                ]\n            else:\n                self[n2][s][\"InfluenceStatements\"] += self[n1][s][\n                    \"InfluenceStatements\"\n                ]\n\n        self.remove_node(n1)", "docstring": "Merge node n1 into node n2, with the option to specify relative\npolarity.\n\nArgs:\nn1\nn2\nsame_polarity", "source": "juraj-google-style"}
{"code": "def unpack_wstring(self, offset, length):\n        \n        start = self._offset + offset\n        end = self._offset + offset + 2 * length\n        try:\n            return bytes(self._buf[start:end]).decode(\"utf16\")\n        except AttributeError:  \n            return bytes(self._buf[start:end]).decode('utf16')", "docstring": "Returns a string from the relative offset with the given length,\nwhere each character is a wchar (2 bytes)\nArguments:\n- `offset`: The relative offset from the start of the block.\n- `length`: The length of the string.\nThrows:\n- `UnicodeDecodeError`", "source": "juraj-google-style"}
{"code": "def get_policy(self, name):\n    address = _create_policy_address(name)\n    policy_list_bytes = None\n    try:\n        policy_list_bytes = self._state_view.get(address=address)\n    except KeyError:\n        return None\n    if (policy_list_bytes is not None):\n        policy_list = _create_from_bytes(policy_list_bytes, identity_pb2.PolicyList)\n        for policy in policy_list.policies:\n            if (policy.name == name):\n                return policy\n    return None", "docstring": "Get a single Policy by name.\n\nArgs:\nname (str): The name of the Policy.\n\nReturns:\n(:obj:`Policy`) The Policy that matches the name.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        self.Type = reader.ReadByte()\n        self.Hashes = reader.ReadHashes()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def extract_sub_graph(graph_def, dest_nodes):\n    if not isinstance(graph_def, graph_pb2.GraphDef):\n        raise TypeError(f'graph_def must be a graph_pb2.GraphDef proto, but got type {type(graph_def)}.')\n    if isinstance(dest_nodes, str):\n        raise TypeError(f'dest_nodes must be an iterable of strings, but got type {type(dest_nodes)}.')\n    name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(graph_def)\n    _assert_nodes_are_present(name_to_node, dest_nodes)\n    nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name)\n    nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: name_to_seq_num[n])\n    out = graph_pb2.GraphDef()\n    for n in nodes_to_keep_list:\n        out.node.extend([copy.deepcopy(name_to_node[n])])\n    out.library.CopyFrom(graph_def.library)\n    out.versions.CopyFrom(graph_def.versions)\n    return out", "docstring": "Extract the subgraph that can reach any of the nodes in 'dest_nodes'.\n\nArgs:\ngraph_def: A graph_pb2.GraphDef proto.\ndest_nodes: An iterable of strings specifying the destination node names.\nReturns:\nThe GraphDef of the sub-graph.\n\nRaises:\nTypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.", "source": "github-repos"}
{"code": "def get(object_ids):\n    \n    if isinstance(object_ids, (tuple, np.ndarray)):\n        return ray.get(list(object_ids))\n    elif isinstance(object_ids, dict):\n        keys_to_get = [\n            k for k, v in object_ids.items() if isinstance(v, ray.ObjectID)\n        ]\n        ids_to_get = [\n            v for k, v in object_ids.items() if isinstance(v, ray.ObjectID)\n        ]\n        values = ray.get(ids_to_get)\n\n        result = object_ids.copy()\n        for key, value in zip(keys_to_get, values):\n            result[key] = value\n        return result\n    else:\n        return ray.get(object_ids)", "docstring": "Get a single or a collection of remote objects from the object store.\n\nThis method is identical to `ray.get` except it adds support for tuples,\nndarrays and dictionaries.\n\nArgs:\nobject_ids: Object ID of the object to get, a list, tuple, ndarray of\nobject IDs to get or a dict of {key: object ID}.\n\nReturns:\nA Python object, a list of Python objects or a dict of {key: object}.", "source": "juraj-google-style"}
{"code": "def execute(self, container: Container, test: TestCase, verbose: bool=False) -> TestOutcome:\n    bug = self.__installation.bugs[container.bug]\n    response = self.command(container, cmd=test.command, context=test.context, stderr=True, time_limit=test.time_limit, kill_after=test.kill_after, verbose=verbose)\n    passed = test.oracle.check(response)\n    return TestOutcome(response, passed)", "docstring": "Runs a specified test inside a given container.\n\nReturns:\nthe outcome of the test execution.", "source": "codesearchnet"}
{"code": "def _expand_variable_match(positional_vars, named_vars, match):\n    positional = match.group('positional')\n    name = match.group('name')\n    if (name is not None):\n        try:\n            return six.text_type(named_vars[name])\n        except KeyError:\n            raise ValueError(\"Named variable '{}' not specified and needed by template `{}` at position {}\".format(name, match.string, match.start()))\n    elif (positional is not None):\n        try:\n            return six.text_type(positional_vars.pop(0))\n        except IndexError:\n            raise ValueError('Positional variable not specified and needed by template `{}` at position {}'.format(match.string, match.start()))\n    else:\n        raise ValueError('Unknown template expression {}'.format(match.group(0)))", "docstring": "Expand a matched variable with its value.\n\nArgs:\npositional_vars (list): A list of positonal variables. This list will\nbe modified.\nnamed_vars (dict): A dictionary of named variables.\nmatch (re.Match): A regular expression match.\n\nReturns:\nstr: The expanded variable to replace the match.\n\nRaises:\nValueError: If a positional or named variable is required by the\ntemplate but not specified or if an unexpected template expression\nis encountered.", "source": "codesearchnet"}
{"code": "def to_df(self, **kwargs):\n        \n\n        return pd.read_sql(sql=self.statement, con=self.session.bind, **kwargs)", "docstring": "[pandas.read_sql]\n\nArguments:\nQuery {[type]} -- [description]\n\nReturns:\n[pd.DataFrame or generate] -- [description]", "source": "juraj-google-style"}
{"code": "def start_prompt(self, message, text_input=False, cli_color=''):\n    \n    with self._cond:\n      if self._prompt:\n        raise MultiplePromptsError\n      prompt_id = uuid.uuid4().hex\n      _LOG.debug('Displaying prompt (%s): \"%s\"%s', prompt_id, message,\n                 ', Expects text input.' if text_input else '')\n\n      self._response = None\n      self._prompt = Prompt(\n          id=prompt_id, message=message, text_input=text_input)\n      if sys.stdin.isatty():\n        self._console_prompt = ConsolePrompt(\n            message, functools.partial(self.respond, prompt_id), cli_color)\n        self._console_prompt.start()\n\n      self.notify_update()\n      return prompt_id", "docstring": "Display a prompt.\n\nArgs:\nmessage: A string to be presented to the user.\ntext_input: A boolean indicating whether the user must respond with text.\ncli_color: An ANSI color code, or the empty string.\n\nRaises:\nMultiplePromptsError: There was already an existing prompt.\n\nReturns:\nA string uniquely identifying the prompt.", "source": "juraj-google-style"}
{"code": "def get_keys(self, alias_name, key_format):\n        \n        uri = self.URI + \"/keys/\" + alias_name + \"?format=\" + key_format\n        return self._client.get(uri)", "docstring": "Retrieves the contents of PKCS12 file in the format specified.\nThis PKCS12 formatted file contains both the certificate as well as the key file data.\nValid key formats are Base64 and PKCS12.\n\nArgs:\nalias_name: Key pair associated with the RabbitMQ\nkey_format: Valid key formats are Base64 and PKCS12.\nReturns:\ndict: RabbitMQ certificate", "source": "juraj-google-style"}
{"code": "def torque_off(self):\n        \n        data = []\n        data.append(0x0A)\n        data.append(self.servoid)\n        data.append(RAM_WRITE_REQ)\n        data.append(TORQUE_CONTROL_RAM)\n        data.append(0x01)\n        data.append(0x00)\n        send_data(data)", "docstring": "Set the torques of Herkulex to zero\n\nIn this mode, position control and velocity control\nwill not work, enable torque before that. Also the\nservo shaft is freely movable\n\nArgs:\nnone", "source": "juraj-google-style"}
{"code": "def _update_sample_weight_modes(self, sample_weights=None):\n    if not self._is_compiled:\n        return\n    if sample_weights and any((s is not None for s in sample_weights)):\n        for endpoint in self._training_endpoints:\n            endpoint.sample_weight_mode = endpoint.sample_weight_mode or 'samplewise'\n    else:\n        for endpoint in self._training_endpoints:\n            endpoint.sample_weight_mode = None", "docstring": "Updates sample weight modes based on training/eval inputs.\n\nSample weight placeholders will be created for all or no outputs\nbased on whether sample_weight is provided for any output.\n\nIf model contains `_sample_weight_modes` we check if the input\n`sample_weights` corresponds to the sample weight modes.\n1. Set sample weight mode to be 'temporal' for output i, if `compile`\nsample_weight_mode was set to `temporal` and sample weight inputs\nare given for one or more outputs.\n2. Set sample weight mode to be 'samplewise' for output i, if `compile`\nsample_weight_mode was not set and sample weight inputs are given for\none or more outputs.\n3. Reset sample weight mode to None for output i if sample weight mode\nwas set but there is no sample weight input.\n\nArgs:\nsample_weights: List of sample weights of the same length as model outputs\nor None.", "source": "github-repos"}
{"code": "def noisy_wrap(__func: Callable) -> Callable:\n    \n    \n    def wrapper(*args, **kwargs):\n        DebugPrint.enable()\n        try:\n            __func(*args, **kwargs)\n        finally:\n            DebugPrint.disable()\n    return wrapper", "docstring": "Decorator to enable DebugPrint for a given function.\n\nArgs:\n__func: Function to wrap\nReturns:\nWrapped function", "source": "juraj-google-style"}
{"code": "def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):\n    raster_r = RasterUtilClass.read_raster(rasterfile)\n    xsize = raster_r.nCols\n    ysize = raster_r.nRows\n    nodata_value = raster_r.noDataValue\n    srs = raster_r.srs\n    x_min = raster_r.xMin\n    y_max = raster_r.yMax\n    dx = raster_r.dx\n    data = raster_r.data\n    if (not keep_nodata):\n        i_min = (ysize - 1)\n        i_max = 0\n        j_min = (xsize - 1)\n        j_max = 0\n        for i in range(ysize):\n            for j in range(xsize):\n                if (abs((data[i][j] - nodata_value)) > DELTA):\n                    i_min = min(i, i_min)\n                    i_max = max(i, i_max)\n                    j_min = min(j, j_min)\n                    j_max = max(j, j_max)\n        y_size_mask = ((i_max - i_min) + 1)\n        x_size_mask = ((j_max - j_min) + 1)\n        x_min_mask = (x_min + (j_min * dx))\n        y_max_mask = (y_max - (i_min * dx))\n    else:\n        y_size_mask = ysize\n        x_size_mask = xsize\n        x_min_mask = x_min\n        y_max_mask = y_max\n        i_min = 0\n        j_min = 0\n    print(('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask)))\n    mask = numpy.zeros((y_size_mask, x_size_mask))\n    for i in range(y_size_mask):\n        for j in range(x_size_mask):\n            if (abs((data[(i + i_min)][(j + j_min)] - nodata_value)) > DELTA):\n                mask[i][j] = 1\n            else:\n                mask[i][j] = DEFAULT_NODATA\n    mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, (- dx)]\n    RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask, mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)\n    return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs)", "docstring": "Generate mask data from a given raster data.\n\nArgs:\nrasterfile: raster file path.\noutmaskfile: output mask file path.\n\nReturns:\nRaster object of mask data.", "source": "codesearchnet"}
{"code": "def delete(paths):\n    if isinstance(paths, str):\n        raise BeamIOError('Delete passed string argument instead of list: %s' % paths)\n    if len(paths) == 0:\n        return\n    filesystem = FileSystems.get_filesystem(paths[0])\n    return filesystem.delete(paths)", "docstring": "Deletes files or directories at the provided paths.\nDirectories will be deleted recursively.\n\nArgs:\npaths: list of paths that give the file objects to be deleted\n\nRaises:\n``BeamIOError``: if any of the delete operations fail", "source": "github-repos"}
{"code": "def just_load_srno(srno, prm_filename=None):\n    \n    from cellpy import dbreader, filefinder\n    print(\"just_load_srno: srno: %i\" % srno)\n\n    \n    \n    \n    \n    \n\n    print(\"just_load_srno: making class and setting prms\")\n    d = CellpyData()\n\n    \n    print()\n    print(\"just_load_srno: starting to load reader\")\n    \n    reader = dbreader.Reader()\n    print(\"------ok------\")\n\n    run_name = reader.get_cell_name(srno)\n    print(\"just_load_srno: run_name:\")\n    print(run_name)\n\n    m = reader.get_mass(srno)\n    print(\"just_load_srno: mass: %f\" % m)\n    print()\n\n    \n    print(\"just_load_srno: getting file_names\")\n    raw_files, cellpy_file = filefinder.search_for_files(run_name)\n    print(\"raw_files:\", raw_files)\n    print(\"cellpy_file:\", cellpy_file)\n\n    print(\"just_load_srno: running loadcell\")\n    d.loadcell(raw_files, cellpy_file, mass=m)\n    print(\"------ok------\")\n\n    \n    print(\"just_load_srno: getting step_numbers for charge\")\n    v = d.get_step_numbers(\"charge\")\n    print(v)\n\n    print()\n    print(\"just_load_srno: finding C-rates\")\n    d.find_C_rates(v, silent=False)\n\n    print()\n    print(\"just_load_srno: OK\")\n    return True", "docstring": "Simply load an dataset based on serial number (srno).\n\nThis convenience function reads a dataset based on a serial number. This\nserial number (srno) must then be defined in your database. It is mainly\nused to check that things are set up correctly.\n\nArgs:\nprm_filename: name of parameter file (optional).\nsrno (int): serial number\n\nExample:\n>>> srno = 918\n>>> just_load_srno(srno)\nsrno: 918\nread prms\n....", "source": "juraj-google-style"}
{"code": "def build_transcript(transcript_info, build='37'):\n    try:\n        transcript_id = transcript_info['ensembl_transcript_id']\n    except KeyError:\n        raise KeyError('Transcript has to have ensembl id')\n    build = build\n    is_primary = transcript_info.get('is_primary', False)\n    refseq_id = transcript_info.get('refseq_id')\n    refseq_identifiers = transcript_info.get('refseq_identifiers')\n    try:\n        chrom = transcript_info['chrom']\n    except KeyError:\n        raise KeyError('Transcript has to have a chromosome')\n    try:\n        start = int(transcript_info['transcript_start'])\n    except KeyError:\n        raise KeyError('Transcript has to have start')\n    except TypeError:\n        raise TypeError('Transcript start has to be integer')\n    try:\n        end = int(transcript_info['transcript_end'])\n    except KeyError:\n        raise KeyError('Transcript has to have end')\n    except TypeError:\n        raise TypeError('Transcript end has to be integer')\n    try:\n        hgnc_id = int(transcript_info['hgnc_id'])\n    except KeyError:\n        raise KeyError('Transcript has to have a hgnc id')\n    except TypeError:\n        raise TypeError('hgnc id has to be integer')\n    transcript_obj = HgncTranscript(transcript_id=transcript_id, hgnc_id=hgnc_id, chrom=chrom, start=start, end=end, is_primary=is_primary, refseq_id=refseq_id, refseq_identifiers=refseq_identifiers, build=build)\n    for key in list(transcript_obj):\n        if (transcript_obj[key] is None):\n            transcript_obj.pop(key)\n    return transcript_obj", "docstring": "Build a hgnc_transcript object\n\nArgs:\ntranscript_info(dict): Transcript information\n\nReturns:\ntranscript_obj(HgncTranscript)\n{\ntranscript_id: str, required\nhgnc_id: int, required\nbuild: str, required\nrefseq_id: str,\nchrom: str, required\nstart: int, required\nend: int, required\nis_primary: bool\n}", "source": "codesearchnet"}
{"code": "def _definition_from_example(example):\n    assert isinstance(example, dict)\n\n    def _has_simple_type(value):\n        accepted = (str, int, float, bool)\n        return isinstance(value, accepted)\n    definition = {'type': 'object', 'properties': {}}\n    for (key, value) in example.items():\n        if (not _has_simple_type(value)):\n            raise Exception('Not implemented yet')\n        ret_value = None\n        if isinstance(value, str):\n            ret_value = {'type': 'string'}\n        elif isinstance(value, int):\n            ret_value = {'type': 'integer', 'format': 'int64'}\n        elif isinstance(value, float):\n            ret_value = {'type': 'number', 'format': 'double'}\n        elif isinstance(value, bool):\n            ret_value = {'type': 'boolean'}\n        else:\n            raise Exception('Not implemented yet')\n        definition['properties'][key] = ret_value\n    return definition", "docstring": "Generates a swagger definition json from a given example\nWorks only for simple types in the dict\n\nArgs:\nexample: The example for which we want a definition\nType is DICT\n\nReturns:\nA dict that is the swagger definition json", "source": "codesearchnet"}
{"code": "def get_cross_attention_token_mask(input_ids: List[int], image_token_id: int) -> List[List[int]]:\n    image_token_locations = [i for i, token in enumerate(input_ids) if token == image_token_id]\n    if len(image_token_locations) == 0:\n        return []\n    if len(image_token_locations) == 1:\n        return [[image_token_locations[0], -1]]\n    vision_masks = [[loc1, loc2] for loc1, loc2 in zip(image_token_locations[:-1], image_token_locations[1:])]\n    vision_masks.append([image_token_locations[-1], len(input_ids)])\n    last_mask_end = vision_masks[-1][1]\n    for vision_mask in vision_masks[::-1]:\n        if vision_mask[0] == vision_mask[1] - 1:\n            vision_mask[1] = last_mask_end\n        last_mask_end = vision_mask[1]\n    return vision_masks", "docstring": "Generate a cross-attention token mask for image tokens in the input sequence.\n\nThis function identifies the positions of image tokens in the input sequence and creates\na mask that defines which subsequent tokens each image token should attend to.\n\nArgs:\ninput_ids (List[int]): A list of token ids representing the input sequence.\nimage_token_id (int): The id of the token used to represent images in the sequence.\n\nReturns:\nList[List[int]]: A list of [start, end] pairs, where each pair represents the range\nof tokens an image token should attend to.\n\nNotes:\n- If no image tokens are present, an empty list is returned.\n- For a single image token, it attends to all subsequent tokens until the end of the sequence.\n- For multiple image tokens, each attends to tokens up to the next image token or the end of the sequence.\n- Consecutive image tokens are treated as a group and attend to all subsequent tokens together.", "source": "github-repos"}
{"code": "def __init__(self, data_type=None):\n    \n    super(EventData, self).__init__()\n    self.data_type = data_type\n    self.offset = None\n    self.query = None", "docstring": "Initializes an event data attribute container.\n\nArgs:\ndata_type (Optional[str]): event data type indicator.", "source": "juraj-google-style"}
{"code": "def _ParseCredentialOptions(self, options):\n    \n    credentials = getattr(options, 'credentials', [])\n    if not isinstance(credentials, list):\n      raise errors.BadConfigOption('Unsupported credentials value.')\n\n    for credential_string in credentials:\n      credential_type, _, credential_data = credential_string.partition(':')\n      if not credential_type or not credential_data:\n        raise errors.BadConfigOption(\n            'Badly formatted credential: {0:s}.'.format(credential_string))\n\n      if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES:\n        raise errors.BadConfigOption(\n            'Unsupported credential type for: {0:s}.'.format(\n                credential_string))\n\n      if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:\n        try:\n          credential_data = credential_data.decode('hex')\n        except TypeError:\n          raise errors.BadConfigOption(\n              'Unsupported credential data for: {0:s}.'.format(\n                  credential_string))\n\n      self._credentials.append((credential_type, credential_data))", "docstring": "Parses the credential options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def uniprot_ec(uniprot_id):\n    r = requests.post(('http:\n    ec = r.content.decode('utf-8').splitlines()[1]\n    if (len(ec) == 0):\n        ec = None\n    return ec", "docstring": "Retrieve the EC number annotation for a UniProt ID.\n\nArgs:\nuniprot_id: Valid UniProt ID\n\nReturns:", "source": "codesearchnet"}
{"code": "def validate_per_replica_inputs(distribution_strategy, x):\n    per_replica_list = nest.flatten(x, expand_composites=True)\n    x_values_list = []\n    for x in per_replica_list:\n        x_values = distribution_strategy.unwrap(x)\n        for value in x_values:\n            if not tensor_util.is_tf_type(value):\n                raise ValueError('Dataset input to the model should be tensors instead they are of type {}'.format(type(value)))\n        if not context.executing_eagerly():\n            validate_all_tensor_shapes(x, x_values)\n        validate_all_tensor_types(x, x_values)\n        x_values_list.append(x_values[0])\n    return x_values_list", "docstring": "Validates PerReplica dataset input list.\n\nArgs:\ndistribution_strategy: The current DistributionStrategy used to call\n`fit`, `evaluate` and `predict`.\nx: A list of PerReplica objects that represent the input or\ntarget values.\n\nReturns:\nList containing the first element of each of the PerReplica objects in\nthe input list.\n\nRaises:\nValueError: If any of the objects in the `per_replica_list` is not a tensor.", "source": "github-repos"}
{"code": "def extract_example_parser_configuration(parse_example_op, sess):\n    if parse_example_op.type == 'ParseExample':\n        return _extract_from_parse_example(parse_example_op, sess)\n    elif parse_example_op.type == 'ParseExampleV2':\n        return _extract_from_parse_example_v2(parse_example_op, sess)\n    else:\n        raise ValueError(f'Found unexpected type when parsing example. Expected `ParseExample` object. Received type: {parse_example_op.type}')", "docstring": "Returns an ExampleParserConfig proto.\n\nArgs:\nparse_example_op: A ParseExample or ParseExampleV2 `Operation`\nsess: A tf.compat.v1.Session needed to obtain some configuration values.\nReturns:\nA ExampleParserConfig proto.\n\nRaises:\nValueError: If attributes are inconsistent.", "source": "github-repos"}
{"code": "def _get_or_create_eval_step():\n    graph = ops.get_default_graph()\n    eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)\n    if len(eval_steps) == 1:\n        return eval_steps[0]\n    elif len(eval_steps) > 1:\n        raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')\n    else:\n        counter = variable_scope.get_variable('eval_step', shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer(), trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])\n        return counter", "docstring": "Gets or creates the eval step `Tensor`.\n\nReturns:\nA `Tensor` representing a counter for the evaluation step.\n\nRaises:\nValueError: If multiple `Tensors` have been added to the\n`tf.GraphKeys.EVAL_STEP` collection.", "source": "github-repos"}
{"code": "def execute_wait(self, cmd, walltime=2, envs={}):\n        \n\n        \n        stdin, stdout, stderr = self.ssh_client.exec_command(\n            self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime\n        )\n        \n        exit_status = stdout.channel.recv_exit_status()\n        return exit_status, stdout.read().decode(\"utf-8\"), stderr.read().decode(\"utf-8\")", "docstring": "Synchronously execute a commandline string on the shell.\n\nArgs:\n- cmd (string) : Commandline string to execute\n- walltime (int) : walltime in seconds\n\nKwargs:\n- envs (dict) : Dictionary of env variables\n\nReturns:\n- retcode : Return code from the execution, -1 on fail\n- stdout  : stdout string\n- stderr  : stderr string\n\nRaises:\nNone.", "source": "juraj-google-style"}
{"code": "def set_hostname(hostname):\n    with salt.utils.winapi.Com():\n        conn = wmi.WMI()\n        comp = conn.Win32_ComputerSystem()[0]\n    return comp.Rename(Name=hostname)", "docstring": "Set the hostname of the windows minion, requires a restart before this will\nbe updated.\n\n.. versionadded:: 2016.3.0\n\nArgs:\nhostname (str): The hostname to set\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_hostname newhostname", "source": "codesearchnet"}
{"code": "def set_type(self, agent_type):\n        \n        type_str = SpawnAgentCommand.__type_keys[agent_type]\n        self.add_string_parameters(type_str)", "docstring": "Set the type of agent to spawn in Holodeck. Currently accepted agents are: DiscreteSphereAgent, UAVAgent,\nand AndroidAgent.\n\nArgs:\nagent_type (str): The type of agent to spawn.", "source": "juraj-google-style"}
{"code": "def assert_raises_regex(expected_exception, expected_regex, extras=None, *args, **kwargs):\n    context = _AssertRaisesContext(expected_exception, expected_regex, extras=extras)\n    return context", "docstring": "Assert that an exception is raised when a function is called.\n\nIf no exception is raised, test fail. If an exception is raised but not\nof the expected type, the exception is let through. If an exception of the\nexpected type is raised but the error message does not match the\nexpected_regex, test fail.\n\nThis should only be used as a context manager:\nwith assert_raises(Exception):\nfunc()\n\nArgs:\nexpected_exception: An exception class that is expected to be\nraised.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def data_group_association(self, xid):\n    groups = []\n    group_data = None\n    if (self.groups.get(xid) is not None):\n        group_data = self.groups.get(xid)\n        del self.groups[xid]\n    elif (self.groups_shelf.get(xid) is not None):\n        group_data = self.groups_shelf.get(xid)\n        del self.groups_shelf[xid]\n    if (group_data is not None):\n        group_data = self.data_group_type(group_data)\n        groups.append(group_data)\n        for assoc_xid in group_data.get('associatedGroupXid', []):\n            groups.extend(self.data_group_association(assoc_xid))\n    return groups", "docstring": "Return group dict array following all associations.\n\nArgs:\nxid (str): The xid of the group to retrieve associations.\n\nReturns:\nlist: A list of group dicts.", "source": "codesearchnet"}
{"code": "def get_symbol(self, symbol):\n        \n\n        self._ensure_symbols_loaded()\n        if type(symbol) is int:\n            return self._symbols_by_index[symbol]\n        else:\n            return self._symbols_by_name[symbol]", "docstring": "Get a specific symbol by index or name.\n\nArgs:\nsymbol(int or str): The index or name of the symbol to return.\n\nReturns:\nELF.Symbol: The symbol.\n\nRaises:\nKeyError: The requested symbol does not exist.", "source": "juraj-google-style"}
{"code": "def update(self, uid: int, flag_set: Iterable[Flag],\n               op: FlagOp = FlagOp.REPLACE) -> FrozenSet[Flag]:\n        \n        orig_set = self._flags.get(uid, frozenset())\n        new_flags = op.apply(orig_set, self & flag_set)\n        if new_flags:\n            self._flags[uid] = new_flags\n        else:\n            self._flags.pop(uid, None)\n        return new_flags", "docstring": "Update the flags for the session, returning the resulting flags.\n\nArgs:\nuid: The message UID value.\nflag_set: The set of flags for the update operation.\nop: The type of update.", "source": "juraj-google-style"}
{"code": "def _UpdateEtag(self, response):\n    etag = response.headers.get('etag', self.etag)\n    etag_updated = (self.etag != etag)\n    self.etag = etag\n    return etag_updated", "docstring": "Update the etag from an API response.\n\nArgs:\nresponse: HTTP response with a header field.\n\nReturns:\nbool, True if the etag in the response header updated.", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    number_of_extraction_workers = cls._ParseNumericOption(\n        options, 'workers', default_value=0)\n\n    if number_of_extraction_workers < 0:\n      raise errors.BadConfigOption(\n          'Invalid number of extraction workers value cannot be negative.')\n\n    worker_memory_limit = cls._ParseNumericOption(\n        options, 'worker_memory_limit')\n\n    if worker_memory_limit and worker_memory_limit < 0:\n      raise errors.BadConfigOption(\n          'Invalid worker memory limit value cannot be negative.')\n\n    setattr(\n        configuration_object, '_number_of_extraction_workers',\n        number_of_extraction_workers)\n    setattr(configuration_object, '_worker_memory_limit', worker_memory_limit)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"}
{"code": "def array_view(array, slicing=None, mapping=None):\n    dtype = translate_dtype(array.dtype)\n    sliced_array = (array[command_parser._parse_slices(slicing)] if slicing else array)\n    if (np.isscalar(sliced_array) and (str(dtype) == 'string')):\n        ndims = len(array.shape)\n        slice_shape = []\n        for _ in range(ndims):\n            sliced_array = [sliced_array]\n            slice_shape.append(1)\n        return (dtype, tuple(slice_shape), sliced_array)\n    else:\n        shape = sliced_array.shape\n        if (mapping == 'image/png'):\n            if (len(sliced_array.shape) == 2):\n                return (dtype, shape, array_to_base64_png(sliced_array))\n            elif (len(sliced_array.shape) == 3):\n                raise NotImplementedError('image/png mapping for 3D array has not been implemented')\n            else:\n                raise ValueError(('Invalid rank for image/png mapping: %d' % len(sliced_array.shape)))\n        elif (mapping == 'health-pill'):\n            health_pill = health_pill_calc.calc_health_pill(array)\n            return (dtype, shape, health_pill)\n        elif ((mapping is None) or (mapping == '') or (mapping.lower() == 'none')):\n            return (dtype, shape, sliced_array.tolist())\n        else:\n            raise ValueError(('Invalid mapping: %s' % mapping))", "docstring": "View a slice or the entirety of an ndarray.\n\nArgs:\narray: The input array, as an numpy.ndarray.\nslicing: Optional slicing string, e.g., \"[:, 1:3, :]\".\nmapping: Optional mapping string. Supported mappings:\n`None` or case-insensitive `'None'`: Unmapped nested list.\n`'image/png'`: Image encoding of a 2D sliced array or 3D sliced array\nwith 3 as the last dimension. If the sliced array is not 2D or 3D with\n3 as the last dimension, a `ValueError` will be thrown.\n`health-pill`: A succinct summary of the numeric values of a tensor.\nSee documentation in [`health_pill_calc.py`] for more details.\n\nReturns:\n1. dtype as a `str`.\n2. shape of the sliced array, as a tuple of `int`s.\n3. the potentially sliced values, as a nested `list`.", "source": "codesearchnet"}
{"code": "def trace(self, graph_element_name):\n    self._depth_count += 1\n    node_name = get_node_name(graph_element_name)\n    if node_name == self._destination_node_name:\n        raise GraphTracingReachedDestination()\n    if node_name in self._skip_node_names:\n        return\n    if node_name in self._visited_nodes:\n        return\n    self._visited_nodes.append(node_name)\n    for input_list in self._input_lists:\n        if node_name not in input_list:\n            continue\n        for inp in input_list[node_name]:\n            if get_node_name(inp) in self._visited_nodes:\n                continue\n            self._inputs.append(inp)\n            self._depth_list.append(self._depth_count)\n            self.trace(inp)\n    self._depth_count -= 1", "docstring": "Trace inputs.\n\nArgs:\ngraph_element_name: Name of the node or an output tensor of the node, as a\nstr.\n\nRaises:\nGraphTracingReachedDestination: if destination_node_name of this tracer\nobject is not None and the specified node is reached.", "source": "github-repos"}
{"code": "def pretty_dump(fn):\n    \n    @wraps(fn)\n    def pretty_dump_wrapper(*args, **kwargs):\n        response.content_type = \"application/json; charset=utf-8\"\n\n        return json.dumps(\n            fn(*args, **kwargs),\n\n            \n            indent=4,\n            separators=(',', ': ')\n        )\n\n    return pretty_dump_wrapper", "docstring": "Decorator used to output prettified JSON.\n\n``response.content_type`` is set to ``application/json; charset=utf-8``.\n\nArgs:\nfn (fn pointer): Function returning any basic python data structure.\n\nReturns:\nstr: Data converted to prettified JSON.", "source": "juraj-google-style"}
{"code": "def dispatch(self, event):\n        \n        if event.is_directory:\n            return\n        paths = []\n        if has_attribute(event, 'dest_path'):\n            paths.append(os.path.realpath(\n                unicode_paths.decode(event.dest_path)))\n        if event.src_path:\n            paths.append(os.path.realpath(\n                unicode_paths.decode(event.src_path)))\n        paths = [p for p in paths\n                 if not p.startswith(os.path.realpath(self.vcs.repository_dir()))\n                 and not self.vcs.path_is_ignored(p)]\n\n        if len(paths) > 0:\n            super(VcsEventHandler, self).dispatch(event)", "docstring": "Only dispatch if the event does not correspond to an ignored file.\nArgs:\nevent (watchdog.events.FileSystemEvent)", "source": "juraj-google-style"}
{"code": "def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):\n    \n    logger = getLogger('decode.utils.ndarray.psd')\n\n    if overlap_half:\n        step = int(len(data) / (ndivide + 1))\n        size = step * 2\n    else:\n        step = int(len(data) / ndivide)\n        size = step\n\n    if bin(len(data)).count('1') != 1:\n        logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))\n    size = int(len(data) / ndivide)\n    if bin(size).count('1') != 1.:\n        if overlap_half:\n            logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))\n        else:\n            logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))\n    psd = np.zeros(size)\n    T   = (size - 1) * dt\n    vs  = 1 / dt\n    vk_ = fftfreq(size, dt)\n    vk  = vk_[np.where(vk_ >= 0)]\n\n    for i in range(ndivide):\n        d = data[i * step:i * step + size]\n        if window is None:\n            w    = np.ones(size)\n            corr = 1.0\n        else:\n            w    = window(size)\n            corr = np.mean(w**2)\n        psd = psd + 2 * (np.abs(fft(d * w)))**2 / size * dt / corr\n\n    return vk, psd[:len(vk)] / ndivide", "docstring": "Calculate power spectrum density of data.\n\nArgs:\ndata (np.ndarray): Input data.\ndt (float): Time between each data.\nndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).\nax (matplotlib.axes): Axis you want to plot on.\ndoplot (bool): Plot how averaging works.\noverlap_half (bool): Split data to half-overlapped regions.\n\nReturns:\nvk (np.ndarray): Frequency.\npsd (np.ndarray): PSD", "source": "juraj-google-style"}
{"code": "def __init__(self, compression_method=None, parent=None, **kwargs):\n    \n    if not compression_method or not parent:\n      raise ValueError('Missing compression method or parent value.')\n\n    super(CompressedStreamPathSpec, self).__init__(parent=parent, **kwargs)\n    self.compression_method = compression_method", "docstring": "Initializes a path specification.\n\nNote that the compressed stream path specification must have a parent.\n\nArgs:\ncompression_method (Optional[str]): method used to the compress the data.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when compression method or parent are not set.", "source": "juraj-google-style"}
{"code": "def _from_string(cls, serialized):\n        \n        if ':' not in serialized:\n            raise InvalidKeyError(\n                \"BlockTypeKeyV1 keys must contain ':' separating the block family from the block_type.\", serialized)\n        family, __, block_type = serialized.partition(':')\n        return cls(family, block_type)", "docstring": "Return an instance of `cls` parsed from its `serialized` form.\n\nArgs:\ncls: The :class:`OpaqueKey` subclass.\nserialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\nRaises:\nInvalidKeyError: Should be raised if `serialized` is not a valid serialized key\nunderstood by `cls`.", "source": "juraj-google-style"}
{"code": "def get_block_entity_data(self, pos_or_x, y=None, z=None):\n    if (None not in (y, z)):\n        pos_or_x = (pos_or_x, y, z)\n    coord_tuple = tuple((int(floor(c)) for c in pos_or_x))\n    return self.block_entities.get(coord_tuple, None)", "docstring": "Access block entity data.\n\nReturns:\nBlockEntityData subclass instance or\nNone if no block entity data is stored for that location.", "source": "codesearchnet"}
{"code": "def feature_info(self):\n    feature_list = self.prop('available-features-list', None)\n    if (feature_list is None):\n        raise ValueError(('Firmware features are not supported on CPC %s' % self.name))\n    return feature_list", "docstring": "Returns information about the features available for this CPC.\n\nAuthorization requirements:\n\n* Object-access permission to this CPC.\n\nReturns:\n\n:term:`iterable`:\nAn iterable where each item represents one feature that is\navailable for this CPC.\n\nEach item is a dictionary with the following items:\n\n* `name` (:term:`unicode string`): Name of the feature.\n* `description` (:term:`unicode string`): Short description of\nthe feature.\n* `state` (bool): Enablement state of the feature (`True` if the\nenabled, `False` if disabled).\n\nRaises:\n\n:exc:`ValueError`: Features are not supported on the HMC.\n:exc:`~zhmcclient.HTTPError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.AuthError`\n:exc:`~zhmcclient.ConnectionError`", "source": "codesearchnet"}
{"code": "def total_duration(utterances: List[Utterance]) -> int:\n\n    \n    return sum([duration(utter) for utter in utterances])", "docstring": "Get the duration of an entire list of utterances in milliseconds\nArgs:\nutterances: The list of utterance we are finding the duration of", "source": "juraj-google-style"}
{"code": "def get_input_info_dict(self, signature=None):\n    \n    return self._spec.get_input_info_dict(signature=signature, tags=self._tags)", "docstring": "Describes the inputs required by a signature.\n\nArgs:\nsignature: A string with the signature to get inputs information for.\nIf None, the default signature is used if defined.\n\nReturns:\nThe result of ModuleSpec.get_input_info_dict() for the given signature,\nand the graph variant selected by `tags` when this Module was initialized.\n\nRaises:\nKeyError: if there is no such signature.", "source": "juraj-google-style"}
{"code": "def __call__(self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int=1, deterministic: bool=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None):\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    outputs = self.wav2vec2(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict)\n    transformer_features = self.project_hid(outputs[0])\n    extract_features = self.dropout_features(outputs[1], deterministic=deterministic)\n    quantized_features, codevector_perplexity = self.quantizer(extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature)\n    quantized_features = self.project_q(quantized_features)\n    if not return_dict:\n        return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n    return FlaxWav2Vec2ForPreTrainingOutput(projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Returns:\n\nExample:\n\n```python\n\n```", "source": "github-repos"}
{"code": "def true_num_reactions(model, custom_spont_id=None):\n    true_num = 0\n    for rxn in model.reactions:\n        if (len(rxn.genes) == 0):\n            continue\n        if ((len(rxn.genes) == 1) and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id)):\n            continue\n        else:\n            true_num += 1\n    return true_num", "docstring": "Return the number of reactions associated with a gene.\n\nArgs:\nmodel (Model):\ncustom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nint: Number of reactions associated with a gene", "source": "codesearchnet"}
{"code": "def _normalize_string(raw_str):\n  \n  return \" \".join(\n      token.strip()\n      for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str)))", "docstring": "Normalizes the string using tokenizer.encode.\n\nArgs:\nraw_str: the input string\n\nReturns:\nA string which is ready to be tokenized using split()", "source": "juraj-google-style"}
{"code": "def random_unitary_matrix(num_qubits):\n    hermitian_matrix = random_hermitian_matrix(num_qubits)\n    return tf.linalg.expm(-1j * hermitian_matrix)", "docstring": "Returns a random unitary matrix.\n\nUses the property that e^{-iH} is unitary for any Hermitian matrix H.\n\nArgs:\nnum_qubits: Number of qubits on which the matrix acts.", "source": "github-repos"}
{"code": "def isUserCert(self, name):\n    crtpath = self._getPathJoin('users', ('%s.crt' % name))\n    return os.path.isfile(crtpath)", "docstring": "Checks if a user certificate exists.\n\nArgs:\nname (str): The name of the user keypair.\n\nExamples:\nCheck if the user cert \"myuser\" exists:\n\nexists = cdir.isUserCert('myuser')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "codesearchnet"}
{"code": "def _module_to_paths(module):\n    submodules = []\n    module_segments = module.split('.')\n    for i in range(len(module_segments)):\n        submodules.append('.'.join(module_segments[:i + 1]))\n    paths = []\n    for submodule in submodules:\n        if not submodule:\n            paths.append('__init__.py')\n            continue\n        paths.append('%s/__init__.py' % submodule.replace('.', '/'))\n    return paths", "docstring": "Get all API __init__.py file paths for the given module.\n\nArgs:\nmodule: Module to get file paths for.\n\nReturns:\nList of paths for the given module. For e.g. module foo.bar\nrequires 'foo/__init__.py' and 'foo/bar/__init__.py'.", "source": "github-repos"}
{"code": "def disease_term(self, disease_identifier):\n        \n        query = {}\n        try:\n            disease_identifier = int(disease_identifier)\n            query['disease_nr'] = disease_identifier\n        except ValueError:\n            query['_id'] = disease_identifier\n\n        return self.disease_term_collection.find_one(query)", "docstring": "Return a disease term\n\nChecks if the identifier is a disease number or a id\n\nArgs:\ndisease_identifier(str)\n\nReturns:\ndisease_obj(dict)", "source": "juraj-google-style"}
{"code": "def distance_similarity(a, b, p, T=CLOSE_DISTANCE_THRESHOLD):\n    \n    d = distance_to_line(a, b, p)\n    r = (-1/float(T)) * abs(d) + 1\n\n    return r if r > 0 else 0", "docstring": "Computes the distance similarity between a line segment\nand a point\n\nArgs:\na ([float, float]): x and y coordinates. Line start\nb ([float, float]): x and y coordinates. Line end\np ([float, float]): x and y coordinates. Point to compute the distance\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "juraj-google-style"}
{"code": "class QuantLinear(nn.Module):\n\n    def __init__(self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False):\n        super().__init__()\n        self.in_features = in_features\n        self.out_features = out_features\n        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))\n        self.register_buffer('weight_integer', torch.zeros_like(self.weight))\n        self.register_buffer('fc_scaling_factor', torch.zeros(self.out_features))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(out_features))\n            self.register_buffer('bias_integer', torch.zeros_like(self.bias))\n        self.weight_bit = weight_bit\n        self.quant_mode = quant_mode\n        self.per_channel = per_channel\n        self.bias_bit = bias_bit\n        self.quant_mode = quant_mode\n        self.percentile_mode = False\n        self.weight_function = SymmetricQuantFunction.apply\n\n    def __repr__(self):\n        s = super().__repr__()\n        s = f'({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})'\n        return s\n\n    def forward(self, x, prev_act_scaling_factor=None):\n        if not self.quant_mode:\n            return (nn.functional.linear(x, weight=self.weight, bias=self.bias), None)\n        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), 'Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. Please add a QuantAct layer with `per_channel = True` before this QuantAct layer'\n        w = self.weight\n        w_transform = w.data.detach()\n        if self.per_channel:\n            w_min, _ = torch.min(w_transform, dim=1, out=None)\n            w_max, _ = torch.max(w_transform, dim=1, out=None)\n        else:\n            w_min = w_transform.min().expand(1)\n            w_max = w_transform.max().expand(1)\n        self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel)\n        self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor)\n        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor\n        if self.bias is not None:\n            self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor)\n        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)\n        x_int = x / prev_act_scaling_factor\n        return (nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor, bias_scaling_factor)", "docstring": "Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`.\n\nArgs:\nweight_bit (`int`, *optional*, defaults to `8`):\nBitwidth for the quantized weight.\nbias_bit (`int`, *optional*, defaults to `32`):\nBitwidth for the quantized bias.\nper_channel (`bool`, *optional*, defaults to `False`):\nWhether or not to use channel-wise quantization.\nquant_mode (`bool`, *optional*, defaults to `False`):\nWhether or not the layer is quantized.", "source": "github-repos"}
{"code": "def ParseIfaddrs(ifaddrs):\n  \n  precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs))\n\n  ifaces = {}\n\n  for ifaddr in IterIfaddrs(ifaddrs):\n    ifname = ctypes.string_at(ifaddr.ifa_name).decode(\"utf-8\")\n    iface = ifaces.setdefault(ifname, rdf_client_network.Interface())\n    iface.ifname = ifname\n\n    if not ifaddr.ifa_addr:\n      continue\n\n    sockaddr = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddr))\n    iffamily = sockaddr.contents.sa_family\n    if iffamily == AF_INET:\n      sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin))\n\n      address = rdf_client_network.NetworkAddress()\n      address.address_type = rdf_client_network.NetworkAddress.Family.INET\n      address.packed_bytes = struct.pack(\"=L\", sockaddrin.contents.sin_addr)\n      iface.addresses.append(address)\n    elif iffamily == AF_INET6:\n      sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin6))\n\n      address = rdf_client_network.NetworkAddress()\n      address.address_type = rdf_client_network.NetworkAddress.Family.INET6\n      address.packed_bytes = bytes(list(sockaddrin.contents.sin6_addr))\n      iface.addresses.append(address)\n    elif iffamily == AF_LINK:\n      sockaddrdl = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrdl))\n\n      nlen = sockaddrdl.contents.sdl_nlen\n      alen = sockaddrdl.contents.sdl_alen\n      iface.mac_address = bytes(sockaddrdl.contents.sdl_data[nlen:nlen + alen])\n    else:\n      raise ValueError(\"Unexpected socket address family: %s\" % iffamily)\n\n  return itervalues(ifaces)", "docstring": "Parses contents of the intrusive linked list of `ifaddrs`.\n\nArgs:\nifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL.\n\nReturns:\nAn iterator over instances of `rdf_client_network.Interface`.", "source": "juraj-google-style"}
{"code": "def _unverified_decode(token):\n    token = _helpers.to_bytes(token)\n    if (token.count(b'.') != 2):\n        raise ValueError('Wrong number of segments in token: {0}'.format(token))\n    (encoded_header, encoded_payload, signature) = token.split(b'.')\n    signed_section = ((encoded_header + b'.') + encoded_payload)\n    signature = _helpers.padded_urlsafe_b64decode(signature)\n    header = _decode_jwt_segment(encoded_header)\n    payload = _decode_jwt_segment(encoded_payload)\n    return (header, payload, signed_section, signature)", "docstring": "Decodes a token and does no verification.\n\nArgs:\ntoken (Union[str, bytes]): The encoded JWT.\n\nReturns:\nTuple[str, str, str, str]: header, payload, signed_section, and\nsignature.\n\nRaises:\nValueError: if there are an incorrect amount of segments in the token.", "source": "codesearchnet"}
{"code": "def orient_undirected_graph(self, data, graph, **kwargs):\n    self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]\n    self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]\n    self.arguments['{DIRECTED}'] = 'TRUE'\n    self.arguments['{ALPHA}'] = str(self.alpha)\n    self.arguments['{NJOBS}'] = str(self.nb_jobs)\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())\n    fg = DataFrame((1 - fe.values))\n    results = self._run_pc(data, fixedEdges=fe, fixedGaps=fg, verbose=self.verbose)\n    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Run PC on an undirected graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.Graph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution given by PC on the given skeleton.", "source": "codesearchnet"}
{"code": "def report_factory(app, report_name, **kwargs):\n    created = pendulum.now().to_rfc3339_string()\n    user_model = app._swimlane.user.as_usergroup_selection()\n    return Report(app, {'$type': Report._type, 'groupBys': [], 'aggregates': [], 'applicationIds': [app.id], 'columns': [], 'sorts': {'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[Core.Models.Search.SortTypes, Core]], mscorlib'}, 'filters': [], 'defaultSearchReport': False, 'allowed': [], 'permissions': {'$type': 'Core.Models.Security.PermissionMatrix, Core'}, 'createdDate': created, 'modifiedDate': created, 'createdByUser': user_model, 'modifiedByUser': user_model, 'id': None, 'name': report_name, 'disabled': False, 'keywords': ''}, **kwargs)", "docstring": "Report instance factory populating boilerplate raw data\n\nArgs:\napp (App): Swimlane App instance\nreport_name (str): Generated Report name\n\nKeyword Args\n**kwargs: Kwargs to pass to the Report class", "source": "codesearchnet"}
{"code": "def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed):\n    with tf.name_scope('multinomial.draw_sample'):\n        num_trials = (tf.ones_like(logits[(..., 0)], dtype=num_trials.dtype) * num_trials)\n        logits = (tf.ones_like(num_trials[(..., tf.newaxis)], dtype=logits.dtype) * logits)\n        flat_logits = tf.reshape(logits, [(- 1), num_classes])\n        flat_num_trials = (num_samples * tf.reshape(num_trials, [(- 1)]))\n\n        def _sample_one_batch_member(args):\n            (logits, num_cat_samples) = (args[0], args[1])\n            x = tf.random.categorical(logits[(tf.newaxis, ...)], num_cat_samples, seed=seed)\n            x = tf.reshape(x, shape=[num_samples, (- 1)])\n            x = tf.one_hot(x, depth=num_classes)\n            x = tf.reduce_sum(input_tensor=x, axis=(- 2))\n            return tf.cast(x, dtype=dtype)\n        x = tf.map_fn(_sample_one_batch_member, [flat_logits, flat_num_trials], dtype=dtype)\n        x = tf.transpose(a=x, perm=[1, 0, 2])\n        final_shape = tf.concat([[num_samples], tf.shape(input=num_trials), [num_classes]], axis=0)\n        x = tf.reshape(x, final_shape)\n        return x", "docstring": "Sample a multinomial.\n\nThe batch shape is given by broadcasting num_trials with\nremove_last_dimension(logits).\n\nArgs:\nnum_samples: Python int or singleton integer Tensor: number of multinomial\nsamples to draw.\nnum_classes: Python int or singleton integer Tensor: number of classes.\nlogits: Floating Tensor with last dimension k, of (unnormalized) logit\nprobabilities per class.\nnum_trials: Tensor of number of categorical trials each multinomial consists\nof.  num_trials[..., tf.newaxis] must broadcast with logits.\ndtype: dtype at which to emit samples.\nseed: Random seed.\n\nReturns:\nsamples: Tensor of given dtype and shape [n] + batch_shape + [k].", "source": "codesearchnet"}
{"code": "def plot_compare(self, other_plotter):\n    data_orig = self.bs_plot_data()\n    data = other_plotter.bs_plot_data()\n    if (len(data_orig['distances']) != len(data['distances'])):\n        raise ValueError('The two objects are not compatible.')\n    plt = self.get_plot()\n    band_linewidth = 1\n    for i in range(other_plotter._nb_bands):\n        for d in range(len(data_orig['distances'])):\n            plt.plot(data_orig['distances'][d], [e[i] for e in data['frequency']][d], 'r-', linewidth=band_linewidth)\n    return plt", "docstring": "plot two band structure for comparison. One is in red the other in blue.\nThe two band structures need to be defined on the same symmetry lines!\nand the distance between symmetry lines is\nthe one of the band structure used to build the PhononBSPlotter\n\nArgs:\nanother PhononBSPlotter object defined along the same symmetry lines\n\nReturns:\na matplotlib object with both band structures", "source": "codesearchnet"}
{"code": "def randomize(vm, length=(10, 10), ints=(0, 999), strs=(1, 10), chars=(32, 126), instruction_ratio=0.5, number_string_ratio=0.8, exclude=map(crianza.instructions.lookup, ['.', 'exit', 'read', 'write', 'str']), restrict_to=None):\n    vm.code = []\n    instructions = (set(vm.instructions.values()) - set(exclude))\n    if (restrict_to is not None):\n        instructions = instructions.intersection(set(restrict_to))\n    instructions = list(instructions)\n    for _ in xrange(random.randint(*length)):\n        r = random.random()\n        if (r <= instruction_ratio):\n            vm.code.append(random.choice(instructions))\n        elif (r <= number_string_ratio):\n            vm.code.append(crianza.compiler.make_embedded_push(random.randint(*ints)))\n        else:\n            vm.code.append(crianza.compiler.make_embedded_push(('%s' % ''.join((chr(random.randint(*chars)) for n in xrange(0, random.randint(*strs)))))))\n    return vm", "docstring": "Replaces existing code with completely random instructions. Does not\noptimize code after generating it.\n\nArgs:\nlength: Tuple of minimum and maximum code lengths. Code length will\nbe a random number between these two, inclusive values.\n\nints: Integers in the code will be selected at random from this\ninclusive range.\n\nstrs: Inclusive range of the length of strings in the code.\n\nchars: Inclusive range of characters in random strings.\n\ninstruction_ratio: Ratio of instructions to numbers/strings,\nmeaning that if this value is 0.5 then there will just as many\ninstructions in the code as there are numbers and strings.\n\nnumber_string_ratio: Ratio of numbers to strings.\n\nexclude: Excluded instructions. For genetic programming, one wants\nto avoid the program to hang for user input.  The default value is\nto exclude console i/o and debug instructions.\n\nrestrict_to: Limit instructions to the given list.\n\nReturns:\nThe VM.", "source": "codesearchnet"}
{"code": "def add_all_exchange_reactions(model, compartment, allow_duplicates=False):\n    \n\n    all_reactions = {}\n    if not allow_duplicates:\n        \n        \n        for rxnid in model.database.reactions:\n            rx = model.database.get_reaction(rxnid)\n            all_reactions[rx] = rxnid\n\n    added = set()\n    added_compounds = set()\n    initial_compounds = set(model.compounds)\n    reactions = set(model.database.reactions)\n    for model_compound in initial_compounds:\n        compound = model_compound.in_compartment(compartment)\n        if compound in added_compounds:\n            continue\n\n        rxnid_ex = create_exchange_id(reactions, compound)\n\n        reaction_ex = Reaction(Direction.Both, {compound: -1})\n        if reaction_ex not in all_reactions:\n            model.database.set_reaction(rxnid_ex, reaction_ex)\n            reactions.add(rxnid_ex)\n        else:\n            rxnid_ex = all_reactions[reaction_ex]\n\n        if not model.has_reaction(rxnid_ex):\n            added.add(rxnid_ex)\n        model.add_reaction(rxnid_ex)\n        added_compounds.add(compound)\n\n    return added", "docstring": "Add all exchange reactions to database and to model.\n\nArgs:\nmodel: :class:`psamm.metabolicmodel.MetabolicModel`.", "source": "juraj-google-style"}
{"code": "def parse_relations(belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors) -> Tuple[(Parsed, Errors)]:\n    quotes = char_locs['quotes']\n    quoted_range = set([i for (start, end) in quotes.items() for i in range(start, end)])\n    for match in relations_pattern_middle.finditer(belstr):\n        (start, end) = match.span(1)\n        end = (end - 1)\n        if (start != end):\n            test_range = set(range(start, end))\n        else:\n            test_range = set(start)\n        if test_range.intersection(quoted_range):\n            continue\n        span_key = (start, end)\n        parsed[span_key] = {'type': 'Relation', 'name': match.group(1), 'span': (start, end)}\n    for match in relations_pattern_end.finditer(belstr):\n        (start, end) = match.span(1)\n        log.debug(f'Relation-end {match}')\n        end = (end - 1)\n        if (start != end):\n            test_range = set(range(start, end))\n        else:\n            test_range = set(start)\n        if test_range.intersection(quoted_range):\n            continue\n        span_key = (start, end)\n        parsed[span_key] = {'type': 'Relation', 'name': match.group(1), 'span': (start, end)}\n    return (parsed, errors)", "docstring": "Parse relations from BEL string\n\nArgs:\nbelstr: BEL string as one single string (not list of chars)\nchar_locs: paren, comma and quote char locations\nparsed: data structure for parsed functions, relations, nested\nerrors: error messages\n\nReturns:\n(parsed, errors):", "source": "codesearchnet"}
{"code": "def table_delete(self, table_name):\n    url = (Api._ENDPOINT + (Api._TABLES_PATH % table_name))\n    return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials, raw_response=True)", "docstring": "Issues a request to delete a table.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def read_at(self, d, **kwargs):\n        \n        try:\n            return np.array([self._read_at(depth, **kwargs) for depth in d])\n        except:\n            return self._read_at(d, **kwargs)", "docstring": "Read the log at a specific depth or an array of depths.\n\nArgs:\nd (float or array-like)\ninterpolation (str)\nindex(bool)\nreturn_basis (bool)\n\nReturns:\nfloat or ndarray.", "source": "juraj-google-style"}
{"code": "def learn_dfa(self, mma=None):\n        \n        logging.info('Initializing learning procedure.')\n        if mma:\n            self._init_table_from_dfa(mma)\n        else:\n            self._init_table()\n\n        logging.info('Generating a closed and consistent observation table.')\n        while True:\n\n            closed = False\n            \n            while not closed:\n                logging.debug('Checking if table is closed.')\n                closed, string = self.observation_table.is_closed()\n                if not closed:\n                    logging.debug('Closing table.')\n                    self._ot_make_closed(string)\n                else:\n                    logging.debug('Table closed.')\n\n            \n\n            dfa = self.get_dfa_conjecture()\n\n            logging.info('Generated conjecture machine with %d states.',len(list(dfa.states)))\n\n            \n            logging.debug('Running equivalence query.')\n            found, counter_example = self._equivalence_query(dfa)\n\n            \n            if found:\n                logging.info('No counterexample found. Hypothesis is correct!')\n                break\n\n            \n            \n            logging.info('Processing counterexample %s with length %d.', counter_example, len(counter_example))\n            self._process_counter_example(dfa, counter_example)\n\n        logging.info('Learning complete.')\n        logging.info('Learned em_vector table is the following:')\n        logging.info(self.observation_table.em_vector)\n        return '', dfa", "docstring": "Implements the high level loop of the algorithm for learning a\nMealy machine.\nArgs:\nmma (DFA): The input automaton\nReturns:\nMealyMachine: A string and a model for the Mealy machine to be learned.", "source": "juraj-google-style"}
{"code": "def find_last_sublist(list_, sublist):\n    for i in reversed(range(((len(list_) - len(sublist)) + 1))):\n        if ((list_[i] == sublist[0]) and (list_[i:(i + len(sublist))] == sublist)):\n            return i\n    return None", "docstring": "Given a list, find the last occurance of a sublist within it.\n\nReturns:\nIndex where the sublist starts, or None if there is no match.", "source": "codesearchnet"}
{"code": "def verify_firebase_token(id_token, request, audience=None):\n    \n    return verify_token(\n        id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL)", "docstring": "Verifies an ID Token issued by Firebase Authentication.\n\nArgs:\nid_token (Union[str, bytes]): The encoded token.\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\naudience (str): The audience that this token is intended for. This is\ntypically your Firebase application ID. If None then the audience\nis not verified.\n\nReturns:\nMapping[str, Any]: The decoded token.", "source": "juraj-google-style"}
{"code": "def get_device_state(self, device, id_override=None, type_override=None):\n    _LOGGER.info('Getting state via online API')\n    object_id = (id_override or device.object_id())\n    object_type = (type_override or device.object_type())\n    url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)\n    arequest = requests.get(url_string, headers=API_HEADERS)\n    response_json = arequest.json()\n    _LOGGER.debug('%s', response_json)\n    return response_json", "docstring": "Get device state via online API.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\nresponse_json (Dict): The API's response in dictionary format", "source": "codesearchnet"}
{"code": "def fail_run_group(group, session):\n    \n    from datetime import datetime\n\n    group.end = datetime.now()\n    group.status = 'failed'\n    session.commit()", "docstring": "End the run_group unsuccessfully.\n\nArgs:\ngroup: The run_group we want to complete.\nsession: The database transaction we will finish.", "source": "juraj-google-style"}
{"code": "def normalize_json(template):\n    \n    obj = parse_cloudformation_template(template)\n    json_str = json.dumps(\n        obj, sort_keys=True, indent=4, default=str, separators=(',', ': '),\n    )\n    result = []\n    lines = json_str.split(\"\\n\")\n    for line in lines:\n        result.append(line + \"\\n\")\n    return result", "docstring": "Normalize our template for diffing.\n\nArgs:\ntemplate(str): string representing the template\n\nReturns:\nlist: json representation of the parameters", "source": "juraj-google-style"}
{"code": "def _ReadRecordSchemaIndexes(self, tables, file_object, record_offset):\n    \n    _ = self._ReadRecordHeader(file_object, record_offset)\n\n    attribute_value_offsets = self._ReadRecordAttributeValueOffset(\n        file_object, record_offset + 24, 5)\n\n    if attribute_value_offsets != (0x2d, 0x31, 0x35, 0x39, 0x3d):\n      raise errors.ParseError('Unsupported record attribute value offsets')\n\n    file_offset = file_object.tell()\n    data_type_map = self._GetDataTypeMap('keychain_record_schema_indexes')\n\n    record_values, _ = self._ReadStructureFromFileObject(\n        file_object, file_offset, data_type_map)\n\n    if record_values.relation_identifier not in tables:\n      raise errors.ParseError(\n          'CSSM_DL_DB_SCHEMA_INDEXES defines relation identifier not defined '\n          'in CSSM_DL_DB_SCHEMA_INFO.')\n\n    table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES, None)\n    if not table:\n      raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INDEXES table.')\n\n    record = collections.OrderedDict({\n        'RelationID': record_values.relation_identifier,\n        'IndexID': record_values.index_identifier,\n        'AttributeID': record_values.attribute_identifier,\n        'IndexType': record_values.index_type,\n        'IndexedDataLocation': record_values.index_data_location})\n\n    table.records.append(record)", "docstring": "Reads a schema indexes (CSSM_DL_DB_SCHEMA_INDEXES) record.\n\nArgs:\ntables (dict[int, KeychainDatabaseTable]): tables per identifier.\nfile_object (file): file-like object.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\n\nRaises:\nParseError: if the record cannot be read.", "source": "juraj-google-style"}
{"code": "def set_speech_text(self, text):\n    self.response.outputSpeech.type = 'PlainText'\n    self.response.outputSpeech.text = text", "docstring": "Set response output speech as plain text type.\n\nArgs:\ntext: str. Response speech used when type is 'PlainText'. Cannot exceed\n8,000 characters.", "source": "codesearchnet"}
{"code": "def get_default_name(self):\n    long_names = [name for name in self.name if name.startswith('--')]\n    short_names = [name for name in self.name if (not name.startswith('--'))]\n    if long_names:\n        return to_snake_case(long_names[0].lstrip('-'))\n    return to_snake_case(short_names[0].lstrip('-'))", "docstring": "Return the default generated name to store value on the parser for this option.\n\neg. An option *['-s', '--use-ssl']* will generate the *use_ssl* name\n\nReturns:\nstr: the default name of the option", "source": "codesearchnet"}
{"code": "def print_tools(self, pattern=None, buf=sys.stdout):\n        \n        seen = set()\n        rows = []\n\n        context = self.context\n        if context:\n            data = context.get_tools()\n            conflicts = set(context.get_conflicting_tools().keys())\n            for _, (variant, tools) in sorted(data.items()):\n                pkg_str = variant.qualified_package_name\n                for tool in tools:\n                    if pattern and not fnmatch(tool, pattern):\n                        continue\n\n                    if tool in conflicts:\n                        label = \"(in conflict)\"\n                        color = critical\n                    else:\n                        label = ''\n                        color = None\n\n                    rows.append([tool, '-', pkg_str, \"active context\", label, color])\n                    seen.add(tool)\n\n        for suite in self.suites:\n            for tool, d in suite.get_tools().iteritems():\n                if tool in seen:\n                    continue\n                if pattern and not fnmatch(tool, pattern):\n                    continue\n\n                label = []\n                color = None\n                path = which(tool)\n                if path:\n                    path_ = os.path.join(suite.tools_path, tool)\n                    if path != path_:\n                        label.append(\"(hidden by unknown tool '%s')\" % path)\n                        color = warning\n\n                variant = d[\"variant\"]\n                if isinstance(variant, set):\n                    pkg_str = \", \".join(variant)\n                    label.append(\"(in conflict)\")\n                    color = critical\n                else:\n                    pkg_str = variant.qualified_package_name\n\n                orig_tool = d[\"tool_name\"]\n                if orig_tool == tool:\n                    orig_tool = '-'\n\n                label = ' '.join(label)\n                source = (\"context '%s' in suite '%s'\"\n                          % (d[\"context_name\"], suite.load_path))\n\n                rows.append([tool, orig_tool, pkg_str, source, label, color])\n                seen.add(tool)\n\n        _pr = Printer(buf)\n        if not rows:\n            _pr(\"No matching tools.\")\n            return False\n\n        headers = [[\"TOOL\", \"ALIASING\", \"PACKAGE\", \"SOURCE\", \"\", None],\n                   [\"----\", \"--------\", \"-------\", \"------\", \"\", None]]\n        rows = headers + sorted(rows, key=lambda x: x[0].lower())\n        print_colored_columns(_pr, rows)\n        return True", "docstring": "Print a list of visible tools.\n\nArgs:\npattern (str): Only list tools that match this glob pattern.", "source": "juraj-google-style"}
{"code": "def __call__(self, package_names):\n        \n\n        result = True\n        registry = get(self.registry_name)\n        for package_name in package_names:\n            metadata = {}\n            for entry_point, export_target in registry.iter_export_targets_for(\n                    package_name):\n                builder = next(registry.generate_builder(\n                    entry_point, export_target), None)\n                if not builder:\n                    \n                    result = False\n                    continue\n                entries = registry.execute_builder(*builder)\n                \n                result = bool(entries) and result\n                metadata.update(entries)\n            \n            result = bool(metadata) and result\n            registry.update_artifact_metadata(package_name, metadata)\n        return result", "docstring": "Generic artifact builder function.\n\nArguments:\n\npackage_names\nList of package names to be built\n\nReturns True if the build is successful without errors, False if\nerrors were found or if no artifacts were built.", "source": "juraj-google-style"}
{"code": "def convert_unsqueeze(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting unsqueeze ...')\n    if (names == 'short'):\n        tf_name = ('UNSQ' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n\n    def target_layer(x):\n        import keras\n        return keras.backend.expand_dims(x)\n    lambda_layer = keras.layers.Lambda(target_layer, name=(tf_name + 'E'))\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert unsqueeze operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._database_object:\n      raise IOError('Not opened.')\n\n    if whence == os.SEEK_CUR:\n      offset += self._current_offset\n    elif whence == os.SEEK_END:\n      offset += self._size\n    elif whence != os.SEEK_SET:\n      raise IOError('Unsupported whence.')\n\n    if offset < 0:\n      raise IOError('Invalid offset value out of bounds.')\n\n    self._current_offset = offset", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True):\n  \n  if parallelism.n == 1:\n    return x\n\n  if maybe_reduce:\n    original_parallelism = parallelism\n    parallelism, x = reduce_by_device(parallelism, x, tf.add_n)\n\n  if parallelism.n == 1:\n    y = x\n  else:\n    \n    x_flat = parallelism(tf.reshape, x, [[-1]] * parallelism.n)\n    \n    x_split = parallelism(\n        common_layers.approximate_split, x_flat, parallelism.n, 0)\n    def _step(source_replica, target_replica, x_split, op=\"plus_eq\"):\n      \n      for shard in range(parallelism.n):\n        source_device = (shard + source_replica) % parallelism.n\n        target_device = (shard + target_replica) % parallelism.n\n        source = x_split[source_device][shard]\n        if use_bfloat16:\n          with tf.device(parallelism.devices[source_device]):\n            source = tf.to_bfloat16(source)\n        with tf.device(parallelism.devices[target_device]):\n          source = tf.to_float(source)\n          if op == \"plus_eq\":\n            x_split[target_device][shard] += source\n          else:\n            assert op == \"copy\"\n            x_split[target_device][shard] = tf.identity(source)\n    center = parallelism.n \n\n    \n    for i in reversed(range(center, parallelism.n - 1)):\n      _step(i + 1, i, x_split, op=\"plus_eq\")\n    for i in range(center):\n      _step(i, i + 1, x_split, op=\"plus_eq\")\n    \n    for i in range(center, parallelism.n - 1):\n      _step(i, i + 1, x_split, op=\"copy\")\n    for i in reversed(range(center)):\n      _step(i + 1, i, x_split, op=\"copy\")\n    x_concat = parallelism(tf.concat, x_split, 0)\n    y = parallelism(common_layers.reshape_like_all_dims, x_concat, x)\n  if maybe_reduce:\n    y = expand_by_device(original_parallelism, parallelism, y)\n  return y", "docstring": "Compute the sum of all Tensors and put the result everywhere.\n\nAssumes that the devices are connected in a ring.\n\nArgs:\nx: a list of Tensors with length parallelism.n\nparallelism: a expert_utils.Parallelism object.\nmaybe_reduce: a boolean - first reduce per device.\nuse_bfloat16: a boolean - saves bandwidth but loses precision\n\nReturns:\na list of Tensors with length parallelism.n", "source": "juraj-google-style"}
{"code": "def extractHolidayDate(self, setting_holiday):\n    ret = namedtuple('result', ['Holiday', 'Month', 'Day'])\n    setting_holiday += 1\n    ret.Holiday = str(setting_holiday)\n    if ((setting_holiday < 1) or (setting_holiday > Extents.Holidays)):\n        ekm_log(('Out of bounds:  holiday ' + str(setting_holiday)))\n        ret.Holiday = ret.Month = ret.Day = str(0)\n        return ret\n    idxday = (('Holiday_' + str(setting_holiday)) + '_Day')\n    idxmon = (('Holiday_' + str(setting_holiday)) + '_Mon')\n    if (idxmon not in self.m_hldy):\n        ret.Holiday = ret.Month = ret.Day = str(0)\n        return ret\n    if (idxday not in self.m_hldy):\n        ret.Holiday = ret.Month = ret.Day = str(0)\n        return ret\n    ret.Day = self.m_hldy[idxday][MeterData.StringValue]\n    ret.Month = self.m_hldy[idxmon][MeterData.StringValue]\n    return ret", "docstring": "Read a single holiday date from meter buffer.\n\nArgs:\nsetting_holiday (int):  Holiday from 0-19 or in range(Extents.Holidays)\n\nReturns:\ntuple: Holiday tuple, elements are strings.\n\n=============== ======================\nHoliday         Holiday 0-19 as string\nDay             Day 1-31 as string\nMonth           Monty 1-12 as string\n=============== ======================", "source": "codesearchnet"}
{"code": "def is_nested(structure):\n    return tree_impl.is_nested(structure)", "docstring": "Checks if a given structure is nested.\n\nExamples:\n\n>>> keras.tree.is_nested(42)\nFalse\n>>> keras.tree.is_nested({\"foo\": 42})\nTrue\n\nArgs:\nstructure: A structure to check.\n\nReturns:\n`True` if a given structure is nested, i.e. is a sequence, a mapping,\nor a namedtuple, and `False` otherwise.", "source": "github-repos"}
{"code": "def update_query_parameters(url, query_parameters):\n    (scheme, netloc, path, query_string, fragment) = urlsplit(url)\n    url_params = parse_qs(query_string)\n    url_params.update(query_parameters)\n    return urlunsplit((scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment))", "docstring": "Return url with updated query parameters.\n\nArguments:\nurl (str): Original url whose query parameters need to be updated.\nquery_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\nReturns:\n(slug): slug identifier for the identity provider that can be used for identity verification of\nusers associated the enterprise customer of the given user.", "source": "codesearchnet"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        writer.write(self.to_json_string())", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (`str` or `os.PathLike`):\nPath to the JSON file in which this image_processor instance's parameters will be saved.", "source": "github-repos"}
{"code": "def dims(self):\n    if self._dims is None:\n        return None\n    return [as_dimension(d) for d in self._dims]", "docstring": "Deprecated.  Returns list of dimensions for this shape.\n\nSuggest `TensorShape.as_list` instead.\n\nReturns:\nA list containing `tf.compat.v1.Dimension`s, or None if the shape is\nunspecified.", "source": "github-repos"}
{"code": "def send_status_message(self, object_id, status):\n    try:\n        body = json.dumps({'id': object_id, 'status': status})\n        self.status_queue.send_message(MessageBody=body, MessageGroupId='job_status', MessageDeduplicationId=get_hash((object_id, status)))\n        return True\n    except Exception as ex:\n        print(ex)\n        return False", "docstring": "Send a message to the `status_queue` to update a job's status.\n\nReturns `True` if the message was sent, else `False`\n\nArgs:\nobject_id (`str`): ID of the job that was executed\nstatus (:obj:`SchedulerStatus`): Status of the job\n\nReturns:\n`bool`", "source": "codesearchnet"}
{"code": "def _CheckPythonModule(self, dependency):\n    \n    module_object = self._ImportPythonModule(dependency.name)\n    if not module_object:\n      status_message = 'missing: {0:s}'.format(dependency.name)\n      return False, status_message\n\n    if not dependency.version_property:\n      return True, dependency.name\n\n    return self._CheckPythonModuleVersion(\n        dependency.name, module_object, dependency.version_property,\n        dependency.minimum_version, dependency.maximum_version)", "docstring": "Checks the availability of a Python module.\n\nArgs:\ndependency (DependencyDefinition): dependency definition.\n\nReturns:\ntuple: consists:\n\nbool: True if the Python module is available and conforms to\nthe minimum required version, False otherwise.\nstr: status message.", "source": "juraj-google-style"}
{"code": "def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n    \n    \n    return (\n        cache_file_metadata_header.key_size > 0 and\n        cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH and\n        cache_file_metadata_header.format_version == 1 and\n        cache_file_metadata_header.last_fetched_time > 0 and\n        cache_file_metadata_header.fetch_count > 0)", "docstring": "Determines whether the cache file metadata header is valid.\n\nArgs:\ncache_file_metadata_header (firefox_cache2_file_metadata_header): cache\nfile metadata header.\n\nReturns:\nbool: True if the cache file metadata header is valid.", "source": "juraj-google-style"}
{"code": "def replace_model_patterns(text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns) -> Tuple[str, str]:\n    attributes_to_check = ['config_class']\n    for attr in ['tokenizer_class', 'image_processor_class', 'image_processor_fast_class', 'feature_extractor_class', 'processor_class']:\n        if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None:\n            attributes_to_check.append(attr)\n    if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]:\n        attributes_to_check.append('checkpoint')\n    if old_model_patterns.model_type != old_model_patterns.model_lower_cased:\n        attributes_to_check.append('model_type')\n    else:\n        text = re.sub(f'(\\\\s*)model_type = \"{old_model_patterns.model_type}\"', '\\\\1model_type = \"[MODEL_TYPE]\"', text)\n    if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased:\n        old_model_value = old_model_patterns.model_upper_cased\n        if re.search(f'{old_model_value}_[A-Z_]*[^A-Z_]', text) is not None:\n            text = re.sub(f'{old_model_value}([A-Z_]*)([^a-zA-Z_])', '[MODEL_UPPER_CASED]\\\\1\\\\2', text)\n    else:\n        attributes_to_check.append('model_upper_cased')\n    attributes_to_check.extend(['model_camel_cased', 'model_lower_cased', 'model_name'])\n    for attr in attributes_to_check:\n        text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr])\n    replacements = []\n    for attr, placeholder in ATTRIBUTE_TO_PLACEHOLDER.items():\n        if placeholder in text:\n            replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)))\n            text = text.replace(placeholder, getattr(new_model_patterns, attr))\n    old_replacement_values = [old for old, new in replacements]\n    if len(set(old_replacement_values)) != len(old_replacement_values):\n        return (text, '')\n    replacements = simplify_replacements(replacements)\n    replacements = [f'{old}->{new}' for old, new in replacements]\n    return (text, ','.join(replacements))", "docstring": "Replace all patterns present in a given text.\n\nArgs:\ntext (`str`): The text to treat.\nold_model_patterns (`ModelPatterns`): The patterns for the old model.\nnew_model_patterns (`ModelPatterns`): The patterns for the new model.\n\nReturns:\n`Tuple(str, str)`: A tuple of with the treated text and the replacement actually done in it.", "source": "github-repos"}
{"code": "def inflate_plugin_dict(plugin_dict, inflate_plugin):\n        \n        plugins = []\n        for identifier, definition in plugin_dict.items():\n            try:\n                plugins.append(inflate_plugin(identifier, definition))\n            except PluginNotFoundError as e:\n                logger.error('Could not import plugin identified by %s. '\n                             'Exception: %s.', identifier, e)\n        return plugins", "docstring": "Inflate a list of strings/dictionaries to a list of plugin instances.\n\nArgs:\nplugin_dict (dict): a dict of dict.\ninflate_plugin (method): the method to inflate the plugin.\n\nReturns:\nlist: a plugin instances list.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(VHDIPathSpec, self).__init__(parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the VHDI file path specification must have a parent.\n\nArgs:\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def total_surface_energy(self):\n    tot_surface_energy = 0\n    for hkl in self.miller_energy_dict.keys():\n        tot_surface_energy += (self.miller_energy_dict[hkl] * self.miller_area_dict[hkl])\n    return tot_surface_energy", "docstring": "Total surface energy of the Wulff shape.\n\nReturns:\n(float) sum(surface_energy_hkl * area_hkl)", "source": "codesearchnet"}
{"code": "def setLCDCmd(self, display_list, password='00000000'):\n    result = False\n    try:\n        self.initLcd()\n        item_cnt = len(display_list)\n        if ((item_cnt > 45) or (item_cnt <= 0)):\n            ekm_log('LCD item list must have between 1 and 40 items')\n            return False\n        for display_item in display_list:\n            self.addLcdItem(int(display_item))\n        result = self.setLCD(password)\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    return result", "docstring": "Single call wrapper for LCD set.\"\n\nWraps :func:`~ekmmeters.V4Meter.setLcd` and associated init and add methods.\n\nArgs:\ndisplay_list (list): List composed of :class:`~ekmmeters.LCDItems`\npassword (str): Optional password.\n\nReturns:\nbool: Passthrough from :func:`~ekmmeters.V4Meter.setLcd`", "source": "codesearchnet"}
{"code": "def as_pil_image(self):\n    from PIL import Image\n    try:\n        bio = BytesIO()\n        self._extract_direct(stream=bio)\n        bio.seek(0)\n        return Image.open(bio)\n    except UnsupportedImageTypeError:\n        pass\n    im = self._extract_transcoded()\n    if (not im):\n        raise UnsupportedImageTypeError(repr(self))\n    return im", "docstring": "Extract the image as a Pillow Image, using decompression as necessary\n\nReturns:\nPIL.Image.Image", "source": "codesearchnet"}
{"code": "def delete_adapter(self, adapter_names: Union[List[str], str]) -> None:\n    check_peft_version(min_version=MIN_PEFT_VERSION)\n    if not self._hf_peft_config_loaded:\n        raise ValueError('No adapter loaded. Please load an adapter first.')\n    from peft.tuners.tuners_utils import BaseTunerLayer\n    if isinstance(adapter_names, str):\n        adapter_names = [adapter_names]\n    missing_adapters = [name for name in adapter_names if name not in self.peft_config]\n    if missing_adapters:\n        raise ValueError(f'The following adapter(s) are not present and cannot be deleted: {', '.join(missing_adapters)}')\n    for adapter_name in adapter_names:\n        for module in self.modules():\n            if isinstance(module, BaseTunerLayer):\n                if hasattr(module, 'delete_adapter'):\n                    module.delete_adapter(adapter_name)\n                else:\n                    raise ValueError('The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1')\n        if getattr(self, '_hf_peft_config_loaded', False) and hasattr(self, 'peft_config'):\n            self.peft_config.pop(adapter_name, None)\n    if len(self.peft_config) == 0:\n        del self.peft_config\n        self._hf_peft_config_loaded = False", "docstring": "Delete an adapter's LoRA layers from the underlying model.\n\nArgs:\nadapter_names (`Union[List[str], str]`):\nThe name(s) of the adapter(s) to delete.\n\nExample:\n\n```py\nfrom diffusers import AutoPipelineForText2Image\nimport torch\n\npipeline = AutoPipelineForText2Image.from_pretrained(\n\"stabilityai/stable-diffusion-xl-base-1.0\", torch_dtype=torch.float16\n).to(\"cuda\")\npipeline.load_lora_weights(\n\"jbilcke-hf/sdxl-cinematic-1\", weight_name=\"pytorch_lora_weights.safetensors\", adapter_names=\"cinematic\"\n)\npipeline.delete_adapters(\"cinematic\")\n```", "source": "github-repos"}
{"code": "def set_default_by_index(self, index):\n    if (index >= len(self._datasets)):\n        raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n    self._default_index = index", "docstring": "Set the default dataset by its index.\n\nAfter changing the default dataset, all calls without explicitly specifying the\ndataset by index or alias will be redirected to this dataset.\n\nArgs:\nindex (int): The index of the dataset that should be made the default.\n\nRaises:\nDataInvalidIndex: If the index does not represent a valid dataset.", "source": "codesearchnet"}
{"code": "def overlapping(self, start, stop):\n        \n        for event in self:\n            if ((start <= event.begin <= stop \n            or start <= event.end <= stop) \n            or event.begin <= start and event.end >= stop): \n                yield event", "docstring": "Iterates (in chronological order) over every event that has an intersection\nwith the timespan between `start` and `stop`\n\nArgs:\nstart : (Arrow object)\nstop : (Arrow object)", "source": "juraj-google-style"}
{"code": "def pack_rpc_payload(arg_format, args):\n    \n\n    code = _create_respcode(arg_format, args)\n\n    packed_result = struct.pack(code, *args)\n    unpacked_validation = struct.unpack(code, packed_result)\n    if tuple(args) != unpacked_validation:\n        raise RPCInvalidArgumentsError(\"Passed values would be truncated, please validate the size of your string\",\n                                       code=code, args=args)\n    return packed_result", "docstring": "Pack an RPC payload according to arg_format.\n\nArgs:\narg_format (str): a struct format code (without the <) for the\nparameter format for this RPC.  This format code may include the final\ncharacter V, which means that it expects a variable length bytearray.\nargs (list): A list of arguments to pack according to arg_format.\n\nReturns:\nbytes: The packed argument buffer.", "source": "juraj-google-style"}
{"code": "def remove(text, exclude):\n    \n    exclude = ''.join(str(symbol) for symbol in exclude)\n    return text.translate(str.maketrans('', '', exclude))", "docstring": "Remove ``exclude`` symbols from ``text``.\n\nExample:\n>>> remove(\"example text\", string.whitespace)\n'exampletext'\n\nArgs:\ntext (str): The text to modify\nexclude (iterable): The symbols to exclude\n\nReturns:\n``text`` with ``exclude`` symbols removed", "source": "juraj-google-style"}
{"code": "def decode(self, obj, restype, raw_ptr=False):\n        \n        if raw_ptr:\n            data = obj\n        else:\n            data = cweld.WeldValue(obj).data()\n        result = ctypes.cast(data, ctypes.POINTER(restype.ctype_class)).contents\n\n        if restype == WeldInt16():\n            data = cweld.WeldValue(obj).data()\n            result = ctypes.cast(data, ctypes.POINTER(c_int16)).contents.value\n            return result\n        elif restype == WeldInt():\n            data = cweld.WeldValue(obj).data()\n            result = ctypes.cast(data, ctypes.POINTER(c_int)).contents.value\n            return result\n        elif restype == WeldLong():\n            data = cweld.WeldValue(obj).data()\n            result = ctypes.cast(data, ctypes.POINTER(c_long)).contents.value\n            return result\n        elif restype == WeldFloat():\n            data = cweld.WeldValue(obj).data()\n            result = ctypes.cast(data, ctypes.POINTER(c_float)).contents.value\n            return np.float32(result)\n        elif restype == WeldDouble():\n            data = cweld.WeldValue(obj).data()\n            result = ctypes.cast(data, ctypes.POINTER(c_double)).contents.value\n            return float(result)\n        elif restype == WeldBit():\n            data = cweld.WeldValue(obj).data()\n            result = ctypes.cast(data, ctypes.POINTER(c_bool)).contents.value\n            return bool(result)\n\n        \n        \n        if restype == WeldVec(WeldBit()):\n            weld_to_numpy = self.utils.weld_to_numpy_bool_arr\n        elif restype == WeldVec(WeldInt16()):\n            weld_to_numpy = self.utils.weld_to_numpy_int16_arr\n        elif restype == WeldVec(WeldInt()):\n            weld_to_numpy = self.utils.weld_to_numpy_int_arr\n        elif restype == WeldVec(WeldLong()):\n            weld_to_numpy = self.utils.weld_to_numpy_long_arr\n        elif restype == WeldVec(WeldFloat()):\n            weld_to_numpy = self.utils.weld_to_numpy_float_arr\n        elif restype == WeldVec(WeldDouble()):\n            weld_to_numpy = self.utils.weld_to_numpy_double_arr\n        elif restype == WeldVec(WeldVec(WeldChar())):\n            weld_to_numpy = self.utils.weld_to_numpy_char_arr_arr\n        elif restype == WeldVec(WeldVec(WeldInt16())):\n            weld_to_numpy = self.utils.weld_to_numpy_int16_arr_arr\n        elif restype == WeldVec(WeldVec(WeldInt())):\n            weld_to_numpy = self.utils.weld_to_numpy_int_arr_arr\n        elif restype == WeldVec(WeldVec(WeldLong())):\n            weld_to_numpy = self.utils.weld_to_numpy_long_arr_arr\n        elif restype == WeldVec(WeldVec(WeldFloat())):\n            weld_to_numpy = self.utils.weld_to_numpy_float_arr_arr\n        elif restype == WeldVec(WeldVec(WeldDouble())):\n            weld_to_numpy = self.utils.weld_to_numpy_double_arr_arr\n        elif restype == WeldVec(WeldVec(WeldBit())):\n            weld_to_numpy = self.utils.weld_to_numpy_bool_arr_arr\n        elif isinstance(restype, WeldStruct):\n            ret_vecs = []\n            \n            \n            for field_type in restype.field_types:\n                ret_vec = self.decode(data, field_type, raw_ptr=True)\n                data += sizeof(field_type.ctype_class())\n                ret_vecs.append(ret_vec)\n            return tuple(ret_vecs)\n        else:\n            raise Exception(\"Unable to decode; invalid return type\")\n\n        weld_to_numpy.restype = py_object\n        weld_to_numpy.argtypes = [restype.ctype_class]\n\n        ret_vec = 
weld_to_numpy(result)\n        return ret_vec", "docstring": "Converts Weld object to Python object.\n\nArgs:\nobj: Result of Weld computation that needs to be decoded\nrestype: Type of Weld computation result\nraw_ptr: Boolean indicating whether obj needs to be extracted\nfrom WeldValue or not\n\nReturns:\nPython object representing result of the Weld computation", "source": "juraj-google-style"}
{"code": "def save_plot(fig, prefile='', postfile='', output_path='./', output_name='Figure', output_format='png', dpi=300, transparent=False, **_):\n    if (not os.path.exists(output_path)):\n        os.makedirs(output_path)\n    output = os.path.join(output_path, ((((prefile + output_name) + postfile) + '.') + output_format))\n    fig.savefig(output, dpi=dpi, transparent=transparent)", "docstring": "Generates a figure file in the selected directory.\n\nArgs:\nfig: matplotlib figure\nprefile(str): Include before the general filename of the figure\npostfile(str): Included after the general filename of the figure\noutput_path(str): Define the path to the output directory\noutput_name(str): String to define the name of the output figure\noutput_format(str): String to define the format of the output figure\ndpi(int): Define the DPI (Dots per Inch) of the figure\ntransparent(bool): If True the saved figure will have a transparent background", "source": "codesearchnet"}
{"code": "def get_autosave_filename(self, filename):\n        \n        try:\n            autosave_filename = self.name_mapping[filename]\n        except KeyError:\n            autosave_dir = get_conf_path('autosave')\n            if not osp.isdir(autosave_dir):\n                try:\n                    os.mkdir(autosave_dir)\n                except EnvironmentError as error:\n                    action = _('Error while creating autosave directory')\n                    msgbox = AutosaveErrorDialog(action, error)\n                    msgbox.exec_if_enabled()\n            autosave_filename = self.create_unique_autosave_filename(\n                    filename, autosave_dir)\n            self.name_mapping[filename] = autosave_filename\n            self.stack.sig_option_changed.emit(\n                    'autosave_mapping', self.name_mapping)\n            logger.debug('New autosave file name')\n        return autosave_filename", "docstring": "Get name of autosave file for specified file name.\n\nThis function uses the dict in `self.name_mapping`. If `filename` is\nin the mapping, then return the corresponding autosave file name.\nOtherwise, construct a unique file name and update the mapping.\n\nArgs:\nfilename (str): original file name", "source": "juraj-google-style"}
{"code": "def __is_noncopyable_single(class_, already_visited_cls_vars=None):\n    \n    \n    \n    logger = utils.loggers.cxx_parser\n\n    if has_copy_constructor(class_) \\\n       and has_public_constructor(class_) \\\n       and has_public_assign(class_) \\\n       and has_public_destructor(class_):\n        msg = os.linesep.join([\n            \"__is_noncopyable_single - %s - COPYABLE:\" % class_.decl_string,\n            \"    trivial copy constructor: yes\",\n            \"    public constructor: yes\",\n            \"    public assign: yes\",\n            \"    public destructor: yes\"])\n        logger.debug(msg)\n        return False\n\n    if already_visited_cls_vars is None:\n        already_visited_cls_vars = []\n\n    if find_noncopyable_vars(class_, already_visited_cls_vars):\n        logger.debug(\n            (\"__is_noncopyable_single(TRUE) - %s - contains noncopyable \" +\n             \"members\"), class_.decl_string)\n        return True\n\n    logger.debug((\n        \"__is_noncopyable_single(FALSE) - %s - COPYABLE, because is \" +\n        \"doesn't contains noncopyable members\"), class_.decl_string)\n    return False", "docstring": "Implementation detail.\n\nChecks if the class is non copyable, without considering the base classes.\n\nArgs:\nclass_ (declarations.class_t): the class to be checked\nalready_visited_cls_vars (list): optional list of vars that should not\nbe checked a second time, to prevent infinite recursions.\n\nReturns:\nbool: if the class is non copyable", "source": "juraj-google-style"}
{"code": "def getUserForHost(self, user, host):\n    for name in iterFqdnUp(host):\n        usercert = ('%s@%s' % (user, name))\n        if self.isUserCert(usercert):\n            return usercert", "docstring": "Gets the name of the first existing user cert for a given user and host.\n\nArgs:\nuser (str): The name of the user.\nhost (str): The name of the host.\n\nExamples:\nGet the name for the \"myuser\" user cert at \"cool.vertex.link\":\n\nusercertname = cdir.getUserForHost('myuser', 'cool.vertex.link')\n\nReturns:\nstr: The cert name, if exists.", "source": "codesearchnet"}
{"code": "def trigger_methods(instance, args):\n    \n    \n    for name in sorted(args):\n        value = args[name]\n        target = instance\n\n        \n        if name.startswith('response_') or name.startswith('reply_'):\n            name = name.replace('response_', '').replace('reply_', '')\n            \n            if hasattr(instance, '_response'):\n                target = instance._response\n\n        \n        member = getattr(target, name, None)\n\n        \n        isattr = name in dir(target)\n        iscallable = ismethod(member) and not isfunction(member)\n\n        if not iscallable and not isattr:\n            raise PookInvalidArgument('Unsupported argument: {}'.format(name))\n\n        \n        if iscallable:\n            member(value)\n        else:\n            setattr(target, name, value)", "docstring": "Triggers specific class methods using a simple reflection\nmechanism based on the given input dictionary params.\n\nArguments:\ninstance (object): target instance to dynamically trigger methods.\nargs (iterable): input arguments to trigger objects to\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def GetCodeObjectAtLine(module, line):\n  \n  if not hasattr(module, '__file__'):\n    return (False, (None, None))\n\n  prev_line = 0\n  next_line = six.MAXSIZE\n\n  for code_object in _GetModuleCodeObjects(module):\n    for co_line_number in _GetLineNumbers(code_object):\n      if co_line_number == line:\n        return (True, code_object)\n      elif co_line_number < line:\n        prev_line = max(prev_line, co_line_number)\n      elif co_line_number > line:\n        next_line = min(next_line, co_line_number)\n        break\n\n  prev_line = None if prev_line == 0 else prev_line\n  next_line = None if next_line == six.MAXSIZE else next_line\n  return (False, (prev_line, next_line))", "docstring": "Searches for a code object at the specified line in the specified module.\n\nArgs:\nmodule: module to explore.\nline: 1-based line number of the statement.\n\nReturns:\n(True, Code object) on success or (False, (prev_line, next_line)) on\nfailure, where prev_line and next_line are the closest lines with code above\nand below the specified line, or None if they do not exist.", "source": "juraj-google-style"}
{"code": "def _send_request(self, xml_request):\n        \n        if self._scheme == 'http':\n            return self._send_http_request(xml_request)\n        else:\n            return self._send_socket_request(xml_request)", "docstring": "Send the prepared XML request block to the CPS using the corect protocol.\n\nArgs:\nxml_request -- A fully formed xml request string for the CPS.\n\nReturns:\nThe raw xml response string.\n\nRaises:\nConnectionError -- Can't establish a connection with the server.", "source": "juraj-google-style"}
{"code": "def add_spin_by_site(self, spins):\n        \n        if len(spins) != len(self.sites):\n            raise ValueError(\"Spin of all sites must be \"\n                             \"specified in the dictionary.\")\n\n        for site, spin in zip(self.sites, spins):\n            new_sp = {}\n            for sp, occu in site.species.items():\n                sym = sp.symbol\n                oxi_state = getattr(sp, \"oxi_state\", None)\n                new_sp[Specie(sym, oxidation_state=oxi_state,\n                              properties={'spin': spin})] = occu\n            site.species = new_sp", "docstring": "Add spin states to a structure by site.\n\nArgs:\nspins (list): List of spins\nE.g., [+5, -5, 0, 0]", "source": "juraj-google-style"}
{"code": "def delete_resource(self, resource, delete=True):\n    if isinstance(resource, str):\n        if (is_valid_uuid(resource) is False):\n            raise HDXError(('%s is not a valid resource id!' % resource))\n    return self._remove_hdxobject(self.resources, resource, delete=delete)", "docstring": "Delete a resource from the dataset and also from HDX by default\n\nArgs:\nresource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary\ndelete (bool): Whetehr to delete the resource from HDX (not just the dataset). Defaults to True.\n\nReturns:\nbool: True if resource removed or False if not", "source": "codesearchnet"}
{"code": "def human_timestamp(__timestamp: datetime.datetime) -> str:\n    \n    numstr = '. a two three four five six seven eight nine ten'.split()\n\n    matches = [\n        60 * 60 * 24 * 365,\n        60 * 60 * 24 * 28,\n        60 * 60 * 24 * 7,\n        60 * 60 * 24,\n        60 * 60,\n        60,\n        1,\n    ]\n    match_names = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second']\n\n    if __timestamp.tzinfo is None:\n        __timestamp = __timestamp.replace(tzinfo=datetime.timezone.utc)\n\n    now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)\n\n    delta = int((now - __timestamp).total_seconds())\n    for scale in matches:\n        i = delta \n        if i:\n            name = match_names[matches.index(scale)]\n            break\n    else:\n        i = 0  \n\n    if i == 0:\n        result = 'right now'\n    elif i == 1 and name in ('year', 'month', 'week'):\n        result = 'last {}'.format(name)\n    elif i == 1 and name == 'day':\n        result = 'yesterday'\n    elif i == 1 and name == 'hour':\n        result = 'about an hour ago'\n    else:\n        result = 'about {} {}{} ago'.format(i if i > 10 else numstr[i], name,\n                                            's' if i > 1 else '')\n    return result", "docstring": "Format a relative time.\n\nArgs:\n__timestamp: Event to generate relative timestamp against\nReturns:\nHuman readable date and time offset", "source": "juraj-google-style"}
{"code": "def _set_typeahead(cls, el, value):\n        \n        PlaceholderHandler.reset_placeholder_dropdown(el)\n\n        \n        if not value and not el.value:\n            DropdownHandler.set_dropdown_glyph(el.id, \"glyphicon-alert\")\n            return\n\n        \n        \n        \n        if len(value) == 1:\n            source = value[0][\"source\"].strip()\n            dropdown_el = DropdownHandler.set_dropdown_glyph(\n                el.id,\n                \"glyphicon-eye-open\"\n            )\n            dropdown_content = \"<span class='gray_text'>&nbsp;(%s)</span>\"\n\n            \n            if source:\n                dropdown_el.html = dropdown_content % source[::-1]\n\n            el.value = value[0][\"val\"]\n            return\n\n        \n        parent_id = el.parent.id\n        if \"typeahead\" not in parent_id.lower():\n            parent_id = el.parent.parent.id\n\n        if parent_id in cls._set_by_typeahead:\n            window.destroy_typeahead_tag(\"\n\n        \n        \n        window.make_typeahead_tag(\"\n        DropdownHandler.set_dropdown_glyph(el.id, \"glyphicon-menu-down\")\n        PlaceholderHandler.set_placeholder_dropdown(el)\n        cls._set_by_typeahead.add(parent_id)", "docstring": "Convert given `el` to typeahead input and set it to `value`.\n\nThis method also sets the dropdown icons and descriptors.\n\nArgs:\nel (obj): Element reference to the input you want to convert to\ntypeahead.\nvalue (list): List of dicts with two keys: ``source`` and ``val``.", "source": "juraj-google-style"}
{"code": "def _ParseNamesString(self, names_string):\n    \n    if not names_string:\n      return\n\n    names_string = names_string.lower()\n    names = [name.strip() for name in names_string.split(',')]\n    file_entry_filter = file_entry_filters.NamesFileEntryFilter(names)\n    self._filter_collection.AddFilter(file_entry_filter)", "docstring": "Parses the name string.\n\nArgs:\nnames_string (str): comma separated filenames to filter.", "source": "juraj-google-style"}
{"code": "def single_qubit_matrix_to_gates(\n        mat: np.ndarray, tolerance: float = 0\n) -> List[ops.SingleQubitGate]:\n    \n    rotations = single_qubit_matrix_to_pauli_rotations(mat, tolerance)\n    return [cast(ops.SingleQubitGate, pauli)**ht for pauli, ht in rotations]", "docstring": "Implements a single-qubit operation with few gates.\n\nArgs:\nmat: The 2x2 unitary matrix of the operation to implement.\ntolerance: A limit on the amount of error introduced by the\nconstruction.\n\nReturns:\nA list of gates that, when applied in order, perform the desired\noperation.", "source": "juraj-google-style"}
{"code": "def get_upper_triangle(correlation_matrix):\n    \n    upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool))\n\n    \n    upper_tri_df = upper_triangle.stack().reset_index(level=1)\n    upper_tri_df.columns = ['rid', 'corr']\n\n    \n    upper_tri_df.reset_index(level=0, inplace=True)\n\n    \n    upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0)\n\n    return upper_tri_df.round(rounding_precision)", "docstring": "Extract upper triangle from a square matrix. Negative values are\nset to 0.\n\nArgs:\ncorrelation_matrix (pandas df): Correlations between all replicates\n\nReturns:\nupper_tri_df (pandas df): Upper triangle extracted from\ncorrelation_matrix; rid is the row index, cid is the column index,\ncorr is the extracted correlation value", "source": "juraj-google-style"}
{"code": "def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):\n    \n    assert flow.ndim == 3 and flow.shape[-1] == 2\n    if color_wheel is None:\n        color_wheel = make_color_wheel()\n    assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3\n    num_bins = color_wheel.shape[0]\n\n    dx = flow[:, :, 0].copy()\n    dy = flow[:, :, 1].copy()\n\n    ignore_inds = (np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |\n                   (np.abs(dy) > unknown_thr))\n    dx[ignore_inds] = 0\n    dy[ignore_inds] = 0\n\n    rad = np.sqrt(dx**2 + dy**2)\n    if np.any(rad > np.finfo(float).eps):\n        max_rad = np.max(rad)\n        dx /= max_rad\n        dy /= max_rad\n\n    [h, w] = dx.shape\n\n    rad = np.sqrt(dx**2 + dy**2)\n    angle = np.arctan2(-dy, -dx) / np.pi\n\n    bin_real = (angle + 1) / 2 * (num_bins - 1)\n    bin_left = np.floor(bin_real).astype(int)\n    bin_right = (bin_left + 1) % num_bins\n    w = (bin_real - bin_left.astype(np.float32))[..., None]\n    flow_img = (\n        1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]\n    small_ind = rad <= 1\n    flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])\n    flow_img[np.logical_not(small_ind)] *= 0.75\n\n    flow_img[ignore_inds, :] = 0\n\n    return flow_img", "docstring": "Convert flow map to RGB image.\n\nArgs:\nflow (ndarray): Array of optical flow.\ncolor_wheel (ndarray or None): Color wheel used to map flow field to\nRGB colorspace. Default color wheel will be used if not specified.\nunknown_thr (str): Values above this threshold will be marked as\nunknown and thus ignored.\n\nReturns:\nndarray: RGB image that can be visualized.", "source": "juraj-google-style"}
{"code": "def __init__(self, fraction_of_second=None, timestamp=None):\n    \n    \n    \n    if pytsk3.TSK_VERSION_NUM >= 0x040200ff:\n      precision = dfdatetime_definitions.PRECISION_1_NANOSECOND\n    else:\n      precision = dfdatetime_definitions.PRECISION_100_NANOSECONDS\n\n    super(TSKTime, self).__init__()\n    self._precision = precision\n    self._timestamp = timestamp\n    self.fraction_of_second = fraction_of_second", "docstring": "Initializes a SleuthKit timestamp.\n\nArgs:\nfraction_of_second (Optional[int]): fraction of second, which is\nan integer that contains the number 100 nano seconds before\nSleuthkit 4.2.0 or number of nano seconds in Sleuthkit 4.2.0\nand later.\ntimestamp (Optional[int]): POSIX timestamp.", "source": "juraj-google-style"}
{"code": "def url_is(white_list):\n    \n    def func(url):\n        prefixes = white_list.get('PREFIXES', ())\n        for prefix in prefixes:\n            if url.startswith(prefix):\n                return True\n        constants = white_list.get('CONSTANTS', ())\n        for exact_url in constants:\n            if url == exact_url:\n                return True\n        return False\n    return func", "docstring": "Function generator.\n\nArgs:\nwhite_list (dict): dict with PREFIXES and CONSTANTS keys (list values).\n\nReturns:\nfunc: a function to check if a URL is...", "source": "juraj-google-style"}
{"code": "def count(self, event):\n    return (len(self._listeners[event]) + len(self._once[event]))", "docstring": "Get the number of listeners for the event.\n\nArgs:\nevent (str): The event for which to count all listeners.\n\nThe resulting count is a combination of listeners added using\n'on'/'add_listener' and 'once'.", "source": "codesearchnet"}
{"code": "def get(self, tx_id):\n    pool = current_app.config['bigchain_pool']\n    with pool() as bigchain:\n        tx = bigchain.get_transaction(tx_id)\n    if (not tx):\n        return make_error(404)\n    return tx.to_dict()", "docstring": "API endpoint to get details about a transaction.\n\nArgs:\ntx_id (str): the id of the transaction.\n\nReturn:\nA JSON string containing the data about the transaction.", "source": "codesearchnet"}
{"code": "def _build(self, input_batch, is_training, test_local_stats=False):\n    input_shape = input_batch.get_shape()\n    if (not self._data_format):\n        if (len(input_shape) == 2):\n            self._data_format = 'NC'\n        elif (len(input_shape) == 3):\n            self._data_format = 'NWC'\n        elif (len(input_shape) == 4):\n            self._data_format = 'NHWC'\n        elif (len(input_shape) == 5):\n            self._data_format = 'NDHWC'\n        else:\n            raise base.IncompatibleShapeError('Input shape {} has too many or too few dimensions.'.format(input_shape))\n    self._channel_index = self._data_format.index('C')\n    self._axis = list(range(len(self._data_format)))\n    del self._axis[self._channel_index]\n    if (len(self._data_format) != len(input_shape)):\n        raise base.IncompatibleShapeError('Incorrect data format {} for input shape {}.'.format(self._data_format, input_shape))\n    dtype = input_batch.dtype.base_dtype\n    if (self._fused and (dtype == tf.bfloat16)):\n        raise base.NotSupportedError('Fused batch norm does not support tf.bfloat16.')\n    stat_dtype = (tf.float32 if (dtype in [tf.float16, tf.bfloat16]) else dtype)\n    self._num_channels = int(input_shape[self._channel_index])\n    if (self._channel_index == 1):\n        self._image_shape = [int(x) for x in input_shape[2:]]\n    else:\n        self._image_shape = [int(x) for x in input_shape[1:(- 1)]]\n    self._expanded_mean_shape = ([1] * len(input_shape))\n    self._expanded_mean_shape[self._channel_index] = self._num_channels\n    use_batch_stats = (is_training | test_local_stats)\n    (mean, variance) = self._build_statistics(input_batch, use_batch_stats, stat_dtype)\n    self._build_scale_offset(dtype)\n    (out, mean, variance) = self._batch_norm_op(input_batch, mean, variance, use_batch_stats, stat_dtype)\n    update_ops = self._build_update_ops(mean, variance, is_training)\n    if update_ops:\n        if self._update_ops_collection:\n            for update_op in update_ops:\n                tf.add_to_collection(self._update_ops_collection, update_op)\n        else:\n            with tf.control_dependencies(update_ops):\n                out = tf.identity(out)\n    return out", "docstring": "Connects the BatchNormV2 module into the graph.\n\nArgs:\ninput_batch: A Tensor of the same dimension as `len(data_format)`.\nis_training: A boolean to indicate if the module should be connected in\ntraining mode, meaning the moving averages are updated. Can be a Tensor.\ntest_local_stats: A boolean to indicate if local batch statistics should\nbe used when `is_training=False`. If not, moving averages are used.\nBy default `False`. Can be a Tensor.\n\nReturns:\nA tensor with the same shape as `input_batch`.\n\nRaises:\nbase.IncompatibleShapeError: If `data_format` is not valid for the\ninput shape.\nbase.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.", "source": "codesearchnet"}
{"code": "def _ValidateFSM(self):\n    if ('Start' not in self.states):\n        raise TextFSMTemplateError(\"Missing state 'Start'.\")\n    if self.states.get('End'):\n        raise TextFSMTemplateError(\"Non-Empty 'End' state.\")\n    if self.states.get('EOF'):\n        raise TextFSMTemplateError(\"Non-Empty 'EOF' state.\")\n    if ('End' in self.states):\n        del self.states['End']\n        self.state_list.remove('End')\n    for state in self.states:\n        for rule in self.states[state]:\n            if (rule.line_op == 'Error'):\n                continue\n            if ((not rule.new_state) or (rule.new_state in ('End', 'EOF'))):\n                continue\n            if (rule.new_state not in self.states):\n                raise TextFSMTemplateError((\"State '%s' not found, referenced in state '%s'\" % (rule.new_state, state)))\n    return True", "docstring": "Checks state names and destinations for validity.\n\nEach destination state must exist, be a valid name and\nnot be a reserved name.\nThere must be a 'Start' state and if 'EOF' or 'End' states are specified,\nthey must be empty.\n\nReturns:\nTrue if FSM is valid.\n\nRaises:\nTextFSMTemplateError: If any state definitions are invalid.", "source": "codesearchnet"}
{"code": "def PackTag(field_number, wire_type):\n  \n  if not 0 <= wire_type <= _WIRETYPE_MAX:\n    raise message.EncodeError('Unknown wire type: %d' % wire_type)\n  return (field_number << TAG_TYPE_BITS) | wire_type", "docstring": "Returns an unsigned 32-bit integer that encodes the field number and\nwire type information in standard protocol message wire format.\n\nArgs:\nfield_number: Expected to be an integer in the range [1, 1 << 29)\nwire_type: One of the WIRETYPE_* constants.", "source": "juraj-google-style"}
{"code": "def assignees(self, assignee=None, resource_id=None):\n        \n        if resource_id is not None:\n            self.resource_id(resource_id)\n        self._request_uri = '{}/assignees'.format(self._request_uri)\n        if assignee is not None:\n            self._request_uri = '{}/{}'.format(self._request_uri, assignee)", "docstring": "Add an assignee to a Task\n\nGET: /v2/tasks/{uniqueId}/assignees\nGET: /v2/tasks/{uniqueId}/assignees/{assigneeId}\nPOST: /v2/tasks/{uniqueId}/assignees/{assigneeId}\nDELETE: /v2/tasks/{uniqueId}/assignees/{assigneeId}\n\nArgs:\nassignee (Optional [string]): The assignee name.\nresource_id (Optional [string]): The task ID.", "source": "juraj-google-style"}
{"code": "def ContainsAny(self, *values):\n    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ANY')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains any\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def _ReadAttributeValueInteger(self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset):\n    if (attribute_value_offset == 0):\n        return None\n    data_type_map = self._GetDataTypeMap('uint32be')\n    file_offset = ((record_offset + attribute_values_data_offset) + attribute_value_offset)\n    attribute_value_offset -= (attribute_values_data_offset + 1)\n    attribute_value_data = attribute_values_data[attribute_value_offset:]\n    try:\n        return self._ReadStructureFromByteStream(attribute_value_data, file_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map integer attribute value data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))", "docstring": "Reads an integer attribute value.\n\nArgs:\nattribute_values_data (bytes): attribute values data.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\nattribute_values_data_offset (int): offset of the attribute values data\nrelative to the start of the record.\nattribute_value_offset (int): offset of the attribute relative to\nthe start of the record.\n\nReturns:\nint: integer value or None if attribute value offset is not set.\n\nRaises:\nParseError: if the attribute value cannot be read.", "source": "codesearchnet"}
{"code": "def assertOutputStateMatches(self, **has_output):\n    output_types = {'stdout', 'stderr', 'returncode'}\n    assert len(output_types) == len(has_output)\n    for output_type in output_types:\n        output_value = getattr(self, output_type)\n        if has_output[output_type]:\n            self.assertTrue(output_value, output_type + ' unexpectedly empty')\n        else:\n            value = str(output_value)\n            if len(value) > 50:\n                value = value[:47] + '...'\n            self.assertFalse(output_value, f'Unexpected output to {output_type}: {value!r}')", "docstring": "Check that the output state matches expectations.\n\nIf, for example, you expect the program to print something to stdout and\nnothing to stderr before exiting with an error code, you would write\nassertOutputStateMatches(stdout=True, stderr=False, returncode=True).\n\nArgs:\n**has_output: Whether each output type should have output.", "source": "github-repos"}
{"code": "def assertDictEqual(self, a, b, msg=None):\n    try:\n        super().assertDictEqual(a, b, msg)\n    except Exception:\n        self.assertSameElements(a.keys(), b.keys())\n        for k, v in a.items():\n            a_k, b_k = self.evaluate_if_both_tensors(v, b[k])\n            a_k = self._GetNdArray(a_k)\n            b_k = self._GetNdArray(b_k)\n            if np.issubdtype(a_k.dtype, np.floating):\n                self.assertAllClose(v, b[k], msg=k)\n            else:\n                self.assertAllEqual(v, b[k], msg=k)", "docstring": "Assert that two given dictionary of tensors are the same.\n\nArgs:\na: Expected dictionary with numpy ndarray or anything else that can be\nconverted to one as values.\nb: Actual dictionary with numpy ndarray or anything else that can be\nconverted to one as values.\nmsg: Optional message to report on failure.", "source": "github-repos"}
{"code": "def _DownloadAuthUrl(self, url, dest_dir):\n    dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)\n    dest_file.close()\n    dest = dest_file.name\n    self.logger.info('Downloading url from %s to %s using authentication token.', url, dest)\n    if (not self.token):\n        response = self.watcher.GetMetadata(self.token_metadata_key, recursive=False, retry=False)\n        if (not response):\n            self.logger.info('Authentication token not found. Attempting unauthenticated download.')\n            return self._DownloadUrl(url, dest_dir)\n        self.token = ('%s %s' % (response.get('token_type', ''), response.get('access_token', '')))\n    try:\n        request = urlrequest.Request(url)\n        request.add_unredirected_header('Metadata-Flavor', 'Google')\n        request.add_unredirected_header('Authorization', self.token)\n        content = urlrequest.urlopen(request).read().decode('utf-8')\n    except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n        self.logger.warning('Could not download %s. %s.', url, str(e))\n        return None\n    with open(dest, 'wb') as f:\n        f.write(content)\n    return dest", "docstring": "Download a Google Storage URL using an authentication token.\n\nIf the token cannot be fetched, fallback to unauthenticated download.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "codesearchnet"}
{"code": "def store_unspent_outputs(self, *unspent_outputs):\n        \n        if unspent_outputs:\n            return backend.query.store_unspent_outputs(\n                                            self.connection, *unspent_outputs)", "docstring": "Store the given ``unspent_outputs`` (utxos).\n\nArgs:\n*unspent_outputs (:obj:`tuple` of :obj:`dict`): Variable\nlength tuple or list of unspent outputs.", "source": "juraj-google-style"}
{"code": "def orbit(self, orbit):\n        \n\n        self._orbit = orbit\n        tle = Tle.from_orbit(orbit)\n        lines = tle.text.splitlines()\n\n        if len(lines) == 3:\n            _, line1, line2 = lines\n        else:\n            line1, line2 = lines\n\n        self.tle = twoline2rv(line1, line2, wgs72)", "docstring": "Initialize the propagator\n\nArgs:\norbit (Orbit)", "source": "juraj-google-style"}
{"code": "def prelu(inp, base_axis=1, shared=True, fix_parameters=False):\n    shape = (tuple() if shared else (inp.shape[base_axis],))\n    w = get_parameter_or_create('slope', shape, ConstantInitializer((- 1)), True, (not fix_parameters))\n    return F.prelu(inp, w, base_axis)", "docstring": "Parametrized Rectified Linear Unit function defined as\n\n.. math::\ny_i = \\max(0, x_i) + w_i \\min(0, -x_i)\n\nwhere negative slope :math:`w` is learned and can vary across channels (an\naxis specified with base_axis). Weights are initialized with :math:`-1`.\n\nArgs:\nx(~nnabla.Variable): N-D array as input\nbase_axis(int): Dimensions up to base_axis is treated as sample dimension.\nshared(bool): Use shared weight value or not\nfix_parameters (bool): When set to `True`, the negative slope values\nwill not be updated.\n\nReturns:\n~nnabla.Variable: N-D array.", "source": "codesearchnet"}
{"code": "def get(self, profile_id):\n    if (profile_id not in self._profiles):\n        try:\n            self._profiles[profile_id] = self._get_profile(profile_id)\n        except (ValueError, IOError) as e:\n            six.raise_from(RegistryError(e), e)\n    return self._profiles[profile_id]", "docstring": "Returns the profile with the received ID as a dict\n\nIf a local copy of the profile exists, it'll be returned. If not, it'll\nbe downloaded from the web. The results are cached, so any subsequent\ncalls won't hit the filesystem or the web.\n\nArgs:\nprofile_id (str): The ID of the profile you want.\n\nRaises:\nRegistryError: If there was some problem opening the profile file\nor its format was incorrect.", "source": "codesearchnet"}
{"code": "def add_bonds(self, neighbors, center, color=None, opacity=None,\n                  radius=0.1):\n        \n        points = vtk.vtkPoints()\n        points.InsertPoint(0, center.x, center.y, center.z)\n        n = len(neighbors)\n        lines = vtk.vtkCellArray()\n        for i in range(n):\n            points.InsertPoint(i + 1, neighbors[i].coords)\n            lines.InsertNextCell(2)\n            lines.InsertCellPoint(0)\n            lines.InsertCellPoint(i + 1)\n        pd = vtk.vtkPolyData()\n        pd.SetPoints(points)\n        pd.SetLines(lines)\n\n        tube = vtk.vtkTubeFilter()\n        if vtk.VTK_MAJOR_VERSION <= 5:\n            tube.SetInputConnection(pd.GetProducerPort())\n        else:\n            tube.SetInputData(pd)\n        tube.SetRadius(radius)\n\n        mapper = vtk.vtkPolyDataMapper()\n        mapper.SetInputConnection(tube.GetOutputPort())\n\n        actor = vtk.vtkActor()\n        actor.SetMapper(mapper)\n        if opacity is not None:\n            actor.GetProperty().SetOpacity(opacity)\n        if color is not None:\n            actor.GetProperty().SetColor(color)\n        self.ren.AddActor(actor)", "docstring": "Adds bonds for a site.\n\nArgs:\nneighbors: Neighbors of the site.\ncenter: The site in the center for all bonds.\ncolor: Color of the tubes representing the bonds\nopacity: Opacity of the tubes representing the bonds\nradius: Radius of tube s representing the bonds", "source": "juraj-google-style"}
{"code": "def notify(self, method, params=None):\n        \n        log.debug('Sending notification: %s %s', method, params)\n\n        message = {\n            'jsonrpc': JSONRPC_VERSION,\n            'method': method,\n        }\n        if params is not None:\n            message['params'] = params\n\n        self._consumer(message)", "docstring": "Send a JSON RPC notification to the client.\n\nArgs:\nmethod (str): The method name of the notification to send\nparams (any): The payload of the notification", "source": "juraj-google-style"}
{"code": "def read_single_knmi_file(filename):\n    hourly_data_obs_raw = pd.read_csv(filename, parse_dates=[['YYYYMMDD', 'HH']], date_parser=(lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]), int(str(yyyymmdd)[4:6]), int(str(yyyymmdd)[6:8]), (int(hh) - 1))), skiprows=31, skipinitialspace=True, na_values='', keep_date_col=True)\n    hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH']\n    hourly_data_obs_raw.index = (hourly_data_obs_raw.index + pd.Timedelta(hours=1))\n    columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']\n    hourly_data_obs = pd.DataFrame(index=hourly_data_obs_raw.index, columns=columns_hourly, data=dict(temp=((hourly_data_obs_raw['T'] / 10) + 273.15), precip=(hourly_data_obs_raw['RH'] / 10), glob=((hourly_data_obs_raw['Q'] * 10000) / 3600.0), hum=hourly_data_obs_raw['U'], wind=(hourly_data_obs_raw['FH'] / 10), ssd=(hourly_data_obs_raw['SQ'] * 6)))\n    negative_values = (hourly_data_obs['precip'] < 0.0)\n    hourly_data_obs.loc[(negative_values, 'precip')] = 0.0\n    return hourly_data_obs", "docstring": "reads a single file of KNMI's meteorological time series\n\ndata availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens\n\nArgs:\nfilename: the file to be opened\n\nReturns:\npandas data frame including time series", "source": "codesearchnet"}
{"code": "def SetServerInformation(self, server, port):\n    self._host = server\n    self._port = port\n    logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(server, port))", "docstring": "Set the server information.\n\nArgs:\nserver (str): IP address or hostname of the server.\nport (int): Port number of the server.", "source": "codesearchnet"}
{"code": "def _add_genotypes(self, variant_obj, gemini_variant, case_id,\n                       individual_objs):\n        \n        for ind in individual_objs:\n            index = ind.ind_index\n            variant_obj.add_individual(Genotype(\n                sample_id=ind.ind_id,\n                genotype=gemini_variant['gts'][index],\n                case_id=case_id,\n                phenotype=ind.phenotype,\n                ref_depth=gemini_variant['gt_ref_depths'][index],\n                alt_depth=gemini_variant['gt_alt_depths'][index],\n                depth=gemini_variant['gt_depths'][index],\n                genotype_quality=gemini_variant['gt_quals'][index]\n            ))", "docstring": "Add the genotypes for a variant for all individuals\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ngemini_variant (GeminiQueryRow): The gemini variant\ncase_id (str): related case id\nindividual_objs (list(dict)): A list of Individuals", "source": "juraj-google-style"}
{"code": "def move_file(src, dest):\n    try:\n        os.replace(src, dest)\n    except Exception as ex_replace:\n        logger.error(f'error moving file {src} to {dest}. {ex_replace}')\n        raise", "docstring": "Move source file to destination.\n\nOverwrites dest.\n\nArgs:\nsrc: str or path-like. source file\ndest: str or path-like. destination file\n\nReturns:\nNone.\n\nRaises:\nFileNotFoundError: out path parent doesn't exist.\nOSError: if any IO operations go wrong.", "source": "codesearchnet"}
{"code": "def get_pair(self, term1, term2):\n\n        \n\n        key = self.key(term1, term2)\n        return self.pairs.get(key, None)", "docstring": "Get the value for a pair of terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns:\nThe stored value.", "source": "juraj-google-style"}
{"code": "def create_initial(self, address_values):\n    with self._lock:\n        for (add, val) in address_values:\n            self._state[add] = _ContextFuture(address=add, result=val)", "docstring": "Create futures from inputs with the current value for that address\nat the start of that context.\n\nArgs:\naddress_values (list of tuple): The tuple is string, bytes of the\naddress and value.", "source": "codesearchnet"}
{"code": "def CopyToIsoFormat(cls, timestamp, timezone=pytz.UTC, raise_error=False):\n    datetime_object = cls.CopyToDatetime(timestamp, timezone, raise_error=raise_error)\n    return datetime_object.isoformat()", "docstring": "Copies the timestamp to an ISO 8601 formatted string.\n\nArgs:\ntimestamp: The timestamp which is an integer containing the number\nof micro seconds since January 1, 1970, 00:00:00 UTC.\ntimezone: Optional timezone (instance of pytz.timezone).\nraise_error: Boolean that if set to True will not absorb an OverflowError\nif the timestamp is out of bounds. By default there will be\nno error raised.\n\nReturns:\nA string containing an ISO 8601 formatted date and time.", "source": "codesearchnet"}
{"code": "def download_from_s3(context):\n    \n    target_file = context.solid_config['target_file']\n    return context.resources.download_manager.download_file_contents(context, target_file)", "docstring": "Download an object from s3.\n\nArgs:\ninfo (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.\n\nReturns:\nstr:\nThe path to the downloaded object.", "source": "juraj-google-style"}
{"code": "def load(cls, pkid_or_path=None):\n    path = pkid_or_path\n    if isinstance(path, (int, np.int32, np.int64)):\n        raise NotImplementedError('Lookup via CMS not implemented.')\n    elif (not os.path.isfile(path)):\n        raise FileNotFoundError('File {} not found.'.format(path))\n    kwargs = {}\n    fields = defaultdict(dict)\n    with pd.HDFStore(path) as store:\n        for key in store.keys():\n            if ('kwargs' in key):\n                kwargs.update(store.get_storer(key).attrs.metadata)\n            elif ('FIELD' in key):\n                (name, dname) = '_'.join(key.split('_')[1:]).split('/')\n                dname = dname.replace('values', '')\n                fields[name][dname] = store[key]\n            else:\n                name = str(key[1:])\n                kwargs[name] = store[key]\n    for (name, field_data) in fields.items():\n        fps = field_data.pop('data')\n        kwargs[name] = Field(fps, field_values=[field_data[str(arr)] for arr in sorted(map(int, field_data.keys()))])\n    return cls(**kwargs)", "docstring": "Load a container object from a persistent location or file path.\n\nArgs:\npkid_or_path: Integer pkid corresponding to the container table or file path\n\nReturns:\ncontainer: The saved container object", "source": "codesearchnet"}
{"code": "def GetMetadata(fn) -> Dict[str, Any]:\n    default = {ACCEPTS_POSITIONAL_ARGS: inspect.isroutine(fn)}\n    try:\n        metadata = getattr(fn, FIRE_METADATA, default)\n        if ACCEPTS_POSITIONAL_ARGS in metadata:\n            return metadata\n        else:\n            return default\n    except:\n        return default", "docstring": "Gets metadata attached to the function `fn` as an attribute.\n\nArgs:\nfn: The function from which to retrieve the function metadata.\nReturns:\nA dictionary mapping property strings to their value.", "source": "github-repos"}
{"code": "def process_tree_files(tree):\n    \n    \n    config.LOGGER.info(\"Processing content...\")\n    files_to_diff = tree.process_tree(tree.channel)\n    config.SUSHI_BAR_CLIENT.report_statistics(files_to_diff, topic_count=tree.channel.get_topic_count())\n    tree.check_for_files_failed()\n    return files_to_diff, config.FAILED_FILES", "docstring": "process_tree_files: Download files from nodes\nArgs:\ntree (ChannelManager): manager to handle communication to Kolibri Studio\nReturns: None", "source": "juraj-google-style"}
{"code": "def ReadClientPostingLists(self, keywords):\n    \n\n    start_time, filtered_keywords = self._AnalyzeKeywords(keywords)\n\n    return data_store.REL_DB.ListClientsForKeywords(\n        filtered_keywords, start_time=start_time)", "docstring": "Looks up all clients associated with any of the given keywords.\n\nArgs:\nkeywords: A list of keywords we are interested in.\n\nReturns:\nA dict mapping each keyword to a list of matching clients.", "source": "juraj-google-style"}
{"code": "def _run_test_class(self, config, test_class, tests=None):\n    test_instance = test_class(config)\n    logging.debug('Executing test class \"%s\" with config: %s', test_class.__name__, config)\n    try:\n        cls_result = test_instance.run(tests)\n        self.results += cls_result\n    except signals.TestAbortAll as e:\n        self.results += e.results\n        raise e", "docstring": "Instantiates and executes a test class.\n\nIf tests is None, the tests listed in self.tests will be executed\ninstead. If self.tests is empty as well, every test in this test class\nwill be executed.\n\nArgs:\nconfig: A config_parser.TestRunConfig object.\ntest_class: class, test class to execute.\ntests: Optional list of test names within the class to execute.", "source": "github-repos"}
{"code": "def update_info(self, custom=None):\n    self.figure.suptitle((self.info_string() if (custom is None) else custom))", "docstring": "Updates the figure's suptitle.\n\nCalls self.info_string() unless custom is provided.\n\nArgs:\ncustom: Overwrite it with this string, unless None.", "source": "codesearchnet"}
{"code": "def getcallargs(*func_and_positional, **named):\n    func = func_and_positional[0]\n    positional = func_and_positional[1:]\n    argspec = getfullargspec(func)\n    call_args = named.copy()\n    this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)\n    if ismethod(func) and this:\n        positional = (this,) + positional\n    remaining_positionals = [arg for arg in argspec.args if arg not in call_args]\n    call_args.update(dict(zip(remaining_positionals, positional)))\n    default_count = 0 if not argspec.defaults else len(argspec.defaults)\n    if default_count:\n        for arg, value in zip(argspec.args[-default_count:], argspec.defaults):\n            if arg not in call_args:\n                call_args[arg] = value\n    if argspec.kwonlydefaults is not None:\n        for k, v in argspec.kwonlydefaults.items():\n            if k not in call_args:\n                call_args[k] = v\n    return call_args", "docstring": "TFDecorator-aware replacement for inspect.getcallargs.\n\nArgs:\n*func_and_positional: A callable, possibly decorated, followed by any\npositional arguments that would be passed to `func`.\n**named: The named argument dictionary that would be passed to `func`.\n\nReturns:\nA dictionary mapping `func`'s named arguments to the values they would\nreceive if `func(*positional, **named)` were called.\n\n`getcallargs` will use the argspec from the outermost decorator that provides\nit. If no attached decorators modify argspec, the final unwrapped target's\nargspec will be used.", "source": "github-repos"}
{"code": "def parse_node_or_tensor_name(name):\n    if ':' in name and (not name.endswith(':')):\n        node_name = name[:name.rfind(':')]\n        output_slot = int(name[name.rfind(':') + 1:])\n        return (node_name, output_slot)\n    else:\n        return (name, None)", "docstring": "Get the node name from a string that can be node or tensor name.\n\nArgs:\nname: An input node name (e.g., \"node_a\") or tensor name (e.g.,\n\"node_a:0\"), as a str.\n\nReturns:\n1) The node name, as a str. If the input name is a tensor name, i.e.,\nconsists of a colon, the final colon and the following output slot\nwill be stripped.\n2) If the input name is a tensor name, the output slot, as an int. If\nthe input name is not a tensor name, None.", "source": "github-repos"}
{"code": "def check_unused(intersection, duplicates, intersections):\n    for other in intersections:\n        if ((other.interior_curve == UNUSED_T) and (intersection.index_first == other.index_first) and (intersection.index_second == other.index_second)):\n            if ((intersection.s == 0.0) and (other.s == 0.0)):\n                duplicates.append(intersection)\n                return True\n            if ((intersection.t == 0.0) and (other.t == 0.0)):\n                duplicates.append(intersection)\n                return True\n    return False", "docstring": "Check if a \"valid\" ``intersection`` is already in ``intersections``.\n\nThis assumes that\n\n* ``intersection`` will have at least one of ``s == 0.0`` or ``t == 0.0``\n* At least one of the intersections in ``intersections`` is classified as\n``COINCIDENT_UNUSED``.\n\nArgs:\nintersection (.Intersection): An intersection to be added.\nduplicates (List[.Intersection]): List of duplicate intersections.\nintersections (List[.Intersection]): List of \"accepted\" (i.e.\nnon-duplicate) intersections.\n\nReturns:\nbool: Indicates if the ``intersection`` is a duplicate.", "source": "codesearchnet"}
{"code": "def match_regex(self, regex: Pattern, required: bool=False, meaning: str='') -> str:\n    mo = regex.match(self.input, self.offset)\n    if mo:\n        self.offset = mo.end()\n        return mo.group()\n    if required:\n        raise UnexpectedInput(self, meaning)", "docstring": "Parse input based on a regular expression .\n\nArgs:\nregex: Compiled regular expression object.\nrequired: Should the exception be raised on unexpected input?\nmeaning: Meaning of `regex` (for use in error messages).\n\nRaises:\nUnexpectedInput: If no syntactically correct keyword is found.", "source": "codesearchnet"}
{"code": "def load_sample(self, file_path, tags=None):\n        \n\n        \n        if not tags:\n            print '\\n%sRequired: Add a list of tags when you load samples (put \\'unknown\\' if you must). \\\n                   \\n\\t%sExamples: [\\'bad\\'], [\\'good\\'], [\\'bad\\',\\'aptz13\\']%s' % (color.Yellow, color.Green, color.Normal)\n            return\n\n        \n        if os.path.isdir(file_path):\n            file_list = self._all_files_in_directory(file_path)\n        else:\n            file_list = [file_path]\n\n        \n        md5_list = []\n        for path in file_list:\n            with open(path, 'rb') as my_file:\n                raw_bytes = my_file.read()\n                md5 = hashlib.md5(raw_bytes).hexdigest()\n                if not self.workbench.has_sample(md5):\n                    print '%sStreaming Sample...%s' % (color.LightPurple, color.Normal)\n                    basename = os.path.basename(path)\n                    md5 = self.streamer.stream_to_workbench(raw_bytes, basename, 'unknown', tags)\n\n                print '\\n%s  %s%s %sLocked and Loaded...%s\\n' % \\\n                      (self.beer, color.LightPurple, md5[:6], color.Yellow, color.Normal)\n\n                \n                self.workbench.add_tags(md5, tags)\n                md5_list.append(md5)\n\n        \n        set_md5 = self.workbench.store_sample_set(md5_list)\n        self.pivot(set_md5, '_'.join(tags))\n\n        \n        self.tags()", "docstring": "Load a sample (or samples) into workbench\nArgs:\nfile_path: path to a file or directory\ntags (optional): a list of tags for the sample/samples ['bad','aptz13']\nReturns:\nThe list of md5s for all samples", "source": "juraj-google-style"}
{"code": "def get_tensor_filter(self, filter_name):\n    if filter_name not in self._tensor_filters:\n        raise ValueError('There is no tensor filter named \"%s\"' % filter_name)\n    return self._tensor_filters[filter_name]", "docstring": "Retrieve filter function by name.\n\nArgs:\nfilter_name: Name of the filter set during add_tensor_filter() call.\n\nReturns:\nThe callable associated with the filter name.\n\nRaises:\nValueError: If there is no tensor filter of the specified filter name.", "source": "github-repos"}
{"code": "def get_by_provider_display_name(self, provider_display_name):\n    san_managers = self._client.get_all()\n    result = [x for x in san_managers if (x['providerDisplayName'] == provider_display_name)]\n    return (result[0] if result else None)", "docstring": "Gets a SAN Manager by provider display name.\n\nArgs:\nprovider_display_name: Name of the Provider Display Name\n\nReturns:\ndict: SAN Manager.", "source": "codesearchnet"}
{"code": "def pull_file(self, remote_source, local_dir):\n        \n\n        local_dest = local_dir + '/' + os.path.basename(remote_source)\n\n        try:\n            os.makedirs(local_dir)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                logger.exception(\"Failed to create script_dir: {0}\".format(script_dir))\n                raise BadScriptPath(e, self.hostname)\n\n        \n        \n        if os.path.exists(local_dest):\n            logger.exception(\"Remote file copy will overwrite a local file:{0}\".format(local_dest))\n            raise FileExists(None, self.hostname, filename=local_dest)\n\n        try:\n            self.sftp_client.get(remote_source, local_dest)\n        except Exception as e:\n            logger.exception(\"File pull failed\")\n            raise FileCopyException(e, self.hostname)\n\n        return local_dest", "docstring": "Transport file on the remote side to a local directory\n\nArgs:\n- remote_source (string): remote_source\n- local_dir (string): Local directory to copy to\n\n\nReturns:\n- str: Local path to file\n\nRaises:\n- FileExists : Name collision at local directory.\n- FileCopyException : FileCopy failed.", "source": "juraj-google-style"}
{"code": "def _HandleHashAnalysis(self, hash_analysis):\n    tags = []\n    labels = self.GenerateLabels(hash_analysis.hash_information)\n    path_specifications = self._hash_pathspecs.pop(hash_analysis.subject_hash)\n    for path_specification in path_specifications:\n        event_identifiers = self._event_identifiers_by_pathspec.pop(path_specification, [])\n        if (not labels):\n            continue\n        for event_identifier in event_identifiers:\n            event_tag = events.EventTag(comment=self._comment)\n            event_tag.SetEventIdentifier(event_identifier)\n            event_tag.AddLabels(labels)\n            tags.append(event_tag)\n    return (path_specifications, labels, tags)", "docstring": "Deals with the results of the analysis of a hash.\n\nThis method ensures that labels are generated for the hash,\nthen tags all events derived from files with that hash.\n\nArgs:\nhash_analysis (HashAnalysis): hash analysis plugin's results for a given\nhash.\n\nReturns:\ntuple: containing:\n\nlist[dfvfs.PathSpec]: pathspecs that had the hash value looked up.\nlist[str]: labels that corresponds to the hash value that was looked up.\nlist[EventTag]: event tags for all events that were extracted from the\npath specifications.", "source": "codesearchnet"}
{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, mask: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor:\n    if input_ids is None and inputs_embeds is None:\n        raise ValueError('Need to provide either `input_ids` or `input_embeds`.')\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    final_embeddings = inputs_embeds\n    if self.position_biased_input:\n        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n        final_embeddings += position_embeds\n    if self.config.type_vocab_size > 0:\n        token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n        final_embeddings += token_type_embeds\n    if self.embedding_size != self.hidden_size:\n        final_embeddings = self.embed_proj(final_embeddings)\n    final_embeddings = self.LayerNorm(final_embeddings)\n    if mask is not None:\n        if len(shape_list(mask)) != len(shape_list(final_embeddings)):\n            if len(shape_list(mask)) == 4:\n                mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)\n            mask = tf.cast(tf.expand_dims(mask, axis=2), dtype=self.compute_dtype)\n        final_embeddings = final_embeddings * mask\n    final_embeddings = self.dropout(final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def parse_fs_url(fs_url):\n    \n    \n    match = _RE_FS_URL.match(fs_url)\n    if match is None:\n        raise ParseError(\"{!r} is not a fs2 url\".format(fs_url))\n\n    fs_name, credentials, url1, url2, path = match.groups()\n    if not credentials:\n        username = None  \n        password = None  \n        url = url2\n    else:\n        username, _, password = credentials.partition(\":\")\n        username = unquote(username)\n        password = unquote(password)\n        url = url1\n    url, has_qs, qs = url.partition(\"?\")\n    resource = unquote(url)\n    if has_qs:\n        _params = parse_qs(qs, keep_blank_values=True)\n        params = {k: unquote(v[0]) for k, v in six.iteritems(_params)}\n    else:\n        params = {}\n    return ParseResult(fs_name, username, password, resource, params, path)", "docstring": "Parse a Filesystem URL and return a `ParseResult`.\n\nArguments:\nfs_url (str): A filesystem URL.\n\nReturns:\n~fs.opener.parse.ParseResult: a parse result instance.\n\nRaises:\n~fs.errors.ParseError: if the FS URL is not valid.", "source": "juraj-google-style"}
{"code": "def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):\n    \n    types = [treefun(n) for n in neuron.neurites]\n    return CheckResult(types.count(NeuriteType.basal_dendrite) >= min_number)", "docstring": "Check if a neuron has basal dendrites\n\nArguments:\nneuron(Neuron): The neuron object to test\nmin_number: minimum number of basal dendrites required\ntreefun: Optional function to calculate the tree type of neuron's\nneurites\n\nReturns:\nCheckResult with result", "source": "juraj-google-style"}
{"code": "def add_inputs(self, mutable_accumulator, elements, *args, **kwargs):\n    for element in elements:\n        mutable_accumulator = self.add_input(mutable_accumulator, element, *args, **kwargs)\n    return mutable_accumulator", "docstring": "Returns the result of folding each element in elements into accumulator.\n\nThis is provided in case the implementation affords more efficient\nbulk addition of elements. The default implementation simply loops\nover the inputs invoking add_input for each one.\n\nArgs:\nmutable_accumulator: the current accumulator,\nmay be modified and returned for efficiency\nelements: the elements to add, should not be mutated\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"}
{"code": "def Sign(self, data, signing_key, verify_key=None):\n    if (signing_key.KeyLen() < 2048):\n        logging.warning('signing key is too short.')\n    self.signature = signing_key.Sign(data)\n    self.signature_type = self.SignatureType.RSA_PKCS1v15\n    self.digest = hashlib.sha256(data).digest()\n    self.digest_type = self.HashType.SHA256\n    self.data = data\n    if (verify_key is None):\n        verify_key = signing_key.GetPublicKey()\n    self.Verify(verify_key)\n    return self", "docstring": "Use the data to sign this blob.\n\nArgs:\ndata: String containing the blob data.\nsigning_key: The key to sign with.\nverify_key: Key to verify with. If None we assume the signing key also\ncontains the public key.\n\nReturns:\nself for call chaining.", "source": "codesearchnet"}
{"code": "def str2dict_keys(str_in):\n    tmp_dict = str2dict(str_in)\n    if (tmp_dict is None):\n        return None\n    return sorted([k for k in tmp_dict])", "docstring": "Extracts the keys from a string that represents a dict and returns them\nsorted by key.\n\nArgs:\nstr_in (string) that contains python dict\nReturns:\n(list) with keys or None if no valid dict was found\nRaises:\n-", "source": "codesearchnet"}
{"code": "def gather_dilated_memory_blocks(x, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction='left'):\n    gathered_blocks = []\n    for block_id in range(num_memory_blocks):\n        block_end_index = (- ((query_block_size + (gap_size * (block_id + 1))) + (memory_block_size * block_id)))\n        block_start_index = ((memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1)))\n        if (direction != 'left'):\n            [block_end_index, block_start_index] = [(- block_start_index), (- block_end_index)]\n        if (block_end_index == 0):\n            x_block = x[block_start_index:]\n        else:\n            x_block = x[block_start_index:block_end_index]\n\n        def gather_dilated_1d_blocks(x, gather_indices):\n            x_new = tf.gather(x, gather_indices)\n            return tf.transpose(x_new, [2, 3, 0, 1, 4])\n        gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices))\n    return tf.concat(gathered_blocks, 3)", "docstring": "Gathers blocks with gaps in between.\n\nArgs:\nx: Tensor of shape [length, batch, heads, depth]\nnum_memory_blocks: how many memory blocks to look in \"direction\". Each will\nbe separated by gap_size.\ngap_size: an integer indicating the gap size\nquery_block_size: an integer indicating size of query block\nmemory_block_size: an integer indicating the size of a memory block.\ngather_indices: The indices to gather from.\ndirection: left or right\n\nReturns:\nTensor of shape [batch, heads, blocks, block_length, depth]", "source": "codesearchnet"}
{"code": "def normalize_bytes2str(x):\n    if isinstance(x, str):\n        return x\n    if isinstance(x, bytes):\n        return x.decode('utf8')\n    elif is_array_str(x):\n        return _to_str_array(x)\n    else:\n        return x", "docstring": "Normalize `bytes` array to `str` (UTF-8).\n\nExample of usage:\n\n```python\nfor ex in tfds.as_numpy(ds):  # tf.data returns `tf.string` as `bytes`\nex = tf.nest.map_structure(enp.normalize_bytes2str, ex)\n```\n\nArgs:\nx: Any array\n\nReturns:\nx: `bytes` array are decoded as `str`", "source": "github-repos"}
{"code": "def load(cls, config: Optional[Config]=None):\n    if (cls._dfk is not None):\n        raise RuntimeError('Config has already been loaded')\n    if (config is None):\n        cls._dfk = DataFlowKernel(Config())\n    else:\n        cls._dfk = DataFlowKernel(config)\n    return cls._dfk", "docstring": "Load a DataFlowKernel.\n\nArgs:\n- config (Config) : Configuration to load. This config will be passed to a\nnew DataFlowKernel instantiation which will be set as the active DataFlowKernel.\nReturns:\n- DataFlowKernel : The loaded DataFlowKernel object.", "source": "codesearchnet"}
{"code": "def GetValidHostsForCert(cert):\n  \n  if 'subjectAltName' in cert:\n    return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']\n  else:\n    return [x[0][1] for x in cert['subject']\n            if x[0][0].lower() == 'commonname']", "docstring": "Returns a list of valid host globs for an SSL certificate.\n\nArgs:\ncert: A dictionary representing an SSL certificate.\nReturns:\nlist: A list of valid host globs.", "source": "juraj-google-style"}
{"code": "def _iflat_tasks_wti(self, status=None, op='==', nids=None, with_wti=True):\n    nids = as_set(nids)\n    if (status is None):\n        for (wi, work) in enumerate(self):\n            for (ti, task) in enumerate(work):\n                if (nids and (task.node_id not in nids)):\n                    continue\n                if with_wti:\n                    (yield (task, wi, ti))\n                else:\n                    (yield task)\n    else:\n        op = operator_from_str(op)\n        status = Status.as_status(status)\n        for (wi, work) in enumerate(self):\n            for (ti, task) in enumerate(work):\n                if (nids and (task.node_id not in nids)):\n                    continue\n                if op(task.status, status):\n                    if with_wti:\n                        (yield (task, wi, ti))\n                    else:\n                        (yield task)", "docstring": "Generators that produces a flat sequence of task.\nif status is not None, only the tasks with the specified status are selected.\nnids is an optional list of node identifiers used to filter the tasks.\n\nReturns:\n(task, work_index, task_index) if with_wti is True else task", "source": "codesearchnet"}
{"code": "def execute_workflow(self, directory: str) -> None:\n    thread = current_thread()\n    print(f'Executing directory {directory} on thread {thread.name}...')\n    for path, subdirs, files in os.walk(directory):\n        s = os.path.join(path, 'service.json')\n        if os.path.isfile(s):\n            project = self.get_project_from_service(s)\n            service = s\n        else:\n            project = self.get_project_from_vm()\n            service = 'DEFAULT'\n        for filename in files:\n            if filename != 'service.json':\n                workflow = os.path.join(path, filename)\n                if os.path.isfile(workflow):\n                    command = f'python3 starthinker/tool/recipe.py {workflow} -s {service} -p {project} --verbose'\n                    self.execute_command(command)\n    print(f'Finished executing workflows in directory {directory}.')", "docstring": "Executes workflows in the provided directory, one per thread\n\nArgs:\n- directory: (string) The directory with the workflow JSON files to execute", "source": "github-repos"}
{"code": "def __call__(self, token, device, args):\n    func = self.get(token, None)\n    if func is None:\n        raise ValueError(f'Could not find callback with key={token} in the registry.')\n    if isinstance(func, EagerFunc):\n        return func(device, token, args)\n    else:\n        ret = func(*args)\n        if isinstance(ret, bytes):\n            ret = [ret]\n        if isinstance(ret, (tuple, list)):\n            return [self._convert(x) for x in ret]\n        else:\n            return self._convert(ret)", "docstring": "Calls the registered function for `token` with args.\n\nArgs:\ntoken: A key into this `FuncRegistry` identifying which function to call.\ndevice: Name of the device on which outputs of `token`'s corresponding\noperation should be placed. Used iff the function registered for `token`\nis an EagerPyFunc.\nargs: The arguments to pass to the function registered for `token`.\n\nReturns:\nThe output of the function registered for `token`.\n\nRaises:\nValueError: if no function is registered for `token`.", "source": "github-repos"}
{"code": "def __init__(self, zslgen=ZSLGenerator(), film_max_miller=1, substrate_max_miller=1):\n        \n        self.zsl = zslgen\n        self.film_max_miller = film_max_miller\n        self.substrate_max_miller = substrate_max_miller", "docstring": "Initializes the substrate analyzer\nArgs:\nzslgen(ZSLGenerator): Defaults to a ZSLGenerator with standard\ntolerances, but can be fed one with custom tolerances\nfilm_max_miller(int): maximum miller index to generate for film\nsurfaces\nsubstrate_max_miller(int): maximum miller index to generate for\nsubstrate surfaces", "source": "juraj-google-style"}
{"code": "def get_board(self, **query_params):\n    board_json = self.get_board_json(self.base_uri, query_params=query_params)\n    return self.create_board(board_json)", "docstring": "Get board information for this card. Returns a Board object.\n\nReturns:\nBoard: The board this card is attached to", "source": "codesearchnet"}
{"code": "def add_annotation(self, subj: URIRef, pred: URIRef, obj: Union[(Literal, URIRef)], a_p: URIRef, a_o: Union[(Literal, URIRef)]) -> BNode:\n    bnode: BNode = self.triple2annotation_bnode.get((subj, pred, obj))\n    if (not bnode):\n        a_s: BNode = BNode()\n        self.triple2annotation_bnode[(subj, pred, obj)]: BNode = a_s\n        self.g.add((a_s, RDF.type, OWL.Axiom))\n        self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))\n        self.g.add((a_s, OWL.annotatedProperty, self.process_subj_or_pred(pred)))\n        self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))\n    else:\n        a_s: BNode = bnode\n    self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))\n    return bnode", "docstring": "Adds annotation to rdflib graph.\n\nThe annotation axiom will filled in if this is a new annotation for the triple.\n\nArgs:\nsubj: Entity subject to be annotated\npref: Entities Predicate Anchor to be annotated\nobj: Entities Object Anchor to be annotated\na_p: Annotation predicate\na_o: Annotation object\n\nReturns:\nA BNode which is an address to the location in the RDF graph that is storing the\nannotation information.", "source": "codesearchnet"}
{"code": "def EnableNetworkInterfaces(\n      self, interfaces, logger, dhclient_script=None):\n    \n    helpers.CallDhclient(interfaces, logger, dhclient_script=dhclient_script)", "docstring": "Enable the list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.\ndhclient_script: string, the path to a dhclient script used by dhclient.", "source": "juraj-google-style"}
{"code": "def decode(self, image_tokens: torch.LongTensor) -> torch.FloatTensor:\n    if image_tokens.shape[1] != self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]:\n        raise ValueError(f'Expected `image_tokens` to have shape `(batch_size, {self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]})`, but got shape `{image_tokens.shape}`.')\n    codebook_entry = self.quantize.get_codebook_entry(image_tokens)\n    hidden_states = self.post_quant_conv(codebook_entry)\n    pixel_values = self.decoder(hidden_states)\n    return pixel_values", "docstring": "Decodes quantized token IDs into pixel values.\nArgs:\nimage_tokens (torch.LongTensor): Batch of token IDs.\nReturns:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nPixel values decoded from the token IDs.", "source": "github-repos"}
{"code": "def wrap_http_for_auth(credentials, http):\n    \n    orig_request_method = http.request\n\n    \n    def new_request(uri, method='GET', body=None, headers=None,\n                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,\n                    connection_type=None):\n        if not credentials.access_token:\n            _LOGGER.info('Attempting refresh to obtain '\n                         'initial access_token')\n            credentials._refresh(orig_request_method)\n\n        \n        \n        headers = _initialize_headers(headers)\n        credentials.apply(headers)\n        _apply_user_agent(headers, credentials.user_agent)\n\n        body_stream_position = None\n        \n        if all(getattr(body, stream_prop, None) for stream_prop in\n               _STREAM_PROPERTIES):\n            body_stream_position = body.tell()\n\n        resp, content = request(orig_request_method, uri, method, body,\n                                clean_headers(headers),\n                                redirections, connection_type)\n\n        \n        \n        max_refresh_attempts = 2\n        for refresh_attempt in range(max_refresh_attempts):\n            if resp.status not in REFRESH_STATUS_CODES:\n                break\n            _LOGGER.info('Refreshing due to a %s (attempt %s/%s)',\n                         resp.status, refresh_attempt + 1,\n                         max_refresh_attempts)\n            credentials._refresh(orig_request_method)\n            credentials.apply(headers)\n            if body_stream_position is not None:\n                body.seek(body_stream_position)\n\n            resp, content = request(orig_request_method, uri, method, body,\n                                    clean_headers(headers),\n                                    redirections, connection_type)\n\n        return resp, content\n\n    \n    http.request = new_request\n\n    \n    http.request.credentials = credentials", "docstring": "Prepares an HTTP object's request method for auth.\n\nWraps HTTP requests with logic to catch auth failures (typically\nidentified via a 401 status code). In the event of failure, tries\nto refresh the token used and then retry the original request.\n\nArgs:\ncredentials: Credentials, the credentials used to identify\nthe authenticated user.\nhttp: httplib2.Http, an http object to be used to make\nauth requests.", "source": "juraj-google-style"}
{"code": "def get_common_register(start, end):\n    registers = defaultdict(int)\n    for line in lines(start, end):\n        insn = line.insn\n        for operand in insn.operands:\n            if (not operand.type.has_phrase):\n                continue\n            if (not operand.base):\n                continue\n            register_name = operand.base\n            registers[register_name] += 1\n    return max(registers.iteritems(), key=operator.itemgetter(1))[0]", "docstring": "Get the register most commonly used in accessing structs.\n\nAccess to is considered for every opcode that accesses memory\nin an offset from a register::\n\nmov eax, [ebx + 5]\n\nFor every access, the struct-referencing registers, in this case\n`ebx`, are counted. The most used one is returned.\n\nArgs:\nstart: The adderss to start at\nend: The address to finish at", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = TableTransformerFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `TableTransformerFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def save_data(data, file_fmt, append=False, drop_dups=None, info=None, **kwargs):\n    d_file = data_file(file_fmt=file_fmt, info=info, **kwargs)\n    if (append and files.exists(d_file)):\n        data = pd.DataFrame(pd.concat([pd.read_parquet(d_file), data], sort=False))\n        if (drop_dups is not None):\n            data.drop_duplicates(subset=utils.tolist(drop_dups), inplace=True)\n    if (not data.empty):\n        data.to_parquet(d_file)\n    return data", "docstring": "Save data to file\n\nArgs:\ndata: pd.DataFrame\nfile_fmt: data file format in terms of f-strings\nappend: if append data to existing data\ndrop_dups: list, drop duplicates in columns\ninfo: dict, infomation to be hashed and passed to f-strings\n**kwargs: additional parameters for f-strings\n\nExamples:\n>>> data = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n>>> # save_data(\n>>>     # data, '{ROOT}/daily/{typ}.parq',\n>>>     # ROOT='tests/data', typ='earnings'\n>>> # )", "source": "codesearchnet"}
{"code": "def _finalize_namespaces(self, ns_dict=None):\n    if ns_dict:\n        for (ns, alias) in six.iteritems(ns_dict):\n            self._collected_namespaces.add_namespace_uri(ns, alias)\n    self._collected_namespaces.add_namespace_uri(ns_uri=idgen.get_id_namespace(), prefix=idgen.get_id_namespace_alias())\n    self._fix_example_namespace()\n    for (prefix, uri) in six.iteritems(self._input_namespaces):\n        self._collected_namespaces.add_namespace_uri(uri, prefix)\n    self._collected_namespaces.import_from(namespaces.XML_NAMESPACES)\n    for ns_uri in self._collected_namespaces.namespace_uris:\n        preferred_prefix = self._collected_namespaces.preferred_prefix_for_namespace(ns_uri)\n        if preferred_prefix:\n            continue\n        prefixes = self._collected_namespaces.get_prefixes(ns_uri)\n        if prefixes:\n            prefix = next(iter(prefixes))\n        else:\n            prefix = namespaces.lookup_name(ns_uri)\n        if (prefix is None):\n            raise namespaces.NoPrefixesError(ns_uri)\n        self._collected_namespaces.set_preferred_prefix_for_namespace(ns_uri=ns_uri, prefix=prefix, add_if_not_exist=True)", "docstring": "Returns a dictionary of namespaces to be exported with an XML\ndocument.\n\nThis loops over all the namespaces that were discovered and built\nduring the execution of ``collect()`` and\n``_parse_collected_classes()`` and attempts to merge them all.\n\nRaises:\n.namespaces.DuplicatePrefixError: If namespace prefix was\nmapped to more than one namespace.\n.namespaces.NoPrefixError: If a namespace was collected that is\nnot mapped to a prefix.", "source": "codesearchnet"}
{"code": "def get_central_coors(self, row, col):\n        \n        if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:\n            raise ValueError(\"The row (%d) or col (%d) must be >=0 and less than \"\n                             \"nRows (%d) or nCols (%d)!\" % (row, col, self.nRows, self.nCols))\n        else:\n            tmpx = self.xMin + (col + 0.5) * self.dx\n            tmpy = self.yMax - (row + 0.5) * self.dx\n            return tmpx, tmpy", "docstring": "Get the coordinates of central grid.\n\nArgs:\nrow: row number, range from 0 to (nRows - 1).\ncol: col number, range from 0 to (nCols - 1).\n\nReturns:\nXY coordinates. If the row or col are invalid, raise ValueError.", "source": "juraj-google-style"}
{"code": "def _track_trackable(self, trackable, name, overwrite=False):\n    self._maybe_initialize_trackable()\n    if not isinstance(trackable, Trackable):\n        raise TypeError(f'Trackable._track_trackable() can only be used to track objects of type Trackable. Got type {type(trackable)}.')\n    if not getattr(self, '_manual_tracking', True):\n        return trackable\n    new_reference = TrackableReference(name=name, ref=trackable)\n    current_object = self._lookup_dependency(name)\n    if current_object is not None and current_object is not trackable:\n        if not overwrite:\n            raise ValueError(f\"Called Trackable._track_trackable() with name='{name}', but a Trackable with this name is already declared as a dependency. Names must be unique (or overwrite=True).\")\n        for index, (old_name, _) in enumerate(self._self_unconditional_checkpoint_dependencies):\n            if name == old_name:\n                self._self_unconditional_checkpoint_dependencies[index] = new_reference\n    elif current_object is None:\n        self._self_unconditional_checkpoint_dependencies.append(new_reference)\n        self._handle_deferred_dependencies(name=name, trackable=trackable)\n    self._self_unconditional_dependency_names[name] = trackable\n    return trackable", "docstring": "Declare a dependency on another `Trackable` object.\n\nIndicates that checkpoints for this object should include variables from\n`trackable`.\n\nVariables in a checkpoint are mapped to `Trackable`s based on the names\nprovided when the checkpoint was written. To avoid breaking existing\ncheckpoints when modifying a class, neither variable names nor dependency\nnames (the names passed to `_track_trackable`) may change.\n\nArgs:\ntrackable: A `Trackable` which this object depends on.\nname: A local name for `trackable`, used for loading checkpoints into the\ncorrect objects.\noverwrite: Boolean, whether silently replacing dependencies is OK. Used\nfor __setattr__, where throwing an error on attribute reassignment would\nbe inappropriate.\n\nReturns:\n`trackable`, for convenience when declaring a dependency and\nassigning to a member variable in one statement.\n\nRaises:\nTypeError: If `trackable` does not inherit from `Trackable`.\nValueError: If another object is already tracked by this name.", "source": "github-repos"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not path_spec:\n      raise ValueError('Missing path specification.')\n\n    data_stream = getattr(path_spec, 'data_stream', None)\n\n    self._file_system = resolver.Resolver.OpenFileSystem(\n        path_spec, resolver_context=self._resolver_context)\n\n    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n    if not file_entry:\n      raise IOError('Unable to open file entry.')\n\n    fsntfs_data_stream = None\n    fsntfs_file_entry = file_entry.GetNTFSFileEntry()\n    if not fsntfs_file_entry:\n      raise IOError('Unable to open NTFS file entry.')\n\n    if data_stream:\n      fsntfs_data_stream = fsntfs_file_entry.get_alternate_data_stream_by_name(\n          data_stream)\n      if not fsntfs_data_stream:\n        raise IOError('Unable to open data stream: {0:s}.'.format(\n            data_stream))\n\n    elif not fsntfs_file_entry.has_default_data_stream():\n      raise IOError('Missing default data stream.')\n\n    self._fsntfs_data_stream = fsntfs_data_stream\n    self._fsntfs_file_entry = fsntfs_file_entry", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(GetAttributesResponsePayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n        unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        unique_identifier.read(local_buffer, kmip_version=kmip_version)\n        self.unique_identifier = unique_identifier.value\n    else:\n        raise exceptions.InvalidKmipEncoding('The GetAttributes response payload encoding is missing the unique identifier.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        self._attributes = list()\n        while self.is_tag_next(enums.Tags.ATTRIBUTE, local_buffer):\n            attribute = objects.Attribute()\n            attribute.read(local_buffer, kmip_version=kmip_version)\n            self._attributes.append(attribute)\n    elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):\n        attributes = objects.Attributes()\n        attributes.read(local_buffer, kmip_version=kmip_version)\n        temp_attr = objects.convert_attributes_to_template_attribute(attributes)\n        self._attributes = temp_attr.attributes\n    else:\n        raise exceptions.InvalidKmipEncoding('The GetAttributes response payload encoding is missing the attributes structure.')\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the GetAttributes response payload and decode\nit into its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, name):\n        \n        CodeEntity.__init__(self, scope, parent)\n        self.name = name\n        self.children = []", "docstring": "Constructor for namespaces.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the namespace in the program.", "source": "juraj-google-style"}
{"code": "def _full_reduce(nodes):\n    (was_reduced, nodes) = maybe_reduce(nodes)\n    while was_reduced:\n        (was_reduced, nodes) = maybe_reduce(nodes)\n    return nodes", "docstring": "Apply degree reduction to ``nodes`` until it can no longer be reduced.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes in the curve.\n\nReturns:\nnumpy.ndarray: The fully degree-reduced nodes.", "source": "codesearchnet"}
{"code": "async def _get_person_json(self, id_, url_params=None):\n    url = self.url_builder('person/{person_id}', dict(person_id=id_), url_params=(url_params or OrderedDict()))\n    data = (await self.get_data(url))\n    return data", "docstring": "Retrieve raw person JSON by ID.\n\nArguments:\nid_ (:py:class:`int`): The person's TMDb ID.\nurl_params (:py:class:`dict`): Any additional URL parameters.\n\nReturns:\n:py:class:`dict`: The JSON data.", "source": "codesearchnet"}
{"code": "def add_record_references(self, app_id, record_id, field_id, target_record_ids):\n    self._swimlane.request('post', 'app/{0}/record/{1}/add-references'.format(app_id, record_id), json={'fieldId': field_id, 'targetRecordIds': target_record_ids})", "docstring": "Bulk operation to directly add record references without making any additional requests\n\nWarnings:\nDoes not perform any app, record, or target app/record validation\n\nArgs:\napp_id (str): Full App ID string\nrecord_id (str): Full parent Record ID string\nfield_id (str): Full field ID to target reference field on parent Record string\ntarget_record_ids (List(str)): List of full target reference Record ID strings", "source": "codesearchnet"}
{"code": "def check_cache(resource_type):\n    \n\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                adapter = args[0]\n                key, val = list(kwargs.items())[0]\n            except IndexError:\n                logger.warning(\"Couldn't generate full index key, skipping cache\")\n            else:\n\n                index_key = (resource_type, key, val)\n                try:\n                    cached_record = adapter._swimlane.resources_cache[index_key]\n                except KeyError:\n                    logger.debug('Cache miss: `{!r}`'.format(index_key))\n                else:\n                    logger.debug('Cache hit: `{!r}`'.format(cached_record))\n                    return cached_record\n\n            \n            return func(*args, **kwargs)\n\n        return wrapper\n    return decorator", "docstring": "Decorator for adapter methods to check cache for resource before normally sending requests to retrieve data\n\nOnly works with single kwargs, almost always used with @one_of_keyword_only decorator\n\nArgs:\nresource_type (type(APIResource)): Subclass of APIResource of cache to be checked when called", "source": "juraj-google-style"}
{"code": "def lf_empirical_accuracies(L, Y):\n    \n    \n    Y = arraylike_to_numpy(Y)\n    L = L.toarray()\n    X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1))\n    return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)", "docstring": "Return the **empirical accuracy** against a set of labels Y (e.g. dev\nset) for each LF.\nArgs:\nL: an n x m scipy.sparse matrix where L_{i,j} is the label given by the\njth LF to the ith candidate\nY: an [n] or [n, 1] np.ndarray of gold labels", "source": "juraj-google-style"}
{"code": "def GetTokenBalance(self, token, watch_only=0):\n    total = Decimal(0)\n    if (watch_only > 0):\n        for addr in self._watch_only:\n            balance = token.GetBalance(self, addr)\n            total += balance\n    else:\n        for contract in self._contracts.values():\n            balance = token.GetBalance(self, contract.Address)\n            total += balance\n    return total", "docstring": "Get the balance of the specified token.\n\nArgs:\ntoken (NEP5Token): an instance of type neo.Wallets.NEP5Token to get the balance from.\nwatch_only (bool): True, to limit to watch only wallets.\n\nReturns:\nDecimal: total balance for `token`.", "source": "codesearchnet"}
{"code": "def add_param_summary(*summary_lists, **kwargs):\n    collections = kwargs.pop('collections', None)\n    assert (len(kwargs) == 0), ('Unknown kwargs: ' + str(kwargs))\n    ctx = get_current_tower_context()\n    if ((ctx is not None) and (not ctx.is_main_training_tower)):\n        return\n    params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n    with cached_name_scope('param-summary'):\n        for p in params:\n            name = p.op.name\n            for (rgx, actions) in summary_lists:\n                if (not rgx.endswith('$')):\n                    rgx = (rgx + '$')\n                if re.match(rgx, name):\n                    add_tensor_summary(p, actions, name=name, collections=collections)", "docstring": "Add summary ops for all trainable variables matching the regex, under a\nreused 'param-summary' name scope.\nThis function is a no-op if not calling from main training tower.\n\nArgs:\nsummary_lists (list): each is (regex, [list of summary type]).\nSummary type is defined in :func:`add_tensor_summary`.\ncollections (list[str]): collections of the summary ops.\n\nExample:\n\n.. code-block:: python\n\nadd_param_summary(\n('.*/W', ['histogram', 'rms']),\n('.*/gamma', ['scalar']),\n)", "source": "codesearchnet"}
{"code": "def forbidden(cls, errors=None):\n        \n        if cls.expose_status:  \n            cls.response.content_type = 'application/json'\n            cls.response._status_line = '403 Forbidden'\n\n        return cls(403, errors=errors).to_json", "docstring": "Shortcut API for HTTP 403 `Forbidden` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "juraj-google-style"}
{"code": "def _create_distributed_tensor_spec(strategy, tensor_spec):\n    num_replicas = len(strategy.extended.worker_devices)\n    if not _always_wrap(strategy):\n        return tensor_spec\n\n    def _get_value_per_replica(tensor_spec_per_input):\n        value_specs = [tensor_spec_per_input for _ in range(num_replicas)]\n        return values.PerReplicaSpec(*value_specs)\n    return nest.map_structure(_get_value_per_replica, tensor_spec)", "docstring": "Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`.\n\nArgs:\nstrategy: The given `tf.distribute` strategy.\ntensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the\nshape should be None if you have partial batches.\n\nReturns:\nA `tf.TypeSpec` that matches the values produced by a given strategy. This\ncan be a `tf.TensorSpec` or a `PerRelicaSpec`.", "source": "github-repos"}
{"code": "def DeregisterDecrypter(cls, decrypter):\n    \n    encryption_method = decrypter.ENCRYPTION_METHOD.lower()\n    if encryption_method not in cls._decrypters:\n      raise KeyError(\n          'Decrypter for encryption method: {0:s} not set.'.format(\n              decrypter.ENCRYPTION_METHOD))\n\n    del cls._decrypters[encryption_method]", "docstring": "Deregisters a decrypter for a specific encryption method.\n\nArgs:\ndecrypter (type): decrypter class.\n\nRaises:\nKeyError: if the corresponding decrypter is not set.", "source": "juraj-google-style"}
{"code": "def __init__(self, proxy: T, reference: Any=None):\n    super().__init__('placeholder', proxy)\n    self._reference = reference", "docstring": "Initialize a placeholder expression.\n\nArgs:\nproxy: A proxy object with the type expected to be bound to this\nexpression. Used for type checking at pipeline construction time.", "source": "github-repos"}
{"code": "def get_all_supported_aspect_ratios(max_image_tiles: int) -> List[Tuple[int, int]]:\n    aspect_ratios = []\n    for width in range(1, max_image_tiles + 1):\n        for height in range(1, max_image_tiles + 1):\n            if width * height <= max_image_tiles:\n                aspect_ratios.append((width, height))\n    return aspect_ratios", "docstring": "Computes all allowed aspect ratios for a given maximum number of input tiles.\n\nThis function calculates all possible arrangements of tiles that can be formed\nwithin the constraint of the maximum number of tiles. Each arrangement is\nrepresented by its aspect ratio (width/height) and the corresponding tile configuration.\n\nArgs:\nmax_image_tiles (`int`):\nThe maximum number of tiles allowed.\n\nReturns:\n`List[Tuple[int, int]]`: A list of tuples, each tuple representing a valid (width, height)\nconfiguration in terms of number of tiles.\n\nExample:\n>>> get_all_supported_aspect_ratios(4)\n[(1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (3, 1), (4, 1)]", "source": "github-repos"}
{"code": "def ParseBookmarkAnnotationRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = FirefoxPlacesBookmarkAnnotationEventData()\n    event_data.content = self._GetRowValue(query_hash, row, 'content')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n\n    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_ADDED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastModified')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a bookmark annotation row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _parse_title(dom, details):\n    \n    title = details.find(\"h1\")\n\n    \n    if not title:\n        title = dom.find(\"title\")\n        assert title, \"Can't find <title> tag!\"\n\n        return title[0].getContent().split(\"|\")[0].strip()\n\n    return title[0].getContent().strip()", "docstring": "Parse title/name of the book.\n\nArgs:\ndom (obj): HTMLElement containing whole HTML page.\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr: Book's title.\n\nRaises:\nAssertionError: If title not found.", "source": "juraj-google-style"}
{"code": "def _rapply(input_layer, operation, *op_args, **op_kwargs):\n    op_args = list(op_args)\n    op_args.append(input_layer.tensor)\n    return input_layer.with_tensor(operation(*op_args, **op_kwargs))", "docstring": "Applies the given operation to this after expanding op_args.\n\nArgs:\ninput_layer: The input layer for this op.\noperation: An operation that takes a tensor and the supplied args.\n*op_args: Extra arguments for operation.\n**op_kwargs: Keyword arguments for the operation.\nReturns:\nA new layer with operation applied.", "source": "codesearchnet"}
{"code": "def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None):\n        \n        new_case = Case(case_id=case_obj.case_id,\n                        name=case_obj.name,\n                        variant_source=case_obj.variant_source,\n                        variant_type=vtype,\n                        variant_mode=mode,\n                        pedigree=ped_svg,\n                        compressed=case_obj.compressed,\n                        tabix_index=case_obj.tabix_index)\n\n        \n        inds = [Individual(\n            ind_id=ind.ind_id,\n            name=ind.name,\n            mother=ind.mother,\n            father=ind.father,\n            sex=ind.sex,\n            phenotype=ind.phenotype,\n            ind_index=ind.ind_index,\n            variant_source=ind.variant_source,\n            bam_path=ind.bam_path,\n        ) for ind in case_obj.individuals]\n        new_case.individuals = inds\n        \n        if self.case(new_case.case_id):\n            logger.warning(\"Case already exists in database!\")\n        else:\n            self.session.add(new_case)\n            self.save()\n        return new_case", "docstring": "Load a case with individuals.\n\nArgs:\ncase_obj (puzzle.models.Case): initialized case model", "source": "juraj-google-style"}
{"code": "def _assert_valid_dtypes(self, tensors):\n    valid_dtypes = self._valid_dtypes()\n    for t in tensors:\n        dtype = t.dtype.base_dtype\n        if dtype not in valid_dtypes:\n            raise ValueError('Invalid type %r for %s, expected: %s.' % (dtype, t.name, [v for v in valid_dtypes]))", "docstring": "Asserts tensors are all valid types (see `_valid_dtypes`).\n\nArgs:\ntensors: Tensors to check.\n\nRaises:\nValueError: If any tensor is not a valid type.", "source": "github-repos"}
{"code": "def __getattr__(self, name):\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        if not str(name) in ['_initialized', '_settings']:\n            try:\n                \n                xx = self.read_probes(name)\n                \n                return xx\n                \n            except:\n                \n                print(('class ' + type(self).__name__ + ' has no attribute ' + str(name)))\n                raise AttributeError('class ' + type(self).__name__ + ' has no attribute ' + str(name))", "docstring": "allows to read instrument inputs in the form value = instrument.input\nArgs:\nname: name of input channel\n\nReturns: value of input channel", "source": "juraj-google-style"}
{"code": "def enable_beacon(name, **kwargs):\n    ret = {'comment': [], 'result': True}\n    if (not name):\n        ret['comment'] = 'Beacon name is required.'\n        ret['result'] = False\n        return ret\n    if (('test' in kwargs) and kwargs['test']):\n        ret['comment'] = 'Beacon {0} would be enabled.'.format(name)\n    else:\n        _beacons = list_(return_yaml=False, **kwargs)\n        if (name not in _beacons):\n            ret['comment'] = 'Beacon {0} is not currently configured.'.format(name)\n            ret['result'] = False\n            return ret\n        try:\n            eventer = salt.utils.event.get_event('minion', opts=__opts__)\n            res = __salt__['event.fire']({'func': 'enable_beacon', 'name': name}, 'manage_beacons')\n            if res:\n                event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_enabled_complete', wait=kwargs.get('timeout', 30))\n                if (event_ret and event_ret['complete']):\n                    beacons = event_ret['beacons']\n                    beacon_config_dict = _get_beacon_config_dict(beacons[name])\n                    if (('enabled' in beacon_config_dict) and beacon_config_dict['enabled']):\n                        ret['result'] = True\n                        ret['comment'] = 'Enabled beacon {0} on minion.'.format(name)\n                    else:\n                        ret['result'] = False\n                        ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name)\n                elif event_ret:\n                    ret['result'] = False\n                    ret['comment'] = event_ret['comment']\n                else:\n                    ret['result'] = False\n                    ret['comment'] = 'Did not receive the manage event before the timeout of {0}s'.format(kwargs.get('timeout', 30))\n                return ret\n        except KeyError:\n            ret['result'] = False\n            ret['comment'] = 'Event module not available. Beacon enable job failed.'\n    return ret", "docstring": "Enable a beacon on the minion.\n\nArgs:\nname (str): Name of the beacon to enable.\n\nReturns:\ndict: Boolean and status message on success or failure of enable.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' beacons.enable_beacon ps", "source": "codesearchnet"}
{"code": "def releases(self, **kwargs):\n    path = self._get_id_path('releases')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the release date and certification information by country for a\nspecific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def UploadAccount(self, hash_algorithm, hash_key, accounts):\n    param = {'hashAlgorithm': hash_algorithm, 'signerKey': hash_key, 'users': accounts}\n    return self._InvokeGitkitApi('uploadAccount', param)", "docstring": "Uploads multiple accounts to Gitkit server.\n\nArgs:\nhash_algorithm: string, algorithm to hash password.\nhash_key: string, base64-encoded key of the algorithm.\naccounts: array of accounts to be uploaded.\n\nReturns:\nResponse of the API.", "source": "codesearchnet"}
{"code": "def _pad_for_batching(self, pixel_values: List['torch.Tensor']) -> List['torch.Tensor']:\n    max_patch = max((len(x) for x in pixel_values))\n    pixel_values = [torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]]) for image in pixel_values]\n    return pixel_values", "docstring": "Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.\n\nArgs:\npixel_values (`List[torch.Tensor]`):\nAn array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`)\n\nReturns:\nList[`torch.Tensor`]: The padded images.", "source": "github-repos"}
{"code": "def GetMessageStrings(cls, formatter_mediator, event):\n    \n    formatter_object = cls.GetFormatterObject(event.data_type)\n    return formatter_object.GetMessages(formatter_mediator, event)", "docstring": "Retrieves the formatted message strings for a specific event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions between\nformatters and other components, such as storage and Windows EventLog\nresources.\nevent (EventObject): event.\n\nReturns:\nlist[str, str]: long and short version of the message string.", "source": "juraj-google-style"}
{"code": "def list_locations(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/locations?api-version=', BASE_API])\n    return do_get(endpoint, access_token)", "docstring": "List available locations for a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON list of locations.", "source": "codesearchnet"}
{"code": "def __init__(self, n=65, radius=1, port_distance_from_surface=.07):\n        \n        super(Sphere, self).__init__()\n        particle = mb.Particle(name='np')\n        particle.add(mb.Port(anchor=particle), label='out')\n\n        \n        pattern = mb.SpherePattern(n)\n        \n        pattern.scale(radius)\n\n        particles = pattern.apply(particle, orientation='normal', compound_port='out')\n        self.add(particles, label='np_[$]')\n\n        \n        for i, pos in enumerate(pattern.points):\n            particle = mb.Particle(name=\"np\", pos=pos)\n            self.add(particle, \"np_{}\".format(i))\n            port = mb.Port(anchor=particle)\n            self.add(port, \"port_{}\".format(i))\n\n            \n            port.spin(-pi/2, [0, 0, 1])\n            \n            port.spin(-arcsin(pos[2]/radius), [0, 1, 0])\n            \n            port.spin(arctan2(pos[1], pos[0]), [0, 0, 1])\n            \n            port.translate(pos/radius * port_distance_from_surface)", "docstring": "Initialize a Sphere object.\n\nArgs:\nn (int): Number of points used to construct the Sphere.\nradius (float): Radius of the Sphere.\nport_distance_from_surface (float): Distance of Ports from Sphere.", "source": "juraj-google-style"}
{"code": "def scatterplot_matrix(df, features, downsample_frac=None, figsize=(15, 15)):\n    if downsample_frac:\n        df = df.sample(frac=downsample_frac)\n    plt.figure(figsize=figsize)\n    sns.pairplot(df[features], hue='target')\n    plt.show()", "docstring": "Plot a scatterplot matrix for a list of features, colored by target value.\n\nExample: `scatterplot_matrix(X, X.columns.tolist(), downsample_frac=0.01)`\n\nArgs:\ndf: Pandas dataframe containing the target column (named 'target').\nfeatures: The list of features to include in the correlation plot.\ndownsample_frac: Dataframe downsampling rate (0.1 to include 10% of the dataset).\nfigsize: The size of the plot.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.NotifyReviewEvent = channel.unary_unary(\n        '/pb.Analyzer/NotifyReviewEvent',\n        request_serializer=lookout_dot_sdk_dot_event__pb2.ReviewEvent.SerializeToString,\n        response_deserializer=lookout_dot_sdk_dot_service__analyzer__pb2.EventResponse.FromString,\n        )\n    self.NotifyPushEvent = channel.unary_unary(\n        '/pb.Analyzer/NotifyPushEvent',\n        request_serializer=lookout_dot_sdk_dot_event__pb2.PushEvent.SerializeToString,\n        response_deserializer=lookout_dot_sdk_dot_service__analyzer__pb2.EventResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):\n    UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))\n    header = ('NCOLS %d\\nNROWS %d\\nXLLCENTER %f\\nYLLCENTER %f\\nCELLSIZE %f\\nNODATA_VALUE %f' % (xsize, ysize, (geotransform[0] + (0.5 * geotransform[1])), (geotransform[3] - ((ysize - 0.5) * geotransform[1])), geotransform[1], nodata_value))\n    with open(filename, 'w', encoding='utf-8') as f:\n        f.write(header)\n        for i in range(0, ysize):\n            for j in range(0, xsize):\n                f.write(('%s\\t' % repr(data[i][j])))\n            f.write('\\n')\n    f.close()", "docstring": "Output Raster to ASCII file.\n\nArgs:\nfilename: output ASCII filename.\ndata: 2D array data.\nxsize: Col count.\nysize: Row count.\ngeotransform: geographic transformation.\nnodata_value: nodata_flow value.", "source": "codesearchnet"}
{"code": "def saturate_cast(value, dtype, name=None):\n    with ops.name_scope(name, 'saturate_cast', [value]) as name:\n        value = ops.convert_to_tensor(value, name='value')\n        dtype = dtypes.as_dtype(dtype).base_dtype\n        in_dtype = value.dtype\n        if in_dtype.is_complex:\n            if dtype.is_complex:\n                real_in_dtype = in_dtype.real_dtype\n                real_out_dtype = dtype.real_dtype\n                if real_in_dtype.min < real_out_dtype.min or real_in_dtype.max > real_out_dtype.max:\n                    value = gen_math_ops._clip_by_value(value, ops.convert_to_tensor(builtins.complex(real_out_dtype.min, real_out_dtype.min), dtype=in_dtype), ops.convert_to_tensor(builtins.complex(real_out_dtype.max, real_out_dtype.max), dtype=in_dtype), name='clamp')\n                return cast(value, dtype, name=name)\n            else:\n                value = real(value)\n                logging.warn('Casting complex to real discards imaginary part.')\n                in_dtype = in_dtype.real_dtype\n        out_real_dtype = dtype.real_dtype\n        if forward_compat.forward_compatible(2024, 11, 1) or in_dtype.min < out_real_dtype.min or in_dtype.max > out_real_dtype.max:\n            np_dtype = in_dtype.as_numpy_dtype\n            try:\n                promoted_type = np.promote_types(np_dtype, out_real_dtype.as_numpy_dtype)\n            except TypeError:\n                promoted_type = float\n            min_limit = np_dtype(np.maximum(in_dtype.min, out_real_dtype.min))\n            promoted = np.array([min_limit, out_real_dtype.min], dtype=promoted_type)\n            if promoted[0] < promoted[1]:\n                min_limit = np.nextafter(min_limit, np_dtype(0), dtype=np_dtype)\n            max_limit = np_dtype(np.minimum(in_dtype.max, out_real_dtype.max))\n            promoted = np.array([max_limit, out_real_dtype.max], dtype=promoted_type)\n            if promoted[0] > promoted[1]:\n                max_limit = np.nextafter(max_limit, np_dtype(0), dtype=np_dtype)\n            value = gen_math_ops._clip_by_value(value, ops.convert_to_tensor(min_limit, dtype=in_dtype), ops.convert_to_tensor(max_limit, dtype=in_dtype), name='clamp')\n        return cast(value, dtype, name=name)", "docstring": "Performs a safe saturating cast of `value` to `dtype`.\n\nThis function casts the input to `dtype` without overflow.  If\nthere is a danger that values would over or underflow in the cast, this op\napplies the appropriate clamping before the cast.  See `tf.cast` for more\ndetails.\n\nArgs:\nvalue: A `Tensor`.\ndtype: The desired output `DType`.\nname: A name for the operation (optional).\n\nReturns:\n`value` safely cast to `dtype`.", "source": "github-repos"}
{"code": "def leak(self):\n    (capacity, last_leak) = self.storage.mget(self.key_amount, self.key_last_leak, coherent=True)\n    now = time.time()\n    if last_leak:\n        elapsed = (now - last_leak)\n        decrement = (elapsed * self.rate)\n        new_capacity = max(int((capacity - decrement)), 0)\n    else:\n        new_capacity = 0\n    self.storage.mset({self.key_amount: new_capacity, self.key_last_leak: now})\n    return new_capacity", "docstring": "Leak the adequate amount of data from the bucket.\n\nThis should be called before any consumption takes place.\n\nReturns:\nint: the new capacity of the bucket", "source": "codesearchnet"}
{"code": "def _validate_measure_sampling(self, experiment):\n    if (self._shots <= 1):\n        self._sample_measure = False\n        return\n    if hasattr(experiment.config, 'allows_measure_sampling'):\n        self._sample_measure = experiment.config.allows_measure_sampling\n    else:\n        measure_flag = False\n        for instruction in experiment.instructions:\n            if (instruction.name == 'reset'):\n                self._sample_measure = False\n                return\n            if measure_flag:\n                if (instruction.name not in ['measure', 'barrier', 'id', 'u0']):\n                    self._sample_measure = False\n                    return\n            elif (instruction.name == 'measure'):\n                measure_flag = True\n        self._sample_measure = True", "docstring": "Determine if measure sampling is allowed for an experiment\n\nArgs:\nexperiment (QobjExperiment): a qobj experiment.", "source": "codesearchnet"}
{"code": "def process_openxml_file(filename: str, print_good: bool, delete_if_bad: bool) -> None:\n    print_bad = (not print_good)\n    try:\n        file_good = is_openxml_good(filename)\n        file_bad = (not file_good)\n        if ((print_good and file_good) or (print_bad and file_bad)):\n            print(filename)\n        if (delete_if_bad and file_bad):\n            log.warning('Deleting: {}', filename)\n            os.remove(filename)\n    except Exception as e:\n        log.critical('Uncaught error in subprocess: {!r}\\n{}', e, traceback.format_exc())\n        raise", "docstring": "Prints the filename of, or deletes, an OpenXML file depending on whether\nit is corrupt or not.\n\nArgs:\nfilename: filename to check\nprint_good: if ``True``, then prints the filename if the file\nappears good.\ndelete_if_bad: if ``True``, then deletes the file if the file\nappears corrupt.", "source": "codesearchnet"}
{"code": "def random_masking(self, sequence, noise=None):\n    batch_size, seq_length, dim = sequence.shape\n    len_keep = int(seq_length * (1 - self.config.mask_ratio))\n    if noise is None:\n        noise = torch.rand(batch_size, seq_length, device=sequence.device)\n    ids_shuffle = torch.argsort(noise, dim=1).to(sequence.device)\n    ids_restore = torch.argsort(ids_shuffle, dim=1).to(sequence.device)\n    ids_keep = ids_shuffle[:, :len_keep]\n    sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim))\n    mask = torch.ones([batch_size, seq_length], device=sequence.device)\n    mask[:, :len_keep] = 0\n    mask = torch.gather(mask, dim=1, index=ids_restore)\n    return (sequence_unmasked, mask, ids_restore)", "docstring": "Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random\nnoise.\n\nArgs:\nsequence (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`)\nnoise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*) which is\nmainly used for testing purposes to control randomness and maintain the reproducibility", "source": "github-repos"}
{"code": "def verified(self, institute_id):\n    query = {'verb': 'validate', 'institute': institute_id}\n    res = []\n    validate_events = self.event_collection.find(query)\n    for validated in list(validate_events):\n        case_id = validated['case']\n        var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])\n        case_obj = self.case(case_id=case_id)\n        if ((not case_obj) or (not var_obj)):\n            continue\n        var_obj['case_obj'] = {'display_name': case_obj['display_name'], 'individuals': case_obj['individuals']}\n        res.append(var_obj)\n    return res", "docstring": "Return all verified variants for a given institute\n\nArgs:\ninstitute_id(str): institute id\n\nReturns:\nres(list): a list with validated variants", "source": "codesearchnet"}
{"code": "def add_router(self, path, router):\n        \n        if self.strict_router_check and not isinstance(router, Router):\n            raise TypeError(\"Expected object of type Router, found %r\" % type(router))\n\n        log.info(\"{} Adding router {} on path {}\", id(self), router, path)\n        self.middleware.add(path=path,\n                            func=router,\n                            method_mask=HTTPMethod.ALL,)", "docstring": "Adds a router to the list of routers\n\nArgs:\npath (str or regex): The path on which the router binds\nrouter (growler.Router): The router which will respond to\nrequests\n\nRaises:\nTypeError: If `strict_router_check` attribute is True and\nthe router is not an instance of growler.Router.", "source": "juraj-google-style"}
{"code": "def get_metadata_attribute(self, metaname):\n    metadata_value = self.metadata.get(metaname, None)\n    if (metadata_value is None):\n        raise NoMetadataException(('No metadata attribute named %s' % metaname))\n    if (not isinstance(metadata_value, list)):\n        raise TypeError('Metadata is not a list and it should be.')\n    if (len(metadata_value) > 1):\n        return metadata_value\n    else:\n        return metadata_value[0]", "docstring": "Get the metadata attribute by the name.\n\nArgs:\nmetaname (:obj:`str`): Name of the attribute\n\nReturns:\n:obj:`list` or :obj:`str`: Value(s) of the requested metadata\nattribute\n\nRaises:\nNoMetadataException: Attribute error\nTypeError: Metadata should be a list", "source": "codesearchnet"}
{"code": "def OpenFileEntry(cls, path_spec_object, resolver_context=None):\n    file_system = cls.OpenFileSystem(path_spec_object, resolver_context=resolver_context)\n    if (resolver_context is None):\n        resolver_context = cls._resolver_context\n    file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)\n    resolver_context.ReleaseFileSystem(file_system)\n    return file_entry", "docstring": "Opens a file entry object defined by path specification.\n\nArgs:\npath_spec_object (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built in context which is not multi process safe.\n\nReturns:\nFileEntry: file entry or None if the path specification could not be\nresolved.", "source": "codesearchnet"}
{"code": "def _parse_exchange_token_response(content):\n    \n    resp = {}\n    content = _helpers._from_bytes(content)\n    try:\n        resp = json.loads(content)\n    except Exception:\n        \n        \n        resp = _helpers.parse_unique_urlencoded(content)\n\n    \n    if resp and 'expires' in resp:\n        resp['expires_in'] = resp.pop('expires')\n\n    return resp", "docstring": "Parses response of an exchange token request.\n\nMost providers return JSON but some (e.g. Facebook) return a\nurl-encoded string.\n\nArgs:\ncontent: The body of a response\n\nReturns:\nContent as a dictionary object. Note that the dict could be empty,\ni.e. {}. That basically indicates a failure.", "source": "juraj-google-style"}
{"code": "def secondary_training_status_message(job_description, prev_description):\n    \n\n    if job_description is None or job_description.get('SecondaryStatusTransitions') is None\\\n            or len(job_description.get('SecondaryStatusTransitions')) == 0:\n        return ''\n\n    prev_description_secondary_transitions = prev_description.get('SecondaryStatusTransitions')\\\n        if prev_description is not None else None\n    prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])\\\n        if prev_description_secondary_transitions is not None else 0\n    current_transitions = job_description['SecondaryStatusTransitions']\n\n    if len(current_transitions) == prev_transitions_num:\n        \n        transitions_to_print = current_transitions[-1:]\n    else:\n        \n        transitions_to_print = current_transitions[prev_transitions_num - len(current_transitions):]\n\n    status_strs = []\n    for transition in transitions_to_print:\n        message = transition['StatusMessage']\n        time_str = datetime.utcfromtimestamp(\n            time.mktime(job_description['LastModifiedTime'].timetuple())).strftime('%Y-%m-%d %H:%M:%S')\n        status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message))\n\n    return '\\n'.join(status_strs)", "docstring": "Returns a string contains last modified time and the secondary training job status message.\n\nArgs:\njob_description: Returned response from DescribeTrainingJob call\nprev_description: Previous job description from DescribeTrainingJob call\n\nReturns:\nstr: Job status string to be printed.", "source": "juraj-google-style"}
{"code": "def set_requestable(self, requestable=True):\n        \n        \n        self.data['is_requestdata_type'] = requestable\n        if requestable:\n            self.data['private'] = False", "docstring": "Set the dataset to be of type requestable or not\n\nArgs:\nrequestable (bool): Set whether dataset is requestable. Defaults to True.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def make_initial_frame_chooser(real_env, frame_stack_size, simulation_random_starts, simulation_flip_first_random_for_beginning, split=tf.estimator.ModeKeys.TRAIN):\n    initial_frame_rollouts = real_env.current_epoch_rollouts(split=split, minimal_rollout_frames=frame_stack_size)\n\n    def initial_frame_chooser(batch_size):\n        'Frame chooser.'\n        deterministic_initial_frames = initial_frame_rollouts[0][:frame_stack_size]\n        if (not simulation_random_starts):\n            initial_frames = ([deterministic_initial_frames] * batch_size)\n        else:\n            initial_frames = random_rollout_subsequences(initial_frame_rollouts, batch_size, frame_stack_size)\n            if simulation_flip_first_random_for_beginning:\n                initial_frames[0] = deterministic_initial_frames\n        return np.stack([[frame.observation.decode() for frame in initial_frame_stack] for initial_frame_stack in initial_frames])\n    return initial_frame_chooser", "docstring": "Make frame chooser.\n\nArgs:\nreal_env: T2TEnv to take initial frames from.\nframe_stack_size (int): Number of consecutive frames to extract.\nsimulation_random_starts (bool): Whether to choose frames at random.\nsimulation_flip_first_random_for_beginning (bool): Whether to flip the first\nframe stack in every batch for the frames at the beginning.\nsplit (tf.estimator.ModeKeys or None): Data split to take the frames from,\nNone means use all frames.\n\nReturns:\nFunction batch_size -> initial_frames.", "source": "codesearchnet"}
{"code": "def conjugate(self):\n    return self.__class__(scalar=self.scalar, vector=(- self.vector))", "docstring": "Quaternion conjugate, encapsulated in a new instance.\n\nFor a unit quaternion, this is the same as the inverse.\n\nReturns:\nA new Quaternion object clone with its vector part negated", "source": "codesearchnet"}
{"code": "def requestMapIdentity(self, subject, vendorSpecific=None):\n        \n        response = self.requestMapIdentityResponse(subject, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: requestMapIdentityResponse()\n\nArgs:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def run_config_diagnostics(config_path=CONFIG_PATH):\n    \n    config = read_config(config_path)\n    missing_sections = set()\n    malformed_entries = defaultdict(set)\n    for section, expected_section_keys in SECTION_KEYS.items():\n        section_content = config.get(section)\n        if not section_content:\n            missing_sections.add(section)\n        else:\n            for option in expected_section_keys:\n                option_value = section_content.get(option)\n                if not option_value:\n                    malformed_entries[section].add(option)\n    return config_path, missing_sections, malformed_entries", "docstring": "Run diagnostics on the configuration file.\n\nArgs:\nconfig_path (str): Path to the configuration file.\nReturns:\nstr, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing\nsections and a dict that maps each section to the entries that have either missing or empty\noptions.", "source": "juraj-google-style"}
{"code": "def save_data_files(bs, prefix=None, directory=None):\n    filename = 'phonon_band.dat'\n    filename = ('{}_phonon_band.dat'.format(prefix) if prefix else filename)\n    directory = (directory if directory else '.')\n    filename = os.path.join(directory, filename)\n    with open(filename, 'w') as f:\n        header = '\n        f.write(header)\n        for band in bs.bands:\n            for (d, e) in zip(bs.distance, band):\n                f.write('{:.8f} {:.8f}\\n'.format(d, e))\n            f.write('\\n')\n    return filename", "docstring": "Write the phonon band structure data files to disk.\n\nArgs:\nbs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`):\nThe phonon band structure.\nprefix (:obj:`str`, optional): Prefix for data file.\ndirectory (:obj:`str`, optional): Directory in which to save the data.\n\nReturns:\nstr: The filename of the written data file.", "source": "codesearchnet"}
{"code": "def _einsum_matmul_index_helper(gate_indices, number_of_qubits):\n    if ((len(gate_indices) + number_of_qubits) > 26):\n        raise QiskitError('Total number of free indexes limited to 26')\n    tens_in = ascii_lowercase[:number_of_qubits]\n    tens_out = list(tens_in)\n    mat_left = ''\n    mat_right = ''\n    for (pos, idx) in enumerate(reversed(gate_indices)):\n        mat_left += ascii_lowercase[((- 1) - pos)]\n        mat_right += tens_in[((- 1) - idx)]\n        tens_out[((- 1) - idx)] = ascii_lowercase[((- 1) - pos)]\n    tens_out = ''.join(tens_out)\n    return (mat_left, mat_right, tens_in, tens_out)", "docstring": "Return the index string for Numpy.eignsum matrix multiplication.\n\nThe returned indices are to perform a matrix multiplication A.v where\nthe matrix A is an M-qubit matrix, matrix v is an N-qubit vector, and\nM <= N, and identity matrices are implied on the subsystems where A has no\nsupport on v.\n\nArgs:\ngate_indices (list[int]): the indices of the right matrix subsystems\nto contract with the left matrix.\nnumber_of_qubits (int): the total number of qubits for the right matrix.\n\nReturns:\ntuple: (mat_left, mat_right, tens_in, tens_out) of index strings for\nthat may be combined into a Numpy.einsum function string.\n\nRaises:\nQiskitError: if the total number of qubits plus the number of\ncontracted indices is greater than 26.", "source": "codesearchnet"}
{"code": "def diet_expert(x, hidden_size, params):\n  \n\n  @fn_with_diet_vars(params)\n  def diet_expert_internal(x):\n    dim = x.get_shape().as_list()[-1]\n    h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)\n    y = tf.layers.dense(h, dim, use_bias=False)\n    y *= tf.rsqrt(tf.to_float(dim * hidden_size))\n    return y\n\n  return diet_expert_internal(x)", "docstring": "A two-layer feed-forward network with relu activation on hidden layer.\n\nUses diet variables.\nRecomputes hidden layer on backprop to save activation memory.\n\nArgs:\nx: a Tensor with shape [batch, io_size]\nhidden_size: an integer\nparams: a diet variable HParams object.\n\nReturns:\na Tensor with shape [batch, io_size]", "source": "juraj-google-style"}
{"code": "def extend(self, *bindings):\n        \n        self._bindings.extend(self._preprocess(bindings))\n        return self", "docstring": "Append the given bindings to this keymap.\n\nArguments:\n*bindings (Binding): Bindings to be added.\nReturns:\nKeymap: self", "source": "juraj-google-style"}
{"code": "def _obj_to_path(obj):\n    if (obj is None):\n        return obj\n    if (inspect.isclass(obj) or inspect.isfunction(obj)):\n        fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)\n        if (fetched is None):\n            raise ValueError(('Object %r must be defined on the top level of a module.' % obj))\n        return ('%s.%s' % (obj.__module__, obj.__name__))\n    raise TypeError(('Unexpected type %s.' % type(obj)))", "docstring": "Returns the fully qualified path to the object.\n\nArgs:\nobj: obj must be a new style top level class, or a top level function.\nNo inner function or static method.\n\nReturns:\nFully qualified path to the object.\n\nRaises:\nTypeError: when argument obj has unsupported type.\nValueError: when obj can't be discovered on the top level.", "source": "codesearchnet"}
{"code": "def _retrieve_all_filtered_nodes(self):\n    if self._node_filters is None:\n        return None\n    all_filtered_nodes = set()\n    nodes_to_visit = list(self._node_filters)\n    while nodes_to_visit:\n        node_path = nodes_to_visit.pop(0)\n        node_id = self._node_path_to_id[node_path]\n        if node_id in all_filtered_nodes:\n            continue\n        all_filtered_nodes.add(node_id)\n        node, setter = self._loaded_nodes.get(node_id, (None, None))\n        if node is not None:\n            if not isinstance(node, base.Trackable):\n                raise TypeError(f\"Error when processing dictionary values passed to nodes_to_load.Object at {node_path} is expected to be a checkpointable (i.e. 'trackable') TensorFlow object (e.g. tf.Variable, tf.Module or Keras layer).\")\n            node._maybe_initialize_trackable()\n        for reference in self._proto.nodes[node_id].children:\n            child_object, _ = self._loaded_nodes.get(reference.node_id, (None, None))\n            if child_object is None and node is not None:\n                child_object = node._lookup_dependency(reference.local_name)\n                if isinstance(child_object, data_structures.TrackableDataStructure):\n                    setter = lambda *args: None\n                    self._loaded_nodes[reference.node_id] = (child_object, setter)\n            child_path = '{}.{}'.format(node_path, reference.local_name)\n            self._node_path_to_id[child_path] = reference.node_id\n            nodes_to_visit.append(child_path)\n    if 0 in all_filtered_nodes:\n        return None\n    return all_filtered_nodes", "docstring": "Traverses through the object graph to get the IDs of all nodes to load.\n\nAs a side-effect, if node_filters is a dictionary that contains already-\ncreated objects, then the children tracked by those objects will be\nadded to node_filters.\n\nReturns:\nList of all nodes to load, or None if all nodes should be loaded.", "source": "github-repos"}
{"code": "def _select_class_id(ids, selected_id):\n    ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)\n    if isinstance(ids, sparse_tensor.SparseTensor):\n        return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values, selected_id))\n    ids_shape = array_ops.shape(ids, out_type=dtypes.int64)\n    ids_last_dim = array_ops.size(ids_shape) - 1\n    filled_selected_id_shape = math_ops.reduced_shape(ids_shape, array_ops.reshape(ids_last_dim, [1]))\n    filled_selected_id = array_ops.fill(filled_selected_id_shape, math_ops.cast(selected_id, dtypes.int64))\n    result = sets.set_intersection(filled_selected_id, ids)\n    return sparse_tensor.SparseTensor(indices=result.indices, values=result.values, dense_shape=ids_shape)", "docstring": "Filter all but `selected_id` out of `ids`.\n\nArgs:\nids: `int64` `Tensor` or `SparseTensor` of IDs.\nselected_id: Int id to select.\n\nReturns:\n`SparseTensor` of same dimensions as `ids`. This contains only the entries\nequal to `selected_id`.", "source": "github-repos"}
{"code": "def assertAllLessEqual(self, a, comparison_target):\n    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)\n    a = self._GetNdArray(a)\n    self.assertLessEqual(np.max(a), comparison_target)", "docstring": "Assert element values are all less than or equal to a target value.\n\nArgs:\na: The numpy `ndarray`, or anything that can be converted into a numpy\n`ndarray` (including Tensor).\ncomparison_target: The target value of comparison.", "source": "github-repos"}
{"code": "def parse(self, string, root=None):\n    phrases = []\n    meta = self.meta.search(string)\n    while meta:\n        pos = meta.start()\n        if (meta.group() == '<'):\n            (string, child, meta) = self.open_phrase(string, pos)\n            if (child and root):\n                root.nested.append(child)\n            elif child:\n                phrases.append(child)\n            continue\n        elif root:\n            if (meta.group() == '('):\n                meta = self.meta.search(string, (pos + 1))\n                if (meta.group() == ')'):\n                    (string, root, meta) = self.handle_arguments(string, root, pos, meta.start())\n                    continue\n            elif (meta.group() == '>'):\n                (string, phrase, meta) = self.close_phrase(string, root, pos)\n                if phrase:\n                    return (string, phrase)\n                continue\n        (string, meta) = self.escape_meta(string, pos)\n    if (not root):\n        return (string, phrases)\n    word = re.search('([\\\\w\\\\s]+)(?![\\\\d]*>[\\\\w\\\\s]+>)', string)\n    what = 'No closing tag found for opening tag'\n    if word:\n        what += \" after expression '{0}'\".format(word.group())\n    raise errors.ParseError((what + '!'))", "docstring": "Parses a string to handle escaped tags and retrieve phrases.\n\nThis method works recursively to parse nested tags. When escaped\ntags are found, those are removed from the string. Also argument\nsequences are removed from the string. The string returned can\nthus be quite different from the string passed.\n\nArguments:\nstring (str): The string to parse.\nroot (Phrase): If in a recursive call, the root/parent phrase.\n\nReturns:\nFor one, the escaped string (without escape characters and\nphrase arguments). For the other, it depends on the stack-depth.\nIf this is the lowest recursion depth/level (i.e. the stack\ncall resulting from the first function call in self.beautify()),\nit will return a list of phrases. For higher stack levels (\ni.e. resulting from recursive function calls from with\nself.parse(), for nested phrases), it returns exactly one\nPhrase instance.\n\nRaises:\nerrors.ParseError: If no closing tag could be\nfound for an opening tag.", "source": "codesearchnet"}
{"code": "def words_string(fake: Faker, n: int) -> str:\n    return ' '.join(fake.words(n))", "docstring": "Provide Faker words as a joined string.\n\nArgs:\n* fake: Faker instance\n* n: number of words\n\nReturns:\n* string of n words joined by spaces", "source": "github-repos"}
{"code": "def get_newest(blocks, layout_blocks):\n    layout_temp = list(layout_blocks)\n    for i in range(0, len(layout_temp)):\n        for k in range(0, len(layout_blocks)):\n            if (blocks[layout_temp[i]].ec_hdr.image_seq != blocks[layout_blocks[k]].ec_hdr.image_seq):\n                continue\n            if (blocks[layout_temp[i]].leb_num != blocks[layout_blocks[k]].leb_num):\n                continue\n            if (blocks[layout_temp[i]].vid_hdr.sqnum > blocks[layout_blocks[k]].vid_hdr.sqnum):\n                del layout_blocks[k]\n                break\n    return layout_blocks", "docstring": "Filter out old layout blocks from list\n\nArguments:\nList:blocks        -- List of block objects\nList:layout_blocks -- List of layout block indexes\n\nReturns:\nList -- Newest layout blocks in list", "source": "codesearchnet"}
{"code": "def push(self, key, value, *, section=DataStoreDocumentSection.Data):\n    key_notation = '.'.join([section, key])\n    result = self._collection.update_one({'_id': ObjectId(self._workflow_id)}, {'$push': {key_notation: self._encode_value(value)}, '$currentDate': {'lastModified': True}})\n    return (result.modified_count == 1)", "docstring": "Appends a value to a list in the specified section of the document.\n\nArgs:\nkey (str): The key pointing to the value that should be stored/updated.\nIt supports MongoDB's dot notation for nested fields.\nvalue: The value that should be appended to a list in the data store.\nsection (DataStoreDocumentSection): The section from which the data should\nbe retrieved.\n\nReturns:\nbool: ``True`` if the value could be appended, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def uninstall(pkg):\n    \n    ret = {'result': None, 'output': ''}\n\n    out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' uninstall ' + pkg)\n\n    if out['retcode'] and out['stderr']:\n        ret['stderr'] = out['stderr'].strip()\n        ret['result'] = False\n    else:\n        ret['stdout'] = out['stdout'].strip()\n        ret['result'] = True\n\n    return ret", "docstring": "Uninstall the specified package.\n\nArgs:\npkg (str): The package name.\n\nReturns:\ndict: The ``result`` and ``output``.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' flatpak.uninstall org.gimp.GIMP", "source": "juraj-google-style"}
{"code": "def get_corrections_dict(self, entry):\n        \n        corrections = {}\n        for c in self.corrections:\n            val = c.get_correction(entry)\n            if val != 0:\n                corrections[str(c)] = val\n        return corrections", "docstring": "Returns the corrections applied to a particular entry.\n\nArgs:\nentry: A ComputedEntry object.\n\nReturns:\n({correction_name: value})", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):\n    if (kmip_version < enums.KMIPVersion.KMIP_1_3):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the ProfileInformation object.'.format(kmip_version.value))\n    local_buffer = BytearrayStream()\n    if self._profile_name:\n        self._profile_name.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The ProfileInformation structure is missing the profile name field.')\n    if self._server_uri:\n        self._server_uri.write(local_buffer, kmip_version=kmip_version)\n    if self._server_port:\n        self._server_port.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(ProfileInformation, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the ProfileInformation structure encoding to the data stream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode\nProfileInformation structure data, supporting a write method.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidField: Raised if the profile name field is not defined.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the ProfileInformation structure.", "source": "codesearchnet"}
{"code": "def multiply(x1, x2, output_shape=None, name=None):\n  \n  if not isinstance(x2, Tensor):\n    return ScalarMultiplyOperation(x1, x2).outputs[0]\n  with tf.name_scope(name, default_name=\"mul\"):\n    x1, x2 = binary_arguments_to_tensors(x1, x2)\n    return einsum(\n        [x1, x2],\n        output_shape=_infer_binary_broadcast_shape(\n            x1.shape, x2.shape, output_shape))", "docstring": "Binary multiplication with broadcasting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def register_instance(self, instance: '_instance_base.Instance') -> None:", "docstring": "Treating self as a class definition, register an instance of it.\n\nThis is used for keeping merging call records on instances when generating\nthe formal definition of a class. See InterpreterClass and TupleClass.\n\nArgs:\ninstance: An instance of this class (as a BaseValue)", "source": "github-repos"}
{"code": "def delete(self, collector_id=None):\n        \n        cid = self.collector_id\n\n        if collector_id:\n            cid = collector_id\n\n        \n        url = '{0}/{1}'.format(self.url, cid)\n        request = requests.delete(url, auth=self.auth)\n        try:\n            \n            response = request.json()\n        except ValueError:\n            \n            \n            \n            response = {\n                u'message': u'The request completed successfully.',\n                u'status': 200,\n            }\n        return response", "docstring": "Delete a collector from inventory.\n\nArgs:\ncollector_id (int): id of collector (optional)", "source": "juraj-google-style"}
{"code": "def calculate_parity(n):\n    if (not is_natural(n)):\n        raise ValueError('Expected n to be a positive integer.')\n    y = 0\n    n = abs(n)\n    while n:\n        y += (n & 1)\n        n = (n >> 1)\n    return (y & 1)", "docstring": "Calculates and returns the parity of a number.\n\nThe parity of a number is ``1`` if the number has an odd number of ones\nin its binary representation, otherwise ``0``.\n\nArgs:\nn (int): the number whose parity to calculate\n\nReturns:\n``1`` if the number has an odd number of ones, otherwise ``0``.\n\nRaises:\nValueError: if ``n`` is less than ``0``.", "source": "codesearchnet"}
{"code": "def numeric_function_clean_dataframe(self, axis):\n        \n        result = None\n        query_compiler = self\n        \n        if not axis and len(self.index) == 0:\n            result = pandas.Series(dtype=np.int64)\n\n        nonnumeric = [\n            col\n            for col, dtype in zip(self.columns, self.dtypes)\n            if not is_numeric_dtype(dtype)\n        ]\n        if len(nonnumeric) == len(self.columns):\n            \n            if axis:\n                result = pandas.Series([np.nan for _ in self.index])\n            else:\n                result = pandas.Series([0 for _ in self.index])\n        else:\n            query_compiler = self.drop(columns=nonnumeric)\n        return result, query_compiler", "docstring": "Preprocesses numeric functions to clean dataframe and pick numeric indices.\n\nArgs:\naxis: '0' if columns and '1' if rows.\n\nReturns:\nTuple with return value(if any), indices to apply func to & cleaned Manager.", "source": "juraj-google-style"}
{"code": "def gui(discord_token, discord_client_id):\n    \n\n    logger.info(\"Starting Modis in GUI\")\n\n    import tkinter as tk\n\n    logger.debug(\"Loading packages\")\n    from modis.discord_modis import gui as discord_modis_gui\n    from modis.reddit_modis import gui as reddit_modis_gui\n    from modis.facebook_modis import gui as facebook_modis_gui\n\n    logger.debug(\"Initialising window\")\n\n    \n    root = tk.Tk()\n    root.minsize(width=800, height=400)\n    root.geometry(\"800x600\")\n    root.title(\"Modis Control Panel\")\n    \n    root.iconbitmap(r\"{}/assets/modis.ico\".format(file_dir))\n\n    \n    \n    discord = discord_modis_gui.Frame(root, discord_token, discord_client_id)\n    discord.grid(column=0, row=0, padx=0, pady=0, sticky=\"W E N S\")\n    \n    root.columnconfigure(0, weight=1)\n    root.rowconfigure(0, weight=1)\n    discord.columnconfigure(0, weight=1)\n    discord.rowconfigure(0, weight=1)\n\n    logger.debug(\"GUI initialised\")\n\n    \n    root.mainloop()", "docstring": "Start Modis in gui format.\n\nArgs:\ndiscord_token (str): The bot token for your Discord application\ndiscord_client_id: The bot's client ID", "source": "juraj-google-style"}
{"code": "async def connect(self, client_id, conn_string):\n    conn_id = self.adapter.unique_conn_id()\n    self._client_info(client_id)\n    (await self.adapter.connect(conn_id, conn_string))\n    self._hook_connect(conn_string, conn_id, client_id)", "docstring": "Connect to a device on behalf of a client.\n\nSee :meth:`AbstractDeviceAdapter.connect`.\n\nArgs:\nclient_id (str): The client we are working for.\nconn_string (str): A connection string that will be\npassed to the underlying device adapter to connect.\n\nRaises:\nDeviceServerError: There is an issue with your client_id.\nDeviceAdapterError: The adapter had an issue connecting.", "source": "codesearchnet"}
{"code": "def make_timebar(progress=0, duration=0):\n    duration_string = api_music.duration_to_string(duration)\n    if (duration <= 0):\n        return '---'\n    time_counts = int(round(((progress / duration) * TIMEBAR_LENGTH)))\n    if (time_counts > TIMEBAR_LENGTH):\n        time_counts = TIMEBAR_LENGTH\n    if (duration > 0):\n        bar = ((('│' + (TIMEBAR_PCHAR * time_counts)) + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts))) + '│')\n        time_bar = '{} {}'.format(bar, duration_string)\n    else:\n        time_bar = duration_string\n    return time_bar", "docstring": "Makes a new time bar string\n\nArgs:\nprogress: How far through the current song we are (in seconds)\nduration: The duration of the current song (in seconds)\n\nReturns:\ntimebar (str): The time bar string", "source": "codesearchnet"}
{"code": "def end(self, session):\n    pass", "docstring": "Called at the end of session.\n\nThe `session` argument can be used in case the hook wants to run final ops,\nsuch as saving a last checkpoint.\n\nIf `session.run()` raises exception other than OutOfRangeError or\nStopIteration then `end()` is not called.\nNote the difference between `end()` and `after_run()` behavior when\n`session.run()` raises OutOfRangeError or StopIteration. In that case\n`end()` is called but `after_run()` is not called.\n\nArgs:\nsession: A TensorFlow Session that will be soon closed.", "source": "github-repos"}
{"code": "def __init__(self, log_path, config_path, output_path):\n        \n        if FLAGS.phantomjs_timeout is not None:\n            logging.info(\n                'Using FLAGS.phantomjs_timeout which is deprecated in favor'\n                'of FLAGS.capture_timeout - please update your config')\n            capture_timeout = FLAGS.phantomjs_timeout\n        else:\n            capture_timeout = FLAGS.capture_timeout\n        process_worker.ProcessWorkflow.__init__(\n            self, log_path, timeout_seconds=capture_timeout)\n        self.config_path = config_path\n        self.output_path = output_path", "docstring": "Initializer.\n\nArgs:\nlog_path: Where to write the verbose logging output.\nconfig_path: Path to the screenshot config file to pass\nto PhantomJs.\noutput_path: Where the output screenshot should be written.", "source": "juraj-google-style"}
{"code": "def augment(self, dct: NonAugmentedDict, document: Optional[YamlDocument]=None) -> AugmentedDict:\n    Validator.instance_of(dict, raise_ex=True, dct=dct)\n    for instance in self._extensions:\n        nodes = list(dict_find_pattern(dct, **instance.config()))\n        for (parent, k, val) in nodes:\n            parent.pop(k)\n            fragment = instance.apply(ExtensionContext(mentor=self, document=(document or dct), dct=dct, parent_node=parent, node=(k, val)))\n            if (fragment is not None):\n                parent.update(fragment)\n    return dct", "docstring": "Augments the given dictionary by using all the bound extensions.\n\nArgs:\ndct: Dictionary to augment.\ndocument: The document the dictionary was loaded from.\n\nReturns:\nThe augmented dictionary.", "source": "codesearchnet"}
{"code": "def get_distrib_version():\n    key = 'distrib_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting distribution version:\\n %s' % str(err))\n    return out.strip(b'\\n')", "docstring": "Retrieves distribution version of the operating system.\n\nReturns:\nString that is the distribution version.\ne.g. '14.04'", "source": "github-repos"}
{"code": "def parse_simple_id(chrom, pos, ref, alt):\n    return '_'.join([chrom, pos, ref, alt])", "docstring": "Parse the simple id for a variant\n\nSimple id is used as a human readable reference for a position, it is\nin no way unique.\n\nArgs:\nchrom(str)\npos(str)\nref(str)\nalt(str)\n\nReturns:\nsimple_id(str): The simple human readable variant id", "source": "codesearchnet"}
{"code": "def auto_forward(auto=True):\n    global __auto_forward_state\n    prev = __auto_forward_state\n    __auto_forward_state = auto\n    (yield)\n    __auto_forward_state = prev", "docstring": "Context for dynamic graph execution mode.\n\nArgs:\nauto (bool): Whether forward computation is executed during a\ncomputation graph construction.\n\nReturns: bool", "source": "codesearchnet"}
{"code": "def __init__(self, model_file, input_arrays=None, input_shapes=None, output_arrays=None, custom_objects=None):\n    super(TFLiteKerasModelConverter, self).__init__(experimental_debug_info_func=None)\n    if context.executing_eagerly():\n        if input_arrays or output_arrays:\n            raise ValueError('`input_arrays` and `output_arrays` are unsupported with Eager mode. If your model requires any of these parameters, please use disable_eager_execution().')\n        keras_model = keras_deps.get_load_model_function()(model_file, custom_objects)\n        function = _trace_model_call(keras_model)\n        concrete_func = function.get_concrete_function()\n        frozen_func = _convert_to_constants.convert_variables_to_constants_v2(concrete_func, lower_control_flow=False)\n        _set_tensor_shapes(frozen_func.inputs, input_shapes)\n        self._keras_model = keras_model\n        self._graph_def = frozen_func.graph.as_graph_def()\n        self._input_tensors = frozen_func.inputs\n        self._output_tensors = frozen_func.outputs\n        self._debug_info_func = _build_debug_info_func(frozen_func.graph)\n        return\n    keras_deps.get_clear_session_function()()\n    keras_model = keras_deps.get_load_model_function()(model_file, custom_objects)\n    sess = keras_deps.get_get_session_function()()\n    if input_arrays:\n        input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)\n    else:\n        input_tensors = keras_model.inputs\n    if output_arrays:\n        output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)\n    else:\n        output_tensors = keras_model.outputs\n    _set_tensor_shapes(input_tensors, input_shapes)\n    graph_def = _freeze_graph(sess, input_tensors, output_tensors)\n    self._keras_model = keras_model\n    self._graph_def = graph_def\n    self._input_tensors = input_tensors\n    self._output_tensors = output_tensors\n    self._debug_info_func = _build_debug_info_func(sess.graph)", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nmodel_file: Full filepath of HDF5 file containing the tf.keras model.\ninput_arrays: List of input tensors to freeze graph with. Uses input\narrays from SignatureDef when none are provided. (default None)\ninput_shapes: Dict of strings representing input tensor names to list of\nintegers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}).\nAutomatically determined when input shapes is None (e.g., {\"foo\" :\nNone}). (default None)\noutput_arrays: List of output tensors to freeze graph with. Uses output\narrays from SignatureDef when none are provided. (default None)\ncustom_objects: Dict mapping names (strings) to custom classes or\nfunctions to be considered during model deserialization. (default None)\n\nRaises:\nValueError: Invalid arguments.", "source": "github-repos"}
{"code": "def set_storage(self, storage):\n        \n        if isinstance(storage, BaseStorage):\n            self.storage = storage\n        elif isinstance(storage, dict):\n            if 'backend' not in storage and 'root_dir' in storage:\n                storage['backend'] = 'FileSystem'\n            try:\n                backend_cls = getattr(storage_package, storage['backend'])\n            except AttributeError:\n                try:\n                    backend_cls = import_module(storage['backend'])\n                except ImportError:\n                    self.logger.error('cannot find backend module %s',\n                                      storage['backend'])\n                    sys.exit()\n            kwargs = storage.copy()\n            del kwargs['backend']\n            self.storage = backend_cls(**kwargs)\n        else:\n            raise TypeError('\"storage\" must be a storage object or dict')", "docstring": "Set storage backend for downloader\n\nFor full list of storage backend supported, please see :mod:`storage`.\n\nArgs:\nstorage (dict or BaseStorage): storage backend configuration or instance", "source": "juraj-google-style"}
{"code": "def _projected_entity_to_message(ent, message_type):\n  \n  msg = message_type()\n  analyzed = _analyze_indexed_fields(ent._projection)\n  for name, sublist in analyzed.iteritems():\n    prop = ent._properties[name]\n    val = prop._get_value(ent)\n    assert isinstance(prop, model.StructuredProperty) == bool(sublist)\n    if sublist:\n      field = message_type.field_by_name(name)\n      assert isinstance(field, messages.MessageField)\n      assert prop._repeated == field.repeated\n      if prop._repeated:\n        assert isinstance(val, list)\n        val = [_projected_entity_to_message(v, field.type) for v in val]\n      else:\n        assert isinstance(val, prop._modelclass)\n        val = _projected_entity_to_message(val, field.type)\n    setattr(msg, name, val)\n  return msg", "docstring": "Recursive helper for _from_base_type() to convert an entity to a message.\n\nArgs:\nent: A Model instance.\nmessage_type: A Message subclass.\n\nReturns:\nAn instance of message_type.", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, image_hidden_states: Optional[tf.Tensor]=None, image_attention_mask: Optional[tf.Tensor]=None, cross_attention_gate: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, past_key_value: Optional[Tuple[tf.Tensor]]=None) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:\n    if image_hidden_states is None:\n        raise ValueError('`image_hidden_states` is required for Idefics cross attention module which are visual features to be conditioned on.')\n    if cross_attention_gate is None:\n        raise ValueError('`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images.')\n    if past_key_value is not None:\n        raise NotImplementedError('Past key value states are not implemented for Idefics cross attention module.')\n    residual = hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states, self_attn_weights, present_key_value = self.cross_attn(hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions)\n    hidden_states = tf.nn.dropout(hidden_states, rate=self.config)\n    mask = tf.cast(cross_attention_gate == 0, dtype=hidden_states.dtype)\n    mask = tf.expand_dims(mask, -1)\n    hidden_states = tf.where(tf.broadcast_to(mask, tf.shape(hidden_states)) == 1, tf.zeros_like(hidden_states), hidden_states)\n    hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states\n    residual = hidden_states\n    hidden_states = self.post_attention_layernorm(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = tf.nn.dropout(hidden_states, rate=self.config)\n    hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`, *optional*): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\npast_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states\nno_images (`bool`, *optional*, defaults to `False`): If `True` the vision part is ignored", "source": "github-repos"}
{"code": "def assertNotAllClose(self, a, b, rtol=1e-06, atol=1e-06, msg=None):\n    try:\n        self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)\n    except AssertionError:\n        return\n    msg = msg or ''\n    raise AssertionError('The two values are close at all elements. %s' % msg)", "docstring": "Assert that two numpy arrays, or Tensors, do not have near values.\n\nArgs:\na: The expected numpy `ndarray`, or anything that can be converted into a\nnumpy `ndarray` (including Tensor), or any arbitrarily nested of\nstructure of these.\nb: The actual numpy `ndarray`, or anything that can be converted into a\nnumpy `ndarray` (including Tensor), or any arbitrarily nested of\nstructure of these.\nrtol: relative tolerance.\natol: absolute tolerance.\nmsg: Optional message to report on failure.\n\nRaises:\nAssertionError: If `a` and `b` are unexpectedly close at all elements.", "source": "github-repos"}
{"code": "def iter(self, keyed=False, extended=False):\n    if self.closed:\n        message = 'Stream is closed. Please call \"stream.open()\" first.'\n        raise exceptions.TabulatorException(message)\n    iterator = chain(self.__sample_extended_rows, self.__parser.extended_rows)\n    iterator = self.__apply_processors(iterator)\n    for (row_number, headers, row) in iterator:\n        if (row_number > self.__row_number):\n            self.__row_number = row_number\n            if extended:\n                (yield (row_number, headers, row))\n            elif keyed:\n                (yield dict(zip(headers, row)))\n            else:\n                (yield row)", "docstring": "Iterate over the rows.\n\nEach row is returned in a format that depends on the arguments `keyed`\nand `extended`. By default, each row is returned as list of their\nvalues.\n\nArgs:\nkeyed (bool, optional): When True, each returned row will be a\n`dict` mapping the header name to its value in the current row.\nFor example, `[{'name': 'J Smith', 'value': '10'}]`. Ignored if\n``extended`` is True. Defaults to False.\nextended (bool, optional): When True, returns each row as a tuple\nwith row number (starts at 1), list of headers, and list of row\nvalues. For example, `(1, ['name', 'value'], ['J Smith', '10'])`.\nDefaults to False.\n\nReturns:\nIterator[Union[List[Any], Dict[str, Any], Tuple[int, List[str], List[Any]]]]:\nThe row itself. The format depends on the values of `keyed` and\n`extended` arguments.\n\nRaises:\nexceptions.TabulatorException: If the stream is closed.", "source": "codesearchnet"}
{"code": "def has_error(self):\n    return next((True for cr in self.component_results if cr.has_error()), False)", "docstring": "Returns whether there was a business logic error when fetching data\nfor any components for this property.\n\nReturns:\nboolean", "source": "codesearchnet"}
{"code": "def match(self, patterns, limits=None):\n    if limits is None:\n        limits = [None] * len(patterns)\n    else:\n        err_msg = 'Patterns and limits should be equal in length'\n        assert len(patterns) == len(limits), err_msg\n\n    def _match(pattern, limit):\n        \n        if pattern.endswith('/') or pattern.endswith('\\\\'):\n            pattern += '*'\n        prefix_or_dir = re.match('^[^[*?]*', pattern).group(0)\n        file_metadatas = []\n        if prefix_or_dir == pattern:\n            if self.exists(pattern):\n                file_metadatas = [self.metadata(pattern)]\n        else:\n            if self.has_dirs():\n                prefix_dirname = self._url_dirname(prefix_or_dir)\n                if not prefix_dirname == prefix_or_dir:\n                    logger.debug('Changed prefix_or_dir %r -> %r', prefix_or_dir, prefix_dirname)\n                    prefix_or_dir = prefix_dirname\n            logger.debug('Listing files in %r', prefix_or_dir)\n            file_metadatas = self._list(prefix_or_dir)\n        metadata_list = []\n        for file_metadata in self.match_files(file_metadatas, pattern):\n            if limit is not None and len(metadata_list) >= limit:\n                break\n            metadata_list.append(file_metadata)\n        return MatchResult(pattern, metadata_list)\n    exceptions = {}\n    result = []\n    for pattern, limit in zip(patterns, limits):\n        try:\n            result.append(_match(pattern, limit))\n        except Exception as e:\n            exceptions[pattern] = e\n    if exceptions:\n        raise BeamIOError('Match operation failed', exceptions)\n    return result", "docstring": "Find all matching paths to the patterns provided.\n\nSee Also:\n:meth:`translate_pattern`\n\nPatterns ending with '/' or '\\' will be appended with '*'.\n\nArgs:\npatterns: list of string for the file path pattern to match against\nlimits: list of maximum number of responses that need to be fetched\n\nReturns: list of ``MatchResult`` objects.\n\nRaises:\n``BeamIOError``: if any of the pattern match operations fail", "source": "github-repos"}
{"code": "def AddDateTimeRange(self, time_value, start_time_string=None, end_time_string=None):\n    if (not isinstance(time_value, py2to3.STRING_TYPES)):\n        raise ValueError('Filter type must be a string.')\n    if ((start_time_string is None) and (end_time_string is None)):\n        raise ValueError('Filter must have either a start or an end date time value.')\n    time_value_lower = time_value.lower()\n    if (time_value_lower not in self._SUPPORTED_TIME_VALUES):\n        raise ValueError('Unsupported time value: {0:s}.'.format(time_value))\n    start_date_time = None\n    if start_time_string:\n        start_date_time = time_elements.TimeElementsInMicroseconds()\n        start_date_time.CopyFromDateTimeString(start_time_string)\n    end_date_time = None\n    if end_time_string:\n        end_date_time = time_elements.TimeElementsInMicroseconds()\n        end_date_time.CopyFromDateTimeString(end_time_string)\n    if ((None not in (start_date_time, end_date_time)) and (start_date_time > end_date_time)):\n        raise ValueError('Invalid date time value start must be earlier than end.')\n    self._date_time_ranges.append(self._DATE_TIME_RANGE_TUPLE(time_value_lower, start_date_time, end_date_time))", "docstring": "Adds a date time filter range.\n\nThe time strings are formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds fraction\nand timezone offset are optional. The default timezone is UTC.\n\nArgs:\ntime_value (str): time value, such as, atime, ctime, crtime, dtime, bkup\nand mtime.\nstart_time_string (str): start date and time value string.\nend_time_string (str): end date and time value string.\n\nRaises:\nValueError: If the filter is badly formed.", "source": "codesearchnet"}
{"code": "def Unregister(self, name):\n    \n    precondition.AssertType(name, Text)\n\n    try:\n      del self._constructors[name]\n    except KeyError:\n      raise ValueError(\"Constructor with name '%s' is not registered\" % name)", "docstring": "Unregisters a constructor.\n\nArgs:\nname: A name of the constructor to unregister.\n\nRaises:\nValueError: If constructor with specified name has never been registered.", "source": "juraj-google-style"}
{"code": "def _construct_linebreak_token(self, d: Dict) -> List[Dict]:\n        \n\n        result = []\n        num_break = int(d[\"length\"][0]) if d[\"length\"] else 1\n        if num_break:\n            s = ''\n            for i in range(num_break):\n                s += '\\n'\n            this_token = {attrs.LOWER: s}\n            result.append(this_token)\n            s += ' '\n            this_token = {attrs.LOWER: s}\n            result.append(this_token)\n        result = self._add_common_constrain(result, d)\n\n        return result", "docstring": "Construct a shape token\nArgs:\nd: Dict\n\nReturns: List[Dict]", "source": "juraj-google-style"}
{"code": "def frombase(path1, path2):\n    if (not isparent(path1, path2)):\n        raise ValueError('path1 must be a prefix of path2')\n    return path2[len(path1):]", "docstring": "Get the final path of ``path2`` that isn't in ``path1``.\n\nArguments:\npath1 (str): A PyFilesytem path.\npath2 (str): A PyFilesytem path.\n\nReturns:\nstr: the final part of ``path2``.\n\nExample:\n>>> frombase('foo/bar/', 'foo/bar/baz/egg')\n'baz/egg'", "source": "codesearchnet"}
{"code": "def singleOrPair(obj):\n    \n    if len(list(obj.__class__.__mro__)) <= 2:\n        return 'Neither'\n    else:\n        \n        if ancestorJr(obj) is Pair:\n            return 'Pair'\n        elif ancestor(obj) is Single:\n            return 'Single'\n        else:\n            return 'Neither'", "docstring": "Chech an object is single or pair or neither.\n\nOf course,, all pairs are single, so what the function is really detecting is whether an object is only single or at the same time a pair.\n\nArgs:\nobj (object): Literally anything.\n\nReturns:\nstr: 'Single', or 'Pair', or 'Neither'", "source": "juraj-google-style"}
{"code": "def coarse_grain(G, ncg):\n    if (ncg <= 1):\n        return G\n    G = numpy.asarray(G)\n    (nbin, remainder) = divmod(G.shape[(- 1)], ncg)\n    if (remainder != 0):\n        nbin += 1\n    return numpy.transpose([(numpy.sum(G[(..., i:(i + ncg))], axis=(- 1)) / G[(..., i:(i + ncg))].shape[(- 1)]) for i in numpy.arange(0, (ncg * nbin), ncg)])", "docstring": "Coarse-grain last index of array ``G``.\n\nBin the last index of array ``G`` in bins of width ``ncg``, and\nreplace each bin by its average. Return the binned results.\n\nArgs:\nG: Array to be coarse-grained.\nncg: Bin width for coarse-graining.", "source": "codesearchnet"}
{"code": "def match(self, request):\n    if (self._times <= 0):\n        raise PookExpiredMock('Mock expired')\n    for test in self.filters:\n        if (not test(request, self)):\n            return (False, [])\n    for mapper in self.mappers:\n        request = mapper(request, self)\n        if (not request):\n            raise ValueError('map function must return a request object')\n    (matches, errors) = self.matchers.match(request)\n    if (not matches):\n        return (False, errors)\n    self._calls.append(request)\n    self._matches += 1\n    if (not self._persist):\n        self._times -= 1\n    if self._error:\n        raise self._error\n    for callback in self.callbacks:\n        callback(request, self)\n    return (True, [])", "docstring": "Matches an outgoing HTTP request against the current mock matchers.\n\nThis method acts like a delegator to `pook.MatcherEngine`.\n\nArguments:\nrequest (pook.Request): request instance to match.\n\nRaises:\nException: if the mock has an exception defined.\n\nReturns:\ntuple(bool, list[Exception]): ``True`` if the mock matches\nthe outgoing HTTP request, otherwise ``False``. Also returns\nan optional list of error exceptions.", "source": "codesearchnet"}
{"code": "def get_content_field(self, name):\n        \n        fields = self._content.findall(name)\n        if not fields:\n            return None\n        elif len(fields) == 1:\n            return etree_to_dict(fields[0])[name]\n        else:\n            return [etree_to_dict(field)[name] for field in fields]", "docstring": "Get the contents of a specific subtag from Clusterpoint Storage's response's content tag.\n\nArgs:\nname -- A name string of the content's subtag to be returned.\n\nReturns:\nA dict representing the contents of the specified field or a list of dicts\nif there are multiple fields with that tag name. Returns None if no field found.", "source": "juraj-google-style"}
{"code": "def cs20(msg):\n    \n    chars = '\n\n    d = hex2bin(data(msg))\n\n    cs = ''\n    cs += chars[bin2int(d[8:14])]\n    cs += chars[bin2int(d[14:20])]\n    cs += chars[bin2int(d[20:26])]\n    cs += chars[bin2int(d[26:32])]\n    cs += chars[bin2int(d[32:38])]\n    cs += chars[bin2int(d[38:44])]\n    cs += chars[bin2int(d[44:50])]\n    cs += chars[bin2int(d[50:56])]\n\n    return cs", "docstring": "Aircraft callsign\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS40) string\n\nReturns:\nstring: callsign, max. 8 chars", "source": "juraj-google-style"}
{"code": "def _DictToListOfStrings(self, data_dict):\n    ret_list = []\n    for (key, value) in iter(data_dict.items()):\n        if (key in ('body', 'datetime', 'type', 'room', 'rooms', 'id')):\n            continue\n        ret_list.append('{0:s} = {1!s}'.format(key, value))\n    return ret_list", "docstring": "Converts a dictionary into a list of strings.\n\nArgs:\ndata_dict (dict[str, object]): dictionary to convert.\n\nReturns:\nlist[str]: list of strings.", "source": "codesearchnet"}
{"code": "def average_datetimes(dt_list):\n    if (sys.version_info < (3, 3)):\n        import time\n\n        def timestamp_func(dt):\n            return time.mktime(dt.timetuple())\n    else:\n        timestamp_func = datetime.timestamp\n    total = [timestamp_func(dt) for dt in dt_list]\n    return datetime.fromtimestamp((sum(total) / len(total)))", "docstring": "Average a series of datetime objects.\n\n.. note::\n\nThis function assumes all datetime objects are naive and in the same\ntime zone (UTC).\n\nArgs:\ndt_list (iterable): Datetime objects to average\n\nReturns: Average datetime as a datetime object", "source": "codesearchnet"}
{"code": "def MapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    data_type_size = self._data_type_definition.GetByteSize()\n    self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)\n\n    try:\n      struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])\n      mapped_value = self.MapValue(*struct_tuple)\n\n    except Exception as exception:\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: {2!s}').format(\n              self._data_type_definition.name, byte_offset, exception)\n      raise errors.MappingError(error_string)\n\n    if context:\n      context.byte_size = data_type_size\n\n    return mapped_value", "docstring": "Maps the data type on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\nobject: mapped value.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def constant_time_string_compare(a, b):\n    \n\n    try:\n        return hmac.compare_digest(a, b)\n    except AttributeError:\n\n        if len(a) != len(b):\n            return False\n\n        result = 0\n\n        for x, y in zip(a, b):\n            result |= ord(x) ^ ord(y)\n\n        return result == 0", "docstring": "Helper for comparing string in constant time, independent\nof the python version being used.\n\nArgs:\na (str): A string to compare\nb (str): A string to compare", "source": "juraj-google-style"}
{"code": "def show_history(self, status=None, nids=None, full_history=False, metadata=False):\n    (nrows, ncols) = get_terminal_size()\n    works_done = []\n    for task in self.iflat_tasks(status=status, nids=nids):\n        work = task.work\n        if (work not in works_done):\n            works_done.append(work)\n            if (work.history or full_history):\n                cprint(make_banner(str(work), width=ncols, mark='='), **work.status.color_opts)\n                print(work.history.to_string(metadata=metadata))\n        if (task.history or full_history):\n            cprint(make_banner(str(task), width=ncols, mark='='), **task.status.color_opts)\n            print(task.history.to_string(metadata=metadata))\n    if (self.history or full_history):\n        cprint(make_banner(str(self), width=ncols, mark='='), **self.status.color_opts)\n        print(self.history.to_string(metadata=metadata))", "docstring": "Print the history of the flow to stdout.\n\nArgs:\nstatus: if not None, only the tasks with this status are select\nfull_history: Print full info set, including nodes with an empty history.\nnids: optional list of node identifiers used to filter the tasks.\nmetadata: print history metadata (experimental)", "source": "codesearchnet"}
{"code": "def _extract_direct(self, *, stream):\n        \n\n        def normal_dct_rgb():\n            \n            \n            \n            \n            \n            DEFAULT_CT_RGB = 1\n            ct = self.filter_decodeparms[0][1].get('/ColorTransform', DEFAULT_CT_RGB)\n            return self.mode == 'RGB' and ct == DEFAULT_CT_RGB\n\n        def normal_dct_cmyk():\n            \n            \n            DEFAULT_CT_CMYK = 0\n            ct = self.filter_decodeparms[0][1].get('/ColorTransform', DEFAULT_CT_CMYK)\n            return self.mode == 'CMYK' and ct == DEFAULT_CT_CMYK\n\n        if self.filters == ['/CCITTFaxDecode']:\n            data = self.obj.read_raw_bytes()\n            stream.write(self._generate_ccitt_header(data))\n            stream.write(data)\n            return '.tif'\n        elif self.filters == ['/DCTDecode'] and (\n            self.mode == 'L' or normal_dct_rgb() or normal_dct_cmyk()\n        ):\n            buffer = self.obj.get_raw_stream_buffer()\n            stream.write(buffer)\n            return '.jpg'\n\n        raise UnsupportedImageTypeError()", "docstring": "Attempt to extract the image directly to a usable image file\n\nIf there is no way to extract the image without decompressing or\ntranscoding then raise an exception. The type and format of image\ngenerated will vary.\n\nArgs:\nstream: Writable stream to write data to", "source": "juraj-google-style"}
{"code": "def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:\n    if not hasattr(self, 'warnings_issued'):\n        self.warnings_issued = {}\n    if self.main_input_name in input_dict:\n        return input_dict[self.main_input_name].numel()\n    elif 'estimate_tokens' not in self.warnings_issued:\n        logger.warning('Could not estimate the number of tokens of the input, floating-point operations will not be computed')\n        self.warnings_issued['estimate_tokens'] = True\n    return 0", "docstring": "Helper function to estimate the total number of tokens from the model inputs.\n\nArgs:\ninputs (`dict`): The model inputs.\n\nReturns:\n`int`: The total number of tokens.", "source": "github-repos"}
{"code": "def is_truthy(value, default=False):\n    \n\n    if value is None:\n        return False\n\n    if isinstance(value, bool):\n        return value\n\n    if isinstance(value, int):\n        return value > 0\n\n    trues = ('1', 'true', 'y', 'yes', 'ok')\n    falses = ('', '0', 'false', 'n', 'none', 'no')\n\n    if value.lower().strip() in falses:\n        return False\n\n    elif value.lower().strip() in trues:\n        return True\n\n    else:\n        if default:\n            return default\n        else:\n            raise ValueError('Invalid argument given to truthy: {0}'.format(value))", "docstring": "Evaluate a value for truthiness\n\n>>> is_truthy('Yes')\nTrue\n>>> is_truthy('False')\nFalse\n>>> is_truthy(1)\nTrue\n\nArgs:\nvalue (Any): Value to evaluate\ndefault (bool): Optional default value, if the input does not match the true or false values\n\nReturns:\nTrue if a truthy value is passed, else False", "source": "juraj-google-style"}
{"code": "def Verify(self, mempool):\n        \n        if not super(ClaimTransaction, self).Verify(mempool):\n            return False\n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n        otherclaimTxs = [tx for tx in mempool if tx is ClaimTransaction and tx is not self]\n        for other in otherclaimTxs:\n            \n            if len([list(filter(lambda x: x in self.Claims, otherClaims)) for otherClaims in other.Claims]):\n                return False\n\n        txResult = None\n        for tx in self.GetTransactionResults():\n            if tx.AssetId == Blockchain.SystemCoin().Hash:\n                txResult = tx\n                break\n\n        if txResult is None or txResult.Amount > Fixed8(0):\n            return False\n\n        try:\n            return Blockchain.CalculateBonusIgnoreClaimed(self.Claims, False) == -txResult.Amount\n\n        except Exception as e:\n            logger.error('Could not calculate bonus: %s ' % e)\n\n        return False", "docstring": "Verify the transaction.\n\nArgs:\nmempool:\n\nReturns:\nbool: True if verified. False otherwise.", "source": "juraj-google-style"}
{"code": "def __recv(self, size=4096):\n    data = self.socket.recv(size)\n    if (not data):\n        raise NNTPError('Failed to read from socket')\n    self.__buffer.write(data)", "docstring": "Reads data from the socket.\n\nRaises:\nNNTPError: When connection times out or read from socket fails.", "source": "codesearchnet"}
{"code": "def categorize(values, categories, default=None):\n    \n    uniq_cats = list(unique_iterator(values))\n    cats = []\n    for c in values:\n        if isinstance(categories, list):\n            cat_ind = uniq_cats.index(c)\n            if cat_ind < len(categories):\n                cat = categories[cat_ind]\n            else:\n                cat = default\n        else:\n            cat = categories.get(c, default)\n        cats.append(cat)\n    return np.asarray(cats)", "docstring": "Maps discrete values to supplied categories.\n\nReplaces discrete values in input array with a fixed set of\ncategories defined either as a list or dictionary.\n\nArgs:\nvalues: Array of values to be categorized\ncategories: List or dict of categories to map inputs to\ndefault: Default value to assign if value not in categories\n\nReturns:\nArray of categorized values", "source": "juraj-google-style"}
{"code": "def content(self, request, id):\n        \n        gist = self.send(request, id).json()\n\n        def convert(data):\n            return base64.b64decode(data).decode('utf-8')\n\n        content = {}\n        for name, data in gist['files'].items():\n            content[name] = convert(data['content'])\n\n        return content", "docstring": "Returns the content of the gist\n\nArguments:\nrequest: an initial request object\nid:      the gist identifier\n\nReturns:\nA dict containing the contents of each file in the gist", "source": "juraj-google-style"}
{"code": "def from_structure(cls, structure, ff_elements=None, atom_style=\"charge\"):\n        \n        s = structure.get_sorted_structure()\n        box, symmop = lattice_2_lmpbox(s.lattice)\n        coords = symmop.operate_multi(s.cart_coords)\n        site_properties = s.site_properties\n        if \"velocities\" in site_properties:\n            velos = np.array(s.site_properties[\"velocities\"])\n            rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix)\n            rot_velos = rot.operate_multi(velos)\n            site_properties.update({\"velocities\": rot_velos})\n        boxed_s = Structure(box.to_lattice(), s.species, coords,\n                            site_properties=site_properties,\n                            coords_are_cartesian=True)\n\n        symbols = list(s.symbol_set)\n        if ff_elements:\n            symbols.extend(ff_elements)\n        elements = sorted(Element(el) for el in set(symbols))\n        mass_info = [tuple([i.symbol] * 2) for i in elements]\n        ff = ForceField(mass_info)\n        topo = Topology(boxed_s)\n        return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo],\n                                          atom_style=atom_style)", "docstring": "Simple constructor building LammpsData from a structure without\nforce field parameters and topologies.\n\nArgs:\nstructure (Structure): Input structure.\nff_elements ([str]): List of strings of elements that must\nbe present due to force field settings but not\nnecessarily in the structure. Default to None.\natom_style (str): Choose between \"atomic\" (neutral) and\n\"charge\" (charged). Default to \"charge\".", "source": "juraj-google-style"}
{"code": "def _apply_credentials(auto_refresh=True, credentials=None,\n                           headers=None):\n        \n        token = credentials.get_credentials().access_token\n        if auto_refresh is True:\n            if token is None:\n                token = credentials.refresh(\n                    access_token=None, timeout=10)\n            elif credentials.jwt_is_expired():\n                token = credentials.refresh(timeout=10)\n        headers.update(\n            {'Authorization': \"Bearer {}\".format(token)}\n        )", "docstring": "Update Authorization header.\n\nUpdate request headers with latest `access_token`. Perform token\n`refresh` if token is ``None``.\n\nArgs:\nauto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.\ncredentials (class): Read-only credentials.\nheaders (class): Requests `CaseInsensitiveDict`.", "source": "juraj-google-style"}
{"code": "def strace_data_access_event(self, operation, address, data, data_mask=None, access_width=4, address_range=0):\n    cmd = enums.JLinkStraceCommand.TRACE_EVENT_SET\n    event_info = structs.JLinkStraceEventInfo()\n    event_info.Type = enums.JLinkStraceEvent.DATA_ACCESS\n    event_info.Op = operation\n    event_info.AccessSize = int(access_width)\n    event_info.Addr = int(address)\n    event_info.Data = int(data)\n    event_info.DataMask = int((data_mask or 0))\n    event_info.AddrRangeSize = int(address_range)\n    handle = self._dll.JLINK_STRACE_Control(cmd, ctypes.byref(event_info))\n    if (handle < 0):\n        raise errors.JLinkException(handle)\n    return handle", "docstring": "Sets an event to trigger trace logic when data access is made.\n\nData access corresponds to either a read or write.\n\nArgs:\nself (JLink): the ``JLink`` instance.\noperation (int): one of the operations in ``JLinkStraceOperation``.\naddress (int): the address of the load/store data.\ndata (int): the data to be compared the event data to.\ndata_mask (int): optional bitmask specifying bits to ignore in\ncomparison.\nacess_width (int): optional access width for the data.\naddress_range (int): optional range of address to trigger event on.\n\nReturns:\nAn integer specifying the trace event handle.  This handle should be\nretained in order to clear the event at a later time.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def sg_any(tensor, opt):\n    r\n    return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the \"logical or\" of elements across axis of a tensor.\n\nSee `tf.reduce_any()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def _multi_worker_session(kwargs):\n    strategy = None\n    for _, v in kwargs.items():\n        if isinstance(v, distribute_lib.StrategyBase):\n            if strategy is not None:\n                logging.warning('The test uses multiple strategies. Skipping entering a session that is configured for the strategy.')\n                return ops.NullContextmanager()\n            strategy = v\n    if context.executing_eagerly() or not isinstance(strategy, collective_all_reduce_strategy.CollectiveAllReduceStrategy):\n        return ops.NullContextmanager()\n    sess_config = copy.deepcopy(context.context().config)\n    sess_config = strategy.update_config_proto(sess_config)\n    target = strategy.cluster_resolver.master()\n    return session.Session(config=sess_config, target=target).as_default()", "docstring": "Returns a context manager that enters a session that is configured for the MultiWorkerMirroredStrategy.\n\nArgs:\nkwargs: a dict. Keyword arguments passed to the test.\n\nReturns:\nA context manager. If MultiWorkerMirroredStrategy is the  one and only one\nstrategy in kwargs and it's in graph mode, it's the session that is\nconfigured for that strategy.  Otherwise, it's a no-op context manager.", "source": "github-repos"}
{"code": "def _parse_mtu(self, config):\n    match = re.search('mtu (\\\\d+)', config)\n    return dict(mtu=int(match.group(1)))", "docstring": "Parses the config block and returns the configured IP MTU value\n\nThe provided configuration block is scanned and the configured value\nfor the IP MTU is returned as a dict object.  The IP MTU value is\nexpected to always be present in the provided config block\n\nArgs:\nconfig (str): The interface configuration block to parse\n\nReturn:\ndict: A dict object intended to be merged into the resource dict", "source": "codesearchnet"}
{"code": "def print_schema_results(results, level=0):\n    \n    for error in results.errors:\n        print_level(logger.error, _RED + \"[X] %s\", level, error)", "docstring": "Print JSON Schema validation errors to stdout.\n\nArgs:\nresults: An instance of ObjectValidationResults.\nlevel: The level at which to print the results.", "source": "juraj-google-style"}
{"code": "def get_id_transcripts(self, hgnc_id, build='37'):\n        \n        transcripts = self.transcripts(build=build, hgnc_id=hgnc_id)\n\n        identifier_transcripts = set()\n        longest = None\n        nr = []\n        xm = []\n        for tx in transcripts:\n            enst_id = tx['transcript_id']\n            \n            if not longest:\n                longest = enst_id\n            refseq_id = tx.get('refseq_id')\n            if not refseq_id:\n                continue\n            \n            if 'NM' in refseq_id:\n                identifier_transcripts.add(enst_id)\n            elif 'NR' in refseq_id:\n                nr.append(enst_id)\n            elif 'XM' in refseq_id:\n                xm.append(enst_id)\n        \n        if identifier_transcripts:\n            return identifier_transcripts\n        \n        if nr:\n            return set([nr[0]])\n\n        if xm:\n            return set([xm[0]])\n        \n        return set([longest])", "docstring": "Return a set with identifier transcript(s)\n\nChoose all refseq transcripts with NM symbols, if none where found choose ONE with NR,\nif no NR choose ONE with XM. If there are no RefSeq transcripts identifiers choose the\nlongest ensembl transcript.\n\nArgs:\nhgnc_id(int)\nbuild(str)\n\nReturns:\nidentifier_transcripts(set)", "source": "juraj-google-style"}
{"code": "def _is_univariate_marginal(self, index_points):\n    num_index_points = tf.compat.dimension_value(index_points.shape[(- (self.kernel.feature_ndims + 1))])\n    if (num_index_points is None):\n        warnings.warn('Unable to detect statically whether the number of index_points is 1. As a result, defaulting to treating the marginal GP at `index_points` as a multivariate Gaussian. This makes some methods, like `cdf` unavailable.')\n    return (num_index_points == 1)", "docstring": "True if the given index_points would yield a univariate marginal.\n\nArgs:\nindex_points: the set of index set locations at which to compute the\nmarginal Gaussian distribution. If this set is of size 1, the marginal is\nunivariate.\n\nReturns:\nis_univariate: Boolean indicating whether the marginal is univariate or\nmultivariate. In the case of dynamic shape in the number of index points,\ndefaults to \"multivariate\" since that's the best we can do.", "source": "codesearchnet"}
{"code": "def get_interface(self):\n    raise NotImplementedError('Base class should not be called directly!')", "docstring": "This function returns The interface used to configure the sniffer,\ne.g. 'wlan0'.\n\nReturns:\nThe interface (string) used to configure the sniffer. Corresponds to\nthe 'Interface' key of the sniffer configuration.", "source": "github-repos"}
{"code": "def update_parser(self, parser):\n    self._parser = parser\n    ini_str = argparse_to_ini(parser)\n    configp = configparser.ConfigParser(allow_no_value=True)\n    configp.read_dict(self._config)\n    configp.read_string(ini_str)\n    self._config.update({s: dict(configp.items(s)) for s in configp.sections()})", "docstring": "Update config dictionary with declared arguments in an argparse.parser\nNew variables will be created, and existing ones overridden.\n\nArgs:\nparser (argparse.ArgumentParser): parser to read variables from", "source": "codesearchnet"}
{"code": "def __init__(self, app, env, region, prop_path):\n        \n        self.app_name = app\n        self.env = env\n        self.region = region\n        self.properties = get_properties(prop_path)\n        generated = get_details(app=self.app_name)\n        self.group = generated.data['project']\n\n        try:\n            self.pipeline = self.properties['pipeline']['lambda']\n        except KeyError:\n            raise RequiredKeyNotFound(\"Lambda key in pipeline.json is required.\")\n\n        self.runtime = self.pipeline['runtime']\n        self.description = self.pipeline['app_description']\n        self.handler = self.pipeline['handler']\n        self.vpc_enabled = self.pipeline['vpc_enabled']\n\n        self.settings = get_properties(prop_path, env=self.env, region=self.region)\n        app = self.settings['app']\n        self.lambda_environment = app['lambda_environment']\n        self.memory = app['lambda_memory']\n        self.role = app.get('lambda_role') or generated.iam()['lambda_role']\n        self.timeout = app['lambda_timeout']\n        self.concurrency_limit = app.get('lambda_concurrency_limit')\n\n        self.role_arn = get_role_arn(self.role, self.env, self.region)\n\n        self.session = boto3.Session(profile_name=self.env, region_name=self.region)\n        self.lambda_client = self.session.client('lambda')", "docstring": "Lambda function object.\n\nArgs:\napp (str): Application name\nenv (str): Environment/Account\nregion (str): AWS Region\nprop_path (str): Path of environment property file", "source": "juraj-google-style"}
{"code": "def _parse_deploy(self, deploy_values: dict, service_config: dict):\n    mode = {}\n    for d_value in deploy_values:\n        if ('restart_policy' in d_value):\n            restart_spec = docker.types.RestartPolicy(**deploy_values[d_value])\n            service_config['restart_policy'] = restart_spec\n        if ('placement' in d_value):\n            for (constraints_key, constraints_value) in deploy_values[d_value].items():\n                service_config[constraints_key] = constraints_value\n        if ('mode' in d_value):\n            mode[d_value] = deploy_values[d_value]\n        if ('replicas' in d_value):\n            mode[d_value] = deploy_values[d_value]\n        if ('resources' in d_value):\n            resource_spec = self._parse_resources(deploy_values, d_value)\n            service_config['resources'] = resource_spec\n    mode_spec = docker.types.ServiceMode(**mode)\n    service_config['mode'] = mode_spec", "docstring": "Parse deploy key.\n\nArgs:\ndeploy_values (dict): deploy configuration values\nservice_config (dict): Service configuration", "source": "codesearchnet"}
{"code": "def drop(self, index=None, columns=None):\n    if self._is_transposed:\n        return self.transpose().drop(index=columns, columns=index).transpose()\n    if (index is None):\n        new_data = self.data\n        new_index = self.index\n    else:\n\n        def delitem(df, internal_indices=[]):\n            return df.drop(index=df.index[internal_indices])\n        numeric_indices = list(self.index.get_indexer_for(index))\n        new_data = self.data.apply_func_to_select_indices(1, delitem, numeric_indices, keep_remaining=True)\n        new_index = self.index[(~ self.index.isin(index))]\n    if (columns is None):\n        new_columns = self.columns\n        new_dtypes = self.dtypes\n    else:\n\n        def delitem(df, internal_indices=[]):\n            return df.drop(columns=df.columns[internal_indices])\n        numeric_indices = list(self.columns.get_indexer_for(columns))\n        new_data = new_data.apply_func_to_select_indices(0, delitem, numeric_indices, keep_remaining=True)\n        new_columns = self.columns[(~ self.columns.isin(columns))]\n        new_dtypes = self.dtypes.drop(columns)\n    return self.__constructor__(new_data, new_index, new_columns, new_dtypes)", "docstring": "Remove row data for target index and columns.\n\nArgs:\nindex: Target index to drop.\ncolumns: Target columns to drop.\n\nReturns:\nA new QueryCompiler.", "source": "codesearchnet"}
{"code": "def zeros(shape, dtype=None, **kwargs):\n    data = np.zeros(shape, dtype)\n    return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, filled with zeros.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array filled with zeros.", "source": "codesearchnet"}
{"code": "def __init__(self, rate=None, burst_size=None, prec_level=None):\n        \n        super().__init__(MeterBandType.OFPMBT_DSCP_REMARK, rate, burst_size)\n        self.prec_level = prec_level", "docstring": "Create a MeterBandDscpRemark with the optional parameters below.\n\nArgs:\nrate (int): Rate for remarking packets.\nburst_size (int): Size of bursts.\nprec_level (int): Number of precendence level to substract.", "source": "juraj-google-style"}
{"code": "def _create_config_proto(self) -> tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration:\n    config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()\n    learning_rate_index = {r: i for i, r in enumerate(self._dynamic_learning_rates)}\n    for table in self._table_config:\n        table._set_table_descriptor(config_proto.table_descriptor.add(), self._strategy.extended.num_hosts, learning_rate_index)\n    table_to_id = {table: i for i, table in enumerate(self._table_config)}\n    for feature, output_shape in zip(nest.flatten(self._feature_config), self._output_shapes):\n        feature_descriptor = config_proto.feature_descriptor.add()\n        if feature.name:\n            feature_descriptor.name = feature.name\n        feature_descriptor.table_id = table_to_id[feature.table]\n        feature_descriptor.input_shape.extend(output_shape.as_list())\n    config_proto.mode = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING\n    num_replica = self._strategy.num_replicas_in_sync\n    num_cores_per_replica = self._num_cores_per_replica or 1\n    config_proto.num_hosts = self._strategy.extended.num_hosts\n    config_proto.num_tensor_cores = num_replica * num_cores_per_replica\n    config_proto.sharding_strategy = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT\n    config_proto.pipeline_execution_with_tensor_core = self._pipeline_execution_with_tensor_core\n    if self._num_cores_per_replica:\n        config_proto.spmd_sharding.enabled = True\n        config_proto.spmd_sharding.num_cores_per_replica = self._num_cores_per_replica\n    return config_proto", "docstring": "Creates the TPUEmbeddingConfiguration proto.\n\nThis proto is used to initialize the TPU embedding engine.\n\nReturns:\nA TPUEmbeddingConfiguration proto.", "source": "github-repos"}
{"code": "def load_dict_values(self, db_key: str, dict_keys: List[str], hierarchical: bool=False) -> List:\n    result = []\n    if (not hierarchical):\n        _values = self._db.hmget(db_key, *dict_keys)\n        result = [ast.literal_eval(_value) for _value in _values]\n    else:\n        db_keys = self._db.keys(pattern=(db_key + '*'))\n        for _db_key in db_keys:\n            for name in _db_key.split(':')[1:]:\n                if (name in dict_keys):\n                    _values = self._load_values(_db_key)\n                    result.append(_values)\n            _values = self._db.hmget(_db_key, *dict_keys)\n            for (i, value) in enumerate(_values):\n                try:\n                    _values[i] = ast.literal_eval(value)\n                except SyntaxError:\n                    pass\n                except ValueError:\n                    pass\n            result += [value for value in _values if (value is not None)]\n    return result", "docstring": "Load values from a dictionary with the specified dict_keys.\n\nArgs:\ndb_key (str): Key where the dictionary is stored\ndict_keys (List[str]): Keys within the dictionary to load.\nhierarchical (bool): If True, expect the dictionary to have been\nstored hierarchically. If False, expect the dictionary to have\nbeen stored flat.\n\n\nReturns:\nobject: The value stored at dict_key in the dictionary stored at\nkey", "source": "codesearchnet"}
{"code": "def _range_along_dimension(range_dim, shape):\n    rank = len(shape)\n    if (range_dim >= rank):\n        raise ValueError('Cannot calculate range along non-existent index.')\n    indices = tf.range(start=0, limit=shape[range_dim])\n    indices = tf.reshape(indices, shape=[(1 if (i != range_dim) else shape[range_dim]) for i in range(rank)])\n    return tf.tile(indices, [(shape[i] if (i != range_dim) else 1) for i in range(rank)])", "docstring": "Construct a Tensor whose values are the index along a dimension.\n\nConstruct a Tensor that counts the distance along a single dimension. This is\nuseful, for example, when constructing an identity matrix,\n\n>>> x = _range_along_dimension(0, [2, 2]).eval()\n>>> x\narray([[0, 0],\n[1, 1]], dtype=int32)\n\n>>> y = _range_along_dimension(1, [2, 2]).eval()\n>>> y\narray([[0, 1],\n[0, 1]], dtype=int32)\n\n>>> tf.cast(tf.equal(x, y), dtype=tf.int32).eval()\narray([[1, 0],\n[0, 1]], dtype=int32)\n\nArgs:\nrange_dim: int. Dimension to count indices on.\nshape: 1D Tensor of ints. Shape of Tensor to construct.\n\nReturns:\nA Tensor whose values are the same as the range along dimension range_dim.\n\nRaises:\nValueError: If range_dim isn't a valid dimension.", "source": "codesearchnet"}
{"code": "def write_to_hdf5(self, filename_out, *args, **kwargs):\n    t0 = time.time()\n    self.__update_header()\n    if self.container.isheavy():\n        self.__write_to_hdf5_heavy(filename_out)\n    else:\n        self.__write_to_hdf5_light(filename_out)\n    t1 = time.time()\n    logger.info(('Conversion time: %2.2fsec' % (t1 - t0)))", "docstring": "Write data to HDF5 file.\nIt check the file size then decides how to write the file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "codesearchnet"}
{"code": "def create_tree(profile, tree):\n    resource = '/trees'\n    payload = {'tree': tree}\n    data = api.post_request(profile, resource, payload)\n    return prepare(data)", "docstring": "Create a new tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\ntree\nA list of blob objects (each with a path, mode, type, and\ncontent or sha) to put in the tree.\n\nReturns:\nA dict with data about the tree.", "source": "codesearchnet"}
{"code": "def EnableNetworkInterfaces(\n      self, interfaces, logger, dhclient_script=None):\n    \n    \n    if os.path.exists(self.network_path):\n      self._DisableNetworkManager(interfaces, logger)\n    helpers.CallDhclient(interfaces, logger)", "docstring": "Enable the list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.\ndhclient_script: string, the path to a dhclient script used by dhclient.", "source": "juraj-google-style"}
{"code": "def prefetch_users(persistent_course_grades):\n    users = User.objects.filter(id__in=[grade.user_id for grade in persistent_course_grades])\n    return {user.id: user for user in users}", "docstring": "Prefetch Users from the list of user_ids present in the persistent_course_grades.\n\nArguments:\npersistent_course_grades (list): A list of PersistentCourseGrade.\n\nReturns:\n(dict): A dictionary containing user_id to user mapping.", "source": "codesearchnet"}
{"code": "def unique_array(arr):\n    \n    if not len(arr):\n        return np.asarray(arr)\n    elif pd:\n        if isinstance(arr, np.ndarray) and arr.dtype.kind not in 'MO':\n            \n            return pd.unique(arr)\n\n        values = []\n        for v in arr:\n            if (isinstance(v, datetime_types) and\n                not isinstance(v, cftime_types)):\n                v = pd.Timestamp(v).to_datetime64()\n            values.append(v)\n        return pd.unique(values)\n    else:\n        arr = np.asarray(arr)\n        _, uniq_inds = np.unique(arr, return_index=True)\n        return arr[np.sort(uniq_inds)]", "docstring": "Returns an array of unique values in the input order.\n\nArgs:\narr (np.ndarray or list): The array to compute unique values on\n\nReturns:\nA new array of unique values", "source": "juraj-google-style"}
{"code": "def _transform_binary_composition_to_expression(expression, node, context):\n    if (expression.operator not in constants.SUPPORTED_OPERATORS):\n        raise NotImplementedError(u'Filter operation \"{}\" is not supported by the SQL backend.'.format(expression.operator))\n    sql_operator = constants.SUPPORTED_OPERATORS[expression.operator]\n    left = _expression_to_sql(expression.left, node, context)\n    right = _expression_to_sql(expression.right, node, context)\n    if (sql_operator.cardinality == constants.CARDINALITY_UNARY):\n        (left, right) = _get_column_and_bindparam(left, right, sql_operator)\n        clause = getattr(left, sql_operator.name)(right)\n        return clause\n    elif (sql_operator.cardinality == constants.CARDINALITY_BINARY):\n        clause = getattr(sql_expressions, sql_operator.name)(left, right)\n        return clause\n    elif (sql_operator.cardinality == constants.CARDINALITY_LIST_VALUED):\n        (left, right) = _get_column_and_bindparam(left, right, sql_operator)\n        right.expanding = True\n        clause = getattr(left, sql_operator.name)(right)\n        return clause\n    raise AssertionError(u'Unreachable, operator cardinality {} for compiler expression {} is unknown'.format(sql_operator.cardinality, expression))", "docstring": "Transform a BinaryComposition compiler expression into a SQLAlchemy expression.\n\nRecursively calls _expression_to_sql to convert its left and right sub-expressions.\n\nArgs:\nexpression: expression, BinaryComposition compiler expression.\nnode: SqlNode, the SqlNode the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression.", "source": "codesearchnet"}
{"code": "def split_raster(rs, split_shp, field_name, temp_dir):\n        \n        UtilClass.rmmkdir(temp_dir)\n        ds = ogr_Open(split_shp)\n        lyr = ds.GetLayer(0)\n        lyr.ResetReading()\n        ft = lyr.GetNextFeature()\n        while ft:\n            cur_field_name = ft.GetFieldAsString(field_name)\n            for r in rs:\n                cur_file_name = r.split(os.sep)[-1]\n                outraster = temp_dir + os.sep + \\\n                            cur_file_name.replace('.tif', '_%s.tif' %\n                                                  cur_field_name.replace(' ', '_'))\n                subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp,\n                                 '-crop_to_cutline', '-cwhere',\n                                 \"'%s'='%s'\" % (field_name, cur_field_name), '-dstnodata',\n                                 '-9999'])\n            ft = lyr.GetNextFeature()\n        ds = None", "docstring": "Split raster by given shapefile and field name.\n\nArgs:\nrs: origin raster file.\nsplit_shp: boundary (ESRI Shapefile) used to spilt raster.\nfield_name: field name identify the spilt value.\ntemp_dir: directory to store the spilt rasters.", "source": "juraj-google-style"}
{"code": "def _CreateRouteShapesFolder(self, schedule, parent, route, style_id=None, visible=True):\n    shape_id_to_trips = {}\n    for trip in route.trips:\n        if trip.shape_id:\n            shape_id_to_trips.setdefault(trip.shape_id, []).append(trip)\n    if (not shape_id_to_trips):\n        return None\n    shape_id_to_trips_items = shape_id_to_trips.items()\n    shape_id_to_trips_items.sort((lambda a, b: cmp(len(b[1]), len(a[1]))))\n    folder = self._CreateFolder(parent, 'Shapes', visible)\n    for (shape_id, trips) in shape_id_to_trips_items:\n        trip_ids = [trip.trip_id for trip in trips]\n        name = ('%s (trips: %d)' % (shape_id, len(trips)))\n        description = ('Trips using this shape (%d in total): %s' % (len(trips), ', '.join(trip_ids)))\n        placemark = self._CreatePlacemark(folder, name, style_id, visible, description)\n        self._CreateLineStringForShape(placemark, schedule.GetShape(shape_id))\n    return folder", "docstring": "Create a KML Folder for the shapes of a route.\n\nThe folder contains a placemark for each shape referenced by a trip in the\nroute. If there are no such shapes, no folder is created and None is\nreturned.\n\nArgs:\nschedule: The transitfeed.Schedule instance.\nparent: The parent ElementTree.Element instance.\nroute: The transitfeed.Route instance.\nstyle_id: The id of a style to use if not None.\nvisible: Whether the placemark is initially visible or not.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "codesearchnet"}
{"code": "def sendfrom(self, user_id, dest_address, amount, minconf=1):\n        \n        amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)\n        txhash = self.rpc.call(\"sendfrom\",\n            user_id, dest_address, float(str(amount)), minconf\n        )\n        self.logger.debug(\"Send %s %s from %s to %s\" % (str(amount), self.coin,\n                                                        str(user_id), dest_address))\n        self.logger.debug(\"Transaction hash: %s\" % txhash)\n        return txhash", "docstring": "Send coins from user's account.\n\nArgs:\nuser_id (str): this user's unique identifier\ndest_address (str): address which is to receive coins\namount (str or Decimal): amount to send (eight decimal points)\nminconf (int): ensure the account has a valid balance using this\nmany confirmations (default=1)\n\nReturns:\nstr: transaction ID", "source": "juraj-google-style"}
{"code": "def garbage_collection(time_limit=YEAR/12.0):\n    \n    expired_request_infos = (\n        ri for ri in DATABASE.values()\n        if ri.creation_ts + time_limit <= time.time()\n    )\n\n    for ri in expired_request_infos:\n        del DATABASE[ri.url]", "docstring": "Collect and remove all :class:`.RequestInfo` objects older than\n`time_limit` (in seconds).\n\nArgs:\ntime_limit (float, default YEAR / 2): Collect objects older than\nthis limit.", "source": "juraj-google-style"}
{"code": "def padding_to_length(padding):\n    non_padding = (1.0 - padding)\n    return tf.to_int32(tf.reduce_sum(non_padding, axis=(- 1)))", "docstring": "Calculate the length of mask based on padding.\n\nArgs:\npadding: a Tensor with shape [..., length].\nReturns:\na Tensor with shape [...].", "source": "codesearchnet"}
{"code": "def _init_request_logging(self, app):\n        \n        enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)\n\n        if not enabled:\n            return\n\n        self._requests_middleware = WSGIApplication(\n            self._key, app.wsgi_app, telemetry_channel=self._channel)\n\n        app.wsgi_app = self._requests_middleware", "docstring": "Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``\nis set in the Flask config.\n\nArgs:\napp (flask.Flask). the Flask application for which to initialize the extension.", "source": "juraj-google-style"}
{"code": "def residual_block_layer(inputs, hparams):\n    kernel = (hparams.res_kernel_size, hparams.res_kernel_size)\n    x = inputs\n    for i in range(hparams.num_res_layers):\n        with tf.variable_scope(('res_conv_%d' % i)):\n            y = common_layers.conv_block(common_layers.layer_norm(x, hparams.hidden_size, name='lnorm'), hparams.hidden_size, [((1, 1), kernel)], strides=(1, 1), padding='SAME', name='residual_conv')\n            y = common_layers.conv_block(y, hparams.hidden_size, [((1, 1), (1, 1))], strides=(1, 1), padding='SAME', name='residual_dense')\n            x = common_layers.layer_postprocess(x, y, hparams)\n    return x", "docstring": "Residual block over inputs.\n\nRuns a residual block consisting of\nconv: kernel_size x kernel_size\nconv: 1x1\ndropout, add and normalize according to hparams.layer_postprocess_sequence.\n\nArgs:\ninputs: Tensor of shape [batch, height, width, hparams.hidden_size].\nhparams: HParams.\n\nReturns:\nTensor of shape [batch, height, width, hparams.hidden_size].", "source": "codesearchnet"}
{"code": "def NamedSelector(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):\n    \n    check.str_param(name, 'name')\n    check_user_facing_fields_dict(fields, 'NamedSelector named \"{}\"'.format(name))\n\n    class _NamedSelector(_ConfigSelector):\n        def __init__(self):\n            super(_NamedSelector, self).__init__(\n                key=name,\n                name=name,\n                fields=fields,\n                description=description,\n                type_attributes=type_attributes,\n            )\n\n    return _NamedSelector", "docstring": "A :py:class`Selector` with a name, allowing it to be referenced by that name.\n\nArgs:\nname (str):\nfields (Dict[str, Field])", "source": "juraj-google-style"}
{"code": "def call_each(seq):\n    \n    try:\n        reduce(lambda _, y: y(), seq)\n    except TypeError as e:\n        if text_type(e) != \"reduce() of empty sequence with no initial value\":\n            raise", "docstring": "Calls each element of sequence to invoke the side effect.\n\nArgs:\nseq:\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def predict_features(self, df_features, df_target, nh=20, idx=0, dropout=0.0, activation_function=th.nn.ReLU, lr=0.01, l1=0.1, batch_size=(- 1), train_epochs=1000, test_epochs=1000, device=None, verbose=None, nb_runs=3):\n    (device, verbose) = SETTINGS.get_default(('device', device), ('verbose', verbose))\n    x = th.FloatTensor(scale(df_features.values)).to(device)\n    y = th.FloatTensor(scale(df_target.values)).to(device)\n    out = []\n    for i in range(nb_runs):\n        model = FSGNN_model([(x.size()[1] + 1), nh, 1], dropout=dropout, activation_function=activation_function).to(device)\n        out.append(model.train(x, y, lr=0.01, l1=0.1, batch_size=(- 1), train_epochs=train_epochs, test_epochs=test_epochs, device=device, verbose=verbose))\n    return list(np.mean(np.array(out), axis=0))", "docstring": "For one variable, predict its neighbours.\n\nArgs:\ndf_features (pandas.DataFrame):\ndf_target (pandas.Series):\nnh (int): number of hidden units\nidx (int): (optional) for printing purposes\ndropout (float): probability of dropout (between 0 and 1)\nactivation_function (torch.nn.Module): activation function of the NN\nlr (float): learning rate of Adam\nl1 (float): L1 penalization coefficient\nbatch_size (int): batch size, defaults to full-batch\ntrain_epochs (int): number of train epochs\ntest_epochs (int): number of test epochs\ndevice (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``)\nverbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``)\nnb_runs (int): number of bootstrap runs\n\nReturns:\nlist: scores of each feature relatively to the target", "source": "codesearchnet"}
{"code": "def update_vlan(self, name, vid, vni):\n    cmd = ('vxlan vlan %s vni %s' % (vid, vni))\n    return self.configure_interface(name, cmd)", "docstring": "Adds a new vlan to vni mapping for the interface\n\nEosVersion:\n4.13.7M\n\nArgs:\nvlan (str, int): The vlan id to map to the vni\nvni (str, int): The vni value to use\n\nReturns:\nTrue if the command completes successfully", "source": "codesearchnet"}
{"code": "def delete_vnet(access_token, subscription_id, resource_group, name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/virtualNetworks/', name,\n                        '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a virtual network.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nname (str): Name of the VNet.\n\nReturns:\nHTTP response. VNet JSON body.", "source": "juraj-google-style"}
{"code": "def __init__(self, target='', graph=None, config=None):\n    _python_session_create_counter.get_cell().increase_by(1)\n    if graph is None:\n        self._graph = ops.get_default_graph()\n    else:\n        if not isinstance(graph, ops.Graph):\n            raise TypeError(f'Argument `graph` must be a tf.Graph, but got \"{type(graph).__name__}\"')\n        self._graph = graph\n    self._closed = False\n    if target is not None:\n        try:\n            self._target = compat.as_bytes(target)\n        except TypeError:\n            if isinstance(target, config_pb2.ConfigProto):\n                raise TypeError(f'Argument `target` must be a string, but got \"{type(target).__name__}\". Did you do \"Session(config)\" instead of \"Session(config=config)\"?')\n            raise TypeError(f'Argument `target` must be a string, but got \"{type(target).__name__}\"')\n    else:\n        self._target = None\n    self._delete_lock = threading.Lock()\n    self._dead_handles = []\n    if config is None:\n        config = context.context().config\n    if not isinstance(config, config_pb2.ConfigProto):\n        raise TypeError(f'Argument `config` must be a tf.ConfigProto, but got \"{type(config).__name__}\"')\n    if mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled() and config.graph_options.rewrite_options.auto_mixed_precision != rewriter_config_pb2.RewriterConfig.OFF:\n        new_config = config_pb2.ConfigProto()\n        new_config.CopyFrom(config)\n        new_config.graph_options.rewrite_options.auto_mixed_precision = rewriter_config_pb2.RewriterConfig.ON\n        config = new_config\n    elif config.graph_options.rewrite_options.auto_mixed_precision != rewriter_config_pb2.RewriterConfig.ON:\n        mixed_precision_global_state.set_non_mixed_precision_session_created(True)\n    self._config = config\n    self._add_shapes = config.graph_options.infer_shapes\n    self._session = None\n    opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)\n    try:\n        with self._graph._c_graph.get() as c_graph:\n            self._session = tf_session.TF_NewSessionRef(c_graph, opts)\n    finally:\n        tf_session.TF_DeleteSessionOptions(opts)", "docstring": "Constructs a new TensorFlow session.\n\nArgs:\ntarget: (Optional) The TensorFlow execution engine to connect to.\ngraph: (Optional) The graph to be used. If this argument is None, the\ndefault graph will be used.\nconfig: (Optional) ConfigProto proto used to configure the session. If no\nconfig is specified, the global default will be used. The global default\ncan be configured via the tf.config APIs.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\ncreating the TensorFlow session.\nTypeError: If one of the arguments has the wrong type.", "source": "github-repos"}
{"code": "def from_pymatgen_molecule(cls, molecule):\n        \n        new = cls(atoms=[el.value for el in molecule.species],\n                  coords=molecule.cart_coords)\n        return new._to_numeric()", "docstring": "Create an instance of the own class from a pymatgen molecule\n\nArgs:\nmolecule (:class:`pymatgen.core.structure.Molecule`):\n\nReturns:\nCartesian:", "source": "juraj-google-style"}
{"code": "def StatEntryFromPath(path, pathspec, ext_attrs=True):\n    try:\n        stat = filesystem.Stat.FromPath(path)\n    except (IOError, OSError) as error:\n        logging.error(\"Failed to obtain stat for '%s': %s\", pathspec, error)\n        return rdf_client_fs.StatEntry(pathspec=pathspec)\n    return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)", "docstring": "Builds a stat entry object from a given path.\n\nArgs:\npath: A path (string value) to stat.\npathspec: A `PathSpec` corresponding to the `path`.\next_attrs: Whether to include extended file attributes in the result.\n\nReturns:\n`StatEntry` object.", "source": "codesearchnet"}
{"code": "def queuify_logger(logger, queue_handler, queue_listener):\n    \n    if isinstance(logger, str):\n        logger = logging.getLogger(logger)\n\n    \n    handlers = [handler for handler in logger.handlers\n                if handler not in queue_listener.handlers]\n\n    if handlers:\n        \n        queue_listener.handlers = \\\n            tuple(list(queue_listener.handlers) + handlers)\n\n    \n    del logger.handlers[:]\n    logger.addHandler(queue_handler)", "docstring": "Replace logger's handlers with a queue handler while adding existing\nhandlers to a queue listener.\n\nThis is useful when you want to use a default logging config but then\noptionally add a logger's handlers to a queue during runtime.\n\nArgs:\nlogger (mixed): Logger instance or string name of logger to queue-ify\nhandlers.\nqueue_handler (QueueHandler): Instance of a ``QueueHandler``.\nqueue_listener (QueueListener): Instance of a ``QueueListener``.", "source": "juraj-google-style"}
{"code": "def Oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):\n        \n        if not isinstance(command, bytes):\n            command = command.encode('utf8')\n        return self._SimpleCommand(\n            b'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)", "docstring": "Executes an OEM command on the device.\n\nArgs:\ncommand: Command to execute, such as 'poweroff' or 'bootconfig read'.\ntimeout_ms: Optional timeout in milliseconds to wait for a response.\ninfo_cb: See Download. Messages vary based on command.\n\nReturns:\nThe final response from the device.", "source": "juraj-google-style"}
{"code": "def Sample(self, tasks_status):\n    sample_time = time.time()\n    sample = '{0:f}\\t{1:d}\\t{2:d}\\t{3:d}\\t{4:d}\\t{5:d}\\n'.format(sample_time, tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks)\n    self._WritesString(sample)", "docstring": "Takes a sample of the status of queued tasks for profiling.\n\nArgs:\ntasks_status (TasksStatus): status information about tasks.", "source": "codesearchnet"}
{"code": "def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):\n        \n        src_r = RasterUtilClass.read_raster(srcfile)\n        src_data = src_r.data\n        dst_data = numpy.copy(src_data)\n        if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:\n            gdaltype = src_r.dataType\n        no_data = src_r.noDataValue\n        new_no_data = DEFAULT_NODATA\n        if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:\n            new_no_data = 0\n        if not MathClass.floatequal(new_no_data, src_r.noDataValue):\n            if src_r.noDataValue not in v_dict:\n                v_dict[src_r.noDataValue] = new_no_data\n                no_data = new_no_data\n\n        for (k, v) in iteritems(v_dict):\n            dst_data[src_data == k] = v\n        RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,\n                                         src_r.geotrans, src_r.srs, no_data, gdaltype)", "docstring": "Reclassify raster by given classifier dict.\n\nArgs:\nsrcfile: source raster file.\nv_dict: classifier dict.\ndstfile: destination file path.\ngdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.", "source": "juraj-google-style"}
{"code": "def _get_commands(dist  \n                  ):\n    \n    \n    py_files = (f for f in setuptools.findall()\n                if os.path.splitext(f)[1].lower() == '.py')\n    pkg_files = (f for f in py_files if _get_package_name(f) in dist.packages)\n    commands = {}  \n    for file_name in pkg_files:\n        with open(file_name) as py_file:\n            module = typing.cast(ast.Module, ast.parse(py_file.read()))\n        module_name = _get_module_name(file_name)\n        _append_commands(commands, module_name, _get_module_commands(module))\n        _append_commands(commands, module_name, _get_class_commands(module))\n        _append_commands(commands, module_name, _get_function_commands(module))\n    return commands", "docstring": "Find all commands belonging to the given distribution.\n\nArgs:\ndist: The Distribution to search for docopt-compatible docstrings that\ncan be used to generate command entry points.\n\nReturns:\nA dictionary containing a mapping of primary commands to sets of\nsubcommands.", "source": "juraj-google-style"}
{"code": "def _GetAnalysisPlugins(self, analysis_plugins_string):\n    if (not analysis_plugins_string):\n        return []\n    analysis_plugins_list = [name.strip() for name in analysis_plugins_string.split(',')]\n    analysis_plugins = self._analysis_manager.GetPluginObjects(analysis_plugins_list)\n    return analysis_plugins.values()", "docstring": "Retrieves analysis plugins.\n\nArgs:\nanalysis_plugins_string (str): comma separated names of analysis plugins\nto enable.\n\nReturns:\nlist[AnalysisPlugin]: analysis plugins.", "source": "codesearchnet"}
{"code": "def getAll(self, event_name):\n    raw_events = self.callEventGetAllRpc(self._id, event_name)\n    return [callback_event.from_dict(msg) for msg in raw_events]", "docstring": "Gets all existing events in the server with the specified identifier.\n\nThis is a non-blocking call.\n\nArgs:\nevent_name: str, the name of the event to get.\n\nReturns:\nA list of CallbackEvent, each representing an event from the Server side.", "source": "github-repos"}
{"code": "def initialize_repository(path, spor_dir='.spor'):\n    path = pathlib.Path(path)\n    spor_path = (path / spor_dir)\n    if spor_path.exists():\n        raise ValueError('spor directory already exists: {}'.format(spor_path))\n    spor_path.mkdir()\n    return Repository(path, spor_dir)", "docstring": "Initialize a spor repository in `path` if one doesn't already exist.\n\nArgs:\npath: Path to any file or directory within the repository.\nspor_dir: The name of the directory containing spor data.\n\nReturns: A `Repository` instance.\n\nRaises:\nValueError: A repository already exists at `path`.", "source": "codesearchnet"}
{"code": "def ns(self, value):\n        \n        if value == self._defaults['ns'] and 'ns' in self._values:\n            del self._values['ns']\n        else:\n            self._values['ns'] = value", "docstring": "The ns property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def __init__(self, feed_merger):\n    \n    self.feed_merger = feed_merger\n    self._num_merged = 0\n    self._num_not_merged_a = 0\n    self._num_not_merged_b = 0", "docstring": "Initialise.\n\nArgs:\nfeed_merger: The FeedMerger.", "source": "juraj-google-style"}
{"code": "def AddLabel(self, label):\n    if (not isinstance(label, py2to3.STRING_TYPES)):\n        raise TypeError('label is not a string type. Is {0:s}'.format(type(label)))\n    if (not self._VALID_LABEL_REGEX.match(label)):\n        raise ValueError('Unsupported label: \"{0:s}\". A label must only consist of alphanumeric characters or underscores.'.format(label))\n    if (label not in self.labels):\n        self.labels.append(label)", "docstring": "Adds a label to the event tag.\n\nArgs:\nlabel (str): label.\n\nRaises:\nTypeError: if the label provided is not a string.\nValueError: if a label is malformed.", "source": "codesearchnet"}
{"code": "def add_trial(self, trial):\n        \n        trial.set_verbose(self._verbose)\n        self._trials.append(trial)\n        with warn_if_slow(\"scheduler.on_trial_add\"):\n            self._scheduler_alg.on_trial_add(self, trial)\n        self.trial_executor.try_checkpoint_metadata(trial)", "docstring": "Adds a new trial to this TrialRunner.\n\nTrials may be added at any time.\n\nArgs:\ntrial (Trial): Trial to queue.", "source": "juraj-google-style"}
{"code": "def lookup_subclass(cls, d):\n    try:\n        typeid = d['typeid']\n    except KeyError:\n        raise FieldError(('typeid not present in keys %s' % list(d)))\n    subclass = cls._subcls_lookup.get(typeid, None)\n    if (not subclass):\n        raise FieldError((\"'%s' not a valid typeid\" % typeid))\n    else:\n        return subclass", "docstring": "Look up a class based on a serialized dictionary containing a typeid\n\nArgs:\nd (dict): Dictionary with key \"typeid\"\n\nReturns:\nSerializable subclass", "source": "codesearchnet"}
{"code": "def process_resource(self, req, resp, resource, uri_kwargs=None):\n        \n        if 'user' in req.context:\n            return\n\n        identifier = self.identify(req, resp, resource, uri_kwargs)\n        user = self.try_storage(identifier, req, resp, resource, uri_kwargs)\n\n        if user is not None:\n            req.context['user'] = user\n\n        \n        \n        elif self.challenge is not None:\n            req.context.setdefault(\n                'challenges', list()\n            ).append(self.challenge)", "docstring": "Process resource after routing to it.\n\nThis is basic falcon middleware handler.\n\nArgs:\nreq (falcon.Request): request object\nresp (falcon.Response): response object\nresource (object): resource object matched by falcon router\nuri_kwargs (dict): additional keyword argument from uri template.\nFor ``falcon<1.0.0`` this is always ``None``", "source": "juraj-google-style"}
{"code": "def __call__(self, index, s):\n        \n        if self.colorize:\n            self._color_wrap(index, s)\n        else:\n            print(s)", "docstring": "Print the output, colorized or not, depending on the environment.\n\nArgs:\nindex (int): The instance number.\ns (str): The string to print.", "source": "juraj-google-style"}
{"code": "def importGurobiSolution(self, grbmodel):\n        \n        self.eval(''.join(\n            'let {} := {};'.format(var.VarName, var.X)\n            for var in grbmodel.getVars()\n            if '$' not in var.VarName\n        ))", "docstring": "Import the solution from a gurobipy.Model object.\n\nArgs:\ngrbmodel: A :class:`gurobipy.Model` object with the model solved.", "source": "juraj-google-style"}
{"code": "def DecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode):\n    return layers.Serial(layers.Residual(layers.LayerNorm(), layers.Branch(), layers.Parallel(layers.Identity(), layers.CausalMask(axis=(- 2))), layers.MultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), layers.Dropout(rate=dropout, mode=mode)), ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode))", "docstring": "Transformer decoder layer.\n\nArgs:\nfeature_depth: int:  depth of embedding\nfeedforward_depth: int: depth of feed-forward layer\nnum_heads: int: number of attention heads\ndropout: float: dropout rate (how much to drop out)\nmode: str: 'train' or 'eval'\n\nReturns:\nthe layer.", "source": "codesearchnet"}
{"code": "def swo_stop(self):\n        \n        res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.STOP, 0)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return None", "docstring": "Stops collecting SWO data.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def check_streamers(self, blacklist=None):\n    ready = []\n    selected = set()\n    for (i, streamer) in enumerate(self.streamers):\n        if ((blacklist is not None) and (i in blacklist)):\n            continue\n        if (i in selected):\n            continue\n        marked = False\n        if (i in self._manually_triggered_streamers):\n            marked = True\n            self._manually_triggered_streamers.remove(i)\n        if streamer.triggered(marked):\n            self._logger.debug('Streamer %d triggered, manual=%s', i, marked)\n            ready.append(streamer)\n            selected.add(i)\n            for (j, streamer2) in enumerate(self.streamers[i:]):\n                if ((streamer2.with_other == i) and (j not in selected) and streamer2.triggered(True)):\n                    self._logger.debug('Streamer %d triggered due to with-other on %d', j, i)\n                    ready.append(streamer2)\n                    selected.add(j)\n    return ready", "docstring": "Check if any streamers are ready to produce a report.\n\nYou can limit what streamers are checked by passing a set-like\nobject into blacklist.\n\nThis method is the primary way to see when you should poll a given\nstreamer for its next report.\n\nNote, this function is not idempotent.  If a streamer is marked as\nmanual and it is triggered from a node rule inside the sensor_graph,\nthat trigger will only last as long as the next call to\ncheck_streamers() so you need to explicitly build a report on all\nready streamers before calling check_streamers again.\n\nArgs:\nblacklist (set): Optional set of streamer indices that should\nnot be checked right now.\n\nReturns:\nlist of DataStreamer: A list of the ready streamers.", "source": "codesearchnet"}
{"code": "def FindChecks(cls, artifact=None, os_name=None, cpe=None, labels=None, restrict_checks=None):\n    check_ids = set()\n    conditions = list(cls.Conditions(artifact, os_name, cpe, labels))\n    for (chk_id, chk) in iteritems(cls.checks):\n        if (restrict_checks and (chk_id not in restrict_checks)):\n            continue\n        for condition in conditions:\n            if chk.triggers.Match(*condition):\n                check_ids.add(chk_id)\n                break\n    return check_ids", "docstring": "Takes targeting info, identifies relevant checks.\n\nFindChecks will return results when a host has the conditions necessary for\na check to occur. Conditions with partial results are not returned. For\nexample, FindChecks will not return checks that if a check targets\nos_name=[\"Linux\"], labels=[\"foo\"] and a host only has the os_name=[\"Linux\"]\nattribute.\n\nArgs:\nartifact: 0+ artifact names.\nos_name: 0+ OS names.\ncpe: 0+ CPE identifiers.\nlabels: 0+ GRR labels.\nrestrict_checks: A list of check ids to restrict check processing to.\n\nReturns:\nthe check_ids that apply.", "source": "codesearchnet"}
{"code": "def to_pytd_def(self, val: abstract.BaseValue) -> pytd.Node:\n    if isinstance(val, abstract.SimpleClass):\n        return self._class_to_pytd_def(val)\n    elif isinstance(val, abstract.BaseFunction):\n        return self._function_to_pytd_def(val)\n    else:\n        raise NotImplementedError(f'to_pytd_def() not implemented for {val.__class__.__name__}: {val}')", "docstring": "Returns the pytd definition of the abstract value.\n\nFor example, if the abstract value is:\nInterpreterClass(name='C', members={'x': PythonConstant(0)})\nthen to_pytd_def() produces:\npytd.Class(name='C',\nconstants=(pytd.Constant(name='x', type=pytd.NamedType(int)),))\n\nArgs:\nval: The abstract value.", "source": "github-repos"}
{"code": "def _GetKeysDefaultEmpty(self, top_level, keys, depth=1):\n    \n    keys = set(keys)\n    match = {}\n\n    if depth == 1:\n      for key in keys:\n        value = top_level.get(key, None)\n        if value is not None:\n          match[key] = value\n    else:\n      for _, parsed_key, parsed_value in plist_interface.RecurseKey(\n          top_level, depth=depth):\n        if parsed_key in keys:\n          match[parsed_key] = parsed_value\n          if set(match.keys()) == keys:\n            return match\n    return match", "docstring": "Retrieves plist keys, defaulting to empty values.\n\nArgs:\ntop_level (plistlib._InternalDict): top level plist object.\nkeys (set[str]): names of keys that should be returned.\ndepth (int): depth within the plist, where 1 is top level.\n\nReturns:\ndict[str, str]: values of the requested keys.", "source": "juraj-google-style"}
{"code": "def getValue(self, scalarExpression):\n    return lock_and_call((lambda : Utils.castVariant(self._impl.getValue(scalarExpression))), self._lock)", "docstring": "Get a scalar value from the underlying AMPL interpreter, as a double or\na string.\n\nArgs:\nscalarExpression: An AMPL expression which evaluates to a scalar\nvalue.\n\nReturns:\nThe value of the expression.", "source": "codesearchnet"}
{"code": "def genUserCert(self, name, signas=None, outp=None, csr=None):\n    (pkey, cert) = self._genBasePkeyCert(name, pkey=csr)\n    cert.add_extensions([crypto.X509Extension(b'nsCertType', False, b'client'), crypto.X509Extension(b'keyUsage', False, b'digitalSignature'), crypto.X509Extension(b'extendedKeyUsage', False, b'clientAuth'), crypto.X509Extension(b'basicConstraints', False, b'CA:FALSE')])\n    if (signas is not None):\n        self.signCertAs(cert, signas)\n    else:\n        self.selfSignCert(cert, pkey)\n    crtpath = self._saveCertTo(cert, 'users', ('%s.crt' % name))\n    if (outp is not None):\n        outp.printf(('cert saved: %s' % (crtpath,)))\n    if (not pkey._only_public):\n        keypath = self._savePkeyTo(pkey, 'users', ('%s.key' % name))\n        if (outp is not None):\n            outp.printf(('key saved: %s' % (keypath,)))\n    return (pkey, cert)", "docstring": "Generates a user keypair.\n\nArgs:\nname (str): The name of the user keypair.\nsignas (str): The CA keypair to sign the new user keypair with.\noutp (synapse.lib.output.Output): The output buffer.\ncsr (OpenSSL.crypto.PKey): The CSR public key when generating the keypair from a CSR.\n\nExamples:\nGenerate a user cert for the user \"myuser\":\n\nmyuserkey, myusercert = cdir.genUserCert('myuser')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the key and certificate objects.", "source": "codesearchnet"}
{"code": "def getValue(self, scalarExpression):\n        \n        return lock_and_call(\n            lambda: Utils.castVariant(self._impl.getValue(scalarExpression)),\n            self._lock\n        )", "docstring": "Get a scalar value from the underlying AMPL interpreter, as a double or\na string.\n\nArgs:\nscalarExpression: An AMPL expression which evaluates to a scalar\nvalue.\n\nReturns:\nThe value of the expression.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not path_spec:\n      raise ValueError('Missing path specification.')\n\n    file_system = resolver.Resolver.OpenFileSystem(\n        path_spec, resolver_context=self._resolver_context)\n\n    file_entry = file_system.GetFileEntryByPathSpec(path_spec)\n    if not file_entry:\n      file_system.Close()\n      raise IOError('Unable to retrieve file entry.')\n\n    if not file_entry.IsFile():\n      file_system.Close()\n      raise IOError('Not a regular file.')\n\n    self._file_system = file_system\n    self._zip_file = self._file_system.GetZipFile()\n    self._zip_info = file_entry.GetZipInfo()\n\n    self._current_offset = 0\n    self._uncompressed_stream_size = self._zip_info.file_size", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (Optional[PathSpec]): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def convert_rgb(self, image):\n    self._ensure_format_supported(image)\n    if not isinstance(image, PIL.Image.Image):\n        return image\n    return image.convert('RGB')", "docstring": "Converts `PIL.Image.Image` to RGB format.\n\nArgs:\nimage (`PIL.Image.Image`):\nThe image to convert.", "source": "github-repos"}
{"code": "def _format_variant(self, case_id, gemini_variant, individual_objs,\n                        index=0, add_all_info=False):\n        \n        chrom = gemini_variant['chrom']\n        if chrom.startswith('chr') or chrom.startswith('CHR'):\n            chrom = chrom[3:]\n\n        variant_dict = {\n            'CHROM':chrom,\n            'POS':str(gemini_variant['start']),\n            'ID':gemini_variant['rs_ids'],\n            'REF':gemini_variant['ref'],\n            'ALT':gemini_variant['alt'],\n            'QUAL':gemini_variant['qual'],\n            'FILTER':gemini_variant['filter']\n        }\n\n        variant = Variant(**variant_dict)\n\n        \n        variant.update_variant_id(gemini_variant['variant_id'])\n        logger.debug(\"Creating a variant object of variant {0}\".format(\n            variant.variant_id))\n\n        variant['index'] = index\n\n        \n        self._add_most_severe_consequence(variant, gemini_variant)\n\n        \n        self._add_impact_severity(variant, gemini_variant)\n        \n        variant.start = int(gemini_variant['start'])\n        variant.stop = int(gemini_variant['end'])\n\n        \n        if self.variant_type == 'sv':\n            variant.sv_type = gemini_variant['sub_type']\n            variant.stop = int(gemini_variant['end'])\n            self._add_sv_coordinates(variant)\n\n        else:\n            \n            \n            self._add_transcripts(variant, gemini_variant)\n            self._add_thousand_g(variant, gemini_variant)\n            self._add_exac(variant, gemini_variant)\n            self._add_gmaf(variant, gemini_variant)\n            \n            if gemini_variant['cadd_scaled']:\n                variant.cadd_score = gemini_variant['cadd_scaled']\n\n            \n            polyphen = gemini_variant['polyphen_pred']\n            if polyphen:\n                variant.add_severity('Polyphen', polyphen)\n\n            \n            sift = gemini_variant['sift_pred']\n            if sift:\n                variant.add_severity('SIFT', sift)\n\n        \n        self._add_hgnc_symbols(variant)\n        if self.variant_type == 'snv':\n            self._add_genes(variant)\n\n        self._add_consequences(variant)\n\n        \n        \n        if add_all_info:\n            self._add_genotypes(variant, gemini_variant, case_id, individual_objs)\n            if self.variant_type == 'sv':\n                self._add_genes(variant)\n\n        return variant", "docstring": "Make a puzzle variant from a gemini variant\n\nArgs:\ncase_id (str): related case id\ngemini_variant (GeminiQueryRow): The gemini variant\nindividual_objs (list(dict)): A list of Individuals\nindex(int): The index of the variant\n\nReturns:\nvariant (dict): A Variant object", "source": "juraj-google-style"}
{"code": "def agg_wt_avg(mat, min_wt=0.01, corr_metric='spearman'):\n    assert (mat.shape[1] > 0), 'mat is empty! mat: {}'.format(mat)\n    if (mat.shape[1] == 1):\n        out_sig = mat\n        upper_tri_df = None\n        raw_weights = None\n        weights = None\n    else:\n        assert (corr_metric in ['spearman', 'pearson'])\n        corr_mat = mat.corr(method=corr_metric)\n        upper_tri_df = get_upper_triangle(corr_mat)\n        (raw_weights, weights) = calculate_weights(corr_mat, min_wt)\n        weighted_values = (mat * weights)\n        out_sig = weighted_values.sum(axis=1)\n    return (out_sig, upper_tri_df, raw_weights, weights)", "docstring": "Aggregate a set of replicate profiles into a single signature using\na weighted average.\n\nArgs:\nmat (pandas df): a matrix of replicate profiles, where the columns are\nsamples and the rows are features; columns correspond to the\nreplicates of a single perturbagen\nmin_wt (float): Minimum raw weight when calculating weighted average\ncorr_metric (string): Spearman or Pearson; the correlation method\n\nReturns:\nout_sig (pandas series): weighted average values\nupper_tri_df (pandas df): the correlations between each profile that went into the signature\nraw weights (pandas series): weights before normalization\nweights (pandas series): weights after normalization", "source": "codesearchnet"}
{"code": "def run_config(self, project, run=None, entity=None):\n        \n        query = gql()\n\n        response = self.gql(query, variable_values={\n            'name': project, 'run': run, 'entity': entity\n        })\n        if response['model'] == None:\n            raise ValueError(\"Run {}/{}/{} not found\".format(entity, project, run) )\n        run = response['model']['bucket']\n        commit = run['commit']\n        patch = run['patch']\n        config = json.loads(run['config'] or '{}')\n        if len(run['files']['edges']) > 0:\n            url = run['files']['edges'][0]['node']['url']\n            res = requests.get(url)\n            res.raise_for_status()\n            metadata = res.json()\n        else:\n            metadata = {}\n        return (commit, config, patch, metadata)", "docstring": "Get the relevant configs for a run\n\nArgs:\nproject (str): The project to download, (can include bucket)\nrun (str, optional): The run to download\nentity (str, optional): The entity to scope this project to.", "source": "juraj-google-style"}
{"code": "def read_proto(filename: str, proto_cls: Type[_T]) -> _T:\n    filepath = _fhir_filepath_from(filename)\n    proto = proto_cls()\n    raw_proto = ''\n    with open(filepath, 'r', encoding='utf-8') as f:\n        raw_proto = f.read()\n        text_format.Parse(raw_proto, proto)\n    return proto", "docstring": "Reads protobuf information from filename relative to the fhir/ root dir.\n\nData is serialized into an instance of `proto_cls`.\n\nArgs:\nfilename: The file to read from.\nproto_cls: The type of protobuf message to look for and return.\n\nReturns:\nThe protobuf message in the file.", "source": "github-repos"}
{"code": "def reverse_transform(self, col):\n        \n        output = pd.DataFrame()\n        new_name = '?' + self.col_name\n\n        col.loc[col[new_name] == 0, self.col_name] = np.nan\n        output[self.col_name] = col[self.col_name]\n        return output", "docstring": "Converts data back into original format.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    if self.trigram_input:\n        inputs_embeds = tf.concat([tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))), inputs_embeds, tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0)))], axis=2)\n    if self.trigram_input or self.embedding_size != self.hidden_size:\n        inputs_embeds = self.embedding_transformation(inputs_embeds)\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def __init__(self, window, index=-1, flags=frozenset()):\n        \n        self._ptr = check_ptr_err(lib.SDL_CreateRenderer(window._ptr, index, enumtools.get_mask(flags)))", "docstring": "Create a 2D rendering context for a window.\n\nArgs:\nwindow (Window): The window where rendering is displayed.\nindex (int): The index of the rendering driver to initialize, or -1 to initialize the first one supporting\nthe requested flags.\nflags (Set[RendererFlags]): The requested renderer flags.\n\nRaises:\nSDLError: If there was an error creating the renderer.", "source": "juraj-google-style"}
{"code": "def get_size_ratio(path_a: str, path_b: str) -> float:\n    size_a = get_dir_size(path_a)\n    size_b = get_dir_size(path_b)\n    return size_a / size_b", "docstring": "Return the size ratio of the given paths.\n\nArgs:\npath_a: Path of a directory or a file to be the nominator of the ratio.\npath_b: Path of a directory or a file to be the denominator of the ratio.\n\nReturns:\nRatio of size of path_a / size of path_b.", "source": "github-repos"}
{"code": "def GetArtifactsForCollection(os_name, artifact_list):\n    artifact_arranger = ArtifactArranger(os_name, artifact_list)\n    artifact_names = artifact_arranger.GetArtifactsInProperOrder()\n    return artifact_names", "docstring": "Wrapper for the ArtifactArranger.\n\nExtend the artifact list by dependencies and sort the artifacts to resolve the\ndependencies.\n\nArgs:\nos_name: String specifying the OS name.\nartifact_list: List of requested artifact names.\n\nReturns:\nA list of artifacts such that if they are collected in the given order\ntheir dependencies are resolved.", "source": "codesearchnet"}
{"code": "def download_mmcif_header(pdb_id, outdir='', force_rerun=False):\n    pdb_id = pdb_id.lower()\n    file_type = 'cif'\n    folder = 'header'\n    outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        download_link = 'http:\n        urlretrieve(download_link, outfile)\n        log.debug('{}: saved header file'.format(outfile))\n    else:\n        log.debug('{}: header file already saved'.format(outfile))\n    return outfile", "docstring": "Download a mmCIF header file from the RCSB PDB by ID.\n\nArgs:\npdb_id: PDB ID\noutdir: Optional output directory, default is current working directory\nforce_rerun: If the file should be downloaded again even if it exists\n\nReturns:\nstr: Path to outfile", "source": "codesearchnet"}
{"code": "def from_hubo(cls, H, offset=None):\n        \n        poly = cls(H, Vartype.BINARY)\n        if offset is not None:\n            poly[()] = poly.get((), 0) + offset\n        return poly", "docstring": "Construct a binary polynomial from a higher-order unconstrained\nbinary optimization (HUBO) problem.\n\nArgs:\nH (dict):\nCoefficients of a higher-order unconstrained binary optimization\n(HUBO) model.\n\nReturns:\n:obj:`.BinaryPolynomial`\n\nExamples:\n>>> poly = dimod.BinaryPolynomial.from_hubo({('a', 'b', 'c'): -1})", "source": "juraj-google-style"}
{"code": "def _maybe_broadcast_to_outputs(self, outputs, objects):\n    if not self._should_broadcast(objects):\n        return objects\n    should_copy_objects = len(nest.flatten(outputs)) > 1\n\n    def _broadcast_fn():\n        if should_copy_objects:\n            return nest.map_structure(self._copy_object, objects)\n        return objects\n    return nest.map_structure(lambda _: _broadcast_fn(), outputs)", "docstring": "Determines if losses / metrics should be applied to all outputs.\n\nNOTE: This method should only be called for Metrics / Losses, not for\ny_true / sample_weight.\n\nArgs:\noutputs: Model predictions.\nobjects: Arbitrary nested structure (e.g. of losses or metrics)\n\nReturns:\nArbitrary nested structure of objects, maybe copied to each output.\n\nApplies a Loss / Metric to all outputs.", "source": "github-repos"}
{"code": "def _ip_unnumbered_name(self, **kwargs):\n        \n\n        method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\\\n            'interface_name' % kwargs['int_type']\n        ip_unnumbered_name = getattr(self._interface, method_name)\n        config = ip_unnumbered_name(**kwargs)\n        if kwargs['delete']:\n            tag = 'ip-donor-interface-name'\n            config.find('.\n        return config", "docstring": "Return the `ip unnumbered` donor name XML.\n\nYou should not use this method.\nYou probably want `Interface.ip_unnumbered`.\n\nArgs:\nint_type (str): Type of interface. (gigabitethernet,\ntengigabitethernet etc).\ndelete (bool): Remove the configuration if ``True``.\nip_donor_interface_name (str): The donor interface name (1, 2, etc)\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def get_model_details(self, model_name):\n    \n    full_name = model_name\n    if not model_name.startswith('projects/'):\n      full_name = ('projects/%s/models/%s' % (self._project_id, model_name))\n    return self._api.projects().models().get(name=full_name).execute()", "docstring": "Get details of the specified model from CloudML Service.\n\nArgs:\nmodel_name: the name of the model. It can be a model full name\n(\"projects/[project_id]/models/[model_name]\") or just [model_name].\nReturns: a dictionary of the model details.", "source": "juraj-google-style"}
{"code": "def change_password(username, new_password):\n    \n    assert username in passwd_reader.load_users(),\\\n           \"Username '%s' not found!\" % username\n\n    sh.ftpasswd(\n        \"--change-password\",\n        passwd=True,        \n        name=username,\n        stdin=True,         \n        file=settings.LOGIN_FILE,\n        _in=new_password\n    )\n\n    reload_configuration()", "docstring": "Change password for given `username`.\n\nArgs:\nusername (str): User's name.\nnew_password (str): User's new password.", "source": "juraj-google-style"}
{"code": "def inverse(self):\n    if (not self.definition):\n        raise QiskitError(('inverse() not implemented for %s.' % self.name))\n    inverse_gate = self.copy(name=(self.name + '_dg'))\n    inverse_gate._definition = []\n    for (inst, qargs, cargs) in reversed(self._definition):\n        inverse_gate._definition.append((inst.inverse(), qargs, cargs))\n    return inverse_gate", "docstring": "Invert this instruction.\n\nIf the instruction is composite (i.e. has a definition),\nthen its definition will be recursively inverted.\n\nSpecial instructions inheriting from Instruction can\nimplement their own inverse (e.g. T and Tdg, Barrier, etc.)\n\nReturns:\nInstruction: a fresh instruction for the inverse\n\nRaises:\nQiskitError: if the instruction is not composite\nand an inverse has not been implemented for it.", "source": "codesearchnet"}
{"code": "def patch_request(self, uri, body, custom_headers=None, timeout=-1):\n        \n        logger.debug('Patch resource (uri = %s, data = %s)' % (uri, body))\n\n        if not custom_headers:\n            custom_headers = {}\n\n        if self._connection._apiVersion >= 300 and 'Content-Type' not in custom_headers:\n            custom_headers['Content-Type'] = 'application/json-patch+json'\n\n        task, entity = self._connection.patch(uri, body, custom_headers=custom_headers)\n\n        if not task:\n            return entity\n\n        return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Uses the PATCH to update a resource.\n\nOnly one operation can be performed in each PATCH call.\n\nArgs:\nbody (list): Patch request body\ntimeout (int): Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\ncustom_headers (dict): Allows to add custom http headers.\n\nReturns:\nUpdated resource.", "source": "juraj-google-style"}
{"code": "def __contains__(self, key):\n        \n        path = self.keypath(key)\n        return fs.exists(path)", "docstring": "Check cache contents.\n\nArguments:\nkey: Key.\n\nReturns:\nbool: True if key in cache, else false.", "source": "juraj-google-style"}
{"code": "class TFCvtEncoder(keras.layers.Layer):\n    config_class = CvtConfig\n\n    def __init__(self, config: CvtConfig, **kwargs):\n        super().__init__(**kwargs)\n        self.config = config\n        self.stages = [TFCvtStage(config, stage_idx, name=f'stages.{stage_idx}') for stage_idx in range(len(config.depth))]\n\n    def call(self, pixel_values: TFModelInputType, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:\n        all_hidden_states = () if output_hidden_states else None\n        hidden_state = pixel_values\n        hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))\n        cls_token = None\n        for _, stage_module in enumerate(self.stages):\n            hidden_state, cls_token = stage_module(hidden_state, training=training)\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_state,)\n        hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))\n        if output_hidden_states:\n            all_hidden_states = tuple([tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states])\n        if not return_dict:\n            return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if v is not None))\n        return TFBaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states)\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'stages', None) is not None:\n            for layer in self.stages:\n                with tf.name_scope(layer.name):\n                    layer.build(None)", "docstring": "Convolutional Vision Transformer encoder. CVT has 3 stages of encoder blocks with their respective number of layers\n(depth) being 1, 2 and 10.\n\nArgs:\nconfig ([`CvtConfig`]): Model configuration class.", "source": "github-repos"}
{"code": "def flatten(index, name='segmented_flatten'):\n    batch_size = torch.prod(torch.tensor(list(index.batch_shape())))\n    offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments\n    offset = offset.view(index.batch_shape())\n    for _ in range(index.batch_dims, len(index.indices.size())):\n        offset = offset.unsqueeze(-1)\n    indices = offset + index.indices\n    return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)", "docstring": "Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation\nrelabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by\n*num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the\nbatch.\n\nArgs:\nindex (`IndexMap`):\nIndexMap to flatten.\nname (`str`, *optional*, defaults to 'segmented_flatten'):\nName for the operation. Currently not used\n\nReturns:\n(`IndexMap`): The flattened IndexMap.", "source": "github-repos"}
{"code": "def load_schema(schema_name, resolved=False):\n    \n    schema_data = ''\n    with open(get_schema_path(schema_name, resolved)) as schema_fd:\n        schema_data = json.loads(schema_fd.read())\n\n    return schema_data", "docstring": "Load the given schema from wherever it's installed.\n\nArgs:\nschema_name(str): Name of the schema to load, for example 'authors'.\nresolved(bool): If True will return the resolved schema, that is with\nall the $refs replaced by their targets.\n\nReturns:\ndict: the schema with the given name.", "source": "juraj-google-style"}
{"code": "async def inspect(self, *, node_id: str) -> Mapping[(str, Any)]:\n    response = (await self.docker._query_json('nodes/{node_id}'.format(node_id=node_id), method='GET'))\n    return response", "docstring": "Inspect a node\n\nArgs:\nnode_id: The ID or name of the node", "source": "codesearchnet"}
{"code": "def run(self, text):\n    for pp in self.pre_processors:\n        text = pp.run(text)\n    return text", "docstring": "Run each substitution on ``text``.\n\nArgs:\ntext (string): the input text.\n\nReturns:\nstring: text after all substitutions have been sequentially\napplied.", "source": "codesearchnet"}
{"code": "def test_rpc_stage_dependencies(self, mock_handle_resp, mock_decode_resp_str, mock_send_request, mock_gen_request, mock_precheck):\n    self.client.initialize()\n    expected_response_str = '{\"id\": 0, \"result\": 123, \"error\": null, \"callback\": null}'\n    expected_response_dict = {'id': 0, 'result': 123, 'error': None, 'callback': None}\n    expected_request = '{\"id\": 10, \"method\": \"some_rpc\", \"params\": [1, 2],\"kwargs\": {\"test_key\": 3}'\n    expected_result = 123\n    mock_gen_request.return_value = expected_request\n    mock_send_request.return_value = expected_response_str\n    mock_decode_resp_str.return_value = expected_response_dict\n    mock_handle_resp.return_value = expected_result\n    rpc_result = self.client.some_rpc(1, 2, test_key=3)\n    mock_precheck.assert_called()\n    mock_gen_request.assert_called_with(0, 'some_rpc', 1, 2, test_key=3)\n    mock_send_request.assert_called_with(expected_request)\n    mock_decode_resp_str.assert_called_with(0, expected_response_str)\n    mock_handle_resp.assert_called_with('some_rpc', expected_response_dict)\n    self.assertEqual(rpc_result, expected_result)", "docstring": "Test the internal dependencies when sending an RPC.\n\nWhen sending an RPC, it calls multiple functions in specific order, and\neach function uses the output of the previously called function. This test\ncase checks above dependencies.\n\nArgs:\nmock_handle_resp: the mock function of FakeClient._handle_rpc_response.\nmock_decode_resp_str: the mock function of\nFakeClient._decode_response_string_and_validate_format.\nmock_send_request: the mock function of FakeClient.send_rpc_request.\nmock_gen_request: the mock function of FakeClient._gen_rpc_request.\nmock_precheck: the mock function of FakeClient.check_server_proc_running.", "source": "github-repos"}
{"code": "def get_cluster_interfaces(cluster, extra_cond=lambda nic: True):\n    \n    nics = get_nics(cluster)\n    \n    \n    \n    \n    \n    \n    nics = [(nic['device'], nic['name']) for nic in nics\n            if nic['mountable']\n            and nic['interface'] == 'Ethernet'\n            and not nic['management']\n            and extra_cond(nic)]\n    nics = sorted(nics)\n    return nics", "docstring": "Get the network interfaces names corresponding to a criteria.\n\nNote that the cluster is passed (not the individual node names), thus it is\nassumed that all nodes in a cluster have the same interface names same\nconfiguration. In addition to ``extra_cond``, only the mountable and\nEhernet interfaces are returned.\n\nArgs:\ncluster(str): the cluster to consider\nextra_cond(lambda): boolean lambda that takes the nic(dict) as\nparameter", "source": "juraj-google-style"}
{"code": "def __instantiate_page_object(page_obj_class, webdriver, **kwargs):\n        \n        try:\n            page = page_obj_class(webdriver, **kwargs)\n            return page\n        except InvalidPageError:\n            \n            \n            return True\n        except TypeError:\n            \n            \n            return False\n        except Exception as e:\n            \n            raise e", "docstring": "Attempts to instantiate a page object.\n\nArgs:\npage_obj_class (PageObject) - PageObject to instantiate.\nwebdriver (WebDriver) - Selenium webdriver to associate with the PageObject\n\nReturns:\nPageObject - If page object instantiation succeeded.\nTrue - If page object instantiation failed, but validation was called.\nNone - If validation did not occur.", "source": "juraj-google-style"}
{"code": "def _is_molecule_linear(self, mol):\n        \n        if mol.NumAtoms() < 3:\n            return True\n        a1 = mol.GetAtom(1)\n        a2 = mol.GetAtom(2)\n        for i in range(3, mol.NumAtoms()+1):\n            angle = float(mol.GetAtom(i).GetAngle(a2, a1))\n            if angle < 0.0:\n                angle = -angle\n            if angle > 90.0:\n                angle = 180.0 - angle\n            if angle > self._angle_tolerance:\n                return False\n        return True", "docstring": "Is the molecule a linear one\n\nArgs:\nmol: The molecule. OpenBabel OBMol object.\n\nReturns:\nBoolean value.", "source": "juraj-google-style"}
{"code": "def get(self, filter=False):\n    result = {}\n    for (k, v) in self.elements().items():\n        intermediate = v.get(filter=filter)\n        if intermediate:\n            result[k] = intermediate\n    return result", "docstring": "Returns a dictionary with the values of the model. Note that the values\nof the leafs are YANG classes.\n\nArgs:\nfilter (bool): If set to ``True``, show only values that have been set.\n\nReturns:\ndict: A dictionary with the values of the model.\n\nExample:\n\n>>> pretty_print(config.get(filter=True))\n>>> {\n>>>     \"interfaces\": {\n>>>         \"interface\": {\n>>>             \"et1\": {\n>>>                 \"config\": {\n>>>                     \"description\": \"My description\",\n>>>                     \"mtu\": 1500\n>>>                 },\n>>>                 \"name\": \"et1\"\n>>>             },\n>>>             \"et2\": {\n>>>                 \"config\": {\n>>>                     \"description\": \"Another description\",\n>>>                     \"mtu\": 9000\n>>>                 },\n>>>                 \"name\": \"et2\"\n>>>             }\n>>>         }\n>>>     }\n>>> }", "source": "codesearchnet"}
{"code": "async def report_winner(self, winner: Participant, scores_csv: str):\n        \n        await self._report(scores_csv, winner._id)", "docstring": "report scores and give a winner\n\n|methcoro|\n\nArgs:\nwinner: :class:Participant instance\nscores_csv: Comma separated set/game scores with player 1 score first (e.g. \"1-3,3-0,3-2\")\n\nRaises:\nValueError: scores_csv has a wrong format\nAPIException", "source": "juraj-google-style"}
{"code": "def deserialize(segment):\n        \n        link_target = segment.link_data.link_target\n        return ChatMessageSegment(\n            segment.text, segment_type=segment.type,\n            is_bold=segment.formatting.bold,\n            is_italic=segment.formatting.italic,\n            is_strikethrough=segment.formatting.strikethrough,\n            is_underline=segment.formatting.underline,\n            link_target=None if link_target == '' else link_target\n        )", "docstring": "Construct :class:`ChatMessageSegment` from ``Segment`` message.\n\nArgs:\nsegment: ``Segment`` message to parse.\n\nReturns:\n:class:`ChatMessageSegment` object.", "source": "juraj-google-style"}
{"code": "def _begin(self, retry_id=None):\n        \n        if self.in_progress:\n            msg = _CANT_BEGIN.format(self._id)\n            raise ValueError(msg)\n\n        transaction_response = self._client._firestore_api.begin_transaction(\n            self._client._database_string,\n            options_=self._options_protobuf(retry_id),\n            metadata=self._client._rpc_metadata,\n        )\n        self._id = transaction_response.transaction", "docstring": "Begin the transaction.\n\nArgs:\nretry_id (Optional[bytes]): Transaction ID of a transaction to be\nretried.\n\nRaises:\nValueError: If the current transaction has already begun.", "source": "juraj-google-style"}
{"code": "def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping):\n    input_matrix = input_matrix.transpose()\n    output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping)\n    return output_matrix.transpose()", "docstring": "Perform average pool along column of a 2-D matrix based on col_seq.\n\nArgs:\ninput_matrix: A 2-D matrix.\ncol_seq: Cumulative pooling sequence along column.\noverlapping: Whether or not use overlapping when pooling.\n\nReturns:\nA 2-D matrix, with\n* num_rows = input_matrix.num_rows\n* num_cols = len(col_seq)-1.", "source": "github-repos"}
{"code": "def _SetExtractionParsersAndPlugins(self, configuration, session):\n    \n    names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(\n        parser_filter_expression=configuration.parser_filter_expression)\n\n    session.enabled_parser_names = list(names_generator)\n    session.parser_filter_expression = configuration.parser_filter_expression", "docstring": "Sets the parsers and plugins before extraction.\n\nArgs:\nconfiguration (ProcessingConfiguration): processing configuration.\nsession (Session): session.", "source": "juraj-google-style"}
{"code": "def rebuild(cls, session, tree_id=None):\n    trees = session.query(cls).filter_by(parent_id=None)\n    if tree_id:\n        trees = trees.filter_by(tree_id=tree_id)\n    for tree in trees:\n        cls.rebuild_tree(session, tree.tree_id)", "docstring": "This function rebuid tree.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session\n\nKwargs:\ntree_id (int or str): id of tree, default None\n\nExample:\n\n* :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`", "source": "codesearchnet"}
{"code": "def get_example_from_prop_spec(self, prop_spec, from_allof=False):\n        \n        \n        easy_keys = ['example', 'x-example', 'default']\n        for key in easy_keys:\n            if key in prop_spec.keys() and self.use_example:\n                return prop_spec[key]\n        \n        if 'enum' in prop_spec.keys():\n            return prop_spec['enum'][0]\n        \n        if '$ref' in prop_spec.keys():\n            return self._example_from_definition(prop_spec)\n        \n        if 'allOf' in prop_spec.keys():\n            return self._example_from_allof(prop_spec)\n        \n        if 'type' not in prop_spec:\n            return self._example_from_complex_def(prop_spec)\n        \n        if prop_spec['type'] == 'object':\n            example, additional_properties = self._get_example_from_properties(prop_spec)\n            if additional_properties or from_allof:\n                return example\n            return [example]\n        \n        if prop_spec['type'] == 'array' or (isinstance(prop_spec['type'], list) and prop_spec['type'][0] == 'array'):\n            return self._example_from_array_spec(prop_spec)\n        \n        if prop_spec['type'] == 'file':\n            return (StringIO('my file contents'), 'hello world.txt')\n        \n        if 'format' in prop_spec.keys() and prop_spec['format'] == 'date-time':\n            return self._get_example_from_basic_type('datetime')[0]\n        \n        if isinstance(prop_spec['type'], list):\n            return self._get_example_from_basic_type(prop_spec['type'][0])[0]\n\n        \n        logging.info(\"falling back to basic type, no other match found\")\n        return self._get_example_from_basic_type(prop_spec['type'])[0]", "docstring": "Return an example value from a property specification.\n\nArgs:\nprop_spec: the specification of the property.\nfrom_allof: whether these properties are part of an\nallOf section\n\nReturns:\nAn example value", "source": "juraj-google-style"}
{"code": "def __recognize_scalar(self, node: yaml.Node, expected_type: Type) -> RecResult:\n    logger.debug('Recognizing as a scalar')\n    if (isinstance(node, yaml.ScalarNode) and (node.tag == scalar_type_to_tag[expected_type])):\n        return ([expected_type], '')\n    message = 'Failed to recognize a {}\\n{}\\n'.format(type_to_desc(expected_type), node.start_mark)\n    return ([], message)", "docstring": "Recognize a node that we expect to be a scalar.\n\nArgs:\nnode: The node to recognize.\nexpected_type: The type it is expected to be.\n\nReturns:\nA list of recognized types and an error message", "source": "codesearchnet"}
{"code": "def to_qasm(self, header: Optional[str]=None, precision: int=10, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> str:\n    return str(self._to_qasm_output(header, precision, qubit_order))", "docstring": "Returns QASM equivalent to the circuit.\n\nArgs:\nheader: A multi-line string that is placed in a comment at the top\nof the QASM. Defaults to a cirq version specifier.\nprecision: Number of digits to use when representing numbers.\nqubit_order: Determines how qubits are ordered in the QASM\nregister.", "source": "codesearchnet"}
{"code": "def VisitUnionType(self, union):\n    intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0]))\n    for t in union.type_list[1:]:\n        intersection.intersection_update(self.hierarchy.ExpandSuperClasses(str(t)))\n    new_type_list = tuple((pytd.NamedType(cls) for cls in intersection if not self.hierarchy.HasSubClassInSet(cls, intersection)))\n    if not new_type_list:\n        return union\n    return pytd_utils.JoinTypes(new_type_list)", "docstring": "Given a union type, try to find a simplification by using superclasses.\n\nThis is a lossy optimization that tries to map a list of types to a common\nbase type. For example, int and bool are both base classes of int, so it\nwould convert \"Union[int, bool]\" to \"int\".\n\nArguments:\nunion: A union type.\n\nReturns:\nA simplified type, if available.", "source": "github-repos"}
{"code": "def __init__(self, vendor_identification=None, attribute_name=None):\n        \n        super(AttributeReference, self).__init__(\n            tag=enums.Tags.ATTRIBUTE_REFERENCE\n        )\n\n        self._vendor_identification = None\n        self._attribute_name = None\n\n        self.vendor_identification = vendor_identification\n        self.attribute_name = attribute_name", "docstring": "Construct an AttributeReference structure.\n\nArgs:\nvendor_identification (string): A string identifying the vendor\nassociated with the attribute. Optional, defaults to None.\nRequired for read/write.\nattribute_name (string): A string containing the attribute name.\nOptional, defaults to None. Required for read/write.", "source": "juraj-google-style"}
{"code": "def launch(self, workflow):\n    try:\n        r = self.gbdx_connection.post(self.workflows_url, json=workflow)\n        try:\n            r.raise_for_status()\n        except:\n            print(('GBDX API Status Code: %s' % r.status_code))\n            print(('GBDX API Response: %s' % r.text))\n            r.raise_for_status()\n        workflow_id = r.json()['id']\n        return workflow_id\n    except TypeError:\n        self.logger.debug('Workflow not launched!')", "docstring": "Launches GBDX workflow.\n\nArgs:\nworkflow (dict): Dictionary specifying workflow tasks.\n\nReturns:\nWorkflow id (str).", "source": "codesearchnet"}
{"code": "def DisjoinCalendars(self, cutoff):\n\n    def TruncatePeriod(service_period, start, end):\n        'Truncate the service period to into the range [start, end].\\n\\n      Args:\\n        service_period: The service period to truncate.\\n        start: The start date as a string in YYYYMMDD format.\\n        end: The end date as a string in YYYYMMDD format.\\n      '\n        service_period.start_date = max(service_period.start_date, start)\n        service_period.end_date = min(service_period.end_date, end)\n        dates_to_delete = []\n        for k in service_period.date_exceptions:\n            if ((k < start) or (k > end)):\n                dates_to_delete.append(k)\n        for k in dates_to_delete:\n            del service_period.date_exceptions[k]\n    year = int(cutoff[:4])\n    month = int(cutoff[4:6])\n    day = int(cutoff[6:8])\n    cutoff_date = datetime.date(year, month, day)\n    one_day_delta = datetime.timedelta(days=1)\n    before = (cutoff_date - one_day_delta).strftime('%Y%m%d')\n    for a in self.feed_merger.a_schedule.GetServicePeriodList():\n        TruncatePeriod(a, 0, before)\n    for b in self.feed_merger.b_schedule.GetServicePeriodList():\n        TruncatePeriod(b, cutoff, ('9' * 8))", "docstring": "Forces the old and new calendars to be disjoint about a cutoff date.\n\nThis truncates the service periods of the old schedule so that service\nstops one day before the given cutoff date and truncates the new schedule\nso that service only begins on the cutoff date.\n\nArgs:\ncutoff: The cutoff date as a string in YYYYMMDD format. The timezone\nis the same as used in the calendar.txt file.", "source": "codesearchnet"}
{"code": "def present_weather_codes(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `present_weather_codes`'.format(value))\n    self._present_weather_codes = value", "docstring": "Corresponds to IDD Field `present_weather_codes`\n\nArgs:\nvalue (int): value for IDD Field `present_weather_codes`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def format_terminal_row(headers, example_row):\n    \n\n    def format_column(col):\n        if isinstance(col, str):\n            return '{{:{w}.{w}}}'\n        return '{{:<{w}}}'\n\n    widths = [max(len(h), len(str(d))) for h, d in zip(headers, example_row)]\n\n    \n    original_last_width = widths[-1]\n    if sys.stdout.isatty():\n        widths[-1] = max(\n            len(headers[-1]),\n            \n            tty.width() - sum(w + 2 for w in widths[0:-1]) - 3)\n\n    \n    cols = [format_column(c).format(w=w) for c, w in zip(example_row, widths)]\n    format_string = '  '.join(cols)\n    if original_last_width > widths[-1]:\n        format_string += '...'\n\n    return format_string", "docstring": "Uses headers and a row of example data to generate a format string\nfor printing a single row of data.\n\nArgs:\nheaders (tuple of strings): The headers for each column of data\nexample_row (tuple): A representative tuple of strings or ints\n\nReturns\nstring: A format string with a size for each column", "source": "juraj-google-style"}
{"code": "def _begin_connection_action(self, action):\n        \n\n        connection_id = action.data['connection_id']\n        internal_id = action.data['internal_id']\n        callback = action.data['callback']\n\n        \n        if self._get_connection_state(connection_id) != self.Disconnected:\n            callback(connection_id, self.id, False, 'Connection ID is already in use for another connection')\n            return\n\n        if self._get_connection_state(internal_id) != self.Disconnected:\n            callback(connection_id, self.id, False, 'Internal ID is already in use for another connection')\n            return\n\n        conn_data = {\n            'state': self.Connecting,\n            'microstate': None,\n            'connection_id': connection_id,\n            'internal_id': internal_id,\n            'action': action,\n            'context': action.data['context']\n        }\n\n        self._connections[connection_id] = conn_data\n        self._int_connections[internal_id] = conn_data", "docstring": "Begin a connection attempt\n\nArgs:\naction (ConnectionAction): the action object describing what we are\nconnecting to", "source": "juraj-google-style"}
{"code": "def make(target='all', dir='.', **kwargs):\n    if (not fs.isfile(fs.path(dir, 'Makefile'))):\n        raise NoMakefileError(\"No makefile in '{}'\".format(fs.abspath(dir)))\n    fs.cd(dir)\n    if ('timeout' not in kwargs):\n        kwargs['timeout'] = 300\n    (ret, out, err) = system.run(['make', target], **kwargs)\n    fs.cdpop()\n    if (ret > 0):\n        if re.search(_BAD_TARGET_RE, err):\n            raise NoTargetError(\"No rule for target '{}'\".format(target))\n        else:\n            raise MakeError(\"Target '{}' failed\".format(target))\n        raise MakeError('Failed')\n    return (ret, out, err)", "docstring": "Run make.\n\nArguments:\n\ntarget (str, optional): Name of the target to build. Defaults\nto \"all\".\ndir (str, optional): Path to directory containing Makefile.\n**kwargs (optional): Any additional arguments to be passed to\nsystem.run().\n\nReturns:\n\n(int, str, str): The first element is the return code of the\nmake command. The second and third elements are the stdout\nand stderr of the process.\n\nRaises:\n\nNoMakefileError: In case a Makefile is not found in the target\ndirectory.\nNoTargetError: In case the Makefile does not support the\nrequested target.\nMakeError: In case the target rule fails.", "source": "codesearchnet"}
{"code": "def __init__(self, callback, *args, interval=5):\n        \n\n        self.interval = interval\n        self.cb_args = args\n        self.callback = callback\n        self._wake_up_time = time.time() + 1\n\n        self._kill_event = threading.Event()\n        self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))\n        self._thread.daemon = True\n        self._thread.start()", "docstring": "Initialize the flowcontrol object\nWe start the timer thread here\n\nArgs:\n- dfk (DataFlowKernel) : DFK object to track parsl progress\n\nKWargs:\n- threshold (int) : Tasks after which the callback is triggered\n- interval (int) : seconds after which timer expires", "source": "juraj-google-style"}
{"code": "def least_squares_effective_mass(cartesian_k_points, eigenvalues):\n    if (not points_are_in_a_straight_line(cartesian_k_points)):\n        raise ValueError('k-points are not collinear')\n    dk = (cartesian_k_points - cartesian_k_points[0])\n    mod_dk = np.linalg.norm(dk, axis=1)\n    delta_e = (eigenvalues - eigenvalues[0])\n    effective_mass = (1.0 / ((np.polyfit(mod_dk, eigenvalues, 2)[0] * ev_to_hartree) * 2.0))\n    return effective_mass", "docstring": "Calculate the effective mass using a least squares quadratic fit.\n\nArgs:\ncartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points\neigenvalues (np.array):        Energy eigenvalues at each k-point to be used in the fit.\n\nReturns:\n(float): The fitted effective mass\n\nNotes:\nIf the k-points do not sit on a straight line a ValueError will be raised.", "source": "codesearchnet"}
{"code": "def _broadcast_dynamic_shape_one_layer(a, b):\n    a_0 = a[0]\n    b_0 = b[0]\n\n    def broadcast_from_a():\n        a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)\n        b_layer = math_ops.range(b_0)\n        target = b\n        return [a_layer, b_layer, target]\n    a_static = tensor_util.constant_value(a)\n    if a_static is not None and a_static[0] == 1:\n        [a_gi, b_gi, target] = broadcast_from_a()\n        a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n        b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n        return [a_layer, b_layer, target]\n\n    def broadcast_from_b():\n        a_layer = math_ops.range(a_0)\n        b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)\n        target = a\n        return [a_layer, b_layer, target]\n    b_static = tensor_util.constant_value(b)\n    if b_static is not None and b_static[0] == 1:\n        [a_gi, b_gi, target] = broadcast_from_b()\n        a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n        b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n        return [a_layer, b_layer, target]\n\n    def broadcast_noop():\n        a_layer = math_ops.range(a_0)\n        b_layer = math_ops.range(b_0)\n        target = b\n        return [a_layer, b_layer, target]\n    can_broadcast_from_a = math_ops.equal(a_0, 1)\n    can_broadcast_from_b = math_ops.equal(b_0, 1)\n\n    def broadcast_not_from_a():\n        return cond.cond(can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)\n    nrows_equal = math_ops.equal(a_0, b_0)\n    can_broadcast = math_ops.logical_or(can_broadcast_from_a, math_ops.logical_or(can_broadcast_from_b, nrows_equal))\n    check_can_broadcast = check_ops.assert_equal(can_broadcast, True, message='Cannot broadcast')\n    results = cond.cond(can_broadcast_from_a, true_fn=broadcast_from_a, false_fn=broadcast_not_from_a)\n    results = [control_flow_ops.with_dependencies([check_can_broadcast], x) for x in results]\n    [a_gi, b_gi, target] = results\n    a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n    b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n    return [a_layer, b_layer, target]", "docstring": "Broadcast two vectors, given their shapes.\n\nArgs:\na: the number of rows in a.\nb: the number of rows in b.\n\nReturns:\n(layer_a, layer_b, target_shape)\nlayer_a is a _LayerBroadcaster from a to the target_shape.\nlayer_b is a _LayerBroadcaster from b to the target_shape.\ntarget_shape is the target_shape\n\nRaises:\nInvalidArgumentError if the shapes are not consistent.", "source": "github-repos"}
{"code": "def exchange(self, pubkey):\n    try:\n        return self.priv.exchange(c_ec.ECDH(), pubkey.publ)\n    except ValueError as e:\n        raise s_exc.BadEccExchange(mesg=str(e))", "docstring": "Perform a ECDH key exchange with a public key.\n\nArgs:\npubkey (PubKey): A PubKey to perform the ECDH with.\n\nReturns:\nbytes: The ECDH bytes. This is deterministic for a given pubkey\nand private key.", "source": "codesearchnet"}
{"code": "def __init__(self, learning_rate, global_step, initial_gradient_squared_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='AdagradDA'):\n    if initial_gradient_squared_accumulator_value <= 0.0:\n        raise ValueError('initial_gradient_squared_accumulator_value must be positive: %s' % initial_gradient_squared_accumulator_value)\n    super(AdagradDAOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._initial_gradient_squared_accumulator_value = initial_gradient_squared_accumulator_value\n    self._learning_rate_tensor = None\n    self._l1_regularization_strength = l1_regularization_strength\n    self._l2_regularization_strength = l2_regularization_strength\n    self._global_step = global_step\n    self._global_step_on_worker = None", "docstring": "Construct a new AdagradDA optimizer.\n\nArgs:\nlearning_rate: A `Tensor` or a floating point value.  The learning rate.\nglobal_step: A `Tensor` containing the current training step number.\ninitial_gradient_squared_accumulator_value: A floating point value.\nStarting value for the accumulators, must be positive.\nl1_regularization_strength: A float value, must be greater than or\nequal to zero.\nl2_regularization_strength: A float value, must be greater than or\nequal to zero.\nuse_locking: If `True` use locks for update operations.\nname: Optional name prefix for the operations created when applying\ngradients.  Defaults to \"AdagradDA\".\n\nRaises:\nValueError: If the `initial_gradient_squared_accumulator_value` is\ninvalid.", "source": "github-repos"}
{"code": "def send_cmd(cmd, args, ret):\n        \n        from dvc.daemon import daemon\n\n        if not Analytics._is_enabled(cmd):\n            return\n\n        analytics = Analytics()\n        analytics.collect_cmd(args, ret)\n        daemon([\"analytics\", analytics.dump()])", "docstring": "Collect and send analytics for CLI command.\n\nArgs:\nargs (list): parsed args for the CLI command.\nret (int): return value of the CLI command.", "source": "juraj-google-style"}
{"code": "def get_signature(self, base_commit=None):\n    if (base_commit is None):\n        base_commit = 'HEAD'\n    self.run('add', '-A', self.path)\n    sha = self.run('rev-parse', '--verify', base_commit).strip()\n    diff = self.run('diff', sha).strip()\n    if (len(diff) == 0):\n        try:\n            return self.get_signature((base_commit + '~1'))\n        except CommandError:\n            pass\n    h = hashlib.sha1()\n    h.update(sha)\n    h.update(diff)\n    return h.hexdigest()", "docstring": "Get the signature of the current state of the repository\n\nTODO right now `get_signature` is an effectful process in that\nit adds all untracked file to staging. This is the only way to get\naccruate diff on new files. This is ok because we only use it on a\ndisposable copy of the repo.\n\nArgs:\nbase_commit - the base commit ('HEAD', sha, etc.)\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _execute_primitives(self, commands):\n        \n        for p in commands:\n            if self._scanchain and self._scanchain._debug:\n                print(\"  Executing\", p)\n            p.execute(self)", "docstring": "Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises.\n\nArgs:\ncommands: A list of Executable Primitives to be run in order.", "source": "juraj-google-style"}
{"code": "def _multiplex(self, target_gate, list_of_angles):\n    list_len = len(list_of_angles)\n    local_num_qubits = (int(math.log2(list_len)) + 1)\n    q = QuantumRegister(local_num_qubits)\n    circuit = QuantumCircuit(q, name=('multiplex' + local_num_qubits.__str__()))\n    lsb = q[0]\n    msb = q[(local_num_qubits - 1)]\n    if (local_num_qubits == 1):\n        circuit.append(target_gate(list_of_angles[0]), [q[0]])\n        return circuit\n    angle_weight = scipy.kron([[0.5, 0.5], [0.5, (- 0.5)]], np.identity((2 ** (local_num_qubits - 2))))\n    list_of_angles = angle_weight.dot(np.array(list_of_angles)).tolist()\n    multiplex_1 = self._multiplex(target_gate, list_of_angles[0:(list_len \n    circuit.append(multiplex_1.to_instruction(), q[0:(- 1)])\n    circuit.append(CnotGate(), [msb, lsb])\n    multiplex_2 = self._multiplex(target_gate, list_of_angles[(list_len \n    if (list_len > 1):\n        circuit.append(multiplex_2.to_instruction().mirror(), q[0:(- 1)])\n    else:\n        circuit.append(multiplex_2.to_instruction(), q[0:(- 1)])\n    circuit.append(CnotGate(), [msb, lsb])\n    return circuit", "docstring": "Return a recursive implementation of a multiplexor circuit,\nwhere each instruction itself has a decomposition based on\nsmaller multiplexors.\n\nThe LSB is the multiplexor \"data\" and the other bits are multiplexor \"select\".\n\nArgs:\ntarget_gate (Gate): Ry or Rz gate to apply to target qubit, multiplexed\nover all other \"select\" qubits\nlist_of_angles (list[float]): list of rotation angles to apply Ry and Rz\n\nReturns:\nDAGCircuit: the circuit implementing the multiplexor's action", "source": "codesearchnet"}
{"code": "def dtype_checker_df(df, dtype, return_=None):\n    dtype_range = dtype_ranges[dtype]\n    df_out_of_range = (((df < dtype_range[0]) | (df > dtype_range[1])) | (~ np.isfinite(df)))\n    if df_out_of_range.any().any():\n        if (return_ == 'colsums'):\n            df_out_of_range = df_out_of_range.apply(sum, axis=0)\n        elif (return_ == 'rowsums'):\n            df_out_of_range = df_out_of_range.apply(sum, axis=1)\n        elif (return_ == 'all'):\n            df_out_of_range = df_out_of_range\n        else:\n            df_out_of_range = 1\n    else:\n        df_out_of_range = 0\n    return df_out_of_range", "docstring": "Check if there are NaN values of values outside of a given datatype range.\n\nArguments:\ndf {dataframe} -- A dataframe.\ndtype {str} -- The datatype to check for.\nKeyword Arguments:\nreturn_ {str} -- Returns a boolean dataframe with the values not in the range of the dtype ('all'),\nthe row ('rowsums') or column ('colsums') sums of that dataframe or an exit code 1 (None, default)\nif any of the values is not in the range.\n\nReturns:\n[int or DataFrame or Series] -- If no value is out of the range exit code 0 is returned, else depends on return_.", "source": "codesearchnet"}
{"code": "def cancel_signature_request(self, signature_request_id):\n        \n        request = self._get_request()\n        request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False)", "docstring": "Cancels a SignatureRequest\n\nCancels a SignatureRequest. After canceling, no one will be able to sign\nor access the SignatureRequest or its documents. Only the requester can\ncancel and only before everyone has signed.\n\nArgs:\n\nsigning_request_id (str): The id of the signature request to cancel\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _render_timestep(self, t: int, s: Fluents, a: Fluents, f: Fluents, r: np.float32) -> None:\n    print('============================')\n    print('TIME = {}'.format(t))\n    print('============================')\n    fluent_variables = self._compiler.rddl.action_fluent_variables\n    self._render_fluent_timestep('action', a, fluent_variables)\n    fluent_variables = self._compiler.rddl.interm_fluent_variables\n    self._render_fluent_timestep('interms', f, fluent_variables)\n    fluent_variables = self._compiler.rddl.state_fluent_variables\n    self._render_fluent_timestep('states', s, fluent_variables)\n    self._render_reward(r)", "docstring": "Prints fluents and rewards for the given timestep `t`.\n\nArgs:\nt (int): timestep\ns (Sequence[Tuple[str], np.array]: State fluents.\na (Sequence[Tuple[str], np.array]: Action fluents.\nf (Sequence[Tuple[str], np.array]: Interm state fluents.\nr (np.float32): Reward.", "source": "codesearchnet"}
{"code": "def score_prediction_adapter(keyed_prediction: tuple[KeyT, PredictionResult]) -> tuple[KeyT, AnomalyPrediction]:\n    key, prediction = keyed_prediction\n    score = prediction.inference\n    assert isinstance(score, SupportsFloat)\n    return (key, AnomalyPrediction(score=float(score)))", "docstring": "Extracts a float score from `PredictionResult.inference` and wraps it.\n\nTakes a keyed `PredictionResult` from common ModelHandler output, assumes\nits `inference` attribute is a float-convertible score, and returns the key\npaired with an `AnomalyPrediction` containing that float score.\n\nArgs:\nkeyed_prediction: tuple of `(key, PredictionResult)`. `PredictionResult`\nmust have an `inference` attribute supporting float conversion.\n\nReturns:\ntuple of `(key, AnomalyPrediction)` with the extracted score.\n\nRaises:\nAssertionError: If `PredictionResult.inference` doesn't support float().", "source": "github-repos"}
{"code": "def optional(self, value = None):\n\t\t\n\n\t\t\n\t\tif value is None:\n\t\t\treturn this._optional\n\n\t\t\n\t\telse:\n\t\t\tthis._optional = value and True or False", "docstring": "Optional\n\nGetter/Setter method for optional flag\n\nArgs:\nvalue (bool): If set, the method is a setter\n\nReturns:\nbool | None", "source": "juraj-google-style"}
{"code": "def unload(self, keepables=None):\n        \n        to_del = [ds_id for ds_id, projectable in self.datasets.items()\n                  if ds_id not in self.wishlist and (not keepables or ds_id\n                                                     not in keepables)]\n        for ds_id in to_del:\n            LOG.debug(\"Unloading dataset: %r\", ds_id)\n            del self.datasets[ds_id]", "docstring": "Unload all unneeded datasets.\n\nDatasets are considered unneeded if they weren't directly requested\nor added to the Scene by the user or they are no longer needed to\ngenerate composites that have yet to be generated.\n\nArgs:\nkeepables (iterable): DatasetIDs to keep whether they are needed\nor not.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, index):\n    rank = self.rank\n    if isinstance(index, slice):\n        if index.step is not None and index.step != 1:\n            raise IndexError('Cannot stride through a shape')\n        start = index.start\n        stop = index.stop\n        if start is None:\n            start = 0\n        start = _fix_start_index(start, rank, self.num_row_partitions)\n        stop = _fix_stop_index(stop, rank)\n        return self._slice_shape(start, stop)\n    elif isinstance(index, int):\n        if index < 0:\n            if rank is None:\n                raise ValueError('Rank must be known to use __getitem__ with a negative index.')\n            return self._dimension(rank + index)\n        return self._dimension(index)\n    else:\n        raise TypeError('Argument is not an int or a slice')", "docstring": "Returns a dimension or a slice of the shape.\n\nRagged shapes can have ragged dimensions that depend upon other dimensions.\nTherefore, if you ask for a dimension that is ragged, this function returns\na ValueError. For similar reasons, if a slice is selected that includes\na ragged dimension without including the zero dimension, then this fails.\n\nAny slice that does not start at zero will return a shape\nwith num_row_partitions == 0.\n\nArgs:\nindex: the index: can be an int or a slice.\n\nRaises:\nIndexError: if the index is not in range.\nValueError: if the rank is unknown, or a ragged rank is requested\nincorrectly.", "source": "github-repos"}
{"code": "def get_access_token_from_cli():\n    if (('ACC_CLOUD' in os.environ) and ('MSI_ENDPOINT' in os.environ)):\n        endpoint = os.environ['MSI_ENDPOINT']\n        headers = {'Metadata': 'true'}\n        body = {'resource': 'https:\n        ret = requests.post(endpoint, headers=headers, data=body)\n        return ret.json()['access_token']\n    else:\n        home = os.path.expanduser('~')\n        sub_username = ''\n        azure_profile_path = ((((home + os.sep) + '.azure') + os.sep) + 'azureProfile.json')\n        if (os.path.isfile(azure_profile_path) is False):\n            print(('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path))\n            return None\n        with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd:\n            subs = json.load(azure_profile_fd)\n        for sub in subs['subscriptions']:\n            if (sub['isDefault'] == True):\n                sub_username = sub['user']['name']\n        if (sub_username == ''):\n            print(('Error from get_access_token_from_cli(): Default subscription not found in ' + azure_profile_path))\n            return None\n        access_keys_path = ((((home + os.sep) + '.azure') + os.sep) + 'accessTokens.json')\n        if (os.path.isfile(access_keys_path) is False):\n            print(('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path))\n            return None\n        with open(access_keys_path, 'r') as access_keys_fd:\n            keys = json.load(access_keys_fd)\n        for key in keys:\n            if (key['userId'] == sub_username):\n                if ('accessToken' not in keys[0]):\n                    print(('Error from get_access_token_from_cli(): accessToken not found in ' + access_keys_path))\n                    return None\n                if ('tokenType' not in keys[0]):\n                    print(('Error from get_access_token_from_cli(): tokenType not found in ' + access_keys_path))\n                    return None\n                if ('expiresOn' not in keys[0]):\n                    print(('Error from get_access_token_from_cli(): expiresOn not found in ' + access_keys_path))\n                    return None\n                expiry_date_str = key['expiresOn']\n                if ('T' in expiry_date_str):\n                    exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%dT%H:%M:%S.%fZ')\n                else:\n                    exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%d %H:%M:%S.%f')\n                if (exp_date < dt.now()):\n                    continue\n                else:\n                    return key['accessToken']\n        print(\"Error from get_access_token_from_cli(): token expired. Run 'az login'\")\n        return None", "docstring": "Get an Azure authentication token from CLI's cache.\n\nWill only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login'\nrecently), or if you are running in Azure Cloud Shell (aka cloud console)\n\nReturns:\nAn Azure authentication token string.", "source": "codesearchnet"}
{"code": "def add_argument(self, parser, bootstrap=False):\n    if self.cli_expose:\n        for child in self.children.values():\n            child.add_argument(parser, bootstrap)", "docstring": "Add dict-style item as an argument to the given parser.\n\nThe dict item will take all the nested items in the dictionary and\nnamespace them with the dict name, adding each child item as\ntheir own CLI argument.\n\nExamples:\nA non-nested dict item with the name 'db' and children named\n'port' and 'host' will result in the following being valid\nCLI args:\n\n['--db-host', 'localhost', '--db-port', '1234']\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add this item to.\nbootstrap (bool): Flag to indicate whether you only want to mark\nthis item as required or not.", "source": "codesearchnet"}
{"code": "def _pypi_push(dist):\n    \n    \n    \n    \n    for filename in os.listdir(dist):\n        full_path = os.path.join(dist, filename)\n        if os.path.isfile(full_path):\n            \n            _shell('twine register ' + shlex.quote(full_path), check=False)\n\n    _shell('twine upload ' + shlex.quote(dist + '/*'))", "docstring": "Push created package to PyPI.\n\nRequires the following defined environment variables:\n- TWINE_USERNAME: The PyPI username to upload this package under\n- TWINE_PASSWORD: The password to the user's account\n\nArgs:\ndist (str):\nThe distribution to push. Must be a valid directory; shell globs are\nNOT allowed.", "source": "juraj-google-style"}
{"code": "def list_registered_stateful_ops_without_inputs():\n    return set([name for (name, op) in op_def_registry.get_registered_ops().items() if (op.is_stateful and (not op.input_arg))])", "docstring": "Returns set of registered stateful ops that do not expect inputs.\n\nThis list is used to identify the ops to be included in the state-graph and\nthat are subsequently fed into the apply-graphs.\n\nReturns:\nA set of strings.", "source": "codesearchnet"}
{"code": "def test_src_dir_path(relative_path):\n    return _googletest.test_src_dir_path(relative_path)", "docstring": "Creates an absolute test srcdir path given a relative path.\n\nArgs:\nrelative_path: a path relative to tensorflow root.\ne.g. \"core/platform\".\n\nReturns:\nAn absolute path to the linked in runfiles.", "source": "github-repos"}
{"code": "def _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional):\n    description = None\n    shape = None\n    shape_string = ''\n    is_documented = True\n    additional_info = None\n    if param_name in documented_params:\n        if param_type == '' and documented_params[param_name].get('type', None) is not None:\n            param_type = documented_params[param_name]['type']\n        optional = documented_params[param_name]['optional']\n        shape = documented_params[param_name]['shape']\n        shape_string = shape if shape else ''\n        additional_info = documented_params[param_name]['additional_info'] or ''\n        description = f'{documented_params[param_name]['description']}\\n'\n    elif param_name in source_args_dict:\n        shape = source_args_dict[param_name]['shape']\n        shape_string = ' ' + shape if shape else ''\n        description = source_args_dict[param_name]['description']\n        additional_info = None\n    else:\n        is_documented = False\n    optional_string = ', *optional*' if optional else ''\n    return (param_type, optional_string, shape_string, additional_info, description, is_documented)", "docstring": "Get parameter documentation details from the appropriate source.\nTensor shape, optional status and description are taken from the custom docstring in priority if available.\nType is taken from the function signature first, then from the custom docstring if missing from the signature\n\nArgs:\nparam_name (`str`): Name of the parameter\ndocumented_params (`dict`): Dictionary of documented parameters (manually specified in the docstring)\nsource_args_dict (`dict`): Default source args dictionary to use if not in documented_params\nparam_type (`str`): Current parameter type (may be updated)\noptional (`bool`): Whether the parameter is optional (may be updated)", "source": "github-repos"}
{"code": "def _padding_value_to_tensor(value, output_type):\n    value = ops.convert_to_tensor(value, name='padding_value')\n    if not value.shape.is_compatible_with(tensor_shape.TensorShape([])):\n        raise ValueError(f'Invalid `padding_values`. `padding_values` values should be scalars, but got {value.shape}.')\n    if value.dtype != output_type:\n        raise TypeError(f'Invalid `padding_values`. `padding_values` values type {value.dtype} does not match type {output_type} of the corresponding input component.')\n    return value", "docstring": "Converts the padding value to a tensor.\n\nArgs:\nvalue: The padding value.\noutput_type: Its expected dtype.\n\nReturns:\nA scalar `Tensor`.\n\nRaises:\nValueError: if the padding value is not a scalar.\nTypeError: if the padding value's type does not match `output_type`.", "source": "github-repos"}
{"code": "def assert_text(self, *args, **kwargs):\n        \n\n        query = TextQuery(*args, **kwargs)\n\n        @self.synchronize(wait=query.wait)\n        def assert_text():\n            count = query.resolve_for(self)\n\n            if not (matches_count(count, query.options) and\n                    (count > 0 or expects_none(query.options))):\n                raise ExpectationNotMet(query.failure_message)\n\n            return True\n\n        return assert_text()", "docstring": "Asserts that the page or current node has the given text content, ignoring any HTML tags.\n\nArgs:\n*args: Variable length argument list for :class:`TextQuery`.\n**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj-google-style"}
{"code": "def __init__(self, inputs=[], outputs=[], attributes=[], scripts=[]):\n        \n        super(Transaction, self).__init__()\n        self.inputs = inputs\n        self.outputs = outputs\n        self.Attributes = attributes\n        self.scripts = scripts\n        self.InventoryType = 0x01  \n        self.__references = None", "docstring": "Create an instance.\nArgs:\ninputs (list): of neo.Core.CoinReference.CoinReference.\noutputs (list): of neo.Core.TX.Transaction.TransactionOutput items.\nattributes (list): of neo.Core.TX.TransactionAttribute.\nscripts:", "source": "juraj-google-style"}
{"code": "def equal_distribution_folds(y, folds=2):\n    \n    n, classes = y.shape\n\n    \n    dist = y.sum(axis=0).astype('float')\n    dist /= dist.sum()\n\n    index_list = []\n    fold_dist = np.zeros((folds, classes), dtype='float')\n    for _ in range(folds):\n        index_list.append([])\n    for i in range(n):\n        if i < folds:\n            target_fold = i\n        else:\n            normed_folds = fold_dist.T / fold_dist.sum(axis=1)\n            how_off = normed_folds.T - dist\n            target_fold = np.argmin(\n                np.dot((y[i] - .5).reshape(1, -1), how_off.T))\n        fold_dist[target_fold] += y[i]\n        index_list[target_fold].append(i)\n\n    logger.debug(\"Fold distributions:\")\n    logger.debug(fold_dist)\n    return index_list", "docstring": "Creates `folds` number of indices that has roughly balanced multi-label distribution.\n\nArgs:\ny: The multi-label outputs.\nfolds: The number of folds to create.\n\nReturns:\n`folds` number of indices that have roughly equal multi-label distributions.", "source": "juraj-google-style"}
{"code": "def __init__(self,consumer_key,consumer_secret,access_token=None):\n        \n        self.consumer = oauth.Consumer(consumer_key, consumer_secret)\n\n        \n        if access_token:\n            self.setAccessToken(access_token)", "docstring": "Initializes the splitwise class. Sets consumer and access token\n\nArgs:\nconsumer_key (str) : Consumer Key provided by Spliwise\nconsumer_secret (str): Consumer Secret provided by Splitwise\naccess_token (:obj: `dict`) Access Token is a combination of oauth_token and oauth_token_secret\n\nReturns:\nA Splitwise Object", "source": "juraj-google-style"}
{"code": "def UpdateNumberOfEventTags(\n      self, number_of_consumed_event_tags, number_of_produced_event_tags):\n    \n    consumed_event_tags_delta = 0\n    if number_of_consumed_event_tags is not None:\n      if number_of_consumed_event_tags < self.number_of_consumed_event_tags:\n        raise ValueError(\n            'Number of consumed event tags smaller than previous update.')\n\n      consumed_event_tags_delta = (\n          number_of_consumed_event_tags - self.number_of_consumed_event_tags)\n\n      self.number_of_consumed_event_tags = number_of_consumed_event_tags\n      self.number_of_consumed_event_tags_delta = consumed_event_tags_delta\n\n    produced_event_tags_delta = 0\n    if number_of_produced_event_tags is not None:\n      if number_of_produced_event_tags < self.number_of_produced_event_tags:\n        raise ValueError(\n            'Number of produced event tags smaller than previous update.')\n\n      produced_event_tags_delta = (\n          number_of_produced_event_tags - self.number_of_produced_event_tags)\n\n      self.number_of_produced_event_tags = number_of_produced_event_tags\n      self.number_of_produced_event_tags_delta = produced_event_tags_delta\n\n    return consumed_event_tags_delta > 0 or produced_event_tags_delta > 0", "docstring": "Updates the number of event tags.\n\nArgs:\nnumber_of_consumed_event_tags (int): total number of event tags consumed\nby the process.\nnumber_of_produced_event_tags (int): total number of event tags produced\nby the process.\n\nReturns:\nbool: True if either number of event tags has increased.\n\nRaises:\nValueError: if the consumed or produced number of event tags is smaller\nthan the value of the previous update.", "source": "juraj-google-style"}
{"code": "def get_plot_frame(map_obj, key_map, cached=False):\n    \n    if map_obj.kdims and len(map_obj.kdims) == 1 and map_obj.kdims[0] == 'Frame':\n        \n        return map_obj.last\n    key = tuple(key_map[kd.name] for kd in map_obj.kdims if kd.name in key_map)\n    if key in map_obj.data and cached:\n        return map_obj.data[key]\n    else:\n        try:\n            return map_obj[key]\n        except KeyError:\n            return None\n        except StopIteration as e:\n            raise e\n        except Exception:\n            print(traceback.format_exc())\n            return None", "docstring": "Returns the current frame in a mapping given a key mapping.\n\nArgs:\nobj: Nested Dimensioned object\nkey_map: Dictionary mapping between dimensions and key value\ncached: Whether to allow looking up key in cache\n\nReturns:\nThe item in the mapping corresponding to the supplied key.", "source": "juraj-google-style"}
{"code": "def get(self, node_id):\n        \n        return self.prepare_model(self.client.api.inspect_node(node_id))", "docstring": "Get a node.\n\nArgs:\nnode_id (string): ID of the node to be inspected.\n\nReturns:\nA :py:class:`Node` object.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def create_template(self, s, provider_name=None):\n        \n        if provider_name is None:\n            provider_name = self.supported_providers[0]\n        return template_exception_handler(\n            lambda: self.get_provider(provider_name).create_template(s),\n            self.error_context\n        )", "docstring": "Creates a template from the given string based on the specified provider or the provider with\nhighest precedence.\n\nArgs:\ns: The string to convert to a template.\nprovider_name: The name of the provider to use to create the template.", "source": "juraj-google-style"}
{"code": "def create_file(self, filename):\n    self.response.write(('Creating file %s\\n' % filename))\n    write_retry_params = gcs.RetryParams(backoff_factor=1.1)\n    gcs_file = gcs.open(filename, 'w', content_type='text/plain', options={'x-goog-meta-foo': 'foo', 'x-goog-meta-bar': 'bar'}, retry_params=write_retry_params)\n    gcs_file.write('abcde\\n')\n    gcs_file.write(((('f' * 1024) * 4) + '\\n'))\n    gcs_file.close()\n    self.tmp_filenames_to_clean_up.append(filename)", "docstring": "Create a file.\n\nThe retry_params specified in the open call will override the default\nretry params for this particular file handle.\n\nArgs:\nfilename: filename.", "source": "codesearchnet"}
{"code": "def diff_lineMode(self, text1, text2, deadline):\n    (text1, text2, linearray) = self.diff_linesToChars(text1, text2)\n    diffs = self.diff_main(text1, text2, False, deadline)\n    self.diff_charsToLines(diffs, linearray)\n    self.diff_cleanupSemantic(diffs)\n    diffs.append((self.DIFF_EQUAL, ''))\n    pointer = 0\n    count_delete = 0\n    count_insert = 0\n    text_delete = ''\n    text_insert = ''\n    while (pointer < len(diffs)):\n        if (diffs[pointer][0] == self.DIFF_INSERT):\n            count_insert += 1\n            text_insert += diffs[pointer][1]\n        elif (diffs[pointer][0] == self.DIFF_DELETE):\n            count_delete += 1\n            text_delete += diffs[pointer][1]\n        elif (diffs[pointer][0] == self.DIFF_EQUAL):\n            if ((count_delete >= 1) and (count_insert >= 1)):\n                subDiff = self.diff_main(text_delete, text_insert, False, deadline)\n                diffs[((pointer - count_delete) - count_insert):pointer] = subDiff\n                pointer = (((pointer - count_delete) - count_insert) + len(subDiff))\n            count_insert = 0\n            count_delete = 0\n            text_delete = ''\n            text_insert = ''\n        pointer += 1\n    diffs.pop()\n    return diffs", "docstring": "Do a quick line-level diff on both strings, then rediff the parts for\ngreater accuracy.\nThis speedup can produce non-minimal diffs.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\ndeadline: Time when the diff should be complete by.\n\nReturns:\nArray of changes.", "source": "codesearchnet"}
{"code": "def _transform_filter_to_sql(filter_block, node, context):\n    \n    expression = filter_block.predicate\n    return _expression_to_sql(expression, node, context)", "docstring": "Transform a Filter block to its corresponding SQLAlchemy expression.\n\nArgs:\nfilter_block: Filter, the Filter block to transform.\nnode: SqlNode, the node Filter block applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression equivalent to the Filter.predicate expression.", "source": "juraj-google-style"}
{"code": "def __init__(self, scope, parent):\n        \n        CodeStatement.__init__(self, scope, parent)\n        self.variables = []", "docstring": "Constructor for declaration statements.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.", "source": "juraj-google-style"}
{"code": "def _AddForwardedIps(self, forwarded_ips, interface):\n    for address in forwarded_ips:\n        self.ip_forwarding_utils.AddForwardedIp(address, interface)", "docstring": "Configure the forwarded IP address on the network interface.\n\nArgs:\nforwarded_ips: list, the forwarded IP address strings to configure.\ninterface: string, the output device to use.", "source": "codesearchnet"}
{"code": "def convert_error(exc_src, exc_dest):\n\n    def wrap(func):\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                return func(*args, **kwargs)\n            except exc_dest:\n                raise\n            except exc_src as err:\n                reraise(exc_dest, err, sys.exc_info()[2])\n        return wrapper\n    return wrap", "docstring": "A decorator for reraising exceptions with a different type.\nMostly useful for IOError.\n\nArgs:\nexc_src (type): The source exception type\nexc_dest (type): The target exception type.", "source": "codesearchnet"}
{"code": "def _on_write_request(self, request):\n        \n        if request['connection_handle'] != self._connection_handle:\n            return False\n\n        attribute_handle = request['attribute_handle']\n\n        \n        config_handles = [\n            ReceiveHeaderChar.config_handle,\n            ReceivePayloadChar.config_handle,\n            StreamingChar.config_handle,\n            TracingChar.config_handle\n        ]\n        if attribute_handle in config_handles:\n            notification_enabled, _ = struct.unpack('<BB', request['value'])\n\n            \n            if attribute_handle in [ReceiveHeaderChar.config_handle, ReceivePayloadChar.config_handle] and notification_enabled:\n                if attribute_handle == ReceiveHeaderChar.config_handle:\n                    self.header_notif = True\n                elif attribute_handle == ReceivePayloadChar.config_handle:\n                    self.payload_notif = True\n\n                if self.header_notif and self.payload_notif:\n                    self.device.open_rpc_interface()\n                    self._audit(\"RPCInterfaceOpened\")\n\n            \n            elif attribute_handle == StreamingChar.config_handle:\n                if notification_enabled and not self.streaming:\n                    self.streaming = True\n\n                    \n                    reports = self.device.open_streaming_interface()\n                    if reports is not None:\n                        self._queue_reports(*reports)\n\n                    self._audit('StreamingInterfaceOpened')\n                elif not notification_enabled and self.streaming:\n                    self.streaming = False\n                    self.device.close_streaming_interface()\n                    self._audit('StreamingInterfaceClosed')\n\n            \n            elif attribute_handle == TracingChar.config_handle:\n                if notification_enabled and not self.tracing:\n                    self.tracing = True\n\n                    \n                    traces = self.device.open_tracing_interface()\n                    if traces is not None:\n                        self._queue_traces(*traces)\n\n                    self._audit('TracingInterfaceOpened')\n                elif not notification_enabled and self.tracing:\n                    self.tracing = False\n                    self.device.close_tracing_interface()\n                    self._audit('TracingInterfaceClosed')\n\n            return True\n        \n        elif attribute_handle in [SendHeaderChar.value_handle, SendPayloadChar.value_handle]:\n            \n            if attribute_handle == SendPayloadChar.value_handle:\n                self.rpc_payload = bytearray(request['value'])\n                if len(self.rpc_payload) < 20:\n                    self.rpc_payload += bytearray(20 - len(self.rpc_payload))\n            \n            elif attribute_handle == SendHeaderChar.value_handle:\n                self._defer(self._call_rpc, [bytearray(request['value'])])\n\n            return True\n        else:\n            return False", "docstring": "Callback function called when a write request has been received.\nIt is executed in the baBLE working thread: should not be blocking.\n\nArgs:\nrequest (dict): Information about the request\n- connection_handle (int): The connection handle that sent the request\n- attribute_handle (int): The attribute handle to write\n- value (bytes): The value to write", "source": "juraj-google-style"}
{"code": "def get_servo_status(self):\n    data = []\n    data.append(9)\n    data.append(self.servoid)\n    data.append(RAM_READ_REQ)\n    data.append(STATUS_ERROR_RAM)\n    data.append(BYTE1)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(12)\n        return (ord(rxdata[9]) & 255)\n    except:\n        raise HerkulexError('could not communicate with motors')", "docstring": "Get the error status of servo\n\nThis function gets the  error status (if any) of the servo\n\nArgs:\nnone\n\nReturns:\nint:  an integer corresponding to the servo status\n* refer datasheet", "source": "codesearchnet"}
{"code": "def LessThan(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, '<')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"less than\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def get_event_consumer(config, success_channel, error_channel, metrics, **kwargs):\n    builder = event_consumer.GPSEventConsumerBuilder(config, success_channel, error_channel, metrics, **kwargs)\n    return builder.build_event_consumer()", "docstring": "Get a GPSEventConsumer client.\n\nA factory function that validates configuration, creates schema\nvalidator and parser clients, creates an auth and a pubsub client,\nand returns an event consumer (:interface:`gordon.interfaces.\nIRunnable` and :interface:`gordon.interfaces.IMessageHandler`)\nprovider.\n\nArgs:\nconfig (dict): Google Cloud Pub/Sub-related configuration.\nsuccess_channel (asyncio.Queue): Queue to place a successfully\nconsumed message to be further handled by the ``gordon``\ncore system.\nerror_channel (asyncio.Queue): Queue to place a message met\nwith errors to be further handled by the ``gordon`` core\nsystem.\nmetrics (obj): :interface:`IMetricRelay` implementation.\nkwargs (dict): Additional keyword arguments to pass to the\nevent consumer.\nReturns:\nA :class:`GPSEventConsumer` instance.", "source": "codesearchnet"}
{"code": "def _ConvertListToObject(cls, json_list):\n    list_value = []\n    for json_list_element in json_list:\n        if isinstance(json_list_element, dict):\n            list_value.append(cls._ConvertDictToObject(json_list_element))\n        elif isinstance(json_list_element, list):\n            list_value.append(cls._ConvertListToObject(json_list_element))\n        else:\n            list_value.append(json_list_element)\n    return list_value", "docstring": "Converts a JSON list into an object.\n\nArgs:\njson_list (list[object]): JSON serialized objects.\n\nReturns:\nlist[object]: a deserialized list.", "source": "codesearchnet"}
{"code": "def get_choices_for(self, field):\n    choices = self._fields[field].choices\n    if isinstance(choices, six.string_types):\n        return [(d['value'], d['name']) for d in self._choices_manager.get_all(choices)]\n    else:\n        return choices", "docstring": "Get the choices for the given fields.\n\nArgs:\nfield (str): Name of field.\n\nReturns:\nList of tuples. [(name, value),...]", "source": "codesearchnet"}
{"code": "def dump_size_bytes(self):\n    return self._dump_size_bytes", "docstring": "Size of the dump file.\n\nUnit: byte.\n\nReturns:\nIf the dump file exists, size of the dump file, in bytes.\nIf the dump file does not exist, None.", "source": "github-repos"}
{"code": "def load_maps(maps_dir):\n    \n    maps_dir = os.path.abspath(maps_dir)\n    maps = {}\n    for root, dirnames, filenames in os.walk(maps_dir):\n        for filename in filenames:\n            if filename.endswith(\".xml\"):\n                xml_file = os.path.join(root, filename)\n                map = MapSource.from_xml(xml_file, maps_dir)\n                if map.id in maps:\n                    raise MapSourceException(\"duplicate map id: {} in file {}\".format(map.id, xml_file))\n                else:\n                    maps[map.id] = map\n    return maps", "docstring": "Load all xml map sources from a given directory.\n\nArgs:\nmaps_dir: path to directory to search for maps\n\nReturns:\ndict of MapSource:", "source": "juraj-google-style"}
{"code": "def _get_authenticated_session(self):\n    session = requests.Session()\n    session.auth = self.auth\n    return session", "docstring": "Return an authenticated requests session.\n\nReturns:\nrequests.Session: Authenticated session for use.", "source": "codesearchnet"}
{"code": "def FindMessageTypeByName(self, full_name):\n    \n\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if full_name not in self._descriptors:\n      self.FindFileContainingSymbol(full_name)\n    return self._descriptors[full_name]", "docstring": "Loads the named descriptor from the pool.\n\nArgs:\nfull_name: The full name of the descriptor to load.\n\nReturns:\nThe descriptor for the named type.", "source": "juraj-google-style"}
{"code": "def create_sns_topic(self, region):\n        \n        sns = self.session.client('sns', region_name=region)\n\n        self.log.info('Creating SNS topic for {}/{}'.format(self.account, region))\n        \n        res = sns.create_topic(Name=self.topic_name)\n        arn = res['TopicArn']\n\n        \n        tmpl = get_template('cloudtrail_sns_policy.json')\n        policy = tmpl.render(region=region, account_id=self.account.account_number, topic_name=self.topic_name)\n        sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue=policy)\n\n        auditlog(\n            event='cloudtrail.create_sns_topic',\n            actor=self.ns,\n            data={\n                'account': self.account.account_name,\n                'region': region\n            }\n        )\n\n        return arn", "docstring": "Creates an SNS topic if needed. Returns the ARN if the created SNS topic\n\nArgs:\nregion (str): Region name\n\nReturns:\n`str`", "source": "juraj-google-style"}
{"code": "def _start_app_and_connect(self):\n    self._check_app_installed()\n    self.disable_hidden_api_blacklist()\n    persists_shell_cmd = self._get_persist_command()\n    self.log.info('Launching snippet apk %s with protocol %d.%d', self.package, _PROTOCOL_MAJOR_VERSION, _PROTOCOL_MINOR_VERSION)\n    cmd = _LAUNCH_CMD.format(shell_cmd=persists_shell_cmd, user=self._get_user_command_string(), snippet_package=self.package)\n    start_time = time.perf_counter()\n    self._proc = self._do_start_app(cmd)\n    line = self._read_protocol_line()\n    match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line)\n    if not match or match.group(1) != '1':\n        raise ProtocolVersionError(self._ad, line)\n    line = self._read_protocol_line()\n    match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)\n    if not match:\n        raise ProtocolVersionError(self._ad, line)\n    self.device_port = int(match.group(1))\n    self.host_port = utils.get_available_host_port()\n    self._adb.forward(['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n    self.connect()\n    self.log.debug('Snippet %s started after %.1fs on host port %s', self.package, time.perf_counter() - start_time, self.host_port)", "docstring": "Starts snippet apk on the device and connects to it.\n\nAfter prechecks, this launches the snippet apk with an adb cmd in a\nstanding subprocess, checks the cmd response from the apk for protocol\nversion, then sets up the socket connection over adb port-forwarding.\n\nArgs:\nProtocolVersionError, if protocol info or port info cannot be\nretrieved from the snippet apk.", "source": "github-repos"}
{"code": "def _as_document(self, identifier):\n        \n        return {\n            'identifier': u('{}').format(identifier['identifier']),\n            'type': u('{}').format(identifier['type']),\n            'name': u('{}').format(identifier['name'])\n        }", "docstring": "Converts given identifier to the document indexed by FTS backend.\n\nArgs:\nidentifier (dict): identifier to convert. Dict contains at\nleast 'identifier', 'type' and 'name' keys.\n\nReturns:\ndict with structure matches to BaseIdentifierIndex._schema.", "source": "juraj-google-style"}
{"code": "def test(verbosity=1):  \n    \n    import unittest\n    from .tests import test_suite\n\n    \n    unittest.TextTestRunner(verbosity=verbosity).run(test_suite)", "docstring": "Executes all the tests for pyplink.\n\nArgs:\nverbosity (int): The verbosity level for :py:mod:`unittest`.\n\nJust set ``verbosity`` to an integer higher than ``1`` to have more\ninformation about the tests.", "source": "juraj-google-style"}
{"code": "def link(target, link_to):\n    assert isinstance(target, str)\n    assert os.path.exists(target)\n    assert isinstance(link_to, str)\n    abs_path = os.path.dirname(os.path.abspath(link_to))\n    if (not os.path.isdir(abs_path)):\n        os.makedirs(abs_path)\n    chmod(target)\n    os.symlink(target, link_to)", "docstring": "Create a link to a target file or a folder.\n\nFor simplicity sake, both target and link_to must be absolute path and must\ninclude the filename of the file or folder.\nAlso do not include any trailing slash.\n\ne.g. link('/path/to/file', '/path/to/link')\n\nBut not: link('/path/to/file', 'path/to/')\nor link('/path/to/folder/', '/path/to/link')\n\nArgs:\ntarget (str): file or folder the link will point to\nlink_to (str): Link to create", "source": "codesearchnet"}
{"code": "def list(self, pattern='*'):\n    \n    if self._descriptors is None:\n      self._descriptors = self._client.list_resource_descriptors(\n          filter_string=self._filter_string)\n    return [resource for resource in self._descriptors\n            if fnmatch.fnmatch(resource.type, pattern)]", "docstring": "Returns a list of resource descriptors that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the descriptors. This can\ninclude Unix shell-style wildcards. E.g. ``\"aws*\"``, ``\"*cluster*\"``.\n\nReturns:\nA list of ResourceDescriptor objects that match the filters.", "source": "juraj-google-style"}
{"code": "def __init__(self, workflow, generator, work):\n        \n        super(Barrier, self).__init__()\n        self.workflow = workflow\n        self.generator = generator\n\n        if isinstance(work, (list, tuple)):\n            self[:] = list(work)\n            self.was_list = True\n            self.wait_any = False\n        elif isinstance(work, WaitAny):\n            self[:] = list(work.items)\n            self.was_list = True\n            self.wait_any = True\n        else:\n            self[:] = [work]\n            self.was_list = False\n            self.wait_any = False\n\n        for item in self:\n            assert isinstance(item, WorkItem)\n            item.parent = workflow", "docstring": "Initializer.\n\nArgs:\nworkflow: WorkflowItem instance this is for.\ngenerator: Current state of the WorkflowItem's generator.\nwork: Next set of work to do. May be a single WorkItem object or\na list or tuple that contains a set of WorkItems to run in\nparallel.", "source": "juraj-google-style"}
{"code": "def _add_sub_parsers(self, top_level_parser, methods_to_parse, class_name):\n    description = 'Accessible methods of {}'.format(class_name)\n    sub_parsers = top_level_parser.add_subparsers(description=description, dest='method')\n    parser_to_method = {}\n    for (method_name, parser) in methods_to_parse.items():\n        parser_name = (parser.get_name() or method_name)\n        if parser_name.startswith('_'):\n            if (not self._parse_private):\n                continue\n            parser_name = parser_name.strip('_')\n        parser_name = parser_name.replace('_', '-')\n        parser_to_method[parser_name] = method_name\n        sub_parsers.add_parser(parser_name, parents=[parser], add_help=False, description=parser.description)\n    return parser_to_method", "docstring": "Add all the sub-parsers to the top_level_parser.\n\nArgs:\ntop_level_parser: the top level parser\nmethods_to_parse: dict of method name pointing to their associated\nargument parser\nclass_name: name of the decorated class\n\nReturns:\na dict of registered name of the parser i.e. sub command name\npointing to the method real name", "source": "codesearchnet"}
{"code": "def save(self, force=False):\n    from time import time\n    from datetime import datetime\n    savefreq = TaskDB.get_option('savefreq', 2, int)\n    if (self.lastsave is not None):\n        delta = (datetime.fromtimestamp(time()) - datetime.fromtimestamp(self.lastsave))\n        elapsed = int((delta.total_seconds() / 60))\n    else:\n        elapsed = (savefreq + 1)\n    if ((elapsed > savefreq) or force):\n        if (not writeable):\n            self.lastsave = time()\n            msg.std('Skipping database write to disk by setting.', 2)\n            return\n        import json\n        try:\n            (entities, compkeys) = _json_clean(self.entities)\n            jdb = {'entities': entities, 'compkeys': compkeys, 'uuids': self.uuids}\n            with open(self.dbpath, 'w') as f:\n                json.dump(jdb, f)\n        except:\n            from acorn.msg import err\n            import sys\n            raise\n            err('{}: {}'.format(*sys.exc_info()[0:2]))\n        self.lastsave = time()", "docstring": "Serializes the database file to disk.\n\nArgs:\nforce (bool): when True, the elapsed time since last save is ignored\nand the database is saved anyway (subject to global\n:data:`writeable` setting).", "source": "codesearchnet"}
{"code": "class Activation(Layer):\n\n    def __init__(self, activation, **kwargs):\n        super(Activation, self).__init__(**kwargs)\n        self.supports_masking = True\n        self.activation = activations.get(activation)\n\n    def call(self, inputs):\n        return self.activation(inputs)\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = {'activation': activations.serialize(self.activation)}\n        base_config = super(Activation, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "Applies an activation function to an output.\n\nArgs:\nactivation: Activation function, such as `tf.nn.relu`, or string name of\nbuilt-in activation function, such as \"relu\".\n\nUsage:\n\n>>> layer = tf.keras.layers.Activation('relu')\n>>> output = layer([-3.0, -1.0, 0.0, 2.0])\n>>> list(output.numpy())\n[0.0, 0.0, 0.0, 2.0]\n>>> layer = tf.keras.layers.Activation(tf.nn.relu)\n>>> output = layer([-3.0, -1.0, 0.0, 2.0])\n>>> list(output.numpy())\n[0.0, 0.0, 0.0, 2.0]\n\nInput shape:\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis)\nwhen using this layer as the first layer in a model.\n\nOutput shape:\nSame shape as input.", "source": "github-repos"}
{"code": "def _make_request(self, url, method=\"get\", data=None, extra_headers=None):\n        \n        attempts = 0\n\n        while attempts < 1:\n            \n            if not self._is_authenticated:\n                self._authenticate()\n            \n            \n            try:\n                return self._send_request(url, method, data, extra_headers)\n            except HTTPError as e:\n                if e.response.status_code == 403:\n                    logger.info(\"Authenticated session against NetMRI timed out. Retrying.\")\n                    self._is_authenticated = False\n                    attempts += 1\n                else:\n                    \n                    raise", "docstring": "Prepares the request, checks for authentication and retries in case of issues\n\nArgs:\nurl (str): URL of the request\nmethod (str): Any of \"get\", \"post\", \"delete\"\ndata (any): Possible extra data to send with the request\nextra_headers (dict): Possible extra headers to send along in the request\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def _secured_storage_parameters(self):\n    parameters = (self._storage_parameters or dict())\n    if self._unsecure:\n        parameters = parameters.copy()\n        parameters['protocol'] = 'http'\n    return parameters", "docstring": "Updates storage parameters with unsecure mode.\n\nReturns:\ndict: Updated storage_parameters.", "source": "codesearchnet"}
{"code": "def morph_dict(d, convert_function):\n    \n    \n    new = {}\n    for k, v in six.iteritems(d):\n        new_v = v\n        if isinstance(v, dict):\n            new_v = morph_dict(v, convert_function)\n        elif isinstance(v, list):\n            new_v = list()\n            for x in v:\n                new_v.append(\n                    morph_dict(x, convert_function)\n                )\n        new[convert_function(k)] = new_v\n    return new", "docstring": "Convert a nested dictionary from one convention to another.\nArgs:\nd (dict): dictionary (nested or not) to be converted.\nconvert_function (func): function that takes the string in one\nconvention and returns it in the other one.\nReturns:\nDictionary with the new keys.", "source": "juraj-google-style"}
{"code": "def CleanVacuousVersions(clients=None, dry_run=True):\n  \n\n  if not clients:\n    index = client_index.CreateClientIndex()\n    clients = index.LookupClients([\".\"])\n  clients.sort()\n  with data_store.DB.GetMutationPool() as pool:\n\n    logging.info(\"checking %d clients\", len(clients))\n    for batch in collection.Batch(clients, 10000):\n      \n      \n      client_infos = data_store.DB.MultiResolvePrefix(\n          batch, [\"aff4:\", \"aff4:\"], data_store.DB.ALL_TIMESTAMPS)\n\n      for client, type_list in client_infos:\n        cleared = 0\n        kept = 0\n        updates = []\n        for a, _, ts in type_list:\n          if ts != 0:\n            updates.append((ts, a))\n        updates = sorted(updates)\n        dirty = True\n        for ts, a in updates:\n          if a == \"aff4:type\":\n            if dirty:\n              kept += 1\n              dirty = False\n            else:\n              cleared += 1\n              if not dry_run:\n                pool.DeleteAttributes(client, [\"aff4:type\"], start=ts, end=ts)\n                if pool.Size() > 1000:\n                  pool.Flush()\n          else:\n            dirty = True\n        logging.info(\"%s: kept %d and cleared %d\", client, kept, cleared)", "docstring": "A script to remove no-op client versions.\n\nThis script removes versions of a client when it is identical to the previous,\nin the sense that no versioned attributes were changed since the previous\nclient version.\n\nArgs:\nclients: A list of ClientURN, if empty cleans all clients.\ndry_run: whether this is a dry run", "source": "juraj-google-style"}
{"code": "def get_object(cls, api_token, droplet_id):\n        \n        droplet = cls(token=api_token, id=droplet_id)\n        droplet.load()\n        return droplet", "docstring": "Class method that will return a Droplet object by ID.\n\nArgs:\napi_token (str): token\ndroplet_id (int): droplet id", "source": "juraj-google-style"}
{"code": "def add(self, email):\n        \n        if email not in self._collaborators:\n            self._collaborators[email] = ShareRequestValue.Add\n        self._dirty = True", "docstring": "Add a collaborator.\n\nArgs:\nstr : Collaborator email address.", "source": "juraj-google-style"}
{"code": "def convertTimestamps(column):\n    \n    tempColumn = column\n\n    try:\n        \n        \n        \n        tempValue = np.datetime64(column[randint(0, len(column.index) - 1)])\n        tempColumn = column.apply(to_datetime)\n    except Exception:\n        pass\n    return tempColumn", "docstring": "Convert a dtype of a given column to a datetime.\n\nThis method tries to do this by brute force.\n\nArgs:\ncolumn (pandas.Series): A Series object with all rows.\n\nReturns:\ncolumn: Converted to datetime if no errors occured, else the\noriginal column will be returned.", "source": "juraj-google-style"}
{"code": "async def check_in(self):\n    res = (await self.connection('POST', 'tournaments/{}/participants/{}/check_in'.format(self._tournament_id, self._id)))\n    self._refresh_from_json(res)", "docstring": "Checks this participant in\n\n|methcoro|\n\nWarning:\n|unstable|\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def UploadUsers(self, hash_algorithm, hash_key, accounts):\n    \n    return self.rpc_helper.UploadAccount(hash_algorithm,\n                                         base64.urlsafe_b64encode(hash_key),\n                                         [GitkitUser.ToRequest(i) for i in accounts])", "docstring": "Uploads multiple users to Gitkit server.\n\nArgs:\nhash_algorithm: string, the hash algorithm.\nhash_key: array, raw key of the hash algorithm.\naccounts: list of GitkitUser.\n\nReturns:\nA dict of failed accounts. The key is the index of the 'accounts' list,\nstarting from 0.", "source": "juraj-google-style"}
{"code": "def create(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n    return self._path_open(path, 'wb', mime_type, compression_type)", "docstring": "Returns a write channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def get_yielded_type(type_hint):\n    if isinstance(type_hint, typing.TypeVar):\n        return typing.Any\n    if isinstance(type_hint, AnyTypeConstraint):\n        return type_hint\n    if is_consistent_with(type_hint, Iterator[Any]):\n        return type_hint.yielded_type\n    if is_consistent_with(type_hint, Tuple[Any, ...]):\n        if isinstance(type_hint, TupleConstraint):\n            return Union[type_hint.tuple_types]\n        else:\n            return type_hint.inner_type\n    if is_consistent_with(type_hint, Iterable[Any]):\n        if isinstance(type_hint, UnionConstraint):\n            yielded_types = set()\n            for typ in type_hint.inner_types():\n                yielded_types.add(get_yielded_type(typ))\n            return Union[yielded_types]\n        return type_hint.inner_type\n    raise ValueError('%s is not iterable' % type_hint)", "docstring": "Obtains the type of elements yielded by an iterable.s\n\nNote that \"iterable\" here means: can be iterated over in a for loop, excluding\nstrings and dicts.\n\nArgs:\ntype_hint: (TypeConstraint) The iterable in question. Must be normalize()-d.\n\nReturns:\nYielded type of the iterable.\n\nRaises:\nValueError if not iterable.", "source": "github-repos"}
{"code": "def accepts(regex, negate, *values):\n    \n    return any(v and regex.search(v) for v in values) != negate", "docstring": "Given a compiled regex and a negate, find if any of the values match.\n\nArgs:\nregex (Pattern):\nnegate (bool):\n*values (str):\n\nReturns:", "source": "juraj-google-style"}
{"code": "def decompose(miz_file: Path, output_folder: Path):\n        \n        mission_folder, assets_folder = NewMiz._get_subfolders(output_folder)\n        NewMiz._wipe_folders(mission_folder, assets_folder)\n        LOGGER.info('unzipping mission file')\n        with Miz(miz_file) as miz:\n            version = miz.mission.d['version']\n            LOGGER.debug(f'mission version: \"%s\"', version)\n\n            LOGGER.info('copying assets to: \"%s\"', assets_folder)\n            ignore = shutil.ignore_patterns('mission')\n            shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore)\n\n            NewMiz._reorder_warehouses(assets_folder)\n\n            LOGGER.info('decomposing mission table into: \"%s\" (this will take a while)', mission_folder)\n            NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)", "docstring": "Decompose this Miz into json\n\nArgs:\noutput_folder: folder to output the json structure as a Path\nmiz_file: MIZ file path as a Path", "source": "juraj-google-style"}
{"code": "def register_auth_system(self, auth_system):\n        \n        auth_system_settings = dbconfig.get('auth_system')\n\n        if auth_system.name not in auth_system_settings['available']:\n            auth_system_settings['available'].append(auth_system.name)\n            dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))\n\n        if auth_system.name == auth_system_settings['enabled'][0]:\n            self.active_auth_system = auth_system\n            auth_system().bootstrap()\n            logger.debug('Registered {} as the active auth system'.format(auth_system.name))\n            return True\n\n        else:\n            logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))\n            return False", "docstring": "Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered\nas the active auth system, else `False`\n\nArgs:\nauth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def enable_argscope_for_module(module, log_shape=True):\n    if (is_tfv2() and (module == tf.layers)):\n        module = tf.compat.v1.layers\n    for (name, obj) in getmembers(module):\n        if isfunction(obj):\n            setattr(module, name, enable_argscope_for_function(obj, log_shape=log_shape))", "docstring": "Overwrite all functions of a given module to support argscope.\nNote that this function monkey-patches the module and therefore could\nhave unexpected consequences.\nIt has been only tested to work well with ``tf.layers`` module.\n\nExample:\n\n.. code-block:: python\n\nimport tensorflow as tf\nenable_argscope_for_module(tf.layers)\n\nArgs:\nlog_shape (bool): print input/output shapes of each function.", "source": "codesearchnet"}
{"code": "def set_file_to_upload(self, file_to_upload):\n    if ('url' in self.data):\n        del self.data['url']\n    self.file_to_upload = file_to_upload", "docstring": "Delete any existing url and set the file uploaded to the local path provided\n\nArgs:\nfile_to_upload (str): Local path to file to upload\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _handle_metrics(self, outputs, targets=None, skip_target_masks=None, sample_weights=None, masks=None, return_weighted_metrics=False, return_weighted_and_unweighted_metrics=False):\n    skip_target_masks = skip_target_masks or [False] * len(outputs)\n    metric_results = []\n    with backend.name_scope('metrics'):\n        for i in range(len(outputs)):\n            if skip_target_masks[i]:\n                continue\n            output = outputs[i] if outputs else None\n            target = targets[i] if targets else None\n            output_mask = masks[i] if masks else None\n            if return_weighted_and_unweighted_metrics or not return_weighted_metrics:\n                metric_results.extend(self._handle_per_output_metrics(self._per_output_metrics[i], target, output, output_mask))\n            if return_weighted_and_unweighted_metrics or return_weighted_metrics:\n                metric_results.extend(self._handle_per_output_metrics(self._per_output_weighted_metrics[i], target, output, output_mask, weights=sample_weights[i] if sample_weights else None))\n    return metric_results", "docstring": "Handles calling metric functions.\n\nArgs:\noutputs: List of outputs (predictions).\ntargets: List of targets.\nskip_target_masks: Optional. List of boolean for whether the corresponding\ntarget should be ignored or not.\nsample_weights: Optional list of sample weight arrays.\nmasks: List of computed output mask values.\nreturn_weighted_metrics: Flag that indicates whether weighted metrics\nshould be computed instead of unweighted metrics. This flag is ignored\nwhen `return_weighted_and_unweighted_metrics` is enabled.\nreturn_weighted_and_unweighted_metrics: Flag that is used to indicate\nwhether both weighted and unweighted metrics should be computed. When\nthis is not enabled, we use `return_weighted_metrics` param to indicate\nwhether weighted or unweighted metrics should be returned.\n\nReturns:\nA list of metric result tensors.", "source": "github-repos"}
{"code": "def _replace_oov(original_vocab, line):\n  \n  return u\" \".join(\n      [word if word in original_vocab else u\"UNK\" for word in line.split()])", "docstring": "Replace out-of-vocab words with \"UNK\".\n\nThis maintains compatibility with published results.\n\nArgs:\noriginal_vocab: a set of strings (The standard vocabulary for the dataset)\nline: a unicode string - a space-delimited sequence of words.\n\nReturns:\na unicode string - a space-delimited sequence of words.", "source": "juraj-google-style"}
{"code": "def wait_for_notification(self, notification_class=BaseNotification):\n        \n        if notification_class:\n            if notification_class is BaseNotification:\n                message = \"No notification was shown.\"\n            else:\n                message = \"{0} was not shown.\".format(notification_class.__name__)\n            self.wait.until(\n                lambda _: isinstance(self.notification, notification_class),\n                message=message,\n            )\n            return self.notification\n        else:\n            self.wait.until(\n                lambda _: self.notification is None,\n                message=\"Unexpected notification shown.\",\n            )", "docstring": "Wait for the specified notification to be displayed.\n\nArgs:\nnotification_class (:py:class:`BaseNotification`, optional):\nThe notification class to wait for. If `None` is specified it\nwill wait for any notification to be closed. Defaults to\n`BaseNotification`.\n\nReturns:\n:py:class:`BaseNotification`: Firefox notification.", "source": "juraj-google-style"}
{"code": "def __call__(self, *args, **kwargs):\n    if not hasattr(self, '_thread_local'):\n        raise RuntimeError('You must call `super().__init__()` in the layer constructor.')\n    inputs, args, kwargs = self._split_out_first_arg(args, kwargs)\n    input_list = nest.flatten(inputs)\n    if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):\n        return self._functional_construction_call(inputs, args, kwargs, input_list)\n    call_context = base_layer_utils.call_context()\n    if any((isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)) for x in input_list)):\n        inputs = nest.map_structure(_convert_numpy_or_python_types, inputs)\n        input_list = nest.flatten(inputs)\n    input_masks, mask_is_implicit = self._get_input_masks(inputs, input_list, args, kwargs)\n    if self._expects_mask_arg and mask_is_implicit:\n        kwargs['mask'] = input_masks\n    args, kwargs, training_mode = self._set_training_mode(args, kwargs, call_context)\n    if not call_context.in_call:\n        self._clear_losses()\n    eager = context.executing_eagerly()\n    with call_context.enter(layer=self, inputs=inputs, build_graph=not eager, training=training_mode):\n        input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)\n        if eager:\n            call_fn = self.call\n            name_scope = self._name\n        else:\n            name_scope = self._name_scope()\n            call_fn = self._autographed_call()\n        with ops.name_scope_v2(name_scope):\n            if not self.built:\n                self._maybe_build(inputs)\n            if self._autocast:\n                inputs = self._maybe_cast_inputs(inputs, input_list)\n            with autocast_variable.enable_auto_cast_variables(self._compute_dtype_object):\n                outputs = call_fn(inputs, *args, **kwargs)\n            if self._activity_regularizer:\n                self._handle_activity_regularization(inputs, outputs)\n            if self._supports_masking:\n                self._set_mask_metadata(inputs, outputs, input_masks, not eager)\n            if self._saved_model_inputs_spec is None:\n                self._set_save_spec(inputs)\n            return outputs", "docstring": "Wraps `call`, applying pre- and post-processing steps.\n\nArgs:\n*args: Positional arguments to be passed to `self.call`.\n**kwargs: Keyword arguments to be passed to `self.call`.\n\nReturns:\nOutput tensor(s).\n\nNote:\n- The following optional keyword arguments are reserved for specific uses:\n* `training`: Boolean scalar tensor of Python boolean indicating\nwhether the `call` is meant for training or inference.\n* `mask`: Boolean input mask.\n- If the layer's `call` method takes a `mask` argument (as some Keras\nlayers do), its default value will be set to the mask generated\nfor `inputs` by the previous layer (if `input` did come from\na layer that generated a corresponding mask, i.e. if it came from\na Keras layer with masking support.\n- If the layer is not built, the method will call `build`.\n\nRaises:\nValueError: if the layer's `call` method returns None (an invalid value).\nRuntimeError: if `super().__init__()` was not called in the constructor.", "source": "github-repos"}
{"code": "def load_attributes_from_hdf5_group(group, name):\n    if name in group.attrs:\n        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]\n    else:\n        data = []\n        chunk_id = 0\n        while '%s%d' % (name, chunk_id) in group.attrs:\n            data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])\n            chunk_id += 1\n    return data", "docstring": "Loads attributes of the specified name from the HDF5 group.\n\nThis method deals with an inherent problem of HDF5 file which is not able to store data larger than\nHDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\ngroup: A pointer to a HDF5 group.\nname: A name of the attributes to load.\n\nReturns:\ndata: Attributes data.\n\nCopied from Keras to Transformers to avoid versioning issues.", "source": "github-repos"}
{"code": "def _replace_ragged_with_flat_values(value, partition_lists, flat_values_nrows):\n    if ragged_tensor.is_ragged(value):\n        value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value)\n        partition_lists.append(value._nested_row_partitions)\n        nrows = tensor_shape.dimension_at_index(value.flat_values.shape, 0).value\n        if nrows is not None:\n            flat_values_nrows.append(nrows)\n        return value.flat_values\n\n    def recurse(v):\n        return _replace_ragged_with_flat_values(v, partition_lists, flat_values_nrows)\n    if isinstance(value, list):\n        return [recurse(v) for v in value]\n    elif isinstance(value, tuple):\n        return tuple((recurse(v) for v in value))\n    elif isinstance(value, dict):\n        return dict(((k, recurse(v)) for k, v in value.items()))\n    else:\n        return value", "docstring": "Replace RaggedTensors with their flat_values, and record their partitions.\n\nReturns a copy of `value`, with any nested `RaggedTensor`s replaced by their\n`flat_values` tensor.  Looks inside lists, tuples, and dicts.\n\nAppends each `RaggedTensor`'s `RowPartition`s to `partition_lists`.\n\nArgs:\nvalue: The value that should be transformed by replacing `RaggedTensors`.\npartition_lists: An output parameter used to record the row partitions\nfor any `RaggedTensors` that were replaced.\nflat_values_nrows: An output parameter used to record the outer dimension\nsize for each replacement `flat_values` (when known).  Contains a list of\nint.\n\nReturns:\nA copy of `value` with nested `RaggedTensors` replaced by their `values`.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    regf_file = pyregf.file()\n    try:\n        regf_file.open_file_object(file_object)\n    except IOError:\n        return\n    root_key = regf_file.get_root_key()\n    if (root_key is None):\n        regf_file.close()\n        return\n    root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY)\n    if (root_file_key is None):\n        regf_file.close()\n        return\n    for volume_key in root_file_key.sub_keys:\n        for am_entry in volume_key.sub_keys:\n            self._ProcessAMCacheFileKey(am_entry, parser_mediator)\n    root_program_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_PROGRAM_KEY)\n    if (root_program_key is None):\n        regf_file.close()\n        return\n    for am_entry in root_program_key.sub_keys:\n        self._ProcessAMCacheProgramKey(am_entry, parser_mediator)\n    regf_file.close()", "docstring": "Parses an Amcache.hve file for events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "codesearchnet"}
{"code": "def __init__(self, *nodes, timeout=None):\n        \n        self.nodes = nodes\n        self.timeout = timeout\n        self.connection_pool = Pool([Connection(node_url=node['endpoint'],\n                                                headers=node['headers'])\n                                     for node in nodes])", "docstring": "Initializes an instance of\n:class:`~bigchaindb_driver.transport.Transport`.\n\nArgs:\nnodes: each node is a dictionary with the keys `endpoint` and\n`headers`\ntimeout (int): Optional timeout in seconds.", "source": "juraj-google-style"}
{"code": "def etm_supported(self):\n    res = self._dll.JLINKARM_ETM_IsPresent()\n    if (res == 1):\n        return True\n    info = ctypes.c_uint32(0)\n    index = enums.JLinkROMTable.ETM\n    res = self._dll.JLINKARM_GetDebugInfo(index, ctypes.byref(info))\n    if (res == 1):\n        return False\n    return True", "docstring": "Returns if the CPU core supports ETM.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``True`` if the CPU has the ETM unit, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def getlines(self, bufnr=None):\n    buf = (self._vim.buffers[bufnr] if bufnr else self._vim.current.buffer)\n    return buf[:]", "docstring": "Get all lines of a buffer as a list.\n\nArgs:\nbufnr (Optional[int]): A Vim buffer number, current if ``None``.\n\nReturns:\nList[str]", "source": "codesearchnet"}
{"code": "def compute_writer_results(results):\n    \n    if not results:\n        return\n\n    sources, targets, delayeds = split_results(results)\n\n    \n    if targets:\n        delayeds.append(da.store(sources, targets, compute=False))\n\n    if delayeds:\n        da.compute(delayeds)\n\n    if targets:\n        for target in targets:\n            if hasattr(target, 'close'):\n                target.close()", "docstring": "Compute all the given dask graphs `results` so that the files are\nsaved.\n\nArgs:\nresults (iterable): Iterable of dask graphs resulting from calls to\n`scn.save_datasets(..., compute=False)`", "source": "juraj-google-style"}
{"code": "def clean_doctest_list(doctest_file: str, overwrite: bool=False):\n    non_existent_paths = []\n    all_paths = []\n    with open(doctest_file, 'r', encoding='utf-8') as f:\n        for line in f:\n            line = line.strip().split(' ')[0]\n            path = os.path.join(REPO_PATH, line)\n            if not (os.path.isfile(path) or os.path.isdir(path)):\n                non_existent_paths.append(line)\n            all_paths.append(line)\n    if len(non_existent_paths) > 0:\n        non_existent_paths = '\\n'.join([f'- {f}' for f in non_existent_paths])\n        raise ValueError(f'`{doctest_file}` contains non-existent paths:\\n{non_existent_paths}')\n    sorted_paths = sorted(all_paths)\n    if all_paths != sorted_paths:\n        if not overwrite:\n            raise ValueError(f'Files in `{doctest_file}` are not in alphabetical order, run `make fix-copies` to fix this automatically.')\n        with open(doctest_file, 'w', encoding='utf-8') as f:\n            f.write('\\n'.join(sorted_paths) + '\\n')", "docstring": "Cleans the doctest in a given file.\n\nArgs:\ndoctest_file (`str`):\nThe path to the doctest file to check or clean.\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to fix problems. If `False`, will error when the file is not clean.", "source": "github-repos"}
{"code": "def register_macro(name: str, func: Callable, allow_overwrite: bool=False) -> None:\n    if hasattr(Circuit, name):\n        if allow_overwrite:\n            warnings.warn(f'Circuit has attribute `{name}`.')\n        else:\n            raise ValueError(f'Circuit has attribute `{name}`.')\n    if name.startswith('run_with_'):\n        if allow_overwrite:\n            warnings.warn(f'Gate name `{name}` may conflict with run of backend.')\n        else:\n            raise ValueError(f\"Gate name `{name}` shall not start with 'run_with_'.\")\n    if (not allow_overwrite):\n        if (name in GATE_SET):\n            raise ValueError(f\"Gate '{name}' is already exists in gate set.\")\n        if (name in GLOBAL_MACROS):\n            raise ValueError(f\"Macro '{name}' is already exists.\")\n    GLOBAL_MACROS[name] = func", "docstring": "Register new macro to Circuit.\n\nArgs:\nname (str): The name of macro.\nfunc (callable): The function to be called.\nallow_overwrite (bool, optional): If True, allow to overwrite the existing macro.\nOtherwise, raise the ValueError.\n\nRaises:\nValueError: The name is duplicated with existing macro, gate or method.\nWhen `allow_overwrite=True`, this error is not raised.", "source": "codesearchnet"}
{"code": "def update_hash(src_file):\n    \n    hash_file = local.path(src_file) + \".hash\"\n    new_hash = 0\n    with open(hash_file, 'w') as h_file:\n        new_hash = get_hash_of_dirs(src_file)\n        h_file.write(str(new_hash))\n    return new_hash", "docstring": "Update the hash for the given file.\n\nArgs:\nsrc: The file name.\nroot: The path of the given file.", "source": "juraj-google-style"}
{"code": "def save(self, file_prefix, checkpoint_number=None, session=None, options=None):\n    options = options or checkpoint_options.CheckpointOptions()\n    feed_dict = {}\n    use_session = not context.executing_eagerly() and (not ops.inside_function())\n    if checkpoint_number:\n        file_prefix = '%s-%d' % (file_prefix, checkpoint_number)\n    if use_session:\n        if self._object_graph_feed_tensor is None:\n            with ops.device('/cpu:0'):\n                self._object_graph_feed_tensor = constant_op.constant('', dtype=dtypes.string)\n                self._file_prefix_feed_tensor = constant_op.constant('', dtype=dtypes.string)\n        object_graph_tensor = self._object_graph_feed_tensor\n        file_prefix_tensor = self._file_prefix_feed_tensor\n        feed_dict[file_prefix_tensor] = file_prefix\n    else:\n        with ops.device('/cpu:0'):\n            file_prefix_tensor = ops.convert_to_tensor(file_prefix, dtype=dtypes.string)\n        object_graph_tensor = None\n    if not tensor_util.is_tensor(file_prefix):\n        file_io.recursive_create_dir(os.path.dirname(file_prefix))\n    save_path, new_feed_additions = self._save_cached_when_graph_building(file_prefix_tensor, object_graph_tensor, options)\n    if new_feed_additions:\n        feed_dict.update(new_feed_additions)\n    if not use_session:\n        session = None\n    elif session is None:\n        session = get_session()\n    if session:\n        return session.run(save_path, feed_dict=feed_dict)\n    elif use_session:\n        raise RuntimeError(f'Unable to save checkpoint to \"{file_prefix}\" in graph mode without a default session. Please use `with tf.Session():` to create a session.')\n    else:\n        return save_path", "docstring": "Save a training checkpoint.\n\nThe saved checkpoint includes variables created by this object and any\nTrackable objects it depends on at the time `Saver.save()` is called.\n\nArgs:\nfile_prefix: A prefix to use for the checkpoint filenames\n(/path/to/directory/and_a_prefix). Names are generated based on this\nprefix and `checkpoint_number`, if provided.\ncheckpoint_number: An integer variable or Tensor, used to number\ncheckpoints. Typically this value is saved along with other variables in\ntraining checkpoints, which will happen automatically if it was created\nby `root_trackable` or one of its dependencies (via\n`Trackable._add_variable`).\nsession: The session to evaluate variables in. Ignored when executing\neagerly. If not provided when graph building, the default session is\nused.\noptions: Optional `tf.train.CheckpointOptions` object.\n\nReturns:\nThe full path to the checkpoint.\n\nRaises:\nRuntimeError: if called in V1 Graph mode without a default session.", "source": "github-repos"}
{"code": "def get_data(self, how_many, offset, model_settings, background_frequency, background_volume_range, time_shift, mode, sess):\n    candidates = self.data_index[mode]\n    if how_many == -1:\n        sample_count = len(candidates)\n    else:\n        sample_count = max(0, min(how_many, len(candidates) - offset))\n    data = np.zeros((sample_count, model_settings['fingerprint_size']))\n    labels = np.zeros(sample_count)\n    desired_samples = model_settings['desired_samples']\n    use_background = self.background_data and mode == 'training'\n    pick_deterministically = mode != 'training'\n    for i in range(offset, offset + sample_count):\n        if how_many == -1 or pick_deterministically:\n            sample_index = i\n        else:\n            sample_index = np.random.randint(len(candidates))\n        sample = candidates[sample_index]\n        if time_shift > 0:\n            time_shift_amount = np.random.randint(-time_shift, time_shift)\n        else:\n            time_shift_amount = 0\n        if time_shift_amount > 0:\n            time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n            time_shift_offset = [0, 0]\n        else:\n            time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n            time_shift_offset = [-time_shift_amount, 0]\n        input_dict = {self.wav_filename_placeholder_: sample['file'], self.time_shift_padding_placeholder_: time_shift_padding, self.time_shift_offset_placeholder_: time_shift_offset}\n        if use_background or sample['label'] == SILENCE_LABEL:\n            background_index = np.random.randint(len(self.background_data))\n            background_samples = self.background_data[background_index]\n            if len(background_samples) <= model_settings['desired_samples']:\n                raise ValueError('Background sample is too short! 
Need more than %d samples but only %d were found' % (model_settings['desired_samples'], len(background_samples)))\n            background_offset = np.random.randint(0, len(background_samples) - model_settings['desired_samples'])\n            background_clipped = background_samples[background_offset:background_offset + desired_samples]\n            background_reshaped = background_clipped.reshape([desired_samples, 1])\n            if sample['label'] == SILENCE_LABEL:\n                background_volume = np.random.uniform(0, 1)\n            elif np.random.uniform(0, 1) < background_frequency:\n                background_volume = np.random.uniform(0, background_volume_range)\n            else:\n                background_volume = 0\n        else:\n            background_reshaped = np.zeros([desired_samples, 1])\n            background_volume = 0\n        input_dict[self.background_data_placeholder_] = background_reshaped\n        input_dict[self.background_volume_placeholder_] = background_volume\n        if sample['label'] == SILENCE_LABEL:\n            input_dict[self.foreground_volume_placeholder_] = 0\n        else:\n            input_dict[self.foreground_volume_placeholder_] = 1\n        summary, data_tensor = sess.run([self.merged_summaries_, self.output_], feed_dict=input_dict)\n        self.summary_writer_.add_summary(summary)\n        data[i - offset, :] = data_tensor.flatten()\n        label_index = self.word_to_index[sample['label']]\n        labels[i - offset] = label_index\n    return (data, labels)", "docstring": "Gather samples from the data set, applying transformations as needed.\n\nWhen the mode is 'training', a random selection of samples will be returned,\notherwise the first N clips in the partition will be used. This ensures that\nvalidation always uses the same samples, reducing noise in the metrics.\n\nArgs:\nhow_many: Desired number of samples to return. -1 means the entire\ncontents of this partition.\noffset: Where to start when fetching deterministically.\nmodel_settings: Information about the current model being trained.\nbackground_frequency: How many clips will have background noise, 0.0 to\n1.0.\nbackground_volume_range: How loud the background noise will be.\ntime_shift: How much to randomly shift the clips by in time.\nmode: Which partition to use, must be 'training', 'validation', or\n'testing'.\nsess: TensorFlow session that was active when processor was created.\n\nReturns:\nList of sample data for the transformed samples, and list of label indexes\n\nRaises:\nValueError: If background samples are too short.", "source": "github-repos"}
{"code": "def __init__(self, byte_size, is_complete=False):\n    \n    super(DataTypeMapSizeHint, self).__init__()\n    self.byte_size = byte_size\n    self.is_complete = is_complete", "docstring": "Initializes a data type map size hint.\n\nArgs:\nbyte_size (int): byte size.\nis_complete (optional[bool]): True if the size is the complete size of\nthe data type.", "source": "juraj-google-style"}
{"code": "def delete(self, filename):\n    folder = ('Packages' if is_package(filename) else 'Scripts')\n    path = os.path.join(self.connection['mount_point'], folder, filename)\n    if os.path.isdir(path):\n        shutil.rmtree(path)\n    elif os.path.isfile(path):\n        os.remove(path)", "docstring": "Delete a file from the repository.\n\nThis method will not delete a script from a migrated JSS.\nPlease remove migrated scripts with jss.Script.delete.\n\nArgs:\nfilename: String filename only (i.e. no path) of file to\ndelete. Will handle deleting scripts vs. packages\nautomatically.", "source": "codesearchnet"}
{"code": "def remove_redistribution(self, protocol):\n        \n\n        protocols = ['bgp', 'rip', 'static', 'connected']\n        if protocol not in protocols:\n            raise ValueError('redistributed protocol must be'\n                             'bgp, connected, rip or static')\n        cmd = 'no redistribute {}'.format(protocol)\n        return self.configure_ospf(cmd)", "docstring": "Removes a protocol redistribution to OSPF\n\nArgs:\nprotocol (str):  protocol to redistribute\nroute_map_name (str): route-map to be used to\nfilter the protocols\nReturns:\nbool: True if the command completes successfully\nException:\nValueError:  This will be raised if the protocol pass is not one\nof the following: [rip, bgp, static, connected]", "source": "juraj-google-style"}
{"code": "def base_list_parser():\n    base_parser = ArgumentParser(add_help=False)\n    base_parser.add_argument('-F', '--format', action='store', default='default', choices=['csv', 'json', 'yaml', 'default'], help='choose the output format')\n    return base_parser", "docstring": "Creates a parser with arguments specific to formatting lists\nof resources.\n\nReturns:\n{ArgumentParser}: Base parser with defaul list args", "source": "codesearchnet"}
{"code": "def _allocate_ips_to_nics(self, conf):\n        \n        for dom_name, dom_spec in conf.get('domains', {}).items():\n            for idx, nic in enumerate(dom_spec.get('nics', [])):\n                if 'ip' in nic:\n                    continue\n                net = self._get_net(conf, dom_name, nic)\n                if net['type'] != 'nat':\n                    continue\n\n                allocated = net['mapping'].values()\n                vacant = _create_ip(\n                    net['gw'],\n                    set(range(2, 255)).difference(\n                        set([int(ip.split('.')[-1]) for ip in allocated])\n                    ).pop()\n                )\n                nic['ip'] = vacant\n                self._add_nic_to_mapping(net, dom_spec, nic)", "docstring": "For all the nics of all the domains in the conf that have dynamic ip,\nallocate one and addit to the network mapping\n\nArgs:\nconf (dict): Configuration spec to extract the domains from\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_calendar(self, **kwargs):\n    start_date = util.date_string(kwargs.get('start_day', '01'), kwargs.get('start_month', '01'), kwargs.get('start_year', '1970'))\n    end_date = util.date_string(kwargs.get('end_day', '01'), kwargs.get('end_month', '01'), kwargs.get('end_year', '1970'))\n    params = {'SelectDateBegin': start_date, 'SelectDateEnd': end_date}\n    result = self.make_request('bus', 'get_calendar', **params)\n    if (not util.check_result(result)):\n        return (False, result.get('resultDescription', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'resultValues')\n    return (True, [emtype.CalendarItem(**a) for a in values])", "docstring": "Obtain EMT calendar for a range of dates.\n\nArgs:\nstart_day (int): Starting day of the month in format DD.\nThe number is automatically padded if it only has one digit.\nstart_month (int): Starting month number in format MM.\nThe number is automatically padded if it only has one digit.\nstart_year (int): Starting year number in format YYYY.\nend_day (int): Ending day of the month in format DD.\nThe number is automatically padded if it only has one digit.\nend_month (int): Ending month number in format MM.\nThe number is automatically padded if it only has one digit.\nend_year (int): Ending year number in format YYYY.\n\nReturns:\nStatus boolean and parsed response (list[CalendarItem]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def write_markdown_to_file(self, f):\n    \n    print(\"---\", file=f)\n    print(\"---\", file=f)\n    print(\"<!-- This file is machine generated: DO NOT EDIT! -->\", file=f)\n    print(\"\", file=f)\n    print(\"\n    print(\"\", file=f)\n    fullname_f = lambda name: self._members[name][0]\n    anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))\n\n    for filename, library in self._filename_to_library_map:\n      sorted_names = sorted(library.mentioned, key=lambda x: (str.lower(x), x))\n      member_names = [n for n in sorted_names if n in self._members]\n      \n      \n      full_filename = self._path_prefix + filename\n      links = [\"[`%s`](%s\n               for name in member_names]\n      if links:\n        print(\"* **[%s](%s)**:\" % (library.title, full_filename[:-3]), file=f)\n        for link in links:\n          print(\"  * %s\" % link, file=f)\n        print(\"\", file=f)", "docstring": "Writes this index to file `f`.\n\nThe output is formatted as an unordered list. Each list element\ncontains the title of the library, followed by a list of symbols\nin that library hyperlinked to the corresponding anchor in that\nlibrary.\n\nArgs:\nf: The output file.", "source": "juraj-google-style"}
{"code": "def Artifacts(self, os_name=None, cpe=None, label=None):\n    \n    hit = lambda x: x[0] == x[1] or not x[0]\n    seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]\n    return all(map(hit, seq))", "docstring": "Whether the conditions applies, modulo host data.\n\nArgs:\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nTrue if os_name, cpe or labels match. Empty values are ignored.", "source": "juraj-google-style"}
{"code": "def get_v2_optimizer(name, **kwargs):\n    try:\n        return _V2_OPTIMIZER_MAP[name](**kwargs)\n    except KeyError:\n        raise ValueError('Could not find requested v2 optimizer: {}\\nValid choices: {}'.format(name, list(_V2_OPTIMIZER_MAP.keys())))", "docstring": "Get the v2 optimizer requested.\n\nThis is only necessary until v2 are the default, as we are testing in Eager,\nand Eager + v1 optimizers fail tests. When we are in v2, the strings alone\nshould be sufficient, and this mapping can theoretically be removed.\n\nArgs:\nname: string name of Keras v2 optimizer.\n**kwargs: any kwargs to pass to the optimizer constructor.\n\nReturns:\nInitialized Keras v2 optimizer.\n\nRaises:\nValueError: if an unknown name was passed.", "source": "github-repos"}
{"code": "def add_action_to(cls, parser, action, subactions, level):\n    p = parser.add_parser(action.name, description=action.description, argument_default=argparse.SUPPRESS)\n    for arg in action.args:\n        arg.add_argument_to(p)\n    if subactions:\n        subparsers = cls._add_subparsers_required(p, dest=settings.SUBASSISTANT_N_STRING.format(level), title=cls.subactions_str, description=cls.subactions_desc)\n        for (subact, subsubacts) in sorted(subactions.items(), key=(lambda x: x[0].name)):\n            cls.add_action_to(subparsers, subact, subsubacts, (level + 1))", "docstring": "Adds given action to given parser\n\nArgs:\nparser: instance of devassistant_argparse.ArgumentParser\naction: devassistant.actions.Action subclass\nsubactions: dict with subactions - {SubA: {SubB: {}}, SubC: {}}", "source": "codesearchnet"}
{"code": "def signature_cert_chain_url(url):\n    \n    r = urlparse(url)\n    if not r.scheme.lower() == 'https':\n        warnings.warn('Certificate URL scheme is invalid.')\n        return False\n    if not r.hostname.lower() == 's3.amazonaws.com':\n        warnings.warn('Certificate URL hostname is invalid.')\n        return False\n    if not os.path.normpath(r.path).startswith('/echo.api/'):\n        warnings.warn('Certificate URL path is invalid.')\n        return False\n    if r.port and not r.port == 443:\n        warnings.warn('Certificate URL port is invalid.')\n        return False\n    return True", "docstring": "Validate URL specified by SignatureCertChainUrl.\n\nSee `validate.request` for additional info.\n\nArgs:\nurl: str. SignatureCertChainUrl header value sent by request.\n\nReturns:\nbool: True if valid, False otherwise.", "source": "juraj-google-style"}
{"code": "def import_from_xml(xml, edx_video_id, resource_fs, static_dir, external_transcripts=dict(), course_id=None):\n    if (xml.tag != 'video_asset'):\n        raise ValCannotCreateError('Invalid XML')\n    try:\n        if (not edx_video_id):\n            raise Video.DoesNotExist\n        video = Video.objects.get(edx_video_id=edx_video_id)\n        logger.info(\"edx_video_id '%s' present in course '%s' not imported because it exists in VAL.\", edx_video_id, course_id)\n        if (course_id and (video.status != EXTERNAL_VIDEO_STATUS)):\n            (course_video, __) = CourseVideo.get_or_create_with_validation(video=video, course_id=course_id)\n            image_file_name = xml.get('image', '').strip()\n            if image_file_name:\n                VideoImage.create_or_update(course_video, image_file_name)\n        return edx_video_id\n    except ValidationError as err:\n        logger.exception(err.message)\n        raise ValCannotCreateError(err.message_dict)\n    except Video.DoesNotExist:\n        pass\n    if edx_video_id:\n        data = {'edx_video_id': edx_video_id, 'client_video_id': xml.get('client_video_id'), 'duration': xml.get('duration'), 'status': 'imported', 'encoded_videos': [], 'courses': ([{course_id: xml.get('image')}] if course_id else [])}\n        for encoded_video_el in xml.iterfind('encoded_video'):\n            profile_name = encoded_video_el.get('profile')\n            try:\n                Profile.objects.get(profile_name=profile_name)\n            except Profile.DoesNotExist:\n                logger.info(\"Imported edx_video_id '%s' contains unknown profile '%s'.\", edx_video_id, profile_name)\n                continue\n            data['encoded_videos'].append({'profile': profile_name, 'url': encoded_video_el.get('url'), 'file_size': encoded_video_el.get('file_size'), 'bitrate': encoded_video_el.get('bitrate')})\n        if (not data['encoded_videos']):\n            data['status'] = EXTERNAL_VIDEO_STATUS\n            data['courses'] = []\n        edx_video_id = create_video(data)\n    else:\n        edx_video_id = create_external_video('External Video')\n    create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts)\n    return edx_video_id", "docstring": "Imports data from a video_asset element about the given video_id.\n\nIf the edx_video_id already exists, then no changes are made. If an unknown\nprofile is referenced by an encoded video, that encoding will be ignored.\n\nArguments:\nxml (Element): An lxml video_asset element containing import data\nedx_video_id (str): val video id\nresource_fs (OSFS): Import file system.\nstatic_dir (str): The Directory to retrieve transcript file.\nexternal_transcripts (dict): A dict containing the list of names of the external transcripts.\nExample:\n{\n'en': ['The_Flash.srt', 'Harry_Potter.srt'],\n'es': ['Green_Arrow.srt']\n}\ncourse_id (str): The ID of a course to associate the video with\n\nRaises:\nValCannotCreateError: if there is an error importing the video\n\nReturns:\nedx_video_id (str): val video id.", "source": "codesearchnet"}
{"code": "def log_correction(self, event, action):\n        \n        \n        action = str(action)\n        self.history.info(action)\n\n        self._corrections.append(dict(\n            event=event.as_dict(),\n            action=action,\n        ))", "docstring": "This method should be called once we have fixed the problem associated to this event.\nIt adds a new entry in the correction history of the node.\n\nArgs:\nevent: :class:`AbinitEvent` that triggered the correction.\naction (str): Human-readable string with info on the action perfomed to solve the problem.", "source": "juraj-google-style"}
{"code": "def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride):\n    \n    input_layer = cnn.top_layer\n    in_size = cnn.top_size\n    name_key = \"resnet_v1\"\n    name = name_key + str(cnn.counts[name_key])\n    cnn.counts[name_key] += 1\n\n    with tf.variable_scope(name):\n        if depth == in_size:\n            if stride == 1:\n                shortcut = input_layer\n            else:\n                shortcut = cnn.apool(\n                    1,\n                    1,\n                    stride,\n                    stride,\n                    input_layer=input_layer,\n                    num_channels_in=in_size)\n        else:\n            shortcut = cnn.conv(\n                depth,\n                1,\n                1,\n                stride,\n                stride,\n                activation=None,\n                use_batch_norm=True,\n                input_layer=input_layer,\n                num_channels_in=in_size,\n                bias=None)\n        cnn.conv(\n            depth_bottleneck,\n            1,\n            1,\n            stride,\n            stride,\n            input_layer=input_layer,\n            num_channels_in=in_size,\n            use_batch_norm=True,\n            bias=None)\n        cnn.conv(\n            depth_bottleneck,\n            3,\n            3,\n            1,\n            1,\n            mode=\"SAME_RESNET\",\n            use_batch_norm=True,\n            bias=None)\n        res = cnn.conv(\n            depth, 1, 1, 1, 1, activation=None, use_batch_norm=True, bias=None)\n        output = tf.nn.relu(shortcut + res)\n        cnn.top_layer = output\n        cnn.top_size = depth", "docstring": "Bottleneck block with identity short-cut for ResNet v1.\n\nArgs:\ncnn: the network to append bottleneck blocks.\ndepth: the number of output filters for this bottleneck block.\ndepth_bottleneck: the number of bottleneck filters for this block.\nstride: Stride used in the first layer of the bottleneck block.", "source": "juraj-google-style"}
{"code": "def _CreateRouteTripsFolder(self, parent, route, style_id=None, schedule=None):\n    \n    if not route.trips:\n      return None\n    trips = list(route.trips)\n    trips.sort(key=lambda x: x.trip_id)\n    trips_folder = self._CreateFolder(parent, 'Trips', visible=False)\n    for trip in trips:\n      if (self.date_filter and\n          not trip.service_period.IsActiveOn(self.date_filter)):\n        continue\n\n      if trip.trip_headsign:\n        description = 'Headsign: %s' % trip.trip_headsign\n      else:\n        description = None\n\n      coordinate_list = []\n      for secs, stoptime, tp in trip.GetTimeInterpolatedStops():\n        if self.altitude_per_sec > 0:\n          coordinate_list.append((stoptime.stop.stop_lon, stoptime.stop.stop_lat,\n                                  (secs - 3600 * 4) * self.altitude_per_sec))\n        else:\n          coordinate_list.append((stoptime.stop.stop_lon,\n                                  stoptime.stop.stop_lat))\n      placemark = self._CreatePlacemark(trips_folder,\n                                        trip.trip_id,\n                                        style_id=style_id,\n                                        visible=False,\n                                        description=description)\n      self._CreateLineString(placemark, coordinate_list)\n    return trips_folder", "docstring": "Create a KML Folder containing all the trips in the route.\n\nThe folder contains a placemark for each of these trips. If there are no\ntrips in the route, no folder is created and None is returned.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nroute: The transitfeed.Route instance.\nstyle_id: A style id string for the placemarks or None.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "juraj-google-style"}
{"code": "def service(self, block, service_name):\n    declaration = block.service_declaration(service_name)\n    if (declaration is None):\n        raise NoSuchServiceError('Service {!r} was not requested.'.format(service_name))\n    service = self._services.get(service_name)\n    if ((service is None) and (declaration == 'need')):\n        raise NoSuchServiceError('Service {!r} is not available.'.format(service_name))\n    return service", "docstring": "Return a service, or None.\n\nServices are objects implementing arbitrary other interfaces.  They are\nrequested by agreed-upon names, see [XXX TODO] for a list of possible\nservices.  The object returned depends on the service requested.\n\nXBlocks must announce their intention to request services with the\n`XBlock.needs` or `XBlock.wants` decorators.  Use `needs` if you assume\nthat the service is available, or `wants` if your code is flexible and\ncan accept a None from this method.\n\nRuntimes can override this method if they have different techniques for\nfinding and delivering services.\n\nArguments:\nblock (XBlock): this block's class will be examined for service\ndecorators.\nservice_name (str): the name of the service requested.\n\nReturns:\nAn object implementing the requested service, or None.", "source": "codesearchnet"}
{"code": "def nPr(n, r):\n    f = math.factorial\n    return int((f(n) / f((n - r))))", "docstring": "Calculates nPr.\n\nArgs:\nn (int): total number of items.\nr (int): items to permute\n\nReturns:\nnPr.", "source": "codesearchnet"}
{"code": "def _use_tables(objs):\n    from ..models.widgets import TableWidget\n    return _any(objs, (lambda obj: isinstance(obj, TableWidget)))", "docstring": "Whether a collection of Bokeh objects contains a TableWidget\n\nArgs:\nobjs (seq[Model or Document]) :\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def attach_stream(self, stream):\n    (curr_stream, count, prev) = self._allocated_streams[stream]\n    if (count == (self.model.get(u'max_node_outputs') - 1)):\n        new_stream = self.allocate_stream(curr_stream.stream_type, previous=curr_stream)\n        copy_desc = u'({} always) => {} using copy_all_a'.format(curr_stream, new_stream)\n        self.sensor_graph.add_node(copy_desc)\n        self._allocated_streams[stream] = (new_stream, 1, curr_stream)\n        if ((curr_stream.stream_type == DataStream.ConstantType) and (curr_stream in self.sensor_graph.constant_database)):\n            self.sensor_graph.add_constant(new_stream, self.sensor_graph.constant_database[curr_stream])\n        return new_stream\n    self._allocated_streams[stream] = (curr_stream, (count + 1), prev)\n    return curr_stream", "docstring": "Notify that we would like to attach a node input to this stream.\n\nThe return value from this function is the DataStream that should be attached\nto since this function may internally allocate a new SGNode that copies the\nstream if there is no space in the output list to hold another input.\n\nThis function should be called once for every node input before allocated a new\nsensor graph node that attaches to a stream that is managed by the StreamAllocator.\n\nArgs:\nstream (DataStream): The stream (originally returned from allocate_stream)\nthat we want to attach to.\n\nReturns:\nDatastream: A data stream, possible the same as stream, that should be attached\nto a node input.", "source": "codesearchnet"}
{"code": "def GetArtifactParserDependencies(rdf_artifact):\n    deps = set()\n    processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)\n    for p in processors:\n        deps.update(p.knowledgebase_dependencies)\n    return deps", "docstring": "Return the set of knowledgebase path dependencies required by the parser.\n\nArgs:\nrdf_artifact: RDF artifact object.\n\nReturns:\nA set of strings for the required kb objects e.g.\n[\"users.appdata\", \"systemroot\"]", "source": "codesearchnet"}
{"code": "def EvalGeneric(self, hashers=None):\n    if (hashers is None):\n        hashers = Fingerprinter.GENERIC_HASH_CLASSES\n    hashfuncs = [x() for x in hashers]\n    finger = Finger(hashfuncs, [Range(0, self.filelength)], {'name': 'generic'})\n    self.fingers.append(finger)\n    return True", "docstring": "Causes the entire file to be hashed by the given hash functions.\n\nThis sets up a 'finger' for fingerprinting, where the entire file\nis passed through a pre-defined (or user defined) set of hash functions.\n\nArgs:\nhashers: An iterable of hash classes (e.g. out of hashlib) which will\nbe instantiated for use. If hashers is not provided, or is\nprovided as 'None', the default hashers will get used. To\ninvoke this without hashers, provide an empty list.\n\nReturns:\nAlways True, as all files are 'generic' files.", "source": "codesearchnet"}
{"code": "def merge_entries(self, source_entry):\n        \n        \n        \n        \n        for list_attr in source_entry.attrs.values():\n            for attr in list_attr:\n                self.attrs[attr.header.attr_type_id].append(attr) \n        \n        for stream in source_entry.data_streams:\n            dest_stream = self._find_datastream(stream.name)\n            if dest_stream is not None:\n                dest_stream.add_from_datastream(stream)\n            else:\n                self.data_streams.append(stream)", "docstring": "Merge two entries.\n\nAllow the merging of two MFTEntries copying the attributes to the correct\nplace and the datastreams.\n\nArgs:\nsource_entry (:obj:`MFTEntry`) - Source entry where the data will be\ncopied from", "source": "juraj-google-style"}
{"code": "def get(self, name_or_uri):\n        \n        name_or_uri = quote(name_or_uri)\n        return self._client.get(name_or_uri)", "docstring": "Get the role by its URI or Name.\n\nArgs:\nname_or_uri:\nCan be either the Name or the URI.\n\nReturns:\ndict: Role", "source": "juraj-google-style"}
{"code": "def _MergeSameAgency(self, a_agency_id, b_agency_id):\n    a_agency_id = (a_agency_id or self.feed_merger.a_schedule.GetDefaultAgency().agency_id)\n    b_agency_id = (b_agency_id or self.feed_merger.b_schedule.GetDefaultAgency().agency_id)\n    a_agency = self.feed_merger.a_schedule.GetAgency(a_agency_id)._migrated_entity\n    b_agency = self.feed_merger.b_schedule.GetAgency(b_agency_id)._migrated_entity\n    if (a_agency != b_agency):\n        raise MergeError('agency must be the same')\n    return a_agency.agency_id", "docstring": "Merge agency ids to the corresponding agency id in the merged schedule.\n\nArgs:\na_agency_id: an agency id from the old schedule\nb_agency_id: an agency id from the new schedule\n\nReturns:\nThe agency id of the corresponding merged agency.\n\nRaises:\nMergeError: If a_agency_id and b_agency_id do not correspond to the same\nmerged agency.\nKeyError: Either aaid or baid is not a valid agency id.", "source": "codesearchnet"}
{"code": "def get_config_string_option(parser: ConfigParser,\n                             section: str,\n                             option: str,\n                             default: str = None) -> str:\n    \n    if not parser.has_section(section):\n        raise ValueError(\"config missing section: \" + section)\n    return parser.get(section, option, fallback=default)", "docstring": "Retrieves a string value from a parser.\n\nArgs:\nparser: instance of :class:`ConfigParser`\nsection: section name within config file\noption: option (variable) name within that section\ndefault: value to return if option is absent\n\nReturns:\nstring value\n\nRaises:\nValueError: if the section is absent", "source": "juraj-google-style"}
{"code": "def nrows(self, out_type=None, name=None):\n    with ops.name_scope(name, 'RaggedNRows', [self]):\n        if out_type is None:\n            return self._row_partition.nrows()\n        else:\n            return math_ops.cast(self._row_partition.nrows(), dtype=out_type)", "docstring": "Returns the number of rows in this ragged tensor.\n\nI.e., the size of the outermost dimension of the tensor.\n\nArgs:\nout_type: `dtype` for the returned tensor.  Defaults to\n`self.row_splits.dtype`.\nname: A name prefix for the returned tensor (optional).\n\nReturns:\nA scalar `Tensor` with dtype `out_type`.\n\n#### Example:\n\n>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n>>> print(rt.nrows())  # rt has 5 rows.\ntf.Tensor(5, shape=(), dtype=int64)", "source": "github-repos"}
{"code": "def _get_command_and_argv(argv):\n    \n    \n    command_name = argv[0]\n    if not command_name:\n        argv = argv[1:]\n    elif command_name == settings.command:\n        argv.remove(command_name)\n    return command_name, argv", "docstring": "Extract the command name and arguments to pass to docopt.\n\nArgs:\nargv: The argument list being used to run the command.\n\nReturns:\nA tuple containing the name of the command and the arguments to pass\nto docopt.", "source": "juraj-google-style"}
{"code": "def __init__(self, loop_var, loop_len, pfor_ops, fallback_to_while_loop, all_indices=None, all_indices_partitioned=False, pfor_config=None, warn=False):\n    assert isinstance(loop_var, tensor_lib.Tensor)\n    assert loop_var.op.type == 'PlaceholderWithDefault'\n    self._loop_var = loop_var\n    loop_len_value = tensor_util.constant_value(loop_len)\n    if loop_len_value is not None:\n        loop_len = loop_len_value\n        self._loop_len_vector = ops.convert_to_tensor([loop_len])\n    else:\n        self._loop_len_vector = array_ops.reshape(loop_len, [1])\n    self._all_indices_partitioned = all_indices_partitioned\n    if all_indices_partitioned:\n        assert all_indices is not None\n    if all_indices is None:\n        self.all_indices = math_ops.range(loop_len, dtype=dtypes.int32, name='all_indices')\n    else:\n        self.all_indices = all_indices\n    self._conversion_map = object_identity.ObjectIdentityDictionary()\n    self._conversion_map[loop_var] = wrap(self.all_indices, True)\n    self._pfor_ops = set(pfor_ops)\n    self._pfor_op_ids = set((x._id for x in pfor_ops))\n    self._fallback_to_while_loop = fallback_to_while_loop\n    self._warn = warn\n    self._pfor_config = pfor_config", "docstring": "Creates an object to rewrite a parallel-for loop.\n\nArgs:\nloop_var: Tensor output of a Placeholder operation. The value should\nbe an int32 scalar representing the loop iteration number.\nloop_len: A scalar or scalar Tensor representing the number of iterations\nthe loop is run for.\npfor_ops: List of all ops inside the loop body.\nfallback_to_while_loop: If True, on failure to vectorize an op, a while\nloop is used to sequentially execute that op.\nall_indices: If not None, an int32 vector with size `loop_len`\nrepresenting the iteration ids that are still active. These values\nshould be unique and sorted. However they may not be contiguous. This is\ntypically the case when inside a control flow construct which has\npartitioned the indices of the iterations that are being converted.\nall_indices_partitioned: If True, this object is being constructed from a\ncontrol flow construct where not all the pfor iterations are guaranteed\nto be active.\npfor_config: PForConfig object used while constructing the loop body.\nwarn: Whether or not to warn on while loop conversions.", "source": "github-repos"}
{"code": "def vstack(xs):\n    if any_symbolic_tensors((xs,)):\n        return Vstack().symbolic_call(xs)\n    return backend.numpy.vstack(xs)", "docstring": "Stack tensors in sequence vertically (row wise).\n\nArgs:\nxs: Sequence of tensors.\n\nReturns:\nTensor formed by stacking the given tensors.", "source": "github-repos"}
{"code": "def create_chapter_from_string(self, html_string, url=None, title=None):\n    clean_html_string = self.clean_function(html_string)\n    clean_xhtml_string = clean.html_to_xhtml(clean_html_string)\n    if title:\n        pass\n    else:\n        try:\n            root = BeautifulSoup(html_string, 'html.parser')\n            title_node = root.title\n            if (title_node is not None):\n                title = unicode(title_node.string)\n            else:\n                raise ValueError\n        except (IndexError, ValueError):\n            title = 'Ebook Chapter'\n    return Chapter(clean_xhtml_string, title, url)", "docstring": "Creates a Chapter object from a string. Sanitizes the\nstring using the clean_function method, and saves\nit as the content of the created chapter.\n\nArgs:\nhtml_string (string): The html or xhtml content of the created\nChapter\nurl (Option[string]): A url to infer the title of the chapter from\ntitle (Option[string]): The title of the created Chapter. By\ndefault, this is None, in which case the title will try to be\ninferred from the webpage at the url.\n\nReturns:\nChapter: A chapter object whose content is the given string\nand whose title is that provided or inferred from the url", "source": "codesearchnet"}
{"code": "def _ExtractInterfaceMetadata(self, metadata):\n    interfaces = []\n    for network_interface in metadata:\n        mac_address = network_interface.get('mac')\n        interface = self.network_utils.GetNetworkInterface(mac_address)\n        ip_addresses = []\n        if interface:\n            ip_addresses.extend(network_interface.get('forwardedIps', []))\n            if self.ip_aliases:\n                ip_addresses.extend(network_interface.get('ipAliases', []))\n            if self.target_instance_ips:\n                ip_addresses.extend(network_interface.get('targetInstanceIps', []))\n            interfaces.append(NetworkDaemon.NetworkInterface(interface, ip_addresses, network_interface.get('ip', [])))\n        else:\n            message = 'Network interface not found for MAC address: %s.'\n            self.logger.warning(message, mac_address)\n    return interfaces", "docstring": "Extracts network interface metadata.\n\nArgs:\nmetadata: dict, the metadata response with the new network interfaces.\n\nReturns:\nlist, a list of NetworkInterface objects.", "source": "codesearchnet"}
{"code": "def create(self, key, value):\n        \n        key = quote(key, safe='~')\n        headers = {'content-type': 'application/octet-stream'}\n        url = '/internal/playbooks/keyValue/{}'.format(key)\n        r = self.tcex.session.put(url, data=value, headers=headers)\n        return r.content", "docstring": "Create key/value pair in remote KV store.\n\nArgs:\nkey (string): The key to create in remote KV store.\nvalue (any): The value to store in remote KV store.\n\nReturns:\n(string): The response from the API call.", "source": "juraj-google-style"}
{"code": "def _get_array(self, handle: int) -> np.ndarray:\n        \n        tup = self._arrays[handle]\n        assert tup is not None\n        c_arr, shape = tup\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore', RuntimeWarning)\n            result = np.ctypeslib.as_array(c_arr)\n        result.shape = shape\n        return result", "docstring": "Returns the array with the given handle.\n\nArgs:\nhandle: The handle of the array whose memory should be freed. This\nhandle must come from the _create_array method.\n\nReturns:\nThe numpy ndarray with the handle given from _create_array.", "source": "juraj-google-style"}
{"code": "class GraniteMoeSharedMoE(nn.Module):\n\n    def __init__(self, config: GraniteMoeSharedConfig):\n        super(GraniteMoeSharedMoE, self).__init__()\n        self.input_size = config.hidden_size\n        self.hidden_size = config.intermediate_size\n        self.activation = ACT2FN[config.hidden_act]\n        self.input_linear = GraniteMoeSharedParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)\n        self.output_linear = GraniteMoeSharedParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)\n        self.router = GraniteMoeSharedTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)\n\n    def forward(self, layer_input):\n        \n        bsz, length, emb_size = layer_input.size()\n        layer_input = layer_input.reshape(-1, emb_size)\n        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n        expert_inputs = layer_input[batch_index]\n        hidden_states = self.input_linear(expert_inputs, expert_size)\n        chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n        expert_outputs = self.output_linear(hidden_states, expert_size)\n        expert_outputs = expert_outputs * batch_gates[:, None]\n        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n        layer_output = zeros.index_add(0, batch_index, expert_outputs)\n        layer_output = layer_output.view(bsz, length, self.input_size)\n        return (layer_output, router_logits)", "docstring": "A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def to_genai_part(part_content: content_api.ProcessorPartTypes, mimetype: str | None=None) -> genai_types.Part:\n    if isinstance(part_content, str):\n        return genai_types.Part(text=part_content)\n    elif isinstance(part_content, bytes):\n        if mimetype is None:\n            raise ValueError('Mimetype must be specified for bytes to_genai_part conversion.')\n        p = ProcessorPart(part_content, mimetype=mimetype)\n        return p.part\n    elif isinstance(part_content, Image.Image):\n        p = ProcessorPart(part_content)\n        return p.part\n    elif isinstance(part_content, ProcessorPart):\n        return part_content.part\n    elif isinstance(part_content, genai_types.Part):\n        return part_content\n    else:\n        raise ValueError(f'Unsupported type for to_genai_part: {type(part_content)}')", "docstring": "Converts object of type `ProcessorPartTypes` to a Genai Part.\n\nArgs:\npart_content: The content to convert.\nmimetype: (Optional) The mimetype of the content. Must be specified if\npart_content is bytes.\n\nReturns:\nThe Genai Part representation of the content.", "source": "github-repos"}
{"code": "def set(self, name: str, value: Any) -> None:\n    self.agent.set(name, value)", "docstring": "Stores a knowledge item in the agent knowledge base.\n\nArgs:\nname (str): name of the item\nvalue (Any): value of the item", "source": "codesearchnet"}
{"code": "def update(self, **kwargs):\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(self, key):\n            setattr(self, key, value)\n            to_remove.append(key)\n    unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}\n    return unused_kwargs", "docstring": "Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,\nreturning all the unused kwargs.\n\nArgs:\nkwargs (`Dict[str, Any]`):\nDictionary of attributes to tentatively update this class.\n\nReturns:\n`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.", "source": "github-repos"}
{"code": "def update_uid_state(self, id_or_uri, refresh_state_data):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/uidState\"\n        return self._client.update(refresh_state_data, uri)", "docstring": "Sets the unit identification (UID) light state of the specified power delivery device. The device must be an\nHP iPDU component with a locator light (HP Intelligent Load Segment, HP AC Module, HP Intelligent Outlet Bar,\nor HP Intelligent Outlet)\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\nrefresh_state_data:\nPower device refresh request\n\nReturns:\nstr: The UID state", "source": "juraj-google-style"}
{"code": "def get_text(obj) -> Tuple[int, str]:\n    \n    from bioc.bioc import BioCDocument, BioCPassage, BioCSentence\n\n    if isinstance(obj, BioCSentence):\n        return obj.offset, obj.text\n    if isinstance(obj, BioCPassage):\n        if obj.text:\n            return obj.offset, obj.text\n        text = ''\n        for sentence in obj.sentences:\n            try:\n                text = pad_char(text, sentence.offset - obj.offset, ' ')\n                assert sentence.text, f'BioC sentence has no text: {sentence.offset}'\n                text += sentence.text\n            except ValueError:\n                raise ValueError(f'Overlapping sentences {sentence.offset}')\n        return obj.offset, text\n    if isinstance(obj, BioCDocument):\n        text = ''\n        for passage in obj.passages:\n            try:\n                text = pad_char(text, passage.offset)\n                text += get_text(passage)[1]\n            except ValueError:\n                raise ValueError(f'{obj.id}: overlapping passages {passage.offset}')\n        return 0, text\n    raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '\n                    f'BioCDocument, BioCPassage, or BioCSentence')", "docstring": "Return text with its offset in the document\n\nArgs:\nobj: BioCDocument, BioCPassage, or BioCSentence\n\nReturns:\noffset, text", "source": "juraj-google-style"}
{"code": "def run_query_series(queries, conn):\n    results = []\n    for item in queries:\n        qry = item\n        kwargs = {}\n        if isinstance(item, tuple):\n            qry = item[0]\n            kwargs = item[1]\n        result = conn.update_query(qry, **kwargs)\n        results.append(result)\n    return results", "docstring": "Iterates through a list of queries and runs them through the connection\n\nArgs:\n-----\nqueries: list of strings or tuples containing (query_string, kwargs)\nconn: the triplestore connection to use", "source": "codesearchnet"}
{"code": "def step(self, action, blocking=True):\n    promise = self.call('step', action)\n    if blocking:\n        return promise()\n    else:\n        return promise", "docstring": "Step the environment.\n\nArgs:\naction: The action to apply to the environment.\nblocking: Whether to wait for the result.\n\nReturns:\nTransition tuple when blocking, otherwise callable that returns the\ntransition tuple.", "source": "codesearchnet"}
{"code": "def help(route):\n  r\n  help_text = getRouteHelp(route.split('/') if route else [])\n\n  if help_text is None:\n    err('Can\\'t help :(')\n\n  else:\n    print '\\n%s' % help_text", "docstring": "r\"\"\"Displays help for the given route.\n\nArgs:\nroute (str): A route that resolves a member.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n    if self.summary_type == 'last':\n        output = hidden_states[:, -1]\n    elif self.summary_type == 'first':\n        output = hidden_states[:, 0]\n    elif self.summary_type == 'mean':\n        output = hidden_states.mean(dim=1)\n    elif self.summary_type == 'cls_index':\n        if cls_index is None:\n            cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long)\n        else:\n            cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)\n            cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))\n        output = hidden_states.gather(-2, cls_index).squeeze(-2)\n    elif self.summary_type == 'attn':\n        raise NotImplementedError\n    output = self.first_dropout(output)\n    output = self.summary(output)\n    output = self.activation(output)\n    output = self.last_dropout(output)\n    return output", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\nhidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):\nThe hidden states of the last layer.\ncls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):\nUsed if `summary_type == \"cls_index\"` and takes the last token of the sequence as classification token.\n\nReturns:\n`torch.FloatTensor`: The summary of the sequence hidden states.", "source": "github-repos"}
{"code": "def __getitem__(self, key):\n        \n        if key in self.patterns:\n            return ScreenPattern(self.patterns[key], self.field_registry)\n        for shorter in range(key, 0, -1):\n            if shorter in self.min_patterns:\n                pattern = self.min_patterns[shorter]\n\n                \n                prefix = [''] * (key - shorter / 2)\n                return ScreenPattern(prefix + pattern, self.field_registry)\n        return ScreenPattern([], self.field_registry)", "docstring": "Retrieve the best pattern for a given size.\n\nThe algorithm is:\n- If a pattern is registered for the size, use it\n- Otherwise, find the longest registered pattern shorter thant size, add\nsome blank lines before, and return it\n- If no shorter pattern exist, return a blank pattern.\n\nArgs:\nkey (int): the target size\n\nReturns:\nScreenPattern: the best pattern available for that size", "source": "juraj-google-style"}
{"code": "def continue_abort(self,\n                     root_pipeline_key,\n                     cursor=None,\n                     max_to_notify=_MAX_ABORTS_TO_BEGIN):\n    \n    if not isinstance(root_pipeline_key, db.Key):\n      root_pipeline_key = db.Key(root_pipeline_key)\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    query = (\n        _PipelineRecord.all(cursor=cursor)\n        .filter('root_pipeline =', root_pipeline_key))\n    results = query.fetch(max_to_notify)\n\n    task_list = []\n    for pipeline_record in results:\n      if pipeline_record.status not in (\n          _PipelineRecord.RUN, _PipelineRecord.WAITING):\n        continue\n\n      pipeline_key = pipeline_record.key()\n      task_list.append(taskqueue.Task(\n          name='%s-%s-abort' % (self.task_name, pipeline_key.name()),\n          url=self.abort_handler_path,\n          params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT),\n          headers={'X-Ae-Pipeline-Key': pipeline_key}))\n\n    \n    if len(results) == max_to_notify:\n      the_match = re.match('(.*)-([0-9]+)', self.task_name)\n      if the_match:\n        prefix = the_match.group(1)\n        end = int(the_match.group(2)) + 1\n      else:\n        prefix = self.task_name\n        end = 0\n      task_list.append(taskqueue.Task(\n          name='%s-%d' % (prefix, end),\n          url=self.fanout_abort_handler_path,\n          params=dict(root_pipeline_key=root_pipeline_key,\n                      cursor=query.cursor())))\n\n    if task_list:\n      try:\n        taskqueue.Queue(self.queue_name).add(task_list)\n      except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):\n        pass", "docstring": "Sends the abort signal to all children for a root pipeline.\n\nArgs:\nroot_pipeline_key: db.Key of the root pipeline to abort.\ncursor: The query cursor for enumerating _PipelineRecords when inserting\ntasks to cause child pipelines to terminate.\nmax_to_notify: Used for testing.", "source": "juraj-google-style"}
{"code": "def iter_processed_text(self, file, encoding=None, base_url=None):\n    for (text, is_link) in self.iter_text(file, encoding):\n        if (is_link and base_url):\n            new_link = urljoin_safe(base_url, text, allow_fragments=False)\n            if new_link:\n                (yield (new_link, is_link))\n            else:\n                (yield (new_link, False))\n        else:\n            (yield (text, is_link))", "docstring": "Return the file text and processed absolute links.\n\nArgs:\nfile: A file object containing the document.\nencoding (str): The encoding of the document.\nbase_url (str): The URL at which the document is located.\n\nReturns:\niterator: Each item is a tuple:\n\n1. str: The text\n2. bool: Whether the text a link", "source": "codesearchnet"}
{"code": "def process_filter_directive(filter_operation_info, location, context):\n    (op_name, operator_params) = _get_filter_op_name_and_values(filter_operation_info.directive)\n    non_comparison_filters = {u'name_or_alias': _process_name_or_alias_filter_directive, u'between': _process_between_filter_directive, u'in_collection': _process_in_collection_filter_directive, u'has_substring': _process_has_substring_filter_directive, u'contains': _process_contains_filter_directive, u'intersects': _process_intersects_filter_directive, u'has_edge_degree': _process_has_edge_degree_filter_directive}\n    all_recognized_filters = (frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS)\n    if (all_recognized_filters != ALL_OPERATORS):\n        unrecognized_filters = (ALL_OPERATORS - all_recognized_filters)\n        raise AssertionError(u'Some filtering operators are defined but do not have an associated processing function. This is a bug: {}'.format(unrecognized_filters))\n    if (op_name in COMPARISON_OPERATORS):\n        process_func = partial(_process_comparison_filter_directive, operator=op_name)\n    else:\n        process_func = non_comparison_filters.get(op_name, None)\n    if (process_func is None):\n        raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name))\n    if ((filter_operation_info.field_name is None) and (op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS)):\n        raise GraphQLCompilationError(u'The filter with op_name \"{}\" must be applied on a field. It may not be applied on a type coercion.'.format(op_name))\n    fields = ((filter_operation_info.field_name,) if (op_name != 'name_or_alias') else ('name', 'alias'))\n    context['metadata'].record_filter_info(location, FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params)))\n    return process_func(filter_operation_info, location, context, operator_params)", "docstring": "Return a Filter basic block that corresponds to the filter operation in the directive.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\n\nReturns:\na Filter basic block that performs the requested filtering operation", "source": "codesearchnet"}
{"code": "def write(self,\n              x: int,\n              y: int,\n              text: str,\n              transposed_text: 'Optional[str]' = None):\n        \n        entry = self.entries.get((x, y), _DiagramText('', ''))\n        self.entries[(x, y)] = _DiagramText(\n            entry.text + text,\n            entry.transposed_text + (transposed_text if transposed_text\n                                     else text))", "docstring": "Adds text to the given location.\n\nArgs:\nx: The column in which to write the text.\ny: The row in which to write the text.\ntext: The text to write at location (x, y).\ntransposed_text: Optional text to write instead, if the text\ndiagram is transposed.", "source": "juraj-google-style"}
{"code": "def map_concepts_to_indicators(self, n: int=1, min_temporal_res: Optional[str]=None):\n    for node in self.nodes(data=True):\n        query_parts = ['select Indicator from concept_to_indicator_mapping', f\"where `Concept` like '{node[0]}'\"]\n        query = '  '.join(query_parts)\n        results = engine.execute(query)\n        if (min_temporal_res is not None):\n            if (min_temporal_res not in ['month']):\n                raise ValueError(\"min_temporal_res must be 'month'\")\n            vars_with_required_temporal_resolution = [r[0] for r in engine.execute(f'select distinct `Variable` from indicator where `{min_temporal_res.capitalize()}` is not null')]\n            results = [r for r in results if (r[0] in vars_with_required_temporal_resolution)]\n        node[1]['indicators'] = {x: Indicator(x, 'MITRE12') for x in [r[0] for r in take(n, results)]}", "docstring": "Map each concept node in the AnalysisGraph instance to one or more\ntangible quantities, known as 'indicators'.\n\nArgs:\nn: Number of matches to keep\nmin_temporal_res: Minimum temporal resolution that the indicators\nmust have data for.", "source": "codesearchnet"}
{"code": "def alltoall(self, x, mesh_axis, split_axis, concat_axis):\n    \n    x = x.to_laid_out_tensor()\n    t = x.one_slice\n    group_assignment = self._create_group_assignment([mesh_axis])\n    dtype = t.dtype\n    if dtype == tf.float32:\n      \n      \n      \n      t = tf.to_bfloat16(t)\n    t = tpu_ops.all_to_all(\n        t,\n        concat_dimension=concat_axis,\n        split_dimension=split_axis,\n        split_count=len(group_assignment[0]),\n        group_assignment=group_assignment)\n    t = tf.cast(t, dtype)\n    x = self.LaidOutTensor([t])\n    return x", "docstring": "Grouped alltoall (like MPI alltoall with splitting and concatenation).\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer the mesh axis along which to group\nsplit_axis: an integer (the Tensor axis along which to split)\nconcat_axis: an integer (the Tensor axis along which to concatenate)\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def precompute_edge_matrices(adjacency, hparams):\n    (batch_size, num_nodes, _, edge_dim) = common_layers.shape_list(adjacency)\n    with tf.variable_scope('edge_network'):\n        x = tf.reshape(adjacency, [((batch_size * num_nodes) * num_nodes), edge_dim], name='adj_reshape_in')\n        for ip_layer in range(hparams.edge_network_layers):\n            name = ('edge_network_layer_%d' % ip_layer)\n            x = tf.layers.dense(common_layers.layer_preprocess(x, hparams), hparams.edge_network_hidden_size, activation=tf.nn.relu, name=name)\n        x = tf.layers.dense(common_layers.layer_preprocess(x, hparams), (hparams.hidden_size ** 2), activation=None, name='edge_network_output')\n    edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes, num_nodes, hparams.hidden_size, hparams.hidden_size])\n    edge_matrices = tf.reshape(tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [(- 1), (num_nodes * hparams.hidden_size), (num_nodes * hparams.hidden_size)], name='edge_matrices')\n    return edge_matrices", "docstring": "Precompute the a_in and a_out tensors.\n\n(we don't want to add to the graph everytime _fprop is called)\nArgs:\nadjacency: placeholder of real valued vectors of shape [B, L, L, E]\nhparams: HParams object\nReturns:\nedge_matrices: [batch, L * D, L * D] the dense matrix for message passing\nviewed as a block matrix (L,L) blocks of size (D,D). Each plot is a function\nof the edge vector of the adjacency matrix at that spot.", "source": "codesearchnet"}
{"code": "def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):\n    \n    shortest_distance = 257*257*3  \n    index = 0                      \n    if not color_table:\n        if not color_table8:\n            build_color_tables()\n        color_table = color_table8\n\n    for i, values in enumerate(color_table):\n        rd = r - values[0]\n        gd = g - values[1]\n        bd = b - values[2]\n\n        this_distance = (rd * rd) + (gd * gd) + (bd * bd)\n\n        if this_distance < shortest_distance:  \n            index = i\n            shortest_distance = this_distance\n\n    return index", "docstring": "Given three integers representing R, G, and B,\nreturn the nearest color index.\n\nArguments:\nr:    int - of range 0…255\ng:    int - of range 0…255\nb:    int - of range 0…255\n\nReturns:\nint, None: index, or None on error.", "source": "juraj-google-style"}
{"code": "def be2le_state_by_state(tpm):\n    \n    le = np.empty(tpm.shape)\n    N = tpm.shape[0]\n    n = int(log2(N))\n    for i in range(N):\n        le[i, :] = tpm[be2le(i, n), :]\n    return le", "docstring": "Convert a state-by-state TPM from big-endian to little-endian or vice\nversa.\n\nArgs:\ntpm (np.ndarray): A state-by-state TPM.\n\nReturns:\nnp.ndarray: The state-by-state TPM in the other indexing format.\n\nExample:\n>>> tpm = np.arange(16).reshape([4, 4])\n>>> be2le_state_by_state(tpm)\narray([[ 0.,  1.,  2.,  3.],\n[ 8.,  9., 10., 11.],\n[ 4.,  5.,  6.,  7.],\n[12., 13., 14., 15.]])", "source": "juraj-google-style"}
{"code": "def cluster_spec(self):\n    tf_config = _load_tf_config()\n    if 'cluster' not in tf_config:\n        return ClusterSpec({})\n    return ClusterSpec(tf_config['cluster'])", "docstring": "Returns a ClusterSpec based on the TF_CONFIG environment variable.\n\nReturns:\nA ClusterSpec with information from the TF_CONFIG environment variable.", "source": "github-repos"}
{"code": "def _InternalUnpackAny(msg):\n    type_url = msg.type_url\n    db = symbol_database.Default()\n    if (not type_url):\n        return None\n    type_name = type_url.split('/')[(- 1)]\n    descriptor = db.pool.FindMessageTypeByName(type_name)\n    if (descriptor is None):\n        return None\n    message_class = db.GetPrototype(descriptor)\n    message = message_class()\n    message.ParseFromString(msg.value)\n    return message", "docstring": "Unpacks Any message and returns the unpacked message.\n\nThis internal method is differnt from public Any Unpack method which takes\nthe target message as argument. _InternalUnpackAny method does not have\ntarget message type and need to find the message type in descriptor pool.\n\nArgs:\nmsg: An Any message to be unpacked.\n\nReturns:\nThe unpacked message.", "source": "codesearchnet"}
{"code": "def v_cross(u, v):\n    \n    \n\n    i = '(({u1})*({v2}) - ({u2})*({v1}))'.format(u1=u[1], u2=u[2], v1=v[1], v2=v[2])\n    j = '(({u2})*({v0}) - ({u0})*({v2}))'.format(u0=u[0], u2=u[2], v0=v[0], v2=v[2])\n    k = '(({u0})*({v1}) - ({u1})*({v0}))'.format(u0=u[0], u1=u[1], v0=v[0], v1=v[1])\n    return [i, j, k]", "docstring": "muparser cross product function\n\nCompute the cross product of two 3x1 vectors\n\nArgs:\nu (list or tuple of 3 strings): first vector\nv (list or tuple of 3 strings): second vector\nReturns:\nA list containing a muparser string of the cross product", "source": "juraj-google-style"}
{"code": "def _replacer(self, match: re.Match[str], is_verbatim: bool, is_global: bool) -> str:\n    symbol_name = match.group(0)\n    if symbol_name in self._local_symbol_replacement_cache:\n        return self._local_symbol_replacement_cache[symbol_name]\n    if symbol_name in self._global_symbol_replacement_cache:\n        return self._global_symbol_replacement_cache[symbol_name]\n    if is_verbatim:\n        declaration_replacement = symbol_name\n        reference_replacement = symbol_name\n    else:\n        capture_name = self._generate_unique_name(symbol_name)\n        capture_pattern = '[^ ]+'\n        maybe_global_flag = '$' if is_global else ''\n        declaration_replacement = f'[[{maybe_global_flag}{capture_name}:{capture_pattern}]]'\n        reference_replacement = f'[[{maybe_global_flag}{capture_name}]]'\n    if is_global:\n        self._global_symbol_replacement_cache[symbol_name] = reference_replacement\n    else:\n        self._local_symbol_replacement_cache[symbol_name] = reference_replacement\n    return declaration_replacement", "docstring": "A symbol-name replacement function for use in `re.sub`.\n\nArgs:\nmatch: The match object produced by `self._SYMBOL_NAME_REGEX`.\nis_verbatim: Whether the newly matched symbol appears in a \"CHECK-LABEL\"\ndirective, in which case it should be checked verbatim (not replaced\nwith a regex capture).\nis_global: Whether the newly matched symbol appears in a declaration at\nglobal scope, i.e. whether it's a function name. If so, it should be\nremembered across function boundaries.\n\nReturns:\nThe replacement string for the symbol name.", "source": "github-repos"}
{"code": "def from_string(contents):\n        \n        if contents[-1] != \"\\n\":\n            contents += \"\\n\"\n        white_space = r\"[ \\t\\r\\f\\v]\"\n        natoms_line = white_space + r\"*\\d+\" + white_space + r\"*\\n\"\n        comment_line = r\"[^\\n]*\\n\"\n        coord_lines = r\"(\\s*\\w+\\s+[0-9\\-\\+\\.eEdD]+\\s+[0-9\\-\\+\\.eEdD]+\\s+[0-9\\-\\+\\.eEdD]+\\s*\\n)+\"\n        frame_pattern_text = natoms_line + comment_line + coord_lines\n        pat = re.compile(frame_pattern_text, re.MULTILINE)\n        mols = []\n        for xyz_match in pat.finditer(contents):\n            xyz_text = xyz_match.group(0)\n            mols.append(XYZ._from_frame_string(xyz_text))\n        return XYZ(mols)", "docstring": "Creates XYZ object from a string.\n\nArgs:\ncontents: String representing an XYZ file.\n\nReturns:\nXYZ object", "source": "juraj-google-style"}
{"code": "def visualize_instance_html(self, exp, label, div_name, exp_object_name, show_table=True, show_all=False):\n    if (not show_table):\n        return ''\n    weights = ([0] * len(self.feature_names))\n    for x in exp:\n        weights[x[0]] = x[1]\n    out_list = list(zip(self.exp_feature_names, self.feature_values, weights))\n    if (not show_all):\n        out_list = [out_list[x[0]] for x in exp]\n    ret = (u'\\n            %s.show_raw_tabular(%s, %d, %s);\\n        ' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name))\n    return ret", "docstring": "Shows the current example in a table format.\n\nArgs:\nexp: list of tuples [(id, weight), (id,weight)]\nlabel: label id (integer)\ndiv_name: name of div object to be used for rendering(in js)\nexp_object_name: name of js explanation object\nshow_table: if False, don't show table visualization.\nshow_all: if True, show zero-weighted features in the table.", "source": "codesearchnet"}
{"code": "def edit_distance(x, y):\n    \n\n    ret = layers_distance(x.layers, y.layers)\n    ret += Constant.KERNEL_LAMBDA * skip_connections_distance(\n        x.skip_connections, y.skip_connections\n    )\n    return ret", "docstring": "The distance between two neural networks.\nArgs:\nx: An instance of NetworkDescriptor.\ny: An instance of NetworkDescriptor\nReturns:\nThe edit-distance between x and y.", "source": "juraj-google-style"}
{"code": "def download(url):\n    \n    headers = {\"User-Agent\": USER_AGENT}\n    resp = requests.get(\n        url,\n        timeout=REQUEST_TIMEOUT,\n        headers=headers,\n        allow_redirects=True,\n        verify=False,\n    )\n\n    def decode(st, alt_encoding=None):\n        encodings = ['ascii', 'utf-8', 'iso-8859-1', 'iso-8859-15']\n\n        if alt_encoding:\n            if isinstance(alt_encoding, basestring):\n                encodings.append(alt_encoding)\n            else:\n                encodings.extend(alt_encoding)\n\n        for encoding in encodings:\n            try:\n                return st.encode(encoding).decode(\"utf-8\")\n            except UnicodeEncodeError, UnicodeDecodeError:\n                pass\n\n        raise UnicodeError('Could not find encoding.')\n\n    return decode(resp.text, resp.encoding)", "docstring": "Download `url` and return it as utf-8 encoded text.\n\nArgs:\nurl (str): What should be downloaded?\n\nReturns:\nstr: Content of the page.", "source": "juraj-google-style"}
{"code": "def __init__(self, node_def, op, message, error_code):\n        \n        super(OpError, self).__init__()\n        self._message = message\n        self._node_def = node_def\n        self._op = op\n        self._error_code = error_code", "docstring": "Creates a new `OpError` indicating that a particular op failed.\n\nArgs:\nnode_def: The `node_def_pb2.NodeDef` proto representing the op that\nfailed, if known; otherwise None.\nop: The `ops.Operation` that failed, if known; otherwise None.\nmessage: The message string describing the failure.\nerror_code: The `error_codes.Code` describing the error.", "source": "juraj-google-style"}
{"code": "def apply(self, func, **kwargs):\n    oid = self.oid\n    self.call_queue.append((func, kwargs))\n\n    def call_queue_closure(oid_obj, call_queues):\n        for (func, kwargs) in call_queues:\n            if isinstance(func, ray.ObjectID):\n                func = ray.get(func)\n            if isinstance(kwargs, ray.ObjectID):\n                kwargs = ray.get(kwargs)\n            oid_obj = func(oid_obj, **kwargs)\n        return oid_obj\n    oid = deploy_ray_func.remote(call_queue_closure, oid, kwargs={'call_queues': self.call_queue})\n    self.call_queue = []\n    return PyarrowOnRayFramePartition(oid)", "docstring": "Apply a function to the object stored in this partition.\n\nNote: It does not matter if func is callable or an ObjectID. Ray will\nhandle it correctly either way. The keyword arguments are sent as a\ndictionary.\n\nArgs:\nfunc: The function to apply.\n\nReturns:\nA RayRemotePartition object.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(EncryptedStreamFileSystem, self).__init__(resolver_context)\n    self._encryption_method = None", "docstring": "Initializes an encrypted file system.\n\nArgs:\nresolver_context (Context): a resolver context.", "source": "juraj-google-style"}
{"code": "def raisefrom(exc_type, message, exc):\n    \n    \n    if sys.version_info[:2] >= (3, 2):\n        six.raise_from(exc_type(message), exc)\n    else:\n        six.reraise(exc_type, '%s - %s' % (message, exc), sys.exc_info()[2])", "docstring": "Call Python 3 raise from or emulate it for Python 2\n\nArgs:\nexc_type (Any): Type of Exception\nmessage (str): Error message to display\nexc (BaseException): original exception\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def watch_printer(watch, value):\n    \n\n    print(\"({: 8} s) {}: {}\".format(value.raw_time, watch, value.value))", "docstring": "Print a watched value.\n\nArgs:\nwatch (DataStream): The stream that was watched\nvalue (IOTileReading): The value to was seen", "source": "juraj-google-style"}
{"code": "def list_parking(self, **kwargs):\n        \n        \n        url_args = {'lang': util.language_code(kwargs.get('lang'))}\n\n        \n        result = self.make_request('list_parking', url_args)\n\n        if not util.check_result(result):\n            return False, result.get('message', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'Data')\n        return True, [emtype.Parking(**a) for a in values]", "docstring": "Obtain a list of parkings.\n\nArgs:\nlang (str):  Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Parking]), or message\nstring in case of error.", "source": "juraj-google-style"}
{"code": "def get_distrib():\n    key = 'distrib'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    if err and FLAGS.debug:\n        print('Error in detecting distribution:\\n %s' % str(err))\n    return out.strip(b'\\n')", "docstring": "Retrieves distribution name of the operating system.\n\nReturns:\nString that is the name of distribution.\ne.g. 'Ubuntu'", "source": "github-repos"}
{"code": "def CopyRecord(record, **field_overrides):\n    \n\n    fields = field_overrides\n    for field in record.__slots__:\n        if field in field_overrides:\n            continue\n        value = getattr(record, field)\n        if isinstance(value, RecordClass):\n            \n            new_value = CopyRecord(value)\n        else:\n            new_value = copy.copy(value)\n        fields[field] = new_value\n\n    return type(record)(**fields)", "docstring": "Copies a record and its fields, recurses for any field that is a Record.\n\nFor records that have nested mutable fields, use copy.deepcopy.\n\nArgs:\nrecord: A Record instance to be copied.\n**field_overrides: Fields and their values to override in the new copy.\n\nReturns: A copy of the given record with any fields overridden.", "source": "juraj-google-style"}
{"code": "def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)\n    if match:\n        error(filename, linenum, 'build/explicit_make_pair', 4, 'For C++11-compatibility, omit template arguments from make_pair OR use pair directly OR if appropriate, construct a pair directly')", "docstring": "Check that make_pair's template arguments are deduced.\n\nG++ 4.6 in C++11 mode fails badly if make_pair's template arguments are\nspecified explicitly, and such use isn't intended in any case.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def from_signature(cls, sig: inspect.Signature, name: str, callable_type: CallableType, module_name: Optional[str]=None, qualname: Optional[str]=None, auto_typing: bool=False, docstr: Union[str, utils.DocStr, None]=None, parent_module: Optional[types.ModuleType]=None) -> 'Signature':\n    args = []\n    kwonly_args = []\n    varargs = None\n    varkw = None\n    if isinstance(docstr, str):\n        docstr = utils.DocStr.parse(docstr)\n\n    def make_arg_spec(param: inspect.Parameter) -> Argument:\n        \n        docstr_arg = docstr.parameter(param) if docstr else None\n        return Argument.from_parameter(param, description=docstr_arg.description if docstr_arg else None, auto_typing=auto_typing, parent_module=parent_module)\n    for param in sig.parameters.values():\n        arg_spec = make_arg_spec(param)\n        if param.kind == inspect.Parameter.POSITIONAL_ONLY or param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:\n            args.append(arg_spec)\n        elif param.kind == inspect.Parameter.KEYWORD_ONLY:\n            kwonly_args.append(arg_spec)\n        elif param.kind == inspect.Parameter.VAR_POSITIONAL:\n            varargs = arg_spec\n        else:\n            assert param.kind == inspect.Parameter.VAR_KEYWORD, param.kind\n            varkw = arg_spec\n    return_value = None\n    if sig.return_annotation is not inspect.Parameter.empty:\n        return_value = class_schema.ValueSpec.from_annotation(sig.return_annotation, auto_typing=auto_typing, parent_module=parent_module)\n    return cls(callable_type=callable_type, name=name, module_name=module_name, qualname=qualname, description=docstr.short_description if docstr else None, args=args, kwonlyargs=kwonly_args, varargs=varargs, varkw=varkw, return_value=return_value)", "docstring": "Returns PyGlove signature from Python signature.\n\nArgs:\nsig: Python signature.\nname: Name of the entity (class name or function/method name).\ncallable_type: the type of this callable.\nmodule_name: Module name of the entity.\nqualname: (Optional) qualified name of the entity.\nauto_typing: If True, automatically convert argument annotations\nto PyGlove ValueSpec objects. Otherwise use pg.typing.Any()\nwith annotations.\ndocstr: (Optional) DocStr for this entity.\nparent_module: (Optional) Parent module from where the signature is\nderived. This is useful to infer classes with forward declarations.\n\nReturns:\nA PyGlove Signature object.", "source": "github-repos"}
{"code": "def from_dict(cls, d, identifier_str=None):\n        \n        \n        def _print_version(value):\n            return '.'.join(str(x) for x in value)\n\n        toks = str(d[\"serialize_version\"]).split('.')\n        load_ver = tuple(int(x) for x in toks)\n        curr_ver = ResolvedContext.serialize_version\n\n        if load_ver[0] > curr_ver[0]:\n            msg = [\"The context\"]\n            if identifier_str:\n                msg.append(\"in %s\" % identifier_str)\n            msg.append(\"was written by a newer version of Rez. The load may \"\n                       \"fail (serialize version %d > %d)\"\n                       % (_print_version(load_ver), _print_version(curr_ver)))\n            print >> sys.stderr, ' '.join(msg)\n\n        \n        r = ResolvedContext.__new__(ResolvedContext)\n        r.load_path = None\n        r.pre_resolve_bindings = None\n\n        r.timestamp = d[\"timestamp\"]\n        r.building = d[\"building\"]\n        r.caching = d[\"caching\"]\n        r.implicit_packages = [PackageRequest(x) for x in d[\"implicit_packages\"]]\n        r._package_requests = [PackageRequest(x) for x in d[\"package_requests\"]]\n        r.package_paths = d[\"package_paths\"]\n\n        r.rez_version = d[\"rez_version\"]\n        r.rez_path = d[\"rez_path\"]\n        r.user = d[\"user\"]\n        r.host = d[\"host\"]\n        r.platform = d[\"platform\"]\n        r.arch = d[\"arch\"]\n        r.os = d[\"os\"]\n        r.created = d[\"created\"]\n        r.verbosity = d.get(\"verbosity\", 0)\n\n        r.status_ = ResolverStatus[d[\"status\"]]\n        r.failure_description = d[\"failure_description\"]\n\n        r.solve_time = d[\"solve_time\"]\n        r.load_time = d[\"load_time\"]\n\n        r.graph_string = d[\"graph\"]\n        r.graph_ = None\n\n        r._resolved_packages = []\n        for d_ in d[\"resolved_packages\"]:\n            variant_handle = d_\n            if load_ver < (4, 0):\n                \n                from rez.utils.backcompat import convert_old_variant_handle\n                variant_handle = convert_old_variant_handle(variant_handle)\n\n            variant = get_variant(variant_handle)\n            variant.set_context(r)\n            r._resolved_packages.append(variant)\n\n        \n\n        r.requested_timestamp = d.get(\"requested_timestamp\", 0)\n\n        \n\n        r.parent_suite_path = d.get(\"parent_suite_path\")\n        r.suite_context_name = d.get(\"suite_context_name\")\n\n        \n\n        r.default_patch_lock = PatchLock[d.get(\"default_patch_lock\", \"no_lock\")]\n        patch_locks = d.get(\"patch_locks\", {})\n        r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks)\n\n        \n\n        r.from_cache = d.get(\"from_cache\", False)\n\n        \n\n        data = d.get(\"package_filter\", [])\n        r.package_filter = PackageFilterList.from_pod(data)\n\n        \n\n        data = d.get(\"package_orderers\")\n        if data:\n            r.package_orderers = [package_order.from_pod(x) for x in data]\n        else:\n            r.package_orderers = None\n\n        \n\n        r.num_loaded_packages = d.get(\"num_loaded_packages\", -1)\n\n        \n        if config.context_tracking_host:\n            data = dict((k, v) for k, v in d.iteritems()\n                        if k in config.context_tracking_context_fields)\n\n            r._track_context(data, action=\"sourced\")\n\n        return r", "docstring": "Load a `ResolvedContext` from a dict.\n\nArgs:\nd (dict): Dict containing context 
data.\nidentifier_str (str): String identifying the context, this is only\nused to display in an error string if a serialization version\nmismatch is detected.\n\nReturns:\n`ResolvedContext` object.", "source": "juraj-google-style"}
{"code": "def solve_fba(self, objective):\n    self._prob.set_objective(self._v_wt[objective])\n    return self._solve(lp.ObjectiveSense.Maximize)", "docstring": "Solve the wild type problem using FBA.\n\nArgs:\nobjective: The objective reaction to be maximized.\n\nReturns:\nThe LP Result object for the solved FBA problem.", "source": "codesearchnet"}
{"code": "def read_tables(fstream):\n    table = read_table(fstream)\n    while (table is not None):\n        (yield table)\n        table = read_table(fstream)", "docstring": "Read all tables from likwid's file stream.\n\nArgs:\nfstream: Likwid's output file stream.\n\nReturns:\nA generator that can be used to iterate over all tables in the fstream.", "source": "codesearchnet"}
{"code": "def get_shape(x: tf.Tensor, name: Optional[str]=None) -> Union[tf.TensorShape, types.IntTensor]:\n    name = 'get_shape' if name is None else name\n    with tf.name_scope(name):\n        x = tf.convert_to_tensor(x)\n        is_fully_defined = x.shape.is_fully_defined()\n        if is_fully_defined:\n            return x.shape\n        return tf.shape(x)", "docstring": "Returns static shape of `x` if it is fully defined, or dynamic, otherwise.\n\n####Example\n```python\nimport tensorflow as tf\nimport tf_quant_finance as tff\n\nx = tf.zeros([5, 2])\nprefer_static_shape(x)\n# Expected: [5, 2]\n\nArgs:\nx: A tensor of any shape and `dtype`\nname: Python string. The name to give to the ops created by this function.\nDefault value: `None` which maps to the default name\n`get_shape`.\n\nReturns:\nA shape of `x` which a list, if the shape is fully defined, or a `Tensor`\nfor dynamically shaped `x`.", "source": "github-repos"}
{"code": "def recipe_to_colab(name, description, instructions, tasks, parameters={}, project=None, client_credentials=None, user_credentials=None, service_credentials=None):\n    colab = Colab(name)\n    colab.header(name)\n    colab.paragraph(description)\n    colab.header('License')\n    colab.paragraph(textwrap.dedent('\\n    Copyright 2020 Google LLC,\\n\\n    Licensed under the Apache License, Version 2.0 (the \"License\");\\n    you may not use this file except in compliance with the License.\\n    You may obtain a copy of the License at\\n\\n      https:\n    colab.header('Disclaimer')\n    colab.paragraph('This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.')\n    colab.paragraph(textwrap.dedent('\\n    This code generated (see starthinker/scripts for possible source):\\n      - **Command**: \"python starthinker_ui/manage.py colab\"\\n      - **Command**: \"python starthinker/tools/colab.py [JSON RECIPE]\"\\n  '))\n    colab.header('1. Install Dependencies')\n    colab.paragraph('First install the libraries needed to execute recipes, this only needs to be done once, then click play.')\n    colab.code('!pip install git+https:\n    colab.header('2. Set Configuration')\n    colab.paragraph(textwrap.dedent('\\n    This code is required to initialize the project. Fill in required fields and press play.\\n\\n    1. If the recipe uses a Google Cloud Project:\\n      - Set the configuration **project** value to the project identifier from [these instructions](https:\n    colab.code('from starthinker.util.configuration import Configuration')\n    colab.code('')\n    colab.code(textwrap.dedent('\\n    CONFIG = Configuration(\\n      project=\"\",\\n      client={},\\n      service={},\\n      user=\"/content/user.json\",\\n      verbose=True\\n    )\\n  '))\n    fields = json_get_fields(tasks)\n    if fields:\n        colab.header('3. Enter %s Recipe Parameters' % name)\n        colab.list(instructions)\n        colab.paragraph('Modify the values below for your use case, can be done multiple times, then click play.')\n        colab.code('FIELDS = %s' % fields_to_string(fields, parameters))\n        colab.code('\\nprint(\"Parameters Set To: %s\" % FIELDS)')\n    colab.header('%d. Execute %s' % (4 if fields else 3, name))\n    colab.paragraph('This does NOT need to be modified unless you are changing the recipe, click play.')\n    colab.code('from starthinker.util.configuration import execute')\n    colab.code('from starthinker.util.recipe import json_set_fields')\n    colab.code('')\n    colab.code('TASKS = %s' % dict_to_string(tasks, skip=('field',)))\n    colab.code('')\n    if fields:\n        colab.code('json_set_fields(TASKS, FIELDS)')\n    colab.code('')\n    colab.code('execute(CONFIG, TASKS, force=True)')\n    return colab.render()", "docstring": "Converts a JSON recipe into a Jupyter Notebook for Colabs.\n\nSets up multiple steps to execute recipe:\n1. Install starthinker from repository\n2. Get Cloud Project ID.\n3. Get Client Credentials ( optional if User Credentials exist ).\n4. Enter Recipe parameters if fields present.\n5. 
Execute recipe tasks.\n\nArgs:\n* name: (string) The name of the notebook.\n* description: (string) A description fo the recipe.\n* instructions: (string) Recipe manual instructions, for example connecting datastudios.\n* tasks: (list) The task JSON to execute.\n* parameters: (dict) Values for field parameters in tasks, optional.\n* project: (string) The GCP project id.\n* client_credentials: (string) The GCP Desktop Client Credentials in JSON string.\n* user_credentials: (string) Not used, placeholder.\n* service_credentials: (string) Not used, placeholder.\n\nReturns:\n* (string) Rendered notebook source code to be written to a ipynb file.", "source": "github-repos"}
{"code": "def __init__(self, time_elements_tuple=None):\n    \n    super(TimeElements, self).__init__()\n    self._number_of_seconds = None\n    self._precision = definitions.PRECISION_1_SECOND\n    self._time_elements_tuple = time_elements_tuple\n\n    if time_elements_tuple:\n      if len(time_elements_tuple) < 6:\n        raise ValueError((\n            'Invalid time elements tuple at least 6 elements required,'\n            'got: {0:d}').format(len(time_elements_tuple)))\n\n      self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n          *time_elements_tuple)", "docstring": "Initializes time elements.\n\nArgs:\ntime_elements_tuple (Optional[tuple[int, int, int, int, int, int]]):\ntime elements, contains year, month, day of month, hours, minutes and\nseconds.\n\nRaises:\nValueError: if the time elements tuple is invalid.", "source": "juraj-google-style"}
{"code": "def readInput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):\n    self.project_directory = directory\n    with tmp_chdir(directory):\n        session.add(self)\n        self.read(directory, projectFileName, session, spatial, spatialReferenceID)\n        if (spatialReferenceID is None):\n            spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)\n        replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)\n        self._readXput(self.INPUT_FILES, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)\n        self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)\n        self._commit(session, self.COMMIT_ERROR_MESSAGE)", "docstring": "Read only input files for a GSSHA project into the database.\n\nUse this method to read a project when only pre-processing tasks need to be performed.\n\nArgs:\ndirectory (str): Directory containing all GSSHA model files. This method assumes that all files are located\nin the same directory.\nprojectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nspatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.\nDefaults to False.\nspatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is\nprovided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,\ndefault srid will be used (4326 for WGS 84).", "source": "codesearchnet"}
{"code": "def function(inputs, outputs, updates=None, name=None, **kwargs):\n    if ops.executing_eagerly_outside_functions():\n        if kwargs:\n            raise ValueError('Session keyword arguments are not supported during eager execution. You passed: %s' % (kwargs,))\n        if updates:\n            raise ValueError('`updates` argument is not supported during eager execution. You passed: %s' % (updates,))\n        from tensorflow.python.keras import models\n        from tensorflow.python.keras.utils import tf_utils\n        model = models.Model(inputs=inputs, outputs=outputs)\n        wrap_outputs = isinstance(outputs, list) and len(outputs) == 1\n\n        def func(model_inputs):\n            outs = model(model_inputs)\n            if wrap_outputs:\n                outs = [outs]\n            return tf_utils.sync_to_numpy_or_python_type(outs)\n        return func\n    if kwargs:\n        for key in kwargs:\n            if key not in tf_inspect.getfullargspec(session_module.Session.run)[0] and key not in ['inputs', 'outputs', 'updates', 'name']:\n                msg = 'Invalid argument \"%s\" passed to K.function with TensorFlow backend' % key\n                raise ValueError(msg)\n    return GraphExecutionFunction(inputs, outputs, updates=updates, name=name, **kwargs)", "docstring": "Instantiates a Keras function.\n\nArgs:\ninputs: List of placeholder tensors.\noutputs: List of output tensors.\nupdates: List of update ops.\nname: String, name of function.\n**kwargs: Passed to `tf.Session.run`.\n\nReturns:\nOutput values as Numpy arrays.\n\nRaises:\nValueError: if invalid kwargs are passed in or if in eager execution.", "source": "github-repos"}
{"code": "def _handle_offset_response(self, future, response):\n        \n        timestamp_offset_map = {}\n        for topic, part_data in response.topics:\n            for partition_info in part_data:\n                partition, error_code = partition_info[:2]\n                partition = TopicPartition(topic, partition)\n                error_type = Errors.for_code(error_code)\n                if error_type is Errors.NoError:\n                    if response.API_VERSION == 0:\n                        offsets = partition_info[2]\n                        assert len(offsets) <= 1, 'Expected OffsetResponse with one offset'\n                        if not offsets:\n                            offset = UNKNOWN_OFFSET\n                        else:\n                            offset = offsets[0]\n                        log.debug(\"Handling v0 ListOffsetResponse response for %s. \"\n                                  \"Fetched offset %s\", partition, offset)\n                        if offset != UNKNOWN_OFFSET:\n                            timestamp_offset_map[partition] = (offset, None)\n                    else:\n                        timestamp, offset = partition_info[2:]\n                        log.debug(\"Handling ListOffsetResponse response for %s. \"\n                                  \"Fetched offset %s, timestamp %s\",\n                                  partition, offset, timestamp)\n                        if offset != UNKNOWN_OFFSET:\n                            timestamp_offset_map[partition] = (offset, timestamp)\n                elif error_type is Errors.UnsupportedForMessageFormatError:\n                    \n                    \n                    log.debug(\"Cannot search by timestamp for partition %s because the\"\n                              \" message format version is before 0.10.0\", partition)\n                elif error_type is Errors.NotLeaderForPartitionError:\n                    log.debug(\"Attempt to fetch offsets for partition %s failed due\"\n                              \" to obsolete leadership information, retrying.\",\n                              partition)\n                    future.failure(error_type(partition))\n                    return\n                elif error_type is Errors.UnknownTopicOrPartitionError:\n                    log.warning(\"Received unknown topic or partition error in ListOffset \"\n                             \"request for partition %s. The topic/partition \" +\n                             \"may not exist or the user may not have Describe access \"\n                             \"to it.\", partition)\n                    future.failure(error_type(partition))\n                    return\n                else:\n                    log.warning(\"Attempt to fetch offsets for partition %s failed due to:\"\n                                \" %s\", partition, error_type)\n                    future.failure(error_type(partition))\n                    return\n        if not future.is_done:\n            future.success(timestamp_offset_map)", "docstring": "Callback for the response of the list offset call above.\n\nArguments:\nfuture (Future): the future to update based on response\nresponse (OffsetResponse): response from the server\n\nRaises:\nAssertionError: if response does not match partition", "source": "juraj-google-style"}
{"code": "def api_keys(self, serverid, api_key):\n        \n        if serverid and api_key:\n            self.can_query = True \n        self.serverid = int(serverid)\n        self.api_key = api_key\n        self.webhook_url = self.__base_url + str(self.serverid) + '/' + self.api_key", "docstring": "Load object with id/API pair\n\nArgs:\nserverid (int): Discord 'guild' webhook is attached to\napi_key (`str`:uuid): unique ID for webhook", "source": "juraj-google-style"}
{"code": "def generate_name_variations(name):\n\n    def _update_name_variations_with_product(set_a, set_b):\n        name_variations.update([unidecode(((names_variation[0] + separator) + names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower() for names_variation in product(set_a, set_b) for separator in _LASTNAME_NON_LASTNAME_SEPARATORS])\n    parsed_name = ParsedName.loads(name)\n    if (len(parsed_name) == 1):\n        return [parsed_name.dumps().lower()]\n    name_variations = set()\n    non_lastnames = [non_lastname for non_lastname in (parsed_name.first_list + parsed_name.suffix_list) if non_lastname]\n    if ((len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD) or (len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD)):\n        LOGGER.error('Skipping name variations generation - too many names in: \"%s\"', name)\n        return [name]\n    non_lastnames_variations = _generate_non_lastnames_variations(non_lastnames)\n    lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)\n    _update_name_variations_with_product(lastnames_variations, non_lastnames_variations)\n    _update_name_variations_with_product(non_lastnames_variations, lastnames_variations)\n    return list(name_variations)", "docstring": "Generate name variations for a given name.\n\nArgs:\nname (six.text_type): The name whose variations are to be generated.\n\nReturns:\nlist: All the name variations for the given name.\n\nNotes:\nUses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map\nboth full names of authors in HEP records and user's input to the same space and thus make exact queries work.", "source": "codesearchnet"}
{"code": "def create_s3_bucket(cls, bucket_name, bucket_region, bucket_account, template):\n        \n        s3 = get_aws_session(bucket_account).client('s3', region_name=bucket_region)\n\n        \n        try:\n            s3.head_bucket(Bucket=bucket_name)\n        except ClientError as ex:\n            status_code = ex.response['ResponseMetadata']['HTTPStatusCode']\n\n            \n            if status_code == 403:\n                raise Exception('Bucket {} already exists but we do not have access to it and so cannot continue'.format(\n                    bucket_name\n                ))\n\n            \n            elif status_code == 404:\n                try:\n                    s3.create_bucket(\n                        Bucket=bucket_name,\n                        CreateBucketConfiguration={\n                            'LocationConstraint': bucket_region\n                        }\n                    )\n\n                    auditlog(\n                        event='cloudtrail.create_s3_bucket',\n                        actor=cls.ns,\n                        data={\n                            'account': bucket_account.account_name,\n                            'bucket_region': bucket_region,\n                            'bucket_name': bucket_name\n                        }\n                    )\n                except Exception:\n                    raise Exception('An error occured while trying to create the bucket, cannot continue')\n\n        try:\n            bucket_acl = template.render(\n                bucket_name=bucket_name,\n                account_id=bucket_account.account_number\n            )\n            s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_acl)\n\n        except Exception as ex:\n            raise Warning('An error occurred while setting bucket policy: {}'.format(ex))", "docstring": "Creates the S3 bucket on the account specified as the destination account for log files\n\nArgs:\nbucket_name (`str`): Name of the S3 bucket\nbucket_region (`str`): AWS Region for the bucket\nbucket_account (:obj:`Account`): Account to create the S3 bucket in\ntemplate (:obj:`Template`): Jinja2 Template object for the bucket policy\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def get_pixel(self, x: int, y: int) -> Tuple[int, int, int]:\n        \n        color = lib.TCOD_image_get_pixel(self.image_c, x, y)\n        return color.r, color.g, color.b", "docstring": "Get the color of a pixel in this Image.\n\nArgs:\nx (int): X pixel of the Image.  Starting from the left at 0.\ny (int): Y pixel of the Image.  Starting from the top at 0.\n\nReturns:\nTuple[int, int, int]:\nAn (r, g, b) tuple containing the pixels color value.\nValues are in a 0 to 255 range.", "source": "juraj-google-style"}
{"code": "def add_defaults(self, ctype: ContentType = None) -> \"InstanceNode\":\n        \n        val = self.value\n        if not (isinstance(val, StructuredValue) and self.is_internal()):\n            return self\n        res = self\n        if isinstance(val, ObjectValue):\n            if val:\n                for mn in self._member_names():\n                    m = res._member(mn) if res is self else res.sibling(mn)\n                    res = m.add_defaults(ctype)\n                res = res.up()\n            return self.schema_node._add_defaults(res, ctype)\n        if not val:\n            return res\n        en = res[0]\n        while True:\n            res = en.add_defaults(ctype)\n            try:\n                en = res.next()\n            except NonexistentInstance:\n                break\n        return res.up()", "docstring": "Return the receiver with defaults added recursively to its value.\n\nArgs:\nctype: Content type of the defaults to be added. If it is\n``None``, the content type will be the same as receiver's.", "source": "juraj-google-style"}
{"code": "def mnist_model(image, labels, mesh):\n    batch_dim = mtf.Dimension('batch', FLAGS.batch_size)\n    row_blocks_dim = mtf.Dimension('row_blocks', 4)\n    col_blocks_dim = mtf.Dimension('col_blocks', 4)\n    rows_dim = mtf.Dimension('rows_size', 7)\n    cols_dim = mtf.Dimension('cols_size', 7)\n    classes_dim = mtf.Dimension('classes', 10)\n    one_channel_dim = mtf.Dimension('one_channel', 1)\n    x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 4, 7, 4, 7, 1]), mtf.Shape([batch_dim, row_blocks_dim, rows_dim, col_blocks_dim, cols_dim, one_channel_dim]))\n    x = mtf.transpose(x, [batch_dim, row_blocks_dim, col_blocks_dim, rows_dim, cols_dim, one_channel_dim])\n    fh_dim = mtf.Dimension('fh', 9)\n    fw_dim = mtf.Dimension('fw', 9)\n    filters1_dim = mtf.Dimension('filters1', 16)\n    filters2_dim = mtf.Dimension('filters2', 16)\n    kernel1 = mtf.get_variable(mesh, 'kernel1', [fh_dim, fw_dim, one_channel_dim, filters1_dim])\n    kernel2 = mtf.get_variable(mesh, 'kernel2', [fh_dim, fw_dim, filters1_dim, filters2_dim])\n    f1 = mtf.relu(mtf.conv2d_with_blocks(x, kernel1, strides=[1, 1, 1, 1], padding='SAME', h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim))\n    f2 = mtf.relu(mtf.conv2d_with_blocks(f1, kernel2, strides=[1, 1, 1, 1], padding='SAME', h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim))\n    x = mtf.reduce_mean(f2, reduced_dim=filters2_dim)\n    hidden_dim1 = mtf.Dimension('hidden1', FLAGS.hidden_size)\n    hidden_dim2 = mtf.Dimension('hidden2', FLAGS.hidden_size)\n    h1 = mtf.layers.dense(x, hidden_dim1, reduced_dims=x.shape.dims[(- 4):], activation=mtf.relu, name='hidden1')\n    h2 = mtf.layers.dense(h1, hidden_dim2, activation=mtf.relu, name='hidden2')\n    logits = mtf.layers.dense(h2, classes_dim, name='logits')\n    if (labels is None):\n        loss = None\n    else:\n        labels = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), mtf.Shape([batch_dim]))\n        loss = mtf.layers.softmax_cross_entropy_with_logits(logits, mtf.one_hot(labels, classes_dim), classes_dim)\n        loss = mtf.reduce_mean(loss)\n    return (logits, loss)", "docstring": "The model.\n\nArgs:\nimage: tf.Tensor with shape [batch, 28*28]\nlabels: a tf.Tensor with shape [batch] and dtype tf.int32\nmesh: a mtf.Mesh\n\nReturns:\nlogits: a mtf.Tensor with shape [batch, 10]\nloss: a mtf.Tensor with shape []", "source": "codesearchnet"}
{"code": "def set_charge_and_spin(self, charge, spin_multiplicity=None):\n        \n        self._charge = charge\n        nelectrons = 0\n        for site in self._sites:\n            for sp, amt in site.species.items():\n                if not isinstance(sp, DummySpecie):\n                    nelectrons += sp.Z * amt\n        nelectrons -= charge\n        self._nelectrons = nelectrons\n        if spin_multiplicity:\n            if (nelectrons + spin_multiplicity) % 2 != 1:\n                raise ValueError(\n                    \"Charge of {} and spin multiplicity of {} is\"\n                    \" not possible for this molecule\".format(\n                        self._charge, spin_multiplicity))\n            self._spin_multiplicity = spin_multiplicity\n        else:\n            self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2", "docstring": "Set the charge and spin multiplicity.\n\nArgs:\ncharge (int): Charge for the molecule. Defaults to 0.\nspin_multiplicity (int): Spin multiplicity for molecule.\nDefaults to None, which means that the spin multiplicity is\nset to 1 if the molecule has no unpaired electrons and to 2\nif there are unpaired electrons.", "source": "juraj-google-style"}
{"code": "def __init__(self, conf, map_name, automount_mountpoint=None):\n    super(Cache, self).__init__()\n    self.log = logging.getLogger(__name__)\n    self.conf = conf\n    self.output_dir = conf.get('dir', '.')\n    self.automount_mountpoint = automount_mountpoint\n    self.map_name = map_name\n    if map_name == config.MAP_PASSWORD:\n        self.data = passwd.PasswdMap()\n    elif map_name == config.MAP_SSHKEY:\n        self.data = sshkey.SshkeyMap()\n    elif map_name == config.MAP_GROUP:\n        self.data = group.GroupMap()\n    elif map_name == config.MAP_SHADOW:\n        self.data = shadow.ShadowMap()\n    elif map_name == config.MAP_NETGROUP:\n        self.data = netgroup.NetgroupMap()\n    elif map_name == config.MAP_AUTOMOUNT:\n        self.data = automount.AutomountMap()\n    else:\n        raise error.UnsupportedMap('Cache does not support %s' % map_name)", "docstring": "Initialise the Cache object.\n\nArgs:\nconf: A dictionary of key/value pairs\nmap_name: A string representation of the map type\nautomount_mountpoint: A string containing the automount mountpoint,\nused only by automount maps.\n\nRaises:\nUnsupportedMap: for map types we don't know about", "source": "github-repos"}
{"code": "def _bind_topics(self, topics):\n    self.client.subscribe(topics.status, self._on_status_message)\n    self.client.subscribe(topics.tracing, self._on_trace)\n    self.client.subscribe(topics.streaming, self._on_report)\n    self.client.subscribe(topics.response, self._on_response_message)", "docstring": "Subscribe to all the topics we need to communication with this device\n\nArgs:\ntopics (MQTTTopicValidator): The topic validator for this device that\nwe are connecting to.", "source": "codesearchnet"}
{"code": "def to_variable(self, node: 'cfg.CFGNode') -> 'cfg.Variable':\n    return self.ctx.program.NewVariable([self], source_set=[], where=node)", "docstring": "Build a variable out of this abstract value.\n\nArgs:\nnode: The current CFG node.\n\nReturns:\nA cfg.Variable.", "source": "github-repos"}
{"code": "def _setup_class(self):\n    class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS, self.TAG)\n    class_record.test_begin()\n    self.current_test_info = runtime_test_info.RuntimeTestInfo(STAGE_NAME_SETUP_CLASS, self.log_path, class_record)\n    expects.recorder.reset_internal_states(class_record)\n    try:\n        with self._log_test_stage(STAGE_NAME_SETUP_CLASS):\n            self.setup_class()\n    except signals.TestAbortSignal:\n        raise\n    except Exception as e:\n        logging.exception('Error in %s\n        class_record.test_error(e)\n        self.results.add_class_error(class_record)\n        self._exec_procedure_func(self._on_fail, class_record)\n        class_record.update_record()\n        self.summary_writer.dump(class_record.to_dict(), records.TestSummaryEntryType.RECORD)\n        self._skip_remaining_tests(e)\n        return self.results\n    if expects.recorder.has_error:\n        self._exec_procedure_func(self._on_fail, class_record)\n        class_record.test_error()\n        class_record.update_record()\n        self.summary_writer.dump(class_record.to_dict(), records.TestSummaryEntryType.RECORD)\n        self.results.add_class_error(class_record)\n        self._skip_remaining_tests(class_record.termination_signal.exception)\n        return self.results", "docstring": "Proxy function to guarantee the base implementation of setup_class\nis called.\n\nReturns:\nIf `self.results` is returned instead of None, this means something\nhas gone wrong, and the rest of the test class should not execute.", "source": "github-repos"}
{"code": "def TensorShapeProtoToList(shape):\n    return [dim.size for dim in shape.dim]", "docstring": "Convert a TensorShape to a list.\n\nArgs:\nshape: A TensorShapeProto.\n\nReturns:\nList of integers representing the dimensions of the tensor.", "source": "github-repos"}
{"code": "def BuildCampaignOperations(batch_job_helper, budget_operations, number_of_campaigns=1):\n    budget_id = budget_operations[0]['operand']['budgetId']\n    campaign_operations = [{'xsi_type': 'CampaignOperation', 'operand': {'name': ('Batch Campaign \n    return campaign_operations", "docstring": "Builds the operations needed to create a new Campaign.\n\nNote: When the Campaigns are created, they will have a different Id than those\ngenerated here as a temporary Id. This is just used to identify them in the\nBatchJobService.\n\nArgs:\nbatch_job_helper: a BatchJobHelper instance.\nbudget_operations: a list containing the operation that will add the budget\nused by these Campaigns.\nnumber_of_campaigns: an int number defining the number of campaigns to be\ncreated.\n\nReturns:\na list containing the operations to create the desired number of Campaigns.", "source": "codesearchnet"}
{"code": "def swo_start(self, swo_speed=9600):\n        \n        if self.swo_enabled():\n            self.swo_stop()\n\n        info = structs.JLinkSWOStartInfo()\n        info.Speed = swo_speed\n        res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.START,\n                                             ctypes.byref(info))\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        self._swo_enabled = True\n\n        return None", "docstring": "Starts collecting SWO data.\n\nNote:\nIf SWO is already enabled, it will first stop SWO before enabling it\nagain.\n\nArgs:\nself (JLink): the ``JLink`` instance\nswo_speed (int): the frequency in Hz used by the target to communicate\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def setup(self,\n            hunt_id,\n            reason, grr_server_url, grr_username, grr_password, approvers=None,\n            verify=True):\n    \n    super(GRRHuntDownloader, self).setup(\n        reason, grr_server_url, grr_username, grr_password,\n        approvers=approvers, verify=verify)\n    self.hunt_id = hunt_id\n    self.output_path = tempfile.mkdtemp()", "docstring": "Initializes a GRR Hunt file collector.\n\nArgs:\nhunt_id: Hunt ID to download results from.\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: comma-separated list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "juraj-google-style"}
{"code": "def __init__(self, email, password):\n        \n\n        self.email = email\n        self.password = password", "docstring": "Initialize the AMYLPRED object with your email and password used to login here.\n\nArgs:\nemail (str): Account email\npassword (str): Account password", "source": "juraj-google-style"}
{"code": "def render_to_terminal(self, array, cursor_pos=(0, 0)):\n    for_stdout = self.fmtstr_to_stdout_xform()\n    if (not self.hide_cursor):\n        self.write(self.t.hide_cursor)\n    (height, width) = (self.t.height, self.t.width)\n    if ((height != self._last_rendered_height) or (width != self._last_rendered_width)):\n        self.on_terminal_size_change(height, width)\n    current_lines_by_row = {}\n    rows_for_use = list(range(self.top_usable_row, height))\n    shared = min(len(array), len(rows_for_use))\n    for (row, line) in zip(rows_for_use[:shared], array[:shared]):\n        current_lines_by_row[row] = line\n        if (line == self._last_lines_by_row.get(row, None)):\n            continue\n        self.write(self.t.move(row, 0))\n        self.write(for_stdout(line))\n        if (len(line) < width):\n            self.write(self.t.clear_eol)\n    rest_of_lines = array[shared:]\n    rest_of_rows = rows_for_use[shared:]\n    for row in rest_of_rows:\n        if (self._last_lines_by_row and (row not in self._last_lines_by_row)):\n            continue\n        self.write(self.t.move(row, 0))\n        self.write(self.t.clear_eol)\n        self.write(self.t.clear_bol)\n        current_lines_by_row[row] = None\n    offscreen_scrolls = 0\n    for line in rest_of_lines:\n        self.scroll_down()\n        if (self.top_usable_row > 0):\n            self.top_usable_row -= 1\n        else:\n            offscreen_scrolls += 1\n        current_lines_by_row = dict((((k - 1), v) for (k, v) in current_lines_by_row.items()))\n        logger.debug(('new top_usable_row: %d' % self.top_usable_row))\n        self.write(self.t.move((height - 1), 0))\n        self.write(for_stdout(line))\n        current_lines_by_row[(height - 1)] = line\n    logger.debug(('lines in last lines by row: %r' % self._last_lines_by_row.keys()))\n    logger.debug(('lines in current lines by row: %r' % current_lines_by_row.keys()))\n    self._last_cursor_row = max(0, ((cursor_pos[0] - offscreen_scrolls) + self.top_usable_row))\n    self._last_cursor_column = cursor_pos[1]\n    self.write(self.t.move(self._last_cursor_row, self._last_cursor_column))\n    self._last_lines_by_row = current_lines_by_row\n    if (not self.hide_cursor):\n        self.write(self.t.normal_cursor)\n    return offscreen_scrolls", "docstring": "Renders array to terminal, returns the number of lines scrolled offscreen\n\nReturns:\nNumber of times scrolled\n\nArgs:\narray (FSArray): Grid of styled characters to be rendered.\n\nIf array received is of width too small, render it anyway\n\nif array received is of width too large, render it anyway\n\nif array received is of height too small, render it anyway\n\nif array received is of height too large, render it, scroll down,\nand render the rest of it, then return how much we scrolled down", "source": "codesearchnet"}
{"code": "def json(self) -> list:\n    json_controls = [control.json() for control in self.controls]\n    return json_controls", "docstring": "Returns list of json compatible states of the RichMessage instance\nnested controls.\n\nReturns:\njson_controls: Json representation of RichMessage instance\nnested controls.", "source": "codesearchnet"}
{"code": "def _can_connect(host, port=22):  \n    \n    try:\n        logger.debug('Testing connection to host %s', host)\n        client = paramiko.SSHClient()\n        client.load_system_host_keys()\n        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        client.connect(host,\n                       port=port)\n        client.close()\n        logger.info('Can connect to host %s', host)\n        return True\n    except Exception as e:\n        logger.info('Cannot connect to host %s', host)\n\n        logger.info('Connection failed with exception: \\n %s', str(e))\n        return False", "docstring": "Checks if the connection to provided ``host`` and ``port`` is possible or not.\nArgs:\nhost (str): Hostname for the host to check connection.\nport (int): Port name of the host to check connection on.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key: Any) -> 'ColumnExpressionBuilder':\n    item = self._builder[key]\n    if isinstance(item, expressions.Builder) and self._sealed:\n        raise self._fhir_path_sealed_error(key)\n    return ColumnExpressionBuilder._wrap_any(self, item)", "docstring": "Redirects to the expressions.Builder to get the item.\n\nArgs:\nkey: the key of the item.\n\nReturns:\nA ColumnExpressionBuilder, because the item got from the\nexpressions.Builder is always the type of Builder.\n\nRaises:\nAttributeError: if the FHIR path in this class is already sealed.\nTypeError: if getting the key from self._builder fails.", "source": "github-repos"}
{"code": "def inference(self, observed_arr):\n        \n        decoded_arr = self.__encoder_decoder_controller.inference(observed_arr)\n        encoded_arr = self.__encoder_decoder_controller.get_feature_points()\n        _ = self.__retrospective_encoder.inference(decoded_arr)\n        re_encoded_arr = self.__retrospective_encoder.get_feature_points()\n        self.__inferenced_tuple = (observed_arr, encoded_arr, decoded_arr, re_encoded_arr)\n        return re_encoded_arr", "docstring": "Infernece by the model.\n\nArgs:\nobserved_arr:       `np.ndarray` of observed data points.\n\nReturns:\n`np.ndarray` of inferenced feature points.", "source": "juraj-google-style"}
{"code": "def astype(self, col_dtypes, **kwargs):\n        \n        \n        dtype_indices = {}\n        columns = col_dtypes.keys()\n        numeric_indices = list(self.columns.get_indexer_for(columns))\n        \n        new_dtypes = self.dtypes.copy()\n        for i, column in enumerate(columns):\n            dtype = col_dtypes[column]\n            if (\n                not isinstance(dtype, type(self.dtypes[column]))\n                or dtype != self.dtypes[column]\n            ):\n                \n                if dtype in dtype_indices.keys():\n                    dtype_indices[dtype].append(numeric_indices[i])\n                else:\n                    dtype_indices[dtype] = [numeric_indices[i]]\n                \n                try:\n                    new_dtype = np.dtype(dtype)\n                except TypeError:\n                    new_dtype = dtype\n                if dtype != np.int32 and new_dtype == np.int32:\n                    new_dtype = np.dtype(\"int64\")\n                elif dtype != np.float32 and new_dtype == np.float32:\n                    new_dtype = np.dtype(\"float64\")\n                new_dtypes[column] = new_dtype\n        \n        new_data = self.data\n        for dtype in dtype_indices.keys():\n\n            def astype(df, internal_indices=[]):\n                block_dtypes = {}\n                for ind in internal_indices:\n                    block_dtypes[df.columns[ind]] = dtype\n                return df.astype(block_dtypes)\n\n            new_data = new_data.apply_func_to_select_indices(\n                0, astype, dtype_indices[dtype], keep_remaining=True\n            )\n        return self.__constructor__(new_data, self.index, self.columns, new_dtypes)", "docstring": "Converts columns dtypes to given dtypes.\n\nArgs:\ncol_dtypes: Dictionary of {col: dtype,...} where col is the column\nname and dtype is a numpy dtype.\n\nReturns:\nDataFrame with updated dtypes.", "source": "juraj-google-style"}
{"code": "def gradient_helper(optimizer, loss, var_list=None):\n    if (var_list is None):\n        var_list = tf.compat.v1.trainable_variables()\n    grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)\n    grads = [pair[0] for pair in grads_and_vars]\n    return (grads, optimizer.apply_gradients(grads_and_vars))", "docstring": "A helper to get the gradients out at each step.\n\nArgs:\noptimizer: the optimizer op.\nloss: the op that computes your loss value.\n\nReturns: the gradient tensors and the train_step op.", "source": "codesearchnet"}
{"code": "def save_as(self, new_filename):\n        \n        xfile._save_file(\n            self._filename, self._workbookTree, new_filename)", "docstring": "Save our file with the name provided.\n\nArgs:\nnew_filename:  New name for the workbook file. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def to_element(self, include_namespaces=False):\n        \n        \n        didl_item = DidlItem(\n            title=\"DUMMY\",\n            \n            parent_id=\"DUMMY\",  \n            item_id=self.item_id,\n            desc=self.desc,\n            resources=self.resources\n        )\n        return didl_item.to_element(include_namespaces=include_namespaces)", "docstring": "Return an ElementTree Element representing this instance.\n\nArgs:\ninclude_namespaces (bool, optional): If True, include xml\nnamespace attributes on the root element\n\nReturn:\n~xml.etree.ElementTree.Element: The (XML) Element representation of\nthis object", "source": "juraj-google-style"}
{"code": "def member_command(self, repl_id, member_id, command):\n        \n        repl = self[repl_id]\n        result = repl.member_command(member_id, command)\n        self[repl_id] = repl\n        return result", "docstring": "apply command(start, stop, restart) to the member of replica set\nArgs:\nrepl_id - replica set identity\nmember_id - member index\ncommand - command: start, stop, restart\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "def trace_region(self, region_index):\n        \n        cmd = enums.JLinkTraceCommand.GET_REGION_PROPS_EX\n        region = structs.JLinkTraceRegion()\n        region.RegionIndex = int(region_index)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(region))\n        if (res == 1):\n            raise errors.JLinkException('Failed to get trace region.')\n        return region", "docstring": "Retrieves the properties of a trace region.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nregion_index (int): the trace region index.\n\nReturns:\nAn instance of ``JLinkTraceRegion`` describing the specified region.", "source": "juraj-google-style"}
{"code": "def _det_large_enough_mask(x, det_bounds):\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  return tf.cast(tf.linalg.det(x) > det_bounds, dtype=x.dtype)", "docstring": "Returns whether the input matches the given determinant limit.\n\nArgs:\nx: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.\ndet_bounds: A floating-point `Tensor` that must broadcast to shape\n`[B1, ..., Bn]`, giving the desired lower bound on the\ndeterminants in `x`.\n\nReturns:\nmask: A floating-point `Tensor` of shape [B1, ..., Bn].  Each\nscalar is 1 if the corresponding matrix had determinant above\nthe corresponding bound, otherwise 0.", "source": "juraj-google-style"}
{"code": "def depth_soil_conductivity(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_soil_conductivity`'.format(value))\n    self._depth_soil_conductivity = value", "docstring": "Corresponds to IDD Field `depth_soil_conductivity`\n\nArgs:\nvalue (float): value for IDD Field `depth_soil_conductivity`\nUnit: W/m-K,\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def join(self) -> None:\n    self._server.join()", "docstring": "Blocks until the server has shut down.\n\nThis is useful when starting a dedicated worker process.\n\n```\nworker_server = tf.data.experimental.service.WorkerServer(\nport=5051, dispatcher_address=\"localhost:5050\")\nworker_server.join()\n```\n\nThis method currently blocks forever.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\njoining the server.", "source": "github-repos"}
{"code": "def _write_version(self, data, model):\n    vdata = {'data': data, 'key': model.key, 'model': model.Meta.bucket_name, 'timestamp': time.time()}\n    obj = version_bucket.new(data=vdata)\n    obj.add_index('key_bin', model.key)\n    obj.add_index('model_bin', vdata['model'])\n    obj.add_index('timestamp_int', int(vdata['timestamp']))\n    obj.store()\n    return obj.key", "docstring": "Writes a copy of the objects current state to write-once mirror bucket.\n\nArgs:\ndata (dict): Model instance's all data for versioning.\nmodel (instance): Model instance.\n\nReturns:\nKey of version record.\nkey (str): Version_bucket key.", "source": "codesearchnet"}
{"code": "def floor(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.floor, tf.float32)", "docstring": "Returns a TensorFluent for the floor function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the floor function.", "source": "juraj-google-style"}
{"code": "def parse_struct(path_dir):\n        \n        with open(os.path.join(path_dir, \"boltztrap.struct\"), 'r') as f:\n            tokens = f.readlines()\n            return Lattice([[Length(float(tokens[i].split()[j]), \"bohr\").\n                           to(\"ang\") for j in range(3)] for i in\n                            range(1, 4)]).volume", "docstring": "Parses boltztrap.struct file (only the volume)\nArgs:\npath_dir: (str) dir containing the boltztrap.struct file\n\nReturns:\n(float) volume", "source": "juraj-google-style"}
{"code": "def get_is_group_maintainer(self, grp_name, user):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_is_group_maintainer(grp_name, user)", "docstring": "Check if the given user is a member of the named group.\n\nArgs:\nname (string): Name of group.\nuser (string): User of interest.\n\nReturns:\n(bool): False if user not a member.", "source": "juraj-google-style"}
{"code": "def flatten_with_path(structure):\n    return tree_impl.flatten_with_path(structure)", "docstring": "Flattens a possibly nested structure into a list.\n\nThis is a variant of flattens() which produces a\nlist of pairs: `(path, item)`. A path is a tuple of indices and/or keys\nwhich uniquely identifies the position of the corresponding item.\n\nDictionaries with non-sortable keys are not supported.\n\nExamples:\n\n>>> keras.flatten_with_path([{\"foo\": 42}])\n[((0, 'foo'), 42)]\n\n\nArgs:\nstructure: An arbitrarily nested structure.\n\nReturns:\nA list of `(path, item)` pairs corresponding to the flattened\nversion of the input `structure`.", "source": "github-repos"}
{"code": "def __init__(self, shape=None, dtype=dtypes.float32, ragged_rank=None, row_splits_dtype=dtypes.int64, flat_values_spec=None):\n    self._shape = tensor_shape.as_shape(shape)\n    self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n    if flat_values_spec is not None:\n        if dtype is None:\n            dtype = flat_values_spec.dtype\n        elif dtype != flat_values_spec.dtype:\n            raise ValueError('dtype must be the same as flat_values_spec.dtype')\n    elif dtype is None:\n        raise ValueError('At least one of dtype or flat_values_spec must be provided')\n    self._dtype = dtypes.as_dtype(dtype)\n    self._flat_values_spec = flat_values_spec\n    rank = self._shape.ndims\n    if ragged_rank is None:\n        if rank is None:\n            raise ValueError('Must specify ragged_rank or a shape with a known rank.')\n        ragged_rank = rank - 1\n    self._ragged_rank = ragged_rank\n    if not isinstance(self._ragged_rank, int):\n        raise TypeError(f'Argument `ragged_rank` must be an int. Received {ragged_rank}.')\n    if rank is not None:\n        if ragged_rank >= rank:\n            raise ValueError(f'Argument `ragged_rank` ({ragged_rank}) must be less than rank ({rank}).')", "docstring": "Constructs a type specification for a `tf.RaggedTensor`.\n\nArgs:\nshape: The shape of the RaggedTensor, or `None` to allow any shape.  If a\nshape is specified, then all ragged dimensions must have size `None`.\ndtype: `tf.DType` of values in the RaggedTensor.\nragged_rank: Python integer, the number of times the RaggedTensor's\nflat_values is partitioned.  Defaults to `shape.ndims - 1`.\nrow_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One\nof `tf.int32` or `tf.int64`.\nflat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be\nprovided when the flat_values is a CompositeTensor rather then Tensor.\nIf both `dtype` and `flat_values_spec` and  are provided, `dtype` must\nbe the same as `flat_values_spec.dtype`. (experimental)", "source": "github-repos"}
{"code": "def read_int32(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%si' % endian), 4)", "docstring": "Read 4 bytes as a signed integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def delta_hv(scatterer):\n    Z = scatterer.get_Z()\n    return np.arctan2((Z[(2, 3)] - Z[(3, 2)]), ((- Z[(2, 2)]) - Z[(3, 3)]))", "docstring": "Delta_hv for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\n\nReturns:\nDelta_hv [rad].", "source": "codesearchnet"}
{"code": "def GetMountPoint(self, path=None):\n    \n    path = os.path.abspath(\n        client_utils.CanonicalPathToLocalPath(path or self.path))\n\n    while not os.path.ismount(path):\n      path = os.path.dirname(path)\n\n    return path", "docstring": "Walk back from the path to find the mount point.\n\nArgs:\npath: a Unicode string containing the path or None. If path is None the\nvalue in self.path is used.\n\nReturns:\npath string of the mount point", "source": "juraj-google-style"}
{"code": "def do_phonefy(self, query, **kwargs):\n    results = []\n    test = self.check_phonefy(query, kwargs)\n    if test:\n        r = {'type': 'i3visio.phone', 'value': ((self.platformName + ' - ') + query), 'attributes': []}\n        try:\n            aux = {'type': 'i3visio.uri', 'value': self.createURL(query, mode='phonefy'), 'attributes': []}\n            r['attributes'].append(aux)\n        except:\n            pass\n        aux = {'type': 'i3visio.platform', 'value': self.platformName, 'attributes': []}\n        r['attributes'].append(aux)\n        r['attributes'] += self.process_phonefy(test)\n        results.append(r)\n    return results", "docstring": "Verifying a phonefy query in this platform.\n\nThis might be redefined in any class inheriting from Platform.\n\nArgs:\n-----\nquery: The element to be searched.\n\nReturn:\n-------\nA list of elements to be appended.", "source": "codesearchnet"}
{"code": "def Run(script, container=None, exit_on_error=False, gas=Fixed8.Zero(), test_mode=True):\n        \n\n        from neo.Core.Blockchain import Blockchain\n        from neo.SmartContract.StateMachine import StateMachine\n        from neo.EventHub import events\n\n        bc = Blockchain.Default()\n\n        accounts = DBCollection(bc._db, DBPrefix.ST_Account, AccountState)\n        assets = DBCollection(bc._db, DBPrefix.ST_Asset, AssetState)\n        validators = DBCollection(bc._db, DBPrefix.ST_Validator, ValidatorState)\n        contracts = DBCollection(bc._db, DBPrefix.ST_Contract, ContractState)\n        storages = DBCollection(bc._db, DBPrefix.ST_Storage, StorageItem)\n\n        script_table = CachedScriptTable(contracts)\n        service = StateMachine(accounts, validators, assets, contracts, storages, None)\n\n        engine = ApplicationEngine(\n            trigger_type=TriggerType.Application,\n            container=container,\n            table=script_table,\n            service=service,\n            gas=gas,\n            testMode=test_mode,\n            exit_on_error=exit_on_error\n        )\n\n        script = binascii.unhexlify(script)\n\n        engine.LoadScript(script)\n\n        try:\n            success = engine.Execute()\n            engine.testMode = True\n            service.ExecutionCompleted(engine, success)\n        except Exception as e:\n            engine.testMode = True\n            service.ExecutionCompleted(engine, False, e)\n\n        for event in service.events_to_dispatch:\n            events.emit(event.event_type, event)\n\n        return engine", "docstring": "Runs a script in a test invoke environment\n\nArgs:\nscript (bytes): The script to run\ncontainer (neo.Core.TX.Transaction): [optional] the transaction to use as the script container\n\nReturns:\nApplicationEngine", "source": "juraj-google-style"}
{"code": "def save(self, data: List[dict]):\n    with open(self.output_path, 'w') as f:\n        if len(data) > 0:\n            writer = csv.DictWriter(f, list(data[0].keys()))\n            writer.writeheader()\n            writer.writerows(data)", "docstring": "Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].\n\nArgs:\ndata (`List[dict]`): The data to store.", "source": "github-repos"}
{"code": "def _check_archive_signature(archive_file: io.BufferedIOBase) -> None:\n    signature = archive_file.read(8)\n    if signature != b'!<arch>\\n':\n        raise RuntimeError('Invalid archive file format.')", "docstring": "Checks if the file has the correct archive header signature.\n\nThe cursor is moved to the first available file header section after\nsuccessfully checking the signature.\n\nArgs:\narchive_file: The archive file object pointing at its beginning.\n\nRaises:\nRuntimeError: The archive signature is invalid.", "source": "github-repos"}
{"code": "def __init__(self, job_id=None, future=None):\n    \n    self._job_id = str(uuid.uuid4()) if job_id is None else job_id\n    self._future = future\n    self._is_complete = False\n    self._errors = None\n    self._fatal_error = None\n    self._result = None\n    self._start_time = datetime.datetime.utcnow()\n    self._end_time = None", "docstring": "Initializes an instance of a Job.\n\nArgs:\njob_id: a unique ID for the job. If None, a UUID will be generated.\nfuture: the Future associated with the Job, if any.", "source": "juraj-google-style"}
{"code": "def get_orientation_error(target_orn, current_orn):\n    \n    current_orn = np.array(\n        [current_orn[3], current_orn[0], current_orn[1], current_orn[2]]\n    )\n    target_orn = np.array([target_orn[3], target_orn[0], target_orn[1], target_orn[2]])\n\n    pinv = np.zeros((3, 4))\n    pinv[0, :] = [-current_orn[1], current_orn[0], -current_orn[3], current_orn[2]]\n    pinv[1, :] = [-current_orn[2], current_orn[3], current_orn[0], -current_orn[1]]\n    pinv[2, :] = [-current_orn[3], -current_orn[2], current_orn[1], current_orn[0]]\n    orn_error = 2.0 * pinv.dot(np.array(target_orn))\n    return orn_error", "docstring": "Returns the difference between two quaternion orientations as a 3 DOF numpy array.\nFor use in an impedance controller / task-space PD controller.\n\nArgs:\ntarget_orn: 4-dim iterable, desired orientation as a (x, y, z, w) quaternion\ncurrent_orn: 4-dim iterable, current orientation as a (x, y, z, w) quaternion\n\nReturns:\norn_error: 3-dim numpy array for current orientation error, corresponds to\n(target_orn - current_orn)", "source": "juraj-google-style"}
{"code": "def clean_file(c_source, virtualenv_dirname):\n    with open(c_source, 'r') as file_obj:\n        contents = file_obj.read().rstrip()\n    py_version = 'python{}.{}'.format(*sys.version_info[:2])\n    lib_path = os.path.join('.nox', virtualenv_dirname, 'lib', py_version, 'site-packages', '')\n    contents = contents.replace(lib_path, '')\n    lines = contents.split('\\n')\n    with open(c_source, 'w') as file_obj:\n        for line in lines:\n            file_obj.write((line.rstrip() + '\\n'))", "docstring": "Strip trailing whitespace and clean up \"local\" names in C source.\n\nThese source files are autogenerated from the ``cython`` CLI.\n\nArgs:\nc_source (str): Path to a ``.c`` source file.\nvirtualenv_dirname (str): The name of the ``virtualenv``\ndirectory where Cython is installed (this is part of a\nrelative path ``.nox/{NAME}/lib/...``).", "source": "codesearchnet"}
{"code": "def load_notebook_node(notebook_path):\n    \n    nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)\n\n    if not hasattr(nb.metadata, 'papermill'):\n        nb.metadata['papermill'] = {\n            'parameters': dict(),\n            'environment_variables': dict(),\n            'version': __version__,\n        }\n\n    for cell in nb.cells:\n        if not hasattr(cell.metadata, 'tags'):\n            cell.metadata['tags'] = []  \n\n        if not hasattr(cell.metadata, 'papermill'):\n            cell.metadata['papermill'] = dict()\n    return nb", "docstring": "Returns a notebook object with papermill metadata loaded from the specified path.\n\nArgs:\nnotebook_path (str): Path to the notebook file.\n\nReturns:\nnbformat.NotebookNode", "source": "juraj-google-style"}
{"code": "def _ReadLabels(self, artifact_definition_values, artifact_definition, name):\n    \n    labels = artifact_definition_values.get('labels', [])\n\n    undefined_labels = set(labels).difference(self.labels)\n    if undefined_labels:\n      raise errors.FormatError(\n          'Artifact definition: {0:s} found undefined labels: {1:s}.'.format(\n              name, ', '.join(undefined_labels)))\n\n    artifact_definition.labels = labels", "docstring": "Reads the optional artifact definition labels.\n\nArgs:\nartifact_definition_values (dict[str, object]): artifact definition\nvalues.\nartifact_definition (ArtifactDefinition): an artifact definition.\nname (str): name of the artifact definition.\n\nRaises:\nFormatError: if there are undefined labels.", "source": "juraj-google-style"}
{"code": "def remove_droplets(self, droplet_ids):\n        \n        return self.get_data(\n            \"load_balancers/%s/droplets/\" % self.id,\n            type=DELETE,\n            params={\"droplet_ids\": droplet_ids}\n        )", "docstring": "Unassign a LoadBalancer.\n\nArgs:\ndroplet_ids (obj:`list` of `int`): A list of Droplet IDs", "source": "juraj-google-style"}
{"code": "def register_once(event_name: str, callback: Callable[..., None], info: Hashable) -> None:\n    ip = IPython.get_ipython()\n    info = hash(info)\n    for old_callback in ip.events.callbacks[event_name]:\n        if getattr(old_callback, '__ecolab_event__', None) == info:\n            ip.events.unregister(event_name, old_callback)\n            break\n    callback.__ecolab_event__ = info\n    ip.events.register(event_name, callback)", "docstring": "Register the IPython event once (replace the previous event if exists).\n\nAlias for `InteractiveShell.events.register` but replaces previous event\nif it exists.\n\nThis avoids duplicated events after ecolab reload or running cell twice.\n\nArgs:\nevent_name: Forwarded to `ip.events.register`\ncallback: Forwarded to `ip.events.register`\ninfo: If an event with the same info already exists, it is replaced.", "source": "github-repos"}
{"code": "def tox(args=''):\n    basedir = dirname(__file__)\n    latest_pythons = _determine_latest_pythons()\n    highest_minor_python = _highest_minor(latest_pythons)\n    _local_needs_pythons(flo('cd {basedir}  &&  python{highest_minor_python} -m tox {args}'))", "docstring": "Run tox.\n\nBuild package and run unit tests against several pythons.\n\nArgs:\nargs: Optional arguments passed to tox.\nExample:\n\nfab tox:'-e py36 -r'", "source": "codesearchnet"}
{"code": "def can_encode(nested_structure):\n    try:\n        encode_structure(nested_structure)\n    except NotEncodableError:\n        return False\n    return True", "docstring": "Determines whether a nested structure can be encoded into a proto.\n\nArgs:\nnested_structure: Structure to encode.\n\nReturns:\nTrue if the nested structured can be encoded.", "source": "github-repos"}
{"code": "def __init__(self, protojson_protocol=None, **kwargs):\n        \n        super(MessageJSONEncoder, self).__init__(**kwargs)\n        self.__protojson_protocol = (\n            protojson_protocol or ProtoJson.get_default())", "docstring": "Constructor.\n\nArgs:\nprotojson_protocol: ProtoJson instance.", "source": "juraj-google-style"}
{"code": "def replace_666(meta_df, convert_neg_666):\n    \n    if convert_neg_666:\n        out_df = meta_df.replace([-666, \"-666\", -666.0], np.nan)\n    else:\n        out_df = meta_df.replace([-666, -666.0], \"-666\")\n    return out_df", "docstring": "Replace -666, -666.0, and optionally \"-666\".\nArgs:\nmeta_df (pandas df):\nconvert_neg_666 (bool):\nReturns:\nout_df (pandas df): updated meta_df", "source": "juraj-google-style"}
{"code": "def from_json(cls, json_value: JSONValueType, **kwargs) -> 'JSONConvertible':\n    assert isinstance(json_value, dict)\n    init_args = {k: from_json(v, **kwargs) for k, v in json_value.items() if k != JSONConvertible.TYPE_NAME_KEY}\n    return cls(**init_args)", "docstring": "Creates an instance of this class from a plain Python value.\n\nNOTE(daiyip): ``pg.Symbolic`` overrides ``from_json`` class method.\n\nArgs:\njson_value: JSON value type.\n**kwargs: Keyword arguments as flags to control object creation.\n\nReturns:\nAn instance of cls.", "source": "github-repos"}
{"code": "def get(pb_or_dict, key, default=_SENTINEL):\n    (key, subkey) = _resolve_subkeys(key)\n    if isinstance(pb_or_dict, Message):\n        answer = getattr(pb_or_dict, key, default)\n    elif isinstance(pb_or_dict, collections.Mapping):\n        answer = pb_or_dict.get(key, default)\n    else:\n        raise TypeError('Tried to fetch a key %s on an invalid object; expected a dict or protobuf message.')\n    if (answer is _SENTINEL):\n        raise KeyError(key)\n    if (subkey and (answer is not default)):\n        return get(answer, subkey, default=default)\n    return answer", "docstring": "Retrieve the given key off of the object.\n\nIf a default is specified, return it if the key is not found, otherwise\nraise KeyError.\n\nArgs:\npb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the\nobject.\nkey (str): The key to retrieve from the object in question.\ndefault (Any): If the key is not present on the object, and a default\nis set, returns that default instead. A type-appropriate falsy\ndefault is generally recommended, as protobuf messages almost\nalways have default values for unset values and it is not always\npossible to tell the difference between a falsy value and an\nunset one. If no default is set, raises KeyError for not found\nvalues.\n\nReturns:\nAny: The return value from the underlying message or dict.\n\nRaises:\nKeyError: If the key is not found. Note that, for unset values,\nmessages and dictionaries may not have consistent behavior.\nTypeError: If pb_or_dict is not a Message or Mapping.", "source": "codesearchnet"}
{"code": "def HasIndex(self, index):\n        \n        for i in self.Items:\n            if i.index == index:\n                return True\n        return False", "docstring": "Flag indicating the index exists in any of the spent coin items.\nArgs:\nindex (int):\n\nReturns:", "source": "juraj-google-style"}
{"code": "def data_to_unicode(self, data):\n        \n        if isinstance(data, dict):\n            return {self.to_unicode(k): self.to_unicode(v) for k, v in data.iteritems()}\n        if isinstance(data, list):\n            return [self.to_unicode(l) for l in data]\n        else:\n            return self.to_unicode(data)", "docstring": "Recursively convert a list or dictionary to unicode.\n\nArgs:\ndata: The data to be unicoded.\n\nReturns:\nUnicoded data.", "source": "juraj-google-style"}
{"code": "def parse(type: Type):\n        \n\n        def decorator(parser):\n            EnvVar.parsers[type] = parser\n            return parser\n\n        return decorator", "docstring": "Register a parser for a attribute type.\n\nParsers will be used to parse `str` type objects from either\nthe commandline arguments or environment variables.\n\nArgs:\ntype: the type the decorated function will be responsible\nfor parsing a environment variable to.", "source": "juraj-google-style"}
{"code": "def build_image(image_path, image_name, build_args=None, dockerfile_path=None):\n    cmd = ['docker', 'build', '-t', image_name, image_path]\n    if dockerfile_path:\n        cmd.extend(['-f', dockerfile_path])\n    for (k, v) in (build_args or {}).items():\n        cmd += ['--build-arg', '{}={}'.format(k, v)]\n    check_call(cmd)", "docstring": "Build an image\n\nArgs:\nimage_path (str): the path to the image directory\nimage_name (str): image 'name:tag' to build\nbuild_args (dict, optional): dict of docker build arguments\ndockerfile_path (str, optional):\npath to dockerfile relative to image_path\nif not `image_path/Dockerfile`.", "source": "codesearchnet"}
{"code": "def decode_base64_dict(data):\n    \n    b64 = base64.b64decode(data['__ndarray__'])\n    array = np.copy(np.frombuffer(b64, dtype=data['dtype']))\n    if len(data['shape']) > 1:\n        array = array.reshape(data['shape'])\n    return array", "docstring": "Decode a base64 encoded array into a NumPy array.\n\nArgs:\ndata (dict) : encoded array data to decode\n\nData should have the format encoded by :func:`encode_base64_dict`.\n\nReturns:\nnp.ndarray", "source": "juraj-google-style"}
{"code": "def datetime_string(day, month, year, hour, minute):\n    \n    \n    if hour < 0 or hour > 23: hour = 0\n    if minute < 0 or minute > 60: minute = 0\n\n    return '%d-%02d-%02dT%02d:%02d:00' % (year, month, day, hour, minute)", "docstring": "Build a date string using the provided day, month, year numbers.\n\nAutomatically adds a leading zero to ``day`` and ``month`` if they only have\none digit.\n\nArgs:\nday (int): Day number.\nmonth(int): Month number.\nyear(int): Year number.\nhour (int): Hour of the day in 24h format.\nminute (int): Minute of the hour.\n\nReturns:\nstr: Date in the format *YYYY-MM-DDThh:mm:ss*.", "source": "juraj-google-style"}
{"code": "def _download_mlu_data(tmp_dir, data_dir):\n  \n  if not tf.gfile.Exists(data_dir):\n    tf.gfile.MakeDirs(data_dir)\n\n  filename = os.path.basename(_URL)\n  file_path = os.path.join(tmp_dir, filename)\n  headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) \"\n                           \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n                           \"Chrome/63.0.3239.132 Safari/537.36\"}\n  resp = requests.get(_URL, headers=headers)\n  with open(file_path, \"wb\") as f:\n    f.write(resp.content)\n\n  with tarfile.open(file_path, \"r:gz\") as tar:\n    tar.extractall(tmp_dir)\n\n  return tmp_dir", "docstring": "Downloads and extracts the dataset.\n\nArgs:\ntmp_dir: temp directory to download and extract the dataset\ndata_dir: The base directory where data and vocab files are stored.\n\nReturns:\ntmp_dir: temp directory containing the raw data.", "source": "juraj-google-style"}
{"code": "def push_plugin(self, name):\n        \n        url = self._url('/plugins/{0}/pull', name)\n\n        headers = {}\n        registry, repo_name = auth.resolve_repository_name(name)\n        header = auth.get_config_header(self, registry)\n        if header:\n            headers['X-Registry-Auth'] = header\n        res = self._post(url, headers=headers)\n        self._raise_for_status(res)\n        return self._stream_helper(res, decode=True)", "docstring": "Push a plugin to the registry.\n\nArgs:\nname (string): Name of the plugin to upload. The ``:latest``\ntag is optional, and is the default if omitted.\n\nReturns:\n``True`` if successful", "source": "juraj-google-style"}
{"code": "class Document(object):\n\n    def __init__(self, content: str, type: Union[str, language_v1.Document.Type]='PLAIN_TEXT', language_hint: Optional[str]=None, encoding: Optional[str]='UTF8', from_gcs: bool=False):\n        self.content = content\n        self.type = type\n        self.encoding = encoding\n        self.language_hint = language_hint\n        self.from_gcs = from_gcs\n\n    @staticmethod\n    def to_dict(document: 'Document') -> Mapping[str, Optional[str]]:\n        if document.from_gcs:\n            dict_repr = {'gcs_content_uri': document.content}\n        else:\n            dict_repr = {'content': document.content}\n        dict_repr.update({'type': document.type, 'language': document.language_hint})\n        return dict_repr", "docstring": "Represents the input to :class:`AnnotateText` transform.\n\nArgs:\ncontent (str): The content of the input or the Google Cloud Storage URI\nwhere the file is stored.\ntype (`Union[str, google.cloud.language_v1.Document.Type]`): Text type.\nPossible values are `HTML`, `PLAIN_TEXT`. The default value is\n`PLAIN_TEXT`.\nlanguage_hint (`Optional[str]`): The language of the text. If not specified,\nlanguage will be automatically detected. Values should conform to\nISO-639-1 standard.\nencoding (`Optional[str]`): Text encoding. Possible values are: `NONE`,\n`UTF8`, `UTF16`, `UTF32`. The default value is `UTF8`.\nfrom_gcs (bool): Whether the content should be interpret as a Google Cloud\nStorage URI. The default value is :data:`False`.", "source": "github-repos"}
{"code": "def GetDecoder(cls, encoding_method):\n    \n    encoding_method = encoding_method.lower()\n    decoder = cls._decoders.get(encoding_method, None)\n    if not decoder:\n      return None\n\n    return decoder()", "docstring": "Retrieves the decoder object for a specific encoding method.\n\nArgs:\nencoding_method (str): encoding method identifier.\n\nReturns:\nDecoder: decoder or None if the encoding method does not exists.", "source": "juraj-google-style"}
{"code": "def _ToJSonObj(self, columns_order=None, order_by=()):\n    if (columns_order is None):\n        columns_order = [col['id'] for col in self.__columns]\n    col_dict = dict([(col['id'], col) for col in self.__columns])\n    col_objs = []\n    for col_id in columns_order:\n        col_obj = {'id': col_dict[col_id]['id'], 'label': col_dict[col_id]['label'], 'type': col_dict[col_id]['type']}\n        if col_dict[col_id]['custom_properties']:\n            col_obj['p'] = col_dict[col_id]['custom_properties']\n        col_objs.append(col_obj)\n    row_objs = []\n    for (row, cp) in self._PreparedData(order_by):\n        cell_objs = []\n        for col in columns_order:\n            value = self.CoerceValue(row.get(col, None), col_dict[col]['type'])\n            if (value is None):\n                cell_obj = None\n            elif isinstance(value, tuple):\n                cell_obj = {'v': value[0]}\n                if ((len(value) > 1) and (value[1] is not None)):\n                    cell_obj['f'] = value[1]\n                if (len(value) == 3):\n                    cell_obj['p'] = value[2]\n            else:\n                cell_obj = {'v': value}\n            cell_objs.append(cell_obj)\n        row_obj = {'c': cell_objs}\n        if cp:\n            row_obj['p'] = cp\n        row_objs.append(row_obj)\n    json_obj = {'cols': col_objs, 'rows': row_objs}\n    if self.custom_properties:\n        json_obj['p'] = self.custom_properties\n    return json_obj", "docstring": "Returns an object suitable to be converted to JSON.\n\nArgs:\ncolumns_order: Optional. A list of all column IDs in the order in which\nyou want them created in the output table. If specified,\nall column IDs must be present.\norder_by: Optional. Specifies the name of the column(s) to sort by.\nPassed as is to _PreparedData().\n\nReturns:\nA dictionary object for use by ToJSon or ToJSonResponse.", "source": "codesearchnet"}
{"code": "def console_fill_foreground(con: tcod.console.Console, r: Sequence[int], g: Sequence[int], b: Sequence[int]) -> None:\n    if ((len(r) != len(g)) or (len(r) != len(b))):\n        raise TypeError('R, G and B must all have the same size.')\n    if (isinstance(r, np.ndarray) and isinstance(g, np.ndarray) and isinstance(b, np.ndarray)):\n        r_ = np.ascontiguousarray(r, dtype=np.intc)\n        g_ = np.ascontiguousarray(g, dtype=np.intc)\n        b_ = np.ascontiguousarray(b, dtype=np.intc)\n        cr = ffi.cast('int *', r_.ctypes.data)\n        cg = ffi.cast('int *', g_.ctypes.data)\n        cb = ffi.cast('int *', b_.ctypes.data)\n    else:\n        cr = ffi.new('int[]', r)\n        cg = ffi.new('int[]', g)\n        cb = ffi.new('int[]', b)\n    lib.TCOD_console_fill_foreground(_console(con), cr, cg, cb)", "docstring": "Fill the foregound of a console with r,g,b.\n\nArgs:\ncon (Console): Any Console instance.\nr (Sequence[int]): An array of integers with a length of width*height.\ng (Sequence[int]): An array of integers with a length of width*height.\nb (Sequence[int]): An array of integers with a length of width*height.\n\n.. deprecated:: 8.4\nYou should assign to :any:`tcod.console.Console.fg` instead.", "source": "codesearchnet"}
{"code": "def make_path(path: PathLike) -> abstract_path.Path:\n    is_windows = os.name == 'nt'\n    if isinstance(path, str):\n        uri_splits = path.split(':\n        if len(uri_splits) > 1:\n            return _URI_PREFIXES_TO_CLS[uri_splits[0] + ':\n        elif is_windows:\n            return gpath.WindowsGPath(path)\n        else:\n            return gpath.PosixGPath(path)\n    elif isinstance(path, _PATHLIKE_CLS):\n        return path\n    elif isinstance(path, os.PathLike):\n        path_cls = gpath.WindowsGPath if is_windows else gpath.PosixGPath\n        return path_cls(path)\n    else:\n        raise TypeError(f'Invalid path type: {path!r}')", "docstring": "Create a generic `pathlib.Path`-like abstraction.\n\nDepending on the input (e.g. `gs://`, `github://`, `ResourcePath`,...), the\nsystem (Windows, Linux,...), the function will create the right pathlib-like\nabstraction.\n\nArgs:\npath: Pathlike object.\n\nReturns:\npath: The `pathlib.Path`-like abstraction.", "source": "github-repos"}
{"code": "def _collect_certificate_data(self, enterprise_enrollment):\n    if (self.certificates_api is None):\n        self.certificates_api = CertificatesApiClient(self.user)\n    course_id = enterprise_enrollment.course_id\n    username = enterprise_enrollment.enterprise_customer_user.user.username\n    try:\n        certificate = self.certificates_api.get_course_certificate(course_id, username)\n        completed_date = certificate.get('created_date')\n        if completed_date:\n            completed_date = parse_datetime(completed_date)\n        else:\n            completed_date = timezone.now()\n        is_passing = certificate.get('is_passing')\n        grade = (self.grade_passing if is_passing else self.grade_failing)\n    except HttpNotFoundError:\n        completed_date = None\n        grade = self.grade_incomplete\n        is_passing = False\n    return (completed_date, grade, is_passing)", "docstring": "Collect the learner completion data from the course certificate.\n\nUsed for Instructor-paced courses.\n\nIf no certificate is found, then returns the completed_date = None, grade = In Progress, on the idea that a\ncertificate will eventually be generated.\n\nArgs:\nenterprise_enrollment (EnterpriseCourseEnrollment): the enterprise enrollment record for which we need to\ncollect completion/grade data\n\nReturns:\ncompleted_date: Date the course was completed, this is None if course has not been completed.\ngrade: Current grade in the course.\nis_passing: Boolean indicating if the grade is a passing grade or not.", "source": "codesearchnet"}
{"code": "def version(msg):\n    tc = typecode(msg)\n    if (tc != 31):\n        raise RuntimeError(('%s: Not a status operation message, expecting TC = 31' % msg))\n    msgbin = common.hex2bin(msg)\n    version = common.bin2int(msgbin[72:75])\n    return version", "docstring": "ADS-B Version\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC = 31\n\nReturns:\nint: version number", "source": "codesearchnet"}
{"code": "def _save_private_file(filename, json_contents):\n    \n    temp_filename = tempfile.mktemp()\n    file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, 0o600)\n    with os.fdopen(file_desc, 'w') as file_handle:\n        json.dump(json_contents, file_handle, sort_keys=True,\n                  indent=2, separators=(',', ': '))\n    shutil.move(temp_filename, filename)", "docstring": "Saves a file with read-write permissions on for the owner.\n\nArgs:\nfilename: String. Absolute path to file.\njson_contents: JSON serializable object to be saved.", "source": "juraj-google-style"}
{"code": "def __init__(self, chgcar):\n        \n        self.chgcar = chgcar\n        self.structure = chgcar.structure\n        self.extrema_coords = []  \n        self.extrema_type = None  \n        self._extrema_df = None  \n        self._charge_distribution_df = None", "docstring": "Initialization.\n\nArgs:\nchgcar (pmg.Chgcar): input Chgcar object.", "source": "juraj-google-style"}
{"code": "def generate_password(length=32):\n    return ''.join((random.SystemRandom().choice((string.ascii_letters + '!@", "docstring": "Generate a cryptographically secure random string to use for passwords\n\nArgs:\nlength (int): Length of password, defaults to 32 characters\n\nReturns:\nRandomly generated string", "source": "codesearchnet"}
{"code": "def _ParseIndexTable(self, file_object):\n    \n    cache_address_map = self._GetDataTypeMap('uint32le')\n    file_offset = file_object.get_offset()\n\n    cache_address_data = file_object.read(4)\n\n    while len(cache_address_data) == 4:\n      try:\n        value = self._ReadStructureFromByteStream(\n            cache_address_data, file_offset, cache_address_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError((\n            'Unable to map cache address at offset: 0x{0:08x} with error: '\n            '{1!s}').format(file_offset, exception))\n\n      if value:\n        cache_address = CacheAddress(value)\n        self.index_table.append(cache_address)\n\n      file_offset += 4\n\n      cache_address_data = file_object.read(4)", "docstring": "Parses the index table.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\nParseError: if the index table cannot be read.", "source": "juraj-google-style"}
{"code": "def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):\n    rendered_list = []\n    i = 0\n    total_attempts = 0\n    while True:\n        if (i >= cnt):\n            break\n        if (total_attempts > (cnt * self.unique_attempts_factor)):\n            raise StringGenerator.UniquenessError(u\"couldn't satisfy uniqueness\")\n        s = self.render(**kwargs)\n        if unique:\n            if (not (s in rendered_list)):\n                rendered_list.append(s)\n                i += 1\n        else:\n            rendered_list.append(s)\n            i += 1\n        total_attempts += 1\n        if (progress_callback and callable(progress_callback)):\n            progress_callback(i, cnt)\n    return rendered_list", "docstring": "Return a list of generated strings.\n\nArgs:\ncnt (int): length of list\nunique (bool): whether to make entries unique\n\nReturns:\nlist.\n\nWe keep track of total attempts because a template may\nspecify something impossible to attain, like [1-9]{} with cnt==1000", "source": "codesearchnet"}
{"code": "def get_initialized_tpu_systems():\n    return _INITIALIZED_TPU_SYSTEMS.copy()", "docstring": "Returns all currently initialized tpu systems.\n\nReturns:\nA dictionary, with tpu name as the key and the tpu topology as the value.", "source": "github-repos"}
{"code": "def _parse_book_links(dom):\n    \n    links = []\n    picker = lambda x: x.params.get(\"class\", \"\").startswith(\"boxProKnihy\")\n\n    for el in dom.find(None, fn=picker):\n        book_ref = el.find(\"a\")\n\n        if not book_ref or \"href\" not in book_ref[0].params:\n            continue\n\n        links.append(book_ref[0].params[\"href\"])\n\n    return links", "docstring": "Parse links to the details about publications from page with book list.\n\nArgs:\ndom (obj): HTMLElement container of the page with book list.\n\nReturns:\nlist: List of strings / absolute links to book details.", "source": "juraj-google-style"}
{"code": "def read_raster(raster_file):\n        \n        ds = gdal_Open(raster_file)\n        band = ds.GetRasterBand(1)\n        data = band.ReadAsArray()\n        xsize = band.XSize\n        ysize = band.YSize\n\n        nodata_value = band.GetNoDataValue()\n        geotrans = ds.GetGeoTransform()\n        dttype = band.DataType\n\n        srs = osr_SpatialReference()\n        srs.ImportFromWkt(ds.GetProjection())\n        \n        if nodata_value is None:\n            nodata_value = DEFAULT_NODATA\n        band = None\n        ds = None\n        return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype)", "docstring": "Read raster by GDAL.\n\nArgs:\nraster_file: raster file path.\n\nReturns:\nRaster object.", "source": "juraj-google-style"}
{"code": "def get_fixers(self):\n    pre_order_fixers = []\n    post_order_fixers = []\n    for fix_mod_path in self.fixers:\n        mod = __import__(fix_mod_path, {}, {}, ['*'])\n        fix_name = fix_mod_path.rsplit('.', 1)[(- 1)]\n        if fix_name.startswith(self.FILE_PREFIX):\n            fix_name = fix_name[len(self.FILE_PREFIX):]\n        parts = fix_name.split('_')\n        class_name = (self.CLASS_PREFIX + ''.join([p.title() for p in parts]))\n        try:\n            fix_class = getattr(mod, class_name)\n        except AttributeError:\n            raise FixerError((\"Can't find %s.%s\" % (fix_name, class_name)))\n        fixer = fix_class(self.options, self.fixer_log)\n        if (fixer.explicit and (self.explicit is not True) and (fix_mod_path not in self.explicit)):\n            self.log_message('Skipping implicit fixer: %s', fix_name)\n            continue\n        self.log_debug('Adding transformation: %s', fix_name)\n        if (fixer.order == 'pre'):\n            pre_order_fixers.append(fixer)\n        elif (fixer.order == 'post'):\n            post_order_fixers.append(fixer)\n        else:\n            raise FixerError(('Illegal fixer order: %r' % fixer.order))\n    key_func = operator.attrgetter('run_order')\n    pre_order_fixers.sort(key=key_func)\n    post_order_fixers.sort(key=key_func)\n    return (pre_order_fixers, post_order_fixers)", "docstring": "Inspects the options to load the requested patterns and handlers.\n\nReturns:\n(pre_order, post_order), where pre_order is the list of fixers that\nwant a pre-order AST traversal, and post_order is the list that want\npost-order traversal.", "source": "codesearchnet"}
{"code": "def simulate_phases(self, phase_map: Dict[(Tuple[(int, ...)], float)]):\n    self._pool.map(_clear_scratch, self._shard_num_args())\n    for (indices, half_turns) in phase_map.items():\n        args = self._shard_num_args({'indices': indices, 'half_turns': half_turns})\n        if (len(indices) == 1):\n            self._pool.map(_single_qubit_accumulate_into_scratch, args)\n        elif (len(indices) == 2):\n            self._pool.map(_two_qubit_accumulate_into_scratch, args)\n    self._pool.map(_apply_scratch_as_phase, self._shard_num_args())", "docstring": "Simulate a set of phase gates on the xmon architecture.\n\nArgs:\nphase_map: A map from a tuple of indices to a value, one for each\nphase gate being simulated. If the tuple key has one index, then\nthis is a Z phase gate on the index-th qubit with a rotation\nangle of pi times the value of the map. If the tuple key has two\nindices, then this is a |11> phasing gate, acting on the qubits\nat the two indices, and a rotation angle of pi times the value\nof the map.", "source": "codesearchnet"}
{"code": "def get_script_module(script_information, package='pylabcontrol', verbose=False):\n        \n\n        module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)\n\n        return module", "docstring": "wrapper to get the module for a script\n\nArgs:\nscript_information: information of the script. This can be\n- a dictionary\n- a Script instance\n- name of Script class\npackage (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string\nReturns:\nmodule", "source": "juraj-google-style"}
{"code": "def parse_columns(lines):\n    \n    data = []\n    index = []\n    for line in lines:\n        line = line.rstrip()\n        if line.startswith(\"\n            tmp = __parse_entry(line)\n            data.append(tmp[1])\n            index.append(tmp[0])\n\n    return DataFrame(data, index=index, columns=['description'])", "docstring": "Parse list of lines with columns description from SOFT file.\n\nArgs:\nlines (:obj:`Iterable`): Iterator over the lines.\n\nReturns:\n:obj:`pandas.DataFrame`: Columns description.", "source": "juraj-google-style"}
{"code": "def virt_customize(self, options):\n        \n        cmd = ['virt-customize', '-a', self.disk_path]\n        if 'ssh-inject' in options and not options['ssh-inject']:\n            options['ssh-inject'] = 'root:file:{}'.format(\n                self.paths.ssh_id_rsa_pub()\n            )\n\n        options = self.normalize_options(options)\n        cmd.extend(options)\n        return Command('virt-customize', cmd)", "docstring": "Handler for 'virt-customize'\nnote: if 'ssh-inject' option was specified without a path to a key,\nthe prefix' key will be copied to the vm.\n\nArgs:\noptions (lst of str): Options and arguments for 'virt-customize'\n\nReturns:\ncallable: which handles cmd\n\nRaises:\nlago.build.BuildException: If an handler for cmd doesn't exist", "source": "juraj-google-style"}
{"code": "def get_HDX_code_from_location(location, locations=None, configuration=None):\n        \n        \n        if locations is None:\n            locations = Locations.validlocations(configuration)\n        locationupper = location.upper()\n        for locdict in locations:\n            locationcode = locdict['name'].upper()\n            if locationupper == locationcode:\n                return locationcode\n\n        for locdict in locations:\n            if locationupper == locdict['title'].upper():\n                return locdict['name'].upper()\n        return None", "docstring": "Get HDX code for location\n\nArgs:\nlocation (str): Location for which to get HDX code\nlocations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[str]: HDX code or None", "source": "juraj-google-style"}
{"code": "def gcd(a, b, name=None):\n    with ops.name_scope(name, 'gcd', [a, b]):\n        a = ops.convert_to_tensor(a)\n        b = ops.convert_to_tensor(b)\n        a.shape.assert_has_rank(0)\n        b.shape.assert_has_rank(0)\n        if not a.dtype.is_integer:\n            raise ValueError('a must be an integer type. Got: %s' % a.dtype)\n        if not b.dtype.is_integer:\n            raise ValueError('b must be an integer type. Got: %s' % b.dtype)\n        const_a = tensor_util.constant_value(a)\n        const_b = tensor_util.constant_value(b)\n        if const_a is not None and const_b is not None:\n            if sys.version_info.major < 3:\n                math_gcd = fractions.gcd\n            else:\n                math_gcd = math.gcd\n            return ops.convert_to_tensor(math_gcd(const_a, const_b))\n        cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))\n        body = lambda a, b: [b, math_ops.mod(a, b)]\n        a, b = while_loop.while_loop(cond, body, [a, b], back_prop=False)\n        return a", "docstring": "Returns the greatest common divisor via Euclid's algorithm.\n\nArgs:\na: The dividend. A scalar integer `Tensor`.\nb: The divisor. A scalar integer `Tensor`.\nname: An optional name for the operation.\n\nReturns:\nA scalar `Tensor` representing the greatest common divisor between `a` and\n`b`.\n\nRaises:\nValueError: If `a` or `b` are not scalar integers.", "source": "github-repos"}
{"code": "def jaccard_sim(features1, features2):\n    \n    set1 = set(features1)\n    set2 = set(features2)\n    try:\n        return len(set1.intersection(set2))/float(max(len(set1), len(set2)))\n    except ZeroDivisionError:\n        return 0", "docstring": "Compute similarity between two sets using Jaccard similarity.\n\nArgs:\nfeatures1: list of PE Symbols.\nfeatures2: list of PE Symbols.\n\nReturns:\nReturns an int.", "source": "juraj-google-style"}
{"code": "def serialize_cert_to_der(cert_obj):\n    return cert_obj.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.DER)", "docstring": "Serialize certificate to DER.\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nbytes: DER encoded certificate", "source": "codesearchnet"}
{"code": "def _read_accept_states(self):\n        \n        states = []\n        i = 0\n        regex = re.compile('[ \\t\\n\\r:,]+')\n        found = 0  \n        state = 0  \n        mapping = [] \n        cur_line = None\n        with open(self.outfile) as flex_file:\n            for cur_line in flex_file:\n                if cur_line[0:37] == \"static yyconst flex_int16_t yy_accept\" or cur_line[0:35] == \"static const flex_int16_t yy_accept\":\n                    found = 1\n                    continue\n                if found == 1:\n                    \n                    if state == 0 and cur_line[0:5] == \"    {\":\n                        mapping.append(0)  \n                        state = 1\n                        continue\n\n                    if state == 1:\n                        if cur_line[0:7] != \"    } ;\":\n                            cur_line = \"\".join(cur_line.split())\n                            if cur_line == '':\n                                continue\n                            if cur_line[cur_line.__len__() - 1] == ',':\n                                splitted_line = regex.split(\n                                    cur_line[:cur_line.__len__() - 1])\n                            else:\n                                splitted_line = regex.split(cur_line)\n                            mapping = mapping + splitted_line\n                            continue\n                        else:\n                            cleared = []\n                            for j in mapping:\n                                cleared.append(int(j))\n                            max_value = max(cleared)\n                            for i in range(0, len(cleared)):\n                                if cleared[i] > 0 and cleared[\n                                        i] < (max_value - 1):\n                                    states.append(i)\n                            return states\n        return []", "docstring": "Read DFA accepted states from flex compiled file\nArgs:\nNone\nReturns:\nlist: The list of accepted states", "source": "juraj-google-style"}
{"code": "def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):\n        \n        return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)", "docstring": "Create the Signature TI object.\n\nArgs:\nowner:\nfile_content:\nfile_name:\nfile_type:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def order_by(self, field_path, **kwargs):\n    query = query_mod.Query(self)\n    return query.order_by(field_path, **kwargs)", "docstring": "Create an \"order by\" query with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.order_by` for\nmore information on this method.\n\nArgs:\nfield_path (str): A field path (``.``-delimited list of\nfield names) on which to order the query results.\nkwargs (Dict[str, Any]): The keyword arguments to pass along\nto the query. The only supported keyword is ``direction``,\nsee :meth:`~.firestore_v1beta1.query.Query.order_by` for\nmore information.\n\nReturns:\n~.firestore_v1beta1.query.Query: An \"order by\" query.", "source": "codesearchnet"}
{"code": "def _tf_flatten_batch_dims(x, num_nonbatch_dims):\n    shape = x.shape.as_list()\n    assert (None not in shape)\n    new_shape = ([list_product(shape[:(- num_nonbatch_dims)])] + shape[(- num_nonbatch_dims):])\n    if (new_shape != shape):\n        x = tf.reshape(x, new_shape)\n    return x", "docstring": "Flatten all but last num_nonbatch_dims into one dimension.\n\nArgs:\nx: a tf.Tensor:\nnum_nonbatch_dims: an integer\n\nReturns:\na tf.Tensor with 1 + num_nonbatch_dims dimensions.", "source": "codesearchnet"}
{"code": "def get_geostationary_mask(area):\n    \n    \n    h = area.proj_dict['h']\n    xmax, ymax = get_geostationary_angle_extent(area)\n    xmax *= h\n    ymax *= h\n\n    \n    x, y = area.get_proj_coords_dask()\n\n    \n    return ((x / xmax) ** 2 + (y / ymax) ** 2) <= 1", "docstring": "Compute a mask of the earth's shape as seen by a geostationary satellite\n\nArgs:\narea (pyresample.geometry.AreaDefinition) : Corresponding area\ndefinition\n\nReturns:\nBoolean mask, True inside the earth's shape, False outside.", "source": "juraj-google-style"}
{"code": "def swapdim(P, dim1=1, dim2=0):\n    \n    if not isinstance(P, Poly):\n        return numpy.swapaxes(P, dim1, dim2)\n\n    dim = P.dim\n    shape = P.shape\n    dtype = P.dtype\n\n    if dim1==dim2:\n        return P\n\n    m = max(dim1, dim2)\n    if P.dim <= m:\n        P = chaospy.poly.dimension.setdim(P, m+1)\n        dim = m+1\n\n    A = {}\n\n    for key in P.keys:\n\n        val = P.A[key]\n        key = list(key)\n        key[dim1], key[dim2] = key[dim2], key[dim1]\n        A[tuple(key)] = val\n\n    return Poly(A, dim, shape, dtype)", "docstring": "Swap the dim between two variables.\n\nArgs:\nP (Poly):\nInput polynomial.\ndim1 (int):\nFirst dim\ndim2 (int):\nSecond dim.\n\nReturns:\n(Poly):\nPolynomial with swapped dimensions.\n\nExamples:\n>>> x,y = variable(2)\n>>> P = x**4-y\n>>> print(P)\nq0^4-q1\n>>> print(swapdim(P))\nq1^4-q0", "source": "juraj-google-style"}
{"code": "def _invalid_docstring_quote(self, quote, row, col=None):\n        \n        self.add_message(\n            'invalid-docstring-quote',\n            line=row,\n            args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote)),\n            **self.get_offset(col)\n        )", "docstring": "Add a message for an invalid docstring quote.\n\nArgs:\nquote: The quote characters that were found.\nrow: The row number the quote characters were found on.\ncol: The column the quote characters were found on.", "source": "juraj-google-style"}
{"code": "def ReadVFS(pathspec, offset, length, progress_callback=None):\n    fd = VFSOpen(pathspec, progress_callback=progress_callback)\n    fd.Seek(offset)\n    return fd.Read(length)", "docstring": "Read from the VFS and return the contents.\n\nArgs:\npathspec: path to read from\noffset: number of bytes to skip\nlength: number of bytes to read\nprogress_callback: A callback to indicate that the open call is still\nworking but needs more time.\n\nReturns:\nVFS file contents", "source": "codesearchnet"}
{"code": "class MarkupLMProcessor(ProcessorMixin):\n    feature_extractor_class = 'MarkupLMFeatureExtractor'\n    tokenizer_class = ('MarkupLMTokenizer', 'MarkupLMTokenizerFast')\n    parse_html = True\n\n    def __call__(self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:\n        \n        if self.parse_html:\n            if html_strings is None:\n                raise ValueError('Make sure to pass HTML strings in case `parse_html` is set to `True`')\n            if nodes is not None or xpaths is not None or node_labels is not None:\n                raise ValueError(\"Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`\")\n            features = self.feature_extractor(html_strings)\n            nodes = features['nodes']\n            xpaths = features['xpaths']\n        else:\n            if html_strings is not None:\n                raise ValueError('You have passed HTML strings but `parse_html` is set to `False`.')\n            if nodes is None or xpaths is None:\n                raise ValueError('Make sure to pass nodes and xpaths in case `parse_html` is set to `False`')\n        if questions is not None and self.parse_html:\n            if isinstance(questions, str):\n                questions = [questions]\n        encoded_inputs = self.tokenizer(text=questions if questions is not None else nodes, text_pair=nodes if questions is not None else None, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)\n        return encoded_inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        return tokenizer_input_names", "docstring": "Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single\nprocessor.\n\n[`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model.\n\nIt first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings.\nNext, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level\n`input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`.\n\nArgs:\nfeature_extractor 
(`MarkupLMFeatureExtractor`):\nAn instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input.\ntokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`):\nAn instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input.\nparse_html (`bool`, *optional*, defaults to `True`):\nWhether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths.", "source": "github-repos"}
{"code": "def __init__(self, env):\n    \n    self._env = env\n    observ_shape = self._parse_shape(self._env.observation_space)\n    observ_dtype = self._parse_dtype(self._env.observation_space)\n    action_shape = self._parse_shape(self._env.action_space)\n    action_dtype = self._parse_dtype(self._env.action_space)\n    with tf.name_scope('environment'):\n      self._observ = tf.Variable(\n          tf.zeros(observ_shape, observ_dtype), name='observ', trainable=False)\n      self._action = tf.Variable(\n          tf.zeros(action_shape, action_dtype), name='action', trainable=False)\n      self._reward = tf.Variable(\n          0.0, dtype=tf.float32, name='reward', trainable=False)\n      self._done = tf.Variable(\n          True, dtype=tf.bool, name='done', trainable=False)\n      self._step = tf.Variable(\n          0, dtype=tf.int32, name='step', trainable=False)", "docstring": "Put an OpenAI Gym environment into the TensorFlow graph.\n\nArgs:\nenv: OpenAI Gym environment.", "source": "juraj-google-style"}
{"code": "def sync_job_info(self, job_name):\n    job_path = os.path.join(self._logdir, job_name)\n    if (job_name not in self._monitored_jobs):\n        self._create_job_info(job_path)\n        self._monitored_jobs.add(job_name)\n    else:\n        self._update_job_info(job_path)\n    expr_dirs = filter((lambda d: os.path.isdir(os.path.join(job_path, d))), os.listdir(job_path))\n    for expr_dir_name in expr_dirs:\n        self.sync_trial_info(job_path, expr_dir_name)\n    self._update_job_info(job_path)", "docstring": "Load information of the job with the given job name.\n\n1. Traverse each experiment sub-directory and sync information\nfor each trial.\n2. Create or update the job information, together with the job\nmeta file.\n\nArgs:\njob_name (str) name of the Tune experiment", "source": "codesearchnet"}
{"code": "def name_from_base(base, max_length=63, short=False):\n    \n    timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp()\n    trimmed_base = base[:max_length - len(timestamp) - 1]\n    return '{}-{}'.format(trimmed_base, timestamp)", "docstring": "Append a timestamp to the provided string.\n\nThis function assures that the total length of the resulting string is not\nlonger than the specified max length, trimming the input parameter if necessary.\n\nArgs:\nbase (str): String used as prefix to generate the unique name.\nmax_length (int): Maximum length for the resulting string.\nshort (bool): Whether or not to use a truncated timestamp.\n\nReturns:\nstr: Input parameter with appended timestamp.", "source": "juraj-google-style"}
{"code": "def __init__(self, ary):\n        \n\n        \n        self._dirty = True\n\n        \n        self._typed = None\n\n        if isinstance(ary, (list, tuple, collections.Sequence)):\n            self.data = ary\n        elif isinstance(ary, ArrayWrapper):\n            self.data = ary.data\n        else:\n            raise TypeError(\"Invalid value given to array validator: {0}\"\n                            .format(ary))\n\n        logger.debug(fmt(\"Initializing ArrayWrapper {} with {}\", self, ary))", "docstring": "Initialize a wrapper for the array\n\nArgs:\nary: (list-like, or ArrayWrapper)", "source": "juraj-google-style"}
{"code": "def rename(df, **kwargs):\n    return df.rename(columns={v: k for (k, v) in kwargs.items()})", "docstring": "Renames columns, where keyword argument values are the current names\nof columns and keys are the new names.\n\nArgs:\ndf (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe.\n\nKwargs:\n**kwargs: key:value pairs where keys are new names for columns and\nvalues are current names of columns.", "source": "codesearchnet"}
{"code": "def register_gradient_tensor(self, x_tensor_name, gradient_tensor):\n    if len(_gradient_debuggers) == 1 or self._is_active_context:\n        self._check_same_graph(gradient_tensor)\n        self._gradient_tensors[x_tensor_name] = gradient_tensor", "docstring": "Register the gradient tensor for an x-tensor.\n\nArgs:\nx_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,\nthe tensor on the denominator of the differentiation.\ngradient_tensor: the gradient `tf.Tensor`.", "source": "github-repos"}
{"code": "def TestIamPermissions(self, request, global_params=None):\n    config = self.GetMethodConfig('TestIamPermissions')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.\n\nArgs:\nrequest: (BigqueryTablesTestIamPermissionsRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(TestIamPermissionsResponse) The response message.", "source": "github-repos"}
{"code": "def dbname(self, value):\n    self._dbname = value\n    self._connectionXML.set('dbname', value)", "docstring": "Set the connection's database name property.\n\nArgs:\nvalue:  New name of the database. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def noise_get_turbulence(\n    n: tcod.noise.Noise,\n    f: Sequence[float],\n    oc: float,\n    typ: int = NOISE_DEFAULT,\n) -> float:\n    \n    return float(\n        lib.TCOD_noise_get_turbulence_ex(\n            n.noise_c, ffi.new(\"float[4]\", f), oc, typ\n        )\n    )", "docstring": "Return the turbulence noise sampled from the ``f`` coordinate.\n\nArgs:\nn (Noise): A Noise instance.\nf (Sequence[float]): The point to sample the noise from.\ntyp (int): The noise algorithm to use.\noctaves (float): The level of level.  Should be more than 1.\n\nReturns:\nfloat: The sampled noise value.", "source": "juraj-google-style"}
{"code": "def as_dimension(value):\n    if isinstance(value, Dimension):\n        return value\n    else:\n        return Dimension(value)", "docstring": "Converts the given value to a Dimension.\n\nA Dimension input will be returned unmodified.\nAn input of `None` will be converted to an unknown Dimension.\nAn integer input will be converted to a Dimension with that value.\n\nArgs:\nvalue: The value to be converted.\n\nReturns:\nA Dimension corresponding to the given value.", "source": "github-repos"}
{"code": "def enclose_points(points, clip_rect):\n        \n        point_array = ffi.new('SDL_Point[]', len(points))\n        for i, p in enumerate(points):\n            point_array[i] = p._ptr\n        enclosing_rect = Rect()\n        if lib.SDL_EnclosePoints(point_array, len(points), clip_rect._ptr, enclosing_rect._ptr):\n            return enclosing_rect\n        else:\n            return None", "docstring": "Return the minimal rectangle enclosing the given set of points\n\nArgs:\npoints (List[Point]): The set of points that the new Rect must enclose.\nclip_rect (Rect): A clipping Rect.\n\nReturns:\nRect: A new Rect enclosing the given points.", "source": "juraj-google-style"}
{"code": "def replace_with_operand(cls, old_builder: 'Builder', old_path: str, replacement_node: _evaluation.ExpressionNode) -> 'Builder':\n    localized = copy.deepcopy(old_builder.node)\n    localized.replace_operand(old_path, replacement_node)\n    return cls(localized, old_builder._handler)", "docstring": "Returns a builder with the old path replaced with a new node.\n\nArgs:\nold_builder: Builder with nodes to be copied into the new one.\nold_path: String of the old path to be replaced in the old_builder. If no\npath matches, then the old builder will be the same as the new builder.\nreplacement_node: An expression node that will replace the node that\nmatches the old_path.\n\nReturns:\nA builder with the new expression node tree.", "source": "github-repos"}
{"code": "def create_single_payment(self, order_number, order_description, order_items, amount, return_url, contact=None, currency=None, lang=None, additional_params=None):\n    return self.create_payment(contact, {'amount': amount, 'currency': (currency if (currency is not None) else settings.GOPAY_CURRENCY), 'lang': (lang if (lang is not None) else settings.GOPAY_LANG), 'additional_params': ([] if (additional_params is None) else [{'name': key, 'value': str(value)} for (key, value) in additional_params.items()]), 'order_number': str(order_number), 'order_description': order_description, 'items': [{'name': key, 'amount': value} for (key, value) in order_items.items()], 'callback': {'return_url': return_url, 'notification_url': '{}{}'.format(settings.GOPAY_DOMAIN, reverse('gopay_notify'))}})", "docstring": "Create a single payment.\n\nArgs:\ncontact: JSON describing a payer (see PaymentManager#create_contact)\norder_number: your identifier to the order which the payment is for\norder_description: desription of the order which is show to the\nuser\norder_items: items in order which are shown to the other\n(item name -> amount)\namount: total amount of money which will be paid\nreturl_url: url for rediraction after payment is processed\ncurrency: default is set in settings (GOPAY_CURRENCY)\nlang: default is set in settings (GOPAY_LANG)\nReturns:\ndict: payment status", "source": "codesearchnet"}
{"code": "def encode(self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, bandwidth: Optional[float]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]:\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if bandwidth is None:\n        bandwidth = self.config.target_bandwidths[0]\n    if bandwidth not in self.config.target_bandwidths:\n        raise ValueError(f\"This model doesn't support the bandwidth {bandwidth}. Select one of {self.config.target_bandwidths}.\")\n    _, channels, input_length = input_values.shape\n    if channels < 1 or channels > 2:\n        raise ValueError(f'Number of audio channels must be 1 or 2, but got {channels}')\n    chunk_length = self.config.chunk_length\n    if chunk_length is None:\n        chunk_length = input_length\n        stride = input_length\n    else:\n        stride = self.config.chunk_stride\n    if padding_mask is None:\n        padding_mask = torch.ones_like(input_values).bool()\n    encoded_frames = []\n    scales = []\n    step = chunk_length - stride\n    if input_length % stride - step != 0:\n        raise ValueError('The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly.')\n    for offset in range(0, input_length - step, stride):\n        mask = padding_mask[..., offset:offset + chunk_length].bool()\n        frame = input_values[:, :, offset:offset + chunk_length]\n        encoded_frame, scale = self._encode_frame(frame, bandwidth, mask)\n        encoded_frames.append(encoded_frame)\n        scales.append(scale)\n    encoded_frames = torch.stack(encoded_frames)\n    if not return_dict:\n        return (encoded_frames, scales)\n    return EncodecEncoderOutput(encoded_frames, scales)", "docstring": "Encodes the input audio waveform into discrete codes.\n\nArgs:\ninput_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):\nFloat values of the input audio waveform.\npadding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):\nPadding mask used to pad the `input_values`.\nbandwidth (`float`, *optional*):\nThe target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible\nbandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented\nas bandwidth == 6.0\n\nReturns:\nA list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling\nfactors for each chunk when `normalize` is True. Each frames is a tuple `(codebook, scale)`, with\n`codebook` of shape `[batch_size, num_codebooks, frames]`.", "source": "github-repos"}
{"code": "def nodeids(self, ivs=None, quantifier=None):\n        \n        if ivs is None:\n            nids = list(self._nodeids)\n        else:\n            _vars = self._vars\n            nids = []\n            for iv in ivs:\n                if iv in _vars and IVARG_ROLE in _vars[iv]['refs']:\n                    nids.extend(_vars[iv]['refs'][IVARG_ROLE])\n                else:\n                    raise KeyError(iv)\n        if quantifier is not None:\n            nids = [n for n in nids if self.ep(n).is_quantifier()==quantifier]\n        return nids", "docstring": "Return the list of nodeids given by *ivs*, or all nodeids.\n\nArgs:\nivs: the intrinsic variables of the predications to select;\nif `None`, return all nodeids (but see *quantifier*)\nquantifier: if `True`, only return nodeids of quantifiers;\nif `False`, only return non-quantifiers; if `None`\n(the default), return both", "source": "juraj-google-style"}
{"code": "def normalize(inputs,\n              epsilon=1e-8,\n              scope=\"ln\"):\n    \n    with tf.variable_scope(scope):\n        inputs_shape = inputs.get_shape()\n        params_shape = inputs_shape[-1:]\n\n        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)\n        beta = tf.Variable(tf.zeros(params_shape))\n        gamma = tf.Variable(tf.ones(params_shape))\n        normalized = (inputs - mean) / ((variance + epsilon) ** (.5))\n        outputs = gamma * normalized + beta\n\n    return outputs", "docstring": "Applies layer normalization.\n\nArgs:\ninputs: A tensor with 2 or more dimensions, where the first dimension has\n`batch_size`.\nepsilon: A floating number. A very small number for preventing ZeroDivision Error.\nscope: Optional scope for `variable_scope`.\nreuse: Boolean, whether to reuse the weights of a previous layer\nby the same name.\n\nReturns:\nA tensor with the same shape and data dtype as `inputs`.", "source": "juraj-google-style"}
{"code": "def parse_requirements(file_):\n    modules = []\n    delim = ['<', '>', '=', '!', '~']\n    try:\n        f = open_func(file_, 'r')\n    except OSError:\n        logging.error('Failed on file: {}'.format(file_))\n        raise\n    else:\n        data = [x.strip() for x in f.readlines() if (x != '\\n')]\n    finally:\n        f.close()\n    data = [x for x in data if x[0].isalpha()]\n    for x in data:\n        if (not any([(y in x) for y in delim])):\n            modules.append({'name': x, 'version': None})\n        for y in x:\n            if (y in delim):\n                module = x.split(y)\n                module_name = module[0]\n                module_version = module[(- 1)].replace('=', '')\n                module = {'name': module_name, 'version': module_version}\n                if (module not in modules):\n                    modules.append(module)\n                break\n    return modules", "docstring": "Parse a requirements formatted file.\n\nTraverse a string until a delimiter is detected, then split at said\ndelimiter, get module name by element index, create a dict consisting of\nmodule:version, and add dict to list of parsed modules.\n\nArgs:\nfile_: File to parse.\n\nRaises:\nOSerror: If there's any issues accessing the file.\n\nReturns:\ntuple: The contents of the file, excluding comments.", "source": "codesearchnet"}
{"code": "def poll(self, timeout_ms=None, future=None):\n    if (future is not None):\n        timeout_ms = 100\n    elif (timeout_ms is None):\n        timeout_ms = self.config['request_timeout_ms']\n    elif (not isinstance(timeout_ms, (int, float))):\n        raise TypeError(('Invalid type for timeout: %s' % type(timeout_ms)))\n    responses = []\n    while True:\n        with self._lock:\n            if self._closed:\n                break\n            for node_id in list(self._connecting):\n                self._maybe_connect(node_id)\n            metadata_timeout_ms = self._maybe_refresh_metadata()\n            if ((future is not None) and future.is_done):\n                timeout = 0\n            else:\n                idle_connection_timeout_ms = self._idle_expiry_manager.next_check_ms()\n                timeout = min(timeout_ms, metadata_timeout_ms, idle_connection_timeout_ms, self.config['request_timeout_ms'])\n                timeout = max(0, (timeout / 1000))\n            self._poll(timeout)\n            responses.extend(self._fire_pending_completed_requests())\n        if ((future is None) or future.is_done):\n            break\n    return responses", "docstring": "Try to read and write to sockets.\n\nThis method will also attempt to complete node connections, refresh\nstale metadata, and run previously-scheduled tasks.\n\nArguments:\ntimeout_ms (int, optional): maximum amount of time to wait (in ms)\nfor at least one response. Must be non-negative. The actual\ntimeout will be the minimum of timeout, request timeout and\nmetadata timeout. Default: request_timeout_ms\nfuture (Future, optional): if provided, blocks until future.is_done\n\nReturns:\nlist: responses received (can be empty)", "source": "codesearchnet"}
{"code": "def _apply_conv(self, inputs, w):\n    \n    tiled_weights = tf.tile(w, [1, 1, self._input_channels, 1])\n    outputs = tf.nn.depthwise_conv2d(inputs,\n                                     tiled_weights,\n                                     strides=self.stride,\n                                     padding=self._conv_op_padding,\n                                     data_format=self._data_format)\n    return outputs", "docstring": "Apply a depthwise_conv2d operation on `inputs` using variable `w`.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\nw: A weight matrix of the same type as `inputs`.\n\nReturns:\noutputs: The result of the convolution operation on `inputs`.", "source": "juraj-google-style"}
{"code": "def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):\n    gold = arraylike_to_numpy(gold)\n    if (len(ignore_in_pred) > 0):\n        raise ValueError('ignore_in_pred not defined for ROC-AUC score.')\n    keep = [(x not in ignore_in_gold) for x in gold]\n    gold = gold[keep]\n    probs = probs[(keep, :)]\n    gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy()\n    return skm.roc_auc_score(gold_s, probs)", "docstring": "Compute the ROC AUC score, given the gold labels and predicted probs.\n\nArgs:\ngold: A 1d array-like of gold labels\nprobs: A 2d array-like of predicted probabilities\nignore_in_gold: A list of labels for which elements having that gold\nlabel will be ignored.\n\nReturns:\nroc_auc_score: The (float) roc_auc score", "source": "codesearchnet"}
{"code": "def get_ggt(self, n, u):\n        \n        gk = self[0].einsum_sequence([n, u, n, u])\n        result = -(2*gk*np.outer(u, u) + self[0].einsum_sequence([n, n])\n                   + self[1].einsum_sequence([n, u, n, u])) / (2*gk)\n        return result", "docstring": "Gets the Generalized Gruneisen tensor for a given\nthird-order elastic tensor expansion.\n\nArgs:\nn (3x1 array-like): normal mode direction\nu (3x1 array-like): polarization direction", "source": "juraj-google-style"}
{"code": "def generate_scaling_plot(timing_data, title, ylabel, description, plot_file):\n    \n    proc_counts = timing_data['proc_counts']\n    if len(proc_counts) > 2:\n        plt.figure(figsize=(10, 8), dpi=150)\n        plt.title(title)\n        plt.xlabel(\"Number of processors\")\n        plt.ylabel(ylabel)\n\n        for case, case_color in zip(['bench', 'model'], ['\n            case_data = timing_data[case]\n            means = case_data['means']\n            mins = case_data['mins']\n            maxs = case_data['maxs']\n            plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5)\n            plt.plot(proc_counts, means, 'o-', color=case_color, label=case)\n\n        plt.legend(loc='best')\n    else:\n        plt.figure(figsize=(5, 3))\n        plt.axis('off')\n        plt.text(0.4, 0.8, \"ERROR:\")\n        plt.text(0.0, 0.6, \"Not enough data points to draw scaling plot\")\n        plt.text(0.0, 0.44, \"To generate this data rerun BATS with the\")\n        plt.text(0.0, 0.36, \"performance option enabled.\")\n\n    if livvkit.publish:\n        plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)\n    plt.savefig(plot_file)\n    plt.close()\n    return elements.image(title, description, os.path.basename(plot_file))", "docstring": "Generate a scaling plot.\n\nArgs:\ntiming_data: data returned from a `*_scaling` method\ntitle: the title of the plot\nylabel: the y-axis label of the plot\ndescription: a description of the plot\nplot_file: the file to write out to\n\nReturns:\nan image element containing the plot file and metadata", "source": "juraj-google-style"}
{"code": "def autobuild_bootstrap_file(file_name, image_list):\n    family = utilities.get_family('module_settings.json')\n    target = family.platform_independent_target()\n    resolver = ProductResolver.Create()\n    env = Environment(tools=[])\n    output_dir = target.build_dirs()['output']\n    build_dir = target.build_dirs()['build']\n    build_output_name = os.path.join(build_dir, file_name)\n    full_output_name = os.path.join(output_dir, file_name)\n    processed_input_images = []\n    for image_name in image_list:\n        image_info = resolver.find_unique('firmware_image', image_name)\n        image_path = image_info.full_path\n        hex_path = arm.ensure_image_is_hex(image_path)\n        processed_input_images.append(hex_path)\n    env.Command(build_output_name, processed_input_images, action=Action(arm.merge_hex_executables, ('Merging %d hex files into $TARGET' % len(processed_input_images))))\n    env.Command(full_output_name, build_output_name, Copy('$TARGET', '$SOURCE'))", "docstring": "Combine multiple firmware images into a single bootstrap hex file.\n\nThe files listed in image_list must be products of either this tile or any\ndependency tile and should correspond exactly with the base name listed on\nthe products section of the module_settings.json file of the corresponding\ntile.  They must be listed as firmware_image type products.\n\nThis function keeps a global map of all of the intermediate files that it\nhas had to create so that we don't try to build them multiple times.\n\nArgs:\nfile_name(str): Full name of the output bootstrap hex file.\nimage_list(list of str): List of files that will be combined into a\nsingle hex file that will be used to flash a chip.", "source": "codesearchnet"}
{"code": "def __init__(self, prefix_length: int, option_suffix: Text = '') -> None:\n        \n        super().__init__(option_suffix)\n        self._prefix_length = prefix_length", "docstring": "Create a new instance.\n\nArgs:\nprefix_length:\nAmount of characters to skip at the beginning of the entry", "source": "juraj-google-style"}
{"code": "def _build_inner(self, slice_content: 'Iterable[cfg.Variable]') -> 'tuple[list[_base.BaseValue], set[int]]':\n    inner = []\n    ellipses = set()\n    for var in slice_content:\n        if len(var.bindings) > 1:\n            self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, var.data)\n            inner.append(self.ctx.convert.unsolvable)\n        else:\n            val = var.bindings[0].data\n            if val is self.ctx.convert.ellipsis:\n                ellipses.add(len(inner))\n                inner.append(self.ctx.convert.unsolvable)\n            else:\n                inner.append(val)\n    return (inner, ellipses)", "docstring": "Build the list of parameters.\n\nArgs:\nslice_content: The iterable of variables to extract parameters from.\n\nReturns:\nA tuple of a list of parameters and a set of indices at which an ellipsis\nwas replaced with Any.", "source": "github-repos"}
{"code": "def GetHeaderGuardCPPVariable(filename):\n    filename = re.sub('_flymake\\\\.h$', '.h', filename)\n    filename = re.sub('/\\\\.flymake/([^/]*)$', '/\\\\1', filename)\n    filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')\n    fileinfo = FileInfo(filename)\n    file_path_from_root = fileinfo.RepositoryName()\n    if _root:\n        suffix = os.sep\n        if (suffix == '\\\\'):\n            suffix += '\\\\'\n        file_path_from_root = re.sub((('^' + _root) + suffix), '', file_path_from_root)\n    return (re.sub('[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_')", "docstring": "Returns the CPP variable that should be used as a header guard.\n\nArgs:\nfilename: The name of a C++ header file.\n\nReturns:\nThe CPP variable that should be used as a header guard in the\nnamed file.", "source": "codesearchnet"}
{"code": "def _scoped_subscribe(tensor, side_effects, control_cache):\n    with ops.device(tensor.device):\n        with _preserve_control_flow_context(tensor):\n            return _subscribe(tensor, side_effects, control_cache)", "docstring": "Helper method that subscribes a single tensor to a list of side_effects.\n\nThis is a thin wrapper around `_subscribe` and ensures that the side effect\nops are added within the same device and control flow context of the\nsubscribed tensor.\n\nArgs:\ntensor: The `tf.Tensor` to be subscribed.\nside_effects: List of side_effect functions, see subscribe for details.\ncontrol_cache: `_ControlOutputCache` helper to get control_outputs faster.\n\nReturns:\nThe modified replacement to the passed in tensor which triggers the side\neffects or the given tensor, if it was already been subscribed.", "source": "github-repos"}
{"code": "def _acquire(self, uuid_path):\n    for index in range(self._min_third_octet, (self._max_third_octet + 1)):\n        lease = self.create_lease_object_from_idx(index)\n        if self._lease_valid(lease):\n            continue\n        self._take_lease(lease, uuid_path, safe=False)\n        return lease.to_ip_network()\n    raise LagoSubnetLeaseStoreFullException(self.get_allowed_range())", "docstring": "Lease a free network for the given uuid path\n\nArgs:\nuuid_path (str): Path to the uuid file of a :class:`lago.Prefix`\n\nReturns:\nnetaddr.IPNetwork: Which represents the selected subnet\n\nRaises:\nLagoSubnetLeaseException: If the store is full", "source": "codesearchnet"}
{"code": "def _is_none_or_undef(value):\n    return value is None or isinstance(value, variables.UndefinedReturnValue) or isinstance(value, variables.Undefined)", "docstring": "Tests whether a value is None or undefined.\n\nAutoGraph represents undefined symbols using special objects of type Undefined\nor UndefinedReturnValue.\n\nArgs:\nvalue: value to test\n\nReturns:\nBoolean", "source": "github-repos"}
{"code": "def __register_types(self):\n    try:\n        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.types']['plugins']:\n            cls = entry_point.load()\n            self.types[ResourceType.get(cls.resource_type).resource_type_id] = cls\n            logger.debug('Registered resource type {}'.format(cls.__name__))\n    except SQLAlchemyError as ex:\n        logger.warning('Failed loading type information: {}'.format(ex))", "docstring": "Iterates all entry points for resource types and registers a `resource_type_id` to class mapping\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def serialize_sparse_tensors(tensors):\n    ret = nest.pack_sequence_as(tensors, [sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant) if isinstance(tensor, sparse_tensor.SparseTensor) else tensor for tensor in nest.flatten(tensors)])\n    return ret", "docstring": "Serializes sparse tensors.\n\nArgs:\ntensors: a tensor structure to serialize.\n\nReturns:\n`tensors` with any sparse tensors replaced by their serialized version.", "source": "github-repos"}
{"code": "def training_env():\n    from sagemaker_containers import _env\n    return _env.TrainingEnv(resource_config=_env.read_resource_config(), input_data_config=_env.read_input_data_config(), hyperparameters=_env.read_hyperparameters())", "docstring": "Create a TrainingEnv.\n\nReturns:\nTrainingEnv: an instance of TrainingEnv", "source": "codesearchnet"}
{"code": "def _tensor_proto_to_health_pill(self, tensor_event, node_name, device, output_slot):\n    return self._process_health_pill_value(wall_time=tensor_event.wall_time, step=tensor_event.step, device_name=device, output_slot=output_slot, node_name=node_name, tensor_proto=tensor_event.tensor_proto)", "docstring": "Converts an event_accumulator.TensorEvent to a HealthPillEvent.\n\nArgs:\ntensor_event: The event_accumulator.TensorEvent to convert.\nnode_name: The name of the node (without the output slot).\ndevice: The device.\noutput_slot: The integer output slot this health pill is relevant to.\n\nReturns:\nA HealthPillEvent.", "source": "codesearchnet"}
{"code": "def rtt_control(self, command, config):\n        \n        config_byref = ctypes.byref(config) if config is not None else None\n        res = self._dll.JLINK_RTTERMINAL_Control(command, config_byref)\n\n        if res < 0:\n            raise errors.JLinkRTTException(res)\n\n        return res", "docstring": "Issues an RTT Control command.\n\nAll RTT control is done through a single API call which expects\nspecifically laid-out configuration structures.\n\nArgs:\nself (JLink): the ``JLink`` instance\ncommand (int): the command to issue (see enums.JLinkRTTCommand)\nconfig (ctypes type): the configuration to pass by reference.\n\nReturns:\nAn integer containing the result of the command.", "source": "juraj-google-style"}
{"code": "def run(inputs, program, outputs):\n    root = tempfile.mkdtemp()\n    try:\n        cwd = os.getcwd()\n        for (fake, real) in inputs:\n            parent = os.path.join(root, os.path.dirname(fake))\n            if (not os.path.exists(parent)):\n                os.makedirs(parent)\n            if (hasattr(os, 'symlink') and (not (os.name == 'nt'))):\n                os.symlink(os.path.join(cwd, real), os.path.join(root, fake))\n            else:\n                shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake))\n        if (subprocess.call((program + [root])) != 0):\n            return 1\n        for (fake, real) in outputs:\n            shutil.copyfile(os.path.join(root, fake), real)\n        return 0\n    finally:\n        try:\n            shutil.rmtree(root)\n        except EnvironmentError:\n            pass", "docstring": "Creates temp symlink tree, runs program, and copies back outputs.\n\nArgs:\ninputs: List of fake paths to real paths, which are used for symlink tree.\nprogram: List containing real path of program and its arguments. The\nexecroot directory will be appended as the last argument.\noutputs: List of fake outputted paths to copy back to real paths.\nReturns:\n0 if succeeded or nonzero if failed.", "source": "codesearchnet"}
{"code": "def EncodeToBytes(data):\n    if data is None:\n        return b''\n    if isinstance(data, bytes):\n        return data\n    s = str(data)\n    try:\n        return s.encode('iso-8859-1')\n    except UnicodeEncodeError:\n        pass\n    try:\n        return s.encode(GetConsoleAttr().GetEncoding())\n    except UnicodeEncodeError:\n        pass\n    return s.encode('utf-8')", "docstring": "Encode data to bytes.\n\nThe primary use case is for base64/mime style 7-bit ascii encoding where the\nencoder input must be bytes. \"safe\" means that the conversion always returns\nbytes and will not raise codec exceptions.\n\nIf data is text then an 8-bit ascii encoding is attempted, then the console\nencoding, and finally utf-8.\n\nArgs:\ndata: Any bytes, string, or object that has str() or unicode() methods.\n\nReturns:\nA bytes string representation of the data.", "source": "github-repos"}
{"code": "def split(self, path):\n    path = path.strip()\n    if not path.startswith(GCSFileSystem.GCS_PREFIX):\n        raise ValueError('Path %r must be GCS path.' % path)\n    prefix_len = len(GCSFileSystem.GCS_PREFIX)\n    last_sep = path[prefix_len:].rfind('/')\n    if last_sep >= 0:\n        last_sep += prefix_len\n    if last_sep > 0:\n        return (path[:last_sep], path[last_sep + 1:])\n    elif last_sep < 0:\n        return (path, '')\n    else:\n        raise ValueError('Invalid path: %s' % path)", "docstring": "Splits the given path into two parts.\n\nSplits the path into a pair (head, tail) such that tail contains the last\ncomponent of the path and head contains everything up to that.\n\nHead will include the GCS prefix ('gs://').\n\nArgs:\npath: path as a string\nReturns:\na pair of path components as strings.", "source": "github-repos"}
{"code": "def _ContainsAny(self, verb, expected):\n    if len(expected) == 1 and expected[0] in self._actual:\n        return\n    if expected:\n        try:\n            actual_set = set(self._actual)\n        except TypeError:\n            actual_set = self._actual\n        for i in expected:\n            if i in actual_set:\n                return\n    self._FailComparingValues(verb, expected)", "docstring": "Determines if the subject contains any of the expected elements.\n\nHelper function for ContainsAnyIn() and ContainsAnyOf().\n\nArgs:\nverb: string describing how the expected elements should be contained.\nexpected: iterable of objects that should be contained in the subject.\n\nReturns:\nNone if the subject contains any of the expected elements.\n\nRaises:\nTruthAssertionError: the subject is missing all of the expected elements.", "source": "github-repos"}
{"code": "def _calculate_scores(self, query, key):\n    return NotImplementedError", "docstring": "Calculates attention scores.\n\nArgs:\nquery: Query tensor of shape `[batch_size, Tq, dim]`.\nkey: Key tensor of shape `[batch_size, Tv, dim]`.\n\nReturns:\nTensor of shape `[batch_size, Tq, Tv]`.", "source": "github-repos"}
{"code": "def compress(self, value: List[str]) -> value_class:\n        \n\n        localized_value = self.value_class()\n\n        for (lang_code, _), value in zip(settings.LANGUAGES, value):\n            localized_value.set(lang_code, value)\n\n        return localized_value", "docstring": "Compresses the values from individual fields\ninto a single :see:LocalizedValue instance.\n\nArguments:\nvalue:\nThe values from all the widgets.\n\nReturns:\nA :see:LocalizedValue containing all\nthe value in several languages.", "source": "juraj-google-style"}
{"code": "def _AddOption(self, name):\n    if (name in [option.name for option in self.options]):\n        raise TextFSMTemplateError(('Duplicate option \"%s\"' % name))\n    try:\n        option = self._options_cls.GetOption(name)(self)\n    except AttributeError:\n        raise TextFSMTemplateError(('Unknown option \"%s\"' % name))\n    self.options.append(option)", "docstring": "Add an option to this Value.\n\nArgs:\nname: (str), the name of the Option to add.\n\nRaises:\nTextFSMTemplateError: If option is already present or\nthe option does not exist.", "source": "codesearchnet"}
{"code": "def __init__(self, usb, chunk_kb=1024):\n        \n        self.usb = usb\n        self.chunk_kb = chunk_kb", "docstring": "Constructs a FastbootProtocol instance.\n\nArgs:\nusb: UsbHandle instance.\nchunk_kb: Packet size. For older devices, 4 may be required.", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPath(self, path):\n    if (path is None):\n        return None\n    (file_entry_type, _) = self._paths.get(path, (None, None))\n    if (not file_entry_type):\n        return None\n    path_spec = fake_path_spec.FakePathSpec(location=path)\n    return fake_file_entry.FakeFileEntry(self._resolver_context, self, path_spec, file_entry_type=file_entry_type)", "docstring": "Retrieves a file entry for a path.\n\nArgs:\npath (str): path of the file entry.\n\nReturns:\nFakeFileEntry: a file entry or None if not available.", "source": "codesearchnet"}
{"code": "def service_info(self, short_name):\n    if (short_name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=short_name)\n    info = {}\n    info['short_name'] = short_name\n    info['long_name'] = self.services[short_name]['state'].long_name\n    info['preregistered'] = self.services[short_name]['state'].preregistered\n    return info", "docstring": "Get static information about a service.\n\nArgs:\nshort_name (string): The short name of the service to query\n\nReturns:\ndict: A dictionary with the long_name and preregistered info\non this service.", "source": "codesearchnet"}
{"code": "def get_concatenated_pdf_from_disk(filenames: Iterable[str], start_recto: bool=True) -> bytes:\n    if start_recto:\n        writer = PdfFileWriter()\n        for filename in filenames:\n            if filename:\n                if ((writer.getNumPages() % 2) != 0):\n                    writer.addBlankPage()\n                writer.appendPagesFromReader(PdfFileReader(open(filename, 'rb')))\n        return pdf_from_writer(writer)\n    else:\n        merger = PdfFileMerger()\n        for filename in filenames:\n            if filename:\n                merger.append(open(filename, 'rb'))\n        return pdf_from_writer(merger)", "docstring": "Concatenates PDFs from disk and returns them as an in-memory binary PDF.\n\nArgs:\nfilenames: iterable of filenames of PDFs to concatenate\nstart_recto: start a new right-hand page for each new PDF?\n\nReturns:\nconcatenated PDF, as ``bytes``", "source": "codesearchnet"}
{"code": "def _get_executor_init(self, workers):\n\n    def pool_fn(seqs):\n        pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue()))\n        _DATA_POOLS.add(pool)\n        return pool\n    return pool_fn", "docstring": "Gets the Pool initializer for multiprocessing.\n\nArgs:\nworkers: Number of workers.\n\nReturns:\nFunction, a Function to initialize the pool", "source": "github-repos"}
{"code": "def AddTripDecoration(self, triplist, color=\"\n    \n    tmpstr = self._DrawTrips(triplist,color)\n    self._decorators.append(tmpstr)", "docstring": "Flushes existing decorations and highlights the given trips.\n\nArgs:\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n# An optional string with a html color code\ncolor: \"#fff\"", "source": "juraj-google-style"}
{"code": "def dp990(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dp990`'.format(value))\n\n        self._dp990 = value", "docstring": "Corresponds to IDD Field `dp990`\nDew-point temperature corresponding to 90.0% annual cumulative\nfrequency of occurrence (cold conditions)\n\nArgs:\nvalue (float): value for IDD Field `dp990`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_statistics(self, id_or_uri, port_name=''):\n    uri = (self._client.build_uri(id_or_uri) + '/statistics')\n    if port_name:\n        uri = ((uri + '/') + port_name)\n    return self._client.get(uri)", "docstring": "Gets the statistics from an interconnect.\n\nArgs:\nid_or_uri:  Can be either the interconnect id or the interconnect uri.\nport_name (str): A specific port name of an interconnect.\n\nReturns:\ndict: The statistics for the interconnect that matches id.", "source": "codesearchnet"}
{"code": "def _logsum_expbig_minus_expsmall(big, small):\n  \n  with tf.name_scope(\"logsum_expbig_minus_expsmall\"):\n    return tf.math.log1p(-tf.exp(small - big)) + big", "docstring": "Stable evaluation of `Log[exp{big} - exp{small}]`.\n\nTo work correctly, we should have the pointwise relation:  `small <= big`.\n\nArgs:\nbig: Floating-point `Tensor`\nsmall: Floating-point `Tensor` with same `dtype` as `big` and broadcastable\nshape.\n\nReturns:\n`Tensor` of same `dtype` of `big` and broadcast shape.", "source": "juraj-google-style"}
{"code": "def create(self, name, **request_parameters):\n    check_type(name, basestring, may_be_none=False)\n    post_data = dict_from_items_with_values(request_parameters, name=name)\n    json_data = self._session.post(API_ENDPOINT, json=post_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Create a team.\n\nThe authenticated user is automatically added as a member of the team.\n\nArgs:\nname(basestring): A user-friendly name for the team.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nTeam: A Team object with the details of the created team.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def _get_client_by_hostname(self, hostname):\n    print('Searching for client: {0:s}'.format(hostname))\n    try:\n        search_result = self.grr_api.SearchClients(hostname)\n    except grr_errors.UnknownError as exception:\n        self.state.add_error('Could not search for host {0:s}: {1!s}'.format(hostname, exception), critical=True)\n        return None\n    result = []\n    for client in search_result:\n        if (hostname.lower() in client.data.os_info.fqdn.lower()):\n            result.append((client.data.last_seen_at, client))\n    if (not result):\n        self.state.add_error('Could not get client_id for {0:s}'.format(hostname), critical=True)\n        return None\n    (last_seen, client) = sorted(result, key=(lambda x: x[0]), reverse=True)[0]\n    last_seen_datetime = datetime.datetime.utcfromtimestamp((last_seen / 1000000))\n    last_seen_seconds = (datetime.datetime.utcnow() - last_seen_datetime).total_seconds()\n    last_seen_minutes = int(round((last_seen_seconds / 60)))\n    print('{0:s}: Found active client'.format(client.client_id))\n    print('Found active client: {0:s}'.format(client.client_id))\n    print('Client last seen: {0:s} ({1:d} minutes ago)'.format(last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'), last_seen_minutes))\n    return client", "docstring": "Search GRR by hostname and get the latest active client.\n\nArgs:\nhostname: hostname to search for.\n\nReturns:\nGRR API Client object\n\nRaises:\nDFTimewolfError: if no client ID found for hostname.", "source": "codesearchnet"}
{"code": "def add_gene(self, gene):\n        \n        logger.debug(\"Adding gene {0} to variant {1}\".format(\n            gene, self['variant_id']))\n        self['genes'].append(gene)", "docstring": "Add the information of a gene\n\nThis adds a gene dict to variant['genes']\n\nArgs:\ngene (dict): A gene dictionary", "source": "juraj-google-style"}
{"code": "def create_branch(self, branch_name: str):\n    LOGGER.info('creating branch: %s', branch_name)\n    self._validate_branch_name(branch_name)\n    if (branch_name in self.list_branches()):\n        LOGGER.error('branch already exists')\n        sys.exit((- 1))\n    new_branch = self.repo.create_head(branch_name)\n    new_branch.commit = self.repo.head.commit", "docstring": "Creates a new branch\n\nArgs:\nbranch_name: name of the branch", "source": "codesearchnet"}
{"code": "def add_arguments(self, parser):\n    parser.add_argument('name', nargs=1, choices=['kinetis'], help='name of MCU to unlock')\n    return self.add_common_arguments(parser, True)", "docstring": "Adds the unlock command arguments to the parser.\n\nArgs:\nself (UnlockCommand): the ``UnlockCommand`` instance\nparser (argparse.ArgumentParser): the parser to add the arguments to\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def normalize(self, inplace=False):\n        \n        if inplace:\n            nrm = self.norm()\n            self.data /= nrm\n            return None\n        nrm = self.norm()\n        data_copy = np.array(self.data, copy=True)\n        data_copy /= nrm\n        return Quaternion(data_copy)", "docstring": "Normalizes a Quaternion to unit length\nso that it represents a valid rotation.\n\nArgs:\ninplace (bool): Do an inplace normalization.\n\nReturns:\nQuaternion: Normalized quaternion.", "source": "juraj-google-style"}
{"code": "def calc_health_pill(tensor):\n    health_pill = ([0.0] * 14)\n    if (not isinstance(tensor, np.ndarray)):\n        return health_pill\n    health_pill[0] = 1.0\n    if (not (np.issubdtype(tensor.dtype, np.float) or np.issubdtype(tensor.dtype, np.complex) or np.issubdtype(tensor.dtype, np.integer) or (tensor.dtype == np.bool))):\n        return None\n    health_pill[1] = float(np.size(tensor))\n    nan_mask = np.isnan(tensor)\n    inf_mask = np.isinf(tensor)\n    health_pill[2] = float(np.sum(nan_mask))\n    health_pill[3] = float(np.sum((tensor == (- np.inf))))\n    health_pill[4] = float(np.sum(np.logical_and(np.logical_not(inf_mask), (tensor < 0.0))))\n    health_pill[5] = float(np.sum((tensor == 0.0)))\n    health_pill[6] = float(np.sum(np.logical_and(np.logical_not(inf_mask), (tensor > 0.0))))\n    health_pill[7] = float(np.sum((tensor == np.inf)))\n    finite_subset = tensor[np.logical_and(np.logical_not(nan_mask), np.logical_not(inf_mask))]\n    if np.size(finite_subset):\n        health_pill[8] = float(np.min(finite_subset))\n        health_pill[9] = float(np.max(finite_subset))\n        health_pill[10] = float(np.mean(finite_subset))\n        health_pill[11] = float(np.var(finite_subset))\n    else:\n        health_pill[8] = np.inf\n        health_pill[9] = (- np.inf)\n        health_pill[10] = np.nan\n        health_pill[11] = np.nan\n    health_pill[12] = (- 1.0)\n    health_pill[13] = float(len(tensor.shape))\n    health_pill.extend([float(x) for x in tensor.shape])\n    return health_pill", "docstring": "Calculate health pill of a tensor.\n\nArgs:\ntensor: An instance of `np.array` (for initialized tensors) or\n`tensorflow.python.debug.lib.debug_data.InconvertibleTensorProto`\n(for unininitialized tensors).\n\nReturns:\nIf `tensor` is an initialized tensor of numeric or boolean types:\nthe calculated health pill, as a `list` of `float`s.\nElse if `tensor` is an initialized tensor with `string`, `resource` or any\nother non-numeric types:\n`None`.\nElse (i.e., if `tensor` is uninitialized): An all-zero `list`, with the\nfirst element signifying that the tensor is uninitialized.", "source": "codesearchnet"}
{"code": "def attention_params_simple(\n    mesh, io_dim, kv_dim, heads_dim, variable_dtype):\n  \n  return AttentionParams(\n      mesh,\n      query_input_dim=io_dim,\n      memory_input_dim=io_dim,\n      output_dim=io_dim,\n      key_dim=kv_dim,\n      value_dim=kv_dim,\n      query_heads_dims=[heads_dim],\n      memory_heads_dims=[heads_dim],\n      variable_dtype=variable_dtype)", "docstring": "Common case attention parameters.\n\nArgs:\nmesh: a Mesh\nio_dim: a Dimension (channels dimension of inputs and outputs)\nkv_dim: a Dimension (channels in keys and values)\nheads_dim: a Dimension (number of attention \"heads\")\nvariable_dtype: a mtf.VariableDType\nReturns:\nan AttentionParams", "source": "juraj-google-style"}
{"code": "def create_knowledge_base(project_id, display_name):\n    \n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.KnowledgeBasesClient()\n    project_path = client.project_path(project_id)\n\n    knowledge_base = dialogflow.types.KnowledgeBase(\n        display_name=display_name)\n\n    response = client.create_knowledge_base(project_path, knowledge_base)\n\n    print('Knowledge Base created:\\n')\n    print('Display Name: {}\\n'.format(response.display_name))\n    print('Knowledge ID: {}\\n'.format(response.name))", "docstring": "Creates a Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\ndisplay_name: The display name of the Knowledge base.", "source": "juraj-google-style"}
{"code": "def notify(self, subsystem, recipient, subject, body_html, body_text):\n        \n        if not re.match(RGX_EMAIL_VALIDATION_PATTERN, recipient, re.I):\n            raise ValueError('Invalid recipient provided')\n\n        email = Email()\n        email.timestamp = datetime.now()\n        email.subsystem = subsystem\n        email.sender = self.sender\n        email.recipients = recipient\n        email.subject = subject\n        email.uuid = uuid.uuid4()\n        email.message_html = body_html\n        email.message_text = body_text\n\n        method = dbconfig.get('method', NS_EMAIL, 'ses')\n        try:\n            if method == 'ses':\n                self.__send_ses_email([recipient], subject, body_html, body_text)\n\n            elif method == 'smtp':\n                self.__send_smtp_email([recipient], subject, body_html, body_text)\n\n            else:\n                raise ValueError('Invalid email method: {}'.format(method))\n\n            db.session.add(email)\n            db.session.commit()\n        except Exception as ex:\n            raise EmailSendError(ex)", "docstring": "Method to send a notification. A plugin may use only part of the information, but all fields are required.\n\nArgs:\nsubsystem (`str`): Name of the subsystem originating the notification\nrecipient (`str`): Recipient email address\nsubject (`str`): Subject / title of the notification\nbody_html (`str)`: HTML formatted version of the message\nbody_text (`str`): Text formatted version of the message\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def replace_species(self, species_mapping):\n        \n\n        species_mapping = {get_el_sp(k): v\n                           for k, v in species_mapping.items()}\n        sp_to_replace = set(species_mapping.keys())\n        sp_in_structure = set(self.composition.keys())\n        if not sp_in_structure.issuperset(sp_to_replace):\n            warnings.warn(\n                \"Some species to be substituted are not present in \"\n                \"structure. Pls check your input. Species to be \"\n                \"substituted = %s; Species in structure = %s\"\n                % (sp_to_replace, sp_in_structure))\n\n        for site in self._sites:\n            if sp_to_replace.intersection(site.species):\n                c = Composition()\n                for sp, amt in site.species.items():\n                    new_sp = species_mapping.get(sp, sp)\n                    try:\n                        c += Composition(new_sp) * amt\n                    except Exception:\n                        c += {new_sp: amt}\n                site.species = c", "docstring": "Swap species.\n\nArgs:\nspecies_mapping (dict): dict of species to swap. Species can be\nelements too. E.g., {Element(\"Li\"): Element(\"Na\")} performs\na Li for Na substitution. The second species can be a\nsp_and_occu dict. For example, a site with 0.5 Si that is\npassed the mapping {Element('Si): {Element('Ge'):0.75,\nElement('C'):0.25} } will have .375 Ge and .125 C.", "source": "juraj-google-style"}
{"code": "def has_access(user, required_roles, match_all=True):\n    \n    \n    if ROLE_ADMIN in user.roles:\n        return True\n\n    if isinstance(required_roles, str):\n        if required_roles in user.roles:\n            return True\n\n        return False\n\n    \n    if match_all:\n        for role in required_roles:\n            if role not in user.roles:\n                return False\n\n        return True\n\n    else:\n        for role in required_roles:\n            if role in user.roles:\n                return True\n\n        return False", "docstring": "Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply\n\nArgs:\nuser (:obj:`User`): User object\nrequired_roles (`list` of `str`): List of roles that the user must have applied\nmatch_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will\nreturn `True`\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def _ProcessSources(\n      self, source_path_specs, storage_writer, filter_find_specs=None):\n    \n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('process_sources')\n\n    self._status = definitions.STATUS_INDICATOR_COLLECTING\n    self._number_of_consumed_event_tags = 0\n    self._number_of_consumed_events = 0\n    self._number_of_consumed_reports = 0\n    self._number_of_consumed_sources = 0\n    self._number_of_consumed_warnings = 0\n    self._number_of_produced_event_tags = 0\n    self._number_of_produced_events = 0\n    self._number_of_produced_reports = 0\n    self._number_of_produced_sources = 0\n    self._number_of_produced_warnings = 0\n\n    path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(\n        source_path_specs, find_specs=filter_find_specs,\n        recurse_file_system=False, resolver_context=self._resolver_context)\n\n    for path_spec in path_spec_generator:\n      if self._abort:\n        break\n\n      \n      \n      event_source = event_sources.FileEntryEventSource(path_spec=path_spec)\n      storage_writer.AddEventSource(event_source)\n\n      self._number_of_produced_sources = storage_writer.number_of_event_sources\n\n      \n      self._UpdateForemanProcessStatus()\n\n      if self._status_update_callback:\n        self._status_update_callback(self._processing_status)\n\n    self._ScheduleTasks(storage_writer)\n\n    if self._abort:\n      self._status = definitions.STATUS_INDICATOR_ABORTED\n    else:\n      self._status = definitions.STATUS_INDICATOR_COMPLETED\n\n    self._number_of_produced_events = storage_writer.number_of_events\n    self._number_of_produced_sources = storage_writer.number_of_event_sources\n    self._number_of_produced_warnings = storage_writer.number_of_warnings\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('process_sources')\n\n    \n    \n    self._UpdateForemanProcessStatus()\n\n    tasks_status = self._task_manager.GetStatusInformation()\n    if self._task_queue_profiler:\n      self._task_queue_profiler.Sample(tasks_status)\n\n    self._processing_status.UpdateTasksStatus(tasks_status)\n\n    if self._status_update_callback:\n      self._status_update_callback(self._processing_status)", "docstring": "Processes the sources.\n\nArgs:\nsource_path_specs (list[dfvfs.PathSpec]): path specifications of\nthe sources to process.\nstorage_writer (StorageWriter): storage writer for a session storage.\nfilter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications\nused in path specification extraction. If set, path specifications\nthat match the find specification will be processed.", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n        \n        if 'month' not in data:\n            data['month'] = 1\n\n        if 'day' not in data:\n            data['day'] = 1\n\n        if 'hour' not in data:\n            data['hour'] = 0\n\n        if 'minute' not in data:\n            data['minute'] = 0\n\n        if 'year' not in data:\n            data['year'] = 2017\n\n        leap_year = True if int(data['year']) == 2016 else False\n        return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year)", "docstring": "Creat datetime from a dictionary.\n\nArgs:\ndata: {\n'month': A value for month between 1-12. (Defualt: 1)\n'day': A value for day between 1-31. (Defualt: 1)\n'hour': A value for hour between 0-23. (Defualt: 0)\n'minute': A value for month between 0-59. (Defualt: 0)\n}", "source": "juraj-google-style"}
{"code": "def CheckFile(self, path):\n    print('Checking: {0:s}'.format(path))\n    definitions_registry = registry.DataTypeDefinitionsRegistry()\n    definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n    result = False\n    try:\n        definitions_reader.ReadFile(definitions_registry, path)\n        result = True\n    except KeyError as exception:\n        logging.warning('Unable to register data type definition in file: {0:s} with error: {1:s}'.format(path, exception))\n    except errors.FormatError as exception:\n        logging.warning('Unable to validate file: {0:s} with error: {1:s}'.format(path, exception))\n    return result", "docstring": "Validates the definition in a file.\n\nArgs:\npath (str): path of the definition file.\n\nReturns:\nbool: True if the file contains valid definitions.", "source": "codesearchnet"}
{"code": "def id_pools_ipv4_subnets(self):\n    if (not self.__id_pools_ipv4_subnets):\n        self.__id_pools_ipv4_subnets = IdPoolsIpv4Subnets(self.__connection)\n    return self.__id_pools_ipv4_subnets", "docstring": "Gets the IdPoolsIpv4Subnets API client.\n\nReturns:\nIdPoolsIpv4Subnets:", "source": "codesearchnet"}
{"code": "def _identity_matrix(num_columns: types.IntTensor, num_digits: types.IntTensor, dtype: tf.DType=None) -> types.IntTensor:\n    dtype = dtype or tf.int32\n    shifts = tf.range(num_digits - 1, num_digits - 1 - num_columns, delta=-1)\n    return tf.bitwise.left_shift(tf.ones(shape=(1, num_columns), dtype=dtype), tf.cast(shifts, dtype))", "docstring": "Returns the identity matrix.\n\nArgs:\nnum_columns: Positive scalar `Tensor` with rank 0 representing the number of\ncolumns of the returned matrix.\nnum_digits: Positive scalar `Tensor` with rank 0 representing the base-2\nprecision of the samples.\ndtype: Optional `dtype`. The `dtype` of the output `Tensor` (either a signed\nor unsigned integer `dtype`).\nDefault value: `None` which maps to `int32`.\n\nReturns:\nA scalar `Tensor` with shape `(1, num_columns)`.", "source": "github-repos"}
{"code": "def _find_human_readable_labels(synsets, synset_to_human):\n  \n  humans = []\n  for s in synsets:\n    assert s in synset_to_human, ('Failed to find: %s' % s)\n    humans.append(synset_to_human[s])\n  return humans", "docstring": "Build a list of human-readable labels.\n\nArgs:\nsynsets: list of strings; each string is a unique WordNet ID.\nsynset_to_human: dict of synset to human labels, e.g.,\n'n02119022' --> 'red fox, Vulpes vulpes'\n\nReturns:\nList of human-readable strings corresponding to each synset.", "source": "juraj-google-style"}
{"code": "def super(cls, method):\n    method_cls = type(method.__self__)\n    for supercls in method_cls.__mro__:\n        if '__mixin_overloads__' in supercls.__dict__ and supercls.__mixin_overloads__.get(method.__name__) is cls:\n            method_cls = supercls\n            break\n    return getattr(super(method_cls, method.__self__), method.__name__)", "docstring": "Imitate super() in a mix-in.\n\nThis method is a substitute for\nsuper(MixinClass, self).overloaded_method(arg),\nwhich we can't use because mix-ins appear at the end of the MRO. It should\nbe called as\nMixinClass.super(self.overloaded_method)(arg)\n. It works by finding the class on which MixinMeta.__init__ set\nMixinClass.overloaded_method and calling super() on that class.\n\nArgs:\nmethod: The method in the mix-in.\n\nReturns:\nThe method overloaded by 'method'.", "source": "github-repos"}
{"code": "def _ws_on_message(self, ws: websocket.WebSocketApp, raw: Union[(str, bytes)]):\n    if isinstance(raw, bytes):\n        decoded = zlib.decompress(raw, 15, 10490000).decode('utf-8')\n    else:\n        decoded = raw\n    data = json.loads(decoded)\n    if (data.get('s') is not None):\n        global last_sequence\n        last_sequence = str(data['s'])\n        self.logger.debug(('Set last_sequence to ' + last_sequence))\n    event = WebSocketEvent.parse(data['op'])\n    self.logger.debug('Received event {} (op \n    if (event == WebSocketEvent.HELLO):\n        interval = (float(data['d']['heartbeat_interval']) / 1000)\n        self.logger.debug(f'Starting heartbeat thread at {interval} seconds')\n        self._ws_keep_alive = WebSocketKeepAlive(self.logger, ws, interval)\n        self._ws_keep_alive.start()\n    elif (event == WebSocketEvent.DISPATCH):\n        self.logger.debug(('Got dispatch ' + data['t']))\n        if (data['t'] == PycordCallback.MESSAGE.value):\n            message_content = data['d']['content']\n            if (message_content.startswith(self.command_prefix) and self._commands):\n                cmd_str = message_content[1:].split(' ')[0].lower()\n                self.logger.debug(f'Got new message, checking for callback for command \"{cmd_str}\"')\n                for command_obj in self._commands:\n                    if (command_obj[0].lower() == cmd_str):\n                        self.logger.debug(f'Found matching command \"{command_obj[0]}\", invoking callback')\n                        command_obj[1](data)\n        for key in self.callbacks:\n            if (key.value == data['t']):\n                self.callbacks[key](data)", "docstring": "Callback for receiving messages from the websocket connection\n\nThis method receives ALL events from the websocket connection, some of which\nare used for the initial authentication flow, some of which are used for maintaining\nthe connection, some of which are for notifying this client of user states, etc.\nOnly a few of the events are really worth listening to by \"downstream\" clients,\nmostly chat events (``WebSocketEvent.DISPATCH`` with element ``t`` == 'MESSAGE_CREATE'),\nand those can be accessed by clients using this library via the command registration,\nwhich is handled by this method.\n\nArgs:\nws: websocket connection\nraw: message received from the connection; either string or bytes, the latter\nis a zlip-compressed string. Either way, the end result of formatting is JSON", "source": "codesearchnet"}
{"code": "def connect_output(self, node):\n        \n\n        if len(self.outputs) == self.max_outputs:\n            raise TooManyOutputsError(\"Attempted to connect too many nodes to the output of a node\", max_outputs=self.max_outputs, stream=self.stream)\n\n        self.outputs.append(node)", "docstring": "Connect another node to our output.\n\nThis downstream node will automatically be triggered when we update\nour output.\n\nArgs:\nnode (SGNode): The node that should receive our output", "source": "juraj-google-style"}
{"code": "def push(self, filename, data):\n    self._queue.put(Chunk(filename, data))", "docstring": "Push a chunk of a file to the streaming endpoint.\n\nArgs:\nfilename: Name of file that this is a chunk of.\nchunk_id: TODO: change to 'offset'\nchunk: File data.", "source": "codesearchnet"}
{"code": "def get_string(self, offset, length):\n        \n        return struct.unpack(str(length) + \"s\", self.data[\n                                                offset:offset + length\n                                                ])[0]", "docstring": "Returns string (length bytes)\n\nArgs:\noffset (int): sring offset in byte array\nlength (int): string length", "source": "juraj-google-style"}
{"code": "def universal_transformer_with_lstm_as_transition_function(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):\n    (state, unused_inputs, memory) = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')\n    assert (not hparams.add_step_timing_signal)\n    mh_attention_input = step_preprocess(state, step, hparams)\n    transition_function_input = attention_unit(mh_attention_input)\n    if hparams.add_ffn_unit_to_the_transition_function:\n        transition_function_input = ffn_unit(transition_function_input)\n    transition_function_input = common_layers.layer_preprocess(transition_function_input, hparams)\n    with tf.variable_scope('lstm'):\n        transition_function_input_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='input', bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        tf.contrib.summary.scalar('lstm_input_gate', tf.reduce_mean(transition_function_input_gate))\n        transition_function_forget_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='forget', bias_initializer=tf.zeros_initializer(), activation=None, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)\n        transition_function_forget_gate = tf.sigmoid((transition_function_forget_gate + forget_bias_tensor))\n        tf.contrib.summary.scalar('lstm_forget_gate', tf.reduce_mean(transition_function_forget_gate))\n        transition_function_output_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='output', bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        tf.contrib.summary.scalar('lstm_output_gate', tf.reduce_mean(transition_function_output_gate))\n        transition_function_input_modulation = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='input_modulation', bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        transition_function_memory = ((memory * transition_function_forget_gate) + (transition_function_input_gate * transition_function_input_modulation))\n        transition_function_output = (tf.tanh(transition_function_memory) * transition_function_output_gate)\n    transition_function_output = common_layers.layer_preprocess(transition_function_output, hparams)\n    return (transition_function_output, unused_inputs, transition_function_memory)", "docstring": "Universal Transformer which uses a lstm as transition function.\n\nIt's kind of like having a lstm, filliped vertically next to the Universal\nTransformer that controls the flow of the  information in depth,\nover different steps of the Universal Transformer.\n\nArgs:\nlayer_inputs:\n- state: state\n- inputs: the original embedded inputs (= inputs to the first step)\n- memory: memory used in lstm.\nstep: indicates number of steps taken so far\nhparams: model hyper-parameters.\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\nReturns:\nlayer_output:\nnew_state: new state\ninputs: the original embedded inputs (= inputs to the first step)\nmemory: contains information of state from all the previous steps.", "source": "codesearchnet"}
{"code": "def prepare_iter_request(url: Union[(methods, str)], data: MutableMapping, *, iterkey: Optional[str]=None, itermode: Optional[str]=None, limit: int=200, itervalue: Optional[Union[(str, int)]]=None) -> Tuple[(MutableMapping, str, str)]:\n    (itermode, iterkey) = find_iteration(url, itermode, iterkey)\n    if (itermode == 'cursor'):\n        data['limit'] = limit\n        if itervalue:\n            data['cursor'] = itervalue\n    elif (itermode == 'page'):\n        data['count'] = limit\n        if itervalue:\n            data['page'] = itervalue\n    elif (itermode == 'timeline'):\n        data['count'] = limit\n        if itervalue:\n            data['latest'] = itervalue\n    return (data, iterkey, itermode)", "docstring": "Prepare outgoing iteration request\n\nArgs:\nurl: :class:`slack.methods` item or string of url\ndata: Outgoing data\nlimit: Maximum number of results to return per call.\niterkey: Key in response data to iterate over (required for url string).\nitermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)\nitervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode)\nReturns:\n:py:class:`tuple` (data, iterkey, itermode)", "source": "codesearchnet"}
{"code": "def RemoveKeywordsForName(self, name, keywords):\n    \n    data_store.DB.IndexRemoveKeywordsForName(self.urn, name, keywords)", "docstring": "Removes keywords for a name.\n\nArgs:\nname: A name which should not be associated with some keywords anymore.\nkeywords: A collection of keywords.", "source": "juraj-google-style"}
{"code": "def get_variant_slice(self, package_name, range_):\n        \n        variant_list = self.variant_lists.get(package_name)\n\n        if variant_list is None:\n            variant_list = _PackageVariantList(package_name, self.solver)\n            self.variant_lists[package_name] = variant_list\n\n        entries = variant_list.get_intersection(range_)\n        if not entries:\n            return None\n\n        slice_ = _PackageVariantSlice(package_name,\n                                      entries=entries,\n                                      solver=self.solver)\n        return slice_", "docstring": "Get a list of variants from the cache.\n\nArgs:\npackage_name (str): Name of package.\nrange_ (`VersionRange`): Package version range.\n\nReturns:\n`_PackageVariantSlice` object.", "source": "juraj-google-style"}
{"code": "def get_endpoints(self, start=0, count=(- 1), filter='', sort=''):\n    uri = '{}/endpoints/'.format(self.data['uri'])\n    return self._helper.get_all(start, count, filter=filter, sort=sort, uri=uri)", "docstring": "Gets a list of endpoints in a SAN.\n\nArgs:\nstart:\nThe first item to return, using 0-based indexing.\nIf not specified, the default is 0 - start with the first available item.\ncount:\nThe number of resources to return. A count of -1 requests all items.\nThe actual number of items in the response might differ from the requested\ncount if the sum of start and count exceeds the total number of items.\nfilter (list or str):\nA general filter/query string to narrow the list of items returned. The\ndefault is no filter; all resources are returned.\nsort:\nThe sort order of the returned data set. By default, the sort order is based\non create time with the oldest entry first.\n\nReturns:\nlist: A list of endpoints.", "source": "codesearchnet"}
{"code": "def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None):\n        \n        all_responses = {}\n\n        if self._cache:\n            all_responses = self._cache.bulk_lookup(cache_api_name, url_params)\n            url_params = [key for key in url_params if key not in all_responses.keys()]\n\n        if len(url_params):\n            urls = self._to_urls(fmt_url_path, url_params)\n            responses = self._requests.multi_get(urls, query_params)\n            for url_param, response in zip(url_params, responses):\n                if self._cache:\n                    self._cache.cache_value(cache_api_name, url_param, response)\n                all_responses[url_param] = response\n\n        return all_responses", "docstring": "Makes multiple GETs to an OpenDNS endpoint.\n\nArgs:\ncache_api_name: string api_name for caching\nfmt_url_path: format string for building URL paths\nurl_params: An enumerable of strings used in building URLs\nquery_params - None / dict / list of dicts containing query params\nReturns:\nA dict of {url_param: api_result}", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    downsample_ratio = self.config.downsample_ratio\n    if vision_feature_layer == -1:\n        vision_features = self.vision_tower(pixel_values=pixel_values).last_hidden_state\n    else:\n        vision_features = self.vision_model(pixel_values=pixel_values).hidden_states[vision_feature_layer]\n    if vision_feature_select_strategy == 'default':\n        vision_features = vision_features[:, 1:, :]\n    channels = vision_features.shape[1]\n    feature_size = int(channels ** 0.5)\n    batch_size = vision_features.shape[0]\n    vision_features = vision_features.reshape(batch_size, feature_size, feature_size, -1)\n    vision_features = self.pixel_shuffle(vision_features, scale_factor=downsample_ratio)\n    vision_features = vision_features.reshape(batch_size, -1, vision_features.shape[-1])\n    vision_features = self.multi_modal_projector(vision_features)\n    return vision_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nThe tensors corresponding to the input images.\nvision_feature_layer (`int` or `List[int]`):\nLayer index or list of layer indices to extract features from.\nReturns:\nvision_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.", "source": "github-repos"}
{"code": "def run_suite(test_classes, argv=None):\n    args = _parse_cli_args(argv)\n    for test_class in test_classes:\n        if not issubclass(test_class, base_test.BaseTestClass):\n            logging.error('Test class %s does not extend mobly.base_test.BaseTestClass', test_class)\n            sys.exit(1)\n    if args.list_tests:\n        _print_test_names(test_classes)\n        sys.exit(0)\n    test_configs = config_parser.load_test_config_file(args.config, args.test_bed)\n    selected_tests = compute_selected_tests(test_classes, args.tests)\n    console_level = logging.DEBUG if args.verbose else logging.INFO\n    ok = True\n    for config in test_configs:\n        runner = test_runner.TestRunner(config.log_path, config.testbed_name)\n        with runner.mobly_logger(console_level=console_level):\n            for test_class, tests in selected_tests.items():\n                runner.add_test_class(config, test_class, tests)\n            try:\n                runner.run()\n                ok = runner.results.is_all_pass and ok\n            except signals.TestAbortAll:\n                pass\n            except Exception:\n                logging.exception('Exception when executing %s.', config.testbed_name)\n                ok = False\n    if not ok:\n        sys.exit(1)", "docstring": "Executes multiple test classes as a suite.\n\nThis is the default entry point for running a test suite script file\ndirectly.\n\nArgs:\ntest_classes: List of python classes containing Mobly tests.\nargv: A list that is then parsed as cli args. If None, defaults to cli\ninput.", "source": "github-repos"}
{"code": "def CaffeBilinearUpSample(x, shape):\n    \n    inp_shape = x.shape.as_list()\n    ch = inp_shape[1]\n    assert ch == 1, \"This layer only works for channel=1\"\n    \n    \n\n    shape = int(shape)\n    filter_shape = 2 * shape\n\n    def bilinear_conv_filler(s):\n        \n        f = np.ceil(float(s) / 2)\n        c = float(2 * f - 1 - f % 2) / (2 * f)\n        ret = np.zeros((s, s), dtype='float32')\n        for x in range(s):\n            for y in range(s):\n                ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n        return ret\n\n    w = bilinear_conv_filler(filter_shape)\n    w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch))\n\n    weight_var = tf.constant(w, tf.float32,\n                             shape=(filter_shape, filter_shape, ch, ch),\n                             name='bilinear_upsample_filter')\n    x = tf.pad(x, [[0, 0], [0, 0], [shape - 1, shape - 1], [shape - 1, shape - 1]], mode='SYMMETRIC')\n    out_shape = tf.shape(x) * tf.constant([1, 1, shape, shape], tf.int32)\n    deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape,\n                                    [1, 1, shape, shape], 'SAME', data_format='NCHW')\n    edge = shape * (shape - 1)\n    deconv = deconv[:, :, edge:-edge, edge:-edge]\n\n    if inp_shape[2]:\n        inp_shape[2] *= shape\n    if inp_shape[3]:\n        inp_shape[3] *= shape\n    deconv.set_shape(inp_shape)\n    return deconv", "docstring": "Deterministic bilinearly-upsample the input images.\nIt is implemented by deconvolution with \"BilinearFiller\" in Caffe.\nIt is aimed to mimic caffe behavior.\n\nArgs:\nx (tf.Tensor): a NCHW tensor\nshape (int): the upsample factor\n\nReturns:\ntf.Tensor: a NCHW tensor.", "source": "juraj-google-style"}
{"code": "def restore_original_dimensions(obs, obs_space, tensorlib=tf):\n    if hasattr(obs_space, 'original_space'):\n        return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib)\n    else:\n        return obs", "docstring": "Unpacks Dict and Tuple space observations into their original form.\n\nThis is needed since we flatten Dict and Tuple observations in transit.\nBefore sending them to the model though, we should unflatten them into\nDicts or Tuples of tensors.\n\nArguments:\nobs: The flattened observation tensor.\nobs_space: The flattened obs space. If this has the `original_space`\nattribute, we will unflatten the tensor to that shape.\ntensorlib: The library used to unflatten (reshape) the array/tensor.\n\nReturns:\nsingle tensor or dict / tuple of tensors matching the original\nobservation space.", "source": "codesearchnet"}
{"code": "def trigger_streamer(*inputs, **kwargs):\n    streamer_marker = kwargs['mark_streamer']\n    try:\n        reading = inputs[1].pop()\n    except StreamEmptyError:\n        return []\n    finally:\n        for input_x in inputs:\n            input_x.skip_all()\n    try:\n        streamer_marker(reading.value)\n    except ArgumentError:\n        return []\n    return [IOTileReading(0, 0, 0)]", "docstring": "Trigger a streamer based on the index read from input b.\n\nReturns:\nlist(IOTileReading)", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 located_items=None,\n                 unique_identifiers=None):\n        \n        super(LocateResponsePayload, self).__init__(\n            enums.Tags.RESPONSE_PAYLOAD)\n\n        self._located_items = None\n        self._unique_identifiers = None\n\n        self.located_items = located_items\n        self.unique_identifiers = unique_identifiers", "docstring": "Construct a Locate response payload structure.\n\nArgs:\nlocated_items (int): An integer specifying the number of matching\nobjects found by the server. Note that this may not equal the\nnumber of object identifiers returned in this payload.\nOptional, defaults to None.\nunique_identifiers (list): A list of strings specifying the object\nidentifiers for matching objects. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def calculate_bbh(blast_results_1, blast_results_2, r_name=None, g_name=None, outdir=''):\n    \n    \n\n    cols = ['gene', 'subject', 'PID', 'alnLength', 'mismatchCount', 'gapOpenCount', 'queryStart', 'queryEnd',\n            'subjectStart', 'subjectEnd', 'eVal', 'bitScore']\n\n    if not r_name and not g_name:\n        r_name = op.basename(blast_results_1).split('_vs_')[0]\n        g_name = op.basename(blast_results_1).split('_vs_')[1].replace('_blast.out', '')\n\n        r_name2 = op.basename(blast_results_2).split('_vs_')[1].replace('_blast.out', '')\n        if r_name != r_name2:\n            log.warning('{} != {}'.format(r_name, r_name2))\n\n    outfile = op.join(outdir, '{}_vs_{}_bbh.csv'.format(r_name, g_name))\n    if op.exists(outfile) and os.stat(outfile).st_size != 0:\n        log.debug('{} vs {} BLAST BBHs already found at {}'.format(r_name, g_name, outfile))\n        return outfile\n\n    bbh1 = pd.read_csv(blast_results_1, sep='\\t', names=cols)\n    bbh2 = pd.read_csv(blast_results_2, sep='\\t', names=cols)\n\n    out = pd.DataFrame()\n    log.debug('Finding BBHs for {} vs. {}'.format(r_name, g_name))\n\n    for g in bbh1[pd.notnull(bbh1.gene)].gene.unique():\n        res = bbh1[bbh1.gene == g]\n        if len(res) == 0:\n            continue\n        best_hit = res.ix[res.PID.idxmax()].copy()\n        best_gene = best_hit.subject\n        res2 = bbh2[bbh2.gene == best_gene]\n        if len(res2) == 0:\n            continue\n        best_hit2 = res2.ix[res2.PID.idxmax()]\n        best_gene2 = best_hit2.subject\n        if g == best_gene2:\n            best_hit['BBH'] = '<=>'\n        else:\n            best_hit['BBH'] = '->'\n        out = pd.concat([out, pd.DataFrame(best_hit).transpose()])\n\n    out.to_csv(outfile)\n    log.debug('{} vs {} BLAST BBHs saved at {}'.format(r_name, g_name, outfile))\n    return outfile", "docstring": "Calculate the best bidirectional BLAST hits (BBH) and save a dataframe of results.\n\nArgs:\nblast_results_1 (str): BLAST results for reference vs. other genome\nblast_results_2 (str): BLAST results for other vs. reference genome\nr_name: Name of reference genome\ng_name: Name of other genome\noutdir: Directory where BLAST results are stored.\n\nReturns:\nPath to Pandas DataFrame of the BBH results.", "source": "juraj-google-style"}
{"code": "def sphere_selector_using_residues(self, radius, force_rerun=False):\n    log.debug('{}: running sphere selector...'.format(self.id))\n    if ((not self.sphgen_path) or (not self.bindingsite_path)):\n        return ValueError('Please run sphgen and binding_site_mol2')\n    selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):\n        cmd = 'sphere_selector {} {} {}'.format(self.sphgen_path, self.bindingsite_path, radius)\n        rename = 'mv selected_spheres.sph {}'.format(selsph)\n        os.system(cmd)\n        os.system(rename)\n    if ssbio.utils.is_non_zero_file(selsph):\n        self.sphsel_path = selsph\n        log.debug('{}: successful sphere selection'.format(self.sphsel_path))\n    else:\n        log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path))", "docstring": "Select spheres based on binding site residues\n\nArgs:\nradius (int, float): Radius around binding residues to dock to\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "codesearchnet"}
{"code": "def _get_image_patches(self, image: 'torch.Tensor', grid_pinpoints, size: tuple, patch_size: int, interpolation: 'F.InterpolationMode') -> List['torch.Tensor']:\n    if not isinstance(grid_pinpoints, list):\n        raise TypeError('grid_pinpoints must be a list of possible resolutions.')\n    possible_resolutions = grid_pinpoints\n    image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST)\n    best_resolution = select_best_resolution(image_size, possible_resolutions)\n    resized_image = self._resize_for_patching(image, best_resolution, interpolation=interpolation, input_data_format=ChannelDimension.FIRST)\n    padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=ChannelDimension.FIRST)\n    patches = divide_to_patches(padded_image, patch_size=patch_size)\n    resized_original_image = F.resize(image, size=size, interpolation=interpolation)\n    image_patches = [resized_original_image] + patches\n    return image_patches", "docstring": "Process an image with variable resolutions by dividing it into patches.\n\nArgs:\nimage (\"torch.Tensor\"):\nThe input image to be processed.\ngrid_pinpoints (List):\nA string representation of a list of possible resolutions.\nsize (`tuple`):\nSize to resize the original image to.\npatch_size (`int`):\nSize of the patches to divide the image into.\ninterpolation (`\"InterpolationMode\"`):\nResampling filter to use if resizing the image.\n\nReturns:\nList[\"torch.Tensor\"]: A list of NumPy arrays containing the processed image patches.", "source": "github-repos"}
{"code": "def get(self, key, default=None):\n        \n        \n        if key.count('.') == 0:\n            return super(DotDict, self).get(key, default)\n\n        \n        value = default\n\n        \n        \n        \n        \n        first, remainder = key.split('.', 1)\n        if first in self:\n            value = super(DotDict, self).get(first, default)\n\n            \n            \n            if isinstance(value, (dict, DotDict)):\n                return DotDict(value).get(remainder, default)\n\n            \n\n        return value", "docstring": "Get a value from the `DotDict`.\n\nThe `key` parameter can either be a regular string key,\ne.g. \"foo\", or it can be a string key with dot notation,\ne.g. \"foo.bar.baz\", to signify a nested lookup.\n\nThe default value is returned if any level of the key's\ncomponents are not found.\n\nArgs:\nkey (str): The key to get the value for.\ndefault: The return value should the given key\nnot exist in the `DotDict`.", "source": "juraj-google-style"}
{"code": "def make_sql(table_name, max_rows=None, for_eval=False):\n    if for_eval:\n        where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) = 0 AND pickup_latitude is not null AND pickup_longitude is not null AND dropoff_latitude is not null AND dropoff_longitude is not null'\n    else:\n        where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) > 0 AND pickup_latitude is not null AND pickup_longitude is not null AND dropoff_latitude is not null AND dropoff_longitude is not null'\n    limit_clause = ''\n    if max_rows:\n        limit_clause = 'LIMIT {max_rows}'.format(max_rows=max_rows)\n    return '\\n  SELECT\\n      CAST(pickup_community_area AS string) AS pickup_community_area,\\n      CAST(dropoff_community_area AS string) AS dropoff_community_area,\\n      CAST(pickup_census_tract AS string) AS pickup_census_tract,\\n      CAST(dropoff_census_tract AS string) AS dropoff_census_tract,\\n      fare,\\n      EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month,\\n      EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour,\\n      EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day,\\n      UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp,\\n      pickup_latitude,\\n      pickup_longitude,\\n      dropoff_latitude,\\n      dropoff_longitude,\\n      trip_miles,\\n      payment_type,\\n      company,\\n      trip_seconds,\\n      tips\\n  FROM `{table_name}`\\n  {where_clause}\\n  {limit_clause}\\n'.format(table_name=table_name, where_clause=where_clause, limit_clause=limit_clause)", "docstring": "Creates the sql command for pulling data from BigQuery.\n\nArgs:\ntable_name: BigQuery table name\nmax_rows: if set, limits the number of rows pulled from BigQuery\nfor_eval: True if this is for evaluation, false otherwise\n\nReturns:\nsql command as string", "source": "github-repos"}
{"code": "def city_nums():\n    city_nums = {}\n    first_row = 1\n    num = 0\n    fname = pkg_resources.resource_filename(__name__, 'resources/Distance_Matrix.csv')\n    with open(fname, 'rU') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        for row in reader:\n            if (first_row == 1):\n                first_row = 0\n            else:\n                city_nums[row[0]] = num\n                num = (num + 1)\n    return city_nums", "docstring": "Get a dictionary of Backpage city names mapped to their 'legend' value.\n\nReturns:\ndictionary of Backpage city names mapped to their numeric value", "source": "codesearchnet"}
{"code": "def wait(self, timeout_ms=None):\n    \n    closed = timeouts.loop_until_timeout_or_true(\n        timeouts.PolledTimeout.from_millis(timeout_ms),\n        self.stream.is_closed, .1)\n    if closed:\n      if hasattr(self.stdout, 'getvalue'):\n        return self.stdout.getvalue()\n      return True\n    return None", "docstring": "Block until this command has completed.\n\nArgs:\ntimeout_ms: Timeout, in milliseconds, to wait.\n\nReturns:\nOutput of the command if it complete and self.stdout is a StringIO\nobject or was passed in as None.  Returns True if the command completed but\nstdout was provided (and was not a StringIO object).  Returns None if the\ntimeout expired before the command completed.  Be careful to check the\nreturn value explicitly for None, as the output may be ''.", "source": "juraj-google-style"}
{"code": "def to_pdb(prot: Protein) -> str:\n    restypes = residue_constants.restypes + ['X']\n\n    def res_1to3(r: int) -> str:\n        return residue_constants.restype_1to3.get(restypes[r], 'UNK')\n    atom_types = residue_constants.atom_types\n    pdb_lines: List[str] = []\n    atom_mask = prot.atom_mask\n    aatype = prot.aatype\n    atom_positions = prot.atom_positions\n    residue_index = prot.residue_index.astype(np.int32)\n    b_factors = prot.b_factors\n    chain_index = prot.chain_index\n    if np.any(aatype > residue_constants.restype_num):\n        raise ValueError('Invalid aatypes.')\n    headers = get_pdb_headers(prot)\n    if len(headers) > 0:\n        pdb_lines.extend(headers)\n    n = aatype.shape[0]\n    atom_index = 1\n    prev_chain_index = 0\n    chain_tags = string.ascii_uppercase\n    chain_tag = None\n    for i in range(n):\n        res_name_3 = res_1to3(aatype[i])\n        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):\n            if mask < 0.5:\n                continue\n            record_type = 'ATOM'\n            name = atom_name if len(atom_name) == 4 else f' {atom_name}'\n            alt_loc = ''\n            insertion_code = ''\n            occupancy = 1.0\n            element = atom_name[0]\n            charge = ''\n            chain_tag = 'A'\n            if chain_index is not None:\n                chain_tag = chain_tags[chain_index[i]]\n            atom_line = f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}{res_name_3:>3} {chain_tag:>1}{residue_index[i]:>4}{insertion_code:>1}   {pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}{occupancy:>6.2f}{b_factor:>6.2f}          {element:>2}{charge:>2}'\n            pdb_lines.append(atom_line)\n            atom_index += 1\n        should_terminate = i == n - 1\n        if chain_index is not None:\n            if i != n - 1 and chain_index[i + 1] != prev_chain_index:\n                should_terminate = True\n                prev_chain_index = chain_index[i + 1]\n        if should_terminate:\n            chain_end = 'TER'\n            chain_termination_line = f'{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}'\n            pdb_lines.append(chain_termination_line)\n            atom_index += 1\n            if i != n - 1:\n                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))\n    pdb_lines.append('END')\n    pdb_lines.append('')\n    return '\\n'.join(pdb_lines)", "docstring": "Converts a `Protein` instance to a PDB string.\n\nArgs:\nprot: The protein to convert to PDB.\n\nReturns:\nPDB string.", "source": "github-repos"}
{"code": "def add_controller(self, controller, timeout=None):\n    assert (controller.mri not in self._controllers), ('Controller already exists for %s' % controller.mri)\n    self._controllers[controller.mri] = controller\n    controller.setup(self)\n    if self.state:\n        should_publish = self._start_controllers([controller], timeout)\n        if ((self.state == STARTED) and should_publish):\n            self._publish_controllers(timeout)", "docstring": "Add a controller to be hosted by this process\n\nArgs:\ncontroller (Controller): Its controller\ntimeout (float): Maximum amount of time to wait for each spawned\nobject. None means forever", "source": "codesearchnet"}
{"code": "def __init__(self, hidden_size):\n    \n    super(Compressor, self).__init__()\n    self.hidden_size = hidden_size\n    \n    conv = functools.partial(\n        tf.keras.layers.Conv2D, padding=\"SAME\", activation=tf.nn.leaky_relu)\n    self.conv1 = conv(256, 3, 2)\n    self.conv2 = conv(256, 3, 2)\n    self.conv3 = conv(256, 3, 2)\n    self.conv4 = conv(hidden_size, 8, padding=\"VALID\")", "docstring": "Constructs a convolutional compressor.\n\nThis model takes as input `x_{1:T}` and outputs an intermediate\nrepresentation for use in downstream probabilistic encoders.\n\nArgs:\nhidden_size: Dimensionality of the intermediate representations.", "source": "juraj-google-style"}
{"code": "def _get_global_step_read(graph=None):\n    graph = graph or ops.get_default_graph()\n    global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)\n    if len(global_step_read_tensors) > 1:\n        raise RuntimeError('There are multiple items in collection {}. There should be only one.'.format(GLOBAL_STEP_READ_KEY))\n    if len(global_step_read_tensors) == 1:\n        return global_step_read_tensors[0]\n    return None", "docstring": "Gets global step read tensor in graph.\n\nArgs:\ngraph: The graph in which to create the global step read tensor. If missing,\nuse default graph.\n\nReturns:\nGlobal step read tensor.\n\nRaises:\nRuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.", "source": "github-repos"}
{"code": "def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_safe: bool=False, _add_with_inplace: bool=False, _inplace_chunk_size: Optional[int]=256) -> torch.Tensor:\n    if inplace_safe:\n        x = self._inference_forward(z, mask, inplace_chunk_size=_inplace_chunk_size, with_add=_add_with_inplace)\n        return x\n    if mask is None:\n        mask = z.new_ones(z.shape[:-1])\n    mask = mask.unsqueeze(-1)\n    z = self.layer_norm_in(z)\n    a = mask\n    a = a * self.sigmoid(self.linear_a_g(z))\n    a = a * self.linear_a_p(z)\n    b = mask\n    b = b * self.sigmoid(self.linear_b_g(z))\n    b = b * self.linear_b_p(z)\n    if is_fp16_enabled():\n        with torch.cuda.amp.autocast(enabled=False):\n            x = self._combine_projections(a.float(), b.float())\n    else:\n        x = self._combine_projections(a, b)\n    del a, b\n    x = self.layer_norm_out(x)\n    x = self.linear_z(x)\n    g = self.sigmoid(self.linear_g(z))\n    x = x * g\n    return x", "docstring": "Args:\nx:\n[*, N_res, N_res, C_z] input tensor\nmask:\n[*, N_res, N_res] input mask\nReturns:\n[*, N_res, N_res, C_z] output tensor", "source": "github-repos"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n    if not isinstance(hidden_states, (tuple, list)):\n        raise TypeError('hidden_states should be a tuple or list of tensors')\n    if len(hidden_states) != len(self.config.neck_hidden_sizes):\n        raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n    hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n    features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n    output = self.fusion_stage(features)\n    return output", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def _validate_config(config):\n    \n    required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]\n    for key in required_keys:\n        if key not in config:\n            raise Error(\"Required key %s missing from config %s\",\n                        (key, config))", "docstring": "Verifies that a config dict for an attenuator device is valid.\n\nArgs:\nconfig: A dict that is the configuration for an attenuator device.\n\nRaises:\nattenuator.Error: A config is not valid.", "source": "juraj-google-style"}
{"code": "def _on_join_leader(self, response):\n        \n        try:\n            group_assignment = self._perform_assignment(response.leader_id,\n                                                        response.group_protocol,\n                                                        response.members)\n        except Exception as e:\n            return Future().failure(e)\n\n        version = 0 if self.config['api_version'] < (0, 11, 0) else 1\n        request = SyncGroupRequest[version](\n            self.group_id,\n            self._generation.generation_id,\n            self._generation.member_id,\n            [(member_id,\n              assignment if isinstance(assignment, bytes) else assignment.encode())\n             for member_id, assignment in six.iteritems(group_assignment)])\n\n        log.debug(\"Sending leader SyncGroup for group %s to coordinator %s: %s\",\n                  self.group_id, self.coordinator_id, request)\n        return self._send_sync_group_request(request)", "docstring": "Perform leader synchronization and send back the assignment\nfor the group via SyncGroupRequest\n\nArguments:\nresponse (JoinResponse): broker response to parse\n\nReturns:\nFuture: resolves to member assignment encoded-bytes", "source": "juraj-google-style"}
{"code": "def find_sanitiser_nodes(\n    sanitiser,\n    sanitisers_in_file\n):\n    \n    for sanitiser_tuple in sanitisers_in_file:\n        if sanitiser == sanitiser_tuple.trigger_word:\n            yield sanitiser_tuple.cfg_node", "docstring": "Find nodes containing a particular sanitiser.\n\nArgs:\nsanitiser(string): sanitiser to look for.\nsanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser.\n\nReturns:\nIterable of sanitiser nodes.", "source": "juraj-google-style"}
{"code": "def parse_display_name(chrom, pos, ref, alt, variant_type):\n    \n    return '_'.join([chrom, pos, ref, alt, variant_type])", "docstring": "Parse the variant id for a variant\n\nThis is used to display the variant in scout.\n\nArgs:\nchrom(str)\npos(str)\nref(str)\nalt(str)\nvariant_type(str): 'clinical' or 'research'\n\nReturns:\nvariant_id(str): The variant id in human readable format", "source": "juraj-google-style"}
{"code": "def dates(self):\n    return _gen_business_days(self._start_date, self._end_date, self._holiday_calendar, self._backward)", "docstring": "Returns the dates as computed from the schedule as a DateTensor.\n\nConstructs the date schedule from the supplied data. For more details see\nthe initializer docstring.\n\nReturns:\n`DateTensor` of rank one more than `start_date` or `end_date`\n(depending on `backwards`), representing schedules for each element\nof the input.", "source": "github-repos"}
{"code": "def perform_load_job(self, destination, job_id, source_uris=None, source_stream=None, schema=None, write_disposition=None, create_disposition=None, additional_load_parameters=None, source_format=None, job_labels=None, load_job_project_id=None):\n    project_id = destination.projectId if load_job_project_id is None else load_job_project_id\n    return self._insert_load_job(project_id, job_id, destination, source_uris=source_uris, source_stream=source_stream, schema=schema, create_disposition=create_disposition, write_disposition=write_disposition, additional_load_parameters=additional_load_parameters, source_format=source_format, job_labels=job_labels)", "docstring": "Starts a job to load data into BigQuery.\n\nReturns:\nbigquery.JobReference with the information about the job that was started.", "source": "github-repos"}
{"code": "def create_run_group(prj):\n    \n    from benchbuild.utils import schema as s\n\n    session = s.Session()\n    experiment = prj.experiment\n    group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)\n    session.add(group)\n    session.commit()\n\n    return (group, session)", "docstring": "Create a new 'run_group' in the database.\n\nThis creates a new transaction in the database and creates a new run_group\nwithin this transaction. Afterwards we return both the transaction as well\nas the run_group itself. The user is responsible for committing it when the\ntime comes.\n\nArgs:\nprj - The project for which we open the run_group.\n\nReturns:\nA tuple (group, session) containing both the newly created run_group and\nthe transaction object.", "source": "juraj-google-style"}
{"code": "def GenerateId(self, entity_id=None):\n    self._idnum += 1\n    if entity_id:\n        return ('%s_merged_%d' % (entity_id, self._idnum))\n    else:\n        return ('merged_%d' % self._idnum)", "docstring": "Generate a unique id based on the given id.\n\nThis is done by appending a counter which is then incremented. The\ncounter is initialised at the maximum number used as an ending for\nany id in the old and new schedules.\n\nArgs:\nentity_id: The base id string. This is allowed to be None.\n\nReturns:\nThe generated id.", "source": "codesearchnet"}
{"code": "def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):\n  \n  if not sparse_tensor_proto.HasField(\"named_tuple\"):\n    raise base_errors.ModuleInfoError(\n        \"Error while deserializing a SparseTensor: expected proto tuple.\")\n  if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:\n    raise base_errors.ModuleInfoError(\n        \"Error while deserializing a SparseTensor: The name of the tuple \"\n        \"should have been {} but was {}.\".format(\n            _SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))\n  named_tuple_map = sparse_tensor_proto.named_tuple.map\n  return tf.SparseTensor(\n      indices=process_leafs(named_tuple_map[\"indices\"].value),\n      values=process_leafs(named_tuple_map[\"values\"].value),\n      dense_shape=process_leafs(named_tuple_map[\"dense_shape\"].value))", "docstring": "Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.\n\nArgs:\nsparse_tensor_proto: A proto representing a `tf.SparseTensor`.\nprocess_leafs: A function to be applied to the leaf valued of the nested\nstructure.\n\nReturns:\nAn instance of `tf.SparseTensor`.", "source": "juraj-google-style"}
{"code": "def dqdv_cycles(cycles, **kwargs):\n    ica_dfs = list()\n    cycle_group = cycles.groupby('cycle')\n    for (cycle_number, cycle) in cycle_group:\n        (v, dq) = dqdv_cycle(cycle, splitter=True, **kwargs)\n        _ica_df = pd.DataFrame({'voltage': v, 'dq': dq})\n        _ica_df['cycle'] = cycle_number\n        _ica_df = _ica_df[['cycle', 'voltage', 'dq']]\n        ica_dfs.append(_ica_df)\n    ica_df = pd.concat(ica_dfs)\n    return ica_df", "docstring": "Convenience functions for creating dq-dv data from given capacity and\nvoltage cycles.\n\nReturns a DataFrame with a 'voltage' and a 'incremental_capacity'\ncolumn.\n\nArgs:\ncycles (pandas.DataFrame): the cycle data ('cycle', 'voltage',\n'capacity', 'direction' (1 or -1)).\n\nReturns:\npandas.DataFrame with columns 'cycle', 'voltage', 'dq'.\n\nExample:\n>>> cycles_df = my_data.get_cap(\n>>> ...   categorical_column=True,\n>>> ...   method = \"forth-and-forth\",\n>>> ...   label_cycle_number=True,\n>>> ... )\n>>> ica_df = ica.dqdv_cycles(cycles_df)", "source": "codesearchnet"}
{"code": "def get_by(self, field, value):\n        \n        if field == 'userName' or field == 'name':\n            return self._client.get(self.URI + '/' + value)\n        elif field == 'role':\n            value = value.replace(\" \", \"%20\")\n            return self._client.get(self.URI + '/roles/users/' + value)['members']\n        else:\n            raise HPOneViewException('Only userName, name and role can be queried for this resource.')", "docstring": "Gets all Users that match the filter.\n\nThe search is case-insensitive.\n\nArgs:\nfield: Field name to filter. Accepted values: 'name', 'userName', 'role'\nvalue: Value to filter.\n\nReturns:\nlist: A list of Users.", "source": "juraj-google-style"}
{"code": "def remove_item(self, **kwargs):\n    path = self._get_id_path('remove_item')\n    kwargs.update({'session_id': self.session_id})\n    payload = {'media_id': kwargs.pop('media_id', None)}\n    response = self._POST(path, kwargs, payload)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Delete movies from a list that the user created.\n\nA valid session id is required.\n\nArgs:\nmedia_id: A movie id.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def get_ordered_params(url):\n    if (url not in URLHelper.__cache):\n        URLHelper.__cache[url] = urlparse(url)\n    params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)\n    return OrderedDict(sorted(params.items()))", "docstring": "Get the query parameters of the given URL in alphabetical order.\n\nArgs:\nurl (str): The URL to get the query parameters from.\n\nReturns:\nstr: The query parameters", "source": "codesearchnet"}
{"code": "def __init__(self, assign_defaults=(), method_name=None, overwrite=False):\n    \n    super(self.__class__, self).__init__(assign_defaults=assign_defaults,\n                                         method_name=method_name,\n                                         overwrite=overwrite)", "docstring": "Assigns arguments to the decorator.\n\nArgs:\nassign_defaults: A sequence of strings for the default values that should\nbe provided. Defaults are shared across methods.\nmethod_name: If provided, use this as the method_name instead of the\nwrapped function's name.\noverwrite: if true, overwrites definition if exists.", "source": "juraj-google-style"}
{"code": "async def movehere(self, channel):\n        \n\n        self.logger.debug(\"movehere command\")\n\n        \n        await self.embed.delete()\n        \n        self.embed.channel = channel\n        \n        await self.embed.send()\n        \n        await self.add_reactions()\n\n        self.statuslog.info(\"Moved to front\")", "docstring": "Moves the embed message to a new channel; can also be used to move the musicplayer to the front\n\nArgs:\nchannel (discord.Channel): The channel to move to", "source": "juraj-google-style"}
{"code": "def CheckDependencies(verbose_output=True):\n  \n  print('Checking availability and versions of dependencies.')\n  check_result = True\n\n  for module_name, version_tuple in sorted(PYTHON_DEPENDENCIES.items()):\n    if not _CheckPythonModule(\n        module_name, version_tuple[0], version_tuple[1],\n        is_required=version_tuple[3], maximum_version=version_tuple[2],\n        verbose_output=verbose_output):\n      check_result = False\n\n  if not _CheckSQLite3(verbose_output=verbose_output):\n    check_result = False\n\n  if check_result and not verbose_output:\n    print('[OK]')\n\n  print('')\n  return check_result", "docstring": "Checks the availability of the dependencies.\n\nArgs:\nverbose_output (Optional[bool]): True if output should be verbose.\n\nReturns:\nbool: True if the dependencies are available, False otherwise.", "source": "juraj-google-style"}
{"code": "def stream_realtime(self, stream, value):\n        \n\n        if not self.stream_iface_open:\n            return\n\n        reading = IOTileReading(0, stream, value)\n\n        report = IndividualReadingReport.FromReadings(self.iotile_id, [reading])\n        self.stream(report)", "docstring": "Stream a realtime value as an IndividualReadingReport.\n\nIf the streaming interface of the VirtualInterface this\nVirtualDevice is attached to is not opened, the realtime\nreading may be dropped.\n\nArgs:\nstream (int): The stream id to send\nvalue (int): The stream value to send", "source": "juraj-google-style"}
{"code": "def write(self, path=None, *args, **kwargs):\n        \n        if path is None:\n            print(self.format(*args, **kwargs))\n        else:\n            with io.open(path, 'w', newline=\"\") as f:\n                f.write(self.format(*args, **kwargs))", "docstring": "Perform formatting and write the formatted string to a file or stdout.\n\nOptional arguments can be used to format the editor's contents. If no\nfile path is given, prints to standard output.\n\nArgs:\npath (str): Full file path (default None, prints to stdout)\n*args: Positional arguments to format the editor with\n**kwargs: Keyword arguments to format the editor with", "source": "juraj-google-style"}
{"code": "def wait_all_futures(self, futures, timeout=None, event_timeout=None):\n    if (timeout is None):\n        end = None\n    else:\n        end = (time.time() + timeout)\n    if (not isinstance(futures, list)):\n        if futures:\n            futures = [futures]\n        else:\n            futures = []\n    filtered_futures = []\n    for f in futures:\n        if f.done():\n            if (f.exception() is not None):\n                raise f.exception()\n        else:\n            filtered_futures.append(f)\n    while filtered_futures:\n        if (event_timeout is not None):\n            until = (time.time() + event_timeout)\n            if (end is not None):\n                until = min(until, end)\n        else:\n            until = end\n        self._service_futures(filtered_futures, until)", "docstring": "Services all futures until the list 'futures' are all done\nthen returns. Calls relevant subscription callbacks as they\ncome off the queue and raises an exception on abort\n\nArgs:\nfutures: a `Future` or list of all futures that the caller\nwants to wait for\ntimeout: maximum total time in seconds to wait for responses, wait\nforever if None\nevent_timeout: maximum time in seconds to wait between each response\nevent, wait forever if None", "source": "codesearchnet"}
{"code": "async def _handle_set_typing_notification(self, set_typing_notification):\n    conv_id = set_typing_notification.conversation_id.id\n    res = parsers.parse_typing_status_message(set_typing_notification)\n    (await self.on_typing.fire(res))\n    try:\n        conv = (await self._get_or_fetch_conversation(conv_id))\n    except exceptions.NetworkError:\n        logger.warning('Failed to fetch conversation for typing notification: %s', conv_id)\n    else:\n        (await conv.on_typing.fire(res))", "docstring": "Receive SetTypingNotification and update the conversation.\n\nArgs:\nset_typing_notification: hangouts_pb2.SetTypingNotification\ninstance", "source": "codesearchnet"}
{"code": "def _wait_for_and_process_task(self, task):\n        \n        function_descriptor = FunctionDescriptor.from_bytes_list(\n            task.function_descriptor_list())\n        driver_id = task.driver_id()\n\n        \n        \n        if not task.actor_creation_id().is_nil():\n            assert self.actor_id.is_nil()\n            self.actor_id = task.actor_creation_id()\n            self.actor_creation_task_id = task.task_id()\n            actor_class = self.function_actor_manager.load_actor_class(\n                driver_id, function_descriptor)\n            self.actors[self.actor_id] = actor_class.__new__(actor_class)\n            self.actor_checkpoint_info[self.actor_id] = ActorCheckpointInfo(\n                num_tasks_since_last_checkpoint=0,\n                last_checkpoint_timestamp=int(1000 * time.time()),\n                checkpoint_ids=[],\n            )\n\n        execution_info = self.function_actor_manager.get_execution_info(\n            driver_id, function_descriptor)\n\n        \n        function_name = execution_info.function_name\n        extra_data = {\"name\": function_name, \"task_id\": task.task_id().hex()}\n        if task.actor_id().is_nil():\n            if task.actor_creation_id().is_nil():\n                title = \"ray_worker:{}()\".format(function_name)\n                next_title = \"ray_worker\"\n            else:\n                actor = self.actors[task.actor_creation_id()]\n                title = \"ray_{}:{}()\".format(actor.__class__.__name__,\n                                             function_name)\n                next_title = \"ray_{}\".format(actor.__class__.__name__)\n        else:\n            actor = self.actors[task.actor_id()]\n            title = \"ray_{}:{}()\".format(actor.__class__.__name__,\n                                         function_name)\n            next_title = \"ray_{}\".format(actor.__class__.__name__)\n        with profiling.profile(\"task\", extra_data=extra_data):\n            with _changeproctitle(title, next_title):\n                self._process_task(task, execution_info)\n            \n            self.task_context.current_task_id = TaskID.nil()\n            self.task_context.task_index = 0\n            self.task_context.put_index = 1\n            if self.actor_id.is_nil():\n                \n                \n                \n                self.task_driver_id = DriverID.nil()\n                \n                \n                ray_signal.reset()\n\n        \n        self.function_actor_manager.increase_task_counter(\n            driver_id, function_descriptor)\n\n        reached_max_executions = (self.function_actor_manager.get_task_counter(\n            driver_id, function_descriptor) == execution_info.max_calls)\n        if reached_max_executions:\n            self.raylet_client.disconnect()\n            sys.exit(0)", "docstring": "Wait for a task to be ready and process the task.\n\nArgs:\ntask: The task to execute.", "source": "juraj-google-style"}
{"code": "def get_axis_grid(self, ind):\n        \n        ng = self.dim\n        num_pts = ng[ind]\n        lengths = self.structure.lattice.abc\n        return [i / num_pts * lengths[ind] for i in range(num_pts)]", "docstring": "Returns the grid for a particular axis.\n\nArgs:\nind (int): Axis index.", "source": "juraj-google-style"}
{"code": "def render(self, data):\n    renderers = {'text/csv': self._render_as_csv, 'text/html': self._render_as_html, None: self._render_as_html}\n    render = renderers[data.content_type]\n    return render(data)", "docstring": "Renders the reports based on data.content_type's value.\n\nArguments:\ndata (ReportViewRequestData): The report data. data.content_type\nis used to determine how the reports are rendered.\n\nReturns:\nHTTPResponse: The rendered version of the report.", "source": "codesearchnet"}
{"code": "def get_variable_dtype(master_dtype=tf.bfloat16, slice_dtype=tf.float32, activation_dtype=tf.float32):\n    return mtf.VariableDType(master_dtype=tf.as_dtype(master_dtype), slice_dtype=tf.as_dtype(slice_dtype), activation_dtype=tf.as_dtype(activation_dtype))", "docstring": "Datatypes to use for the run.\n\nArgs:\nmaster_dtype: string, datatype for checkpoints\nkeep this the same between training and eval/inference\nslice_dtype: string, datatype for variables in memory\nmust be tf.float32 for training\nactivation_dtype: string, datatype for activations\nless memory usage if tf.bfloat16 but possible numerical issues\nReturns:\na mtf.VariableDtype", "source": "codesearchnet"}
{"code": "def normal_var(data, mean):\n    if (not isinstance(data, np.ndarray)):\n        data = np.array(data)\n    cumm = [0.0]\n    cumm.extend(np.cumsum(np.power(np.abs((data - mean)), 2)))\n\n    def cost(s, t):\n        ' Cost function for normal distribution with variable variance\\n\\n        Args:\\n            start (int): start index\\n            end (int): end index\\n        Returns:\\n            float: Cost, from start to end\\n        '\n        dist = float((t - s))\n        diff = (cumm[t] - cumm[s])\n        return (dist * np.log((diff / dist)))\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nNormal distribution with changing variance\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nvariance (float): variance\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "codesearchnet"}
{"code": "def last_timestamp(self, event_key=None):\n    \n    if event_key is None:\n      timestamps = [self._trackers[key].first_timestamp\n                    for key in self._trackers]\n      return max(timestamp for timestamp in timestamps if timestamp >= 0)\n    else:\n      return self._trackers[event_key].last_timestamp", "docstring": "Obtain the last timestamp.\n\nArgs:\nevent_key: the type key of the sought events (e.g., constants.NAN_KEY). If\nNone, includes all event type keys.\n\nReturns:\nLast (latest) timestamp of all the events of the given type (or all\nevent types if event_key is None).", "source": "juraj-google-style"}
{"code": "def __init__(self, src_state_id, dst_state_id, guard_p, term=None):\n        \n        self.src_state = src_state_id\n        self.dst_state = dst_state_id\n        self.guard = guard_p\n        self.term = None", "docstring": "Initialization function for Arc's guardgen structure\nArgs:\nsrc_state_id (int): The source state identifier\ndst_state_id (int): The destination state identifier\nguard_p: The input character\nterm: The input term\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def detect_mbr(self, filename, offset, fs_id):\n    self.logger.debug('Detecting MBR partition type')\n    if (fs_id not in self.__mbr_plugins):\n        return None\n    else:\n        plugins = self.__mbr_plugins.get(fs_id)\n        for plugin in plugins:\n            if plugin.detect(filename, offset):\n                return plugin.get_volume_object()\n    return None", "docstring": "Used by rawdisk.session.Session to match mbr partitions against\nfilesystem plugins.\n\nArgs:\nfilename: device or file that it will read in order to detect\nthe filesystem fs_id: filesystem id to match (ex. 0x07)\noffset: offset for the filesystem that is being matched\n\nReturns:\nVolume object supplied by matched plugin.\nIf there is no match, None is returned", "source": "codesearchnet"}
{"code": "def size(self, time):\n    if (self.start_time <= time <= self.end_time):\n        return self.masks[(time - self.start_time)].sum()\n    else:\n        return 0", "docstring": "Gets the size of the object at a given time.\n\nArgs:\ntime: Time value being queried.\n\nReturns:\nsize of the object in pixels", "source": "codesearchnet"}
{"code": "def _validate_netconfig(self, conf):\n    nets = conf.get('nets', {})\n    if (len(nets) == 0):\n        raise LagoInitException('No networks configured.')\n    no_mgmt_dns = [name for (name, net) in nets.iteritems() if ((net.get('management', None) is None) and (net.get('main_dns') or net.get('dns_domain_name')))]\n    if ((len(no_mgmt_dns) > 0) and (len(nets.keys()) > 1)):\n        raise LagoInitException('Networks: {0}, misconfigured, they are not marked as management, but have DNS attributes. DNS is supported only in management networks.'.format(','.join(no_mgmt_dns)))\n    for (dom_name, dom_spec) in conf['domains'].items():\n        mgmts = []\n        for nic in dom_spec['nics']:\n            net = self._get_net(conf, dom_name, nic)\n            if (net.get('management', False) is True):\n                mgmts.append(nic['net'])\n        if (len(mgmts) == 0):\n            raise LagoInitException('VM {0} has no management network, please connect it to one.'.format(dom_name))\n        if (len(mgmts) > 1):\n            raise LagoInitException('VM {0} has more than one management network: {1}. It should have exactly one.'.format(dom_name, ','.join(mgmts)))", "docstring": "Validate network configuration\n\nArgs:\nconf(dict): spec\n\nReturns:\nNone\n\n\nRaises:\n:exc:`~lago.utils.LagoInitException`: If a VM has more than\none management network configured, or a network which is not\nmanagement has DNS attributes, or a VM is configured with a\nnone-existence NIC, or a VM has no management network.", "source": "codesearchnet"}
{"code": "def from_pseudoinverse(cls, strains, stresses):\n        \n        \n        warnings.warn(\"Pseudoinverse fitting of Strain/Stress lists may yield \"\n                      \"questionable results from vasp data, use with caution.\")\n        stresses = np.array([Stress(stress).voigt for stress in stresses])\n        with warnings.catch_warnings(record=True):\n            strains = np.array([Strain(strain).voigt for strain in strains])\n\n        voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))\n        return cls.from_voigt(voigt_fit)", "docstring": "Class method to fit an elastic tensor from stress/strain\ndata.  Method uses Moore-Penrose pseudoinverse to invert\nthe s = C*e equation with elastic tensor, stress, and\nstrain in voigt notation\n\nArgs:\nstresses (Nx3x3 array-like): list or array of stresses\nstrains (Nx3x3 array-like): list or array of strains", "source": "juraj-google-style"}
{"code": "def crscode_to_string(codetype, code, format):\n    link = ('http:\n    result = urllib2.urlopen(link).read()\n    if (not isinstance(result, str)):\n        result = result.decode()\n    return result", "docstring": "Lookup crscode on spatialreference.org and return in specified format.\n\nArguments:\n\n- *codetype*: \"epsg\", \"esri\", or \"sr-org\".\n- *code*: The code.\n- *format*: The crs format of the returned string. One of \"ogcwkt\", \"esriwkt\", or \"proj4\", but also several others...\n\nReturns:\n\n- Crs string in the specified format.", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, output_module):\n    \n    if not isinstance(output_module, shared_4n6time.Shared4n6TimeOutputModule):\n      raise errors.BadConfigObject(\n          'Output module is not an instance of Shared4n6TimeOutputModule')\n\n    append = getattr(options, 'append', cls._DEFAULT_APPEND)\n    evidence = cls._ParseStringOption(\n        options, 'evidence', default_value=cls._DEFAULT_EVIDENCE)\n    fields = cls._ParseStringOption(\n        options, 'fields', default_value=cls._DEFAULT_FIELDS)\n    additional_fields = cls._ParseStringOption(\n        options, 'additional_fields')\n\n    if additional_fields:\n      fields = '{0:s},{1:s}'.format(fields, additional_fields)\n\n    output_module.SetAppendMode(append)\n    output_module.SetEvidence(evidence)\n    output_module.SetFields([\n        field_name.strip() for field_name in fields.split(',')])", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (OutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def get_dopants_from_shannon_radii(bonded_structure, num_dopants=5, match_oxi_sign=False):\n    all_species = [Specie(el, oxi) for el in Element for oxi in el.common_oxidation_states]\n    cn_and_species = set(((bonded_structure.get_coordination_of_site(i), bonded_structure.structure[i].specie) for i in range(bonded_structure.structure.num_sites)))\n    cn_to_radii_map = {}\n    possible_dopants = []\n    for (cn, species) in cn_and_species:\n        cn_roman = _int_to_roman(cn)\n        try:\n            species_radius = species.get_shannon_radius(cn_roman)\n        except KeyError:\n            warnings.warn('Shannon radius not found for {} with coordination number {}.\\nSkipping...'.format(species, cn))\n            continue\n        if (cn not in cn_to_radii_map):\n            cn_to_radii_map[cn] = _shannon_radii_from_cn(all_species, cn_roman, radius_to_compare=species_radius)\n        shannon_radii = cn_to_radii_map[cn]\n        possible_dopants += [{'radii_diff': p['radii_diff'], 'dopant_species': p['species'], 'original_species': species} for p in shannon_radii]\n    possible_dopants.sort(key=(lambda x: abs(x['radii_diff'])))\n    return _get_dopants(possible_dopants, num_dopants, match_oxi_sign)", "docstring": "Get dopant suggestions based on Shannon radii differences.\n\nArgs:\nbonded_structure (StructureGraph): A pymatgen structure graph\ndecorated with oxidation states. For example, generated using the\nCrystalNN.get_bonded_structure() method.\nnum_dopants (int): The nummber of suggestions to return for\nn- and p-type dopants.\nmatch_oxi_sign (bool): Whether to force the dopant and original species\nto have the same sign of oxidation state. E.g. If the original site\nis in a negative charge state, then only negative dopants will be\nreturned.\n\nReturns:\n(dict): Dopant suggestions, given as a dictionary with keys \"n_type\" and\n\"p_type\". The suggestions for each doping type are given as a list of\ndictionaries, each with they keys:\n\n- \"radii_diff\": The difference between the Shannon radii of the species.\n- \"dopant_spcies\": The dopant species.\n- \"original_species\": The substituted species.", "source": "codesearchnet"}
{"code": "def is_native_xmon_gate(gate: ops.Gate) -> bool:\n    \n    return isinstance(gate, (ops.CZPowGate,\n                             ops.MeasurementGate,\n                             ops.PhasedXPowGate,\n                             ops.XPowGate,\n                             ops.YPowGate,\n                             ops.ZPowGate))", "docstring": "Check if a gate is a native xmon gate.\n\nArgs:\ngate: Input gate.\n\nReturns:\nTrue if the gate is native to the xmon, false otherwise.", "source": "juraj-google-style"}
{"code": "def get_name(node):\n    if isinstance(node, gast.Name):\n        return node.id\n    elif isinstance(node, (gast.Subscript, gast.Attribute)):\n        return get_name(node.value)\n    else:\n        raise TypeError", "docstring": "Get the name of a variable.\n\nArgs:\nnode: A `Name`, `Subscript` or `Attribute` node.\n\nReturns:\nThe name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.", "source": "codesearchnet"}
{"code": "def get_vcenter(self, **kwargs):\n        \n        config = ET.Element(\"config\")\n        urn = \"urn:brocade.com:mgmt:brocade-vswitch\"\n        ET.SubElement(config, \"vcenter\", xmlns=urn)\n        output = self._callback(config, handler='get_config')\n        result = []\n        element = ET.fromstring(str(output))\n        for vcenter in element.iter('{%s}vcenter'%urn):\n            vc = {}\n            vc['name'] = vcenter.find('{%s}id' % urn).text\n            vc['url'] = (vcenter.find('{%s}credentials' % urn)).find('{%s}url' % urn).text\n            isactive = vcenter.find('{%s}activate' %urn)\n            if isactive is None:\n                vc['isactive'] = False\n            else:\n                vc['isactive'] = True\n            result.append(vc)\n        return result", "docstring": "Get vCenter hosts on the switch\n\nArgs:\n\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturns a list of vcenters\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def parse_args():\n    parser = argparse.ArgumentParser()\n    parser.register('type', 'bool', lambda v: v.lower() == 'true')\n    parser.add_argument('--max_steps', type=int, default=10, help='Number of steps to run trainer.')\n    parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size used during training.')\n    parser.add_argument('--learning_rate', type=float, default=0.025, help='Initial learning rate.')\n    parser.add_argument('--data_dir', type=str, default='/tmp/mnist_data', help='Directory for storing data')\n    parser.add_argument('--fake_data', type='bool', nargs='?', const=True, default=False, help='Use fake MNIST data for unit testing')\n    parser.add_argument('--check_numerics', type='bool', nargs='?', const=True, default=False, help='Use tfdbg to track down bad values during training. Mutually exclusive with the --dump_dir flag.')\n    parser.add_argument('--dump_dir', type=str, default=None, help='Dump TensorFlow program debug data to the specified directory. The dumped data contains information regarding tf.function building, execution of ops and tf.functions, as well as their stack traces and associated source-code snapshots. Mutually exclusive with the --check_numerics flag.')\n    parser.add_argument('--dump_tensor_debug_mode', type=str, default='FULL_HEALTH', help='Mode for dumping tensor values. Options: NO_TENSOR, CURT_HEALTH, CONCISE_HEALTH, SHAPE, FULL_HEALTH. This is relevant only when --dump_dir is set.')\n    parser.add_argument('--dump_circular_buffer_size', type=int, default=-1, help='Size of the circular buffer used to dump execution events. A value <= 0 disables the circular-buffer behavior and causes all instrumented tensor values to be dumped. This is relevant only when --dump_dir is set.')\n    parser.add_argument('--use_random_config_path', type='bool', nargs='?', const=True, default=False, help='If set, set config file path to a random file in the temporary\\n      directory.')\n    return parser.parse_known_args()", "docstring": "Parses commandline arguments.\n\nReturns:\nA tuple (parsed, unparsed) of the parsed object and a group of unparsed\narguments that did not match the parser.", "source": "github-repos"}
{"code": "def isworkday(self, date):\n        \n        date = parsefun(date)\n        return self.weekdaymap[date.weekday()].isworkday", "docstring": "Check if a given date is a work date, ignoring holidays.\n\nArgs:\ndate (date, datetime or str): Date to be checked.\n\nReturns:\nbool: True if the date is a work date, False otherwise.", "source": "juraj-google-style"}
{"code": "def check_import_stdlib(module):\n        \n        if (\n            module in stdlib_list('2.7')  \n            or module in stdlib_list('3.4')\n            or module in stdlib_list('3.5')\n            or module in stdlib_list('3.6')\n            or module in stdlib_list('3.7')\n            or module in ['app', 'args', 'playbook_app']\n        ):\n            return True\n        return False", "docstring": "Check if module is in Python stdlib.\n\nArgs:\nmodule (str): The name of the module to check.\n\nReturns:\nbool: Returns True if the module is in the stdlib or template.", "source": "juraj-google-style"}
{"code": "def download_kegg_gene_metadata(gene_id, outdir=None, force_rerun=False):\n    \n    if not outdir:\n        outdir = ''\n\n    \n    outfile = op.join(outdir, '{}.kegg'.format(custom_slugify(gene_id)))\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        raw_text = bs_kegg.get(\"{}\".format(gene_id))\n        if raw_text == 404:\n            return\n\n        with io.open(outfile, mode='wt', encoding='utf-8') as f:\n            f.write(raw_text)\n\n        log.debug('{}: downloaded KEGG metadata file'.format(outfile))\n    else:\n        log.debug('{}: KEGG metadata file already exists'.format(outfile))\n\n    return outfile", "docstring": "Download the KEGG flatfile for a KEGG ID and return the path.\n\nArgs:\ngene_id: KEGG gene ID (with organism code), i.e. \"eco:1244\"\noutdir: optional output directory of metadata\n\nReturns:\nPath to metadata file", "source": "juraj-google-style"}
{"code": "def _SetCompleted(self):\n    with self._lock:\n        if self._completed:\n            return False\n        self._completed = True\n        return True", "docstring": "Atomically marks the breakpoint as completed.\n\nReturns:\nTrue if the breakpoint wasn't marked already completed or False if the\nbreakpoint was already completed.", "source": "codesearchnet"}
{"code": "def _run_model(iterator, args, tf_args):\n  \n  single_node_env(tf_args)\n\n  logging.info(\"===== input_mapping: {}\".format(args.input_mapping))\n  logging.info(\"===== output_mapping: {}\".format(args.output_mapping))\n  input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())]\n  output_tensor_names = [tensor for tensor, col in sorted(args.output_mapping.items())]\n\n  \n  if args.signature_def_key:\n    assert args.export_dir, \"Inferencing with signature_def_key requires --export_dir argument\"\n    logging.info(\"===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}\".format(args.tag_set, args.export_dir))\n    meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)\n    signature = meta_graph_def.signature_def[args.signature_def_key]\n    logging.debug(\"signature: {}\".format(signature))\n    inputs_tensor_info = signature.inputs\n    logging.debug(\"inputs_tensor_info: {0}\".format(inputs_tensor_info))\n    outputs_tensor_info = signature.outputs\n    logging.debug(\"outputs_tensor_info: {0}\".format(outputs_tensor_info))\n\n  result = []\n\n  global global_sess, global_args\n  if global_sess and global_args == args:\n    \n    sess = global_sess\n  else:\n    \n    tf.reset_default_graph()\n    sess = tf.Session(graph=tf.get_default_graph())\n    if args.export_dir:\n      assert args.tag_set, \"Inferencing from a saved_model requires --tag_set\"\n      \n      logging.info(\"===== restoring from saved_model: {}\".format(args.export_dir))\n      loader.load(sess, args.tag_set.split(','), args.export_dir)\n    elif args.model_dir:\n      \n      ckpt = tf.train.latest_checkpoint(args.model_dir)\n      assert ckpt, \"Invalid model checkpoint path: {}\".format(args.model_dir)\n      logging.info(\"===== restoring from checkpoint: {}\".format(ckpt + \".meta\"))\n      saver = tf.train.import_meta_graph(ckpt + \".meta\", clear_devices=True)\n      saver.restore(sess, ckpt)\n    else:\n      raise Exception(\"Inferencing requires either --model_dir or --export_dir argument\")\n    global_sess = sess\n    global_args = args\n\n  \n  if args.signature_def_key:\n    input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]\n    output_tensors = [outputs_tensor_info[output_tensor_names[0]].name]\n  else:\n    input_tensors = [t + ':0' for t in input_tensor_names]\n    output_tensors = [t + ':0' for t in output_tensor_names]\n\n  logging.info(\"input_tensors: {0}\".format(input_tensors))\n  logging.info(\"output_tensors: {0}\".format(output_tensors))\n\n  \n  for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):\n    inputs_feed_dict = {}\n    for i in range(len(input_tensors)):\n      inputs_feed_dict[input_tensors[i]] = tensors[i]\n\n    outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)\n    lengths = [len(output) for output in outputs]\n    input_size = len(tensors[0])\n    assert all([length == input_size for length in lengths]), \"Output array sizes {} must match input size: {}\".format(lengths, input_size)\n    python_outputs = [output.tolist() for output in outputs]      \n    result.extend(zip(*python_outputs))                           \n\n  return result", "docstring": "mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.\n\nArgs:\n:iterator: input RDD partition iterator.\n:args: arguments for TFModel, in argparse format\n:tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV 
format.\n\nReturns:\nAn iterator of result data.", "source": "juraj-google-style"}
{"code": "def dropout(x, keep_prob, noise_shape=None, name=None):\n    noise_shape = convert_to_shape(noise_shape)\n    if (noise_shape is None):\n        noise_shape = x.shape\n    with tf.variable_scope(name, default_name='dropout'):\n        if (keep_prob == 1.0):\n            return x\n        noise = cast(less(random_uniform(x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype)\n        noise /= keep_prob\n        return (x * noise)", "docstring": "Dropout layer.\n\nArgs:\nx: a Tensor\nkeep_prob: a float between 0.0 and 1.0\nnoise_shape: an optional Shape (a subset of x.shape)\nname: an optional string\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def compile_action_preconditions(self,\n            state: Sequence[tf.Tensor],\n            action: Sequence[tf.Tensor]) -> List[TensorFluent]:\n        \n        scope = self.action_precondition_scope(state, action)\n        preconds = []\n        with self.graph.as_default():\n            with tf.name_scope('action_preconditions'):\n                for p in self.rddl.domain.preconds:\n                    fluent = self._compile_expression(p, scope)\n                    preconds.append(fluent)\n                return preconds", "docstring": "Compiles the action preconditions given current `state` and `action` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA list of :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def Sample(self, task, status):\n    \n    sample_time = time.time()\n    sample = '{0:f}\\t{1:s}\\t{2:s}\\n'.format(\n        sample_time, task.identifier, status)\n    self._WritesString(sample)", "docstring": "Takes a sample of the status of a task for profiling.\n\nArgs:\ntask (Task): a task.\nstatus (str): status.", "source": "juraj-google-style"}
{"code": "def get_metadata_as_dict(self, user_id=None, source=None):\n        \n\n        if self.metadata is None or self.metadata == \"\":\n            return {}\n\n        metadata_dict = self.metadata if isinstance(self.metadata, dict) else json.loads(self.metadata)\n\n        \n        metadata_keys = [m.lower() for m in metadata_dict]\n        if user_id is not None and 'user_id' not in metadata_keys:\n            metadata_dict['user_id'] = six.text_type(user_id)\n\n        if source is not None and 'source' not in metadata_keys:\n            metadata_dict['source'] = six.text_type(source)\n\n        return { k : six.text_type(v) for k, v in metadata_dict.items() }", "docstring": "Convert a metadata json string into a dictionary.\n\nArgs:\nuser_id (int): Optional: Insert user_id into the metadata if specified\nsource (string): Optional: Insert source (the name of the app typically) into the metadata if necessary.\n\nReturns:\ndict: THe metadata as a python dictionary", "source": "juraj-google-style"}
{"code": "def raise_for_status(response):\n    for err_name in web_exceptions.__all__:\n        err = getattr(web_exceptions, err_name)\n        if (err.status_code == response.status):\n            payload = dict(headers=response.headers, reason=response.reason)\n            if issubclass(err, web_exceptions._HTTPMove):\n                raise err(response.headers['Location'], **payload)\n            raise err(**payload)", "docstring": "Raise an appropriate error for a given response.\n\nArguments:\nresponse (:py:class:`aiohttp.ClientResponse`): The API response.\n\nRaises:\n:py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate\nerror for the response's status.", "source": "codesearchnet"}
{"code": "def _process_query(self, query, prepared=False):\n    if (prepared is True):\n        files = {'query': str(query)}\n        logger.debug('About to submit the following query {}'.format(query))\n        (res, status) = self.post(self.disambiguate_service, files=files, headers={'Accept': 'application/json'})\n        if (status == 200):\n            return (self.decode(res), status)\n        else:\n            logger.debug('Disambiguation failed.')\n            return (None, status)\n    text = query['text']\n    sentence_coordinates = [{'offsetStart': 0, 'offsetEnd': len(text)}]\n    total_nb_sentences = len(sentence_coordinates)\n    sentences_groups = []\n    if (len(text) > self.max_text_length):\n        (res, status_code) = self.segment(text)\n        if (status_code == 200):\n            sentence_coordinates = res['sentences']\n            total_nb_sentences = len(sentence_coordinates)\n        else:\n            logger.error('Error during the segmentation of the text.')\n        logger.debug('Text too long, split in {} sentences; building groups of {} sentences.'.format(total_nb_sentences, self.sentences_per_group))\n        sentences_groups = self._group_sentences(total_nb_sentences, self.sentences_per_group)\n    else:\n        query['sentence'] = 'true'\n    if (total_nb_sentences > 1):\n        query['sentences'] = sentence_coordinates\n    if (len(sentences_groups) > 0):\n        for group in sentences_groups:\n            query['processSentence'] = group\n            (res, status_code) = self._process_query(query, prepared=True)\n            if (status_code == 200):\n                if ('entities' in res):\n                    query['entities'] = res[u'entities']\n                query['language'] = res[u'language']\n            else:\n                logger.error('Error when processing the query {}'.format(query))\n                return (None, status_code)\n    else:\n        (res, status_code) = self._process_query(query, prepared=True)\n        if (status_code == 200):\n            query['language'] = res[u'language']\n            if ('entities' in res):\n                query['entities'] = res[u'entities']\n        else:\n            logger.error('Error when processing the query {}'.format(query))\n            return (None, status_code)\n    return (query, status_code)", "docstring": "Process query recursively, if the text is too long,\nit is split and processed bit a bit.\n\nArgs:\nquery (sdict): Text to be processed.\nprepared (bool): True when the query is ready to be submitted via\nPOST request.\nReturns:\nstr: Body ready to be submitted to the API.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        self.Timestamp = reader.ReadUInt32()\n        self.Services = reader.ReadUInt64()\n        addr = bytearray(reader.ReadFixedString(16))\n        addr.reverse()\n        addr.strip(b'\\x00')\n        nums = []\n        for i in range(0, 4):\n            nums.append(str(addr[i]))\n        nums.reverse()\n        adddd = '.'.join(nums)\n        self.Address = adddd\n        self.Port = reader.ReadUInt16(endian='>')", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def center_crop(self, image: 'torch.Tensor', size: Dict[str, int], **kwargs) -> 'torch.Tensor':\n    output_size = size.shortest_edge\n    return F.center_crop(image, output_size=(output_size, output_size), **kwargs)", "docstring": "Center crop an image to `(size[\"height\"], size[\"width\"])`. If the input size is smaller than `crop_size` along\nany edge, the image is padded with 0's and then center cropped.\n\nArgs:\nimage (`torch.Tensor`):\nImage to center crop.\nsize (`Dict[str, int]`):\nSize of the output image in the form `{\"height\": h, \"width\": w}`.", "source": "github-repos"}
{"code": "def _draw_breakpoint_icon(self, top, painter, icon_name):\n    rect = QRect(0, top, self.sizeHint().width(), self.sizeHint().height())\n    try:\n        icon = self.icons[icon_name]\n    except KeyError as e:\n        debug_print(\"Breakpoint icon doen't exist, {}\".format(e))\n    else:\n        icon.paint(painter, rect)", "docstring": "Draw the given breakpoint pixmap.\n\nArgs:\ntop (int): top of the line to draw the breakpoint icon.\npainter (QPainter)\nicon_name (srt): key of icon to draw (see: self.icons)", "source": "codesearchnet"}
{"code": "def __init__(self, hosts=None):\n        \n\n        \n        if not hosts:\n            hosts = [{\"host\": \"localhost\", \"port\": 9200}]\n\n        \n        try:\n            self.els_search = elasticsearch.Elasticsearch(hosts)\n            info = self.els_search.info()\n            version = info['version']\n            print '\\t- ELS Indexer connected: %s %s %s %s' % (str(hosts), info['name'],\n                                                          version['number'], version['lucene_version'])\n        except elasticsearch.exceptions.ConnectionError:\n            print '\\t- ELS connection failed! Is your ELS server running?'\n            exit(1)", "docstring": "Initialization for the Elastic Search Indexer.\n\nArgs:\nhosts: List of connection settings.", "source": "juraj-google-style"}
{"code": "def _next_power_of_two(x):\n    return 1 if x == 0 else 2 ** (int(x) - 1).bit_length()", "docstring": "Calculates the smallest enclosing power of two for an input.\n\nArgs:\nx: Positive float or integer number.\n\nReturns:\nNext largest power of two integer.", "source": "github-repos"}
{"code": "def ice_register_write(self, register_index, value, delay=False):\n        \n        self._dll.JLINKARM_WriteICEReg(register_index, int(value), int(delay))\n        return None", "docstring": "Writes a value to an ARM ICE register.\n\nArgs:\nself (JLink): the ``JLink`` instance\nregister_index (int): the ICE register to write to\nvalue (int): the value to write to the ICE register\ndelay (bool): boolean specifying if the write should be delayed\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def init_from_storage_write_to_datastore(self, batch_size=100, allowed_epsilon=None, skip_image_ids=None, max_num_images=None):\n    if (allowed_epsilon is None):\n        allowed_epsilon = copy.copy(DEFAULT_EPSILON)\n    self._dataset_batches = {}\n    images = self._read_image_list(skip_image_ids)\n    if max_num_images:\n        images = images[:max_num_images]\n    for (batch_idx, batch_start) in enumerate(range(0, len(images), batch_size)):\n        batch = images[batch_start:(batch_start + batch_size)]\n        batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)\n        batch_epsilon = allowed_epsilon[(batch_idx % len(allowed_epsilon))]\n        self.add_batch(batch_id, {'epsilon': batch_epsilon})\n        for (image_id, image_path) in batch:\n            self.add_image(batch_id, image_id, {'dataset_image_id': os.path.basename(image_path)[:(- 4)], 'image_path': image_path})\n    self.write_to_datastore()", "docstring": "Initializes dataset batches from the list of images in the datastore.\n\nArgs:\nbatch_size: batch size\nallowed_epsilon: list of allowed epsilon or None to use default\nskip_image_ids: list of image ids to skip\nmax_num_images: maximum number of images to read", "source": "codesearchnet"}
{"code": "def _read(cls, **kwargs):\n        \n        pd_obj = pandas.read_csv(**kwargs)\n        if isinstance(pd_obj, pandas.DataFrame):\n            return cls.from_pandas(pd_obj)\n        if isinstance(pd_obj, pandas.io.parsers.TextFileReader):\n            \n            \n            pd_read = pd_obj.read\n            pd_obj.read = lambda *args, **kwargs: cls.from_pandas(\n                pd_read(*args, **kwargs)\n            )\n        return pd_obj", "docstring": "Read csv file from local disk.\nArgs:\nfilepath_or_buffer:\nThe filepath of the csv file.\nWe only support local files for now.\nkwargs: Keyword arguments in pandas.read_csv", "source": "juraj-google-style"}
{"code": "def delete(self, filething=None):\n        \n\n        fileobj = filething.fileobj\n\n        self.tags.clear()\n        \n        \n        try:\n            try:\n                self.tags._inject(fileobj, lambda x: 0)\n            except error as e:\n                reraise(self._Error, e, sys.exc_info()[2])\n            except EOFError:\n                raise self._Error(\"no appropriate stream found\")\n        except IOError as e:\n            reraise(self._Error, e, sys.exc_info()[2])", "docstring": "delete(filething=None)\n\nRemove tags from a file.\n\nIf no filename is given, the one most recently loaded is used.\n\nArgs:\nfilething (filething)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def GetLogdirSubdirectories(path):\n    if (not tf.io.gfile.exists(path)):\n        return ()\n    if (not tf.io.gfile.isdir(path)):\n        raise ValueError(('GetLogdirSubdirectories: path exists and is not a directory, %s' % path))\n    if IsCloudPath(path):\n        logger.info('GetLogdirSubdirectories: Starting to list directories via glob-ing.')\n        traversal_method = ListRecursivelyViaGlobbing\n    else:\n        logger.info('GetLogdirSubdirectories: Starting to list directories via walking.')\n        traversal_method = ListRecursivelyViaWalking\n    return (subdir for (subdir, files) in traversal_method(path) if any((IsTensorFlowEventsFile(f) for f in files)))", "docstring": "Obtains all subdirectories with events files.\n\nThe order of the subdirectories returned is unspecified. The internal logic\nthat determines order varies by scenario.\n\nArgs:\npath: The path to a directory under which to find subdirectories.\n\nReturns:\nA tuple of absolute paths of all subdirectories each with at least 1 events\nfile directly within the subdirectory.\n\nRaises:\nValueError: If the path passed to the method exists and is not a directory.", "source": "codesearchnet"}
{"code": "def __init__(self, function_approximator, map_size=(10, 10), memory_num=4, repeating_penalty=0.5):\n        \n        self.__map_arr = self.__create_map(map_size)\n        self.__agent_pos = self.START_POS\n        self.__reward_list = []\n        self.__route_memory_list = []\n        self.__memory_num = memory_num\n        self.__repeating_penalty = repeating_penalty\n\n        super().__init__(function_approximator)", "docstring": "Init.\n\nArgs:\nfunction_approximator:  is-a `FunctionApproximator`.\nmap_size:               Size of map.\nmemory_num:             The number of step of agent's memory.\nrepeating_penalty:      The value of penalty in the case that agent revisit.", "source": "juraj-google-style"}
{"code": "def aoi(self, **kwargs):\n        \n        g = self._parse_geoms(**kwargs)\n        if g is None:\n            return self\n        else:\n            return self[g]", "docstring": "Subsets the Image by the given bounds\n\nArgs:\nbbox (list): optional. A bounding box array [minx, miny, maxx, maxy]\nwkt (str): optional. A WKT geometry string\ngeojson (str): optional. A GeoJSON geometry dictionary\n\nReturns:\nimage: an image instance of the same type", "source": "juraj-google-style"}
{"code": "def run_and_report_benchmark(self, dataset, num_elements, name, iters=5, extras=None, warmup=True, apply_default_optimizations=False, session_config=None):\n    wall_time = self.run_benchmark(dataset=dataset, num_elements=num_elements, iters=iters, warmup=warmup, apply_default_optimizations=apply_default_optimizations, session_config=session_config)\n    if extras is None:\n        extras = {}\n    if context.executing_eagerly():\n        name = '{}.eager'.format(name)\n        extras['implementation'] = 'eager'\n    else:\n        name = '{}.graph'.format(name)\n        extras['implementation'] = 'graph'\n    extras['num_elements'] = num_elements\n    self.report_benchmark(wall_time=wall_time, iters=iters, name=name, extras=extras)\n    return wall_time", "docstring": "Benchmarks the dataset and reports the stats.\n\nRuns the dataset `iters` times. In each iteration, the benchmark measures\nthe time it takes to go through `num_elements` elements of the dataset.\nThis is followed by logging/printing the benchmark stats.\n\nArgs:\ndataset: Dataset to benchmark.\nnum_elements: Number of dataset elements to iterate through each benchmark\niteration.\nname: Name of the benchmark.\niters: Number of times to repeat the timing.\nextras: A dict which maps string keys to additional benchmark info.\nwarmup: If true, warms up the session caches by running an untimed run.\napply_default_optimizations: Determines whether default optimizations\nshould be applied.\nsession_config: A ConfigProto protocol buffer with configuration options\nfor the session. Applicable only for benchmarking in graph mode.\n\nReturns:\nA float, representing the per-element wall time of the dataset in seconds.\nThis is the median time (with respect to `iters`) it takes for the dataset\nto go through `num_elements` elements, divided by `num_elements.`", "source": "github-repos"}
{"code": "def pack(value):\n    if is_packed(value):\n        return value\n    spec = value._type_spec._tf_extension_type_with_packed(True)\n    try:\n        variant = composite_tensor_ops.composite_tensor_to_variants(value)\n    except nested_structure_coder.NotEncodableError as e:\n        raise ValueError('ExtensionTypes must have a __name__ field in order to be packed.') from e\n    return _create_object_from_type_and_dict(type(value), {'_tf_extension_type_cached_type_spec': spec, '_tf_extension_type_packed_variant': variant})", "docstring": "Returns a copy of `value` with fields packed in a single Variant.\n\nArgs:\nvalue: An `ExtensionType` object.\n\nReturns:\nAn `ExtensionType` object.", "source": "github-repos"}
{"code": "def ExtractFilename(self, flagfile_str):\n    \n    if flagfile_str.startswith('--flagfile='):\n      return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())\n    elif flagfile_str.startswith('-flagfile='):\n      return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())\n    else:\n      raise exceptions.Error(\n          'Hit illegal --flagfile type: %s' % flagfile_str)", "docstring": "Returns filename from a flagfile_str of form -[-]flagfile=filename.\n\nThe cases of --flagfile foo and -flagfile foo shouldn't be hitting\nthis function, as they are dealt with in the level above this\nfunction.\n\nArgs:\nflagfile_str: flagfile string.\n\nReturns:\nstr filename from a flagfile_str of form -[-]flagfile=filename.\n\nRaises:\nError: when illegal --flagfile provided.", "source": "juraj-google-style"}
{"code": "def build_results(self, session, tensor_values):\n    full_values = []\n    assert len(self._final_fetches) == len(tensor_values)\n    i = 0\n    j = 0\n    for is_op in self._ops:\n        if is_op:\n            full_values.append(None)\n        else:\n            if self._fetches[i].ref() in self._feed_handles:\n                value = self._feed_handles[self._fetches[i].ref()].eval()\n            else:\n                value = self._feeds.get(self._fetches[i].ref())\n            if value is None:\n                value = tensor_values[j]\n                j += 1\n            dtype = self._fetch_handles.get(self._fetches[i].ref())\n            if dtype:\n                full_values.append(session_ops.TensorHandle(value, dtype, session))\n            else:\n                full_values.append(value)\n            i += 1\n    assert j == len(tensor_values)\n    return self._fetch_mapper.build_results(full_values)", "docstring": "Build results matching the original fetch shape.\n\n`tensor_values` must be a list of the same length as\nthe one returned by `fetches()`, and holding the requested\nfetch values.\n\nThis method builds a struct with the same shape as the original `fetches`\npassed to the constructor, in which the fetches are replaced by their\nfetched value.\n\nArgs:\nsession: The enclosing session.  Used for tensor handles.\ntensor_values: List of values matching the list returned by fetches().\n\nReturns:\nA structure of the same shape as the original `fetches` argument but\ncontaining tensors or None (for fetched ops).", "source": "github-repos"}
{"code": "def manual_get_pfam_annotations(seq, outpath, searchtype='phmmer', force_rerun=False):\n    if op.exists(outpath):\n        with open(outpath, 'r') as f:\n            json_results = json.loads(json.load(f))\n    else:\n        fseq = ('>Seq\\n' + seq)\n        if (searchtype == 'phmmer'):\n            parameters = {'seqdb': 'pdb', 'seq': fseq}\n        if (searchtype == 'hmmscan'):\n            parameters = {'hmmdb': 'pfam', 'seq': fseq}\n        enc_params = urllib.urlencode(parameters).encode('utf-8')\n        request = urllib2.Request('http:\n        url = (urllib2.urlopen(request).geturl() + '?output=json')\n        request = str(url)\n        request_read = urlopen(request).read().decode('utf-8')\n        with open(outpath, 'w') as f:\n            json.dump(request_read, f)\n        json_results = json.loads(request_read)\n    return json_results['results']['hits']", "docstring": "Retrieve and download PFAM results from the HMMER search tool.\n\nArgs:\nseq:\noutpath:\nsearchtype:\nforce_rerun:\n\nReturns:\n\nTodo:\n* Document and test!", "source": "codesearchnet"}
{"code": "def dict_factory(self, cursor, row):\n        \n        d = {}\n        for idx, col in enumerate(cursor.description):\n            val = row[idx]\n            name = col[0]\n            if name == Field.Time_Stamp:\n                d[col[0]] = str(val)\n                continue\n            if name == \"Raw_A\" or name == \"Raw_B\":  \n                continue\n            if name not in self.m_all_fields:\n                continue\n            if (str(val) != \"None\") and ((val > 0) or (val < 0)):\n                d[name] = str(val)\n        return d", "docstring": "Sqlite callback accepting the cursor and the original row as a tuple.\n\nSimple return of JSON safe types.\n\nArgs:\ncursor (sqlite cursor):  Original cursory\nrow (sqlite row tuple): Original row.\n\nReturns:\ndict: modified row.", "source": "juraj-google-style"}
{"code": "def _ParseFileEntryWithParsers(self, parser_mediator, parser_names, file_entry, file_object=None):\n    parse_results = self._PARSE_RESULT_UNSUPPORTED\n    for parser_name in parser_names:\n        parser = self._parsers.get(parser_name, None)\n        if (not parser):\n            raise RuntimeError('Parser object missing for parser: {0:s}'.format(parser_name))\n        if parser.FILTERS:\n            if (not self._CheckParserCanProcessFileEntry(parser, file_entry)):\n                parse_results = self._PARSE_RESULT_SUCCESS\n                continue\n        display_name = parser_mediator.GetDisplayName(file_entry)\n        logger.debug('[ParseFileEntryWithParsers] parsing file: {0:s} with parser: {1:s}'.format(display_name, parser_name))\n        parse_result = self._ParseFileEntryWithParser(parser_mediator, parser, file_entry, file_object=file_object)\n        if (parse_result == self._PARSE_RESULT_FAILURE):\n            return self._PARSE_RESULT_FAILURE\n        if (parse_result == self._PARSE_RESULT_SUCCESS):\n            parse_results = self._PARSE_RESULT_SUCCESS\n    return parse_results", "docstring": "Parses a file entry with a specific parsers.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nparser_names (list[str]): names of parsers.\nfile_entry (dfvfs.FileEntry): file entry.\nfile_object (Optional[file]): file-like object to parse.\nIf not set the parser will use the parser mediator to open\nthe file entry's default data stream as a file-like object.\n\nReturns:\nint: parse result which is _PARSE_RESULT_FAILURE if the file entry\ncould not be parsed, _PARSE_RESULT_SUCCESS if the file entry\nsuccessfully was parsed or _PARSE_RESULT_UNSUPPORTED when\nUnableToParseFile was raised or no names of parser were provided.\n\nRaises:\nRuntimeError: if the parser object is missing.", "source": "codesearchnet"}
{"code": "def _page_streamable(page_descriptor):\n\n    def inner(a_func, settings, request, **kwargs):\n        'Actual page-streaming based on the settings.'\n        page_iterator = gax.PageIterator(a_func, page_descriptor, settings.page_token, request, **kwargs)\n        if settings.flatten_pages:\n            return gax.ResourceIterator(page_iterator)\n        else:\n            return page_iterator\n    return inner", "docstring": "Creates a function that yields an iterable to performs page-streaming.\n\nArgs:\npage_descriptor (:class:`PageDescriptor`): indicates the structure\nof page streaming to be performed.\n\nReturns:\nCallable: A function that returns an iterator.", "source": "codesearchnet"}
{"code": "def build(self):\n    if (not self.build_cmds):\n        LOGGER.debug('No build commands were found, skipping build step')\n    with LogTask('Building {} disk {}'.format(self.name, self.disk_path)):\n        for command in self.build_cmds:\n            with LogTask('Running command {}'.format(command.name)):\n                LOGGER.debug(command.cmd)\n                result = utils.run_command(command.cmd)\n                if result:\n                    raise BuildException(result.err)", "docstring": "Run all the commands in self.build_cmds\n\nRaises:\nlago.build.BuildException: If a command returned a non-zero code", "source": "codesearchnet"}
{"code": "def alternative_titles(self, **kwargs):\n    path = self._get_id_path('alternative_titles')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the alternative titles for a specific movie id.\n\nArgs:\ncountry: (optional) ISO 3166-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def set_reprompt_ssml(self, ssml):\n    self.response.reprompt.outputSpeech.type = 'SSML'\n    self.response.reprompt.outputSpeech.ssml = ssml", "docstring": "Set response reprompt output speech as SSML type.\n\nArgs:\nssml: str. Response speech used when type is 'SSML', should be formatted\nwith Speech Synthesis Markup Language. Cannot exceed 8,000\ncharacters.", "source": "codesearchnet"}
{"code": "async def verify_docker_image_task(chain, link):\n    errors = []\n    worker_type = get_worker_type(link.task)\n    if (worker_type not in chain.context.config['valid_docker_image_worker_types']):\n        errors.append('{} is not a valid docker-image workerType!'.format(worker_type))\n    raise_on_errors(errors)", "docstring": "Verify the docker image Link.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\nlink (LinkOfTrust): the task link we're checking.", "source": "codesearchnet"}
{"code": "def _validated_config_filename(self, name):\n        \n        dir_name = self._make_config_dir()\n        filename = os.path.join(dir_name, name.split(\".json\")[0] + \".json\")\n        return filename", "docstring": "Make config dir and return full file path and extension\n\nArgs:\nname (str): Filename without dir or extension\n\nReturns:\nstr: Full path including extension", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, data_type, data_format='NHWC'):\n    total_size_1 = 1\n    total_size_2 = 1\n    for s in tensor_in_sizes:\n        total_size_1 *= s\n    for s in filter_in_sizes:\n        total_size_2 *= s\n    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)], dtype=data_type).reshape(tensor_in_sizes)\n    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)], dtype=data_type).reshape(filter_in_sizes)\n    with self.session() as sess:\n        if data_type == np.float32:\n            tolerance = 0.0001\n        else:\n            self.assertEqual(data_type, np.float64)\n            tolerance = 1e-08\n        t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)\n        t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)\n        native_t1 = t1\n        strides = [1, stride, stride, 1]\n        if data_format == 'NCHW':\n            native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n            strides = [1, 1, stride, stride]\n        with self.test_scope():\n            conv_native = nn_ops.depthwise_conv2d_native(native_t1, t2, strides=strides, data_format=data_format, padding=padding)\n        if data_format == 'NCHW':\n            conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])\n        with ops.device('CPU'):\n            conv_interface = ReferenceDepthwiseConv2D(t1, t2, strides=[1, stride, stride, 1], padding=padding)\n        native_result = sess.run(conv_native, {t1: x1, t2: x2})\n        interface_result = sess.run(conv_interface, {t1: x1, t2: x2})\n    print('data_type:', data_type, 'max diff = ', np.amax(np.absolute(native_result - interface_result)))\n    self.assertAllClose(np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in\n[filter_rows, filter_cols, input_depth, depth_multiplier].\nstride: Stride.\npadding: Padding type.\ndata_type: The data type to use.\ndata_format: The data_format of the input. \"NHWC\" or \"NCHW\".", "source": "github-repos"}
{"code": "def convert_obatoms_to_molecule(self, atoms, residue_name=None, site_property='ff_map'):\n    restore_site_props = (True if (residue_name is not None) else False)\n    if (restore_site_props and (not hasattr(self, 'map_residue_to_mol'))):\n        self._set_residue_map()\n    coords = []\n    zs = []\n    for atm in atoms:\n        coords.append(list(atm.coords))\n        zs.append(atm.atomicnum)\n    mol = Molecule(zs, coords)\n    if restore_site_props:\n        props = []\n        ref = self.map_residue_to_mol[residue_name].copy()\n        assert (len(mol) == len(ref))\n        assert (ref.formula == mol.formula)\n        for (i, site) in enumerate(mol):\n            assert (site.specie.symbol == ref[i].specie.symbol)\n            props.append(getattr(ref[i], site_property))\n        mol.add_site_property(site_property, props)\n    return mol", "docstring": "Convert list of openbabel atoms to MOlecule.\n\nArgs:\natoms ([OBAtom]): list of OBAtom objects\nresidue_name (str): the key in self.map_residue_to_mol. Usec to\nrestore the site properties in the final packed molecule.\nsite_property (str): the site property to be restored.\n\nReturns:\nMolecule object", "source": "codesearchnet"}
{"code": "def to_price_index(returns, start=100):\n    return ((returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start)", "docstring": "Returns a price index given a series of returns.\n\nArgs:\n* returns: Expects a return series\n* start (number): Starting level\n\nAssumes arithmetic returns.\n\nFormula is: cumprod (1+r)", "source": "codesearchnet"}
{"code": "def convert(self, calibration_inputs: Optional[Mapping[str, np.ndarray]]=None, num_runs=1) -> None:", "docstring": "Converts the model with TensorRT and calibrates if using INT8 precision mode.\n\nArgs:\ncalibration_inputs: Mapping from input names to ndarrays in TF1. Or a\nsequence of tensors in TF2. Used as calibration data.\nnum_runs: Number of calibration runs.", "source": "github-repos"}
{"code": "def aes_encrypt(base64_encryption_key, data):\n    if isinstance(data, text_type):\n        data = data.encode('UTF-8')\n    (aes_key_bytes, hmac_key_bytes) = _extract_keys(base64_encryption_key)\n    data = _pad(data)\n    iv_bytes = os.urandom(AES_BLOCK_SIZE)\n    cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)\n    data = (iv_bytes + cipher.encrypt(data))\n    hmac_signature = hmac.new(hmac_key_bytes, data, hashlib.sha256).digest()\n    return as_base64((data + hmac_signature))", "docstring": "Encrypt data with AES-CBC and sign it with HMAC-SHA256\n\nArguments:\nbase64_encryption_key (str): a base64-encoded string containing an AES encryption key\nand HMAC signing key as generated by generate_encryption_key()\ndata (str): a byte string containing the data to be encrypted\n\nReturns:\nstr: the encrypted data as a byte string with the HMAC signature appended to the end", "source": "codesearchnet"}
{"code": "def _KillProcess(self, pid):\n    if sys.platform.startswith('win'):\n        process_terminate = 1\n        handle = ctypes.windll.kernel32.OpenProcess(process_terminate, False, pid)\n        ctypes.windll.kernel32.TerminateProcess(handle, (- 1))\n        ctypes.windll.kernel32.CloseHandle(handle)\n    else:\n        try:\n            os.kill(pid, signal.SIGKILL)\n        except OSError as exception:\n            logger.error('Unable to kill process {0:d} with error: {1!s}'.format(pid, exception))", "docstring": "Issues a SIGKILL or equivalent to the process.\n\nArgs:\npid (int): process identifier (PID).", "source": "codesearchnet"}
{"code": "def expand_role(self, role):\n    if ('/' in role):\n        return role\n    else:\n        return self.boto_session.resource('iam').Role(role).arn", "docstring": "Expand an IAM role name into an ARN.\n\nIf the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full\nARN and return it.\n\nArgs:\nrole (str): An AWS IAM role (either name or full ARN).\n\nReturns:\nstr: The corresponding AWS IAM role ARN.", "source": "codesearchnet"}
{"code": "def delete(self, url, **kwargs):\n    check_type(url, basestring, may_be_none=False)\n    erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['DELETE'])\n    self.request('DELETE', url, erc, **kwargs)", "docstring": "Sends a DELETE request.\n\nArgs:\nurl(basestring): The URL of the API endpoint.\n**kwargs:\nerc(int): The expected (success) response code for the request.\nothers: Passed on to the requests package.\n\nRaises:\nApiError: If anything other than the expected response code is\nreturned by the Webex Teams API endpoint.", "source": "codesearchnet"}
{"code": "def _signature_to_tf2xla_config(signature_def, variable_nodes_to_feed):\n    from tensorflow.compiler.tf2xla import tf2xla_pb2\n    config = tf2xla_pb2.Config()\n    tensor_id = tf2xla_pb2.TensorId\n    for name, input_ in signature_def.inputs.items():\n        name = name.replace('/', '_')\n        name = 'feed_{}'.format(name)\n        node_name, output_index = _parse_tensor_name(input_.name)\n        output_index = int(output_index)\n        config.feed.append(tf2xla_pb2.Feed(id=tensor_id(node_name=node_name, output_index=output_index), name=name, type=input_.dtype, shape=input_.tensor_shape))\n    for name, output_ in signature_def.outputs.items():\n        name = name.replace('/', '_')\n        name = 'fetch_{}'.format(name)\n        node_name, output_index = _parse_tensor_name(output_.name)\n        output_index = int(output_index)\n        config.fetch.append(tf2xla_pb2.Fetch(id=tensor_id(node_name=node_name, output_index=output_index), name=name, type=output_.dtype, shape=output_.tensor_shape))\n    for node, modified in variable_nodes_to_feed:\n        name = node.name.replace('/', '_')\n        name = 'param_{}'.format(name)\n        config.variable.append(tf2xla_pb2.Variable(node_name=node.name, name=name, type=node.attr['dtype'].type, shape=node.attr['shape'].shape, readonly=not modified))\n    return config", "docstring": "Convert `signature_def` to tf2xla config.  Returns a `tf2xla.Config` proto.\n\nArgs:\nsignature_def: Instance of `SignatureDef`.\nvariable_nodes_to_feed: List of tuples of form `(node_def, modified)`\ncorresponding to VarHandleOp, and a boolean `modified` that describes\nwhether the variable was modified during execution.\n\nReturns:\nAn instance of `tf2xla.Config` proto.\n\nRaises:\nRuntimeError: If TensorFlow was not compiled with XLA.", "source": "github-repos"}
{"code": "def _to_json(self, strip, to_serialize=None):\n    if (to_serialize is None):\n        to_serialize = copy.copy(self.__dict__)\n    pkcs12_val = to_serialize.get(_PKCS12_KEY)\n    if (pkcs12_val is not None):\n        to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)\n    return super(ServiceAccountCredentials, self)._to_json(strip, to_serialize=to_serialize)", "docstring": "Utility function that creates JSON repr. of a credentials object.\n\nOver-ride is needed since PKCS#12 keys will not in general be JSON\nserializable.\n\nArgs:\nstrip: array, An array of names of members to exclude from the\nJSON.\nto_serialize: dict, (Optional) The properties for this object\nthat will be serialized. This allows callers to\nmodify before serializing.\n\nReturns:\nstring, a JSON representation of this instance, suitable to pass to\nfrom_json().", "source": "codesearchnet"}
{"code": "def get_pkg_names(pkgs):\n    result = set()\n    with open(join('mapping'), 'r') as f:\n        data = dict((x.strip().split(':') for x in f))\n    for pkg in pkgs:\n        result.add(data.get(pkg, pkg))\n    return sorted(result, key=(lambda s: s.lower()))", "docstring": "Get PyPI package names from a list of imports.\n\nArgs:\npkgs (List[str]): List of import names.\n\nReturns:\nList[str]: The corresponding PyPI package names.", "source": "codesearchnet"}
{"code": "def run(self, *args, **kwargs):\n    self.log.debug('Starting EBSAuditor')\n    data = self.update_data()\n    notices = defaultdict(list)\n    for (account, issues) in data.items():\n        for issue in issues:\n            for recipient in account.contacts:\n                notices[NotificationContact(type=recipient['type'], value=recipient['value'])].append(issue)\n    self.notify(notices)", "docstring": "Main execution point for the auditor\n\nArgs:\n*args:\n**kwargs:\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def delete_user_role(self, user, role):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.delete_user_role(user, role)", "docstring": "Remove role from given user.\n\nArgs:\nuser (string): User name.\nrole (string): Role to remove.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _parse_name(self, name):\n    if not isinstance(name, str):\n        raise TypeError(f\"'name' must be a string, such as 'mixed_float16'. Received: name={name} (of type {type(name)})\")\n    if name == 'mixed_float16':\n        return ('float16', 'float32')\n    elif name == 'mixed_bfloat16':\n        return ('bfloat16', 'float32')\n    try:\n        dtype = backend.standardize_dtype(name)\n        return (dtype, dtype)\n    except ValueError:\n        raise ValueError(f\"Cannot convert '{name}' to a mixed precision DTypePolicy. Valid policies include 'mixed_float16', 'mixed_bfloat16', and the name of any float dtype such as 'float32'.\")", "docstring": "Parses a `DTypePolicy` name into a compute and variable dtype.\n\nArgs:\nname: The name of the policy.\n\nReturns:\nThe `(compute_dtype, variable_dtype)` pair.", "source": "github-repos"}
{"code": "def num_accelerators(self, task_type=None, task_id=None, config_proto=None):\n    master = self.master(task_type, task_id)\n    devices = get_accelerator_devices(master, config_proto)\n    mapping = collections.defaultdict(int)\n    for device in devices:\n        if task_type is not None and task_id is not None:\n            job_path = '/job:%s' % task_type\n            task_path = '/task:%s' % task_id\n            if job_path not in device.name or task_path not in device.name:\n                continue\n        mapping[device.device_type] += 1\n    return mapping", "docstring": "Returns the number of accelerator cores per worker.\n\nThis returns the number of accelerator cores (such as GPUs and TPUs)\navailable per worker.\n\nOptionally, we allow callers to specify the task_type, and task_id, for\nif they want to target a specific TensorFlow task to query\nthe number of accelerators. This is to support heterogenous environments,\nwhere the number of accelerators cores per host is different.\n\nArgs:\ntask_type: (Optional) The type of the TensorFlow task of the machine we\nwant to query.\ntask_id: (Optional) The index of the TensorFlow task of the machine we\nwant to query.\nconfig_proto: (Optional) Configuration for starting a new session to\nquery how many accelerator cores it has.\n\nReturns:\nA map of accelerator types to number of cores.", "source": "github-repos"}
{"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n    \n    \n    super(AutomaticDestinationsOLECFPlugin, self).Process(\n        parser_mediator, **kwargs)\n\n    if not root_item:\n      raise ValueError('Root item not set.')\n\n    for item in root_item.sub_items:\n      if item.name == 'DestList':\n        self.ParseDestList(parser_mediator, item)\n\n      elif self._RE_LNK_ITEM_NAME.match(item.name):\n        display_name = parser_mediator.GetDisplayName()\n        if display_name:\n          display_name = '{0:s} \n        else:\n          display_name = '\n\n        parser_mediator.AppendToParserChain(self._WINLNK_PARSER)\n        try:\n          item.seek(0, os.SEEK_SET)\n          self._WINLNK_PARSER.ParseFileLNKFile(\n              parser_mediator, item, display_name)\n        finally:\n          parser_mediator.PopFromParserChain()", "docstring": "Parses an OLECF file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nroot_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\nValueError: If the root_item is not set.", "source": "juraj-google-style"}
{"code": "def __init__(self, owner, repo_name, token=''):\n        \n        self._github_repository = GitHub(token=token).repository(owner, repo_name)", "docstring": "Build the GitHub API URL which points to the definition of the repository.\n\nArgs:\nowner (str): the owner's GitHub username\nrepo_name (str): the name of the repository\ntoken (str): the GitHub API token\n\nReturns:\ndict: a representation of the repo definition", "source": "juraj-google-style"}
{"code": "def loop(self, timer_interval_secs, target, args=None, kwargs=None):\n    looper = coordinator.LooperThread(self._coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)\n    looper.start()\n    return looper", "docstring": "Start a LooperThread that calls a function periodically.\n\nIf `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`\nrepeatedly.  Otherwise it calls it every `timer_interval_secs`\nseconds.  The thread terminates when a stop is requested.\n\nThe started thread is added to the list of threads managed by the supervisor\nso it does not need to be passed to the `stop()` method.\n\nArgs:\ntimer_interval_secs: Number. Time boundaries at which to call `target`.\ntarget: A callable object.\nargs: Optional arguments to pass to `target` when calling it.\nkwargs: Optional keyword arguments to pass to `target` when calling it.\n\nReturns:\nThe started thread.", "source": "github-repos"}
{"code": "def available_cpu_count() -> int:\n    try:\n        match = re.search('(?m)^Cpus_allowed:\\\\s*(.*)$', open('/proc/self/status').read())\n        if match:\n            res = bin(int(match.group(1).replace(',', ''), 16)).count('1')\n            if (res > 0):\n                return res\n    except IOError:\n        LOG.debug('Could not get the number of allowed CPUs')\n    try:\n        import psutil\n        return psutil.cpu_count()\n    except (ImportError, AttributeError):\n        LOG.debug('Could not get the number of allowed CPUs')\n    try:\n        res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n        if (res > 0):\n            return res\n    except (AttributeError, ValueError):\n        LOG.debug('Could not get the number of allowed CPUs')\n    try:\n        res = open('/proc/cpuinfo').read().count('processor\\t:')\n        if (res > 0):\n            return res\n    except IOError:\n        LOG.debug('Could not get the number of allowed CPUs')\n    raise Exception('Can not determine number of CPUs on this system')", "docstring": "Get the number of available CPUs.\n\nNumber of available virtual or physical CPUs on this system, i.e.\nuser/real as output by time(1) when called with an optimally scaling\nuserspace-only program.\n\nReturns:\nNumber of avaialable CPUs.", "source": "codesearchnet"}
{"code": "def get_optimizer_experimental_options():\n    return context.context().get_optimizer_experimental_options()", "docstring": "Get experimental optimizer options.\n\nRefer to tf.config.optimizer.set_experimental_options for a list of current\noptions.\n\nNote that optimizations are only applied in graph mode, (within tf.function).\nIn addition, as these are experimental options, the list is subject to change.\n\nReturns:\nDictionary of configured experimental optimizer options", "source": "github-repos"}
{"code": "def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):\n    for entry in top_level:\n        datetime_value = entry.get('date', None)\n        package_identifiers = entry.get('packageIdentifiers', [])\n        if ((not datetime_value) or (not package_identifiers)):\n            continue\n        display_name = entry.get('displayName', '<UNKNOWN>')\n        display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')\n        process_name = entry.get('processName', '<PROCESS_NAME>')\n        package_identifiers = ', '.join(package_identifiers)\n        event_data = plist_event.PlistTimeEventData()\n        event_data.desc = 'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: {3:s}.'.format(display_name, display_version, process_name, package_identifiers)\n        event_data.key = ''\n        event_data.root = '/item'\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant install history entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntop_level (dict[str, object]): plist top-level key.", "source": "codesearchnet"}
{"code": "def word_matches(s1, s2, n=3):\n    \n    return __matches(s1, s2, word_ngrams, n=n)", "docstring": "Word-level n-grams that match between two strings\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nset: the n-grams found in both strings", "source": "juraj-google-style"}
{"code": "def all_tokens(self, delimiter=' ', label_list_ids=None):\n        \n        tokens = set()\n\n        for label_list in self.label_lists.values():\n            if label_list_ids is None or label_list.idx in label_list_ids:\n                tokens = tokens.union(label_list.all_tokens(delimiter=delimiter))\n\n        return tokens", "docstring": "Return a list of all tokens occurring in\none of the labels in the label-lists.\n\nArgs:\ndelimiter (str): The delimiter used to split labels into tokens\n(see :meth:`audiomate.annotations.Label.tokenized`).\nlabel_list_ids (list): If not None, only labels from label-lists with\nan idx contained in this list are considered.\n\nReturns:\n:class:`set`: A set of distinct tokens.", "source": "juraj-google-style"}
{"code": "def add_node(self, binary_descriptor):\n    try:\n        node_string = parse_binary_descriptor(binary_descriptor)\n    except:\n        self._logger.exception('Error parsing binary node descriptor: %s', binary_descriptor)\n        return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM)\n    try:\n        self.graph.add_node(node_string)\n    except NodeConnectionError:\n        return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE)\n    except ProcessingFunctionError:\n        return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION)\n    except ResourceUsageError:\n        return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE)\n    return Error.NO_ERROR", "docstring": "Add a node to the sensor_graph using a binary node descriptor.\n\nArgs:\nbinary_descriptor (bytes): An encoded binary node descriptor.\n\nReturns:\nint: A packed error code.", "source": "codesearchnet"}
{"code": "def splitGenoSlidingWindow(pos,out_file,size=5e4,step=None):\n    \n    if step is None:    step = 0.5*size\n    chroms = SP.unique(pos[:,0])\n\n    RV = []\n    wnd_i = 0\n    wnd_file = csv.writer(open(out_file,'w'),delimiter='\\t')\n    nSnps = [] \n    for chrom_i in chroms:\n        Ichrom = pos[:,0]==chrom_i\n        idx_chrom_start = SP.where(Ichrom)[0][0]\n        pos_chr = pos[Ichrom,1]\n        start = pos_chr.min()\n        pos_chr_max = pos_chr.max()\n        while 1:\n            if start>pos_chr_max: break\n            end = start+size\n            Ir = (pos_chr>=start)*(pos_chr<end)\n            _nSnps = Ir.sum()\n            if _nSnps>0:\n                idx_wnd_start = idx_chrom_start+SP.where(Ir)[0][0]\n                nSnps.append(_nSnps)\n                line = SP.array([wnd_i,chrom_i,start,end,idx_wnd_start,_nSnps],dtype=int)\n                wnd_file.writerow(line)\n                wnd_i+=1\n            start += step\n    nSnps = SP.array(nSnps)\n    return wnd_i,nSnps", "docstring": "split into windows using a slide criterion\nArgs:\nsize:       window size\nstep:       moving step (default: 0.5*size)\nReturns:\nwnd_i:      number of windows\nnSnps:      vector of per-window number of SNPs", "source": "juraj-google-style"}
{"code": "def _create_and_save_state(cls, mapreduce_spec, _app):\n    state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)\n    state.mapreduce_spec = mapreduce_spec\n    state.active = True\n    state.active_shards = 0\n    if _app:\n        state.app_id = _app\n    config = util.create_datastore_write_config(mapreduce_spec)\n    state.put(config=config)\n    return state", "docstring": "Save mapreduce state to datastore.\n\nSave state to datastore so that UI can see it immediately.\n\nArgs:\nmapreduce_spec: model.MapreduceSpec,\n_app: app id if specified. None otherwise.\n\nReturns:\nThe saved Mapreduce state.", "source": "codesearchnet"}
{"code": "def get(self, username=None, password=None, headers={}):\n    if all((username, password)):\n        return BasicAuth(username, password, headers)\n    elif (not any((username, password))):\n        return AnonymousAuth(headers)\n    else:\n        if (username is None):\n            data = ('username', username)\n        else:\n            data = ('Password', password)\n        msg = (\"%s must have a value (instead of '%s')\" % (data[0], data[1]))\n        raise ValueError(msg)", "docstring": "Factory method to get the correct AuthInfo object.\n\nThe returned value depends on the arguments given. In case the\nusername and password don't have a value (ie evaluate to False),\nreturn an object for anonymous access. Else, return an auth\nobject that supports basic authentication.\n\nArgs:\n`username`: The username of the user.\n`password`: The password of the user.\n`headers`: Custom headers to be sent to each request.\nRaises:\nValueError in case one of the two arguments evaluates to False,\n(such as having the None value).", "source": "codesearchnet"}
{"code": "def setup(config_root=''):\n    \n    config = _load_config(root=config_root)\n\n    logging_config = config.get('core', {}).get('logging', {})\n    log_level = logging_config.get('level', 'INFO').upper()\n    log_handlers = logging_config.get('handlers') or ['syslog']\n\n    ulogger.setup_logging(\n        progname='gordon-janitor', level=log_level, handlers=log_handlers)\n\n    return config", "docstring": "Service configuration and logging setup.\n\nConfiguration defined in ``gordon-janitor-user.toml`` will overwrite\n``gordon-janitor.toml``.\n\nArgs:\nconfig_root (str): where configuration should load from,\ndefaults to current working directory.\nReturns:\nA dict for Gordon service configuration", "source": "juraj-google-style"}
{"code": "def spec_like(self, tree: Tree[Array], *, ignore_other: bool=True) -> Tree[enp.ArraySpec]:\n\n    def _to_spec_array(array):\n        if not enp.ArraySpec.is_array(array):\n            if ignore_other:\n                return array\n            else:\n                raise TypeError(f'Unknown array type: {type(array)}')\n        else:\n            return enp.ArraySpec.from_array(array)\n    return self.backend.map(_to_spec_array, tree)", "docstring": "Inspect a tree of array, works with any array type.\n\nExample:\n\n```python\nmodel = MyModel()\nvariables = model.init(jax.random.PRNGKey(0), x)\n\n# Inspect the `variables` tree structures\nprint(etree.spec_like(variables))\n```\n\nArgs:\ntree: The tree of array\nignore_other: If `True`, non-array are forwarded as-is.\n\nReturns:\nThe tree of `enp.ArraySpec`.", "source": "github-repos"}
{"code": "def date_added(self, date_added):\n        \n        date_added = self._utils.format_datetime(date_added, date_format='%Y-%m-%dT%H:%M:%SZ')\n\n        self._data['dateAdded'] = date_added\n        request = self._base_request\n        request['dateAdded'] = date_added\n        return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels date_added\n\nArgs:\ndate_added: Converted to %Y-%m-%dT%H:%M:%SZ date format", "source": "juraj-google-style"}
{"code": "def _restore_path(table):\n    name = None\n    splited = table.split('___')\n    path = splited[0]\n    if (len(splited) == 2):\n        name = splited[1]\n    path = path.replace('__', os.path.sep)\n    path += '.csv'\n    return (path, name)", "docstring": "Restore resource's path and name from storage's table.\n\nArgs:\ntable (str): table name\n\nReturns:\n(str, str): resource path and name", "source": "codesearchnet"}
{"code": "def HelpText(component, trace=None, verbose=False):\n    info = inspectutils.Info(component)\n    actions_grouped_by_kind = _GetActionsGroupedByKind(component, verbose=verbose)\n    spec = inspectutils.GetFullArgSpec(component)\n    metadata = decorators.GetMetadata(component)\n    name_section = _NameSection(component, info, trace=trace, verbose=verbose)\n    synopsis_section = _SynopsisSection(component, actions_grouped_by_kind, spec, metadata, trace=trace)\n    description_section = _DescriptionSection(component, info)\n    if callable(component):\n        args_and_flags_sections, notes_sections = _ArgsAndFlagsSections(info, spec, metadata)\n    else:\n        args_and_flags_sections = []\n        notes_sections = []\n    usage_details_sections = _UsageDetailsSections(component, actions_grouped_by_kind)\n    sections = [name_section, synopsis_section, description_section] + args_and_flags_sections + usage_details_sections + notes_sections\n    return '\\n\\n'.join((_CreateOutputSection(*section) for section in sections if section is not None))", "docstring": "Gets the help string for the current component, suitable for a help screen.\n\nArgs:\ncomponent: The component to construct the help string for.\ntrace: The Fire trace of the command so far. The command executed so far\ncan be extracted from this trace.\nverbose: Whether to include private members in the help screen.\n\nReturns:\nThe full help screen as a string.", "source": "github-repos"}
{"code": "def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame:\n    cols = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD']\n    (con, _) = create_connection()\n    hours = con.ref(tickers=tickers, flds=cols)\n    cur_dt = pd.Timestamp('today').strftime('%Y-%m-%d ')\n    hours.loc[(:, 'local')] = hours.value.astype(str).str[:(- 3)]\n    hours.loc[(:, 'exch')] = pd.DatetimeIndex((cur_dt + hours.value.astype(str))).tz_localize(tz_loc).tz_convert(tz_exch).strftime('%H:%M')\n    hours = pd.concat([hours.set_index(['ticker', 'field']).exch.unstack().loc[(:, cols)], hours.set_index(['ticker', 'field']).local.unstack().loc[(:, cols)]], axis=1)\n    hours.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End']\n    return hours", "docstring": "Check exchange hours vs local hours\n\nArgs:\ntickers: list of tickers\ntz_exch: exchange timezone\ntz_loc: local timezone\n\nReturns:\nLocal and exchange hours", "source": "codesearchnet"}
{"code": "def l1_l2(l1=0.01, l2=0.01):\n    return L1L2(l1=l1, l2=l2)", "docstring": "Create a regularizer that applies both L1 and L2 penalties.\n\nThe L1 regularization penalty is computed as:\n`loss = l1 * reduce_sum(abs(x))`\n\nThe L2 regularization penalty is computed as:\n`loss = l2 * reduce_sum(square(x))`\n\nArgs:\nl1: Float; L1 regularization factor.\nl2: Float; L2 regularization factor.\n\nReturns:\nAn L1L2 Regularizer with the given regularization factors.", "source": "github-repos"}
{"code": "def reshape_by_blocks(x, x_shape, memory_block_size):\n    x = tf.reshape(x, [x_shape[0], x_shape[1], (x_shape[2] \n    return x", "docstring": "Reshapes input by splitting its length over blocks of memory_block_size.\n\nArgs:\nx: a Tensor with shape [batch, heads, length, depth]\nx_shape: tf.TensorShape of x.\nmemory_block_size: Integer which divides length.\n\nReturns:\nTensor with shape\n[batch, heads, length // memory_block_size, memory_block_size, depth].", "source": "codesearchnet"}
{"code": "def __init__(self, session, proxy_class):\n        \n        assert isinstance(proxy_class, type)\n        self.session = session\n        self.proxy_class = proxy_class", "docstring": "Instantiate an API Authentication Proxy.\n\nArgs:\nauth (requests.Session): Authenticated requests Session.\nproxy_class (type): A class implementing the ``BaseApi``\ninterface.", "source": "juraj-google-style"}
{"code": "def get(cls, blob_key, **ctx_options):\n    fut = cls.get_async(blob_key, **ctx_options)\n    return fut.get_result()", "docstring": "Retrieve a BlobInfo by key.\n\nArgs:\nblob_key: A blob key.  This may be a str, unicode or BlobKey instance.\n**ctx_options: Context options for Model().get_by_id().\n\nReturns:\nA BlobInfo entity associated with the provided key,  If there was\nno such entity, returns None.", "source": "codesearchnet"}
{"code": "def _set_value(self, slot_record):\n    if (slot_record.status == _SlotRecord.FILLED):\n        self.filled = True\n        self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(slot_record)\n        self._fill_datetime = slot_record.fill_time\n        self._value = slot_record.value", "docstring": "Sets the value of this slot based on its corresponding _SlotRecord.\n\nDoes nothing if the slot has not yet been filled.\n\nArgs:\nslot_record: The _SlotRecord containing this Slot's value.", "source": "codesearchnet"}
{"code": "def block_view(self, mri):\n        \n        \n        controller = self.get_controller(mri)\n        block = controller.block_view(weakref.proxy(self))\n        return block", "docstring": "Get a view of a block\n\nArgs:\nmri: The mri of the controller hosting the block\n\nReturns:\nBlock: The block we control", "source": "juraj-google-style"}
{"code": "def _parse_parameters(val_type, val):\n    if (val_type == 'logical'):\n        return (val == 'T')\n    elif (val_type == 'int'):\n        return int(val)\n    elif (val_type == 'string'):\n        return val.strip()\n    else:\n        return float(val)", "docstring": "Helper function to convert a Vasprun parameter into the proper type.\nBoolean, int and float types are converted.\n\nArgs:\nval_type: Value type parsed from vasprun.xml.\nval: Actual string value parsed for vasprun.xml.", "source": "codesearchnet"}
{"code": "def GetAutomountMapMetadata(self, conf, epoch=False):\n    map_name = config.MAP_AUTOMOUNT\n    cache_options = conf.options[map_name].cache\n    value_list = []\n    values = self.GetSingleMapMetadata(map_name, conf, automount_mountpoint=None, epoch=epoch)\n    value_list.extend(values)\n    cache = cache_factory.Create(cache_options, config.MAP_AUTOMOUNT, automount_mountpoint=None)\n    master_map = cache.GetMap()\n    for map_entry in master_map:\n        values = self.GetSingleMapMetadata(map_name, conf, automount_mountpoint=map_entry.key, epoch=epoch)\n        value_list.extend(values)\n    return value_list", "docstring": "Return status of automount master map and all listed automount maps.\n\nWe retrieve the automount master map, and build a list of dicts which\nare used by the caller to print the status output.\n\nArgs:\nconf: a config.Config object\nepoch: return times as an integer epoch (time_t) instead of a\nhuman readable name\n\nReturns:\na list of dicts of metadata key/value pairs", "source": "github-repos"}
{"code": "def __init__(self, namespace=None):\n        \n        assert namespace != DEFAULT_REQUEST_CACHE_NAMESPACE,\\\n            'Optional namespace can not be {}.'.format(DEFAULT_REQUEST_CACHE_NAMESPACE)\n        self.namespace = namespace or DEFAULT_REQUEST_CACHE_NAMESPACE", "docstring": "Creates a request cache with the provided namespace.\n\nArgs:\nnamespace (string): (optional) uses 'default' if not provided.", "source": "juraj-google-style"}
{"code": "def _get_local_folder(self, root=None):\n        \n        if root is None:\n            root = Path()\n        for folders in ['.'], [self.user, self.napp]:\n            kytos_json = root / Path(*folders) / 'kytos.json'\n            if kytos_json.exists():\n                with kytos_json.open() as file_descriptor:\n                    meta = json.load(file_descriptor)\n                    \n                    \n                    username = meta.get('username', meta.get('author'))\n                    if username == self.user and meta.get('name') == self.napp:\n                        return kytos_json.parent\n        raise FileNotFoundError('kytos.json not found.')", "docstring": "Return local NApp root folder.\n\nSearch for kytos.json in _./_ folder and _./user/napp_.\n\nArgs:\nroot (pathlib.Path): Where to begin searching.\n\nReturn:\npathlib.Path: NApp root folder.\n\nRaises:\nFileNotFoundError: If there is no such local NApp.", "source": "juraj-google-style"}
{"code": "def _shape_union(shapes):\n  \n  return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))", "docstring": "A shape containing the union of all dimensions in the input shapes.\n\nArgs:\nshapes: a list of Shapes\n\nReturns:\na Shape", "source": "juraj-google-style"}
{"code": "def get_conversion_factor(self, new_unit):\n    (uo_base, ofactor) = self.as_base_units\n    (un_base, nfactor) = Unit(new_unit).as_base_units\n    units_new = sorted(un_base.items(), key=(lambda d: _UNAME2UTYPE[d[0]]))\n    units_old = sorted(uo_base.items(), key=(lambda d: _UNAME2UTYPE[d[0]]))\n    factor = (ofactor / nfactor)\n    for (uo, un) in zip(units_old, units_new):\n        if (uo[1] != un[1]):\n            raise UnitError(('Units %s and %s are not compatible!' % (uo, un)))\n        c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]\n        factor *= ((c[uo[0]] / c[un[0]]) ** uo[1])\n    return factor", "docstring": "Returns a conversion factor between this unit and a new unit.\nCompound units are supported, but must have the same powers in each\nunit type.\n\nArgs:\nnew_unit: The new unit.", "source": "codesearchnet"}
{"code": "def convert(self):\n    graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags)\n    if self.saved_model_dir is None or not self.experimental_new_converter:\n        graph_def, _, _, _ = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n        self.saved_model_dir = None\n        return super(TFLiteSavedModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)\n    trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)\n    if trackable_obj is None:\n        self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)\n    else:\n        self._debug_info = _get_debug_info(_convert_debug_info_func(trackable_obj.graph_debug_info), graph_def)\n    del trackable_obj\n    gc.collect()\n    return self._convert_from_saved_model(graph_def)", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nReturns:\nThe converted data in serialized format.\n\nRaises:\nValueError:\nNo concrete function is specified.\nMultiple concrete functions are specified.\nInput shape is not specified.\nInvalid quantization parameters.", "source": "github-repos"}
{"code": "def squeeze(name, x, factor=2, reverse=True):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    shape = common_layers.shape_list(x)\n    if factor == 1:\n      return x\n    height = int(shape[1])\n    width = int(shape[2])\n    n_channels = int(shape[3])\n\n    if not reverse:\n      assert height % factor == 0 and width % factor == 0\n      x = tf.reshape(x, [-1, height\n                         width\n      x = tf.transpose(x, [0, 1, 3, 5, 2, 4])\n      x = tf.reshape(x, [-1, height\n                         factor, n_channels*factor*factor])\n    else:\n      x = tf.reshape(\n          x, (-1, height, width, int(n_channels/factor**2), factor, factor))\n      x = tf.transpose(x, [0, 1, 4, 2, 5, 3])\n      x = tf.reshape(x, (-1, int(height*factor),\n                         int(width*factor), int(n_channels/factor**2)))\n    return x", "docstring": "Block-wise spatial squeezing of x to increase the number of channels.\n\nArgs:\nname: Used for variable scoping.\nx: 4-D Tensor of shape (batch_size X H X W X C)\nfactor: Factor by which the spatial dimensions should be squeezed.\nreverse: Squueze or unsqueeze operation.\n\nReturns:\nx: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X\n(cXfactor^2). If reverse is True, then it is factor = (1 / factor)", "source": "juraj-google-style"}
{"code": "def stage_tc_create_attribute(self, attribute_type, attribute_value, resource):\n        \n        attribute_data = {'type': str(attribute_type), 'value': str(attribute_value)}\n        \n        if attribute_type in ['Description', 'Source']:\n            attribute_data['displayed'] = True\n\n        attrib_resource = resource.attributes()\n        attrib_resource.body = json.dumps(attribute_data)\n        attrib_resource.http_method = 'POST'\n\n        \n        a_response = attrib_resource.request()\n        if a_response.get('status') != 'Success':\n            self.log.warning(\n                '[stage] Failed adding attribute type \"{}\":\"{}\" ({}).'.format(\n                    attribute_type, attribute_value, a_response.get('response').text\n                )\n            )", "docstring": "Add an attribute to a resource.\n\nArgs:\nattribute_type (str): The attribute type (e.g., Description).\nattribute_value (str): The attribute value.\nresource (obj): An instance of tcex resource class.", "source": "juraj-google-style"}
{"code": "def removedirs(self, target_directory):\n        \n        target_directory = self.filesystem.absnormpath(target_directory)\n        directory = self.filesystem.confirmdir(target_directory)\n        if directory.contents:\n            self.filesystem.raise_os_error(\n                errno.ENOTEMPTY, self.path.basename(target_directory))\n        else:\n            self.rmdir(target_directory)\n        head, tail = self.path.split(target_directory)\n        if not tail:\n            head, tail = self.path.split(head)\n        while head and tail:\n            head_dir = self.filesystem.confirmdir(head)\n            if head_dir.contents:\n                break\n            \n            self.filesystem.rmdir(head, allow_symlink=True)\n            head, tail = self.path.split(head)", "docstring": "Remove a leaf fake directory and all empty intermediate ones.\n\nArgs:\ntarget_directory: the directory to be removed.\n\nRaises:\nOSError: if target_directory does not exist or is not a directory.\nOSError: if target_directory is not empty.", "source": "juraj-google-style"}
{"code": "def defaultStorable(self, python_type=None, storable_type=None, version=None, **kwargs):\n        \n        if python_type is None:\n            python_type = lookup_type(storable_type)\n        if self.verbose:\n            print('generating storable instance for type: {}'.format(python_type))\n        self.storables.registerStorable(default_storable(python_type, \\\n                version=version, storable_type=storable_type), **kwargs)\n        return self.byPythonType(python_type, True).asVersion(version)", "docstring": "Generate a default storable instance.\n\nArguments:\n\npython_type (type): Python type of the object.\n\nstorable_type (str): storable type name.\n\nversion (tuple): version number of the storable handler.\n\nReturns:\n\nStorableHandler: storable instance.\n\nExtra keyword arguments are passed to :meth:`registerStorable`.", "source": "juraj-google-style"}
{"code": "def validate(self, data):\n        \n        try:\n            self._validator.validate(data)\n        except jsonschema.ValidationError as e:\n            six.raise_from(ValidationError.create_from(e), e)", "docstring": "Validates a data dict against this schema.\n\nArgs:\ndata (dict): The data to be validated.\n\nRaises:\nValidationError: If the data is invalid.", "source": "juraj-google-style"}
{"code": "def PrepareForExport(module_name, ast, loader):\n    src = pytd_utils.Print(ast)\n    return SourceToExportableAst(module_name, src, loader)", "docstring": "Prepare an ast as if it was parsed and loaded.\n\nExternal dependencies will not be resolved, as the ast generated by this\nmethod is supposed to be exported.\n\nArgs:\nmodule_name: The module_name as a string for the returned ast.\nast: pytd.TypeDeclUnit, is only used if src is None.\nloader: A load_pytd.Loader instance.\n\nReturns:\nA pytd.TypeDeclUnit representing the supplied AST as it would look after\nbeing written to a file and parsed.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(PollRequestPayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE, local_stream):\n        self._asynchronous_correlation_value = primitives.ByteString(tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE)\n        self._asynchronous_correlation_value.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Poll request payload and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def write(self, path=None, *args, **kwargs):\n    if (path is None):\n        print(self.format(*args, **kwargs))\n    else:\n        with io.open(path, 'w', newline='') as f:\n            f.write(self.format(*args, **kwargs))", "docstring": "Perform formatting and write the formatted string to a file or stdout.\n\nOptional arguments can be used to format the editor's contents. If no\nfile path is given, prints to standard output.\n\nArgs:\npath (str): Full file path (default None, prints to stdout)\n*args: Positional arguments to format the editor with\n**kwargs: Keyword arguments to format the editor with", "source": "codesearchnet"}
{"code": "def enable(self, timeout=0):\n    self.client.api.enable_plugin(self.name, timeout)\n    self.reload()", "docstring": "Enable the plugin.\n\nArgs:\ntimeout (int): Timeout in seconds. Default: 0\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def __convertLongToString(self, iValue):\n    string = ''\n    strValue = str(hex(iValue))\n    string = strValue.lstrip('0x')\n    string = string.rstrip('L')\n    return string", "docstring": "convert a long hex integer to string\nremove '0x' and 'L' return string\n\nArgs:\niValue: long integer in hex format\n\nReturns:\nstring of this long integer without \"0x\" and \"L\"", "source": "codesearchnet"}
{"code": "def convert_datetime_array(array):\n    if (not isinstance(array, np.ndarray)):\n        return array\n    try:\n        dt2001 = np.datetime64('2001')\n        legacy_datetime64 = (dt2001.astype('int64') == dt2001.astype('datetime64[ms]').astype('int64'))\n    except AttributeError as e:\n        if (e.args == (\"'module' object has no attribute 'datetime64'\",)):\n            if ('PyPy' in sys.version):\n                legacy_datetime64 = False\n                pass\n            else:\n                raise e\n        else:\n            raise e\n    if (array.dtype.kind == 'M'):\n        if legacy_datetime64:\n            if (array.dtype == np.dtype('datetime64[ns]')):\n                array = (array.astype('int64') / (10 ** 6.0))\n        else:\n            array = (array.astype('datetime64[us]').astype('int64') / 1000.0)\n    elif (array.dtype.kind == 'm'):\n        array = (array.astype('timedelta64[us]').astype('int64') / 1000.0)\n    return array", "docstring": "Convert NumPy datetime arrays to arrays to milliseconds since epoch.\n\nArgs:\narray : (obj)\nA NumPy array of datetime to convert\n\nIf the value passed in is not a NumPy array, it will be returned as-is.\n\nReturns:\narray", "source": "codesearchnet"}
{"code": "def markdown_compatible(text: str) -> str:\n    text = re.sub('^\\\\(([\\\\d.]+[a-zA-Z]?)\\\\) \\\\\\\\\\\\[(.+?)\\\\\\\\\\\\]$', '\\\\[\\\\2 \\\\\\\\tag{\\\\1}\\\\]', text, flags=re.M)\n    text = re.sub('^\\\\\\\\\\\\[(.+?)\\\\\\\\\\\\] \\\\(([\\\\d.]+[a-zA-Z]?)\\\\)$', '\\\\[\\\\1 \\\\\\\\tag{\\\\2}\\\\]', text, flags=re.M)\n    text = re.sub('^\\\\\\\\\\\\[(.+?)\\\\\\\\\\\\] \\\\(([\\\\d.]+[a-zA-Z]?)\\\\) (\\\\\\\\\\\\[.+?\\\\\\\\\\\\])$', '\\\\[\\\\1 \\\\\\\\tag{\\\\2}\\\\] \\\\3', text, flags=re.M)\n    text = text.replace('\\\\. ', '. ')\n    text = text.replace('\\\\bm{', '\\\\mathbf{').replace('{\\\\\\\\bm ', '\\\\mathbf{')\n    text = re.sub('\\\\\\\\mbox{ ?\\\\\\\\boldmath\\\\$(.*?)\\\\$}', '\\\\\\\\mathbf{\\\\1}', text)\n    text = re.sub('((?:http|ftp|https):\\\\/\\\\/(?:[\\\\w_-]+(?:(?:\\\\.[\\\\w_-]+)+))(?:[\\\\w.,@?^=%&:\\\\/~+\n    text = re.sub('```\\\\s*(.+?)\\\\s*```', '```\\\\n\\\\1\\\\n```', text, flags=re.S)\n    return text", "docstring": "Make text compatible with Markdown formatting.\n\nThis function makes various text formatting adjustments to make it compatible with Markdown.\n\nArgs:\ntext (`str`):\nThe input text to be made Markdown-compatible.\n\nReturns:\n`str`: The Markdown-compatible text.", "source": "github-repos"}
{"code": "def create_metadata(self, resource, keys_vals):\n    self.metadata_service.set_auth(self._token_metadata)\n    self.metadata_service.create(resource, keys_vals)", "docstring": "Associates new key-value pairs with the given resource.\n\nWill attempt to add all key-value pairs even if some fail.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys_vals (dictionary): Collection of key-value pairs to assign to\ngiven resource.\n\nRaises:\nHTTPErrorList on failure.", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    size = get_size_dict(size)\n    shortest_edge = min(size['height'], size['width'])\n    output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format)\n    resized_image = resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n    return resized_image", "docstring": "Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _column_name_with_class_name(fc):\n    return fc.__class__.__name__ + ':' + fc.name", "docstring": "Returns a unique name for the feature column used during deduping.\n\nWithout this two FeatureColumns that have the same name and where\none wraps the other, such as an IndicatorColumn wrapping a\nSequenceCategoricalColumn, will fail to deserialize because they will have the\nsame name in columns_by_name, causing the wrong column to be returned.\n\nArgs:\nfc: A FeatureColumn.\n\nReturns:\nA unique name as a string.", "source": "github-repos"}
{"code": "def try_listify_dict_with_int_keys(src: Dict[Any, Any], convert_when_sparse: bool=False) -> Tuple[Union[List[Any], Dict[Any, Any]], bool]:\n    if not src:\n        return (src, False)\n    min_key = None\n    max_key = None\n    for key in src.keys():\n        if not isinstance(key, int):\n            return (src, False)\n        if min_key is None or min_key > key:\n            min_key = key\n        if max_key is None or max_key < key:\n            max_key = key\n    if convert_when_sparse or (min_key == 0 and max_key == len(src) - 1):\n        return ([src[key] for key in sorted(src.keys())], True)\n    return (src, False)", "docstring": "Try to convert a dictionary with consequentive integer keys to a list.\n\nArgs:\nsrc: A dict whose keys may be int type and their range form a perfect\nrange(0, N) list unless convert_when_sparse is set to True.\nconvert_when_sparse: When src is a int-key dict, force convert\nit to a list ordered by key, even it's sparse.\n\nReturns:\nconverted list or src unchanged.", "source": "github-repos"}
{"code": "def create_mapping(record, keys):\n    ordered = OrderedDict()\n    field_mappings = []\n    for (key, value) in record.items():\n        ordered[key] = value\n        field_mappings.append({'columnNumber': len(ordered), 'fieldName': key, 'key': (key in keys)})\n    return {'field_mappings': field_mappings, 'data': ordered, 'fields': list(ordered.values())}", "docstring": "Create a field mapping for use in API updates and creates.\n\nArgs:\nrecord (BaseModel): Record that should be mapped.\nkeys (list[str]): Fields that should be mapped as keys.\n\nReturns:\ndict: Dictionary with keys:\n\n* ``field_mappings``: Field mappings as required by API.\n* ``data``: Ordered data dictionary for input record.", "source": "codesearchnet"}
{"code": "def legacy_raw_flush(writer=None, name=None):\n    if writer is None or isinstance(writer, SummaryWriter):\n        return flush(writer, name)\n    else:\n        with ops.device('cpu:0'):\n            return gen_summary_ops.flush_summary_writer(writer, name=name)", "docstring": "Legacy version of flush() that accepts a raw resource tensor for `writer`.\n\nDo not use this function in any new code. Not supported and not part of the\npublic TF APIs.\n\nArgs:\nwriter: The `tf.summary.SummaryWriter` to flush. If None, the current\ndefault writer will be used instead; if there is no current writer, this\nreturns `tf.no_op`. For this legacy version only, also accepts a raw\nresource tensor pointing to the underlying C++ writer resource.\nname: Ignored legacy argument for a name for the operation.\n\nReturns:\nThe created `tf.Operation`.", "source": "github-repos"}
{"code": "def _merge_run_options(self, options, incoming_options):\n    options.trace_level = max(options.trace_level, incoming_options.trace_level)\n    options.timeout_in_ms = max(options.timeout_in_ms, incoming_options.timeout_in_ms)\n    options.inter_op_thread_pool = max(options.inter_op_thread_pool, incoming_options.inter_op_thread_pool)\n    options.output_partition_graphs = max(options.output_partition_graphs, incoming_options.output_partition_graphs)\n    options.debug_options.debug_tensor_watch_opts.extend(incoming_options.debug_options.debug_tensor_watch_opts)\n    options.debug_options.reset_disk_byte_usage = options.debug_options.reset_disk_byte_usage or incoming_options.debug_options.reset_disk_byte_usage\n    options.report_tensor_allocations_upon_oom = options.report_tensor_allocations_upon_oom or incoming_options.report_tensor_allocations_upon_oom", "docstring": "Merge two instances of RunOptions into the first one.\n\nDuring the merger, the numerical fields including trace_level,\ntimeout_in_ms, inter_op_thread_pool are set to the larger one of the two.\nThe boolean value is set to the logical OR of the two.\ndebug_tensor_watch_opts of the original options is extended with that from\nthe incoming one.\n\nArgs:\noptions: The options to merge into.\nincoming_options: The options to be merged into the first argument.", "source": "github-repos"}
{"code": "def GetPrototype(self, descriptor):\n    if (descriptor.full_name not in self._classes):\n        descriptor_name = descriptor.name\n        if (str is bytes):\n            descriptor_name = descriptor.name.encode('ascii', 'ignore')\n        result_class = reflection.GeneratedProtocolMessageType(descriptor_name, (message.Message,), {'DESCRIPTOR': descriptor, '__module__': None})\n        self._classes[descriptor.full_name] = result_class\n        for field in descriptor.fields:\n            if field.message_type:\n                self.GetPrototype(field.message_type)\n        for extension in result_class.DESCRIPTOR.extensions:\n            if (extension.containing_type.full_name not in self._classes):\n                self.GetPrototype(extension.containing_type)\n            extended_class = self._classes[extension.containing_type.full_name]\n            extended_class.RegisterExtension(extension)\n    return self._classes[descriptor.full_name]", "docstring": "Builds a proto2 message class based on the passed in descriptor.\n\nPassing a descriptor with a fully qualified name matching a previous\ninvocation will cause the same class to be returned.\n\nArgs:\ndescriptor: The descriptor to build from.\n\nReturns:\nA class describing the passed in descriptor.", "source": "codesearchnet"}
{"code": "def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):\n    \n    assert method in ('equal', 'cosine', 'random'), 'Invalid method'\n\n    wind_eq = melodist.distribute_equally(wind_daily)\n\n    if method == 'equal':\n        wind_disagg = wind_eq\n    elif method == 'cosine':\n        assert None not in (a, b, t_shift)\n        wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)\n    elif method == 'random':\n        wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3\n\n    return wind_disagg", "docstring": "general function for windspeed disaggregation\n\nArgs:\nwind_daily: daily values\nmethod: keyword specifying the disaggregation method to be used\na: parameter a for the cosine function\nb: parameter b for the cosine function\nt_shift: parameter t_shift for the cosine function\n\nReturns:\nDisaggregated hourly values of windspeed.", "source": "juraj-google-style"}
{"code": "def expand_batch_coordinates(bc, length_factor):\n    assert (bc.get_shape().as_list() == [1, None, 1])\n    bc *= tf.constant([([1] * length_factor)])\n    bc = tf.reshape(bc, [1, (- 1), 1])\n    return bc", "docstring": "Duplicate elements of bc by length_factor.\n\nArgs:\nbc (tf.Tensor): int32 tensor of shape [1, length, 1]\nlength_factor (int):\n\nReturns:\ntf.Tensor: of shape [1, length*length_factor, 1] where every elements has\nbeen duplicated length_factor times.", "source": "codesearchnet"}
{"code": "def __init__(self, logger, script_type):\n    \n    self.logger = logger\n    self.script_type = script_type\n    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)", "docstring": "Constructor.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.\nscript_type: string, the metadata script type to run.", "source": "juraj-google-style"}
{"code": "def query_put_bounders(query, partition_column, start, end):\n    where = ' WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}'.format(partition_column, start, end)\n    query_with_bounders = 'SELECT * FROM ({0}) AS TMP_TABLE {1}'.format(query, where)\n    return query_with_bounders", "docstring": "Put bounders in the query\n\nArgs:\nquery: SQL query string\npartition_column: partition_column name\nstart: lower_bound\nend: upper_bound\n\nReturns:\nQuery with bounders", "source": "codesearchnet"}
{"code": "def goto(self, rules, symbol):\n        \n        return self.closure(\n            {rule.move_dot() for rule in rules\n             if not rule.at_end and rule.rhs[rule.pos] == symbol},\n        )", "docstring": "Computes the next closure for rules based on the symbol we got.\n\nArgs:\nrules - an iterable of DottedRules\nsymbol - a string denoting the symbol we've just seen\n\nReturns: frozenset of DottedRules", "source": "juraj-google-style"}
{"code": "def Optimize(node, deps=None, lossy=False, use_abcs=False, max_union=7, remove_mutable=False, can_do_lookup=True):\n    node = node.Visit(NormalizeGenericSelfTypes())\n    node = node.Visit(RemoveDuplicates())\n    node = node.Visit(SimplifyUnions())\n    node = node.Visit(CombineReturnsAndExceptions())\n    node = node.Visit(CombineContainers())\n    node = node.Visit(SimplifyContainers())\n    if deps:\n        superclasses = deps.Visit(visitors.ExtractSuperClassesByName())\n        superclasses.update(node.Visit(visitors.ExtractSuperClassesByName()))\n        if use_abcs:\n            superclasses.update(abc_hierarchy.GetSuperClasses())\n        hierarchy = SuperClassHierarchy(superclasses)\n        node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy))\n        if lossy:\n            node = node.Visit(FindCommonSuperClasses(hierarchy))\n    if max_union:\n        node = node.Visit(CollapseLongUnions(max_union))\n    node = node.Visit(AdjustReturnAndConstantGenericType())\n    if remove_mutable:\n        node = node.Visit(AbsorbMutableParameters())\n        node = node.Visit(CombineContainers())\n        node = node.Visit(MergeTypeParameters())\n        node = node.Visit(visitors.AdjustSelf())\n    node = node.Visit(SimplifyContainers())\n    if deps and can_do_lookup:\n        node = visitors.LookupClasses(node, deps, ignore_late_types=True)\n    return node", "docstring": "Optimize a PYTD tree.\n\nTries to shrink a PYTD tree by applying various optimizations.\n\nArguments:\nnode: A pytd node to be optimized. It won't be modified - this function will\nreturn a new node.\ndeps: Definitions of all of the external types in node.\nlossy: Allow optimizations that change the meaning of the pytd.\nuse_abcs: Use abstract base classes to represent unions like e.g.\n\"Union[float, int]\" as \"Real\".\nmax_union: How many types we allow in a union before we simplify it to just\n\"object\".\nremove_mutable: Whether to simplify mutable parameters to normal parameters.\ncan_do_lookup: True: We're either allowed to try to resolve NamedType\ninstances in the AST, or the AST is already resolved. False: Skip any\noptimizations that would require NamedTypes to be resolved.\n\nReturns:\nAn optimized node.", "source": "github-repos"}
{"code": "def hget(self, key):\n        \n        data = self.r.hget(self.hash, key)\n        if data is not None and not isinstance(data, str):\n            data = str(self.r.hget(self.hash, key), 'utf-8')\n        return data", "docstring": "Read data from Redis for the provided key.\n\nArgs:\nkey (string): The key to read in Redis.\n\nReturns:\n(any): The response data from Redis.", "source": "juraj-google-style"}
{"code": "def _eval_indexed_slices(a):\n    if isinstance(a, indexed_slices.IndexedSlices) and context.executing_eagerly():\n        return indexed_slices.IndexedSlicesValue(indices=[x.numpy() for x in a.indices], values=[x.numpy() for x in a.values], dense_shape=a.dense_shape)\n    return a", "docstring": "Converts IndexedSlices to IndexedSlicesValue with numpy indices/values.\n\nWhen eager execution is enabled, converts IndexedSlices\nto IndexedSlicesValue with numpy indices/values.\n\nArgs:\na: any value.\n\nReturns:\nIf a is IndexedSlices and eager execution is enabled, calls numpy() on a's\nfields. Otherwise returns a unchanged.", "source": "github-repos"}
{"code": "def main(raw_args=None):\n    \n\n    if raw_args is None:\n        raw_args = sys.argv[1:]\n\n    parser = build_parser()\n    args = parser.parse_args(raw_args)\n\n    if args.firmware_image is None and args.gdb is None:\n        print(\"You must specify either a firmware image or attach a debugger with --gdb <PORT>\")\n        return 1\n\n    test_args = ['qemu-system-gnuarmeclipse', '-verbose', '-verbose', '-board', 'STM32F0-Discovery',\n                 '-nographic', '-monitor', 'null', '-serial', 'null', '--semihosting-config',\n                 'enable=on,target=native', '-d', 'unimp,guest_errors']\n\n    if args.firmware_image:\n        test_args += ['-image', args.firmware_image]\n\n    if args.gdb:\n        test_args += ['--gdb', 'tcp::%d' % args.gdb]\n\n    proc = subprocess.Popen(test_args, stdout=sys.stdout, stderr=sys.stderr)\n\n    try:\n        proc.communicate()\n    except KeyboardInterrupt:\n        proc.terminate()\n\n    return 0", "docstring": "Run the iotile-emulate script.\n\nArgs:\nraw_args (list): Optional list of commmand line arguments.  If not\npassed these are pulled from sys.argv.", "source": "juraj-google-style"}
{"code": "def generator_next_fn(iterator_id_t):\n    if output_types and output_shapes:\n        flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]\n        flattened_shapes = nest.flatten(output_shapes)\n\n        def generator_py_func(iterator_id):\n            \n            values = next(generator_state.get_iterator(iterator_id))\n            try:\n                flattened_values = nest.flatten_up_to(output_types, values)\n            except (TypeError, ValueError) as e:\n                raise TypeError(f'`generator` yielded an element that did not match the expected structure. The expected structure was {output_types}, but the yielded element was {values}.') from e\n            ret_arrays = []\n            for ret, dtype in zip(flattened_values, flattened_types):\n                try:\n                    ret_arrays.append(script_ops.FuncRegistry._convert(ret, dtype=dtype.as_numpy_dtype))\n                except (TypeError, ValueError) as e:\n                    raise TypeError(f'`generator` yielded an element that could not be converted to the expected type. The expected type was {dtype.name}, but the yielded element was {ret}.') from e\n            for ret_array, expected_dtype, expected_shape in zip(ret_arrays, flattened_types, flattened_shapes):\n                if ret_array.dtype != expected_dtype.as_numpy_dtype:\n                    raise TypeError(f'`generator` yielded an element of type {ret_array.dtype} where an element of type {expected_dtype.as_numpy_dtype} was expected.')\n                if not expected_shape.is_compatible_with(ret_array.shape):\n                    raise TypeError(f'`generator` yielded an element of shape {ret_array.shape} where an element of shape {expected_shape} was expected.')\n            return ret_arrays\n        flat_values = script_ops.numpy_function(generator_py_func, [iterator_id_t], flattened_types)\n        if not isinstance(flat_values, (list, tuple)):\n            flat_values = [flat_values]\n        if output_shapes is not None:\n            for ret_t, shape in zip(flat_values, flattened_shapes):\n                ret_t.set_shape(shape)\n        return nest.pack_sequence_as(output_types, flat_values)\n    else:\n        flat_output_types = structure.get_flat_tensor_types(output_signature)\n\n        def generator_py_func(iterator_id):\n            \n            values = next(generator_state.get_iterator(iterator_id.numpy()))\n            try:\n                values = structure.normalize_element(values, output_signature)\n            except (TypeError, ValueError) as e:\n                raise TypeError(f'`generator` yielded an element that did not match the expected structure. 
The expected structure was {output_signature}, but the yielded element was {values}.') from e\n            values_spec = structure.type_spec_from_value(values)\n            if not structure.are_compatible(values_spec, output_signature):\n                raise TypeError(f'`generator` yielded an element of {values_spec} where an element of {output_signature} was expected.')\n            return structure.to_tensor_list(output_signature, values)\n        return script_ops.eager_py_func(generator_py_func, inp=[iterator_id_t], Tout=flat_output_types)", "docstring": "Generates the next element from iterator with ID `iterator_id_t`.\n\nWe map this function across an infinite repetition of the\n`iterator_id_t`, and raise `StopIteration` to terminate the iteration.\n\nArgs:\niterator_id_t: A `tf.int64` tensor whose value uniquely identifies the\niterator in `generator_state` from which to generate an element.\n\nReturns:\nThe next element to generate from the iterator.", "source": "github-repos"}
{"code": "def get_nets_jpnic(self, response):\n        \n\n        nets = []\n\n        \n        \n        for match in re.finditer(\n                r'^.*?(\\[Network Number\\])[^\\S\\n]+.+?>(?P<val>.+?)</A>$',\n                response,\n                re.MULTILINE\n        ):\n\n            try:\n\n                net = copy.deepcopy(BASE_NET)\n                tmp = ip_network(match.group(2))\n\n                try:  \n                    network_address = tmp.network_address\n                except AttributeError:  \n                    network_address = tmp.ip\n                    pass\n\n                try:  \n                    broadcast_address = tmp.broadcast_address\n                except AttributeError:  \n                    broadcast_address = tmp.broadcast\n                    pass\n\n                net['range'] = '{0} - {1}'.format(\n                    network_address + 1, broadcast_address\n                )\n\n                cidr = ip_network(match.group(2).strip()).__str__()\n\n                net['cidr'] = cidr\n                net['start'] = match.start()\n                net['end'] = match.end()\n                nets.append(net)\n\n            except (ValueError, TypeError):\n\n                pass\n\n        return nets", "docstring": "The function for parsing network blocks from jpnic whois data.\n\nArgs:\nresponse (:obj:`str`): The response from the jpnic server.\n\nReturns:\nlist of dict: Mapping of networks with start and end positions.\n\n::\n\n[{\n'cidr' (str) - The network routing block\n'start' (int) - The starting point of the network\n'end' (int) - The endpoint point of the network\n}]", "source": "juraj-google-style"}
{"code": "def _convert_observ(self, observ):\n    if (not np.isfinite(observ).all()):\n        raise ValueError('Infinite observation encountered.')\n    if (observ.dtype == np.float64):\n        return observ.astype(np.float32)\n    if (observ.dtype == np.int64):\n        return observ.astype(np.int32)\n    return observ", "docstring": "Convert the observation to 32 bits.\n\nArgs:\nobserv: Numpy observation.\n\nRaises:\nValueError: Observation contains infinite values.\n\nReturns:\nNumpy observation with 32-bit data type.", "source": "codesearchnet"}
{"code": "def console_set_alignment(con: tcod.console.Console, alignment: int) -> None:\n    \n    lib.TCOD_console_set_alignment(_console(con), alignment)", "docstring": "Change this consoles current alignment mode.\n\n* tcod.LEFT\n* tcod.CENTER\n* tcod.RIGHT\n\nArgs:\ncon (Console): Any Console instance.\nalignment (int):\n\n.. deprecated:: 8.5\nSet :any:`Console.default_alignment` instead.", "source": "juraj-google-style"}
{"code": "def get_by_name(self, name):\n    managed_sans = self.get_all()\n    result = [x for x in managed_sans if (x['name'] == name)]\n    resource = (result[0] if result else None)\n    if resource:\n        resource = self.new(self._connection, resource)\n    return resource", "docstring": "Gets a Managed SAN by name.\n\nArgs:\nname: Name of the Managed SAN\n\nReturns:\ndict: Managed SAN.", "source": "codesearchnet"}
{"code": "def fit_transform(self, X, y=None, **params):\n        \n        return self.fit(X, y).transform(X, y)", "docstring": "Learn vocabulary and return document id matrix.\n\nThis is equivalent to fit followed by transform.\n\nArgs:\nX : iterable\nan iterable which yields either str, unicode or file objects.\n\nReturns:\nlist : document id matrix.\nlist: label id matrix.", "source": "juraj-google-style"}
{"code": "def _add_qasm_measure(self, qubit, cmembit, cregbit=None):\n    (outcome, probability) = self._get_measure_outcome(qubit)\n    membit = (1 << cmembit)\n    self._classical_memory = ((self._classical_memory & (~ membit)) | (int(outcome) << cmembit))\n    if (cregbit is not None):\n        regbit = (1 << cregbit)\n        self._classical_register = ((self._classical_register & (~ regbit)) | (int(outcome) << cregbit))\n    if (outcome == '0'):\n        update_diag = [[(1 / np.sqrt(probability)), 0], [0, 0]]\n    else:\n        update_diag = [[0, 0], [0, (1 / np.sqrt(probability))]]\n    self._add_unitary_single(update_diag, qubit)", "docstring": "Apply a measure instruction to a qubit.\n\nArgs:\nqubit (int): qubit is the qubit measured.\ncmembit (int): is the classical memory bit to store outcome in.\ncregbit (int, optional): is the classical register bit to store outcome in.", "source": "codesearchnet"}
{"code": "def SetName(obj, name):\n    precondition.AssertType(name, str)\n    if PY2:\n        obj.__name__ = name.encode('ascii')\n    else:\n        obj.__name__ = name", "docstring": "A compatibility wrapper for setting object's name.\n\nSee documentation for `GetName` for more information.\n\nArgs:\nobj: A type or function object to set the name for.\nname: A name to set.", "source": "codesearchnet"}
{"code": "def distance(cls, q0, q1):\n    q = Quaternion.log_map(q0, q1)\n    return q.norm", "docstring": "Quaternion intrinsic distance.\n\nFind the intrinsic geodesic distance between q0 and q1.\n\nParams:\nq0: the first quaternion\nq1: the second quaternion\n\nReturns:\nA positive amount corresponding to the length of the geodesic arc\nconnecting q0 to q1.\n\nNote:\nAlthough the q0^(-1)*q1 != q1^(-1)*q0, the length of the path joining\nthem is given by the logarithm of those product quaternions, the norm\nof which is the same.", "source": "codesearchnet"}
{"code": "def repack_weights(packed_parameter: torch.Tensor, sharded_dim: int, world_size: int, num_blocks: int=2) -> torch.Tensor:\n    if num_blocks != 2:\n        raise ValueError('Num blocks different from 2 is not supported yet. This is most likely a bug in your implementation as we only pack gate and up projections together.')\n    actual_sharded_dim = sharded_dim if sharded_dim >= 0 else sharded_dim + packed_parameter.ndim\n    total_size_on_sharded_dim = packed_parameter.shape[actual_sharded_dim]\n    original_block_size_on_dim = total_size_on_sharded_dim \n    shard_chunk_size = original_block_size_on_dim \n    prefix_shape = packed_parameter.shape[:actual_sharded_dim]\n    suffix_shape = packed_parameter.shape[actual_sharded_dim + 1:]\n    tensor_view = packed_parameter.view(*prefix_shape, world_size, num_blocks, shard_chunk_size, *suffix_shape)\n    axis_ws_abs = len(prefix_shape)\n    axis_npp_abs = len(prefix_shape) + 1\n    permute_order = list(range(tensor_view.ndim))\n    permute_order[axis_ws_abs], permute_order[axis_npp_abs] = (permute_order[axis_npp_abs], permute_order[axis_ws_abs])\n    tensor_permuted = tensor_view.permute(*permute_order)\n    final_ordered_tensor = tensor_permuted.reshape_as(packed_parameter)\n    return final_ordered_tensor", "docstring": "Reorders a tensor that was reconstructed from sharded packed weights into its canonical packed format.\n\nFor example, if a weight was packed (e.g., gate_proj and up_proj) and then sharded,\nDTensor.full_tensor() might produce an interleaved layout like [G0, U0, G1, U1, ...]\nalong the sharded dimension. This function reorders it to [G0, G1, ..., U0, U1, ...].\nThis is an inverse operation to get_packed_weights.\n\nArgs:\nreconstructed_tensor: The tensor reconstructed from DTensor (e.g., via .full_tensor().contiguous()).\nsharded_dim: The dimension index in the reconstructed_tensor that was originally sharded.\nworld_size: The tensor parallel world size.\nnum_packed_projs: The number of projections that were packed together (e.g., 2 for gate_up_proj).\n\nReturns:\nThe reordered tensor in canonical packed format.", "source": "github-repos"}
{"code": "def __init__(self, app, db, UserClass, UserEmailClass=None, UserInvitationClass=None, RoleClass=None):\n        \n        self.app = app\n        self.db = db\n        self.UserClass = UserClass\n        self.UserEmailClass = UserEmailClass\n        self.UserInvitationClass = UserInvitationClass\n        self.RoleClass = RoleClass\n\n        self.user_manager = app.user_manager\n        self.db_adapter = None\n\n        \n        if self.db_adapter is None:\n            try:\n                from flask_sqlalchemy import SQLAlchemy\n\n                if isinstance(db, SQLAlchemy):\n                    self.db_adapter = SQLDbAdapter(app, db)\n            except ImportError:\n                pass  \n\n        \n        if self.db_adapter is None:\n            try:\n                from flask_mongoengine import MongoEngine\n\n                if isinstance(db, MongoEngine):\n                    self.db_adapter = MongoDbAdapter(app, db)\n            except ImportError:\n                pass  \n\n        \n        if self.db_adapter is None: \n            try:\n                from flask_flywheel import Flywheel\n\n                if isinstance(db, Flywheel):\n                    self.db_adapter = DynamoDbAdapter(app, db)\n            except ImportError:\n                pass  \n\n        \n        if self.db_adapter is None:\n            try:\n                from pynamodb.models import Model\n\n                if issubclass(UserClass, Model):\n                    self.db_adapter = PynamoDbAdapter(app)\n            except ImportError:\n                pass \n\n        \n        if self.db_adapter is None:\n            raise ConfigError(\n                'No Flask-SQLAlchemy, Flask-MongoEngine or Flask-Flywheel installed and no Pynamo Model in use.'\\\n                ' You must install one of these Flask extensions.')", "docstring": "Initialize the appropriate DbAdapter, based on the ``db`` parameter type.\n\nArgs:\napp(Flask): The Flask application instance.\ndb: The Object-Database Mapper instance.\nUserClass: The User class.\nUserEmailClass: Optional UserEmail class for multiple-emails-per-user feature.\nUserInvitationClass: Optional UserInvitation class for user-invitation feature.\nRoleClass: For testing purposes only.", "source": "juraj-google-style"}
{"code": "def input(self):\n    return self._nested_inputs", "docstring": "Retrieves the input tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one input,\ni.e. if it is connected to one incoming layer.\n\nReturns:\nInput tensor or list of input tensors.\n\nRaises:\nRuntimeError: If called in Eager mode.\nAttributeError: If no inbound nodes are found.", "source": "github-repos"}
{"code": "def _GetMetadataUpdate(\n      self, metadata_key='', recursive=True, wait=True, timeout=None):\n    \n    metadata_key = os.path.join(metadata_key, '') if recursive else metadata_key\n    metadata_url = os.path.join(METADATA_SERVER, metadata_key)\n    params = {\n        'alt': 'json',\n        'last_etag': self.etag,\n        'recursive': recursive,\n        'timeout_sec': timeout or self.timeout,\n        'wait_for_change': wait,\n    }\n    while True:\n      response = self._GetMetadataRequest(\n          metadata_url, params=params, timeout=timeout)\n      etag_updated = self._UpdateEtag(response)\n      if wait and not etag_updated and not timeout:\n        \n        continue\n      else:\n        \n        \n        \n        \n        break\n    return json.loads(response.read().decode('utf-8'))", "docstring": "Request the contents of metadata server and deserialize the response.\n\nArgs:\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\nwait: bool, True if we should wait for a metadata change.\ntimeout: int, timeout in seconds for returning metadata output.\n\nReturns:\njson, the deserialized contents of the metadata server.", "source": "juraj-google-style"}
{"code": "def run(self, *args, **kwargs):\n        \n        self.log.debug('Starting EBSAuditor')\n        data = self.update_data()\n\n        notices = defaultdict(list)\n        for account, issues in data.items():\n            for issue in issues:\n                for recipient in account.contacts:\n                    notices[NotificationContact(type=recipient['type'], value=recipient['value'])].append(issue)\n\n        self.notify(notices)", "docstring": "Main execution point for the auditor\n\nArgs:\n*args:\n**kwargs:\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def split_to_tiles(image: np.ndarray, num_tiles_height: int, num_tiles_width: int) -> np.ndarray:\n    num_channels, height, width = image.shape\n    tile_height = height \n    tile_width = width \n    image = image.reshape(num_channels, num_tiles_height, tile_height, num_tiles_width, tile_width)\n    image = image.transpose(1, 3, 0, 2, 4)\n    image = image.reshape(num_tiles_width * num_tiles_height, num_channels, tile_height, tile_width)\n    return np.ascontiguousarray(image)", "docstring": "Split an image into a specified number of tiles along its width and height dimensions.\n\nArgs:\nimage (`np.ndarray`):\nInput image with shape (num_channels, height, width).\nnum_tiles_height (`int`):\nNumber of tiles to split the image into along its height.\nnum_tiles_width (`int`):\nNumber of tiles to split the image into along its width.\n\nReturns:\n`np.ndarray`:\nArray of image tiles with shape (num_tiles_width * num_tiles_height, num_channels, tile_height, tile_width).", "source": "github-repos"}
{"code": "def make_slot_check(wanted):\n    if isinstance(wanted, types.FunctionType):\n        return wanted\n    if isinstance(wanted, int):\n        (item, meta) = (wanted, None)\n    elif isinstance(wanted, Slot):\n        (item, meta) = (wanted.item_id, wanted.damage)\n    elif isinstance(wanted, (Item, Block)):\n        (item, meta) = (wanted.id, wanted.metadata)\n    elif isinstance(wanted, str):\n        item_or_block = get_item_or_block(wanted, init=True)\n        (item, meta) = (item_or_block.id, item_or_block.metadata)\n    else:\n        try:\n            (item, meta) = wanted\n        except TypeError:\n            raise ValueError(('Illegal args for make_slot_check(): %s' % wanted))\n    return (lambda slot: ((item == slot.item_id) and (meta in (None, slot.damage))))", "docstring": "Creates and returns a function that takes a slot\nand checks if it matches the wanted item.\n\nArgs:\nwanted: function(Slot) or Slot or itemID or (itemID, metadata)", "source": "codesearchnet"}
{"code": "def clientConnectionFailed(self, err, address: Address):\n        \n        if type(err.value) == error.TimeoutError:\n            logger.debug(f\"Failed connecting to {address} connection timed out\")\n        elif type(err.value) == error.ConnectError:\n            ce = err.value\n            if len(ce.args) > 0:\n                logger.debug(f\"Failed connecting to {address} {ce.args[0].value}\")\n            else:\n                logger.debug(f\"Failed connecting to {address}\")\n        else:\n            logger.debug(f\"Failed connecting to {address} {err.value}\")\n        self.peers_connecting -= 1\n        self.RemoveKnownAddress(address)\n        self.RemoveFromQueue(address)\n        \n        self.AddDeadAddress(address)\n\n        \n        return err.type", "docstring": "Called when we fail to connect to an endpoint\nArgs:\nerr: Twisted Failure instance\naddress: the address we failed to connect to", "source": "juraj-google-style"}
{"code": "def cut_matrix(self, n):\n        \n        return connectivity.relevant_connections(n, self.from_nodes,\n                                                 self.to_nodes)", "docstring": "Compute the cut matrix for this cut.\n\nThe cut matrix is a square matrix which represents connections severed\nby the cut.\n\nArgs:\nn (int): The size of the network.\n\nExample:\n>>> cut = Cut((1,), (2,))\n>>> cut.cut_matrix(3)\narray([[0., 0., 0.],\n[0., 0., 1.],\n[0., 0., 0.]])", "source": "juraj-google-style"}
{"code": "def ScanForFileSystem(self, source_path_spec):\n    if (source_path_spec.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):\n        return path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_APFS, location='/', parent=source_path_spec)\n    try:\n        type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(source_path_spec, resolver_context=self._resolver_context)\n    except RuntimeError as exception:\n        raise errors.BackEndError('Unable to process source path specification with error: {0!s}'.format(exception))\n    if (not type_indicators):\n        return None\n    type_indicator = type_indicators[0]\n    if (len(type_indicators) > 1):\n        if (definitions.PREFERRED_NTFS_BACK_END not in type_indicators):\n            raise errors.BackEndError('Unsupported source found more than one file system types.')\n        type_indicator = definitions.PREFERRED_NTFS_BACK_END\n    if (type_indicator == definitions.TYPE_INDICATOR_NTFS):\n        root_location = '\\\\'\n    else:\n        root_location = '/'\n    file_system_path_spec = path_spec_factory.Factory.NewPathSpec(type_indicator, location=root_location, parent=source_path_spec)\n    if (type_indicator == definitions.TYPE_INDICATOR_TSK):\n        try:\n            file_system = resolver.Resolver.OpenFileSystem(file_system_path_spec, resolver_context=self._resolver_context)\n            file_system.Close()\n        except errors.BackEndError:\n            file_system_path_spec = None\n    return file_system_path_spec", "docstring": "Scans the path specification for a supported file system format.\n\nArgs:\nsource_path_spec (PathSpec): source path specification.\n\nReturns:\nPathSpec: file system path specification or None if no supported file\nsystem type was found.\n\nRaises:\nBackEndError: if the source cannot be scanned or more than one file\nsystem type is found.", "source": "codesearchnet"}
{"code": "def num_employers(self, num_employers):\n    if (num_employers < 2):\n        self._logger.log('warn', 'Two employers are needed: setting to two')\n        num_employers = 2\n    self._num_employers = num_employers\n    self._logger.log('debug', 'Number of employers set to {}'.format(num_employers))\n    self._limit = (num_employers * len(self._value_ranges))\n    self._logger.log('debug', 'Limit set to {}'.format(self._limit))", "docstring": "Sets the number of employer bees; at least two are required\n\nArgs:\nnum_employers (int): number of employer bees", "source": "codesearchnet"}
{"code": "def un(byts):\n    return msgpack.loads(byts, use_list=False, raw=False, unicode_errors='surrogatepass')", "docstring": "Use msgpack to de-serialize a python object.\n\nArgs:\nbyts (bytes): The bytes to de-serialize\n\nNotes:\nString objects are decoded using utf8 encoding.  In order to handle\npotentially malformed input, ``unicode_errors='surrogatepass'`` is set\nto allow decoding bad input strings.\n\nReturns:\nobj: The de-serialized object", "source": "codesearchnet"}
{"code": "def read_from_source(source, start_position=None, stop_position=None):\n    values = []\n    range_tracker = source.get_range_tracker(start_position, stop_position)\n    assert isinstance(range_tracker, iobase.RangeTracker)\n    reader = source.read(range_tracker)\n    for value in reader:\n        values.append(value)\n    return values", "docstring": "Reads elements from the given ```BoundedSource```.\n\nOnly reads elements within the given position range.\nArgs:\nsource (~apache_beam.io.iobase.BoundedSource):\n:class:`~apache_beam.io.iobase.BoundedSource` implementation.\nstart_position (int): start position for reading.\nstop_position (int): stop position for reading.\n\nReturns:\nList[str]: the set of values read from the sources.", "source": "github-repos"}
{"code": "def get_model_filepath(self, infodict):\n        \n        u = infodict['uniprot_ac']\n\n        original_filename = '{}_{}_{}_{}'.format(infodict['from'], infodict['to'],\n                                                 infodict['template'], infodict['coordinate_id'])\n        file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:6],\n                            'swissmodel', '{}.pdb'.format(original_filename))\n\n        if op.exists(file_path):\n            return file_path\n        else:\n            log.warning('{}: no file {} found for model'.format(u, file_path))\n            return None", "docstring": "Get the path to the homology model using information from the index dictionary for a single model.\n\nExample: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.\nUse one of those dictionaries as input to this function to get the filepath to the model itself.\n\nArgs:\ninfodict (dict): Information about a model from get_models\n\nReturns:\nstr: Path to homology model", "source": "juraj-google-style"}
{"code": "def raisefrom(exc_type, message, exc):\n    if (sys.version_info[:2] >= (3, 2)):\n        six.raise_from(exc_type(message), exc)\n    else:\n        six.reraise(exc_type, ('%s - %s' % (message, exc)), sys.exc_info()[2])", "docstring": "Call Python 3 raise from or emulate it for Python 2\n\nArgs:\nexc_type (Any): Type of Exception\nmessage (str): Error message to display\nexc (BaseException): original exception\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def find_tested_models(test_file: str) -> List[str]:\n    with open(os.path.join(PATH_TO_TESTS, test_file), 'r', encoding='utf-8', newline='\\n') as f:\n        content = f.read()\n    all_models = re.findall('all_model_classes\\\\s+=\\\\s+\\\\(\\\\s*\\\\(([^\\\\)]*)\\\\)', content)\n    all_models += re.findall('all_model_classes\\\\s+=\\\\s+\\\\(([^\\\\)]*)\\\\)', content)\n    if len(all_models) > 0:\n        model_tested = []\n        for entry in all_models:\n            for line in entry.split(','):\n                name = line.strip()\n                if len(name) > 0:\n                    model_tested.append(name)\n        return model_tested", "docstring": "Parse the content of test_file to detect what's in `all_model_classes`. This detects the models that inherit from\nthe common test class.\n\nArgs:\ntest_file (`str`): The path to the test file to check\n\nReturns:\n`List[str]`: The list of models tested in that file.", "source": "github-repos"}
{"code": "def add_from_existing(self, resource, timeout=-1):\n        \n        uri = self.URI + \"/from-existing\"\n        return self._client.create(resource, uri=uri, timeout=timeout)", "docstring": "Adds a volume that already exists in the Storage system\n\nArgs:\nresource (dict):\nObject to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Added resource.", "source": "juraj-google-style"}
{"code": "def isCaCert(self, name):\n        \n        crtpath = self._getPathJoin('cas', '%s.crt' % name)\n        return os.path.isfile(crtpath)", "docstring": "Checks if a CA certificate exists.\n\nArgs:\nname (str): The name of the CA keypair.\n\nExamples:\nCheck if the CA certificate for \"myca\" exists:\n\nexists = cdir.isCaCert('myca')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "juraj-google-style"}
{"code": "def query_snl(self, criteria):\n        \n        try:\n            payload = {\"criteria\": json.dumps(criteria)}\n            response = self.session.post(\"{}/snl/query\".format(self.preamble),\n                                         data=payload)\n            if response.status_code in [200, 400]:\n                resp = json.loads(response.text)\n                if resp[\"valid_response\"]:\n                    if resp.get(\"warning\"):\n                        warnings.warn(resp[\"warning\"])\n                    return resp[\"response\"]\n                else:\n                    raise MPRestError(resp[\"error\"])\n\n            raise MPRestError(\"REST error with status code {} and error {}\"\n                              .format(response.status_code, response.text))\n\n        except Exception as ex:\n            raise MPRestError(str(ex))", "docstring": "Query for submitted SNLs.\n\n.. note::\n\nAs of now, this MP REST feature is open only to a select group of\nusers. Opening up submissions to all users is being planned for\nthe future.\n\nArgs:\ncriteria (dict): Query criteria.\n\nReturns:\nA dict, with a list of submitted SNLs in the \"response\" key.\n\nRaises:\nMPRestError", "source": "juraj-google-style"}
{"code": "def __add__(self, r):\n        \n        if not isinstance(r, TestResult):\n            raise TypeError('Operand %s of type %s is not a TestResult.' %\n                            (r, type(r)))\n        sum_result = TestResult()\n        for name in sum_result.__dict__:\n            r_value = getattr(r, name)\n            l_value = getattr(self, name)\n            if isinstance(r_value, list):\n                setattr(sum_result, name, l_value + r_value)\n        return sum_result", "docstring": "Overrides '+' operator for TestResult class.\n\nThe add operator merges two TestResult objects by concatenating all of\ntheir lists together.\n\nArgs:\nr: another instance of TestResult to be added\n\nReturns:\nA TestResult instance that's the sum of two TestResult instances.", "source": "juraj-google-style"}
{"code": "def delay(self, identifier: typing.Any, until: typing.Union[(int, float)]=(- 1)) -> bool:\n    raise NotImplementedError()", "docstring": "Delay a deferred function until the given time.\n\nArgs:\nidentifier (typing.Any): The identifier returned from a call\nto defer or defer_for.\nuntil (typing.Union[int, float]): A numeric value that represents\nthe clock time when the callback becomes available for\nexecution. Values that are less than the current time result in\nthe function being called at the next opportunity.\n\nReturns:\nbool: True if the call is delayed. False if the identifier is\ninvalid or if the deferred call is already executed.", "source": "codesearchnet"}
{"code": "def fetch(self, customer_id, data={}, **kwargs):\n        \n        return super(Customer, self).fetch(customer_id, data, **kwargs)", "docstring": "Fetch Customer for given Id\n\nArgs:\ncustomer_id : Id for which customer object has to be retrieved\n\nReturns:\nOrder dict for given customer Id", "source": "juraj-google-style"}
{"code": "def new_log_files(self, name, redirect_output=True):\n    if (redirect_output is None):\n        redirect_output = self._ray_params.redirect_output\n    if (not redirect_output):\n        return (None, None)\n    log_stdout = self._make_inc_temp(suffix='.out', prefix=name, directory_name=self._logs_dir)\n    log_stderr = self._make_inc_temp(suffix='.err', prefix=name, directory_name=self._logs_dir)\n    log_stdout_file = open(log_stdout, 'a', buffering=1)\n    log_stderr_file = open(log_stderr, 'a', buffering=1)\n    return (log_stdout_file, log_stderr_file)", "docstring": "Generate partially randomized filenames for log files.\n\nArgs:\nname (str): descriptive string for this log file.\nredirect_output (bool): True if files should be generated for\nlogging stdout and stderr and false if stdout and stderr\nshould not be redirected.\nIf it is None, it will use the \"redirect_output\" Ray parameter.\n\nReturns:\nIf redirect_output is true, this will return a tuple of two\nfile handles. The first is for redirecting stdout and the\nsecond is for redirecting stderr.\nIf redirect_output is false, this will return a tuple\nof two None objects.", "source": "codesearchnet"}
{"code": "def _create_job_info(self, job_dir):\n        \n        meta = self._build_job_meta(job_dir)\n\n        self.logger.debug(\"Create job: %s\" % meta)\n\n        job_record = JobRecord.from_json(meta)\n        job_record.save()", "docstring": "Create information for given job.\n\nMeta file will be loaded if exists, and the job information will\nbe saved in db backend.\n\nArgs:\njob_dir (str): Directory path of the job.", "source": "juraj-google-style"}
{"code": "def parse_rsa_data(rsa_outfile, ignore_hets=True):\n    \n\n    naccess_rel_dict = OrderedDict()\n\n    with open(rsa_outfile, 'r') as f:\n        for line in f:\n            if line.startswith('RES'):\n                res_name = line[4:7]\n                chain_id = line[8]\n                resseq = int(line[9:13])\n                icode = line[13]\n                res_id = (' ', resseq, icode)\n                all_atoms_abs = line[16:22].strip()\n                all_atoms_rel = line[23:28].strip()\n                side_chain_abs = line[29:35].strip()\n                side_chain_rel = line[36:41].strip()\n                main_chain_abs = line[42:48].strip()\n                main_chain_rel = line[49:54].strip()\n                non_polar_abs = line[55:61].strip()\n                non_polar_rel = line[62:67].strip()\n                all_polar_abs = line[68:74].strip()\n                all_polar_rel = line[75:80].strip()\n\n                if all_atoms_rel =='N/A' and main_chain_rel =='N/A' and all_polar_rel =='N/A' and non_polar_rel =='N/A' and side_chain_rel =='N/A' and ignore_hets:\n                    continue\n\n                naccess_rel_dict[(chain_id, res_id)] = {\n                    'res_name'      : res_name,\n                    'all_atoms_abs' : ssbio.utils.conv_to_float(all_atoms_abs, inf_str='N/A'),\n                    'all_atoms_rel' : ssbio.utils.conv_to_float(all_atoms_rel, inf_str='N/A'),\n                    'side_chain_abs': ssbio.utils.conv_to_float(side_chain_abs, inf_str='N/A'),\n                    'side_chain_rel': ssbio.utils.conv_to_float(side_chain_rel, inf_str='N/A'),\n                    'main_chain_abs': ssbio.utils.conv_to_float(main_chain_abs, inf_str='N/A'),\n                    'main_chain_rel': ssbio.utils.conv_to_float(main_chain_rel, inf_str='N/A'),\n                    'non_polar_abs' : ssbio.utils.conv_to_float(non_polar_abs, inf_str='N/A'),\n                    'non_polar_rel' : ssbio.utils.conv_to_float(non_polar_rel, inf_str='N/A'),\n                    'all_polar_abs' : ssbio.utils.conv_to_float(all_polar_abs, inf_str='N/A'),\n                    'all_polar_rel' : ssbio.utils.conv_to_float(all_polar_rel, inf_str='N/A')}\n\n    return naccess_rel_dict", "docstring": "Process a NACCESS or freesasa RSA output file. Adapted from Biopython NACCESS modele.\n\nArgs:\nrsa_outfile (str): Path to RSA output file\nignore_hets (bool): If HETATMs should be excluded from the final dictionary. This is extremely important\nwhen loading this information into a ChainProp's SeqRecord, since this will throw off the sequence matching.\n\nReturns:\ndict: Per-residue dictionary of RSA values", "source": "juraj-google-style"}
{"code": "def _create_flow(self, request_handler):\n        \n        if self.flow is None:\n            redirect_uri = request_handler.request.relative_url(\n                self._callback_path)  \n            self.flow = client.OAuth2WebServerFlow(\n                self._client_id, self._client_secret, self._scope,\n                redirect_uri=redirect_uri, user_agent=self._user_agent,\n                auth_uri=self._auth_uri, token_uri=self._token_uri,\n                revoke_uri=self._revoke_uri, **self._kwargs)", "docstring": "Create the Flow object.\n\nThe Flow is calculated lazily since we don't know where this app is\nrunning until it receives a request, at which point redirect_uri can be\ncalculated and then the Flow object can be constructed.\n\nArgs:\nrequest_handler: webapp.RequestHandler, the request handler.", "source": "juraj-google-style"}
{"code": "def CheckDataVisiblity(self, value):\n    if (not self.data_visibility_policy):\n        return None\n    (visible, reason) = self.data_visibility_policy.IsDataVisible(DetermineType(value))\n    if visible:\n        return None\n    return {'status': {'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': {'format': reason}}}", "docstring": "Returns a status object if the given name is not visible.\n\nArgs:\nvalue: The value to check.  The actual value here is not important but the\nvalue's metadata (e.g. package and type) will be checked.\n\nReturns:\nNone if the value is visible.  A variable structure with an error status\nif the value should not be visible.", "source": "codesearchnet"}
{"code": "def _verify_parsed_token(parsed_token, issuers, audiences, allowed_client_ids, is_legacy_google_auth=True):\n  \n  \n  if parsed_token.get('iss') not in issuers:\n    _logger.warning('Issuer was not valid: %s', parsed_token.get('iss'))\n    return False\n\n  \n  aud = parsed_token.get('aud')\n  if not aud:\n    _logger.warning('No aud field in token')\n    return False\n  \n  \n  \n  cid = parsed_token.get('azp')\n  audience_allowed = (aud in audiences) or (is_legacy_google_auth and aud == cid)\n  if not audience_allowed:\n    _logger.warning('Audience not allowed: %s', aud)\n    return False\n\n  \n  if is_legacy_google_auth:\n    if list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK:\n      _logger.warning('Client ID check can\\'t be skipped for ID tokens.  '\n                      'Id_token cannot be verified.')\n      return False\n    elif not cid or cid not in allowed_client_ids:\n      _logger.warning('Client ID is not allowed: %s', cid)\n      return False\n\n  if 'email' not in parsed_token:\n    return False\n\n  return True", "docstring": "Verify a parsed user ID token.\n\nArgs:\nparsed_token: The parsed token information.\nissuers: A list of allowed issuers\naudiences: The allowed audiences.\nallowed_client_ids: The allowed client IDs.\n\nReturns:\nTrue if the token is verified, False otherwise.", "source": "juraj-google-style"}
{"code": "def FinalizeTaskStorage(self, task):\n    \n    if task.identifier not in self._task_storage_writers:\n      raise IOError('Storage writer for task: {0:s} does not exist.'.format(\n          task.identifier))", "docstring": "Finalizes a processed task storage.\n\nArgs:\ntask (Task): task.\n\nRaises:\nIOError: if the task storage does not exist.\nOSError: if the task storage does not exist.", "source": "juraj-google-style"}
{"code": "def delete_folder(self, folder):\n        \n        if not is_valid_uuid(folder):\n            raise StorageArgumentException(\n                'Invalid UUID for folder: {0}'.format(folder))\n        self._authenticated_request \\\n            .to_endpoint('folder/{}/'.format(folder)) \\\n            .delete()", "docstring": "Delete a folder. It will recursively delete all the content.\n\nArgs:\nfolder_id (str): The UUID of the folder to be deleted.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: 403\nStorageNotFoundException: 404\nHTTPError: other non-20x error codes", "source": "juraj-google-style"}
{"code": "def __init__(self, subdir, experiment_name, run_name):\n    \n    self._subdir = subdir\n    self._experiment_name = experiment_name\n    self._run_name = run_name\n    self._directory_watcher = directory_watcher.DirectoryWatcher(\n        subdir,\n        event_file_loader.RawEventFileLoader,\n        io_wrapper.IsTensorFlowEventsFile)", "docstring": "Constructs a `_RunLoader`.\n\nArgs:\nsubdir: string, filesystem path of the run directory\nexperiment_name: string, name of the run's experiment\nrun_name: string, name of the run", "source": "juraj-google-style"}
{"code": "def new_typed_dict(self, name, items, keywords):\n    cls_name = escape.pack_typeddict_base_class(name, len(self.generated_classes[name]))\n    processed_keywords = []\n    for k in keywords:\n        if k.arg != 'total':\n            raise _ParseError(f'Unexpected kwarg {k.arg!r} passed to TypedDict')\n        if not isinstance(k.value, types.Pyval) or not isinstance(k.value.value, bool):\n            raise _ParseError(f\"Illegal value {k.value!r} for 'total' kwarg to TypedDict\")\n        processed_keywords.append((k.arg, k.value.to_pytd_literal()))\n    constants = tuple((pytd.Constant(k, v) for k, v in items.items()))\n    cls = pytd.Class(name=cls_name, keywords=tuple(processed_keywords), bases=(pytd.NamedType('typing.TypedDict'),), methods=(), constants=constants, decorators=(), classes=(), slots=None, template=())\n    self.generated_classes[name].append(cls)\n    self.add_import('typing', ['TypedDict'])\n    return pytd.NamedType(cls_name)", "docstring": "Returns a type for a TypedDict.\n\nThis method is called only for TypedDict objects defined via the following\nfunction-based syntax:\n\nFoo = TypedDict('Foo', {'a': int, 'b': str}, total=False)\n\nrather than the recommended class-based syntax.\n\nArgs:\nname: the name of the TypedDict instance, e.g., \"'Foo'\".\nitems: a {key: value_type} dict, e.g., {\"'a'\": \"int\", \"'b'\": \"str\"}.\nkeywords: A sequence of kwargs passed to the function.", "source": "github-repos"}
{"code": "def to_value(original_string, corenlp_value=None):\n    if isinstance(original_string, Value):\n        return original_string\n    if (not corenlp_value):\n        corenlp_value = original_string\n    amount = NumberValue.parse(corenlp_value)\n    if (amount is not None):\n        return NumberValue(amount, original_string)\n    ymd = DateValue.parse(corenlp_value)\n    if (ymd is not None):\n        if (ymd[1] == ymd[2] == (- 1)):\n            return NumberValue(ymd[0], original_string)\n        else:\n            return DateValue(ymd[0], ymd[1], ymd[2], original_string)\n    return StringValue(original_string)", "docstring": "Convert the string to Value object.\n\nArgs:\noriginal_string (basestring): Original string\ncorenlp_value (basestring): Optional value returned from CoreNLP\nReturns:\nValue", "source": "codesearchnet"}
{"code": "def add_affiliation(self, value, curated_relation=None, record=None):\n        \n        if value:\n            affiliation = {\n                'value': value\n            }\n            if record:\n                affiliation['record'] = record\n            if curated_relation is not None:\n                affiliation['curated_relation'] = curated_relation\n            self._ensure_list_field('affiliations', affiliation)", "docstring": "Add an affiliation.\n\nArgs:\nvalue (string): affiliation value\ncurated_relation (bool): is relation curated\nrecord (dict): affiliation JSON reference", "source": "juraj-google-style"}
{"code": "def remove(self, email):\n    if (email in self._collaborators):\n        if (self._collaborators[email] == ShareRequestValue.Add):\n            del self._collaborators[email]\n        else:\n            self._collaborators[email] = ShareRequestValue.Remove\n    self._dirty = True", "docstring": "Remove a Collaborator.\n\nArgs:\nstr : Collaborator email address.", "source": "codesearchnet"}
{"code": "def get_component(self, colour, tolerance=0, default=None):\n    if (not (0 <= tolerance <= np.sqrt(195075))):\n        raise LegendError('Tolerance must be between 0 and 441.67')\n    for decor in self.__list:\n        if (colour.lower() == decor.colour):\n            return decor.component\n    (r1, g1, b1) = utils.hex_to_rgb(colour)\n    best_match = '\n    best_match_dist = np.sqrt((((r1 ** 2.0) + (g1 ** 2.0)) + (b1 ** 2.0)))\n    for decor in self.__list:\n        (r2, g2, b2) = decor.rgb\n        distance = np.sqrt(((((r2 - r1) ** 2.0) + ((g2 - g1) ** 2.0)) + ((b2 - b1) ** 2.0)))\n        if (distance < best_match_dist):\n            best_match = decor.component\n            best_match_dist = distance\n            best_match_colour = decor.colour\n    if (best_match_dist <= tolerance):\n        return best_match\n    else:\n        with warnings.catch_warnings():\n            warnings.simplefilter('always')\n            w = 'No match found for {0} '.format(colour.lower())\n            w += 'with tolerance of {0}. Best match is '.format(tolerance)\n            w += '{0}, {1}'.format(best_match.summary(), best_match_colour)\n            w += ', d={0}'.format(best_match_dist)\n            warnings.warn(w)\n        return default", "docstring": "Get the component corresponding to a display colour. This is for\ngenerating a Striplog object from a colour image of a striplog.\n\nArgs:\ncolour (str): The hex colour string to look up.\ntolerance (float): The colourspace distance within which to match.\ndefault (component or None): The component to return in the event\nof no match.\n\nReturns:\ncomponent. The component best matching the provided colour.", "source": "codesearchnet"}
{"code": "def _send(self, method, path, data, filename):\n        \n        if filename is None:\n            return self._send_json(method, path, data)\n        else:\n            return self._send_file(method, path, data, filename)", "docstring": "Send data to a remote server, either with a POST or a PUT request.\n\nArgs:\n`method`: The method (POST or PUT) to use.\n`path`: The path to the resource.\n`data`: The data to send.\n`filename`: The filename of the file to send (if any).\nReturns:\nThe content of the response.\nRaises:\nAn exception depending on the HTTP status code of the response.", "source": "juraj-google-style"}
{"code": "def GetRelativePath(self, path_spec):\n    \n    location = getattr(path_spec, 'location', None)\n    if location is None:\n      raise errors.PathSpecError('Path specification missing location.')\n\n    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(\n        self._file_system.type_indicator):\n      if not location.startswith(self._mount_point.location):\n        raise errors.PathSpecError(\n            'Path specification does not contain mount point.')\n    else:\n      if not hasattr(path_spec, 'parent'):\n        raise errors.PathSpecError('Path specification missing parent.')\n\n      if path_spec.parent != self._mount_point:\n        raise errors.PathSpecError(\n            'Path specification does not contain mount point.')\n\n    path_segments = self._file_system.SplitPath(location)\n\n    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(\n        self._file_system.type_indicator):\n      mount_point_path_segments = self._file_system.SplitPath(\n          self._mount_point.location)\n      path_segments = path_segments[len(mount_point_path_segments):]\n\n    return '{0:s}{1:s}'.format(\n        self._file_system.PATH_SEPARATOR,\n        self._file_system.PATH_SEPARATOR.join(path_segments))", "docstring": "Returns the relative path based on a resolved path specification.\n\nThe relative path is the location of the upper most path specification.\nThe the location of the mount point is stripped off if relevant.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nstr: corresponding relative path or None if the relative path could not\nbe determined.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "def inet_to_str(inet):\n    try:\n        return socket.inet_ntop(socket.AF_INET, inet)\n    except ValueError:\n        return socket.inet_ntop(socket.AF_INET6, inet)", "docstring": "Convert inet object to a string\n\nArgs:\ninet (inet struct): inet network address\nReturns:\nstr: Printable/readable IP address", "source": "codesearchnet"}
{"code": "def destroy_iam(app='', env='dev', **_):\n    \n    session = boto3.Session(profile_name=env)\n    client = session.client('iam')\n\n    generated = get_details(env=env, app=app)\n    generated_iam = generated.iam()\n    app_details = collections.namedtuple('AppDetails', generated_iam.keys())\n    details = app_details(**generated_iam)\n\n    LOG.debug('Application details: %s', details)\n\n    resource_action(\n        client,\n        action='remove_user_from_group',\n        log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',\n        GroupName=details.group,\n        UserName=details.user)\n    resource_action(client, action='delete_user', log_format='Destroyed user: %(UserName)s', UserName=details.user)\n    resource_action(client, action='delete_group', log_format='Destroyed group: %(GroupName)s', GroupName=details.group)\n\n    resource_action(\n        client,\n        action='remove_role_from_instance_profile',\n        log_format='Destroyed Instance Profile from Role: '\n        '%(InstanceProfileName)s ~> %(RoleName)s',\n        InstanceProfileName=details.profile,\n        RoleName=details.role)\n    resource_action(\n        client,\n        action='delete_instance_profile',\n        log_format='Destroyed Instance Profile: %(InstanceProfileName)s',\n        InstanceProfileName=details.profile)\n\n    role_policies = []\n    try:\n        role_policies = resource_action(\n            client,\n            action='list_role_policies',\n            log_format='Found Role Policies for %(RoleName)s.',\n            RoleName=details.role)['PolicyNames']\n    except TypeError:\n        LOG.info('Role %s not found.', details.role)\n\n    for policy in role_policies:\n        resource_action(\n            client,\n            action='delete_role_policy',\n            log_format='Removed Inline Policy from Role: '\n            '%(PolicyName)s ~> %(RoleName)s',\n            RoleName=details.role,\n            PolicyName=policy)\n\n    attached_role_policies = []\n    try:\n        attached_role_policies = resource_action(\n            client,\n            action='list_attached_role_policies',\n            log_format='Found attached Role Polices for %(RoleName)s.',\n            RoleName=details.role)['AttachedPolicies']\n    except TypeError:\n        LOG.info('Role %s not found.', details.role)\n\n    for policy in attached_role_policies:\n        resource_action(\n            client,\n            action='detach_role_policy',\n            log_format='Detached Policy from Role: '\n            '%(PolicyArn)s ~> %(RoleName)s',\n            RoleName=details.role,\n            PolicyArn=policy['PolicyArn'])\n\n    resource_action(client, action='delete_role', log_format='Destroyed Role: %(RoleName)s', RoleName=details.role)", "docstring": "Destroy IAM Resources.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment, i.e. dev, stage, prod.\n\nReturns:\nTrue upon successful completion.", "source": "juraj-google-style"}
{"code": "def movie_list(self, **kwargs):\n        \n        path = self._get_path('movie_list')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the list of Movie genres.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def get_mnemonic(self, mnemonic, alias=None):\n    alias = (alias or {})\n    aliases = alias.get(mnemonic, [mnemonic])\n    for a in aliases:\n        if (a in self.data):\n            return a\n    return None", "docstring": "Instead of picking curves by name directly from the data dict, you\ncan pick them up with this method, which takes account of the alias\ndict you pass it. If you do not pass an alias dict, then you get the\ncurve you asked for, if it exists, or None. NB Wells do not have alias\ndicts, but Projects do.\n\nArgs:\nmnemonic (str): the name of the curve you want.\nalias (dict): an alias dictionary, mapping mnemonics to lists of\nmnemonics.\n\nReturns:\nCurve.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    file_offset = 0\n    file_size = file_object.get_size()\n    record_map = self._GetDataTypeMap('pls_recall_record')\n    while (file_offset < file_size):\n        try:\n            (pls_record, record_data_size) = self._ReadStructureFromFileObject(file_object, file_offset, record_map)\n        except (ValueError, errors.ParseError) as exception:\n            if (file_offset == 0):\n                raise errors.UnableToParseFile('Unable to parse first record.')\n            parser_mediator.ProduceExtractionWarning('unable to parse record at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n            break\n        if ((file_offset == 0) and (not self._VerifyRecord(pls_record))):\n            raise errors.UnableToParseFile('Verification of first record failed.')\n        event_data = PlsRecallEventData()\n        event_data.database_name = pls_record.database_name.rstrip('\\x00')\n        event_data.sequence_number = pls_record.sequence_number\n        event_data.offset = file_offset\n        event_data.query = pls_record.query.rstrip('\\x00')\n        event_data.username = pls_record.username.rstrip('\\x00')\n        date_time = dfdatetime_delphi_date_time.DelphiDateTime(timestamp=pls_record.last_written_time)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        file_offset += record_data_size", "docstring": "Parses a PLSRecall.dat file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def loss(logits, labels, batch_size=None):\n  \n  if not batch_size:\n    batch_size = FLAGS.batch_size\n\n  \n  \n  sparse_labels = tf.reshape(labels, [batch_size, 1])\n  indices = tf.reshape(tf.range(batch_size), [batch_size, 1])\n  concated = tf.concat(axis=1, values=[indices, sparse_labels])\n  num_classes = logits[0].get_shape()[-1].value\n  dense_labels = tf.sparse_to_dense(concated,\n                                    [batch_size, num_classes],\n                                    1.0, 0.0)\n\n  \n  slim.losses.cross_entropy_loss(logits[0],\n                                 dense_labels,\n                                 label_smoothing=0.1,\n                                 weight=1.0)\n\n  \n  slim.losses.cross_entropy_loss(logits[1],\n                                 dense_labels,\n                                 label_smoothing=0.1,\n                                 weight=0.4,\n                                 scope='aux_loss')", "docstring": "Adds all losses for the model.\n\nNote the final loss is not returned. Instead, the list of losses are collected\nby slim.losses. The losses are accumulated in tower_loss() and summed to\ncalculate the total loss.\n\nArgs:\nlogits: List of logits from inference(). Each entry is a 2-D float Tensor.\nlabels: Labels from distorted_inputs or inputs(). 1-D tensor\nof shape [batch_size]\nbatch_size: integer", "source": "juraj-google-style"}
{"code": "def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True,\n                   shard_ctx=None, slice_ctx=None):\n    \n    if obj is None or not isinstance(obj, shard_life_cycle._ShardLifeCycle):\n      return\n\n    shard_context = shard_ctx or self.shard_context\n    slice_context = slice_ctx or self.slice_context\n    if begin_slice:\n      if slice_id == 0:\n        obj.begin_shard(shard_context)\n      obj.begin_slice(slice_context)\n    else:\n      obj.end_slice(slice_context)\n      if last_slice:\n        obj.end_shard(shard_context)", "docstring": "Makes sure shard life cycle interface are respected.\n\nArgs:\nobj: the obj that may have implemented _ShardLifeCycle.\nslice_id: current slice_id\nlast_slice: whether this is the last slice.\nbegin_slice: whether this is the beginning or the end of a slice.\nshard_ctx: shard ctx for dependency injection. If None, it will be read\nfrom self.\nslice_ctx: slice ctx for dependency injection. If None, it will be read\nfrom self.", "source": "juraj-google-style"}
{"code": "def update(self, **kwargs):\n    for arg in kwargs:\n        if hasattr(self, arg):\n            setattr(self, arg, kwargs[arg])\n        else:\n            raise ValueError(('Invalid RayParams parameter in update: %s' % arg))\n    self._check_usage()", "docstring": "Update the settings according to the keyword arguments.\n\nArgs:\nkwargs: The keyword arguments to set corresponding fields.", "source": "codesearchnet"}
{"code": "def assert_like_rnncell(cell_name, cell):\n    conditions = [_hasattr(cell, 'output_size'), _hasattr(cell, 'state_size'), _hasattr(cell, 'get_initial_state') or _hasattr(cell, 'zero_state'), callable(cell)]\n    errors = [\"'output_size' property is missing\", \"'state_size' property is missing\", \"either 'zero_state' or 'get_initial_state' method is required\", 'is not callable']\n    if not all(conditions):\n        errors = [error for error, cond in zip(errors, conditions) if not cond]\n        raise TypeError('The argument {!r} ({}) is not an RNNCell: {}.'.format(cell_name, cell, ', '.join(errors)))", "docstring": "Raises a TypeError if cell is not like an RNNCell.\n\nNOTE: Do not rely on the error message (in particular in tests) which can be\nsubject to change to increase readability. Use\nASSERT_LIKE_RNNCELL_ERROR_REGEXP.\n\nArgs:\ncell_name: A string to give a meaningful error referencing to the name of\nthe functionargument.\ncell: The object which should behave like an RNNCell.\n\nRaises:\nTypeError: A human-friendly exception.", "source": "github-repos"}
{"code": "def step(self, actions):\n    \n    for index, (env, action) in enumerate(zip(self._envs, actions)):\n      if not env.action_space.contains(action):\n        message = 'Invalid action at index {}: {}'\n        raise ValueError(message.format(index, action))\n    if self._blocking:\n      transitions = [\n          env.step(action)\n          for env, action in zip(self._envs, actions)]\n    else:\n      transitions = [\n          env.step(action, blocking=False)\n          for env, action in zip(self._envs, actions)]\n      transitions = [transition() for transition in transitions]\n    observs, rewards, dones, infos = zip(*transitions)\n    observ = np.stack(observs)\n    reward = np.stack(rewards)\n    done = np.stack(dones)\n    info = tuple(infos)\n    return observ, reward, done, info", "docstring": "Forward a batch of actions to the wrapped environments.\n\nArgs:\nactions: Batched action to apply to the environment.\n\nRaises:\nValueError: Invalid actions.\n\nReturns:\nBatch of observations, rewards, and done flags.", "source": "juraj-google-style"}
{"code": "def to_image(self, filename='palette.png', band_width=1, length=60, max_width=0, vertical=True, alpha_channel=False):\n    if (max_width < 1):\n        pass\n    else:\n        band_width = int((max_width / len(self._colours)))\n    image_width = (band_width * len(self._colours))\n    if alpha_channel:\n        my_image = Image.new('RGBA', (image_width, length))\n    else:\n        my_image = Image.new('RGB', (image_width, length))\n    image_loaded = my_image.load()\n    x = 0\n    for my_colour in self._colours:\n        for x1 in range(band_width):\n            for y in range(length):\n                image_loaded[(x, y)] = my_colour.rgb()\n            x = (x + 1)\n    if vertical:\n        my_image = my_image.rotate(270)\n    my_image.save(filename)", "docstring": "Creates an image from the palette.\n\nArgs:\nfilename(Optional[string]): filename of saved file. Defaults to\n``palette.png`` in the current working directory.\nband_width(optional[int]): how wide each colour band should be.\nDefaults to 1 pixel.\nlength(Optional[int]): the length of the overall image in pixels.\nThis is the dimension orthogonal to ``band_width``. Defaults\nto 60 pixels.\nmax_width(Optional[int]): if ``band_width`` is not set and this is,\nthis determines how wide the whole image should be.\nvertical(Optional[bool]): if the image runs vertical (``True``,\ndefault) or horizontal (``False``).\nalpha_channel(Optional[bool]): if ``True``, the created image will\nhave an Alpha channel. Defaults to ``False``.", "source": "codesearchnet"}
{"code": "def export_panels(adapter, panels, versions=None, build='37'):\n    \n    if versions and (len(versions) != len(panels)):\n        raise SyntaxError(\"If version specify for each panel\")\n\n    headers = []\n    build_string = (\"\n    \n    headers.append(build_string.format(build))\n    header_string = (\"\n    contig_string = (\"\n    bed_string = (\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\")\n\n    \n    panel_geneids = set()\n    \n    chromosomes_found = set()\n    \n    hgnc_geneobjs = []\n\n    \n    for i,panel_id in enumerate(panels):\n        version = None\n        if versions:\n            version = versions[i]\n            \n        panel_obj = adapter.gene_panel(panel_id, version=version)\n        if not panel_obj:\n            LOG.warning(\"Panel {0} version {1} could not be found\".format(panel_id, version))\n            continue\n\n        headers.append(header_string.format(\n            panel_obj['panel_name'],\n            panel_obj['version'],\n            panel_obj['date'].date(),\n            panel_obj['display_name'],\n        ))\n        \n        for gene_obj in panel_obj['genes']:\n            panel_geneids.add(gene_obj['hgnc_id'])\n\n    \n    gene_objs = adapter.hgncid_to_gene(build=build)\n    \n    for hgnc_id in panel_geneids:\n        hgnc_geneobj = gene_objs.get(hgnc_id)\n        if hgnc_geneobj is None:\n            LOG.warn(\"missing HGNC gene: %s\", hgnc_id)\n            continue\n        chrom = hgnc_geneobj['chromosome']\n        start = hgnc_geneobj['start']\n        chrom_int = CHROMOSOME_INTEGERS.get(chrom)\n        if not chrom_int:\n            LOG.warn(\"Chromosome %s out of scope\", chrom)\n            continue\n            \n        hgnc_geneobjs.append((chrom_int, start, hgnc_geneobj))\n        chromosomes_found.add(chrom)\n    \n    \n    hgnc_geneobjs.sort(key=lambda tup: (tup[0], tup[1]))\n    \n    for chrom in CHROMOSOMES:\n        if chrom in chromosomes_found:\n            headers.append(contig_string.format(chrom))\n\n    headers.append(\"\n\n    for header in headers:\n        yield header\n\n    for hgnc_gene in hgnc_geneobjs:\n        gene_obj = hgnc_gene[-1]\n        gene_line = bed_string.format(gene_obj['chromosome'], gene_obj['start'],\n                                      gene_obj['end'], gene_obj['hgnc_id'],\n                                      gene_obj['hgnc_symbol'])\n        yield gene_line", "docstring": "Export all genes in gene panels\n\nExports the union of genes in one or several gene panels to a bed like format with coordinates.\n\nArgs:\nadapter(scout.adapter.MongoAdapter)\npanels(iterable(str)): Iterable with panel ids\nbed(bool): If lines should be bed formated", "source": "juraj-google-style"}
{"code": "def running(processid):\n    try:\n        os.kill(processid, 0)\n    except OverflowError as exc:\n        print('checking validity of pid ({p}) failed with: {e}'.format(p=processid, e=exc))\n        sys.exit(1)\n    except OSError:\n        return False\n    else:\n        return True", "docstring": "Check the validity of a process ID.\n\nArguments:\nprocessid (int): Process ID number.\n\nReturns:\nTrue if process ID is found otherwise False.", "source": "codesearchnet"}
{"code": "def make_repr(self, attrs):\n    if (config.REPR_VERBOSITY in [MEDIUM, HIGH]):\n        return self.__str__()\n    elif (config.REPR_VERBOSITY is LOW):\n        return '{}({})'.format(self.__class__.__name__, ', '.join((((attr + '=') + repr(getattr(self, attr))) for attr in attrs)))\n    raise ValueError('Invalid value for `config.REPR_VERBOSITY`')", "docstring": "Construct a repr string.\n\nIf `config.REPR_VERBOSITY` is ``1`` or ``2``, this function calls the\nobject's __str__ method. Although this breaks the convention that __repr__\nshould return a string which can reconstruct the object, readable reprs are\ninvaluable since the Python interpreter calls `repr` to represent all\nobjects in the shell. Since PyPhi is often used in the interpreter we want\nto have meaningful and useful representations.\n\nArgs:\nself (obj): The object in question\nattrs (Iterable[str]): Attributes to include in the repr\n\nReturns:\nstr: the ``repr``esentation of the object", "source": "codesearchnet"}
{"code": "def override_default_args(**kwargs):\n  \n\n  override_default_kwargs = kwargs\n\n  def custom_getter(getter, *args, **kwargs):\n    \n    updated_kwargs = override_default_kwargs.copy()\n    updated_kwargs.update({kw: value for kw, value in six.iteritems(kwargs)\n                           if value is not None})\n    return getter(*args, **updated_kwargs)\n\n  return custom_getter", "docstring": "Creates a custom getter that applies specified named arguments.\n\nThe returned custom getter treats the specified named arguments as revised\ndefaults, and does not override any non-`None` argument values supplied by\nthe original get_variable call (or by a nested scope's custom getter).\n\nArgs:\n**kwargs: Overriding arguments for the custom getter to use in preference\nthe named arguments it's called with.\n\nReturns:\nCustom getter.", "source": "juraj-google-style"}
{"code": "def render(raw_config, environment=None):\n    \n\n    t = Template(raw_config)\n    buff = StringIO()\n    if not environment:\n        environment = {}\n    try:\n        substituted = t.substitute(environment)\n    except KeyError as e:\n        raise exceptions.MissingEnvironment(e.args[0])\n    except ValueError:\n        \n        substituted = t.safe_substitute(environment)\n\n    if not isinstance(substituted, str):\n        substituted = substituted.decode('utf-8')\n\n    buff.write(substituted)\n    buff.seek(0)\n    return buff.read()", "docstring": "Renders a config, using it as a template with the environment.\n\nArgs:\nraw_config (str): the raw stacker configuration string.\nenvironment (dict, optional): any environment values that should be\npassed to the config\n\nReturns:\nstr: the stacker configuration populated with any values passed from\nthe environment", "source": "juraj-google-style"}
{"code": "def update_hparams_for_universal_transformer(hparams):\n    hparams.daisy_chain_variables = False\n    hparams.add_hparam('mix_with_transformer', None)\n    hparams.add_hparam('num_mixedin_layers', 2)\n    hparams.add_hparam('num_inrecurrence_layers', 1)\n    hparams.add_hparam('recurrence_type', 'basic')\n    hparams.add_hparam('num_rec_steps', hparams.num_hidden_layers)\n    hparams.add_hparam('add_position_timing_signal', True)\n    if hparams.add_position_timing_signal:\n        hparams.pos = None\n    hparams.add_hparam('position_start_index', None)\n    hparams.add_hparam('add_step_timing_signal', True)\n    hparams.add_hparam('step_timing_signal_type', 'learned')\n    hparams.add_hparam('add_or_concat_timing_signal', 'add')\n    hparams.add_hparam('add_sru', False)\n    hparams.add_hparam('transformer_ffn_type', 'fc')\n    hparams.add_hparam('transform_bias_init', (- 1.0))\n    hparams.add_hparam('couple_carry_transform_gates', True)\n    hparams.add_hparam('depth_embedding', True)\n    hparams.add_hparam('dwa_elements', True)\n    hparams.add_hparam('gate_ffn_layer', 'dense')\n    hparams.add_hparam('lstm_forget_bias', 1.0)\n    hparams.add_hparam('use_memory_as_final_state', False)\n    hparams.add_hparam('add_ffn_unit_to_the_transition_function', False)\n    hparams.add_hparam('act_type', 'basic')\n    hparams.add_hparam('act_max_steps', (2 * hparams.num_hidden_layers))\n    hparams.add_hparam('act_halting_bias_init', 1.0)\n    hparams.add_hparam('act_epsilon', 0.01)\n    hparams.add_hparam('act_loss_weight', 0.01)\n    return hparams", "docstring": "Adds default hparams for all of the variants of the Universal Transformer.\n\nArgs:\nhparams: default hparams (usually one of the standard hparams from\ntransformer model (like \"transformer_base\")\n\nReturns:\nhparams with default values for Universal Transformers hyper-parameters", "source": "codesearchnet"}
{"code": "def InitUser():\n    result = AppUser.query((AppUser.user == users.get_current_user())).fetch()\n    if result:\n        app_user = result[0]\n    else:\n        app_user = AppUser(user=users.get_current_user(), email=users.get_current_user().email())\n        app_user.put()\n    return app_user", "docstring": "Initialize application user.\n\nRetrieve existing user credentials from datastore or add new user.\n\nReturns:\nAppUser instance of the application user.", "source": "codesearchnet"}
{"code": "def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):\n    plan_from_length = []\n    plan_num_rand_blocks = []\n    if 2 * num_rand_blocks + 5 < from_seq_length \n        plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))\n        plan_num_rand_blocks.append(num_rand_blocks)\n        plan_from_length.append(from_seq_length)\n        plan_num_rand_blocks.append(0)\n    elif num_rand_blocks + 5 < from_seq_length \n        plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))\n        plan_num_rand_blocks.append(num_rand_blocks \n        plan_from_length.append(from_seq_length)\n        plan_num_rand_blocks.append(num_rand_blocks - num_rand_blocks \n    else:\n        plan_from_length.append(from_seq_length)\n        plan_num_rand_blocks.append(num_rand_blocks)\n    return (plan_from_length, plan_num_rand_blocks)", "docstring": "Gives the plan of where to put random attention.\n\nArgs:\nfrom_seq_length: int. length of from sequence.\nfrom_block_size: int. size of block in from sequence.\nnum_rand_blocks: int. Number of random chunks per row.\n\nReturns:\nplan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for\neach block", "source": "github-repos"}
{"code": "def json_set_description(recipe, variables):\n    if 'script' in recipe:\n        if 'description' in recipe['script']:\n            try:\n                recipe['script']['description'] = text_set_fields(recipe['script']['description'], variables)\n            except KeyError:\n                pass", "docstring": "Replaces all fields in description with values provided.\n\nChecks if recipe['script']['description'] exist.  The replaces all %(???)s\nvariables\nwith values provided.  Note: %(???)s must match { \"field\":{ \"name\":\"???\" }}\nin JOSN.\n\nArgs:\nrecipe: (dict) A dictionary representation of the JSON script.\nvariables: (dict) A lookup table of all values to be replaced, key is name\nof field.\n\nReturns:\nNothig. Description is modified in place.", "source": "github-repos"}
{"code": "def _resize_for_patching(self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:\n    new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)\n    resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)\n    return resized_image", "docstring": "Resizes an image to a target resolution while maintaining aspect ratio.\n\nArgs:\nimage (np.array):\nThe input image.\ntarget_resolution (tuple):\nThe target resolution (height, width) of the image.\nresample (`PILImageResampling`):\nResampling filter to use if resizing the image.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\nnp.array: The resized and padded image.", "source": "github-repos"}
{"code": "def __init__(self, bits: List[int], order: int, initializer: tf.keras.initializers.Initializer=tf.keras.initializers.RandomUniform(), name: Union[None, str]=None):\n    parity_layer = energy_utils.Parity(bits, order)\n    self._num_terms = parity_layer.num_terms\n    self._indices = parity_layer.indices\n    pre_process = [energy_utils.SpinsFromBitstrings(), parity_layer]\n    post_process = [energy_utils.VariableDot(initializer=initializer)]\n    super().__init__(bits, pre_process + post_process, name)\n    self._post_process = post_process", "docstring": "Initializes a KOBE.\n\nArgs:\nbits: Each entry is an index on which the distribution is supported.\norder: The order of the KOBE.\ninitializer: Specifies how to initialize the values of the parameters.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def holiday_name(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `holiday_name`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `holiday_name`')\n\n        self._holiday_name = value", "docstring": "Corresponds to IDD Field `holiday_name`\n\nArgs:\nvalue (str): value for IDD Field `holiday_name`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _minimum_one_is_missing(self, **kwargs):\n    rqset = self._meta_data['minimum_additional_parameters']\n    if rqset:\n        kwarg_set = set(iterkeys(kwargs))\n        if kwarg_set.isdisjoint(rqset):\n            args = sorted(rqset)\n            error_message = ('This resource requires at least one of the mandatory additional parameters to be provided: %s' % ', '.join(args))\n            raise MissingRequiredCreationParameter(error_message)", "docstring": "Helper function to do operation on sets\n\nVerify if at least one of the elements\nis present in **kwargs. If no items of rqset\nare contained in **kwargs  the function\nraises exception.\n\nThis check will only trigger if rqset is not empty.\n\nRaises:\nMissingRequiredCreationParameter", "source": "codesearchnet"}
{"code": "def _define_loop(graph, logdir, train_steps, eval_steps):\n    loop = tools.Loop(logdir, graph.step, graph.should_log, graph.do_report, graph.force_reset)\n    loop.add_phase('train', graph.done, graph.score, graph.summary, train_steps, report_every=train_steps, log_every=(train_steps \n    loop.add_phase('eval', graph.done, graph.score, graph.summary, eval_steps, report_every=eval_steps, log_every=(eval_steps \n    return loop", "docstring": "Create and configure a training loop with training and evaluation phases.\n\nArgs:\ngraph: Object providing graph elements via attributes.\nlogdir: Log directory for storing checkpoints and summaries.\ntrain_steps: Number of training steps per epoch.\neval_steps: Number of evaluation steps per epoch.\n\nReturns:\nLoop object.", "source": "codesearchnet"}
{"code": "def set_x_grid_info(self, x_low, x_high, num_x, xscale, xval_name):\n        \n        self._set_grid_info('x', x_low, x_high, num_x, xscale, xval_name)\n        return", "docstring": "Set the grid values for x.\n\nCreate information for the grid of x values.\n\nArgs:\nnum_x (int): Number of points on axis.\nx_low/x_high (float): Lowest/highest value for the axis.\nxscale (str): Scale of the axis. Choices are 'log' or 'lin'.\nxval_name (str): Name representing the axis. See GenerateContainer documentation\nfor options for the name.", "source": "juraj-google-style"}
{"code": "def get_word_index(path='reuters_word_index.json'):\n    origin_folder = 'https:\n    path = get_file(path, origin=origin_folder + 'reuters_word_index.json', file_hash='4d44cc38712099c9e383dc6e5f11a921')\n    with open(path) as f:\n        return json.load(f)", "docstring": "Retrieves a dict mapping words to their index in the Reuters dataset.\n\nActual word indices starts from 3, with 3 indices reserved for:\n0 (padding), 1 (start), 2 (oov).\n\nE.g. word index of 'the' is 1, but the in the actual training data, the\nindex of 'the' will be 1 + 3 = 4. Vice versa, to translate word indices in\ntraining data back to words using this mapping, indices need to subtract 3.\n\nArgs:\npath: where to cache the data (relative to `~/.keras/dataset`).\n\nReturns:\nThe word index dictionary. Keys are word strings, values are their\nindex.", "source": "github-repos"}
{"code": "def _set_request_cache_if_django_cache_hit(key, django_cached_response):\n    if django_cached_response.is_found:\n        DEFAULT_REQUEST_CACHE.set(key, django_cached_response.value)", "docstring": "Sets the value in the request cache if the django cached response was a hit.\n\nArgs:\nkey (string)\ndjango_cached_response (CachedResponse)", "source": "codesearchnet"}
{"code": "def GetConsoleAttr(encoding=None, reset=False):\n    attr = ConsoleAttr._CONSOLE_ATTR_STATE\n    if not reset:\n        if not attr:\n            reset = True\n        elif encoding and encoding != attr.GetEncoding():\n            reset = True\n    if reset:\n        attr = ConsoleAttr(encoding=encoding)\n        ConsoleAttr._CONSOLE_ATTR_STATE = attr\n    return attr", "docstring": "Gets the console attribute state.\n\nIf this is the first call or reset is True or encoding is not None and does\nnot match the current encoding or out is not None and does not match the\ncurrent out then the state is (re)initialized. Otherwise the current state\nis returned.\n\nThis call associates the out file stream with the console. All console related\noutput should go to the same stream.\n\nArgs:\nencoding: Encoding override.\nascii -- ASCII. This is the default.\nutf8 -- UTF-8 unicode.\nwin -- Windows code page 437.\nreset: Force re-initialization if True.\n\nReturns:\nThe global ConsoleAttr state object.", "source": "github-repos"}
{"code": "class Sliceable:\n\n    def __init__(self, array):\n        self.array = array\n\n    def __getitem__(self, indices):\n        \n        return self.array[indices]\n\n    @classmethod\n    def cast(cls, x, dtype):\n        \n        return x.astype(dtype)\n\n    @classmethod\n    def convert_to_numpy(cls, x):\n        \n        return x\n\n    @classmethod\n    def convert_to_tf_dataset_compatible(cls, x):\n        \n        return x\n\n    @classmethod\n    def convert_to_jax_compatible(cls, x):\n        \n        return x\n\n    @classmethod\n    def convert_to_torch_compatible(cls, x):\n        \n        return x", "docstring": "`Sliceable` wrapping a tensor.\n\nA `Sliceable` implements the subscript operator to slice or index against\nthe first dimension of the array. It also has conversion methods for each\none of the backends.\n\nArgs:\narray: the native array or tensor to wrap.\n\nAttributes:\nshape: the shape of the full dense native array.", "source": "github-repos"}
{"code": "def destroy_cloudwatch_event(app='', env='dev', region=''):\n    \n\n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('events')\n\n    event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region)\n\n    for rule in event_rules:\n        cloudwatch_client.remove_targets(Rule=rule, Ids=[app])\n\n    return True", "docstring": "Destroy Cloudwatch event subscription.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\nReturns:\nbool: True upon successful completion.", "source": "juraj-google-style"}
{"code": "def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors, all_tensor_names=False, count_exclude_pattern=''):\n    try:\n        reader = py_checkpoint_reader.NewCheckpointReader(file_name)\n        if all_tensors or all_tensor_names:\n            var_to_shape_map = reader.get_variable_to_shape_map()\n            var_to_dtype_map = reader.get_variable_to_dtype_map()\n            for key, value in sorted(var_to_shape_map.items()):\n                print('tensor: %s (%s) %s' % (key, var_to_dtype_map[key].name, value))\n                if all_tensors:\n                    try:\n                        print(reader.get_tensor(key))\n                    except errors_impl.InternalError:\n                        print('<not convertible to a numpy dtype>')\n        elif not tensor_name:\n            print(reader.debug_string().decode('utf-8', errors='ignore'))\n        else:\n            if not reader.has_tensor(tensor_name):\n                print('Tensor %s not found in checkpoint' % tensor_name)\n                return\n            var_to_shape_map = reader.get_variable_to_shape_map()\n            var_to_dtype_map = reader.get_variable_to_dtype_map()\n            print('tensor: %s (%s) %s' % (tensor_name, var_to_dtype_map[tensor_name].name, var_to_shape_map[tensor_name]))\n            print(reader.get_tensor(tensor_name))\n        print('\n    except Exception as e:\n        print(str(e))\n        if 'corrupted compressed block contents' in str(e):\n            print(\"It's likely that your checkpoint file has been compressed with SNAPPY.\")\n        if 'Data loss' in str(e) and any((e in file_name for e in ['.index', '.meta', '.data'])):\n            proposed_file = '.'.join(file_name.split('.')[0:-1])\n            v2_file_error_template = \"\\nIt's likely that this is a V2 checkpoint and you need to provide the filename\\n*prefix*.  Try removing the '.' and extension.  Try:\\ninspect checkpoint --file_name = {}\"\n            print(v2_file_error_template.format(proposed_file))", "docstring": "Prints tensors in a checkpoint file.\n\nIf no `tensor_name` is provided, prints the tensor names and shapes\nin the checkpoint file.\n\nIf `tensor_name` is provided, prints the content of the tensor.\n\nArgs:\nfile_name: Name of the checkpoint file.\ntensor_name: Name of the tensor in the checkpoint file to print.\nall_tensors: Boolean indicating whether to print all tensors.\nall_tensor_names: Boolean indicating whether to print all tensor names.\ncount_exclude_pattern: Regex string, pattern to exclude tensors from count.", "source": "github-repos"}
{"code": "def create_combination(list_of_sentences):\n  \n  num_sentences = len(list_of_sentences) - 1\n  combinations = []\n  for i, _ in enumerate(list_of_sentences):\n    if i == num_sentences:\n      break\n    num_pairs = num_sentences - i\n    populated = num_pairs * [list_of_sentences[i]]\n    zipped = list(zip(populated, list_of_sentences[i + 1:]))\n    combinations += zipped\n  return combinations", "docstring": "Generates all possible pair combinations for the input list of sentences.\n\nFor example:\n\ninput = [\"paraphrase1\", \"paraphrase2\", \"paraphrase3\"]\n\noutput = [(\"paraphrase1\", \"paraphrase2\"),\n(\"paraphrase1\", \"paraphrase3\"),\n(\"paraphrase2\", \"paraphrase3\")]\n\nArgs:\nlist_of_sentences: the list of input sentences.\nReturns:\nthe list of all possible sentence pairs.", "source": "juraj-google-style"}
{"code": "def reset_time_estimate(self, **kwargs):\n    path = ('%s/%s/reset_time_estimate' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Resets estimated time for the object to 0 seconds.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "codesearchnet"}
{"code": "def get_key_by_job_id(cls, mapreduce_id):\n    \n    return db.Key.from_path(cls.kind(), \"%s:%s\" % (mapreduce_id, cls._KEY_NAME))", "docstring": "Retrieves the Key for a mapreduce ID.\n\nArgs:\nmapreduce_id: The job to fetch.\n\nReturns:\nDatastore Key for the command for the given job ID.", "source": "juraj-google-style"}
{"code": "def get_attribute_from_config(config, section, attribute):\n    \n    section = config.get(section)\n    if section:\n        option = section.get(attribute)\n        if option:\n            return option\n    raise ConfigurationError(\"Config file badly formed!\\n\"\n                             \"Failed to get attribute '{}' from section '{}'!\"\n                             .format(attribute, section))", "docstring": "Try to parse an attribute of the config file.\n\nArgs:\nconfig (defaultdict): A defaultdict.\nsection (str): The section of the config file to get information from.\nattribute (str): The attribute of the section to fetch.\nReturns:\nstr: The string corresponding to the section and attribute.\nRaises:\nConfigurationError", "source": "juraj-google-style"}
{"code": "def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str='htk') -> Union[float, np.ndarray]:\n    if mel_scale not in ['slaney', 'htk', 'kaldi']:\n        raise ValueError('mel_scale should be one of \"htk\", \"slaney\" or \"kaldi\".')\n    if mel_scale == 'htk':\n        return 2595.0 * np.log10(1.0 + freq / 700.0)\n    elif mel_scale == 'kaldi':\n        return 1127.0 * np.log(1.0 + freq / 700.0)\n    min_log_hertz = 1000.0\n    min_log_mel = 15.0\n    logstep = 27.0 / np.log(6.4)\n    mels = 3.0 * freq / 200.0\n    if isinstance(freq, np.ndarray):\n        log_region = freq >= min_log_hertz\n        mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep\n    elif freq >= min_log_hertz:\n        mels = min_log_mel + np.log(freq / min_log_hertz) * logstep\n    return mels", "docstring": "Convert frequency from hertz to mels.\n\nArgs:\nfreq (`float` or `np.ndarray`):\nThe frequency, or multiple frequencies, in hertz (Hz).\nmel_scale (`str`, *optional*, defaults to `\"htk\"`):\nThe mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n\nReturns:\n`float` or `np.ndarray`: The frequencies on the mel scale.", "source": "github-repos"}
{"code": "def union(df, other, index=False, keep='first'):\n    validate_set_ops(df, other)\n    stacked = df.append(other)\n    if index:\n        stacked_reset_indexes = stacked.reset_index()\n        index_cols = [col for col in stacked_reset_indexes.columns if (col not in df.columns)]\n        index_name = df.index.names\n        return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)\n        return_df.index.names = index_name\n        return return_df\n    else:\n        return stacked.drop_duplicates(keep=keep)", "docstring": "Returns rows that appear in either DataFrame.\n\nArgs:\ndf (pandas.DataFrame): data passed in through the pipe.\nother (pandas.DataFrame): other DataFrame to use for set operation with\nthe first.\n\nKwargs:\nindex (bool): Boolean indicating whether to consider the pandas index\nas part of the set operation (default `False`).\nkeep (str): Indicates which duplicate should be kept. Options are `'first'`\nand `'last'`.", "source": "codesearchnet"}
{"code": "def calculate_query_times(**kwargs):\n    return {'total_time_avg': round(numpy.mean(kwargs['total_times']), 1), 'total_time_min': round(numpy.min(kwargs['total_times']), 1), 'total_time_max': round(numpy.max(kwargs['total_times']), 1), 'total_time_85': round(numpy.percentile(kwargs['total_times'], 85), 1), 'execution_time_avg': round(numpy.mean(kwargs['execution_times']), 1), 'execution_time_min': round(numpy.min(kwargs['execution_times']), 1), 'execution_time_max': round(numpy.max(kwargs['execution_times']), 1), 'execution_time_85': round(numpy.percentile(kwargs['execution_times'], 85), 1), 'execution_time_25': round(numpy.percentile(kwargs['execution_times'], 25), 1), 'execution_time_std': round(numpy.std(kwargs['execution_times']), 1), 'connect_time_avg': round(numpy.mean(kwargs['connect_times']), 1), 'connect_time_min': round(numpy.min(kwargs['connect_times']), 1), 'connect_time_max': round(numpy.max(kwargs['connect_times']), 1), 'connect_time_85': round(numpy.percentile(kwargs['connect_times'], 85), 1), 'results_iter_time_avg': round(numpy.mean(kwargs['results_iter_times']), 1), 'results_iter_time_min': round(numpy.min(kwargs['results_iter_times']), 1), 'results_iter_time_max': round(numpy.max(kwargs['results_iter_times']), 1), 'results_iter_time_85': round(numpy.percentile(kwargs['results_iter_times'], 85), 1)}", "docstring": "Calculates aggregate query times from all iteration times\n\nKwargs:\ntotal_times(list): List of total time calculations\nexecution_times(list): List of execution_time calculations\nresults_iter_times(list): List of results_iter_time calculations\nconnect_times(list): List of connect_time calculations\n\nReturns:\nquery_execution(dict): Query times\nFalse(bool): The query failed. Exception should be logged.", "source": "codesearchnet"}
{"code": "def send_batches(self, batch_list):\n        \n        if isinstance(batch_list, BaseMessage):\n            batch_list = batch_list.SerializeToString()\n\n        return self._post('/batches', batch_list)", "docstring": "Sends a list of batches to the validator.\n\nArgs:\nbatch_list (:obj:`BatchList`): the list of batches\n\nReturns:\ndict: the json result data, as a dict", "source": "juraj-google-style"}
{"code": "def _get_user_id(arguments_dict):\n    \n    if 'user_id' not in arguments_dict:\n        raise TypeError('Each invocation of a UserTaskMixin subclass must include the user_id')\n    user_id = arguments_dict['user_id']\n    try:\n        get_user_model().objects.get(pk=user_id)\n    except (ValueError, get_user_model().DoesNotExist):\n        raise TypeError('Invalid user_id: {}'.format(user_id))\n    return user_id", "docstring": "Get and validate the `user_id` argument to a task derived from `UserTaskMixin`.\n\nArguments:\narguments_dict (dict): The parsed positional and keyword arguments to the task\n\nReturns\n-------\nint: The primary key of a user record (may not be an int if using a custom user model)", "source": "juraj-google-style"}
{"code": "def import_certificate(self, certificate_data, bay_number=None):\n    uri = '{}/https/certificaterequest'.format(self.data['uri'])\n    if bay_number:\n        uri += ('?bayNumber=%d' % bay_number)\n    headers = {'Content-Type': 'application/json'}\n    return self._helper.do_put(uri, certificate_data, (- 1), headers)", "docstring": "Imports a signed server certificate into the enclosure.\n\nArgs:\ncertificate_data: Dictionary with Signed certificate and type.\nbay_number: OA to which the signed certificate will be imported.\n\nReturns:\nEnclosure.", "source": "codesearchnet"}
{"code": "def add_handler(self, handler):\n    handler['logger'] = self._get_logger(handler)\n    handler['reads'] = 0\n    handler['data_read'] = 0\n    self.capture_handlers.append(handler)", "docstring": "Add an additional handler\n\nArgs:\nhandler:\nA dictionary of handler configuration for the handler\nthat should be added. See :func:`__init__` for details\non valid parameters.", "source": "codesearchnet"}
{"code": "def picture_view(request, user_id, year=None):\n    \n    try:\n        user = User.objects.get(id=user_id)\n    except User.DoesNotExist:\n        raise Http404\n    default_image_path = os.path.join(settings.PROJECT_ROOT, \"static/img/default_profile_pic.png\")\n\n    if user is None:\n        raise Http404\n    else:\n        if year is None:\n            preferred = user.preferred_photo\n\n            if preferred is None:\n                data = user.default_photo\n                if data is None:\n                    image_buffer = io.open(default_image_path, mode=\"rb\")\n                else:\n                    image_buffer = io.BytesIO(data)\n\n            \n            else:\n                data = preferred.binary\n                if data:\n                    image_buffer = io.BytesIO(data)\n                else:\n                    image_buffer = io.open(default_image_path, mode=\"rb\")\n        else:\n            grade_number = Grade.number_from_name(year)\n            if user.photos.filter(grade_number=grade_number).exists():\n                data = user.photos.filter(grade_number=grade_number).first().binary\n            else:\n                data = None\n            if data:\n                image_buffer = io.BytesIO(data)\n            else:\n                image_buffer = io.open(default_image_path, mode=\"rb\")\n\n        response = HttpResponse(content_type=\"image/jpeg\")\n        response[\"Content-Disposition\"] = \"filename={}_{}.jpg\".format(user_id, year or preferred)\n        try:\n            img = image_buffer.read()\n        except UnicodeDecodeError:\n            img = io.open(default_image_path, mode=\"rb\").read()\n\n        image_buffer.close()\n        response.write(img)\n\n        return response", "docstring": "Displays a view of a user's picture.\n\nArgs:\nuser_id\nThe ID of the user whose picture is being fetched.\nyear\nThe user's picture from this year is fetched. If not\nspecified, use the preferred picture.", "source": "juraj-google-style"}
{"code": "def listFormats(self, vendorSpecific=None):\n        \n        response = self.listFormatsResponse(vendorSpecific)\n        return self._read_dataone_type_response(response, 'ObjectFormatList')", "docstring": "See Also: listFormatsResponse()\n\nArgs:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def is_supported(cls, desc):\n    for l in cls:\n        if l.matches(desc):\n            return True\n    return False", "docstring": "Determines if the given label descriptor is supported.\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):\nthe label descriptor to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "codesearchnet"}
{"code": "def _get_longest_diag_index(input_matrix):\n    diags = AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_dict(input_matrix, input_matrix.nonzero())\n    diags_values = list(diags.values())\n    diags_keys = list(diags.keys())\n    best_diag = np.argmax(diags_values)\n    diag_start_index = diags_keys[best_diag]\n    diag_start_length = diags_values[best_diag]\n    return (diag_start_index, diag_start_length)", "docstring": "Returns the start index and length of the longest diagonal in the given input.\nArgs:\ninput_matrix (numpy.ndarray): The input matrix.\nReturns:\ntuple: A tuple containing the start index and length of the longest diagonal.", "source": "github-repos"}
{"code": "def is_attribute_multivalued(self, attribute):\n    rule_set = self._attribute_rule_sets.get(attribute)\n    return rule_set.multiple_instances_permitted", "docstring": "Check if the attribute is allowed to have multiple instances.\n\nArgs:\nattribute (string): The name of the attribute\n(e.g., 'State'). Required.", "source": "codesearchnet"}
{"code": "def load_configuration(config_file, config_dir, service_file):\n    config_files = [config_file]\n    config = configparser.ConfigParser()\n    config.read_dict(DEFAULT_OPTIONS)\n    if (not os.path.isfile(config_file)):\n        raise ValueError(\"{f} configuration file either isn't readable or doesn't exist\".format(f=config_file))\n    if (service_file is not None):\n        if (not os.path.isfile(service_file)):\n            raise ValueError(\"{f} configuration file for a service check doesn't exist\".format(f=service_file))\n        else:\n            config_files.append(service_file)\n    elif (config_dir is not None):\n        if (not os.path.isdir(config_dir)):\n            raise ValueError(\"{d} directory with configuration files for service checks doesn't exist\".format(d=config_dir))\n        else:\n            config_files.extend(glob.glob(os.path.join(config_dir, '*.conf')))\n    try:\n        config.read(config_files)\n    except configparser.Error as exc:\n        raise ValueError(exc)\n    configuration_check(config)\n    bird_configuration = build_bird_configuration(config)\n    create_bird_config_files(bird_configuration)\n    return (config, bird_configuration)", "docstring": "Build configuration objects.\n\nIf all sanity checks against daemon and service check settings are passed\nthen it builds a ConfigParser object which holds all our configuration\nand a dictionary data structure which holds Bird configuration per IP\nprotocol version.\n\nArguments:\nconfig_file (str): The file name which holds daemon settings\nconfig_dir (str): The directory name which has configuration files\nfor each service check\nservice_file (str): A file which contains configuration for a single\nservice check\n\nReturns:\nA tuple with 1st element a ConfigParser object and 2nd element\na dictionary.\nRaises:\nValueError if a sanity check fails.", "source": "codesearchnet"}
{"code": "def select_by_key(self, key):\n        \n        self._selected_key = None\n        self._selected_item = None\n        for item in self.children.values():\n            item.attributes['selected'] = False\n\n        if key in self.children:\n            self.children[key].attributes['selected'] = True\n            self._selected_key = key\n            self._selected_item = self.children[key]", "docstring": "Selects an item by its key.\n\nArgs:\nkey (str): The unique string identifier of the item that have to be selected.", "source": "juraj-google-style"}
{"code": "def _process_update(self, item, feed_item):\n    pass", "docstring": "Handles updates to the creative asset object.\n\nSince creative assets are read only in DCM, there is nothing to do here,\nthis method is mandatory as it is invoked by the BaseDAO class.\n\nArgs:\nitem: The creative asset DCM object being updated.\nfeed_item: The feed item representing the creative asset from the\nBulkdozer feed.", "source": "github-repos"}
{"code": "def roots_in_unit_interval(coeffs):\n    all_roots = polynomial.polyroots(coeffs)\n    all_roots = all_roots[((_UNIT_INTERVAL_WIGGLE_START < all_roots.real) & (all_roots.real < _UNIT_INTERVAL_WIGGLE_END))]\n    real_inds = (np.abs(all_roots.imag) < _IMAGINARY_WIGGLE)\n    return all_roots[real_inds].real", "docstring": "r\"\"\"Compute roots of a polynomial in the unit interval.\n\nArgs:\ncoeffs (numpy.ndarray): A 1D array (size ``d + 1``) of coefficients in\nmonomial / power basis.\n\nReturns:\nnumpy.ndarray: ``N``-array of real values in :math:`\\left[0, 1\\right]`.", "source": "codesearchnet"}
{"code": "def list_resource_groups(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', '?api-version=', RESOURCE_API])\n    return do_get(endpoint, access_token)", "docstring": "List the resource groups in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def find_gaps(self, index=False):\n    return self.__find_incongruities(op=operator.lt, index=index)", "docstring": "Finds gaps in a striplog.\n\nArgs:\nindex (bool): If True, returns indices of intervals with\ngaps after them.\n\nReturns:\nStriplog: A striplog of all the gaps. A sort of anti-striplog.", "source": "codesearchnet"}
{"code": "def inspect_plugin(self, name):\n        \n        url = self._url('/plugins/{0}/json', name)\n        return self._result(self._get(url), True)", "docstring": "Retrieve plugin metadata.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\n\nReturns:\nA dict containing plugin info", "source": "juraj-google-style"}
{"code": "def show(config, section, opt):\n    if (section not in config.keys()):\n        raise ConfigError(\"section '{}' doesn't exist\".format(section))\n    if (opt not in config[section].keys()):\n        raise ConfigError(\"option '{}.{}' doesn't exist\".format(section, opt))\n    logger.info(config[section][opt])", "docstring": "Prints option value from the config.\n\nArgs:\nconfig (configobj.ConfigObj): config to work on.\nsection (str): section name.\nopt (str): option name.", "source": "codesearchnet"}
{"code": "def _run(self, cmd):\n    if isinstance(cmd, six.string_types):\n        cmd = salt.utils.args.shlex_split(cmd)\n    try:\n        log.debug(cmd)\n        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        return p.communicate()\n    except (OSError, IOError) as exc:\n        log.debug('Command Failed: %s', ' '.join(cmd))\n        log.debug('Error: %s', exc)\n        raise CommandExecutionError(exc)", "docstring": "Internal function for running commands. Used by the uninstall function.\n\nArgs:\ncmd (str, list): The command to run\n\nReturns:\nstr: The stdout of the command", "source": "codesearchnet"}
{"code": "def parse_query_param(url, param):\n    try:\n        return parse.parse_qs(parse.urlparse(url).query)[param][0]\n    except:\n        return None", "docstring": "Parses the query string of a URL and returns the value of a parameter.\n\nArgs:\nurl: A URL.\nparam: A string representing the name of the parameter.\n\nReturns:\nThe value of the parameter.", "source": "codesearchnet"}
{"code": "def get_lang_tags(index_page):\n    dom = dhtmlparser.parseString(index_page)\n    lang_tags = [get_html_lang_tags(dom), get_dc_lang_tags(dom), [detect_language(dom)], get_html_tag_lang_params(dom)]\n    return list(sorted(set((SourceString(normalize(lang), source=lang.source) for lang in sum(lang_tags, [])))))", "docstring": "Collect informations about language of the page from HTML and Dublin core\ntags and langdetect guesses.\n\nArgs:\nindex_page (str): HTML content of the page you wish to analyze.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "codesearchnet"}
{"code": "def resolve(self, name, version, max_id):\n    if (not isinstance(name, six.text_type)):\n        raise TypeError(('Name must be a Unicode sequence: %r' % name))\n    if (not isinstance(version, int)):\n        raise TypeError(('Version must be an int: %r' % version))\n    if (version <= 0):\n        raise ValueError(('Version must be positive: %s' % version))\n    if ((max_id is not None) and (max_id < 0)):\n        raise ValueError(('Max ID must be zero or positive: %s' % max_id))\n    versions = self.__tables.get(name)\n    if (versions is None):\n        if (max_id is None):\n            raise CannotSubstituteTable(('Found no table for %s, but no max_id' % name))\n        return placeholder_symbol_table(name, version, max_id)\n    table = versions.get(version)\n    if (table is None):\n        keys = list(versions)\n        keys.sort()\n        table = versions[keys[(- 1)]]\n    if ((table.version == version) and ((max_id is None) or (table.max_id == max_id))):\n        return table\n    if (max_id is None):\n        raise CannotSubstituteTable(('Found match for %s, but not version %d, and no max_id' % (name, version)))\n    return substitute_symbol_table(table, version, max_id)", "docstring": "Resolves the table for a given name and version.\n\nArgs:\nname (unicode): The name of the table to resolve.\nversion (int): The version of the table to resolve.\nmax_id (Optional[int]): The maximum ID of the table requested.\nMay be ``None`` in which case an exact match on ``name`` and ``version``\nis required.\n\nReturns:\nSymbolTable: The *closest* matching symbol table.  This is either an exact match,\na placeholder, or a derived substitute depending on what tables are registered.", "source": "codesearchnet"}
{"code": "def set_rgb_dim_level_with_time(\n        self,\n        channelIndex: int,\n        rgb: RGBColorState,\n        dimLevel: float,\n        onTime: float,\n        rampTime: float,\n    ):\n        \n        data = {\n            \"channelIndex\": channelIndex,\n            \"deviceId\": self.id,\n            \"simpleRGBColorState\": rgb,\n            \"dimLevel\": dimLevel,\n            \"onTime\": onTime,\n            \"rampTime\": rampTime,\n        }\n        return self._restCall(\n            \"device/control/setSimpleRGBColorDimLevelWithTime\", body=json.dumps(data)\n        )", "docstring": "sets the color and dimlevel of the lamp\n\nArgs:\nchannelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex or self.bottomLightChannelIndex\nrgb(RGBColorState): the color of the lamp\ndimLevel(float): the dimLevel of the lamp. 0.0 = off, 1.0 = MAX\nonTime(float):\nrampTime(float):\nReturns:\nthe result of the _restCall", "source": "juraj-google-style"}
{"code": "def incoming(self, messages):\n        \n        if self._observers:\n            campfire = self._room.get_campfire()\n            for message in messages:\n                for observer in self._observers:\n                    observer(Message(campfire, message))", "docstring": "Called when incoming messages arrive.\n\nArgs:\nmessages (tuple): Messages (each message is a dict)", "source": "juraj-google-style"}
{"code": "def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), block_id=None):\n    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1\n    with backend.name_scope(f'separable_conv_block_{block_id}'):\n        x = layers.Activation('relu')(ip)\n        if strides == (2, 2):\n            x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, kernel_size), name=f'separable_conv_1_pad_{block_id}')(x)\n            conv_pad = 'valid'\n        else:\n            conv_pad = 'same'\n        x = layers.SeparableConv2D(filters, kernel_size, strides=strides, name=f'separable_conv_1_{block_id}', padding=conv_pad, use_bias=False)(x)\n        x = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'separable_conv_1_bn_{block_id}')(x)\n        x = layers.Activation('relu')(x)\n        x = layers.SeparableConv2D(filters, kernel_size, name=f'separable_conv_2_{block_id}', padding='same', use_bias=False)(x)\n        x = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'separable_conv_2_bn_{block_id}')(x)\n    return x", "docstring": "Adds 2 blocks of [relu-separable conv-batchnorm].\n\nArgs:\nip: Input tensor\nfilters: Number of output filters per layer\nkernel_size: Kernel size of separable convolutions\nstrides: Strided convolution for downsampling\nblock_id: String block_id\n\nReturns:\nA Keras tensor", "source": "github-repos"}
{"code": "def _transform_binary_composition_to_expression(expression, node, context):\n    \n    if expression.operator not in constants.SUPPORTED_OPERATORS:\n        raise NotImplementedError(\n            u'Filter operation \"{}\" is not supported by the SQL backend.'.format(\n                expression.operator))\n    sql_operator = constants.SUPPORTED_OPERATORS[expression.operator]\n    left = _expression_to_sql(expression.left, node, context)\n    right = _expression_to_sql(expression.right, node, context)\n    if sql_operator.cardinality == constants.CARDINALITY_UNARY:\n        left, right = _get_column_and_bindparam(left, right, sql_operator)\n        clause = getattr(left, sql_operator.name)(right)\n        return clause\n    elif sql_operator.cardinality == constants.CARDINALITY_BINARY:\n        clause = getattr(sql_expressions, sql_operator.name)(left, right)\n        return clause\n    elif sql_operator.cardinality == constants.CARDINALITY_LIST_VALUED:\n        left, right = _get_column_and_bindparam(left, right, sql_operator)\n        \n        right.expanding = True\n        clause = getattr(left, sql_operator.name)(right)\n        return clause\n    raise AssertionError(u'Unreachable, operator cardinality {} for compiler expression {} is '\n                         u'unknown'.format(sql_operator.cardinality, expression))", "docstring": "Transform a BinaryComposition compiler expression into a SQLAlchemy expression.\n\nRecursively calls _expression_to_sql to convert its left and right sub-expressions.\n\nArgs:\nexpression: expression, BinaryComposition compiler expression.\nnode: SqlNode, the SqlNode the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression.", "source": "juraj-google-style"}
{"code": "def cast(self, value):\n        \n        \n        \n        if self.type is None:\n            return value\n\n        \n        if self.type in (str, int, float):\n            try:\n                return self.type(value)\n            except Exception as e:\n                raise errors.BisonError(\n                    'Failed to cast {} to {}'.format(value, self.type)\n                ) from e\n\n        \n        \n        elif self.type == bool:\n            return value.lower() == 'true'\n\n        \n        else:\n            raise errors.BisonError('Unsupported type for casting: {}'.format(self.type))", "docstring": "Cast a value to the type required by the option, if one is set.\n\nThis is used to cast the string values gathered from environment\nvariable into their required type.\n\nArgs:\nvalue: The value to cast.\n\nReturns:\nThe value casted to the expected type for the option.", "source": "juraj-google-style"}
{"code": "def _convert_appengine_app_assertion_credentials(credentials):\n    \n    \n    return google.auth.app_engine.Credentials(\n        scopes=_helpers.string_to_scopes(credentials.scope),\n        service_account_id=credentials.service_account_id)", "docstring": "Converts to :class:`google.auth.app_engine.Credentials`.\n\nArgs:\ncredentials (oauth2client.contrib.app_engine.AppAssertionCredentials):\nThe credentials to convert.\n\nReturns:\ngoogle.oauth2.service_account.Credentials: The converted credentials.", "source": "juraj-google-style"}
{"code": "def exp(array, ty):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n\n    weld_template = \n    weld_obj.weld_code = weld_template % {\"array\": array_var, \"ty\": ty}\n    return weld_obj", "docstring": "Computes the per-element exponenet of the passed-in array.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def update_config(self, new_config):\n    self._assert_not_running()\n    self._ad.log.info('[LogcatService] Changing config from %s to %s', self._config, new_config)\n    self._config = new_config", "docstring": "Updates the configuration for the service.\n\nThe service needs to be stopped before updating, and explicitly started\nafter the update.\n\nThis will reset the service. Previous output files may be orphaned if\noutput path is changed.\n\nArgs:\nnew_config: Config, the new config to use.", "source": "github-repos"}
{"code": "def get_language(self, text):\n        \n        files = {'text': text}\n        res, status_code = self.post(self.language_service, files=files)\n\n        if status_code != 200:\n            logger.debug('Language recognition failed.')\n\n        return self.decode(res), status_code", "docstring": "Recognise the language of the text in input\n\nArgs:\nid (str): The text whose the language needs to be recognised\n\nReturns:\ndict, int: A dict containing the recognised language and the\nconfidence score.", "source": "juraj-google-style"}
{"code": "def get_arrhenius_plot(temps, diffusivities, diffusivity_errors=None, **kwargs):\n    (Ea, c, _) = fit_arrhenius(temps, diffusivities)\n    from pymatgen.util.plotting import pretty_plot\n    plt = pretty_plot(12, 8)\n    arr = (c * np.exp(((- Ea) / ((const.k / const.e) * np.array(temps)))))\n    t_1 = (1000 / np.array(temps))\n    plt.plot(t_1, diffusivities, 'ko', t_1, arr, 'k--', markersize=10, **kwargs)\n    if (diffusivity_errors is not None):\n        n = len(diffusivity_errors)\n        plt.errorbar(t_1[0:n], diffusivities[0:n], yerr=diffusivity_errors, fmt='ko', ecolor='k', capthick=2, linewidth=2)\n    ax = plt.axes()\n    ax.set_yscale('log')\n    plt.text(0.6, 0.85, 'E$_a$ = {:.0f} meV'.format((Ea * 1000)), fontsize=30, transform=plt.axes().transAxes)\n    plt.ylabel('D (cm$^2$/s)')\n    plt.xlabel('1000/T (K$^{-1}$)')\n    plt.tight_layout()\n    return plt", "docstring": "Returns an Arrhenius plot.\n\nArgs:\ntemps ([float]): A sequence of temperatures.\ndiffusivities ([float]): A sequence of diffusivities (e.g.,\nfrom DiffusionAnalyzer.diffusivity).\ndiffusivity_errors ([float]): A sequence of errors for the\ndiffusivities. If None, no error bar is plotted.\n\\\\*\\\\*kwargs:\nAny keyword args supported by matplotlib.pyplot.plot.\n\nReturns:\nA matplotlib.pyplot object. Do plt.show() to show the plot.", "source": "codesearchnet"}
{"code": "def build_input(data, batch_size, dataset, train):\n    image_size = 32\n    depth = 3\n    num_classes = (10 if (dataset == 'cifar10') else 100)\n    (images, labels) = data\n    num_samples = (images.shape[0] - (images.shape[0] % batch_size))\n    dataset = tf.contrib.data.Dataset.from_tensor_slices((images[:num_samples], labels[:num_samples]))\n\n    def map_train(image, label):\n        image = tf.image.resize_image_with_crop_or_pad(image, (image_size + 4), (image_size + 4))\n        image = tf.random_crop(image, [image_size, image_size, 3])\n        image = tf.image.random_flip_left_right(image)\n        image = tf.image.per_image_standardization(image)\n        return (image, label)\n\n    def map_test(image, label):\n        image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)\n        image = tf.image.per_image_standardization(image)\n        return (image, label)\n    dataset = dataset.map((map_train if train else map_test))\n    dataset = dataset.batch(batch_size)\n    dataset = dataset.repeat()\n    if train:\n        dataset = dataset.shuffle(buffer_size=(16 * batch_size))\n    (images, labels) = dataset.make_one_shot_iterator().get_next()\n    images = tf.reshape(images, [batch_size, image_size, image_size, depth])\n    labels = tf.reshape(labels, [batch_size, 1])\n    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])\n    labels = tf.sparse_to_dense(tf.concat([indices, labels], 1), [batch_size, num_classes], 1.0, 0.0)\n    assert (len(images.get_shape()) == 4)\n    assert (images.get_shape()[0] == batch_size)\n    assert (images.get_shape()[(- 1)] == 3)\n    assert (len(labels.get_shape()) == 2)\n    assert (labels.get_shape()[0] == batch_size)\n    assert (labels.get_shape()[1] == num_classes)\n    if (not train):\n        tf.summary.image('images', images)\n    return (images, labels)", "docstring": "Build CIFAR image and labels.\n\nArgs:\ndata_path: Filename for cifar10 data.\nbatch_size: Input batch size.\ntrain: True if we are training and false if we are testing.\n\nReturns:\nimages: Batches of images of size\n[batch_size, image_size, image_size, 3].\nlabels: Batches of labels of size [batch_size, num_classes].\n\nRaises:\nValueError: When the specified dataset is not supported.", "source": "codesearchnet"}
{"code": "def get_reachable_ports(self, id_or_uri, start=0, count=(- 1), filter='', query='', sort='', networks=[]):\n    uri = (self._client.build_uri(id_or_uri) + '/reachable-ports')\n    if networks:\n        elements = \"'\"\n        for n in networks:\n            elements += (n + ',')\n        elements = (elements[:(- 1)] + \"'\")\n        uri = ((uri + '?networks=') + elements)\n    return self._client.get(self._client.build_query_uri(start=start, count=count, filter=filter, query=query, sort=sort, uri=uri))", "docstring": "Gets the storage ports that are connected on the specified networks\nbased on the storage system port's expected network connectivity.\n\nReturns:\nlist: Reachable Storage Port List.", "source": "codesearchnet"}
{"code": "def params(self, params):\n        \n        url = furl(self._request.rawurl)\n        url = url.add(params)\n        self._request.url = url.url\n        self.add_matcher(matcher('QueryMatcher', params))", "docstring": "Defines a set of URL query params to match.\n\nArguments:\nparams (dict): set of params to match.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def save_metrics(self, split, metrics, combined=True):\n    if not self.is_world_process_zero():\n        return\n    path = os.path.join(self.args.output_dir, f'{split}_results.json')\n    with open(path, 'w') as f:\n        json.dump(metrics, f, indent=4, sort_keys=True)\n    if combined:\n        path = os.path.join(self.args.output_dir, 'all_results.json')\n        if os.path.exists(path):\n            with open(path) as f:\n                all_metrics = json.load(f)\n        else:\n            all_metrics = {}\n        all_metrics.update(metrics)\n        with open(path, 'w') as f:\n            json.dump(all_metrics, f, indent=4, sort_keys=True)", "docstring": "Save metrics into a json file for that split, e.g. `train_results.json`.\n\nUnder distributed environment this is done only for a process with rank 0.\n\nArgs:\nsplit (`str`):\nMode/split name: one of `train`, `eval`, `test`, `all`\nmetrics (`Dict[str, float]`):\nThe metrics returned from train/evaluate/predict\ncombined (`bool`, *optional*, defaults to `True`):\nCreates combined metrics by updating `all_results.json` with metrics of this call\n\nTo understand the metrics please read the docstring of [`~Trainer.log_metrics`]. The only difference is that raw\nunformatted numbers are saved in the current method.", "source": "github-repos"}
{"code": "def __init__(self, xid=None, role=None, generation_id=None):\n        \n        super().__init__(xid, role, generation_id)\n        self.header.message_type = Type.OFPT_ROLE_REQUEST", "docstring": "Create a RoleRequest with the optional parameters below.\n\nArgs:\nxid (int): OpenFlow xid to the header.\nrole (:class:`~.controller2switch.common.ControllerRole`):\nIs the new role that the controller wants to assume.\ngeneration_id (int): Master Election Generation Id.", "source": "juraj-google-style"}
{"code": "def get_parent_of_type(typ, obj):\n    if (type(typ) is not text):\n        typ = typ.__name__\n    while hasattr(obj, 'parent'):\n        obj = obj.parent\n        if (obj.__class__.__name__ == typ):\n            return obj", "docstring": "Finds first object up the parent chain of the given type.\nIf no parent of the given type exists None is returned.\n\nArgs:\ntyp(str or python class): The type of the model object we are\nlooking for.\nobj (model object): Python model object which is the start of the\nsearch process.", "source": "codesearchnet"}
{"code": "def has_unchecked_field(self, locator, **kwargs):\n    kwargs['checked'] = False\n    return self.has_selector('field', locator, **kwargs)", "docstring": "Checks if the page or current node has a radio button or checkbox with the given label,\nvalue, or id, that is currently unchecked.\n\nArgs:\nlocator (str): The label, name, or id of an unchecked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it exists.", "source": "codesearchnet"}
{"code": "def _ParseLogLine(self, parser_mediator, structure):\n    try:\n        date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=structure.date_time)\n        date_time.is_local_time = True\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time))\n        return\n    event_data = WinFirewallEventData()\n    event_data.action = self._GetStructureValue(structure, 'action')\n    event_data.dest_ip = self._GetStructureValue(structure, 'dest_ip')\n    event_data.dest_port = self._GetStructureValue(structure, 'dest_port')\n    event_data.flags = self._GetStructureValue(structure, 'flags')\n    event_data.icmp_code = self._GetStructureValue(structure, 'icmp_code')\n    event_data.icmp_type = self._GetStructureValue(structure, 'icmp_type')\n    event_data.info = self._GetStructureValue(structure, 'info')\n    event_data.path = self._GetStructureValue(structure, 'path')\n    event_data.protocol = self._GetStructureValue(structure, 'protocol')\n    event_data.size = self._GetStructureValue(structure, 'size')\n    event_data.source_ip = self._GetStructureValue(structure, 'source_ip')\n    event_data.source_port = self._GetStructureValue(structure, 'source_port')\n    event_data.tcp_ack = self._GetStructureValue(structure, 'tcp_ack')\n    event_data.tcp_seq = self._GetStructureValue(structure, 'tcp_seq')\n    event_data.tcp_win = self._GetStructureValue(structure, 'tcp_win')\n    if self._use_local_timezone:\n        time_zone = parser_mediator.timezone\n    else:\n        time_zone = pytz.UTC\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN, time_zone=time_zone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parse a single log line and and produce an event object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "codesearchnet"}
{"code": "def get_max_size(pool, num_option, item_length):\n    \n    max_items = POOL_SIZE / item_length\n    \n    \n    existing = POOL_OPTION_MIN_SIZE * num_option + sum([max(0, len(pool.get(i, {})) - 5) for i in xrange(num_option)])\n    return int(max_items - existing)", "docstring": "Calculate the max number of item that an option can stored in the pool at give time.\n\nThis is to limit the pool size to POOL_SIZE\n\nArgs:\noption_index (int): the index of the option to calculate the size for\npool (dict): answer pool\nnum_option (int): total number of options available for the question\nitem_length (int): the length of the item\n\nReturns:\nint: the max number of items that `option_index` can have", "source": "juraj-google-style"}
{"code": "def pyrdf2(value, class_type=None, datatype=None, lang=None, **kwargs):\n    \n    try:\n\n        if isinstance(value, dict):\n            \n            \n            \n            if value.get('type') == \"literal\":\n                if not value.get(\"datatype\"):\n                    return XsdString(value['value'])\n                else:\n                    try:\n                        if value.get(\"lang\"):\n                            \n                            return DT_LOOKUP[value['datatype']](value['value'],\n                                    lang=value.get(\"lang\"))\n                        else:\n                            return DT_LOOKUP[value['datatype']](value['value'])\n                    except:\n                        rtn_val = BaseRdfDataType(value['value'])\n                        rtn_val.datatype = Uri(value['datatype'])\n                        return rtn_val\n            else:\n                return DT_LOOKUP[value['type']](value['value'])\n        elif isinstance(value, BaseRdfDataType):\n            return value\n        else:\n            return DT_LOOKUP[type(value)](value)\n    except:\n        pdb.set_trace()\n        pass", "docstring": "Coverts an input to one of the rdfdatatypes classes\n\nArgs:\nvalue: any rdfdatatype, json dict or vlaue\nclass_type: \"literal\", \"uri\" or \"blanknode\"\ndatatype: \"xsd:string\", \"xsd:int\" , etc", "source": "juraj-google-style"}
{"code": "def configure(cls, api_token, api_url='https:\n    cls._auth = QuboleAuth(api_token)\n    cls.api_token = api_token\n    cls.version = version\n    cls.baseurl = api_url\n    if (poll_interval < Qubole.MIN_POLL_INTERVAL):\n        log.warn(('Poll interval cannot be less than %s seconds. Setting it to %s seconds.\\n' % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL)))\n        cls.poll_interval = Qubole.MIN_POLL_INTERVAL\n    else:\n        cls.poll_interval = poll_interval\n    cls.skip_ssl_cert_check = skip_ssl_cert_check\n    cls.cloud_name = cloud_name.lower()\n    cls.cached_agent = None", "docstring": "Set parameters governing interaction with QDS\n\nArgs:\n`api_token`: authorization token for QDS. required\n\n`api_url`: the base URL for QDS API. configurable for testing only\n\n`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)\n\n`poll_interval`: interval in secs when polling QDS for events", "source": "codesearchnet"}
{"code": "def FindUnspentCoins(self, from_addr=None, use_standard=False, watch_only_val=0):\n    ret = []\n    for coin in self.GetCoins():\n        if (((coin.State & CoinState.Confirmed) > 0) and ((coin.State & CoinState.Spent) == 0) and ((coin.State & CoinState.Locked) == 0) and ((coin.State & CoinState.Frozen) == 0) and ((coin.State & CoinState.WatchOnly) == watch_only_val)):\n            do_exclude = False\n            if self._vin_exclude:\n                for to_exclude in self._vin_exclude:\n                    if ((coin.Reference.PrevIndex == to_exclude.PrevIndex) and (coin.Reference.PrevHash == to_exclude.PrevHash)):\n                        do_exclude = True\n            if do_exclude:\n                continue\n            if (from_addr is not None):\n                if (coin.Output.ScriptHash == from_addr):\n                    ret.append(coin)\n            elif use_standard:\n                contract = self._contracts[coin.Output.ScriptHash.ToBytes()]\n                if contract.IsStandard:\n                    ret.append(coin)\n            else:\n                ret.append(coin)\n    return ret", "docstring": "Finds unspent coin objects in the wallet.\n\nArgs:\nfrom_addr (UInt160): a bytearray (len 20) representing an address.\nuse_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ).\nwatch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.\n\nReturns:\nlist: a list of ``neo.Wallet.Coins`` in the wallet that are not spent.", "source": "codesearchnet"}
{"code": "def saveAsTFRecords(df, output_dir):\n  \n  tf_rdd = df.rdd.mapPartitions(toTFExample(df.dtypes))\n  tf_rdd.saveAsNewAPIHadoopFile(output_dir, \"org.tensorflow.hadoop.io.TFRecordFileOutputFormat\",\n                                keyClass=\"org.apache.hadoop.io.BytesWritable\",\n                                valueClass=\"org.apache.hadoop.io.NullWritable\")", "docstring": "Save a Spark DataFrame as TFRecords.\n\nThis will convert the DataFrame rows to TFRecords prior to saving.\n\nArgs:\n:df: Spark DataFrame\n:output_dir: Path to save TFRecords", "source": "juraj-google-style"}
{"code": "def run(self):\n    with util.timed_block() as t:\n        files = self._collect_files()\n    log.info('Collected <33>{} <32>files in <33>{}s'.format(len(files), t.elapsed_s))\n    if self.verbose:\n        for p in files:\n            log.info('  <0>{}', p)\n    if (not files):\n        return self.allow_empty\n    with util.timed_block() as t:\n        results = self._run_checks(files)\n    log.info('Code checked in <33>{}s', t.elapsed_s)\n    success = True\n    for (name, retcodes) in results.items():\n        if any(((x != 0) for x in retcodes)):\n            success = False\n            log.err('<35>{} <31>failed with: <33>{}'.format(name, retcodes))\n    return success", "docstring": "Run all linters and report results.\n\nReturns:\nbool: **True** if all checks were successful, **False** otherwise.", "source": "codesearchnet"}
{"code": "def starts_with(self, prefix):\n        \n        prefix = prefix.lower()\n        found_words = []\n\n        res = cgaddag.gdg_starts_with(self.gdg, prefix.encode(encoding=\"ascii\"))\n        tmp = res\n\n        while tmp:\n            word = tmp.contents.str.decode(\"ascii\")\n            found_words.append(word)\n            tmp = tmp.contents.next\n\n        cgaddag.gdg_destroy_result(res)\n        return found_words", "docstring": "Find all words starting with a prefix.\n\nArgs:\nprefix: A prefix to be searched for.\n\nReturns:\nA list of all words found.", "source": "juraj-google-style"}
{"code": "def _DeserializeAttributeContainer(self, container_type, serialized_data):\n    \n    if not serialized_data:\n      return None\n\n    if self._serializers_profiler:\n      self._serializers_profiler.StartTiming(container_type)\n\n    try:\n      serialized_string = serialized_data.decode('utf-8')\n    except UnicodeDecodeError as exception:\n      raise IOError('Unable to decode serialized data: {0!s}'.format(\n          exception))\n    attribute_container = self._serializer.ReadSerialized(serialized_string)\n\n    if self._serializers_profiler:\n      self._serializers_profiler.StopTiming(container_type)\n\n    return attribute_container", "docstring": "Deserializes an attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nserialized_data (bytes): serialized attribute container data.\n\nReturns:\nAttributeContainer: attribute container or None.\n\nRaises:\nIOError: if the serialized data cannot be decoded.\nOSError: if the serialized data cannot be decoded.", "source": "juraj-google-style"}
{"code": "def __init__(self, pqc: cirq.Circuit, initializer: tf.keras.initializers.Initializer=tf.keras.initializers.RandomUniform(0, 2), name: Union[None, str]=None):\n    raw_symbol_names = list(sorted(tfq.util.get_circuit_symbols(pqc)))\n    symbol_names = tf.constant([str(x) for x in raw_symbol_names], dtype=tf.string)\n    values = [tf.Variable(initializer(shape=[len(raw_symbol_names)]))]\n    value_layers = [[]]\n    super().__init__(tfq.convert_to_tensor([pqc]), pqc.all_qubits(), symbol_names, values, value_layers)", "docstring": "Initializes a DirectQuantumCircuit.\n\nArgs:\npqc: Representation of a parameterized quantum circuit.\ninitializer: A `tf.keras.initializers.Initializer` which specifies how to\ninitialize the values of the parameters in `circuit`.  The default\ninitializer assumes parameters of gates are exponents, so that one full\nperiod is covered by the parameter range 0 to 2.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def get_rml(self, rml_def, **kwargs):\n        \n        if isinstance(rml_def, str):\n            rml_procs = self.es_defs.get(\"kds_esRmlProcessor\", [])\n            for item in rml_procs:\n                if item['name'] == rml_def:\n                    rml_def = item\n                    break\n        proc_kwargs = {rml_def['subj']: self.subject,\n                       \"dataset\": self.dataset}\n        proc_kwargs.update(rml_def['proc_kwargs'])\n        return rml_def['processor'](**proc_kwargs)", "docstring": "returns the rml mapping output for specified mapping\n\nArgs:\n-----\nrml_def: The name of the mapping or a dictionary definition", "source": "juraj-google-style"}
{"code": "def get_text_features(self, input_ids=None, attention_mask=None, position_ids=None, token_type_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1]\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\nthe projection layer to the pooled output of [`TFCLIPTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import TFVisionTextDualEncoderModel, AutoTokenizer\n\n>>> model = TFVisionTextDualEncoderModel.from_pretrained(\"clip-italian/clip-italian\", from_pt=True)\n>>> tokenizer = AutoTokenizer.from_pretrained(\"clip-italian/clip-italian\")\n\n>>> inputs = tokenizer([\"una foto di un gatto\", \"una foto di un cane\"], padding=True, return_tensors=\"np\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def delete(self, resource, force=False, export_only=None, suppress_device_updates=None, timeout=(- 1)):\n    custom_headers = {'If-Match': '*'}\n    if ('uri' in resource):\n        uri = resource['uri']\n    else:\n        uri = self._client.build_uri(resource)\n    if suppress_device_updates:\n        uri += '?suppressDeviceUpdates=true'\n    if export_only:\n        custom_headers['exportOnly'] = True\n    return self._client.delete(uri, force=force, timeout=timeout, custom_headers=custom_headers)", "docstring": "Deletes a managed volume.\n\nArgs:\nresource (dict):\nObject to delete.\nforce:\nIf set to true, the operation completes despite any problems with\nnetwork connectivity or errors on the resource itself. The default is false.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nexport_only:\nValid prior to API500. By default, volumes will be deleted from OneView, and storage system.\nTo delete the volume from OneView only, you must set its value to True.\nSetting its value to False has the same behavior as the default behavior.\nsuppress_device_updates:\nValid API500 onwards. By default, volumes will be deleted from OneView, and storage system.\nTo delete the volume from OneView only, you must set its value to True.\nSetting its value to False has the same behavior as the default behavior.\n\nReturns:\nbool: Indicates if the volume was successfully deleted.", "source": "codesearchnet"}
{"code": "def _dataset_partition(self, mode, config, params):\n    \n    if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, \"tpu_config\"):\n      \n      self._next_partition_id = 0\n      return 0, 1\n    phift = config.tpu_config.per_host_input_for_training\n    \n    if (hasattr(tpu_config.InputPipelineConfig, \"BROADCAST\") and\n        phift == tpu_config.InputPipelineConfig.BROADCAST):\n      return 0, 1\n    if phift:\n      num_hosts = (params[\"context\"].num_hosts if \"context\" in params\n                   else config.tpu_config.num_shards \n      num_partitions = max(num_hosts, 1)\n    else:\n      num_partitions = config.tpu_config.num_shards\n    partition_id = getattr(self, \"_next_partition_id\", 0)\n    self._next_partition_id = partition_id + 1\n    tf.logging.info(\"num_partitions = %d partition_id = %d\" %\n                    (num_partitions, partition_id))\n    assert partition_id < num_partitions\n    return partition_id, num_partitions", "docstring": "Which part of the training data to read.\n\nIf there are multiple parallel calls to input_fn (multiple TPU hosts),\nthen we want each one to read from a separate partition of the training\ndata.\n\nArgs:\nmode: tf.estimator.ModeKeys\nconfig: RunConfig\nparams: A dict that contains parameters.\nReturns:\npartition_id: an integer\nnum_partitions: an integer", "source": "juraj-google-style"}
{"code": "def __init__(self, debug=False):\n    \n    facility = logging.handlers.SysLogHandler.LOG_DAEMON\n    self.logger = logger.Logger(\n        name='google-clock-skew', debug=debug, facility=facility)\n    self.distro_utils = distro_utils.Utils(debug=debug)\n    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)\n    try:\n      with file_utils.LockFile(LOCKFILE):\n        self.logger.info('Starting Google Clock Skew daemon.')\n        self.watcher.WatchMetadata(\n            self.HandleClockSync, metadata_key=self.drift_token,\n            recursive=False)\n    except (IOError, OSError) as e:\n      self.logger.warning(str(e))", "docstring": "Constructor.\n\nArgs:\ndebug: bool, True if debug output should write to the console.", "source": "juraj-google-style"}
{"code": "def add_user_role(self, user, role):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.add_user_role(user, role)", "docstring": "Add role to given user.\n\nArgs:\nuser (string): User name.\nrole (string): Role to assign.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def getDelOps(self, buid):\n        \n        return (\n            ('prop:del', (buid, self.form.name, self.name, self.storinfo)),\n        )", "docstring": "Get a list of storage operations to delete this property from the buid.\n\nArgs:\nbuid (bytes): The node buid.\n\nReturns:\n(tuple): The storage operations", "source": "juraj-google-style"}
{"code": "def set_description(self, name, value=None, default=False, disable=False):\n    string = 'description'\n    commands = self.command_builder(string, value=value, default=default, disable=disable)\n    return self.configure_interface(name, commands)", "docstring": "Configures the interface description\n\nEosVersion:\n4.13.7M\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\nvalue (string): The value to set the description to.\ndefault (boolean): Specifies to default the interface description\ndisable (boolean): Specifies to negate the interface description\n\nReturns:\nTrue if the operation succeeds otherwise False", "source": "codesearchnet"}
{"code": "def GetName(self, number):\n    value = self._data_type_definition.values_per_number.get(number, None)\n    if (not value):\n        return None\n    return value.name", "docstring": "Retrieves the name of an enumeration value by number.\n\nArgs:\nnumber (int): number.\n\nReturns:\nstr: name of the enumeration value or None if no corresponding\nenumeration value was found.", "source": "codesearchnet"}
{"code": "def ignore_errors(log_warning=False):\n\n    def _apply_fn(dataset):\n        return dataset.ignore_errors(log_warning)\n    return _apply_fn", "docstring": "Creates a `Dataset` from another `Dataset` and silently ignores any errors.\n\nUse this transformation to produce a dataset that contains the same elements\nas the input, but silently drops any elements that caused an error. For\nexample:\n\n```python\ndataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])\n\n# Computing `tf.debugging.check_numerics(1. / 0.)` will raise an\nInvalidArgumentError.\ndataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, \"error\"))\n\n# Using `ignore_errors()` will drop the element that causes an error.\ndataset =\ndataset.apply(tf.data.experimental.ignore_errors())  # ==> {1., 0.5, 0.2}\n```\nArgs:\nlog_warning: (Optional.) A 'tf.bool' scalar indicating whether ignored\nerrors should be logged to stderr. Defaults to 'False'.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def timezone(self, timezone=0):\n        \n        tz_dt = timedelta(hours=timezone)\n        for segment in self.segments:\n            for point in segment.points:\n                point.time = point.time + tz_dt\n        return self", "docstring": "Sets the timezone of the entire track\n\nArgs:\ntimezone (int): Timezone hour delta", "source": "juraj-google-style"}
{"code": "def period_start_day(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `period_start_day`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `period_start_day`')\n\n        self._period_start_day = value", "docstring": "Corresponds to IDD Field `period_start_day`\n\nArgs:\nvalue (str): value for IDD Field `period_start_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def cacheResult(fn):\n        \n        cache = {}\n\n        @functools.wraps(fn)\n        def wrapper(*args, **kwargs):\n            \n            key = args + tuple(kwargs.items())\n            \n            try:\n                \n                if key in cache:\n                    return cache[key]\n            except TypeError:\n                \n                return fn(*args, **kwargs)\n            \n            cache[key] = fn(*args, **kwargs)\n            return cache[key]\n\n        \n        wrapper.cache = cache\n        return wrapper", "docstring": "Method decorator: calculate the value on first access, produce the cached value thereafter.\n\nIf the function takes arguments, the cache is a dictionary using all arguments as the key.\n\nArgs:\nfn (method): function to decorate\n\nReturns:\nmethod: wrapper function with caching", "source": "juraj-google-style"}
{"code": "def _FormatPackedIPv6Address(self, packed_ip_address):\n    \n    \n    octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])\n    octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]\n    \n    return ':'.join([\n        '{0:04x}'.format(octet_pair) for octet_pair in octet_pairs])", "docstring": "Formats a packed IPv6 address as a human readable string.\n\nArgs:\npacked_ip_address (list[int]): packed IPv6 address.\n\nReturns:\nstr: human readable IPv6 address.", "source": "juraj-google-style"}
{"code": "def _gen_ipython_string(func, args, defaults, original_doc):\n    magic_string = ('%s(' % func.__name__)\n    if defaults:\n        default_offset = (len(args) - len(defaults))\n    else:\n        default_offset = len(args)\n    for (i, value) in enumerate(args):\n        if (i >= default_offset):\n            magic_string += ('%s=%s, ' % (value, defaults[(i - default_offset)]))\n        else:\n            magic_string += ('%s, ' % value)\n    if args:\n        magic_string = magic_string[:(- 2)]\n    magic_string += ')\\n\\n'\n    if (original_doc is not None):\n        magic_string += original_doc\n    return magic_string", "docstring": "Provides auto-complete hint to ipython.\n\nIf the first line in a docstring is fn(arg1=, arg2=) then they are added to\nauto-complete.  This cannot be called on an instance method.\n\nArgs:\nfunc: The function that will be modified.\nargs: The arguments that this function takes in order.\ndefaults: The default arguments corresponding the last arguments.\noriginal_doc: Original docstring to assign after the magic string.\nReturns:\nThe new doc string with the magic bit prepended.", "source": "codesearchnet"}
{"code": "def register_for_auto_class(cls, auto_class='AutoTokenizer'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the\nlibrary are already mapped with `AutoTokenizer`.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"AutoTokenizer\"`):\nThe auto class to register this new tokenizer with.", "source": "github-repos"}
{"code": "def pre_verify(method):\n    \n    @wraps(method)\n    def wrapper(self, *args, **kwargs):  \n        self._verify_page()  \n        return method(self, *args, **kwargs)\n    return wrapper", "docstring": "Decorator that calls self._verify_page() before executing the decorated method\n\nArgs:\nmethod (callable): The method to decorate.\n\nReturns:\nDecorated method", "source": "juraj-google-style"}
{"code": "def evaluate_extracted_tokens(gold_content, extr_content):\n    if isinstance(gold_content, string_):\n        gold_content = simple_tokenizer(gold_content)\n    if isinstance(extr_content, string_):\n        extr_content = simple_tokenizer(extr_content)\n    gold_set = set(gold_content)\n    extr_set = set(extr_content)\n    jaccard = (len((gold_set & extr_set)) / len((gold_set | extr_set)))\n    levenshtein = dameraulevenshtein(gold_content, extr_content)\n    return {'jaccard': jaccard, 'levenshtein': levenshtein}", "docstring": "Evaluate the similarity between gold-standard and extracted content,\ntypically for a single HTML document, as another way of evaluating the\nperformance of an extractor model.\n\nArgs:\ngold_content (str or Sequence[str]): Gold-standard content, either as a\nstring or as an already-tokenized list of tokens.\nextr_content (str or Sequence[str]): Extracted content, either as a\nstring or as an already-tokenized list of tokens.\n\nReturns:\nDict[str, float]", "source": "codesearchnet"}
{"code": "def camelize(word):\n    return ''.join(((w[0].upper() + w[1:]) for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' ')))", "docstring": "Convert a word from lower_with_underscores to CamelCase.\n\nArgs:\nword: The string to convert.\nReturns:\nThe modified string.", "source": "codesearchnet"}
{"code": "def is_common(schema):\n    \n    if isinstance(schema, StreamSchema):\n        return schema.schema() in _SCHEMA_COMMON\n    if isinstance(schema, CommonSchema):\n        return True\n    if isinstance(schema, basestring):\n        return is_common(StreamSchema(schema))\n    return False", "docstring": "Is `schema` an common schema.\n\nArgs:\nschema: Scheme to test.\n\nReturns:\nbool: ``True`` if schema is a common schema, otherwise ``False``.", "source": "juraj-google-style"}
{"code": "def get_config(self):\n    return {}", "docstring": "Returns a Python dict of the object config.\n\nA constraint config is a Python dictionary (JSON-serializable) that can\nbe used to reinstantiate the same object.\n\nReturns:\nPython dict containing the configuration of the constraint object.", "source": "github-repos"}
{"code": "def tpu_model_inference_fn(features):\n\n    def custom_getter(getter, name, *args, **kwargs):\n        with tf.control_dependencies(None):\n            return tf.guarantee_const(getter(name, *args, **kwargs), name=(name + '/GuaranteeConst'))\n    with tf.variable_scope('', custom_getter=custom_getter):\n        t = int(time.time())\n        epoch_time = tf.constant(t, name=('epoch_time_%d' % t))\n        with tf.control_dependencies([epoch_time]):\n            return model_inference_fn(features, False, FLAGS.flag_values_dict())", "docstring": "Builds the model graph suitable for running on TPU.\n\nIt does two things:\n1) Mark all weights as constant, which improves TPU inference performance\nbecause it prevents the weights being transferred to the TPU every call\nto Session.run().\n2) Adds constant to the graph with a unique value and marks it as a\ndependency on the rest of the model. This works around a TensorFlow bug\nthat prevents multiple models being run on a single TPU.\n\nReturns:\n(policy_output, value_output, logits) tuple of tensors.", "source": "codesearchnet"}
{"code": "def compose_full_url(pub, uuid_url=False):\n    \n    url = compose_path(pub, uuid_url)\n\n    if WEB_PORT == 80:\n        return \"%s:\n\n    return \"%s:", "docstring": "Compose full url for given `pub`, with protocol, server's address and port.\n\nArgs:\npub (obj): :class:`.DBPublication` instance.\nuuid_url (bool, default False): Compose URL using UUID.\n\nReturns:\nstr: Absolute url of the publication.\nRaises:\nPrivatePublicationError: When the `pub` is private publication.", "source": "juraj-google-style"}
{"code": "def cast(self, dtype: tf.DType) -> 'TensorFluent':\n    if (self.dtype == dtype):\n        return self\n    t = tf.cast(self.tensor, dtype)\n    scope = self.scope.as_list()\n    batch = self.batch\n    return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the cast operation with given `dtype`.\n\nArgs:\ndtype: The output's data type.\n\nReturns:\nA TensorFluent wrapping the cast operation.", "source": "codesearchnet"}
{"code": "def match_as_dict(self, film_sl_vectors, substrate_sl_vectors, film_vectors, substrate_vectors, match_area):\n        \n        d = {}\n        d[\"film_sl_vecs\"] = np.asarray(film_sl_vectors)\n        d[\"sub_sl_vecs\"] = np.asarray(substrate_sl_vectors)\n        d[\"match_area\"] = match_area\n        d[\"film_vecs\"] = np.asarray(film_vectors)\n        d[\"sub_vecs\"] = np.asarray(substrate_vectors)\n\n        return d", "docstring": "Returns dict which contains ZSL match\n\nArgs:\nfilm_miller(array)\nsubstrate_miller(array)", "source": "juraj-google-style"}
{"code": "def mutate(self, dna: pg.DNA, global_state: pg.geno.AttributeDict, step: int=0) -> typing.Union[pg.DNA, List[pg.DNA]]:\n    raise NotImplementedError()", "docstring": "Mutates the DNA at a given step.\n\nUser should override this method or `mutate_list` method with optional\nkeyword arguments 'global_state' and 'step'.\n\nArgs:\ndna: DNA to mutate.\nglobal_state: An `AttributeDict` object as the container of global states.\nstep: Number of examples historically proposed, which can be used for\ndetermining a mutation schedule.\n\nReturns:\nA new DNA or a DNA list as the result of the mutation.", "source": "github-repos"}
{"code": "def migration_exchange(self, *, users: List[str], **kwargs) -> SlackResponse:\n    kwargs.update({'users': users})\n    return self.api_call('migration.exchange', http_verb='GET', params=kwargs)", "docstring": "For Enterprise Grid workspaces, map local user IDs to global user IDs\n\nArgs:\nusers (list): A list of user ids, up to 400 per request.\ne.g. ['W1234567890', 'U2345678901', 'U3456789012']", "source": "codesearchnet"}
{"code": "def search(self):\n    safeEnvDict = {'freeSearch': self.freeSearch, 'extentSearch': self.extentSearch, 'indexSearch': self.indexSearch}\n    for col in self._dataFrame.columns:\n        safeEnvDict[col] = self._dataFrame[col]\n    try:\n        searchIndex = eval(self._filterString, {'__builtins__': None}, safeEnvDict)\n    except NameError:\n        return ([], False)\n    except SyntaxError:\n        return ([], False)\n    except ValueError:\n        return ([], False)\n    except TypeError:\n        return ([], False)\n    return (searchIndex, True)", "docstring": "Applies the filter to the stored dataframe.\n\nA safe environment dictionary will be created, which stores all allowed\nfunctions and attributes, which may be used for the filter.\nIf any object in the given `filterString` could not be found in the\ndictionary, the filter does not apply and returns `False`.\n\nReturns:\ntuple: A (indexes, success)-tuple, which indicates identified objects\nby applying the filter and if the operation was successful in\ngeneral.", "source": "codesearchnet"}
{"code": "def generate_state_data(means, weights):\n    x_true = np.dot(means, weights)\n    sample = np.random.poisson(x_true)\n    return sample.astype(float)", "docstring": "Generates data according to the Poisson Convex Mixture Model.\n\nArgs:\nmeans (array): Cell types- genes x clusters\nweights (array): Cell cluster assignments- clusters x cells\n\nReturns:\ndata matrix - genes x cells", "source": "codesearchnet"}
{"code": "def disasm(code, addr=0, syntax=None, target=None):\n    if (target is None):\n        target = pwnypack.target.target\n    if (syntax is None):\n        if (target.arch is pwnypack.target.Target.Arch.x86):\n            syntax = AsmSyntax.nasm\n        else:\n            syntax = AsmSyntax.att\n    if (syntax is AsmSyntax.nasm):\n        if (target.arch is not pwnypack.target.Target.Arch.x86):\n            raise NotImplementedError('nasm only supports x86.')\n        p = subprocess.Popen(['ndisasm', '-b', str(target.bits.value), '-o', str(addr), '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        (stdout, stderr) = p.communicate(code)\n        if p.returncode:\n            raise RuntimeError(stderr.decode('utf-8'))\n        return [line.split(None, 2)[2] for line in stdout.decode('utf-8').split('\\n') if (line and (not line.startswith(' ')))]\n    elif (syntax in (AsmSyntax.intel, AsmSyntax.att)):\n        md = prepare_capstone(syntax, target)\n        statements = []\n        total_size = 0\n        for (_, size, mnemonic, op_str) in md.disasm_lite(code, addr):\n            statements.append(((mnemonic + ' ') + op_str).strip())\n            total_size += size\n        return statements\n    else:\n        raise NotImplementedError('Unsupported syntax for host platform.')", "docstring": "Disassemble machine readable code into human readable statements.\n\nArgs:\ncode(bytes): The machine code that is to be disassembled.\naddr(int): The memory address of the code (used for relative\nreferences).\nsyntax(AsmSyntax): The output assembler syntax. This defaults to\nnasm on x86 architectures, AT&T on all other architectures.\ntarget(~pwnypack.target.Target): The architecture for which the code\nwas written.  The global target is used if this argument is\n``None``.\n\nReturns:\nlist of str: The disassembled machine code.\n\nRaises:\nNotImplementedError: In an unsupported target platform is specified.\nRuntimeError: If ndisasm encounters an error.\n\nExample:\n>>> from pwny import *\n>>> disasm(b'_\\\\xc3', target=Target(arch=Target.Arch.x86, bits=64))\n['pop rdi', 'ret']", "source": "codesearchnet"}
{"code": "def from_json(cls, data):\n    required_keys = ('hum_type', 'hum_value')\n    optional_keys = {'barometric_pressure': 101325, 'schedule': '', 'wet_bulb_range': ''}\n    for key in required_keys:\n        assert (key in data), 'Required key \"{}\" is missing!'.format(key)\n    for (key, val) in optional_keys.items():\n        if (key not in data):\n            data[key] = val\n    return cls(data['hum_type'], data['hum_value'], data['barometric_pressure'], data['schedule'], data['wet_bulb_range'])", "docstring": "Create a Humidity Condition from a dictionary.\n\nArgs:\ndata = {\n\"hum_type\": string,\n\"hum_value\": float,\n\"barometric_pressure\": float,\n\"schedule\": string,\n\"wet_bulb_range\": string}", "source": "codesearchnet"}
{"code": "def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):\n    new_lm_head_decoder = old_lm_head_decoder\n    is_input_output_equals = tf.reduce_any(self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder)\n    if old_lm_head_decoder is not None and (not is_input_output_equals):\n        old_embedding_dim = shape_list(old_lm_head_decoder)[1]\n        decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)\n        new_lm_head_decoder = self.add_weight(shape=(new_num_tokens, old_embedding_dim), initializer='zeros', trainable=True, name=old_lm_head_decoder.name.split(':')[0])\n        init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())\n        new_lm_head_decoder.assign(init_decoder)\n    return new_lm_head_decoder", "docstring": "Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.\nReducing the size will remove vectors from the end\n\nArgs:\nold_lm_head_decoder (`tf.Variable`):\nOld lm head decoder to be resized.\nnew_num_tokens (`int`, *optional*):\nNew number of tokens in the linear matrix.\n\nIncreasing the size will add newly initialized vectors at the end. Reducing the size will remove\nvectors from the end. If not provided or `None`, just returns None\n\nReturn:\n`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input\nones.", "source": "github-repos"}
{"code": "def __init__(self, initializer: tf.keras.initializers.Initializer=tf.keras.initializers.RandomUniform()):\n    super().__init__()\n    self._initializer = initializer", "docstring": "Initializes a VariableDot layer.\n\nArgs:\ninitializer: A `tf.keras.initializers.Initializer` which specifies how to\ninitialize the values of the parameters.", "source": "github-repos"}
{"code": "def _standardize_and_copy_config(config):\n    kwargs = config.copy()\n    for k, v in kwargs.items():\n        if isinstance(v, list):\n            kwargs[k] = tuple(v)\n    return kwargs", "docstring": "Returns a shallow copy of config with lists turned to tuples.\n\nKeras serialization uses nest to listify everything.\nThis causes problems with the NumericColumn shape, which becomes\nunhashable. We could try to solve this on the Keras side, but that\nwould require lots of tracking to avoid changing existing behavior.\nInstead, we ensure here that we revive correctly.\n\nArgs:\nconfig: dict that will be used to revive a Feature Column\n\nReturns:\nShallow copy of config with lists turned to tuples.", "source": "github-repos"}
{"code": "def _RunOsLoginNssCache(self):\n    try:\n        return subprocess.call([constants.OSLOGIN_NSS_CACHE_SCRIPT])\n    except OSError as e:\n        if (e.errno == errno.ENOENT):\n            return None\n        else:\n            raise", "docstring": "Run the OS Login NSS cache binary.\n\nReturns:\nint, the return code from the call, or None if the script is not found.", "source": "codesearchnet"}
{"code": "def validate(self, nanopub: Mapping[(str, Any)]) -> Tuple[(bool, List[Tuple[(str, str)]])]:\n    (is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)\n    if (not is_valid):\n        return messages\n    if (nanopub['nanopub']['type']['name'].upper() == 'BEL'):\n        bel_version = nanopub['nanopub']['type']['version']\n    else:\n        is_valid = False\n        return (is_valid, f\"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}\")\n    all_messages = []\n    bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)\n    for edge in nanopub['nanopub']['edges']:\n        bel_statement = f\"{edge['subject']} {edge['relation']} {edge['object']}\"\n        parse_obj = bel_obj.parse(bel_statement)\n        if (not parse_obj.valid):\n            all_messages.extend(('ERROR', f'BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}'))\n    for context in nanopub['nanopub']['context']:\n        (is_valid, messages) = self.validate_context(context)\n        all_messages.extend(messages)\n    is_valid = True\n    for (_type, msg) in all_messages:\n        if (_type == 'ERROR'):\n            is_valid = False\n    return (is_valid, all_messages)", "docstring": "Validates using the nanopub schema\n\nArgs:\nnanopub (Mapping[str, Any]): nanopub dict\n\nReturns:\nTuple[bool, List[Tuple[str, str]]]:\nbool: Is valid?  Yes = True, No = False\nList[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)\ne.g. [('WARNING', \"Context ID not found\")]", "source": "codesearchnet"}
{"code": "def expand(self, pcoll: beam.PCollection[Chunk]) -> beam.PTransform[Chunk, Any]:\n    write_transform = self.database_config.create_write_transform()\n    return pcoll | write_transform", "docstring": "Creates and applies the database-specific write transform.\n\nArgs:\npcoll: PCollection of Chunks with embeddings to write to the\nvector database. Each Chunk must have:\n- An embedding\n- An ID\n- Metadata used to filter results as specified by database config\n\nReturns:\nResult of writing to database (implementation specific).", "source": "github-repos"}
{"code": "def _set_avg_session_metrics(session_group):\n    assert session_group.sessions, 'SessionGroup cannot be empty.'\n    metric_stats = collections.defaultdict(_MetricStats)\n    for session in session_group.sessions:\n        for metric_value in session.metric_values:\n            metric_name = _MetricIdentifier(group=metric_value.name.group, tag=metric_value.name.tag)\n            stats = metric_stats[metric_name]\n            stats.total += metric_value.value\n            stats.count += 1\n            stats.total_step += metric_value.training_step\n            stats.total_wall_time_secs += metric_value.wall_time_secs\n    del session_group.metric_values[:]\n    for (metric_name, stats) in six.iteritems(metric_stats):\n        session_group.metric_values.add(name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag), value=(float(stats.total) / float(stats.count)), training_step=(stats.total_step", "docstring": "Sets the metrics for the group to be the average of its sessions.\n\nThe resulting session group metrics consist of the union of metrics across\nthe group's sessions. The value of each session group metric is the average\nof that metric values across the sessions in the group. The 'step' and\n'wall_time_secs' fields of the resulting MetricValue field in the session\ngroup are populated with the corresponding averages (truncated for 'step')\nas well.\n\nArgs:\nsession_group: A SessionGroup protobuffer.", "source": "codesearchnet"}
{"code": "def rprint(sep='\\n', end='\\n', file=sys.stdout, flush=False):\n    \n    try:\n        first_item = (yield)\n        file.write(str(first_item))\n        if flush:\n            file.flush()\n        while True:\n            item = (yield)\n            file.write(sep)\n            file.write(str(item))\n            if flush:\n                file.flush()\n    except GeneratorExit:\n        file.write(end)\n        if flush:\n            file.flush()", "docstring": "A coroutine sink which prints received items stdout\n\nArgs:\nsep: Optional separator to be printed between received items.\nend: Optional terminator to be printed after the last item.\nfile: Optional stream to which to print.\nflush: Optional flag to force flushing after each item.", "source": "juraj-google-style"}
{"code": "def upload(self, file_path, uri=None, timeout=(- 1)):\n    if (not uri):\n        uri = self._uri\n    upload_file_name = os.path.basename(file_path)\n    (task, entity) = self._connection.post_multipart_with_response_handling(uri, file_path, upload_file_name)\n    if (not task):\n        return entity\n    return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Makes a multipart request.\n\nArgs:\nfile_path:\nFile to upload.\nuri:\nA specific URI (optional).\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Response body.", "source": "codesearchnet"}
{"code": "def to_grayscale(img, num_output_channels=1):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    if (num_output_channels == 1):\n        img = img.convert('L')\n    elif (num_output_channels == 3):\n        img = img.convert('L')\n        np_img = np.array(img, dtype=np.uint8)\n        np_img = np.dstack([np_img, np_img, np_img])\n        img = Image.fromarray(np_img, 'RGB')\n    else:\n        raise ValueError('num_output_channels should be either 1 or 3')\n    return img", "docstring": "Convert image to grayscale version of image.\n\nArgs:\nimg (PIL Image): Image to be converted to grayscale.\n\nReturns:\nPIL Image: Grayscale version of the image.\nif num_output_channels = 1 : returned image is single channel\n\nif num_output_channels = 3 : returned image is 3 channel with r = g = b", "source": "codesearchnet"}
{"code": "def usufyToXlsExport(d, fPath):\n    from pyexcel_xls import get_data\n    try:\n        oldData = {'OSRFramework': get_data(fPath)}\n    except:\n        oldData = {'OSRFramework': []}\n    tabularData = _generateTabularData(d, oldData)\n    from pyexcel_xls import save_data\n    save_data(fPath, tabularData)", "docstring": "Workaround to export to a .xls file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"}
{"code": "def stack(list_or_tensor, element_dtype=None, strict=True):\n    if strict:\n\n        def raise_error(x):\n            raise ValueError('%s must be stackable when strict=True' % x)\n        original_call = raise_error\n    else:\n        original_call = lambda x: x\n    return data_structures.list_stack(list_or_tensor, data_structures.ListStackOpts(element_dtype=element_dtype, original_call=original_call))", "docstring": "Stacks the input, if it admits the notion of stacking.\n\nFor example, a list of tensors can be stacked into a larger tensor. This\nfunction is similar to tf.stack, but it accepts non-lists and lists of\nnon-tensors as arguments. In the latter case, the function does nothing.\n\nArgs:\nlist_or_tensor: Any\nelement_dtype: tf.DType, optional dtypedtype for the elements in the list.\nRequired if the input is stackable, and the list is untyped.\nstrict: bool, if True an error is raised if the input is not stackable.\nOtherwise the function is a no-op.\n\nReturns:\nAny, if the input is stackable, the result will be a tf.Tensor. Otherwise,\nif strict=False, the result will be list_or_tensor.\n\nRaises:\nValueError: if strict=True and the input is not stackable.", "source": "github-repos"}
{"code": "def save(self, path, compressed=True, exist_ok=False):\n    path = os.path.expandvars(os.path.expanduser(path))\n    if (os.path.isfile(path) and (not exist_ok)):\n        raise OSError(17, os.strerror(17), path)\n    if os.path.isdir(path):\n        path = os.path.join(path, 'out.gdg')\n    if compressed:\n        bytes_written = cgaddag.gdg_save_compressed(self.gdg, path.encode('ascii'))\n    else:\n        bytes_written = cgaddag.gdg_save(self.gdg, path.encode('ascii'))\n    if (bytes_written == (- 1)):\n        errno = ctypes.c_int.in_dll(ctypes.pythonapi, 'errno').value\n        raise OSError(errno, os.strerror(errno), path)\n    return bytes_written", "docstring": "Save the GADDAG to file.\n\nArgs:\npath: path to save the GADDAG to.\ncompressed: compress the saved GADDAG using gzip.\nexist_ok: overwrite existing file at `path`.", "source": "codesearchnet"}
{"code": "def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]:\n    \n\n    func_pattern = re.compile(r\"\\s*[a-zA-Z]+\\(\")\n    nsarg_pattern = re.compile(r\"^\\s*([A-Z]+):(.*?)\\s*$\")\n\n    for span in parsed:\n        if parsed[span][\"type\"] != \"Function\" or \"parens_span\" not in parsed[span]:\n            continue\n\n        for i, arg in enumerate(parsed[span][\"args\"]):\n            nsarg_matches = nsarg_pattern.match(arg[\"arg\"])\n            if func_pattern.match(arg[\"arg\"]):\n                parsed[span][\"args\"][i].update({\"type\": \"Function\"})\n            elif nsarg_matches:\n                (start, end) = arg[\"span\"]\n                ns = nsarg_matches.group(1)\n                ns_val = nsarg_matches.group(2)\n                ns_span = nsarg_matches.span(1)\n                ns_span = (ns_span[0] + start, ns_span[1] + start - 1)\n                ns_val_span = nsarg_matches.span(2)\n                ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1)\n\n                parsed[span][\"args\"][i].update(\n                    {\n                        \"type\": \"NSArg\",\n                        \"ns\": ns,\n                        \"ns_span\": ns_span,\n                        \"ns_val\": ns_val,\n                        \"ns_val_span\": ns_val_span,\n                    }\n                )\n            else:\n                parsed[span][\"args\"][i].update({\"type\": \"StrArg\"})\n\n    return parsed, errors", "docstring": "Add argument types to parsed function data structure\n\nArgs:\nparsed: function and arg locations in BEL string\nerrors: error messages\n\nReturns:\n(parsed, errors): parsed, arguments with arg types plus error messages", "source": "juraj-google-style"}
{"code": "def gpu(self: EagerTensorType, gpu_index=0) -> EagerTensorType:\n    return self._copy(context.context(), 'GPU:' + str(gpu_index))", "docstring": "A copy of this Tensor with contents backed by memory on the GPU.\n\nArgs:\ngpu_index: Identifies which GPU to place the contents on the returned\nTensor in.\n\nReturns:\nA GPU-memory backed Tensor object initialized with the same contents\nas this Tensor.", "source": "github-repos"}
{"code": "def _get_password_url(self):\n    password_url = None\n    if (self._settings['user'] or self._settings['authorization']):\n        if self._settings['url']:\n            password_url = self._settings['url']\n        elif self._settings['base_url']:\n            password_url = self._settings['base_url']\n    return password_url", "docstring": "Get URL used for authentication\n\nReturns:\nstring: URL", "source": "codesearchnet"}
{"code": "def get_structure_seqs(self, model):\n        \n\n        \n        dont_overwrite = []\n        chains = list(model.get_chains())\n        for x in chains:\n            if self.chains.has_id(x.id):\n                if self.chains.get_by_id(x.id).seq_record:\n                    dont_overwrite.append(x.id)\n        if len(dont_overwrite) == len(chains):\n            log.debug('Not writing structure sequences, already stored')\n            return\n\n        \n        structure_seqs = ssbio.protein.structure.properties.residues.get_structure_seqrecords(model)\n        log.debug('{}: gathered chain sequences'.format(self.id))\n\n        \n        for seq_record in structure_seqs:\n            log.debug('{}: adding chain sequence to ChainProp'.format(seq_record.id))\n            my_chain = self.chains.get_by_id(seq_record.id)\n            my_chain.seq_record = seq_record", "docstring": "Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute.\n\nArgs:\nmodel (Model): Biopython Model object of the structure you would like to parse", "source": "juraj-google-style"}
{"code": "def _compute_args(self, data=dict(), **kwargs):\n        \n\n        for name, remote_attribute in self._attributes.items():\n            default_value = BambouConfig.get_default_attribute_value(self.__class__, name, remote_attribute.attribute_type)\n            setattr(self, name, default_value)\n\n        if len(data) > 0:\n            self.from_dict(data)\n\n        for key, value in kwargs.items():\n            if hasattr(self, key):\n                setattr(self, key, value)", "docstring": "Compute the arguments\n\nTry to import attributes from data.\nOtherwise compute kwargs arguments.\n\nArgs:\ndata: a dict()\nkwargs: a list of arguments", "source": "juraj-google-style"}
{"code": "def contextmanager(target: Callable[..., Iterator[_T]]) -> Callable[..., ContextManager[_T]]:\n    context_manager = _contextlib.contextmanager(target)\n    return tf_decorator.make_decorator(target, context_manager, 'contextmanager')", "docstring": "A tf_decorator-aware wrapper for `contextlib.contextmanager`.\n\nUsage is identical to `contextlib.contextmanager`.\n\nArgs:\ntarget: A callable to be wrapped in a contextmanager.\nReturns:\nA callable that can be used inside of a `with` statement.", "source": "github-repos"}
{"code": "def __init__(self, given, enum_type, options):\n    super(InvalidEnumValue, self).__init__('Could not parse [{0}] into a valid {1}.  Valid values are [{2}]'.format(given, enum_type, ', '.join(options)))", "docstring": "Constructs a new exception.\n\nArgs:\ngiven: str, The given string that could not be parsed.\nenum_type: str, The human readable name of the enum you were trying to\nparse.\noptions: list(str), The valid values for this enum.", "source": "github-repos"}
{"code": "def infer_pyarrow_schema(data):\n    column_data = OrderedDict()\n    for row in data:\n        for key, value in row.items():\n            column_data.setdefault(key, []).append(value)\n    column_types = OrderedDict([(key, pa.array(value).type) for key, value in column_data.items()])\n    return pa.schema(list(column_types.items()))", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nInfer PyArrow schema for tabular data.\n\nArgs:\ndata (List[dict]): A list of dictionaries representing rows in a table.\n\nReturns:\nA PyArrow schema object.", "source": "github-repos"}
{"code": "def _flatten_location_translations(location_translations):\n    sources_to_process = set(six.iterkeys(location_translations))\n\n    def _update_translation(source):\n        'Return the proper (fully-flattened) translation for the given location.'\n        destination = location_translations[source]\n        if (destination not in location_translations):\n            return destination\n        else:\n            sources_to_process.discard(destination)\n            final_destination = _update_translation(destination)\n            location_translations[source] = final_destination\n            return final_destination\n    while sources_to_process:\n        _update_translation(sources_to_process.pop())", "docstring": "If location A translates to B, and B to C, then make A translate directly to C.\n\nArgs:\nlocation_translations: dict of Location -> Location, where the key translates to the value.\nMutated in place for efficiency and simplicity of implementation.", "source": "codesearchnet"}
{"code": "def Reformat(llines, lines=None):\n    final_lines = []\n    prev_line = None\n    indent_width = style.Get('INDENT_WIDTH')\n    for lline in _SingleOrMergedLines(llines):\n        first_token = lline.first\n        _FormatFirstToken(first_token, lline.depth, prev_line, final_lines)\n        indent_amt = indent_width * lline.depth\n        state = format_decision_state.FormatDecisionState(lline, indent_amt)\n        state.MoveStateToNextToken()\n        if not lline.disable:\n            if lline.first.is_comment:\n                lline.first.value = lline.first.value.rstrip()\n            elif lline.last.is_comment:\n                lline.last.value = lline.last.value.rstrip()\n            if prev_line and prev_line.disable:\n                _RetainRequiredVerticalSpacingBetweenTokens(lline.first, prev_line.last, lines)\n            if any((tok.is_comment for tok in lline.tokens)):\n                _RetainVerticalSpacingBeforeComments(lline)\n        if lline.disable or _LineHasContinuationMarkers(lline):\n            _RetainHorizontalSpacing(lline)\n            _RetainRequiredVerticalSpacing(lline, prev_line, lines)\n            _EmitLineUnformatted(state)\n        elif _LineContainsPylintDisableLineTooLong(lline) or _LineContainsI18n(lline):\n            _RetainRequiredVerticalSpacing(lline, prev_line, lines)\n            _EmitLineUnformatted(state)\n        elif _CanPlaceOnSingleLine(lline) and (not any((tok.must_break_before for tok in lline.tokens))):\n            while state.next_token:\n                state.AddTokenToState(newline=False, dry_run=False)\n        elif not _AnalyzeSolutionSpace(state):\n            state = format_decision_state.FormatDecisionState(lline, indent_amt)\n            state.MoveStateToNextToken()\n            _RetainHorizontalSpacing(lline)\n            _RetainRequiredVerticalSpacing(lline, prev_line, None)\n            _EmitLineUnformatted(state)\n        final_lines.append(lline)\n        prev_line = lline\n    _AlignTrailingComments(final_lines)\n    return _FormatFinalLines(final_lines)", "docstring": "Reformat the logical lines.\n\nArguments:\nllines: (list of logical_line.LogicalLine) Lines we want to format.\nlines: (set of int) The lines which can be modified or None if there is no\nline range restriction.\n\nReturns:\nA string representing the reformatted code.", "source": "github-repos"}
{"code": "def assembly(self, value):\n        \n        if value == self._defaults['assembly'] and 'assembly' in self._values:\n            del self._values['assembly']\n        else:\n            self._values['assembly'] = value", "docstring": "The assembly property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def make_report_table(fp, title, reports):\n    reports.sort(key=lambda x: x[1]['tflite_converter'], reverse=False)\n    reports.sort(key=lambda x: x[1]['tf'], reverse=True)\n\n    def result_cell(x, row, col):\n        \n        s = html.escape(repr(x), quote=True)\n        color = '\n        handler = 'ShowLog(%d, %d)' % (row, col)\n        fp.write(\"<td style='background-color: %s' onclick='%s'>%s</td>\\n\" % (color, handler, s))\n    fp.write('<html>\\n<head>\\n<title>tflite report</title>\\n<style>\\nbody { font-family: Arial; }\\nth { background-color: \n    fp.write('<script> \\n')\n    fp.write('\\nfunction ShowLog(row, col) {\\n\\nvar log = document.getElementById(\"log\");\\nlog.innerHTML = \"<pre>\" + data[row][col]  + \"</pre>\";\\n}\\n')\n    fp.write('var data = \\n')\n    logs = json.dumps([[escape_and_normalize(x[1]['tf_log']), escape_and_normalize(x[1]['tflite_converter_log'])] for x in reports])\n    fp.write(logs)\n    fp.write(';</script>\\n')\n    fp.write('\\n<body>\\n<h1>TensorFlow Lite Conversion</h1>\\n<h2>%s</h2>\\n' % title)\n    param_keys = {}\n    for params, _ in reports:\n        for k in params.keys():\n            param_keys[k] = True\n    fp.write('<table>\\n')\n    fp.write(\"<tr><td class='horiz'>\\n\")\n    fp.write(\"<div style='height:1000px; overflow:auto'>\\n\")\n    fp.write('<table>\\n')\n    fp.write('<tr>\\n')\n    for p in param_keys:\n        fp.write('<th>%s</th>\\n' % html.escape(p, quote=True))\n    fp.write('<th>TensorFlow</th>\\n')\n    fp.write('<th>TensorFlow Lite Converter</th>\\n')\n    fp.write('</tr>\\n')\n    for idx, (params, vals) in enumerate(reports):\n        fp.write('<tr>\\n')\n        for p in param_keys:\n            fp.write('  <td>%s</td>\\n' % html.escape(repr(params.get(p, None)), quote=True))\n        result_cell(vals['tf'], idx, 0)\n        result_cell(vals['tflite_converter'], idx, 1)\n        fp.write('</tr>\\n')\n    fp.write('</table>\\n')\n    fp.write('</div>\\n')\n    fp.write('</td>\\n')\n    fp.write(\"<td class='horiz' id='log'></td></tr>\\n\")\n    fp.write('</table>\\n')\n    fp.write('<script>\\n')\n    fp.write('</script>\\n')\n    fp.write('\\n    </body>\\n    </html>\\n    ')", "docstring": "Make an HTML report of the success/failure reports.\n\nArgs:\nfp: File-like object in which to put the html.\ntitle: \"Title of the zip file this pertains to.\"\nreports: a list of conversion attempts. (report_args, report_vals) i.e.\n({\"shape\": [1,2,3], \"type\": \"tf.float32\"},\n{\"tf\": \"SUCCESS\", \"tflite_converter\": \"FAILURE\",\n\"tf_log\": \"\", \"tflite_converter_log\": \"Unsupported type.\"})", "source": "github-repos"}
{"code": "def env(mounts):\n    f_mounts = [m.strip('/') for m in mounts]\n    root = local.path('/')\n    ld_libs = [((root / m) / 'lib') for m in f_mounts]\n    ld_libs.extend([((root / m) / 'lib64') for m in f_mounts])\n    paths = [((root / m) / 'bin') for m in f_mounts]\n    paths.extend([((root / m) / 'sbin') for m in f_mounts])\n    paths.extend([(root / m) for m in f_mounts])\n    return (paths, ld_libs)", "docstring": "Compute the environment of the change root for the user.\n\nArgs:\nmounts: The mountpoints of the current user.\nReturn:\npaths\nld_libs", "source": "codesearchnet"}
{"code": "def get_point_index(self, point):\n    for (i, segment) in enumerate(self.segments):\n        idx = segment.getPointIndex(point)\n        if (idx != (- 1)):\n            return (i, idx)\n    return ((- 1), (- 1))", "docstring": "Gets of the closest first point\n\nArgs:\npoint (:obj:`Point`)\nReturns:\n(int, int): Segment id and point index in that segment", "source": "codesearchnet"}
{"code": "def set_default_by_index(self, index):\n        \n        if index >= len(self._datasets):\n            raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n\n        self._default_index = index", "docstring": "Set the default dataset by its index.\n\nAfter changing the default dataset, all calls without explicitly specifying the\ndataset by index or alias will be redirected to this dataset.\n\nArgs:\nindex (int): The index of the dataset that should be made the default.\n\nRaises:\nDataInvalidIndex: If the index does not represent a valid dataset.", "source": "juraj-google-style"}
{"code": "def setup_modules(self, args):\n    \n\n    def _setup_module_thread(module_description):\n      \n      new_args = utils.import_args_from_dict(\n          module_description['args'], vars(args), self.config)\n      module = self._module_pool[module_description['name']]\n      try:\n        module.setup(**new_args)\n      except Exception as error:  \n        self.add_error(\n            'An unknown error occurred: {0!s}\\nFull traceback:\\n{1:s}'.format(\n                error, traceback.format_exc()),\n            critical=True)\n\n      self.events[module_description['name']] = threading.Event()\n      self.cleanup()\n\n    threads = []\n    for module_description in self.recipe['modules']:\n      t = threading.Thread(\n          target=_setup_module_thread,\n          args=(module_description, )\n      )\n      threads.append(t)\n      t.start()\n    for t in threads:\n      t.join()\n\n    self.check_errors(is_global=True)", "docstring": "Performs setup tasks for each module in the module pool.\n\nThreads declared modules' setup() functions. Takes CLI arguments into\naccount when replacing recipe parameters for each module.\n\nArgs:\nargs: Command line arguments that will be used to replace the parameters\ndeclared in the recipe.", "source": "juraj-google-style"}
{"code": "def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:\n    logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')\n    decoder_config.is_decoder = True\n    decoder_config.add_cross_attention = True\n    return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`VisionEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model\nconfiguration and decoder model configuration.\n\nReturns:\n[`VisionEncoderDecoderConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def check_version(self, node_id=None, timeout=2, strict=False):\n    self._lock.acquire()\n    end = (time.time() + timeout)\n    while (time.time() < end):\n        try_node = (node_id or self.least_loaded_node())\n        if (try_node is None):\n            self._lock.release()\n            raise Errors.NoBrokersAvailable()\n        self._maybe_connect(try_node)\n        conn = self._conns[try_node]\n        self._refresh_on_disconnects = False\n        try:\n            remaining = (end - time.time())\n            version = conn.check_version(timeout=remaining, strict=strict, topics=list(self.config['bootstrap_topics_filter']))\n            if (version >= (0, 10, 0)):\n                self._api_versions = conn.get_api_versions()\n            self._lock.release()\n            return version\n        except Errors.NodeNotReadyError:\n            if (node_id is not None):\n                self._lock.release()\n                raise\n        finally:\n            self._refresh_on_disconnects = True\n    else:\n        self._lock.release()\n        raise Errors.NoBrokersAvailable()", "docstring": "Attempt to guess the version of a Kafka broker.\n\nNote: It is possible that this method blocks longer than the\nspecified timeout. This can happen if the entire cluster\nis down and the client enters a bootstrap backoff sleep.\nThis is only possible if node_id is None.\n\nReturns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...\n\nRaises:\nNodeNotReadyError (if node_id is provided)\nNoBrokersAvailable (if node_id is None)\nUnrecognizedBrokerVersion: please file bug if seen!\nAssertionError (if strict=True): please file bug if seen!", "source": "codesearchnet"}
{"code": "def send(self, response):\n    self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid), pickle.dumps(response))", "docstring": "Send a response back to the client that issued a request.\n\nArgs:\nresponse (Response): Reference to the response object that should be sent.", "source": "codesearchnet"}
{"code": "def get_config_parameter_boolean(config: ConfigParser,\n                                 section: str,\n                                 param: str,\n                                 default: bool) -> bool:\n    \n    try:\n        value = config.getboolean(section, param)\n    except (TypeError, ValueError, NoOptionError):\n        log.warning(\n            \"Configuration variable {} not found or improper in section [{}]; \"\n            \"using default of {!r}\", param, section, default)\n        value = default\n    return value", "docstring": "Get Boolean parameter from ``configparser`` ``.INI`` file.\n\nArgs:\nconfig: :class:`ConfigParser` object\nsection: section name within config file\nparam: name of parameter within section\ndefault: default value\nReturns:\nparameter value, or default", "source": "juraj-google-style"}
{"code": "def is_github_task(task):\n    return any(((task.get('schedulerId') == 'taskcluster-github'), task.get('extra', {}).get('tasks_for', '').startswith('github-'), is_github_url(task.get('metadata', {}).get('source', ''))))", "docstring": "Determine if a task is related to GitHub.\n\nThis function currently looks into the ``schedulerId``, ``extra.tasks_for``, and\n``metadata.source``.\n\nArgs:\ntask (dict): the task definition to check.\n\nReturns:\nbool: True if a piece of data refers to GitHub", "source": "codesearchnet"}
{"code": "def __init__(self, base_object=None, query=None):\n    \n    if not query:\n      raise errors.FormatError('Missing query value.')\n\n    super(WMIQuerySourceType, self).__init__()\n    self.base_object = base_object\n    self.query = query", "docstring": "Initializes a source type.\n\nArgs:\nbase_object (Optional[str]): WMI base object.\nquery (Optional[str]): WMI query.\n\nRaises:\nFormatError: when query is not set.", "source": "juraj-google-style"}
{"code": "def save_to_file(self, filename, remap_dim0=None, remap_dim1=None):\n    with open(filename, 'w') as fobj:\n        columns = list(sorted(self._dim1))\n        for col in columns:\n            fobj.write(',')\n            fobj.write(str((remap_dim1[col] if remap_dim1 else col)))\n        fobj.write('\\n')\n        for row in sorted(self._dim0):\n            fobj.write(str((remap_dim0[row] if remap_dim0 else row)))\n            for col in columns:\n                fobj.write(',')\n                fobj.write(str(self[(row, col)]))\n            fobj.write('\\n')", "docstring": "Saves matrix to the file.\n\nArgs:\nfilename: name of the file where to save matrix\nremap_dim0: dictionary with mapping row indices to row names which should\nbe saved to file. If none then indices will be used as names.\nremap_dim1: dictionary with mapping column indices to column names which\nshould be saved to file. If none then indices will be used as names.", "source": "codesearchnet"}
{"code": "def __getitem__(self, key):\n    \n    if not isinstance(key, basestring):\n      raise Exception(\"LRU cache can only be indexed by strings (%s has type %s)\" %\n                      (str(key), str(type(key))))\n\n    if key in self._cache:\n      entry = self._cache[key]\n      entry['last_used'] = datetime.datetime.now()\n      return entry['value']\n    else:\n      raise KeyError(key)", "docstring": "Get an item from the cache.\n\nArgs:\nkey: a string used as the lookup key.\nReturns:\nThe cached item, if any.\nRaises:\nException if the key is not a string.\nKeyError if the key is not found.", "source": "juraj-google-style"}
{"code": "def GetEntries(\n      self, parser_mediator, cookie_data=None, url=None, **kwargs):\n    \n    fields = cookie_data.split('.')\n    number_of_fields = len(fields)\n\n    if number_of_fields > 5:\n      variables = '.'.join(fields[4:])\n      fields = fields[0:4]\n      fields.append(variables)\n      number_of_fields = len(fields)\n\n    if number_of_fields not in (1, 5):\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported number of fields: {0:d} in cookie: {1:s}'.format(\n              number_of_fields, self.COOKIE_NAME))\n      return\n\n    if number_of_fields == 1:\n      domain_hash = None\n\n      try:\n        \n        last_visit_posix_time = int(fields[0], 10) / 10000000\n      except ValueError:\n        last_visit_posix_time = None\n\n      number_of_sessions = None\n      number_of_sources = None\n      extra_attributes = {}\n\n    elif number_of_fields == 5:\n      domain_hash = fields[0]\n\n      try:\n        last_visit_posix_time = int(fields[1], 10)\n      except ValueError:\n        last_visit_posix_time = None\n\n      try:\n        number_of_sessions = int(fields[2], 10)\n      except ValueError:\n        number_of_sessions = None\n\n      try:\n        number_of_sources = int(fields[3], 10)\n      except ValueError:\n        number_of_sources = None\n\n      extra_variables = fields[4].split('|')\n\n      extra_attributes = {}\n      for variable in extra_variables:\n        key, _, value = variable.partition('=')\n\n        \n        \n        \n        if isinstance(value, py2to3.UNICODE_TYPE) and py2to3.PY_2:\n          try:\n            value = codecs.decode(value, 'ascii')\n          except UnicodeEncodeError:\n            value = codecs.decode(value, 'ascii', errors='replace')\n            parser_mediator.ProduceExtractionWarning(\n                'Cookie contains non 7-bit ASCII characters, which have been '\n                'replaced with a \"?\".')\n\n        value = urlparse.unquote(value)\n\n        if py2to3.PY_2:\n          try:\n            value = codecs.encode(value, 'utf-8')\n          except UnicodeDecodeError:\n            value = codecs.encode(value, 'utf-8', errors='replace')\n            parser_mediator.ProduceExtractionWarning(\n                'Cookie value did not contain a Unicode string. Non UTF-8 '\n                'characters have been replaced.')\n\n        extra_attributes[key] = value\n\n    if last_visit_posix_time is not None:\n      date_time = dfdatetime_posix_time.PosixTime(\n          timestamp=last_visit_posix_time)\n      timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED\n    else:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n\n    event_data = GoogleAnalyticsEventData('utmz')\n    event_data.cookie_name = self.COOKIE_NAME\n    event_data.domain_hash = domain_hash\n    event_data.sessions = number_of_sessions\n    event_data.sources = number_of_sources\n    event_data.url = url\n\n    for key, value in iter(extra_attributes.items()):\n      setattr(event_data, key, value)\n\n    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts event objects from the cookie.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ncookie_data (str): cookie data.\nurl (str): URL or path where the cookie got set.", "source": "juraj-google-style"}
{"code": "def gnuplot(script_name, args_dict={}, data=[], silent=True):\n    gnuplot_command = 'gnuplot'\n    if data:\n        assert ('data' not in args_dict), \"Can't use 'data' variable twice.\"\n        data_temp = _GnuplotDataTemp(*data)\n        args_dict['data'] = data_temp.name\n    if args_dict:\n        gnuplot_command += ' -e \"'\n        for arg in args_dict.items():\n            gnuplot_command += (arg[0] + '=')\n            if isinstance(arg[1], str):\n                gnuplot_command += ((\"'\" + arg[1]) + \"'\")\n            elif isinstance(arg[1], bool):\n                if (arg[1] is True):\n                    gnuplot_command += '1'\n                else:\n                    gnuplot_command += '0'\n            elif hasattr(arg[1], '__iter__'):\n                gnuplot_command += ((\"'\" + ' '.join([str(v) for v in arg[1]])) + \"'\")\n            else:\n                gnuplot_command += str(arg[1])\n            gnuplot_command += '; '\n        gnuplot_command = gnuplot_command[:(- 1)]\n        gnuplot_command += '\"'\n    gnuplot_command += (' ' + script_name)\n    if silent:\n        gnuplot_command += ' > /dev/null 2>&1'\n    os.system(gnuplot_command)\n    return gnuplot_command", "docstring": "Call a Gnuplot script, passing it arguments and\ndatasets.\n\nArgs:\nscipt_name(str): The name of the Gnuplot script.\nargs_dict(dict): A dictionary of parameters to pass\nto the script.  The `key` is the name of the variable\nthat the `item` will be passed to the Gnuplot script\nwith.\ndata(list): A list of lists containing lists to be plotted.\nThe lists can be accessed by plotting the variable\n`data` in the Gnuplot script.  The first list in the\nlist of lists corresponds to the first column in data,\nand so on.\nsilent (bool): `True` if Gnuplot stdout should be silenced,\n`False` if not.\n\nReturns:\nstr: The Gnuplot command used to call the script.", "source": "codesearchnet"}
{"code": "def MakeJoint(pmf1, pmf2):\n    \n    joint = Joint()\n    for v1, p1 in pmf1.Items():\n        for v2, p2 in pmf2.Items():\n            joint.Set((v1, v2), p1 * p2)\n    return joint", "docstring": "Joint distribution of values from pmf1 and pmf2.\n\nArgs:\npmf1: Pmf object\npmf2: Pmf object\n\nReturns:\nJoint pmf of value pairs", "source": "juraj-google-style"}
{"code": "def case_study_social_link_facebook(value):\n    \n\n    parsed = parse.urlparse(value.lower())\n    if not parsed.netloc.endswith('facebook.com'):\n        raise ValidationError(MESSAGE_NOT_FACEBOOK)", "docstring": "Confirms that the social media url is pointed at the correct domain.\n\nArgs:\nvalue (string): The url to check.\n\nRaises:\ndjango.forms.ValidationError", "source": "juraj-google-style"}
{"code": "def get_file_path(self, digest):\n    relPath = Fsdb.generate_tree_path(digest, self._conf['depth'])\n    return os.path.join(self.fsdbRoot, relPath)", "docstring": "Retrieve the absolute path to the file with the given digest\n\nArgs:\ndigest -- digest of the file\nReturns:\nString rapresenting the absolute path of the file", "source": "codesearchnet"}
{"code": "def instrument(self, package, options=None, runner=None, handler=None):\n    if (runner is None):\n        runner = DEFAULT_INSTRUMENTATION_RUNNER\n    if (options is None):\n        options = {}\n    options_list = []\n    for (option_key, option_value) in options.items():\n        options_list.append(('-e %s %s' % (option_key, option_value)))\n    options_string = ' '.join(options_list)\n    instrumentation_command = ('am instrument -r -w %s %s/%s' % (options_string, package, runner))\n    logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, instrumentation_command)\n    if (handler is None):\n        self._exec_adb_cmd('shell', instrumentation_command, shell=False, timeout=None, stderr=None)\n    else:\n        return self._execute_adb_and_process_stdout('shell', instrumentation_command, shell=False, handler=handler)", "docstring": "Runs an instrumentation command on the device.\n\nThis is a convenience wrapper to avoid parameter formatting.\n\nExample:\n\n.. code-block:: python\n\ndevice.instrument(\n'com.my.package.test',\noptions = {\n'class': 'com.my.package.test.TestSuite',\n},\n)\n\nArgs:\npackage: string, the package of the instrumentation tests.\noptions: dict, the instrumentation options including the test\nclass.\nrunner: string, the test runner name, which defaults to\nDEFAULT_INSTRUMENTATION_RUNNER.\nhandler: optional func, when specified the function is used to parse\nthe instrumentation stdout line by line as the output is\ngenerated; otherwise, the stdout is simply returned once the\ninstrumentation is finished.\n\nReturns:\nThe stdout of instrumentation command or the stderr if the handler\nis set.", "source": "codesearchnet"}
{"code": "def _list(self, dir_or_prefix):\n    try:\n        for path, (size, updated) in self._blobstorageIO().list_files(dir_or_prefix, with_metadata=True):\n            yield FileMetadata(path, size, updated)\n    except Exception as e:\n        raise BeamIOError('List operation failed', {dir_or_prefix: e})", "docstring": "List files in a location.\nListing is non-recursive (for filesystems that support directories).\nArgs:\ndir_or_prefix: (string) A directory or location prefix (for filesystems\nthat don't have directories).\nReturns:\nGenerator of ``FileMetadata`` objects.\nRaises:\n``BeamIOError``: if listing fails, but not if no files were found.", "source": "github-repos"}
{"code": "def contacts(self, *args, **kwargs):\n    n = Contacts.read_cellframe(self, prune_neighbors=True)\n    if ('measured_regions' in kwargs):\n        n.measured_regions = kwargs['measured_regions']\n    else:\n        n.measured_regions = self.get_measured_regions()\n    if ('measured_phenotypes' in kwargs):\n        n.measured_phenotypes = kwargs['measured_phenotypes']\n    else:\n        n.measured_phenotypes = self.phenotypes\n    n.microns_per_pixel = self.microns_per_pixel\n    return n", "docstring": "Use assess the cell-to-cell contacts recorded in the celldataframe\n\nReturns:\nContacts: returns a class that holds cell-to-cell contact information for whatever phenotypes were in the CellDataFrame before execution.", "source": "codesearchnet"}
{"code": "def load_config(self, config):\n    for (k, v) in config.items():\n        if hasattr(self, k):\n            raise DeviceError(self, ('Attribute %s already exists with value %s, cannot set again.' % (k, getattr(self, k))))\n        setattr(self, k, v)", "docstring": "Add attributes to the AndroidDevice object based on config.\n\nArgs:\nconfig: A dictionary representing the configs.\n\nRaises:\nError: The config is trying to overwrite an existing attribute.", "source": "codesearchnet"}
{"code": "def _detect(self):\n    results = []\n    self.results = []\n    self.visited_all_paths = {}\n    for contract in self.slither.contracts:\n        for function in contract.functions:\n            if function.is_implemented:\n                uninitialized_storage_variables = [v for v in function.local_variables if (v.is_storage and v.uninitialized)]\n                function.entry_point.context[self.key] = uninitialized_storage_variables\n                self._detect_uninitialized(function, function.entry_point, [])\n    for (function, uninitialized_storage_variable) in self.results:\n        var_name = uninitialized_storage_variable.name\n        info = '{} in {}.{} ({}) is a storage variable never initialiazed\\n'\n        info = info.format(var_name, function.contract.name, function.name, uninitialized_storage_variable.source_mapping_str)\n        json = self.generate_json_result(info)\n        self.add_variable_to_json(uninitialized_storage_variable, json)\n        self.add_function_to_json(function, json)\n        results.append(json)\n    return results", "docstring": "Detect uninitialized storage variables\n\nRecursively visit the calls\nReturns:\ndict: [contract name] = set(storage variable uninitialized)", "source": "codesearchnet"}
{"code": "def _Execute(statements, context, callback, trace):\n    \n    \n    if trace:\n        trace.exec_depth += 1\n    for i, statement in enumerate(statements):\n        if isinstance(statement, six.string_types):\n            callback(statement)\n        else:\n            \n            \n            try:\n                func, args = statement\n                func(args, context, callback, trace)\n            except UndefinedVariable as e:\n                \n                start = max(0, i - 3)\n                end = i + 3\n                e.near = statements[start:end]\n                e.trace = trace  \n                raise", "docstring": "Execute a bunch of template statements in a ScopedContext.\n\nArgs:\ncallback: Strings are \"written\" to this callback function.\ntrace: Trace object, or None\n\nThis is called in a mutually recursive fashion.", "source": "juraj-google-style"}
{"code": "def update_variant_compounds(self, variant, variant_objs=None):\n    compound_objs = []\n    for compound in variant.get('compounds', []):\n        not_loaded = True\n        gene_objs = []\n        if variant_objs:\n            variant_obj = variant_objs.get(compound['variant'])\n        else:\n            variant_obj = self.variant_collection.find_one({'_id': compound['variant']})\n        if variant_obj:\n            not_loaded = False\n            compound['rank_score'] = variant_obj['rank_score']\n            for gene in variant_obj.get('genes', []):\n                gene_obj = {'hgnc_id': gene['hgnc_id'], 'hgnc_symbol': gene.get('hgnc_symbol'), 'region_annotation': gene.get('region_annotation'), 'functional_annotation': gene.get('functional_annotation')}\n                gene_objs.append(gene_obj)\n                compound['genes'] = gene_objs\n        compound['not_loaded'] = not_loaded\n        compound_objs.append(compound)\n    return compound_objs", "docstring": "Update compounds for a variant.\n\nThis will add all the necessary information of a variant on a compound object.\n\nArgs:\nvariant(scout.models.Variant)\nvariant_objs(dict): A dictionary with _ids as keys and variant objs as values.\n\nReturns:\ncompound_objs(list(dict)): A dictionary with updated compound objects.", "source": "codesearchnet"}
{"code": "def image_needs_building(image):\n    d = docker_client()\n    try:\n        d.images.get(image)\n    except docker.errors.ImageNotFound:\n        pass\n    else:\n        return False\n    return image_needs_pushing(image)", "docstring": "Return whether an image needs building\n\nChecks if the image exists (ignores commit range),\neither locally or on the registry.\n\nArgs:\n\nimage (str): the `repository:tag` image to be build.\n\nReturns:\n\nTrue: if image needs to be built\nFalse: if not (image already exists)", "source": "codesearchnet"}
{"code": "def set_hostname(hostname=None, deploy=False):\n    \n\n    if not hostname:\n        raise CommandExecutionError(\"Hostname option must not be none.\")\n\n    ret = {}\n\n    query = {'type': 'config',\n             'action': 'set',\n             'xpath': '/config/devices/entry[@name=\\'localhost.localdomain\\']/deviceconfig/system',\n             'element': '<hostname>{0}</hostname>'.format(hostname)}\n\n    ret.update(__proxy__['panos.call'](query))\n\n    if deploy is True:\n        ret.update(commit())\n\n    return ret", "docstring": "Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed.\n\nCLI Example:\n\nArgs:\nhostname (str): The hostname to set\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_hostname newhostname\nsalt '*' panos.set_hostname newhostname deploy=True", "source": "juraj-google-style"}
{"code": "def create_software_renderer(self, surface):\n    renderer = object.__new__(Renderer)\n    renderer._ptr = self._ptr = check_ptr_err(lib.SDL_CreateSoftwareRenderer(surface._ptr))\n    return renderer", "docstring": "Create a 2D software rendering context for a surface.\n\nArgs:\nsurface (Surface): The surface where rendering is done.\n\nReturns:\nRenderer: A 2D software rendering context.\n\nRaises:\nSDLError: If there was an error creating the renderer.", "source": "codesearchnet"}
{"code": "def delete(self, *args, **kwargs):\n    api = Api()\n    api.authenticate()\n    api.delete_video(self.video_id)\n    return super(Video, self).delete(*args, **kwargs)", "docstring": "Deletes the video from youtube\n\nRaises:\nOperationError", "source": "codesearchnet"}
{"code": "def SetDecryptedStreamSize(self, decrypted_stream_size):\n    if self._is_open:\n        raise IOError('Already open.')\n    if (decrypted_stream_size < 0):\n        raise ValueError('Invalid decrypted stream size: {0:d} value out of bounds.'.format(decrypted_stream_size))\n    self._decrypted_stream_size = decrypted_stream_size", "docstring": "Sets the decrypted stream size.\n\nThis function is used to set the decrypted stream size if it can be\ndetermined separately.\n\nArgs:\ndecrypted_stream_size (int): size of the decrypted stream in bytes.\n\nRaises:\nIOError: if the file-like object is already open.\nOSError: if the file-like object is already open.\nValueError: if the decrypted stream size is invalid.", "source": "codesearchnet"}
{"code": "def range(cls, start=None, stop=None, step=None, inclusive=False):\n\n    def sign(x):\n        'Inner function for determining the sign of a float\\n            '\n        return ((- 1), 1)[(x >= 0)]\n    if (not step):\n        raise ValueError('Null step')\n    if isinstance(stop, timedelta):\n        stop = (start + stop)\n    if (sign((stop - start).total_seconds()) != sign(step.total_seconds())):\n        raise ValueError('start/stop order not coherent with step')\n    date = start\n    if (step.total_seconds() > 0):\n        oper = ('__le__' if inclusive else '__lt__')\n    else:\n        oper = ('__ge__' if inclusive else '__gt__')\n    while getattr(date, oper)(stop):\n        (yield date)\n        date += step", "docstring": "Generator of a date range\n\nArgs:\nstart (Date):\nstop (Date or datetime.timedelta)!\nstep (timedelta):\nKeyword Args:\ninclusive (bool): If ``False``, the stopping date is not included.\nThis is the same behavior as the built-in :py:func:`range`.\nYield:\nDate:", "source": "codesearchnet"}
{"code": "def _buckets(data, bucket_count=None):\n  \n  \n  import tensorflow.compat.v1 as tf\n  if bucket_count is None:\n    bucket_count = summary_v2.DEFAULT_BUCKET_COUNT\n  with tf.name_scope('buckets', values=[data, bucket_count]), \\\n       tf.control_dependencies([tf.assert_scalar(bucket_count),\n                                tf.assert_type(bucket_count, tf.int32)]):\n    data = tf.reshape(data, shape=[-1])  \n    data = tf.cast(data, tf.float64)\n    is_empty = tf.equal(tf.size(input=data), 0)\n\n    def when_empty():\n      return tf.constant([], shape=(0, 3), dtype=tf.float64)\n\n    def when_nonempty():\n      min_ = tf.reduce_min(input_tensor=data)\n      max_ = tf.reduce_max(input_tensor=data)\n      range_ = max_ - min_\n      is_singular = tf.equal(range_, 0)\n\n      def when_nonsingular():\n        bucket_width = range_ / tf.cast(bucket_count, tf.float64)\n        offsets = data - min_\n        bucket_indices = tf.cast(tf.floor(offsets / bucket_width),\n                                 dtype=tf.int32)\n        clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)\n        one_hots = tf.one_hot(clamped_indices, depth=bucket_count)\n        bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0),\n                                dtype=tf.float64)\n        edges = tf.linspace(min_, max_, bucket_count + 1)\n        left_edges = edges[:-1]\n        right_edges = edges[1:]\n        return tf.transpose(a=tf.stack(\n            [left_edges, right_edges, bucket_counts]))\n\n      def when_singular():\n        center = min_\n        bucket_starts = tf.stack([center - 0.5])\n        bucket_ends = tf.stack([center + 0.5])\n        bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])\n        return tf.transpose(\n            a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))\n\n      return tf.cond(is_singular, when_singular, when_nonsingular)\n\n    return tf.cond(is_empty, when_empty, when_nonempty)", "docstring": "Create a TensorFlow op to group data into histogram buckets.\n\nArguments:\ndata: A `Tensor` of any shape. Must be castable to `float64`.\nbucket_count: Optional positive `int` or scalar `int32` `Tensor`.\nReturns:\nA `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is\na triple `[left_edge, right_edge, count]` for a single bucket.\nThe value of `k` is either `bucket_count` or `1` or `0`.", "source": "juraj-google-style"}
{"code": "def get_header(vcf_file_path):\n    \n    logger.info(\"Parsing header of file {0}\".format(vcf_file_path))\n    head = HeaderParser()\n    handle = get_vcf_handle(infile=vcf_file_path)\n    \n    for line in handle:\n        line = line.rstrip()\n        if line.startswith('\n            if line.startswith('\n                head.parse_meta_data(line)\n            else:\n                head.parse_header_line(line)\n        else:\n            break\n\n    handle.close()\n\n    return head", "docstring": "Parse the header and return a header object\n\nArgs:\nvcf_file_path(str): Path to vcf\n\nReturns:\nhead: A HeaderParser object", "source": "juraj-google-style"}
{"code": "def set_tensor_shapes(tensors, shapes):\n    if shapes:\n        tensor_names_to_tensor = {get_tensor_name(tensor): tensor for tensor in tensors}\n        for name, shape in shapes.items():\n            if name not in tensor_names_to_tensor:\n                raise ValueError(\"Invalid tensor '{}' found in tensor shapes map.\".format(name))\n            if shape is not None:\n                tensor = tensor_names_to_tensor[name]\n                try:\n                    tensor.set_shape(shape)\n                except ValueError as error:\n                    message = \"The shape of tensor '{0}' cannot be changed from {1} to {2}. {3}\".format(name, tensor.shape, shape, str(error))\n                    raise ValueError(message)", "docstring": "Sets Tensor shape for each tensor if the shape is defined.\n\nArgs:\ntensors: TensorFlow tensor.Tensor.\nshapes: Dict of strings representing input tensor names to list of\nintegers representing input shapes (e.g., {\"foo\": : [1, 16, 16, 3]}).\n\nRaises:\nValueError:\n`shapes` contains an invalid tensor.\n`shapes` contains an invalid shape for a valid tensor.", "source": "github-repos"}
{"code": "def iter_variants(self):\n    for variant in self.repository.iter_variants(self.resource):\n        (yield Variant(variant, context=self.context, parent=self))", "docstring": "Iterate over the variants within this package, in index order.\n\nReturns:\n`Variant` iterator.", "source": "codesearchnet"}
{"code": "def _on_notification_received(self, success, result, failure_reason):\n        \n        if not success:\n            self._logger.info(\"Notification received with failure failure_reason=%s\", failure_reason)\n\n        notification_id = (result['connection_handle'], result['attribute_handle'])\n\n        callback = None\n        with self.notification_callbacks_lock:\n            if notification_id in self.notification_callbacks:\n                callback, once = self.notification_callbacks[notification_id]\n\n                if once:\n                    del self.notification_callbacks[notification_id]\n\n        if callback is not None:\n            callback(result['value'])", "docstring": "Callback function called when a notification has been received.\nIt is executed in the baBLE working thread: should not be blocking.\n\nArgs:\nsuccess (bool): A bool indicating that the operation is successful or not\nresult (dict): The notification information\n- value (bytes): Data notified\nfailure_reason (any): An object indicating the reason why the operation is not successful (else None)", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    if (key != 'log_entry'):\n        raise errors.ParseError('Unable to parse record, unknown structure: {0:s}'.format(key))\n    event_data = BashHistoryEventData()\n    event_data.command = structure.command\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=structure.timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a record and produces a Bash history event.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): elements parsed from the file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "codesearchnet"}
{"code": "def complete(self, default_output=None):\n    \n    \n    \n    \n    if not self.async:\n      raise UnexpectedPipelineError(\n          'May only call complete() method for asynchronous pipelines.')\n    self._context.fill_slot(\n        self._pipeline_key, self.outputs.default, default_output)", "docstring": "Marks this asynchronous Pipeline as complete.\n\nArgs:\ndefault_output: What value the 'default' output slot should be assigned.\n\nRaises:\nUnexpectedPipelineError if the slot no longer exists or this method was\ncalled for a pipeline that is not async.", "source": "juraj-google-style"}
{"code": "def delete(self, filething=None):\n        \n\n        self.tags.clear()\n        self.save(filething, padding=lambda x: 0)", "docstring": "delete(filething=None)\n\nArgs:\nfilething (filething)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def handleresult(self, r):\n        \n        if r.status_code >= 400 and r.status_code < 500:\n            msg = r.json()\n            raise AuthenticationError(str(msg[\"code\"]) + \": \" + msg[\"msg\"] +\n                                      \" (\" + msg[\"ref\"] + \")\")\n        elif r.status_code > 300:\n            err = None\n            try:\n                msg = r.json()\n                err = ServerError(str(msg[\"code\"]) + \": \" + msg[\"msg\"] + \" (\" +\n                                  msg[\"ref\"] + \")\")\n            except:\n                raise ServerError(\n                    \"Server returned error, but did not give a valid error message\")\n            raise err\n        return r", "docstring": "Handles HTTP error codes for the given request\n\nRaises:\nAuthenticationError on the appropriate 4** errors\nServerError if the response is not an ok (2**)\n\nArguments:\nr -- The request result", "source": "juraj-google-style"}
{"code": "def _ExtractRequestSummaryFields(self, request, error=None):\n    headers = request.headers\n    summary_fields = {'server': request.get_full_url(), 'contentRange': headers['Content-range'], 'contentLength': headers['Content-length']}\n    if error:\n        summary_fields['isError'] = True\n        summary_fields['errorMessage'] = error.reason\n    else:\n        summary_fields['isError'] = False\n    return summary_fields", "docstring": "Extract fields used in the summary logs.\n\nArgs:\nrequest:  a urllib2.Request instance configured to make the request.\n[optional]\nerror: a urllib2.HttpError instance used to retrieve error details.\n\nReturns:\nA dict containing the fields to be output in the summary logs.", "source": "codesearchnet"}
{"code": "def cap17(msg):\n    allbds = ['05', '06', '07', '08', '09', '0A', '20', '21', '40', '41', '42', '43', '44', '45', '48', '50', '51', '52', '53', '54', '55', '56', '5F', '60', 'NA', 'NA', 'E1', 'E2']\n    d = hex2bin(data(msg))\n    idx = [i for (i, v) in enumerate(d[:28]) if (v == '1')]\n    capacity = [('BDS' + allbds[i]) for i in idx if (allbds[i] is not 'NA')]\n    return capacity", "docstring": "Extract capacities from BDS 1,7 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nlist: list of suport BDS codes", "source": "codesearchnet"}
{"code": "def cancel(self, **kwargs):\n        \n        path = '%s/%s/cancel' % (self.manager.path, self.get_id())\n        self.manager.gitlab.http_post(path)", "docstring": "Cancel the job.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabJobCancelError: If the job could not be canceled", "source": "juraj-google-style"}
{"code": "def put(cls, obj):\n    return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))", "docstring": "Put an object in the Plasma store and wrap it in this object.\n\nArgs:\nobj: The object to be put.\n\nReturns:\nA `RayRemotePartition` object.", "source": "codesearchnet"}
{"code": "def set_bias(self, bias):\n        \n        self.x_offset += (bias - self._bias)\n        self._bias = bias\n\n        self._build_cdict()", "docstring": "Adjusts the image bias.\n\nBias determines where the color changes start.  At low bias, low\nintensities (i.e., low pixel values) will have non-zero color\ndifferences, while at high bias only high pixel values will have\nnon-zero differences\n\nArgs:\nbias: float\nA number between 0 and 1.  Note that upon initialization the\ncolormap has a default bias of 0.5.\n\nReturns: void", "source": "juraj-google-style"}
{"code": "def set_key(self, structure_prefix, key_line):\n    self._empty = False\n    key_value = self._remove_structure_prefix(structure_prefix, key_line)\n    if '=' in key_value:\n        key, value = key_value.split('=', 1)\n        self.current_key = key\n        if key in self.known_keys:\n            self.known_keys[key].append(value)\n        else:\n            self.unknown_keys[key].append(key_value)", "docstring": "Sets the current key for the instrumentation block.\n\nFor unknown keys, the key is added to the value list in order to\nbetter contextualize the value in the output.\n\nArgs:\nstructure_prefix: string, the structure prefix that was matched\nand that needs to be removed.\nkey_line: string, the raw instrumentation output line that contains\nthe key-value pair.", "source": "github-repos"}
{"code": "def _splitGenoSlidingWindow(self,size=5e4,step=None,minSnps=1.,maxSnps=SP.inf):\n        \n        if step is None:    step = 0.5*size\n        chroms  = SP.unique(self.chrom)\n        wnd_pos       = []\n        idx_wnd_start = []\n        nSnps         = []\n        wnd_i = 0\n\n        nSnps = []\n        for chrom_i in chroms:\n            start = 0\n            Ichrom = self.chrom==chrom_i\n            idx_chrom_start = SP.where(Ichrom)[0][0]\n            pos_chr = self.pos[Ichrom]\n            pos_chr_max = pos_chr.max()\n            while 1:\n                if start>pos_chr_max: break\n                end = start+size\n                Ir = (self.pos>=start)*(self.pos<end)\n                _nSnps = Ir.sum()\n                if _nSnps>minSnps and _nSnps<maxSnps:\n                    wnd_pos.append([chrom_i,start,start+size])\n                    nSnps.append(_nSnps)\n                    idx_wnd_start.append(idx_chrom_start+SP.where(Ir)[0][0])\n                    wnd_i+=1\n                start += step\n        self._wnd_pos = SP.array(wnd_pos)\n        self._idx_wnd_start = SP.array(idx_wnd_start)\n        self._nSnps = SP.array(nSnps)", "docstring": "split into windows using a slide criterion\nArgs:\nsize:       window size\nstep:       moving step (default: 0.5*size)\nminSnps:    only windows with nSnps>=minSnps are considered\nmaxSnps:    only windows with nSnps>=maxSnps are considered", "source": "juraj-google-style"}
{"code": "def global_horizontal_radiation(self, value=9999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `global_horizontal_radiation`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `global_horizontal_radiation`')\n    self._global_horizontal_radiation = value", "docstring": "Corresponds to IDD Field `global_horizontal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `global_horizontal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def JoinPath(stem='', *parts):\n    parts = [SmartUnicode(path) for path in parts]\n    result = (stem + NormalizePath(u'/'.join(parts))).replace('\n    result = result.rstrip('/')\n    return (result or '/')", "docstring": "A sane version of os.path.join.\n\nThe intention here is to append the stem to the path. The standard module\nremoves the path if the stem begins with a /.\n\nArgs:\nstem: The stem to join to.\n*parts: parts of the path to join. The first arg is always the root and\ndirectory traversal is not allowed.\n\nReturns:\na normalized path.", "source": "codesearchnet"}
{"code": "def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if data_format == 'channels_first' and dilation_rate != (1, 1):\n        force_transpose = True\n    else:\n        force_transpose = False\n    x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)\n    if data_format == 'channels_first' and tf_data_format == 'NHWC':\n        output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[1])\n    if output_shape[0] is None:\n        output_shape = (shape(x)[0],) + tuple(output_shape[1:])\n    if isinstance(output_shape, (tuple, list)):\n        output_shape = array_ops_stack.stack(list(output_shape))\n    padding = _preprocess_padding(padding)\n    if tf_data_format == 'NHWC':\n        strides = (1,) + strides + (1,)\n    else:\n        strides = (1, 1) + strides\n    if dilation_rate == (1, 1):\n        x = nn.conv2d_transpose(x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format)\n    else:\n        assert dilation_rate[0] == dilation_rate[1]\n        x = nn.atrous_conv2d_transpose(x, kernel, output_shape, rate=dilation_rate[0], padding=padding)\n    if data_format == 'channels_first' and tf_data_format == 'NHWC':\n        x = array_ops.transpose(x, (0, 3, 1, 2))\n    return x", "docstring": "2D deconvolution (i.e.\n\ntransposed convolution).\n\nArgs:\nx: Tensor or variable.\nkernel: kernel tensor.\noutput_shape: 1D int tensor for the output shape.\nstrides: strides tuple.\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\ndilation_rate: Tuple of 2 integers.\n\nReturns:\nA tensor, result of transposed 2D convolution.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.", "source": "github-repos"}
{"code": "def map_creative_and_association_feeds(self, creative_feed, creative_association_feed):\n    for creative in creative_feed:\n        creative['associations'] = [association for association in creative_association_feed if self._assignment_matches(creative, association)]", "docstring": "Maps creative association feed to the corresponding creative.\n\nCreative association is a child object to the creative, and there is a 1\ncreative to many creative association relationship. In Bulkdozer they are\nrepresented by two separate tab in the feed, and this method maps the\ncreatives to their respective creative association based on the creative ID.\n\nArgs:\ncreative_feed: Creative feed.\ncreative_association_feed: Creative association feed.", "source": "github-repos"}
{"code": "def get_read_write_resource_inputs(op):\n    reads = object_identity.ObjectIdentitySet()\n    writes = object_identity.ObjectIdentitySet()\n    if op.type in RESOURCE_READ_OPS:\n        reads.update((t for t in op.inputs if t.dtype == dtypes.resource))\n        return (reads, writes)\n    try:\n        read_only_input_indices = op.get_attr(READ_ONLY_RESOURCE_INPUTS_ATTR)\n    except ValueError:\n        writes.update((t for t in op.inputs if t.dtype == dtypes.resource))\n        return (reads, writes)\n    read_only_index = 0\n    for i, t in enumerate(op.inputs):\n        if op.inputs[i].dtype != dtypes.resource:\n            continue\n        if read_only_index < len(read_only_input_indices) and i == read_only_input_indices[read_only_index]:\n            reads.add(op.inputs[i])\n            read_only_index += 1\n        else:\n            writes.add(op.inputs[i])\n    return (reads, writes)", "docstring": "Returns a tuple of resource reads, writes in op.inputs.\n\nArgs:\nop: Operation\n\nReturns:\nA 2-tuple of ObjectIdentitySets, the first entry containing read-only\nresource handles and the second containing read-write resource handles in\n`op.inputs`.", "source": "github-repos"}
{"code": "def refer(self, text):\n        \n        data = self.reply(text)\n        data['refer_key'] = self['key']\n        return data", "docstring": "Refers current message and replys a new message\n\nArgs:\ntext(str): message content\n\nReturns:\nRTMMessage", "source": "juraj-google-style"}
{"code": "def get_optional_artifacts_per_task_id(upstream_artifacts):\n    optional_artifacts_per_task_id = {}\n    for artifact_definition in upstream_artifacts:\n        if (artifact_definition.get('optional', False) is True):\n            task_id = artifact_definition['taskId']\n            artifacts_paths = artifact_definition['paths']\n            add_enumerable_item_to_dict(dict_=optional_artifacts_per_task_id, key=task_id, item=artifacts_paths)\n    return optional_artifacts_per_task_id", "docstring": "Return every optional artifact defined in ``upstream_artifacts``, ordered by taskId.\n\nArgs:\nupstream_artifacts: the list of upstream artifact definitions\n\nReturns:\ndict: list of paths to downloaded artifacts ordered by taskId", "source": "codesearchnet"}
{"code": "def check_satpy(readers=None, writers=None, extras=None):\n    from satpy.readers import configs_for_reader\n    from satpy.writers import configs_for_writer\n    print('Readers')\n    print('=======')\n    for (reader, res) in sorted(check_yaml_configs(configs_for_reader(reader=readers), 'reader').items()):\n        print((reader + ': '), res)\n    print()\n    print('Writers')\n    print('=======')\n    for (writer, res) in sorted(check_yaml_configs(configs_for_writer(writer=writers), 'writer').items()):\n        print((writer + ': '), res)\n    print()\n    print('Extras')\n    print('======')\n    module_names = (extras if (extras is not None) else ('cartopy', 'geoviews'))\n    for (module_name, res) in sorted(_check_import(module_names).items()):\n        print((module_name + ': '), res)\n    print()", "docstring": "Check the satpy readers and writers for correct installation.\n\nArgs:\nreaders (list or None): Limit readers checked to those specified\nwriters (list or None): Limit writers checked to those specified\nextras (list or None): Limit extras checked to those specified\n\nReturns: bool\nTrue if all specified features were successfully loaded.", "source": "codesearchnet"}
{"code": "def cluster_spec(self):\n    task_list = []\n    self._gpu_allocation = []\n    self._cluster_allocation = {}\n    for host, num_tasks in sorted(self._task_configuration.items()):\n        for port_offset, gpu_offset in zip(range(num_tasks), range(0, self._gpus_per_node, self._gpus_per_task)):\n            host_addr = '%s:%d' % (host, self._port_base + port_offset)\n            task_list.append(host_addr)\n            gpu_id_list = []\n            for gpu_id in range(gpu_offset, gpu_offset + self._gpus_per_task):\n                gpu_id_list.append(str(gpu_id))\n            self._gpu_allocation.append(','.join(gpu_id_list))\n    cluster_rank_offset_start = 0\n    cluster_rank_offset_end = 0\n    for task_type, num_tasks in sorted(self._jobs.items()):\n        cluster_rank_offset_end = cluster_rank_offset_start + num_tasks\n        self._cluster_allocation[task_type] = task_list[cluster_rank_offset_start:cluster_rank_offset_end]\n        if cluster_rank_offset_start <= self._rank < cluster_rank_offset_end:\n            self.task_type = task_type\n            self.task_id = self._rank - cluster_rank_offset_start\n        cluster_rank_offset_start = cluster_rank_offset_end\n    if self._auto_set_gpu:\n        os.environ['CUDA_VISIBLE_DEVICES'] = self._gpu_allocation[self._rank]\n    return ClusterSpec(self._cluster_allocation)", "docstring": "Returns a ClusterSpec object based on the latest instance group info.\n\nThis returns a ClusterSpec object for use based on information from the\nspecified initialization parameters and Slurm environment variables. The\ncluster specification is resolved each time this function is called. The\nresolver extract hostnames of nodes by scontrol and pack tasks in that\norder until a node a has number of tasks that is equal to specification.\nGPUs on nodes are allocated to tasks by specification through setting\nCUDA_VISIBLE_DEVICES environment variable.\n\nReturns:\nA ClusterSpec containing host information retrieved from Slurm's\nenvironment variables.", "source": "github-repos"}
{"code": "def create_keyvault(access_token, subscription_id, rgname, vault_name, location, template_deployment=True, tenant_id=None, object_id=None):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API])\n    if (tenant_id is None):\n        ret = list_tenants(access_token)\n        tenant_id = ret['value'][0]['tenantId']\n    access_policies = [{'tenantId': tenant_id, 'objectId': object_id, 'permissions': {'keys': ['get', 'create', 'delete', 'list', 'update', 'import', 'backup', 'restore', 'recover'], 'secrets': ['get', 'list', 'set', 'delete', 'backup', 'restore', 'recover'], 'certificates': ['get', 'list', 'delete', 'create', 'import', 'update', 'managecontacts', 'getissuers', 'listissuers', 'setissuers', 'deleteissuers', 'manageissuers', 'recover'], 'storage': ['get', 'list', 'delete', 'set', 'update', 'regeneratekey', 'setsas', 'listsas', 'getsas', 'deletesas']}}]\n    vault_properties = {'tenantId': tenant_id, 'sku': {'family': 'A', 'name': 'standard'}, 'enabledForTemplateDeployment': template_deployment, 'accessPolicies': access_policies}\n    vault_body = {'location': location, 'properties': vault_properties}\n    body = json.dumps(vault_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a new key vault in the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nvault_name (str): Name of the new key vault.\nlocation (str): Azure data center location. E.g. westus2.\ntemplate_deployment (boolean): Whether to allow deployment from template.\ntenant_id (str): Optionally specify a tenant ID (otherwise picks first response) from\nist_tenants().\nobject_id (str): Optionally specify an object ID representing user or principal for the\naccess policy.\n\nReturns:\nHTTP response. JSON body of key vault properties.", "source": "codesearchnet"}
{"code": "def read_index(fn):\n    index = None\n    with open(fn, 'rb') as i_file:\n        if (i_file.read(len(_CHECK_STRING)) != _CHECK_STRING):\n            raise ValueError('{}: not a valid index file'.format(fn))\n        index = pd.read_csv(io.StringIO(zlib.decompress(i_file.read()).decode(encoding='utf-8')))\n    return index", "docstring": "Reads index from file.\n\nArgs:\nfn (str): the name of the file containing the index.\n\nReturns:\npandas.DataFrame: the index of the file.\n\nBefore reading the index, we check the first couple of bytes to see if it\nis a valid index file.", "source": "codesearchnet"}
{"code": "def remove_padding_from_sc(value_in_checkpoint: tensor.Tensor, variable_shape: tuple[int, int]) -> tensor.Tensor:\n    checkpoint_value_shape = value_in_checkpoint.shape.as_list()\n    is_init_value_padded = all([i >= j for i, j in zip(checkpoint_value_shape, variable_shape)])\n    if not is_init_value_padded:\n        return value_in_checkpoint\n    begin = [0] * len(checkpoint_value_shape)\n    return array_ops.slice(value_in_checkpoint, begin=begin, size=variable_shape)", "docstring": "Removes padding, if any, from sparsecore checkpoint.\n\nArgs:\nvalue_in_checkpoint: input tensor value, usually from checkpoint.\nvariable_shape: Expected shape of tensor after removing padding.\n\nReturns:\nA slice of the input tensor to match the variable_shape if the\nvariable shape is a valid slice if the input tensor.", "source": "github-repos"}
{"code": "def chglog(amend: bool = False, stage: bool = False, next_version: str = None, auto_next_version: bool = False):\n    \n    changed_files = CTX.repo.changed_files()\n    changelog_file_path: Path = config.CHANGELOG_FILE_PATH()\n    changelog_file_name = changelog_file_path.name\n    if changelog_file_name in changed_files:\n        LOGGER.error('changelog has changed; cannot update it')\n        exit(-1)\n    _chglog(amend, stage, next_version, auto_next_version)", "docstring": "Writes the changelog\n\nArgs:\namend: amend last commit with changes\nstage: stage changes\nnext_version: indicates next version\nauto_next_version: infer next version from VCS", "source": "juraj-google-style"}
{"code": "def from_dict(cls, video_processor_dict: Dict[str, Any], **kwargs):\n    video_processor_dict = video_processor_dict.copy()\n    return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n    if 'size' in kwargs and 'size' in video_processor_dict:\n        video_processor_dict['size'] = kwargs.pop('size')\n    if 'crop_size' in kwargs and 'crop_size' in video_processor_dict:\n        video_processor_dict['crop_size'] = kwargs.pop('crop_size')\n    video_processor = cls(**video_processor_dict)\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(video_processor, key):\n            setattr(video_processor, key, value)\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    logger.info(f'Video processor {video_processor}')\n    if return_unused_kwargs:\n        return (video_processor, kwargs)\n    else:\n        return video_processor", "docstring": "Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.\n\nArgs:\nvideo_processor_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the video processor object. Such a dictionary can be\nretrieved from a pretrained checkpoint by leveraging the\n[`~video_processing_utils.VideoProcessorBase.to_dict`] method.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the video processor object.\n\nReturns:\n[`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those\nparameters.", "source": "github-repos"}
{"code": "def IsSynced(self):\n    if (Blockchain.Default().Height == 0):\n        return False\n    if (int(((100 * self._current_height) / Blockchain.Default().Height)) < 100):\n        return False\n    else:\n        return True", "docstring": "Check if wallet is synced.\n\nReturns:\nbool: True if wallet is synced.", "source": "codesearchnet"}
{"code": "def change_password(username, new_password):\n    assert (username in passwd_reader.load_users()), (\"Username '%s' not found!\" % username)\n    sh.ftpasswd('--change-password', passwd=True, name=username, stdin=True, file=settings.LOGIN_FILE, _in=new_password)\n    reload_configuration()", "docstring": "Change password for given `username`.\n\nArgs:\nusername (str): User's name.\nnew_password (str): User's new password.", "source": "codesearchnet"}
{"code": "def format(self, record):\n    if ((not FLAGS['showprefixforinfo'].value) and (FLAGS['verbosity'].value == converter.ABSL_INFO) and (record.levelno == logging.INFO) and (_absl_handler.python_handler.stream == sys.stderr)):\n        prefix = ''\n    else:\n        prefix = get_absl_log_prefix(record)\n    return (prefix + super(PythonFormatter, self).format(record))", "docstring": "Appends the message from the record to the results of the prefix.\n\nArgs:\nrecord: logging.LogRecord, the record to be formatted.\n\nReturns:\nThe formatted string representing the record.", "source": "codesearchnet"}
{"code": "def command_factory(command):\n\n    def communicate(body={}, root_dir=None):\n        'Communicate with the daemon.\\n\\n        This function sends a payload to the daemon and returns the unpickled\\n        object sent by the daemon.\\n\\n        Args:\\n            body (dir): Any other arguments that should be put into the payload.\\n            root_dir (str): The root directory in which we expect the daemon.\\n                            We need this to connect to the daemons socket.\\n        Returns:\\n            function: The returned payload.\\n        '\n        client = connect_socket(root_dir)\n        body['mode'] = command\n        if ('func' in body):\n            del body['func']\n        data_string = pickle.dumps(body, (- 1))\n        client.send(data_string)\n        response = receive_data(client)\n        return response\n    return communicate", "docstring": "A factory which returns functions for direct daemon communication.\n\nThis factory will create a function which sends a payload to the daemon\nand returns the unpickled object which is returned by the daemon.\n\nArgs:\ncommand (string): The type of payload this should be. This determines\nas what kind of instruction this will be interpreted by the daemon.\nReturns:\nfunction: The created function.", "source": "codesearchnet"}
{"code": "def read_until(self, s, echo=None):\n        \n\n        s_len = len(s)\n        buf = self.read(s_len, echo)\n\n        while buf[-s_len:] != s:\n            buf += self.read(1, echo)\n\n        return buf", "docstring": "Read until a certain string is encountered..\n\nArgs:\ns(bytes): The string to wait for.\necho(bool): Whether to write the read data to stdout.\n\nReturns:\nbytes: The data up to and including *s*.\n\nRaises:\nEOFError: If the channel was closed.", "source": "juraj-google-style"}
{"code": "def assign(self, variable, value):\n    variable.assign(value)", "docstring": "Assign a value to a variable.\n\nThis should be used in optimizers instead of `variable.assign(value)` to\nsupport backend specific optimizations.\nNote that the variable can be a model variable or an optimizer variable;\nit can be a backend native variable or a Keras variable.\n\nArgs:\nvariable: The variable to update.\nvalue: The value to add to the variable.", "source": "github-repos"}
{"code": "def add_cell_argument(self, name, help, required=False):\n    \n\n    for action in self._actions:\n      if action.dest == name:\n        raise ValueError('Arg \"%s\" was added by add_argument already.' % name)\n\n    self._cell_args[name] = {'required': required, 'help': help}", "docstring": "Add a cell only argument.\n\nArgs:\nname: name of the argument. No need to start with \"-\" or \"--\".\nhelp: the help string of the argument.\nrequired: Whether it is required in cell content.", "source": "juraj-google-style"}
{"code": "def silu(x):\n    if any_symbolic_tensors((x,)):\n        return Silu().symbolic_call(x)\n    return backend.nn.silu(x)", "docstring": "Sigmoid Linear Unit (SiLU) activation function, also known as Swish.\n\nThe SiLU activation function is computed by the sigmoid function multiplied\nby its input. It is defined as `f(x) = x * sigmoid(x)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])\n>>> keras.ops.sigmoid(x)\narray([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)\n>>> keras.ops.silu(x)\narray([-0.0148357, 0.7310586, 0.0, 0.7310586, 5.9851646], dtype=float32)", "source": "github-repos"}
{"code": "def to_dict(self, drop_null=True, camel=False):\n\n    def to_dict(obj, drop_null, camel):\n        'Recursively constructs the dict.'\n        if isinstance(obj, (Body, BodyChild)):\n            obj = obj.__dict__\n        if isinstance(obj, dict):\n            data = {}\n            for (attr, val) in six.iteritems(obj):\n                if camel:\n                    attr = _snake_to_camel(attr)\n                valid_null = (isinstance(val, bool) or (val == 0) or (val and to_dict(val, drop_null, camel)))\n                if ((not drop_null) or (drop_null and valid_null)):\n                    data[attr] = to_dict(val, drop_null, camel)\n            return data\n        elif isinstance(obj, list):\n            data = []\n            for val in obj:\n                valid_null = (isinstance(val, bool) or (val == 0) or (val and to_dict(val, drop_null, camel)))\n                if ((not drop_null) or (drop_null and valid_null)):\n                    data.append(to_dict(val, drop_null, camel))\n            return data\n        else:\n            return obj\n    return to_dict(self, drop_null, camel)", "docstring": "Serialize self as dict.\n\nArgs:\ndrop_null: bool, default True. Remove 'empty' attributes.\ncamel: bool, default True. Convert keys to camelCase.\n\nReturn:\ndict: object params.", "source": "codesearchnet"}
{"code": "def count_star(session: Union[Session, Engine, Connection],\n               tablename: str,\n               *criteria: Any) -> int:\n    \n    \n    \n    query = select([func.count()]).select_from(table(tablename))\n    for criterion in criteria:\n        query = query.where(criterion)\n    return session.execute(query).scalar()", "docstring": "Returns the result of ``COUNT(*)`` from the specified table (with\nadditional ``WHERE`` criteria if desired).\n\nArgs:\nsession: SQLAlchemy :class:`Session`, :class:`Engine`, or\n:class:`Connection` object\ntablename: name of the table\ncriteria: optional SQLAlchemy \"where\" criteria\n\nReturns:\na scalar", "source": "juraj-google-style"}
{"code": "def ConvertToWireFormat(self, value):\n    output = _SerializeEntries(((python_format, wire_format, value.type_descriptor) for (python_format, wire_format) in value.wrapped_list))\n    return (b'', b'', output)", "docstring": "Convert to the wire format.\n\nArgs:\nvalue: is of type RepeatedFieldHelper.\n\nReturns:\nA wire format representation of the value.", "source": "codesearchnet"}
{"code": "def decode(self, ids, strip_extraneous=False):\n    \n    if strip_extraneous:\n      ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))\n    return \" \".join(self.decode_list(ids))", "docstring": "Transform a sequence of int ids into a human-readable string.\n\nEOS is not expected in ids.\n\nArgs:\nids: list of integers to be converted.\nstrip_extraneous: bool, whether to strip off extraneous tokens\n(EOS and PAD).\n\nReturns:\ns: human-readable string.", "source": "juraj-google-style"}
{"code": "def groups(self, group_type=None, filters=None, params=None):\n    group = self._tcex.ti.group(group_type)\n    for g in self.tc_requests.groups_from_tag(group, self.name, filters=filters, params=params):\n        (yield g)", "docstring": "Gets all groups from a tag.\n\nArgs:\nfilters:\nparams:\ngroup_type:", "source": "codesearchnet"}
{"code": "def Execute(self, http):\n    self._Execute(http)\n    for key in self.__request_response_handlers:\n        response = self.__request_response_handlers[key].response\n        callback = self.__request_response_handlers[key].handler\n        exception = None\n        if (response.status_code >= 300):\n            exception = exceptions.HttpError.FromResponse(response)\n        if (callback is not None):\n            callback(response, exception)\n        if (self.__callback is not None):\n            self.__callback(response, exception)", "docstring": "Execute all the requests as a single batched HTTP request.\n\nArgs:\nhttp: A httplib2.Http object to be used with the request.\n\nReturns:\nNone\n\nRaises:\nBatchError if the response is the wrong format.", "source": "codesearchnet"}
{"code": "def add_error(self, error, critical=False):\n    \n    self.errors.append((error, critical))", "docstring": "Adds an error to the state.\n\nArgs:\nerror: The text that will be added to the error list.\ncritical: If set to True and the error is checked with check_errors, will\ndfTimewolf will abort.", "source": "juraj-google-style"}
{"code": "def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):\n    loss_per_instance = self.fn_loss_per_instance(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals, update=update, reference=reference)\n    updated = self.memory.update_batch(loss_per_instance=loss_per_instance)\n    with tf.control_dependencies(control_inputs=(updated,)):\n        loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=0)\n        if ('losses' in self.summary_labels):\n            tf.contrib.summary.scalar(name='loss-without-regularization', tensor=loss)\n        losses = self.fn_regularization_losses(states=states, internals=internals, update=update)\n        if (len(losses) > 0):\n            loss += tf.add_n(inputs=[losses[name] for name in sorted(losses)])\n            if ('regularization' in self.summary_labels):\n                for name in sorted(losses):\n                    tf.contrib.summary.scalar(name=('regularization/' + name), tensor=losses[name])\n        if (('losses' in self.summary_labels) or ('total-loss' in self.summary_labels)):\n            tf.contrib.summary.scalar(name='total-loss', tensor=loss)\n        return loss", "docstring": "Creates the TensorFlow operations for calculating the full loss of a batch.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nactions: Dict of action tensors.\nterminal: Terminal boolean tensor.\nreward: Reward tensor.\nnext_states: Dict of successor state tensors.\nnext_internals: List of posterior internal state tensors.\nupdate: Boolean tensor indicating whether this call happens during an update.\nreference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\nLoss tensor.", "source": "codesearchnet"}
{"code": "def ReadClientMetadata(self, client_id):\n    \n    result = self.MultiReadClientMetadata([client_id])\n    try:\n      return result[client_id]\n    except KeyError:\n      raise UnknownClientError(client_id)", "docstring": "Reads the ClientMetadata record for a single client.\n\nArgs:\nclient_id: A GRR client id string, e.g. \"C.ea3b2b71840d6fa7\".\n\nReturns:\nAn rdfvalues.object.ClientMetadata object.\n\nRaises:\nUnknownClientError: if no client with corresponding id was found.", "source": "juraj-google-style"}
{"code": "def get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None):\n    graph = graph or ops.get_default_graph()\n\n    def _get_tensor(name):\n        return graph.get_tensor_by_name(ops.prepend_name_scope(name, import_scope=import_scope))\n    encoding = tensor_info.WhichOneof('encoding')\n    if encoding == 'name':\n        return _get_tensor(tensor_info.name)\n    elif encoding == 'coo_sparse':\n        return sparse_tensor.SparseTensor(_get_tensor(tensor_info.coo_sparse.indices_tensor_name), _get_tensor(tensor_info.coo_sparse.values_tensor_name), _get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name))\n    elif encoding == 'composite_tensor':\n        spec_proto = struct_pb2.StructuredValue(type_spec_value=tensor_info.composite_tensor.type_spec)\n        spec = nested_structure_coder.decode_proto(spec_proto)\n        components = [_get_tensor(component.name) for component in tensor_info.composite_tensor.components]\n        return nest.pack_sequence_as(spec, components, expand_composites=True)\n    else:\n        raise ValueError(f'Invalid TensorInfo.encoding: {encoding}. Expected `coo_sparse`, `composite_tensor`, or `name` for a dense tensor.')", "docstring": "Returns the Tensor or CompositeTensor described by a TensorInfo proto.\n\nArgs:\ntensor_info: A TensorInfo proto describing a Tensor or SparseTensor or\nCompositeTensor.\ngraph: The tf.Graph in which tensors are looked up. If None, the\ncurrent default graph is used.\nimport_scope: If not None, names in `tensor_info` are prefixed with this\nstring before lookup.\n\nReturns:\nThe Tensor or SparseTensor or CompositeTensor in `graph` described by\n`tensor_info`.\n\nRaises:\nKeyError: If `tensor_info` does not correspond to a tensor in `graph`.\nValueError: If `tensor_info` is malformed.", "source": "github-repos"}
{"code": "def write_bashrc(_path):\n    \n    cfg_mounts = CFG[\"container\"][\"mounts\"].value\n    cfg_prefix = CFG[\"container\"][\"prefixes\"].value\n\n    path.mkfile_uchroot(\"/etc/portage/bashrc\")\n    mounts = uchroot.mounts(\"mnt\", cfg_mounts)\n    p_paths, p_libs = uchroot.env(cfg_prefix)\n    paths, libs = uchroot.env(mounts)\n\n    paths = paths + p_paths\n    libs = libs + p_libs\n\n    with open(_path, 'w') as bashrc:\n        lines = .format(path.list_to_path(paths), path.list_to_path(libs))\n\n        bashrc.write(lines)", "docstring": "Write a valid gentoo bashrc file to :path:.\n\nArgs:\npath - The output path of the make.conf", "source": "juraj-google-style"}
{"code": "def read(self, x):\n    access_logits = self._address_content(x)\n    weights = tf.nn.softmax(access_logits)\n    retrieved_mem = tf.reduce_sum(tf.multiply(tf.expand_dims(weights, 3), tf.expand_dims(self.mem_vals, axis=1)), axis=2)\n    return (access_logits, retrieved_mem)", "docstring": "Read from the memory.\n\nAn external component can use the results via a simple MLP,\ne.g., fn(x W_x + retrieved_mem W_m).\n\nArgs:\nx: a tensor in the shape of [batch_size, length, depth].\nReturns:\naccess_logits: the logits for accessing the memory in shape of\n[batch_size, length, memory_size].\nretrieved_mem: the retrieved results in the shape of\n[batch_size, length, val_depth].", "source": "codesearchnet"}
{"code": "def is20(msg):\n    \n\n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    if d[0:8] != '00100000':\n        return False\n\n    cs = cs20(msg)\n\n    if '\n        return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 2,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def read_tree_nexus(nexus):\n    \n    if not isinstance(nexus, str):\n        raise TypeError(\"nexus must be a str\")\n    if nexus.lower().endswith('.gz'): \n        f = gopen(expanduser(nexus))\n    elif isfile(expanduser(nexus)): \n        f = open(expanduser(nexus))\n    else:\n        f = nexus.splitlines()\n    trees = dict()\n    for line in f:\n        if isinstance(line,bytes):\n            l = line.decode().strip()\n        else:\n            l = line.strip()\n        if l.lower().startswith('tree '):\n            i = l.index('='); left = l[:i].strip(); right = l[i+1:].strip()\n            name = ' '.join(left.split(' ')[1:])\n            trees[name] = read_tree_newick(right)\n    if hasattr(f,'close'):\n        f.close()\n    return trees", "docstring": "Read a tree from a Nexus string or file\n\nArgs:\n``nexus`` (``str``): Either a Nexus string or the path to a Nexus file (plain-text or gzipped)\n\nReturns:\n``dict`` of ``Tree``: A dictionary of the trees represented by ``nexus``, where keys are tree names (``str``) and values are ``Tree`` objects", "source": "juraj-google-style"}
{"code": "def parse_line(self, line):\n    line = line.lstrip()\n    toks = shlex.split(line)\n    cmd = toks[0]\n    arg = line[len(cmd):]\n    return (cmd, [arg])", "docstring": "Parser for the debugging shell.\n\nTreat everything after the first token as one literal entity. Whitespace\ncharacters between the first token and the next first non-whitespace\ncharacter are preserved.\n\nFor example, '  foo   dicj didiw  ' is parsed as\n( 'foo', '   dicj didiw  ' )\n\nReturns:\nA tuple (cmd, args), where the args is a list that consists of one\nand only one string containing everything after the cmd as is.", "source": "codesearchnet"}
{"code": "def stop_gradient(input_layer):\n  \n  if input_layer.is_sequence():\n    result = [tf.stop_gradient(t) for t in input_layer.sequence]\n    return input_layer.with_sequence(result)\n  else:\n    return tf.stop_gradient(input_layer)", "docstring": "Cuts off the gradient at this point.\n\nThis works on both sequence and regular Pretty Tensors.\n\nArgs:\ninput_layer: The input.\nReturns:\nA new Pretty Tensor of the same type with stop_gradient applied.", "source": "juraj-google-style"}
{"code": "def get(url, params={}):\n    request_url = url\n    if len(params):\n        request_url = '{}?{}'.format(url, urlencode(params))\n    try:\n        req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})\n        response = json.loads(urlopen(req).read().decode('utf-8'))\n        return response\n    except HTTPError as err:\n        raise MtgException(err.read())", "docstring": "Invoke an HTTP GET request on a url\n\nArgs:\nurl (string): URL endpoint to request\nparams (dict): Dictionary of url parameters\nReturns:\ndict: JSON response as a dictionary", "source": "codesearchnet"}
{"code": "def _pull_out_unaffected_blocks_lhs(lhs, rest, out_port, in_port):\n    (_, block_index) = lhs.index_in_block(out_port)\n    bs = lhs.block_structure\n    (nbefore, nblock, nafter) = (sum(bs[:block_index]), bs[block_index], sum(bs[(block_index + 1):]))\n    (before, block, after) = lhs.get_blocks((nbefore, nblock, nafter))\n    if ((before != cid(nbefore)) or (after != cid(nafter))):\n        outer_lhs = ((before + cid((nblock - 1))) + after)\n        inner_lhs = ((cid(nbefore) + block) + cid(nafter))\n        return (outer_lhs << Feedback.create(SeriesProduct.create(inner_lhs, *rest), out_port=out_port, in_port=in_port))\n    elif (block == cid(nblock)):\n        outer_lhs = ((before + cid((nblock - 1))) + after)\n        return (outer_lhs << Feedback.create(SeriesProduct.create(*rest), out_port=out_port, in_port=in_port))\n    raise CannotSimplify()", "docstring": "In a self-Feedback of a series product, where the left-most operand is\nreducible, pull all non-trivial blocks outside of the feedback.\n\nArgs:\nlhs (Circuit): The reducible circuit\nrest (tuple): The other SeriesProduct operands\nout_port (int): The feedback output port index\nin_port (int): The feedback input port index\n\nReturns:\nCircuit: The simplified circuit", "source": "codesearchnet"}
{"code": "def __init__(self, fail_on_unset: bool = False, default: str = 'none'):\n        \n        self.fail_on_unset = bool(fail_on_unset)\n        self.default = str(default)", "docstring": "Initializer.\nArgs:\nfail_on_unset (bool): If set to True an exception will be raised when the environment\nvariable is unset; otherwise the default value (see next) will be used instead.\ndefault (str): If a environment variable is unset, it will get this value instead.", "source": "juraj-google-style"}
{"code": "def addStreamHandler(self,lvl=20):\n        \n        sh = logging.StreamHandler(sys.stdout)\n        sh.setLevel(lvl)\n        sFrmt = logging.Formatter('%(message)s')\n        if False:\n            \n            sFrmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n        sh.setFormatter(sFrmt)\n        self.addHandler(sh)", "docstring": "This function will add a stream handler to a log with the provided level.\n\nArgs:\nlvl (int): The severity level of messages printed to the screen with\nthe stream handler, default = 20.", "source": "juraj-google-style"}
{"code": "def search(cls, session, queries):\n    return super(Customers, cls).search(session, queries, SearchCustomer)", "docstring": "Search for a customer given a domain.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nqueries (helpscout.models.Domain or iter): The queries for the\ndomain. If a ``Domain`` object is provided, it will simply be\nreturned. Otherwise, a ``Domain`` object will be generated\nfrom the complex queries. In this case, the queries should\nconform to the interface in\n:func:`helpscout.domain.Domain.from_tuple`.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.SearchCustomer):\nSearchCustomer iterator.", "source": "codesearchnet"}
{"code": "def codify(combination):\n    if (isinstance(combination, int) and ((combination < 0) or (combination >= LIMIT))):\n        raise errors.FlagError('Out-of-range flag-combination!')\n    codes = []\n    for enum in (Style, Color, Fill):\n        for flag in enum:\n            if (combination & flag):\n                codes.append(str(flag))\n    return ';'.join(codes)", "docstring": "Gets escape-codes for flag combinations.\n\nArguments:\ncombination (int): Either a single integer-convertible flag\nor an OR'd flag-combination.\nReturns:\nA semi-colon-delimited string of appropriate escape sequences.\n\nRaises:\nerrors.FlagError if the combination is out-of-range.", "source": "codesearchnet"}
{"code": "def _match_instance_against_type(self, left, other_type, subst, view):\n    if isinstance(other_type, abstract.LiteralClass):\n        other_value = other_type.value\n        if isinstance(left, abstract.ConcreteValue) and isinstance(other_value, abstract.ConcreteValue):\n            return subst if left.pyval == other_value.pyval else None\n        elif isinstance(left, abstract.Instance) and left.cls.is_enum and isinstance(other_value, abstract.Instance) and other_value.cls.is_enum:\n            names_match = left.name == other_value.name\n            clses_match = left.cls == other_value.cls\n            return subst if names_match and clses_match else None\n        else:\n            return None\n    elif isinstance(other_type, typed_dict.TypedDictClass):\n        if not self._match_dict_against_typed_dict(left, other_type):\n            return None\n        return subst\n    elif isinstance(other_type, abstract.ParameterizedClass) and isinstance(other_type.base_cls, typed_dict.TypedDictClass):\n        if not self._match_dict_against_typed_dict(left, other_type.base_cls):\n            return None\n        return self._match_instance_parameters(left.cls, left, other_type, subst, view)\n    elif isinstance(left.cls, typed_dict.TypedDictClass):\n        return self._match_typed_dict_against_dict(left, other_type, subst, view)\n    elif isinstance(other_type, (fiddle_overlay.BuildableBuilder, fiddle_overlay.BuildableType)):\n        return self._match_fiddle_instance(left.cls, left, other_type, subst, view)\n    elif isinstance(other_type, abstract.PyTDClass) and fiddle_overlay.is_fiddle_buildable_pytd(other_type.pytd_cls) and isinstance(left, fiddle_overlay.Buildable):\n        return self._match_fiddle_instance_against_bare_type(left.cls, left, other_type, subst, view)\n    elif isinstance(other_type, abstract.Class):\n        if not self._satisfies_noniterable_str(left.cls, other_type):\n            self._noniterable_str_error = error_types.NonIterableStrError(left.cls, other_type)\n            return None\n        base = self.match_from_mro(left.cls, other_type)\n        if base is None:\n            if other_type.is_protocol:\n                with self._track_partially_matched_protocols():\n                    return self._match_against_protocol(left, other_type, subst, view)\n            elif other_type.has_protocol_base():\n                return subst\n            return None\n        elif isinstance(base, abstract.AMBIGUOUS_OR_EMPTY):\n            return self._match_maybe_parameterized_instance(base, left, other_type, subst, view)\n        else:\n            return self._match_instance(base, left, other_type, subst, view)\n    elif isinstance(other_type, abstract.Empty):\n        return None\n    else:\n        raise NotImplementedError(f\"Can't match {left!r} against {other_type!r}\")", "docstring": "Checks whether an instance of a type is compatible with a (formal) type.\n\nArgs:\nleft: An instance of a type.\nother_type: A formal type. E.g. abstract.Class or abstract.Union.\nsubst: The current type parameter assignment.\nview: The current mapping of Variable to Value.\n\nReturns:\nA new type parameter assignment if the matching succeeded, None otherwise.", "source": "github-repos"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n        hidden_states = outputs[0]\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.model.variables['params']['shared']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n        else:\n            lm_logits = module.lm_head(hidden_states)\n        lm_logits += module.final_logits_bias.astype(self.dtype)\n        return (lm_logits, outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = 
unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxMarianMTModel\n\n>>> model = FlaxMarianMTModel.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=64, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def shift_by_n_processors(self, x, mesh_axis, offset, wrap):\n    n = self.shape[mesh_axis].size\n    source_pcoord = []\n    for i in xrange(n):\n        c = (i - offset)\n        if (c != (c % n)):\n            if wrap:\n                c = (c % n)\n            else:\n                c = None\n        source_pcoord.append(c)\n    return self.receive(x, mesh_axis, source_pcoord)", "docstring": "Receive the slice from processor pcoord - offset.\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer\noffset: an integer\nwrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.", "source": "codesearchnet"}
{"code": "def _wrap_usage_section(source, width):\n    \n    \n    if not any(len(line) > width for line in source.splitlines()):\n        return source\n    section_header = source[:source.index(':') + 1].strip()\n    lines = [section_header]\n    for commands, args in parse_commands(source):\n        command = '  {} '.format(' '.join(commands))\n        max_len = width - len(command)\n        sep = '\\n' + ' ' * len(command)\n        wrapped_args = sep.join(textwrap.wrap(' '.join(args), max_len))\n        full_command = command + wrapped_args\n        lines += full_command.splitlines()\n    return '\\n'.join(lines)", "docstring": "Wrap the given usage section string to the current terminal size.\n\nNote:\nCommands arguments are wrapped to the column that the arguments began\non the first line of the command.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "juraj-google-style"}
{"code": "def _find_all_hints_in_nodes(nodes):\n    func_calls = _collections.defaultdict(_LiteFuncCall)\n    for node in nodes:\n        attr = node.attr\n        if OpHint.FUNCTION_UUID_ATTR not in attr or not attr[OpHint.FUNCTION_UUID_ATTR].s:\n            continue\n        uuid = attr[OpHint.FUNCTION_UUID_ATTR].s\n        call_def = func_calls[uuid]\n        call_def.uuid = uuid\n        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s\n        call_def.level = attr[OpHint.FUNCTION_LEVEL_ATTR].i\n        sort = attr[OpHint.FUNCTION_SORT_INDEX_ATTR].i if OpHint.FUNCTION_SORT_INDEX_ATTR in attr else None\n        if sort == -1:\n            sort = None\n        aggregation = None\n        if OpHint.FUNCTION_AGGREGATE_ATTR in attr:\n            aggregation = _compat.as_text(attr[OpHint.FUNCTION_AGGREGATE_ATTR].s)\n        if OpHint.CHILDREN_INPUTS_MAPPINGS in attr:\n            call_def.children_inputs_mappings = _json.loads(_compat.as_text(attr[OpHint.CHILDREN_INPUTS_MAPPINGS].s))\n\n        def put_operand(stuff, index, sort, operand, aggregation):\n            \n            if sort is None:\n                stuff[index] = _LiteSingleOperand(operand)\n            else:\n                if index not in stuff:\n                    stuff[index] = _LiteAggregateOperand(aggregation)\n                stuff[index].add(sort, operand)\n        if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:\n            put_operand(call_def.inputs, attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i, sort, node, aggregation)\n        if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:\n            put_operand(call_def.outputs, attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i, sort, node, aggregation)\n        for a in attr:\n            if a.startswith('_tflite_attr_'):\n                call_def.params[a.replace('_tflite_attr_,', '')] = attr[a].tensor\n    return func_calls", "docstring": "Look at the all the input nodes and return a list of LiteFuncCall objs.\n\nArgs:\nnodes: A TensorFlow graph_def to look for LiteFuncCalls.\n\nReturns:\na list of `LifeFuncCall` objects in the form", "source": "github-repos"}
{"code": "def verify(self, byts, sign):\n    try:\n        chosen_hash = c_hashes.SHA256()\n        hasher = c_hashes.Hash(chosen_hash, default_backend())\n        hasher.update(byts)\n        digest = hasher.finalize()\n        self.publ.verify(sign, digest, c_ec.ECDSA(c_utils.Prehashed(chosen_hash)))\n        return True\n    except InvalidSignature:\n        logger.exception('Error in publ.verify')\n        return False", "docstring": "Verify the signature for the given bytes using the ECC\npublic key.\n\nArgs:\nbyts (bytes): The data bytes.\nsign (bytes): The signature bytes.\n\nReturns:\nbool: True if the data was verified, False otherwise.", "source": "codesearchnet"}
{"code": "def _count_and_gen_subtokens(token_counts, alphabet, subtoken_dict, max_subtoken_length):\n    subtoken_counts = collections.defaultdict(int)\n    for (token, count) in six.iteritems(token_counts):\n        token = _escape_token(token, alphabet)\n        subtokens = _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length)\n        start = 0\n        for subtoken in subtokens:\n            for end in xrange((start + 1), (len(token) + 1)):\n                new_subtoken = token[start:end]\n                subtoken_counts[new_subtoken] += count\n            start += len(subtoken)\n    return subtoken_counts", "docstring": "Count number of times subtokens appear, and generate new subtokens.\n\nArgs:\ntoken_counts: dict mapping tokens to the number of times they appear in the\noriginal files.\nalphabet: list of allowed characters. Used to escape the tokens, which\nguarantees that all tokens can be split into subtokens.\nsubtoken_dict: dict mapping subtokens to ids.\nmax_subtoken_length: maximum length of subtoken in subtoken_dict.\n\nReturns:\nA defaultdict mapping subtokens to the number of times they appear in the\ntokens. The dict may contain new subtokens.", "source": "codesearchnet"}
{"code": "def print_variant(variant_line, outfile=None, silent=False):\n    \n    variant_line = variant_line.rstrip()\n    if not variant_line.startswith('\n        if outfile:\n            outfile.write(variant_line+'\\n')\n        else:\n            if not silent:\n                print(variant_line)\n    return", "docstring": "Print a variant.\n\nIf a result file is provided the variante will be appended to the file,\notherwise they are printed to stdout.\n\nArgs:\nvariants_file (str): A string with the path to a file\noutfile (FileHandle): An opened file_handle\nsilent (bool): Bool. If nothing should be printed.", "source": "juraj-google-style"}
{"code": "def id_to_int(cls, _id: Union[int, ObjectId]) -> int:\n    if isinstance(_id, int):\n        return _id\n    ints = struct.unpack('>III', _id.binary)\n    return (ints[0] << 64) + (ints[1] << 32) + ints[2]", "docstring": "Args:\n_id: ObjectId required for each MongoDB document _id field.\n\nReturns: Converted integer value of ObjectId's 12 bytes binary value.", "source": "github-repos"}
{"code": "def drop_dimension(self, dimensions):\n        \n        dimensions = [dimensions] if np.isscalar(dimensions) else dimensions\n        dims = [d for d in self.kdims if d not in dimensions]\n        dim_inds = [self.get_dimension_index(d) for d in dims]\n        key_getter = itemgetter(*dim_inds)\n        return self.clone([(key_getter(k), v) for k, v in self.data.items()],\n                          kdims=dims)", "docstring": "Drops dimension(s) from keys\n\nArgs:\ndimensions: Dimension(s) to drop\n\nReturns:\nClone of object with with dropped dimension(s)", "source": "juraj-google-style"}
{"code": "def normalize(x, axis=-1, order=2, epsilon=None):\n    if any_symbolic_tensors((x,)):\n        return Normalize(axis=axis, order=order, epsilon=epsilon).symbolic_call(x)\n    return _normalize(x, axis=axis, order=order, epsilon=epsilon)", "docstring": "Normalizes `x` over the specified axis.\n\nIt is defined as: `normalize(x) = x / max(norm(x), epsilon)`.\n\nArgs:\nx: Input tensor.\naxis: The axis or axes along which to perform normalization.\nDefault to -1.\norder: The exponent value in the norm formulation.\nDefaults to 2.\nepsilon: A lower bound value for the norm.\nDefaults to `backend.epsilon()`.\n\nReturns:\nThe normalized array.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])\n>>> x_norm = keras.ops.math.normalize(x)\n>>> print(x_norm)\narray([[0.26726124 0.5345225  0.8017837 ]\n[0.45584232 0.5698029  0.68376344]], shape=(2, 3), dtype=float32)", "source": "github-repos"}
{"code": "def bundle_apps(self, bundle_name, bundle_apps):\n        \n        bundle_file = os.path.join(\n            self.app_path, self.args.outdir, '{}-bundle.zip'.format(bundle_name)\n        )\n        z = zipfile.ZipFile(bundle_file, 'w')\n        for app in bundle_apps:\n            \n            self.package_data['bundle'].append(\n                {'action': 'Adding App:', 'output': os.path.basename(app)}\n            )\n            z.write(app, os.path.basename(app))\n\n        \n        self.package_data['bundle'].append(\n            {'action': 'Created Bundle:', 'output': os.path.basename(bundle_file)}\n        )\n        z.close()", "docstring": "Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file.\n\nArgs:\nbundle_name (str): The output name of the bundle zip file.\nbundle_apps (list): A list of Apps to include in the bundle.", "source": "juraj-google-style"}
{"code": "def get_numpy_to_framework_fn(arr) -> Callable:\n    if isinstance(arr, np.ndarray):\n        return np.array\n    if is_tf_available() and is_tf_tensor(arr):\n        import tensorflow as tf\n        return tf.convert_to_tensor\n    if is_torch_available() and is_torch_tensor(arr):\n        import torch\n        return torch.tensor\n    if is_flax_available() and is_jax_tensor(arr):\n        import jax.numpy as jnp\n        return jnp.array\n    raise ValueError(f'Cannot convert arrays of type {type(arr)}')", "docstring": "Returns a function that converts a numpy array to the framework of the input array.\n\nArgs:\narr (`np.ndarray`): The array to convert.", "source": "github-repos"}
{"code": "def _jvp_helper_wrapper(op_name, attr_tuple, inputs, outputs, tangents, use_batch):\n    if use_batch:\n        for primal, tangent in zip(inputs, tangents):\n            if not tangent.shape.is_compatible_with([None] + primal.shape):\n                raise ValueError('Tangent {} was expected to be of shape {} but is instead of shape {}'.format(tangent, [None] + primal.shape, tangent.shape))\n        return control_flow_ops.vectorized_map(functools.partial(_jvp_helper, op_name, attr_tuple, inputs, outputs), tangents)\n    return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents)", "docstring": "Computes a batch of Jacobian-vector product for an op.\n\nArgs:\nop_name: A string, the type of operation being executed.\nattr_tuple: Attributes of the operation.\ninputs: A flat list of input Tensors to the operation.\noutputs: A flat list of output Tensors from the operation.\ntangents: A flat list of Tensors, compatible with shape `[None] +\ninput_shape`.\nuse_batch: A bool, True to vetorize over batch of tangents of shape `[None]\n+ input_shape`.\n\nReturns:\nA flat list of tangents compatible with `outputs`\nor `[None] + output_shape`.\n\nRaises:\nValueError: if tangent shapes are not compatible with input shapes.", "source": "github-repos"}
{"code": "def full_like(x, fill_value, dtype=None):\n    if any_symbolic_tensors((x, fill_value)):\n        return FullLike(dtype=dtype).symbolic_call(x, fill_value)\n    return backend.numpy.full_like(x, fill_value, dtype=dtype)", "docstring": "Return a full tensor with the same shape and type as the given tensor.\n\nArgs:\nx: Input tensor.\nfill_value: Fill value.\ndtype: Overrides data type of the result.\n\nReturns:\nTensor of `fill_value` with the same shape and type as `x`.", "source": "github-repos"}
{"code": "def _view_options(self):\n    return {'window_mapping_fn': self._window_mapping_fn, 'coder': self._windowed_coder()}", "docstring": "Internal options corresponding to specific view.\n\nIntended for internal use by runner implementations.\n\nReturns:\nTuple of options for the given view.", "source": "github-repos"}
{"code": "def merge_sites(self, tol=0.01, mode=\"sum\"):\n        \n        mode = mode.lower()[0]\n        from scipy.spatial.distance import squareform\n        from scipy.cluster.hierarchy import fcluster, linkage\n\n        d = self.distance_matrix\n        np.fill_diagonal(d, 0)\n        clusters = fcluster(linkage(squareform((d + d.T) / 2)),\n                            tol, 'distance')\n        sites = []\n        for c in np.unique(clusters):\n            inds = np.where(clusters == c)[0]\n            species = self[inds[0]].species\n            coords = self[inds[0]].frac_coords\n            props = self[inds[0]].properties\n            for n, i in enumerate(inds[1:]):\n                sp = self[i].species\n                if mode == \"s\":\n                    species += sp\n                offset = self[i].frac_coords - coords\n                coords = coords + ((offset - np.round(offset)) / (n + 2)).astype(\n                    coords.dtype)\n                for key in props.keys():\n                    if props[key] is not None and self[i].properties[key] != props[key]:\n                        if mode  == 'a' and isinstance(props[key], float):\n                            \n                            props[key] = props[key]*(n+1)/(n+2) + self[i].properties[key]/(n+2)\n                        else:\n                            props[key] = None\n                            warnings.warn(\"Sites with different site property %s are merged. \"\n                                        \"So property is set to none\" % key)\n            sites.append(PeriodicSite(species, coords, self.lattice, properties=props))\n\n        self._sites = sites", "docstring": "Merges sites (adding occupancies) within tol of each other.\nRemoves site properties.\n\nArgs:\ntol (float): Tolerance for distance to merge sites.\nmode (str): Three modes supported. \"delete\" means duplicate sites are\ndeleted. \"sum\" means the occupancies are summed for the sites.\n\"average\" means that the site is deleted but the properties are averaged\nOnly first letter is considered.", "source": "juraj-google-style"}
{"code": "def buckets_delete(self, bucket):\n    \n    url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)\n    google.datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,\n                                      raw_response=True)", "docstring": "Issues a request to delete a bucket.\n\nArgs:\nbucket: the name of the bucket.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def _get_validation_labels(val_path):\n    labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)\n    with tf.io.gfile.GFile(labels_path) as labels_f:\n        labels = labels_f.read().strip().split('\\n')\n    with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:\n        tar = tarfile.open(mode='r:', fileobj=tar_f_obj)\n        images = sorted(tar.getnames())\n    return dict(zip(images, labels))", "docstring": "Returns labels for validation.\n\nArgs:\nval_path: path to TAR file containing validation images. It is used to\nretrieve the name of pictures and associate them to labels.\n\nReturns:\ndict, mapping from image name (str) to label (str).", "source": "codesearchnet"}
{"code": "def apply_grad_processors(opt, gradprocs):\n    \n    assert isinstance(gradprocs, (list, tuple)), gradprocs\n    for gp in gradprocs:\n        assert isinstance(gp, GradientProcessor), gp\n\n    class _ApplyGradientProcessor(ProxyOptimizer):\n        def __init__(self, opt, gradprocs):\n            self._gradprocs = gradprocs[:]\n            super(_ApplyGradientProcessor, self).__init__(opt)\n\n        def apply_gradients(self, grads_and_vars,\n                            global_step=None, name=None):\n            g = self._apply(grads_and_vars)\n            return self._opt.apply_gradients(g, global_step, name)\n\n        def _apply(self, g):\n            for proc in self._gradprocs:\n                g = proc.process(g)\n            return g\n\n    return _ApplyGradientProcessor(opt, gradprocs)", "docstring": "Wrapper around optimizers to apply gradient processors.\n\nArgs:\nopt (tf.train.Optimizer):\ngradprocs (list[GradientProcessor]): gradient processors to add to the\noptimizer.\n\nReturns:\na :class:`tf.train.Optimizer` instance which runs the gradient\nprocessors before updating the variables.", "source": "juraj-google-style"}
{"code": "def get_intersection(self, range_):\n        \n        result = []\n\n        for entry in self.entries:\n            package, value = entry\n\n            if value is None:\n                continue  \n\n            if package.version not in range_:\n                continue\n\n            if isinstance(value, list):\n                variants = value\n                entry_ = _PackageEntry(package, variants, self.solver)\n                result.append(entry_)\n                continue\n\n            \n            if self.solver.package_filter:\n                rule = self.solver.package_filter.excludes(package)\n                if rule:\n                    if config.debug_package_exclusions:\n                        print_debug(\"Package '%s' was excluded by rule '%s'\"\n                                    % (package.qualified_name, str(rule)))\n                    entry[1] = None\n                    continue\n\n            \n            if self.solver.package_load_callback:\n                self.solver.package_load_callback(package)\n\n            variants_ = []\n            for var in package.iter_variants():\n                variant = PackageVariant(var, self.solver.building)\n                variants_.append(variant)\n\n            entry[1] = variants_\n            entry_ = _PackageEntry(package, variants_, self.solver)\n            result.append(entry_)\n\n        return result or None", "docstring": "Get a list of variants that intersect with the given range.\n\nArgs:\nrange_ (`VersionRange`): Package version range.\n\nReturns:\nList of `_PackageEntry` objects.", "source": "juraj-google-style"}
{"code": "def convert_adaptive_max_pool2d(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting adaptive_avg_pool2d...')\n\n    if names == 'short':\n        tf_name = 'APOL' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    global_pool = keras.layers.GlobalMaxPooling2D(data_format='channels_first', name=tf_name)\n    layers[scope_name] = global_pool(layers[inputs[0]])\n\n    def target_layer(x):\n        import keras\n        return keras.backend.expand_dims(x)\n\n    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + 'E')\n    layers[scope_name] = lambda_layer(layers[scope_name])  \n    layers[scope_name] = lambda_layer(layers[scope_name])", "docstring": "Convert convert_adaptive_max_pool2d layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def get_optimizer_group(self, param: Optional[Union[str, torch.nn.parameter.Parameter]]=None):\n    if self.optimizer is None:\n        raise ValueError('Trainer optimizer is None, please make sure you have setup the optimizer before.')\n    if param is not None:\n        for group in self.optimizer.param_groups:\n            if param in group['params']:\n                return group\n    return [group['params'] for group in self.optimizer.param_groups]", "docstring": "Returns optimizer group for a parameter if given, else returns all optimizer groups for params.\n\nArgs:\nparam (`str` or `torch.nn.parameter.Parameter`, *optional*):\nThe parameter for which optimizer group needs to be returned.", "source": "github-repos"}
{"code": "def __init__(self, api, path, options):\n    \n    self._init(api, path, options)", "docstring": "Initialize.\n\nArgs:\napi: storage_api instance.\npath: bucket path of form '/bucket'.\noptions: a dict of listbucket options. Please see listbucket doc.", "source": "juraj-google-style"}
{"code": "def _MultipleModulesFoundError(path, candidates):\n  \n  assert len(candidates) > 1\n  params = [path] + _StripCommonPathPrefix(candidates[:2])\n  if len(candidates) == 2:\n    fmt = ERROR_LOCATION_MULTIPLE_MODULES_3\n  else:\n    fmt = ERROR_LOCATION_MULTIPLE_MODULES_4\n    params.append(str(len(candidates) - 2))\n  return fmt, params", "docstring": "Generates an error message to be used when multiple matches are found.\n\nArgs:\npath: The breakpoint location path that the user provided.\ncandidates: List of paths that match the user provided path. Must\ncontain at least 2 entries (throws AssertionError otherwise).\n\nReturns:\nA (format, parameters) tuple that should be used in the description\nfield of the breakpoint error status.", "source": "juraj-google-style"}
{"code": "def save_aggregate_reports_to_kafka(self, aggregate_reports,\n                                        aggregate_topic):\n        \n        if (type(aggregate_reports) == dict or\n           type(aggregate_reports) == OrderedDict):\n            aggregate_reports = [aggregate_reports]\n\n        if len(aggregate_reports) < 1:\n            return\n\n        for report in aggregate_reports:\n            report['date_range'] = self.generate_daterange(report)\n            report = self.strip_metadata(report)\n\n            for slice in report['records']:\n                slice['date_range'] = report['date_range']\n                slice['org_name'] = report['org_name']\n                slice['org_email'] = report['org_email']\n                slice['policy_published'] = report['policy_published']\n                slice['report_id'] = report['report_id']\n                logger.debug(\"Sending slice.\")\n                try:\n                    logger.debug(\"Saving aggregate report to Kafka\")\n                    self.producer.send(aggregate_topic, slice)\n                except UnknownTopicOrPartitionError:\n                    raise KafkaError(\n                        \"Kafka error: Unknown topic or partition on broker\")\n                except Exception as e:\n                    raise KafkaError(\n                        \"Kafka error: {0}\".format(e.__str__()))\n                try:\n                    self.producer.flush()\n                except Exception as e:\n                    raise KafkaError(\n                        \"Kafka error: {0}\".format(e.__str__()))", "docstring": "Saves aggregate DMARC reports to Kafka\n\nArgs:\naggregate_reports (list):  A list of aggregate report dictionaries\nto save to Kafka\naggregate_topic (str): The name of the Kafka topic", "source": "juraj-google-style"}
{"code": "def _isbn_pairing(items):\n    NameWrapper = namedtuple('NameWrapper', ['name', 'obj'])\n    metas = map((lambda x: NameWrapper(_just_name(x.filename), x)), filter((lambda x: isinstance(x, MetadataFile)), items))\n    ebooks = map((lambda x: NameWrapper(_just_name(x.filename), x)), filter((lambda x: isinstance(x, EbookFile)), items))\n    metas = sorted(metas, key=(lambda x: x.name))\n    ebooks = sorted(ebooks, key=(lambda x: x.name), reverse=True)\n    while metas:\n        meta = metas.pop()\n        if (not isbn_validator.is_valid_isbn(meta.name)):\n            continue\n        if (not ebooks):\n            break\n        ebook_index = _index(ebooks, meta.name, key=(lambda x: x.name))\n        if (ebook_index >= 0):\n            logger.debug((\"Pairing '%s' and '%s'.\" % (meta.obj.filename, ebooks[ebook_index].obj.filename)))\n            items.append(DataPair(metadata_file=meta.obj, ebook_file=ebooks[ebook_index].obj))\n            items.remove(meta.obj)\n            items.remove(ebooks[ebook_index].obj)\n            ebooks = ebooks[(ebook_index + 1):]\n    return items", "docstring": "Pair `items` with same ISBN into `DataPair` objects.\n\nArgs:\nitems (list): list of items, which will be searched.\n\nReturns:\nlist: list with paired items. Paired items are removed, `DataPair` is \\\nadded instead.", "source": "codesearchnet"}
{"code": "def main(args=None):\n    \n    parser = get_parser()\n    args = parser.parse_args(args=args)\n\n    if not (args.matrix or args.dependencies or args.treemap or args.graph):\n        args.matrix = True\n\n    \n    packages = []\n    for arg in args.packages:\n        if ',' in arg:\n            for package in arg.split(','):\n                if package not in packages:\n                    packages.append(package)\n        elif arg not in packages:\n            packages.append(arg)\n\n    \n    depth = args.depth\n    if depth is None:\n        depth = guess_depth(packages)\n\n    \n    output = args.output\n    if isinstance(output, str):\n        output = open(output, 'w')\n\n    dsm = DSM(*packages, build_tree=True, build_dependencies=True,\n              enforce_init=not args.greedy)\n\n    if dsm.empty:\n        return 1\n\n    indent = args.indent\n    if indent is None:\n        if args.format == CSV:\n            indent = 0\n        else:\n            indent = 2\n    elif indent < 0 and args.format == JSON:\n        \n        indent = None\n\n    try:\n        if args.dependencies:\n            dsm.print(format=args.format, output=output, indent=indent)\n        elif args.matrix:\n            dsm.print_matrix(format=args.format, output=output,\n                             depth=depth, indent=indent)\n        elif args.treemap:\n            dsm.print_treemap(format=args.format, output=output)\n        elif args.graph:\n            dsm.print_graph(format=args.format, output=output,\n                            depth=depth, indent=indent)\n    except BrokenPipeError:\n        \n        return 2\n\n    return 0", "docstring": "Main function.\n\nThis function is the command line entry point.\n\nArgs:\nargs (list of str): the arguments passed to the program.\n\nReturns:\nint: return code being 0 (OK), 1 (dsm empty) or 2 (error).", "source": "juraj-google-style"}
{"code": "def authenticate(self, request, username=None, password=None):\n        \n\n        if not isinstance(username, str):\n            return None\n\n        \n        username = re.sub(r'\\W', '', username)\n\n        krb_ticket = self.get_kerberos_ticket(username, password)\n\n        if krb_ticket == \"reset\":\n            user, status = User.objects.get_or_create(username=\"RESET_PASSWORD\", user_type=\"service\", id=999999)\n            return user\n\n        if not krb_ticket:\n            return None\n        else:\n            logger.debug(\"Authentication successful\")\n            try:\n                user = User.objects.get(username__iexact=username)\n            except User.DoesNotExist:\n                return None\n            return user", "docstring": "Authenticate a username-password pair.\n\nCreates a new user if one is not already in the database.\n\nArgs:\nusername\nThe username of the `User` to authenticate.\npassword\nThe password of the `User` to authenticate.\n\nReturns:\n`User`", "source": "juraj-google-style"}
{"code": "def graphviz_imshow(self, ax=None, figsize=None, dpi=300, fmt=\"png\", **kwargs):\n        \n        graph = self.get_graphviz(**kwargs)\n        graph.format = fmt\n        graph.attr(dpi=str(dpi))\n        \n        _, tmpname = tempfile.mkstemp()\n        path = graph.render(tmpname, view=False, cleanup=True)\n        ax, fig, _ = get_ax_fig_plt(ax=ax, figsize=figsize, dpi=dpi)\n        import matplotlib.image as mpimg\n        ax.imshow(mpimg.imread(path, format=\"png\")) \n        ax.axis(\"off\")\n\n        return fig", "docstring": "Generate flow graph in the DOT language and plot it with matplotlib.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nfigsize: matplotlib figure size (None to use default)\ndpi: DPI value.\nfmt: Select format for output image\n\nReturn: matplotlib Figure", "source": "juraj-google-style"}
{"code": "def start_after(self, document_fields):\n    query = query_mod.Query(self)\n    return query.start_after(document_fields)", "docstring": "Start query after a cursor with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.start_after` for\nmore information on this method.\n\nArgs:\ndocument_fields (Union[~.firestore_v1beta1.\\\ndocument.DocumentSnapshot, dict, list, tuple]): a document\nsnapshot or a dictionary/list/tuple of fields representing a\nquery results cursor. A cursor is a collection of values that\nrepresent a position in a query result set.\n\nReturns:\n~.firestore_v1beta1.query.Query: A query with cursor.", "source": "codesearchnet"}
{"code": "def _AddCampaignsToGroup(client, campaign_group_id, campaign_ids):\n  \n  \n  campaign_service = client.GetService('CampaignService', version='v201809')\n\n  \n  operations = [{\n      'operator': 'SET',\n      'operand': {\n          'id': campaign_id,\n          'campaignGroupId': campaign_group_id\n      }\n  } for campaign_id in campaign_ids]\n\n  campaign_service.mutate(operations)\n\n  \n  print ('The following campaign IDs were added to the campaign group with ID '\n         '\"%d\":\\n\\t%s' % (campaign_group_id, campaign_ids))", "docstring": "Adds multiple campaigns to a campaign group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_group_id: an integer ID for the campaign group.\ncampaign_ids: a list of integer IDs for campaigns.", "source": "juraj-google-style"}
{"code": "def auto_plot_array(*, video_min_num_frames: int=15, height: None | int | tuple[int, int]=(100, 250), show_images_kwargs: Optional[dict[str, Any]]=None, show_videos_kwargs: Optional[dict[str, Any]]=None) -> None:\n    ipython = IPython.get_ipython()\n    if ipython is None:\n        return\n    array_repr_html_fn = functools.partial(array_repr_html, video_min_num_frames=video_min_num_frames, height=height, show_images_kwargs=show_images_kwargs, show_videos_kwargs=show_videos_kwargs)\n    print('Display big np/tf/jax arrays as image for nicer IPython display')\n    formatter = ipython.display_formatter.formatters['text/html']\n    try:\n        jnp = enp.lazy.jnp\n    except ImportError:\n        pass\n    else:\n        jax_array_cls = type(jnp.zeros(shape=()))\n        formatter.for_type(jax_array_cls, array_repr_html_fn)\n    try:\n        tf = enp.lazy.tf\n    except ImportError:\n        pass\n    else:\n        formatter.for_type(tf.Tensor, array_repr_html_fn)\n    try:\n        torch = enp.lazy.torch\n    except ImportError:\n        pass\n    else:\n        formatter.for_type(torch.Tensor, array_repr_html_fn)\n    formatter.for_type(enp.lazy.np.ndarray, array_repr_html_fn)", "docstring": "If called, 2d/3d imgage arrays will be plotted as images in colab/jupyter.\n\nUsage:\n\n>>> ecolab.auto_plot_array()\n>>> np.zeros((28, 28, 3))  # Displayed as image\n\nArgs:\nvideo_min_num_frames: Video `(num_frames, h, w, c)` with less than this\nnumber of frames will be displayed as individual images\nheight: `(min, max)` image height in pixels. Images smaller/larger will be\nreshaped. `None` to disable. If a single number, assume `min == max`.\nshow_images_kwargs: Kwargs forwarded to `mediapy.show_images`\nshow_videos_kwargs: Kwargs forwarded to `mediapy.show_videos`", "source": "github-repos"}
{"code": "def from_key(cls, *args):\n    key = (args if (len(args) > 1) else args[0])\n    return cls._instances.get(key, None)", "docstring": "Return flyweight object with specified key, if it has already been created.\n\nReturns:\ncls or None: Previously constructed flyweight object with given\nkey or None if key not found", "source": "codesearchnet"}
{"code": "def fix_variables(self, fixed):\n    for (v, val) in fixed.items():\n        self.fix_variable(v, val)", "docstring": "Fix the value of the variables and remove it from a binary quadratic model.\n\nArgs:\nfixed (dict):\nA dictionary of variable assignments.\n\nExamples:\n>>> bqm = dimod.BinaryQuadraticModel({'a': -.5, 'b': 0., 'c': 5}, {('a', 'b'): -1}, 0.0, dimod.SPIN)\n>>> bqm.fix_variables({'a': -1, 'b': +1})", "source": "codesearchnet"}
{"code": "def resolve_type(arg):\n    arg_type = type(arg)\n    if (arg_type == list):\n        assert isinstance(arg, list)\n        sample = arg[:min(4, len(arg))]\n        tentative_type = TentativeType()\n        for sample_item in sample:\n            tentative_type.add(resolve_type(sample_item))\n        return ListType(tentative_type)\n    elif (arg_type == set):\n        assert isinstance(arg, set)\n        sample = []\n        iterator = iter(arg)\n        for i in range(0, min(4, len(arg))):\n            sample.append(next(iterator))\n        tentative_type = TentativeType()\n        for sample_item in sample:\n            tentative_type.add(resolve_type(sample_item))\n        return SetType(tentative_type)\n    elif (arg_type == FakeIterator):\n        assert isinstance(arg, FakeIterator)\n        sample = []\n        iterator = iter(arg)\n        for i in range(0, min(4, len(arg))):\n            sample.append(next(iterator))\n        tentative_type = TentativeType()\n        for sample_item in sample:\n            tentative_type.add(resolve_type(sample_item))\n        return IteratorType(tentative_type)\n    elif (arg_type == tuple):\n        assert isinstance(arg, tuple)\n        sample = list(arg[:min(10, len(arg))])\n        return TupleType([resolve_type(sample_item) for sample_item in sample])\n    elif (arg_type == dict):\n        assert isinstance(arg, dict)\n        key_tt = TentativeType()\n        val_tt = TentativeType()\n        for (i, (k, v)) in enumerate(iteritems(arg)):\n            if (i > 4):\n                break\n            key_tt.add(resolve_type(k))\n            val_tt.add(resolve_type(v))\n        return DictType(key_tt, val_tt)\n    else:\n        return type(arg)", "docstring": "Resolve object to one of our internal collection types or generic built-in type.\n\nArgs:\narg: object to resolve", "source": "codesearchnet"}
{"code": "def to_obj(self, wd=False, pack=False, relpath=None):\n    obj = CommentedMap()\n    if pack:\n        obj['run'] = self.orig\n    elif (relpath is not None):\n        if self.from_url:\n            obj['run'] = self.run\n        else:\n            obj['run'] = os.path.relpath(self.run, relpath)\n    elif wd:\n        if self.from_url:\n            obj['run'] = self.run\n        else:\n            obj['run'] = os.path.basename(self.run)\n    else:\n        obj['run'] = self.run\n    obj['in'] = self.step_inputs\n    obj['out'] = self.output_names\n    if self.is_scattered:\n        obj['scatter'] = self.scattered_inputs\n        if (self.scatter_method is not None):\n            obj['scatterMethod'] = self.scatter_method\n    return obj", "docstring": "Return the step as an dict that can be written to a yaml file.\n\nReturns:\ndict: yaml representation of the step.", "source": "codesearchnet"}
{"code": "def artifact_bundles(self):\n    if (not self.__artifact_bundles):\n        self.__artifact_bundles = ArtifactBundles(self.__connection)\n    return self.__artifact_bundles", "docstring": "Gets the Artifact Bundles API client.\n\nReturns:\nArtifactBundles:", "source": "codesearchnet"}
{"code": "def enclosure_groups(self):\n    if (not self.__enclosure_groups):\n        self.__enclosure_groups = EnclosureGroups(self.__connection)\n    return self.__enclosure_groups", "docstring": "Gets the EnclosureGroups API client.\n\nReturns:\nEnclosureGroups:", "source": "codesearchnet"}
{"code": "def step(self, action):\n    \n    observ, reward, done, info = self._env.step(action)\n    observ = self._convert_observ(observ)\n    reward = self._convert_reward(reward)\n    return observ, reward, done, info", "docstring": "Forward action to the wrapped environment.\n\nArgs:\naction: Action to apply to the environment.\n\nRaises:\nValueError: Invalid action.\n\nReturns:\nConverted observation, converted reward, done flag, and info object.", "source": "juraj-google-style"}
{"code": "def set_weight_collections(self, weight_collections):\n    self._weight_collections = weight_collections", "docstring": "Sets the weight collections for the layer.\n\nArgs:\nweight_collections: A list of collection names to which the Variable will\nbe added.", "source": "github-repos"}
{"code": "def find_all(container):\n    if isinstance(container, dict):\n        names = container.keys()\n    else:\n        names = dir(container)\n    built_context = BasicContext()\n    for name in names:\n        if name.startswith('_'):\n            continue\n        if isinstance(container, dict):\n            obj = container[name]\n        else:\n            obj = getattr(container, name)\n        if (isinstance(container, dict) and isinstance(obj, str)):\n            built_context[name] = obj\n        elif (hasattr(obj, 'metadata') and isinstance(getattr(obj, 'metadata'), AnnotatedMetadata)):\n            built_context[name] = obj\n    return built_context", "docstring": "Find all annotated function inside of a container.\n\nAnnotated functions are identified as those that:\n- do not start with a _ character\n- are either annotated with metadata\n- or strings that point to lazily loaded modules\n\nArgs:\ncontainer (object): The container to search for annotated functions.\n\nReturns:\ndict: A dict with all of the found functions in it.", "source": "codesearchnet"}
{"code": "def complete(command_line, current_token, position, shell: arg(choices=('bash', 'fish'))):\n    position = int(position)\n    tokens = shlex.split(command_line[:position])\n    (all_argv, run_argv, command_argv) = run.partition_argv(tokens[1:])\n    run_args = run.parse_args(run_argv)\n    module = run_args.get('commands_module')\n    module = (module or DEFAULT_COMMANDS_MODULE)\n    module = normalize_path(module)\n    try:\n        collection = Collection.load_from_module(module)\n    except Exception:\n        collection = {}\n    found_command = (find_command(collection, tokens) or run)\n    if current_token:\n        if current_token.startswith('-'):\n            if (current_token not in found_command.option_map):\n                print_command_options(found_command, current_token)\n        else:\n            print_commands(collection, shell)\n            path = os.path.expanduser(current_token)\n            path = os.path.expandvars(path)\n            paths = glob.glob(('%s*' % path))\n            if paths:\n                for entry in paths:\n                    if os.path.isdir(entry):\n                        print(('%s/' % entry))\n                    else:\n                        print(entry)\n    else:\n        option = found_command.option_map.get(tokens[(- 1)])\n        if (option and option.takes_value):\n            if option.choices:\n                for choice in option.choices:\n                    print(choice)\n            else:\n                for entry in os.listdir():\n                    if os.path.isdir(entry):\n                        print(('%s/' % entry))\n                    else:\n                        print(entry)\n        else:\n            print_command_options(found_command)\n            print_commands(collection, shell)", "docstring": "Find completions for current command.\n\nThis assumes that we'll handle all completion logic here and that\nthe shell's automatic file name completion is disabled.\n\nArgs:\ncommand_line: Command line\ncurrent_token: Token at cursor\nposition: Current cursor position\nshell: Name of shell", "source": "codesearchnet"}
{"code": "def check_against_mro(ctx: 'context.Context', target: '_base.BaseValue', class_spec: '_instance_base.SimpleValue') -> bool | None:\n    classes = []\n    ambiguous = flatten(class_spec, classes)\n    for c in classes:\n        if ctx.matcher(None).match_from_mro(target, c, allow_compat_builtins=False):\n            return True\n    return None if ambiguous else False", "docstring": "Check if any of the classes are in the target's MRO.\n\nArgs:\nctx: The abstract context.\ntarget: A BaseValue whose MRO will be checked.\nclass_spec: A Class or PythonConstant tuple of classes (i.e. the second\nargument to isinstance or issubclass).\n\nReturns:\nTrue if any class in classes is found in the target's MRO,\nFalse if no match is found and None if it's ambiguous.", "source": "github-repos"}
{"code": "def GetNewSessionID(self, **_):\n    return rdfvalue.SessionID(base='aff4:/hunts', queue=self.runner_args.queue)", "docstring": "Returns a random integer session ID for this hunt.\n\nAll hunts are created under the aff4:/hunts namespace.\n\nReturns:\na formatted session id string.", "source": "codesearchnet"}
{"code": "def set(self, context_id, address_value_list):\n    if (context_id not in self._contexts):\n        LOGGER.warning('Context_id not in contexts, %s', context_id)\n        return False\n    context = self._contexts.get(context_id)\n    add_value_dict = {}\n    for d in address_value_list:\n        for (add, val) in d.items():\n            if (not self.address_is_valid(address=add)):\n                raise AuthorizationException(address=add)\n            add_value_dict[add] = val\n    context.set_direct(add_value_dict)\n    return True", "docstring": "Within a context, sets addresses to a value.\n\nArgs:\ncontext_id (str): the context id returned by create_context\naddress_value_list (list): list of {address: value} dicts\n\nReturns:\n(bool): True if the operation is successful, False if\nthe context_id doesn't reference a known context.\n\nRaises:\nAuthorizationException if an address is given in the\naddress_value_list that was not in the original\ntransaction's outputs, or was under a namespace but the\ncharacters after the namespace are not valid address\ncharacters.", "source": "codesearchnet"}
{"code": "def __parse_tonodes(self, text, **kwargs):\n        \n        n = self.options.get('nbest', 1)\n\n        try:\n            if self._KW_BOUNDARY in kwargs:\n                patt = kwargs.get(self._KW_BOUNDARY, '.')\n                tokens = list(self.__split_pattern(text, patt))\n                text = ''.join([t[0] for t in tokens])\n\n                btext = self.__str2bytes(text)\n                self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n\n                bpos = 0\n                self.__mecab.mecab_lattice_set_boundary_constraint(\n                    self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)\n\n                for (token, match) in tokens:\n                    bpos += 1\n                    if match:\n                        mark = self.MECAB_INSIDE_TOKEN\n                    else:\n                        mark = self.MECAB_ANY_BOUNDARY\n\n                    for _ in range(1, len(self.__str2bytes(token))):\n                        self.__mecab.mecab_lattice_set_boundary_constraint(\n                            self.lattice, bpos, mark)\n                        bpos += 1\n                    self.__mecab.mecab_lattice_set_boundary_constraint(\n                        self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)\n            elif self._KW_FEATURE in kwargs:\n                features = kwargs.get(self._KW_FEATURE, ())\n                fd = {morph: self.__str2bytes(feat) for morph, feat in features}\n\n                tokens = self.__split_features(text, [e[0] for e in features])\n                text = ''.join([t[0] for t in tokens])\n\n                btext = self.__str2bytes(text)\n                self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n\n                bpos = 0\n                for chunk, match in tokens:\n                    c = len(self.__str2bytes(chunk))\n                    if match:\n                        self.__mecab.mecab_lattice_set_feature_constraint(\n                            self.lattice, bpos, bpos+c, fd[chunk])\n                    bpos += c\n            else:\n                btext = self.__str2bytes(text)\n                self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n\n            self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)\n\n            for _ in range(n):\n                check = self.__mecab.mecab_lattice_next(self.lattice)\n                if n == 1 or check:\n                    nptr = self.__mecab.mecab_lattice_get_bos_node(self.lattice)\n                    while nptr != self.__ffi.NULL:\n                        \n                        if nptr.stat != MeCabNode.BOS_NODE:\n                            raws = self.__ffi.string(\n                                nptr.surface[0:nptr.length])\n                            surf = self.__bytes2str(raws).strip()\n\n                            if 'output_format_type' in self.options or \\\n                               'node_format' in self.options:\n                                sp = self.__mecab.mecab_format_node(\n                                    self.tagger, nptr)\n                                if sp != self.__ffi.NULL:\n                                    rawf = self.__ffi.string(sp)\n                                else:\n                                    err = self.__mecab.mecab_strerror(\n                                            self.tagger)\n                                    err = self.__bytes2str(\n                                            self.__ffi.string(err))\n                                    
msg = self._ERROR_NODEFORMAT.format(\n                                            surf, err)\n                                    raise MeCabError(msg)\n                            else:\n                                rawf = self.__ffi.string(nptr.feature)\n                            feat = self.__bytes2str(rawf).strip()\n\n                            mnode = MeCabNode(nptr, surf, feat)\n                            yield mnode\n                        nptr = getattr(nptr, 'next')\n        except GeneratorExit:\n            logger.debug('close invoked on generator')\n        except MeCabError:\n            raise\n        except:\n            err = self.__mecab.mecab_lattice_strerror(self.lattice)\n            logger.error(self.__bytes2str(self.__ffi.string(err)))\n            raise MeCabError(self.__bytes2str(self.__ffi.string(err)))", "docstring": "Builds and returns the MeCab function for parsing to nodes using\nmorpheme boundary constraints.\n\nArgs:\nformat_feature: flag indicating whether or not to format the feature\nvalue for each node yielded.\n\nReturns:\nA function which returns a Generator, tailored to using boundary\nconstraints and parsing as nodes, using either the default or\nN-best behavior.", "source": "juraj-google-style"}
{"code": "def parse(self, stream, parser=None):\n    (force, parsers) = self._get_parsers(parser)\n    try:\n        stream.seek(0)\n        lookup = stream.read(1024)\n        stream.seek(0)\n    except (io.UnsupportedOperation, AttributeError):\n        lookup = None\n    for p in parsers:\n        if p.hook(path=self.path, force=force, lookup=lookup):\n            (self.meta, self.terms, self.imports, self.typedefs) = p.parse(stream)\n            self._parsed_by = p.__name__\n            break", "docstring": "Parse the given file using available `BaseParser` instances.\n\nRaises:\nTypeError: when the parser argument is not a string or None.\nValueError: when the parser argument is a string that does\nnot name a `BaseParser`.", "source": "codesearchnet"}
{"code": "def run(self, data):\n        \n        result_type = namedtuple('Result', 'code messages')\n\n        if self.passes is True:\n            result = result_type(Checker.Code.PASSED, '')\n        elif self.passes is False:\n            if self.allow_failure:\n                result = result_type(Checker.Code.IGNORED, '')\n            else:\n                result = result_type(Checker.Code.FAILED, '')\n        else:\n            try:\n                result = self.check(data, **self.arguments)\n                messages = ''\n                if isinstance(result, tuple):\n                    result, messages = result\n\n                if result not in Checker.Code:\n                    result = Checker.Code.PASSED if bool(result) else Checker.Code.FAILED\n\n                if result == Checker.Code.FAILED and self.allow_failure:\n                    result = Checker.Code.IGNORED\n\n                result = result_type(result, messages)\n            except NotImplementedError:\n                result = result_type(Checker.Code.NOT_IMPLEMENTED, '')\n        self.result = result", "docstring": "Run the check method and format the result for analysis.\n\nArgs:\ndata (DSM/DMM/MDM): DSM/DMM/MDM instance to check.\n\nReturns:\ntuple (int, str): status constant from Checker class and messages.", "source": "juraj-google-style"}
{"code": "def get_data_for_sensors(macs=[], search_duratio_sec=5, bt_device=''):\n    log.info('Get latest data for sensors. Stop with Ctrl+C.')\n    log.info('Stops automatically in %ss', search_duratio_sec)\n    log.info('MACs: %s', macs)\n    datas = dict()\n    for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec, bt_device=bt_device):\n        datas[new_data[0]] = new_data[1]\n    return datas", "docstring": "Get lates data for sensors in the MAC's list.\n\nArgs:\nmacs (array): MAC addresses\nsearch_duratio_sec (int): Search duration in seconds. Default 5\nbt_device (string): Bluetooth device id\nReturns:\ndict: MAC and state of found sensors", "source": "codesearchnet"}
{"code": "def log_run_info(self, model_name):\n    \n    run_info = {\n        \"model_name\": model_name,\n        \"machine_config\": {},\n        \"run_date\": datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)}\n    _collect_tensorflow_info(run_info)\n    _collect_tensorflow_environment_variables(run_info)\n    _collect_cpu_info(run_info)\n    _collect_gpu_info(run_info)\n    _collect_memory_info(run_info)\n\n    with tf.gfile.GFile(os.path.join(\n        self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), \"w\") as f:\n      try:\n        json.dump(run_info, f)\n        f.write(\"\\n\")\n      except (TypeError, ValueError) as e:\n        tf.logging.warning(\"Failed to dump benchmark run info to log file: %s\",\n                           e)", "docstring": "Collect most of the TF runtime information for the local env.\n\nThe schema of the run info follows official/benchmark/datastore/schema.\n\nArgs:\nmodel_name: string, the name of the model.", "source": "juraj-google-style"}
{"code": "def flatten_zip_dataset(*args):\n  \n  flattened = tf.data.Dataset.from_tensors(args[0])\n  for ex in args[1:]:\n    flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex))\n  return flattened", "docstring": "A list of examples to a dataset containing mixed examples.\n\nGiven a list of `n` dataset examples, flatten them by converting\neach element into a dataset and concatenating them to convert into a\nsingle dataset.\n\nArgs:\n*args: A list containing one example each from `n` different datasets.\n\nReturns:\nflattened: A new dataset containing the examples from the list as part\nof a single dataset.", "source": "juraj-google-style"}
{"code": "def determine_drift(self):\n    try:\n        response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)\n        drift_request_id = response.get('StackDriftDetectionId', None)\n        if drift_request_id:\n            logging.info('drift_request_id: %s - polling', drift_request_id)\n            drift_calc_done = False\n            while (not drift_calc_done):\n                time.sleep(self.nap_time)\n                response = self._cloud_formation.describe_stack_drift_detection_status(StackDriftDetectionId=drift_request_id)\n                current_state = response.get('DetectionStatus', None)\n                logging.info('describe_stack_drift_detection_status(): {}'.format(current_state))\n                drift_calc_done = (current_state in CALC_DONE_STATES)\n                drift_answer = response.get('StackDriftStatus', 'UNKNOWN')\n            logging.info('drift of {}: {}'.format(self._stack_name, drift_answer))\n            if (drift_answer == 'DRIFTED'):\n                if self._verbose:\n                    self._print_drift_report()\n                return False\n            else:\n                return True\n        else:\n            logging.warning('drift_request_id is None')\n        return False\n    except Exception as wtf:\n        logging.error(wtf, exc_info=True)\n        return False", "docstring": "Determine the drift of the stack.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False", "source": "codesearchnet"}
{"code": "def delete_jobs(self, user_ids, job_ids, task_ids, labels, create_time_min=None, create_time_max=None):\n    tasks = list(self.lookup_job_tasks({'RUNNING'}, user_ids=user_ids, job_ids=job_ids, task_ids=task_ids, labels=labels, create_time_min=create_time_min, create_time_max=create_time_max))\n    print(('Found %d tasks to delete.' % len(tasks)))\n    return google_base.cancel(self._service.new_batch_http_request, self._service.operations().cancel, tasks)", "docstring": "Kills the operations associated with the specified job or job.task.\n\nArgs:\nuser_ids: List of user ids who \"own\" the job(s) to cancel.\njob_ids: List of job_ids to cancel.\ntask_ids: List of task-ids to cancel.\nlabels: List of LabelParam, each must match the job(s) to be canceled.\ncreate_time_min: a timezone-aware datetime value for the earliest create\ntime of a task, inclusive.\ncreate_time_max: a timezone-aware datetime value for the most recent\ncreate time of a task, inclusive.\n\nReturns:\nA list of tasks canceled and a list of error messages.", "source": "codesearchnet"}
{"code": "def ShlexSplit(string):\n  \n  precondition.AssertType(string, Text)\n\n  if PY2:\n    string = string.encode(\"utf-8\")\n\n  parts = shlex.split(string)\n\n  if PY2:\n    \n    \n    parts = [part.decode(\"utf-8\") for part in parts]\n    \n\n  return parts", "docstring": "A wrapper for `shlex.split` that works with unicode objects.\n\nArgs:\nstring: A unicode string to split.\n\nReturns:\nA list of unicode strings representing parts of the input string.", "source": "juraj-google-style"}
{"code": "class ThresholdedReLU(Layer):\n\n    def __init__(self, theta=1.0, **kwargs):\n        super(ThresholdedReLU, self).__init__(**kwargs)\n        if theta is None:\n            raise ValueError('Theta of a Thresholded ReLU layer cannot be None, requires a float. Got %s' % theta)\n        if theta < 0:\n            raise ValueError('The theta value of a Thresholded ReLU layer should be >=0, got %s' % theta)\n        self.supports_masking = True\n        self.theta = backend.cast_to_floatx(theta)\n\n    def call(self, inputs):\n        theta = math_ops.cast(self.theta, inputs.dtype)\n        return inputs * math_ops.cast(math_ops.greater(inputs, theta), inputs.dtype)\n\n    def get_config(self):\n        config = {'theta': float(self.theta)}\n        base_config = super(ThresholdedReLU, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    @tf_utils.shape_type_conversion\n    def compute_output_shape(self, input_shape):\n        return input_shape", "docstring": "Thresholded Rectified Linear Unit.\n\nIt follows:\n\n```\nf(x) = x for x > theta\nf(x) = 0 otherwise`\n```\n\nInput shape:\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n\nOutput shape:\nSame shape as the input.\n\nArgs:\ntheta: Float >= 0. Threshold location of activation.", "source": "github-repos"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    size = get_size_dict(size, default_to_square=False)\n    output_size = get_resize_output_image_size(image, size=(size['height'], size['width']), default_to_square=False, input_data_format=input_data_format)\n    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input\nimage.", "source": "github-repos"}
{"code": "def event(self, **kwargs):\n    if (self.callback.noargs and (self.streams == [])):\n        self.param.warning('No streams declared. To update a DynamicMaps using generators (or callables without arguments) use streams=[Next()]')\n        return\n    if (self.streams == []):\n        self.param.warning('No streams on DynamicMap, calling event will have no effect')\n        return\n    stream_params = set(util.stream_parameters(self.streams))\n    invalid = [k for k in kwargs.keys() if (k not in stream_params)]\n    if invalid:\n        msg = 'Key(s) {invalid} do not correspond to stream parameters'\n        raise KeyError(msg.format(invalid=', '.join((('%r' % i) for i in invalid))))\n    streams = []\n    for stream in self.streams:\n        contents = stream.contents\n        applicable_kws = {k: v for (k, v) in kwargs.items() if (k in set(contents.keys()))}\n        if ((not applicable_kws) and contents):\n            continue\n        streams.append(stream)\n        rkwargs = util.rename_stream_kwargs(stream, applicable_kws, reverse=True)\n        stream.update(**rkwargs)\n    Stream.trigger(streams)", "docstring": "Updates attached streams and triggers events\n\nAutomatically find streams matching the supplied kwargs to\nupdate and trigger events on them.\n\nArgs:\n**kwargs: Events to update streams with", "source": "codesearchnet"}
{"code": "def _build_url(self, path):\n    if (path.startswith('http:\n        return path\n    else:\n        return ('%s%s' % (self._url, path))", "docstring": "Returns the full url from path.\n\nIf path is already a url, return it unchanged. If it's a path, append\nit to the stored url.\n\nReturns:\nstr: The full URL", "source": "codesearchnet"}
{"code": "def ReadDataAtOffset(self, file_offset, size):\n    self._file_object.seek(file_offset, os.SEEK_SET)\n    return self._file_object.read(size)", "docstring": "Reads a byte string from the file-like object at a specific offset.\n\nArgs:\nfile_offset (int): file offset.\nsize (int): number of bytes to read.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "codesearchnet"}
{"code": "def solve(a, b):\n    if any_symbolic_tensors((a, b)):\n        return Solve().symbolic_call(a, b)\n    return _solve(a, b)", "docstring": "Solves a linear system of equations given by `a x = b`.\n\nArgs:\na: A tensor of shape `(..., M, M)` representing the coefficients matrix.\nb: A tensor of shape `(..., M)` or `(..., M, N)` representing the\nright-hand side or \"dependent variable\" matrix.\n\nReturns:\nA tensor of shape `(..., M)` or `(..., M, N)` representing the solution\nof the linear system. Returned shape is identical to `b`.", "source": "github-repos"}
{"code": "def minmax(self, minimum=None, maximum=None):\n\t\t\n\n\t\t\n\t\tif minimum is None and maximum is None:\n\t\t\treturn {\"minimum\": self._minimum, \"maximum\": self._maximum};\n\n\t\t\n\t\tif minimum != None:\n\n\t\t\t\n\t\t\tif self._type in ['base64', 'date', 'datetime', 'ip', 'time']:\n\n\t\t\t\t\n\t\t\t\tif not isinstance(minimum, basestring) \\\n\t\t\t\t\tor not _typeToRegex[self._type].match(minimum):\n\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\n\t\t\t\n\t\t\telif self._type in ['int', 'string', 'timestamp', 'uint']:\n\n\t\t\t\t\n\t\t\t\tif not isinstance(minimum, (int, long)):\n\n\t\t\t\t\t\n\t\t\t\t\tif isinstance(minimum, basestring) \\\n\t\t\t\t\t\tand _typeToRegex['int'].match(minimum):\n\n\t\t\t\t\t\t\n\t\t\t\t\t\tminimum = int(minimum, 0)\n\n\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\t\t\n\t\t\t\t\tif self._type in ['base64', 'string', 'timestamp', 'uint']:\n\n\t\t\t\t\t\t\n\t\t\t\t\t\tif minimum < 0:\n\t\t\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\n\t\t\telif self._type == 'decimal':\n\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tminimum = Decimal(minimum)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\n\t\t\telif self._type == 'float':\n\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tminimum = float(minimum)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\n\t\t\telif self._type == 'price':\n\n\t\t\t\t\n\t\t\t\tif not isinstance(minimum, basestring) or not _typeToRegex['price'].match(minimum):\n\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\t\n\t\t\t\tminimum = Decimal(minimum)\n\n\t\t\t\n\t\t\telse:\n\t\t\t\traise TypeError('can not set __minimum__ for ' + self._type)\n\n\t\t\t\n\t\t\tself._minimum = minimum\n\n\t\t\n\t\tif maximum != None:\n\n\t\t\t\n\t\t\tif self._type in ['date', 'datetime', 'ip', 'time']:\n\n\t\t\t\t\n\t\t\t\tif not isinstance(maximum, basestring) \\\n\t\t\t\t\tor not _typeToRegex[self._type].match(maximum):\n\t\t\t\t\traise ValueError('__maximum__')\n\n\t\t\t\n\t\t\t\n\t\t\telif self._type in ['int', 'string', 'timestamp', 'uint']:\n\n\t\t\t\t\n\t\t\t\tif not isinstance(maximum, (int, long)):\n\n\t\t\t\t\t\n\t\t\t\t\tif isinstance(maximum, basestring) \\\n\t\t\t\t\t\tand _typeToRegex['int'].match(maximum):\n\n\t\t\t\t\t\t\n\t\t\t\t\t\tmaximum = int(maximum, 0)\n\n\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError('__minimum__')\n\n\t\t\t\t\t\n\t\t\t\t\tif self._type in ['string', 'timestamp', 'uint']:\n\n\t\t\t\t\t\t\n\t\t\t\t\t\tif maximum < 0:\n\t\t\t\t\t\t\traise ValueError('__maximum__')\n\n\t\t\t\n\t\t\telif self._type == 'decimal':\n\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tmaximum = Decimal(maximum)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError('__maximum__')\n\n\t\t\t\n\t\t\telif self._type == 'float':\n\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tminimum = float(minimum)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError('__maximum__')\n\n\t\t\t\n\t\t\telif self._type == 'price':\n\n\t\t\t\t\n\t\t\t\tif not isinstance(maximum, basestring) or not _typeToRegex['price'].match(maximum):\n\t\t\t\t\traise ValueError('__maximum__')\n\n\t\t\t\t\n\t\t\t\tmaximum = Decimal(maximum)\n\n\t\t\t\n\t\t\telse:\n\t\t\t\traise TypeError('can not set __maximum__ for ' + self._type)\n\n\t\t\t\n\t\t\tif self._minimum is not None:\n\n\t\t\t\t\n\t\t\t\tif self._type == 'ip':\n\n\t\t\t\t\t\n\t\t\t\t\tif self.__compare_ips(self._minimum, maximum) == 1:\n\t\t\t\t\t\traise 
ValueError('__maximum__')\n\n\t\t\t\t\n\t\t\t\telse:\n\n\t\t\t\t\t\n\t\t\t\t\tif self._minimum > maximum:\n\t\t\t\t\t\traise ValueError('__maximum__')\n\n\t\t\t\n\t\t\tself._maximum = maximum", "docstring": "Min/Max\n\nSets or gets the minimum and/or maximum values for the Node. For\ngetting, returns {\"minimum\":mixed,\"maximum\":mixed}\n\nArguments:\nminimum {mixed} -- The minimum value\nmaximum {mixed} -- The maximum value\n\nRaises:\nTypeError, ValueError\n\nReturns:\nNone | dict", "source": "juraj-google-style"}
{"code": "def _restart(self, downtime_secs, job):\n    self._cluster.kill_task(job, 0)\n    time.sleep(downtime_secs)\n    self.assertFalse(context.check_alive('/job:%s/replica:0/task:0' % job))\n    self._cluster.start_task(job, 0)\n    while not context.check_alive('/job:%s/replica:0/task:0' % job):\n        time.sleep(1)", "docstring": "Kills `job` (index: 0) and restarts it after `downtime_secs`.\n\nArgs:\ndowntime_secs: secs before restarting the job.\njob: a string specifying the job to restart.", "source": "github-repos"}
{"code": "def eval_from_json(json):\n        \n        closes = poloniex.get_attribute(json, 'close')\n        volumes = poloniex.get_attribute(json, 'volume')\n        obv = 0\n        for date in range(1, len(json)):\n            curr = {'close': closes[date], 'volume': volumes[date]}\n            prev = {'close': closes[date - 1], 'obv': obv}\n            obv = OBV.eval_algorithm(curr, prev)\n        return obv", "docstring": "Evaluates OBV from JSON (typically Poloniex API response)\n\nArgs:\njson: List of dates where each entry is a dict of raw market data.\n\nReturns:\nFloat of OBV", "source": "juraj-google-style"}
{"code": "def _Execute(self, funcname, *args, **kwargs):\n    wait_for_completion = kwargs.get('wait_for_completion', False)\n    rpc_dict = {'func': funcname, 'args': args}\n    self._Send(json.dumps(rpc_dict))\n    timeout = (TIMEOUT_FOREVER if wait_for_completion else TIMEOUT_DEFAULT)\n    result_string = self._Recv(timeout)\n    try:\n        result = json.loads(result_string, object_hook=self._JsonDecodeDict)\n        if isinstance(result, unicode):\n            result = self._TryStr(result)\n        elif isinstance(result, list):\n            result = self._JsonDecodeList(result)\n    except ValueError:\n        raise ValueError(('Response JSON invalid: ' + str(result_string)))\n    except TypeError:\n        raise ValueError(('Response JSON invalid: ' + str(result_string)))\n    return result", "docstring": "Send an RPC request to the gdb-internal python.\n\nBlocks for 3 seconds by default and returns any results.\nArgs:\nfuncname: the name of the function to call.\n*args: the function's arguments.\n**kwargs: Only the key 'wait_for_completion' is inspected, which decides\nwhether to wait forever for completion or just 3 seconds.\nReturns:\nThe result of the function call.", "source": "codesearchnet"}
{"code": "def route(cls, route, config=None):\n\n    def decorator(wrapped_class, **kwds):\n        cls._routes.append(dict(url=route, request_handler=wrapped_class))\n        return wrapped_class\n    return decorator", "docstring": "This method provides a decorator for adding endpoints to the\nhttp server.\n\nArgs:\nroute (str): The url to be handled by the RequestHandled\nconfig (dict): Configuration for the request handler\n\nExample:\n\n.. code-block:: python\n\nimport nautilus\nfrom nauilus.network.http import RequestHandler\n\nclass MyService(nautilus.Service):\n# ...\n\n@MyService.route('/')\nclass HelloWorld(RequestHandler):\ndef get(self):\nreturn self.finish('hello world')", "source": "codesearchnet"}
{"code": "def from_tensor_list(element_spec, tensor_list):\n    return _from_tensor_list_helper(lambda spec, value: spec._from_tensor_list(value), element_spec, tensor_list)", "docstring": "Returns an element constructed from the given spec and tensor list.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\ntensor_list: A list of tensors to use for constructing the value.\n\nReturns:\nAn element constructed from the given spec and tensor list.\n\nRaises:\nValueError: If the number of tensors needed to construct an element for\nthe given spec does not match the given number of tensors or the given\nspec is not compatible with the tensor list.", "source": "github-repos"}
{"code": "def __init__(self, interpreter=None, signature_key=None):\n    if not interpreter:\n        raise ValueError('None interpreter provided.')\n    if not signature_key:\n        raise ValueError('None signature_key provided.')\n    self._interpreter = interpreter\n    self._interpreter_wrapper = interpreter._interpreter\n    self._signature_key = signature_key\n    signature_defs = interpreter._get_full_signature_list()\n    if signature_key not in signature_defs:\n        raise ValueError(f'Invalid signature_key provided: \"{signature_key}\".')\n    self._signature_def = signature_defs[signature_key]\n    self._outputs = self._signature_def['outputs'].items()\n    self._inputs = self._signature_def['inputs']\n    self._subgraph_index = self._interpreter_wrapper.GetSubgraphIndexFromSignature(self._signature_key)", "docstring": "Constructor.\n\nArgs:\ninterpreter: Interpreter object that is already initialized with the\nrequested model.\nsignature_key: SignatureDef key to be used.", "source": "github-repos"}
{"code": "def do_state(args):\n    \n    rest_client = RestClient(args.url, args.user)\n\n    if args.subcommand == 'list':\n        response = rest_client.list_state(args.subtree, args.head)\n        leaves = response['data']\n        head = response['head']\n        keys = ('address', 'size', 'data')\n        headers = tuple(k.upper() for k in keys)\n\n        def parse_leaf_row(leaf, decode=True):\n            decoded = b64decode(leaf['data'])\n            return (\n                leaf['address'],\n                len(decoded),\n                str(decoded) if decode else leaf['data'])\n\n        if args.format == 'default':\n            fmt.print_terminal_table(headers, leaves, parse_leaf_row)\n            print('HEAD BLOCK: \"{}\"'.format(head))\n\n        elif args.format == 'csv':\n            fmt.print_csv(headers, leaves, parse_leaf_row)\n            print('(data for head block: \"{}\")'.format(head))\n\n        elif args.format == 'json' or args.format == 'yaml':\n            state_data = {\n                'head': head,\n                'data': [{k: d for k, d in zip(keys, parse_leaf_row(l, False))}\n                         for l in leaves]}\n\n            if args.format == 'yaml':\n                fmt.print_yaml(state_data)\n            elif args.format == 'json':\n                fmt.print_json(state_data)\n            else:\n                raise AssertionError('Missing handler: {}'.format(args.format))\n\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))\n\n    if args.subcommand == 'show':\n        output = rest_client.get_leaf(args.address, args.head)\n        if output is not None:\n            print('DATA: \"{}\"'.format(b64decode(output['data'])))\n            print('HEAD: \"{}\"'.format(output['head']))\n        else:\n            raise CliException('No data available at {}'.format(args.address))", "docstring": "Runs the batch list or batch show command, printing output to the\nconsole\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "juraj-google-style"}
{"code": "def format(obj, options):\n    \n    formatters = {\n        float_types: lambda x: '{:.{}g}'.format(x, options.digits),\n    }\n    for _types, fmtr in formatters.items():\n        if isinstance(obj, _types):\n            return fmtr(obj)\n    try:\n        if six.PY2 and isinstance(obj, six.string_types):\n            return str(obj.encode('utf-8'))\n        return str(obj)\n    except:\n        return 'OBJECT'", "docstring": "Return a string representation of the Python object\n\nArgs:\nobj: The Python object\noptions: Format options", "source": "juraj-google-style"}
{"code": "def normalize_docroot(app, root):\n    srcdir = app.env.srcdir\n    default_version = app.config.javalink_default_version\n    if isinstance(root, basestring):\n        (url, base) = _parse_docroot_str(srcdir, root)\n        return {'root': url, 'base': base, 'version': default_version}\n    else:\n        normalized = {}\n        normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]\n        if ('base' in root):\n            normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]\n        else:\n            normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]\n        if ('version' in root):\n            normalized['version'] = root['version']\n        else:\n            normalized['version'] = default_version\n        return normalized", "docstring": "Creates a package-list URL and a link base from a docroot element.\n\nArgs:\napp: the global app object\nroot: the docroot element [string or dictionary]", "source": "codesearchnet"}
{"code": "def _reshape(self, input_dims=None, output_dims=None):\n    if (input_dims is not None):\n        if (np.product(input_dims) != self._input_dim):\n            raise QiskitError('Reshaped input_dims are incompatible with combined input dimension.')\n        self._input_dims = tuple(input_dims)\n    if (output_dims is not None):\n        if (np.product(output_dims) != self._output_dim):\n            raise QiskitError('Reshaped input_dims are incompatible with combined input dimension.')\n        self._output_dims = tuple(output_dims)\n    return self", "docstring": "Reshape input and output dimensions of operator.\n\nArg:\ninput_dims (tuple): new subsystem input dimensions.\noutput_dims (tuple): new subsystem output dimensions.\n\nReturns:\nOperator: returns self with reshaped input and output dimensions.\n\nRaises:\nQiskitError: if combined size of all subsystem input dimension or\nsubsystem output dimensions is not constant.", "source": "codesearchnet"}
{"code": "def request(self, batch: Sequence[Any], model: genai.Client, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    if inference_args is None:\n        inference_args = {}\n    responses = self.request_fn(self.model_name, batch, model, inference_args)\n    return utils._convert_to_result(batch, responses, self.model_name)", "docstring": "Sends a prediction request to a Gemini service containing a batch\nof inputs and matches that input with the prediction response from\nthe endpoint as an iterable of PredictionResults.\n\nArgs:\nbatch: a sequence of any values to be passed to the Gemini service.\nShould be inputs accepted by the provided inference function.\nmodel: a genai.Client object configured to access the desired service.\ninference_args: any additional arguments to send as part of the\nprediction request.\n\nReturns:\nAn iterable of Predictions.", "source": "github-repos"}
{"code": "def DirnamePath(self, path):\n    if path.endswith(self.PATH_SEPARATOR):\n        path = path[:(- 1)]\n    if (not path):\n        return None\n    (dirname, _, _) = path.rpartition(self.PATH_SEPARATOR)\n    return dirname", "docstring": "Determines the directory name of the path.\n\nThe file system root is represented by an empty string.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: directory name of the path or None.", "source": "codesearchnet"}
{"code": "def download_tabular_rows_as_dicts(self, url, headers=1, keycolumn=1, **kwargs):\n    kwargs['headers'] = headers\n    stream = self.get_tabular_stream(url, **kwargs)\n    output_dict = dict()\n    headers = stream.headers\n    key_header = headers[(keycolumn - 1)]\n    for row in stream.iter(keyed=True):\n        first_val = row[key_header]\n        output_dict[first_val] = dict()\n        for header in row:\n            if (header == key_header):\n                continue\n            else:\n                output_dict[first_val][header] = row[header]\n    return output_dict", "docstring": "Download multicolumn csv from url and return dictionary where keys are first column and values are\ndictionaries with keys from column headers and values from columns beneath\n\nArgs:\nurl (str): URL to download\nheaders (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers. Defaults to 1.\nkeycolumn (int): Number of column to be used for key. Defaults to 1.\n**kwargs:\nfile_type (Optional[str]): Type of file. Defaults to inferring.\ndelimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.\n\nReturns:\nDict[Dict]: Dictionary where keys are first column and values are dictionaries with keys from column\nheaders and values from columns beneath", "source": "codesearchnet"}
{"code": "def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):\n    input_shape = inputs_embeds.size()[:-1]\n    sequence_length = input_shape[1]\n    position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)\n    return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length", "docstring": "We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\nArgs:\ninputs_embeds: torch.Tensor\n\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def get_transcript_ids(ensembl, gene_id):\n    \n    \n    ensembl_genes = ensembl.get_genes_for_hgnc_id(gene_id)\n    transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, [gene_id])\n    \n    \n    \n    \n    alt_symbols = []\n    if len(transcript_ids) == 0:\n        alt_symbols = ensembl.get_previous_symbol(gene_id)\n        genes = [ensembl.get_genes_for_hgnc_id(symbol) for symbol in alt_symbols]\n        genes = [item for sublist in genes for item in sublist]\n        ensembl_genes += genes\n        symbols = [gene_id] + alt_symbols\n        \n        transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, symbols)\n    \n    return get_transcript_lengths(ensembl, transcript_ids)", "docstring": "gets transcript IDs for a gene.\n\nArgs:\nensembl: EnsemblRequest object to request data from ensembl\ngene_id: HGNC symbol for gene\n\nReturns:\ndictionary of transcript ID: transcript lengths for all transcripts\nfor a given HGNC symbol.", "source": "juraj-google-style"}
{"code": "def add_object(self, file_path, file_object, error_fct=None):\n        \n        error_fct = error_fct or self.raise_os_error\n        if not file_path:\n            target_directory = self.root\n        else:\n            target_directory = self.resolve(file_path)\n            if not S_ISDIR(target_directory.st_mode):\n                error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR\n                error_fct(error, file_path)\n        target_directory.add_entry(file_object)", "docstring": "Add a fake file or directory into the filesystem at file_path.\n\nArgs:\nfile_path: The path to the file to be added relative to self.\nfile_object: File or directory to add.\nerror_fct: The error function to be called if file_path does\nnot correspond to a directory (used internally).\n\nRaises:\nIOError or OSError: if file_path does not correspond to a\ndirectory.", "source": "juraj-google-style"}
{"code": "def external_ids(self, **kwargs):\n    path = self._get_series_id_season_number_path('external_ids')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the external ids that we have stored for a TV season by season\nnumber.\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def isin(self, values):\n        \n        return self.__constructor__(\n            query_compiler=self._query_compiler.isin(values=values)\n        )", "docstring": "Fill a DataFrame with booleans for cells contained in values.\n\nArgs:\nvalues (iterable, DataFrame, Series, or dict): The values to find.\n\nReturns:\nA new DataFrame with booleans representing whether or not a cell\nis in values.\nTrue: cell is contained in values.\nFalse: otherwise", "source": "juraj-google-style"}
{"code": "def skip_while(self, predicate):\n    if self.closed():\n        raise ValueError('Attempt to call skip_while() on a closed Queryable.')\n    if (not is_callable(predicate)):\n        raise TypeError('skip_while() parameter predicate={0} is not callable'.format(repr(predicate)))\n    return self._create(itertools.dropwhile(predicate, self))", "docstring": "Omit elements from the start for which a predicate is True.\n\nNote: This method uses deferred execution.\n\nArgs:\npredicate: A single argument predicate function.\n\nReturns:\nA Queryable over the sequence of elements beginning with the first\nelement for which the predicate returns False.\n\nRaises:\nValueError: If the Queryable is closed().\nTypeError: If predicate is not callable.", "source": "codesearchnet"}
{"code": "def get_nodes(self):\n    nodes = []\n    for (age, level) in enumerate(self.nodes):\n        nodes.append([])\n        for node in level:\n            nodes[age].append(node.get_tuple())\n    return nodes", "docstring": "Get the tree nodes as list.\n\nReturns:\nlist: A 2d-list holding the grown nodes coordinates as tupel for every age.\nExample:\n[\n[(10, 40)],\n[(20, 80), (100, 30)],\n[(100, 90), (120, 40), ...],\n...\n]", "source": "codesearchnet"}
{"code": "def __init__(self, object_type: str, object_id: str = None):\n        \n        if object_type not in [PB_KEY, SBI_KEY]:\n            raise RuntimeError('Invalid object type')\n        self._type = object_type\n        self._id = object_id\n        self._key = self.get_key(object_type, object_id)\n        self._check_object_exists()", "docstring": "Initialise variables.\n\nArgs:\nobject_type (str): Type of object.\nobject_id (str): ID of the object.", "source": "juraj-google-style"}
{"code": "def check_info_annotation(annotation, info, extra_info, alternatives, individuals=[]):\n    \n    \n    number = extra_info['Number']\n    if is_number(number):\n        number_of_entrys = float(number)\n        if number_of_entrys != 0:\n            if len(annotation) != number_of_entrys:\n                raise SyntaxError(\"Info field {0} has the wrong \"\\\n                \"number of entrys according to the vcf header.\"\\\n                \" Vcf header specifies {1} should have {2} entry(s)\".format(\n                    '='.join([info, ','.join(annotation)]), \n                    info,\n                    number\n                ))\n    elif number == 'A':\n        if len(annotation) != len(alternatives):\n            raise SyntaxError(\"Info field {0} has the wrong \"\\\n            \"number of entrys according to the vcf header.\"\\\n            \"Vcf header specifies {1} should have {2} entry(s)\".format(\n                    '='.join([info, ','.join(annotation)]), \n                    info,\n                    number\n            ))\n    elif number == 'R':\n        if len(annotation) != (len(alternatives) + 1):\n            raise SyntaxError(\"Info field {0} has the wrong \"\\\n            \"number of entrys according to the vcf header.\"\\\n            \"Vcf header specifies {1} should have {2} entry(s)\".format(\n                    '='.join([info, ','.join(annotation)]), \n                    info,\n                    number\n            ))\n    elif number == 'G':\n        if len(annotation) != len(individuals):\n            raise SyntaxError(\"Info field {0} has the wrong \"\\\n            \"number of entrys according to the vcf header.\"\\\n            \"Vcf header specifies {1} should have {2} entry(s)\".format(\n                    '='.join([info, ','.join(annotation)]), \n                    info,\n                    number\n            ))\n    return True", "docstring": "Check if the info annotation corresponds to the metadata specification\n\nArguments:\nannotation (list): The annotation from the vcf file\ninfo (str): Name of the info field\nextra_info (dict): The metadata specification\nalternatives (list): A list with the alternative variants\nindividuals (list): a list with the individuals\n\nReturns:\nbool: If the annotation is correct or not", "source": "juraj-google-style"}
{"code": "def _gather_beams(nested, beam_indices, batch_size, new_beam_size):\n    batch_pos = (tf.range(batch_size * new_beam_size) // new_beam_size)\n    batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])\n    coordinates = tf.stack([batch_pos, beam_indices], axis=2)\n    return nest.map_structure((lambda state: tf.gather_nd(state, coordinates)), nested)", "docstring": "Gather beams from nested structure of tensors.\n\nEach tensor in nested represents a batch of beams, where beam refers to a\nsingle search state (beam search involves searching through multiple states\nin parallel).\n\nThis function is used to gather the top beams, specified by\nbeam_indices, from the nested tensors.\n\nArgs:\nnested: Nested structure (tensor, list, tuple or dict) containing tensors\nwith shape [batch_size, beam_size, ...].\nbeam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each\nvalue in beam_indices must be between [0, beam_size), and are not\nnecessarily unique.\nbatch_size: int size of batch\nnew_beam_size: int number of beams to be pulled from the nested tensors.\n\nReturns:\nNested structure containing tensors with shape\n[batch_size, new_beam_size, ...]", "source": "codesearchnet"}
{"code": "def hook(self, function, event, dependencies):\n    if (event is None):\n        for e in self._events.keys():\n            self.hook(function, e, dependencies)\n        return\n    if ((not isinstance(event, str)) and isinstance(event, Iterable)):\n        for e in event:\n            self.hook(function, e, dependencies)\n        return\n    event_list = self._events.get(event, None)\n    if (event_list is None):\n        raise NameError((\"Invalid key provided '%s'. Valid options: %s\" % (event, ', '.join(self._events.keys()))))\n        return\n    return event_list.hook(function, dependencies)", "docstring": "Tries to load the hook to the event\n\nArgs:\nfunction (func): Function that will be called when the event is called\n\nKwargs:\ndependencies (str): String or Iterable with modules whose hooks should be called before this one\n\nRaises:\nNameError\n\nNote that the dependencies are module-wide, that means that if\n`parent.foo` and `parent.bar` are both subscribed to `example` event\nand `child` enumerates `parent` as dependcy, **both** `foo` and `bar`\nmust be called in order for the dependcy to get resolved.", "source": "codesearchnet"}
{"code": "def __new__(cls, input_array, tol=1e-3):\n        \n        obj = super().__new__(cls, input_array, check_rank=3)\n        if not (obj - np.transpose(obj, (0, 2, 1)) < tol).all():\n            warnings.warn(\"Input piezo tensor does \"\n                          \"not satisfy standard symmetries\")\n        return obj.view(cls)", "docstring": "Create an PiezoTensor object.  The constructor throws an error if\nthe shape of the input_matrix argument is not 3x3x3, i. e. in true\ntensor notation. Note that the constructor uses __new__ rather than\n__init__ according to the standard method of subclassing numpy\nndarrays.\n\nArgs:\ninput_matrix (3x3x3 array-like): the 3x6 array-like\nrepresenting the piezo tensor", "source": "juraj-google-style"}
{"code": "def names(self):\n    result = []\n    for (key, value) in self.iteritems():\n        if (value & self.bitmask):\n            result.append(key)\n    return result", "docstring": "List of selected enum names.\n\nReturns:\nlist: Enum names.", "source": "codesearchnet"}
{"code": "def exp2(x):\n    if any_symbolic_tensors((x,)):\n        return Exp2().symbolic_call(x)\n    return backend.numpy.exp2(x)", "docstring": "Calculate the base-2 exponential of all elements in the input tensor.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise base-2 exponential of `x`.", "source": "github-repos"}
{"code": "def grid(self, force_rerun=False):\n        \n        log.debug('{}: running grid maker...'.format(self.id))\n\n        if not self.receptormol2_path or not self.box_path:\n            return ValueError('Please run protein_only_and_noH and showbox')\n\n        gridscript = op.join(self.dock_dir, \"{}_grid.in\".format(self.id))\n        out_name = op.join(self.dock_dir, \"{}_grid.out\".format(self.id))\n\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=out_name):\n            with open(gridscript, \"w\") as f:\n                grid_text = .format(op.basename(self.receptormol2_path), op.basename(self.box_path), self.amb_file, self.id)\n\n                f.write(grid_text)\n\n            os.chdir(self.dock_dir)\n            cmd = \"grid -i {} -o {}\".format(op.basename(gridscript), op.basename(out_name))\n            os.system(cmd)\n\n        if ssbio.utils.is_non_zero_file(out_name):\n            self.grid_path = out_name\n            log.debug('{}: successful grid creation'.format(self.grid_path))\n        else:\n            log.critical('{}: grid failed to run on receptor + box file'.format(self.box_path))", "docstring": "Create the scoring grid within the dummy box.\n\nArgs:\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"}
{"code": "def from_bank_code(cls, country_code, bank_code):\n    try:\n        return cls(registry.get('bank_code')[(country_code, bank_code)]['bic'])\n    except KeyError:\n        raise ValueError('Invalid bank code {!r} for country {!r}'.format(bank_code, country_code))", "docstring": "Create a new BIC object from country- and bank-code.\n\nExamples:\n>>> bic = BIC.from_bank_code('DE', '20070000')\n>>> bic.country_code\n'DE'\n>>> bic.bank_code\n'DEUT'\n>>> bic.location_code\n'HH'\n\n>>> BIC.from_bank_code('DE', '01010101')\nTraceback (most recent call last):\n...\nValueError: Invalid bank code '01010101' for country 'DE'\n\n\nArgs:\ncountry_code (str): ISO 3166 alpha2 country-code.\nbank_code (str): Country specific bank-code.\n\nReturns:\nBIC: a BIC object generated from the given country code and bank code.\n\nRaises:\nValueError: If the given bank code wasn't found in the registry\n\nNote:\nThis currently only works for German bank-codes.", "source": "codesearchnet"}
{"code": "def add_topic(self, topic):\n        \n        if topic in self._topics:\n            return Future().success(set(self._topics))\n\n        self._topics.add(topic)\n        return self.cluster.request_update()", "docstring": "Add a topic to the list of topics tracked via metadata.\n\nArguments:\ntopic (str): topic to track\n\nReturns:\nFuture: resolves after metadata request/response", "source": "juraj-google-style"}
{"code": "def sum(vari, axis=None):\n    if isinstance(vari, Poly):\n        core = vari.A.copy()\n        for key in vari.keys:\n            core[key] = sum(core[key], axis)\n        return Poly(core, vari.dim, None, vari.dtype)\n    return np.sum(vari, axis)", "docstring": "Sum the components of a shapeable quantity along a given axis.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nInput data.\naxis (int):\nAxis over which the sum is taken. By default ``axis`` is None, and\nall elements are summed.\n\nReturns:\n(chaospy.poly.base.Poly, numpy.ndarray):\nPolynomial array with same shape as ``vari``, with the specified\naxis removed. If ``vari`` is an 0-d array, or ``axis`` is None,\na (non-iterable) component is returned.\n\nExamples:\n>>> vari = cp.prange(3)\n>>> print(vari)\n[1, q0, q0^2]\n>>> print(cp.sum(vari))\nq0^2+q0+1", "source": "codesearchnet"}
{"code": "def load(self, source, mode='create', source_format='csv', csv_options=None, ignore_unknown_values=False, max_bad_records=0):\n    job = self.load_async(source, mode=mode, source_format=source_format, csv_options=csv_options, ignore_unknown_values=ignore_unknown_values, max_bad_records=max_bad_records)\n    if (job is not None):\n        job.wait()\n    return job", "docstring": "Load the table from GCS.\n\nArgs:\nsource: the URL of the source objects(s). Can include a wildcard '*' at the end of the item\nname. Can be a single source or a list.\nmode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the\ntable does not already exist, while 'create' will fail if it does. The default is\n'create'. If 'create' the schema will be inferred if necessary.\nsource_format: the format of the data, 'csv' or 'json'; default 'csv'.\ncsv_options: if source format is 'csv', additional options as a CSVOptions object.\nignore_unknown_values: if True, accept rows that contain values that do not match the schema;\nthe unknown values are ignored (default False).\nmax_bad_records: the maximum number of bad records that are allowed (and ignored) before\nreturning an 'invalid' error in the Job result (default 0).\n\nReturns:\nA Job object for the completed load Job if it was started successfully; else None.", "source": "codesearchnet"}
{"code": "def add_response(self, req, resp):\n        \n        if self._cache is None:\n            return\n        signature = sign(req.checkRequest)\n        with self._cache as c:\n            now = self._timer()\n            quota_scale = 0  \n            item = c.get(signature)\n            if item is None:\n                c[signature] = CachedItem(\n                    resp, self.service_name, now, quota_scale)\n            else:\n                \n                item.last_check_time = now\n                item.response = resp\n                item.quota_scale = quota_scale\n                item.is_flushing = False\n                c[signature] = item", "docstring": "Adds the response from sending to `req` to this instance's cache.\n\nArgs:\nreq (`ServicecontrolServicesCheckRequest`): the request\nresp (CheckResponse): the response from sending the request", "source": "juraj-google-style"}
{"code": "def fill_slot(self, filler_pipeline_key, slot, value):\n    if (not isinstance(filler_pipeline_key, db.Key)):\n        filler_pipeline_key = db.Key(filler_pipeline_key)\n    if _TEST_MODE:\n        slot._set_value_test(filler_pipeline_key, value)\n    else:\n        encoded_value = json.dumps(value, sort_keys=True, cls=mr_util.JsonEncoder)\n        value_text = None\n        value_blob = None\n        if (len(encoded_value) <= _MAX_JSON_SIZE):\n            value_text = db.Text(encoded_value)\n        else:\n            value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())\n\n        def txn():\n            slot_record = db.get(slot.key)\n            if (slot_record is None):\n                raise UnexpectedPipelineError(('Tried to fill missing slot \"%s\" by pipeline ID \"%s\" with value: %r' % (slot.key, filler_pipeline_key.name(), value)))\n            slot_record.filler = filler_pipeline_key\n            slot_record.value_text = value_text\n            slot_record.value_blob = value_blob\n            slot_record.status = _SlotRecord.FILLED\n            slot_record.fill_time = self._gettime()\n            slot_record.put()\n            task = taskqueue.Task(url=self.barrier_handler_path, params=dict(slot_key=slot.key, use_barrier_indexes=True), headers={'X-Ae-Slot-Key': slot.key, 'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})\n            task.add(queue_name=self.queue_name, transactional=True)\n        db.run_in_transaction_options(db.create_transaction_options(propagation=db.ALLOWED), txn)\n    self.session_filled_output_names.add(slot.name)", "docstring": "Fills a slot, enqueueing a task to trigger pending barriers.\n\nArgs:\nfiller_pipeline_key: db.Key or stringified key of the _PipelineRecord\nthat filled this slot.\nslot: The Slot instance to fill.\nvalue: The serializable value to assign.\n\nRaises:\nUnexpectedPipelineError if the _SlotRecord for the 'slot' could not\nbe found in the Datastore.", "source": "codesearchnet"}
{"code": "def run(self):\n        \n        while self.should_run:\n            try:\n                self.logger.debug('Sending heartbeat, seq ' + last_sequence)\n                self.ws.send(json.dumps({\n                    'op': 1,\n                    'd': last_sequence\n                }))\n            except Exception as e:\n                self.logger.error(f'Got error in heartbeat: {str(e)}')\n            finally:\n                elapsed = 0.0\n                while elapsed < self.interval and self.should_run:\n                    time.sleep(self.TICK_INTERVAL)\n                    elapsed += self.TICK_INTERVAL", "docstring": "Runs the thread\n\nThis method handles sending the heartbeat to the Discord websocket server, so the connection\ncan remain open and the bot remain online for those commands that require it to be.\n\nArgs:\nNone", "source": "juraj-google-style"}
{"code": "def bots(self):\n    json = self.skype.conn('GET', '{0}/agents'.format(SkypeConnection.API_BOT), auth=SkypeConnection.Auth.SkypeToken).json().get('agentDescriptions', [])\n    return [self.merge(SkypeBotUser.fromRaw(self.skype, raw)) for raw in json]", "docstring": "Retrieve a list of all known bots.\n\nReturns:\nSkypeBotUser list: resulting bot user objects", "source": "codesearchnet"}
{"code": "def _ParseCachedEntryVista(self, value_data, cached_entry_offset):\n    try:\n        cached_entry = self._ReadStructureFromByteStream(value_data[cached_entry_offset:], cached_entry_offset, self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse cached entry value with error: {0!s}'.format(exception))\n    path_size = cached_entry.path_size\n    maximum_path_size = cached_entry.maximum_path_size\n    path_offset = cached_entry.path_offset\n    if ((path_offset > 0) and (path_size > 0)):\n        path_size += path_offset\n        maximum_path_size += path_offset\n        try:\n            path = value_data[path_offset:path_size].decode('utf-16-le')\n        except UnicodeDecodeError:\n            raise errors.ParseError('Unable to decode cached entry path to string')\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = self._cached_entry_data_type_map.GetByteSize()\n    cached_entry_object.insertion_flags = cached_entry.insertion_flags\n    cached_entry_object.last_modification_time = cached_entry.last_modification_time\n    cached_entry_object.path = path\n    cached_entry_object.shim_flags = cached_entry.shim_flags\n    return cached_entry_object", "docstring": "Parses a Windows Vista cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "codesearchnet"}
{"code": "def Copy(self):\n    result = QueueManager(store=self.data_store, token=self.token)\n    result.prev_frozen_timestamps = self.prev_frozen_timestamps\n    result.frozen_timestamp = self.frozen_timestamp\n    return result", "docstring": "Return a copy of the queue manager.\n\nReturns:\nCopy of the QueueManager object.\nNOTE: pending writes/deletions are not copied. On the other hand, if the\noriginal object has a frozen timestamp, a copy will have it as well.", "source": "codesearchnet"}
{"code": "def _get_back_up_generator(frame_function, *args, **kwargs):\n    lines = next(frame_function(*args, **kwargs)).split('\\n')\n    width = len(lines[0])\n    height = len(lines)\n    if (height == 1):\n        return util.BACKSPACE_GEN(width)\n    return util.BACKLINE_GEN(height)", "docstring": "Create a generator for the provided animation function that backs up\nthe cursor after a frame. Assumes that the animation function provides\na generator that yields strings of constant width and height.\n\nArgs:\nframe_function: A function that returns a FrameGenerator.\nargs: Arguments for frame_function.\nkwargs: Keyword arguments for frame_function.\nReturns:\na generator that generates backspace/backline characters for\nthe animation func generator.", "source": "codesearchnet"}
{"code": "def parse_global_args(argv):\n    \n\n    parser = create_parser()\n    args = parser.parse_args(argv)\n\n    should_log = args.include or args.exclude or (args.verbose > 0)\n    verbosity = args.verbose\n\n    root = logging.getLogger()\n\n    if should_log:\n        formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',\n                                      '%y-%m-%d %H:%M:%S')\n        if args.logfile:\n            handler = logging.FileHandler(args.logfile)\n        else:\n            handler = logging.StreamHandler()\n\n        handler.setFormatter(formatter)\n\n        if args.include and args.exclude:\n            print(\"You cannot combine whitelisted (-i) and blacklisted (-e) loggers, you must use one or the other.\")\n            sys.exit(1)\n\n        loglevels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]\n        if verbosity >= len(loglevels):\n            verbosity = len(loglevels) - 1\n\n        level = loglevels[verbosity]\n\n        if args.include:\n            for name in args.include:\n                logger = logging.getLogger(name)\n                logger.setLevel(level)\n                logger.addHandler(handler)\n\n            root.addHandler(logging.NullHandler())\n        else:\n            \n            for name in args.exclude:\n                logger = logging.getLogger(name)\n                logger.disabled = True\n\n            root.setLevel(level)\n            root.addHandler(handler)\n    else:\n        root.addHandler(logging.NullHandler())\n\n    return args", "docstring": "Parse all global iotile tool arguments.\n\nAny flag based argument at the start of the command line is considered as\na global flag and parsed.  The first non flag argument starts the commands\nthat are passed to the underlying hierarchical shell.\n\nArgs:\nargv (list): The command line for this command\n\nReturns:\nNamespace: The parsed arguments, with all of the commands that should\nbe executed in an iotile shell as the attribute 'commands'", "source": "juraj-google-style"}
{"code": "def tf_broadcast(*args):\n    if len(args) <= 1:\n        return args\n    sh = array_ops.shape(args[0])\n    for arg in args[1:]:\n        sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg))\n    return [array_ops.broadcast_to(arg, sh) for arg in args]", "docstring": "Broadcast tensors.\n\nArgs:\n*args: a list of tensors whose shapes are broadcastable against each other.\n\nReturns:\nTensors broadcasted to the common shape.", "source": "github-repos"}
{"code": "def Spearman(poly, dist, sample=10000, retall=False, **kws):\n    samples = dist.sample(sample, **kws)\n    poly = polynomials.flatten(poly)\n    Y = poly(*samples)\n    if retall:\n        return spearmanr(Y.T)\n    return spearmanr(Y.T)[0]", "docstring": "Calculate Spearman's rank-order correlation coefficient.\n\nArgs:\npoly (Poly):\nPolynomial of interest.\ndist (Dist):\nDefines the space where correlation is taken.\nsample (int):\nNumber of samples used in estimation.\nretall (bool):\nIf true, return p-value as well.\n\nReturns:\n(float, numpy.ndarray):\nCorrelation output ``rho``. Of type float if two-dimensional problem.\nCorreleation matrix if larger.\n(float, numpy.ndarray):\nThe two-sided p-value for a hypothesis test whose null hypothesis\nis that two sets of data are uncorrelated, has same dimension as\n``rho``.", "source": "codesearchnet"}
{"code": "def CompileReport(self, mediator):\n    \n    \n    lines_of_text = []\n    if self._output_format == 'yaml':\n      lines_of_text.append(\n          yaml.safe_dump_all(self._service_collection.services))\n    else:\n      lines_of_text.append('Listing Windows Services')\n      for service in self._service_collection.services:\n        lines_of_text.append(self._FormatServiceText(service))\n        lines_of_text.append('')\n\n    lines_of_text.append('')\n    report_text = '\\n'.join(lines_of_text)\n    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: report.", "source": "juraj-google-style"}
{"code": "def parse_timing(self, nids=None):\n        \n        \n        paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]\n\n        \n        from .abitimer import AbinitTimerParser\n        parser = AbinitTimerParser()\n        read_ok = parser.parse(paths)\n        if read_ok:\n            return parser\n        return None", "docstring": "Parse the timer data in the main output file(s) of Abinit.\nRequires timopt /= 0 in the input file (usually timopt = -1)\n\nArgs:\nnids: optional list of node identifiers used to filter the tasks.\n\nReturn: :class:`AbinitTimerParser` instance, None if error.", "source": "juraj-google-style"}
{"code": "def log_images(self, name, images, step=None):\n        \n        if isinstance(images, six.string_types):\n            raise TypeError('\"images\" should be a list of ndarrays, got {}'\n                            .format(type(images)))\n\n        self._check_step(step)\n        tf_name = self._ensure_tf_name(name)\n\n        summary = self._image_summary(tf_name, images, step=step)\n        self._log_summary(tf_name, summary, images, step=step)", "docstring": "Log new images for given name on given step.\n\nArgs:\nname (str): name of the variable (it will be converted to a valid\ntensorflow summary name).\nimages (list): list of images to visualize\nstep (int): non-negative integer used for visualization", "source": "juraj-google-style"}
{"code": "def _string_from_ip_int(cls, ip_int):\n        \n        return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]\n                                    if isinstance(b, bytes)\n                                    else b)\n                        for b in _compat_to_bytes(ip_int, 4, 'big'))", "docstring": "Turns a 32-bit integer into dotted decimal notation.\n\nArgs:\nip_int: An integer, the IP address.\n\nReturns:\nThe IP address as a string in dotted decimal notation.", "source": "juraj-google-style"}
{"code": "def easeInElastic(n, amplitude=1, period=0.3):\n    \n    _checkRange(n)\n    return 1 - easeOutElastic(1-n, amplitude=amplitude, period=period)", "docstring": "An elastic tween function that begins with an increasing wobble and then snaps into the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def _configure_from_mapping(self, item, whitelist_keys=False, whitelist=None):\n    if (whitelist is None):\n        whitelist = self.config.keys()\n    if whitelist_keys:\n        item = {k: v for (k, v) in item.items() if (k in whitelist)}\n    self.config.from_mapping(item)\n    return self", "docstring": "Configure from a mapping, or dict, like object.\n\nArgs:\nitem (dict):\nA dict-like object that we can pluck values from.\n\nKeyword Args:\nwhitelist_keys (bool):\nShould we whitelist the keys before adding them to the\nconfiguration? If no whitelist is provided, we use the\npre-existing config keys as a whitelist.\nwhitelist (list[str]):\nAn explicit list of keys that should be allowed. If provided\nand ``whitelist_keys`` is true, we will use that as our\nwhitelist instead of pre-existing app config keys.\n\nReturns:\nfleaker.App:\nReturns itself.", "source": "codesearchnet"}
{"code": "def get_subject_with_local_validation(jwt_bu64, cert_obj):\n    try:\n        jwt_dict = validate_and_decode(jwt_bu64, cert_obj)\n    except JwtException as e:\n        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)\n    try:\n        return jwt_dict['sub']\n    except LookupError:\n        log_jwt_dict_info(logging.error, 'Missing \"sub\" key', jwt_dict)", "docstring": "Validate the JWT and return the subject it contains.\n\n- The JWT is validated by checking that it was signed with a CN certificate.\n- The returned subject can be trusted for authz and authn operations.\n\n- Possible validation errors include:\n\n- A trusted (TLS/SSL) connection could not be made to the CN holding the\nsigning certificate.\n- The JWT could not be decoded.\n- The JWT signature signature was invalid.\n- The JWT claim set contains invalid \"Not Before\" or \"Expiration Time\" claims.\n\nArgs:\njwt_bu64: bytes\nThe JWT encoded using a a URL safe flavor of Base64.\n\ncert_obj: cryptography.Certificate\nPublic certificate used for signing the JWT (typically the CN cert).\n\nReturns:\n- On successful validation, the subject contained in the JWT is returned.\n\n- If validation fails for any reason, errors are logged and None is returned.", "source": "codesearchnet"}
{"code": "def group(self, group_id):\n    self._validate_group_id(group_id)\n    return self._Context(self, group_id)", "docstring": "Enter a context where the lock is with group `group_id`.\n\nArgs:\ngroup_id: The group for which to acquire and release the lock.\n\nReturns:\nA context manager which will acquire the lock for `group_id`.", "source": "github-repos"}
{"code": "def _next_dna(self, dna: Optional['DNA']=None) -> Optional['DNA']:\n    if dna is None:\n        return DNA(self.min_value)\n    raise NotImplementedError('`next_dna` is not supported on `Float` yet.')", "docstring": "Returns the next DNA in the space represented by this spec.\n\nArgs:\ndna: The DNA whose next will be returned. If None, `next_dna` will return\nthe first DNA.\n\nReturns:\nThe next DNA or None if there is no next DNA.", "source": "github-repos"}
{"code": "def get_stored_variation(self, experiment, user_profile):\n    \n\n    user_id = user_profile.user_id\n    variation_id = user_profile.get_variation_for_experiment(experiment.id)\n\n    if variation_id:\n      variation = self.config.get_variation_from_id(experiment.key, variation_id)\n      if variation:\n        self.logger.info('Found a stored decision. User \"%s\" is in variation \"%s\" of experiment \"%s\".' % (\n          user_id,\n          variation.key,\n          experiment.key\n        ))\n        return variation\n\n    return None", "docstring": "Determine if the user has a stored variation available for the given experiment and return that.\n\nArgs:\nexperiment: Object representing the experiment for which user is to be bucketed.\nuser_profile: UserProfile object representing the user's profile.\n\nReturns:\nVariation if available. None otherwise.", "source": "juraj-google-style"}
{"code": "def roll50(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[0] == '0':\n        return None\n\n    sign = int(d[1])    \n    value = bin2int(d[2:11])\n\n    if sign:\n        value = value - 512\n\n    angle = value * 45.0 / 256.0    \n    return round(angle, 1)", "docstring": "Roll angle, BDS 5,0 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS50) string\n\nReturns:\nfloat: angle in degrees,\nnegative->left wing down, positive->right wing down", "source": "juraj-google-style"}
{"code": "def LessThanOrEqualTo(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, '<=')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"less than or equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def delete(self, key):\n        \n        data = None\n        if key is not None:\n            data = self.db.delete(key.strip())\n        else:\n            self.tcex.log.warning(u'The key field was None.')\n        return data", "docstring": "Delete method of CRUD operation for all data types.\n\nArgs:\nkey (string): The variable to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"}
{"code": "def parse_uniprot_xml_metadata(sr):\n    xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq']\n    infodict = {}\n    infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id]))\n    infodict['gene_name'] = None\n    if ('gene_name_primary' in sr.annotations):\n        infodict['gene_name'] = sr.annotations['gene_name_primary']\n    infodict['description'] = sr.description\n    infodict['taxonomy'] = None\n    if ('organism' in sr.annotations):\n        infodict['taxonomy'] = sr.annotations['organism']\n    infodict['seq_version'] = sr.annotations['sequence_version']\n    infodict['seq_date'] = sr.annotations['sequence_modified']\n    infodict['entry_version'] = sr.annotations['version']\n    infodict['entry_date'] = sr.annotations['modified']\n    tmp = defaultdict(list)\n    for xref in sr.dbxrefs:\n        database = xref.split(':', 1)[0]\n        xrefs = xref.split(':', 1)[(- 1)]\n        if (database in xref_dbs_to_keep):\n            if (database == 'PDB'):\n                tmp['pdbs'].append(xrefs)\n            else:\n                tmp[database.lower()].append(xrefs)\n    infodict.update(tmp)\n    return infodict", "docstring": "Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.\n\nReturns:\ndict: All parsed information", "source": "codesearchnet"}
{"code": "def make_acro(past, prefix, s):\n\n    def _make_acro(s, t=0):\n        'Make an acronym of s for trial t'\n        v = ['a', 'e', 'i', 'o', 'u', 'y']\n        c = [chr(x) for x in six_xrange(ord('a'), (ord('z') + 1)) if (chr(x) not in v)]\n        s = re.sub('\\\\W+', '', s.lower())\n        vx = [x for x in s if (x in v)]\n        cx = [x for x in s if (x in c)]\n        if s.startswith('Mc'):\n            if (t < 1):\n                return ('Mc' + v[0])\n            if (t < 2):\n                return ('Mc' + c[0])\n        if (s[0] in v):\n            if (t < 1):\n                return ((vx[0] + cx[0]) + cx[1])\n            if (t < 2):\n                return ((vx[0] + vx[1]) + cx[0])\n        if ((s[0] in c) and (s[1] in c)):\n            if (t < 1):\n                return ((cx[0] + cx[1]) + vx[0])\n            if (t < 2):\n                return ((cx[0] + cx[1]) + cx[2])\n        if (t < 3):\n            return ((cx[0] + vx[0]) + cx[1])\n        if (t < 4):\n            return ((cx[0] + cx[1]) + cx[2])\n        if (t < 5):\n            return ((cx[0] + vx[0]) + vx[1])\n        if (t < 6):\n            return ((cx[0] + cx[1]) + cx[(- 1)])\n        if (t < 7):\n            return s[0:3]\n        if (t < 8):\n            return s[1:4]\n        if (t < 9):\n            return s[2:5]\n        if (t < 10):\n            return s[3:6]\n        return None\n    for t in six_xrange(11):\n        try:\n            a = _make_acro(s, t)\n            if (a is not None):\n                if prefix:\n                    aps = (prefix + a)\n                else:\n                    aps = a\n                if (aps not in past):\n                    past.add(aps)\n                    return a\n        except IndexError:\n            pass\n    raise Exception('Could not get acronym.')", "docstring": "Create a three letter acronym from the input string s.\n\nArgs:\npast: A set object, for storing acronyms that have already been created\nprefix: A prefix added to the acronym before storing in the set\ns: The string to create the acronym from.", "source": "codesearchnet"}
{"code": "def _clean_query_string(q):\n    q = q.replace('()', '').strip()\n    if q.endswith('('):\n        q = q[:(- 1)].strip()\n    if ((q[(- 3):] == 'AND') or (q[(- 3):] == 'NOT')):\n        q = q[:(- 3)]\n    elif (q[(- 2):] == 'OR'):\n        q = q[:(- 2)]\n    while (q.count('(') > q.count(')')):\n        q += ')'\n    while (q.count(')') > q.count('(')):\n        q = ('(' + q)\n    return q.strip()", "docstring": "Clean up a query string for searching.\n\nRemoves unmatched parentheses and joining operators.\n\nArguments:\nq (str): Query string to be cleaned\n\nReturns:\nstr: The clean query string.", "source": "codesearchnet"}
{"code": "def _non_slot_variables(self):\n    return self._non_slot_dict.values()", "docstring": "Additional variables created by the `Optimizer`.\n\nReturns:\nA list or tuple of variables.", "source": "github-repos"}
{"code": "def handle_discovery_request(self, path, request, start_response):\n    \n    if path == self._GET_REST_API:\n      return self._get_rest_doc(request, start_response)\n    elif path == self._GET_RPC_API:\n      error_msg = ('RPC format documents are no longer supported with the '\n                   'Endpoints Framework for Python. Please use the REST '\n                   'format.')\n      _logger.error('%s', error_msg)\n      return util.send_wsgi_error_response(error_msg, start_response)\n    elif path == self._LIST_API:\n      return self._list(request, start_response)\n    return False", "docstring": "Returns the result of a discovery service request.\n\nThis calls start_response and returns the response body.\n\nArgs:\npath: A string containing the API path (the portion of the path\nafter /_ah/api/).\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nThe response body.  Or returns False if the request wasn't handled by\nDiscoveryService.", "source": "juraj-google-style"}
{"code": "def generate(data, iterations=1000, force_strength=5.0, dampening=0.01, max_velocity=2.0, max_distance=50, is_3d=True):\n    edges = [{'source': s, 'target': t} for (s, t) in data]\n    nodes = force_directed_layout.run(edges, iterations, force_strength, dampening, max_velocity, max_distance, is_3d)\n    return {'edges': edges, 'nodes': nodes}", "docstring": "Runs a force-directed algorithm on a graph, returning a data structure.\n\nArgs:\ndata: An adjacency list of tuples (ie. [(1,2),...])\niterations: (Optional) Number of FDL iterations to run in coordinate\ngeneration\nforce_strength: (Optional) Strength of Coulomb and Hooke forces\n(edit this to scale the distance between nodes)\ndampening: (Optional) Multiplier to reduce force applied to nodes\nmax_velocity: (Optional) Maximum distance a node can move in one step\nmax_distance: (Optional) The maximum inter-node distance considered\nis_3d: (Optional) Generates three-dimensional coordinates\n\nOutputs a json-serializable Python object. To visualize, pass the output to\n`jgraph.draw(...)`.", "source": "codesearchnet"}
{"code": "def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None:\n        \n        existing_plugin = cls.available.get(name)\n        if existing_plugin is None:\n            cls.available[name] = plugin\n        elif existing_plugin != plugin:\n            raise ConnectionPluginAlreadyRegistered(\n                f\"Connection plugin {plugin.__name__} can't be registered as \"\n                f\"{name!r} because plugin {existing_plugin.__name__} \"\n                f\"was already registered under this name\"\n            )", "docstring": "Registers a connection plugin with a specified name\n\nArgs:\nname: name of the connection plugin to register\nplugin: defined connection plugin class\n\nRaises:\n:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if\nanother plugin with the specified name was already registered", "source": "juraj-google-style"}
{"code": "def str2dict(str_in):\n    \n    dict_out = safe_eval(str_in)\n    if not isinstance(dict_out, dict):\n        dict_out = None\n    return dict_out", "docstring": "Extracts a dict from a string.\n\nArgs:\nstr_in (string) that contains python dict\nReturns:\n(dict) or None if no valid dict was found\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def stop_stream_capturer(self, address):\n    address = str(address)\n    if (address not in self._stream_capturers):\n        raise ValueError('Capturer address does not match a managed capturer')\n    stream_cap = self._stream_capturers[address]\n    self._pool.killone(stream_cap[1])\n    del self._stream_capturers[address]", "docstring": "Stop a capturer that the manager controls.\n\nArgs:\naddress:\nAn address array of the form ['host', 'port'] or similar\ndepending on the connection type of the stream capturer being\nterminated. The capturer for the address will be terminated\nalong with all handlers for that capturer if the address is\nthat of a managed capturer.\n\nRaises:\nValueError:\nThe provided address doesn't match a capturer that is\ncurrently managed.", "source": "codesearchnet"}
{"code": "def run(self, copy_to_current_on_exit=False, site_property=None):\n    scratch = tempfile.gettempdir()\n    with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:\n        self._write_input(input_dir=scratch_dir)\n        packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r')\n        p = Popen(self.packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)\n        (stdout, stderr) = p.communicate()\n        output_file = os.path.join(scratch_dir, self.control_params['output'])\n        if os.path.isfile(output_file):\n            packed_mol = BabelMolAdaptor.from_file(output_file, self.control_params['filetype'])\n            packed_mol = packed_mol.pymatgen_mol\n            print('packed molecule written to {}'.format(self.control_params['output']))\n            if site_property:\n                packed_mol = self.restore_site_properties(site_property=site_property, filename=output_file)\n            return packed_mol\n        else:\n            print('Packmol execution failed')\n            print(stdout, stderr)\n            return None", "docstring": "Write the input file to the scratch directory, run packmol and return\nthe packed molecule.\n\nArgs:\ncopy_to_current_on_exit (bool): Whether or not to copy the packmol\ninput/output files from the scratch directory to the current\ndirectory.\nsite_property (str): if set then the specified site property\nfor the the final packed molecule will be restored.\n\nReturns:\nMolecule object", "source": "codesearchnet"}
{"code": "def cluster_info(cpu, cfg):\n    \n    cpus = cpu.cpu_count\n    pods_per_core = cfg.doc.find(\"pods-per-core\")\n    pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE\n    cfg_max_pods = cfg.doc.find(\"max-pods\")\n    cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS\n    calc_max_pods = cpus * pods_per_core_int\n\n    return {\n        \"cpu_count\": cpus,\n        \"pods_per_core\": pods_per_core_int,\n        \"pods_per_core_customized\": bool(pods_per_core),\n        \"max_pods\": min(cfg_max_pods_int, calc_max_pods),\n        \"max_pods_customized\": bool(cfg_max_pods)\n    }", "docstring": "Collects fact for each host\n\nCollects the cpu and node configuration facts to be used by the rule.\n\nArguments:\ncpu (CpuInfo): Parser object for the cpu info.\ncfg (NodeConfig): Parser object for the node configuration.\n\nReturns:\ndict: Dictionary of fact information including the keys\n``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``,\n``max_pods``, and ``max_pods_customized``.", "source": "juraj-google-style"}
{"code": "def get_boards(board_name_list, *args, **kwargs):\n    \n    if isinstance(board_name_list, basestring):\n        board_name_list = board_name_list.split()\n    return [Board(name, *args, **kwargs) for name in board_name_list]", "docstring": "Given a list of boards, return :class:`basc_py4chan.Board` objects.\n\nArgs:\nboard_name_list (list): List of board names to get, eg: ['b', 'tg']\n\nReturns:\ndict of :class:`basc_py4chan.Board`: Requested boards.", "source": "juraj-google-style"}
{"code": "def __field_to_parameter_type(self, field):\n    \n    \n    variant = field.variant\n    if variant == messages.Variant.MESSAGE:\n      raise TypeError('A message variant can\\'t be used in a parameter.')\n\n    custom_variant_map = {\n        messages.Variant.SINT32: 'int32',\n        messages.Variant.SINT64: 'int64',\n        messages.Variant.BOOL: 'boolean',\n        messages.Variant.ENUM: 'string',\n    }\n    return custom_variant_map.get(variant) or variant.name.lower()", "docstring": "Converts the field variant type into a string describing the parameter.\n\nArgs:\nfield: An instance of a subclass of messages.Field.\n\nReturns:\nA string corresponding to the variant enum of the field, with a few\nexceptions. In the case of signed ints, the 's' is dropped; for the BOOL\nvariant, 'boolean' is used; and for the ENUM variant, 'string' is used.\n\nRaises:\nTypeError: if the field variant is a message variant.", "source": "juraj-google-style"}
{"code": "def gaussian_bags_of_words(Y, vocab=vocab1k, sigma=1, bag_size=[25, 50], **kwargs):\n\n    def make_distribution(sigma, num_words):\n        p = abs(np.random.normal(0, sigma, num_words))\n        return (p / sum(p))\n    num_words = len(vocab)\n    word_dists = {y: make_distribution(sigma, num_words) for y in set(Y)}\n    bag_sizes = np.random.choice(range(min(bag_size), max(bag_size)), len(Y))\n    X = []\n    items = []\n    for (i, (y, length)) in enumerate(zip(Y, bag_sizes)):\n        x = torch.from_numpy(np.random.choice(num_words, length, p=word_dists[y]))\n        X.append(x)\n        items.append(' '.join((vocab[j] for j in x)))\n    return (X, items)", "docstring": "Generate Gaussian bags of words based on label assignments\n\nArgs:\nY: np.array of true labels\nsigma: (float) the standard deviation of the Gaussian distributions\nbag_size: (list) the min and max length of bags of words\n\nReturns:\nX: (Tensor) a tensor of indices representing tokens\nD: (list) a list of sentences (strings)\n\nThe sentences are conditionally independent, given a label.\nNote that technically we use a half-normal distribution here because we\ntake the absolute value of the normal distribution.\n\nExample:\nTBD", "source": "codesearchnet"}
{"code": "def apply_transformation(self, structure):\n    sga = SpacegroupAnalyzer(structure, symprec=self.symprec, angle_tolerance=self.angle_tolerance)\n    return sga.get_conventional_standard_structure(international_monoclinic=self.international_monoclinic)", "docstring": "Returns most primitive cell for structure.\n\nArgs:\nstructure: A structure\n\nReturns:\nThe same structure in a conventional standard setting", "source": "codesearchnet"}
{"code": "def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.matmul(input_tensor, random_tensor_gen_fn((2, 3)))\n    out = math_ops.matmul(out, random_tensor_gen_fn((3, 4)))\n    return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\ninput_tensor: Input tensor to matmul with the filter.\n\nReturns:\nA 'output' -> output tensor mapping", "source": "github-repos"}
{"code": "def parameter_combinations(test_parameters: Sequence[Mapping[str, Sequence[Any]]]) -> Sequence[Mapping[str, Any]]:\n    real_parameters = []\n    for parameters in test_parameters:\n        keys = parameters.keys()\n        for curr in itertools.product(*parameters.values()):\n            real_parameters.append(dict(zip(keys, curr)))\n    return real_parameters", "docstring": "Generate all combinations of test parameters.\n\nArgs:\ntest_parameters: List of dictionaries that maps parameter keys and values.\n\nReturns:\nreal_parameters: All possible combinations of the parameters as list of\ndictionaries.", "source": "github-repos"}
{"code": "def get_rml_processors(es_defs):\n    \n    proc_defs = es_defs.get(\"kds_esRmlProcessor\", [])\n    if proc_defs:\n        new_defs = []\n        for proc in proc_defs:\n            params = proc['kds_rmlProcessorParams'][0]\n            proc_kwargs = {}\n            if params.get(\"kds_rtn_format\"):\n                proc_kwargs[\"rtn_format\"] = params.get(\"kds_rtn_format\")[0]\n            new_def = dict(name=proc['rdfs_label'][0],\n                           subj=params[\"kds_subjectKwarg\"][0],\n                           proc_kwargs=proc_kwargs,\n                           force=proc.get('kds_forceNested',[False])[0],\n                           processor=CFG.rml.get_processor(\\\n                                proc['rdfs_label'][0],\n                                proc['kds_esRmlMapping'],\n                                proc['rdf_type'][0]))\n            new_defs.append(new_def)\n        es_defs['kds_esRmlProcessor'] = new_defs\n    return es_defs", "docstring": "Returns the es_defs with the instaniated rml_processor\n\nArgs:\n-----\nes_defs: the rdf_class elacticsearch defnitions\ncls_name: the name of the tied class", "source": "juraj-google-style"}
{"code": "def Completions(component, verbose=False):\n    if inspect.isroutine(component) or inspect.isclass(component):\n        spec = inspectutils.GetFullArgSpec(component)\n        return _CompletionsFromArgs(spec.args + spec.kwonlyargs)\n    if isinstance(component, (tuple, list)):\n        return [str(index) for index in range(len(component))]\n    if inspect.isgenerator(component):\n        return []\n    return [_FormatForCommand(member_name) for member_name, _ in VisibleMembers(component, verbose=verbose)]", "docstring": "Gives possible Fire command completions for the component.\n\nA completion is a string that can be appended to a command to continue that\ncommand. These are used for TAB-completions in Bash for Fire CLIs.\n\nArgs:\ncomponent: The component whose completions to list.\nverbose: Whether to include all completions, even private members.\nReturns:\nA list of completions for a command that would so far return the component.", "source": "github-repos"}
{"code": "def multiple(layer: int, limit: int) -> Set[str]:\n    return {str(x).zfill(2) for x in [(2 ** x) for x in range(limit)] if ((x % (2 ** (layer - 1))) == 0)}", "docstring": "Returns a set of strings to be used as Slots with Pabianas default Clock.\n\nArgs:\nlayer: The layer in the hierarchy this Area is placed in.\nTechnically, the number specifies how many of the Clocks signals are relevant to the Area.\nBetween 1 and limit.\nlimit: The number of layers of the hierarchy.", "source": "codesearchnet"}
{"code": "def generate_secret_file(file_path, pattern, service, environment, clients):\n  \n  changed = False\n  with open(file_path) as json_file:\n    data = json.load(json_file, object_pairs_hook=OrderedDict)\n    try:\n      for key, value in data[\"params\"][environment].items():\n        if pattern in key:\n          if \"aws:kms:decrypt\" in value:\n            print(\"Found match, key {} but value is encrypted already; skipping...\".format(key))\n          else:\n            print(\"Found match, encrypting key {}\".format(key))\n            encrypted_password = ef_utils.kms_encrypt(clients['kms'], service, environment, value)\n            data[\"params\"][environment][key] = format_secret(encrypted_password)\n            changed = True\n    except KeyError:\n      ef_utils.fail(\"Error env: {} does not exist in parameters file\".format(environment))\n\n  if changed:\n    with open(file_path, \"w\") as encrypted_file:\n      json.dump(data, encrypted_file, indent=2, separators=(',', ': '))\n      \n      encrypted_file.write(\"\\n\")", "docstring": "Generate a parameter files with it's secrets encrypted in KMS\nArgs:\nfile_path (string): Path to the parameter file to be encrypted\npattern (string): Pattern to do fuzzy string matching\nservice (string): Service to use KMS key to encrypt file\nenvironment (string): Environment to encrypt values\nclients (dict): KMS AWS client that has been instantiated\nReturns:\nNone\nRaises:\nIOError: If the file does not exist", "source": "juraj-google-style"}
{"code": "def get_without(self, fragments, use_lookup=None):\n    if (use_lookup is None):\n        use_lookup = settings['defaults']['use_lookup']\n    if pd.api.types.is_list_like(fragments):\n        for fragment in fragments:\n            try:\n                index_of_all_fragments |= fragment.index\n            except NameError:\n                index_of_all_fragments = fragment.index\n    else:\n        index_of_all_fragments = fragments.index\n    missing_part = self.loc[self.index.difference(index_of_all_fragments)]\n    missing_part = missing_part.fragmentate(use_lookup=use_lookup)\n    return sorted(missing_part, key=len, reverse=True)", "docstring": "Return self without the specified fragments.\n\nArgs:\nfragments: Either a list of :class:`~chemcoord.Cartesian` or a\n:class:`~chemcoord.Cartesian`.\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\nlist: List containing :class:`~chemcoord.Cartesian`.", "source": "codesearchnet"}
{"code": "def _md5sum(file_path):\n    \n    md5 = hashlib.md5()\n\n    with open(file_path, \"rb\") as md5_file:\n        while True:\n            data = md5_file.read(1024 * 1024 * 4)\n            if not data:\n                break\n            md5.update(data)\n\n    return md5.digest()", "docstring": "Helper function that builds and md5sum from a file in chunks.\n\nArgs:\nfile_path: The path to the file you want an md5sum for.\n\nReturns:\nA string containing an md5sum.", "source": "juraj-google-style"}
{"code": "def set_ocha_url(cls, url=None):\n        \n        \n        if url is None:\n            url = cls._ochaurl_int\n        cls._ochaurl = url", "docstring": "Set World Bank url from which to retrieve countries data\n\nArgs:\nurl (str): World Bank url from which to retrieve countries data. Defaults to internal value.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def update(self, current, values=None, finalize=None):\n    if finalize is None:\n        if self.target is None:\n            finalize = False\n        else:\n            finalize = current >= self.target\n    values = values or []\n    for k, v in values:\n        if k not in self._values_order:\n            self._values_order.append(k)\n        if k not in self.stateful_metrics:\n            if finalize:\n                self._values[k] = [v, 1]\n            else:\n                value_base = max(current - self._seen_so_far, 1)\n                if k not in self._values:\n                    self._values[k] = [v * value_base, value_base]\n                else:\n                    self._values[k][0] += v * value_base\n                    self._values[k][1] += value_base\n        else:\n            self._values[k] = [v, 1]\n    self._seen_so_far = current\n    message = ''\n    special_char_len = 0\n    now = time.time()\n    time_per_unit = self._estimate_step_duration(current, now)\n    if self.verbose == 1:\n        if now - self._last_update < self.interval and (not finalize):\n            return\n        if self._dynamic_display:\n            message += '\\x08' * self._prev_total_width\n            message += '\\r'\n        else:\n            message += '\\n'\n        if self.target is not None:\n            numdigits = int(math.log10(self.target)) + 1\n            bar = ('%' + str(numdigits) + 'd/%d') % (current, self.target)\n            bar = f'\\x1b[1m{bar}\\x1b[0m '\n            special_char_len += 8\n            prog = float(current) / self.target\n            prog_width = int(self.width * prog)\n            if prog_width > 0:\n                bar += '\\x1b[32m' + '━' * prog_width + '\\x1b[0m'\n                special_char_len += 9\n            bar += '\\x1b[37m' + '━' * (self.width - prog_width) + '\\x1b[0m'\n            special_char_len += 9\n        else:\n            bar = '%7d/Unknown' % current\n        message += bar\n        if self.target is not None and (not finalize):\n            eta = time_per_unit * (self.target - current)\n            if eta > 3600:\n                eta_format = '%d:%02d:%02d' % (eta \n            elif eta > 60:\n                eta_format = '%d:%02d' % (eta \n            else:\n                eta_format = '%ds' % eta\n            info = f' \\x1b[1m{eta_format}\\x1b[0m'\n        else:\n            info = f' \\x1b[1m{now - self._start:.0f}s\\x1b[0m'\n        special_char_len += 8\n        info += self._format_time(time_per_unit, self.unit_name)\n        for k in self._values_order:\n            info += f' - {k}:'\n            if isinstance(self._values[k], list):\n                avg = backend.convert_to_numpy(backend.numpy.mean(self._values[k][0] / max(1, self._values[k][1])))\n                avg = float(avg)\n                if abs(avg) > 0.001:\n                    info += f' {avg:.4f}'\n                else:\n                    info += f' {avg:.4e}'\n            else:\n                info += f' {self._values[k]}'\n        message += info\n        total_width = len(bar) + len(info) - special_char_len\n        if self._prev_total_width > total_width:\n            message += ' ' * (self._prev_total_width - total_width)\n        if finalize:\n            message += '\\n'\n        io_utils.print_msg(message, line_break=False)\n        self._prev_total_width = total_width\n        message = ''\n    elif self.verbose == 2:\n        if finalize:\n            numdigits = int(math.log10(self.target)) + 1\n            count = ('%' + 
str(numdigits) + 'd/%d') % (current, self.target)\n            info = f'{count} - {now - self._start:.0f}s'\n            info += ' -' + self._format_time(time_per_unit, self.unit_name)\n            for k in self._values_order:\n                info += f' - {k}:'\n                avg = backend.convert_to_numpy(backend.numpy.mean(self._values[k][0] / max(1, self._values[k][1])))\n                if avg > 0.001:\n                    info += f' {avg:.4f}'\n                else:\n                    info += f' {avg:.4e}'\n            info += '\\n'\n            message += info\n            io_utils.print_msg(message, line_break=False)\n            message = ''\n    self._last_update = now", "docstring": "Updates the progress bar.\n\nArgs:\ncurrent: Index of current step.\nvalues: List of tuples: `(name, value_for_last_step)`. If `name` is\nin `stateful_metrics`, `value_for_last_step` will be displayed\nas-is. Else, an average of the metric over time will be\ndisplayed.\nfinalize: Whether this is the last update for the progress bar. If\n`None`, defaults to `current >= self.target`.", "source": "github-repos"}
{"code": "def draw_line(self, x1, y1, x2, y2, color):\n    check_int_err(lib.lineRGBA(self._ptr, x1, y1, x2, y2, color[0], color[1], color[2], color[3]))", "docstring": "Draw a line.\n\nArgs:\nx1 (int): The x coordinate of the start of the line.\ny1 (int): The y coordinate of the start of the line.\nx2 (int): The x coordinate of the end of the line.\ny2 (int): The y coordinate of the end of the line.\ncolor (Tuple[int, int, int, int]): The color of the circle.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def _GetNumberOfDaysInCentury(self, year):\n    \n    if year < 0:\n      raise ValueError('Year value out of bounds.')\n\n    year, _ = divmod(year, 100)\n\n    if self._IsLeapYear(year):\n      return 36525\n    return 36524", "docstring": "Retrieves the number of days in a century.\n\nArgs:\nyear (int): year in the century e.g. 1970.\n\nReturns:\nint: number of (remaining) days in the century.\n\nRaises:\nValueError: if the year value is out of bounds.", "source": "juraj-google-style"}
{"code": "def get(self):\n    try:\n        item = self._queue.get_nowait()\n    except (Empty, PersistEmpty):\n        return None\n    if self._persistence_path:\n        self._queue.task_done()\n    return item", "docstring": "Gets a single item from the queue and returns it. If the queue is empty, this method will return None.\n\nReturns:\n:class:`contracts.Envelope`. a telemetry envelope object or None if the queue is empty.", "source": "codesearchnet"}
{"code": "def loop_until_timeout_or_valid(timeout_s, function, validation_fn, sleep_s=1):\n    if ((timeout_s is None) or (not hasattr(timeout_s, 'has_expired'))):\n        timeout_s = PolledTimeout(timeout_s)\n    while True:\n        result = function()\n        if (validation_fn(result) or timeout_s.has_expired()):\n            return result\n        time.sleep(sleep_s)", "docstring": "Loops until the specified function returns valid or a timeout is reached.\n\nNote: The function may return anything which, when passed to validation_fn,\nevaluates to implicit True.  This function will loop calling the function as\nlong as the result of validation_fn(function_result) returns something which\nevaluates to False. We ensure function is called at least once regardless\nof timeout.\n\nArgs:\ntimeout_s: The number of seconds to wait until a timeout condition is\nreached. As a convenience, this accepts None to mean never timeout.  Can\nalso be passed a PolledTimeout object instead of an integer.\nfunction: The function to call each iteration.\nvalidation_fn: The validation function called on the function result to\ndetermine whether to keep looping.\nsleep_s: The number of seconds to wait after calling the function.\n\nReturns:\nWhatever the function returned last.", "source": "codesearchnet"}
{"code": "def fit(self, volumes, energies):\n        \n        eos_fit = self.model(np.array(volumes), np.array(energies))\n        eos_fit.fit()\n        return eos_fit", "docstring": "Fit energies as function of volumes.\n\nArgs:\nvolumes (list/np.array)\nenergies (list/np.array)\n\nReturns:\nEOSBase: EOSBase object", "source": "juraj-google-style"}
{"code": "def get_params_from_sqlalchemy_url(db_url):\n    result = urlsplit(db_url)\n    return {'database': result.path[1:], 'host': result.hostname, 'port': result.port, 'username': result.username, 'password': result.password, 'driver': result.scheme}", "docstring": "Gets PostgreSQL database connection parameters from SQLAlchemy url\n\nArgs:\ndb_url (str): SQLAlchemy url\n\nReturns:\nDict[str,Any]: Dictionary of database connection parameters", "source": "codesearchnet"}
{"code": "def heightmap_get_minmax(hm: np.ndarray) -> Tuple[float, float]:\n    \n    mi = ffi.new(\"float *\")\n    ma = ffi.new(\"float *\")\n    lib.TCOD_heightmap_get_minmax(_heightmap_cdata(hm), mi, ma)\n    return mi[0], ma[0]", "docstring": "Return the min and max values of this heightmap.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\n\nReturns:\nTuple[float, float]: The (min, max) values.\n\n.. deprecated:: 2.0\nUse ``hm.min()`` or ``hm.max()`` instead.", "source": "juraj-google-style"}
{"code": "def get_tag_hash(self, tag_name):\n    tag_object = get_single_item_from_sequence(sequence=self._github_repository.tags(), condition=(lambda tag: (tag.name == tag_name)), no_item_error_message='No tag \"{}\" exist'.format(tag_name), too_many_item_error_message='Too many tags \"{}\" found'.format(tag_name))\n    return tag_object.commit.sha", "docstring": "Fetch the commit hash that was tagged with ``tag_name``.\n\nArgs:\ntag_name (str): the name of the tag\n\nReturns:\nstr: the commit hash linked by the tag", "source": "codesearchnet"}
{"code": "def preprocess_model(self, model: 'PreTrainedModel', **kwargs):\n    model.is_quantized = True\n    model.quantization_method = self.quantization_config.quant_method\n    if self.pre_quantized:\n        self._convert_model_for_quantization(model)\n    return self._process_model_before_weight_loading(model, **kwargs)", "docstring": "Setting model attributes and/or converting model before weights loading. At this point\nthe model should be initialized on the meta device so you can freely manipulate the skeleton\nof the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.\n\nArgs:\nmodel (`~transformers.PreTrainedModel`):\nThe model to quantize\nkwargs (`dict`, *optional*):\nThe keyword arguments that are passed along `_process_model_before_weight_loading`.", "source": "github-repos"}
{"code": "def random_new_from_seed(\n    seed: Hashable, algo: int = RNG_CMWC\n) -> tcod.random.Random:\n    \n    return tcod.random.Random(algo, seed)", "docstring": "Return a new Random instance.  Using the given ``seed`` and ``algo``.\n\nArgs:\nseed (Hashable): The RNG seed.  Should be a 32-bit integer, but any\nhashable object is accepted.\nalgo (int): The random number algorithm to use.\n\nReturns:\nRandom: A new Random instance using the given algorithm.", "source": "juraj-google-style"}
{"code": "def _ip_int_from_string(self, ip_str):\n        \n        octets = ip_str.split('.')\n        if len(octets) != 4:\n            raise AddressValueError(ip_str)\n\n        packed_ip = 0\n        for oc in octets:\n            try:\n                packed_ip = (packed_ip << 8) | self._parse_octet(oc)\n            except ValueError:\n                raise AddressValueError(ip_str)\n        return packed_ip", "docstring": "Turn the given IP string into an integer for comparison.\n\nArgs:\nip_str: A string, the IP ip_str.\n\nReturns:\nThe IP ip_str as an integer.\n\nRaises:\nAddressValueError: if ip_str isn't a valid IPv4 Address.", "source": "juraj-google-style"}
{"code": "def verify(self, obj):\n        \n\n        if not isinstance(obj, float):\n            raise ValidationError(\"Object is not a float\", reason='object is not a float', object=obj)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema.\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def put(self, file_path, upload_path = ''):\n        \n        f = open(file_path, \"r\")\n        c = f.read()\n\n        file_name = os.path.basename(file_path)\n\n        now = datetime.datetime.now().isoformat()\n        url = nurls['put'] + upload_path + file_name\n\n        headers = {'userid': self.user_id,\n                   'useridx': self.useridx,\n                   'MODIFYDATE': now,\n                   'Content-Type': magic.from_file(file_path, mime=True),\n                   'charset': 'UTF-8',\n                   'Origin': 'http:\n        }\n        r = self.session.put(url = url, data = c, headers = headers)\n\n        return self.resultManager(r.text)", "docstring": "PUT\n\nArgs:\nfile_path: Full path for a file you want to upload\nupload_path: Ndrive path where you want to upload file\nex) /Picture/\n\nReturns:\nTrue: Upload success\nFalse: Upload failed", "source": "juraj-google-style"}
{"code": "def _parse_impute2_line(self, line):\n        \n        \n        row = line.rstrip(\"\\r\\n\").split(\" \")\n\n        \n        prob = np.array(row[5:], dtype=float)\n        prob.shape = (prob.shape[0] \n\n        \n        dosage = 2 * prob[:, 2] + prob[:, 1]\n        if self.prob_t > 0:\n            dosage[~np.any(prob >= self.prob_t, axis=1)] = np.nan\n\n        return Genotypes(\n            Variant(row[1], CHROM_STR_ENCODE.get(row[0], row[0]), int(row[2]),\n                    [row[3], row[4]]),\n            dosage,\n            reference=row[3],\n            coded=row[4],\n            multiallelic=False,\n        )", "docstring": "Parses the current IMPUTE2 line (a single variant).\n\nArgs:\nline (str): An IMPUTE2 line.\n\nReturns:\nGenotypes: The genotype in dosage format.\n\nWarning\n=======\nBy default, the genotypes object has multiallelic set to False.", "source": "juraj-google-style"}
{"code": "def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, arguments=None):\n    new_workflow = cls(queue=queue, clear_data_store=clear_data_store)\n    new_workflow.load(name, arguments=arguments)\n    return new_workflow", "docstring": "Create a workflow object from a workflow script.\n\nArgs:\nname (str): The name of the workflow script.\nqueue (str): Name of the queue the workflow should be scheduled to.\nclear_data_store (bool): Remove any documents created during the workflow\nrun in the data store after the run.\narguments (dict): Dictionary of additional arguments that are ingested\ninto the data store prior to the execution of the workflow.\n\nReturns:\nWorkflow: A fully initialised workflow object", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    if self._VERIFICATION_REGEX.match(lines):\n        return True\n    return False", "docstring": "Verifies whether content corresponds to a Zsh extended_history file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if the line was successfully parsed.", "source": "codesearchnet"}
{"code": "def visit_node(self, node):\n    raise NotImplementedError('Subclasses must implement this.')", "docstring": "Visitor function.\n\nArgs:\nnode: Node\n\nReturns:\nbool, whether the node should be revisited; subclasses can visit every\nreachable node exactly once by always returning False", "source": "github-repos"}
{"code": "def _subscribe_new(tensor, side_effects, control_cache):\n    update_input = []\n    for consumer_op in list(tensor.consumers()):\n        update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))\n    update_control_input = control_cache.get_control_outputs(tensor.op)\n    name_scope = tensor.op.name + '/subscription/'\n    with ops.name_scope(name_scope):\n        outs = []\n        for s in side_effects:\n            outs += s(tensor)\n        with ops.control_dependencies(outs):\n            out = array_ops.identity(tensor)\n    for consumer_op, index in update_input:\n        consumer_op._update_input(index, out)\n    for consumer_op in update_control_input:\n        new_control_inputs = consumer_op.control_inputs\n        if tensor.op in new_control_inputs:\n            new_control_inputs.remove(tensor.op)\n        new_control_inputs.append(out.op)\n        consumer_op._remove_all_control_inputs()\n        consumer_op._add_control_inputs(new_control_inputs)\n    return out", "docstring": "Helper method that subscribes a single tensor to a list of side_effects.\n\nArgs:\ntensor: `tf.Tensor`\nside_effects: List of side_effect functions see subscribe for details.\ncontrol_cache: `_ControlOutputCache` helper to get control_outputs faster.\n\nReturns:\nThe modified replacement to the passed in tensor which triggers the side\neffects.", "source": "github-repos"}
{"code": "def typed_dict_error(self, stack, obj, name):\n    if name:\n        err_msg = f'TypedDict {obj.class_name} does not contain key {name}'\n    else:\n        err_msg = f'TypedDict {obj.class_name} requires all keys to be constant strings'\n    self.error(stack, err_msg)", "docstring": "Accessing a nonexistent key in a typed dict.\n\nArgs:\nstack: the frame stack\nobj: the typed dict instance\nname: the key name", "source": "github-repos"}
{"code": "def _create_deployment_object(self, job_name, job_image,\n                                  deployment_name, port=80,\n                                  replicas=1,\n                                  cmd_string=None,\n                                  engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',\n                                  engine_dir='.'):\n        \n\n        \n        \n        security_context = None\n        if 'security' in self.config['execution']:\n            security_context = client.V1SecurityContext(run_as_group=self.group_id,\n                                                        run_as_user=self.user_id,\n                                                        run_as_non_root=self.run_as_non_root)\n            \n            \n            \n        \n        environment_vars = client.V1EnvVar(name=\"TEST\", value=\"SOME DATA\")\n\n        launch_args = [\"-c\", \"{0}; /app/deploy.sh;\".format(cmd_string)]\n        print(launch_args)\n\n        \n        container = None\n        if security_context:\n            container = client.V1Container(\n                name=job_name,\n                image=job_image,\n                ports=[client.V1ContainerPort(container_port=port)],\n                command=['/bin/bash'],\n                args=launch_args,\n                env=[environment_vars],\n                security_context=security_context)\n        else:\n            container = client.V1Container(\n                name=job_name,\n                image=job_image,\n                ports=[client.V1ContainerPort(container_port=port)],\n                command=['/bin/bash'],\n                args=launch_args,\n                env=[environment_vars])\n        \n        secret = None\n        if self.secret:\n            secret = client.V1LocalObjectReference(name=self.secret)\n\n        \n        template = client.V1PodTemplateSpec(\n            metadata=client.V1ObjectMeta(labels={\"app\": job_name}),\n            spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))\n\n        \n        spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,\n                                                      template=template)\n\n        \n        deployment = client.ExtensionsV1beta1Deployment(\n            api_version=\"extensions/v1beta1\",\n            kind=\"Deployment\",\n            metadata=client.V1ObjectMeta(name=deployment_name),\n            spec=spec)\n\n        return deployment", "docstring": "Create a kubernetes deployment for the job.\n\nArgs:\n- job_name (string) : Name of the job and deployment\n- job_image (string) : Docker image to launch\n\nKWargs:\n- port (integer) : Container port\n- replicas : Number of replica containers to maintain\n\nReturns:\n- True: The deployment object to launch", "source": "juraj-google-style"}
{"code": "def get_device(self, addr_or_name):\n        \n        if addr_or_name in self._devices:\n            return self._devices[addr_or_name]\n\n        for v in self._devices.values():\n            if v == addr_or_name:\n                return v\n\n        return None", "docstring": "Retrieve a device with a given address or name from the results.\n\nArgs:\naddr_or_name (str): a string containing either a BLE address in xx:xx:xx:xx:xx:xx\nformat, or a plain device name. The supplied value is checked as an address\nfirst and if that fails to produce a result, it is matched against each\nnamed device in the collection.\n\nReturns:\nThe first matching :class:`ScanResult` instance, or None.", "source": "juraj-google-style"}
{"code": "def validate_default_element(self, value):\n    if isinstance(value, (six.string_types, six.integer_types)):\n        if self.__type:\n            self.__type(value)\n        return value\n    return super(EnumField, self).validate_default_element(value)", "docstring": "Validate default element of Enum field.\n\nEnum fields allow for delayed resolution of default values\nwhen the type of the field has not been resolved. The default\nvalue of a field may be a string or an integer. If the Enum\ntype of the field has been resolved, the default value is\nvalidated against that type.\n\nArgs:\nvalue: Value to validate.\n\nRaises:\nValidationError if value is not expected message type.", "source": "codesearchnet"}
{"code": "def get_components(edges, vertices=None):\n    \n    if vertices is None:\n        vertices = set(chain(edges.ix[:, 0], edges.ix[:, 1]))\n\n    visited = set()\n    components = []\n\n    for id in vertices:\n        if id not in visited:\n            c = follow(id, edges)\n            visited.update(c)\n            components.append(c)\n\n    return components", "docstring": "Return connected components from graph determined by edges matrix\nArgs:\nedges: DataFrame of (undirected) edges.\nvertices: set of vertices in graph. Defaults to union of all vertices in edges.\n\nReturns:\nset of connected components, each of which is a set of vertices.", "source": "juraj-google-style"}
{"code": "def get_layer(self, name=None, index=None):\n    if index is not None and name is not None:\n        raise ValueError(f'Provide only a layer name or a layer index. Received: index={index}, name={name}.')\n    if index is not None:\n        if len(self.layers) <= index:\n            raise ValueError(f'Was asked to retrieve layer at index {index} but model only has {len(self.layers)} layers.')\n        else:\n            return self.layers[index]\n    if name is not None:\n        for layer in self.layers:\n            if layer.name == name:\n                return layer\n        raise ValueError(f'No such layer: {name}. Existing layers are: {list((layer.name for layer in self.layers))}.')\n    raise ValueError('Provide either a layer name or layer index at `get_layer`.')", "docstring": "Retrieves a layer based on either its name (unique) or index.\n\nIf `name` and `index` are both provided, `index` will take precedence.\nIndices are based on order of horizontal graph traversal (bottom-up).\n\nArgs:\nname: String, name of layer.\nindex: Integer, index of layer.\n\nReturns:\nA layer instance.", "source": "github-repos"}
{"code": "def port(self, check=False):\n        \n        if not self.__ports:  \n            self.refresh()\n\n        try:\n            port = self.__ports.pop()\n            if check:\n                while not self.__check_port(port):\n                    self.release_port(port)\n                    port = self.__ports.pop()\n        except (IndexError, KeyError):\n            raise IndexError(\"Could not find a free port,\\nclosed ports: {closed}\".format(closed=self.__closed))\n        self.__closed.add(port)\n        return port", "docstring": "return next opened port\nArgs:\ncheck - check is port realy free", "source": "juraj-google-style"}
{"code": "def get_cert_contents(kwargs):\n    \n    paths = {\n        \"certificate\": kwargs.get(\"path_to_certificate\"),\n        \"private_key\": kwargs.get(\"path_to_private_key\"),\n        \"chain\": kwargs.get(\"path_to_chain\"),\n    }\n\n    for key, value in paths.items():\n        if value is not None:\n            continue\n\n        path = input(\"Path to %s (skip): \" % (key,))\n        if path == \"skip\" or not path.strip():\n            continue\n\n        paths[key] = path\n\n    parameters = {\n        \"ServerCertificateName\": kwargs.get(\"cert_name\"),\n    }\n\n    for key, path in paths.items():\n        if not path:\n            continue\n\n        \n        try:\n            contents = path.read()\n        except AttributeError:\n            with open(utils.full_path(path)) as read_file:\n                contents = read_file.read()\n\n        if key == \"certificate\":\n            parameters[\"CertificateBody\"] = contents\n        elif key == \"private_key\":\n            parameters[\"PrivateKey\"] = contents\n        elif key == \"chain\":\n            parameters[\"CertificateChain\"] = contents\n\n    return parameters", "docstring": "Builds parameters with server cert file contents.\n\nArgs:\nkwargs(dict): The keyword args passed to ensure_server_cert_exists,\noptionally containing the paths to the cert, key and chain files.\n\nReturns:\ndict: A dictionary containing the appropriate parameters to supply to\nupload_server_certificate. An empty dictionary if there is a\nproblem.", "source": "juraj-google-style"}
{"code": "def _fromData(cls, header, tflags, data):\n    if (header.version >= header._V24):\n        if (tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN)):\n            datalen_bytes = data[:4]\n            data = data[4:]\n        if ((tflags & Frame.FLAG24_UNSYNCH) or header.f_unsynch):\n            try:\n                data = unsynch.decode(data)\n            except ValueError:\n                pass\n        if (tflags & Frame.FLAG24_ENCRYPT):\n            raise ID3EncryptionUnsupportedError\n        if (tflags & Frame.FLAG24_COMPRESS):\n            try:\n                data = zlib.decompress(data)\n            except zlib.error:\n                data = (datalen_bytes + data)\n                try:\n                    data = zlib.decompress(data)\n                except zlib.error as err:\n                    raise ID3JunkFrameError(('zlib: %s: %r' % (err, data)))\n    elif (header.version >= header._V23):\n        if (tflags & Frame.FLAG23_COMPRESS):\n            (usize,) = unpack('>L', data[:4])\n            data = data[4:]\n        if (tflags & Frame.FLAG23_ENCRYPT):\n            raise ID3EncryptionUnsupportedError\n        if (tflags & Frame.FLAG23_COMPRESS):\n            try:\n                data = zlib.decompress(data)\n            except zlib.error as err:\n                raise ID3JunkFrameError(('zlib: %s: %r' % (err, data)))\n    frame = cls()\n    frame._readData(header, data)\n    return frame", "docstring": "Construct this ID3 frame from raw string data.\n\nRaises:\n\nID3JunkFrameError in case parsing failed\nNotImplementedError in case parsing isn't implemented\nID3EncryptionUnsupportedError in case the frame is encrypted.", "source": "codesearchnet"}
{"code": "def TempDirPath(suffix = \"\", prefix = \"tmp\"):\n  \n  precondition.AssertType(suffix, Text)\n  precondition.AssertType(prefix, Text)\n\n  return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())", "docstring": "Creates a temporary directory based on the environment configuration.\n\nThe directory will be placed in folder as specified by the `TEST_TMPDIR`\nenvironment variable if available or fallback to `Test.tmpdir` of the current\nconfiguration if not.\n\nArgs:\nsuffix: A suffix to end the directory name with.\nprefix: A prefix to begin the directory name with.\n\nReturns:\nAn absolute path to the created directory.", "source": "juraj-google-style"}
{"code": "def exe_cmd(*cmds):\n    cmd = ' '.join(cmds)\n    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n    (out, err) = proc.communicate()\n    if (not err):\n        return out\n    return err", "docstring": "Executes commands in a new shell. Directing stderr to PIPE.\n\nThis is fastboot's own exe_cmd because of its peculiar way of writing\nnon-error info to stderr.\n\nArgs:\ncmds: A sequence of commands and arguments.\n\nReturns:\nThe output of the command run.\n\nRaises:\nException: An error occurred during the command execution.", "source": "codesearchnet"}
{"code": "def parse_example_tensor(examples, train_config, keep_target):\n  \n\n  csv_header = []\n  if keep_target:\n    csv_header = train_config['csv_header']\n  else:\n    csv_header = [name for name in train_config['csv_header']\n                  if name != train_config['target_column']]\n\n  \n  \n  record_defaults = [[train_config['csv_defaults'][name]]\n                     for name in csv_header]\n  tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')\n\n  \n  \n  tensors = [tf.expand_dims(x, axis=1) for x in tensors]\n\n  tensor_dict = dict(zip(csv_header, tensors))\n  return tensor_dict", "docstring": "Read the csv files.\n\nArgs:\nexamples: string tensor\ntrain_config: training config\nkeep_target: if true, the target column is expected to exist and it is\nreturned in the features dict.\n\nReturns:\nDict of feature_name to tensor. Target feature is in the dict.", "source": "juraj-google-style"}
{"code": "def list(self, request):\n    kwargs = {'Bucket': request.bucket, 'Prefix': request.prefix}\n    if request.continuation_token is not None:\n        kwargs['ContinuationToken'] = request.continuation_token\n    try:\n        boto_response = self.client.list_objects_v2(**kwargs)\n    except Exception as e:\n        raise messages.S3ClientError(str(e), get_http_error_code(e))\n    if boto_response['KeyCount'] == 0:\n        message = 'Tried to list nonexistent S3 path: s3:\n        raise messages.S3ClientError(message, 404)\n    items = [messages.Item(etag=content['ETag'], key=content['Key'], last_modified=content['LastModified'], size=content['Size']) for content in boto_response['Contents']]\n    try:\n        next_token = boto_response['NextContinuationToken']\n    except KeyError:\n        next_token = None\n    response = messages.ListResponse(items, next_token)\n    return response", "docstring": "Retrieves a list of objects matching the criteria.\n\nArgs:\nrequest: (ListRequest) input message\nReturns:\n(ListResponse) The response message.", "source": "github-repos"}
{"code": "def __init__(self, cell, residual_fn=None, **kwargs):\n    super(ResidualWrapperBase, self).__init__(cell, **kwargs)\n    self._residual_fn = residual_fn", "docstring": "Constructs a `ResidualWrapper` for `cell`.\n\nArgs:\ncell: An instance of `RNNCell`.\nresidual_fn: (Optional) The function to map raw cell inputs and raw cell\noutputs to the actual cell outputs of the residual network.\nDefaults to calling nest.map_structure on (lambda i, o: i + o), inputs\nand outputs.\n**kwargs: dict of keyword arguments for base layer.", "source": "github-repos"}
{"code": "def _getClassInstance(path, args=None):\n    if (not path.endswith('.py')):\n        return None\n    if (args is None):\n        args = {}\n    classname = AtomShieldsScanner._getClassName(path)\n    basename = os.path.basename(path).replace('.py', '')\n    sys.path.append(os.path.dirname(path))\n    try:\n        mod = __import__(basename, globals(), locals(), [classname], (- 1))\n        class_ = getattr(mod, classname)\n        instance = class_(**args)\n    except Exception as e:\n        AtomShieldsScanner._debug(('[!] %s' % e))\n        return None\n    finally:\n        sys.path.remove(os.path.dirname(path))\n    return instance", "docstring": "Returns a class instance from a .py file.\n\nArgs:\npath (str): Absolute path to .py file\nargs (dict): Arguments passed via class constructor\n\nReturns:\nobject: Class instance or None", "source": "codesearchnet"}
{"code": "def blit_rect(\n        self,\n        console: tcod.console.Console,\n        x: int,\n        y: int,\n        width: int,\n        height: int,\n        bg_blend: int,\n    ) -> None:\n        \n        lib.TCOD_image_blit_rect(\n            self.image_c, _console(console), x, y, width, height, bg_blend\n        )", "docstring": "Blit onto a Console without scaling or rotation.\n\nArgs:\nconsole (Console): Blit destination Console.\nx (int): Console tile X position starting from the left at 0.\ny (int): Console tile Y position starting from the top at 0.\nwidth (int): Use -1 for Image width.\nheight (int): Use -1 for Image height.\nbg_blend (int): Background blending mode to use.", "source": "juraj-google-style"}
{"code": "def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD):\n    \n    l_a = len(A.points)\n    l_b = len(B.points)\n\n    idx = index.Index()\n    dex = 0\n    for i in range(l_a-1):\n        idx.insert(dex, bounding_box_from(A.points, i, i+1, T), obj=[A.points[i], A.points[i+1]])\n        dex = dex + 1\n\n    prox_acc = []\n\n    for i in range(l_b-1):\n        ti = B.points[i].gen2arr()\n        ti1 = B.points[i+1].gen2arr()\n        bb = bounding_box_from(B.points, i, i+1, T)\n        intersects = idx.intersection(bb, objects=True)\n        n_prox = []\n        i_prox = 0\n        a = 0\n        for x in intersects:\n            a = a + 1\n            pi = x.object[0].gen2arr()\n            pi1 = x.object[1].gen2arr()\n            prox = line_similarity(ti, ti1, pi, pi1, T)\n            i_prox = i_prox + prox\n            n_prox.append(prox)\n\n        if a != 0:\n            prox_acc.append(i_prox / a)\n            \n        else:\n            prox_acc.append(0)\n\n    return np.mean(prox_acc), prox_acc", "docstring": "Computes the similarity between two segments\n\nArgs:\nA (:obj:`Segment`)\nB (:obj:`Segment`)\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "juraj-google-style"}
{"code": "def read_chunk_body(self):\n    bytes_left = self._bytes_left\n    if (bytes_left > 0):\n        size = min(bytes_left, self._read_size)\n        data = (yield from self._connection.read(size))\n        self._bytes_left -= len(data)\n        return (data, data)\n    elif (bytes_left < 0):\n        raise ProtocolError('Chunked-transfer overrun.')\n    elif bytes_left:\n        raise NetworkError('Connection closed.')\n    newline_data = (yield from self._connection.readline())\n    if (len(newline_data) > 2):\n        raise ProtocolError('Error reading newline after chunk.')\n    self._chunk_size = self._bytes_left = None\n    return (b'', newline_data)", "docstring": "Read a fragment of a single chunk.\n\nCall :meth:`read_chunk_header` first.\n\nReturns:\ntuple: 2-item tuple with the content data and raw data.\nFirst item is empty bytes string when chunk is fully read.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "async def _on_report_notification(self, event):\n        \n\n        conn_string = event.get('connection_string')\n        report = self._report_parser.deserialize_report(event.get('serialized_report'))\n\n        self.notify_event(conn_string, 'report', report)", "docstring": "Callback function called when a report event is received.\n\nArgs:\nevent (dict): The report_event", "source": "juraj-google-style"}
{"code": "def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:\n    \n\n    v = jsonschema.Draft4Validator(schema)\n    messages = []\n    errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)\n    for error in errors:\n        for suberror in sorted(error.context, key=lambda e: e.schema_path):\n            print(list(suberror.schema_path), suberror.message, sep=\", \")\n            messages.append((\"ERROR\", suberror.message))\n\n    is_valid = True\n    if errors:\n        is_valid = False\n\n    return (is_valid, messages)", "docstring": "Validate nanopub against jsonschema for nanopub\n\nArgs:\nnanopub (Mapping[str, Any]): nanopub dict\nschema (Mapping[str, Any]): nanopub schema\n\nReturns:\nTuple[bool, List[str]]:\nbool: Is valid?  Yes = True, No = False\nList[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)\ne.g. [('ERROR', \"'subject' is a required property\")]", "source": "juraj-google-style"}
{"code": "def reshape(x, newshape):\n    if any_symbolic_tensors((x,)):\n        return Reshape(newshape).symbolic_call(x)\n    return backend.numpy.reshape(x, newshape)", "docstring": "Gives a new shape to a tensor without changing its data.\n\nArgs:\nx: Input tensor.\nnewshape: The new shape should be compatible with the original shape.\nOne shape dimension can be -1 in which case the value is\ninferred from the length of the array and remaining dimensions.\n\nReturns:\nThe reshaped tensor.", "source": "github-repos"}
{"code": "def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:\n    \n    f1_total = 0.0\n    for ground_truth, prediction in zip(y_true, y_predicted):\n        prediction_tokens = normalize_answer(prediction).split()\n        f1s = []\n        for gt in ground_truth:\n            gt_tokens = normalize_answer(gt).split()\n            if len(gt_tokens) == 0 or len(prediction_tokens) == 0:\n                f1s.append(float(gt_tokens == prediction_tokens))\n                continue\n            common = Counter(prediction_tokens) & Counter(gt_tokens)\n            num_same = sum(common.values())\n            if num_same == 0:\n                f1s.append(0.0)\n                continue\n            precision = 1.0 * num_same / len(prediction_tokens)\n            recall = 1.0 * num_same / len(gt_tokens)\n            f1 = (2 * precision * recall) / (precision + recall)\n            f1s.append(f1)\n        f1_total += max(f1s)\n    return 100 * f1_total / len(y_true) if len(y_true) > 0 else 0", "docstring": "Calculates F-1 score between y_true and y_predicted\nF-1 score uses the best matching y_true answer\n\nThe same as in SQuAD-v2.0\n\nArgs:\ny_true: list of correct answers (correct answers are represented by list of strings)\ny_predicted: list of predicted answers\n\nReturns:\nF-1 score : float", "source": "juraj-google-style"}
{"code": "def with_target_audience(self, target_audience):\n    return self.__class__(self._signer, service_account_email=self._service_account_email, token_uri=self._token_uri, target_audience=target_audience, additional_claims=self._additional_claims.copy())", "docstring": "Create a copy of these credentials with the specified target\naudience.\n\nArgs:\ntarget_audience (str): The intended audience for these credentials,\nused when requesting the ID Token.\n\nReturns:\ngoogle.auth.service_account.IDTokenCredentials: A new credentials\ninstance.", "source": "codesearchnet"}
{"code": "def get_formatted_as_type(self, value, default=None, out_type=str):\n    if (value is None):\n        value = default\n    if isinstance(value, SpecialTagDirective):\n        result = value.get_value(self)\n        return types.cast_to_type(result, out_type)\n    if isinstance(value, str):\n        result = self.get_formatted_string(value)\n        result_type = type(result)\n        if (out_type is result_type):\n            return result\n        elif ((out_type is bool) and (result_type is str)):\n            return (result.lower() in ['true', '1', '1.0'])\n        else:\n            return out_type(result)\n    else:\n        return out_type(value)", "docstring": "Return formatted value for input value, returns as out_type.\n\nCaveat emptor: if out_type is bool and value a string,\nreturn will be True if str is 'True'. It will be False for all other\ncases.\n\nArgs:\nvalue: the value to format\ndefault: if value is None, set to this\nout_type: cast return as this type\n\nReturns:\nFormatted value of type out_type", "source": "codesearchnet"}
{"code": "def get_config_dir(program='', system_wide=False):\n    config_homes = []\n    if system_wide:\n        if (os.name == 'nt'):\n            config_homes.append(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))\n        else:\n            config_homes.append('/etc')\n            config_homes.append('/etc/xdg')\n            if (os.name == 'darwin'):\n                config_homes.append('/Library')\n    elif (os.name == 'nt'):\n        import winreg\n        config_homes.append(winreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))\n        config_homes.append(os.path.join(winreg.ExpandEnvironmentStrings('%APPDATA%'), 'Roaming'))\n    elif os.getenv('XDG_CONFIG_HOME'):\n        config_homes.append(os.getenv('XDG_CONFIG_HOME'))\n    else:\n        try:\n            from xdg import BaseDirectory\n            config_homes.append(BaseDirectory.xdg_config_home)\n        except ImportError:\n            config_homes.append(os.path.expanduser('~/.config'))\n        config_homes.append(os.path.expanduser('~'))\n        if (os.name == 'darwin'):\n            config_homes.append(os.path.expanduser('~/Library'))\n    if program:\n\n        def __find_homes(app, dirs):\n            homes = []\n            for home in dirs:\n                if os.path.isdir(os.path.join(home, app)):\n                    homes.append(os.path.join(home, app))\n                if os.path.isdir(os.path.join(home, ('.' + app))):\n                    homes.append(os.path.join(home, ('.' + app)))\n                if os.path.isdir(os.path.join(home, (app + '.d'))):\n                    homes.append(os.path.join(home, (app + '.d')))\n            return homes\n        app_homes = __find_homes(program, config_homes)\n        if (program == 'vim'):\n            app_homes.extend(__find_homes('vimfiles', config_homes))\n        elif (program == 'chrome'):\n            app_homes.extend(__find_homes('google-chrome', config_homes))\n        elif (program in ['firefox', 'thunderbird']):\n            app_homes.extend(__find_homes(program, [os.path.expanduser('~/.mozilla')]))\n        return app_homes\n    return config_homes", "docstring": "Get the configuration directory.\n\nGet the configuration directories, optionally for a specific program.\n\nArgs:\nprogram\t(str) : The name of the program whose configuration directories have to be found.\nsystem_wide (bool): Gets the system-wide configuration directories.\n\nReturns:\nlist: A list of all matching configuration directories found.", "source": "codesearchnet"}
{"code": "def transform_log_prob_fn(log_prob_fn: PotentialFn, bijector: BijectorNest, init_state: State=None) -> Union[(PotentialFn, Tuple[(PotentialFn, State)])]:\n\n    def wrapper(*args):\n        'Transformed wrapper.'\n        bijector_ = bijector\n        args = tf.nest.map_structure((lambda x: (0.0 + x)), args)\n        if (len(args) == 1):\n            args = args[0]\n        elif isinstance(bijector_, list):\n            bijector_ = tuple(bijector_)\n        original_space_args = tf.nest.map_structure((lambda b, x: b.forward(x)), bijector_, args)\n        original_space_args = original_space_args\n        (original_space_log_prob, extra) = call_fn(log_prob_fn, original_space_args)\n        event_ndims = tf.nest.map_structure((lambda x: (tf.rank(x) - tf.rank(original_space_log_prob))), args)\n        return ((original_space_log_prob + sum(tf.nest.flatten(tf.nest.map_structure((lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e)), bijector_, args, event_ndims)))), [original_space_args, extra])\n    if (init_state is None):\n        return wrapper\n    else:\n        return (wrapper, tf.nest.map_structure((lambda b, s: b.inverse(s)), bijector, init_state))", "docstring": "Transforms a log-prob function using a bijector.\n\nThis takes a log-prob function and creates a new log-prob function that now\ntakes takes state in the domain of the bijector, forward transforms that state\nand calls the original log-prob function. It then returns the log-probability\nthat correctly accounts for this transformation.\n\nThe forward-transformed state is pre-pended to the original log-prob\nfunction's extra returns and returned as the new extra return.\n\nFor convenience you can also pass the initial state (in the original space),\nand this function will return the inverse transformed as the 2nd return value.\nYou'd use this to initialize MCMC operators that operate in the transformed\nspace.\n\nArgs:\nlog_prob_fn: Log prob fn.\nbijector: Bijector(s), must be of the same structure as the `log_prob_fn`\ninputs.\ninit_state: Initial state, in the original space.\n\nReturns:\ntransformed_log_prob_fn: Transformed log prob fn.\ntransformed_init_state: If `init_state` is provided. Initial state in the\ntransformed space.", "source": "codesearchnet"}
{"code": "def path_get_destination(p: tcod.path.AStar) -> Tuple[(int, int)]:\n    x = ffi.new('int *')\n    y = ffi.new('int *')\n    lib.TCOD_path_get_destination(p._path_c, x, y)\n    return (x[0], y[0])", "docstring": "Get the current destination position.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nTuple[int, int]: An (x, y) point.", "source": "codesearchnet"}
{"code": "def apply_grad_cartesian_tensor(grad_X, zmat_dist):\n    columns = ['bond', 'angle', 'dihedral']\n    C_dist = zmat_dist.loc[(:, columns)].values.T\n    try:\n        C_dist = C_dist.astype('f8')\n        C_dist[([1, 2], :)] = np.radians(C_dist[([1, 2], :)])\n    except (TypeError, AttributeError):\n        C_dist[([1, 2], :)] = sympy.rad(C_dist[([1, 2], :)])\n    cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T\n    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian\n    return Cartesian(atoms=zmat_dist['atom'], coords=cart_dist, index=zmat_dist.index)", "docstring": "Apply the gradient for transformation to cartesian space onto zmat_dist.\n\nArgs:\ngrad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.\nThe mathematical details of the index layout is explained in\n:meth:`~chemcoord.Cartesian.get_grad_zmat()`.\nzmat_dist (:class:`~chemcoord.Zmat`):\nDistortions in Zmatrix space.\n\nReturns:\n:class:`~chemcoord.Cartesian`: Distortions in cartesian space.", "source": "codesearchnet"}
{"code": "def clone_source_dir(source_dir, dest_dir):\n    if os.path.isdir(dest_dir):\n        print('removing', dest_dir)\n        shutil.rmtree(dest_dir)\n    shutil.copytree(source_dir, dest_dir)", "docstring": "Copies the source Protobuf files into a build directory.\n\nArgs:\nsource_dir (str): source directory of the Protobuf files\ndest_dir (str): destination directory of the Protobuf files", "source": "codesearchnet"}
{"code": "def on_predict_batch_end(self, batch, logs=None):", "docstring": "Called at the end of a batch in `predict` methods.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`Model` is set to `N`, this method will only be called every\n`N` batches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def process_git_configs(git_short=''):\n    LOG.info('Processing application.json files from GitLab \"%s\".', git_short)\n    file_lookup = FileLookup(git_short=git_short)\n    app_configs = process_configs(file_lookup, (RUNWAY_BASE_PATH + '/application-master-{env}.json'), (RUNWAY_BASE_PATH + '/pipeline.json'))\n    commit_obj = file_lookup.project.commits.get('master')\n    config_commit = commit_obj.attributes['id']\n    LOG.info('Commit ID used: %s', config_commit)\n    app_configs['pipeline']['config_commit'] = config_commit\n    return app_configs", "docstring": "Retrieve _application.json_ files from GitLab.\n\nArgs:\ngit_short (str): Short Git representation of repository, e.g.\nforrest/core.\n\nReturns:\ncollections.defaultdict: Configurations stored for each environment\nfound.", "source": "codesearchnet"}
{"code": "def replace_with_aqlm_linear(model, quantization_config=None, linear_weights_not_to_quantize=None, current_key_name=None, has_been_replaced=False):\n    if not is_aqlm_available():\n        raise ValueError('AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`')\n    if not is_accelerate_available():\n        raise ValueError(f\"AQLM requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`\")\n    if linear_weights_not_to_quantize is None:\n        linear_weights_not_to_quantize = []\n    from accelerate import init_empty_weights\n    from aqlm import QuantizedLinear\n    for name, module in model.named_children():\n        if current_key_name is None:\n            current_key_name = []\n        current_key_name.append(name)\n        if isinstance(module, nn.Linear):\n            if '.'.join(current_key_name) + '.weight' not in linear_weights_not_to_quantize:\n                with init_empty_weights():\n                    in_features = module.in_features\n                    out_features = module.out_features\n                    model._modules[name] = QuantizedLinear(in_features, out_features, bias=module.bias is not None, in_group_size=quantization_config.in_group_size, out_group_size=quantization_config.out_group_size, num_codebooks=quantization_config.num_codebooks, nbits_per_codebook=quantization_config.nbits_per_codebook)\n                    has_been_replaced = True\n                    model._modules[name].source_cls = type(module)\n                    model._modules[name].requires_grad_(False)\n        if len(list(module.children())) > 0:\n            _, has_been_replaced = replace_with_aqlm_linear(module, quantization_config=quantization_config, linear_weights_not_to_quantize=linear_weights_not_to_quantize, current_key_name=current_key_name, has_been_replaced=has_been_replaced)\n        current_key_name.pop(-1)\n    return (model, has_been_replaced)", "docstring": "Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.\n`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the\nconversion has been successful or not.\n\nArgs:\nmodel (`torch.nn.Module`):\nThe model to convert, can be any `torch.nn.Module` instance.\nquantization_config (`AqlmConfig`):\nThe quantization config object that contains the quantization parameters.\nlinear_weights_not_to_quantize (`list[str]`, *optional*):\nA list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be\nconverted.\ncurrent_key_name (`list`, *optional*):\nA list that contains the current key name. This is used for recursion and should not be passed by the user.\nhas_been_replaced (`bool`, *optional*):\nA boolean that indicates if the conversion has been successful or not. This is used for recursion and\nshould not be passed by the user.", "source": "github-repos"}
{"code": "def batch_insert_into(self, insert_intos: Iterable[Tuple[(int, ops.Operation)]]) -> None:\n    copy = self.copy()\n    for (i, op) in insert_intos:\n        copy._moments[i] = copy._moments[i].with_operation(op)\n    self._device.validate_circuit(copy)\n    self._moments = copy._moments", "docstring": "Inserts operations into empty spaces in existing moments.\n\nIf any of the insertions fails (due to colliding with an existing\noperation), this method fails without making any changes to the circuit.\n\nArgs:\ninsert_intos: A sequence of (moment_index, new_operation)\npairs indicating a moment to add a new operation into.\n\nValueError:\nOne of the insertions collided with an existing operation.\n\nIndexError:\nInserted into a moment index that doesn't exist.", "source": "codesearchnet"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(DataTypeDefinitionWithMembers, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self._byte_size = None\n    self.members = []\n    self.sections = []", "docstring": "Initializes a data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def console_put_char(\n    con: tcod.console.Console,\n    x: int,\n    y: int,\n    c: Union[int, str],\n    flag: int = BKGND_DEFAULT,\n) -> None:\n    \n    lib.TCOD_console_put_char(_console(con), x, y, _int(c), flag)", "docstring": "Draw the character c at x,y using the default colors and a blend mode.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\nc (Union[int, AnyStr]): Character to draw, can be an integer or string.\nflag (int): Blending mode to use, defaults to BKGND_DEFAULT.", "source": "juraj-google-style"}
{"code": "class TFOPTPreTrainedModel(TFPreTrainedModel):\n    config_class = OPTConfig\n    base_model_prefix = 'model'", "docstring": "TFOPT Pretrained Model that inheritates from transformers.TFPreTrainedModel\n\nArgs:\nconfig: OPTConfig", "source": "github-repos"}
{"code": "def update_resource_fields(self, data, data_to_add):\n        \n        for key, value in data_to_add.items():\n            if not data.get(key):\n                data[key] = value\n\n        return data", "docstring": "Update resource data with new fields.\n\nArgs:\ndata: resource data\ndata_to_update: dict of data to update resource data\n\nReturnes:\nReturnes dict", "source": "juraj-google-style"}
{"code": "def by_image_seq(blocks, image_seq):\n    return list(filter((lambda block: (blocks[block].ec_hdr.image_seq == image_seq)), blocks))", "docstring": "Filter blocks to return only those associated with the provided image_seq number.\n\nArgument:\nList:blocks       -- List of block objects to sort.\nInt:image_seq    -- image_seq number found in ec_hdr.\n\nReturns:\nList        -- List of block indexes matching image_seq number.", "source": "codesearchnet"}
{"code": "def _calculate_replicas_with_values(strategy, input_workers, optional_list):\n    worker_has_values = []\n    for worker, optionals in zip(input_workers.worker_devices, optional_list):\n        with ops.device(worker):\n            device_has_values = [math_ops.cast(v.has_value(), dtypes.int64) for v in optionals]\n            worker_has_values.append(math_ops.reduce_sum(device_has_values, keepdims=True))\n    client_has_values = math_ops.reduce_sum(worker_has_values, keepdims=True)\n    if strategy.extended._in_multi_worker_mode():\n        global_has_values = strategy.reduce(reduce_util.ReduceOp.SUM, client_has_values, axis=None)\n        return array_ops.reshape(global_has_values, [])\n    else:\n        return array_ops.reshape(client_has_values, [])", "docstring": "Computes the number of replicas that have values.\n\nArgs:\nstrategy: the `tf.distribute.Strategy`.\ninput_workers: the `InputWorkers`.\noptional_list: a list of lists `tf.experimental.Optional`. The values from\neach compute device grouped by the input device.\n\nReturns:\nA scalar Tensor.", "source": "github-repos"}
{"code": "def remove(self, path, relative=False):\n        \n        if not relative:\n            path = self.relpath(path)\n        self._remove(self.get_client_kwargs(path))", "docstring": "Remove an object.\n\nArgs:\npath (str): Path or URL.\nrelative (bool): Path is relative to current root.", "source": "juraj-google-style"}
{"code": "def enter_loop_section(self, section_id, entry_node):\n    assert section_id not in self.section_entry\n    assert section_id not in self.continues\n    self.continues[section_id] = set()\n    node = self.add_ordinary_node(entry_node)\n    self.section_entry[section_id] = node", "docstring": "Enters a loop section.\n\nLoop sections define an entry node. The end of the section always flows back\nto the entry node. These admit continue jump nodes which also flow to the\nentry node.\n\nArgs:\nsection_id: Hashable, the same node that will be used in calls to the\nast_node arg passed to add_continue_node\nentry_node: ast.AST, the entry node into the loop (e.g. the test node for\nwhile loops)", "source": "github-repos"}
{"code": "def get_without_ethernet(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/withoutEthernet\"\n        return self._client.get(uri)", "docstring": "Gets the logical downlink with the specified ID without ethernet.\n\nArgs:\nid_or_uri: Can be either the logical downlink id or the logical downlink uri.\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def set_uid(self, uid, schema=None):\n    try:\n        (uid, schema) = author_id_normalize_and_schema(uid, schema)\n    except UnknownUIDSchema:\n        pass\n    self._ensure_field('ids', [])\n    self.obj['ids'] = [id_ for id_ in self.obj['ids'] if (id_.get('schema') != schema)]\n    self._add_uid(uid, schema)", "docstring": "Set a unique ID.\n\nIf a UID of a given schema already exists in a record it will\nbe overwritten, otherwise it will be appended to the record.\n\nArgs:\nuid (string): unique identifier.\nschema (Optional[string]): schema of the unique identifier. If\n``None``, the schema will be guessed based on the shape of\n``uid``.\n\nRaises:\nSchemaUIDConflict: it UID and schema are not matching", "source": "codesearchnet"}
{"code": "def expand(self, pcoll: beam.PCollection[ExampleT]) -> Union[beam.PCollection[MLTransformOutputT], tuple[beam.PCollection[MLTransformOutputT], beam.PCollection[beam.Row]]]:\n    upstream_errors = []\n    _ = [self._validate_transform(transform) for transform in self.transforms]\n    if self._artifact_mode == ArtifactMode.PRODUCE:\n        ptransform_partitioner = _MLTransformToPTransformMapper(transforms=self.transforms, artifact_location=self._parent_artifact_location, artifact_mode=self._artifact_mode, pipeline_options=pcoll.pipeline.options)\n        ptransform_list = ptransform_partitioner.create_and_save_ptransform_list()\n    else:\n        ptransform_list = _MLTransformToPTransformMapper.load_transforms_from_artifact_location(self._parent_artifact_location)\n        for i in range(len(ptransform_list)):\n            if hasattr(ptransform_list[i], 'artifact_mode'):\n                ptransform_list[i].artifact_mode = self._artifact_mode\n    transform_name = None\n    for ptransform in ptransform_list:\n        if self._with_exception_handling:\n            if hasattr(ptransform, 'with_exception_handling'):\n                ptransform = ptransform.with_exception_handling(**self._exception_handling_args)\n                pcoll, bad_results = pcoll | ptransform\n                if isinstance(bad_results, RunInferenceDLQ):\n                    bad_results = bad_results.failed_inferences\n                    transform_name = ptransform.annotations()['model_handler']\n                elif not isinstance(bad_results, beam.PCollection):\n                    raise NotImplementedError(f'Unexpected type for bad_results: {type(bad_results)}')\n                bad_results = bad_results | beam.Map(lambda x: _map_errors_to_beam_row(x, transform_name))\n                upstream_errors.append(bad_results)\n        else:\n            pcoll = pcoll | ptransform\n    _ = pcoll.pipeline | 'MLTransformMetricsUsage' >> MLTransformMetricsUsage(self)\n    if self._with_exception_handling:\n        bad_pcoll = upstream_errors | beam.Flatten()\n        return (pcoll, bad_pcoll)\n    return pcoll", "docstring": "This is the entrypoint for the MLTransform. This method will\ninvoke the process_data() method of the ProcessHandler instance\nto process the incoming data.\n\nprocess_data takes in a PCollection and applies the PTransforms\nnecessary to process the data and returns a PCollection of\ntransformed data.\nArgs:\npcoll: A PCollection of ExampleT type.\nReturns:\nA PCollection of MLTransformOutputT type", "source": "github-repos"}
{"code": "def add_stream(self, stream, path, compress, flags):\n    self.data_fileobj.seek(self.last_offset)\n    if (compress == 'bz2'):\n        stream = bz2_compress_stream(stream)\n    elif (compress == 'xz'):\n        stream = xz_compress_stream(stream)\n    elif (compress is None):\n        pass\n    else:\n        raise ValueError('Unsupported compression type: {}'.format(compress))\n    size = write_to_file(stream, self.data_fileobj)\n    if (os.sep == '\\\\'):\n        path = path.replace('\\\\', '/')\n    e = dict(name=six.u(path), offset=self.last_offset, size=size, flags=flags)\n    self.entries.append(e)\n    self.last_offset += e['size']", "docstring": "Add the contents of an iterable to the MAR file.\n\nArgs:\nstream (iterable): yields blocks of data\npath (str): name of this file in the MAR file\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.\nflags (int): permission of this file in the MAR file", "source": "codesearchnet"}
{"code": "def read_header(filename, return_idxs=False):\n    \n    with open(filename, 'rb') as fh:\n        header_dict = {}\n        header_idxs = {}\n\n        \n        keyword, value, idx = read_next_header_keyword(fh)\n\n        try:\n            assert keyword == b'HEADER_START'\n        except AssertionError:\n            raise RuntimeError(\"Not a valid blimpy file.\")\n\n        while True:\n            keyword, value, idx = read_next_header_keyword(fh)\n            if keyword == b'HEADER_END':\n                break\n            else:\n                header_dict[keyword] = value\n                header_idxs[keyword] = idx\n\n    if return_idxs:\n        return header_idxs\n    else:\n        return header_dict", "docstring": "Read blimpy header and return a Python dictionary of key:value pairs\n\nArgs:\nfilename (str): name of file to open\n\nOptional args:\nreturn_idxs (bool): Default False. If true, returns the file offset indexes\nfor values\n\nreturns", "source": "juraj-google-style"}
{"code": "def _consume(self, message):\n    try:\n        self.validate(message)\n    except RuntimeWarning as e:\n        self.log.warn('Received invalid message {0}'.format(e))\n        return\n    if (isinstance(message, dict) and ('headers' in message) and ('body' in message)):\n        message['body']['headers'] = message['headers']\n    if hasattr(self, 'replay_name'):\n        for m in check_for_replay(self.replay_name, self.name_to_seq_id, message, self.hub.config):\n            try:\n                self.validate(m)\n                return super(FedmsgConsumer, self)._consume(m)\n            except RuntimeWarning as e:\n                self.log.warn('Received invalid message {}'.format(e))\n    else:\n        return super(FedmsgConsumer, self)._consume(message)", "docstring": "Called when a message is consumed.\n\nThis private method handles some administrative setup and teardown\nbefore calling the public interface `consume` typically implemented\nby a subclass.\n\nWhen `moksha.blocking_mode` is set to `False` in the config, this\nmethod always returns `None`.  The argued message is stored in an\ninternal queue where the consumer's worker threads should eventually\npick it up.\n\nWhen `moksha.blocking_mode` is set to `True` in the config, this\nmethod should return True or False, indicating whether the message\nwas handled or not.  Specifically, in the event that the inner\n`consume` method raises an exception of any kind, this method\nshould return `False` indicating that the message was not\nsuccessfully handled.\n\nArgs:\nmessage (dict): The message as a dictionary.\n\nReturns:\nbool: Should be interpreted as whether or not the message was\nhandled by the consumer, or `None` if `moksha.blocking_mode` is\nset to False.", "source": "codesearchnet"}
{"code": "def get_file_behaviour(self, resources):\n        \n        api_name = 'virustotal-file-behaviour'\n        api_endpoint = 'file/behaviour'\n        return self._extract_all_responses(resources, api_endpoint, api_name)", "docstring": "Retrieves a report about the behaviour of a md5, sha1, and/or sha2 hash of\na file when executed in a sandboxed environment (Cuckoo sandbox).\n\nArgs:\nresources: list of string hashes.", "source": "juraj-google-style"}
{"code": "def set_pyftpsync_logger(logger=True):\n    global _logger\n    prev_logger = _logger\n    if (logger is True):\n        logging.basicConfig(level=logging.INFO)\n        _logger = logging.getLogger('pyftpsync')\n        _logger.setLevel(logging.DEBUG)\n    else:\n        _logger = logger\n    return prev_logger", "docstring": "Define target for common output.\n\nArgs:\nlogger (bool | None | logging.Logger):\nPass None to use `print()` to stdout instead of logging.\nPass True to create a simple standard logger.", "source": "codesearchnet"}
{"code": "def __init__(self, texts, text_type=None):\n    self.texts = texts\n    self.text_type = text_type", "docstring": "String of text and a corresponding type to use to style that text.\n\nArgs:\ntexts: (list[str]), list of strs or TypedText objects\nthat should be styled using text_type.\ntext_type: (TextTypes), the semantic type of the text that\nwill be used to style text.", "source": "github-repos"}
{"code": "def get_field_tag(proto: message.Message, fields: FieldTypes) -> Sequence[chunk_pb2.FieldIndex]:\n    field_tags = []\n    for _, field_desc, map_key, list_index in _walk_fields(proto, fields):\n        field_tags.append(chunk_pb2.FieldIndex(field=field_desc.number))\n        if map_key is not None:\n            key_type = field_desc.message_type.fields_by_name['key'].type\n            field_tags.append(chunk_pb2.FieldIndex(map_key=_map_key_proto(key_type, map_key)))\n        elif list_index is not None:\n            field_tags.append(chunk_pb2.FieldIndex(index=list_index))\n    return field_tags", "docstring": "Generates FieldIndex proto for a nested field within a proto.\n\nArgs:\nproto: Parent proto of any message type.\nfields: List of string/int/map key fields, e.g. [\"nodes\", \"attr\", \"value\"]\ncan represent `proto.nodes.attr[\"value\"]`.\n\nReturns:\nA list of FieldIndex protos with the same length as `fields`.", "source": "github-repos"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    eos = [self.eos_token_id]\n    if token_ids_1 is None:\n        return len(token_ids_0 + eos) * [0]\n    return len(token_ids_0 + eos + token_ids_1 + eos) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def ConsumeFloat(self):\n    try:\n        result = ParseFloat(self.token)\n    except ValueError as e:\n        raise self._ParseError(str(e))\n    self.NextToken()\n    return result", "docstring": "Consumes an floating point number.\n\nReturns:\nThe number parsed.\n\nRaises:\nParseError: If a floating point number couldn't be consumed.", "source": "codesearchnet"}
{"code": "def _add_weight(self, name, initial_value, dtype=None):\n    variable = variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=dtype, trainable=False, use_resource=True, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE)\n    if context.executing_eagerly():\n        graph_key = None\n    else:\n        graph = ops.get_default_graph()\n        graph_key = graph._graph_key\n    key = (name, graph_key)\n    if self._weights.get(key, None) is not None:\n        raise RuntimeError('Duplicate variables detected. {}'.format(key))\n    self._weights[key] = variable\n    self._handle_deferred_dependencies(name=name, trackable=variable)\n    return variable", "docstring": "Adds a weight to this loss scale.\n\nArgs:\nname: Variable name.\ninitial_value: The variable's initial value.\ndtype: The type of the variable.\n\nReturns:\nA variable.\n\nRaises:\nRuntimeError: If a weight with `name` has already been added.", "source": "github-repos"}
{"code": "def __getDecision(self, result, multiple=False, **values):\n    values = self.__toString(values)\n    __valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)\n    errors = self.__checkDecisionParameters(result, **values)\n    if errors:\n        view.Tli.showErrors('ParametersError', errors)\n    machingData = {}\n    for line in self.decisions:\n        match = True\n        for index in __valueKeyWithHeaderIndex:\n            if (line[index] != __valueKeyWithHeaderIndex[index]):\n                if (line[index] != self.__wildcardSymbol):\n                    match = False\n                    break\n        if match:\n            if multiple:\n                for header in result:\n                    if (header not in machingData):\n                        machingData[header] = [line[self.header.index(header)]]\n                    else:\n                        machingData[header].append(line[self.header.index(header)])\n            else:\n                for header in result:\n                    machingData[header] = line[self.header.index(header)]\n                return machingData\n    if multiple:\n        if machingData:\n            return machingData\n    return dict(((key, None) for key in result))", "docstring": "The main method for decision picking.\n\nArgs:\nresult (array of str): What values you want to get in return array.\nmultiple (bolean, optional): Do you want multiple result if it finds many maching decisions.\n**values (dict): What should finder look for, (headerString : value).\n\nReturns: Maped result values with finded elements in row/row.", "source": "codesearchnet"}
{"code": "def _GetMountpoints(only_physical=True):\n  \n  partitions = psutil.disk_partitions(all=not only_physical)\n  return set(partition.mountpoint for partition in partitions)", "docstring": "Fetches a list of mountpoints.\n\nArgs:\nonly_physical: Determines whether only mountpoints for physical devices\n(e.g. hard disks) should be listed. If false, mountpoints for things such\nas memory partitions or `/dev/shm` will be returned as well.\n\nReturns:\nA set of mountpoints.", "source": "juraj-google-style"}
{"code": "def locate_file(start_path, file_name):\n    \n    if os.path.isfile(start_path):\n        start_dir_path = os.path.dirname(start_path)\n    elif os.path.isdir(start_path):\n        start_dir_path = start_path\n    else:\n        raise exceptions.FileNotFound(\"invalid path: {}\".format(start_path))\n\n    file_path = os.path.join(start_dir_path, file_name)\n    if os.path.isfile(file_path):\n        return os.path.abspath(file_path)\n\n    \n    if os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]:\n        raise exceptions.FileNotFound(\"{} not found in {}\".format(file_name, start_path))\n\n    \n    return locate_file(os.path.dirname(start_dir_path), file_name)", "docstring": "locate filename and return absolute file path.\nsearching will be recursive upward until current working directory.\n\nArgs:\nstart_path (str): start locating path, maybe file path or directory path\n\nReturns:\nstr: located file path. None if file not found.\n\nRaises:\nexceptions.FileNotFound: If failed to locate file.", "source": "juraj-google-style"}
{"code": "def creating_schema_and_index(self, models, func):\n        \n        waiting_models = []\n        self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads)\n        if waiting_models:\n            print(\"WAITING MODELS ARE CHECKING...\")\n            self.creating_schema_and_index(waiting_models, func)", "docstring": "Executes given functions with given models.\n\nArgs:\nmodels: models to execute\nfunc: function name to execute\n\nReturns:", "source": "juraj-google-style"}
{"code": "def __init__(self, location=None, parent=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(ZipPathSpec, self).__init__(\n        location=location, parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the zip file path specification must have a parent.\n\nArgs:\nlocation (Optional[str]): ZIP file internal location string prefixed\nwith a path separator character.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def _open_interface(self, conn_id, iface, callback):\n    try:\n        context = self.conns.get_context(conn_id)\n    except ArgumentError:\n        callback(conn_id, self.id, False, 'Could not find connection information')\n        return\n    self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config('default_timeout'))\n    topics = context['topics']\n    open_iface_message = {'key': context['key'], 'type': 'command', 'operation': 'open_interface', 'client': self.name, 'interface': iface}\n    self.client.publish(topics.action, open_iface_message)", "docstring": "Open an interface on this device\n\nArgs:\nconn_id (int): the unique identifier for the connection\niface (string): the interface name to open\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "codesearchnet"}
{"code": "def _PrintEventsStatus(self, events_status):\n    if events_status:\n        table_view = views.CLITabularTableView(column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates', 'MACB grouped', 'Total'], column_sizes=[15, 15, 15, 15, 15, 0])\n        table_view.AddRow(['', events_status.number_of_filtered_events, events_status.number_of_events_from_time_slice, events_status.number_of_duplicate_events, events_status.number_of_macb_grouped_events, events_status.total_number_of_events])\n        self._output_writer.Write('\\n')\n        table_view.Write(self._output_writer)", "docstring": "Prints the status of the events.\n\nArgs:\nevents_status (EventsStatus): events status.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        self.Magic = reader.ReadUInt32()\n        self.Command = reader.ReadFixedString(12).decode('utf-8')\n        self.Length = reader.ReadUInt32()\n\n        if self.Length > self.PayloadMaxSizeInt:\n            raise Exception(\"invalid format- payload too large\")\n\n        self.Checksum = reader.ReadUInt32()\n        self.Payload = reader.ReadBytes(self.Length)\n\n        checksum = Message.GetChecksum(self.Payload)\n\n        if checksum != self.Checksum:\n            raise ChecksumException(\"checksum mismatch\")", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def assert_equal(first, second, msg=None, extras=None):\n    _call_unittest_assertion(_pyunit_proxy.assertEqual, first, second, msg=msg, extras=extras)", "docstring": "Asserts the equality of objects, otherwise fail the test.\n\nError message is \"first != second\" by default. Additional explanation can\nbe supplied in the message.\n\nArgs:\nfirst: The first object to compare.\nsecond: The second object to compare.\nmsg: A string that adds additional info about the failure.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def bit_for_bit(model_path, bench_path, config):\n    fname = model_path.split(os.path.sep)[(- 1)]\n    if (not (os.path.isfile(bench_path) and os.path.isfile(model_path))):\n        return elements.error('Bit for Bit', (('File named ' + fname) + ' has no suitable match!'))\n    try:\n        model_data = Dataset(model_path)\n        bench_data = Dataset(bench_path)\n    except (FileNotFoundError, PermissionError):\n        return elements.error('Bit for Bit', (('File named ' + fname) + ' could not be read!'))\n    if (not (netcdf.has_time(model_data) and netcdf.has_time(bench_data))):\n        return elements.error('Bit for Bit', (('File named ' + fname) + ' could not be read!'))\n    headers = ['Max Error', 'Index of Max Error', 'RMS Error', 'Plot']\n    stats = LIVVDict()\n    for (i, var) in enumerate(config['bit_for_bit_vars']):\n        if ((var in model_data.variables) and (var in bench_data.variables)):\n            m_vardata = model_data.variables[var][:]\n            b_vardata = bench_data.variables[var][:]\n            diff_data = (m_vardata - b_vardata)\n            if diff_data.any():\n                stats[var]['Max Error'] = np.amax(np.absolute(diff_data))\n                stats[var]['Index of Max Error'] = str(np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape))\n                stats[var]['RMS Error'] = np.sqrt((np.sum(np.square(diff_data).flatten()) / diff_data.size))\n                pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data)\n            else:\n                stats[var]['Max Error'] = stats[var]['RMS Error'] = 0\n                pf = stats[var]['Index of Max Error'] = 'N/A'\n            stats[var]['Plot'] = pf\n        else:\n            stats[var] = {'Max Error': 'No Match', 'RMS Error': 'N/A', 'Plot': 'N/A'}\n    model_data.close()\n    bench_data.close()\n    return elements.bit_for_bit('Bit for Bit', headers, stats)", "docstring": "Checks whether the given files have bit for bit solution matches\non the given variable list.\n\nArgs:\nmodel_path: absolute path to the model dataset\nbench_path: absolute path to the benchmark dataset\nconfig: the configuration of the set of analyses\n\nReturns:\nA dictionary created by the elements object corresponding to\nthe results of the bit for bit testing", "source": "codesearchnet"}
{"code": "def run_inference(self, batch: Sequence[str], pipeline: Pipeline, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    inference_args = {} if not inference_args else inference_args\n    predictions = self._inference_fn(batch, pipeline, inference_args)\n    return _convert_to_result(batch, predictions)", "docstring": "Runs inferences on a batch of examples passed as a string resource.\nThese can either be string sentences, or string path to images or\naudio files.\n\nArgs:\nbatch: A sequence of strings resources.\npipeline: A Hugging Face Pipeline.\ninference_args: Non-batchable arguments required as inputs to the model's\ninference function.\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def color(self, color):\n    self._data['color'] = color\n    request = self._base_request\n    request['color'] = color\n    return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels color.\n\nArgs:\ncolor:", "source": "codesearchnet"}
{"code": "def check_task(taskid, timeout=DEFAULT_TASK_TIMEOUT, wait=2):\n    \n    max_attempts = int(timeout / wait)\n    try:\n        return retry_call(\n            partial(_check_task, taskid),\n            max_attempts=max_attempts,\n            wait=wait,\n            exceptions=(AssertionError, ValueError), )\n    except ValueError:\n        raise SpinnakerTaskInconclusiveError('Task failed to complete in {0} seconds: {1}'.format(timeout, taskid))", "docstring": "Wrap check_task.\n\nArgs:\ntaskid (str): Existing Spinnaker Task ID.\ntimeout (int, optional): Consider Task failed after given seconds.\nwait (int, optional): Seconds to pause between polling attempts.\n\nReturns:\nstr: Task status.\n\nRaises:\nAssertionError: API did not respond with a 200 status code.\n:obj:`foremast.exceptions.SpinnakerTaskInconclusiveError`: Task did not\nreach a terminal state before the given time out.", "source": "juraj-google-style"}
{"code": "def join_sources(source_module: DeploymentModule, contract_name: str):\n    \n    joined_file = Path(__file__).parent.joinpath('joined.sol')\n    remapping = {module: str(path) for module, path in contracts_source_path().items()}\n    command = [\n        './utils/join-contracts.py',\n        '--import-map',\n        json.dumps(remapping),\n        str(contracts_source_path_of_deployment_module(\n            source_module,\n        ).joinpath(contract_name + '.sol')),\n        str(joined_file),\n    ]\n    working_dir = Path(__file__).parent.parent\n    try:\n        subprocess.check_call(command, cwd=working_dir)\n    except subprocess.CalledProcessError as ex:\n        print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')\n        raise ex\n\n    return joined_file.read_text()", "docstring": "Use join-contracts.py to concatenate all imported Solidity files.\n\nArgs:\nsource_module: a module name to look up contracts_source_path()\ncontract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.", "source": "juraj-google-style"}
{"code": "def typical_or_extreme_period_name(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `typical_or_extreme_period_name`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `typical_or_extreme_period_name`')\n\n        self._typical_or_extreme_period_name = value", "docstring": "Corresponds to IDD Field `typical_or_extreme_period_name`\n\nArgs:\nvalue (str): value for IDD Field `typical_or_extreme_period_name`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, method, stop_if_false=False):\n    \n    self.method = method\n    self.stopped = threading.Event()\n    self.thread = None\n    self.stop_if_false = stop_if_false", "docstring": "Initializes the Interval.\n\nArgs:\nmethod: A callable to execute, it should take no arguments.\nstop_if_false: If True, the interval will exit if the method returns\nFalse.", "source": "juraj-google-style"}
{"code": "def check_media_service_name_availability(access_token, subscription_id, msname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/microsoft.media/CheckNameAvailability?',\n                        'api-version=', MEDIA_API])\n    ms_body = {'name': msname}\n    ms_body['type'] = 'mediaservices'\n    body = json.dumps(ms_body)\n    return do_post(endpoint, body, access_token)", "docstring": "Check media service name availability.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nmsname (str): media service name.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def generate_identifier(sender, instance, **kwargs):\n    \n    identifier = Concept.create_identifier(instance.query)\n    qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)\n    if instance.pk:\n        qs = qs.exclude(pk=instance.pk)\n    if qs.count() > 0:\n        raise ValueError(\"Concept identifier conflict\")\n    instance.identifier = identifier", "docstring": "Generate and set identifier of concept before saving object to DB\n\nArgs:\nsender (class): should be Concept\ninstance (Concept): saving concept", "source": "juraj-google-style"}
{"code": "def _grad_fn(func_graph, grads):\n    assert len(func_graph.outputs) == len(grads)\n    ys = []\n    grad_ys = []\n    for y, grad_y in zip(func_graph.outputs, grads):\n        if not backprop_util.IsTrainable(y):\n            continue\n        ys.append(y)\n        grad_ys.append(grad_y)\n    result = gradients_util._GradientsHelper(ys, func_graph.inputs, grad_ys=grad_ys, src_graph=func_graph)\n    return result", "docstring": "The gradient function for each conditional branch.\n\nThis function builds the gradient graph of the corresponding forward-pass\nconditional branch in `func_graph`. This is done by differentiating\nfunc_graph's outputs w.r.t. its inputs.\n\nArgs:\nfunc_graph: FuncGraph. The corresponding forward-pass function.\ngrads: The list of input gradient Tensors.\n\nReturns:\nThe output gradient Tensors.", "source": "github-repos"}
{"code": "def fetch_url(self, url):\n    url_path = urlparse.urlsplit(url).path\n    dst_path = os.path.basename(url_path)\n    dst_path = self.paths.prefixed(dst_path)\n    with LogTask(('Downloading %s' % url)):\n        urllib.urlretrieve(url=os.path.expandvars(url), filename=dst_path)\n    return dst_path", "docstring": "Retrieves the given url to the prefix\n\nArgs:\nurl(str): Url to retrieve\n\nReturns:\nstr: path to the downloaded file", "source": "codesearchnet"}
{"code": "def create(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:\n    return self._path_open(path, 'wb', mime_type, compression_type)", "docstring": "Returns a write channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def short_repr(obj, max_len=40):\n  \n  obj_repr = repr(obj)\n  if len(obj_repr) <= max_len:\n    return obj_repr\n  return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr))", "docstring": "Returns a short, term-friendly string representation of the object.\n\nArgs:\nobj: An object for which to return a string representation.\nmax_len: Maximum length of the returned string. Longer reprs will be turned\ninto a brief descriptive string giving the type and length of obj.", "source": "juraj-google-style"}
{"code": "def search(self, query):\n    results = self.skype.conn('GET', SkypeConnection.API_DIRECTORY, auth=SkypeConnection.Auth.SkypeToken, params={'searchstring': query, 'requestId': '0'}).json().get('results', [])\n    return [SkypeUser.fromRaw(self.skype, json.get('nodeProfileData', {})) for json in results]", "docstring": "Search the Skype Directory for a user.\n\nArgs:\nquery (str): name to search for\n\nReturns:\nSkypeUser list: collection of possible results", "source": "codesearchnet"}
{"code": "def _valid_dtypes(self):\n    return set([dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])", "docstring": "Valid types for loss, variables and gradients.\n\nSubclasses should override to allow other float types.\n\nReturns:\nValid types for loss, variables and gradients.", "source": "github-repos"}
{"code": "def build_losses(self, logits_real, logits_fake):\n    with tf.name_scope('GAN_loss'):\n        score_real = tf.sigmoid(logits_real)\n        score_fake = tf.sigmoid(logits_fake)\n        tf.summary.histogram('score-real', score_real)\n        tf.summary.histogram('score-fake', score_fake)\n        with tf.name_scope('discrim'):\n            d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')\n            d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')\n            d_pos_acc = tf.reduce_mean(tf.cast((score_real > 0.5), tf.float32), name='accuracy_real')\n            d_neg_acc = tf.reduce_mean(tf.cast((score_fake < 0.5), tf.float32), name='accuracy_fake')\n            d_accuracy = tf.add((0.5 * d_pos_acc), (0.5 * d_neg_acc), name='accuracy')\n            self.d_loss = tf.add((0.5 * d_loss_pos), (0.5 * d_loss_neg), name='loss')\n        with tf.name_scope('gen'):\n            self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')\n            g_accuracy = tf.reduce_mean(tf.cast((score_fake > 0.5), tf.float32), name='accuracy')\n        add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)", "docstring": "Build standard GAN loss and set `self.g_loss` and `self.d_loss`.\n\nD and G play two-player minimax game with value function V(G,D)\n\nmin_G max _D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))]\n\nArgs:\nlogits_real (tf.Tensor): discrim logits from real samples\nlogits_fake (tf.Tensor): discrim logits from fake samples produced by generator", "source": "codesearchnet"}
{"code": "def check_type(o, acceptable_types, may_be_none=True):\n    if (not isinstance(acceptable_types, tuple)):\n        acceptable_types = (acceptable_types,)\n    if (may_be_none and (o is None)):\n        pass\n    elif isinstance(o, acceptable_types):\n        pass\n    else:\n        error_message = 'We were expecting to receive an instance of one of the following types: {types}{none}; but instead we received {o} which is a {o_type}.'.format(types=', '.join([repr(t.__name__) for t in acceptable_types]), none=(\"or 'None'\" if may_be_none else ''), o=o, o_type=repr(type(o).__name__))\n        raise TypeError(error_message)", "docstring": "Object is an instance of one of the acceptable types or None.\n\nArgs:\no: The object to be inspected.\nacceptable_types: A type or tuple of acceptable types.\nmay_be_none(bool): Whether or not the object may be None.\n\nRaises:\nTypeError: If the object is None and may_be_none=False, or if the\nobject is not an instance of one of the acceptable types.", "source": "codesearchnet"}
{"code": "def mesh_axis_to_cumprod(self, tensor_shape):\n    \n    tensor_layout = self.tensor_layout(tensor_shape)\n    ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)\n    ta2cumprod = tensor_shape.cumprod\n    return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]", "docstring": "For each mesh axis, give the product of previous tensor axes.\n\nArgs:\ntensor_shape: Shape.\n\nReturns:\nlist with length self.ndims where each element is an integer or None.", "source": "juraj-google-style"}
{"code": "def convert_to_bq_name(name: str) -> str:\n    return BQ_REGEX.sub('_', name).lower()", "docstring": "Tranform the given string into a valid BigQuery name -\nconvert non-alphanumeric characters to an underscore (_)\nand lowercase the result for consistency.\n\nArgs:\n* name: original name\n\nReturns:\n* Transformed valid name", "source": "github-repos"}
{"code": "def orthorhombic(a: float, b: float, c: float):\n    return Lattice.from_parameters(a, b, c, 90, 90, 90)", "docstring": "Convenience constructor for an orthorhombic lattice.\n\nArgs:\na (float): *a* lattice parameter of the orthorhombic cell.\nb (float): *b* lattice parameter of the orthorhombic cell.\nc (float): *c* lattice parameter of the orthorhombic cell.\n\nReturns:\nOrthorhombic lattice of dimensions a x b x c.", "source": "codesearchnet"}
{"code": "def get_service_name(self, service_id: str) -> str:\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve all the services details.')\n    service = self._client.services.get(service_id)\n    return service.name", "docstring": "Get the name of the docker service.\n\nOnly the manager nodes can retrieve service name\n\nArgs:\nservice_id (string): List of service ID\n\nReturns:\nstring, name of the docker service", "source": "codesearchnet"}
{"code": "def SkipAhead(self, file_object, number_of_characters):\n    \n    lines_size = len(self.lines)\n    while number_of_characters >= lines_size:\n      number_of_characters -= lines_size\n\n      self.lines = ''\n      self.ReadLines(file_object)\n      lines_size = len(self.lines)\n      if lines_size == 0:\n        return\n\n    self.lines = self.lines[number_of_characters:]", "docstring": "Skips ahead a number of characters.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\nnumber_of_characters (int): number of characters.", "source": "juraj-google-style"}
{"code": "def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:\n    if self.config.normalize_before:\n        spectrogram = (spectrogram - self.mean) / self.scale\n    is_batched = spectrogram.dim() == 3\n    if not is_batched:\n        spectrogram = spectrogram.unsqueeze(0)\n    hidden_states = spectrogram.transpose(2, 1)\n    hidden_states = self.conv_pre(hidden_states)\n    for i in range(self.num_upsamples):\n        hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)\n        hidden_states = self.upsampler[i](hidden_states)\n        res_state = self.resblocks[i * self.num_kernels](hidden_states)\n        for j in range(1, self.num_kernels):\n            res_state += self.resblocks[i * self.num_kernels + j](hidden_states)\n        hidden_states = res_state / self.num_kernels\n    hidden_states = nn.functional.leaky_relu(hidden_states)\n    hidden_states = self.conv_post(hidden_states)\n    hidden_states = torch.tanh(hidden_states)\n    if not is_batched:\n        waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)\n    else:\n        waveform = hidden_states.squeeze(1)\n    return waveform", "docstring": "spectrogram (`torch.FloatTensor`):\nTensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,\nconfig.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.\n\nReturns:\n`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of\nshape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.", "source": "github-repos"}
{"code": "def sort_variants(vcf_handle):\n    \n    logger.debug(\"Creating temp file\")\n    temp_file = NamedTemporaryFile(delete=False)\n    temp_file.close()\n    logger.debug(\"Opening temp file with codecs\")\n    temp_file_handle = codecs.open(\n                        temp_file.name,\n                        mode='w',\n                        encoding='utf-8',\n                        errors='replace'\n                        )\n\n    try:\n        with codecs.open(temp_file.name,mode='w',encoding='utf-8',errors='replace') as f:\n            for line in vcf_handle:\n                if not line.startswith('\n                    line = line.rstrip().split('\\t')\n                    chrom = line[0]\n                    priority = get_chromosome_priority(chrom)\n                \n                    print_line = \"{0}\\t{1}\\n\".format(priority, '\\t'.join(line))\n                    f.write(print_line)\n        \n        sort_variant_file(temp_file.name)\n        \n        with codecs.open(temp_file.name,mode='r',encoding='utf-8',errors='replace') as f:\n            for line in f:\n                line = line.rstrip().split('\\t')\n                yield '\\t'.join(line[1:])\n\n    except Exception as err:\n        logger.error(\"Something went wrong\")\n        logger.error(err)\n    finally:\n        logger.debug(\"Deleting temp file\")\n        os.remove(temp_file.name)\n        logger.debug(\"Temp file deleted\")", "docstring": "Sort the variants of a vcf file\n\nArgs:\nvcf_handle\nmode (str): position or rank score\n\nReturns:\nsorted_variants (Iterable): An iterable with sorted variants", "source": "juraj-google-style"}
{"code": "def similarity(self, track):\n    idx = index.Index()\n    i = 0\n    for (i, segment) in enumerate(self.segments):\n        idx.insert(i, segment.bounds(), obj=segment)\n    final_siml = []\n    final_diff = []\n    for (i, segment) in enumerate(track.segments):\n        query = idx.intersection(segment.bounds(), objects=True)\n        res_siml = []\n        res_diff = []\n        for result in query:\n            (siml, diff) = segment_similarity(segment, result.object)\n            res_siml.append(siml)\n            res_diff.append((result.id, i, diff))\n        if (len(res_siml) > 0):\n            final_siml.append(max(res_siml))\n            final_diff.append(res_diff[np.argmax(res_siml)])\n        else:\n            final_siml.append(0)\n            final_diff.append([])\n    return (np.mean(final_siml), final_diff)", "docstring": "Compares two tracks based on their topology\n\nThis method compares the given track against this\ninstance. It only verifies if given track is close\nto this one, not the other way arround\n\nArgs:\ntrack (:obj:`Track`)\nReturns:\nTwo-tuple with global similarity between tracks\nand an array the similarity between segments", "source": "codesearchnet"}
{"code": "def _get_resource_params(self, resource, for_update=False):\n    if isinstance(resource, CollectionResource):\n        return self._get_collection_params(resource)\n    if isinstance(resource, ExperimentResource):\n        return self._get_experiment_params(resource, for_update)\n    if isinstance(resource, CoordinateFrameResource):\n        return self._get_coordinate_params(resource, for_update)\n    if isinstance(resource, ChannelResource):\n        return self._get_channel_params(resource, for_update)\n    raise TypeError('resource is not supported type.')", "docstring": "Get dictionary containing all parameters for the given resource.\n\nWhen getting params for a coordinate frame update, only name and\ndescription are returned because they are the only fields that can\nbe updated.\n\nArgs:\nresource (intern.resource.boss.resource.BossResource): A sub-class\nwhose parameters will be extracted into a dictionary.\nfor_update (bool): True if params will be used for an update.\n\nReturns:\n(dictionary): A dictionary containing the resource's parameters as\nrequired by the Boss API.\n\nRaises:\nTypeError if resource is not a supported class.", "source": "codesearchnet"}
{"code": "def _WsdlHasMethod(self, method_name):\n    return (method_name in self.suds_client.wsdl.services[0].ports[0].methods)", "docstring": "Determine if the wsdl contains a method.\n\nArgs:\nmethod_name: The name of the method to search.\n\nReturns:\nTrue if the method is in the WSDL, otherwise False.", "source": "codesearchnet"}
{"code": "def FromTrimmedData(byts):\n    block = Block()\n    block.__is_trimmed = True\n    ms = StreamManager.GetStream(byts)\n    reader = BinaryReader(ms)\n    block.DeserializeUnsigned(reader)\n    reader.ReadByte()\n    witness = Witness()\n    witness.Deserialize(reader)\n    block.Script = witness\n    bc = GetBlockchain()\n    tx_list = []\n    for tx_hash in reader.ReadHashes():\n        tx = bc.GetTransaction(tx_hash)[0]\n        if (not tx):\n            raise Exception('Could not find transaction!\\n Are you running code against a valid Blockchain instance?\\n Tests that accesses transactions or size of a block but inherit from NeoTestCase instead of BlockchainFixtureTestCase will not work.')\n        tx_list.append(tx)\n    if (len(tx_list) < 1):\n        raise Exception(('Invalid block, no transactions found for block %s ' % block.Index))\n    block.Transactions = tx_list\n    StreamManager.ReleaseStream(ms)\n    return block", "docstring": "Deserialize a block from raw bytes.\n\nArgs:\nbyts:\n\nReturns:\nBlock:", "source": "codesearchnet"}
{"code": "def get_rel_timestamps(self, node_name, output_slot, debug_op, device_name=None):\n    device_name = self._infer_device_name(device_name, node_name)\n    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n    if watch_key not in self._watch_key_to_datum[device_name]:\n        raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump' % watch_key)\n    return self._watch_key_to_rel_time[device_name][watch_key]", "docstring": "Get the relative timestamp from for a debug-dumped tensor.\n\nRelative timestamp means (absolute timestamp - `t0`), where `t0` is the\nabsolute timestamp of the first dumped tensor in the dump root. The tensor\nmay be dumped multiple times in the dump root directory, so a list of\nrelative timestamps (`numpy.ndarray`) is returned.\n\nArgs:\nnode_name: (`str`) name of the node that the tensor is produced by.\noutput_slot: (`int`) output slot index of tensor.\ndebug_op: (`str`) name of the debug op.\ndevice_name: (`str`) name of the device. If there is only one device or if\nthe specified debug_watch_key exists on only one device, this argument\nis optional.\n\nReturns:\n(`list` of `int`) list of relative timestamps.\n\nRaises:\nWatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not\nexist in the debug dump data.", "source": "github-repos"}
{"code": "def keep_artifacts(self, **kwargs):\n        \n        path = '%s/%s/artifacts/keep' % (self.manager.path, self.get_id())\n        self.manager.gitlab.http_post(path)", "docstring": "Prevent artifacts from being deleted when expiration is set.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the request could not be performed", "source": "juraj-google-style"}
{"code": "def get_issue(self, issue_id, params=None):\n    return self._get((self.API_URL + 'issue/{}'.format(issue_id)), params=params)", "docstring": "Returns a full representation of the issue for the given issue key.\n\nThe issue JSON consists of the issue key and a collection of fields. Additional information like links to\nworkflow transition sub-resources, or HTML rendered values of the fields supporting HTML rendering can be\nretrieved with expand request parameter specified.\n\nThe fields request parameter accepts a comma-separated list of fields to include in the response. It can be used\nto retrieve a subset of fields. By default all fields are returned in the response. A particular field can be\nexcluded from the response if prefixed with a \"-\" (minus) sign. Parameter can be provided multiple times on a\nsingle request.\n\nBy default, all fields are returned in the response. Note: this is different from a JQL search - only navigable\nfields are returned by default (*navigable).\n\n\nArgs:\nissue_id:\nparams:\n\nReturns:", "source": "codesearchnet"}
{"code": "class Softmax(Layer):\n\n    def __init__(self, axis=-1, **kwargs):\n        super().__init__(**kwargs)\n        self.axis = axis\n        self.supports_masking = True\n        self._build_at_init()\n\n    def call(self, inputs, mask=None):\n        if mask is not None:\n            adder = (1.0 - backend.cast(mask, inputs.dtype)) * _large_negative_number(inputs.dtype)\n            inputs += adder\n        if isinstance(self.axis, (tuple, list)):\n            if len(self.axis) > 1:\n                outputs = backend.numpy.exp(inputs - backend.math.logsumexp(inputs, axis=self.axis, keepdims=True))\n            else:\n                outputs = activations.softmax(inputs, axis=self.axis[0])\n        else:\n            outputs = activations.softmax(inputs, axis=self.axis)\n        if mask is not None:\n            outputs = backend.numpy.multiply(outputs, backend.cast(mask, outputs.dtype))\n        return outputs\n\n    def get_config(self):\n        config = super().get_config()\n        config.update({'axis': self.axis})\n        return config\n\n    def compute_output_shape(self, input_shape):\n        return input_shape", "docstring": "Softmax activation layer.\n\nFormula:\n``` python\nexp_x = exp(x - max(x))\nf(x) = exp_x / sum(exp_x)\n```\n\nExample:\n>>> softmax_layer = keras.layers.Softmax()\n>>> input = np.array([1.0, 2.0, 1.0])\n>>> result = softmax_layer(input)\n>>> result\n[0.21194157, 0.5761169, 0.21194157]\n\n\nArgs:\naxis: Integer, or list of Integers, axis along which the softmax\nnormalization is applied.\n**kwargs: Base layer keyword arguments, such as `name` and `dtype`.\n\nCall arguments:\ninputs: The inputs (logits) to the softmax layer.\nmask: A boolean mask of the same shape as `inputs`. The mask\nspecifies 1 to keep and 0 to mask. Defaults to `None`.\n\nReturns:\nSoftmaxed output with the same shape as `inputs`.", "source": "github-repos"}
{"code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n    raise NotImplementedError", "docstring": "Calls the wrapped cell and performs the wrapping logic.\n\nThis method is called from the wrapper's `call` or `__call__` methods.\n\nArgs:\ninputs: A tensor with wrapped cell's input.\nstate: A tensor or tuple of tensors with wrapped cell's state.\ncell_call_fn: Wrapped cell's method to use for step computation (cell's\n`__call__` or 'call' method).\n**kwargs: Additional arguments.\n\nReturns:\nA pair containing:\n- Output: A tensor with cell's output.\n- New state: A tensor or tuple of tensors with new wrapped cell's state.", "source": "github-repos"}
{"code": "def upsert(self):\n        \n\n        required_parameters = []\n        self._stackParameters = []\n\n        try:\n            self._initialize_upsert()\n        except Exception:\n            return False\n\n        try:\n            available_parameters = self._parameters.keys()\n\n            for parameter_name in self._template.get('Parameters', {}):\n                required_parameters.append(str(parameter_name))\n\n            logging.info(' required parameters: ' + str(required_parameters))\n            logging.info('available parameters: ' + str(available_parameters))\n\n            parameters = []\n            for required_parameter in required_parameters:\n                parameter = {}\n                parameter['ParameterKey'] = str(required_parameter)\n\n                required_parameter = str(required_parameter)\n                if required_parameter in self._parameters:\n                    parameter['ParameterValue'] = self._parameters[required_parameter]\n                else:\n                    parameter['ParameterValue'] = self._parameters[required_parameter.lower()]\n\n                parameters.append(parameter)\n\n            if not self._analyze_stuff():\n                sys.exit(1)\n\n            if self._config.get('dryrun', False):\n                logging.info('Generating change set')\n                set_id = self._generate_change_set(parameters)\n                if set_id:\n                    self._describe_change_set(set_id)\n\n                logging.info('This was a dryrun')\n                sys.exit(0)\n\n            self._tags.append({\"Key\": \"CODE_VERSION_SD\", \"Value\": self._config.get('codeVersion')})\n            self._tags.append({\"Key\": \"ANSWER\", \"Value\": str(42)})\n            if self._updateStack:\n                stack = self._cloudFormation.update_stack(\n                    StackName=self._config.get('environment', {}).get('stack_name', None),\n                    TemplateURL=self._templateUrl,\n                    Parameters=parameters,\n                    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],\n                    Tags=self._tags,\n                    ClientRequestToken=str(uuid.uuid4())\n                )\n                logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))\n            else:\n                stack = self._cloudFormation.create_stack(\n                    StackName=self._config.get('environment', {}).get('stack_name', None),\n                    TemplateURL=self._templateUrl,\n                    Parameters=parameters,\n                    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],\n                    Tags=self._tags,\n                    ClientRequestToken=str(uuid.uuid4())\n                )\n                logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))\n        except Exception as x:\n            if self._verbose:\n                logging.error(x, exc_info=True)\n            else:\n                logging.error(x, exc_info=False)\n\n            return False\n\n        return True", "docstring": "The main event of the utility. Create or update a Cloud Formation\nstack. Injecting properties where needed\n\nArgs:\nNone\n\nReturns:\nTrue if the stack create/update is started successfully else\nFalse if the start goes off in the weeds.\n\nExits:\nIf the user asked for a dryrun exit(with a code 0) the thing here. There is no\npoint continuing after that point.", "source": "juraj-google-style"}
{"code": "def fillCreate(self, qry_str):\n        \n        count = 0\n        for fld in self.m_all_fields:\n            fld_type = self.m_all_fields[fld][MeterData.TypeValue]\n            fld_len = self.m_all_fields[fld][MeterData.SizeValue]\n            qry_spec = self.mapTypeToSql(fld_type, fld_len)\n            if count > 0:\n                qry_str += \", \\n\"\n            qry_str = qry_str + '   ' + fld + ' ' + qry_spec\n            count += 1\n\n        qry_str += (\",\\n\\t\" + Field.Time_Stamp + \" BIGINT,\\n\\t\" +\n                    \"Raw_A VARCHAR(512),\\n\\t\" +\n                    \"Raw_B VARCHAR(512)\\n)\")\n\n        return qry_str", "docstring": "Return query portion below CREATE.\nArgs:\nqry_str (str): String as built.\n\nReturns:\nstring: Passed string with fields appended.", "source": "juraj-google-style"}
{"code": "async def check_record(self, record, timeout=60):\n    start_time = time.time()\n    (name, rr_data, r_type, ttl) = self._extract_record_data(record)\n    r_type_code = async_dns.types.get_code(r_type)\n    resolvable_record = False\n    retries = 0\n    sleep_time = 5\n    while ((not resolvable_record) and (timeout > (retries * sleep_time))):\n        retries += 1\n        resolver_res = (await self._resolver.query(name, r_type_code))\n        possible_ans = resolver_res.an\n        resolvable_record = (await self._check_resolver_ans(possible_ans, name, rr_data, ttl, r_type_code))\n        if (not resolvable_record):\n            (await asyncio.sleep(sleep_time))\n    if (not resolvable_record):\n        logging.info(f'Sending metric record-checker-failed: {record}.')\n    else:\n        final_time = float((time.time() - start_time))\n        success_msg = f'This record: {record} took {final_time} to register.'\n        logging.info(success_msg)", "docstring": "Measures the time for a DNS record to become available.\n\nQuery a provided DNS server multiple times until the reply matches the\ninformation in the record or until timeout is reached.\n\nArgs:\nrecord (dict): DNS record as a dict with record properties.\ntimeout (int): Time threshold to query the DNS server.", "source": "codesearchnet"}
{"code": "def _update(self, item, feed_item):\n    pass", "docstring": "Performs an update in DCM.\n\nSince this method is not allowed for creative assets because those cannot be\nupdated, this method reimplements _update from BaseDAO but doesn't do\nanything to prevent an error.\n\nArgs:\nitem: The item to update in DCM.\nfeed_item: The feed item representing the creative asset in the Bulkdozer\nfeed.", "source": "github-repos"}
{"code": "def delete(self, webhookId):\n        \n        check_type(webhookId, basestring, may_be_none=False)\n\n        \n        self._session.delete(API_ENDPOINT + '/' + webhookId)", "docstring": "Delete a webhook, by ID.\n\nArgs:\nwebhookId(basestring): The ID of the webhook to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"}
{"code": "def find_unique(self, product_type, short_name, include_hidden=False):\n    prods = self.find_all(product_type, short_name, include_hidden)\n    if (len(prods) == 0):\n        raise BuildError('Could not find product by name in find_unique', name=short_name, type=product_type)\n    if (len(prods) > 1):\n        raise BuildError('Multiple providers of the same product in find_unique', name=short_name, type=product_type, products=prods)\n    if self._tracking:\n        self._resolved_products.append(prods[0])\n    return prods[0]", "docstring": "Find the unique provider of a given product by its short name.\n\nThis function will ensure that the product is only provided by exactly\none tile (either this tile or one of its dependencies and raise a\nBuildError if not.\n\nArgs:\nproduct_type (str): The type of product that we are looking for, like\nfirmware_image, library etc.\nshort_name (str): The short name of the product that we wish to find,\nusually its os.path.basename()\ninclude_hidden (bool): Return products that are hidden and not selected\nas visible in the depends section of this tile's module settings.\nThis defaults to False.\n\nReturns:\nProductInfo: The information of the one unique provider of this product.", "source": "codesearchnet"}
{"code": "def _load_attributes(self, mft_config, attrs_view):\n        \n        offset = 0\n        load_attrs = mft_config.attribute_load_list\n\n        while (attrs_view[offset:offset+4] != b'\\xff\\xff\\xff\\xff'):\n            attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:])\n            if attr_type in load_attrs:\n                \n                \n                attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])\n                if not attr.header.attr_type_id is AttrTypes.DATA:\n                    self.attrs[attr.header.attr_type_id].append(attr) \n                else:\n                    self._add_data_attribute(attr)\n            offset += attr_len", "docstring": "Loads all the attributes of an entry.\n\nOnce executed, all the attributes should have been loaded in the\nattribute *attrs* instance attribute.\n\nArgs:\nmft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells\nhow the library will interpret data.\nattrs_view (memoryview(bytearray)) - A binary stream that starts at\nthe first attribute until the end of the entry", "source": "juraj-google-style"}
{"code": "def contact(self, id):\n    try:\n        json = self.skype.conn('POST', '{0}/users/batch/profiles'.format(SkypeConnection.API_USER), json={'usernames': [id]}, auth=SkypeConnection.Auth.SkypeToken).json()\n        contact = SkypeContact.fromRaw(self.skype, json[0])\n        if (contact.id not in self.contactIds):\n            self.contactIds.append(contact.id)\n        return self.merge(contact)\n    except SkypeApiException as e:\n        if ((len(e.args) >= 2) and (getattr(e.args[1], 'status_code', None) == 403)):\n            return None\n        raise", "docstring": "Retrieve all details for a specific contact, including fields such as birthday and mood.\n\nArgs:\nid (str): user identifier to lookup\n\nReturns:\nSkypeContact: resulting contact object", "source": "codesearchnet"}
{"code": "def __init__(self, instrumentation_key, *args, **kwargs):\n        \n        if not instrumentation_key:\n            raise Exception('Instrumentation key was required but not provided')\n        telemetry_channel = kwargs.get('telemetry_channel')\n        if 'telemetry_channel' in kwargs:\n            del kwargs['telemetry_channel']\n        self.client = applicationinsights.TelemetryClient(instrumentation_key, telemetry_channel)\n        super(LoggingHandler, self).__init__(*args, **kwargs)", "docstring": "Initialize a new instance of the class.\n\nArgs:\ninstrumentation_key (str). the instrumentation key to use while sending telemetry to the service.", "source": "juraj-google-style"}
{"code": "def to_api_repr(self):\n    answer = {'mode': self.mode.upper(), 'name': self.name, 'type': self.field_type.upper(), 'description': self.description}\n    if (self.field_type.upper() == 'RECORD'):\n        answer['fields'] = [f.to_api_repr() for f in self.fields]\n    return answer", "docstring": "Return a dictionary representing this schema field.\n\nReturns:\ndict: A dictionary representing the SchemaField in a serialized\nform.", "source": "codesearchnet"}
{"code": "def convert_to_string(self, productions):\n    symbols = []\n    for production in tf.unstack(productions, axis=1):\n        (lhs, rhs) = self.production_rules[tf.argmax(input=production, axis=(- 1))]\n        if (not symbols):\n            if (lhs != self.start_symbol):\n                raise ValueError('`productions` must begin with `self.start_symbol`.')\n            symbols = rhs\n        else:\n            index = symbols.index(lhs)\n            symbols = ((symbols[:index] + rhs) + symbols[(index + 1):])\n    string = ''.join(symbols)\n    return string", "docstring": "Converts a sequence of productions into a string of terminal symbols.\n\nArgs:\nproductions: Tensor of shape [1, num_productions, num_production_rules].\nSlices along the `num_productions` dimension represent one-hot vectors.\n\nReturns:\nstr that concatenates all terminal symbols from `productions`.\n\nRaises:\nValueError: If the first production rule does not begin with\n`self.start_symbol`.", "source": "codesearchnet"}
{"code": "def save(self, path, check=True):\n        \n        with open(path, 'w') as f:\n            if check:\n                if (\"LOCATION\" not in self._data or\n                        self._data[\"LOCATION\"] is None):\n                    raise ValueError('location is not valid.')\n                if (\"DESIGN CONDITIONS\" not in self._data or\n                        self._data[\"DESIGN CONDITIONS\"] is None):\n                    raise ValueError('design_conditions is not valid.')\n                if (\"TYPICAL/EXTREME PERIODS\" not in self._data or\n                        self._data[\"TYPICAL/EXTREME PERIODS\"] is None):\n                    raise ValueError(\n                        'typical_or_extreme_periods is not valid.')\n                if (\"GROUND TEMPERATURES\" not in self._data or\n                        self._data[\"GROUND TEMPERATURES\"] is None):\n                    raise ValueError('ground_temperatures is not valid.')\n                if (\"HOLIDAYS/DAYLIGHT SAVINGS\" not in self._data or\n                        self._data[\"HOLIDAYS/DAYLIGHT SAVINGS\"] is None):\n                    raise ValueError(\n                        'holidays_or_daylight_savings is not valid.')\n                if (\"COMMENTS 1\" not in self._data or\n                        self._data[\"COMMENTS 1\"] is None):\n                    raise ValueError('comments_1 is not valid.')\n                if (\"COMMENTS 2\" not in self._data or\n                        self._data[\"COMMENTS 2\"] is None):\n                    raise ValueError('comments_2 is not valid.')\n                if (\"DATA PERIODS\" not in self._data or\n                        self._data[\"DATA PERIODS\"] is None):\n                    raise ValueError('data_periods is not valid.')\n            if (\"LOCATION\" in self._data and\n                    self._data[\"LOCATION\"] is not None):\n                f.write(self._data[\"LOCATION\"].export() + \"\\n\")\n            if (\"DESIGN CONDITIONS\" in self._data and\n                    self._data[\"DESIGN CONDITIONS\"] is not None):\n                f.write(self._data[\"DESIGN CONDITIONS\"].export() + \"\\n\")\n            if (\"TYPICAL/EXTREME PERIODS\" in self._data and\n                    self._data[\"TYPICAL/EXTREME PERIODS\"] is not None):\n                f.write(self._data[\"TYPICAL/EXTREME PERIODS\"].export() + \"\\n\")\n            if (\"GROUND TEMPERATURES\" in self._data and\n                    self._data[\"GROUND TEMPERATURES\"] is not None):\n                f.write(self._data[\"GROUND TEMPERATURES\"].export() + \"\\n\")\n            if (\"HOLIDAYS/DAYLIGHT SAVINGS\" in self._data and\n                    self._data[\"HOLIDAYS/DAYLIGHT SAVINGS\"] is not None):\n                f.write(\n                    self._data[\"HOLIDAYS/DAYLIGHT SAVINGS\"].export() +\n                    \"\\n\")\n            if (\"COMMENTS 1\" in self._data and\n                    self._data[\"COMMENTS 1\"] is not None):\n                f.write(self._data[\"COMMENTS 1\"].export() + \"\\n\")\n            if (\"COMMENTS 2\" in self._data and\n                    self._data[\"COMMENTS 2\"] is not None):\n                f.write(self._data[\"COMMENTS 2\"].export() + \"\\n\")\n            if (\"DATA PERIODS\" in self._data and\n                    self._data[\"DATA PERIODS\"] is not None):\n                f.write(self._data[\"DATA PERIODS\"].export() + \"\\n\")\n            for item in self._data[\"WEATHER DATA\"]:\n                f.write(item.export(False) + \"\\n\")", "docstring": 
"Save WeatherData in EPW format to path.\n\nArgs:\npath (str): path where EPW file should be saved", "source": "juraj-google-style"}
{"code": "def get_priority(priority):\n    if isinstance(priority, int):\n        if ((priority < 0) or (priority > 100)):\n            raise ValueError('priority must be between 0 and 100')\n        return priority\n    elif isinstance(priority, Priority):\n        return priority.value\n    elif isinstance(priority, str):\n        return Priority[priority.upper()].value\n    else:\n        raise TypeError('priority must be an integer or Priority enum value')", "docstring": "Get priority value.\n\nArgs:\npriority (int or str or :obj:`Priority`): Priority.\n\nReturns:\nint: The priority value.", "source": "codesearchnet"}
{"code": "def inception_resnet_v2_arg_scope(weight_decay=0.00004,\n                                  batch_norm_decay=0.9997,\n                                  batch_norm_epsilon=0.001):\n  \n  \n  with slim.arg_scope([slim.conv2d, slim.fully_connected],\n                      weights_regularizer=slim.l2_regularizer(weight_decay),\n                      biases_regularizer=slim.l2_regularizer(weight_decay)):\n\n    batch_norm_params = {\n        'decay': batch_norm_decay,\n        'epsilon': batch_norm_epsilon,\n    }\n    \n    with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,\n                        normalizer_fn=slim.batch_norm,\n                        normalizer_params=batch_norm_params) as scope:\n      return scope", "docstring": "Returns the scope with the default parameters for inception_resnet_v2.\n\nArgs:\nweight_decay: the weight decay for weights variables.\nbatch_norm_decay: decay for the moving average of batch_norm momentums.\nbatch_norm_epsilon: small float added to variance to avoid dividing by zero.\n\nReturns:\na arg_scope with the parameters needed for inception_resnet_v2.", "source": "juraj-google-style"}
{"code": "def _relation_exists(cls, connection, relation):\n        \n        schema_name, table_name = relation.split('.')\n\n        exists_query = \n        with connection.cursor() as cursor:\n            cursor.execute(exists_query, [schema_name, table_name])\n            result = cursor.fetchall()\n            return result == [(1,)]", "docstring": "Returns True if relation exists in the postgres db. Otherwise returns False.\n\nArgs:\nconnection: connection to postgres database who stores mpr data.\nrelation (str): name of the table, view or materialized view.\n\nNote:\nrelation means table, view or materialized view here.\n\nReturns:\nboolean: True if relation exists, False otherwise.", "source": "juraj-google-style"}
{"code": "def ParseNameSpace(self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs):\n    if (database is None):\n        raise ValueError('Missing database value.')\n    if (table is None):\n        raise ValueError('Missing table value.')\n    strings = cache.GetResults('strings')\n    if (not strings):\n        esedb_table = database.get_table_by_name('string')\n        strings = self._GetDictFromStringsTable(parser_mediator, esedb_table)\n        cache.StoreDictInCache('strings', strings)\n    for esedb_record in table.records:\n        if parser_mediator.abort:\n            break\n        record_values = self._GetRecordValues(parser_mediator, table.name, esedb_record)\n        event_data = FileHistoryNamespaceEventData()\n        event_data.file_attribute = record_values.get('fileAttrib', None)\n        event_data.identifier = record_values.get('id', None)\n        event_data.parent_identifier = record_values.get('parentId', None)\n        event_data.usn_number = record_values.get('usn', None)\n        event_data.original_filename = strings.get(event_data.identifier, None)\n        created_timestamp = record_values.get('fileCreated')\n        if created_timestamp:\n            date_time = dfdatetime_filetime.Filetime(timestamp=created_timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        modified_timestamp = record_values.get('fileModified')\n        if modified_timestamp:\n            date_time = dfdatetime_filetime.Filetime(timestamp=modified_timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        if ((not created_timestamp) and (not modified_timestamp)):\n            date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the namespace table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.\n\nRaises:\nValueError: if the database or table value is missing.", "source": "codesearchnet"}
{"code": "def create_storage_client(pipeline_options, use_credentials=True):\n    if use_credentials:\n        credentials = auth.get_service_credentials(pipeline_options)\n    else:\n        credentials = None\n    if credentials:\n        google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)\n        from google.api_core import client_info\n        beam_client_info = client_info.ClientInfo(user_agent='apache-beam/%s (GPN:Beam)' % beam_version.__version__)\n        extra_headers = {'x-goog-custom-audit-job': google_cloud_options.job_name if google_cloud_options.job_name else 'UNKNOWN'}\n        if google_cloud_options.gcs_custom_audit_entries is not None:\n            extra_headers.update(google_cloud_options.gcs_custom_audit_entries)\n        return storage.Client(credentials=credentials.get_google_auth_credentials(), project=google_cloud_options.project, client_info=beam_client_info, extra_headers=extra_headers)\n    else:\n        return storage.Client.create_anonymous_client()", "docstring": "Create a GCS client for Beam via GCS Client Library.\n\nArgs:\npipeline_options(apache_beam.options.pipeline_options.PipelineOptions):\nthe options of the pipeline.\nuse_credentials(bool): whether to create an authenticated client based\non pipeline options or an anonymous client.\n\nReturns:\nA google.cloud.storage.client.Client instance.", "source": "github-repos"}
{"code": "def __ginibre_matrix(nrow, ncol=None, seed=None):\n    \n    if ncol is None:\n        ncol = nrow\n    if seed is not None:\n        np.random.seed(seed)\n    G = np.random.normal(size=(nrow, ncol)) + \\\n        np.random.normal(size=(nrow, ncol)) * 1j\n    return G", "docstring": "Return a normally distributed complex random matrix.\n\nArgs:\nnrow (int): number of rows in output matrix.\nncol (int): number of columns in output matrix.\nseed (int): Optional. To set a random seed.\nReturns:\nndarray: A complex rectangular matrix where each real and imaginary\nentry is sampled from the normal distribution.", "source": "juraj-google-style"}
{"code": "def send_log_messages(self, messages: List[LogMessage]) -> None:\n    errors = upload_rows(self._bq_client, self._table_metadata, cast(List[Dict], messages))\n    if errors:\n        for error in errors:\n            self._fallback_logger.send_log_message({'log_type': LogType.SYSTEM.value, 'error': error})\n        raise RuntimeError('BigQuery logging failed: Check Cloud Logs.')", "docstring": "Sends multiple log messages to BigQuery.\n\nArgs:\n* messages: list of LogMessage dictionaries\n\nReturns:\n* None\n\nRaises:\n* RuntimeError: if BigQuery insert fails", "source": "github-repos"}
{"code": "def netmiko_save_config(\n    task: Task, cmd: str = \"\", confirm: bool = False, confirm_response: str = \"\"\n) -> Result:\n    \n    conn = task.host.get_connection(\"netmiko\", task.nornir.config)\n    if cmd:\n        result = conn.save_config(\n            cmd=cmd, confirm=confirm, confirm_response=confirm_response\n        )\n    else:\n        result = conn.save_config(confirm=confirm, confirm_response=confirm_response)\n    return Result(host=task.host, result=result, changed=True)", "docstring": "Execute Netmiko save_config method\nArguments:\ncmd(str, optional): Command used to save the configuration.\nconfirm(bool, optional): Does device prompt for confirmation before executing save operation\nconfirm_response(str, optional): Response send to device when it prompts for confirmation\n\nReturns:\n:obj: `nornir.core.task.Result`:\n* result (``str``): String showing the CLI output from the save operation", "source": "juraj-google-style"}
{"code": "def create_tree(tree):\n    \n    \n    config.LOGGER.info(\"\\nCreating tree on Kolibri Studio...\")\n    channel_id, channel_link = tree.upload_tree()\n    \n\n    return channel_link, channel_id", "docstring": "create_tree: Upload tree to Kolibri Studio\nArgs:\ntree (ChannelManager): manager to handle communication to Kolibri Studio\nReturns: channel id of created channel and link to channel", "source": "juraj-google-style"}
{"code": "def _resize_for_patching(self, image: 'torch.Tensor', target_resolution: tuple, interpolation: 'F.InterpolationMode', input_data_format: ChannelDimension) -> 'torch.Tensor':\n    new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)\n    resized_image = F.resize(image, (new_height, new_width), interpolation=interpolation)\n    return resized_image", "docstring": "Resizes an image to a target resolution while maintaining aspect ratio.\n\nArgs:\nimage (\"torch.Tensor\"):\nThe input image.\ntarget_resolution (tuple):\nThe target resolution (height, width) of the image.\ninterpolation (`InterpolationMode`):\nResampling filter to use if resizing the image.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\n\"torch.Tensor\": The resized and padded image.", "source": "github-repos"}
{"code": "def overlap(ival0, ival1):\n    \n    min0, max0 = ival0\n    min1, max1 = ival1\n    return max(0, min(max0, max1) - max(min0, min1)) > 0", "docstring": "Determine if two interval tuples have overlap.\n\nArgs:\niv0 ((int,int)):    An interval tuple\niv1 ((int,int));    An interval tuple\n\nReturns:\n(bool): True if the intervals overlap, otherwise False", "source": "juraj-google-style"}
{"code": "def write_hex(fout, buf, offset, width=16):\n    skipped_zeroes = 0\n    for (i, chunk) in enumerate(chunk_iter(buf, width)):\n        if (chunk == (b'\\x00' * width)):\n            skipped_zeroes += 1\n            continue\n        elif (skipped_zeroes != 0):\n            fout.write('  -- skipped zeroes: {}\\n'.format(skipped_zeroes))\n            skipped_zeroes = 0\n        fout.write('{:016x}  '.format(((i * width) + offset)))\n        column = '  '.join([' '.join(['{:02x}'.format(c) for c in subchunk]) for subchunk in chunk_iter(chunk, 8)])\n        w = (((width * 2) + (width - 1)) + ((width \n        if (len(column) != w):\n            column += (' ' * (w - len(column)))\n        fout.write(column)\n        fout.write('  |')\n        for c in chunk:\n            if (c in PRINTABLE_CHARS):\n                fout.write(chr(c))\n            else:\n                fout.write('.')\n        if (len(chunk) < width):\n            fout.write((' ' * (width - len(chunk))))\n        fout.write('|')\n        fout.write('\\n')", "docstring": "Write the content of 'buf' out in a hexdump style\n\nArgs:\nfout: file object to write to\nbuf: the buffer to be pretty printed\noffset: the starting offset of the buffer\nwidth: how many bytes should be displayed per row", "source": "codesearchnet"}
{"code": "def most_specific_convertible_shape(self, other):\n    other = as_shape(other)\n    if ((self._dims is None) or (other.dims is None) or (self.ndims != other.ndims)):\n        return unknown_shape()\n    dims = ([Dimension(None)] * self.ndims)\n    for (i, (d1, d2)) in enumerate(zip(self._dims, other.dims)):\n        if ((d1 is not None) and (d2 is not None) and (d1 == d2)):\n            dims[i] = d1\n    return TensorShape(dims)", "docstring": "Returns the most specific TensorShape convertible with `self` and `other`.\n\n* TensorShape([None, 1]) is the most specific TensorShape convertible with\nboth TensorShape([2, 1]) and TensorShape([5, 1]). Note that\nTensorShape(None) is also convertible with above mentioned TensorShapes.\n\n* TensorShape([1, 2, 3]) is the most specific TensorShape convertible with\nboth TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more\nless specific TensorShapes convertible with above mentioned TensorShapes,\ne.g. TensorShape([1, 2, None]), TensorShape(None).\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` which is the most specific convertible shape of `self`\nand `other`.", "source": "codesearchnet"}
{"code": "def search(self, query, verbose=0):\n    if (verbose > 0):\n        print(('searching ' + query))\n    query = query.lower()\n    qgram = ng(query, self.slb)\n    qocument = set()\n    for q in qgram:\n        if (q in self.ngrams.keys()):\n            for i in self.ngrams[q]:\n                qocument.add(i)\n    self.qocument = qocument\n    results = {}\n    for i in qocument:\n        for j in self.D[i].keys():\n            if (not (j in results.keys())):\n                results[j] = 0\n            results[j] = (results[j] + self.D[i][j])\n    sorted_results = sorted(results.items(), key=operator.itemgetter(1), reverse=True)\n    return [self.elements[f[0]] for f in sorted_results]", "docstring": "Searches files satisfying query\n\nIt first decompose the query in ngrams, then score each document containing\nat least one ngram with the number. The ten document having the most ngrams\nin common with the query are selected.\n\nArgs:\nquery (str): what to search;\nresults_number (int): number of results to return (default: 10)", "source": "codesearchnet"}
{"code": "def abi_to_fasta(input, output):\n    direcs = [input]\n    zip_files = list_files(input, ['zip'])\n    if zip_files:\n        direcs.extend(_process_zip_files(zip_files))\n    for d in direcs:\n        files = list_files(d, ['ab1', 'abi'])\n        seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]\n        fastas = ['>{}\\n{}'.format(s.id, str(s.seq)) for s in seqs]\n        ofile = (os.path.basename(os.path.normpath(d)) + '.fasta')\n        opath = os.path.join(output, ofile)\n        open(opath, 'w').write('\\n'.join(fastas))", "docstring": "Converts ABI or AB1 files to FASTA format.\n\nArgs:\n\ninput (str): Path to a file or directory containing abi/ab1 files or\nzip archives of abi/ab1 files\n\noutput (str): Path to a directory for the output FASTA files", "source": "codesearchnet"}
{"code": "def parse_response(response, encoding='utf-8'):\n    \n    return requests_toolbelt.multipart.decoder.MultipartDecoder.from_response(\n        response, encoding\n    ).parts", "docstring": "Parse a multipart Requests.Response into a tuple of BodyPart objects.\n\nArgs:\nresponse: Requests.Response\n\nencoding:\nThe parser will assume that any text in the HTML body is encoded with this\nencoding when decoding it for use in the ``text`` attribute.\n\nReturns:\ntuple of BodyPart\nMembers: headers (CaseInsensitiveDict), content (bytes), text (Unicode),\nencoding (str).", "source": "juraj-google-style"}
{"code": "def detect_intent_knowledge(project_id, session_id, language_code,\n                            knowledge_base_id, texts):\n    \n    import dialogflow_v2beta1 as dialogflow\n    session_client = dialogflow.SessionsClient()\n\n    session_path = session_client.session_path(project_id, session_id)\n    print('Session path: {}\\n'.format(session_path))\n\n    for text in texts:\n        text_input = dialogflow.types.TextInput(\n            text=text, language_code=language_code)\n\n        query_input = dialogflow.types.QueryInput(text=text_input)\n\n        knowledge_base_path = dialogflow.knowledge_bases_client \\\n            .KnowledgeBasesClient \\\n            .knowledge_base_path(project_id, knowledge_base_id)\n\n        query_params = dialogflow.types.QueryParameters(\n            knowledge_base_names=[knowledge_base_path])\n\n        response = session_client.detect_intent(\n            session=session_path, query_input=query_input,\n            query_params=query_params)\n\n        print('=' * 20)\n        print('Query text: {}'.format(response.query_result.query_text))\n        print('Detected intent: {} (confidence: {})\\n'.format(\n            response.query_result.intent.display_name,\n            response.query_result.intent_detection_confidence))\n        print('Fulfillment text: {}\\n'.format(\n            response.query_result.fulfillment_text))\n        print('Knowledge results:')\n        knowledge_answers = response.query_result.knowledge_answers\n        for answers in knowledge_answers.answers:\n            print(' - Answer: {}'.format(answers.answer))\n            print(' - Confidence: {}'.format(\n                answers.match_confidence))", "docstring": "Returns the result of detect intent with querying Knowledge Connector.\n\nArgs:\nproject_id: The GCP project linked with the agent you are going to query.\nsession_id: Id of the session, using the same `session_id` between requests\nallows continuation of the conversation.\nlanguage_code: Language of the queries.\nknowledge_base_id: The Knowledge base's id to query against.\ntexts: A list of text queries to send.", "source": "juraj-google-style"}
{"code": "def upload_predictions(self, file_path, tournament=1):\n        \n        self.logger.info(\"uploading predictions...\")\n\n        auth_query = \n        arguments = {'filename': os.path.basename(file_path),\n                     'tournament': tournament}\n        submission_resp = self.raw_query(auth_query, arguments,\n                                         authorization=True)\n        submission_auth = submission_resp['data']['submission_upload_auth']\n        with open(file_path, 'rb') as fh:\n            requests.put(submission_auth['url'], data=fh.read())\n        create_query = \n        arguments = {'filename': submission_auth['filename'],\n                     'tournament': tournament}\n        create = self.raw_query(create_query, arguments, authorization=True)\n        self.submission_id = create['data']['create_submission']['id']\n        return self.submission_id", "docstring": "Upload predictions from file.\n\nArgs:\nfile_path (str): CSV file with predictions that will get uploaded\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\nstr: submission_id\n\nExample:\n>>> api = NumerAPI(secret_key=\"..\", public_id=\"..\")\n>>> api.upload_predictions()\n'93c46857-fed9-4594-981e-82db2b358daf'", "source": "juraj-google-style"}
{"code": "def connect(self, uuid_value, wait=None):\n    if self.connected:\n        raise HardwareError('Cannot connect when we are already connected')\n    if (uuid_value not in self._scanned_devices):\n        self.scan(wait=wait)\n    with self._scan_lock:\n        if (uuid_value not in self._scanned_devices):\n            raise HardwareError('Could not find device to connect to by UUID', uuid=uuid_value)\n        connstring = self._scanned_devices[uuid_value]['connection_string']\n    self.connect_direct(connstring)", "docstring": "Connect to a specific device by its uuid\n\nAttempt to connect to a device that we have previously scanned using its UUID.\nIf wait is not None, then it is used in the same was a scan(wait) to override\ndefault wait times with an explicit value.\n\nArgs:\nuuid_value (int): The unique id of the device that we would like to connect to.\nwait (float): Optional amount of time to force the device adapter to wait before\nattempting to connect.", "source": "codesearchnet"}
{"code": "def add_dict_to_hash(a_hash, a_dict):\n    \n    if a_dict is None:\n        return\n    for k, v in a_dict.items():\n        a_hash.update(b'\\x00' + k.encode('utf-8') + b'\\x00' + v.encode('utf-8'))", "docstring": "Adds `a_dict` to `a_hash`\n\nArgs:\na_hash (`Hash`): the secure hash, e.g created by hashlib.md5\na_dict (dict[string, [string]]): the dictionary to add to the hash", "source": "juraj-google-style"}
{"code": "def WriteTaskCompletion(self, aborted=False):\n    \n    self._RaiseIfNotWritable()\n\n    if self._storage_type != definitions.STORAGE_TYPE_TASK:\n      raise IOError('Unsupported storage type.')\n\n    self._task.aborted = aborted\n    task_completion = self._task.CreateTaskCompletion()\n    self._storage_file.WriteTaskCompletion(task_completion)", "docstring": "Writes task completion information.\n\nArgs:\naborted (Optional[bool]): True if the session was aborted.\n\nRaises:\nIOError: if the storage type is not supported or\nwhen the storage writer is closed.\nOSError: if the storage type is not supported or\nwhen the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def prepare_framework_container_def(model, instance_type, s3_operations):\n    \n    deploy_image = model.image\n    if not deploy_image:\n        region_name = model.sagemaker_session.boto_session.region_name\n        deploy_image = fw_utils.create_image_uri(\n            region_name, model.__framework_name__, instance_type, model.framework_version, model.py_version)\n\n    base_name = utils.base_name_from_image(deploy_image)\n    model.name = model.name or utils.name_from_base(base_name)\n\n    bucket = model.bucket or model.sagemaker_session._default_bucket\n    script = os.path.basename(model.entry_point)\n    key = '{}/source/sourcedir.tar.gz'.format(model.name)\n\n    if model.source_dir and model.source_dir.lower().startswith('s3:\n        code_dir = model.source_dir\n        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)\n    else:\n        code_dir = 's3:\n        model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)\n        s3_operations['S3Upload'] = [{\n            'Path': model.source_dir or script,\n            'Bucket': bucket,\n            'Key': key,\n            'Tar': True\n        }]\n\n    deploy_env = dict(model.env)\n    deploy_env.update(model._framework_env_vars())\n\n    try:\n        if model.model_server_workers:\n            deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers)\n    except AttributeError:\n        \n        pass\n\n    return sagemaker.container_def(deploy_image, model.model_data, deploy_env)", "docstring": "Prepare the framework model container information. Specify related S3 operations for Airflow to perform.\n(Upload `source_dir`)\n\nArgs:\nmodel (sagemaker.model.FrameworkModel): The framework model\ninstance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.\ns3_operations (dict): The dict to specify S3 operations (upload `source_dir`).\n\nReturns:\ndict: The container information of this framework model.", "source": "juraj-google-style"}
{"code": "def _sanitize_input_structure(input_structure):\n    input_structure = input_structure.copy()\n    input_structure.remove_spin()\n    input_structure = input_structure.get_primitive_structure(use_site_props=False)\n    if ('magmom' in input_structure.site_properties):\n        input_structure.remove_site_property('magmom')\n    return input_structure", "docstring": "Sanitize our input structure by removing magnetic information\nand making primitive.\n\nArgs:\ninput_structure: Structure\n\nReturns: Structure", "source": "codesearchnet"}
{"code": "def process_exception_message(exception):\n        \n        exception_message = str(exception)\n        for replace_char in ['\\t', '\\n', '\\\\n']:\n            exception_message = exception_message.replace(replace_char, '' if replace_char != '\\t' else ' ')\n        return exception_message.replace('section', 'alias')", "docstring": "Process an exception message.\n\nArgs:\nexception: The exception to process.\n\nReturns:\nA filtered string summarizing the exception.", "source": "juraj-google-style"}
{"code": "async def connect(self, conn_id, connection_string):\n        \n\n        id_number = int(connection_string)\n        if id_number not in self.devices:\n            raise DeviceAdapterError(conn_id, 'connect', 'device not found')\n\n        if self._get_conn_id(connection_string) is not None:\n            raise DeviceAdapterError(conn_id, 'connect', 'device already connected')\n\n        dev = self.devices[id_number]\n\n        if dev.connected:\n            raise DeviceAdapterError(conn_id, 'connect', 'device already connected')\n\n        dev.connected = True\n\n        self._setup_connection(conn_id, connection_string)\n        self._track_property(conn_id, 'device', dev)", "docstring": "Asynchronously connect to a device\n\nArgs:\nconn_id (int): A unique identifer that will refer to this connection\nconnection_string (string): A DeviceAdapter specific string that can be used to connect to\na device using this DeviceAdapter.\ncallback (callable): A function that will be called when the connection attempt finishes as\ncallback(conection_id, adapter_id, success: bool, failure_reason: string or None)", "source": "juraj-google-style"}
{"code": "def _with_inner_rank(self, inner_rank):\n    rank = self.rank\n    if rank is None:\n        raise ValueError('Rank must be known to adjust inner_rank')\n    elif rank < 2:\n        if inner_rank == rank:\n            return self\n        raise ValueError('Cannot change inner_rank if rank < 2')\n    else:\n        new_num_row_partitions = rank - inner_rank\n        return self._with_num_row_partitions(new_num_row_partitions)", "docstring": "Returns the same shape but a different inner_rank.\n\nAll dimensions that are to be represented in the inner_shape must be dense.\nSee inner_rank.\n\nArgs:\ninner_rank: the new inner_rank of the shape.\n\nReturns:\nthe same shape but a different inner_rank\n\nRaises:\nValueError if the new dense rank is invalid, or the old rank is unknown.", "source": "github-repos"}
{"code": "def _RunScripts(self, run_dir=None):\n    with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:\n        try:\n            self.logger.info('Starting %s scripts.', self.script_type)\n            script_dict = self.retriever.GetScripts(dest_dir)\n            self.executor.RunScripts(script_dict)\n        finally:\n            self.logger.info('Finished running %s scripts.', self.script_type)", "docstring": "Retrieve metadata scripts and execute them.\n\nArgs:\nrun_dir: string, the base directory location of the temporary directory.", "source": "codesearchnet"}
{"code": "def _check_params(self, parameters):\n        \n        a_valid_fn = []\n        if self.target_fn is None:\n            if callable(self):\n                a_valid_fn.append(self.__call__)\n            else:\n                raise TypeError('invalid argument: tested object is not callable,\\\n                 please provide a valid target_fn')\n        elif isinstance(self.target_fn, types.FunctionType) \\\n                or isinstance(self.target_fn, types.MethodType):\n            a_valid_fn.append(self.target_fn)\n        else:\n            a_valid_fn.append(self.target_fn.__call__)\n\n        if not isinstance(parameters, str):\n            for p in parameters:\n                for fn in a_valid_fn:\n                    if has_arg(fn, p):\n                        pass\n                    else:\n                        raise ValueError('{} is not a valid parameter'.format(p))\n        else:\n            raise TypeError('invalid argument: list or dictionnary expected')", "docstring": "Checks for mistakes in 'parameters'\n\nArgs :\nparameters: dict, parameters to be checked\n\nRaises :\nValueError: if any parameter is not a valid argument for the target function\nor the target function is not defined\nTypeError: if argument parameters is not iterable", "source": "juraj-google-style"}
{"code": "def Glob2Regex(glob_pattern):\n  \n  if not glob_pattern:\n    raise ValueError('Missing glob pattern.')\n\n  regex_pattern = []\n\n  glob_pattern_index = 0\n  glob_pattern_length = len(glob_pattern)\n  while glob_pattern_index < glob_pattern_length:\n    character = glob_pattern[glob_pattern_index]\n    glob_pattern_index += 1\n\n    if character == '*':\n      regex_pattern.append('.*')\n\n    elif character == '?':\n      regex_pattern.append('.')\n\n    elif character != '[':\n      regex_character = re.escape(character)\n      regex_pattern.append(regex_character)\n\n    else:\n      glob_group_index = glob_pattern_index\n\n      if (glob_group_index < glob_pattern_length and\n          glob_pattern[glob_group_index] == '!'):\n        glob_group_index += 1\n\n      if (glob_group_index < glob_pattern_length and\n          glob_pattern[glob_group_index] == ']'):\n        glob_group_index += 1\n\n      while (glob_group_index < glob_pattern_length and\n             glob_pattern[glob_group_index] != ']'):\n        glob_group_index += 1\n\n      if glob_group_index >= glob_pattern_length:\n        regex_pattern.append('\\\\[')\n        continue\n\n      glob_group = glob_pattern[glob_pattern_index:glob_group_index]\n      glob_pattern_index = glob_group_index + 1\n\n      glob_group = glob_group.replace('\\\\', '\\\\\\\\')\n      if py2to3.PY_3_7_AND_LATER:\n        glob_group = glob_group.replace('|', '\\\\|')\n\n      regex_pattern.append('[')\n\n      if glob_group[0] == '!':\n        regex_pattern.append('^')\n        glob_group = glob_group[1:]\n\n      elif glob_group[0] == '^':\n        regex_pattern.append('\\\\')\n\n      regex_pattern.append(glob_group)\n      regex_pattern.append(']')\n\n  return ''.join(regex_pattern)", "docstring": "Converts a glob pattern to a regular expression.\n\nThis function supports basic glob patterns that consist of:\n*       matches everything\n?       matches any single character\n[seq]   matches any character in sequence\n[!seq]  matches any character not in sequence\n\nArgs:\nglob_pattern (str): glob pattern.\n\nReturns:\nstr: regular expression pattern.\n\nRaises:\nValueError: if the glob pattern cannot be converted.", "source": "juraj-google-style"}
{"code": "def update_parameters(parameters, grads, learning_rate=1.2):\n    W1 = parameters['W1']\n    b1 = parameters['b1']\n    W2 = parameters['W2']\n    b2 = parameters['b2']\n    dW1 = grads['dW1']\n    db1 = grads['db1']\n    dW2 = grads['dW2']\n    db2 = grads['db2']\n    W1 -= (learning_rate * dW1)\n    b1 -= (learning_rate * db1)\n    W2 -= (learning_rate * dW2)\n    b2 -= (learning_rate * db2)\n    parameters = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}\n    return parameters", "docstring": "Updates parameters using the gradient descent update rule given above\n\nArguments:\nparameters -- python dictionary containing your parameters\ngrads -- python dictionary containing your gradients\n\nReturns:\nparameters -- python dictionary containing your updated parameters", "source": "codesearchnet"}
{"code": "def cube(width, height, depth, center=(0.0, 0.0, 0.0), normals=True, uvs=True) -> VAO:\n    \n    width, height, depth = width / 2.0, height / 2.0, depth / 2.0\n\n    pos = numpy.array([\n        center[0] + width, center[1] - height, center[2] + depth,\n        center[0] + width, center[1] + height, center[2] + depth,\n        center[0] - width, center[1] - height, center[2] + depth,\n        center[0] + width, center[1] + height, center[2] + depth,\n        center[0] - width, center[1] + height, center[2] + depth,\n        center[0] - width, center[1] - height, center[2] + depth,\n        center[0] + width, center[1] - height, center[2] - depth,\n        center[0] + width, center[1] + height, center[2] - depth,\n        center[0] + width, center[1] - height, center[2] + depth,\n        center[0] + width, center[1] + height, center[2] - depth,\n        center[0] + width, center[1] + height, center[2] + depth,\n        center[0] + width, center[1] - height, center[2] + depth,\n        center[0] + width, center[1] - height, center[2] - depth,\n        center[0] + width, center[1] - height, center[2] + depth,\n        center[0] - width, center[1] - height, center[2] + depth,\n        center[0] + width, center[1] - height, center[2] - depth,\n        center[0] - width, center[1] - height, center[2] + depth,\n        center[0] - width, center[1] - height, center[2] - depth,\n        center[0] - width, center[1] - height, center[2] + depth,\n        center[0] - width, center[1] + height, center[2] + depth,\n        center[0] - width, center[1] + height, center[2] - depth,\n        center[0] - width, center[1] - height, center[2] + depth,\n        center[0] - width, center[1] + height, center[2] - depth,\n        center[0] - width, center[1] - height, center[2] - depth,\n        center[0] + width, center[1] + height, center[2] - depth,\n        center[0] + width, center[1] - height, center[2] - depth,\n        center[0] - width, center[1] - height, center[2] - depth,\n        center[0] + width, center[1] + height, center[2] - depth,\n        center[0] - width, center[1] - height, center[2] - depth,\n        center[0] - width, center[1] + height, center[2] - depth,\n        center[0] + width, center[1] + height, center[2] - depth,\n        center[0] - width, center[1] + height, center[2] - depth,\n        center[0] + width, center[1] + height, center[2] + depth,\n        center[0] - width, center[1] + height, center[2] - depth,\n        center[0] - width, center[1] + height, center[2] + depth,\n        center[0] + width, center[1] + height, center[2] + depth,\n    ], dtype=numpy.float32)\n\n    if normals:\n        normal_data = numpy.array([\n            -0, 0, 1,\n            -0, 0, 1,\n            -0, 0, 1,\n            0, 0, 1,\n            0, 0, 1,\n            0, 0, 1,\n            1, 0, 0,\n            1, 0, 0,\n            1, 0, 0,\n            1, 0, 0,\n            1, 0, 0,\n            1, 0, 0,\n            0, -1, 0,\n            0, -1, 0,\n            0, -1, 0,\n            0, -1, 0,\n            0, -1, 0,\n            0, -1, 0,\n            -1, -0, 0,\n            -1, -0, 0,\n            -1, -0, 0,\n            -1, -0, 0,\n            -1, -0, 0,\n            -1, -0, 0,\n            0, 0, -1,\n            0, 0, -1,\n            0, 0, -1,\n            0, 0, -1,\n            0, 0, -1,\n            0, 0, -1,\n            0, 1, 0,\n            0, 1, 0,\n            0, 1, 0,\n            0, 1, 0,\n            0, 1, 0,\n            0, 1, 0,\n        ], dtype=numpy.float32)\n\n    
if uvs:\n        uvs_data = numpy.array([\n            1, 0,\n            1, 1,\n            0, 0,\n            1, 1,\n            0, 1,\n            0, 0,\n            1, 0,\n            1, 1,\n            0, 0,\n            1, 1,\n            0, 1,\n            0, 0,\n            1, 1,\n            0, 1,\n            0, 0,\n            1, 1,\n            0, 0,\n            1, 0,\n            0, 1,\n            0, 0,\n            1, 0,\n            0, 1,\n            1, 0,\n            1, 1,\n            1, 0,\n            1, 1,\n            0, 1,\n            1, 0,\n            0, 1,\n            0, 0,\n            1, 1,\n            0, 1,\n            1, 0,\n            0, 1,\n            0, 0,\n            1, 0\n        ], dtype=numpy.float32)\n\n    vao = VAO(\"geometry:cube\")\n\n    \n    vao.buffer(pos, '3f', ['in_position'])\n    if normals:\n        vao.buffer(normal_data, '3f', ['in_normal'])\n    if uvs:\n        vao.buffer(uvs_data, '2f', ['in_uv'])\n\n    return vao", "docstring": "Creates a cube VAO with normals and texture coordinates\n\nArgs:\nwidth (float): Width of the cube\nheight (float): Height of the cube\ndepth (float): Depth of the cube\n\nKeyword Args:\ncenter: center of the cube as a 3-component tuple\nnormals: (bool) Include normals\nuvs: (bool) include uv coordinates\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance", "source": "juraj-google-style"}
{"code": "def list_dir(root, prefix=False):\n    root = os.path.expanduser(root)\n    directories = list(filter((lambda p: os.path.isdir(os.path.join(root, p))), os.listdir(root)))\n    if (prefix is True):\n        directories = [os.path.join(root, d) for d in directories]\n    return directories", "docstring": "List all directories at a given root\n\nArgs:\nroot (str): Path to directory whose folders need to be listed\nprefix (bool, optional): If true, prepends the path to each result, otherwise\nonly returns the name of the directories found", "source": "codesearchnet"}
{"code": "def create_assembly_instance(self, assembly_uri, part_uri, configuration):\n        \n\n        payload = {\n          \"documentId\": part_uri[\"did\"],\n          \"elementId\": part_uri[\"eid\"],\n          \n          \n          \n          \n          \"versionId\": part_uri[\"wvm\"],\n          \n          \"isAssembly\": False,\n          \"isWholePartStudio\": True,\n          \"configuration\": self.encode_configuration(part_uri[\"did\"], part_uri[\"eid\"], configuration)\n        }\n        return self._api.request('post', '/api/assemblies/d/' + assembly_uri[\"did\"] + '/' + assembly_uri[\"wvm_type\"] +\n                                 '/' + assembly_uri[\"wvm\"] + '/e/' + assembly_uri[\"eid\"] + '/instances', body=payload)", "docstring": "Insert a configurable part into an assembly.\n\nArgs:\n- assembly (dict): eid, wid, and did of the assembly into which will be inserted\n- part (dict): eid and did of the configurable part\n- configuration (dict): the configuration\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def save_wav_file(filename, wav_data, sample_rate):\n    with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n        wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])\n        sample_rate_placeholder = tf.compat.v1.placeholder(tf.int32, [])\n        wav_data_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 1])\n        wav_encoder = tf.audio.encode_wav(wav_data_placeholder, sample_rate_placeholder)\n        wav_saver = io_ops.write_file(wav_filename_placeholder, wav_encoder)\n        sess.run(wav_saver, feed_dict={wav_filename_placeholder: filename, sample_rate_placeholder: sample_rate, wav_data_placeholder: np.reshape(wav_data, (-1, 1))})", "docstring": "Saves audio sample data to a .wav audio file.\n\nArgs:\nfilename: Path to save the file to.\nwav_data: 2D array of float PCM-encoded audio data.\nsample_rate: Samples per second to encode in the file.", "source": "github-repos"}
{"code": "def autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims, verbose):\n    import tangent\n    namespace = {'tangent': tangent, 'numpy': numpy}\n    done = set()\n    final = gast.Module(body=[])\n    namespace.update(six.get_function_globals(func))\n    (node, required) = autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose)\n    final.body.extend(node.body)\n    to_do = set(required)\n    if ((motion == 'split') and (mode == 'reverse')):\n        done.add((func, wrt))\n        to_do -= done\n    while to_do:\n        (func, wrt) = to_do.pop()\n        namespace.update(six.get_function_globals(func))\n        (node, required) = autodiff_ast(func=func, wrt=wrt, motion='split', mode=mode, preserve_result=True, check_dims=False, verbose=verbose)\n        final.body.extend(node.body)\n        done.add((func, wrt))\n        to_do.update(required)\n        to_do -= done\n    return (final, namespace)", "docstring": "Perform AD on all functions in a call tree.\n\nThis function walks the call tree and differentiates each function in it. It\nalso ensures that the global namespaces that each function in the call tree\nwas in are merged.\n\nThe `tangent` and `numpy` packages are added to the namespace here, so that\nthe gradient templates can assume that they are present.\n\nArgs:\nSee `grad`.\n\nReturns:\nfinal: A single module which contains the primals and adjoints of all the\nfunctions in the call tree.\nnamespace: A merged dictionary with all the variables in the global\nnamespaces of each function. The primals and adjoints need access to\nthese in order to execute.", "source": "codesearchnet"}
{"code": "def _translate(pattern, case_sensitive=True):\n    if (not case_sensitive):\n        pattern = pattern.lower()\n    (i, n) = (0, len(pattern))\n    res = ''\n    while (i < n):\n        c = pattern[i]\n        i = (i + 1)\n        if (c == '*'):\n            res = (res + '[^/]*')\n        elif (c == '?'):\n            res = (res + '.')\n        elif (c == '['):\n            j = i\n            if ((j < n) and (pattern[j] == '!')):\n                j = (j + 1)\n            if ((j < n) and (pattern[j] == ']')):\n                j = (j + 1)\n            while ((j < n) and (pattern[j] != ']')):\n                j = (j + 1)\n            if (j >= n):\n                res = (res + '\\\\[')\n            else:\n                stuff = pattern[i:j].replace('\\\\', '\\\\\\\\')\n                i = (j + 1)\n                if (stuff[0] == '!'):\n                    stuff = ('^' + stuff[1:])\n                elif (stuff[0] == '^'):\n                    stuff = ('\\\\' + stuff)\n                res = ('%s[%s]' % (res, stuff))\n        else:\n            res = (res + re.escape(c))\n    return res", "docstring": "Translate a wildcard pattern to a regular expression.\n\nThere is no way to quote meta-characters.\n\nArguments:\npattern (str): A wildcard pattern.\ncase_sensitive (bool): Set to `False` to use a case\ninsensitive regex (default `True`).\n\nReturns:\nstr: A regex equivalent to the given pattern.", "source": "codesearchnet"}
{"code": "def snapshot(self, wiki=False, streamed=False, action=None, chunk_size=1024, **kwargs):\n    path = ('/projects/%s/snapshot' % self.get_id())\n    result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)\n    return utils.response_content(result, streamed, action, chunk_size)", "docstring": "Return a snapshot of the repository.\n\nArgs:\nwiki (bool): If True return the wiki repository\nstreamed (bool): If True the data will be processed by chunks of\n`chunk_size` and each chunk is passed to `action` for\ntreatment.\naction (callable): Callable responsible of dealing with chunk of\ndata\nchunk_size (int): Size of each chunk\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the content could not be retrieved\n\nReturns:\nstr: The uncompressed tar archive of the repository", "source": "codesearchnet"}
{"code": "class DPTPreActResidualLayer(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.use_batch_norm = config.use_batch_norm_in_fusion_residual\n        use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm\n        self.activation1 = nn.ReLU()\n        self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n        self.activation2 = nn.ReLU()\n        self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n        if self.use_batch_norm:\n            self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size)\n            self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size)\n\n    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n        residual = hidden_state\n        hidden_state = self.activation1(hidden_state)\n        hidden_state = self.convolution1(hidden_state)\n        if self.use_batch_norm:\n            hidden_state = self.batch_norm1(hidden_state)\n        hidden_state = self.activation2(hidden_state)\n        hidden_state = self.convolution2(hidden_state)\n        if self.use_batch_norm:\n            hidden_state = self.batch_norm2(hidden_state)\n        return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\nconfig (`[DPTConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def _FormatPropertyName(self, property_name):\n    fix_key = re.sub('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', property_name)\n    return re.sub('([a-z0-9])([A-Z])', '\\\\1_\\\\2', fix_key).lower()", "docstring": "Formats a camel case property name as snake case.\n\nArgs:\nproperty_name (str): property name in camel case.\n\nReturns:\nstr: property name in snake case.", "source": "codesearchnet"}
{"code": "def wait_for_js(function):\n    \n\n    @functools.wraps(function)\n    def wrapper(*args, **kwargs):  \n\n        \n        if len(args) < 1:\n            return function(*args, **kwargs)\n\n        \n        else:\n            self = args[0]\n\n            \n            \n            \n            if hasattr(self, 'wait_for_js'):\n                self.wait_for_js()\n\n            \n            return function(*args, **kwargs)\n\n    return wrapper", "docstring": "Method decorator that waits for JavaScript dependencies before executing `function`.\nIf the function is not a method, the decorator has no effect.\n\nArgs:\nfunction (callable): Method to decorate.\n\nReturns:\nDecorated method", "source": "juraj-google-style"}
{"code": "def AddKeywordsForName(self, name, keywords):\n    \n    data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)", "docstring": "Associates keywords with name.\n\nRecords that keywords are associated with name.\n\nArgs:\nname: A name which should be associated with some keywords.\nkeywords: A collection of keywords to associate with name.", "source": "juraj-google-style"}
{"code": "def IsErrorSuppressedByNolint(category, linenum):\n  \n  return (linenum in _error_suppressions.get(category, set()) or\n          linenum in _error_suppressions.get(None, set()))", "docstring": "Returns true if the specified error category is suppressed on this line.\n\nConsults the global error_suppressions map populated by\nParseNolintSuppressions/ResetNolintSuppressions.\n\nArgs:\ncategory: str, the category of the error.\nlinenum: int, the current line number.\nReturns:\nbool, True iff the error should be suppressed due to a NOLINT comment.", "source": "juraj-google-style"}
{"code": "def load_ini(self, ini_file):\n    if (ini_file and (not os.path.exists(ini_file))):\n        self.log.critical(f'Settings file specified but not found. {ini_file}')\n        sys.exit(1)\n    if (not ini_file):\n        ini_file = f'{self.cwd}/settings.ini'\n    if os.path.exists(ini_file):\n        config = configparser.RawConfigParser(allow_no_value=True)\n        config.read(ini_file)\n        for (key, value) in self.spec.items():\n            entry = None\n            if (value['type'] == str):\n                entry = config.get('settings', option=key.lower(), fallback=None)\n            elif (value['type'] == bool):\n                entry = config.getboolean('settings', option=key.lower(), fallback=None)\n            elif (value['type'] == int):\n                entry = config.getint('settings', option=key.lower(), fallback=None)\n            elif (value['type'] == float):\n                entry = config.getfloat('settings', option=key.lower(), fallback=None)\n            elif (value['type'] in [list, dict]):\n                entries = config.get('settings', option=key.lower(), fallback=None)\n                if entries:\n                    try:\n                        entry = json.loads(entries)\n                    except json.decoder.JSONDecodeError as _err:\n                        self.log.critical(f'Error parsing json from ini file. {entries}')\n                        sys.exit(1)\n            if (entry is not None):\n                setattr(self, key.upper(), entry)", "docstring": "Load the contents from the ini file\n\nArgs:\nini_file (str): The file from which the settings should be loaded", "source": "codesearchnet"}
{"code": "def ProduceEventWithEventData(self, event, event_data):\n    \n    if event.timestamp is None:\n      raise errors.InvalidEvent('Event timestamp value not set.')\n\n    if event.timestamp < self._INT64_MIN or event.timestamp > self._INT64_MAX:\n      raise errors.InvalidEvent('Event timestamp value out of bounds.')\n\n    event_data_hash = event_data.GetAttributeValuesHash()\n    if event_data_hash != self._last_event_data_hash:\n      \n      event_data = copy.deepcopy(event_data)\n\n      \n      self.ProcessEvent(\n          event_data, parser_chain=self.GetParserChain(),\n          file_entry=self._file_entry)\n\n      self._storage_writer.AddEventData(event_data)\n\n      self._last_event_data_hash = event_data_hash\n      self._last_event_data_identifier = event_data.GetIdentifier()\n\n    if self._last_event_data_identifier:\n      event.SetEventDataIdentifier(self._last_event_data_identifier)\n\n    \n    \n    event.parser = self.GetParserChain()\n\n    self._storage_writer.AddEvent(event)\n    self._number_of_events += 1\n\n    self.last_activity_timestamp = time.time()", "docstring": "Produces an event.\n\nArgs:\nevent (EventObject): event.\nevent_data (EventData): event data.\n\nRaises:\nInvalidEvent: if the event timestamp value is not set or out of bounds.", "source": "juraj-google-style"}
{"code": "def _create_slots(self, var_list):\n    pass", "docstring": "Create all slots needed by the variables.\n\nArgs:\nvar_list: A list of `Variable` objects.", "source": "github-repos"}
{"code": "def decode(image, symbols=None):\n    \n    pixels, width, height = _pixel_data(image)\n\n    results = []\n    with _image_scanner() as scanner:\n        if symbols:\n            \n            disable = set(ZBarSymbol).difference(symbols)\n            for symbol in disable:\n                zbar_image_scanner_set_config(\n                    scanner, symbol, ZBarConfig.CFG_ENABLE, 0\n                )\n            \n            \n            \n            \n            for symbol in symbols:\n                zbar_image_scanner_set_config(\n                    scanner, symbol, ZBarConfig.CFG_ENABLE, 1\n                )\n        with _image() as img:\n            zbar_image_set_format(img, _FOURCC['L800'])\n            zbar_image_set_size(img, width, height)\n            zbar_image_set_data(img, cast(pixels, c_void_p), len(pixels), None)\n            decoded = zbar_scan_image(scanner, img)\n            if decoded < 0:\n                raise PyZbarError('Unsupported image format')\n            else:\n                results.extend(_decode_symbols(_symbols_for_image(img)))\n\n    return results", "docstring": "Decodes datamatrix barcodes in `image`.\n\nArgs:\nimage: `numpy.ndarray`, `PIL.Image` or tuple (pixels, width, height)\nsymbols: iter(ZBarSymbol) the symbol types to decode; if `None`, uses\n`zbar`'s default behaviour, which is to decode all symbol types.\n\nReturns:\n:obj:`list` of :obj:`Decoded`: The values decoded from barcodes.", "source": "juraj-google-style"}
{"code": "def is_tracking_shield_displayed(self):\n    with self.selenium.context(self.selenium.CONTEXT_CHROME):\n        if (self.window.firefox_version >= 63):\n            el = self.root.find_element(*self._tracking_protection_shield_locator)\n            return (el.get_attribute('active') is not None)\n        el = self.root.find_element(By.ID, 'tracking-protection-icon')\n        return bool(el.get_attribute('state'))", "docstring": "Tracking Protection shield.\n\nReturns:\nbool: True or False if the Tracking Shield is displayed.", "source": "codesearchnet"}
{"code": "def stack50(op, delay):\n    n = 50\n    delays = delay + tf.range(0, n, dtype=float) / 10000.0\n    start_t = time.time()\n    func = tf.function(lambda: tf.stack([op(delays[i]) for i in range(n)]))\n    r_numpy = func().numpy()\n    end_t = time.time()\n    print('')\n    print('Total time = %5.3f seconds using %s' % (end_t - start_t, str(op)))\n    print('Returned values from the ops:')\n    np.set_printoptions(precision=4, suppress=True)\n    print(r_numpy)\n    sys.stdout.flush()", "docstring": "Create a tf.stack of 50 sleep ops.\n\nArgs:\nop: The sleep op, either sleep_op.SyncSleep or sleep_op.AsyncSleep.\ndelay: Each op should finish at least float `delay` seconds after it starts.", "source": "github-repos"}
{"code": "def generate(self, past_values: torch.Tensor) -> SamplePatchTSMixerRegressionOutput:\n    num_parallel_samples = self.num_parallel_samples\n    outputs = self(past_values=past_values, target_values=None, output_hidden_states=False)\n    distribution = self.distribution_output.distribution(outputs.regression_outputs)\n    samples = [distribution.sample() for _ in range(num_parallel_samples)]\n    samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets)\n    return SamplePatchTSMixerRegressionOutput(sequences=samples)", "docstring": "Generate sequences of sample predictions from a model with a probability distribution head.\n\nArgs:\npast_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):\nPast values of the time series that serves as context in order to predict the target values.\n\nReturn:\n[`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size,\nnumber of samples, num_targets)`.", "source": "github-repos"}
{"code": "def is_subtype_of(self, other: trace.TraceType) -> bool:\n    if type(self) is not type(other):\n        return False\n    is_subtype = True\n\n    def check_attribute(attribute_self, attribute_other):\n        nonlocal is_subtype\n        if not is_subtype:\n            return\n        if isinstance(attribute_self, trace.TraceType):\n            if not attribute_self.is_subtype_of(attribute_other):\n                is_subtype = False\n                return\n        elif attribute_self != attribute_other:\n            is_subtype = False\n    try:\n        nest.map_structure(check_attribute, self._serialize(), other._serialize())\n    except (ValueError, TypeError):\n        return False\n    return is_subtype", "docstring": "Returns True if `self` is a subtype of `other`.\n\nImplements the tf.types.experimental.func.TraceType interface.\n\nIf not overridden by a subclass, the default behavior is to assume the\nTypeSpec is covariant upon attributes that implement TraceType and\ninvariant upon rest of the attributes as well as the structure and type\nof the TypeSpec.\n\nArgs:\nother: A TraceType object.", "source": "github-repos"}
{"code": "def inquire_property(name, doc=None):\n\n    def inquire_property(self):\n        if (not self._started):\n            msg = 'Cannot read {0} from a security context whose establishment has not yet been started.'\n            raise AttributeError(msg)\n        return getattr(self._inquire(**{name: True}), name)\n    return property(inquire_property, doc=doc)", "docstring": "Creates a property based on an inquire result\n\nThis method creates a property that calls the\n:python:`_inquire` method, and return the value of the\nrequested information.\n\nArgs:\nname (str): the name of the 'inquire' result information\n\nReturns:\nproperty: the created property", "source": "codesearchnet"}
{"code": "def fit2dArrayToFn(arr, fn, mask=None, down_scale_factor=None, output_shape=None, guess=None, outgrid=None):\n    if (mask is None):\n        mask = np.ones(shape=arr.shape, dtype=bool)\n    if (down_scale_factor is None):\n        if (mask.sum() > 1000):\n            down_scale_factor = 0.3\n        else:\n            down_scale_factor = 1\n    if (down_scale_factor != 1):\n        arr2 = zoom(arr, down_scale_factor)\n        mask = zoom(mask, down_scale_factor, output=bool)\n    else:\n        arr2 = arr\n    (x, y) = np.where(mask)\n    z = arr2[mask]\n    (parameters, cov_matrix) = curve_fit(fn, (x, y), z, p0=guess)\n    perr = np.sqrt(np.diag(cov_matrix))\n    if (outgrid is not None):\n        (yy, xx) = outgrid\n        rebuilt = fn((yy, xx), *parameters)\n    else:\n        if (output_shape is None):\n            output_shape = arr.shape\n        fx = (arr2.shape[0] / output_shape[0])\n        fy = (arr2.shape[1] / output_shape[1])\n        rebuilt = np.fromfunction((lambda x, y: fn(((x * fx), (y * fy)), *parameters)), output_shape)\n    return (rebuilt, parameters, perr)", "docstring": "Fit a 2d array to a 2d function\n\nUSE ONLY MASKED VALUES\n\n* [down_scale_factor] map to speed up fitting procedure, set value smaller than 1\n* [output_shape] shape of the output array\n* [guess] must be scaled using [scale_factor]\n\nReturns:\nFitted map, fitting params (scaled), error", "source": "codesearchnet"}
{"code": "def annotations_from_file(filename):\n    import edflib\n    e = edflib.EdfReader(filename, annotations_mode='all')\n    return e.read_annotations()", "docstring": "Get a list of event annotations from an EDF (European Data Format file\nor EDF+ file, using edflib.\n\nArgs:\nfilename: EDF+ file\n\nReturns:\nlist: annotation events, each in the form [start_time, duration, text]", "source": "codesearchnet"}
{"code": "def get_missing_services(self, services):\n    required_services = set(services)\n    provided_services = set(self._services.keys())\n    missing_services = required_services.difference(provided_services)\n    return sorted(missing_services)", "docstring": "Check if all required services are provided\n\nArgs:\nservices: List with the service names which are required\nReturns:\nList with missing services", "source": "codesearchnet"}
{"code": "def new(cls, access_token, environment='prod'):\n        \n\n        api_client = ApiClient.new(access_token, environment)\n        return cls(api_client)", "docstring": "Create new storage service client.\n\nArguments:\nenvironment(str): The service environment to be used for the client.\n'prod' or 'dev'.\naccess_token(str): The access token used to authenticate with the\nservice\n\nReturns:\nA storage_service.Client instance", "source": "juraj-google-style"}
{"code": "def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):\n    merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]\n    vocab = tokenizer.get_vocab()\n    return cls(vocab, merges, *args, **kwargs)", "docstring": "Creates TFGPT2Tokenizer from GPT2Tokenizer\n\nArgs:\ntokenizer (GPT2Tokenizer)\n\nExamples:\n\n```python\nfrom transformers import AutoTokenizer, TFGPT2Tokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai-community/gpt2\")\ntf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer)\n```", "source": "github-repos"}
{"code": "def notify(self, cuuid, event_data):\n    euuid = str(uuid.uuid1())\n    if ('encryption' in self.registry[cuuid]):\n        client_key = self.registry[cuuid]['encryption']\n    else:\n        client_key = None\n    logger.debug(('<%s> <%s> Sending NOTIFY event to client with event data: %s' % (str(cuuid), str(euuid), pformat(event_data))))\n    try:\n        ip_address = self.registry[cuuid]['host']\n    except KeyError:\n        logger.warning(('<%s> <%s> Host not found in registry! Transmit Canceled' % (str(cuuid), str(euuid))))\n        return False\n    try:\n        port = self.registry[cuuid]['port']\n    except KeyError:\n        logger.warning(('<%s> <%s> Port not found! Transmit Canceled' % (str(cuuid), str(euuid))))\n        return False\n    packet = serialize_data({'method': 'NOTIFY', 'event_data': event_data, 'euuid': euuid}, self.compression, self.encryption, client_key)\n    address = (ip_address, port)\n    self.event_uuids[euuid] = 0\n    logger.debug(('<%s> Currently processing events: %s' % (cuuid, pformat(self.event_uuids))))\n    logger.debug(('<%s> New NOTIFY event being processed:' % cuuid))\n    logger.debug(('<%s> EUUID: %s' % (cuuid, euuid)))\n    logger.debug(('<%s> Event Data: %s' % (cuuid, pformat(event_data))))\n    self.listener.send_datagram(packet, address)\n    self.listener.call_later(self.timeout, self.retransmit, {'euuid': euuid, 'response': packet, 'cuuid': cuuid})", "docstring": "This function will send a NOTIFY event to a registered client.\n\nNOTIFY messages are nearly identical to EVENT messages, except that\nNOTIFY messages are always sent from server -> client. EVENT messages\nare always sent from client -> server. In addition to this difference,\nNOTIFY messages are not processed by a middleware to determine if\nthey are legal or not, since all messages from the server should be\nconsidered LEGAL.\n\nArgs:\ncuuid (string): The client uuid to send the event data to.\nevent_data (any): The event data that we will be sending to the\nclient.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _parse_trunk_groups(self, config):\n    values = re.findall('switchport trunk group ([^\\\\s]+)', config, re.M)\n    return dict(trunk_groups=values)", "docstring": "Scans the specified config and parses the trunk group values\n\nArgs:\nconfig (str): The interface configuraiton blcok\n\nReturns:\nA dict object with the trunk group values that can be merged\ninto the resource dict", "source": "codesearchnet"}
{"code": "def PauliX(local_space, states=None):\n    (local_space, states) = _get_pauli_args(local_space, states)\n    (g, e) = states\n    return (LocalSigma.create(g, e, hs=local_space) + LocalSigma.create(e, g, hs=local_space))", "docstring": "r\"\"\"Pauli-type X-operator\n\n.. math::\n\n\\hat{\\sigma}_x = \\begin{pmatrix}\n0 & 1 \\\\\n1 & 0\n\\end{pmatrix}\n\non an arbitrary two-level system.\n\nArgs:\nlocal_space (str or int or .LocalSpace): Associated Hilbert space.\nIf :class:`str` or :class:`int`, a :class:`LocalSpace` with a\nmatching label will be created.\nstates (None or tuple[int or str]): The labels for the basis states\nfor the two levels on which the operator acts. If None, the two\nlowest levels are used.\n\nReturns:\nOperator: Local X-operator as a linear combination of\n:class:`LocalSigma`", "source": "codesearchnet"}
{"code": "def __init__(\n      self, resolver_context, compression_method=None, file_object=None):\n    \n    if file_object is not None and compression_method is None:\n      raise ValueError(\n          'File-like object provided without corresponding compression '\n          'method.')\n\n    super(CompressedStream, self).__init__(resolver_context)\n    self._compression_method = compression_method\n    self._file_object = file_object\n    self._file_object_set_in_init = bool(file_object)\n    self._compressed_data = b''\n    self._current_offset = 0\n    self._decompressor = None\n    self._realign_offset = True\n    self._uncompressed_data = b''\n    self._uncompressed_data_offset = 0\n    self._uncompressed_data_size = 0\n    self._uncompressed_stream_size = None", "docstring": "Initializes a file-like object.\n\nIf the file-like object is chained do not separately use the parent\nfile-like object.\n\nArgs:\nresolver_context (Context): resolver context.\ncompression_method (Optional[str]): method used to the compress the data.\nfile_object (Optional[file]): parent file-like object.\n\nRaises:\nValueError: if file_object provided but compression_method is not.", "source": "juraj-google-style"}
{"code": "def register_test_preprocessor(cls, test_names: Union[str, List]):\n    if isinstance(test_names, str):\n        test_names = [test_names]\n\n    def apply(preprocessor):\n        for test_name in test_names:\n            if test_name not in cls._test_preprocessor:\n                cls._test_preprocessor[test_name] = []\n            cls._test_preprocessor[test_name].append(preprocessor)\n        return preprocessor\n    return apply", "docstring": "Decorator to register a preprocessor function for specific tests.\n\nThis decorator is used to associate a preprocessor function with one or\nmore test names. The preprocessor function will be called before the\ncorresponding test is executed, allowing for modification of the test\nspecification or environment setup.\n\nArgs:\ntest_names: A string or a list of strings representing the names of the\ntests for which the preprocessor should be registered. The test names\nshould match the names generated by `parse_test_methods`.\n\nReturns:\nA decorator function that takes the preprocessor function as an argument\nand registers it.", "source": "github-repos"}
{"code": "def __init__(self, num_classes=1000):\n        \n        super(Xception, self).__init__()\n        self.num_classes = num_classes\n\n        self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False)\n        self.bn1 = nn.BatchNorm2d(32)\n        self.relu1 = nn.ReLU(inplace=True)\n\n        self.conv2 = nn.Conv2d(32,64,3,bias=False)\n        self.bn2 = nn.BatchNorm2d(64)\n        self.relu2 = nn.ReLU(inplace=True)\n        \n\n        self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)\n        self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)\n        self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)\n\n        self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n        self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n        self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n        self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n\n        self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n        self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n        self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n        self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True)\n\n        self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)\n\n        self.conv3 = SeparableConv2d(1024,1536,3,1,1)\n        self.bn3 = nn.BatchNorm2d(1536)\n        self.relu3 = nn.ReLU(inplace=True)\n\n        \n        self.conv4 = SeparableConv2d(1536,2048,3,1,1)\n        self.bn4 = nn.BatchNorm2d(2048)\n\n        self.fc = nn.Linear(2048, num_classes)", "docstring": "Constructor\nArgs:\nnum_classes: number of classes", "source": "juraj-google-style"}
{"code": "def __setitem__(self, predicates, new_value):\n        \n        if self.df is not None and self.column_name is not None:\n            self.df[self.column_name] = self.mask(predicates, new_value)", "docstring": "Summary\n\nArgs:\npredicates (TYPE): Description\nnew_value (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def put(self, filename, encoding=None):\n        \n        from . import LocalFile\n\n        if os.path.isdir(filename) and self.source is None:\n            raise ValueError(\"Cannot write this object to \"\n                             \"directory %s without an explicit filename.\" % filename)\n\n        target = get_target_path(filename, self.source)\n\n        if (encoding is not None) and (encoding != self.encoded_with):\n            raise ValueError('%s is already encoded as \"%s\"' % self, self.encoded_with)\n\n        with self.open('rb') as infile, open(target, 'wb') as outfile:\n            for line in infile:\n                outfile.write(line)\n        return LocalFile(target)", "docstring": "Write the file to the given path\n\nArgs:\nfilename(str): path to write this file to\n\nReturns:\nLocalFile: reference to the copy of the file stored at ``filename``", "source": "juraj-google-style"}
{"code": "def __init__(self, min_interval_sec=10, max_interval_sec=600, multiplier=2):\n    \n    self.min_interval_sec = min_interval_sec\n    self.max_interval_sec = max_interval_sec\n    self.multiplier = multiplier\n    self.Succeeded()", "docstring": "Class constructor.\n\nArgs:\nmin_interval_sec: initial small delay.\nmax_interval_sec: maximum delay between retries.\nmultiplier: factor for exponential increase.", "source": "juraj-google-style"}
{"code": "def read_from_hdx(identifier, configuration=None):\n    resourceview = ResourceView(configuration=configuration)\n    result = resourceview._load_from_hdx('resource view', identifier)\n    if result:\n        return resourceview\n    return None", "docstring": "Reads the resource view given by identifier from HDX and returns ResourceView object\n\nArgs:\nidentifier (str): Identifier of resource view\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[ResourceView]: ResourceView object if successful read, None if not", "source": "codesearchnet"}
{"code": "def _RetryLoop(self, func, timeout=None):\n    \n    timeout = timeout or self.DEFAULT_TIMEOUT\n    deadline = time.time() + timeout\n    sleep = 1\n    while True:\n      try:\n        return func(timeout)\n      except grpc.RpcError:\n        if time.time() + sleep > deadline:\n          raise\n        time.sleep(sleep)\n        sleep *= 2\n        timeout = deadline - time.time()", "docstring": "Retries an operation until success or deadline.\n\nArgs:\n\nfunc: The function to run. Must take a timeout, in seconds, as a single\nparameter. If it raises grpc.RpcError and deadline has not be reached,\nit will be run again.\n\ntimeout: Retries will continue until timeout seconds have passed.", "source": "juraj-google-style"}
{"code": "async def _on_event(self, event_):\n    conv_id = event_.conversation_id.id\n    try:\n        conv = (await self._get_or_fetch_conversation(conv_id))\n    except exceptions.NetworkError:\n        logger.warning('Failed to fetch conversation for event notification: %s', conv_id)\n    else:\n        self._sync_timestamp = parsers.from_timestamp(event_.timestamp)\n        conv_event = conv.add_event(event_)\n        if (conv_event is not None):\n            (await self.on_event.fire(conv_event))\n            (await conv.on_event.fire(conv_event))", "docstring": "Receive a hangouts_pb2.Event and fan out to Conversations.\n\nArgs:\nevent_: hangouts_pb2.Event instance", "source": "codesearchnet"}
{"code": "def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):\n        \n        if 'datatype' not in overrides:\n            datatypes = [self.interface.datatype] + self.datatype\n            overrides['datatype'] = list(util.unique_iterator(datatypes))\n        return super(Dataset, self).clone(data, shared_data, new_type, *args, **overrides)", "docstring": "Clones the object, overriding data and parameters.\n\nArgs:\ndata: New data replacing the existing data\nshared_data (bool, optional): Whether to use existing data\nnew_type (optional): Type to cast object to\n*args: Additional arguments to pass to constructor\n**overrides: New keyword arguments to pass to constructor\n\nReturns:\nCloned object", "source": "juraj-google-style"}
{"code": "def get_electron_number(self, charge=0):\n    atomic_number = constants.elements['atomic_number'].to_dict()\n    return (sum([atomic_number[atom] for atom in self['atom']]) - charge)", "docstring": "Return the number of electrons.\n\nArgs:\ncharge (int): Charge of the molecule.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def __init__(self, file_entry):\n    \n    super(VShadowVolume, self).__init__(file_entry.name)\n    self._file_entry = file_entry", "docstring": "Initializes a volume.\n\nArgs:\nfile_entry (VShadowFileEntry): a VSS file entry.", "source": "juraj-google-style"}
{"code": "def check_status(status, expected, path, headers=None, resp_headers=None, body=None, extras=None):\n    if (status in expected):\n        return\n    msg = ('Expect status %r from Google Storage. But got status %d.\\nPath: %r.\\nRequest headers: %r.\\nResponse headers: %r.\\nBody: %r.\\nExtra info: %r.\\n' % (expected, status, path, headers, resp_headers, body, extras))\n    if (status == httplib.UNAUTHORIZED):\n        raise AuthorizationError(msg)\n    elif (status == httplib.FORBIDDEN):\n        raise ForbiddenError(msg)\n    elif (status == httplib.NOT_FOUND):\n        raise NotFoundError(msg)\n    elif (status == httplib.REQUEST_TIMEOUT):\n        raise TimeoutError(msg)\n    elif (status == httplib.REQUESTED_RANGE_NOT_SATISFIABLE):\n        raise InvalidRange(msg)\n    elif ((status == httplib.OK) and (308 in expected) and (httplib.OK not in expected)):\n        raise FileClosedError(msg)\n    elif (status >= 500):\n        raise ServerError(msg)\n    else:\n        raise FatalError(msg)", "docstring": "Check HTTP response status is expected.\n\nArgs:\nstatus: HTTP response status. int.\nexpected: a list of expected statuses. A list of ints.\npath: filename or a path prefix.\nheaders: HTTP request headers.\nresp_headers: HTTP response headers.\nbody: HTTP response body.\nextras: extra info to be logged verbatim if error occurs.\n\nRaises:\nAuthorizationError: if authorization failed.\nNotFoundError: if an object that's expected to exist doesn't.\nTimeoutError: if HTTP request timed out.\nServerError: if server experienced some errors.\nFatalError: if any other unexpected errors occurred.", "source": "codesearchnet"}
{"code": "def get_paths(self, key):\n    final_paths = []\n    if (key in self.__cli):\n        paths = (self.__cli[key] or [])\n        from_conf = False\n    else:\n        paths = (self.__config.get(key) or [])\n        from_conf = True\n    for path in flatten_list(paths):\n        final_path = self.__abspath(path, from_conf)\n        if final_path:\n            final_paths.append(final_path)\n    return final_paths", "docstring": "Same as `ConfigParser.get_path` for a list of paths.\n\nArgs:\nkey: str, the key to lookup the paths with\n\nReturns:\nlist: The paths.", "source": "codesearchnet"}
{"code": "def candidates(self, word):\n        \n        if self.known([word]):  \n            return {word}\n        \n        res = [x for x in self.edit_distance_1(word)]\n        tmp = self.known(res)\n        if tmp:\n            return tmp\n        \n        if self._distance == 2:\n            tmp = self.known([x for x in self.__edit_distance_alt(res)])\n            if tmp:\n                return tmp\n        return {word}", "docstring": "Generate possible spelling corrections for the provided word up to\nan edit distance of two, if and only when needed\n\nArgs:\nword (str): The word for which to calculate candidate spellings\nReturns:\nset: The set of words that are possible candidates", "source": "juraj-google-style"}
{"code": "def throw(self, exception_class, should_throw):\n        \n        return self.__copy_and_set('throws', self._throws + [(exception_class, should_throw)])", "docstring": "Defines if the an exception should be thrown after the request is sent\n\nArgs:\nexception_class (class): The class of the exception to instantiate\nshould_throw (function): The predicate that should indicate if the exception\nshould be thrown. This function will be called with the response as a parameter\n\nReturns:\nThe request builder instance in order to chain calls", "source": "juraj-google-style"}
{"code": "def gcs(line, cell=None):\n  \n  parser = google.datalab.utils.commands.CommandParser(prog='%gcs', description=)\n\n  \n  \n  \n  \n  \n  \n  \n  \n  copy_parser = parser.subcommand('copy', 'Copy one or more Google Cloud Storage objects to a '\n                                          'different location.')\n  copy_parser.add_argument('-s', '--source', help='The name of the object(s) to copy', nargs='+')\n  copy_parser.add_argument('-d', '--destination', required=True,\n                           help='The copy destination. For multiple source objects this must be a '\n                                'bucket.')\n  copy_parser.set_defaults(func=_gcs_copy)\n\n  create_parser = parser.subcommand('create', 'Create one or more Google Cloud Storage buckets.')\n  create_parser.add_argument('-p', '--project', help='The project associated with the objects')\n  create_parser.add_argument('-b', '--bucket', help='The name of the bucket(s) to create',\n                             nargs='+')\n  create_parser.set_defaults(func=_gcs_create)\n\n  delete_parser = parser.subcommand('delete', 'Delete one or more Google Cloud Storage buckets or '\n                                              'objects.')\n  delete_parser.add_argument('-b', '--bucket', nargs='*',\n                             help='The name of the bucket(s) to remove')\n  delete_parser.add_argument('-o', '--object', nargs='*',\n                             help='The name of the object(s) to remove')\n  delete_parser.set_defaults(func=_gcs_delete)\n\n  list_parser = parser.subcommand('list', 'List buckets in a project, or contents of a bucket.')\n  list_parser.add_argument('-p', '--project', help='The project associated with the objects')\n  list_parser.add_argument('-o', '--objects',\n                           help='List objects under the given Google Cloud Storage path',\n                           nargs='?')\n  list_parser.set_defaults(func=_gcs_list)\n\n  read_parser = parser.subcommand('read', 'Read the contents of a Google Cloud Storage object into '\n                                          'a Python variable.')\n  read_parser.add_argument('-o', '--object', help='The name of the object to read',\n                           required=True)\n  read_parser.add_argument('-v', '--variable', required=True,\n                           help='The name of the Python variable to set')\n  read_parser.set_defaults(func=_gcs_read)\n\n  view_parser = parser.subcommand('view', 'View the contents of a Google Cloud Storage object.')\n  view_parser.add_argument('-n', '--head', type=int, default=20,\n                           help='The number of initial lines to view')\n  view_parser.add_argument('-t', '--tail', type=int, default=20,\n                           help='The number of lines from end to view')\n  view_parser.add_argument('-o', '--object', help='The name of the object to view',\n                           required=True)\n  view_parser.set_defaults(func=_gcs_view)\n\n  write_parser = parser.subcommand('write', 'Write the value of a Python variable to a Google '\n                                            'Cloud Storage object.')\n  write_parser.add_argument('-v', '--variable', help='The name of the source Python variable',\n                            required=True)\n  write_parser.add_argument('-o', '--object', required=True,\n                            help='The name of the destination Google Cloud Storage object to write')\n  write_parser.add_argument('-c', '--content_type', help='MIME type', default='text/plain')\n  
write_parser.set_defaults(func=_gcs_write)\n\n  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the gcs cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the gcs line.\nReturns:\nThe results of executing the cell.", "source": "juraj-google-style"}
{"code": "def ParseVideoRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = KodiVideoEventData()\n    event_data.filename = self._GetRowValue(query_hash, row, 'strFilename')\n    event_data.play_count = self._GetRowValue(query_hash, row, 'playCount')\n    event_data.query = query\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastPlayed')\n    date_time = dfdatetime_time_elements.TimeElements()\n    date_time.CopyFromDateTimeString(timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Video row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _GetArgDefault(flag, spec):\n    num_defaults = len(spec.defaults)\n    args_with_defaults = spec.args[-num_defaults:]\n    for arg, default in zip(args_with_defaults, spec.defaults):\n        if arg == flag:\n            return repr(default)\n    if flag in spec.kwonlydefaults:\n        return repr(spec.kwonlydefaults[flag])\n    return ''", "docstring": "Returns a string describing a flag's default value.\n\nArgs:\nflag: The name of the flag.\nspec: An instance of fire.inspectutils.FullArgSpec, containing type and\ndefault information about the arguments to a callable.\nReturns:\nA string to be used in constructing the help screen for the function, the\nempty string if the flag does not have a default or the default is not\navailable.", "source": "github-repos"}
{"code": "def GlobForPaths(self, paths, pathtype='OS', root_path=None, process_non_regular_files=False, collect_ext_attrs=False):\n    patterns = []\n    if (not paths):\n        return\n    self.state.pathtype = pathtype\n    self.state.root_path = root_path\n    self.state.process_non_regular_files = process_non_regular_files\n    self.state.collect_ext_attrs = collect_ext_attrs\n    for path in paths:\n        patterns.extend(path.Interpolate(knowledge_base=self.client_knowledge_base))\n    patterns.sort(key=len, reverse=True)\n    for pattern in patterns:\n        curr_node = self.state.component_tree\n        components = self.ConvertGlobIntoPathComponents(pattern)\n        for (i, curr_component) in enumerate(components):\n            is_last_component = (i == (len(components) - 1))\n            next_node = curr_node.get(curr_component.SerializeToString(), {})\n            if (is_last_component and next_node):\n                curr_node[curr_component.SerializeToString()] = {}\n            else:\n                curr_node = curr_node.setdefault(curr_component.SerializeToString(), {})\n    root_path = next(iterkeys(self.state.component_tree))\n    self.CallStateInline(messages=[None], next_state='ProcessEntry', request_data=dict(component_path=[root_path]))", "docstring": "Starts the Glob.\n\nThis is the main entry point for this flow mixin.\n\nFirst we convert the pattern into regex components, and then we\ninterpolate each component. Finally, we generate a cartesian product of all\ncombinations.\n\nArgs:\npaths: A list of GlobExpression instances.\npathtype: The pathtype to use for creating pathspecs.\nroot_path: A pathspec where to start searching from.\nprocess_non_regular_files: Work with all kinds of files - not only with\nregular ones.\ncollect_ext_attrs: Whether to gather information about file extended\nattributes.", "source": "codesearchnet"}
{"code": "def NgramScorer(frequency_map):\n    \n    \n    length = len(next(iter(frequency_map)))\n    \n    floor = math.log10(0.01 / sum(frequency_map.values()))\n    ngrams = frequency.frequency_to_probability(frequency_map, decorator=math.log10)\n\n    def inner(text):\n        \n        \n        text = ''.join(text)\n        text = remove(text.upper(), string.whitespace + string.punctuation)\n        return sum(ngrams.get(ngram, floor) for ngram in iterate_ngrams(text, length))\n\n    return inner", "docstring": "Compute the score of a text by using the frequencies of ngrams.\n\nExample:\n>>> fitness = NgramScorer(english.unigrams)\n>>> fitness(\"ABC\")\n-4.3622319742618245\n\nArgs:\nfrequency_map (dict): ngram to frequency mapping", "source": "juraj-google-style"}
{"code": "def is_torch_support_available(self) -> bool:\n    if is_torch_available():\n        from transformers.utils import get_torch_version\n        return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version\n    else:\n        return False", "docstring": "The minimum PyTorch version required to export the model.\n\nReturns:\n`bool`: Whether the installed version of PyTorch is compatible with the model.", "source": "github-repos"}
{"code": "def precheck_ami_id(context):\n  \n  \n  key = \"{}/{}\".format(context.env, context.service_name)\n  print_if_verbose(\"precheck_ami_id with key: {}\".format(key))\n  current_ami = context.versionresolver.lookup(\"ami-id,{}\".format(key))\n  print_if_verbose(\"ami found: {}\".format(current_ami))\n\n  \n  \n  if current_ami is None:\n    print_if_verbose(\"precheck passed without check because current AMI is None\")\n    return True\n\n  \n  \n  instances_running_ami = context.aws_client(\"ec2\").describe_instances(\n      Filters=[{\n          'Name': 'image-id',\n          'Values': [current_ami]\n      }]\n  )[\"Reservations\"]\n  if instances_running_ami:\n    instances_running_ami = [resv[\"Instances\"][0][\"InstanceId\"] for resv in instances_running_ami]\n  print_if_verbose(\"instances running ami {}:\\n{}\".format(current_ami, repr(instances_running_ami)))\n\n  \n  env_service = \"{}-{}\".format(context.env, context.service_name)\n  instances_running_as_env_service = context.aws_client(\"ec2\").describe_instances(\n      Filters=[{\n          'Name': 'iam-instance-profile.arn',\n          'Values': [\"arn:aws:iam::*:instance-profile/{}-{}\".format(context.env, context.service_name)]\n      }]\n  )[\"Reservations\"]\n  if instances_running_as_env_service:\n    instances_running_as_env_service = \\\n        [resv[\"Instances\"][0][\"InstanceId\"] for resv in instances_running_as_env_service]\n  print_if_verbose(\"instances running as {}\".format(env_service))\n  print_if_verbose(repr(instances_running_as_env_service))\n\n  \n  for instance_id in instances_running_as_env_service:\n    if instance_id not in instances_running_ami:\n      raise RuntimeError(\"Instance: {} not running expected ami: {}\".format(instance_id, current_ami))\n\n  \n  return True", "docstring": "Is the AMI in service the same as the AMI marked current in the version records?\nThis tool won't update records unless the world state is coherent.\nArgs:\ncontext: a populated EFVersionContext object\nReturns:\nTrue if ok to proceed\nRaises:\nRuntimeError if not ok to proceed", "source": "juraj-google-style"}
{"code": "def is_action(task):\n    result = False\n    if _extract_from_env_in_payload(task, 'ACTION_CALLBACK'):\n        result = True\n    if (task.get('extra', {}).get('action') is not None):\n        result = True\n    return result", "docstring": "Determine if a task is an action task.\n\nTrusted decision and action tasks are important in that they can generate\nother valid tasks. The verification of decision and action tasks is slightly\ndifferent, so we need to be able to tell them apart.\n\nThis checks for the following things::\n\n* ``task.payload.env.ACTION_CALLBACK`` exists\n* ``task.extra.action`` exists\n\nArgs:\ntask (dict): the task definition to check\n\nReturns:\nbool: True if it's an action", "source": "codesearchnet"}
{"code": "def list_installed():\n    cmd = 'Get-WindowsFeature -ErrorAction SilentlyContinue -WarningAction SilentlyContinue | Select DisplayName,Name,Installed'\n    features = _pshell_json(cmd)\n    ret = {}\n    for entry in features:\n        if entry['Installed']:\n            ret[entry['Name']] = entry['DisplayName']\n    return ret", "docstring": "List installed features. Supported on Windows Server 2008 and Windows 8 and\nnewer.\n\nReturns:\ndict: A dictionary of installed features\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_servermanager.list_installed", "source": "codesearchnet"}
{"code": "def _parse_dataset(file_path, tmp_dir, train):\n    input_path = file_path\n    file_name = ('train' if train else 'dev')\n    gen_output_path = os.path.join(tmp_dir, (file_name + '.txt'))\n    example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE)\n    print(('input path: ' + input_path))\n    print(('gen_output_path: ' + gen_output_path))\n    print(('example_output_path: ' + example_output_path))\n    input_file = tf.gfile.Open(input_path, mode='r')\n    examples = []\n    for (counter, line) in enumerate(input_file):\n        if (counter == 0):\n            continue\n        line_split = line.split('\\t')\n        parse1 = line_split[_PARSE1_INDEX]\n        parse2 = line_split[_PARSE2_INDEX]\n        consensus_label = line_split[_LABEL_INDEX]\n        tokens1 = _get_tokens_and_tags(parse1)\n        tokens2 = _get_tokens_and_tags(parse2)\n        tokens1_str = ' '.join(tokens1)\n        tokens2_str = ' '.join(tokens2)\n        if (consensus_label != '-'):\n            examples.append([tokens1_str, tokens2_str, consensus_label])\n    input_file.close()\n    with tf.gfile.GFile(gen_output_path, 'w') as f:\n        for (tokens1_str, tokens2_str, consensus_label) in examples:\n            f.write(('%s\\t%s\\t%s\\n' % (tokens1_str, tokens2_str, consensus_label)))\n    if train:\n        with tf.gfile.GFile(example_output_path, 'w') as f:\n            for (tokens1_str, tokens2_str, consensus_label) in examples:\n                f.write(('%s %s\\n' % (tokens1_str, tokens2_str)))", "docstring": "Convert the dataset in to a simpler format.\n\nThis function creates two files. One for being processed to produce a vocab\nand another to generate the data.\n\nArgs:\nfile_path: string, path to the file to parse.\ntmp_dir: string, path to the directory to output the files.\ntrain: bool, indicating if we are parsing the training set.", "source": "codesearchnet"}
{"code": "def diff_compute(self, text1, text2, checklines, deadline):\n    \n    if not text1:\n      \n      return [(self.DIFF_INSERT, text2)]\n\n    if not text2:\n      \n      return [(self.DIFF_DELETE, text1)]\n\n    if len(text1) > len(text2):\n      (longtext, shorttext) = (text1, text2)\n    else:\n      (shorttext, longtext) = (text1, text2)\n    i = longtext.find(shorttext)\n    if i != -1:\n      \n      diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),\n               (self.DIFF_INSERT, longtext[i + len(shorttext):])]\n      \n      if len(text1) > len(text2):\n        diffs[0] = (self.DIFF_DELETE, diffs[0][1])\n        diffs[2] = (self.DIFF_DELETE, diffs[2][1])\n      return diffs\n\n    if len(shorttext) == 1:\n      \n      \n      return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]\n\n    \n    hm = self.diff_halfMatch(text1, text2)\n    if hm:\n      \n      (text1_a, text1_b, text2_a, text2_b, mid_common) = hm\n      \n      diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)\n      diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)\n      \n      return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b\n\n    if checklines and len(text1) > 100 and len(text2) > 100:\n      return self.diff_lineMode(text1, text2, deadline)\n\n    return self.diff_bisect(text1, text2, deadline)", "docstring": "Find the differences between two texts.  Assumes that the texts do not\nhave any common prefix or suffix.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\nchecklines: Speedup flag.  If false, then don't run a line-level diff\nfirst to identify the changed areas.\nIf true, then run a faster, slightly less optimal diff.\ndeadline: Time when the diff should be complete by.\n\nReturns:\nArray of changes.", "source": "juraj-google-style"}
{"code": "def reformat_css(input_file, output_file):\n    \n    \n    line_count = get_line_count(input_file)\n\n    \n    f = open(input_file, 'r+')\n    output = open(output_file, 'w')\n\n    \n    for line in range(line_count):\n        \n        string = f.readline().strip()\n        \n        string = re.sub('\\{', '{\\n', string)\n        \n        string = re.sub('; ', ';', string)\n        string = re.sub(';', ';\\n', string)\n        \n        string = re.sub('} ', '*/\\n', string)\n        \n        output.write(string)\n\n    \n    output.close()\n    f.close()\n\n    \n    indent_css(output_file, output_file)\n\n    \n    add_whitespace_before(\"{\", output_file, output_file)", "docstring": "Reformats poorly written css. This function does not validate or fix errors in the code.\nIt only gives code the proper indentation.\n\nArgs:\ninput_file: string, path to the input file.\n\noutput_file: string, path to where the reformatted css should be saved. If the target file\ndoesn't exist, a new file is created.\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def marcxml2record(marcxml):\n    \n    marcjson = create_record(marcxml, keep_singletons=False)\n    collections = _get_collections(marcjson)\n\n    if 'conferences' in collections:\n        return conferences.do(marcjson)\n    elif 'data' in collections:\n        return data.do(marcjson)\n    elif 'experiment' in collections:\n        return experiments.do(marcjson)\n    elif 'hepnames' in collections:\n        return hepnames.do(marcjson)\n    elif 'institution' in collections:\n        return institutions.do(marcjson)\n    elif 'job' in collections or 'jobhidden' in collections:\n        return jobs.do(marcjson)\n    elif 'journals' in collections or 'journalsnew' in collections:\n        return journals.do(marcjson)\n    return hep.do(marcjson)", "docstring": "Convert a MARCXML string to a JSON record.\n\nTries to guess which set of rules to use by inspecting the contents\nof the ``980__a`` MARC field, but falls back to HEP in case nothing\nmatches, because records belonging to special collections logically\nbelong to the Literature collection but don't have ``980__a:HEP``.\n\nArgs:\nmarcxml(str): a string containing MARCXML.\n\nReturns:\ndict: a JSON record converted from the string.", "source": "juraj-google-style"}
{"code": "def check_column(df: DataFrame, row: int, name: str, fn: Callable[[float], bool]) -> bool:\n    is_ok = True\n    if df(row, 'trt_model'):\n        if not fn(df(row, name)):\n            logging.error('Unsatisfied %s found at: %s', name, df(row))\n            is_ok = False\n    return is_ok", "docstring": "Checks the values of a column using a custom function and logs abnormals.\n\nThe check is only performed on TensorRT models, not native CPU/GPU models.\n\nArgs:\ndf: The DataFrame to be checked.\nrow: The row in the DataFrame\nname: The name of the column to be checked.\nfn: The function that takes a value of at the specified column and returns\nif the value satisfies the check.\n\nReturns:\nWhether all the values of the specified column satisfies the provided check.", "source": "github-repos"}
{"code": "def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):\n    for vevent in getattr(cal, 'vevent_list', []):\n        start = getattr(vevent, 'dtstart', None)\n        end = getattr(vevent, 'dtend', None)\n        for node in (start, end):\n            if node:\n                dt = node.value\n                if (isinstance(dt, datetime) and ((not utc_only) or (dt.tzinfo == utc_tz))):\n                    if (dt.tzinfo is None):\n                        dt = dt.replace(tzinfo=default)\n                    node.value = dt.astimezone(new_timezone)", "docstring": "Change the timezone of the specified component.\n\nArgs:\ncal (Component): the component to change\nnew_timezone (tzinfo): the timezone to change to\ndefault (tzinfo): a timezone to assume if the dtstart or dtend in cal\ndoesn't have an existing timezone\nutc_only (bool): only convert dates that are in utc\nutc_tz (tzinfo): the tzinfo to compare to for UTC when processing\nutc_only=True", "source": "codesearchnet"}
{"code": "def fetch_friends(self, user, paginate=False):\n    if USING_ALLAUTH:\n        social_app = SocialApp.objects.get_current('facebook')\n        oauth_token = SocialToken.objects.get(account=user, app=social_app).token\n    else:\n        social_auth_backend = FacebookBackend()\n        tokens = social_auth_backend.tokens(user)\n        oauth_token = tokens['access_token']\n    graph = facebook.GraphAPI(oauth_token)\n    friends = graph.get_connections('me', 'friends')\n    if paginate:\n        total_friends = friends.copy()\n        total_friends.pop('paging')\n        while (('paging' in friends) and ('next' in friends['paging']) and friends['paging']['next']):\n            next_url = friends['paging']['next']\n            next_url_parsed = urlparse.urlparse(next_url)\n            query_data = urlparse.parse_qs(next_url_parsed.query)\n            query_data.pop('access_token')\n            for (k, v) in query_data.items():\n                query_data[k] = v[0]\n            friends = graph.get_connections('me', 'friends', **query_data)\n            total_friends['data'] = sum([total_friends['data'], friends['data']], [])\n    else:\n        total_friends = friends\n    return total_friends", "docstring": "fethces friends from facebook using the oauth_token\nfethched by django-social-auth.\n\nNote - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth\n\nReturns:\ncollection of friend objects fetched from facebook", "source": "codesearchnet"}
{"code": "def duration_distance(item_a, item_b, max_value):\n    \n    duration_a = item_a.times.size\n    duration_b = item_b.times.size\n    return np.minimum(np.abs(duration_a - duration_b), max_value) / float(max_value)", "docstring": "Absolute difference in the duration of two items\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"}
{"code": "def __init__(self, ascii_codepage='cp1252', key_path_prefix=''):\n    \n    super(REGFWinRegistryFile, self).__init__(\n        ascii_codepage=ascii_codepage, key_path_prefix=key_path_prefix)\n    self._file_object = None\n    self._regf_file = pyregf.file()\n    self._regf_file.set_ascii_codepage(ascii_codepage)", "docstring": "Initializes the Windows Registry file.\n\nArgs:\nascii_codepage (Optional[str]): ASCII string codepage.\nkey_path_prefix (Optional[str]): Windows Registry key path prefix.", "source": "juraj-google-style"}
{"code": "def mkdirs(self, path):\n    try:\n        os.makedirs(path)\n    except OSError as err:\n        raise IOError(err)", "docstring": "Recursively create directories for the provided path.\n\nArgs:\npath: string path of the directory structure that should be created\n\nRaises:\nIOError: if leaf directory already exists.", "source": "github-repos"}
{"code": "def is_compatible(self, other: 'Schema') -> bool:\n    if not isinstance(other, Schema):\n        raise TypeError(f\"Argument 'other' should be a Schema object. Encountered {other}.\")\n    for key_spec in other.keys():\n        if key_spec not in self:\n            return False\n    for key_spec, field in self.items():\n        if key_spec not in other:\n            return False\n        if not field.value.is_compatible(other[key_spec].value):\n            return False\n    return True", "docstring": "Returns whether current schema is compatible with the other schema.\n\nNOTE(daiyip): schema A is compatible with schema B when:\nschema A and schema B have the same keys, with compatible values specs.\n\nArgs:\nother: Other schema.\n\nReturns:\nTrue if values that is acceptable to the other schema is acceptable to\ncurrent schema.\nRaises:\nTypeError: If `other` is not a schema object.", "source": "github-repos"}
{"code": "def describe(self):\n    response = {'TransformJobStatus': self.state, 'ModelName': self.model_name, 'TransformJobName': self.name, 'TransformJobArn': _UNUSED_ARN, 'TransformEndTime': self.end_time, 'CreationTime': self.start_time, 'TransformStartTime': self.start_time, 'Environment': {}, 'BatchStrategy': self.batch_strategy}\n    if self.transform_resources:\n        response['TransformResources'] = self.transform_resources\n    if self.output_data:\n        response['TransformOutput'] = self.output_data\n    if self.input_data:\n        response['TransformInput'] = self.input_data\n    return response", "docstring": "Describe this _LocalTransformJob\n\nThe response is a JSON-like dictionary that follows the response of the\nboto describe_transform_job() API.\n\nReturns:\ndict: description of this _LocalTransformJob", "source": "codesearchnet"}
{"code": "def with_rank_at_most(self, rank):\n    if ((self.ndims is not None) and (self.ndims > rank)):\n        raise ValueError(('Shape %s must have rank at most %d' % (self, rank)))\n    else:\n        return self", "docstring": "Returns a shape based on `self` with at most the given rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with at most the given\nrank.\n\nRaises:\nValueError: If `self` does not represent a shape with at most the given\n`rank`.", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    output = super().to_dict()\n    if isinstance(self.esmfold_config, EsmFoldConfig):\n        output['esmfold_config'] = self.esmfold_config.to_dict()\n    return output", "docstring": "Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\nReturns:\n`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def heightmap_lerp_hm(\n    hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray, coef: float\n) -> None:\n    \n    lib.TCOD_heightmap_lerp_hm(\n        _heightmap_cdata(hm1),\n        _heightmap_cdata(hm2),\n        _heightmap_cdata(hm3),\n        coef,\n    )", "docstring": "Perform linear interpolation between two heightmaps storing the result\nin ``hm3``.\n\nThis is the same as doing ``hm3[:] = hm1[:] + (hm2[:] - hm1[:]) * coef``\n\nArgs:\nhm1 (numpy.ndarray): The first heightmap.\nhm2 (numpy.ndarray): The second heightmap to add to the first.\nhm3 (numpy.ndarray): A destination heightmap to store the result.\ncoef (float): The linear interpolation coefficient.", "source": "juraj-google-style"}
{"code": "def replace_punctuation(self, text, excluded=None, replacement=''):\n    if (excluded is None):\n        excluded = set()\n    elif (not isinstance(excluded, set)):\n        excluded = set(excluded)\n    punct = ''.join(self.__punctuation.difference(excluded))\n    return self.replace_characters(text, characters=punct, replacement=replacement)", "docstring": "Replace punctuation symbols in text.\n\nRemoves punctuation from input text or replaces them with a\nstring if specified. Characters replaced will be those\nin string.punctuation.\n\nArgs:\ntext: The text to be processed.\nexcluded: Set of characters to exclude.\nreplacement: New text that will replace punctuation.\n\nReturns:\nThe text without punctuation.", "source": "codesearchnet"}
{"code": "def __init__(self, type_, value):\n        \n        \n        self.type_ = type_\n        self.value = value\n        super(CastError, self).__init__(\n            'Unable to cast \"{}\" to {}.'.format(value, type_.__name__))", "docstring": "Instantiate the exception with a descriptive message.\n\nArgs:\ntype_: The type to which the cast was attempting to convert the\nvalue.\nvalue: The value that was attempted to be cast.", "source": "juraj-google-style"}
{"code": "def add_dspam_headers(self, results):\n    for header in self.headers:\n        hname = (self.header_prefix + header)\n        if (header.lower() in results):\n            hvalue = results[header.lower()]\n            logger.debug('<{}> Adding header {}: {}'.format(self.id, hname, hvalue))\n            self.addheader(hname, hvalue)\n        elif (header == 'Processed'):\n            hvalue = datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Y')\n            logger.debug('<{}> Adding header {}: {}'.format(self.id, hname, hvalue))\n            self.addheader(hname, hvalue)\n        else:\n            logger.warning('<{}> Not adding header {}, no data available in DSPAM results'.format(self.id, hname))", "docstring": "Format DSPAM headers with passed results, and add them to the message.\n\nArgs:\nresults -- A results dictionary from DspamClient.", "source": "codesearchnet"}
{"code": "def __init__(self, trainer_id):\n    if not trainer_id:\n        raise ValueError('tf.data service cross-trainer cache requires a non-empty trainer ID.')\n    self.trainer_id = trainer_id", "docstring": "Constructs a CrossTrainerCache.\n\nArgs:\ntrainer_id: Each training job has a unique ID. Once a job has consumed\ndata, the data remains in the cache and is re-used by jobs with different\n`trainer_id`s. Requests with the same `trainer_id` do not re-use data.\n\nRaises:\nValueError if `trainer_id` is empty.", "source": "github-repos"}
{"code": "def __init__(self, add_tag_methods=None):\n        \n        \n        super(PacketTags, self).__init__()\n\n        \n        self.tag_methods = [PacketTags._tag_net_direction, PacketTags._tag_nxdomain]\n        if add_tag_methods:\n            self.tag_methods += add_tag_methods\n\n        \n        self.output_stream = self.tag_stuff()", "docstring": "Initialize PacketTags Class\n\nArgs:\nadd_tag_methods: a list of additional tag methods (optional, defaults to None))\nNote: all methods must take the data dictionary as an argmument (e.g. tag_method(data))", "source": "juraj-google-style"}
{"code": "def _shadow_model_variables(shadow_vars):\n    G = tf.get_default_graph()\n    curr_shadow_vars = set([v.name for v in shadow_vars])\n    model_vars = tf.model_variables()\n    shadow_model_vars = []\n    for v in model_vars:\n        assert v.name.startswith('tower'), 'Found some MODEL_VARIABLES created outside of the tower function!'\n        (stripped_op_name, stripped_var_name) = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))\n        if (stripped_op_name in curr_shadow_vars):\n            continue\n        try:\n            G.get_tensor_by_name(stripped_var_name)\n            logger.warn('Model Variable {} also appears in other collections.'.format(stripped_var_name))\n            continue\n        except KeyError:\n            pass\n        new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype, initializer=v.initial_value, trainable=False)\n        curr_shadow_vars.add(stripped_op_name)\n        shadow_vars.append(new_v)\n        shadow_model_vars.append((new_v, v))\n    return shadow_model_vars", "docstring": "Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.\n\nReturns:\nlist of (shadow_model_var, local_model_var) used for syncing.", "source": "codesearchnet"}
{"code": "def to_view(self, view_name):\n    from . import _view\n    return _view.View(view_name, self._context).create(self._sql)", "docstring": "Create a View from this Query.\n\nArgs:\nview_name: the name of the View either as a string or a 3-part tuple\n(projectid, datasetid, name).\n\nReturns:\nA View for the Query.", "source": "codesearchnet"}
{"code": "def trace_set_buffer_capacity(self, size):\n    cmd = enums.JLinkTraceCommand.SET_CAPACITY\n    data = ctypes.c_uint32(size)\n    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n    if (res == 1):\n        raise errors.JLinkException('Failed to set trace buffer size.')\n    return None", "docstring": "Sets the capacity for the trace buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nsize (int): the new capacity for the trace buffer.\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def get_attribute_from_config(config, section, attribute):\n    section = config.get(section)\n    if section:\n        option = section.get(attribute)\n        if option:\n            return option\n    raise ConfigurationError(\"Config file badly formed!\\nFailed to get attribute '{}' from section '{}'!\".format(attribute, section))", "docstring": "Try to parse an attribute of the config file.\n\nArgs:\nconfig (defaultdict): A defaultdict.\nsection (str): The section of the config file to get information from.\nattribute (str): The attribute of the section to fetch.\nReturns:\nstr: The string corresponding to the section and attribute.\nRaises:\nConfigurationError", "source": "codesearchnet"}
{"code": "def Verify(self, mempool):\n        \n        logger.info(\"Verifying transaction: %s \" % self.Hash.ToBytes())\n\n        return Helper.VerifyScripts(self)", "docstring": "Verify the transaction.\n\nArgs:\nmempool:\n\nReturns:\nbool: True if verified. False otherwise.", "source": "juraj-google-style"}
{"code": "def record_factory(app, fields=None):\n    \n    \n    record = Record(app, {\n        '$type': Record._type,\n        'isNew': True,\n        'applicationId': app.id,\n        'comments': {\n            '$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Collections.Generic.List`1[[Core.Models.Record.Comments, Core]], mscorlib]], mscorlib'\n        },\n        'values': {\n            '$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Object, mscorlib]], mscorlib'\n        }\n    })\n\n    fields = fields or {}\n\n    for name, value in six.iteritems(fields):\n        record[name] = value\n\n    \n    copy_raw = copy.copy(record._raw)\n    values_dict = {}\n    for key, value in six.iteritems(copy_raw['values']):\n        if value is not None:\n            values_dict[key] = value\n    record._raw['values'] = values_dict\n\n    return record", "docstring": "Return a temporary Record instance to be used for field validation and value parsing\n\nArgs:\napp (App): Target App to create a transient Record instance for\nfields (dict): Optional dict of fields and values to set on new Record instance before returning\n\nReturns:\nRecord: Unsaved Record instance to be used for validation, creation, etc.", "source": "juraj-google-style"}
{"code": "def parse_binary_descriptor(bindata):\n    func_names = {0: 'copy_latest_a', 1: 'average_a', 2: 'copy_all_a', 3: 'sum_a', 4: 'copy_count_a', 5: 'trigger_streamer', 6: 'call_rpc', 7: 'subtract_afromb'}\n    if (len(bindata) != 20):\n        raise ArgumentError('Invalid binary node descriptor with incorrect size', size=len(bindata), expected=20, bindata=bindata)\n    (a_trig, b_trig, stream_id, a_id, b_id, proc, a_cond, b_cond, trig_combiner) = struct.unpack('<LLHHHBBBB2x', bindata)\n    node_stream = DataStream.FromEncoded(stream_id)\n    if (a_id == 65535):\n        raise ArgumentError('Invalid binary node descriptor with invalid first input', input_selector=a_id)\n    a_selector = DataStreamSelector.FromEncoded(a_id)\n    a_trigger = _process_binary_trigger(a_trig, a_cond)\n    b_selector = None\n    b_trigger = None\n    if (b_id != 65535):\n        b_selector = DataStreamSelector.FromEncoded(b_id)\n        b_trigger = _process_binary_trigger(b_trig, b_cond)\n    if (trig_combiner == SGNode.AndTriggerCombiner):\n        comb = '&&'\n    elif (trig_combiner == SGNode.OrTriggerCombiner):\n        comb = '||'\n    else:\n        raise ArgumentError('Invalid trigger combiner in binary node descriptor', combiner=trig_combiner)\n    if (proc not in func_names):\n        raise ArgumentError('Unknown processing function', function_id=proc, known_functions=func_names)\n    func_name = func_names[proc]\n    if (b_selector is None):\n        return '({} {}) => {} using {}'.format(a_selector, a_trigger, node_stream, func_name)\n    return '({} {} {} {} {}) => {} using {}'.format(a_selector, a_trigger, comb, b_selector, b_trigger, node_stream, func_name)", "docstring": "Convert a binary node descriptor into a string descriptor.\n\nBinary node descriptor are 20-byte binary structures that encode all\ninformation needed to create a graph node.  They are used to communicate\nthat information to an embedded device in an efficent format.  This\nfunction exists to turn such a compressed node description back into\nan understandable string.\n\nArgs:\nbindata (bytes): The raw binary structure that contains the node\ndescription.\n\nReturns:\nstr: The corresponding string description of the same sensor_graph node", "source": "codesearchnet"}
{"code": "def server_hardware_types(self):\n    if (not self.__server_hardware_types):\n        self.__server_hardware_types = ServerHardwareTypes(self.__connection)\n    return self.__server_hardware_types", "docstring": "Gets the ServerHardwareTypes API client.\n\nReturns:\nServerHardwareTypes:", "source": "codesearchnet"}
{"code": "def parse_int(value: Any) -> Numeric:\n    return int(value)", "docstring": "Attempts to parse a valid integer value from the provided value.\n\nArgs:\n* value: of Any type\n\nReturns:\n* int value: if valid\n\nRaises:\n* ValueError: if parsing failed", "source": "github-repos"}
{"code": "def _get_context_name(self, app=None):\n    elements = [self.__class__.__name__, 'context', text_type(id(self))]\n    if app:\n        elements.append(text_type(id(app)))\n    else:\n        try:\n            elements.append(text_type(id(self.app)))\n        except RuntimeError:\n            pass\n    return '_'.join(elements)", "docstring": "Generate the name of the context variable for this component & app.\n\nBecause we store the ``context`` in a Local so the component\ncan be used across multiple apps, we cannot store the context on the\ninstance itself. This function will generate a unique and predictable\nkey in which to store the context.\n\nReturns:\nstr: The name of the context variable to set and get the context\nfrom.", "source": "codesearchnet"}
{"code": "def match_rules_context(tree, rules, parent_context={}):\n    \n    for template, match_rules in rules.items():\n        context = parent_context.copy()\n        if match_template(tree, template, context):\n            for key, child_rules in match_rules.items():\n                child_context = match_rules_context(context[key], child_rules, context)\n                if child_context:\n                    for k, v in child_context.items():\n                        context[k] = v\n                else:\n                    return None\n            return context\n    return None", "docstring": "Recursively matches a Tree structure with rules and returns context\n\nArgs:\ntree (Tree): Parsed tree structure\nrules (dict): See match_rules\nparent_context (dict): Context of parent call\nReturns:\ndict: Context matched dictionary of matched rules or\nNone if no match", "source": "juraj-google-style"}
{"code": "def _get_members(self, class_obj, member_type, include_in_public=None):\n    try:\n        app = self.state.document.settings.env.app\n    except AttributeError:\n        app = None\n    if (not include_in_public):\n        include_in_public = []\n    all_members = []\n    for member_name in dir(class_obj):\n        try:\n            documenter = get_documenter(app, safe_getattr(class_obj, member_name), class_obj)\n        except AttributeError:\n            continue\n        if (documenter.objtype == member_type):\n            all_members.append(member_name)\n    public_members = [x for x in all_members if ((x in include_in_public) or (not x.startswith('_')))]\n    return (public_members, all_members)", "docstring": "Return class members of the specified type.\n\nclass_obj: Class object.\n\nmember_type: Member type ('method' or 'attribute').\n\ninclude_in_public: set/list/tuple with member names that should be\nincluded in public members in addition to the public names (those\nstarting without underscore).\n\nReturns:\ntuple(public_members, all_members): Names of the class members of\nthe specified member type (public / all).", "source": "codesearchnet"}
{"code": "def group_protos(cls, proto_list: List[types.ProtobufBaseType], **kwargs) -> Dict[str, List[types.ProtobufBaseType]]:\n    del proto_list, kwargs\n    return []", "docstring": "Creates a dict of batchable protos.\n\nFor a list of protos, generates a dictionary `{key: grouped_protos}` such\nthat the `grouped_protos` can be batched together.\n\nArgs:\nproto_list: A list of `Instrument` protos.\n**kwargs: Any extra arguments. E.g., pricing configuration.\n\nReturns:\nA dictionary of grouped protos.", "source": "github-repos"}
{"code": "def unzip(input_layer, split_dim=0, num_splits=2):\n  \n  shape = input_layer.shape\n  _check_split_dims(num_splits, split_dim, shape)\n  splits = functions.unzip(input_layer, split_dim, shape[split_dim], num_splits)\n  return input_layer.with_sequence(splits)", "docstring": "Unzips this Tensor along the split_dim into num_splits Equal chunks.\n\nExamples:\n\n* `[1, 2, 3, 4] -> [1, 3], [2, 4]`\n* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [3, 3]], [[2, 2], [4, 4]]`\n\nArgs:\ninput_layer: The chainable object, supplied.\nsplit_dim: The dimension to split along. Defaults to batch.\nnum_splits: The number of splits.\nReturns:\nA list of PrettyTensors.\nRaises:\nValueError: If split_dim is out of range or isn't divided evenly by\nnum_splits.", "source": "juraj-google-style"}
{"code": "def determine_action(self, issue):\n    resource_type = self.resource_types[issue.resource.resource_type_id]\n    issue_alert_schedule = (self.alert_schedule[resource_type] if (resource_type in self.alert_schedule) else self.alert_schedule['*'])\n    action_item = {'action': None, 'action_description': None, 'last_alert': issue.last_alert, 'issue': issue, 'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource), 'owners': [], 'stop_after': issue_alert_schedule['stop'], 'remove_after': issue_alert_schedule['remove'], 'notes': issue.notes, 'missing_tags': issue.missing_tags}\n    time_elapsed = (time.time() - issue.created)\n    stop_schedule = pytimeparse.parse(issue_alert_schedule['stop'])\n    remove_schedule = pytimeparse.parse(issue_alert_schedule['remove'])\n    if self.collect_only:\n        action_item['action'] = AuditActions.IGNORE\n    elif (remove_schedule and (time_elapsed >= remove_schedule)):\n        action_item['action'] = AuditActions.REMOVE\n        action_item['action_description'] = 'Resource removed'\n        action_item['last_alert'] = remove_schedule\n        if issue.update({'last_alert': remove_schedule}):\n            db.session.add(issue.issue)\n    elif (stop_schedule and (time_elapsed >= stop_schedule)):\n        action_item['action'] = AuditActions.STOP\n        action_item['action_description'] = 'Resource stopped'\n        action_item['last_alert'] = stop_schedule\n        if issue.update({'last_alert': stop_schedule}):\n            db.session.add(issue.issue)\n    else:\n        alert_selection = self.determine_alert(issue_alert_schedule['alert'], issue.get_property('created').value, issue.get_property('last_alert').value)\n        if alert_selection:\n            action_item['action'] = AuditActions.ALERT\n            action_item['action_description'] = '{} alert'.format(alert_selection)\n            action_item['last_alert'] = alert_selection\n            if issue.update({'last_alert': alert_selection}):\n                db.session.add(issue.issue)\n        else:\n            action_item['action'] = AuditActions.IGNORE\n    db.session.commit()\n    return action_item", "docstring": "Determine the action we should take for the issue\n\nArgs:\nissue: Issue to determine action for\n\nReturns:\n`dict`", "source": "codesearchnet"}
{"code": "def __init__(self, *timeslots: List[Timeslot]):\n        \n        self._table = defaultdict(list)\n\n        for slot in timeslots:\n            for interval in self._table[slot.channel]:\n                if slot.interval.has_overlap(interval):\n                    raise PulseError(\"Cannot create TimeslotCollection from overlapped timeslots\")\n            self._table[slot.channel].append(slot.interval)\n\n        self._timeslots = tuple(timeslots)", "docstring": "Create a new time-slot collection.\n\nArgs:\n*timeslots: list of time slots\nRaises:\nPulseError: when overlapped time slots are specified", "source": "juraj-google-style"}
{"code": "def wait_for_bq_job(self, job_reference, sleep_duration_sec=5, max_retries=0):\n    retry = 0\n    while True:\n        retry += 1\n        job = self.get_job(job_reference.projectId, job_reference.jobId, job_reference.location)\n        _LOGGER.info('Job %s status: %s', job.id, job.status.state)\n        if job.status.state == 'DONE' and job.status.errorResult:\n            raise RuntimeError('BigQuery job {} failed. Error Result: {}'.format(job_reference.jobId, job.status.errorResult))\n        elif job.status.state == 'DONE':\n            return True\n        else:\n            time.sleep(sleep_duration_sec)\n            if max_retries != 0 and retry >= max_retries:\n                raise RuntimeError('The maximum number of retries has been reached')", "docstring": "Poll job until it is DONE.\n\nArgs:\njob_reference: bigquery.JobReference instance.\nsleep_duration_sec: Specifies the delay in seconds between retries.\nmax_retries: The total number of times to retry. If equals to 0,\nthe function waits forever.\n\nRaises:\n`RuntimeError`: If the job is FAILED or the number of retries has been\nreached.", "source": "github-repos"}
{"code": "def _create_state_graph(self, name):\n    import_collections = [tf_v1.GraphKeys.GLOBAL_VARIABLES, tf_v1.GraphKeys.MODEL_VARIABLES, tf_v1.GraphKeys.TABLE_INITIALIZERS, tf_v1.GraphKeys.ASSET_FILEPATHS, tf_v1.GraphKeys.COND_CONTEXT, tf_v1.GraphKeys.WHILE_CONTEXT]\n    if self._trainable:\n        import_collections.extend([tf_v1.GraphKeys.TRAINABLE_VARIABLES, tf_v1.GraphKeys.REGULARIZATION_LOSSES])\n    absolute_scope_name = tf_v1.get_default_graph().unique_name(name, mark_as_used=False)\n    relative_scope_name = absolute_scope_name.split('/')[(- 1)]\n    assert (relative_scope_name == name)\n    meta_graph = meta_graph_pb2.MetaGraphDef()\n    meta_graph.CopyFrom(self._meta_graph)\n    meta_graph_lib.filter_collections(meta_graph, import_collections)\n    meta_graph_lib.prefix_shared_name_attributes(meta_graph, absolute_scope_name)\n    tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope=relative_scope_name)\n    variables_tensor_map = {}\n    for var in tf_v1.global_variables():\n        if var.op.name.startswith((absolute_scope_name + '/')):\n            variables_tensor_map[var.name[(len(absolute_scope_name) + 1):]] = var\n\n    def _get_tensor(tensor_name):\n        return tf_v1.get_default_graph().get_tensor_by_name(meta_graph_lib.prepend_name_scope(tensor_name, import_scope=absolute_scope_name))\n    state_op_names = list_registered_stateful_ops_without_inputs()\n    state_map = get_state_map(meta_graph, state_op_names, set(), _get_tensor)\n    return (variables_tensor_map, state_map)", "docstring": "Creates the graph nodes that hold the state of the Module.\n\nArgs:\nname: name scope to create the state graph in.\n\nReturns:\nA tuple consisting of:\nvariables_tensor_map: a map from tensor names in the original graph def\nto the created Variables objects.\nstate_map: a map from tensors names in the original graph def to the\ninstantiated tensors to be used as a state_map.", "source": "codesearchnet"}
{"code": "def _order_pases(self, passes):\n    passes = set(passes)\n    pass_deps = {}\n    for opt in passes:\n        (_, before, after) = self._known_passes[opt]\n        if (opt not in pass_deps):\n            pass_deps[opt] = set()\n        for after_pass in after:\n            pass_deps[opt].add(after_pass)\n        for other in before:\n            if (other not in passes):\n                continue\n            if (other not in pass_deps):\n                pass_deps[other] = set()\n            pass_deps[other].add(opt)\n    return toposort_flatten(pass_deps)", "docstring": "Topologically sort optimization passes.\n\nThis ensures that the resulting passes are run in order\nrespecting before/after constraints.\n\nArgs:\npasses (iterable): An iterable of pass names that should\nbe included in the optimization passes run.", "source": "codesearchnet"}
{"code": "def listdir(self, target_directory):\n        \n        target_directory = self.resolve_path(target_directory, allow_fd=True)\n        directory = self.confirmdir(target_directory)\n        directory_contents = directory.contents\n        return list(directory_contents.keys())", "docstring": "Return a list of file names in target_directory.\n\nArgs:\ntarget_directory: Path to the target directory within the\nfake filesystem.\n\nReturns:\nA list of file names within the target directory in arbitrary\norder.\n\nRaises:\nOSError: if the target is not a directory.", "source": "juraj-google-style"}
{"code": "def AddSubkey(self, registry_key):\n    \n    name = registry_key.name.upper()\n    if name in self._subkeys:\n      raise KeyError(\n          'Subkey: {0:s} already exists.'.format(registry_key.name))\n\n    self._subkeys[name] = registry_key\n\n    key_path = self._JoinKeyPath([self._key_path, registry_key.name])\n    registry_key._key_path = key_path", "docstring": "Adds a subkey.\n\nArgs:\nregistry_key (WinRegistryKey): Windows Registry subkey.\n\nRaises:\nKeyError: if the subkey already exists.", "source": "juraj-google-style"}
{"code": "def init_op(self):\n    return self._init_op", "docstring": "Return the Init Op used by the supervisor.\n\nReturns:\nAn Op or `None`.", "source": "github-repos"}
{"code": "def match_criterion(self, tag):\n        \n        return tag.name == self.reference_tag_name and \\\n            tag.attrs.get('kind', '') == self.reference_tag_kind", "docstring": "Override. Determine if a tag has the desired name and kind attribute\nvalue.\n\nArgs:\ntag: A BeautifulSoup Tag.\n\nReturns:\nTrue if tag has the desired name and kind, otherwise False.", "source": "juraj-google-style"}
{"code": "def _pad_modernbert_output(inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int) -> torch.Tensor:\n    if inputs.dim() == 1:\n        output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device)\n        output[indices] = inputs\n        padded_inputs = output.view(batch, seqlen)\n    else:\n        _, *rest = inputs.shape\n        output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device)\n        output[indices] = inputs\n        padded_inputs = output.view(batch, seqlen, *rest)\n    return padded_inputs", "docstring": "Add padding to sequences.\n\nArgs:\ninputs: (total_nnz, ...) or (total_nnz,), where total_nnz = number of tokens selected in attention_mask.\nindices: (total_nnz)\nbatch: int, batch size\nseqlen: int, max sequence length\n\nReturns:\npadded_inputs: (batch, seqlen, ...) or (batch, seqlen)", "source": "github-repos"}
{"code": "def is_generic_union(type_: Type) -> bool:\n    \n    if hasattr(typing, '_GenericAlias'):\n        \n        return (isinstance(type_, typing._GenericAlias) and     \n                type_.__origin__ is Union)\n    else:\n        if hasattr(typing, '_Union'):\n            \n            return isinstance(type_, typing._Union)             \n        else:\n            \n            return isinstance(type_, typing.UnionMeta)          \n    raise RuntimeError('Could not determine whether type is a Union. Is this'\n                       ' a YAtiML-supported Python version?')", "docstring": "Determines whether a type is a Union[...].\n\nHow to do this varies for different Python versions, due to the\ntyping library not having a stable API. This functions smooths\nover the differences.\n\nArgs:\ntype_: The type to check.\n\nReturns:\nTrue iff it's a Union[...something...].", "source": "juraj-google-style"}
{"code": "def FromEncoded(cls, bindata):\n    if (len(bindata) != 8):\n        raise ArgumentError('Invalid binary slot descriptor with invalid length', length=len(bindata), expected=8, data=bindata)\n    (slot, match_op) = struct.unpack('<B6xB', bindata)\n    match_name = cls.KNOWN_MATCH_CODES.get(match_op)\n    if (match_name is None):\n        raise ArgumentError('Unknown match operation specified in binary slot descriptor', operation=match_op, known_match_ops=cls.KNOWN_MATCH_CODES)\n    if (match_name == 'match_controller'):\n        return SlotIdentifier(controller=True)\n    if (match_name == 'match_slot'):\n        return SlotIdentifier(slot=slot)\n    raise ArgumentError('Unsupported match operation in binary slot descriptor', match_op=match_name)", "docstring": "Create a slot identifier from an encoded binary descriptor.\n\nThese binary descriptors are used to communicate slot targeting\nto an embedded device.  They are exactly 8 bytes in length.\n\nArgs:\nbindata (bytes): The 8-byte binary descriptor.\n\nReturns:\nSlotIdentifier", "source": "codesearchnet"}
{"code": "def _push_frontier(self, early_frontier: Dict[(ops.Qid, int)], late_frontier: Dict[(ops.Qid, int)], update_qubits: Iterable[ops.Qid]=None) -> Tuple[(int, int)]:\n    if (update_qubits is None):\n        update_qubits = set(early_frontier).difference(late_frontier)\n    n_new_moments = (max(((early_frontier.get(q, 0) - late_frontier[q]) for q in late_frontier)) if late_frontier else 0)\n    if (n_new_moments > 0):\n        insert_index = min(late_frontier.values())\n        self._moments[insert_index:insert_index] = ([ops.Moment()] * n_new_moments)\n        for q in update_qubits:\n            if (early_frontier.get(q, 0) > insert_index):\n                early_frontier[q] += n_new_moments\n        return (insert_index, n_new_moments)\n    return (0, 0)", "docstring": "Inserts moments to separate two frontiers.\n\nAfter insertion n_new moments, the following holds:\nfor q in late_frontier:\nearly_frontier[q] <= late_frontier[q] + n_new\nfor q in update_qubits:\nearly_frontier[q] the identifies the same moment as before\n(but whose index may have changed if this moment is after\nthose inserted).\n\nArgs:\nearly_frontier: The earlier frontier. For qubits not in the later\nfrontier, this is updated to account for the newly inserted\nmoments.\nlate_frontier: The later frontier. This is not modified.\nupdate_qubits: The qubits for which to update early_frontier to\naccount for the newly inserted moments.\n\nReturns:\n(index at which new moments were inserted, how many new moments\nwere inserted) if new moments were indeed inserted. (0, 0)\notherwise.", "source": "codesearchnet"}
{"code": "def run(self, resources):\n        \n        hwman = resources['connection']\n        con = hwman.hwman.controller()\n        test_interface = con.test_interface()\n        try:\n            test_interface.synchronize_clock()\n            print('Time currently set at %s' % test_interface.current_time_str())\n        except:\n            raise ArgumentError('Error setting RTC time, check if controller actually has RTC or if iotile-support-lib-controller-3 is updated')", "docstring": "Sets the RTC timestamp to UTC.\n\nArgs:\nresources (dict): A dictionary containing the required resources that\nwe needed access to in order to perform this step.", "source": "juraj-google-style"}
{"code": "def _get_data_buffer_time_limit_ms(experiments):\n    for experiment in experiments:\n        if re.match('data_buffer_time_limit_ms=', experiment):\n            return int(re.match('data_buffer_time_limit_ms=(?P<data_buffer_time_limit_ms>.*)', experiment).group('data_buffer_time_limit_ms'))\n    return 0", "docstring": "Defines the time limt of the outbound data buffering.\n\nNote: data_buffer_time_limit_ms is an experimental flag and might\nnot be available in future releases.\n\nReturns:\nan int indicating the time limit in milliseconds of the outbound\ndata buffering. Default is 0 (disabled)", "source": "github-repos"}
{"code": "def sample_with_temperature(x, dim, temperature=1.0, dtype=tf.int32, name=None):\n  \n  dim = convert_to_dimension(dim)\n  with tf.name_scope(name, default_name=\"sample_with_temperature\"):\n    if temperature != 0.0:\n      \n      \n      \n      \n      \n      tiny_val = 1e-9\n      g = -log(-log(\n          random_uniform(\n              x.mesh,\n              x.shape,\n              minval=tiny_val,\n              maxval=1.,\n              dtype=x.dtype)))\n      x += g * temperature\n    return argmax(x, dim, dtype, name)", "docstring": "Either argmax or random sampling.\n\nArgs:\nx: a Tensor.\ndim: a Dimension in x.shape.dims\ntemperature: a float  0.0=argmax 1.0=random\ndtype: a tf.dtype (for the output)\nname: an optional string\n\nReturns:\na Tensor with type dtype.", "source": "juraj-google-style"}
{"code": "def _process_celeba_config_file(self, file_path):\n    \n    with tf.io.gfile.GFile(file_path) as f:\n      data_raw = f.read()\n    lines = data_raw.split(\"\\n\")\n\n    keys = lines[1].strip().split()\n    values = {}\n    \n    for line in lines[2:-1]:\n      row_values = line.strip().split()\n      \n      values[row_values[0]] = [int(v) for v in row_values[1:]]\n    return keys, values", "docstring": "Unpack the celeba config file.\n\nThe file starts with the number of lines, and a header.\nAfterwards, there is a configuration for each file: one per line.\n\nArgs:\nfile_path: Path to the file with the configuration.\n\nReturns:\nkeys: names of the attributes\nvalues: map from the file name to the list of attribute values for\nthis file.", "source": "juraj-google-style"}
{"code": "def _align_monomer(self, monomer, mon_vector, move_direction):\n        \n        axis = np.cross(mon_vector, move_direction)\n        origin = monomer[self.start].coords\n        angle = get_angle(mon_vector, move_direction)\n        op = SymmOp.from_origin_axis_angle(origin, axis, angle)\n        monomer.apply_operation(op)", "docstring": "rotate the monomer so that it is aligned along the move direction\n\nArgs:\nmonomer (Molecule)\nmon_vector (numpy.array): molecule vector that starts from the\nstart atom index to the end atom index\nmove_direction (numpy.array): the direction of the polymer chain\nextension", "source": "juraj-google-style"}
{"code": "def Matches(self, file_entry):\n    \n    if not self._file_scanner or not file_entry.IsFile():\n      return None\n\n    file_object = file_entry.GetFileObject()\n    if not file_object:\n      return False\n\n    try:\n      scan_state = pysigscan.scan_state()\n      self._file_scanner.scan_file_object(scan_state, file_object)\n\n    except IOError as exception:\n      \n      location = getattr(file_entry.path_spec, 'location', '')\n      logging.error((\n          '[skipping] unable to scan file: {0:s} for signatures '\n          'with error: {1!s}').format(location, exception))\n      return False\n\n    finally:\n      file_object.close()\n\n    return scan_state.number_of_scan_results > 0", "docstring": "Compares the file entry against the filter.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to compare.\n\nReturns:\nbool: True if the file entry matches the filter, False if not or\nNone if the filter does not apply.", "source": "juraj-google-style"}
{"code": "def _to_backend_layout(tensor_layout):\n    if tensor_layout.device_mesh is None:\n        raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.')\n    sharding_specs = [axis if axis else dtensor.UNSHARDED for axis in tensor_layout.axes]\n    dtensor_mesh = tensor_layout.device_mesh.backend_mesh\n    return dtensor.Layout(sharding_specs=sharding_specs, mesh=dtensor_mesh)", "docstring": "Convert the TensorLayout to Tensorflow backend specific Sharding.\n\nArgs:\ntensor_layout: TensorLayout instance to convert.\n\nReturns:\nA `tf.dtensor.Layout` instance.", "source": "github-repos"}
{"code": "def rtt_get_num_down_buffers(self):\n        \n        cmd = enums.JLinkRTTCommand.GETNUMBUF\n        dir = ctypes.c_int(enums.JLinkRTTDirection.DOWN)\n        return self.rtt_control(cmd, dir)", "docstring": "After starting RTT, get the current number of down buffers.\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe number of configured down buffers on the target.\n\nRaises:\nJLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.", "source": "juraj-google-style"}
{"code": "def _check_lambda_alias(self):\n    aliases = self.lambda_client.list_aliases(FunctionName=self.app_name)\n    matched_alias = False\n    for alias in aliases['Aliases']:\n        if (alias['Name'] == self.env):\n            LOG.info('Found alias %s for function %s', self.env, self.app_name)\n            matched_alias = True\n            break\n    else:\n        LOG.info('No alias %s found for function %s', self.env, self.app_name)\n    return matched_alias", "docstring": "Check if lambda alias exists.\n\nReturns:\nTrue if alias exists\nFalse if alias does not exist", "source": "codesearchnet"}
{"code": "def _build_projection_expression(clean_table_keys):\n    \n    projection_expression = ''\n    for key in clean_table_keys[:-1]:\n        projection_expression += ('{},').format(key)\n    projection_expression += clean_table_keys[-1]\n    return projection_expression", "docstring": "Given cleaned up keys, this will return a projection expression for\nthe dynamodb lookup.\n\nArgs:\nclean_table_keys (dict): keys without the data types attached\n\nReturns:\nstr: A projection expression for the dynamodb lookup.", "source": "juraj-google-style"}
{"code": "def xsrf_secret_key():\n    secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)\n    if (not secret):\n        model = SiteXsrfSecretKey.get_or_insert(key_name='site')\n        if (not model.secret):\n            model.secret = _generate_new_xsrf_secret_key()\n            model.put()\n        secret = model.secret\n        memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE)\n    return str(secret)", "docstring": "Return the secret key for use for XSRF protection.\n\nIf the Site entity does not have a secret key, this method will also create\none and persist it.\n\nReturns:\nThe secret key.", "source": "codesearchnet"}
{"code": "def derive_temporary_python2_environment(\n        destination_directory: str,\n        python3_environment: PreparedEnv,\n        verbose: bool,\n        env_name: str = '.test_virtualenv_py2',\n        python_path: str = \"/usr/bin/python2.7\") -> PreparedEnv:\n    \n\n    shutil.rmtree(destination_directory)\n    input_directory = cast(str, python3_environment.destination_directory)\n    os.chdir(input_directory)\n    conversion_script_path = os.path.join(\n        input_directory,\n        'dev_tools',\n        'python2.7-generate.sh')\n    shell_tools.run_cmd('bash',\n                        conversion_script_path,\n                        destination_directory,\n                        input_directory,\n                        python3_environment.virtual_env_path,\n                        out=sys.stderr)\n    os.chdir(destination_directory)\n\n    \n    env_path = os.path.join(destination_directory, env_name)\n    \n    req_path = os.path.join(destination_directory, 'requirements.txt')\n    dev_req_path = os.path.join(destination_directory,\n                                'pip-list-test-tools.txt')\n    contrib_req_path = os.path.join(destination_directory,\n                                    'cirq',\n                                    'contrib',\n                                    'contrib-requirements.txt')\n    req_paths = [req_path, dev_req_path, contrib_req_path]\n    create_virtual_env(venv_path=env_path,\n                       python_path=python_path,\n                       requirements_paths=req_paths,\n                       verbose=verbose)\n\n    return PreparedEnv(github_repo=python3_environment.repository,\n                       actual_commit_id=python3_environment.actual_commit_id,\n                       compare_commit_id=python3_environment.compare_commit_id,\n                       destination_directory=destination_directory,\n                       virtual_env_path=env_path)", "docstring": "Creates a python 2.7 environment starting from a prepared python 3 one.\n\nArgs:\ndestination_directory: Where to put the python 2 environment.\npython3_environment: The prepared environment to start from.\nverbose: When set, more progress output is produced.\nenv_name: The name to use for the virtualenv directory.\npython_path: The python binary to use.\n\nReturns:\nA description of the environment that was prepared.", "source": "juraj-google-style"}
{"code": "def _reset_offset(self, partition):\n    timestamp = self._subscriptions.assignment[partition].reset_strategy\n    if (timestamp is OffsetResetStrategy.EARLIEST):\n        strategy = 'earliest'\n    elif (timestamp is OffsetResetStrategy.LATEST):\n        strategy = 'latest'\n    else:\n        raise NoOffsetForPartitionError(partition)\n    log.debug('Resetting offset for partition %s to %s offset.', partition, strategy)\n    offsets = self._retrieve_offsets({partition: timestamp})\n    if (partition not in offsets):\n        raise NoOffsetForPartitionError(partition)\n    offset = offsets[partition][0]\n    if self._subscriptions.is_assigned(partition):\n        self._subscriptions.seek(partition, offset)", "docstring": "Reset offsets for the given partition using the offset reset strategy.\n\nArguments:\npartition (TopicPartition): the partition that needs reset offset\n\nRaises:\nNoOffsetForPartitionError: if no offset reset strategy is defined", "source": "codesearchnet"}
{"code": "def get_highest_values(self, count):\n    count = int(count)\n    assert (count <= len(self._values)), 'count must be smaller than or equal to values length. {} > {}.'.format(count, len(self._values))\n    assert (count > 0), 'count must be greater than 0. Got {}.'.format(count)\n    highest_values = sorted(self._values, reverse=True)[0:count]\n    highest_values_index = sorted(list(xrange(len(self._values))), key=(lambda k: self._values[k]), reverse=True)[0:count]\n    return (highest_values, highest_values_index)", "docstring": "Get a list of the the x highest values of the Data Collection and their indices.\n\nThis is useful for situations where one needs to know the times of\nthe year when the largest values of a data collection occur.  For example,\nthere is a European dayight code that requires an analysis for the hours\nof the year with the greatest exterior illuminance level.  This method\ncan be used to help build a shcedule for such a study.\n\nArgs:\ncount: Integer representing the number of highest values to account for.\n\nReturns:\nhighest_values: The n highest values in data list, ordered from\nhighest to lowest.\nhighest_values_index: Indicies of the n highest values in data\nlist, ordered from highest to lowest.", "source": "codesearchnet"}
{"code": "def _validate_testbed_configs(testbed_configs):\n    seen_names = set()\n    for config in testbed_configs:\n        name = config[keys.Config.key_testbed_name.value]\n        _validate_testbed_name(name)\n        if name in seen_names:\n            raise MoblyConfigError('Duplicate testbed name %s found.' % name)\n        seen_names.add(name)", "docstring": "Validates the testbed configurations.\n\nArgs:\ntestbed_configs: A list of testbed configuration dicts.\n\nRaises:\nMoblyConfigError: Some parts of the configuration is invalid.", "source": "github-repos"}
{"code": "def _WriteTimestamp(self, timestamp, filename):\n    try:\n        os.makedirs(self.timestamp_dir)\n    except OSError as e:\n        if e.errno == errno.EEXIST and os.path.isdir(self.timestamp_dir):\n            pass\n        else:\n            raise\n    filedesc, temp_filename = tempfile.mkstemp(prefix='nsscache-update-', dir=self.timestamp_dir)\n    time_string = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp))\n    try:\n        os.write(filedesc, b'%s\\n' % time_string.encode())\n        os.fsync(filedesc)\n        os.close(filedesc)\n    except OSError:\n        os.unlink(temp_filename)\n        self.log.warning('writing timestamp failed!')\n        return False\n    os.chmod(temp_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)\n    os.rename(temp_filename, filename)\n    self.log.debug('wrote timestamp %s to file %r', time_string, filename)\n    return True", "docstring": "Write a given timestamp out to a file, converting to the ISO-8601\nformat.\n\nWe convert internal timestamp format (epoch) to ISO-8601 format, i.e.\nYYYY-MM-DDThh:mm:ssZ which is basically UTC time, then write it out to a\nfile.\n\nArgs:\ntimestamp: A String in nss_cache internal timestamp format, aka time_t.\nfilename: A String naming the file to write to.\n\nReturns:\nA boolean indicating success of write.", "source": "github-repos"}
{"code": "def human_timestamp(__timestamp: datetime.datetime) -> str:\n    numstr = '. a two three four five six seven eight nine ten'.split()\n    matches = [(((60 * 60) * 24) * 365), (((60 * 60) * 24) * 28), (((60 * 60) * 24) * 7), ((60 * 60) * 24), (60 * 60), 60, 1]\n    match_names = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second']\n    if (__timestamp.tzinfo is None):\n        __timestamp = __timestamp.replace(tzinfo=datetime.timezone.utc)\n    now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)\n    delta = int((now - __timestamp).total_seconds())\n    for scale in matches:\n        i = (delta \n        if i:\n            name = match_names[matches.index(scale)]\n            break\n    else:\n        i = 0\n    if (i == 0):\n        result = 'right now'\n    elif ((i == 1) and (name in ('year', 'month', 'week'))):\n        result = 'last {}'.format(name)\n    elif ((i == 1) and (name == 'day')):\n        result = 'yesterday'\n    elif ((i == 1) and (name == 'hour')):\n        result = 'about an hour ago'\n    else:\n        result = 'about {} {}{} ago'.format((i if (i > 10) else numstr[i]), name, ('s' if (i > 1) else ''))\n    return result", "docstring": "Format a relative time.\n\nArgs:\n__timestamp: Event to generate relative timestamp against\nReturns:\nHuman readable date and time offset", "source": "codesearchnet"}
{"code": "def ParseChat(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    participants = self._GetRowValue(query_hash, row, 'participants')\n    author = self._GetRowValue(query_hash, row, 'author')\n    dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')\n    from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')\n    accounts = []\n    participants = participants.split(' ')\n    for participant in participants:\n        if (participant != author):\n            accounts.append(participant)\n    to_account = ', '.join(accounts)\n    if (not to_account):\n        to_account = (dialog_partner or 'Unknown User')\n    from_account = '{0:s} <{1:s}>'.format(from_displayname, author)\n    event_data = SkypeChatEventData()\n    event_data.from_account = from_account\n    event_data.query = query\n    event_data.text = self._GetRowValue(query_hash, row, 'body_xml')\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.to_account = to_account\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a chat message.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "codesearchnet"}
{"code": "def __driver_helper(self, line):\n    if (line.strip() == '?'):\n        self.stdout.write('\\n')\n        self.stdout.write(self.doc_string())\n    else:\n        toks = shlex.split(line[:(- 1)])\n        try:\n            msg = self.__get_help_message(toks)\n        except Exception as e:\n            self.stderr.write('\\n')\n            self.stderr.write(traceback.format_exc())\n            self.stderr.flush()\n        self.stdout.write('\\n')\n        self.stdout.write(msg)\n    self.stdout.write('\\n')\n    self.stdout.write(self.prompt)\n    self.stdout.write(line)\n    self.stdout.flush()", "docstring": "Driver level helper method.\n\n1.  Display help message for the given input. Internally calls\nself.__get_help_message() to obtain the help message.\n2.  Re-display the prompt and the input line.\n\nArguments:\nline: The input line.\n\nRaises:\nErrors from helper methods print stack trace without terminating\nthis shell. Other exceptions will terminate this shell.", "source": "codesearchnet"}
{"code": "def are_checksums_equal(checksum_a_pyxb, checksum_b_pyxb):\n    if (checksum_a_pyxb.algorithm != checksum_b_pyxb.algorithm):\n        raise ValueError('Cannot compare checksums calculated with different algorithms. a=\"{}\" b=\"{}\"'.format(checksum_a_pyxb.algorithm, checksum_b_pyxb.algorithm))\n    return (checksum_a_pyxb.value().lower() == checksum_b_pyxb.value().lower())", "docstring": "Determine if checksums are equal.\n\nArgs:\nchecksum_a_pyxb, checksum_b_pyxb: PyXB Checksum objects to compare.\n\nReturns:\nbool\n- **True**: The checksums contain the same hexadecimal values calculated with\nthe same algorithm. Identical checksums guarantee (for all practical\npurposes) that the checksums were calculated from the same sequence of bytes.\n- **False**: The checksums were calculated with the same algorithm but the\nhexadecimal values are different.\n\nRaises:\nValueError\nThe checksums were calculated with different algorithms, hence cannot be\ncompared.", "source": "codesearchnet"}
{"code": "def UpdateIncludeState(filename, include_dict, io=codecs):\n    headerfile = None\n    try:\n        headerfile = io.open(filename, 'r', 'utf8', 'replace')\n    except IOError:\n        return False\n    linenum = 0\n    for line in headerfile:\n        linenum += 1\n        clean_line = CleanseComments(line)\n        match = _RE_PATTERN_INCLUDE.search(clean_line)\n        if match:\n            include = match.group(2)\n            include_dict.setdefault(include, linenum)\n    return True", "docstring": "Fill up the include_dict with new includes found from the file.\n\nArgs:\nfilename: the name of the header to read.\ninclude_dict: a dictionary in which the headers are inserted.\nio: The io factory to use to read the file. Provided for testability.\n\nReturns:\nTrue if a header was successfully added. False otherwise.", "source": "codesearchnet"}
{"code": "def ExtractCredentialsFromPathSpec(self, path_spec):\n    credentials = manager.CredentialsManager.GetCredentials(path_spec)\n    for identifier in credentials.CREDENTIALS:\n        value = getattr(path_spec, identifier, None)\n        if (value is None):\n            continue\n        self.SetCredential(path_spec, identifier, value)", "docstring": "Extracts credentials from a path specification.\n\nArgs:\npath_spec (PathSpec): path specification to extract credentials from.", "source": "codesearchnet"}
{"code": "def clip_boxes(box, box_size: Tuple[int, int]):\n    assert torch.isfinite(box).all(), 'Box tensor contains infinite or NaN!'\n    height, width = box_size\n    x1 = box[:, 0].clamp(min=0, max=width)\n    y1 = box[:, 1].clamp(min=0, max=height)\n    x2 = box[:, 2].clamp(min=0, max=width)\n    y2 = box[:, 3].clamp(min=0, max=height)\n    box = torch.stack((x1, y1, x2, y2), dim=-1)\n    return box", "docstring": "Clip the boxes by limiting x coordinates to the range [0, width]\nand y coordinates to the range [0, height].\n\nArgs:\nbox (Tensor): The box to be clipped.\nbox_size (height, width): The clipping box's size.", "source": "github-repos"}
{"code": "def get_header_from_ops_and_kernels(ops_and_kernels, include_all_ops_and_kernels):\n    ops_and_kernels = sorted(ops_and_kernels)\n    ops = set((op for op, _ in ops_and_kernels))\n    result_list = []\n\n    def append(s):\n        result_list.append(s)\n    _, script_name = os.path.split(sys.argv[0])\n    append('\n    append('\n    append('\n    if include_all_ops_and_kernels:\n        append('\n        append('\n        append('\n    else:\n        line = \"\\n    namespace {\\n      constexpr const char* skip(const char* x) {\\n        return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;\\n      }\\n\\n      constexpr bool isequal(const char* x, const char* y) {\\n        return (*skip(x) && *skip(y))\\n                   ? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))\\n                   : (!*skip(x) && !*skip(y));\\n      }\\n\\n      template<int N>\\n      struct find_in {\\n        static constexpr bool f(const char* x, const char* const y[N]) {\\n          return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);\\n        }\\n      };\\n\\n      template<>\\n      struct find_in<0> {\\n        static constexpr bool f(const char* x, const char* const y[]) {\\n          return false;\\n        }\\n      };\\n    }  \n        line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\\n'\n        for _, kernel_class in ops_and_kernels:\n            if kernel_class is None:\n                continue\n            line += '\"%s\",\\n' % kernel_class\n        line += '};'\n        append(line)\n        append('\n        append('')\n        append('constexpr inline bool ShouldRegisterOp(const char op[]) {')\n        append('  return false')\n        for op in sorted(ops):\n            append('     || isequal(op, \"%s\")' % op)\n        append('  ;')\n        append('}')\n        append('\n        append('')\n        append('\n    append('\n    return '\\n'.join(result_list)", "docstring": "Returns a header for use with tensorflow SELECTIVE_REGISTRATION.\n\nArgs:\nops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.\ninclude_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op\nkernels are included.\n\nReturns:\nthe string of the header that should be written as ops_to_register.h.", "source": "github-repos"}
{"code": "def input_fn(filenames, tf_transform_output, batch_size=200):\n    transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy()\n    transformed_features = tf.contrib.learn.io.read_batch_features(filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)\n    return (transformed_features, transformed_features.pop(taxi.transformed_name(taxi.LABEL_KEY)))", "docstring": "Generates features and labels for training or evaluation.\n\nArgs:\nfilenames: [str] list of CSV files to read data from.\ntf_transform_output: A TFTransformOutput.\nbatch_size: int First dimension size of the Tensors returned by input_fn\n\nReturns:\nA (features, indices) tuple where features is a dictionary of\nTensors, and indices is a single Tensor of label indices.", "source": "github-repos"}
{"code": "def load(self, sess, tags, import_scope=None, **saver_kwargs):\n    saved_model_proto = parse_saved_model(self._export_dir)\n    metrics.IncrementReadApi(_LOADER_LABEL)\n    with sess.graph.as_default():\n        saver, _ = self.load_graph(sess.graph, tags, import_scope, **saver_kwargs)\n        self.restore_variables(sess, saver, import_scope)\n        self.run_init_ops(sess, tags, import_scope)\n    meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n    if len(saved_model_proto.meta_graphs) == 1 and saved_model_proto.meta_graphs[0].HasField('object_graph_def'):\n        metrics.IncrementRead(write_version='2')\n    else:\n        metrics.IncrementRead(write_version='1')\n    return meta_graph_def", "docstring": "Load the MetaGraphDef graph and restore variable values into the session.\n\nArgs:\nsess: tf.compat.v1.Session to restore variable values.\ntags: a set of string tags identifying a MetaGraphDef.\nimport_scope: Optional `string` -- if specified, prepend this string\nfollowed by '/' to all loaded tensor names. This scope is applied to\ntensor instances loaded into the passed session, but it is *not* written\nthrough to the static `MetaGraphDef` protocol buffer that is returned.\n**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.\n\nReturns:\n`MetagraphDef` proto of the graph that was loaded.", "source": "github-repos"}
{"code": "def _build_migrated_variables(checkpoint_reader, name_value_fn):\n    names_to_shapes = checkpoint_reader.get_variable_to_shape_map()\n    new_name_to_variable = {}\n    name_to_new_name = {}\n    for name in names_to_shapes:\n        value = checkpoint_reader.get_tensor(name)\n        (new_name, new_value) = name_value_fn(name, value)\n        if (new_name is None):\n            continue\n        name_to_new_name[name] = new_name\n        new_name_to_variable[new_name] = tf.Variable(new_value)\n    return (new_name_to_variable, name_to_new_name)", "docstring": "Builds the TensorFlow variables of the migrated checkpoint.\n\nArgs:\ncheckpoint_reader: A `tf.train.NewCheckPointReader` of the checkpoint to\nbe read from.\nname_value_fn: Function taking two arguments, `name` and `value`, which\nreturns the pair of new name and value for that a variable of that name.\n\nReturns:\nTuple of a dictionary with new variable names as keys and `tf.Variable`s as\nvalues, and a dictionary that maps the old variable names to the new\nvariable names.", "source": "codesearchnet"}
{"code": "def start_of_chunk(prev_tag, tag, prev_type, type_):\n    chunk_start = False\n    if (tag == 'B'):\n        chunk_start = True\n    if (tag == 'S'):\n        chunk_start = True\n    if ((prev_tag == 'E') and (tag == 'E')):\n        chunk_start = True\n    if ((prev_tag == 'E') and (tag == 'I')):\n        chunk_start = True\n    if ((prev_tag == 'S') and (tag == 'E')):\n        chunk_start = True\n    if ((prev_tag == 'S') and (tag == 'I')):\n        chunk_start = True\n    if ((prev_tag == 'O') and (tag == 'E')):\n        chunk_start = True\n    if ((prev_tag == 'O') and (tag == 'I')):\n        chunk_start = True\n    if ((tag != 'O') and (tag != '.') and (prev_type != type_)):\n        chunk_start = True\n    return chunk_start", "docstring": "Checks if a chunk started between the previous and current word.\n\nArgs:\nprev_tag: previous chunk tag.\ntag: current chunk tag.\nprev_type: previous type.\ntype_: current type.\n\nReturns:\nchunk_start: boolean.", "source": "codesearchnet"}
{"code": "def Tensors(self, run, tag):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.Tensors(tag)", "docstring": "Retrieve the tensor events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.TensorEvent`s.", "source": "juraj-google-style"}
{"code": "def build(self, backend=None):\n        \n\n        \n        n_total = len(self.data.index)\n        if len(self.completes):\n            completes = [set(x) for x in sum(self.completes, [])]\n            completes = set.intersection(*completes)\n        else:\n            completes = [x for x in range(len(self.data.index))]\n        self.clean_data = self.data.iloc[list(completes), :]\n        \n        if len(completes) < n_total:\n            msg = \"Automatically removing {}/{} rows from the dataset.\"\n            msg = msg.format(n_total - len(completes), n_total)\n            warnings.warn(msg)\n\n        \n        for term_args in self.added_terms:\n            self._add(**term_args)\n\n        \n        self._set_priors(**self._added_priors)\n\n        \n        for name, term in self.terms.items():\n            type_ = 'intercept' if name == 'Intercept' else \\\n                'random' if self.terms[name].random else 'fixed'\n            term.prior = self._prepare_prior(term.prior, type_)\n\n        \n        if backend is None:\n            if self._backend_name is None:\n                raise ValueError(\"Error: no backend was passed or set in the \"\n                                 \"Model; did you forget to call fit()?\")\n            backend = self._backend_name\n\n        \n        if self.y is None:\n            raise ValueError(\"No outcome (y) variable is set! Please specify \"\n                             \"an outcome variable using the formula interface \"\n                             \"before build() or fit().\")\n\n        \n        \n        \n        terms = [t for t in self.fixed_terms.values() if t.name != 'Intercept']\n\n        if len(self.fixed_terms) > 1:\n\n            X = [pd.DataFrame(x.data, columns=x.levels) for x in terms]\n            X = pd.concat(X, axis=1)\n\n            self.dm_statistics = {\n                'r2_x': pd.Series({\n                    x: sm.OLS(endog=X[x],\n                              exog=sm.add_constant(X.drop(x, axis=1))\n                              if 'Intercept' in self.term_names\n                              else X.drop(x, axis=1)).fit().rsquared\n                    for x in list(X.columns)}),\n                'sd_x': X.std(),\n                'mean_x': X.mean(axis=0)\n            }\n\n            \n            \n            \n            mat = X.corr()\n            for x in list(mat.columns):\n                mat.loc[x, x] = self.dm_statistics['mean_x'][x]\n            self._diagnostics = {\n                \n                \n                'VIF': 1/(1 - self.dm_statistics['r2_x']),\n                'corr_mean_X': mat\n            }\n\n            \n            if any(self.dm_statistics['r2_x'] > .999):\n                raise ValueError(\n                    \"There is perfect collinearity among the fixed effects!\\n\"\n                    \"Printing some design matrix statistics:\\n\" +\n                    str(self.dm_statistics) + '\\n' +\n                    str(self._diagnostics))\n\n        \n        \n        num_cats = [x.data.size for x in self.fixed_terms.values()]\n        if any(np.array(num_cats) == 0):\n            raise ValueError(\n                \"At least one categorical predictor contains only 1 category!\")\n\n        \n        if len(self.terms) > 0:\n            \n            if self.taylor is not None:\n                taylor = self.taylor\n            else:\n                taylor = 5 if self.family.name == 'gaussian' else 1\n            scaler = PriorScaler(self, 
taylor=taylor)\n            scaler.scale()\n\n        \n        \n        if self.family.name == 'bernoulli' and np.max(self.y.data) < 1.01:\n            event = next(\n                i for i, x in enumerate(self.y.data.flatten()) if x > .99)\n            warnings.warn('Modeling the probability that {}==\\'{}\\''.format(\n                self.y.name, str(self.clean_data[self.y.name].iloc[event])))\n\n        self._set_backend(backend)\n        self.backend.build(self)\n        self.built = True", "docstring": "Set up the model for sampling/fitting.\n\nPerforms any steps that require access to all model terms (e.g., scaling priors\non each term), then calls the BackEnd's build() method.\n\nArgs:\nbackend (str): The name of the backend to use for model fitting.\nCurrently, 'pymc' and 'stan' are supported. If None, assume\nthat fit() has already been called (possibly without building),\nand look in self._backend_name.", "source": "juraj-google-style"}
{"code": "def GetIamPolicy(self, request, global_params=None):\n    config = self.GetMethodConfig('GetIamPolicy')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.\n\nArgs:\nrequest: (BigqueryTablesGetIamPolicyRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Policy) The response message.", "source": "github-repos"}
{"code": "def receiveds_format(receiveds):\n    \n    log.debug(\"Receiveds for this email are parsed\")\n\n    output = []\n    counter = Counter()\n\n    for i in receiveds[::-1]:\n        \n        j = {k: v.strip() for k, v in i.items() if v}\n\n        \n        j[\"hop\"] = counter[\"hop\"] + 1\n\n        \n        if i.get(\"date\"):\n            \n            \n            i[\"date\"] = i[\"date\"].split(\";\")[-1]\n            try:\n                j[\"date_utc\"], _ = convert_mail_date(i[\"date\"])\n            except TypeError:\n                j[\"date_utc\"] = None\n\n        \n        size = len(output)\n        now = j.get(\"date_utc\")\n\n        if size and now:\n            before = output[counter[\"hop\"] - 1].get(\"date_utc\")\n            if before:\n                j[\"delay\"] = (now - before).total_seconds()\n            else:\n                j[\"delay\"] = 0\n        else:\n            j[\"delay\"] = 0\n\n        \n        output.append(j)\n\n        \n        counter[\"hop\"] += 1\n    else:\n        for i in output:\n            if i.get(\"date_utc\"):\n                i[\"date_utc\"] = i[\"date_utc\"].isoformat()\n        else:\n            return output", "docstring": "Given a list of receiveds hop, adds metadata and reformat\nfield values\n\nArgs:\nreceiveds (list): list of receiveds hops already formatted\n\nReturns:\nlist of receiveds reformated and with new fields", "source": "juraj-google-style"}
{"code": "def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"user\": user})\n        return self.api_call(\"users.getPresence\", http_verb=\"GET\", params=kwargs)", "docstring": "Gets user presence information.\n\nArgs:\nuser (str): User to get presence info on. Defaults to the authed user.\ne.g. 'W1234567890'", "source": "juraj-google-style"}
{"code": "def chown(self, path, uid, gid, dir_fd=None, follow_symlinks=None):\n    if (follow_symlinks is None):\n        follow_symlinks = True\n    elif (sys.version_info < (3, 3)):\n        raise TypeError(\"chown() got an unexpected keyword argument 'follow_symlinks'\")\n    path = self._path_with_dir_fd(path, self.chown, dir_fd)\n    try:\n        file_object = self.filesystem.resolve(path, follow_symlinks, allow_fd=True)\n    except IOError as io_error:\n        if (io_error.errno == errno.ENOENT):\n            self.filesystem.raise_os_error(errno.ENOENT, path)\n        raise\n    if (not ((is_int_type(uid) or (uid is None)) and (is_int_type(gid) or (gid is None)))):\n        raise TypeError('An integer is required')\n    if (uid != (- 1)):\n        file_object.st_uid = uid\n    if (gid != (- 1)):\n        file_object.st_gid = gid", "docstring": "Set ownership of a faked file.\n\nArgs:\npath: (str) Path to the file or directory.\nuid: (int) Numeric uid to set the file or directory to.\ngid: (int) Numeric gid to set the file or directory to.\ndir_fd: (int) If not `None`, the file descriptor of a directory,\nwith `path` being relative to this directory.\nNew in Python 3.3.\nfollow_symlinks: (bool) If `False` and path points to a symlink,\nthe link itself is changed instead of the linked object.\nNew in Python 3.3.\n\nRaises:\nOSError: if path does not exist.\n\n`None` is also allowed for `uid` and `gid`.  This permits `os.rename`\nto use `os.chown` even when the source file `uid` and `gid` are\n`None` (unset).", "source": "codesearchnet"}
{"code": "def parse(self, message, schema):\n        \n        func = {\n            'audit-log': self._parse_audit_log_msg,\n            'event': self._parse_event_msg,\n        }[schema]\n        return func(message)", "docstring": "Parse message according to schema.\n\n`message` should already be validated against the given schema.\nSee :ref:`schemadef` for more information.\n\nArgs:\nmessage (dict): message data to parse.\nschema (str): valid message schema.\nReturns:\n(dict): parsed message", "source": "juraj-google-style"}
{"code": "def get_adversary_phone_asset(self, main_type, sub_type, unique_id, asset_id, params=None):\n        \n        return self.adversary_phone_asset(main_type, sub_type, unique_id, asset_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nasset_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def should_use_network(self, request):\n    return (self.networking and all((fn(request) for fn in self.network_filters)))", "docstring": "Verifies if real networking mode should be used for the given\nrequest, passing it to the registered network filters.\n\nArguments:\nrequest (pook.Request): outgoing HTTP request to test.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def _parse_publisher(details):\n    \n    publisher = _get_td_or_none(\n        details,\n        \"ctl00_ContentPlaceHolder1_tblRowNakladatel\"\n    )\n\n    \n    if not publisher:\n        return None\n\n    publisher = dhtmlparser.removeTags(publisher).strip()\n\n    \n    if not publisher:\n        return None\n\n    return publisher", "docstring": "Parse publisher of the book.\n\nArgs:\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr/None: Publisher's name as string or None if not found.", "source": "juraj-google-style"}
{"code": "def _extract_field_with_regex(self, field):\n    matched = re.search(field, self.text)\n    if (not matched):\n        err_msg = u'Failed to extract data with regex! => {}\\n'.format(field)\n        err_msg += u'response body: {}\\n'.format(self.text)\n        logger.log_error(err_msg)\n        raise exceptions.ExtractFailure(err_msg)\n    return matched.group(1)", "docstring": "extract field from response content with regex.\nrequests.Response body could be json or html text.\n\nArgs:\nfield (str): regex string that matched r\".*\\(.*\\).*\"\n\nReturns:\nstr: matched content.\n\nRaises:\nexceptions.ExtractFailure: If no content matched with regex.\n\nExamples:\n>>> # self.text: \"LB123abcRB789\"\n>>> filed = \"LB[\\d]*(.*)RB[\\d]*\"\n>>> _extract_field_with_regex(field)\nabc", "source": "codesearchnet"}
{"code": "def __tf_unflatten__(cls, metadata, components):", "docstring": "Create a user-defined object from (metadata, components).\n\nArgs:\nmetadata: a custom Python object that stands for the static config for\nreconstructing a new object of the current class.\ncomponents: a `tuple` that contains the dynamic data fields of the current\nclass, for object reconstruction.\n\nReturns:\nThe user-defined object, with the same class of the current object.\n\nImplementation Note:\n- This method should not invoke any TensorFlow ops.\n- This method only needs to unflatten the current level. If the object has\nan attribute that also need custom unflattening, nest functions will\nutilize this method to do recursive unflattening.", "source": "github-repos"}
{"code": "def set_signal_type(self, sig_type):\n    if isinstance(sig_type, str):\n        sig_type = [sig_type]\n    self.snr_input.signal_type = sig_type\n    return", "docstring": "Set the signal type of interest.\n\nSets the signal type for which the SNR is calculated.\nThis means inspiral, merger, and/or ringdown.\n\nArgs:\nsig_type (str or list of str): Signal type desired by user.\nChoices are `ins`, `mrg`, `rd`, `all` for circular waveforms created with PhenomD.\nIf eccentric waveforms are used, must be `all`.", "source": "codesearchnet"}
{"code": "def _summarize_eager(tensor, summarize=None):\n    if summarize is None:\n        summarize = 3\n    elif summarize < 0:\n        summarize = array_ops.size(tensor)\n    if tensor._rank():\n        flat = tensor.numpy().reshape((-1,))\n        lst = [str(x) for x in flat[:summarize]]\n        if len(lst) < flat.size:\n            lst.append('...')\n    elif gen_math_ops.not_equal(summarize, 0):\n        lst = [str(tensor.numpy())]\n    else:\n        lst = []\n    return ', '.join(lst)", "docstring": "Returns a summarized string representation of eager `tensor`.\n\nArgs:\ntensor: EagerTensor to summarize\nsummarize: Include these many first elements of `array`", "source": "github-repos"}
{"code": "def register_name(self, register_index):\n    result = self._dll.JLINKARM_GetRegisterName(register_index)\n    return ctypes.cast(result, ctypes.c_char_p).value.decode()", "docstring": "Retrives and returns the name of an ARM CPU register.\n\nArgs:\nself (JLink): the ``JLink`` instance\nregister_index (int): index of the register whose name to retrieve\n\nReturns:\nName of the register.", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, deterministic: bool=True, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n        hidden_states = outputs[0]\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.model.variables['params']['shared']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n        else:\n            lm_logits = module.lm_head(hidden_states)\n        lm_logits += module.final_logits_bias.astype(self.dtype)\n        return (lm_logits, outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = 
unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration\n\n>>> model = FlaxPegasusForConditionalGeneration.from_pretrained(\"google/pegasus-large\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/pegasus-large\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def spawn_watcher(self, label, target=None, eternal=False):\n    if (label not in self._sources):\n        raise YapconfSourceError(('Cannot watch %s no source named %s' % (label, label)))\n    current_config = self._sources[label].get_data()\n    handler = ConfigChangeHandler(current_config, self, target)\n    return self._sources[label].watch(handler, eternal)", "docstring": "Spawns a config watcher in a separate daemon thread.\n\nIf a particular config value changes, and the item has a\n``watch_target`` defined, then that method will be called.\n\nIf a ``target`` is passed in, then it will call the ``target``\nanytime the config changes.\n\nArgs:\nlabel (str): Should match a label added through ``add_source``\ntarget (func): Should be a function that takes two arguments,\nthe old configuration and the new configuration.\neternal (bool): Determines if watcher threads should be restarted\nif they die.\n\nReturns:\nThe thread that was spawned.", "source": "codesearchnet"}
{"code": "def _DepthwiseConv2dNumpy(x1, x2, strides, padding, data_format, dilations):\n    if data_format == 'NCHW':\n        x1 = np.transpose(x1, (0, 3, 1, 2))\n        strides = [strides[0], strides[3], strides[1], strides[2]]\n        if dilations:\n            dilations = [dilations[0], dilations[3], dilations[1], dilations[2]]\n    if dilations:\n        fh, fw, c, o = x2.shape\n        new_fh = (fh - 1) * dilations[0] + 1\n        new_fw = (fw - 1) * dilations[1] + 1\n        new_x2 = np.zeros((new_fh, new_fw, c, o))\n        for i in range(fh):\n            for j in range(fw):\n                new_x2[i * dilations[0], j * dilations[1], :] = x2[i, j, :, :]\n        x2 = new_x2\n    if padding == 'SAME':\n\n        def PaddingsForDim(input_dim, filter_dim, stride):\n            \n            if input_dim % stride == 0:\n                total_padding = max(filter_dim - stride, 0)\n            else:\n                total_padding = max(filter_dim - input_dim % stride, 0)\n            pad_before = total_padding \n            pad_after = total_padding - pad_before\n            return (pad_before, pad_after)\n        padding = [(0, 0), PaddingsForDim(x1.shape[1], x2.shape[0], strides[1]), PaddingsForDim(x1.shape[2], x2.shape[1], strides[2]), (0, 0)]\n    elif padding == 'VALID':\n        padding = [(0, 0)] * 4\n    x1 = np.pad(x1, padding, 'constant')\n    y = _DepthwiseConv2dNumpyBasic(x1, x2, strides)\n    if data_format == 'NCHW':\n        y = np.transpose(y, (0, 2, 3, 1))\n    return y", "docstring": "Compute depthwise_conv2d using Numpy.\n\nThis allows use to test TensorFlow's depthwise_conv2d by comparing to the\nNumpy version.\n\nUnlike `_DepthwiseConv2dNumpyBasic`, this supports more advanced features\nlike padding.\n\nArgs:\nx1: The input Numpy array.\nx2: The filter Numpy array.\nstrides: A Python list of 4 elements representing the strides.\npadding: The padding. \"SAME\", \"VALID\", or a list of explicit paddings.\ndata_format: \"NHWC\" or \"NCHW\".\ndilations: A list of 2 elements, representing the dilations.\n\nReturns:\nThe depthwise conv2d as a Numpy array.", "source": "github-repos"}
{"code": "def __init__(self, process: Process):\n        \n        self.process = process\n        self.stopped_due_to_worker_shutdown = False", "docstring": "Constructor.\n\nArgs:\nprocess (Process): task process", "source": "juraj-google-style"}
{"code": "def fetch(self, subscription_id, data={}, **kwargs):\n    return super(Subscription, self).fetch(subscription_id, data, **kwargs)", "docstring": "Fetch Subscription for given Id\n\nArgs:\nsubscription_id : Id for which subscription object is retrieved\n\nReturns:\nSubscription dict for given subscription Id", "source": "codesearchnet"}
{"code": "def convert_datetime_array(array):\n    \n\n    if not isinstance(array, np.ndarray):\n        return array\n\n    try:\n        dt2001 = np.datetime64('2001')\n        legacy_datetime64 = (dt2001.astype('int64') ==\n                             dt2001.astype('datetime64[ms]').astype('int64'))\n    except AttributeError as e:\n        if e.args == (\"'module' object has no attribute 'datetime64'\",):\n            \n            if 'PyPy' in sys.version:\n                legacy_datetime64 = False\n                pass\n            else:\n                raise e\n        else:\n            raise e\n\n    \n    if array.dtype.kind == 'M':\n        if legacy_datetime64:\n            if array.dtype == np.dtype('datetime64[ns]'):\n                array = array.astype('int64') / 10**6.0\n        else:\n            array =  array.astype('datetime64[us]').astype('int64') / 1000.\n\n    elif array.dtype.kind == 'm':\n        array = array.astype('timedelta64[us]').astype('int64') / 1000.\n\n    return array", "docstring": "Convert NumPy datetime arrays to arrays to milliseconds since epoch.\n\nArgs:\narray : (obj)\nA NumPy array of datetime to convert\n\nIf the value passed in is not a NumPy array, it will be returned as-is.\n\nReturns:\narray", "source": "juraj-google-style"}
{"code": "def ms_to_frames(ms, fps):\n    \n    if fps <= 0:\n        raise ValueError(\"Framerate must be positive number (%f).\" % fps)\n\n    return int(round((ms / 1000) * fps))", "docstring": "Convert milliseconds to number of frames.\n\nArguments:\nms: Number of milliseconds (may be int, float or other numeric class).\nfps: Framerate (must be a positive number, eg. 23.976).\n\nReturns:\nNumber of frames (int).\n\nRaises:\nValueError: fps was negative or zero.", "source": "juraj-google-style"}
{"code": "def context(self, name):\n        \n        data = self._context(name)\n        context = data.get(\"context\")\n        if context:\n            return context\n\n        assert self.load_path\n        context_path = os.path.join(self.load_path, \"contexts\", \"%s.rxt\" % name)\n        context = ResolvedContext.load(context_path)\n        data[\"context\"] = context\n        data[\"loaded\"] = True\n        return context", "docstring": "Get a context.\n\nArgs:\nname (str): Name to store the context under.\n\nReturns:\n`ResolvedContext` object.", "source": "juraj-google-style"}
{"code": "def match(self, message) -> bool:\n        \n        if self.to and message.to != self.to:\n            return False\n\n        if self.sender and message.sender != self.sender:\n            return False\n\n        if self.body and message.body != self.body:\n            return False\n\n        if self.thread and message.thread != self.thread:\n            return False\n\n        for key, value in self.metadata.items():\n            if message.get_metadata(key) != value:\n                return False\n\n        logger.debug(f\"message matched {self} == {message}\")\n        return True", "docstring": "Returns wether a message matches with this message or not.\nThe message can be a Message object or a Template object.\n\nArgs:\nmessage (spade.message.Message): the message to match to\n\nReturns:\nbool: wether the message matches or not", "source": "juraj-google-style"}
{"code": "def _GetCh(self):\n    fd = self._tty.fileno()\n    old = termios.tcgetattr(fd)\n    try:\n        tty.setraw(fd)\n        ch = self._tty.read(1)\n        if (ord(ch) == 27):\n            ch += self._tty.read(2)\n    finally:\n        termios.tcsetattr(fd, termios.TCSADRAIN, old)\n    return ch", "docstring": "Read a single character from the user.\n\nReturns:\nA string, the character read.", "source": "codesearchnet"}
{"code": "def _insert_operations(self, operations: Sequence[ops.Operation], insertion_indices: Sequence[int]) -> None:\n    if (len(operations) != len(insertion_indices)):\n        raise ValueError('operations and insertion_indices must have thesame length.')\n    self._moments += [ops.Moment() for _ in range(((1 + max(insertion_indices)) - len(self)))]\n    moment_to_ops = defaultdict(list)\n    for (op_index, moment_index) in enumerate(insertion_indices):\n        moment_to_ops[moment_index].append(operations[op_index])\n    for (moment_index, new_ops) in moment_to_ops.items():\n        self._moments[moment_index] = ops.Moment((self._moments[moment_index].operations + tuple(new_ops)))", "docstring": "Inserts operations at the specified moments. Appends new moments if\nnecessary.\n\nArgs:\noperations: The operations to insert.\ninsertion_indices: Where to insert them, i.e. operations[i] is\ninserted into moments[insertion_indices[i].\n\nRaises:\nValueError: operations and insert_indices have different lengths.\n\nNB: It's on the caller to ensure that the operations won't conflict\nwith operations already in the moment or even each other.", "source": "codesearchnet"}
{"code": "class TFCLIPEncoder(keras.layers.Layer):\n\n    def __init__(self, config: CLIPConfig, **kwargs):\n        super().__init__(**kwargs)\n        self.layers = [TFCLIPEncoderLayer(config, name=f'layers_._{i}') for i in range(config.num_hidden_layers)]\n\n    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n        all_hidden_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for i, layer_module in enumerate(self.layers):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n            layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))\n        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'layers', None) is not None:\n            for layer in self.layers:\n                with tf.name_scope(layer.name):\n                    layer.build(None)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`TFCLIPEncoderLayer`].\n\nArgs:\nconfig: CLIPConfig", "source": "github-repos"}
{"code": "def serialize_to_xml(root, block):\n    \n    root.tag = 'ubcpi'\n\n    if block.rationale_size is not None:\n        if block.rationale_size.get('min'):\n            root.set('rationale_size_min', unicode(block.rationale_size.get('min')))\n        if block.rationale_size.get('max'):\n            root.set('rationale_size_max', unicode(block.rationale_size['max']))\n\n    if block.algo:\n        if block.algo.get('name'):\n            root.set('algorithm', block.algo.get('name'))\n        if block.algo.get('num_responses'):\n            root.set('num_responses', unicode(block.algo.get('num_responses')))\n\n    display_name = etree.SubElement(root, 'display_name')\n    display_name.text = block.display_name\n\n    question = etree.SubElement(root, 'question')\n    question_text = etree.SubElement(question, 'text')\n    question_text.text = block.question_text['text']\n    serialize_image(block.question_text, question)\n\n    options = etree.SubElement(root, 'options')\n    serialize_options(options, block)\n\n    seeds = etree.SubElement(root, 'seeds')\n    serialize_seeds(seeds, block)", "docstring": "Serialize the Peer Instruction XBlock's content to XML.\n\nArgs:\nblock (PeerInstructionXBlock): The peer instruction block to serialize.\nroot (etree.Element): The XML root node to update.\n\nReturns:\netree.Element", "source": "juraj-google-style"}
{"code": "def __init__(self, project=None, deidentification_template_name=None, deidentification_config=None, inspection_template_name=None, inspection_config=None, timeout=None):\n    self.config = {}\n    self.project = project\n    self.timeout = timeout\n    if deidentification_template_name is not None and deidentification_config is not None:\n        raise ValueError('Both deidentification_template_name and deidentification_config were specified. Please specify only one of these.')\n    elif deidentification_template_name is None and deidentification_config is None:\n        raise ValueError('deidentification_template_name or deidentification_config must be specified.')\n    elif deidentification_template_name is not None:\n        self.config['deidentify_template_name'] = deidentification_template_name\n    else:\n        self.config['deidentify_config'] = deidentification_config\n    if inspection_config is None and inspection_template_name is None:\n        raise ValueError('inspection_template_name or inspection_config must be specified')\n    if inspection_template_name is not None:\n        self.config['inspect_template_name'] = inspection_template_name\n    if inspection_config is not None:\n        self.config['inspect_config'] = inspection_config", "docstring": "Initializes a :class:`MaskDetectedDetails` transform.\n\nArgs:\nproject: Optional. GCP project name in which inspection will be performed\ndeidentification_template_name (str): Either this or\n`deidentification_config` required. Name of\ndeidentification template to be used on detected sensitive information\ninstances in text.\ndeidentification_config\n(``Union[dict, google.cloud.dlp_v2.types.DeidentifyConfig]``):\nConfiguration for the de-identification of the content item.\nIf both template name and config are supplied,\nconfig is more important.\ninspection_template_name (str): This or `inspection_config` required.\nName of inspection template to be used\nto detect sensitive data in text.\ninspection_config\n(``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):\nConfiguration for the inspector used to detect sensitive data in text.\nIf both template name and config are supplied,\nconfig takes precedence.\ntimeout (float): Optional. The amount of time, in seconds, to wait for\nthe request to complete.", "source": "github-repos"}
{"code": "def orient_undirected_graph(self, data, umg, alg='HC'):\n    warnings.warn('The pairwise GNN model is computed on each edge of the UMG to initialize the model and start CGNN with a DAG')\n    gnn = GNN(nh=self.nh, lr=self.lr)\n    og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs, nb_jobs=self.nb_jobs, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu)\n    dag = dagify_min_edge(og)\n    return self.orient_directed_graph(data, dag, alg=alg)", "docstring": "Orient the undirected graph using GNN and apply CGNN to improve the graph.\n\nArgs:\ndata (pandas.DataFrame): Observational data on which causal\ndiscovery has to be performed.\numg (nx.Graph): Graph that provides the skeleton, on which the GNN\nthen the CGNN algorithm will be applied.\nalg (str): Exploration heuristic to use, among [\"HC\", \"HCr\",\n\"tabu\", \"EHC\"]\nReturns:\nnetworkx.DiGraph: Solution given by CGNN.\n\n.. note::\nGNN (``cdt.causality.pairwise.GNN``) is first used to orient the\nundirected graph and output a DAG before applying CGNN.", "source": "codesearchnet"}
{"code": "def _get_reference(document_path, reference_map):\n    try:\n        return reference_map[document_path]\n    except KeyError:\n        msg = _BAD_DOC_TEMPLATE.format(document_path)\n        raise ValueError(msg)", "docstring": "Get a document reference from a dictionary.\n\nThis just wraps a simple dictionary look-up with a helpful error that is\nspecific to :meth:`~.firestore.client.Client.get_all`, the\n**public** caller of this function.\n\nArgs:\ndocument_path (str): A fully-qualified document path.\nreference_map (Dict[str, .DocumentReference]): A mapping (produced\nby :func:`_reference_info`) of fully-qualified document paths to\ndocument references.\n\nReturns:\n.DocumentReference: The matching reference.\n\nRaises:\nValueError: If ``document_path`` has not been encountered.", "source": "codesearchnet"}
{"code": "def OverwriteAndClose(self, compressed_data, size):\n    \n    self.Set(self.Schema.CONTENT(compressed_data))\n    self.Set(self.Schema.SIZE(size))\n    super(AFF4MemoryStreamBase, self).Close()", "docstring": "Directly overwrite the current contents.\n\nReplaces the data currently in the stream with compressed_data,\nand closes the object. Makes it possible to avoid recompressing\nthe data.\nArgs:\ncompressed_data: The data to write, must be zlib compressed.\nsize: The uncompressed size of the data.", "source": "juraj-google-style"}
{"code": "def _compile_constant_expression(self,\n                                     expr: Expression,\n                                     scope: Dict[str, TensorFluent],\n                                     batch_size: Optional[int] = None,\n                                     noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:\n        \n        etype = expr.etype\n        args = expr.args\n        dtype = utils.python_type_to_dtype(etype[1])\n        fluent = TensorFluent.constant(args, dtype=dtype)\n        return fluent", "docstring": "Compile a constant expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "juraj-google-style"}
{"code": "def UploadFilePath(self, filepath, offset=0, amount=None):\n    return self._UploadChunkStream(self._streamer.StreamFilePath(filepath, offset=offset, amount=amount))", "docstring": "Uploads chunks of a file on a given path to the transfer store flow.\n\nArgs:\nfilepath: A path to the file to upload.\noffset: An integer offset at which the file upload should start on.\namount: An upper bound on number of bytes to stream. If it is `None` then\nthe whole file is uploaded.\n\nReturns:\nA `BlobImageDescriptor` object.", "source": "codesearchnet"}
{"code": "def random_sparse(strategy, prob, obj_reaction, flux_threshold):\n    essential = set()\n    deleted = set()\n    for (entity, deleted_reactions) in strategy.iter_tests():\n        if (obj_reaction in deleted_reactions):\n            logger.info('Marking entity {} as essential because the objective reaction depends on this entity...'.format(entity))\n            essential.add(entity)\n            continue\n        if (len(deleted_reactions) == 0):\n            logger.info('No reactions were removed when entity {} was deleted'.format(entity))\n            deleted.add(entity)\n            strategy.delete(entity, deleted_reactions)\n            continue\n        logger.info('Deleted reactions: {}'.format(', '.join(deleted_reactions)))\n        constr = []\n        for r in deleted_reactions:\n            flux_var = prob.get_flux_var(r)\n            (c,) = prob.prob.add_linear_constraints((flux_var == 0))\n            constr.append(c)\n        logger.info('Trying FBA without reactions {}...'.format(', '.join(deleted_reactions)))\n        try:\n            prob.maximize(obj_reaction)\n        except fluxanalysis.FluxBalanceError:\n            logger.info('FBA is infeasible, marking {} as essential'.format(entity))\n            for c in constr:\n                c.delete()\n            essential.add(entity)\n            continue\n        logger.debug('Reaction {} has flux {}'.format(obj_reaction, prob.get_flux(obj_reaction)))\n        if (prob.get_flux(obj_reaction) < flux_threshold):\n            for c in constr:\n                c.delete()\n            essential.add(entity)\n            logger.info('Entity {} was essential'.format(entity))\n        else:\n            deleted.add(entity)\n            strategy.delete(entity, deleted_reactions)\n            logger.info('Entity {} was deleted'.format(entity))\n    return (essential, deleted)", "docstring": "Find a random minimal network of model reactions.\n\nGiven a reaction to optimize and a threshold, delete entities randomly\nuntil the flux of the reaction to optimize falls under the threshold.\nKeep deleting until no more entities can be deleted. It works\nwith two strategies: deleting reactions or deleting genes (reactions\nrelated to certain genes).\n\nArgs:\nstrategy: :class:`.ReactionDeletionStrategy` or\n:class:`.GeneDeletionStrategy`.\nprob: :class:`psamm.fluxanalysis.FluxBalanceProblem`.\nobj_reaction: objective reactions to optimize.\nflux_threshold: threshold of max reaction flux.", "source": "codesearchnet"}
{"code": "def register_controller(self, module, required=True, min_number=1):\n    verify_controller_module(module)\n    module_ref_name = module.__name__.split('.')[(- 1)]\n    if (module_ref_name in self._controller_objects):\n        raise signals.ControllerError(('Controller module %s has already been registered. It cannot be registered again.' % module_ref_name))\n    module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME\n    if (module_config_name not in self.controller_configs):\n        if required:\n            raise signals.ControllerError(('No corresponding config found for %s' % module_config_name))\n        logging.warning('No corresponding config found for optional controller %s', module_config_name)\n        return None\n    try:\n        original_config = self.controller_configs[module_config_name]\n        controller_config = copy.deepcopy(original_config)\n        objects = module.create(controller_config)\n    except:\n        logging.exception('Failed to initialize objects for controller %s, abort!', module_config_name)\n        raise\n    if (not isinstance(objects, list)):\n        raise signals.ControllerError(('Controller module %s did not return a list of objects, abort.' % module_ref_name))\n    actual_number = len(objects)\n    if (actual_number < min_number):\n        module.destroy(objects)\n        raise signals.ControllerError(('Expected to get at least %d controller objects, got %d.' % (min_number, actual_number)))\n    self._controller_objects[module_ref_name] = copy.copy(objects)\n    logging.debug('Found %d objects for controller %s', len(objects), module_config_name)\n    self._controller_modules[module_ref_name] = module\n    return objects", "docstring": "Loads a controller module and returns its loaded devices.\n\nThis is to be used in a mobly test class.\n\nArgs:\nmodule: A module that follows the controller module interface.\nrequired: A bool. If True, failing to register the specified\ncontroller module raises exceptions. If False, the objects\nfailed to instantiate will be skipped.\nmin_number: An integer that is the minimum number of controller\nobjects to be created. Default is one, since you should not\nregister a controller module without expecting at least one\nobject.\n\nReturns:\nA list of controller objects instantiated from controller_module, or\nNone if no config existed for this controller and it was not a\nrequired controller.\n\nRaises:\nControllerError:\n* The controller module has already been registered.\n* The actual number of objects instantiated is less than the\n* `min_number`.\n* `required` is True and no corresponding config can be found.\n* Any other error occurred in the registration process.", "source": "codesearchnet"}
{"code": "async def getNodeByBuid(self, buid):\n        \n        node = self.livenodes.get(buid)\n        if node is not None:\n            return node\n\n        props = {}\n        proplayr = {}\n        for layr in self.layers:\n            layerprops = await layr.getBuidProps(buid)\n            props.update(layerprops)\n            proplayr.update({k: layr for k in layerprops})\n\n        node = s_node.Node(self, buid, props.items(), proplayr=proplayr)\n\n        \n        await asyncio.sleep(0)\n\n        if node.ndef is None:\n            return None\n\n        \n        self.buidcache.append(node)\n        self.livenodes[buid] = node\n        return node", "docstring": "Retrieve a node tuple by binary id.\n\nArgs:\nbuid (bytes): The binary ID for the node.\n\nReturns:\nOptional[s_node.Node]: The node object or None.", "source": "juraj-google-style"}
{"code": "def _copy_script_migrated(self, filename, id_=(- 1), file_type=SCRIPT_FILE_TYPE):\n    basefname = os.path.basename(filename)\n    resource = open(filename, 'rb')\n    headers = {'DESTINATION': '1', 'OBJECT_ID': str(id_), 'FILE_TYPE': file_type, 'FILE_NAME': basefname}\n    response = self.connection['jss'].session.post(url=('%s/%s' % (self.connection['jss'].base_url, 'dbfileupload')), data=resource, headers=headers)\n    return response", "docstring": "Upload a script to a migrated JSS's database.\n\nOn a \"migrated\" JSS, scripts are POSTed to the JSS. Pass an id\nif you wish to associate the script with an existing Script\nobject, otherwise, it will create a new Script object.\n\nArgs:\nfilename: Path to script file.\nid_: Int ID of Script object to associate this file with.\nDefault is -1, which creates a new Script.", "source": "codesearchnet"}
{"code": "def __call__(self,\n            state: Sequence[tf.Tensor],\n            timestep: tf.Tensor) -> Sequence[tf.Tensor]:\n        \n        return self._default", "docstring": "Returns the default action fluents regardless of the current `state` and `timestep`.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\ntimestep (tf.Tensor): The current timestep.\n\nReturns:\nSequence[tf.Tensor]: A tuple of action fluents.", "source": "juraj-google-style"}
{"code": "def launch_run(self, command, project=None, entity=None, run_id=None):\n        \n        query = gql()\n        patch = BytesIO()\n        if self.git.dirty:\n            self.git.repo.git.execute(['git', 'diff'], output_stream=patch)\n            patch.seek(0)\n        cwd = \".\"\n        if self.git.enabled:\n            cwd = cwd + os.getcwd().replace(self.git.repo.working_dir, \"\")\n        return self.gql(query, variable_values={\n            'entity': entity or self.settings('entity'),\n            'model': project or self.settings('project'),\n            'command': command,\n            'runId': run_id,\n            'patch': patch.read().decode(\"utf8\"),\n            'cwd': cwd\n        })", "docstring": "Launch a run in the cloud.\n\nArgs:\ncommand (str): The command to run\nprogram (str): The file to run\nproject (str): The project to scope the runs to\nentity (str, optional): The entity to scope this project to.  Defaults to public models\nrun_id (str, optional): The run_id to scope to\n\nReturns:\n[{\"podName\",\"status\"}]", "source": "juraj-google-style"}
{"code": "def abs_path(path):\n    return os.path.abspath(os.path.expanduser(path))", "docstring": "Resolve the '.' and '~' in a path to get the absolute path.\n\nArgs:\npath: The path to expand.\n\nReturns:\nThe absolute path of the input path.", "source": "github-repos"}
{"code": "class InformerFeatureEmbedder(nn.Module):\n\n    def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:\n        super().__init__()\n        self.num_features = len(cardinalities)\n        self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])\n\n    def forward(self, features: torch.Tensor) -> torch.Tensor:\n        if self.num_features > 1:\n            cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)\n        else:\n            cat_feature_slices = [features]\n        return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)", "docstring": "Embed a sequence of categorical features.\n\nArgs:\ncardinalities (`list[int]`):\nList of cardinalities of the categorical features.\nembedding_dims (`list[int]`):\nList of embedding dimensions of the categorical features.", "source": "github-repos"}
{"code": "def init_backend(self, phonemizer_lang: str):\n    requires_backends(self, 'phonemizer')\n    from phonemizer.backend import BACKENDS\n    self.backend = BACKENDS[self.phonemizer_backend](phonemizer_lang, language_switch='remove-flags')", "docstring": "Initializes the backend.\n\nArgs:\nphonemizer_lang (`str`): The language to be used.", "source": "github-repos"}
{"code": "def serialCmdPwdAuth(self, password_str):\n        \n        result = False\n        try:\n            req_start = \"0150310228\" + binascii.hexlify(password_str) + \"2903\"\n            req_crc = self.calc_crc16(req_start[2:].decode(\"hex\"))\n            req_str = req_start + req_crc\n            self.m_serial_port.write(req_str.decode(\"hex\"))\n            if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                ekm_log(\"Password accepted (\" + self.getContext() + \")\")\n                result = True\n            else:\n                ekm_log(\"Password call failure no 06(\" + self.getContext() + \")\")\n        except:\n            ekm_log(\"Password call failure by exception(\" + self.getContext() + \")\")\n\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        return result", "docstring": "Password step of set commands\n\nThis method is normally called within another serial command, so it\ndoes not issue a termination string.  Any default password is set\nin the caller parameter list, never here.\n\nArgs:\npassword_str (str): Required password.\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def decode_conjure_union_type(cls, obj, conjure_type):\n    type_of_union = obj['type']\n    for (attr, conjure_field) in conjure_type._options().items():\n        if (conjure_field.identifier == type_of_union):\n            attribute = attr\n            conjure_field_definition = conjure_field\n            break\n    else:\n        raise ValueError('unknown union type {0} for {1}'.format(type_of_union, conjure_type))\n    deserialized = {}\n    if ((type_of_union not in obj) or (obj[type_of_union] is None)):\n        cls.check_null_field(obj, deserialized, conjure_field_definition)\n    else:\n        value = obj[type_of_union]\n        field_type = conjure_field_definition.field_type\n        deserialized[attribute] = cls.do_decode(value, field_type)\n    return conjure_type(**deserialized)", "docstring": "Decodes json into a conjure union type.\n\nArgs:\nobj: the json object to decode\nconjure_type: a class object which is the union type\nwe're decoding into\nReturns:\nAn instance of type conjure_type.", "source": "codesearchnet"}
{"code": "def init_config_json(config_file):\n    \n    json_data = None\n    try:\n        if os.path.exists(config_file):\n        \n\n            with open(config_file) as json_file:\n                json_data = json.load(json_file)\n                return unicode_convert(json_data)\n        else:\n            return None\n    except:\n        line, filename, synerror = trace()\n        raise ArcRestHelperError({\n                    \"function\": \"init_config_json\",\n                    \"line\": line,\n                    \"filename\":  filename,\n                    \"synerror\": synerror,\n                                    }\n                                    )\n    finally:\n        json_data = None\n\n        del json_data\n\n        gc.collect()", "docstring": "Deserializes a JSON configuration file.\n\nArgs:\nconfig_file (str): The path to the JSON file.\nReturns:\ndict: A dictionary object containing the JSON data. If ``config_file`` does not exist, returns ``None``.", "source": "juraj-google-style"}
{"code": "def add_sync_methods(cls):\n  \n  for name in cls.__dict__.keys():\n    if name.endswith('_async'):\n      sync_name = name[:-6]\n      if not hasattr(cls, sync_name):\n        setattr(cls, sync_name, _make_sync_method(name))\n  return cls", "docstring": "Class decorator to add synchronous methods corresponding to async methods.\n\nThis modifies the class in place, adding additional methods to it.\nIf a synchronous method of a given name already exists it is not\nreplaced.\n\nArgs:\ncls: A class.\n\nReturns:\nThe same class, modified in place.", "source": "juraj-google-style"}
{"code": "def load_validation_plugin(name=None):\n    \n    if not name:\n        return BaseValidationRules\n\n    \n    \n    \n    \n    plugin = None\n    for entry_point in iter_entry_points('bigchaindb.validation', name):\n        plugin = entry_point.load()\n\n    \n    if not plugin:\n        raise ResolutionError(\n            'No plugin found in group `bigchaindb.validation` with name `{}`'.\n            format(name))\n\n    \n    \n    if not issubclass(plugin, (BaseValidationRules,)):\n        raise TypeError('object of type \"{}\" does not implement `bigchaindb.'\n                        'validation.BaseValidationRules`'.format(type(plugin)))\n\n    return plugin", "docstring": "Find and load the chosen validation plugin.\n\nArgs:\nname (string): the name of the entry_point, as advertised in the\nsetup.py of the providing package.\n\nReturns:\nan uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules``", "source": "juraj-google-style"}
{"code": "async def selfplay(state, flagfile='selfplay'):\n  \n\n  output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name)\n  holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name)\n\n  lines = await run(\n      'bazel-bin/cc/selfplay',\n      '--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, flagfile)),\n      '--model={}'.format(state.best_model_path),\n      '--output_dir={}'.format(output_dir),\n      '--holdout_dir={}'.format(holdout_dir),\n      '--seed={}'.format(state.seed))\n  result = '\\n'.join(lines[-6:])\n  logging.info(result)\n  stats = parse_win_stats_table(result, 1)[0]\n  num_games = stats.total_wins\n  logging.info('Black won %0.3f, white won %0.3f',\n               stats.black_wins.total / num_games,\n               stats.white_wins.total / num_games)\n\n  \n  pattern = os.path.join(output_dir, '*', '*.zz')\n  random.seed(state.seed)\n  tf.set_random_seed(state.seed)\n  np.random.seed(state.seed)\n  \n  \n  \n  \n  buffer = example_buffer.ExampleBuffer(sampling_frac=1.0)\n\n  \n  \n  logging.info('Writing golden chunk from \"{}\"'.format(pattern))\n  buffer.parallel_fill(tf.gfile.Glob(pattern))\n  buffer.flush(os.path.join(fsdb.golden_chunk_dir(),\n                            state.output_model_name + '.tfrecord.zz'))", "docstring": "Run selfplay and write a training chunk to the fsdb golden_chunk_dir.\n\nArgs:\nstate: the RL loop State instance.\nflagfile: the name of the flagfile to use for selfplay, either 'selfplay'\n(the default) or 'boostrap'.", "source": "juraj-google-style"}
{"code": "def _group_and_publish_tasks_statistics(self, result):\n    for i in result:\n        executor_id = i['executor_id']\n        i['executor_id'] = executor_id[:executor_id.rfind('.')]\n        i['statistics']['instances_count'] = 1\n    r = {}\n    for i in result:\n        executor_id = i['executor_id']\n        r[executor_id] = r.get(executor_id, {})\n        r[executor_id]['framework_id'] = i['framework_id']\n        r[executor_id]['statistics'] = r[executor_id].get('statistics', {})\n        r[executor_id]['statistics'] = self._sum_statistics(i['statistics'], r[executor_id]['statistics'])\n    self._add_cpu_usage(r)\n    self._add_cpu_percent(r)\n    self._add_mem_percent(r)\n    self._publish(r)", "docstring": "This function group statistics of same tasks by adding them.\nIt also add 'instances_count' statistic to get information about\nhow many instances is running on the server\n\nArgs:\nresult: result of mesos query. List of dictionaries with\n'executor_id', 'framework_id' as a strings and 'statistics'\nas dictionary of labeled numbers", "source": "codesearchnet"}
{"code": "def find_dependencies(self, dataset_keys, **dfilter):\n        \n        unknown_datasets = set()\n        for key in dataset_keys.copy():\n            n, unknowns = self._find_dependencies(key, **dfilter)\n\n            dataset_keys.discard(key)  \n            if n is not None:\n                dataset_keys.add(n.name)  \n            if unknowns:\n                unknown_datasets.update(unknowns)\n                continue\n\n            self.add_child(self, n)\n\n        return unknown_datasets", "docstring": "Create the dependency tree.\n\nArgs:\ndataset_keys (iterable): Strings or DatasetIDs to find dependencies for\n**dfilter (dict): Additional filter parameters. See\n`satpy.readers.get_key` for more details.\n\nReturns:\n(Node, set): Root node of the dependency tree and a set of unknown datasets", "source": "juraj-google-style"}
{"code": "def run_cm(cm, time_scale):\n    cm = np.linalg.matrix_power(cm, time_scale)\n    cm[(cm > 1)] = 1\n    return cm", "docstring": "Iterate a connectivity matrix the specified number of steps.\n\nArgs:\ncm (np.ndarray): A connectivity matrix.\ntime_scale (int): The number of steps to run.\n\nReturns:\nnp.ndarray: The connectivity matrix at the new timescale.", "source": "codesearchnet"}
{"code": "def CheckVlogArguments(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n  if Search(r'\\bVLOG\\((INFO|ERROR|WARNING|DFATAL|FATAL)\\)', line):\n    error(filename, linenum, 'runtime/vlog', 5,\n          'VLOG() should be used with numeric verbosity level.  '\n          'Use LOG() if you want symbolic severity levels.')", "docstring": "Checks that VLOG() is only used for defining a logging level.\n\nFor example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and\nVLOG(FATAL) are not.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def clean_output_files(self, follow_parents=True):\n        \n        paths = []\n        if self.status != self.S_OK:\n            logger.warning(\"Calling task.clean_output_files on a task whose status != S_OK\")\n\n        \n        self.tmpdir.clean()\n\n        \n        \n        except_exts = set()\n        for child in self.get_children():\n            if child.status == self.S_OK: continue\n            \n            i = [dep.node for dep in child.deps].index(self)\n            except_exts.update(child.deps[i].exts)\n\n        \n        exts = self.gc.exts.difference(except_exts)\n        \n        paths += self.outdir.remove_exts(exts)\n        if not follow_parents: return paths\n\n        \n        for parent in self.get_parents():\n\n            \n            \n            ext2nodes = collections.defaultdict(list)\n            for child in parent.get_children():\n                if child.status == child.S_OK: continue\n                i = [d.node for d in child.deps].index(parent)\n                for ext in child.deps[i].exts:\n                    ext2nodes[ext].append(child)\n\n            \n            except_exts = [k for k, lst in ext2nodes.items() if lst]\n            exts = self.gc.exts.difference(except_exts)\n            \n            paths += parent.outdir.remove_exts(exts)\n\n        self.history.info(\"Removed files: %s\" % paths)\n        return paths", "docstring": "This method is called when the task reaches S_OK. It removes all the output files\nproduced by the task that are not needed by its children as well as the output files\nproduced by its parents if no other node needs them.\n\nArgs:\nfollow_parents: If true, the output files of the parents nodes will be removed if possible.\n\nReturn:\nlist with the absolute paths of the files that have been removed.", "source": "juraj-google-style"}
{"code": "def GetSysFeeAmountByHeight(self, height):\n        \n        hash = self.GetBlockHash(height)\n        return self.GetSysFeeAmount(hash)", "docstring": "Get the system fee for the specified block.\n\nArgs:\nheight (int): block height.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def merge_csv(filenames: List[str],\n              outfile: TextIO = sys.stdout,\n              input_dialect: str = 'excel',\n              output_dialect: str = 'excel',\n              debug: bool = False,\n              headers: bool = True) -> None:\n    \n    writer = csv.writer(outfile, dialect=output_dialect)\n    written_header = False\n    header_items = []  \n    for filename in filenames:\n        log.info(\"Processing file \" + repr(filename))\n        with open(filename, 'r') as f:\n            reader = csv.reader(f, dialect=input_dialect)\n            if headers:\n                if not written_header:\n                    header_items = next(reader)\n                    if debug:\n                        log.debug(\"Header row: {!r}\", header_items)\n                    writer.writerow(header_items)\n                    written_header = True\n                else:\n                    new_headers = next(reader)\n                    if new_headers != header_items:\n                        raise ValueError(\n                            \"Header line in file {filename} doesn't match - \"\n                            \"it was {new} but previous was {old}\".format(\n                                filename=repr(filename),\n                                new=repr(new_headers),\n                                old=repr(header_items),\n                            ))\n                    if debug:\n                        log.debug(\"Header row matches previous\")\n            else:\n                if debug:\n                    log.debug(\"No headers in use\")\n            for row in reader:\n                if debug:\n                    log.debug(\"Data row: {!r}\", row)\n                writer.writerow(row)", "docstring": "Amalgamate multiple CSV/TSV/similar files into one.\n\nArgs:\nfilenames: list of filenames to process\noutfile: file-like object to write output to\ninput_dialect: dialect of input files, as passed to ``csv.reader``\noutput_dialect: dialect to write, as passed to ``csv.writer``\ndebug: be verbose?\nheaders: do the files have header lines?", "source": "juraj-google-style"}
{"code": "def parse_keys(self, sn: 'DataNode') -> Dict[(InstanceName, ScalarValue)]:\n    res = {}\n    for k in self.keys:\n        knod = sn.get_data_child(*k)\n        if (knod is None):\n            raise NonexistentSchemaNode(sn.qual_name, *k)\n        kval = knod.type.parse_value(self.keys[k])\n        if (kval is None):\n            raise InvalidKeyValue(self.keys[k])\n        res[knod.iname()] = kval\n    return res", "docstring": "Parse key dictionary in the context of a schema node.\n\nArgs:\nsn: Schema node corresponding to a list.", "source": "codesearchnet"}
{"code": "def van_enc_2d(x, first_depth, reuse=False):\n  \n  with tf.variable_scope('van_enc', reuse=reuse):\n    a = 4  \n    b = 4\n    \n    enc = tf.nn.relu(x)\n    enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu)\n    enc = tf.contrib.layers.layer_norm(enc)\n\n    enc = tf.reshape(enc, [-1, a, b, first_depth])\n\n    enc = tf.layers.conv2d_transpose(\n        enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)\n    enc = tf.contrib.layers.layer_norm(enc)\n    enc = tf.layers.conv2d_transpose(\n        enc,\n        first_depth * 2,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=2)\n    van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2])\n\n    enc = tf.layers.conv2d_transpose(\n        enc,\n        first_depth * 2,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=1)\n    enc = tf.contrib.layers.layer_norm(enc)\n    enc = tf.layers.conv2d_transpose(\n        enc,\n        first_depth * 4,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=1)\n    van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4])\n\n    van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)\n\n    return enc, van_higher_level", "docstring": "The higher level structure encoder for the VAN.\n\nThe high level structure is a vector instead of an image.\n\nArgs:\nx: The higher level structure to encode.\nfirst_depth: The depth of the first layer. Depth is increased in subsequent\nlayers.\nreuse: To reuse in variable scope or not.\n\nReturns:\nThe encoded image.", "source": "juraj-google-style"}
{"code": "def get_descriptor_defaults(self, api_info, hostname=None):\n    \n    hostname = (hostname or endpoints_util.get_app_hostname() or\n                api_info.hostname)\n    protocol = 'http' if ((hostname and hostname.startswith('localhost')) or\n                          endpoints_util.is_running_on_devserver()) else 'https'\n    base_path = api_info.base_path.strip('/')\n    defaults = {\n        'extends': 'thirdParty.api',\n        'root': '{0}:\n        'name': api_info.name,\n        'version': api_info.api_version,\n        'api_version': api_info.api_version,\n        'path_version': api_info.path_version,\n        'defaultVersion': True,\n        'abstract': False,\n        'adapter': {\n            'bns': '{0}:\n            'type': 'lily',\n            'deadline': 10.0\n        }\n    }\n    if api_info.canonical_name:\n      defaults['canonicalName'] = api_info.canonical_name\n    if api_info.owner_domain:\n      defaults['ownerDomain'] = api_info.owner_domain\n    if api_info.owner_name:\n      defaults['ownerName'] = api_info.owner_name\n    if api_info.package_path:\n      defaults['packagePath'] = api_info.package_path\n    if api_info.title:\n      defaults['title'] = api_info.title\n    if api_info.documentation:\n      defaults['documentation'] = api_info.documentation\n    return defaults", "docstring": "Gets a default configuration for a service.\n\nArgs:\napi_info: _ApiInfo object for this service.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\nA dictionary with the default configuration.", "source": "juraj-google-style"}
{"code": "def query_icao(icao: str):\n        \n        params = {\n            'dataSource': 'metars',\n            'requestType': 'retrieve',\n            'format': 'csv',\n            'hoursBeforeNow': 24,\n        }\n        AWC._validate_icao(icao)\n        params['stationString'] = icao\n        try:\n            return AWC._query(params)\n        except RequestsConnectionError:\n            raise AWCRequestFailed('failed to obtain requested data from AWC')", "docstring": "Queries AWC for the METAR of a given station\n\nArgs:\nicao: station ID as a four letters-digits ICAO code\n\nReturns: AWC result for the station", "source": "juraj-google-style"}
{"code": "def recipe_dv360_segmentology(config, auth_read, recipe_timezone, auth_write, recipe_name, date_range, recipe_slug, partners, advertisers):\n    dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n    bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n    dbm(config, {'auth': auth_read, 'report': {'filters': {'FILTER_PARTNER': {'values': partners}, 'FILTER_ADVERTISER': {'values': advertisers}}, 'body': {'timezoneCode': recipe_timezone, 'metadata': {'title': recipe_name, 'dataRange': date_range, 'format': 'CSV'}, 'params': {'type': 'TYPE_CROSS_PARTNER', 'groupBys': ['FILTER_PARTNER', 'FILTER_PARTNER_NAME', 'FILTER_ADVERTISER', 'FILTER_ADVERTISER_NAME', 'FILTER_MEDIA_PLAN', 'FILTER_MEDIA_PLAN_NAME', 'FILTER_ZIP_POSTAL_CODE'], 'metrics': ['METRIC_BILLABLE_IMPRESSIONS', 'METRIC_CLICKS', 'METRIC_TOTAL_CONVERSIONS']}, 'schedule': {'frequency': 'WEEKLY'}}}})\n    dbm(config, {'auth': auth_read, 'report': {'name': recipe_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'DV360_KPI', 'header': True, 'schema': [{'name': 'Partner_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Partner', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Advertiser_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Advertiser', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Campaign_Id', 'type': 'INTEGER', 'mode': 'REQUIRED'}, {'name': 'Campaign', 'type': 'STRING', 'mode': 'REQUIRED'}, {'name': 'Zip', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'Impressions', 'type': 'FLOAT', 'mode': 'NULLABLE'}, {'name': 'Clicks', 'type': 'FLOAT', 'mode': 'NULLABLE'}, {'name': 'Conversions', 'type': 'FLOAT', 'mode': 'NULLABLE'}]}}})\n    bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\\n          Partner_Id,\\n          Partner,\\n          Advertiser_Id,\\n          Advertiser,\\n          Campaign_Id,\\n          Campaign,\\n          Zip,\\n          SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,\\n          SAFE_DIVIDE(Clicks, Impressions) AS Click,\\n          SAFE_DIVIDE(Conversions, Impressions) AS Conversion,\\n          Impressions AS Impressions          FROM\\n          `{dataset}.DV360_KPI`;        ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'DV360_KPI_Normalized'}})\n    census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n    census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['Partner_Id', 'Partner', 'Advertiser_Id', 'Advertiser', 'Campaign_Id', 'Campaign'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion'], 'dataset': recipe_slug, 'table': 'DV360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "DV360 funnel analysis using Census data.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nrecipe_timezone (timezone) - Timezone for report dates.\nauth_write (authentication) - Authorization used for writing data.\nrecipe_name (string) - Name of report, not needed if ID used.\ndate_range (choice) - Timeframe to run the report for.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.\npartners (integer_list) - DV360 partner id.\nadvertisers (integer_list) - 
Comma delimited list of DV360 advertiser ids.", "source": "github-repos"}
{"code": "def delete(self, *, auto_commit=False):\n    try:\n        db.session.delete(self.resource)\n        if auto_commit:\n            db.session.commit()\n    except SQLAlchemyError:\n        self.log.exception('Failed deleting resource: {}'.format(self.id))\n        db.session.rollback()", "docstring": "Removes a resource from the database\n\nArgs:\nauto_commit (bool): Automatically commit the transaction. Default: `False`\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "class AriaSharedExpertsMLP(LlamaMLP):\n\n    def __init__(self, config: AriaTextConfig):\n        super().__init__(self)\n        self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts", "docstring": "Shared Expert MLP for shared experts.\n\nUnlike routed experts, shared experts process all tokens without routing.\nThis class reconfigures the intermediate size in comparison to the LlamaMLP.\n\nArgs:\nconfig (`AriaTextConfig`): Configuration object for the Aria language model.", "source": "github-repos"}
{"code": "def take_bug_reports(ads, test_name, begin_time, destination=None):\n    begin_time = mobly_logger.normalize_log_line_timestamp(str(begin_time))\n\n    def take_br(test_name, begin_time, ad, destination):\n        ad.take_bug_report(test_name, begin_time, destination=destination)\n    args = [(test_name, begin_time, ad, destination) for ad in ads]\n    utils.concurrent_exec(take_br, args)", "docstring": "Takes bug reports on a list of android devices.\n\nIf you want to take a bug report, call this function with a list of\nandroid_device objects in on_fail. But reports will be taken on all the\ndevices in the list concurrently. Bug report takes a relative long\ntime to take, so use this cautiously.\n\nArgs:\nads: A list of AndroidDevice instances.\ntest_name: Name of the test method that triggered this bug report.\nbegin_time: timestamp taken when the test started, can be either\nstring or int.\ndestination: string, path to the directory where the bugreport\nshould be saved.", "source": "codesearchnet"}
{"code": "def setup(options):\n        \n\n        if not options.misc.debug:\n            requests.packages.urllib3.disable_warnings(\n                requests.packages.urllib3.exceptions.InsecureRequestWarning\n            )", "docstring": "Initialize debug/logging in third party libraries correctly.\n\nArgs:\noptions (:class:`nyawc.Options`): The options to use for the current crawling runtime.", "source": "juraj-google-style"}
{"code": "def _virtual_molecule(self, mol, ilabels, eq_atoms):\n    vmol = ob.OBMol()\n    non_unique_atoms = set([a for g in eq_atoms for a in g])\n    all_atoms = set(range(1, (len(ilabels) + 1)))\n    unique_atom_labels = sorted((all_atoms - non_unique_atoms))\n    for i in unique_atom_labels:\n        orig_idx = ilabels[(i - 1)]\n        oa1 = mol.GetAtom(orig_idx)\n        a1 = vmol.NewAtom()\n        a1.SetAtomicNum(oa1.GetAtomicNum())\n        a1.SetVector(oa1.GetVector())\n    if (vmol.NumAtoms() < 3):\n        for symm in eq_atoms:\n            (c1x, c1y, c1z) = self._group_centroid(mol, ilabels, symm)\n            min_distance = float('inf')\n            for i in range(1, (vmol.NumAtoms() + 1)):\n                va = vmol.GetAtom(i)\n                distance = math.sqrt(((((c1x - va.x()) ** 2) + ((c1y - va.y()) ** 2)) + ((c1z - va.z()) ** 2)))\n                if (distance < min_distance):\n                    min_distance = distance\n            if (min_distance > 0.2):\n                a1 = vmol.NewAtom()\n                a1.SetAtomicNum(9)\n                a1.SetVector(c1x, c1y, c1z)\n    return vmol", "docstring": "Create a virtual molecule by unique atoms, the centriods of the\nequivalent atoms\n\nArgs:\nmol: The molecule. OpenBabel OBMol object\nilables: inchi label map\neq_atoms: equivalent atom labels\nfarthest_group_idx: The equivalent atom group index in which\nthere is the farthest atom to the centroid\n\nReturn:\nThe virtual molecule", "source": "codesearchnet"}
{"code": "def lu_slogdet(LU):\n    LU = (asarray(LU[0], float), asarray(LU[1], float))\n    adet = _sum(log(_abs(LU[0].diagonal())))\n    s = prod(sign(LU[0].diagonal()))\n    nrows_exchange = (LU[1].size - _sum((LU[1] == arange(LU[1].size, dtype='int32'))))\n    odd = ((nrows_exchange % 2) == 1)\n    if odd:\n        s *= (- 1.0)\n    return (s, adet)", "docstring": "r\"\"\"Natural logarithm of a LU decomposition.\n\nArgs:\nLU (tuple): LU decomposition.\n\nReturns:\ntuple: sign and log-determinant.", "source": "codesearchnet"}
{"code": "def save(self, filething=None, padding=None):\n        \n\n        \n        self.to_content_description = {}\n        self.to_extended_content_description = {}\n        self.to_metadata = {}\n        self.to_metadata_library = []\n        for name, value in self.tags:\n            library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)\n            can_cont_desc = value.TYPE == UNICODE\n\n            if library_only or value.language is not None:\n                self.to_metadata_library.append((name, value))\n            elif value.stream is not None:\n                if name not in self.to_metadata:\n                    self.to_metadata[name] = value\n                else:\n                    self.to_metadata_library.append((name, value))\n            elif name in ContentDescriptionObject.NAMES:\n                if name not in self.to_content_description and can_cont_desc:\n                    self.to_content_description[name] = value\n                else:\n                    self.to_metadata_library.append((name, value))\n            else:\n                if name not in self.to_extended_content_description:\n                    self.to_extended_content_description[name] = value\n                else:\n                    self.to_metadata_library.append((name, value))\n\n        \n        header = self._header\n        if header.get_child(ContentDescriptionObject.GUID) is None:\n            header.objects.append(ContentDescriptionObject())\n        if header.get_child(ExtendedContentDescriptionObject.GUID) is None:\n            header.objects.append(ExtendedContentDescriptionObject())\n        header_ext = header.get_child(HeaderExtensionObject.GUID)\n        if header_ext is None:\n            header_ext = HeaderExtensionObject()\n            header.objects.append(header_ext)\n        if header_ext.get_child(MetadataObject.GUID) is None:\n            header_ext.objects.append(MetadataObject())\n        if header_ext.get_child(MetadataLibraryObject.GUID) is None:\n            header_ext.objects.append(MetadataLibraryObject())\n\n        fileobj = filething.fileobj\n        \n        old_size = header.parse_size(fileobj)[0]\n        data = header.render_full(self, fileobj, old_size, padding)\n        size = len(data)\n        resize_bytes(fileobj, old_size, size, 0)\n        fileobj.seek(0)\n        fileobj.write(data)", "docstring": "save(filething=None, padding=None)\n\nSave tag changes back to the loaded file.\n\nArgs:\nfilething (filething)\npadding (:obj:`mutagen.PaddingFunction`)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def read_submissions_from_directory(dirname, use_gpu):\n  \n  result = []\n  for sub_dir in os.listdir(dirname):\n    submission_path = os.path.join(dirname, sub_dir)\n    try:\n      if not os.path.isdir(submission_path):\n        continue\n      if not os.path.exists(os.path.join(submission_path, 'metadata.json')):\n        continue\n      with open(os.path.join(submission_path, 'metadata.json')) as f:\n        metadata = json.load(f)\n      if use_gpu and ('container_gpu' in metadata):\n        container = metadata['container_gpu']\n      else:\n        container = metadata['container']\n      entry_point = metadata['entry_point']\n      submission_type = metadata['type']\n      if submission_type == 'attack' or submission_type == 'targeted_attack':\n        submission = Attack(submission_path, container, entry_point, use_gpu)\n      elif submission_type == 'defense':\n        submission = Defense(submission_path, container, entry_point, use_gpu)\n      else:\n        raise ValueError('Invalid type of submission: %s' % submission_type)\n      result.append(submission)\n    except (IOError, KeyError, ValueError):\n      print('Failed to read submission from directory ', submission_path)\n  return result", "docstring": "Scans directory and read all submissions.\n\nArgs:\ndirname: directory to scan.\nuse_gpu: whether submissions should use GPU. This argument is\nused to pick proper Docker container for each submission and create\ninstance of Attack or Defense class.\n\nReturns:\nList with submissions (subclasses of Submission class).", "source": "juraj-google-style"}
{"code": "def from_api_repr(cls, resource, client):\n        \n        job_ref_properties = resource.get(\"jobReference\", {\"projectId\": client.project})\n        job_ref = _JobReference._from_api_repr(job_ref_properties)\n        job = cls(job_ref, client)\n        \n        \n        resource[\"jobReference\"] = job_ref_properties\n        job._properties = resource\n        return job", "docstring": "Construct an UnknownJob from the JSON representation.\n\nArgs:\nresource (dict): JSON representation of a job.\nclient (google.cloud.bigquery.client.Client):\nClient connected to BigQuery API.\n\nReturns:\nUnknownJob: Job corresponding to the resource.", "source": "juraj-google-style"}
{"code": "def set(self, value):\n    pywrap_tfe.TFE_MonitoringBoolGaugeCellSet(self._cell, value)", "docstring": "Atomically set the value.\n\nArgs:\nvalue: bool value.", "source": "github-repos"}
{"code": "def delete_handler(Model, name=None, **kwds):\n    from nautilus.database import db\n\n    async def action_handler(service, action_type, payload, props, notify=True, **kwds):\n        if (action_type == get_crud_action('delete', (name or Model))):\n            try:\n                message_props = {}\n                if ('correlation_id' in props):\n                    message_props['correlation_id'] = props['correlation_id']\n                record_id = (payload['id'] if ('id' in payload) else payload['pk'])\n                try:\n                    model_query = Model.select().where((Model.primary_key() == record_id))\n                except KeyError:\n                    raise RuntimeError('Could not find appropriate id to remove service record.')\n                model_query.get().delete_instance()\n                if notify:\n                    (await service.event_broker.send(payload='{\"status\":\"ok\"}', action_type=change_action_status(action_type, success_status()), **message_props))\n            except Exception as err:\n                if notify:\n                    (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))\n                else:\n                    raise err\n    return action_handler", "docstring": "This factory returns an action handler that deletes a new instance of\nthe specified model when a delete action is recieved, assuming the\naction follows nautilus convetions.\n\nArgs:\nModel (nautilus.BaseModel): The model to delete when the action\nreceived.\n\nReturns:\nfunction(type, payload): The action handler for this model", "source": "codesearchnet"}
{"code": "def _post(self, url, data, scope):\n    self._create_session(scope)\n    response = self.session.post(url, data=data)\n    return (response.status_code, response.text)", "docstring": "Make a POST request using the session object to a Degreed endpoint.\n\nArgs:\nurl (str): The url to send a POST request to.\ndata (str): The json encoded payload to POST.\nscope (str): Must be one of the scopes Degreed expects:\n- `CONTENT_PROVIDER_SCOPE`\n- `COMPLETION_PROVIDER_SCOPE`", "source": "codesearchnet"}
{"code": "def delete(self, main_type, sub_type, unique_id, owner=None):\n        \n        params = {'owner': owner} if owner else {}\n        if not sub_type:\n            url = '/v2/{}/{}'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}'.format(main_type, sub_type, unique_id)\n        return self.tcex.session.delete(url, params=params)", "docstring": "Deletes the Indicator/Group/Victim or Security Label\nArgs:\nmain_type:\nsub_type:\nunique_id:\nowner:", "source": "juraj-google-style"}
{"code": "def set_string(self, option, value):\n    if (not isinstance(value, str)):\n        raise TypeError(('%s must be a string' % option))\n    self.options[option] = value", "docstring": "Set a string option.\n\nArgs:\noption (str): name of option.\nvalue (str): value of the option.\n\nRaises:\nTypeError: Value must be a string.", "source": "codesearchnet"}
{"code": "def group_associations(self, main_type, sub_type, unique_id, owner=None, params=None):\n        \n        params = params or {}\n        if owner:\n            params['owner'] = owner\n\n        if not sub_type:\n            url = '/v2/{}/{}/groups'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}/groups'.format(main_type, sub_type, unique_id)\n\n        for ga in self._iterate(url, params, 'group'):\n            yield ga", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_cached_filename(self, filename, extention, settings_list=None):\n    cached_name = '_'.join([filename, self.get_hash()])\n    return '.'.join([cached_name, extention])", "docstring": "Creates a filename with md5 cache string based on settings list\n\nArgs:\nfilename (str): the filename without extention\nextention (str): the file extention without dot. (i.e. 'pkl')\nsettings_list (dict|list): the settings list as list (optional)\nNB! The dictionaries have to be sorted or hash id will change\narbitrarely.", "source": "codesearchnet"}
{"code": "def delete(self, table_name):\n        \n        dataset = Dataset(self, table_name)\n        deleted = dataset.delete()\n        if deleted:\n            return deleted\n\n        raise CartoException(.format(table_name))", "docstring": "Delete a table in user's CARTO account.\n\nArgs:\ntable_name (str): Name of table to delete\n\nReturns:\nbool: `True` if table is removed", "source": "juraj-google-style"}
{"code": "def relu6(x):\n    if any_symbolic_tensors((x,)):\n        return Relu6().symbolic_call(x)\n    return backend.nn.relu6(x)", "docstring": "Rectified linear unit activation function with upper bound of 6.\n\nIt is defined as `f(x) = np.clip(x, 0, 6)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0])\n>>> keras.ops.relu6(x)\narray([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)", "source": "github-repos"}
{"code": "def normalized_start(self):\n    namespaces_after_key = list(self.make_datastore_query().Run(limit=1))\n    if (not namespaces_after_key):\n        return None\n    namespace_after_key = (namespaces_after_key[0].name() or '')\n    return NamespaceRange(namespace_after_key, self.namespace_end, _app=self.app)", "docstring": "Returns a NamespaceRange with leading non-existant namespaces removed.\n\nReturns:\nA copy of this NamespaceRange whose namespace_start is adjusted to exclude\nthe portion of the range that contains no actual namespaces in the\ndatastore. None is returned if the NamespaceRange contains no actual\nnamespaces in the datastore.", "source": "codesearchnet"}
{"code": "def create_binary_descriptor(descriptor):\n    \n\n    func_names = {0: 'copy_latest_a', 1: 'average_a',\n                  2: 'copy_all_a', 3: 'sum_a',\n                  4: 'copy_count_a', 5: 'trigger_streamer',\n                  6: 'call_rpc', 7: 'subtract_afromb'}\n\n    func_codes = {y: x for x, y in func_names.items()}\n\n    node, inputs, processing = parse_node_descriptor(descriptor, DeviceModel())\n\n    func_code = func_codes.get(processing)\n    if func_code is None:\n        raise ArgumentError(\"Unknown processing function\", function=processing)\n\n    stream_a, trigger_a = inputs[0]\n    stream_a = stream_a.encode()\n\n    if len(inputs) == 2:\n        stream_b, trigger_b = inputs[1]\n        stream_b = stream_b.encode()\n    else:\n        stream_b, trigger_b = 0xFFFF, None\n\n    if trigger_a is None:\n        trigger_a = TrueTrigger()\n\n    if trigger_b is None:\n        trigger_b = TrueTrigger()\n\n    ref_a = 0\n    if isinstance(trigger_a, InputTrigger):\n        ref_a = trigger_a.reference\n\n    ref_b = 0\n    if isinstance(trigger_b, InputTrigger):\n        ref_b = trigger_b.reference\n\n    trigger_a = _create_binary_trigger(trigger_a)\n    trigger_b = _create_binary_trigger(trigger_b)\n\n    combiner = node.trigger_combiner\n\n    bin_desc = struct.pack(\"<LLHHHBBBB2x\", ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner)\n    return bin_desc", "docstring": "Convert a string node descriptor into a 20-byte binary descriptor.\n\nThis is the inverse operation of parse_binary_descriptor and composing\nthe two operations is a noop.\n\nArgs:\ndescriptor (str): A string node descriptor\n\nReturns:\nbytes: A 20-byte binary node descriptor.", "source": "juraj-google-style"}
{"code": "def world_info(world_name, world_config=None, initial_indent=\"\", next_indent=\"  \"):\n    \n    if world_config is None:\n        for config, _ in _iter_packages():\n            for world in config[\"maps\"]:\n                if world[\"name\"] == world_name:\n                    world_config = world\n\n    if world_config is None:\n        raise HolodeckException(\"Couldn't find world \" + world_name)\n\n    second_indent = initial_indent + next_indent\n    agent_indent = second_indent + next_indent\n    sensor_indent = agent_indent + next_indent\n\n    print(initial_indent, world_config[\"name\"])\n    print(second_indent, \"Resolution:\", world_config[\"window_width\"], \"x\", world_config[\"window_height\"])\n    print(second_indent, \"Agents:\")\n    for agent in world_config[\"agents\"]:\n        print(agent_indent, \"Name:\", agent[\"agent_name\"])\n        print(agent_indent, \"Type:\", agent[\"agent_type\"])\n        print(agent_indent, \"Sensors:\")\n        for sensor in agent[\"sensors\"]:\n            print(sensor_indent, sensor)", "docstring": "Gets and prints the information of a world.\n\nArgs:\nworld_name (str): the name of the world to retrieve information for\nworld_config (dict optional): A dictionary containing the world's configuration. Will find the config if None. Defaults to None.\ninitial_indent (str optional): This indent will apply to each output line. Defaults to \"\".\nnext_indent (str optional): This indent will be applied within each nested line. Defaults to \"  \".", "source": "juraj-google-style"}
{"code": "def clip_gradient(net, clip_value_min, clip_value_max, name=None):\n    if (not net.dtype.is_floating):\n        raise ValueError('clip_gradient does not support non-float `net` inputs.')\n    with tf.name_scope(name, 'clip_gradient', values=[net]):\n        dtype = net.dtype.base_dtype\n        min_tensor = tf.convert_to_tensor(clip_value_min, dtype=dtype)\n        max_tensor = tf.convert_to_tensor(clip_value_max, dtype=dtype)\n        clip_gradient_op = _clip_gradient_op(dtype)\n        output = clip_gradient_op(net, min_tensor, max_tensor)\n        output.set_shape(net.get_shape())\n    return output", "docstring": "Clips respective gradients of a given tensor.\n\nActs as identity for the forward pass, but clips gradient tensor element-wise\nby value during the backward pass. Any gradient values less than\n`clip_value_min` or greater than `clip_values_max` are set to the respective\nlimit values.\n\nArgs:\nnet: A `tf.Tensor`.\nclip_value_min: A 0-D Tensor or scalar. The minimum value to clip by.\nclip_value_max: A 0-D Tensor or scalar. The maximum value to clip by.\nname: A name for the operation (optional, default 'clip_gradient').\n\nReturns:\nA `tf.Tensor` with the same type as the input tensor.\n\nRaises:\nValueError: If `net` dtype is non-float.", "source": "codesearchnet"}
{"code": "def _get_oauth2_client_id_and_secret(settings_instance):\n    secret_json = getattr(settings_instance, 'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)\n    if (secret_json is not None):\n        return _load_client_secrets(secret_json)\n    else:\n        client_id = getattr(settings_instance, 'GOOGLE_OAUTH2_CLIENT_ID', None)\n        client_secret = getattr(settings_instance, 'GOOGLE_OAUTH2_CLIENT_SECRET', None)\n        if ((client_id is not None) and (client_secret is not None)):\n            return (client_id, client_secret)\n        else:\n            raise exceptions.ImproperlyConfigured('Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or both GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET in settings.py')", "docstring": "Initializes client id and client secret based on the settings.\n\nArgs:\nsettings_instance: An instance of ``django.conf.settings``.\n\nReturns:\nA 2-tuple, the first item is the client id and the second\nitem is the client secret.", "source": "codesearchnet"}
{"code": "def tanh(x):\n    return nn.tanh(x)", "docstring": "Hyperbolic tangent activation function.\n\nFor example:\n\n>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n>>> b = tf.keras.activations.tanh(a)\n>>> b.numpy()\narray([-0.9950547, -0.7615942,  0.,  0.7615942,  0.9950547], dtype=float32)\n\nArgs:\nx: Input tensor.\n\nReturns:\nTensor of same shape and dtype of input `x`, with tanh activation:\n`tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.", "source": "github-repos"}
{"code": "def __init__(self, value, ctype=None):\n        \n        if isinstance(value, str) and value == 'INFINITY':\n            self._value = np.inf\n        elif isinstance(value, str) and value == '-INFINITY':\n            self._value = -np.inf\n        else:\n            self._value = np.array(value)\n        self._ctype = ctype or dtype_to_ctype(self._value.dtype)\n        self._mot_float_dtype = None", "docstring": "A kernel input scalar.\n\nThis will insert the given value directly into the kernel's source code, and will not load it as a buffer.\n\nArgs:\nvalue (number): the number to insert into the kernel as a scalar.\nctype (str): the desired c-type for in use in the kernel, like ``int``, ``float`` or ``mot_float_type``.\nIf None it is implied from the value.", "source": "juraj-google-style"}
{"code": "def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1):\n    if (not (0.0 <= block_pct_tokens_thresh <= 1.0)):\n        raise ValueError('block_pct_tokens_thresh must be in the range [0.0, 1.0]')\n    html = read_html_file(data_dir, fileroot)\n    blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True)\n    content_blocks = []\n    comments_blocks = []\n    for block in blocks:\n        block_split = block.split('\\t')\n        num_block_tokens = len(block_split[2].split())\n        content_blocks.append((float(block_split[0]), num_block_tokens, block_split[3].split()))\n        comments_blocks.append((float(block_split[1]), num_block_tokens, block_split[4].split()))\n    parsed_content_blocks = _parse_content_or_comments_blocks(content_blocks, block_pct_tokens_thresh)\n    parsed_comments_blocks = _parse_content_or_comments_blocks(comments_blocks, block_pct_tokens_thresh)\n    return (html, parsed_content_blocks, parsed_comments_blocks)", "docstring": "Prepare data for a single HTML + gold standard blocks example, uniquely\nidentified by ``fileroot``.\n\nArgs:\ndata_dir (str)\nfileroot (str)\nblock_pct_tokens_thresh (float): must be in [0.0, 1.0]\n\nReturns:\nTuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]:\nThe first element is simply the raw html as a string. The second and\nthird elements are 3-tuples for content and comments, respectively,\nwhere the first element is a numpy array of 1s and 0s whose values\ncorrespond to whether or not a given block is considered non-content\nor not; the second element is a numpy integer array whose values are\nthe total number of tokens in each block; and the third element is\na flat list of content or comment tokens as strings, concatenated\nfrom all blocks.\n\nSee Also:\n:func:`prepare_all_data`", "source": "codesearchnet"}
{"code": "def run_multiple(self, eventLoops):\n    self.nruns += len(eventLoops)\n    return self.communicationChannel.put_multiple(eventLoops)", "docstring": "run the event loops in the background.\n\nArgs:\neventLoops (list): a list of event loops to run", "source": "codesearchnet"}
{"code": "def enqueue_tpu_embedding_integer_batch(batch, device_ordinal, mode_override=None, name=None):\n    if mode_override is None:\n        mode_override = 'unspecified'\n    return gen_tpu_ops.enqueue_tpu_embedding_integer_batch(batch=batch, device_ordinal=device_ordinal, mode_override=mode_override, name=name)", "docstring": "A placeholder op for enqueueing embedding IDs to the TPU.\n\nArgs:\nbatch: A list of 1D tensors, one for each embedding table, containing the\nindices into the tables.\ndevice_ordinal: The TPU device to use. Should be >= 0 and less than the\nnumber of TPU cores in the task on which the node is placed.\nmode_override: A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified',\n'inference', 'train', 'backward_pass_only'}. When set to 'unspecified',\nthe mode set in TPUEmbeddingConfiguration is used, otherwise mode_override\nis used (optional).\nname: A name for the operation (optional).\n\nReturns:\nAn EnqueueTPUEmbeddingIntegerBatch operation.", "source": "github-repos"}
{"code": "def output_classes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)", "docstring": "Returns the class of each component of an element of this iterator.\n\nThe expected values are `tf.Tensor` and `tf.sparse.SparseTensor`.\n\nReturns:\nA (nested) structure of Python `type` objects corresponding to each\ncomponent of an element of this dataset.", "source": "github-repos"}
{"code": "def in_labelset(xmrs, nodeids, label=None):\n    nodeids = set(nodeids)\n    if (label is None):\n        label = xmrs.ep(next(iter(nodeids))).label\n    return nodeids.issubset(xmrs._vars[label]['refs']['LBL'])", "docstring": "Test if all nodeids share a label.\n\nArgs:\nnodeids: iterable of nodeids\nlabel (str, optional): the label that all nodeids must share\nReturns:\nbool: `True` if all nodeids share a label, otherwise `False`", "source": "codesearchnet"}
{"code": "def zpath(filename):\n    \n    for ext in [\"\", '.gz', '.GZ', '.bz2', '.BZ2', '.z', '.Z']:\n        zfilename = \"{}{}\".format(filename, ext)\n        if os.path.exists(zfilename):\n            return zfilename\n    return filename", "docstring": "Returns an existing (zipped or unzipped) file path given the unzipped\nversion. If no path exists, returns the filename unmodified.\n\nArgs:\nfilename: filename without zip extension\n\nReturns:\nfilename with a zip extension (unless an unzipped version\nexists). If filename is not found, the same filename is returned\nunchanged.", "source": "juraj-google-style"}
{"code": "def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):\n    start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)\n    end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)\n    heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)\n    if (heat_needs_fan is None):\n        return None\n    history_blocks = []\n    while (start < end):\n        block_start = start\n        block_end = min((start + timedelta(days=30)), end)\n        blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end)\n        if (blocks is None):\n            return None\n        history_blocks.extend(blocks)\n        start += timedelta(days=30, minutes=1)\n    output_rows = []\n    for block in history_blocks:\n        runStatus = block.find('runStatus').text\n        if runStatus.startswith('Heat'):\n            fanState = (heatNeedsFan == 'Yes')\n        else:\n            fanState = (runStatus != 'Off')\n        api_time = datetime.strptime(block.find('timestamp').text, '%Y-%m-%dT%H:%M').replace(tzinfo=_pelican_time)\n        timestamp = int((api_time.timestamp() * (10 ** 9)))\n        output_rows.append({'temperature': float(block.find('temperature').text), 'relative_humidity': float(block.find('humidity').text), 'heating_setpoint': float(block.find('heatSetting').text), 'cooling_setpoint': float(block.find('coolSetting').text), 'override': (block.find('setBy').text != 'Schedule'), 'fan': fanState, 'mode': _mode_name_mappings[block.find('system').text], 'state': _state_mappings.get(runStatus, 0), 'time': timestamp})\n    df = pd.DataFrame(output_rows)\n    df.drop_duplicates(subset='time', keep='first', inplace=True)\n    return df", "docstring": "Fill a hole in a Pelican thermostat's data stream.\n\nArguments:\nsite -- The thermostat's Pelican site name\nusername -- The Pelican username for the site\npassword -- The Pelican password for the site\ntstat_name -- The name of the thermostat, as identified by Pelican\nstart_time -- The start of the data hole in UTC, e.g. \"2018-01-29 15:00:00\"\nend_time -- The end of the data hole in UTC, e.g. \"2018-01-29 16:00:00\"\n\nReturns:\nA Pandas dataframe with historical Pelican data that falls between the\nspecified start and end times.\n\nNote that this function assumes the Pelican thermostat's local time zone is\nUS/Pacific. It will properly handle PST vs. PDT.", "source": "codesearchnet"}
{"code": "def getent(refresh=False):\n    \n    if 'group.getent' in __context__ and not refresh:\n        return __context__['group.getent']\n\n    ret = []\n\n    results = _get_all_groups()\n\n    for result in results:\n        group = {'gid': __salt__['file.group_to_gid'](result.Name),\n                'members': [_get_username(x) for x in result.members()],\n                'name': result.Name,\n                'passwd': 'x'}\n        ret.append(group)\n    __context__['group.getent'] = ret\n    return ret", "docstring": "Return info on all groups\n\nArgs:\n\nrefresh (bool):\nRefresh the info for all groups in ``__context__``. If False only\nthe groups in ``__context__`` will be returned. If True the\n``__context__`` will be refreshed with current data and returned.\nDefault is False\n\nReturns:\nA list of groups and their information\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.getent", "source": "juraj-google-style"}
{"code": "def get_sitej(self, site_index, image_index):\n        \n        atoms_n_occu = self.s[site_index].species\n        lattice = self.s.lattice\n        coords = self.s[site_index].frac_coords + self.offsets[image_index]\n        return PeriodicSite(atoms_n_occu, coords, lattice)", "docstring": "Assuming there is some value in the connectivity array at indices\n(1, 3, 12). sitei can be obtained directly from the input structure\n(structure[1]). sitej can be obtained by passing 3, 12 to this function\n\nArgs:\nsite_index (int): index of the site (3 in the example)\nimage_index (int): index of the image (12 in the example)", "source": "juraj-google-style"}
{"code": "def get_roles(self):\n    prefix = (_IDENTITY_NS + _ROLE_NS)\n    rolelist_list = [_create_from_bytes(d, identity_pb2.RoleList) for (_, d) in self._state_view.leaves(prefix=prefix)]\n    roles = []\n    for role_list in rolelist_list:\n        for role in role_list.roles:\n            roles.append(role)\n    return sorted(roles, key=(lambda r: r.name))", "docstring": "Return all the Roles under the Identity namespace.\n\nReturns:\n(list): A list containing all the Roles under the Identity\nnamespace.", "source": "codesearchnet"}
{"code": "def get_items_by_ids(self, item_ids, item_type=None):\n        \n        urls = [urljoin(self.item_url, F\"{i}.json\") for i in item_ids]\n        result = self._run_async(urls=urls)\n        items = [Item(r) for r in result if r]\n        if item_type:\n            return [item for item in items if item.item_type == item_type]\n        else:\n            return items", "docstring": "Given a list of item ids, return all the Item objects\n\nArgs:\nitem_ids (obj): List of item IDs to query\nitem_type (str): (optional) Item type to filter results with\n\nReturns:\nList of `Item` objects for given item IDs and given item type", "source": "juraj-google-style"}
{"code": "def parse_readable_time_str(time_str):\n\n    def parse_positive_float(value_str):\n        value = float(value_str)\n        if value < 0:\n            raise ValueError('Invalid time %s. Time value must be positive.' % value_str)\n        return value\n    time_str = time_str.strip()\n    if time_str.endswith('us'):\n        return int(parse_positive_float(time_str[:-2]))\n    elif time_str.endswith('ms'):\n        return int(parse_positive_float(time_str[:-2]) * 1000.0)\n    elif time_str.endswith('s'):\n        return int(parse_positive_float(time_str[:-1]) * 1000000.0)\n    return int(parse_positive_float(time_str))", "docstring": "Parses a time string in the format N, Nus, Nms, Ns.\n\nArgs:\ntime_str: (`str`) string consisting of an integer time value optionally\nfollowed by 'us', 'ms', or 's' suffix. If suffix is not specified,\nvalue is assumed to be in microseconds. (e.g. 100us, 8ms, 5s, 100).\n\nReturns:\nMicroseconds value.", "source": "github-repos"}
{"code": "def __init__(self, msg, exception_details=None):\n    message = '%s with exceptions %s' % (msg, exception_details)\n    super().__init__(message)\n    self.exception_details = exception_details", "docstring": "Class representing the errors thrown in the batch file operations.\nArgs:\nmsg: Message string for the exception thrown\nexception_details: Optional map of individual input to exception for\nfailed operations in batch. This parameter is optional so if specified\nthe user can assume that the all errors in the filesystem operation\nhave been reported. When the details are missing then the operation\nmay have failed anywhere so the user should use match to determine\nthe current state of the system.", "source": "github-repos"}
{"code": "def replace_in_file(filename: str, text_from: str, text_to: str) -> None:\n    log.info('Amending {}: {} -> {}', filename, repr(text_from), repr(text_to))\n    with open(filename) as infile:\n        contents = infile.read()\n    contents = contents.replace(text_from, text_to)\n    with open(filename, 'w') as outfile:\n        outfile.write(contents)", "docstring": "Replaces text in a file.\n\nArgs:\nfilename: filename to process (modifying it in place)\ntext_from: original text to replace\ntext_to: replacement text", "source": "codesearchnet"}
{"code": "async def client_event_handler(self, client_id, event_tuple, user_data):\n        \n\n        \n\n        conn_string, event_name, event = event_tuple\n\n        if event_name == 'report':\n            report = event.serialize()\n            report['encoded_report'] = base64.b64encode(report['encoded_report'])\n            msg_payload = dict(connection_string=conn_string, serialized_report=report)\n            msg_name = OPERATIONS.NOTIFY_REPORT\n        elif event_name == 'trace':\n            encoded_payload = base64.b64encode(event)\n            msg_payload = dict(connection_string=conn_string, payload=encoded_payload)\n            msg_name = OPERATIONS.NOTIFY_TRACE\n        elif event_name == 'progress':\n            msg_payload = dict(connection_string=conn_string, operation=event.get('operation'),\n                               done_count=event.get('finished'), total_count=event.get('total'))\n            msg_name = OPERATIONS.NOTIFY_PROGRESS\n        elif event_name == 'device_seen':\n            msg_payload = event\n            msg_name = OPERATIONS.NOTIFY_DEVICE_FOUND\n        elif event_name == 'broadcast':\n            report = event.serialize()\n            report['encoded_report'] = base64.b64encode(report['encoded_report'])\n            msg_payload = dict(connection_string=conn_string, serialized_report=report)\n            msg_name = OPERATIONS.NOTIFY_BROADCAST\n        else:\n            self._logger.debug(\"Not forwarding unknown event over websockets: %s\", event_tuple)\n            return\n\n        try:\n            self._logger.debug(\"Sending event %s: %s\", msg_name, msg_payload)\n            await self.server.send_event(user_data, msg_name, msg_payload)\n        except websockets.exceptions.ConnectionClosed:\n            self._logger.debug(\"Could not send notification because connection was closed for client %s\", client_id)", "docstring": "Forward an event on behalf of a client.\n\nThis method is called by StandardDeviceServer when it has an event that\nshould be sent to a client.\n\nArgs:\nclient_id (str): The client that we should send this event to\nevent_tuple (tuple): The conn_string, event_name and event\nobject passed from the call to notify_event.\nuser_data (object): The user data passed in the call to\n:meth:`setup_client`.", "source": "juraj-google-style"}
{"code": "def RegisterRecordType(cls, record_class):\n        \n\n        record_type = record_class.MatchType()\n        if record_type not in UpdateRecord.KNOWN_CLASSES:\n            UpdateRecord.KNOWN_CLASSES[record_type] = []\n\n        UpdateRecord.KNOWN_CLASSES[record_type].append(record_class)", "docstring": "Register a known record type in KNOWN_CLASSES.\n\nArgs:\nrecord_class (UpdateRecord): An update record subclass.", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data, **kwargs):\n        \n        \n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n        results = self._run_ccdr(data, verbose=self.verbose)\n        return nx.relabel_nodes(nx.DiGraph(results),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Apply causal discovery on observational data using CCDr.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the CCDR algorithm.", "source": "juraj-google-style"}
{"code": "def validate_instance(instance, options=None):\n    \n    if 'type' not in instance:\n        raise ValidationError(\"Input must be an object with a 'type' property.\")\n\n    if not options:\n        options = ValidationOptions()\n\n    error_gens = []\n\n    \n    if instance['type'] == 'bundle' and 'objects' in instance:\n        \n        for sdo in instance['objects']:\n            if 'type' not in sdo:\n                raise ValidationError(\"Each object in bundle must have a 'type' property.\")\n            error_gens += _schema_validate(sdo, options)\n    else:\n        error_gens += _schema_validate(instance, options)\n\n    \n    must_checks = _get_musts(options)\n    should_checks = _get_shoulds(options)\n    output.info(\"Running the following additional checks: %s.\"\n                % \", \".join(x.__name__ for x in chain(must_checks, should_checks)))\n    try:\n        errors = _iter_errors_custom(instance, must_checks, options)\n        warnings = _iter_errors_custom(instance, should_checks, options)\n\n        if options.strict:\n            chained_errors = chain(errors, warnings)\n            warnings = []\n        else:\n            chained_errors = errors\n            warnings = [pretty_error(x, options.verbose) for x in warnings]\n    except schema_exceptions.RefResolutionError:\n        raise SchemaInvalidError('Invalid JSON schema: a JSON reference '\n                                 'failed to resolve')\n\n    \n    \n    error_gens += [(chained_errors, '')]\n\n    \n    \n    error_list = []\n    for gen, prefix in error_gens:\n        for error in gen:\n            msg = prefix + pretty_error(error, options.verbose)\n            error_list.append(SchemaError(msg))\n\n    if error_list:\n        valid = False\n    else:\n        valid = True\n\n    return ObjectValidationResults(is_valid=valid, object_id=instance.get('id', ''),\n                                   errors=error_list, warnings=warnings)", "docstring": "Perform STIX JSON Schema validation against STIX input.\n\nFind the correct schema by looking at the 'type' property of the\n`instance` JSON object.\n\nArgs:\ninstance: A Python dictionary representing a STIX object with a\n'type' property.\noptions: ValidationOptions instance with validation options for this\nvalidation run.\n\nReturns:\nA dictionary of validation results", "source": "juraj-google-style"}
{"code": "def extract_string_pairs_in_ib_file(file_path, special_ui_components_prefix):\n    \n    try:\n        results = []\n        xmldoc = minidom.parse(file_path)\n\n        element_name_to_add_func = {'label': add_string_pairs_from_label_element,\n                                    'button': add_string_pairs_from_button_element,\n                                    'textField': add_string_pairs_from_text_field_element,\n                                    'textView': add_string_pairs_from_text_view_element}\n\n        for element_name in element_name_to_add_func:\n            add_func = element_name_to_add_func[element_name]\n            elements = xmldoc.getElementsByTagName(element_name)\n            for element in elements:\n                add_func(file_path, results, element, special_ui_components_prefix)\n\n        \n        jtl_brackets_find_results = re.findall(JTL_REGEX, open(file_path).read())\n        unescaped_jtl_brackets_find_results = [(unescape(x), unescape(y)) for (x, y) in jtl_brackets_find_results]\n        results += unescaped_jtl_brackets_find_results\n\n        if len(results) > 0:\n            results = [(None, os.path.basename(file_path))] + results\n        return results\n\n    except Exception, e:\n        logging.warn(\"ERROR: Error processing %s (%s: %s)\", file_path, type(e), str(e))\n        return []", "docstring": "Extract the strings pairs (key and comment) from a xib file.\n\nArgs:\nfile_path (str): The path to the xib file.\nspecial_ui_components_prefix (str):\nIf not None, extraction will not warn about internationalized UI components with this class prefix.\n\nReturns:\nlist: List of tuples representing the string pairs.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, fn, dataFormat = DataFormats.DEFAULT):\n    \n    Aggregator.__init__(self, name)\n    self.fn = fn", "docstring": "Creates a highlight aggregator - this will pick one of the values to highlight.\n\nArgs:\nname: The name of this aggregator.\nfn: Callable that takes (a, b) and returns True if b should be selected as the highlight, where as is the\nprevious chosen highlight.", "source": "juraj-google-style"}
{"code": "def line_distance_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):\n    \n    d1 = distance_similarity(p1a, p1b, p2a, T=T)\n    d2 = distance_similarity(p1a, p1b, p2b, T=T)\n    return abs(d1 + d2) * 0.5", "docstring": "Line distance similarity between two line segments\n\nArgs:\np1a ([float, float]): x and y coordinates. Line A start\np1b ([float, float]): x and y coordinates. Line A end\np2a ([float, float]): x and y coordinates. Line B start\np2b ([float, float]): x and y coordinates. Line B end\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "juraj-google-style"}
{"code": "def moma(self, wt_fluxes):\n        \n        reactions = set(self._adjustment_reactions())\n        v = self._v\n\n        obj_expr = 0\n        for f_reaction, f_value in iteritems(wt_fluxes):\n            if f_reaction in reactions:\n                \n                obj_expr += (f_value - v[f_reaction])**2\n\n        self._prob.set_objective(obj_expr)\n        self._solve(lp.ObjectiveSense.Minimize)", "docstring": "Minimize the redistribution of fluxes using Euclidean distance.\n\nMinimizing the redistribution of fluxes using a quadratic objective\nfunction. The distance is minimized by minimizing the sum of\n(wild type - knockout)^2.\n\nArgs:\nwt_fluxes: Dictionary of all the wild type fluxes that will be\nused to find a close MOMA solution. Fluxes can be expiremental\nor calculated using :meth: get_fba_flux(objective).", "source": "juraj-google-style"}
{"code": "def set_of_vars(arg_plot):\n    \n    sovs = set(tuple((var + '+').split('+')[:2])\n               for var in arg_plot.split(','))\n    sovs.discard(('', ''))\n    return sovs", "docstring": "Build set of needed field variables.\n\nEach var is a tuple, first component is a scalar field, second component is\neither:\n\n- a scalar field, isocontours are added to the plot.\n- a vector field (e.g. 'v' for the (v1,v2,v3) vector), arrows are added to\nthe plot.\n\nArgs:\narg_plot (str): string with variable names separated with\n``,`` (figures), and ``+`` (same plot).\nReturns:\nset of str: set of needed field variables.", "source": "juraj-google-style"}
{"code": "def _default_ising_beta_range(h, J):\n    \n    \n    abs_h = [abs(hh) for hh in h.values() if hh != 0]\n    abs_J = [abs(jj) for jj in J.values() if jj != 0]\n    abs_biases = abs_h + abs_J\n\n    if not abs_biases:\n        return [0.1, 1.0]\n\n    \n    min_delta_energy = min(abs_biases)\n\n    \n    abs_bias_dict = {k: abs(v) for k, v in h.items()}\n    for (k1, k2), v in J.items():\n        abs_bias_dict[k1] += abs(v)\n        abs_bias_dict[k2] += abs(v)\n\n    \n    max_delta_energy = max(abs_bias_dict.values())\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    hot_beta = np.log(2) / max_delta_energy\n    cold_beta = np.log(100) / min_delta_energy\n\n    return [hot_beta, cold_beta]", "docstring": "Determine the starting and ending beta from h J\n\nArgs:\nh (dict)\n\nJ (dict)\n\nAssume each variable in J is also in h.\n\nWe use the minimum bias to give a lower bound on the minimum energy gap, such at the\nfinal sweeps we are highly likely to settle into the current valley.", "source": "juraj-google-style"}
{"code": "def get_mac_dot_app_dir(directory):\n    \n    return os.path.dirname(os.path.dirname(os.path.dirname(directory)))", "docstring": "Returns parent directory of mac .app\n\nArgs:\n\ndirectory (str): Current directory\n\nReturns:\n\n(str): Parent directory of mac .app", "source": "juraj-google-style"}
{"code": "def index(self, text, terms=None, **kwargs):\n    self.clear()\n    terms = (terms or text.terms.keys())\n    pairs = combinations(terms, 2)\n    count = comb(len(terms), 2)\n    for (t1, t2) in bar(pairs, expected_size=count, every=1000):\n        score = text.score_braycurtis(t1, t2, **kwargs)\n        self.set_pair(t1, t2, score)", "docstring": "Index all term pair distances.\n\nArgs:\ntext (Text): The source text.\nterms (list): Terms to index.", "source": "codesearchnet"}
{"code": "def get_checkpoint_path(model_path):\n    if (os.path.basename(model_path) == model_path):\n        model_path = os.path.join('.', model_path)\n    if (os.path.basename(model_path) == 'checkpoint'):\n        assert tfv1.gfile.Exists(model_path), model_path\n        model_path = tf.train.latest_checkpoint(os.path.dirname(model_path))\n    new_path = model_path\n    if ('00000-of-00001' in model_path):\n        new_path = model_path.split('.data')[0]\n    elif model_path.endswith('.index'):\n        new_path = model_path.split('.index')[0]\n    if (new_path != model_path):\n        logger.info('Checkpoint path {} is auto-corrected to {}.'.format(model_path, new_path))\n        model_path = new_path\n    assert (tfv1.gfile.Exists(model_path) or tfv1.gfile.Exists((model_path + '.index'))), model_path\n    return model_path", "docstring": "Work around TF problems in checkpoint path handling.\n\nArgs:\nmodel_path: a user-input path\nReturns:\nstr: the argument that can be passed to NewCheckpointReader", "source": "codesearchnet"}
{"code": "def create_config(sections, section_contents):\n    (sections_length, section_contents_length) = (len(sections), len(section_contents))\n    if (sections_length != section_contents_length):\n        raise ValueError('Mismatch between argument lengths.\\nlen(sections) = {}\\nlen(section_contents) = {}'.format(sections_length, section_contents_length))\n    config = configparser.ConfigParser()\n    for (section, section_content) in zip(sections, section_contents):\n        config[section] = section_content\n    return config", "docstring": "Create a config file from the provided sections and key value pairs.\n\nArgs:\nsections (List[str]): A list of section keys.\nkey_value_pairs (Dict[str, str]): A list of of dictionaries. Must be as long as\nthe list of sections. That is to say, if there are two sections, there should be two\ndicts.\nReturns:\nconfigparser.ConfigParser: A ConfigParser.\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def Aggregated(self, request, global_params=None):\n    config = self.GetMethodConfig('Aggregated')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "List the jobs of a project across all regions. **Note:** This method doesn't support filtering the list of jobs by name.\n\nArgs:\nrequest: (DataflowProjectsJobsAggregatedRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListJobsResponse) The response message.", "source": "github-repos"}
{"code": "def _date_to_datetime(value):\n    if (not isinstance(value, datetime.date)):\n        raise TypeError(('Cannot convert to datetime expected date value; received %s' % value))\n    return datetime.datetime(value.year, value.month, value.day)", "docstring": "Convert a date to a datetime for Cloud Datastore storage.\n\nArgs:\nvalue: A datetime.date object.\n\nReturns:\nA datetime object with time set to 0:00.", "source": "codesearchnet"}
{"code": "def get_by_alias(self, alias):\n        \n        if alias not in self._aliases:\n            raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n\n        return self.get_by_index(self._aliases[alias])", "docstring": "Return a dataset by its alias.\n\nArgs:\nalias (str): The alias of the dataset that should be returned.\n\nRaises:\nDataInvalidAlias: If the alias does not represent a valid dataset.", "source": "juraj-google-style"}
{"code": "def PyParseJoinList(string, location, tokens):\n  \n  join_list = []\n  for token in tokens:\n    try:\n      join_list.append(str(token))\n    except UnicodeDecodeError:\n      join_list.append(repr(token))\n\n  tokens[0] = ''.join(join_list)\n  del tokens[1:]", "docstring": "Return a joined token from a list of tokens.\n\nThis is a callback method for pyparsing setParseAction that modifies\nthe returned token list to join all the elements in the list to a single\ntoken.\n\nArgs:\nstring (str): original string.\nlocation (int): location in the string where the match was made.\ntokens (list[str]): extracted tokens, where the string to be converted\nis stored.", "source": "juraj-google-style"}
{"code": "def rep(parser: Union[(Parser, Sequence[Input])]) -> RepeatedParser:\n    if isinstance(parser, str):\n        parser = lit(parser)\n    return RepeatedParser(parser)", "docstring": "Match a parser zero or more times repeatedly.\n\nThis matches ``parser`` multiple times in a row. A list is returned\ncontaining the value from each match. If there are no matches, an empty list\nis returned.\n\nArgs:\nparser: Parser or literal", "source": "codesearchnet"}
{"code": "def _normalize_pattern(pattern):\n        \n        if pattern.startswith('regex:'):\n            pattern_type = 'regex'\n            pattern = pattern[len('regex:'):]\n        elif pattern.startswith('wildcard:'):\n            pattern_type = 'wildcard'\n            pattern = pattern[len('wildcard:'):]\n        elif pattern.startswith('literal:'):\n            pattern_type = 'literal'\n            pattern = pattern[len('literal:'):]\n        elif RegexRoute.like(pattern):\n            pattern_type = 'regex'\n        elif WildcardRoute.like(pattern):\n            pattern_type = 'wildcard'\n        else:\n            pattern_type = 'literal'\n        return pattern_type, pattern", "docstring": "Return a normalized form of the pattern.\n\nNormalize the pattern by removing pattern type prefix if it\nexists in the pattern. Then return the pattern type and the\npattern as a tuple of two strings.\n\nArguments:\npattern (str): Route pattern to match request paths\n\nReturns:\ntuple: Ruple of pattern type (str) and pattern (str)", "source": "juraj-google-style"}
{"code": "def trunc_normal_tf_(tensor: torch.Tensor, mean: float=0.0, std: float=1.0, a: float=-2.0, b: float=2.0) -> torch.Tensor:\n    with torch.no_grad():\n        _trunc_normal_(tensor, 0, 1.0, a, b)\n        tensor.mul_(std).add_(mean)", "docstring": "Fills the input Tensor with values drawn from a truncated\nnormal distribution. The values are effectively drawn from the\nnormal distribution :math:`\\mathcal{N}(     ext{mean},      ext{std}^2)`\nwith values outside :math:`[a, b]` redrawn until they are within\nthe bounds. The method used for generating the random values works\nbest when :math:`a \\leq     ext{mean} \\leq b`.\n\nNOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the\nbounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0\nand the result is subsequently scaled and shifted by the mean and std args.\n\nArgs:\ntensor: an n-dimensional `torch.Tensor`\nmean: the mean of the normal distribution\nstd: the standard deviation of the normal distribution\na: the minimum cutoff value\nb: the maximum cutoff value", "source": "github-repos"}
{"code": "def serialize_example(transformed_json_data, info_dict):\n  \n  import six\n  import tensorflow as tf\n\n  def _make_int64_list(x):\n    return tf.train.Feature(int64_list=tf.train.Int64List(value=x))\n  def _make_bytes_list(x):\n    return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))\n  def _make_float_list(x):\n    return tf.train.Feature(float_list=tf.train.FloatList(value=x))\n\n  if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):\n    raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)),\n                     list(six.iterkeys(info_dict))))\n\n  ex_dict = {}\n  for name, info in six.iteritems(info_dict):\n    if info['dtype'] == tf.int64:\n      ex_dict[name] = _make_int64_list(transformed_json_data[name])\n    elif info['dtype'] == tf.float32:\n      ex_dict[name] = _make_float_list(transformed_json_data[name])\n    elif info['dtype'] == tf.string:\n      ex_dict[name] = _make_bytes_list(transformed_json_data[name])      \n    else:\n      raise ValueError('Unsupported data type %s' % info['dtype'])\n\n  ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))\n  return ex.SerializeToString()", "docstring": "Makes a serialized tf.example.\n\nArgs:\ntransformed_json_data: dict of transformed data.\ninfo_dict: output of feature_transforms.get_transfrormed_feature_info()\n\nReturns:\nThe serialized tf.example version of transformed_json_data.", "source": "juraj-google-style"}
{"code": "def handle_document_error(self, item_session: ItemSession) -> Actions:\n    self._waiter.increment()\n    self._statistics.errors[ServerError] += 1\n    action = self.handle_response(item_session)\n    if (action == Actions.NORMAL):\n        item_session.set_status(Status.error)\n    return action", "docstring": "Callback for when the document only describes an server error.\n\nReturns:\nA value from :class:`.hook.Actions`.", "source": "codesearchnet"}
{"code": "def sort_prefixes(orig, prefixes='@+'):\n    \n    new = ''\n    for prefix in prefixes:\n        if prefix in orig:\n            new += prefix\n    return new", "docstring": "Returns a sorted list of prefixes.\n\nArgs:\norig (str): Unsorted list of prefixes.\nprefixes (str): List of prefixes, from highest-priv to lowest.", "source": "juraj-google-style"}
{"code": "def send_tpu_embedding_gradients(inputs, config, learning_rates=None, name=None):\n    if learning_rates is None:\n        learning_rates = []\n    return gen_tpu_ops.send_tpu_embedding_gradients(inputs=inputs, learning_rates=learning_rates, config=config, name=name)", "docstring": "A placeholder op for feeding per-sample gradients to the embedding layer.\n\nArgs:\ninputs: A TensorList of gradients with which to update embedding tables.\nThis argument has the same length and shapes as the return value of\nRecvTPUEmbeddingActivations, but contains gradients of the model's loss\nwith respect to the embedding activations. The embedding tables are\nupdated from these gradients via the optimizers specified in the TPU\nembedding configuration given to tpu.initialize_system.\nconfig: Serialized TPUEmbeddingConfiguration proto.\nlearning_rates: A TensorList of float32 scalars, one for each dynamic\nlearning rate tag: see the comments in\n//third_party/tensorflow/core/protobuf/tpu/\noptimization_parameters.proto. Multiple tables can share the same\ndynamic learning rate tag as specified in the configuration. If the\nlearning rates for all tables are constant, this list should be empty.\nname: A name for the operation (optional).\n\nReturns:\nA SendTPUEmbeddingGradients operation.", "source": "github-repos"}
{"code": "def shift(self, time: int) -> 'Timeslot':\n    return Timeslot(self.interval.shift(time), self.channel)", "docstring": "Return a new Timeslot shifted by `time`.\n\nArgs:\ntime: time to be shifted", "source": "codesearchnet"}
{"code": "def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):\n    log.debug('{}: running protein receptor isolation...'.format(self.id))\n    if (not self.dockprep_path):\n        return ValueError('Please run dockprep')\n    receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))\n    receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))\n    prly_com = op.join(self.dock_dir, 'prly.com')\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):\n        with open(prly_com, 'w') as f:\n            f.write('open {}\\n'.format(self.dockprep_path))\n            keep_str = 'delete ~protein'\n            if keep_ligands:\n                keep_ligands = ssbio.utils.force_list(keep_ligands)\n                for res in keep_ligands:\n                    keep_str += ' & ~:{} '.format(res)\n            keep_str = (keep_str.strip() + '\\n')\n            f.write(keep_str)\n            f.write('write format mol2 0 {}\\n'.format(receptor_mol2))\n            f.write('delete element.H\\n')\n            f.write('write format pdb 0 {}\\n'.format(receptor_noh))\n        cmd = 'chimera --nogui {}'.format(prly_com)\n        os.system(cmd)\n        os.remove(prly_com)\n    if (ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh)):\n        self.receptormol2_path = receptor_mol2\n        self.receptorpdb_path = receptor_noh\n        log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))\n        log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))\n    else:\n        log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path))", "docstring": "Isolate the receptor by stripping everything except protein and specified ligands.\n\nArgs:\nkeep_ligands (str, list): Ligand(s) to keep in PDB file\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "codesearchnet"}
{"code": "def limit(self, accountID, **kwargs):\n        \n        return self.create(\n            accountID,\n            order=LimitOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to create a Limit Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a LimitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def unexpected_disconnect(self, conn_or_internal_id):\n        \n\n        data = {\n            'id': conn_or_internal_id\n        }\n\n        action = ConnectionAction('force_disconnect', data, sync=False)\n        self._actions.put(action)", "docstring": "Notify that there was an unexpected disconnection of the device.\n\nAny in progress operations are canceled cleanly and the device is transitioned\nto a disconnected state.\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id", "source": "juraj-google-style"}
{"code": "def copy_update(pb_message, **kwds):\n    result = pb_message.__class__()\n    result.CopyFrom(pb_message)\n    for (k, v) in kwds.items():\n        setattr(result, k, v)\n    return result", "docstring": "Returns a copy of the PB object, with some fields updated.\n\nArgs:\npb_message:\n**kwds:\n\nReturns:", "source": "codesearchnet"}
{"code": "def _extract_nn_info(self, structure, nns):\n    if (self.targets is None):\n        targets = structure.composition.elements\n    else:\n        targets = self.targets\n    siw = []\n    max_weight = max((nn[self.weight] for nn in nns.values()))\n    for nstats in nns.values():\n        site = nstats['site']\n        if ((nstats[self.weight] > (self.tol * max_weight)) and self._is_in_targets(site, targets)):\n            nn_info = {'site': site, 'image': self._get_image(structure, site), 'weight': (nstats[self.weight] / max_weight), 'site_index': self._get_original_site(structure, site)}\n            if self.extra_nn_info:\n                poly_info = nstats\n                del poly_info['site']\n                nn_info['poly_info'] = poly_info\n            siw.append(nn_info)\n    return siw", "docstring": "Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors\n\nArgs:\nstructure (Structure): Structure being evaluated\nnns ([dicts]): Nearest neighbor information for a structure\nReturns:\n(list of tuples (Site, array, float)): See nn_info", "source": "codesearchnet"}
{"code": "def orthologize(ast, bo, species_id: str):\n    if (not species_id):\n        bo.validation_messages.append(('WARNING', 'No species id was provided for orthologization'))\n        return ast\n    if isinstance(ast, NSArg):\n        if ast.orthologs:\n            if ast.orthologs.get(species_id, None):\n                orthologized_nsarg_val = ast.orthologs[species_id]['decanonical']\n                (ns, value) = orthologized_nsarg_val.split(':')\n                ast.change_nsvalue(ns, value)\n                ast.canonical = ast.orthologs[species_id]['canonical']\n                ast.decanonical = ast.orthologs[species_id]['decanonical']\n                ast.orthologized = True\n                bo.ast.species.add((species_id, ast.orthologs[species_id]['species_label']))\n            else:\n                bo.ast.species.add((ast.species_id, ast.species_label))\n                bo.validation_messages.append(('WARNING', f'No ortholog found for {ast.namespace}:{ast.value}'))\n        elif ast.species_id:\n            bo.ast.species.add((ast.species_id, ast.species_label))\n    if hasattr(ast, 'args'):\n        for arg in ast.args:\n            orthologize(arg, bo, species_id)\n    return ast", "docstring": "Recursively orthologize BEL Entities in BEL AST using API endpoint\n\nNOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog)\n\nArgs:\nast (BEL): BEL AST\nendpoint (str): endpoint url with a placeholder for the term_id\n\nReturns:\nBEL: BEL AST", "source": "codesearchnet"}
{"code": "def CreateSmartCampaign(client, budget_id, merchant_id):\n    campaign_service = client.GetService('CampaignService', version='v201809')\n    campaign = {'name': ('Shopping campaign \n    campaign_operations = [{'operator': 'ADD', 'operand': campaign}]\n    result = campaign_service.mutate(campaign_operations)['value'][0]\n    print(('Smart Shopping campaign with name \"%s\" and ID \"%s\" was added.' % (result['name'], result['id'])))\n    return result['id']", "docstring": "Adds a new Smart Shopping campaign.\n\nArgs:\nclient: an AdWordsClient instance.\nbudget_id: the str ID of the budget to be associated with the Shopping\ncampaign.\nmerchant_id: the str ID of the merchant account to be associated with the\nShopping campaign.\nReturns:\nA campaign ID.", "source": "codesearchnet"}
{"code": "def update_course(self, course, enterprise_customer, enterprise_context):\n    course['course_runs'] = self.update_course_runs(course_runs=(course.get('course_runs') or []), enterprise_customer=enterprise_customer, enterprise_context=enterprise_context)\n    marketing_url = course.get('marketing_url')\n    if marketing_url:\n        query_parameters = dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer))\n        course.update({'marketing_url': utils.update_query_parameters(marketing_url, query_parameters)})\n    course.update(enterprise_context)\n    return course", "docstring": "Update course metadata of the given course and return updated course.\n\nArguments:\ncourse (dict): Course Metadata returned by course catalog API\nenterprise_customer (EnterpriseCustomer): enterprise customer instance.\nenterprise_context (dict): Enterprise context to be added to course runs and URLs..\n\nReturns:\n(dict): Updated course metadata", "source": "codesearchnet"}
{"code": "def beam_row_from_dict(row: dict, schema):\n    if not isinstance(schema, (bigquery.TableSchema, bigquery.TableFieldSchema)):\n        schema = get_bq_tableschema(schema)\n    beam_row = {}\n    for field in schema.fields:\n        name = field.name\n        mode = field.mode.upper()\n        type = field.type.upper()\n        if name not in row and mode != 'REQUIRED':\n            row[name] = None\n        value = row[name]\n        if type in ['RECORD', 'STRUCT'] and value:\n            if mode == 'REPEATED':\n                list_of_beam_rows = []\n                for record in value:\n                    list_of_beam_rows.append(beam_row_from_dict(record, field))\n                beam_row[name] = list_of_beam_rows\n            else:\n                beam_row[name] = beam_row_from_dict(value, field)\n        else:\n            beam_row[name] = value\n    return apache_beam.pvalue.Row(**beam_row)", "docstring": "Converts a dictionary row to a Beam Row.\nNested records and lists are supported.\n\nArgs:\nrow (dict):\nThe row to convert.\nschema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):\nThe table schema. Will be used to help convert the row.\n\nReturns:\n~apache_beam.pvalue.Row: The converted row.", "source": "github-repos"}
{"code": "def match(self, message: Message) -> bool:\n    if self.template:\n        return self.template.match(message)\n    return True", "docstring": "Matches a message with the behaviour's template\n\nArgs:\nmessage(spade.message.Message): the message to match with\n\nReturns:\nbool: wheter the messaged matches or not", "source": "codesearchnet"}
{"code": "def write_data(worksheet, data):\n    \n    if not data:\n        return\n\n    if isinstance(data, list):\n        rows = data\n    else:\n        rows = [data]\n\n    if isinstance(rows[0], dict):\n        keys = get_keys(rows)\n        worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])\n        for row in rows:\n            values = [get_value_from_row(row, key) for key in keys]\n            worksheet.append(values)\n    elif isinstance(rows[0], list):\n        for row in rows:\n            values = [utilities.normalize_cell_value(value) for value in row]\n            worksheet.append(values)\n    else:\n        for row in rows:\n            worksheet.append([utilities.normalize_cell_value(row)])", "docstring": "Writes data into worksheet.\n\nArgs:\nworksheet: worksheet to write into\ndata: data to be written", "source": "juraj-google-style"}
{"code": "def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):\n    \n    assert isinstance(crop_size, int), crop_size\n    boxes = tf.stop_gradient(boxes)\n\n    \n    if pad_border:\n        \n        image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')\n        boxes = boxes + 1\n\n    @under_name_scope()\n    def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):\n        \n        x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)\n\n        spacing_w = (x1 - x0) / tf.cast(crop_shape[1], tf.float32)\n        spacing_h = (y1 - y0) / tf.cast(crop_shape[0], tf.float32)\n\n        imshape = [tf.cast(image_shape[0] - 1, tf.float32), tf.cast(image_shape[1] - 1, tf.float32)]\n        nx0 = (x0 + spacing_w / 2 - 0.5) / imshape[1]\n        ny0 = (y0 + spacing_h / 2 - 0.5) / imshape[0]\n\n        nw = spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]\n        nh = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]\n\n        return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)\n\n    \n    \n    \n    \n    \n    \n    \n    \n\n    image_shape = tf.shape(image)[2:]\n    boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])\n    image = tf.transpose(image, [0, 2, 3, 1])   \n    ret = tf.image.crop_and_resize(\n        image, boxes, tf.cast(box_ind, tf.int32),\n        crop_size=[crop_size, crop_size])\n    ret = tf.transpose(ret, [0, 3, 1, 2])   \n    return ret", "docstring": "Aligned version of tf.image.crop_and_resize, following our definition of floating point boxes.\n\nArgs:\nimage: NCHW\nboxes: nx4, x1y1x2y2\nbox_ind: (n,)\ncrop_size (int):\nReturns:\nn,C,size,size", "source": "juraj-google-style"}
{"code": "def cn_occupation_energy( self, delta_occupation=None ):\n        \n        nn_occupations = self.site_specific_nn_occupation()\n        if delta_occupation:\n            for site in delta_occupation:\n                assert( site in nn_occupations )\n                nn_occupations[ site ] += delta_occupation[ site ]\n        return sum( [ self.cn_occupation_energies[ s ][ n ] for s, n in nn_occupations.items() ] )", "docstring": "The coordination-number dependent energy for this site.\n\nArgs:\ndelta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }.\nIf this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None\n\nReturns:\n(Float): The coordination-number dependent energy for this site.", "source": "juraj-google-style"}
{"code": "def set_optimizer_experimental_options(self, options):\n    self._optimizer_experimental_options.update(options)\n    self._thread_local_data.function_call_options = None", "docstring": "Set experimental options for the optimizer.\n\nArgs:\noptions: Dictionary of options to modify", "source": "github-repos"}
{"code": "def update(self, span: typing.Tuple[(int, int)], line_type: LineType) -> None:\n    (first_block_line, last_block_line) = span\n    for i in range(first_block_line, (last_block_line + 1)):\n        try:\n            self.__setitem__(i, line_type)\n        except ValueError as error:\n            raise ValidationError((i + self.fn_offset), 1, 'AAA99 {}'.format(error))", "docstring": "Updates line types for a block's span.\n\nArgs:\nspan: First and last relative line number of a Block.\nline_type: The type of line to update to.\n\nRaises:\nValidationError: A special error on collision. This prevents Flake8\nfrom crashing because it is converted to a Flake8 error tuple,\nbut it indicates to the user that something went wrong with\nprocessing the function.", "source": "codesearchnet"}
{"code": "def torus(script, major_radius=3.0, minor_radius=1.0, inner_diameter=None, outer_diameter=None, major_segments=48, minor_segments=12, color=None):\n    if ((inner_diameter is not None) and (outer_diameter is not None)):\n        major_radius = ((inner_diameter + outer_diameter) / 4)\n        minor_radius = (major_radius - (inner_diameter / 2))\n    filter_xml = ''.join(['  <filter name=\"Torus\">\\n', '    <Param name=\"hRadius\" ', ('value=\"%s\" ' % major_radius), 'description=\"Horizontal Radius\" ', 'type=\"RichFloat\" ', '/>\\n', '    <Param name=\"vRadius\" ', ('value=\"%s\" ' % minor_radius), 'description=\"Vertical Radius\" ', 'type=\"RichFloat\" ', '/>\\n', '    <Param name=\"hSubdiv\" ', ('value=\"%d\" ' % major_segments), 'description=\"Horizontal Subdivision\" ', 'type=\"RichInt\" ', '/>\\n', '    <Param name=\"vSubdiv\" ', ('value=\"%d\" ' % minor_segments), 'description=\"Vertical Subdivision\" ', 'type=\"RichInt\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    if isinstance(script, FilterScript):\n        script.add_layer('Torus', change_layer=True)\n    if (color is not None):\n        vert_color.function(script, color=color)\n    return None", "docstring": "Create a torus mesh\n\nArgs:\nmajor_radius (float, (optional)): radius from the origin to the\ncenter of the cross sections\nminor_radius (float, (optional)): radius of the torus cross\nsection\ninner_diameter (float, (optional)): inner diameter of torus. If\nboth inner_diameter and outer_diameter are provided then\nthese will override major_radius and minor_radius.,\nouter_diameter (float, (optional)): outer diameter of torus. If\nboth inner_diameter and outer_diameter are provided then\nthese will override major_radius and minor_radius.\nmajor_segments (int (optional)): number of segments for the main\nring of the torus\nminor_segments (int (optional)): number of segments for the minor\nring of the torus\ncolor (str (optional)): color name to apply vertex colors to the\nnewly created mesh\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def validate(tmpl, path):\n    pattern = (_generate_pattern_for_template(tmpl) + '$')\n    return (True if (re.match(pattern, path) is not None) else False)", "docstring": "Validate a path against the path template.\n\n.. code-block:: python\n\n>>> validate('users/*/messages/*', 'users/me/messages/123')\nTrue\n>>> validate('users/*/messages/*', 'users/me/drafts/123')\nFalse\n>>> validate('/v1/{name=shelves/*/books/*}', /v1/shelves/1/books/3)\nTrue\n>>> validate('/v1/{name=shelves/*/books/*}', /v1/shelves/1/tapes/3)\nFalse\n\nArgs:\ntmpl (str): The path template.\npath (str): The expanded path.\n\nReturns:\nbool: True if the path matches.", "source": "codesearchnet"}
{"code": "def format_image_annotations_as_coco(image_id: str, categories: list[int], areas: list[float], bboxes: list[tuple[float]]) -> dict:\n    annotations = []\n    for category, area, bbox in zip(categories, areas, bboxes):\n        formatted_annotation = {'image_id': image_id, 'category_id': category, 'iscrowd': 0, 'area': area, 'bbox': list(bbox)}\n        annotations.append(formatted_annotation)\n    return {'image_id': image_id, 'annotations': annotations}", "docstring": "Format one set of image annotations to the COCO format\n\nArgs:\nimage_id (str): image id. e.g. \"0001\"\ncategories (List[int]): list of categories/class labels corresponding to provided bounding boxes\nareas (List[float]): list of corresponding areas to provided bounding boxes\nbboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format\n([center_x, center_y, width, height] in absolute coordinates)\n\nReturns:\ndict: {\n\"image_id\": image id,\n\"annotations\": list of formatted annotations\n}", "source": "github-repos"}
{"code": "def wait(animation='elipses', text='', speed=0.2):\n\n    def decorator(func):\n        func.animation = animation\n        func.speed = speed\n        func.text = text\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            animation = func.animation\n            text = func.text\n            if ((not isinstance(animation, (list, tuple))) and (not hasattr(animations, animation))):\n                text = (animation if (text == '') else text)\n                animation = 'elipses'\n            wait = Wait(animation=animation, text=text, speed=func.speed)\n            wait.start()\n            try:\n                ret = func(*args, **kwargs)\n            finally:\n                wait.stop()\n            sys.stdout.write('\\n')\n            return ret\n        return wrapper\n    return decorator", "docstring": "Decorator for adding wait animation to long running\nfunctions.\n\nArgs:\nanimation (str, tuple): String reference to animation or tuple\nwith custom animation.\nspeed (float): Number of seconds each cycle of animation.\n\nExamples:\n>>> @animation.wait('bar')\n>>> def long_running_function():\n>>>     ... 5 seconds later ...\n>>>     return", "source": "codesearchnet"}
{"code": "def clear(self, name=None):\n    if name is None:\n        name = '%s_clear' % self._name\n    return gen_data_flow_ops.stage_clear(name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)", "docstring": "Clears the staging area.\n\nArgs:\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"}
{"code": "def asserts_truth(func):\n    if re.match('_[^_]', func.__name__):\n        raise AttributeError('@asserts_truth may not be applied to methods beginning with \"_\".')\n\n    def AssertThat(*args, **kwargs):\n        try:\n            return func(*args, **kwargs)\n        except TruthAssertionError as truth_assertion:\n            if hasattr(truth_assertion, 'with_traceback'):\n                truth_assertion.with_traceback(None)\n                raise truth_assertion\n            raise\n    return AssertThat", "docstring": "Decorator for every public method that might raise TruthAssertionError.\n\nArgs:\nfunc: the function to be decorated.\n\nReturns:\nThe decorated function. In Python 2, the function behaves identically.\nOtherwise, if that function raises a TruthAssertionError, then that error\nis re-raised with a modified, minimal traceback.\n\nRaises:\nAttributeError: if attempted to be applied to a method whose name begins\nwith a single '_'. This decorator's purpose is to reduce the traceback\ndepth of exceptions raised by nested calls in this library, so that the\nfailing assertion has only two frames: the original AssertThat() call,\nand the \"raise truth_assertion\" in the decorated function.\nAnnotating inner method calls is contrary to that goal.", "source": "github-repos"}
{"code": "def _import_module_by_name(self, module_name) -> _AST | None:\n    existing = self._modules.get_existing_ast(module_name)\n    if existing:\n        return existing\n    assert path_utils.sep not in module_name, (path_utils.sep, module_name)\n    log.debug('Trying to import %r', module_name)\n    mod = self._load_builtin('builtins', module_name)\n    if mod:\n        return mod\n    mod_ast = None\n    default = None\n    mod_info = self._module_loader.find_import(module_name)\n    if mod_info:\n        if mod_info.file_exists:\n            mod_ast = self.load_module(mod_info)\n            assert mod_ast is not None, mod_info.filename\n        else:\n            mod_ast = self._create_empty(mod_info)\n        if mod_info.is_default_pyi():\n            default = self._modules.get(module_name)\n            del self._modules[module_name]\n        elif module_name in _ALWAYS_PREFER_TYPESHED:\n            del self._modules[module_name]\n        else:\n            return mod_ast\n    mod = self._load_builtin('stdlib', module_name)\n    if mod:\n        return mod\n    mod = self._load_builtin('third_party', module_name)\n    if mod:\n        return mod\n    if mod_ast:\n        assert default\n        self._modules[module_name] = default\n        return mod_ast\n    return None", "docstring": "Load a name like 'sys' or 'foo.bar.baz'.\n\nArgs:\nmodule_name: The name of the module. May contain dots.\n\nReturns:\nThe parsed file, instance of pytd.TypeDeclUnit, or None if we\nthe module wasn't found.", "source": "github-repos"}
{"code": "def CreateClass(cls, data_type_definition):\n    \n    cls._ValidateDataTypeDefinition(data_type_definition)\n\n    class_definition = cls._CreateClassTemplate(data_type_definition)\n\n    namespace = {\n        '__builtins__' : {\n            'object': builtins.object,\n            'super': builtins.super},\n        '__name__': '{0:s}'.format(data_type_definition.name)}\n\n    if sys.version_info[0] >= 3:\n      \n      namespace['__builtins__']['__build_class__'] = builtins.__build_class__\n\n    exec(class_definition, namespace)  \n\n    return namespace[data_type_definition.name]", "docstring": "Creates a new structure values class.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nclass: structure values class.", "source": "juraj-google-style"}
{"code": "def parse(source):\n    \n    if isinstance(source, str):\n        return parse_stream(six.StringIO(source))\n    else:\n        return parse_stream(source)", "docstring": "Parses source code returns an array of instructions suitable for\noptimization and execution by a Machine.\n\nArgs:\nsource: A string or stream containing source code.", "source": "juraj-google-style"}
{"code": "def load_genes(path):\n    \n    \n    with open(path, 'rt') as f:\n        lines = [ x.split('\\t')[:2] for x in f if not x.startswith('hgnc') ]\n    \n    transcripts = {}\n    for symbol, tx in lines:\n        if symbol not in transcripts:\n            transcripts[symbol] = []\n        transcripts[symbol].append(tx)\n    \n    return transcripts", "docstring": "load a file listing gene and transcript IDs\n\nArgs:\npath: path to file containing gene IDs and transcript IDs e.g.\ngene_1    transcript_1.1    length_1    denovo_count\ngene_2    transcript_2.1    length_3    denovo_count\n\nReturns:\ndict of transcripts eg {'CTC1': [\"ENST00000315684\", \"ENST00000485511\"]}", "source": "juraj-google-style"}
{"code": "def delete(self, customer_id, token_id, data={}, **kwargs):\n        \n        url = \"{}/{}/tokens/{}\".format(self.base_url, customer_id, token_id)\n        return self.delete_url(url, data, **kwargs)", "docstring": "Delete Given Token For a Customer\n\nArgs:\ncustomer_id : Customer Id for which tokens have to be deleted\ntoken_id    : Id for which TOken object has to be deleted\nReturns:\nDict for deleted token", "source": "juraj-google-style"}
{"code": "def delete(self, resource_id):\n    endpoint = '{}/{}'.format(self.endpoint, resource_id)\n    response = self.api.execute('DELETE', endpoint)\n    if (not response.ok):\n        raise Error.parse(response.json())\n    return self._cls.parse(response.json())", "docstring": "Deletes an existing resource\n\nArgs:\nresource_id - int - The resource ID to be deleted", "source": "codesearchnet"}
{"code": "def dict_of_sets_add(dictionary, key, value):\n    set_objs = dictionary.get(key, set())\n    set_objs.add(value)\n    dictionary[key] = set_objs", "docstring": "Add value to a set in a dictionary by key\n\nArgs:\ndictionary (DictUpperBound): Dictionary to which to add values\nkey (Any): Key within dictionary\nvalue (Any): Value to add to set in dictionary\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def validate_all_keys(obj_name, obj, validation_fun):\n    \n    for key, value in obj.items():\n        validation_fun(obj_name, key)\n        if isinstance(value, dict):\n            validate_all_keys(obj_name, value, validation_fun)", "docstring": "Validate all (nested) keys in `obj` by using `validation_fun`.\n\nArgs:\nobj_name (str): name for `obj` being validated.\nobj (dict): dictionary object.\nvalidation_fun (function): function used to validate the value\nof `key`.\n\nReturns:\nNone: indicates validation successful\n\nRaises:\nValidationError: `validation_fun` will raise this error on failure", "source": "juraj-google-style"}
{"code": "def path_status(path, filename='', status=None, verbosity=0):\n    \n    status = status or {}\n    if not filename:\n        dir_path, filename = os.path.split()  \n    else:\n        dir_path = path\n    full_path = os.path.join(dir_path, filename)\n    if verbosity > 1:\n        print(full_path)\n    status['name'] = filename\n    status['path'] = full_path\n    status['dir'] = dir_path\n    status['type'] = []\n    try:\n        status['size'] = os.path.getsize(full_path)\n        status['accessed'] = datetime.datetime.fromtimestamp(os.path.getatime(full_path))\n        status['modified'] = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))\n        status['created'] = datetime.datetime.fromtimestamp(os.path.getctime(full_path))\n        status['mode'] = os.stat(full_path).st_mode   \n        if os.path.ismount(full_path):\n            status['type'] += ['mount-point']\n        elif os.path.islink(full_path):\n            status['type'] += ['symlink']\n        if os.path.isfile(full_path):\n            status['type'] += ['file']\n        elif os.path.isdir(full_path):\n            status['type'] += ['dir']\n        if not status['type']:\n            if os.stat.S_ISSOCK(status['mode']):\n                status['type'] += ['socket']\n            elif os.stat.S_ISCHR(status['mode']):\n                status['type'] += ['special']\n            elif os.stat.S_ISBLK(status['mode']):\n                status['type'] += ['block-device']\n            elif os.stat.S_ISFIFO(status['mode']):\n                status['type'] += ['pipe']\n        if not status['type']:\n            status['type'] += ['unknown']\n        elif status['type'] and status['type'][-1] == 'symlink':\n            status['type'] += ['broken']\n    except OSError:\n        status['type'] = ['nonexistent'] + status['type']\n        if verbosity > -1:\n            warnings.warn(\"Unable to stat path '{}'\".format(full_path))\n    status['type'] = '->'.join(status['type'])\n\n    return status", "docstring": "Retrieve the access, modify, and create timetags for a path along with its size\n\nArguments:\npath (str): full path to the file or directory to be statused\nstatus (dict): optional existing status to be updated/overwritten with new status values\n\nReturns:\ndict: {'size': bytes (int), 'accessed': (datetime), 'modified': (datetime), 'created': (datetime)}", "source": "juraj-google-style"}
{"code": "def _PrintTasksInformation(self, storage_reader):\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Tasks')\n    for (task_start, _) in storage_reader.GetSessions():\n        start_time = timelib.Timestamp.CopyToIsoFormat(task_start.timestamp)\n        task_identifier = uuid.UUID(hex=task_start.identifier)\n        task_identifier = '{0!s}'.format(task_identifier)\n        table_view.AddRow([task_identifier, start_time])\n    table_view.Write(self._output_writer)", "docstring": "Prints information about the tasks.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, attention_masks: Optional[torch.BoolTensor]=None, position_embeddings: Optional[torch.FloatTensor]=None) -> Tuple[torch.FloatTensor, torch.FloatTensor]:\n    if attention_masks.dim() == 3 and attention_masks.shape[0] == hidden_states.shape[0]:\n        attention_masks = attention_masks[:, None, :, :]\n        attention_masks = attention_masks.repeat(1, self.num_heads, 1, 1)\n        dtype = hidden_states.dtype\n        attention_masks = attention_masks.to(dtype=dtype)\n        attention_masks = (1.0 - attention_masks) * torch.finfo(dtype).min\n    queries = keys = self.with_pos_embed(hidden_states, position_embeddings)\n    attention_output, attention_weights = self.self_attn(queries=queries, keys=keys, values=hidden_states, attention_mask=attention_masks, output_attentions=True)\n    attention_output = nn.functional.dropout(attention_output, p=self.dropout, training=self.training)\n    hidden_states = hidden_states + attention_output\n    hidden_states = self.layer_norm_before(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = hidden_states + residual\n    hidden_states = self.layer_norm_after(hidden_states)\n    return (hidden_states, attention_weights)", "docstring": "Text self-attention to enhance projection of text features generated by\nthe text encoder (AutoModel based on text_config) within GroundingDinoEncoderLayer\n\nArgs:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`):\nText features generated by the text encoder.\nattention_masks (`torch.BoolTensor`, *optional*):\nAttention mask for text self-attention. False for real tokens and True for padding tokens.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings to be added to the hidden states.\n\nReturns:\n`tuple(torch.FloatTensor)` comprising two elements:\n- **hidden_states** (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) --\nOutput of the text self-attention layer.\n- **attention_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length,\nsequence_length)`) --\nAttention weights of the text self-attention layer.", "source": "github-repos"}
{"code": "def _getH2singleTrait(self, K, verbose=None):\n        \n        verbose = dlimix.getVerbose(verbose)\n        \n        varg  = sp.zeros(self.P)\n        varn  = sp.zeros(self.P)\n        fixed = sp.zeros((1,self.P))\n\n        for p in range(self.P):\n            y = self.Y[:,p:p+1]\n            \n            I  = sp.isnan(y[:,0])\n            if I.sum()>0:\n                y  = y[~I,:]\n                _K = K[~I,:][:,~I]\n            else:\n                _K  = copy.copy(K)\n            lmm = dlimix.CLMM()\n            lmm.setK(_K)\n            lmm.setSNPs(sp.ones((y.shape[0],1)))\n            lmm.setPheno(y)\n            lmm.setCovs(sp.zeros((y.shape[0],1)))\n            lmm.setVarcompApprox0(-20, 20, 1000)\n            lmm.process()\n            delta = sp.exp(lmm.getLdelta0()[0,0])\n            Vtot  = sp.exp(lmm.getLSigma()[0,0])\n\n            varg[p] = Vtot\n            varn[p] = delta*Vtot\n            fixed[:,p] = lmm.getBetaSNP()\n\n            if verbose: print(p)\n\n        sth = {}\n        sth['varg']  = varg\n        sth['varn']  = varn\n        sth['fixed'] = fixed\n\n        return sth", "docstring": "Internal function for parameter initialization\nestimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise)\nArgs:\nK:        covariance matrix of the non-noise random effect term", "source": "juraj-google-style"}
{"code": "def insert_system(cur, system_name, encoded_data=None):\n    if (encoded_data is None):\n        encoded_data = {}\n    if ('system_name' not in encoded_data):\n        encoded_data['system_name'] = system_name\n    insert = 'INSERT OR IGNORE INTO system(system_name) VALUES (:system_name);'\n    cur.execute(insert, encoded_data)", "docstring": "Insert a system name into the cache.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\nsystem_name (str):\nThe unique name of a system\n\nencoded_data (dict, optional):\nIf a dictionary is provided, it will be populated with the serialized data. This is\nuseful for preventing encoding the same information many times.", "source": "codesearchnet"}
{"code": "def energy_prof(step):\n    \n    diff, rad = diffs_prof(step)\n    adv, _ = advts_prof(step)\n    return (diff + np.append(adv, 0)), rad", "docstring": "Energy flux.\n\nThis computation takes sphericity into account if necessary.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the energy flux and the radial position\nat which it is evaluated.", "source": "juraj-google-style"}
{"code": "def trace_read(self, offset, num_items):\n    buf_size = ctypes.c_uint32(num_items)\n    buf = (structs.JLinkTraceData * num_items)()\n    res = self._dll.JLINKARM_TRACE_Read(buf, int(offset), ctypes.byref(buf_size))\n    if (res == 1):\n        raise errors.JLinkException('Failed to read from trace buffer.')\n    return list(buf)[:int(buf_size.value)]", "docstring": "Reads data from the trace buffer and returns it.\n\nArgs:\nself (JLink): the ``JLink`` instance.\noffset (int): the offset from which to start reading from the trace\nbuffer.\nnum_items (int): number of items to read from the trace buffer.\n\nReturns:\nA list of ``JLinkTraceData`` instances corresponding to the items\nread from the trace buffer.  Note that this list may have size less\nthan ``num_items`` in the event that there are not ``num_items``\nitems in the trace buffer.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):\n    self.set_status(status)\n    self.set_content('')\n    self.set_header(HttpResponseHeaders.LOCATION, url)", "docstring": "Helper method to set a redirect response.\n\nArgs:\nurl (:obj:`str`): URL to redirect to\nstatus (:obj:`str`, optional): Status code of the response", "source": "codesearchnet"}
{"code": "def _check_keyword_parentheses(self, tokens, start):\n    if (self._inside_brackets(':') and (tokens[start][1] == 'for')):\n        self._pop_token()\n    if (tokens[(start + 1)][1] != '('):\n        return\n    found_and_or = False\n    depth = 0\n    keyword_token = str(tokens[start][1])\n    line_num = tokens[start][2][0]\n    for i in range(start, (len(tokens) - 1)):\n        token = tokens[i]\n        if (token[0] == tokenize.NL):\n            return\n        if (token[1] == '('):\n            depth += 1\n        elif (token[1] == ')'):\n            depth -= 1\n            if depth:\n                continue\n            if ((tokens[(i + 1)][1] in (':', ')', ']', '}', 'in')) or (tokens[(i + 1)][0] in (tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT))):\n                if (i == (start + 2)):\n                    return\n                if (keyword_token == 'not'):\n                    if (not found_and_or):\n                        self.add_message('superfluous-parens', line=line_num, args=keyword_token)\n                elif (keyword_token in ('return', 'yield')):\n                    self.add_message('superfluous-parens', line=line_num, args=keyword_token)\n                elif (keyword_token not in self._keywords_with_parens):\n                    if (not found_and_or):\n                        self.add_message('superfluous-parens', line=line_num, args=keyword_token)\n            return\n        elif (depth == 1):\n            if (token[1] == ','):\n                return\n            if (token[1] in ('and', 'or')):\n                found_and_or = True\n            elif (token[1] == 'yield'):\n                return\n            elif (token[1] == 'for'):\n                return", "docstring": "Check that there are not unnecessary parens after a keyword.\n\nParens are unnecessary if there is exactly one balanced outer pair on a\nline, and it is followed by a colon, and contains no commas (i.e. is not a\ntuple).\n\nArgs:\ntokens: list of Tokens; the entire list of Tokens.\nstart: int; the position of the keyword in the token list.", "source": "codesearchnet"}
{"code": "def user_list_membership(self, username, member_type='USER', recursive=True, max_return_count=999):\n    return self.client.service.getUserListMembership(username, member_type, recursive, max_return_count, self.proxy_id)", "docstring": "Get info for lists a user is a member of.\n\nThis is similar to :meth:`user_lists` but with a few differences:\n\n#. It returns list info objects instead of list names.\n#. It has an option to fully resolve a user's list hierarchy. That\nis, if a user is a member of a nested list, this method can\nretrieve both the nested list and the parent lists that contain\nthe nested list.\n\nArgs:\nusername (str): The MIT username of the user\nmember_type(str): The type of user, \"USER\" or \"STRING\"\nrecursive(bool): Whether to fully resolve the list hierarchy\nmax_return_count(int): limit the number of items returned\n\nReturns:\nlist of dicts: info dicts, one per list.", "source": "codesearchnet"}
{"code": "def handle_run_exception(self, pipeline_key, pipeline_func, e):\n    if isinstance(e, Retry):\n        retry_message = str(e)\n        logging.warning('User forced retry for pipeline ID \"%s\" of %r: %s', pipeline_key.name(), pipeline_func, retry_message)\n        self.transition_retry(pipeline_key, retry_message)\n    elif isinstance(e, Abort):\n        abort_message = str(e)\n        logging.warning('User forced abort for pipeline ID \"%s\" of %r: %s', pipeline_key.name(), pipeline_func, abort_message)\n        pipeline_func.abort(abort_message)\n    else:\n        retry_message = ('%s: %s' % (e.__class__.__name__, str(e)))\n        logging.exception('Generator %r\n        self.transition_retry(pipeline_key, retry_message)\n    return pipeline_func.task_retry", "docstring": "Handles an exception raised by a Pipeline's user code.\n\nArgs:\npipeline_key: The pipeline that raised the error.\npipeline_func: The class path name of the Pipeline that was running.\ne: The exception that was raised.\n\nReturns:\nTrue if the exception should be re-raised up through the calling stack\nby the caller of this method.", "source": "codesearchnet"}
{"code": "def add_group_coordinator(self, group, response):\n        \n        log.debug(\"Updating coordinator for %s: %s\", group, response)\n        error_type = Errors.for_code(response.error_code)\n        if error_type is not Errors.NoError:\n            log.error(\"GroupCoordinatorResponse error: %s\", error_type)\n            self._groups[group] = -1\n            return False\n\n        node_id = response.coordinator_id\n        coordinator = BrokerMetadata(\n            response.coordinator_id,\n            response.host,\n            response.port,\n            None)\n\n        \n        \n        if node_id not in self._brokers:\n            self._brokers[node_id] = coordinator\n\n        \n        \n        else:\n            node = self._brokers[node_id]\n            if coordinator.host != node.host or coordinator.port != node.port:\n                log.error(\"GroupCoordinator metadata conflicts with existing\"\n                          \" broker metadata. Coordinator: %s, Broker: %s\",\n                          coordinator, node)\n                self._groups[group] = node_id\n                return False\n\n        log.info(\"Group coordinator for %s is %s\", group, coordinator)\n        self._groups[group] = node_id\n        return True", "docstring": "Update with metadata for a group coordinator\n\nArguments:\ngroup (str): name of group from GroupCoordinatorRequest\nresponse (GroupCoordinatorResponse): broker response\n\nReturns:\nbool: True if metadata is updated, False on error", "source": "juraj-google-style"}
{"code": "def percent_point(self, U):\n    self.check_fit()\n    return norm.ppf(U, loc=self.mean, scale=self.std)", "docstring": "Given a cumulated distribution value, returns a value in original space.\n\nArguments:\nU: `np.ndarray` of shape (n, 1) and values in [0,1]\n\nReturns:\n`np.ndarray`: Estimated values in original space.", "source": "codesearchnet"}
{"code": "def update_refresh_state(self, id_or_uri, refresh_state_data):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/refreshState\"\n        return self._client.update(refresh_state_data, uri=uri)", "docstring": "Refreshes a given intelligent power delivery device.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\nrefresh_state_data:\nPower device refresh request\n\nReturns:\nstr: The power state", "source": "juraj-google-style"}
{"code": "def get_effect_class(self, effect_name: str, package_name: str=None) -> Type['Effect']:\n    return self._project.get_effect_class(effect_name, package_name=package_name)", "docstring": "Get an effect class by the class name\n\nArgs:\neffect_name (str): Name of the effect class\n\nKeyword Args:\npackage_name (str): The package the effect belongs to. This is optional and only\nneeded when effect class names are not unique.\n\nReturns:\n:py:class:`Effect` class", "source": "codesearchnet"}
{"code": "def _split_result_for_readers(axis, num_splits, df):\n    splits = split_result_of_axis_func_pandas(axis, num_splits, df)\n    if (not isinstance(splits, list)):\n        splits = [splits]\n    return splits", "docstring": "Splits the DataFrame read into smaller DataFrames and handles all edge cases.\n\nArgs:\naxis: Which axis to split over.\nnum_splits: The number of splits to create.\ndf: The DataFrame after it has been read.\n\nReturns:\nA list of pandas DataFrames.", "source": "codesearchnet"}
{"code": "def _build_predicate_for_coding_in_value_set(self, expanded_value_set: value_set_pb2.ValueSet, coding_column: Optional[_sql_data_types.Identifier]=None) -> _sql_data_types.StandardSqlExpression:\n    codes_per_system = {}\n    for concept in expanded_value_set.expansion.contains:\n        codes_per_system.setdefault(concept.system.value, []).append(concept.code.value)\n    codes_per_system = list(codes_per_system.items())\n    codes_per_system.sort(key=operator.itemgetter(0))\n    for _, codes in codes_per_system:\n        codes.sort()\n    if coding_column is None:\n        code_col = _sql_data_types.Identifier('code', _sql_data_types.String)\n        system_col = _sql_data_types.Identifier('system', _sql_data_types.String)\n    else:\n        code_col = coding_column.dot('code', _sql_data_types.String)\n        system_col = coding_column.dot('system', _sql_data_types.String)\n    code_system_predicates = []\n    for system, codes in codes_per_system:\n        system = _sql_data_types.RawExpression('\"%s\"' % system, _sql_data_types.String)\n        codes = [_sql_data_types.RawExpression('\"%s\"' % code, _sql_data_types.String) for code in codes]\n        code_system_predicates.append(system_col.eq_(system).and_(code_col.in_(codes)))\n    return functools.reduce(lambda acc, pred: acc.or_(pred), code_system_predicates)", "docstring": "Builds a predicate asserting the coding column is bound to the value_set.\n\nEnsures that the codings contained in `coding_column` are codings found in\n`expanded_value_set`.\nProduces SQL like:\n(`coding_column`.system = system1 AND `coding_column`.code IN (\ncode1, code2)) OR\n(`coding_column`.system = system2 AND `coding_column`.code IN (\ncode3, code4))\n\nArgs:\nexpanded_value_set: The expanded value set containing the coding values to\nassert membership against.\ncoding_column: The column containing the coding values. If given, columns\n`coding_column`.system and `coding_column`.code will be referenced in\nthe predicate. If not given, columns 'system' and 'code' will be\nreferenced.\n\nReturns:\nThe SQL for the value set binding predicate.", "source": "github-repos"}
{"code": "def load_hgnc_bulk(self, gene_objs):\n    LOG.info('Loading gene bulk with length %s', len(gene_objs))\n    try:\n        result = self.hgnc_collection.insert_many(gene_objs)\n    except (DuplicateKeyError, BulkWriteError) as err:\n        raise IntegrityError(err)\n    return result", "docstring": "Load a bulk of hgnc gene objects\n\nRaises IntegrityError if there are any write concerns\n\nArgs:\ngene_objs(iterable(scout.models.hgnc_gene))\n\nReturns:\nresult (pymongo.results.InsertManyResult)", "source": "codesearchnet"}
{"code": "def set_sig_figs(n=4):\n    \n    u.default_format = '.' + str(n) + 'g'\n    pd.options.display.float_format = ('{:,.' + str(n) + '}').format", "docstring": "Set the number of significant figures used to print Pint, Pandas, and\nNumPy quantities.\n\nArgs:\nn (int): Number of significant figures to display.", "source": "juraj-google-style"}
{"code": "def firmware_version(self):\n    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n    self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n    return ctypes.string_at(buf).decode()", "docstring": "Returns a firmware identification string of the connected J-Link.\n\nIt consists of the following:\n- Product Name (e.g. J-Link)\n- The string: compiled\n- Compile data and time.\n- Optional additional information.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nFirmware identification string.", "source": "codesearchnet"}
{"code": "def label(self, input_grid):\n    unset = 0\n    (high_labels, num_labels) = label((input_grid > self.high_thresh))\n    region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, (num_labels + 1))))[::(- 1)]\n    output_grid = np.zeros(input_grid.shape, dtype=int)\n    stack = []\n    for rank in region_ranking:\n        label_num = (rank + 1)\n        (label_i, label_j) = np.where((high_labels == label_num))\n        for i in range(label_i.size):\n            if (output_grid[(label_i[i], label_j[i])] == unset):\n                stack.append((label_i[i], label_j[i]))\n        while (len(stack) > 0):\n            index = stack.pop()\n            output_grid[index] = label_num\n            for i in range((index[0] - 1), (index[0] + 2)):\n                for j in range((index[1] - 1), (index[1] + 2)):\n                    if ((0 <= i < output_grid.shape[0]) and (0 <= j < output_grid.shape[1])):\n                        if ((input_grid[(i, j)] > self.low_thresh) and (output_grid[(i, j)] == unset)):\n                            stack.append((i, j))\n    return output_grid", "docstring": "Label input grid with hysteresis method.\n\nArgs:\ninput_grid: 2D array of values.\n\nReturns:\nLabeled output grid.", "source": "codesearchnet"}
{"code": "def _apply(self, ctx: ExtensionContext) -> Any:\n    (_, external_path) = ctx.node\n    return ctx.mentor.load_yaml(self.locator(external_path, (cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None)))", "docstring": "Loads a yaml fragment from an external file.\n\nArgs:\nctx: The processing context.\n\nReturns:\nThe external resource as a python dictionary. The fragment is already send through\nthe processor as well.", "source": "codesearchnet"}
{"code": "def unhide_tool(self, context_name, tool_name):\n    data = self._context(context_name)\n    hidden_tools = data['hidden_tools']\n    if (tool_name in hidden_tools):\n        hidden_tools.remove(tool_name)\n        self._flush_tools()", "docstring": "Unhide a tool so that it may be exposed in a suite.\n\nNote that unhiding a tool doesn't guarantee it can be seen - a tool of\nthe same name from a different context may be overriding it.\n\nArgs:\ncontext_name (str): Context containing the tool.\ntool_name (str): Name of tool to unhide.", "source": "codesearchnet"}
{"code": "def _send_offset_commit_request(self, offsets):\n    assert (self.config['api_version'] >= (0, 8, 1)), 'Unsupported Broker API'\n    assert all(map((lambda k: isinstance(k, TopicPartition)), offsets))\n    assert all(map((lambda v: isinstance(v, OffsetAndMetadata)), offsets.values()))\n    if (not offsets):\n        log.debug('No offsets to commit')\n        return Future().success(None)\n    node_id = self.coordinator()\n    if (node_id is None):\n        return Future().failure(Errors.GroupCoordinatorNotAvailableError)\n    offset_data = collections.defaultdict(dict)\n    for (tp, offset) in six.iteritems(offsets):\n        offset_data[tp.topic][tp.partition] = offset\n    if self._subscription.partitions_auto_assigned():\n        generation = self.generation()\n    else:\n        generation = Generation.NO_GENERATION\n    if ((self.config['api_version'] >= (0, 9)) and (generation is None)):\n        return Future().failure(Errors.CommitFailedError())\n    if (self.config['api_version'] >= (0, 9)):\n        request = OffsetCommitRequest[2](self.group_id, generation.generation_id, generation.member_id, OffsetCommitRequest[2].DEFAULT_RETENTION_TIME, [(topic, [(partition, offset.offset, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)])\n    elif (self.config['api_version'] >= (0, 8, 2)):\n        request = OffsetCommitRequest[1](self.group_id, (- 1), '', [(topic, [(partition, offset.offset, (- 1), offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)])\n    elif (self.config['api_version'] >= (0, 8, 1)):\n        request = OffsetCommitRequest[0](self.group_id, [(topic, [(partition, offset.offset, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)])\n    log.debug('Sending offset-commit request with %s for group %s to %s', offsets, self.group_id, node_id)\n    future = Future()\n    _f = self._client.send(node_id, request)\n    _f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())\n    _f.add_errback(self._failed_request, node_id, request, future)\n    return future", "docstring": "Commit offsets for the specified list of topics and partitions.\n\nThis is a non-blocking call which returns a request future that can be\npolled in the case of a synchronous commit or ignored in the\nasynchronous case.\n\nArguments:\noffsets (dict of {TopicPartition: OffsetAndMetadata}): what should\nbe committed\n\nReturns:\nFuture: indicating whether the commit was successful or not", "source": "codesearchnet"}
{"code": "def _AssignVar(self, matched, value):\n    \n    _value = self._GetValue(value)\n    if _value is not None:\n        _value.AssignVar(matched.group(value))", "docstring": "Assigns variable into current record from a matched rule.\n\nIf a record entry is a list then append, otherwise values are replaced.\n\nArgs:\nmatched: (regexp.match) Named group for each matched value.\nvalue: (str) The matched value.", "source": "juraj-google-style"}
{"code": "def __init__(self, latitude, longitude, units='metric',\n                 angle='degrees', timezone=0, time=None):\n        \n        super(TimedPoint, self).__init__(latitude, longitude, units, angle,\n                                         timezone)\n        self.time = time", "docstring": "Initialise a new ``TimedPoint`` object.\n\nArgs:\nlatitude (float, tuple or list): Location's latitude\nlongitude (float, tuple or list): Location's longitude\nangle (str): Type for specified angles\nunits (str): Units type to be used for distances\ntimezone (int): Offset from UTC in minutes\ntime (datetime.datetime): Time associated with the location", "source": "juraj-google-style"}
{"code": "def _find_countour_yaml(start, checked, names=None):\n    extensions = []\n    if names:\n        for name in names:\n            if (not os.path.splitext(name)[1]):\n                extensions.append((name + '.yaml'))\n                extensions.append((name + '.yml'))\n    yaml_names = (((names or []) + CONTOUR_YAML_NAMES) + extensions)\n    directory = start\n    while (directory not in checked):\n        checked.add(directory)\n        for fs_yaml_name in yaml_names:\n            yaml_path = os.path.join(directory, fs_yaml_name)\n            if os.path.exists(yaml_path):\n                return yaml_path\n        directory = os.path.dirname(directory)\n    return", "docstring": "Traverse the directory tree identified by start\nuntil a directory already in checked is encountered or the path\nof countour.yaml is found.\n\nChecked is present both to make the loop termination easy\nto reason about and so the same directories do not get\nrechecked\n\nArgs:\nstart: the path to start looking in and work upward from\nchecked: the set of already checked directories\n\nReturns:\nthe path of the countour.yaml file or None if it is not found", "source": "codesearchnet"}
{"code": "def _convert_id_to_token(self, token_id: int) -> list:\n    token_type_value = self.decoder.get(token_id, f'{self.unk_token}_TOKEN_TIME')\n    token_type_value = token_type_value.split('_')\n    token_type, value = ('_'.join(token_type_value[1:]), int(token_type_value[0]))\n    return [token_type, value]", "docstring": "Decodes the token ids generated by the transformer into notes.\n\nArgs:\ntoken_id (`int`):\nThis denotes the ids generated by the transformers to be converted to Midi tokens.\n\nReturns:\n`List`: A list consists of token_type (`str`) and value (`int`).", "source": "github-repos"}
{"code": "def compare(array, other, op, ty_str):\n    weld_obj = WeldObject(encoder_, decoder_)\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n    if (isinstance(other, str) or isinstance(other, WeldObject)):\n        other_var = weld_obj.update(other)\n        if isinstance(other, WeldObject):\n            other_var = tmp.obj_id\n            weld_obj.dependencies[other_var] = other\n    else:\n        other_var = ('%s(%s)' % (ty_str, str(other)))\n    weld_template = '\\n       map(\\n         %(array)s,\\n         |a: %(ty)s| a %(op)s %(other)s\\n       )\\n    '\n    weld_obj.weld_code = (weld_template % {'array': array_var, 'other': other_var, 'op': op, 'ty': ty_str})\n    return weld_obj", "docstring": "Performs passed-in comparison op between every element in the passed-in\narray and other, and returns an array of booleans.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nother (WeldObject / Numpy.ndarray): Second input array\nop (str): Op string used for element-wise comparison (== >= <= !=)\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "codesearchnet"}
{"code": "def Insert(self, key, value, row_index):\n        \n        if row_index < 0:\n            row_index += len(self)\n\n        if not 0 <= row_index < len(self):\n            raise IndexError('Index \"%s\" is out of bounds.' % row_index)\n\n        new_row = Row()\n        for idx in self.header:\n            if self.index(idx) == row_index:\n                new_row[key] = value\n            new_row[idx] = self[idx]\n        self._keys = new_row.header\n        self._values = new_row.values\n        del new_row\n        self._BuildIndex()", "docstring": "Inserts new values at a specified offset.\n\nArgs:\nkey: string for header value.\nvalue: string for a data value.\nrow_index: Offset into row for data.\n\nRaises:\nIndexError: If the offset is out of bands.", "source": "juraj-google-style"}
{"code": "def get_resources(self):\n    json_resources = self.rest_client.make_request(self.resource_url)['resources']\n    return [RestResource(resource, self.rest_client) for resource in json_resources]", "docstring": "Retrieves a list of all known Streams high-level REST resources.\n\nReturns:\n:py:obj:`list` of :py:class:`~.rest_primitives.RestResource`: List of all Streams high-level REST resources.", "source": "codesearchnet"}
{"code": "def update_configuration(self, configuration):\n    return self._client.update(configuration, uri=(self.URI + '/configuration'))", "docstring": "Updates the metrics configuration with the new values. Overwrites the existing configuration.\n\nArgs:\nconfiguration (dict):\nDictionary with a list of objects which contain frequency, sample interval, and source type for each\nresource-type.\n\nReturns:\ndict: The current configuration for which metrics are being relayed.", "source": "codesearchnet"}
{"code": "def _add_consequences(self, variant_obj, raw_variant_line):\n    consequences = []\n    for consequence in SO_TERMS:\n        if (consequence in raw_variant_line):\n            consequences.append(consequence)\n    variant_obj.consequences = consequences", "docstring": "Add the consequences found for a variant\n\nArgs:\nvariant_obj (puzzle.models.Variant)\nraw_variant_line (str): A raw vcf variant line", "source": "codesearchnet"}
{"code": "def unknown_shape(rank=None, **kwargs) -> 'TensorShape':\n    if rank is None and 'ndims' in kwargs:\n        rank = kwargs.pop('ndims')\n    if kwargs:\n        raise TypeError('Unknown argument: %s' % kwargs)\n    if rank is None:\n        return TensorShape(None)\n    else:\n        return TensorShape([Dimension(None)] * rank)", "docstring": "Returns an unknown TensorShape, optionally with a known rank.\n\nArgs:\nrank: (Optional) If specified, the number of dimensions in the shape.\n**kwargs: For backwards compatibility.\n\nReturns:\nAn unknown TensorShape.\n\nRaises:\nTypeError: In case of invalid arguments.", "source": "github-repos"}
{"code": "def get_dimension_type(self, dim):\n        \n        dim = self.get_dimension(dim)\n        if dim is None:\n            return None\n        elif dim.type is not None:\n            return dim.type\n        elif dim in self.vdims:\n            return np.float64\n        return self.interface.dimension_type(self, dim)", "docstring": "Get the type of the requested dimension.\n\nType is determined by Dimension.type attribute or common\ntype of the dimension values, otherwise None.\n\nArgs:\ndimension: Dimension to look up by name or by index\n\nReturns:\nDeclared type of values along the dimension", "source": "juraj-google-style"}
{"code": "def add_arc(self, src, dst, char):\n        \n        if src not in self.automaton.states():\n            self.add_state()\n        arc = fst.Arc(self.isyms[char], self.osyms[char],  fst.Weight.One(self.automaton.weight_type()), dst)\n        self.automaton.add_arc(src, arc)", "docstring": "Adds a new Arc\nArgs:\nsrc (int): The source state identifier\ndst (int): The destination state identifier\nchar (str): The character for the transition\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def run(self):\n    cmd = list(self.vasp_cmd)\n    if self.auto_gamma:\n        vi = VaspInput.from_directory('.')\n        kpts = vi['KPOINTS']\n        if ((kpts.style == Kpoints.supported_modes.Gamma) and (tuple(kpts.kpts[0]) == (1, 1, 1))):\n            if ((self.gamma_vasp_cmd is not None) and which(self.gamma_vasp_cmd[(- 1)])):\n                cmd = self.gamma_vasp_cmd\n            elif which((cmd[(- 1)] + '.gamma')):\n                cmd[(- 1)] += '.gamma'\n    logger.info('Running {}'.format(' '.join(cmd)))\n    with open(self.output_file, 'w') as f_std, open(self.stderr_file, 'w', buffering=1) as f_err:\n        p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)\n    return p", "docstring": "Perform the actual VASP run.\n\nReturns:\n(subprocess.Popen) Used for monitoring.", "source": "codesearchnet"}
{"code": "def _from_base_type(self, value):\n        \n        if not value:\n            return None\n        try:\n            \n            credentials = client.Credentials.new_from_json(value)\n        except ValueError:\n            credentials = None\n        return credentials", "docstring": "Converts our stored JSON string back to the desired type.\n\nArgs:\nvalue: A value from the datastore to be converted to the\ndesired type.\n\nReturns:\nA deserialized Credentials (or subclass) object, else None if\nthe value can't be parsed.", "source": "juraj-google-style"}
{"code": "def from_row_and_group(row: int, group: int):\n    for sym in _pt_data.keys():\n        el = Element(sym)\n        if ((el.row == row) and (el.group == group)):\n            return el\n    raise ValueError('No element with this row and group!')", "docstring": "Returns an element from a row and group number.\n\nArgs:\nrow (int): Row number\ngroup (int): Group number\n\n.. note::\nThe 18 group number system is used, i.e., Noble gases are group 18.", "source": "codesearchnet"}
{"code": "def intersection(self, *others):\n    result = self.__copy__()\n    _elements = result._elements\n    _total = result._total\n    for other in map(self._as_mapping, others):\n        for (element, multiplicity) in list(_elements.items()):\n            new_multiplicity = other.get(element, 0)\n            if (new_multiplicity < multiplicity):\n                if (new_multiplicity > 0):\n                    _elements[element] = new_multiplicity\n                    _total -= (multiplicity - new_multiplicity)\n                else:\n                    del _elements[element]\n                    _total -= multiplicity\n    result._total = _total\n    return result", "docstring": "r\"\"\"Return a new multiset with elements common to the multiset and all others.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms.intersection('abc'))\n['a', 'b']\n\nYou can also use the ``&`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms & Multiset('aaac'))\n['a', 'a']\n\nFor a variant of the operation which modifies the multiset in place see\n:meth:`intersection_update`.\n\nArgs:\nothers: The other sets intersect with the multiset. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].\n\nReturns:\nThe multiset resulting from the intersection of the sets.", "source": "codesearchnet"}
{"code": "def yesno(question, default=None):\n    \n\n    if default is not None:\n        if isinstance(default, bool):\n            pass\n        else:\n            default_ = default.upper()\n            if default_ not in ('Y', 'YES', 'N', 'NO'):\n                raise RuntimeError(\"Invalid default value: '{}'\".format(default))\n            default = default_ in ('Y', 'YES')\n\n    while True:\n        ans = input(\"{} ({}/{})? \".format(question, \"Y\" if default == True else \"y\",\n                                         \"N\" if default == False else \"n\")).upper()\n        if ans == \"\" and default is not None:\n            ret = default\n            break\n        elif ans in (\"N\", \"NO\"):\n            ret = False\n            break\n        elif ans in (\"Y\", \"YES\"):\n            ret = True\n            break\n    return ret", "docstring": "Asks a yes/no question\n\nArgs:\nquestion: string **without** the question mark and without the options.\nExample: 'Create links'\ndefault: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of\nthese valus (this argument is case-insensitive)\n\nReturns:\nbool: True if user answered Yes, False otherwise", "source": "juraj-google-style"}
{"code": "def assign(self, droplet_id):\n        \n        return self.get_data(\n            \"floating_ips/%s/actions/\" % self.ip,\n            type=POST,\n            params={\"type\": \"assign\", \"droplet_id\": droplet_id}\n        )", "docstring": "Assign a FloatingIP to a Droplet.\n\nArgs:\ndroplet_id: int - droplet id", "source": "juraj-google-style"}
{"code": "def get_attr_info(binary_view):\n    \n    global _ATTR_BASIC\n\n    attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9])\n\n    return (AttrTypes(attr_type), attr_len, bool(non_resident))", "docstring": "Gets basic information from a binary stream to allow correct processing of\nthe attribute header.\n\nThis function allows the interpretation of the Attribute type, attribute length\nand if the attribute is non resident.\n\nArgs:\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\n\nReturns:\nAn tuple with the attribute type, the attribute length, in bytes, and\nif the attribute is resident or not.", "source": "juraj-google-style"}
{"code": "def state_fluent_variables(self) -> FluentParamsList:\n    fluents = self.domain.state_fluents\n    ordering = self.domain.state_fluent_ordering\n    return self._fluent_params(fluents, ordering)", "docstring": "Returns the instantiated state fluents in canonical order.\n\nReturns:\nSequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name\nand a list of instantiated fluents represented as strings.", "source": "codesearchnet"}
{"code": "def ndtr(x, name='ndtr'):\n    with ops.name_scope(name, values=[x]):\n        x = ops.convert_to_tensor(x, name='x')\n        if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:\n            raise TypeError('x.dtype=%s is not handled, see docstring for supported types.' % x.dtype)\n        return _ndtr(x)", "docstring": "Normal distribution function.\n\nReturns the area under the Gaussian probability density function, integrated\nfrom minus infinity to x:\n\n```\n1       / x\nndtr(x)  = ----------  |    exp(-0.5 t**2) dt\nsqrt(2 pi)  /-inf\n\n= 0.5 (1 + erf(x / sqrt(2)))\n= 0.5 erfc(x / sqrt(2))\n```\n\nArgs:\nx: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"ndtr\").\n\nReturns:\nndtr: `Tensor` with `dtype=x.dtype`.\n\nRaises:\nTypeError: if `x` is not floating-type.", "source": "github-repos"}
{"code": "def _rnn_scan(self, hidden_states: torch.Tensor, recurrent_gate: torch.Tensor, reset: torch.Tensor, recurrent_states: Union[torch.Tensor, None], acc_dtype: torch.dtype=torch.float32) -> Tuple[torch.Tensor, torch.Tensor]:\n    recurrent_gate = recurrent_gate * ~reset\n    if hidden_states.shape[1] == 1:\n        if recurrent_states is None:\n            return (hidden_states, hidden_states[:, 0].type(acc_dtype))\n        else:\n            contextualized_states = recurrent_gate.type(acc_dtype) * recurrent_states[:, None].to(recurrent_gate.device)\n            contextualized_states += hidden_states.type(acc_dtype)\n            return (contextualized_states.type(hidden_states.dtype), contextualized_states[:, -1])\n    else:\n        if recurrent_states is None:\n            recurrent_states = torch.zeros(hidden_states[:, 0].shape, dtype=acc_dtype, device=hidden_states.device)\n        contextualized_states = torch.zeros_like(hidden_states)\n        for t in range(hidden_states.shape[1]):\n            recurrent_states = recurrent_gate[:, t].type(acc_dtype) * recurrent_states.to(recurrent_gate.device)\n            recurrent_states = recurrent_states + hidden_states[:, t].type(acc_dtype)\n            contextualized_states[:, t] = recurrent_states.type(hidden_states.dtype)\n    return (contextualized_states, recurrent_states)", "docstring": "Runs the recurrence of a linear RNN.\n\nArgs:\nhidden_states: The input sequence.\nrecurrent_gate: The diagonal of the recurrence matrix `A`.\nreset: Indicator of document boundaries, e.g. when to reset the hidden state\nof the RNN.\nrecurrent_states: The initial hidden state.\nacc_dtype: The data type for the accumulation.\n\nReturns:\nThe output of the linear recurrence.", "source": "github-repos"}
{"code": "def _remove_hidden_parts(projected_surface):\n    \n    surface = np.copy(projected_surface)\n    surface[~_make_occlusion_mask(projected_surface)] = np.nan\n    return surface", "docstring": "Removes parts of a projected surface that are not visible.\n\nArgs:\nprojected_surface (surface): the surface to use\n\nReturns:\nsurface: A projected surface.", "source": "juraj-google-style"}
{"code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY)\n    if session_master is not None:\n        return session_master\n    cluster_spec = self.cluster_spec()\n    if not cluster_spec.jobs or (len(cluster_spec.jobs) == 1 and len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1):\n        return ''\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer\n    return format_master_url(cluster_spec.task_address(task_type, task_id), rpc_layer)", "docstring": "Returns the master address to use when creating a TensorFlow session.\n\nNote: this is only useful for TensorFlow 1.x.\n\nArgs:\ntask_type: (String, optional) Overrides and sets the task_type of the\nmaster.\ntask_id: (Integer, optional) Overrides and sets the task id of the\nmaster.\nrpc_layer: (String, optional) Overrides and sets the protocol over which\nTensorFlow nodes communicate with each other.\n\nReturns:\nThe address of the master.\n\nRaises:\nRuntimeError: If the task_type or task_id is not specified and the\n`TF_CONFIG` environment variable does not contain a task section.", "source": "github-repos"}
{"code": "def _nested_to_proto(nested_value, nested_proto, process_leafs, already_processed):\n    if (not isinstance(nested_proto, module_pb2.NestedData)):\n        raise base_errors.ModuleInfoError('Expected module_pb2.NestedData.')\n    if (id(nested_value) in already_processed):\n        nested_proto.value = ''\n        return\n    for (type_name, type_info) in six.iteritems(_TO_PROTO_SPECIAL_TYPES):\n        if type_info.check(nested_value):\n            nested_proto.special_type.name = type_name\n            type_info.to_proto(nested_value, nested_proto.special_type.object, process_leafs, already_processed)\n            return\n    if _is_iterable(nested_value):\n        already_processed.add(id(nested_value))\n        if isinstance(nested_value, dict):\n            nested_proto.dict.SetInParent()\n            for (key, child) in six.iteritems(nested_value):\n                str_key = str(key)\n                child_proto = nested_proto.dict.map[str_key]\n                _nested_to_proto(child, child_proto, process_leafs, already_processed)\n        elif isinstance(nested_value, tuple):\n            if _is_namedtuple(nested_value):\n                nested_proto.named_tuple.name = type(nested_value).__name__\n                for str_key in nested_value._fields:\n                    child = getattr(nested_value, str_key)\n                    child_proto = nested_proto.named_tuple.map[str_key]\n                    _nested_to_proto(child, child_proto, process_leafs, already_processed)\n            else:\n                nested_proto.tuple.SetInParent()\n                for child in nested_value:\n                    child_proto = nested_proto.tuple.list.add()\n                    _nested_to_proto(child, child_proto, process_leafs, already_processed)\n        else:\n            nested_proto.list.SetInParent()\n            for child in nested_value:\n                child_proto = nested_proto.list.list.add()\n                _nested_to_proto(child, child_proto, process_leafs, already_processed)\n    else:\n        nested_proto.value = process_leafs(nested_value)", "docstring": "Serializes `nested_value` into `nested_proto`.\n\nArgs:\nnested_value: A nested Python value.\nnested_proto: A `module_pb2.NestedData` instance to be filled from the value\nin `nested_value`.\nprocess_leafs: A function to be applied to the leaf values of the nested\nstructure.\nalready_processed: Set of already processed objects (used to avoid\ninfinite recursion).\nRaises:\nModuleInfoError: If `nested_proto` is not an instance of\n`module_pb2.NestedData`.", "source": "codesearchnet"}
{"code": "def write_tabular(obj, filepath):\n    \n    _, fn, ext = splitext2(filepath)\n    if ext == '.h5':\n        _write_tabular_h5(obj, filepath)\n    elif ext == '.pkl':\n        _write_tabular_pickle(obj, filepath)\n    else:\n        raise NotImplementedError", "docstring": "Write tabular object in HDF5 or pickle format\n\nArgs:\nobj (array or DataFrame): tabular object to write\nfilepath (path-like): path to write to; must end in '.h5' or '.pkl'", "source": "juraj-google-style"}
{"code": "def exclude(self, **filters):\n    exclude = {('-%s' % key): value for (key, value) in filters.items()}\n    return self.filter(**exclude)", "docstring": "Applies query filters for excluding matching records from result set.\n\nArgs:\n**filters: Query filters as keyword arguments.\n\nReturns:\nSelf. Queryset object.\n\nExamples:\n>>> Person.objects.exclude(age=None)\n>>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)", "source": "codesearchnet"}
{"code": "def floating_point_ops(self, inputs: dict[str, Union[torch.Tensor, Any]]):\n    if hasattr(self.model, 'floating_point_ops'):\n        return self.model.floating_point_ops(inputs)\n    else:\n        return 0", "docstring": "For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point\noperations for every backward + forward pass. If using another model, either implement such a method in the\nmodel or subclass and override this method.\n\nArgs:\ninputs (`Dict[str, Union[torch.Tensor, Any]]`):\nThe inputs and targets of the model.\n\nReturns:\n`int`: The number of floating-point operations.", "source": "github-repos"}
{"code": "def add(self, document_data, document_id=None):\n    if (document_id is None):\n        (parent_path, expected_prefix) = self._parent_info()\n        document_pb = document_pb2.Document()\n        created_document_pb = self._client._firestore_api.create_document(parent_path, collection_id=self.id, document_id=None, document=document_pb, mask=None, metadata=self._client._rpc_metadata)\n        new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)\n        document_ref = self.document(new_document_id)\n        set_result = document_ref.set(document_data)\n        return (set_result.update_time, document_ref)\n    else:\n        document_ref = self.document(document_id)\n        write_result = document_ref.create(document_data)\n        return (write_result.update_time, document_ref)", "docstring": "Create a document in the Firestore database with the provided data.\n\nArgs:\ndocument_data (dict): Property names and values to use for\ncreating the document.\ndocument_id (Optional[str]): The document identifier within the\ncurrent collection. If not provided, an ID will be\nautomatically assigned by the server (the assigned ID will be\na random 20 character string composed of digits,\nuppercase and lowercase letters).\n\nReturns:\nTuple[google.protobuf.timestamp_pb2.Timestamp, \\\n~.firestore_v1beta1.document.DocumentReference]: Pair of\n\n* The ``update_time`` when the document was created (or\noverwritten).\n* A document reference for the created document.\n\nRaises:\n~google.cloud.exceptions.Conflict: If ``document_id`` is provided\nand the document already exists.", "source": "codesearchnet"}
{"code": "def all(self, customer_id, data={}, **kwargs):\n        \n        url = \"{}/{}/tokens\".format(self.base_url, customer_id)\n        return self.get_url(url, data, **kwargs)", "docstring": "Get all tokens for given customer Id\n\nArgs:\ncustomer_id : Customer Id for which tokens have to be fetched\n\nReturns:\nToken dicts for given cutomer Id", "source": "juraj-google-style"}
{"code": "def from_dir(cls, top, exts=None, exclude_dirs=\"_*\"):\n        \n        pseudos = []\n\n        if exts == \"all_files\":\n            for f in [os.path.join(top, fn) for fn in os.listdir(top)]:\n                if os.path.isfile(f):\n                    try:\n                        p = Pseudo.from_file(f)\n                        if p:\n                            pseudos.append(p)\n                        else:\n                            logger.info('Skipping file %s' % f)\n                    except:\n                        logger.info('Skipping file %s' % f)\n            if not pseudos:\n                logger.warning('No pseudopotentials parsed from folder %s' % top)\n                return None\n            logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos))\n\n        else:\n            if exts is None: exts=(\"psp8\",)\n\n            for p in find_exts(top, exts, exclude_dirs=exclude_dirs):\n                try:\n                    pseudos.append(Pseudo.from_file(p))\n                except Exception as exc:\n                    logger.critical(\"Error in %s:\\n%s\" % (p, exc))\n\n        return cls(pseudos).sort_by_z()", "docstring": "Find all pseudos in the directory tree starting from top.\n\nArgs:\ntop: Top of the directory tree\nexts: List of files extensions. if exts == \"all_files\"\nwe try to open all files in top\nexclude_dirs: Wildcard used to exclude directories.\n\nreturn: :class:`PseudoTable` sorted by atomic number Z.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def visit_arithmetic(self, arithmetic: _evaluation.ArithmeticNode) -> _sql_data_types.Select:\n    lhs_result = self.visit(arithmetic.left)\n    rhs_result = self.visit(arithmetic.right)\n    sql_data_type = _sql_data_types.coerce(lhs_result.sql_data_type, rhs_result.sql_data_type)\n    lhs_subquery = lhs_result.as_operand()\n    rhs_subquery = rhs_result.as_operand()\n    if sql_data_type == _sql_data_types.String:\n        sql_value = f'CONCAT({lhs_subquery}, {rhs_subquery})'\n    elif arithmetic.op == _ast.Arithmetic.Op.MODULO:\n        sql_value = f'MOD({lhs_subquery}, {rhs_subquery})'\n    elif arithmetic.op == _ast.Arithmetic.Op.TRUNCATED_DIVISION:\n        sql_value = f'DIV({lhs_subquery}, {rhs_subquery})'\n    else:\n        sql_value = f'({lhs_subquery} {arithmetic.op} {rhs_subquery})'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=sql_data_type, _sql_alias='arith_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath arithmetic expression to Spark SQL.\n\nEach operand is expected to be a collection of a single element. Both\noperands must be of the same type, or of compatible types according to the\nrules of implicit conversion.\n\nArgs:\narithmetic: The `_Arithmetic` Expression node.\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def prepare_kwargs(raw, string_parameter='name'):\n    \n    kwargs = dict()\n\n    if isinstance(raw, dict):\n        kwargs.update(raw)\n    elif isinstance(raw, str):\n        kwargs[string_parameter] = raw\n\n    return kwargs", "docstring": "Utility method to convert raw string/diction input into a dictionary to pass\ninto a function.  Always returns a dictionary.\n\nArgs:\nraw: string or dictionary, string is assumed to be the name of the activation\nactivation function.  Dictionary will be passed through unchanged.\n\nReturns: kwargs dictionary for **kwargs", "source": "juraj-google-style"}
{"code": "def teleport(self, location=None, rotation=None):\n        \n        val = 0\n        if location is not None:\n            val += 1\n            np.copyto(self._teleport_buffer, location)\n        if rotation is not None:\n            np.copyto(self._rotation_buffer, rotation)\n            val += 2\n        self._teleport_bool_buffer[0] = val", "docstring": "Teleports the agent to a specific location, with a specific rotation.\n\nArgs:\nlocation (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters.\nIf None, keeps the current location. Defaults to None.\nrotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent.\nIf None, keeps the current rotation. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def WriteEventBody(self, event):\n    \n    if not hasattr(event, 'timestamp'):\n      return\n\n    row = self._GetSanitizedEventValues(event)\n    try:\n      self._cursor.execute(self._INSERT_QUERY, row)\n    except MySQLdb.Error as exception:\n      logger.warning(\n          'Unable to insert into database with error: {0!s}.'.format(\n              exception))\n\n    self._count += 1\n\n    \n    \n    if self._count % 10000 == 0:\n      self._connection.commit()\n      if self._set_status:\n        self._set_status('Inserting event: {0:d}'.format(self._count))", "docstring": "Writes the body of an event object to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def from_args_list(cls, args_list: list[str]) -> 'CompileCommand':\n    cc_file = None\n    filtered_args = []\n    for arg in args_list:\n        if arg in _DISALLOWED_ARGS:\n            continue\n        if arg.endswith('.cc'):\n            cc_file = arg\n        filtered_args.append(arg)\n    return cls(cc_file, filtered_args)", "docstring": "Alternative constructor which uses the args_list from `bazel aquery`.\n\nThis collects arguments and the file being run on from the output of\n`bazel aquery`. Also filters out arguments which break clang-tidy.\n\nArguments:\nargs_list: List of arguments generated by `bazel aquery`\n\nReturns:\nThe corresponding ClangTidyCommand.", "source": "github-repos"}
{"code": "def obtain_input_shape(input_shape, default_size, min_size, data_format, require_flatten, weights=None):\n    if weights != 'imagenet' and input_shape and (len(input_shape) == 3):\n        if data_format == 'channels_first':\n            correct_channel_axis = 1 if len(input_shape) == 4 else 0\n            if input_shape[correct_channel_axis] not in {1, 3}:\n                warnings.warn(f'This model usually expects 1 or 3 input channels. However, it was passed an input_shape with {input_shape[0]} input channels.', stacklevel=2)\n            default_shape = (input_shape[0], default_size, default_size)\n        else:\n            if input_shape[-1] not in {1, 3}:\n                warnings.warn(f'This model usually expects 1 or 3 input channels. However, it was passed an input_shape with {input_shape[-1]} input channels.', stacklevel=2)\n            default_shape = (default_size, default_size, input_shape[-1])\n    elif data_format == 'channels_first':\n        default_shape = (3, default_size, default_size)\n    else:\n        default_shape = (default_size, default_size, 3)\n    if weights == 'imagenet' and require_flatten:\n        if input_shape is not None:\n            if input_shape != default_shape:\n                raise ValueError(f'When setting `include_top=True` and loading `imagenet` weights, `input_shape` should be {default_shape}.  Received: input_shape={input_shape}')\n        return default_shape\n    if input_shape:\n        if data_format == 'channels_first':\n            if input_shape is not None:\n                if len(input_shape) != 3:\n                    raise ValueError('`input_shape` must be a tuple of three integers.')\n                if input_shape[0] != 3 and weights == 'imagenet':\n                    raise ValueError(f'The input must have 3 channels; Received `input_shape={input_shape}`')\n                if input_shape[1] is not None and input_shape[1] < min_size or (input_shape[2] is not None and input_shape[2] < min_size):\n                    raise ValueError(f'Input size must be at least {min_size}x{min_size}; Received: input_shape={input_shape}')\n        elif input_shape is not None:\n            if len(input_shape) != 3:\n                raise ValueError('`input_shape` must be a tuple of three integers.')\n            if input_shape[-1] != 3 and weights == 'imagenet':\n                raise ValueError(f'The input must have 3 channels; Received `input_shape={input_shape}`')\n            if input_shape[0] is not None and input_shape[0] < min_size or (input_shape[1] is not None and input_shape[1] < min_size):\n                raise ValueError(f'Input size must be at least {min_size}x{min_size}; Received: input_shape={input_shape}')\n    elif require_flatten:\n        input_shape = default_shape\n    elif data_format == 'channels_first':\n        input_shape = (3, None, None)\n    else:\n        input_shape = (None, None, 3)\n    if require_flatten:\n        if None in input_shape:\n            raise ValueError(f'If `include_top` is True, you should specify a static `input_shape`. 
Received: input_shape={input_shape}')\n    return input_shape", "docstring": "Internal utility to compute/validate a model's input shape.\n\nArgs:\ninput_shape: Either None (will return the default network input shape),\nor a user-provided shape to be validated.\ndefault_size: Default input width/height for the model.\nmin_size: Minimum input width/height accepted by the model.\ndata_format: Image data format to use.\nrequire_flatten: Whether the model is expected to\nbe linked to a classifier via a Flatten layer.\nweights: One of `None` (random initialization)\nor 'imagenet' (pre-training on ImageNet).\nIf weights='imagenet' input channels must be equal to 3.\n\nReturns:\nAn integer shape tuple (may include None entries).\n\nRaises:\nValueError: In case of invalid argument values.", "source": "github-repos"}
{"code": "def alternatives(self, Class=None, set=None):\n        \n\n        for e in self.select(AlternativeLayers,None, True, ['Original','Suggestion']): \n            if Class is None:\n                yield e\n            elif len(e) >= 1: \n                for e2 in e:\n                    try:\n                        if isinstance(e2, Class):\n                            try:\n                                if set is None or e2.set == set:\n                                    yield e \n                                    break \n                            except AttributeError:\n                                continue\n                    except AttributeError:\n                        continue", "docstring": "Generator over alternatives, either all or only of a specific annotation type, and possibly restrained also by set.\n\nArguments:\n* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.\n* ``set``   - The set you want to retrieve (defaults to None, which selects irregardless of set)\n\nReturns:\nGenerator over Alternative elements", "source": "juraj-google-style"}
{"code": "def get(self, path, params=None, headers=None):\n    response = requests.get(self._url_for(path), params=params, headers=self._headers(headers))\n    self._handle_errors(response)\n    return response", "docstring": "Perform a GET request, optionally providing query-string params.\n\nArgs:\npath (str): A path that gets appended to ``base_url``.\nparams (dict, optional): Dictionary of param names to values.\n\nExample:\napi_client.get('/users', params={'active': True})\n\nReturns:\nA requests ``Response`` object.", "source": "codesearchnet"}
{"code": "def difference_update(self, *others):\n    for other in map(self._as_multiset, others):\n        for (element, multiplicity) in other.items():\n            self.discard(element, multiplicity)", "docstring": "r\"\"\"Remove all elements contained the others from this multiset.\n\n>>> ms = Multiset('aab')\n>>> ms.difference_update('abc')\n>>> sorted(ms)\n['a']\n\nYou can also use the ``-=`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aabbbc')\n>>> ms -= Multiset('abd')\n>>> sorted(ms)\n['a', 'b', 'b', 'c']\n\nFor a variant of the operation which does not modify the multiset, but returns a new\nmultiset instead see :meth:`difference`.\n\nArgs:\nothers: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].", "source": "codesearchnet"}
{"code": "def save_features(self, train_features, test_features, feature_names, feature_list_id):\n    self.save_feature_names(feature_names, feature_list_id)\n    self.save_feature_list(train_features, 'train', feature_list_id)\n    self.save_feature_list(test_features, 'test', feature_list_id)", "docstring": "Save features for the training and test sets to disk, along with their metadata.\n\nArgs:\ntrain_features: A NumPy array of features for the training set.\ntest_features: A NumPy array of features for the test set.\nfeature_names: A list containing the names of the feature columns.\nfeature_list_id: The name for this feature list.", "source": "codesearchnet"}
{"code": "def get_other_answers(pool, seeded_answers, get_student_item_dict, algo, options):\n    num_responses = (len(options) if (('num_responses' not in algo) or (algo['num_responses'] == '\n    if (algo['name'] == 'simple'):\n        return get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses)\n    elif (algo['name'] == 'random'):\n        return get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses)\n    else:\n        raise UnknownChooseAnswerAlgorithm()", "docstring": "Select other student's answers from answer pool or seeded answers based on the selection algorithm\n\nArgs:\npool (dict): answer pool, format:\n{\noption1_index: {\nstudent_id: { can store algorithm specific info here }\n},\noption2_index: {\nstudent_id: { ... }\n}\n}\nseeded_answers (list): seeded answers from instructor\n[\n{'answer': 0, 'rationale': 'rationale A'},\n{'answer': 1, 'rationale': 'rationale B'},\n]\nget_student_item_dict (callable): get student item dict function to return student item dict\nalgo (str): selection algorithm\noptions (dict): answer options for the question\n\nReturns:\ndict: answers based on the selection algorithm", "source": "codesearchnet"}
{"code": "def IsWalletTransaction(self, tx):\n        \n        for key, contract in self._contracts.items():\n\n            for output in tx.outputs:\n                if output.ScriptHash.ToBytes() == contract.ScriptHash.ToBytes():\n                    return True\n\n            for script in tx.scripts:\n\n                if script.VerificationScript:\n                    if bytes(contract.Script) == script.VerificationScript:\n                        return True\n\n        for watch_script_hash in self._watch_only:\n            for output in tx.outputs:\n                if output.ScriptHash == watch_script_hash:\n                    return True\n            for script in tx.scripts:\n                if Crypto.ToScriptHash(script.VerificationScript, unhex=False) == watch_script_hash:\n                    return True\n\n        return False", "docstring": "Verifies if a transaction belongs to the wallet.\n\nArgs:\ntx (TransactionOutput):an instance of type neo.Core.TX.Transaction.TransactionOutput to verify.\n\nReturns:\nbool: True, if transaction belongs to wallet. False, if not.", "source": "juraj-google-style"}
{"code": "def write_script(script, tempdir):\n    \n\n    name = \"script\" + self.suffix\n    path = os.path.join(tempdir, name)\n\n    with open(path, \"w\") as f:\n        f.write(\"\\n\".join(script))\n\n    return path", "docstring": "Write script to a temporary directory\n\nArguments:\nscript (list): Commands which to put into a file\n\nReturns:\nAbsolute path to script", "source": "juraj-google-style"}
{"code": "def __init__(self, functions, inference_args, input_tangents, tape_watching):\n    self._functions = functions\n    self._inference_args = inference_args\n    self._input_tangents = input_tangents\n    self._tape_watching = tape_watching", "docstring": "Collects information about the function call.\n\nArgs:\nfunctions: An object which produces forward and backward functions, either\na _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object.\ninference_args: A flat list of Tensors, arguments to the inference\nfunction.\ninput_tangents: A flat list of Tensors, jvps associated with\n`inference_args`.\ntape_watching: Boolean, with True indicating that recording is necessary.", "source": "github-repos"}
{"code": "def call_later(self, time_seconds, callback, arguments):\n    scheduled_call = {'ts': (time.time() + time_seconds), 'callback': callback, 'args': arguments}\n    self.scheduled_calls.append(scheduled_call)", "docstring": "Schedules a function to be run x number of seconds from now.\n\nThe call_later method is primarily used to resend messages if we\nhaven't received a confirmation message from the receiving host.\nWe can wait x number of seconds for a response and then try\nsending the message again.\n\nArgs:\ntime_seconds (float): The number of seconds from now we should call\nthe provided function.\ncallback (function): The method to execute when our time has been\nreached. E.g. self.retransmit\narguments (dict): A dictionary of arguments to send to the callback.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def ApproximateDistanceBetweenPoints(pa, pb):\n    (alat, alon) = pa\n    (blat, blon) = pb\n    sa = transitfeed.Stop(lat=alat, lng=alon)\n    sb = transitfeed.Stop(lat=blat, lng=blon)\n    return transitfeed.ApproximateDistanceBetweenStops(sa, sb)", "docstring": "Finds the distance between two points on the Earth's surface.\n\nThis is an approximate distance based on assuming that the Earth is a sphere.\nThe points are specified by their lattitude and longitude.\n\nArgs:\npa: the first (lat, lon) point tuple\npb: the second (lat, lon) point tuple\n\nReturns:\nThe distance as a float in metres.", "source": "codesearchnet"}
{"code": "def cmdargs(mysqldump: str, username: str, password: str, database: str, verbose: bool, with_drop_create_database: bool, max_allowed_packet: str, hide_password: bool=False) -> List[str]:\n    ca = [mysqldump, '-u', username, '-p{}'.format(('*****' if hide_password else password)), '--max_allowed_packet={}'.format(max_allowed_packet), '--hex-blob']\n    if verbose:\n        ca.append('--verbose')\n    if with_drop_create_database:\n        ca.extend(['--add-drop-database', '--databases', database])\n    else:\n        ca.append(database)\n        pass\n    return ca", "docstring": "Returns command arguments for a ``mysqldump`` call.\n\nArgs:\nmysqldump: ``mysqldump`` executable filename\nusername: user name\npassword: password\ndatabase: database name\nverbose: verbose output?\nwith_drop_create_database: produce commands to ``DROP`` the database\nand recreate it?\nmax_allowed_packet: passed to ``mysqldump``\nhide_password: obscure the password (will break the arguments but\nprovide a safe version to show the user)?\n\nReturns:\nlist of command-line arguments", "source": "codesearchnet"}
{"code": "def write_credentials(self, credentials=None, profile=None,\n                          cache_token=None):\n        \n        d = {\n            'profile': profile,\n            'client_id': credentials.client_id,\n            'client_secret': credentials.client_secret,\n            'refresh_token': credentials.refresh_token\n        }\n        if cache_token:\n            d.update({'access_token': credentials.access_token})\n        with self.lock:\n            return self.db.upsert(\n                d, self.query.profile == profile\n            )", "docstring": "Write credentials.\n\nWrite credentials to credentials file. Performs ``upsert``.\n\nArgs:\ncache_token (bool): If ``True``, stores ``access_token`` in token store. Defaults to ``True``.\ncredentials (class): Read-only credentials.\nprofile (str): Credentials profile. Defaults to ``'default'``.\n\nReturns:\nint: Affected document ID.", "source": "juraj-google-style"}
{"code": "def download_from_url(url, destination_path, force=False, aspera=False, silent=False):\n    if (aspera and url.startswith('http')):\n        logger.warn('Aspera Connect allows only FTP servers - falling back to normal download')\n        aspera = False\n    try:\n        fn = Downloader(url, outdir=os.path.dirname(destination_path))\n        if aspera:\n            fn.download_aspera(user='anonftp', host='ftp-trace.ncbi.nlm.nih.gov', silent=silent)\n        else:\n            fn.download(silent=silent, force=force)\n    except URLError:\n        logger.error(('Cannot find file %s' % url))", "docstring": "Download file from remote server.\n\nIf the file is already downloaded and  ``force`` flag is on the file will\nbe removed.\n\nArgs:\nurl (:obj:`str`): Path to the file on remote server (including file\nname)\ndestination_path (:obj:`str`): Path to the file on local machine\n(including file name)\nforce (:obj:`bool`): If file exist force to overwrite it. Defaults to\nFalse.\naspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.\nsilent (:obj:`bool`): Do not print any message. Defaults to False.", "source": "codesearchnet"}
{"code": "def from_bigquery(sql):\n    \n    if isinstance(sql, bq.Query):\n      sql = sql._expanded_sql()\n\n    parts = sql.split('.')\n    if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):\n      sql = '(' + sql + ')'  \n    else:\n      sql = '`' + sql + '`'  \n\n    query = bq.Query(\n        'SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)\n    df = query.execute().result().to_dataframe()\n    labels = sorted(set(df['target']) | set(df['predicted']))\n    labels_count = len(labels)\n    df['target'] = [labels.index(x) for x in df['target']]\n    df['predicted'] = [labels.index(x) for x in df['predicted']]\n    cm = [[0] * labels_count for i in range(labels_count)]\n    for index, row in df.iterrows():\n      cm[row['target']][row['predicted']] = row['count']\n    return ConfusionMatrix(cm, labels)", "docstring": "Create a ConfusionMatrix from a BigQuery table or query.\n\nArgs:\nsql: Can be one of:\nA SQL query string.\nA Bigquery table string.\nA Query object defined with '%%bq query --name [query_name]'.\nThe query results or table must include \"target\", \"predicted\" columns.\nReturns:\nA ConfusionMatrix that can be plotted.\nRaises:\nValueError if query results or table does not include 'target' or 'predicted' columns.", "source": "juraj-google-style"}
{"code": "def __init__(self, n_classes=256, act=torch.softmax):\n    super().__init__()\n    self.k = n_classes\n    self.act = act\n    self.register_buffer('k_idx', torch.arange(0, n_classes).view(1, -1, 1, 1), persistent=False)\n    self.register_buffer('k_minus_1', torch.tensor([self.k - 1]).view(1, -1, 1, 1), persistent=False)", "docstring": "Compute log binomial distribution for n_classes\n\nArgs:\nn_classes (`int`, *optional*, defaults to 256):\nNumber of output classes.\nact (`torch.nn.Module`, *optional*, defaults to `torch.softmax`):\nActivation function to apply to the output.", "source": "github-repos"}
{"code": "def log(self, metric):\n        \n        message = self.LOGFMT.format(**metric)\n        if metric['context']:\n            message += ' context: {context}'.format(context=metric['context'])\n        self._logger.log(self.level, message)", "docstring": "Format and output metric.\n\nArgs:\nmetric (dict): Complete metric.", "source": "juraj-google-style"}
{"code": "def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):\n        \n        new = cls(manager=manager)\n        new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)\n        return new", "docstring": "Build tasks for the computation of Born effective charges from a ground-state task.\n\nArgs:\nscf_task: ScfTask object.\nddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.\nph_tolerance: dict {\"varname\": value} with the tolerance used in the phonon run.\nNone to use AbiPy default.\nmanager: :class:`TaskManager` object.", "source": "juraj-google-style"}
{"code": "def split_vert_on_nonmanifold_face(script, vert_displacement_ratio=0.0):\n    \n    filter_xml = ''.join([\n        '  <filter name=\"Split Vertexes Incident on Non Manifold Faces\">\\n',\n        '    <Param name=\"VertDispRatio\" ',\n        'value=\"{}\" '.format(vert_displacement_ratio),\n        'description=\"Vertex Displacement Ratio\" ',\n        'type=\"RichFloat\" ',\n        '/>\\n',\n        '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Split non-manifold vertices until it becomes two-manifold.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nvert_displacement_ratio (float): When a vertex is split it is moved\nalong the average vector going from its position to the centroid\nof the FF connected faces sharing it.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def _get_params(mapper_spec, allowed_keys=None, allow_old=True):\n    if ('input_reader' not in mapper_spec.params):\n        message = \"Input reader's parameters should be specified in input_reader subdictionary.\"\n        if ((not allow_old) or allowed_keys):\n            raise errors.BadReaderParamsError(message)\n        params = mapper_spec.params\n        params = dict(((str(n), v) for (n, v) in params.iteritems()))\n    else:\n        if (not isinstance(mapper_spec.params.get('input_reader'), dict)):\n            raise errors.BadReaderParamsError('Input reader parameters should be a dictionary')\n        params = mapper_spec.params.get('input_reader')\n        params = dict(((str(n), v) for (n, v) in params.iteritems()))\n        if allowed_keys:\n            params_diff = (set(params.keys()) - allowed_keys)\n            if params_diff:\n                raise errors.BadReaderParamsError(('Invalid input_reader parameters: %s' % ','.join(params_diff)))\n    return params", "docstring": "Obtain input reader parameters.\n\nUtility function for input readers implementation. Fetches parameters\nfrom mapreduce specification giving appropriate usage warnings.\n\nArgs:\nmapper_spec: The MapperSpec for the job\nallowed_keys: set of all allowed keys in parameters as strings. If it is not\nNone, then parameters are expected to be in a separate \"input_reader\"\nsubdictionary of mapper_spec parameters.\nallow_old: Allow parameters to exist outside of the input_reader\nsubdictionary for compatability.\n\nReturns:\nmapper parameters as dict\n\nRaises:\nBadReaderParamsError: if parameters are invalid/missing or not allowed.", "source": "codesearchnet"}
{"code": "def assert_hermitian_spectrum(self, name='assert_hermitian_spectrum'):\n    eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps\n    with self._name_scope(name):\n        max_err = eps * self.domain_dimension_tensor()\n        imag_convolution_kernel = math_ops.imag(self.convolution_kernel())\n        return check_ops.assert_less(math_ops.abs(imag_convolution_kernel), max_err, message='Spectrum was not Hermitian')", "docstring": "Returns an `Op` that asserts this operator has Hermitian spectrum.\n\nThis operator corresponds to a real-valued matrix if and only if its\nspectrum is Hermitian.\n\nArgs:\nname:  A name to give this `Op`.\n\nReturns:\nAn `Op` that asserts this operator has Hermitian spectrum.", "source": "github-repos"}
{"code": "def encode_configuration(self, did, eid, parameters):\n        \n        \n        parameters = [{\"parameterId\": k, \"parameterValue\": v} for (k,v) in parameters.items()]\n\n        payload = {\n            'parameters':parameters\n\n        }\n        req_headers = {\n            'Accept': 'application/vnd.onshape.v1+json',\n            'Content-Type': 'application/json'\n        }\n\n        res = self._api.request('post', '/api/elements/d/' + did  + '/e/' + eid + '/configurationencodings', body=payload, headers=req_headers)\n\n        return json.loads(res.content.decode(\"utf-8\"))[\"encodedId\"]", "docstring": "Encode parameters as a URL-ready string\n\nArgs:\n- did (str): Document ID\n- eid (str): Element ID\n- parameters (dict): key-value pairs of the parameters to be encoded\nReturns:\n- configuration (str): the url-ready configuration string.", "source": "juraj-google-style"}
{"code": "def parse_frequency(variant, info_key):\n    \n    raw_annotation = variant.INFO.get(info_key)\n    raw_annotation = None if raw_annotation == '.' else raw_annotation\n    frequency = float(raw_annotation) if raw_annotation else None\n    return frequency", "docstring": "Parse any frequency from the info dict\n\nArgs:\nvariant(cyvcf2.Variant)\ninfo_key(str)\n\nReturns:\nfrequency(float): or None if frequency does not exist", "source": "juraj-google-style"}
{"code": "def get_flat_neurites(neuron, tol=0.1, method='ratio'):\n    \n    return [n for n in neuron.neurites if is_flat(n, tol, method)]", "docstring": "Check if a neuron has neurites that are flat within a tolerance\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): the tolerance or the ratio\nmethod(string): 'tolerance' or 'ratio' described in :meth:`is_flat`\n\nReturns:\nBool list corresponding to the flatness check for each neurite\nin neuron neurites with respect to the given criteria", "source": "juraj-google-style"}
{"code": "def __init__(self, object_id: str, allowed_states: List[str],\n                 allowed_transitions: dict, allowed_target_states: dict):\n        \n        self._id = object_id\n        self._type = STATES_KEY  \n        self._key = '{}:{}'.format(STATES_KEY, self._id)\n        self._allowed_states = [state.lower() for state in allowed_states]\n        self._allowed_transitions = self._dict_lower(allowed_transitions)\n        self._allowed_target_states = self._dict_lower(allowed_target_states)\n        if not DB.key_exists(self._key):\n            \n            DB.save_dict(self._key, self._initialise())", "docstring": "Initialise a state object.\n\nArgs:\nallowed_states (List[str]): List of allowed states.\nallowed_transitions (dict): Dict of allowed state transitions\nallowed_target_states (dict): Dict of allowed target states", "source": "juraj-google-style"}
{"code": "def GetFileObjectReferenceCount(self, path_spec):\n    \n    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)\n    if not cache_value:\n      return None\n\n    return cache_value.reference_count", "docstring": "Retrieves the reference count of a cached file-like object.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: reference count or None if there is no file-like object for\nthe corresponding path specification cached.", "source": "juraj-google-style"}
{"code": "def compute_predecessors(nodes: Iterable[_PredecessorNode]) -> dict[_PredecessorNode, set[_PredecessorNode]]:\n    predecessors = {n: {n} for n in nodes}\n    discovered = set()\n    for start in nodes:\n        if start in discovered:\n            continue\n        unprocessed = [(start, n) for n in start.outgoing]\n        while unprocessed:\n            from_node, node = unprocessed.pop(0)\n            node_predecessors = predecessors[node]\n            length_before = len(node_predecessors)\n            node_predecessors |= predecessors[from_node]\n            if length_before != len(node_predecessors):\n                unprocessed.extend(((node, n) for n in node.outgoing))\n                discovered.add(node)\n    return predecessors", "docstring": "Build a transitive closure.\n\nFor a list of nodes, compute all the predecessors of each node.\n\nArgs:\nnodes: A list of nodes or blocks.\n\nReturns:\nA dictionary that maps each node to a set of all the nodes that can reach\nthat node.", "source": "github-repos"}
{"code": "def run_benchmark(self, dataset, num_elements, iters=1, warmup=True, apply_default_optimizations=False, session_config=None):\n    options = options_lib.Options()\n    options.experimental_optimization.apply_default_optimizations = apply_default_optimizations\n    dataset = dataset.with_options(options)\n    dataset = dataset.skip(num_elements - 1)\n    if context.executing_eagerly():\n        median_duration = self._run_eager_benchmark(iterable=dataset, iters=iters, warmup=warmup)\n        return median_duration / float(num_elements)\n    iterator = dataset_ops.make_initializable_iterator(dataset)\n    next_element = iterator.get_next()\n    op = nest.flatten(next_element)[0].op\n    median_duration = self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup, session_config=session_config, initializer=iterator.initializer)\n    return median_duration / float(num_elements)", "docstring": "Benchmarks the dataset.\n\nRuns the dataset `iters` times. In each iteration, the benchmark measures\nthe time it takes to go through `num_elements` elements of the dataset.\n\nArgs:\ndataset: Dataset to benchmark.\nnum_elements: Number of dataset elements to iterate through each benchmark\niteration.\niters: Number of times to repeat the timing.\nwarmup: If true, warms up the session caches by running an untimed run.\napply_default_optimizations: Determines whether default optimizations\nshould be applied.\nsession_config: A ConfigProto protocol buffer with configuration options\nfor the session. Applicable only for benchmarking in graph mode.\n\nReturns:\nA float, representing the per-element wall time of the dataset in seconds.\nThis is the median time (with respect to `iters`) it takes for the dataset\nto go through `num_elements` elements, divided by `num_elements.`", "source": "github-repos"}
{"code": "def __init__(self, error_name, error_id, error_msg, stack_patterns):\n    self.error_name = error_name\n    self.error_id = error_id\n    self.error_msg = error_msg\n    self._stack_patterns = stack_patterns", "docstring": "Create a ParserError that matches against any of the |stack_patterns|.\n\nArgs:\nerror_name: A short, human readable name for the error,\nusing lowercase-with-dashes-format.\nerror_id: An integer to identify a specific error:\n100s: Lexer errors.\n200s: Low level parsing errors.\n300s: High level parsing errors.\nerror_msg: A message to display with this error that describes\nclearly what caused the error.\nstack_patterns: A list of \"stack patterns\", where each stack pattern\nis a list of strings corresponding to symbols on the parser's symbol\nstack at the time it errored out. The string values for the symbols\ncan match essentially any terminal or non-terminal symbol used in the\ngrammar from parser.py.\nExamples: ['TRANSITION', 'NAME', 'params', '=']\n(or None to match against any symbol stack).\n\nReturns:\nParserError that matches against |stack_patterns|.", "source": "github-repos"}
{"code": "def detail_parking(self, **kwargs):\n    date = util.datetime_string(kwargs.get('day', 1), kwargs.get('month', 1), kwargs.get('year', 1970), kwargs.get('hour', 0), kwargs.get('minute', 0))\n    params = {'language': util.language_code(kwargs.get('lang')), 'publicData': True, 'date': date, 'id': kwargs.get('parking'), 'family': kwargs.get('family')}\n    result = self.make_request('detail_parking', {}, **params)\n    if (not util.check_result(result)):\n        return (False, result.get('message', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'Data')\n    return (True, [emtype.ParkingDetails(**a) for a in values])", "docstring": "Obtain detailed info of a given parking.\n\nArgs:\nlang (str):  Language code (*es* or *en*).\nday (int): Day of the month in format DD.\nThe number is automatically padded if it only has one digit.\nmonth (int): Month number in format MM.\nThe number is automatically padded if it only has one digit.\nyear (int): Year number in format YYYY.\nhour (int): Hour of the day in format hh.\nThe number is automatically padded if it only has one digit.\nminute (int): Minute of the hour in format mm.\nThe number is automatically padded if it only has one digit.\nparking (int): ID of the parking to query.\nfamily (str): Family code of the parking (3 chars).\n\nReturns:\nStatus boolean and parsed response (list[ParkingDetails]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def send_offer_assignment_email(self, user_email, offer_assignment_id, subject, email_body, site_code=None):\n    \n    config = get_sailthru_configuration(site_code)\n    response = _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)\n    if response and response.is_ok():\n        send_id = response.get_body().get('send_id')  \n        if _update_assignment_email_status(offer_assignment_id, send_id, 'success'):\n            logger.info('[Offer Assignment] Offer assignment notification sent with message --- {message}'.format(\n                message=email_body))\n        else:\n            logger.exception(\n                '[Offer Assignment] An error occurred while updating email status data for '\n                'offer {token_offer} and email {token_email} via the ecommerce API.'.format(\n                    token_offer=offer_assignment_id,\n                    token_email=user_email,\n                )\n            )", "docstring": "Sends the offer assignment email.\nArgs:\nself: Ignore.\nuser_email (str): Recipient's email address.\noffer_assignment_id (str): Key of the entry in the offer_assignment model.\nsubject (str): Email subject.\nemail_body (str): The body of the email.\nsite_code (str): Identifier of the site sending the email.", "source": "juraj-google-style"}
{"code": "def start(host, port, profiler_stats, dont_start_browser, debug_mode):\n    \n    stats_handler = functools.partial(StatsHandler, profiler_stats)\n    if not debug_mode:\n        sys.stderr = open(os.devnull, 'w')\n    print('Starting HTTP server...')\n    if not dont_start_browser:\n        webbrowser.open('http:\n    try:\n        StatsServer((host, port), stats_handler).serve_forever()\n    except KeyboardInterrupt:\n        print('Stopping...')\n        sys.exit(0)", "docstring": "Starts HTTP server with specified parameters.\n\nArgs:\nhost: Server host name.\nport: Server port.\nprofiler_stats: A dict with collected program stats.\ndont_start_browser: Whether to open browser after profiling.\ndebug_mode: Whether to redirect stderr to /dev/null.", "source": "juraj-google-style"}
{"code": "def get_cmd_out(command):\n\t\n\n\tif isinstance(command, list):\n\t\tresult = sp.check_output(command)\n\telse:\n\t\tresult = sp.check_output(command, shell=True)\n\n\treturn result.decode('utf-8').rstrip()", "docstring": "Get the output of a command.\n\nGets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command.\n\nArgs:\ncommand (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`).\n\nNote:\nIf ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash).\n\nReturns:\nstr: The ``stdout`` of the command.", "source": "juraj-google-style"}
{"code": "def _kl_normal_normal(n_a, n_b, name=None):\n    with ops.name_scope(name, 'kl_normal_normal', [n_a.loc, n_b.loc]):\n        one = constant_op.constant(1, dtype=n_a.dtype)\n        two = constant_op.constant(2, dtype=n_a.dtype)\n        half = constant_op.constant(0.5, dtype=n_a.dtype)\n        s_a_squared = math_ops.square(n_a.scale)\n        s_b_squared = math_ops.square(n_b.scale)\n        ratio = s_a_squared / s_b_squared\n        return math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared) + half * (ratio - one - math_ops.log(ratio))", "docstring": "Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.\n\nArgs:\nn_a: instance of a Normal distribution object.\nn_b: instance of a Normal distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_normal_normal\".\n\nReturns:\nBatchwise KL(n_a || n_b)", "source": "github-repos"}
{"code": "def add_frequency(self, name, value):\n    logger.debug('Adding frequency {0} with value {1} to variant {2}'.format(name, value, self['variant_id']))\n    self['frequencies'].append({'label': name, 'value': value})", "docstring": "Add a frequency that will be displayed on the variant level\n\nArgs:\nname (str): The name of the frequency field", "source": "codesearchnet"}
{"code": "def read_from_hdx(identifier, configuration=None):\n        \n        \n\n        dataset = Dataset(configuration=configuration)\n        result = dataset._dataset_load_from_hdx(identifier)\n        if result:\n            return dataset\n        return None", "docstring": "Reads the dataset given by identifier from HDX and returns Dataset object\n\nArgs:\nidentifier (str): Identifier of dataset\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Dataset]: Dataset object if successful read, None if not", "source": "juraj-google-style"}
{"code": "def script(experiment, projects):\n    benchbuild_c = local[local.path(sys.argv[0])]\n    slurm_script = (((local.cwd / experiment.name) + '-') + str(CFG['slurm']['script']))\n    srun = local['srun']\n    srun_args = []\n    if (not CFG['slurm']['multithread']):\n        srun_args.append('--hint=nomultithread')\n    if (not CFG['slurm']['turbo']):\n        srun_args.append('--pstate-turbo=off')\n    srun = srun[srun_args]\n    srun = srun[benchbuild_c['run']]\n    return __save__(slurm_script, srun, experiment, projects)", "docstring": "Prepare a slurm script that executes the experiment for a given project.\n\nArgs:\nexperiment: The experiment we want to execute\nprojects: All projects we generate an array job for.", "source": "codesearchnet"}
{"code": "def setall(self, key, values):\n        \n\n        self.delall(key)\n        for tag in values:\n            self[tag.HashKey] = tag", "docstring": "Delete frames of the given type and add frames in 'values'.\n\nArgs:\nkey (text): key for frames to delete\nvalues (list[Frame]): frames to add", "source": "juraj-google-style"}
{"code": "def select_sites(self, site_labels):\n    if (type(site_labels) in (list, set)):\n        selected_sites = [s for s in self.sites if (s.label in site_labels)]\n    elif (type(site_labels) is str):\n        selected_sites = [s for s in self.sites if (s.label is site_labels)]\n    else:\n        raise ValueError(str(site_labels))\n    return selected_sites", "docstring": "Selects sites in the lattice with specified labels.\n\nArgs:\nsite_labels (List(Str)|Set(Str)|Str): Labels of sites to select.\nThis can be a List [ 'A', 'B' ], a Set ( 'A', 'B' ), or a String 'A'.\n\nReturns:\n(List(Site)): List of sites with labels given by `site_labels`.", "source": "codesearchnet"}
{"code": "def __init__(self, experimental_debug_info_func):\n    super(TFLiteConverterBaseV1, self).__init__()\n    self.inference_type = _dtypes.float32\n    self.inference_input_type = None\n    self.inference_output_type = None\n    self.output_format = constants.TFLITE\n    self.quantized_input_stats = {}\n    self.default_ranges_stats = None\n    self.drop_control_dependency = True\n    self.reorder_across_fake_quant = False\n    self.change_concat_input_ranges = False\n    self.dump_graphviz_dir = None\n    self.dump_graphviz_video = False\n    self.conversion_summary_dir = None\n    self._debug_info_func = experimental_debug_info_func\n    self._metadata.environment.apiVersion = 1", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nexperimental_debug_info_func: An experimental function to retrieve the\ngraph debug info for a set of nodes from the `graph_def`.", "source": "github-repos"}
{"code": "def _resolve_prefix(self, token):\n    if token in self._handlers:\n        return token\n    elif token in self._alias_to_prefix:\n        return self._alias_to_prefix[token]\n    else:\n        return None", "docstring": "Resolve command prefix from the prefix itself or its alias.\n\nArgs:\ntoken: a str to be resolved.\n\nReturns:\nIf resolvable, the resolved command prefix.\nIf not resolvable, None.", "source": "github-repos"}
{"code": "def _parse_state_value(state, user):\n    \n    uri, token = state.rsplit(':', 1)\n    if xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),\n                               action_id=uri):\n        return uri\n    else:\n        return None", "docstring": "Parse the value of the 'state' parameter.\n\nParses the value and validates the XSRF token in the state parameter.\n\nArgs:\nstate: string, The value of the state parameter.\nuser: google.appengine.api.users.User, The current user.\n\nReturns:\nThe redirect URI, or None if XSRF token is not valid.", "source": "juraj-google-style"}
{"code": "def merge_with(x, other):\n    return type(x)(tf.TensorShape(x).merge_with(other))", "docstring": "Returns a shape combining the information in `x` and `other`.\n\nThe dimensions in `x` and `other` are merged elementwise, according to the\nrules defined for `tf.Dimension.merge_with()`.\n\nFor more details, see `help(tf.TensorShape.merge_with)`.\n\nArgs:\nx: object representing a shape; convertible to `tf.TensorShape`.\nother: object representing a shape; convertible to `tf.TensorShape`.\n\nReturns:\nmerged_shape: shape having `type(x)` containing the combined information of\n`x` and `other`.\n\nRaises:\nValueError: If `x` and `other` are not compatible.", "source": "codesearchnet"}
{"code": "def clear(self):\n    self._push_all_models_freeze()\n    try:\n        while (len(self._roots) > 0):\n            r = next(iter(self._roots))\n            self.remove_root(r)\n    finally:\n        self._pop_all_models_freeze()", "docstring": "Remove all content from the document but do not reset title.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def layout(self, dimensions=None, **kwargs):\n        \n        return self.groupby(dimensions, container_type=NdLayout, **kwargs)", "docstring": "Groups data by supplied dimension(s) laying the groups along\nthe dimension(s) out in a NdLayout.\n\nArgs:\ndimensions: Dimension/str or list\nDimension or list of dimensions to group by\n\nReturns:\nlayout: NdLayout\nNdLayout with supplied dimensions", "source": "juraj-google-style"}
{"code": "def _parse_url_and_validate(cls, url):\n        \n        parsed_url = urlparse(url)\n        if parsed_url.scheme and parsed_url.netloc:\n            final_url = parsed_url.geturl()\n        else:\n            raise BadURLException\n        return final_url", "docstring": "Recieves a URL string and validates it using urlparse.\n\nArgs:\nurl: A URL string\nReturns:\nparsed_url: A validated URL\nRaises:\nBadURLException", "source": "juraj-google-style"}
{"code": "def set_default_language(self, language: str):\n    if language not in self.config.languages:\n        raise ValueError(f'{self} does not have an adapter for {language}. Supported languages: {list(self.config.languages)}')\n    self.config.default_language = language", "docstring": "Set the default language code for the model. This is used when the language is not specified in the input.\n\nArgs:\nlanguage (`str`): The language code, such as `\"en_XX\"` or `\"de_DE\"`.", "source": "github-repos"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all routines in the specified dataset. Requires the READER dataset role.\n\nArgs:\nrequest: (BigqueryRoutinesListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListRoutinesResponse) The response message.", "source": "github-repos"}
{"code": "def wait(self, timeout_s: float = None) -> int:\n        \n        if not self.running:\n            return 0\n        retcode = self.process.wait(timeout=timeout_s)\n        \n        if retcode is None:\n            self.error(\"Subprocess finished, but return code was None\")\n            retcode = 1  \n        elif retcode == 0:\n            self.info(\"Subprocess finished cleanly (return code 0).\")\n        else:\n            self.error(\n                \"Subprocess finished, but FAILED (return code {}). \"\n                \"Logs were: {} (stdout), {} (stderr)\".format(\n                    retcode,\n                    self.details.logfile_out,\n                    self.details.logfile_err))\n        self.running = False\n        return retcode", "docstring": "Wait for up to ``timeout_s`` for the child process to finish.\n\nArgs:\ntimeout_s: maximum time to wait or ``None`` to wait forever\n\nReturns:\nprocess return code; or ``0`` if it wasn't running, or ``1`` if\nit managed to exit without a return code\n\nRaises:\nsubprocess.TimeoutExpired: if the process continues to run", "source": "juraj-google-style"}
{"code": "def list_storage_accounts_rg(access_token, subscription_id, rgname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.Storage/storageAccounts',\n                        '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "List the storage accounts in the specified resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body list of storage accounts.", "source": "juraj-google-style"}
{"code": "def get_environ(cls, prefix):\n        \n        return ((key[len(prefix) + 1:], value)\n                for key, value in os.environ.items()\n                if key.startswith('%s_' % prefix))", "docstring": "Retrieves environment variables from a namespace.\n\nArgs:\nprefix (str): The prefix, without a trailing underscore.\n\nReturns:\nlist: A list of environment variable keys and values.", "source": "juraj-google-style"}
{"code": "def abs(x):\n    return math_ops.abs(x)", "docstring": "Element-wise absolute value.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def verify_ed25519_signature(public_key, contents, signature, message):\n    \n    try:\n        public_key.verify(signature, contents)\n    except InvalidSignature as exc:\n        raise ScriptWorkerEd25519Error(message % {'exc': str(exc)})", "docstring": "Verify that ``signature`` comes from ``public_key`` and ``contents``.\n\nArgs:\npublic_key (Ed25519PublicKey): the key to verify the signature\ncontents (bytes): the contents that was signed\nsignature (bytes): the signature to verify\nmessage (str): the error message to raise.\n\nRaises:\nScriptWorkerEd25519Error: on failure", "source": "juraj-google-style"}
{"code": "def _set_scripts(self, host_metadata, scripts):\n    scripts_key = 'deploy-scripts'\n    if ('ovirt-scritps' in host_metadata):\n        scripts_key = 'ovirt-scripts'\n    host_metadata[scripts_key] = scripts\n    return host_metadata", "docstring": "Temporary method to set the host scripts\n\nTODO:\nremove once the \"ovirt-scripts\" option gets deprecated\n\nArgs:\nhost_metadata(dict): host metadata to set scripts in\n\nReturns:\ndict: the updated metadata", "source": "codesearchnet"}
{"code": "def hamming_distance(str1, str2):\n    if (len(str1) != len(str2)):\n        raise VisualizationError('Strings not same length.')\n    return sum(((s1 != s2) for (s1, s2) in zip(str1, str2)))", "docstring": "Calculate the Hamming distance between two bit strings\n\nArgs:\nstr1 (str): First string.\nstr2 (str): Second string.\nReturns:\nint: Distance between strings.\nRaises:\nVisualizationError: Strings not same length", "source": "codesearchnet"}
{"code": "def copy(self, src, dst, other_system=None):\n        \n        copy_source = self.get_client_kwargs(src)\n        copy_destination = self.get_client_kwargs(dst)\n        with _handle_oss_error():\n            bucket = self._get_bucket(copy_destination)\n            bucket.copy_object(\n                source_bucket_name=copy_source['bucket_name'],\n                source_key=copy_source['key'],\n                target_key=copy_destination['key'])", "docstring": "Copy object of the same storage.\n\nArgs:\nsrc (str): Path or URL.\ndst (str): Path or URL.\nother_system (pycosio._core.io_system.SystemBase subclass): Unused.", "source": "juraj-google-style"}
{"code": "def token_to_id(self, token):\n        \n        token = self.process_token(token)\n        return self._token2id.get(token, len(self._token2id) - 1)", "docstring": "Get the token_id of given token.\n\nArgs:\ntoken (str): token from vocabulary.\n\nReturns:\nint: int id of token.", "source": "juraj-google-style"}
{"code": "def find_config(test_file=None, defaults=None, root=os.curdir):\n    if (defaults is None):\n        defaults = ['.benchbuild.yml', '.benchbuild.yaml']\n\n    def walk_rec(cur_path, root):\n        cur_path = (local.path(root) / test_file)\n        if cur_path.exists():\n            return cur_path\n        new_root = (local.path(root) / os.pardir)\n        return (walk_rec(cur_path, new_root) if (new_root != root) else None)\n    if (test_file is not None):\n        return walk_rec(test_file, root)\n    for test_file in defaults:\n        ret = walk_rec(test_file, root)\n        if (ret is not None):\n            return ret", "docstring": "Find the path to the default config file.\n\nWe look at :root: for the :default: config file. If we can't find it\nthere we start looking at the parent directory recursively until we\nfind a file named :default: and return the absolute path to it.\nIf we can't find anything, we return None.\n\nArgs:\ndefault: The name of the config file we look for.\nroot: The directory to start looking for.\n\nReturns:\nPath to the default config file, None if we can't find anything.", "source": "codesearchnet"}
{"code": "def draw_line(self, x1, y1, x2, y2, color):\n        \n        check_int_err(lib.lineRGBA(self._ptr, x1, y1, x2, y2, color[0], color[1], color[2], color[3]))", "docstring": "Draw a line.\n\nArgs:\nx1 (int): The x coordinate of the start of the line.\ny1 (int): The y coordinate of the start of the line.\nx2 (int): The x coordinate of the end of the line.\ny2 (int): The y coordinate of the end of the line.\ncolor (Tuple[int, int, int, int]): The color of the circle.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def db004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db004`'.format(value))\n\n        self._db004 = value", "docstring": "Corresponds to IDD Field `db004`\nDry-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence (warm conditions)\n\nArgs:\nvalue (float): value for IDD Field `db004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_add(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", "docstring": "Adds `tf.IndexedSlices` to this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be added to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe scattered addition has completed.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def StoreRequestsAndResponses(self,\n                                new_requests=None,\n                                new_responses=None,\n                                requests_to_delete=None):\n    \n    to_write = {}\n    if new_requests is not None:\n      for request, timestamp in new_requests:\n        subject = request.session_id.Add(\"state\")\n        queue = to_write.setdefault(subject, {})\n        queue.setdefault(self.FLOW_REQUEST_TEMPLATE % request.id, []).append(\n            (request.SerializeToString(), timestamp))\n\n    if new_responses is not None:\n      for response, timestamp in new_responses:\n        \n        \n        \n        if response.type == rdf_flows.GrrMessage.Type.STATUS:\n          subject = response.session_id.Add(\"state\")\n          attribute = self.FLOW_STATUS_TEMPLATE % response.request_id\n          to_write.setdefault(subject, {}).setdefault(attribute, []).append(\n              (response.SerializeToString(), timestamp))\n\n        subject = self.GetFlowResponseSubject(response.session_id,\n                                              response.request_id)\n        attribute = self.FLOW_RESPONSE_TEMPLATE % (response.request_id,\n                                                   response.response_id)\n        to_write.setdefault(subject, {}).setdefault(attribute, []).append(\n            (response.SerializeToString(), timestamp))\n\n    to_delete = {}\n    if requests_to_delete is not None:\n      for request in requests_to_delete:\n        queue = to_delete.setdefault(request.session_id.Add(\"state\"), [])\n        queue.append(self.FLOW_REQUEST_TEMPLATE % request.id)\n        queue.append(self.FLOW_STATUS_TEMPLATE % request.id)\n\n    for subject in set(to_write) | set(to_delete):\n      self.MultiSet(\n          subject,\n          to_write.get(subject, {}),\n          to_delete=to_delete.get(subject, []),\n          sync=True)", "docstring": "Stores new flow requests and responses to the data store.\n\nArgs:\nnew_requests: A list of tuples (request, timestamp) to store in the data\nstore.\nnew_responses: A list of tuples (response, timestamp) to store in the data\nstore.\nrequests_to_delete: A list of requests that should be deleted from the\ndata store.", "source": "juraj-google-style"}
{"code": "def from_snl(cls, snl):\n    hist = []\n    for h in snl.history:\n        d = h.description\n        d['_snl'] = {'url': h.url, 'name': h.name}\n        hist.append(d)\n    return cls(snl.structure, history=hist)", "docstring": "Create TransformedStructure from SNL.\n\nArgs:\nsnl (StructureNL): Starting snl\n\nReturns:\nTransformedStructure", "source": "codesearchnet"}
{"code": "def buckets_insert(self, bucket, project_id=None):\n    args = {'project': (project_id if project_id else self._project_id)}\n    data = {'name': bucket}\n    url = (Api._ENDPOINT + (Api._BUCKET_PATH % ''))\n    return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)", "docstring": "Issues a request to create a new bucket.\n\nArgs:\nbucket: the name of the bucket.\nproject_id: the project to use when inserting the bucket.\nReturns:\nA parsed bucket information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(CLITool, self).__init__()\n\n    preferred_encoding = locale.getpreferredencoding()\n    if not preferred_encoding:\n      preferred_encoding = self._PREFERRED_ENCODING\n    elif isinstance(preferred_encoding, py2to3.BYTES_TYPE):\n      preferred_encoding = preferred_encoding.decode('utf-8')\n\n    if not input_reader:\n      input_reader = StdinInputReader(encoding=preferred_encoding)\n    if not output_writer:\n      output_writer = StdoutOutputWriter(encoding=preferred_encoding)\n\n    self._data_location = None\n    self._debug_mode = False\n    self._encode_errors = 'strict'\n    self._input_reader = input_reader\n    self._log_file = None\n    self._output_writer = output_writer\n    self._preferred_time_zone = None\n    self._quiet_mode = False\n    self._views_format_type = views.ViewsFactory.FORMAT_TYPE_CLI\n\n    self.list_timezones = False\n    self.preferred_encoding = preferred_encoding", "docstring": "Initializes a command line interface tool.\n\nArgs:\ninput_reader (Optional[CLIInputReader]): input reader, where None\nindicates that the stdin input reader should be used.\noutput_writer (Optional[CLIOutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def sync_results(vcs, signature):\n    \n    results_directory = _get_results_directory(vcs, signature)\n    if not os.path.exists(results_directory):\n        raise ResultsNotFoundError\n    with open(os.path.join(results_directory, 'patterns'), 'r') as f:\n        patterns = f.read().strip().split()\n    includes = ['--include={}'.format(x)\n                for x in patterns]\n    cmd = ['rsync', '-r'] + includes + ['--exclude=*',\n                                        os.path.join(\n                                            results_directory, 'results', ''),\n                                        os.path.join(vcs.path, '')]\n    subprocess.check_call(cmd)", "docstring": "Sync the saved results for `signature` back to the project.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (str)\nRaises:\nResultsNotFoundError", "source": "juraj-google-style"}
{"code": "def find_triggers(\n    nodes,\n    trigger_words,\n    nosec_lines\n):\n    \n    trigger_nodes = list()\n    for node in nodes:\n        if node.line_number not in nosec_lines:\n            trigger_nodes.extend(iter(label_contains(node, trigger_words)))\n    return trigger_nodes", "docstring": "Find triggers from the trigger_word_list in the nodes.\n\nArgs:\nnodes(list[Node]): the nodes to find triggers in.\ntrigger_word_list(list[Union[Sink, Source]]): list of trigger words to look for.\nnosec_lines(set): lines with # nosec whitelisting\n\nReturns:\nList of found TriggerNodes", "source": "juraj-google-style"}
{"code": "def laid_out_slice_num(self, tensor_shape):\n    ret = self.slicewise((lambda : tf.to_int32(0)))\n    tensor_layout = self.tensor_layout(tensor_shape)\n    for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis:\n        if (mesh_axis is not None):\n\n            def my_fn(x, pcoord, mesh_dim_size):\n                return ((x * mesh_dim_size) + pcoord)\n            ret = self.slicewise(my_fn, ret, self.laid_out_pcoord(mesh_axis), self.shape[mesh_axis].size)\n    return ret", "docstring": "A LaidOutTensor with an int32 scalar, identical for identical slices.\n\nThis is useful for synchronizing random operations.\n\nArgs:\ntensor_shape: a TensorShape\nReturns:\na LaidOutTensor where each slice is an integer scalar.", "source": "codesearchnet"}
{"code": "def get_cmd_out(command):\n    if isinstance(command, list):\n        result = sp.check_output(command)\n    else:\n        result = sp.check_output(command, shell=True)\n    return result.decode('utf-8').rstrip()", "docstring": "Get the output of a command.\n\nGets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command.\n\nArgs:\ncommand (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`).\n\nNote:\nIf ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash).\n\nReturns:\nstr: The ``stdout`` of the command.", "source": "codesearchnet"}
{"code": "def infer_types(source, options):\n    with io.wrap_pytype_exceptions(PytypeError, filename=options.input):\n        return traces.trace(source, options)", "docstring": "Infer types for the provided source.\n\nArgs:\nsource: Text, the source code to analyze.\noptions: pytype.config.Options, the options to pass onto Pytype.\n\nReturns:\nsource.Code object with information gathered by Pytype.", "source": "github-repos"}
{"code": "def to_dense_one_hot(labels, class_count):\n    if (not isinstance(class_count, tf.compat.integral_types)):\n        raise TypeError('class_count must be an integer type.')\n    if (labels.dtype.base_dtype not in (tf.int32, tf.int64)):\n        raise TypeError(('Labels must be an integer: %s' % labels.dtype))\n    if (labels.get_shape().ndims != 1):\n        raise ValueError(('Labels must be a rank 1 tensor: %s' % labels.get_shape()))\n    dtype = labels.dtype.base_dtype\n    class_tensor = tf.convert_to_tensor(class_count, dtype=dtype, name='class_count')\n    batch = tf.gather(tf.shape(labels), 0)\n    count = tf.expand_dims(tf.range(0, limit=batch), 1)\n    labels = tf.expand_dims(labels, 1)\n    batch = tf.gather(tf.shape(labels), 0)\n    if (dtype != tf.int32):\n        count = tf.cast(count, dtype)\n        batch = tf.cast(batch, dtype)\n    result = tf.sparse_to_dense(tf.concat([count, labels], 1), tf.concat([tf.expand_dims(batch, 0), tf.expand_dims(class_tensor, 0)], 0), 1.0, 0.0)\n    result.set_shape([labels.get_shape().dims[0], class_count])\n    return result", "docstring": "Converts a vector that specified one-hot per batch into a dense version.\n\nArgs:\nlabels: The labels input.\nclass_count: The number of classes as an int.\nReturns:\nOne dense vector for each item in the batch.\nRaises:\nValueError: If labels is not rank 1.\nTypeError: If class_count is not an integer or labels is not an integer\nTensor.", "source": "codesearchnet"}
{"code": "def save_source(driver, name):\n    source = driver.page_source\n    file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'), '{name}.html'.format(name=name))\n    try:\n        with open(file_name, 'wb') as output_file:\n            output_file.write(source.encode('utf-8'))\n    except Exception:\n        msg = u'Could not save the browser page source to {}.'.format(file_name)\n        LOGGER.warning(msg)", "docstring": "Save the rendered HTML of the browser.\n\nThe location of the source can be configured\nby the environment variable `SAVED_SOURCE_DIR`.  If not set,\nthis defaults to the current working directory.\n\nArgs:\ndriver (selenium.webdriver): The Selenium-controlled browser.\nname (str): A name to use in the output file name.\nNote that \".html\" is appended automatically\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def ModuleHelp(self, module):\n    helplist = []\n    self.__RenderOurModuleKeyFlags(module, helplist)\n    return '\\n'.join(helplist)", "docstring": "Describe the key flags of a module.\n\nArgs:\nmodule: A module object or a module name (a string).\n\nReturns:\nstring describing the key flags of a module.", "source": "codesearchnet"}
{"code": "def runcoro(async_function):\n    \n\n    future = _asyncio.run_coroutine_threadsafe(async_function, client.loop)\n    result = future.result()\n    return result", "docstring": "Runs an asynchronous function without needing to use await - useful for lambda\n\nArgs:\nasync_function (Coroutine): The asynchronous function to run", "source": "juraj-google-style"}
{"code": "def get_table(self, project_id, dataset_id, table_id):\n    request = bigquery.BigqueryTablesGetRequest(projectId=project_id, datasetId=dataset_id, tableId=table_id)\n    response = self.client.tables.Get(request)\n    return response", "docstring": "Lookup a table's metadata object.\n\nArgs:\nclient: bigquery.BigqueryV2 instance\nproject_id: table lookup parameter\ndataset_id: table lookup parameter\ntable_id: table lookup parameter\n\nReturns:\nbigquery.Table instance\nRaises:\nHttpError: if lookup failed.", "source": "github-repos"}
{"code": "def parse_node(self, node):\n        \n        spec = super(CamundaProcessParser, self).parse_node(node)\n        spec.data = self._parse_input_data(node)\n        spec.data['lane_data'] = self._get_lane_properties(node)\n        spec.defines = spec.data\n        service_class = node.get(full_attr('assignee'))\n        if service_class:\n            self.parsed_nodes[node.get('id')].service_class = node.get(full_attr('assignee'))\n        return spec", "docstring": "Overrides ProcessParser.parse_node\nParses and attaches the inputOutput tags that created by Camunda Modeller\n\nArgs:\nnode: xml task node\nReturns:\nTaskSpec", "source": "juraj-google-style"}
{"code": "def flaskify(response, headers=None, encoder=None):\n    status_code = response.status\n    data = (response.errors or response.message)\n    mimetype = 'text/plain'\n    if (isinstance(data, list) or isinstance(data, dict)):\n        mimetype = 'application/json'\n        data = json.dumps(data, cls=encoder)\n    return flask.Response(response=data, status=status_code, headers=headers, mimetype=mimetype)", "docstring": "Format the response to be consumeable by flask.\n\nThe api returns mostly JSON responses. The format method converts the dicts\ninto a json object (as a string), and the right response is returned (with\nthe valid mimetype, charset and status.)\n\nArgs:\nresponse (Response): The dictionary object to convert into a json\nobject. If the value is a string, a dictionary is created with the\nkey \"message\".\nheaders (dict): optional headers for the flask response.\nencoder (Class): The class of the encoder (if any).\n\nReturns:\nflask.Response: The flask response with formatted data, headers, and\nmimetype.", "source": "codesearchnet"}
{"code": "def conv_elems_1d(x, factor, out_depth=None):\n    out_depth = (out_depth or x.get_shape().as_list()[(- 1)])\n    x = tf.expand_dims(x, 1)\n    x = layers().Conv2D(filters=out_depth, kernel_size=(1, factor), strides=(1, factor), padding='valid', data_format='channels_last')(x)\n    x = tf.squeeze(x, 1)\n    return x", "docstring": "Decrease the length and change the dimensionality.\n\nMerge/restore/compress factors positions of dim depth of the input into\na single position of dim out_depth.\nThis is basically just a strided convolution without overlap\nbetween each strides. The original length has to be divided by factor.\n\nArgs:\nx (tf.Tensor): shape [batch_size, length, depth]\nfactor (int): Length compression factor.\nout_depth (int): Output depth\n\nReturns:\ntf.Tensor: shape [batch_size, length//factor, out_depth]", "source": "codesearchnet"}
{"code": "def tpu_device_ordinal_at_coordinates(self, device_coordinates):\n    return self._topology_devices[tuple(device_coordinates)]", "docstring": "Returns the TensorFlow device number at `device_coordinates`.\n\nArgs:\ndevice_coordinates: An integer sequence describing a device's physical\ncoordinates in the TPU fabric.\n\nReturns:\nReturns the TensorFlow device number within the task corresponding to\nattached to the device with those physical coordinates.", "source": "github-repos"}
{"code": "def __init__(self, config=None):\n        \n        self.http = urllib3.PoolManager()\n        self.serving_port = 8080\n        self.config = config\n        self.serving_port = get_config_value('local.serving_port', config) or 8080", "docstring": "Initializes a LocalSageMakerRuntimeClient\n\nArgs:\nconfig (dict): Optional configuration for this client. In particular only\nthe local port is read.", "source": "juraj-google-style"}
{"code": "def _stop_profiler(self, save=True):\n    if not self._profiler_started:\n        return\n    try:\n        profiler.stop(save=save)\n    except errors.UnavailableError as e:\n        logging.error('Failed to stop profiler: %s', e.message)\n    finally:\n        self._profiler_started = False", "docstring": "Stops the profiler if currently active.\n\nArgs:\nsave: Whether to save the profiler results to TensorBoard.", "source": "github-repos"}
{"code": "def __init__(self, year, month, day_of_month):\n    \n    super(DateTimeEpoch, self).__init__()\n    self.day_of_month = day_of_month\n    self.month = month\n    self.year = year", "docstring": "Initializes a date time epoch.\n\nArgs:\nyear (int): year that is the start of the epoch e.g. 1970.\nmonth (int): month that is the start of the epoch, where 1 represents\nJanuary.\nday_of_month (int): day of the month that is the start of the epoch,\nwhere 1 represents the first day.", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, plist_name, top_level, **kwargs):\n    \n    super(MacUserPlugin, self).Process(\n        parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)", "docstring": "Check if it is a valid MacOS system  account plist file name.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nplist_name (str): name of the plist.\ntop_level (dict[str, object]): plist top-level key.", "source": "juraj-google-style"}
{"code": "def to_dict(self):\n    entity_dict = {}\n    for (field, val) in six.iteritems(self._fields):\n        if field.multiple:\n            if val:\n                val = [_dictify(field, x) for x in val]\n            else:\n                val = []\n        else:\n            val = _dictify(field, val)\n        if ((val is not None) and (val != [])):\n            entity_dict[field.key_name] = val\n    self._finalize_dict(entity_dict)\n    return entity_dict", "docstring": "Convert to a ``dict``\n\nSubclasses can override this function.\n\nReturns:\nPython dict with keys set from this Entity.", "source": "codesearchnet"}
{"code": "def get_conf(conf, sect, opt):\n    argu = getattr(args, ('mambupy_' + opt.lower()))\n    if (not argu):\n        envir = os.environ.get(('MAMBUPY_' + opt.upper()))\n        if (not envir):\n            try:\n                return conf.get(sect, opt)\n            except NoSectionError:\n                return default_configs[opt]\n        return envir\n    return argu", "docstring": "Gets a config 'opt' from 'conf' file, under section 'sect'.\n\nIf no 'opt' exists under 'sect', it looks for option on the default_configs\ndictionary\n\nIf there exists an environmental variable named MAMBUPY_{upper_case_opt},\nit overrides whatever the conf files or default_configs dict says.\n\nBut if you send a command line argument named mambupy_{lower_case_opt},\nit overrides anything else.\n\nArgs:\nconf (ConfigParser): ConfigParser that reads from certain config file (INI\nformat)\nsect (string): section under the config file\nopt (string): option to read\n\nReturns:\nstring: configuration option. If not found on conf, returns a value from\ndefault_configs dict. If environmental variable exists with name\nMAMBUPY_{upper_case_opt} it overrides anything else", "source": "codesearchnet"}
{"code": "def _validate_config(config):\n    required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]\n    for key in required_keys:\n        if (key not in config):\n            raise Error('Required key %s missing from config %s', (key, config))", "docstring": "Verifies that a config dict for an attenuator device is valid.\n\nArgs:\nconfig: A dict that is the configuration for an attenuator device.\n\nRaises:\nattenuator.Error: A config is not valid.", "source": "codesearchnet"}
{"code": "def _prepare_resource_chunks(self, resources, resource_delim=','):\n    return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]", "docstring": "As in some VirusTotal API methods the call can be made for multiple\nresources at once this method prepares a list of concatenated resources\naccording to the maximum number of resources per requests.\n\nArgs:\nresources: a list of the resources.\nresource_delim: a string used to separate the resources.\nDefault value is a comma.\nReturns:\nA list of the concatenated resources.", "source": "codesearchnet"}
{"code": "def get_drift_corrected_structures(self, start=None, stop=None, step=None):\n        \n        coords = np.array(self.structure.cart_coords)\n        species = self.structure.species_and_occu\n        lattices = self.lattices\n        nsites, nsteps, dim = self.corrected_displacements.shape\n\n        for i in range(start or 0, stop or nsteps, step or 1):\n            latt = lattices[0] if len(lattices) == 1 else lattices[i]\n            yield Structure(\n                latt, species,\n                coords + self.corrected_displacements[:, i, :],\n                coords_are_cartesian=True)", "docstring": "Returns an iterator for the drift-corrected structures. Use of\niterator is to reduce memory usage as # of structures in MD can be\nhuge. You don't often need all the structures all at once.\n\nArgs:\nstart, stop, step (int): applies a start/stop/step to the iterator.\nFaster than applying it after generation, as it reduces the\nnumber of structures created.", "source": "juraj-google-style"}
{"code": "def find(cls, session, resource_id, include=None):\n        \n        url = session._build_url(cls._resource_path(), resource_id)\n        params = build_request_include(include, None)\n        process = cls._mk_one(session, include=include)\n        return session.get(url, CB.json(200, process), params=params)", "docstring": "Retrieve a single resource.\n\nThis should only be called from sub-classes.\n\nArgs:\n\nsession(Session): The session to find the resource in\n\nresource_id: The ``id`` for the resource to look up\n\nKeyword Args:\n\ninclude: Resource classes to include\n\nReturns:\n\nResource: An instance of a resource, or throws a\n:class:`NotFoundError` if the resource can not be found.", "source": "juraj-google-style"}
{"code": "def read(self, filename, binary_mode=False, size=None, offset=None):\n        \n        s3 = boto3.resource(\"s3\")\n        bucket, path = self.bucket_and_path(filename)\n        args = {}\n        endpoint = 0\n        if size is not None or offset is not None:\n            if offset is None:\n                offset = 0\n            endpoint = '' if size is None else (offset + size)\n            args['Range'] = 'bytes={}-{}'.format(offset, endpoint)\n        try:\n            stream = s3.Object(bucket, path).get(**args)['Body'].read()\n        except botocore.exceptions.ClientError as exc:\n            if exc.response['Error']['Code'] == '416':\n                if size is not None:\n                    \n                    \n                    client = boto3.client(\"s3\")\n                    obj = client.head_object(Bucket=bucket, Key=path)\n                    len = obj['ContentLength']\n                    endpoint = min(len, offset + size)\n                if offset == endpoint:\n                    \n                    stream = b''\n                else:\n                    args['Range'] = 'bytes={}-{}'.format(offset, endpoint)\n                    stream = s3.Object(bucket, path).get(**args)['Body'].read()\n            else:\n                raise\n        if binary_mode:\n            return bytes(stream)\n        else:\n            return stream.decode('utf-8')", "docstring": "Reads contents of a file to a string.\n\nArgs:\nfilename: string, a path\nbinary_mode: bool, read as binary if True, otherwise text\nsize: int, number of bytes or characters to read, otherwise\nread all the contents of the file from the offset\noffset: int, offset into file to read from, otherwise read\nfrom the very beginning\n\nReturns:\nSubset of the contents of the file as a string or bytes.", "source": "juraj-google-style"}
{"code": "def get_all_boards(*args, **kwargs):\n    https = kwargs.get('https', (args[1] if (len(args) > 1) else False))\n    url_generator = Url(None, https)\n    _fetch_boards_metadata(url_generator)\n    return get_boards(_metadata.keys(), *args, **kwargs)", "docstring": "Returns every board on 4chan.\n\nReturns:\ndict of :class:`basc_py4chan.Board`: All boards.", "source": "codesearchnet"}
{"code": "def logs(self, container, stdout=True, stderr=True, stream=False, timestamps=False, tail='all', since=None, follow=None, until=None):\n    if (follow is None):\n        follow = stream\n    params = {'stderr': ((stderr and 1) or 0), 'stdout': ((stdout and 1) or 0), 'timestamps': ((timestamps and 1) or 0), 'follow': ((follow and 1) or 0)}\n    if ((tail != 'all') and ((not isinstance(tail, int)) or (tail < 0))):\n        tail = 'all'\n    params['tail'] = tail\n    if (since is not None):\n        if isinstance(since, datetime):\n            params['since'] = utils.datetime_to_timestamp(since)\n        elif (isinstance(since, int) and (since > 0)):\n            params['since'] = since\n        else:\n            raise errors.InvalidArgument('since value should be datetime or positive int, not {}'.format(type(since)))\n    if (until is not None):\n        if utils.version_lt(self._version, '1.35'):\n            raise errors.InvalidVersion('until is not supported for API version < 1.35')\n        if isinstance(until, datetime):\n            params['until'] = utils.datetime_to_timestamp(until)\n        elif (isinstance(until, int) and (until > 0)):\n            params['until'] = until\n        else:\n            raise errors.InvalidArgument('until value should be datetime or positive int, not {}'.format(type(until)))\n    url = self._url('/containers/{0}/logs', container)\n    res = self._get(url, params=params, stream=stream)\n    output = self._get_result(container, stream, res)\n    if stream:\n        return CancellableStream(output, res)\n    else:\n        return output", "docstring": "Get logs from a container. Similar to the ``docker logs`` command.\n\nThe ``stream`` parameter makes the ``logs`` function return a blocking\ngenerator you can iterate over to retrieve log output as it happens.\n\nArgs:\ncontainer (str): The container to get logs from\nstdout (bool): Get ``STDOUT``. Default ``True``\nstderr (bool): Get ``STDERR``. Default ``True``\nstream (bool): Stream the response. Default ``False``\ntimestamps (bool): Show timestamps. Default ``False``\ntail (str or int): Output specified number of lines at the end of\nlogs. Either an integer of number of lines or the string\n``all``. Default ``all``\nsince (datetime or int): Show logs since a given datetime or\ninteger epoch (in seconds)\nfollow (bool): Follow log output. Default ``False``\nuntil (datetime or int): Show logs that occurred before the given\ndatetime or integer epoch (in seconds)\n\nReturns:\n(generator or str)\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def days_in_leap_years_between(start_date, end_date):\n\n    def days_in_leap_years_since_1jan0001(date):\n        prev_year = date.year() - 1\n        leap_years_before = prev_year \n        n_leap_days = leap_years_before * 366\n        days_in_cur_year = date.day_of_year() - 1\n        n_leap_days += tf.where(is_leap_year(date.year()), days_in_cur_year, 0)\n        return n_leap_days\n    return days_in_leap_years_since_1jan0001(end_date) - days_in_leap_years_since_1jan0001(start_date)", "docstring": "Calculates number of days between two dates that fall on leap years.\n\n'start_date' is included and 'end_date' is excluded from the period.\n\nFor example, for dates `2019-12-24` and `2024-2-10` the result is\n406: 366 days in 2020, 31 in Jan 2024 and 9 in Feb 2024.\n\nIf `end_date` is earlier than `start_date`, the result will be negative or\nzero.\n\nArgs:\nstart_date: DateTensor.\nend_date: DateTensor compatible with `start_date`.\n\nReturns:\nTensor of type 'int32'.", "source": "github-repos"}
{"code": "def normalize_url(url):\n    uri = urlparse(url)\n    query = (uri.query or '')\n    pairs = parse_qsl(query)\n    decoded_pairs = [(unquote(key), value) for (key, value) in pairs]\n    encoded_pairs = [(quote(key), value) for (key, value) in decoded_pairs]\n    normalized_query = urlencode(encoded_pairs)\n    return ParseResult(scheme=uri.scheme, netloc=uri.netloc, path=uri.path, params=uri.params, query=normalized_query, fragment=uri.fragment).geturl()", "docstring": "Returns the given URL with all query keys properly escaped.\n\nArgs:\nurl (str): The URL to normalize.\n\nReturns:\nstr: The normalized URL.", "source": "codesearchnet"}
{"code": "def _data_to_json(data):\n    if (type(data) not in [str, unicode]):\n        data = json.dumps(data)\n    return data", "docstring": "Convert to json if it isn't already a string.\n\nArgs:\ndata (str): data to convert to json", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n    out = []\n    for i, hidden_state in enumerate(hidden_states):\n        hidden_state = hidden_state[:, 1:]\n        batch_size, _, num_channels = hidden_state.shape\n        hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)\n        hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()\n        hidden_state = self.layers[i](hidden_state)\n        out.append(hidden_state)\n    return out", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def content_matchs(tag_content, content_transformer=None):\n    \n    def content_matchs_closure(element):\n        if not element.isTag():\n            return False\n\n        cont = element.getContent()\n        if content_transformer:\n            cont = content_transformer(cont)\n\n        return tag_content == cont\n\n    return content_matchs_closure", "docstring": "Generate function, which checks whether the content of the tag matchs\n`tag_content`.\n\nArgs:\ntag_content (str): Content of the tag which will be matched thru whole\nDOM.\ncontent_transformer (fn, default None): Function used to transform all\ntags before matching.\n\nThis function can be used as parameter for .find() method in HTMLElement.", "source": "juraj-google-style"}
{"code": "def __init__(self, retry_definition):\n        \n        logger.debug(\"starting\")\n\n        if isinstance(retry_definition, dict):\n            \n            self.max = retry_definition.get('max', None)\n\n            \n            self.sleep = retry_definition.get('sleep', 0)\n\n            \n            self.stop_on = retry_definition.get('stopOn', None)\n\n            \n            self.retry_on = retry_definition.get('retryOn', None)\n        else:\n            \n            logger.error(f\"retry decorator definition incorrect.\")\n            raise PipelineDefinitionError(\"retry decorator must be a dict \"\n                                          \"(i.e a map) type.\")\n\n        logger.debug(\"done\")", "docstring": "Initialize the class. No duh, huh.\n\nYou can happily expect the initializer to initialize all\nmember attributes.\n\nArgs:\nretry_definition: dict. This is the actual retry definition as it\nexists in the pipeline yaml.", "source": "juraj-google-style"}
{"code": "def run_plugins(context_obj, boto3_clients):\n\n    def print_if_verbose(message):\n        if context_obj.verbose:\n            print(message)\n    service_name = os.path.basename(sys.argv[0]).replace('.py', '')\n    try:\n        import plugins\n    except ImportError:\n        print_if_verbose('no plugins detected.')\n        return\n    else:\n        for (plugin_importer, plugin_name, plugin_ispkg) in pkgutil.iter_modules(plugins.__path__):\n            if plugin_ispkg:\n                plugin_package = importlib.import_module('plugins.{}'.format(plugin_name))\n                for (importer, modname, ispkg) in pkgutil.iter_modules(plugin_package.__path__):\n                    plugin_module = importlib.import_module('plugins.{}.{}'.format(plugin_name, modname))\n                    for (name, obj) in inspect.getmembers(plugin_module):\n                        if (inspect.isclass(obj) and (obj.__name__ == 'EFPlugin')):\n                            plugin_class = getattr(plugin_module, name)\n                            plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)\n                            if (plugin_instance.service == service_name):\n                                print_if_verbose(\"plugin '{}' loaded\".format(plugin_name))\n                                if (not context_obj.commit):\n                                    print_if_verbose('dryrun: skipping plugin execution.')\n                                else:\n                                    try:\n                                        plugin_instance.run()\n                                    except AttributeError:\n                                        print(\"error executing plugin '{}'\".format(modname))", "docstring": "Executes all loaded plugins designated for the service calling the function.\n\nArgs:\ncontext_obj (obj:EFContext): The EFContext object created by the service.\nboto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()", "source": "codesearchnet"}
{"code": "def _ParseFileEntry(self, knowledge_base, file_entry):\n    \n    file_object = file_entry.GetFileObject()\n    try:\n      self._ParseFileData(knowledge_base, file_object)\n    finally:\n      file_object.close()", "docstring": "Parses a file entry for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_entry (dfvfs.FileEntry): file entry that contains the artifact\nvalue data.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def add(self, private_key):\n    if (not isinstance(private_key, PaillierPrivateKey)):\n        raise TypeError(('private_key should be of type PaillierPrivateKey, not %s' % type(private_key)))\n    self.__keyring[private_key.public_key] = private_key", "docstring": "Add a key to the keyring.\n\nArgs:\nprivate_key (PaillierPrivateKey): a key to add to this keyring.", "source": "codesearchnet"}
{"code": "def parse_results_mol2(mol2_outpath):\n    \n    docked_ligands = pd.DataFrame()\n\n    lines = [line.strip() for line in open(mol2_outpath, 'r')]\n    props = {}\n\n    for i, line in enumerate(lines):\n        if line.startswith('\n            ligand = line.strip().strip('\n            line = lines[i + 1]\n            props = {}\n            props['Ligand'] = ligand\n        if line.startswith('\n            splitter = line.strip().strip('\n            props[splitter[0]] = float(splitter[1])\n        if line.startswith('@<TRIPOS>MOLECULE'):\n            if props:\n                docked_ligands = docked_ligands.append(props, ignore_index=True)\n\n    return docked_ligands", "docstring": "Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.\n\nArgs:\nmol2_outpath (str): Path to mol2 output file\n\nReturns:\nDataFrame: Pandas DataFrame of the results", "source": "juraj-google-style"}
{"code": "def get_head_mask(self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool=False) -> Tensor:\n    if head_mask is not None:\n        head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)\n        if is_attention_chunked is True:\n            head_mask = head_mask.unsqueeze(-1)\n    else:\n        head_mask = [None] * num_hidden_layers\n    return head_mask", "docstring": "Prepare the head mask if needed.\n\nArgs:\nhead_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):\nThe mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).\nnum_hidden_layers (`int`):\nThe number of hidden layers in the model.\nis_attention_chunked (`bool`, *optional*, defaults to `False`):\nWhether or not the attentions scores are computed by chunks or not.\n\nReturns:\n`torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with\n`[None]` for each layer.", "source": "github-repos"}
{"code": "def fit(self, X, *args, **kwargs):\n    self.constant_value = self._get_constant_value(X)\n    if (self.constant_value is None):\n        if self.unfittable_model:\n            self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)\n        else:\n            self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)\n        for name in self.METHOD_NAMES:\n            attribute = getattr(self.__class__, name)\n            if isinstance(attribute, str):\n                setattr(self, name, getattr(self.model, attribute))\n            elif (attribute is None):\n                setattr(self, name, missing_method_scipy_wrapper((lambda x: x)))\n    else:\n        self._replace_constant_methods()\n    self.fitted = True", "docstring": "Fit scipy model to an array of values.\n\nArgs:\nX(`np.ndarray` or `pd.DataFrame`):  Datapoints to be estimated from. Must be 1-d\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "class QuantAct(nn.Module):\n\n    def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):\n        super().__init__()\n        self.activation_bit = activation_bit\n        self.act_range_momentum = act_range_momentum\n        self.quant_mode = quant_mode\n        self.per_channel = per_channel\n        self.percentile = False\n        self.act_function = SymmetricQuantFunction.apply\n        if not self.per_channel:\n            self.register_buffer('x_min', torch.zeros(1))\n            self.register_buffer('x_max', torch.zeros(1))\n            self.register_buffer('act_scaling_factor', torch.zeros(1))\n            self.x_min -= 1e-05\n            self.x_max += 1e-05\n        else:\n            raise NotImplementedError('per-channel mode is not currently supported for activation.')\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}(activation_bit={self.activation_bit}, quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, Act_max: {self.x_max.item():.2f})'\n\n    def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None):\n        x_act = x if identity is None else identity + x\n        if self.training:\n            assert not self.percentile, 'percentile mode is not currently supported for activation.'\n            assert not self.per_channel, 'per-channel mode is not currently supported for activation.'\n            x_min = x_act.data.min()\n            x_max = x_act.data.max()\n            assert x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0, 'NaN detected when computing min/max of the activation'\n            if self.x_min.min() > -1.1e-05 and self.x_max.max() < 1.1e-05:\n                self.x_min = self.x_min + x_min\n                self.x_max = self.x_max + x_max\n            elif self.act_range_momentum == -1:\n                self.x_min = torch.min(self.x_min, x_min)\n                self.x_max = torch.max(self.x_max, x_max)\n            else:\n                self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum)\n                self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum)\n        if not self.quant_mode:\n            return (x_act, None)\n        x_min = self.x_min if specified_min is None else specified_min\n        x_max = self.x_max if specified_max is None else specified_max\n        self.act_scaling_factor = symmetric_linear_quantization_params(self.activation_bit, x_min, x_max, per_channel=self.per_channel)\n        if pre_act_scaling_factor is None:\n            quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)\n        else:\n            quant_act_int = FixedPointMul.apply(x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor)\n        correct_output_scale = self.act_scaling_factor.view(-1)\n        return (quant_act_int * correct_output_scale, self.act_scaling_factor)", "docstring": "Quantizes the given activation.\n\nArgs:\nactivation_bit (`int`):\nBitwidth for the quantized activation.\nact_range_momentum (`float`, *optional*, defaults to `0.95`):\nMomentum for updating the activation quantization range.\nper_channel (`bool`, *optional*, defaults to `False`):\nWhether to or not use channel-wise quantization.\nchannel_len (`int`, *optional*):\nSpecify the channel length when set the *per_channel* 
True.\nquant_mode (`bool`, *optional*, defaults to `False`):\nWhether or not the layer is quantized.", "source": "github-repos"}
{"code": "def action_size(self) -> Sequence[Sequence[int]]:\n    fluents = self.domain.action_fluents\n    ordering = self.domain.action_fluent_ordering\n    return self._fluent_size(fluents, ordering)", "docstring": "The size of each action fluent in canonical order.\n\nReturns:\nSequence[Sequence[int]]: A tuple of tuple of integers\nrepresenting the shape and size of each fluent.", "source": "codesearchnet"}
{"code": "def signature(cert, sig, body):\n    \n    body = six.b(body)\n\n    sig = base64.decodestring(sig)\n    padder = padding.PKCS1v15()\n    public_key = cert.public_key()\n    try:\n        public_key.verify(sig, body, padder, hashes.SHA1())\n        return True\n    except InvalidSignature:\n        warnings.warn('Signature verification failed.')\n        return False", "docstring": "Validate data request signature.\n\nSee `validate.request` for additional info.\n\nArgs:\ncert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon\nsigning certificate.\nsig: str. Signature header value sent by request.\nbody: str. HTTPS request body.\n\nReturns:\nbool: True if valid, False otherwise.", "source": "juraj-google-style"}
{"code": "def _ws_on_error(self, ws: websocket.WebSocketApp, error: Exception):\n        \n        self.logger.error(f'Got error from websocket connection: {str(error)}')", "docstring": "Callback for receiving errors from the websocket connection\n\nArgs:\nws: websocket connection\nerror: exception raised", "source": "juraj-google-style"}
{"code": "def _AttemptAutoDetectTagFile(self, analysis_mediator):\n    self._autodetect_tag_file_attempt = True\n    if (not analysis_mediator.data_location):\n        return False\n    operating_system = analysis_mediator.operating_system.lower()\n    filename = self._OS_TAG_FILES.get(operating_system, None)\n    if (not filename):\n        return False\n    logger.info('Using auto detected tag file: {0:s}'.format(filename))\n    tag_file_path = os.path.join(analysis_mediator.data_location, filename)\n    self.SetAndLoadTagFile(tag_file_path)\n    return True", "docstring": "Detects which tag file is most appropriate.\n\nArgs:\nanalysis_mediator (AnalysisMediator): analysis mediator.\n\nReturns:\nbool: True if a tag file is autodetected.", "source": "codesearchnet"}
{"code": "def gets(self, key, default=None, cas_default=None):\n    defaults = (default, cas_default)\n    return self._fetch_cmd(b'gets', [key], True).get(key, defaults)", "docstring": "The memcached \"gets\" command for one key, as a convenience.\n\nArgs:\nkey: str, see class docs for details.\ndefault: value that will be returned if the key was not found.\ncas_default: same behaviour as default argument.\n\nReturns:\nA tuple of (value, cas)\nor (default, cas_defaults) if the key was not found.", "source": "codesearchnet"}
{"code": "class RemoteMonitor(Callback):\n\n    def __init__(self, root='http:\n        super(RemoteMonitor, self).__init__()\n        self.root = root\n        self.path = path\n        self.field = field\n        self.headers = headers\n        self.send_as_json = send_as_json\n\n    def on_epoch_end(self, epoch, logs=None):\n        if requests is None:\n            raise ImportError('RemoteMonitor requires the `requests` library.')\n        logs = logs or {}\n        send = {}\n        send['epoch'] = epoch\n        for k, v in logs.items():\n            if isinstance(v, (np.ndarray, np.generic)):\n                send[k] = v.item()\n            else:\n                send[k] = v\n        try:\n            if self.send_as_json:\n                requests.post(self.root + self.path, json=send, headers=self.headers)\n            else:\n                requests.post(self.root + self.path, {self.field: json.dumps(send)}, headers=self.headers)\n        except requests.exceptions.RequestException:\n            logging.warning('Warning: could not reach RemoteMonitor root server at ' + str(self.root))", "docstring": "Callback used to stream events to a server.\n\nRequires the `requests` library.\nEvents are sent to `root + '/publish/epoch/end/'` by default. Calls are\nHTTP POST, with a `data` argument which is a\nJSON-encoded dictionary of event data.\nIf `send_as_json=True`, the content type of the request will be\n`\"application/json\"`.\nOtherwise the serialized JSON will be sent within a form.\n\nArgs:\nroot: String; root url of the target server.\npath: String; path relative to `root` to which the events will be sent.\nfield: String; JSON field under which the data will be stored.\nThe field is used only if the payload is sent within a form\n(i.e. send_as_json is set to False).\nheaders: Dictionary; optional custom HTTP headers.\nsend_as_json: Boolean; whether the request should be\nsent as `\"application/json\"`.", "source": "github-repos"}
{"code": "def upload_file(self, local_file, dest_path, mimetype):\n    self.__validate_storage_path(dest_path)\n    if dest_path.endswith('/'):\n        raise StorageArgumentException('Must specify target file name in dest_path argument')\n    if local_file.endswith(os.path.sep):\n        raise StorageArgumentException('Must specify source file name in local_file argument, directory upload not supported')\n    new_file = self.api_client.create_file(name=dest_path.split('/').pop(), content_type=mimetype, parent=self.get_parent(dest_path)['uuid'])\n    etag = self.api_client.upload_file_content(new_file['uuid'], source=local_file)\n    new_file['etag'] = etag\n    return new_file", "docstring": "Upload local file content to a storage service destination folder.\n\nArgs:\nlocal_file(str)\ndest_path(str):\nabsolute Storage service path '/project' prefix is essential\nsuffix should be the name the file will have on in the destination folder\ni.e.: /project/folder/.../file_name\nmimetype(str): set the contentType attribute\n\nReturns:\nThe uuid of created file entity as string\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def nearest_neighbor(self, x, means):\n    \n    x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)\n    means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)\n    scalar_prod = tf.matmul(\n        tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))\n    scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])\n    dist = x_norm_sq + tf.transpose(\n        means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod\n\n    if self.hparams.soft_em:\n      nearest_idx = tf.stack(\n          [\n              tf.multinomial(\n                  -dist[:, i, :], num_samples=self.hparams.num_samples)\n              for i in range(self.hparams.num_blocks)\n          ],\n          axis=1)\n      nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)\n      nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)\n    else:\n      if self.hparams.random_top_k > 1:\n        _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)\n        nearest_idx = tf.gather(\n            top_k_idx,\n            tf.random_uniform(\n                [1],\n                minval=0,\n                maxval=self.hparams.random_top_k - 1,\n                dtype=tf.int32),\n            axis=-1)\n      else:\n        if self.hparams.use_scales:\n          dist /= tf.reshape(self.hparams.scales,\n                             [1, 1, self.hparams.moe_num_experts])\n        nearest_idx = tf.argmax(-dist, axis=-1)\n      nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)\n    return nearest_hot", "docstring": "Find the nearest element in means to elements in x.\n\nArgs:\nx: Batch of encoder continuous latent states sliced/projected into\nshape [-1, num_blocks, block_dim].\nmeans: Embedding means of shape.\n\nReturns:\nTensor with nearest element in mean encoded in one-hot notation.", "source": "juraj-google-style"}
{"code": "def qualified_name(self):\n    idxstr = ('' if (self.index is None) else str(self.index))\n    return ('%s[%s]' % (self.qualified_package_name, idxstr))", "docstring": "Get the qualified name of the variant.\n\nReturns:\nstr: Name of the variant with version and index, eg \"maya-2016.1[1]\".", "source": "codesearchnet"}
{"code": "def flatten(self, in_place=True):\n    new_dataset = TaskData()\n    for (i, dataset) in enumerate(self._datasets):\n        if (i != self._default_index):\n            new_dataset.merge(dataset)\n    new_dataset.merge(self.default_dataset)\n    new_aliases = {alias: 0 for (alias, _) in self._aliases.items()}\n    if in_place:\n        self._datasets = [new_dataset]\n        self._aliases = new_aliases\n        self._default_index = 0\n    else:\n        return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys()))", "docstring": "Merge all datasets into a single dataset.\n\nThe default dataset is the last dataset to be merged, as it is considered to be\nthe primary source of information and should overwrite all existing fields with\nthe same key.\n\nArgs:\nin_place (bool): Set to ``True`` to replace the existing datasets with the\nmerged one. If set to ``False``, will return a new MultiTaskData\nobject containing the merged dataset.\n\nReturns:\nMultiTaskData: If the in_place flag is set to False.", "source": "codesearchnet"}
{"code": "def AddColumn(self, column, default=\"\", col_index=-1):\n        \n        if column in self.table:\n            raise TableError(\"Column %r already in table.\" % column)\n        if col_index == -1:\n            self._table[0][column] = column\n            for i in range(1, len(self._table)):\n                self._table[i][column] = default\n        else:\n            self._table[0].Insert(column, column, col_index)\n            for i in range(1, len(self._table)):\n                self._table[i].Insert(column, default, col_index)", "docstring": "Appends a new column to the table.\n\nArgs:\ncolumn: A string, name of the column to add.\ndefault: Default value for entries. Defaults to ''.\ncol_index: Integer index for where to insert new column.\n\nRaises:\nTableError: Column name already exists.", "source": "juraj-google-style"}
{"code": "def bitwise_right_shift(x, y):\n    if any_symbolic_tensors((x, y)):\n        return BitwiseRightShift().symbolic_call(x, y)\n    return backend.numpy.bitwise_right_shift(x, y)", "docstring": "Shift the bits of an integer to the right.\n\nBits are shifted to the right `y`. Because the internal representation of\nnumbers is in binary format, this operation is equivalent to dividing `x` by\n`2**y`.\n\nArgs:\nx: Input integer tensor.\ny: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"}
{"code": "def search(cls, term, fields=()):\n    if (not any((cls._meta.search_fields, fields))):\n        raise AttributeError(\"A list of searchable fields must be provided in the class's search_fields or provided to this function in the `fields` kwarg.\")\n    if (not fields):\n        fields = cls._meta.search_fields\n    query = cls.select()\n    like_term = ''.join((term, '%'))\n    full_like_term = ''.join(('%', term, '%'))\n    order_by = []\n    clauses = []\n    for field_name in fields:\n        field = getattr(cls, field_name)\n        clauses.append((((field == term) | (field ** like_term)) | (field ** full_like_term)))\n        order_by.append(case(None, (((field == term), 0), ((field ** like_term), 1), ((field ** full_like_term), 2)), default=3).asc())\n    query = query.where(reduce(operator.or_, clauses))\n    query = query.order_by(*order_by)\n    return query", "docstring": "Generic SQL search function that uses SQL ``LIKE`` to search the\ndatabase for matching records. The records are sorted by their\nrelavancey to the search term.\n\nThe query searches and sorts on the folling criteria, in order, where\nthe target string is ``exactly``:\n\n1. Straight equality (``x = 'exactly'``)\n2. Right hand ``LIKE`` (``x LIKE 'exact%'``)\n3. Substring ``LIKE`` (``x LIKE %act%``)\n\nArgs:\nterm (str): The search term to apply to the query.\n\nKeyword Args:\nfields (list|tuple|None): An optional list of fields to apply the\nsearch to. If not provided, the class variable\n``Meta.search_fields`` will be used by default.\nReturns:\npeewee.SelectQuery: An unexecuted query for the records.\n\nRaises:\nAttributeError: Raised if `search_fields` isn't defined in the\nclass and `fields` aren't provided for the function.", "source": "codesearchnet"}
{"code": "def add_electrode(self, electrode, label=None):\n        \n        if not label:\n            label = \"Electrode {}\".format(len(self._electrodes) + 1)\n        self._electrodes[label] = electrode", "docstring": "Add an electrode to the plot.\n\nArgs:\nelectrode: An electrode. All electrodes satisfying the\nAbstractElectrode interface should work.\nlabel: A label for the electrode. If None, defaults to a counting\nsystem, i.e. 'Electrode 1', 'Electrode 2', ...", "source": "juraj-google-style"}
{"code": "def encrypt(self, message, public_key):\n    max_str_len = (rsa.common.byte_size(public_key.n) - 11)\n    if (len(message) > max_str_len):\n        message = textwrap.wrap(message, width=max_str_len)\n    else:\n        message = [message]\n    enc_msg = []\n    for line in message:\n        enc_line = rsa.encrypt(line, public_key)\n        enc_line_converted = binascii.b2a_base64(enc_line)\n        enc_msg.append(enc_line_converted)\n    enc_msg = json.dumps(enc_msg)\n    return enc_msg", "docstring": "Encrypts a string using a given rsa.PublicKey object. If the message\nis larger than the key, it will split it up into a list and encrypt\neach line in the list.\n\nArgs:\nmessage (string): The string to encrypt.\npublic_key (rsa.PublicKey): The key object used to encrypt the\nmessage. Only the paired private key can decrypt it.\n\nReturns:\nA json string of the list of encrypted lines of the message.", "source": "codesearchnet"}
{"code": "def _create_mlir_loc(self, loc):\n    if loc is not None and loc.loc.filename:\n        file_name = os.path.basename(loc.loc.filename)\n        return 'loc(\"{}\":{}:{})'.format(file_name, loc.loc.lineno, loc.loc.col_offset)\n    else:\n        return 'loc(unknown)'", "docstring": "Creates mlir location from autograph ORIGIN value.\n\nArgs:\nloc: OriginInfo\n\nReturns:\nA serialized mlir location string.", "source": "github-repos"}
{"code": "def _base_expansion_size(num, bases):\n    return np.floor(np.log(num) / np.log(bases)) + 1", "docstring": "Computes the number of terms in the place value expansion.\n\nLet num = a0 + a1 b + a2 b^2 + ... ak b^k be the place value expansion of\n`num` in base b (ak <> 0). This function computes and returns `k+1` for each\nbase `b` specified in `bases`.\n\nThis can be inferred from the base `b` logarithm of `num` as follows:\n$$k = Floor(log_b (num)) + 1  = Floor( log(num) / log(b)) + 1$$\n\nArgs:\nnum: Scalar numpy array of dtype either `float32` or `float64`. The number\nto compute the base expansion size of.\nbases: Numpy array of the same dtype as num. The bases to compute the size\nagainst.\n\nReturns:\nTensor of same dtype and shape as `bases` containing the size of num when\nwritten in that base.", "source": "github-repos"}
{"code": "def starts_when(iterable, condition):\n    if (not callable(condition)):\n        cond_value = condition\n\n        def condition(x):\n            return (x == cond_value)\n    return itertools.dropwhile((lambda x: (not condition(x))), iterable)", "docstring": "Start yielding items when a condition arise.\n\nArgs:\niterable: the iterable to filter.\ncondition: if the callable returns True once, start yielding\nitems. If it's not a callable, it will be converted\nto one as `lambda condition: condition == item`.\n\nExample:\n\n>>> list(starts_when(range(10), lambda x: x > 5))\n[6, 7, 8, 9]\n>>> list(starts_when(range(10), 7))\n[7, 8, 9]", "source": "codesearchnet"}
{"code": "def _SetRow(self, new_values, row=0):\n    if (not row):\n        row = self._row_index\n    if (row > self.size):\n        raise TableError(('Entry %s beyond table size %s.' % (row, self.size)))\n    self._table[row].values = new_values", "docstring": "Sets the current row to new list.\n\nArgs:\nnew_values: List|dict of new values to insert into row.\nrow: int, Row to insert values into.\n\nRaises:\nTableError: If number of new values is not equal to row size.", "source": "codesearchnet"}
{"code": "def prerequisite_check():\n    if (sys.version_info < (3, 6)):\n        version_str = ('%s.%s.%s' % sys.version_info[:3])\n        search_url = build_search_query((_('install') + ' Python 3.7'))\n        return _('EH Forwarder Bot requires a minimum of Python 3.6 to run.  You are currently using Python {version}. \\n\\nYou may want to try:\\n{url}').format(version=version_str, url=search_url)\n    modules_err = _('You may want to visit the modules repository to find a list of available modules to install.\\nhttps:\n    try:\n        next(pkg_resources.iter_entry_points('ehforwarderbot.master'))\n    except StopIteration:\n        return ((_('No master channel detected.  EH Forwarder Bot requires at least one master channel installed to run.') + '\\n\\n') + modules_err)\n    try:\n        next(pkg_resources.iter_entry_points('ehforwarderbot.slave'))\n    except StopIteration:\n        return ((_('No slave channel detected.  EH Forwarder Bot requires at least one slave channel installed to run.') + '\\n\\n') + modules_err)", "docstring": "Check prerequisites of the framework, including Python version, installation of\nmodules, etc.\n\nReturns:\nOptional[str]: If the check is not passed, return error message regarding\nfailed test case.  None is returned otherwise.", "source": "codesearchnet"}
{"code": "def update(table, columns, values):\n    rows = len(values)\n    cells = len(columns) * len(values)\n    return _Mutator(mutation=Mutation(update=batch._make_write_pb(table, columns, values)), operation=WriteMutation._OPERATION_UPDATE, rows=rows, cells=cells, kwargs={'table': table, 'columns': columns, 'values': values})", "docstring": "Update one or more existing table rows.\n\nArgs:\ntable: Name of the table to be modified.\ncolumns: Name of the table columns to be modified.\nvalues: Values to be modified.", "source": "github-repos"}
{"code": "def _isbn_cleaner(fn):\n\n    @wraps(fn)\n    def wrapper(isbn):\n        return fn(_clean_isbn(isbn))\n    return wrapper", "docstring": "Decorator for calling other functions from this module.\n\nPurpose of this decorator is to clean the ISBN string from garbage and\nreturn list of digits.\n\nArgs:\nfn (function): function in which will be :func:`_clean_isbn(isbn)` call\nwrapped.", "source": "codesearchnet"}
{"code": "def Close(self, abort=False):\n    \n    if not self._closed_event or not self._terminate_event:\n      raise RuntimeError('Missing closed or terminate event.')\n\n    if not abort and self._closed_event.is_set():\n      raise errors.QueueAlreadyClosed()\n\n    self._closed_event.set()\n\n    if abort:\n      if not self._closed_event.is_set():\n        logger.warning(\n            '{0:s} queue aborting. Contents may be lost.'.format(self.name))\n\n      self._linger_seconds = 0\n\n      \n      \n      \n      self._terminate_event.set()\n\n    else:\n      logger.debug(\n          '{0:s} queue closing, will linger for up to {1:d} seconds'.format(\n              self.name, self._linger_seconds))", "docstring": "Closes the queue.\n\nArgs:\nabort (Optional[bool]): whether the Close is the result of an abort\ncondition. If True, queue contents may be lost.\n\nRaises:\nQueueAlreadyClosed: if the queue is not started, or has already been\nclosed.\nRuntimeError: if closed or terminate event is missing.", "source": "juraj-google-style"}
{"code": "def remove(self, iterable, data=None):\n    return self.root.remove(iterable, data=data)", "docstring": "Used to remove from the root node\n\nArgs:\niterable(hashable): index or key used to identify\nitem to remove\ndata: data to be paired with the key", "source": "codesearchnet"}
{"code": "def send(self, cumulative_counters=None, gauges=None, counters=None):\n        \n        if not gauges and not cumulative_counters and not counters:\n            return\n\n        data = {\n            'cumulative_counter': cumulative_counters,\n            'gauge': gauges,\n            'counter': counters,\n        }\n        _logger.debug('Sending datapoints to SignalFx: %s', data)\n\n        for metric_type, datapoints in data.items():\n            if not datapoints:\n                continue\n            if not isinstance(datapoints, list):\n                raise TypeError('Datapoints not of type list %s', datapoints)\n            for datapoint in datapoints:\n                self._add_extra_dimensions(datapoint)\n                self._add_to_queue(metric_type, datapoint)\n\n        \n        self._start_thread()", "docstring": "Send the given metrics to SignalFx.\n\nArgs:\ncumulative_counters (list): a list of dictionaries representing the\ncumulative counters to report.\ngauges (list): a list of dictionaries representing the gauges to\nreport.\ncounters (list): a list of dictionaries representing the counters\nto report.", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    if not self.FileEntryExistsByPathSpec(path_spec):\n      return None\n\n    location = getattr(path_spec, 'location', None)\n\n    if len(location) == 1:\n      return zip_file_entry.ZipFileEntry(\n          self._resolver_context, self, path_spec, is_root=True,\n          is_virtual=True)\n\n    kwargs = {}\n    try:\n      kwargs['zip_info'] = self._zip_file.getinfo(location[1:])\n    except KeyError:\n      kwargs['is_virtual'] = True\n\n    return zip_file_entry.ZipFileEntry(\n        self._resolver_context, self, path_spec, **kwargs)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification of the file entry.\n\nReturns:\nZipFileEntry: a file entry or None.", "source": "juraj-google-style"}
{"code": "def create(self, model_name):\n    \n    body = {'name': model_name}\n    parent = 'projects/' + self._project_id\n    \n    return self._api.projects().models().create(body=body, parent=parent).execute()", "docstring": "Create a model.\n\nArgs:\nmodel_name: the short name of the model, such as \"iris\".\nReturns:\nIf successful, returns informaiton of the model, such as\n{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}\nRaises:\nIf the model creation failed.", "source": "juraj-google-style"}
{"code": "def render_template(self, template_name, out_path=None):\n    return render_template(template_name, self.to_dict(), out_path=out_path)", "docstring": "Render a template based on this TileBus Block.\n\nThe template has access to all of the attributes of this block as a\ndictionary (the result of calling self.to_dict()).\n\nYou can optionally render to a file by passing out_path.\n\nArgs:\ntemplate_name (str): The name of the template to load.  This must\nbe a file in config/templates inside this package\nout_path (str): An optional path of where to save the output\nfile, otherwise it is just returned as a string.\n\nReturns:\nstring: The rendered template data.", "source": "codesearchnet"}
{"code": "def auth_proxy(self, method):\n        \n        def _proxy(*args, **kwargs):\n            \n            return method(self.session, *args, **kwargs)\n\n        return _proxy", "docstring": "Authentication proxy for API requests.\n\nThis is required because the API objects are naive of ``HelpScout``,\nso they would otherwise be unauthenticated.\n\nArgs:\nmethod (callable): A method call that should be authenticated. It\nshould accept a ``requests.Session`` as its first parameter,\nwhich should be used for the actual API call.\n\nReturns:\nmixed: The results of the authenticated callable.", "source": "juraj-google-style"}
{"code": "def plot_zt_mu(self, temp=600, output='eig', relaxation_time=1e-14,\n                   xlim=None):\n        \n        import matplotlib.pyplot as plt\n        plt.figure(figsize=(9, 7))\n        zt = self._bz.get_zt(relaxation_time=relaxation_time, output=output,\n                             doping_levels=False)[temp]\n        plt.plot(self._bz.mu_steps, zt, linewidth=3.0)\n        self._plot_bg_limits()\n        self._plot_doping(temp)\n        if output == 'eig':\n            plt.legend(['ZT$_1$', 'ZT$_2$', 'ZT$_3$'])\n        if xlim is None:\n            plt.xlim(-0.5, self._bz.gap + 0.5)\n        else:\n            plt.xlim(xlim)\n        plt.ylabel(\"ZT\", fontsize=30.0)\n        plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30.0)\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n        plt.tight_layout()\n        return plt", "docstring": "Plot the ZT in function of Fermi level.\n\nArgs:\ntemp: the temperature\nxlim: a list of min and max fermi energy by default (0, and band\ngap)\ntau: A relaxation time in s. By default none and the plot is by\nunits of relaxation time\n\nReturns:\na matplotlib object", "source": "juraj-google-style"}
{"code": "def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:\n        \n        return self._val if self._is_some else callback()", "docstring": "Returns the contained value or computes it from ``callback``.\n\nArgs:\ncallback: The the default callback.\n\nReturns:\nThe contained value if the :py:class:`Option` is ``Some``,\notherwise ``callback()``.\n\nExamples:\n>>> Some(0).unwrap_or_else(lambda: 111)\n0\n>>> NONE.unwrap_or_else(lambda: 'ha')\n'ha'", "source": "juraj-google-style"}
{"code": "def get(self, object_id):\n    if object_id is None:\n        return\n    return self._obj_ids_to_obj.get(object_id)", "docstring": "Given a shared object ID, returns a previously instantiated object.\n\nArgs:\nobject_id: shared object ID to use when attempting to find\nalready-loaded object.\n\nReturns:\nThe object, if we've seen this ID before. Else, `None`.", "source": "github-repos"}
{"code": "def get_file_tracebacks(self, file_path):\n    if (file_path not in self._source_file_content):\n        raise ValueError(('Source file of path \"%s\" has not been received by this instance of SourceManager.' % file_path))\n    lineno_to_op_names_and_stack_position = dict()\n    for op_log_entry in self._graph_traceback.log_entries:\n        for (stack_pos, trace) in enumerate(op_log_entry.code_def.traces):\n            if (self._graph_traceback.id_to_string[trace.file_id] == file_path):\n                if (trace.lineno not in lineno_to_op_names_and_stack_position):\n                    lineno_to_op_names_and_stack_position[trace.lineno] = []\n                lineno_to_op_names_and_stack_position[trace.lineno].append((op_log_entry.name, stack_pos))\n    return lineno_to_op_names_and_stack_position", "docstring": "Get the lists of ops created at lines of a specified source file.\n\nArgs:\nfile_path: Path to the source file.\n\nReturns:\nA dict mapping line number to a list of 2-tuples,\n`(op_name, stack_position)`\n`op_name` is the name of the name of the op whose creation traceback\nincludes the line.\n`stack_position` is the position of the line in the op's creation\ntraceback, represented as a 0-based integer.\n\nRaises:\nValueError: If `file_path` does not point to a source file that has been\nreceived by this instance of `SourceManager`.", "source": "codesearchnet"}
{"code": "def _reshape_tensors(tensors, shape):\n    reshaped = []\n    for t in tensors:\n        with ops.colocate_with(t):\n            reshaped.append(array_ops.reshape(t, shape))\n    return reshaped", "docstring": "Reshape tensors flattened by _flatten_tensors.\n\nArgs:\ntensors: list of `tf.Tensor` of identical length 1D tensors.\nshape: list of integers describing the desired shape.  Product of\nthe elements must equal the length of each tensor.\n\nReturns:\nlist of `tf.Tensor` which are the reshaped inputs.", "source": "github-repos"}
{"code": "async def get(self, uid: int, cached_msg: CachedMessage = None,\n                  requirement: FetchRequirement = FetchRequirement.METADATA) \\\n            -> Optional[MessageT]:\n        \n        ...", "docstring": "Return the message with the given UID.\n\nArgs:\nuid: The message UID.\ncached_msg: The last known cached message.\nrequirement: The data required from each message.\n\nRaises:\nIndexError: The UID is not valid in the mailbox.", "source": "juraj-google-style"}
{"code": "def sparse_segment_sqrt_n(data, indices, segment_ids, name=None, num_segments=None, sparse_gradient=False):\n    if num_segments is not None:\n        return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name, sparse_gradient=sparse_gradient)\n    else:\n        return gen_math_ops.sparse_segment_sqrt_n(data=data, indices=indices, segment_ids=segment_ids, name=name, sparse_gradient=sparse_gradient)", "docstring": "Computes the sum along sparse segments of a tensor divided by the sqrt(N).\n\n`N` is the size of the segment being reduced.\n\nArgs:\ndata: A `Tensor` with data that will be assembled in the output.\nindices: A 1-D `Tensor` with indices into `data`. Has same rank as\n`segment_ids`.\nsegment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\nshould be sorted and can be repeated.\nname: A name for the operation (optional).\nnum_segments: An optional int32 scalar. Indicates the size of the output\n`Tensor`.\nsparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the\ngradient of this function will be sparse (IndexedSlices) instead of dense\n(Tensor).\n\nReturns:\nA `tensor` of the shape as data, except for dimension 0 which\nhas size `k`, the number of segments specified via `num_segments` or\ninferred for the last element in `segments_ids`.", "source": "github-repos"}
{"code": "def encode_dataset(dataset, vocabulary):\n  \n  def encode(features):\n    return {k: vocabulary.encode_tf(v) for k, v in features.items()}\n  return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "docstring": "Encode from strings to token ids.\n\nArgs:\ndataset: a tf.data.Dataset with string values.\nvocabulary: a mesh_tensorflow.transformer.Vocabulary\nReturns:\na tf.data.Dataset with integer-vector values ending in EOS=1", "source": "juraj-google-style"}
{"code": "def get_imap_capabilities(server):\n    capabilities = list(map(str, list(server.capabilities())))\n    for i in range(len(capabilities)):\n        capabilities[i] = str(capabilities[i]).replace(\"b'\", '').replace(\"'\", '')\n    logger.debug('IMAP server supports: {0}'.format(capabilities))\n    return capabilities", "docstring": "Returns a list of an IMAP server's capabilities\n\nArgs:\nserver (imapclient.IMAPClient): An instance of imapclient.IMAPClient\n\nReturns (list): A list of capabilities", "source": "codesearchnet"}
{"code": "def __init__(self, in_features, out_features, mlp_dim=128):\n    super().__init__()\n    self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)\n    self.act = nn.ReLU(inplace=True)\n    self.conv2 = nn.Conv2d(mlp_dim, out_features, 1, 1, 0)", "docstring": "Projector MLP.\n\nArgs:\nin_features (`int`):\nNumber of input channels.\nout_features (`int`):\nNumber of output channels.\nmlp_dim (`int`, *optional*, defaults to 128):\nHidden dimension.", "source": "github-repos"}
{"code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n    if not grads_and_vars:\n        raise ValueError('Must supply at least one variable')\n    if global_step is None:\n        raise ValueError('Global step is required to check staleness')\n    self._global_step = global_step\n    train_ops = []\n    aggregated_grad = []\n    var_list = []\n    local_anchor = control_flow_ops.no_op()\n    distribution_strategy = distribute_lib.get_strategy()\n    with distribution_strategy.extended.colocate_vars_with(local_anchor):\n        self._local_step = variable_v1.VariableV1(initial_value=0, trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES], dtype=global_step.dtype.base_dtype, name='sync_rep_local_step')\n    self.local_step_init_op = state_ops.assign(self._local_step, global_step)\n    chief_init_ops = [self.local_step_init_op]\n    self.ready_for_local_init_op = variables.report_uninitialized_variables(variables.global_variables())\n    with ops.name_scope(None, self._name):\n        for grad, var in grads_and_vars:\n            var_list.append(var)\n            with ops.device(var.device):\n                if grad is None:\n                    aggregated_grad.append(None)\n                    continue\n                elif isinstance(grad, tensor.Tensor):\n                    grad_accum = data_flow_ops.ConditionalAccumulator(grad.dtype, shape=var.get_shape(), shared_name=var.name + '/grad_accum')\n                    train_ops.append(grad_accum.apply_grad(grad, local_step=self._local_step))\n                    aggregated_grad.append(grad_accum.take_grad(self._replicas_to_aggregate))\n                else:\n                    if not isinstance(grad, indexed_slices.IndexedSlices):\n                        raise ValueError('Unknown grad type!')\n                    grad_accum = data_flow_ops.SparseConditionalAccumulator(grad.dtype, shape=(), shared_name=var.name + '/grad_accum')\n                    train_ops.append(grad_accum.apply_indexed_slices_grad(grad, local_step=self._local_step))\n                    aggregated_grad.append(grad_accum.take_indexed_slices_grad(self._replicas_to_aggregate))\n                self._accumulator_list.append((grad_accum, var.device))\n        aggregated_grads_and_vars = zip(aggregated_grad, var_list)\n        with ops.device(global_step.device), ops.name_scope(''):\n            update_op = self._opt.apply_gradients(aggregated_grads_and_vars, global_step)\n        with ops.device(global_step.device), ops.name_scope(''):\n            sync_token_queue = data_flow_ops.FIFOQueue(-1, global_step.dtype.base_dtype, shapes=(), name='sync_token_q', shared_name='sync_token_q')\n            self._sync_token_queue = sync_token_queue\n        with ops.device(global_step.device), ops.name_scope(''):\n            with ops.control_dependencies(train_ops):\n                token = sync_token_queue.dequeue()\n            train_op = state_ops.assign(self._local_step, token)\n            with ops.control_dependencies([update_op]):\n                tokens = array_ops.fill([self._tokens_per_step], global_step)\n                sync_op = sync_token_queue.enqueue_many((tokens,))\n            if self._variable_averages is not None:\n                with ops.control_dependencies([sync_op]), ops.name_scope(''):\n                    sync_op = self._variable_averages.apply(self._variables_to_average)\n            self._chief_queue_runner = queue_runner.QueueRunner(sync_token_queue, [sync_op])\n        for accum, dev in self._accumulator_list:\n      
      with ops.device(dev):\n                chief_init_ops.append(accum.set_global_step(global_step, name='SetGlobalStep'))\n        self.chief_init_op = control_flow_ops.group(*chief_init_ops)\n        self._gradients_applied = True\n        return train_op", "docstring": "Apply gradients to variables.\n\nThis contains most of the synchronization implementation and also wraps the\napply_gradients() from the real optimizer.\n\nArgs:\ngrads_and_vars: List of (gradient, variable) pairs as returned by\ncompute_gradients().\nglobal_step: Optional Variable to increment by one after the\nvariables have been updated.\nname: Optional name for the returned operation.  Default to the\nname passed to the Optimizer constructor.\n\nReturns:\ntrain_op: The op to dequeue a token so the replicas can exit this batch\nand start the next one. This is executed by each replica.\n\nRaises:\nValueError: If the grads_and_vars is empty.\nValueError: If global step is not provided, the staleness cannot be\nchecked.", "source": "github-repos"}
{"code": "def _sort_or_argsort(values, axis, direction, return_argsort):\n    if direction not in _SORT_IMPL:\n        valid_directions = ', '.join(sorted(_SORT_IMPL.keys()))\n        raise ValueError(f'Argument `direction` should be one of {valid_directions}. Received: direction={direction}')\n    axis = framework_ops.convert_to_tensor(axis, name='axis')\n    axis_static = tensor_util.constant_value(axis)\n    if axis.shape.ndims not in (None, 0) or axis_static is None:\n        raise ValueError(f'Argument `axis` must be a constant scalar. Received: axis={axis}.')\n    axis_static = int(axis_static)\n    values = framework_ops.convert_to_tensor(values, name='values')\n    return _SORT_IMPL[direction](values, axis_static, return_argsort)", "docstring": "Internal sort/argsort implementation.\n\nArgs:\nvalues: The input values.\naxis: The axis along which to sort.\ndirection: 'ASCENDING' or 'DESCENDING'.\nreturn_argsort: Whether to return the argsort result.\n\nReturns:\nEither the sorted values, or the indices of the sorted values in the\noriginal tensor. See the `sort` and `argsort` docstrings.\n\nRaises:\nValueError: If axis is not a constant scalar, or the direction is invalid.", "source": "github-repos"}
{"code": "def call(self, x):\n    with tf.name_scope('embedding'):\n        embeddings = tf.gather(self.shared_weights, x)\n        embeddings *= (self.hidden_size ** 0.5)\n        padding = model_utils.get_padding(x)\n        embeddings *= tf.expand_dims((1 - padding), (- 1))\n        return embeddings", "docstring": "Get token embeddings of x.\n\nArgs:\nx: An int64 tensor with shape [batch_size, length]\nReturns:\nembeddings: float32 tensor with shape [batch_size, length, embedding_size]\npadding: float32 tensor with shape [batch_size, length] indicating the\nlocations of the padding tokens in x.", "source": "codesearchnet"}
{"code": "def configTestMesh(device_type_mesh_map: typing.Dict[typing.Text, layout_lib.Mesh]) -> layout_lib.Mesh:\n    reset_context()\n\n    def get_mesh(device_type):\n        mesh = device_type_mesh_map.get(device_type, None)\n        if mesh is None:\n            raise ValueError('Requires a %s mesh to run test on %s.' % (device_type, device_type))\n        return mesh\n    mesh = None\n    if is_tpu_present():\n        mesh = get_mesh('TPU')\n        reset_context()\n        accelerator_util.initialize_accelerator_system('TPU')\n    elif tf_config.list_physical_devices('GPU'):\n        mesh = get_mesh('GPU')\n        reset_logical_devices('GPU', np.prod(mesh.shape()))\n        accelerator_util.initialize_accelerator_system('GPU')\n    else:\n        mesh = get_mesh('CPU')\n        reset_logical_devices('CPU', np.prod(mesh.shape()))\n        accelerator_util.initialize_accelerator_system('CPU')\n    return mesh", "docstring": "Configs corresponding mesh given test context.\n\nIf runs on a CPU mesh, set virtual device on CPU.\nIf runs on a GPU mesh, sets virtual device on GPU with proper memory limits.\nif runs on a TPU mesh, initializes TPU system.\n\nArgs:\ndevice_type_mesh_map: A dictionary containing device_type -> mesh mapping.\n\nReturns:\nA properly configured mesh for use in test.", "source": "github-repos"}
{"code": "def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):\n    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)\n    return (heapq.nlargest(limit, best_list, key=(lambda i: i[1])) if (limit is not None) else sorted(best_list, key=(lambda i: i[1]), reverse=True))", "docstring": "Get a list of the best matches to a collection of choices.\n\nConvenience function for getting the choices with best scores.\n\nArgs:\nquery: A string to match against\nchoices: A list or dictionary of choices, suitable for use with\nextract().\nprocessor: Optional function for transforming choices before matching.\nSee extract().\nscorer: Scoring function for extract().\nscore_cutoff: Optional argument for score threshold. No matches with\na score less than this number will be returned. Defaults to 0.\nlimit: Optional maximum for the number of elements returned. Defaults\nto 5.\n\nReturns: A a list of (match, score) tuples.", "source": "codesearchnet"}
{"code": "def removeRow(self, triggered):\n    if triggered:\n        model = self.tableView.model()\n        selection = self.tableView.selectedIndexes()\n        rows = [index.row() for index in selection]\n        model.removeDataFrameRows(set(rows))\n        self.sender().setChecked(False)", "docstring": "Removes a row to the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the selected row will be removed\nfrom the model.", "source": "codesearchnet"}
{"code": "def export_msdt(self, filename):\n        \n        fmt = \"csv\" if filename.lower().endswith(\".csv\") else \"dat\"\n        delimiter = \", \" if fmt == \"csv\" else \" \"\n        with open(filename, \"wt\") as f:\n            if fmt == \"dat\":\n                f.write(\"\n            f.write(delimiter.join([\"t\", \"MSD\", \"MSD_a\", \"MSD_b\", \"MSD_c\",\n                                    \"MSCD\"]))\n            f.write(\"\\n\")\n            for dt, msd, msdc, mscd in zip(self.dt, self.msd,\n                                           self.msd_components, self.mscd):\n                f.write(delimiter.join([\"%s\" % v for v in [dt, msd] + list(\n                    msdc) + [mscd]]))\n                f.write(\"\\n\")", "docstring": "Writes MSD data to a csv file that can be easily plotted in other\nsoftware.\n\nArgs:\nfilename (str): Filename. Supported formats are csv and dat. If\nthe extension is csv, a csv file is written. Otherwise,\na dat format is assumed.", "source": "juraj-google-style"}
{"code": "def campaign(self, name, owner=None, **kwargs):\n        \n        return Campaign(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Campaign TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def text(name, data, step=None, description=None):\n    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)\n    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope)\n    with summary_scope(name, 'text_summary', values=[data, step]) as (tag, _):\n        tf.debugging.assert_type(data, tf.string)\n        return tf.summary.write(tag=tag, tensor=data, step=step, metadata=summary_metadata)", "docstring": "Write a text summary.\n\nArguments:\nname: A name for this summary. The summary tag used for TensorBoard will\nbe this name prefixed by any active name scopes.\ndata: A UTF-8 string tensor value.\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\ndescription: Optional long-form description for this summary, as a\nconstant `str`. Markdown is supported. Defaults to empty.\n\nReturns:\nTrue on success, or false if no summary was emitted because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "codesearchnet"}
{"code": "def __init__(self, xid=None, flags=None, miss_send_len=None):\n        \n        super().__init__(xid)\n        self.flags = flags\n        self.miss_send_len = miss_send_len", "docstring": "Create a SwitchConfig with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\nflags (ConfigFlag): OFPC_* flags.\nmiss_send_len (int): UBInt16 max bytes of new flow that the\ndatapath should send to the controller.", "source": "juraj-google-style"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.\n\nArgs:\nrequest: (BigqueryTablesDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BigqueryTablesDeleteResponse) The response message.", "source": "github-repos"}
{"code": "def diff(f, s):\n    if (isinstance(f, base.Root) or (f._yang_type in ('container', None))):\n        result = _diff_root(f, s)\n    elif (f._yang_type in ('list',)):\n        result = _diff_list(f, s)\n    else:\n        result = {}\n        first = '{}'.format(f)\n        second = '{}'.format(s)\n        if (first != second):\n            result = {'first': first, 'second': second}\n    return result", "docstring": "Given two models, return the difference between them.\n\nArgs:\n\nf (Pybindbase): First element.\ns (Pybindbase): Second element.\n\nReturns:\n\ndict: A dictionary highlighting the differences.\n\nExamples:\n\n>>> diff = napalm_yang.utils.diff(candidate, running)\n>>> pretty_print(diff)\n>>> {\n>>>     \"interfaces\": {\n>>>         \"interface\": {\n>>>             \"both\": {\n>>>                 \"Port-Channel1\": {\n>>>                     \"config\": {\n>>>                         \"mtu\": {\n>>>                             \"first\": \"0\",\n>>>                             \"second\": \"9000\"\n>>>                         }\n>>>                     }\n>>>                 }\n>>>             },\n>>>             \"first_only\": [\n>>>                 \"Loopback0\"\n>>>             ],\n>>>             \"second_only\": [\n>>>                 \"Loopback1\"\n>>>             ]\n>>>         }\n>>>     }\n>>> }", "source": "codesearchnet"}
{"code": "def populate_conversion_metadata(model_object, metadata):\n    try:\n        metadata_builder = flatbuffers.Builder(0)\n        metadata_builder.Finish(metadata.Pack(metadata_builder))\n        buffer_field = schema_fb.BufferT()\n        buffer_field.data = metadata_builder.Output()\n        if not model_object.metadata:\n            model_object.metadata = []\n        else:\n            for meta in model_object.metadata:\n                if meta.name.decode('utf-8') == CONVERSION_METADATA_FIELD_NAME:\n                    model_object.buffers[meta.buffer] = buffer_field\n                    return model_object\n        if not model_object.buffers:\n            model_object.buffers = []\n        model_object.buffers.append(buffer_field)\n        metadata_field = schema_fb.MetadataT()\n        metadata_field.name = CONVERSION_METADATA_FIELD_NAME\n        metadata_field.buffer = len(model_object.buffers) - 1\n        model_object.metadata.append(metadata_field)\n        return model_object\n    except Exception:\n        return model_object", "docstring": "Add or update conversion metadata to a tflite model.\n\nArgs:\nmodel_object: A tflite model in object form.\nmetadata: The conversion metadata.\n\nReturns:\nA tflite model object with embedded conversion metadata.", "source": "github-repos"}
{"code": "def _Check(self):\n    success = True\n    for path in self._paths:\n        if not os.path.isfile(path):\n            logging.error('No such file: %s', path)\n            success = False\n        elif not os.access(path, os.R_OK):\n            logging.error('No read access: %s', path)\n            success = False\n        elif not FLAGS.output and (not os.access(path, os.W_OK)):\n            logging.error('No write access: %s', path)\n            success = False\n    return success", "docstring": "Verifies the existence and read+write access to all paths.\n\nReturns:\nBoolean, True if all paths are OK, otherwise False.", "source": "github-repos"}
{"code": "def parameterized_codec(raw, b64):\n    \n\n    if isinstance(raw, bytes):\n        raw = raw.decode('utf-8')\n\n    result = _parameterize_string(raw)\n\n    \n    \n    \n    return Base64(result.data) if b64 else result", "docstring": "Parameterize a string, possibly encoding it as Base64 afterwards\n\nArgs:\nraw (`str` | `bytes`): String to be processed. Byte strings will be\ninterpreted as UTF-8.\nb64 (`bool`): Whether to wrap the output in a Base64 CloudFormation\ncall\n\nReturns:\n:class:`troposphere.AWSHelperFn`: output to be included in a\nCloudFormation template.", "source": "juraj-google-style"}
{"code": "async def find(self, seq_set: SequenceSet, selected: SelectedMailbox,\n                   requirement: FetchRequirement = FetchRequirement.METADATA) \\\n            -> AsyncIterable[Tuple[int, MessageT]]:\n        \n        for seq, cached_msg in selected.messages.get_all(seq_set):\n            msg = await self.get(cached_msg.uid, cached_msg, requirement)\n            if msg is not None:\n                yield (seq, msg)", "docstring": "Find the active message UID and message pairs in the mailbox that\nare contained in the given sequences set. Message sequence numbers\nare resolved by the selected mailbox session.\n\nArgs:\nseq_set: The sequence set of the desired messages.\nselected: The selected mailbox session.\nrequirement: The data required from each message.", "source": "juraj-google-style"}
{"code": "def download(\n            self,\n            file: Union[IO[bytes], asyncio.StreamWriter, None]=None,\n            raw: bool=False, rewind: bool=True,\n            duration_timeout: Optional[float]=None):\n        \n        if self._session_state != SessionState.request_sent:\n            raise RuntimeError('Request not sent')\n\n        if rewind and file and hasattr(file, 'seek'):\n            original_offset = file.tell()\n        else:\n            original_offset = None\n\n        if not hasattr(file, 'drain'):\n            self._response.body = file\n\n            if not isinstance(file, Body):\n                self._response.body = Body(file)\n\n        read_future = self._stream.read_body(self._request, self._response, file=file, raw=raw)\n\n        try:\n            yield from asyncio.wait_for(read_future, timeout=duration_timeout)\n        except asyncio.TimeoutError as error:\n            raise DurationTimeout(\n                'Did not finish reading after {} seconds.'\n                    .format(duration_timeout)\n            ) from error\n\n        self._session_state = SessionState.response_received\n\n        if original_offset is not None:\n            file.seek(original_offset)\n\n        self.event_dispatcher.notify(self.Event.end_response, self._response)\n        self.recycle()", "docstring": "Read the response content into file.\n\nArgs:\nfile: A file object or asyncio stream.\nraw: Whether chunked transfer encoding should be included.\nrewind: Seek the given file back to its original offset after\nreading is finished.\nduration_timeout: Maximum time in seconds of which the\nentire file must be read.\n\nBe sure to call :meth:`start` first.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def detect_content_type(self, path=None, payload=None, objectInput=None):\n        \n        \n        if objectInput:\n            message = \"Detection content type with file object is not stable.\"\n            log.exception(message)\n            raise TikaAppError(message)\n\n        f = file_path(path, payload, objectInput)\n        switches = [\"-d\", f]\n        result = self._command_template(switches).lower()\n        return result, path, f", "docstring": "Return the content type of passed file or payload.\n\nArgs:\npath (string): Path of file to analyze\npayload (string): Payload base64 to analyze\nobjectInput (object): file object/standard input to analyze\n\nReturns:\ncontent type of file (string)", "source": "juraj-google-style"}
{"code": "def Parse(self, parser_mediator):\n    file_entry = parser_mediator.GetFileEntry()\n    if (not file_entry):\n        raise errors.UnableToParseFile('Invalid file entry')\n    parser_mediator.AppendToParserChain(self)\n    try:\n        self.ParseFileEntry(parser_mediator, file_entry)\n    finally:\n        parser_mediator.PopFromParserChain()", "docstring": "Parsers the file entry and extracts event objects.\n\nArgs:\nparser_mediator (ParserMediator): a parser mediator.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def _deserialization_dependencies(self, children):\n    del children\n    return {}", "docstring": "Returns a dictionary containing `Trackables` that this object depends on.\n\nDependencies define the order to serialize and deserialize objects in the\nSavedModel. For example:\n\nclass A(Trackable):\nb = B()\ndef _deserialization_dependencies(self, children):\nreturn {'b': self.b}\n\nclass B(Trackable):\npass\n\nWe say that object `a=A()` depends on `a.b`.\n\nDependencies are guaranteed to be serialized and deserialized before the\nobject depending on them. The following methods use dependencies:\n- `_deserialize_from_proto` [loading]\n\nSavedModel loads with the bottom-up approach, by first creating all objects\nin the order defined by the dependencies, then connecting the children.\n\nUnlike `_trackable_children`, this function does not define the\n`SavedObjectGraph`. It only changes the order in which things are\nsaved/loaded. Therefore, if there are dependencies that are not in the\n`SavedObjectGraph`, saving will fail.\n\nArgs:\nchildren: Dict returned from `_trackable_children`.\n\nReturns:\nA dictionary mapping names to `Trackable`.", "source": "github-repos"}
{"code": "def _makedirs(name, user=None, group=None, dir_mode=None, win_owner=None, win_perms=None, win_deny_perms=None, win_inheritance=None):\n    if salt.utils.platform.is_windows():\n        (drive, path) = os.path.splitdrive(name)\n        if (not os.path.isdir(drive)):\n            raise CommandExecutionError(drive)\n        win_owner = (win_owner if win_owner else user)\n        return __salt__['file.makedirs'](path=name, owner=win_owner, grant_perms=win_perms, deny_perms=win_deny_perms, inheritance=win_inheritance)\n    else:\n        return __salt__['file.makedirs'](path=name, user=user, group=group, mode=dir_mode)", "docstring": "Helper function for creating directories when the ``makedirs`` option is set\nto ``True``. Handles Unix and Windows based systems\n\n.. versionadded:: 2017.7.8\n\nArgs:\nname (str): The directory path to create\nuser (str): The linux user to own the directory\ngroup (str): The linux group to own the directory\ndir_mode (str): The linux mode to apply to the directory\nwin_owner (str): The Windows user to own the directory\nwin_perms (dict): A dictionary of grant permissions for Windows\nwin_deny_perms (dict): A dictionary of deny permissions for Windows\nwin_inheritance (bool): True to inherit permissions on Windows\n\nReturns:\nbool: True if successful, otherwise False on Windows\nstr: Error messages on failure on Linux\nNone: On successful creation on Linux\n\nRaises:\nCommandExecutionError: If the drive is not mounted on Windows", "source": "codesearchnet"}
{"code": "def _VerifyValuesWithDilation(self, tensor_in_sizes, filter_in_sizes, stride, dilation, padding, data_type, data_format='NHWC'):\n    total_size_1 = 1\n    total_size_2 = 1\n    for s in tensor_in_sizes:\n        total_size_1 *= s\n    for s in filter_in_sizes:\n        total_size_2 *= s\n    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)], dtype=data_type).reshape(tensor_in_sizes)\n    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)], dtype=data_type).reshape(filter_in_sizes)\n    with self.session() as sess:\n        if data_type == np.float32:\n            tolerance = 0.01\n        else:\n            self.assertEqual(data_type, np.float64)\n            tolerance = 1e-08\n        t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)\n        t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)\n        native_t1 = t1\n        strides = [1, stride, stride, 1]\n        dilations = [dilation, dilation]\n        if data_format == 'NCHW':\n            native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n            strides = [1, 1, stride, stride]\n        with self.test_scope():\n            conv_native = nn_impl.depthwise_conv2d(native_t1, t2, strides=strides, rate=dilations, data_format=data_format, padding=padding)\n        if data_format == 'NCHW':\n            conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])\n        with ops.device('CPU'):\n            strides = [1, stride, stride, 1]\n            conv_interface = nn_impl.depthwise_conv2d(t1, t2, strides=strides, rate=dilations, padding=padding)\n        native_result = sess.run(conv_native, {t1: x1, t2: x2})\n        interface_result = sess.run(conv_interface, {t1: x1, t2: x2})\n    print('data_type:', data_type, 'max diff = ', np.amax(np.absolute(native_result - interface_result)))\n    self.assertAllClose(np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,\ninput_depth, depth_multiplier].\nstride: Stride.\ndilation: Dilation.\npadding: Padding type.\ndata_type: The data type to use.\ndata_format: The data_format of the input. \"NHWC\" or \"NCHW\".", "source": "github-repos"}
{"code": "def get_graph(self):\n    st = self.status\n    if (st in (SolverStatus.solved, SolverStatus.unsolved)):\n        phase = self._latest_nonfailed_phase()\n        return phase.get_graph()\n    else:\n        return self.get_fail_graph()", "docstring": "Returns the most recent solve graph.\n\nThis gives a graph showing the latest state of the solve. The specific\ngraph returned depends on the solve status. When status is:\nunsolved: latest unsolved graph is returned;\nsolved:   final solved graph is returned;\nfailed:   most appropriate failure graph is returned (see `failure_reason`);\ncyclic:   last failure is returned (contains cycle).\n\nReturns:\nA pygraph.digraph object.", "source": "codesearchnet"}
{"code": "def df_first_row_to_dict(df):\n        \n        if df is not None:\n            return [dict(r) for i, r in df.head(1).iterrows()][0]", "docstring": "First DataFrame row to list of dict\n\nArgs:\ndf (pandas.DataFrame): A DataFrame with at least one row\n\nReturns:\nA list of dict that looks like:\n\n[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]\n\nfrom a DataFrame that looks like:\n\nC1  C2  C3\n1   x   y   z\n\nElse if `df` is `None`, returns `None`", "source": "juraj-google-style"}
{"code": "def _verify(self):\n    if (self._num_sides < 2):\n        raise ValueError('At least two sides required.')\n    for (prev, curr) in six.moves.zip(self._edges, self._edges[1:]):\n        self._verify_pair(prev, curr)\n    prev = self._edges[(- 1)]\n    curr = self._edges[0]\n    self._verify_pair(prev, curr)", "docstring": "Verify that the edges define a curved polygon.\n\nThis may not be entirely comprehensive, e.g. won't check\nself-intersection of the defined polygon.\n\n.. note::\n\nThis currently checks that edge endpoints match **exactly**\nbut allowing some roundoff may be desired.\n\nRaises:\nValueError: If there are fewer than two sides.\nValueError: If one of the sides is not in 2D.\nValueError: If consecutive sides don't share an endpoint.", "source": "codesearchnet"}
{"code": "def register_intent_parser(self, intent_parser, domain=0):\n        \n        if domain not in self.domains:\n            self.register_domain(domain=domain)\n        self.domains[domain].register_intent_parser(\n            intent_parser=intent_parser)", "docstring": "Register a intent parser with a domain.\n\nArgs:\nintent_parser(intent): The intent parser you wish to register.\ndomain(str): a string representing the domain you wish register the intent\nparser to.", "source": "juraj-google-style"}
{"code": "def _truncate_filename(filename, max_length):\n    if len(filename) <= max_length:\n        return filename\n    if '.' in filename:\n        filename, extension = filename.rsplit('.', 1)\n        if len(extension) > max_length - 1:\n            return filename[:max_length]\n        return '.'.join([filename[:max_length - len(extension) - 1], extension])\n    else:\n        return filename[:max_length]", "docstring": "Truncates a filename while trying to preserve the extension.\n\nArgs:\nfilename: string, the filename to potentially truncate.\n\nReturns:\nThe truncated filename that is less than or equal to the given maximum\nlength.", "source": "github-repos"}
{"code": "def pause(self, device):\n        \n        resp = self.post('pause', params={'device': device},\n                         return_response=True)\n        error = resp.text\n        if not error:\n            error = None\n        return {'success': resp.status_code == requests.codes.ok,\n                'error': error}", "docstring": "Pause the given device.\n\nArgs:\ndevice (str): Device ID.\n\nReturns:\ndict: with keys ``success`` and ``error``.", "source": "juraj-google-style"}
{"code": "def _parse_address(self, config):\n    match = re.search('ip address ([^\\\\s]+)', config)\n    value = (match.group(1) if match else None)\n    return dict(address=value)", "docstring": "Parses the config block and returns the ip address value\n\nThe provided configuration block is scaned and the configured value\nfor the IP address is returned as a dict object.  If the IP address\nvalue is not configured, then None is returned for the value\n\nArgs:\nconfig (str): The interface configuration block to parse\n\nReturn:\ndict: A dict object intended to be merged into the resource dict", "source": "codesearchnet"}
{"code": "def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):\n    oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if (spec.symbol == oxid_el.symbol)])\n    oxid_new = math.floor((oxid_old + 1))\n    if (oxid_new > oxid_el.max_oxidation_state):\n        return numa\n    spec_old = Specie(oxid_el.symbol, oxid_old)\n    spec_new = Specie(oxid_el.symbol, oxid_new)\n    specamt = spec_amts_oxi[spec_old]\n    spec_amts_oxi = {sp: amt for (sp, amt) in spec_amts_oxi.items() if (sp != spec_old)}\n    spec_amts_oxi[spec_new] = specamt\n    spec_amts_oxi = Composition(spec_amts_oxi)\n    oxi_noA = sum([(spec.oxi_state * spec_amts_oxi[spec]) for spec in spec_amts_oxi if (spec.symbol not in self.cation.symbol)])\n    a = max(0, ((- oxi_noA) / self.cation_charge))\n    numa = numa.union({a})\n    if (a == 0):\n        return numa\n    else:\n        for oxid_el in oxid_els:\n            numa = numa.union(self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa))\n        return numa", "docstring": "This is a helper method for get_removals_int_oxid!\n\nArgs:\nspec_amts_oxi - a dict of species to their amounts in the structure\noxid_el - the element to oxidize\noxid_els - the full list of elements that might be oxidized\nnuma - a running set of numbers of A cation at integer oxidation steps\nReturns:\na set of numbers A; steps for for oxidizing oxid_el first, then the other oxid_els in this list", "source": "codesearchnet"}
{"code": "def _compute_best_partitions(num_part, sizes, nfps):\n    if (num_part < 2):\n        raise ValueError('num_part cannot be less than 2')\n    if (num_part > len(sizes)):\n        raise ValueError('num_part cannot be greater than the domain size of all set sizes')\n    if (num_part == 2):\n        (total_nfps, u) = min((((nfps[(0, u1)] + nfps[((u1 + 1), (len(sizes) - 1))]), u1) for u1 in range(0, (len(sizes) - 1))))\n        return ([(sizes[0], sizes[u]), (sizes[(u + 1)], sizes[(- 1)])], total_nfps, None)\n    cost = np.zeros((len(sizes), (num_part - 2)))\n    p2i = (lambda p: (p - 2))\n    for p in range(2, num_part):\n        for u in range((p - 1), len(sizes)):\n            if (p == 2):\n                cost[(u, p2i(p))] = min(((nfps[(0, u1)] + nfps[((u1 + 1), u)]) for u1 in range(u)))\n            else:\n                cost[(u, p2i(p))] = min(((cost[(u1, p2i((p - 1)))] + nfps[((u1 + 1), u)]) for u1 in range(((p - 1) - 1), u)))\n    p = num_part\n    (total_nfps, u) = min((((cost[(u1, p2i((p - 1)))] + nfps[((u1 + 1), (len(sizes) - 1))]), u1) for u1 in range(((p - 1) - 1), (len(sizes) - 1))))\n    partitions = [(sizes[(u + 1)], sizes[(- 1)])]\n    p -= 1\n    while (p > 1):\n        (_, u1_best) = min((((cost[(u1, p2i(p))] + nfps[((u1 + 1), u)]), u1) for u1 in range(((p - 1) - 1), u)))\n        partitions.insert(0, (sizes[(u1_best + 1)], sizes[u]))\n        u = u1_best\n        p -= 1\n    partitions.insert(0, (sizes[0], sizes[u]))\n    return [partitions, total_nfps, cost]", "docstring": "Computes the optimal partitions given the size distributions\nand computed number of expected false positives for all sub-intervals.\n\nArgs:\nnum_part (int): The number of partitions to create.\nsizes (numpy.array): The complete domain of set sizes in sorted order.\nnfps (numpy.array): The computed number of expected false positives\nfor all sub-intervals; axis-0 is for the indexes of lower bounds and\naxis-1 is for the indexes of upper bounds.\n\nReturns:\npartitions (list): list of lower and upper bounds of set sizes for\nall partitions.\ntotal_nfps (float): total number of expected false positives from all\npartitions.\ncost (numpy.array): a N x p-1 matrix of the computed optimal NFPs for\nall sub-problems given upper bound set size and number of partitions.", "source": "codesearchnet"}
{"code": "def __bool__(self):\n    self._disallow_bool_casting()", "docstring": "Dummy method to prevent a tensor from being used as a Python `bool`.\n\nThis overload raises a `TypeError` when the user inadvertently\ntreats a `Tensor` as a boolean (most commonly in an `if` or `while`\nstatement), in code that was not converted by AutoGraph. For example:\n\n```python\nif tf.constant(True):  # Will raise.\n# ...\n\nif tf.constant(5) < tf.constant(7):  # Will raise.\n# ...\n```\n\nRaises:\n`TypeError`.", "source": "github-repos"}
{"code": "class TFBaseModelOutputWithNoAttention(ModelOutput):\n    last_hidden_state: Optional[tf.Tensor] = None\n    hidden_states: Optional[Tuple[tf.Tensor, ...]] = None", "docstring": "Base class for model's outputs, with potential hidden states.\n\nArgs:\nlast_hidden_state (`tf.Tensor` shape `(batch_size, num_channels, height, width)`):\nSequence of hidden-states at the output of the last layer of the model.\nhidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for\nthe output of each layer) of shape `(batch_size, num_channels, height, width)`.\n\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def _ParseRecord(self, parser_mediator, page_data, record_offset):\n    \n    record_header_map = self._GetDataTypeMap('binarycookies_record_header')\n\n    try:\n      record_header = self._ReadStructureFromByteStream(\n          page_data[record_offset:], record_offset, record_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to map record header data at offset: 0x{0:08x} with error: '\n          '{1!s}').format(record_offset, exception))\n\n    event_data = SafariBinaryCookieEventData()\n    event_data.flags = record_header.flags\n\n    if record_header.url_offset:\n      data_offset = record_offset + record_header.url_offset\n      event_data.url = self._ParseCString(page_data, data_offset)\n\n    if record_header.name_offset:\n      data_offset = record_offset + record_header.name_offset\n      event_data.cookie_name = self._ParseCString(page_data, data_offset)\n\n    if record_header.path_offset:\n      data_offset = record_offset + record_header.path_offset\n      event_data.path = self._ParseCString(page_data, data_offset)\n\n    if record_header.value_offset:\n      data_offset = record_offset + record_header.value_offset\n      event_data.cookie_value = self._ParseCString(page_data, data_offset)\n\n    if record_header.creation_time:\n      date_time = dfdatetime_cocoa_time.CocoaTime(\n          timestamp=record_header.creation_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if record_header.expiration_time:\n      date_time = dfdatetime_cocoa_time.CocoaTime(\n          timestamp=record_header.expiration_time)\n    else:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    for plugin in self._cookie_plugins:\n      if parser_mediator.abort:\n        break\n\n      if event_data.cookie_name != plugin.COOKIE_NAME:\n        continue\n\n      try:\n        plugin.UpdateChainAndProcess(\n            parser_mediator, cookie_name=event_data.cookie_name,\n            cookie_data=event_data.cookie_value, url=event_data.url)\n\n      except Exception as exception:  \n        parser_mediator.ProduceExtractionWarning(\n            'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(\n                plugin.NAME, exception))", "docstring": "Parses a record from the page data.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\npage_data (bytes): page data.\nrecord_offset (int): offset of the record relative to the start\nof the page.\n\nRaises:\nParseError: when the record cannot be parsed.", "source": "juraj-google-style"}
{"code": "def simple_lmm(snps, pheno, K=None, covs=None, test='lrt', NumIntervalsDelta0=100, NumIntervalsDeltaAlt=0, searchDelta=False):\n    t0 = time.time()\n    if (K is None):\n        K = SP.eye(snps.shape[0])\n    lm = limix.CLMM()\n    lm.setK(K)\n    lm.setSNPs(snps)\n    lm.setPheno(pheno)\n    if (covs is None):\n        covs = SP.ones((snps.shape[0], 1))\n    lm.setCovs(covs)\n    if (test == 'lrt'):\n        lm.setTestStatistics(0)\n    elif (test == 'f'):\n        lm.setTestStatistics(1)\n    else:\n        print(test)\n        raise NotImplementedError('only f or lrt are implemented')\n    lm.setNumIntervals0(NumIntervalsDelta0)\n    if searchDelta:\n        lm.setNumIntervalsAlt(NumIntervalsDeltaAlt)\n    else:\n        lm.setNumIntervalsAlt(0)\n    lm.process()\n    t1 = time.time()\n    print(('finished GWAS testing in %.2f seconds' % (t1 - t0)))\n    return lm", "docstring": "Univariate fixed effects linear mixed model test for all SNPs\n\nArgs:\nsnps:   [N x S] SP.array of S SNPs for N individuals\npheno:  [N x 1] SP.array of 1 phenotype for N individuals\nK:      [N x N] SP.array of LMM-covariance/kinship koefficients (optional)\nIf not provided, then linear regression analysis is performed\ncovs:   [N x D] SP.array of D covariates for N individuals\ntest:   'lrt' for likelihood ratio test (default) or 'f' for F-test\nNumIntervalsDelta0:  number of steps for delta optimization on the null model (100)\nNumIntervalsDeltaAlt:number of steps for delta optimization on the alt. model (0 - no optimization)\nsearchDelta:     Carry out delta optimization on the alternative model? if yes We use NumIntervalsDeltaAlt steps\n\nReturns:\nlimix LMM object", "source": "codesearchnet"}
{"code": "def errors(self, batch_id, halt_on_error=True):\n    errors = []\n    try:\n        r = self.tcex.session.get('/v2/batch/{}/errors'.format(batch_id))\n        self.tcex.log.debug('Retrieve Errors for ID {}: status code {}, errors {}'.format(batch_id, r.status_code, r.text))\n        if r.ok:\n            errors = json.loads(r.text)\n        for error in errors:\n            error_reason = error.get('errorReason')\n            for error_msg in self._critical_failures:\n                if re.findall(error_msg, error_reason):\n                    self.tcex.handle_error(10500, [error_reason], halt_on_error)\n        return errors\n    except Exception as e:\n        self.tcex.handle_error(560, [e], halt_on_error)", "docstring": "Retrieve Batch errors to ThreatConnect API.\n\n.. code-block:: javascript\n\n[{\n\"errorReason\": \"Incident incident-001 has an invalid status.\",\n\"errorSource\": \"incident-001 is not valid.\"\n}, {\n\"errorReason\": \"Incident incident-002 has an invalid status.\",\n\"errorSource\":\"incident-002 is not valid.\"\n}]\n\nArgs:\nbatch_id (str): The ID returned from the ThreatConnect API for the current batch job.\nhalt_on_error (bool, default:True): If True any exception will raise an error.", "source": "codesearchnet"}
{"code": "def get(self):\n    logger.info('Loading refresh_token from %s', repr(self._filename))\n    try:\n        with open(self._filename) as f:\n            return f.read()\n    except IOError as e:\n        logger.info('Failed to load refresh_token: %s', e)", "docstring": "Get cached refresh token.\n\nReturns:\nCached refresh token, or ``None`` on failure.", "source": "codesearchnet"}
{"code": "def next_id(self, channel):\n        \n\n        if channel not in self.topics:\n            self.topics[channel] = 0\n            return 0\n\n        self.topics[channel] += 1\n        return self.topics[channel]", "docstring": "Get the next sequence number for a named channel or topic\n\nIf channel has not been sent to next_id before, 0 is returned\notherwise next_id returns the last id returned + 1.\n\nArgs:\nchannel (string): The name of the channel to get a sequential\nid for.\n\nReturns:\nint: The next id for this channel", "source": "juraj-google-style"}
{"code": "def unique_fetches(self):\n    raise NotImplementedError('unique_fetches must be implemented by subclasses')", "docstring": "Return the list of unique tensors or ops needed by this fetch mapper.\n\nReturns:\nA list of tensors or ops.", "source": "github-repos"}
{"code": "def zeros_like(x, dtype=None, name=None):\n    return array_ops.zeros_like(x, dtype=dtype, name=name)", "docstring": "Instantiates an all-zeros variable of the same shape as another tensor.\n\nArgs:\nx: Keras variable or Keras tensor.\ndtype: dtype of returned Keras variable.\n`None` uses the dtype of `x`.\nname: name for the variable to create.\n\nReturns:\nA Keras variable with the shape of `x` filled with zeros.\n\nExample:\n\n```python\nfrom tensorflow.keras import backend as K\nkvar = K.variable(np.random.random((2,3)))\nkvar_zeros = K.zeros_like(kvar)\nK.eval(kvar_zeros)\n# array([[ 0.,  0.,  0.], [ 0.,  0.,  0.]], dtype=float32)\n```", "source": "github-repos"}
{"code": "def stops_when(iterable, condition):\n    if (not callable(condition)):\n        cond_value = condition\n\n        def condition(x):\n            return (x == cond_value)\n    return itertools.takewhile((lambda x: (not condition(x))), iterable)", "docstring": "Stop yielding items when a condition arise.\n\nArgs:\niterable: the iterable to filter.\ncondition: if the callable returns True once, stop yielding\nitems. If it's not a callable, it will be converted\nto one as `lambda condition: condition == item`.\n\nExample:\n\n>>> list(stops_when(range(10), lambda x: x > 5))\n[0, 1, 2, 3, 4, 5]\n>>> list(stops_when(range(10), 7))\n[0, 1, 2, 3, 4, 5, 6]", "source": "codesearchnet"}
{"code": "def _comparison_functions(cls, partial=False):\n\n    def prerelease_cmp(a, b):\n        'Compare prerelease components.\\n\\n            Special rule: a version without prerelease component has higher\\n            precedence than one with a prerelease component.\\n            '\n        if (a and b):\n            return identifier_list_cmp(a, b)\n        elif a:\n            return (- 1)\n        elif b:\n            return 1\n        else:\n            return 0\n\n    def build_cmp(a, b):\n        'Compare build metadata.\\n\\n            Special rule: there is no ordering on build metadata.\\n            '\n        if (a == b):\n            return 0\n        else:\n            return NotImplemented\n\n    def make_optional(orig_cmp_fun):\n        \"Convert a cmp-like function to consider 'None == *'.\"\n\n        @functools.wraps(orig_cmp_fun)\n        def alt_cmp_fun(a, b):\n            if ((a is None) or (b is None)):\n                return 0\n            return orig_cmp_fun(a, b)\n        return alt_cmp_fun\n    if partial:\n        return [base_cmp, make_optional(base_cmp), make_optional(base_cmp), make_optional(prerelease_cmp), make_optional(build_cmp)]\n    else:\n        return [base_cmp, base_cmp, base_cmp, prerelease_cmp, build_cmp]", "docstring": "Retrieve comparison methods to apply on version components.\n\nThis is a private API.\n\nArgs:\npartial (bool): whether to provide 'partial' or 'strict' matching.\n\nReturns:\n5-tuple of cmp-like functions.", "source": "codesearchnet"}
{"code": "def classifier_factory(clf):\n    required_methods = ['fit', 'score', 'predict']\n    for method in required_methods:\n        if (not hasattr(clf, method)):\n            raise TypeError('\"{}\" is not in clf. Did you pass a classifier instance?'.format(method))\n    optional_methods = ['predict_proba']\n    for method in optional_methods:\n        if (not hasattr(clf, method)):\n            warnings.warn('{} not in clf. Some plots may not be possible to generate.'.format(method))\n    additional_methods = {'plot_learning_curve': plot_learning_curve, 'plot_confusion_matrix': plot_confusion_matrix_with_cv, 'plot_roc_curve': plot_roc_curve_with_cv, 'plot_ks_statistic': plot_ks_statistic_with_cv, 'plot_precision_recall_curve': plot_precision_recall_curve_with_cv, 'plot_feature_importances': plot_feature_importances}\n    for (key, fn) in six.iteritems(additional_methods):\n        if hasattr(clf, key):\n            warnings.warn('\"{}\" method already in clf. Overriding anyway. This may result in unintended behavior.'.format(key))\n        setattr(clf, key, types.MethodType(fn, clf))\n    return clf", "docstring": "Embeds scikit-plot instance methods in an sklearn classifier.\n\nArgs:\nclf: Scikit-learn classifier instance\n\nReturns:\nThe same scikit-learn classifier instance passed in **clf**\nwith embedded scikit-plot instance methods.\n\nRaises:\nValueError: If **clf** does not contain the instance methods\nnecessary for scikit-plot instance methods.", "source": "codesearchnet"}
{"code": "def get_cached_response(self, key):\n        \n        cached_value = self.data.get(key, _CACHE_MISS)\n        is_found = cached_value is not _CACHE_MISS\n        return CachedResponse(is_found, key, cached_value)", "docstring": "Retrieves a CachedResponse for the provided key.\n\nArgs:\nkey (string)\n\nReturns:\nA CachedResponse with is_found status and value.", "source": "juraj-google-style"}
{"code": "def cancelTickByTickData(self, contract: Contract, tickType: str):\n    ticker = self.ticker(contract)\n    reqId = self.wrapper.endTicker(ticker, tickType)\n    if reqId:\n        self.client.cancelTickByTickData(reqId)\n    else:\n        self._logger.error(f'cancelMktData: No reqId found for contract {contract}')", "docstring": "Unsubscribe from tick-by-tick data\n\nArgs:\ncontract: The exact contract object that was used to\nsubscribe with.", "source": "codesearchnet"}
{"code": "def set_status(self, name: str=None):\n    game = None\n    if name:\n        game = {'name': name}\n    payload = {'op': WebSocketEvent.STATUS_UPDATE.value, 'd': {'game': game, 'status': 'online', 'afk': False, 'since': 0.0}}\n    data = json.dumps(payload, indent=2)\n    self.logger.debug(f'Sending status update payload: {data}')\n    self._ws.send(data)", "docstring": "Updates the bot's status\n\nThis is used to get the game that the bot is \"playing\" or to clear it.\nIf you want to set a game, pass a name; if you want to clear it, either\ncall this method without the optional ``name`` parameter or explicitly\npass ``None``.\n\nArgs:\nname: the game's name, or None", "source": "codesearchnet"}
{"code": "def search_client_by_id(self, clientID) -> Client:\n        \n        for c in self.clients:\n            if c.id == clientID:\n                return c\n        return None", "docstring": "searches a client by given id\n\nArgs:\nclientID(str): the client to search for\n\nReturns\nthe client object or None if it couldn't find a client", "source": "juraj-google-style"}
{"code": "def detect_incorrect_erc20_interface(contract):\n    functions = [f for f in contract.functions if ((f.contract == contract) and IncorrectERC20InterfaceDetection.incorrect_erc20_interface(f.signature))]\n    return functions", "docstring": "Detect incorrect ERC20 interface\n\nReturns:\nlist(str) : list of incorrect function signatures", "source": "codesearchnet"}
{"code": "def info(self, message, domain=None):\n    if (domain is None):\n        domain = self.extension_name\n    info(message, domain)", "docstring": "Shortcut function for `utils.loggable.info`\n\nArgs:\nmessage: see `utils.loggable.info`\ndomain: see `utils.loggable.info`", "source": "codesearchnet"}
{"code": "def emit_obj_create(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int) -> None:\n    event = self._create_event('N', category, name, pid, tid, timestamp)\n    event['id'] = object_id\n    self._events.append(event)", "docstring": "Adds an object creation event to the trace.\n\nArgs:\ncategory: The event category as a string.\nname:  The event name as a string.\ntimestamp:  The timestamp of this event as a long integer.\npid:  Identifier of the process generating this event as an integer.\ntid:  Identifier of the thread generating this event as an integer.\nobject_id: Identifier of the object as an integer.", "source": "github-repos"}
{"code": "def send_notification(*, subsystem, recipients, subject, body_html, body_text):\n    \n    from cloud_inquisitor import CINQ_PLUGINS\n\n    if not body_html and not body_text:\n        raise ValueError('body_html or body_text must be provided')\n\n    \n    recipients = list(set(recipients))\n\n    notifiers = map(lambda plugin: plugin.load(), CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins'])\n\n    for cls in filter(lambda x: x.enabled(), notifiers):\n        for recipient in recipients:\n            if isinstance(recipient, NotificationContact):\n                if recipient.type == cls.notifier_type:\n                    try:\n                        notifier = cls()\n                        notifier.notify(subsystem, recipient.value, subject, body_html, body_text)\n                    except Exception:\n                        log.exception('Failed sending notification for {}/{}'.format(\n                            recipient.type,\n                            recipient.value\n                        ))\n            else:\n                log.warning('Unexpected recipient {}'.format(recipient))", "docstring": "Method to send a notification. A plugin may use only part of the information, but all fields are required.\n\nArgs:\nsubsystem (`str`): Name of the subsystem originating the notification\nrecipients (`list` of :obj:`NotificationContact`): List of recipients\nsubject (`str`): Subject / title of the notification\nbody_html (`str)`: HTML formatted version of the message\nbody_text (`str`): Text formatted version of the message\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):\n    self._filenames = filenames\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n    self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_TF_RECORD_BUFFER_SIZE_BYTES)\n    self._name = name\n    variant_tensor = gen_dataset_ops.tf_record_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString())\n    super(_TFRecordDataset, self).__init__(variant_tensor)", "docstring": "Creates a `TFRecordDataset`.\n\nArgs:\nfilenames: A `tf.string` tensor containing one or more filenames.\ncompression_type: (Optional.) A `tf.string` scalar evaluating to one of\n`\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\nbuffer_size: (Optional.) A `tf.int64` scalar representing the number of\nbytes in the read buffer. 0 means no buffering.\nname: (Optional.) A name for the tf.data operation.", "source": "github-repos"}
{"code": "def fit_size_models(self, model_names,\n                        model_objs,\n                        input_columns,\n                        output_column=\"Hail_Size\",\n                        output_start=5,\n                        output_step=5,\n                        output_stop=100):\n        \n        print(\"Fitting size models\")\n        groups = self.data[\"train\"][\"member\"][self.group_col].unique()\n        output_start = int(output_start)\n        output_step = int(output_step)\n        output_stop = int(output_stop)\n        for group in groups:\n            group_data = self.data[\"train\"][\"combo\"].loc[self.data[\"train\"][\"combo\"][self.group_col] == group]\n            group_data.dropna(inplace=True)\n            group_data = group_data[group_data[output_column] >= output_start]\n            output_data = group_data[output_column].values.astype(int)\n            output_data[output_data > output_stop] = output_stop\n            discrete_data = ((output_data - output_start) \n            self.size_models[group] = {}\n            self.size_models[group][\"outputvalues\"] = np.arange(output_start, output_stop + output_step, output_step,\n                                                                dtype=int)\n            for m, model_name in enumerate(model_names):\n                print(\"{0} {1}\".format(group, model_name))\n                self.size_models[group][model_name] = deepcopy(model_objs[m])\n                self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)", "docstring": "Fit size models to produce discrete pdfs of forecast hail sizes.\nArgs:\nmodel_names: List of model names\nmodel_objs: List of model objects\ninput_columns: List of input variables\noutput_column: Output variable name\noutput_start: Hail size bin start\noutput_step: hail size bin step\noutput_stop: hail size bin stop", "source": "juraj-google-style"}
{"code": "def load_scheduler_plugins(self):\n    if (not self.scheduler_plugins):\n        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.schedulers']['plugins']:\n            cls = entry_point.load()\n            self.scheduler_plugins[cls.__name__] = cls\n            if (cls.__name__ == self.active_scheduler):\n                self.log.debug('Scheduler loaded: {} in module {}'.format(cls.__name__, cls.__module__))\n            else:\n                self.log.debug('Scheduler disabled: {} in module {}'.format(cls.__name__, cls.__module__))", "docstring": "Refresh the list of available schedulers\n\nReturns:\n`list` of :obj:`BaseScheduler`", "source": "codesearchnet"}
{"code": "def _read_mode_rsralt(self, size, kind):\n    if (size != 4):\n        raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')\n    _code = self._read_unpack(2)\n    data = dict(kind=kind, type=self._read_opt_type(kind), length=size, alert=_ROUTER_ALERT.get(_code, 'Reserved'), code=_code)\n    return data", "docstring": "Read Router Alert option.\n\nPositional arguments:\nsize - int, length of option\nkind - int, 148 (RTRALT)\n\nReturns:\n* dict -- extracted Router Alert (RTRALT) option\n\nStructure of Router Alert (RTRALT) option [RFC 2113]:\n+--------+--------+--------+--------+\n|10010100|00000100|  2 octet value  |\n+--------+--------+--------+--------+\n\nOctets      Bits        Name                    Description\n0           0     ip.rsralt.kind          Kind (148)\n0           0     ip.rsralt.type.copy     Copied Flag (1)\n0           1     ip.rsralt.type.class    Option Class (0)\n0           3     ip.rsralt.type.number   Option Number (20)\n1           8     ip.rsralt.length        Length (4)\n2          16     ip.rsralt.alert         Alert\n2          16     ip.rsralt.code          Alert Code", "source": "codesearchnet"}
{"code": "def dependency_of_targets(targets, op):\n    \n    \n    if isinstance(op, tf.Tensor):\n        op = op.op\n    assert isinstance(op, tf.Operation), op\n\n    from tensorflow.contrib.graph_editor import get_backward_walk_ops\n    \n    dependent_ops = get_backward_walk_ops(targets, control_inputs=True)\n    return op in dependent_ops", "docstring": "Check that op is in the subgraph induced by the dependencies of targets.\nThe result is memoized.\n\nThis is useful if some SessionRunHooks should be run only together with certain ops.\n\nArgs:\ntargets: a tuple of ops or tensors. The targets to find dependencies of.\nop (tf.Operation or tf.Tensor):\n\nReturns:\nbool: True if any one of `targets` depend on `op`.", "source": "juraj-google-style"}
{"code": "def format_time(\n        self,\n        hour_expression,\n        minute_expression,\n        second_expression=''\n    ):\n        \n        hour = int(hour_expression)\n\n        period = ''\n        if self._options.use_24hour_time_format is False:\n            period = \" PM\" if (hour >= 12) else \" AM\"\n            if hour > 12:\n                hour -= 12\n\n        minute = str(int(minute_expression))  \n        second = ''\n        if second_expression is not None and second_expression:\n            second = \"{}{}\".format(\":\", str(int(second_expression)).zfill(2))\n\n        return \"{0}:{1}{2}{3}\".format(str(hour).zfill(2), minute.zfill(2), second, period)", "docstring": "Given time parts, will contruct a formatted time description\nArgs:\nhour_expression: Hours part\nminute_expression: Minutes part\nsecond_expression: Seconds part\nReturns:\nFormatted time description", "source": "juraj-google-style"}
{"code": "def _google_section_permitted(line_info, state):\n    if state.section.indentation is None:\n        return True\n    return line_info.indentation <= state.section.indentation or line_info.indentation < state.section.line1_indentation", "docstring": "Returns whether a new google section is permitted to start here.\n\nQ: Why might a new Google section not be allowed?\nA: If we're in the middle of a Google \"Args\" section, then lines that start\n\"param:\" will usually be a new arg, rather than a new section.\nWe use whitespace to determine when the Args section has actually ended.\n\nA Google section ends when either:\n- A new google section begins at either\n- indentation less than indentation of line 1 of the previous section\n- or <= indentation of the previous section\n- Or the docstring terminates.\n\nArgs:\nline_info: Information about the current line.\nstate: The state of the parser.\nReturns:\nTrue or False, indicating whether a new Google section is permitted at the\ncurrent line.", "source": "github-repos"}
{"code": "def auto_batch_size(sequence_length, mesh_shape, layout_rules, tokens_per_split=2048):\n    num_splits = mtf.tensor_dim_to_mesh_dim_size(layout_rules, mesh_shape, mtf.Dimension('batch', 0))\n    ret = (max(1, (tokens_per_split \n    tf.logging.info(('AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s sequence_length=%s batch_size=%s' % (tokens_per_split, num_splits, sequence_length, ret)))\n    return ret", "docstring": "Automatically compute batch size.\n\nArgs:\nsequence_length: an integer\nmesh_shape: an input to mtf.convert_to_shape()\nlayout_rules: an input to mtf.convert_to_layout_rules()\ntokens_per_split: an integer\nReturns:\nan integer", "source": "codesearchnet"}
{"code": "def breakpoint_set(self, addr, thumb=False, arm=False):\n    flags = enums.JLinkBreakpoint.ANY\n    if thumb:\n        flags = (flags | enums.JLinkBreakpoint.THUMB)\n    elif arm:\n        flags = (flags | enums.JLinkBreakpoint.ARM)\n    handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n    if (handle <= 0):\n        raise errors.JLinkException('Breakpoint could not be set.')\n    return handle", "docstring": "Sets a breakpoint at the specified address.\n\nIf ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if\n``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a\nnormal breakpoint is set.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): the address where the breakpoint will be set\nthumb (bool): boolean indicating to set the breakpoint in THUMB mode\narm (bool): boolean indicating to set the breakpoint in ARM mode\n\nReturns:\nAn integer specifying the breakpoint handle.  This handle should be\nretained for future breakpoint operations.\n\nRaises:\nTypeError: if the given address is not an integer.\nJLinkException: if the breakpoint could not be set.", "source": "codesearchnet"}
{"code": "def get_config(self, name, default=MISSING):\n        \n\n        res = self.config.get(name, default)\n        if res is MISSING:\n            raise ArgumentError(\"Could not find config value by name and no default supplied\", name=name)\n\n        return res", "docstring": "Get a config value from this adapter by name\n\nArgs:\nname (string): The name of the config variable\ndefault (object): The default value to return if config is not found\n\nReturns:\nobject: the value associated with the name\n\nRaises:\nArgumentError: if the name is not found and no default is supplied", "source": "juraj-google-style"}
{"code": "def _get_encoding(dom, default='utf-8'):\n    encoding = dom.find('meta', {'http-equiv': 'Content-Type'})\n    if (not encoding):\n        return default\n    encoding = encoding[0].params.get('content', None)\n    if (not encoding):\n        return default\n    return encoding.lower().split('=')[(- 1)]", "docstring": "Try to look for meta tag in given `dom`.\n\nArgs:\ndom (obj): pyDHTMLParser dom of HTML elements.\ndefault (default \"utr-8\"): What to use if encoding is not found in\n`dom`.\n\nReturns:\nstr/default: Given encoding or `default` parameter if not found.", "source": "codesearchnet"}
{"code": "def from_variant(variant, structure):\n    return _VariantDataset(variant, structure)", "docstring": "Constructs a dataset from the given variant and (nested) structure.\n\nArgs:\nvariant: A scalar `tf.variant` tensor representing a dataset.\nstructure: A (nested) structure of `tf.TypeSpec` objects representing the\nstructure of each element in the dataset.\n\nReturns:\nA `tf.data.Dataset` instance.", "source": "github-repos"}
{"code": "def get_schema_descendant(self, route: SchemaRoute) -> Optional[SchemaNode]:\n    node = self\n    for p in route:\n        node = node.get_child(*p)\n        if (node is None):\n            return None\n    return node", "docstring": "Return descendant schema node or ``None`` if not found.\n\nArgs:\nroute: Schema route to the descendant node\n(relative to the receiver).", "source": "codesearchnet"}
{"code": "def me(self):\n    json_data = self._session.get((API_ENDPOINT + '/me'))\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Get the details of the person accessing the API.\n\nRaises:\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def override_parent_subgraph(self, parent_subgraph, invisible_edges=None):\n        \n        with transaction.atomic():\n            if invisible_edges is None:\n                invisible_edges = set()\n            children = list(parent_subgraph.keys())\n            all_old_relations = dict(proso.list.group_by(\n                list(ItemRelation.objects.filter(child_id__in=children)),\n                by=lambda relation: relation.child_id\n            ))\n            to_delete = set()\n            for child_id, parents in parent_subgraph.items():\n                old_relations = {\n                    relation.parent_id: relation\n                    for relation in all_old_relations.get(child_id, [])\n                }\n                for parent_id in parents:\n                    if parent_id not in old_relations:\n                        ItemRelation.objects.create(\n                            parent_id=parent_id,\n                            child_id=child_id,\n                            visible=(child_id, parent_id) not in invisible_edges\n                        )\n                    elif old_relations[parent_id].visible != ((child_id, parent_id) not in invisible_edges):\n                        old_relations[parent_id].visible = (child_id, parent_id) not in invisible_edges\n                        old_relations[parent_id].save()\n                to_delete |= {old_relations[parent_id].pk for parent_id in set(old_relations.keys()) - set(parents)}\n            ItemRelation.objects.filter(pk__in=to_delete).delete()", "docstring": "Get all items with outcoming edges from the given subgraph, drop all\ntheir parent relations, and then add parents according to the given\nsubgraph.\n\nArgs:\nparent_subgraph (dict): item id -> list of parents(item ids)\ninvisible_edges (list|set): set of (from, to) tuples specifying\ninvisible edges", "source": "juraj-google-style"}
{"code": "def get_http_header(self) -> Response:\n    with wpull.util.reset_file_offset(self.block_file):\n        data = self.block_file.read(4096)\n    match = re.match(b'(.*?\\\\r?\\\\n\\\\r?\\\\n)', data)\n    if (not match):\n        return\n    (status_line, dummy, field_str) = match.group(1).partition(b'\\n')\n    try:\n        (version, code, reason) = Response.parse_status_line(status_line)\n    except ValueError:\n        return\n    response = Response(status_code=code, reason=reason, version=version)\n    try:\n        response.fields.parse(field_str, strict=False)\n    except ValueError:\n        return\n    return response", "docstring": "Return the HTTP header.\n\nIt only attempts to read the first 4 KiB of the payload.\n\nReturns:\nResponse, None: Returns an instance of\n:class:`.http.request.Response` or None.", "source": "codesearchnet"}
{"code": "def _CalculateStorageCounters(self, storage_reader):\n    analysis_reports_counter = collections.Counter()\n    analysis_reports_counter_error = False\n    event_labels_counter = collections.Counter()\n    event_labels_counter_error = False\n    parsers_counter = collections.Counter()\n    parsers_counter_error = False\n    for session in storage_reader.GetSessions():\n        if isinstance(session.analysis_reports_counter, dict):\n            analysis_reports_counter += collections.Counter(session.analysis_reports_counter)\n        elif isinstance(session.analysis_reports_counter, collections.Counter):\n            analysis_reports_counter += session.analysis_reports_counter\n        else:\n            analysis_reports_counter_error = True\n        if isinstance(session.event_labels_counter, dict):\n            event_labels_counter += collections.Counter(session.event_labels_counter)\n        elif isinstance(session.event_labels_counter, collections.Counter):\n            event_labels_counter += session.event_labels_counter\n        else:\n            event_labels_counter_error = True\n        if isinstance(session.parsers_counter, dict):\n            parsers_counter += collections.Counter(session.parsers_counter)\n        elif isinstance(session.parsers_counter, collections.Counter):\n            parsers_counter += session.parsers_counter\n        else:\n            parsers_counter_error = True\n    storage_counters = {}\n    warnings_by_path_spec = collections.Counter()\n    warnings_by_parser_chain = collections.Counter()\n    for warning in list(storage_reader.GetWarnings()):\n        warnings_by_path_spec[warning.path_spec.comparable] += 1\n        warnings_by_parser_chain[warning.parser_chain] += 1\n    storage_counters['warnings_by_path_spec'] = warnings_by_path_spec\n    storage_counters['warnings_by_parser_chain'] = warnings_by_parser_chain\n    if (not analysis_reports_counter_error):\n        storage_counters['analysis_reports'] = analysis_reports_counter\n    if (not event_labels_counter_error):\n        storage_counters['event_labels'] = event_labels_counter\n    if (not parsers_counter_error):\n        storage_counters['parsers'] = parsers_counter\n    return storage_counters", "docstring": "Calculates the counters of the entire storage.\n\nArgs:\nstorage_reader (StorageReader): storage reader.\n\nReturns:\ndict[str,collections.Counter]: storage counters.", "source": "codesearchnet"}
{"code": "def get_lats():\n    lats = {}\n    fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')\n    with open(fname, 'rb') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        for row in reader:\n            word = row[0].lower()\n            word = re.sub(' ', '', word)\n            lats[word] = float(row[1])\n    return lats", "docstring": "Get a dictionary that maps Backpage city names to their respective latitudes.\n\nReturns:\ndictionary that maps city names (Strings) to latitudes (Floats)", "source": "codesearchnet"}
{"code": "def pad_to_multiple_2d(x, block_shape):\n    old_shape = x.get_shape().dims\n    last = old_shape[(- 1)]\n    if (len(old_shape) == 4):\n        height_padding = ((- common_layers.shape_list(x)[1]) % block_shape[0])\n        width_padding = ((- common_layers.shape_list(x)[2]) % block_shape[1])\n        paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]]\n    elif (len(old_shape) == 5):\n        height_padding = ((- common_layers.shape_list(x)[2]) % block_shape[0])\n        width_padding = ((- common_layers.shape_list(x)[3]) % block_shape[1])\n        paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]]\n    padded_x = tf.pad(x, paddings)\n    padded_shape = padded_x.get_shape().as_list()\n    padded_shape = (padded_shape[:(- 1)] + [last])\n    padded_x.set_shape(padded_shape)\n    return padded_x", "docstring": "Making sure x is a multiple of shape.\n\nArgs:\nx: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor\nblock_shape: a 2-d list of integer shapes\n\nReturns:\npadded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor", "source": "codesearchnet"}
{"code": "def is_str(string):\n    \n    if sys.version_info[:2] >= (3, 0):\n        return isinstance(string, str)\n\n    return isinstance(string, basestring)", "docstring": "Python 2 and 3 compatible string checker.\n\nArgs:\nstring (str | basestring): the string to check\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def download_and_uncompress(self, fileobj, dst_path):\n    try:\n        with tarfile.open(mode='r|*', fileobj=fileobj) as tgz:\n            for tarinfo in tgz:\n                abs_target_path = _merge_relative_path(dst_path, tarinfo.name)\n                if tarinfo.isfile():\n                    self._extract_file(tgz, tarinfo, abs_target_path)\n                elif tarinfo.isdir():\n                    tf_v1.gfile.MakeDirs(abs_target_path)\n                else:\n                    raise ValueError(('Unexpected object type in tar archive: %s' % tarinfo.type))\n            total_size_str = tf_utils.bytes_to_readable_str(self._total_bytes_downloaded, True)\n            self._print_download_progress_msg(('Downloaded %s, Total size: %s' % (self._url, total_size_str)), flush=True)\n    except tarfile.ReadError:\n        raise IOError(('%s does not appear to be a valid module.' % self._url))", "docstring": "Streams the content for the 'fileobj' and stores the result in dst_path.\n\nArgs:\nfileobj: File handle pointing to .tar/.tar.gz content.\ndst_path: Absolute path where to store uncompressed data from 'fileobj'.\n\nRaises:\nValueError: Unknown object encountered inside the TAR file.", "source": "codesearchnet"}
{"code": "def verify_abort(func, *args, **kwargs):\n    expected_exception = kwargs.pop('expected_exception', runez.system.AbortException)\n    with CaptureOutput() as logged:\n        try:\n            value = func(*args, **kwargs)\n            assert False, ('%s did not raise, but returned %s' % (func, value))\n        except expected_exception:\n            return str(logged)", "docstring": "Convenient wrapper around functions that should exit or raise an exception\n\nExample:\nassert \"Can't create folder\" in verify_abort(ensure_folder, \"/dev/null/not-there\")\n\nArgs:\nfunc (callable): Function to execute\n*args: Args to pass to 'func'\n**kwargs: Named args to pass to 'func'\n\nReturns:\n(str): Chatter from call to 'func', if it did indeed raise", "source": "codesearchnet"}
{"code": "def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    q_input = array_ops.fake_quant_with_min_max_args(input_tensor, min=-0.1, max=0.2, num_bits=8, narrow_range=False)\n    filter_tensor = ops.convert_to_tensor(self.filter_value)\n    filter_min = array_ops.identity(array_ops.constant([-0.5, -0.5], dtype=dtypes.float32))\n    filter_max = array_ops.identity(array_ops.constant([0.5, 0.5], dtype=dtypes.float32))\n    q_filter = array_ops.fake_quant_with_min_max_vars_per_channel(filter_tensor, filter_min, filter_max, num_bits=8, narrow_range=True)\n    bias = array_ops.constant([0.1, 0.2], dtype=dtypes.float32)\n    scale, offset = ([1.0] * 2, [0.5] * 2)\n    mean, variance = (scale, offset)\n    out = nn_ops.conv2d(q_input, q_filter, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC', name='sample/conv2d')\n    if has_bias:\n        out = nn_ops.bias_add(out, bias, data_format='NHWC')\n    if activation_fn is not None:\n        if has_batch_norm:\n            out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False)\n        out = activation_fn(out)\n    out_min = array_ops.constant([-0.18, -0.32], dtype=dtypes.float32)\n    out_max = array_ops.constant([0.5, 0.5], dtype=dtypes.float32)\n    q_out = array_ops.fake_quant_with_min_max_vars_per_channel(out, min=out_min, max=out_max, num_bits=8, narrow_range=True)\n    return {'output': q_out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\ninput_tensor: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def resize(self, image: torch.Tensor, size: SizeDict, patch_size: SizeDict, interpolation: 'F.InterpolationMode'=None, **kwargs) -> torch.Tensor:\n    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR\n    if size.longest_edge:\n        size = (size.longest_edge, size.longest_edge)\n    elif size.height and size.width:\n        size = (size.height, size.width)\n    else:\n        raise ValueError(\"size must contain either 'longest_edge' or 'height' and 'width'.\")\n    if patch_size.height and patch_size.width:\n        patch_size = (patch_size.height, patch_size.width)\n    else:\n        raise ValueError(\"patch_size must contain either 'shortest_edge' or 'height' and 'width'.\")\n    output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size)\n    return F.resize(image, size=output_size, interpolation=interpolation, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`torch.Tensor`):\nImage to resize.\nsize (`SizeDict`):\nDict containing the longest possible edge of the image.\npatch_size (`SizeDict`):\nPatch size used to calculate the size of the output image.\ninterpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):\nResampling filter to use when resiizing the image.", "source": "github-repos"}
{"code": "def Verify(self, completely=False):\n    res = super(Block, self).Verify()\n    if (not res):\n        return False\n    from neo.Blockchain import GetBlockchain, GetConsensusAddress\n    if (self.Transactions[0].Type != TransactionType.MinerTransaction):\n        return False\n    for tx in self.Transactions[1:]:\n        if (tx.Type == TransactionType.MinerTransaction):\n            return False\n    if completely:\n        bc = GetBlockchain()\n        if (self.NextConsensus != GetConsensusAddress(bc.GetValidators(self.Transactions).ToArray())):\n            return False\n        for tx in self.Transactions:\n            if (not tx.Verify()):\n                pass\n        logger.error('Blocks cannot be fully validated at this moment.  please pass completely=False')\n        raise NotImplementedError()\n    return True", "docstring": "Verify the integrity of the block.\n\nArgs:\ncompletely: (Not functional at this time).\n\nReturns:\nbool: True if valid. False otherwise.", "source": "codesearchnet"}
{"code": "def approximate_split(x, num_splits, axis=0):\n  \n  size = shape_list(x)[axis]\n  size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]\n  return tf.split(x, size_splits, axis=axis)", "docstring": "Split approximately equally into num_splits parts.\n\nArgs:\nx: a Tensor\nnum_splits: an integer\naxis: an integer.\n\nReturns:\na list of num_splits Tensors.", "source": "juraj-google-style"}
{"code": "def read_from_hdx(identifier, configuration=None):\n        \n        \n\n        resourceview = ResourceView(configuration=configuration)\n        result = resourceview._load_from_hdx('resource view', identifier)\n        if result:\n            return resourceview\n        return None", "docstring": "Reads the resource view given by identifier from HDX and returns ResourceView object\n\nArgs:\nidentifier (str): Identifier of resource view\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[ResourceView]: ResourceView object if successful read, None if not", "source": "juraj-google-style"}
{"code": "def take_profit(self, accountID, **kwargs):\n        \n        return self.create(\n            accountID,\n            order=TakeProfitOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to create a Take Profit Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a TakeProfitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def run(self, shell=False, ignore_errors=False, stdin=False, check_output=False):\n    previous_directory = os.getcwd()\n    os.chdir(self.directory)\n    try:\n        kwargs = {'stderr': sys.stderr, 'stdin': (sys.stdin if stdin else None), 'env': self.env_vars, 'shell': shell}\n        if check_output:\n            return subprocess.check_output(self.command, **kwargs).decode('utf8')\n        else:\n            kwargs['stdout'] = sys.stdout\n            return subprocess.check_call(self.command, **kwargs)\n    except subprocess.CalledProcessError:\n        if ignore_errors:\n            pass\n        else:\n            raise\n    os.chdir(previous_directory)", "docstring": "Run subcommand.\n\nArgs:\nshell (Optional[bool]): Run command using shell (default False)\nignore_errors (Optional[bool]): If the command has a non-zero return code, don't raise an exception (default False)\nstdin (Optional[bool]): Plug input from stdin when running command (default False)\ncheck_output (Optional[bool]): Return command output as string (default False)\n\nReturns:\nString if check_output is True, else None.\n\nRaises:\nsubprocess.CalledProcessError when the command has an error, unless ignore_errors is True.", "source": "codesearchnet"}
{"code": "def sg_input(shape=None, dtype=sg_floatx, name=None):\n    r\n    if shape is None:\n        return tf.placeholder(dtype, shape=None, name=name)\n    else:\n        if not isinstance(shape, (list, tuple)):\n            shape = [shape]\n        return tf.placeholder(dtype, shape=[None] + list(shape), name=name)", "docstring": "r\"\"\"Creates a placeholder.\n\nArgs:\nshape: A tuple/list of integers. If an integers is given, it will turn to a list.\ndtype: A data type. Default is float32.\nname: A name for the placeholder.\n\nReturns:\nA wrapped placeholder `Tensor`.", "source": "juraj-google-style"}
{"code": "def sample(self, n):\n    total = bq.Query(('select count(*) from %s' % self._get_source())).execute().result()[0].values()[0]\n    if (n > total):\n        raise ValueError('sample larger than population')\n    sampling = bq.Sampling.random(percent=((n * 100.0) / float(total)))\n    if (self._query is not None):\n        source = self._query\n    else:\n        source = ('SELECT * FROM `%s`' % self._table)\n    sample = bq.Query(source).execute(sampling=sampling).result()\n    df = sample.to_dataframe()\n    return df", "docstring": "Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will\nincur cost.\n\nArgs:\nn: number of sampled counts. Note that the number of counts returned is approximated.\nReturns:\nA dataframe containing sampled data.\nRaises:\nException if n is larger than number of rows.", "source": "codesearchnet"}
{"code": "def save_attributes_to_hdf5_group(group, name, data):\n    HDF5_OBJECT_HEADER_LIMIT = 64512\n    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n    if bad_attributes:\n        raise RuntimeError(f'The following attributes cannot be saved to HDF5 file because they are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}')\n    data_npy = np.asarray(data)\n    num_chunks = 1\n    chunked_data = np.array_split(data_npy, num_chunks)\n    while any((x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data)):\n        num_chunks += 1\n        chunked_data = np.array_split(data_npy, num_chunks)\n    if num_chunks > 1:\n        for chunk_id, chunk_data in enumerate(chunked_data):\n            group.attrs['%s%d' % (name, chunk_id)] = chunk_data\n    else:\n        group.attrs[name] = data", "docstring": "Saves attributes (data) of the specified name into the HDF5 group.\n\nThis method deals with an inherent problem of HDF5 file which is not able to store data larger than\nHDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\ngroup: A pointer to a HDF5 group.\nname: A name of the attributes to save.\ndata: Attributes data to store.\n\nRaises:\nRuntimeError: If any single attribute is too large to be saved.\n\nCopied from Keras to Transformers to avoid versioning issues.", "source": "github-repos"}
{"code": "def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):\n    self._check_input_partition_dims(tensor, dims)\n    return partition_or_replicate_on_host(tensor, dims)", "docstring": "Checks dims and partitions or replicates the input tensor.\n\nThe ops inside this function are placed on the host side.\n\nArgs:\ntensor: The input tensor which will be partitioned or replicated.\ndims: A list of integer describes how to partition the input tensor.\n\nReturns:\nAn iterator of `Tensor`s or a list of partitioned tensors.", "source": "github-repos"}
{"code": "def ask_when_work_is_populated(self, work):\n    \n    work.read_all_from_datastore()\n    if work.work:\n      print('Work is already written to datastore.\\n'\n            'If you continue these data will be overwritten and '\n            'possible corrupted.')\n      inp = input_str('Do you want to continue? '\n                      '(type \"yes\" without quotes to confirm): ')\n      return inp == 'yes'\n    else:\n      return True", "docstring": "When work is already populated asks whether we should continue.\n\nThis method prints warning message that work is populated and asks\nwhether user wants to continue or not.\n\nArgs:\nwork: instance of WorkPiecesBase\n\nReturns:\nTrue if we should continue and populate datastore, False if we should stop", "source": "juraj-google-style"}
{"code": "def summary(self, stdout=True, plot=False):\n    if stdout:\n        print('Collinearity summary:')\n        print(pd.concat([self.results['Eigenvalues'], self.results['ConditionIndices'], self.results['VIFs'], self.results['CorrelationMatrix']], axis=1))\n        print('Outlier summary:')\n        print(self.results['RowMahalanobisDistances'])\n        print(self.results['ColumnMahalanobisDistances'])\n        print('Validity summary:')\n        print(self.results['Variances'])\n    if plot:\n        verify_dependencies('seaborn')\n        for (key, result) in self.results.items():\n            if (key == 'CorrelationMatrix'):\n                ax = plt.axes()\n                sns.heatmap(result, cmap='Blues', ax=ax)\n                ax.set_title(key)\n                sns.plt.show()\n            else:\n                result.plot(kind='bar', title=key)\n                plt.show()", "docstring": "Displays diagnostics to the user\n\nArgs:\nstdout (bool): print results to the console\nplot (bool): use Seaborn to plot results", "source": "codesearchnet"}
{"code": "def _check_audience(payload_dict, audience):\n    if (audience is None):\n        return\n    audience_in_payload = payload_dict.get('aud')\n    if (audience_in_payload is None):\n        raise AppIdentityError('No aud field in token: {0}'.format(payload_dict))\n    if (audience_in_payload != audience):\n        raise AppIdentityError('Wrong recipient, {0} != {1}: {2}'.format(audience_in_payload, audience, payload_dict))", "docstring": "Checks audience field from a JWT payload.\n\nDoes nothing if the passed in ``audience`` is null.\n\nArgs:\npayload_dict: dict, A dictionary containing a JWT payload.\naudience: string or NoneType, an audience to check for in\nthe JWT payload.\n\nRaises:\nAppIdentityError: If there is no ``'aud'`` field in the payload\ndictionary but there is an ``audience`` to check.\nAppIdentityError: If the ``'aud'`` field in the payload dictionary\ndoes not match the ``audience``.", "source": "codesearchnet"}
{"code": "def resolve(phrases: typing.List[str], html: str, separator: str='\\u200b') -> str:\n    resolver = HTMLChunkResolver(phrases, separator)\n    resolver.feed(html)\n    result = '<span style=\"%s\">%s</span>' % (PARENT_CSS_STYLE, resolver.output)\n    return result", "docstring": "Wraps phrases in the HTML string with non-breaking markup.\n\nArgs:\nphrases (List[str]): The phrases included in the HTML string.\nhtml (str): The HTML string to resolve.\nseparator (str, optional): The separator string.\n\nReturns:\nThe HTML string with phrases wrapped in non-breaking markup.", "source": "github-repos"}
{"code": "def save_weights_to_hdf5_group(f, model):\n    from keras.src import __version__ as keras_version\n    save_attributes_to_hdf5_group(f, 'layer_names', [layer.name.encode('utf8') for layer in model.layers])\n    f.attrs['backend'] = backend.backend().encode('utf8')\n    f.attrs['keras_version'] = str(keras_version).encode('utf8')\n    for layer in sorted(model.layers, key=lambda x: x.name):\n        g = f.create_group(layer.name)\n        weights = _legacy_weights(layer)\n        save_subset_weights_to_hdf5_group(g, weights)\n    weights = list((v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights))\n    g = f.create_group('top_level_model_weights')\n    save_subset_weights_to_hdf5_group(g, weights)", "docstring": "Saves the weights of a list of layers to a HDF5 group.\n\nArgs:\nf: HDF5 group.\nmodel: Model instance.", "source": "github-repos"}
{"code": "def format_auth_params(params):\n        \n        parts = []\n        for (key, value) in params.items():\n            if value:\n                parts.append('{}=\"{}\"'.format(key, value))\n        return \", \".join(parts)", "docstring": "Generate the format expected by HTTP Headers from parameters.\n\nArgs:\nparams (dict): {key: value} to convert to key=value\n\nReturns:\nA formatted header string.", "source": "juraj-google-style"}
{"code": "def imshow(img, win_name='', wait_time=0):\n    \n    cv2.imshow(win_name, imread(img))\n    cv2.waitKey(wait_time)", "docstring": "Show an image.\n\nArgs:\nimg (str or ndarray): The image to be displayed.\nwin_name (str): The window name.\nwait_time (int): Value of waitKey param.", "source": "juraj-google-style"}
{"code": "def unpack(self, buff, offset=0):\n        \n        super().unpack(buff, offset)\n        if not self.is_valid():\n            raise UnpackException(\"Unsupported protocols in ARP packet\")", "docstring": "Unpack a binary struct into this object's attributes.\n\nReturn the values instead of the lib's basic types.\nCheck if the protocols involved are Ethernet and IPv4. Other protocols\nare currently not supported.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def load_own_variables(self, store):\n    all_vars = self._trainable_variables + self._non_trainable_variables\n    if len(store.keys()) != len(all_vars):\n        if len(all_vars) == 0 and (not self.built):\n            raise ValueError(f\"Layer '{self.name}' was never built and thus it doesn't have any variables. However the weights file lists {len(store.keys())} variables for this layer.\\nIn most cases, this error indicates that either:\\n\\n1. The layer is owned by a parent layer that implements a `build()` method, but calling the parent's `build()` method did NOT create the state of the child layer '{self.name}'. A `build()` method must create ALL state for the layer, including the state of any children layers.\\n\\n2. You need to implement the `def build_from_config(self, config)` method on layer '{self.name}', to specify how to rebuild it during loading. In this case, you might also want to implement the method that generates the build config at saving time, `def get_build_config(self)`. The method `build_from_config()` is meant to create the state of the layer (i.e. its variables) upon deserialization.\")\n        raise ValueError(f\"Layer '{self.name}' expected {len(all_vars)} variables, but received {len(store.keys())} variables during loading. Expected: {[v.name for v in all_vars]}\")\n    for i, v in enumerate(all_vars):\n        v.assign(store[f'{i}'])", "docstring": "Loads the state of the layer.\n\nYou can override this method to take full control of how the state of\nthe layer is loaded upon calling `keras.models.load_model()`.\n\nArgs:\nstore: Dict from which the state of the model will be loaded.", "source": "github-repos"}
{"code": "def ts_to_dt(jwt_dict):\n    \n    d = jwt_dict.copy()\n    for k, v in [v[:2] for v in CLAIM_LIST if v[2]]:\n        if k in jwt_dict:\n            d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])\n    return d", "docstring": "Convert timestamps in JWT to datetime objects.\n\nArgs:\njwt_dict: dict\nJWT with some keys containing timestamps.\n\nReturns:\ndict: Copy of input dict where timestamps have been replaced with\ndatetime.datetime() objects.", "source": "juraj-google-style"}
{"code": "def dispatch_event(self, event: \"Event\") -> None:\n        \n        \n        \n        if event.target is None:\n            event.set_target(self)\n\n        listeners: dict[types.MethodType, bool] = self._registered_listeners.get(event.type)\n        if listeners is None:\n            return\n\n        for listener in listeners:\n            listener(event)", "docstring": "Dispatches the given event.\n\nIt is the duty of this method to set the target of the dispatched event by calling\n`event.set_target(self)`.\n\nArgs:\nevent (Event): The event to dispatch. Must not be `None`.\n\nRaises:\nTypeError: If the event is `None` or its type is incorrect.", "source": "juraj-google-style"}
{"code": "def has_full_stack(self, value):\n        \n        if value == self._defaults['hasFullStack'] and 'hasFullStack' in self._values:\n            del self._values['hasFullStack']\n        else:\n            self._values['hasFullStack'] = value", "docstring": "The has_full_stack property.\n\nArgs:\nvalue (bool). the property value.", "source": "juraj-google-style"}
{"code": "def identify_triggers(cfg, sources, sinks, lattice, nosec_lines):\n    assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode)\n    tainted_nodes = filter_cfg_nodes(cfg, TaintedNode)\n    tainted_trigger_nodes = [TriggerNode(Source('Framework function URL parameter'), cfg_node=node) for node in tainted_nodes]\n    sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines)\n    sources_in_file.extend(tainted_trigger_nodes)\n    find_secondary_sources(assignment_nodes, sources_in_file, lattice)\n    sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines)\n    sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file)\n    return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict)", "docstring": "Identify sources, sinks and sanitisers in a CFG.\n\nArgs:\ncfg(CFG): CFG to find sources, sinks and sanitisers in.\nsources(tuple): list of sources, a source is a (source, sanitiser) tuple.\nsinks(tuple): list of sources, a sink is a (sink, sanitiser) tuple.\nnosec_lines(set): lines with # nosec whitelisting\n\nReturns:\nTriggers tuple with sink and source nodes and a sanitiser node dict.", "source": "codesearchnet"}
{"code": "def take_bug_report(self,\n                        test_name,\n                        begin_time,\n                        timeout=300,\n                        destination=None):\n        \n        new_br = True\n        try:\n            stdout = self.adb.shell('bugreportz -v').decode('utf-8')\n            \n            \n            if 'not found' in stdout:\n                new_br = False\n        except adb.AdbError:\n            new_br = False\n        if destination:\n            br_path = utils.abs_path(destination)\n        else:\n            br_path = os.path.join(self.log_path, 'BugReports')\n        utils.create_dir(br_path)\n        base_name = ',%s,%s.txt' % (begin_time, self._normalized_serial)\n        if new_br:\n            base_name = base_name.replace('.txt', '.zip')\n        test_name_len = utils.MAX_FILENAME_LEN - len(base_name)\n        out_name = test_name[:test_name_len] + base_name\n        full_out_path = os.path.join(br_path, out_name.replace(' ', r'\\ '))\n        \n        self.wait_for_boot_completion()\n        self.log.info('Taking bugreport for %s.', test_name)\n        if new_br:\n            out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')\n            if not out.startswith('OK'):\n                raise DeviceError(self, 'Failed to take bugreport: %s' % out)\n            br_out_path = out.split(':')[1].strip()\n            self.adb.pull([br_out_path, full_out_path])\n        else:\n            \n            \n            self.adb.bugreport(\n                ' > \"%s\"' % full_out_path, shell=True, timeout=timeout)\n        self.log.info('Bugreport for %s taken at %s.', test_name,\n                      full_out_path)", "docstring": "Takes a bug report on the device and stores it in a file.\n\nArgs:\ntest_name: Name of the test method that triggered this bug report.\nbegin_time: Timestamp of when the test started.\ntimeout: float, the number of seconds to wait for bugreport to\ncomplete, default is 5min.\ndestination: string, path to the directory where the bugreport\nshould be saved.", "source": "juraj-google-style"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        if self.Version is not 0:\n            raise Exception('Invalid format')\n\n        self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):\n\nRaises:\nException: If the version read is incorrect.", "source": "juraj-google-style"}
{"code": "def insert_tile(self, tile_info):\n        \n\n        for i, tile in enumerate(self.registered_tiles):\n            if tile.slot == tile_info.slot:\n                self.registered_tiles[i] = tile_info\n                return\n\n        self.registered_tiles.append(tile_info)", "docstring": "Add or replace an entry in the tile cache.\n\nArgs:\ntile_info (TileInfo): The newly registered tile.", "source": "juraj-google-style"}
{"code": "def event_shape(self):\n    return tensor_shape.as_shape(self._event_shape())", "docstring": "Shape of a single sample from a single batch as a `TensorShape`.\n\nMay be partially defined or unknown.\n\nReturns:\nevent_shape: `TensorShape`, possibly unknown.", "source": "github-repos"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(QueryRequestPayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    query_functions = []\n    while self.is_tag_next(enums.Tags.QUERY_FUNCTION, local_buffer):\n        query_function = primitives.Enumeration(enums.QueryFunction, tag=enums.Tags.QUERY_FUNCTION)\n        query_function.read(local_buffer, kmip_version=kmip_version)\n        query_functions.append(query_function)\n    if query_functions:\n        self._query_functions = query_functions\n    else:\n        raise exceptions.InvalidKmipEncoding('The Query request payload encoding is missing the query functions.')\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the QueryRequestPayload object and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (Stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the query functions are missing\nfrom the encoded payload.", "source": "codesearchnet"}
{"code": "def to(self, new_unit):\n        \n        return self.__class__(\n            np.array(self) * self.unit.get_conversion_factor(new_unit),\n            unit_type=self.unit_type, unit=new_unit)", "docstring": "Conversion to a new_unit.\n\nArgs:\nnew_unit:\nNew unit type.\n\nReturns:\nA ArrayWithFloatWithUnit object in the new units.\n\nExample usage:\n>>> e = EnergyArray([1, 1.1], \"Ha\")\n>>> e.to(\"eV\")\narray([ 27.21138386,  29.93252225]) eV", "source": "juraj-google-style"}
{"code": "def merge(metric_kind, prior, latest):\n    (prior_type, _) = _detect_value(prior)\n    (latest_type, _) = _detect_value(latest)\n    if (prior_type != latest_type):\n        _logger.warn(u'Metric values are not compatible: %s, %s', prior, latest)\n        raise ValueError(u'Incompatible delta metric values')\n    if (prior_type is None):\n        _logger.warn(u'Bad metric values, types not known for : %s, %s', prior, latest)\n        raise ValueError(u'Unsupported delta metric types')\n    if (metric_kind == MetricKind.DELTA):\n        return _merge_delta_metric(prior, latest)\n    else:\n        return _merge_cumulative_or_gauge_metrics(prior, latest)", "docstring": "Merges `prior` and `latest`\n\nArgs:\nmetric_kind (:class:`MetricKind`): indicates the kind of metrics\nbeing merged\nprior (:class:`MetricValue`): an prior instance of the metric\nlatest (:class:`MetricValue`: the latest instance of the metric", "source": "codesearchnet"}
{"code": "def parse_dtype_info(flags):\n  \n  if flags.dtype in (i[0] for i in DTYPE_MAP.values()):\n    return  \n\n  try:\n    flags.dtype, default_loss_scale = DTYPE_MAP[flags.dtype]\n  except KeyError:\n    raise ValueError(\"Invalid dtype: {}\".format(flags.dtype))\n\n  flags.loss_scale = flags.loss_scale or default_loss_scale", "docstring": "Convert dtype string to tf dtype, and set loss_scale default as needed.\n\nArgs:\nflags: namespace object returned by arg parser.\n\nRaises:\nValueError: If an invalid dtype is provided.", "source": "juraj-google-style"}
{"code": "def _parse_graph(self):\n\n\t\t\n\n\t\t\n\t\tif self.exists:\n\t\t\tself.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)\n\n\t\t\n\t\telse:\n\t\t\tself.rdf.graph = rdflib.Graph()\n\n\t\t\n\t\tself.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)\n\t\tfor ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():\n\t\t\tself.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)\n\n\t\t\n\t\tfor ns_prefix, ns_uri in self.rdf.graph.namespaces():\n\t\t\tsetattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))\n\t\t\tsetattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)\n\n\t\t\n\t\tself.rdf._orig_graph = copy.deepcopy(self.rdf.graph)\n\n\t\t\n\t\tself.parse_object_like_triples()", "docstring": "use Content-Type from headers to determine parsing method\n\nArgs:\nNone\n\nReturn:\nNone: sets self.rdf by parsing data from GET request, or setting blank graph of resource does not yet exist", "source": "juraj-google-style"}
{"code": "def GetParserObjects(cls, parser_filter_expression=None):\n    (includes, excludes) = cls._GetParserFilters(parser_filter_expression)\n    parser_objects = {}\n    for (parser_name, parser_class) in iter(cls._parser_classes.items()):\n        if ((not includes) and (parser_name in excludes)):\n            continue\n        if (includes and (parser_name not in includes)):\n            continue\n        parser_object = parser_class()\n        if parser_class.SupportsPlugins():\n            plugin_includes = None\n            if (parser_name in includes):\n                plugin_includes = includes[parser_name]\n            parser_object.EnablePlugins(plugin_includes)\n        parser_objects[parser_name] = parser_object\n    return parser_objects", "docstring": "Retrieves the parser objects.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\ndict[str, BaseParser]: parsers per name.", "source": "codesearchnet"}
{"code": "def to_dlpack(tf_tensor):\n    return pywrap_tfe.TFE_ToDlpackCapsule(tf_tensor)", "docstring": "Returns the dlpack capsule representing the tensor.\n\nThis operation ensures the underlying data memory is ready when returns.\n\n```python\na = tf.tensor([1, 10])\ndlcapsule = tf.experimental.dlpack.to_dlpack(a)\n# dlcapsule represents the dlpack data structure\n```\n\nArgs:\ntf_tensor: Tensorflow eager tensor, to be converted to dlpack capsule.\n\nReturns:\nA PyCapsule named as dltensor, which shares the underlying memory to other\nframework. This PyCapsule can be consumed only once.", "source": "github-repos"}
{"code": "def _ContinueReportCompilation(self):\n    analyzer_alive = self._analyzer.is_alive()\n    hash_queue_has_tasks = (self.hash_queue.unfinished_tasks > 0)\n    analysis_queue = (not self.hash_analysis_queue.empty())\n    return ((analyzer_alive and hash_queue_has_tasks) or analysis_queue)", "docstring": "Determines if the plugin should continue trying to compile the report.\n\nReturns:\nbool: True if the plugin should continue, False otherwise.", "source": "codesearchnet"}
{"code": "def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:\n    root = ElementTree.fromstring(xml)\n    return docx_text_from_xml_node(root, 0, config)", "docstring": "Converts an XML tree of a DOCX file to string contents.\n\nArgs:\nxml: raw XML text\nconfig: :class:`TextProcessingConfig` control object\n\nReturns:\ncontents as a string", "source": "codesearchnet"}
{"code": "def ParseRow(header,\n             row):\n  \n  precondition.AssertDictType(row, Text, Text)\n\n  result = rdf_osquery.OsqueryRow()\n  for column in header.columns:\n    result.values.append(row[column.name])\n  return result", "docstring": "Parses a single row of osquery output.\n\nArgs:\nheader: A parsed header describing the row format.\nrow: A row in a \"parsed JSON\" representation.\n\nReturns:\nA parsed `rdf_osquery.OsqueryRow` instance.", "source": "juraj-google-style"}
{"code": "class SamplePatchTSMixerRegressionOutput(ModelOutput):\n    sequences: Optional[torch.FloatTensor] = None", "docstring": "Base class for time series model's predictions outputs that contains the sampled values from the chosen\ndistribution.\n\nArgs:\nsequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)`\nSampled values from the chosen distribution.", "source": "github-repos"}
{"code": "def __init__(self, pattern):\n    \n    super(Interpolator, self).__init__()\n    self._pattern = pattern\n\n    if isinstance(pattern, bytes):\n      var_regex = re.compile(self._VAR_PLACEHOLDER_PATTERN.encode(\"ascii\"))\n      scope_regex = re.compile(self._SCOPE_PLACEHOLDER_PATTERN.encode(\"ascii\"))\n      decoder = lambda _: _.decode(\"ascii\")\n    elif isinstance(pattern, Text):\n      var_regex = re.compile(self._VAR_PLACEHOLDER_PATTERN)\n      scope_regex = re.compile(self._SCOPE_PLACEHOLDER_PATTERN)\n      decoder = lambda _: _\n    else:\n      raise TypeError(\"Unexpected pattern type '{}'\".format(type(pattern)))\n\n    self._vars = set()\n    for matches in var_regex.finditer(pattern):\n      var = matches.group(\"var\")\n      self._vars.add(decoder(var))\n\n    self._scopes = dict()\n    for matches in scope_regex.finditer(pattern):\n      scope = matches.group(\"scope\")\n      var = matches.group(\"var\")\n      self._scopes.setdefault(decoder(scope), set()).add(decoder(var))\n\n    self._var_bindings = collections.defaultdict(lambda: [])\n    self._scope_bindings = collections.defaultdict(lambda: [])", "docstring": "Initializes the interpolator.\n\nArgs:\npattern: A string (either of unicode or byte characters) with placeholders\nto format.", "source": "juraj-google-style"}
{"code": "def default(self, name, action, seqno):\n    return self.configure(('default route-map %s %s %s' % (name, action, seqno)))", "docstring": "Defaults the routemap on the node\n\nNote:\nThis method will attempt to default the routemap from the nodes\noperational config. Since routemaps do not exist by default,\nthe default action is essentially a negation and the result will\nbe the removal of the routemap clause.\nIf the routemap does not exist then this\nmethod will not perform any changes but still return True\n\nArgs:\nname (string): The full name of the routemap.\naction (string): The action to take for this routemap clause.\nseqno (integer): The sequence number for the routemap clause.\n\nReturns:\nTrue if the routemap could be deleted otherwise False (see Node)", "source": "codesearchnet"}
{"code": "async def updateCronJob(self, iden, query):\n        \n        cron = self.cell.agenda.appts.get(iden)\n        if cron is None:\n            raise s_exc.NoSuchIden()\n        self._trig_auth_check(cron.useriden)\n        await self.cell.agenda.mod(iden, query)", "docstring": "Change an existing cron job's query\n\nArgs:\niden (bytes):  The iden of the cron job to be changed", "source": "juraj-google-style"}
{"code": "def __init__(self, name, formatter=None):\n        \n        if formatter is not None:\n            name = formatter(name)\n        self._tag_data = {'name': name}\n        \n        self._valid = True\n        if not name:\n            self._valid = False", "docstring": "Initialize Class Properties.\n\nArgs:\nname (str): The value for this tag.\nformatter (method, optional): A method that take a tag value and returns a\nformatted tag.", "source": "juraj-google-style"}
{"code": "def set_item(target, i, x):\n    if isinstance(target, tensor_array_ops.TensorArray):\n        return _tf_tensorarray_set_item(target, i, x)\n    elif tensor_util.is_tf_type(target):\n        if target.dtype == dtypes.variant:\n            return _tf_tensor_list_set_item(target, i, x)\n        else:\n            return _tf_tensor_set_item(target, i, x)\n    else:\n        return _py_set_item(target, i, x)", "docstring": "The slice write operator (i.e. __setitem__).\n\nNote: it is unspecified whether target will be mutated or not. In general,\nif target is mutable (like Python lists), it will be mutated.\n\nArgs:\ntarget: An entity that supports setitem semantics.\ni: Index to modify.\nx: The new element value.\n\nReturns:\nSame as target, after the update was performed.\n\nRaises:\nValueError: if target is not of a supported type.", "source": "github-repos"}
{"code": "def ami_lookup(region='us-east-1', name='tomcat8'):\n    \n    if AMI_JSON_URL:\n        ami_dict = _get_ami_dict(AMI_JSON_URL)\n        ami_id = ami_dict[region][name]\n    elif GITLAB_TOKEN:\n        warn_user('Use AMI_JSON_URL feature instead.')\n        ami_contents = _get_ami_file(region=region)\n        ami_dict = json.loads(ami_contents)\n        ami_id = ami_dict[name]\n    else:\n        ami_id = name\n\n    LOG.info('Using AMI: %s', ami_id)\n\n    return ami_id", "docstring": "Look up AMI ID.\n\nUse _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided,\n_name_ is returned as the ami id.\n\nArgs:\nregion (str): AWS Region to find AMI ID.\nname (str): Simple AMI base name to lookup.\n\nReturns:\nstr: AMI ID for _name_ in _region_.", "source": "juraj-google-style"}
{"code": "def define_saver(exclude=None):\n  \n  variables = []\n  exclude = exclude or []\n  exclude = [re.compile(regex) for regex in exclude]\n  for variable in tf.global_variables():\n    if any(regex.match(variable.name) for regex in exclude):\n      continue\n    variables.append(variable)\n  saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)\n  return saver", "docstring": "Create a saver for the variables we want to checkpoint.\n\nArgs:\nexclude: List of regexes to match variable names to exclude.\n\nReturns:\nSaver object.", "source": "juraj-google-style"}
{"code": "def set_query(self, value):\n    if (isinstance(value, basestring) or (value is None)):\n        self._content['query'] = value\n    elif hasattr(value, 'keys'):\n        self._content['query'] = query.terms_from_dict(value)\n    else:\n        raise TypeError((('Query must be a string or dict. Got: ' + type(value)) + ' insted!'))", "docstring": "Convert a dict form of query in a string of needed and store the query string.\n\nArgs:\nvalue -- A query string or a dict with query xpaths as keys and text or\nnested query dicts as values.", "source": "codesearchnet"}
{"code": "def lookup(self, keys, name=None):\n    if keys.dtype.base_dtype != self._key_dtype:\n        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n    values = keys\n    if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n        values = keys.values\n    if self._table and self._table.key_dtype.base_dtype == dtypes.int64:\n        values = math_ops.cast(values, dtypes.int64)\n    if self._num_oov_buckets == 0:\n        ids = self._table.lookup(values, name=name)\n    else:\n        with ops.name_scope(name, '%s_Lookup' % self.name):\n            str_to_hash_bucket = self._get_string_to_hash_bucket_fn(self._hasher_spec)\n            buckets = str_to_hash_bucket(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')\n            if self._table:\n                ids = self._table.lookup(values)\n                buckets = math_ops.add(buckets, self._table.size())\n                is_id_non_default = math_ops.not_equal(ids, self._table.default_value)\n                ids = array_ops.where_v2(is_id_non_default, ids, buckets)\n            else:\n                ids = buckets\n    if isinstance(keys, sparse_tensor.SparseTensor):\n        return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)\n    elif isinstance(keys, internal.RaggedTensor):\n        return keys.with_values(ids)\n    return ids", "docstring": "Looks up `keys` in the table, outputs the corresponding values.\n\nIt assigns out-of-vocabulary keys to buckets based in their hashes.\n\nArgs:\nkeys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.\nname: Optional name for the op.\n\nReturns:\nA `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,\notherwise a dense `Tensor`.\n\nRaises:\nTypeError: when `keys` doesn't match the table key data type.", "source": "github-repos"}
{"code": "def parse_json(self, values_json):\n    values_map = json.loads(values_json)\n    return self.override_from_dict(values_map)", "docstring": "Override existing hyperparameter values, parsing new values from a json object.\n\nArgs:\nvalues_json: String containing a json object of name:value pairs.\n\nReturns:\nThe `HParams` instance.\n\nRaises:\nKeyError: If a hyperparameter in `values_json` doesn't exist.\nValueError: If `values_json` cannot be parsed.", "source": "codesearchnet"}
{"code": "def variants(self, case_id, skip=0, count=1000, filters=None):\n    filters = (filters or {})\n    logger.debug('Looking for variants in {0}'.format(case_id))\n    limit = (count + skip)\n    gemini_query = (filters.get('gemini_query') or 'SELECT * from variants v')\n    any_filter = False\n    if filters.get('frequency'):\n        frequency = filters['frequency']\n        extra_info = '(v.max_aaf_all < {0} or v.max_aaf_all is Null)'.format(frequency)\n        gemini_query = self.build_gemini_query(gemini_query, extra_info)\n    if filters.get('cadd'):\n        cadd_score = filters['cadd']\n        extra_info = '(v.cadd_scaled > {0})'.format(cadd_score)\n        gemini_query = self.build_gemini_query(gemini_query, extra_info)\n    if filters.get('gene_ids'):\n        gene_list = [gene_id.strip() for gene_id in filters['gene_ids']]\n        gene_string = 'v.gene in ('\n        for (index, gene_id) in enumerate(gene_list):\n            if (index == 0):\n                gene_string += \"'{0}'\".format(gene_id)\n            else:\n                gene_string += \", '{0}'\".format(gene_id)\n        gene_string += ')'\n        gemini_query = self.build_gemini_query(gemini_query, gene_string)\n    if filters.get('range'):\n        chrom = filters['range']['chromosome']\n        if (not chrom.startswith('chr')):\n            chrom = 'chr{0}'.format(chrom)\n        range_string = \"v.chrom = '{0}' AND ((v.start BETWEEN {1} AND {2}) OR (v.end BETWEEN {1} AND {2}))\".format(chrom, filters['range']['start'], filters['range']['end'])\n        gemini_query = self.build_gemini_query(gemini_query, range_string)\n    filtered_variants = self._variants(case_id=case_id, gemini_query=gemini_query)\n    if filters.get('consequence'):\n        consequences = set(filters['consequence'])\n        filtered_variants = (variant for variant in filtered_variants if set(variant.consequences).intersection(consequences))\n    if filters.get('impact_severities'):\n        severities = set([severity.strip() for severity in filters['impact_severities']])\n        new_filtered_variants = []\n        filtered_variants = (variant for variant in filtered_variants if set([variant.impact_severity]).intersection(severities))\n    if filters.get('sv_len'):\n        sv_len = int(filters['sv_len'])\n        filtered_variants = (variant for variant in filtered_variants if (variant.sv_len >= sv_len))\n    variants = []\n    for (index, variant_obj) in enumerate(filtered_variants):\n        if (index >= skip):\n            if (index < limit):\n                variants.append(variant_obj)\n            else:\n                break\n    return Results(variants, len(variants))", "docstring": "Return count variants for a case.\n\nThis function needs to have different behaviours based on what is asked\nfor. It should allways try to give minimal information back to improve\non speed. For example, if consequences are not asked for we will not\nbuild all transcripts. If not sv variants we will not build sv\ncoordinates.\nSo the minimal case is to just show what is asked for in the variants\ninterface.\n\nArgs:\ncase_id (str): A gemini db\nskip (int): Skip first variants\ncount (int): The number of variants to return\nfilters (dict): A dictionary with filters. 
Currently this will\nlook like: {\ngene_list: [] (list of hgnc ids),\nfrequency: None (float),\ncadd: None (float),\nconsequence: [] (list of consequences),\nimpact_severities: [] (list of consequences),\ngenetic_models [] (list of genetic models)\n}\nReturns:\npuzzle.constants.Results : Named tuple with variants and\nnr_of_variants", "source": "codesearchnet"}
{"code": "def compile_state_cpfs(self, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[Noise]=None) -> List[CPFPair]:\n    next_state_fluents = []\n    with self.graph.as_default():\n        with tf.name_scope('state_cpfs'):\n            for cpf in self.rddl.domain.state_cpfs:\n                cpf_noise = (noise.get(cpf.name, None) if (noise is not None) else None)\n                name_scope = utils.identifier(cpf.name)\n                with tf.name_scope(name_scope):\n                    t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)\n                next_state_fluents.append((cpf.name, t))\n            key = (lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0]))\n            next_state_fluents = sorted(next_state_fluents, key=key)\n    return next_state_fluents", "docstring": "Compiles the next state fluent CPFs given the current `state` and `action` scope.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nA list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def PushEvent(self, timestamp, event_data):\n    heap_values = (timestamp, event_data)\n    heapq.heappush(self._heap, heap_values)\n    self.data_size += len(event_data)", "docstring": "Pushes a serialized event onto the heap.\n\nArgs:\ntimestamp (int): event timestamp, which contains the number of\nmicro seconds since January 1, 1970, 00:00:00 UTC.\nevent_data (bytes): serialized event.", "source": "codesearchnet"}
{"code": "def convert_version_to_int(version):\n    version = version.split('-')[0]\n    version_segments = version.split('.')\n    if len(version_segments) == 2:\n        version_segments.append('0')\n    for seg in version_segments:\n        if not seg.isdigit():\n            return None\n    version_str = ''.join(['%03d' % int(seg) for seg in version_segments])\n    return int(version_str)", "docstring": "Convert a version number to a integer that can be used to compare.\n\nVersion strings of the form X.YZ and X.Y.Z-xxxxx are supported. The\n'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.\n\nArgs:\nversion: a version to be converted\n\nReturns:\nAn integer if converted successfully, otherwise return None.", "source": "github-repos"}
{"code": "def __version_capture_slp(self, pkg_id, version_binary, version_display, display_name):\n    if (self.__pkg_obj and hasattr(self.__pkg_obj, 'version_capture')):\n        (version_str, src, version_user_str) = self.__pkg_obj.version_capture(pkg_id, version_binary, version_display, display_name)\n        if ((src != 'use-default') and version_str and src):\n            return (version_str, src, version_user_str)\n        elif (src != 'use-default'):\n            raise ValueError(\"version capture within object '{0}' failed for pkg id: '{1}' it returned '{2}' '{3}' '{4}'\".format(six.text_type(self.__pkg_obj), pkg_id, version_str, src, version_user_str))\n    if (version_display and (re.match('\\\\d+', version_display, flags=(re.IGNORECASE + re.UNICODE)) is not None)):\n        version_str = version_display\n        src = 'display-version'\n    elif (version_binary and (re.match('\\\\d+', version_binary, flags=(re.IGNORECASE + re.UNICODE)) is not None)):\n        version_str = version_binary\n        src = 'version-binary'\n    else:\n        src = 'none'\n        version_str = '0.0.0.0.0'\n    return (version_str, src, version_str)", "docstring": "This returns the version and where the version string came from, based on instructions\nunder ``version_capture``, if ``version_capture`` is missing, it defaults to\nvalue of display-version.\n\nArgs:\npkg_id (str): Publisher of the software/component.\nversion_binary (str): Name of the software.\nversion_display (str): True if package is a component.\ndisplay_name (str): True if the software/component is 32bit architecture.\n\nReturns:\nstr: Package Id", "source": "codesearchnet"}
{"code": "def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  \n  \n  \n  fncall = line    \n  for pattern in (r'\\bif\\s*\\((.*)\\)\\s*{',\n                  r'\\bfor\\s*\\((.*)\\)\\s*{',\n                  r'\\bwhile\\s*\\((.*)\\)\\s*[{;]',\n                  r'\\bswitch\\s*\\((.*)\\)\\s*{'):\n    match = Search(pattern, line)\n    if match:\n      fncall = match.group(1)    \n      break\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  if (  \n      not Search(r'\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b',\n                 fncall) and\n      \n      not Search(r' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall) and\n      \n      not Search(r' \\([^)]+\\)\\[[^\\]]+\\]', fncall)):\n    if Search(r'\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):      \n      error(filename, linenum, 'whitespace/parens', 4,\n            'Extra space after ( in function call')\n    elif Search(r'\\(\\s+(?!(\\s*\\\\)|\\()', fncall):\n      error(filename, linenum, 'whitespace/parens', 2,\n            'Extra space after (')\n    if (Search(r'\\w\\s+\\(', fncall) and\n        not Search(r'_{0,2}asm_{0,2}\\s+_{0,2}volatile_{0,2}\\s+\\(', fncall) and\n        not Search(r'\n        not Search(r'\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall) and\n        not Search(r'\\bcase\\s+\\(', fncall)):\n      \n      \n      if Search(r'\\boperator_*\\b', line):\n        error(filename, linenum, 'whitespace/parens', 0,\n              'Extra space before ( in function call')\n      else:\n        error(filename, linenum, 'whitespace/parens', 4,\n              'Extra space before ( in function call')\n    \n    \n    if Search(r'[^)]\\s+\\)\\s*[^{\\s]', fncall):\n      \n      \n      if Search(r'^\\s+\\)', fncall):\n        error(filename, linenum, 'whitespace/parens', 2,\n              'Closing ) should be moved to the previous line')\n      else:\n        error(filename, linenum, 'whitespace/parens', 2,\n              'Extra space before )')", "docstring": "Checks for the correctness of various spacing around function calls.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def CheckForNewlineAtEOF(filename, lines, error):\n  \n\n  \n  \n  \n  \n  if len(lines) < 3 or lines[-2]:\n    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,\n          'Could not find a newline character at the end of the file.')", "docstring": "Logs an error if there is no newline char at the end of the file.\n\nArgs:\nfilename: The name of the current file.\nlines: An array of strings, each representing a line of the file.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def in_range(self, ver, req):\n    if req.exclude is not None:\n        for v in ver:\n            if v in req.exclude:\n                return False\n    include_checked = False\n    if req.include is not None:\n        for v in ver:\n            if v in req.include:\n                return True\n        include_checked = True\n    if req.range != [None, None]:\n        min_v = req.range[0]\n        max_v = req.range[1]\n        ver = ver[0]\n        lg = _compare_versions(min_v, ver)['larger']\n        sm = _compare_versions(ver, max_v)['smaller']\n        if lg in [ver, 'equal'] and sm in [ver, 'equal', 'inf']:\n            return True\n        else:\n            err_msg = '[Error] Version is outside of supported range. '\n            err_msg += '(config = %s, ' % str(req.config)\n            err_msg += 'version = %s, ' % str(ver)\n            err_msg += 'supported range = %s)' % str(req.range)\n            logging.warning(err_msg)\n            self.warning_msg.append(err_msg)\n            return False\n    else:\n        err_msg = ''\n        if include_checked:\n            err_msg = '[Error] Version is outside of supported range. '\n        else:\n            err_msg = '[Error] Missing specification. '\n        err_msg += '(config = %s, ' % str(req.config)\n        err_msg += 'version = %s, ' % str(ver)\n        err_msg += 'supported range = %s)' % str(req.range)\n        logging.warning(err_msg)\n        self.warning_msg.append(err_msg)\n        return False", "docstring": "Checks if a version satisfies a version and/or compatibility requirement.\n\nArgs:\nver: List whose first item is a config version that needs to be checked\nfor support status and version compatibility.\ne.g. ver = [`1.0`]\nreq: `_Reqs` class instance that represents a configuration version and\ncompatibility specifications.\n\nReturns:\nBoolean output of checking if version `ver` meets the requirement\nstored in `req` (or a `_Reqs` requirements class instance).", "source": "github-repos"}
{"code": "def get_attribute_id(self, attribute_key):\n    \n\n    attribute = self.attribute_key_map.get(attribute_key)\n    has_reserved_prefix = attribute_key.startswith(RESERVED_ATTRIBUTE_PREFIX)\n\n    if attribute:\n      if has_reserved_prefix:\n        self.logger.warning(('Attribute %s unexpectedly has reserved prefix %s; using attribute ID '\n                             'instead of reserved attribute name.' % (attribute_key, RESERVED_ATTRIBUTE_PREFIX)))\n\n      return attribute.id\n\n    if has_reserved_prefix:\n      return attribute_key\n\n    self.logger.error('Attribute \"%s\" is not in datafile.' % attribute_key)\n    self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_ERROR))\n    return None", "docstring": "Get attribute ID for the provided attribute key.\n\nArgs:\nattribute_key: Attribute key for which attribute is to be fetched.\n\nReturns:\nAttribute ID corresponding to the provided attribute key.", "source": "juraj-google-style"}
{"code": "def num_parameters(self, only_trainable: bool=False) -> int:\n    if only_trainable:\n        return int(sum((np.prod(w.shape.as_list()) for w in self.trainable_variables)))\n    else:\n        return self.count_params()", "docstring": "Get the number of (optionally, trainable) parameters in the model.\n\nArgs:\nonly_trainable (`bool`, *optional*, defaults to `False`):\nWhether or not to return only the number of trainable parameters\n\nReturns:\n`int`: The number of parameters.", "source": "github-repos"}
{"code": "def make_trace_api(client):\n    \n    generated = trace_service_client.TraceServiceClient(\n        credentials=client._credentials, client_info=_CLIENT_INFO\n    )\n    return _TraceAPI(generated, client)", "docstring": "Create an instance of the gapic Trace API.\n\nArgs:\nclient (~google.cloud.trace.client.Client): The client that holds\nconfiguration details.\n\nReturns:\nA :class:`~google.cloud.trace._gapic._TraceAPI` instance with the\nproper configurations.", "source": "juraj-google-style"}
{"code": "def skip_summary():\n    replica_context = distribute_lib.get_replica_context()\n    if not replica_context:\n        return False\n    replica_id = replica_context.replica_id_in_sync_group\n    if isinstance(replica_id, tensor.Tensor):\n        replica_id = tensor_util.constant_value(replica_id)\n    return replica_id and replica_id > 0", "docstring": "Determines if summary should be skipped.\n\nIf using multiple replicas in distributed strategy, skip summaries on all\nreplicas except the first one (replica_id=0).\n\nReturns:\nTrue if the summary is skipped; False otherwise.", "source": "github-repos"}
{"code": "async def leader(self):\n    response = (await self._api.get('/v1/status/leader'))\n    if (response.status == 200):\n        return response.body", "docstring": "Returns the current Raft leader\n\nReturns:\nstr: address of leader such as ``10.1.10.12:8300``", "source": "codesearchnet"}
{"code": "def add_rel(self, source_node_id, target_node_id, rel):\n        \n\n        \n        n1_ref = self.graph_db.get_indexed_node('Node', 'node_id', source_node_id)\n        n2_ref = self.graph_db.get_indexed_node('Node', 'node_id', target_node_id)\n\n        \n        if not n1_ref or not n2_ref:\n            print 'Cannot add relationship between unfound nodes: %s --> %s' % (source_node_id, target_node_id)\n            return\n        path = neo4j.Path(n1_ref, rel, n2_ref)\n        path.get_or_create(self.graph_db)", "docstring": "Add a relationship between nodes.\n\nArgs:\nsource_node_id: Node Id for the source node.\ntarget_node_id: Node Id for the target node.\nrel: Name of the relationship 'contains'", "source": "juraj-google-style"}
{"code": "def get_absorbing_atom_symbol_index(absorbing_atom, structure):\n    if isinstance(absorbing_atom, str):\n        return (absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0])\n    elif isinstance(absorbing_atom, int):\n        return (str(structure[absorbing_atom].specie), absorbing_atom)\n    else:\n        raise ValueError('absorbing_atom must be either specie symbol or site index')", "docstring": "Return the absorbing atom symboll and site index in the given structure.\n\nArgs:\nabsorbing_atom (str/int): symbol or site index\nstructure (Structure)\n\nReturns:\nstr, int: symbol and site index", "source": "codesearchnet"}
{"code": "def segment(self, text):\n    files = {'text': text}\n    (res, status_code) = self.post(self.segmentation_service, files=files)\n    if (status_code != 200):\n        logger.debug('Segmentation failed.')\n    return (self.decode(res), status_code)", "docstring": "Call the segmenter in order to split text in sentences.\n\nArgs:\ntext (str): Text to be segmented.\n\nReturns:\ndict, int: A dict containing a list of dicts with the offsets of\neach sentence; an integer representing the response code.", "source": "codesearchnet"}
{"code": "def controller_factory(cls, passes, options, **partial_controller):\n    if (None in partial_controller.values()):\n        raise TranspilerError('The controller needs a condition.')\n    if partial_controller:\n        for registered_controller in cls.registered_controllers.keys():\n            if (registered_controller in partial_controller):\n                return cls.registered_controllers[registered_controller](passes, options, **partial_controller)\n        raise TranspilerError(('The controllers for %s are not registered' % partial_controller))\n    else:\n        return FlowControllerLinear(passes, options)", "docstring": "Constructs a flow controller based on the partially evaluated controller arguments.\n\nArgs:\npasses (list[BasePass]): passes to add to the flow controller.\noptions (dict): PassManager options.\n**partial_controller (dict): Partially evaluated controller arguments in the form\n`{name:partial}`\n\nRaises:\nTranspilerError: When partial_controller is not well-formed.\n\nReturns:\nFlowController: A FlowController instance.", "source": "codesearchnet"}
{"code": "def lin_moma2(self, objective, wt_obj):\n    reactions = set(self._adjustment_reactions())\n    z_diff = self._z_diff\n    v = self._v\n    v_wt = self._v_wt\n    with self.constraints() as constr:\n        for f_reaction in reactions:\n            constr.add((z_diff[f_reaction] >= (v_wt[f_reaction] - v[f_reaction])), ((v_wt[f_reaction] - v[f_reaction]) >= (- z_diff[f_reaction])))\n        self._prob.set_objective(z_diff.sum(reactions))\n        constr.add((self._v_wt[objective] >= wt_obj))\n        self._solve(lp.ObjectiveSense.Minimize)", "docstring": "Find the smallest redistribution vector using a linear objective.\n\nThe change in flux distribution is mimimized by minimizing the sum\nof the absolute values of the differences of wild type FBA solution\nand the knockout strain flux solution.\n\nCreates the constraint that the we select the optimal flux vector that\nis closest to the wildtype. This might still return an arbitrary flux\nvector the maximizes the objective function.\n\nArgs:\nobjective: Objective reaction for the model.\nwt_obj: The flux value for your wild type objective reactions.\nCan either use an expiremental value or on determined by FBA\nby using :meth:`.get_fba_obj_flux(objective)`.", "source": "codesearchnet"}
{"code": "def _combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape, name=None):\n    with ops.name_scope(name, 'CombineSparseSuccessor', [parent_indices, parent_shape, child_indices, child_values, child_shape]):\n        (indices, values, shape) = ops_module.combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape)\n        return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)", "docstring": "Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding\nfirst `SparseTensor`'s values.\n\nArgs:\nparent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices\nparent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape\nchild_indices: 2D int64 `Tensor` with child `SparseTensor` indices\nchild_values: 1D int64 `Tensor` with child `SparseTensor` values\nchild_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape\nname: A name for the operation (optional).\nReturns:\n`SparseTensor` with an additional dimension of size 1 added.", "source": "codesearchnet"}
{"code": "def circuits_to_qobj(circuits, qobj_header=None, qobj_id=None, backend_name=None, config=None, shots=None, max_credits=None, basis_gates=None, coupling_map=None, seed=None, memory=None):\n    warnings.warn('circuits_to_qobj is deprecated and will be removed in Qiskit Terra 0.9. Use qiskit.compiler.assemble() to serialize circuits into a qobj.', DeprecationWarning)\n    qobj_header = (qobj_header or QobjHeader())\n    if backend_name:\n        qobj_header.backend_name = backend_name\n    if basis_gates:\n        warnings.warn('basis_gates was unused and will be removed.', DeprecationWarning)\n    if coupling_map:\n        warnings.warn('coupling_map was unused and will be removed.', DeprecationWarning)\n    qobj = assemble(experiments=circuits, qobj_id=qobj_id, qobj_header=qobj_header, shots=shots, memory=memory, max_credits=max_credits, seed_simulator=seed, config=config)\n    return qobj", "docstring": "Convert a list of circuits into a qobj.\n\nArgs:\ncircuits (list[QuantumCircuits] or QuantumCircuit): circuits to compile\nqobj_header (QobjHeader): header to pass to the results\nqobj_id (int): TODO: delete after qiskit-terra 0.8\nbackend_name (str): TODO: delete after qiskit-terra 0.8\nconfig (dict): TODO: delete after qiskit-terra 0.8\nshots (int): TODO: delete after qiskit-terra 0.8\nmax_credits (int): TODO: delete after qiskit-terra 0.8\nbasis_gates (str): TODO: delete after qiskit-terra 0.8\ncoupling_map (list): TODO: delete after qiskit-terra 0.8\nseed (int): TODO: delete after qiskit-terra 0.8\nmemory (bool): TODO: delete after qiskit-terra 0.8\n\nReturns:\nQobj: the Qobj to be run on the backends", "source": "codesearchnet"}
{"code": "def _take_screenshot(self, screenshot=False, name_prefix='unknown'):\n        \n        if isinstance(screenshot, bool):\n            if not screenshot:\n                return\n            return self._save_screenshot(name_prefix=name_prefix)\n        if isinstance(screenshot, Image.Image):\n            return self._save_screenshot(screen=screenshot, name_prefix=name_prefix)\n\n        raise TypeError(\"invalid type for func _take_screenshot: \"+ type(screenshot))", "docstring": "This is different from _save_screenshot.\nThe return value maybe None or the screenshot path\n\nArgs:\nscreenshot: bool or PIL image", "source": "juraj-google-style"}
{"code": "def require_representation(self, req):\n        \n        try:\n            type_, subtype, _ = parse_mime_type(req.content_type)\n            content_type = '/'.join((type_, subtype))\n        except:\n            raise falcon.HTTPUnsupportedMediaType(\n                description=\"Invalid Content-Type header: {}\".format(\n                    req.content_type\n                )\n            )\n\n        if content_type == 'application/json':\n            body = req.stream.read()\n            return json.loads(body.decode('utf-8'))\n        else:\n            raise falcon.HTTPUnsupportedMediaType(\n                description=\"only JSON supported, got: {}\".format(content_type)\n            )", "docstring": "Require raw representation dictionary from falcon request object.\n\nThis does not perform any field parsing or validation but only uses\nallowed content-encoding handler to decode content body.\n\nNote:\nCurrently only JSON is allowed as content type.\n\nArgs:\nreq (falcon.Request): request object\n\nReturns:\ndict: raw dictionary of representation supplied in request body", "source": "juraj-google-style"}
{"code": "def execute_before(self, sensor_graph, scope_stack):\n        \n\n        sensor_graph.add_constant(self.stream, 0)\n\n        new_scope = GatedClockScope(sensor_graph, scope_stack, (self.stream, self.trigger))\n        scope_stack.append(new_scope)", "docstring": "Execute statement before children are executed.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def SendReply(self, response, tag=None):\n    \n    if not isinstance(response, rdfvalue.RDFValue):\n      raise ValueError(\"SendReply can only send RDFValues\")\n\n    if self.rdf_flow.parent_flow_id:\n      response = rdf_flow_objects.FlowResponse(\n          client_id=self.rdf_flow.client_id,\n          request_id=self.rdf_flow.parent_request_id,\n          response_id=self.GetNextResponseId(),\n          payload=response,\n          flow_id=self.rdf_flow.parent_flow_id,\n          tag=tag)\n\n      self.flow_responses.append(response)\n    else:\n      reply = rdf_flow_objects.FlowResult(\n          client_id=self.rdf_flow.client_id,\n          flow_id=self.rdf_flow.flow_id,\n          hunt_id=self.rdf_flow.parent_hunt_id,\n          payload=response,\n          tag=tag)\n      self.replies_to_write.append(reply)\n      self.replies_to_process.append(reply)\n\n    self.rdf_flow.num_replies_sent += 1", "docstring": "Allows this flow to send a message to its parent flow.\n\nIf this flow does not have a parent, the message is ignored.\n\nArgs:\nresponse: An RDFValue() instance to be sent to the parent.\ntag: If specified, tag the result with this tag.\n\nRaises:\nValueError: If responses is not of the correct type.", "source": "juraj-google-style"}
{"code": "def is_stateful(self) -> bool:\n    return True", "docstring": "Indicates whether this ThresholdFn is stateful.\n\nReturns:\nbool: Always True for `QuantileThreshold` as it is stateful.", "source": "github-repos"}
{"code": "def read_video_opencv(video_path: str, sample_indices_fn: Callable, **kwargs):\n    requires_backends(read_video_opencv, ['cv2'])\n    import cv2\n    video = cv2.VideoCapture(video_path)\n    total_num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n    video_fps = video.get(cv2.CAP_PROP_FPS)\n    duration = total_num_frames / video_fps if video_fps else 0\n    metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='opencv')\n    indices = sample_indices_fn(metadata=metadata, **kwargs)\n    index = 0\n    frames = []\n    while video.isOpened():\n        success, frame = video.read()\n        if not success:\n            break\n        if index in indices:\n            height, width, channel = frame.shape\n            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            frames.append(frame[0:height, 0:width, 0:channel])\n        if success:\n            index += 1\n        if index >= total_num_frames:\n            break\n    video.release()\n    metadata.frames_indices = indices\n    return (np.stack(frames), metadata)", "docstring": "Decode a video using the OpenCV backend.\n\nArgs:\nvideo_path (`str`):\nPath to the video file.\nsample_indices_fn (`Callable`):\nA callable function that will return indices at which the video should be sampled. If the video has to be loaded using\nby a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.\nIf not provided, simple uniform sampling with fps is performed.\nExample:\ndef sample_indices_fn(metadata, **kwargs):\nreturn np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)\n\nReturns:\nTuple[`np.array`, `VideoMetadata`]: A tuple containing:\n- Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).\n- `VideoMetadata` object.", "source": "github-repos"}
{"code": "def parse_flags_with_usage(args):\n    try:\n        return FLAGS(args)\n    except flags.Error as error:\n        sys.stderr.write(('FATAL Flags parsing error: %s\\n' % error))\n        sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\\n')\n        sys.exit(1)", "docstring": "Tries to parse the flags, print usage, and exit if unparseable.\n\nArgs:\nargs: [str], a non-empty list of the command line arguments including\nprogram name.\n\nReturns:\n[str], a non-empty list of remaining command line arguments after parsing\nflags, including program name.", "source": "codesearchnet"}
{"code": "def hist_axis_func(axis_type: enum.Enum) -> Callable[([Hist], Axis)]:\n\n    def axis_func(hist: Hist) -> Axis:\n        ' Retrieve the axis associated with the ``HistAxisRange`` object for a given hist.\\n\\n        Args:\\n            hist: Histogram from which the selected axis should be retrieved.\\n            axis_type: Enumeration corresponding to the axis to be restricted. The numerical\\n                value of the enum should be axis number (for a THnBase).\\n        Returns:\\n            ROOT.TAxis: The axis associated with the ``HistAxisRange`` object.\\n        '\n        try:\n            hist_axis_type = axis_type.value\n        except AttributeError:\n            hist_axis_type = axis_type\n        if (hasattr(hist, 'ProjectionND') and hasattr(hist, 'Projection')):\n            return hist.GetAxis(hist_axis_type)\n        else:\n            axis_function_map = {TH1AxisType.x_axis.value: hist.GetXaxis, TH1AxisType.y_axis.value: hist.GetYaxis, TH1AxisType.z_axis.value: hist.GetZaxis}\n            return_func = axis_function_map[hist_axis_type]\n            return return_func()\n    return axis_func", "docstring": "Wrapper to retrieve the axis of a given histogram.\n\nThis can be convenient outside of just projections, so it's made available in the API.\n\nArgs:\naxis_type: The type of axis to retrieve.\nReturns:\nCallable to retrieve the specified axis when given a hist.", "source": "codesearchnet"}
{"code": "def window(self, begin, end=None):\n    if (self._name_parts.decorator != ''):\n        raise Exception('Cannot use window() on an already decorated table')\n    start = Table._convert_decorator_time(begin)\n    if (end is None):\n        if isinstance(begin, datetime.timedelta):\n            end = datetime.timedelta(0)\n        else:\n            end = datetime.datetime.utcnow()\n    stop = Table._convert_decorator_time(end)\n    if ((start > 0 >= stop) or (stop > 0 >= start)):\n        raise Exception(('window: Between arguments must both be absolute or relative: %s, %s' % (str(begin), str(end))))\n    if (start > stop):\n        raise Exception(('window: Between arguments: begin must be before end: %s, %s' % (str(begin), str(end))))\n    return Table(('%s@%s-%s' % (self._full_name, str(start), str(stop))), context=self._context)", "docstring": "Return a new Table limited to the rows added to this Table during the specified time range.\n\nArgs:\nbegin: the start time of the window. This can be a Python datetime (absolute) or timedelta\n(relative to current time). The result must be after the table was created and no more\nthan seven days in the past.\n\nNote that using a relative value will provide a varying snapshot, not a fixed\nsnapshot; any queries issued against such a Table will be done against a snapshot\nthat has an age relative to the execution time of the query.\n\nend: the end time of the snapshot; if None, then the current time is used. The types and\ninterpretation of values is as for start.\n\nReturns:\nA new Table object referencing the window.\n\nRaises:\nAn exception if this Table is already decorated, or if the time specified is invalid.", "source": "codesearchnet"}
{"code": "def to_proto(self, export_scope=None):\n    raise NotImplementedError", "docstring": "Converts a `Variable` to a `VariableDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `VariableDef` protocol buffer, or `None` if the `Variable` is not\nin the specified name scope.", "source": "github-repos"}
{"code": "def bounded_uniform(cls, lowest, highest, weight_interval=None):\n    if (weight_interval is None):\n        weights = [(lowest, 1), (highest, 1)]\n    else:\n        i = lowest\n        weights = []\n        while (i < highest):\n            weights.append((i, 1))\n            i += weight_interval\n        weights.append((highest, 1))\n    return cls(weights)", "docstring": "Initialize with a uniform distribution between two values.\n\nIf no ``weight_interval`` is passed, this weight distribution\nwill just consist of ``[(lowest, 1), (highest, 1)]``. If specified,\nweights (still with uniform weight distribution) will be added every\n``weight_interval``. Use this if you intend to modify the weights\nin any complex way after initialization.\n\nArgs:\nlowest (float or int):\nhighest (float or int):\nweight_interval (int):\n\nReturns:\nSoftFloat: A newly constructed instance.", "source": "codesearchnet"}
{"code": "def __user_location(__pkg: str, type_) -> str:\n    \n    if ALLOW_DARWIN and sys.platform == 'darwin':\n        user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])\n    else:\n        user_dir = getenv('XDG_{}_HOME'.format(type_.upper()),\n                           path.sep.join([getenv('HOME', ''),\n                                          __LOCATIONS[type_][1]]))\n    return path.expanduser(path.sep.join([user_dir, __pkg]))", "docstring": "Utility function to look up XDG basedir locations\n\nArgs:\n__pkg: Package name\n__type: Location type", "source": "juraj-google-style"}
{"code": "def __get_conn(self, flag_force_new=False, filename=None):\n    flag_open_new = (flag_force_new or (not self._conn_is_open()))\n    if flag_open_new:\n        if (filename is None):\n            filename = self.filename\n        conn = self._get_conn(filename)\n        self._conn = conn\n    else:\n        conn = self._conn\n    return conn", "docstring": "Returns connection to database. Tries to return existing connection, unless flag_force_new\n\nArgs:\nflag_force_new:\nfilename:\n\nReturns: sqlite3.Connection object\n\n**Note** this is a private method because you can get a connection to any file, so it has to\nbe used in the right moment", "source": "codesearchnet"}
{"code": "def save_target_classes_for_batch(self, filename, image_batches, batch_id):\n    images = image_batches.data[batch_id]['images']\n    with open(filename, 'w') as f:\n        for (image_id, image_val) in iteritems(images):\n            target_class = self.get_target_class(image_val['dataset_image_id'])\n            f.write('{0}.png,{1}\\n'.format(image_id, target_class))", "docstring": "Saves file with target class for given dataset batch.\n\nArgs:\nfilename: output filename\nimage_batches: instance of ImageBatchesBase with dataset batches\nbatch_id: dataset batch ID", "source": "codesearchnet"}
{"code": "def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')\n    if bias_fn is not None:\n        out = bias_fn(out, self.bias)\n    if activation_fn is not None:\n        out = activation_fn(out)\n    return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nDepending on self.bias_fn and self.activation_fn, it may add a bias\nterm or go through the activaction function.\n\nArgs:\ninput_tensor: Input tensor to matmul with the filter.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def diff(self, container):\n    return self._result(self._get(self._url('/containers/{0}/changes', container)), True)", "docstring": "Inspect changes on a container's filesystem.\n\nArgs:\ncontainer (str): The container to diff\n\nReturns:\n(str)\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def __init__(self, indices, num_segments, batch_dims=0):\n    self.indices = torch.as_tensor(indices, device=indices.device)\n    self.num_segments = torch.as_tensor(num_segments, device=indices.device)\n    self.batch_dims = batch_dims", "docstring": "Creates an index\n\nArgs:\nindices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer):\nTensor containing the indices.\nnum_segments (`torch.LongTensor`):\nScalar tensor, the number of segments. All elements in a batched segmented tensor must have the same\nnumber of segments (although many segments can be empty).\nbatch_dims (`int`, *optional*, defaults to 0):\nThe number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as\nbatch dimensions. Segments in different batch elements are always distinct even if they have the same\nindex.", "source": "github-repos"}
{"code": "def read_tree_newick(newick):\n    if (not isinstance(newick, str)):\n        try:\n            newick = str(newick)\n        except:\n            raise TypeError('newick must be a str')\n    if newick.lower().endswith('.gz'):\n        f = gopen(expanduser(newick))\n        ts = f.read().decode().strip()\n        f.close()\n    elif isfile(expanduser(newick)):\n        f = open(expanduser(newick))\n        ts = f.read().strip()\n        f.close()\n    else:\n        ts = newick.strip()\n    lines = ts.splitlines()\n    if (len(lines) != 1):\n        return [read_tree_newick(l) for l in lines]\n    try:\n        t = Tree()\n        t.is_rooted = ts.startswith('[&R]')\n        if (ts[0] == '['):\n            ts = ']'.join(ts.split(']')[1:]).strip()\n            ts = ts.replace(', ', ',')\n        n = t.root\n        i = 0\n        while (i < len(ts)):\n            if (ts[i] == ';'):\n                if ((i != (len(ts) - 1)) or (n != t.root)):\n                    raise RuntimeError(INVALID_NEWICK)\n            elif (ts[i] == '('):\n                c = Node()\n                n.add_child(c)\n                n = c\n            elif (ts[i] == ')'):\n                n = n.parent\n            elif (ts[i] == ','):\n                n = n.parent\n                c = Node()\n                n.add_child(c)\n                n = c\n            elif (ts[i] == ':'):\n                i += 1\n                ls = ''\n                while ((ts[i] != ',') and (ts[i] != ')') and (ts[i] != ';')):\n                    ls += ts[i]\n                    i += 1\n                n.edge_length = float(ls)\n                i -= 1\n            else:\n                label = ''\n                while ((ts[i] != ':') and (ts[i] != ',') and (ts[i] != ';') and (ts[i] != ')')):\n                    label += ts[i]\n                    i += 1\n                i -= 1\n                n.label = label\n            i += 1\n    except Exception as e:\n        raise RuntimeError(('Failed to parse string as Newick: %s' % ts))\n    return t", "docstring": "Read a tree from a Newick string or file\n\nArgs:\n``newick`` (``str``): Either a Newick string or the path to a Newick file (plain-text or gzipped)\n\nReturns:\n``Tree``: The tree represented by ``newick``. If the Newick file has multiple trees (one per line), a ``list`` of ``Tree`` objects will be returned", "source": "codesearchnet"}
{"code": "def parse_query(self, query, index, stop_current, shuffle):\n        \n\n        if index is not None and len(self.queue) > 0:\n            if index < 0 or index >= len(self.queue):\n                if len(self.queue) == 1:\n                    self.statuslog.error(\"Play index must be 1 (1 song in queue)\")\n                    return\n                else:\n                    self.statuslog.error(\"Play index must be between 1 and {}\".format(len(self.queue)))\n                    return\n\n        try:\n            yt_videos = api_music.parse_query(query, self.statuslog)\n            if shuffle:\n                random.shuffle(yt_videos)\n\n            if len(yt_videos) == 0:\n                self.statuslog.error(\"No results for: {}\".format(query))\n                return\n\n            if index is None:\n                self.queue = self.queue + yt_videos\n            else:\n                if len(self.queue) > 0:\n                    self.queue = self.queue[:index] + yt_videos + self.queue[index:]\n                else:\n                    self.queue = yt_videos\n\n            self.update_queue()\n\n            if stop_current:\n                if self.streamer:\n                    self.streamer.stop()\n        except Exception as e:\n            logger.exception(e)", "docstring": "Parses a query and adds it to the queue\n\nArgs:\nquery (str): Either a search term or a link\nindex (int): The index to enqueue at (None for end)\nstop_current (bool): Whether to stop the current song after the songs are queued\nshuffle (bool): Whether to shuffle the added songs", "source": "juraj-google-style"}
{"code": "def clamp(value, maximum=None):\n        \n        value = max(value, 0)\n\n        if maximum is not None:\n            return min(value, maximum)\n        else:\n            return value", "docstring": "Clamp numeric values to be non-negative, an optionally, less than a\ngiven maximum.\n\nArgs:\nvalue (float) :\nA number to clamp.\n\nmaximum (float, optional) :\nA max bound to to clamp to. If None, there is no upper bound,\nand values are only clamped to be non-negative. (default: None)\n\nReturns:\nfloat", "source": "juraj-google-style"}
{"code": "def _AsTensorList(x, p):\n    if not isinstance(x, (list, _basetuple)):\n        x = [x]\n    l = []\n    for v in x:\n        if isinstance(v, ops.Operation):\n            v = with_dependencies([v], p)\n        v = ops.convert_to_tensor_or_composite(v)\n        if isinstance(v, tensor_lib.Tensor):\n            l.append(array_ops.identity(v))\n        else:\n            l.append(indexed_slices.IndexedSlices(array_ops.identity(v.values), array_ops.identity(v.indices)))\n    return l", "docstring": "Return x as a list of Tensors or IndexedSlices.\n\nFor entries of `x` that are Operations, this returns an Identity of `p`\nwith a dependency on the operation.\n\nArgs:\nx: A Tensor/IndexedSlices/Operation or a list or tuple of them.\np: A Tensor to return for entries in `x` that are Operations.\n\nReturns:\nA list of Tensors or IndexedSlices.", "source": "github-repos"}
{"code": "def Append(self, value, timestamp):\n    \n\n    timestamp = self._NormalizeTime(timestamp)\n    if self.data and timestamp < self.data[-1][1]:\n      raise RuntimeError(\"Next timestamp must be larger.\")\n    self.data.append([value, timestamp])", "docstring": "Adds value at timestamp.\n\nValues must be added in order of increasing timestamp.\n\nArgs:\nvalue: An observed value.\ntimestamp: The timestamp at which value was observed.\n\nRaises:\nRuntimeError: If timestamp is smaller than the previous timstamp.", "source": "juraj-google-style"}
{"code": "def SaveGDAL(filename, rda):\n  \n  if type(rda) is not rdarray:\n    raise Exception(\"A richdem.rdarray or numpy.ndarray is required!\")\n\n  if not GDAL_AVAILABLE:\n    raise Exception(\"richdem.SaveGDAL() requires GDAL.\")\n\n  driver    = gdal.GetDriverByName('GTiff')\n  data_type = gdal.GDT_Float32 \n  data_set  = driver.Create(filename, xsize=rda.shape[1], ysize=rda.shape[0], bands=1, eType=data_type)\n  data_set.SetGeoTransform(rda.geotransform)\n  data_set.SetProjection(rda.projection)\n  band = data_set.GetRasterBand(1)\n  band.SetNoDataValue(rda.no_data)\n  band.WriteArray(np.array(rda))\n  for k,v in rda.metadata.items():\n    data_set.SetMetadataItem(str(k),str(v))", "docstring": "Save a GDAL file.\n\nSaves a RichDEM array to a data file in GeoTIFF format.\n\nIf you need to do something more complicated, look at the source of this\nfunction.\n\nArgs:\nfilename (str):     Name of the raster file to be created\nrda      (rdarray): Data to save.\n\nReturns:\nNo Return", "source": "juraj-google-style"}
{"code": "def terminate(self, end):\n    if self.terminated:\n        raise TdlError('Cannot terminate a closed list.')\n    if (end == LIST_TYPE):\n        self.terminated = False\n    elif (end == EMPTY_LIST_TYPE):\n        if self._last_path:\n            self[self._last_path] = None\n        else:\n            self._avm = None\n        self.terminated = True\n    elif self._last_path:\n        self[self._last_path] = end\n        self.terminated = True\n    else:\n        raise TdlError('Empty list must be {} or {}'.format(LIST_TYPE, EMPTY_LIST_TYPE))", "docstring": "Set the value of the tail of the list.\n\nAdding values via :meth:`append` places them on the `FIRST`\nfeature of some level of the feature structure (e.g.,\n`REST.FIRST`), while :meth:`terminate` places them on the\nfinal `REST` feature (e.g., `REST.REST`). If *end* is a\n:class:`Conjunction` or :class:`Term`, it is typically a\n:class:`Coreference`, otherwise *end* is set to\n`tdl.EMPTY_LIST_TYPE` or `tdl.LIST_TYPE`. This method does\nnot necessarily close the list; if *end* is `tdl.LIST_TYPE`,\nthe list is left open, otherwise it is closed.\n\nArgs:\nend (str, :class:`Conjunction`, :class:`Term`): value to\nuse as the end of the list.", "source": "codesearchnet"}
{"code": "def __init__(self, mh_map: dict[str, ModelHandler]):\n    self._max_models = None\n    self._mh_map: dict[str, ModelHandler] = mh_map\n    self._key_to_last_update: dict[str, str] = defaultdict(str)\n    self._tag_map: dict[str, str] = OrderedDict()\n    self._proxy_map: dict[str, multi_process_shared.MultiProcessShared] = {}", "docstring": "Args:\nmh_map: A map from keys to model handlers which can be used to load a\nmodel.", "source": "github-repos"}
{"code": "def match_opcodes(opcode_traces, lineno, op_match_list):\n    out = []\n    for trace in opcode_traces[lineno]:\n        for match_op, match_symbol in op_match_list:\n            if trace.op == match_op and match_symbol in [None, trace.symbol]:\n                out.append((trace.op, trace.symbol, trace.types))\n    return out", "docstring": "Get all opcodes matching op_match_list on a given line.\n\nArgs:\nopcode_traces: traces\nlineno: line number to get ops from.\nop_match_list: [(opcode_name, symbol|None), ...]; None matches any symbol.\n\nReturns:\nA list of matching opcodes.", "source": "github-repos"}
{"code": "def __tomo_linear_inv(freqs, ops, weights=None, trace=None):\n    if (weights is not None):\n        W = np.array(weights)\n        if (W.ndim == 1):\n            W = np.diag(W)\n    S = np.array([vectorize(m).conj() for m in ops]).reshape(len(ops), ops[0].size)\n    if (weights is not None):\n        S = np.dot(W, S)\n    v = np.array(freqs)\n    if (weights is not None):\n        v = np.dot(W, freqs)\n    Sdg = S.T.conj()\n    inv = np.linalg.pinv(np.dot(Sdg, S))\n    ret = devectorize(np.dot(inv, np.dot(Sdg, v)))\n    if (trace is not None):\n        ret = ((trace * ret) / np.trace(ret))\n    return ret", "docstring": "Reconstruct a matrix through linear inversion.\n\nArgs:\nfreqs (list[float]): list of observed frequences.\nops (list[np.array]): list of corresponding projectors.\nweights (list[float] or array_like):\nweights to be used for weighted fitting.\ntrace (float or None): trace of returned operator.\n\nReturns:\nnumpy.array: A numpy array of the reconstructed operator.", "source": "codesearchnet"}
{"code": "def output_sector_csv(self,csv_path,file_dict_key,out_path):\n        \n        csv_file = csv_path + \"{0}_{1}_{2}_{3}.csv\".format(\n                                                        file_dict_key,\n                                                        self.ensemble_name,\n                                                        self.member,\n                                                        self.run_date.strftime(self.date_format))\n        if exists(csv_file):\n            csv_data = pd.read_csv(csv_file)\n            \n            if self.inds is None:\n                lon_obj = csv_data.loc[:,\"Centroid_Lon\"]\n                lat_obj = csv_data.loc[:,\"Centroid_Lat\"]\n            \n                self.inds = np.where((self.ne_lat>=lat_obj)&(self.sw_lat<=lat_obj)\\\n                        &(self.ne_lon>=lon_obj)&(self.sw_lon<=lon_obj))[0]\n            \n            if np.shape(self.inds)[0] > 0:\n                csv_data = csv_data.reindex(np.array(self.inds)) \n                sector_csv_filename = out_path + \"{0}_{1}_{2}_{3}.csv\".format(\n                                                        file_dict_key,\n                                                        self.ensemble_name,\n                                                        self.member,\n                                                        self.run_date.strftime(self.date_format))\n                print(\"Output sector csv file \" + sector_csv_filename)\n                csv_data.to_csv(sector_csv_filename,\n                        na_rep=\"nan\",\n                        float_format=\"%0.5f\",\n                        index=False)\n                os.chmod(sector_csv_filename, 0o666)\n            else:\n                print('No {0} {1} sector data found'.format(self.member,\n                                self.run_date.strftime(\"%Y%m%d\")))\n            \n        else:\n            print('No {0} {1} csv file found'.format(self.member,\n                                self.run_date.strftime(\"%Y%m%d\")))\n        return", "docstring": "Segment forecast tracks to only output data contined within a\nregion in the CONUS, as defined by the mapfile.\n\nArgs:\ncsv_path(str): Path to the full CONUS csv file.\nfile_dict_key(str): Dictionary key for the csv files,\ncurrently either 'track_step' or 'track_total'\nout_path (str): Path to output new segmented csv files.\nReturns:\nSegmented forecast tracks in a csv file.", "source": "juraj-google-style"}
{"code": "def verify_fully_used_iterator(self, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False):\n    self.verify_run_with_breaks(ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors, assert_items_equal=assert_items_equal)", "docstring": "Verifies that saving and restoring a fully used iterator works.\n\nNote that this only checks saving and restoring an iterator from which\n`num_outputs` items have been produced but does not check for an\nexhausted iterator, i.e., one from which an OutOfRange error has been\nreturned.\n\nArgs:\nds_fn: 0-argument function that returns a Dataset.\nnum_outputs: Total number of outputs expected from this Dataset.\nsparse_tensors: Whether dataset is built from SparseTensor(s).\nassert_items_equal: Tests the output has the expected elements regardless\nof order.\n\nRaises:\nAssertionError if test fails.", "source": "github-repos"}
{"code": "def get_cpu_vendor(cls, family, arch='x86'):\n        \n\n        props = cls.get_cpu_props(family, arch)\n        vendor = 'generic'\n        try:\n            vendor = props.xpath('vendor/@name')[0]\n        except IndexError:\n            pass\n        return vendor", "docstring": "Get CPU vendor, if vendor is not available will return 'generic'\n\nArgs:\nfamily(str): CPU family\narch(str): CPU arch\n\nReturns:\nstr: CPU vendor if found otherwise 'generic'", "source": "juraj-google-style"}
{"code": "def concat(self, second_iterable):\n    if self.closed():\n        raise ValueError('Attempt to call concat() on a closed Queryable.')\n    if (not is_iterable(second_iterable)):\n        raise TypeError('Cannot compute concat() with second_iterable of non-iterable {0}'.format(str(type(second_iterable))[7:(- 1)]))\n    return self._create(itertools.chain(self, second_iterable))", "docstring": "Concatenates two sequences.\n\nNote: This method uses deferred execution.\n\nArgs:\nsecond_iterable: The sequence to concatenate on to the sequence.\n\nReturns:\nA Queryable over the concatenated sequences.\n\nRaises:\nValueError: If the Queryable is closed().\nTypeError: If second_iterable is not in fact iterable.", "source": "codesearchnet"}
{"code": "def Process(self, parser_mediator, zip_file, archive_members):\n    if (not self.REQUIRED_PATHS):\n        raise ValueError('REQUIRED_PATHS not specified')\n    if (not set(archive_members).issuperset(self.REQUIRED_PATHS)):\n        raise errors.WrongCompoundZIPPlugin(self.NAME)\n    logger.debug('Compound ZIP Plugin used: {0:s}'.format(self.NAME))\n    self.InspectZipFile(parser_mediator, zip_file)", "docstring": "Determines if this is the correct plugin; if so proceed with processing.\n\nThis method checks if the zip file being contains the paths specified in\nREQUIRED_PATHS. If all paths are present, the plugin logic processing\ncontinues in InspectZipFile.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nzip_file (zipfile.ZipFile): the zip file. It should not be closed in\nthis method, but will be closed by the parser logic in czip.py.\narchive_members (list[str]): file paths in the archive.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.\nValueError: if a subclass has not specified REQUIRED_PATHS.", "source": "codesearchnet"}
{"code": "def get_config_file(basename):\n    \n    locations = [\n        os.path.join(os.curdir, basename),\n        os.path.join(\n            os.path.expanduser(\"~\"),\n            \".config\",\n            \"scriptabit\",\n            basename),\n        resource_filename(\n            Requirement.parse(\"scriptabit\"),\n            os.path.join('scriptabit', basename))\n    ]\n\n    for location in locations:\n        if os.path.isfile(location):\n            return location", "docstring": "Looks for a configuration file in 3 locations:\n\n- the current directory\n- the user config directory (~/.config/scriptabit)\n- the version installed with the package (using setuptools resource API)\n\nArgs:\nbasename (str): The base filename.\n\nReturns:\nstr: The full path to the configuration file.", "source": "juraj-google-style"}
{"code": "def DisjoinCalendars(self, cutoff):\n    \n\n    def TruncatePeriod(service_period, start, end):\n      \n      service_period.start_date = max(service_period.start_date, start)\n      service_period.end_date = min(service_period.end_date, end)\n      dates_to_delete = []\n      for k in service_period.date_exceptions:\n        if (k < start) or (k > end):\n          dates_to_delete.append(k)\n      for k in dates_to_delete:\n        del service_period.date_exceptions[k]\n\n    \n    year = int(cutoff[:4])\n    month = int(cutoff[4:6])\n    day = int(cutoff[6:8])\n    cutoff_date = datetime.date(year, month, day)\n    one_day_delta = datetime.timedelta(days=1)\n    before = (cutoff_date - one_day_delta).strftime('%Y%m%d')\n\n    for a in self.feed_merger.a_schedule.GetServicePeriodList():\n      TruncatePeriod(a, 0, before)\n    for b in self.feed_merger.b_schedule.GetServicePeriodList():\n      TruncatePeriod(b, cutoff, '9'*8)", "docstring": "Forces the old and new calendars to be disjoint about a cutoff date.\n\nThis truncates the service periods of the old schedule so that service\nstops one day before the given cutoff date and truncates the new schedule\nso that service only begins on the cutoff date.\n\nArgs:\ncutoff: The cutoff date as a string in YYYYMMDD format. The timezone\nis the same as used in the calendar.txt file.", "source": "juraj-google-style"}
{"code": "def _parse_format_pages_isbn(html_chunk):\n    \n    ppi = get_first_content(\n        html_chunk.find(\"div\", {\"class\": \"price-overflow\"})\n    )\n\n    if not ppi:\n        return None, None, None\n\n    \n    ppi = filter(lambda x: x.strip(), ppi.split(\"<br />\"))[0]\n\n    \n    isbn = dhtmlparser.parseString(ppi)\n    isbn = isbn.find(\"b\")\n    isbn = isbn[0].getContent() if isbn else None\n\n    \n    pages = None\n    book_format = None\n    details = ppi.split(\"|\")\n\n    if len(details) >= 2:\n        book_format = details[0].strip()\n        pages = details[1].strip()\n\n    return book_format, pages, isbn", "docstring": "Parse format, number of pages and ISBN.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\ntuple: (format, pages, isbn), all as string.", "source": "juraj-google-style"}
{"code": "def table(name=None, mode='create', use_cache=True, priority='interactive', allow_large_results=False):\n    output = QueryOutput()\n    output._output_type = 'table'\n    output._table_name = name\n    output._table_mode = mode\n    output._use_cache = use_cache\n    output._priority = priority\n    output._allow_large_results = allow_large_results\n    return output", "docstring": "Construct a query output object where the result is a table\n\nArgs:\nname: the result table name as a string or TableName; if None (the default), then a\ntemporary table will be used.\ntable_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request\nwill fail if the table exists.\nuse_cache: whether to use past query results or ignore cache. Has no effect if destination is\nspecified (default True).\npriority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled\nto run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much\nas three hours but are not rate-limited.\nallow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is\nslower and requires a name to be specified) (default False).", "source": "codesearchnet"}
{"code": "def add_deploy(state, deploy_func, *args, **kwargs):\n    frameinfo = get_caller_frameinfo()\n    kwargs['frameinfo'] = frameinfo\n    for host in state.inventory:\n        deploy_func(state, host, *args, **kwargs)", "docstring": "Prepare & add an deploy to pyinfra.state by executing it on all hosts.\n\nArgs:\nstate (``pyinfra.api.State`` obj): the deploy state to add the operation\ndeploy_func (function): the operation function from one of the modules,\nie ``server.user``\nargs/kwargs: passed to the operation function", "source": "codesearchnet"}
{"code": "def to_geotiff(arr, path='./output.tif', proj=None, spec=None, bands=None, **kwargs):\n    \n        \n    assert has_rasterio, \"To create geotiff images please install rasterio\" \n\n    try:\n        img_md = arr.rda.metadata[\"image\"]\n        x_size = img_md[\"tileXSize\"]\n        y_size = img_md[\"tileYSize\"]\n    except (AttributeError, KeyError):\n        x_size = kwargs.get(\"chunk_size\", 256)\n        y_size = kwargs.get(\"chunk_size\", 256)\n\n    try:\n        tfm = kwargs['transform'] if 'transform' in kwargs else arr.affine\n    except:\n        tfm = None\n\n    dtype = arr.dtype.name if arr.dtype.name != 'int8' else 'uint8' \n\n    if spec is not None and spec.lower() == 'rgb':\n        if bands is None:\n            bands = arr._rgb_bands\n        \n        if not arr.options.get('dra'):\n            \n            from gbdxtools.rda.interface import RDA\n            rda = RDA()\n            dra = rda.HistogramDRA(arr)\n            \n            arr = dra.aoi(bbox=arr.bounds)\n        arr = arr[bands,...].astype(np.uint8)\n        dtype = 'uint8'\n    else:\n        if bands is not None:\n            arr = arr[bands,...]\n    meta = {\n        'width': arr.shape[2],\n        'height': arr.shape[1],\n        'count': arr.shape[0],\n        'dtype': dtype,\n        'driver': 'GTiff',\n        'transform': tfm\n    }\n    if proj is not None:\n        meta[\"crs\"] = {'init': proj}\n\n    if \"tiled\" in kwargs and kwargs[\"tiled\"]:\n        meta.update(blockxsize=x_size, blockysize=y_size, tiled=\"yes\")\n\n    with rasterio.open(path, \"w\", **meta) as dst:\n        writer = rio_writer(dst)\n        result = store(arr, writer, compute=False)\n        result.compute(scheduler=threaded_get)\n    \n    return path", "docstring": "Write out a geotiff file of the image\n\nArgs:\npath (str): path to write the geotiff file to, default is ./output.tif\nproj (str): EPSG string of projection to reproject to\nspec (str): if set to 'rgb', write out color-balanced 8-bit RGB tif\nbands (list): list of bands to export. If spec='rgb' will default to RGB bands\n\nReturns:\nstr: path the geotiff was written to", "source": "juraj-google-style"}
{"code": "def AddTask(self, target, args=(), name='Unnamed task', blocking=True, inline=True):\n    if (not self.started):\n        raise ThreadPoolNotStartedError(self.name)\n    if (self.max_threads == 0):\n        target(*args)\n        return\n    if inline:\n        blocking = False\n    with self.lock:\n        while True:\n            if (len(self) < self.max_threads):\n                try:\n                    self._AddWorker()\n                except (RuntimeError, threading.ThreadError) as e:\n                    logging.error('Threadpool exception: Could not spawn worker threads: %s', e)\n            try:\n                self._queue.put((target, args, name, time.time()), block=False)\n                return\n            except queue.Full:\n                if (len(self) < self.max_threads):\n                    try:\n                        self._AddWorker()\n                        continue\n                    except (RuntimeError, threading.ThreadError) as e:\n                        logging.error('Threadpool exception: Could not spawn worker threads: %s', e)\n                if inline:\n                    break\n                elif blocking:\n                    try:\n                        self._queue.put((target, args, name, time.time()), block=True, timeout=1)\n                        return\n                    except queue.Full:\n                        continue\n                else:\n                    raise Full()\n    if inline:\n        target(*args)", "docstring": "Adds a task to be processed later.\n\nArgs:\ntarget: A callable which should be processed by one of the workers.\nargs: A tuple of arguments to target.\nname: The name of this task. Used to identify tasks in the log.\nblocking: If True we block until the task is finished, otherwise we raise\nqueue.Full\ninline: If set, process the task inline when the queue is full. This\nimplies no blocking. Specifying inline helps if the worker tasks are\nblocked because it still ensures some progress is made. However, this\ncan generally block the calling thread even after the threadpool is\navailable again and therefore decrease efficiency.\n\nRaises:\nThreadPoolNotStartedError: if the pool was not started yet.\nqueue.Full: if the pool is full and can not accept new jobs.", "source": "codesearchnet"}
{"code": "def elmo_loss2ppl(losses: List[np.ndarray]) -> float:\n    avg_loss = np.mean(losses)\n    return float(np.exp(avg_loss))", "docstring": "Calculates perplexity by loss\n\nArgs:\nlosses: list of numpy arrays of model losses\n\nReturns:\nperplexity : float", "source": "codesearchnet"}
{"code": "def partial_declaration_path(decl):\n    \n\n    \n    \n    \n    if not decl:\n        return []\n    if not decl.cache.partial_declaration_path:\n        result = [decl.partial_name]\n        parent = decl.parent\n        while parent:\n            if parent.cache.partial_declaration_path:\n                result.reverse()\n                decl.cache.partial_declaration_path \\\n                    = parent.cache.partial_declaration_path + result\n                return decl.cache.partial_declaration_path\n            else:\n                result.append(parent.partial_name)\n                parent = parent.parent\n        result.reverse()\n        decl.cache.partial_declaration_path = result\n        return result\n\n    return decl.cache.partial_declaration_path", "docstring": "Returns a list of parent declarations names without template arguments that\nhave default value.\n\nArgs:\ndecl (declaration_t): declaration for which the partial declaration\npath should be calculated.\n\nReturns:\nlist[(str | basestring)]: list of names, where first item is the top\nparent name and last item the inputted\ndeclaration name.", "source": "juraj-google-style"}
{"code": "def __init__(self, bundle_context_manager: execution.BundleContextManager, progress_frequency: Optional[float]=None, cache_token_generator=FnApiRunner.get_cache_token_generator(), split_managers=()) -> None:\n    self.bundle_context_manager: execution.BundleContextManager = bundle_context_manager\n    self._progress_frequency = progress_frequency\n    self._worker_handler: Optional[WorkerHandler] = None\n    self._cache_token_generator = cache_token_generator\n    self.split_managers = split_managers", "docstring": "Set up a bundle manager.\n\nArgs:\nprogress_frequency", "source": "github-repos"}
{"code": "def _HasId(self, schedule, entity_id):\n    \n    try:\n      self._GetById(schedule, entity_id)\n      has = True\n    except KeyError:\n      has = False\n    return has", "docstring": "Check if the schedule has an entity with the given id.\n\nArgs:\nschedule: The transitfeed.Schedule instance to look in.\nentity_id: The id of the entity.\n\nReturns:\nTrue if the schedule has an entity with the id or False if not.", "source": "juraj-google-style"}
{"code": "def analyze_directory(self, directory: Path, identifier: Union[str, None]=None, ignore_files: Union[list[str], None]=None, n_identifier: Union[str, list[str], None]=None, only_modules: bool=True):\n    files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]\n    if identifier is not None:\n        files = [file for file in files if identifier in file]\n    if n_identifier is not None:\n        if isinstance(n_identifier, list):\n            for n_ in n_identifier:\n                files = [file for file in files if n_ not in file]\n        else:\n            files = [file for file in files if n_identifier not in file]\n    ignore_files = ignore_files or []\n    ignore_files.append('__init__.py')\n    files = [file for file in files if file not in ignore_files]\n    for file in files:\n        print('Testing', file)\n        if only_modules:\n            module_identifier = file.split('.')[0]\n            try:\n                module_identifier = getattr(transformers, module_identifier)\n                suite = doctest.DocTestSuite(module_identifier)\n                result = unittest.TextTestRunner().run(suite)\n                self.assertIs(len(result.failures), 0)\n            except AttributeError:\n                logger.info(f'{module_identifier} is not a module.')\n        else:\n            result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)\n            self.assertIs(result.failed, 0)", "docstring": "Runs through the specific directory, looking for the files identified with `identifier`. Executes\nthe doctests in those files\n\nArgs:\ndirectory (`Path`): Directory containing the files\nidentifier (`str`): Will parse files containing this\nignore_files (`List[str]`): List of files to skip\nn_identifier (`str` or `List[str]`): Will not parse files containing this/these identifiers.\nonly_modules (`bool`): Whether to only analyze modules", "source": "github-repos"}
{"code": "def verify_profile_name(msg, cfg):\n    \n    if msg.profile not in cfg.data:\n        raise UnknownProfileError(msg.profile)", "docstring": "Verifies the profile name exists in the config.json file.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "juraj-google-style"}
{"code": "def launch(self, image, command, **kwargs):\n    if isinstance(command, PythonCall):\n        return PythonJob(self, image, command, **kwargs)\n    else:\n        return Job(self, image, command, **kwargs)", "docstring": "Create a job on this engine\n\nArgs:\nimage (str): name of the docker image to launch\ncommand (str): shell command to run", "source": "codesearchnet"}
{"code": "def diagonal_gaussian_posterior_builder(getter, name, shape=None, *args, **kwargs):\n    parameter_shapes = tfp.distributions.Normal.param_static_shapes(shape)\n    loc_var = getter((name + '/posterior_loc'), *args, shape=parameter_shapes['loc'], **kwargs)\n    scale_var = getter((name + '/posterior_scale'), *args, shape=parameter_shapes['scale'], **kwargs)\n    posterior = tfp.distributions.Normal(loc=loc_var, scale=tf.nn.softplus(scale_var), name='{}_posterior_dist'.format(name))\n    return posterior", "docstring": "A pre-canned builder for diagonal gaussian posterior distributions.\n\nGiven a true `getter` function and arguments forwarded from `tf.get_variable`,\nreturn a distribution object for a diagonal posterior over a variable of the\nrequisite shape.\n\nArgs:\ngetter: The `getter` passed to a `custom_getter`. Please see the\ndocumentation for `tf.get_variable`.\nname: The `name` argument passed to `tf.get_variable`.\nshape: The `shape` argument passed to `tf.get_variable`.\n*args: See positional arguments passed to `tf.get_variable`.\n**kwargs: See keyword arguments passed to `tf.get_variable`.\n\nReturns:\nAn instance of `tfp.distributions.Normal` representing the posterior\ndistribution over the variable in question.", "source": "codesearchnet"}
{"code": "def replace_keywords(self, sentence):\n    if (not sentence):\n        return sentence\n    new_sentence = []\n    orig_sentence = sentence\n    if (not self.case_sensitive):\n        sentence = sentence.lower()\n    current_word = ''\n    current_dict = self.keyword_trie_dict\n    current_white_space = ''\n    sequence_end_pos = 0\n    idx = 0\n    sentence_len = len(sentence)\n    while (idx < sentence_len):\n        char = sentence[idx]\n        current_word += orig_sentence[idx]\n        if (char not in self.non_word_boundaries):\n            current_white_space = char\n            if ((self._keyword in current_dict) or (char in current_dict)):\n                sequence_found = None\n                longest_sequence_found = None\n                is_longer_seq_found = False\n                if (self._keyword in current_dict):\n                    sequence_found = current_dict[self._keyword]\n                    longest_sequence_found = current_dict[self._keyword]\n                    sequence_end_pos = idx\n                if (char in current_dict):\n                    current_dict_continued = current_dict[char]\n                    current_word_continued = current_word\n                    idy = (idx + 1)\n                    while (idy < sentence_len):\n                        inner_char = sentence[idy]\n                        current_word_continued += orig_sentence[idy]\n                        if ((inner_char not in self.non_word_boundaries) and (self._keyword in current_dict_continued)):\n                            current_white_space = inner_char\n                            longest_sequence_found = current_dict_continued[self._keyword]\n                            sequence_end_pos = idy\n                            is_longer_seq_found = True\n                        if (inner_char in current_dict_continued):\n                            current_dict_continued = current_dict_continued[inner_char]\n                        else:\n                            break\n                        idy += 1\n                    else:\n                        if (self._keyword in current_dict_continued):\n                            current_white_space = ''\n                            longest_sequence_found = current_dict_continued[self._keyword]\n                            sequence_end_pos = idy\n                            is_longer_seq_found = True\n                    if is_longer_seq_found:\n                        idx = sequence_end_pos\n                        current_word = current_word_continued\n                current_dict = self.keyword_trie_dict\n                if longest_sequence_found:\n                    new_sentence.append((longest_sequence_found + current_white_space))\n                    current_word = ''\n                    current_white_space = ''\n                else:\n                    new_sentence.append(current_word)\n                    current_word = ''\n                    current_white_space = ''\n            else:\n                current_dict = self.keyword_trie_dict\n                new_sentence.append(current_word)\n                current_word = ''\n                current_white_space = ''\n        elif (char in current_dict):\n            current_dict = current_dict[char]\n        else:\n            current_dict = self.keyword_trie_dict\n            idy = (idx + 1)\n            while (idy < sentence_len):\n                char = sentence[idy]\n                current_word += orig_sentence[idy]\n                if (char not in 
self.non_word_boundaries):\n                    break\n                idy += 1\n            idx = idy\n            new_sentence.append(current_word)\n            current_word = ''\n            current_white_space = ''\n        if ((idx + 1) >= sentence_len):\n            if (self._keyword in current_dict):\n                sequence_found = current_dict[self._keyword]\n                new_sentence.append(sequence_found)\n            else:\n                new_sentence.append(current_word)\n        idx += 1\n    return ''.join(new_sentence)", "docstring": "Searches in the string for all keywords present in corpus.\nKeywords present are replaced by the clean name and a new string is returned.\n\nArgs:\nsentence (str): Line of text where we will replace keywords\n\nReturns:\nnew_sentence (str): Line of text with replaced keywords\n\nExamples:\n>>> from flashtext import KeywordProcessor\n>>> keyword_processor = KeywordProcessor()\n>>> keyword_processor.add_keyword('Big Apple', 'New York')\n>>> keyword_processor.add_keyword('Bay Area')\n>>> new_sentence = keyword_processor.replace_keywords('I love Big Apple and bay area.')\n>>> new_sentence\n>>> 'I love New York and Bay Area.'", "source": "codesearchnet"}
{"code": "def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):\n    if isinstance(flow_or_path, np.ndarray):\n        if ((flow_or_path.ndim != 3) or (flow_or_path.shape[(- 1)] != 2)):\n            raise ValueError('Invalid flow with shape {}'.format(flow_or_path.shape))\n        return flow_or_path\n    elif (not is_str(flow_or_path)):\n        raise TypeError('\"flow_or_path\" must be a filename or numpy array, not {}'.format(type(flow_or_path)))\n    if (not quantize):\n        with open(flow_or_path, 'rb') as f:\n            try:\n                header = f.read(4).decode('utf-8')\n            except Exception:\n                raise IOError('Invalid flow file: {}'.format(flow_or_path))\n            else:\n                if (header != 'PIEH'):\n                    raise IOError('Invalid flow file: {}, header does not contain PIEH'.format(flow_or_path))\n            w = np.fromfile(f, np.int32, 1).squeeze()\n            h = np.fromfile(f, np.int32, 1).squeeze()\n            flow = np.fromfile(f, np.float32, ((w * h) * 2)).reshape((h, w, 2))\n    else:\n        assert (concat_axis in [0, 1])\n        cat_flow = imread(flow_or_path, flag='unchanged')\n        if (cat_flow.ndim != 2):\n            raise IOError('{} is not a valid quantized flow file, its dimension is {}.'.format(flow_or_path, cat_flow.ndim))\n        assert ((cat_flow.shape[concat_axis] % 2) == 0)\n        (dx, dy) = np.split(cat_flow, 2, axis=concat_axis)\n        flow = dequantize_flow(dx, dy, *args, **kwargs)\n    return flow.astype(np.float32)", "docstring": "Read an optical flow map.\n\nArgs:\nflow_or_path (ndarray or str): A flow map or filepath.\nquantize (bool): whether to read quantized pair, if set to True,\nremaining args will be passed to :func:`dequantize_flow`.\nconcat_axis (int): The axis that dx and dy are concatenated,\ncan be either 0 or 1. Ignored if quantize is False.\n\nReturns:\nndarray: Optical flow represented as a (h, w, 2) numpy array", "source": "codesearchnet"}
{"code": "def qhull_cmd(cmd, options, points):\n    prep_str = [str(len(points[0])), str(len(points))]\n    prep_str.extend([' '.join(map(repr, row)) for row in points])\n    output = getattr(hull, cmd)(options, '\\n'.join(prep_str))\n    return list(map(str.strip, output.strip().split('\\n')))", "docstring": "Generalized helper method to perform a qhull based command.\n\nArgs:\ncmd:\nCommand to perform. Supported commands are qconvex,\nqdelaunay and qvoronoi.\noptions:\nOptions to be provided for qhull command. See specific methods for\ninfo on supported options. Up to two options separated by spaces\nare supported.\npoints:\nSequence of points as input to qhull command.\n\nReturns:\nOutput as a list of strings. E.g., ['4', '0 2', '1 0', '2 3 ', '3 1']", "source": "codesearchnet"}
{"code": "def from_file_msg(cls, fp):\n        \n        log.debug(\"Parsing email from file Outlook\")\n        f, _ = msgconvert(fp)\n        return cls.from_file(f, True)", "docstring": "Init a new object from a Outlook message file,\nmime type: application/vnd.ms-outlook\n\nArgs:\nfp (string): file path of raw Outlook email\n\nReturns:\nInstance of MailParser", "source": "juraj-google-style"}
{"code": "def from_dense(tensor, name=None):\n    with ops.name_scope(name, 'dense_to_sparse'):\n        tensor = ops.convert_to_tensor(tensor)\n        indices = array_ops.where_v2(math_ops.not_equal(tensor, array_ops.zeros_like(tensor)))\n        values = array_ops.gather_nd(tensor, indices)\n        shape = array_ops.shape(tensor, out_type=dtypes.int64)\n        return sparse_tensor.SparseTensor(indices, values, shape)", "docstring": "Converts a dense tensor into a sparse tensor.\n\nOnly elements not equal to zero will be present in the result. The resulting\n`SparseTensor` has the same dtype and shape as the input.\n\n>>> sp = tf.sparse.from_dense([0, 0, 3, 0, 1])\n>>> sp.shape.as_list()\n[5]\n>>> sp.values.numpy()\narray([3, 1], dtype=int32)\n>>> sp.indices.numpy()\narray([[2],\n[4]])\n\nArgs:\ntensor: A dense `Tensor` to be converted to a `SparseTensor`.\nname: Optional name for the op.\n\nReturns:\nThe `SparseTensor`.", "source": "github-repos"}
{"code": "def download_apcor(self, uri):\n        \n\n        local_file = os.path.basename(uri)\n        if os.access(local_file, os.F_OK):\n            fobj = open(local_file)\n        else:\n            fobj = storage.vofile(uri, view='data')\n            fobj.seek(0)\n        str = fobj.read()\n        fobj.close()\n        apcor_str = str\n        return ApcorData.from_string(apcor_str)", "docstring": "Downloads apcor data.\n\nArgs:\nuri: The URI of the apcor data file.\n\nReturns:\napcor: ossos.downloads.core.ApcorData", "source": "juraj-google-style"}
{"code": "def load(self, txt_fst_filename):\n        \n        with open(txt_fst_filename, 'r') as txt_fst:\n            for line in txt_fst:\n                line = line.strip()\n                splitted_line = line.split()\n                if len(splitted_line) == 1:\n                    self[int(splitted_line[0])].final = True\n                else:\n                    self.add_arc(int(splitted_line[0]), int(\n                        splitted_line[1]), splitted_line[2].decode('hex'))", "docstring": "Save the transducer in the text file format of OpenFST.\nThe format is specified as follows:\narc format: src dest ilabel olabel [weight]\nfinal state format: state [weight]\nlines may occur in any order except initial state must be first line\nArgs:\ntxt_fst_filename (string): The name of the file\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def intersects(self, other):\n        \n        try:\n            return (self.min_x <= other.max_x and\n                    self.max_x >= other.min_x and\n                    self.min_y <= other.max_y and\n                    self.max_y >= other.min_y)\n        except AttributeError:\n            return self.intersects(Envelope(other))", "docstring": "Returns true if this envelope intersects another.\n\nArguments:\nother -- Envelope or tuple of (minX, minY, maxX, maxY)", "source": "juraj-google-style"}
{"code": "def __delitem__(self, anchor_id):\n        \n        try:\n            self._anchor_path(anchor_id).unlink()\n        except OSError:\n            raise KeyError('No anchor with id {}'.format(anchor_id))", "docstring": "Remove an anchor from storage.\n\nArgs:\nanchor_id: The ID of the anchor to remove.\n\nRaises:\nKeyError: There is no anchor with that ID.", "source": "juraj-google-style"}
{"code": "def get_vm(access_token, subscription_id, resource_group, vm_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get virtual machine details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\n\nReturns:\nHTTP response. JSON body of VM properties.", "source": "codesearchnet"}
{"code": "async def update_notifications(self, on_match_open: bool=None, on_tournament_end: bool=None):\n    params = {}\n    if (on_match_open is not None):\n        params['notify_users_when_matches_open'] = on_match_open\n    if (on_tournament_end is not None):\n        params['notify_users_when_the_tournament_ends'] = on_tournament_end\n    assert_or_raise((len(params) > 0), ValueError, 'At least one of the notifications must be given')\n    (await self.update(**params))", "docstring": "update participants notifications for this tournament\n\n|methcoro|\n\nArgs:\non_match_open: Email registered Challonge participants when matches open up for them\non_tournament_end: Email registered Challonge participants the results when this tournament ends\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def _compile_output_step(outputs):\n    if (not outputs):\n        raise GraphQLCompilationError(u'No fields were selected for output! Please mark at least one field with the @output directive.')\n    output_fields = {}\n    for (output_name, output_context) in six.iteritems(outputs):\n        location = output_context['location']\n        optional = output_context['optional']\n        graphql_type = output_context['type']\n        expression = None\n        existence_check = None\n        if isinstance(location, FoldScopeLocation):\n            if optional:\n                raise AssertionError(u'Unreachable state reached, optional in fold: {}'.format(output_context))\n            if (location.field == COUNT_META_FIELD_NAME):\n                expression = expressions.FoldCountContextField(location)\n            else:\n                expression = expressions.FoldedContextField(location, graphql_type)\n        else:\n            expression = expressions.OutputContextField(location, graphql_type)\n            if optional:\n                existence_check = expressions.ContextFieldExistence(location.at_vertex())\n        if existence_check:\n            expression = expressions.TernaryConditional(existence_check, expression, expressions.NullLiteral)\n        output_fields[output_name] = expression\n    return blocks.ConstructResult(output_fields)", "docstring": "Construct the final ConstructResult basic block that defines the output format of the query.\n\nArgs:\noutputs: dict, output name (string) -> output data dict, specifying the location\nfrom where to get the data, and whether the data is optional (and therefore\nmay be missing); missing optional data is replaced with 'null'\n\nReturns:\na ConstructResult basic block that constructs appropriate outputs for the query", "source": "codesearchnet"}
{"code": "def add_task(self, tile_address, coroutine):\n    self._loop.call_soon_threadsafe(self._add_task, tile_address, coroutine)", "docstring": "Add a task into the event loop.\n\nThis is the main entry point for registering background tasks that are\nassociated with a tile. The tasks are added to the EmulationLoop and\nthe tile they are a part of is recorded.  When the tile is reset, all\nof its background tasks are canceled as part of the reset process.\n\nIf you have a task that should not be associated with any tile, you\nmay pass `None` for tile_address and the task will not be cancelled\nwhen any tile is reset.\n\nArgs:\ntile_address (int): The address of the tile running\nthe task.\ncoroutine (coroutine): A coroutine that will be added\nto the event loop.", "source": "codesearchnet"}
{"code": "def check_function_argument_count(func, input_arity, infeed_queue):\n\n    def format_error(complaint, quantity):\n        return '%s %d argument%s' % (complaint, quantity, '' if quantity == 1 else 's')\n    num_args_supplied = input_arity\n    if infeed_queue is not None:\n        num_args_supplied += infeed_queue.number_of_tuple_elements\n    arg_spec = tf_inspect.getargspec(func)\n    num_func_args = len(arg_spec.args)\n    if arg_spec.defaults is None:\n        num_func_defaults = 0\n    else:\n        num_func_defaults = len(arg_spec.defaults)\n    min_func_args = num_func_args - num_func_defaults\n    if num_args_supplied < min_func_args:\n        if num_func_defaults == 0 and arg_spec.varargs is None:\n            return format_error('exactly', num_func_args)\n        else:\n            return format_error('at least', min_func_args)\n    if arg_spec.varargs is None and num_args_supplied > num_func_args:\n        if num_func_defaults == 0:\n            return format_error('exactly', num_func_args)\n        else:\n            return format_error('at most', num_func_args)\n    return None", "docstring": "Validate the number of input arguments to an XLA function.\n\nArgs:\nfunc: the Python function that will be called to generate the body of an XLA\ncomputation graph.\ninput_arity: the number of explicit arguments supplied by the caller.\ninfeed_queue: if not None, the infeed queue that will supply\nadditional arguments to the function.\n\nReturns:\nNone if function can be called with the supplied number of\narguments, or an error string if it cannot.", "source": "github-repos"}
{"code": "def split_line(what, indent='', cols=79):\n    \n    if len(indent) > cols:\n        raise ValueError(\"The indent can't be longer than cols.\")\n\n    if cols < 2:\n        raise ValueError(\n            \"The cols can't be smaller than 2 (a char plus a possible '-')\"\n        )\n\n    what = indent + what.lstrip()\n\n    if len(what) <= cols:\n        what, new_line = '', what\n    else:\n        try:\n            closest_space = what[:cols].rindex(' ')\n        except ValueError:\n            closest_space = -1\n\n        if closest_space > len(indent):\n            what, new_line = (\n                what[closest_space:],\n                what[:closest_space],\n            )\n        elif what[cols] == ' ':\n            what, new_line = (\n                what[cols:],\n                what[:cols],\n            )\n        else:\n            what, new_line = what[cols - 1:], what[:cols - 1] + '-'\n\n    return what.lstrip(), new_line.rstrip()", "docstring": "Split a line on the closest space, or break the last word with '-'.\n\nArgs:\nwhat(str): text to spli one line of.\nindent(str): will prepend this indent to the split line, taking it into\naccount in the column count.\ncols(int): maximum length of the split line.\n\nReturns:\ntuple(str, str): rest of the text and split line in that order.\n\nRaises:\nValueError: when the indent is greater than the indent, or the cols\nparam is too small", "source": "juraj-google-style"}
{"code": "def purity(state):\n    \n    rho = np.array(state)\n    if rho.ndim == 1:\n        return 1.0\n    return np.real(np.trace(rho.dot(rho)))", "docstring": "Calculate the purity of a quantum state.\n\nArgs:\nstate (ndarray): a quantum state\nReturns:\nfloat: purity.", "source": "juraj-google-style"}
{"code": "def _UpdateUsers(self, update_users):\n    for (user, ssh_keys) in update_users.items():\n        if ((not user) or (user in self.invalid_users)):\n            continue\n        configured_keys = self.user_ssh_keys.get(user, [])\n        if (set(ssh_keys) != set(configured_keys)):\n            if (not self.utils.UpdateUser(user, ssh_keys)):\n                self.invalid_users.add(user)\n            else:\n                self.user_ssh_keys[user] = ssh_keys[:]", "docstring": "Provision and update Linux user accounts based on account metadata.\n\nArgs:\nupdate_users: dict, authorized users mapped to their public SSH keys.", "source": "codesearchnet"}
{"code": "def fit_to_structure(self, structure, symprec=0.1):\n        \n        sga = SpacegroupAnalyzer(structure, symprec)\n        symm_ops = sga.get_symmetry_operations(cartesian=True)\n        return sum([self.transform(symm_op)\n                    for symm_op in symm_ops]) / len(symm_ops)", "docstring": "Returns a tensor that is invariant with respect to symmetry\noperations corresponding to a structure\n\nArgs:\nstructure (Structure): structure from which to generate\nsymmetry operations\nsymprec (float): symmetry tolerance for the Spacegroup Analyzer\nused to generate the symmetry operations", "source": "juraj-google-style"}
{"code": "def __init__(self, group, provider, checker, code, messages):\n        \n        self.group = group\n        self.provider = provider\n        self.checker = checker\n        self.code = code\n        self.messages = messages", "docstring": "Initialization method.\n\nArgs:\ngroup (AnalysisGroup): parent group.\nprovider (Provider): parent Provider.\nchecker (Checker): parent Checker.\ncode (int): constant from Checker class.\nmessages (str): messages string.", "source": "juraj-google-style"}
{"code": "def get_file_list(wildcard):\n    files = glob.glob(os.path.expanduser(wildcard))\n    return files", "docstring": "Search for files to be concatenated. Currently very basic, but could\nexpand to be more sophisticated.\n\nArgs:\nwildcard (regular expression string)\n\nReturns:\nfiles (list of full file paths)", "source": "codesearchnet"}
{"code": "def read(keypath, configfile=None):\n    \n    if configfile in _configs:\n        appconfig = _configs[configfile]\n    else:\n        appconfig = AppConfig(configfile=configfile)\n        _configs[configfile] = appconfig\n\n    return appconfig.read(keypath)", "docstring": "Reads a value from the configuration file.\n\nArgs:\nkeypath: str\nSpecifies the key for which the value is desired.  It can be a\nhierarchical path.  Example: \"section1.subsection.key1\"\nconfigfile: str\nPath to the config file to read.  Defaults to None, in which case\nthe application's default config file is used.\n\nReturns:\nvalue from configuration file", "source": "juraj-google-style"}
{"code": "def _get_client_by_id(self, client_id):\n    \n    client = self.grr_api.Client(client_id)\n    print('Checking for client approval')\n    self._check_approval_wrapper(client, client.ListFlows)\n    print('{0:s}: Client approval is valid'.format(client_id))\n    return client.Get()", "docstring": "Get GRR client dictionary and make sure valid approvals exist.\n\nArgs:\nclient_id: GRR client ID.\n\nReturns:\nGRR API Client object", "source": "juraj-google-style"}
{"code": "def util_pattern_space(time_series, lag, dim):\n    n = len(time_series)\n    if ((lag * dim) > n):\n        raise Exception('Result matrix exceeded size limit, try to change lag or dim.')\n    elif (lag < 1):\n        raise Exception('Lag should be greater or equal to 1.')\n    pattern_space = np.empty(((n - (lag * (dim - 1))), dim))\n    for i in range((n - (lag * (dim - 1)))):\n        for j in range(dim):\n            pattern_space[i][j] = time_series[(i + (j * lag))]\n    return pattern_space", "docstring": "Create a set of sequences with given lag and dimension\n\nArgs:\ntime_series: Vector or string of the sample data\nlag: Lag between beginning of sequences\ndim: Dimension (number of patterns)\n\nReturns:\n2D array of vectors", "source": "codesearchnet"}
{"code": "def get_resource(self, feature_column, name):\n    del feature_column, name\n    raise NotImplementedError('StateManager.get_resource')", "docstring": "Returns an already created resource.\n\nResources can be things such as tables, variables, trackables, etc.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nname: Name of the resource.", "source": "github-repos"}
{"code": "def create_bulk(self, resource, timeout=-1):\n        \n        uri = self.URI + '/bulk'\n        default_values = self._get_default_values(self.BULK_DEFAULT_VALUES)\n        updated_data = self._helper.update_resource_fields(resource, default_values)\n\n        self._helper.create(updated_data, uri=uri, timeout=timeout)\n\n        return self.get_range(resource['namePrefix'], resource['vlanIdRange'])", "docstring": "Creates bulk Ethernet networks.\n\nArgs:\nresource (dict): Specifications to create in bulk.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nlist: List of created Ethernet Networks.", "source": "juraj-google-style"}
{"code": "def make(cls, name: str, ctx: 'context.Context', module: str, pyval_name: str | None=None) -> 'PyTDFunction':\n    pyval = ctx.loader.lookup_pytd(module, pyval_name or name)\n    if isinstance(pyval, pytd.Alias) and isinstance(pyval.type, pytd.Function):\n        pyval = pyval.type\n    pyval = pyval.Replace(name=f'{module}.{name}')\n    f = ctx.convert.constant_to_value(pyval, {}, ctx.root_node)\n    self = cls(name, f.signatures, pyval.kind, pyval.decorators, ctx)\n    self.module = module\n    return self", "docstring": "Create a PyTDFunction.\n\nArgs:\nname: The function name.\nctx: The abstract context.\nmodule: The module that the function is in.\npyval_name: Optionally, the name of the pytd.Function object to look up,\nif it is different from the function name.\n\nReturns:\nA new PyTDFunction.", "source": "github-repos"}
{"code": "def _OpenFile(self, path):\n    if (not self._registry_file_reader):\n        return None\n    return self._registry_file_reader.Open(path, ascii_codepage=self._ascii_codepage)", "docstring": "Opens a Windows Registry file.\n\nArgs:\npath (str): path of the Windows Registry file.\n\nReturns:\nWinRegistryFile: Windows Registry file or None if not available.", "source": "codesearchnet"}
{"code": "def annotate(self, sent):\n    preds = []\n    words = []\n    for (word, fv) in self.sent2examples(sent):\n        probs = self.predictor(fv)\n        tags = probs.argsort()\n        tag = self.ID_TAG[tags[(- 1)]]\n        words.append(word)\n        preds.append(tag)\n    annotations = zip(words, preds)\n    return annotations", "docstring": "Annotate a squence of words with entity tags.\n\nArgs:\nsent: sequence of strings/words.", "source": "codesearchnet"}
{"code": "def flush(cls, *args):\n    return _remove_keys([], [((cls._make_key(args) if args else cls.PREFIX) + '*')])", "docstring": "Removes all keys of this namespace\nWithout args, clears all keys starting with cls.PREFIX\nif called with args, clears keys starting with given cls.PREFIX + args\n\nArgs:\n*args: Arbitrary number of arguments.\n\nReturns:\nList of removed keys.", "source": "codesearchnet"}
{"code": "def save_data_files(vr, bs, prefix=None, directory=None):\n    \n    filename = '{}_band.dat'.format(prefix) if prefix else 'band.dat'\n    directory = directory if directory else '.'\n    filename = os.path.join(directory, filename)\n\n    if bs.is_metal():\n        zero = vr.efermi\n    else:\n        zero = bs.get_vbm()['energy']\n\n    with open(filename, 'w') as f:\n        header = '\n        f.write(header)\n\n        \n        for band in bs.bands[Spin.up]:\n            for d, e in zip(bs.distance, band):\n                f.write('{:.8f} {:.8f}\\n'.format(d, e - zero))\n            f.write('\\n')\n\n        \n        if bs.is_spin_polarized:\n            for band in bs.bands[Spin.down]:\n                for d, e in zip(bs.distance, band):\n                    f.write('{:.8f} {:.8f}\\n'.format(d, e - zero))\n                f.write('\\n')\n    return filename", "docstring": "Write the band structure data files to disk.\n\nArgs:\nvs (`Vasprun`): Pymatgen `Vasprun` object.\nbs (`BandStructureSymmLine`): Calculated band structure.\nprefix (`str`, optional): Prefix for data file.\ndirectory (`str`, optional): Directory in which to save the data.\n\nReturns:\nThe filename of the written data file.", "source": "juraj-google-style"}
{"code": "def get_variants(self, chromosome=None, start=None, end=None):\n        \n        query = {}\n        if chromosome:\n            query['chrom'] = chromosome\n        if start:\n            query['start'] = {'$lte': end}\n            query['end'] = {'$gte': start}\n        LOG.info(\"Find all variants {}\".format(query))\n        return self.db.variant.find(query).sort([('start', ASCENDING)])", "docstring": "Return all variants in the database\nIf no region is specified all variants will be returned.\n\nArgs:\nchromosome(str)\nstart(int)\nend(int)\n\n\nReturns:\nvariants(Iterable(Variant))", "source": "juraj-google-style"}
{"code": "def is_empty(self):\n    for family in self.iter_package_families():\n        for pkg in self.iter_packages(family):\n            return False\n    return True", "docstring": "Determine if the repository contains any packages.\n\nReturns:\nTrue if there are no packages, False if there are at least one.", "source": "codesearchnet"}
{"code": "def start(self, **kwargs):\n        \n        if not self.is_running():\n            self.websock_url = self.chrome.start(**kwargs)\n            self.websock = websocket.WebSocketApp(self.websock_url)\n            self.websock_thread = WebsockReceiverThread(\n                    self.websock, name='WebsockThread:%s' % self.chrome.port)\n            self.websock_thread.start()\n\n            self._wait_for(lambda: self.websock_thread.is_open, timeout=30)\n\n            \n            self.send_to_chrome(method='Network.enable')\n            self.send_to_chrome(method='Page.enable')\n            self.send_to_chrome(method='Console.enable')\n            self.send_to_chrome(method='Runtime.enable')\n            self.send_to_chrome(method='ServiceWorker.enable')\n            self.send_to_chrome(method='ServiceWorker.setForceUpdateOnPageLoad')\n\n            \n            self.send_to_chrome(\n                method='Network.setBlockedURLs',\n                params={'urls': ['*google-analytics.com/analytics.js',\n                                 '*google-analytics.com/ga.js']})", "docstring": "Starts chrome if it's not running.\n\nArgs:\n**kwargs: arguments for self.chrome.start(...)", "source": "juraj-google-style"}
{"code": "def persons_significant_control(self, num, statements=False, **kwargs):\n    baseuri = (self._BASE_URI + 'company/{}/persons-with-significant-control'.format(num))\n    if (statements is True):\n        baseuri += '-statements'\n    res = self.session.get(baseuri, params=kwargs)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for a list of persons with significant control.\n\nSearches for persons of significant control based on company number for\na specified company. Specify statements=True to only search for\nofficers with statements.\n\nArgs:\nnum (str, int): Company number to search on.\nstatements (Optional[bool]): Search only for persons with\nstatements. Default is False.\nkwargs (dict): additional keywords passed into requests.session.get\n*params* keyword.", "source": "codesearchnet"}
{"code": "def GetRawDevice(path):\n    path = CanonicalPathToLocalPath(path)\n    try:\n        path = win32file.GetLongPathName(path)\n    except pywintypes.error:\n        pass\n    try:\n        mount_point = win32file.GetVolumePathName(path)\n    except pywintypes.error as details:\n        logging.info('path not found. %s', details)\n        raise IOError(('No mountpoint for path: %s' % path))\n    if (not path.startswith(mount_point)):\n        stripped_mp = mount_point.rstrip('\\\\')\n        if (not path.startswith(stripped_mp)):\n            raise IOError(('path %s is not mounted under %s' % (path, mount_point)))\n    corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])\n    corrected_path = utils.NormalizePath(corrected_path)\n    volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip('\\\\')\n    volume = LocalPathToCanonicalPath(volume)\n    result = rdf_paths.PathSpec(path=volume, pathtype=rdf_paths.PathSpec.PathType.OS, mount_point=mount_point.rstrip('\\\\'))\n    return (result, corrected_path)", "docstring": "Resolves the raw device that contains the path.\n\nArgs:\npath: A path to examine.\n\nReturns:\nA pathspec to read the raw device as well as the modified path to read\nwithin the raw device. This is usually the path without the mount point.\n\nRaises:\nIOError: if the path does not exist or some unexpected behaviour occurs.", "source": "codesearchnet"}
{"code": "async def set_headline(self, name, level, message):\n    if (name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=name)\n    self.services[name]['state'].set_headline(level, message)\n    headline = self.services[name]['state'].headline.to_dict()\n    (await self._notify_update(name, 'new_headline', headline))", "docstring": "Set the sticky headline for a service.\n\nArgs:\nname (string): The short name of the service to query\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents", "source": "codesearchnet"}
{"code": "def compute_shader(self, source) -> 'ComputeShader':\n        \n\n        res = ComputeShader.__new__(ComputeShader)\n        res.mglo, ls1, ls2, ls3, ls4, res._glo = self.mglo.compute_shader(source)\n\n        members = {}\n\n        for item in ls1:\n            obj = Uniform.__new__(Uniform)\n            obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item\n            members[obj.name] = obj\n\n        for item in ls2:\n            obj = UniformBlock.__new__(UniformBlock)\n            obj.mglo, obj._index, obj._size, obj._name = item\n            members[obj.name] = obj\n\n        res._members = members\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information.\nWhile it can do rendering, it is generally used for tasks not directly related to drawing.\n\nArgs:\nsource (str): The source of the compute shader.\n\nReturns:\n:py:class:`ComputeShader` object", "source": "juraj-google-style"}
{"code": "def _RunAction(self, rule, client_id):\n    \n    actions_count = 0\n\n    try:\n      if self._CheckIfHuntTaskWasAssigned(client_id, rule.hunt_id):\n        logging.info(\n            \"Foreman: ignoring hunt %s on client %s: was started \"\n            \"here before\", client_id, rule.hunt_id)\n      else:\n        logging.info(\"Foreman: Starting hunt %s on client %s.\", rule.hunt_id,\n                     client_id)\n\n        \n        if rule.hunt_name:\n          flow_cls = registry.AFF4FlowRegistry.FlowClassByName(rule.hunt_name)\n          hunt_urn = rdfvalue.RDFURN(\"aff4:/hunts/%s\" % rule.hunt_id)\n          flow_cls.StartClients(hunt_urn, [client_id])\n        else:\n          hunt.StartHuntFlowOnClient(client_id, rule.hunt_id)\n\n        actions_count += 1\n\n    \n    \n    except Exception as e:  \n      logging.exception(\"Failure running foreman action on client %s: %s\",\n                        rule.hunt_id, e)\n\n    return actions_count", "docstring": "Run all the actions specified in the rule.\n\nArgs:\nrule: Rule which actions are to be executed.\nclient_id: Id of a client where rule's actions are to be executed.\n\nReturns:\nNumber of actions started.", "source": "juraj-google-style"}
{"code": "def dependency_of_fetches(fetches, op):\n    \n    try:\n        from tensorflow.python.client.session import _FetchHandler as FetchHandler\n        \n        handler = FetchHandler(op.graph, fetches, {})\n        targets = tuple(handler.fetches() + handler.targets())\n    except ImportError:\n        if isinstance(fetches, list):\n            targets = tuple(fetches)\n        elif isinstance(fetches, dict):\n            raise ValueError(\"Don't know how to parse dictionary to fetch list! \"\n                             \"This is a bug of tensorpack.\")\n        else:\n            targets = (fetches, )\n    return dependency_of_targets(targets, op)", "docstring": "Check that op is in the subgraph induced by the dependencies of fetches.\nfetches may have more general structure.\n\nArgs:\nfetches: An argument to `sess.run`. Nested structure will affect performance.\nop (tf.Operation or tf.Tensor):\n\nReturns:\nbool: True if any of `fetches` depend on `op`.", "source": "juraj-google-style"}
{"code": "def label_matrix_to_one_hot(L, k=None):\n    \n    n, m = L.shape\n    if k is None:\n        k = L.max()\n    L_onehot = torch.zeros(n, m, k + 1)\n    for i, row in enumerate(L):\n        for j, k in enumerate(row):\n            if k > 0:\n                L_onehot[i, j, k - 1] = 1\n    return L_onehot", "docstring": "Converts a 2D [n,m] label matrix into an [n,m,k] one hot 3D tensor\n\nNote that in the returned 3D matrix, abstain votes continue to be\nrepresented by 0s, not 1s.\n\nArgs:\nL: a [n,m] label matrix with categorical labels (0 = abstain)\nk: the number of classes that could appear in L\nif None, k is inferred as the max element in L", "source": "juraj-google-style"}
{"code": "def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> SpotifyArtistNode:\n        \n        if external_id is None:\n            graph: SpotifyArtistGraph = self._graph\n            items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name)\n            for item in items:\n                if item.name == name:\n                    external_id = item.external_id\n                    break\n\n        return SpotifyArtistNode(graph=self._graph, index=index, name=name, external_id=external_id)", "docstring": "Returns a new `SpotifyArtistNode` instance with the given index and name.\n\nArguments:\nindex (int): The index of the node to create.\nname (str): The name of the node to create.\nexternal_id (Optional[str]): The external ID of the node.", "source": "juraj-google-style"}
{"code": "def GetZipInfoByPathSpec(self, path_spec):\n    \n    location = getattr(path_spec, 'location', None)\n    if location is None:\n      raise errors.PathSpecError('Path specification missing location.')\n\n    if not location.startswith(self.LOCATION_ROOT):\n      raise errors.PathSpecError('Invalid location in path specification.')\n\n    if len(location) > 1:\n      return self._zip_file.getinfo(location[1:])\n\n    return None", "docstring": "Retrieves the ZIP info for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nzipfile.ZipInfo: a ZIP info object or None if not available.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "def crscode_to_string(codetype, code, format):\n    \n    link = 'http:\n    result = urllib2.urlopen(link).read()\n    if not isinstance(result, str):\n        result = result.decode()\n    return result", "docstring": "Lookup crscode on spatialreference.org and return in specified format.\n\nArguments:\n\n- *codetype*: \"epsg\", \"esri\", or \"sr-org\".\n- *code*: The code.\n- *format*: The crs format of the returned string. One of \"ogcwkt\", \"esriwkt\", or \"proj4\", but also several others...\n\nReturns:\n\n- Crs string in the specified format.", "source": "juraj-google-style"}
{"code": "def micros_to_timestamp(micros, timestamp):\n    seconds = long((micros / _MICROS_PER_SECOND))\n    micro_remainder = (micros % _MICROS_PER_SECOND)\n    timestamp.seconds = seconds\n    timestamp.nanos = (micro_remainder * _NANOS_PER_MICRO)", "docstring": "Convert microseconds from utc epoch to google.protobuf.timestamp.\n\nArgs:\nmicros: a long, number of microseconds since utc epoch.\ntimestamp: a google.protobuf.timestamp.Timestamp to populate.", "source": "codesearchnet"}
{"code": "def activate_backup_image(reset=False):\n    \n\n    dn = \"sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined\"\n\n    r = \"no\"\n\n    if reset is True:\n        r = \"yes\"\n\n    inconfig = .format(r)\n\n    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)\n\n    return ret", "docstring": "Activates the firmware backup image.\n\nCLI Example:\n\nArgs:\nreset(bool): Reset the CIMC device on activate.\n\n.. code-block:: bash\n\nsalt '*' cimc.activate_backup_image\nsalt '*' cimc.activate_backup_image reset=True", "source": "juraj-google-style"}
{"code": "def update(self, grads):\n    grads = nest.flatten(grads)\n    if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context():\n        distribution = distribute_lib.get_strategy()\n        is_finite_per_replica = distribution.extended.call_for_each_replica(_is_all_finite, args=(grads,))\n        is_finite = distribution.experimental_local_results(is_finite_per_replica)[0]\n    else:\n        is_finite = _is_all_finite(grads)\n\n    def update_if_finite_grads():\n        \n\n        def incr_loss_scale():\n            new_loss_scale = self.current_loss_scale * self.multiplier\n            return control_flow_ops.group(_assign_if_finite(self.current_loss_scale, new_loss_scale), self.counter.assign(0))\n        return cond.cond(self.counter + 1 >= self.growth_steps, incr_loss_scale, lambda: _op_in_graph_mode(self.counter.assign_add(1)))\n\n    def update_if_not_finite_grads():\n        \n        new_loss_scale = math_ops.maximum(self.current_loss_scale / self.multiplier, 1)\n        return control_flow_ops.group(self.counter.assign(0), self.current_loss_scale.assign(new_loss_scale))\n    update_op = cond.cond(is_finite, update_if_finite_grads, update_if_not_finite_grads)\n    should_apply_gradients = is_finite\n    return (update_op, should_apply_gradients)", "docstring": "Updates the value of the loss scale.\n\nArgs:\ngrads: A nested structure of unscaled gradients, each which is an\nall-reduced gradient of the loss with respect to a weight.\n\nReturns:\nupdate_op: In eager mode, None. In graph mode, an op to update the loss\nscale.\nshould_apply_gradients: Either a bool or a scalar boolean tensor. If\nFalse, the caller should skip applying `grads` to the variables this\nstep.", "source": "github-repos"}
{"code": "def decode(self, ids):\n    \n    _, tmp_file_path = tempfile.mkstemp()\n    wavfile.write(tmp_file_path, self._sample_rate, np.asarray(ids))\n    return tmp_file_path", "docstring": "Transform a sequence of float32 into a waveform.\n\nArgs:\nids: list of integers to be converted.\n\nReturns:\nPath to the temporary file where the waveform was saved.\n\nRaises:\nValueError: if the ids are not of the appropriate size.", "source": "juraj-google-style"}
{"code": "def removeTags(dom):\n    try:\n        string_type = basestring\n    except NameError:\n        string_type = str\n    element_stack = None\n    if (type(dom) in [list, tuple]):\n        element_stack = dom\n    elif isinstance(dom, HTMLElement):\n        element_stack = (dom.childs if dom.isTag() else [dom])\n    elif isinstance(dom, string_type):\n        element_stack = parseString(dom).childs\n    else:\n        element_stack = dom\n    output = ''\n    while element_stack:\n        el = element_stack.pop(0)\n        if (not (el.isTag() or el.isComment() or (not el.getTagName()))):\n            output += el.__str__()\n        if el.childs:\n            element_stack = (el.childs + element_stack)\n    return output", "docstring": "Remove all tags from `dom` and obtain plaintext representation.\n\nArgs:\ndom (str, obj, array): str, HTMLElement instance or array of elements.\n\nReturns:\nstr: Plain string without tags.", "source": "codesearchnet"}
{"code": "def find(self, title):\n        \n        if title not in self._titles:\n            raise KeyError(title)\n        return self._titles[title][0]", "docstring": "Return the first worksheet with the given title.\n\nArgs:\ntitle(str): title/name of the worksheet to return\nReturns:\nWorkSheet: contained worksheet object\nRaises:\nKeyError: if the spreadsheet has no no worksheet with the given ``title``", "source": "juraj-google-style"}
{"code": "def get_processid(config):\n    \n    pidfile = config.get('daemon', 'pidfile', fallback=None)\n    if pidfile is None:\n        raise ValueError(\"Configuration doesn't have pidfile option!\")\n\n    try:\n        with open(pidfile, 'r') as _file:\n            pid = _file.read().rstrip()\n            try:\n                pid = int(pid)\n            except ValueError:\n                raise ValueError(\"stale pid file with invalid data:{}\"\n                                 .format(pid))\n            else:\n                if pid in [-1, 1]:\n                    raise ValueError(\"invalid PID ({})\".format(pid))\n                else:\n                    return pid\n    except OSError as exc:\n        if exc.errno == 2:\n            print(\"CRITICAL: anycast-healthchecker could be down as pid file \"\n                  \"{} doesn't exist\".format(pidfile))\n            sys.exit(2)\n        else:\n            raise ValueError(\"error while reading pid file:{}\".format(exc))", "docstring": "Return process id of anycast-healthchecker.\n\nArguments:\nconfig (obj): A configparser object with the configuration of\nanycast-healthchecker.\n\nReturns:\nThe process id found in the pid file\n\nRaises:\nValueError in the following cases\n- pidfile option is missing from the configuration\n- pid is either -1 or 1\n- stale pidfile, either with no data or invalid data\n- failure to read pidfile", "source": "juraj-google-style"}
{"code": "def _read_file(file_name):\n    \n    with open(file_name) as config_file:\n        data = json.load(config_file)\n    return data", "docstring": "Read the file content and load it as JSON.\n\nArguments:\nfile_name (:py:class:`str`): The filename.\n\nReturns:\n:py:class:`dict`: The loaded JSON data.\n\nRaises:\n:py:class:`FileNotFoundError`: If the file is not found.", "source": "juraj-google-style"}
{"code": "def _get_first_approximation(self):\n    equalities = set(chain((implication.extract_equalities() for _, _, implication in self._iter_implications()))).union(self.ground_truth.extract_equalities())\n    var_assignments = {}\n    value_assignments = {}\n    for var in self.variables:\n        var_assignments[var] = {var}\n        value_assignments[var] = self._get_nonfalse_values(var)\n    for var, value in equalities:\n        if value in self.variables:\n            other_var = value\n            value_assignments[var] |= value_assignments[other_var]\n            for var_assignment in var_assignments[other_var]:\n                var_assignments[var].add(var_assignment)\n                var_assignments[var_assignment] = var_assignments[var]\n                value_assignments[var_assignment] = value_assignments[var]\n        else:\n            value_assignments[var].add(value)\n    return value_assignments", "docstring": "Get all (variable, value) combinations to consider.\n\nThis gets the (variable, value) combinations that the solver needs to\nconsider based on the equalities that appear in the implications. E.g.,\nwith the following implication:\nt1 = v1 => t1 = t2 | t3 = v2\nthe combinations to consider are\n(t1, v1) because t1 = v1 appears,\n(t2, v1) because t1 = t2 and t1 = v1 appear, and\n(t3, v2) because t3 = v2 appears.\n\nReturns:\nA dictionary D mapping strings (variables) to sets of strings\n(values). For two variables t1 and t2, if t1 = t2 is a possible\nassignment (by first approximation), then D[t1] and D[t2] point\nto the same memory location.", "source": "github-repos"}
{"code": "def __getattr__(self, attr):  \n    \n    if not self._protocol:\n      raise usb_exceptions.HandleClosedError()\n\n    val = getattr(self._protocol, attr)\n    if callable(val):\n      def _retry_wrapper(*args, **kwargs):\n        \n        result = _retry_usb_function(self._num_retries, val, *args, **kwargs)\n        _LOG.debug('LIBUSB FASTBOOT: %s(*%s, **%s) -> %s',\n                   attr, args, kwargs, result)\n        return result\n      return _retry_wrapper\n    return val", "docstring": "Fallthrough to underlying FastbootProtocol handler.\n\nArgs:\nattr: Attribute to get.\nReturns:\nEither the attribute from the device or a retrying function-wrapper\nif attr is a method on the device.", "source": "juraj-google-style"}
{"code": "def _get_tensors_for_gradient(x):\n    if not isinstance(x, composite_tensor.CompositeTensor):\n        return x\n    if not isinstance(x, CompositeTensorGradientProtocol):\n        raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source or gradient target.')\n    composite_gradient = x.__composite_gradient__\n    gradient_components = composite_gradient.get_gradient_components(x)\n    if gradient_components is x:\n        return x\n    return nest.map_structure(_get_tensors_for_gradient, gradient_components)", "docstring": "Returns the Tensors in `x` that should be differentiated.\n\nArgs:\nx: A `Tensor` or `CompositeTensor`.\n\nReturns:\nA `Tensor` or a nested structure of `Tensor`.", "source": "github-repos"}
{"code": "def add_output(self, name, value):\n        \n        self.template.add_output(Output(name, Value=value))", "docstring": "Simple helper for adding outputs.\n\nArgs:\nname (str): The name of the output to create.\nvalue (str): The value to put in the output.", "source": "juraj-google-style"}
{"code": "def _parse_hparams(hparams):\n  \n  prefixes = [\"agent_\", \"optimizer_\", \"runner_\", \"replay_buffer_\"]\n  ret = []\n\n  for prefix in prefixes:\n    ret_dict = {}\n    for key in hparams.values():\n      if prefix in key:\n        par_name = key[len(prefix):]\n        ret_dict[par_name] = hparams.get(key)\n    ret.append(ret_dict)\n\n  return ret", "docstring": "Split hparams, based on key prefixes.\n\nArgs:\nhparams: hyperparameters\n\nReturns:\nTuple of hparams for respectably: agent, optimizer, runner, replay_buffer.", "source": "juraj-google-style"}
{"code": "def release(self, subnets):\n    if (isinstance(subnets, str) or isinstance(subnets, IPNetwork)):\n        subnets = [subnets]\n    subnets_iter = ((str(subnet) if isinstance(subnet, IPNetwork) else subnet) for subnet in subnets)\n    try:\n        with self._create_lock():\n            for subnet in subnets_iter:\n                self._release(self.create_lease_object_from_subnet(subnet))\n    except (utils.TimerException, IOError):\n        raise LagoSubnetLeaseLockException(self.path)", "docstring": "Free the lease of the given subnets\n\nArgs:\nsubnets (list of str or netaddr.IPAddress): dotted ipv4 subnet in\nCIDR notation (for example ```192.168.200.0/24```) or IPAddress\nobject.\n\nRaises:\nLagoSubnetLeaseException: If subnet is a str and can't be parsed\nLagoSubnetLeaseLockException:\nIf the lock to self.path can't be acquired.", "source": "codesearchnet"}
{"code": "def read_from_tfrecord(file_pattern: str, coder: Optional[coders.BytesCoder]=coders.BytesCoder(), compression_type: str='AUTO', validate: Optional[bool]=True):\n    return ReadFromTFRecord(file_pattern=file_pattern, compression_type=getattr(CompressionTypes, compression_type), validate=validate) | beam.Map(lambda s: beam.Row(record=s))", "docstring": "Reads data from TFRecord.\n\nArgs:\nfile_pattern (str): A file glob pattern to read TFRecords from.\ncoder (coders.BytesCoder): Coder used to decode each record.\ncompression_type (CompressionTypes): Used to handle compressed input files.\nDefault value is CompressionTypes.AUTO, in which case the file_path's\nextension will be used to detect the compression.\nvalidate (bool): Boolean flag to verify that the files exist during the\npipeline creation time.", "source": "github-repos"}
{"code": "def parse_genetic_models(models_info, case_id):\n    genetic_models = []\n    if models_info:\n        for family_info in models_info.split(','):\n            splitted_info = family_info.split(':')\n            if (splitted_info[0] == case_id):\n                genetic_models = splitted_info[1].split('|')\n    return genetic_models", "docstring": "Parse the genetic models entry of a vcf\n\nArgs:\nmodels_info(str): The raw vcf information\ncase_id(str)\n\nReturns:\ngenetic_models(list)", "source": "codesearchnet"}
{"code": "def get_average_voltage(self, min_voltage=None, max_voltage=None):\n        \n        pairs_in_range = self._select_in_voltage_range(min_voltage,\n                                                       max_voltage)\n        if len(pairs_in_range) == 0:\n            return 0\n        total_cap_in_range = sum([p.mAh for p in pairs_in_range])\n        total_edens_in_range = sum([p.mAh * p.voltage for p in pairs_in_range])\n        return total_edens_in_range / total_cap_in_range", "docstring": "Average voltage for path satisfying between a min and max voltage.\n\nArgs:\nmin_voltage (float): The minimum allowable voltage for a given\nstep.\nmax_voltage (float): The maximum allowable voltage allowable for a\ngiven step.\n\nReturns:\nAverage voltage in V across the insertion path (a subset of the\npath can be chosen by the optional arguments)", "source": "juraj-google-style"}
{"code": "def authorization_code_pkce(self, client_id, code_verifier, code, redirect_uri, grant_type='authorization_code'):\n    return self.post('https:", "docstring": "Authorization code pkce grant\n\nThis is the OAuth 2.0 grant that mobile apps utilize in order to access an API.\nUse this endpoint to exchange an Authorization Code for a Token.\n\nArgs:\ngrant_type (str): Denotes the flow you're using. For authorization code pkce\nuse authorization_code\n\nclient_id (str): your application's client Id\n\ncode_verifier (str): Cryptographically random key that was used to generate\nthe code_challenge passed to /authorize.\n\ncode (str): The Authorization Code received from the /authorize Calls\n\nredirect_uri (str, optional): This is required only if it was set at\nthe GET /authorize endpoint. The values must match\n\nReturns:\naccess_token, id_token", "source": "codesearchnet"}
{"code": "def get_policies_from_aws(client, scope='Local'):\n    done = False\n    marker = None\n    policies = []\n    while (not done):\n        if marker:\n            response = client.list_policies(Marker=marker, Scope=scope)\n        else:\n            response = client.list_policies(Scope=scope)\n        policies += response['Policies']\n        if response['IsTruncated']:\n            marker = response['Marker']\n        else:\n            done = True\n    return policies", "docstring": "Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the\npolicies for the specified scope\n\nArgs:\nclient (:obj:`boto3.session.Session`): A boto3 Session object\nscope (`str`): The policy scope to use. Default: Local\n\nReturns:\n:obj:`list` of `dict`", "source": "codesearchnet"}
{"code": "def events_from_file(filepath):\n    records = list(tf_record.tf_record_iterator(filepath))\n    result = []\n    for r in records:\n        event = event_pb2.Event()\n        event.ParseFromString(r)\n        result.append(event)\n    return result", "docstring": "Returns all events in a single event file.\n\nArgs:\nfilepath: Path to the event file.\n\nReturns:\nA list of all tf.compat.v1.Event protos in the event file.", "source": "github-repos"}
{"code": "def _TopKGrad(op: ops.Operation, grad, _):\n    in_shape = array_ops.shape(op.inputs[0])\n    ind_shape = array_ops.shape(op.outputs[1])\n    ind_lastdim = array_ops.gather(math_ops.cast(ind_shape, dtypes.int64), array_ops.size(ind_shape) - 1)\n    ind_2d = array_ops.reshape(op.outputs[1], array_ops_stack.stack([-1, ind_lastdim]))\n    in_lastdim = array_ops.gather(math_ops.cast(in_shape, dtypes.int64), array_ops.size(in_shape) - 1)\n    outerdim = array_ops.shape(ind_2d)[0]\n    ind = array_ops.reshape(ind_2d + math_ops.cast(array_ops.expand_dims(math_ops.range(0, math_ops.cast(outerdim, dtypes.int64) * in_lastdim, in_lastdim), -1), dtypes.int32), [-1])\n    return [array_ops.reshape(array_ops.scatter_nd(array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]), [math_ops.reduce_prod(in_shape)]), in_shape), array_ops.zeros([], dtype=dtypes.int32)]", "docstring": "Return the gradients for TopK.\n\nArgs:\nop: The TopKOp for which we need to generate gradients.\ngrad: Tensor. The gradients passed to the TopKOp.\n\nReturns:\nA list of two tensors, the first being the gradient w.r.t to the input and\nTopK, and the second being the gradient w.r.t. to the indices (all zero).", "source": "github-repos"}
{"code": "def fmt_addr_raw(addr, reverse=True):\n    \n    addr = addr.replace(':', '')\n    raw_addr = [int(addr[i:i+2], 16) for i in range(0, len(addr), 2)]\n    if reverse:\n        raw_addr.reverse()\n\n    \n    if sys.version_info[0] == 2:\n        return str(bytearray(raw_addr))\n    return bytearray(raw_addr)", "docstring": "Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence.\n\nArgs:\naddr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format.\nreverse (bool): True if the byte ordering should be reversed in the output.\n\nReturns:\nA bytearray containing the converted address.", "source": "juraj-google-style"}
{"code": "def read_dftbp(filename):\n    \n\n    infile = open(filename, 'r')\n\n    lines = infile.readlines()\n\n    \n    for ss in lines:\n        if ss.strip().startswith('\n            lines.remove(ss)\n\n    natoms = int(lines[0].split()[0])\n    symbols = lines[1].split()\n\n    if (lines[0].split()[1].lower() == 'f'):\n        is_scaled = True\n        scale_pos = 1\n        scale_latvecs = dftbpToBohr\n    else:\n        is_scaled = False\n        scale_pos = dftbpToBohr\n        scale_latvecs = dftbpToBohr\n\n    \n    positions = []\n    expaned_symbols = []\n\n    for ii in range(2, natoms+2):\n        lsplit = lines[ii].split()\n\n        expaned_symbols.append(symbols[int(lsplit[1]) - 1])\n        positions.append([float(ss)*scale_pos for ss in lsplit[2:5]])\n\n    \n    origin = [float(ss) for ss in lines[natoms+2].split()]\n\n    \n    cell = []\n\n    for ii in range(natoms+3, natoms+6):\n        lsplit = lines[ii].split()\n\n        cell.append([float(ss)*scale_latvecs for ss in lsplit[:3]])\n    cell = np.array(cell)\n\n    if is_scaled:\n        atoms = Atoms(symbols=expaned_symbols,\n                      cell=cell,\n                      scaled_positions=positions)\n    else:\n        atoms = Atoms(symbols=expaned_symbols,\n                      cell=cell,\n                      positions=positions)\n\n    return atoms", "docstring": "Reads DFTB+ structure files in gen format.\n\nArgs:\nfilename: name of the gen-file to be read\n\nReturns:\natoms: an object of the phonopy.Atoms class, representing the structure\nfound in filename", "source": "juraj-google-style"}
{"code": "def migrate_database(adapter):\n    \n    \n    all_variants = adapter.get_variants()\n    nr_variants = all_variants.count()\n    nr_updated = 0\n    with progressbar(all_variants, label=\"Updating variants\", length=nr_variants) as bar:\n        for variant in bar:\n            \n            if 'chrom' in variant:\n                continue\n            nr_updated += 1\n            splitted_id = variant['_id'].split('_')\n            \n            chrom = splitted_id[0]\n            start = int(splitted_id[1])\n            ref = splitted_id[2]\n            alt = splitted_id[3]\n            \n            \n            end = start + (max(len(ref), len(alt)) - 1)\n            \n            adapter.db.variant.find_one_and_update(\n                {'_id': variant['_id']},\n                {\n                    '$set': {\n                        'chrom': chrom,\n                        'start': start,\n                        'end': end\n                    }\n                }\n            )\n    \n    return nr_updated", "docstring": "Migrate an old loqusdb instance to 1.0\n\nArgs:\nadapter\n\nReturns:\nnr_updated(int): Number of variants that where updated", "source": "juraj-google-style"}
{"code": "def str_internal(self, is_recursive=False):\n    printable_name = self.__class__.__name__\n    if hasattr(self, 'step_name'):\n        printable_name += ' %s' % self.name_context.logging_name()\n        if is_recursive:\n            return '<%s>' % printable_name\n    if self.spec is None:\n        printable_fields = []\n    else:\n        printable_fields = operation_specs.worker_printable_fields(self.spec)\n    if not is_recursive and getattr(self, 'receivers', []):\n        printable_fields.append('receivers=[%s]' % ', '.join([str(receiver) for receiver in self.receivers]))\n    return '<%s %s>' % (printable_name, ', '.join(printable_fields))", "docstring": "Internal helper for __str__ that supports recursion.\n\nWhen recursing on receivers, keep the output short.\nArgs:\nis_recursive: whether to omit some details, particularly receivers.\nReturns:\nCompact string representing this object.", "source": "github-repos"}
{"code": "def create_handler(Model, name=None, **kwds):\n\n    async def action_handler(service, action_type, payload, props, notify=True, **kwds):\n        if (action_type == get_crud_action('create', (name or Model))):\n            try:\n                message_props = {}\n                if ('correlation_id' in props):\n                    message_props['correlation_id'] = props['correlation_id']\n                for requirement in Model.required_fields():\n                    field_name = requirement.name\n                    if ((not (field_name in payload)) and (field_name != 'id')):\n                        raise ValueError(('Required field not found in payload: %s' % field_name))\n                new_model = Model(**payload)\n                new_model.save()\n                if notify:\n                    (await service.event_broker.send(payload=ModelSerializer().serialize(new_model), action_type=change_action_status(action_type, success_status()), **message_props))\n            except Exception as err:\n                if notify:\n                    (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))\n                else:\n                    raise err\n    return action_handler", "docstring": "This factory returns an action handler that creates a new instance of\nthe specified model when a create action is recieved, assuming the\naction follows nautilus convetions.\n\nArgs:\nModel (nautilus.BaseModel): The model to create when the action\nreceived.\n\nReturns:\nfunction(action_type, payload): The action handler for this model", "source": "codesearchnet"}
{"code": "async def iter(self, url: Union[(str, methods)], data: Optional[MutableMapping]=None, headers: Optional[MutableMapping]=None, *, limit: int=200, iterkey: Optional[str]=None, itermode: Optional[str]=None, minimum_time: Optional[int]=None, as_json: Optional[bool]=None) -> AsyncIterator[dict]:\n    itervalue = None\n    if (not data):\n        data = {}\n    last_request_time = None\n    while True:\n        current_time = time.time()\n        if (minimum_time and last_request_time and ((last_request_time + minimum_time) > current_time)):\n            (await self.sleep(((last_request_time + minimum_time) - current_time)))\n        (data, iterkey, itermode) = sansio.prepare_iter_request(url, data, iterkey=iterkey, itermode=itermode, limit=limit, itervalue=itervalue)\n        last_request_time = time.time()\n        response_data = (await self.query(url, data, headers, as_json))\n        itervalue = sansio.decode_iter_request(response_data)\n        for item in response_data[iterkey]:\n            (yield item)\n        if (not itervalue):\n            break", "docstring": "Iterate over a slack API method supporting pagination\n\nWhen using :class:`slack.methods` the request is made `as_json` if available\n\nArgs:\nurl: :class:`slack.methods` or url string\ndata: JSON encodable MutableMapping\nheaders:\nlimit: Maximum number of results to return per call.\niterkey: Key in response data to iterate over (required for url string).\nitermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)\nminimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0).\nIf not reached the client will sleep for the remaining time.\nas_json: Post JSON to the slack API\nReturns:\nAsync iterator over `response_data[key]`", "source": "codesearchnet"}
{"code": "def get_object_metadata(self, request):\n    file_ = self.get_file(request.bucket, request.object)\n    return file_.get_metadata()", "docstring": "Retrieves an object's metadata.\n\nArgs:\nrequest: (GetRequest) input message\n\nReturns:\n(Item) The response message.", "source": "github-repos"}
{"code": "def should_stop_early(self) -> bool:\n    if not self._trial.measurements:\n        return False\n    return self._should_stop_early_fn(self._trial)", "docstring": "Tells whether current trial should be stopped early.\n\nIn `pg.sample`, an optional `EarlyStoppingPolicy` can be provided, which is\nuseful for terminating trials which are progressive evaluated. Progressive\nevaluation on examples can be achieved by calling `feedback.add_measurement`\nmultiple times at different steps. In-between these steps, users can call\nthis method to determine if current trial is considered less competitive by\nthe early stopping policy, and thus can be abandoned. In that case, users\nshould call `feedback.skip()` to abandon current trial without feeding back\nthe reward to the search algorithm.\n\nReturns:\nIf current trial can be stopped early.", "source": "github-repos"}
{"code": "def _load_partition_graphs(self, client_partition_graphs, validate):\n    self._debug_graphs = {}\n    self._node_devices = {}\n    partition_graphs_and_device_names = []\n    for device_name in self._device_names:\n        partition_graph = None\n        if device_name in self._dump_graph_file_paths:\n            partition_graph = _load_graph_def_from_event_file(self._dump_graph_file_paths[device_name])\n        else:\n            logging.warn('Failed to load partition graphs for device %s from disk. As a fallback, the client graphs will be used. This may cause mismatches in device names.' % device_name)\n            partition_graph = self._find_partition_graph(client_partition_graphs, device_name)\n        if partition_graph:\n            partition_graphs_and_device_names.append((partition_graph, device_name))\n    for partition_graph, maybe_device_name in partition_graphs_and_device_names:\n        debug_graph = debug_graphs.DebugGraph(partition_graph, device_name=maybe_device_name)\n        self._debug_graphs[debug_graph.device_name] = debug_graph\n        self._collect_node_devices(debug_graph)\n        if validate and debug_graph.device_name in self._dump_tensor_data:\n            self._validate_dump_with_graphs(debug_graph.device_name)", "docstring": "Load and process partition graphs.\n\nLoad the graphs; parse the input and control input structure; obtain the\ndevice and op type of each node; remove the Copy and debug ops inserted\nby the debugger. The gathered information can be used to validate the\ntensor dumps.\n\nArgs:\nclient_partition_graphs: A repeated field of GraphDefs representing the\npartition graphs executed by the TensorFlow runtime, from the Python\nclient. These partition graphs are used only if partition graphs\ncannot be loaded from the dump directory on the file system.\nvalidate: (`bool`) Whether the dump files are to be validated against the\npartition graphs.\n\nRaises:\nValueError: If the partition GraphDef of one or more devices fail to be\nloaded.", "source": "github-repos"}
{"code": "def quantize(self, input_grid):\n        \n        pixels = {}\n        for i in range(self.max_bin+1):\n            pixels[i] = []\n\n        data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment\n        data[data < 0] = -1\n        data[data > self.max_bin] = self.max_bin\n        good_points = np.where(data >= 0)\n        for g in np.arange(good_points[0].shape[0]):\n            pixels[data[(good_points[0][g], good_points[1][g])]].append((good_points[0][g], good_points[1][g]))\n        return pixels, data", "docstring": "Quantize a grid into discrete steps based on input parameters.\n\nArgs:\ninput_grid: 2-d array of values\n\nReturns:\nDictionary of value pointing to pixel locations, and quantized 2-d array of data", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected, data_format, dtype, use_gpu, op_name):\n    if use_gpu and (not test.is_gpu_available(cuda_only=True)):\n        self.skipTest('GPU not available')\n    results = []\n    result = self._SetupValuesForDevice(tensor_in_sizes, filter_in_sizes, stride, padding, data_format, dtype, use_gpu=use_gpu, op_name=op_name)\n    results.append(result)\n    with self.cached_session() as sess:\n        values = self.evaluate(results)\n        for value in values:\n            tf_logging.debug('expected = %s', expected)\n            tf_logging.debug('actual = %s', value)\n            self.assertAllCloseAccordingToType(expected, value.flatten())", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions [batch, input_x, input_y,\ninput_z, input_depth].\nfilter_in_sizes: Filter tensor dimensions [kernel_x, kernel_y, kernel_z,\ninput_depth, output_depth].\nstride: [x_stride, y_stride, z_stride]\npadding: Padding type.\nexpected: Value that the output of computation should match\ndata_format: Format of the data tensors.\ndtype: Data type for inputs and outputs.\nuse_gpu: True if the operations should be run on GPU\nop_name: Name of the op to be tested\n\nReturns:\nNone", "source": "github-repos"}
{"code": "def alias_inplace_update(x, i, v):\n    return _inplace_helper(x, i, v, gen_array_ops.inplace_update)", "docstring": "Applies an inplace update on input x at index i with value v. Aliases x.\n\nIf i is None, x and v must be the same shape. Computes\nx = v;\nIf i is a scalar, x has a rank 1 higher than v's. Computes\nx[i, :] = v;\nOtherwise, x and v must have the same rank. Computes\nx[i, :] = v;\n\nArgs:\nx: A Tensor.\ni: None, a scalar or a vector.\nv: A Tensor.\n\nReturns:\nReturns x.", "source": "github-repos"}
{"code": "def parse_location(location):\n\n    def split_dms(text, hemisphere):\n        'Split degrees, minutes and seconds string.\\n\\n        Args:\\n            text (str): Text to split\\n\\n        Returns::\\n            float: Decimal degrees\\n        '\n        out = []\n        sect = []\n        for i in text:\n            if i.isdigit():\n                sect.append(i)\n            else:\n                out.append(sect)\n                sect = []\n        (d, m, s) = [float(''.join(i)) for i in out]\n        if (hemisphere in 'SW'):\n            (d, m, s) = [((- 1) * x) for x in (d, m, s)]\n        return to_dd(d, m, s)\n    for sep in ';, ':\n        chunks = location.split(sep)\n        if (len(chunks) == 2):\n            if chunks[0].endswith('N'):\n                latitude = float(chunks[0][:(- 1)])\n            elif chunks[0].endswith('S'):\n                latitude = ((- 1) * float(chunks[0][:(- 1)]))\n            else:\n                latitude = float(chunks[0])\n            if chunks[1].endswith('E'):\n                longitude = float(chunks[1][:(- 1)])\n            elif chunks[1].endswith('W'):\n                longitude = ((- 1) * float(chunks[1][:(- 1)]))\n            else:\n                longitude = float(chunks[1])\n            return (latitude, longitude)\n        elif (len(chunks) == 4):\n            if chunks[0].endswith(('s', '\"')):\n                latitude = split_dms(chunks[0], chunks[1])\n            else:\n                latitude = float(chunks[0])\n                if (chunks[1] == 'S'):\n                    latitude = ((- 1) * latitude)\n            if chunks[2].endswith(('s', '\"')):\n                longitude = split_dms(chunks[2], chunks[3])\n            else:\n                longitude = float(chunks[2])\n                if (chunks[3] == 'W'):\n                    longitude = ((- 1) * longitude)\n            return (latitude, longitude)", "docstring": "Parse latitude and longitude from string location.\n\nArgs:\nlocation (str): String to parse\n\nReturns:\ntuple of float: Latitude and longitude of location", "source": "codesearchnet"}
{"code": "def _validate_testbed_name(name):\n    if not name:\n        raise MoblyConfigError(\"Test bed names can't be empty.\")\n    name = str(name)\n    for char in name:\n        if char not in utils.valid_filename_chars:\n            raise MoblyConfigError('Char \"%s\" is not allowed in test bed names.' % char)", "docstring": "Validates the name of a test bed.\n\nSince test bed names are used as part of the test run id, it needs to meet\ncertain requirements.\n\nArgs:\nname: The test bed's name specified in config file.\n\nRaises:\nMoblyConfigError: The name does not meet any criteria.", "source": "github-repos"}
{"code": "def write(self, session, directory, name, replaceParamFile=None, **kwargs):\n    name_split = name.split('.')\n    name = name_split[0]\n    extension = ''\n    if (len(name_split) >= 2):\n        extension = name_split[(- 1)]\n    try:\n        name = self._namePreprocessor(name)\n    except:\n        'DO NOTHING'\n    if (extension == ''):\n        filename = '{0}.{1}'.format(name, self.fileExtension)\n    else:\n        filename = '{0}.{1}'.format(name, extension)\n    filePath = os.path.join(directory, filename)\n    with io_open(filePath, 'w') as openFile:\n        self._write(session=session, openFile=openFile, replaceParamFile=replaceParamFile, **kwargs)", "docstring": "Write from database back to file.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\ndirectory (str): Directory where the file will be written.\nname (str): The name of the file that will be created (including the file extension is optional).\nreplaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if\nthe file you are writing contains replacement parameters.", "source": "codesearchnet"}
{"code": "def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):\n    resample = resample if resample is not None else PIL.Image.NEAREST\n    self._ensure_format_supported(image)\n    if not isinstance(image, PIL.Image.Image):\n        image = self.to_pil_image(image)\n    return image.rotate(angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor)", "docstring": "Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees\ncounter clockwise around its centre.\n\nArgs:\nimage (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\nThe image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before\nrotating.\n\nReturns:\nimage: A rotated `PIL.Image.Image`.", "source": "github-repos"}
{"code": "def true_num_genes(model, custom_spont_id=None):\n    \n    true_num = 0\n    for gene in model.genes:\n        if not is_spontaneous(gene, custom_id=custom_spont_id):\n            true_num += 1\n    return true_num", "docstring": "Return the number of genes in a model ignoring spontaneously labeled genes.\n\nArgs:\nmodel (Model):\ncustom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nint: Number of genes excluding spontaneous genes", "source": "juraj-google-style"}
{"code": "def create_analyzer_ui(debug_dump, tensor_filters=None, ui_type='readline', on_ui_exit=None, config=None):\n    if config is None:\n        config = cli_config.CLIConfig()\n    analyzer = DebugAnalyzer(debug_dump, config=config)\n    if tensor_filters:\n        for tensor_filter_name in tensor_filters:\n            analyzer.add_tensor_filter(tensor_filter_name, tensor_filters[tensor_filter_name])\n    cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)\n    cli.register_command_handler('list_tensors', analyzer.list_tensors, analyzer.get_help('list_tensors'), prefix_aliases=['lt'])\n    cli.register_command_handler('node_info', analyzer.node_info, analyzer.get_help('node_info'), prefix_aliases=['ni'])\n    cli.register_command_handler('list_inputs', analyzer.list_inputs, analyzer.get_help('list_inputs'), prefix_aliases=['li'])\n    cli.register_command_handler('list_outputs', analyzer.list_outputs, analyzer.get_help('list_outputs'), prefix_aliases=['lo'])\n    cli.register_command_handler('print_tensor', analyzer.print_tensor, analyzer.get_help('print_tensor'), prefix_aliases=['pt'])\n    cli.register_command_handler('print_source', analyzer.print_source, analyzer.get_help('print_source'), prefix_aliases=['ps'])\n    cli.register_command_handler('list_source', analyzer.list_source, analyzer.get_help('list_source'), prefix_aliases=['ls'])\n    cli.register_command_handler('eval', analyzer.evaluate_expression, analyzer.get_help('eval'), prefix_aliases=['ev'])\n    dumped_tensor_names = []\n    for datum in debug_dump.dumped_tensor_data:\n        dumped_tensor_names.append('%s:%d' % (datum.node_name, datum.output_slot))\n    cli.register_tab_comp_context(['print_tensor', 'pt'], dumped_tensor_names)\n    return cli", "docstring": "Create an instance of ReadlineUI based on a DebugDumpDir object.\n\nArgs:\ndebug_dump: (debug_data.DebugDumpDir) The debug dump to use.\ntensor_filters: (dict) A dict mapping tensor filter name (str) to tensor\nfilter (Callable).\nui_type: (str) requested UI type, only \"readline\" is supported.\non_ui_exit: (`Callable`) the callback to be called when the UI exits.\nconfig: A `cli_config.CLIConfig` object.\n\nReturns:\n(base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer\ncommands and tab-completions registered.", "source": "github-repos"}
{"code": "def gremove(pattern):\n    \n    for item in glob.glob(pattern):\n        if not remove(item):\n            return False\n    return True", "docstring": "Remove all file found by glob.glob(pattern).\n\nArgs:\npattern (str): Pattern of files to remove\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def monitoring_helper(service_addr, duration_ms, monitoring_level, num_queries):\n    if monitoring_level <= 0 or monitoring_level > 2:\n        sys.exit('Please choose a monitoring level between 1 and 2.')\n    for query in range(0, num_queries):\n        res = profiler_client.monitor(service_addr, duration_ms, monitoring_level)\n        print('Cloud TPU Monitoring Results (Sample ', query, '):\\n\\n', res)", "docstring": "Helper function to print monitoring results.\n\nHelper function to print monitoring results for num_queries times.\n\nArgs:\nservice_addr: Address of the TPU profiler service.\nduration_ms: Duration of one monitoring sample in milliseconds.\nmonitoring_level: An integer between 1 and 2. Level 2 is more verbose than\nlevel 1 and shows more metrics.\nnum_queries: Number of monitoring samples to collect.", "source": "github-repos"}
{"code": "def attach_template(self, _template, _key, **unbound_var_values):\n    if (_key in unbound_var_values):\n        raise ValueError(('%s specified twice.' % _key))\n    unbound_var_values[_key] = self\n    return _template.as_layer().construct(**unbound_var_values)", "docstring": "Attaches the template to this such that _key=this layer.\n\nNote: names were chosen to avoid conflicts with any likely unbound_var keys.\n\nArgs:\n_template: The template to construct.\n_key: The key that this layer should replace.\n**unbound_var_values: The values for the unbound_vars.\nReturns:\nA new layer with operation applied.\nRaises:\nValueError: If _key is specified twice or there is a problem computing the\ntemplate.", "source": "codesearchnet"}
{"code": "def needs_keras_history(tensors, ignore_call_context=False):\n    input_tensors = nest.flatten(tensors)\n    if call_context().in_call and (not ignore_call_context):\n        return False\n    if all((getattr(tensor, '_keras_history', None) is not None for tensor in input_tensors)):\n        return False\n    return uses_keras_history(tensors)", "docstring": "Check if any Tensors need to be wrapped in TensorFlowOpLayers.\n\nThis will never return True inside a sublayer, because sublayers\ndo not need to create Keras History. Otherwise, this returns True\nif one or more of `tensors` originates from a `keras.Input` and\ndoes not have `_keras_history` set.\n\nArgs:\ntensors: An arbitrary nested structure of Tensors.\nignore_call_context: Whether to ignore the check of if currently\noutside of a `call` context. This is `True` when creating\nKerasHistory inside `Node`, where we always know that Tensors\nare being used with the Functional API.\n\nReturns:\nBool, whether at least one Tensor needs to be wrapped.", "source": "github-repos"}
{"code": "def recipe_sdf_to_bigquery(config, auth_write, partner_id, file_types, filter_type, filter_ids, dataset, version, table_suffix, time_partitioned_table, create_single_day_table):\n    dataset(config, {'auth': auth_write, 'dataset': dataset})\n    sdf(config, {'auth': 'user', 'version': version, 'partner_id': partner_id, 'file_types': file_types, 'filter_type': filter_type, 'read': {'filter_ids': {'single_cell': True, 'values': filter_ids}}, 'time_partitioned_table': time_partitioned_table, 'create_single_day_table': create_single_day_table, 'dataset': dataset, 'table_suffix': table_suffix})", "docstring": "Download SDF reports into a BigQuery table.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\npartner_id (integer) - The sdf file types.\nfile_types (string_list) - The sdf file types.\nfilter_type (choice) - The filter type for the filter ids.\nfilter_ids (integer_list) - Comma separated list of filter ids for the request.\ndataset (string) - Dataset to be written to in BigQuery.\nversion (choice) - The sdf version to be returned.\ntable_suffix (string) - Optional: Suffix string to put at the end of the table name (Must contain alphanumeric or underscores)\ntime_partitioned_table (boolean) - Is the end table a time partitioned\ncreate_single_day_table (boolean) - Would you like a separate table for each day? This will result in an extra table each day and the end table with the most up to date SDF.", "source": "github-repos"}
{"code": "def reflection(normal, origin=(0, 0, 0)):\n    n = (np.array(normal, dtype=float) / np.linalg.norm(normal))\n    (u, v, w) = n\n    translation = np.eye(4)\n    translation[(0:3, 3)] = (- np.array(origin))\n    xx = (1 - (2 * (u ** 2)))\n    yy = (1 - (2 * (v ** 2)))\n    zz = (1 - (2 * (w ** 2)))\n    xy = (((- 2) * u) * v)\n    xz = (((- 2) * u) * w)\n    yz = (((- 2) * v) * w)\n    mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0], [0, 0, 0, 1]]\n    if (np.linalg.norm(origin) > 1e-06):\n        mirror_mat = np.dot(np.linalg.inv(translation), np.dot(mirror_mat, translation))\n    return SymmOp(mirror_mat)", "docstring": "Returns reflection symmetry operation.\n\nArgs:\nnormal (3x1 array): Vector of the normal to the plane of\nreflection.\norigin (3x1 array): A point in which the mirror plane passes\nthrough.\n\nReturns:\nSymmOp for the reflection about the plane", "source": "codesearchnet"}
{"code": "def area_frac_vs_chempot_plot(self, ref_delu, chempot_range, delu_dict=None, delu_default=0, increments=10, no_clean=False, no_doped=False):\n    delu_dict = (delu_dict if delu_dict else {})\n    chempot_range = sorted(chempot_range)\n    all_chempots = np.linspace(min(chempot_range), max(chempot_range), increments)\n    hkl_area_dict = {}\n    for hkl in self.all_slab_entries.keys():\n        hkl_area_dict[hkl] = []\n    for u in all_chempots:\n        delu_dict[ref_delu] = u\n        wulffshape = self.wulff_from_chempot(delu_dict=delu_dict, no_clean=no_clean, no_doped=no_doped, delu_default=delu_default)\n        for hkl in wulffshape.area_fraction_dict.keys():\n            hkl_area_dict[hkl].append(wulffshape.area_fraction_dict[hkl])\n    plt = pretty_plot(width=8, height=7)\n    axes = plt.gca()\n    for hkl in self.all_slab_entries.keys():\n        clean_entry = list(self.all_slab_entries[hkl].keys())[0]\n        if all([(a == 0) for a in hkl_area_dict[hkl]]):\n            continue\n        else:\n            plt.plot(all_chempots, hkl_area_dict[hkl], '--', color=self.color_dict[clean_entry], label=str(hkl))\n    plt.ylabel('Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$')\n    self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split('_')[1], axes, rect=[(- 0.0), 0, 0.95, 1], pad=5, ylim=[0, 1])\n    return plt", "docstring": "1D plot. Plots the change in the area contribution\nof each facet as a function of chemical potential.\n\nArgs:\nref_delu (sympy Symbol): The free variable chempot with the format:\nSymbol(\"delu_el\") where el is the name of the element.\nchempot_range (list): Min/max range of chemical potential to plot along\ndelu_dict (Dict): Dictionary of the chemical potentials to be set as\nconstant. Note the key should be a sympy Symbol object of the\nformat: Symbol(\"delu_el\") where el is the name of the element.\ndelu_default (float): Default value for all unset chemical potentials\nincrements (int): Number of data points between min/max or point\nof intersection. Defaults to 10 points.\n\nReturns:\n(Pylab): Plot of area frac on the Wulff shape\nfor each facet vs chemical potential.", "source": "codesearchnet"}
{"code": "def __init__(self, file_system, mount_point, environment_variables=None):\n    \n    super(FileSystemWinRegistryFileReader, self).__init__()\n    self._file_system = file_system\n    self._path_resolver = self._CreateWindowsPathResolver(\n        file_system, mount_point, environment_variables=environment_variables)", "docstring": "Initializes a Windows Registry file reader object.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system.\nmount_point (dfvfs.PathSpec): mount point path specification.\nenvironment_variables (Optional[list[EnvironmentVariableArtifact]]):\nenvironment variables.", "source": "juraj-google-style"}
{"code": "def parse_content(self, content):\n        \n        \n        \n        \n        self.active_lines_unparsed = get_active_lines(content) if content is not None else []\n        \n        self.active_settings = split_kv_pairs(content, use_partition=False) if content is not None else []", "docstring": "Main parsing class method which stores all interesting data from the content.\n\nArgs:\ncontent (context.content): Parser context content", "source": "juraj-google-style"}
{"code": "def _disc_kn(clearness_index, airmass, max_airmass=12):\n    \n    \n    kt = clearness_index\n    am = airmass\n\n    am = min(am, max_airmass)  \n\n    \n    kt2 = kt * kt  \n    kt3 = kt2 * kt  \n\n    if kt <= 0.6:\n        a = 0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3\n        b = 0.37 + 0.962*kt\n        c = -0.28 + 0.932*kt - 2.048*kt2\n    else:\n        a = -5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3\n        b = 41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3\n        c = -47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3\n\n    delta_kn = a + b * math.exp(c*am)\n\n    Knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4\n    Kn = Knc - delta_kn\n    return Kn, am", "docstring": "Calculate Kn for `disc`\n\nArgs:\nclearness_index : numeric\nairmass : numeric\nmax_airmass : float\nairmass > max_airmass is set to max_airmass before being used\nin calculating Kn.\n\nReturns:\nKn : numeric\nam : numeric\nairmass used in the calculation of Kn. am <= max_airmass.", "source": "juraj-google-style"}
{"code": "def crossing_times(ts, c=0.0, d=0.0):\n    ts = ts.squeeze()\n    if (ts.ndim is not 1):\n        raise ValueError('Currently can only use on single variable timeseries')\n    ts = (ts - c)\n    tsa = ts[0:(- 1)]\n    tsb = ts[1:]\n    zc = (np.nonzero((((tsa < 0) & (tsb >= 0)) | ((tsa > 0) & (tsb <= 0))))[0] + 1)\n    va = ts[(zc - 1)]\n    vb = ts[zc]\n    ct = (((np.abs(vb) * ts.tspan[(zc - 1)]) + (np.abs(va) * ts.tspan[zc])) / np.abs((vb - va)))\n    if (ts[0] == 0.0):\n        zc = np.r_[(np.array([0]), zc)]\n        ct = np.r_[(np.array([ts.tspan[0]]), ct)]\n    if ((d == 0.0) or (ct.shape[0] is 0)):\n        return ct\n    dc = (np.nonzero((((tsa < d) & (tsb >= d)) | ((tsa > (- d)) & (tsb <= (- d)))))[0] + 1)\n    splice = np.searchsorted(dc, zc)\n    which_zc = np.r_[(np.array([0]), (np.nonzero((splice[0:(- 1)] - splice[1:]))[0] + 1))]\n    return ct[which_zc]", "docstring": "For a single variable timeseries, find the times at which the\nvalue crosses ``c`` from above or below. Can optionally set a non-zero\n``d`` to impose the condition that the value must wander at least ``d``\nunits away from ``c`` between crossings.\n\nIf the timeseries begins (or ends) exactly at ``c``, then time zero\n(or the ending time) is also included as a crossing event,\nso that the boundaries of the first and last excursions are included.\n\nIf the actual crossing time falls between two time steps, linear\ninterpolation is used to estimate the crossing time.\n\nArgs:\nts: Timeseries (single variable)\n\nc (float): Critical value at which to report crossings.\n\nd (float): Optional min distance from c to be attained between crossings.\n\nReturns:\narray of float", "source": "codesearchnet"}
{"code": "def is_valid(container, path):\n    try:\n        tmp_hash_path = (container.filename + '.hash')\n        with open(tmp_hash_path, 'r') as tmp_file:\n            tmp_hash = tmp_file.readline()\n    except IOError:\n        LOG.info('No .hash-file in the tmp-directory.')\n    container_hash_path = (local.path(path) / 'gentoo.tar.bz2.hash')\n    if container_hash_path.exists():\n        with open(container_hash_path, 'r') as hash_file:\n            container_hash = hash_file.readline()\n            return (container_hash == tmp_hash)\n    return False", "docstring": "Checks if a container exists and is unpacked.\n\nArgs:\npath: The location where the container is expected.\n\nReturns:\nTrue if the container is valid, False if the container needs to\nunpacked or if the path does not exist yet.", "source": "codesearchnet"}
{"code": "def _FishScript(name, commands, default_options=None):\n    default_options = default_options or set()\n    global_options, options_map, subcommands_map = _GetMaps(name, commands, default_options)\n    fish_source = 'function __fish_using_command\\n    set cmd (commandline -opc)\\n    for i in (seq (count $cmd) 1)\\n        switch $cmd[$i]\\n        case \"-*\"\\n        case \"*\"\\n            if [ $cmd[$i] = $argv[1] ]\\n                return 0\\n            else\\n                return 1\\n            end\\n        end\\n    end\\n    return 1\\nend\\n\\nfunction __option_entered_check\\n    set cmd (commandline -opc)\\n    for i in (seq (count $cmd))\\n        switch $cmd[$i]\\n        case \"-*\"\\n            if [ $cmd[$i] = $argv[1] ]\\n                return 1\\n            end\\n        end\\n    end\\n    return 0\\nend\\n\\nfunction __is_prev_global\\n    set cmd (commandline -opc)\\n    set global_options {global_options}\\n    set prev (count $cmd)\\n\\n    for opt in $global_options\\n        if [ \"--$opt\" = $cmd[$prev] ]\\n            echo $prev\\n            return 0\\n        end\\n    end\\n    return 1\\nend\\n\\n'\n    subcommand_template = \"complete -c {name} -n '__fish_using_command {command}' -f -a {subcommand}\\n\"\n    flag_template = \"complete -c {name} -n '__fish_using_command {command};{prev_global_check} and __option_entered_check --{option}' -l {option}\\n\"\n    prev_global_check = ' and __is_prev_global;'\n    for command in set(subcommands_map.keys()).union(set(options_map.keys())):\n        for subcommand in subcommands_map[command]:\n            fish_source += subcommand_template.format(name=name, command=command, subcommand=subcommand)\n        for option in options_map[command].union(global_options):\n            check_needed = command != name\n            fish_source += flag_template.format(name=name, command=command, prev_global_check=prev_global_check if check_needed else '', option=option.lstrip('--'))\n    return fish_source.format(global_options=' '.join((f'\"{option}\"' for option in global_options)))", "docstring": "Returns a Fish script registering a completion function for the commands.\n\nArgs:\nname: The first token in the commands, also the name of the command.\ncommands: A list of all possible commands that tab completion can complete\nto. Each command is a list or tuple of the string tokens that make up\nthat command.\ndefault_options: A dict of options that can be used with any command. Use\nthis if there are flags that can always be appended to a command.\nReturns:\nA string which is the Fish script. Source the fish script to enable tab\ncompletion in Fish.", "source": "github-repos"}
{"code": "def search_stack_for_var(varname, verbose=util_arg.NOT_QUIET):\n    \n    curr_frame = inspect.currentframe()\n    if verbose:\n        print(' * Searching parent frames for: ' + six.text_type(varname))\n    frame_no = 0\n    while curr_frame.f_back is not None:\n        if varname in curr_frame.f_locals.keys():\n            if verbose:\n                print(' * Found local in frame: ' + six.text_type(frame_no))\n            return curr_frame.f_locals[varname]\n        if varname in curr_frame.f_globals.keys():\n            if verbose:\n                print(' * Found global in frame: ' + six.text_type(frame_no))\n            return curr_frame.f_globals[varname]\n        frame_no += 1\n        curr_frame = curr_frame.f_back\n    if verbose:\n        print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')\n    return None", "docstring": "Finds a varable (local or global) somewhere in the stack and returns the value\n\nArgs:\nvarname (str): variable name\n\nReturns:\nNone if varname is not found else its value", "source": "juraj-google-style"}
{"code": "def find(self, name):\n    collectors = self.get_collectors()\n    for collector in collectors:\n        if (name.lower() == collector['name'].lower()):\n            self.collector_id = collector['id']\n            return collector\n    return {'status': 'No results found.'}", "docstring": "Returns a dict of collector's details if found.\n\nArgs:\nname (str): name of collector searching for", "source": "codesearchnet"}
{"code": "def is_descriptor_class(desc, include_abstract=False):\n    return (isinstance(desc, type) and issubclass(desc, Descriptor) and (True if include_abstract else (not inspect.isabstract(desc))))", "docstring": "r\"\"\"Check calculatable descriptor class or not.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def get_impacted_files_from_tiny_model_summary(diff_with_last_commit: bool=False) -> List[str]:\n    repo = Repo(PATH_TO_REPO)\n    folder = Path(repo.working_dir)\n    if not diff_with_last_commit:\n        print(f'main is at {repo.refs.main.commit}')\n        print(f'Current head is at {repo.head.commit}')\n        commits = repo.merge_base(repo.refs.main, repo.head)\n        for commit in commits:\n            print(f'Branching commit: {commit}')\n    else:\n        print(f'main is at {repo.head.commit}')\n        commits = repo.head.commit.parents\n        for commit in commits:\n            print(f'Parent commit: {commit}')\n    if not os.path.isfile(folder / 'tests/utils/tiny_model_summary.json'):\n        return []\n    files = set()\n    for commit in commits:\n        with checkout_commit(repo, commit):\n            with open(folder / 'tests/utils/tiny_model_summary.json', 'r', encoding='utf-8') as f:\n                old_content = f.read()\n        with open(folder / 'tests/utils/tiny_model_summary.json', 'r', encoding='utf-8') as f:\n            new_content = f.read()\n        old_content = json.loads(old_content)\n        new_content = json.loads(new_content)\n        old_keys = set(old_content.keys())\n        new_keys = set(new_content.keys())\n        keys_with_diff = old_keys.symmetric_difference(new_keys)\n        common_keys = old_keys.intersection(new_keys)\n        for key in common_keys:\n            if old_content[key] != new_content[key]:\n                keys_with_diff.add(key)\n        impacted_model_classes = []\n        for key in keys_with_diff:\n            if key in new_keys:\n                impacted_model_classes.extend(new_content[key]['model_classes'])\n        with open(folder / 'src/transformers/__init__.py') as fp:\n            lines = fp.readlines()\n            new_lines = []\n            for line in lines:\n                if line == '_import_structure = {\\n':\n                    new_lines.append(line)\n                elif line == '\n                    break\n                elif len(new_lines) > 0:\n                    line = re.sub('is_.+_available\\\\(\\\\)', 'True', line)\n                    line = line.replace('OptionalDependencyNotAvailable', 'Exception')\n                    line = line.replace('Exception()', 'Exception')\n                    new_lines.append(line)\n        with tempfile.TemporaryDirectory() as tmpdirname:\n            with open(os.path.join(tmpdirname, 'temp_init.py'), 'w') as fp:\n                fp.write(''.join(new_lines))\n            spec = importlib.util.spec_from_file_location('temp_init', os.path.join(tmpdirname, 'temp_init.py'))\n            module = importlib.util.module_from_spec(spec)\n            spec.loader.exec_module(module)\n            import_structure = module._import_structure\n            reversed_structure = {}\n            for key, values in import_structure.items():\n                for value in values:\n                    reversed_structure[value] = key\n            for model_class in impacted_model_classes:\n                module = reversed_structure[model_class]\n                framework = ''\n                if model_class.startswith('TF'):\n                    framework = 'tf'\n                elif model_class.startswith('Flax'):\n                    framework = 'flax'\n                fn = f'modeling_{module.split('.')[-1]}.py' if framework == '' else f'modeling_{framework}_{module.split('.')[-1]}.py'\n                files.add(f'src.transformers.{module}.{fn}'.replace('.', 
os.path.sep).replace(f'{os.path.sep}py', '.py'))\n    return sorted(files)", "docstring": "Return a list of python modeling files that are impacted by the changes of `tiny_model_summary.json` in between:\n\n- the current head and the main branch if `diff_with_last_commit=False` (default)\n- the current head and its parent commit otherwise.\n\nReturns:\n`List[str]`: The list of Python modeling files that are impacted by the changes of `tiny_model_summary.json`.", "source": "github-repos"}
{"code": "def __add_scraped_requests_to_queue(self, queue_item, scraped_requests):\n    new_queue_items = []\n    for scraped_request in scraped_requests:\n        HTTPRequestHelper.patch_with_options(scraped_request, self.__options, queue_item)\n        if (not HTTPRequestHelper.complies_with_scope(queue_item, scraped_request, self.__options.scope)):\n            continue\n        if self.queue.has_request(scraped_request):\n            continue\n        scraped_request.depth = (queue_item.request.depth + 1)\n        if (self.__options.scope.max_depth is not None):\n            if (scraped_request.depth > self.__options.scope.max_depth):\n                continue\n        new_queue_item = self.queue.add_request(scraped_request)\n        new_queue_items.append(new_queue_item)\n    return new_queue_items", "docstring": "Convert the scraped requests to queue items, return them and also add them to the queue.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.\nnew_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.\n\nReturns:\nlist(:class:`nyawc.QueueItem`): The new queue items.", "source": "codesearchnet"}
{"code": "def _parse_batch_lastlog(last_log):\n        \n        regexp = re.compile('(-?[0-9]\\d*):\\W+(.*)')\n\n        wrong_commands = list()\n        for line in last_log:\n            result = regexp.match(line)\n            if result is not None:\n                status_code = result.group(1)\n                command = result.group(2)\n                if int(status_code) < 0:\n                    wrong_commands.append((status_code, command))\n\n        return wrong_commands", "docstring": "This static method will help reading the result of the commit, command by command.\n\nArgs:\nlast_log(list): A list containing, line by line, the result of committing the changes.\n\nReturns:\nA list of tuples that went wrong. The tuple will contain (*status_code*, *command*)", "source": "juraj-google-style"}
{"code": "def populate_ast_nsarg_orthologs(ast, species):\n    \n\n    ortholog_namespace = \"EG\"\n\n    if isinstance(ast, NSArg):\n        if re.match(ortholog_namespace, ast.canonical):\n            orthologs = bel.terms.orthologs.get_orthologs(\n                ast.canonical, list(species.keys())\n            )\n            for species_id in species:\n                if species_id in orthologs:\n                    orthologs[species_id][\"species_label\"] = species[species_id]\n\n            ast.orthologs = copy.deepcopy(orthologs)\n\n    \n    if hasattr(ast, \"args\"):\n        for arg in ast.args:\n            populate_ast_nsarg_orthologs(arg, species)\n\n    return ast", "docstring": "Recursively collect NSArg orthologs for BEL AST\n\nThis requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available\n\nArgs:\nast: AST at recursive point in belobj\nspecies: dictionary of species ids vs labels for or", "source": "juraj-google-style"}
{"code": "def refresh(self, updated_self):\n    logger.debug('refreshing binary attributes')\n    self.mimetype = updated_self.binary.mimetype\n    self.data = updated_self.binary.data", "docstring": "method to refresh binary attributes and data\n\nArgs:\nupdated_self (Resource): resource this binary data attaches to\n\nReturns:\nNone: updates attributes", "source": "codesearchnet"}
{"code": "def __init__(self, cipher_suites=None):\n        \n        super(TLS12AuthenticationSuite, self).__init__(cipher_suites)\n        self._protocol = ssl.PROTOCOL_TLSv1_2", "docstring": "Create a TLS12AuthenticationSuite object.\n\nArgs:\ncipher_suites (list): A list of strings representing the names of\ncipher suites to use. Overrides the default set of cipher\nsuites. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def _do_logoff(self):\n    session_uri = '/api/sessions/this-session'\n    self.delete(session_uri, logon_required=False)\n    self._session_id = None\n    self._session = None\n    self._headers.pop('X-API-Session', None)", "docstring": "Log off, unconditionally.\n\nRaises:\n\n:exc:`~zhmcclient.ServerAuthError`\n:exc:`~zhmcclient.ConnectionError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.HTTPError`", "source": "codesearchnet"}
{"code": "def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):\n  \n\n  monitor_process = None\n  try:\n    p = subprocess.Popen(args,\n                         cwd=cwd,\n                         env=os.environ,\n                         stdout=subprocess.PIPE,\n                         stderr=subprocess.STDOUT)\n\n    pids_to_kill = [p.pid]\n    script = ('import %s;%s._wait_and_kill(%s, %s)' %\n              (__name__, __name__, str(pid_to_wait), str(pids_to_kill)))\n    monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)\n    while p.poll() is None:\n      line = p.stdout.readline()\n\n      if not six.PY2:\n        line = line.decode()\n\n      if std_out_filter_fn is None or std_out_filter_fn(line):\n        sys.stdout.write(line)\n        \n  finally:\n    if monitor_process:\n      monitor_process.kill()", "docstring": "Start a process, and have it depend on another specified process.\n\nArgs:\nargs: the args of the process to start and monitor.\npid_to_wait: the process to wait on. If the process ends, also kill the started process.\nstd_out_filter_fn: a filter function which takes a string content from the stdout of the\nstarted process, and returns True if the string should be redirected to console stdout.\ncwd: the current working directory for the process to start.", "source": "juraj-google-style"}
{"code": "def process_python_objects(data, filepath=None):\n\n    def _process(value):\n        if isinstance(value, dict):\n            for (k, v) in value.items():\n                value[k] = _process(v)\n            return value\n        elif isfunction(value):\n            func = value\n            if hasattr(func, '_early'):\n                import types\n                fn = types.FunctionType(func.func_code, func.func_globals.copy(), name=func.func_name, argdefs=func.func_defaults, closure=func.func_closure)\n                fn.func_globals['this'] = EarlyThis(data)\n                fn.func_globals.update(get_objects())\n                spec = getargspec(func)\n                args = (spec.args or [])\n                if (len(args) not in (0, 1)):\n                    raise ResourceError('@early decorated function must take zero or one args only')\n                if args:\n                    value_ = fn(data)\n                else:\n                    value_ = fn()\n                return _process(value_)\n            elif hasattr(func, '_late'):\n                return SourceCode(func=func, filepath=filepath, eval_as_function=True)\n            elif (func.__name__ in package_rex_keys):\n                return SourceCode(func=func, filepath=filepath, eval_as_function=False)\n            else:\n                return func\n        else:\n            return value\n\n    def _trim(value):\n        if isinstance(value, dict):\n            for (k, v) in value.items():\n                if isfunction(v):\n                    if (v.__name__ == 'preprocess'):\n                        pass\n                    else:\n                        del value[k]\n                elif (ismodule(v) or k.startswith('__')):\n                    del value[k]\n                else:\n                    value[k] = _trim(v)\n        return value\n    data = _process(data)\n    data = _trim(data)\n    return data", "docstring": "Replace certain values in the given package data dict.\n\nDoes things like:\n* evaluates @early decorated functions, and replaces with return value;\n* converts functions into `SourceCode` instances so they can be serialized\nout to installed packages, and evaluated later;\n* strips some values (modules, __-leading variables) that are never to be\npart of installed packages.\n\nReturns:\ndict: Updated dict.", "source": "codesearchnet"}
{"code": "def CheckParenthesisSpacing(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  match = Search(r' (if\\(|for\\(|while\\(|switch\\()', line)\n  if match:\n    error(filename, linenum, 'whitespace/parens', 5,\n          'Missing space before ( in %s' % match.group(1))\n\n  \n  \n  \n  \n  \n  match = Search(r'\\b(if|for|while|switch)\\s*'\n                 r'\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$',\n                 line)\n  if match:\n    if len(match.group(2)) != len(match.group(4)):\n      if not (match.group(3) == ';' and\n              len(match.group(2)) == 1 + len(match.group(4)) or\n              not match.group(2) and Search(r'\\bfor\\s*\\(.*; \\)', line)):\n        error(filename, linenum, 'whitespace/parens', 5,\n              'Mismatching spaces inside () in %s' % match.group(1))\n    if len(match.group(2)) not in [0, 1]:\n      error(filename, linenum, 'whitespace/parens', 5,\n            'Should have zero or one spaces inside ( and ) in %s' %\n            match.group(1))", "docstring": "Checks for horizontal spacing around parentheses.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def array_to_jsbuffer(array):\n  \n  if array.ndim != 1:\n    raise TypeError('Only 1d arrays can be converted JS TypedArray.')\n  if array.dtype.name not in JS_ARRAY_TYPES:\n    raise TypeError('Array dtype not supported by JS TypedArray.')\n  js_type_name = array.dtype.name.capitalize() + 'Array'\n  data_base64 = base64.b64encode(array.tobytes()).decode('ascii')\n  code =  % (data_base64, js_type_name)\n  return code", "docstring": "Serialize 1d NumPy array to JS TypedArray.\n\nData is serialized to base64-encoded string, which is much faster\nand memory-efficient than json list serialization.\n\nArgs:\narray: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES.\n\nReturns:\nJS code that evaluates to a TypedArray as string.\n\nRaises:\nTypeError: if array dtype or shape not supported.", "source": "juraj-google-style"}
{"code": "def delete_group_maintainer(self, grp_name, user):\n        \n        self.service.delete_group_maintainer(\n            grp_name, user, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Delete the given user to the named group.\n\nBoth group and user must already exist for this to succeed.\n\nArgs:\nname (string): Name of group.\nuser (string): User to add to group.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n\n    def _should_dropout(p):\n        return not isinstance(p, float) or p < 1\n    if _should_dropout(self._input_keep_prob):\n        inputs = self._dropout(inputs, 'input', self._recurrent_input_noise, self._input_keep_prob)\n    output, new_state = cell_call_fn(inputs, state, **kwargs)\n    if _should_dropout(self._state_keep_prob):\n        shallow_filtered_substructure = nest.get_traverse_shallow_structure(self._dropout_state_filter, new_state)\n        new_state = self._dropout(new_state, 'state', self._recurrent_state_noise, self._state_keep_prob, shallow_filtered_substructure)\n    if _should_dropout(self._output_keep_prob):\n        output = self._dropout(output, 'output', self._recurrent_output_noise, self._output_keep_prob)\n    return (output, new_state)", "docstring": "Runs the wrapped cell and applies dropout.\n\nArgs:\ninputs: A tensor with wrapped cell's input.\nstate: A tensor or tuple of tensors with wrapped cell's state.\ncell_call_fn: Wrapped cell's method to use for step computation (cell's\n`__call__` or 'call' method).\n**kwargs: Additional arguments.\n\nReturns:\nA pair containing:\n\n- Output: A tensor with cell's output.\n- New state: A tensor or tuple of tensors with new wrapped cell's state.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n        \n        self.ListProfiles = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ProfileService/ListProfiles\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.ListProfilesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.ListProfilesResponse.FromString,\n        )\n        self.CreateProfile = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ProfileService/CreateProfile\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.CreateProfileRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__pb2.Profile.FromString,\n        )\n        self.GetProfile = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ProfileService/GetProfile\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.GetProfileRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__pb2.Profile.FromString,\n        )\n        self.UpdateProfile = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ProfileService/UpdateProfile\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.UpdateProfileRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__pb2.Profile.FromString,\n        )\n        self.DeleteProfile = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ProfileService/DeleteProfile\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.DeleteProfileRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.SearchProfiles = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ProfileService/SearchProfiles\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.SearchProfilesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_profile__service__pb2.SearchProfilesResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def dataset_docs_str(datasets=None):\n  \n  module_to_builder = make_module_to_builder_dict(datasets)\n\n  sections = sorted(list(module_to_builder.keys()))\n  section_tocs = []\n  section_docs = []\n  for section in sections:\n    builders = tf.nest.flatten(module_to_builder[section])\n    builders = sorted(builders, key=lambda b: b.name)\n    builder_docs = [document_single_builder(builder) for builder in builders]\n    section_doc = SECTION_DATASETS.format(\n        section_name=section, datasets=\"\\n\".join(builder_docs))\n    section_toc = create_section_toc(section, builders)\n\n\n    section_docs.append(section_doc)\n    section_tocs.append(section_toc)\n\n  full_doc = DOC.format(toc=\"\\n\".join(section_tocs),\n                        datasets=\"\\n\".join(section_docs))\n  return full_doc", "docstring": "Create dataset documentation string for given datasets.\n\nArgs:\ndatasets: list of datasets for which to create documentation.\nIf None, then all available datasets will be used.\n\nReturns:\nstring describing the datasets (in the MarkDown format).", "source": "juraj-google-style"}
{"code": "def _create_op_from_tf_operation(self, c_op, compute_device=True) -> 'Operation':\n    self._check_not_finalized()\n    ret = Operation._from_c_op(c_op=c_op, g=self)\n    name_key = ret.name.lower()\n    if name_key not in self._names_in_use:\n        self._names_in_use[name_key] = 1\n    self._create_op_helper(ret, compute_device=compute_device)\n    return ret", "docstring": "Creates an `Operation` in this graph from the supplied TF_Operation.\n\nThis method is like create_op() except the new Operation is constructed\nusing `c_op`. The returned Operation will have `c_op` as its _c_op\nfield. This is used to create Operation objects around TF_Operations created\nindirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).\n\nThis function does not call Operation._control_flow_post_processing or\nGraph._control_dependencies_for_inputs (since the inputs may not be\navailable yet). The caller is responsible for calling these methods.\n\nArgs:\nc_op: a wrapped TF_Operation\ncompute_device: (Optional.) If True, device functions will be executed to\ncompute the device property of the Operation.\n\nReturns:\nAn `Operation` object.", "source": "github-repos"}
{"code": "def image(request, data):\n    \n\n    \n    \n    try:\n        width = int(request.GET.get(\"w\", PYDENTICON_WIDTH))\n    except ValueError:\n        raise SuspiciousOperation(\"Identicon width must be a positive integer.\")\n    try:\n        height = int(request.GET.get(\"h\", PYDENTICON_HEIGHT))\n    except ValueError:\n        raise SuspiciousOperation(\"Identicon height must be a positive integer.\")\n    output_format = request.GET.get(\"f\", PYDENTICON_FORMAT)\n    try:\n        padding = [int(p) for p in request.GET[\"p\"].split(\",\")]\n    except KeyError:\n        padding = PYDENTICON_PADDING\n    except ValueError:\n        raise SuspiciousOperation(\"Identicon padding must consist out of 4 positive integers separated with commas.\")\n    if \"i\" in request.GET:\n        inverted = request.GET.get(\"i\")\n        if inverted.lower() == \"true\":\n            inverted = True\n        elif inverted.lower() == \"false\":\n            inverted = False\n        else:\n            raise SuspiciousOperation(\"Inversion parameter must be a boolean (true/false).\")\n    else:\n        inverted = PYDENTICON_INVERT\n\n    \n    if not isinstance(width, int) or width <= 0:\n        raise SuspiciousOperation(\"Identicon width must be a positive integer.\")\n    if not isinstance(height, int) or height <= 0:\n        raise SuspiciousOperation(\"Identicon height must be a positive integer.\")\n    if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:\n        raise SuspiciousOperation(\"Padding must be a 4-element tuple consisting out of positive integers.\")\n\n    \n    if output_format == \"png\":\n        content_type = \"image/png\"\n    elif output_format == \"ascii\":\n        content_type = \"text/plain\"\n    else:\n        raise SuspiciousOperation(\"Unsupported identicon format requested - '%s' % output_format\")\n\n    \n    generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,\n                          foreground = PYDENTICON_FOREGROUND, background = PYDENTICON_BACKGROUND,\n                          digest = PYDENTICON_DIGEST)\n\n    \n    content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted)\n\n    \n    response = HttpResponse(content, content_type=content_type)\n\n    return response", "docstring": "Generates identicon image based on passed data.\n\nArguments:\n\ndata - Data which should be used for generating an identicon. This data\nwill be used in order to create a digest which is used for generating the\nidenticon. If the data passed is a hex digest already, the digest will be\nused as-is.\n\nReturns:\n\nIdenticon image in raw format.", "source": "juraj-google-style"}
{"code": "def name(self):\n        \n        return ctypes.cast(self.sName, ctypes.c_char_p).value.decode()", "docstring": "Returns the name of the device.\n\nArgs:\nself (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance\n\nReturns:\nDevice name.", "source": "juraj-google-style"}
{"code": "def add_prop_descriptor_to_class(self, class_name, new_class_attrs, names_with_refs, container_names, dataspecs):\n    from .bases import ContainerProperty\n    from .dataspec import DataSpec\n    name = self.name\n    if (name in new_class_attrs):\n        raise RuntimeError(('Two property generators both created %s.%s' % (class_name, name)))\n    new_class_attrs[name] = self\n    if self.has_ref:\n        names_with_refs.add(name)\n    if isinstance(self, BasicPropertyDescriptor):\n        if isinstance(self.property, ContainerProperty):\n            container_names.add(name)\n        if isinstance(self.property, DataSpec):\n            dataspecs[name] = self", "docstring": "``MetaHasProps`` calls this during class creation as it iterates\nover properties to add, to update its registry of new properties.\n\nThe parameters passed in are mutable and this function is expected to\nupdate them accordingly.\n\nArgs:\nclass_name (str) :\nname of the class this descriptor is added to\n\nnew_class_attrs(dict[str, PropertyDescriptor]) :\nmapping of attribute names to PropertyDescriptor that this\nfunction will update\n\nnames_with_refs (set[str]) :\nset of all property names for properties that also have\nreferences, that this function will update\n\ncontainer_names (set[str]) :\nset of all property names for properties that are\ncontainer props, that this function will update\n\ndataspecs(dict[str, PropertyDescriptor]) :\nmapping of attribute names to PropertyDescriptor for DataSpec\nproperties that this function will update\n\nReturn:\nNone", "source": "codesearchnet"}
{"code": "def get_named_tensor(self, name):\n    if (name in self.named_tensors):\n        return (True, self.named_tensors[name])\n    else:\n        return (False, None)", "docstring": "Returns a named tensor if available.\n\nReturns:\nvalid: True if named tensor found, False otherwise\ntensor: If valid, will be a tensor, otherwise None", "source": "codesearchnet"}
{"code": "def encode_all_features(dataset, vocabulary):\n  \n  def my_fn(features):\n    ret = {}\n    for k, v in features.items():\n      v = vocabulary.encode_tf(v)\n      v = tf.concat([tf.to_int64(v), [1]], 0)\n      ret[k] = v\n    return ret\n  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "docstring": "Encode all features.\n\nArgs:\ndataset: a tf.data.Dataset\nvocabulary: a vocabulary.Vocabulary\nReturns:\na tf.data.Dataset", "source": "juraj-google-style"}
{"code": "def _normalize_direction(heading: int) -> int:\n    while (heading > 359):\n        heading = int((heading - 359))\n    while (heading < 0):\n        heading = int((heading + 359))\n    return heading", "docstring": "Make sure that 0 < heading < 360\n\nArgs:\nheading: base heading\n\nReturns: corrected heading", "source": "codesearchnet"}
{"code": "def _FindCodeObjectsReferents(module, start_objects, visit_recorder):\n\n    def CheckIgnoreCodeObject(code_object):\n        'Checks if the code object can be ignored.\\n\\n    Code objects that are not implemented in the module, or are from a lambda or\\n    generator expression can be ignored.\\n\\n    If the module was precompiled, the code object may point to .py file, while\\n    the module says that it originated from .pyc file. We just strip extension\\n    altogether to work around it.\\n\\n    Args:\\n      code_object: code object that we want to check against module.\\n\\n    Returns:\\n      True if the code object can be ignored, False otherwise.\\n    '\n        if (code_object.co_name in ('<lambda>', '<genexpr>')):\n            return True\n        code_object_file = os.path.splitext(code_object.co_filename)[0]\n        module_file = os.path.splitext(module.__file__)[0]\n        if (code_object_file == module_file):\n            return False\n        return True\n\n    def CheckIgnoreClass(cls):\n        'Returns True if the class is definitely not coming from \"module\".'\n        cls_module = sys.modules.get(cls.__module__)\n        if (not cls_module):\n            return False\n        return ((cls_module is not module) and (getattr(cls_module, '__file__', None) != module.__file__))\n    code_objects = set()\n    current = start_objects\n    for obj in current:\n        visit_recorder.Record(current)\n    depth = 0\n    while (current and (depth < _MAX_REFERENTS_BFS_DEPTH)):\n        new_current = []\n        for current_obj in current:\n            referents = gc.get_referents(current_obj)\n            if ((current_obj is not module.__dict__) and (len(referents) > _MAX_OBJECT_REFERENTS)):\n                continue\n            for obj in referents:\n                if (isinstance(obj, _BFS_IGNORE_TYPES) or (not visit_recorder.Record(obj))):\n                    continue\n                if (isinstance(obj, types.CodeType) and CheckIgnoreCodeObject(obj)):\n                    continue\n                if (isinstance(obj, six.class_types) and CheckIgnoreClass(obj)):\n                    continue\n                if isinstance(obj, types.CodeType):\n                    code_objects.add(obj)\n                else:\n                    new_current.append(obj)\n        current = new_current\n        depth += 1\n    return code_objects", "docstring": "Looks for all the code objects referenced by objects in start_objects.\n\nThe traversal implemented by this function is a shallow one. In other words\nif the reference chain is a -> b -> co1 -> c -> co2, this function will\nreturn [co1] only.\n\nThe traversal is implemented with BFS. The maximum depth is limited to avoid\ntouching all the objects in the process. Each object is only visited once\nusing visit_recorder.\n\nArgs:\nmodule: module in which we are looking for code objects.\nstart_objects: initial set of objects for the BFS traversal.\nvisit_recorder: instance of _VisitRecorder class to ensure each object is\nvisited at most once.\n\nReturns:\nList of code objects.", "source": "codesearchnet"}
{"code": "def convert_bboxes_from_albumentations(bboxes, target_format, rows, cols, check_validity=False):\n    return [convert_bbox_from_albumentations(bbox, target_format, rows, cols, check_validity) for bbox in bboxes]", "docstring": "Convert a list of bounding boxes from the format used by albumentations to a format, specified\nin `target_format`.\n\nArgs:\nbboxes (list): List of bounding box with coordinates in the format used by albumentations\ntarget_format (str): required format of the output bounding box. Should be 'coco' or 'pascal_voc'.\nrows (int): image height\ncols (int): image width\ncheck_validity (bool): check if all boxes are valid boxes", "source": "codesearchnet"}
{"code": "def MultiNotifyQueue(self, notifications, mutation_pool=None):\n    extract_queue = (lambda notification: notification.session_id.Queue())\n    for (queue, notifications) in iteritems(collection.Group(notifications, extract_queue)):\n        self._MultiNotifyQueue(queue, notifications, mutation_pool=mutation_pool)", "docstring": "This is the same as NotifyQueue but for several session_ids at once.\n\nArgs:\nnotifications: A list of notifications.\nmutation_pool: A MutationPool object to schedule Notifications on.\n\nRaises:\nRuntimeError: An invalid session_id was passed.", "source": "codesearchnet"}
{"code": "def automatic_control_dependencies(f):\n\n    def wrapper(*args, **kwargs):\n        with AutomaticControlDependencies() as a:\n            result = f(*args, **kwargs)\n            result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]\n            return nest.pack_sequence_as(result, result_flat)\n    return tf_decorator.make_decorator(f, wrapper)", "docstring": "Wraps f to automatically insert control dependencies.\n\nThe inserted dependencies ensure that:\n1. All stateful ops in f run when the result of f runs\n2. Updates to the same resources happen in order.\n\nArgs:\nf: the function to be wrapped.\n\nReturns:\nThe wrapped function.", "source": "github-repos"}
{"code": "def argv(cls, name, short_name=None, type=None, help=None):\n        \n        cls.__hierarchy.append(argv.Argv(name, short_name, type, help))", "docstring": "Set command line arguments as a source\n\nParses the command line arguments described by the parameters.\n\nArgs:\nname: the long name of the argument (foo)\nshort_name: the optional short name of the argument (f)\ntype: the optional type of the argument, defaults to bool\nhelp: the optional help text for the argument", "source": "juraj-google-style"}
{"code": "def _PromptUserForPartitionIdentifiers(\n      self, volume_system, volume_identifiers):\n    \n    print_header = True\n    while True:\n      if print_header:\n        self._PrintTSKPartitionIdentifiersOverview(\n            volume_system, volume_identifiers)\n\n        print_header = False\n\n      lines = self._textwrapper.wrap(self._USER_PROMPT_TSK)\n      self._output_writer.Write('\\n'.join(lines))\n      self._output_writer.Write('\\n\\nPartition identifiers: ')\n\n      try:\n        selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='p')\n        if (selected_volumes and\n            not set(selected_volumes).difference(volume_identifiers)):\n          break\n      except ValueError:\n        pass\n\n      self._output_writer.Write('\\n')\n\n      lines = self._textwrapper.wrap(\n          'Unsupported partition identifier(s), please try again or abort with '\n          'Ctrl^C.')\n      self._output_writer.Write('\\n'.join(lines))\n      self._output_writer.Write('\\n\\n')\n\n    return selected_volumes", "docstring": "Prompts the user to provide partition identifiers.\n\nArgs:\nvolume_system (dfvfs.TSKVolumeSystem): volume system.\nvolume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix or None.", "source": "juraj-google-style"}
{"code": "def get_ethernet_settings(self):\n    uri = '{}/ethernetSettings'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Gets the Ethernet interconnect settings for the Logical Interconnect.\n\nReturns:\ndict: Ethernet Interconnect Settings", "source": "codesearchnet"}
{"code": "def close(self):\n    self._dll.JLINKARM_Close()\n    if (self._lock is not None):\n        del self._lock\n        self._lock = None\n    return None", "docstring": "Closes the open J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``\n\nRaises:\nJLinkException: if there is no connected JLink.", "source": "codesearchnet"}
{"code": "def set_acl(self, role, users):\n        \n        acl_updates = [{\"user\": user, \"role\": role} for user in users]\n        r = fapi.update_repository_method_acl(\n            self.namespace, self.name, self.snapshot_id,\n            acl_updates, self.api_url\n        )\n        fapi._check_response_code(r, 200)", "docstring": "Set permissions for this method.\n\nArgs:\nrole (str): Access level\none of {one of \"OWNER\", \"READER\", \"WRITER\", \"NO ACCESS\"}\nusers (list(str)): List of users to give role to", "source": "juraj-google-style"}
{"code": "def combine(self, x):\n    \n    depth = tf.shape(x)[-1]\n    x *= tf.expand_dims(self._nonpadding, -1)\n    ret = tf.unsorted_segment_sum(\n        x, self._flat_indices, num_segments=self._batch * self._length)\n    ret = tf.reshape(ret, [self._batch, self._length, depth])\n    return ret", "docstring": "Return the output from the experts.\n\nWhen one example goes to multiple experts, the outputs are summed.\n\nArgs:\nx: a Tensor with shape [batch, num_experts, expert_capacity, depth]\n\nReturns:\na `Tensor` with shape `[batch, length, depth]", "source": "juraj-google-style"}
{"code": "def are_equal(self, sp1, sp2):\n    for s1 in sp1.keys():\n        spin1 = getattr(s1, 'spin', 0)\n        oxi1 = getattr(s1, 'oxi_state', 0)\n        for s2 in sp2.keys():\n            spin2 = getattr(s2, 'spin', 0)\n            oxi2 = getattr(s2, 'oxi_state', 0)\n            if ((s1.symbol == s2.symbol) and (oxi1 == oxi2) and (spin2 == (- spin1))):\n                break\n        else:\n            return False\n    return True", "docstring": "True if species are exactly the same, i.e., Fe2+ == Fe2+ but not\nFe3+. and the spins are reversed. i.e., spin up maps to spin down,\nand vice versa.\n\nArgs:\nsp1: First species. A dict of {specie/element: amt} as per the\ndefinition in Site and PeriodicSite.\nsp2: Second species. A dict of {specie/element: amt} as per the\ndefinition in Site and PeriodicSite.\n\nReturns:\nBoolean indicating whether species are equal.", "source": "codesearchnet"}
{"code": "def is_subgroup(self, supergroup):\n        \n        warnings.warn(\"This is not fully functional. Only trivial subsets are tested right now. \")\n        return set(self.symmetry_ops).issubset(supergroup.symmetry_ops)", "docstring": "True if this group is a subgroup of the supplied group.\n\nArgs:\nsupergroup (SymmetryGroup): Supergroup to test.\n\nReturns:\nTrue if this group is a subgroup of the supplied group.", "source": "juraj-google-style"}
{"code": "def eval(self, session=None):\n    return self._variable.eval(session=session)", "docstring": "In a session, computes and returns the value of this variable.\n\nThis is not a graph construction method, it does not add ops to the graph.\n\nThis convenience method requires a session where the graph\ncontaining this variable has been launched. If no session is\npassed, the default session is used.  See `tf.compat.v1.Session` for more\ninformation on launching a graph and on sessions.\n\n```python\nv = tf.Variable([1, 2])\ninit = tf.compat.v1.global_variables_initializer()\n\nwith tf.compat.v1.Session() as sess:\nsess.run(init)\n# Usage passing the session explicitly.\nprint(v.eval(sess))\n# Usage with the default session.  The 'with' block\n# above makes 'sess' the default session.\nprint(v.eval())\n```\n\nArgs:\nsession: The session to use to evaluate this variable. If none, the\ndefault session is used.\n\nReturns:\nA numpy `ndarray` with a copy of the value of this variable.", "source": "github-repos"}
{"code": "def run(self, tag=None, output=None, **kwargs):\n    start = datetime.datetime.now()\n    count = 0\n    if tag:\n        tag = Uri(tag)\n        xml_generator = etree.iterparse(self.source, tag=tag.etree)\n    else:\n        xml_generator = etree.iterparse(self.source)\n    i = 0\n    for (event, element) in xml_generator:\n        type_tags = element.findall(_RDF_TYPE_TAG)\n        rdf_types = [el.get(_RES_TAG) for el in type_tags if el.get(_RES_TAG)]\n        if (str(self.filter_val) in rdf_types):\n            pdb.set_trace()\n            count += 1\n        i += 1\n        element.clear()\n    print(\"Found '{}' items in {}\".format(count, (datetime.datetime.now() - start)))", "docstring": "runs the extractor\n\nArgs:\n-----\noutput: ['filepath', None]", "source": "codesearchnet"}
{"code": "def determine_git_ref(self, config):\n        \n        \n        \n        ref_config_keys = 0\n        for i in ['commit', 'tag', 'branch']:\n            if config.get(i):\n                ref_config_keys += 1\n        if ref_config_keys > 1:\n            raise ImportError(\"Fetching remote git sources failed: \"\n                              \"conflicting revisions (e.g. 'commit', 'tag', \"\n                              \"'branch') specified for a package source\")\n\n        \n        \n        if config.get('commit'):\n            ref = config['commit']\n        elif config.get('tag'):\n            ref = config['tag']\n        else:\n            \n            \n            ref = self.git_ls_remote(\n                config['uri'],\n                self.determine_git_ls_remote_ref(config)\n            )\n        if sys.version_info[0] > 2 and isinstance(ref, bytes):\n            return ref.decode()\n        return ref", "docstring": "Determine the ref to be used for 'git checkout'.\n\nArgs:\nconfig (dict): git config dictionary\n\nReturns:\nstr: A commit id or tag name", "source": "juraj-google-style"}
{"code": "def parse(type: Type):\n\n    def decorator(parser):\n        EnvVar.parsers[type] = parser\n        return parser\n    return decorator", "docstring": "Register a parser for a attribute type.\n\nParsers will be used to parse `str` type objects from either\nthe commandline arguments or environment variables.\n\nArgs:\ntype: the type the decorated function will be responsible\nfor parsing a environment variable to.", "source": "codesearchnet"}
{"code": "def parse_column_path(column: str) -> list:\n    nested_columns = []\n    for col in column.split('.'):\n        parts = PATTERN.match(col)\n        if parts:\n            column_name, key = (parts.groups()[0], parts.groups()[1])\n        else:\n            column_name, key = (col, None)\n        if not column_name:\n            raise ValueError(f'Invalid column path: {column}')\n        nested_columns.append((column_name, key))\n    return nested_columns", "docstring": "Parse the column string to extract nested fields and array indices.\n\nArgs:\ncolumn (str): The column string with potential nested fields and array\nindices.\n\nReturns:\nlist: A list of tuples, where each tuple contains the column name and the\nkey/index.", "source": "github-repos"}
{"code": "def __init__(self, input_dataset, target_device, source_device='/cpu:0'):\n    self._input_dataset = input_dataset._apply_debug_options()\n    self._target_device = target_device\n    spec = framework_device.DeviceSpec().from_string(self._target_device)\n    self._is_gpu_target = spec.device_type == 'GPU'\n    self._source_device_string = source_device\n    self._source_device = ops.convert_to_tensor(source_device)\n    wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(self._input_dataset._variant_tensor)\n\n    @def_function.function()\n    def _init_func():\n        \n        ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)\n        resource = gen_dataset_ops.anonymous_iterator(**self._input_dataset._flat_structure)\n        with ops.control_dependencies([gen_dataset_ops.make_iterator(ds_variant, resource)]):\n            return gen_dataset_ops.iterator_to_string_handle(resource)\n    init_func_concrete = _init_func.get_concrete_function()\n\n    @def_function.function()\n    def _remote_init_func():\n        return functional_ops.remote_call(target=self._source_device, args=init_func_concrete.captured_inputs, Tout=[dtypes.string], f=init_func_concrete)\n    self._init_func = _remote_init_func.get_concrete_function()\n    self._init_captured_args = self._init_func.captured_inputs\n\n    @def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n    def _next_func(string_handle):\n        \n        with ops.device(self._source_device_string):\n            iterator = iterator_ops.Iterator.from_string_handle(string_handle, dataset_ops.get_legacy_output_types(self), dataset_ops.get_legacy_output_shapes(self), dataset_ops.get_legacy_output_classes(self))\n        return structure.to_tensor_list(self.element_spec, iterator.get_next())\n    next_func_concrete = _next_func.get_concrete_function()\n\n    @def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)], experimental_attributes={'experimental_ints_on_device': True})\n    def _remote_next_func(string_handle):\n        return functional_ops.remote_call(target=self._source_device, args=[string_handle] + next_func_concrete.captured_inputs, Tout=self._input_dataset._flat_types, f=next_func_concrete)\n    self._next_func = _remote_next_func.get_concrete_function()\n    self._next_captured_args = self._next_func.captured_inputs\n\n    @def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n    def _finalize_func(string_handle):\n        \n        iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(string_handle, **self._input_dataset._flat_structure)\n        with ops.control_dependencies([resource_variable_ops.destroy_resource_op(iterator_resource, ignore_lookup_error=True)]):\n            return array_ops.constant(0, dtypes.int64)\n    finalize_func_concrete = _finalize_func.get_concrete_function()\n\n    @def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\n    def _remote_finalize_func(string_handle):\n        return functional_ops.remote_call(target=self._source_device, args=[string_handle] + finalize_func_concrete.captured_inputs, Tout=[dtypes.int64], f=finalize_func_concrete)\n    self._finalize_func = _remote_finalize_func.get_concrete_function()\n    self._finalize_captured_args = self._finalize_func.captured_inputs\n    g = ops.get_default_graph()\n    self._init_func.add_to_graph(g)\n    self._next_func.add_to_graph(g)\n    self._finalize_func.add_to_graph(g)\n    with 
ops.device(self._target_device):\n        variant_tensor = gen_dataset_ops.generator_dataset(self._init_captured_args, self._next_captured_args, self._finalize_captured_args, init_func=self._init_func, next_func=self._next_func, finalize_func=self._finalize_func, **self._input_dataset._flat_structure)\n    super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)", "docstring": "Constructs a _CopyToDeviceDataset.\n\nArgs:\ninput_dataset: `Dataset` to be copied\ntarget_device: The name of the device to which elements would be copied.\nsource_device: Device where input_dataset would be placed.", "source": "github-repos"}
{"code": "def dot(r1, r2):\n    \n    if r1.size != r2.size:\n        raise ValueError(\"Both arguments must have the same input size.\")\n    if r1.deriv != r2.deriv:\n        raise ValueError(\"Both arguments must have the same deriv.\")\n    return r1.x*r2.x + r1.y*r2.y + r1.z*r2.z", "docstring": "Compute the dot product\n\nArguments:\n| ``r1``, ``r2``  -- two :class:`Vector3` objects\n\n(Returns a Scalar)", "source": "juraj-google-style"}
{"code": "def parse(self, values):\n    \n    type_map = {}\n    for name, t in self._hparam_types.items():\n      param_type, _ = t\n      type_map[name] = param_type\n\n    values_map = parse_values(values, type_map)\n    return self.override_from_dict(values_map)", "docstring": "Override existing hyperparameter values, parsing new values from a string.\n\nSee parse_values for more detail on the allowed format for values.\n\nArgs:\nvalues: String.  Comma separated list of `name=value` pairs where 'value'\nmust follow the syntax described above.\n\nReturns:\nThe `HParams` instance.\n\nRaises:\nValueError: If `values` cannot be parsed or a hyperparameter in `values`\ndoesn't exist.", "source": "juraj-google-style"}
{"code": "def get_checkpoint_factories_and_keys(object_names, object_map=None):\n    checkpoint_factory_map = object_identity.ObjectIdentityDictionary()\n    unmapped_registered_savers = collections.defaultdict(dict)\n    for trackable, object_name in object_names.items():\n        object_to_save = util.get_mapped_trackable(trackable, object_map)\n        saver_name = registration.get_registered_saver_name(object_to_save)\n        if saver_name:\n            unmapped_registered_savers[saver_name][object_name] = trackable\n        else:\n            checkpoint_factory_map[trackable] = []\n            for name, saveable_factory in saveable_object_util.saveable_objects_from_trackable(object_to_save).items():\n                key_suffix = saveable_compat.get_saveable_name(object_to_save) or name\n                checkpoint_key = trackable_utils.checkpoint_key(object_name, key_suffix)\n                if not saveable_compat.force_checkpoint_conversion_enabled():\n                    name = key_suffix\n                checkpoint_factory_map[trackable].append(_CheckpointFactoryData(factory=saveable_factory, name=name, checkpoint_key=checkpoint_key))\n    return (checkpoint_factory_map, unmapped_registered_savers)", "docstring": "Gets a map of saveable factories and corresponding checkpoint keys.\n\nArgs:\nobject_names: a dictionary that maps `Trackable` objects to auto-generated\nstring names.\nobject_map: a dictionary mapping `Trackable` to copied `Trackable` objects.\nThe copied objects are generated from `Trackable.\n_export_to_saved_model_graph()` which copies the object into another\ngraph. Generally only resource objects (e.g. Variables, Tables) will be\nin this map.\n\nReturns:\nA tuple of (\nDictionary mapping trackable -> list of _CheckpointFactoryData,\nDictionary mapping registered saver name -> {object name -> trackable})", "source": "github-repos"}
{"code": "def create_function(self, vpc_config):\n        \n        zip_file = 'lambda-holder.zip'\n        with zipfile.ZipFile(zip_file, mode='w') as zipped:\n            zipped.writestr('index.py', 'print \"Hello world\"')\n\n        contents = ''\n        with open('lambda-holder.zip', 'rb') as openfile:\n            contents = openfile.read()\n\n        LOG.info('Creating lambda function: %s', self.app_name)\n\n        try:\n            self.lambda_client.create_function(\n                Environment=self.lambda_environment,\n                FunctionName=self.app_name,\n                Runtime=self.runtime,\n                Role=self.role_arn,\n                Handler=self.handler,\n                Code={'ZipFile': contents},\n                Description=self.description,\n                Timeout=int(self.timeout),\n                MemorySize=int(self.memory),\n                Publish=False,\n                VpcConfig=vpc_config,\n                Tags={'app_group': self.group,\n                      'app_name': self.app_name})\n        except boto3.exceptions.botocore.exceptions.ClientError as error:\n            if 'CreateNetworkInterface' in error.response['Error']['Message']:\n                message = '{0} is missing \"ec2:CreateNetworkInterface\"'.format(self.role_arn)\n                LOG.critical(message)\n                raise SystemExit(message)\n\n            raise\n\n        LOG.info(\"Successfully created Lambda function and alias\")", "docstring": "Create lambda function, configures lambda parameters.\n\nWe need to upload non-zero zip when creating function. Uploading\nhello_world python lambda function since AWS doesn't care which\nexecutable is in ZIP.\n\nArgs:\nvpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using\na VPC in lambda", "source": "juraj-google-style"}
{"code": "def submit_files(self, halt_on_error=True):\n    if (self.halt_on_file_error is not None):\n        halt_on_error = self.halt_on_file_error\n    upload_status = []\n    for (xid, content_data) in self._files.items():\n        del self._files[xid]\n        status = True\n        if (self.debug and (xid in self.saved_xids)):\n            self.tcex.log.debug('skipping previously saved file {}.'.format(xid))\n            continue\n        content = content_data.get('fileContent')\n        if callable(content):\n            content = content_data.get('fileContent')(xid)\n        if (content is None):\n            upload_status.append({'uploaded': False, 'xid': xid})\n            self.tcex.log.warning('File content was null for xid {}.'.format(xid))\n            continue\n        if (content_data.get('type') == 'Document'):\n            api_branch = 'documents'\n        elif (content_data.get('type') == 'Report'):\n            api_branch = 'reports'\n        url = '/v2/groups/{}/{}/upload'.format(api_branch, xid)\n        headers = {'Content-Type': 'application/octet-stream'}\n        params = {'owner': self._owner}\n        r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)\n        if (r.status_code == 401):\n            self.tcex.log.info('Received 401 status code using POST. Trying PUT to update.')\n            r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)\n        self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url))\n        if (not r.ok):\n            status = False\n            self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)\n        elif self.debug:\n            self.saved_xids.append(xid)\n        self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid))\n        upload_status.append({'uploaded': status, 'xid': xid})\n    return upload_status", "docstring": "Submit Files for Documents and Reports to ThreatConnect API.\n\nCritical Errors\n\n* There is insufficient document storage allocated to this account.\n\nArgs:\nhalt_on_error (bool, default:True): If True any exception will raise an error.\n\nReturns:\ndict: The upload status for each xid.", "source": "codesearchnet"}
{"code": "def search(cls, session, queries):\n    return super(Conversations, cls).search(session, queries, SearchConversation)", "docstring": "Search for a conversation given a domain.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nqueries (helpscout.models.Domain or iter): The queries for the\ndomain. If a ``Domain`` object is provided, it will simply be\nreturned. Otherwise, a ``Domain`` object will be generated\nfrom the complex queries. In this case, the queries should\nconform to the interface in\n:func:`helpscout.domain.Domain.from_tuple`.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.SearchCustomer):\nSearchCustomer iterator.", "source": "codesearchnet"}
{"code": "def __init__(self, identifier):\n    \n    super(Volume, self).__init__()\n    self.identifier = identifier\n    self._attributes = {}\n    self._extents = []\n    self._is_parsed = False", "docstring": "Initializes a volume.\n\nArgs:\nidentifier (str): identifier of the attribute within the volume.", "source": "juraj-google-style"}
{"code": "def AddEventSource(self, event_source):\n    \n    self._RaiseIfNotWritable()\n\n    self._AddAttributeContainer(\n        self._CONTAINER_TYPE_EVENT_SOURCE, event_source)", "docstring": "Adds an event source.\n\nArgs:\nevent_source (EventSource): event source.\n\nRaises:\nIOError: when the storage file is closed or read-only.\nOSError: when the storage file is closed or read-only.", "source": "juraj-google-style"}
{"code": "def stacked_bi_rnn(units: tf.Tensor, n_hidden_list: List, cell_type='gru', seq_lengths=None, use_peepholes=False, name='RNN_layer'):\n    for (n, n_hidden) in enumerate(n_hidden_list):\n        with tf.variable_scope(((name + '_') + str(n))):\n            if (cell_type == 'gru'):\n                forward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)\n                backward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)\n            elif (cell_type == 'lstm'):\n                forward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)\n                backward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)\n            else:\n                raise RuntimeError('cell_type must be either gru or lstm')\n            ((rnn_output_fw, rnn_output_bw), (fw, bw)) = tf.nn.bidirectional_dynamic_rnn(forward_cell, backward_cell, units, dtype=tf.float32, sequence_length=seq_lengths)\n            units = tf.concat([rnn_output_fw, rnn_output_bw], axis=2)\n            if (cell_type == 'gru'):\n                last_units = tf.concat([fw, bw], axis=1)\n            else:\n                ((c_fw, h_fw), (c_bw, h_bw)) = (fw, bw)\n                c = tf.concat([c_fw, c_bw], axis=1)\n                h = tf.concat([h_fw, h_bw], axis=1)\n                last_units = (h, c)\n    return (units, last_units)", "docstring": "Stackted recurrent neural networks GRU or LSTM\n\nArgs:\nunits: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\nn_hidden_list: list with number of hidden units at the ouput of each layer\nseq_lengths: length of sequences for different length sequences in batch\ncan be None for maximum length as a length for every sample in the batch\ncell_type: 'lstm' or 'gru'\nuse_peepholes: whether to use peephole connections (only 'lstm' case affected)\nname: what variable_scope to use for the network parameters\nReturns:\nunits: tensor at the output of the last recurrent layer\nwith dimensionality [None, n_tokens, n_hidden_list[-1]]\nlast_units: tensor of last hidden states for GRU and tuple\nof last hidden stated and last cell states for LSTM\ndimensionality of cell states and hidden states are\nsimilar and equal to [B x 2 * H], where B - batch\nsize and H is number of hidden units", "source": "codesearchnet"}
{"code": "def __init__(self, job_context, shard_state):\n    \n    self.job_context = job_context\n    self.id = shard_state.shard_id\n    self.number = shard_state.shard_number\n    self.attempt = shard_state.retries + 1\n    self._state = shard_state", "docstring": "Init.\n\nThe signature of __init__ is subject to change.\n\nRead only properties:\njob_context: JobContext object.\nid: str. of format job_id-shard_number.\nnumber: int. shard number. 0 indexed.\nattempt: int. The current attempt at executing this shard.\nStarting at 1.\n\nArgs:\njob_context: map_job.JobConfig.\nshard_state: model.ShardState.", "source": "juraj-google-style"}
{"code": "def proxy_num(self, protocol=None):\n        \n        http_num = len(self.proxies['http'])\n        https_num = len(self.proxies['https'])\n        if protocol == 'http':\n            return http_num\n        elif protocol == 'https':\n            return https_num\n        else:\n            return http_num + https_num", "docstring": "Get the number of proxies in the pool\n\nArgs:\nprotocol (str, optional): 'http' or 'https' or None. (default None)\n\nReturns:\nIf protocol is None, return the total number of proxies, otherwise,\nreturn the number of proxies of corresponding protocol.", "source": "juraj-google-style"}
{"code": "def get_model_filepath(self, infodict):\n    u = infodict['uniprot_ac']\n    original_filename = '{}_{}_{}_{}'.format(infodict['from'], infodict['to'], infodict['template'], infodict['coordinate_id'])\n    file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:6], 'swissmodel', '{}.pdb'.format(original_filename))\n    if op.exists(file_path):\n        return file_path\n    else:\n        log.warning('{}: no file {} found for model'.format(u, file_path))\n        return None", "docstring": "Get the path to the homology model using information from the index dictionary for a single model.\n\nExample: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.\nUse one of those dictionaries as input to this function to get the filepath to the model itself.\n\nArgs:\ninfodict (dict): Information about a model from get_models\n\nReturns:\nstr: Path to homology model", "source": "codesearchnet"}
{"code": "def is_array_str(x: Any) -> bool:\n    if isinstance(x, (bytes, str)):\n        return True\n    elif is_array(x):\n        return is_dtype_str(x.dtype)\n    else:\n        return False", "docstring": "Returns True if the given array is a `str` array.\n\nNote: Also returns True for scalar `str`, `bytes` values. For compatibility\nwith `tensor.numpy()` which returns `bytes`\n\nArgs:\nx: The array to test\n\nReturns:\nTrue or False", "source": "github-repos"}
{"code": "def set_authentication_profile(profile=None, deploy=False):\n    \n\n    if not profile:\n        raise CommandExecutionError(\"Profile name option must not be none.\")\n\n    ret = {}\n\n    query = {'type': 'config',\n             'action': 'set',\n             'xpath': '/config/devices/entry[@name=\\'localhost.localdomain\\']/deviceconfig/system/'\n                      'authentication-profile',\n             'element': '<authentication-profile>{0}</authentication-profile>'.format(profile)}\n\n    ret.update(__proxy__['panos.call'](query))\n\n    if deploy is True:\n        ret.update(commit())\n\n    return ret", "docstring": "Set the authentication profile of the Palo Alto proxy minion. A commit will be required before this is processed.\n\nCLI Example:\n\nArgs:\nprofile (str): The name of the authentication profile to set.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_authentication_profile foo\nsalt '*' panos.set_authentication_profile foo deploy=True", "source": "juraj-google-style"}
{"code": "def _send_request(self, url, method=\"get\", data=None, extra_headers=None):\n        \n        headers = {'Content-type': 'application/json'}\n        if isinstance(extra_headers, dict):\n            headers.update(extra_headers)\n\n        if not data or \"password\" not in data:\n            logger.debug(\"Sending {method} request to {url} with data {data}\".format(\n                method=method.upper(), url=url, data=data)\n            )\n        r = self.session.request(method, url, headers=headers, data=data)\n        r.raise_for_status()\n        return r.json()", "docstring": "Performs a given request and returns a json object\n\nArgs:\nurl (str): URL of the request\nmethod (str): Any of \"get\", \"post\", \"delete\"\ndata (any): Possible extra data to send with the request\nextra_headers (dict): Possible extra headers to send along in the request\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def assert_almost_eq(arr_test, arr_target, thresh=1E-11):\n    r\n    if util_arg.NO_ASSERTS:\n        return\n    import utool as ut\n    arr1 = np.array(arr_test)\n    arr2 = np.array(arr_target)\n    passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True)\n    if not np.all(passed):\n        failed_xs = np.where(np.logical_not(passed))\n        failed_error = error.take(failed_xs)\n        failed_arr_test = arr1.take(failed_xs)\n        failed_arr_target = arr2.take(failed_xs)\n\n        msg_list = [\n            'FAILED ASSERT ALMOST EQUAL',\n            '  * failed_xs = %r' % (failed_xs,),\n            '  * failed_error = %r' % (failed_error,),\n            '  * failed_arr_test   = %r' % (failed_arr_test,),\n            '  * failed_arr_target = %r' % (failed_arr_target,),\n        ]\n        msg = '\\n'.join(msg_list)\n        raise AssertionError(msg)\n    return error", "docstring": "r\"\"\"\nArgs:\narr_test (ndarray or list):\narr_target (ndarray or list):\nthresh (scalar or ndarray or list):", "source": "juraj-google-style"}
{"code": "def make_anchor(file_path: pathlib.Path, offset: int, width: int, context_width: int, metadata, encoding: str='utf-8', handle=None):\n\n    @contextmanager\n    def get_handle():\n        if (handle is None):\n            with file_path.open(mode='rt', encoding=encoding) as fp:\n                (yield fp)\n        else:\n            (yield handle)\n    with get_handle() as fp:\n        context = _make_context(fp, offset, width, context_width)\n    return Anchor(file_path=file_path, encoding=encoding, context=context, metadata=metadata)", "docstring": "Construct a new `Anchor`.\n\nArgs:\nfile_path: The absolute path to the target file for the anchor.\noffset: The offset of the anchored text in codepoints in `file_path`'s\ncontents.\nwidth: The width in codepoints of the anchored text.\ncontext_width: The width in codepoints of context on either side of the\nanchor.\nmetadata: The metadata to attach to the anchor. Must be json-serializeable.\nencoding: The encoding of the contents of `file_path`.\nhandle: If not `None`, this is a file-like object the contents of which\nare used to calculate the context of the anchor. If `None`, then\nthe file indicated by `file_path` is opened instead.\n\nRaises:\nValueError: `width` characters can't be read at `offset`.\nValueError: `file_path` is not absolute.", "source": "codesearchnet"}
{"code": "def secure_channel(target, credentials, options=None, *, loop=None, executor=None,\n                   standalone_pool_for_streaming=False):\n    \n    return Channel(_grpc.secure_channel(target, credentials, options),\n                   loop, executor, standalone_pool_for_streaming)", "docstring": "Creates a secure Channel to a server.\n\nArgs:\ntarget: The server address.\ncredentials: A ChannelCredentials instance.\noptions: An optional list of key-value pairs (channel args in gRPC runtime)\nto configure the channel.\n\nReturns:\nA Channel object.", "source": "juraj-google-style"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        self.Nonce = reader.ReadUInt32()\n        self.Type = TransactionType.MinerTransaction", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def AddBlob(self, blob_id, length):\n    \n    if self.finalized and length > 0:\n      raise IOError(\"Can't add blobs to finalized BlobImage\")\n\n    self.content_dirty = True\n    self.index.seek(0, 2)\n    self.index.write(blob_id.AsBytes())\n    self.size += length\n\n    if length < self.chunksize:\n      self.finalized = True", "docstring": "Add another blob to this image using its hash.\n\nOnce a blob is added that is smaller than the chunksize we finalize the\nfile, since handling adding more blobs makes the code much more complex.\n\nArgs:\nblob_id: rdf_objects.BlobID object.\nlength: int length of blob\n\nRaises:\nIOError: if blob has been finalized.", "source": "juraj-google-style"}
{"code": "def forward(self, encoder_hidden_states):\n    hidden_states = encoder_hidden_states.transpose(1, -1)\n    for layer in self.conv_layers:\n        hidden_states = layer(hidden_states)\n    hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1)\n    if not self.training:\n        hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long()\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):\nBatch of input sequences.\npadding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):\nBatch of masks indicating padded part.\n\nReturns:\n`torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.", "source": "github-repos"}
{"code": "def order_by(self, *args):\n        \n        clone = copy.deepcopy(self)\n        clone.adapter.ordered = True\n        if args:\n            clone.adapter.order_by(*args)\n        return clone", "docstring": "Applies query ordering.\n\nArgs:\n**args: Order by fields names.\nDefaults to ascending, prepend with hypen (-) for desecending ordering.\n\nReturns:\nSelf. Queryset object.\n\nExamples:\n>>> Person.objects.order_by('-name', 'join_date')", "source": "juraj-google-style"}
{"code": "def circuit_to_image(circ: Circuit,\n                     qubits: Qubits = None) -> PIL.Image:   \n    \n    latex = circuit_to_latex(circ, qubits)\n    img = render_latex(latex)\n    return img", "docstring": "Create an image of a quantum circuit.\n\nA convenience function that calls circuit_to_latex() and render_latex().\n\nArgs:\ncirc:       A quantum Circuit\nqubits:     Optional qubit list to specify qubit order\n\nReturns:\nReturns: A PIL Image (Use img.show() to display)\n\nRaises:\nNotImplementedError: For unsupported gates.\nOSError: If an external dependency is not installed.", "source": "juraj-google-style"}
{"code": "def update_labels(self, node_name: str, labels: dict):\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can update node details.')\n    node_spec = {'Availability': 'active', 'Name': node_name, 'Role': 'manager', 'Labels': labels}\n    node = self._client.nodes.get(node_name)\n    node.update(node_spec)", "docstring": "Update label of a node.\n\nArgs:\nnode_name (string): Name of the node.\nlabels (dict): Label to add to the node", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    display_name = parser_mediator.GetDisplayName()\n\n    if not zipfile.is_zipfile(file_object):\n      raise errors.UnableToParseFile(\n          '[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(\n              self.NAME, display_name, 'Not a Zip file.'))\n\n    try:\n      zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)\n      self._ProcessZipFileWithPlugins(parser_mediator, zip_file)\n      zip_file.close()\n\n    \n    \n    except (zipfile.BadZipfile, struct.error) as exception:\n      raise errors.UnableToParseFile(\n          '[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(\n              self.NAME, display_name, exception))", "docstring": "Parses a compound ZIP file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def shakespeare(chunk_size):\n  \n  file_name = maybe_download('http:\n                             'shakespear.txt')\n  with open(file_name) as f:\n    shakespeare_full = f.read()\n\n  \n  length = (len(shakespeare_full) \n  if length < len(shakespeare_full):\n    shakespeare_full = shakespeare_full[:length]\n  arr = np.array([convert_to_int(c) for c in shakespeare_full])[\n      0:len(shakespeare_full) / chunk_size * chunk_size]\n  return arr.reshape((len(arr) / chunk_size, chunk_size))", "docstring": "Downloads Shakespeare, converts it into ASCII codes and chunks it.\n\nArgs:\nchunk_size: The dataset is broken down so that it is shaped into batches x\nchunk_size.\nReturns:\nA numpy array of ASCII codes shaped into batches x chunk_size.", "source": "juraj-google-style"}
{"code": "def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16):\n    \n\n    fobj.seek(0, 2)\n    filesize = fobj.tell()\n\n    if diff < 0:\n        if filesize + diff < 0:\n            raise ValueError\n        \n        fobj.truncate(filesize + diff)\n    elif diff > 0:\n        try:\n            while diff:\n                addsize = min(BUFFER_SIZE, diff)\n                fobj.write(b\"\\x00\" * addsize)\n                diff -= addsize\n            fobj.flush()\n        except IOError as e:\n            if e.errno == errno.ENOSPC:\n                \n                \n                \n                \n                fobj.truncate(filesize)\n            raise", "docstring": "Resize a file by `diff`.\n\nNew space will be filled with zeros.\n\nArgs:\nfobj (fileobj)\ndiff (int): amount of size to change\nRaises:\nIOError", "source": "juraj-google-style"}
{"code": "def fn(x: int) -> int:\n    return x", "docstring": "Test function\n\nArgs:\nx: The input\n\n\nReturns:\nThe output", "source": "github-repos"}
{"code": "def trace_region_count(self):\n    cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n    data = ctypes.c_uint32(0)\n    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n    if (res == 1):\n        raise errors.JLinkException('Failed to get trace region count.')\n    return data.value", "docstring": "Retrieves a count of the number of available trace regions.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nCount of the number of available trace regions.", "source": "codesearchnet"}
{"code": "def clear(self, color: Tuple[(int, int, int)]) -> None:\n    lib.TCOD_image_clear(self.image_c, color)", "docstring": "Fill this entire Image with color.\n\nArgs:\ncolor (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.", "source": "codesearchnet"}
{"code": "def VerifyServerPEM(self, http_object):\n    try:\n        server_pem = http_object.data\n        server_url = http_object.url\n        if (b'BEGIN CERTIFICATE' in server_pem):\n            server_certificate = rdf_crypto.RDFX509Cert(server_pem)\n            self.communicator.LoadServerCertificate(server_certificate=server_certificate, ca_certificate=self.ca_cert)\n            logging.info('Server PEM re-keyed.')\n            return True\n    except Exception as e:\n        logging.info('Unable to verify server certificate at %s: %s', server_url, e)\n        return False", "docstring": "Check the server PEM for validity.\n\nThis is used to determine connectivity to the server. Sometimes captive\nportals return a valid HTTP status, but the data is corrupted.\n\nArgs:\nhttp_object: The response received from the server.\n\nReturns:\nTrue if the response contains a valid server certificate.", "source": "codesearchnet"}
{"code": "def from_maildir(self, codes: str) -> FrozenSet[Flag]:\n        \n        flags = set()\n        for code in codes:\n            if code == ',':\n                break\n            to_sys = self._to_sys.get(code)\n            if to_sys is not None:\n                flags.add(to_sys)\n            else:\n                to_kwd = self._to_kwd.get(code)\n                if to_kwd is not None:\n                    flags.add(to_kwd)\n        return frozenset(flags)", "docstring": "Return the set of IMAP flags that correspond to the letter codes.\n\nArgs:\ncodes: The letter codes to map.", "source": "juraj-google-style"}
{"code": "def problem(problem_name, **kwargs):\n  \n  spec = parse_problem_name(problem_name)\n  try:\n    return Registries.problems[spec.base_name](\n        was_copy=spec.was_copy, was_reversed=spec.was_reversed)\n  except KeyError:\n    \n    return env_problem(problem_name, **kwargs)", "docstring": "Get possibly copied/reversed problem in `base_registry` or `env_registry`.\n\nArgs:\nproblem_name: string problem name. See `parse_problem_name`.\n**kwargs: forwarded to env problem's initialize method.\n\nReturns:\npossibly reversed/copied version of base problem registered in the given\nregistry.", "source": "juraj-google-style"}
{"code": "def encode(self, s):\n    return [(int(w) + self._num_reserved_ids) for w in s.split()]", "docstring": "Transform a human-readable string into a sequence of int ids.\n\nThe ids should be in the range [num_reserved_ids, vocab_size). Ids [0,\nnum_reserved_ids) are reserved.\n\nEOS is not appended.\n\nArgs:\ns: human-readable string to be converted.\n\nReturns:\nids: list of integers", "source": "codesearchnet"}
{"code": "def napalm_cli(task: Task, commands: List[str]) -> Result:\n    \n    device = task.host.get_connection(\"napalm\", task.nornir.config)\n    result = device.cli(commands)\n    return Result(host=task.host, result=result)", "docstring": "Run commands on remote devices using napalm\n\nArguments:\ncommands: commands to execute\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): result of the commands execution", "source": "juraj-google-style"}
{"code": "def increment_id(cls, _id: ObjectId, inc: int) -> ObjectId:\n    id_number = _ObjectIdHelper.id_to_int(_id)\n    new_number = id_number + inc\n    if new_number < 0 or new_number >= 1 << 96:\n        raise ValueError('invalid incremental, inc value must be within [%s, %s)' % (0 - id_number, 1 << 96 - id_number))\n    return _ObjectIdHelper.int_to_id(new_number)", "docstring": "Increment object_id binary value by inc value and return new object id.\n\nArgs:\n_id: The `_id` to change.\ninc(int): The incremental int value to be added to `_id`.\n\nReturns:\n`_id` incremented by `inc` value", "source": "github-repos"}
{"code": "def infer(msg, mrar=False):\n    df = common.df(msg)\n    if common.allzeros(msg):\n        return 'EMPTY'\n    if (df == 17):\n        tc = common.typecode(msg)\n        if (1 <= tc <= 4):\n            return 'BDS08'\n        if (5 <= tc <= 8):\n            return 'BDS06'\n        if (9 <= tc <= 18):\n            return 'BDS05'\n        if (tc == 19):\n            return 'BDS09'\n        if (20 <= tc <= 22):\n            return 'BDS05'\n        if (tc == 28):\n            return 'BDS61'\n        if (tc == 29):\n            return 'BDS62'\n        if (tc == 31):\n            return 'BDS65'\n    IS10 = bds10.is10(msg)\n    IS17 = bds17.is17(msg)\n    IS20 = bds20.is20(msg)\n    IS30 = bds30.is30(msg)\n    IS40 = bds40.is40(msg)\n    IS50 = bds50.is50(msg)\n    IS60 = bds60.is60(msg)\n    IS44 = bds44.is44(msg)\n    IS45 = bds45.is45(msg)\n    if mrar:\n        allbds = np.array(['BDS10', 'BDS17', 'BDS20', 'BDS30', 'BDS40', 'BDS44', 'BDS45', 'BDS50', 'BDS60'])\n        mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60]\n    else:\n        allbds = np.array(['BDS10', 'BDS17', 'BDS20', 'BDS30', 'BDS40', 'BDS50', 'BDS60'])\n        mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60]\n    bds = ','.join(sorted(allbds[mask]))\n    if (len(bds) == 0):\n        return None\n    else:\n        return bds", "docstring": "Estimate the most likely BDS code of an message.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\nmrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.\n\nReturns:\nString or None: BDS version, or possible versions, or None if nothing matches.", "source": "codesearchnet"}
{"code": "def wrap(access_pyxb, read_only=False):\n    \n    w = AccessPolicyWrapper(access_pyxb)\n    yield w\n    if not read_only:\n        w.get_normalized_pyxb()", "docstring": "Work with the AccessPolicy in a SystemMetadata PyXB object.\n\nArgs:\naccess_pyxb : AccessPolicy PyXB object\nThe AccessPolicy to modify.\n\nread_only: bool\nDo not update the wrapped AccessPolicy.\n\nWhen only a single AccessPolicy operation is needed, there's no need to use this\ncontext manager. Instead, use the generated context manager wrappers.", "source": "juraj-google-style"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    location = getattr(path_spec, 'location', None)\n\n    if (location is None or\n        not location.startswith(self.LOCATION_ROOT)):\n      return False\n\n    if len(location) == 1:\n      return True\n\n    try:\n      self._tar_file.getmember(location[1:])\n      return True\n    except KeyError:\n      pass\n\n    \n    for name in iter(self._tar_file.getnames()):\n      \n      \n      if name.startswith(location[1:]):\n        return True\n\n    return False", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"}
{"code": "def private_map(self):\n    return self._private_map", "docstring": "A map from parents to symbols that should not be included at all.\n\nThis map can be edited, but it should not be edited once traversal has\nbegun.\n\nReturns:\nThe map marking symbols to not include.", "source": "github-repos"}
{"code": "def __init__(self, match=None, qps=None, user_qps=None, daily=None,\n               analytics_id=None):\n    \n    _CheckType(match, basestring, 'match')\n    _CheckType(qps, int, 'qps')\n    _CheckType(user_qps, int, 'user_qps')\n    _CheckType(daily, int, 'daily')\n    _CheckType(analytics_id, basestring, 'analytics_id')\n\n    self.__match = match\n    self.__qps = qps\n    self.__user_qps = user_qps\n    self.__daily = daily\n    self.__analytics_id = analytics_id", "docstring": "Constructor for ApiFrontEndLimitRule.\n\nArgs:\nmatch: string, the matching rule that defines this traffic segment.\nqps: int, the aggregate QPS for this segment.\nuser_qps: int, the per-end-user QPS for this segment.\ndaily: int, the aggregate daily maximum for this segment.\nanalytics_id: string, the project ID under which traffic for this segment\nwill be logged.", "source": "juraj-google-style"}
{"code": "def __init__(self, username, email, manager):\n        \n        \n        super(User, self).__init__(manager)\n\n        \n        self.username = username\n        self.email = email", "docstring": "Initialize a user.\n\nArgs:\nusername (str): The user's username.\nemail (str): The user's email.\nmanager (:class:`saltant.models.user.UserManager`):\nThe manager which spawned this user instance.", "source": "juraj-google-style"}
{"code": "def do_usufy(self, query, **kwargs):\n        \n        \n        try:\n            self.wrapperAPI = TwitterAPIWrapper()\n\n            results = self.wrapperAPI.get_user(query)\n\n            for r in results:\n                \n                aux = {}\n                aux[\"type\"]=\"i3visio.uri\"\n                alias=r[\"value\"].split(' - ')[1]\n                aux[\"value\"]= self.createURL(word=alias, mode=\"usufy\")\n                aux[\"attributes\"]= []\n                r[\"attributes\"].append(aux)\n\n        \n        except Exception, e:\n            return super(Twitter, self).do_usufy(query, **kwargs)", "docstring": "Verifying a usufy query in this platform.\n\nThis might be redefined in any class inheriting from Platform.\n\nArgs:\n-----\nquery: The element to be searched.\n\nReturn:\n-------\nA list of elements to be appended.", "source": "juraj-google-style"}
{"code": "def CreateDataTypeMapByType(cls, data_type_definition):\n    \n    data_type_map_class = cls._MAP_PER_DEFINITION.get(\n        data_type_definition.TYPE_INDICATOR, None)\n    if not data_type_map_class:\n      return None\n\n    return data_type_map_class(data_type_definition)", "docstring": "Creates a specific data type map by type indicator.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nDataTypeMap: data type map or None if the date type definition\nis not available.", "source": "juraj-google-style"}
{"code": "def append_item(self, item):\n        \n        did_remove = self.remove_exit()\n        item.menu = self\n        self.items.append(item)\n        if did_remove:\n            self.add_exit()", "docstring": "Add an item to the end of the menu before the exit item.\n\nArgs:\nitem (MenuItem): The item to be added.", "source": "juraj-google-style"}
{"code": "def persist_perf(run, session, svg_path):\n    \n    from benchbuild.utils import schema as s\n\n    with open(svg_path, 'r') as svg_file:\n        svg_data = svg_file.read()\n        session.add(\n            s.Metadata(name=\"perf.flamegraph\", value=svg_data, run_id=run.id))", "docstring": "Persist the flamegraph in the database.\n\nThe flamegraph exists as a SVG image on disk until we persist it in the\ndatabase.\n\nArgs:\nrun: The run we attach these perf measurements to.\nsession: The db transaction we belong to.\nsvg_path: The path to the SVG file we want to store.", "source": "juraj-google-style"}
{"code": "def merge_json_fhir_object_into_proto(json_value: Dict[str, Any], target: message.Message, *, validate: bool=True, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> None:\n    parser = _json_parser.JsonParser.json_parser_with_default_timezone(_PRIMITIVE_HANDLER, default_timezone=default_timezone)\n    parser.merge_value(json_value, target)\n    if validate:\n        resource_validation.validate_resource(target, _PRIMITIVE_HANDLER)", "docstring": "Merges the provided json_value object into a target Message.\n\nArgs:\njson_value: The parsed JSON object to merge into target.\ntarget: The Message instance to merge raw_json into.\nvalidate: A Boolean value indicating if validation should be performed on\nthe resultant Message. Validation takes the form of ensuring that basic\nchecks such as cardinality guarantees, required field adherence, etc. are\nmet. Defaults to True.\ndefault_timezone: A string specifying the timezone string to use for time-\nlike FHIR data during parsing. Defaults to 'Z' for UTC.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that validation fails after\nparsing.", "source": "github-repos"}
{"code": "def __init__(self, action_meanings):\n    \n    self.action_meanings = action_meanings\n    self._wait = True\n    \n    self.action_space = None\n    self._last_step_tuples = None\n    self.action_meanings = action_meanings\n    self.name_to_action_num = {name: num for num, name in\n                               enumerate(self.action_meanings)}", "docstring": "Constructor for PlayerEnv.\n\nArgs:\naction_meanings: list of strings indicating action names. Can be obtain by\n>>> env = gym.make(\"PongNoFrameskip-v4\")  # insert your game name\n>>> env.unwrapped.get_action_meanings()\nSee gym AtariEnv get_action_meanings() for more details.", "source": "juraj-google-style"}
{"code": "def run(self, args):\n    jlink = self.create_jlink(args)\n    if args.downgrade:\n        if (not jlink.firmware_newer()):\n            print('DLL firmware is not older than J-Link firmware.')\n        else:\n            jlink.invalidate_firmware()\n            try:\n                jlink.update_firmware()\n            except pylink.JLinkException as e:\n                jlink = self.create_jlink(args)\n            print(('Firmware Downgraded: %s' % jlink.firmware_version))\n    elif args.upgrade:\n        if (not jlink.firmware_outdated()):\n            print('DLL firmware is not newer than J-Link firmware.')\n        else:\n            try:\n                jlink.update_firmware()\n            except pylink.JLinkException as e:\n                jlink = self.create_jlink(args)\n            print(('Firmware Updated: %s' % jlink.firmware_version))\n    return None", "docstring": "Runs the firmware command.\n\nArgs:\nself (FirmwareCommand): the ``FirmwareCommand`` instance\nargs (Namespace): arguments to parse\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def extract_all(self):\n    (longmin, longmax, latmin, latmax) = self.Boundary()\n    (sample_min, sample_max) = map(int, (self.SAMPLE_FIRST_PIXEL, self.SAMPLE_LAST_PIXEL))\n    (line_min, line_max) = map(int, (self.LINE_FIRST_PIXEL, self.LINE_LAST_PIXEL))\n    X = np.array(map(self.long_id, range(sample_min, (sample_max + 1), 1)))\n    Y = np.array(map(self.lat_id, range(line_min, (line_max + 1), 1)))\n    for (i, line) in enumerate(range(int(line_min), (int(line_max) + 1))):\n        start = (((line - 1) * int(self.SAMPLE_LAST_PIXEL)) + sample_min)\n        chunk_size = int((sample_max - sample_min))\n        Za = self.array(chunk_size, start, self.bytesize)\n        if (i == 0):\n            Z = Za\n        else:\n            Z = np.vstack((Z, Za))\n    (X, Y) = np.meshgrid(X, Y)\n    return (X, Y, Z)", "docstring": "Extract all the image\n\nReturns:\nA tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\nlongitudes, ``Y`` contains the latitude and ``Z`` the values\nextracted from the image.\n\nNote:\nAll return arrays have the same size.\n\nAll coordinate are in degree.", "source": "codesearchnet"}
{"code": "def validate_json_schema(data, schema, name=\"task\"):\n    \n    try:\n        jsonschema.validate(data, schema)\n    except jsonschema.exceptions.ValidationError as exc:\n        raise ScriptWorkerTaskException(\n            \"Can't validate {} schema!\\n{}\".format(name, str(exc)),\n            exit_code=STATUSES['malformed-payload']\n        )", "docstring": "Given data and a jsonschema, let's validate it.\n\nThis happens for tasks and chain of trust artifacts.\n\nArgs:\ndata (dict): the json to validate.\nschema (dict): the jsonschema to validate against.\nname (str, optional): the name of the json, for exception messages.\nDefaults to \"task\".\n\nRaises:\nScriptWorkerTaskException: on failure", "source": "juraj-google-style"}
{"code": "def loss_masks(self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int) -> Dict[str, Tensor]:\n    src_idx = self._get_predictions_permutation_indices(indices)\n    tgt_idx = self._get_targets_permutation_indices(indices)\n    pred_masks = masks_queries_logits[src_idx]\n    target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)\n    target_masks = target_masks[tgt_idx]\n    pred_masks = pred_masks[:, None]\n    target_masks = target_masks[:, None]\n    with torch.no_grad():\n        point_coords = self.sample_points_using_uncertainty(pred_masks, self.calculate_uncertainty, self.num_points, self.oversample_ratio, self.importance_sample_ratio)\n        point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1)\n    point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1)\n    losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}\n    del pred_masks\n    del target_masks\n    return losses", "docstring": "Compute the losses related to the masks using focal and dice loss.\n\nArgs:\nmasks_queries_logits (`torch.Tensor`):\nA tensor of shape `batch_size, num_queries, height, width`\nmask_labels (`torch.Tensor`):\nList of mask labels of shape `(labels, height, width)`.\nindices (`Tuple[np.array])`:\nThe indices computed by the Hungarian matcher.\nnum_masks (`int)`:\nThe number of masks, used for normalization.\n\nReturns:\n`Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:\n- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.\n- **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth\nmasks.", "source": "github-repos"}
{"code": "def _get_configured_module(option_name, known_modules=None):\n    \n    from furious.job_utils import path_to_reference\n\n    config = get_config()\n    option_value = config[option_name]\n\n    \n    if not known_modules:\n        known_modules = {}\n\n    module_path = known_modules.get(option_value) or option_value\n    return path_to_reference(module_path)", "docstring": "Get the module specified by the value of option_name. The value of the\nconfiguration option will be used to load the module by name from the known\nmodule list or treated as a path if not found in known_modules.\nArgs:\noption_name: name of persistence module\nknown_modules: dictionary of module names and module paths,\nie: {'ndb':'furious.extras.appengine.ndb_persistence'}\nReturns:\nmodule of the module path matching the name in known_modules", "source": "juraj-google-style"}
{"code": "def __init__(self, variant_type='snv'):\n        \n        super(VcfPlugin, self).__init__()\n\n        self.individual_objs = []\n        self.case_objs = []\n\n        self.variant_type = variant_type\n        logger.info(\"Setting variant type to {0}\".format(variant_type))\n\n        self.variant_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER']\n        \n        self.head = None\n        self.vep_header = None\n        self.snpeff_header = None\n        \n        \n        self.filters.can_filter_gene = True\n        self.filters.can_filter_frequency = True\n        self.filters.can_filter_cadd = True\n        self.filters.can_filter_consequence = True\n        self.filters.can_filter_impact_severity = True\n        self.filters.can_filter_sv = True\n        self.filters.can_filter_sv_len = True\n        self.filters.can_filter_inheritance = True", "docstring": "Initialize a vcf adapter.\n\nWhen instansiating all cases are found.\n\nArgs:\nvariant_type(str) : 'snv' or 'sv'", "source": "juraj-google-style"}
{"code": "def char_ngrams(s, n=3, token_fn=tokens.on_whitespace):\n    tokens = token_fn(s)\n    ngram_tuples = [__ngrams(t, n=min(len(t), n)) for t in tokens]\n\n    def unpack(l):\n        return sum(l, [])\n\n    def untuple(l):\n        return [''.join(t) for t in l]\n    return untuple(unpack(ngram_tuples))", "docstring": "Character-level n-grams from within the words in a string.\n\nBy default, the word boundary is assumed to be whitespace.  n-grams are\nnot taken across word boundaries, only within words.\n\nIf a word's length is less than or equal to n, the n-grams are simply a\nlist with the word itself.\n\n>>> ng.char_ngrams('This is not a test!')\n['Thi', 'his', 'is', 'not', 'a', 'tes', 'est', 'st!']\n\nTherefore some n-grams may have a length less than n, like 'is' and 'a'\nin this example.\n\nArgs:\ns: a string\nn: an int for the n in n-gram\ntoken_fn: a function that splits a string into a list of strings\n\nReturns:\nlist: strings of char-level n-grams", "source": "codesearchnet"}
{"code": "def remove_waiter(self, waiter_handle):\n        \n\n        spec, waiter = waiter_handle\n        self._remove_waiter(spec, waiter)", "docstring": "Remove a message callback.\n\nThis call will remove a callback previously registered using\nevery_match.\n\nArgs:\nwaiter_handle (object): The opaque handle returned by the\nprevious call to every_match().", "source": "juraj-google-style"}
{"code": "def stop_dag(self, name=None):\n        \n        return self._client.send(\n            Request(\n                action='stop_dag',\n                payload={'name': name if name is not None else self._dag_name}\n            )\n        ).success", "docstring": "Send a stop signal to the specified dag or the dag that hosts this task.\n\nArgs:\nname str: The name of the dag that should be stopped. If no name is given the\ndag that hosts this task is stopped.\n\nUpon receiving the stop signal, the dag will not queue any new tasks and wait\nfor running tasks to terminate.\n\nReturns:\nbool: True if the signal was sent successfully.", "source": "juraj-google-style"}
{"code": "def __init__(self, action_type=None, nw_addr=None):\n        \n        super().__init__(action_type, length=8)\n        self.nw_addr = nw_addr", "docstring": "Create an ActionNWAddr with the optional parameters below.\n\nArgs:\naction_type (:class:`~pyof.v0x01.common.action.ActionType`):\n:attr:`~ActionType.OFPAT_SET_NW_SRC` or\n:attr:`~ActionType.OFPAT_SET_NW_DST`.\nnw_addr (int): IP Address.", "source": "juraj-google-style"}
{"code": "def InsertData(self, table_id, fd, schema, job_id):\n    configuration = {'schema': {'fields': schema}, 'destinationTable': {'projectId': self.project_id, 'tableId': table_id, 'datasetId': self.dataset_id}, 'sourceFormat': 'NEWLINE_DELIMITED_JSON'}\n    body = {'configuration': {'load': configuration}, 'jobReference': {'projectId': self.project_id, 'jobId': job_id}}\n    mediafile = http.MediaFileUpload(fd.name, mimetype='application/octet-stream')\n    job = self.service.jobs().insert(projectId=self.project_id, body=body, media_body=mediafile)\n    try:\n        response = job.execute()\n        return response\n    except errors.HttpError as e:\n        if self.GetDataset(self.dataset_id):\n            logging.exception('Error with job: %s', job_id)\n        else:\n            logging.info('Attempting to create dataset: %s', self.dataset_id)\n            self.CreateDataset()\n        return self.RetryUpload(job, job_id, e)", "docstring": "Insert data into a bigquery table.\n\nIf the table specified doesn't exist, it will be created with the specified\nschema.\n\nArgs:\ntable_id: string table id\nfd: open file descriptor containing the newline separated JSON\nschema: BigQuery schema dict\njob_id: string job id\n\nReturns:\nAPI response object on success, None on failure", "source": "codesearchnet"}
{"code": "def __init__(self, fetches, contraction_fn):\n    self._unique_fetches = []\n    for fetch in fetches:\n        try:\n            self._unique_fetches.append(ops.get_default_graph().as_graph_element(fetch, allow_tensor=True, allow_operation=True))\n        except TypeError as e:\n            raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\" must be a string or Tensor. ({str(e)})')\n        except ValueError as e:\n            raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as a Tensor. ({str(e)})')\n        except KeyError as e:\n            raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as a Tensor. ({str(e)})')\n    self._contraction_fn = contraction_fn", "docstring": "Creates an _ElementFetchMapper.\n\nThis is the fetch mapper used for leaves in the fetch struct.  Because of\nthe expansions mechanism, a leaf can actually fetch more than one tensor.\n\nAlso note that the fetches here can be just strings (tensor or op names) or\nany other object that the graph knows how to convert to a tensor, such as a\nVariable.  So we have to run each fetch through `as_graph_element()` to get\nthe corresponding tensor or op.\n\nArgs:\nfetches: List of objects, as returned by a fetch_fn defined in\n_REGISTERED_EXPANSIONS.\ncontraction_fn: Callable as returned by a fetch_fn.", "source": "github-repos"}
{"code": "def view(value: Any, *, name: Optional[str]=None, root_path: Optional[utils.KeyPath]=None, view_id: str='html-tree-view', **kwargs) -> Content:\n    if isinstance(value, Content):\n        return value\n    with view_options(**kwargs) as options:\n        view_object = View.create(view_id)\n        return view_object.render(value, name=name, root_path=root_path or utils.KeyPath(), **options)", "docstring": "Views an object through generating content based on a specific view.\n\nArgs:\nvalue: The value to view.\nname: The name of the value.\nroot_path: The root path of the value.\nview_id: The ID of the view to use. See `pg.View.dir()` for all available\nview IDs.\n**kwargs: Additional keyword arguments passed to the view, wich\nwill be used as the preset arguments for the View and Extension methods.\n\nReturns:\nThe rendered `Content` object.", "source": "github-repos"}
{"code": "def inner_join(df, other, **kwargs):\n    (left_on, right_on, suffixes) = get_join_parameters(kwargs)\n    joined = df.merge(other, how='inner', left_on=left_on, right_on=right_on, suffixes=suffixes)\n    return joined", "docstring": "Joins on values present in both DataFrames.\n\nArgs:\ndf (pandas.DataFrame): Left DataFrame (passed in via pipe)\nother (pandas.DataFrame): Right DataFrame\n\nKwargs:\nby (str or list): Columns to join on. If a single string, will join\non that column. If a list of lists which contain strings or\nintegers, the right/left columns to join on.\nsuffixes (list): String suffixes to append to column names in left\nand right DataFrames.\n\nExample:\na >> inner_join(b, by='x1')\n\nx1  x2     x3\n0  A   1   True\n1  B   2  False", "source": "codesearchnet"}
{"code": "def neighborhood_probability(self, threshold, radius, sigmas=None):\n    if (sigmas is None):\n        sigmas = [0]\n    weights = disk(radius)\n    filtered_prob = []\n    for sigma in sigmas:\n        filtered_prob.append(EnsembleConsensus(np.zeros(self.data.shape[1:], dtype=np.float32), 'neighbor_prob_r_{0:d}_s_{1:d}'.format(radius, sigma), self.ensemble_name, self.run_date, (self.variable + '_{0:0.2f}'.format(threshold)), self.start_date, self.end_date, ''))\n    thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)\n    neighbor_prob = np.zeros(self.data.shape[2:], dtype=np.float32)\n    for t in range(self.data.shape[1]):\n        for m in range(self.data.shape[0]):\n            thresh_data[(self.data[(m, t)] >= threshold)] = 1\n            maximized = fftconvolve(thresh_data, weights, mode='same')\n            maximized[(maximized > 1)] = 1\n            maximized[(maximized < 1)] = 0\n            neighbor_prob += fftconvolve(maximized, weights, mode='same')\n            neighbor_prob[(neighbor_prob < 1)] = 0\n            thresh_data[:] = 0\n        neighbor_prob /= (self.data.shape[0] * float(weights.sum()))\n        for (s, sigma) in enumerate(sigmas):\n            if (sigma > 0):\n                filtered_prob[s].data[t] = gaussian_filter(neighbor_prob, sigma=sigma)\n            else:\n                filtered_prob[s].data[t] = neighbor_prob\n        neighbor_prob[:] = 0\n    return filtered_prob", "docstring": "Hourly probability of exceeding a threshold based on model values within a specified radius of a point.\n\nArgs:\nthreshold (float): probability of exceeding this threshold\nradius (int): distance from point in number of grid points to include in neighborhood calculation.\nsigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.\n\nReturns:\nlist of EnsembleConsensus objects containing neighborhood probabilities for each forecast hour.", "source": "codesearchnet"}
{"code": "def remove_codeblock_syntax_sentinals(code_text):\n    flags = (re.MULTILINE | re.DOTALL)\n    code_text_ = code_text\n    code_text_ = re.sub('^ *\n    code_text_ = re.sub('^ *\n    code_text_ = re.sub('^ *\n    code_text_ = code_text_.rstrip()\n    return code_text_", "docstring": "r\"\"\"\nRemoves template comments and vim sentinals\n\nArgs:\ncode_text (str):\n\nReturns:\nstr: code_text_", "source": "codesearchnet"}
{"code": "def restores(self):\n    if (not self.__restores):\n        self.__restores = Restores(self.__connection)\n    return self.__restores", "docstring": "Gets the Restores API client.\n\nReturns:\nRestores:", "source": "codesearchnet"}
{"code": "def from_str(cls, label: str) -> int:\n    label_norm = label.replace('1', 'one').upper()\n    if (label_norm in cls.__members__):\n        return DecayType[label_norm]\n    else:\n        raise NotImplementedError", "docstring": "Convert given string label of decay type to special index\n\nArgs:\nlabel: name of decay type.\nSet of values: `\"linear\"`, `\"cosine\"`, `\"exponential\"`,\n`\"onecycle\"`, `\"trapezoid\"`, `[\"polynomial\", K]`, where K is a polynomial power\n\nReturns:\nindex of decay type", "source": "codesearchnet"}
{"code": "def patch_so(srcs_dir: str) -> None:\n    to_patch = {'tensorflow/python/_pywrap_tensorflow_internal.so': '$ORIGIN/../../tensorflow/compiler/xla/tsl/python/lib/core', 'tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so': '$ORIGIN/../../../../../python', 'tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so': '$ORIGIN/../../../../../python', 'tensorflow/compiler/mlir/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.so': '$ORIGIN/../../../../python', 'tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.so': '$ORIGIN/../../../../python'}\n    for file, path in to_patch.items():\n        rpath = subprocess.check_output(['patchelf', '--print-rpath', '{}/{}'.format(srcs_dir, file)]).decode().strip()\n        new_rpath = rpath + ':' + path\n        subprocess.run(['patchelf', '--set-rpath', new_rpath, '{}/{}'.format(srcs_dir, file)], check=True)\n        subprocess.run(['patchelf', '--shrink-rpath', '{}/{}'.format(srcs_dir, file)], check=True)", "docstring": "Patch .so files.\n\nWe must patch some of .so files otherwise auditwheel will fail.\n\nArgs:\nsrcs_dir: target directory with .so files to patch.", "source": "github-repos"}
{"code": "def _DrawTrips(self, triplist, colpar=''):\n    stations = []\n    if ((not self._stations) and triplist):\n        self._stations = self._CalculateYLines(self._TravelTimes(triplist))\n        if (not self._stations):\n            self._AddWarning('Failed to use traveltimes for graph')\n            self._stations = self._CalculateYLines(self._Uniform(triplist))\n            if (not self._stations):\n                self._AddWarning('Failed to calculate station distances')\n                return\n    stations = self._stations\n    tmpstrs = []\n    servlist = []\n    for t in triplist:\n        if (not colpar):\n            if (t.service_id not in servlist):\n                servlist.append(t.service_id)\n            shade = int(((servlist.index(t.service_id) * (200 / len(servlist))) + 55))\n            color = ('\n        else:\n            color = colpar\n        start_offsets = [0]\n        first_stop = t.GetTimeStops()[0]\n        for (j, freq_offset) in enumerate(start_offsets):\n            if ((j > 0) and (not colpar)):\n                color = 'purple'\n            scriptcall = ('onmouseover=\"LineClick(\\'%s\\',\\'Trip %s starting %s\\')\"' % (t.trip_id, t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime())))\n            tmpstrhead = ('<polyline class=\"T\" id=\"%s\" stroke=\"%s\" %s points=\"' % (str(t.trip_id), color, scriptcall))\n            tmpstrs.append(tmpstrhead)\n            for (i, s) in enumerate(t.GetTimeStops()):\n                arr_t = s[0]\n                dep_t = s[1]\n                if ((arr_t is None) or (dep_t is None)):\n                    continue\n                arr_x = (int(((arr_t / 3600.0) * self._hour_grid)) - (self._hour_grid * self._offset))\n                dep_x = (int(((dep_t / 3600.0) * self._hour_grid)) - (self._hour_grid * self._offset))\n                tmpstrs.append(('%s,%s ' % (int((arr_x + 20)), int((stations[i] + 20)))))\n                tmpstrs.append(('%s,%s ' % (int((dep_x + 20)), int((stations[i] + 20)))))\n            tmpstrs.append('\" />')\n    return ''.join(tmpstrs)", "docstring": "Generates svg polylines for each transit trip.\n\nArgs:\n# Class Trip is defined in transitfeed.py\n[Trip, Trip, ...]\n\nReturns:\n# A string containing a polyline tag for each trip\n' <polyline class=\"T\" stroke=\"#336633\" points=\"433,0 ...'", "source": "codesearchnet"}
{"code": "def oauth2decorator_from_clientsecrets(filename, scope, message=None, cache=None):\n    return OAuth2DecoratorFromClientSecrets(filename, scope, message=message, cache=cache)", "docstring": "Creates an OAuth2Decorator populated from a clientsecrets file.\n\nArgs:\nfilename: string, File name of client secrets.\nscope: string or list of strings, scope(s) of the credentials being\nrequested.\nmessage: string, A friendly string to display to the user if the\nclientsecrets file is missing or invalid. The message may\ncontain HTML and will be presented on the web interface for\nany method that uses the decorator.\ncache: An optional cache service client that implements get() and set()\nmethods. See clientsecrets.loadfile() for details.\n\nReturns: An OAuth2Decorator", "source": "codesearchnet"}
{"code": "def stop_loss_replace(self, accountID, orderID, **kwargs):\n        \n        return self.replace(\n            accountID,\n            orderID,\n            order=StopLossOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to replace a pending Stop Loss Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Stop Loss Order to replace\nkwargs : The arguments to create a StopLossOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def create(window, root):\n        \n        notifications = {}\n        _id = root.get_property(\"id\")\n        from foxpuppet.windows.browser.notifications import addons\n\n        notifications.update(addons.NOTIFICATIONS)\n        return notifications.get(_id, BaseNotification)(window, root)", "docstring": "Create a notification object.\n\nArgs:\nwindow (:py:class:`BrowserWindow`): Window object this region\nappears in.\nroot\n(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):\nWebDriver element object that serves as the root for the\nnotification.\n\nReturns:\n:py:class:`BaseNotification`: Firefox notification.", "source": "juraj-google-style"}
{"code": "def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):\n    default_val = (default if (default != self.DEFAULT_VALUE_SAFEGUARD) else {})\n    try:\n        return (get_edx_api_data(api_config=CatalogIntegration.current(), resource=resource, api=self.client, **kwargs) or default_val)\n    except (SlumberBaseException, ConnectionError, Timeout) as exc:\n        LOGGER.exception('Failed to load data from resource [%s] with kwargs [%s] due to: [%s]', resource, kwargs, str(exc))\n        return default_val", "docstring": "Load data from API client.\n\nArguments:\nresource(string): type of resource to load\ndefault(any): value to return if API query returned empty result. Sensible values: [], {}, None etc.\n\nReturns:\ndict: Deserialized response from Course Catalog API", "source": "codesearchnet"}
{"code": "def attention_bias_proximal(length):\n    r = tf.to_float(tf.range(length))\n    diff = (tf.expand_dims(r, 0) - tf.expand_dims(r, 1))\n    return tf.expand_dims(tf.expand_dims((- tf.log1p(tf.abs(diff))), 0), 0)", "docstring": "Bias for self-attention to encourage attention to close positions.\n\nArgs:\nlength: an integer scalar.\n\nReturns:\na Tensor with shape [1, 1, length, length]", "source": "codesearchnet"}
{"code": "def list_asgs_all(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Network/virtualNetworks/',\n                        '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about the application security groups for a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. ASG JSON body.", "source": "juraj-google-style"}
{"code": "def text_colour_for_hex(hexx, percent=50, dark='\n    return (light if hex_is_dark(hexx, percent=percent) else dark)", "docstring": "Function to decide what colour to use for a given hex colour.\n\nArgs:\nhexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\nbool: The colour's brightness is less than the given percent.", "source": "codesearchnet"}
{"code": "def symlink(src, link):\n    if (sys.getwindowsversion().major < 6):\n        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')\n    if (not os.path.exists(src)):\n        raise SaltInvocationError('The given source path does not exist.')\n    if (not os.path.isabs(src)):\n        raise SaltInvocationError('File path must be absolute.')\n    src = os.path.normpath(src)\n    link = os.path.normpath(link)\n    is_dir = os.path.isdir(src)\n    try:\n        win32file.CreateSymbolicLink(link, src, int(is_dir))\n        return True\n    except pywinerror as exc:\n        raise CommandExecutionError(\"Could not create '{0}' - [{1}] {2}\".format(link, exc.winerror, exc.strerror))", "docstring": "Create a symbolic link to a file\n\nThis is only supported with Windows Vista or later and must be executed by\na user with the SeCreateSymbolicLink privilege.\n\nThe behavior of this function matches the Unix equivalent, with one\nexception - invalid symlinks cannot be created. The source path must exist.\nIf it doesn't, an error will be raised.\n\nArgs:\nsrc (str): The path to a file or directory\nlink (str): The path to the link\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.symlink /path/to/file /path/to/link", "source": "codesearchnet"}
{"code": "def trace_set_buffer_capacity(self, size):\n        \n        cmd = enums.JLinkTraceCommand.SET_CAPACITY\n        data = ctypes.c_uint32(size)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n        if (res == 1):\n            raise errors.JLinkException('Failed to set trace buffer size.')\n        return None", "docstring": "Sets the capacity for the trace buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nsize (int): the new capacity for the trace buffer.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def supported_tifs(self):\n        \n        buf = ctypes.c_uint32()\n        self._dll.JLINKARM_TIF_GetAvailable(ctypes.byref(buf))\n        return buf.value", "docstring": "Returns a bitmask of the supported target interfaces.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nBitfield specifying which target interfaces are supported.", "source": "juraj-google-style"}
{"code": "def _ConvertAttributeContainerToDict(cls, attribute_container):\n    if (not isinstance(attribute_container, containers_interface.AttributeContainer)):\n        raise TypeError('{0:s} is not an attribute container type.'.format(type(attribute_container)))\n    container_type = getattr(attribute_container, 'CONTAINER_TYPE', None)\n    if (not container_type):\n        raise ValueError('Unsupported attribute container type: {0:s}.'.format(type(attribute_container)))\n    json_dict = {'__type__': 'AttributeContainer', '__container_type__': container_type}\n    for (attribute_name, attribute_value) in attribute_container.GetAttributes():\n        json_dict[attribute_name] = cls._ConvertAttributeValueToDict(attribute_value)\n    return json_dict", "docstring": "Converts an attribute container object into a JSON dictionary.\n\nThe resulting dictionary of the JSON serialized objects consists of:\n{\n'__type__': 'AttributeContainer'\n'__container_type__': ...\n...\n}\n\nHere '__type__' indicates the object base type. In this case\n'AttributeContainer'.\n\n'__container_type__' indicates the container type and rest of the elements\nof the dictionary make up the attributes of the container.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.\n\nReturns:\ndict[str, object]: JSON serialized objects.\n\nRaises:\nTypeError: if not an instance of AttributeContainer.\nValueError: if the attribute container type is not supported.", "source": "codesearchnet"}
{"code": "def parse_columns(lines):\n    data = []\n    index = []\n    for line in lines:\n        line = line.rstrip()\n        if line.startswith('\n            tmp = __parse_entry(line)\n            data.append(tmp[1])\n            index.append(tmp[0])\n    return DataFrame(data, index=index, columns=['description'])", "docstring": "Parse list of lines with columns description from SOFT file.\n\nArgs:\nlines (:obj:`Iterable`): Iterator over the lines.\n\nReturns:\n:obj:`pandas.DataFrame`: Columns description.", "source": "codesearchnet"}
{"code": "def GetEventTagByIdentifier(self, identifier):\n    event_tag = self._GetAttributeContainerByIndex(self._CONTAINER_TYPE_EVENT_TAG, (identifier.row_identifier - 1))\n    if event_tag:\n        event_identifier = identifiers.SQLTableIdentifier(self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)\n        event_tag.SetEventIdentifier(event_identifier)\n        del event_tag.event_row_identifier\n    return event_tag", "docstring": "Retrieves a specific event tag.\n\nArgs:\nidentifier (SQLTableIdentifier): event tag identifier.\n\nReturns:\nEventTag: event tag or None if not available.", "source": "codesearchnet"}
{"code": "def kill_reporter(self, check_alive=True):\n        \n        \n        if PY3:\n            self._kill_process_type(\n                ray_constants.PROCESS_TYPE_REPORTER, check_alive=check_alive)", "docstring": "Kill the reporter.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "juraj-google-style"}
{"code": "def csv_to_matrix(csv_file_path):\n    \n    mtx = []\n    with open(csv_file_path) as csv_data_file:\n        for row in csv_data_file:\n            mtx.append(row.split(','))\n    return mtx", "docstring": "Load a CSV file into a Python matrix of strings.\n\nArgs:\ncsv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)", "source": "juraj-google-style"}
{"code": "def softplus(x, scale=1.0, name=None):\n  \n  if scale == 1:\n    return tf.nn.softplus(x)\n  else:\n    with tf.name_scope(name, 'softplus', [x]):\n      scale = tf.convert_to_tensor(scale, dtype=x.dtype.base_dtype)\n      return tf.nn.softplus(x * scale) / scale", "docstring": "Computes softplus with a scale factor to sharpen of the hinge.\n\nThis is an alternate non-linearity to relu. It has a similar shape, but\nit has a smooth transition from the linear part to 0.\n\nArgs:\nx: A tensor.\nscale: A float that sharpens the curve.\nname: Optional name.\nReturns:\ny = log(1 + exp(scale * x)) / scale", "source": "juraj-google-style"}
{"code": "def prep_parallel(self, binary_args, other_args):\n        \n        if self.length < 100:\n            raise Exception(\"Run this across 1 processor by setting num_processors kwarg to None.\")\n        if self.num_processors == -1:\n            self.num_processors = mp.cpu_count()\n\n        split_val = int(np.ceil(self.length/self.num_splits))\n        split_inds = [self.num_splits*i for i in np.arange(1, split_val)]\n\n        inds_split_all = np.split(np.arange(self.length), split_inds)\n\n        self.args = []\n        for i, ind_split in enumerate(inds_split_all):\n            trans_args = []\n            for arg in binary_args:\n                try:\n                    trans_args.append(arg[ind_split])\n                except TypeError:\n                    trans_args.append(arg)\n\n            self.args.append((i, tuple(trans_args)) + other_args)\n        return", "docstring": "Prepare the parallel calculations\n\nPrepares the arguments to be run in parallel.\nIt will divide up arrays according to num_splits.\n\nArgs:\nbinary_args (list): List of binary arguments for input into the SNR function.\nother_args (tuple of obj): tuple of other args for input into parallel snr function.", "source": "juraj-google-style"}
{"code": "def __init__(self, job_id, context):\n    \n    super(GCPJob, self).__init__(job_id)\n    if context is None:\n      context = google.datalab.Context.default()\n    self._context = context\n    self._api = self._create_api(context)", "docstring": "Initializes an instance of a Job.\n\nArgs:\njob_id: the BigQuery job ID corresponding to this job.\ncontext: a Context object providing project_id and credentials.", "source": "juraj-google-style"}
{"code": "def preprocess(self, raw_inputs):\n    image_arrays = []\n    for raw_im in raw_inputs:\n        im = raw_im.convert('L')\n        im = im.resize(MNIST_DIM, Image.ANTIALIAS)\n        arr = np.array(im)\n        image_arrays.append(arr)\n    inputs = np.array(image_arrays)\n    return (inputs.reshape(len(inputs), MNIST_DIM[0], MNIST_DIM[1], 1).astype('float32') / 255)", "docstring": "Convert images into the format required by our model.\n\nOur model requires that inputs be grayscale (mode 'L'), be resized to\n`MNIST_DIM`, and be represented as float32 numpy arrays in range\n[0, 1].\n\nArgs:\nraw_inputs (list of Images): a list of PIL Image objects\n\nReturns:\narray (float32): num images * height * width * num channels", "source": "codesearchnet"}
{"code": "def _page_to_text(page):\n    start_pos = page.find(u'<text')\n    assert (start_pos != (- 1))\n    end_tag_pos = page.find(u'>', start_pos)\n    assert (end_tag_pos != (- 1))\n    end_tag_pos += len(u'>')\n    end_pos = page.find(u'</text>')\n    if (end_pos == (- 1)):\n        return u''\n    return page[end_tag_pos:end_pos]", "docstring": "Extract the text from a page.\n\nArgs:\npage: a unicode string\nReturns:\na unicode string", "source": "codesearchnet"}
{"code": "def rated_movies(self, **kwargs):\n    path = self._get_guest_session_id_path('rated_movies')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get a list of rated moview for a specific guest session id.\n\nArgs:\npage: (optional) Minimum 1, maximum 1000.\nsort_by: (optional) 'created_at.asc' | 'created_at.desc'\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def execute(self):\n    for name_context, spec in zip(self._map_task.name_contexts, self._map_task.operations):\n        op = create_operation(name_context, spec, self._counter_factory, None, self._state_sampler, test_shuffle_source=self._test_shuffle_source, test_shuffle_sink=self._test_shuffle_sink)\n        self._ops.append(op)\n        if hasattr(op.spec, 'input'):\n            producer, output_index = op.spec.input\n            self._ops[producer].add_receiver(op, output_index)\n        if hasattr(op.spec, 'inputs'):\n            for producer, output_index in op.spec.inputs:\n                self._ops[producer].add_receiver(op, output_index)\n    for ix, op in reversed(list(enumerate(self._ops))):\n        _LOGGER.debug('Starting op %d %s', ix, op)\n        op.start()\n    for op in self._ops:\n        op.finish()", "docstring": "Executes all the operation_specs.Worker* instructions in a map task.\n\nWe update the map_task with the execution status, expressed as counters.\n\nRaises:\nRuntimeError: if we find more than on read instruction in task spec.\nTypeError: if the spec parameter is not an instance of the recognized\noperation_specs.Worker* classes.", "source": "github-repos"}
{"code": "def _ragged_split(tensor, pieces):\n    shape = tensor.shape\n    if 1 != len(shape):\n        raise ValueError('input tensor must be 1D')\n    tensor_len = shape.dims[0].value\n    chunk_size = tensor_len \n    with ops.colocate_with(tensor):\n        if tensor_len != pieces * chunk_size:\n            assert pieces > 1\n            last_chunk_size = tensor_len - (pieces - 1) * chunk_size\n            assert last_chunk_size > 0\n            piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]\n            return array_ops.split(tensor, piece_lens)\n        else:\n            return array_ops.split(tensor, pieces)", "docstring": "Like split for 1D tensors but allows case where len % pieces != 0.\n\nArgs:\ntensor: `tf.Tensor` that must be 1D.\npieces: a positive integer specifying the number of pieces into which\ntensor should be split.\n\nReturns:\nlist of `tf.Tensor` of length pieces, which hold the values of\nthe input tensor, in order. The final tensor may be shorter\nthan the others, which will all be of equal length.\n\nRaises:\nValueError: input tensor must be 1D.", "source": "github-repos"}
{"code": "class PatchTSMixerForRegressionOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    regression_outputs: Optional[torch.FloatTensor] = None\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Output type of [`PatchTSMixerForRegressionOutput`].\n\nArgs:\nregression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):\nPrediction output from the regression head.\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):\nBackbone embeddings before passing through the head.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*):\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.\nloss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):\nTotal loss.", "source": "github-repos"}
{"code": "def GetContainingWhileContext(ctxt, stop_ctxt=None):\n    while ctxt:\n        if ctxt.IsWhileContext() or ctxt == stop_ctxt:\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None", "docstring": "Returns the first ancestor WhileContext of `ctxt`.\n\nReturns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a\nwhile loop.\n\nArgs:\nctxt: ControlFlowContext\nstop_ctxt: ControlFlowContext, optional. If provided, the search will end\nif it sees stop_ctxt.\n\nReturns:\n`ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing\n`ctxt`, or None if `ctxt` is not in a while loop.  If `stop_ctxt` is not\n`None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal.", "source": "github-repos"}
{"code": "def Create(conf, map_name, automount_mountpoint=None):\n    global _cache_implementations\n    if not _cache_implementations:\n        raise RuntimeError('no cache implementations exist')\n    cache_name = conf['name']\n    if cache_name not in _cache_implementations:\n        raise RuntimeError('cache not implemented: %r' % (cache_name,))\n    if map_name not in _cache_implementations[cache_name]:\n        raise RuntimeError('map %r not supported by cache %r' % (map_name, cache_name))\n    return _cache_implementations[cache_name][map_name](conf, map_name, automount_mountpoint=automount_mountpoint)", "docstring": "Cache creation factory method.\n\nArgs:\nconf: a dictionary of configuration key/value pairs, including one\nrequired attribute 'name'\nmap_name: a string identifying the map name to handle\nautomount_mountpoint: A string containing the automount mountpoint, used only\nby automount maps.\n\nReturns:\nan instance of a Cache\n\nRaises:\nRuntimeError: problem instantiating the requested cache", "source": "github-repos"}
{"code": "def structure_2_lmpdata(structure, ff_elements=None, atom_style=\"charge\"):\n    \n    s = structure.get_sorted_structure()\n\n    a, b, c = s.lattice.abc\n    m = s.lattice.matrix\n    xhi = a\n    xy = np.dot(m[1], m[0] / xhi)\n    yhi = np.sqrt(b ** 2 - xy ** 2)\n    xz = np.dot(m[2], m[0] / xhi)\n    yz = (np.dot(m[1], m[2]) - xy * xz) / yhi\n    zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)\n    box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]\n    box_tilt = [xy, xz, yz]\n    box_tilt = None if not any(box_tilt) else box_tilt\n    box = LammpsBox(box_bounds, box_tilt)\n    new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])\n    s.lattice = new_latt\n\n    symbols = list(s.symbol_set)\n    if ff_elements:\n        symbols.extend(ff_elements)\n    elements = sorted(Element(el) for el in set(symbols))\n    mass_info = [tuple([i.symbol] * 2) for i in elements]\n    ff = ForceField(mass_info)\n    topo = Topology(s)\n    return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo],\n                                             atom_style=atom_style)", "docstring": "Converts a structure to a LammpsData object with no force field\nparameters and topologies.\n\nArgs:\nstructure (Structure): Input structure.\nff_elements ([str]): List of strings of elements that must be\npresent due to force field settings but not necessarily in\nthe structure. Default to None.\natom_style (str): Choose between \"atomic\" (neutral) and\n\"charge\" (charged). Default to \"charge\".\n\nReturns:\nLammpsData", "source": "juraj-google-style"}
{"code": "def lookup(self, keys, name=None):\n    with ops.name_scope(name, '%s_lookup_table_find' % self.name, [self.resource_handle, keys]):\n        keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name='keys')\n        with ops.colocate_with(self.resource_handle):\n            values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, keys, self._default_value)\n    return values", "docstring": "Looks up `keys` in a table, outputs the corresponding values.\n\nThe `default_value` is used for keys not present in the table.\n\nArgs:\nkeys: Keys to look up. Can be a tensor of any shape. Must match the\ntable's key_dtype.\nname: A name for the operation (optional).\n\nReturns:\nA tensor containing the values in the same shape as `keys` using the\ntable's value type.\n\nRaises:\nTypeError: when `keys` do not match the table data types.", "source": "github-repos"}
{"code": "def __init__(self, ball_radius=10, n_chains=4, chain_length=10, monomer=None):\n        \n        super(Tnp, self).__init__()\n\n        if not monomer:\n            monomer = Bead(particle_kind='t')\n\n        n = 129  \n        self.add(Sphere(n=n, radius=ball_radius, port_distance_from_surface=0.7), label=\"np\")\n\n        \n        pattern = mb.SpherePattern(n_chains)\n\n        \n        pattern.scale(ball_radius)\n\n        chain_proto = mb.Polymer(monomer, n=chain_length)\n\n        \n        chain_protos, empty_backfill = pattern.apply_to_compound(chain_proto,\n                guest_port_name=\"down\", host=self['np'])\n        self.add(chain_protos)\n\n        self.generate_bonds('np', 'np', sqrt(4 * ball_radius ** 2 * pi / n) - 0.5,\n                            sqrt(4 * ball_radius**2 * pi / n) + 0.5)\n        self.generate_bonds('np', 't', 0.1, 0.3)\n        self.generate_bonds('t', 'np', 0.1, 0.3)", "docstring": "Initialize a tethered nanoparticle.\n\nArgs:\nball_radius (float): Radius of the nanoparticle.\nn_chains (int): Number of chains to attach to the nanoparticle.\nchain_length (int): Length of the chains being attached.\nmonomer (Compound, optional): Type of chain being attached.", "source": "juraj-google-style"}
{"code": "def save_plot(self, filename, img_format=\"eps\", ylim=None, units=\"thz\"):\n        \n        plt = self.get_plot(ylim=ylim, units=units)\n        plt.savefig(filename, format=img_format)\n        plt.close()", "docstring": "Save matplotlib plot to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.\nylim: Specifies the y-axis limits.\nunits: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.", "source": "juraj-google-style"}
{"code": "def upload_dict(s3_conn, s3_prefix, data_to_sync):\n    (bucket_name, prefix) = split_s3_path(s3_prefix)\n    bucket = s3_conn.get_bucket(bucket_name)\n    for (key, value) in data_to_sync.items():\n        full_name = '{}/{}.json'.format(prefix, key)\n        s3_key = boto.s3.key.Key(bucket=bucket, name=full_name)\n        logging.info('uploading key %s', full_name)\n        s3_key.set_contents_from_string(json.dumps(value))", "docstring": "Syncs a dictionary to an S3 bucket, serializing each value in the\ndictionary as a JSON file with the key as its name.\n\nArgs:\ns3_conn: (boto.s3.connection) an s3 connection\ns3_prefix: (str) the destination prefix\ndata_to_sync: (dict)", "source": "codesearchnet"}
{"code": "def initial_sql(self, value):\n        \n\n        self._initial_sql = value\n        \n        if value is None:\n            try:\n                del self._connectionXML.attrib['one-time-sql']\n            except KeyError:\n                pass\n        else:\n            self._connectionXML.set('one-time-sql', value)", "docstring": "Set the connection's initial_sql property.\n\nArgs:\nvalue:  New initial_sql value. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def get_unbound_arg_names(arg_names, arg_binding_keys):\n    \n    bound_arg_names = [abk._arg_name for abk in arg_binding_keys]\n    return [arg_name for arg_name in arg_names\n            if arg_name not in bound_arg_names]", "docstring": "Determines which args have no arg binding keys.\n\nArgs:\narg_names: a sequence of the names of possibly bound args\narg_binding_keys: a sequence of ArgBindingKey each of whose arg names is\nin arg_names\nReturns:\na sequence of arg names that is a (possibly empty, possibly non-proper)\nsubset of arg_names", "source": "juraj-google-style"}
{"code": "def add_file_to_tree(tree, file_path, file_contents, is_executable=False):\n    \n    record = {\n        \"path\": file_path,\n        \"mode\": \"100755\" if is_executable else \"100644\",\n        \"type\": \"blob\",\n        \"content\": file_contents,\n        }\n    tree.append(record)\n    return tree", "docstring": "Add a file to a tree.\n\nArgs:\n\ntree\nA list of dicts containing info about each blob in a tree.\n\nfile_path\nThe path of the new file in the tree.\n\nfile_contents\nThe (UTF-8 encoded) contents of the new file.\n\nis_executable\nIf ``True``, the new file will get executable permissions (0755).\nOtherwise, it will get 0644 permissions.\n\nReturns:\nThe provided tree, but with the new file added.", "source": "juraj-google-style"}
{"code": "def save_prefixed_metrics(results, output_dir, file_name: str='all_results.json', metric_key_prefix: str='eval'):\n    for key in list(results.keys()):\n        if not key.startswith(f'{metric_key_prefix}_'):\n            results[f'{metric_key_prefix}_{key}'] = results.pop(key)\n    with open(os.path.join(output_dir, file_name), 'w') as f:\n        json.dump(results, f, indent=4)", "docstring": "Save results while prefixing metric names.\n\nArgs:\nresults: (:obj:`dict`):\nA dictionary of results.\noutput_dir: (:obj:`str`):\nAn output directory.\nfile_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`):\nAn output file name.\nmetric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`):\nA metric name prefix.", "source": "github-repos"}
{"code": "def full(shape, fill_value, dtype=None):\n    if any_symbolic_tensors((fill_value,)):\n        return Full(shape=shape, dtype=dtype).symbolic_call(fill_value)\n    return backend.numpy.full(shape, fill_value, dtype=dtype)", "docstring": "Return a new tensor of given shape and type, filled with `fill_value`.\n\nArgs:\nshape: Shape of the new tensor.\nfill_value: Fill value.\ndtype: Desired data type of the tensor.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def safe_tag(self, tag, errors='strict'):\n        \n        if tag is not None:\n            try:\n                \n                tag = quote(self.s(tag, errors=errors), safe='~')[:128]\n            except KeyError as e:\n                warn = 'Failed converting tag to safetag ({})'.format(e)\n                self.log.warning(warn)\n        return tag", "docstring": "URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API.\n\nArgs:\ntag (string): The tag to be truncated\n\nReturns:\n(string): The truncated tag", "source": "juraj-google-style"}
{"code": "def _preprocess_params(cls, kwargs):\n        \n        \n        for attr, val in kwargs.items():\n            if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:\n                del kwargs[attr]\n                continue\n            if val == \"\":\n                \n                \n                \n                \n                kwargs[attr] = None\n                continue\n            if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:\n                rel = class_mapper(cls).relationships[attr]\n                if rel.uselist:\n                    if isinstance(val, list):\n                        if all(isinstance(v, dict) for v in val):\n                            rel_cls = cls.mapped_rel_class(attr)\n                            kwargs[attr] = rel_cls.update_or_new_all(\n                                list_of_kwargs=val, keys=[rel_cls.primary_key_name()])\n                    elif isinstance(val, dict):\n                        rel_cls = cls.mapped_rel_class(attr)\n                        mapping_col = rel.collection_class().keyfunc.name\n                        list_of_kwargs = [merge(v, {mapping_col: k}) for k, v in val.items()]\n                        kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(\n                            list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}\n                elif isinstance(val, dict):\n                    rel_cls = cls.mapped_rel_class(attr)\n                    kwargs[attr] = rel_cls.update_or_new(\n                        **merge(val, {'keys': [rel_cls.primary_key_name()]}))\n        return kwargs", "docstring": "Returns a preprocessed dictionary of parameters.\nUse this to filter the kwargs passed to `new`, `create`,\n`build` methods.\n\nArgs:\n\n**kwargs: a dictionary of parameters", "source": "juraj-google-style"}
{"code": "def make_innermost_setter(setter):\n\n    @functools.wraps(setter)\n    def _new_setter(kernel_results, *args, **kwargs):\n        'Wrapped setter.'\n        results_stack = []\n        while hasattr(kernel_results, 'inner_results'):\n            results_stack.append(kernel_results)\n            kernel_results = kernel_results.inner_results\n        new_kernel_results = setter(kernel_results, *args, **kwargs)\n        for outer_results in reversed(results_stack):\n            new_kernel_results = outer_results._replace(inner_results=new_kernel_results)\n        return new_kernel_results\n    return _new_setter", "docstring": "Wraps a setter so it applies to the inner-most results in `kernel_results`.\n\nThe wrapped setter unwraps `kernel_results` and applies `setter` to the first\nresults without an `inner_results` attribute.\n\nArgs:\nsetter: A callable that takes the kernel results as well as some `*args` and\n`**kwargs` and returns a modified copy of those kernel results.\n\nReturns:\nnew_setter: A wrapped `setter`.", "source": "codesearchnet"}
{"code": "def make_2d_block_raster_mask(query_shape, memory_flange):\n  \n  \n  query_triangle = common_layers.ones_matrix_band_part(\n      np.prod(query_shape), np.prod(query_shape), -1, 0)\n  split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)\n  \n  mask_pieces = [\n      tf.concat(  \n          [tf.ones([np.prod(query_shape), memory_flange[1]]),\n           split_query_masks[i],\n           tf.zeros([np.prod(query_shape), memory_flange[1]])],\n          axis=1) for i in range(query_shape[0])\n  ]\n  \n  final_mask = tf.concat(\n      [\n          tf.ones([\n              np.prod(query_shape),\n              (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]\n          ]),\n          tf.concat(mask_pieces, axis=1)\n      ],\n      axis=1)\n  \n  return 1. - final_mask", "docstring": "Creates a mask for 2d block raster scan.\n\nThe query mask can look to the left, top left, top, and top right, but\nnot to the right. Inside the query, we have the standard raster scan\nmasking.\nArgs:\nquery_shape: A tuple of ints (query_height, query_width)\nmemory_flange: A tuple of ints\n(memory_flange_height, memory_flange_width)\n\nReturns:\nA tensor of shape query_size, memory_size", "source": "juraj-google-style"}
{"code": "def _parse_slices(slicing_string):\n    parsed = []\n    for slice_string in slicing_string[1:-1].split(','):\n        indices = slice_string.split(':')\n        if len(indices) == 1:\n            parsed.append(int(indices[0].strip()))\n        elif 2 <= len(indices) <= 3:\n            parsed.append(slice(*[int(index.strip()) if index.strip() else None for index in indices]))\n        else:\n            raise ValueError('Invalid tensor-slicing string.')\n    return tuple(parsed)", "docstring": "Construct a tuple of slices from the slicing string.\n\nThe string must be a valid slicing string.\n\nArgs:\nslicing_string: (str) Input slicing string to be parsed.\n\nReturns:\ntuple(slice1, slice2, ...)\n\nRaises:\nValueError: If tensor_slicing is not a valid numpy ndarray slicing str.", "source": "github-repos"}
{"code": "def save_intraday(data: pd.DataFrame, ticker: str, dt, typ='TRADE'):\n    cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')\n    logger = logs.get_logger(save_intraday, level='debug')\n    info = f'{ticker} / {cur_dt} / {typ}'\n    data_file = hist_file(ticker=ticker, dt=dt, typ=typ)\n    if (not data_file):\n        return\n    if data.empty:\n        logger.warning(f'data is empty for {info} ...')\n        return\n    exch = const.exch_info(ticker=ticker)\n    if exch.empty:\n        return\n    end_time = pd.Timestamp(const.market_timing(ticker=ticker, dt=dt, timing='FINISHED')).tz_localize(exch.tz)\n    now = (pd.Timestamp('now', tz=exch.tz) - pd.Timedelta('1H'))\n    if (end_time > now):\n        logger.debug(f'skip saving cause market close ({end_time}) < now - 1H ({now}) ...')\n        return\n    logger.info(f'saving data to {data_file} ...')\n    files.create_folder(data_file, is_file=True)\n    data.to_parquet(data_file)", "docstring": "Check whether data is done for the day and save\n\nArgs:\ndata: data\nticker: ticker\ndt: date\ntyp: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\nExamples:\n>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'\n>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')\n>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')\n>>> # Invalid exchange\n>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')\n>>> # Invalid empty data\n>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')\n>>> # Invalid date - too close\n>>> cur_dt = utils.cur_time()\n>>> save_intraday(sample, 'AAPL US Equity', cur_dt)", "source": "codesearchnet"}
{"code": "def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):\n    tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)\n    tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)\n    value1 = rt.__getitem__(slice_spec)\n    value2 = rt.__getitem__(tensor_slice_spec1)\n    value3 = rt.__getitem__(tensor_slice_spec2)\n    self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))\n    self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))\n    self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))\n    if expected_shape is not None:\n        value1.shape.assert_is_compatible_with(expected_shape)\n        value2.shape.assert_is_compatible_with(expected_shape)\n        value3.shape.assert_is_compatible_with(expected_shape)", "docstring": "Helper function for testing RaggedTensor.__getitem__.\n\nChecks that calling `rt.__getitem__(slice_spec) returns the expected value.\nChecks three different configurations for each slice spec:\n\n* Call __getitem__ with the slice spec as-is (with int values)\n* Call __getitem__ with int values in the slice spec wrapped in\n`tf.constant()`.\n* Call __getitem__ with int values in the slice spec wrapped in\n`tf.compat.v1.placeholder()` (so value is not known at graph\nconstruction time).\n\nArgs:\nrt: The RaggedTensor to test.\nslice_spec: The slice spec.\nexpected: The expected value of rt.__getitem__(slice_spec), as a python\nlist; or an exception class.\nexpected_shape: The expected shape for `rt.__getitem__(slice_spec)`.", "source": "github-repos"}
{"code": "def get_num_bytes(self, batch: Sequence[pandas.DataFrame]) -> int:\n    return sum((df.memory_usage(deep=True).sum() for df in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch of Numpy arrays.", "source": "github-repos"}
{"code": "def coalescence_waiting_times(self, backward=True):\n    if (not isinstance(backward, bool)):\n        raise TypeError('backward must be a bool')\n    times = list()\n    lowest_leaf_dist = float('-inf')\n    for (n, d) in self.distances_from_root():\n        if (len(n.children) > 1):\n            times.append(d)\n        elif ((len(n.children) == 0) and (d > lowest_leaf_dist)):\n            lowest_leaf_dist = d\n    times.append(lowest_leaf_dist)\n    times.sort(reverse=backward)\n    for i in range((len(times) - 1)):\n        (yield abs((times[i] - times[(i + 1)])))", "docstring": "Generator over the waiting times of successive coalescence events\n\nArgs:\n``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``", "source": "codesearchnet"}
{"code": "def style_str(cls, style: Union[str, Dict[str, Any], None]) -> Optional[str]:\n    if not style:\n        return None\n    if isinstance(style, str):\n        return style\n    else:\n        assert isinstance(style, dict), style\n        return ''.join([f'{k.replace('_', '-')}:{v};' for k, v in style.items() if v is not None]) or None", "docstring": "Gets a string representing an inline CSS style.\n\nArgs:\nstyle: A single CSS style string, or a dictionary for CSS properties.\nWhen dictionary form is used, underscore in the key name will be\nreplaced by dash in the generated CSS style string.\nFor example, `background_color` will be converted to `background-color`.\n\nReturns:\nA CSS style string or None if no CSS property is provided.", "source": "github-repos"}
{"code": "def confab_conformers(self, forcefield='mmff94', freeze_atoms=None, rmsd_cutoff=0.5, energy_cutoff=50.0, conf_cutoff=100000, verbose=False):\n    if (self._obmol.GetDimension() != 3):\n        self.make3d()\n    else:\n        self.add_hydrogen()\n    ff = ob.OBForceField_FindType(forcefield)\n    if (ff == 0):\n        print(\"Could not find forcefield {} in openbabel, the forcefield will be reset as default 'mmff94'\".format(forcefield))\n        ff = ob.OBForceField_FindType('mmff94')\n    if freeze_atoms:\n        print('{} atoms will be freezed'.format(len(freeze_atoms)))\n        constraints = ob.OBFFConstraints()\n        for atom in ob.OBMolAtomIter(self._obmol):\n            atom_id = (atom.GetIndex() + 1)\n            if (id in freeze_atoms):\n                constraints.AddAtomConstraint(atom_id)\n        ff.SetConstraints(constraints)\n    ff.DiverseConfGen(rmsd_cutoff, conf_cutoff, energy_cutoff, verbose)\n    ff.GetConformers(self._obmol)\n    conformer_num = self._obmol.NumConformers()\n    conformers = []\n    for i in range(conformer_num):\n        self._obmol.SetConformer(i)\n        conformer = copy.deepcopy(BabelMolAdaptor(self._obmol).pymatgen_mol)\n        conformers.append(conformer)\n    self._obmol.SetConformer(0)\n    return conformers", "docstring": "Conformer generation based on Confab to generate all diverse low-energy\nconformers for molecules. This is different from rotor_conformer or\ngen3d_conformer as it aims to not simply to find a low energy\nconformation but to generate several different conformations.\n\nArgs:\nforcefield (str): Default is mmff94. Options are 'gaff', 'ghemical',\n'mmff94', 'mmff94s', and 'uff'.\nfreeze_atoms ([int]): index of atoms to be freezed when performing\nconformer search, default is None.\nrmsd_cutoff (float): rmsd_cufoff, default is 0.5 Angstrom.\nenergy_cutoff (float): energy_cutoff, default is 50.0 kcal/mol.\nconf_cutoff (float): max number of conformers to test,\ndefault is 1 million.\nverbose (bool): whether to display information on torsions found,\ndefault is False.\n\nReturns:\n(list): list of pymatgen Molecule objects for generated conformers.", "source": "codesearchnet"}
{"code": "def get_gradebook_id(self, gbuuid):\n    gradebook = self.get('gradebook', params={'uuid': gbuuid})\n    if ('data' not in gradebook):\n        failure_messsage = 'Error in get_gradebook_id for {0} - no data'.format(gradebook)\n        log.error(failure_messsage)\n        raise PyLmodUnexpectedData(failure_messsage)\n    return gradebook['data']['gradebookId']", "docstring": "Return gradebookid for a given gradebook uuid.\n\nArgs:\ngbuuid (str): gradebook uuid, i.e. ``STELLAR:/project/gbngtest``\n\nRaises:\nPyLmodUnexpectedData: No gradebook id returned\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\nstr: value of gradebook id", "source": "codesearchnet"}
{"code": "def _collective_with_groups(self, x, mesh_axes, collective):\n    \n    if not mesh_axes:\n      return x\n    x = x.to_laid_out_tensor()\n    if len(mesh_axes) == self.ndims:\n      return self.LaidOutTensor(collective(x.tensor_list, self._devices))\n    else:\n      groups = mtf.processor_groups(self.shape, mesh_axes)\n      ret = [None] * self.size\n      for g in groups:\n        inputs = [x.tensor_list[pnum] for pnum in g]\n        devices = [self._devices[pnum] for pnum in g]\n        reduced = collective(inputs, devices)\n        for pnum, y in zip(g, reduced):\n          ret[pnum] = y\n      return self.LaidOutTensor(ret)", "docstring": "Grouped collective, (across the given dimensions).\n\nArgs:\nx: a LaidOutTensor\nmesh_axes: a list of integers - the mesh dimensions to be reduced\ncollective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor)\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def render(engine, format, filepath, renderer=None, formatter=None, quiet=False):\n    (cmd, rendered) = command(engine, format, filepath, renderer, formatter)\n    run(cmd, capture_output=True, check=True, quiet=quiet)\n    return rendered", "docstring": "Render file with Graphviz ``engine`` into ``format``,  return result filename.\n\nArgs:\nengine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...).\nformat: The output format used for rendering (``'pdf'``, ``'png'``, ...).\nfilepath: Path to the DOT source file to render.\nrenderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).\nformatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).\nquiet (bool): Suppress ``stderr`` output.\nReturns:\nThe (possibly relative) path of the rendered file.\nRaises:\nValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.\ngraphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.\ngraphviz.ExecutableNotFound: If the Graphviz executable is not found.\nsubprocess.CalledProcessError: If the exit status is non-zero.", "source": "codesearchnet"}
{"code": "def findall_operations_with_gate_type(\n            self,\n            gate_type: Type[T_DESIRED_GATE_TYPE]\n    ) -> Iterable[Tuple[int,\n                        ops.GateOperation,\n                        T_DESIRED_GATE_TYPE]]:\n        \n        result = self.findall_operations(lambda operation: bool(\n            ops.op_gate_of_type(operation, gate_type)))\n        for index, op in result:\n            gate_op = cast(ops.GateOperation, op)\n            yield index, gate_op, cast(T_DESIRED_GATE_TYPE, gate_op.gate)", "docstring": "Find the locations of all gate operations of a given type.\n\nArgs:\ngate_type: The type of gate to find, e.g. XPowGate or\nMeasurementGate.\n\nReturns:\nAn iterator (index, operation, gate)'s for operations with the given\ngate type.", "source": "juraj-google-style"}
{"code": "def is_flaky(max_attempts: int=5, wait_before_retry: Optional[float]=None, description: Optional[str]=None):\n\n    def decorator(test_func_ref):\n\n        @functools.wraps(test_func_ref)\n        def wrapper(*args, **kwargs):\n            retry_count = 1\n            while retry_count < max_attempts:\n                try:\n                    return test_func_ref(*args, **kwargs)\n                except Exception as err:\n                    logger.error(f'Test failed with {err} at try {retry_count}/{max_attempts}.')\n                    if wait_before_retry is not None:\n                        time.sleep(wait_before_retry)\n                    retry_count += 1\n            return test_func_ref(*args, **kwargs)\n        return unittest.skipUnless(_run_flaky_tests, 'test is flaky')(wrapper)\n    return decorator", "docstring": "To decorate flaky tests. They will be retried on failures.\n\nPlease note that our push tests use `pytest-rerunfailures`, which prompts the CI to rerun certain types of\nfailed tests. More specifically, if the test exception contains any substring in `FLAKY_TEST_FAILURE_PATTERNS`\n(in `.circleci/create_circleci_config.py`), it will be rerun. If you find a recurrent pattern of failures,\nexpand `FLAKY_TEST_FAILURE_PATTERNS` in our CI configuration instead of using `is_flaky`.\n\nArgs:\nmax_attempts (`int`, *optional*, defaults to 5):\nThe maximum number of attempts to retry the flaky test.\nwait_before_retry (`float`, *optional*):\nIf provided, will wait that number of seconds before retrying the test.\ndescription (`str`, *optional*):\nA string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,\netc.)", "source": "github-repos"}
{"code": "def datasets_list(self, project_id=None, max_results=0, page_token=None):\n    \n    if project_id is None:\n      project_id = self._project_id\n    url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))\n\n    args = {}\n    if max_results != 0:\n      args['maxResults'] = max_results\n    if page_token is not None:\n      args['pageToken'] = page_token\n\n    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Issues a request to list the datasets in the project.\n\nArgs:\nproject_id: the project id to use to fetch the results; use None for the default project.\nmax_results: an optional maximum number of tables to retrieve.\npage_token: an optional token to continue the retrieval.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def get_public_key_pem(cert_obj):\n    \n    return cert_obj.public_key().public_bytes(\n        encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,\n        format=cryptography.hazmat.primitives.serialization.PublicFormat.PKCS1,\n    )", "docstring": "Extract public key from certificate as PEM encoded PKCS#1.\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nbytes: PEM encoded PKCS#1 public key.", "source": "juraj-google-style"}
{"code": "def set_unrecognized_field(self, key, value, variant):\n    if (not isinstance(variant, Variant)):\n        raise TypeError(('Variant type %s is not valid.' % variant))\n    self.__unrecognized_fields[key] = (value, variant)", "docstring": "Set an unrecognized field, used when decoding a message.\n\nArgs:\nkey: The name or number used to refer to this unknown value.\nvalue: The value of the field.\nvariant: Type information needed to interpret the value or re-encode\nit.\n\nRaises:\nTypeError: If the variant is not an instance of messages.Variant.", "source": "codesearchnet"}
{"code": "def _conform_to_outputs(self, outputs, struct):\n    struct = map_to_output_names(outputs, self._output_names, struct)\n    struct = map_missing_dict_keys(outputs, struct)\n    if not nest.is_nested(struct) and nest.is_nested(outputs):\n        struct = nest.map_structure(lambda _: struct, outputs)\n    return struct", "docstring": "Convenience method to conform `struct` to `outputs` structure.\n\nMappings performed:\n\n(1) Map a dict to a list of outputs, using the output names.\n(2) Fill missing keys in a dict w/ `None`s.\n(3) Map a single item to all outputs.\n\nArgs:\noutputs: Model predictions.\nstruct: Arbitrary nested structure (e.g. of labels, sample_weights,\nlosses, or metrics).\n\nReturns:\nMapping of `struct` to `outputs` structure.", "source": "github-repos"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if whence == os.SEEK_CUR:\n      offset += self._current_offset\n\n    elif whence == os.SEEK_END:\n      if self._uncompressed_stream_size is None:\n        self._uncompressed_stream_size = self._GetUncompressedStreamSize()\n        if self._uncompressed_stream_size is None:\n          raise IOError('Invalid uncompressed stream size.')\n\n      offset += self._uncompressed_stream_size\n\n    elif whence != os.SEEK_SET:\n      raise IOError('Unsupported whence.')\n\n    if offset < 0:\n      raise IOError('Invalid offset value less than zero.')\n\n    if offset != self._current_offset:\n      self._current_offset = offset\n      self._realign_offset = True", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def replace(self, **kwargs):\n    init_kwargs = dict(job=self.job, replica=self.replica, task=self.task, device_type=self.device_type, device_index=self.device_index)\n    init_kwargs.update(kwargs)\n    return self.__class__(**init_kwargs)", "docstring": "Convenience method for making a new DeviceSpec by overriding fields.\n\nFor instance:\n```\nmy_spec = DeviceSpec=(job=\"my_job\", device=\"CPU\")\nmy_updated_spec = my_spec.replace(device=\"GPU\")\nmy_other_spec = my_spec.replace(device=None)\n```\n\nArgs:\n**kwargs: This method takes the same args as the DeviceSpec constructor\n\nReturns:\nA DeviceSpec with the fields specified in kwargs overridden.", "source": "github-repos"}
{"code": "def _validate_fhir_constraints(msg: message.Message, base_name: str, primitive_handler_: primitive_handler.PrimitiveHandler) -> None:\n    if annotation_utils.is_primitive_type(msg):\n        _ = primitive_handler_.primitive_wrapper_from_primitive(msg)\n        return\n    if proto_utils.is_message_type(msg, any_pb2.Any):\n        return\n    for field in msg.DESCRIPTOR.fields:\n        field_name = f'{base_name}.{proto_utils.json_field_name(field)}'\n        _validate_field(msg, field, field_name, primitive_handler_)\n    for oneof in msg.DESCRIPTOR.oneofs:\n        if msg.WhichOneof(oneof.name) is None and (not oneof.GetOptions().HasExtension(annotations_pb2.fhir_oneof_is_optional)):\n            raise fhir_errors.InvalidFhirError(f'Empty oneof: `{oneof.full_name}`.')", "docstring": "Iterates over fields of the provided message and validates constraints.\n\nArgs:\nmsg: The message to validate.\nbase_name: The root message name for recursive validation of nested message\nfields.\nprimitive_handler_: Responsible for returning PrimitiveWrappers.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that a field is found to be\nviolating FHIR constraints or a required oneof is not set.", "source": "github-repos"}
{"code": "def _handle_stop_dag(self, request):\n    if ((request.payload['name'] is not None) and (request.payload['name'] not in self._stop_dags)):\n        self._stop_dags.append(request.payload['name'])\n    return Response(success=True, uid=request.uid)", "docstring": "The handler for the stop_dag request.\n\nThe stop_dag request adds a dag to the list of dags that should be stopped.\nThe dag will then stop queueing new tasks and will eventually stop running.\n\nArgs:\nrequest (Request): Reference to a request object containing the\nincoming request. The payload has to contain the\nfollowing fields:\n'name': the name of the dag that should be stopped\n\nReturns:\nResponse: A response object containing the following fields:\n- success: True if the dag was added successfully to the list\nof dags that should be stopped.", "source": "codesearchnet"}
{"code": "def from_json(cls, json_data):\n        \n        data = json.loads(_helpers._from_bytes(json_data))\n        if (data.get('token_expiry') and\n                not isinstance(data['token_expiry'], datetime.datetime)):\n            try:\n                data['token_expiry'] = datetime.datetime.strptime(\n                    data['token_expiry'], EXPIRY_FORMAT)\n            except ValueError:\n                data['token_expiry'] = None\n        retval = cls(\n            data['access_token'],\n            data['client_id'],\n            data['client_secret'],\n            data['refresh_token'],\n            data['token_expiry'],\n            data['token_uri'],\n            data['user_agent'],\n            revoke_uri=data.get('revoke_uri', None),\n            id_token=data.get('id_token', None),\n            id_token_jwt=data.get('id_token_jwt', None),\n            token_response=data.get('token_response', None),\n            scopes=data.get('scopes', None),\n            token_info_uri=data.get('token_info_uri', None))\n        retval.invalid = data['invalid']\n        return retval", "docstring": "Instantiate a Credentials object from a JSON description of it.\n\nThe JSON should have been produced by calling .to_json() on the object.\n\nArgs:\njson_data: string or bytes, JSON to deserialize.\n\nReturns:\nAn instance of a Credentials subclass.", "source": "juraj-google-style"}
{"code": "def RegionalWebhook(self, request, global_params=None):\n    config = self.GetMethodConfig('RegionalWebhook')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "ReceiveRegionalWebhook is called when the API receives a regional GitHub webhook.\n\nArgs:\nrequest: (CloudbuildLocationsRegionalWebhookRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Empty) The response message.", "source": "github-repos"}
{"code": "def multinomial_sample(x, vocab_size=None, sampling_method=\"random\",\n                       temperature=1.0):\n  \n  vocab_size = vocab_size or common_layers.shape_list(x)[-1]\n  if sampling_method == \"random\" and temperature > 0.0:\n    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)\n  else:\n    samples = tf.argmax(x, axis=-1)\n  reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])\n  return reshaped_samples", "docstring": "Multinomial sampling from a n-dimensional tensor.\n\nArgs:\nx: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.\nvocab_size: Number of classes in multinomial distribution.\nsampling_method: String, \"random\" or otherwise deterministic.\ntemperature: Positive float.\n\nReturns:\nTensor of shape [...].", "source": "juraj-google-style"}
{"code": "def sawtooth(duration: int, amp: complex, period: float=None, phase: float=0, name: str=None) -> SamplePulse:\n    if (period is None):\n        period = duration\n    return _sampled_sawtooth_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates sawtooth wave `SamplePulse`.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "codesearchnet"}
{"code": "def delete(self, key):\n    \n    try:\n      del self._collection(key)[key]\n\n      if len(self._collection(key)) == 0:\n        del self._items[str(key.path)]\n    except KeyError, e:\n      pass", "docstring": "Removes the object named by `key`.\n\nRemoves the object from the collection corresponding to ``key.path``.\n\nArgs:\nkey: Key naming the object to remove.", "source": "juraj-google-style"}
{"code": "def _protobuf_value_type(value):\n  \n  if value.HasField(\"number_value\"):\n    return api_pb2.DATA_TYPE_FLOAT64\n  if value.HasField(\"string_value\"):\n    return api_pb2.DATA_TYPE_STRING\n  if value.HasField(\"bool_value\"):\n    return api_pb2.DATA_TYPE_BOOL\n  return None", "docstring": "Returns the type of the google.protobuf.Value message as an api.DataType.\n\nReturns None if the type of 'value' is not one of the types supported in\napi_pb2.DataType.\n\nArgs:\nvalue: google.protobuf.Value message.", "source": "juraj-google-style"}
{"code": "def _ParseEntryObjectOffsets(self, file_object, file_offset):\n    entry_array_object = self._ParseEntryArrayObject(file_object, file_offset)\n    entry_object_offsets = list(entry_array_object.entry_object_offsets)\n    while (entry_array_object.next_entry_array_offset != 0):\n        entry_array_object = self._ParseEntryArrayObject(file_object, entry_array_object.next_entry_array_offset)\n        entry_object_offsets.extend(entry_array_object.entry_object_offsets)\n    return entry_object_offsets", "docstring": "Parses entry array objects for the offset of the entry objects.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the first entry array object relative to\nthe start of the file-like object.\n\nReturns:\nlist[int]: offsets of the entry objects.", "source": "codesearchnet"}
{"code": "def expand_dims(x, axis=-1):\n    return array_ops.expand_dims(x, axis)", "docstring": "Adds a 1-sized dimension at index \"axis\".\n\nArgs:\nx: A tensor or variable.\naxis: Position where to add a new axis.\n\nReturns:\nA tensor with expanded dimensions.", "source": "github-repos"}
{"code": "def get_setting(name):\n    current_settings = get_settings(category='All')\n    for setting in current_settings:\n        if (name.lower() == setting.lower()):\n            return current_settings[setting]\n    raise KeyError('Invalid name: {0}'.format(name))", "docstring": "Get the current configuration for the named audit setting\n\nArgs:\nname (str): The name of the setting to retrieve\n\nReturns:\nstr: The current configuration for the named setting\n\nRaises:\nKeyError: On invalid setting name\nCommandExecutionError: If an error is encountered retrieving the settings\n\nUsage:\n\n.. code-block:: python\n\nimport salt.utils.win_lgpo_auditpol\n\n# Get current state of the \"Credential Validation\" setting\nsalt.utils.win_lgpo_auditpol.get_setting(name='Credential Validation')", "source": "codesearchnet"}
{"code": "def _build_ring_gather(input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op):\n    num_devices = len(input_tensors)\n    if num_devices == 0:\n        return []\n    if num_devices == 1:\n        return input_tensors\n    shape = input_tensors[0].shape\n    if 1 != len(shape):\n        raise ValueError('input tensors must be 1D')\n    num_chunks = num_devices * num_subchunks\n    num_ticks = num_devices - 1\n    chunks_by_dev = []\n    split_pad_len = 0\n    for d in range(0, num_devices):\n        with ops.device(devices[d]):\n            splits, split_pad_len = _padded_split(input_tensors[d], num_chunks)\n            chunks_by_dev.append(splits)\n    for tick in range(0, num_ticks):\n        new_partial_reductions = [None for _ in range(0, num_chunks)]\n        for d in range(0, num_devices):\n            with ops.device(devices[d]):\n                for s in range(0, num_subchunks):\n                    rank = rank_by_s_d[s][d]\n                    seg_index = (rank + num_devices - (2 + tick)) % num_devices\n                    pred_dev = pred_by_s_d[s][d]\n                    chunk_index = seg_index * num_subchunks + s\n                    new_partial_reductions[chunk_index] = red_op(chunks_by_dev[pred_dev][chunk_index], chunks_by_dev[d][chunk_index])\n        for d in range(0, num_devices):\n            for s in range(0, num_subchunks):\n                rank = rank_by_s_d[s][d]\n                seg_index = (rank + num_devices - (2 + tick)) % num_devices\n                chunk_index = seg_index * num_subchunks + s\n                chunks_by_dev[d][chunk_index] = new_partial_reductions[chunk_index]\n    return (chunks_by_dev, split_pad_len)", "docstring": "Construct a subgraph for the first (reduction) pass of ring all-reduce.\n\nArgs:\ninput_tensors: a list of `tf.Tensor` 1D input tensors of same\nshape and type.\ndevices: array of device name strings\nnum_subchunks: number of subchunks each device should process in one tick.\npred_by_s_d: as produced by _ring_permutations\nrank_by_s_d: as produced by _ring_permutations\nred_op: a binary operator for elementwise reduction\n\nRaises:\nValueError: tensors must all be one dimensional.\n\nReturns:\nlist of list of `tf.Tensor` of (partially) reduced values where\nexactly num_subchunks chunks at each device are fully reduced.", "source": "github-repos"}
{"code": "def distance(self, method='haversine'):\n    distances = []\n    for segment in self:\n        if (len(segment) < 2):\n            distances.append([])\n        else:\n            distances.append(segment.distance(method))\n    return distances", "docstring": "Calculate distances between locations in segments.\n\nArgs:\nmethod (str): Method used to calculate distance\n\nReturns:\nlist of list of float: Groups of distance between points in\nsegments", "source": "codesearchnet"}
{"code": "def use_pcm(self, pcm_params=None, solvent_key=\"solvent\", solvent_params=None,\n                radii_force_field=None):\n        \n        self.params[\"pcm\"] = dict()\n        self.params[solvent_key] = dict()\n        default_pcm_params = {\"Theory\": \"SSVPE\",\n                              \"vdwScale\": 1.1,\n                              \"Radii\": \"UFF\"}\n        if not solvent_params:\n            solvent_params = {\"Dielectric\": 78.3553}\n        if pcm_params:\n            for k, v in pcm_params.items():\n                self.params[\"pcm\"][k.lower()] = v.lower() \\\n                    if isinstance(v, str) else v\n\n        for k, v in default_pcm_params.items():\n            if k.lower() not in self.params[\"pcm\"].keys():\n                self.params[\"pcm\"][k.lower()] = v.lower() \\\n                    if isinstance(v, str) else v\n        for k, v in solvent_params.items():\n            self.params[solvent_key][k.lower()] = v.lower() \\\n                if isinstance(v, str) else copy.deepcopy(v)\n        self.params[\"rem\"][\"solvent_method\"] = \"pcm\"\n        if radii_force_field:\n            self.params[\"pcm\"][\"radii\"] = \"bondi\"\n            self.params[\"rem\"][\"force_fied\"] = radii_force_field.lower()", "docstring": "Set the solvent model to PCM. Default parameters are trying to comply to\ngaussian default value\n\nArgs:\npcm_params (dict): The parameters of \"$pcm\" section.\nsolvent_key (str): for versions < 4.2 the section name is \"pcm_solvent\"\nsolvent_params (dict): The parameters of solvent_key section\nradii_force_field (str): The force fied used to set the solute\nradii. Default to UFF.", "source": "juraj-google-style"}
{"code": "def get_namespace(self, name_seq):\n        \n        namespaces = self.namespaces\n        result = []\n        for name in name_seq:\n            namespaces = namespaces.get(name)\n            if not namespaces:\n                break\n            result.append(name)\n        return result", "docstring": "Returns the prefix of names from name_seq that are known namespaces.\n\nArgs:\nname_seq: ['names', 'of', 'possible', 'namespace', 'to', 'find']\n\nReturns:\n['names', 'that', 'are', 'namespaces', 'possibly', 'empty', 'list']", "source": "juraj-google-style"}
{"code": "def subscribe(self, requested_timeout=None, auto_renew=False):\n\n    class AutoRenewThread(threading.Thread):\n        'Used by the auto_renew code to renew a subscription from within\\n            a thread.\\n\\n            '\n\n        def __init__(self, interval, stop_flag, sub, *args, **kwargs):\n            super(AutoRenewThread, self).__init__(*args, **kwargs)\n            self.interval = interval\n            self.sub = sub\n            self.stop_flag = stop_flag\n            self.daemon = True\n\n        def run(self):\n            sub = self.sub\n            stop_flag = self.stop_flag\n            interval = self.interval\n            while (not stop_flag.wait(interval)):\n                log.info('Autorenewing subscription %s', sub.sid)\n                sub.renew()\n    self.requested_timeout = requested_timeout\n    if self._has_been_unsubscribed:\n        raise SoCoException('Cannot resubscribe instance once unsubscribed')\n    service = self.service\n    if (not event_listener.is_running):\n        event_listener.start(service.soco)\n    (ip_address, port) = event_listener.address\n    if config.EVENT_ADVERTISE_IP:\n        ip_address = config.EVENT_ADVERTISE_IP\n    headers = {'Callback': '<http:\n    if (requested_timeout is not None):\n        headers['TIMEOUT'] = 'Second-{}'.format(requested_timeout)\n    with _subscriptions_lock:\n        response = requests.request('SUBSCRIBE', (service.base_url + service.event_subscription_url), headers=headers)\n        response.raise_for_status()\n        self.sid = response.headers['sid']\n        timeout = response.headers['timeout']\n        if (timeout.lower() == 'infinite'):\n            self.timeout = None\n        else:\n            self.timeout = int(timeout.lstrip('Second-'))\n        self._timestamp = time.time()\n        self.is_subscribed = True\n        log.info('Subscribed to %s, sid: %s', (service.base_url + service.event_subscription_url), self.sid)\n        _subscriptions[self.sid] = self\n    atexit.register(self.unsubscribe)\n    if (not auto_renew):\n        return\n    interval = ((self.timeout * 85) / 100)\n    auto_renew_thread = AutoRenewThread(interval, self._auto_renew_thread_flag, self)\n    auto_renew_thread.start()", "docstring": "Subscribe to the service.\n\nIf requested_timeout is provided, a subscription valid for that number\nof seconds will be requested, but not guaranteed. Check\n`timeout` on return to find out what period of validity is\nactually allocated.\n\nNote:\nSoCo will try to unsubscribe any subscriptions which are still\nsubscribed on program termination, but it is good practice for\nyou to clean up by making sure that you call :meth:`unsubscribe`\nyourself.\n\nArgs:\nrequested_timeout(int, optional): The timeout to be requested.\nauto_renew (bool, optional): If `True`, renew the subscription\nautomatically shortly before timeout. Default `False`.", "source": "codesearchnet"}
{"code": "def _get_ami_dict(json_url):\n    LOG.info('Getting AMI from %s', json_url)\n    response = requests.get(json_url)\n    assert response.ok, 'Error getting ami info from {}'.format(json_url)\n    ami_dict = response.json()\n    LOG.debug('AMI json contents: %s', ami_dict)\n    return ami_dict", "docstring": "Get ami from a web url.\n\nArgs:\nregion (str): AWS Region to find AMI ID.\n\nReturns:\ndict: Contents in dictionary format.", "source": "codesearchnet"}
{"code": "def get_subset_counts(self, *keys):\n        \n        if self.prepickle:\n            key_set = [pickle.dumps(key) for key in set(keys)]\n        else:\n            key_set = list(set(keys))\n        hashtables = [unordered_storage({'type': 'dict'}) for _ in\n                      range(self.b)]\n        Hss = self.keys.getmany(*key_set)\n        for key, Hs in zip(key_set, Hss):\n            for H, hashtable in zip(Hs, hashtables):\n                hashtable.insert(H, key)\n        return [hashtable.itemcounts() for hashtable in hashtables]", "docstring": "Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)\nrestricted to the list of keys given.\n\nArgs:\nkeys (hashable) : the keys for which to get the bucket allocation\ncounts", "source": "juraj-google-style"}
{"code": "def poisson(data):\n    \n    data = np.hstack(([0.0], np.array(data)))\n    cumm = np.cumsum(data)\n\n    def cost(s, t):\n        \n        diff = cumm[t]-cumm[s]\n        if diff == 0:\n            return -2 * diff * (- np.log(t-s) - 1)\n        else:\n            return -2 * diff * (np.log(diff) - np.log(t-s) - 1)\n\n    return cost", "docstring": "Creates a segment cost function for a time series with a\npoisson distribution with changing mean\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "juraj-google-style"}
{"code": "def indicator(self, data):\n        \n        try:\n            ip = ipaddress.ip_address(data)\n        except ValueError:\n            ip = ipaddress.ip_address(u'{}'.format(data))\n        if ip.version == 6:\n            data = ip.exploded\n            sections = []\n            \n            for s in data.split(':'):\n                if s == '0000':\n                    s = '0'\n                else:\n                    s = s.lstrip('0')\n                sections.append(s)\n            data = ':'.join(sections)\n        super(Address, self).indicator(data)", "docstring": "Update the request URI to include the Indicator for specific indicator retrieval.\n\nOverload to handle formatting of ipv6 addresses\n\nArgs:\ndata (string): The indicator value", "source": "juraj-google-style"}
{"code": "def dbmax20years(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmax20years`'.format(value))\n    self._dbmax20years = value", "docstring": "Corresponds to IDD Field `dbmax20years`\n20-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax20years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def set_zone(timezone):\n    \n    \n    if timezone.lower() in mapper.win_to_unix:\n        win_zone = timezone\n\n    elif timezone.lower() in mapper.unix_to_win:\n        \n        win_zone = mapper.get_win(timezone)\n\n    else:\n        \n        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))\n\n    \n    cmd = ['tzutil', '/s', win_zone]\n    res = __salt__['cmd.run_all'](cmd, python_shell=False)\n    if res['retcode']:\n        raise CommandExecutionError('tzutil encountered an error setting '\n                                    'timezone: {0}'.format(timezone),\n                                    info=res)\n    return zone_compare(timezone)", "docstring": "Sets the timezone using the tzutil.\n\nArgs:\ntimezone (str): A valid timezone\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nRaises:\nCommandExecutionError: If invalid timezone is passed\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' timezone.set_zone 'America/Denver'", "source": "juraj-google-style"}
{"code": "def _binary_3d_label_to_sparse_value(labels):\n    indices = []\n    values = []\n    for d0, labels_d0 in enumerate(labels):\n        for d1, labels_d1 in enumerate(labels_d0):\n            d2 = 0\n            for class_id, label in enumerate(labels_d1):\n                if label == 1:\n                    values.append(class_id)\n                    indices.append([d0, d1, d2])\n                    d2 += 1\n                else:\n                    assert label == 0\n    shape = [len(labels), len(labels[0]), len(labels[0][0])]\n    return sparse_tensor.SparseTensorValue(np.array(indices, np.int64), np.array(values, np.int64), np.array(shape, np.int64))", "docstring": "Convert dense 3D binary indicator tensor to sparse tensor.\n\nOnly 1 values in `labels` are included in result.\n\nArgs:\nlabels: Dense 2D binary indicator tensor.\n\nReturns:\n`SparseTensorValue` whose values are indices along the last dimension of\n`labels`.", "source": "github-repos"}
{"code": "def Get(self, project_id):\n    \n    if project_id in self._emulators:\n      return self._emulators[project_id]\n\n    emulator = self.Create(project_id)\n    self._emulators[project_id] = emulator\n    return emulator", "docstring": "Returns an existing emulator instance for the provided project_id.\n\nIf an emulator instance doesn't yet exist, it creates one.\n\nArgs:\nproject_id: project ID\n\nReturns:\na DatastoreEmulator", "source": "juraj-google-style"}
{"code": "def consume(self, message):\n    if (('jsonrpc' not in message) or (message['jsonrpc'] != JSONRPC_VERSION)):\n        log.warn('Unknown message type %s', message)\n        return\n    if ('id' not in message):\n        log.debug('Handling notification from client %s', message)\n        self._handle_notification(message['method'], message.get('params'))\n    elif ('method' not in message):\n        log.debug('Handling response from client %s', message)\n        self._handle_response(message['id'], message.get('result'), message.get('error'))\n    else:\n        try:\n            log.debug('Handling request from client %s', message)\n            self._handle_request(message['id'], message['method'], message.get('params'))\n        except JsonRpcException as e:\n            log.exception('Failed to handle request %s', message['id'])\n            self._consumer({'jsonrpc': JSONRPC_VERSION, 'id': message['id'], 'error': e.to_dict()})\n        except Exception:\n            log.exception('Failed to handle request %s', message['id'])\n            self._consumer({'jsonrpc': JSONRPC_VERSION, 'id': message['id'], 'error': JsonRpcInternalError.of(sys.exc_info()).to_dict()})", "docstring": "Consume a JSON RPC message from the client.\n\nArgs:\nmessage (dict): The JSON RPC message sent by the client", "source": "codesearchnet"}
{"code": "def AddCredentialOptions(self, argument_group):\n    \n    argument_group.add_argument(\n        '--credential', action='append', default=[], type=str,\n        dest='credentials', metavar='TYPE:DATA', help=(\n            'Define a credentials that can be used to unlock encrypted '\n            'volumes e.g. BitLocker. The credential is defined as type:data '\n            'e.g. \"password:BDE-test\". Supported credential types are: '\n            '{0:s}. Binary key data is expected to be passed in BASE-16 '\n            'encoding (hexadecimal). WARNING credentials passed via command '\n            'line arguments can end up in logs, so use this option with '\n            'care.').format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))", "docstring": "Adds the credential options to the argument group.\n\nThe credential options are use to unlock encrypted volumes.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def __init__(self, access_token, user_agent, revoke_uri=None):\n        \n        super(AccessTokenCredentials, self).__init__(\n            access_token,\n            None,\n            None,\n            None,\n            None,\n            None,\n            user_agent,\n            revoke_uri=revoke_uri)", "docstring": "Create an instance of OAuth2Credentials\n\nThis is one of the few types if Credentials that you should contrust,\nCredentials objects are usually instantiated by a Flow.\n\nArgs:\naccess_token: string, access token.\nuser_agent: string, The HTTP User-Agent to provide for this\napplication.\nrevoke_uri: string, URI for revoke endpoint. Defaults to None; a\ntoken can't be revoked if this is None.", "source": "juraj-google-style"}
{"code": "def subscribe(object_type: str, subscriber: str,\n              callback_handler: Callable = None) -> EventQueue:\n    \n    key = _keys.subscribers(object_type)\n    DB.remove_from_list(key, subscriber)\n    DB.append_to_list(key, subscriber)\n    return EventQueue(object_type, subscriber, callback_handler)", "docstring": "Subscribe to the specified object type.\n\nReturns an EventQueue object which can be used to query events\nassociated with the object type for this subscriber.\n\nArgs:\nobject_type (str): Object type\nsubscriber (str): Subscriber name\ncallback_handler (function, optional): Callback handler function.\n\nReturns:\nEventQueue, event queue object.", "source": "juraj-google-style"}
{"code": "def download_report_hook(count, block_size, total_size):\n    percent = int((((count * block_size) * 100) / total_size))\n    print((('\\r%d%%' % percent) + ' completed'), end='\\r')", "docstring": "Report hook for download progress.\n\nArgs:\ncount: current block number\nblock_size: block size\ntotal_size: total size", "source": "codesearchnet"}
{"code": "def _recursive_remove_blank_dirs(self, path):\n    path = os.path.abspath(path)\n    if ((path == self.path) or (len(path) <= len(self.path))):\n        return\n    if (not os.path.exists(path)):\n        return self._recursive_remove_blank_dirs(os.path.dirname(path))\n    if os.listdir(path):\n        return\n    shutil.rmtree(path)\n    return self._recursive_remove_blank_dirs(os.path.dirname(path))", "docstring": "Make sure, that blank directories are removed from the storage.\n\nArgs:\npath (str): Path which you suspect that is blank.", "source": "codesearchnet"}
{"code": "def trace_model_call(model, input_signature=None):\n    if input_signature is None:\n        if isinstance(model.call, def_function.Function):\n            input_signature = model.call.input_signature\n    if input_signature is None:\n        input_signature = model_input_signature(model)\n    if input_signature is None:\n        raise_model_input_error(model)\n\n    @def_function.function(input_signature=input_signature, autograph=False)\n    def _wrapped_model(*args):\n        \n        inputs = args[0] if len(input_signature) == 1 else list(args)\n        with keras_deps.get_call_context_function()().enter(model, inputs=inputs, build_graph=False, call_context_args={'training': False}, saving=True):\n            outputs = model(inputs, training=False)\n        return outputs\n    return _wrapped_model", "docstring": "Trace the model call to create a tf.function for exporting a Keras model.\n\nArgs:\nmodel: A Keras model.\ninput_signature: optional, a list of tf.TensorSpec objects specifying the\ninputs to the model.\n\nReturns:\nA tf.function wrapping the model's call function with input signatures set.\n\nRaises:\nValueError: if input signature cannot be inferred from the model.", "source": "github-repos"}
{"code": "def count_variables_by_type(variables=None):\n    if (variables is None):\n        variables = (tf.global_variables() + tf.local_variables())\n    unique_types = set((v.dtype.base_dtype for v in variables))\n    results_dict = {}\n    for dtype in unique_types:\n        if (dtype == tf.string):\n            tf.logging.warning('NB: string Variables present. The memory usage for these  Variables will not be accurately computed as it depends on the exact strings stored in a particular session.')\n        vars_of_type = [v for v in variables if (v.dtype.base_dtype == dtype)]\n        num_scalars = sum((v.shape.num_elements() for v in vars_of_type))\n        results_dict[dtype] = {'num_variables': len(vars_of_type), 'num_scalars': num_scalars}\n    return results_dict", "docstring": "Returns a dict mapping dtypes to number of variables and scalars.\n\nArgs:\nvariables: iterable of `tf.Variable`s, or None. If None is passed, then all\nglobal and local variables in the current graph are used.\n\nReturns:\nA dict mapping tf.dtype keys to a dict containing the keys 'num_scalars' and\n'num_variables'.", "source": "codesearchnet"}
{"code": "def restore_walker(self, dumped_state):\n    selector_string = dumped_state.get(u'selector')\n    if (selector_string is None):\n        raise ArgumentError(\"Invalid stream walker state in restore_walker, missing 'selector' key\", state=dumped_state)\n    selector = DataStreamSelector.FromString(selector_string)\n    walker = self.create_walker(selector)\n    walker.restore(dumped_state)\n    return walker", "docstring": "Restore a stream walker that was previously serialized.\n\nSince stream walkers need to be tracked in an internal list for\nnotification purposes, we need to be careful with how we restore\nthem to make sure they remain part of the right list.\n\nArgs:\ndumped_state (dict): The dumped state of a stream walker\nfrom a previous call to StreamWalker.dump()\n\nReturns:\nStreamWalker: The correctly restored StreamWalker subclass.", "source": "codesearchnet"}
{"code": "def group_id(self, resource_id):\n        \n        if self._name != 'group':\n            self._request_uri = '{}/{}'.format(self._api_uri, resource_id)", "docstring": "Update the request URI to include the Group ID for specific group retrieval.\n\nArgs:\nresource_id (string): The group id.", "source": "juraj-google-style"}
{"code": "def firmware_outdated(self):\n    datefmt = ' %b %d %Y %H:%M:%S'\n    compat_date = self.compatible_firmware_version.split('compiled')[1]\n    compat_date = datetime.datetime.strptime(compat_date, datefmt)\n    fw_date = self.firmware_version.split('compiled')[1]\n    fw_date = datetime.datetime.strptime(fw_date, datefmt)\n    return (compat_date > fw_date)", "docstring": "Returns whether the J-Link's firmware version is older than the one\nthat the DLL is compatible with.\n\nNote:\nThis is not the same as calling ``not jlink.firmware_newer()``.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``True`` if the J-Link's firmware is older than the one supported by\nthe DLL, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def setDataFrame(self, dataFrame):\n    if (not isinstance(dataFrame, pandas.core.frame.DataFrame)):\n        raise TypeError('Argument is not of type pandas.core.frame.DataFrame')\n    self.layoutAboutToBeChanged.emit()\n    self._dataFrame = dataFrame\n    self.layoutChanged.emit()", "docstring": "setter function to _dataFrame. Holds all data.\n\nNote:\nIt's not implemented with python properties to keep Qt conventions.\n\nRaises:\nTypeError: if dataFrame is not of type pandas.core.frame.DataFrame.\n\nArgs:\ndataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.", "source": "codesearchnet"}
{"code": "def load_transliteration_table(lang=\"en\", version=\"2\"):\n  \n  src_dir = \"transliteration{}\".format(version)\n  p = locate_resource(src_dir, lang)\n  file_handler = _open(p)\n  return pickle.load(file_handler)", "docstring": "Return a morfessor model for `lang` and of version `version`\n\nArgs:\nlang (string): language code.\nversion (string): version of the parameters to be used.", "source": "juraj-google-style"}
{"code": "def sort_resources(cls, request, resources, fail_enum, header_proto=None):\n        \n        if not request.sorting:\n            return resources\n\n        value_handlers = cls._get_handler_set(request, fail_enum, header_proto)\n\n        def sorter(resource_a, resource_b):\n            for handler in value_handlers:\n                val_a, val_b = handler.get_sort_values(resource_a, resource_b)\n\n                if val_a < val_b:\n                    return handler.xform_result(-1)\n                if val_a > val_b:\n                    return handler.xform_result(1)\n\n            return 0\n\n        return sorted(resources, key=cmp_to_key(sorter))", "docstring": "Sorts a list of resources based on a list of sort controls\n\nArgs:\nrequest (object): The parsed protobuf request object\nresources (list of objects): The resources to be sorted\nfail_enum (int, enum): The enum status to raise with invalid keys\nheader_proto(class): Class to decode a resources header\n\nReturns:\nlist: The sorted list of resources", "source": "juraj-google-style"}
{"code": "def get_longs():\n    longs = {}\n    fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')\n    with open(fname, 'rb') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        for row in reader:\n            word = row[0].lower()\n            word = re.sub(' ', '', word)\n            longs[word] = float(row[2])\n    return longs", "docstring": "Get a dictionary that maps Backpage city names to their respective longitudes.\n\nReturns:\ndictionary that maps city names (Strings) to longitudes (Floats)", "source": "codesearchnet"}
{"code": "def add(self, watch_key, tensor_value):\n    if (watch_key not in self._tensor_data):\n        self._tensor_data[watch_key] = _WatchStore(watch_key, mem_bytes_limit=self._watch_mem_bytes_limit)\n    self._tensor_data[watch_key].add(tensor_value)", "docstring": "Add a tensor value.\n\nArgs:\nwatch_key: A string representing the debugger tensor watch, e.g.,\n'Dense_1/BiasAdd:0:DebugIdentity'.\ntensor_value: The value of the tensor as a numpy.ndarray.", "source": "codesearchnet"}
{"code": "def insert_rows(self, project_id, dataset_id, table_id, rows, insert_ids=None, skip_invalid_rows=False, ignore_unknown_values=False):\n    insert_ids = [str(self.unique_row_id) if not insert_ids else insert_ids[i] for i, _ in enumerate(rows)]\n    rows = [fast_json_loads(fast_json_dumps(r, default=default_encoder)) for r in rows]\n    result, errors = self._insert_all_rows(project_id, dataset_id, table_id, rows, insert_ids, skip_invalid_rows=skip_invalid_rows, ignore_unknown_values=ignore_unknown_values)\n    return (result, errors)", "docstring": "Inserts rows into the specified table.\n\nArgs:\nproject_id: The project id owning the table.\ndataset_id: The dataset id owning the table.\ntable_id: The table id.\nrows: A list of plain Python dictionaries. Each dictionary is a row and\neach key in it is the name of a field.\nskip_invalid_rows: If there are rows with insertion errors, whether they\nshould be skipped, and all others should be inserted successfully.\nignore_unknown_values: Set this option to true to ignore unknown column\nnames. If the input rows contain columns that are not\npart of the existing table's schema, those columns are ignored, and\nthe rows are successfully inserted.\n\nReturns:\nA tuple (bool, errors). If first element is False then the second element\nwill be a bigquery.InsertErrorsValueListEntry instance containing\nspecific errors.", "source": "github-repos"}
{"code": "def __init__(self, warm_start_type, parents):\n        \n\n        if warm_start_type not in WarmStartTypes:\n            raise ValueError(\n                \"Invalid type: {}, valid warm start types are: [{}]\".format(warm_start_type,\n                                                                            [t for t in WarmStartTypes]))\n\n        if not parents:\n            raise ValueError(\"Invalid parents: {}, parents should not be None/empty\".format(parents))\n\n        self.type = warm_start_type\n        self.parents = set(parents)", "docstring": "Initializes the ``WarmStartConfig`` with the provided ``WarmStartTypes`` and parents.\n\nArgs:\nwarm_start_type (sagemaker.tuner.WarmStartTypes): This should be one of the supported warm start types\nin WarmStartType\nparents (set{str}): Set of parent tuning jobs which will be used to warm start the new tuning job.", "source": "juraj-google-style"}
{"code": "def setup(self, steps=None, drop_na=False, **kwargs):\n    input_nodes = None\n    selectors = self.model.get('input', {}).copy()\n    selectors.update(kwargs)\n    for (i, b) in enumerate(self.steps):\n        if ((steps is not None) and (i not in steps) and (b.name not in steps)):\n            continue\n        b.setup(input_nodes, drop_na=drop_na, **selectors)\n        input_nodes = b.output_nodes", "docstring": "Set up the sequence of steps for analysis.\n\nArgs:\nsteps (list): Optional list of steps to set up. Each element\nmust be either an int giving the index of the step in the\nJSON config block list, or a str giving the (unique) name of\nthe step, as specified in the JSON config. Steps that do not\nmatch either index or name will be skipped.\ndrop_na (bool): Boolean indicating whether or not to automatically\ndrop events that have a n/a amplitude when reading in data\nfrom event files.", "source": "codesearchnet"}
{"code": "def add_depth_embedding(x):\n  \n  x_shape = common_layers.shape_list(x)\n  depth = x_shape[-1]\n  num_steps = x_shape[0]\n  shape = [num_steps, 1, 1, depth]\n  depth_embedding = (\n      tf.get_variable(\n          \"depth_embedding\",\n          shape,\n          initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth**\n                                                                       0.5))\n\n  x += depth_embedding\n  return x", "docstring": "Add n-dimensional embedding as the depth embedding (timing signal).\n\nAdds embeddings to represent the position of the step in the recurrent\ntower.\n\nArgs:\nx: a tensor with shape [max_step, batch, length, depth]\n\nReturns:\na Tensor the same shape as x.", "source": "juraj-google-style"}
{"code": "def get_service_state(self, service_id: str) -> str:\n        \n        \n        service = self._client.services.get(service_id)\n\n        \n        for service_task in service.tasks():\n            service_state = service_task['DesiredState']\n        return service_state", "docstring": "Get the state of the service.\n\nOnly the manager nodes can retrieve service state\n\nArgs:\nservice_id (str): Service id\n\nReturns:\nstr, state of the service", "source": "juraj-google-style"}
{"code": "def closest_point(a, b, p):\n    \n    ap = [p[0]-a[0], p[1]-a[1]]\n    ab = [b[0]-a[0], b[1]-a[1]]\n    mag = float(ab[0]**2 + ab[1]**2)\n    proj = dot(ap, ab)\n    if mag ==0 :\n        dist = 0\n    else:\n        dist = proj / mag\n    if dist < 0:\n        return [a[0], a[1]]\n    elif dist > 1:\n        return [b[0], b[1]]\n    else:\n        return [a[0] + ab[0] * dist, a[1] + ab[1] * dist]", "docstring": "Finds closest point in a line segment\n\nArgs:\na ([float, float]): x and y coordinates. Line start\nb ([float, float]): x and y coordinates. Line end\np ([float, float]): x and y coordinates. Point to find in the segment\nReturns:\n(float, float): x and y coordinates of the closest point", "source": "juraj-google-style"}
{"code": "def _compute_numeric_jacobian(f, y_size, y_dtype, xs, param, delta):\n    x_shape = xs[param].shape\n    x_dtype = xs[param].dtype\n    x_size = _product(x_shape) * (2 if x_dtype.is_complex else 1)\n    y_size = y_size * (2 if y_dtype.is_complex else 1)\n    x_dtype = x_dtype.real_dtype.as_numpy_dtype\n    y_dtype = y_dtype.real_dtype.as_numpy_dtype\n    xs_dtypes = [x.dtype for x in xs]\n    xs_shapes = [x.shape for x in xs]\n    xs = [numpy_compat.np_asarray(_to_numpy(x)) for x in xs]\n    x = xs[param]\n    scale = numpy_compat.np_asarray(2 * delta, dtype=y_dtype)[()]\n    jacobian = np.zeros((y_size, x_size), dtype=x_dtype)\n    f = _prepare(f, xs_dtypes, xs_shapes)\n    for col in range(x_size):\n        original = x.ravel().view(x_dtype)[col]\n        x.ravel().view(x_dtype)[col] += delta\n        y_pos = _to_numpy(f(*xs))\n        x.ravel().view(x_dtype)[col] = original\n        x.ravel().view(x_dtype)[col] -= delta\n        y_neg = _to_numpy(f(*xs))\n        x.ravel().view(x_dtype)[col] = original\n        diff = (y_pos - y_neg) / scale\n        jacobian[:, col] = diff.ravel().view(y_dtype)\n    logging.vlog(1, 'Numeric Jacobian =\\n%s', jacobian)\n    return jacobian", "docstring": "Computes the numeric Jacobian for f regarding xs[param].\n\nOne can think of the relation among f, xs and y as y = f(xs).\n\nArgs:\nf: the function.\ny_size: the number of elements of the result.\ny_dtype: the dtype of the result.\nxs: a list of tensors.\nparam: the index of the target parameter.\ndelta: the amount of perturbation we give to the input.\n\nReturns:\nA 2-d numpy array representing the Jacobian. It has \"y_size\" rows\nand \"x_size\" columns where \"x_size\" is the number of elements in xs[param]\nand \"y_size\" is the number of elements in the result.", "source": "github-repos"}
{"code": "def register(self, numerics_alert):\n    key = (numerics_alert.device_name, numerics_alert.tensor_name)\n    if (key in self._data):\n        self._data[key].add(numerics_alert)\n    elif (len(self._data) < self._capacity):\n        history = NumericsAlertHistory()\n        history.add(numerics_alert)\n        self._data[key] = history", "docstring": "Register an alerting numeric event.\n\nArgs:\nnumerics_alert: An instance of `NumericsAlert`.", "source": "codesearchnet"}
{"code": "def get_urls_for_profiles(edx_video_id, profiles):\n    \n    profiles_to_urls = {profile: None for profile in profiles}\n    try:\n        video_info = get_video_info(edx_video_id)\n    except ValVideoNotFoundError:\n        return profiles_to_urls\n\n    for encoded_video in video_info[\"encoded_videos\"]:\n        if encoded_video[\"profile\"] in profiles:\n            profiles_to_urls[encoded_video[\"profile\"]] = encoded_video[\"url\"]\n\n    return profiles_to_urls", "docstring": "Returns a dict mapping profiles to URLs.\n\nIf the profiles or video is not found, urls will be blank.\n\nArgs:\nedx_video_id (str): id of the video\nprofiles (list): list of profiles we want to search for\n\nReturns:\n(dict): A dict containing the profile to url pair", "source": "juraj-google-style"}
{"code": "def map(self, entity, ext_id, dcm_id):\n    if not entity in self._id_map:\n        self._id_map[entity] = {}\n    self._id_map[entity][ext_id] = dcm_id\n    self._id_map[entity][dcm_id] = ext_id", "docstring": "Maps a CM id and an ext id for an entity.\n\nArgs:\nentity: The name of the entity for which the ID relates.\next_id: Placeholder ext id.\ndcm_id: Real CM id of the object.", "source": "github-repos"}
{"code": "def sqrt(x):\n    if any_symbolic_tensors((x,)):\n        return Sqrt().symbolic_call(x)\n    x = backend.convert_to_tensor(x)\n    return backend.numpy.sqrt(x)", "docstring": "Return the non-negative square root of a tensor, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, the non-negative square root of `x`.", "source": "github-repos"}
{"code": "def add_string(self, data):\n    lines = []\n    while data:\n        match = self._line_end_re.search(data)\n        if (match is None):\n            chunk = data\n        else:\n            chunk = data[:match.end()]\n        data = data[len(chunk):]\n        if (self._buf and self._buf[(- 1)].endswith(b('\\r')) and (not chunk.startswith(b('\\n')))):\n            lines.append(self._finish_line())\n        self._buf.append(chunk)\n        if chunk.endswith(b('\\n')):\n            lines.append(self._finish_line())\n    return lines", "docstring": "Process some data splitting it into complete lines and buffering the rest\n\nArgs:\ndata: A `str` in Python 2 or `bytes` in Python 3\nReturns:\nlist of complete lines ending with a carriage return (eg. a progress\nbar) or a newline.", "source": "codesearchnet"}
{"code": "def add_oxidation_state_by_site_fraction(structure, oxidation_states):\n    try:\n        for (i, site) in enumerate(structure):\n            new_sp = collections.defaultdict(float)\n            for (j, (el, occu)) in enumerate(get_z_ordered_elmap(site.species)):\n                specie = Specie(el.symbol, oxidation_states[i][j])\n                new_sp[specie] += occu\n            structure[i] = new_sp\n        return structure\n    except IndexError:\n        raise ValueError('Oxidation state of all sites must be specified in the list.')", "docstring": "Add oxidation states to a structure by fractional site.\n\nArgs:\noxidation_states (list): List of list of oxidation states for each\nsite fraction for each site.\nE.g., [[2, 4], [3], [-2], [-2], [-2]]", "source": "codesearchnet"}
{"code": "def patch_f90_compiler(f90_compiler):\n    \n    \n    \n    from numpy.distutils.fcompiler import gnu\n\n    \n    if os.name != \"nt\":\n        return\n\n    \n    if not isinstance(f90_compiler, gnu.Gnu95FCompiler):\n        return\n\n    f90_compiler.compiler_f77[:] = _remove_fpic(f90_compiler.compiler_f77)\n    f90_compiler.compiler_f90[:] = _remove_fpic(f90_compiler.compiler_f90)\n    c_compiler = f90_compiler.c_compiler\n    if c_compiler.compiler_type != \"msvc\":\n        raise NotImplementedError(\n            \"MSVC is the only supported C compiler on Windows.\"\n        )", "docstring": "Patch up ``f90_compiler.library_dirs``.\n\nUpdates flags in ``gfortran`` and ignores other compilers. The only\nmodification is the removal of ``-fPIC`` since it is not used on Windows\nand the build flags turn warnings into errors.\n\nArgs:\nf90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler\ninstance.", "source": "juraj-google-style"}
{"code": "def _reduce_helper(input_shape,\n                   output_shape,\n                   input_tensor_layout,\n                   reduction_fn_string=\"SUM\"):\n  \n  reduce_dims_indices = [\n      i for i, d in enumerate(input_shape.dims) if d not in output_shape.dims]\n  reduced_input_shape = Shape([\n      d for d in input_shape.dims if d in output_shape.dims])\n  perm = [reduced_input_shape.dims.index(d) for d in output_shape.dims]\n  def reduce_slice_fn(xslice):\n    ret = xslice\n    if reduce_dims_indices:\n      ret = reduction_fn(reduction_fn_string)(xslice, reduce_dims_indices)\n    if perm != list(xrange(len(perm))):\n      ret = tf.transpose(ret, perm)\n    return ret\n  reduced_mesh_axes = []\n  for i in reduce_dims_indices:\n    mesh_axis = input_tensor_layout[i]\n    if mesh_axis is not None:\n      reduced_mesh_axes.append(mesh_axis)\n  return reduce_slice_fn, reduced_mesh_axes", "docstring": "Returns slicewise function and reduced mesh dimensions.\n\nArgs:\ninput_shape: a Shape\noutput_shape: a Shape\ninput_tensor_layout: a TensorLayout\nreduction_fn_string: \"SUM\" or \"MAX\"\nReturns:\nreduce_slice_fn: a function from tf.Tensor to tf.Tensor\nreduced_mesh_axes: a list of integers", "source": "juraj-google-style"}
{"code": "def __init__(self, settings, room, queue, files):\n        \n        Process.__init__(self)\n        self._room = room\n        self._queue = queue\n        self._files = files\n        self._data = {}\n        self._connection = Connection.create_from_settings(settings)\n        self._reactor = None\n        self._producer = None\n        self._receiver = None", "docstring": "Initialize.\n\nArgs:\nsettings (dict): Settings used to create a :class:`Connection` instance\nroom (int): Room\nqueue (:class:`multiprocessing.Queue`): Queue to share data between processes\nfiles (dict): Dictionary, where key is the field name, and value is the path", "source": "juraj-google-style"}
{"code": "def ctc(y_true, y_pred):\n    if len(ops.shape(y_true)) != 2:\n        raise ValueError(f'Targets `y_true` are expected to be a tensor of shape `(batch_size, max_length)` in integer format. Received: y_true.shape={ops.shape(y_true)}')\n    if len(ops.shape(y_pred)) != 3:\n        raise ValueError(f'Logits `y_pred` are expected to be a tensor of shape `(batch_size, max_length, num_classes)`. Received: y_pred.shape={ops.shape(y_pred)}')\n    mask_index = 0\n    batch_length = ops.shape(y_pred)[0]\n    input_length = ops.shape(y_pred)[1]\n    input_length = input_length * ops.ones((batch_length,), dtype='int32')\n    label_length = ops.cast(ops.sum(y_true != mask_index, axis=-1), dtype='int32')\n    return ops.ctc_loss(y_true, y_pred, label_length, input_length, mask_index=mask_index)", "docstring": "CTC (Connectionist Temporal Classification) loss.\n\nArgs:\ny_true: A tensor of shape `(batch_size, max_length)` containing\nthe true labels in integer format. `0` always represents\nthe blank/mask index and should not be used for classes.\ny_pred: A tensor of shape `(batch_size, max_length, num_classes)`\ncontaining logits (the output of your model).\nThey should *not* be normalized via softmax.", "source": "github-repos"}
{"code": "def get_trivial_search_space():\n    return pg.floatv(0.0, 1.0)", "docstring": "Trivial search space.\n\nEach point in the space is a value in [0, 1].\n\nReturns:\nA tunable value.", "source": "github-repos"}
{"code": "def remove_child(self, child):\n        \n        if not isinstance(child, Node):\n            raise TypeError(\"child must be a Node\")\n        try:\n            self.children.remove(child); child.parent = None\n        except:\n            raise RuntimeError(\"Attempting to remove non-existent child\")", "docstring": "Remove child from ``Node`` object\n\nArgs:\n``child`` (``Node``): The child to remove", "source": "juraj-google-style"}
{"code": "def make_tstore_conn(params, **kwargs):\n    \n    log.setLevel(params.get('log_level', __LOG_LEVEL__))\n    log.debug(\"\\n%s\", params)\n    params.update(kwargs)\n    try:\n        vendor = RdfwConnections['triplestore'][params.get('vendor')]\n    except KeyError:\n        vendor = RdfwConnections['triplestore']['blazegraph']\n    conn = vendor(**params)\n    return conn", "docstring": "Returns a triplestore connection\n\nargs:\nattr_name: The name the connection will be assigned in the\nconfig manager\nparams: The paramaters of the connection\n\nkwargs:\nlog_level: logging level to use", "source": "juraj-google-style"}
{"code": "def _run_between_graph_clients(self, client_fn, cluster_spec, num_gpus, *args, **kwargs):\n    threads = []\n    for task_type in ['chief', 'worker']:\n        for task_id in range(len(cluster_spec.get(task_type, []))):\n            t = threading.Thread(target=self._run_client, args=(client_fn, task_type, task_id, num_gpus, context.executing_eagerly()) + args, kwargs=kwargs)\n            t.start()\n            threads.append(t)\n    self._coord.join(threads)", "docstring": "Runs several clients for between-graph replication.\n\nArgs:\nclient_fn: a function that needs to accept `task_type`, `task_id`,\n`num_gpus`.\ncluster_spec: a dict specifying jobs in a cluster.\nnum_gpus: number of GPUs per worker.\n*args: will be passed to `client_fn`.\n**kwargs: will be passed to `client_fn`.", "source": "github-repos"}
{"code": "def squash_sequence(input_layer):\n  \n  timesteps = len(input_layer.sequence)\n  if not timesteps:\n    raise ValueError('Empty tensor sequence.')\n  elif timesteps == 1:\n    result = input_layer.sequence[0]\n  else:\n    result = tf.concat(input_layer.sequence, 0)\n  return input_layer.with_tensor(result).with_defaults(unroll=timesteps)", "docstring": "Squashes a sequence into a single Tensor with dim 1 being time*batch.\n\nA sequence is an array of Tensors, which is not appropriate for most\noperations, this squashes them together into Tensor.\n\nDefaults are assigned such that cleave_sequence requires no args.\n\nArgs:\ninput_layer: The input layer.\nReturns:\nA PrettyTensor containing a single tensor with the first dim containing\nboth time and batch.\nRaises:\nValueError: If the sequence is empty.", "source": "juraj-google-style"}
{"code": "def _calculateEncodingKey(comparator):\n    \n    encodingName = None\n    for k, v in list(_encodings.items()):\n        if v == comparator:\n            encodingName = k\n            break\n    return encodingName", "docstring": "Gets the first key of all available encodings where the corresponding\nvalue matches the comparator.\n\nArgs:\ncomparator (string): A view name for an encoding.\n\nReturns:\nstr: A key for a specific encoding used by python.", "source": "juraj-google-style"}
{"code": "async def enqueue(content: AsyncIterable[_T], queue: asyncio.Queue[_T | None]) -> None:\n    try:\n        async for part in content:\n            await queue.put(part)\n    finally:\n        await queue.put(None)", "docstring": "Enqueues all content into a queue.\n\nWhen the queue is unbounded, this function will not block. When the queue is\nbounded, this function will block until the queue has space.\n\nArgs:\ncontent: The content to enqueue.\nqueue: The queue to enqueue to.", "source": "github-repos"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    try:\n        structure = self._HEADER.parseString(line)\n    except pyparsing.ParseException:\n        logger.debug('Not a XChat log file')\n        return False\n    (_, month, day, hours, minutes, seconds, year) = structure.date_time\n    month = timelib.MONTH_DICT.get(month.lower(), 0)\n    time_elements_tuple = (year, month, day, hours, minutes, seconds)\n    try:\n        dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n    except ValueError:\n        logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(structure.date_time))\n        return False\n    return True", "docstring": "Verify that this file is a XChat log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "codesearchnet"}
{"code": "def get_metrics(pred: jax.Array, actual: jax.Array) -> Result:\n    tp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 1))\n    tn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 0))\n    fp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 0))\n    fn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 1))\n    accuracy = (tp + tn) / (tp + tn + fp + fn + EPS)\n    precision = tp / (tp + fp + EPS)\n    recall = tp / (tp + fn + EPS)\n    fscore = 2 * precision * recall / (precision + recall + EPS)\n    return Result(tp=tp, tn=tn, fp=fp, fn=fn, accuracy=accuracy, precision=precision, recall=recall, fscore=fscore)", "docstring": "Gets evaluation metrics from the prediction and the actual target.\n\nArgs:\npred (jax.Array): A prediction of the target.\nactual (jax.Array): The actual target.\n\nReturns:\nresult (Result): A result.", "source": "github-repos"}
{"code": "def save(self, name=None, output='png', dirc=None):\n        \n\n        self.render()\n        if dirc:\n            if not os.path.isdir(os.getcwd() + \"/\" + str(dirc)):\n                os.makedirs(os.getcwd() + \"/\" + str(dirc))\n        if name is None:\n            if dirc:\n                self.fig.savefig(os.getcwd() + \"/\" + str(dirc) + '/bloch_' +\n                                 str(self.savenum) + '.' + output)\n            else:\n                self.fig.savefig(os.getcwd() + '/bloch_' + str(self.savenum) +\n                                 '.' + output)\n        else:\n            self.fig.savefig(name)\n        self.savenum += 1\n        if self.fig:\n            plt.close(self.fig)", "docstring": "Saves Bloch sphere to file of type ``format`` in directory ``dirc``.\nArgs:\nname (str):\nName of saved image. Must include path and format as well.\ni.e. '/Users/Paul/Desktop/bloch.png'\nThis overrides the 'format' and 'dirc' arguments.\noutput (str):\nFormat of output image.\ndirc (str):\nDirectory for output images. Defaults to current working directory.", "source": "juraj-google-style"}
{"code": "def on_heartbeat(self, message):\n        \n\n        logger.info(\"Got a heartbeat\")\n        logger.info(\"Heartbeat message: {}\".format(message))\n        self.heartbeat_thread.update_sequence(message['d'])\n        return", "docstring": "Runs on a heartbeat event from websocket connection\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"", "source": "juraj-google-style"}
{"code": "def log_response(self, response: Response, trim_log_values: bool=False, **kwargs: Any) -> None:\n    return log_(response.text, response_log, 'info', trim=trim_log_values, **kwargs)", "docstring": "Log a response.\n\nNote this is different to log_request, in that it takes a Response object, not a\nstring.\n\nArgs:\nresponse: The Response object to log. Note this is different to log_request\nwhich takes a string.\ntrim_log_values: Log an abbreviated version of the response.", "source": "codesearchnet"}
{"code": "def _inv_hessian_control_inputs(inv_hessian):\n    is_positive_definite = tf.reduce_all(input_tensor=tf.math.is_finite(tf.linalg.cholesky(inv_hessian)), axis=[(- 1), (- 2)])\n    is_symmetric = tf.equal(bfgs_utils.norm((inv_hessian - _batch_transpose(inv_hessian)), dims=2), 0)\n    return [tf.Assert(is_positive_definite, ['Initial inverse Hessian is not positive definite.', inv_hessian]), tf.Assert(is_symmetric, ['Initial inverse Hessian is not symmetric', inv_hessian])]", "docstring": "Computes control inputs to validate a provided inverse Hessian.\n\nThese ensure that the provided inverse Hessian is positive definite and\nsymmetric.\n\nArgs:\ninv_hessian: The starting estimate for the inverse of the Hessian at the\ninitial point.\n\nReturns:\nA list of tf.Assert ops suitable for use with tf.control_dependencies.", "source": "codesearchnet"}
{"code": "def recover_cfg(self, start=None, end=None, symbols=None, callback=None, arch_mode=None):\n    if (arch_mode is None):\n        arch_mode = self.binary.architecture_mode\n    self._load(arch_mode=arch_mode)\n    start = (start if start else self.binary.entry_point)\n    (cfg, _) = self._recover_cfg(start=start, end=end, symbols=symbols, callback=callback)\n    return cfg", "docstring": "Recover CFG.\n\nArgs:\nstart (int): Start address.\nend (int): End address.\nsymbols (dict): Symbol table.\ncallback (function): A callback function which is called after each successfully recovered CFG.\narch_mode (int): Architecture mode.\n\nReturns:\nControlFlowGraph: A CFG.", "source": "codesearchnet"}
{"code": "def _parse_list(cls, args):\n    argparser = ArgumentParser(prog='cluster list')\n    group = argparser.add_mutually_exclusive_group()\n    group.add_argument('--id', dest='cluster_id', help='show cluster with this id')\n    group.add_argument('--label', dest='label', help='show cluster with this label')\n    group.add_argument('--state', dest='state', action='store', choices=['up', 'down', 'pending', 'terminating'], help='list only clusters in the given state')\n    pagination_group = group.add_argument_group()\n    pagination_group.add_argument('--page', dest='page', action='store', type=int, help='page number')\n    pagination_group.add_argument('--per-page', dest='per_page', action='store', type=int, help='number of clusters to be retrieved per page')\n    arguments = argparser.parse_args(args)\n    return vars(arguments)", "docstring": "Parse command line arguments to construct a dictionary of cluster\nparameters that can be used to determine which clusters to list.\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used to determine which clusters to list", "source": "codesearchnet"}
{"code": "def copy_function(func, name=None):\n    code = func.__code__\n    newname = (name or func.__name__)\n    newcode = CodeType(code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, code.co_varnames, code.co_filename, newname, code.co_firstlineno, code.co_lnotab, code.co_freevars, code.co_cellvars)\n    newfunc = FunctionType(newcode, func.__globals__, newname, func.__defaults__, func.__closure__)\n    newfunc.__dict__.update(func.__dict__)\n    return newfunc", "docstring": "Copy a function object with different name.\n\nArgs:\nfunc (function): Function to be copied.\nname (string, optional): Name of the new function.\nIf not spacified, the same name of `func` will be used.\n\nReturns:\nnewfunc (function): New function with different name.", "source": "codesearchnet"}
{"code": "def remove_father(self, father):\n    self._fathers = [x for x in self._fathers if (x.node_id != father.node_id)]", "docstring": "Remove the father node. Do nothing if the node is not a father\n\nArgs:\nfathers: list of fathers to add", "source": "codesearchnet"}
{"code": "def get_url(self, url):\n        \n        try:\n            req = requests.get(url, headers={\n                'Authorization': 'Token {}'.format(self._user_token)\n            }, verify=False)\n            if req.status_code is 403:\n                raise ValueError(\"Access Denied\")\n            else:\n                return req\n        except requests.exceptions.ConnectionError as e:\n            if str(e) == '403 Client Error: Forbidden':\n                raise ValueError('Access Denied')\n            else:\n                raise e", "docstring": "Get a response object for a given url.\n\nArguments:\nurl (str): The url make a get to\ntoken (str): The authentication token\n\nReturns:\nobj: The response object", "source": "juraj-google-style"}
{"code": "def read(path):\n    if fs.exists(path):\n        with open(path) as infile:\n            components = infile.read().split()\n            pid = int(components[0])\n            date = datetime.date.fromtimestamp(float(components[1]))\n        return (pid, date)\n    else:\n        return (None, None)", "docstring": "Read the contents of a LockFile.\n\nArguments:\npath (str): Path to lockfile.\n\nReturns:\nTuple(int, datetime): The integer PID of the lock owner, and the\ndate the lock was required. If the lock is not claimed, both\nvalues are None.", "source": "codesearchnet"}
{"code": "def call(self, name, *args, **kwargs):\n    payload = (name, args, kwargs)\n    self._conn.send((self._CALL, payload))\n    return self._receive", "docstring": "Asynchronously call a method of the external environment.\n\nArgs:\nname: Name of the method to call.\n*args: Positional arguments to forward to the method.\n**kwargs: Keyword arguments to forward to the method.\n\nReturns:\nPromise object that blocks and provides the return value when called.", "source": "codesearchnet"}
{"code": "def _generate_api_config_with_root(self, request):\n    \n    actual_root = self._get_actual_root(request)\n    generator = api_config.ApiConfigGenerator()\n    api = request.body_json['api']\n    version = request.body_json['version']\n    lookup_key = (api, version)\n\n    service_factories = self._backend.api_name_version_map.get(lookup_key)\n    if not service_factories:\n      return None\n\n    service_classes = [service_factory.service_class\n                       for service_factory in service_factories]\n    config_dict = generator.get_config_dict(\n        service_classes, hostname=actual_root)\n\n    \n    for config in config_dict.get('items', []):\n      lookup_key_with_root = (\n          config.get('name', ''), config.get('version', ''), actual_root)\n      self._config_manager.save_config(lookup_key_with_root, config)\n\n    return config_dict", "docstring": "Generate an API config with a specific root hostname.\n\nThis uses the backend object and the ApiConfigGenerator to create an API\nconfig specific to the hostname of the incoming request. This allows for\nflexible API configs for non-standard environments, such as localhost.\n\nArgs:\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\n\nReturns:\nA string representation of the generated API config.", "source": "juraj-google-style"}
{"code": "def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False):\n    normalize_in_place(a_pyxb, ignore_timestamps)\n    normalize_in_place(b_pyxb, ignore_timestamps)\n    a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)\n    b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)\n    are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)\n    if (not are_equivalent):\n        logger.debug('XML documents not equivalent:')\n        logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))\n    return are_equivalent", "docstring": "Determine if SystemMetadata PyXB objects are semantically equivalent.\n\nNormalize then compare SystemMetadata PyXB objects for equivalency.\n\nArgs:\na_pyxb, b_pyxb : SystemMetadata PyXB objects to compare\n\nreset_timestamps: bool\n``True``: Timestamps in the SystemMetadata are set to a standard value so that\nobjects that are compared after normalization register as equivalent if only\ntheir timestamps differ.\n\nReturns:\nbool: **True** if SystemMetadata PyXB objects are semantically equivalent.\n\nNotes:\nThe SystemMetadata is normalized by removing any redundant information and\nordering all sections where there are no semantics associated with the order. The\nnormalized SystemMetadata is intended to be semantically equivalent to the\nun-normalized one.", "source": "codesearchnet"}
{"code": "def make_prediction_pipeline(pipeline, args):\n    (predicted_values, errors) = (((pipeline | ('Read CSV Files' >> beam.io.ReadFromText(str(args.predict_data), strip_trailing_newlines=True))) | ('Batch Input' >> beam.ParDo(EmitAsBatchDoFn(args.batch_size)))) | ('Run TF Graph on Batches' >> beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main')))\n    ((predicted_values, errors) | ('Format and Save' >> FormatAndSave(args)))", "docstring": "Builds the prediction pipeline.\n\nReads the csv files, prepends a ',' if the target column is missing, run\nprediction, and then prints the formated results to a file.\n\nArgs:\npipeline: the pipeline\nargs: command line args", "source": "codesearchnet"}
{"code": "def initialize_block(self, block_header):\n    state_view = BlockWrapper.state_view_for_block(self._block_cache.block_store.chain_head, self._state_view_factory)\n    settings_view = SettingsView(state_view)\n    self._min_wait_time = settings_view.get_setting('sawtooth.consensus.min_wait_time', self._min_wait_time, int)\n    self._max_wait_time = settings_view.get_setting('sawtooth.consensus.max_wait_time', self._max_wait_time, int)\n    self._valid_block_publishers = settings_view.get_setting('sawtooth.consensus.valid_block_publishers', self._valid_block_publishers, list)\n    block_header.consensus = b'Devmode'\n    self._start_time = time.time()\n    self._wait_time = random.uniform(self._min_wait_time, self._max_wait_time)\n    return True", "docstring": "Do initialization necessary for the consensus to claim a block,\nthis may include initiating voting activates, starting proof of work\nhash generation, or create a PoET wait timer.\n\nArgs:\nblock_header (BlockHeader): the BlockHeader to initialize.\nReturns:\nTrue", "source": "codesearchnet"}
{"code": "def print_error_messages_raylet(task_error_queue, threads_stopped):\n    \n\n    while True:\n        \n        if threads_stopped.is_set():\n            return\n\n        try:\n            error, t = task_error_queue.get(block=False)\n        except queue.Empty:\n            threads_stopped.wait(timeout=0.01)\n            continue\n        \n        \n        while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():\n            threads_stopped.wait(timeout=1)\n            if threads_stopped.is_set():\n                break\n        if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:\n            logger.debug(\"Suppressing error from worker: {}\".format(error))\n        else:\n            logger.error(\n                \"Possible unhandled error from worker: {}\".format(error))", "docstring": "Prints message received in the given output queue.\n\nThis checks periodically if any un-raised errors occured in the background.\n\nArgs:\ntask_error_queue (queue.Queue): A queue used to receive errors from the\nthread that listens to Redis.\nthreads_stopped (threading.Event): A threading event used to signal to\nthe thread that it should exit.", "source": "juraj-google-style"}
{"code": "def children(self, as_resources=False):\n    children = [o for (s, p, o) in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]\n    if as_resources:\n        logger.debug('retrieving children as resources')\n        children = [self.repo.get_resource(child) for child in children]\n    return children", "docstring": "method to return hierarchical  children of this resource\n\nArgs:\nas_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n(list): list of resources", "source": "codesearchnet"}
{"code": "def maybe_propagate_compile_time_consts_in_xla(op):\n    if control_flow_util.GraphOrParentsInXlaContext(op.graph):\n        op._set_attr('_xla_propagate_compile_time_consts', attr_value_pb2.AttrValue(b=True))", "docstring": "Tells XLA whether to propagate compile-time consts in the loop body.\n\nThis is needed to make compile time constants available to ops, for example\n`max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this\nwould always be turned on, but that doesn't work with legacy functionalized\nwhile_loops.\n\nArgs:\nop: A `While` Operation.", "source": "github-repos"}
{"code": "def list_projects(self, entity=None):\n    query = gql('\\n        query Models($entity: String!) {\\n            models(first: 10, entityName: $entity) {\\n                edges {\\n                    node {\\n                        id\\n                        name\\n                        description\\n                    }\\n                }\\n            }\\n        }\\n        ')\n    return self._flatten_edges(self.gql(query, variable_values={'entity': (entity or self.settings('entity'))})['models'])", "docstring": "Lists projects in W&B scoped by entity.\n\nArgs:\nentity (str, optional): The entity to scope this project to.\n\nReturns:\n[{\"id\",\"name\",\"description\"}]", "source": "codesearchnet"}
{"code": "def buckets_get(self, bucket, projection='noAcl'):\n    args = {'projection': projection}\n    url = (Api._ENDPOINT + (Api._BUCKET_PATH % bucket))\n    return google.datalab.utils.Http.request(url, credentials=self._credentials, args=args)", "docstring": "Issues a request to retrieve information about a bucket.\n\nArgs:\nbucket: the name of the bucket.\nprojection: the projection of the bucket information to retrieve.\nReturns:\nA parsed bucket information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def parse(self, filename):\n    path = os.path.abspath(filename)\n    if filename.endswith('.xml'):\n        return PawXmlSetup(path)\n    ppdesc = self.read_ppdesc(path)\n    if (ppdesc is None):\n        logger.critical(('Cannot find ppdesc in %s' % path))\n        return None\n    psp_type = ppdesc.psp_type\n    parsers = {'FHI': NcAbinitHeader.fhi_header, 'GTH': NcAbinitHeader.gth_header, 'TM': NcAbinitHeader.tm_header, 'Teter': NcAbinitHeader.tm_header, 'HGH': NcAbinitHeader.hgh_header, 'HGHK': NcAbinitHeader.hgh_header, 'ONCVPSP': NcAbinitHeader.oncvpsp_header, 'PAW_abinit_text': PawAbinitHeader.paw_header}\n    try:\n        header = parsers[ppdesc.name](path, ppdesc)\n    except Exception:\n        raise self.Error(((path + ':\\n') + straceback()))\n    if (psp_type == 'NC'):\n        pseudo = NcAbinitPseudo(path, header)\n    elif (psp_type == 'PAW'):\n        pseudo = PawAbinitPseudo(path, header)\n    else:\n        raise NotImplementedError('psp_type not in [NC, PAW]')\n    return pseudo", "docstring": "Read and parse a pseudopotential file. Main entry point for client code.\n\nReturns:\npseudopotential object or None if filename is not a valid pseudopotential file.", "source": "codesearchnet"}
{"code": "class FlaxElectraSequenceSummary(nn.Module):\n    config: ElectraConfig\n    dtype: jnp.dtype = jnp.float32\n\n    def setup(self):\n        self.summary = identity\n        if hasattr(self.config, 'summary_use_proj') and self.config.summary_use_proj:\n            if hasattr(self.config, 'summary_proj_to_labels') and self.config.summary_proj_to_labels and (self.config.num_labels > 0):\n                num_classes = self.config.num_labels\n            else:\n                num_classes = self.config.hidden_size\n            self.summary = nn.Dense(num_classes, dtype=self.dtype)\n        activation_string = getattr(self.config, 'summary_activation', None)\n        self.activation = ACT2FN[activation_string] if activation_string else lambda x: x\n        self.first_dropout = identity\n        if hasattr(self.config, 'summary_first_dropout') and self.config.summary_first_dropout > 0:\n            self.first_dropout = nn.Dropout(self.config.summary_first_dropout)\n        self.last_dropout = identity\n        if hasattr(self.config, 'summary_last_dropout') and self.config.summary_last_dropout > 0:\n            self.last_dropout = nn.Dropout(self.config.summary_last_dropout)\n\n    def __call__(self, hidden_states, cls_index=None, deterministic: bool=True):\n        \n        output = hidden_states[:, 0]\n        output = self.first_dropout(output, deterministic=deterministic)\n        output = self.summary(output)\n        output = self.activation(output)\n        output = self.last_dropout(output, deterministic=deterministic)\n        return output", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\nconfig ([`PretrainedConfig`]):\nThe config used by the model. Relevant arguments in the config class of the model are (refer to the actual\nconfig class of your model for the default values it uses):\n\n- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.\n- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes\n(otherwise to `config.hidden_size`).\n- **summary_activation** (`Optional[str]`) -- Set to `\"tanh\"` to add a tanh activation to the output,\nanother string or `None` will add no activation.\n- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.\n- **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.", "source": "github-repos"}
{"code": "def check(self, dsm, independence_factor=5, **kwargs):\n    least_common_mechanism = False\n    message = ''\n    data = dsm.data\n    categories = dsm.categories\n    dsm_size = dsm.size[0]\n    if (not categories):\n        categories = (['appmodule'] * dsm_size)\n    dependent_module_number = []\n    for j in range(0, dsm_size):\n        dependent_module_number.append(0)\n        for i in range(0, dsm_size):\n            if ((categories[i] != 'framework') and (categories[j] != 'framework') and (data[i][j] > 0)):\n                dependent_module_number[j] += 1\n    for (index, item) in enumerate(dsm.categories):\n        if ((item == 'broker') or (item == 'applib')):\n            dependent_module_number[index] = 0\n    if (max(dependent_module_number) <= (dsm_size / independence_factor)):\n        least_common_mechanism = True\n    else:\n        maximum = max(dependent_module_number)\n        message = ('Dependencies to %s (%s) > matrix size (%s) / independence factor (%s) = %s' % (dsm.entities[dependent_module_number.index(maximum)], maximum, dsm_size, independence_factor, (dsm_size / independence_factor)))\n    return (least_common_mechanism, message)", "docstring": "Check least common mechanism.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\nindependence_factor (int): if the maximum dependencies for one\nmodule is inferior or equal to the DSM size divided by the\nindependence factor, then this criterion is verified.\n\nReturns:\nbool: True if least common mechanism, else False", "source": "codesearchnet"}
{"code": "def get_success_enrollment_message(cls, users, enrolled_in):\n        \n        enrolled_count = len(users)\n        return (\n            'success',\n            ungettext(\n                '{enrolled_count} learner was enrolled in {enrolled_in}.',\n                '{enrolled_count} learners were enrolled in {enrolled_in}.',\n                enrolled_count,\n            ).format(\n                enrolled_count=enrolled_count,\n                enrolled_in=enrolled_in,\n            )\n        )", "docstring": "Create message for the users who were enrolled in a course or program.\n\nArgs:\nusers: An iterable of users who were successfully enrolled\nenrolled_in (str): A string identifier for the course or program the users were enrolled in\n\nReturns:\ntuple: A 2-tuple containing a message type and message text", "source": "juraj-google-style"}
{"code": "def translate_transcode_config(self, transcode_configs):\n    result = []\n    REALLY_BIG_INT = 9223372036854775807\n    try:\n        for video_format in self.get_video_formats():\n            for transcode_config in transcode_configs:\n                min_width = int(transcode_config.get(FieldMap.TRANSCODE_MIN_WIDTH, 0))\n                min_height = int(transcode_config.get(FieldMap.TRANSCODE_MIN_HEIGHT, 0))\n                min_bitrate = int(transcode_config.get(FieldMap.TRANSCODE_MIN_BITRATE, 0))\n                max_width = int(transcode_config.get(FieldMap.TRANSCODE_MAX_WIDTH, REALLY_BIG_INT))\n                max_height = int(transcode_config.get(FieldMap.TRANSCODE_MAX_HEIGHT, REALLY_BIG_INT))\n                max_bitrate = int(transcode_config.get(FieldMap.TRANSCODE_MAX_BITRATE, REALLY_BIG_INT))\n                file_format = transcode_config.get(FieldMap.TRANSCODE_FORMAT, '')\n                if file_format == 'SOURCE_FILE':\n                    if 15 not in result:\n                        result.append(15)\n                elif min_width <= video_format['resolution']['width'] and video_format['resolution']['width'] <= max_width and (min_height <= video_format['resolution']['height']) and (video_format['resolution']['height'] <= max_height) and (min_bitrate <= video_format['targetBitRate']) and (video_format['targetBitRate'] <= max_bitrate) and (video_format.get('fileType', '') == file_format):\n                    if video_format['id'] not in result:\n                        result.append(video_format['id'])\n    except:\n        raise Exception('Error determining file formats for transcode')\n    return result", "docstring": "Given a transcode config, returns the CM transcodes that match the config.\n\nArgs:\ntranscode_config: The transcode configuration feed item.\n\nReturns:\nAll trancode objects from Campaign Manager that match the transcode\nconfiguration specified.", "source": "github-repos"}
{"code": "def construct(self, **bindings):\n    \n    context = _assign_values_to_unbound_vars(self._unbound_vars, bindings)\n    context.update(self._partial_context)\n    return self._construct(context)", "docstring": "Constructs the graph and returns either a tensor or a sequence.\n\nArgs:\n**bindings: Arguments for every deferred parameter.\nReturns:\nThe value that is placed into this.", "source": "juraj-google-style"}
{"code": "def get_dropbox_folder_location():\n    host_db_path = os.path.join(os.environ['HOME'], '.dropbox/host.db')\n    try:\n        with open(host_db_path, 'r') as f_hostdb:\n            data = f_hostdb.read().split()\n    except IOError:\n        error('Unable to find your Dropbox install =(')\n    dropbox_home = base64.b64decode(data[1]).decode()\n    return dropbox_home", "docstring": "Try to locate the Dropbox folder.\n\nReturns:\n(str) Full path to the current Dropbox folder", "source": "codesearchnet"}
{"code": "def __init__(self, project_key=None, run_asyncore_thread=True):\n        \n        self.project_key = project_key\n        self.default_type = OOBTree\n\n        self._root = None  \n        self._connection = None  \n\n        if run_asyncore_thread:\n            _init_zeo()\n\n        self._open_connection()\n        self._init_zeo_root()", "docstring": "Initialize the object.\n\nArgs:\nconf_path (str): See :attr:`conf_path`.\nproject_key (str, default None): See :attr:`project_key`. If not\nset, the root of the database is used (this may cause\nperformace issues).\nrun_asyncore_thread (bool, default True): Run external asyncore\nthread, which handles connections to database? Default True.", "source": "juraj-google-style"}
{"code": "def en020(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `en020`'.format(value))\n\n        self._en020 = value", "docstring": "Corresponds to IDD Field `en020`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `en020`\nUnit: kJ/kg\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def postprocess_monograph(marc_xml, mods, uuid, counter, url):\n    \n    dom = double_linked_dom(mods)\n\n    if not isinstance(marc_xml, MARCXMLRecord):\n        marc_xml = MARCXMLRecord(marc_xml)\n\n    add_missing_xml_attributes(dom, counter)\n    fix_invalid_type_parameter(dom)\n\n    if uuid:\n        add_uuid(dom, uuid)\n\n    add_marccountry_tag(dom)\n\n    \n    add_genre(dom)\n\n    \n    remove_hairs_from_tags(dom)\n\n    fix_issuance(dom)\n    fix_location_tag(dom)\n    fix_related_item_tag(dom)\n\n    fix_missing_electronic_locator_tag(dom, url)\n    fix_missing_lang_tags(marc_xml, dom)\n\n    return dom.prettify()", "docstring": "Fix bugs in `mods` produced by XSLT template.\n\nArgs:\nmarc_xml (str): Original Aleph record.\nmods (str): XML string generated by XSLT template.\nuuid (str): UUID of the package.\ncounter (int): Number of record, is added to XML headers.\nurl (str): URL of the publication (public or not).\n\nReturns:\nstr: Updated XML.", "source": "juraj-google-style"}
{"code": "def CreateProductPartition(client, adgroup_id):\n    ad_group_criterion_service = client.GetService('AdGroupCriterionService', 'v201809')\n    helper = ProductPartitionHelper(adgroup_id)\n    root = helper.CreateSubdivision()\n    new_product_canonical_condition = {'xsi_type': 'ProductCanonicalCondition', 'condition': 'NEW'}\n    used_product_canonical_condition = {'xsi_type': 'ProductCanonicalCondition', 'condition': 'USED'}\n    other_product_canonical_condition = {'xsi_type': 'ProductCanonicalCondition'}\n    helper.CreateUnit(root, new_product_canonical_condition)\n    helper.CreateUnit(root, used_product_canonical_condition)\n    helper.CreateUnit(root, other_product_canonical_condition)\n    result = ad_group_criterion_service.mutate(helper.operations)\n    return result['value']", "docstring": "Creates a ProductPartition tree for the given AdGroup ID.\n\nArgs:\nclient: an AdWordsClient instance.\nadgroup_id: a str AdGroup ID.\n\nReturns:\nThe ProductPartition tree as a sudsobject.", "source": "codesearchnet"}
{"code": "def repsep(parser: Union[Parser, Sequence[Input]], separator: Union[Parser, Sequence[Input]]) \\\n        -> RepeatedSeparatedParser:\n    \n    if isinstance(parser, str):\n        parser = lit(parser)\n    if isinstance(separator, str):\n        separator = lit(separator)\n    return RepeatedSeparatedParser(parser, separator)", "docstring": "Match a parser zero or more times separated by another parser.\n\nThis matches repeated sequences of ``parser`` separated by ``separator``. A\nlist is returned containing the value from each match of ``parser``. The\nvalues from ``separator`` are discarded. If there are no matches, an empty\nlist is returned.\n\nArgs:\nparser: Parser or literal\nseparator: Parser or literal", "source": "juraj-google-style"}
{"code": "def coordinate_tensor(shape, axis):\n    if (axis < 0):\n        axis = (tf.size(shape) + axis)\n    r = tf.range(shape[axis])\n    r_shape = tf.one_hot(axis, tf.size(shape), on_value=(- 1), off_value=1, dtype=tf.int32)\n    return (tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape))", "docstring": "Return a tensor with given shape containing coordinate along given axis.\n\nArgs:\nshape: a Tensor representing the shape of the output Tensor\naxis: an integer\n\nReturns:\nA tensor with shape shape and type tf.int32, where each elements its\ncoordinate along the given axis.", "source": "codesearchnet"}
{"code": "def approximate_jacobian(f, variables, delta=0.1):\n\n    def var_jacobian(var):\n        \n        derivatives = tf.map_fn(lambda x: _five_point_stencil(f, var, x, delta), tf.range(tf.size(var)), fn_output_signature=tf.float32)\n        f_shape = tf.shape(derivatives)[1:]\n        transpose_perm = list(range(1, len(f_shape) + 1)) + [0]\n        transpose_derivatives = tf.transpose(derivatives, transpose_perm)\n        reshape_shape = tf.concat([f_shape, tf.shape(var)], 0)\n        return tf.reshape(transpose_derivatives, reshape_shape)\n    return tf.nest.map_structure(var_jacobian, variables)", "docstring": "Approximates the jacobian of f using five point stencil.\n\nSuppose the input function returns a tensor `r` under gradient tape `t`.  Then\nthis function returns an approximation to\n`t.jacobian(r, variables, unconnected_gradients=tf.UnconnectedGradients.ZERO)`\n\nArgs:\nf: Callable taking no arguments and returning a `tf.Tensor`.\nvariables: Possibly nested structure of `tf.Variable` in which to\ndifferentiate `f`.\ndelta: Size of the fundamental perturbation in the stencil.\n\nReturns:\nThe approximate jacobian.  Has the same structure as the return from a\ncorresponding call to `tf.GradientTape().jacobian`.", "source": "github-repos"}
{"code": "def parse_hpo_disease(hpo_line):\n    \n    hpo_line = hpo_line.rstrip().split('\\t')\n    hpo_info = {}\n    disease = hpo_line[0].split(':')\n    \n    hpo_info['source'] = disease[0]\n    hpo_info['disease_nr'] = int(disease[1])\n    hpo_info['hgnc_symbol'] = None\n    hpo_info['hpo_term'] = None\n    \n    if len(hpo_line) >= 3:\n        hpo_info['hgnc_symbol'] = hpo_line[2]\n\n        if len(hpo_line) >= 4:\n            hpo_info['hpo_term'] = hpo_line[3]\n    \n    \n    return hpo_info", "docstring": "Parse hpo disease line\n\nArgs:\nhpo_line(str)", "source": "juraj-google-style"}
{"code": "def is_subtype_of(self, other: 'TraceType') -> bool:", "docstring": "Returns True if `self` is a subtype of `other`.\n\nFor example, `tf.function` uses subtyping for dispatch:\nif `a.is_subtype_of(b)` is True, then an argument of `TraceType`\n`a` can be used as argument to a `ConcreteFunction` traced with an\na `TraceType` `b`.\n\nArgs:\nother: A TraceType object to be compared against.\n\nExample:\n\n```python\nclass Dimension(TraceType):\ndef __init__(self, value: Optional[int]):\nself.value = value\n\ndef is_subtype_of(self, other):\n# Either the value is the same or other has a generalized value that\n# can represent any specific ones.\nreturn (self.value == other.value) or (other.value is None)\n```", "source": "github-repos"}
{"code": "def SetCampaignTargetingCriteria(client, campaign):\n    campaign_criterion_service = client.GetService('CampaignCriterionService')\n    criteria = [{'xsi_type': 'Location', 'id': 21137}, {'xsi_type': 'Location', 'id': 2484}, {'xsi_type': 'Language', 'id': 1000}, {'xsi_type': 'Language', 'id': 1003}]\n    operations = [{'operator': 'ADD', 'operand': {'campaignId': campaign['id'], 'criterion': criterion}} for criterion in criteria]\n    response = campaign_criterion_service.mutate(operations)\n    if (response and ('value' in response)):\n        for criterion in response['value']:\n            print(('Campaign criteria of type \"%s\" and id \"%s\" was added.' % (criterion['criterion']['type'], criterion['criterion']['id'])))", "docstring": "Sets targeting criteria for the given campaign.\n\nArgs:\nclient: An AdWordsClient instance.\ncampaign: A suds object representing the campaign we wish to attach\ntargeting criteria.", "source": "codesearchnet"}
{"code": "def setLCD(self, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setLCD\")\n        try:\n            self.clearCmdMsg()\n\n            if len(password) != 8:\n                self.writeCmdMsg(\"Invalid password length.\")\n                self.setContext(\"\")\n                return result\n\n            if not self.request():\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_table = \"\"\n\n                    fill_len = 40 - len(self.m_lcd_items)\n\n                    for lcdid in self.m_lcd_items:\n                        append_val = binascii.hexlify(str(lcdid).zfill(2))\n                        req_table += append_val\n\n                    for i in range(0, fill_len):\n                        append_val = binascii.hexlify(str(0).zfill(2))\n                        req_table += append_val\n\n                    req_str = \"015731023030443228\" + req_table + \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success: 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial call to set LCD using meter object bufer.\n\nUsed with :func:`~ekmmeters.V4Meter.addLcdItem`.\n\nArgs:\npassword (str): Optional password\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def API_Iterator(function, kwargs, results=None, limit=None):\n\n    class API_Iterator_Instance:\n        \n\n        def __init__(self, function, kwargs, results=None, limit=None):\n            self.function = function\n            self.kwargs = kwargs\n            self.limit = limit\n            self.results = results\n            self.position = 0\n            self.count = 0\n            self.iterable = None\n            self.__find_tag__()\n\n        def __find_tag__(self):\n            if self.results:\n                for tag in iter(self.results.keys()):\n                    if isinstance(self.results[tag], list):\n                        self.iterable = tag\n                        break\n                if self.iterable is None:\n                    print('WARNING API RETURNED NO KEYS WITH LISTS:', ', '.join(self.results.keys()))\n\n        def __iter__(self):\n            return self\n\n        def __next__(self):\n            return self.next()\n\n        def next(self):\n            if self.results is None:\n                self.results = API_Retry(self.function(**self.kwargs))\n                self.__find_tag__()\n            if self.iterable and self.position >= len(self.results[self.iterable]):\n                page_token = self.results.get('nextPageToken', None)\n                if page_token:\n                    if 'body' in self.kwargs:\n                        self.kwargs['body']['pageToken'] = page_token\n                    else:\n                        self.kwargs['pageToken'] = page_token\n                    self.results = API_Retry(self.function(**self.kwargs))\n                    self.position = 0\n                else:\n                    raise StopIteration\n            if self.iterable and self.position < len(self.results.get(self.iterable, [])):\n                value = self.results[self.iterable][self.position]\n                self.position += 1\n                if self.limit is not None:\n                    self.count += 1\n                    if self.count > self.limit:\n                        raise StopIteration\n                return value\n            else:\n                raise StopIteration\n    return iter(API_Iterator_Instance(function, kwargs, results, limit))", "docstring": "See below API_Iterator_Instance for documentaion, this is just an iter wrapper.\n\nReturns:\niter(API_Iterator_Instance(function, kwargs, results))", "source": "github-repos"}
{"code": "def handle_type_error(fn):\n\n    @wraps(fn)\n    def handle_type_error_wrapper(*args, **kwargs):\n\n        def any_match(string_list, obj):\n            return filter((lambda x: (x in obj)), string_list)\n        try:\n            return fn(*args, **kwargs)\n        except TypeError as e:\n            message = e.__str__()\n            str_list = ['takes exactly', 'got an unexpected', 'takes no argument']\n            if ((fn.__name__ in message) and any_match(str_list, message)):\n                raise HTTPError(400, message)\n            raise\n    return handle_type_error_wrapper", "docstring": "Convert ``TypeError`` to ``bottle.HTTPError`` with ``400`` code and message\nabout wrong parameters.\n\nRaises:\nHTTPError: 400 in case too many/too little function parameters were \\\ngiven.", "source": "codesearchnet"}
{"code": "def vapor_pressure(temp, hum):\n    \n\n    if np.isscalar(hum):\n        hum = np.zeros(temp.shape) + hum\n\n    assert(temp.shape == hum.shape)\n\n    positives = np.array(temp >= 273.15)\n    vap_press = np.zeros(temp.shape) * np.nan\n    vap_press[positives] = 6.112 * np.exp((17.62 * (temp[positives] - 273.15)) / (243.12 + (temp[positives] - 273.15))) * hum[positives] / 100.\n    vap_press[~positives] = 6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) / (272.62  + (temp[~positives] - 273.15))) * hum[~positives] / 100.\n\n    return vap_press", "docstring": "Calculates vapor pressure from temperature and humidity after Sonntag (1990).\n\nArgs:\ntemp: temperature values\nhum: humidity value(s). Can be scalar (e.g. for calculating saturation vapor pressure).\n\nReturns:\nVapor pressure in hPa.", "source": "juraj-google-style"}
{"code": "def readline(self, size=None):\n        \n        data = EMPTY\n\n        if size == 0:\n            return data\n\n        while True:\n            if size and len(data) >= size:\n                return data\n\n            if not self.buffer:\n                self._fetch()\n                if not self.buffer:\n                    \n                    return data\n\n            newline_pos = self.buffer.find(LF)\n            if size:\n                if newline_pos == -1:\n                    remaining = size - len(data)\n                    data += self.buffer[:remaining]\n                    self.buffer = self.buffer[remaining:]\n                else:\n                    remaining = min(size - len(data), newline_pos)\n                    data += self.buffer[:remaining]\n                    self.buffer = self.buffer[remaining:]\n            else:\n                if newline_pos == -1:\n                    data += self.buffer\n                    self.buffer = EMPTY\n                else:\n                    data += self.buffer[:newline_pos]\n                    self.buffer = self.buffer[newline_pos:]", "docstring": "Read a single line from rfile buffer and return it.\n\nArgs:\nsize (int): minimum amount of data to read\n\nReturns:\nbytes: One line from rfile.", "source": "juraj-google-style"}
{"code": "def operations_happening_at_same_time_as(\n        self, scheduled_operation: ScheduledOperation\n    ) -> List[ScheduledOperation]:\n        \n        overlaps = self.query(\n            time=scheduled_operation.time,\n            duration=scheduled_operation.duration)\n        return [e for e in overlaps if e != scheduled_operation]", "docstring": "Finds operations happening at the same time as the given operation.\n\nArgs:\nscheduled_operation: The operation specifying the time to query.\n\nReturns:\nScheduled operations that overlap with the given operation.", "source": "juraj-google-style"}
{"code": "def resize(self, images: 'torch.Tensor', size: SizeDict, interpolation: Optional['F.InterpolationMode']=None, size_divisor: Optional[int]=None) -> 'torch.Tensor':\n    if interpolation is None:\n        interpolation = self.resample\n    shorter = size.shortest_edge\n    longer = int(MAX_LONGER_EDGE / MAX_SHORTER_EDGE * shorter)\n    heights = images.shape[-2]\n    widths = images.shape[-1]\n    if heights < widths:\n        new_heights = shorter\n        new_widths = widths * (shorter / heights)\n    else:\n        new_heights = heights * (shorter / widths)\n        new_widths = shorter\n    if max(new_heights, new_widths) > longer:\n        scale = longer / max(new_heights, new_widths)\n        new_heights = new_heights * scale\n        new_widths = new_widths * scale\n    new_heights = int(new_heights + 0.5)\n    new_widths = int(new_widths + 0.5)\n    if size_divisor is not None:\n        new_heights = new_heights \n        new_widths = new_widths \n    return F.resize(images, [new_heights, new_widths], interpolation=interpolation)", "docstring": "Resize an image or batch of images to specified size.\n\nArgs:\nimages (`torch.Tensor`): Image or batch of images to resize.\nsize (`Dict[str, int]`): Size dictionary with shortest_edge key.\ninterpolation (`F.InterpolationMode`, *optional*): Interpolation method to use.\nsize_divisor (`int`, *optional*): Value to ensure height/width are divisible by.\n\nReturns:\n`torch.Tensor`: Resized image or batch of images.", "source": "github-repos"}
{"code": "def assert_files_same(path1, path2):\n    \n    \n    difflines = compare_files(path1, path2)\n    assert len(difflines) == 0, ''.join(['\\n'] + difflines)", "docstring": "Asserts that two files are the same and returns delta using\n-, ?, + format if not\n\nArgs:\npath1 (str): Path to first file\npath2 (str): Path to second file\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def add_done_callback(self, fn):\n    if self._result_set:\n        _helpers.safe_invoke_callback(fn, self)\n        return\n    self._done_callbacks.append(fn)\n    if (self._polling_thread is None):\n        self._polling_thread = _helpers.start_daemon_thread(target=self._blocking_poll)", "docstring": "Add a callback to be executed when the operation is complete.\n\nIf the operation is not already complete, this will start a helper\nthread to poll for the status of the operation in the background.\n\nArgs:\nfn (Callable[Future]): The callback to execute when the operation\nis complete.", "source": "codesearchnet"}
{"code": "def check_partition_column(partition_column, cols):\n    \n    for k, v in cols.items():\n        if k == partition_column:\n            if v == \"int\":\n                return\n            else:\n                raise InvalidPartitionColumn(\n                    \"partition_column must be int, and not {0}\".format(v)\n                )\n    raise InvalidPartitionColumn(\n        \"partition_column {0} not found in the query\".format(partition_column)\n    )", "docstring": "Check partition_column existence and type\n\nArgs:\npartition_column: partition_column name\ncols: dict with columns names and python types\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def setRelay(self, seconds, relay, status, password='00000000'):\n    result = False\n    self.setContext('setRelay')\n    try:\n        self.clearCmdMsg()\n        if (len(password) != 8):\n            self.writeCmdMsg('Invalid password length.')\n            self.setContext('')\n            return result\n        if ((seconds < 0) or (seconds > 9999)):\n            self.writeCmdMsg('Relay duration must be between 0 and 9999.')\n            self.setContext('')\n            return result\n        if (not self.requestA()):\n            self.writeCmdMsg('Bad read CRC on setting')\n        elif (not self.serialCmdPwdAuth(password)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_str = ''\n            req_str = ((((('01573102303038' + binascii.hexlify(str(relay)).zfill(2)) + '28') + binascii.hexlify(str(status)).zfill(2)) + binascii.hexlify(str(seconds).zfill(4))) + '2903')\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success: 06 returned.')\n                result = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return result", "docstring": "Serial call to set relay.\n\nArgs:\nseconds (int): Seconds to hold, ero is hold forever. See :class:`~ekmmeters.RelayInterval`.\nrelay (int): Selected relay, see :class:`~ekmmeters.Relay`.\nstatus (int): Status to set, see :class:`~ekmmeters.RelayState`\npassword (str): Optional password\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"}
{"code": "def app_uninstall(self, package_name, keep_data=False):\n        \n        if keep_data:\n            return self.run_cmd('uninstall', '-k', package_name)\n        else:\n            return self.run_cmd('uninstall', package_name)", "docstring": "Uninstall package\n\nArgs:\n- package_name(string): package name ex: com.example.demo\n- keep_data(bool): keep the data and cache directories", "source": "juraj-google-style"}
{"code": "def _delete_example(self, request):\n    index = int(request.args.get('index'))\n    if (index >= len(self.examples)):\n        return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400)\n    del self.examples[index]\n    self.updated_example_indices = set([(i if (i < index) else (i - 1)) for i in self.updated_example_indices])\n    self.generate_sprite([ex.SerializeToString() for ex in self.examples])\n    return http_util.Respond(request, {}, 'application/json')", "docstring": "Deletes the specified example.\n\nArgs:\nrequest: A request that should contain 'index'.\n\nReturns:\nAn empty response.", "source": "codesearchnet"}
{"code": "def get_line_count(fname):\n    i = 0\n    with open(fname) as f:\n        for (i, l) in enumerate(f):\n            pass\n    return (i + 1)", "docstring": "Counts the number of lines in a file.\n\nArgs:\nfname: string, name of the file.\n\nReturns:\ninteger, the number of lines in the file.", "source": "codesearchnet"}
{"code": "def _init_vocab_from_list(self, vocab_list):\n\n    def token_gen():\n        for token in vocab_list:\n            if (token not in RESERVED_TOKENS):\n                (yield token)\n    self._init_vocab(token_gen())", "docstring": "Initialize tokens from a list of tokens.\n\nIt is ok if reserved tokens appear in the vocab list. They will be\nremoved. The set of tokens in vocab_list should be unique.\n\nArgs:\nvocab_list: A list of tokens.", "source": "codesearchnet"}
{"code": "def from_json_file(cls, file_name):\n        \n        with open(file_name) as json_data:\n            config = json.load(json_data)\n\n        return cls(config)", "docstring": "Construct OneViewClient using a json file.\n\nArgs:\nfile_name: json full path.\n\nReturns:\nOneViewClient:", "source": "juraj-google-style"}
{"code": "def LocaltimeToUTC(cls, timestamp, timezone, is_dst=False):\n    if (timezone and (timezone != pytz.UTC)):\n        datetime_object = (datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=None) + datetime.timedelta(microseconds=timestamp))\n        datetime_delta = timezone.utcoffset(datetime_object, is_dst=is_dst)\n        seconds_delta = int(datetime_delta.total_seconds())\n        timestamp -= (seconds_delta * definitions.MICROSECONDS_PER_SECOND)\n    return timestamp", "docstring": "Converts the timestamp in localtime of the timezone to UTC.\n\nArgs:\ntimestamp: The timestamp which is an integer containing the number\nof micro seconds since January 1, 1970, 00:00:00 UTC.\ntimezone: The timezone (pytz.timezone) object.\nis_dst: A boolean to indicate the timestamp is corrected for daylight\nsavings time (DST) only used for the DST transition period.\n\nReturns:\nThe timestamp which is an integer containing the number of micro seconds\nsince January 1, 1970, 00:00:00 UTC or 0 on error.", "source": "codesearchnet"}
{"code": "def determine_drift(self):\n        \n        try:\n            response = self._cloud_formation.detect_stack_drift(StackName=self._stack_name)\n            drift_request_id = response.get('StackDriftDetectionId', None)\n            if drift_request_id:\n                logging.info('drift_request_id: %s - polling', drift_request_id)\n                drift_calc_done = False\n                while not drift_calc_done:\n                    time.sleep(self.nap_time)\n                    response = self._cloud_formation.describe_stack_drift_detection_status(\n                        StackDriftDetectionId=drift_request_id\n                    )\n                    current_state = response.get('DetectionStatus', None)\n                    logging.info(\n                        'describe_stack_drift_detection_status(): {}'.format(current_state)\n                    )\n                    drift_calc_done = current_state in CALC_DONE_STATES\n                    drift_answer = response.get('StackDriftStatus', 'UNKNOWN')\n\n                logging.info('drift of {}: {}'.format(\n                    self._stack_name,\n                    drift_answer\n                ))\n\n                if drift_answer == 'DRIFTED':\n                    if self._verbose:\n                        self._print_drift_report()\n                    return False\n                else:\n                    return True\n            else:\n                logging.warning('drift_request_id is None')\n\n            return False\n        except Exception as wtf:\n            logging.error(wtf, exc_info=True)\n            return False", "docstring": "Determine the drift of the stack.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False", "source": "juraj-google-style"}
{"code": "def indexSearch(self, indexes):\n    if (not self._dataFrame.empty):\n        filter0 = (self._dataFrame.index == (- 9999))\n        for index in indexes:\n            filter1 = (self._dataFrame.index == index)\n            filter0 = np.logical_or(filter0, filter1)\n        return filter0\n    else:\n        return []", "docstring": "Filters the data by a list of indexes.\n\nArgs:\nindexes (list of int): List of index numbers to return.\n\nReturns:\nlist: A list containing all indexes with filtered data. Matches\nwill be `True`, the remaining items will be `False`. If the\ndataFrame is empty, an empty list will be returned.", "source": "codesearchnet"}
{"code": "def get_object(self, object_ids):\n    for object_id in object_ids:\n        if (not isinstance(object_id, ObjectID)):\n            raise TypeError('Attempting to call `get` on the value {}, which is not an ray.ObjectID.'.format(object_id))\n    plain_object_ids = [plasma.ObjectID(object_id.binary()) for object_id in object_ids]\n    for i in range(0, len(object_ids), ray._config.worker_fetch_request_size()):\n        self.raylet_client.fetch_or_reconstruct(object_ids[i:(i + ray._config.worker_fetch_request_size())], True)\n    final_results = self.retrieve_and_deserialize(plain_object_ids, 0)\n    unready_ids = {plain_object_ids[i].binary(): i for (i, val) in enumerate(final_results) if (val is plasma.ObjectNotAvailable)}\n    if (len(unready_ids) > 0):\n        while (len(unready_ids) > 0):\n            object_ids_to_fetch = [plasma.ObjectID(unready_id) for unready_id in unready_ids.keys()]\n            ray_object_ids_to_fetch = [ObjectID(unready_id) for unready_id in unready_ids.keys()]\n            fetch_request_size = ray._config.worker_fetch_request_size()\n            for i in range(0, len(object_ids_to_fetch), fetch_request_size):\n                self.raylet_client.fetch_or_reconstruct(ray_object_ids_to_fetch[i:(i + fetch_request_size)], False, self.current_task_id)\n            results = self.retrieve_and_deserialize(object_ids_to_fetch, max([ray._config.get_timeout_milliseconds(), int((0.01 * len(unready_ids)))]))\n            for (i, val) in enumerate(results):\n                if (val is not plasma.ObjectNotAvailable):\n                    object_id = object_ids_to_fetch[i].binary()\n                    index = unready_ids[object_id]\n                    final_results[index] = val\n                    unready_ids.pop(object_id)\n        self.raylet_client.notify_unblocked(self.current_task_id)\n    assert (len(final_results) == len(object_ids))\n    return final_results", "docstring": "Get the value or values in the object store associated with the IDs.\n\nReturn the values from the local object store for object_ids. This will\nblock until all the values for object_ids have been written to the\nlocal object store.\n\nArgs:\nobject_ids (List[object_id.ObjectID]): A list of the object IDs\nwhose values should be retrieved.", "source": "codesearchnet"}
{"code": "def _read(self, entry):\n    \n    start_time = time.time()\n    content = self._zip.read(entry.filename)\n\n    ctx = context.get()\n    if ctx:\n      operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)\n      operation.counters.Increment(\n          COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)\n\n    return content", "docstring": "Read entry content.\n\nArgs:\nentry: zip file entry as zipfile.ZipInfo.\nReturns:\nEntry content as string.", "source": "juraj-google-style"}
{"code": "def forward(self, permuted_tokens, tokens_per_expert):\n    fc1_output = self.fc1(permuted_tokens, tokens_per_expert)\n    projection, gate = torch.chunk(fc1_output, 2, dim=-1)\n    fc1_output = nn.functional.silu(projection) * gate\n    fc2_output = self.fc2(fc1_output, tokens_per_expert)\n    return fc2_output", "docstring": "Forward pass of the Grouped MLP.\n\nArgs:\npermuted_tokens (torch.Tensor): Permuted input tokens.\ntokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.\n\nReturns:\ntorch.Tensor: Output tensor after passing through the MLP.", "source": "github-repos"}
{"code": "def __init__(self,\n                 property_type=TableFeaturePropType.OFPTFPT_WRITE_ACTIONS,\n                 action_ids=None):\n        \n        super().__init__(property_type)\n        self.action_ids = action_ids if action_ids else ListOfActions()\n        self.update_length()", "docstring": "Create a ActionsProperty with the optional parameters below.\n\nArgs:\ntype(|TableFeaturePropType_v0x04|):\nProperty Type value of this instance.\naction_ids(|ListOfActions_v0x04|):\nList of Action instances.", "source": "juraj-google-style"}
{"code": "def are_symmetrically_related(self, point_a, point_b, tol=0.001):\n        \n        if np.allclose(self.operate(point_a), point_b, atol=tol):\n            return True\n        if np.allclose(self.operate(point_b), point_a, atol=tol):\n            return True\n        return False", "docstring": "Checks if two points are symmetrically related.\n\nArgs:\npoint_a (3x1 array): First point.\npoint_b (3x1 array): Second point.\ntol (float): Absolute tolerance for checking distance.\n\nReturns:\nTrue if self.operate(point_a) == point_b or vice versa.", "source": "juraj-google-style"}
{"code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    session_master = _get_value_in_tfconfig(_SESSION_MASTER_KEY, self._port)\n    if session_master is not None:\n        return session_master\n    cluster_spec = self.cluster_spec()\n    if not cluster_spec.jobs or (len(cluster_spec.jobs) == 1 and len(cluster_spec.job_tasks(cluster_spec.jobs[0])) == 1):\n        return ''\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    rpc_layer = rpc_layer if rpc_layer is not None else self.rpc_layer\n    return format_master_url(cluster_spec.task_address(task_type, task_id), rpc_layer)", "docstring": "Returns the master address to use when creating a TensorFlow session.\n\nNote: this is only useful for TensorFlow 1.x.\n\nArgs:\ntask_type: (String, optional) Overrides and sets the task_type of the\nmaster.\ntask_id: (Integer, optional) Overrides and sets the task id of the master.\nrpc_layer: (String, optional) Overrides and sets the protocol over which\nTensorFlow nodes communicate with each other.\n\nReturns:\nThe address of the master.\n\nRaises:\nRuntimeError: If the task_type or task_id is not specified and the\nSageMaker environment variables does not contain a task section.", "source": "github-repos"}
{"code": "def _calc_min_size(self, conv_layers):\n    \n    input_size = 1\n\n    for _, conv_params, max_pooling in reversed(conv_layers):\n      if max_pooling is not None:\n        kernel_size, stride = max_pooling\n        input_size = input_size * stride + (kernel_size - stride)\n\n      if conv_params is not None:\n        kernel_size, stride = conv_params\n        input_size = input_size * stride + (kernel_size - stride)\n\n    return input_size", "docstring": "Calculates the minimum size of the input layer.\n\nGiven a set of convolutional layers, calculate the minimum value of\nthe `input_height` and `input_width`, i.e. such that the output has\nsize 1x1. Assumes snt.VALID padding.\n\nArgs:\nconv_layers: List of tuples `(output_channels, (kernel_size, stride),\n(pooling_size, pooling_stride))`\n\nReturns:\nMinimum value of input height and width.", "source": "juraj-google-style"}
{"code": "def insort_event_right(self, event, lo=0, hi=None):\n    \n\n    if lo < 0:\n      raise ValueError('lo must be non-negative')\n    if hi is None:\n      hi = len(self.queue)\n    while lo < hi:\n      mid = (lo + hi) \n      if event[0] < self.queue[mid][0]:\n        hi = mid\n      else:\n        lo = mid + 1\n    self.queue.insert(lo, event)", "docstring": "Insert event in queue, and keep it sorted assuming queue is sorted.\n\nIf event is already in queue, insert it to the right of the rightmost\nevent (to keep FIFO order).\n\nOptional args lo (default 0) and hi (default len(a)) bound the\nslice of a to be searched.\n\nArgs:\nevent: a (time in sec since unix epoch, callback, args, kwds) tuple.", "source": "juraj-google-style"}
{"code": "def update_hash(a_hash, mv):\n    if mv.labels:\n        signing.add_dict_to_hash(a_hash, encoding.MessageToPyValue(mv.labels))\n    money_value = mv.get_assigned_value(u'moneyValue')\n    if (money_value is not None):\n        a_hash.update(b'\\x00')\n        a_hash.update(money_value.currencyCode.encode('utf-8'))", "docstring": "Adds ``mv`` to ``a_hash``\n\nArgs:\na_hash (`Hash`): the secure hash, e.g created by hashlib.md5\nmv (:class:`MetricValue`): the instance to add to the hash", "source": "codesearchnet"}
{"code": "def getWeights(self, term_i=None):\n        \n        assert self.init, 'GP not initialised'\n        if term_i==None:\n            if self.gp.mean.n_terms==1:\n                term_i = 0\n            else:\n                print('VarianceDecomposition: Specify fixed effect term index')\n        return self.gp.mean.B[term_i]", "docstring": "Return weights for fixed effect term term_i\n\nArgs:\nterm_i:     fixed effect term index\nReturns:\nweights of the spefied fixed effect term.\nThe output will be a KxL matrix of weights will be returned,\nwhere K is F.shape[1] and L is A.shape[1] of the correspoding fixed effect term\n(L will be always 1 for single-trait analysis).", "source": "juraj-google-style"}
{"code": "def CopyFromStringTuple(self, time_elements_tuple):\n    \n    if len(time_elements_tuple) < 6:\n      raise ValueError((\n          'Invalid time elements tuple at least 6 elements required,'\n          'got: {0:d}').format(len(time_elements_tuple)))\n\n    try:\n      year = int(time_elements_tuple[0], 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid year value: {0!s}'.format(\n          time_elements_tuple[0]))\n\n    try:\n      month = int(time_elements_tuple[1], 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid month value: {0!s}'.format(\n          time_elements_tuple[1]))\n\n    try:\n      day_of_month = int(time_elements_tuple[2], 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid day of month value: {0!s}'.format(\n          time_elements_tuple[2]))\n\n    try:\n      hours = int(time_elements_tuple[3], 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid hours value: {0!s}'.format(\n          time_elements_tuple[3]))\n\n    try:\n      minutes = int(time_elements_tuple[4], 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid minutes value: {0!s}'.format(\n          time_elements_tuple[4]))\n\n    try:\n      seconds = int(time_elements_tuple[5], 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid seconds value: {0!s}'.format(\n          time_elements_tuple[5]))\n\n    self._normalized_timestamp = None\n    self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n    self._time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds)", "docstring": "Copies time elements from string-based time elements tuple.\n\nArgs:\ntime_elements_tuple (Optional[tuple[str, str, str, str, str, str]]):\ntime elements, contains year, month, day of month, hours, minutes and\nseconds.\n\nRaises:\nValueError: if the time elements tuple is invalid.", "source": "juraj-google-style"}
{"code": "def honeycomb_lattice( a, b, spacing, alternating_sites=False ):\n    \n    if alternating_sites:\n        site_labels = [ 'A', 'B', 'A', 'B' ]\n    else:\n        site_labels = [ 'L', 'L', 'L', 'L' ]\n    unit_cell_lengths = np.array( [ sqrt(3), 3.0, 0.0 ] ) * spacing\n    cell_lengths = unit_cell_lengths * np.array( [ a, b, 1.0 ] )\n    grid = np.array( list( range( 1, int( a * b * 4 + 1 ) ) ) ).reshape( a, b, 4, order='C' )\n    sites = []\n    for i in range( a ):\n        for j in range( b ):\n            \n            r = np.array( [ i * sqrt(3) * spacing, j * 3 * spacing, 0.0 ] )\n            neighbours = [ grid[ i, j, 1 ],\n                           np.roll( grid, +1, axis=0 )[ i, j, 1 ],\n                           np.roll( grid, +1, axis=1 )[ i, j, 3 ] ]\n            sites.append( lattice_site.Site( grid[ i, j, 0 ], r, neighbours, 0.0, site_labels[0] ) )\n            \n            r = np.array( [ i * sqrt(3) * spacing + sqrt(3)/2 * spacing, ( j * 3 + 0.5 ) * spacing, 0.0 ] )\n            neighbours = [ grid[ i, j, 0 ], \n                           grid[ i, j, 2 ], \n                           np.roll( grid, -1, axis=0 )[ i, j, 0 ] ]\n            sites.append( lattice_site.Site( grid[ i, j, 1 ], r, neighbours, 0.0, site_labels[1] ) )\n            \n            r = np.array( [ i * sqrt(3) * spacing + sqrt(3)/2 * spacing, ( j * 3 + 1.5 ) * spacing, 0.0 ] )\n            neighbours = [ grid[ i, j, 1 ],\n                           grid[ i, j, 3 ],\n                           np.roll( grid, -1, axis=0 )[ i, j, 3 ] ]\n            sites.append( lattice_site.Site( grid[ i, j, 2 ], r, neighbours, 0.0, site_labels[2] ) )\n            \n            r = np.array( [ i * sqrt(3) * spacing, ( j * 3 + 2 ) * spacing, 0.0 ] )\n            neighbours = [ grid[ i, j, 2 ], \n                           np.roll( grid, +1, axis=0 )[ i, j, 2 ],\n                           np.roll( grid, -1, axis=1 )[ i, j, 0 ] ]\n            sites.append( lattice_site.Site( grid[ i, j, 3 ], r, neighbours, 0.0, site_labels[3] ) )\n    return lattice.Lattice( sites, cell_lengths=cell_lengths )", "docstring": "Generate a honeycomb lattice.\n\nArgs:\na (Int):         Number of lattice repeat units along x.\nb (Int):         Number of lattice repeat units along y.\nspacing (Float): Distance between lattice sites.\nalternating_sites (Bool, optional): Label alternating sites with 'A' and 'B'. Defaults to False.\n\nReturns:\n(Lattice): The new lattice\n\nNotes:\nThe returned lattice is 3D periodic, but all sites and edges lie in the xy plane.", "source": "juraj-google-style"}
{"code": "def __init__(self, parameter_name=None):\n    self._parameter_name = parameter_name", "docstring": "Construct a parameter modifier that may be specific to a parameter.\n\nArgs:\nparameter_name:  A `ParameterModifier` instance may operate on a class of\nparameters or on a parameter with a particular name.  Only\n`ParameterModifier` instances that are of a unique type or were\ninitialized with a unique `parameter_name` will be executed.\nSee `__eq__` and `__hash__`.", "source": "github-repos"}
{"code": "def __init__(self, output_mediator):\n    \n    super(DynamicOutputModule, self).__init__(output_mediator)\n    self._dynamic_fields_helper = DynamicFieldsHelper(output_mediator)\n    self._field_delimiter = self._DEFAULT_FIELD_DELIMITER\n    self._fields = self._DEFAULT_FIELDS", "docstring": "Initializes an output module object.\n\nArgs:\noutput_mediator (OutputMediator): an output mediator.", "source": "juraj-google-style"}
{"code": "def gaussian_deriv(duration: int, amp: complex, sigma: float, name: str=None) -> SamplePulse:\n    center = (duration / 2)\n    return _sampled_gaussian_deriv_pulse(duration, amp, center, sigma, name=name)", "docstring": "r\"\"\"Generates unnormalized gaussian derivative `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude at `center`.\nsigma: Width (standard deviation) of pulse.\nname: Name of pulse.", "source": "codesearchnet"}
{"code": "async def set_notification_level(self, level):\n        \n        await self._client.set_conversation_notification_level(\n            hangouts_pb2.SetConversationNotificationLevelRequest(\n                request_header=self._client.get_request_header(),\n                conversation_id=hangouts_pb2.ConversationId(id=self.id_),\n                level=level,\n            )\n        )", "docstring": "Set the notification level of this conversation.\n\nArgs:\nlevel: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or\n``NOTIFICATION_LEVEL_RING`` to enable them.\n\nRaises:\n.NetworkError: If the request fails.", "source": "juraj-google-style"}
{"code": "def jitChol(A, maxTries=10, warning=True):\n    jitter = 0\n    i = 0\n    while True:\n        try:\n            if (jitter == 0):\n                jitter = ((abs(SP.trace(A)) / A.shape[0]) * 1e-06)\n                LC = linalg.cholesky(A, lower=True)\n                return (LC.T, 0.0)\n            else:\n                if warning:\n                    logging.error(('Adding jitter of %f in jitChol().' % jitter))\n                LC = linalg.cholesky((A + (jitter * SP.eye(A.shape[0]))), lower=True)\n                return (LC.T, jitter)\n        except linalg.LinAlgError:\n            if (i < maxTries):\n                jitter = (jitter * 10)\n            else:\n                raise linalg.LinAlgError((((('Matrix non positive definite, jitter of ' + str(jitter)) + ' added but failed after ') + str(i)) + ' trials.'))\n        i += 1\n    return LC", "docstring": "Do a Cholesky decomposition with jitter.\n\nDescription:\n\n\nU, jitter = jitChol(A, maxTries, warning) attempts a Cholesky\ndecomposition on the given matrix, if matrix isn't positive\ndefinite the function adds 'jitter' and tries again. Thereafter\nthe amount of jitter is multiplied by 10 each time it is added\nagain. This is continued for a maximum of 10 times.  The amount of\njitter added is returned.\nReturns:\nU - the Cholesky decomposition for the matrix.\njitter - the amount of jitter that was added to the matrix.\nArguments:\nA - the matrix for which the Cholesky decomposition is required.\nmaxTries - the maximum number of times that jitter is added before\ngiving up (default 10).\nwarning - whether to give a warning for adding jitter (default is True)\n\nSee also\nCHOL, PDINV, LOGDET\n\n\nCopyright (c) 2005, 2006 Neil D. Lawrence", "source": "codesearchnet"}
{"code": "def set_data(self, data, from_db=False):\n        \n        self._load_data(data, from_db)\n        return self", "docstring": "Fills the object's fields with given data dict.\nInternally calls the self._load_data() method.\n\nArgs:\ndata (dict): Data to fill object's fields.\nfrom_db (bool): if data coming from db then we will\nuse related field type's _load_data method\n\nReturns:\nSelf. Returns objects itself for chainability.", "source": "juraj-google-style"}
{"code": "def make_lines_texture(num_lines=10, resolution=50):\n    (x, y) = np.meshgrid(np.hstack([np.linspace(0, 1, resolution), np.nan]), np.linspace(0, 1, num_lines))\n    y[np.isnan(x)] = np.nan\n    return (x.flatten(), y.flatten())", "docstring": "Makes a texture consisting of a given number of horizontal lines.\n\nArgs:\nnum_lines (int): the number of lines to draw\nresolution (int): the number of midpoints on each line\n\nReturns:\nA texture.", "source": "codesearchnet"}
{"code": "def unsubscribe(self, topic):\n        \n\n        del self.queues[topic]\n\n        try:\n            self.client.unsubscribe(topic)\n        except operationError as exc:\n            raise InternalError(\"Could not unsubscribe from topic\", topic=topic, message=exc.message)", "docstring": "Unsubscribe from messages on a given topic\n\nArgs:\ntopic (string): The MQTT topic to unsubscribe from", "source": "juraj-google-style"}
{"code": "def _CheckIsPipe(self, file_entry):\n    if (definitions.FILE_ENTRY_TYPE_PIPE not in self._file_entry_types):\n        return False\n    return file_entry.IsPipe()", "docstring": "Checks the is_pipe find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"}
{"code": "def metrics(self):\n    raise NotImplementedError()", "docstring": "Returns :class:`~apache_beam.metrics.metric.MetricResults` object to\nquery metrics from the runner.\n\nRaises:\nNotImplementedError: If the runner does not support this\noperation.", "source": "github-repos"}
{"code": "def main():\n    pip_package_dependencies = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', PIP_PACKAGE_QUERY_EXPRESSION])\n    if isinstance(pip_package_dependencies, bytes):\n        pip_package_dependencies = pip_package_dependencies.decode('utf-8')\n    pip_package_dependencies_list = pip_package_dependencies.strip().split('\\n')\n    pip_package_dependencies_list = [x.split()[0] for x in pip_package_dependencies_list]\n    print('Pip package superset size: %d' % len(pip_package_dependencies_list))\n    tf_py_test_dependencies = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', PY_TEST_QUERY_EXPRESSION])\n    if isinstance(tf_py_test_dependencies, bytes):\n        tf_py_test_dependencies = tf_py_test_dependencies.decode('utf-8')\n    tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split('\\n')\n    tf_py_test_dependencies_list = [x.split()[0] for x in tf_py_test_dependencies.strip().split('\\n')]\n    print('Pytest dependency subset size: %d' % len(tf_py_test_dependencies_list))\n    missing_dependencies = []\n    ignore_extensions = ['_test', '_test.py', '_test_cpu', '_test_cpu.py', '_test_gpu', '_test_gpu.py', '_test_lib']\n    ignored_files_count = 0\n    denylisted_dependencies_count = len(DEPENDENCY_DENYLIST)\n    for dependency in tf_py_test_dependencies_list:\n        if dependency and dependency.startswith('\n            ignore = False\n            if any((dependency.endswith(ext) for ext in ignore_extensions)):\n                ignore = True\n                ignored_files_count += 1\n            if not (ignore or dependency in pip_package_dependencies_list or dependency in DEPENDENCY_DENYLIST):\n                missing_dependencies.append(dependency)\n    print('Ignored files count: %d' % ignored_files_count)\n    print('Denylisted dependencies count: %d' % denylisted_dependencies_count)\n    if missing_dependencies:\n        print('Missing the following dependencies from pip_packages:')\n        for missing_dependency in missing_dependencies:\n            print('\\nMissing dependency: %s ' % missing_dependency)\n            print('Affected Tests:')\n            rdep_query = 'rdeps(kind(py_test, %s), %s)' % (' + '.join(PYTHON_TARGETS), missing_dependency)\n            affected_tests = subprocess.check_output(['bazel', 'cquery', '--experimental_cc_shared_library', rdep_query])\n            affected_tests_list = affected_tests.split('\\n')[:-2]\n            print('\\n'.join(affected_tests_list))\n        raise RuntimeError('\\n    One or more added test dependencies are not in the pip package.\\nIf these test dependencies need to be in TensorFlow pip package, please add them to \n    else:\n        print('TEST PASSED')", "docstring": "This script runs the pip smoke test.\n\nRaises:\nRuntimeError: If any dependencies for py_tests exist in subSet\n\nPrerequisites:\n1. Bazel is installed.\n2. Running in github repo of tensorflow.\n3. Configure has been run.", "source": "github-repos"}
{"code": "def encode_value(value):\n    if (value is None):\n        return document_pb2.Value(null_value=struct_pb2.NULL_VALUE)\n    if isinstance(value, bool):\n        return document_pb2.Value(boolean_value=value)\n    if isinstance(value, six.integer_types):\n        return document_pb2.Value(integer_value=value)\n    if isinstance(value, float):\n        return document_pb2.Value(double_value=value)\n    if isinstance(value, DatetimeWithNanoseconds):\n        return document_pb2.Value(timestamp_value=value.timestamp_pb())\n    if isinstance(value, datetime.datetime):\n        return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value))\n    if isinstance(value, six.text_type):\n        return document_pb2.Value(string_value=value)\n    if isinstance(value, six.binary_type):\n        return document_pb2.Value(bytes_value=value)\n    document_path = getattr(value, '_document_path', None)\n    if (document_path is not None):\n        return document_pb2.Value(reference_value=document_path)\n    if isinstance(value, GeoPoint):\n        return document_pb2.Value(geo_point_value=value.to_protobuf())\n    if isinstance(value, list):\n        value_list = [encode_value(element) for element in value]\n        value_pb = document_pb2.ArrayValue(values=value_list)\n        return document_pb2.Value(array_value=value_pb)\n    if isinstance(value, dict):\n        value_dict = encode_dict(value)\n        value_pb = document_pb2.MapValue(fields=value_dict)\n        return document_pb2.Value(map_value=value_pb)\n    raise TypeError('Cannot convert to a Firestore Value', value, 'Invalid type', type(value))", "docstring": "Converts a native Python value into a Firestore protobuf ``Value``.\n\nArgs:\nvalue (Union[NoneType, bool, int, float, datetime.datetime, \\\nstr, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native\nPython value to convert to a protobuf field.\n\nReturns:\n~google.cloud.firestore_v1beta1.types.Value: A\nvalue encoded as a Firestore protobuf.\n\nRaises:\nTypeError: If the ``value`` is not one of the accepted types.", "source": "codesearchnet"}
{"code": "def _restore_training_state(self, restore_state):\n    self.load_state_dict(restore_state['model'])\n    self.optimizer.load_state_dict(restore_state['optimizer'])\n    self.lr_scheduler.load_state_dict(restore_state['lr_scheduler'])\n    start_iteration = (restore_state['iteration'] + 1)\n    if self.config['verbose']:\n        print(f'Restored checkpoint to iteration {start_iteration}.')\n    if restore_state['best_model_found']:\n        self.checkpointer.best_model_found = True\n        self.checkpointer.best_iteration = restore_state['best_iteration']\n        self.checkpointer.best_score = restore_state['best_score']\n        if self.config['verbose']:\n            print(f'Updated checkpointer: best_score={self.checkpointer.best_score:.3f}, best_iteration={self.checkpointer.best_iteration}')\n    return start_iteration", "docstring": "Restores the model and optimizer states\n\nThis helper function restores the model's state to a given iteration so\nthat a user can resume training at any epoch.\n\nArgs:\nrestore_state: a state_dict dictionary", "source": "codesearchnet"}
{"code": "def load(self):\n    projects = {}\n    path = os.path.expanduser(self.path)\n    if (not os.path.isdir(path)):\n        return projects\n    logger.debug('Load project configs from %s', path)\n    for filename in os.listdir(path):\n        filename_parts = os.path.splitext(filename)\n        if (filename_parts[1][1:] != PROJECT_CONFIG_EXTENSION):\n            continue\n        name = filename_parts[0]\n        try:\n            project_file_path = os.path.join(path, filename)\n            with open(project_file_path) as f:\n                data = yaml.load(f)\n            projects[name] = data\n        except ValueError:\n            continue\n        logger.debug(\"Project '{}' config readed from {}\".format(name, project_file_path))\n    return projects", "docstring": "Load the projects config data from local path\n\nReturns:\nDict: project_name -> project_data", "source": "codesearchnet"}
{"code": "def wrap_deepmind(env, dim=84, framestack=True):\n    env = MonitorEnv(env)\n    env = NoopResetEnv(env, noop_max=30)\n    if ('NoFrameskip' in env.spec.id):\n        env = MaxAndSkipEnv(env, skip=4)\n    env = EpisodicLifeEnv(env)\n    if ('FIRE' in env.unwrapped.get_action_meanings()):\n        env = FireResetEnv(env)\n    env = WarpFrame(env, dim)\n    if framestack:\n        env = FrameStack(env, 4)\n    return env", "docstring": "Configure environment for DeepMind-style Atari.\n\nNote that we assume reward clipping is done outside the wrapper.\n\nArgs:\ndim (int): Dimension to resize observations to (dim x dim).\nframestack (bool): Whether to framestack observations.", "source": "codesearchnet"}
{"code": "def read(self, vals):\n        \n        i = 0\n        if len(vals[i]) == 0:\n            self.leapyear_observed = None\n        else:\n            self.leapyear_observed = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.daylight_saving_start_day = None\n        else:\n            self.daylight_saving_start_day = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.daylight_saving_end_day = None\n        else:\n            self.daylight_saving_end_day = vals[i]\n        i += 1\n        count = int(vals[i])\n        i += 1\n        for _ in range(count):\n            obj = Holiday()\n            obj.read(vals[i:i + obj.field_count])\n            self.add_holiday(obj)\n            i += obj.field_count", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def initialize_schema(connection):\n    cursor = connection.cursor()\n    cursor.execute('PRAGMA application_id={}'.format(_TENSORBOARD_APPLICATION_ID))\n    cursor.execute('PRAGMA user_version={}'.format(_TENSORBOARD_USER_VERSION))\n    with connection:\n        for statement in _SCHEMA_STATEMENTS:\n            lines = statement.strip('\\n').split('\\n')\n            message = (lines[0] + ('...' if (len(lines) > 1) else ''))\n            logger.debug('Running DB init statement: %s', message)\n            cursor.execute(statement)", "docstring": "Initializes the TensorBoard sqlite schema using the given connection.\n\nArgs:\nconnection: A sqlite DB connection.", "source": "codesearchnet"}
{"code": "def from_file(filename='feff.inp'):\n    with zopen(filename, 'rt') as f:\n        lines = list(clean_lines(f.readlines()))\n    params = {}\n    eels_params = []\n    ieels = (- 1)\n    ieels_max = (- 1)\n    for (i, line) in enumerate(lines):\n        m = re.match('([A-Z]+\\\\d*\\\\d*)\\\\s*(.*)', line)\n        if m:\n            key = m.group(1).strip()\n            val = m.group(2).strip()\n            val = Tags.proc_val(key, val)\n            if (key not in ('ATOMS', 'POTENTIALS', 'END', 'TITLE')):\n                if (key in ['ELNES', 'EXELFS']):\n                    ieels = i\n                    ieels_max = (ieels + 5)\n                else:\n                    params[key] = val\n        if (ieels >= 0):\n            if ((i >= ieels) and (i <= ieels_max)):\n                if (i == (ieels + 1)):\n                    if (int(line.split()[1]) == 1):\n                        ieels_max -= 1\n                eels_params.append(line)\n    if eels_params:\n        if (len(eels_params) == 6):\n            eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']\n        else:\n            eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']\n        eels_dict = {'ENERGY': Tags._stringify_val(eels_params[0].split()[1:])}\n        for (k, v) in zip(eels_keys, eels_params[1:]):\n            eels_dict[k] = str(v)\n        params[str(eels_params[0].split()[0])] = eels_dict\n    return Tags(params)", "docstring": "Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.\n\nArgs:\nfilename: Filename for either PARAMETER or feff.inp file\n\nReturns:\nFeff_tag object", "source": "codesearchnet"}
{"code": "def write_index_and_rst_files(self, overwrite: bool=False, mock: bool=False) -> None:\n    for f in self.files_to_index:\n        if isinstance(f, FileToAutodocument):\n            f.write_rst(prefix=self.rst_prefix, suffix=self.rst_suffix, heading_underline_char=self.source_rst_heading_underline_char, overwrite=overwrite, mock=mock)\n        elif isinstance(f, AutodocIndex):\n            f.write_index_and_rst_files(overwrite=overwrite, mock=mock)\n        else:\n            fail('Unknown thing in files_to_index: {!r}'.format(f))\n    self.write_index(overwrite=overwrite, mock=mock)", "docstring": "Writes both the individual RST files and the index.\n\nArgs:\noverwrite: allow existing files to be overwritten?\nmock: pretend to write, but don't", "source": "codesearchnet"}
{"code": "def save_with_exif_info(img, *args, **kwargs):\n    \n    if 'exif' in kwargs:\n        exif = kwargs.pop('exif')\n    else:\n        exif = img.info.get('exif')\n    img.save(*args, exif=exif, **kwargs)", "docstring": "Saves an image using PIL, preserving the exif information.\n\nArgs:\nimg (PIL.Image.Image):\n*args: The arguments for the `save` method of the Image class.\n**kwargs: The keywords for the `save` method of the Image class.", "source": "juraj-google-style"}
{"code": "def predict_undirected_graph(self, data):\n    graph = Graph()\n    for (idx_i, i) in enumerate(data.columns):\n        for (idx_j, j) in enumerate(data.columns[(idx_i + 1):]):\n            score = self.predict(data[i].values, data[j].values)\n            if (abs(score) > 0.001):\n                graph.add_edge(i, j, weight=score)\n    return graph", "docstring": "Build a skeleton using a pairwise independence criterion.\n\nArgs:\ndata (pandas.DataFrame): Raw data table\n\nReturns:\nnetworkx.Graph: Undirected graph representing the skeleton.", "source": "codesearchnet"}
{"code": "def append(self, transitions, rows=None):\n    \n    rows = tf.range(self._capacity) if rows is None else rows\n    assert rows.shape.ndims == 1\n    assert_capacity = tf.assert_less(\n        rows, self._capacity,\n        message='capacity exceeded')\n    with tf.control_dependencies([assert_capacity]):\n      assert_max_length = tf.assert_less(\n          tf.gather(self._length, rows), self._max_length,\n          message='max length exceeded')\n    with tf.control_dependencies([assert_max_length]):\n      timestep = tf.gather(self._length, rows)\n      indices = tf.stack([rows, timestep], 1)\n      append_ops = tools.nested.map(\n          lambda var, val: tf.scatter_nd_update(var, indices, val),\n          self._buffers, transitions, flatten=True)\n    with tf.control_dependencies(append_ops):\n      episode_mask = tf.reduce_sum(tf.one_hot(\n          rows, self._capacity, dtype=tf.int32), 0)\n      return self._length.assign_add(episode_mask)", "docstring": "Append a batch of transitions to rows of the memory.\n\nArgs:\ntransitions: Tuple of transition quantities with batch dimension.\nrows: Episodes to append to, defaults to all.\n\nReturns:\nOperation.", "source": "juraj-google-style"}
{"code": "def parse_conservation(variant, info_key):\n    raw_score = variant.INFO.get(info_key)\n    conservations = []\n    if raw_score:\n        if isinstance(raw_score, numbers.Number):\n            raw_score = (raw_score,)\n        for score in raw_score:\n            if (score >= CONSERVATION[info_key]['conserved_min']):\n                conservations.append('Conserved')\n            else:\n                conservations.append('NotConserved')\n    return conservations", "docstring": "Get the conservation prediction\n\nArgs:\nvariant(dict): A variant dictionary\ninfo_key(str)\n\nReturns:\nconservations(list): List of censervation terms", "source": "codesearchnet"}
{"code": "def restore(self, sess, save_path):\n    start_time = time.time()\n    if self._is_empty:\n        return\n    if save_path is None:\n        raise ValueError(\"Can't load save_path when it is None.\")\n    checkpoint_prefix = compat.as_text(save_path)\n    if not checkpoint_management.checkpoint_exists_internal(checkpoint_prefix):\n        raise ValueError('The passed save_path is not a valid checkpoint: ' + checkpoint_prefix)\n    logging.info('Restoring parameters from %s', checkpoint_prefix)\n    try:\n        if context.executing_eagerly():\n            self._build_eager(save_path, build_save=False, build_restore=True)\n        else:\n            sess.run(self.saver_def.restore_op_name, {self.saver_def.filename_tensor_name: save_path})\n    except errors.NotFoundError as err:\n        try:\n            names_to_keys = object_graph_key_mapping(save_path)\n        except errors.NotFoundError:\n            raise _wrap_restore_error_with_msg(err, 'a Variable name or other graph key that is missing')\n        logging.warning('Restoring an object-based checkpoint using a name-based saver. This may be somewhat fragile, and will re-build the Saver. Instead, consider loading object-based checkpoints using tf.train.Checkpoint().')\n        self._object_restore_saver = saver_from_object_based_checkpoint(checkpoint_path=save_path, var_list=self._var_list, builder=self._builder, names_to_keys=names_to_keys, cached_saver=self._object_restore_saver)\n        self._object_restore_saver.restore(sess=sess, save_path=save_path)\n    except errors.InvalidArgumentError as err:\n        raise _wrap_restore_error_with_msg(err, 'a mismatch between the current graph and the graph')\n    metrics.AddCheckpointReadDuration(api_label=_SAVER_LABEL, microseconds=_get_duration_microseconds(start_time, time.time()))", "docstring": "Restores previously saved variables.\n\nThis method runs the ops added by the constructor for restoring variables.\nIt requires a session in which the graph was launched.  The variables to\nrestore do not have to have been initialized, as restoring is itself a way\nto initialize variables.\n\nThe `save_path` argument is typically a value previously returned from a\n`save()` call, or a call to `latest_checkpoint()`.\n\nArgs:\nsess: A `Session` to use to restore the parameters. None in eager mode.\nsave_path: Path where parameters were previously saved.\n\nRaises:\nValueError: If save_path is None or not a valid checkpoint.", "source": "github-repos"}
{"code": "def place_line(self,\n                   device: 'cirq.google.XmonDevice',\n                   length: int) -> GridQubitLineTuple:\n        \n\n        if not device.qubits:\n            return GridQubitLineTuple()\n\n        start = min(device.qubits)  \n        sequences = []  \n        greedy_search = {\n            'minimal_connectivity': [\n                _PickFewestNeighbors(device, start),\n            ],\n            'largest_area': [\n                _PickLargestArea(device, start),\n            ],\n            'best': [\n                _PickFewestNeighbors(device, start),\n                _PickLargestArea(device, start),\n            ]\n        }  \n\n        algos = greedy_search.get(self.algorithm)\n        if algos is None:\n            raise ValueError(\n                \"Unknown greedy search algorithm %s\" % self.algorithm)\n\n        for algorithm in algos:\n            sequences.append(algorithm.get_or_search())\n\n        return GridQubitLineTuple.best_of(sequences, length)", "docstring": "Runs line sequence search.\n\nArgs:\ndevice: Chip description.\nlength: Required line length.\n\nReturns:\nLinear sequences found on the chip.\n\nRaises:\nValueError: If search algorithm passed on initialization is not\nrecognized.", "source": "juraj-google-style"}
{"code": "def _CreateFeedMapping(client, feed_details):\n    feed_mapping_service = client.GetService('FeedMappingService', version='v201809')\n    operation = {'operand': {'criterionType': DSA_PAGE_FEED_CRITERION_TYPE, 'feedId': feed_details.feed_id, 'attributeFieldMappings': [{'feedAttributeId': feed_details.url_attribute_id, 'fieldId': DSA_PAGE_URLS_FIELD_ID}, {'feedAttributeId': feed_details.label_attribute_id, 'fieldId': DSA_LABEL_FIELD_ID}]}, 'operator': 'ADD'}\n    feed_mapping_service.mutate([operation])", "docstring": "Creates the feed mapping for DSA page feeds.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_details: a _DSAFeedDetails instance.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 html_id=None,\n                 title=None,\n                 description=None,\n                 widgets=None,\n                 template=None,\n                 context=None,\n                 **kwargs):\n        \n        if widgets is not None:\n            if not isinstance(widgets, (list, tuple)):\n                raise AttributeError('Box widgets attribute '\n                                     'must be a list or tuple')\n            if not all([isinstance(e, Widget) for e in widgets]):\n                raise ValueError('All elements of Box must be Widget instances')  \n\n            try:\n                self.widgets = widgets\n            except AttributeError:\n                self._widgets = widgets\n\n        self.type = 'box'\n\n        if html_id is not None:\n            try:\n                self.html_id = html_id\n            except AttributeError:\n                self._html_id = html_id\n        if title is not None:\n            try:\n                self.title = title\n            except AttributeError:\n                self._title = title\n        if description is not None:\n            try:\n                self.description = description\n            except AttributeError:\n                self._description = description\n        if template is not None:\n            try:\n                self.template = template\n            except AttributeError:\n                self._template = template\n        if context is not None:\n            try:\n                self.context = context\n            except AttributeError:\n                self._context = context\n\n        for kw, arg in kwargs.items():\n            setattr(self, kw, arg)", "docstring": "Init method.\n\nArgs:\nhtml_id (str): an ID to set on the HTML box.\ntitle (str): a title to display on the top of the box.\ndescription (str): a description to display after the title box.\nwidgets (list): the box's list of widgets.\ntemplate (str): the path to a custom template to use for this box.\ncontext (dict): additional context to pass to the box.", "source": "juraj-google-style"}
{"code": "def experimental_local_results(self, value):\n    return super(CentralStorageStrategy, self).experimental_local_results(value)", "docstring": "Returns the list of all local per-replica values contained in `value`.\n\nIn `CentralStorageStrategy` there is a single worker so the value returned\nwill be all the values on that worker.\n\nArgs:\nvalue: A value returned by `run()`, `extended.call_for_each_replica()`,\nor a variable created in `scope`.\n\nReturns:\nA tuple of values contained in `value`. If `value` represents a single\nvalue, this returns `(value,).`", "source": "github-repos"}
{"code": "def image_summary(seqs, name, num=None):\n  \n  seqs = tf.clip_by_value(seqs, 0., 1.)\n  seqs = tf.unstack(seqs[:num])\n  joined_seqs = [tf.concat(tf.unstack(seq), 1) for seq in seqs]\n  joined_seqs = tf.expand_dims(tf.concat(joined_seqs, 0), 0)\n  tf.compat.v2.summary.image(\n      name,\n      joined_seqs,\n      max_outputs=1,\n      step=tf.compat.v1.train.get_or_create_global_step())", "docstring": "Visualizes sequences as TensorBoard summaries.\n\nArgs:\nseqs: A tensor of shape [n, t, h, w, c].\nname: String name of this summary.\nnum: Integer for the number of examples to visualize. Defaults to\nall examples.", "source": "juraj-google-style"}
{"code": "def compute_edges(self, rules: List[str]=None, ast_result=False, fmt='medium') -> List[Mapping[(str, Any)]]:\n    if (not self.ast):\n        return self\n    edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)\n    if ast_result:\n        return edges_asts\n    edges = []\n    for ast in edges_asts:\n        edges.append({'subject': ast.bel_subject.to_string(), 'relation': ast.bel_relation, 'object': ast.bel_object.to_string()})\n    return edges", "docstring": "Computed edges from primary BEL statement\n\nTakes an AST and generates all computed edges based on BEL Specification YAML computed signatures.\nWill run only the list of computed edge rules if given.\n\nArgs:\nrules (list): a list of rules to filter; only the rules in this list will be applied to computed\nfmt (str): short, medium or long version of BEL Edge (function and relation names)\nReturns:\nList[Mapping[str, Any]]: BEL Edges in medium format", "source": "codesearchnet"}
{"code": "def write(self, *parts: WritableTypes, shared_parts_only: bool=False) -> 'Content':\n    content_updated = False\n    for p in parts:\n        p = self._to_content(p)\n        if p is None:\n            continue\n        if not isinstance(p, (str, self.__class__)):\n            raise TypeError(f'{p!r} ({type(p)}) cannot be writable. Only str, None, {self.__class__.__name__} and callable object that returns one of them are supported.')\n        if isinstance(p, Content):\n            current = self._shared_parts\n            for k, v in p.shared_parts.items():\n                current[k].add(v)\n            p = p.content\n        if not shared_parts_only:\n            self._content_stream.write(p)\n            content_updated = True\n    if content_updated:\n        self.__dict__.pop('content', None)\n    return self", "docstring": "Writes one or more parts to current Content.\n\nArgs:\n*parts: The parts to be written. Each part can be a string, a Content\nobject, a callable that returns one of the above, or None.\nshared_parts_only: If True, only write the shared parts.\n\nReturns:\nThe current Content object for chaining.", "source": "github-repos"}
{"code": "def advise(self, options):\n    advise_pb = tfprof_output_pb2.AdviceProto()\n    opts = _build_advisor_options(options)\n    advise_pb.ParseFromString(print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))\n    return advise_pb", "docstring": "Automatically detect problems and generate reports.\n\nArgs:\noptions: A dict of options. See ALL_ADVICE example above.\n\nReturns:\nAn Advise proto that contains the reports from all checkers.", "source": "github-repos"}
{"code": "def energy_string_to_float( string ):\n    \n    energy_re = re.compile( \"(-?\\d+\\.\\d+)\" )\n    return float( energy_re.match( string ).group(0) )", "docstring": "Convert a string of a calculation energy, e.g. '-1.2345 eV' to a float.\n\nArgs:\nstring (str): The string to convert.\n\nReturn\n(float)", "source": "juraj-google-style"}
{"code": "def test_skip(self, e=None):\n    self._test_end(TestResultEnums.TEST_RESULT_SKIP, e)", "docstring": "To mark the test as skipped in this record.\n\nArgs:\ne: An instance of mobly.signals.TestSkip.", "source": "github-repos"}
{"code": "def get_resolution(pdb_id):\n    pdb_id = pdb_id.upper()\n    if (pdb_id not in _property_table().index):\n        raise ValueError('PDB ID not in property table')\n    else:\n        resolution = _property_table().ix[(pdb_id, 'resolution')]\n        if pd.isnull(resolution):\n            log.debug('{}: no resolution available, probably not an X-ray crystal structure')\n            resolution = float('inf')\n    return resolution", "docstring": "Quick way to get the resolution of a PDB ID using the table of results from the REST service\n\nReturns infinity if the resolution is not available.\n\nReturns:\nfloat: resolution of a PDB ID in Angstroms\n\nTODO:\n- Unit test", "source": "codesearchnet"}
{"code": "def plot_ax(self, ax=None, fontsize=12, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax=ax)\n    color = kwargs.get('color', 'r')\n    label = kwargs.get('label', '{} fit'.format(self.__class__.__name__))\n    lines = [('Equation of State: %s' % self.__class__.__name__), ('Minimum energy = %1.2f eV' % self.e0), ('Minimum or reference volume = %1.2f Ang^3' % self.v0), ('Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa' % (self.b0, self.b0_GPa)), ('Derivative of bulk modulus wrt pressure = %1.2f' % self.b1)]\n    text = '\\n'.join(lines)\n    text = kwargs.get('text', text)\n    ax.plot(self.volumes, self.energies, linestyle='None', marker='o', color=color)\n    (vmin, vmax) = (min(self.volumes), max(self.volumes))\n    (vmin, vmax) = ((vmin - (0.01 * abs(vmin))), (vmax + (0.01 * abs(vmax))))\n    vfit = np.linspace(vmin, vmax, 100)\n    ax.plot(vfit, self.func(vfit), linestyle='dashed', color=color, label=label)\n    ax.grid(True)\n    ax.set_xlabel('Volume $\\\\AA^3$')\n    ax.set_ylabel('Energy (eV)')\n    ax.legend(loc='best', shadow=True)\n    ax.text(0.5, 0.5, text, fontsize=fontsize, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)\n    return fig", "docstring": "Plot the equation of state on axis `ax`\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nfontsize: Legend fontsize.\ncolor (str): plot color.\nlabel (str): Plot label\ntext (str): Legend text (options)\n\nReturns:\nMatplotlib figure object.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, result, value=(), paren=False):\n        \n        try:\n            value = list(value)\n        except TypeError as te:\n            raise AssertionError(str(te))\n\n        CodeLiteral.__init__(self, scope, parent, value, result, paren)", "docstring": "Constructor for a compound literal.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nvalue (iterable): The initial value sequence in this composition.\nresult (str): The return type of the literal in the program.\n\nKwargs:\nparen (bool): Whether the literal is enclosed in parentheses.", "source": "juraj-google-style"}
{"code": "def transpose(a, axes=None):\n    if isinstance(a, np.ndarray):\n        return np.transpose(a, axes)\n    elif isinstance(a, RemoteArray):\n        return a.transpose(*axes)\n    elif isinstance(a, Remote):\n        return _remote_to_array(a).transpose(*axes)\n    elif isinstance(a, DistArray):\n        if (axes is None):\n            axes = range((a.ndim - 1), (- 1), (- 1))\n        axes = list(axes)\n        if (len(set(axes)) < len(axes)):\n            raise ValueError('repeated axis in transpose')\n        if (sorted(axes) != list(range(a.ndim))):\n            raise ValueError(\"axes don't match array\")\n        distaxis = a._distaxis\n        new_distaxis = axes.index(distaxis)\n        new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]\n        return DistArray(new_subarrays, new_distaxis)\n    else:\n        return np.transpose(a, axes)", "docstring": "Returns a view of the array with axes transposed.\n\nFor a 1-D array, this has no effect.\nFor a 2-D array, this is the usual matrix transpose.\nFor an n-D array, if axes are given, their order indicates how the\naxes are permuted\n\nArgs:\na (array_like): Input array.\naxes (list of int, optional): By default, reverse the dimensions,\notherwise permute the axes according to the values given.", "source": "codesearchnet"}
{"code": "def __init__(self, ident, latitude, longitude, visible=False, user=None,\n                 timestamp=None, tags=None):\n        \n        super(Node, self).__init__(latitude, longitude)\n\n        self.ident = ident\n        self.visible = visible\n        self.user = user\n        self.timestamp = timestamp\n        self.tags = tags", "docstring": "Initialise a new ``Node`` object.\n\nArgs:\nident (int): Unique identifier for the node\nlatitude (float): Nodes's latitude\nlongitude (float): Node's longitude\nvisible (bool): Whether the node is visible\nuser (str): User who logged the node\ntimestamp (str): The date and time a node was logged\ntags (dict): Tags associated with the node", "source": "juraj-google-style"}
{"code": "def get_data_with_timestamps(self):\n    result = []\n    for (t, d) in zip(self.timestamps, self.data_points):\n        result.append(t, round(d, self.lr))\n    return result", "docstring": "Returns the data points with timestamps.\n\nReturns:\nA list of tuples in the format of (timestamp, data)", "source": "codesearchnet"}
{"code": "def discard_observer(self, observer):\n        \n        discarded = False\n        key = self.make_key(observer)\n        if key in self.observers:\n            del self.observers[key]\n            discarded = True\n        return discarded", "docstring": "Un-register an observer.\n\nArgs:\nobserver: The observer to un-register.\n\nReturns true if an observer was removed, otherwise False.", "source": "juraj-google-style"}
{"code": "def merge(self, other):\n    if (other.seed != self.seed):\n        raise ValueError('Cannot merge MinHash with                    different seeds')\n    if (len(self) != len(other)):\n        raise ValueError('Cannot merge MinHash with                    different numbers of permutation functions')\n    self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)", "docstring": "Merge the other MinHash with this one, making this one the union\nof both.\n\nArgs:\nother (datasketch.MinHash): The other MinHash.", "source": "codesearchnet"}
{"code": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if target_sizes is not None:\n        if len(out_logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    prob = out_logits.sigmoid()\n    prob = prob.view(out_logits.shape[0], -1)\n    k_value = min(top_k, prob.size(1))\n    topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    if target_sizes is not None:\n        if isinstance(target_sizes, List):\n            img_h = torch.Tensor([i[0] for i in target_sizes])\n            img_w = torch.Tensor([i[1] for i in target_sizes])\n        else:\n            img_h, img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n        boxes = boxes * scale_fct[:, None, :]\n    results = []\n    for s, l, b in zip(scores, labels, boxes):\n        score = s[s > threshold]\n        label = l[s > threshold]\n        box = b[s > threshold]\n        results.append({'scores': score, 'labels': label, 'boxes': box})\n    return results", "docstring": "Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,\ntop_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`YolosObjectDetectionOutput`]):\nRaw outputs of the model.\nthreshold (`float`, *optional*):\nScore threshold to keep object detection predictions.\ntarget_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\ntop_k (`int`, *optional*, defaults to 100):\nKeep only top k bounding boxes before filtering by thresholding.\n\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def _ConvertHeaderToId(header):\n    if (not (header.startswith('<') or header.endswith('>'))):\n        raise exceptions.BatchError(('Invalid value for Content-ID: %s' % header))\n    if ('+' not in header):\n        raise exceptions.BatchError(('Invalid value for Content-ID: %s' % header))\n    (_, request_id) = header[1:(- 1)].rsplit('+', 1)\n    return urllib_parse.unquote(request_id)", "docstring": "Convert a Content-ID header value to an id.\n\nPresumes the Content-ID header conforms to the format that\n_ConvertIdToHeader() returns.\n\nArgs:\nheader: A string indicating the Content-ID header value.\n\nReturns:\nThe extracted id value.\n\nRaises:\nBatchError if the header is not in the expected format.", "source": "codesearchnet"}
{"code": "def claim(self, file_readers):\n    unclaimed_readers = []\n    vcf_readers = []\n    for file_reader in file_readers:\n        if self._is_mutect_vcf(file_reader):\n            vcf_reader = vcf.VcfReader(file_reader)\n            vcf_readers.append(_MutectVcfReader(vcf_reader))\n        else:\n            unclaimed_readers.append(file_reader)\n    return (unclaimed_readers, vcf_readers)", "docstring": "Recognizes and claims MuTect VCFs form the set of all input VCFs.\n\nEach defined caller has a chance to evaluate and claim all the incoming\nfiles as something that it can process.\n\nArgs:\nfile_readers: the collection of currently unclaimed files\n\nReturns:\nA tuple of unclaimed readers and MuTectVcfReaders.", "source": "codesearchnet"}
{"code": "def insert_arguments_into_sql_query(compilation_result, arguments):\n    if (compilation_result.language != SQL_LANGUAGE):\n        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))\n    base_query = compilation_result.query\n    return base_query.params(**arguments)", "docstring": "Insert the arguments into the compiled SQL query to form a complete query.\n\nArgs:\ncompilation_result: CompilationResult, compilation result from the GraphQL compiler.\narguments: Dict[str, Any], parameter name -> value, for every parameter the query expects.\n\nReturns:\nSQLAlchemy Selectable, a executable SQL query with parameters bound.", "source": "codesearchnet"}
{"code": "def _register_info(self, server):\n    server_url = urllib.parse.urlparse(server.get_url())\n    info = manager.TensorBoardInfo(version=version.VERSION, start_time=int(time.time()), port=server_url.port, pid=os.getpid(), path_prefix=self.flags.path_prefix, logdir=self.flags.logdir, db=self.flags.db, cache_key=self.cache_key)\n    atexit.register(manager.remove_info_file)\n    manager.write_info_file(info)", "docstring": "Write a TensorBoardInfo file and arrange for its cleanup.\n\nArgs:\nserver: The result of `self._make_server()`.", "source": "codesearchnet"}
{"code": "def match(sel, obj, arr=None, bailout_fn=None):\n    if arr:\n        sel = interpolate(sel, arr)\n    sel = parse(sel)[1]\n    return _forEach(sel, obj, bailout_fn=bailout_fn)", "docstring": "Match a selector to an object, yielding the matched values.\n\nArgs:\nsel: The JSONSelect selector to apply (a string)\nobj: The object against which to apply the selector\narr: If sel contains ? characters, then the values in this array will\nbe safely interpolated into the selector.\nbailout_fn: A callback which takes two parameters, |obj| and |matches|.\nThis will be called on every node in obj. If it returns True, the\nsearch for matches will be aborted below that node. The |matches|\nparameter indicates whether the node matched the selector. This is\nintended to be used as a performance optimization.", "source": "codesearchnet"}
{"code": "def get_ip_prefixes_from_config(config, services, ip_version):\n    \n    ip_prefixes = set()\n\n    for service in services:\n        ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))\n        if ip_prefix.version == ip_version:\n            ip_prefixes.add(ip_prefix.with_prefixlen)\n\n    return ip_prefixes", "docstring": "Build a set of IP prefixes found in service configuration files.\n\nArguments:\nconfig (obg): A configparser object which holds our configuration.\nservices (list): A list of section names which are the name of the\nservice checks.\nip_version (int): IP protocol version\n\nReturns:\nA set of IP prefixes.", "source": "juraj-google-style"}
{"code": "def outputZip(self,figtype='png'):\n        \n        from zipfile import ZipFile\n        with ZipFile(self.outfile+'.zip', 'w') as zipcontainer:\n            zipcontainer.writestr(\n                'summary.txt',\n                '\n                    self.title,\n                    self.p,\n                    ('\\n\n                ).encode()\n            )\n            c = count(1)\n            for section in self.sections:\n                section.sectionOutZip(zipcontainer,'s{}_{}/'.format(next(c),section.title.replace(' ','_')),\n                                      figtype=figtype)", "docstring": "Outputs the report in a zip container.\nFigs and tabs as pngs and excells.\n\nArgs:\nfigtype (str): Figure type of images in the zip folder.", "source": "juraj-google-style"}
{"code": "def add_scheduling_block(config, schema_path=None):\n    if (schema_path is None):\n        schema_path = os.path.join(os.path.dirname(__file__), 'sbi_post.json')\n    schema = load_schema(schema_path)\n    jsonschema.validate(config, schema)\n    DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config))\n    DB.rpush('scheduling_block_events', json.dumps(dict(type='created', id=config['id'])))", "docstring": "Add a Scheduling Block to the Configuration Database.\n\nThe configuration dictionary must match the schema defined in\nin the schema_path variable at the top of the function.\n\nArgs:\nconfig (dict): Scheduling Block instance request configuration.\nschema_path (str): Path to schema file used to validate the\nScheduling Block Instance request", "source": "codesearchnet"}
{"code": "def test_ingraph_train_loop(self, mode):\n    self._maybe_skip(mode)\n    if tf2.enabled():\n        self.skipTest('TensorFlow 1 required')\n    with ops.device(_get_device(mode)):\n        random_seed.set_random_seed(1234)\n        np.random.seed(1234)\n        num_iter, bs, nchan, nclass = (100, 64, 32, 100)\n        data = np.random.normal(size=(bs * num_iter, nchan)).astype(np.float32)\n        labels = np.random.randint(nclass, size=(bs * num_iter,))\n        ds = dataset_ops.Dataset.from_tensor_slices((data, labels))\n        ds = ds.batch(bs).prefetch(3)\n        it = ds.make_one_shot_iterator()\n\n        def body(_, i):\n            i += 1\n            x, yt = it.get_next()\n            dense = layers.Dense(nclass)\n            y = dense(x)\n            loss = losses.sparse_softmax_cross_entropy(yt, y)\n            opt = adam.AdamOptimizer()\n            train_op = opt.minimize(loss, var_list=dense.trainable_weights)\n            with ops.control_dependencies([train_op]):\n                loss = array_ops.identity(loss)\n            return (loss, i)\n        begin, end = (constant_op.constant(0), constant_op.constant(num_iter))\n        loss, _ = while_loop.while_loop(lambda loss, i: math_ops.less(i, end), body, [0.0, begin])\n    output_val_ref, output_val, cost_graph = self._run(mode, loss)\n    node_map = _build_node_map(cost_graph.node)\n    self._assert_output_f16(mode, node_map, 'while/dense/MatMul')\n    self._assert_output_f16(mode, node_map, 'while/gradients/while/dense/MatMul_grad/MatMul_1')\n    self.assertAllClose(output_val_ref, output_val, atol=0.001, rtol=0.001)", "docstring": "Tests a graph containing a while loop around a training update.\n\nThis requires the grappler pass to take special care with its handling of\nEnter ops that appear in front of reads from non-resource variables. See\nthe use of NodeImplicitlyReadsVariable in auto_mixed_precision.cc.\n\nArgs:\nmode: Either 'cuda' or 'mkl'.", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A REALM sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def add_period_and_roll(self, date_tensor, period_tensor, roll_convention=constants.BusinessDayConvention.NONE):\n    pass", "docstring": "Adds given periods to given dates and rolls to business days.\n\nThe original dates are not rolled prior to addition.\n\nArgs:\ndate_tensor: DateTensor of dates to add to.\nperiod_tensor: PeriodTensor broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def create_opengl_context(surface_size=(640, 480)):\n  \n  egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)\n\n  major, minor = egl.EGLint(), egl.EGLint()\n  egl.eglInitialize(egl_display, pointer(major), pointer(minor))\n\n  config_attribs = [\n      egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8,\n      egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24,\n      egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE\n  ]\n  config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs)\n\n  num_configs = egl.EGLint()\n  egl_cfg = egl.EGLConfig()\n  egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1,\n                      pointer(num_configs))\n\n  width, height = surface_size\n  pbuffer_attribs = [\n      egl.EGL_WIDTH,\n      width,\n      egl.EGL_HEIGHT,\n      height,\n      egl.EGL_NONE,\n  ]\n  pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs)\n  egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs)\n\n  egl.eglBindAPI(egl.EGL_OPENGL_API)\n\n  egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT,\n                                     None)\n  egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)", "docstring": "Create offscreen OpenGL context and make it current.\n\nUsers are expected to directly use EGL API in case more advanced\ncontext management is required.\n\nArgs:\nsurface_size: (width, height), size of the offscreen rendering surface.", "source": "juraj-google-style"}
{"code": "def set_scf_algorithm_and_iterations(self, algorithm='diis', iterations=50):\n    available_algorithms = {'diis', 'dm', 'diis_dm', 'diis_gdm', 'gdm', 'rca', 'rca_diis', 'roothaan'}\n    if (algorithm.lower() not in available_algorithms):\n        raise ValueError((('Algorithm ' + algorithm) + ' is not available in QChem'))\n    self.params['rem']['scf_algorithm'] = algorithm.lower()\n    self.params['rem']['max_scf_cycles'] = iterations", "docstring": "Set algorithm used for converging SCF and max number of SCF iterations.\n\nArgs:\nalgorithm: The algorithm used for converging SCF. (str)\niterations: The max number of SCF iterations. (Integer)", "source": "codesearchnet"}
{"code": "def wait_until_element_not_visible(webdriver, locator_lambda_expression, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):\n    try:\n        stoptime = (datetime.now() + timedelta(seconds=timeout))\n        while (datetime.now() < stoptime):\n            element = WebDriverWait(webdriver, WTF_TIMEOUT_MANAGER.BRIEF).until(locator_lambda_expression)\n            if element.is_displayed():\n                time.sleep(sleep)\n            else:\n                break\n    except TimeoutException:\n        pass", "docstring": "Wait for a WebElement to disappear.\n\nArgs:\nwebdriver (Webdriver) - Selenium Webdriver\nlocator (lambda) - Locator lambda expression.\n\nKwargs:\ntimeout (number) - timeout period\nsleep (number) - sleep period between intervals.", "source": "codesearchnet"}
{"code": "def _GetFileByPath(self, key_path_upper):\n    \n    \n\n    key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)\n    if not registry_file:\n      for mapping in self._GetFileMappingsByPath(key_path_upper):\n        try:\n          registry_file = self._OpenFile(mapping.windows_path)\n        except IOError:\n          registry_file = None\n\n        if not registry_file:\n          continue\n\n        if not key_path_prefix:\n          key_path_prefix = mapping.key_path_prefix\n\n        self.MapFile(key_path_prefix, registry_file)\n        key_path_prefix = key_path_prefix.upper()\n        break\n\n    return key_path_prefix, registry_file", "docstring": "Retrieves a Windows Registry file for a specific path.\n\nArgs:\nkey_path_upper (str): Windows Registry key path, in upper case with\na resolved root key alias.\n\nReturns:\ntuple: consists:\n\nstr: upper case key path prefix\nWinRegistryFile: corresponding Windows Registry file or None if not\navailable.", "source": "juraj-google-style"}
{"code": "def NeedsSeparatingHyphenHyphen(self, flag='help'):\n    element = self.GetLastHealthyElement()\n    component = element.component\n    spec = inspectutils.GetFullArgSpec(component)\n    return spec.varkw is not None or flag in spec.args or flag in spec.kwonlyargs", "docstring": "Returns whether a the trace need '--' before '--help'.\n\n'--' is needed when the component takes keyword arguments, when the value of\nflag matches one of the argument of the component, or the component takes in\nkeyword-only arguments(e.g. argument with default value).\n\nArgs:\nflag: the flag available for the trace\n\nReturns:\nTrue for needed '--', False otherwise.", "source": "github-repos"}
{"code": "def attention_mask_autoregressive(query_pos, dtype=tf.float32):\n  \n  memory_pos = rename_length_to_memory_length(query_pos)\n  return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9", "docstring": "Bias for self-attention where attention to the right is disallowed.\n\nArgs:\nquery_pos: a mtf.Tensor with shape [..., length_dim]\ndtype: a tf.dtype\n\nReturns:\na mtf.Tensor with shape [..., length_dim, memory_length_dim]", "source": "juraj-google-style"}
{"code": "def _get_duration_microseconds(start_time_seconds, end_time_seconds):\n    if end_time_seconds < start_time_seconds:\n        return 0\n    return round((end_time_seconds - start_time_seconds) * 1000000)", "docstring": "Calculate the duration between start and end time.\n\nArgs:\nstart_time_seconds: The start time in seconds.\nend_time_seconds: The end time in seconds.\n\nReturns:\nThe duration between the start and the end time. Return 0 if\nend_time_seconds < start_time_seconds.", "source": "github-repos"}
{"code": "def with_headers(self, headers):\n        \n        copy = headers.copy()\n        copy.update(self._headers)\n        return self.__copy_and_set('headers', copy)", "docstring": "Adds headers to the request\n\nArgs:\nheaders (dict): The headers to add the request headers\n\nReturns:\nThe request builder instance in order to chain calls", "source": "juraj-google-style"}
{"code": "def extract_objects(self, fname, type_filter=None):\n    objects = []\n    if (fname in self.object_cache):\n        objects = self.object_cache[fname]\n    else:\n        with io.open(fname, 'rt', encoding='utf-8') as fh:\n            text = fh.read()\n            objects = parse_verilog(text)\n            self.object_cache[fname] = objects\n    if type_filter:\n        objects = [o for o in objects if isinstance(o, type_filter)]\n    return objects", "docstring": "Extract objects from a source file\n\nArgs:\nfname(str): Name of file to read from\ntype_filter (class, optional): Object class to filter results\nReturns:\nList of objects extracted from the file.", "source": "codesearchnet"}
{"code": "def render_to_image_file(self, image_out_path, width_pixels=None, height_pixels=None, dpi=90):\n    self._render_type = 'file'\n    self._tree.render(file_name=image_out_path, w=width_pixels, h=height_pixels, dpi=dpi, units='px', tree_style=self._get_tree_style())", "docstring": "Render the SubjectInfo to an image file.\n\nArgs:\nimage_out_path : str\nPath to where image image will be written. Valid extensions are\n``.svg,`` ``.pdf``, and ``.png``.\n\nwidth_pixels : int\nWidth of image to write.\n\nheight_pixels : int\nHeight of image to write, in pixels.\n\ndpi:\nDots Per Inch to declare in image file. This does not change the\nresolution of the image but may change the size of the image when\nrendered.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def set_fresh_watermark(game_queue, count_from, window_size, fresh_fraction=0.05, minimum_fresh=20000):\n    already_played = (game_queue.latest_game_number - count_from)\n    print('== already_played: ', already_played, flush=True)\n    if (window_size > count_from):\n        game_queue.require_fresh_games(int((minimum_fresh * 0.9)))\n    else:\n        num_to_play = max(0, (math.ceil(((window_size * 0.9) * fresh_fraction)) - already_played))\n        print('== Num to play: ', num_to_play, flush=True)\n        game_queue.require_fresh_games(num_to_play)", "docstring": "Sets the metadata cell used to block until some quantity of games have been played.\n\nThis sets the 'freshness mark' on the `game_queue`, used to block training\nuntil enough new games have been played.  The number of fresh games required\nis the larger of:\n- The fraction of the total window size\n- The `minimum_fresh` parameter\nThe number of games required can be indexed from the 'count_from' parameter.\nArgs:\ngame_queue: A GameQueue object, on whose backing table will be modified.\ncount_from: the index of the game to compute the increment from\nwindow_size:  an integer indicating how many past games are considered\nfresh_fraction: a float in (0,1] indicating the fraction of games to wait for\nminimum_fresh:  an integer indicating the lower bound on the number of new\ngames.", "source": "codesearchnet"}
{"code": "def http_request(self, verb, uri, data=None, headers=None, files=None, response_format=None, is_rdf=True, stream=False):\n    if is_rdf:\n        '\\n\\t\\t\\tAcceptable content negotiated response formats include:\\n\\t\\t\\t\\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\\n\\t\\t\\t\\tapplication/n-triples\\n\\t\\t\\t\\tapplication/rdf+xml\\n\\t\\t\\t\\ttext/n3 (or text/rdf+n3)\\n\\t\\t\\t\\ttext/plain\\n\\t\\t\\t\\ttext/turtle (or application/x-turtle)\\n\\t\\t\\t'\n        if (verb == 'GET'):\n            if (not response_format):\n                response_format = self.repo.default_serialization\n            if (headers and ('Accept' not in headers.keys())):\n                headers['Accept'] = response_format\n            else:\n                headers = {'Accept': response_format}\n    if (type(uri) == rdflib.term.URIRef):\n        uri = uri.toPython()\n    logger.debug(('%s request for %s, format %s, headers %s' % (verb, uri, response_format, headers)))\n    session = requests.Session()\n    request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)\n    prepped_request = session.prepare_request(request)\n    response = session.send(prepped_request, stream=stream)\n    return response", "docstring": "Primary route for all HTTP requests to repository.  Ability to set most parameters for requests library,\nwith some additional convenience parameters as well.\n\nArgs:\nverb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.\nuri (rdflib.term.URIRef,str): input URI\ndata (str,file): payload of data to send for request, may be overridden in preperation of request\nheaders (dict): optional dictionary of headers passed directly to requests.request\nfiles (dict): optional dictionary of files passed directly to requests.request\nresponse_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.\nis_rdf (bool): if True, set Accept header based on combination of response_format and headers\nstream (bool): passed directly to requests.request for stream parameter\n\nReturns:\nrequests.models.Response", "source": "codesearchnet"}
{"code": "def basistransform(self, new_basis, old_basis=None, orthonormalize=True):\n    if (old_basis is None):\n        old_basis = np.identity(3)\n    is_rotation_matrix = np.isclose(np.linalg.det(new_basis), 1)\n    if ((not is_rotation_matrix) and orthonormalize):\n        new_basis = xyz_functions.orthonormalize_righthanded(new_basis)\n        is_rotation_matrix = True\n    if is_rotation_matrix:\n        return dot(np.dot(new_basis.T, old_basis), self)\n    else:\n        return dot(np.dot(np.linalg.inv(new_basis), old_basis), self)", "docstring": "Transform the frame to a new basis.\n\nThis function transforms the cartesian coordinates from an\nold basis to a new one. Please note that old_basis and\nnew_basis are supposed to have full Rank and consist of\nthree linear independent vectors. If rotate_only is True,\nit is asserted, that both bases are orthonormal and right\nhanded. Besides all involved matrices are transposed\ninstead of inverted.\nIn some applications this may require the function\n:func:`xyz_functions.orthonormalize` as a previous step.\n\nArgs:\nold_basis (np.array):\nnew_basis (np.array):\nrotate_only (bool):\n\nReturns:\nCartesian: The transformed molecule.", "source": "codesearchnet"}
{"code": "def do_REMOTE(self,\n                  target: str,\n                  remote_command: str,\n                  source: list,\n                  *args,\n                  **kwargs) -> None:\n        \n        if target == self.messaging._service_name:\n            info = 'target for remote command is the bot itself! Returning the function'\n            self.logger.info(info)\n            return self._handle_command(remote_command, source, *args, **kwargs)\n\n        try:\n            target = self.messaging._address_map[target]\n        except KeyError:\n            warn = ' Target %s, not found in addresses. Are you sure that %s sent an IDENT message?'\n            self.logger.warn(warn, target, target)\n            \n            \n            return\n\n        self.logger.info(' REMOTE %s, target: %s | %s, %s',\n                         remote_command, target, args, kwargs)\n\n        \n        source = target + source\n        self.messaging.send_command_response(source,\n                                             remote_command,\n                                             *args, \n                                             **kwargs)", "docstring": "Send a remote command to a service. Used\n\nArgs:\ntarget: The service that the command gets set to\nremote_command: The command to do remotely.\nsource: the binary source of the zmq_socket. Packed to send to the", "source": "juraj-google-style"}
{"code": "def __init__(self, message):\n        \n        super(InvalidField, self).__init__(\n            reason=enums.ResultReason.INVALID_FIELD,\n            message=message\n        )", "docstring": "Create an InvalidField exception.\n\nArgs:\nmessage (string): A string containing information about the error.", "source": "juraj-google-style"}
{"code": "def parse(self, message, schema):\n    func = {'audit-log': self._parse_audit_log_msg, 'event': self._parse_event_msg}[schema]\n    return func(message)", "docstring": "Parse message according to schema.\n\n`message` should already be validated against the given schema.\nSee :ref:`schemadef` for more information.\n\nArgs:\nmessage (dict): message data to parse.\nschema (str): valid message schema.\nReturns:\n(dict): parsed message", "source": "codesearchnet"}
{"code": "def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None):\n  \n  reduced_dim = convert_to_dimension(reduced_dim)\n  with tf.variable_scope(name, default_name=\"reduce_logsumexp\"):\n    reduced_shape = x.shape - reduced_dim\n    max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape)\n    if extra_logit is not None:\n      if isinstance(extra_logit, Tensor):\n        extra_logit = stop_gradient(extra_logit)\n      max_logit = maximum(max_logit, extra_logit)\n    x -= max_logit\n    exp_x = exp(x)\n    sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape)\n    if extra_logit is not None:\n      sum_exp_x += exp(extra_logit - max_logit)\n    return log(sum_exp_x) + max_logit", "docstring": "Numerically stable version of log(reduce_sum(exp(x))).\n\nUnlike other reductions, the output has the same shape as the input.\nNote: with a minor change, we could allow multiple reduced dimensions.\n\nArgs:\nx: a Tensor\nreduced_dim: a dimension in x\nextra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)\nname: an optional string\nReturns:\na Tensor with the same shape and dtype as x.", "source": "juraj-google-style"}
{"code": "def __init__(self, action_type=None, tp_port=None):\n        \n        super().__init__(action_type, length=8)\n        self.tp_port = tp_port", "docstring": "Create an ActionTPPort with the optional parameters below.\n\nArgs:\naction_type (:class:`~pyof.v0x01.common.action.ActionType`):\n:attr:`~ActionType.OFPAT_SET_TP_SRC` or\n:attr:`~ActionType.OFPAT_SET_TP_DST`.\ntp_port (int): TCP/UDP/other port to set.", "source": "juraj-google-style"}
{"code": "def true_num_reactions(model, custom_spont_id=None):\n    \n    true_num = 0\n    for rxn in model.reactions:\n        if len(rxn.genes) == 0:\n            continue\n        if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id):\n            continue\n        else:\n            true_num += 1\n    return true_num", "docstring": "Return the number of reactions associated with a gene.\n\nArgs:\nmodel (Model):\ncustom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nint: Number of reactions associated with a gene", "source": "juraj-google-style"}
{"code": "def run_inside_wrap_function_in_eager_mode(graph_function):\n\n    def wrap_and_execute(self):\n        if context.executing_eagerly():\n            wrapped = wrap_function.wrap_function(graph_function, [self])\n            wrapped()\n        else:\n            graph_function(self)\n    return wrap_and_execute", "docstring": "Decorator to execute the same graph code in eager and graph modes.\n\nIn graph mode, we just execute the graph_function passed as argument. In eager\nmode, we wrap the function using wrap_function and then execute the wrapped\nresult.\n\nArgs:\ngraph_function: python function containing graph code to be wrapped\n\nReturns:\ndecorated function", "source": "github-repos"}
{"code": "def unprotect(self, **kwargs):\n    id = self.get_id().replace('/', '%2F')\n    path = ('%s/%s/unprotect' % (self.manager.path, id))\n    self.manager.gitlab.http_put(path, **kwargs)\n    self._attrs['protected'] = False", "docstring": "Unprotect the branch.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabProtectError: If the branch could not be unprotected", "source": "codesearchnet"}
{"code": "def fitness(self, width, height):\n    assert ((width > 0) and (height > 0))\n    (rect, max_rect) = self._select_position(width, height)\n    if (rect is None):\n        return None\n    return self._rect_fitness(max_rect, rect.width, rect.height)", "docstring": "Metric used to rate how much space is wasted if a rectangle is placed.\nReturns a value greater or equal to zero, the smaller the value the more\n'fit' is the rectangle. If the rectangle can't be placed, returns None.\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\n\nReturns:\nint, float: Rectangle fitness\nNone: Rectangle can't be placed", "source": "codesearchnet"}
{"code": "def _isbn_cleanse(isbn, checksum=True):\n    \n    if not isinstance(isbn, string_types):\n        raise TypeError('ISBN must be a string, received %r' % isbn)\n\n    if PY2 and isinstance(isbn, str):  \n        isbn = unicode(isbn)\n        uni_input = False\n    else:  \n        uni_input = True\n\n    for dash in DASHES:\n        isbn = isbn.replace(dash, unicode())\n\n    if checksum:\n        if not isbn[:-1].isdigit():\n            raise IsbnError('non-digit parts')\n        if len(isbn) == 9:\n            isbn = '0' + isbn\n        if len(isbn) == 10:\n            if not (isbn[-1].isdigit() or isbn[-1] in 'Xx'):\n                raise IsbnError('non-digit or X checksum')\n        elif len(isbn) == 13:\n            if not isbn[-1].isdigit():\n                raise IsbnError('non-digit checksum')\n            if not isbn.startswith(('978', '979')):\n                raise IsbnError('invalid Bookland region')\n        else:\n            raise IsbnError('ISBN must be either 10 or 13 characters long')\n    else:\n        if len(isbn) == 8:\n            isbn = '0' + isbn\n        elif len(isbn) == 12 and not isbn[:3].startswith(('978', '979')):\n            raise IsbnError('invalid Bookland region')\n        if not isbn.isdigit():\n            raise IsbnError('non-digit parts')\n        if not len(isbn) in (9, 12):\n            raise IsbnError('ISBN must be either 9 or 12 characters long '\n                            'without checksum')\n    if PY2 and not uni_input:  \n        \n        \n        return str(isbn)\n    else:  \n        return isbn", "docstring": "Check ISBN is a string, and passes basic sanity checks.\n\nArgs:\nisbn (str): SBN, ISBN-10 or ISBN-13\nchecksum (bool): ``True`` if ``isbn`` includes checksum character\n\nReturns:\n``str``: ISBN with hyphenation removed, including when called with a\nSBN\n\nRaises:\nTypeError: ``isbn`` is not a ``str`` type\nIsbnError: Incorrect length for ``isbn``\nIsbnError: Incorrect SBN or ISBN formatting", "source": "juraj-google-style"}
{"code": "def _use_temp_cache(self):\n    if self._use_tensor_buffer():\n        return False\n    if self._use_tensor_values_cache():\n        return self._parameters.use_temp_cache_var\n    else:\n        return False", "docstring": "Returns true if the intermediate values should be stacked instead of being stored in a tf.Variable.\n\nReturns:\nA boolean, denoting whether to use a temporary cache or not.", "source": "github-repos"}
{"code": "def _derive_namespaces(self):\n    for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:\n        for (s, p, o) in graph:\n            try:\n                (ns_prefix, ns_uri, predicate) = graph.compute_qname(p)\n                self.update_namespaces.add(ns_uri)\n            except:\n                logger.debug(('could not parse Object URI: %s' % ns_uri))\n            try:\n                (ns_prefix, ns_uri, predicate) = graph.compute_qname(o)\n                self.update_namespaces.add(ns_uri)\n            except:\n                logger.debug(('could not parse Object URI: %s' % ns_uri))\n    logger.debug(self.update_namespaces)\n    for ns_uri in self.update_namespaces:\n        for k in self.prefixes.__dict__:\n            if (str(ns_uri) == str(self.prefixes.__dict__[k])):\n                logger.debug(('adding prefix %s for uri %s to unique_prefixes' % (k, str(ns_uri))))\n                self.update_prefixes[k] = self.prefixes.__dict__[k]", "docstring": "Small method to loop through three graphs in self.diffs, identify unique namespace URIs.\nThen, loop through provided dictionary of prefixes and pin one to another.\n\nArgs:\nNone: uses self.prefixes and self.diffs\n\nReturns:\nNone: sets self.update_namespaces and self.update_prefixes", "source": "codesearchnet"}
{"code": "def get_voronoi_polyhedra(self, structure, n):\n    if (self.targets is None):\n        targets = structure.composition.elements\n    else:\n        targets = self.targets\n    center = structure[n]\n    cutoff = self.cutoff\n    corners = [[1, 1, 1], [(- 1), 1, 1], [1, (- 1), 1], [1, 1, (- 1)]]\n    d_corners = [np.linalg.norm(structure.lattice.get_cartesian_coords(c)) for c in corners]\n    max_cutoff = (max(d_corners) + 0.01)\n    while True:\n        try:\n            neighbors = structure.get_sites_in_sphere(center.coords, cutoff)\n            neighbors = [i[0] for i in sorted(neighbors, key=(lambda s: s[1]))]\n            qvoronoi_input = [s.coords for s in neighbors]\n            voro = Voronoi(qvoronoi_input)\n            cell_info = self._extract_cell_info(structure, 0, neighbors, targets, voro, self.compute_adj_neighbors)\n            break\n        except RuntimeError as e:\n            if (cutoff >= max_cutoff):\n                if (e.args and ('vertex' in e.args[0])):\n                    raise e\n                else:\n                    raise RuntimeError('Error in Voronoi neighbor finding; max cutoff exceeded')\n            cutoff = min((cutoff * 2), (max_cutoff + 0.001))\n    return cell_info", "docstring": "Gives a weighted polyhedra around a site.\n\nSee ref: A Proposed Rigorous Definition of Coordination Number,\nM. O'Keeffe, Acta Cryst. (1979). A35, 772-775\n\nArgs:\nstructure (Structure): structure for which to evaluate the\ncoordination environment.\nn (integer): site index.\n\nReturns:\nA dict of sites sharing a common Voronoi facet with the site\nn mapped to a directory containing statistics about the facet:\n- solid_angle - Solid angle subtended by face\n- angle_normalized - Solid angle normalized such that the\nfaces with the largest\n- area - Area of the facet\n- face_dist - Distance between site n and the facet\n- volume - Volume of Voronoi cell for this face\n- n_verts - Number of vertices on the facet", "source": "codesearchnet"}
{"code": "def structure_lines(self, structure, cell_flg=True, frac_flg=True, anion_shell_flg=True, cation_shell_flg=False, symm_flg=True):\n    gin = ''\n    if cell_flg:\n        gin += 'cell\\n'\n        l = structure.lattice\n        lat_str = [str(i) for i in [l.a, l.b, l.c, l.alpha, l.beta, l.gamma]]\n        gin += (' '.join(lat_str) + '\\n')\n    if frac_flg:\n        gin += 'frac\\n'\n        coord_attr = 'frac_coords'\n    else:\n        gin += 'cart\\n'\n        coord_attr = 'coords'\n    for site in structure.sites:\n        coord = [str(i) for i in getattr(site, coord_attr)]\n        specie = site.specie\n        core_site_desc = (((specie.symbol + ' core ') + ' '.join(coord)) + '\\n')\n        gin += core_site_desc\n        if (((specie in _anions) and anion_shell_flg) or ((specie in _cations) and cation_shell_flg)):\n            shel_site_desc = (((specie.symbol + ' shel ') + ' '.join(coord)) + '\\n')\n            gin += shel_site_desc\n        else:\n            pass\n    if symm_flg:\n        gin += 'space\\n'\n        gin += (str(SpacegroupAnalyzer(structure).get_space_group_number()) + '\\n')\n    return gin", "docstring": "Generates GULP input string corresponding to pymatgen structure.\n\nArgs:\nstructure: pymatgen Structure object\ncell_flg (default = True): Option to use lattice parameters.\nfractional_flg (default = True): If True, fractional coordinates\nare used. Else, cartesian coodinates in Angstroms are used.\n******\nGULP convention is to use fractional coordinates for periodic\nstructures and cartesian coordinates for non-periodic\nstructures.\n******\nanion_shell_flg (default = True): If True, anions are considered\npolarizable.\ncation_shell_flg (default = False): If True, cations are\nconsidered polarizable.\nsymm_flg (default = True): If True, symmetry information is also\nwritten.\n\nReturns:\nstring containing structure for GULP input", "source": "codesearchnet"}
{"code": "def _InvokeImportCallbackBySuffix(names):\n\n    def GetModuleFromName(name, path):\n        \"Returns the loaded module for this name/path, or None if not found.\\n\\n    Args:\\n      name: A string that may represent the name of a loaded Python module.\\n      path: If 'name' ends with '.*', then the last path component in 'path' is\\n            used to identify what the wildcard may map to. Does not contain file\\n            extension.\\n\\n    Returns:\\n      The loaded module for the given name and path, or None if a loaded module\\n      was not found.\\n    \"\n        if name.endswith('.*'):\n            name = ((name.rpartition('.')[0] + '.') + path.split('/')[(- 1)])\n        return sys.modules.get(name)\n    for (path, callbacks) in list(_import_callbacks.items()):\n        root = os.path.splitext(path)[0]\n        nonempty_names = (n for n in names if n)\n        modules = (GetModuleFromName(name, root) for name in nonempty_names)\n        nonempty_modules = (m for m in modules if m)\n        for module in nonempty_modules:\n            mod_file = getattr(module, '__file__', None)\n            if (not mod_file):\n                continue\n            mod_root = os.path.splitext(mod_file)[0]\n            if (not os.path.isabs(mod_root)):\n                mod_root = os.path.join(os.curdir, mod_root)\n            if module_utils2.IsPathSuffix(mod_root, root):\n                for callback in callbacks.copy():\n                    callback(module)\n                break", "docstring": "Invokes import callbacks for newly loaded modules.\n\nUses a path suffix match to identify whether a loaded module matches the\nfile path provided by the user.\n\nArgs:\nnames: A set of names for modules that are loaded by the current import.\nThe set may contain some superfluous entries that were already\nloaded before this import, or some entries that do not correspond\nto a module. The list is expected to be much smaller than the exact\nsys.modules so that a linear search is not as costly.", "source": "codesearchnet"}
{"code": "def objects_to_serialize(self, serialization_cache):\n    raise NotImplementedError", "docstring": "Returns dictionary of extra checkpointable objects to serialize.\n\nSee `functions_to_serialize` for an explanation of this function's\neffects.\n\nArgs:\nserialization_cache: Dictionary passed to all objects in the same object\ngraph during serialization.\n\nReturns:\nA dictionary mapping attribute names to checkpointable objects.", "source": "github-repos"}
{"code": "def _update_unenrolled_list(sailthru_client, email, course_url, unenroll):\n    try:\n        sailthru_response = sailthru_client.api_get('user', {'id': email, 'fields': {'vars': 1}})\n        if (not sailthru_response.is_ok()):\n            error = sailthru_response.get_error()\n            logger.error('Error attempting to read user record from Sailthru: %s', error.get_message())\n            return (not can_retry_sailthru_request(error))\n        response_json = sailthru_response.json\n        unenroll_list = []\n        if (response_json and ('vars' in response_json) and response_json['vars'] and ('unenrolled' in response_json['vars'])):\n            unenroll_list = response_json['vars']['unenrolled']\n        changed = False\n        if unenroll:\n            if (course_url not in unenroll_list):\n                unenroll_list.append(course_url)\n                changed = True\n        elif (course_url in unenroll_list):\n            unenroll_list.remove(course_url)\n            changed = True\n        if changed:\n            sailthru_response = sailthru_client.api_post('user', {'id': email, 'key': 'email', 'vars': {'unenrolled': unenroll_list}})\n            if (not sailthru_response.is_ok()):\n                error = sailthru_response.get_error()\n                logger.error('Error attempting to update user record in Sailthru: %s', error.get_message())\n                return (not can_retry_sailthru_request(error))\n        return True\n    except SailthruClientError as exc:\n        logger.exception('Exception attempting to update user record for %s in Sailthru - %s', email, text_type(exc))\n        return False", "docstring": "Maintain a list of courses the user has unenrolled from in the Sailthru user record\n\nArguments:\nsailthru_client (object): SailthruClient\nemail (str): user's email address\ncourse_url (str): LMS url for course info page.\nunenroll (boolean): True if unenrolling, False if enrolling\n\nReturns:\nFalse if retryable error, else True", "source": "codesearchnet"}
{"code": "def random_state(dim, seed=None):\n    \n    if seed is None:\n        seed = np.random.randint(0, np.iinfo(np.int32).max)\n    rng = np.random.RandomState(seed)\n    \n    x = rng.rand(dim)\n    x += x == 0\n    x = -np.log(x)\n    sumx = sum(x)\n    phases = rng.rand(dim)*2.0*np.pi\n    return np.sqrt(x/sumx)*np.exp(1j*phases)", "docstring": "Return a random quantum state from the uniform (Haar) measure on\nstate space.\n\nArgs:\ndim (int): the dim of the state spaxe\nseed (int): Optional. To set a random seed.\n\nReturns:\nndarray:  state(2**num) a random quantum state.", "source": "juraj-google-style"}
{"code": "def find_node(self, x: int, y: int) -> Optional['BSP']:\n    if (not self.contains(x, y)):\n        return None\n    for child in self.children:\n        found = child.find_node(x, y)\n        if found:\n            return found\n    return self", "docstring": "Return the deepest node which contains these coordinates.\n\nReturns:\nOptional[BSP]: BSP object or None.", "source": "codesearchnet"}
{"code": "def __init__(self, skip_header_lines=None, name=None):\n    rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines, name=name)\n    super(TextLineReader, self).__init__(rr)", "docstring": "Create a TextLineReader.\n\nArgs:\nskip_header_lines: An optional int. Defaults to 0.  Number of lines\nto skip from the beginning of every file.\nname: A name for the operation (optional).", "source": "github-repos"}
{"code": "def _GetDictFromStringsTable(self, parser_mediator, table):\n    \n    if not table:\n      return {}\n\n    record_values = {}\n    for record in table.records:\n      if parser_mediator.abort:\n        break\n\n      if record.get_number_of_values() != 2:\n        continue\n\n      identification = self._GetRecordValue(record, 0)\n      filename = self._GetRecordValue(record, 1)\n\n      if not identification:\n        continue\n      record_values[identification] = filename\n\n    return record_values", "docstring": "Build a dictionary of the value in the strings table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntable (pyesedb.table): strings table.\n\nReturns:\ndict[str,object]: values per column name.", "source": "juraj-google-style"}
{"code": "def parse_value(self):\n    parsers = [self._maybe_parse_container, self._maybe_parse_basic_type, self._maybe_parse_configurable_reference, self._maybe_parse_macro]\n    for parser in parsers:\n        (success, value) = parser()\n        if success:\n            return value\n    self._raise_syntax_error('Unable to parse value.')", "docstring": "Parse a single literal value.\n\nReturns:\nThe parsed value.", "source": "codesearchnet"}
{"code": "def resolve_input(self, input_name):\n    name_elts = input_name.split(':')\n    source_name = name_elts[0]\n    if source_name[0] == '^':\n        source_name = source_name[1:]\n    source_index = 0\n    if len(name_elts) > 1 and name_elts[-1].isnumeric():\n        source_index = int(name_elts[-1])\n    if self._function is None:\n        return _EndPoint(self._enclosing_graph.nodes[source_name], source_index)\n    if source_index != 0 or source_name in self._function.nodes:\n        return _EndPoint(self._function.nodes[source_name], source_index)\n    inputs = [i.name for i in self._function.function.signature.input_arg]\n    return _EndPoint(self._function, inputs.index(source_name))", "docstring": "Resolves an input into its _EndPoint.\n\nA NodeDef's input name can refer to either global NodeDefs (in the\nGraphDef's node list), a NodeDef in a function's node list, or a Function\n(in the GraphDef's function library). The name can also carry semantic\ninformation, depending on whether it starts with \"^\". This method handles\nall that logic in order to find the object to which the input name refers\nto.\n\nArgs:\ninput_name: The input name to resolve.\n\nReturns:\nThe object referred to by 'input_name'.", "source": "github-repos"}
{"code": "def find_mip(self, direction, mechanism, purview):\n    if (not purview):\n        return _null_ria(direction, mechanism, purview)\n    repertoire = self.repertoire(direction, mechanism, purview)\n\n    def _mip(phi, partition, partitioned_repertoire):\n        return RepertoireIrreducibilityAnalysis(phi=phi, direction=direction, mechanism=mechanism, purview=purview, partition=partition, repertoire=repertoire, partitioned_repertoire=partitioned_repertoire, node_labels=self.node_labels)\n    if ((direction == Direction.CAUSE) and np.all((repertoire == 0))):\n        return _mip(0, None, None)\n    mip = _null_ria(direction, mechanism, purview, phi=float('inf'))\n    for partition in mip_partitions(mechanism, purview, self.node_labels):\n        (phi, partitioned_repertoire) = self.evaluate_partition(direction, mechanism, purview, partition, repertoire=repertoire)\n        if (phi == 0):\n            return _mip(0.0, partition, partitioned_repertoire)\n        if (phi < mip.phi):\n            mip = _mip(phi, partition, partitioned_repertoire)\n    return mip", "docstring": "Return the minimum information partition for a mechanism over a\npurview.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The nodes in the mechanism.\npurview (tuple[int]): The nodes in the purview.\n\nReturns:\nRepertoireIrreducibilityAnalysis: The irreducibility analysis for\nthe mininum-information partition in one temporal direction.", "source": "codesearchnet"}
{"code": "def update_compounds(self, variants):\n        \n        LOG.debug(\"Updating compound objects\")\n\n        for var_id in variants:\n            variant_obj = variants[var_id]\n            if not variant_obj.get('compounds'):\n                continue\n\n            updated_compounds = self.update_variant_compounds(variant_obj, variants)\n            variant_obj['compounds'] = updated_compounds\n\n        LOG.debug(\"Compounds updated\")\n\n        return variants", "docstring": "Update the compounds for a set of variants.\n\nArgs:\nvariants(dict): A dictionary with _ids as keys and variant objs as values", "source": "juraj-google-style"}
{"code": "def run(self, sensor_graph, model):\n        \n\n        \n        \n        \n        \n        \n        \n\n        for node, inputs, outputs in sensor_graph.iterate_bfs():\n            can_remove = False\n\n            \n            if len(outputs) != 0:\n                continue\n\n            \n            if sensor_graph.is_output(node.stream):\n                continue\n\n            \n            if node.stream.stream_id < StreamAllocator.StartingID:\n                continue\n\n            \n            if node.func_name == u'call_rpc':\n                continue\n\n            \n            if node.stream.buffered:\n                \n                \n                continue\n\n            \n            if node.func_name == u'trigger_streamer':\n                continue\n\n            \n            \n            \n            \n            for input_node in inputs:\n                input_node.outputs.remove(node)\n\n            if node in sensor_graph.roots:\n                sensor_graph.roots.remove(node)\n\n            sensor_graph.nodes.remove(node)\n\n            \n\n            return True\n\n        return False", "docstring": "Run this optimization pass on the sensor graph\n\nIf necessary, information on the device model being targeted\ncan be found in the associated model argument.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph to optimize\nmodel (DeviceModel): The device model we're using", "source": "juraj-google-style"}
{"code": "def get_current_epoch_time():\n    return int(round(time.time() * 1000))", "docstring": "Current epoch time in milliseconds.\n\nReturns:\nAn integer representing the current epoch time in milliseconds.", "source": "github-repos"}
{"code": "def _on_cancelok(self, cancel_frame):\n    _log.info('Consumer canceled; returning all unprocessed messages to the queue')\n    self._channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)", "docstring": "Called when the server acknowledges a cancel request.\n\nArgs:\ncancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from\nthe server.", "source": "codesearchnet"}
{"code": "def _PrintAPFSVolumeIdentifiersOverview(\n      self, volume_system, volume_identifiers):\n    \n    header = 'The following Apple File System (APFS) volumes were found:\\n'\n    self._output_writer.Write(header)\n\n    column_names = ['Identifier', 'Name']\n    table_view = views.CLITabularTableView(column_names=column_names)\n\n    for volume_identifier in volume_identifiers:\n      volume = volume_system.GetVolumeByIdentifier(volume_identifier)\n      if not volume:\n        raise errors.SourceScannerError(\n            'Volume missing for identifier: {0:s}.'.format(\n                volume_identifier))\n\n      volume_attribute = volume.GetAttribute('name')\n      table_view.AddRow([volume.identifier, volume_attribute.value])\n\n    self._output_writer.Write('\\n')\n    table_view.Write(self._output_writer)\n    self._output_writer.Write('\\n')", "docstring": "Prints an overview of APFS volume identifiers.\n\nArgs:\nvolume_system (dfvfs.APFSVolumeSystem): volume system.\nvolume_identifiers (list[str]): allowed volume identifiers.\n\nRaises:\nSourceScannerError: if a volume cannot be resolved from the volume\nidentifier.", "source": "juraj-google-style"}
{"code": "def convert_slice(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting slice ...')\n    if (len(params['axes']) > 1):\n        raise AssertionError('Cannot convert slice by multiple dimensions')\n    if (params['axes'][0] not in [0, 1, 2, 3]):\n        raise AssertionError('Slice by dimension more than 3 or less than 0 is not supported')\n\n    def target_layer(x, axis=int(params['axes'][0]), start=int(params['starts'][0]), end=int(params['ends'][0])):\n        if (axis == 0):\n            return x[start:end]\n        elif (axis == 1):\n            return x[(:, start:end)]\n        elif (axis == 2):\n            return x[(:, :, start:end)]\n        elif (axis == 3):\n            return x[(:, :, :, start:end)]\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert slice operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:\n    target_dtype = self.patch_embedding.weight.dtype\n    patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))\n    positional_embeddings = self.position_embedding.weight.reshape(self.position_embedding_size, self.position_embedding_size, -1)\n    resized_positional_embeddings = self.resize_positional_embeddings(positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1])\n    embeddings = patch_embeds + resized_positional_embeddings\n    return embeddings", "docstring": "Args:\npixel_values (`torch.FloatTensor`):\nPixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)\nspatial_shapes (`List[Tuple[int, int]]`):\nSpatial shapes of shape (batch_size, 2) to resize the positional embeddings to", "source": "github-repos"}
{"code": "def real(x):\n    if any_symbolic_tensors((x,)):\n        return Real().symbolic_call(x)\n    return backend.numpy.real(x)", "docstring": "Return the real part of the complex argument.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe real component of the complex argument.", "source": "github-repos"}
{"code": "def Scripts(unicode_dir=_UNICODE_DIR):\n    scripts = {}\n\n    def DoLine(codes, fields):\n        'Process single Scripts.txt line, updating scripts.'\n        (_, name) = fields\n        scripts.setdefault(name, []).extend(codes)\n    ReadUnicodeTable((unicode_dir + '/Scripts.txt'), 2, DoLine)\n    return scripts", "docstring": "Returns dict mapping script names to code lists.\n\nArgs:\nunicode_dir: Unicode data directory\n\nReturns:\ndict mapping script names to code lists", "source": "codesearchnet"}
{"code": "def with_scopes_if_required(credentials, scopes):\n    if (isinstance(credentials, Scoped) and credentials.requires_scopes):\n        return credentials.with_scopes(scopes)\n    else:\n        return credentials", "docstring": "Creates a copy of the credentials with scopes if scoping is required.\n\nThis helper function is useful when you do not know (or care to know) the\nspecific type of credentials you are using (such as when you use\n:func:`google.auth.default`). This function will call\n:meth:`Scoped.with_scopes` if the credentials are scoped credentials and if\nthe credentials require scoping. Otherwise, it will return the credentials\nas-is.\n\nArgs:\ncredentials (google.auth.credentials.Credentials): The credentials to\nscope if necessary.\nscopes (Sequence[str]): The list of scopes to use.\n\nReturns:\ngoogle.auth.credentials.Credentials: Either a new set of scoped\ncredentials, or the passed in credentials instance if no scoping\nwas required.", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    for subkey in registry_key.GetSubkeys():\n      drive_letter = subkey.name\n      if not drive_letter:\n        continue\n\n      values_dict = {\n          'DriveLetter': drive_letter,\n          'Type': 'Mapped Drive'}\n\n      \n      remote_path_value = subkey.GetValueByName('RemotePath')\n      if remote_path_value:\n        remote_path = remote_path_value.GetDataAsObject()\n\n        if remote_path.startswith('\\\\\\\\'):\n          server_name, _, share_name = remote_path[2:].partition('\\\\')\n          values_dict['RemoteServer'] = server_name\n          values_dict['ShareName'] = '\\\\{0:s}'.format(\n              share_name.replace('\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = subkey.offset\n      event_data.regvalue = values_dict\n      event_data.source_append = self._SOURCE_APPEND\n      event_data.urls = self.URLS\n\n      event = time_events.DateTimeValuesEvent(\n          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def delay(self, secs):\n        \n        secs = int(secs)\n        for i in reversed(range(secs)):\n            sys.stdout.write('\\r')\n            sys.stdout.write(\"sleep %ds, left %2ds\" % (secs, i+1))\n            sys.stdout.flush()\n            time.sleep(1)\n        sys.stdout.write(\"\\n\")\n        return self", "docstring": "Delay some seconds\nArgs:\nsecs: float seconds\n\nReturns:\nself", "source": "juraj-google-style"}
{"code": "def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None:\n        \n        super().__init__(terms, validator=self._is_compatible)", "docstring": "Initializes linear combination from a collection of terms.\n\nArgs:\nterms: Mapping of gates to coefficients in the linear combination\nbeing initialized.", "source": "juraj-google-style"}
{"code": "def set_datetime_format(self, format):\n        \n        if not format in [\"UNIX\", \"RFC3339\"]:\n            return\n\n        self.datetime_format = format\n\n        self.set_header(\"Accept-Datetime-Format\", self.datetime_format)", "docstring": "Set the Accept-Datetime-Format header to an acceptable\nvalue\n\nArgs:\nformat: UNIX or RFC3339", "source": "juraj-google-style"}
{"code": "def read_gbq(table, dataset=None, project_id=None, use_bqstorage_api=False, **kwargs):\n    if table is None:\n        raise ValueError('Please specify a BigQuery table to read from.')\n    elif len(kwargs) > 0:\n        raise ValueError(f'Encountered unsupported parameter(s) in read_gbq: {kwargs.keys()!r}')\n    return _ReadGbq(table, dataset, project_id, use_bqstorage_api)", "docstring": "This function reads data from a BigQuery table and produces a\n:class:`~apache_beam.dataframe.frames.DeferredDataFrame.\n\nArgs:\ntable (str): Please specify a table. This can be done in the format\n'PROJECT:dataset.table' if one would not wish to utilize\nthe parameters below.\ndataset (str): Please specify the dataset\n(can omit if table was specified as 'PROJECT:dataset.table').\nproject_id (str): Please specify the project ID\n(can omit if table was specified as 'PROJECT:dataset.table').\nuse_bqstorage_api (bool): If you would like to utilize\nthe BigQuery Storage API in ReadFromBigQuery, please set\nthis flag to true. Otherwise, please set flag\nto false or leave it unspecified.", "source": "github-repos"}
{"code": "def instantiate_references_json(references_json):\n    references = {}\n    for obj in references_json:\n        obj_id = obj['id']\n        obj_type = obj.get('subtype', obj['type'])\n        cls = get_class(obj_type)\n        instance = cls.__new__(cls, id=obj_id)\n        if (instance is None):\n            raise RuntimeError(('Error loading model from JSON (type: %s, id: %s)' % (obj_type, obj_id)))\n        references[instance.id] = instance\n    return references", "docstring": "Given a JSON representation of all the models in a graph, return a\ndict of new model objects.\n\nArgs:\nreferences_json (``JSON``)\nJSON specifying new Bokeh models to create\n\nReturns:\ndict[str, Model]", "source": "codesearchnet"}
{"code": "def set_flowcontrol_receive(self, name, value=None, default=False, disable=False):\n    return self.set_flowcontrol(name, 'receive', value, default, disable)", "docstring": "Configures the interface flowcontrol receive value\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\n\nvalue (boolean): True if the interface should enable receiving\nflow control packets, otherwise False\n\ndefault (boolean): Specifies to default the interface flow\ncontrol receive value\n\ndisable (boolean): Specifies to disable the interface flow\ncontrol receive value\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def GetAddress(self):\n    script = ((b'21' + self.PublicKey.encode_point(True)) + b'ac')\n    script_hash = Crypto.ToScriptHash(script)\n    address = Crypto.ToAddress(script_hash)\n    return address", "docstring": "Returns the public NEO address for this KeyPair\n\nReturns:\nstr: The private key", "source": "codesearchnet"}
{"code": "def adjust(self, amount, update=True, flow=True, fee=0.0):\n    self._capital += amount\n    self._last_fee += fee\n    if flow:\n        self._net_flows += amount\n    if update:\n        self.root.stale = True", "docstring": "Adjust capital - used to inject capital to a Strategy. This injection\nof capital will have no effect on the children.\n\nArgs:\n* amount (float): Amount to adjust by.\n* update (bool): Force update?\n* flow (bool): Is this adjustment a flow? A flow will not have an\nimpact on the performance (price index). Example of flows are\nsimply capital injections (say a monthly contribution to a\nportfolio). This should not be reflected in the returns. A\nnon-flow (flow=False) does impact performance. A good example\nof this is a commission, or a dividend.", "source": "codesearchnet"}
{"code": "def reminders_info(self, *, reminder: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"reminder\": reminder})\n        return self.api_call(\"reminders.info\", http_verb=\"GET\", params=kwargs)", "docstring": "Gets information about a reminder.\n\nArgs:\nreminder (str): The ID of the reminder. e.g. 'Rm12345678'", "source": "juraj-google-style"}
{"code": "def get_area_url(location, distance):\n    locations = [location.destination(i, distance) for i in range(0, 360, 90)]\n    latitudes = list(map(attrgetter('latitude'), locations))\n    longitudes = list(map(attrgetter('longitude'), locations))\n    bounds = (min(longitudes), min(latitudes), max(longitudes), max(latitudes))\n    return ('http:", "docstring": "Generate URL for downloading OSM data within a region.\n\nThis function defines a boundary box where the edges touch a circle of\n``distance`` kilometres in radius.  It is important to note that the box is\nneither a square, nor bounded within the circle.\n\nThe bounding box is strictly a trapezoid whose north and south edges are\ndifferent lengths, which is longer is dependant on whether the box is\ncalculated for a location in the Northern or Southern hemisphere.  You will\nget a shorter north edge in the Northern hemisphere, and vice versa.  This\nis simply because we are applying a flat transformation to a spherical\nobject, however for all general cases the difference will be negligible.\n\nArgs:\nlocation (Point): Centre of the region\ndistance (int): Boundary distance in kilometres\n\nReturns:\nstr: URL that can be used to fetch the OSM data within ``distance`` of\n``location``", "source": "codesearchnet"}
{"code": "def pred_to_prob(Y_h, k):\n    Y_h = Y_h.clone()\n    if (Y_h.dim() > 1):\n        Y_h = Y_h.squeeze()\n    assert (Y_h.dim() == 1)\n    assert (Y_h >= 1).all()\n    assert (Y_h <= k).all()\n    n = Y_h.shape[0]\n    Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)\n    for (i, j) in enumerate(Y_h):\n        Y_s[(i, (j - 1))] = 1.0\n    return Y_s", "docstring": "Converts a 1D tensor of predicted labels into a 2D tensor of probabilistic labels\n\nArgs:\nY_h: an [n], or [n,1] tensor of predicted (int) labels in {1,...,k}\nk: the largest possible label in Y_h\nReturns:\nY_s: a torch.FloatTensor of shape [n, k] where Y_s[i, j-1] is the probabilistic\nlabel for item i and label j", "source": "codesearchnet"}
{"code": "def _infer_fused_data_format(self, input_batch):\n    input_shape = input_batch.get_shape().as_list()\n    input_shape_len = len(input_shape)\n    if (input_shape_len != 4):\n        raise NotImplementedError('fused batch norm supports only input with 4 dimensions, it received input of dimensionality {:d}'.format(input_shape_len))\n    axis = (range(input_shape_len)[:(- 1)] if (self._axis is None) else self._axis)\n    axis = tuple(axis)\n    if (axis == (0, 1, 2)):\n        return 'NHWC'\n    elif (axis == (0, 2, 3)):\n        return 'NCHW'\n    else:\n        raise ValueError('Invalid axis option {}. This does not correspond to either the NHWC format (0, 1, 2) or the NCHW (0, 2, 3).'.format(axis))", "docstring": "Infers the data format for the fused batch norm.\n\nIt uses the axis option to infer this information. Specifically, the\naxis value (0, 1, 2) corresponds to data format NHWC and the\naxis value (0, 2, 3) to data format NCHW.\n\nArgs:\ninput_batch: A Tensor of arbitrary dimension.\n\nReturns:\nA string description of the data format NHWC or NCHW.\n\nRaises:\nNotImplementedError: for input of dimensionality different from 4.\nValueError: for axis configuration different from (0, 1, 2) and (0, 2, 3).", "source": "codesearchnet"}
{"code": "def Convert(self, input_file, output_file):\n    for version, schema, raw_binary, _ in self._schemas:\n        try:\n            data_candidate = self._Read(input_file, schema, raw_binary)\n        except RuntimeError:\n            continue\n        if 'version' not in data_candidate:\n            data_candidate['version'] = 1\n        elif data_candidate['version'] == 0:\n            data_candidate['version'] = 1\n        if data_candidate['version'] == version:\n            self._PerformUpgrade(data_candidate)\n            self._Write(data_candidate, output_file)\n            return\n    raise RuntimeError('No schema that the converter understands worked with the data file you provided.')", "docstring": "Perform schema conversion from input_file to output_file.\n\nArgs:\ninput_file: Filename of TensorFlow Lite data to convert from. Must\nbe `.json` or `.bin` extension files for JSON or Binary forms of\nthe TensorFlow FlatBuffer schema.\noutput_file: Filename to write to. Extension also must be `.json`\nor `.bin`.\n\nRaises:\nRuntimeError: Generated when none of the upgrader supported schemas\nmatche the `input_file` data.", "source": "github-repos"}
{"code": "def generate_data(self, data_dir, tmp_dir, task_id=-1):\n    \n    tf.logging.info(\"generate_data task_id=%s\" % task_id)\n    encoder = self.get_or_create_vocab(data_dir, tmp_dir)\n    assert task_id >= 0 and task_id < self.num_generate_tasks\n    if task_id < self.num_train_shards:\n      out_file = self.training_filepaths(\n          data_dir, self.num_train_shards, shuffled=False)[task_id]\n    else:\n      out_file = self.dev_filepaths(\n          data_dir, self.num_dev_shards,\n          shuffled=False)[task_id - self.num_train_shards]\n    generator_utils.generate_files(\n        self.example_generator(encoder, tmp_dir, task_id), [out_file])\n    generator_utils.shuffle_dataset([out_file])", "docstring": "Generates training/dev data.\n\nArgs:\ndata_dir: a string\ntmp_dir: a string\ntask_id: an optional integer\nReturns:\nshard or shards for which data was generated.", "source": "juraj-google-style"}
{"code": "def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''):\n    compare = difflib.SequenceMatcher((lambda x: (x in ignore)))\n    compare.set_seqs(a=a, b=b)\n    matching_text = list()\n    for match in compare.get_matching_blocks():\n        start = match.a\n        text = a[start:(start + match.size)]\n        if end_characters:\n            prev_text = text\n            while ((len(text) != 0) and (text[0] in end_characters)):\n                text = text[1:]\n            while ((len(text) != 0) and (text[(- 1)] not in end_characters)):\n                text = text[:(- 1)]\n            if (len(text) == 0):\n                text = prev_text\n        if (len(text) >= match_min_size):\n            matching_text.append(text)\n    return matching_text", "docstring": "Returns a list of matching blocks of text in a and b\n\nArgs:\na (str): First string to match\nb (str): Second string to match\nmatch_min_size (int): Minimum block size to match on. Defaults to 30.\nignore (str): Any characters to ignore in matching. Defaults to ''.\nend_characters (str): End characters to look for. Defaults to ''.\n\nReturns:\nList[str]: List of matching blocks of text", "source": "codesearchnet"}
{"code": "def _field(self, field, value):\n        \n        \n        field = str(field)\n        value = str(value)\n\n        \n        \n        if (any([char in value for char in QUOTE_LIST]) and '\"' not in value\n                and not any([char in value for char in UNQUOTE_LIST])):\n            value = '\"' + value + '\"'\n\n        \n        if field and value:\n            self.__query[\"q\"] += field + \":\" + value\n            \n            self.__query[\"advanced\"] = True\n\n        return self", "docstring": "Add a ``field:value`` term to the query.\nMatches will have the ``value`` in the ``field``.\n\nNote:\nThis method triggers advanced mode.\n\nArguments:\nfield (str): The field to check for the value, in Elasticsearch dot syntax.\nvalue (str): The value to match.\n\nReturns:\nSearchHelper: Self", "source": "juraj-google-style"}
{"code": "def update(self, friendly_name=None, description=None, expiry=None, schema=None):\n    self._load_info()\n    if (friendly_name is not None):\n        self._info['friendlyName'] = friendly_name\n    if (description is not None):\n        self._info['description'] = description\n    if (expiry is not None):\n        if isinstance(expiry, datetime.datetime):\n            expiry = (calendar.timegm(expiry.utctimetuple()) * 1000)\n        self._info['expirationTime'] = expiry\n    if (schema is not None):\n        if isinstance(schema, _schema.Schema):\n            schema = schema._bq_schema\n        self._info['schema'] = {'fields': schema}\n    try:\n        self._api.table_update(self._name_parts, self._info)\n    except datalab.utils.RequestException:\n        self._info = None\n    except Exception as e:\n        raise e", "docstring": "Selectively updates Table information.\n\nAny parameters that are omitted or None are not updated.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\nexpiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.\nschema: if not None, the new schema: either a list of dictionaries or a Schema.", "source": "codesearchnet"}
{"code": "def run(self, dag):\n        \n        if self.layout is None:\n            if self.property_set[\"layout\"]:\n                self.layout = self.property_set[\"layout\"]\n            else:\n                self.layout = Layout.generate_trivial_layout(*dag.qregs.values())\n\n        self.property_set['is_direction_mapped'] = True\n        edges = self.coupling_map.get_edges()\n\n        for gate in dag.twoQ_gates():\n            physical_q0 = self.layout[gate.qargs[0]]\n            physical_q1 = self.layout[gate.qargs[1]]\n\n            if isinstance(gate.op, (CXBase, CnotGate)) and (\n                    physical_q0, physical_q1) not in edges:\n                self.property_set['is_direction_mapped'] = False\n                return", "docstring": "If `dag` is mapped and the direction is correct the property\n`is_direction_mapped` is set to True (or to False otherwise).\n\nArgs:\ndag (DAGCircuit): DAG to check.", "source": "juraj-google-style"}
{"code": "def export_to_xml(video_id, resource_fs, static_dir, course_id=None):\n    video_image_name = ''\n    video = _get_video(video_id)\n    try:\n        course_video = CourseVideo.objects.select_related('video_image').get(course_id=course_id, video=video)\n        video_image_name = course_video.video_image.image.name\n    except ObjectDoesNotExist:\n        pass\n    video_el = Element('video_asset', attrib={'client_video_id': video.client_video_id, 'duration': six.text_type(video.duration), 'image': video_image_name})\n    for encoded_video in video.encoded_videos.all():\n        SubElement(video_el, 'encoded_video', {name: six.text_type(getattr(encoded_video, name)) for name in ['profile', 'url', 'file_size', 'bitrate']})\n    return create_transcripts_xml(video_id, video_el, resource_fs, static_dir)", "docstring": "Exports data for a video into an xml object.\n\nNOTE: For external video ids, only transcripts information will be added into xml.\nIf external=False, then edx_video_id is going to be on first index of the list.\n\nArguments:\nvideo_id (str): Video id of the video to export transcripts.\ncourse_id (str): The ID of the course with which this video is associated.\nstatic_dir (str): The Directory to store transcript file.\nresource_fs (SubFS): Export file system.\n\nReturns:\nAn lxml video_asset element containing export data\n\nRaises:\nValVideoNotFoundError: if the video does not exist", "source": "codesearchnet"}
{"code": "def get_cytoband_coordinates(chrom, pos):\n    coordinate = ''\n    if (chrom in CYTOBANDS):\n        for interval in CYTOBANDS[chrom][pos]:\n            coordinate = interval.data\n    return coordinate", "docstring": "Get the cytoband coordinate for a position\n\nArgs:\nchrom(str)\npos(int)\n\nReturns:\ncoordinate(str)", "source": "codesearchnet"}
{"code": "def basis_state(str_state, num):\n    \n    n = int(str_state, 2)\n    if num >= len(str_state):\n        state = np.zeros(1 << num, dtype=complex)\n        state[n] = 1\n        return state\n    else:\n        raise QiskitError('size of bitstring is greater than num.')", "docstring": "Return a basis state ndarray.\n\nArgs:\nstr_state (string): a string representing the state.\nnum (int): the number of qubits\nReturns:\nndarray:  state(2**num) a quantum state with basis basis state.\nRaises:\nQiskitError: if the dimensions is wrong", "source": "juraj-google-style"}
{"code": "def shift_relative_position_tensor(self, pos_tensor):\n    zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype)\n    pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1)\n    pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))\n    pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, :pos_tensor.size(-1) \n    return pos_tensor", "docstring": "Args:\npos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.", "source": "github-repos"}
{"code": "def _handle_response(self, response, valid_status_codes, resource):\n    if (response.status_code not in valid_status_codes):\n        raise InvalidStatusCodeError(status_code=response.status_code, expected_status_codes=valid_status_codes)\n    if response.content:\n        data = response.json()\n        if isinstance(data, list):\n            return [resource(**x) for x in data]\n        else:\n            key = getattr(resource.Meta, 'pagination_key', None)\n            if isinstance(data.get(key), list):\n                return [resource(**x) for x in data.get(key)]\n            else:\n                return [resource(**data)]\n    return []", "docstring": "Handles Response objects\n\nArgs:\nresponse: An HTTP reponse object\nvalid_status_codes: A tuple list of valid status codes\nresource: The resource class to build from this response\n\nreturns:\nresources: A list of Resource instances", "source": "codesearchnet"}
{"code": "def partitioned_dim_sizes(self):\n    return self._partitioned_dim_sizes", "docstring": "The partitioned dimension sizes for this shape.\n\nReturns:\nA `list` of 0-D or 1-D integer `Tensor`.", "source": "github-repos"}
{"code": "def date_range(start, end, boo):\n    earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d')\n    latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d')\n    num_days = ((latest - earliest).days + 1)\n    all_days = [(latest - timedelta(days=x)) for x in range(num_days)]\n    all_days.reverse()\n    output = []\n    if boo:\n        for d in all_days:\n            output.append(int(str(d).replace('-', '')[:8]))\n    else:\n        for d in all_days:\n            output.append(str(d)[:10])\n    return output", "docstring": "Return list of dates within a specified range, inclusive.\n\nArgs:\nstart: earliest date to include, String (\"2015-11-25\")\nend: latest date to include, String (\"2015-12-01\")\nboo: if true, output list contains Numbers (20151230); if false, list contains Strings (\"2015-12-30\")\nReturns:\nlist of either Numbers or Strings", "source": "codesearchnet"}
{"code": "def _validate_min_version(min_version):\n  \n  if min_version is not None:\n    try:\n      parsed_min_version = version.StrictVersion(min_version)\n    except ValueError:\n      return ExtensionVersionResult(\n          error_reason=ExtensionValidationError.UNPARSEABLE_REQUESTED_VERSION,\n          requested_extension_version=min_version)\n\n    if parsed_min_version > HANDLER_VERSION:\n      return ExtensionVersionResult(\n          error_reason=ExtensionValidationError.OUTDATED_VERSION,\n          requested_extension_version=str(parsed_min_version))\n\n  return ExtensionVersionResult(\n      error_reason=None, requested_extension_version=min_version)", "docstring": "Validates the extension version matches the requested version.\n\nArgs:\nmin_version: Minimum version passed as a query param when establishing the\nconnection.\n\nReturns:\nAn ExtensionVersionResult indicating validation status. If there is a\nproblem, the error_reason field will be non-empty.", "source": "juraj-google-style"}
{"code": "def sort(self, by=None, reverse=False):\n        \n        if by is None:\n            by = self.kdims\n        elif not isinstance(by, list):\n            by = [by]\n        sorted_columns = self.interface.sort(self, by, reverse)\n        return self.clone(sorted_columns)", "docstring": "Sorts the data by the values along the supplied dimensions.\n\nArgs:\nby: Dimension(s) to sort by\nreverse (bool, optional): Reverse sort order\n\nReturns:\nSorted Dataset", "source": "juraj-google-style"}
{"code": "def object_key(self, root_path: KeyPath, *, value: Any, parent: Any, css_classes: Optional[Sequence[str]]=None, key_color: Union[Tuple[Optional[str], Optional[str]], Callable[[KeyPath, Any, Any], Tuple[Optional[str], Optional[str]]]]=None, enable_key_tooltip: bool=True, key_tooltip_fn: Optional[Callable[..., Html]]=None, **kwargs) -> Html:\n    del kwargs\n    key_tooltip_fn = key_tooltip_fn or self.tooltip\n    key_color = self.get_color(key_color, root_path, value, parent)\n    return (Html.element('span', [str(root_path.key)], css_classes=['object-key', type(root_path.key).__name__, css_classes], styles=dict(color=key_color[0], background_color=key_color[1])) + (lambda: key_tooltip_fn(value=root_path, root_path=root_path, parent=parent) if enable_key_tooltip else None)).add_style(\"\\n        \\n        .object-key {\\n          margin: 0.15em 0.3em 0.15em 0;\\n          display: block;\\n        }\\n        .object-key:hover + .tooltip {\\n          visibility: visible;\\n          background-color: darkblue;\\n        }\\n        .object-key.str {\\n          color: gray;\\n          border: 1px solid lightgray;\\n          background-color: ButtonFace;\\n          border-radius: 0.2em;\\n          padding: 0.3em;\\n        }\\n        .object-key.int::before{\\n          content: '[';\\n        }\\n        .object-key.int::after{\\n          content: ']';\\n        }\\n        .object-key.int{\\n          border: 0;\\n          color: lightgray;\\n          background-color: transparent;\\n          border-radius: 0;\\n          padding: 0;\\n        }\\n        \")", "docstring": "Renders a label-style key for the value.\n\nArgs:\nroot_path: The root path of the value.\nvalue: The value to render.\nparent: The parent of the value.\ncss_classes: The CSS classes to add to the HTML element.\nkey_color: The color of the key. If None, the key will be rendered\nwithout a color. If a tuple, the first element is the text color and\nthe second element is the background color. If a function, the function\ntakes (root_path, value, parent) and returns a tuple of (text_color,\nbackground_color).\nenable_key_tooltip: Whether to enable the tooltip.\nkey_tooltip_fn: The function to render the key tooltip.\n**kwargs: Additional arguments passed by the user that will be ignored.\n\nReturns:\nThe rendered HTML as the key of the value.", "source": "github-repos"}
{"code": "def calculate_sun(self, month, day, hour, is_solar_time=False):\n        \n        datetime = DateTime(month, day, *self._calculate_hour_and_minute(hour),\n                            leap_year=self.is_leap_year)\n        return self.calculate_sun_from_date_time(datetime, is_solar_time)", "docstring": "Get Sun data for an hour of the year.\n\nArgs:\nmonth: An integer between 1-12\nday: An integer between 1-31\nhour: A positive number between 0..23\nis_solar_time: A boolean to indicate if the input hour is solar time.\n(Default: False)\n\nReturns:\nA sun object for this particular time", "source": "juraj-google-style"}
{"code": "def get_all_results_for_query_batch(self, batch_id, job_id=None, chunk_size=2048):\n        \n        result_ids = self.get_query_batch_result_ids(batch_id, job_id=job_id)\n        if not result_ids:\n            raise RuntimeError('Batch is not complete')\n        for result_id in result_ids:\n            yield self.get_query_batch_results(\n                batch_id,\n                result_id,\n                job_id=job_id,\n                chunk_size=chunk_size\n            )", "docstring": "Gets result ids and generates each result set from the batch and returns it\nas an generator fetching the next result set when needed\n\nArgs:\nbatch_id: id of batch\njob_id: id of job, if not provided, it will be looked up", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):\n    pass", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def _string_to_byte_list(self, data):\n    bytes_length = 16\n    m = self.digest()\n    m.update(str.encode(data))\n    hex_digest = m.hexdigest()\n    return list((int(hex_digest[(num * 2):((num * 2) + 2)], bytes_length) for num in range(bytes_length)))", "docstring": "Creates a hex digest of the input string given to create the image,\nif it's not already hexadecimal\n\nReturns:\nLength 16 list of rgb value range integers\n(each representing a byte of the hex digest)", "source": "codesearchnet"}
{"code": "def check_whitelist(host, whitelist):\n    \n    if ':' not in host:\n        host = host + ':80'\n\n    if host in whitelist:\n        return True\n\n    return any(match_host(host, pattern) for pattern in whitelist)", "docstring": "Check a given request host against a whitelist.\n\nArgs:\nhost (str) :\nA host string to compare against a whitelist.\n\nIf the host does not specify a port, then ``\":80\"`` is implicitly\nassumed.\n\nwhitelist (seq[str]) :\nA list of host patterns to match against\n\nReturns:\n``True``, if ``host`` matches any pattern in ``whitelist``, otherwise\n``False``", "source": "juraj-google-style"}
{"code": "def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None, extract_traceback=True) -> pywrap_tf_session.TF_Operation:\n    if op_def is None:\n        op_def = graph.op_def_for_type(node_def.op)\n    inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)\n    with graph._c_graph.get() as c_graph:\n        op_desc = pywrap_tf_session.TF_NewOperation(c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name))\n    if node_def.device:\n        pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))\n    for op_input in inputs:\n        if isinstance(op_input, (list, tuple)):\n            pywrap_tf_session.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])\n        else:\n            pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())\n    for control_input in control_inputs:\n        pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)\n    for name, attr_value in node_def.attr.items():\n        serialized = attr_value.SerializeToString()\n        pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)\n    try:\n        c_op = pywrap_tf_session.TF_FinishOperation(op_desc)\n    except errors.InvalidArgumentError as e:\n        raise ValueError(e.message)\n    if extract_traceback:\n        pywrap_tf_session.TF_SetOpStackTrace(c_op, tf_stack.extract_stack(stacklevel=3))\n    return c_op", "docstring": "Creates a TF_Operation.\n\nArgs:\ngraph: a `Graph`.\nnode_def: `node_def_pb2.NodeDef` for the operation to create.\ninputs: A flattened list of `Tensor`s. This function handles grouping\ntensors into lists as per attributes in the `node_def`.\ncontrol_inputs: A list of `Operation`s to set as control dependencies.\nop_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not\nspecified, is looked up from the `graph` using `node_def.op`.\nextract_traceback: if True, extract the current Python traceback to the\nTF_Operation.\n\nReturns:\nA wrapped TF_Operation*.", "source": "github-repos"}
{"code": "def plugin_wait_time(seconds: float, item_session: ItemSession, error: Optional[Exception]=None) -> float:\n    return seconds", "docstring": "Return the wait time between requests.\n\nArgs:\nseconds: The original time in seconds.\nitem_session:\nerror:\n\nReturns:\nThe time in seconds.", "source": "codesearchnet"}
{"code": "def to_array(tensor):\n    if tensor.HasField('segment'):\n        raise ValueError('Currently not supporting loading segments.')\n    if (tensor.data_type == TensorProto.UNDEFINED):\n        raise ValueError('The data type is not defined.')\n    tensor_dtype = tensor.data_type\n    np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]\n    storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]\n    storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]\n    storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]\n    dims = tensor.dims\n    if (tensor.data_type == TensorProto.STRING):\n        utf8_strings = getattr(tensor, storage_field)\n        ss = list((s.decode('utf-8') for s in utf8_strings))\n        return np.asarray(ss).astype(np_dtype).reshape(dims)\n    if tensor.HasField('raw_data'):\n        return np.frombuffer(tensor.raw_data, dtype=np_dtype).reshape(dims)\n    else:\n        data = (getattr(tensor, storage_field),)\n        if ((tensor_dtype == TensorProto.COMPLEX64) or (tensor_dtype == TensorProto.COMPLEX128)):\n            data = combine_pairs_to_complex(data)\n        return np.asarray(data, dtype=storage_np_dtype).astype(np_dtype).reshape(dims)", "docstring": "Converts a tensor def object to a numpy array.\n\nInputs:\ntensor: a TensorProto object.\nReturns:\narr: the converted array.", "source": "codesearchnet"}
{"code": "def _inspect_history_cache(self, cache, replica_id, step_num, tensor_trace_order):\n    if not tensor_trace_order.traced_tensors:\n        logging.warn('TT history mode has no tensors in the cache to check.')\n        return control_flow_ops.no_op\n    stats = ['\\n\\n', 'core:', replica_id, ',', 'step:', step_num]\n    diffs = []\n    for tensor_name, cache_idx in sorted(tensor_trace_order.tensorname_to_cache_idx.items(), key=lambda item: item[1]):\n        tensor_to_write = cache[cache_idx, 0]\n        snapshot_variable = self._create_or_get_tensor_history_values_cache(tensor_to_write.name, tensor_to_write.op.graph, tensor_to_write.shape.as_list(), tensor_to_write.dtype)\n        with ops.control_dependencies([snapshot_variable]):\n            old_value = state_ops.assign_add(snapshot_variable, 0.0)\n        with ops.control_dependencies([old_value]):\n            new_value = math_ops.cast(tensor_to_write, dtypes.float32)\n            delta = math_ops.abs(math_ops.subtract(old_value, new_value))\n            updated = state_ops.assign(snapshot_variable, new_value)\n            diffs.append(delta)\n        with ops.control_dependencies([updated]):\n            new_value_from_var = state_ops.assign_add(snapshot_variable, 0.0)\n        stats.extend(['\\n', 'core:', replica_id, ',', 'step:', step_num, ',', tensor_name, '-->', old_value, new_value_from_var, delta])\n    diff_stack = array_ops_stack.stack(diffs)\n    step_max = math_ops.reduce_max(diff_stack)\n    return cond.cond(math_ops.greater(step_max, tensor_tracer_flags.DELTA_THRESHOLD.value), lambda: logging_ops.print_v2(*stats, summarize=-1), lambda: control_flow_ops.no_op())", "docstring": "Generates a conditional print operation to log differences in tensor values.\n\nArgs:\ncache: Tensor storing the trace results for the step.\nreplica_id: Tensor storing the replica id of the running core.\nstep_num: Step number.\ntensor_trace_order: TensorTraceOrder object holding tensorname to id map.\n\nReturns:\nThe Op to flush the cache to file.", "source": "github-repos"}
{"code": "def apply(self, func, **kwargs):\n        \n        import dask\n\n        \n        delayed_call = self.delayed_call\n        self.delayed_call = self.dask_obj\n        return self.__class__(dask.delayed(func)(delayed_call, **kwargs))", "docstring": "Apply some callable function to the data in this partition.\n\nNote: It is up to the implementation how kwargs are handled. They are\nan important part of many implementations. As of right now, they\nare not serialized.\n\nArgs:\nfunc: The lambda to apply (may already be correctly formatted)\n\nReturns:\nA new `BaseFramePartition` containing the object that has had `func`\napplied to it.", "source": "juraj-google-style"}
{"code": "def on_epoch_end(self, epoch, logs=None):", "docstring": "Called at the end of an epoch.\n\nSubclasses should override for any actions to run. This function should\nonly be called during TRAIN mode.\n\nArgs:\nepoch: Integer, index of epoch.\nlogs: Dict, metric results for this training epoch, and for the\nvalidation epoch if validation is performed. Validation result\nkeys are prefixed with `val_`. For training epoch, the values of\nthe `Model`'s metrics are returned. Example:\n`{'loss': 0.2, 'accuracy': 0.7}`.", "source": "github-repos"}
{"code": "def get_cohesive_energy(self, material_id, per_atom=False):\n        \n        entry = self.get_entry_by_material_id(material_id)\n        ebulk = entry.energy / \\\n                entry.composition.get_integer_formula_and_factor()[1]\n        comp_dict = entry.composition.reduced_composition.as_dict()\n\n        isolated_atom_e_sum, n = 0, 0\n        for el in comp_dict.keys():\n            e = self._make_request(\"/element/%s/tasks/isolated_atom\" % (el),\n                                  mp_decode=False)[0]\n            isolated_atom_e_sum += e['output'][\"final_energy\"] * comp_dict[el]\n            n += comp_dict[el]\n        ecoh_per_formula = isolated_atom_e_sum - ebulk\n        return ecoh_per_formula/n if per_atom else ecoh_per_formula", "docstring": "Gets the cohesive for a material (eV per formula unit). Cohesive energy\nis defined as the difference between the bulk energy and the sum of\ntotal DFT energy of isolated atoms for atom elements in the bulk.\nArgs:\nmaterial_id (str): Materials Project material_id, e.g. 'mp-123'.\nper_atom (bool): Whether or not to return cohesive energy per atom\nReturns:\nCohesive energy (eV).", "source": "juraj-google-style"}
{"code": "def remove_roles(self, databaseName, roleNames, collectionName=None):\n        \n        for roleName in roleNames:\n            self.remove_role(databaseName, roleName, collectionName)", "docstring": "Remove multiple roles\n\nArgs:\ndatabaseName (str): Database Name\nroleNames (list of RoleSpecs): roles\n\nKeyword Args:\ncollectionName (str): Collection", "source": "juraj-google-style"}
{"code": "def find(name, arg=None):\n    \n    for p in get_processes():\n        if p.name.lower().find(name.lower()) != -1:\n            if arg is not None:\n                for a in p.cmdline or []:\n                    if a.lower().find(arg.lower()) != -1:\n                        return p\n            else:\n                return p\n    return None", "docstring": "Find process by name or by argument in command line.\n\nArgs:\nname (str): Process name to search for.\narg (str): Command line argument for a process to search for.\n\nReturns:\ntea.process.base.IProcess: Process object if found.", "source": "juraj-google-style"}
{"code": "def set_weather_from_metar(metar: typing.Union[(Metar.Metar, str)], in_file: typing.Union[(str, Path)], out_file: typing.Union[(str, Path)]=None) -> typing.Tuple[(typing.Union[(str, None)], typing.Union[(str, None)])]:\n    (error, metar) = custom_metar.CustomMetar.get_metar(metar)\n    if error:\n        return (error, None)\n    if metar:\n        LOGGER.debug('METAR: %s', metar.code)\n    in_file = elib.path.ensure_file(in_file)\n    if (out_file is None):\n        out_file = in_file\n    else:\n        out_file = elib.path.ensure_file(out_file, must_exist=False)\n    LOGGER.debug('applying metar: %s -> %s', in_file, out_file)\n    try:\n        LOGGER.debug('building MissionWeather')\n        _mission_weather = mission_weather.MissionWeather(metar)\n        with Miz(str(in_file)) as miz:\n            _mission_weather.apply_to_miz(miz)\n            miz.zip(str(out_file))\n            return (None, f'successfully applied METAR to {in_file}')\n    except ValueError:\n        error = f\n        return (error, None)", "docstring": "Applies the weather from a METAR object to a MIZ file\n\nArgs:\nmetar: metar object\nin_file: path to MIZ file\nout_file: path to output MIZ file (will default to in_file)\n\nReturns: tuple of error, success", "source": "codesearchnet"}
{"code": "def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):\n  \n  \n  feed_item_service = client.GetService('FeedItemService', 'v201809')\n  now = datetime.now()\n  mars_date = datetime(now.year, now.month, 1, 0, 0)\n  venus_date = datetime(now.year, now.month, 15, 0, 0)\n  time_format = '%Y%m%d %H%M%S'\n\n  feed_item_operations = [\n      CreateFeedItemAddOperation(\n          'Mars', '$1234.56', mars_date.strftime(time_format),\n          ad_customizer_feed),\n      CreateFeedItemAddOperation(\n          'Venus', '$1450.00', venus_date.strftime(time_format),\n          ad_customizer_feed)\n  ]\n\n  response = feed_item_service.mutate(feed_item_operations)\n\n  if 'value' in response:\n    for feed_item in response['value']:\n      print 'Added FeedItem with ID %d.' % feed_item['feedItemId']\n  else:\n    raise errors.GoogleAdsError('No FeedItems were added.')\n\n  for feed_item, adgroup_id in zip(response['value'], adgroup_ids):\n    RestrictFeedItemToAdGroup(client, feed_item, adgroup_id)", "docstring": "Creates FeedItems for the specified AdGroups.\n\nThese FeedItems contain values to use in ad customizations for the AdGroups.\n\nArgs:\nclient: an AdWordsClient instance.\nadgroup_ids: a list containing two AdGroup Ids.\nad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems\nwith.\n\nRaises:\nGoogleAdsError: if no FeedItems were added.", "source": "juraj-google-style"}
{"code": "def _num_elements(self):\n    return math_ops.reduce_prod(self.inner_shape)", "docstring": "Number of elements in a shape.\n\nReturns:\nThe number of elements in the shape.", "source": "github-repos"}
{"code": "def unpack_message(buffer):\n    \n    hdr_size = Header().get_size()\n    hdr_buff, msg_buff = buffer[:hdr_size], buffer[hdr_size:]\n    header = Header()\n    header.unpack(hdr_buff)\n    message = new_message_from_header(header)\n    message.unpack(msg_buff)\n    return message", "docstring": "Unpack the whole buffer, including header pack.\n\nArgs:\nbuffer (bytes): Bytes representation of a openflow message.\n\nReturns:\nobject: Instance of openflow message.", "source": "juraj-google-style"}
{"code": "def malware(self, malware, password, file_name):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    self._data['malware'] = malware\n    self._data['password'] = password\n    self._data['fileName'] = file_name\n    request = {'malware': malware, 'password': password, 'fileName': file_name}\n    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Uploads to malware vault.\n\nArgs:\nmalware:\npassword:\nfile_name:", "source": "codesearchnet"}
{"code": "def omim_terms(case_obj):\n    \n    LOG.info(\"Collecting OMIM disorders for case {}\".format(case_obj.get('display_name')))\n    disorders = []\n\n    case_disorders = case_obj.get('diagnosis_phenotypes') \n    if case_disorders:\n        for disorder in case_disorders:\n            disorder_obj = {\n                \"id\" : ':'.join([ 'MIM', str(disorder)])\n            }\n            disorders.append(disorder_obj)\n    return disorders", "docstring": "Extract all OMIM phenotypes available for the case\nArgs:\ncase_obj(dict): a scout case object\nReturns:\ndisorders(list): a list of OMIM disorder objects", "source": "juraj-google-style"}
{"code": "def shared_s3_app_bucket(self, include_region=False):\n        \n        if include_region:\n            shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data)\n        else:\n            shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data)\n        return shared_s3_app_bucket", "docstring": "Generate shared s3 application bucket name.\n\nArgs:\ninclude_region (bool): Include region in the name generation.", "source": "juraj-google-style"}
{"code": "def add_cell_argument(self, name, help, required=False):\n    for action in self._actions:\n        if (action.dest == name):\n            raise ValueError(('Arg \"%s\" was added by add_argument already.' % name))\n    self._cell_args[name] = {'required': required, 'help': help}", "docstring": "Add a cell only argument.\n\nArgs:\nname: name of the argument. No need to start with \"-\" or \"--\".\nhelp: the help string of the argument.\nrequired: Whether it is required in cell content.", "source": "codesearchnet"}
{"code": "def run(self, args):\n    jlink = self.create_jlink(args)\n    if args.product:\n        print(('Product: %s' % jlink.product_name))\n        manufacturer = ('SEGGER' if (jlink.oem is None) else jlink.oem)\n        print(('Manufacturer: %s' % manufacturer))\n        print(('Hardware Version: %s' % jlink.hardware_version))\n        print(('Firmware: %s' % jlink.firmware_version))\n        print(('DLL Version: %s' % jlink.version))\n        print(('Features: %s' % ', '.join(jlink.features)))\n    elif args.jtag:\n        status = jlink.hardware_status\n        print(('TCK Pin Status: %d' % status.tck))\n        print(('TDI Pin Status: %d' % status.tdi))\n        print(('TDO Pin Status: %d' % status.tdo))\n        print(('TMS Pin Status: %d' % status.tms))\n        print(('TRES Pin Status: %d' % status.tres))\n        print(('TRST Pin Status: %d' % status.trst))", "docstring": "Runs the information command.\n\nArgs:\nself (InfoCommand): the ``InfoCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def get_open_clinvar_submission(self, user_id, institute_id):\n        \n\n        LOG.info(\"Retrieving an open clinvar submission for user '%s' and institute %s\", user_id, institute_id)\n        query = dict(user_id=user_id, institute_id=institute_id, status='open')\n        submission = self.clinvar_submission_collection.find_one(query)\n\n        \n        if submission is None:\n            submission_id = self.create_submission(user_id, institute_id)\n            submission = self.clinvar_submission_collection.find_one({'_id':submission_id})\n\n        return submission", "docstring": "Retrieve the database id of an open clinvar submission for a user and institute,\nif none is available then create a new submission and return it\n\nArgs:\nuser_id(str): a user ID\ninstitute_id(str): an institute ID\n\nReturns:\nsubmission(obj) : an open clinvar submission object", "source": "juraj-google-style"}
{"code": "def parse_args():\n    parser = argparse.ArgumentParser()\n    parser.register('type', 'bool', lambda v: v.lower() == 'true')\n    parser.add_argument('--max_steps', type=int, default=10, help='Number of steps to run trainer.')\n    parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size used during training.')\n    parser.add_argument('--learning_rate', type=float, default=0.025, help='Initial learning rate.')\n    parser.add_argument('--data_dir', type=str, default='/tmp/mnist_data', help='Directory for storing data')\n    parser.add_argument('--ui_type', type=str, default='readline', help='Command-line user interface type (only readline is supported)')\n    parser.add_argument('--fake_data', type='bool', nargs='?', const=True, default=False, help='Use fake MNIST data for unit testing')\n    parser.add_argument('--debug', type='bool', nargs='?', const=True, default=False, help='Use debugger to track down bad values during training. Mutually exclusive with the --tensorboard_debug_address flag.')\n    parser.add_argument('--tensorboard_debug_address', type=str, default=None, help='Connect to the TensorBoard Debugger Plugin backend specified by the gRPC address (e.g., localhost:1234). Mutually exclusive with the --debug flag.')\n    parser.add_argument('--use_random_config_path', type='bool', nargs='?', const=True, default=False, help='If set, set config file path to a random file in the temporary\\n      directory.')\n    return parser.parse_known_args()", "docstring": "Parses commandline arguments.\n\nReturns:\nA tuple (parsed, unparsed) of the parsed object and a group of unparsed\narguments that did not match the parser.", "source": "github-repos"}
{"code": "def save(value: Any, path: str, *args, **kwargs) -> Any:\n    save_handler = flags.get_save_handler() or default_save_handler\n    return save_handler(value, path, *args, **kwargs)", "docstring": "Save a symbolic value using the global save handler.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Any())\n])\nclass A(pg.Object):\npass\n\na1 = A(1)\nfile = 'my_file.json'\na1.save(file)\na2 = pg.load(file)\nassert pg.eq(a1, a2)\n\nArgs:\nvalue: value to save.\npath: A path string for saving `value`.\n*args: Positional arguments that will be passed through to the global\nsave handler.\n**kwargs: Keyword arguments that will be passed through to the global\nsave handler.\n\nReturns:\nReturn value from the global save handler.\n\nRaises:\nRuntimeError: if global save handler is not set.", "source": "github-repos"}
{"code": "def was_init():\n    mask = lib.SDL_WasInit(0)\n    return enumtools.get_items(InitFlags, mask, {InitFlags.everything})", "docstring": "This function returns the subsystems which have previously been initialized.\n\nReturns:\nSet[InitFlag]: Flags indicating which subsystems have been initialized.", "source": "codesearchnet"}
{"code": "def __init__(self, saved_model_dir, saved_model_tags=None, saved_model_exported_names=None, trackable_obj=None):\n    super(TFLiteSavedModelConverterV2, self).__init__()\n    self.saved_model_dir = saved_model_dir\n    self._saved_model_tags = saved_model_tags\n    self._saved_model_exported_names = saved_model_exported_names\n    self._trackable_obj = trackable_obj\n    self._parse_saved_model_args(always_enable_saved_model_import=True)", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nsaved_model_dir: Directory of the SavedModel.\nsaved_model_tags: Set of tags identifying the MetaGraphDef within the\nSavedModel to analyze. All tags in the tag set must be present. (default\n{tf.saved_model.SERVING}).\nsaved_model_exported_names: Names to be exported when the saved model\nimport path is on.\ntrackable_obj: tf.AutoTrackable object associated with `funcs`. A\nreference to this object needs to be maintained so that Variables do not\nget garbage collected since functions have a weak reference to\nVariables. This is only required when the tf.AutoTrackable object is not\nmaintained by the user (e.g. `from_saved_model`).", "source": "github-repos"}
{"code": "def regularizer(name, regularization_fn, name_filter='weights'):\n    regex = re.compile(name_filter)\n\n    def fn(var_name, variable, phase):\n        if ((phase is pt.Phase.train) and regex.search(var_name)):\n            with tf.name_scope(None, name, [variable]):\n                loss = regularization_fn(variable)\n            if (loss is not None):\n                tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss)\n        return variable\n    return fn", "docstring": "Wraps a regularizer in a parameter-function.\n\nArgs:\nname: The name scope for this regularizer.\nregularization_fn: A function with signature:\nfn(variable) -> loss `Tensor` or `None`.\nname_filter: A regex that will be used to filter variables by name.\n\nReturns:\nA parameter modification function that adds the loss to the\nREGULARIZATION_LOSSES graph key.", "source": "codesearchnet"}
{"code": "def read_tree_newick(newick):\n    \n    if not isinstance(newick, str):\n        try:\n            newick = str(newick)\n        except:\n            raise TypeError(\"newick must be a str\")\n    if newick.lower().endswith('.gz'): \n        f = gopen(expanduser(newick)); ts = f.read().decode().strip(); f.close()\n    elif isfile(expanduser(newick)): \n        f = open(expanduser(newick)); ts = f.read().strip(); f.close()\n    else:\n        ts = newick.strip()\n    lines = ts.splitlines()\n    if len(lines) != 1:\n        return [read_tree_newick(l) for l in lines]\n    try:\n        t = Tree(); t.is_rooted = ts.startswith('[&R]')\n        if ts[0] == '[':\n            ts = ']'.join(ts.split(']')[1:]).strip(); ts = ts.replace(', ',',')\n        n = t.root; i = 0\n        while i < len(ts):\n            if ts[i] == ';':\n                if i != len(ts)-1 or n != t.root:\n                    raise RuntimeError(INVALID_NEWICK)\n            elif ts[i] == '(':\n                c = Node(); n.add_child(c); n = c\n            elif ts[i] == ')':\n                n = n.parent\n            elif ts[i] == ',':\n                n = n.parent; c = Node(); n.add_child(c); n = c\n            elif ts[i] == ':':\n                i += 1; ls = ''\n                while ts[i] != ',' and ts[i] != ')' and ts[i] != ';':\n                    ls += ts[i]; i += 1\n                n.edge_length = float(ls); i -= 1\n            else:\n                label = ''\n                while ts[i] != ':' and ts[i] != ',' and ts[i] != ';' and ts[i] != ')':\n                    label += ts[i]; i += 1\n                i -= 1; n.label = label\n            i += 1\n    except Exception as e:\n        raise RuntimeError(\"Failed to parse string as Newick: %s\"%ts)\n    return t", "docstring": "Read a tree from a Newick string or file\n\nArgs:\n``newick`` (``str``): Either a Newick string or the path to a Newick file (plain-text or gzipped)\n\nReturns:\n``Tree``: The tree represented by ``newick``. If the Newick file has multiple trees (one per line), a ``list`` of ``Tree`` objects will be returned", "source": "juraj-google-style"}
{"code": "def _set_notification(self, conn, char, enabled, timeout=1.0):\n        \n\n        if 'client_configuration' not in char:\n            return False, {'reason': 'Cannot enable notification without a client configuration attribute for characteristic'}\n\n        props = char['properties']\n        if not props.notify:\n            return False, {'reason': 'Cannot enable notification on a characteristic that does not support it'}\n\n        value = char['client_configuration']['value']\n\n        \n        current_state = bool(value & (1 << 0))\n        if current_state == enabled:\n            return\n\n        if enabled:\n            value |= 1 << 0\n        else:\n            value &= ~(1 << 0)\n\n        char['client_configuration']['value'] = value\n\n        valarray = struct.pack(\"<H\", value)\n        return self._write_handle(conn, char['client_configuration']['handle'], True, valarray, timeout)", "docstring": "Enable/disable notifications on a GATT characteristic\n\nArgs:\nconn (int): The connection handle for the device we should interact with\nchar (dict): The characteristic we should modify\nenabled (bool): Should we enable or disable notifications\ntimeout (float): How long to wait before failing", "source": "juraj-google-style"}
{"code": "def create_sns_topic(self, region):\n    sns = self.session.client('sns', region_name=region)\n    self.log.info('Creating SNS topic for {}/{}'.format(self.account, region))\n    res = sns.create_topic(Name=self.topic_name)\n    arn = res['TopicArn']\n    tmpl = get_template('cloudtrail_sns_policy.json')\n    policy = tmpl.render(region=region, account_id=self.account.account_number, topic_name=self.topic_name)\n    sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue=policy)\n    auditlog(event='cloudtrail.create_sns_topic', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    return arn", "docstring": "Creates an SNS topic if needed. Returns the ARN if the created SNS topic\n\nArgs:\nregion (str): Region name\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "def headless(self, value):\n        \n        if value is True:\n            self._arguments.append('-headless')\n        elif '-headless' in self._arguments:\n            self._arguments.remove('-headless')", "docstring": "Sets the headless argument\n\nArgs:\nvalue: boolean value indicating to set the headless option", "source": "juraj-google-style"}
{"code": "def indexSearch(self, indexes):\n        \n\n        if not self._dataFrame.empty:\n            filter0 = self._dataFrame.index == -9999\n            for index in indexes:\n                filter1 = self._dataFrame.index == index\n                filter0 = np.logical_or(filter0, filter1)\n\n            return filter0\n        else:\n            return []", "docstring": "Filters the data by a list of indexes.\n\nArgs:\nindexes (list of int): List of index numbers to return.\n\nReturns:\nlist: A list containing all indexes with filtered data. Matches\nwill be `True`, the remaining items will be `False`. If the\ndataFrame is empty, an empty list will be returned.", "source": "juraj-google-style"}
{"code": "def locked_put(self, credentials):\n        \n        entity = self._model.get_or_insert(self._key_name)\n        setattr(entity, self._property_name, credentials)\n        entity.put()\n        if self._cache:\n            self._cache.set(self._key_name, credentials.to_json())", "docstring": "Write a Credentials to the datastore.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "juraj-google-style"}
{"code": "def update(dst, src):\n    \n    for k, v in src.items():\n        if isinstance(v, Mapping):\n            r = update(dst.get(k, {}), v)\n            dst[k] = r\n        else:\n            dst[k] = src[k]\n    return dst", "docstring": "Recursively update values in dst from src.\n\nUnlike the builtin dict.update() function, this method will descend into\nnested dicts, updating all nested values.\n\nArguments:\ndst (dict): Destination dict.\nsrc (dict): Source dict.\n\nReturns:\ndict: dst updated with entries from src.", "source": "juraj-google-style"}
{"code": "def __init__(self, use_memory_view_min_size=4096):\n        \n        self.use_memory_view_min_size = use_memory_view_min_size\n        self._deque = collections.deque()\n        self.clear()", "docstring": "Constructor.\n\nArgs:\nuse_memory_view_min_size (int): minimum size before using\nmemoryview objects (advanced option, the default is probably\ngood for you).", "source": "juraj-google-style"}
{"code": "def DeserializeFromDB(buffer):\n        \n        m = StreamManager.GetStream(buffer)\n        reader = BinaryReader(m)\n        uns = UnspentCoinState()\n        uns.Deserialize(reader)\n\n        StreamManager.ReleaseStream(m)\n\n        return uns", "docstring": "Deserialize full object.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nUnspentCoinState:", "source": "juraj-google-style"}
{"code": "def write(self, data):\n    start_time = time.time()\n    self._get_write_buffer().write(data)\n    ctx = context.get()\n    operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)\n    operation.counters.Increment(COUNTER_IO_WRITE_MSEC, int(((time.time() - start_time) * 1000)))(ctx)", "docstring": "Write data to the GoogleCloudStorage file.\n\nArgs:\ndata: string containing the data to be written.", "source": "codesearchnet"}
{"code": "def create_queue(self, register=False):\n        \n\n        queue = asyncio.Queue(loop=self._loop)\n        if register:\n            self._work_queues.add(queue)\n\n        return queue", "docstring": "Create a new work queue and optionally register it.\n\nThis will make sure the queue is attached to the correct event loop.\nYou can optionally choose to automatically register it so that\nwait_idle() will block until the queue is empty.\n\nArgs:\nregister (bool): Whether to call register_workqueue() automatically.\n\nReturns:\nasyncio.Queue: The newly created queue.", "source": "juraj-google-style"}
{"code": "def _CreateShapesFolder(self, schedule, doc):\n    \n    if not schedule.GetShapeList():\n      return None\n    shapes_folder = self._CreateFolder(doc, 'Shapes')\n    shapes = list(schedule.GetShapeList())\n    shapes.sort(key=lambda x: x.shape_id)\n    for shape in shapes:\n      placemark = self._CreatePlacemark(shapes_folder, shape.shape_id)\n      self._CreateLineStringForShape(placemark, shape)\n      if self.shape_points:\n        self._CreateShapePointFolder(shapes_folder, shape)\n    return shapes_folder", "docstring": "Create a KML Folder containing all the shapes in a schedule.\n\nThe folder contains a placemark for each shape. If there are no shapes in\nthe schedule then the folder is not created and None is returned.\n\nArgs:\nschedule: The transitfeed.Schedule instance.\ndoc: The KML Document ElementTree.Element instance.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "juraj-google-style"}
{"code": "def validate(self, config):\n        \n        if not isinstance(config, dict):\n            raise errors.SchemeValidationError(\n                'Scheme can only validate a dictionary config, but was given '\n                '{} (type: {})'.format(config, type(config))\n            )\n\n        for arg in self.args:\n            \n            if arg.name in config:\n                arg.validate(config[arg.name])\n\n            \n            else:\n                \n                \n                if arg.required:\n                    raise errors.SchemeValidationError(\n                        'Option \"{}\" is required, but not found.'.format(arg.name)\n                    )", "docstring": "Validate the given config against the `Scheme`.\n\nArgs:\nconfig (dict): The configuration to validate.\n\nRaises:\nerrors.SchemeValidationError: The configuration fails\nvalidation against the `Schema`.", "source": "juraj-google-style"}
{"code": "def __init__(self, api_key=None):\n        \n        try:\n            self.api_key = api_key or os.environ['AIRTABLE_API_KEY']\n        except KeyError:\n            raise KeyError('Api Key not found. Pass api_key as a kwarg \\\n                            or set an env var AIRTABLE_API_KEY with your key')", "docstring": "Authentication used by Airtable Class\n\nArgs:\napi_key (``str``): Airtable API Key. Optional.\nIf not set, it will look for\nenvironment variable ``AIRTABLE_API_KEY``", "source": "juraj-google-style"}
{"code": "def stage_tc_batch(self, owner, staging_data):\n    batch = self.tcex.batch(owner)\n    for group in (staging_data.get('group') or []):\n        variable = group.pop('variable', None)\n        path = group.pop('path', None)\n        data = self.path_data(group, path)\n        if (group.get('xid') is None):\n            group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)\n        group['ownerName'] = owner\n        batch.add_group(group)\n        if ((variable is not None) and (data is not None)):\n            self.stage_redis(variable, self.stage_tc_group_entity(data))\n    for indicator in (staging_data.get('indicator') or []):\n        variable = indicator.pop('variable', None)\n        path = indicator.pop('path', None)\n        if (indicator.get('xid') is None):\n            indicator['xid'] = self.stage_tc_batch_xid(indicator.get('type'), indicator.get('summary'), owner)\n        indicator['ownerName'] = owner\n        batch.add_indicator(indicator)\n        data = self.path_data(dict(indicator), path)\n        if ((variable is not None) and (data is not None)):\n            self.stage_redis(variable, self.stage_tc_indicator_entity(data))\n    batch_results = batch.submit()\n    self.log.debug('[stage] Batch Results: {}'.format(batch_results))\n    for error in (batch_results.get('errors') or []):\n        self.log.error('[stage] {}'.format(error))", "docstring": "Stage data in ThreatConnect Platform using batch API.\n\nArgs:\nowner (str): The ThreatConnect owner to submit batch job.\nstaging_data (dict): A dict of ThreatConnect batch data.", "source": "codesearchnet"}
{"code": "def _close_open_file(self, file_des):\n    self.open_files[file_des] = None\n    heapq.heappush(self._free_fd_heap, file_des)", "docstring": "Remove file object with given descriptor from the list\nof open files.\n\nSets the entry in open_files to None.\n\nArgs:\nfile_des: Descriptor of file object to be removed from\nopen files list.", "source": "codesearchnet"}
{"code": "def parsed_top_level_errors(parsed, errors, component_type: str = \"\") -> Errors:\n    \n\n    \n    fn_cnt = 0\n    rel_cnt = 0\n    nested_cnt = 0\n    for key in parsed:\n        if parsed[key][\"type\"] == \"Function\":\n            fn_cnt += 1\n        if parsed[key][\"type\"] == \"Relation\":\n            rel_cnt += 1\n        if parsed[key][\"type\"] == \"Nested\":\n            nested_cnt += 1\n\n    if not component_type:\n        if nested_cnt > 1:\n            errors.append(\n                (\n                    \"Error\",\n                    \"Too many nested objects - can only have one per BEL Assertion\",\n                )\n            )\n\n        if nested_cnt:\n            if rel_cnt > 2:\n                errors.append(\n                    (\n                        \"Error\",\n                        \"Too many relations - can only have two in a nested BEL Assertion\",\n                    )\n                )\n            elif fn_cnt > 4:\n                errors.append((\"Error\", \"Too many BEL subject and object candidates\"))\n\n        else:\n            if rel_cnt > 1:\n                errors.append(\n                    (\n                        \"Error\",\n                        \"Too many relations - can only have one in a BEL Assertion\",\n                    )\n                )\n            elif fn_cnt > 2:\n                errors.append((\"Error\", \"Too many BEL subject and object candidates\"))\n\n    elif component_type == \"subject\":\n        if rel_cnt > 0:\n            errors.append(\n                (\"Error\", \"Too many relations - cannot have any in a BEL Subject\")\n            )\n        elif fn_cnt > 1:\n            errors.append(\n                (\"Error\", \"Too many BEL subject candidates - can only have one\")\n            )\n\n    elif component_type == \"object\":\n        if nested_cnt:\n            if rel_cnt > 1:\n                errors.append(\n                    (\n                        \"Error\",\n                        \"Too many relations - can only have one in a nested BEL object\",\n                    )\n                )\n            elif fn_cnt > 2:\n                errors.append(\n                    (\n                        \"Error\",\n                        \"Too many BEL subject and object candidates in a nested BEL object\",\n                    )\n                )\n        else:\n            if rel_cnt > 0:\n                errors.append(\n                    (\"Error\", \"Too many relations - cannot have any in a BEL Subject\")\n                )\n            elif fn_cnt > 1:\n                errors.append(\n                    (\"Error\", \"Too many BEL subject candidates - can only have one\")\n                )\n\n    return errors", "docstring": "Check full parse for errors\n\nArgs:\nparsed:\nerrors:\ncomponent_type: Empty string or 'subject' or 'object' to indicate that we\nare parsing the subject or object field input", "source": "juraj-google-style"}
{"code": "def _generate_visualization(template_file: str, loader: jinja2.BaseLoader, **kwargs) -> str:\n    env = jinja2.Environment(loader=loader)\n    template = env.get_template(template_file)\n    return template.render(cytoscape_url=_CYTOSCAPE_URL, dagre_url=_DAGRE_URL, cytoscape_dagre_url=_CYTOSCAPE_DAGRE_URL, **kwargs)", "docstring": "Generate the visualization webpage.\n\nArgs:\ntemplate_file: str. A jinja2 template filename.\nloader: jinja2.BaseLoader. The loader needs to be able to load files in this\nfile's directory.\n**kwargs: Additional args passed on to the template.\n\nReturns:\nstr. The rendered visualization page.", "source": "github-repos"}
{"code": "def AddEventAttribute(self, attribute_name, attribute_value):\n    if (attribute_name in self._extra_event_attributes):\n        raise KeyError('Event attribute {0:s} already set'.format(attribute_name))\n    self._extra_event_attributes[attribute_name] = attribute_value", "docstring": "Adds an attribute that will be set on all events produced.\n\nSetting attributes using this method will cause events produced via this\nmediator to have an attribute with the provided name set with the\nprovided value.\n\nArgs:\nattribute_name (str): name of the attribute to add.\nattribute_value (str): value of the attribute to add.\n\nRaises:\nKeyError: if the event attribute is already set.", "source": "codesearchnet"}
{"code": "def closest_point_to(self, point, thr=20.0):\n    i = 0\n    point_arr = point.gen2arr()\n\n    def closest_in_line(pointA, pointB):\n        temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)\n        return Point(temp[1], temp[0], None)\n    for (p_a, p_b) in pairwise(self.points):\n        candidate = closest_in_line(p_a, p_b)\n        if (candidate.distance(point) <= thr):\n            if (p_a.distance(point) <= thr):\n                return (i, p_a)\n            elif (p_b.distance(point) <= thr):\n                return ((i + 1), p_b)\n            else:\n                return (i, candidate)\n        i = (i + 1)\n    return ((- 1), None)", "docstring": "Finds the closest point in the segment to a given point\n\nArgs:\npoint (:obj:`Point`)\nthr (float, optional): Distance threshold, in meters, to be considered\nthe same point. Defaults to 20.0\nReturns:\n(int, Point): Index of the point. -1 if doesn't exist. A point is given if it's along the segment", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(MemoryStream, self).__init__(*args, **kwargs)", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def wait_for_contract(self, contract_address_hex, timeout=None):\n    contract_address = decode_hex(contract_address_hex)\n    start_time = time.time()\n    result = self._raiden.chain.client.web3.eth.getCode(to_checksum_address(contract_address))\n    current_time = time.time()\n    while (not result):\n        if (timeout and ((start_time + timeout) < current_time)):\n            return False\n        result = self._raiden.chain.client.web3.eth.getCode(to_checksum_address(contract_address))\n        gevent.sleep(0.5)\n        current_time = time.time()\n    return (len(result) > 0)", "docstring": "Wait until a contract is mined\n\nArgs:\ncontract_address_hex (string): hex encoded address of the contract\ntimeout (int): time to wait for the contract to get mined\n\nReturns:\nTrue if the contract got mined, false otherwise", "source": "codesearchnet"}
{"code": "def build_institute(internal_id, display_name, sanger_recipients=None,\n                    coverage_cutoff=None, frequency_cutoff=None):\n    \n\n    LOG.info(\"Building institute %s with display name %s\", internal_id,display_name)\n\n    institute_obj = Institute(\n        internal_id=internal_id, \n        display_name=display_name, \n        sanger_recipients=sanger_recipients,\n        coverage_cutoff = coverage_cutoff,\n        frequency_cutoff = frequency_cutoff\n    )\n    \n    for key in list(institute_obj):\n        if institute_obj[key] is None:\n            institute_obj.pop(key)\n\n    return institute_obj", "docstring": "Build a institute object\n\nArgs:\ninternal_id(str)\ndisplay_name(str)\nsanger_recipients(list(str)): List with email addresses\n\nReturns:\ninstitute_obj(scout.models.Institute)", "source": "juraj-google-style"}
{"code": "def to_string(\n        self,\n        fmt: str = \"medium\",\n        canonicalize: bool = False,\n        decanonicalize: bool = False,\n        orthologize: str = None,\n    ) -> str:\n        \n\n        arg_string = \", \".join([a.to_string(fmt=fmt) for a in self.args])\n\n        if fmt in [\"short\", \"medium\"]:\n            function_name = self.name_short\n        else:\n            function_name = self.name\n\n        return \"{}({})\".format(function_name, arg_string)", "docstring": "Convert AST object to string\n\nArgs:\nfmt (str): short, medium, long formatted BEL statements\nshort = short function and short relation format\nmedium = short function and long relation format\nlong = long function and long relation format\n\nReturns:\nstr: string version of BEL AST", "source": "juraj-google-style"}
{"code": "def stop(self, accountID, **kwargs):\n    return self.create(accountID, order=StopOrderRequest(**kwargs))", "docstring": "Shortcut to create a Stop Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a StopOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def _ExtractPath(response, pathspec_attribute=None):\n    path_specification = response\n    if (pathspec_attribute is not None):\n        if response.HasField(pathspec_attribute):\n            path_specification = response.Get(pathspec_attribute)\n    if path_specification.HasField('pathspec'):\n        path_specification = path_specification.pathspec\n    if path_specification.HasField('path'):\n        path_specification = path_specification.path\n    if isinstance(path_specification, Text):\n        return path_specification\n    return None", "docstring": "Returns the path from a client action response as a string.\n\nArgs:\nresponse: A client action response.\npathspec_attribute: Specifies the field which stores the pathspec.\n\nReturns:\nThe path as a string or None if no path is found.", "source": "codesearchnet"}
{"code": "def register_trainable(name, trainable):\n    \n\n    from ray.tune.trainable import Trainable\n    from ray.tune.function_runner import wrap_function\n\n    if isinstance(trainable, type):\n        logger.debug(\"Detected class for trainable.\")\n    elif isinstance(trainable, FunctionType):\n        logger.debug(\"Detected function for trainable.\")\n        trainable = wrap_function(trainable)\n    elif callable(trainable):\n        logger.warning(\n            \"Detected unknown callable for trainable. Converting to class.\")\n        trainable = wrap_function(trainable)\n\n    if not issubclass(trainable, Trainable):\n        raise TypeError(\"Second argument must be convertible to Trainable\",\n                        trainable)\n    _global_registry.register(TRAINABLE_CLASS, name, trainable)", "docstring": "Register a trainable function or class.\n\nArgs:\nname (str): Name to register.\ntrainable (obj): Function or tune.Trainable class. Functions must\ntake (config, status_reporter) as arguments and will be\nautomatically converted into a class during registration.", "source": "juraj-google-style"}
{"code": "def start(self, extra_args=\"\", tag=\"\"):\n        \n        if self.started:\n            return\n        utils.create_dir(self.log_path)\n        if tag:\n            tag = tag + ','\n        out_file_name = \"IPerfServer,{},{}{}.log\".format(\n            self.port, tag, len(self.log_files))\n        full_out_path = os.path.join(self.log_path, out_file_name)\n        cmd = '%s %s > %s' % (self.iperf_str, extra_args, full_out_path)\n        self.iperf_process = utils.start_standing_subprocess(cmd, shell=True)\n        self.log_files.append(full_out_path)\n        self.started = True", "docstring": "Starts iperf server on specified port.\n\nArgs:\nextra_args: A string representing extra arguments to start iperf\nserver with.\ntag: Appended to log file name to identify logs from different\niperf runs.", "source": "juraj-google-style"}
{"code": "def _super_stack(inputs,\n                 attention_bias,\n                 hparams,\n                 mp,\n                 padding=\"LEFT\"):\n  \n  layers = hparams.layers.strip(\",\").split(\",\")\n  moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(\",\")]\n  if hparams.diet_experts:\n    hsize, = moe_hidden_sizes\n    def _diet_expert(x):\n      return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params())\n    expert_fn = _diet_expert\n  else:\n    expert_fn = expert_utils.ffn_expert_fn(\n        hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)\n  \n  \n  attention_bias_3d = mp(tf.squeeze, attention_bias, 1)\n  mix_size = int(hparams.mix_fraction * hparams.hidden_size)\n  accumulator = inputs\n  x = inputs\n  extra_losses = []\n  for layer_num, layer_type in enumerate(layers):\n    with tf.variable_scope(\"%s_%d\" % (layer_type, layer_num)):\n      tf.logging.info(\"%s_%d\" % (layer_type, layer_num))\n      if layer_type == \"a\":\n        \n        accumulator = mp(tf.add, x, accumulator)\n        x = accumulator\n      elif layer_type == \"n\":\n        \n        x = mp(common_layers.apply_norm,\n               x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)\n      elif layer_type == \"d\":\n        \n        x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)\n      elif layer_type == \"m\":\n        \n        def _split(t):\n          return tuple(tf.split(\n              t, [mix_size, hparams.hidden_size - mix_size], 2))\n        to_mix, to_keep = mp(_split, x)\n        mixed = expert_utils.all_reduce_ring(to_mix, mp)\n        mixed = mp(tf.multiply, mixed, mp.n ** -0.5)\n        x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep)\n      elif layer_type == \"att\":\n        \n        q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,\n               name=\"q_transform\")\n        x = mp(\n            common_attention.scaled_dot_product_attention_simple,\n            q, x, x, attention_bias_3d)\n        x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,\n               name=\"o_transform\")\n      elif layer_type == \"multihead-att\":\n        \n        x = mp(\n            common_attention.multihead_attention,\n            x,\n            None,\n            attention_bias,  \n            hparams.multihead_attention_key_channels or hparams.hidden_size,\n            hparams.multihead_attention_value_channels or hparams.hidden_size,\n            hparams.hidden_size,\n            hparams.multihead_attention_num_heads,\n            hparams.attention_dropout)\n      elif layer_type == \"ffn\":\n        x = mp(\n            common_layers.dense_relu_dense, x,\n            hparams.filter_size, hparams.hidden_size)\n      elif layer_type == \"conv\":\n        \n        x = mp(\n            common_layers.conv1d,\n            x,\n            hparams.hidden_size,\n            hparams.kernel_height,\n            activation=tf.nn.relu,\n            padding=padding,\n        )\n      elif layer_type == \"moe\":\n        \n        x, loss = mp(\n            expert_utils.local_moe,\n            x,\n            train=hparams.mode == tf.estimator.ModeKeys.TRAIN,\n            expert_fn=expert_fn,\n            num_experts=hparams.moe_num_experts,\n            k=hparams.moe_k,\n            loss_coef=hparams.moe_loss_coef)\n        extra_losses.extend(loss)\n      else:\n        assert False, \"unknown sublayer %s\" % layer_type\n  if extra_losses:\n    extra_loss = tf.add_n(extra_losses)\n  else:\n    extra_loss = None\n  return x, extra_loss", "docstring": "A stack of super_lm layers.\n\nArgs:\ninputs: a list of Tensors\nattention_bias: list of bias Tensor for self-attention\n(see common_attention.attention_bias())\nhparams: hyperparameters for model\nmp: a Parallelism object\npadding: a string\n\nReturns:\ny: a list of Tensors\nextra_loss: an optional scalar", "source": "juraj-google-style"}
{"code": "def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B):\n    \n    pos_A_in_B = pose_A_in_B[:3, 3]\n    rot_A_in_B = pose_A_in_B[:3, :3]\n    skew_symm = _skew_symmetric_translation(pos_A_in_B)\n    vel_B = rot_A_in_B.dot(vel_A) + skew_symm.dot(rot_A_in_B.dot(ang_vel_A))\n    ang_vel_B = rot_A_in_B.dot(ang_vel_A)\n    return vel_B, ang_vel_B", "docstring": "Converts linear and angular velocity of a point in frame A to the equivalent in frame B.\n\nArgs:\nvel_A: 3-dim iterable for linear velocity in A\nang_vel_A: 3-dim iterable for angular velocity in A\npose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B\n\nReturns:\nvel_B, ang_vel_B: two numpy arrays of shape (3,) for the velocities in B", "source": "juraj-google-style"}
{"code": "def verify_gmt_integrity(gmt):\n    \n\n    \n    set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt]\n    assert len(set(set_ids)) == len(set_ids), (\n        \"Set identifiers should be unique. set_ids: {}\".format(set_ids))", "docstring": "Make sure that set ids are unique.\n\nArgs:\ngmt (GMT object): list of dicts\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def GetConfig(self, request, global_params=None):\n    config = self.GetMethodConfig('GetConfig')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Get encoded debug configuration for component. Not cacheable.\n\nArgs:\nrequest: (DataflowProjectsJobsDebugGetConfigRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(GetDebugConfigResponse) The response message.", "source": "github-repos"}
{"code": "def setData(self, column, role, value):\n        \n        assert isinstance(column, int)\n        assert isinstance(role, int)\n\n        \n        \n        if isinstance(value, (QtWidgets.QComboBox, QtWidgets.QCheckBox)):\n            self.treeWidget().setCurrentItem(self)\n\n        \n        if role == 2 and column == 1:\n\n            if isinstance(value, str):\n                value = self.cast_type(value) \n\n            if isinstance(value, QtCore.QVariant):\n                value = self.cast_type(value.toString())  \n\n            if isinstance(value, QtWidgets.QComboBox):\n                value = self.cast_type(value.currentText())\n\n            if isinstance(value, QtWidgets.QCheckBox):\n                value = bool(int(value.checkState()))  \n\n            \n            self.value = value\n\n        elif column == 0:\n            \n            value = self.name\n\n        if value is None:\n            value = self.value\n\n        \n        \n        if not isinstance(value, bool):\n            super(B26QTreeItem, self).setData(column, role, value)\n\n        else:\n            self.emitDataChanged()", "docstring": "if value is valid sets the data to value\nArgs:\ncolumn: column of item\nrole: role of item (see Qt doc)\nvalue: value to be set", "source": "juraj-google-style"}
{"code": "def isValidUnit(self, w):\n    bad = set(['point', 'a'])\n    if (w in bad):\n        return False\n    try:\n        pq.Quantity(0.0, w)\n        return True\n    except:\n        return (w == '/')", "docstring": "Checks if a string represents a valid quantities unit.\n\nArgs:\nw (str): A string to be tested against the set of valid\nquantities units.\n\nReturns:\nTrue if the string can be used as a unit in the quantities\nmodule.", "source": "codesearchnet"}
{"code": "def plot_stacked_hist(self, key='wall_time', nmax=5, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax=ax)\n    mpi_rank = '0'\n    timers = self.timers(mpi_rank=mpi_rank)\n    n = len(timers)\n    (names, values) = ([], [])\n    rest = np.zeros(n)\n    for (idx, sname) in enumerate(self.section_names(ordkey=key)):\n        sections = self.get_sections(sname)\n        svals = np.asarray([s.__dict__[key] for s in sections])\n        if (idx < nmax):\n            names.append(sname)\n            values.append(svals)\n        else:\n            rest += svals\n    names.append(('others (nmax=%d)' % nmax))\n    values.append(rest)\n    ind = np.arange(n)\n    width = 0.35\n    colors = (nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm'])\n    bars = []\n    bottom = np.zeros(n)\n    for (idx, vals) in enumerate(values):\n        color = colors[idx]\n        bar = ax.bar(ind, vals, width, color=color, bottom=bottom)\n        bars.append(bar)\n        bottom += vals\n    ax.set_ylabel(key)\n    ax.set_title(('Stacked histogram with the %d most important sections' % nmax))\n    ticks = (ind + (width / 2.0))\n    labels = [('MPI=%d, OMP=%d' % (t.mpi_nprocs, t.omp_nthreads)) for t in timers]\n    ax.set_xticks(ticks)\n    ax.set_xticklabels(labels, rotation=15)\n    ax.legend([bar[0] for bar in bars], names, loc='best')\n    return fig", "docstring": "Plot stacked histogram of the different timers.\n\nArgs:\nkey: Keyword used to extract data from the timers. Only the first `nmax`\nsections with largest value are shown.\nnmax: Maximum number of sections to show. Other entries are grouped together\nin the `others` section.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n`matplotlib` figure", "source": "codesearchnet"}
{"code": "def _set_input(el, value):\n        \n        if isinstance(value, dict):\n            el.value = value[\"val\"]\n        elif type(value) in [list, tuple]:\n            el.value = \", \".join(item[\"val\"] for item in value)\n        else:\n            el.value = value", "docstring": "Set content of given `el` to `value`.\n\nArgs:\nel (obj): El reference to input you wish to set.\nvalue (obj/list): Value to which the `el` will be set.", "source": "juraj-google-style"}
{"code": "def cylindrical_vert(script, radius=1.0, inside=True):\n    \n    if inside:\n        function = 'sqrt(x^2+y^2)<={}'.format(radius)\n    else:\n        function = 'sqrt(x^2+y^2)>={}'.format(radius)\n    vert_function(script, function=function)\n    return None", "docstring": "Select all vertices within a cylindrical radius\n\nArgs:\nradius (float): radius of the cylinder\ninside (bool): if True, select vertices inside the radius; if False,\nselect vertices outside the radius\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def update(self, forecasts, observations):\n    for (t, threshold) in enumerate(self.thresholds):\n        tp = np.count_nonzero(((forecasts >= threshold) & (observations >= self.obs_threshold)))\n        fp = np.count_nonzero(((forecasts >= threshold) & (observations < self.obs_threshold)))\n        fn = np.count_nonzero(((forecasts < threshold) & (observations >= self.obs_threshold)))\n        tn = np.count_nonzero(((forecasts < threshold) & (observations < self.obs_threshold)))\n        self.contingency_tables.iloc[t] += [tp, fp, fn, tn]", "docstring": "Update the ROC curve with a set of forecasts and observations\n\nArgs:\nforecasts: 1D array of forecast values\nobservations: 1D array of observation values.", "source": "codesearchnet"}
{"code": "def recv(self, request_id):\n    log.debug(('Reading response %d from Kafka' % request_id))\n    if (not self._sock):\n        self.reinit()\n    resp = self._read_bytes(4)\n    (size,) = struct.unpack('>i', resp)\n    resp = self._read_bytes(size)\n    return resp", "docstring": "Get a response packet from Kafka\n\nArguments:\nrequest_id: can be any int (only used for debug logging...)\n\nReturns:\nstr: Encoded kafka packet response from server", "source": "codesearchnet"}
{"code": "def is_function_or_method(obj):\n    \n    return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj)", "docstring": "Check if an object is a function or method.\n\nArgs:\nobj: The Python object in question.\n\nReturns:\nTrue if the object is an function or method.", "source": "juraj-google-style"}
{"code": "def validate(self):\n    if self.value:\n        if (not isinstance(self.value, bool)):\n            raise TypeError('expected: {0}, observed: {1}'.format(bool, type(self.value)))", "docstring": "Verify that the value of the Boolean object is valid.\n\nRaises:\nTypeError: if the value is not of type bool.", "source": "codesearchnet"}
{"code": "def paint(self):\n    snippet = {'heatmap-radius': VectorStyle.get_style_value(self.radius), 'heatmap-opacity': VectorStyle.get_style_value(self.opacity), 'heatmap-color': VectorStyle.get_style_value(self.color), 'heatmap-intensity': VectorStyle.get_style_value(self.intensity), 'heatmap-weight': VectorStyle.get_style_value(self.weight)}\n    return snippet", "docstring": "Renders a javascript snippet suitable for use as a mapbox-gl heatmap paint entry\n\nReturns:\nA dict that can be converted to a mapbox-gl javascript paint snippet", "source": "codesearchnet"}
{"code": "def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):\n    if not isinstance(grid_pinpoints, list):\n        raise TypeError('grid_pinpoints should be a list of tuples or lists')\n    if not isinstance(image_size, (list, tuple)):\n        if not isinstance(image_size, (torch.Tensor, np.ndarray)):\n            raise TypeError(f'image_size invalid type {type(image_size)} with value {image_size}')\n        image_size = image_size.tolist()\n    best_resolution = select_best_resolution(image_size, grid_pinpoints)\n    height, width = best_resolution\n    num_patches = 0\n    for i in range(0, height, patch_size):\n        for j in range(0, width, patch_size):\n            num_patches += 1\n    num_patches += 1\n    return num_patches", "docstring": "Calculate the number of patches after the preprocessing for images of any resolution.\n\nArgs:\nimage_size (`torch.LongTensor` or `np.ndarray` or `Tuple[int, int]`):\nThe size of the input image in the format (height, width). ?\ngrid_pinpoints (`List`):\nA list containing possible resolutions. Each item in the list should be a tuple or list\nof the form `(height, width)`.\npatch_size (`int`):\nThe size of each image patch.\n\nReturns:\nint: the number of patches", "source": "github-repos"}
{"code": "def assert_count_equal(first, second, msg=None, extras=None):\n    _call_unittest_assertion(_pyunit_proxy.assertCountEqual, first, second, msg=msg, extras=extras)", "docstring": "Asserts that two iterables have the same elements, the same number of\ntimes, without regard to order.\n\nSimilar to assert_equal(Counter(list(first)), Counter(list(second))).\n\nArgs:\nfirst: The first iterable to compare.\nsecond: The second iterable to compare.\nmsg: A string that adds additional info about the failure.\nextras: An optional field for extra information to be included in\ntest result.\n\nExample:\nassert_count_equal([0, 1, 1], [1, 0, 1]) passes the assertion.\nassert_count_equal([0, 0, 1], [0, 1]) raises an assertion error.", "source": "github-repos"}
{"code": "def decode_schedule(string):\n    splits = string.split()\n    steps = [int(x[1:]) for x in splits[1:] if (x[0] == '@')]\n    pmfs = np.reshape([float(x) for x in splits[1:] if (x[0] != '@')], [len(steps), (- 1)])\n    return (splits[0], tuplize(steps), tuplize(pmfs))", "docstring": "Decodes a string into a schedule tuple.\n\nArgs:\nstring: The string encoding of a schedule tuple.\n\nReturns:\nA schedule tuple, see encode_schedule for details.", "source": "codesearchnet"}
{"code": "def OverwriteAndClose(self, compressed_data, size):\n    self.Set(self.Schema.CONTENT(compressed_data))\n    self.Set(self.Schema.SIZE(size))\n    super(AFF4MemoryStreamBase, self).Close()", "docstring": "Directly overwrite the current contents.\n\nReplaces the data currently in the stream with compressed_data,\nand closes the object. Makes it possible to avoid recompressing\nthe data.\nArgs:\ncompressed_data: The data to write, must be zlib compressed.\nsize: The uncompressed size of the data.", "source": "codesearchnet"}
{"code": "def _wait_for_response(self, requests):\n    failed_requests = []\n    responses_for_requests = OrderedDict.fromkeys(requests)\n    for retry in range(self._max_retry):\n        try:\n            logging.debug('Try \n            self._availability_limiter.map_with_retries(requests, responses_for_requests)\n            failed_requests = []\n            for (request, response) in responses_for_requests.items():\n                if (self._drop_404s and (response is not None) and (response.status_code == 404)):\n                    logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url))\n                elif (not response):\n                    failed_requests.append((request, response))\n            if (not failed_requests):\n                break\n            logging.warning('Try \n            requests = [fr[0] for fr in failed_requests]\n        except InvalidRequestError:\n            raise\n        except Exception as e:\n            logging.exception('Try \n            pass\n    if failed_requests:\n        logging.warning('Still {0} failed request(s) after {1} retries:'.format(len(failed_requests), self._max_retry))\n        for (failed_request, failed_response) in failed_requests:\n            if (failed_response is not None):\n                failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace')\n                logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(failed_request.url, failed_response.status_code, failed_response_text))\n            else:\n                logging.warning('Request to {0} failed with None response.'.format(failed_request.url))\n    return list(responses_for_requests.values())", "docstring": "Issues a batch of requests and waits for the responses.\nIf some of the requests fail it will retry the failed ones up to `_max_retry` times.\n\nArgs:\nrequests - A list of requests\nReturns:\nA list of `requests.models.Response` objects\nRaises:\nInvalidRequestError - if any of the requests returns \"403 Forbidden\" response", "source": "codesearchnet"}
{"code": "def trace_function(args=None, kwargs=None, tracing_options=None):\n    if not tracing_options:\n        tracing_options = TracingOptions()\n    args = args if args else ()\n    kwargs = kwargs if kwargs else {}\n    if tracing_options.input_signature and (args or kwargs):\n        bound_args = function_type_utils.bind_function_inputs(args, kwargs, tracing_options.polymorphic_type, tracing_options.default_values)\n        args, kwargs = (bound_args.args, bound_args.kwargs)\n    with tracing_options.lock or contextlib.nullcontext():\n        if tracing_options.input_signature and (not args) and (not kwargs):\n            args = tracing_options.input_signature\n            kwargs = {}\n        concrete_function = _maybe_define_function(args, kwargs, tracing_options)\n    if not tracing_options.bind_graph_to_function:\n        concrete_function._garbage_collector.release()\n    return concrete_function", "docstring": "Returns a `ConcreteFunction` specialized to inputs and execution context.\n\nCompiles a Graph corresponding to the Python function logic and uses that\nto generate a differentiable ConcreteFunction.\n\nArgs:\nargs: inputs to specialize on. Can be concrete values (e.g. 1) or\n`tf.Tensor` or `tf.TensorSpec`.\nkwargs: keyword inputs to specialize on. Concrete values (e.g. 1) or\n`tf.Tensor` or `tf.TensorSpec`.\ntracing_options: TracingOptions for the tracing process.", "source": "github-repos"}
{"code": "def format_datetime(self, time_input, tz=None, date_format=None):\n    dt_value = self.any_to_datetime(time_input, tz)\n    if (date_format == '%s'):\n        dt_value = calendar.timegm(dt_value.timetuple())\n    elif date_format:\n        dt_value = dt_value.strftime(date_format)\n    else:\n        dt_value = dt_value.isoformat()\n    return dt_value", "docstring": "Return timestamp from multiple input formats.\n\nFormats:\n\n#. Human Input (e.g 30 days ago, last friday)\n#. ISO 8601 (e.g. 2017-11-08T16:52:42Z)\n#. Loose Date format (e.g. 2017 12 25)\n#. Unix Time/Posix Time/Epoch Time (e.g. 1510686617 or 1510686617.298753)\n\n.. note:: To get a unix timestamp format use the strftime format **%s**. Python\ndoes not natively support **%s**, however this method has support.\n\nArgs:\ntime_input (string): The time input string (see formats above).\ntz (string): The time zone for the returned data.\ndate_format (string): The strftime format to use, ISO by default.\n\nReturns:\n(string): Formatted datetime string.", "source": "codesearchnet"}
{"code": "def GetUpdates(self, gcs_client, bucket_name, obj, since):\n    bucket = gcs_client.bucket(bucket_name)\n    blob = bucket.get_blob(obj)\n    if blob is None:\n        self.log.error('GCS object gs:\n        raise error.SourceUnavailable('unable to download object from GCS.')\n    if since and timestamps.FromDateTimeToTimestamp(blob.updated) < since:\n        return []\n    data_map = self.GetMap(cache_info=blob.open())\n    data_map.SetModifyTimestamp(timestamps.FromDateTimeToTimestamp(blob.updated))\n    return data_map", "docstring": "Gets updates from a source.\n\nArgs:\ngcs_client: initialized gcs client\nbucket_name: gcs bucket name\nobj: object with the data\nsince: a timestamp representing the last change (None to force-get)\n\nReturns:\nA tuple containing the map of updates and a maximum timestamp", "source": "github-repos"}
{"code": "def device_function(self, var):\n    \n    if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):\n      tf.logging.debug('Place {} on last device: {}.'.format(\n          var.name, self._last_device))\n      return self._last_device\n\n    shape = tf.TensorShape(var.get_attr('shape'))\n    assert shape.num_elements() is not None\n\n    size = var.get_attr('dtype').size\n    mem, device = heapq.heappop(self._mem_device_heap)\n    mem += shape.num_elements() * size\n    heapq.heappush(self._mem_device_heap, (mem, device))\n    tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(\n        var.name, device, mem))\n    self._last_device = device\n\n    return device", "docstring": "Choose a device for the input variable.\n\nArgs:\nvar: an Variable.\n\nReturns:\nThe device for placing the var.", "source": "juraj-google-style"}
{"code": "def _filter_headers(self):\n    headers = {}\n    for user in self.usernames:\n        headers['fedora_messaging_user_{}'.format(user)] = True\n    for package in self.packages:\n        headers['fedora_messaging_rpm_{}'.format(package)] = True\n    for container in self.containers:\n        headers['fedora_messaging_container_{}'.format(container)] = True\n    for module in self.modules:\n        headers['fedora_messaging_module_{}'.format(module)] = True\n    for flatpak in self.flatpaks:\n        headers['fedora_messaging_flatpak_{}'.format(flatpak)] = True\n    return headers", "docstring": "Add headers designed for filtering messages based on objects.\n\nReturns:\ndict: Filter-related headers to be combined with the existing headers", "source": "codesearchnet"}
{"code": "def parse_readable_size_str(size_str):\n    size_str = size_str.strip()\n    if size_str.endswith('B'):\n        size_str = size_str[:-1]\n    if size_str.isdigit():\n        return int(size_str)\n    elif size_str.endswith('k'):\n        return int(float(size_str[:-1]) * 1024)\n    elif size_str.endswith('M'):\n        return int(float(size_str[:-1]) * 1048576)\n    elif size_str.endswith('G'):\n        return int(float(size_str[:-1]) * 1073741824)\n    else:\n        raise ValueError('Failed to parsed human-readable byte size str: \"%s\"' % size_str)", "docstring": "Convert a human-readable str representation to number of bytes.\n\nOnly the units \"kB\", \"MB\", \"GB\" are supported. The \"B character at the end\nof the input `str` may be omitted.\n\nArgs:\nsize_str: (`str`) A human-readable str representing a number of bytes\n(e.g., \"0\", \"1023\", \"1.1kB\", \"24 MB\", \"23GB\", \"100 G\".\n\nReturns:\n(`int`) The parsed number of bytes.\n\nRaises:\nValueError: on failure to parse the input `size_str`.", "source": "github-repos"}
{"code": "def _add_countriesdata(cls, iso3, country):\n        \n        \n        countryname = country.get('\n        cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3\n        iso2 = country.get('\n        if iso2:\n            cls._countriesdata['iso2iso3'][iso2] = iso3\n            \n            cls._countriesdata['iso2iso3'][iso3] = iso2\n        m49 = country.get('\n        if m49:\n            m49 = int(m49)\n            cls._countriesdata['m49iso3'][m49] = iso3\n            \n            cls._countriesdata['m49iso3'][iso3] = m49\n        cls._countriesdata['aliases'][iso3] = re.compile(country.get('\n        regionname = country.get('\n        sub_regionname = country.get('\n        intermediate_regionname = country.get('\n        regionid = country.get('\n        if regionid:\n            regionid = int(regionid)\n        sub_regionid = country.get('\n        if sub_regionid:\n            sub_regionid = int(sub_regionid)\n        intermediate_regionid = country.get('\n        if intermediate_regionid:\n            intermediate_regionid = int(intermediate_regionid)\n\n        \n        def add_country_to_set(colname, idval, iso3):\n            value = cls._countriesdata[colname].get(idval)\n            if value is None:\n                value = set()\n                cls._countriesdata['regioncodes2countries'][idval] = value\n            value.add(iso3)\n\n        if regionname:\n            add_country_to_set('regioncodes2countries', regionid, iso3)\n            cls._countriesdata['regioncodes2names'][regionid] = regionname\n            cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid\n        if sub_regionname:\n            add_country_to_set('regioncodes2countries', sub_regionid, iso3)\n            cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname\n            cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid\n        if intermediate_regionname:\n            add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)\n            cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname\n            cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \\\n                intermediate_regionid", "docstring": "Set up countries data from data in form provided by UNStats and World Bank\n\nArgs:\niso3 (str): ISO3 code for country\ncountry (hxl.Row): Country information\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def create_model(self, vpc_config_override=VPC_CONFIG_DEFAULT):\n        \n        return KNNModel(self.model_data, self.role, sagemaker_session=self.sagemaker_session,\n                        vpc_config=self.get_vpc_config(vpc_config_override))", "docstring": "Return a :class:`~sagemaker.amazon.KNNModel` referencing the latest\ns3 model data produced by this Estimator.\n\nArgs:\nvpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.\nDefault: use subnets and security groups from this Estimator.\n* 'Subnets' (list[str]): List of subnet ids.\n* 'SecurityGroupIds' (list[str]): List of security group ids.", "source": "juraj-google-style"}
{"code": "def next_counter(start=0, step=1):\n    count_gen = it.count(start, step)\n    next_ = functools.partial(six.next, count_gen)\n    return next_", "docstring": "Args:\nstart (int): (default = 0)\nstep (int): (default = 1)\n\nReturns:\nfunc: next_\n\nCommandLine:\npython -m utool.util_iter --test-next_counter\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_iter import *  # NOQA\n>>> start = 1\n>>> step = 1\n>>> next_ = next_counter(start, step)\n>>> result = str([next_(), next_(), next_()])\n>>> print(result)\n[1, 2, 3]", "source": "juraj-google-style"}
{"code": "def apply_and_name(self, aggregator):\n        \n        reduced_df = self._apply(aggregator)\n        if len(self.names) != len(reduced_df.columns):\n            raise IndexError(\"ColumnFunction creates more columns than it has names for.\")\n        reduced_df.columns = self.names\n        return reduced_df", "docstring": "Fetches the row-aggregated input columns for this ColumnFunction.\n\nArgs:\naggregator (Aggregator)\n\nReturns:\npd.DataFrame: The dataframe has columns with names self.names\nthat were created by this ColumnFunction,\nand is indexed by the index that was passed to\naggregator.aggregate(index).", "source": "juraj-google-style"}
{"code": "def __init__(self, structuring_element=None):\n        \n        if structuring_element is None:\n            self.strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n        else:\n            self.strel = structuring_element\n        self.fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()", "docstring": "Initializes the `BackgroundSubtractorGMG`.\n\n*Note:* Requires OpenCV to be built with `--contrib` as it uses the\n`bgsegm` package.\n\nUnless a custom `structuring_element` is specified, it uses:\n`cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))`\n\nArgs:\nstructuring_element: The structuring element.", "source": "juraj-google-style"}
{"code": "def _get_tensor_by_tf_output(self, tf_output) -> tensor_lib.Tensor:\n    op = self._get_operation_by_tf_operation(tf_output.oper)\n    return op.outputs[tf_output.index]", "docstring": "Returns the `Tensor` representing `tf_output`.\n\nNote that there is only one such `Tensor`, i.e. multiple calls to this\nfunction with the same TF_Output value will always return the same `Tensor`\nobject.\n\nArgs:\ntf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).\n\nReturns:\nThe `Tensor` that represents `tf_output`.", "source": "github-repos"}
{"code": "def get_dG_at_T(seq, temp):\n    r_cal = (scipy.constants.R / scipy.constants.calorie)\n    seq = ssbio.protein.sequence.utils.cast_to_str(seq)\n    oobatake = {}\n    for t in range(20, 51):\n        oobatake[t] = calculate_oobatake_dG(seq, t)\n    stable = [i for i in oobatake.values() if (i > 0)]\n    if (len(stable) == 0):\n        dG = (0.238846 * calculate_dill_dG(len(seq), temp))\n        method = 'Dill'\n    else:\n        dG = oobatake[temp]\n        method = 'Oobatake'\n    keq = math.exp((((- 1) * dG) / (r_cal * (temp + 273.15))))\n    return (dG, keq, method)", "docstring": "Predict dG at temperature T, using best predictions from Dill or Oobatake methods.\n\nArgs:\nseq (str, Seq, SeqRecord): Amino acid sequence\ntemp (float): Temperature in degrees C\n\nReturns:\n(tuple): tuple containing:\n\ndG (float) Free energy of unfolding dG (cal/mol)\nkeq (float): Equilibrium constant Keq\nmethod (str): Method used to calculate", "source": "codesearchnet"}
{"code": "def package_info(package, image=None):\n    cmd = ['DISM', '/English', ('/Image:{0}'.format(image) if image else '/Online'), '/Get-PackageInfo']\n    if ('~' in package):\n        cmd.append('/PackageName:{0}'.format(package))\n    else:\n        cmd.append('/PackagePath:{0}'.format(package))\n    out = __salt__['cmd.run_all'](cmd)\n    if (out['retcode'] == 0):\n        ret = dict()\n        for line in six.text_type(out['stdout']).splitlines():\n            if (' : ' in line):\n                info = line.split(' : ')\n                if (len(info) < 2):\n                    continue\n                ret[info[0]] = info[1]\n    else:\n        ret = out\n    return ret", "docstring": "Display information about a package\n\nArgs:\npackage (str): The full path to the package. Can be either a .cab file\nor a folder. Should point to the original source of the package, not\nto where the file is installed. You cannot use this command to get\npackage information for .msu files\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\n\nReturns:\ndict: A dictionary containing the results of the command\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.package_info C:\\\\packages\\\\package.cab", "source": "codesearchnet"}
{"code": "def get_num_bytes(self, batch: Sequence[Union[tf.Tensor, torch.Tensor]]) -> int:\n    if self._framework == 'tf':\n        return sum((sys.getsizeof(element) for element in batch))\n    else:\n        return sum((el.element_size() for tensor in batch for el in tensor.values()))", "docstring": "Returns:\nThe number of bytes of data for the Tensors batch.", "source": "github-repos"}
{"code": "class DepthAnythingNeck(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.reassemble_stage = DepthAnythingReassembleStage(config)\n        self.convs = nn.ModuleList()\n        for channel in config.neck_hidden_sizes:\n            self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))\n        self.fusion_stage = DepthAnythingFeatureFusionStage(config)\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n        \n        if not isinstance(hidden_states, (tuple, list)):\n            raise TypeError('hidden_states should be a tuple or list of tensors')\n        if len(hidden_states) != len(self.config.neck_hidden_sizes):\n            raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n        hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n        features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n        output = self.fusion_stage(features)\n        return output", "docstring": "DepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as\ninput and produces another list of tensors as output. For DepthAnything, it includes 2 stages:\n\n* DepthAnythingReassembleStage\n* DepthAnythingFeatureFusionStage.\n\nArgs:\nconfig (dict): config dict.", "source": "github-repos"}
{"code": "def generate_tree_path(fileDigest, depth):\n        \n        if(depth < 0):\n            raise Exception(\"depth level can not be negative\")\n        if(os.path.split(fileDigest)[1] != fileDigest):\n            raise Exception(\"fileDigest cannot contain path separator\")\n\n        \n        min = (2**(depth + 1)) - 1\n        if(len(fileDigest) < min):\n            raise Exception(\"fileDigest too short for the given depth\")\n\n        path = \"\"\n        index = 0\n        for p in range(1, depth + 1):\n            jump = 2**p\n            path = os.path.join(path, fileDigest[index:index + jump])\n            index += jump\n        path = os.path.join(path, fileDigest[index:])\n        return path", "docstring": "Generate a relative path from the given fileDigest\nrelative path has a numbers of directories levels according to @depth\n\nArgs:\nfileDigest -- digest for which the relative path will be generate\ndepth -- number of levels to use in relative path generation\nReturns:\nrelative path for the given digest", "source": "juraj-google-style"}
{"code": "def launch_job(job_name, cmd=None,\n               code_dir=None, excludes='*.ipynb .git .ipynb_checkpoints', dependencies=tuple(),\n               queue='john', image='codalab/python', memory='18g',\n               debug=False, tail=False):\n    \n    print 'Remember to set up SSH tunnel and LOG IN through the command line before calling this.'\n\n    def execute(cmd):\n        return shell(cmd, verbose=True, debug=debug)\n\n    if code_dir:\n        execute('cl up -n code -w {} {} -x {}'.format(worksheet, code_dir, excludes))\n\n    options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-memory {}'.format(\n        job_name, worksheet, queue, image, memory)\n    dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies])\n    cmd = \"cl run {} {} '{}'\".format(options, dep_str, cmd)\n    if tail:\n        cmd += ' -t'\n    execute(cmd)", "docstring": "Launch a job on CodaLab (optionally upload code that the job depends on).\n\nArgs:\njob_name: name of the job\ncmd: command to execute\ncode_dir: path to code folder. If None, no code is uploaded.\nexcludes: file types to exclude from the upload\ndependencies: list of other bundles that we depend on\ndebug: if True, prints SSH commands, but does not execute them\ntail: show the streaming output returned by CodaLab once it launches the job", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListTraces = channel.unary_unary(\n            \"/google.devtools.cloudtrace.v1.TraceService/ListTraces\",\n            request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.ListTracesRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.ListTracesResponse.FromString,\n        )\n        self.GetTrace = channel.unary_unary(\n            \"/google.devtools.cloudtrace.v1.TraceService/GetTrace\",\n            request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.GetTraceRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.Trace.FromString,\n        )\n        self.PatchTraces = channel.unary_unary(\n            \"/google.devtools.cloudtrace.v1.TraceService/PatchTraces\",\n            request_serializer=google_dot_devtools_dot_cloudtrace__v1_dot_proto_dot_trace__pb2.PatchTracesRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 asynchronous_correlation_value=None,\n                 cancellation_result=None):\n        \n        super(CancelResponsePayload, self).__init__(\n            enums.Tags.RESPONSE_PAYLOAD\n        )\n\n        self._asynchronous_correlation_value = None\n        self._cancellation_result = None\n\n        self.asynchronous_correlation_value = asynchronous_correlation_value\n        self.cancellation_result = cancellation_result", "docstring": "Construct a Cancel response payload struct.\n\nArgs:\nasynchronous_correlation_value (bytes): The ID of a pending\noperation that was cancelled, in bytes. Optional, defaults to\nNone.\ncancellation_result (enum): A CancellationResult enumeration\nspecifying the result of canceling the operation. Optional,\ndefaults to None.", "source": "juraj-google-style"}
{"code": "def should_close(http_version, connection_field):\n    connection_field = (connection_field or '').lower()\n    if (http_version == 'HTTP/1.0'):\n        return (connection_field.replace('-', '') != 'keepalive')\n    else:\n        return (connection_field == 'close')", "docstring": "Return whether the connection should be closed.\n\nArgs:\nhttp_version (str): The HTTP version string like ``HTTP/1.0``.\nconnection_field (str): The value for the ``Connection`` header.", "source": "codesearchnet"}
{"code": "def DummyMethod(name, *params):\n\n    def make_param(param):\n        return pytd.Parameter(param, type=pytd.AnythingType(), kind=pytd.ParameterKind.REGULAR, optional=False, mutated_type=None)\n    sig = pytd.Signature(tuple((make_param(param) for param in params)), starargs=None, starstarargs=None, return_type=pytd.AnythingType(), exceptions=(), template=())\n    return pytd.Function(name=name, signatures=(sig,), kind=pytd.MethodKind.METHOD, flags=pytd.MethodFlag.NONE)", "docstring": "Create a simple method using only \"Any\"s as types.\n\nArguments:\nname: The name of the method\n*params: The parameter names.\n\nReturns:\nA pytd.Function.", "source": "github-repos"}
{"code": "def save_pcoder(self, pcoder, *labels):\n    raise NotImplementedError", "docstring": "Saves pcoder for given PCollection.\n\nCorrect reading of PCollection from Cache requires PCoder to be known.\nThis method saves desired PCoder for PCollection that will subsequently\nbe used by sink(...), source(...), and, most importantly, read(...) method.\nThe latter must be able to read a PCollection written by Beam using\nnon-Beam IO.\n\nArgs:\npcoder: A PCoder to be used for reading and writing a PCollection.\n*labels: List of labels for PCollection instance.", "source": "github-repos"}
{"code": "def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting pooling ...')\n    if (names == 'short'):\n        tf_name = ('P' + random_string(7))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    if ('kernel_shape' in params):\n        (height, width) = params['kernel_shape']\n    else:\n        (height, width) = params['kernel_size']\n    if ('strides' in params):\n        (stride_height, stride_width) = params['strides']\n    else:\n        (stride_height, stride_width) = params['stride']\n    if ('pads' in params):\n        (padding_h, padding_w, _, _) = params['pads']\n    else:\n        (padding_h, padding_w) = params['padding']\n    input_name = inputs[0]\n    pad = 'valid'\n    if (((height % 2) == 1) and ((width % 2) == 1) and ((height \n        pad = 'same'\n    else:\n        padding_name = (tf_name + '_pad')\n        padding_layer = keras.layers.ZeroPadding2D(padding=(padding_h, padding_w), name=padding_name)\n        layers[padding_name] = padding_layer(layers[inputs[0]])\n        input_name = padding_name\n    pooling = keras.layers.AveragePooling2D(pool_size=(height, width), strides=(stride_height, stride_width), padding=pad, name=tf_name, data_format='channels_first')\n    layers[scope_name] = pooling(layers[input_name])", "docstring": "Convert Average pooling.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def log(self, message):\n    self._buffer.append([datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.000%z'), message])\n    if not self.buffered or (self._flush_threshold and len(self._buffer) >= self._flush_threshold):\n        self.flush()", "docstring": "Logs a message to the Bulkdozer feed's Log tab.\n\nArgs:\nmessage: The message to log to the feed, it will be appended at the bottom\nof the log, after the last message that was written.", "source": "github-repos"}
{"code": "def GetAnalyzersInformation(cls):\n    analyzer_information = []\n    for (_, analyzer_class) in cls.GetAnalyzers():\n        description = getattr(analyzer_class, 'DESCRIPTION', '')\n        analyzer_information.append((analyzer_class.NAME, description))\n    return analyzer_information", "docstring": "Retrieves the analyzers information.\n\nReturns:\nlist[tuple]: containing:\n\nstr: analyzer name.\nstr: analyzer description.", "source": "codesearchnet"}
{"code": "def update_offset(self, new_offset):\n        \n        self.offset = new_offset\n        self.data_points = self._data_points[self.offset:]\n        self.timestamps = self._timestamps[self.offset:]", "docstring": "Updates how many data points to skip in caculations.\n\nAlways use this function to update offset instead of directly setting\nself.offset.\n\nArgs:\nnew_offset: The new offset.", "source": "juraj-google-style"}
{"code": "def make_path_writable(path):\n    from rez.config import config\n    try:\n        orig_mode = os.stat(path).st_mode\n        new_mode = orig_mode\n        if (config.make_package_temporarily_writable and (not os.access(path, os.W_OK))):\n            new_mode = (orig_mode | stat.S_IWUSR)\n        if (new_mode != orig_mode):\n            os.chmod(path, new_mode)\n    except OSError:\n        orig_mode = None\n        new_mode = None\n    try:\n        (yield)\n    finally:\n        if (new_mode != orig_mode):\n            os.chmod(path, orig_mode)", "docstring": "Temporarily make `path` writable, if possible.\n\nDoes nothing if:\n- config setting 'make_package_temporarily_writable' is False;\n- this can't be done (eg we don't own `path`).\n\nArgs:\npath (str): Path to make temporarily writable", "source": "codesearchnet"}
{"code": "def write_int64(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sq' % endian, value)", "docstring": "Pack the value as a signed integer and write 8 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def create_composite_loss(self,\n                            losses,\n                            regularize=True,\n                            include_marked=True,\n                            name='cost'):\n    \n    all_losses = []\n    if losses:\n      all_losses.extend(losses)\n    if include_marked:\n      all_losses.extend(self.marked_losses)\n    if not all_losses:\n      raise ValueError('No losses specified!')\n    if regularize:\n      all_losses.extend(self.regularization_losses)\n    with self._g.as_default():\n      result = tf.add_n(all_losses, name=name)\n      self.add_scalar_summary(result)\n      return result", "docstring": "Creates a loss that is the sum of all specified losses.\n\nArgs:\nlosses: A sequence of losses to include.\nregularize: Whether or not to include regularization losses.\ninclude_marked: Whether or not to use the marked losses.\nname: The name for this variable.\nReturns:\nA single tensor that is the sum of all losses.\nRaises:\nValueError: if there are no losses.", "source": "juraj-google-style"}
{"code": "def _maybe_truncate_traceback(traceback):\n    if len(traceback) > MAX_TRACEBACK_LENGTH:\n        return traceback[:MAX_TRACEBACK_LENGTH - 2] + [_ELLIPSIS, traceback[-1]]\n    else:\n        return traceback", "docstring": "Truncate the traceback if it is too long.\n\nArgs:\ntraceback: A list representing an error's traceback. There should be one\nlist item per entry in the traceback (in the right order); beyond that,\nthis function does not care about the item types.\n\nReturns:\nThe traceback, possibly with some items removed and an _ELLIPSIS inserted.\nGuaranteed to be no longer than MAX_TRACEBACK_LENGTH.", "source": "github-repos"}
{"code": "def json(self, data):\n        \n        self._headers['Content-Type'] = 'application/json'\n        if not isinstance(data, str):\n            data = json.dumps(data, indent=4)\n        self._body = data", "docstring": "Defines the mock response JSON body.\n\nArguments:\ndata (dict|list|str): JSON body data.\n\nReturns:\nself: ``pook.Response`` current instance.", "source": "juraj-google-style"}
{"code": "def sort_edge(edges):\n        \n        return sorted(edges, key=lambda x: (x.L, x.R))", "docstring": "Sort iterable of edges first by left node indices then right.\n\nArgs:\nedges(list[Edge]): List of edges to be sorted.\n\nReturns:\nlist[Edge]: Sorted list by left and right node indices.", "source": "juraj-google-style"}
{"code": "def preprocess_JPEG(self, image, **kwargs):\n    save_kwargs = {'progressive': VERSATILEIMAGEFIELD_PROGRESSIVE_JPEG, 'quality': QUAL}\n    if (image.mode != 'RGB'):\n        image = image.convert('RGB')\n    return (image, save_kwargs)", "docstring": "Receive a PIL Image instance of a JPEG and returns 2-tuple.\n\nArgs:\n* [0]: Image instance, converted to RGB\n* [1]: Dict with a quality key (mapped to the value of `QUAL` as\ndefined by the `VERSATILEIMAGEFIELD_JPEG_RESIZE_QUALITY`\nsetting)", "source": "codesearchnet"}
{"code": "def _GetEarliestYearFromFileEntry(self):\n    file_entry = self.GetFileEntry()\n    if (not file_entry):\n        return None\n    stat_object = file_entry.GetStat()\n    posix_time = getattr(stat_object, 'crtime', None)\n    if (posix_time is None):\n        posix_time = getattr(stat_object, 'ctime', None)\n    if (file_entry.TYPE_INDICATOR == dfvfs_definitions.TYPE_INDICATOR_GZIP):\n        posix_time = getattr(stat_object, 'mtime', None)\n    if (posix_time is None):\n        logger.warning('Unable to determine earliest year from file stat information.')\n        return None\n    try:\n        year = timelib.GetYearFromPosixTime(posix_time, timezone=self._knowledge_base.timezone)\n        return year\n    except ValueError as exception:\n        logger.error('Unable to determine earliest year from file stat information with error: {0!s}'.format(exception))\n        return None", "docstring": "Retrieves the year from the file entry date and time values.\n\nThis function uses the creation time if available otherwise the change\ntime (metadata last modification time) is used.\n\nReturns:\nint: year of the file entry or None.", "source": "codesearchnet"}
{"code": "def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True):\n    page_info = self.page(title, auto_suggest=auto_suggest, redirect=redirect)\n    return page_info.summarize(sentences, chars)", "docstring": "Get the summary for the title in question\n\nArgs:\ntitle (str): Page title to summarize\nsentences (int): Number of sentences to return in summary\nchars (int): Number of characters to return in summary\nauto_suggest (bool): Run auto-suggest on title before \\\nsummarizing\nredirect (bool): Use page redirect on title before summarizing\nReturns:\nstr: The summarized results of the page\nNote:\nPrecedence for parameters: sentences then chars; if both are \\\n0 then the entire first section is returned", "source": "codesearchnet"}
{"code": "def observations_np(self, boundary=20):\n    list_observations_np_ts = [t.observations_np for t in self.trajectories]\n    OBS = list_observations_np_ts[0].shape[1:]\n    num_time_steps = [t.num_time_steps for t in self.trajectories]\n    t_max = max(num_time_steps)\n    boundary = int(boundary)\n    bucket_length = (boundary * int(np.ceil((float(t_max) / boundary))))\n\n    def padding_config(obs):\n        num_to_pad = ((bucket_length + 1) - obs.shape[0])\n        return ([(0, num_to_pad)] + ([(0, 0)] * len(OBS)))\n    return (np.stack([np.pad(obs, padding_config(obs), 'constant') for obs in list_observations_np_ts]), num_time_steps)", "docstring": "Pads the observations in all the trajectories and returns them.\n\nArgs:\nboundary: integer, Observations will be padded to (n * boundary) + 1 where\nn is an integer.\n\nReturns:\na tuple(padded_observations, time_steps), with shapes:\npadded_observations: (self.batch_size, n * boundary + 1) + OBS\ntime_steps: integer list of length = self.batch_size", "source": "codesearchnet"}
{"code": "def __str__(self):\n    info = {'section': self._section, 'config': self.config, 'req_type': self._req_type, 'req': str(self.req), 'range': str(self.range), 'exclude': str(self.exclude), 'include': str(self.include), 'init': str(self._initialized)}\n    req_str = '\\n >>> _Reqs Instance <<<\\n'\n    req_str += 'Section: {section}\\n'\n    req_str += 'Configuration name: {config}\\n'\n    req_str += 'Requirement type: {req_type}\\n'\n    req_str += 'Requirement: {req}\\n'\n    req_str += 'Range: {range}\\n'\n    req_str += 'Exclude: {exclude}\\n'\n    req_str += 'Include: {include}\\n'\n    req_str += 'Initialized: {init}\\n\\n'\n    return req_str.format(**info)", "docstring": "Prints a requirement and its components.\n\nReturns:\nString that has concatenated information about a requirement.", "source": "github-repos"}
{"code": "def random_shuffle(value, seed=None, name=None):\n    with ops.name_scope(name, 'shuffle', [value, seed]):\n        if value.rank == 0:\n            raise ValueError('Cannot shuffle a scalar StructuredTensor')\n        first_dimension = value.nrows()\n        index = random_ops.random_shuffle(math_ops.range(first_dimension), seed=seed)\n        return gather(value, index, axis=0)", "docstring": "Shuffle a structured tensor on the zeroth axis.\n\nArgs:\nvalue: a structured tensor of rank at least one.\nseed: the seed for shuffling.\nname: the name for shuffle.\n\nReturns:\nThe shuffled structured tensor.", "source": "github-repos"}
{"code": "def invitation_backend(backend=None, namespace=None):\n    backend = (backend or ORGS_INVITATION_BACKEND)\n    (class_module, class_name) = backend.rsplit('.', 1)\n    mod = import_module(class_module)\n    return getattr(mod, class_name)(namespace=namespace)", "docstring": "Returns a specified invitation backend\n\nArgs:\nbackend: dotted path to the invitation backend class\nnamespace: URL namespace to use\n\nReturns:\nan instance of an InvitationBackend", "source": "codesearchnet"}
{"code": "def getFingerprintsForExpressions(self, body, sparsity=1.0):\n        \n        return self._expressions.resolveBulkExpression(self._retina, body, sparsity)", "docstring": "Bulk resolution of expressions\nArgs:\nbody, ExpressionOperation: The JSON encoded expression to be evaluated (required)\nsparsity, float: Sparsify the resulting expression to this percentage (optional)\nReturns:\nlist of Fingerprint\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def get_tokens(max_value):\n  \n  vocab = [str(i) for i in range(max_value)]\n  vocab = set(vocab)\n  vocab.update(CodeOp.LITERALS)\n  vocab.update(CodeOp.KEYWORDS)\n  vocab |= set(\"\".join(vocab))\n  return sorted(vocab)", "docstring": "Defines tokens.\n\nArgs:\nmax_value: the maximum numeric range for the token.\n\nReturns:\nlist of string tokens in vocabulary.", "source": "juraj-google-style"}
{"code": "def dismiss_confirm(self, text=None, wait=None):\n        \n\n        with self.driver.dismiss_modal(\"confirm\", text=text, wait=wait):\n            yield", "docstring": "Execute the wrapped code, dismissing a confirm.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "juraj-google-style"}
{"code": "def fit_size_distribution_models(self, model_names, model_objs, input_columns,\n                                     output_columns=None, calibrate=False):\n        \n        if output_columns is None:\n            output_columns = [\"Shape\", \"Location\", \"Scale\"]\n        groups = np.unique(self.data[\"train\"][\"member\"][self.group_col])\n        \n        weights=None\n        \n        for group in groups:\n            group_data = self.data[\"train\"][\"combo\"].loc[self.data[\"train\"][\"combo\"][self.group_col] == group]\n            group_data = group_data.dropna()\n            group_data = group_data[group_data[output_columns[-1]] > 0]\n            if self.sector:\n        \n                lon_obj = group_data.loc[:,'Centroid_Lon']\n                lat_obj = group_data.loc[:,'Centroid_Lat']\n                \n                conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel())\n                center_lon, center_lat = self.proj_dict[\"lon_0\"],self.proj_dict[\"lat_0\"] \n            \n                distances = np.array([np.sqrt((x-center_lon)**2+\\\n                        (y-center_lat)**2) for (x, y) in conus_lat_lon_points])\n            \n                min_dist, max_minus_min = min(distances),max(distances)-min(distances)\n\n                distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances]\n                weights = np.array(distance_0_1)\n\n            self.size_distribution_models[group] = {\"multi\": {}, \"lognorm\": {}}\n            if calibrate:\n                self.size_distribution_models[group][\"calshape\"] = {}\n                self.size_distribution_models[group][\"calscale\"] = {}\n            log_labels = np.log(group_data[output_columns].values)\n            log_means = log_labels.mean(axis=0)\n            log_sds = log_labels.std(axis=0)\n            self.size_distribution_models[group]['lognorm']['mean'] = log_means\n            self.size_distribution_models[group]['lognorm']['sd'] = log_sds\n            for m, model_name in enumerate(model_names):\n                print(group, model_name)\n                self.size_distribution_models[group][\"multi\"][model_name] = deepcopy(model_objs[m])\n                try:\n                    self.size_distribution_models[group][\"multi\"][model_name].fit(group_data[input_columns],\n                                                                              (log_labels - log_means) / log_sds,\n                                                                        sample_weight=weights)\n                except:\n                    self.size_distribution_models[group][\"multi\"][model_name].fit(group_data[input_columns],\n                                                                              (log_labels - log_means) / log_sds)\n                if calibrate:\n                    training_predictions = self.size_distribution_models[\n                        group][\"multi\"][model_name].predict(group_data[input_columns])\n                    self.size_distribution_models[group][\"calshape\"][model_name] = LinearRegression()\n                    self.size_distribution_models[group][\"calshape\"][model_name].fit(training_predictions[:, 0:1],\n                                                                                     (log_labels[:, 0] - log_means[0]) /\n                                                                                     log_sds[\n                                                                                         0],\n     
                                                                               sample_weight=weights)\n                    self.size_distribution_models[group][\"calscale\"][model_name] = LinearRegression()\n                    self.size_distribution_models[group][\"calscale\"][model_name].fit(training_predictions[:, 1:],\n                                                                                     (log_labels[:, 1] - log_means[1]) /\n                                                                                     log_sds[\n                                                                                         1],\n                                                                            sample_weight=weights)", "docstring": "Fits multitask machine learning models to predict the parameters of a size distribution\nArgs:\nmodel_names: List of machine learning model names\nmodel_objs: scikit-learn style machine learning model objects\ninput_columns: Training data columns used as input for ML model\noutput_columns: Training data columns used for prediction\ncalibrate: Whether or not to fit a log-linear regression to predictions from ML model", "source": "juraj-google-style"}
{"code": "def encode(self, s):\n    try:\n        import matplotlib.image as im\n    except ImportError as e:\n        tf.logging.warning('Reading an image requires matplotlib to be installed: %s', e)\n        raise NotImplementedError('Image reading not implemented.')\n    return im.imread(s)", "docstring": "Transform a string with a filename into a list of RGB integers.\n\nArgs:\ns: path to the file with an image.\n\nReturns:\nids: list of integers", "source": "codesearchnet"}
{"code": "def SetBalanceFor(self, assetId, fixed8_val):\n        \n        found = False\n        for key, val in self.Balances.items():\n            if key == assetId:\n                self.Balances[key] = fixed8_val\n                found = True\n\n        if not found:\n            self.Balances[assetId] = fixed8_val", "docstring": "Set the balance for an asset id.\nArgs:\nassetId (UInt256):\nfixed8_val (Fixed8): balance value.", "source": "juraj-google-style"}
{"code": "def _select_position(self, width, height):\n    positions = self._generate_placements(width, height)\n    if (self.rot and (width != height)):\n        positions += self._generate_placements(height, width)\n    if (not positions):\n        return (None, None)\n    return min(((p[0], self._rect_fitness(*p)) for p in positions), key=operator.itemgetter(1))", "docstring": "Search for the placement with the bes fitness for the rectangle.\n\nReturns:\ntuple (Rectangle, fitness) - Rectangle placed in the fittest position\nNone - Rectangle couldn't be placed", "source": "codesearchnet"}
{"code": "def _absolute_position_to_relative_position_unmasked(x):\n    (batch, heads, length, _) = common_layers.shape_list(x)\n    x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, (length - 1)]])\n    x_flat = tf.reshape(x, [batch, heads, ((length ** 2) + (length * (length - 1)))])\n    x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]])\n    x = tf.reshape(x_flat, [batch, heads, length, (2 * length)])\n    x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, ((2 * length) - 1)])\n    return x", "docstring": "Helper function for dot_product_unmasked_self_attention_relative_v2.\n\nRearrange an attention logits or weights Tensor.\n\nThe dimensions of the input represent:\n[batch, heads, query_position, memory_position]\n\nThe dimensions of the output represent:\n[batch, heads, query_position, memory_position - query_position + length - 1]\n\nOnly works with unmasked_attention.\n\nArgs:\nx: a Tensor with shape [batch, heads, length, length]\n\nReturns:\na Tensor with shape [batch, heads, length, 2*length-1]", "source": "codesearchnet"}
{"code": "def AddKeyByPath(self, key_path, registry_key):\n    if (not key_path.startswith(definitions.KEY_PATH_SEPARATOR)):\n        raise ValueError('Key path does not start with: {0:s}'.format(definitions.KEY_PATH_SEPARATOR))\n    if (not self._root_key):\n        self._root_key = FakeWinRegistryKey(self._key_path_prefix)\n    path_segments = key_paths.SplitKeyPath(key_path)\n    parent_key = self._root_key\n    for path_segment in path_segments:\n        try:\n            subkey = FakeWinRegistryKey(path_segment)\n            parent_key.AddSubkey(subkey)\n        except KeyError:\n            subkey = parent_key.GetSubkeyByName(path_segment)\n        parent_key = subkey\n    parent_key.AddSubkey(registry_key)", "docstring": "Adds a Windows Registry key for a specific key path.\n\nArgs:\nkey_path (str): Windows Registry key path to add the key.\nregistry_key (WinRegistryKey): Windows Registry key.\n\nRaises:\nKeyError: if the subkey already exists.\nValueError: if the Windows Registry key cannot be added.", "source": "codesearchnet"}
{"code": "def __eq__(self, other):\n        \n        \n        if (type(self) is type(other) and\n                self._index == other._index and\n                self._drives == other._drives and\n                self._controls == other._controls and\n                self._measures == other._measures and\n                self._acquires == other._acquires):\n            return True\n        return False", "docstring": "Two physical qubits are the same if they have the same index and channels.\n\nArgs:\nother (Qubit): other Qubit\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def export(self, remote_function):\n        \n        if self._worker.mode is None:\n            \n            \n            self._functions_to_export.append(remote_function)\n            return\n        if self._worker.mode != ray.worker.SCRIPT_MODE:\n            \n            return\n        self._do_export(remote_function)", "docstring": "Export a remote function.\n\nArgs:\nremote_function: the RemoteFunction object.", "source": "juraj-google-style"}
{"code": "def tox(args=''):\n    \n    basedir = dirname(__file__)\n\n    latest_pythons = _determine_latest_pythons()\n    \n    highest_minor_python = _highest_minor(latest_pythons)\n\n    _local_needs_pythons(flo('cd {basedir}  &&  '\n                             'python{highest_minor_python} -m tox {args}'))", "docstring": "Run tox.\n\nBuild package and run unit tests against several pythons.\n\nArgs:\nargs: Optional arguments passed to tox.\nExample:\n\nfab tox:'-e py36 -r'", "source": "juraj-google-style"}
{"code": "def search(self, q):\n        \n        results = self._api.search(q=q)\n\n        return results", "docstring": "Search tweets by keyword.\n\nArgs:\nq: keyword\n\nReturns:\nlist: tweet list", "source": "juraj-google-style"}
{"code": "def get_version():\n    \n    \n    if not 'win' in sys.platform:\n        return NO_WIN\n    \n    win_ver = sys.getwindowsversion()\n    try:\n        \n        major, minor, build = win_ver.platform_version\n    except AttributeError:\n        if sys.version_info < (3, 0):\n            \n            from platform import _get_real_winver\n            major, minor, build = _get_real_winver(win_ver.major, win_ver.minor, win_ver.build)\n            major, minor, build = int(major), int(minor), int(build) \n        else:\n            \n            major, minor, build = win_ver.major, win_ver.minor, win_ver.build\n    \n    try:\n        is_server = 1 if win_ver.product_type == 3 else 0\n    except AttributeError:\n        is_server = 0\n    \n    try:\n        if major == 10:\n            \n            \n            sp_ver = build\n        else:\n            sp_ver = win_ver.service_pack_major or 0\n    except AttributeError:\n        try:\n            sp_ver = int(win_ver.service_pack.rsplit(' ', 1))\n        except (IndexError, ValueError):\n            sp_ver = 0\n    \n    return (major, minor, sp_ver, is_server)", "docstring": "Get the Windows OS version running on the machine.\n\nParams:\nNone\n\nReturns:\nThe Windows OS version running on the machine (comparables with the values list in the class).", "source": "juraj-google-style"}
{"code": "def categorical_partition_data(data):\n    series = pd.Series(data)\n    value_counts = series.value_counts(dropna=True)\n    null_indexes = series.isnull()\n    nonnull_count = (null_indexes == False).sum()\n    weights = (value_counts.values / nonnull_count)\n    return {'values': value_counts.index.tolist(), 'weights': weights}", "docstring": "Convenience method for creating weights from categorical data.\n\nArgs:\ndata (list-like): The data from which to construct the estimate.\n\nReturns:\nA new partition object::\n\n{\n\"partition\": (list) The categorical values present in the data\n\"weights\": (list) The weights of the values in the partition.\n}", "source": "codesearchnet"}
{"code": "def __init__(self, engine_id, client):\n        \n        self._client = client\n        self._dv = client.direct_view(targets='all')\n        self._dv.use_dill()\n        nengines = len(client)\n        super(ObjectHub, self).__init__(engine_id, nengines)", "docstring": "Make an ObjectHub.\nArgs:\nengine_id: ipyparallel engine id number where this Hub is located,\nor a negative number if it is on an ipyparallel client.\nclient: ipyparallel.Client", "source": "juraj-google-style"}
{"code": "def create(cls, session, record, imported=False, auto_reply=False):\n    return super(Conversations, cls).create(session, record, imported=imported, auto_reply=auto_reply)", "docstring": "Create a conversation.\n\nPlease note that conversation cannot be created with more than 100\nthreads, if attempted the API will respond with HTTP 412.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nrecord (helpscout.models.Conversation): The conversation\nto be created.\nimported (bool, optional): The ``imported`` request parameter\nenables conversations to be created for historical purposes (i.e.\nif moving from a different platform, you can import your\nhistory). When ``imported`` is set to ``True``, no outgoing\nemails or notifications will be generated.\nauto_reply (bool): The ``auto_reply`` request parameter enables\nauto replies to be sent when a conversation is created via the\nAPI. When ``auto_reply`` is set to ``True``, an auto reply will\nbe sent as long as there is at least one ``customer`` thread in\nthe conversation.\n\nReturns:\nhelpscout.models.Conversation: Newly created conversation.", "source": "codesearchnet"}
{"code": "def input_selector_schema(config_cls):\n    \n    config_type = resolve_config_cls_arg(config_cls)\n    check.param_invariant(config_type.is_selector, 'config_cls')\n\n    def _wrap(func):\n        def _selector(context, config_value):\n            selector_key, selector_value = single_item(config_value)\n            return func(context, selector_key, selector_value)\n\n        return _create_input_schema(config_type, _selector)\n\n    return _wrap", "docstring": "A decorator for annotating a function that can take the selected properties\nfrom a ``config_value`` in to an instance of a custom type.\n\nArgs:\nconfig_cls (Selector)", "source": "juraj-google-style"}
{"code": "def resize(x, mode, factor=4):\n    assert (mode in ['bilinear', 'nearest']), mode\n    shp = (tf.shape(x)[2:] * factor)\n    x = tf.transpose(x, [0, 2, 3, 1])\n    if (mode == 'bilinear'):\n        x = tf.image.resize_bilinear(x, shp, align_corners=True)\n    else:\n        x = tf.image.resize_nearest_neighbor(x, shp, align_corners=False)\n    return tf.transpose(x, [0, 3, 1, 2])", "docstring": "Resize input tensor with unkown input-shape by a factor\n\nArgs:\nx (tf.Tensor): tensor NCHW\nfactor (int, optional): resize factor for H, W\n\nNote:\nDifferences here against Caffe have huge impacts on the\nquality of the predictions.\n\nReturns:\ntf.Tensor: resized tensor NCHW", "source": "codesearchnet"}
{"code": "def _reset(self, indices):\n    \n\n    \n    self.assert_common_preconditions()\n\n    \n    \n    return np.stack([self._envs[index].reset() for index in indices])", "docstring": "Resets environments at indices shouldn't pre-process or record.\n\nSubclasses should override this to do the actual reset if something other\nthan the default implementation is desired.\n\nArgs:\nindices: list of indices of underlying envs to call reset on.\n\nReturns:\nnp.ndarray of stacked observations from the reset-ed envs.", "source": "juraj-google-style"}
{"code": "def should_use(intersection):\n    if (intersection.interior_curve in ACCEPTABLE_CLASSIFICATIONS):\n        return True\n    if (intersection.interior_curve in TANGENT_CLASSIFICATIONS):\n        return ((intersection.s == 0.0) or (intersection.t == 0.0))\n    return False", "docstring": "Check if an intersection can be used as part of a curved polygon.\n\nWill return :data:`True` if the intersection is classified as\n:attr:`~.IntersectionClassification.FIRST`,\n:attr:`~.IntersectionClassification.SECOND` or\n:attr:`~.IntersectionClassification.COINCIDENT` or if the intersection\nis classified is a corner / edge end which is classified as\n:attr:`~.IntersectionClassification.TANGENT_FIRST` or\n:attr:`~.IntersectionClassification.TANGENT_SECOND`.\n\nArgs:\nintersection (.Intersection): An intersection to be added.\n\nReturns:\nbool: Indicating if the intersection will be used.", "source": "codesearchnet"}
{"code": "def lists_to_tuples(structure):\n    return tree_impl.lists_to_tuples(structure)", "docstring": "Returns the structure with list instances changed to tuples.\n\nArgs:\nstructure: Arbitrarily nested structure.\n\nReturns:\nThe same structure but with tuples instead of lists.", "source": "github-repos"}
{"code": "def get_training_or_validation_split(samples, labels, validation_split, subset):\n    if not validation_split:\n        return (samples, labels)\n    num_val_samples = int(validation_split * len(samples))\n    if subset == 'training':\n        io_utils.print_msg(f'Using {len(samples) - num_val_samples} files for training.')\n        samples = samples[:-num_val_samples]\n        if labels is not None:\n            labels = labels[:-num_val_samples]\n    elif subset == 'validation':\n        io_utils.print_msg(f'Using {num_val_samples} files for validation.')\n        samples = samples[-num_val_samples:]\n        if labels is not None:\n            labels = labels[-num_val_samples:]\n    else:\n        raise ValueError(f'`subset` must be either \"training\" or \"validation\", received: {subset}')\n    return (samples, labels)", "docstring": "Potentially restrict samples & labels to a training or validation split.\n\nArgs:\nsamples: List of elements.\nlabels: List of corresponding labels.\nvalidation_split: Float, fraction of data to reserve for validation.\nsubset: Subset of the data to return.\nEither `\"training\"`, `\"validation\"`, or `None`.\nIf `None`, we return all of the data.\n\nReturns:\ntuple (samples, labels), potentially restricted to the specified subset.", "source": "github-repos"}
{"code": "def __init__(self, dtype, shape=None, shared_name=None, name='conditional_accumulator', reduction_type='MEAN'):\n    accumulator_ref = gen_data_flow_ops.resource_conditional_accumulator(dtype=dtype, shape=shape, shared_name=shared_name, name=name, reduction_type=reduction_type)\n    if context.executing_eagerly():\n        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(handle=accumulator_ref, handle_device=context.context().device_name)\n    super(ConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref)", "docstring": "Creates a new ConditionalAccumulator.\n\nArgs:\ndtype: Datatype of the accumulated gradients.\nshape: Shape of the accumulated gradients.\nshared_name: Optional. If non-empty, this accumulator will be shared under\nthe given name across multiple sessions.\nname: Optional name for the accumulator.\nreduction_type: Reduction type to use when taking the gradient.", "source": "github-repos"}
{"code": "def csv(self, ondemand=False):\n    self._request_uri = '{}/{}'.format(self._api_uri, 'csv')\n    self._stream = True\n    if ondemand:\n        self._request.add_payload('runNow', True)", "docstring": "Update request URI to return CSV data.\n\nFor onDemand bulk generation to work it must first be enabled in the\nThreatConnect platform under System settings.\n\nArgs:\nondemand (boolean): Enable on demand bulk generation.", "source": "codesearchnet"}
{"code": "def supported_language(lang):\n    try:\n        self.get_collection(lang=lang)\n        return True\n    except LanguageNotSupported as e:\n        return False", "docstring": "Return True if polyglot supports the language.\n\nArgs:\nlang (string): Language code.", "source": "codesearchnet"}
{"code": "def take_screenshot(webdriver, file_name):\n        \n        folder_location = os.path.join(ProjectUtils.get_project_root(),\n                                       WebScreenShotUtil.SCREEN_SHOT_LOCATION)\n\n        WebScreenShotUtil.__capture_screenshot(\n            webdriver, folder_location, file_name + \".png\")", "docstring": "Captures a screenshot.\n\nArgs:\nwebdriver (WebDriver) - Selenium webdriver.\nfile_name (str) - File name to save screenshot as.", "source": "juraj-google-style"}
{"code": "def full_like(array, fill_value, reverse=False, dtype=None, keepmeta=True):\n    if keepmeta:\n        return (dc.zeros_like(array) + fill_value).astype(dtype)\n    else:\n        return dc.full(array.shape, fill_value, dtype)", "docstring": "Create an array of `fill_value` with the same shape and type as the input array.\n\nArgs:\narray (xarray.DataArray): The shape and data-type of it define\nthese same attributes of the output array.\nfill_value (scalar or numpy.ndarray): Fill value or array.\ndtype (data-type, optional): If spacified, this function overrides\nthe data-type of the output array.\nkeepmeta (bool, optional): Whether *coords, attrs, and name of the input\narray are kept in the output one. Default is True.\n\nReturns:\narray (decode.array): Decode array filled with `fill_value`.", "source": "codesearchnet"}
{"code": "def InitFromDataPoints(self, start_stats, complete_stats):\n    \n    self.start_points = self._ConvertToResultList(start_stats)\n    self.complete_points = self._ConvertToResultList(complete_stats)\n    return self", "docstring": "Check that this approval applies to the given token.\n\nArgs:\nstart_stats: A list of lists, each containing two values (a timestamp and\nthe number of clients started at this time).\ncomplete_stats: A list of lists, each containing two values (a timestamp\nand the number of clients completed at this time).\n\nReturns:\nA reference to the current instance to allow method chaining.", "source": "juraj-google-style"}
{"code": "def DEFINE_multi_enum(name, default, enum_values, help, flag_values=_flagvalues.FLAGS, case_sensitive=True, **args):\n    parser = _argument_parser.EnumParser(enum_values, case_sensitive)\n    serializer = _argument_parser.ArgumentSerializer()\n    DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)", "docstring": "Registers a flag whose value can be a list strings from enum_values.\n\nUse the flag on the command line multiple times to place multiple\nenum values into the list.  The 'default' may be a single string\n(which will be converted into a single-element list) or a list of\nstrings.\n\nArgs:\nname: str, the flag name.\ndefault: Union[Iterable[Text], Text, None], the default value of the flag;\nsee `DEFINE_multi`.\nenum_values: [str], a non-empty list of strings with the possible values for\nthe flag.\nhelp: str, the help message.\nflag_values: FlagValues, the FlagValues instance with which the flag will\nbe registered. This should almost never need to be overridden.\ncase_sensitive: Whether or not the enum is to be case-sensitive.\n**args: Dictionary with extra keyword args that are passed to the\nFlag __init__.", "source": "codesearchnet"}
{"code": "def parse_google_format_docstring(docstring: str) -> tuple[Optional[str], Optional[dict], Optional[str]]:\n    description_match = description_re.search(docstring)\n    args_match = args_re.search(docstring)\n    returns_match = returns_re.search(docstring)\n    description = description_match.group(1).strip() if description_match else None\n    docstring_args = args_match.group(1).strip() if args_match else None\n    returns = returns_match.group(1).strip() if returns_match else None\n    if docstring_args is not None:\n        docstring_args = '\\n'.join([line for line in docstring_args.split('\\n') if line.strip()])\n        matches = args_split_re.findall(docstring_args)\n        args_dict = {match[0]: re.sub('\\\\s*\\\\n+\\\\s*', ' ', match[1].strip()) for match in matches}\n    else:\n        args_dict = {}\n    return (description, args_dict, returns)", "docstring": "Parses a Google-style docstring to extract the function description,\nargument descriptions, and return description.\n\nArgs:\ndocstring (str): The docstring to parse.\n\nReturns:\nThe function description, arguments, and return description.", "source": "github-repos"}
{"code": "def add_stream_logger(level=logging.DEBUG, name=None):\n    \n\n    logger = logging.getLogger(name)\n    logger.setLevel(level)\n    handler = logging.StreamHandler()\n    handler.setFormatter(get_default_log_formatter())\n    handler.setLevel(level)\n    logger.addHandler(handler)", "docstring": "Add a stream logger. This can be used for printing all SDK calls to stdout\nwhile working in an interactive session. Note this is a logger for the\nentire module, which will apply to all environments started in the same\nsession. If you need a specific logger pass a ``logfile`` to\n:func:`~sdk.init`\n\nArgs:\nlevel(int): :mod:`logging` log level\nname(str): logger name, will default to the root logger.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def AddComment(self, comment):\n    \n    if not comment:\n      return\n\n    if not self.comment:\n      self.comment = comment\n    else:\n      self.comment = ''.join([self.comment, comment])", "docstring": "Adds a comment to the event tag.\n\nArgs:\ncomment (str): comment.", "source": "juraj-google-style"}
{"code": "def _CompareFields(field, other_field):\n  \n  field_attrs = _GetFieldAttributes(field)\n  other_field_attrs = _GetFieldAttributes(other_field)\n  if field_attrs != other_field_attrs:\n    return False\n  return field.__class__ == other_field.__class__", "docstring": "Checks if two ProtoRPC fields are \"equal\".\n\nCompares the arguments, rather than the id of the elements (which is\nthe default __eq__ behavior) as well as the class of the fields.\n\nArgs:\nfield: A ProtoRPC message field to be compared.\nother_field: A ProtoRPC message field to be compared.\n\nReturns:\nBoolean indicating whether the fields are equal.", "source": "juraj-google-style"}
{"code": "def setNetworkDataRequirement(self, eDataRequirement):\n        \n        print '%s call setNetworkDataRequirement' % self.port\n        print eDataRequirement\n\n        if eDataRequirement == Device_Data_Requirement.ALL_DATA:\n            self.networkDataRequirement = 'n'\n        return True", "docstring": "set whether the Thread device requires the full network data\nor only requires the stable network data\n\nArgs:\neDataRequirement: is true if requiring the full network data\n\nReturns:\nTrue: successful to set the network requirement", "source": "juraj-google-style"}
{"code": "def broadcast(cls,\n            shape1: 'TensorFluentShape',\n            shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]:\n        \n        reshape_1, reshape_2 = None, None\n\n        if not (shape1._batch or shape2._batch):\n            return reshape_1, reshape_2\n\n        size_1, size_2 = shape1.fluent_size, shape2.fluent_size\n        size_diff = abs(size_1 - size_2)\n        if size_diff == 0:\n            return reshape_1, reshape_2\n\n        if size_2 > size_1 and not (size_1 == 0 and not shape1._batch):\n            reshape_1 = [1] * size_diff + list(shape1.fluent_shape)\n            if shape1._batch:\n                reshape_1 = [shape1.batch_size] + reshape_1\n        elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch):\n            reshape_2 = [1] * size_diff + list(shape2.fluent_shape)\n            if shape2._batch:\n                reshape_2 = [shape2.batch_size] + reshape_2\n        return reshape_1, reshape_2", "docstring": "It broadcasts the fluent shapes if any input is in batch mode.\n\nIt handles input shapes in different modes, expanding its\ndimensions if necessary. It outputs a tuple with new shapes.\nIf no input shape is in batch mode, return (None, None).\nIf an input shape does not need to be changed, return None.\n\nArgs:\nshape1: A fluent's shape.\nshape2: A fluent's shape.\n\nReturns:\nA pair of new shapes.", "source": "juraj-google-style"}
{"code": "def from_string(contents):\n    if (contents[(- 1)] != '\\n'):\n        contents += '\\n'\n    white_space = '[ \\\\t\\\\r\\\\f\\\\v]'\n    natoms_line = (((white_space + '*\\\\d+') + white_space) + '*\\\\n')\n    comment_line = '[^\\\\n]*\\\\n'\n    coord_lines = '(\\\\s*\\\\w+\\\\s+[0-9\\\\-\\\\+\\\\.eEdD]+\\\\s+[0-9\\\\-\\\\+\\\\.eEdD]+\\\\s+[0-9\\\\-\\\\+\\\\.eEdD]+\\\\s*\\\\n)+'\n    frame_pattern_text = ((natoms_line + comment_line) + coord_lines)\n    pat = re.compile(frame_pattern_text, re.MULTILINE)\n    mols = []\n    for xyz_match in pat.finditer(contents):\n        xyz_text = xyz_match.group(0)\n        mols.append(XYZ._from_frame_string(xyz_text))\n    return XYZ(mols)", "docstring": "Creates XYZ object from a string.\n\nArgs:\ncontents: String representing an XYZ file.\n\nReturns:\nXYZ object", "source": "codesearchnet"}
{"code": "def get_climate(self, device_label):\n        \n        response = None\n        try:\n            response = requests.get(\n                urls.climate(self._giid),\n                headers={\n                    'Accept': 'application/json, text/javascript, */*; q=0.01',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                params={\n                    \"deviceLabel\": device_label})\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Get climate history\nArgs:\ndevice_label: device label of climate device", "source": "juraj-google-style"}
{"code": "def singleprint_from_saved_model_proto(export_dir: str) -> str:\n    try:\n        return fingerprinting_pywrap.SingleprintFromSM(export_dir)\n    except FingerprintException as e:\n        raise ValueError(e) from None", "docstring": "Returns the singleprint of `saved_model.pb` in `export_dir`.\n\nArgs:\nexport_dir: The directory that contains `saved_model.pb`.\n\nReturns:\nA string containing the singleprint of `saved_model.pb` in `export_dir`.\n\nRaises:\nValueError: If a valid singleprint cannot be constructed from\n`saved_model.pb`.", "source": "github-repos"}
{"code": "def call(self, func, *args, **kwargs):\n    for timer in self:\n        with timer:\n            func(*args, **kwargs)\n    return self", "docstring": "Alternative way to time a simple function call using condensed syntax.\n\nReturns:\nself (timerit.Timerit): Use `min`, or `mean` to get a scalar. Use\n`print` to output a report to stdout.\n\nExample:\n>>> import math\n>>> time = Timerit(num=10).call(math.factorial, 50).min()\n>>> assert time > 0", "source": "codesearchnet"}
{"code": "def _check_properties(cls, property_names, require_indexed=True):\n    assert isinstance(property_names, (list, tuple)), repr(property_names)\n    for name in property_names:\n        assert isinstance(name, basestring), repr(name)\n        if ('.' in name):\n            (name, rest) = name.split('.', 1)\n        else:\n            rest = None\n        prop = cls._properties.get(name)\n        if (prop is None):\n            cls._unknown_property(name)\n        else:\n            prop._check_property(rest, require_indexed=require_indexed)", "docstring": "Internal helper to check the given properties exist and meet specified\nrequirements.\n\nCalled from query.py.\n\nArgs:\nproperty_names: List or tuple of property names -- each being a string,\npossibly containing dots (to address subproperties of structured\nproperties).\n\nRaises:\nInvalidPropertyError if one of the properties is invalid.\nAssertionError if the argument is not a list or tuple of strings.", "source": "codesearchnet"}
{"code": "def trace_start(self):\n        \n        cmd = enums.JLinkTraceCommand.START\n        res = self._dll.JLINKARM_TRACE_Control(cmd, 0)\n        if (res == 1):\n            raise errors.JLinkException('Failed to start trace.')\n        return None", "docstring": "Starts collecting trace data.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def initialize_typeshed_or_die():\n    try:\n        return typeshed.Typeshed()\n    except OSError as e:\n        logging.critical(str(e))\n        sys.exit(1)", "docstring": "Initialize a Typeshed object or die.\n\nReturns:\nAn instance of Typeshed()", "source": "github-repos"}
{"code": "def __init__(self, name=None, url=None, timezone=None, id=None, email=None,\n               field_dict=None, lang=None, **kwargs):\n    \n    self._schedule = None\n\n    if not field_dict:\n      if name:\n        kwargs['agency_name'] = name\n      if url:\n        kwargs['agency_url'] = url\n      if timezone:\n        kwargs['agency_timezone'] = timezone\n      if id:\n        kwargs['agency_id'] = id\n      if lang:\n        kwargs['agency_lang'] = lang\n      if email:\n        kwargs['agency_email'] = email\n      field_dict = kwargs\n\n    self.__dict__.update(field_dict)", "docstring": "Initialize a new Agency object.\n\nArgs:\nfield_dict: A dictionary mapping attribute name to unicode string\nname: a string, ignored when field_dict is present\nurl: a string, ignored when field_dict is present\ntimezone: a string, ignored when field_dict is present\nid: a string, ignored when field_dict is present\nkwargs: arbitrary keyword arguments may be used to add attributes to the\nnew object, ignored when field_dict is present", "source": "juraj-google-style"}
{"code": "def substitute(dict_, source):\n    \n    d_esc = (re.escape(k) for k in dict_.keys())\n    pattern = re.compile('|'.join(d_esc))\n    return pattern.sub(lambda x: dict_[x.group()], source)", "docstring": "Perform re.sub with the patterns in the given dict\nArgs:\ndict_: {pattern: repl}\nsource: str", "source": "juraj-google-style"}
{"code": "def get_content_field(self, name):\n    fields = self._content.findall(name)\n    if (not fields):\n        return None\n    elif (len(fields) == 1):\n        return etree_to_dict(fields[0])[name]\n    else:\n        return [etree_to_dict(field)[name] for field in fields]", "docstring": "Get the contents of a specific subtag from Clusterpoint Storage's response's content tag.\n\nArgs:\nname -- A name string of the content's subtag to be returned.\n\nReturns:\nA dict representing the contents of the specified field or a list of dicts\nif there are multiple fields with that tag name. Returns None if no field found.", "source": "codesearchnet"}
{"code": "def filter_parts(cls, part_info):\n        \n        \n        filtered = OrderedDict()\n        for part_name, info_list in part_info.items():\n            if info_list is None or isinstance(info_list, Exception):\n                continue\n            info_list = [i for i in info_list if isinstance(i, cls)]\n            if info_list:\n                filtered[part_name] = info_list\n        return filtered", "docstring": "Filter the part_info dict looking for instances of our class\n\nArgs:\npart_info (dict): {part_name: [Info] or None} as returned from\nController.run_hook()\n\nReturns:\ndict: {part_name: [info]} where info is a subclass of cls", "source": "juraj-google-style"}
{"code": "def _MakeGroupFromRootSection(root_section, undefined_str):\n    \n    group = {}\n    for statement in root_section.Statements():\n        if isinstance(statement, six.string_types):\n            continue\n        func, args = statement\n        \n        if func is _DoDef and isinstance(args, _Section):\n            section = args\n            \n            t = Template._FromSection(section, group, undefined_str)\n            group[section.section_name] = t\n    return group", "docstring": "Construct a dictinary { template name -> Template() instance }\n\nArgs:\nroot_section: _Section instance -- root of the original parse tree", "source": "juraj-google-style"}
{"code": "def remove_pad(x, pad_remover, mode):\n    x = expert_utils.flatten_all_but_last(x)\n    if (mode != ModeKeys.PREDICT):\n        x = pad_remover.remove(x)\n    x = tf.expand_dims(x, axis=0)\n    return x", "docstring": "Remove padding by concatenating all dimension into one.\n\nArgs:\nx (tf.Tensor): input of shape [batch_size, length, depth]\npad_remover (obj): a PadRemover object\nmode (ModeKeys): infer, train or eval. If inference, the padding remover is\nnot applied\n\nReturns:\ntf.Tensor of shape [1,length_nonpad,depth] where\nlength_nonpad <= batch_size*length", "source": "codesearchnet"}
{"code": "def pad_trajectories(trajectories, boundary=20):\n    t_max = max((r.shape[0] for (_, _, r) in trajectories))\n    boundary = int(boundary)\n    bucket_length = (boundary * int(np.ceil((float(t_max) / boundary))))\n    padded_observations = []\n    padded_actions = []\n    padded_rewards = []\n    padded_lengths = []\n    reward_masks = []\n    for (o, a, r) in trajectories:\n        num_to_pad = ((bucket_length + 1) - o.shape[0])\n        padded_lengths.append(num_to_pad)\n        if (num_to_pad == 0):\n            padded_observations.append(o)\n            padded_actions.append(a)\n            padded_rewards.append(r)\n            reward_masks.append(onp.ones_like(r, dtype=np.int32))\n            continue\n        padding_config = [(0, num_to_pad, 0)]\n        for _ in range((o.ndim - 1)):\n            padding_config.append((0, 0, 0))\n        padding_config = tuple(padding_config)\n        padding_value = get_padding_value(o.dtype)\n        action_padding_value = get_padding_value(a.dtype)\n        reward_padding_value = get_padding_value(r.dtype)\n        padded_obs = lax.pad(o, padding_value, padding_config)\n        padded_observations.append(padded_obs)\n        assert ((a.ndim == 1) and (r.ndim == 1))\n        padding_config = ((0, num_to_pad, 0),)\n        padded_action = lax.pad(a, action_padding_value, padding_config)\n        padded_actions.append(padded_action)\n        padded_reward = lax.pad(r, reward_padding_value, padding_config)\n        padded_rewards.append(padded_reward)\n        reward_mask = onp.ones_like(r, dtype=np.int32)\n        reward_masks.append(lax.pad(reward_mask, 0, padding_config))\n    return (padded_lengths, np.stack(reward_masks), np.stack(padded_observations), np.stack(padded_actions), np.stack(padded_rewards))", "docstring": "Pad trajectories to a bucket length that is a multiple of boundary.\n\nArgs:\ntrajectories: list[(observation, actions, rewards)], where each observation\nis shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the\nlength of the list being B (batch size).\nboundary: int, bucket length, the actions and rewards are padded to integer\nmultiples of boundary.\n\nReturns:\ntuple: (padding lengths, reward_mask, padded_observations, padded_actions,\npadded_rewards) where padded_observations is shaped (B, T+1) + OBS and\npadded_actions, padded_rewards & reward_mask are shaped (B, T).\nWhere T is max(t) rounded up to an integer multiple of boundary.\npadded_length is how much padding we've added and\nreward_mask is 1s for actual rewards and 0s for the padding.", "source": "codesearchnet"}
{"code": "def preprocess_input_examples_arg_string(input_examples_str):\n    input_dict = preprocess_input_exprs_arg_string(input_examples_str)\n    for input_key, example_list in input_dict.items():\n        if not isinstance(example_list, list):\n            raise ValueError('tf.Example input must be a list of dictionaries, but \"%s\" is %s' % (example_list, type(example_list)))\n        input_dict[input_key] = [_create_example_string(example) for example in example_list]\n    return input_dict", "docstring": "Parses input into dict that maps input keys to lists of tf.Example.\n\nParses input string in the format of 'input_key1=[{feature_name:\nfeature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary\nthat maps each input_key to its list of serialized tf.Example.\n\nArgs:\ninput_examples_str: A string that specifies a list of dictionaries of\nfeature_names and their feature_lists for each input.\nEach input is separated by semicolon. For each input key:\n'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]'\nitems in feature_list can be the type of float, int, long or str.\n\nReturns:\nA dictionary that maps input keys to lists of serialized tf.Example.\n\nRaises:\nValueError: An error when the given tf.Example is not a list.", "source": "github-repos"}
{"code": "def _run_graph(self, device, input_shape, axes, num_layers, mode, scale, train, num_iters):\n    graph = ops.Graph()\n    with graph.as_default():\n        outputs = build_graph(device, input_shape, axes, num_layers, mode, scale, train)\n    with session_lib.Session(graph=graph) as session:\n        variables.global_variables_initializer().run()\n        _ = session.run([out.op for out in outputs])\n        start_time = time.time()\n        for _ in range(num_iters):\n            _ = session.run([out.op for out in outputs])\n        duration = time.time() - start_time\n    print('%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs' % (device, len(input_shape), len(axes), num_layers, mode, scale, train, duration / num_iters))\n    name_template = 'batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_layers_{num_layers}_scale_{scale}_train_{train}'\n    self.report_benchmark(name=name_template.format(device=device, mode=mode, num_layers=num_layers, scale=scale, train=train, shape=str(input_shape).replace(' ', ''), axes=str(axes)).replace(' ', ''), iters=num_iters, wall_time=duration / num_iters)\n    return duration", "docstring": "Run the graph and print its execution time.\n\nArgs:\ndevice: string, the device to run on.\ninput_shape: shape of the input tensor.\naxes: axes that are to be normalized across.\nnum_layers: number of batch normalization layers in the graph.\nmode: \"op\", \"py\" or \"slow\" depending on the implementation.\nscale: scale after normalization.\ntrain: if true, also run backprop.\nnum_iters: number of steps to run.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def _parse_trunk_native_vlan(self, config):\n        \n        match = re.search(r'switchport trunk native vlan (\\d+)', config)\n        return dict(trunk_native_vlan=match.group(1))", "docstring": "Scans the specified config and parse the trunk native vlan value\n\nArgs:\nconfig (str): The interface configuration block to scan\n\nReturns:\ndict: A Python dict object with the value of switchport trunk\nnative vlan value.  The dict returned is intended to be\nmerged into the resource dict", "source": "juraj-google-style"}
{"code": "def get_yaml_parser_roundtrip():\n    yaml_writer = yamler.YAML(typ='rt', pure=True)\n    yaml_writer.indent(mapping=2, sequence=4, offset=2)\n    return yaml_writer", "docstring": "Create the yaml parser object with this factory method.\n\nThe round-trip parser preserves:\n- comments\n- block style and key ordering are kept, so you can diff the round-tripped\nsource\n- flow style sequences ( ‘a: b, c, d’) (based on request and test by\nAnthony Sottile)\n- anchor names that are hand-crafted (i.e. not of the form``idNNN``)\n- merges in dictionaries are preserved\n\nReturns:\nruamel.yaml.YAML object with round-trip loader", "source": "codesearchnet"}
{"code": "def _post_process(self, feed_item, item):\n    if item['assetIdentifier']['name']:\n        feed_item[FieldMap.CREATIVE_ASSET_NAME] = item['assetIdentifier']['name']", "docstring": "Maps ids and names of related entities so they can be updated in the Bulkdozer feed.\n\nWhen Bulkdozer is done processing an item, it writes back the updated names\nand ids of related objects, this method makes sure those are updated in the\ncreative asset feed.\n\nArgs:\nfeed_item: Feed item representing the creative asset from the Bulkdozer\nfeed.\nitem: The DCM creative asset being updated or created.", "source": "github-repos"}
{"code": "def _assign_method(self, resource_class, method_type):\n    \"\\n        If we assigned the same method to each method, it's the same\\n        method in memory, so we need one for each acceptable HTTP method.\\n        \"\n    method_name = resource_class.get_method_name(resource_class, method_type)\n    valid_status_codes = getattr(resource_class.Meta, 'valid_status_codes', DEFAULT_VALID_STATUS_CODES)\n\n    def get(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs):\n        return self.call_api(method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs)\n\n    def put(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs):\n        return self.call_api(method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs)\n\n    def post(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs):\n        return self.call_api(method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs)\n\n    def patch(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs):\n        return self.call_api(method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs)\n\n    def delete(self, method_type=method_type, method_name=method_name, valid_status_codes=valid_status_codes, resource=resource_class, data=None, uid=None, **kwargs):\n        return self.call_api(method_type, method_name, valid_status_codes, resource, data, uid=uid, **kwargs)\n    method_map = {'GET': get, 'PUT': put, 'POST': post, 'PATCH': patch, 'DELETE': delete}\n    setattr(self, method_name, types.MethodType(method_map[method_type], self))", "docstring": "Using reflection, assigns a new method to this class.\n\nArgs:\nresource_class: A resource class\nmethod_type: The HTTP method type", "source": "codesearchnet"}
{"code": "def item(self, key):\n    \n    return _item.Item(self._name, key, context=self._context)", "docstring": "Retrieves an Item object for the specified key in this bucket.\n\nThe item need not exist.\n\nArgs:\nkey: the key of the item within the bucket.\nReturns:\nAn Item instance representing the specified key.", "source": "juraj-google-style"}
{"code": "def update_handler(Model, name=None, **kwds):\n    async def action_handler(service, action_type, payload, props, notify=True, **kwds):\n        if action_type == get_crud_action('update', name or Model):\n            try:\n                message_props = {}\n                if 'correlation_id' in props:\n                    message_props['correlation_id'] = props['correlation_id']\n                pk_field = Model.primary_key()\n                if not pk_field.name in payload:\n                    raise ValueError(\"Must specify the pk of the model when updating\")\n                model = Model.select().where(pk_field == payload[pk_field.name]).get()\n                payload.pop(pk_field.name, None)\n                for key, value in payload.items():\n                    setattr(model, key, value)\n                model.save()\n                if notify:\n                    await service.event_broker.send(\n                        payload=ModelSerializer().serialize(model),\n                        action_type=change_action_status(action_type, success_status()),\n                        **message_props\n                    )\n            except Exception as err:\n                if notify:\n                    await service.event_broker.send(\n                        payload=str(err),\n                        action_type=change_action_status(action_type, error_status()),\n                        **message_props\n                    )\n                else:\n                    raise err\n    return action_handler", "docstring": "This factory returns an action handler that updates an instance of\nthe specified model when an update action is received, assuming the\naction follows nautilus conventions.\n\nArgs:\nModel (nautilus.BaseModel): The model to update when the action\nis received.\n\nReturns:\nfunction(type, payload): The action handler for this model", "source": "juraj-google-style"}
{"code": "def _find_furious_yaml(start, checked):\n    \n    directory = start\n    while directory not in checked:\n        checked.add(directory)\n        for fs_yaml_name in FURIOUS_YAML_NAMES:\n            yaml_path = os.path.join(directory, fs_yaml_name)\n            if os.path.exists(yaml_path):\n                return yaml_path\n        directory = os.path.dirname(directory)\n    return None", "docstring": "Traverse the directory tree identified by start\nuntil a directory already in checked is encountered or the path\nof furious.yaml is found.\n\nChecked is present both to make the loop termination easy\nto reason about and so the same directories do not get\nrechecked\n\nArgs:\nstart: the path to start looking in and work upward from\nchecked: the set of already checked directories\n\nReturns:\nthe path of the furious.yaml file or None if it is not found", "source": "juraj-google-style"}
{"code": "def getexcfo(e):\n    tb = sys.exc_info()[2]\n    tbinfo = traceback.extract_tb(tb)\n    (path, line, name, src) = ('', '', '', None)\n    if tbinfo:\n        (path, line, name, src) = tbinfo[(- 1)]\n    retd = {'msg': str(e), 'file': path, 'line': line, 'name': name, 'src': src}\n    if isinstance(e, s_exc.SynErr):\n        retd['syn:err'] = e.errinfo\n    return (e.__class__.__name__, retd)", "docstring": "Get an err tufo from an exception.\n\nArgs:\ne (Exception): An Exception (or Exception subclass).\n\nNotes:\nThis can be called outside of the context of an exception handler,\nhowever details such as file, line, function name and source may be\nmissing.\n\nReturns:\n((str, dict)): A tuple of the exception class name and the error info dict.", "source": "codesearchnet"}
{"code": "def GetFileObject(self, data_stream_name=''):\n    \n    data_stream_names = [\n        data_stream.name for data_stream in self._GetDataStreams()]\n    if data_stream_name and data_stream_name not in data_stream_names:\n      return None\n\n    path_spec = copy.deepcopy(self.path_spec)\n    if data_stream_name:\n      \n      \n      \n      \n      if self._file_system.IsHFS() and data_stream_name == 'DECOMP':\n        data_stream_name = ''\n\n      setattr(path_spec, 'data_stream', data_stream_name)\n\n    return resolver.Resolver.OpenFileObject(\n        path_spec, resolver_context=self._resolver_context)", "docstring": "Retrieves the file-like object.\n\nArgs:\ndata_stream_name (Optional[str]): data stream name, where an empty\nstring represents the default data stream.\n\nReturns:\nTSKFileIO: file-like object or None.", "source": "juraj-google-style"}
{"code": "def convert_config_value(self, value, label):\n    if isinstance(value, six.string_types):\n        value = value.lower()\n    if (value in self.TRUTHY_VALUES):\n        return True\n    elif (value in self.FALSY_VALUES):\n        return False\n    else:\n        raise YapconfValueError('Cowardly refusing to interpret config value as a boolean. Name: {0}, Value: {1}'.format(self.name, value))", "docstring": "Converts all 'Truthy' values to True and 'Falsy' values to False.\n\nArgs:\nvalue: Value to convert\nlabel: Label of the config in which this item was found.\n\nReturns:\nbool: True for truthy values, False for falsy values.\n\nRaises:\nYapconfValueError: If the value is neither truthy nor falsy.", "source": "codesearchnet"}
{"code": "def argpartition(x, kth, axis=-1):\n    if any_symbolic_tensors((x,)):\n        return Argpartition(kth, axis).symbolic_call(x)\n    return backend.numpy.argpartition(x, kth, axis)", "docstring": "Performs an indirect partition along the given axis.\n\nIt returns an array\nof indices of the same shape as `x` that index data along the given axis\nin partitioned order.\n\nArgs:\nx: Array to sort.\nkth: Element index to partition by.\nThe k-th element will be in its final sorted position and all\nsmaller elements will be moved before it and all larger elements\nbehind it. The order of all elements in the partitions is undefined.\nIf provided with a sequence of k-th it will partition all of them\ninto their sorted position at once.\naxis: Axis along which to sort. The default is -1 (the last axis).\nIf `None`, the flattened array is used.\n\nReturns:\nArray of indices that partition `x` along the specified `axis`.", "source": "github-repos"}
{"code": "def get_input_mask_at(self, node_index):\n    inputs = self.get_input_at(node_index)\n    if isinstance(inputs, list):\n        return [getattr(x, '_keras_mask', None) for x in inputs]\n    else:\n        return getattr(inputs, '_keras_mask', None)", "docstring": "Retrieves the input mask tensor(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom which to retrieve the attribute.\nE.g. `node_index=0` will correspond to the\nfirst time the layer was called.\n\nReturns:\nA mask tensor\n(or list of tensors if the layer has multiple inputs).", "source": "github-repos"}
{"code": "def get_data_xls(file_name, file_contents=None, on_demand=False):\n    \n    def tuple_to_iso_date(tuple_date):\n        \n        (y,m,d, hh,mm,ss) = tuple_date\n        non_zero = lambda n: n!=0\n        date = \"%04d-%02d-%02d\"  % (y,m,d)    if list(filter(non_zero, (y,m,d)))               else ''\n        time = \"T%02d:%02d:%02d\" % (hh,mm,ss) if list(filter(non_zero, (hh,mm,ss))) or not date else ''\n        return date+time\n\n    def format_excel_val(book, val_type, value, want_tuple_date):\n        \n        \n        \n        \n        \n        \n        \n        \n        if   val_type == 2: \n            if value == int(value): value = int(value)\n        elif val_type == 3: \n            datetuple = xlrd.xldate_as_tuple(value, book.datemode)\n            value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple)\n        elif val_type == 5: \n            value = xlrd.error_text_from_code[value]\n        return value\n\n    def xlrd_xsl_to_array(file_name, file_contents=None):\n        \n        book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)\n        formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False)\n        row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r))))\n\n        data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]\n        if not on_demand:\n            for sheet in data:\n                sheet.load()\n            book.release_resources()\n        return data\n\n    return xlrd_xsl_to_array(file_name, file_contents)", "docstring": "Loads the old excel format files. New format files will automatically\nget loaded as well.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy.", "source": "juraj-google-style"}
{"code": "def quickhull(sample):\n    \n\n    link = lambda a, b: np.concatenate((a, b[1:]))\n    edge = lambda a, b: np.concatenate(([a], [b]))\n\n    def dome(sample, base):\n        h, t = base\n        dists = np.dot(sample - h, np.dot(((0, -1), (1, 0)), (t - h)))\n        outer = np.repeat(sample, dists > 0, axis=0)\n\n        if len(outer):\n            pivot = sample[np.argmax(dists)]\n            return link(dome(outer, edge(h, pivot)),\n                dome(outer, edge(pivot, t)))\n        else:\n            return base\n\n    if len(sample) > 2:\n        axis = sample[:, 0]\n        base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)\n        return link(dome(sample, base),\n            dome(sample, base[::-1]))\n    else:\n        return sample", "docstring": "Find data points on the convex hull of a supplied data set\n\nArgs:\nsample: data points as column vectors n x d\nn - number samples\nd - data dimension (should be two)\n\nReturns:\na k x d matrix containint the convex hull data points", "source": "juraj-google-style"}
{"code": "def _queue_dag(self, name, *, data=None):\n    if self._stop_workflow:\n        return None\n    if (name not in self._dags_blueprint):\n        raise DagNameUnknown()\n    new_dag = copy.deepcopy(self._dags_blueprint[name])\n    new_dag.workflow_name = self.name\n    self._dags_running[new_dag.name] = self._celery_app.send_task(JobExecPath.Dag, args=(new_dag, self._workflow_id, data), queue=new_dag.queue, routing_key=new_dag.queue)\n    return new_dag.name", "docstring": "Add a new dag to the queue.\n\nIf the stop workflow flag is set, no new dag can be queued.\n\nArgs:\nname (str): The name of the dag that should be queued.\ndata (MultiTaskData): The data that should be passed on to the new dag.\n\nRaises:\nDagNameUnknown: If the specified dag name does not exist\n\nReturns:\nstr: The name of the queued dag.", "source": "codesearchnet"}
{"code": "def load_config(self, filepath=None):\n        \n\n        \n\n        def load_settings(filepath):\n            \n\n            instruments_loaded = {}\n            probes_loaded = {}\n            scripts_loaded = {}\n\n            if filepath and os.path.isfile(filepath):\n                in_data = load_b26_file(filepath)\n\n                instruments = in_data['instruments'] if 'instruments' in in_data else {}\n                scripts = in_data['scripts'] if 'scripts' in in_data else {}\n                probes = in_data['probes'] if 'probes' in in_data else {}\n\n                try:\n                    instruments_loaded, failed = Instrument.load_and_append(instruments)\n                    if len(failed) > 0:\n                        print(('WARNING! Following instruments could not be loaded: ', failed))\n\n                    scripts_loaded, failed, instruments_loaded = Script.load_and_append(\n                        script_dict=scripts,\n                        instruments=instruments_loaded,\n                        log_function=self.log,\n                        data_path=self.gui_settings['data_folder'])\n\n                    if len(failed) > 0:\n                        print(('WARNING! Following scripts could not be loaded: ', failed))\n\n                    probes_loaded, failed, instruments_loadeds = Probe.load_and_append(\n                        probe_dict=probes,\n                        probes=probes_loaded,\n                        instruments=instruments_loaded)\n\n                    self.log('Successfully loaded from previous save.')\n                except ImportError:\n                    self.log('Could not load instruments or scripts from file.')\n                    self.log('Opening with blank GUI.')\n            return instruments_loaded, scripts_loaded, probes_loaded\n\n        config = None\n\n        try:\n            config = load_b26_file(filepath)\n            config_settings = config['gui_settings']\n            if config_settings['gui_settings'] != filepath:\n                print((\n                'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format(\n                    config_settings['gui_settings'], filepath)))\n            config_settings['gui_settings'] = filepath\n        except Exception as e:\n            if filepath:\n                self.log('The filepath was invalid --- could not load settings. Loading blank GUI.')\n            config_settings = self._DEFAULT_CONFIG\n\n\n            for x in self._DEFAULT_CONFIG.keys():\n                if x in config_settings:\n                    if not os.path.exists(config_settings[x]):\n                        try:\n                            os.makedirs(config_settings[x])\n                        except Exception:\n                            config_settings[x] = self._DEFAULT_CONFIG[x]\n                            os.makedirs(config_settings[x])\n                            print(('WARNING: failed validating or creating path: set to default path'.format(config_settings[x])))\n                else:\n                    config_settings[x] = self._DEFAULT_CONFIG[x]\n                    os.makedirs(config_settings[x])\n                    print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config_settings[x])))\n\n        \n        if filepath is not None and os.path.exists(os.path.dirname(filepath)):\n            config_settings['gui_settings'] = filepath\n\n        self.gui_settings = config_settings\n\n        if(config):\n            self.gui_settings_hidden = config['gui_settings_hidden']\n        else:\n            self.gui_settings_hidden['script_source_folder'] = ''\n\n        self.instruments, self.scripts, self.probes = load_settings(filepath)\n\n\n        self.refresh_tree(self.tree_gui_settings, self.gui_settings)\n        self.refresh_tree(self.tree_scripts, self.scripts)\n        self.refresh_tree(self.tree_settings, self.instruments)\n\n        self._hide_parameters(filepath)", "docstring": "checks if the file is a valid config file\nArgs:\nfilepath:", "source": "juraj-google-style"}
{"code": "def slice_arrays(arrays, indices, contiguous=True):\n    converted_to_list = False\n    if not isinstance(arrays, list):\n        converted_to_list = True\n        arrays = [arrays]\n    if any((tensor_util.is_tf_type(x) for x in arrays)):\n        if not contiguous:\n            entries = [[x[i:i + 1] for i in indices] for x in arrays]\n            slices = [array_ops.concat(x, axis=0) for x in entries]\n        else:\n            slices = [x[indices[0]:indices[-1] + 1] for x in arrays]\n    else:\n        slices = generic_utils.slice_arrays(arrays, indices)\n    if converted_to_list:\n        slices = slices[0]\n    return slices", "docstring": "Slices batches out of provided arrays (workaround for eager tensors).\n\nUnfortunately eager tensors don't have the same slicing behavior as\nNumpy arrays (they follow the same slicing behavior as symbolic TF tensors),\nhence we cannot use `generic_utils.slice_arrays` directly\nand we have to implement this workaround based on `concat`. This has a\nperformance cost.\n\nArgs:\narrays: Single array or list of arrays.\nindices: List of indices in the array that should be included in the output\nbatch.\ncontiguous: Boolean flag indicating whether the indices are contiguous.\n\nReturns:\nSlice of data (either single array or list of arrays).", "source": "github-repos"}
{"code": "def __init__(self,corpus_dir,datastore_type='file',db_name='corpus.db'):\n        \n\n        self.g = Goose({'browser_user_agent': 'Mozilla','parser_class':'soup'})\n        \n        self.corpus_dir = corpus_dir\n        self.datastore_type = datastore_type\n        self.db_name = db_name\n        self.stats = defaultdict(int)\n\n        self._create_corpus_dir(self.corpus_dir)\n\n        self.db = None\n        if self.datastore_type == 'sqlite':\n            self.db = self.corpus_dir + '/' + self.db_name\n            self._set_up_db(self.db)", "docstring": "Read links and associated categories for specified articles\nin text file seperated by a space\n\nArgs:\ncorpus_dir (str): The directory to save the generated corpus\ndatastore_type (Optional[str]): Format to save generated corpus.\nSpecify either 'file' or 'sqlite'.\ndb_name (Optional[str]): Name of database if 'sqlite' is selected.", "source": "juraj-google-style"}
{"code": "def parse(self, args: List[str]) -> Optional[argparse.Namespace]:\n    try:\n        return self._parser.parse_args(args)\n    except KeyboardInterrupt:\n        raise\n    except:\n        return None", "docstring": "Parses a list of string inputs.\n\nThe parsed namespace contains these attributes:\noutput_name: Optional[str], the output variable name.\nverbose: bool, whether to display more details of the magic execution.\nquery: Optional[List[str]], the beam SQL query to execute.\n\nReturns:\nThe parsed args or None if fail to parse.", "source": "github-repos"}
{"code": "def get_by(self, field, value):\n        \n        if not field:\n            logger.exception(RESOURCE_CLIENT_INVALID_FIELD)\n            raise ValueError(RESOURCE_CLIENT_INVALID_FIELD)\n\n        filter = \"\\\"{0}='{1}'\\\"\".format(field, value)\n        results = self.get_all(filter=filter)\n\n        \n        if \".\" not in field:\n            \n            results = [item for item in results if str(item.get(field, \"\")).lower() == value.lower()]\n\n        return results", "docstring": "Get the resource by passing a field and its value.\n\nNote:\nThis function uses get_all passing a filter.The search is case-insensitive.\n\nArgs:\nfield: Field name to filter.\nvalue: Value to filter.\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def _parse_networks(self, config):\n    networks = list()\n    regexp = 'network (.+)/(\\\\d+) area (\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+)'\n    matches = re.findall(regexp, config)\n    for (network, netmask, area) in matches:\n        networks.append(dict(network=network, netmask=netmask, area=area))\n    return dict(networks=networks)", "docstring": "Parses config file for the networks advertised\nby the OSPF process\n\nArgs:\nconfig(str):  Running configuration\nReturns:\nlist: dict:\nkeys: network (str)\nnetmask (str)\narea (str)", "source": "codesearchnet"}
{"code": "def _vmap_for_bhqkv(mask_function: Callable, bh_indices: bool=True) -> Callable:\n    dimensions = [(None, None, None, 0), (None, None, 0, None)]\n    if bh_indices:\n        dimensions.extend([(None, 0, None, None), (0, None, None, None)])\n    for dims in dimensions:\n        mask_function = torch.vmap(mask_function, in_dims=dims, out_dims=0)\n    return mask_function", "docstring": "Used to vmap our mask_functions over the q_idx and kv_idx dimensions of the inputs. Optionally, vmap over\nthe batch and head indices as well if `bh_indices=True`.\nUsing vmap here allows us to keep the performance of vectorized ops, while having a single set of primitive\nfunctions between attention interfaces (i.e. between flex and sdpa/eager, FA2 being a bit different).\n\nArgs:\nmask_function (`Callable`):\nThe mask_function to vmap.\nbh_indices (`bool`, optional):\nWhether to vmap over the batch and head indices as well, or only q and kv indices.\n\nReturns:\nCallable: The vmapped function.", "source": "github-repos"}
{"code": "def encode_function_call(self, function_name, args):\n    if (function_name not in self.function_data):\n        raise ValueError('Unknown function {}'.format(function_name))\n    description = self.function_data[function_name]\n    function_selector = zpad(encode_int(description['prefix']), 4)\n    arguments = encode_abi(description['encode_types'], args)\n    return (function_selector + arguments)", "docstring": "Return the encoded function call.\n\nArgs:\nfunction_name (str): One of the existing functions described in the\ncontract interface.\nargs (List[object]): The function arguments that will be encoded and\nused in the contract execution in the vm.\n\nReturn:\nbin: The encoded function name and arguments so that it can be used\nwith the evm to execute a function call, the binary string follows\nthe Ethereum Contract ABI.", "source": "codesearchnet"}
{"code": "def __init__(self, features: List[np.ndarray], timestamps: np.ndarray, schema: Optional[Schema]=None) -> None:\n    self.features = features\n    self.timestamps = timestamps\n    if schema is not None:\n        self.check_schema(schema)", "docstring": "Initializes the IndexData object by checking and setting the features\nand timestamps.\n\nRaises:\nValueError: If features are not one-dimensional arrays.\nValueError: If the number of elements in features and timestamps\ndo not match.", "source": "github-repos"}
{"code": "def insert_arguments_into_match_query(compilation_result, arguments):\n    \n    if compilation_result.language != MATCH_LANGUAGE:\n        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))\n\n    base_query = compilation_result.query\n    argument_types = compilation_result.input_metadata\n\n    \n    sanitized_arguments = {\n        key: _safe_match_argument(argument_types[key], value)\n        for key, value in six.iteritems(arguments)\n    }\n\n    return base_query.format(**sanitized_arguments)", "docstring": "Insert the arguments into the compiled MATCH query to form a complete query.\n\nArgs:\ncompilation_result: a CompilationResult object derived from the GraphQL compiler\narguments: dict, mapping argument name to its value, for every parameter the query expects.\n\nReturns:\nstring, a MATCH query with inserted argument data", "source": "juraj-google-style"}
{"code": "def get_updates_for(self, inputs):\n    warnings.warn('`layer.get_updates_for` is deprecated and will be removed in a future version. Please use `layer.updates` method instead.')\n    return self.updates", "docstring": "Deprecated, do NOT use!\n\nRetrieves updates relevant to a specific set of inputs.\n\nArgs:\ninputs: Input tensor or list/tuple of input tensors.\n\nReturns:\nList of update ops of the layer that depend on `inputs`.", "source": "github-repos"}
{"code": "def load_method(path, method, class_name=None, instance_creator=None):\n    module = load_module(path)\n    if class_name:\n        class_type = getattr(module, class_name)\n        if instance_creator:\n            # Walk the dotted instance_creator path to reach the factory callable.\n            ic_rest = instance_creator\n            nxt = module\n            while '.' in ic_rest:\n                nxt = getattr(nxt, ic_rest.split('.')[0])\n                ic_rest = '.'.join(ic_rest.split('.')[1:])\n            instance = getattr(nxt, ic_rest)()\n        else:\n            instance = class_type()\n        return getattr(instance, method)\n    else:\n        return getattr(module, method)", "docstring": "Returns an instance of the method specified.\nArgs:\npath            : The path to the module containing the method or function.\nmethod          : The name of the function.\nclass_name      : The name of the class if the function is a method.\ninstance_creator: The name of the method to return the class instance.", "source": "juraj-google-style"}
{"code": "def compute_invariants(self, graph_file, input_format, invariants=Invariants.ALL, email=None, use_threads=False, callback=None):\n    if (email is None):\n        email = self.email\n    if (input_format not in GraphFormats._any):\n        raise ValueError('Invalid input format, {}.'.format(input_format))\n    if (not (set(invariants) <= set(Invariants.ALL))):\n        raise ValueError('Invariants must be a subset of Invariants.ALL.')\n    if (use_threads and (callback is not None)):\n        if (not hasattr(callback, '__call__')):\n            raise ValueError('callback must be a function.')\n        if (len(inspect.getargspec(callback).args) != 1):\n            raise ValueError('callback must take exactly 1 argument.')\n    url = 'graphupload/{}/{}/{}/'.format(email, input_format, '/'.join(invariants))\n    if (' ' in url):\n        raise ValueError('Arguments cannot have spaces in them.')\n    if (not os.path.exists(graph_file)):\n        raise ValueError('File {} does not exist.'.format(graph_file))\n    if use_threads:\n        upload_thread = threading.Thread(target=self._run_compute_invariants, args=[url, graph_file, callback])\n        upload_thread.start()\n    else:\n        return self._run_compute_invariants(url, graph_file)\n    return", "docstring": "Compute invariants from an existing GraphML file using the remote\ngrute graph services.\n\nArguments:\ngraph_file (str): The filename of the graphml file\ninput_format (str): One of grute.GraphFormats\ninvariants (str[]: Invariants.ALL)*: An array of grute.Invariants\nto compute on the graph\nemail (str: self.email)*: The email to notify upon completion\nuse_threads (bool: False)*: Whether to use Python threads to run\ncomputation in the background when waiting for the server to\nreturn the invariants\ncallback (function: None)*: The function to run upon completion of\nthe call, if using threads. (Will not be called if use_threads\nis set to False.)\n\nReturns:\nHTTP Response if use_threads is False. Otherwise, None\n\nRaises:\nValueError: If the graph file does not exist, or if there are\nissues with the passed arguments\nRemoteDataUploadError: If there is an issue packing the file\nRemoteError: If the server experiences difficulty computing invs", "source": "codesearchnet"}
{"code": "def _GetTableNames(self, database):\n    \n    table_names = []\n    for esedb_table in database.tables:\n      table_names.append(esedb_table.name)\n\n    return table_names", "docstring": "Retrieves the table names in a database.\n\nArgs:\ndatabase (pyesedb.file): ESE database.\n\nReturns:\nlist[str]: table names.", "source": "juraj-google-style"}
{"code": "def plot_axis(self, ax, legend, ladder=False, default_width=1, match_only=None, colour=None, colour_function=None, cmap=None, default=None, width_field=None, **kwargs):\n    default_c = None\n    patches = []\n    for iv in self.__list:\n        origin = (0, iv.top.z)\n        d = legend.get_decor(iv.primary, match_only=match_only)\n        thick = (iv.base.z - iv.top.z)\n        if ladder:\n            if (width_field is not None):\n                w = iv.data.get(width_field, 1)\n                w = ((default_width * w) / self.max_field(width_field))\n                default_c = 'gray'\n            elif (legend is not None):\n                w = (d.width or default_width)\n                try:\n                    w = ((default_width * w) / legend.max_width)\n                except:\n                    w = default_width\n        else:\n            w = default_width\n        this_patch_kwargs = kwargs.copy()\n        lw = this_patch_kwargs.pop('lw', 0)\n        ec = this_patch_kwargs.pop('ec', 'k')\n        fc = (this_patch_kwargs.pop('fc', None) or default_c or d.colour)\n        if (colour is None):\n            rect = mpl.patches.Rectangle(origin, w, thick, fc=fc, lw=lw, hatch=d.hatch, ec=ec, **this_patch_kwargs)\n            ax.add_patch(rect)\n        else:\n            rect = mpl.patches.Rectangle(origin, w, thick, lw=lw, ec=ec, **this_patch_kwargs)\n            patches.append(rect)\n    if (colour is not None):\n        cmap = (cmap or 'viridis')\n        p = mpl.collections.PatchCollection(patches, cmap=cmap, lw=lw)\n        p.set_array(self.get_data(colour, colour_function, default=default))\n        ax.add_collection(p)\n        cb = plt.colorbar(p)\n        cb.outline.set_linewidth(0)\n    return ax", "docstring": "Plotting, but only the Rectangles. You have to set up the figure.\nReturns a matplotlib axis object.\n\nArgs:\nax (axis): The matplotlib axis to plot into.\nlegend (Legend): The Legend to use for colours, etc.\nladder (bool): Whether to use widths or not. Default False.\ndefault_width (int): A width for the plot if not using widths.\nDefault 1.\nmatch_only (list): A list of strings matching the attributes you\nwant to compare when plotting.\ncolour (str): Which data field to use for colours.\ncmap (cmap): Matplotlib colourmap. Default ``viridis``.\ndefault (float): The default (null) value.\nwidth_field (str): The field to use for the width of the patches.\n**kwargs are passed through to matplotlib's ``patches.Rectangle``.\n\nReturns:\naxis: The matplotlib.pyplot axis.", "source": "codesearchnet"}
{"code": "def localize_file(path_or_buffer):\n    \n\n    path_or_buffer = _stringify_path(path_or_buffer)\n\n    if _is_url(path_or_buffer):\n        req = urlopen(path_or_buffer)\n        filename = os.path.basename(req.geturl())\n        if os.path.splitext(filename)[-1] != \".pdf\":\n            pid = os.getpid()\n            filename = \"{0}.pdf\".format(pid)\n\n        with open(filename, 'wb') as f:\n            shutil.copyfileobj(req, f)\n\n        return filename, True\n\n    elif is_file_like(path_or_buffer):\n        pid = os.getpid()\n        filename = \"{0}.pdf\".format(pid)\n\n        with open(filename, 'wb') as f:\n            shutil.copyfileobj(path_or_buffer, f)\n\n        return filename, True\n\n    \n    else:\n        return os.path.expanduser(path_or_buffer), False", "docstring": "Ensure the target file is available locally.\n\nIf the target file is remote, this function fetches it into local storage.\n\nArgs:\npath_or_buffer (str):\nFile path or file like object or URL of target file.\n\nReturns:\nfilename (str): file name in local storage\ntemporary_file_flag (bool): temporary file flag", "source": "juraj-google-style"}
{"code": "def stop_gradient(input_layer):\n    if input_layer.is_sequence():\n        result = [tf.stop_gradient(t) for t in input_layer.sequence]\n        return input_layer.with_sequence(result)\n    else:\n        return tf.stop_gradient(input_layer)", "docstring": "Cuts off the gradient at this point.\n\nThis works on both sequence and regular Pretty Tensors.\n\nArgs:\ninput_layer: The input.\nReturns:\nA new Pretty Tensor of the same type with stop_gradient applied.", "source": "codesearchnet"}
{"code": "def clone(self, *args, **overrides):\n        \n        clone = super(Layout, self).clone(*args, **overrides)\n        clone._max_cols = self._max_cols\n        return clone", "docstring": "Clones the Layout, overriding data and parameters.\n\nArgs:\ndata: New data replacing the existing data\nshared_data (bool, optional): Whether to use existing data\nnew_type (optional): Type to cast object to\n*args: Additional arguments to pass to constructor\n**overrides: New keyword arguments to pass to constructor\n\nReturns:\nCloned Layout object", "source": "juraj-google-style"}
{"code": "def get_peers(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_PEERS, id=id, endpoint=endpoint)", "docstring": "Get the current peers of a remote node\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def _write_install_json(self, filename, install_json):\n        \n        \n        if os.path.isfile(filename):\n            with open(filename, 'w') as fh:\n                json.dump(install_json, fh, indent=4, sort_keys=True)\n        else:\n            err = 'Could not write file: {}.'.format(filename)\n            \n            self.package_data['errors'].append(err)", "docstring": "Write install.json file.\n\nSome projects have bundles App with multiple install.json files.  Typically these files are\nprefixed with the App name (e.g., MyApp.install.json).\n\nArgs:\nfilename (str): The install.json file name.\ninstall_json (dict): The contents of the install.json file.", "source": "juraj-google-style"}
{"code": "def find_rootfs(conn, disk_root):\n    rootfs = conn.inspect_os()\n    if ((not rootfs) or (len(rootfs) > 1)):\n        filesystems = conn.list_filesystems()\n        if (disk_root in filesystems):\n            rootfs = [disk_root]\n        else:\n            rootfs = [fs for fs in filesystems.keys() if (disk_root in fs)]\n            if (not rootfs):\n                raise GuestFSError('no root fs {0} could be found from list {1}'.format(disk_root, str(filesystems)))\n    return sorted(rootfs)[0]", "docstring": "Find the image's device root filesystem, and return its path.\n\n1. Use :func:`guestfs.GuestFS.inspect_os` method. If it returns more than\none root filesystem or None, try:\n2. Find an exact match of `disk_root` from\n:func:`guestfs.GuestFS.list_filesystems`, if none is found, try:\n3. Return the device that has the substring `disk_root` contained in it,\nfrom the output of :func:`guestfs.GuestFS.list_filesystems`.\n\nArgs:\nconn(guestfs.GuestFS): Open GuestFS handle.\ndisk_root(str): Root device to search for. Note that by default, if\nguestfs can deduce the filesystem, it will not be used.\n\nReturns:\nstr: root device path\n\nRaises:\n:exc:`GuestFSError` if no root filesystem was found", "source": "codesearchnet"}
{"code": "def add_vectors(self, vectors):\n    if isinstance(vectors[0], (list, np.ndarray)):\n        for vec in vectors:\n            self.vectors.append(vec)\n    else:\n        self.vectors.append(vectors)", "docstring": "Add a list of vectors to Bloch sphere.\n\nArgs:\nvectors (array_like):\nArray with vectors of unit length or smaller.", "source": "codesearchnet"}
{"code": "def inspect_task(self, task):\n        \n        url = self._url('/tasks/{0}', task)\n        return self._result(self._get(url), True)", "docstring": "Retrieve information about a task.\n\nArgs:\ntask (str): Task ID\n\nReturns:\n(dict): Information about the task.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def check_candidate_exists(self, basepath, candidates):\n        \n        checked = []\n        for item in candidates:\n            abspath = os.path.join(basepath, item)\n            if os.path.exists(abspath):\n                checked.append(abspath)\n\n        return checked", "docstring": "Check that at least one candidate exist into a directory.\n\nArgs:\nbasepath (str): Directory path where to search for candidate.\ncandidates (list): List of candidate file paths.\n\nReturns:\nlist: List of existing candidates.", "source": "juraj-google-style"}
{"code": "def __partial_trace_vec(vec, trace_systems, dimensions, reverse=True):\n    if reverse:\n        dimensions = dimensions[::(- 1)]\n        trace_systems = ((len(dimensions) - 1) - np.array(trace_systems))\n    rho = vec.reshape(dimensions)\n    rho = np.tensordot(rho, rho.conj(), axes=(trace_systems, trace_systems))\n    d = int(np.sqrt(np.product(rho.shape)))\n    return rho.reshape(d, d)", "docstring": "Partial trace over subsystems of multi-partite vector.\n\nArgs:\nvec (vector_like): complex vector N\ntrace_systems (list(int)): a list of subsystems (starting from 0) to\ntrace over.\ndimensions (list(int)): a list of the dimensions of the subsystems.\nIf this is not set it will assume all\nsubsystems are qubits.\nreverse (bool): ordering of systems in operator.\nIf True system-0 is the right most system in tensor product.\nIf False system-0 is the left most system in tensor product.\n\nReturns:\nndarray: A density matrix with the appropriate subsystems traced over.", "source": "codesearchnet"}
{"code": "def Pack(cls, obj, version):\n    if isinstance(obj, ServiceQuery):\n        return str(obj)\n    return obj", "docstring": "Pack the given object using AdWords-specific logic.\n\nArgs:\nobj: an object to be packed for SOAP using AdWords-specific logic, if\napplicable.\nversion: the version of the current API, e.g. 'v201809'\n\nReturns:\nThe given object packed with AdWords-specific logic for SOAP, if\napplicable. Otherwise, returns the given object unmodified.", "source": "codesearchnet"}
{"code": "def get_normalized_variable_map(scope_or_module, collection=tf.GraphKeys.GLOBAL_VARIABLES, context=None, group_sliced_variables=True):\n    scope_name = get_variable_scope_name(scope_or_module)\n    if (context is None):\n        context = scope_or_module\n    prefix = get_variable_scope_name(context)\n    prefix_length = ((len(prefix) + 1) if prefix else 0)\n    if (not _is_scope_prefix(scope_name, prefix)):\n        raise ValueError(\"Scope '{}' is not prefixed by '{}'.\".format(scope_name, prefix))\n    variables = get_variables_in_scope(scope_name, collection)\n    if (not group_sliced_variables):\n        single_vars = variables\n        grouped_vars = dict()\n    else:\n        (single_vars, grouped_vars) = _get_sliced_variables(variables)\n    var_map = {var.op.name[prefix_length:]: var for var in single_vars}\n    for (full_name, var_group) in grouped_vars.items():\n        name = full_name[prefix_length:]\n        if (name in var_map):\n            raise ValueError(('Mixing slices and non-slices with the same name: ' + str(name)))\n        var_map[name] = var_group\n    return var_map", "docstring": "Builds map of `tf.Variable`s in scope or module with normalized names.\n\nThe names of the variables are normalized to remove the scope prefix.\n\nArgs:\nscope_or_module: Scope or module to build map from.\ncollection: Collection to restrict query to. By default this is\n`tf.Graphkeys.GLOBAL_VARIABLES`, which includes non-trainable variables\nsuch as moving averages.\ncontext: Scope or module, identical to or parent of `scope`. If given, this\nwill be used as the stripped prefix. By default `None`, which means\n`context=scope`.\ngroup_sliced_variables: Boolean, if set to True, sliced variables are\ngrouped together in the returned map; if set to False, each partition of\na sliced variable is a separate (key, value) pair.\n\nReturns:\nDictionary mapping normalized variable name to `tf.Variable`, or a list\nof `tf.Variables` if the variable is a sliced (partitioned) variable.\n\nRaises:\nValueError: If `context` is given but is not a proper prefix of `scope`.", "source": "codesearchnet"}
{"code": "def submit_batch_prediction(job_request, job_id=None):\n    if (job_id is None):\n        job_id = ('prediction_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S'))\n    job = {'job_id': job_id, 'prediction_input': job_request}\n    context = datalab.Context.default()\n    cloudml = discovery.build('ml', 'v1', credentials=context.credentials)\n    request = cloudml.projects().jobs().create(body=job, parent=('projects/' + context.project_id))\n    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'\n    request.execute()\n    return Job(job_id)", "docstring": "Submit a batch prediction job.\n\nArgs:\njob_request: the arguments of the training job in a dict. For example,\n{\n'version_name': 'projects/my-project/models/my-model/versions/my-version',\n'data_format': 'TEXT',\n'input_paths': ['gs://my_bucket/my_file.csv'],\n'output_path': 'gs://my_bucket/predict_output',\n'region': 'us-central1',\n'max_worker_count': 1,\n}\njob_id: id for the training job. If None, an id based on timestamp will be generated.\n\nReturns:\nA Job object representing the batch prediction job.", "source": "codesearchnet"}
{"code": "def get_capacity_grav(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):\n    pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)\n    normalization_mass = (self.normalization_mass if (use_overall_normalization or (len(pairs_in_range) == 0)) else pairs_in_range[(- 1)].mass_discharge)\n    return (sum([pair.mAh for pair in pairs_in_range]) / normalization_mass)", "docstring": "Get the gravimetric capacity of the electrode.\n\nArgs:\nmin_voltage (float): The minimum allowable voltage for a given\nstep.\nmax_voltage (float): The maximum allowable voltage allowable for a\ngiven step.\nuse_overall_normalization (booL): If False, normalize by the\ndischarged state of only the voltage pairs matching the voltage\ncriteria. if True, use default normalization of the full\nelectrode path.\n\nReturns:\nGravimetric capacity in mAh/g across the insertion path (a subset\nof the path can be chosen by the optional arguments).", "source": "codesearchnet"}
{"code": "def publish_metric(self, metric_name, metric_value, epoch_seconds=None):\n    if (epoch_seconds is None):\n        epoch_seconds = self._reactor.seconds()\n    self._client_factory.publish_metric(metric_name, metric_value, int(epoch_seconds))", "docstring": "Record a single hit on a given metric.\n\nArgs:\nmetric_name: The name of the metric to record with Carbon.\nmetric_value: The value to record with Carbon.\nepoch_seconds: Optionally specify the time for the metric hit.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth, max_depth):\n    side = random.randrange(2)\n    left_depth = random.randrange((min_depth if side else 0), (max_depth + 1))\n    right_depth = random.randrange((min_depth if (not side) else 0), (max_depth + 1))\n    var_index = random.randrange(len(vlist))\n    var = vlist[var_index]\n    consts = (vlist[:var_index] + vlist[(var_index + 1):])\n    left = random_expr_with_required_var(left_depth, (var if side else None), consts, ops)\n    right = random_expr_with_required_var(right_depth, (var if (not side) else None), consts, ops)\n    left_str = str(left)\n    right_str = str(right)\n    target = str(algebra_inverse_solve(left, right, var, solve_ops))\n    sample = ('%s:%s=%s' % (var, left_str, right_str))\n    return (sample, target)", "docstring": "Randomly generate an algebra inverse dataset sample.\n\nGiven an input equation and variable, produce the expression equal to the\nvariable.\n\nArgs:\nvlist: Variable list. List of chars that can be used in the expression.\nops: List of ExprOp instances. The allowed operators for the expression.\nsolve_ops: See `solve_ops` documentation in `algebra_inverse_solve`.\nmin_depth: Expression trees will not have a smaller depth than this. 0 means\nthere is just a variable. 1 means there is one operation.\nmax_depth: Expression trees will not have a larger depth than this. To make\nall trees have the same depth, set this equal to `min_depth`.\n\nReturns:\nsample: String representation of the input. Will be of the form\n'solve_var:left_side=right_side'.\ntarget: String representation of the solution.", "source": "codesearchnet"}
{"code": "def layer_preprocess(layer_input, hparams, layer_collection=None):\n  \n  assert \"a\" not in hparams.layer_preprocess_sequence, (\n      \"No residual connections allowed in hparams.layer_preprocess_sequence\")\n  assert \"z\" not in hparams.layer_preprocess_sequence, (\n      \"No residual connections allowed in hparams.layer_preprocess_sequence\")\n  return layer_prepostprocess(\n      None,\n      layer_input,\n      sequence=hparams.layer_preprocess_sequence,\n      dropout_rate=hparams.layer_prepostprocess_dropout,\n      norm_type=hparams.norm_type,\n      depth=None,\n      epsilon=hparams.norm_epsilon,\n      dropout_broadcast_dims=comma_separated_string_to_integer_list(\n          getattr(hparams, \"layer_prepostprocess_dropout_broadcast_dims\", \"\")),\n      default_name=\"layer_prepostprocess\",\n      layer_collection=layer_collection)", "docstring": "Apply layer preprocessing.\n\nSee layer_prepostprocess() for details.\n\nA hyperparameters object is passed for convenience.  The hyperparameters\nthat may be used are:\n\nlayer_preprocess_sequence\nlayer_prepostprocess_dropout\nnorm_type\nhidden_size\nnorm_epsilon\n\nArgs:\nlayer_input: a Tensor\nhparams: a hyperparameters object.\nlayer_collection: A tensorflow_kfac.LayerCollection. Only used by the\nKFAC optimizer. Default is None.\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def select_executor(elem, doc):\n    executor = EXECUTORS['default']\n    if ('cmd' in elem.attributes.keys()):\n        executor = elem.attributes['cmd']\n    elif ('runas' in elem.attributes.keys()):\n        executor = EXECUTORS[elem.attributes['runas']]\n    elif (elem.classes[0] != 'exec'):\n        executor = EXECUTORS[elem.classes[0]]\n    return executor", "docstring": "Determines the executor for the code in `elem.text`.\n\nThe elem attributes and classes select the executor in this order (highest\nto lowest):\n- custom commands (cmd=...)\n- runas (runas=...) takes a key for the executors\n- first element class (.class) determines language and thus executor\n\nArgs:\nelem The AST element.\ndoc  The document.\n\nReturns:\nThe command to execute code.", "source": "codesearchnet"}
{"code": "def build_single_handler_applications(paths, argvs=None):\n    applications = {}\n    argvs = (argvs or {})\n    for path in paths:\n        application = build_single_handler_application(path, argvs.get(path, []))\n        route = application.handlers[0].url_path()\n        if (not route):\n            if ('/' in applications):\n                raise RuntimeError((\"Don't know the URL path to use for %s\" % path))\n            route = '/'\n        applications[route] = application\n    return applications", "docstring": "Return a dictionary mapping routes to Bokeh applications built using\nsingle handlers, for specified files or directories.\n\nThis function iterates over ``paths`` and ``argvs`` and calls\n:func:`~bokeh.command.util.build_single_handler_application` on each\nto generate the mapping.\n\nArgs:\npaths (seq[str]) : paths to files or directories for creating Bokeh\napplications.\n\nargvs (dict[str, list[str]], optional) : mapping of paths to command\nline arguments to pass to the handler for each path\n\nReturns:\ndict[str, Application]\n\nRaises:\nRuntimeError", "source": "codesearchnet"}
{"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None):\n    model = self._config.model or models.InterestRateModelType.HULL_WHITE_ONE_FACTOR\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        valuation_date = dateslib.convert_to_date_tensor(market.date)\n        strike = self._swap.fixed_rate()\n        expiry_time = dateslib.daycount_actual_365_fixed(start_date=valuation_date, end_date=self._expiry_date, dtype=self._dtype)\n        if model == models.InterestRateModelType.HULL_WHITE_ONE_FACTOR:\n            option_value = self._price_hull_white_1_factor(valuation_date, market, strike, expiry_time)\n        else:\n            raise ValueError('Unsupported model.')\n        return option_value", "docstring": "Returns the present value of the swaption on the valuation date.\n\nArgs:\nmarket: A instance of type `ProcessedMarketData` which contains the\nnecessary information for pricing the swaption.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA Rank `Tensor` of shape `batch_shape` containing the modeled price of\neach  Swaption contract based on the input market data.\n\nRaises:\nValueError: If an unsupported model is supplied to the function.", "source": "github-repos"}
{"code": "def _randomFloats(self, shape, low=0.0, high=1.0, dtype=dtypes.float32):\n    val = np.random.random_sample(shape)\n    diff = high - low\n    val *= diff\n    val += low\n    return constant_op.constant(val, dtype=dtype)", "docstring": "Generate a tensor of random floating-point values.\n\nValues will be continuously distributed in the range [low, high).\n\nNote that we use numpy to generate random numbers and then feed the result\nthrough a constant op to avoid the re-rolling of TensorFlow random ops on\neach run in graph mode.\n\nArgs:\nshape: The output shape.\nlow: Lower bound of random numbers generated, inclusive.\nhigh: Upper bound of random numbers generated, exclusive.\ndtype: The output dtype.\n\nReturns:\nA random tensor", "source": "github-repos"}
{"code": "def accumulate_from_superclasses(cls, propname):\n    \n    cachename = \"__cached_all\" + propname\n    \n    \n    if cachename not in cls.__dict__:\n        s = set()\n        for c in inspect.getmro(cls):\n            if issubclass(c, HasProps) and hasattr(c, propname):\n                base = getattr(c, propname)\n                s.update(base)\n        setattr(cls, cachename, s)\n    return cls.__dict__[cachename]", "docstring": "Traverse the class hierarchy and accumulate the special sets of names\n``MetaHasProps`` stores on classes:\n\nArgs:\npropname (str) : name of the special attribute to collect.\n\nTypically meaningful values are: ``__container_props__``,\n``__properties__``, ``__properties_with_refs__``", "source": "juraj-google-style"}
{"code": "def _PrintEventLabelsCounter(\n      self, event_labels_counter, session_identifier=None):\n    \n    if not event_labels_counter:\n      return\n\n    title = 'Event tags generated per label'\n    if session_identifier:\n      title = '{0:s}: {1:s}'.format(title, session_identifier)\n\n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type,\n        column_names=['Label', 'Number of event tags'], title=title)\n\n    for key, value in sorted(event_labels_counter.items()):\n      if key == 'total':\n        continue\n      table_view.AddRow([key, value])\n\n    try:\n      total = event_labels_counter['total']\n    except KeyError:\n      total = 'N/A'\n\n    table_view.AddRow(['Total', total])\n\n    table_view.Write(self._output_writer)", "docstring": "Prints the event labels counter.\n\nArgs:\nevent_labels_counter (collections.Counter): number of event tags per\nlabel.\nsession_identifier (Optional[str]): session identifier.", "source": "juraj-google-style"}
{"code": "def __Build(leaves):\n        \n        if len(leaves) < 1:\n            raise Exception('Leaves must have length')\n        if len(leaves) == 1:\n            return leaves[0]\n\n        num_parents = int((len(leaves) + 1) / 2)\n        parents = [MerkleTreeNode() for i in range(0, num_parents)]\n\n        for i in range(0, num_parents):\n            node = parents[i]\n            node.LeftChild = leaves[i * 2]\n            leaves[i * 2].Parent = node\n            if (i * 2 + 1 == len(leaves)):\n                node.RightChild = node.LeftChild\n            else:\n                node.RightChild = leaves[i * 2 + 1]\n                leaves[i * 2 + 1].Parent = node\n\n            hasharray = bytearray(node.LeftChild.Hash.ToArray() + node.RightChild.Hash.ToArray())\n            node.Hash = UInt256(data=Crypto.Hash256(hasharray))\n\n        return MerkleTree.__Build(parents)", "docstring": "Build the merkle tree.\n\nArgs:\nleaves (list): items are of type MerkleTreeNode.\n\nReturns:\nMerkleTreeNode: the root node.", "source": "juraj-google-style"}
{"code": "def get_contour_pd_plot(self):\n    from scipy import interpolate\n    from matplotlib import cm\n    pd = self._pd\n    entries = pd.qhull_entries\n    data = np.array(pd.qhull_data)\n    plt = self._get_2d_plot()\n    data[(:, 0:2)] = triangular_coord(data[(:, 0:2)]).transpose()\n    for (i, e) in enumerate(entries):\n        data[(i, 2)] = self._pd.get_e_above_hull(e)\n    gridsize = 0.005\n    xnew = np.arange(0, 1.0, gridsize)\n    ynew = np.arange(0, 1, gridsize)\n    f = interpolate.LinearNDInterpolator(data[(:, 0:2)], data[(:, 2)])\n    znew = np.zeros((len(ynew), len(xnew)))\n    for (i, xval) in enumerate(xnew):\n        for (j, yval) in enumerate(ynew):\n            znew[(j, i)] = f(xval, yval)\n    plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)\n    plt.colorbar()\n    return plt", "docstring": "Plot a contour phase diagram plot, where phase triangles are colored\naccording to degree of instability by interpolation. Currently only\nworks for 3-component phase diagrams.\n\nReturns:\nA matplotlib plot object.", "source": "codesearchnet"}
{"code": "def get_exe_info(dir_, flag_protected=False):\n    \n\n    ret = []\n    \n    ff = glob.glob(os.path.join(dir_, \"*.py\"))\n    \n    ff = [f for f in ff if flag_protected or not os.path.basename(f).startswith(\"_\")]\n    ff.sort()\n\n    for f in ff:\n        _, filename = os.path.split(f)\n        flag_error = False\n        flag_gui = None\n        descr = \"(no doc)\"\n        try:\n            \n\n            with open(f, \"r\") as h:\n                flag_gui = \"QApplication\" in h.read()\n\n            try:\n                script_ = None\n                script_ = import_module(f)  \n            except SystemExit:\n                descr = \"? (called sys.exit())\"\n            else:\n                if script_.__doc__ is not None:\n                    descr = script_.__doc__.strip().split(\"\\n\")[0]  \n\n        except Exception as e:\n            flag_error = True\n            descr = \"*{0!s}*: {1!s}\".format(e.__class__.__name__, str(e))\n\n        if len(descr) == 0:\n            descr = \"(no doc)\"\n\n        ret.append(ExeInfo(filename, descr, flag_error, flag_gui))\n\n    \n    sisi_gra = [si for si in ret if si.flag_gui]\n    sisi_cmd = [si for si in ret if not si.flag_gui]\n    sisi_gra = sorted(sisi_gra, key=lambda x: x.filename)\n    sisi_cmd = sorted(sisi_cmd, key=lambda x: x.filename)\n    ret = sisi_cmd+sisi_gra\n\n    return ret", "docstring": "Returns a list of ExeInfo objects, which represent Python scripts within dir_\n\nArgs:\ndir_: string, path to directory\nflag_protected: whether or not to include files starting with a '_'\n\nReturns:\nlist of ExeInfo objects\n\nThe ExeInfo objects represent the \".py\" files in directory dir_,", "source": "juraj-google-style"}
{"code": "def __init__(self, value=None):\n        \n        super(ApplicationData, self).__init__(value, Tags.APPLICATION_DATA)", "docstring": "Construct an ApplicationData object.\n\nArgs:\nvalue (str): A string representing data for a particular namespace.\nOptional, defaults to None.", "source": "juraj-google-style"}
{"code": "def get_country_by_id(self, country_id) -> 'Country':\n        \n        VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)\n        if country_id not in self._countries_by_id.keys():\n            for country in self.countries:\n\n                if country.country_id == country_id:\n                    return country\n            raise ValueError(country_id)\n        else:\n            return self._countries_by_id[country_id]", "docstring": "Gets a country in this coalition by its ID\n\nArgs:\ncountry_id: country Id\n\nReturns: Country", "source": "juraj-google-style"}
{"code": "def bind_to_storage_buffer(self, binding=0, *, offset=0, size=(- 1)) -> None:\n    self.mglo.bind_to_storage_buffer(binding, offset, size)", "docstring": "Bind the buffer to a shader storage buffer.\n\nArgs:\nbinding (int): The shader storage binding.\n\nKeyword Args:\noffset (int): The offset.\nsize (int): The size. Value ``-1`` means all.", "source": "codesearchnet"}
{"code": "def swo_disable(self, port_mask):\n        \n        res = self._dll.JLINKARM_SWO_DisableTarget(port_mask)\n        if res != 0:\n            raise errors.JLinkException(res)\n        return None", "docstring": "Disables ITM & Stimulus ports.\n\nArgs:\nself (JLink): the ``JLink`` instance\nport_mask (int): mask specifying which ports to disable\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def download_and_prep_data() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n    mnist_dataset = tf.keras.datasets.mnist\n    (tr_x, tr_y), (te_x, te_y) = mnist_dataset.load_data()\n    tr_x = tr_x / 255.0\n    te_x = te_x / 255.0\n    return (tr_x, tr_y, te_x, te_y)", "docstring": "Download dataset and scale to [0, 1].\n\nReturns:\ntr_x: Training data.\ntr_y: Training labels.\nte_x: Testing data.\nte_y: Testing labels.", "source": "github-repos"}
{"code": "def replace_dots_to_underscores_at_last(path):\n    \n    if path == '':\n        return path\n    bits = path.split('/')\n    bits[-1] = bits[-1].replace('.', '_')\n    return '/'.join(bits)", "docstring": "Remove dot ('.') while a dot is treated as a special character in backends\n\nArgs:\npath (str): A target path string\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def refresh_role(self, role, file_hierarchy):\n        \n        if role not in self.cache:\n            self.cache[role] = {}\n        was_change = self._refresh_hierarchy_recursive(self.cache[role], file_hierarchy)\n        if was_change:\n            cf = open(self.cache_file, 'w')\n            yaml.dump(self.cache, cf, Dumper=Dumper)\n            cf.close()", "docstring": "Checks and refreshes (if needed) all assistants with given role.\n\nArgs:\nrole: role of assistants to refresh\nfile_hierarchy: hierarchy as returned by devassistant.yaml_assistant_loader.\\\nYamlAssistantLoader.get_assistants_file_hierarchy", "source": "juraj-google-style"}
{"code": "def init_on_device(device: 'torch.device', include_buffers: bool=False):\n    if include_buffers:\n        with device:\n            yield\n        return\n    old_register_parameter = nn.Module.register_parameter\n    if include_buffers:\n        old_register_buffer = nn.Module.register_buffer\n\n    def register_empty_parameter(module, name, param):\n        old_register_parameter(module, name, param)\n        if param is not None:\n            param_cls = type(module._parameters[name])\n            kwargs = module._parameters[name].__dict__\n            kwargs['requires_grad'] = param.requires_grad\n            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)\n\n    def register_empty_buffer(module, name, buffer, persistent=True):\n        old_register_buffer(module, name, buffer, persistent=persistent)\n        if buffer is not None:\n            module._buffers[name] = module._buffers[name].to(device)\n    if include_buffers:\n        tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}\n    else:\n        tensor_constructors_to_patch = {}\n\n    def patch_tensor_constructor(fn):\n\n        def wrapper(*args, **kwargs):\n            kwargs['device'] = device\n            return fn(*args, **kwargs)\n        return wrapper\n    try:\n        nn.Module.register_parameter = register_empty_parameter\n        if include_buffers:\n            nn.Module.register_buffer = register_empty_buffer\n        for torch_function_name in tensor_constructors_to_patch.keys():\n            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))\n        yield\n    finally:\n        nn.Module.register_parameter = old_register_parameter\n        if include_buffers:\n            nn.Module.register_buffer = old_register_buffer\n        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():\n            setattr(torch, torch_function_name, old_torch_function)", "docstring": "A context manager under which models are initialized with all parameters on the specified device.\n\nArgs:\ndevice (`torch.device`):\nDevice to initialize all parameters on.\ninclude_buffers (`bool`, *optional*):\nWhether or not to also put all buffers on the meta device while initializing.\n\nExample:\n\n```python\nimport torch.nn as nn\nfrom accelerate import init_on_device\n\nwith init_on_device(device=torch.device(\"cuda\")):\ntst = nn.Linear(100, 100)  # on `cuda` device\n```", "source": "github-repos"}
{"code": "def add_child(self, child):\n    if (not isinstance(child, Node)):\n        raise TypeError('child must be a Node')\n    self.children.append(child)\n    child.parent = self", "docstring": "Add child to ``Node`` object\n\nArgs:\n``child`` (``Node``): The child ``Node`` to be added", "source": "codesearchnet"}
{"code": "def m_seg(p1, p2, rad, dist):\n    \n    v = vector(p1, p2)\n    m = unit(rotate(v, rad), dist)\n    return translate(p1, m), translate(p2, m)", "docstring": "move segment by distance\nArgs:\np1, p2: point(x, y)\nrad: relative direction angle(radian)\ndist: distance\nReturn:\ntranslated segment(p1, p2)", "source": "juraj-google-style"}
{"code": "def _prefix_from_prefix_string(self, prefixlen_str):\n        \n        try:\n            if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):\n                raise ValueError\n            prefixlen = int(prefixlen_str)\n            if not (0 <= prefixlen <= self._max_prefixlen):\n               raise ValueError\n        except ValueError:\n            raise NetmaskValueError('%s is not a valid prefix length' %\n                                    prefixlen_str)\n        return prefixlen", "docstring": "Turn a prefix length string into an integer.\n\nArgs:\nprefixlen_str: A decimal string containing the prefix length.\n\nReturns:\nThe prefix length as an integer.\n\nRaises:\nNetmaskValueError: If the input is malformed or out of range.", "source": "juraj-google-style"}
{"code": "def add_permissions(self, grp_name, resource, permissions):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.add_permissions(grp_name, resource, permissions)", "docstring": "Add additional permissions for the group associated with the resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data\nmodel object to operate on.\npermissions (list): List of permissions to add to the given resource\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _GetImportTimestamps(self, pefile_object):\n    \n    import_timestamps = []\n    if not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT'):\n      return import_timestamps\n    for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT:\n      dll_name = getattr(importdata, 'dll', '')\n      try:\n        dll_name = dll_name.decode('ascii')\n      except UnicodeDecodeError:\n        dll_name = dll_name.decode('ascii', errors='replace')\n      if not dll_name:\n        dll_name = '<NO DLL NAME>'\n\n      timestamp = getattr(importdata.struct, 'TimeDateStamp', 0)\n      if timestamp:\n        import_timestamps.append([dll_name, timestamp])\n    return import_timestamps", "docstring": "Retrieves timestamps from the import directory, if available.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\nlist[int]: import timestamps.", "source": "juraj-google-style"}
{"code": "def __call__(self, *args, **kwargs) -> Any:", "docstring": "Calls the functor.\n\nArgs:\n*args: Any positional arguments.\n**kwargs: Any keyword arguments.\n\nReturns:\nAny value.", "source": "github-repos"}
{"code": "def group_alleles_by_start_end_Xbp(arr, bp=28):\n    \n    starts = arr[:,0:bp]\n    ends = arr[:,-bp:]\n    starts_ends_idxs = defaultdict(list)\n    l, seq_len = arr.shape\n    for i in range(l):\n        start_i = starts[i]\n        end_i = ends[i]\n        start_i_str = ''.join([str(x) for x in start_i])\n        end_i_str = ''.join([str(x) for x in end_i])\n        starts_ends_idxs[start_i_str + end_i_str].append(i)\n    return starts_ends_idxs", "docstring": "Group alleles by matching ends\n\nArgs:\narr (numpy.array): 2D int matrix of alleles\nbp (int): length of ends to group by\n\nReturns:\ndict of lists: key of start + end strings to list of indices of alleles with matching ends", "source": "juraj-google-style"}
{"code": "def _update_in_hdx(self, object_type, id_field_name, file_to_upload=None, **kwargs):\n        \n        \n\n        self._check_load_existing_object(object_type, id_field_name)\n        \n        \n        \n        self._merge_hdx_update(object_type, id_field_name, file_to_upload, **kwargs)", "docstring": "Helper method to check if HDX object exists in HDX and if so, update it\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\nfile_to_upload (Optional[str]): File to upload to HDX\n**kwargs: See below\noperation (string): Operation to perform eg. patch. Defaults to update.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'):\n    variants = self.variant_collection.find({'case_id': case_obj['_id'], 'category': category, 'variant_type': variant_type}).sort('rank_score', pymongo.DESCENDING)\n    LOG.info('Updating variant_rank for all variants')\n    requests = []\n    for (index, var_obj) in enumerate(variants):\n        if (len(requests) > 5000):\n            try:\n                self.variant_collection.bulk_write(requests, ordered=False)\n                requests = []\n            except BulkWriteError as err:\n                LOG.warning('Updating variant rank failed')\n                raise err\n        operation = pymongo.UpdateOne({'_id': var_obj['_id']}, {'$set': {'variant_rank': (index + 1)}})\n        requests.append(operation)\n    try:\n        self.variant_collection.bulk_write(requests, ordered=False)\n    except BulkWriteError as err:\n        LOG.warning('Updating variant rank failed')\n        raise err\n    LOG.info('Updating variant_rank done')", "docstring": "Updates the manual rank for all variants in a case\n\nAdd a variant rank based on the rank score\nWhenever variants are added or removed from a case we need to update the variant rank\n\nArgs:\ncase_obj(Case)\nvariant_type(str)", "source": "codesearchnet"}
{"code": "def remove_repeated_comments(node):\n    last_comment = {'text': None}\n    for _node in gast.walk(node):\n        if anno.hasanno(_node, 'comment'):\n            comment = anno.getanno(_node, 'comment')\n            if (comment['text'] == last_comment['text']):\n                anno.delanno(_node, 'comment')\n            last_comment = comment\n    return node", "docstring": "Remove comments that repeat themselves.\n\nMultiple statements might be annotated with the same comment. This way if one\nof the statements is deleted during optimization passes, the comment won't be\nlost. This pass removes sequences of identical comments, leaving only the\nfirst one.\n\nArgs:\nnode: An AST\n\nReturns:\nAn AST where comments are not repeated in sequence.", "source": "codesearchnet"}
{"code": "def get_attr(self, name):\n    fields = ('s', 'i', 'f', 'b', 'type', 'shape', 'tensor', 'func')\n    try:\n        with c_api_util.tf_buffer() as buf:\n            pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)\n            data = pywrap_tf_session.TF_GetBuffer(buf)\n    except errors.InvalidArgumentError as e:\n        raise ValueError(e.message)\n    x = attr_value_pb2.AttrValue()\n    x.ParseFromString(data)\n    oneof_value = x.WhichOneof('value')\n    if oneof_value is None:\n        return []\n    if oneof_value == 'list':\n        for f in fields:\n            if getattr(x.list, f):\n                if f == 'type':\n                    return [dtypes.as_dtype(t) for t in x.list.type]\n                else:\n                    return list(getattr(x.list, f))\n        return []\n    if oneof_value == 'type':\n        return dtypes.as_dtype(x.type)\n    assert oneof_value in fields, 'Unsupported field type in ' + str(x)\n    return getattr(x, oneof_value)", "docstring": "Returns the value of the attr of this op with the given `name`.\n\nArgs:\nname: The name of the attr to fetch.\n\nReturns:\nThe value of the attr, as a Python object.\n\nRaises:\nValueError: If this op does not have an attr with the given `name`.", "source": "github-repos"}
{"code": "def path_new_using_function(w: int, h: int, func: Callable[([int, int, int, int, Any], float)], userData: Any=0, dcost: float=1.41) -> tcod.path.AStar:\n    return tcod.path.AStar(tcod.path._EdgeCostFunc((func, userData), (w, h)), dcost)", "docstring": "Return a new AStar using the given callable function.\n\nArgs:\nw (int): Clipping width.\nh (int): Clipping height.\nfunc (Callable[[int, int, int, int, Any], float]):\nuserData (Any):\ndcost (float): A multiplier for the cost of diagonal movement.\nCan be set to 0 to disable diagonal movement.\nReturns:\nAStar: A new AStar instance.", "source": "codesearchnet"}
{"code": "def commandline_parser(parser=None, arguments=None):\n    if parser is None:\n        parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('        Command line to execute all tasks in a recipe once. ( Common Entry Point )\\n\\n        This script dispatches all the tasks in a JSON recipe to handlers in sequence.\\n        For each task, it calls a subprocess to execute the JSON instructions, waits\\n        for the process to complete and dispatches the next task, until all tasks are\\n        complete or a critical failure ( exception ) is raised.\\n\\n        If an exception is raised in any task, all following tasks are not executed by design.\\n\\n        Example: python run.py [path to recipe file]\\n        Caution: This script does NOT check if the last job finished, potentially causing overruns.\\n        Notes:\\n          - To avoid running the entire script when debugging a single task, the command line\\n            can easily replace \"all\" with the name of any \"task\" in the json.  For example\\n            python tool/recipe.py scripts/say_hello.json\\n\\n          - Can be easily replaced with the following to run only the \"hello\" task:\\n            python task/hello/run.py scripts/say_hello.json\\n\\n          - Or specified further to run only the second hello task:\\n            python task/hello/run.py scripts/say_hello.json -i 2\\n\\n    '))\n    if arguments is None:\n        parser.add_argument('json', help='Path to recipe json file to load.')\n    elif '-j' in arguments:\n        parser.add_argument('--json', '-j', help='Path to recipe json file to load.')\n    if arguments is None or '-p' in arguments:\n        parser.add_argument('--project', '-p', help='Cloud ID of Google Cloud Project.', default=None)\n    if arguments is None or '-k' in arguments:\n        parser.add_argument('--key', '-k', help='API Key of Google Cloud Project.', default=None)\n    if arguments is None or '-u' in arguments:\n        parser.add_argument('--user', '-u', help='Path to USER credentials json file.', default=None)\n    if arguments is None or '-s' in arguments:\n        parser.add_argument('--service', '-s', help='Path to SERVICE credentials json file.', default=None)\n    if arguments is None or '-c' in arguments:\n        parser.add_argument('--client', '-c', help='Path to CLIENT credentials json file.', default=None)\n    if arguments is None or '-t' in arguments:\n        parser.add_argument('--task', '-t', help='Task number of the task to run starting at 1.', default=None, type=int)\n    if arguments is None or '-v' in arguments:\n        parser.add_argument('--verbose', '-v', help='Print all the steps as they happen.', action='store_true')\n    if arguments is None or '-f' in arguments:\n        parser.add_argument('--force', '-force', help='Not used but included for compatiblity with another script.', action='store_true')\n    if arguments is None or '-tp' in arguments:\n        parser.add_argument('--trace_print', '-tp', help='Execution trace written to stdout.', action='store_true')\n    if arguments is None or '-tf' in arguments:\n        parser.add_argument('--trace_file', '-tf', help='Execution trace written to file.', action='store_true')\n    if arguments is None or '-ni' in arguments:\n        parser.add_argument('--no_input', '-ni', help='Raise exception if fields requiring input are in recipe.', action='store_true')\n    return parser", "docstring": "Used in StarThinker scripts as entry 
point for command line calls.\n\nDefines standard parameters used by almost every entry point.\n\nUsage example:\n\n```\nimport argparse\nfrom starthinker.util.configuration import commandline_parser\n\nif __name__ == \"__main__\":\n\n# custom parameters\nparser = argparse.ArgumentParser()\nparser.add_argument('custom', help='custom parameter to be added.')\n\n# initialize project\ncommandline_parser(parser=parser, ['-c', '-u'])\n\n# access arguments\nprint(args.client)\n```\n\nArgs:\n* parser: (ArgumentParser) optional custom argument parser\n* arguments: (String) optional list of parameters to use when invoking, all set if None\n\nReturns:\nArgumentParser - parser with added parameters", "source": "github-repos"}
{"code": "def view_quick_save_page(name=None):\n    response.set_header('Cache-control', 'no-cache')\n    response.set_header('Pragma', 'no-cache')\n    if (request.method == 'PUT'):\n        if (name is None):\n            if (len(request.forms.filename) > 0):\n                name = request.forms.filename\n        if (name is not None):\n            filename = '{0}.rst'.format(name)\n            file_handle = open(filename, 'w')\n            content = request.body.read()\n            content = content.decode('utf-8')\n            file_handle.write(content.encode('utf-8'))\n            file_handle.close()\n            return 'OK'\n        else:\n            return abort(404)", "docstring": "Quick save a page.\n\n.. note:: this is a bottle view\n\n* this view must be called with the PUT method\nwrite the new page content to the file, and not not commit or redirect\n\nKeyword Arguments:\n:name: (str) -- name of the rest file (without the .rst extension)\n\nReturns:\nbottle response object (200 OK)", "source": "codesearchnet"}
{"code": "def load_module_functions(module):\n    \n    module_functions = {}\n\n    for name, item in vars(module).items():\n        if validator.is_function(item):\n            module_functions[name] = item\n\n    return module_functions", "docstring": "load python module functions.\n\nArgs:\nmodule: python module\n\nReturns:\ndict: functions mapping for specified python module\n\n{\n\"func1_name\": func1,\n\"func2_name\": func2\n}", "source": "juraj-google-style"}
{"code": "def _load_from_file_object(self, f):\n    \n    subtoken_strings = []\n    for line in f:\n      s = line.strip()\n      \n      if ((s.startswith(\"'\") and s.endswith(\"'\")) or\n          (s.startswith(\"\\\"\") and s.endswith(\"\\\"\"))):\n        s = s[1:-1]\n      subtoken_strings.append(native_to_unicode(s))\n    self._init_subtokens_from_list(subtoken_strings)\n    self._init_alphabet_from_tokens(subtoken_strings)", "docstring": "Load from a file object.\n\nArgs:\nf: File object to load vocabulary from", "source": "juraj-google-style"}
{"code": "def scheduled_sample_count(ground_truth_x,\n                           generated_x,\n                           batch_size,\n                           scheduled_sample_var):\n  \n  num_ground_truth = scheduled_sample_var\n  idx = tf.random_shuffle(tf.range(batch_size))\n  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))\n  generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))\n\n  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)\n  generated_examps = tf.gather(generated_x, generated_idx)\n\n  output = tf.dynamic_stitch([ground_truth_idx, generated_idx],\n                             [ground_truth_examps, generated_examps])\n  \n  if isinstance(batch_size, int):\n    output.set_shape([batch_size] + common_layers.shape_list(output)[1:])\n  return output", "docstring": "Sample batch with specified mix of groundtruth and generated data points.\n\nArgs:\nground_truth_x: tensor of ground-truth data points.\ngenerated_x: tensor of generated data points.\nbatch_size: batch size\nscheduled_sample_var: number of ground-truth examples to include in batch.\nReturns:\nNew batch with num_ground_truth sampled from ground_truth_x and the rest\nfrom generated_x.", "source": "juraj-google-style"}
{"code": "class ViltFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n    do_pad: Optional[bool]\n    size_divisor: Optional[int]\n    rescale_factor: Optional[float]", "docstring": "Args:\ndo_pad (`bool`, *optional*, defaults to `True`):\nWhether to pad the image. If `True`, will pad the images in the batch to the largest height and width\nin the batch. Padding will be applied to the bottom and right with zeros.\nsize_divisor (`int`, *optional*, defaults to 32):\nThe size to make the height and width divisible by.\nrescale_factor (`float`, *optional*, defaults to 1/255):\nThe factor to rescale the image by.", "source": "github-repos"}
{"code": "def fetch_all_messages(self, conn, directory, readonly):\n        \n\n        conn.select(directory, readonly)\n\n        message_data = []\n\n        typ, data = conn.search(None, 'All')\n\n        \n        for num in data[0].split():\n\n            typ, data = conn.fetch(num, '(RFC822)')\n\n            for response_part in data:\n\n                if isinstance(response_part, tuple):\n\n                    email_parser = email.parser.BytesFeedParser()\n                    email_parser.feed(response_part[1])\n\n                    msg = email_parser.close()\n\n                    body = self.get_body(msg)\n                    subject = self.get_subject(msg)\n\n                    message_data.append((subject, body))\n\n        return message_data", "docstring": "Fetches all messages at @conn from @directory.\n\nParams:\nconn        IMAP4_SSL connection\ndirectory   The IMAP directory to look for\nreadonly    readonly mode, true or false\nReturns:\nList of subject-body tuples", "source": "juraj-google-style"}
{"code": "def _compute_intersection(boxes1, boxes2):\n    y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1)\n    y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1)\n    boxes2_rank = len(boxes2.shape)\n    perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1]\n    intersect_ymax = ops.minimum(y_max1, ops.transpose(y_max2, perm))\n    intersect_ymin = ops.maximum(y_min1, ops.transpose(y_min2, perm))\n    intersect_xmax = ops.minimum(x_max1, ops.transpose(x_max2, perm))\n    intersect_xmin = ops.maximum(x_min1, ops.transpose(x_min2, perm))\n    intersect_height = intersect_ymax - intersect_ymin\n    intersect_width = intersect_xmax - intersect_xmin\n    zeros_t = ops.cast(0, intersect_height.dtype)\n    intersect_height = ops.maximum(zeros_t, intersect_height)\n    intersect_width = ops.maximum(zeros_t, intersect_width)\n    return intersect_height * intersect_width", "docstring": "Computes intersection area between two sets of boxes.\n\nArgs:\nboxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes.\nboxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes.\nReturns:\na [N, M] or [batch_size, N, M] float Tensor.", "source": "github-repos"}
{"code": "def _iter_errors_custom(instance, checks, options):\n    for v_function in checks:\n        try:\n            result = v_function(instance)\n        except TypeError:\n            result = v_function(instance, options)\n        if isinstance(result, Iterable):\n            for x in result:\n                (yield x)\n        elif (result is not None):\n            (yield result)\n    for field in instance:\n        if (type(instance[field]) is list):\n            for obj in instance[field]:\n                if _is_stix_obj(obj):\n                    for err in _iter_errors_custom(obj, checks, options):\n                        (yield err)", "docstring": "Perform additional validation not possible merely with JSON schemas.\n\nArgs:\ninstance: The STIX object to be validated.\nchecks: A sequence of callables which do the checks.  Each callable\nmay be written to accept 1 arg, which is the object to check,\nor 2 args, which are the object and a ValidationOptions instance.\noptions: ValidationOptions instance with settings affecting how\nvalidation should be done.", "source": "codesearchnet"}
{"code": "def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):\n    with tf.Session() as sess:\n        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n        if (ckpt and ckpt.model_checkpoint_path):\n            print('ckpt.model_checkpoint_path: {0}'.format(ckpt.model_checkpoint_path))\n            saver.restore(sess, ckpt.model_checkpoint_path)\n            global_step = ckpt.model_checkpoint_path.split('/')[(- 1)].split('-')[(- 1)]\n            print(('Successfully loaded model from %s at step=%s.' % (ckpt.model_checkpoint_path, global_step)))\n        else:\n            print('No checkpoint file found')\n            return\n        coord = tf.train.Coordinator()\n        try:\n            threads = []\n            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))\n            num_iter = int(math.ceil((FLAGS.num_examples / FLAGS.batch_size)))\n            count_top_1 = 0.0\n            count_top_5 = 0.0\n            total_sample_count = (num_iter * FLAGS.batch_size)\n            step = 0\n            print(('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset)))\n            start_time = time.time()\n            while ((step < num_iter) and (not coord.should_stop())):\n                (top_1, top_5) = sess.run([top_1_op, top_5_op])\n                count_top_1 += np.sum(top_1)\n                count_top_5 += np.sum(top_5)\n                step += 1\n                if ((step % 20) == 0):\n                    duration = (time.time() - start_time)\n                    sec_per_batch = (duration / 20.0)\n                    examples_per_sec = (FLAGS.batch_size / sec_per_batch)\n                    print(('%s: [%d batches out of %d] (%.1f examples/sec; %.3fsec/batch)' % (datetime.now(), step, num_iter, examples_per_sec, sec_per_batch)))\n                    start_time = time.time()\n            precision_at_1 = (count_top_1 / total_sample_count)\n            recall_at_5 = (count_top_5 / total_sample_count)\n            print(('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % (datetime.now(), precision_at_1, recall_at_5, total_sample_count)))\n            summary = tf.Summary()\n            summary.ParseFromString(sess.run(summary_op))\n            summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)\n            summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)\n            summary_writer.add_summary(summary, global_step)\n        except Exception as e:\n            coord.request_stop(e)\n        coord.request_stop()\n        coord.join(threads, stop_grace_period_secs=10)", "docstring": "Runs Eval once.\n\nArgs:\nsaver: Saver.\nsummary_writer: Summary writer.\ntop_1_op: Top 1 op.\ntop_5_op: Top 5 op.\nsummary_op: Summary op.", "source": "codesearchnet"}
{"code": "def next(self):\n    try:\n        entry = {}\n        row = self._csv_reader.next()\n        for i in range(0, len(row)):\n            entry[self._headers[i]] = row[i]\n        return entry\n    except Exception as e:\n        self._file.close()\n        raise e", "docstring": "Gets next entry as a dictionary.\n\nReturns:\nobject - Object key/value pair representing a row.\n{key1: value1, key2: value2, ...}", "source": "codesearchnet"}
{"code": "def add_to_tensor(self, mat, name='add_to_tensor'):\n    return self._possibly_broadcast_batch_shape(mat)", "docstring": "Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.\n\nArgs:\nmat:  `Tensor` with same `dtype` and shape broadcastable to `self`.\nname:  A name to give this `Op`.\n\nReturns:\nA `Tensor` with broadcast shape and same `dtype` as `self`.", "source": "github-repos"}
{"code": "def __init__(self, plugin_callback, plugin_dir = 'workers'):\n        \n\n        \n        self.plugin_callback = plugin_callback\n        self.plugin_dir = plugin_dir\n        self.load_all_plugins()\n\n        \n        self.watcher = dir_watcher.DirWatcher(self.plugin_path)\n        self.watcher.register_callbacks(self.on_created, self.on_modified, self.on_deleted)\n        self.watcher.start_monitoring()", "docstring": "Initialize the Plugin Manager for Workbench.\n\nArgs:\nplugin_callback: The callback for plugin. This is called when plugin is added.\nplugin_dir: The dir where plugin resides.", "source": "juraj-google-style"}
{"code": "def get_configuration(head, update, head_source=None):\n    \n    head_source = (head_source or get_head_source(head))\n    update_source = get_acquisition_source(update)\n\n    if not is_arxiv_and_publisher(head_source, update_source) and is_manual_merge(head, update):\n        return ManualMergeOperations\n\n    if head_source == 'arxiv':\n        if update_source == 'arxiv':\n            return ArxivOnArxivOperations\n        else:\n            return PublisherOnArxivOperations\n    else:\n        if update_source == 'arxiv':\n            return ArxivOnPublisherOperations\n        else:\n            return PublisherOnPublisherOperations", "docstring": "This function return the right configuration for the inspire_merge\nfunction in according to the given sources. Both parameters can not be None.\n\nParams:\nhead(dict): the HEAD record\nupdate(dict): the UPDATE record\nhead_source(string): the source of the HEAD record\n\nReturns:\nMergerConfigurationOperations: an object containing\nthe rules needed to merge HEAD and UPDATE", "source": "juraj-google-style"}
{"code": "def to_string(self):\n        \n        def filt(x):\n            return '+'+x[0] in PROJ4_PARAMS.keys() and x[1] is not False\n\n        items = []\n        for k, v in sorted(filter(filt, self.items())):\n            items.append(\n                \"+\" + \"=\".join(\n                    map(str, filter(\n                        lambda y: (y or y == 0) and y is not True, (k, v)))))\n        return \" \".join(items)", "docstring": "Turn a CRS dict into a PROJ.4 string. Mapping keys are tested against\n``all_proj_keys`` list. Values of ``True`` are omitted, leaving the key\nbare: {'no_defs': True} -> \"+no_defs\" and items where the value is\notherwise not a str, int, or float are omitted.\n\nArgs:\ncrs: A CRS dict as used in Location.\n\nReturns:\nstr. The string representation.", "source": "juraj-google-style"}
{"code": "def dump_table_as_insert_sql(engine: Engine,\n                             table_name: str,\n                             fileobj: TextIO,\n                             wheredict: Dict[str, Any] = None,\n                             include_ddl: bool = False,\n                             multirow: bool = False) -> None:\n    \n    \n    \n    \n    \n    log.info(\"dump_data_as_insert_sql: table_name={}\", table_name)\n    writelines_nl(fileobj, [\n        SEP1,\n        sql_comment(\"Data for table: {}\".format(table_name)),\n        SEP2,\n        sql_comment(\"Filters: {}\".format(wheredict)),\n    ])\n    dialect = engine.dialect\n    if not dialect.supports_multivalues_insert:\n        multirow = False\n    if multirow:\n        log.warning(\"dump_data_as_insert_sql: multirow parameter substitution \"\n                    \"not working yet\")\n        multirow = False\n\n    \n\n    meta = MetaData(bind=engine)\n    log.debug(\"... retrieving schema\")\n    table = Table(table_name, meta, autoload=True)\n    if include_ddl:\n        log.debug(\"... producing DDL\")\n        dump_ddl(table.metadata, dialect_name=engine.dialect.name,\n                 fileobj=fileobj)\n    \n    \n    log.debug(\"... fetching records\")\n    \n    \n    \n    \n    query = select(table.columns)\n    if wheredict:\n        for k, v in wheredict.items():\n            col = table.columns.get(k)\n            query = query.where(col == v)\n    \n    cursor = engine.execute(query)\n    if multirow:\n        row_dict_list = []\n        for r in cursor:\n            row_dict_list.append(dict(r))\n        \n        if row_dict_list:\n            statement = table.insert().values(row_dict_list)\n            \n            \n            insert_str = get_literal_query(statement, bind=engine)\n            \n            writeline_nl(fileobj, insert_str)\n        else:\n            writeline_nl(fileobj, sql_comment(\"No data!\"))\n    else:\n        found_one = False\n        for r in cursor:\n            found_one = True\n            row_dict = dict(r)\n            statement = table.insert(values=row_dict)\n            \n            insert_str = get_literal_query(statement, bind=engine)\n            \n            \n            writeline_nl(fileobj, insert_str)\n        if not found_one:\n            writeline_nl(fileobj, sql_comment(\"No data!\"))\n    writeline_nl(fileobj, SEP2)\n    log.debug(\"... done\")", "docstring": "Reads a table from the database, and writes SQL to replicate the table's\ndata to the output ``fileobj``.\n\nArgs:\nengine: SQLAlchemy :class:`Engine`\ntable_name: name of the table\nfileobj: file-like object to write to\nwheredict: optional dictionary of ``{column_name: value}`` to use as\n``WHERE`` filters\ninclude_ddl: if ``True``, include the DDL to create the table as well\nmultirow: write multi-row ``INSERT`` statements", "source": "juraj-google-style"}
{"code": "def recipe_trends_places_to_bigquery_via_query(config, auth_write, secret, key, places_dataset, places_query, places_legacy, destination_dataset, destination_table):\n    twitter(config, {'auth': auth_write, 'secret': secret, 'key': key, 'trends': {'places': {'single_cell': True, 'bigquery': {'dataset': places_dataset, 'query': places_query, 'legacy': places_legacy}}}, 'out': {'bigquery': {'dataset': destination_dataset, 'table': destination_table}}})", "docstring": "Move using a WOEID query.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nsecret (string) - NA\nkey (string) - NA\nplaces_dataset (string) - NA\nplaces_query (string) - NA\nplaces_legacy (boolean) - NA\ndestination_dataset (string) - NA\ndestination_table (string) - NA", "source": "github-repos"}
{"code": "def index_sample(self, md5, index_name):\n        \n        generator = self.stream_sample(md5)\n        for row in generator:\n            self.indexer.index_data(row, index_name)", "docstring": "Index a stored sample with the Indexer.\nArgs:\nmd5: the md5 of the sample\nindex_name: the name of the index\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def easeInOutElastic(n, amplitude=1, period=0.5):\n    _checkRange(n)\n    n *= 2\n    if (n < 1):\n        return (easeInElastic(n, amplitude=amplitude, period=period) / 2)\n    else:\n        return ((easeOutElastic((n - 1), amplitude=amplitude, period=period) / 2) + 0.5)", "docstring": "An elastic tween function wobbles towards the midpoint.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def _get_weights(max_length):\n  \n  weights = [1]\n  for i in range(1, max_length):\n    weights.append(weights[i-1] * len(_ALPHABET) + 1)\n  weights.reverse()\n  return weights", "docstring": "Get weights for each offset in str of certain max length.\n\nArgs:\nmax_length: max length of the strings.\n\nReturns:\nA list of ints as weights.\n\nExample:\nIf max_length is 2 and alphabet is \"ab\", then we have order \"\", \"a\", \"aa\",\n\"ab\", \"b\", \"ba\", \"bb\". So the weight for the first char is 3.", "source": "juraj-google-style"}
{"code": "async def delete(\n        self, name: str, *, force: bool = False, noprune: bool = False\n    ) -> List:\n        \n        params = {\"force\": force, \"noprune\": noprune}\n        response = await self.docker._query_json(\n            \"images/{name}\".format(name=name), \"DELETE\", params=params\n        )\n        return response", "docstring": "Remove an image along with any untagged parent\nimages that were referenced by that image\n\nArgs:\nname: name/id of the image to delete\nforce: remove the image even if it is being used\nby stopped containers or has other tags\nnoprune: don't delete untagged parent images\n\nReturns:\nList of deleted images", "source": "juraj-google-style"}
{"code": "def scrape(text, ptype=None):\n    for (ruletype, rule, info) in scrape_types:\n        if (ptype and (ptype != ruletype)):\n            continue\n        regx = regexes.get(ruletype)\n        for valu in regx.findall(text):\n            (yield (ruletype, valu))", "docstring": "Scrape types from a blob of text and return node tuples.\n\nArgs:\ntext (str): Text to scrape.\nptype (str): Optional ptype to scrape. If present, only scrape rules which match the provided type.\n\nReturns:\n(str, str): Yield tuples of type, valu strings.", "source": "codesearchnet"}
{"code": "def adjust_target_dtype(self, torch_dtype: 'torch.dtype') -> 'torch.dtype':\n    return torch_dtype", "docstring": "Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`\nto compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`\nto `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.int4`.\n\nArgs:\ntorch_dtype (`torch.dtype`, *optional*):\nThe torch_dtype that is used to compute the device_map.", "source": "github-repos"}
{"code": "def get_oneformer_resize_output_image_size(image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int]=None, default_to_square: bool=True, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> tuple:\n    output_size = get_resize_output_image_size(input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format)\n    return output_size", "docstring": "Computes the output size given the desired size.\n\nArgs:\nimage (`np.ndarray`):\nThe input image.\nsize (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):\nThe size of the output image.\nmax_size (`int`, *optional*):\nThe maximum size of the output image.\ndefault_to_square (`bool`, *optional*, defaults to `True`):\nWhether to default to square if no size is provided.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If unset, will use the inferred format from the input.\n\nReturns:\n`Tuple[int, int]`: The output size.", "source": "github-repos"}
{"code": "def exec_iteration(self, counter, context, step_method):\n    logger.debug('starting')\n    context['whileCounter'] = counter\n    logger.info(f'while: running step with counter {counter}')\n    step_method(context)\n    logger.debug(f'while: done step {counter}')\n    result = False\n    if self.stop:\n        result = context.get_formatted_as_type(self.stop, out_type=bool)\n    logger.debug('done')\n    return result", "docstring": "Run a single loop iteration.\n\nThis method abides by the signature invoked by poll.while_until_true,\nwhich is to say (counter, *args, **kwargs). In a normal execution\nchain, this method's args passed by self.while_loop where context\nand step_method set. while_until_true injects counter as a 1st arg.\n\nArgs:\ncounter. int. loop counter, which number of iteration is this.\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate - after method execution will contain the new\nupdated context.\nstep_method: (method/function) This is the method/function that\nwill execute on every loop iteration. Signature is:\nfunction(context)\n\nReturns:\nbool. True if self.stop evaluates to True after step execution,\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def top_stories(self, raw=False, limit=None):\n    top_stories = self._get_stories('topstories', limit)\n    if raw:\n        top_stories = [story.raw for story in top_stories]\n    return top_stories", "docstring": "Returns list of item ids of current top stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to represent all\nobjects in raw json.\n\nReturns:\n`list` object containing ids of top stories.", "source": "codesearchnet"}
{"code": "def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):\n  \n  return ImportLaidOutTensorOperation(\n      mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]", "docstring": "Import a laid_out_tensor.\n\nFor expert users.\nThe input must be laid out appropriately given the eventual MeshImpl,\nand layout.\n\nArgs:\nmesh: a Mesh\nlaid_out_tensor: a LaidOutTensor\nshape: a mtf.Shape\nname: an optional string\n\nReturns:\na mtf.Tensor", "source": "juraj-google-style"}
{"code": "def validate(self):\n    if (self.value is not None):\n        if (type(self.value) not in six.integer_types):\n            raise TypeError('expected (one of): {0}, observed: {1}'.format(six.integer_types, type(self.value)))\n        elif (self.value > Interval.MAX):\n            raise ValueError('interval value greater than accepted max')\n        elif (self.value < Interval.MIN):\n            raise ValueError('interval value less than accepted min')", "docstring": "Verify that the value of the Interval is valid.\n\nRaises:\nTypeError: if the value is not of type int or long\nValueError: if the value cannot be represented by an unsigned\n32-bit integer", "source": "codesearchnet"}
{"code": "def compute_area_key(features, max_area_width, max_area_height=1, height=1, mode='mean', training=True, name=None):\n    tf.logging.info('area_attention mode=%s', mode)\n    (area_mean, area_std, _, area_heights, area_widths) = compute_area_features(features, max_area_width=max_area_width, max_area_height=max_area_height, height=height)\n    if (mode == 'mean'):\n        return area_mean\n    elif (mode == 'max'):\n        (area_max, _, _) = basic_pool(features, max_area_width=max_area_width, max_area_height=max_area_height, height=height)\n        return area_max\n    elif (mode == 'sample'):\n        if training:\n            area_mean += (area_std * tf.random_normal(tf.shape(area_std)))\n        return area_mean\n    with tf.variable_scope(name, default_name='combine_area_features', values=[area_mean, area_std, area_heights, area_widths]):\n        depth = common_layers.shape_list(area_mean)[(- 1)]\n        height_embed = tf.nn.embedding_lookup(params=tf.get_variable('area_height_emb', [max_area_height, (depth \n        width_embed = tf.nn.embedding_lookup(params=tf.get_variable('area_width_emb', [max_area_width, (depth \n        size_embed = tf.concat([height_embed, width_embed], (- 1))\n        if (mode == 'concat'):\n            feature_concat = tf.concat([area_mean, area_std, size_embed], (- 1))\n        elif (mode == 'max_concat'):\n            (area_max, _, _) = basic_pool(features, max_area_width=max_area_width, max_area_height=max_area_height, height=height)\n            feature_concat = tf.concat([area_max, size_embed], (- 1))\n        elif (mode == 'sum'):\n            feature_concat = ((size_embed + area_mean) + area_std)\n        elif (mode == 'sample_concat'):\n            if training:\n                area_mean += (area_std * tf.random_normal(tf.shape(area_std)))\n            feature_concat = tf.concat([area_mean, size_embed], (- 1))\n        elif (mode == 'sample_sum'):\n            if training:\n                area_mean += (area_std * tf.random_normal(tf.shape(area_std)))\n            feature_concat = (area_mean + size_embed)\n        else:\n            raise ValueError(('Unsupported area key mode=%s' % mode))\n        feature_hidden = tf.layers.dense(inputs=feature_concat, units=depth, activation=tf.nn.relu)\n        area_key = tf.layers.dense(feature_hidden, units=depth)\n        return area_key", "docstring": "Computes the key for each area.\n\nArgs:\nfeatures: a Tensor in a shape of [batch_size, height * width, depth].\nmax_area_width: the max width allowed for an area.\nmax_area_height: the max height allowed for an area.\nheight: the height of the image.\nmode: whether to combine different area features or only use\nthe vector mean of each area, which can be \"mean\", \"concat\", \"sum\",\n\"sample_concat\", and \"sample_sum\".\ntraining: indicating if it is in the training mode.\nname: the name for setting the variable scope.\nReturns:\narea_key: a Tensor in the shape of [batch_size, num_areas, depth]", "source": "codesearchnet"}
{"code": "def del_method(self, m):\n    if (isinstance(m, types.FunctionType) and (not iscoroutinefunction(m))):\n        wrkey = ('function', id(m))\n    else:\n        (f, obj) = get_method_vars(m)\n        wrkey = (f, id(obj))\n    if (wrkey in self):\n        del self[wrkey]", "docstring": "Remove an instance method or function if it exists\n\nArgs:\nm: The instance method or function to remove", "source": "codesearchnet"}
{"code": "def to_dict(self) -> dict[str, Any]:\n    output = copy.deepcopy(self.__dict__)\n    if hasattr(self.__class__, 'model_type'):\n        output['model_type'] = self.__class__.model_type\n    output['transformers_version'] = __version__\n    for key, value in output.items():\n        if isinstance(value, PretrainedConfig):\n            value = value.to_dict()\n            del value['transformers_version']\n        output[key] = value\n    self._remove_keys_not_serialized(output)\n    if hasattr(self, 'quantization_config'):\n        output['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config\n    self.dict_torch_dtype_to_str(output)\n    return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.", "source": "github-repos"}
{"code": "class TFDebertaV2XSoftmax(keras.layers.Layer):\n\n    def __init__(self, axis=-1, **kwargs):\n        super().__init__(**kwargs)\n        self.axis = axis\n\n    def call(self, inputs: tf.Tensor, mask: tf.Tensor):\n        rmask = tf.logical_not(tf.cast(mask, tf.bool))\n        output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)\n        output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)\n        output = tf.where(rmask, 0.0, output)\n        return output", "docstring": "Masked Softmax which is optimized for saving memory\n\nArgs:\ninput (`tf.Tensor`): The input tensor that will apply softmax.\nmask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\ndim (int): The dimension that will apply softmax", "source": "github-repos"}
{"code": "def _build_nccl_hybrid(input_tensors, red_op, upper_level_f):\n    input_tensors, shape = _flatten_tensors(input_tensors)\n    devices = [t.device for t in input_tensors]\n    per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)\n    num_workers = len(per_worker_devices)\n    up_values = [None for w in range(0, num_workers)]\n    up_devices = up_values[:]\n    down_values = up_values[:]\n    for w in range(0, num_workers):\n        worker_values = build_nccl_all_reduce(per_worker_values[w], red_op)\n        with ops.control_dependencies(worker_values):\n            with ops.device(worker_values[0].device):\n                up_values[w] = array_ops.identity(worker_values[0])\n            up_devices[w] = per_worker_devices[w][0]\n    level_2_output = upper_level_f(up_values)\n    for w in range(0, num_workers):\n        dst_tensors = []\n        with ops.device(per_worker_devices[w][0]):\n            broadcast_src = nccl_ops.broadcast(array_ops.identity(level_2_output[w]))\n        for d in per_worker_devices[w]:\n            with ops.device(d):\n                dst_tensors.append(array_ops.identity(broadcast_src))\n        down_values[w] = dst_tensors\n    output_tensors = [v for sublist in down_values for v in sublist]\n    if len(shape) != 1:\n        output_tensors = _reshape_tensors(output_tensors, shape)\n    return output_tensors", "docstring": "Construct a subgraph for NCCL hybrid all-reduce.\n\nArgs:\ninput_tensors: list of `tf.Tensor` of same-shape and type values to\nbe reduced.\nred_op: binary elementwise reduction operator.\nupper_level_f: function for reducing one value per worker, across\nworkers.\n\nReturns:\nlist of `tf.Tensor` of reduced values.\n\nRaises:\nValueError: inputs not well-formed.", "source": "github-repos"}
{"code": "def _unary_assert_doc(sym, sym_name):\n\n    def _decorator(func):\n        \n        opname = func.__name__\n        cap_sym_name = sym_name.capitalize()\n        func.__doc__ = '\\n    Assert the condition `x {sym}` holds element-wise.\\n\\n    When running in graph mode, you should add a dependency on this operation\\n    to ensure that it runs. Example of adding a dependency to an operation:\\n\\n    ```python\\n    with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\\n      output = tf.reduce_sum(x)\\n    ```\\n\\n    {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\\n    If `x` is empty this is trivially satisfied.\\n\\n    Args:\\n      x:  Numeric `Tensor`.\\n      data:  The tensors to print out if the condition is False.  Defaults to\\n        error message and first few entries of `x`.\\n      summarize: Print this many entries of each tensor.\\n      message: A string to prefix to the default message.\\n      name: A name for this operation (optional).  Defaults to \"{opname}\".\\n\\n    Returns:\\n      Op that raises `InvalidArgumentError` if `x {sym}` is False.\\n      @compatibility(eager)\\n        returns None\\n      @end_compatibility\\n\\n    Raises:\\n      InvalidArgumentError: if the check can be performed immediately and\\n        `x {sym}` is False. The check can be performed immediately during\\n        eager execution or if `x` is statically known.\\n    '.format(sym=sym, sym_name=cap_sym_name, opname=opname)\n        return func\n    return _decorator", "docstring": "Common docstring for assert_* ops that evaluate a unary predicate over every element of a tensor.\n\nArgs:\nsym: Mathematical symbol for the check performed on each element, i.e. \"> 0\"\nsym_name: English-language name for the op described by sym\n\nReturns:\nDecorator that adds the appropriate docstring to the function for symbol\n`sym`.", "source": "github-repos"}
{"code": "def get_tensor_mtf_dimension_names(self, tensor_name):\n    tensor = self._name_to_tensor(tensor_name)\n    if isinstance(tensor, mtf.Tensor):\n        return tensor.shape.dimension_names\n    else:\n        return []", "docstring": "The Mesh TensorFlow dimensions associated with a tensor.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na [string], the names of Mesh TensorFlow dimensions.", "source": "codesearchnet"}
{"code": "def register_peer(self, connection_id, endpoint):\n    with self._lock:\n        if (len(self._peers) < self._maximum_peer_connectivity):\n            self._peers[connection_id] = endpoint\n            self._topology.set_connection_status(connection_id, PeerStatus.PEER)\n            LOGGER.debug('Added connection_id %s with endpoint %s, connected identities are now %s', connection_id, endpoint, self._peers)\n        else:\n            raise PeeringException('At maximum configured number of peers: {} Rejecting peering request from {}.'.format(self._maximum_peer_connectivity, endpoint))\n    public_key = self.peer_to_public_key(connection_id)\n    if public_key:\n        self._consensus_notifier.notify_peer_connected(public_key)", "docstring": "Registers a connected connection_id.\n\nArgs:\nconnection_id (str): A unique identifier which identifies an\nconnection on the network server socket.\nendpoint (str): The publically reachable endpoint of the new\npeer", "source": "codesearchnet"}
{"code": "def __init__(self, pfor: 'PFor', op: ops.Operation, inputs):\n    self.pfor = pfor\n    self._op = op\n    self._inputs = inputs", "docstring": "Creates a _PforInput object.\n\nArgs:\npfor: PFor converter object.\nop: the Operation object that is being converted.\ninputs: list of WrappedTensor objects representing converted values of the\ninputs of `op`.", "source": "github-repos"}
{"code": "def search_messages(self, *, query: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'query': query})\n    return self.api_call('search.messages', http_verb='GET', params=kwargs)", "docstring": "Searches for messages matching a query.\n\nArgs:\nquery (str): Search query. May contains booleans, etc.\ne.g. 'pickleface'", "source": "codesearchnet"}
{"code": "def construct_end_message(self):\n    app_count = self.dfk.task_count\n    site_count = len([x for x in self.dfk.config.executors if x.managed])\n    app_fails = len([t for t in self.dfk.tasks if (self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES)])\n    message = {'uuid': self.uuid, 'end': time.time(), 't_apps': app_count, 'sites': site_count, 'c_time': None, 'failed': app_fails, 'test': self.test_mode}\n    return json.dumps(message)", "docstring": "Collect the final run information at the time of DFK cleanup.\n\nReturns:\n- Message dict dumped as json string, ready for UDP", "source": "codesearchnet"}
{"code": "def bounded_trie(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingBoundedTrie':\n    namespace = Metrics.get_namespace(namespace)\n    return Metrics.DelegatingBoundedTrie(MetricName(namespace, name))", "docstring": "Obtains or creates a Bounded Trie metric.\n\nArgs:\nnamespace: A class or string that gives the namespace to a metric\nname: A string that gives a unique name to a metric\n\nReturns:\nA BoundedTrie object.", "source": "github-repos"}
{"code": "def recode_sam_reads(sam_fn, fastq_rnf_fo, fai_fo, genome_id, number_of_read_tuples=(10 ** 9), simulator_name=None, allow_unmapped=False):\n    fai_index = rnftools.utils.FaIdx(fai_fo)\n    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))\n    fq_creator = rnftools.rnfformat.FqCreator(fastq_fo=fastq_rnf_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator=simulator_name)\n    cigar_reg_shift = re.compile('([0-9]+)([MDNP=X])')\n    reverse_complement_dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}\n    read_tuple_id = 0\n    last_read_tuple_name = None\n    with pysam.AlignmentFile(sam_fn, check_header=False) as samfile:\n        for alignment in samfile:\n            if ((alignment.query_name != last_read_tuple_name) and (last_read_tuple_name is not None)):\n                read_tuple_id += 1\n            last_read_tuple_name = alignment.query_name\n            if alignment.is_unmapped:\n                rnftools.utils.error(\"SAM files used for conversion should not contain unaligned segments. This condition is broken by read tuple '{}' in file '{}'.\".format(alignment.query_name, sam_fn), program='RNFtools', subprogram='MIShmash', exception=NotImplementedError)\n            if alignment.is_reverse:\n                direction = 'R'\n                bases = ''.join([reverse_complement_dict[nucl] for nucl in alignment.seq[::(- 1)]])\n                qualities = str(alignment.qual[::(- 1)])\n            else:\n                direction = 'F'\n                bases = alignment.seq[:]\n                qualities = str(alignment.qual[:])\n            if (fai_index.dict_chr_ids != {}):\n                chr_id = fai_index.dict_chr_ids[samfile.getrname(alignment.reference_id)]\n            else:\n                chr_id = '0'\n            left = (int(alignment.reference_start) + 1)\n            right = (left - 1)\n            for (steps, operation) in cigar_reg_shift.findall(alignment.cigarstring):\n                right += int(steps)\n            segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction, left=left, right=right)\n            fq_creator.add_read(read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment])\n    fq_creator.flush_read_tuple()", "docstring": "Transform a SAM file to RNF-compatible FASTQ.\n\nArgs:\nsam_fn (str): SAM/BAM file - file name.\nfastq_rnf_fo (str): Output FASTQ file - file object.\nfai_fo (str): FAI index of the reference genome - file object.\ngenome_id (int): Genome ID for RNF.\nnumber_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id).\nsimulator_name (str): Name of the simulator. Used for comment in read tuple name.\nallow_unmapped (bool): Allow unmapped reads.\n\nRaises:\nNotImplementedError", "source": "codesearchnet"}
{"code": "def name_scope(name):\n    return ops.name_scope_v2(name)", "docstring": "A context manager for use when defining a Python op.\n\nThis context manager pushes a name scope, which will make the name of all\noperations added within it have a prefix.\n\nFor example, to define a new Python op called `my_op`:\n\n\ndef my_op(a):\nwith tf.name_scope(\"MyOp\") as scope:\na = tf.convert_to_tensor(a, name=\"a\")\n# Define some computation that uses `a`.\nreturn foo_op(..., name=scope)\n\n\nWhen executed, the Tensor `a` will have the name `MyOp/a`.\n\nArgs:\nname: The prefix to use on all names created within the name scope.\n\nReturns:\nName scope context manager.", "source": "github-repos"}
{"code": "def apply_step(self, variables, deltas):\n        \n        if len(variables) != len(deltas):\n            raise TensorForceError(\"Invalid variables and deltas lists.\")\n        return tf.group(\n            *(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas))\n        )", "docstring": "Applies the given (and already calculated) step deltas to the variable values.\n\nArgs:\nvariables: List of variables.\ndeltas: List of deltas of same length.\n\nReturns:\nThe step-applied operation. A tf.group of tf.assign_add ops.", "source": "juraj-google-style"}
{"code": "def cancelHistoricalData(self, bars: BarDataList):\n        \n        self.client.cancelHistoricalData(bars.reqId)\n        self.wrapper.endSubscription(bars)", "docstring": "Cancel the update subscription for the historical bars.\n\nArgs:\nbars: The bar list that was obtained from ``reqHistoricalData``\nwith a keepUpToDate subscription.", "source": "juraj-google-style"}
{"code": "def find_all_hinted_output_nodes(session=None, graph_def=None):\n    if session is not None and graph_def is not None:\n        raise ValueError('Provide only one of session and graph_def.')\n    hinted_outputs_nodes = []\n    if session is not None:\n        hints = _find_all_hints_in_nodes(session.graph_def.node)\n    elif graph_def is not None:\n        hints = _find_all_hints_in_nodes(graph_def.node)\n    for hint in hints.values():\n        _, output_nodes = hint.flattened_inputs_and_outputs()\n        hinted_outputs_nodes.extend(output_nodes)\n    return hinted_outputs_nodes", "docstring": "Find all Ophints output nodes in the graph.\n\nThis is used to get all the output nodes those are ophinted, it is important\nfor operation like convert_variables_to_constants keep all ophints structure.\nNote: only one of session or graph_def should be used, not both.\nWhy this can be useful? Some TensorFlow ops (e.g. bidirectional rnn), can\ngenerate multiple outputs for unfused subgraph. If not all output nodes are\nconsumed, graph optimization can potentially drop the unused nodes and cause\nophints in an invalid states (due to missing ophinted output nodes). So it's\nimportant for us to find all those hinted output nodes and make sure they're\nnot discarded away.\n\nArgs:\nsession: A TensorFlow session that contains the graph to convert.\ngraph_def: A graph def that we should convert.\n\nReturns:\nA list of OpHints output nodes.\nRaises:\nValueError: If both session and graph_def are provided.", "source": "github-repos"}
{"code": "def _ParseRegisteredDLLs(self, parser_mediator, registry_key):\n    notify_key = registry_key.GetSubkeyByName('Notify')\n    if (not notify_key):\n        return\n    for subkey in notify_key.GetSubkeys():\n        for trigger in self._TRIGGERS:\n            handler_value = subkey.GetValueByName(trigger)\n            if (not handler_value):\n                continue\n            values_dict = {'Application': subkey.name, 'Handler': handler_value.GetDataAsObject(), 'Trigger': trigger}\n            command_value = subkey.GetValueByName('DllName')\n            if command_value:\n                values_dict['Command'] = command_value.GetDataAsObject()\n            event_data = windows_events.WindowsRegistryEventData()\n            event_data.key_path = subkey.path\n            event_data.offset = subkey.offset\n            event_data.regvalue = values_dict\n            event_data.source_append = ': Winlogon'\n            event = time_events.DateTimeValuesEvent(subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the registered DLLs that receive event notifications.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "codesearchnet"}
{"code": "def alt40fms(msg):\n    d = hex2bin(data(msg))\n    if (d[13] == '0'):\n        return None\n    alt = (bin2int(d[14:26]) * 16)\n    return alt", "docstring": "Selected altitude, FMS\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS40) string\n\nReturns:\nint: altitude in feet", "source": "codesearchnet"}
{"code": "def optimize(self, init_method='default', inference=None, n_times=10, perturb=False, pertSize=0.001, verbose=None):\n    assert (init_method in ['default', 'random', None]), 'VarianceDecomposition: specified init_method not valid'\n    if (init_method == 'default'):\n        self._init_params_default()\n    if ((init_method is not 'random') and (not perturb)):\n        n_times = 1\n    if (inference is None):\n        inference = self._det_inference()\n    else:\n        self._check_inference(inference)\n    self._inference = inference\n    if (self.gp is None):\n        self._initGP()\n    params0 = self.gp.getParams()\n    for i in range(n_times):\n        if (init_method == 'random'):\n            params = {'covar': sp.randn(params0['covar'].shape[0])}\n            self.gp.setParams(params)\n        elif perturb:\n            params = {'covar': (params0['covar'] + (pertSize * sp.randn(params0['covar'].shape[0])))}\n            self.gp.setParams(params)\n        (conv, info) = self.gp.optimize()\n        if conv:\n            break\n    if verbose:\n        if (conv == False):\n            print('No local minimum found for the tested initialization points')\n        else:\n            print(('Local minimum found at iteration %d' % i))\n    return conv", "docstring": "Train the model using the specified initialization strategy\n\nArgs:\ninit_method:    initialization strategy:\n'default': variance is equally split across the different random effect terms. For mulit-trait models the empirical covariance between traits is used\n'random': variance component parameters (scales) are sampled from a normal distribution with mean 0 and std 1,\nNone: no initialization is considered. Initial parameters can be specifies by using the single covariance getTraitCovarfun()\ninference:      inference gp method, by default algebrically efficient inference (i.e., gp2kronSum, gp2KronSumLR, gp3KronSumLR) will be used when possible.\nFor models with high a standard inference scheme (gp_base) will be used.\nn_times:        number of restarts to converge\nperturb:        if true, the initial point (if random initializaiton is not being used) is perturbed with gaussian noise for each restart (default, False)\nperturbSize:    std of the gassian noise used to perturb the initial point\nverbose:        print if convergence is achieved and how many restarted were needed", "source": "codesearchnet"}
{"code": "def docker_list(registry_pass):\n    \n    \n    registry = conf.get('docker.registry', None)\n\n    if registry is None:\n        log.err(\"You must define docker.registry conf variable to list images\")\n        sys.exit(-1)\n\n    registry_user = conf.get('docker.registry_user', None)\n\n    if registry_user is None:\n        registry_user = click.prompt(\"Username\")\n\n    rc = client.RegistryClient(registry, registry_user, registry_pass)\n    images = {x: rc.list_tags(x) for x in rc.list_images()}\n\n    shell.cprint(\"<32>Images in <34>{} <32>registry:\", registry)\n    for image, tags in images.items():\n        shell.cprint('  <92>{}', image)\n        for tag in tags:\n            shell.cprint('      <90>{}:<35>{}', image, tag)", "docstring": "List docker images stored in the remote registry.\n\nArgs:\nregistry_pass (str):\nRemote docker registry password.", "source": "juraj-google-style"}
{"code": "def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):\n  \n  logging.info(u\"Downloading: %s to: %s\", file_obj.urn, target_path)\n\n  target_file = open(target_path, \"wb\")\n  file_obj.Seek(0)\n  count = 0\n\n  data_buffer = file_obj.Read(buffer_size)\n  while data_buffer:\n    target_file.write(data_buffer)\n    data_buffer = file_obj.Read(buffer_size)\n    count += 1\n    if not count % 3:\n      logging.debug(u\"Downloading: %s: %s done\", file_obj.urn,\n                    utils.FormatNumberAsString(count * buffer_size))\n  target_file.close()", "docstring": "Download an aff4 file to the local filesystem overwriting it if it exists.\n\nArgs:\nfile_obj: An aff4 object that supports the file interface (Read, Seek)\ntarget_path: Full path of file to write to.\nbuffer_size: Read in chunks this size.", "source": "juraj-google-style"}
{"code": "def should_generate_summaries():\n    name_scope = tf.contrib.framework.get_name_scope()\n    if (name_scope and ('while/' in name_scope)):\n        return False\n    if tf.get_variable_scope().reuse:\n        return False\n    return True", "docstring": "Is this an appropriate context to generate summaries.\n\nReturns:\na boolean", "source": "codesearchnet"}
{"code": "def _ifft(self, x):\n    x_complex = _to_complex(x)\n    return _IFFT_OP[self.block_depth](x_complex)", "docstring": "IFFT along the last self.block_depth dimensions of x.\n\nArgs:\nx: `Tensor` with floating or complex dtype.  Should be in the form\nreturned by self._vectorize_then_blockify.\n\nReturns:\n`Tensor` with `dtype` `complex64`.", "source": "github-repos"}
{"code": "async def retry_create_artifact(*args, **kwargs):\n    \n    await retry_async(\n        create_artifact,\n        retry_exceptions=(\n            ScriptWorkerRetryException,\n            aiohttp.ClientError\n        ),\n        args=args,\n        kwargs=kwargs\n    )", "docstring": "Retry create_artifact() calls.\n\nArgs:\n*args: the args to pass on to create_artifact\n**kwargs: the args to pass on to create_artifact", "source": "juraj-google-style"}
{"code": "def Read(f):\n  \n  try:\n    yaml_data = yaml.load(f)\n  except yaml.YAMLError as e:\n    raise ParseError('%s' % e)\n  except IOError as e:\n    raise YAMLLoadError('%s' % e)\n\n  _CheckData(yaml_data)\n\n  try:\n    return Config(\n        yaml_data.get('blacklist', ()),\n        yaml_data.get('whitelist', ('*')))\n  except UnicodeDecodeError as e:\n    raise YAMLLoadError('%s' % e)", "docstring": "Reads and returns Config data from a yaml file.\n\nArgs:\nf: Yaml file to parse.\n\nReturns:\nConfig object as defined in this file.\n\nRaises:\nError (some subclass): If there is a problem loading or parsing the file.", "source": "juraj-google-style"}
{"code": "def __init__(self, issuers_to_provider_ids, jwks_supplier, cache_capacity=200):\n        \n        self._issuers_to_provider_ids = issuers_to_provider_ids\n        self._jwks_supplier = jwks_supplier\n\n        arguments = {u\"capacity\": cache_capacity}\n        expiration_time = datetime.timedelta(minutes=5)\n        self._cache = cache.make_region().configure(u\"lru_cache\",\n                                                    arguments=arguments,\n                                                    expiration_time=expiration_time)", "docstring": "Construct an instance of AuthTokenDecoder.\n\nArgs:\nissuers_to_provider_ids: a dictionary mapping from issuers to provider\nIDs defined in the service configuration.\njwks_supplier: an instance of JwksSupplier that supplies JWKS based on\nissuer.\ncache_capacity: the cache_capacity with default value of 200.", "source": "juraj-google-style"}
{"code": "def list_insights_components(access_token, subscription_id, resource_group):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/', '/components?api-version=', INSIGHTS_COMPONENTS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the Microsoft Insights components in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body of components.", "source": "codesearchnet"}
{"code": "def get_dict_to_print(field_to_obs):\n  \n\n  def compressed_steps(steps):\n    return {'num_steps': len(set(steps)),\n            'min_step': min(steps),\n            'max_step': max(steps),\n            'last_step': steps[-1],\n            'first_step': steps[0],\n            'outoforder_steps': get_out_of_order(steps)}\n\n  def full_steps(steps):\n    return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}\n\n  output = {}\n  for field, observations in field_to_obs.items():\n    if not observations:\n      output[field] = None\n      continue\n\n    steps = [x['step'] for x in observations]\n    if field in SHORT_FIELDS:\n      output[field] = compressed_steps(steps)\n    if field in LONG_FIELDS:\n      output[field] = full_steps(steps)\n\n  return output", "docstring": "Transform the field-to-obs mapping into a printable dictionary.\n\nArgs:\nfield_to_obs: Dict that maps string field to `Observation` list.\n\nReturns:\nA dict with the keys and values to print to console.", "source": "juraj-google-style"}
{"code": "def get_metadata_as_dict(self, user_id=None, source=None):\n    if ((self.metadata is None) or (self.metadata == '')):\n        return {}\n    metadata_dict = (self.metadata if isinstance(self.metadata, dict) else json.loads(self.metadata))\n    metadata_keys = [m.lower() for m in metadata_dict]\n    if ((user_id is not None) and ('user_id' not in metadata_keys)):\n        metadata_dict['user_id'] = six.text_type(user_id)\n    if ((source is not None) and ('source' not in metadata_keys)):\n        metadata_dict['source'] = six.text_type(source)\n    return {k: six.text_type(v) for (k, v) in metadata_dict.items()}", "docstring": "Convert a metadata json string into a dictionary.\n\nArgs:\nuser_id (int): Optional: Insert user_id into the metadata if specified\nsource (string): Optional: Insert source (the name of the app typically) into the metadata if necessary.\n\nReturns:\ndict: THe metadata as a python dictionary", "source": "codesearchnet"}
{"code": "def do_get_next(endpoint, access_token):\n    headers = {'Authorization': ('Bearer ' + access_token)}\n    headers['User-Agent'] = get_user_agent()\n    looping = True\n    value_list = []\n    vm_dict = {}\n    while looping:\n        get_return = requests.get(endpoint, headers=headers).json()\n        if (not ('value' in get_return)):\n            return get_return\n        if (not ('nextLink' in get_return)):\n            looping = False\n        else:\n            endpoint = get_return['nextLink']\n        value_list += get_return['value']\n    vm_dict['value'] = value_list\n    return vm_dict", "docstring": "Do an HTTP GET request, follow the nextLink chain and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def infeed_dequeue(dtype, shape, name=None):\n    if dtype not in _SUPPORTED_INFEED_DTYPES:\n        raise TypeError(\"Operation '{}' has type {} which is not a supported TPU infeed type. Supported types are: {}\".format(name, dtype, list(_SUPPORTED_INFEED_DTYPES)))\n    return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)", "docstring": "A placeholder op for a value that will be fed into the computation.\n\nArgs:\ndtype: A `tf.DType`. The type of elements in the tensor.\nshape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `dtype`.\nA tensor that will be provided using the infeed mechanism.\n\nRaises:\nTypeError: If 'dtype` is not a supported infeed type.", "source": "github-repos"}
{"code": "def dict_product(*d, **kwargs):\n    \n    d = dict(dict_merge(*d), **kwargs)\n    holdout = {k: d[k] for k in d if not isinstance(d[k], list)}\n    d = {k: d[k] for k in d if k not in holdout}\n\n    items = d.items()\n    if len(items) == 0:\n        dicts = [{}]\n    else:\n        keys, values = zip(*items)\n        dicts = [dict_filter_none(dict(zip(keys, v))) for v in product(*values)]\n\n    for d in dicts:\n        d.update(holdout)\n\n    return dicts", "docstring": "cartesian product of dict whose values are lists\nArgs:\nd: dictionary to take product of. multiple dictionaries will first\nbe merged by dict_merge\nkwargs: additional kwargs for convenience\nReturns:\na list of dictionaries with the same keys as d and kwargs", "source": "juraj-google-style"}
{"code": "def gradient_helper(optimizer, loss, var_list=None):\n    \n    if var_list is None:\n      var_list = tf.compat.v1.trainable_variables()\n\n    grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)\n    grads = [pair[0] for pair in grads_and_vars]\n\n    return grads, optimizer.apply_gradients(grads_and_vars)", "docstring": "A helper to get the gradients out at each step.\n\nArgs:\noptimizer: the optimizer op.\nloss: the op that computes your loss value.\n\nReturns: the gradient tensors and the train_step op.", "source": "juraj-google-style"}
{"code": "def gen_pypirc(username=None, password=None):\n    \n    \n    path = join(conf.getenv('HOME'), '.pypirc')\n    username = username or conf.getenv('PYPI_USER', None)\n    password = password or conf.getenv('PYPI_PASS', None)\n\n    if username is None or password is None:\n        log.err(\"You must provide $PYPI_USER and $PYPI_PASS\")\n        sys.exit(1)\n\n    log.info(\"Generating <94>{}\".format(path))\n\n    fs.write_file(path, util.remove_indent(.format(\n        username=username,\n        password=password\n    )))", "docstring": "Generate ~/.pypirc with the given credentials.\n\nUseful for CI builds. Can also get credentials through env variables\n``PYPI_USER`` and ``PYPI_PASS``.\n\nArgs:\nusername (str):\npypi username. If not given it will try to take it from the\n`` PYPI_USER`` env variable.\npassword (str):\npypi password. If not given it will try to take it from the\n`` PYPI_PASS`` env variable.", "source": "juraj-google-style"}
{"code": "def VerifyConfiguration(conf, nsswitch_filename=FILE_NSSWITCH):\n    warnings, errors = (0, 0)\n    if not conf.maps:\n        logging.error('No maps are configured.')\n        errors += 1\n    nsswitch = ParseNSSwitchConf(nsswitch_filename)\n    for configured_map in conf.maps:\n        if configured_map == 'sshkey':\n            continue\n        if conf.options[configured_map].cache['name'] == 'nssdb':\n            logging.error('nsscache no longer supports nssdb cache')\n            errors += 1\n        if conf.options[configured_map].cache['name'] == 'files':\n            nss_module_name = 'files'\n            if 'cache_filename_suffix' in conf.options[configured_map].cache and conf.options[configured_map].cache['cache_filename_suffix'] == 'cache':\n                nss_module_name = 'cache'\n        else:\n            nss_module_name = 'cache'\n        if nss_module_name not in nsswitch[configured_map]:\n            logging.warning('nsscache is configured to build maps for %r, but NSS is not configured (in %r) to use it', configured_map, nsswitch_filename)\n            warnings += 1\n    return (warnings, errors)", "docstring": "Verify that the system configuration matches the nsscache configuration.\n\nChecks that NSS configuration has the cache listed for each map that\nis configured in the nsscache configuration, i.e. that the system is\nconfigured to use the maps we are building.\n\nArgs:\nconf: a Configuration\nnsswitch_filename: optionally the name of the file to parse\nReturns:\n(warnings, errors) a tuple counting the number of warnings and\nerrors detected", "source": "github-repos"}
{"code": "def setMaxDemandPeriod(self, period, password='00000000'):\n    result = False\n    self.setContext('setMaxDemandPeriod')\n    try:\n        if ((period < 1) or (period > 3)):\n            self.writeCmdMsg('Correct parameter: 1 = 15 minute, 2 = 30 minute, 3 = hour')\n            self.setContext('')\n            return result\n        if (not self.request(False)):\n            self.writeCmdMsg('Bad read CRC on setting')\n        elif (not self.serialCmdPwdAuth(password)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_str = (('015731023030353028' + binascii.hexlify(str(period)).zfill(2)) + '2903')\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success(setMaxDemandPeriod): 06 returned.')\n                result = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return result", "docstring": "Serial call to set max demand period.\n\nArgs:\nperiod (int): : as int.\npassword (str): Optional password.\n\nReturns:\nbool: True on completion with ACK.", "source": "codesearchnet"}
{"code": "def parse_content(self, content):\n    self.active_lines_unparsed = (get_active_lines(content) if (content is not None) else [])\n    self.active_settings = (split_kv_pairs(content, use_partition=False) if (content is not None) else [])", "docstring": "Main parsing class method which stores all interesting data from the content.\n\nArgs:\ncontent (context.content): Parser context content", "source": "codesearchnet"}
{"code": "def Add(self, category, label, age):\n    now = rdfvalue.RDFDatetime.Now()\n    category = utils.SmartUnicode(category)\n    for active_time in self.active_days:\n        self.categories[active_time].setdefault(label, {})\n        if ((now - age).seconds < (((active_time * 24) * 60) * 60)):\n            self.categories[active_time][label][category] = (self.categories[active_time][label].get(category, 0) + 1)", "docstring": "Adds another instance of this category into the active_days counter.\n\nWe automatically count the event towards all relevant active_days. For\nexample, if the category \"Windows\" was seen 8 days ago it will be counted\ntowards the 30 day active, 14 day active but not against the 7 and 1 day\nactives.\n\nArgs:\ncategory: The category name to account this instance against.\nlabel: Client label to which this should be applied.\nage: When this instance occurred.", "source": "codesearchnet"}
{"code": "def altitude(msg):\n    \n\n    tc = typecode(msg)\n\n    if tc<5 or tc==19 or tc>22:\n        raise RuntimeError(\"%s: Not a position message\" % msg)\n\n    if tc>=5 and tc<=8:\n        \n        return 0\n\n    msgbin = common.hex2bin(msg)\n    q = msgbin[47]\n    if q:\n        n = common.bin2int(msgbin[40:47]+msgbin[48:52])\n        alt = n * 25 - 1000\n        return alt\n    else:\n        return None", "docstring": "Decode aircraft altitude\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: altitude in feet", "source": "juraj-google-style"}
{"code": "def get(self, statediag, accepted=None):\n        \n        count = 0\n        statesmap = {}\n        newstatediag = {}\n        for state in statediag:\n\n            \n            if statediag[state].id not in statesmap:\n                statesmap[statediag[state].id] = count\n                mapped = count\n                count = count + 1\n            else:\n                mapped = statesmap[statediag[state].id]\n\n            \n\n            transitions = {}\n            for nextstate in statediag[state].trans:\n                if nextstate not in statesmap:\n                    statesmap[nextstate] = count\n                    transmapped = count\n                    count = count + 1\n                else:\n                    transmapped = statesmap[nextstate]\n                transitions[transmapped] = statediag[state].trans[nextstate]\n            newstate = PDAState()\n            newstate.id = mapped\n            newstate.type = statediag[state].type\n            newstate.sym = statediag[state].sym\n            newstate.trans = transitions\n            newstatediag[mapped] = newstate\n        newaccepted = None\n        if accepted is not None:\n            newaccepted = []\n            for accepted_state in accepted :\n                if (0, accepted_state) in statesmap:\n                    newaccepted.append(statesmap[(0, accepted_state)])\n        return newstatediag, count, newaccepted", "docstring": "Replaces complex state IDs as generated from the product operation,\ninto simple sequencial numbers. A dictionaty is maintained in order\nto map the existed IDs.\nArgs:\nstatediag (list): The states of the PDA\naccepted (list): the list of DFA accepted states\nReturns:\nlist:", "source": "juraj-google-style"}
{"code": "def add_observer(self, o, component_type=ComponentType):\n    self.observers[component_type].add(o)", "docstring": "Add a callback that will get invoked after each component is called.\n\nArgs:\no (func): the callback function\n\nKeyword Args:\ncomponent_type (ComponentType): the :class:`ComponentType` to observe.\nThe callback will fire any time an instance of the class or its\nsubclasses is invoked.\nThe callback should look like this:\n\n.. code-block:: python\n\ndef callback(comp, broker):\nvalue = broker.get(comp)\n# do something with value\npass", "source": "codesearchnet"}
{"code": "def to_unicode(self, s):\n        \n\n        \n        if isinstance(s, unicode):\n            return s\n        if isinstance(s, str):\n            return unicode(s, errors='ignore')\n\n        \n        return s", "docstring": "Convert an elementary datatype to unicode.\n\nArgs:\ns: the datatype to be unicoded.\n\nReturns:\nUnicoded data.", "source": "juraj-google-style"}
{"code": "def match(pattern, name):\n    try:\n        re_pat = _PATTERN_CACHE[(pattern, True)]\n    except KeyError:\n        res = (('(?ms)' + _translate(pattern)) + '\\\\Z')\n        _PATTERN_CACHE[(pattern, True)] = re_pat = re.compile(res)\n    return (re_pat.match(name) is not None)", "docstring": "Test whether a name matches a wildcard pattern.\n\nArguments:\npattern (str): A wildcard pattern, e.g. ``\"*.py\"``.\nname (str): A filename.\n\nReturns:\nbool: `True` if the filename matches the pattern.", "source": "codesearchnet"}
{"code": "def locale(self, value):\n        \n        if value == self._defaults['ai.device.locale'] and 'ai.device.locale' in self._values:\n            del self._values['ai.device.locale']\n        else:\n            self._values['ai.device.locale'] = value", "docstring": "The locale property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def get_tr(self, derivatives=False, **selectors):\n        \n        \n        selectors.update(suffix='bold', datatype='func')\n        scope = None if derivatives else 'raw'\n        images = self.get(extensions=['.nii', '.nii.gz'], scope=scope,\n                          **selectors)\n        if not images:\n            raise ValueError(\"No functional images that match criteria found.\")\n\n        all_trs = set()\n        for img in images:\n            md = self.get_metadata(img.path, suffix='bold', full_search=True)\n            all_trs.add(round(float(md['RepetitionTime']), 5))\n \n        if len(all_trs) > 1:\n            raise ValueError(\"Unique TR cannot be found given selectors {!r}\"\n                             .format(selectors))\n        return all_trs.pop()", "docstring": "Returns the scanning repetition time (TR) for one or more runs.\n\nArgs:\nderivatives (bool): If True, also checks derivatives images.\nselectors: Optional keywords used to constrain the selected runs.\nCan be any arguments valid for a .get call (e.g., BIDS entities\nor JSON sidecar keys).\n\nReturns: A single float.\n\nNotes: Raises an exception if more than one unique TR is found.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqSequenceClassifierOutput, Tuple[tf.Tensor]]:\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    if labels is not None:\n        use_cache = False\n    if input_ids is None and inputs_embeds is not None:\n        raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}')\n    outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    last_hidden_state = outputs[0]\n    eos_mask = tf.equal(input_ids, self.config.eos_token_id)\n    self_masked = tf.reshape(tf.boolean_mask(eos_mask, eos_mask), (tf.shape(input_ids)[0], -1))\n    tf.Assert(tf.reduce_all(self_masked[:, -1]), ['All examples must have the same number of <eos> tokens.'])\n    masked = tf.reshape(tf.boolean_mask(last_hidden_state, eos_mask), (tf.shape(input_ids)[0], tf.shape(self_masked)[1], tf.shape(last_hidden_state)[-1]))\n    sentence_representation = masked[:, -1, :]\n    logits = self.classification_head(sentence_representation)\n    loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)\n    if not return_dict:\n        output = (logits,) + outputs[1:]\n        return (loss,) + output if loss is not None else output\n    return TFSeq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\nconfig.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\nReturns:", "source": "github-repos"}
{"code": "def _InsertEvent(self, event, force_flush=False):\n    if event:\n        event_document = {'index': {'_index': self._index_name, '_type': self._document_type}}\n        event_values = self._GetSanitizedEventValues(event)\n        self._event_documents.append(event_document)\n        self._event_documents.append(event_values)\n        self._number_of_buffered_events += 1\n    if (force_flush or (self._number_of_buffered_events > self._flush_interval)):\n        self._FlushEvents()", "docstring": "Inserts an event.\n\nEvents are buffered in the form of documents and inserted to Elasticsearch\nwhen either forced to flush or when the flush interval (threshold) has been\nreached.\n\nArgs:\nevent (EventObject): event.\nforce_flush (bool): True if buffered event documents should be inserted\ninto Elasticsearch.", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all datasets in the specified project to which you have been granted the READER dataset role.\n\nArgs:\nrequest: (BigqueryDatasetsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(DatasetList) The response message.", "source": "github-repos"}
{"code": "def set_computer_name(name):\n    \n    if six.PY2:\n        name = _to_unicode(name)\n\n    if windll.kernel32.SetComputerNameExW(\n            win32con.ComputerNamePhysicalDnsHostname, name):\n        ret = {'Computer Name': {'Current': get_computer_name()}}\n        pending = get_pending_computer_name()\n        if pending not in (None, False):\n            ret['Computer Name']['Pending'] = pending\n        return ret\n\n    return False", "docstring": "Set the Windows computer name\n\nArgs:\n\nname (str):\nThe new name to give the computer. Requires a reboot to take effect.\n\nReturns:\ndict:\nReturns a dictionary containing the old and new names if successful.\n``False`` if not.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_computer_name 'DavesComputer'", "source": "juraj-google-style"}
{"code": "def overlap(ival0, ival1):\n    (min0, max0) = ival0\n    (min1, max1) = ival1\n    return (max(0, (min(max0, max1) - max(min0, min1))) > 0)", "docstring": "Determine if two interval tuples have overlap.\n\nArgs:\niv0 ((int,int)):    An interval tuple\niv1 ((int,int));    An interval tuple\n\nReturns:\n(bool): True if the intervals overlap, otherwise False", "source": "codesearchnet"}
{"code": "def _create_none_optionals(func_graph, n):\n    with func_graph.as_default():\n        return [gen_optional_ops.optional_none() for _ in range(n)]", "docstring": "Creates `n` `None` optionals in func_graph.\n\nArgs:\nfunc_graph: FuncGraph.\nn: `int` the number of `None` optionals to make.\n\nReturns:\nA list of tensors in func_graph.", "source": "github-repos"}
{"code": "def broadcast(cls, shape1: 'TensorFluentShape', shape2: 'TensorFluentShape') -> Tuple[(Reshaping, Reshaping)]:\n    (reshape_1, reshape_2) = (None, None)\n    if (not (shape1._batch or shape2._batch)):\n        return (reshape_1, reshape_2)\n    (size_1, size_2) = (shape1.fluent_size, shape2.fluent_size)\n    size_diff = abs((size_1 - size_2))\n    if (size_diff == 0):\n        return (reshape_1, reshape_2)\n    if ((size_2 > size_1) and (not ((size_1 == 0) and (not shape1._batch)))):\n        reshape_1 = (([1] * size_diff) + list(shape1.fluent_shape))\n        if shape1._batch:\n            reshape_1 = ([shape1.batch_size] + reshape_1)\n    elif ((size_1 > size_2) and (not ((size_2 == 0) and (not shape2._batch)))):\n        reshape_2 = (([1] * size_diff) + list(shape2.fluent_shape))\n        if shape2._batch:\n            reshape_2 = ([shape2.batch_size] + reshape_2)\n    return (reshape_1, reshape_2)", "docstring": "It broadcasts the fluent shapes if any input is in batch mode.\n\nIt handles input shapes in different modes, expanding its\ndimensions if necessary. It outputs a tuple with new shapes.\nIf no input shape is in batch mode, return (None, None).\nIf an input shape does not need to be changed, return None.\n\nArgs:\nshape1: A fluent's shape.\nshape2: A fluent's shape.\n\nReturns:\nA pair of new shapes.", "source": "codesearchnet"}
{"code": "def create_tar_file(source_files, target=None):\n    if target:\n        filename = target\n    else:\n        (_, filename) = tempfile.mkstemp()\n    with tarfile.open(filename, mode='w:gz') as t:\n        for sf in source_files:\n            t.add(sf, arcname=os.path.basename(sf))\n    return filename", "docstring": "Create a tar file containing all the source_files\n\nArgs:\nsource_files (List[str]): List of file paths that will be contained in the tar file\n\nReturns:\n(str): path to created tar file", "source": "codesearchnet"}
{"code": "def setReplicationStatus(\n        self, pid, nodeRef, status, dataoneError=None, vendorSpecific=None\n    ):\n        \n        response = self.setReplicationStatusResponse(\n            pid, nodeRef, status, dataoneError, vendorSpecific\n        )\n        return self._read_boolean_response(response)", "docstring": "See Also: setReplicationStatusResponse()\n\nArgs:\npid:\nnodeRef:\nstatus:\ndataoneError:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def loop_until_true_else_raise(timeout_s, function, invert=False, message=None, sleep_s=1):\n\n    def validate(x):\n        return (bool(x) != invert)\n    result = loop_until_timeout_or_valid(timeout_s, function, validate, sleep_s=1)\n    if validate(result):\n        return result\n    if (message is not None):\n        raise RuntimeError(message)\n    name = '(unknown)'\n    if hasattr(function, '__name__'):\n        name = function.__name__\n    elif (isinstance(function, functools.partial) and hasattr(function.func, '__name__')):\n        name = function.func.__name__\n    raise RuntimeError(('Function %s failed to return %s within %d seconds.' % (name, ('falsey' if invert else 'truthy'), timeout_s)))", "docstring": "Repeatedly call the given function until truthy, or raise on a timeout.\n\nArgs:\ntimeout_s: The number of seconds to wait until a timeout condition is\nreached. As a convenience, this accepts None to mean never timeout. Can\nalso be passed a PolledTimeout object instead of an integer.\nfunction: The function to call each iteration.\ninvert: If True, wait for the callable to return falsey instead of truthy.\nmessage: Optional custom error message to use on a timeout.\nsleep_s: Seconds to sleep between call attempts.\n\nReturns:\nThe final return value of the function.\n\nRaises:\nRuntimeError if the timeout is reached before the function returns truthy.", "source": "codesearchnet"}
{"code": "def setPollingValues(self, max_waits, wait_sleep):\n        \n        self.m_max_waits = max_waits\n        self.m_wait_sleep = wait_sleep", "docstring": "Optional polling loop control\n\nArgs:\nmax_waits (int):   waits\nwait_sleep (int):  ms per wait", "source": "juraj-google-style"}
{"code": "def predict_proba(self, L):\n        \n        \n        \n        \n        Y_pf = LabelModel.predict_proba(self, L)\n        n, k = Y_pf.shape\n\n        \n        \n        Y_p = [np.zeros((n, k_t)) for k_t in self.task_graph.K]\n        for yi, y in enumerate(self.task_graph.feasible_set()):\n            for t in range(self.t):\n                k_t = int(y[t])\n                Y_p[t][:, k_t - 1] += Y_pf[:, yi]\n        return Y_p", "docstring": "Returns the task marginals estimated by the model: a t-length list of\n[n,k_t] matrices where the (i,j) entry of the sth matrix represents the\nestimated P((Y_i)_s | \\lambda_j(x_i))\n\nArgs:\nL: A t-length list of [n,m] scipy.sparse label matrices with values\nin {0,1,...,k}", "source": "juraj-google-style"}
{"code": "def on(self, *qubits: Qid) -> 'gate_operation.GateOperation':\n        \n        \n        from cirq.ops import gate_operation\n        return gate_operation.GateOperation(self, list(qubits))", "docstring": "Returns an application of this gate to the given qubits.\n\nArgs:\n*qubits: The collection of qubits to potentially apply the gate to.", "source": "juraj-google-style"}
{"code": "def fingerprints(data):\n    Hashes = namedtuple('Hashes', 'md5 sha1 sha256 sha512')\n    if six.PY2:\n        if (not isinstance(data, str)):\n            data = data.encode('utf-8')\n    elif six.PY3:\n        if (not isinstance(data, bytes)):\n            data = data.encode('utf-8')\n    md5 = hashlib.md5()\n    md5.update(data)\n    md5 = md5.hexdigest()\n    sha1 = hashlib.sha1()\n    sha1.update(data)\n    sha1 = sha1.hexdigest()\n    sha256 = hashlib.sha256()\n    sha256.update(data)\n    sha256 = sha256.hexdigest()\n    sha512 = hashlib.sha512()\n    sha512.update(data)\n    sha512 = sha512.hexdigest()\n    return Hashes(md5, sha1, sha256, sha512)", "docstring": "This function return the fingerprints of data.\n\nArgs:\ndata (string): raw data\n\nReturns:\nnamedtuple: fingerprints md5, sha1, sha256, sha512", "source": "codesearchnet"}
{"code": "def sgn_prod(p1, p2):\n        r\n        phase = Pauli._prod_phase(p1, p2)\n        new_pauli = p1 * p2\n        return new_pauli, phase", "docstring": "r\"\"\"\nMultiply two Paulis and track the phase.\n\n$P_3 = P_1 \\otimes P_2$: X*Y\n\nArgs:\np1 (Pauli): pauli 1\np2 (Pauli): pauli 2\n\nReturns:\nPauli: the multiplied pauli\ncomplex: the sign of the multiplication, 1, -1, 1j or -1j", "source": "juraj-google-style"}
{"code": "def _parse_domain_id(self, config):\n        \n        match = re.search(r'domain-id (.+)$', config)\n        value = match.group(1) if match else None\n        return dict(domain_id=value)", "docstring": "Scans the config block and parses the domain-id value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "juraj-google-style"}
{"code": "def modify_job_state(self, job_id, new_state):\n    if new_state == 'JOB_STATE_DONE':\n        new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DONE\n    elif new_state == 'JOB_STATE_CANCELLED':\n        new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_CANCELLED\n    elif new_state == 'JOB_STATE_DRAINING':\n        new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DRAINING\n    else:\n        return False\n    request = dataflow.DataflowProjectsLocationsJobsUpdateRequest()\n    request.jobId = job_id\n    request.projectId = self.google_cloud_options.project\n    request.location = self.google_cloud_options.region\n    request.job = dataflow.Job(requestedState=new_state)\n    self._client.projects_locations_jobs.Update(request)\n    return True", "docstring": "Modify the run state of the job.\n\nArgs:\njob_id: The id of the job.\nnew_state: A string representing the new desired state. It could be set to\neither 'JOB_STATE_DONE', 'JOB_STATE_CANCELLED' or 'JOB_STATE_DRAINING'.\n\nReturns:\nTrue if the job was modified successfully.", "source": "github-repos"}
{"code": "def get_messages(module):\n    \n    answer = collections.OrderedDict()\n    for name in dir(module):\n        candidate = getattr(module, name)\n        if inspect.isclass(candidate) and issubclass(candidate, message.Message):\n            answer[name] = candidate\n    return answer", "docstring": "Discovers all protobuf Message classes in a given import module.\n\nArgs:\nmodule (module): A Python module; :func:`dir` will be run against this\nmodule to find Message subclasses.\n\nReturns:\ndict[str, google.protobuf.message.Message]: A dictionary with the\nMessage class names as keys, and the Message subclasses themselves\nas values.", "source": "juraj-google-style"}
{"code": "def _add_unitary_two(self, gate, qubit0, qubit1):\n        \n        \n        indexes = einsum_vecmul_index([qubit0, qubit1], self._number_of_qubits)\n        \n        gate_tensor = np.reshape(np.array(gate, dtype=complex), 4 * [2])\n        \n        self._statevector = np.einsum(indexes, gate_tensor,\n                                      self._statevector,\n                                      dtype=complex,\n                                      casting='no')", "docstring": "Apply a two-qubit unitary matrix.\n\nArgs:\ngate (matrix_like): a the two-qubit gate matrix\nqubit0 (int): gate qubit-0\nqubit1 (int): gate qubit-1", "source": "juraj-google-style"}
{"code": "def set_hostname(self, value=None, default=False, disable=False):\n    cmd = self.command_builder('hostname', value=value, default=default, disable=disable)\n    return self.configure(cmd)", "docstring": "Configures the global system hostname setting\n\nEosVersion:\n4.13.7M\n\nArgs:\nvalue (str): The hostname value\ndefault (bool): Controls use of the default keyword\ndisable (bool): Controls the use of the no keyword\n\nReturns:\nbool: True if the commands are completed successfully", "source": "codesearchnet"}
{"code": "def add(self, other):\n        \n        if not isinstance(other, Operator):\n            other = Operator(other)\n        if self.dim != other.dim:\n            raise QiskitError(\"other operator has different dimensions.\")\n        return Operator(self.data + other.data, self.input_dims(),\n                        self.output_dims())", "docstring": "Return the operator self + other.\n\nArgs:\nother (Operator): an operator object.\n\nReturns:\nOperator: the operator self + other.\n\nRaises:\nQiskitError: if other is not an operator, or has incompatible\ndimensions.", "source": "juraj-google-style"}
{"code": "def usufyToOdsExport(d, fPath):\n    \n    from pyexcel_ods import get_data\n    try:\n        \n        \n        oldData = {\"OSRFramework\": get_data(fPath) }\n    except:\n        \n        oldData = {\"OSRFramework\":[]}\n\n    \n    tabularData = _generateTabularData(d, oldData)\n\n    from pyexcel_ods import save_data\n    \n    save_data(fPath, tabularData)", "docstring": "Workaround to export to a .ods file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key):\n    \n    getitem = self._class_to_mock.__dict__.get('__getitem__', None)\n\n    \n    if getitem is None:\n      raise TypeError('unsubscriptable object')\n\n    \n    if self._replay_mode:\n      return MockMethod('__getitem__', self._expected_calls_queue,\n                        self._replay_mode)(key)\n\n\n    \n    return self._CreateMockMethod('__getitem__')(key)", "docstring": "Provide custom logic for mocking classes that are subscriptable.\n\nArgs:\nkey: Key to return the value for.\n\nReturns:\nExpected return value in replay mode.  A MockMethod object for the\n__getitem__ method that has already been called if not in replay mode.\n\nRaises:\nTypeError if the underlying class is not subscriptable.\nUnexpectedMethodCallError if the object does not expect the call to\n__setitem__.", "source": "juraj-google-style"}
{"code": "def _einsum_equation(input_shapes, output_shape):\n  \n  ret = []\n  next_letter = ord(\"a\")\n  dim_to_letter = {}\n  for shape_num, shape in enumerate(input_shapes + [output_shape]):\n    if shape_num == len(input_shapes):\n      ret.append(\"->\")\n    elif shape_num > 0:\n      ret.append(\",\")\n    for d in shape.dims:\n      if d not in dim_to_letter:\n        dim_to_letter[d] = chr(next_letter)\n        next_letter += 1\n      ret.append(dim_to_letter[d])\n\n  return \"\".join(ret)", "docstring": "Turn shapes into an einsum equation.\n\ne.g. \"ij,jk->ik\"\n\nArgs:\ninput_shapes: a list of Shapes\noutput_shape: a Shape\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def model_fn(features, labels, mode, params, config):\n  \n  del labels, config\n\n  \n  logit_concentration = tf.compat.v1.get_variable(\n      \"logit_concentration\",\n      shape=[1, params[\"num_topics\"]],\n      initializer=tf.compat.v1.initializers.constant(\n          _softplus_inverse(params[\"prior_initial_value\"])))\n  concentration = _clip_dirichlet_parameters(\n      tf.nn.softplus(logit_concentration))\n\n  num_words = features.shape[1]\n  topics_words_logits = tf.compat.v1.get_variable(\n      \"topics_words_logits\",\n      shape=[params[\"num_topics\"], num_words],\n      initializer=tf.compat.v1.glorot_normal_initializer())\n  topics_words = tf.nn.softmax(topics_words_logits, axis=-1)\n\n  \n  \n  lda_variational = make_lda_variational(\n      params[\"activation\"],\n      params[\"num_topics\"],\n      params[\"layer_sizes\"])\n  with ed.tape() as variational_tape:\n    _ = lda_variational(features)\n\n  with ed.tape() as model_tape:\n    with ed.interception(\n        make_value_setter(topics=variational_tape[\"topics_posterior\"])):\n      posterior_predictive = latent_dirichlet_allocation(concentration,\n                                                         topics_words)\n\n  log_likelihood = posterior_predictive.distribution.log_prob(features)\n  tf.compat.v1.summary.scalar(\"log_likelihood\",\n                              tf.reduce_mean(input_tensor=log_likelihood))\n\n  \n  \n  \n  kl = variational_tape[\"topics_posterior\"].distribution.kl_divergence(\n      model_tape[\"topics\"].distribution)\n  tf.compat.v1.summary.scalar(\"kl\", tf.reduce_mean(input_tensor=kl))\n\n  \n  \n  with tf.control_dependencies(\n      [tf.compat.v1.assert_greater(kl, -1e-3, message=\"kl\")]):\n    kl = tf.identity(kl)\n\n  elbo = log_likelihood - kl\n  avg_elbo = tf.reduce_mean(input_tensor=elbo)\n  tf.compat.v1.summary.scalar(\"elbo\", avg_elbo)\n  loss = -avg_elbo\n\n  \n  global_step = tf.compat.v1.train.get_or_create_global_step()\n  optimizer = tf.compat.v1.train.AdamOptimizer(params[\"learning_rate\"])\n\n  \n  \n  \n  grads_and_vars = optimizer.compute_gradients(loss)\n  grads_and_vars_except_prior = [\n      x for x in grads_and_vars if x[1] != logit_concentration]\n\n  def train_op_except_prior():\n    return optimizer.apply_gradients(\n        grads_and_vars_except_prior,\n        global_step=global_step)\n\n  def train_op_all():\n    return optimizer.apply_gradients(\n        grads_and_vars,\n        global_step=global_step)\n\n  train_op = tf.cond(\n      pred=global_step < params[\"prior_burn_in_steps\"],\n      true_fn=train_op_except_prior,\n      false_fn=train_op_all)\n\n  \n  words_per_document = tf.reduce_sum(input_tensor=features, axis=1)\n  log_perplexity = -elbo / words_per_document\n  tf.compat.v1.summary.scalar(\n      \"perplexity\", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))\n  (log_perplexity_tensor,\n   log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)\n  perplexity_tensor = tf.exp(log_perplexity_tensor)\n\n  \n  topics = tf.compat.v1.py_func(\n      functools.partial(get_topics_strings, vocabulary=params[\"vocabulary\"]),\n      [topics_words, concentration],\n      tf.string,\n      stateful=False)\n  tf.compat.v1.summary.text(\"topics\", topics)\n\n  return tf.estimator.EstimatorSpec(\n      mode=mode,\n      loss=loss,\n      train_op=train_op,\n      eval_metric_ops={\n          \"elbo\": tf.compat.v1.metrics.mean(elbo),\n          \"log_likelihood\": tf.compat.v1.metrics.mean(log_likelihood),\n          
\"kl\": tf.compat.v1.metrics.mean(kl),\n          \"perplexity\": (perplexity_tensor, log_perplexity_update),\n          \"topics\": (topics, tf.no_op()),\n      },\n  )", "docstring": "Builds the model function for use in an Estimator.\n\nArguments:\nfeatures: The input features for the Estimator.\nlabels: The labels, unused here.\nmode: Signifies whether it is train or test or predict.\nparams: Some hyperparameters as a dictionary.\nconfig: The RunConfig, unused here.\n\nReturns:\nEstimatorSpec: A tf.estimator.EstimatorSpec instance.", "source": "juraj-google-style"}
{"code": "def get_artifact_filename(self, package_name, artifact_name):\n        \n\n        project_name = self.packages.normalize(package_name)\n        return self.records.get((project_name, artifact_name))", "docstring": "Similar to pkg_resources.resource_filename, however this works\nwith the information cached in this registry instance, and\narguments are not quite the same.\n\nArguments:\n\npackage_name\nThe name of the package to get the artifact from\nartifact_name\nThe exact name of the artifact.\n\nReturns the path of where the artifact should be if it has been\ndeclared, otherwise None.", "source": "juraj-google-style"}
{"code": "def _ParseValueData(self, parser_mediator, registry_key, registry_value):\n    \n    value_data = registry_value.data\n\n    value_data_size = len(value_data)\n    if value_data_size < 4:\n      return\n\n    header_map = self._GetDataTypeMap('programscache_header')\n\n    try:\n      header = self._ReadStructureFromByteStream(\n          value_data, 0, header_map)\n    except (ValueError, errors.ParseError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse header value with error: {0!s}'.format(\n              exception))\n      return\n\n    if header.format_version not in (1, 9, 12, 19):\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported format version: {0:d}'.format(header.format_version))\n      return\n\n    known_folder_identifier = None\n    if header.format_version == 1:\n      value_data_offset = 8\n\n    elif header.format_version == 9:\n      value_data_offset = 6\n\n    elif header.format_version in (12, 19):\n      known_folder_identifier = uuid.UUID(bytes_le=value_data[4:20])\n      value_data_offset = 20\n\n    entry_header_map = self._GetDataTypeMap('programscache_entry_header')\n    entry_footer_map = self._GetDataTypeMap('programscache_entry_footer')\n\n    sentinel = 0\n    if header.format_version != 9:\n      try:\n        entry_footer = self._ReadStructureFromByteStream(\n            value_data[value_data_offset:], value_data_offset, entry_footer_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse sentinel at offset: 0x{0:08x} '\n            'with error: {1!s}').format(value_data_offset, exception))\n        return\n\n      value_data_offset += entry_footer_map.GetByteSize()\n\n      sentinel = entry_footer.sentinel\n\n    link_targets = []\n    while sentinel in (0x00, 0x01):\n      if value_data_offset >= value_data_size:\n        break\n\n      try:\n        entry_header = self._ReadStructureFromByteStream(\n            value_data[value_data_offset:], value_data_offset, entry_header_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse entry header at offset: 0x{0:08x} '\n            'with error: {1!s}').format(value_data_offset, exception))\n        break\n\n      value_data_offset += entry_header_map.GetByteSize()\n\n      display_name = '{0:s} {1:s}'.format(\n          registry_key.path, registry_value.name)\n\n      shell_items_parser = shell_items.ShellItemsParser(display_name)\n      shell_items_parser.ParseByteStream(\n          parser_mediator, value_data[value_data_offset:],\n          codepage=parser_mediator.codepage)\n\n      link_target = shell_items_parser.CopyToPath()\n      link_targets.append(link_target)\n\n      value_data_offset += entry_header.data_size\n\n      try:\n        entry_footer = self._ReadStructureFromByteStream(\n            value_data[value_data_offset:], value_data_offset, entry_footer_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse entry footer at offset: 0x{0:08x} '\n            'with error: {1!s}').format(value_data_offset, exception))\n        return\n\n      value_data_offset += entry_footer_map.GetByteSize()\n\n      sentinel = entry_footer.sentinel\n\n    \n\n    if known_folder_identifier:\n      known_folder_identifier = '{0!s}'.format(known_folder_identifier)\n\n    
event_data = windows_events.WindowsRegistryListEventData()\n    event_data.key_path = registry_key.path\n    event_data.known_folder_identifier = known_folder_identifier\n    event_data.list_name = registry_value.name\n    event_data.list_values = ' '.join([\n        '{0:d}: {1:s}'.format(index, link_target)\n        for index, link_target in enumerate(link_targets)])\n    event_data.value_name = registry_value.name\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts event objects from a Explorer ProgramsCache value data.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\nregistry_value (dfwinreg.WinRegistryValue): Windows Registry value.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def get_all(self, uids: Iterable[int]) -> Mapping[int, Record]:\n        \n        return {uid: self._records[uid] for uid in uids\n                if uid in self._records}", "docstring": "Get records by a set of UIDs.\n\nArgs:\nuids: The message UIDs.", "source": "juraj-google-style"}
{"code": "def Set(self, name, value):\n    \n    \n    \n    if self.writeback is None:\n      logging.warning(\"Attempting to modify a read only config object for %s.\",\n                      name)\n    if name in self.constants:\n      raise ConstModificationError(\n          \"Attempting to modify constant value %s\" % name)\n\n    writeback_data = self.writeback_data\n\n    \n    if value is not None:\n      if isinstance(value, Text):\n        value = self.EscapeString(value)\n\n    writeback_data[name] = value\n    self.FlushCache()", "docstring": "Update the configuration option with a new value.\n\nNote that this forces the value to be set for all contexts. The value is\nwritten to the writeback location if Save() is later called.\n\nArgs:\nname: The name of the parameter to set.\nvalue: The value to set it to. The value will be validated against the\noption's type descriptor.\n\nRaises:\nConstModificationError: When attempting to change a constant option.", "source": "juraj-google-style"}
{"code": "def number_text_lines(text):\n    r\n    numbered_linelist = [\n        ''.join((('%2d' % (count + 1)), ' >>> ', line))\n        for count, line in enumerate(text.splitlines())\n    ]\n    text_with_lineno = '\\n'.join(numbered_linelist)\n    return text_with_lineno", "docstring": "r\"\"\"\nArgs:\ntext (str):\n\nReturns:\nstr: text_with_lineno - string with numbered lines", "source": "juraj-google-style"}
{"code": "def add_file_recursive(self, filename, trim=False):\n    assert (not self.final), 'Trying to mutate a final graph.'\n    self.add_source_file(filename)\n    queue = collections.deque([filename])\n    seen = set()\n    while queue:\n        filename = queue.popleft()\n        self.graph.add_node(filename)\n        try:\n            (deps, broken) = self.get_file_deps(filename)\n        except parsepy.ParseError:\n            if filename.endswith('.py'):\n                self.unreadable_files.add(filename)\n            else:\n                self.graph.remove_node(filename)\n            continue\n        for f in broken:\n            self.broken_deps[filename].add(f)\n        for f in deps:\n            if self.follow_file(f, seen, trim):\n                queue.append(f)\n                seen.add(f)\n            self.graph.add_node(f)\n            self.graph.add_edge(filename, f)", "docstring": "Add a file and all its recursive dependencies to the graph.\n\nArgs:\nfilename: The name of the file.\ntrim: Whether to trim the dependencies of builtin and system files.", "source": "codesearchnet"}
{"code": "def patch_with_options(request, options, parent_queue_item=None):\n        \n\n        request.auth = copy.deepcopy(options.identity.auth)\n        request.cookies = copy.deepcopy(options.identity.cookies)\n        request.headers = copy.deepcopy(options.identity.headers)\n        request.proxies = copy.deepcopy(options.identity.proxies)\n        request.timeout = copy.copy(options.performance.request_timeout)\n\n        if parent_queue_item != None:\n            for cookie in parent_queue_item.request.cookies:\n                request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)\n\n            for cookie in parent_queue_item.response.cookies:\n                request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)\n\n        if options.misc.verify_ssl_certificates and options.misc.trusted_certificates:\n            request.verify = options.misc.trusted_certificates\n        else:\n            request.verify = options.misc.verify_ssl_certificates", "docstring": "Patch the given request with the given options (e.g. user agent).\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The request to patch.\noptions (:class:`nyawc.Options`): The options to patch the request with.\nparent_queue_item (:class:`nyawc.QueueItem`): The parent queue item object (request/response pair) if exists.", "source": "juraj-google-style"}
{"code": "def visit_statements(self, nodes):\n    for node in nodes:\n        if isinstance(node, gast.AST):\n            self.to_prepend.append(deque())\n            self.to_append.append(deque())\n            node = self.visit(node)\n            self.visit_statements(self.to_prepend.pop())\n            if isinstance(node, gast.AST):\n                self.to_insert[(- 1)].append(node)\n            elif node:\n                self.to_insert[(- 1)].extend(node)\n            self.visit_statements(self.to_append.pop())\n        else:\n            self.to_insert[(- 1)].append(node)\n    return self.to_insert[(- 1)]", "docstring": "Visit a series of nodes in a node body.\n\nThis function is factored out so that it can be called recursively on\nstatements that are appended or prepended. This allows e.g. a nested\nexpression to prepend a statement, and that statement can prepend a\nstatement again, etc.\n\nArgs:\nnodes: A list of statements.\n\nReturns:\nA list of transformed statements.", "source": "codesearchnet"}
{"code": "def FindFieldByName(self, full_name):\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    (message_name, _, field_name) = full_name.rpartition('.')\n    message_descriptor = self.FindMessageTypeByName(message_name)\n    return message_descriptor.fields_by_name[field_name]", "docstring": "Loads the named field descriptor from the pool.\n\nArgs:\nfull_name: The full name of the field descriptor to load.\n\nReturns:\nThe field descriptor for the named field.\n\nRaises:\nKeyError: if the field cannot be found in the pool.", "source": "codesearchnet"}
{"code": "def getmtime(self, path=None, client_kwargs=None, header=None):\n    return self._getmtime_from_header(self.head(path, client_kwargs, header))", "docstring": "Return the time of last access of path.\n\nArgs:\npath (str): File path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\nfloat: The number of seconds since the epoch\n(see the time module).", "source": "codesearchnet"}
{"code": "def _audience_condition_deserializer(obj_dict):\n  \n  return [\n    obj_dict.get('name'),\n    obj_dict.get('value'),\n    obj_dict.get('type'),\n    obj_dict.get('match')\n  ]", "docstring": "Deserializer defining how dict objects need to be decoded for audience conditions.\n\nArgs:\nobj_dict: Dict representing one audience condition.\n\nReturns:\nList consisting of condition key with corresponding value, type and match.", "source": "juraj-google-style"}
{"code": "def _create_disk(self, name, spec, template_repo=None, template_store=None):\n    LOGGER.debug(('Spec: %s' % spec))\n    with LogTask(('Create disk %s' % spec['name'])):\n        disk_metadata = {}\n        if (spec['type'] == 'template'):\n            (disk_path, disk_metadata) = self._handle_template(host_name=name, template_spec=spec, template_repo=template_repo, template_store=template_store)\n        elif (spec['type'] == 'empty'):\n            (disk_path, disk_metadata) = self._handle_empty_disk(host_name=name, disk_spec=spec)\n        elif (spec['type'] == 'file'):\n            (disk_path, disk_metadata) = self._handle_file_disk(disk_spec=spec)\n        else:\n            raise RuntimeError(('Unknown drive spec %s' % str(spec)))\n        return (disk_path, disk_metadata)", "docstring": "Creates a disc with the given name from the given repo or store\n\nArgs:\nname (str): Name of the domain to create the disk for\nspec (dict): Specification of the disk to create\ntemplate_repo (TemplateRepository or None): template repo instance\nto use\ntemplate_store (TemplateStore or None): template store instance to\nuse\n\nReturns:\nTuple(str, dict): Path to the disk and disk metadata\n\nRaises:\nRuntimeError: If the type of the disk is not supported or failed to\ncreate the disk", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):\n    image_tokens = self.get_image_tokens(pixel_values, image_sizes)\n    split_sizes = [height \n    image_features = self.get_input_embeddings()(image_tokens)\n    image_features = torch.split(image_features, split_sizes)\n    return image_features", "docstring": "Tokenizes images into discrete tokens with VQGAN module and embeds\nthem with text embeddings layer\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def __ne__(self, other):\n    \n    if not isinstance(other, DateTimeValues):\n      return True\n\n    normalized_timestamp = self._GetNormalizedTimestamp()\n    other_normalized_timestamp = other._GetNormalizedTimestamp()  \n\n    if normalized_timestamp is None and other_normalized_timestamp is not None:\n      return True\n\n    if normalized_timestamp is not None and other_normalized_timestamp is None:\n      return True\n\n    return normalized_timestamp != other_normalized_timestamp", "docstring": "Determines if the date time values are not equal to other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are not equal to other.", "source": "juraj-google-style"}
{"code": "def cherry_pick(self, branch, **kwargs):\n    path = ('%s/%s/cherry_pick' % (self.manager.path, self.get_id()))\n    post_data = {'branch': branch}\n    self.manager.gitlab.http_post(path, post_data=post_data, **kwargs)", "docstring": "Cherry-pick a commit into a branch.\n\nArgs:\nbranch (str): Name of target branch\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCherryPickError: If the cherry-pick could not be performed", "source": "codesearchnet"}
{"code": "def has_platform(self, platform):\n    if (platform and (not isinstance(platform, dict))):\n        parts = platform.split('/')\n        if ((len(parts) > 3) or (len(parts) < 1)):\n            raise InvalidArgument('\"{0}\" is not a valid platform descriptor'.format(platform))\n        platform = {'os': parts[0]}\n        if (len(parts) > 2):\n            platform['variant'] = parts[2]\n        if (len(parts) > 1):\n            platform['architecture'] = parts[1]\n    return (normalize_platform(platform, self.client.version()) in self.attrs['Platforms'])", "docstring": "Check whether the given platform identifier is available for this\ndigest.\n\nArgs:\nplatform (str or dict): A string using the ``os[/arch[/variant]]``\nformat, or a platform dictionary.\n\nReturns:\n(bool): ``True`` if the platform is recognized as available,\n``False`` otherwise.\n\nRaises:\n:py:class:`docker.errors.InvalidArgument`\nIf the platform argument is not a valid descriptor.", "source": "codesearchnet"}
{"code": "def VerifySignature(self, message, signature, public_key, unhex=True):\n        \n        return Crypto.VerifySignature(message, signature, public_key, unhex=unhex)", "docstring": "Verify the integrity of the message.\n\nArgs:\nmessage (str): the message to verify.\nsignature (bytearray): the signature belonging to the message.\npublic_key (ECPoint): the public key to use for verifying the signature.\nunhex (bool): whether the message should be unhexlified before verifying\n\nReturns:\nbool: True if verification passes. False otherwise.", "source": "juraj-google-style"}
{"code": "def _PromptUserForInput(self, input_text):\n    \n    self._output_writer.Write('{0:s}: '.format(input_text))\n    return self._input_reader.Read()", "docstring": "Prompts user for an input.\n\nArgs:\ninput_text (str): text used for prompting the user for input.\n\nReturns:\nstr: input read from the user.", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        num_image_tokens = [self.image_seq_length] * len(image_sizes)\n        num_image_patches = [1] * len(image_sizes)\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def PreparePairedSequenceBatch(source, target_in, pad=0):\n    target = target_in[(:, :(- 1))]\n    target_y = target_in[(:, 1:)]\n    source_mask = np.reshape((source != pad), (source.shape[0], 1, 1, source.shape[(- 1)]))\n    target_mask = MakeTargetMask(target, pad)\n    memory_mask = np.reshape((np.arange(target.shape[(- 1)]) < source.shape[(- 1)]), [(- 1), 1])\n    ntokens = np.sum((target_y != pad))\n    return (source, target, target_y, source_mask, target_mask, memory_mask, ntokens)", "docstring": "Build masks for this batch.\n\nArgs:\nsource: (batch, source_len) array of integer-coded symbols for inputs\ntarget_in: (batch, batch_len) array of integer-coded symbols for targets\npad: int: the padding symbol used to pad the above\n\nReturns:\nPrepared batch of tuple of arrays: source, input-target, shifted-target,\nsource mask, target mask, source-target \"memory\" mask, minibatch token count", "source": "codesearchnet"}
{"code": "def list_dir(self, context):\n        \n\n        doc = inspect.getdoc(context)\n\n        listing = \"\"\n        listing += \"\\n\"\n\n        listing += annotate.context_name(context) + \"\\n\"\n\n        if doc is not None:\n            doc = inspect.cleandoc(doc)\n            listing += doc + \"\\n\"\n\n        listing += \"\\nDefined Functions:\\n\"\n        is_dict = False\n\n        if isinstance(context, dict):\n            funs = context.keys()\n            is_dict = True\n        else:\n            funs = utils.find_all(context)\n\n        for fun in sorted(funs):\n            override_name = None\n            if is_dict:\n                override_name = fun\n\n            fun = self.find_function(context, fun)\n\n            if isinstance(fun, dict):\n                if is_dict:\n                    listing += \" - \" + override_name + '\\n'\n                else:\n                    listing += \" - \" + fun.metadata.name + '\\n'\n            else:\n                listing += \" - \" + fun.metadata.signature(name=override_name) + '\\n'\n\n            if annotate.short_description(fun) != \"\":\n                listing += \"   \" + annotate.short_description(fun) + '\\n'\n\n        listing += \"\\nBuiltin Functions\\n\"\n        for bif in sorted(self.builtins.keys()):\n            listing += ' - ' + bif + '\\n'\n\n        listing += '\\n'\n        return listing", "docstring": "Return a listing of all of the functions in this context including builtins.\n\nArgs:\ncontext (object): The context to print a directory for.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):\n    config_dict = {}\n    config_dict['text_config'] = text_config\n    config_dict['vision_config'] = vision_config\n    return cls.from_dict(config_dict, **kwargs)", "docstring": "Instantiate a [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision\nmodel configuration.\n\nReturns:\n[`OwlViTConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def set_token(self, token):\n        \n\n        self.token = token\n\n        self.set_header(\n            'Authorization',\n            \"Bearer {}\".format(token)\n        )", "docstring": "Set the token for the v20 context\n\nArgs:\ntoken: The token used to access the v20 REST api", "source": "juraj-google-style"}
{"code": "def _print_choice_field(self, field_name: str, field: descriptor.FieldDescriptor, choice_container: message.Message) -> None:\n    if len(choice_container.DESCRIPTOR.oneofs) != 1:\n        raise ValueError(f'Invalid value for choice field {field_name}: {choice_container}.')\n    oneof_group = choice_container.DESCRIPTOR.oneofs[0]\n    set_oneof_name = choice_container.WhichOneof(oneof_group.name)\n    if set_oneof_name is None:\n        raise ValueError(f'Oneof not set on choice type: {choice_container.DESCRIPTOR.full_name}.')\n    value_field = choice_container.DESCRIPTOR.fields_by_name[set_oneof_name]\n    oneof_field_name = proto_utils.json_field_name(value_field)\n    oneof_field_name = oneof_field_name[0].upper() + oneof_field_name[1:]\n    value = proto_utils.get_value_at_field(choice_container, value_field)\n    if annotation_utils.is_primitive_type(value_field.message_type):\n        self._print_primitive_field(field_name + oneof_field_name, value_field, value)\n    else:\n        self._print_message_field(field_name + oneof_field_name, value_field, value)", "docstring": "Prints a FHIR choice field.\n\nThis field is expected to have one valid oneof set.\n\nArgs:\nfield_name: The name of the field.\nfield: The FieldDescriptor whose contents to print.\nchoice_container: The value present at field, which should be a oneof with\na single value set.", "source": "github-repos"}
{"code": "def _ParseCachedEntry2003(self, value_data, cached_entry_offset):\n    \n\n    try:\n      cached_entry = self._ReadStructureFromByteStream(\n          value_data[cached_entry_offset:], cached_entry_offset,\n          self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse cached entry value with error: {0!s}'.format(\n              exception))\n\n    path_size = cached_entry.path_size\n    maximum_path_size = cached_entry.maximum_path_size\n    path_offset = cached_entry.path_offset\n\n    if path_offset > 0 and path_size > 0:\n      path_size += path_offset\n      maximum_path_size += path_offset\n\n      try:\n        path = value_data[path_offset:path_size].decode('utf-16-le')\n      except UnicodeDecodeError:\n        raise errors.ParseError('Unable to decode cached entry path to string')\n\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = (\n        self._cached_entry_data_type_map.GetByteSize())\n    cached_entry_object.file_size = getattr(cached_entry, 'file_size', None)\n    cached_entry_object.last_modification_time = (\n        cached_entry.last_modification_time)\n    cached_entry_object.path = path\n\n    return cached_entry_object", "docstring": "Parses a Windows 2003 cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def _fluent_size(self, fluents, ordering) -> Sequence[Sequence[int]]:\n    shapes = []\n    for name in ordering:\n        fluent = fluents[name]\n        shape = self._param_types_to_shape(fluent.param_types)\n        shapes.append(shape)\n    return tuple(shapes)", "docstring": "Returns the sizes of `fluents` following the given `ordering`.\n\nReturns:\nSequence[Sequence[int]]: A tuple of tuple of integers\nrepresenting the shape and size of each fluent.", "source": "codesearchnet"}
{"code": "def get(self, attr, value=None, resolve=True):\n    try:\n        if resolve:\n            value = self._resolve_attribute(attr)\n        else:\n            value = self.attributes[attr]\n    except KeyError:\n        pass\n    return value", "docstring": "Get the value of an attribute from submit description file.\n\nArgs:\nattr (str): The name of the attribute whose value should be returned.\nvalue (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None.\nresolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If\nFalse then return the raw value of 'attr'. Defaults to True.\n\nReturns:\nstr: The value assigned to 'attr' if 'attr' exists, otherwise 'value'.", "source": "codesearchnet"}
{"code": "def _GetParentModificationTime(self, gzip_file_entry):\n    parent_file_entry = path_spec_resolver.Resolver.OpenFileEntry(gzip_file_entry.path_spec.parent)\n    if (not parent_file_entry):\n        return None\n    return parent_file_entry.modification_time", "docstring": "Retrieves the modification time of the file entry's parent file.\n\nNote that this retrieves the time from the file entry of the parent of the\ngzip file entry's path spec, which is different from trying to retrieve it\nfrom the gzip file entry's parent file entry.\n\nIt would be preferable to retrieve the modification time from the metadata\nin the gzip file itself, but it appears to not be set when the file is\nwritten by fseventsd.\n\nArgs:\ngzip_file_entry (dfvfs.FileEntry): file entry of the gzip file containing\nthe fseventsd data.\n\nReturns:\ndfdatetime.DateTimeValues: parent modification time, or None if not\navailable.", "source": "codesearchnet"}
{"code": "def label_count(self):\n    occurrences = collections.defaultdict(int)\n    for label in self:\n        occurrences[label.value] += 1\n    return occurrences", "docstring": "Return for each label the number of occurrences within the list.\n\nReturns:\ndict: A dictionary containing for every label-value (key)\nthe number of occurrences (value).\n\nExample:\n>>> ll = LabelList(labels=[\n>>>     Label('a', 3.2, 4.5),\n>>>     Label('b', 5.1, 8.9),\n>>>     Label('a', 7.2, 10.5),\n>>>     Label('b', 10.5, 14),\n>>>     Label('a', 15, 18)\n>>> ])\n>>> ll.label_count()\n{'a': 3 'b': 2}", "source": "codesearchnet"}
{"code": "def getRetinas(self, retina_name=None):\n        \n\n        resourcePath = '/retinas'\n        method = 'GET'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        queryParams['retina_name'] = retina_name\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n        return [retina.Retina(**r) for r in response.json()]", "docstring": "Information about retinas\nArgs:\nretina_name, str: The retina name (optional) (optional)\nReturns: Array[Retina]", "source": "juraj-google-style"}
{"code": "def from_hising(cls, h, J, offset=None):\n        \n        poly = {(k,): v for k, v in h.items()}\n        poly.update(J)\n        if offset is not None:\n            poly[frozenset([])] = offset\n        return cls(poly, Vartype.SPIN)", "docstring": "Construct a binary polynomial from a higher-order Ising problem.\n\nArgs:\nh (dict):\nThe linear biases.\n\nJ (dict):\nThe higher-order biases.\n\noffset (optional, default=0.0):\nConstant offset applied to the model.\n\nReturns:\n:obj:`.BinaryPolynomial`\n\nExamples:\n>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)", "source": "juraj-google-style"}
{"code": "def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:\n    tokenizer_files_map = {}\n    for file_name in tokenization_files:\n        search = _re_tokenizer_file.search(file_name)\n        if search is not None:\n            v = search.groups()[0]\n            tokenizer_files_map[v] = file_name\n    available_versions = sorted(tokenizer_files_map.keys())\n    tokenizer_file = FULL_TOKENIZER_FILE\n    transformers_version = version.parse(__version__)\n    for v in available_versions:\n        if version.parse(v) <= transformers_version:\n            tokenizer_file = tokenizer_files_map[v]\n        else:\n            break\n    return tokenizer_file", "docstring": "Get the tokenization file to use for this version of transformers.\n\nArgs:\ntokenization_files (`List[str]`): The list of available configuration files.\n\nReturns:\n`str`: The tokenization file to use.", "source": "github-repos"}
{"code": "def p40baro(msg):\n    d = hex2bin(data(msg))\n    if (d[26] == '0'):\n        return None\n    p = ((bin2int(d[27:39]) * 0.1) + 800)\n    return p", "docstring": "Barometric pressure setting\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS40) string\n\nReturns:\nfloat: pressure in millibar", "source": "codesearchnet"}
{"code": "def get_diff_for_doctesting(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:\n    print('\\n\n    code_diff = []\n    for commit in commits:\n        for diff_obj in commit.diff(base_commit):\n            if not diff_obj.b_path.endswith('.py') and (not diff_obj.b_path.endswith('.md')):\n                continue\n            if diff_obj.change_type in ['A']:\n                code_diff.append(diff_obj.b_path)\n            elif diff_obj.change_type in ['M', 'R']:\n                if diff_obj.a_path != diff_obj.b_path:\n                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])\n                elif diff_contains_doc_examples(repo, commit, diff_obj.b_path):\n                    code_diff.append(diff_obj.a_path)\n                else:\n                    print(f\"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.\")\n    return code_diff", "docstring": "Get the diff in doc examples between a base commit and one or several commits.\n\nArgs:\nrepo (`git.Repo`):\nA git repository (for instance the Transformers repo).\nbase_commit (`str`):\nThe commit reference of where to compare for the diff. This is the current commit, not the branching point!\ncommits (`List[str]`):\nThe list of commits with which to compare the repo at `base_commit` (so the branching point).\n\nReturns:\n`List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files\nmodified are returned if the diff in the file is only in doctest examples).", "source": "github-repos"}
{"code": "def _make_3d(field, twod):\n    shp = list(field.shape)\n    if (twod and ('X' in twod)):\n        shp.insert(1, 1)\n    elif twod:\n        shp.insert(0, 1)\n    return field.reshape(shp)", "docstring": "Add a dimension to field if necessary.\n\nArgs:\nfield (numpy.array): the field that need to be 3d.\ntwod (str): 'XZ', 'YZ' or None depending on what is relevant.\nReturns:\nnumpy.array: reshaped field.", "source": "codesearchnet"}
{"code": "def _CreatePlacemark(self, parent, name, style_id=None, visible=True, description=None):\n    placemark = ET.SubElement(parent, 'Placemark')\n    placemark_name = ET.SubElement(placemark, 'name')\n    placemark_name.text = name\n    if (description is not None):\n        desc_tag = ET.SubElement(placemark, 'description')\n        desc_tag.text = description\n    if (style_id is not None):\n        styleurl = ET.SubElement(placemark, 'styleUrl')\n        styleurl.text = ('\n    if (not visible):\n        visibility = ET.SubElement(placemark, 'visibility')\n        visibility.text = '0'\n    return placemark", "docstring": "Create a KML Placemark element.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nname: The placemark name as a string.\nstyle_id: If not None, the id of a style to use for the placemark.\nvisible: Whether the placemark is initially visible or not.\ndescription: A description string or None.\n\nReturns:\nThe placemark ElementTree.Element instance.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:\n    if already_has_special_tokens:\n        if token_ids_1 is not None:\n            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids of the first sequence.\ntoken_ids_1 (`List[int]`, *optional*):\nList of ids of the second sequence.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\nA list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def unbind(self, binding):\n        \n        \n        username = self.backend.config.generate_binding_username(binding)\n        \n        try:\n            self.backend.atlas.DatabaseUsers.delete_a_database_user(username)\n        except ErrAtlasNotFound:\n            \n            \n            \n            pass\n\n        self.backend.storage.remove(binding)", "docstring": "Unbind the instance\n\nArgs:\nbinding (AtlasServiceBinding.Binding): Existing or New binding", "source": "juraj-google-style"}
{"code": "def mean_pooling(self, model_output, attention_mask):\n    token_embeddings = model_output[0]\n    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-09)", "docstring": "Calculates the mean of token embeddings\n\nArgs:\nmodel_output: The output of the model.\nattention_mask: This is a tensor that contains 1s for all input tokens and\n0s for all padding tokens.\n\nReturns:\nThe mean of the token embeddings.", "source": "github-repos"}
{"code": "def write(self, data):\n    \n    self._check_open()\n    if not isinstance(data, str):\n      raise TypeError('Expected str but got %s.' % type(data))\n    if not data:\n      return\n    self._buffer.append(data)\n    self._buffered += len(data)\n    self._offset += len(data)\n    if self._buffered >= self._flushsize:\n      self._flush()", "docstring": "Write some bytes.\n\nArgs:\ndata: data to write. str.\n\nRaises:\nTypeError: if data is not of type str.", "source": "juraj-google-style"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--fields', dest='fields', type=str, action='store',\n        default=cls._DEFAULT_FIELDS, help=(\n            'Defines which fields should be included in the output.'))\n    argument_group.add_argument(\n        '--additional_fields', dest='additional_fields', type=str,\n        action='store', default='', help=(\n            'Defines extra fields to be included in the output, in addition to'\n            ' the default fields, which are {0:s}.'.format(\n                cls._DEFAULT_FIELDS)))\n    argument_group.add_argument(\n        '--timestamp_format', dest='timestamp_format', type=str,\n        action='store', default=cls._DEFAULT_TIMESTAMP_FORMAT, help=(\n            'Set the timestamp format that will be used in the datetime'\n            'column of the XLSX spreadsheet.'))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def delta_E( reactants, products, check_balance=True ):\n    \n    if check_balance:\n        if delta_stoichiometry( reactants, products ) != {}:\n            raise ValueError( \"reaction is not balanced: {}\".format( delta_stoichiometry( reactants, products) ) )\n    return sum( [ r.energy for r in products ] ) - sum( [ r.energy for r in reactants ] )", "docstring": "Calculate the change in energy for reactants --> products.\n\nArgs:\nreactants (list(vasppy.Calculation): A list of vasppy.Calculation objects. The initial state.\nproducts  (list(vasppy.Calculation): A list of vasppy.Calculation objects. The final state.\ncheck_balance (bool:optional): Check that the reaction stoichiometry is balanced. Default: True.\n\nReturns:\n(float) The change in energy.", "source": "juraj-google-style"}
{"code": "def modify_ack_deadline(self, items):\n    ack_ids = [item.ack_id for item in items]\n    seconds = [item.seconds for item in items]\n    request = types.StreamingPullRequest(modify_deadline_ack_ids=ack_ids, modify_deadline_seconds=seconds)\n    self._manager.send(request)", "docstring": "Modify the ack deadline for the given messages.\n\nArgs:\nitems(Sequence[ModAckRequest]): The items to modify.", "source": "codesearchnet"}
{"code": "def pick(self, connections):\n        \n        if len(connections) == 1:\n            return connections[0]\n\n        def key(conn):\n            return (datetime.min\n                    if conn.backoff_time is None\n                    else conn.backoff_time)\n\n        return min(*connections, key=key)", "docstring": "Picks a connection with the earliest backoff time.\n\nAs a result, the first connection is picked\nfor as long as it has no backoff time.\nOtherwise, the connections are tried in a round robin fashion.\n\nArgs:\nconnections (:obj:list): List of\n:class:`~bigchaindb_driver.connection.Connection` instances.", "source": "juraj-google-style"}
{"code": "def now_playing(self, **kwargs):\n        \n        path = self._get_path('now_playing')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the list of movies playing in theatres. This list refreshes\nevery day. The maximum number of items this list will include is 100.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def init_app(self, app, context=DEFAULT_DICT):\n        \n        if context is not _CONTEXT_MISSING:\n            self.update_context(context, app=app)\n\n        \n        \n        if (app not in _CONTEXT_CALLBACK_MAP\n                and context is not _CONTEXT_MISSING):\n            key = self._get_context_name(app=app)\n            self._context_callbacks(app, key, original_context=context)", "docstring": "Lazy constructor for the :class:`Component` class.\n\nThis method will allow the component to be used like a Flask\nextension/singleton.\n\nArgs:\napp (flask.Flask): The Application to base this Component upon.\nUseful for app wide singletons.\n\nKeyword Args:\ncontext (dict, optional): The contextual information to supply to\nthis component.", "source": "juraj-google-style"}
{"code": "def filter_paragraph(p):\n  \n  \n  tokens = p.split()\n  if len(tokens) < 6:\n    return True\n\n  \n  if not re.search(_SOME_ALPHA_RE, p):\n    return True\n\n  \n  \n  \n  last = 0\n  found_sentence = False\n  num_alpha = 0\n  for i, x in enumerate(tokens):\n    if x == '.':\n      if i - last > 3 and num_alpha >= 3:\n        found_sentence = True\n        break\n      last = i\n      num_alpha = 0\n    if re.match(_ONLY_ALPHA_RE, x):\n      num_alpha += 1\n  if not found_sentence:\n    return True\n\n  return False", "docstring": "Simple filter to remove obviously bad paragraphs (bad text extraction).\n\nNote this needs to run very quickly as it is applied to every paragraph\nin the corpus, so nothing fancy! This whole method should be linear\nexpected time in len(p).\n\nArgs:\np: string, paragraph\n\nReturns:\nTrue if we should remove the paragraph.", "source": "juraj-google-style"}
{"code": "def _copy_non_source(op, graph, op_map, base_graph):\n    input_mutations = []\n    control_mutations = []\n    copied_inputs = []\n    for input_index, original_input in enumerate(op.inputs):\n        copied_input = op_map.get(original_input, None)\n        if copied_input is None:\n            copied_input = array_ops.placeholder(name='unused_control_flow_input', shape=original_input.shape, dtype=original_input.dtype)\n            input_mutations.append(_InputMutation(copied_op=None, input_index=input_index, old_graph_tensor=original_input))\n        copied_inputs.append(copied_input)\n    copied_control_inputs = []\n    for original_control_input in op.control_inputs:\n        copied_control_input = op_map.get(original_control_input, None)\n        if copied_control_input is None:\n            control_mutations.append(_ControlMutation(copied_op=None, old_graph_op=original_control_input))\n        else:\n            copied_control_inputs.append(copied_control_input)\n    with ops.control_dependencies(copied_control_inputs), ops.device(op.device):\n        f = base_graph._functions.get(op.type, None)\n        if f is not None and compat.as_str(f.name) not in graph._functions:\n            f.add_to_graph(graph)\n        copied_op = graph.create_op(op_type=op.type, inputs=copied_inputs, dtypes=[x.dtype for x in op.outputs], attrs={key: value for key, value in op.node_def.attr.items() if not key.startswith('_class') and (not key.startswith('_tpu_replicate'))}, name=op.name)\n    op_map[op] = copied_op\n    for i, o in enumerate(op.outputs):\n        op_map[o] = copied_op.outputs[i]\n    return ([mutation._replace(copied_op=copied_op) for mutation in input_mutations], [mutation._replace(copied_op=copied_op) for mutation in control_mutations])", "docstring": "Copy an op directly to a given graph.\n\nGenerally `op`'s inputs should already have been copied. If this is not the\ncase, for example with v1 while_loops, then `_copy_non_source` inserts\nplaceholders for the unavailable Tensors and returns a list of required\nmutations.\n\nArgs:\nop: The op to be copied.\ngraph: The destination graph.\nop_map: A dict mapping ops and tensors in the old graph to the new one.\nbase_graph: The graph we're copying from, for any necessary functions.\nReturns:\nA tuple of (required_inputs, required_control_inputs):\nrequired_inputs:\nA list of `_InputMutation` tuples containing inputs to `copied_op` which\nmust be updated once `old_graph_tensor` has been copied.\nrequired_control_inputs:\nA list of `_ControlMutation` tuples containing control inputs to\n`copied_op` which must be added once `old_graph_op` has been copied.", "source": "github-repos"}
{"code": "def replace_pyof_version(module_fullname, version):\n    module_version = MetaStruct.get_pyof_version(module_fullname)\n    if ((not module_version) or (module_version == version)):\n        return None\n    return module_fullname.replace(module_version, version)", "docstring": "Replace the OF Version of a module fullname.\n\nGet's a module name (eg. 'pyof.v0x01.common.header') and returns it on\na new 'version' (eg. 'pyof.v0x02.common.header').\n\nArgs:\nmodule_fullname (str): The fullname of the module\n(e.g.: pyof.v0x01.common.header)\nversion (str): The version to be 'inserted' on the module fullname.\n\nReturns:\nstr: module fullname\nThe new module fullname, with the replaced version,\non the format \"pyof.v0x01.common.header\". If the requested\nversion is the same as the one of the module_fullname or if\nthe module_fullname is not a 'OF version' specific module,\nreturns None.", "source": "codesearchnet"}
{"code": "def set_from_json(self, name, json, models=None, setter=None):\n    if (name in self.properties()):\n        log.trace('Patching attribute %r of %r with %r', name, self, json)\n        descriptor = self.lookup(name)\n        descriptor.set_from_json(self, json, models, setter)\n    else:\n        log.warning(\"JSON had attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent\", name, self)", "docstring": "Set a property value on this object from JSON.\n\nArgs:\nname: (str) : name of the attribute to set\n\njson: (JSON-value) : value to set to the attribute to\n\nmodels (dict or None, optional) :\nMapping of model ids to models (default: None)\n\nThis is needed in cases where the attributes to update also\nhave values that have references.\n\nsetter(ClientSession or ServerSession or None, optional) :\nThis is used to prevent \"boomerang\" updates to Bokeh apps.\n\nIn the context of a Bokeh server application, incoming updates\nto properties will be annotated with the session that is\ndoing the updating. This value is propagated through any\nsubsequent change notifications that the update triggers.\nThe session can compare the event setter to itself, and\nsuppress any updates that originate from itself.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def find_rule(condition):\n    final_condition = re.sub('{{.*}}', '42', condition)\n    ast_tokens = Condition.get_tokens(final_condition)\n    ast_compressed_tokens = Condition.compress_tokens(ast_tokens)\n    name = 'undefined'\n    function = (lambda tokens: False)\n    if (len(ast_compressed_tokens) > 0):\n        for rule in Condition.RULES:\n            if Condition.match_tokens(ast_compressed_tokens, rule['types']):\n                name = rule['name']\n                function = rule['evaluate']\n                break\n    return (name, ast_tokens, function)", "docstring": "Find rule for given condition.\n\nArgs:\ncondition (str): Python condition as string.\n\nReturns:\nstr, list, function: found rule name, list of AST tokens for condition\nand verification function.", "source": "codesearchnet"}
{"code": "def _convert(self, value, dtype):\n    if isinstance(value, resource_variable_ops.ResourceVariable):\n        raise RuntimeError(f'Attempting to return a variable from an eagerly executed py_func. Only numeric data structures like Tensors or NumPy arrays should be returned; to return the value of a variable, make sure to obtain the Tensor backing it by calling `.read_value()` on the variable in question: {value}')\n    if value is None and self._is_grad_func:\n        return constant_op.constant(0.0, dtype=dtype)\n    return ops.convert_to_tensor(value, dtype=dtype)", "docstring": "Converts `value` to a tensor of type `dtype`, with error checking.\n\nArgs:\nvalue: The tensor to convert.\ndtype: The desired dtype.\n\nReturns:\nA tensor of type `dtype`, or a zeros tensor if value is None and\nthis function is in fact a gradient function.\n\nRaises:\nRuntimeError: if `value` is a variable.", "source": "github-repos"}
{"code": "def save_graph(graph_str, dest_file, fmt=None, image_ratio=None):\n    \n    g = pydot.graph_from_dot_data(graph_str)\n\n    \n    if fmt is None:\n        fmt = os.path.splitext(dest_file)[1].lower().strip('.') or \"png\"\n    if hasattr(g, \"write_\" + fmt):\n        write_fn = getattr(g, \"write_\" + fmt)\n    else:\n        raise Exception(\"Unsupported graph format: '%s'\" % fmt)\n\n    if image_ratio:\n        g.set_ratio(str(image_ratio))\n    write_fn(dest_file)\n    return fmt", "docstring": "Render a graph to an image file.\n\nArgs:\ngraph_str (str): Dot-language graph string.\ndest_file (str): Filepath to save the graph to.\nfmt (str): Format, eg \"png\", \"jpg\".\nimage_ratio (float): Image ratio.\n\nReturns:\nString representing format that was written, such as 'png'.", "source": "juraj-google-style"}
{"code": "def buckets_insert(self, bucket, project_id=None):\n    \n    args = {'project': project_id if project_id else self._project_id}\n    data = {'name': bucket}\n\n    url = Api._ENDPOINT + (Api._BUCKET_PATH % '')\n    return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)", "docstring": "Issues a request to create a new bucket.\n\nArgs:\nbucket: the name of the bucket.\nproject_id: the project to use when inserting the bucket.\nReturns:\nA parsed bucket information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def delete(self, path):\n    self.__validate_storage_path(path, projects_allowed=False)\n    entity = self.api_client.get_entity_by_query(path=path)\n    if (entity['entity_type'] in self.__BROWSABLE_TYPES):\n        contents = self.api_client.list_folder_content(entity['uuid'])\n        if (contents['count'] > 0):\n            raise StorageArgumentException('This method cannot delete non-empty folder. Please empty the folder first.')\n        self.api_client.delete_folder(entity['uuid'])\n    elif (entity['entity_type'] == 'file'):\n        self.api_client.delete_file(entity['uuid'])", "docstring": "Delete an entity from the storage service using its path.\n\nArgs:\npath(str): The path of the entity to be delete\n\nReturns:\nThe uuid of created file entity as string\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def _update_flags(compiler_flags, remove_flags=()):\n    for flag in GFORTRAN_SHARED_FLAGS:\n        if (flag not in compiler_flags):\n            compiler_flags.append(flag)\n    if (DEBUG_ENV in os.environ):\n        to_add = GFORTRAN_DEBUG_FLAGS\n        to_remove = GFORTRAN_OPTIMIZE_FLAGS\n    else:\n        to_add = GFORTRAN_OPTIMIZE_FLAGS\n        if (os.environ.get(WHEEL_ENV) is None):\n            to_add += (GFORTRAN_NATIVE_FLAG,)\n        to_remove = GFORTRAN_DEBUG_FLAGS\n    for flag in to_add:\n        if (flag not in compiler_flags):\n            compiler_flags.append(flag)\n    return [flag for flag in compiler_flags if (not ((flag in to_remove) or (flag in remove_flags)))]", "docstring": "Update a given set of compiler flags.\n\nArgs:\ncompiler_flags (List[str]): Existing flags associated with a compiler.\nremove_flags (Optional[Container[str]]): A container of flags to remove\nthat will override any of the defaults.\n\nReturns:\nList[str]: The modified list (i.e. some flags added and some removed).", "source": "codesearchnet"}
{"code": "def __init__(self, callback):\n        \n        self._callback = callback\n        self._interface = brocade_interface(\n            callback=pynos.utilities.return_xml\n        )\n        self._rbridge = brocade_rbridge(\n            callback=pynos.utilities.return_xml\n        )\n        self._mac_address_table = brocade_mac_address_table(\n            callback=pynos.utilities.return_xml\n        )\n        self._tunnels = brocade_tunnels(\n            callback=pynos.utilities.return_xml\n        )", "docstring": "Interface init function.\n\nArgs:\ncallback: Callback function that will be called for each action.\n\nReturns:\nInterface Object\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def tangent(f):\n    node = annotate.resolve_calls(f)\n    RemoveWith().visit(node)\n    wrapped = functools.wraps(f)(compile_.compile_function(node))\n    wrapped.tangent = f\n    return wrapped", "docstring": "A decorator which removes the `with insert_grad_of` statement.\n\nThis allows the function to be called as usual.\n\nArgs:\nf: A function\n\nReturns:\nA function with any `with insert_grad_of` context managers removed.", "source": "codesearchnet"}
{"code": "def make_parser():\n    parser = argparse.ArgumentParser(usage='%(prog)s [options] input')\n    parser.add_argument('--output-cfg', type=str, action='store', dest='output_cfg', default=None, help='Output control flow graph as SVG.')\n    parser.add_argument('--output-typegraph', type=str, action='store', dest='output_typegraph', default=None, help='Output typegraph as SVG.')\n    parser.add_argument('--visualize', type=str, action='store', dest='visualize_typegraph', default=None, help='Generate an HTML visualization of the typegraph.')\n    parser.add_argument('--visualize-blocks', type=str, action='store', dest='visualize_block_graph', default=None, help='Generate an HTML visualization of the blockgraph.')\n    wrapper = datatypes.ParserWrapper(parser)\n    pytype_config.add_all_pytype_options(wrapper)\n    return arg_parser.Parser(parser, pytype_single_args=wrapper.actions)", "docstring": "Make parser for command line args.\n\nReturns:\nA Parser object.", "source": "github-repos"}
{"code": "def __init__(self, max_workers=None):\n        \n        _remove_dead_thread_references()\n\n        if max_workers is None:\n            self._max_workers = multiprocessing.cpu_count()\n        else:\n            self._max_workers = max_workers\n\n        \n        \n        \n        self._call_queue = multiprocessing.Queue(self._max_workers +\n                                                 EXTRA_QUEUED_CALLS)\n        self._result_queue = multiprocessing.Queue()\n        self._work_ids = queue.Queue()\n        self._queue_management_thread = None\n        self._processes = set()\n\n        \n        self._shutdown_thread = False\n        self._shutdown_process_event = multiprocessing.Event()\n        self._shutdown_lock = threading.Lock()\n        self._queue_count = 0\n        self._pending_work_items = {}", "docstring": "Initializes a new ProcessPoolExecutor instance.\n\nArgs:\nmax_workers: The maximum number of processes that can be used to\nexecute the given calls. If None or not given then as many\nworker processes will be created as the machine has processors.", "source": "juraj-google-style"}
{"code": "def __init__(self, packet_count=None, byte_count=None):\n        \n        super().__init__()\n        self.packet_count = packet_count\n        self.byte_count = byte_count", "docstring": "Create BucketCounter with the optional parameters below.\n\nArgs:\npacket_count (int): Number of packets processed by bucket.\nbyte_count (int): Number of bytes processed by bucket.", "source": "juraj-google-style"}
{"code": "def get_package_from_handle(package_handle):\n    \n    if isinstance(package_handle, dict):\n        package_handle = ResourceHandle.from_dict(package_handle)\n    package_resource = package_repository_manager.get_resource_from_handle(package_handle)\n    package = Package(package_resource)\n    return package", "docstring": "Create a package given its handle (or serialized dict equivalent)\n\nArgs:\npackage_handle (`ResourceHandle` or dict): Resource handle, or\nequivalent serialized dict representation from\nResourceHandle.to_dict\n\nReturns:\n`Package`.", "source": "juraj-google-style"}
{"code": "def WriteGraphOpCreation(self, graph_op_creation):\n    debug_event = debug_event_pb2.DebugEvent(graph_op_creation=graph_op_creation)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteGraphOpCreation(self._dump_root, debug_event)", "docstring": "Write a GraphOpCreation proto with the writer.\n\nArgs:\ngraph_op_creation: A GraphOpCreation proto, describing the details of the\ncreation of an op inside a TensorFlow Graph.", "source": "github-repos"}
{"code": "def test_sample_paths_2d(self, use_time_grid, supply_normal_draws):\n    dtype = tf.float64\n    mu = np.array([0.2, 0.7])\n    a = np.array([[0.4, 0.1], [0.3, 0.2]])\n    b = np.array([[0.33, -0.03], [0.21, 0.5]])\n\n    def drift_fn(t, x):\n        return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)\n\n    def vol_fn(t, x):\n        del x\n        return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)\n    process = tff.models.GenericItoProcess(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn)\n    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])\n    x0 = np.array([0.1, -1.1])\n    if use_time_grid:\n        times_grid = tf.linspace(tf.constant(0.0, dtype=dtype), 0.55, 56)\n        time_step = None\n    else:\n        times_grid = None\n        time_step = 0.01\n    if supply_normal_draws:\n        num_samples = 1\n        normal_draws = tf.random.normal(shape=[5000, times_grid.shape[0] - 1, 2], dtype=dtype)\n        normal_draws = tf.concat([normal_draws, -normal_draws], axis=0)\n    else:\n        num_samples = 10000\n        normal_draws = None\n    paths = self.evaluate(process.sample_paths(times, num_samples=num_samples, initial_state=x0, time_step=time_step, times_grid=times_grid, normal_draws=normal_draws, seed=12134))\n    num_samples = 10000\n    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)\n    means = np.mean(paths, axis=0)\n    times = np.reshape(times, [-1, 1])\n    expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)\n    self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)", "docstring": "Tests path properties for 2-dimentional Ito process.\n\nWe construct the following Ito processes.\n\ndX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2\ndX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2\n\nmu_1, mu_2 are constants.\ns_ij = a_ij t + b_ij\n\nFor this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.\n\nArgs:\nuse_time_grid: A boolean to indicate whther `times_grid` is supplied.\nsupply_normal_draws: A boolean to indicate whether `normal_draws` is\nsupplied.", "source": "github-repos"}
{"code": "def getGUA(self, filterByPrefix=None):\n        \n        print '%s call getGUA' % self.port\n        print filterByPrefix\n        globalAddrs = []\n        try:\n            \n            globalAddrs = self.getGlobal()\n\n            if filterByPrefix is None:\n                return globalAddrs[0]\n            else:\n                for line in globalAddrs:\n                    fullIp = ModuleHelper.GetFullIpv6Address(line)\n                    if fullIp.startswith(filterByPrefix):\n                        return fullIp\n                print 'no global address matched'\n                return str(globalAddrs[0])\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"getGUA() Error: \" + str(e))", "docstring": "get expected global unicast IPv6 address of Thread device\n\nArgs:\nfilterByPrefix: a given expected global IPv6 prefix to be matched\n\nReturns:\na global IPv6 address", "source": "juraj-google-style"}
{"code": "def stop_capture_handler(self, name):\n    empty_capturers_indeces = []\n    for (k, sc) in self._stream_capturers.iteritems():\n        stream_capturer = sc[0]\n        stream_capturer.remove_handler(name)\n        if (stream_capturer.handler_count == 0):\n            self._pool.killone(sc[1])\n            empty_capturers_indeces.append(k)\n    for i in empty_capturers_indeces:\n        del self._stream_capturers[i]", "docstring": "Remove all handlers with a given name\n\nArgs:\nname:\nThe name of the handler(s) to remove.", "source": "codesearchnet"}
{"code": "def handle_unsubscribe(self, request, path):\n    ret = []\n    if path:\n        name = path[0]\n        child = self.children[name]\n        ret += child.handle_unsubscribe(request, path[1:])\n        if ((not child.children) and (not child.update_requests) and (not child.delta_requests)):\n            del self.children[name]\n    else:\n        if (request in self.update_requests):\n            self.update_requests.remove(request)\n        else:\n            self.delta_requests.remove(request)\n        ret.append(request.return_response())\n    return ret", "docstring": "Remove from the notifier list and send a return\n\nArgs:\nrequest (Subscribe): The original subscribe request\npath (list): The relative path from ourself\n\nReturns:\nlist: [(callback, Response)] that need to be called", "source": "codesearchnet"}
{"code": "def gvd(self, wavelength):\n        \n        g = (wavelength*1.e-9)**3./(2.*spc.pi*spc.c**2.) * self.nDer2(wavelength)\n        return g", "docstring": "The group velocity dispersion (GVD) with respect to wavelength.\n\nArgs:\nwavelength (float, list, None): The wavelength(s) the GVD will\nbe evaluated at.\n\nReturns:\nfloat, list: The GVD at the target wavelength(s).", "source": "juraj-google-style"}
{"code": "def end(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.end import end\n    return end(self)", "docstring": "Generates a single timestamp at the end of an\n[`EventSet`][temporian.EventSet], per index key.\n\nUsage example:\n```python\n>>> a = tp.event_set(\n...     timestamps=[5, 6, 7, 1],\n...     features={\"f\": [50, 60, 70, 10], \"idx\": [1, 1, 1, 2]},\n...     indexes=[\"idx\"]\n... )\n\n>>> a_end = a.end()\n>>> a_end\nindexes: [('idx', int64)]\nfeatures: []\nevents:\nidx=1 (1 events):\ntimestamps: [7.]\nidx=2 (1 events):\ntimestamps: [1.]\n...\n\n```\n\nReturns:\nA feature-less EventSet with a single timestamp per index group.", "source": "github-repos"}
{"code": "def for_document(cls, document_ref, snapshot_callback, snapshot_class_instance, reference_class_instance):\n    return cls(document_ref, document_ref._client, {'documents': {'documents': [document_ref._document_path]}, 'target_id': WATCH_TARGET_ID}, document_watch_comparator, snapshot_callback, snapshot_class_instance, reference_class_instance)", "docstring": "Creates a watch snapshot listener for a document. snapshot_callback\nreceives a DocumentChange object, but may also start to get\ntargetChange and such soon\n\nArgs:\ndocument_ref: Reference to Document\nsnapshot_callback: callback to be called on snapshot\nsnapshot_class_instance: instance of DocumentSnapshot to make\nsnapshots with to pass to snapshot_callback\nreference_class_instance: instance of DocumentReference to make\nreferences", "source": "codesearchnet"}
{"code": "def delete_detector(self, detector_id, **kwargs):\n    resp = self._delete(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id), **kwargs)\n    resp.raise_for_status()\n    return resp", "docstring": "Remove a detector.\n\nArgs:\ndetector_id (string): the ID of the detector.", "source": "codesearchnet"}
{"code": "def __init__(self, servers, debug=False):\n        \n        self.servers = [servers] if isinstance(servers, basestring) else servers\n        self.key_hasher = self._debug_key_hash if debug else self._key_hash\n        self._client = None\n        self.debug = debug\n        self.current = ''", "docstring": "Create a memcached client.\n\nArgs:\nservers (str or list of str): Server URI(s), eg '127.0.0.1:11211'.\ndebug (bool): If True, quasi human readable keys are used. This helps\ndebugging - run 'memcached -vv' in the foreground to see the keys\nbeing get/set/stored.", "source": "juraj-google-style"}
{"code": "def abs_url(self, url):\n        \n        parsed_url = urllib.parse.urlparse(url)\n        if not parsed_url.scheme and not parsed_url.netloc:\n            \n            return urllib.parse.urljoin(str(self.base_url), str(url))\n        else:\n            \n            return url", "docstring": "Given a relative or absolute URL; return an absolute URL.\n\nArgs:\nurl(basestring): A relative or absolute URL.\n\nReturns:\nstr: An absolute URL.", "source": "juraj-google-style"}
{"code": "def variance(numbers, type='population'):\n    mean = average(numbers)\n    variance = 0\n    for number in numbers:\n        variance += ((mean - number) ** 2)\n    if (type == 'population'):\n        return (variance / len(numbers))\n    else:\n        return (variance / (len(numbers) - 1))", "docstring": "Calculates the population or sample variance of a list of numbers.\nA large number means the results are all over the place, while a\nsmall number means the results are comparatively close to the average.\n\nArgs:\nnumbers: a list  of integers or floating point numbers to compare.\n\ntype: string, 'population' or 'sample', the kind of variance to be computed.\n\nReturns:\nThe computed population or sample variance.\nDefaults to population variance.\n\nRequires:\nThe math module, average()", "source": "codesearchnet"}
{"code": "def file_to_list(file_name, file_location):\n    \n    file = __os.path.join(file_location, file_name)\n    read_file = open(file, \"r\")\n    temp_list = read_file.read().splitlines()\n    read_file.close()\n    return temp_list", "docstring": "Function to import a text file to a list\nArgs:\nfile_name: The name of file to be import\nfile_location: The location of the file, derive from the os module\n\nReturns: returns a list", "source": "juraj-google-style"}
{"code": "def add(self, index):\n        \n        if (index - self.flush_at) < self.interval:\n            return\n        now = time.time()\n        elapsed = now - self.lap\n        elapsed_total = now - self.start\n        it = index - self.flush_at\n        self.lap = now\n        if self.verbose:\n            logger.info(\"iter={} {{{}}}={}[sec/{}iter] {}[sec]\".format(\n                index, self.name, elapsed, it, elapsed_total))\n        if self.fd is not None:\n            print(\"{} {} {} {}\".format(index, elapsed,\n                                       it, elapsed_total), file=self.fd)\n        self.flush_at = index", "docstring": "Calculate time elapsed from the point previously called\nthis method or this object is created to this is called.\n\nArgs:\nindex (int): Index to be displayed, and be used to take intervals.", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    if not self.FileEntryExistsByPathSpec(path_spec):\n      return None\n\n    location = getattr(path_spec, 'location', None)\n\n    if len(location) == 1:\n      return tar_file_entry.TARFileEntry(\n          self._resolver_context, self, path_spec, is_root=True,\n          is_virtual=True)\n\n    kwargs = {}\n    try:\n      kwargs['tar_info'] = self._tar_file.getmember(location[1:])\n    except KeyError:\n      kwargs['is_virtual'] = True\n\n    return tar_file_entry.TARFileEntry(\n        self._resolver_context, self, path_spec, **kwargs)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nTARFileEntry: file entry or None.", "source": "juraj-google-style"}
{"code": "def tf_retrieve_indices(self, buffer_elements, priority_indices):\n        \n        states = dict()\n        buffer_start = self.buffer_index - buffer_elements\n        buffer_end = self.buffer_index\n\n        \n        for name in sorted(self.states_memory):\n            buffer_state_memory = self.states_buffer[name]\n            \n            \n            buffer_states = buffer_state_memory[buffer_start:buffer_end]\n            \n            memory_states = tf.gather(params=self.states_memory[name], indices=priority_indices)\n            states[name] = tf.concat(values=(buffer_states, memory_states), axis=0)\n\n        internals = dict()\n        for name in sorted(self.internals_memory):\n            internal_buffer_memory = self.internals_buffer[name]\n            buffer_internals = internal_buffer_memory[buffer_start:buffer_end]\n            memory_internals = tf.gather(params=self.internals_memory[name], indices=priority_indices)\n            internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=0)\n\n        actions = dict()\n        for name in sorted(self.actions_memory):\n            action_buffer_memory = self.actions_buffer[name]\n            buffer_action = action_buffer_memory[buffer_start:buffer_end]\n            memory_action = tf.gather(params=self.actions_memory[name], indices=priority_indices)\n            actions[name] = tf.concat(values=(buffer_action, memory_action), axis=0)\n\n        buffer_terminal = self.terminal_buffer[buffer_start:buffer_end]\n        priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)\n        terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=0)\n\n        buffer_reward = self.reward_buffer[buffer_start:buffer_end]\n        priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices)\n        reward = tf.concat(values=(buffer_reward, priority_reward), axis=0)\n\n        if self.include_next_states:\n            assert util.rank(priority_indices) == 1\n            next_priority_indices = (priority_indices + 1) % self.capacity\n            next_buffer_start = (buffer_start + 1) % self.buffer_size\n            next_buffer_end = (buffer_end + 1) % self.buffer_size\n\n            next_states = dict()\n            for name in sorted(self.states_memory):\n                buffer_state_memory = self.states_buffer[name]\n                buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end]\n                memory_next_states = tf.gather(params=self.states_memory[name], indices=next_priority_indices)\n                next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=0)\n\n            next_internals = dict()\n            for name in sorted(self.internals_memory):\n                buffer_internal_memory = self.internals_buffer[name]\n                buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end]\n                memory_next_internals = tf.gather(params=self.internals_memory[name], indices=next_priority_indices)\n                next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=0)\n\n            return dict(\n                states=states,\n                internals=internals,\n                actions=actions,\n                terminal=terminal,\n                reward=reward,\n                next_states=next_states,\n                next_internals=next_internals\n            )\n        else:\n            return 
dict(\n                states=states,\n                internals=internals,\n                actions=actions,\n                terminal=terminal,\n                reward=reward\n            )", "docstring": "Fetches experiences for given indices by combining entries from buffer\nwhich have no priorities, and entries from priority memory.\n\nArgs:\nbuffer_elements: Number of buffer elements to retrieve\npriority_indices: Index tensor for priority memory\n\nReturns: Batch of experiences", "source": "juraj-google-style"}
{"code": "def system(self, error: str) -> None:\n    log = self._build_system_message(error)\n    self.queue_log_message(log)", "docstring": "Adds system error information to base log message and\nsends it to the logger for writing.\n\nArgs:\n* error: error that occurred\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def write(self):\n    if self.description:\n        return '@{0} {1}{4}{2}{4}+{4}{3}{4}'.format(self.id, self.description, self.sequence, self.quality, os.linesep)\n    else:\n        return '@{0}{3}{1}{3}+{3}{2}{3}'.format(self.id, self.sequence, self.quality, os.linesep)", "docstring": "Return FASTQ formatted string\n\nReturns:\nstr: FASTQ formatted string containing entire FASTQ entry", "source": "codesearchnet"}
{"code": "def unpack(self, buff=None, offset=0):\n        \n        property_type = UBInt16(enum_ref=TableFeaturePropType)\n        property_type.unpack(buff, offset)\n        self.__class__ = TableFeaturePropType(property_type.value).find_class()\n\n        length = UBInt16()\n        length.unpack(buff, offset=offset+2)\n        super().unpack(buff[:offset+length.value], offset=offset)", "docstring": "Unpack *buff* into this object.\n\nThis method will convert a binary data into a readable value according\nto the attribute format.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def create(self, resource):\n        \n        uri = self.URI + self.RESOURCES_PATH\n        return self._client.create(resource=resource, uri=uri)", "docstring": "Set all the labels for a resource.\n\nArgs:\nresource: The object containing the resource URI and a list of labels\n\nReturns:\ndict: Resource Labels", "source": "juraj-google-style"}
{"code": "def _GetFrameCodeObjectName(frame):\n    if ((frame.f_code.co_argcount >= 1) and ('self' == frame.f_code.co_varnames[0])):\n        return ((frame.f_locals['self'].__class__.__name__ + '.') + frame.f_code.co_name)\n    else:\n        return frame.f_code.co_name", "docstring": "Gets the code object name for the frame.\n\nArgs:\nframe: the frame to get the name from\n\nReturns:\nThe function name if the code is a static function or the class name with\nthe method name if it is an member function.", "source": "codesearchnet"}
{"code": "def kron(*matrices: np.ndarray) -> np.ndarray:\n    \n    product = np.eye(1)\n    for m in matrices:\n        product = np.kron(product, m)\n    return np.array(product)", "docstring": "Computes the kronecker product of a sequence of matrices.\n\nA *args version of lambda args: functools.reduce(np.kron, args).\n\nArgs:\n*matrices: The matrices and controls to combine with the kronecker\nproduct.\n\nReturns:\nThe resulting matrix.", "source": "juraj-google-style"}
{"code": "def fetch(self, settlement_id, data={}, **kwargs):\n        \n        return super(Settlement, self).fetch(settlement_id, data, **kwargs)", "docstring": "Fetch Settlement data for given Id\n\nArgs:\nsettlement_id : Id for which settlement object has to be retrieved\n\nReturns:\nsettlement dict for given settlement id", "source": "juraj-google-style"}
{"code": "def RegisterMountPoint(cls, mount_point, path_spec):\n    \n    if mount_point in cls._mount_points:\n      raise KeyError('Mount point: {0:s} already set.'.format(mount_point))\n\n    cls._mount_points[mount_point] = path_spec", "docstring": "Registers a path specification mount point.\n\nArgs:\nmount_point (str): mount point identifier.\npath_spec (PathSpec): path specification of the mount point.\n\nRaises:\nKeyError: if the corresponding mount point is already set.", "source": "juraj-google-style"}
{"code": "def add_arguments(cls, parser):\n        \n\n        parser.add_argument(\n            '-i', '--issue',\n            action='store',\n            nargs='?',\n            const='',\n            dest='issue',\n            help=\"[pr] issue \n            )\n\n        parser.add_argument(\n            '-br', '--branch',\n            action='store',\n            nargs='?',\n            const='',\n            dest='branch',\n            help=\"[pr] branch\",\n            )\n\n        parser.add_argument(\n            '-tbr', '--target-branch',\n            action='store',\n            nargs='?',\n            const='',\n            default='master',\n            dest='target_branch',\n            help=\"[pr] name of branch to pull changes into\\n(defaults to: master)\",\n            )", "docstring": "Add arguments to the parser for collection in app.args.\n\nArgs:\nparser:\n`argparse.ArgumentParser`. Parser.\nArguments added here are server on\nself.args.", "source": "juraj-google-style"}
{"code": "def get_lowest_decomposition(self, composition):\n    entries_list = []\n    elements = [e.symbol for e in composition.elements]\n    for i in range(len(elements)):\n        for combi in itertools.combinations(elements, (i + 1)):\n            chemsys = [Element(e) for e in combi]\n            x = self.costdb.get_entries(chemsys)\n            entries_list.extend(x)\n    try:\n        pd = PhaseDiagram(entries_list)\n        return pd.get_decomposition(composition)\n    except IndexError:\n        raise ValueError('Error during PD building; most likely, cost data does not exist!')", "docstring": "Get the decomposition leading to lowest cost\n\nArgs:\ncomposition:\nComposition as a pymatgen.core.structure.Composition\nReturns:\nDecomposition as a dict of {Entry: amount}", "source": "codesearchnet"}
{"code": "def create_multispan_plots(tag_ids):\n    \n    import matplotlib.gridspec as gridspec\n    fig = plt.figure()\n    nrows = 1\n    if len(tag_ids) > 1:\n        nrows = 2\n    fig.set_size_inches(10, 5*nrows)\n\n    gs = gridspec.GridSpec(nrows, len(tag_ids))\n    ax_list = [fig.add_subplot(g) for g in gs]\n    ax_dict = {}\n    for i, tag_dict in enumerate(tag_ids):\n        ax_dict[tag_dict['id']] = ax_list[i]\n        ax_dict[tag_dict['id']].set_title(\n            'System {} (id {})'.format(tag_dict['name'], tag_dict['id']))\n\n    if nrows > 1:\n        ax_total = plt.subplot(gs[1, :])\n\n        title = 'Combined {}'.format(tag_ids[0]['name'])\n        for i in range(1, len(tag_ids)):\n            title = title + ' and {}'.format(tag_ids[i]['name'])\n        ax_total.set_title(title)\n        gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])\n        return fig, ax_dict, ax_total\n    gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95])\n    return fig, ax_dict, None", "docstring": "Create detail plots (first row) and total block(second row) of experiments.\n\nArgs:\ntag_ids: list of tag-dictionaries, where the dictionaries must have fields 'name' (used for naming)\nand 'id' (used for numbering axis_dict)\n\nReturns:\nFigure element fig, ax_dict containing the first row plots (accessed via id) and ax_total containing the\nsecond row block.", "source": "juraj-google-style"}
{"code": "def post_url(self, url, token='', json=None, data=None, headers=None):\n        \n        if (token == ''):\n            token = self._user_token\n\n        if headers:\n            headers.update({'Authorization': 'Token {}'.format(token)})\n        else:\n            headers = {'Authorization': 'Token {}'.format(token)}\n\n        if json:\n            return requests.post(url,\n                                 headers=headers,\n                                 json=json,\n                                 verify=False)\n        if data:\n            return requests.post(url,\n                                 headers=headers,\n                                 data=data,\n                                 verify=False)\n\n        return requests.post(url,\n                             headers=headers,\n                             verify=False)", "docstring": "Returns a post resquest object taking in a url, user token, and\npossible json information.\n\nArguments:\nurl (str): The url to make post to\ntoken (str): The authentication token\njson (dict): json info to send\n\nReturns:\nobj: Post request object", "source": "juraj-google-style"}
{"code": "def _unbind_topics(self, topics):\n        \n\n        self.client.unsubscribe(topics.status)\n        self.client.unsubscribe(topics.tracing)\n        self.client.unsubscribe(topics.streaming)\n        self.client.unsubscribe(topics.response)", "docstring": "Unsubscribe to all of the topics we needed for communication with device\n\nArgs:\ntopics (MQTTTopicValidator): The topic validator for this device that\nwe have connected to.", "source": "juraj-google-style"}
{"code": "def decode(data):\n    \n    dom = None\n    try:\n        dom = dhtmlparser.parseString(data)\n    except Exception, e:\n        raise MetaParsingException(\"Can't parse your XML data: %s\" % e.message)\n\n    root = dom.find(\"root\")\n\n    \n    if not root:\n        raise MetaParsingException(\"All elements have to be inside <root>.\")\n\n    \n    if len(root) > 1:\n        raise MetaParsingException(\"Too many <root> elements in your XML!\")\n\n    items = root[0].find(\"item\")\n\n    \n    if not items:\n        raise MetaParsingException(\"There are no <items> in your XML <root>!\")\n\n    decoded = []\n    for item in items:\n        if \"key\" not in item.params:\n            raise MetaParsingException(\n                \"There is no 'key' parameter in %s.\" % str(item)\n            )\n\n        decoded.append([\n            item.params[\"key\"],\n            item.getContent().strip()\n        ])\n\n    decoded = validator.check_structure(decoded)\n\n    return decoded", "docstring": "Handles decoding of the XML `data`.\n\nArgs:\ndata (str): Data which will be decoded.\n\nReturns:\ndict: Dictionary with decoded data.", "source": "juraj-google-style"}
{"code": "def _control_flow_post_processing(self, input_tensors=None) -> None:\n    if input_tensors is None:\n        input_tensors = self.inputs\n    for input_tensor in input_tensors:\n        control_flow_util.CheckInputFromValidContext(self, input_tensor.op)\n    if self._control_flow_context is not None:\n        self._control_flow_context.AddOp(self)", "docstring": "Add this op to its control flow context.\n\nThis may add new ops and change this op's inputs. self.inputs must be\navailable before calling this method.\n\nArgs:\ninput_tensors: (Optional.) A list of `Tensors` corresponding to the inputs\nof this op, which should be equivalent to `self.inputs`. Pass this\nargument to avoid evaluating `self.inputs` unnecessarily.", "source": "github-repos"}
{"code": "def __init__(self,\n                 descriptors=None,\n                 descriptor_loader=import_descriptor_loader):\n        \n        self.__descriptor_loader = descriptor_loader\n        self.__descriptors = descriptors or {}", "docstring": "Constructor.\n\nArgs:\ndescriptors: A dictionary or dictionary-like object that can be used\nto store and cache descriptors by definition name.\ndefinition_loader: A function used for resolving missing descriptors.\nThe function takes a definition name as its parameter and returns\nan appropriate descriptor.  It may raise DefinitionNotFoundError.", "source": "juraj-google-style"}
{"code": "def sort_request(request: Dict[(str, Any)]) -> OrderedDict:\n    sort_order = ['jsonrpc', 'method', 'params', 'id']\n    return OrderedDict(sorted(request.items(), key=(lambda k: sort_order.index(k[0]))))", "docstring": "Sort a JSON-RPC request dict.\n\nThis has no effect other than making the request nicer to read.\n\n>>> json.dumps(sort_request(\n...     {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))\n'{\"jsonrpc\": \"2.0\", \"method\": \"add\", \"params\": [2, 3], \"id\": 2}'\n\nArgs:\nrequest: JSON-RPC request in dict format.", "source": "codesearchnet"}
{"code": "def string_join(inputs, separator='', name=None):\n    return gen_string_ops.string_join(inputs, separator=separator, name=name)", "docstring": "Perform element-wise concatenation of a list of string tensors.\n\nGiven a list of string tensors of same shape, performs element-wise\nconcatenation of the strings of the same index in all tensors.\n\n\n>>> tf.strings.join(['abc','def']).numpy()\nb'abcdef'\n>>> tf.strings.join([['abc','123'],\n...                  ['def','456'],\n...                  ['ghi','789']]).numpy()\narray([b'abcdefghi', b'123456789'], dtype=object)\n>>> tf.strings.join([['abc','123'],\n...                  ['def','456']],\n...                  separator=\" \").numpy()\narray([b'abc def', b'123 456'], dtype=object)\n\nThe reduction version of this elementwise operation is\n`tf.strings.reduce_join`\n\nArgs:\ninputs: A list of `tf.Tensor` objects of same size and `tf.string` dtype.\nseparator: A string added between each string being joined.\nname: A name for the operation (optional).\n\nReturns:\nA `tf.string` tensor.", "source": "github-repos"}
{"code": "def _PrintSessionsDetails(self, storage_reader):\n    for (session_number, session) in enumerate(storage_reader.GetSessions()):\n        session_identifier = uuid.UUID(hex=session.identifier)\n        session_identifier = '{0!s}'.format(session_identifier)\n        start_time = 'N/A'\n        if (session.start_time is not None):\n            start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)\n        completion_time = 'N/A'\n        if (session.completion_time is not None):\n            completion_time = timelib.Timestamp.CopyToIsoFormat(session.completion_time)\n        enabled_parser_names = 'N/A'\n        if session.enabled_parser_names:\n            enabled_parser_names = ', '.join(sorted(session.enabled_parser_names))\n        command_line_arguments = (session.command_line_arguments or 'N/A')\n        parser_filter_expression = (session.parser_filter_expression or 'N/A')\n        preferred_encoding = (session.preferred_encoding or 'N/A')\n        if isinstance(preferred_encoding, py2to3.BYTES_TYPE):\n            preferred_encoding = preferred_encoding.decode('utf-8')\n        if session.artifact_filters:\n            artifact_filters_string = ', '.join(session.artifact_filters)\n        else:\n            artifact_filters_string = 'N/A'\n        filter_file = (session.filter_file or 'N/A')\n        title = 'Session: {0:s}'.format(session_identifier)\n        table_view = views.ViewsFactory.GetTableView(self._views_format_type, title=title)\n        table_view.AddRow(['Start time', start_time])\n        table_view.AddRow(['Completion time', completion_time])\n        table_view.AddRow(['Product name', session.product_name])\n        table_view.AddRow(['Product version', session.product_version])\n        table_view.AddRow(['Command line arguments', command_line_arguments])\n        table_view.AddRow(['Parser filter expression', parser_filter_expression])\n        table_view.AddRow(['Enabled parser and plugins', enabled_parser_names])\n        table_view.AddRow(['Preferred encoding', preferred_encoding])\n        table_view.AddRow(['Debug mode', session.debug_mode])\n        table_view.AddRow(['Artifact filters', artifact_filters_string])\n        table_view.AddRow(['Filter file', filter_file])\n        table_view.Write(self._output_writer)\n        if self._verbose:\n            self._PrintPreprocessingInformation(storage_reader, (session_number + 1))\n            self._PrintParsersCounter(session.parsers_counter, session_identifier=session_identifier)\n            self._PrintAnalysisReportCounter(session.analysis_reports_counter, session_identifier=session_identifier)\n            self._PrintEventLabelsCounter(session.event_labels_counter, session_identifier=session_identifier)", "docstring": "Prints the details of the sessions.\n\nArgs:\nstorage_reader (BaseStore): storage.", "source": "codesearchnet"}
{"code": "def _run_pytype(self, pytype_args_dict):\n    with self._create_pytype_subprocess(pytype_args_dict) as p:\n        self.stdout, self.stderr = (s.decode('utf-8') for s in p.communicate())\n        self.returncode = p.returncode", "docstring": "A single command-line call to the pytype binary.\n\nTypically you'll want to use _CheckTypesAndErrors or\n_InferTypesAndCheckErrors, which will set up the command-line arguments\nproperly and check that the errors file is in the right state after the\ncall. (The errors check is bundled in to avoid the user forgetting to call\nassertHasErrors() with no arguments when expecting no errors.)\n\nArgs:\npytype_args_dict: A dictionary of the arguments to pass to pytype, minus\nthe binary name. For example, to run pytype simple.py --output=- the\narguments should be {\"simple.py\": self.INCLUDE, \"--output\": \"-\"}", "source": "github-repos"}
{"code": "def __init__(self, api_key=None, endpoint=None, dtype=None, verbose=None, debug=None):\n        \n        self.api_key = api_key if api_key else os.environ['MPDS_KEY']\n\n        self.network = httplib2.Http()\n\n        self.endpoint = endpoint or self.endpoint\n        self.dtype = dtype or MPDSDataTypes.PEER_REVIEWED\n        self.verbose = verbose if verbose is not None else self.verbose\n        self.debug = debug or self.debug", "docstring": "MPDS API consumer constructor\n\nArgs:\napi_key: (str) The MPDS API key, or None if the MPDS_KEY envvar is set\nendpoint: (str) MPDS API gateway URL\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def backward_propagation(parameters, cache, X, Y):\n    m = X.shape[1]\n    W1 = parameters['W1']\n    W2 = parameters['W2']\n    A1 = cache['A1']\n    A2 = cache['A2']\n    dZ2 = (A2 - Y)\n    dW2 = ((1.0 / m) * np.dot(dZ2, A1.T))\n    db2 = ((1.0 / m) * np.sum(dZ2, axis=1, keepdims=True))\n    dZ1 = ((W2.T * dZ2) * (1 - np.power(A1, 2)))\n    dW1 = ((1.0 / m) * np.dot(dZ1, X.T))\n    db1 = ((1.0 / m) * np.sum(dZ1, axis=1, keepdims=True))\n    grads = {'dW1': dW1, 'db1': db1, 'dW2': dW2, 'db2': db2}\n    return grads", "docstring": "Implement the backward propagation using the instructions above.\n\nArguments:\nparameters -- python dictionary containing our parameters\ncache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\nX -- input data of shape (2, number of examples)\nY -- \"true\" labels vector of shape (1, number of examples)\n\nReturns:\ngrads -- python dictionary containing your gradients with respect to different parameters", "source": "codesearchnet"}
{"code": "def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes):\n    weights = RandomVar(LSTMCellWeightsShape(num_inputs, num_nodes), name='weights')\n    m = array_ops.zeros([batch_size, num_nodes], name='init_m')\n    c = array_ops.zeros([batch_size, num_nodes], name='init_c')\n    x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)\n    out_seq = LSTMLayer('lstm', weights, m, c, x_seq, pad_seq)\n    return (out_seq, [weights])", "docstring": "Builds a single LSTM layer with random weights and inputs.\n\nArgs:\nbatch_size: Inputs are fed in batches of this size.\nseq_length: The sequence length to unroll the LSTM layer.\nnum_inputs: Dimension of inputs that are fed into each LSTM cell.\nnum_nodes: The number of nodes in each LSTM cell.\n\nReturns:\n(out_seq, weights) pair.  The out_seq is a list of per-sequence-step\noutputs, each with shape [batch_size, num_nodes].  The weights are a list of\nweight variables that may be trained.", "source": "github-repos"}
{"code": "def __init__(self, image_processor, tokenizer, chat_template=None, image_seq_length=256, policy_definitions=None, **kwargs):\n    super().__init__(image_processor, tokenizer, chat_template, image_seq_length, **kwargs)\n    if policy_definitions is None:\n        self.policy_definitions = DEFAULT_SHIELDGEMMA2_POLICIES\n    else:\n        self.policy_definitions = policy_definitions", "docstring": "A processor for the ShieldGemma 2 model.\n\nArgs:\nimage_processor: The image processor to use, typically a `Gemma3ImageProcessorFast` instance.\ntokenizer: The tokenizer to use, typically a `GemmaTokenizerFast` instance.\nchat_template: The chat template to use with this processor. Typically, this is unset as the processor\nconfiguration on Hugging Face Hub includes this value already.\nimage_seq_length: The number of soft tokens per image. Typically, this is unset as the processor\nconfiguration on Hugging Face Hub includes this value already.\npolicy_definitions: A mapping from policy name to its description in text used as the default policies to\nclassify images against. The policy descriptions are included in the text of the prompts generated by\nthis processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes\nthe base policies ShieldGemma was trained on.", "source": "github-repos"}
{"code": "def build_chunk(oscillators):\n    \n    step_random_processes(oscillators)\n    subchunks = []\n    for osc in oscillators:\n        osc.amplitude.step_amp()\n        osc_chunk = osc.get_samples(config.CHUNK_SIZE)\n        if osc_chunk is not None:\n            subchunks.append(osc_chunk)\n    if len(subchunks):\n        new_chunk = sum(subchunks)\n    else:\n        new_chunk = numpy.zeros(config.CHUNK_SIZE)\n    \n    chunk_amplitude = amplitude.find_amplitude(new_chunk)\n    if chunk_amplitude > config.MAX_AMPLITUDE:\n        \n        new_chunk = amplitude.normalize_amplitude(new_chunk,\n                                                  config.MAX_AMPLITUDE)\n        \n        \n        avg_amp = (sum(osc.amplitude.value for osc in oscillators) /\n                   len(oscillators))\n        for osc in oscillators:\n            if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or\n                    rand.prob_bool(0.01)):\n                osc.amplitude.drift_target = rand.weighted_rand(\n                    [(-5, 1), (0, 10)])\n                osc.amplitude.change_rate = rand.weighted_rand(\n                    osc.amplitude.change_rate_weights)\n    return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()", "docstring": "Build an audio chunk and progress the oscillator states.\n\nArgs:\noscillators (list): A list of oscillator.Oscillator objects\nto build chunks from\n\nReturns:\nstr: a string of audio sample bytes ready to be written to a wave file", "source": "juraj-google-style"}
{"code": "def daylight_saving_end_day(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `daylight_saving_end_day`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `daylight_saving_end_day`')\n    self._daylight_saving_end_day = value", "docstring": "Corresponds to IDD Field `daylight_saving_end_day`\n\nArgs:\nvalue (str): value for IDD Field `daylight_saving_end_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        super(ContractState, self).Deserialize(reader)\n\n        code = FunctionCode()\n        code.Deserialize(reader)\n        self.Code = code\n\n        self.ContractProperties = reader.ReadUInt8()\n        self.Name = reader.ReadVarString(max=252)\n        self.CodeVersion = reader.ReadVarString(max=252)\n        self.Author = reader.ReadVarString(max=252)\n        self.Email = reader.ReadVarString(max=252)\n        self.Description = reader.ReadVarString(max=65536)", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def from_json(json):\n        \n        return Point(\n            lat=json['lat'],\n            lon=json['lon'],\n            time=isostr_to_datetime(json['time'])\n        )", "docstring": "Creates Point instance from JSON representation\n\nArgs:\njson (:obj:`dict`): Must have at least the following keys: lat (float), lon (float),\ntime (string in iso format). Example,\n{\n\"lat\": 9.3470298,\n\"lon\": 3.79274,\n\"time\": \"2016-07-15T15:27:53.574110\"\n}\njson: map representation of Point instance\nReturns:\n:obj:`Point`", "source": "juraj-google-style"}
{"code": "def initialize():\n    global __initialized\n    if __initialized:\n        return\n    try:\n        for data in DEFAULT_CONFIG_OPTIONS:\n            nsobj = _get_config_namespace(data['prefix'], data['name'], sort_order=data['sort_order'])\n            for opt in data['options']:\n                _register_default_option(nsobj, opt)\n            db.session.add(nsobj)\n        for (ns, info) in CINQ_PLUGINS.items():\n            if (info['name'] == 'commands'):\n                continue\n            for entry_point in info['plugins']:\n                _cls = entry_point.load()\n                if hasattr(_cls, 'ns'):\n                    ns_name = '{}: {}'.format(info['name'].capitalize(), _cls.name)\n                    if (not isinstance(_cls.options, abstractproperty)):\n                        nsobj = _get_config_namespace(_cls.ns, ns_name)\n                        if _cls.options:\n                            for opt in _cls.options:\n                                _register_default_option(nsobj, opt)\n                        db.session.add(nsobj)\n        _add_default_roles()\n        _import_templates()\n        db.session.commit()\n        dbconfig.reload_data()\n        __initialized = True\n    except ProgrammingError as ex:\n        if (str(ex).find('1146') != (- 1)):\n            logging.getLogger('cloud_inquisitor').error('Missing required tables, please make sure you run `cloud-inquisitor db upgrade`')", "docstring": "Initialize the application configuration, adding any missing default configuration or roles\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def bessel_i0e(x, name=None):\n    with ops.name_scope(name, 'bessel_i0e', [x]):\n        return gen_special_math_ops.bessel_i0e(x)", "docstring": "Computes the Bessel i0e function of `x` element-wise.\n\nModified Bessel function of order 0.\n\n>>> tf.math.special.bessel_i0e([-1., -0.5, 0.5, 1.]).numpy()\narray([0.46575961, 0.64503527, 0.64503527, 0.46575961], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.i0e\n@end_compatibility", "source": "github-repos"}
{"code": "def _get_unique_function_name(function_type, functions):\n    function_name = function_name_base = function_type\n    count = 2\n    while (function_name in functions):\n        function_name = '{}_{}'.format(function_name_base, count)\n        count += 1\n    return function_name", "docstring": "Get a unique function name.\n\nArgs:\nfunction_type(str): Name of Function. Ex) Convolution, Affine\nfunctions(OrderedDict of (str, Function)\n\nReturns: str\nA unique function name", "source": "codesearchnet"}
{"code": "def Factory(cls, name, min_threads, max_threads=None):\n    with cls.factory_lock:\n        result = cls.POOLS.get(name)\n        if (result is None):\n            cls.POOLS[name] = result = cls(name, min_threads, max_threads=max_threads)\n        return result", "docstring": "Creates a new thread pool with the given name.\n\nIf the thread pool of this name already exist, we just return the existing\none. This allows us to have different pools with different characteristics\nused by different parts of the code, at the same time.\n\nArgs:\nname: The name of the required pool.\nmin_threads: The number of threads in the pool.\nmax_threads: The maximum number of threads to grow the pool to. If not set\nwe do not grow the pool.\n\nReturns:\nA threadpool instance.", "source": "codesearchnet"}
{"code": "def _DepthwiseConv2dNumpyBasic(x1, x2, strides):\n    n, h, w, c = x1.shape\n    fh, fw, c2, o = x2.shape\n    assert c == c2\n    _, sh, sw, _ = strides\n    out_rows = (h - fh + sh) \n    out_cols = (w - fw + sw) \n    out = np.zeros([n, out_rows, out_cols, c * o])\n    for i in range(out_rows):\n        for j in range(out_cols):\n            for k in range(c):\n                start_height = i * sh\n                end_height = start_height + fh\n                start_width = j * sw\n                end_width = start_width + fw\n                multiplied_slice = x1[:, start_height:end_height, start_width:end_width, k, np.newaxis] * x2[:, :, k, :]\n                out[:, i, j, k * o:(k + 1) * o] = np.sum(multiplied_slice, axis=(1, 2))\n    return out", "docstring": "Compute depthwise_conv2d using Numpy.\n\nThis allows use to test TensorFlow's depthwise_conv2d by comparing to the\nNumpy version.\n\nArgs:\nx1: The input Numpy array, in NHWC format.\nx2: The filter Numpy array.\nstrides: A Python list of 4 elements representing the strides.\n\nReturns:\nThe depthwise conv2d output as a Numpy array.", "source": "github-repos"}
{"code": "def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):\n    sensitivities = math_ops.divide(tp, tp + fn + kepsilon)\n    min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))\n    indices_at_minval = math_ops.equal(math_ops.abs(sensitivities - sensitivity), min_val)\n    indices_at_minval = math_ops.cast(indices_at_minval, dtypes.int64)\n    indices_at_minval = math_ops.cumsum(indices_at_minval)\n    tf_index = math_ops.argmax(indices_at_minval, 0)\n    tf_index = math_ops.cast(tf_index, dtypes.int32)\n    return math_ops.divide(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon, name)", "docstring": "Computes the specificity at the given sensitivity.\n\nArgs:\ntp: True positives.\ntn: True negatives.\nfp: False positives.\nfn: False negatives.\nname: The name of the operation.\n\nReturns:\nThe specificity using the aggregated values.", "source": "github-repos"}
{"code": "def create_resource_group(access_token, subscription_id, rgname, location):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '?api-version=', RESOURCE_API])\n    rg_body = {'location': location}\n    body = json.dumps(rg_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a resource group in the specified location.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def arccos(x):\n    if any_symbolic_tensors((x,)):\n        return Arccos().symbolic_call(x)\n    return backend.numpy.arccos(x)", "docstring": "Trigonometric inverse cosine, element-wise.\n\nThe inverse of `cos` so that, if `y = cos(x)`, then `x = arccos(y)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nTensor of the angle of the ray intersecting the unit circle at the given\nx-coordinate in radians `[0, pi]`.\n\nExample:\n>>> x = keras.ops.convert_to_tensor([1, -1])\n>>> keras.ops.arccos(x)\narray([0.0, 3.1415927], dtype=float32)", "source": "github-repos"}
{"code": "def _collapse_state(args: Dict[str, Any]):\n    \n    index = args['index']\n    result = args['result']\n    prob_one = args['prob_one']\n\n    state = _state_shard(args)\n    normalization = np.sqrt(prob_one if result else 1 - prob_one)\n    state *= (_one_projector(args, index) * result +\n              (1 - _one_projector(args, index)) * (1 - result))\n    state /= normalization", "docstring": "Projects state shards onto the appropriate post measurement state.\n\nThis function makes no assumptions about the interpretation of quantum\ntheory.\n\nArgs:\nargs: The args from shard_num_args.", "source": "juraj-google-style"}
{"code": "def Query(self, query, parameters=None):\n    \n    \n    \n    \n    if parameters:\n      self._cursor.execute(query, parameters)\n    else:\n      self._cursor.execute(query)\n\n    return self._cursor.fetchall()", "docstring": "Queries the database file.\n\nArgs:\nquery (str): SQL query.\nparameters (Optional[dict|tuple]): query parameters.\n\nReturns:\nlist[sqlite3.Row]: rows resulting from the query.", "source": "juraj-google-style"}
{"code": "def remove_trunk_group(self, intf, value):\n    string = 'no switchport trunk group {}'.format(value)\n    return self.configure_interface(intf, string)", "docstring": "Removes a specified trunk group to the interface\n\nArgs:\nintf (str): The interface name to remove the trunk group from\nvalue (str): The trunk group value\n\nReturns:\nTrue if the operation as successfully applied otherwise false", "source": "codesearchnet"}
{"code": "def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs):\n    if (not users):\n        raise ValueError('No users supplied')\n    recipients = list()\n    for user in users:\n        recipients.append(user.data['email'])\n    if (configuration is None):\n        configuration = users[0].configuration\n    configuration.emailer().send(recipients, subject, text_body, html_body=html_body, sender=sender, **kwargs)", "docstring": "Email a list of users\n\nArgs:\nusers (List[User]): List of users\nsubject (str): Email subject\ntext_body (str): Plain text email body\nhtml_body (str): HTML email body\nsender (Optional[str]): Email sender. Defaults to SMTP username.\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list.\n**kwargs: See below\nmail_options (List): Mail options (see smtplib documentation)\nrcpt_options (List): Recipient options (see smtplib documentation)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def schedule(self, api_call, bundle_id, bundle_desc, bundling_request, kwargs=None):\n    kwargs = (kwargs or dict())\n    bundle = self._bundle_for(api_call, bundle_id, bundle_desc, bundling_request, kwargs)\n    elts = getattr(bundling_request, bundle_desc.bundled_field)\n    event = bundle.extend(elts)\n    count_threshold = self._options.element_count_threshold\n    if ((count_threshold > 0) and (bundle.element_count >= count_threshold)):\n        self._run_now(bundle.bundle_id)\n    size_threshold = self._options.request_byte_threshold\n    if ((size_threshold > 0) and (bundle.request_bytesize >= size_threshold)):\n        self._run_now(bundle.bundle_id)\n    return event", "docstring": "Schedules bundle_desc of bundling_request as part of bundle_id.\n\nThe returned value an :class:`Event` that\n\n* has a ``result`` attribute that will eventually be set to the result\nthe api call\n* will be used to wait for the response\n* holds the canceller function for canceling this part of the bundle\n\nArgs:\napi_call (callable[[object], object]): the scheduled API call.\nbundle_id (str): identifies the bundle on which the API call should be\nmade.\nbundle_desc (gax.BundleDescriptor): describes the structure of the\nbundled call.\nbundling_request (object): the request instance to use in the API\ncall.\nkwargs (dict): optional, the keyword arguments passed to the API call.\n\nReturns:\nEvent: the scheduled event.", "source": "codesearchnet"}
{"code": "def get_storage(self, id_or_uri):\n    uri = (self.URI + '/{}/storage'.format(extract_id_from_uri(id_or_uri)))\n    return self._client.get(uri)", "docstring": "Get storage details of an OS Volume.\n\nArgs:\nid_or_uri: ID or URI of the OS Volume.\n\nReturns:\ndict: Storage details", "source": "codesearchnet"}
{"code": "def experimental_tpu_test_loop(model, dataset, verbose=0, steps=None, callbacks=None):\n    mode = ModeKeys.TEST\n    current_strategy = model._distribution_strategy\n    iterator = dist_utils.get_iterator(dataset, current_strategy)\n    scope = dist_utils.distributed_scope(strategy=current_strategy, learning_phase=0)\n    scope.__enter__()\n    out_labels = model.metrics_names\n\n    def _test_step_fn(inputs):\n        \n        if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n            inputs, targets = inputs\n        else:\n            targets = None\n        distribute_lib.get_replica_context().merge_call(_build_model, args=(model, mode, inputs, targets))\n        _, outputs, updates, _ = _per_replica_execution_function(dist_utils.get_distributed_model(model, mode), mode)\n        with ops.control_dependencies([updates]):\n            return [array_ops.identity(out) for out in outputs]\n    test_input_data = iterator.get_next()\n    per_replica_outputs = current_strategy.run(_test_step_fn, args=(test_input_data,))\n    output_tensors = {}\n    for label, output in zip(out_labels, per_replica_outputs):\n        if label == 'loss':\n            reduce_op = ds_reduce_util.ReduceOp.SUM\n        else:\n            reduce_op = ds_reduce_util.ReduceOp.MEAN\n        output_tensors[label] = current_strategy.reduce(reduce_op, output, axis=None)\n    test_op = control_flow_ops.group(list(output_tensors.values()))\n    if verbose >= 1:\n        progbar = Progbar(target=steps)\n    if model._compile_distribution:\n        dist_utils._copy_weights_to_distributed_model(model, mode)\n    dist_utils._reset_metrics(model)\n    callbacks = cbks.configure_callbacks(callbacks, model, do_validation=False, epochs=1, steps_per_epoch=steps, verbose=verbose, count_mode='steps', mode=ModeKeys.TEST)\n    callbacks._call_begin_hook(mode)\n    outs = [0.0] * len(model.metrics_names)\n    if steps is not None:\n        target_steps = steps\n    else:\n        raise ValueError('Number of steps could not be inferred from the data, please pass the steps argument.')\n    current_step = 0\n    while current_step < target_steps:\n        batch_logs = {'batch': current_step, 'size': 1}\n        callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)\n        try:\n            _, batch_outs = backend.batch_get_value([test_op, output_tensors])\n        except errors.OutOfRangeError:\n            warning_msg = 'Make sure that your dataset can generate at least `steps` batches (in this case, {} batches).'.format(steps)\n            logging.warning('Your dataset iterator ran out of data; interrupting evaluation. 
' + warning_msg)\n            target_steps = current_step\n            break\n        for i, label in enumerate(model.metrics_names):\n            if i == 0:\n                outs[i] += batch_outs[label]\n            else:\n                outs[i] = batch_outs[label]\n        batch_logs = cbks.make_logs(model, batch_logs, outs, mode)\n        callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)\n        if verbose == 1:\n            progbar.update(current_step + 1)\n        current_step += 1\n    if verbose >= 1:\n        progbar.update(target_steps)\n    callbacks._call_end_hook(mode)\n    scope.__exit__(None, None, None)\n    if len(outs) >= 0:\n        outs[0] /= target_steps\n    if len(outs) == 1:\n        return outs[0]\n    return outs", "docstring": "Test loop for evaluating with TPU tf.distribute.Strategy.\n\nArgs:\nmodel: Keras Model instance.\ndataset: Dataset for input data.\nverbose: Integer, Verbosity mode 0 or 1.\nsteps: Total number of steps (batches of samples)\nbefore declaring predictions finished.\nIgnored with the default value of `None`.\ncallbacks: List of callbacks to be called during training\n\nReturns:\nScalar loss (if the model has a single output and no metrics)\nor list of scalars (if the model has multiple outputs\nand/or metrics). The attribute `model.metrics_names` will give you\nthe display labels for the outputs.", "source": "github-repos"}
{"code": "def __init__(self, batch_size=8, data_dir=None):\n        \n        self._train_data, self._train_labels = None, None\n        self._test_data, self._test_labels = None, None\n        self._batch_size = batch_size\n        self.img_size = IMAGE_SIZE\n        self.num_channels = NUM_CHANNELS\n        self.num_classes = NUM_CLASSES\n        self.train_len = NUM_TRAIN_SAMPLES\n        self.test_len = NUM_TEST_SAMPLES\n        self.data_dir = data_dir or \"./test_data\"\n        self.cifar10_dir = os.path.join(self.data_dir, 'cifar-10-batches-py')\n        self.cifar10_tarball = os.path.join(self.data_dir, CIFAR10_URL.split('/')[-1])\n        self.maybe_download_and_extract()", "docstring": "CIFAR-10 dataset and TF model constructor.\nArgs:\nbatch_size: dataset batch size.", "source": "juraj-google-style"}
{"code": "def _update_bird_conf_file(self, operation):\n        \n        conf_updated = False\n        prefixes = []\n        ip_version = operation.ip_version\n        config_file = self.bird_configuration[ip_version]['config_file']\n        variable_name = self.bird_configuration[ip_version]['variable_name']\n        changes_counter =\\\n            self.bird_configuration[ip_version]['changes_counter']\n        dummy_ip_prefix =\\\n            self.bird_configuration[ip_version]['dummy_ip_prefix']\n\n        try:\n            prefixes = get_ip_prefixes_from_bird(config_file)\n        except OSError as error:\n            self.log.error(\"failed to open Bird configuration %s, this is a \"\n                           \"FATAL error, thus exiting main program\", error)\n            sys.exit(1)\n\n        if not prefixes:\n            self.log.error(\"found empty bird configuration %s, this is a FATAL\"\n                           \" error, thus exiting main program\", config_file)\n            sys.exit(1)\n\n        if dummy_ip_prefix not in prefixes:\n            self.log.warning(\"dummy IP prefix %s wasn't found in bird \"\n                             \"configuration, adding it. This shouldn't have \"\n                             \"happened!\", dummy_ip_prefix)\n            prefixes.insert(0, dummy_ip_prefix)\n            conf_updated = True\n\n        ip_prefixes_without_check = set(prefixes).difference(\n            self.ip_prefixes[ip_version])\n        if ip_prefixes_without_check:\n            self.log.warning(\"found %s IP prefixes in Bird configuration but \"\n                             \"we aren't configured to run health checks on \"\n                             \"them. Either someone modified the configuration \"\n                             \"manually or something went horrible wrong. We \"\n                             \"remove them from Bird configuration\",\n                             ','.join(ip_prefixes_without_check))\n            \n            \n            \n            prefixes[:] = (ip for ip in prefixes\n                           if ip not in ip_prefixes_without_check)\n            conf_updated = True\n\n        \n        if operation.update(prefixes):\n            conf_updated = True\n\n        if not conf_updated:\n            self.log.info('no updates for bird configuration')\n            return conf_updated\n\n        if self.bird_configuration[ip_version]['keep_changes']:\n            archive_bird_conf(config_file, changes_counter)\n\n        \n        \n        tempname = write_temp_bird_conf(\n            dummy_ip_prefix,\n            config_file,\n            variable_name,\n            prefixes\n        )\n        try:\n            os.rename(tempname, config_file)\n        except OSError as error:\n            self.log.critical(\"failed to create Bird configuration %s, this \"\n                              \"is a FATAL error, thus exiting main program\",\n                              error)\n            sys.exit(1)\n        else:\n            self.log.info(\"Bird configuration for IPv%s is updated\",\n                          ip_version)\n\n        \n        if len(prefixes) == 1:\n            self.log.warning(\"Bird configuration doesn't have IP prefixes for \"\n                             \"any of the services we monitor! It means local \"\n                             \"node doesn't receive any traffic\")\n\n        return conf_updated", "docstring": "Update BIRD configuration.\n\nIt adds to or removes IP prefix from BIRD configuration. 
It also\nupdates generation time stamp in the configuration file.\n\nMain program will exit if configuration file cant be read/written.\n\nArguments:\noperation (obj): Either an AddOperation or DeleteOperation object\n\nReturns:\nTrue if BIRD configuration was updated otherwise False.", "source": "juraj-google-style"}
{"code": "def Reinit(self, pid, auto_symfile_loading=True):\n    self.ShutDownGdb()\n    self.__init__(pid, auto_symfile_loading, architecture=self.arch)", "docstring": "Reinitializes the object with a new pid.\n\nSince all modes might need access to this object at any time, this object\nneeds to be long-lived. To make this clear in the API, this shorthand is\nsupplied.\nArgs:\npid: the pid of the target process\nauto_symfile_loading: whether the symbol file should automatically be\nloaded by gdb.", "source": "codesearchnet"}
{"code": "def _update_run_calls_state(self, run_call_count, fetches, feed_dict, is_callable_runner=False):\n    self._run_call_count = run_call_count\n    self._feed_dict = feed_dict\n    self._run_description = cli_shared.get_run_short_description(run_call_count, fetches, feed_dict, is_callable_runner=is_callable_runner)\n    self._run_through_times -= 1\n    self._run_info = cli_shared.get_run_start_intro(run_call_count, fetches, feed_dict, self._tensor_filters, is_callable_runner=is_callable_runner)", "docstring": "Update the internal state with regard to run() call history.\n\nArgs:\nrun_call_count: (int) Number of run() calls that have occurred.\nfetches: a node/tensor or a list of node/tensor that are the fetches of\nthe run() call. This is the same as the fetches argument to the run()\ncall.\nfeed_dict: None of a dict. This is the feed_dict argument to the run()\ncall.\nis_callable_runner: (bool) whether a runner returned by\nSession.make_callable is being run.", "source": "github-repos"}
{"code": "def construct_concept_to_indicator_mapping(n: int=1) -> Dict[(str, List[str])]:\n    df = pd.read_sql_table('concept_to_indicator_mapping', con=engine)\n    gb = df.groupby('Concept')\n    _dict = {k: [get_variable_and_source(x) for x in take(n, v['Indicator'].values)] for (k, v) in gb}\n    return _dict", "docstring": "Create a dictionary mapping high-level concepts to low-level indicators\n\nArgs:\nn: Number of indicators to return\n\nReturns:\nDictionary that maps concept names to lists of indicator names.", "source": "codesearchnet"}
{"code": "def add_cidr_rules(self, rules):\n    session = boto3.session.Session(profile_name=self.env, region_name=self.region)\n    client = session.client('ec2')\n    group_id = get_security_group_id(self.app_name, self.env, self.region)\n    for rule in rules:\n        data = {'DryRun': False, 'GroupId': group_id, 'IpPermissions': [{'IpProtocol': rule['protocol'], 'FromPort': rule['start_port'], 'ToPort': rule['end_port'], 'IpRanges': [{'CidrIp': rule['app']}]}]}\n        self.log.debug('Security Group rule: %s', data)\n        try:\n            client.authorize_security_group_ingress(**data)\n        except botocore.exceptions.ClientError as error:\n            if ('InvalidPermission.Duplicate' in str(error)):\n                self.log.debug('Duplicate rule exist, that is OK.')\n            else:\n                msg = 'Unable to add cidr rules to {}'.format(rule.get('app'))\n                self.log.error(msg)\n                raise SpinnakerSecurityGroupError(msg)\n    return True", "docstring": "Add cidr rules to security group via boto.\n\nArgs:\nrules (list): Allowed Security Group ports and protocols.\n\nReturns:\nTrue: Upon successful completion.\n\nRaises:\nSpinnakerSecurityGroupError: boto3 call failed to add CIDR block to\nSecurity Group.", "source": "codesearchnet"}
{"code": "def delete_user(self, user):\n        \n        self.service.delete_user(\n            user, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Delete the given user.\n\nArgs:\nuser (string): User name.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _get_app_path(url):\n    \n    app_path = urlparse(url).path.rstrip(\"/\")\n    if not app_path.startswith(\"/\"):\n        app_path = \"/\" + app_path\n    return app_path", "docstring": "Extract the app path from a Bokeh server URL\n\nArgs:\nurl (str) :\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def _RunInTransaction(self, function, readonly=False):\n    start_query = 'START TRANSACTION;'\n    if readonly:\n        start_query = 'START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY;'\n    for retry_count in range(_MAX_RETRY_COUNT):\n        with contextlib.closing(self.pool.get()) as connection:\n            try:\n                with contextlib.closing(connection.cursor()) as cursor:\n                    cursor.execute(start_query)\n                ret = function(connection)\n                if (not readonly):\n                    connection.commit()\n                return ret\n            except MySQLdb.OperationalError as e:\n                connection.rollback()\n                if ((retry_count >= _MAX_RETRY_COUNT) or (not _IsRetryable(e))):\n                    raise\n        time.sleep((random.uniform(1.0, 2.0) * math.pow(1.5, retry_count)))\n    raise Exception('Looped ended early - last exception swallowed.')", "docstring": "Runs function within a transaction.\n\nAllocates a connection, begins a transaction on it and passes the connection\nto function.\n\nIf function finishes without raising, the transaction is committed.\n\nIf function raises, the transaction will be rolled back, if a retryable\ndatabase error is raised, the operation may be repeated.\n\nArgs:\nfunction: A function to be run, must accept a single MySQLdb.connection\nparameter.\nreadonly: Indicates that only a readonly (snapshot) transaction is\nrequired.\n\nReturns:\nThe value returned by the last call to function.\n\nRaises: Any exception raised by function.", "source": "codesearchnet"}
{"code": "def get_column(self, column_name, column_type, index, verbose=True):\n        \n        return LazyOpResult(\n            grizzly_impl.get_column(\n                self.expr,\n                self.weld_type,\n                index\n            ),\n            column_type,\n            1\n        )", "docstring": "Summary\n\nArgs:\ncolumn_name (TYPE): Description\ncolumn_type (TYPE): Description\nindex (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def sys_wait_for_event(mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool) -> int:\n    return int(lib.TCOD_sys_wait_for_event(mask, (k.key_p if k else ffi.NULL), (m.mouse_p if m else ffi.NULL), flush))", "docstring": "Wait for an event then return.\n\nIf flush is True then the buffer will be cleared before waiting. Otherwise\neach available event will be returned in the order they're recieved.\n\nArgs:\nmask (int): :any:`Event types` to wait for.\nk (Optional[Key]): A tcod.Key instance which might be updated with\nan event.  Can be None.\nm (Optional[Mouse]): A tcod.Mouse instance which might be updated\nwith an event.  Can be None.\nflush (bool): Clear the event buffer before waiting.\n\n.. deprecated:: 9.3\nUse the :any:`tcod.event.wait` function to wait for events.", "source": "codesearchnet"}
{"code": "def __init__(self, location, optional=False):\n        \n        super(Backtrack, self).__init__(location, optional=optional)\n        self.location = location\n        self.optional = optional\n        self.validate()", "docstring": "Create a new Backtrack block, returning to the given location in the query.\n\nArgs:\nlocation: Location object, specifying where to backtrack to\noptional: optional bool, specifying whether the steps between the current location\nand the location to which Backtrack is returning were optional or not\n\nReturns:\nnew Backtrack object", "source": "juraj-google-style"}
{"code": "def type_based_dispatch_signatures_for(cls):\n\n    def contains_cls(x):\n        \n        if isinstance(x, dict):\n            return any((contains_cls(v) for v in x.values()))\n        elif x is cls:\n            return True\n        elif type_annotations.is_generic_list(x) or type_annotations.is_generic_union(x):\n            type_args = type_annotations.get_generic_type_args(x)\n            return any((contains_cls(arg) for arg in type_args))\n        else:\n            return False\n    result = {}\n    for api, api_signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():\n        for _, signatures in api_signatures.items():\n            filtered = list(filter(contains_cls, signatures))\n            if filtered:\n                result.setdefault(api, []).extend(filtered)\n    return result", "docstring": "Returns dispatch signatures that have been registered for a given class.\n\nThis function is intended for documentation-generation purposes.\n\nArgs:\ncls: The class to search for.  Type signatures are searched recursively, so\ne.g., if `cls=RaggedTensor`, then information will be returned for all\ndispatch targets that have `RaggedTensor` anywhere in their type\nannotations (including nested in `typing.Union` or `typing.List`.)\n\nReturns:\nA `dict` mapping `api` -> `signatures`, where `api` is a TensorFlow API\nfunction; and `signatures` is a list of dispatch signatures for `api`\nthat include `cls`.  (Each signature is a dict mapping argument names to\ntype annotations; see `dispatch_for_api` for more info.)", "source": "github-repos"}
{"code": "def _make_model(self, data, key=None):\n    if (data['deleted'] and (not self.adapter.want_deleted)):\n        raise ObjectDoesNotExist('Deleted object returned')\n    model = self._model_class(self._current_context, _pass_perm_checks=self._pass_perm_checks)\n    model.setattr('key', (ub_to_str(key) if key else ub_to_str(data.get('key'))))\n    model = model.set_data(data, from_db=True)\n    model._initial_data = model.clean_value()\n    return model", "docstring": "Creates a model instance with the given data.\n\nArgs:\ndata: Model data returned from DB.\nkey: Object key\nReturns:\npyoko.Model object.", "source": "codesearchnet"}
{"code": "def get_variation(self, experiment, user_id, attributes, ignore_user_profile=False):\n    if (not experiment_helper.is_experiment_running(experiment)):\n        self.logger.info(('Experiment \"%s\" is not running.' % experiment.key))\n        return None\n    variation = self.config.get_forced_variation(experiment.key, user_id)\n    if variation:\n        return variation\n    variation = self.get_forced_variation(experiment, user_id)\n    if variation:\n        return variation\n    user_profile = UserProfile(user_id)\n    if ((not ignore_user_profile) and self.user_profile_service):\n        try:\n            retrieved_profile = self.user_profile_service.lookup(user_id)\n        except:\n            self.logger.exception(('Unable to retrieve user profile for user \"%s\" as lookup failed.' % user_id))\n            retrieved_profile = None\n        if validator.is_user_profile_valid(retrieved_profile):\n            user_profile = UserProfile(**retrieved_profile)\n            variation = self.get_stored_variation(experiment, user_profile)\n            if variation:\n                return variation\n        else:\n            self.logger.warning('User profile has invalid format.')\n    if (not audience_helper.is_user_in_experiment(self.config, experiment, attributes, self.logger)):\n        self.logger.info(('User \"%s\" does not meet conditions to be in experiment \"%s\".' % (user_id, experiment.key)))\n        return None\n    bucketing_id = self._get_bucketing_id(user_id, attributes)\n    variation = self.bucketer.bucket(experiment, user_id, bucketing_id)\n    if variation:\n        if ((not ignore_user_profile) and self.user_profile_service):\n            try:\n                user_profile.save_variation_for_experiment(experiment.id, variation.id)\n                self.user_profile_service.save(user_profile.__dict__)\n            except:\n                self.logger.exception(('Unable to save user profile for user \"%s\".' % user_id))\n        return variation\n    return None", "docstring": "Top-level function to help determine variation user should be put in.\n\nFirst, check if experiment is running.\nSecond, check if user is forced in a variation.\nThird, check if there is a stored decision for the user and return the corresponding variation.\nFourth, figure out if user is in the experiment by evaluating audience conditions if any.\nFifth, bucket the user and return the variation.\n\nArgs:\nexperiment: Experiment for which user variation needs to be determined.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\nignore_user_profile: True to ignore the user profile lookup. Defaults to False.\n\nReturns:\nVariation user should see. None if user is not in experiment or experiment is not running.", "source": "codesearchnet"}
{"code": "def __init__(self, rnn_class=LSTM, hidden_dims=[50, 50], bidirectional=True, dropout_rate=0.5, **rnn_kwargs):\n        \n        super(StackedRNN, self).__init__(dropout_rate)\n        self.rnn_class = rnn_class\n        self.hidden_dims = hidden_dims\n        self.bidirectional = bidirectional\n        self.rnn_kwargs = rnn_kwargs", "docstring": "Creates a stacked RNN.\n\nArgs:\nrnn_class: The type of RNN to use. (Default Value = LSTM)\nencoder_dims: The number of hidden units of RNN. (Default Value: 50)\nbidirectional: Whether to use bidirectional encoding. (Default Value = True)\n**rnn_kwargs: Additional args for building the RNN.", "source": "juraj-google-style"}
{"code": "def topics(self, exclude_internal_topics=True):\n    topics = set(self._partitions.keys())\n    if exclude_internal_topics:\n        return (topics - self.internal_topics)\n    else:\n        return topics", "docstring": "Get set of known topics.\n\nArguments:\nexclude_internal_topics (bool): Whether records from internal topics\n(such as offsets) should be exposed to the consumer. If set to\nTrue the only way to receive records from an internal topic is\nsubscribing to it. Default True\n\nReturns:\nset: {topic (str), ...}", "source": "codesearchnet"}
{"code": "def process_extra_vars(extra_vars_list, force_json=True):\n    \n    \n    extra_vars = {}\n    extra_vars_yaml = \"\"\n    for extra_vars_opt in extra_vars_list:\n        \n        if extra_vars_opt.startswith(\"@\"):\n            with open(extra_vars_opt[1:], 'r') as f:\n                extra_vars_opt = f.read()\n            \n            opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)\n        else:\n            \n            opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)\n        \n        if any(line.startswith(\"\n            extra_vars_yaml += extra_vars_opt + \"\\n\"\n        elif extra_vars_opt != \"\":\n            extra_vars_yaml += yaml.dump(\n                opt_dict, default_flow_style=False) + \"\\n\"\n        \n        extra_vars.update(opt_dict)\n\n    \n    if not force_json:\n        try:\n            \n            try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)\n            assert type(try_dict) is dict\n            debug.log('Using unprocessed YAML', header='decision', nl=2)\n            return extra_vars_yaml.rstrip()\n        except Exception:\n            debug.log('Failed YAML parsing, defaulting to JSON',\n                      header='decison', nl=2)\n    if extra_vars == {}:\n        return \"\"\n    return json.dumps(extra_vars, ensure_ascii=False)", "docstring": "Returns a string that is valid JSON or YAML and contains all the\nvariables in every extra_vars_opt inside of extra_vars_list.\n\nArgs:\nparse_kv (bool): whether to allow key=value syntax.\nforce_json (bool): if True, always output json.", "source": "juraj-google-style"}
{"code": "def Query(self):\n        \n        if self.name is not None:\n            \n            return\n\n        sb = ScriptBuilder()\n        sb.EmitAppCallWithOperation(self.ScriptHash, 'name')\n        sb.EmitAppCallWithOperation(self.ScriptHash, 'symbol')\n        sb.EmitAppCallWithOperation(self.ScriptHash, 'decimals')\n\n        engine = None\n        try:\n            engine = ApplicationEngine.Run(sb.ToArray(), exit_on_error=True, gas=Fixed8.FromDecimal(10.0), test_mode=False)\n        except Exception as e:\n            pass\n\n        if engine and len(engine.ResultStack.Items) == 3:\n            results = engine.ResultStack.Items\n\n            try:\n                self.name = results[0].GetString()\n                self.symbol = results[1].GetString()\n                self.decimals = results[2].GetBigInteger()\n                if len(self.name) > 1 and self.name != 'Stack Item' \\\n                        and len(self.symbol) > 1 and self.symbol != 'Stack Item' \\\n                        and self.decimals < 10:\n                    return True\n            except Exception as e:\n                pass\n        return False", "docstring": "Query the smart contract for its token information (name, symbol, decimals).\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\n\nReturns:\nNone: if the NEP5Token instance `Name` is already set.\nTrue: if all information was retrieved.\nFalse: if information retrieval failed.", "source": "juraj-google-style"}
{"code": "def send_to_prv_exchange(self, user_id, message=None):\n        \n        exchange = 'prv_%s' % user_id.lower()\n        msg = json.dumps(message, cls=ZEngineJSONEncoder)\n        log.debug(\"Sending following users \\\"%s\\\" exchange:\\n%s \" % (exchange, msg))\n        self.get_channel().publish(exchange=exchange, routing_key='', body=msg)", "docstring": "Send messages through logged in users private exchange.\n\nArgs:\nuser_id string: User key\nmessage dict: Message object", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        return type(self) is type(other) and \\\n            self.p == other.p and \\\n            self.m == other.m and \\\n            np.array_equal(self.reg, other.reg)", "docstring": "Check equivalence between two HyperLogLogs\n\nArgs:\nother (datasketch.HyperLogLog):\n\nReturns:\nbool: True if both have the same internal state.", "source": "juraj-google-style"}
{"code": "def transform_normalize_unicode(source, form, name=None):\n    with ops.name_scope(name, 'TransformNormalizeUnicode', [source]):\n        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)\n        if isinstance(source, tf.SparseTensor):\n            result = tf.SparseTensor(indices=source.indices, values=ops_module.transform_normalize_unicode(source.values, form), dense_shape=source.dense_shape)\n        else:\n            result = ops_module.transform_normalize_unicode(source, form)\n        return result", "docstring": "Normalize unicode strings tensor.\n\nArgs:\nsource: `Tensor` or `SparseTensor` of any shape, strings to normalize.\nform: Scalar value, name of normalization algorithm.\nOne of `\"NFD\"`, `\"NFC\"`, `\"NFKD\"`, `\"NFKC\"`.\nname: A name for the operation (optional).\nReturns:\n`Tensor` or `SparseTensor` of same shape and size as input.", "source": "codesearchnet"}
{"code": "def set_setpoint(self, setpointvalue):\n        \n        _checkSetpointValue( setpointvalue, self.setpoint_max )\n        self.write_register( 4097, setpointvalue, 1)", "docstring": "Set the setpoint.\n\nArgs:\nsetpointvalue (float): Setpoint [most often in degrees]", "source": "juraj-google-style"}
{"code": "def _check_interpret_cell(self, cell, prior_cell, row_index, column_index):\n    changed = False\n    if ((not is_empty_cell(cell)) and (not is_text_cell(cell))):\n        self.flag_change(self.flags, 'interpreted', (row_index, column_index), self.worksheet, self.FLAGS['converted-to-string'])\n        cell = str(cell)\n        changed = True\n    elif is_empty_cell(cell):\n        self.flag_change(self.flags, 'interpreted', (row_index, column_index), self.worksheet, self.FLAGS['copied-title'])\n        cell = prior_cell\n        changed = True\n    return (cell, changed)", "docstring": "Helper function which checks cell type and performs cell translation to strings where\nnecessary.\n\nReturns:\nA tuple of the form '(cell, changed)' where 'changed' indicates if 'cell' differs from\ninput.", "source": "codesearchnet"}
{"code": "def typical_or_extreme_period_type(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `typical_or_extreme_period_type`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `typical_or_extreme_period_type`')\n\n        self._typical_or_extreme_period_type = value", "docstring": "Corresponds to IDD Field `typical_or_extreme_period_type`\n\nArgs:\nvalue (str): value for IDD Field `typical_or_extreme_period_type`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def query_with_attributes(type_to_query, client):\n    session = client.create_session()\n    query = session.query(Attribute.name, Attribute.value, Entity.id).join(Entity).filter((Entity.type == type_to_query))\n    df = client.df_query(query)\n    session.close()\n    df = df.dropna(how='any')\n    df = df.set_index(['id', 'name']).unstack().reset_index()\n    df.columns = (['id'] + list(df.columns.get_level_values(1)[1:]))\n    return df", "docstring": "Query all entities of a specific type, with their attributes\n\nArgs:\ntype_to_query (str): type of entity to query\nclient: DB client to perform query with\n\nReturns:\npandas.DataFrame: table of entities, with attributes as columns", "source": "codesearchnet"}
{"code": "def rank_dated_files(pattern, dir, descending=True):\n    \n    files = glob.glob(op.join(dir, pattern))\n    return sorted(files, reverse=descending)", "docstring": "Search a directory for files that match a pattern. Return an ordered list of these files by filename.\n\nArgs:\npattern: The glob pattern to search for.\ndir: Path to directory where the files will be searched for.\ndescending: Default True, will sort alphabetically by descending order.\n\nReturns:\nlist: Rank-ordered list by filename.", "source": "juraj-google-style"}
{"code": "def update_node(self, node_id, version, node_spec=None):\n    url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))\n    res = self._post_json(url, data=node_spec)\n    self._raise_for_status(res)\n    return True", "docstring": "Update the node's configuration\n\nArgs:\n\nnode_id (string): ID of the node to be updated.\nversion (int): The version number of the node object being\nupdated. This is required to avoid conflicting writes.\nnode_spec (dict): Configuration settings to update. Any values\nnot provided will be removed. Default: ``None``\n\nReturns:\n`True` if the request went through.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> node_spec = {'Availability': 'active',\n'Name': 'node-name',\n'Role': 'manager',\n'Labels': {'foo': 'bar'}\n}\n>>> client.update_node(node_id='24ifsmvkjbyhk', version=8,\nnode_spec=node_spec)", "source": "codesearchnet"}
{"code": "def fetch_git_package(self, config):\n    from git import Repo\n    ref = self.determine_git_ref(config)\n    dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)\n    cached_dir_path = os.path.join(self.package_cache_dir, dir_name)\n    if (not os.path.isdir(cached_dir_path)):\n        logger.debug('Remote repo %s does not appear to have been previously downloaded - starting clone to %s', config['uri'], cached_dir_path)\n        tmp_dir = tempfile.mkdtemp(prefix='stacker')\n        try:\n            tmp_repo_path = os.path.join(tmp_dir, dir_name)\n            with Repo.clone_from(config['uri'], tmp_repo_path) as repo:\n                repo.head.reference = ref\n                repo.head.reset(index=True, working_tree=True)\n            shutil.move(tmp_repo_path, self.package_cache_dir)\n        finally:\n            shutil.rmtree(tmp_dir)\n    else:\n        logger.debug('Remote repo %s appears to have been previously cloned to %s -- bypassing download', config['uri'], cached_dir_path)\n    self.update_paths_and_config(config=config, pkg_dir_name=dir_name)", "docstring": "Make a remote git repository available for local use.\n\nArgs:\nconfig (dict): git config dictionary", "source": "codesearchnet"}
{"code": "def GetEventData(self, data_type):\n    \n    event_data = events.EventData(data_type=data_type)\n    for property_name, property_value in iter(self._properties.items()):\n      if isinstance(property_value, py2to3.BYTES_TYPE):\n        property_value = repr(property_value)\n      setattr(event_data, property_name, property_value)\n\n    return event_data", "docstring": "Retrieves the properties as event data.\n\nArgs:\ndata_type (str): event data type.\n\nReturns:\nEventData: event data.", "source": "juraj-google-style"}
{"code": "def resolve(node, source_info, graphs, include_annotations=True):\n    node = TreeAnnotator(source_info, graphs, include_annotations).visit(node)\n    return node", "docstring": "Resolves the live symbols at the exit of control flow statements.\n\nArgs:\nnode: ast.AST\nsource_info: transformer.SourceInfo\ngraphs: Dict[ast.FunctionDef, cfg.Graph]\ninclude_annotations: Bool, whether type annotations should be included in\nthe analysis.\nReturns:\nast.AST", "source": "github-repos"}
{"code": "def encode_mezzanine_asset(access_token, processor_id, asset_id, output_assetname, json_profile):\n    \n    path = '/Jobs'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    assets_path = ''.join([\"/Assets\", \"('\", asset_id, \"')\"])\n    assets_path_encoded = urllib.parse.quote(assets_path, safe='')\n    endpoint_assets = ''.join([ams_rest_endpoint, assets_path_encoded])\n    body = '{ \\\n    \t\t\"Name\":\"' + output_assetname + '\", \\\n   \t\t\"InputMediaAssets\":[{ \\\n       \t  \t\t\"__metadata\":{ \\\n       \t     \t\t\t\"uri\":\"' + endpoint_assets + '\" \\\n       \t  \t\t} \\\n     \t \t}], \\\n   \t\t\"Tasks\":[{ \\\n       \t  \t\t\"Configuration\":\\'' + json_profile + '\\', \\\n       \t  \t\t\"MediaProcessorId\":\"' + processor_id + '\", \\\n       \t  \t\t\"TaskBody\":\"<?xml version=\\\\\"1.0\\\\\" encoding=\\\\\"utf-16\\\\\"?><taskBody><inputAsset>JobInputAsset(0)</inputAsset><outputAsset assetCreationOptions=\\\\\"0\\\\\" assetName=\\\\\"' + output_assetname + '\\\\\">JobOutputAsset(0)</outputAsset></taskBody>\" \\\n      \t\t}] \\\n\t}'\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Get Media Service Encode Mezanine Asset.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nprocessor_id (str): A Media Service Processor ID.\nasset_id (str): A Media Service Asset ID.\noutput_assetname (str): A Media Service Asset Name.\njson_profile (str): A Media Service JSON Profile.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def is_cpu_target_available(target):\n    return _test_util.IsCPUTargetAvailable(target)", "docstring": "Indicates whether TensorFlow was built with support for a given CPU target.\n\nArgs:\ntarget: The name of the CPU target whose support to check for.\n\nReturns:\nA boolean indicating whether TensorFlow was built with support for the\ngiven CPU target.\n\nThis method should only be used in tests written with `tf.test.TestCase`. A\ntypical usage is to skip tests that should only run with a given target.\n\n>>> class MyTest(tf.test.TestCase):\n...\n...   def test_add_on_aarch64(self):\n...     if not tf.test.is_cpu_target_available('aarch64'):\n...       self.skipTest(\"test is only applicable on AArch64\")\n\n...     @tf.function(jit_compile=True)\n...     def add(x, y):\n...       return tf.math.add(x, y)\n...\n...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)", "source": "github-repos"}
{"code": "def from_file(cls, fp, format_=None, fps=None, **kwargs):\n        \n        if format_ is None:\n            \n            \n            \n            text = fp.read()\n            fragment = text[:10000]\n            format_ = autodetect_format(fragment)\n            fp = io.StringIO(text)\n\n        impl = get_format_class(format_)\n        subs = cls() \n        subs.format = format_\n        subs.fps = fps\n        impl.from_file(subs, fp, format_, fps=fps, **kwargs)\n        return subs", "docstring": "Read subtitle file from file object.\n\nSee :meth:`SSAFile.load()` for full description.\n\nNote:\nThis is a low-level method. Usually, one of :meth:`SSAFile.load()`\nor :meth:`SSAFile.from_string()` is preferable.\n\nArguments:\nfp (file object): A file object, ie. :class:`io.TextIOBase` instance.\nNote that the file must be opened in text mode (as opposed to binary).\n\nReturns:\nSSAFile", "source": "juraj-google-style"}
{"code": "def reverse_ad(node, wrt, preserve_result, check_dims):\n    if (not isinstance(node, gast.FunctionDef)):\n        raise TypeError\n    cfg.forward(node, cfg.Active(wrt))\n    ad = ReverseAD(wrt, preserve_result, check_dims)\n    (pri, adj) = ad.visit(node)\n    mod = gast.Module(body=[pri, adj])\n    mod = annotate.find_stacks(mod)\n    return (mod, ad.required, ad.stack)", "docstring": "Perform reverse-mode AD on an AST.\n\nThis function analyses the AST to determine which variables are active and\nproceeds by taking the naive derivative. Before returning the primal and\nadjoint it annotates push and pop statements as such.\n\nArgs:\nnode: A `FunctionDef` AST node.\nwrt: A tuple of argument indices with respect to which we take the\nderivative.\npreserve_result: A boolean indicating whether the generated\nderivative function should also return the original return value.\ncheck_dims: A boolean indicating whether the seed derivatives should have\ntheir dimensions checked to match their primal counterpart.\n\n\nReturns:\nmod: A `Module` node containing the naive primal and adjoint of the\nfunction which can be fed to the `split` and `joint` functions.\nrequired: A list of tuples of functions and argument indices. These\nfunctions were called by the function but did not have an adjoint.", "source": "codesearchnet"}
{"code": "def get(self, key, default) -> Union[(Uniform, UniformBlock, Subroutine, Attribute, Varying)]:\n    return self._members.get(key, default)", "docstring": "Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.\n\nArgs:\ndefault: This is the value to be returned in case key does not exist.\n\nReturns:\n:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,\n:py:class:`Attribute` or :py:class:`Varying`", "source": "codesearchnet"}
{"code": "def trans_v(self, structure):\n        \n        nsites = structure.num_sites\n        volume = structure.volume\n        natoms = structure.composition.num_atoms\n        weight = float(structure.composition.weight)\n        mass_density = 1.6605e3 * nsites * weight / (natoms * volume)\n        if self.g_vrh < 0:\n            raise ValueError(\"k_vrh or g_vrh is negative, \"\n                             \"sound velocity is undefined\")\n        return (1e9 * self.g_vrh / mass_density) ** 0.5", "docstring": "Calculates transverse sound velocity (in SI units) using the\nVoigt-Reuss-Hill average bulk modulus\n\nArgs:\nstructure: pymatgen structure object\n\nReturns: transverse sound velocity (in SI units)", "source": "juraj-google-style"}
{"code": "def assert_key_has_value(self, key, caller):\n    assert key, 'key parameter must be specified.'\n    self.assert_key_exists(key, caller)\n    if (self[key] is None):\n        raise KeyInContextHasNoValueError(f\"context['{key}'] must have a value for {caller}.\")", "docstring": "Assert that context contains key which also has a value.\n\nArgs:\nkey: validate this key exists in context AND has a value that isn't\nNone.\ncaller: string. calling function name - this used to construct\nerror messages\n\nRaises:\nKeyNotInContextError: Key doesn't exist\nKeyInContextHasNoValueError: context[key] is None\nAssertionError: if key is None", "source": "codesearchnet"}
{"code": "def remove_object(self, file_path):\n        \n        file_path = self.absnormpath(self._original_path(file_path))\n        if self._is_root_path(file_path):\n            self.raise_os_error(errno.EBUSY, file_path)\n        try:\n            dirname, basename = self.splitpath(file_path)\n            target_directory = self.resolve(dirname)\n            target_directory.remove_entry(basename)\n        except KeyError:\n            self.raise_io_error(errno.ENOENT, file_path)\n        except AttributeError:\n            self.raise_io_error(errno.ENOTDIR, file_path)", "docstring": "Remove an existing file or directory.\n\nArgs:\nfile_path: The path to the file relative to self.\n\nRaises:\nIOError: if file_path does not correspond to an existing file, or\nif part of the path refers to something other than a directory.\nOSError: if the directory is in use (eg, if it is '/').", "source": "juraj-google-style"}
{"code": "def WriteScanContext(self, scan_context, scan_step=None):\n    \n    if scan_step is not None:\n      print('Scan step: {0:d}'.format(scan_step))\n\n    print('Source type\\t\\t: {0:s}'.format(scan_context.source_type))\n    print('')\n\n    scan_node = scan_context.GetRootScanNode()\n    self.WriteScanNode(scan_context, scan_node)\n    print('')", "docstring": "Writes the source scanner context to stdout.\n\nArgs:\nscan_context (SourceScannerContext): the source scanner context.\nscan_step (Optional[int]): the scan step, where None represents no step.", "source": "juraj-google-style"}
{"code": "def id_pools_ipv4_ranges(self):\n    if (not self.__id_pools_ipv4_ranges):\n        self.__id_pools_ipv4_ranges = IdPoolsIpv4Ranges(self.__connection)\n    return self.__id_pools_ipv4_ranges", "docstring": "Gets the IdPoolsIpv4Ranges API client.\n\nReturns:\nIdPoolsIpv4Ranges:", "source": "codesearchnet"}
{"code": "def matches_hostname(cls, certificate: cryptography.x509.Certificate, hostname: str) -> None:\n    certificate_names = {'subject': (tuple([('commonName', name) for name in cls.get_common_names(certificate.subject)]),), 'subjectAltName': tuple([('DNS', name) for name in cls.get_dns_subject_alternative_names(certificate)])}\n    ssl.match_hostname(certificate_names, hostname)", "docstring": "Verify that the certificate was issued for the given hostname.\n\nRaises:\nCertificateError: If the certificate was not issued for the supplied hostname.", "source": "codesearchnet"}
{"code": "def set_direct(self, address_value_dict):\n    with self._lock:\n        for (address, value) in address_value_dict.items():\n            self._validate_write(address)\n            if (address in self._state):\n                self._state[address].set_result(result=value)\n            else:\n                fut = _ContextFuture(address=address)\n                self._state[address] = fut\n                fut.set_result(result=value)", "docstring": "Called in the context manager's set method to either overwrite the\nvalue for an address, or create a new future and immediately set a\nvalue in the future.\n\nArgs:\naddress_value_dict (dict of str:bytes): The unique full addresses\nwith bytes to set at that address.\n\nRaises:\nAuthorizationException", "source": "codesearchnet"}
{"code": "def train(self, X_train, Y_train, X_test, Y_test):\n        \n\n        while True:\n            print(1)\n            time.sleep(1)\n            if random.randint(0, 9) >= 5:\n                break", "docstring": "Train and validate the LR on a train and test dataset\n\nArgs:\nX_train (np.array): Training data\nY_train (np.array): Training labels\nX_test (np.array): Test data\nY_test (np.array): Test labels", "source": "juraj-google-style"}
{"code": "def _reference_info(references):\n    document_paths = []\n    reference_map = {}\n    for reference in references:\n        doc_path = reference._document_path\n        document_paths.append(doc_path)\n        reference_map[doc_path] = reference\n    return (document_paths, reference_map)", "docstring": "Get information about document references.\n\nHelper for :meth:`~.firestore_v1beta1.client.Client.get_all`.\n\nArgs:\nreferences (List[.DocumentReference, ...]): Iterable of document\nreferences.\n\nReturns:\nTuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of\n\n* fully-qualified documents paths for each reference in ``references``\n* a mapping from the paths to the original reference. (If multiple\n``references`` contains multiple references to the same document,\nthat key will be overwritten in the result.)", "source": "codesearchnet"}
{"code": "def attach(self, observer):\n        \n        if not observer in self._observers:\n            self._observers.append(observer)\n        return self", "docstring": "Attach an observer.\n\nArgs:\nobserver (func): A function to be called when new messages arrive\n\nReturns:\n:class:`Stream`. Current instance to allow chaining", "source": "juraj-google-style"}
{"code": "def dew_point_temperature(self, value=99.9):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dew_point_temperature`'.format(value))\n        if (value <= (- 70.0)):\n            raise ValueError('value need to be greater -70.0 for field `dew_point_temperature`')\n        if (value >= 70.0):\n            raise ValueError('value need to be smaller 70.0 for field `dew_point_temperature`')\n    self._dew_point_temperature = value", "docstring": "Corresponds to IDD Field `dew_point_temperature`\n\nArgs:\nvalue (float): value for IDD Field `dew_point_temperature`\nUnit: C\nvalue > -70.0\nvalue < 70.0\nMissing value: 99.9\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_changeset(changeset):\n    \n    url = 'https:\n        changeset\n        )\n    return ET.fromstring(requests.get(url).content)", "docstring": "Get the changeset using the OSM API and return the content as a XML\nElementTree.\n\nArgs:\nchangeset: the id of the changeset.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 title: Text,\n                 value: Optional[Any] = None,\n                 disabled: Optional[Text] = None,\n                 checked: bool = False,\n                 shortcut_key: Optional[Text] = None) -> None:\n        \n\n        self.disabled = disabled\n        self.value = value if value is not None else title\n        self.title = title\n        self.checked = checked\n\n        if shortcut_key is not None:\n            self.shortcut_key = str(shortcut_key)\n        else:\n            self.shortcut_key = None", "docstring": "Create a new choice.\n\nArgs:\ntitle: Text shown in the selection list.\n\nvalue: Value returned, when the choice is selected.\n\ndisabled: If set, the choice can not be selected by the user. The\nprovided text is used to explain, why the selection is\ndisabled.\n\nchecked: Preselect this choice when displaying the options.\n\nshortcut_key: Key shortcut used to select this item.", "source": "juraj-google-style"}
{"code": "def vgg_layer(inputs,\n              nout,\n              kernel_size=3,\n              activation=tf.nn.leaky_relu,\n              padding=\"SAME\",\n              is_training=True,\n              has_batchnorm=False,\n              scope=None):\n  \n  with tf.variable_scope(scope):\n    net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding,\n                     activation=None, name=\"conv\")\n    if has_batchnorm:\n      net = tfl.batch_normalization(net, training=is_training, name=\"bn\")\n    net = activation(net)\n  return net", "docstring": "A layer of VGG network with batch norm.\n\nArgs:\ninputs: image tensor\nnout: number of output channels\nkernel_size: size of the kernel\nactivation: activation function\npadding: padding of the image\nis_training: whether it is training mode or not\nhas_batchnorm: whether batchnorm is applied or not\nscope: variable scope of the op\nReturns:\nnet: output of layer", "source": "juraj-google-style"}
{"code": "def _Aff4Read(aff4_obj, offset, length):\n  \n  length = length or (_Aff4Size(aff4_obj) - offset)\n\n  aff4_obj.Seek(offset)\n  return aff4_obj.Read(length)", "docstring": "Reads contents of given AFF4 file.\n\nArgs:\naff4_obj: An AFF4 stream instance to retrieve contents for.\noffset: An offset to start the reading from.\nlength: A number of bytes to read. Reads the whole file if 0.\n\nReturns:\nContents of specified AFF4 stream.\n\nRaises:\nTypeError: If `aff4_obj` is not an instance of AFF4 stream.", "source": "juraj-google-style"}
{"code": "def _force_edges_active_move(self, state: _STATE) -> _STATE:\n        \n        for _ in range(self._rand.randint(1, 4)):\n            state = self._force_edge_active_move(state)\n        return state", "docstring": "Move function which repeats _force_edge_active_move a few times.\n\nArgs:\nstate: Search state, not mutated.\n\nReturns:\nNew search state which consists of incremental changes of the\noriginal state.", "source": "juraj-google-style"}
{"code": "def parse_query(query_str):\n    \n    def _generate_match_all_fields_query():\n        \n        stripped_query_str = ' '.join(query_str.replace(':', ' ').split())\n        return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}\n\n    if not isinstance(query_str, six.text_type):\n        query_str = six.text_type(query_str.decode('utf-8'))\n\n    logger.info('Parsing: \"' + query_str + '\\\".')\n\n    parser = StatefulParser()\n    rst_visitor = RestructuringVisitor()\n    es_visitor = ElasticSearchVisitor()\n\n    try:\n        unrecognized_text, parse_tree = parser.parse(query_str, Query)\n\n        if unrecognized_text:  \n            msg = 'Parser returned unrecognized text: \"' + unrecognized_text + \\\n                  '\" for query: \"' + query_str + '\".'\n\n            if query_str == unrecognized_text and parse_tree is None:\n                \n                logger.warn(msg)\n                return _generate_match_all_fields_query()\n            else:\n                msg += 'Continuing with recognized parse tree.'\n            logger.warn(msg)\n\n    except SyntaxError as e:\n        logger.warn('Parser syntax error (' + six.text_type(e) + ') with query: \"' + query_str +\n                    '\". Continuing with a match_all with the given query.')\n        return _generate_match_all_fields_query()\n\n    \n    try:\n        restructured_parse_tree = parse_tree.accept(rst_visitor)\n        logger.debug('Parse tree: \\n' + emit_tree_format(restructured_parse_tree))\n\n    except Exception as e:\n        logger.exception(\n            RestructuringVisitor.__name__ + \" crashed\" + (\": \" + six.text_type(e) + \".\") if six.text_type(e) else '.'\n        )\n        return _generate_match_all_fields_query()\n\n    try:\n        es_query = restructured_parse_tree.accept(es_visitor)\n    except Exception as e:\n        logger.exception(\n            ElasticSearchVisitor.__name__ + \" crashed\" + (\": \" + six.text_type(e) + \".\") if six.text_type(e) else '.'\n        )\n        return _generate_match_all_fields_query()\n\n    if not es_query:\n        \n        return _generate_match_all_fields_query()\n\n    return es_query", "docstring": "Drives the whole logic, by parsing, restructuring and finally, generating an ElasticSearch query.\n\nArgs:\nquery_str (six.text_types): the given query to be translated to an ElasticSearch query\n\nReturns:\nsix.text_types: Return an ElasticSearch query.\n\nNotes:\nIn case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value, being the\nquery_str argument.", "source": "juraj-google-style"}
{"code": "def default(self):\n    cmd = self.command_builder('ntp source', default=True)\n    return self.configure(cmd)", "docstring": "Default the NTP source entry from the node.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def charges(self, num, charge_id=None, **kwargs):\n        \n        baseuri = self._BASE_URI + \"company/{}/charges\".format(num)\n        if charge_id is not None:\n            baseuri += \"/{}\".format(charge_id)\n            res = self.session.get(baseuri, params=kwargs)\n        else:\n            res = self.session.get(baseuri, params=kwargs)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for charges against a company by company number.\n\nArgs:\nnum (str): Company number to search on.\ntransaction (Optional[str]): Filing record number.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "juraj-google-style"}
{"code": "def download_archive_artifact_bundle(self, id_or_uri, file_path):\n        \n\n        uri = self.BACKUP_ARCHIVE_PATH + '/' + extract_id_from_uri(id_or_uri)\n        return self._client.download(uri, file_path)", "docstring": "Downloads an archive for the Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\nfile_path(str): Destination file path.\n\nReturns:\nbool: Successfully downloaded.", "source": "juraj-google-style"}
{"code": "def stop_gradient(variables):\n    if isinstance(variables, (list, tuple)):\n        return map(array_ops.stop_gradient, variables)\n    return array_ops.stop_gradient(variables)", "docstring": "Returns `variables` but with zero gradient w.r.t. every other variable.\n\nArgs:\nvariables: Tensor or list of tensors to consider constant with respect\nto any other variable.\n\n\nReturns:\nA single tensor or a list of tensors (depending on the passed argument)\nthat has no gradient with respect to any other variable.", "source": "github-repos"}
{"code": "def get(self, key):\n    key = self._service_key(key)\n    return self._service_ops['get'](key)", "docstring": "Return the object in `service` named by `key` or None.\n\nArgs:\nkey: Key naming the object to retrieve.\n\nReturns:\nobject or None", "source": "codesearchnet"}
{"code": "def Print(self, x, data, message, **kwargs):\n    tf.logging.info('PlacementMeshImpl::Print')\n    new_slices = x.tensor_list[:]\n    with tf.device(self._devices[0]):\n        new_slices[0] = tf.Print(new_slices[0], [t for d in data for t in d.tensor_list], message, **kwargs)\n    return self.LaidOutTensor(new_slices)", "docstring": "call tf.Print.\n\nArgs:\nx: a LaidOutTensor\ndata: a list of LaidOutTensor\nmessage: a string\n**kwargs: keyword arguments to tf.print\nReturns:\na LaidOutTensor", "source": "codesearchnet"}
{"code": "def obs_space_info(obs_space):\n    if isinstance(obs_space, gym.spaces.Dict):\n        assert isinstance(obs_space.spaces, OrderedDict)\n        subspaces = obs_space.spaces\n    else:\n        subspaces = {None: obs_space}\n    keys = []\n    shapes = {}\n    dtypes = {}\n    for (key, box) in subspaces.items():\n        keys.append(key)\n        shapes[key] = box.shape\n        dtypes[key] = box.dtype\n    return (keys, shapes, dtypes)", "docstring": "Get dict-structured information about a gym.Space.\n\nReturns:\nA tuple (keys, shapes, dtypes):\nkeys: a list of dict keys.\nshapes: a dict mapping keys to shapes.\ndtypes: a dict mapping keys to dtypes.", "source": "codesearchnet"}
{"code": "def is_layer_block(node):\n    match = LAYER_SUFFIX_RE.match(node.get('module_path', ''))\n    if not match or not node.get('children'):\n        return False\n    number = match.group(2)\n    return any((f'.{number}.' in child.get('module_path', '') for child in node['children']))", "docstring": "Checks whether a node represents a layer block with submodules.\n\nArgs:\nnode (`dict`): A node from the call tree.\n\nReturns:\n`bool`: Whether the node is a layer block.", "source": "github-repos"}
{"code": "def traverse_postorder(self, leaves=True, internal=True):\n        \n        for node in self.root.traverse_postorder(leaves=leaves, internal=internal):\n            yield node", "docstring": "Perform a postorder traversal of the ``Node`` objects in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def update_offset(self, new_offset):\n    self.offset = new_offset\n    self.data_points = self._data_points[self.offset:]\n    self.timestamps = self._timestamps[self.offset:]", "docstring": "Updates how many data points to skip in caculations.\n\nAlways use this function to update offset instead of directly setting\nself.offset.\n\nArgs:\nnew_offset: The new offset.", "source": "codesearchnet"}
{"code": "def join(*paths):\n    absolute = False\n    relpaths = []\n    for p in paths:\n        if p:\n            if (p[0] == '/'):\n                del relpaths[:]\n                absolute = True\n            relpaths.append(p)\n    path = normpath('/'.join(relpaths))\n    if absolute:\n        path = abspath(path)\n    return path", "docstring": "Join any number of paths together.\n\nArguments:\n*paths (str): Paths to join, given as positional arguments.\n\nReturns:\nstr: The joined path.\n\nExample:\n>>> join('foo', 'bar', 'baz')\n'foo/bar/baz'\n>>> join('foo/bar', '../baz')\n'foo/baz'\n>>> join('foo/bar', '/baz')\n'/baz'", "source": "codesearchnet"}
{"code": "def _PushParameterListState(self, newline):\n    current = self.next_token\n    previous = current.previous_token\n    if _IsFunctionDefinition(previous):\n        first_param_column = previous.total_length + self.stack[-2].indent\n        self.param_list_stack.append(object_state.ParameterListState(previous, newline, first_param_column))", "docstring": "Push a new parameter list state for a function definition.\n\nArgs:\nnewline: Whether the current token is to be added on a newline.", "source": "github-repos"}
{"code": "def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):\n    \n    from PIL import Image\n    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)\n    im = Image.fromarray(pre_pillow_float_img_process(grid))\n    im.save(filename)", "docstring": "Save a given Tensor into an image file.\n\nArgs:\ntensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\nsaves the tensor as a grid of images by calling ``make_grid``.\n**kwargs: Other arguments are documented in ``make_grid``.", "source": "juraj-google-style"}
{"code": "def process_remote_sources(raw_config, environment=None):\n    config = yaml.safe_load(raw_config)\n    if (config and config.get('package_sources')):\n        processor = SourceProcessor(sources=config['package_sources'], stacker_cache_dir=config.get('stacker_cache_dir'))\n        processor.get_package_sources()\n        if processor.configs_to_merge:\n            for i in processor.configs_to_merge:\n                logger.debug('Merging in remote config \"%s\"', i)\n                remote_config = yaml.safe_load(open(i))\n                config = merge_map(remote_config, config)\n            if (not environment):\n                environment = {}\n            return render(str(config), environment)\n    return raw_config", "docstring": "Stage remote package sources and merge in remote configs.\n\nArgs:\nraw_config (str): the raw stacker configuration string.\nenvironment (dict, optional): any environment values that should be\npassed to the config\n\nReturns:\nstr: the raw stacker configuration string", "source": "codesearchnet"}
{"code": "def commits(self, **kwargs):\n    path = ('%s/%s/commits' % (self.manager.path, self.get_id()))\n    data_list = self.manager.gitlab.http_list(path, as_list=False, **kwargs)\n    manager = ProjectCommitManager(self.manager.gitlab, parent=self.manager._parent)\n    return RESTObjectList(manager, ProjectCommit, data_list)", "docstring": "List the merge request commits.\n\nArgs:\nall (bool): If True, return all the items, without pagination\nper_page (int): Number of items to retrieve per request\npage (int): ID of the page to return (starts with page 1)\nas_list (bool): If set to False and no pagination option is\ndefined, return a generator instead of a list\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: The list of commits", "source": "codesearchnet"}
{"code": "def savefits(cube, fitsname, **kwargs):\n    \n    \n    dropdeg = kwargs.pop('dropdeg', False)\n    ndim    = len(cube.dims)\n\n    \n    FITSINFO = get_data('decode', 'data/fitsinfo.yaml')\n    hdrdata = yaml.load(FITSINFO, dc.utils.OrderedLoader)\n\n    \n    if ndim == 2:\n        header = fits.Header(hdrdata['dcube_2d'])\n        data   = cube.values.T\n    elif ndim == 3:\n        if dropdeg:\n            header = fits.Header(hdrdata['dcube_2d'])\n            data   = cube.values[:, :, 0].T\n        else:\n            header = fits.Header(hdrdata['dcube_3d'])\n\n            kidfq     = cube.kidfq.values\n            freqrange = ~np.isnan(kidfq)\n            orderedfq = np.argsort(kidfq[freqrange])\n            newcube   = cube[:, :, orderedfq]\n            data      = newcube.values.T\n    else:\n        raise TypeError(ndim)\n\n    \n    if cube.coordsys == 'AZEL':\n        header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'})\n    elif cube.coordsys == 'RADEC':\n        header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)})\n    else:\n        pass\n    header.update({'CRVAL1': float(cube.x[0]),\n                   'CDELT1': float(cube.x[1] - cube.x[0]),\n                   'CRVAL2': float(cube.y[0]),\n                   'CDELT2': float(cube.y[1] - cube.y[0]),\n                   'DATE': datetime.now(timezone('UTC')).isoformat()})\n    if (ndim == 3) and (not dropdeg):\n        header.update({'CRVAL3': float(newcube.kidfq[0]),\n                       'CDELT3': float(newcube.kidfq[1] - newcube.kidfq[0])})\n\n    fitsname = str(Path(fitsname).expanduser())\n    fits.writeto(fitsname, data, header, **kwargs)\n    logger.info('{} has been created.'.format(fitsname))", "docstring": "Save a cube to a 3D-cube FITS file.\n\nArgs:\ncube (xarray.DataArray): Cube to be saved.\nfitsname (str): Name of output FITS file.\nkwargs (optional): Other arguments common with astropy.io.fits.writeto().", "source": "juraj-google-style"}
{"code": "def scheme(name, bins, bin_method='quantiles'):\n    return {'name': name, 'bins': bins, 'bin_method': (bin_method if isinstance(bins, int) else '')}", "docstring": "Return a custom scheme based on CARTOColors.\n\nArgs:\nname (str): Name of a CARTOColor.\nbins (int or iterable): If an `int`, the number of bins for classifying\ndata. CARTOColors have 7 bins max for quantitative data, and 11 max\nfor qualitative data. If `bins` is a `list`, it is the upper range\nfor classifying data. E.g., `bins` can be of the form ``(10, 20, 30,\n40, 50)``.\nbin_method (str, optional): One of methods in :obj:`BinMethod`.\nDefaults to ``quantiles``. If `bins` is an interable, then that is\nthe bin method that will be used and this will be ignored.\n\n.. Warning::\n\nInput types are particularly sensitive in this function, and little\nfeedback is given for errors. ``name`` and ``bin_method`` arguments\nare case-sensitive.", "source": "codesearchnet"}
{"code": "def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):\n    \n    codec = cls(**kwargs)\n    strings = [codec.encode(g, triples=triples) for g in graphs]\n    return '\\n\\n'.join(strings)", "docstring": "Serialize each graph in *graphs* to the PENMAN format.\n\nArgs:\ngraphs: an iterable of Graph objects\ntriples: if True, write graphs as triples instead of as PENMAN\nReturns:\nthe string of serialized graphs", "source": "juraj-google-style"}
{"code": "def get(object_ids):\n    if isinstance(object_ids, (tuple, np.ndarray)):\n        return ray.get(list(object_ids))\n    elif isinstance(object_ids, dict):\n        keys_to_get = [k for (k, v) in object_ids.items() if isinstance(v, ray.ObjectID)]\n        ids_to_get = [v for (k, v) in object_ids.items() if isinstance(v, ray.ObjectID)]\n        values = ray.get(ids_to_get)\n        result = object_ids.copy()\n        for (key, value) in zip(keys_to_get, values):\n            result[key] = value\n        return result\n    else:\n        return ray.get(object_ids)", "docstring": "Get a single or a collection of remote objects from the object store.\n\nThis method is identical to `ray.get` except it adds support for tuples,\nndarrays and dictionaries.\n\nArgs:\nobject_ids: Object ID of the object to get, a list, tuple, ndarray of\nobject IDs to get or a dict of {key: object ID}.\n\nReturns:\nA Python object, a list of Python objects or a dict of {key: object}.", "source": "codesearchnet"}
{"code": "def change_extension(self, filepath, new_extension):\n        \n        filename, ext = os.path.splitext(filepath)\n        return '.'.join([filename, new_extension])", "docstring": "Change final filename extension.\n\nArgs:\nfilepath (str): A file path (relative or absolute).\nnew_extension (str): New extension name (without leading dot) to\napply.\n\nReturns:\nstr: Filepath with new extension.", "source": "juraj-google-style"}
{"code": "def create_branch(profile, name, branch_off):\n    \n    branch_off_sha = get_branch_sha(profile, branch_off)\n    ref = \"heads/\" + name\n    data = refs.create_ref(profile, ref, branch_off_sha)\n    return data", "docstring": "Create a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the new branch.\n\nbranch_off\nThe name of a branch to create the new branch off of.\n\nReturns:\nA dict with data about the new branch.", "source": "juraj-google-style"}
{"code": "def include(filename, hosts=False, when=True):\n    if (not pyinfra.is_cli):\n        raise PyinfraError('local.include is only available in CLI mode.')\n    if (not when):\n        return\n    if (hosts is not False):\n        hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory)\n        if (pseudo_host not in hosts):\n            return\n    if pseudo_state.deploy_dir:\n        filename = path.join(pseudo_state.deploy_dir, filename)\n    frameinfo = get_caller_frameinfo()\n    logger.debug('Including local file: {0}'.format(filename))\n    try:\n        from pyinfra_cli.config import extract_file_config\n        from pyinfra_cli.util import exec_file\n        config_data = extract_file_config(filename)\n        kwargs = {key.lower(): value for (key, value) in six.iteritems(config_data) if (key in ['SUDO', 'SUDO_USER', 'SU_USER', 'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS'])}\n        with pseudo_state.deploy(filename, kwargs, None, frameinfo.lineno, in_deploy=False):\n            exec_file(filename)\n    except IOError as e:\n        raise PyinfraError('Could not include local file: {0}\\n{1}'.format(filename, e))", "docstring": "Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``\ndirectory.\n\nArgs:\nhosts (string, list): group name or list of hosts to limit this include to\nwhen (bool): indicate whether to trigger operations in this include", "source": "codesearchnet"}
{"code": "def _CreateShapesFolder(self, schedule, doc):\n    if (not schedule.GetShapeList()):\n        return None\n    shapes_folder = self._CreateFolder(doc, 'Shapes')\n    shapes = list(schedule.GetShapeList())\n    shapes.sort(key=(lambda x: x.shape_id))\n    for shape in shapes:\n        placemark = self._CreatePlacemark(shapes_folder, shape.shape_id)\n        self._CreateLineStringForShape(placemark, shape)\n        if self.shape_points:\n            self._CreateShapePointFolder(shapes_folder, shape)\n    return shapes_folder", "docstring": "Create a KML Folder containing all the shapes in a schedule.\n\nThe folder contains a placemark for each shape. If there are no shapes in\nthe schedule then the folder is not created and None is returned.\n\nArgs:\nschedule: The transitfeed.Schedule instance.\ndoc: The KML Document ElementTree.Element instance.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "codesearchnet"}
{"code": "def do_import(self, keys, values, name=None):\n    with tf.name_scope(name or '%s_lookup_table_import' % self._name):\n        op = gen_simple_hash_table_op.examples_simple_hash_table_import(self.resource_handle, keys, values)\n        return op", "docstring": "Import all `key` and `value` pairs.\n\n(Note that \"import\" is a python reserved word, so it cannot be the name of\na method.)\n\nArgs:\nkeys: Tensor of all keys.\nvalues: Tensor of all values.\nname: A name for the operation (optional).\n\nReturns:\nA tuple of two tensors, the first with the `keys` and the second with\nthe `values`.", "source": "github-repos"}
{"code": "def _find_countour_yaml(start, checked, names=None):\n    \n    extensions = []\n\n    if names:\n        for name in names:\n            if not os.path.splitext(name)[1]:\n                extensions.append(name + \".yaml\")\n                extensions.append(name + \".yml\")\n\n    yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions\n    directory = start\n\n    while directory not in checked:\n        checked.add(directory)\n\n        for fs_yaml_name in yaml_names:\n            yaml_path = os.path.join(directory, fs_yaml_name)\n\n            if os.path.exists(yaml_path):\n                return yaml_path\n\n        directory = os.path.dirname(directory)\n\n    return", "docstring": "Traverse the directory tree identified by start\nuntil a directory already in checked is encountered or the path\nof countour.yaml is found.\n\nChecked is present both to make the loop termination easy\nto reason about and so the same directories do not get\nrechecked\n\nArgs:\nstart: the path to start looking in and work upward from\nchecked: the set of already checked directories\n\nReturns:\nthe path of the countour.yaml file or None if it is not found", "source": "juraj-google-style"}
{"code": "def _find_address_range(addresses):\n    first = last = addresses[0]\n    last_index = 0\n    for ip in addresses[1:]:\n        if (ip._ip == (last._ip + 1)):\n            last = ip\n            last_index += 1\n        else:\n            break\n    return (first, last, last_index)", "docstring": "Find a sequence of addresses.\n\nArgs:\naddresses: a list of IPv4 or IPv6 addresses.\n\nReturns:\nA tuple containing the first and last IP addresses in the sequence,\nand the index of the last IP address in the sequence.", "source": "codesearchnet"}
{"code": "def _init_from_proto(self, variable_def, import_scope=None):\n    assert isinstance(variable_def, variable_pb2.VariableDef)\n    g = ops.get_default_graph()\n    self._variable = g.as_graph_element(ops.prepend_name_scope(variable_def.variable_name, import_scope=import_scope))\n    self._name = self._variable.name\n    self._initializer_op = g.as_graph_element(ops.prepend_name_scope(variable_def.initializer_name, import_scope=import_scope))\n    if hasattr(variable_def, 'initial_value_name') and variable_def.initial_value_name:\n        self._initial_value = g.as_graph_element(ops.prepend_name_scope(variable_def.initial_value_name, import_scope=import_scope))\n    else:\n        self._initial_value = None\n    synchronization, aggregation, trainable = variables.validate_synchronization_aggregation_trainable(variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)\n    self._synchronization = synchronization\n    self._aggregation = aggregation\n    self._trainable = trainable\n    self._snapshot = g.as_graph_element(ops.prepend_name_scope(variable_def.snapshot_name, import_scope=import_scope))\n    if variable_def.HasField('save_slice_info_def'):\n        self._save_slice_info = variables.Variable.SaveSliceInfo(save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope)\n    else:\n        self._save_slice_info = None\n    self._caching_device = None\n    self._constraint = None", "docstring": "Recreates the Variable object from a `VariableDef` protocol buffer.\n\nArgs:\nvariable_def: `VariableDef` protocol buffer, describing a variable whose\nnodes already exists in the graph.\nimport_scope: Optional `string`. Name scope to add.", "source": "github-repos"}
{"code": "def _create_uninitialized_mirrored_tpu_replicated_variables(**kwargs):\n    dtype = kwargs.get('dtype', None)\n    shape = kwargs.get('shape', None)\n    initial_value = kwargs.get('initial_value', None)\n    if initial_value is None:\n        return _create_mirrored_tpu_replicated_variables(**kwargs)\n    with maybe_init_scope():\n        if initial_value is not None:\n            if callable(initial_value):\n                initial_value = initial_value()\n            initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n            kwargs['initial_value'] = initial_value\n            if dtype is None:\n                kwargs['dtype'] = kwargs['initial_value'].dtype\n            if shape is None:\n                kwargs['shape'] = kwargs['initial_value'].shape\n    mirrored_replicated_var_list = []\n    for replica_id in range(num_replicas):\n        replicated_var_list = []\n        for logic_core_id in range(num_cores_per_replica):\n            with ops.device(self._tpu_devices[replica_id][logic_core_id]):\n                v = uninitialized_variable_creator(**kwargs)\n            replicated_var_list.append(v)\n        replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)\n        tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)\n        mirrored_replicated_var_list.append(tpu_replicated_var)\n    return mirrored_replicated_var_list", "docstring": "Returns a list of `TPUReplicatedVariable`s.\n\nThe list consists of `num_replicas` `TPUReplicatedVariable`s and can be\nused to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable`\ncontains a list of `tf.Variable`s which are replicated to\n`num_cores_per_replica` logical cores to enable XLA SPMD compilation.\n\nArgs:\n**kwargs: the keyword arguments for creating a variable", "source": "github-repos"}
{"code": "def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):\n    batch_size = enc_output.shape[0]\n    proposals = []\n    current_position = 0\n    for level, (height, width) in enumerate(spatial_shapes):\n        mask_flatten_ = padding_mask[:, current_position:current_position + height * width]\n        mask_flatten_ = mask_flatten_.view(batch_size, height, width, 1)\n        valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n        valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n        grid_y, grid_x = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij')\n        grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n        scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)\n        grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale\n        width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level\n        proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)\n        proposals.append(proposal)\n        current_position += height * width\n    output_proposals = torch.cat(proposals, 1)\n    output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n    output_proposals = torch.log(output_proposals / (1 - output_proposals))\n    output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))\n    output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n    object_query = enc_output\n    object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))\n    object_query = object_query.masked_fill(~output_proposals_valid, float(0))\n    object_query = self.enc_output_norm(self.enc_output(object_query))\n    return (object_query, output_proposals)", "docstring": "Generate the encoder output proposals from encoded enc_output.\n\nArgs:\nenc_output (`torch.Tensor[batch_size, sequence_length, hidden_size]`): Output of the encoder.\npadding_mask (`torch.Tensor[batch_size, sequence_length]`): Padding mask for `enc_output`.\nspatial_shapes (`torch.Tensor[num_feature_levels, 2]`): Spatial shapes of the feature maps.\n\nReturns:\n`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.\n- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to\ndirectly predict a bounding box. (without the need of a decoder)\n- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse\nsigmoid.", "source": "github-repos"}
{"code": "def handle_server_error(error: Exception) -> ResponseReturnValue:\n    return (DQMResponse(name=error.__class__.__name__, description=str(error), code=500), 500)", "docstring": "DQM Server Error Response.\n\nArgs:\n* error: Server error\n\nReturns:\n* DQMResponse for the error with a 500 status code", "source": "github-repos"}
{"code": "def with_device(\n            self,\n            new_device: devices.Device,\n            qubit_mapping: Callable[[ops.Qid], ops.Qid] = lambda e: e,\n    ) -> 'Circuit':\n        \n        return Circuit(\n            moments=[ops.Moment(operation.transform_qubits(qubit_mapping)\n                            for operation in moment.operations)\n                     for moment in self._moments],\n            device=new_device\n        )", "docstring": "Maps the current circuit onto a new device, and validates.\n\nArgs:\nnew_device: The new device that the circuit should be on.\nqubit_mapping: How to translate qubits from the old device into\nqubits on the new device.\n\nReturns:\nThe translated circuit.", "source": "juraj-google-style"}
{"code": "def ParseNSSwitchConf(nsswitch_filename):\n    with open(nsswitch_filename, 'r') as nsswitch_file:\n        nsswitch = {}\n        map_re = re.compile('^([a-z]+): *(.*)$')\n        for line in nsswitch_file:\n            match = map_re.match(line)\n            if match:\n                sources = match.group(2).split()\n                nsswitch[match.group(1)] = sources\n    return nsswitch", "docstring": "Parse /etc/nsswitch.conf and return the sources for each map.\n\nArgs:\nnsswitch_filename: Full path to an nsswitch.conf to parse.  See manpage\nnsswitch.conf(5) for full details on the format expected.\n\nReturns:\na dictionary keyed by map names and containing a list of sources\nfor each map.", "source": "github-repos"}
{"code": "def format_formula(formula):\n    formatted_formula = ''\n    number_format = ''\n    for (i, s) in enumerate(formula):\n        if s.isdigit():\n            if (not number_format):\n                number_format = '_{'\n            number_format += s\n            if (i == (len(formula) - 1)):\n                number_format += '}'\n                formatted_formula += number_format\n        else:\n            if number_format:\n                number_format += '}'\n                formatted_formula += number_format\n                number_format = ''\n            formatted_formula += s\n    return ('$%s$' % formatted_formula)", "docstring": "Converts str of chemical formula into\nlatex format for labelling purposes\n\nArgs:\nformula (str): Chemical formula", "source": "codesearchnet"}
{"code": "def restore_app_connection(self, port=None):\n    self.host_port = (port or utils.get_available_host_port())\n    self._adb.forward([('tcp:%d' % self.host_port), ('tcp:%d' % self.device_port)])\n    try:\n        self.connect()\n    except:\n        self.log.exception('Failed to re-connect to app.')\n        raise jsonrpc_client_base.AppRestoreConnectionError(self._ad, ('Failed to restore app connection for %s at host port %s, device port %s' % (self.package, self.host_port, self.device_port)))\n    self._proc = None\n    self._restore_event_client()", "docstring": "Restores the app after device got reconnected.\n\nInstead of creating new instance of the client:\n- Uses the given port (or find a new available host_port if none is\ngiven).\n- Tries to connect to remote server with selected port.\n\nArgs:\nport: If given, this is the host port from which to connect to remote\ndevice port. If not provided, find a new available port as host\nport.\n\nRaises:\nAppRestoreConnectionError: When the app was not able to be started.", "source": "codesearchnet"}
{"code": "def __call__(self, image):  \n        \n        frame_height = image.shape[0]\n        frame_width = image.shape[1]\n\n        faces = self.find_faces(image, self.draw_box)\n\n        for x, y, w, h in faces:  \n            hat = self.hat.copy()\n\n            \n            hat_width = int(w * self.w_offset)\n            hat_height = int(hat_width * hat.shape[0] / hat.shape[1])\n            hat = cv2.resize(hat, (hat_width, hat_height))\n\n            \n            hat_left = 0\n            hat_top = 0\n            hat_bottom = hat_height\n            hat_right = hat_width\n            y0 = y - hat_height + self.y_offset\n            if y0 < 0:  \n                hat_top = abs(y0)  \n                y0 = 0\n            y1 = y0 + hat_height - hat_top\n            if y1 > frame_height:\n                hat_bottom = hat_height - (y1 - frame_height)\n                y1 = frame_height\n            x0 = x + self.x_offset\n            if x0 < 0:\n                hat_left = abs(x0)\n                x0 = 0\n            x1 = x0 + hat_width - hat_left\n            if x1 > frame_width:\n                hat_right = hat_width - (x1 - frame_width)\n                x1 = frame_width\n\n            \n            for c in range(0, 3):\n                hat_slice = hat[hat_top:hat_bottom, hat_left:hat_right, c] * \\\n                    (hat[hat_top:hat_bottom, hat_left:hat_right, 3] / 255.0)\n                bg_slice = image[y0:y1, x0:x1, c] * \\\n                    (1.0 - hat[hat_top:hat_bottom, hat_left:hat_right, 3]\n                     / 255.0)\n                image[y0:y1, x0:x1, c] = hat_slice + bg_slice\n\n        return image", "docstring": "Draws a hat on top of detected faces inside the image.\n\nArgs:\nimage: The image.\n\nReturns:\nThe image with a hat.", "source": "juraj-google-style"}
{"code": "def sonos_uri_from_id(self, item_id):\n    item_id = quote_url(item_id.encode('utf-8'))\n    account = self.account\n    result = 'soco:\n    return result", "docstring": "Get a uri which can be sent for playing.\n\nArgs:\nitem_id (str): The unique id of a playable item for this music\nservice, such as that returned in the metadata from\n`get_metadata`, eg ``spotify:track:2qs5ZcLByNTctJKbhAZ9JE``\n\nReturns:\nstr: A URI of the form: ``soco://spotify%3Atrack\n%3A2qs5ZcLByNTctJKbhAZ9JE?sid=2311&sn=1`` which encodes the\n``item_id``, and relevant data from the account for the music\nservice. This URI can be sent to a Sonos device for playing,\nand the device itself will retrieve all the necessary metadata\nsuch as title, album etc.", "source": "codesearchnet"}
{"code": "def checkStatus(self):\n        \n        checkAccount()\n\n        data = {'userid': self.user_id,\n                'useridx': self.useridx\n               }\n        r = self.session.post(nurls['checkStatus'], data = data)\n\n        p = re.compile(r'\\<message\\>(?P<message>.+)\\</message\\>')\n        message = p.search(r.text).group('message')\n\n        if message == 'success':\n            return True\n        else:\n            return False", "docstring": "Check status\n\nArgs:\n\nReturns:\nTrue: Sucess\nFalse: Failed", "source": "juraj-google-style"}
{"code": "def GetAttributeContainerByIndex(self, index):\n    if (index < 0):\n        raise IndexError('Unsupported negative index value: {0:d}.'.format(index))\n    if (index < len(self._list)):\n        return self._list[index]\n    return None", "docstring": "Retrieves a specific serialized attribute container from the list.\n\nArgs:\nindex (int): attribute container index.\n\nReturns:\nbytes: serialized attribute container data or None if not available.\n\nRaises:\nIndexError: if the index is less than zero.", "source": "codesearchnet"}
{"code": "def _FindAugmentingEdge(self, queue):\n    for y in (v for v in self.right - self.t if self.slack[v] == 0):\n        if y not in self.matches:\n            return (True, self.slackx[y], y)\n        self.t.add(y)\n        if self.matches[y] not in self.s:\n            queue.append(self.matches[y])\n            self._AddToTree(self.matches[y], self.slackx[y])\n    return (False, None, None)", "docstring": "Find a final edge for an augmenting path after updating labels.\n\nAt least one new edge should have been added to the equality subgraph, so\nwe check if any new edges will create an augmenting path.\n\nArgs:\nqueue: Queue for performing BFS traversal.\nReturns:\nfound: True if path was found.\nx: Left vertex of final path edge.\ny: Right vertex of final path edge.", "source": "github-repos"}
{"code": "def __init__(self, observations, sources, provisional_name):\n        \n        self.mpc_observations = {}\n        self.observations = observations\n        self.sys_header = None\n        self.sources = [astrom.Source(reading_list, provisional_name) for reading_list in sources]", "docstring": "Constructs a new astronomy data set object.\n\nArgs:\nobservations: list(Observations)\nThe observations that are part of the data set.", "source": "juraj-google-style"}
{"code": "def add_arguments(self, parser):\n    parser.add_argument('-p', '--product', action='store_true', help='print the production information')\n    parser.add_argument('-j', '--jtag', action='store_true', help='print the JTAG pin status')\n    return self.add_common_arguments(parser, False)", "docstring": "Adds the information commands to the parser.\n\nArgs:\nself (InfoCommand): the ``InfoCommand`` instance\nparser (argparse.ArgumentParser): the parser to add the arguments to\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None):\n    \n    \n    length = -1\n    if category in ('snv', 'indel', 'cancer'):\n        if ref_len == alt_len:\n            length = alt_len\n        else:\n            length = abs(ref_len - alt_len)\n\n    elif category == 'sv':\n        if svtype == 'bnd':\n            length = int(10e10)\n        else:\n            if svlen:\n                length = abs(int(svlen))\n            \n            elif end:\n                if end != pos:\n                    length = end - pos\n    return length", "docstring": "Return the length of a variant\n\nArgs:\nalt_len(int)\nref_len(int)\ncategory(str)\nsvtype(str)\nsvlen(int)", "source": "juraj-google-style"}
{"code": "def op_functions_and_classes(ops_module):\n    for op_class_name in dir(ops_module):\n        op_class = getattr(ops_module, op_class_name)\n        if isinstance(op_class, type) and Operation in op_class.__mro__:\n            op_function_name = to_snake_case(op_class_name)\n            op_function_name = {'batch_norm': 'batch_normalization', 'rms_norm': 'rms_normalization', 'search_sorted': 'searchsorted'}.get(op_function_name, op_function_name)\n            op_function = getattr(ops_module, op_function_name, None)\n            if op_function is not None:\n                yield (op_function, op_class)", "docstring": "Enumerate pairs of op function and op classes in a module.\n\nWill return for instance `(ExpandDims, expand_dims)`, `(Sum, sum)`, ...\n\nArgs:\nops_module: the module to explore.\n\nReturns:\niterable returning tuples with function and class pairs.", "source": "github-repos"}
{"code": "def get_params(self, deep=True):\n    params = {'weights': self.coef_, 'bias': self.intercept_}\n    if deep:\n        for (key, value) in self.B.items():\n            params[('b_' + str(key))] = value\n    return params", "docstring": "Get parameters for the estimator.\n\nArgs:\ndeep (boolean, optional) : If True, will return the parameters for this estimator and contained subobjects that are estimators.\n\nReturns:\nparams : mapping of string to any contained subobjects that are estimators.", "source": "codesearchnet"}
{"code": "def get_parameter(self, name):\n        \n        i = self.get_parameter_names(include_frozen=True).index(name)\n        return self.get_parameter_vector(include_frozen=True)[i]", "docstring": "Get a parameter value by name\n\nArgs:\nname: The name of the parameter", "source": "juraj-google-style"}
{"code": "def from_dict(cls, dict_repr: Dict[Union['DecisionPoint', str], Union[None, 'DNA', float, int, str]], dna_spec: DNASpec, use_ints_as_literals: bool=False) -> 'DNA':\n\n    def _get_decision(spec: DNASpec):\n        \n        decision = dict_repr.get(spec.id, None)\n        if decision is None:\n            decision = dict_repr.get(spec, None)\n        if decision is None and spec.name:\n            decision = dict_repr.get(spec.name, None)\n            if isinstance(decision, list):\n                dict_repr[spec.name] = decision[1:]\n                decision = decision[0] if decision else None\n        return decision\n\n    def _choice_index(subchoice, value: Union[int, float, str]) -> int:\n        \n        if isinstance(value, int) and (not use_ints_as_literals):\n            index = value\n            if index < 0 or index >= len(subchoice.candidates):\n                identifier = subchoice.name or subchoice.id\n                raise ValueError(f\"Candidate index out of range at choice '{identifier}'. Index={index}, Number of candidates={len(subchoice.candidates)}.\")\n        else:\n            index = subchoice.candidate_index(value)\n        return index\n\n    def _make_dna(spec: DNASpec) -> DNA:\n        \n        if spec.is_space:\n            children = []\n            for elem in spec.elements:\n                child = _make_dna(elem)\n                if child is not None:\n                    children.append(child)\n            return DNA(None, children)\n        elif spec.is_categorical:\n            children = []\n            for choice_idx in range(spec.num_choices):\n                subchoice = spec.subchoice(choice_idx)\n                value = _get_decision(subchoice)\n                if value is None and subchoice.is_subchoice:\n                    parent_decisions = _get_decision(spec)\n                    if parent_decisions is not None:\n                        assert len(parent_decisions) == spec.num_choices, (parent_decisions, spec)\n                        value = parent_decisions[choice_idx]\n                if value is None:\n                    identifier = subchoice.name or subchoice.id\n                    raise ValueError(f\"Value for '{identifier}' is not found in the dictionary {dict_repr!r}.\")\n                if isinstance(value, DNA):\n                    children.append(value)\n                else:\n                    choice_index = _choice_index(subchoice, value)\n                    subspace_dna = _make_dna(subchoice.candidates[choice_index])\n                    children.append(DNA(choice_index, [subspace_dna] if subspace_dna else []))\n            return DNA(None, children)\n        elif spec.is_numerical or spec.is_custom_decision_point:\n            value = _get_decision(spec)\n            if value is None:\n                raise ValueError(f\"Value for '{spec.name or spec.id}' is not found in the dictionary {dict_repr!r}.\")\n            if isinstance(value, DNA):\n                value = value.value\n            if spec.is_numerical:\n                if value < spec.min_value:\n                    raise ValueError(f\"The decision for '{spec.name or spec.id}' should be no less than {spec.min_value}. Encountered {value}.\")\n                if value > spec.max_value:\n                    raise ValueError(f\"The decision for '{spec.name or spec.id}' should be no greater than {spec.max_value}. 
Encountered {value}.\")\n            elif not isinstance(value, str):\n                raise ValueError(f\"The decision for '{spec.name or spec.id}' should be a string. Encountered {value}.\")\n            return DNA(value, None)\n        else:\n            raise NotImplementedError('Should never happen.')\n    dna = _make_dna(dna_spec)\n    return dna.use_spec(dna_spec)", "docstring": "Create a DNA from its dictionary representation.\n\nArgs:\ndict_repr: The dictionary representation of the DNA.\nThe keys should be either strings as the decision point ID\nor DNASpec objects. The values should be either numeric or literal\nvalues for the decisions.\nFor inactive decisions, their ID/spec should either be absent from the\ndictionary, or use None as their values.\ndna_spec: The DNASpec that applies to the DNA.\nuse_ints_as_literals: If True, when an integer is encountered for\na dictinonary value, treat it as the literal value.\nOtherwise, always treat it as a candidate index.\n\nReturns:\nA DNA object.", "source": "github-repos"}
{"code": "def py_hash(key, num_buckets):\n    (b, j) = ((- 1), 0)\n    if (num_buckets < 1):\n        raise ValueError('num_buckets must be a positive number')\n    while (j < num_buckets):\n        b = int(j)\n        key = (((key * long(2862933555777941757)) + 1) & 18446744073709551615)\n        j = (float((b + 1)) * (float((1 << 31)) / float(((key >> 33) + 1))))\n    return int(b)", "docstring": "Generate a number in the range [0, num_buckets).\n\nArgs:\nkey (int): The key to hash.\nnum_buckets (int): Number of buckets to use.\n\nReturns:\nThe bucket number `key` computes to.\n\nRaises:\nValueError: If `num_buckets` is not a positive number.", "source": "codesearchnet"}
{"code": "def isdir(self, path, follow_symlinks=True):\n        \n        return self._is_of_type(path, S_IFDIR, follow_symlinks)", "docstring": "Determine if path identifies a directory.\n\nArgs:\npath: Path to filesystem object.\n\nReturns:\n`True` if path points to a directory (following symlinks).\n\nRaises:\nTypeError: if path is None.", "source": "juraj-google-style"}
{"code": "def pack_range(key, packing, grad_vars, rng):\n    to_pack = grad_vars[rng[0]:(rng[1] + 1)]\n    members = []\n    variables = []\n    restore_shapes = []\n    with tf.name_scope('pack'):\n        for (g, v) in to_pack:\n            variables.append(v)\n            restore_shapes.append(g.shape)\n            with tf.device(g.device):\n                members.append(tf.reshape(g, [(- 1)]))\n        packing[key] = GradPackTuple(indices=range(rng[0], (rng[1] + 1)), vars=variables, shapes=restore_shapes)\n        with tf.device(members[0].device):\n            return tf.concat(members, 0)", "docstring": "Form the concatenation of a specified range of gradient tensors.\n\nArgs:\nkey: Value under which to store meta-data in packing that will be used\nlater to restore the grad_var list structure.\npacking: Dict holding data describing packed ranges of small tensors.\ngrad_vars: List of (grad, var) pairs for one tower.\nrng: A pair of integers giving the first, last indices of a consecutive\nrange of tensors to be packed.\n\nReturns:\nA tensor that is the concatenation of all the specified small tensors.", "source": "codesearchnet"}
{"code": "def Info(component):\n    try:\n        from IPython.core import oinspect\n        try:\n            inspector = oinspect.Inspector(theme_name='neutral')\n        except TypeError:\n            inspector = oinspect.Inspector()\n        info = inspector.info(component)\n        if info['docstring'] == '<no docstring>':\n            info['docstring'] = None\n    except ImportError:\n        info = _InfoBackup(component)\n    try:\n        unused_code, lineindex = inspect.findsource(component)\n        info['line'] = lineindex + 1\n    except (TypeError, OSError):\n        info['line'] = None\n    if 'docstring' in info:\n        info['docstring_info'] = docstrings.parse(info['docstring'])\n    return info", "docstring": "Returns a dict with information about the given component.\n\nThe dict will have at least some of the following fields.\ntype_name: The type of `component`.\nstring_form: A string representation of `component`.\nfile: The file in which `component` is defined.\nline: The line number at which `component` is defined.\ndocstring: The docstring of `component`.\ninit_docstring: The init docstring of `component`.\nclass_docstring: The class docstring of `component`.\ncall_docstring: The call docstring of `component`.\nlength: The length of `component`.\n\nArgs:\ncomponent: The component to analyze.\nReturns:\nA dict with information about the component.", "source": "github-repos"}
{"code": "def rename(self, renaming: Dict[(str, str)]) -> 'Substitution':\n    return Substitution(((renaming.get(name, name), value) for (name, value) in self.items()))", "docstring": "Return a copy of the substitution with renamed variables.\n\nExample:\n\nRename the variable *x* to *y*:\n\n>>> subst = Substitution({'x': a})\n>>> subst.rename({'x': 'y'})\n{'y': Symbol('a')}\n\nArgs:\nrenaming:\nA dictionary mapping old variable names to new ones.\n\nReturns:\nA copy of the substitution where variable names have been replaced according to the given renaming\ndictionary. Names that are not contained in the dictionary are left unchanged.", "source": "codesearchnet"}
{"code": "def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame:\n    \n    cols = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD']\n    con, _ = create_connection()\n    hours = con.ref(tickers=tickers, flds=cols)\n    cur_dt = pd.Timestamp('today').strftime('%Y-%m-%d ')\n    hours.loc[:, 'local'] = hours.value.astype(str).str[:-3]\n    hours.loc[:, 'exch'] = pd.DatetimeIndex(\n        cur_dt + hours.value.astype(str)\n    ).tz_localize(tz_loc).tz_convert(tz_exch).strftime('%H:%M')\n\n    hours = pd.concat([\n        hours.set_index(['ticker', 'field']).exch.unstack().loc[:, cols],\n        hours.set_index(['ticker', 'field']).local.unstack().loc[:, cols],\n    ], axis=1)\n    hours.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End']\n\n    return hours", "docstring": "Check exchange hours vs local hours\n\nArgs:\ntickers: list of tickers\ntz_exch: exchange timezone\ntz_loc: local timezone\n\nReturns:\nLocal and exchange hours", "source": "juraj-google-style"}
{"code": "def insert_into_obj(self, data):\n        \n        if not data:\n            data = ''\n        size = len(data)\n        n1 = size%256\n        n2 = size/256\n            \n        self.send('^DI'+chr(n1)+chr(n2)+data)", "docstring": "Insert text into selected object.\n\nArgs:\ndata: The data you want to insert.\nReturns:\nNone\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def _build_split_filenames(self, split_info_list):\n    \n\n    filenames = []\n    for split_info in split_info_list:\n      filenames.extend(naming.filepaths_for_dataset_split(\n          dataset_name=self.name,\n          split=split_info.name,\n          num_shards=split_info.num_shards,\n          data_dir=self._data_dir,\n          filetype_suffix=self._file_format_adapter.filetype_suffix,\n      ))\n    return filenames", "docstring": "Construct the split filenames associated with the split info.\n\nThe filenames correspond to the pre-processed datasets files present in\nthe root directory of the dataset.\n\nArgs:\nsplit_info_list: (list[SplitInfo]) List of split from which generate the\nfilenames\n\nReturns:\nfilenames: (list[str]) The list of filenames path corresponding to the\nsplit info object", "source": "juraj-google-style"}
{"code": "def serve(self, model_dir, environment):\n        \n        logger.info(\"serving\")\n\n        self.container_root = self._create_tmp_folder()\n        logger.info('creating hosting dir in {}'.format(self.container_root))\n\n        volumes = self._prepare_serving_volumes(model_dir)\n\n        \n        if sagemaker.estimator.DIR_PARAM_NAME.upper() in environment:\n            script_dir = environment[sagemaker.estimator.DIR_PARAM_NAME.upper()]\n            parsed_uri = urlparse(script_dir)\n            if parsed_uri.scheme == 'file':\n                volumes.append(_Volume(parsed_uri.path, '/opt/ml/code'))\n                \n                environment = environment.copy()\n                environment[sagemaker.estimator.DIR_PARAM_NAME.upper()] = '/opt/ml/code'\n\n        if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image):\n            _pull_image(self.image)\n\n        self._generate_compose_file('serve',\n                                    additional_env_vars=environment,\n                                    additional_volumes=volumes)\n        compose_command = self._compose()\n        self.container = _HostingContainer(compose_command)\n        self.container.start()", "docstring": "Host a local endpoint using docker-compose.\nArgs:\nprimary_container (dict): dictionary containing the container runtime settings\nfor serving. Expected keys:\n- 'ModelDataUrl' pointing to a file or s3:// location.\n- 'Environment' a dictionary of environment variables to be passed to the hosting container.", "source": "juraj-google-style"}
{"code": "def concatenate(cls, list_of_stats):\n    all_stats = np.stack([stats.values for stats in list_of_stats])\n    all_counts = all_stats[(:, 4)]\n    all_counts_relative = (all_counts / np.sum(all_counts))\n    min_value = float(np.min(all_stats[(:, 2)]))\n    max_value = float(np.max(all_stats[(:, 3)]))\n    mean_value = float(np.sum((all_counts_relative * all_stats[(:, 0)])))\n    var_value = float(np.sum((all_counts_relative * (all_stats[(:, 1)] + np.power((all_stats[(:, 0)] - mean_value), 2)))))\n    num_value = int(np.sum(all_counts))\n    return cls(mean_value, var_value, min_value, max_value, num_value)", "docstring": "Take a list of stats from different sets of data points and\nmerge the stats for getting stats overall data points.\n\nArgs:\nlist_of_stats (iterable): A list containing stats for different sets of data points.\n\nReturns:\nDataStats: Stats calculated overall sets of data points.", "source": "codesearchnet"}
{"code": "def get_offset(self, envelope):\n        \n        if isinstance(envelope, collections.Sequence):\n            envelope = Envelope(envelope)\n        if not (self.envelope.contains(envelope) or\n                self.envelope.intersects(envelope)):\n            raise ValueError('Envelope does not intersect with this extent')\n        coords = self.affine.transform((envelope.ul, envelope.lr))\n        nxy = [(min(dest, size) - origin) or 1\n               for size, origin, dest in zip(self.size, *coords)]\n        return coords[0] + tuple(nxy)", "docstring": "Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).\n\nArguments:\nenvelope -- coordinate extent tuple or Envelope", "source": "juraj-google-style"}
{"code": "def write_version(name=None, path=None):\n    \n    \n    \n    if name in (None, '__main__'):\n        path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),\n                                    \"version.json\")\n        contents = {\n            'version': __version__,\n            'version_string': __version_string__,\n        }\n        with open(path, 'w') as filehandle:\n            filehandle.write(json.dumps(contents, sort_keys=True, indent=4))", "docstring": "Write the version info to ../version.json, for setup.py.\n\nArgs:\nname (Optional[str]): this is for the ``write_version(name=__name__)``\nbelow.  That's one way to both follow the\n``if __name__ == '__main__':`` convention but also allow for full\ncoverage without ignoring parts of the file.\n\npath (Optional[str]): the path to write the version json to.  Defaults\nto ../version.json", "source": "juraj-google-style"}
{"code": "def manual_get_pfam_annotations(seq, outpath, searchtype='phmmer', force_rerun=False):\n    \n    if op.exists(outpath):\n        with open(outpath, 'r') as f:\n            json_results = json.loads(json.load(f))\n\n    else:\n        fseq = '>Seq\\n' + seq\n        if searchtype == 'phmmer':\n            parameters = {'seqdb': 'pdb', 'seq': fseq}\n        if searchtype == 'hmmscan':\n            parameters = {'hmmdb': 'pfam', 'seq': fseq}\n        enc_params = urllib.urlencode(parameters).encode('utf-8')\n        request = urllib2.Request('http:\n        url = (urllib2.urlopen(request).geturl() + '?output=json')\n        request = str(url)\n        request_read = urlopen(request).read().decode(\"utf-8\")\n\n        with open(outpath, 'w') as f:\n            json.dump(request_read, f)\n\n        json_results = json.loads(request_read)\n\n    return json_results['results']['hits']", "docstring": "Retrieve and download PFAM results from the HMMER search tool.\n\nArgs:\nseq:\noutpath:\nsearchtype:\nforce_rerun:\n\nReturns:\n\nTodo:\n* Document and test!", "source": "juraj-google-style"}
{"code": "def ipv4_lstrip_zeros(address):\n    obj = address.strip().split('.')\n    for (x, y) in enumerate(obj):\n        obj[x] = y.split('/')[0].lstrip('0')\n        if (obj[x] in ['', None]):\n            obj[x] = '0'\n    return '.'.join(obj)", "docstring": "The function to strip leading zeros in each octet of an IPv4 address.\n\nArgs:\naddress (:obj:`str`): An IPv4 address.\n\nReturns:\nstr: The modified IPv4 address.", "source": "codesearchnet"}
{"code": "def __init__(self, enterprise_configuration):\n        \n        super(DegreedAPIClient, self).__init__(enterprise_configuration)\n        self.global_degreed_config = apps.get_model('degreed', 'DegreedGlobalConfiguration').current()\n        self.session = None\n        self.expires_at = None", "docstring": "Instantiate a new client.\n\nArgs:\nenterprise_configuration (DegreedEnterpriseCustomerConfiguration): An enterprise customers's\nconfiguration model for connecting with Degreed", "source": "juraj-google-style"}
{"code": "def step(self, actions):\n    (observations, raw_rewards, dones, infos) = self._step(actions)\n    raw_rewards = raw_rewards.astype(np.float32)\n    processed_rewards = self.process_rewards(raw_rewards)\n    processed_observations = self.process_observations(observations)\n    self.trajectories.step(processed_observations, raw_rewards, processed_rewards, dones, actions)\n    return (processed_observations, processed_rewards, dones, infos)", "docstring": "Takes a step in all environments.\n\nSubclasses should override _step to do the actual reset if something other\nthan the default implementation is desired.\n\nArgs:\nactions: Batch of actions.\n\nReturns:\n(preprocessed_observations, processed_rewards, dones, infos).", "source": "codesearchnet"}
{"code": "def generate_visualizations(methods, data, true_labels, base_dir='visualizations', figsize=(18, 10), **scatter_options):\n    plt.figure(figsize=figsize)\n    for method in methods:\n        preproc = method[0]\n        if isinstance(preproc, Preprocess):\n            (preprocessed, ll) = preproc.run(data)\n            output_names = preproc.output_names\n        else:\n            p1 = data\n            output_names = ['']\n            for p in preproc:\n                (p1, ll) = p.run(p1)\n                p1 = p1[0]\n                output_names[0] = (output_names[0] + p.output_names[0])\n            preprocessed = [p1]\n        for (r, name) in zip(preprocessed, output_names):\n            print(name)\n            if (r.shape[0] == 2):\n                r_dim_red = r\n            elif (sparse.issparse(r) and (r.shape[0] > 100)):\n                name = ('tsvd_' + name)\n                tsvd = TruncatedSVD(50)\n                r_dim_red = tsvd.fit_transform(r.T)\n                try:\n                    tsne = TSNE(2)\n                    r_dim_red = tsne.fit_transform(r_dim_red).T\n                    name = ('tsne_' + name)\n                except:\n                    tsvd2 = TruncatedSVD(2)\n                    r_dim_red = tsvd2.fit_transform(r_dim_red).T\n            else:\n                name = ('tsne_' + name)\n                tsne = TSNE(2)\n                r_dim_red = tsne.fit_transform(r.T).T\n            if isinstance(method[1], list):\n                for clustering_method in method[1]:\n                    try:\n                        cluster_labels = clustering_method.run(r)\n                    except:\n                        print('clustering failed')\n                        continue\n                    output_path = (base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name))\n                    visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)\n            else:\n                clustering_method = method[1]\n                try:\n                    cluster_labels = clustering_method.run(r)\n                except:\n                    print('clustering failed')\n                    continue\n                output_path = (base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name))\n                visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)\n            output_path = (base_dir + '/{0}_true_labels.png'.format(name))\n            visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options)", "docstring": "Generates visualization scatters for all the methods.\n\nArgs:\nmethods: follows same format as run_experiments. List of tuples.\ndata: genes x cells\ntrue_labels: array of integers\nbase_dir: base directory to save all the plots\nfigsize: tuple of ints representing size of figure\nscatter_options: options for plt.scatter", "source": "codesearchnet"}
{"code": "def tokenize(self, vector_list):\n        \n        vector_arr = np.array(vector_list)\n        if vector_arr.ndim == 1:\n            key_arr = vector_arr.argmax()\n        else:\n            key_arr = vector_arr.argmax(axis=-1)\n        return self.__token_arr[key_arr]", "docstring": "Tokenize vector.\n\nArgs:\nvector_list:    The list of vector of one token.\n\nReturns:\ntoken", "source": "juraj-google-style"}
{"code": "def _send(self, method, path, data, filename):\n    if (filename is None):\n        return self._send_json(method, path, data)\n    else:\n        return self._send_file(method, path, data, filename)", "docstring": "Send data to a remote server, either with a POST or a PUT request.\n\nArgs:\n`method`: The method (POST or PUT) to use.\n`path`: The path to the resource.\n`data`: The data to send.\n`filename`: The filename of the file to send (if any).\nReturns:\nThe content of the response.\nRaises:\nAn exception depending on the HTTP status code of the response.", "source": "codesearchnet"}
{"code": "def _check_load_existing_object(self, object_type, id_field_name, operation='update'):\n        \n        \n        self._check_existing_object(object_type, id_field_name)\n        if not self._load_from_hdx(object_type, self.data[id_field_name]):\n            raise HDXError('No existing %s to %s!' % (object_type, operation))", "docstring": "Check metadata exists and contains HDX object identifier, and if so load HDX object\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\noperation (str): Operation to report if error. Defaults to update.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def toggle_scan(self, enable, filter_duplicates=False):\n        \n        command = struct.pack(\">BB\", enable, filter_duplicates)\n        self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)", "docstring": "Enables or disables BLE scanning\n\nArgs:\nenable: boolean value to enable (True) or disable (False) scanner\nfilter_duplicates: boolean value to enable/disable filter, that\nomits duplicated packets", "source": "juraj-google-style"}
{"code": "def assert_no_new_python_objects(self, threshold=None):\n    self._python_memory_checker.assert_no_new_objects(threshold=threshold)", "docstring": "Raises an exception if there are new Python objects created.\n\nIt computes the number of new Python objects per type using the first and\nthe last snapshots.\n\nArgs:\nthreshold: A dictionary of [Type name string], [count] pair. It won't\nraise an exception if the new Python objects are under this threshold.", "source": "github-repos"}
{"code": "def OpenSourcePath(self, source_path):\n    source_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_OS, location=source_path)\n    self.AddScanNode(source_path_spec, None)", "docstring": "Opens the source path.\n\nArgs:\nsource_path (str): source path.", "source": "codesearchnet"}
{"code": "def peek_with_kwargs(init, args=[]):\n\n    def peek(store, container, _stack=None):\n        return init(*[store.peek(attr, container, _stack=_stack) for attr in args], **dict([(attr, store.peek(attr, container, _stack=_stack)) for attr in container if (attr not in args)]))\n    return peek", "docstring": "Make datatypes passing keyworded arguments to the constructor.\n\nThis is a factory function; returns the actual `peek` routine.\n\nArguments:\n\ninit (callable): type constructor.\n\nargs (iterable): arguments NOT to be keyworded; order does matter.\n\nReturns:\n\ncallable: deserializer (`peek` routine).\n\nAll the peeked attributes that are not referenced in `args` are passed to `init` as\nkeyworded arguments.", "source": "codesearchnet"}
{"code": "def validate_txn_obj(obj_name, obj, key, validation_fun):\n    backend = bigchaindb.config['database']['backend']\n    if (backend == 'localmongodb'):\n        data = obj.get(key, {})\n        if isinstance(data, dict):\n            validate_all_keys_in_obj(obj_name, data, validation_fun)\n        elif isinstance(data, list):\n            validate_all_items_in_list(obj_name, data, validation_fun)", "docstring": "Validate value of `key` in `obj` using `validation_fun`.\n\nArgs:\nobj_name (str): name for `obj` being validated.\nobj (dict): dictionary object.\nkey (str): key to be validated in `obj`.\nvalidation_fun (function): function used to validate the value\nof `key`.\n\nReturns:\nNone: indicates validation successful\n\nRaises:\nValidationError: `validation_fun` will raise exception on failure", "source": "codesearchnet"}
{"code": "def __init__(self, oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,\n                 oxm_field=None, oxm_hasmask=False, oxm_value=None):\n        \n        super().__init__()\n        self.oxm_class = oxm_class\n        self.oxm_field_and_mask = None\n        self.oxm_length = None\n        self.oxm_value = oxm_value\n        \n        self.oxm_field = oxm_field\n        self.oxm_hasmask = oxm_hasmask", "docstring": "Create an OXM TLV struct with the optional parameters below.\n\nArgs:\noxm_class (OxmClass): Match class: member class or reserved class\noxm_field (OxmMatchFields, OxmOfbMatchField): Match field within\nthe class\noxm_hasmask (bool): Set if OXM include a bitmask in payload\noxm_value (bytes): OXM Payload", "source": "juraj-google-style"}
{"code": "def solution(swarm):\n    best = swarm[0]\n    cmp = comparator(best.best_fitness)\n    for particle in swarm:\n        if cmp(particle.best_fitness, best.best_fitness):\n            best = particle\n    return best", "docstring": "Determines the global best particle in the swarm.\n\nArgs:\nswarm: iterable: an iterable that yields all particles in the swarm.\n\nReturns:\ncipy.algorithms.pso.Particle: The best particle in the swarm when\ncomparing the best_fitness values of the particles.", "source": "codesearchnet"}
{"code": "def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):\n    if ((get_dirs is None) and (get_files is None)):\n        get_dirs = True\n        get_files = True\n    source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)\n    dirs = []\n    for dir_or_file_name in os.listdir(source_dir):\n        path = os.path.join(source_dir, dir_or_file_name)\n        if (hide_ignored and dir_or_file_name.startswith('_')):\n            continue\n        is_dir = os.path.isdir(path)\n        if ((get_dirs and is_dir) or (get_files and (not is_dir))):\n            dirs.append(dir_or_file_name)\n    return dirs", "docstring": "Return list of all dirs and files inside given dir.\n\nAlso can filter contents to return only dirs or files.\n\nArgs:\n- dir_name: Which directory we need to scan (relative)\n- get_dirs: Return dirs list\n- get_files: Return files list\n- hide_ignored: Exclude files and dirs with initial underscore", "source": "codesearchnet"}
{"code": "def myGrades(year, candidateNumber, badFormat, length):\n    weights1 = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]\n    weights2 = [1, 1, 1, 1, 1, 1, 0.5, 0.5]\n    if (year == 1):\n        myFinalResult = (sum([(int(badFormat[candidateNumber][(2 * (i + 1))]) * weights1[i]) for i in range((length - 1))]) / 6)\n    elif ((year == 2) or (year == 3)):\n        myFinalResult = (sum([(int(badFormat[candidateNumber][(2 * (i + 1))]) * weights2[i]) for i in range((length - 1))]) / 7)\n    elif (year == 4):\n        myFinalResult = (sum([int(badFormat[candidateNumber][(2 * (i + 1))]) for i in range((length - 1))]) / 8)\n    return myFinalResult", "docstring": "returns final result of candidateNumber in year\n\nArguments:\nyear {int} -- the year candidateNumber is in\ncandidateNumber {str} -- the candidateNumber of candidateNumber\nbadFormat {dict} -- candNumber : [results for candidate]\nlength {int} -- length of each row in badFormat divided by 2\n\n\nReturns:\nint -- a weighted average for a specific candidate number and year", "source": "codesearchnet"}
{"code": "def underlying_variable(t):\n    t = underlying_variable_ref(t)\n    assert (t is not None)\n    if (not hasattr(tf.get_default_graph(), 'var_index')):\n        tf.get_default_graph().var_index = {}\n    var_index = tf.get_default_graph().var_index\n    for v in tf.global_variables()[len(var_index):]:\n        var_index[v.name] = v\n    return var_index[t.name]", "docstring": "Find the underlying tf.Variable object.\n\nArgs:\nt: a Tensor\n\nReturns:\ntf.Variable.", "source": "codesearchnet"}
{"code": "def move_to(self, x=0, y=0):\n        \n        self._driver.move_to(self, x, y)", "docstring": "Deprecated use element.touch('drag', { toX, toY, duration(s) }) instead.\nMove the mouse by an offset of the specificed element.\n\nSupport:\nAndroid\n\nArgs:\nx(float): X offset to move to, relative to the\ntop-left corner of the element.\ny(float): Y offset to move to, relative to the\ntop-left corner of the element.\n\nReturns:\nWebElement object.", "source": "juraj-google-style"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Patch specific fields in the specified model.\n\nArgs:\nrequest: (BigqueryModelsPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Model) The response message.", "source": "github-repos"}
{"code": "def bbox_flip(bbox, d, rows, cols):\n    if (d == 0):\n        bbox = bbox_vflip(bbox, rows, cols)\n    elif (d == 1):\n        bbox = bbox_hflip(bbox, rows, cols)\n    elif (d == (- 1)):\n        bbox = bbox_hflip(bbox, rows, cols)\n        bbox = bbox_vflip(bbox, rows, cols)\n    else:\n        raise ValueError('Invalid d value {}. Valid values are -1, 0 and 1'.format(d))\n    return bbox", "docstring": "Flip a bounding box either vertically, horizontally or both depending on the value of `d`.\n\nRaises:\nValueError: if value of `d` is not -1, 0 or 1.", "source": "codesearchnet"}
{"code": "def AddWarning(self, warning):\n    \n    self._RaiseIfNotWritable()\n\n    self._storage_file.AddWarning(warning)\n    self.number_of_warnings += 1", "docstring": "Adds an warning.\n\nArgs:\nwarning (ExtractionWarning): an extraction warning.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def _FindNodeWithStandaloneLineParent(node):\n    if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES:\n        return node\n    else:\n        return _FindNodeWithStandaloneLineParent(node.parent)", "docstring": "Find a node whose parent is a 'standalone line' node.\n\nSee the comment above _STANDALONE_LINE_NODES for more details.\n\nArguments:\nnode: node to start from\n\nReturns:\nSuitable node that's either the node itself or one of its ancestors.", "source": "github-repos"}
{"code": "def _zsh_comp_command(self, zcf, cmd, grouping, add_help=True):\n    if add_help:\n        if grouping:\n            print(\"+ '(help)'\", end=BLK, file=zcf)\n        print(\"'--help[show help message]'\", end=BLK, file=zcf)\n        print(\"'-h[show help message]'\", end=BLK, file=zcf)\n    no_comp = ('store_true', 'store_false')\n    cmd_dict = (self._opt_cmds[cmd] if cmd else self._opt_bare)\n    for (opt, sct) in cmd_dict.items():\n        meta = self._conf[sct].def_[opt]\n        if (meta.cmd_kwargs.get('action') == 'append'):\n            (grpfmt, optfmt) = (\"+ '{}'\", \"'*{}[{}]{}'\")\n            if (meta.comprule is None):\n                meta.comprule = ''\n        else:\n            (grpfmt, optfmt) = (\"+ '({})'\", \"'{}[{}]{}'\")\n        if ((meta.cmd_kwargs.get('action') in no_comp) or (meta.cmd_kwargs.get('nargs') == 0)):\n            meta.comprule = None\n        if (meta.comprule is None):\n            compstr = ''\n        elif (meta.comprule == ''):\n            optfmt = optfmt.split('[')\n            optfmt = ((optfmt[0] + '=[') + optfmt[1])\n            compstr = ': :( )'\n        else:\n            optfmt = optfmt.split('[')\n            optfmt = ((optfmt[0] + '=[') + optfmt[1])\n            compstr = ': :{}'.format(meta.comprule)\n        if grouping:\n            print(grpfmt.format(opt), end=BLK, file=zcf)\n        for name in _names(self._conf[sct], opt):\n            print(optfmt.format(name, meta.help.replace(\"'\", '\\'\"\\'\"\\''), compstr), end=BLK, file=zcf)", "docstring": "Write zsh _arguments compdef for a given command.\n\nArgs:\nzcf (file): zsh compdef file.\ncmd (str): command name, set to None or '' for bare command.\ngrouping (bool): group options (zsh>=5.4).\nadd_help (bool): add an help option.", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        self.command = list(args)\n        self.directory = kwargs['directory'] if 'directory' in kwargs else None\n        self.env_vars = kwargs['env_vars'] if 'env_vars' in kwargs else None", "docstring": "Define a subcommand.\n\nArgs:\n*args (str): Sequence of program arguments needed to run the command.\ndirectory (Optional[str]): Directory the command is run in.\nenv_vars (Optional[dict]): Environment variable to feed to the subcommand.", "source": "juraj-google-style"}
{"code": "def forward_transfer_pair(payer_transfer: LockedTransferSignedState, available_routes: List['RouteState'], channelidentifiers_to_channels: Dict, pseudo_random_generator: random.Random, block_number: BlockNumber) -> Tuple[(Optional[MediationPairState], List[Event])]:\n    transfer_pair = None\n    mediated_events: List[Event] = list()\n    lock_timeout = BlockTimeout((payer_transfer.lock.expiration - block_number))\n    payee_channel = next_channel_from_routes(available_routes=available_routes, channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_amount=payer_transfer.lock.amount, lock_timeout=lock_timeout)\n    if payee_channel:\n        assert (payee_channel.settle_timeout >= lock_timeout)\n        assert (payee_channel.token_address == payer_transfer.token)\n        message_identifier = message_identifier_from_prng(pseudo_random_generator)\n        lock = payer_transfer.lock\n        lockedtransfer_event = channel.send_lockedtransfer(channel_state=payee_channel, initiator=payer_transfer.initiator, target=payer_transfer.target, amount=get_lock_amount_after_fees(lock, payee_channel), message_identifier=message_identifier, payment_identifier=payer_transfer.payment_identifier, expiration=lock.expiration, secrethash=lock.secrethash)\n        assert lockedtransfer_event\n        transfer_pair = MediationPairState(payer_transfer, payee_channel.partner_state.address, lockedtransfer_event.transfer)\n        mediated_events = [lockedtransfer_event]\n    return (transfer_pair, mediated_events)", "docstring": "Given a payer transfer tries a new route to proceed with the mediation.\n\nArgs:\npayer_transfer: The transfer received from the payer_channel.\navailable_routes: Current available routes that may be used, it's\nassumed that the routes list is ordered from best to worst.\nchannelidentifiers_to_channels: All the channels available for this\ntransfer.\npseudo_random_generator: Number generator to generate a message id.\nblock_number: The current block number.", "source": "codesearchnet"}
{"code": "def whois_domains(self, domains):\n        \n        api_name = 'opendns-whois-domain'\n        fmt_url_path = u'whois/{0}'\n        return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Calls WHOIS domain end point\n\nArgs:\ndomains: An enumerable of domains\nReturns:\nA dict of {domain: domain_result}", "source": "juraj-google-style"}
{"code": "def _single_quote_handler_factory(on_single_quote, on_other):\n\n    @coroutine\n    def single_quote_handler(c, ctx, is_field_name=False):\n        assert (c == _SINGLE_QUOTE)\n        (c, self) = (yield)\n        if ((c == _SINGLE_QUOTE) and (not _is_escaped(c))):\n            (yield on_single_quote(c, ctx, is_field_name))\n        else:\n            ctx.set_unicode(quoted_text=True)\n            (yield on_other(c, ctx, is_field_name))\n    return single_quote_handler", "docstring": "Generates handlers used for classifying tokens that begin with one or more single quotes.\n\nArgs:\non_single_quote (callable): Called when another single quote is found. Accepts the current character's ordinal,\nthe current context, and True if the token is a field name; returns a Transition.\non_other (callable): Called when any character other than a single quote is found.  Accepts the current\ncharacter's ordinal, the current context, and True if the token is a field name; returns a Transition.", "source": "codesearchnet"}
{"code": "def execute_dynamo_definition(self, definition_path, show_ui=False, shutdown=True, automation=False, path_exec=True):\n    self._add_entry(templates.DYNAMO_COMMAND.format(dynamo_def_path=definition_path, dyn_show_ui=show_ui, dyn_automation=automation, dyn_path_exec=path_exec, dyn_shutdown=shutdown))", "docstring": "Execute a dynamo definition.\n\nArgs:\ndefinition_path (str): full path to dynamo definition file\nshow_ui (bool): show dynamo UI at execution\nshutdown (bool): shutdown model after execution\nautomation (bool): activate dynamo automation\npath_exec (bool): activate dynamo path execute\n\nExamples:\n>>> jm = JournalMaker()\n>>> jm.execute_dynamo_definition(\n...     definition_path='C:/testdef.dyn',\n...     show_ui=True,\n...     shutdown=True\n... )", "source": "codesearchnet"}
{"code": "def __init__(self, cluster_resolver=None):\n    if cluster_resolver is None:\n        cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()\n    super(ParameterServerStrategyV1, self).__init__(ParameterServerStrategyExtended(self, cluster_resolver=cluster_resolver))\n    distribute_lib.distribution_strategy_gauge.get_cell('V1').set('ParameterServerStrategy')", "docstring": "Initializes this strategy with an optional `cluster_resolver`.\n\nArgs:\ncluster_resolver: Optional\n`tf.distribute.cluster_resolver.ClusterResolver` object. Defaults to a\n`tf.distribute.cluster_resolver.TFConfigClusterResolver`.", "source": "github-repos"}
{"code": "def _create_dom(data):\n    \n    if not isinstance(data, dhtmlparser.HTMLElement):\n        data = dhtmlparser.parseString(\n            utils.handle_encodnig(data)\n        )\n\n    dhtmlparser.makeDoubleLinked(data)\n\n    return data", "docstring": "Creates doublelinked DOM from `data`.\n\nArgs:\ndata (str/HTMLElement): Either string or HTML element.\n\nReturns:\nobj: HTMLElement containing double linked DOM.", "source": "juraj-google-style"}
{"code": "def _text_io_wrapper(stream, mode, encoding, errors, newline):\n    if (('t' in mode) and (not hasattr(stream, 'encoding'))):\n        text_stream = TextIOWrapper(stream, encoding=encoding, errors=errors, newline=newline)\n        (yield text_stream)\n        text_stream.flush()\n    else:\n        (yield stream)", "docstring": "Wrap a binary stream to Text stream.\n\nArgs:\nstream (file-like object): binary stream.\nmode (str): Open mode.\nencoding (str): Stream encoding.\nerrors (str): Decoding error handling.\nnewline (str): Universal newlines", "source": "codesearchnet"}
{"code": "def forward(self, prompt_masks: torch.FloatTensor, pred_masks: torch.FloatTensor, labels: torch.FloatTensor, bool_masked_pos: torch.BoolTensor):\n    ground_truth = torch.cat((prompt_masks, labels), dim=2)\n    mask = bool_masked_pos[:, :, None].repeat(1, 1, self.patch_size ** 2 * 3)\n    mask = unpatchify(mask, ground_truth.shape[2] \n    loss = F.smooth_l1_loss(pred_masks, ground_truth, reduction='none', beta=self.beta)\n    loss = (loss * mask).sum() / mask.sum()\n    return loss", "docstring": "Computes the L1 loss between the predicted masks and the ground truth masks.\n\nArgs:\nprompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\nPixel values from mask prompt.\n\npred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`):\nPredicted masks.\n\nlabels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\nGround truth mask for input images.\n\nbool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):\nBoolean masked positions. Indicates which patches are masked (1) and which aren't (0).\n\nReturns:\n`torch.FloatTensor`: The mean L1 loss between the predicted masks and the ground truth masks.", "source": "github-repos"}
{"code": "def set_suite_run_display_name(self, suite_run_display_name):\n    self._suite_run_display_name = suite_run_display_name", "docstring": "Interface for sub-classes to set a customized display name.\n\nThis name provides run-specific context intended for display. Default to\nsuite class name. Set this in sub-classes to include run-specific context.\n\nArgs:\nsuite_run_display_name: str, the display name to set.", "source": "github-repos"}
{"code": "def EnableNetworkInterfaces(self, interfaces):\n    \n    \n    \n    if not interfaces or set(interfaces) == self.interfaces:\n      return\n\n    self.logger.info('Ethernet interfaces: %s.', interfaces)\n    self.interfaces = set(interfaces)\n\n    if self.dhcp_command:\n      try:\n        subprocess.check_call([self.dhcp_command])\n      except subprocess.CalledProcessError:\n        self.logger.warning('Could not enable Ethernet interfaces.')\n      return\n\n    \n    self.distro_utils.EnableNetworkInterfaces(\n        interfaces, self.logger, dhclient_script=self.dhclient_script)", "docstring": "Enable the list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.", "source": "juraj-google-style"}
{"code": "def raster_to_asc(raster_f, asc_f):\n        \n        raster_r = RasterUtilClass.read_raster(raster_f)\n        RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows,\n                                       raster_r.geotrans, raster_r.noDataValue)", "docstring": "Converting Raster format to ASCII raster.\n\nArgs:\nraster_f: raster file.\nasc_f: output ASCII file.", "source": "juraj-google-style"}
{"code": "def get_full_filename_by_suffixes(dir_src, suffixes):\n    file_names = FileClass.get_filename_by_suffixes(dir_src, suffixes)\n    if (file_names is None):\n        return None\n    return list((((dir_src + os.sep) + name) for name in file_names))", "docstring": "get full file names with the given suffixes in the given directory\n\nArgs:\ndir_src: directory path\nsuffixes: wanted suffixes\n\nReturns:\nfull file names with the given suffixes as list", "source": "codesearchnet"}
{"code": "async def confirmbalance(self, *args, **kwargs):\n    if kwargs.get('message'):\n        kwargs = json.loads(kwargs.get('message', '{}'))\n    txid = kwargs.get('txid')\n    coinid = kwargs.get('coinid')\n    buyer_address = kwargs.get('buyer_address')\n    cid = kwargs.get('cid')\n    address = kwargs.get('buyer_address')\n    try:\n        coinid = coinid.replace('TEST', '')\n    except:\n        pass\n    if (not all([coinid, cid, buyer_address, txid])):\n        return {'error': 400, 'reason': 'Confirm balance. Missed required fields'}\n    if (not (coinid in settings.bridges.keys())):\n        return (await self.error_400(('Confirm balance. Invalid coinid: %s' % coinid)))\n    self.account.blockchain.setendpoint(settings.bridges[coinid])\n    offer = (await self.account.blockchain.getoffer(cid=cid, buyer_address=buyer_address))\n    amount = int(offer['price'])\n    coinid = 'PUT'\n    history_database = self.client[settings.HISTORY]\n    history_collection = history_database[coinid]\n    history = (await history_collection.find_one({'txid': txid}))\n    try:\n        account = (await self.account.getaccountdata(public_key=history['public_key']))\n    except:\n        return (await self.error_404('Confirm balance. Not found current deal.'))\n    database = self.client[self.collection]\n    balance_collection = database[coinid]\n    balance = (await balance_collection.find_one({'uid': account['id']}))\n    submitted = (int(balance['amount_frozen']) - int(amount))\n    if (submitted < 0):\n        return (await self.error_400('Not enough frozen amount.'))\n    decremented = (await balance_collection.find_one_and_update({'uid': account['id']}, {'$set': {'amount_frozen': str(submitted)}}))\n    difference = (int(balance['amount_active']) + int(amount))\n    updated = (await balance_collection.find_one_and_update({'uid': account['id']}, {'$set': {'amount_active': str(difference)}}))\n    if (not updated):\n        return {'error': 404, 'reason': 'Confirm balance. Not found current transaction id'}\n    (await history_collection.find_one_and_update({'txid': txid}, {'$unset': {'txid': 1}}))\n    if (int(account['level']) == 2):\n        (await self.account.updatelevel(**{'id': account['id'], 'level': 3}))\n    return {i: updated[i] for i in updated if ((i != '_id') and (i != 'txid'))}", "docstring": "Confirm balance after trading\n\nAccepts:\n- message (signed dictionary):\n- \"txid\" - str\n- \"coinid\" - str\n- \"amount\" - int\n\nReturns:\n- \"address\" - str\n- \"coinid\" - str\n- \"amount\" - int\n- \"uid\" - int\n- \"unconfirmed\" - int (0 by default)\n- \"deposit\" - int (0 by default)\n\nVerified: True", "source": "codesearchnet"}
{"code": "def splitpath(self, path):\n        \n        path = self.normcase(path)\n        sep = self._path_separator(path)\n        path_components = path.split(sep)\n        if not path_components:\n            return ('', '')\n\n        starts_with_drive = self._starts_with_drive_letter(path)\n        basename = path_components.pop()\n        colon = self._matching_string(path, ':')\n        if not path_components:\n            if starts_with_drive:\n                components = basename.split(colon)\n                return (components[0] + colon, components[1])\n            return ('', basename)\n        for component in path_components:\n            if component:\n                \n                \n                while not path_components[-1]:\n                    path_components.pop()\n                if starts_with_drive:\n                    if not path_components:\n                        components = basename.split(colon)\n                        return (components[0] + colon, components[1])\n                    if (len(path_components) == 1 and\n                            path_components[0].endswith(colon)):\n                        return (path_components[0] + sep, basename)\n                return (sep.join(path_components), basename)\n        \n        return (sep, basename)", "docstring": "Mimic os.path.splitpath using the specified path_separator.\n\nMimics os.path.splitpath using the path_separator that was specified\nfor this FakeFilesystem.\n\nArgs:\npath:  (str) The path to split.\n\nReturns:\n(str) A duple (pathname, basename) for which pathname does not\nend with a slash, and basename does not contain a slash.", "source": "juraj-google-style"}
{"code": "def FromJsonString(self, value):\n    \n    if len(value) < 1 or value[-1] != 's':\n      raise ParseError(\n          'Duration must end with letter \"s\": {0}.'.format(value))\n    try:\n      pos = value.find('.')\n      if pos == -1:\n        self.seconds = int(value[:-1])\n        self.nanos = 0\n      else:\n        self.seconds = int(value[:pos])\n        if value[0] == '-':\n          self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9))\n        else:\n          self.nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9))\n    except ValueError:\n      raise ParseError(\n          'Couldn\\'t parse duration: {0}.'.format(value))", "docstring": "Converts a string to Duration.\n\nArgs:\nvalue: A string to be converted. The string must end with 's'. Any\nfractional digits (or none) are accepted as long as they fit into\nprecision. For example: \"1s\", \"1.01s\", \"1.0000001s\", \"-3.100s\n\nRaises:\nParseError: On parsing problems.", "source": "juraj-google-style"}
{"code": "def create_mask(x):\n    unique = np.unique(x)\n    num_unique_elems = len(unique)\n    keys = range(num_unique_elems)\n    d = dict(zip(unique, keys))\n    mask_map = dict(zip(keys, unique))\n    return ([d[el] for el in x], mask_map, num_unique_elems)", "docstring": "Given a list of object creates integer mask for unique values in the list.\n\nArgs:\nx: 1-d numpy array.\n\nReturns:\nA tuple of three objects:\n* A list of integers that is the mask for `x`,\n* A dictionary map between  entries of `x` and the list\n* The number of unique elements.", "source": "github-repos"}
{"code": "def get_full_alias(self, query):\n        \n        if query in self.alias_table.sections():\n            return query\n\n        return next((section for section in self.alias_table.sections() if section.split()[0] == query), '')", "docstring": "Get the full alias given a search query.\n\nArgs:\nquery: The query this function performs searching on.\n\nReturns:\nThe full alias (with the placeholders, if any).", "source": "juraj-google-style"}
{"code": "def inspect_edge(G: AnalysisGraph, source: str, target: str):\n    return create_statement_inspection_table(G[source][target]['InfluenceStatements'])", "docstring": "'Drill down' into an edge in the analysis graph and inspect its\nprovenance. This function prints the provenance.\n\nArgs:\nG\nsource\ntarget", "source": "codesearchnet"}
{"code": "def parse(self, ping_message):\n    try:\n        if typepy.is_not_null_string(ping_message.stdout):\n            ping_message = ping_message.stdout\n    except AttributeError:\n        pass\n    logger.debug('parsing ping result: {}'.format(ping_message))\n    self.__parser = NullPingParser()\n    if typepy.is_null_string(ping_message):\n        logger.debug('ping_message is empty')\n        self.__stats = PingStats()\n        return self.__stats\n    ping_lines = _to_unicode(ping_message).splitlines()\n    parser_class_list = (LinuxPingParser, WindowsPingParser, MacOsPingParser, AlpineLinuxPingParser)\n    for parser_class in parser_class_list:\n        self.__parser = parser_class()\n        try:\n            self.__stats = self.__parser.parse(ping_lines)\n            return self.__stats\n        except ParseError as e:\n            if (e.reason != ParseErrorReason.HEADER_NOT_FOUND):\n                raise e\n        except pp.ParseException:\n            pass\n    self.__parser = NullPingParser()\n    return self.__stats", "docstring": "Parse ping command output.\n\nArgs:\nping_message (str or :py:class:`~pingparsing.PingResult`):\n``ping`` command output.\n\nReturns:\n:py:class:`~pingparsing.PingStats`: Parsed result.", "source": "codesearchnet"}
{"code": "def restore_from_checkpoint(self, checkpoint_path):\n    \n    import tensorflow as tf\n    \n    \n    \n    all_vars = tf.contrib.slim.get_variables_to_restore(\n        exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step'])\n\n    saver = tf.train.Saver(all_vars)\n    saver.restore(self.tf_session, checkpoint_path)", "docstring": "To restore inception model variables from the checkpoint file.\n\nSome variables might be missing in the checkpoint file, so it only\nloads the ones that are avialable, assuming the rest would be\ninitialized later.\nArgs:\ncheckpoint_path: Path to the checkpoint file for the Inception graph.", "source": "juraj-google-style"}
{"code": "def convert_bytes(value):\n    \n    n = np.rint(len(str(value))/4).astype(int)\n    return value/(1024**n), sizes[n]", "docstring": "Reduces bytes to more convenient units (i.e. KiB, GiB, TiB, etc.).\n\nArgs:\nvalues (int): Value in Bytes\n\nReturns:\ntup (tuple): Tuple of value, unit (e.g. (10, 'MiB'))", "source": "juraj-google-style"}
{"code": "def to_dict(pipe: BeamEventSet, schema: Schema, timestamp_key: str='timestamp', format: DictEventSetFormatChoices=DictEventSetFormat.GROUPED_BY_INDEX) -> beam.PCollection[Dict[str, Any]]:\n    grouped_by_features = add_feature_idx_and_flatten(pipe) | 'Group by index ' >> beam.GroupByKey()\n    if format == DictEventSetFormat.GROUPED_BY_INDEX:\n        return grouped_by_features | 'Convert to dict' >> beam.Map(_convert_to_dict_event_set_key_value, schema, timestamp_key)\n    elif format == DictEventSetFormat.SINGLE_EVENTS:\n        return grouped_by_features | 'Convert to dict' >> beam.FlatMap(_convert_to_dict_event_key_value, schema, timestamp_key)\n    else:\n        raise ValueError(f'Unknown format {format}')", "docstring": "Converts a Beam EventSet to PCollection of key->value.\n\nThis method is compatible with the output of `from_csv_raw` and the\nOfficial Beam IO connectors. This method is the inverse of `to_event_set`.\n\nArgs:\npipe: PCollection of Beam EventSet.\nschema: Schema of the data.\ntimestamp_key: Key containing the timestamps.\nformat: Format of the events inside the output dictionary. See\n[DictEventSetFormat][temporian.io.format.DictEventSetFormat] for\nmore.\n\nReturns:\nBeam pipe of key values.", "source": "github-repos"}
{"code": "def moveaxis(x, source, destination):\n    if any_symbolic_tensors((x,)):\n        return Moveaxis(source, destination).symbolic_call(x)\n    return backend.numpy.moveaxis(x, source=source, destination=destination)", "docstring": "Move axes of a tensor to new positions.\n\nOther axes remain in their original order.\n\nArgs:\nx: Tensor whose axes should be reordered.\nsource: Original positions of the axes to move. These must be unique.\ndestination: Destinations positions for each of the original axes.\nThese must also be unique.\n\nReturns:\nTensor with moved axes.", "source": "github-repos"}
{"code": "def __init__(self, *columns, **kwargs):\n        \n        if not all([isinstance(c, Column) for c in columns]):\n            raise TypeError('All elements of Row must be Column instances')\n        self.type = 'row'\n        self.columns = columns", "docstring": "Init method.\n\nArgs:\n*columns (): the instances of Column.\n**kwargs (): not used.", "source": "juraj-google-style"}
{"code": "def predict_proba(self, text):\n        \n        assert isinstance(text, str)\n\n        words = self.tokenizer(text)\n        X = self.preprocessor.transform([words])\n        y = self.model.predict(X)\n        y = y[0]  \n\n        return y", "docstring": "Probability estimates.\n\nThe returned estimates for all classes are ordered by the\nlabel of classes.\n\nArgs:\ntext : string, the input text.\n\nReturns:\ny : array-like, shape = [num_words, num_classes]\nReturns the probability of the word for each class in the model,", "source": "juraj-google-style"}
{"code": "def get_acgt_geno_marker(self, marker):\n        \n        \n        geno, snp_position = self.get_geno_marker(marker, return_index=True)\n\n        \n        return self._allele_encoding[snp_position][geno]", "docstring": "Gets the genotypes for a given marker (ACGT format).\n\nArgs:\nmarker (str): The name of the marker.\n\nReturns:\nnumpy.ndarray: The genotypes of the marker (ACGT format).", "source": "juraj-google-style"}
{"code": "def _GetFileSystemTypeFromFileEntry(self, file_entry):\n    \n    if file_entry.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK:\n      return file_entry.type_indicator\n\n    \n    \n    file_system = file_entry.GetFileSystem()\n    fs_info = file_system.GetFsInfo()\n    if fs_info.info:\n      type_string = '{0!s}'.format(fs_info.info.ftype)\n      if type_string.startswith('TSK_FS_TYPE_'):\n        type_string = type_string[12:]\n      if type_string.endswith('_DETECT'):\n        type_string = type_string[:-7]\n\n    return type_string", "docstring": "Retrieves the file system type indicator of a file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): a file entry.\n\nReturns:\nstr: file system type.", "source": "juraj-google-style"}
{"code": "def is_copy_constructor(constructor):\n    assert isinstance(constructor, calldef_members.constructor_t)\n    args = constructor.arguments\n    parent = constructor.parent\n    if (len(args) != 1):\n        return False\n    arg = args[0]\n    if (not isinstance(arg.decl_type, cpptypes.compound_t)):\n        return False\n    if (not type_traits.is_reference(arg.decl_type)):\n        return False\n    if (not type_traits.is_const(arg.decl_type.base)):\n        return False\n    un_aliased = type_traits.remove_alias(arg.decl_type.base)\n    if (not isinstance(un_aliased.base, cpptypes.declarated_t)):\n        return False\n    return (id(un_aliased.base.declaration) == id(parent))", "docstring": "Check if the declaration is a copy constructor,\n\nArgs:\nconstructor (declarations.constructor_t): the constructor\nto be checked.\n\nReturns:\nbool: True if this is a copy constructor, False instead.", "source": "codesearchnet"}
{"code": "def textx_isinstance(obj, obj_cls):\n    \n    if isinstance(obj, obj_cls):\n        return True\n    if hasattr(obj_cls, \"_tx_fqn\") and hasattr(obj, \"_tx_fqn\"):\n        if obj_cls._tx_fqn == obj._tx_fqn:\n            return True\n    if hasattr(obj_cls, \"_tx_inh_by\"):\n        for cls in obj_cls._tx_inh_by:\n            if (textx_isinstance(obj, cls)):\n                return True\n    return False", "docstring": "This function determines, if a textx object is an instance of a\ntextx class.\nArgs:\nobj: the object to be analyzed\nobj_cls: the class to be checked\n\nReturns:\nTrue if obj is an instance of obj_cls.", "source": "juraj-google-style"}
{"code": "def get_source_var_declaration(self, var):\n        \n        return next((x.source_mapping for x in self.variables if x.name == var))", "docstring": "Return the source mapping where the variable is declared\n\nArgs:\nvar (str): variable name\nReturns:\n(dict): sourceMapping", "source": "juraj-google-style"}
{"code": "def vae(x, z_size, name=None):\n    with tf.variable_scope(name, default_name='vae'):\n        mu = tf.layers.dense(x, z_size, name='mu')\n        log_sigma = tf.layers.dense(x, z_size, name='log_sigma')\n        shape = common_layers.shape_list(x)\n        epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])\n        z = (mu + (tf.exp((log_sigma / 2)) * epsilon))\n        kl = (0.5 * tf.reduce_mean(((tf.expm1(log_sigma) + tf.square(mu)) - log_sigma), axis=(- 1)))\n        free_bits = (z_size \n        kl_loss = tf.reduce_mean(tf.maximum((kl - free_bits), 0.0))\n        return (z, kl_loss, mu, log_sigma)", "docstring": "Simple variational autoencoder without discretization.\n\nArgs:\nx: Input to the discretization bottleneck.\nz_size: Number of bits, where discrete codes range from 1 to 2**z_size.\nname: Name for the bottleneck scope.\n\nReturns:\nEmbedding function, latent, loss, mu and log_simga.", "source": "codesearchnet"}
{"code": "def result_wrapper(result_fn):\n\n    def decorated(metric_obj, *args):\n        \n        has_strategy = distribute_lib.has_strategy()\n        replica_context = distribute_lib.get_replica_context()\n        if not has_strategy or replica_context is None or (not distribute_lib.get_strategy().extended._use_merge_call()):\n            with distribute_lib.variable_sync_on_read_context():\n                raw_result = result_fn(*args)\n                if isinstance(raw_result, (tensor.Tensor, variables_module.Variable, float, int)):\n                    result_t = array_ops.identity(raw_result)\n                elif isinstance(raw_result, dict):\n                    result_t = {key: array_ops.identity(value) for key, value in raw_result.items()}\n                else:\n                    try:\n                        result_t = array_ops.identity(raw_result)\n                    except (ValueError, TypeError):\n                        raise RuntimeError('The output of `metric.result()` can only be a single Tensor/Variable, or a dict of Tensors/Variables. For metric %s, got result %s.' % (metric_obj.name, raw_result))\n        else:\n\n            def merge_fn_wrapper(distribution, merge_fn, *args):\n                result = distribution.experimental_local_results(merge_fn)[0](*args)\n                return array_ops.identity(result)\n            result_t = replica_context.merge_call(merge_fn_wrapper, args=(result_fn,) + args)\n        metric_obj._call_result = result_t\n        return result_t\n    return tf_decorator.make_decorator(result_fn, decorated)", "docstring": "Decorator to wrap metric `result()` function in `merge_call()`.\n\nResult computation is an idempotent operation that simply calculates the\nmetric value using the state variables.\n\nIf metric state variables are distributed across replicas/devices and\n`result()` is requested from the context of one device - This function wraps\n`result()` in a distribution strategy `merge_call()`. With this,\nthe metric state variables will be aggregated across devices.\n\nArgs:\nresult_fn: function that computes the metric result.\n\nReturns:\nDecorated function that wraps `result_fn()` in distribution strategy\n`merge_call()`.", "source": "github-repos"}
{"code": "class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(AveragePooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Average pooling layer for 2D inputs (e.g. images).\n\nArgs:\npool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)\nspecifying the size of the pooling window.\nCan be a single integer to specify the same value for\nall spatial dimensions.\nstrides: An integer or tuple/list of 2 integers,\nspecifying the strides of the pooling operation.\nCan be a single integer to specify the same value for\nall spatial dimensions.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string. The ordering of the dimensions in the inputs.\n`channels_last` (default) and `channels_first` are supported.\n`channels_last` corresponds to inputs with shape\n`(batch, height, width, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, height, width)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def visit_ImportFrom(self, node):\n    if not node.module:\n        self.generic_visit(node)\n        return\n    from_import = node.module\n    for import_alias in node.names:\n        full_module_name = '%s.%s' % (from_import, import_alias.name)\n        full_import = (full_module_name, import_alias.asname)\n        detection = self._api_analysis_spec.imports_to_detect.get(full_import, None)\n        if detection:\n            self.add_result(detection)\n            self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message)\n    self.generic_visit(node)", "docstring": "Handle visiting an import-from node in the AST.\n\nArgs:\nnode: Current Node", "source": "github-repos"}
{"code": "def delete_group(self, name):\n        \n        self.service.delete_group(\n            name, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Delete given group.\n\nArgs:\nname (string): Name of group.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _request(self, method, url, **kwargs):\n        \n        resp = self._session.request(method,\n                                     '{}/{}'.format(self._base_url, url),\n                                     headers=self._headers,\n                                     **kwargs)\n\n        try:\n            resp.raise_for_status()\n        except HTTPError as e:\n            logging.error(resp.content)\n            raise RestClientError(e)\n\n        return resp", "docstring": "Make HTTP request and return response object\n\nArgs:\nmethod (str): GET, POST, PUT, DELETE\nurl (str): path appended to the base_url to create request\n**kwargs: passed directly to a requests.request object", "source": "juraj-google-style"}
{"code": "def tensor_layout(self, arg):\n    if isinstance(arg, Tensor):\n        arg = arg.shape\n    return self.layout_rules.tensor_layout(arg, self.shape)", "docstring": "Compute TensorLayout for a Tensor or a Shape.\n\nArgs:\narg: Tensor or Shape.\n\nReturns:\nTensorLayout.", "source": "codesearchnet"}
{"code": "def _mapping(self):\n    return self.__search_client.get('/unstable/index/{}/mapping'.format(mdf_toolbox.translate_index(self.index)))['mappings']", "docstring": "Fetch the entire mapping for the specified index.\n\nReturns:\ndict: The full mapping for the index.", "source": "codesearchnet"}
{"code": "def combine(**kwargs):\n    if not kwargs:\n        return [OrderedDict()]\n    sort_by_key = lambda k: k[0]\n    kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))\n    first = list(kwargs.items())[0]\n    rest = dict(list(kwargs.items())[1:])\n    rest_combined = combine(**rest)\n    key = first[0]\n    values = first[1]\n    if not isinstance(values, list):\n        values = [values]\n    return [OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key)) for v in values for combined in rest_combined]", "docstring": "Generate combinations based on its keyword arguments.\n\nTwo sets of returned combinations can be concatenated using +.  Their product\ncan be computed using `times()`.\n\nArgs:\n**kwargs: keyword arguments of form `option=[possibilities, ...]`\nor `option=the_only_possibility`.\n\nReturns:\na list of dictionaries for each combination. Keys in the dictionaries are\nthe keyword argument names.  Each key has one value - one of the\ncorresponding keyword argument values.", "source": "github-repos"}
{"code": "def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:\n    \n    if not org_wav_fn.exists():\n        raise FileNotFoundError\n    args = [config.FFMPEG_PATH,\n            \"-i\", str(org_wav_fn), \"-ac\", \"1\", \"-ar\", \"16000\", str(tgt_wav_fn)]\n    subprocess.run(args)", "docstring": "Converts the wav into a 16bit mono 16000Hz wav.\n\nArgs:\norg_wav_fn: A `Path` to the original wave file\ntgt_wav_fn: The `Path` to output the processed wave file", "source": "juraj-google-style"}
{"code": "def _module_info_from_proto_safe(module_info_def, import_scope=None):\n  \n  try:\n    return _module_info_from_proto(module_info_def, import_scope)\n  except Exception as e:  \n    logging.warning(\n        \"Error encountered when deserializing sonnet ModuleInfo:\\n%s\", str(e))\n    return None", "docstring": "Deserializes the `module_info_def` proto without raising exceptions.\n\nArgs:\nmodule_info_def: An instance of `module_pb2.SonnetModule`.\nimport_scope: Optional `string`. Name scope to use.\n\nReturns:\nAn instance of `ModuleInfo`.", "source": "juraj-google-style"}
{"code": "def _NonEagerInputs(op: ops.Operation, xs_set):\n    return [t for t in _Inputs(op, xs_set) if not isinstance(t, ops.EagerTensor)]", "docstring": "Returns the inputs of op, crossing closure boundaries where necessary.\n\nDoes not return any captured EagerTensors, i.e., the number of tensors\nreturned may be less than the actual number of inputs.\n\nArgs:\nop: Operation\nxs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.\n\nReturns:\nA list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\nis in a FuncGraph and has captured inputs.", "source": "github-repos"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    date_time_values = self._CopyDateTimeFromString(time_string)\n\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n    microseconds = date_time_values.get('microseconds', 0)\n\n    timestamp = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n    timestamp *= definitions.MILLISECONDS_PER_SECOND\n\n    if microseconds:\n      milliseconds, _ = divmod(\n          microseconds, definitions.MILLISECONDS_PER_SECOND)\n      timestamp += milliseconds\n\n    self._timestamp = timestamp\n    self.is_local_time = False", "docstring": "Copies a POSIX timestamp from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.", "source": "juraj-google-style"}
{"code": "def _ExtractResponseSummaryFields(document):\n  \n  headers = document.childAtPath('Envelope/Header/ResponseHeader')\n  body = document.childAtPath('Envelope/Body')\n  summary_fields = {}\n\n  if headers is not None:\n    summary_fields['requestId'] = headers.getChild('requestId').text\n    summary_fields['responseTime'] = headers.getChild('responseTime').text\n\n    \n    \n    service_name = headers.getChild('serviceName')\n    if service_name is not None:\n      summary_fields['serviceName'] = service_name.text\n\n    method_name = headers.getChild('methodName')\n    if method_name is not None:\n      summary_fields['methodName'] = method_name.text\n\n    operations = headers.getChild('operations')\n    if operations is not None:\n      summary_fields['operations'] = operations.text\n\n  if body is not None:\n    \n    fault = body.getChild('Fault')\n    if fault is not None:\n      summary_fields['isFault'] = True\n      \n      summary_fields['faultMessage'] = fault.getChild(\n          'faultstring').text[:16000]\n    else:\n      summary_fields['isFault'] = False\n\n  return summary_fields", "docstring": "Extract logging fields from the response's suds.sax.document.Document.\n\nArgs:\ndocument: A suds.sax.document.Document instance containing the parsed\nAPI response for a given API request.\n\nReturns:\nA dict mapping logging field names to their corresponding value.", "source": "juraj-google-style"}
{"code": "def secondary_structure_summary(dssp_df):\n    chains = dssp_df.chain.unique()\n    infodict = {}\n    for chain in chains:\n        expoinfo = defaultdict(int)\n        chain_df = dssp_df[(dssp_df.chain == chain)]\n        counts = chain_df.ss.value_counts()\n        total = float(len(chain_df))\n        for (ss, count) in iteritems(counts):\n            if (ss == '-'):\n                expoinfo['percent_C-dssp'] = (count / total)\n            if (ss == 'H'):\n                expoinfo['percent_H-dssp'] = (count / total)\n            if (ss == 'B'):\n                expoinfo['percent_B-dssp'] = (count / total)\n            if (ss == 'E'):\n                expoinfo['percent_E-dssp'] = (count / total)\n            if (ss == 'G'):\n                expoinfo['percent_G-dssp'] = (count / total)\n            if (ss == 'I'):\n                expoinfo['percent_I-dssp'] = (count / total)\n            if (ss == 'T'):\n                expoinfo['percent_T-dssp'] = (count / total)\n            if (ss == 'S'):\n                expoinfo['percent_S-dssp'] = (count / total)\n        for per in ['percent_C-dssp', 'percent_H-dssp', 'percent_B-dssp', 'percent_E-dssp', 'percent_G-dssp', 'percent_I-dssp', 'percent_T-dssp', 'percent_S-dssp']:\n            if (per not in expoinfo):\n                expoinfo[per] = 0.0\n        infodict[chain] = dict(expoinfo)\n    return infodict", "docstring": "Summarize the secondary structure content of the DSSP dataframe for each chain.\n\nArgs:\ndssp_df: Pandas DataFrame of parsed DSSP results\n\nReturns:\ndict: Chain to secondary structure summary dictionary", "source": "codesearchnet"}
{"code": "def _check_one_size(self):\n    block_one = ((self.end[0] == (self.start[0] + 1)) or (self.end[1] == (self.start[1] + 1)))\n    if block_one:\n        self.flag_change(self.flags, 'error', self.start, self.worksheet, message=self.FLAGS['1-size'])\n    return block_one", "docstring": "Checks for single height or single width blocks and flags the occurrence.\n\nReturns:\nTrue if the block is size 1.", "source": "codesearchnet"}
{"code": "def load_profiles_from_file(self, fqfn):\n        \n        if self.args.verbose:\n            print('Loading profiles from File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, fqfn))\n        with open(fqfn, 'r+') as fh:\n            data = json.load(fh)\n            for profile in data:\n                \n                self.profile_update(profile)\n                if self.args.action == 'validate':\n                    self.validate(profile)\n            fh.seek(0)\n            fh.write(json.dumps(data, indent=2, sort_keys=True))\n            fh.truncate()\n\n        for d in data:\n            if d.get('profile_name') in self.profiles:\n                self.handle_error(\n                    'Found a duplicate profile name ({}).'.format(d.get('profile_name'))\n                )\n            self.profiles.setdefault(\n                d.get('profile_name'),\n                {'data': d, 'ij_filename': d.get('install_json'), 'fqfn': fqfn},\n            )", "docstring": "Load profiles from file.\n\nArgs:\nfqfn (str): Fully qualified file name.", "source": "juraj-google-style"}
{"code": "def execute_query(self, verb, verb_arguments):\n        \n        request = self._build_request(verb, verb_arguments)\n        return self._execute(request)", "docstring": "Executes query (ex. get) via a dedicated http object.\n\nArgs:\nverb (str): Method to execute on the component (ex. get, list).\nverb_arguments (dict): key-value pairs to be passed to _BuildRequest.\n\nReturns:\ndict: Service Response.", "source": "juraj-google-style"}
{"code": "def has_event_handler(self, handler, event_name=None):\n    if (event_name is not None):\n        if (event_name not in self._event_handlers):\n            return False\n        events = [event_name]\n    else:\n        events = self._event_handlers\n    for e in events:\n        for (h, _, _) in self._event_handlers[e]:\n            if (h == handler):\n                return True\n    return False", "docstring": "Check if the specified event has the specified handler.\n\nArgs:\nhandler (callable): the callable event handler.\nevent_name: The event the handler attached to. Set this\nto ``None`` to search all events.", "source": "codesearchnet"}
{"code": "def has_object_error(self):\n    if (self._has_object_error is None):\n        self._has_object_error = next((True for o in self.objects() if o.has_error()), False)\n    return self._has_object_error", "docstring": "Returns true if any requested object had a business logic error,\notherwise returns false\n\nReturns:\nboolean", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.CheckConfig = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/CheckConfig',\n        request_serializer=provider__pb2.CheckRequest.SerializeToString,\n        response_deserializer=provider__pb2.CheckResponse.FromString,\n        )\n    self.DiffConfig = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/DiffConfig',\n        request_serializer=provider__pb2.DiffRequest.SerializeToString,\n        response_deserializer=provider__pb2.DiffResponse.FromString,\n        )\n    self.Configure = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Configure',\n        request_serializer=provider__pb2.ConfigureRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.Invoke = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Invoke',\n        request_serializer=provider__pb2.InvokeRequest.SerializeToString,\n        response_deserializer=provider__pb2.InvokeResponse.FromString,\n        )\n    self.Check = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Check',\n        request_serializer=provider__pb2.CheckRequest.SerializeToString,\n        response_deserializer=provider__pb2.CheckResponse.FromString,\n        )\n    self.Diff = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Diff',\n        request_serializer=provider__pb2.DiffRequest.SerializeToString,\n        response_deserializer=provider__pb2.DiffResponse.FromString,\n        )\n    self.Create = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Create',\n        request_serializer=provider__pb2.CreateRequest.SerializeToString,\n        response_deserializer=provider__pb2.CreateResponse.FromString,\n        )\n    self.Read = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Read',\n        request_serializer=provider__pb2.ReadRequest.SerializeToString,\n        response_deserializer=provider__pb2.ReadResponse.FromString,\n        )\n    self.Update = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Update',\n        request_serializer=provider__pb2.UpdateRequest.SerializeToString,\n        response_deserializer=provider__pb2.UpdateResponse.FromString,\n        )\n    self.Delete = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Delete',\n        request_serializer=provider__pb2.DeleteRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.Cancel = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/Cancel',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.GetPluginInfo = channel.unary_unary(\n        '/pulumirpc.ResourceProvider/GetPluginInfo',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=plugin__pb2.PluginInfo.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def __cmp__(self, other):\n        \n        prec_self = OPERATOR_MAP[self.value][1]\n        prec_other = OPERATOR_MAP[other.value][1]\n        if prec_self < prec_other:\n            return -1\n        if prec_self > prec_other:\n            return 1\n        return 0", "docstring": "Compare using operator precedence.\n\nArgs:\nother (Operator): The ``Operator`` we are comparing precedence\nagainst.\n\nReturns:\ninteger: ``1`` if greater than ``other``, ``-1`` if less than\n``other``, and ``0`` if of equal precedence of ``other``.", "source": "juraj-google-style"}
{"code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    if self._tpu != 'local':\n        cluster_spec = self.cluster_spec()\n        if task_type is not None and task_id is not None:\n            master = cluster_spec.task_address(task_type, task_id)\n        elif self.task_type is not None and self.task_id is not None:\n            master = cluster_spec.task_address(self.task_type, self.task_id)\n        else:\n            job_tasks = cluster_spec.job_tasks(self.task_type)\n            if not job_tasks:\n                raise ValueError('No TPUs with the specified names exist.')\n            master = job_tasks[0]\n        return cluster_resolver_lib.format_master_url(master, 'grpc')\n    else:\n        return ''", "docstring": "Get the Master string to be used for the session.\n\nIn the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of\nfirst instance in the ClusterSpec returned by the cluster_spec function.\n\nIf a non-TPU name is used when constructing a TPUClusterResolver, that will\nbe returned instead (e.g. If the tpus argument's value when constructing\nthis TPUClusterResolver was 'grpc://10.240.1.2:8470',\n'grpc://10.240.1.2:8470' will be returned).\n\nArgs:\ntask_type: (Optional, string) The type of the TensorFlow task of the\nmaster.\ntask_id: (Optional, integer) The index of the TensorFlow task of the\nmaster.\nrpc_layer: (Optional, string) The RPC protocol TensorFlow should use to\ncommunicate with TPUs.\n\nReturns:\nstring, the connection string to use when creating a session.\n\nRaises:\nValueError: If none of the TPUs specified exists.", "source": "github-repos"}
{"code": "def subscriber(address, topics, callback, message_type):\n    return Subscriber(address, topics, callback, message_type)", "docstring": "Creates a subscriber binding to the given address and\nsubscribe the given topics.\nThe callback is invoked for every message received.\n\nArgs:\n- address: the address to bind the PUB socket to.\n- topics: the topics to subscribe\n- callback: the callback to invoke for every message. Must accept 2 variables - topic and message\n- message_type: the type of message to receive", "source": "codesearchnet"}
{"code": "def confirm_cw_log(self, account, region, vpcname):\n        \n        try:\n            cw = self.session.client('logs', region)\n            token = None\n            log_groups = []\n            while True:\n                result = cw.describe_log_groups() if not token else cw.describe_log_groups(nextToken=token)\n                token = result.get('nextToken')\n                log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])\n\n                if not token:\n                    break\n\n            if vpcname not in log_groups:\n                cw.create_log_group(logGroupName=vpcname)\n\n                cw_vpc = VPC.get(vpcname)\n                cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)\n\n                self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))\n                auditlog(\n                    event='vpc_flow_logs.create_cw_log_group',\n                    actor=self.ns,\n                    data={\n                        'account': account.account_name,\n                        'region': region,\n                        'log_group_name': vpcname,\n                        'vpc': vpcname\n                    }\n                )\n            return True\n\n        except Exception:\n            self.log.exception('Failed creating log group for {}/{}/{}.'.format(\n                account,\n                region, vpcname\n            ))", "docstring": "Create a new CloudWatch log group based on the VPC Name if none exists. Returns `True` if succesful\n\nArgs:\naccount (:obj:`Account`): Account to create the log group in\nregion (`str`): Region to create the log group in\nvpcname (`str`): Name of the VPC the log group is fow\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def hash_stream(fileobj, hasher=None, blocksize=65536):\n    hasher = (hasher or hashlib.sha1())\n    buf = fileobj.read(blocksize)\n    while buf:\n        hasher.update(buf)\n        buf = fileobj.read(blocksize)\n    return hasher", "docstring": "Read from fileobj stream, return hash of its contents.\n\nArgs:\nfileobj: File-like object with read()\nhasher: Hash object such as hashlib.sha1(). Defaults to sha1.\nblocksize: Read from fileobj this many bytes at a time.", "source": "codesearchnet"}
{"code": "def range(self, dim, data_range=True, dimension_range=True):\n        \n        dim = self.get_dimension(dim)\n\n        if dim is None or (not data_range and not dimension_range):\n            return (None, None)\n        elif all(util.isfinite(v) for v in dim.range) and dimension_range:\n            return dim.range\n        elif dim in self.dimensions() and data_range and bool(self):\n            lower, upper = self.interface.range(self, dim)\n        else:\n            lower, upper = (np.NaN, np.NaN)\n        if not dimension_range:\n            return lower, upper\n        return util.dimension_range(lower, upper, dim.range, dim.soft_range)", "docstring": "Return the lower and upper bounds of values along dimension.\n\nArgs:\ndimension: The dimension to compute the range on.\ndata_range (bool): Compute range from data values\ndimension_range (bool): Include Dimension ranges\nWhether to include Dimension range and soft_range\nin range calculation\n\nReturns:\nTuple containing the lower and upper bound", "source": "juraj-google-style"}
{"code": "def generate_pseudo(strain_states, order=3):\n    s = sp.Symbol('s')\n    nstates = len(strain_states)\n    ni = (np.array(strain_states) * s)\n    (mis, absent_syms) = ([], [])\n    for degree in range(2, (order + 1)):\n        (cvec, carr) = get_symbol_list(degree)\n        sarr = np.zeros((nstates, 6), dtype=object)\n        for (n, strain_v) in enumerate(ni):\n            exps = carr.copy()\n            for i in range((degree - 1)):\n                exps = np.dot(exps, strain_v)\n            exps /= np.math.factorial((degree - 1))\n            sarr[n] = [sp.diff(exp, s, (degree - 1)) for exp in exps]\n        svec = sarr.ravel()\n        present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])\n        absent_syms += [(set(cvec) - present_syms)]\n        m = np.zeros(((6 * nstates), len(cvec)))\n        for (n, c) in enumerate(cvec):\n            m[(:, n)] = v_diff(svec, c)\n        mis.append(np.linalg.pinv(m))\n    return (mis, absent_syms)", "docstring": "Generates the pseudoinverse for a given set of strains.\n\nArgs:\nstrain_states (6xN array like): a list of voigt-notation\n\"strain-states\", i. e. perturbed indices of the strain\nas a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)\norder (int): order of pseudoinverse to calculate\n\nReturns:\nmis: pseudo inverses for each order tensor, these can\nbe multiplied by the central difference derivative\nof the stress with respect to the strain state\nabsent_syms: symbols of the tensor absent from the PI\nexpression", "source": "codesearchnet"}
{"code": "def start(self, channel):\n        \n\n        if self._started:\n            raise InternalError(\"The method start() was called twice on VirtualIOTileDevice.\")\n\n        self._push_channel = channel\n        self.start_workers()", "docstring": "Start running this virtual device including any necessary worker threads.\n\nArgs:\nchannel (IOTilePushChannel): the channel with a stream and trace\nroutine for streaming and tracing data through a VirtualInterface", "source": "juraj-google-style"}
{"code": "def CreateTaskStorage(self, task):\n    \n    if task.identifier in self._task_storage_writers:\n      raise IOError('Storage writer for task: {0:s} already exists.'.format(\n          task.identifier))\n\n    storage_writer = FakeStorageWriter(\n        self._session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)\n    self._task_storage_writers[task.identifier] = storage_writer\n    return storage_writer", "docstring": "Creates a task storage.\n\nArgs:\ntask (Task): task.\n\nReturns:\nFakeStorageWriter: storage writer.\n\nRaises:\nIOError: if the task storage already exists.\nOSError: if the task storage already exists.", "source": "juraj-google-style"}
{"code": "def delete(self, messageId):\n    check_type(messageId, basestring, may_be_none=False)\n    self._session.delete(((API_ENDPOINT + '/') + messageId))", "docstring": "Delete a message.\n\nArgs:\nmessageId(basestring): The ID of the message to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def get(self, secret_id):\n        \n        return self.prepare_model(self.client.api.inspect_secret(secret_id))", "docstring": "Get a secret.\n\nArgs:\nsecret_id (str): Secret ID.\n\nReturns:\n(:py:class:`Secret`): The secret.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the secret does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def __init__(self, username=None, password=None):\n        \n        super(UsernamePasswordCredential, self).__init__(\n            tag=Tags.CREDENTIAL_VALUE\n        )\n\n        self._username = None\n        self._password = None\n\n        self.username = username\n        self.password = password", "docstring": "Construct a UsernamePasswordCredential struct.\n\nArgs:\nusername (string): The username identifying the credential.\nOptional, defaults to None. Required for encoding and decoding.\npassword (string): The password associated with the username.\nOptional, defaults to None.", "source": "juraj-google-style"}
{"code": "def convert_batchnorm(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting batchnorm ...')\n\n    if names == 'short':\n        tf_name = 'BN' + random_string(6)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    bias_name = '{0}.bias'.format(w_name)\n    weights_name = '{0}.weight'.format(w_name)\n    mean_name = '{0}.running_mean'.format(w_name)\n    var_name = '{0}.running_var'.format(w_name)\n\n    if bias_name in weights:\n        beta = weights[bias_name].numpy()\n\n    if weights_name in weights:\n        gamma = weights[weights_name].numpy()\n\n    mean = weights[mean_name].numpy()\n    variance = weights[var_name].numpy()\n\n    eps = params['epsilon']\n    momentum = params['momentum']\n\n    if weights_name not in weights:\n        bn = keras.layers.BatchNormalization(\n            axis=1, momentum=momentum, epsilon=eps,\n            center=False, scale=False,\n            weights=[mean, variance],\n            name=tf_name\n        )\n    else:\n        bn = keras.layers.BatchNormalization(\n            axis=1, momentum=momentum, epsilon=eps,\n            weights=[gamma, beta, mean, variance],\n            name=tf_name\n        )\n    layers[scope_name] = bn(layers[inputs[0]])", "docstring": "Convert batch normalization layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def __init__(self, service_endpoint_uri):\n        \n        self._service_endpoint_uri = service_endpoint_uri\n        self._queue = None\n        self._send_buffer_size = 100\n        self._timeout = 10", "docstring": "Initializes a new instance of the class.\n\nArgs:\nservice_endpoint_uri (str) the address of the service to send telemetry data to.", "source": "juraj-google-style"}
{"code": "def RemoveScanNode(self, path_spec):\n    \n    scan_node = self._scan_nodes.get(path_spec, None)\n    if not scan_node:\n      return None\n\n    if scan_node.sub_nodes:\n      raise RuntimeError('Scan node has sub nodes.')\n\n    parent_scan_node = scan_node.parent_node\n    if parent_scan_node:\n      parent_scan_node.sub_nodes.remove(scan_node)\n\n    if path_spec == self._root_path_spec:\n      self._root_path_spec = None\n    del self._scan_nodes[path_spec]\n\n    if path_spec.IsFileSystem():\n      del self._file_system_scan_nodes[path_spec]\n\n    return parent_scan_node", "docstring": "Removes a scan node of a certain path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nSourceScanNode: parent scan node or None if not available.\n\nRaises:\nRuntimeError: if the scan node has sub nodes.", "source": "juraj-google-style"}
{"code": "def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor:\n    if head_mask is not None:\n        head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)\n    else:\n        head_mask = [None] * num_hidden_layers\n    return head_mask", "docstring": "Prepare the head mask if needed.\n\nArgs:\nhead_mask (`tf.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):\nThe mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).\nnum_hidden_layers (`int`):\nThe number of hidden layers in the model.\n\nReturns:\n`tf.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with\n`[None]` for each layer.", "source": "github-repos"}
{"code": "def get_decoder_self_attention_bias(length):\n  \n  with tf.name_scope(\"decoder_self_attention_bias\"):\n    valid_locs = tf.matrix_band_part(tf.ones([length, length]), -1, 0)\n    valid_locs = tf.reshape(valid_locs, [1, 1, length, length])\n    decoder_bias = _NEG_INF * (1.0 - valid_locs)\n  return decoder_bias", "docstring": "Calculate bias for decoder that maintains model's autoregressive property.\n\nCreates a tensor that masks out locations that correspond to illegal\nconnections, so prediction at position i cannot draw information from future\npositions.\n\nArgs:\nlength: int length of sequences in batch.\n\nReturns:\nfloat tensor of shape [1, 1, length, length]", "source": "juraj-google-style"}
{"code": "def join(*paths):\n    \n    \n    absolute = False\n    relpaths = []  \n    for p in paths:\n        if p:\n            if p[0] == \"/\":\n                del relpaths[:]\n                absolute = True\n            relpaths.append(p)\n\n    path = normpath(\"/\".join(relpaths))\n    if absolute:\n        path = abspath(path)\n    return path", "docstring": "Join any number of paths together.\n\nArguments:\n*paths (str): Paths to join, given as positional arguments.\n\nReturns:\nstr: The joined path.\n\nExample:\n>>> join('foo', 'bar', 'baz')\n'foo/bar/baz'\n>>> join('foo/bar', '../baz')\n'foo/baz'\n>>> join('foo/bar', '/baz')\n'/baz'", "source": "juraj-google-style"}
{"code": "def encrpyt_file(self, filename):\n        \n        if not os.path.exists(filename):\n            print \"Invalid filename %s. Does not exist\" % filename\n            return\n\n        if self.vault_password is None:\n            print \"ENV Variable PYANSI_VAULT_PASSWORD not set\"\n            return\n\n        if self.is_file_encrypted(filename):\n            \n            return\n\n        cipher = 'AES256'\n        vaulteditor = VaultEditor(cipher, self.vault_password, filename)\n        vaulteditor.encrypt_file()", "docstring": "Encrypt File\nArgs:\nfilename: Pass the filename to encrypt.\nReturns:\nNo return.", "source": "juraj-google-style"}
{"code": "def triangle(duration: int, amp: complex, period: float=None, phase: float=0, name: str=None) -> SamplePulse:\n    if (period is None):\n        period = duration\n    return _sampled_triangle_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates triangle wave `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "codesearchnet"}
{"code": "def ParseRow(header, row):\n    precondition.AssertDictType(row, Text, Text)\n    result = rdf_osquery.OsqueryRow()\n    for column in header.columns:\n        result.values.append(row[column.name])\n    return result", "docstring": "Parses a single row of osquery output.\n\nArgs:\nheader: A parsed header describing the row format.\nrow: A row in a \"parsed JSON\" representation.\n\nReturns:\nA parsed `rdf_osquery.OsqueryRow` instance.", "source": "codesearchnet"}
{"code": "def called_with_tracing(self, function_name, omit_warning):\n    self._call_count += 1\n    self._calls_per_tracings.append(1)\n    while self._calls_per_tracings:\n        if self._call_count - self._calls_per_tracings[0] > FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY:\n            self._call_count -= self._calls_per_tracings.pop(0)\n        else:\n            break\n    if omit_warning or self._total_warning_count >= FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR:\n        return\n    if len(self._calls_per_tracings) >= FREQUENT_TRACING_WARNING_THRESHOLD:\n        self._total_warning_count += 1\n        logging.warning('{} out of the last {} calls to {} triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https:", "docstring": "Updates the list of most recent calls' tracing information.\n\nWarns the user when recent calls caused retracing too often.\n\nArgs:\nfunction_name: the python function being traced.\nomit_warning: If 'True', this call will not warn the user even if\nretracing happens too often.", "source": "github-repos"}
{"code": "def HandleAccounts(self, result):\n    self.logger.debug('Checking for changes to user accounts.')\n    configured_users = self.utils.GetConfiguredUsers()\n    enable_oslogin = self._GetEnableOsLoginValue(result)\n    enable_two_factor = self._GetEnableTwoFactorValue(result)\n    if enable_oslogin:\n        desired_users = {}\n        self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)\n    else:\n        desired_users = self._GetAccountsData(result)\n        self.oslogin.UpdateOsLogin(False)\n    remove_users = sorted((set(configured_users) - set(desired_users.keys())))\n    self._UpdateUsers(desired_users)\n    self._RemoveUsers(remove_users)\n    self.utils.SetConfiguredUsers(desired_users.keys())", "docstring": "Called when there are changes to the contents of the metadata server.\n\nArgs:\nresult: json, the deserialized contents of the metadata server.", "source": "codesearchnet"}
{"code": "class GitVisionEncoder(nn.Module):\n\n    def __init__(self, config: GitVisionConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`GitVisionEncoderLayer`].\n\nArgs:\nconfig: GitVisionConfig", "source": "github-repos"}
{"code": "def argparse_funckw(func, defaults={}, **kwargs):\n    import utool as ut\n    funckw_ = ut.get_funckw(func, recursive=True)\n    funckw_.update(defaults)\n    funckw = ut.argparse_dict(funckw_, **kwargs)\n    return funckw", "docstring": "allows kwargs to be specified on the commandline from testfuncs\n\nArgs:\nfunc (function):\n\nKwargs:\nlbl, verbose, only_specified, force_keys, type_hint, alias_dict\n\nReturns:\ndict: funckw\n\nCommandLine:\npython -m utool.util_inspect argparse_funckw\n\nSeeAlso:\nexec_funckw\nrecursive_parse_kwargs\nparse_kwarg_keys\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_inspect import *  # NOQA\n>>> import utool as ut\n>>> func = get_instance_attrnames\n>>> funckw = argparse_funckw(func)\n>>> result = ('funckw = %s' % (ut.repr3(funckw),))\n>>> print(result)\nfunckw = {\n'default': True,\n'with_methods': True,\n'with_properties': True,\n}", "source": "codesearchnet"}
{"code": "def _process_thread(self, client):\n    \n    file_list = self.files\n    if not file_list:\n      return\n    print('Filefinder to collect {0:d} items'.format(len(file_list)))\n\n    flow_action = flows_pb2.FileFinderAction(\n        action_type=flows_pb2.FileFinderAction.DOWNLOAD)\n    flow_args = flows_pb2.FileFinderArgs(\n        paths=file_list,\n        action=flow_action,)\n    flow_id = self._launch_flow(client, 'FileFinder', flow_args)\n    self._await_flow(client, flow_id)\n    collected_flow_data = self._download_files(client, flow_id)\n    if collected_flow_data:\n      print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))\n      fqdn = client.data.os_info.fqdn.lower()\n      self.state.output.append((fqdn, collected_flow_data))", "docstring": "Process a single client.\n\nArgs:\nclient: GRR client object to act on.", "source": "juraj-google-style"}
{"code": "async def start_server_in_loop(runner, hostname, port, agent):\n    (await runner.setup())\n    agent.web.server = aioweb.TCPSite(runner, hostname, port)\n    (await agent.web.server.start())\n    logger.info(f'Serving on http:", "docstring": "Listens to http requests and sends them to the webapp.\n\nArgs:\nrunner: AppRunner to process the http requests\nhostname: host name to listen from.\nport: port to listen from.\nagent: agent that owns the web app.", "source": "codesearchnet"}
{"code": "def split_raster(rs, split_shp, field_name, temp_dir):\n    UtilClass.rmmkdir(temp_dir)\n    ds = ogr_Open(split_shp)\n    lyr = ds.GetLayer(0)\n    lyr.ResetReading()\n    ft = lyr.GetNextFeature()\n    while ft:\n        cur_field_name = ft.GetFieldAsString(field_name)\n        for r in rs:\n            cur_file_name = r.split(os.sep)[(- 1)]\n            outraster = ((temp_dir + os.sep) + cur_file_name.replace('.tif', ('_%s.tif' % cur_field_name.replace(' ', '_'))))\n            subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp, '-crop_to_cutline', '-cwhere', (\"'%s'='%s'\" % (field_name, cur_field_name)), '-dstnodata', '-9999'])\n        ft = lyr.GetNextFeature()\n    ds = None", "docstring": "Split raster by given shapefile and field name.\n\nArgs:\nrs: origin raster file.\nsplit_shp: boundary (ESRI Shapefile) used to spilt raster.\nfield_name: field name identify the spilt value.\ntemp_dir: directory to store the spilt rasters.", "source": "codesearchnet"}
{"code": "def select_one(self, selector):\n    result = list(self.select(selector))\n    if (len(result) > 1):\n        raise ValueError(('Found more than one model matching %s: %r' % (selector, result)))\n    if (len(result) == 0):\n        return None\n    return result[0]", "docstring": "Query this document for objects that match the given selector.\nRaises an error if more than one object is found.  Returns\nsingle matching object, or None if nothing is found\n\nArgs:\nselector (JSON-like query dictionary) : you can query by type or by\nname, e.g. ``{\"type\": HoverTool}``, ``{\"name\": \"mycircle\"}``\n\nReturns:\nModel or None", "source": "codesearchnet"}
{"code": "def getEvents(self):\n    events = []\n    for json in self.conn.endpoints['self'].getEvents():\n        events.append(SkypeEvent.fromRaw(self, json))\n    return events", "docstring": "Retrieve a list of events since the last poll.  Multiple calls may be needed to retrieve all events.\n\nIf no events occur, the API will block for up to 30 seconds, after which an empty list is returned.  As soon as\nan event is received in this time, it is returned immediately.\n\nReturns:\n:class:`.SkypeEvent` list: a list of events, possibly empty", "source": "codesearchnet"}
{"code": "def generate_ngram_data_set(self, token_list, n=2):\n        \n        n_gram_tuple_zip = self.generate_tuple_zip(token_list, n)\n        n_gram_tuple_list = [n_gram_tuple for n_gram_tuple in n_gram_tuple_zip]\n        n_gram_data_set = self.generate_tuple_zip(n_gram_tuple_list, 2)\n        return n_gram_data_set", "docstring": "Generate the N-gram's pair.\n\nArgs:\ntoken_list:     The list of tokens.\nn               N\n\nReturns:\nzip of Tuple(Training N-gram data, Target N-gram data)", "source": "juraj-google-style"}
{"code": "def _ParseMRUListExEntryValue(\n      self, parser_mediator, registry_key, entry_index, entry_number,\n      codepage='cp1252', **kwargs):\n    \n    value_string = ''\n\n    value = registry_key.GetValueByName('{0:d}'.format(entry_number))\n    if value is None:\n      parser_mediator.ProduceExtractionWarning(\n          'missing MRUListEx value: {0:d} in key: {1:s}.'.format(\n              entry_number, registry_key.path))\n\n    elif not value.DataIsBinaryData():\n      logger.debug((\n          '[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '\n          '{2:s}.').format(self.NAME, entry_number, registry_key.path))\n\n    elif value.data:\n      shell_items_parser = shell_items.ShellItemsParser(registry_key.path)\n      shell_items_parser.ParseByteStream(\n          parser_mediator, value.data, codepage=codepage)\n\n      value_string = 'Shell item path: {0:s}'.format(\n          shell_items_parser.CopyToPath())\n\n    return value_string", "docstring": "Parses the MRUListEx entry value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\nthe MRUListEx value.\nentry_index (int): MRUListEx entry index.\nentry_number (int): entry number.\ncodepage (Optional[str]): extended ASCII string codepage.\n\nReturns:\nstr: MRUList entry value.", "source": "juraj-google-style"}
{"code": "def cleave_sequence(input_layer, unroll=None):\n  \n  if unroll is None:\n    raise ValueError('You must set unroll either here or in the defaults.')\n\n  shape = input_layer.shape\n  if shape[0] is not None and shape[0] % unroll != 0:\n    raise ValueError('Must divide the split dimension evenly: %d mod %d != 0' %\n                     (shape[0], unroll))\n\n  if unroll <= 0:\n    raise ValueError('Unroll must be > 0: %s' % unroll)\n  elif unroll == 1:\n    splits = [input_layer.tensor]\n  else:\n    splits = tf.split(\n        value=input_layer.tensor, num_or_size_splits=unroll, axis=0)\n  result = input_layer.with_sequence(splits)\n\n  \n  \n  defaults = result.defaults\n  if 'unroll' in defaults:\n    del defaults['unroll']\n  return result", "docstring": "Cleaves a tensor into a sequence, this is the inverse of squash.\n\nRecurrent methods unroll across an array of Tensors with each one being a\ntimestep.  This cleaves the first dim so that each it is an array of Tensors.\nIt is the inverse of squash_sequence.\n\nArgs:\ninput_layer: The input layer.\nunroll: The number of time steps.\nReturns:\nA PrettyTensor containing an array of tensors.\nRaises:\nValueError: If unroll is not specified and it has no default or it is <= 0.", "source": "juraj-google-style"}
{"code": "def copy(self, src, dst, other_system=None):\n        \n        container, obj = self.split_locator(src)\n        with _handle_client_exception():\n            self.client.copy_object(\n                container=container, obj=obj, destination=self.relpath(dst))", "docstring": "Copy object of the same storage.\n\nArgs:\nsrc (str): Path or URL.\ndst (str): Path or URL.\nother_system (pycosio._core.io_system.SystemBase subclass): Unused.", "source": "juraj-google-style"}
{"code": "def parse_string_descriptor(string_desc):\n    if (not isinstance(string_desc, str)):\n        string_desc = str(string_desc)\n    if (not string_desc.endswith(';')):\n        string_desc += ';'\n    parsed = get_streamer_parser().parseString(string_desc)[0]\n    realtime = ('realtime' in parsed)\n    broadcast = ('broadcast' in parsed)\n    encrypted = (('security' in parsed) and (parsed['security'] == 'encrypted'))\n    signed = (('security' in parsed) and (parsed['security'] == 'signed'))\n    auto = ('manual' not in parsed)\n    with_other = None\n    if ('with_other' in parsed):\n        with_other = parsed['with_other']\n        auto = False\n    dest = SlotIdentifier.FromString('controller')\n    if ('explicit_tile' in parsed):\n        dest = parsed['explicit_tile']\n    selector = parsed['selector']\n    if (realtime and (encrypted or signed)):\n        raise SensorGraphSemanticError('Realtime streamers cannot be either signed or encrypted')\n    if (broadcast and (encrypted or signed)):\n        raise SensorGraphSemanticError('Broadcast streamers cannot be either signed or encrypted')\n    report_type = ('broadcast' if broadcast else 'telegram')\n    dest = dest\n    selector = selector\n    if (realtime or broadcast):\n        report_format = u'individual'\n    elif signed:\n        report_format = u'signedlist_userkey'\n    elif encrypted:\n        raise SensorGraphSemanticError('Encrypted streamers are not yet supported')\n    else:\n        report_format = u'hashedlist'\n    return DataStreamer(selector, dest, report_format, auto, report_type=report_type, with_other=with_other)", "docstring": "Parse a string descriptor of a streamer into a DataStreamer object.\n\nArgs:\nstring_desc (str): The string descriptor that we wish to parse.\n\nReturns:\nDataStreamer: A DataStreamer object representing the streamer.", "source": "codesearchnet"}
{"code": "def switch_to_frame(self, frame):\n    if isinstance(frame, Element):\n        self.driver.switch_to_frame(frame)\n        self._scopes.append('frame')\n    elif (frame == 'parent'):\n        if (self._scopes[(- 1)] != 'frame'):\n            raise ScopeError('`switch_to_frame(\"parent\")` cannot be called from inside a descendant frame\\'s `scope` context.')\n        self._scopes.pop()\n        self.driver.switch_to_frame('parent')\n    elif (frame == 'top'):\n        if ('frame' in self._scopes):\n            idx = self._scopes.index('frame')\n            if any([(scope not in ['frame', None]) for scope in self._scopes[idx:]]):\n                raise ScopeError('`switch_to_frame(\"top\")` cannot be called from inside a descendant frame\\'s `scope` context.')\n            self._scopes = self._scopes[:idx]\n            self.driver.switch_to_frame('top')\n    else:\n        raise ValueError('You must provide a frame element, \"parent\", or \"top\" when calling switch_to_frame')", "docstring": "Switch to the given frame.\n\nIf you use this method you are responsible for making sure you switch back to the parent\nframe when done in the frame changed to. :meth:`frame` is preferred over this method and\nshould be used when possible. May not be supported by all drivers.\n\nArgs:\nframe (Element | str): The iframe/frame element to switch to.", "source": "codesearchnet"}
{"code": "def sort(x, axis=-1):\n    if any_symbolic_tensors((x,)):\n        return Sort(axis=axis).symbolic_call(x)\n    return backend.numpy.sort(x, axis=axis)", "docstring": "Sorts the elements of `x` along a given axis in ascending order.\n\nArgs:\nx: Input tensor.\naxis: Axis along which to sort. If `None`, the tensor is flattened\nbefore sorting. Defaults to `-1`; the last axis.\n\nReturns:\nSorted tensor.", "source": "github-repos"}
{"code": "def Serialize(self, writer):\n        \n        super(ContractState, self).Serialize(writer)\n\n        self.Code.Serialize(writer)\n        writer.WriteUInt8(self.ContractProperties)\n        writer.WriteVarString(self.Name)\n        writer.WriteVarString(self.CodeVersion)\n        writer.WriteVarString(self.Author)\n        writer.WriteVarString(self.Email)\n        writer.WriteVarString(self.Description)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):\n    fname = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, (fileroot + GOLD_STANDARD_BLOCKS_EXT))\n    with io.open(fname, mode='r') as f:\n        data = f.read()\n    if split_blocks:\n        return filter(None, data[:(- 1)].split('\\n'))\n    return filter(None, data)", "docstring": "Read the gold standard blocks file corresponding to identifier ``fileroot``\nin the gold standard blocks directory below the root ``data_dir``.\n\nArgs:\ndata_dir (str)\nfileroot (str)\nsplit_blocks (bool): If True, split the file's content into blocks.\n\nReturns:\nstr or List[str]", "source": "codesearchnet"}
{"code": "def dot(poly1, poly2):\n    \n    if not isinstance(poly1, Poly) and not isinstance(poly2, Poly):\n        return numpy.dot(poly1, poly2)\n\n    poly1 = Poly(poly1)\n    poly2 = Poly(poly2)\n\n    poly = poly1*poly2\n    if numpy.prod(poly1.shape) <= 1 or numpy.prod(poly2.shape) <= 1:\n        return poly\n    return chaospy.poly.sum(poly, 0)", "docstring": "Dot product of polynomial vectors.\n\nArgs:\npoly1 (Poly) : left part of product.\npoly2 (Poly) : right part of product.\n\nReturns:\n(Poly) : product of poly1 and poly2.\n\nExamples:\n>>> poly = cp.prange(3, 1)\n>>> print(poly)\n[1, q0, q0^2]\n>>> print(cp.dot(poly, numpy.arange(3)))\n2q0^2+q0\n>>> print(cp.dot(poly, poly))\nq0^4+q0^2+1", "source": "juraj-google-style"}
{"code": "def dumps(ms, single=False, pretty_print=False, **kwargs):\n    if single:\n        ms = [ms]\n    return serialize(ms, pretty_print=pretty_print, **kwargs)", "docstring": "Serialize an Xmrs object to the Prolog representation\n\nArgs:\nms: an iterator of Xmrs objects to serialize (unless the\n*single* option is `True`)\nsingle: if `True`, treat *ms* as a single Xmrs object instead\nof as an iterator\npretty_print: if `True`, add newlines and indentation\nReturns:\nthe Prolog string representation of a corpus of Xmrs", "source": "codesearchnet"}
{"code": "def _extend_with_testcase(test_dict, testcase_def_dict):\n    \n    \n    testcase_def_dict[\"config\"].setdefault(\"variables\", {})\n    testcase_def_variables = utils.ensure_mapping_format(testcase_def_dict[\"config\"].get(\"variables\", {}))\n    testcase_def_variables.update(test_dict.pop(\"variables\", {}))\n    testcase_def_dict[\"config\"][\"variables\"] = testcase_def_variables\n\n    \n    \n    test_base_url = test_dict.pop(\"base_url\", \"\")\n    if not testcase_def_dict[\"config\"].get(\"base_url\"):\n        testcase_def_dict[\"config\"][\"base_url\"] = test_base_url\n\n    \n    test_name = test_dict.pop(\"name\", None) \\\n        or testcase_def_dict[\"config\"].pop(\"name\", None) \\\n        or \"testcase name undefined\"\n\n    \n    testcase_def_dict[\"config\"].update(test_dict)\n    testcase_def_dict[\"config\"][\"name\"] = test_name\n\n    test_dict.clear()\n    test_dict.update(testcase_def_dict)", "docstring": "extend test with testcase definition\ntest will merge and override testcase config definition.\n\nArgs:\ntest_dict (dict): test block\ntestcase_def_dict (dict): testcase definition\n\nReturns:\ndict: extended test dict.", "source": "juraj-google-style"}
{"code": "def FromMany(cls, samples):\n    \n    if not samples:\n      raise ValueError(\"Empty `samples` argument\")\n\n    return IOSample(\n        timestamp=max(sample.timestamp for sample in samples),\n        read_bytes=max(sample.read_bytes for sample in samples),\n        write_bytes=max(sample.write_bytes for sample in samples))", "docstring": "Constructs a single sample that best represents a list of samples.\n\nArgs:\nsamples: An iterable collection of `IOSample` instances.\n\nReturns:\nAn `IOSample` instance representing `samples`.\n\nRaises:\nValueError: If `samples` is empty.", "source": "juraj-google-style"}
{"code": "def manufacturer(self):\n    buf = ctypes.cast(self.sManu, ctypes.c_char_p).value\n    return (buf.decode() if buf else None)", "docstring": "Returns the name of the manufacturer of the device.\n\nArgs:\nself (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance\n\nReturns:\nManufacturer name.", "source": "codesearchnet"}
{"code": "def format(self, number, **kwargs):\n    if check_type(number, 'list'):\n        return map((lambda val: self.format(val, **kwargs)))\n    number = self.parse(number)\n    if check_type(kwargs, 'dict'):\n        options = self.settings['number'].update(kwargs)\n    precision = self._change_precision(options['precision'])\n    negative = (lambda num: ('-' if (num < 0) else ''))(number)\n    base = str(int(self.to_fixed((abs(number) or 0), precision)), 10)\n    mod = (lambda num: ((len(num) % 3) if (len(num) > 3) else 0))(base)\n    num = (negative + (lambda num: (base[0:num] if num else ''))(mod))\n    num += re.sub('/(\\\\d{3})(?=\\\\d)/g', ('$1' + options['thousand']), base[mod:])\n    num += (lambda val: ((options['decimal'] + self.to_fixed(abs(number), precision).split('.')[1]) if val else ''))(precision)\n    return num", "docstring": "Format a given number.\n\nFormat a number, with comma-separated thousands and\ncustom precision/decimal places\n\nLocalise by overriding the precision and thousand / decimal separators\n2nd parameter `precision` can be an object matching `settings.number`\n\nArgs:\nnumber (TYPE): Description\nprecision (TYPE): Description\nthousand (TYPE): Description\ndecimal (TYPE): Description\n\nReturns:\nname (TYPE): Description", "source": "codesearchnet"}
{"code": "def _setBitOn(x, bitNum):\n    _checkInt(x, minvalue=0, description='input value')\n    _checkInt(bitNum, minvalue=0, description='bitnumber')\n    return (x | (1 << bitNum))", "docstring": "Set bit 'bitNum' to True.\n\nArgs:\n* x (int): The value before.\n* bitNum (int): The bit number that should be set to True.\n\nReturns:\nThe value after setting the bit. This is an integer.\n\nFor example:\nFor x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).", "source": "codesearchnet"}
{"code": "def rekey(self, uid=None, offset=None, **kwargs):\n    if (uid is not None):\n        if (not isinstance(uid, six.string_types)):\n            raise TypeError('The unique identifier must be a string.')\n    if (offset is not None):\n        if (not isinstance(offset, six.integer_types)):\n            raise TypeError('The offset must be an integer.')\n    attributes = []\n    if kwargs.get('activation_date'):\n        attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.ACTIVATION_DATE, kwargs.get('activation_date')))\n    if kwargs.get('process_start_date'):\n        attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.PROCESS_START_DATE, kwargs.get('process_start_date')))\n    if kwargs.get('protect_stop_date'):\n        attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.PROTECT_STOP_DATE, kwargs.get('protect_stop_date')))\n    if kwargs.get('deactivation_date'):\n        attributes.append(self.attribute_factory.create_attribute(enums.AttributeType.DEACTIVATION_DATE, kwargs.get('deactivation_date')))\n    template_attribute = cobjects.TemplateAttribute(attributes=attributes)\n    result = self.proxy.rekey(uuid=uid, offset=offset, template_attribute=template_attribute)\n    status = result.get('result_status')\n    if (status == enums.ResultStatus.SUCCESS):\n        return result.get('unique_identifier')\n    else:\n        raise exceptions.KmipOperationFailure(status, result.get('result_reason'), result.get('result_message'))", "docstring": "Rekey an existing key.\n\nArgs:\nuid (string): The unique ID of the symmetric key to rekey.\nOptional, defaults to None.\noffset (int): The time delta, in seconds, between the new key's\ninitialization date and activation date. Optional, defaults\nto None.\n**kwargs (various): A placeholder for object attributes that\nshould be set on the newly rekeyed key. Currently\nsupported attributes include:\nactivation_date (int)\nprocess_start_date (int)\nprotect_stop_date (int)\ndeactivation_date (int)\n\nReturns:\nstring: The unique ID of the newly rekeyed key.\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input arguments are invalid", "source": "codesearchnet"}
{"code": "def _gal2idx(self, gal):\n        \n\n        \n        l = coordinates.Longitude(gal.l, wrap_angle=180.*units.deg)\n\n        j = (self._inv_pix_scale * (l.deg - self._l_bounds[0])).astype('i4')\n        k = (self._inv_pix_scale * (gal.b.deg - self._b_bounds[0])).astype('i4')\n\n        idx = (j < 0) | (j >= self._shape[0]) | (k < 0) | (k >= self._shape[1])\n\n        if np.any(idx):\n            j[idx] = -1\n            k[idx] = -1\n\n        return j, k, ~idx", "docstring": "Converts from Galactic coordinates to pixel indices.\n\nArgs:\ngal (:obj:`astropy.coordinates.SkyCoord`): Galactic coordinates. Must\nstore an array of coordinates (i.e., not be scalar).\n\nReturns:\n``j, k, mask`` - Pixel indices of the coordinates, as well as a mask\nof in-bounds coordinates. Outputs have the same shape as the input\ncoordinates.", "source": "juraj-google-style"}
{"code": "def check_captcha(self, captcha_id, solution, author_name=None, author_url=None, author_mail=None, author_ip=None, author_id=None, author_open_id=None, honeypot=None):\n    check_catpcha_endpoint = Template('${rest_root}/captcha/${captcha_id}')\n    url = check_catpcha_endpoint.substitute(rest_root=self._rest_root, captcha_id=captcha_id)\n    data = {'solution': solution}\n    response = self.__post_request(url, data)\n    return (response['captcha']['solved'] == '1')", "docstring": "Checks a CAPTCHA that was solved by the end-user.\n\nKeyword arguments:\ncaptcha_id -- Unique identifier of the CAPTCHA solved.\nsolution -- Solution provided by the end-user for the CAPTCHA.\nauthor_name -- The name of the content author.\nauthor_url -- The homepage/website URL of the content author.\nauthor_mail -- The e-mail address of the content author.\nauthor_ip -- The IP address of the content author.\nauthor_id -- The local user ID on the client site of the content author.\nauthor_open_id -- List of Open IDs of the content author.\nhoneypot -- The value of a client-side honeypot form element, if non-empty.\n\nReturns:\nsolved -- Boolean whether or not the CAPTCHA was solved correctly.\nIf the CAPTCHA is associated with an unsure contents, it is recommended to recheck the content.", "source": "codesearchnet"}
{"code": "def set_voltage(self, volt, ramp=False):\n    if ramp:\n        self.mon.RampVoltage(self.mon.start_voltage, volt)\n    else:\n        self.mon.SetVoltage(volt)", "docstring": "Sets the output voltage of monsoon.\n\nArgs:\nvolt: Voltage to set the output to.\nramp: If true, the output voltage will be increased gradually to\nprevent tripping Monsoon overvoltage.", "source": "codesearchnet"}
{"code": "def flip_variable(self, v):\n    adj = self.adj\n    linear = self.linear\n    quadratic = self.quadratic\n    if (v not in adj):\n        return\n    if (self.vartype is Vartype.SPIN):\n        linear[v] *= (- 1.0)\n        for u in adj[v]:\n            adj[v][u] *= (- 1.0)\n            adj[u][v] *= (- 1.0)\n            if ((u, v) in quadratic):\n                quadratic[(u, v)] *= (- 1.0)\n            elif ((v, u) in quadratic):\n                quadratic[(v, u)] *= (- 1.0)\n            else:\n                raise RuntimeError('quadratic is missing an interaction')\n    elif (self.vartype is Vartype.BINARY):\n        self.offset += linear[v]\n        linear[v] *= (- 1)\n        for u in adj[v]:\n            bias = adj[v][u]\n            adj[v][u] *= (- 1.0)\n            adj[u][v] *= (- 1.0)\n            linear[u] += bias\n            if ((u, v) in quadratic):\n                quadratic[(u, v)] *= (- 1.0)\n            elif ((v, u) in quadratic):\n                quadratic[(v, u)] *= (- 1.0)\n            else:\n                raise RuntimeError('quadratic is missing an interaction')\n    else:\n        raise RuntimeError('Unexpected vartype')\n    try:\n        self._counterpart.flip_variable(v)\n    except AttributeError:\n        pass", "docstring": "Flip variable v in a binary quadratic model.\n\nArgs:\nv (variable):\nVariable in the binary quadratic model. If v is not in the binary\nquadratic model, it is ignored.\n\nExamples:\nThis example creates a binary quadratic model with two variables and inverts\nthe value of one.\n\n>>> import dimod\n...\n>>> bqm = dimod.BinaryQuadraticModel({1: 1, 2: 2}, {(1, 2): 0.5}, 0.5, dimod.SPIN)\n>>> bqm.flip_variable(1)\n>>> bqm.linear[1], bqm.linear[2], bqm.quadratic[(1, 2)]\n(-1.0, 2, -0.5)", "source": "codesearchnet"}
{"code": "def datasets_get(self, dataset_name):\n    url = (Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name))\n    return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a dataset.\n\nArgs:\ndataset_name: the name of the dataset\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "class XGBoostModelHandlerPandas(XGBoostModelHandler[pandas.DataFrame, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):\n\n    def run_inference(self, batch: Sequence[pandas.DataFrame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n        \n        return self._inference_fn(batch, model, inference_args)\n\n    def get_num_bytes(self, batch: Sequence[pandas.DataFrame]) -> int:\n        \n        return sum((df.memory_usage(deep=True).sum() for df in batch))", "docstring": "Implementation of the ModelHandler interface for XGBoost\nusing pandas dataframes as input.\n\nExample Usage::\n\npcoll | RunInference(\nXGBoostModelHandlerPandas(\nmodel_class=\"XGBoost Model Class\",\nmodel_state=\"my_model_state.json\")))\n\nArgs:\nmodel_class: class of the XGBoost model that defines the model\nstructure.\nmodel_state: path to a json file that contains the model's\nconfiguration.\ninference_fn: the inference function to use during RunInference.\ndefault=default_xgboost_inference_fn", "source": "github-repos"}
{"code": "def _add_open_file(self, file_obj):\n        \n        if self._free_fd_heap:\n            open_fd = heapq.heappop(self._free_fd_heap)\n            self.open_files[open_fd] = [file_obj]\n            return open_fd\n\n        self.open_files.append([file_obj])\n        return len(self.open_files) - 1", "docstring": "Add file_obj to the list of open files on the filesystem.\nUsed internally to manage open files.\n\nThe position in the open_files array is the file descriptor number.\n\nArgs:\nfile_obj: File object to be added to open files list.\n\nReturns:\nFile descriptor number for the file object.", "source": "juraj-google-style"}
{"code": "def remove_node(self, node_id, force=False):\n        \n        url = self._url('/nodes/{0}', node_id)\n        params = {\n            'force': force\n        }\n        res = self._delete(url, params=params)\n        self._raise_for_status(res)\n        return True", "docstring": "Remove a node from the swarm.\n\nArgs:\nnode_id (string): ID of the node to be removed.\nforce (bool): Force remove an active node. Default: `False`\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the node referenced doesn't exist in the swarm.\n\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\nReturns:\n`True` if the request was successful.", "source": "juraj-google-style"}
{"code": "async def _async_start(self, auto_register=True):\n        \n\n        if auto_register:\n            await self._async_register()\n        self.client = aioxmpp.PresenceManagedClient(self.jid,\n                                                    aioxmpp.make_security_layer(self.password,\n                                                                                no_verify=not self.verify_security),\n                                                    loop=self.loop,\n                                                    logger=logging.getLogger(self.jid.localpart))\n\n        \n        self.message_dispatcher = self.client.summon(SimpleMessageDispatcher)\n\n        \n        self.presence = PresenceManager(self)\n\n        await self._async_connect()\n\n        \n        self.message_dispatcher.register_callback(\n            aioxmpp.MessageType.CHAT,\n            None,\n            self._message_received,\n        )\n        await self.setup()\n        self._alive.set()\n        for behaviour in self.behaviours:\n            if not behaviour.is_running:\n                behaviour.start()", "docstring": "Starts the agent from a coroutine. This fires some actions:\n\n* if auto_register: register the agent in the server\n* runs the event loop\n* connects the agent to the server\n* runs the registered behaviours\n\nArgs:\nauto_register (bool, optional): register the agent in the server (Default value = True)", "source": "juraj-google-style"}
{"code": "def process_git_configs(git_short=''):\n    \n    LOG.info('Processing application.json files from GitLab \"%s\".', git_short)\n    file_lookup = FileLookup(git_short=git_short)\n    app_configs = process_configs(file_lookup,\n                                  RUNWAY_BASE_PATH + '/application-master-{env}.json',\n                                  RUNWAY_BASE_PATH + '/pipeline.json')\n    commit_obj = file_lookup.project.commits.get('master')\n    config_commit = commit_obj.attributes['id']\n    LOG.info('Commit ID used: %s', config_commit)\n    app_configs['pipeline']['config_commit'] = config_commit\n    return app_configs", "docstring": "Retrieve _application.json_ files from GitLab.\n\nArgs:\ngit_short (str): Short Git representation of repository, e.g.\nforrest/core.\n\nReturns:\ncollections.defaultdict: Configurations stored for each environment\nfound.", "source": "juraj-google-style"}
{"code": "def delete(self, membershipId):\n        \n        check_type(membershipId, basestring)\n\n        \n        self._session.delete(API_ENDPOINT + '/' + membershipId)", "docstring": "Delete a membership, by ID.\n\nArgs:\nmembershipId(basestring): The membership ID.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"}
{"code": "def parse_args(args):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--steps', dest='steps', type=_parse_steps, help='A JSON string that gives a list where each entry of the list is configuration information for a step. Configuration for each step consists of (1) A float \"per_bundle_delay_sec\" (in seconds). Defaults to 0.(2) A float \"per_element_delay_msec\" (in milli seconds).     Defaults to 0.(3) An integer \"output_records_per_input_record\". Defaults to 1.(4) A float \"output_filter_ratio\" in the range [0, 1] .     Defaults to 0.(5) A bool \"splittable\" that defaults to false.(6) An integer \"initial_splitting_num_bundles\". Defaults to 8.')\n    parser.add_argument('--input', dest='input', type=json.loads, help='A JSON string that describes the properties of the SyntheticSource used by the pipeline. Configuration is similar to Java SyntheticBoundedInput.Currently supports following properties. (1) An integer \"numRecords\". (2) An integer \"keySize\". (3) An integer \"valueSize\". (4) A tuple \"bundleSizeDistribution\" with following values.     A string \"type\". Allowed values are \"const\" and \"zipf\".     An float \"param\". Only used if \"type\"==\"zipf\". Must be     larger than 1. (5) An integer \"forceNumInitialBundles\". (6) An integer \"splitPointFrequencyRecords\". (7) A tuple \"delayDistribution\" with following values.     A string \"type\". Only allowed value is \"const\".     An integer \"const\". (8) A string \"algorithm\". Allowed values are \"builtin\" for Python     builtin random generator, and \"lcg\" for the linear congruential     generator equivalent to Java (java.util.Random).')\n    parser.add_argument('--barrier', dest='barrier', default='shuffle', choices=['shuffle', 'side-input', 'expand-gbk', 'expand-second-output', 'merge-gbk', 'merge-side-input'], help='Whether to use shuffle as the barrier (as opposed to side inputs).')\n    parser.add_argument('--output', dest='output', default='', help='Destination to write output.')\n    return parser.parse_known_args(args)", "docstring": "Parses a given set of arguments.\n\nArgs:\nargs: set of arguments to be passed.\n\nReturns:\na tuple where first item gives the set of arguments defined and parsed\nwithin this method and second item gives the set of unknown arguments.", "source": "github-repos"}
{"code": "def sort_objects_in_import(import_statement: str) -> str:\n\n    def _replace(match):\n        imports = match.groups()[0]\n        if ',' not in imports:\n            return f'[{imports}]'\n        keys = [part.strip().replace('\"', '') for part in imports.split(',')]\n        if len(keys[-1]) == 0:\n            keys = keys[:-1]\n        return '[' + ', '.join([f'\"{k}\"' for k in sort_objects(keys)]) + ']'\n    lines = import_statement.split('\\n')\n    if len(lines) > 3:\n        idx = 2 if lines[1].strip() == '[' else 1\n        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]\n        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])\n        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]\n        return '\\n'.join(lines[:idx] + sorted_lines + lines[-idx:])\n    elif len(lines) == 3:\n        if _re_bracket_content.search(lines[1]) is not None:\n            lines[1] = _re_bracket_content.sub(_replace, lines[1])\n        else:\n            keys = [part.strip().replace('\"', '') for part in lines[1].split(',')]\n            if len(keys[-1]) == 0:\n                keys = keys[:-1]\n            lines[1] = get_indent(lines[1]) + ', '.join([f'\"{k}\"' for k in sort_objects(keys)])\n        return '\\n'.join(lines)\n    else:\n        import_statement = _re_bracket_content.sub(_replace, import_statement)\n        return import_statement", "docstring": "Sorts the imports in a single import statement.\n\nArgs:\nimport_statement (`str`): The import statement in which to sort the imports.\n\nReturns:\n`str`: The same as the input, but with objects properly sorted.", "source": "github-repos"}
{"code": "def cudnn_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None, seq_lengths=None, initial_h=None, initial_c=None, name='cudnn_lstm', reuse=False):\n    with tf.variable_scope(name, reuse=reuse):\n        lstm = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers=n_layers, num_units=n_hidden)\n        if trainable_initial_states:\n            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])\n            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))\n            init_c = tf.get_variable('init_c', [n_layers, 1, n_hidden])\n            init_c = tf.tile(init_c, (1, tf.shape(units)[0], 1))\n        else:\n            init_h = init_c = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])\n        initial_h = (initial_h or init_h)\n        initial_c = (initial_c or init_c)\n        (h, (h_last, c_last)) = lstm(tf.transpose(units, (1, 0, 2)), (initial_h, initial_c))\n        h = tf.transpose(h, (1, 0, 2))\n        h_last = h_last[(- 1)]\n        c_last = c_last[(- 1)]\n        if (seq_lengths is not None):\n            indices = tf.stack([tf.range(tf.shape(h)[0]), (seq_lengths - 1)], axis=1)\n            h_last = tf.gather_nd(h, indices)\n        return (h, (h_last, c_last))", "docstring": "Fast CuDNN LSTM implementation\n\nArgs:\nunits: tf.Tensor with dimensions [B x T x F], where\nB - batch size\nT - number of tokens\nF - features\nn_hidden: dimensionality of hidden state\nn_layers: number of layers\ntrainable_initial_states: whether to create a special trainable variable\nto initialize the hidden states of the network or use just zeros\nseq_lengths: tensor of sequence lengths with dimension [B]\ninitial_h: optional initial hidden state, masks trainable_initial_states\nif provided\ninitial_c: optional initial cell state, masks trainable_initial_states\nif provided\nname: name of the variable scope to use\nreuse:whether to reuse already initialized variable\n\n\nReturns:\nh - all hidden states along T dimension,\ntf.Tensor with dimensionality [B x T x F]\nh_last - last hidden state, tf.Tensor with dimensionality [B x H]\nwhere H - number of hidden units\nc_last - last cell state, tf.Tensor with dimensionality [B x H]\nwhere H - number of hidden units", "source": "codesearchnet"}
{"code": "def transition_retry(self, pipeline_key, retry_message):\n\n    def txn():\n        pipeline_record = db.get(pipeline_key)\n        if (pipeline_record is None):\n            logging.warning('Tried to retry pipeline ID \"%s\" but it does not exist.', pipeline_key.name())\n            raise db.Rollback()\n        if (pipeline_record.status not in (_PipelineRecord.WAITING, _PipelineRecord.RUN)):\n            logging.warning('Tried to retry pipeline ID \"%s\", found bad state: %s', pipeline_key.name(), pipeline_record.status)\n            raise db.Rollback()\n        params = pipeline_record.params\n        offset_seconds = (params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt))\n        pipeline_record.next_retry_time = (self._gettime() + datetime.timedelta(seconds=offset_seconds))\n        pipeline_record.current_attempt += 1\n        pipeline_record.retry_message = retry_message\n        pipeline_record.status = _PipelineRecord.WAITING\n        if (pipeline_record.current_attempt >= pipeline_record.max_attempts):\n            root_pipeline_key = _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record)\n            logging.warning('Giving up on pipeline ID \"%s\" after %d attempt(s); causing abort all the way to the root pipeline ID \"%s\"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name())\n            pipeline_record.abort_message = ('Aborting after %d attempts' % pipeline_record.current_attempt)\n            task = taskqueue.Task(url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key))\n            task.add(queue_name=self.queue_name, transactional=True)\n        else:\n            task = taskqueue.Task(url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target'])\n            task.add(queue_name=self.queue_name, transactional=True)\n        pipeline_record.put()\n    db.run_in_transaction(txn)", "docstring": "Marks the given pipeline as requiring another retry.\n\nDoes nothing if all attempts have been exceeded.\n\nArgs:\npipeline_key: db.Key of the _PipelineRecord that needs to be retried.\nretry_message: User-supplied message indicating the reason for the retry.", "source": "codesearchnet"}
{"code": "def get_doctest_files(diff_with_last_commit: bool=False) -> List[str]:\n    repo = Repo(PATH_TO_REPO)\n    test_files_to_run = []\n    if not diff_with_last_commit:\n        print(f'main is at {repo.refs.main.commit}')\n        print(f'Current head is at {repo.head.commit}')\n        branching_commits = repo.merge_base(repo.refs.main, repo.head)\n        for commit in branching_commits:\n            print(f'Branching commit: {commit}')\n        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)\n    else:\n        print(f'main is at {repo.head.commit}')\n        parent_commits = repo.head.commit.parents\n        for commit in parent_commits:\n            print(f'Parent commit: {commit}')\n        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)\n    all_test_files_to_run = get_all_doctest_files()\n    new_test_files = get_new_doctest_files(repo, repo.head.commit, repo.refs.main.commit)\n    test_files_to_run = list(set(test_files_to_run + new_test_files))\n    with open('utils/slow_documentation_tests.txt') as fp:\n        slow_documentation_tests = set(fp.read().strip().split('\\n'))\n    test_files_to_run = [x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests]\n    test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]\n    return sorted(test_files_to_run)", "docstring": "Return a list of python and Markdown files where doc example have been modified between:\n\n- the current head and the main branch if `diff_with_last_commit=False` (default)\n- the current head and its parent commit otherwise.\n\nReturns:\n`List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files\nmodified are returned if the diff in the file is only in doctest examples).", "source": "github-repos"}
{"code": "def _use_datastore(self, key, options=None):\n    flag = ContextOptions.use_datastore(options)\n    if (flag is None):\n        flag = self._datastore_policy(key)\n    if (flag is None):\n        flag = ContextOptions.use_datastore(self._conn.config)\n    if (flag is None):\n        flag = True\n    return flag", "docstring": "Return whether to use the datastore for this key.\n\nArgs:\nkey: Key instance.\noptions: ContextOptions instance, or None.\n\nReturns:\nTrue if the datastore should be used, False otherwise.", "source": "codesearchnet"}
{"code": "def _GetSerializedAttributeContainerByIndex(self, container_type, index):\n    container_list = self._GetSerializedAttributeContainerList(container_type)\n    return container_list.GetAttributeContainerByIndex(index)", "docstring": "Retrieves a specific serialized attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nindex (int): attribute container index.\n\nReturns:\nbytes: serialized attribute container data or None if not available.", "source": "codesearchnet"}
{"code": "def get_node_details(self, node_id: list) -> dict:\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve node details.')\n    node = self._client.nodes.get(node_id)\n    return node.attrs", "docstring": "Get details of a node.\n\nOnly the manager nodes can retrieve details of a node\n\nArgs:\nnode_id (list): List of node ID\n\nReturns:\ndict, details of the node", "source": "codesearchnet"}
{"code": "def from_json_file(cls, filename):\n    with open(filename, 'r') as fp:\n        return cls(json.load(fp))", "docstring": "Load a lexicon from a JSON file.\n\nArgs:\nfilename (str): The path to a JSON dump.", "source": "codesearchnet"}
{"code": "def exceptions(error_is_fatal=True, error_messages=None):\n    \n\n    def exception_decorator(func):\n        nonlocal error_messages\n\n        @functools.wraps(func)\n        def exc_wrapper(*args, **kwargs):\n            nonlocal error_messages\n            try:\n                result = func(*args, **kwargs)\n            except sa.exc.SQLAlchemyError as err:\n                result = None\n                details = None\n                err_type = err.__class__\n                if error_messages and err_type in error_messages:\n                    details = error_messages[err_type]\n                if details:\n                    LOG.error(details)\n                LOG.error(\"For developers: (%s) %s\", err.__class__, str(err))\n                if error_is_fatal:\n                    sys.exit(\"Abort, SQL operation failed.\")\n                if not ui.ask(\n                        \"I can continue at your own risk, do you want that?\"):\n                    raise err\n            return result\n\n        return exc_wrapper\n\n    return exception_decorator", "docstring": "Handle SQLAlchemy exceptions in a sane way.\n\nArgs:\nfunc: An arbitrary function to wrap.\nerror_is_fatal: Should we exit the program on exception?\nreraise: Should we reraise the exception, after logging? Only makes sense\nif error_is_fatal is False.\nerror_messages: A dictionary that assigns an exception class to a\ncustomized error message.", "source": "juraj-google-style"}
{"code": "def run_coroutine(self, cor, *args, **kwargs):\n        \n\n        if self.stopping:\n            raise LoopStoppingError(\"Could not launch coroutine because loop is shutting down: %s\" % cor)\n\n        self.start()\n\n        cor = _instaniate_coroutine(cor, args, kwargs)\n\n        if self.inside_loop():\n            raise InternalError(\"BackgroundEventLoop.run_coroutine called from inside event loop, \"\n                                \"would have deadlocked.\")\n\n        future = self.launch_coroutine(cor)\n        return future.result()", "docstring": "Run a coroutine to completion and return its result.\n\nThis method may only be called outside of the event loop.\nAttempting to call it from inside the event loop would deadlock\nand will raise InternalError instead.\n\nArgs:\ncor (coroutine): The coroutine that we wish to run in the\nbackground and wait until it finishes.\n\nReturns:\nobject: Whatever the coroutine cor returns.", "source": "juraj-google-style"}
{"code": "def get_learning_rate(self, iter):\n        \n        return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power)", "docstring": "Get learning rate with polymomial decay based on current iteration.\n\nArgs:\niter (int): current iteration (starting with 0).\n\nReturns:\nfloat: Learning rate", "source": "juraj-google-style"}
{"code": "def compress(a, b):\n    \n    from difflib import ndiff\n    left = a.splitlines(1) if isinstance(a, string_types) else a\n    right = b.splitlines(1) if isinstance(b, string_types) else b\n    ldiff = list(ndiff(left, right))\n    \n    result = {}\n    latest = None   \n    combo = None\n    icombo = 0\n    iorig = 0\n    \n    for i, line in enumerate(ldiff):\n        cs = [l[0] for l in ldiff[i:min((i+4, len(ldiff)))]]\n        if cs[0] != ' ':\n            \n            if latest is None:\n                latest = iorig\n                result[latest] = []\n                \n            \n            \n            \n            if combo is None:\n                if cs[0] == '-':\n                    \n                    if (len(cs) >=3 and cs[1] == '+' and cs[2] == '?'):\n                        combo = 3\n                    elif (len(cs) >= 4 and cs[1] == '?' and cs[2] == '+'\n                          and cs[3] == '?'):\n                        combo = 4\n                    else:\n                        \n                        combo = 1\n                elif cs[0] == '+':\n                    \n                    combo = 1\n                \n            if icombo < combo:\n                result[latest].append(line)\n                icombo += 1\n            \n            if icombo == combo:\n                if combo > 1:\n                    latest = None\n                combo = None\n                icombo = 0\n                if cs[0] != '+':\n                    iorig += 1\n        else:\n            latest = None\n            iorig += 1\n\n    return result", "docstring": "Performs the *compressed* diff of `a` and `b` such that the original\ncontents of the :func:`difflib.ndiff` call can be reconstructed using\n:func:`~acorn.logging.diff.restore`.\n\nArgs:\na (str or list): *original* string or list of strings to diff.\nb (str or list): *edited* string or list of strings to diff.", "source": "juraj-google-style"}
{"code": "def find_all(self, product_type, short_name, include_hidden=False):\n    all_prods = []\n    if (product_type is None):\n        for prod_dict in self._product_map.values():\n            all_prods.extend([prod for prod in prod_dict.get(short_name, []) if (include_hidden or (not prod.hidden))])\n        return all_prods\n    all_prods = self._product_map.get(product_type, {})\n    return [prod for prod in all_prods.get(short_name, []) if (include_hidden or (not prod.hidden))]", "docstring": "Find all providers of a given product by its short name.\n\nThis function will return all providers of a given product. If you\nwant to ensure that a product's name is unique among all dependencies,\nyou should use find_unique.\n\nArgs:\nproduct_type (str): The type of product that we are looking for, like\nfirmware_image, library etc.\nshort_name (str): The short name of the product that we wish to find,\nusually its os.path.basename()\ninclude_hidden (bool): Return products that are hidden and not selected\nas visible in the depends section of this tile's module settings.\nThis defaults to False.\n\nReturns:\nlist of ProductInfo: A list of all of the matching products.  If no matching\nproducts are found, an empty list is returned.  If you want to raise\na BuildError in that case use find_unique.", "source": "codesearchnet"}
{"code": "def which(self, cmd, parent_environ=None, fallback=False):\n        \n        env = self.get_environ(parent_environ=parent_environ)\n        path = which(cmd, env=env)\n        if fallback and path is None:\n            path = which(cmd)\n        return path", "docstring": "Find a program in the resolved environment.\n\nArgs:\ncmd: String name of the program to find.\nparent_environ: Environment to interpret the context within,\ndefaults to os.environ if None.\nfallback: If True, and the program is not found in the context,\nthe current environment will then be searched.\n\nReturns:\nPath to the program, or None if the program was not found.", "source": "juraj-google-style"}
{"code": "def _CheckStatusAnalysisProcess(self, pid):\n    \n    \n    \n    self._RaiseIfNotRegistered(pid)\n\n    if pid in self._completed_analysis_processes:\n      status_indicator = definitions.STATUS_INDICATOR_COMPLETED\n      process_status = {\n          'processing_status': status_indicator}\n      used_memory = 0\n\n    else:\n      process = self._processes_per_pid[pid]\n\n      process_status = self._QueryProcessStatus(process)\n      if process_status is None:\n        process_is_alive = False\n      else:\n        process_is_alive = True\n\n      process_information = self._process_information_per_pid[pid]\n      used_memory = process_information.GetUsedMemory() or 0\n\n      if self._worker_memory_limit and used_memory > self._worker_memory_limit:\n        logger.warning((\n            'Process: {0:s} (PID: {1:d}) killed because it exceeded the '\n            'memory limit: {2:d}.').format(\n                process.name, pid, self._worker_memory_limit))\n        self._KillProcess(pid)\n\n      if isinstance(process_status, dict):\n        self._rpc_errors_per_pid[pid] = 0\n        status_indicator = process_status.get('processing_status', None)\n\n        if status_indicator == definitions.STATUS_INDICATOR_COMPLETED:\n          self._completed_analysis_processes.add(pid)\n\n      else:\n        rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1\n        self._rpc_errors_per_pid[pid] = rpc_errors\n\n        if rpc_errors > self._MAXIMUM_RPC_ERRORS:\n          process_is_alive = False\n\n        if process_is_alive:\n          rpc_port = process.rpc_port.value\n          logger.warning((\n              'Unable to retrieve process: {0:s} (PID: {1:d}) status via '\n              'RPC socket: http:\n                  process.name, pid, rpc_port))\n\n          processing_status_string = 'RPC error'\n          status_indicator = definitions.STATUS_INDICATOR_RUNNING\n        else:\n          processing_status_string = 'killed'\n          status_indicator = definitions.STATUS_INDICATOR_KILLED\n\n        process_status = {\n            'processing_status': processing_status_string}\n\n    self._UpdateProcessingStatus(pid, process_status, used_memory)\n\n    if status_indicator in definitions.ERROR_STATUS_INDICATORS:\n      logger.error((\n          'Process {0:s} (PID: {1:d}) is not functioning correctly. '\n          'Status code: {2!s}.').format(\n              process.name, pid, status_indicator))\n\n      self._TerminateProcessByPid(pid)", "docstring": "Checks the status of an analysis process.\n\nArgs:\npid (int): process ID (PID) of a registered analysis process.\n\nRaises:\nKeyError: if the process is not registered with the engine.", "source": "juraj-google-style"}
{"code": "def easeOutElastic(n, amplitude=1, period=0.3):\n    _checkRange(n)\n    if (amplitude < 1):\n        amplitude = 1\n        s = (period / 4)\n    else:\n        s = ((period / (2 * math.pi)) * math.asin((1 / amplitude)))\n    return (((amplitude * (2 ** ((- 10) * n))) * math.sin(((n - s) * ((2 * math.pi) / period)))) + 1)", "docstring": "An elastic tween function that overshoots the destination and then \"rubber bands\" into the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def gen_ordered_statistics(transaction_manager, record):\n    \n    items = record.items\n    for combination_set in combinations(sorted(items), len(items) - 1):\n        items_base = frozenset(combination_set)\n        items_add = frozenset(items.difference(items_base))\n        confidence = (\n            record.support / transaction_manager.calc_support(items_base))\n        lift = confidence / transaction_manager.calc_support(items_add)\n        yield OrderedStatistic(\n            frozenset(items_base), frozenset(items_add), confidence, lift)", "docstring": "Returns a generator of ordered statistics as OrderedStatistic instances.\n\nArguments:\ntransaction_manager -- Transactions as a TransactionManager instance.\nrecord -- A support record as a SupportRecord instance.", "source": "juraj-google-style"}
{"code": "def content_type(self):\n    return (self.headers.get('ContentType') or self.headers.get('Content-Type') or _content_types.JSON)", "docstring": "The request's content-type.\n\nReturns:\n(str): The value, if any, of the header 'ContentType' (used by some AWS services) and 'Content-Type'.\nOtherwise, returns 'application/json' as default.", "source": "codesearchnet"}
{"code": "def from_string(cls, jss, xml_string):\n    root = ElementTree.fromstring(xml_string.encode('utf-8'))\n    return cls(jss, root)", "docstring": "Creates a new JSSObject from an UTF-8 XML string.\n\nArgs:\njss: A JSS object.\nxml_string: String XML file data used to create object.", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor):\n    image_tokens = self.get_image_tokens(pixel_values)\n    vision_embeddings = self.get_input_embeddings()(image_tokens)\n    return vision_embeddings", "docstring": "Tokenizes images into discrete tokens with VQGAN module and embeds\nthem with text embeddings layer\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def survival_function(self, value, name='survival_function'):\n    return self._call_survival_function(value, name)", "docstring": "Survival function.\n\nGiven random variable `X`, the survival function is defined:\n\n```none\nsurvival_function(x) = P[X > x]\n= 1 - P[X <= x]\n= 1 - cdf(x).\n```\n\nArgs:\nvalue: `float` or `double` `Tensor`.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\n`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type\n`self.dtype`.", "source": "github-repos"}
{"code": "def get_ssm_parameter(parameter_name):\n    try:\n        response = boto3.client('ssm').get_parameters(Names=[parameter_name], WithDecryption=True)\n        return response.get('Parameters', None)[0].get('Value', '')\n    except Exception:\n        pass\n    return ''", "docstring": "Get the decrypted value of an SSM parameter\n\nArgs:\nparameter_name - the name of the stored parameter of interest\n\nReturn:\nValue if allowed and present else None", "source": "codesearchnet"}
{"code": "def build_input_fns(data_dir, batch_size):\n  \n\n  with open(download(data_dir, \"vocab.pkl\"), \"r\") as f:\n    words_to_idx = pickle.load(f)\n  num_words = len(words_to_idx)\n\n  vocabulary = [None] * num_words\n  for word, idx in words_to_idx.items():\n    vocabulary[idx] = word\n\n  \n  def train_input_fn():\n    dataset = newsgroups_dataset(\n        data_dir, \"train\", num_words, shuffle_and_repeat=True)\n    \n    dataset = dataset.batch(batch_size).prefetch(32)\n    return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()\n\n  \n  def eval_input_fn():\n    dataset = newsgroups_dataset(\n        data_dir, \"test\", num_words, shuffle_and_repeat=False)\n    dataset = dataset.batch(batch_size)\n    return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()\n\n  return train_input_fn, eval_input_fn, vocabulary", "docstring": "Builds iterators for train and evaluation data.\n\nEach object is represented as a bag-of-words vector.\n\nArguments:\ndata_dir: Folder in which to store the data.\nbatch_size: Batch size for both train and evaluation.\n\nReturns:\ntrain_input_fn: A function that returns an iterator over the training data.\neval_input_fn: A function that returns an iterator over the evaluation data.\nvocabulary: A mapping of word's integer index to the corresponding string.", "source": "juraj-google-style"}
{"code": "def letter_score(letter):\n    \n\n    score_map = {\n        1: [\"a\", \"e\", \"i\", \"o\", \"u\", \"l\", \"n\", \"r\", \"s\", \"t\"],\n        2: [\"d\", \"g\"],\n        3: [\"b\", \"c\", \"m\", \"p\"],\n        4: [\"f\", \"h\", \"v\", \"w\", \"y\"],\n        5: [\"k\"],\n        8: [\"j\", \"x\"],\n        10: [\"q\", \"z\"],\n    }\n\n    for score, letters in score_map.items():\n        if letter.lower() in letters:\n            return score\n    else:\n        raise TypeError(\"Invalid letter: %s\", letter)", "docstring": "Returns the Scrabble score of a letter.\n\nArgs:\nletter: a single character string\n\nRaises:\nTypeError if a non-Scrabble character is supplied", "source": "juraj-google-style"}
{"code": "def rename_state_fluent(name: str) -> str:\n    \n    i = name.index('/')\n    functor = name[:i]\n    arity = name[i+1:]\n    return \"{}'/{}\".format(functor, arity)", "docstring": "Returns current state fluent canonical name.\n\nArgs:\nname (str): The next state fluent name.\n\nReturns:\nstr: The current state fluent name.", "source": "juraj-google-style"}
{"code": "def blit(self, src_rect, dst_surf, dst_rect):\n    check_int_err(lib.SDL_UpperBlit(self._ptr, src_rect._ptr, dst_surf._ptr, dst_rect._ptr))", "docstring": "Performs a fast blit from the source surface to the destination surface.\nThis assumes that the source and destination rectangles are\nthe same size.  If either src_rect or dst_rect are None, the entire\nsurface is copied.  The final blit rectangles are saved\nin src_rect and dst_rect after all clipping is performed.\n\nArgs:\nsrc_rect (Rect): Source rect.\ndst_surf (Surface): Destination surface.\ndst_rect (Rect): Destination rect.\n\nRaises:\nSDLError: If the blit fails.", "source": "codesearchnet"}
{"code": "def __init__(self, path, encoding=\"utf-8\", chunk_size=io.DEFAULT_BUFFER_SIZE):\n        \n        if encoding.lower() not in supported_encodings:\n            error_message = \"{0} encoding was not supported/tested.\".format(encoding)\n            error_message += \"Supported encodings are '{0}'\".format(\",\".join(supported_encodings))\n            raise NotImplementedError(error_message)\n\n        self.path = path\n        self.encoding = encoding.lower()\n        self.chunk_size = chunk_size\n        self.iterator = FileReadBackwardsIterator(io.open(self.path, mode=\"rb\"), self.encoding, self.chunk_size)", "docstring": "Constructor for FileReadBackwards.\n\nArgs:\npath: Path to the file to be read\nencoding (str): Encoding\nchunk_size (int): How many bytes to read at a time", "source": "juraj-google-style"}
{"code": "def events_filter(\n            self,\n            topics: List[str] = None,\n            from_block: BlockSpecification = None,\n            to_block: BlockSpecification = None,\n    ) -> StatelessFilter:\n        \n        return self.client.new_filter(\n            self.address,\n            topics=topics,\n            from_block=from_block,\n            to_block=to_block,\n        )", "docstring": "Install a new filter for an array of topics emitted by the contract.\n\nArgs:\ntopics: A list of event ids to filter for. Can also be None,\nin which case all events are queried.\nfrom_block: The block number at which to start looking for events.\nto_block: The block number at which to stop looking for events.\nReturn:\nFilter: The filter instance.", "source": "juraj-google-style"}
{"code": "def read_model_with_mutable_tensors(input_tflite_file):\n    return copy.deepcopy(read_model(input_tflite_file))", "docstring": "Reads a tflite model as a python object with mutable tensors.\n\nSimilar to read_model() with the addition that the returned object has\nmutable tensors (read_model() returns an object with immutable tensors).\n\nNOTE: This API only works for TFLite generated with\n_experimental_use_buffer_offset=false\n\nArgs:\ninput_tflite_file: Full path name to the input tflite file\n\nRaises:\nRuntimeError: If input_tflite_file path is invalid.\nIOError: If input_tflite_file cannot be opened.\n\nReturns:\nA mutable python object corresponding to the input tflite file.", "source": "github-repos"}
{"code": "def encode_row(fields):\n    \n    \n    unicode_fields = [unicode(f) for f in fields]\n    escaped_fields = map(escape, unicode_fields)\n    return _field_delimiter.join(escaped_fields)", "docstring": "Encode a list of column values into a [incr tsdb()] profile line.\n\nEncoding involves escaping special characters for each value, then\njoining the values into a single string with the field delimiter\n(`\"@\"` by default). It does not fill in default values (see\nmake_row()).\n\nArgs:\nfields: a list of column values\nReturns:\nA [incr tsdb()]-encoded string", "source": "juraj-google-style"}
{"code": "def is_old(self):\n    if (not self.processing_started_ts):\n        return True\n    if self.processing_ended_ts:\n        return ((self.processing_ended_ts + DB_CACHE_TIME) < time.time())\n    expected_end_ts = (self.creation_ts + DB_MAX_WAIT_TIME)\n    if (expected_end_ts < time.time()):\n        logger.error('Prosessing timeouted and properites were not set!')\n    return (expected_end_ts < time.time())", "docstring": "Is the object cached for too long, so it should be redownloaded?\n\nSee :attr:`.DB_MAX_WAIT_TIME` and :attr:`.DB_CACHE_TIME` for details.\n\nReturns:\nbool: True if it is.", "source": "codesearchnet"}
{"code": "def GetUserByEmail(self, email):\n    user = self.rpc_helper.GetAccountInfoByEmail(email)\n    return GitkitUser.FromApiResponse(user)", "docstring": "Gets user info by email.\n\nArgs:\nemail: string, the user email.\n\nReturns:\nGitkitUser, containing the user info.", "source": "codesearchnet"}
{"code": "def get_doc_id(document_pb, expected_prefix):\n    (prefix, document_id) = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)\n    if (prefix != expected_prefix):\n        raise ValueError('Unexpected document name', document_pb.name, 'Expected to begin with', expected_prefix)\n    return document_id", "docstring": "Parse a document ID from a document protobuf.\n\nArgs:\ndocument_pb (google.cloud.proto.firestore.v1beta1.\\\ndocument_pb2.Document): A protobuf for a document that\nwas created in a ``CreateDocument`` RPC.\nexpected_prefix (str): The expected collection prefix for the\nfully-qualified document name.\n\nReturns:\nstr: The document ID from the protobuf.\n\nRaises:\nValueError: If the name does not begin with the prefix.", "source": "codesearchnet"}
{"code": "def get_connection(db_type, db_pth, user=None, password=None, name=None):\n    if (db_type == 'sqlite'):\n        print(db_pth)\n        conn = sqlite3.connect(db_pth)\n    elif (db_type == 'mysql'):\n        import mysql.connector\n        conn = mysql.connector.connect(user=user, password=password, database=name)\n    elif (db_type == 'django_mysql'):\n        from django.db import connection as conn\n    else:\n        print('unsupported database type: {}, choices are \"sqlite\", \"mysql\" or \"django_mysql\"'.format(db_type))\n    return conn", "docstring": "Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database\n\nExample:\n>>> from msp2db.db import get_connection\n>>> conn = get_connection('sqlite', 'library.db')\n\nIf using \"mysql\" mysql.connector needs to be installed.\n\nIf using \"django_mysql\" Django needs to be installed.\n\nArgs:\ndb_type (str): Type of database can either be \"sqlite\", \"mysql\" or \"django_mysql\"\n\n\nReturns:\nsql connection object", "source": "codesearchnet"}
{"code": "def _ParseRelationshipsXMLFile(self, xml_data):\n    \n    xml_root = ElementTree.fromstring(xml_data)\n\n    property_files = []\n    for xml_element in xml_root.iter():\n      type_attribute = xml_element.get('Type')\n      if 'properties' in repr(type_attribute):\n        target_attribute = xml_element.get('Target')\n        property_files.append(target_attribute)\n\n    return property_files", "docstring": "Parses the relationships XML file (_rels/.rels).\n\nArgs:\nxml_data (bytes): data of a _rels/.rels XML file.\n\nReturns:\nlist[str]: property file paths. The path is relative to the root of\nthe ZIP file.\n\nRaises:\nzipfile.BadZipfile: if the relationship XML file cannot be read.", "source": "juraj-google-style"}
{"code": "def __init__(self, mutation_list):\n        \n        self.mutation_list = [(i[0], int(i[1]), self._standard_resname(i[2])) for i in mutation_list]\n        self.chains_and_residues = [(i[0], int(i[1])) for i in mutation_list]", "docstring": "Initialize the parameters which indicate what mutations will occur\n\nArgs:\nchain:\nresidue_number:\nmutate_to:", "source": "juraj-google-style"}
{"code": "def get_schedule_distribution(schedule, global_step=None):\n    (interpolation, steps, pmfs) = schedule\n    if (len(pmfs) == 1):\n        return pmfs[0]\n    if (global_step is None):\n        global_step = tf.train.get_or_create_global_step()\n    if (interpolation == 'step'):\n        interpolation_fn = step_interpolation\n    elif (interpolation == 'linear'):\n        interpolation_fn = linear_interpolation\n    else:\n        raise ValueError(('Invalid interpolation strategy: %s' % interpolation))\n    return tf.reshape(tf.py_func(func=(lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs))), inp=[global_step], Tout=tf.float32), [len(pmfs[0])])", "docstring": "Computes the pmf of a schedule given the global_step.\n\nArgs:\nschedule: A schedule tuple, see encode_schedule for details.\nglobal_step: A scalar tensor, the step to query the schedule.\n\nReturns:\nA 1-D tensor of probs, the sampling distribution of the global_step.", "source": "codesearchnet"}
{"code": "def _auditpol_cmd(cmd):\n    \n    ret = salt.modules.cmdmod.run_all(cmd='auditpol {0}'.format(cmd),\n                                      python_shell=True)\n    if ret['retcode'] == 0:\n        return ret['stdout'].splitlines()\n\n    msg = 'Error executing auditpol command: {0}\\n'.format(cmd)\n    msg += '\\n'.join(ret['stdout'])\n    raise CommandExecutionError(msg)", "docstring": "Helper function for running the auditpol command\n\nArgs:\ncmd (str): the auditpol command to run\n\nReturns:\nlist: A list containing each line of the return (splitlines)\n\nRaises:\nCommandExecutionError: If the command encounters an error", "source": "juraj-google-style"}
{"code": "def authorizer(self, schemes, resource, action, request_args):\n    if (not schemes):\n        return (u'', u'')\n    for scheme in schemes:\n        if ((scheme in self.schemes) and self.has_auth_params(scheme)):\n            cred = Context.format_auth_params(self.schemes[scheme][u'params'])\n            if hasattr(self, 'mfa_token'):\n                cred = '{}, mfa_token=\"{}\"'.format(cred, self.mfa_token)\n            return (scheme, cred)\n    raise AuthenticationError(self, schemes)", "docstring": "Construct the Authorization header for a request.\n\nArgs:\nschemes (list of str): Authentication schemes supported for the\nrequested action.\nresource (str): Object upon which an action is being performed.\naction (str): Action being performed.\nrequest_args (list of str): Arguments passed to the action call.\n\nReturns:\n(str, str) A tuple of the auth scheme satisfied, and the credential\nfor the Authorization header or empty strings if none could be\nsatisfied.", "source": "codesearchnet"}
{"code": "def __init__(self, file_name=None, path=None, date=None):\n        \n        self._utils = TcExUtils()\n        self._occurrence_data = {}\n        if file_name is not None:\n            self._occurrence_data['fileName'] = file_name\n        if path is not None:\n            self._occurrence_data['path'] = path\n        if date is not None:\n            self._occurrence_data['date'] = self._utils.format_datetime(\n                date, date_format='%Y-%m-%dT%H:%M:%SZ'\n            )", "docstring": "Initialize Class Properties\n\nArgs:\nfile_name (str, optional): The file name for this occurrence.\npath (str, optional): The file path for this occurrence.\ndate (str, optional): The datetime expression for this occurrence.", "source": "juraj-google-style"}
{"code": "def locked_put(self, credentials):\n        \n        self._create_file_if_needed()\n        _helpers.validate_file(self._filename)\n        f = open(self._filename, 'w')\n        f.write(credentials.to_json())\n        f.close()", "docstring": "Write Credentials to file.\n\nArgs:\ncredentials: Credentials, the credentials to store.\n\nRaises:\nIOError if the file is a symbolic link.", "source": "juraj-google-style"}
{"code": "async def claim_work(context):\n    log.debug('Calling claimWork...')\n    payload = {'workerGroup': context.config['worker_group'], 'workerId': context.config['worker_id'], 'tasks': 1}\n    try:\n        return (await context.queue.claimWork(context.config['provisioner_id'], context.config['worker_type'], payload))\n    except (taskcluster.exceptions.TaskclusterFailure, aiohttp.ClientError) as exc:\n        log.warning('{} {}'.format(exc.__class__, exc))", "docstring": "Find and claim the next pending task in the queue, if any.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict: a dict containing a list of the task definitions of the tasks claimed.", "source": "codesearchnet"}
{"code": "def write_to_path(self, path, suffix='', format='png', overwrite=False):\n    if (os.path.exists(path) and (overwrite is False)):\n        raise ValueError('Error: use ovewrite=True to overwrite images')\n    if (not os.path.exists(path)):\n        os.makedirs(path)\n    for (i, r) in self.iterrows():\n        spath = os.path.join(path, r['project_name'], r['sample_name'])\n        if (not os.path.exists(spath)):\n            os.makedirs(spath)\n        if (suffix == ''):\n            fname = os.path.join(spath, ((r['frame_name'] + '.') + format))\n        else:\n            fname = os.path.join(spath, ((((r['frame_name'] + '_') + suffix) + '.') + format))\n        imageio.imwrite(fname, r['image'], format=format)", "docstring": "Output the data the dataframe's 'image' column to a directory structured by project->sample and named by frame\n\nArgs:\npath (str): Where to write the directory of images\nsuffix (str): for labeling the imaages you write\nformat (str): default 'png' format to write the file\noverwrite (bool): default False. if true can overwrite files in the path\n\nModifies:\nCreates path folder if necessary and writes images to path", "source": "codesearchnet"}
{"code": "def clear_collection(self, name) -> None:\n    self._check_not_finalized()\n    with self._lock:\n        if name in self._collections:\n            del self._collections[name]", "docstring": "Clears all values in a collection.\n\nArgs:\nname: The key for the collection. The `GraphKeys` class contains many\nstandard names for collections.", "source": "github-repos"}
{"code": "def delete_subscription(self, subscription_id):\n        \n        return self.client._delete(self.url + 'subscriptions/{}'.format(subscription_id), headers=self.get_headers())", "docstring": "Unsubscribe, delete the relationship of the customer with the plan.\n\nArgs:\nsubscription_id: Identification of the subscription.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def from_function(cls, f, *args, **kwargs):\n        \n\n        return cls.from_code(six.get_function_code(f), *args, **kwargs)", "docstring": "Create a new instance from a function. Gets the code object from\nthe function and passes it and any other specified parameters to\n:meth:`from_code`.\n\nArguments:\nf(function): The function to get the code object from.\n\nReturns:\nCodeObject: A new :class:`CodeObject` instance.", "source": "juraj-google-style"}
{"code": "def from_seed(cls, seed, alg=None):\n    if alg is None:\n        alg = DEFAULT_ALGORITHM\n    alg = random_ops_util.convert_alg_to_int(alg)\n    state = create_rng_state(seed, alg)\n    return cls(state=state, alg=alg)", "docstring": "Creates a generator from a seed.\n\nA seed is a 1024-bit unsigned integer represented either as a Python\ninteger or a vector of integers. Seeds shorter than 1024-bit will be\npadded. The padding, the internal structure of a seed and the way a seed\nis converted to a state are all opaque (unspecified). The only semantics\nspecification of seeds is that two different seeds are likely to produce\ntwo independent generators (but no guarantee).\n\nArgs:\nseed: the seed for the RNG.\nalg: (optional) the RNG algorithm. If None, it will be auto-selected. See\n`__init__` for its possible values.\n\nReturns:\nThe new generator.", "source": "github-repos"}
{"code": "def _ReadFileHeader(self, file_object):\n    data_type_map = self._GetDataTypeMap('keychain_file_header')\n    (file_header, _) = self._ReadStructureFromFileObject(file_object, 0, data_type_map)\n    if (file_header.signature != self._FILE_SIGNATURE):\n        raise errors.ParseError('Unsupported file signature.')\n    if ((file_header.major_format_version != self._MAJOR_VERSION) or (file_header.minor_format_version != self._MINOR_VERSION)):\n        raise errors.ParseError('Unsupported format version: {0:s}.{1:s}'.format(file_header.major_format_version, file_header.minor_format_version))\n    return file_header", "docstring": "Reads the file header.\n\nArgs:\nfile_object (file): file-like object.\n\nReturns:\nkeychain_file_header: file header.\n\nRaises:\nParseError: if the file header cannot be read.", "source": "codesearchnet"}
{"code": "def tokenize(self, text):\n        \n\n        output_tokens = []\n        for token in whitespace_tokenize(text):\n            chars = list(token)\n            if len(chars) > self.max_input_chars_per_word:\n                output_tokens.append(self.unk_token)\n                continue\n\n            is_bad = False\n            start = 0\n            sub_tokens = []\n            while start < len(chars):\n                end = len(chars)\n                cur_substr = None\n                while start < end:\n                    substr = \"\".join(chars[start:end])\n                    if start > 0:\n                        substr = \"\n                    if substr in self.vocab:\n                        cur_substr = substr\n                        break\n                    end -= 1\n                if cur_substr is None:\n                    is_bad = True\n                    break\n                sub_tokens.append(cur_substr)\n                start = end\n\n            if is_bad:\n                output_tokens.append(self.unk_token)\n            else:\n                output_tokens.extend(sub_tokens)\n        return output_tokens", "docstring": "Tokenizes a piece of text into its word pieces.\n\nThis uses a greedy longest-match-first algorithm to perform tokenization\nusing the given vocabulary.\n\nFor example:\ninput = \"unaffable\"\noutput = [\"un\", \"##aff\", \"##able\"]\n\nArgs:\ntext: A single token or whitespace separated tokens. This should have\nalready been passed through `BasicTokenizer`.\n\nReturns:\nA list of wordpiece tokens.", "source": "juraj-google-style"}
{"code": "def _required_constraint_name(table: str, field, key):\n        \n\n        return '{table}_{field}_required_{postfix}'.format(\n            table=table,\n            field=field.column,\n            postfix=key\n        )", "docstring": "Gets the name for a CONSTRAINT that applies\nto a single hstore key.\n\nArguments:\ntable:\nThe name of the table the field is\na part of.\n\nfield:\nThe hstore field to create a\nUNIQUE INDEX for.\n\nkey:\nThe name of the hstore key\nto create the name for.\n\nReturns:\nThe name for the UNIQUE index.", "source": "juraj-google-style"}
{"code": "def copy(self):\n    new_store = EagerVariableStore()\n    for key, var in self._store._vars.items():\n        try:\n            index = var.name.index(':')\n        except ValueError:\n            stripped_var_name = var.name\n        else:\n            stripped_var_name = var.name[:index]\n        new_var = resource_variable_ops.ResourceVariable(var.read_value(), name=stripped_var_name, trainable=var.trainable)\n        new_store._store._vars[key] = new_var\n    return new_store", "docstring": "Copy this variable store and all of its contents.\n\nVariables contained in this store will be copied over to the new variable\nstore, meaning that they can be modified without affecting the variables in\nthis store.\n\nReturns:\nA new EagerVariableStore instance containing copied variables.", "source": "github-repos"}
{"code": "def add_tree(self, tree, parent=None):\n        \n        if tree.path in self.path_db:\n            self.remove_tree_by_path(tree.path)\n\n        \n        for index in tree.indexes:\n            if not getattr(tree, index):\n                continue\n\n            self._add_to(\n                getattr(self, index + \"_db\"),\n                getattr(tree, index),\n                tree,\n            )\n\n        if parent:\n            self._add_to(self.parent_db, tree.path, parent)\n\n        \n        for sub_tree in tree.sub_trees:\n            assert sub_tree.path.startswith(tree.path)\n\n        for sub_tree in tree.sub_trees:\n            self.add_tree(sub_tree, parent=tree)", "docstring": "Add `tree` into database.\n\nArgs:\ntree (obj): :class:`.Tree` instance.\nparent (ref, default None): Reference to parent tree. This is used\nfor all sub-trees in recursive call.", "source": "juraj-google-style"}
{"code": "def _offset(value):\n    o = int(value)\n    if (o == 0):\n        return 0\n    a = abs(o)\n    s = ((a * 36) + ((a % 100) * 24))\n    return ((o", "docstring": "Parse timezone to offset in seconds.\n\nArgs:\nvalue: A timezone in the '+0000' format. An integer would also work.\n\nReturns:\nThe timezone offset from GMT in seconds as an integer.", "source": "codesearchnet"}
{"code": "def _BuildStations(self, stoplist):\n    stations = []\n    dists = self._EuclidianDistances(stoplist)\n    stations = self._CalculateYLines(dists)\n    return stations", "docstring": "Dispatches the best algorithm for calculating station line position.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstoplist: [Stop, Stop, ...]\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n\nReturns:\n# One integer y-coordinate for each station normalized between\n# 0 and X, where X is the height of the graph in pixels\n[0, 33, 140, ... , X]", "source": "codesearchnet"}
{"code": "def _FloatingPointEncoder(wire_type, format):\n    value_size = struct.calcsize(format)\n    if (value_size == 4):\n\n        def EncodeNonFiniteOrRaise(write, value):\n            if (value == _POS_INF):\n                write(b'\\x00\\x00\\x80\\x7f')\n            elif (value == _NEG_INF):\n                write(b'\\x00\\x00\\x80\\xff')\n            elif (value != value):\n                write(b'\\x00\\x00\\xc0\\x7f')\n            else:\n                raise\n    elif (value_size == 8):\n\n        def EncodeNonFiniteOrRaise(write, value):\n            if (value == _POS_INF):\n                write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\x7f')\n            elif (value == _NEG_INF):\n                write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\xff')\n            elif (value != value):\n                write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xf8\\x7f')\n            else:\n                raise\n    else:\n        raise ValueError((\"Can't encode floating-point values that are %d bytes long (only 4 or 8)\" % value_size))\n\n    def SpecificEncoder(field_number, is_repeated, is_packed):\n        local_struct_pack = struct.pack\n        if is_packed:\n            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)\n            local_EncodeVarint = _EncodeVarint\n\n            def EncodePackedField(write, value):\n                write(tag_bytes)\n                local_EncodeVarint(write, (len(value) * value_size))\n                for element in value:\n                    try:\n                        write(local_struct_pack(format, element))\n                    except SystemError:\n                        EncodeNonFiniteOrRaise(write, element)\n            return EncodePackedField\n        elif is_repeated:\n            tag_bytes = TagBytes(field_number, wire_type)\n\n            def EncodeRepeatedField(write, value):\n                for element in value:\n                    write(tag_bytes)\n                    try:\n                        write(local_struct_pack(format, element))\n                    except SystemError:\n                        EncodeNonFiniteOrRaise(write, element)\n            return EncodeRepeatedField\n        else:\n            tag_bytes = TagBytes(field_number, wire_type)\n\n            def EncodeField(write, value):\n                write(tag_bytes)\n                try:\n                    write(local_struct_pack(format, value))\n                except SystemError:\n                    EncodeNonFiniteOrRaise(write, value)\n            return EncodeField\n    return SpecificEncoder", "docstring": "Return a constructor for an encoder for float fields.\n\nThis is like StructPackEncoder, but catches errors that may be due to\npassing non-finite floating-point values to struct.pack, and makes a\nsecond attempt to encode those values.\n\nArgs:\nwire_type:  The field's wire type, for encoding tags.\nformat:  The format string to pass to struct.pack().", "source": "codesearchnet"}
{"code": "def triggered(self, manual=False):\n    if (self.walker is None):\n        raise InternalError('You can only check if a streamer is triggered if you create it with a SensorLog')\n    if ((not self.automatic) and (not manual)):\n        return False\n    return self.has_data()", "docstring": "Check if this streamer should generate a report.\n\nStreamers can be triggered automatically whenever they have data\nor they can be triggered manually. This method returns True if the\nstreamer is currented triggered.\n\nA streamer is triggered if it:\n- (has data AND is automatic) OR\n- (has data AND is manually triggered)\n\nArgs:\nmanual (bool): Indicate that the streamer has been manually triggered.\n\nReturns:\nbool: Whether the streamer can generate a report right now.", "source": "codesearchnet"}
{"code": "def mme_matches(case_obj, institute_obj, mme_base_url, mme_token):\n    \n    data = {\n        'institute' : institute_obj,\n        'case' : case_obj,\n        'server_errors' : []\n    }\n    matches = {}\n    \n    if not case_obj.get('mme_submission'):\n        return None\n\n    for patient in case_obj['mme_submission']['patients']:\n        patient_id = patient['id']\n        matches[patient_id] = None\n        url = ''.join([ mme_base_url, '/matches/', patient_id])\n        server_resp = matchmaker_request(url=url, token=mme_token, method='GET')\n        if 'status_code' in server_resp: \n            \n            pat_matches = []\n            if server_resp.get('matches'):\n                pat_matches = parse_matches(patient_id, server_resp['matches'])\n            matches[patient_id] = pat_matches\n        else:\n            LOG.warning('Server returned error message: {}'.format(server_resp['message']))\n            data['server_errors'].append(server_resp['message'])\n\n    data['matches'] = matches\n\n    return data", "docstring": "Show Matchmaker submission data for a sample and eventual matches.\n\nArgs:\ncase_obj(dict): a scout case object\ninstitute_obj(dict): an institute object\nmme_base_url(str) base url of the MME server\nmme_token(str) auth token of the MME server\n\nReturns:\ndata(dict): data to display in the html template", "source": "juraj-google-style"}
{"code": "def from_bytes_list(cls, function_descriptor_list):\n    assert isinstance(function_descriptor_list, list)\n    if (len(function_descriptor_list) == 0):\n        return FunctionDescriptor.for_driver_task()\n    elif ((len(function_descriptor_list) == 3) or (len(function_descriptor_list) == 4)):\n        module_name = ensure_str(function_descriptor_list[0])\n        class_name = ensure_str(function_descriptor_list[1])\n        function_name = ensure_str(function_descriptor_list[2])\n        if (len(function_descriptor_list) == 4):\n            return cls(module_name, function_name, class_name, function_descriptor_list[3])\n        else:\n            return cls(module_name, function_name, class_name)\n    else:\n        raise Exception('Invalid input for FunctionDescriptor.from_bytes_list')", "docstring": "Create a FunctionDescriptor instance from list of bytes.\n\nThis function is used to create the function descriptor from\nbackend data.\n\nArgs:\ncls: Current class which is required argument for classmethod.\nfunction_descriptor_list: list of bytes to represent the\nfunction descriptor.\n\nReturns:\nThe FunctionDescriptor instance created from the bytes list.", "source": "codesearchnet"}
{"code": "def _create_extractors(col_params):\n    result = []\n    for col_param in col_params:\n        result.append(_create_extractor(col_param))\n    return result", "docstring": "Creates extractors to extract properties corresponding to 'col_params'.\n\nArgs:\ncol_params: List of ListSessionGroupsRequest.ColParam protobufs.\nReturns:\nA list of extractor functions. The ith element in the\nreturned list extracts the column corresponding to the ith element of\n_request.col_params", "source": "codesearchnet"}
{"code": "def console(discord_token, discord_client_id):\n    \n\n    state, response = datatools.get_compare_version()\n\n    logger.info(\"Starting Modis in console\")\n    logger.info(response)\n\n    import threading\n    import asyncio\n\n    logger.debug(\"Loading packages\")\n    from modis.discord_modis import main as discord_modis_console\n    from modis.reddit_modis import main as reddit_modis_console\n    from modis.facebook_modis import main as facebook_modis_console\n\n    \n    logger.debug(\"Initiating threads\")\n    loop = asyncio.get_event_loop()\n    discord_thread = threading.Thread(\n        target=discord_modis_console.start,\n        args=[discord_token, discord_client_id, loop])\n    reddit_thread = threading.Thread(\n        target=reddit_modis_console.start, args=[])\n    facebook_thread = threading.Thread(\n        target=facebook_modis_console.start, args=[])\n\n    \n    logger.debug(\"Starting threads\")\n    discord_thread.start()\n    reddit_thread.start()\n    facebook_thread.start()\n\n    logger.debug(\"Root startup completed\")", "docstring": "Start Modis in console format.\n\nArgs:\ndiscord_token (str): The bot token for your Discord application\ndiscord_client_id: The bot's client ID", "source": "juraj-google-style"}
{"code": "def any_soco():\n    cls = config.SOCO_CLASS\n    try:\n        device = next((d for d in cls._instances[cls._class_group].values() if d.is_visible))\n    except (KeyError, StopIteration):\n        devices = discover()\n        return (None if (devices is None) else devices.pop())\n    return device", "docstring": "Return any visible soco device, for when it doesn't matter which.\n\nTry to obtain an existing instance, or use `discover` if necessary.\nNote that this assumes that the existing instance has not left\nthe network.\n\nReturns:\nSoCo: A `SoCo` instance (or subclass if `config.SOCO_CLASS` is set,\nor `None` if no instances are found", "source": "codesearchnet"}
{"code": "def detect(self, text):\n    \n    t = text.encode(\"utf-8\")\n    reliable, index, top_3_choices = cld2.detect(t, bestEffort=False)\n\n    if not reliable:\n      self.reliable = False\n      reliable, index, top_3_choices = cld2.detect(t, bestEffort=True)\n      \n      if not self.quiet:\n        if not reliable:\n          raise UnknownLanguage(\"Try passing a longer snippet of text\")\n        else:\n          logger.warning(\"Detector is not able to detect the language reliably.\")\n\n    self.languages = [Language(x) for x in top_3_choices]\n    self.language = self.languages[0]\n    return self.language", "docstring": "Decide which language is used to write the text.\n\nThe method tries first to detect the language with high reliability. If\nthat is not possible, the method switches to best effort strategy.\n\n\nArgs:\ntext (string): A snippet of text, the longer it is the more reliable we\ncan detect the language used to write the text.", "source": "juraj-google-style"}
{"code": "def reach_max_num(self):\n    if self.signal.get('reach_max_num'):\n        return True\n    if ((self.max_num > 0) and (self.fetched_num >= self.max_num)):\n        return True\n    else:\n        return False", "docstring": "Check if downloaded images reached max num.\n\nReturns:\nbool: if downloaded images reached max num.", "source": "codesearchnet"}
{"code": "def file_exists(file_path, credentials=None):\n  \n  if file_path.startswith('gs:\n    return _file_exists_in_gcs(file_path, credentials)\n  else:\n    return os.path.isfile(file_path)", "docstring": "Check whether the file exists, on local disk or GCS.\n\nArgs:\nfile_path: The target file path; should have the 'gs://' prefix if in gcs.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nTrue if the file's there.", "source": "juraj-google-style"}
{"code": "def get_file(self, filename, scope='all'):\n        \n        filename = os.path.abspath(os.path.join(self.root, filename))\n        layouts = self._get_layouts_in_scope(scope)\n        for ly in layouts:\n            if filename in ly.files:\n                return ly.files[filename]\n        return None", "docstring": "Returns the BIDSFile object with the specified path.\n\nArgs:\nfilename (str): The path of the file to retrieve. Must be either\nan absolute path, or relative to the root of this BIDSLayout.\nscope (str, list): Scope of the search space. If passed, only\nBIDSLayouts that match the specified scope will be\nsearched. See BIDSLayout docstring for valid values.\n\nReturns: A BIDSFile, or None if no match was found.", "source": "juraj-google-style"}
{"code": "def _StartMonitoringProcess(self, process):\n    \n    if process is None:\n      raise ValueError('Missing process.')\n\n    pid = process.pid\n\n    if pid in self._process_information_per_pid:\n      raise KeyError(\n          'Already monitoring process (PID: {0:d}).'.format(pid))\n\n    if pid in self._rpc_clients_per_pid:\n      raise KeyError(\n          'RPC client (PID: {0:d}) already exists'.format(pid))\n\n    rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()\n\n    \n    \n    rpc_port = process.rpc_port.value\n    time_waited_for_process = 0.0\n    while not rpc_port:\n      time.sleep(0.1)\n      rpc_port = process.rpc_port.value\n      time_waited_for_process += 0.1\n\n      if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:\n        raise IOError(\n            'RPC client unable to determine server (PID: {0:d}) port.'.format(\n                pid))\n\n    hostname = 'localhost'\n\n    if not rpc_client.Open(hostname, rpc_port):\n      raise IOError((\n          'RPC client unable to connect to server (PID: {0:d}) '\n          'http:\n\n    self._rpc_clients_per_pid[pid] = rpc_client\n    self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)", "docstring": "Starts monitoring a process.\n\nArgs:\nprocess (MultiProcessBaseProcess): process.\n\nRaises:\nIOError: if the RPC client cannot connect to the server.\nKeyError: if the process is not registered with the engine or\nif the process is already being monitored.\nOSError: if the RPC client cannot connect to the server.\nValueError: if the process is missing.", "source": "juraj-google-style"}
{"code": "def get_fastq_dxfile_objects(self,barcode=None):\n        \n        fq_ext_glob = \"*{}\".format(self.FQEXT)\n        name = fq_ext_glob\n        if barcode:\n            name = \"*_{barcode}_*{FQEXT}\".format(barcode=barcode, FQEXT=self.FQEXT)\n        fastqs= dxpy.find_data_objects(project=self.dx_project_id,folder=self.DX_FASTQ_FOLDER,name=name,name_mode=\"glob\")\n        if not fastqs:\n            \n            fastqs= dxpy.find_data_objects(project=self.dx_project_id,name=name,name_mode=\"glob\")\n           \n        if not fastqs:\n            msg = \"No FASTQ files found for run {run} \".format(run=proj_name)\n            if barcode:\n                msg += \"and barcode {barcode}.\".format(barcode=barcode)\n            raise FastqNotFound(msg)\n        fastqs = [dxpy.DXFile(project=x[\"project\"],dxid=x[\"id\"]) for x in fastqs]\n        return fastqs", "docstring": "Retrieves all the FASTQ files in project self.dx_project_name as DXFile objects.\n\nArgs:\nbarcode: `str`. If set, then only FASTQ file properties for FASTQ files having the specified barcode are returned.\n\nReturns:\n`list` of DXFile objects representing FASTQ files.\n\nRaises:\n`dnanexus_utils.FastqNotFound`: No FASTQ files were found.", "source": "juraj-google-style"}
{"code": "def sanity_check_tensor_sync(tensor: torch.Tensor, mesh: DeviceMesh, rtol: float=0.0001, atol: float=0.0001, not_sync: bool=False) -> None:\n    if not dist.is_initialized() or mesh.size() == 1:\n        return\n    pg = mesh.get_group()\n    if hasattr(tensor, 'to_local'):\n        local_tensor = tensor.to_local()\n    else:\n        local_tensor = tensor\n    world_size = dist.get_world_size(pg)\n    gathered_tensors = [torch.empty_like(local_tensor) for _ in range(world_size)]\n    dist.all_gather(gathered_tensors, local_tensor, group=pg)\n    for i in range(1, world_size):\n        try:\n            torch.testing.assert_close(gathered_tensors[0], gathered_tensors[i], rtol=rtol, atol=atol)\n        except AssertionError as e:\n            if not_sync:\n                continue\n            raise e", "docstring": "Verify that a tensor is synchronized (or not synchronized) across all processes in the mesh's process group.\nHandles both regular tensors and DTensors.\n\nArgs:\ntensor (torch.Tensor): The tensor to check for synchronization (can be DTensor)\nmesh (DeviceMesh): The device mesh containing the process group\nrtol (float): Relative tolerance for comparison\natol (float): Absolute tolerance for comparison\nnot_sync (bool): If True, asserts that tensors are NOT synchronized. If False, asserts they are synchronized.", "source": "github-repos"}
{"code": "def plot(self, data):\n    import IPython\n    if (((sys.version_info.major > 2) and isinstance(data, str)) or ((sys.version_info.major <= 2) and isinstance(data, basestring))):\n        data = bq.Query(data)\n    if isinstance(data, bq.Query):\n        df = data.execute().result().to_dataframe()\n        data = self._get_lantern_format(df)\n    elif isinstance(data, pd.core.frame.DataFrame):\n        data = self._get_lantern_format(data)\n    else:\n        raise Exception('data needs to be a sql query, or a pandas DataFrame.')\n    HTML_TEMPLATE = '<link rel=\"import\" href=\"/nbextensions/gcpdatalab/extern/lantern-browser.html\" >\\n        <lantern-browser id=\"{html_id}\"></lantern-browser>\\n        <script>\\n        var browser = document.querySelector(\\'\n    metrics_str = str(map(str, data[0]['metricValues'].keys()))\n    data_str = str([{str(k): json.dumps(v) for (k, v) in elem.iteritems()} for elem in data])\n    html_id = ('l' + datalab.utils.commands.Html.next_id())\n    html = HTML_TEMPLATE.format(html_id=html_id, metrics=metrics_str, data=data_str)\n    IPython.display.display(IPython.display.HTML(html))", "docstring": "Plots a featire slice view on given data.\n\nArgs:\ndata: Can be one of:\nA string of sql query.\nA sql query module defined by \"%%sql --module module_name\".\nA pandas DataFrame.\nRegardless of data type, it must include the following columns:\n\"feature\": identifies a slice of features. For example: \"petal_length:4.0-4.2\".\n\"count\": number of instances in that slice of features.\nAll other columns are viewed as metrics for its feature slice. At least one is required.", "source": "codesearchnet"}
{"code": "def repay_funding(self, amount, currency):\n        \n        params = {\n            'amount': amount,\n            'currency': currency  \n            }\n        return self._send_message('post', '/funding/repay',\n                                  data=json.dumps(params))", "docstring": "Repay funding. Repays the older funding records first.\n\nArgs:\namount (int): Amount of currency to repay\ncurrency (str): The currency, example USD\n\nReturns:\nNot specified by cbpro.", "source": "juraj-google-style"}
{"code": "def get_by_addr(self, address):\n        \n        addr = address\n        if isinstance(address, str) and len(address) == 34:\n            addr = Helper.AddrStrToScriptHash(address)\n\n        if not isinstance(addr, UInt160):\n            raise Exception(\"Incorrect address format\")\n\n        addrlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR).snapshot()\n        results = []\n\n        for val in addrlist_snapshot.iterator(prefix=bytes(addr.Data), include_key=False):\n            if len(val) > 4:\n                try:\n                    event = SmartContractEvent.FromByteArray(val)\n                    results.append(event)\n                except Exception as e:\n                    logger.error(\"could not parse event: %s %s\" % (e, val))\n        return results", "docstring": "Lookup a set of notifications by address\nArgs:\naddress (UInt160 or str): hash of address for notifications\n\nReturns:\nlist: a list of notifications", "source": "juraj-google-style"}
{"code": "def sparse_grid(func, order, dim=None, skew=None):\n    \n    if not isinstance(order, int):\n        orders = numpy.array(order).flatten()\n        dim = orders.size\n        m_order = int(numpy.min(orders))\n        skew = [order-m_order for order in orders]\n        return sparse_grid(func, m_order, dim, skew)\n\n    abscissas, weights = [], []\n    bindex = chaospy.bertran.bindex(order-dim+1, order, dim)\n\n    if skew is None:\n        skew = numpy.zeros(dim, dtype=int)\n    else:\n        skew = numpy.array(skew, dtype=int)\n        assert len(skew) == dim\n\n    for idx in range(\n            chaospy.bertran.terms(order, dim)\n            - chaospy.bertran.terms(order-dim, dim)):\n\n        idb = bindex[idx]\n        abscissa, weight = func(skew+idb)\n        weight *= (-1)**(order-sum(idb))*comb(dim-1, order-sum(idb))\n        abscissas.append(abscissa)\n        weights.append(weight)\n\n    abscissas = numpy.concatenate(abscissas, 1)\n    weights = numpy.concatenate(weights, 0)\n\n    abscissas = numpy.around(abscissas, 15)\n    order = numpy.lexsort(tuple(abscissas))\n    abscissas = abscissas.T[order].T\n    weights = weights[order]\n\n    \n    diff = numpy.diff(abscissas.T, axis=0)\n    unique = numpy.ones(len(abscissas.T), bool)\n    unique[1:] = (diff != 0).any(axis=1)\n\n    \n    length = len(weights)\n    idx = 1\n    while idx < length:\n        while idx < length and unique[idx]:\n            idx += 1\n        idy = idx+1\n        while idy < length and not unique[idy]:\n            idy += 1\n        if idy-idx > 1:\n            weights[idx-1] = numpy.sum(weights[idx-1:idy])\n        idx = idy+1\n\n    abscissas = abscissas[:, unique]\n    weights = weights[unique]\n\n    return abscissas, weights", "docstring": "Smolyak sparse grid constructor.\n\nArgs:\nfunc (:py:data:typing.Callable):\nFunction that takes a single argument ``order`` of type\n``numpy.ndarray`` and with ``order.shape = (dim,)``\norder (int, numpy.ndarray):\nThe order of the grid. If ``numpy.ndarray``, it overrides both\n``dim`` and ``skew``.\ndim (int):\nNumber of dimension.\nskew (list):\nOrder skewness.", "source": "juraj-google-style"}
{"code": "def drift(data, n=3, **kwargs):\n    yi = data[(- n)]\n    yf = data[(- 1)]\n    slope = ((yf - yi) / (n - 1))\n    forecast = (yf + slope)\n    return forecast", "docstring": "The drift forecast for the next point is a linear extrapolation from the previous ``n``\npoints in the series.\n\nArgs:\ndata (np.array): Observed data, presumed to be ordered in time.\nn (int): period over which to calculate linear model for extrapolation\n\nReturns:\nfloat: a single-valued forecast for the next value in the series.", "source": "codesearchnet"}
{"code": "def inspect(self, **kwargs):\n    what = kwargs.pop('what', 'hist')\n    if (what == 'hist'):\n        with self.open_hist() as hist:\n            return (hist.plot(**kwargs) if hist else None)\n    elif (what == 'scf'):\n        relaxation = abiinspect.Relaxation.from_file(self.output_file.path)\n        if ('title' not in kwargs):\n            kwargs['title'] = str(self)\n        return (relaxation.plot(**kwargs) if (relaxation is not None) else None)\n    else:\n        raise ValueError(('Wrong value for what %s' % what))", "docstring": "Plot the evolution of the structural relaxation with matplotlib.\n\nArgs:\nwhat: Either \"hist\" or \"scf\". The first option (default) extracts data\nfrom the HIST file and plot the evolution of the structural\nparameters, forces, pressures and energies.\nThe second option, extracts data from the main output file and\nplot the evolution of the SCF cycles (etotal, residuals, etc).\n\nReturns:\n`matplotlib` figure, None if some error occurred.", "source": "codesearchnet"}
{"code": "def equal(x, y):\n    \n    if PY_3:\n        return test_case().assertEqual(x, y) or True\n\n    assert x == y", "docstring": "Shortcut function for ``unittest.TestCase.assertEqual()``.\n\nArguments:\nx (mixed)\ny (mixed)\n\nRaises:\nAssertionError: in case of assertion error.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def _SetValues(self, values):\n\n    def _ToStr(value):\n        'Convert individul list entries to string.'\n        if isinstance(value, (list, tuple)):\n            result = []\n            for val in value:\n                result.append(str(val))\n            return result\n        else:\n            return str(value)\n    if isinstance(values, Row):\n        if (self._keys != values.header):\n            raise TypeError('Attempt to append row with mismatched header.')\n        self._values = copy.deepcopy(values.values)\n    elif isinstance(values, dict):\n        for key in self._keys:\n            if (key not in values):\n                raise TypeError('Dictionary key mismatch with row.')\n        for key in self._keys:\n            self[key] = _ToStr(values[key])\n    elif (isinstance(values, list) or isinstance(values, tuple)):\n        if (len(values) != len(self._values)):\n            raise TypeError('Supplied list length != row length')\n        for (index, value) in enumerate(values):\n            self._values[index] = _ToStr(value)\n    else:\n        raise TypeError('Supplied argument must be Row, dict or list, not %s', type(values))", "docstring": "Set values from supplied dictionary or list.\n\nArgs:\nvalues: A Row, dict indexed by column name, or list.\n\nRaises:\nTypeError: Argument is not a list or dict, or list is not equal row\nlength or dictionary keys don't match.", "source": "codesearchnet"}
{"code": "def set_ipv4_routing(self, vrf_name, default=False, disable=False):\n    cmd = ('ip routing vrf %s' % vrf_name)\n    if default:\n        cmd = ('default %s' % cmd)\n    elif disable:\n        cmd = ('no %s' % cmd)\n    cmd = make_iterable(cmd)\n    return self.configure(cmd)", "docstring": "Configures ipv4 routing for the vrf\n\nArgs:\nvrf_name (str): The VRF name to configure\ndefault (bool): Configures ipv4 routing for the vrf value to\ndefault if this value is true\ndisable (bool): Negates the ipv4 routing for the vrf if set to true\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "codesearchnet"}
{"code": "def get_raw(tree):\n    \n    if isinstance(tree, Tree):\n        words = []\n        for child in tree:\n            words.append(get_raw(child))\n        return ' '.join(words)\n    else:\n        return tree", "docstring": "Get the exact words in lowercase in the tree object.\n\nArgs:\ntree (Tree): Parsed tree structure\nReturns:\nResulting string of tree ``(Ex: \"The red car\")``", "source": "juraj-google-style"}
{"code": "def _GetMountpointBlacklist(xdev):\n  \n  if xdev == rdf_file_finder.FileFinderArgs.XDev.NEVER:\n    \n    return _GetMountpoints(only_physical=False)\n\n  if xdev == rdf_file_finder.FileFinderArgs.XDev.LOCAL:\n    \n    physical = _GetMountpoints(only_physical=True)\n    return _GetMountpoints(only_physical=False) - physical\n\n  if xdev == rdf_file_finder.FileFinderArgs.XDev.ALWAYS:\n    \n    return set()\n\n  raise ValueError(\"Incorrect `xdev` value: %s\" % xdev)", "docstring": "Builds a list of mountpoints to ignore during recursive searches.\n\nArgs:\nxdev: A `XDev` value that determines policy for crossing device boundaries.\n\nReturns:\nA set of mountpoints to ignore.\n\nRaises:\nValueError: If `xdev` value is invalid.", "source": "juraj-google-style"}
{"code": "def compile_state_action_constraints(self,\n            state: Sequence[tf.Tensor],\n            action: Sequence[tf.Tensor]) -> List[TensorFluent]:\n        \n        scope = self.transition_scope(state, action)\n        constraints = []\n        with self.graph.as_default():\n            with tf.name_scope('state_action_constraints'):\n                for p in self.rddl.domain.constraints:\n                    fluent = self._compile_expression(p, scope)\n                    constraints.append(fluent)\n                return constraints", "docstring": "Compiles the state-action constraints given current `state` and `action` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA list of :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def cache_connect(database=None):\n    if (database is None):\n        database = cache_file()\n    if os.path.isfile(database):\n        conn = sqlite3.connect(database)\n    else:\n        conn = sqlite3.connect(database)\n        conn.executescript(schema)\n    with conn as cur:\n        cur.execute('PRAGMA foreign_keys = ON;')\n    conn.row_factory = sqlite3.Row\n    return conn", "docstring": "Returns a connection object to a sqlite database.\n\nArgs:\ndatabase (str, optional): The path to the database the user wishes\nto connect to. If not specified, a default is chosen using\n:func:`.cache_file`. If the special database name ':memory:'\nis given, then a temporary database is created in memory.\n\nReturns:\n:class:`sqlite3.Connection`", "source": "codesearchnet"}
{"code": "def videos(self, **kwargs):\n        \n        path = self._get_id_path('videos')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the videos (trailers, teasers, clips, etc...) for a\nspecific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def as_objective(obj):\n    if isinstance(obj, Objective):\n        return obj\n    elif callable(obj):\n        return obj\n    elif isinstance(obj, str):\n        (layer, n) = obj.split(':')\n        (layer, n) = (layer.strip(), int(n))\n        return channel(layer, n)", "docstring": "Convert obj into Objective class.\n\nStrings of the form \"layer:n\" become the Objective channel(layer, n).\nObjectives are returned unchanged.\n\nArgs:\nobj: string or Objective.\n\nReturns:\nObjective", "source": "codesearchnet"}
{"code": "def get_max_size(pool, num_option, item_length):\n    max_items = (POOL_SIZE / item_length)\n    existing = ((POOL_OPTION_MIN_SIZE * num_option) + sum([max(0, (len(pool.get(i, {})) - 5)) for i in xrange(num_option)]))\n    return int((max_items - existing))", "docstring": "Calculate the max number of item that an option can stored in the pool at give time.\n\nThis is to limit the pool size to POOL_SIZE\n\nArgs:\noption_index (int): the index of the option to calculate the size for\npool (dict): answer pool\nnum_option (int): total number of options available for the question\nitem_length (int): the length of the item\n\nReturns:\nint: the max number of items that `option_index` can have", "source": "codesearchnet"}
{"code": "def similar_movies(self, **kwargs):\n        \n        path = self._get_id_path('similar_movies')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the similar movies for a specific movie id.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _CreateFeedItems(client, feed_details, label_name):\n  \n  \n  feed_item_service = client.GetService('FeedItemService', version='v201809')\n\n  \n  \n  urls = ('http:\n          'http:\n          'http:\n\n  \n  operations = [{\n      \n      'operand': {\n          'feedId': feed_details.feed_id,\n          'attributeValues': [\n              {\n                  'feedAttributeId': feed_details.url_attribute_id,\n                  'stringValues': [url]\n              },\n              {\n                  'feedAttributeId': feed_details.label_attribute_id,\n                  'stringValues': [label_name]\n              }\n          ]\n      },\n      'operator': 'ADD'\n  } for url in urls]\n\n  \n  feed_item_service.mutate(operations)", "docstring": "Creates the page URLs in the DSA page feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_details: a _DSAFeedDetails instance.\nlabel_name: a str containing the page feed URL label.", "source": "juraj-google-style"}
{"code": "def init_properties(env='dev', app='unnecessary', **_):\n    aws_env = boto3.session.Session(profile_name=env)\n    s3client = aws_env.resource('s3')\n    generated = get_details(app=app, env=env)\n    archaius = generated.archaius()\n    archaius_file = '{path}/application.properties'.format(path=archaius['path'])\n    try:\n        s3client.Object(archaius['bucket'], archaius_file).get()\n        LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})\n        return True\n    except boto3.exceptions.botocore.client.ClientError:\n        s3client.Object(archaius['bucket'], archaius_file).put()\n        LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})\n        return False", "docstring": "Make sure _application.properties_ file exists in S3.\n\nFor Applications with Archaius support, there needs to be a file where the\ncloud environment variable points to.\n\nArgs:\nenv (str): Deployment environment/account, i.e. dev, stage, prod.\napp (str): GitLab Project name.\n\nReturns:\nTrue when application.properties was found.\nFalse when application.properties needed to be created.", "source": "codesearchnet"}
{"code": "def make_sgf(\n    move_history,\n    result_string,\n    ruleset=\"Chinese\",\n    komi=7.5,\n    white_name=PROGRAM_IDENTIFIER,\n    black_name=PROGRAM_IDENTIFIER,\n    comments=[]\n):\n    \n    boardsize = go.N\n    game_moves = ''.join(translate_sgf_move(*z)\n                         for z in itertools.zip_longest(move_history, comments))\n    result = result_string\n    return SGF_TEMPLATE.format(**locals())", "docstring": "Turn a game into SGF.\n\nDoesn't handle handicap games or positions with incomplete history.\n\nArgs:\nmove_history: iterable of PlayerMoves\nresult_string: \"B+R\", \"W+0.5\", etc.\ncomments: iterable of string/None. Will be zipped with move_history.", "source": "juraj-google-style"}
{"code": "def _encode_reference_type_constraints(self, builder: expressions.Builder, elem: message.Message) -> List[validation_pb2.SqlRequirement]:\n    field_name = _last_path_token(builder)\n    constraint_key = f'{field_name}-resource-type-exclusivity'\n    if constraint_key in self._options.skip_keys:\n        return []\n    element_definition = cast(Any, elem)\n    type_codes = _utils.element_type_codes(element_definition)\n    if type_codes != ['Reference']:\n        return []\n    allowed_reference_types = [target_profile.value for target_profile in element_definition.type[0].target_profile]\n    if len(allowed_reference_types) <= 1:\n        return []\n    num_references_exist: expressions.Builder = _num_fields_exist((builder.getReferenceKey(reference_type) for reference_type in sorted(allowed_reference_types)))\n    constraint: expressions.Builder = num_references_exist <= 1\n    if _fhir_path_data_types.is_collection(builder.return_type):\n        constraint: expressions.Builder = builder.all(constraint)\n    constraint_sql = self._encode_fhir_path_builder_constraint(constraint, builder.get_parent_builder())\n    if constraint_sql is None:\n        return []\n    reference_type_path = self._abs_path_invocation(builder)\n    column_name = f'{_path_to_sql_column_name(reference_type_path)}_{_key_to_sql_column_name(constraint_key)}'\n    parent_path = self._abs_path_invocation(builder.get_parent_builder())\n    description = f'Reference type {reference_type_path} links to multiple resources or to resources of a type restricted by the profile.'\n    return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=constraint_sql.sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_REFERENCE_TYPE, element_path=parent_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=constraint_sql.builder.fhir_path, fields_referenced_by_expression=[field_name])]", "docstring": "Generates constraints for reference types.\n\nEnsures that a reference type only has a value for one of the resourceId\ncolumns across each of the possible resources the reference can link.\n\nArgs:\nbuilder: The builder to the reference type for which to encode\nconstraints.\nelem: Element definition of the builder.\n\nReturns:\nA constraint enforcing the above requirements for the given reference\ntype.", "source": "github-repos"}
{"code": "def _value_set_from_url(self, url: str) -> Optional[value_set_pb2.ValueSet]:\n    url, version = url_utils.parse_url_version(url)\n    value_set = self._package_manager.get_resource(url)\n    if value_set is None:\n        logging.info('Unable to find value set for url: %s in given resolver packages.', url)\n        return None\n    elif not isinstance(value_set, value_set_pb2.ValueSet):\n        raise ValueError('URL: %s does not refer to a value set, found: %s' % (url, value_set.DESCRIPTOR.name))\n    elif version is not None and version != value_set.version.value:\n        logging.warning('Found incompatible version for value set with url: %s. Requested: %s, found: %s', url, version, value_set.version.value)\n        return None\n    else:\n        return value_set", "docstring": "Retrieves the value set for the given URL.\n\nThe value set is assumed to be a member of one of the packages contained in\nself._package_manager. This function will not attempt to look up resources\nover the network in other locations.\n\nArgs:\nurl: The url of the value set to retrieve.\n\nReturns:\nThe value set for the given URL or None if it can not be found in the\npackage manager.\n\nRaises:\nValueError: If the URL belongs to a resource that is not a value set.", "source": "github-repos"}
{"code": "def translate_sites(self, indices, vector, frac_coords=True,\n                        to_unit_cell=True):\n        \n        if not isinstance(indices, collections.abc.Iterable):\n            indices = [indices]\n\n        for i in indices:\n            site = self._sites[i]\n            if frac_coords:\n                fcoords = site.frac_coords + vector\n            else:\n                fcoords = self._lattice.get_fractional_coords(\n                    site.coords + vector)\n            if to_unit_cell:\n                fcoords = np.mod(fcoords, 1)\n            self._sites[i].frac_coords = fcoords", "docstring": "Translate specific sites by some vector, keeping the sites within the\nunit cell.\n\nArgs:\nindices: Integer or List of site indices on which to perform the\ntranslation.\nvector: Translation vector for sites.\nfrac_coords (bool): Whether the vector corresponds to fractional or\ncartesian coordinates.\nto_unit_cell (bool): Whether new sites are transformed to unit\ncell", "source": "juraj-google-style"}
{"code": "def stft_magnitude(signal, fft_length, hop_length=None, window_length=None):\n    frames = frame(signal, window_length, hop_length)\n    window = periodic_hann(window_length)\n    windowed_frames = (frames * window)\n    return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))", "docstring": "Calculate the short-time Fourier transform magnitude.\n\nArgs:\nsignal: 1D np.array of the input time-domain signal.\nfft_length: Size of the FFT to apply.\nhop_length: Advance (in samples) between each frame passed to FFT.\nwindow_length: Length of each block of samples to pass to FFT.\n\nReturns:\n2D np.array where each row contains the magnitudes of the fft_length/2+1\nunique values of the FFT for the corresponding frame of input samples.", "source": "codesearchnet"}
{"code": "def __init__(self, plist_filename):\n        \n        self.filename = plist_filename\n\n        with open(self.filename, 'r') as plist_file:\n            self.soup = BeautifulSoup(plist_file, 'lxml-xml')\n            self.properties = self.soup.findChild(name='dict')\n\n            if self.properties is None:\n                raise RuntimeError('Invalid property list file provided')", "docstring": "Initialize a property list representation from an existing file.\n\nArgs:\nplist_filename: A string containing the full path to a\nDoxygen-generated property list file.\n\nRaises:\nOSError / FileNotFoundError: Input file cannot be read\nRuntimeError: The property list file is not of the expected format", "source": "juraj-google-style"}
{"code": "def check_cell_type(cell, cell_type):\n    if ((cell_type == None) or (cell_type == type(None))):\n        return ((cell == None) or (isinstance(cell, basestring) and (not cell)))\n    else:\n        return isinstance(cell, cell_type)", "docstring": "Checks the cell type to see if it represents the cell_type passed in.\n\nArgs:\ncell_type: The type id for a cell match or None for empty match.", "source": "codesearchnet"}
{"code": "def profile_settings_args_layout_json(self, required):\n        \n\n        profile_args = {}\n        self.db_create_table(self.input_table, self.install_json_params().keys())\n        self.db_insert_record(self.input_table, self.install_json_params().keys())\n        self.gen_permutations()\n        try:\n            for pn in self._input_permutations[self.args.permutation_id]:\n                p = self.install_json_params().get(pn.get('name'))\n                if p.get('required', False) != required:\n                    continue\n                if p.get('type').lower() == 'boolean':\n                    \n                    profile_args[p.get('name')] = pn.get('value')\n                elif p.get('type').lower() == 'choice':\n                    \n                    profile_args[p.get('name')] = pn.get('value')\n                elif p.get('name') in ['api_access_id', 'api_secret_key']:\n                    \n                    pass\n                else:\n                    \n                    types = '|'.join(p.get('playbookDataType', []))\n                    if types:\n                        profile_args[p.get('name')] = p.get('default', '<{}>'.format(types))\n                    else:\n                        profile_args[p.get('name')] = p.get('default', '')\n        except IndexError:\n            self.handle_error('Invalid permutation index provided.')\n        return profile_args", "docstring": "Return args based on layout.json and conditional rendering.\n\nArgs:\nrequired (bool): If True only required args will be returned.\n\nReturns:\ndict: Dictionary of required or optional App args.", "source": "juraj-google-style"}
{"code": "def _bash_comp_command(self, cmd, add_help=True):\n    out = (['-h', '--help'] if add_help else [])\n    cmd_dict = (self._opt_cmds[cmd] if cmd else self._opt_bare)\n    for (opt, sct) in cmd_dict:\n        out.extend(_names(self._conf[sct], opt))\n    return out", "docstring": "Build a list of all options for a given command.\n\nArgs:\ncmd (str): command name, set to None or '' for bare command.\nadd_help (bool): add an help option.\n\nReturns:\nlist of str: list of CLI options strings.", "source": "codesearchnet"}
{"code": "def body(self, body):\n        \n        if isinstance(body, bytes):\n            body = body.decode('utf-8')\n\n        self._body = body", "docstring": "Defines response body data.\n\nArguments:\nbody (str|bytes): response body to use.\n\nReturns:\nself: ``pook.Response`` current instance.", "source": "juraj-google-style"}
{"code": "def _time_delta_from_info(info):\n  \n  delta_seconds = int(time.time()) - info.start_time\n  return str(datetime.timedelta(seconds=delta_seconds))", "docstring": "Format the elapsed time for the given TensorBoardInfo.\n\nArgs:\ninfo: A TensorBoardInfo value.\n\nReturns:\nA human-readable string describing the time since the server\ndescribed by `info` started: e.g., \"2 days, 0:48:58\".", "source": "juraj-google-style"}
{"code": "def get_unrecognized_field_info(self, key, value_default=None,\n                                    variant_default=None):\n        \n        value, variant = self.__unrecognized_fields.get(key, (value_default,\n                                                              variant_default))\n        return value, variant", "docstring": "Get the value and variant of an unknown field in this message.\n\nArgs:\nkey: The name or number of the field to retrieve.\nvalue_default: Value to be returned if the key isn't found.\nvariant_default: Value to be returned as variant if the key isn't\nfound.\n\nReturns:\n(value, variant), where value and variant are whatever was passed\nto set_unrecognized_field.", "source": "juraj-google-style"}
{"code": "def measure_topology(fbasename=None, log=None, ml_version=ml_version):\n    ml_script1_file = 'TEMP3D_measure_topology.mlx'\n    ml_script1 = mlx.FilterScript(file_in=fbasename, ml_version=ml_version)\n    compute.measure_topology(ml_script1)\n    ml_script1.save_to_file(ml_script1_file)\n    ml_script1.run_script(log=log, script_file=ml_script1_file)\n    topology = ml_script1.topology\n    return topology", "docstring": "Measures mesh topology\n\nArgs:\nfbasename (str): input filename.\nlog (str): filename to log output\n\nReturns:\ndict: dictionary with the following keys:\nvert_num (int): number of vertices\nedge_num (int): number of edges\nface_num (int): number of faces\nunref_vert_num (int): number or unreferenced vertices\nboundry_edge_num (int): number of boundary edges\npart_num (int): number of parts (components) in the mesh.\nmanifold (bool): True if mesh is two-manifold, otherwise false.\nnon_manifold_edge (int): number of non_manifold edges.\nnon_manifold_vert (int): number of non-manifold verices\ngenus (int or str): genus of the mesh, either a number or\n'undefined' if the mesh is non-manifold.\nholes (int or str): number of holes in the mesh, either a number\nor 'undefined' if the mesh is non-manifold.", "source": "codesearchnet"}
{"code": "def get_table(bq_legacy_client: BigQueryLegacyClient, table_metadata: TableMetadata) -> Table | None:\n    table: Table | None\n    try:\n        table = bq_legacy_client.get_table(table_metadata.full_table_id)\n    except NotFound:\n        table = None\n    return table", "docstring": "Get a table if it exists in BigQuery given the ID.\n\nArgs:\n* bq_legacy_client: BigQuery Legacy API client\n* table_metadata: TableMetadata object\n\nReturns:\n* Table object if it exists, else None", "source": "github-repos"}
{"code": "def __init__(self, encoding='utf-8'):\n    \n    super(StdoutOutputWriter, self).__init__(sys.stdout, encoding=encoding)", "docstring": "Initializes a stdout output writer.\n\nArgs:\nencoding (Optional[str]): output encoding.", "source": "juraj-google-style"}
{"code": "def add_tasks_r(addon_module, package_module, package_name):\n    \n    module_dict = package_module.__dict__\n    for attr_name, attr_val in module_dict.items():\n\n        if isinstance(attr_val, fabric.tasks.WrappedCallableTask):\n            addon_module.__dict__[attr_name] = attr_val\n\n        elif attr_name != package_name \\\n                and isinstance(attr_val, types.ModuleType) \\\n                and attr_val.__name__.startswith('fabsetup_') \\\n                and attr_name.split('.')[-1] != package_name:\n\n            submodule_name = flo('{addon_module.__name__}.{attr_name}')\n            submodule = get_or_create_module_r(submodule_name)\n            package_module = attr_val\n\n            add_tasks_r(submodule, package_module, package_name)\n            addon_module.__dict__[attr_name] = submodule", "docstring": "Recursively iterate through 'package_module' and add every fabric task\nto the 'addon_module' keeping the task hierarchy.\n\nArgs:\naddon_module(types.ModuleType)\npackage_module(types.ModuleType)\npackage_name(str): Required, to avoid redundant addition of tasks\n\nReturn: None", "source": "juraj-google-style"}
{"code": "def _MergeSameAgency(self, a_agency_id, b_agency_id):\n    \n    a_agency_id = (a_agency_id or\n                   self.feed_merger.a_schedule.GetDefaultAgency().agency_id)\n    b_agency_id = (b_agency_id or\n                   self.feed_merger.b_schedule.GetDefaultAgency().agency_id)\n    a_agency = self.feed_merger.a_schedule.GetAgency(\n        a_agency_id)._migrated_entity\n    b_agency = self.feed_merger.b_schedule.GetAgency(\n        b_agency_id)._migrated_entity\n    if a_agency != b_agency:\n      raise MergeError('agency must be the same')\n    return a_agency.agency_id", "docstring": "Merge agency ids to the corresponding agency id in the merged schedule.\n\nArgs:\na_agency_id: an agency id from the old schedule\nb_agency_id: an agency id from the new schedule\n\nReturns:\nThe agency id of the corresponding merged agency.\n\nRaises:\nMergeError: If a_agency_id and b_agency_id do not correspond to the same\nmerged agency.\nKeyError: Either aaid or baid is not a valid agency id.", "source": "juraj-google-style"}
{"code": "def __closely_associated_score(self, normalized_sentences, top_n_words):\n        \n        scores_list = []\n        sentence_idx = -1\n\n        for sentence in normalized_sentences:\n            self.tokenize(sentence)\n            sentence = self.token\n\n            sentence_idx += 1\n            word_idx = []\n\n            for w in top_n_words:\n                try:\n                    word_idx.append(sentence.index(w))\n                except ValueError:\n                    pass\n\n            word_idx.sort()\n\n            if len(word_idx) == 0:\n                continue\n\n            clusters = []\n            cluster = [word_idx[0]]\n            i = 1\n            while i < len(word_idx):\n                if word_idx[i] - word_idx[i - 1] < self.cluster_threshold:\n                    cluster.append(word_idx[i])\n                else:\n                    clusters.append(cluster[:])\n                    cluster = [word_idx[i]]\n                i += 1\n            clusters.append(cluster)\n\n            max_cluster_score = 0\n            for c in clusters:\n                significant_words_in_cluster = len(c)\n                total_words_in_cluster = c[-1] - c[0] + 1\n                score = 1.0 * significant_words_in_cluster \\\n                    * significant_words_in_cluster / total_words_in_cluster\n\n                if score > max_cluster_score:\n                    max_cluster_score = score\n\n            scores_list.append((sentence_idx, score))\n\n        return scores_list", "docstring": "Scoring the sentence with closely associations.\n\nArgs:\nnormalized_sentences:   The list of sentences.\ntop_n_words:            Important sentences.\n\nReturns:\nThe list of scores.", "source": "juraj-google-style"}
{"code": "def _maybe_repeat(self, x):\n    if isinstance(x, list):\n        assert (len(x) == self.n)\n        return x\n    else:\n        return ([x] * self.n)", "docstring": "Utility function for processing arguments that are singletons or lists.\n\nArgs:\nx: either a list of self.n elements, or not a list.\n\nReturns:\na list of self.n elements.", "source": "codesearchnet"}
{"code": "def _compute_fans(shape):\n    if (len(shape) < 1):\n        fan_in = fan_out = 1\n    elif (len(shape) == 1):\n        fan_in = fan_out = shape[0]\n    elif (len(shape) == 2):\n        fan_in = shape[0]\n        fan_out = shape[1]\n    else:\n        receptive_field_size = 1.0\n        for dim in shape[:(- 2)]:\n            receptive_field_size *= dim\n        fan_in = (shape[(- 2)] * receptive_field_size)\n        fan_out = (shape[(- 1)] * receptive_field_size)\n    if isinstance(fan_in, tf.Dimension):\n        fan_in = fan_in.value\n    if isinstance(fan_out, tf.Dimension):\n        fan_out = fan_out.value\n    return (fan_in, fan_out)", "docstring": "Computes the number of input and output units for a weight shape.\n\nArgs:\nshape: Integer shape tuple or TF tensor shape.\n\nReturns:\nA tuple of scalars (fan_in, fan_out).", "source": "codesearchnet"}
{"code": "def check_compatibility(self):\n    usr_keys = list(self.usr_config.keys())\n    for k in self.usr_config.keys():\n        if k not in usr_keys:\n            err_msg = '[Error] Required config not found in user config.'\n            err_msg += '(required = %s, ' % str(k)\n            err_msg += 'user configs = %s)' % str(usr_keys)\n            logging.error(err_msg)\n            self.error_msg.append(err_msg)\n            self.failures.append([k, err_msg])\n            return False\n    overall_status = True\n    for config_name, spec in self.usr_config.items():\n        temp_status = True\n        in_required = config_name in list(self.required.keys())\n        in_optional = config_name in list(self.optional.keys())\n        in_unsupported = config_name in list(self.unsupported.keys())\n        in_dependency = config_name in list(self.dependency.keys())\n        if not (in_required or in_optional or in_unsupported or in_dependency):\n            warn_msg = '[Error] User config not defined in config file.'\n            warn_msg += '(user config = %s)' % str(config_name)\n            logging.warning(warn_msg)\n            self.warning_msg.append(warn_msg)\n            self.failures.append([config_name, warn_msg])\n            temp_status = False\n        else:\n            if in_unsupported:\n                if self.in_range(spec, self.unsupported[config_name]):\n                    err_msg = '[Error] User config is unsupported. It is '\n                    err_msg += \"defined under 'Unsupported' section in the config file.\"\n                    err_msg += ' (config = %s, spec = %s)' % (config_name, str(spec))\n                    logging.error(err_msg)\n                    self.error_msg.append(err_msg)\n                    self.failures.append([config_name, err_msg])\n                    temp_status = False\n            if in_required:\n                if not self.in_range(spec, self.required[config_name]):\n                    err_msg = '[Error] User config cannot be supported. It is not in '\n                    err_msg += \"the supported range as defined in the 'Required' \"\n                    err_msg += 'section. (config = %s, ' % config_name\n                    err_msg += 'spec = %s)' % str(spec)\n                    logging.error(err_msg)\n                    self.error_msg.append(err_msg)\n                    self.failures.append([config_name, err_msg])\n                    temp_status = False\n            if in_optional:\n                if not self.in_range(spec, self.optional[config_name]):\n                    err_msg = '[Error] User config cannot be supported. It is not in '\n                    err_msg += \"the supported range as defined in the 'Optional' \"\n                    err_msg += 'section. 
(config = %s, ' % config_name\n                    err_msg += 'spec = %s)' % str(spec)\n                    logging.error(err_msg)\n                    self.error_msg.append(err_msg)\n                    self.failures.append([config_name, err_msg])\n                    temp_status = False\n            if in_dependency:\n                dep_list = self.dependency[config_name]\n                if dep_list:\n                    for rule in dep_list:\n                        cfg = rule[0]\n                        cfg_req = rule[1]\n                        dep = rule[2]\n                        dep_req = rule[3]\n                        try:\n                            cfg_name = self.usr_config[cfg]\n                            dep_name = self.usr_config[dep]\n                            cfg_status = self.in_range(cfg_name, cfg_req)\n                            dep_status = self.in_range(dep_name, dep_req)\n                            if cfg_status:\n                                if not dep_status:\n                                    err_msg = '[Error] User config has a dependency that cannot'\n                                    err_msg += ' be supported. '\n                                    err_msg += \"'%s' has a dependency on \" % str(config_name)\n                                    err_msg += \"'%s'.\" % str(dep)\n                                    logging.error(err_msg)\n                                    self.error_msg.append(err_msg)\n                                    self.failures.append([config_name, err_msg])\n                                    temp_status = False\n                        except KeyError:\n                            err_msg = '[Error] Dependency is missing from `Required`. '\n                            err_msg += '(config = %s, dep = %s)' % (cfg, dep)\n                            logging.error(err_msg)\n                            self.error_msg.append(err_msg)\n                            self.failures.append([config_name, err_msg])\n                            temp_status = False\n        if temp_status:\n            self.successes.append([config_name, spec])\n        else:\n            overall_status = False\n    return overall_status", "docstring": "Checks version and dependency compatibility for a given configuration.\n\n`check_compatibility` immediately returns with `False` (or failure status)\nif any child process or checks fail. For error and warning messages, either\nprint `self.(error_msg|warning_msg)` or call `_print` function.\n\nReturns:\nBoolean that is a status of the compatibility check result.", "source": "github-repos"}
{"code": "def destroy(ads):\n    for ad in ads:\n        try:\n            ad.services.stop_all()\n        except Exception:\n            ad.log.exception('Failed to clean up properly.')", "docstring": "Cleans up AndroidDevice objects.\n\nArgs:\nads: A list of AndroidDevice objects.", "source": "github-repos"}
{"code": "def from_str(self, in_str):\n    parts = in_str.split(';')\n    for part in parts:\n        (var_name, value) = part.split(':')\n        if (var_name == 'Obs_Threshold'):\n            self.obs_threshold = float(value)\n        elif (var_name == 'Thresholds'):\n            self.thresholds = np.array(value.split(), dtype=float)\n            self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns, data=np.zeros((self.thresholds.size, self.contingency_tables.columns.size)))\n        elif (var_name in self.contingency_tables.columns):\n            self.contingency_tables[var_name] = np.array(value.split(), dtype=int)", "docstring": "Read the DistributedROC string and parse the contingency table values from it.\n\nArgs:\nin_str (str): The string output from the __str__ method", "source": "codesearchnet"}
{"code": "def devectorize(vectorized_mat, method='col'):\n    vectorized_mat = np.array(vectorized_mat)\n    dimension = int(np.sqrt(vectorized_mat.size))\n    if (len(vectorized_mat) != (dimension * dimension)):\n        raise Exception('Input is not a vectorized square matrix')\n    if (method == 'col'):\n        return vectorized_mat.reshape(dimension, dimension, order='F')\n    elif (method == 'row'):\n        return vectorized_mat.reshape(dimension, dimension, order='C')\n    elif (method in ['pauli', 'pauli_weights']):\n        num_qubits = int(np.log2(dimension))\n        if (dimension != (2 ** num_qubits)):\n            raise Exception('Input state must be n-qubit state')\n        if (method == 'pauli_weights'):\n            pgroup = pauli_group(num_qubits, case='weight')\n        else:\n            pgroup = pauli_group(num_qubits, case='tensor')\n        pbasis = (np.array([p.to_matrix() for p in pgroup]) / (2 ** num_qubits))\n        return np.tensordot(vectorized_mat, pbasis, axes=1)\n    return None", "docstring": "Devectorize a vectorized square matrix.\n\nArgs:\nvectorized_mat (ndarray): a vectorized density matrix.\nmethod (str): the method of devectorization. Allowed values are\n- 'col' (default): flattens to column-major vector.\n- 'row': flattens to row-major vector.\n- 'pauli': flattens in the n-qubit Pauli basis.\n- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by\nweight.\n\nReturns:\nndarray: the resulting matrix.\nRaises:\nException: if input state is not a n-qubit state", "source": "codesearchnet"}
{"code": "def clone(self, callable=None, **overrides):\n        \n        old = {k: v for k, v in self.get_param_values()\n               if k not in ['callable', 'name']}\n        params = dict(old, **overrides)\n        callable = self.callable if callable is None else callable\n        return self.__class__(callable, **params)", "docstring": "Clones the Callable optionally with new settings\n\nArgs:\ncallable: New callable function to wrap\n**overrides: Parameter overrides to apply\n\nReturns:\nCloned Callable object", "source": "juraj-google-style"}
{"code": "def write_config(params, config_path=None):\n    \n    if config_path is None:\n        config_path = tempfile.mktemp(prefix=\"mongo-\")\n\n    cfg = params.copy()\n    if 'setParameter' in cfg:\n        set_parameters = cfg.pop('setParameter')\n        try:\n            for key, value in set_parameters.items():\n                cfg['setParameter = ' + key] = value\n        except AttributeError:\n            reraise(RequestError,\n                    'Not a valid value for setParameter: %r '\n                    'Expected \"setParameter\": {<param name> : value, ...}'\n                    % set_parameters)\n\n    \n    for key, value in cfg.items():\n        if isinstance(value, bool):\n            cfg[key] = json.dumps(value)\n\n    with open(config_path, 'w') as fd:\n        data = '\\n'.join('%s=%s' % (key, item) for key, item in cfg.items())\n        fd.write(data)\n    return config_path", "docstring": "write mongo*'s config file\nArgs:\nparams - options wich file contains\nconfig_path - path to the config_file, will create if None\nReturn config_path\nwhere config_path - path to mongo*'s options file", "source": "juraj-google-style"}
{"code": "def _assert_sparse_indices_are_ragged_right(indices):\n    index_prefix = indices[:, :-1]\n    index_suffix = indices[:, -1]\n    index_prefix_changed = math_ops.reduce_any(math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)\n    index_ok = array_ops.where(index_prefix_changed, math_ops.equal(index_suffix[1:], 0), math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))\n    sparse_indices_are_ragged_right = math_ops.logical_and(math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)), math_ops.reduce_all(index_ok))\n    message = ['SparseTensor is not right-ragged', 'SparseTensor.indices =', indices]\n    return [control_flow_assert.Assert(sparse_indices_are_ragged_right, message)]", "docstring": "Checks that the given SparseTensor.indices tensor is ragged-right.\n\nExample: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right\nbecause the entry `[3, 1]` skips a cell.\n\nArgs:\nindices: The SparseTensor indices to check.\n\nReturns:\nA list of control dependency op tensors.", "source": "github-repos"}
{"code": "def kill_plasma_store(self, check_alive=True):\n        \n        self._kill_process_type(\n            ray_constants.PROCESS_TYPE_PLASMA_STORE, check_alive=check_alive)", "docstring": "Kill the plasma store.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "juraj-google-style"}
{"code": "def agg_dims(arr, stat):\n    \n    axis = None\n    if arr.ndim > 2:\n        axis = 1\n        arr = arr.reshape(arr.shape[0], -1)\n    module = np.ma if hasattr(arr, 'mask') else np\n    return getattr(module, stat)(arr, axis)", "docstring": "Returns a 1D array with higher dimensions aggregated using stat fn.\n\nArguments:\narr -- ndarray\nstat -- numpy or numpy.ma function as str to call", "source": "juraj-google-style"}
{"code": "def double(self, count: float=0) -> float:\n    return 2 * count", "docstring": "Returns the input multiplied by 2.\n\nArgs:\ncount: Input number that you want to double.\n\nReturns:\nA number that is the double of count.", "source": "github-repos"}
{"code": "def inspect_service(self, service, insert_defaults=None):\n    url = self._url('/services/{0}', service)\n    params = {}\n    if (insert_defaults is not None):\n        if utils.version_lt(self._version, '1.29'):\n            raise errors.InvalidVersion('insert_defaults is not supported in API version < 1.29')\n        params['insertDefaults'] = insert_defaults\n    return self._result(self._get(url, params=params), True)", "docstring": "Return information about a service.\n\nArgs:\nservice (str): Service name or ID.\ninsert_defaults (boolean): If true, default values will be merged\ninto the service inspect output.\n\nReturns:\n(dict): A dictionary of the server-side representation of the\nservice, including all relevant properties.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def ParseFileEntryMetadata(self, parser_mediator, file_entry):\n    \n    if self._filestat_parser:\n      self._ParseFileEntryWithParser(\n          parser_mediator, self._filestat_parser, file_entry)", "docstring": "Parses the file entry metadata e.g. file system data.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_entry (dfvfs.FileEntry): file entry.", "source": "juraj-google-style"}
{"code": "def IsSimpleGroup(component):\n    assert isinstance(component, dict)\n    for unused_key, value in component.items():\n        if not IsValue(value) and (not isinstance(value, (list, dict))):\n            return False\n    return True", "docstring": "If a group is simple enough, then we treat it as a value in PrintResult.\n\nOnly if a group contains all value types do we consider it simple enough to\nprint as a value.\n\nArgs:\ncomponent: The group to check for value-group status.\nReturns:\nA boolean indicating if the group should be treated as a value for printing\npurposes.", "source": "github-repos"}
{"code": "def _CreateReadAccessHelper(self):\n    h = CheckAccessHelper('read')\n    h.Allow('aff4:/')\n    h.Allow('aff4:/users')\n    h.Allow('aff4:/users/*', self._IsHomeDir)\n    h.Allow('aff4:/foreman', self._UserHasAdminLabel)\n    h.Allow('aff4:/blobs')\n    h.Allow('aff4:/blobs/*')\n    h.Allow('aff4:/FP')\n    h.Allow('aff4:/FP/*')\n    h.Allow('aff4:/files')\n    h.Allow('aff4:/files/*')\n    h.Allow('aff4:/index')\n    h.Allow('aff4:/index/*')\n    h.Allow('aff4:/client_index')\n    h.Allow('aff4:/client_index/*')\n    h.Allow('aff4:/ACL')\n    h.Allow('aff4:/ACL/*')\n    h.Allow('aff4:/stats')\n    h.Allow('aff4:/stats/*')\n    h.Allow('aff4:/config')\n    h.Allow('aff4:/config/*')\n    h.Allow('aff4:/flows')\n    h.Allow('aff4:/flows/*')\n    h.Allow('aff4:/hunts')\n    h.Allow('aff4:/hunts/*')\n    h.Allow('aff4:/cron')\n    h.Allow('aff4:/cron/*')\n    h.Allow('aff4:/audit')\n    h.Allow('aff4:/audit/*')\n    h.Allow('aff4:/audit/logs')\n    h.Allow('aff4:/audit/logs/*')\n    h.Allow(self.CLIENT_URN_PATTERN)\n    h.Allow((self.CLIENT_URN_PATTERN + '/*'), self._HasAccessToClient)\n    h.Allow('aff4:/artifact_store')\n    h.Allow('aff4:/artifact_store/*')\n    return h", "docstring": "Creates a CheckAccessHelper for controlling read access.\n\nThis function and _CreateQueryAccessHelper essentially define GRR's ACL\npolicy. Please refer to these 2 functions to either review or modify\nGRR's ACLs.\n\nRead access gives you the ability to open and read aff4 objects for which\nyou already have the URN.\n\nReturns:\nCheckAccessHelper for controlling read access.", "source": "codesearchnet"}
{"code": "def _manage_location(attr):\n    return property((lambda self: getattr(self, ('_%s' % attr))), (lambda self, value: self._set_location(attr, value)))", "docstring": "Build managed property interface.\n\nArgs:\nattr (str): Property's name\n\nReturns:\nproperty: Managed property interface", "source": "codesearchnet"}
{"code": "def getslice_slot(self, node: cfg.CFGNode, start_var: cfg.Variable, end_var: cfg.Variable) -> tuple[cfg.CFGNode, cfg.Variable]:\n    node, ret = self.call_pytd(node, '__getslice__', start_var, end_var)\n    results = []\n    unresolved = False\n    if self.is_concrete:\n        for start_val, end_val in cfg_utils.variable_product([start_var, end_var]):\n            try:\n                start = self._get_index(start_val.data)\n                end = self._get_index(end_val.data)\n            except abstract_utils.ConversionError:\n                unresolved = True\n            else:\n                results.append(List(self.pyval[start:end], self.ctx).to_variable(node))\n    if unresolved or not self.is_concrete:\n        results.append(ret)\n    return (node, self.ctx.join_variables(node, results))", "docstring": "Implements __getslice__ for List.\n\nArguments:\nnode: The current CFG node.\nstart_var: A Variable containing the i in lst[i:j].\nend_var: A Variable containing the j in lst[i:j].\n\nReturns:\nTuple of (node, return_variable). node may be the same as the argument.\nreturn_variable is a Variable with bindings of the possible return values.", "source": "github-repos"}
{"code": "def get_inheritance(obj_name, obj_type='file'):\n    obj_dacl = dacl(obj_name=obj_name, obj_type=obj_type)\n    inherited = win32security.INHERITED_ACE\n    for i in range(0, obj_dacl.dacl.GetAceCount()):\n        ace = obj_dacl.dacl.GetAce(i)\n        if ((ace[0][1] & inherited) == inherited):\n            return True\n    return False", "docstring": "Get an object's inheritance.\n\nArgs:\n\nobj_name (str):\nThe name of the object\n\nobj_type (Optional[str]):\nThe type of object. Only three object types allow inheritance. Valid\nobjects are:\n\n- file (default): This is a file or directory\n- registry\n- registry32 (for WOW64)\n\nThe following should return False as there is no inheritance:\n\n- service\n- printer\n- share\n\nReturns:\nbool: True if enabled, otherwise False\n\nUsage:\n\n.. code-block:: python\n\nsalt.utils.win_dacl.get_inheritance('HKLM\\\\SOFTWARE\\\\salt', 'registry')", "source": "codesearchnet"}
{"code": "def to_concat_skip_model(self, start_id, end_id):\n        \n        self.operation_history.append((\"to_concat_skip_model\", start_id, end_id))\n        filters_end = self.layer_list[end_id].output.shape[-1]\n        filters_start = self.layer_list[start_id].output.shape[-1]\n        start_node_id = self.layer_id_to_output_node_ids[start_id][0]\n\n        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]\n        end_node_id = self.layer_id_to_output_node_ids[end_id][0]\n\n        skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id)\n\n        concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))\n        self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id)\n\n        concat_layer = StubConcatenate()\n        concat_layer.input = [\n            self.node_list[concat_input_node_id],\n            self.node_list[skip_output_id],\n        ]\n        concat_output_node_id = self._add_node(Node(concat_layer.output_shape))\n        self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id)\n        self._add_edge(concat_layer, skip_output_id, concat_output_node_id)\n        concat_layer.output = self.node_list[concat_output_node_id]\n        self.node_list[concat_output_node_id].shape = concat_layer.output_shape\n\n        \n        new_conv_layer = get_conv_class(self.n_dim)(\n            filters_start + filters_end, filters_end, 1\n        )\n        self._add_edge(new_conv_layer, concat_output_node_id, end_node_id)\n        new_conv_layer.input = self.node_list[concat_output_node_id]\n        new_conv_layer.output = self.node_list[end_node_id]\n        self.node_list[end_node_id].shape = new_conv_layer.output_shape\n\n        if self.weighted:\n            filter_shape = (1,) * self.n_dim\n            weights = np.zeros((filters_end, filters_end) + filter_shape)\n            for i in range(filters_end):\n                filter_weight = np.zeros((filters_end,) + filter_shape)\n                center_index = (i,) + (0,) * self.n_dim\n                filter_weight[center_index] = 1\n                weights[i, ...] = filter_weight\n            weights = np.concatenate(\n                (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1\n            )\n            bias = np.zeros(filters_end)\n            new_conv_layer.set_weights(\n                (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1])))\n            )", "docstring": "Add a weighted add concatenate connection from after start node to end node.\nArgs:\nstart_id: The convolutional layer ID, after which to start the skip-connection.\nend_id: The convolutional layer ID, after which to end the skip-connection.", "source": "juraj-google-style"}
{"code": "def pack_x_y_sample_weight(x, y=None, sample_weight=None):\n    if y is None:\n        if not isinstance(x, (tuple, list)):\n            return x\n        else:\n            return (x,)\n    elif sample_weight is None:\n        return (x, y)\n    else:\n        return (x, y, sample_weight)", "docstring": "Packs user-provided data into a tuple.\n\nThis is a convenience utility for packing data into the tuple formats\nthat `Model.fit()` uses.\n\nExample:\n\n>>> x = ops.ones((10, 1))\n>>> data = pack_x_y_sample_weight(x)\n>>> isinstance(data, ops.Tensor)\nTrue\n>>> y = ops.ones((10, 1))\n>>> data = pack_x_y_sample_weight(x, y)\n>>> isinstance(data, tuple)\nTrue\n>>> x, y = data\n\nArgs:\nx: Features to pass to `Model`.\ny: Ground-truth targets to pass to `Model`.\nsample_weight: Sample weight for each element.\n\nReturns:\nTuple in the format used in `Model.fit()`.", "source": "github-repos"}
{"code": "def create_effect(self, label: str, name: str, *args, **kwargs) -> Effect:\n        \n        effect_cls = effects.find_effect_class(name)\n        effect = effect_cls(*args, **kwargs)\n        effect._label = label\n\n        if label in self._effects:\n            raise ValueError(\"An effect with label '{}' already exists\".format(label))\n\n        self._effects[label] = effect\n\n        return effect", "docstring": "Create an effect instance adding it to the internal effects dictionary using the label as key.\n\nArgs:\nlabel (str): The unique label for the effect instance\nname (str): Name or full python path to the effect class we want to instantiate\nargs: Positional arguments to the effect initializer\nkwargs: Keyword arguments to the effect initializer\n\nReturns:\nThe newly created Effect instance", "source": "juraj-google-style"}
{"code": "def get_index(uid, i):\n    return _SHARED_SEQUENCES[uid][i]", "docstring": "Get the value from the PyDataset `uid` at index `i`.\n\nTo allow multiple PyDatasets to be used at the same time, we use `uid` to\nget a specific one. A single PyDataset would cause the validation to\noverwrite the training PyDataset.\n\nThis methods is called from worker threads.\n\nArgs:\nuid: int, PyDataset identifier\ni: index\n\nReturns:\nThe value at index `i`.", "source": "github-repos"}
{"code": "def WriteFileEntry(self, path):\n    string = '{0:s}\\n'.format(path)\n    encoded_string = self._EncodeString(string)\n    self._file_object.write(encoded_string)", "docstring": "Writes the file path to file.\n\nArgs:\npath (str): path of the file.", "source": "codesearchnet"}
{"code": "def _duplicate_example(self, request):\n    index = int(request.args.get('index'))\n    if (index >= len(self.examples)):\n        return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400)\n    new_example = self.example_class()\n    new_example.CopyFrom(self.examples[index])\n    self.examples.append(new_example)\n    self.updated_example_indices.add((len(self.examples) - 1))\n    self.generate_sprite([ex.SerializeToString() for ex in self.examples])\n    return http_util.Respond(request, {}, 'application/json')", "docstring": "Duplicates the specified example.\n\nArgs:\nrequest: A request that should contain 'index'.\n\nReturns:\nAn empty response.", "source": "codesearchnet"}
{"code": "def set_metadata(self, key: str, value: str):\n    if ((not isinstance(key, str)) or (not isinstance(value, str))):\n        raise TypeError(\"'key' and 'value' of metadata MUST be strings\")\n    self.metadata[key] = value", "docstring": "Add a new metadata to the message\n\nArgs:\nkey (str): name of the metadata\nvalue (str): value of the metadata", "source": "codesearchnet"}
{"code": "def image(array, domain=None, width=None, format='png', **kwargs):\n    image_data = serialize_array(array, fmt=format, domain=domain)\n    image = IPython.display.Image(data=image_data, format=format, width=width)\n    IPython.display.display(image)", "docstring": "Display an image.\n\nArgs:\narray: NumPy array representing the image\nfmt: Image format e.g. png, jpeg\ndomain: Domain of pixel values, inferred from min & max values if None\nw: width of output image, scaled using nearest neighbor interpolation.\nsize unchanged if None", "source": "codesearchnet"}
{"code": "def make_multiscale(image, resolutions,\n                    resize_method=tf.image.ResizeMethod.BICUBIC,\n                    num_channels=3):\n  \n  scaled_images = []\n  for height in resolutions:\n    scaled_image = tf.image.resize_images(\n        image,\n        size=[height, height],  \n        method=resize_method)\n    scaled_image = tf.to_int64(scaled_image)\n    scaled_image.set_shape([height, height, num_channels])\n    scaled_images.append(scaled_image)\n\n  return scaled_images", "docstring": "Returns list of scaled images, one for each resolution.\n\nArgs:\nimage: Tensor of shape [height, height, num_channels].\nresolutions: List of heights that image's height is resized to.\nresize_method: tf.image.ResizeMethod.\nnum_channels: Number of channels in image.\n\nReturns:\nList of Tensors, one for each resolution with shape given by\n[resolutions[i], resolutions[i], num_channels].", "source": "juraj-google-style"}
{"code": "def call(self, inputs):\n    del inputs\n    with tf.compat.v1.name_scope(self._name):\n        return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)", "docstring": "Runs the model to generate multivariate normal distribution.\n\nArgs:\ninputs: Unused.\n\nReturns:\nA MultivariateNormalDiag distribution with event shape\n[dimensions], batch shape [], and sample shape [sample_shape,\ndimensions].", "source": "codesearchnet"}
{"code": "def remove_tree_by_path(self, path):\n        \n        with transaction.manager:\n            trees = self.path_db.get(path, None)\n\n        if not trees:\n            return\n\n        for tree in trees:\n            return self._remove_tree(tree)", "docstring": "Remove the tree from database by given `path`.\n\nArgs:\npath (str): Path of the tree.", "source": "juraj-google-style"}
{"code": "def _unable_to_call_layer_due_to_serialization_issue(layer, *unused_args, **unused_kwargs):\n    raise ValueError('Cannot call custom layer {} of type {}, because the call function was not serialized to the SavedModel.Please try one of the following methods to fix this issue:\\n\\n(1) Implement `get_config` and `from_config` in the layer/model class, and pass the object to the `custom_objects` argument when loading the model. For more details, see: https:", "docstring": "Replaces the `layer.call` if the layer was not fully serialized.\n\nKeras Model/Layer serialization is relatively relaxed because SavedModels\nare not always loaded back as keras models. Thus, when there is an issue\ntracing a non-signature function, a warning is logged instead of raising an\nerror. This results in a SavedModel where the model's call function is saved,\nbut the internal layer call functions are not.\n\nWhen deserialized with `tf.keras.models.load_model`, the internal layers\nwhich do not have serialized call functions should raise an error when called.\n\nArgs:\nlayer: Layer without the serialized call function.\n\nRaises:\nValueError", "source": "github-repos"}
{"code": "def add_signature(key, inputs, outputs):\n    _check_dict_maps_to_tensors_or_sparse_tensors(inputs)\n    _check_dict_maps_to_tensors_or_sparse_tensors(outputs)\n    input_info = {input_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for (input_name, tensor) in inputs.items()}\n    output_info = {output_name: tf_v1.saved_model.utils.build_tensor_info(tensor) for (output_name, tensor) in outputs.items()}\n    signature = tf_v1.saved_model.signature_def_utils.build_signature_def(input_info, output_info)\n    tf_v1.add_to_collection(_SIGNATURE_COLLECTION, (key, signature))", "docstring": "Adds a signature to current graph.\n\nArgs:\nkey: Signature key as a string.\ninputs: Signature inputs as a map from string to Tensor or SparseTensor.\noutputs: Signature outputs as a map from string to Tensor or SparseTensor.\n(Recall that a Variable is not a Tensor, but Variable.value() is.)\n\nRaises:\nTypeError: if the arguments have the wrong types.", "source": "codesearchnet"}
{"code": "def get_segment(neuron, section_id, segment_id):\n    sec = neuron.sections[section_id]\n    return sec.points[segment_id:(segment_id + 2)][(:, COLS.XYZR)]", "docstring": "Get a segment given a section and segment id\n\nReturns:\narray of two [x, y, z, r] points defining segment", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.CreateCluster = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.CreateClusterRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.UpdateCluster = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.UpdateClusterRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.DeleteCluster = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DeleteClusterRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.GetCluster = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.ClusterController/GetCluster\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.GetClusterRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.Cluster.FromString,\n        )\n        self.ListClusters = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.ClusterController/ListClusters\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersResponse.FromString,\n        )\n        self.DiagnoseCluster = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def query(self, attributes=None, filters=None, only_unique=True, use_attr_names=False, dtypes=None):\n    root = ElementTree.Element('Query')\n    root.set('virtualSchemaName', self._virtual_schema)\n    root.set('formatter', 'TSV')\n    root.set('header', '1')\n    root.set('uniqueRows', native_str(int(only_unique)))\n    root.set('datasetConfigVersion', '0.6')\n    dataset = ElementTree.SubElement(root, 'Dataset')\n    dataset.set('name', self.name)\n    dataset.set('interface', 'default')\n    if (attributes is None):\n        attributes = list(self.default_attributes.keys())\n    for name in attributes:\n        try:\n            attr = self.attributes[name]\n            self._add_attr_node(dataset, attr)\n        except KeyError:\n            raise BiomartException('Unknown attribute {}, check dataset attributes for a list of valid attributes.'.format(name))\n    if (filters is not None):\n        for (name, value) in filters.items():\n            try:\n                filter_ = self.filters[name]\n                self._add_filter_node(dataset, filter_, value)\n            except KeyError:\n                raise BiomartException('Unknown filter {}, check dataset filters for a list of valid filters.'.format(name))\n    response = self.get(query=ElementTree.tostring(root))\n    if ('Query ERROR' in response.text):\n        raise BiomartException(response.text)\n    try:\n        result = pd.read_csv(StringIO(response.text), sep='\\t', dtype=dtypes)\n    except TypeError as err:\n        raise ValueError('Non valid data type is used in dtypes')\n    if use_attr_names:\n        column_map = {self.attributes[attr].display_name: attr for attr in attributes}\n        result.rename(columns=column_map, inplace=True)\n    return result", "docstring": "Queries the dataset to retrieve the contained data.\n\nArgs:\nattributes (list[str]): Names of attributes to fetch in query.\nAttribute names must correspond to valid attributes. See\nthe attributes property for a list of valid attributes.\nfilters (dict[str,any]): Dictionary of filters --> values\nto filter the dataset by. Filter names and values must\ncorrespond to valid filters and filter values. See the\nfilters property for a list of valid filters.\nonly_unique (bool): Whether to return only rows containing\nunique values (True) or to include duplicate rows (False).\nuse_attr_names (bool): Whether to use the attribute names\nas column names in the result (True) or the attribute\ndisplay names (False).\ndtypes (dict[str,any]): Dictionary of attributes --> data types\nto describe to pandas how the columns should be handled\n\nReturns:\npandas.DataFrame: DataFrame containing the query results.", "source": "codesearchnet"}
{"code": "def apply(\n        self,\n        func,\n        num_splits=None,\n        other_axis_partition=None,\n        maintain_partitioning=True,\n        **kwargs\n    ):\n        \n        import dask\n\n        if num_splits is None:\n            num_splits = len(self.list_of_blocks)\n\n        if other_axis_partition is not None:\n            return [\n                DaskFramePartition(dask.delayed(obj))\n                for obj in deploy_func_between_two_axis_partitions(\n                    self.axis,\n                    func,\n                    num_splits,\n                    len(self.list_of_blocks),\n                    kwargs,\n                    *dask.compute(\n                        *tuple(\n                            self.list_of_blocks + other_axis_partition.list_of_blocks\n                        )\n                    )\n                )\n            ]\n\n        args = [self.axis, func, num_splits, kwargs, maintain_partitioning]\n\n        args.extend(dask.compute(*self.list_of_blocks))\n        return [\n            DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args)\n        ]", "docstring": "Applies func to the object.\n\nSee notes in Parent class about this method.\n\nArgs:\nfunc: The function to apply.\nnum_splits: The number of times to split the result object.\nother_axis_partition: Another `DaskFrameAxisPartition` object to apply to\nfunc with this one.\n\nReturns:\nA list of `DaskFramePartition` objects.", "source": "juraj-google-style"}
{"code": "def create(self, interface, vrid, **kwargs):\n    if ('enable' not in kwargs):\n        kwargs['enable'] = False\n    return self._vrrp_set(interface, vrid, **kwargs)", "docstring": "Creates a vrrp instance from an interface\n\nNote:\nThis method will attempt to create a vrrp in the node's\noperational config. If the vrrp already exists on the\ninterface, then this method will set the properties of\nthe existing vrrp to those that have been passed in, if\npossible.\n\nArgs:\ninterface (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be created.\nkwargs (dict): A dictionary specifying the properties to\nbe applied to the new vrrp instance. See library\ndocumentation for available keys and values.\n\nReturns:\nTrue if the vrrp could be created otherwise False (see Node)", "source": "codesearchnet"}
{"code": "def addBorrowers(self, *borrowers):\n    self._borrowers.extend(borrowers)\n    ((debug.logger & debug.flagCompiler) and debug.logger(('current MIB borrower(s): %s' % ', '.join([str(x) for x in self._borrowers]))))\n    return self", "docstring": "Add more transformed MIBs repositories to borrow MIBs from.\n\nWhenever MibCompiler.compile encounters MIB module which neither of\nthe *searchers* can find or fetched ASN.1 MIB module can not be\nparsed (due to syntax errors), these *borrowers* objects will be\ninvoked in order of their addition asking each if already transformed\nMIB can be fetched (borrowed).\n\nArgs:\nborrowers: borrower object(s)\n\nReturns:\nreference to itself (can be used for call chaining)", "source": "codesearchnet"}
{"code": "def get_csv_row_count(filename: str) -> int:\n    row_count = 0\n    with open(filename, 'r') as f:\n        for _ in f:\n            row_count += 1\n        if row_count != 0:\n            row_count -= 1\n    return row_count", "docstring": "Quickly count number of rows in the given csv file.\n\nArgs:\n* filename: Path to CSV file\n\nReturns:\n* number of rows, minus header", "source": "github-repos"}
{"code": "def save(self, checkpoint_dir=None):\n        \n\n        checkpoint_dir = os.path.join(checkpoint_dir or self.logdir,\n                                      \"checkpoint_{}\".format(self._iteration))\n        if not os.path.exists(checkpoint_dir):\n            os.makedirs(checkpoint_dir)\n        checkpoint = self._save(checkpoint_dir)\n        saved_as_dict = False\n        if isinstance(checkpoint, string_types):\n            if (not checkpoint.startswith(checkpoint_dir)\n                    or checkpoint == checkpoint_dir):\n                raise ValueError(\n                    \"The returned checkpoint path must be within the \"\n                    \"given checkpoint dir {}: {}\".format(\n                        checkpoint_dir, checkpoint))\n            if not os.path.exists(checkpoint):\n                raise ValueError(\n                    \"The returned checkpoint path does not exist: {}\".format(\n                        checkpoint))\n            checkpoint_path = checkpoint\n        elif isinstance(checkpoint, dict):\n            saved_as_dict = True\n            checkpoint_path = os.path.join(checkpoint_dir, \"checkpoint\")\n            with open(checkpoint_path, \"wb\") as f:\n                pickle.dump(checkpoint, f)\n        else:\n            raise ValueError(\n                \"`_save` must return a dict or string type: {}\".format(\n                    str(type(checkpoint))))\n        with open(checkpoint_path + \".tune_metadata\", \"wb\") as f:\n            pickle.dump({\n                \"experiment_id\": self._experiment_id,\n                \"iteration\": self._iteration,\n                \"timesteps_total\": self._timesteps_total,\n                \"time_total\": self._time_total,\n                \"episodes_total\": self._episodes_total,\n                \"saved_as_dict\": saved_as_dict\n            }, f)\n        return checkpoint_path", "docstring": "Saves the current model state to a checkpoint.\n\nSubclasses should override ``_save()`` instead to save state.\nThis method dumps additional metadata alongside the saved path.\n\nArgs:\ncheckpoint_dir (str): Optional dir to place the checkpoint.\n\nReturns:\nCheckpoint path that may be passed to restore().", "source": "juraj-google-style"}
{"code": "def _open_config_files(self, command_line_args):\n        \n        \n        config_files = [open(f) for files in map(glob.glob, map(os.path.expanduser, self._default_config_files))\n                        for f in files]\n\n        \n        \n        user_config_file_arg_actions = [\n            a for a in self._actions if getattr(a, \"is_config_file_arg\", False)]\n\n        if not user_config_file_arg_actions:\n            return config_files\n\n        for action in user_config_file_arg_actions:\n            \n            \n            arg_parser = argparse.ArgumentParser(\n                prefix_chars=self.prefix_chars,\n                add_help=False)\n\n            arg_parser._add_action(action)\n\n            \n            \n            \n            def error_method(self, message):\n                pass\n            arg_parser.error = types.MethodType(error_method, arg_parser)\n\n            \n            parsed_arg = arg_parser.parse_known_args(args=command_line_args)\n            if not parsed_arg:\n                continue\n            namespace, _ = parsed_arg\n            user_config_file = getattr(namespace, action.dest, None)\n\n            if not user_config_file:\n                continue\n            \n            user_config_file = os.path.expanduser(user_config_file)\n            if not os.path.isfile(user_config_file):\n                self.error('File not found: %s' % user_config_file)\n\n            config_files += [open(user_config_file)]\n\n        return config_files", "docstring": "Tries to parse config file path(s) from within command_line_args.\nReturns a list of opened config files, including files specified on the\ncommandline as well as any default_config_files specified in the\nconstructor that are present on disk.\n\nArgs:\ncommand_line_args: List of all args (already split on spaces)", "source": "juraj-google-style"}
{"code": "def get_interpolated_value(self, energy):\n        \n        f = {}\n        for spin in self.densities.keys():\n            f[spin] = get_linear_interpolated_value(self.energies,\n                                                    self.densities[spin],\n                                                    energy)\n        return f", "docstring": "Returns interpolated density for a particular energy.\n\nArgs:\nenergy: Energy to return the density for.", "source": "juraj-google-style"}
{"code": "def LoadFromStorage(cls, path=None):\n    \n    if path is None:\n      path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')\n\n    return cls(**googleads.common.LoadFromStorage(\n        path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,\n        cls._OPTIONAL_INIT_VALUES))", "docstring": "Creates an AdWordsClient with information stored in a yaml file.\n\nArgs:\n[optional]\npath: The path string to the file containing cached AdWords data.\n\nReturns:\nAn AdWordsClient initialized with the values cached in the file.\n\nRaises:\nA GoogleAdsValueError if the given yaml file does not contain the\ninformation necessary to instantiate a client object - either a\nrequired key was missing or an OAuth2 key was missing.", "source": "juraj-google-style"}
{"code": "def bessel_k1e(x, name=None):\n    with ops.name_scope(name, 'bessel_k1e', [x]):\n        return gen_special_math_ops.bessel_k1e(x)", "docstring": "Computes the Bessel k1e function of `x` element-wise.\n\nModified Bessel function of order 1.\n\n>>> tf.math.special.bessel_k1e([0.5, 1., 2., 4.]).numpy()\narray([2.73100971, 1.63615349, 1.03347685, 0.68157595], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.k1e\n@end_compatibility", "source": "github-repos"}
{"code": "def _GetTextInside(text, start_pattern):\n    matching_punctuation = {'(': ')', '{': '}', '[': ']'}\n    closing_punctuation = set(itervalues(matching_punctuation))\n    match = re.search(start_pattern, text, re.M)\n    if (not match):\n        return None\n    start_position = match.end(0)\n    assert (start_position > 0), 'start_pattern must ends with an opening punctuation.'\n    assert (text[(start_position - 1)] in matching_punctuation), 'start_pattern must ends with an opening punctuation.'\n    punctuation_stack = [matching_punctuation[text[(start_position - 1)]]]\n    position = start_position\n    while (punctuation_stack and (position < len(text))):\n        if (text[position] == punctuation_stack[(- 1)]):\n            punctuation_stack.pop()\n        elif (text[position] in closing_punctuation):\n            return None\n        elif (text[position] in matching_punctuation):\n            punctuation_stack.append(matching_punctuation[text[position]])\n        position += 1\n    if punctuation_stack:\n        return None\n    return text[start_position:(position - 1)]", "docstring": "r\"\"\"Retrieves all the text between matching open and close parentheses.\n\nGiven a string of lines and a regular expression string, retrieve all the text\nfollowing the expression and between opening punctuation symbols like\n(, [, or {, and the matching close-punctuation symbol. This properly nested\noccurrences of the punctuations, so for the text like\nprintf(a(), b(c()));\na call to _GetTextInside(text, r'printf\\(') will return 'a(), b(c())'.\nstart_pattern must match string having an open punctuation symbol at the end.\n\nArgs:\ntext: The lines to extract text. Its comments and strings must be elided.\nIt can be single line and can span multiple lines.\nstart_pattern: The regexp string indicating where to start extracting\nthe text.\nReturns:\nThe extracted text.\nNone if either the opening string or ending punctuation could not be found.", "source": "codesearchnet"}
{"code": "def establish_ssh_connection(ip, ssh_private_key_file, ssh_user, port, attempts=5, timeout=None):\n    client = paramiko.SSHClient()\n    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n    while attempts:\n        try:\n            client.connect(ip, port=port, username=ssh_user, key_filename=ssh_private_key_file, timeout=timeout)\n        except:\n            attempts -= 1\n            time.sleep(10)\n        else:\n            return client\n    raise IpaSSHException('Failed to establish SSH connection to instance.')", "docstring": "Establish ssh connection and return paramiko client.\n\nRaises:\nIpaSSHException: If connection cannot be established\nin given number of attempts.", "source": "codesearchnet"}
{"code": "def _get_compositor_prereqs(self, parent, prereq_names, skip=False, **dfilter):\n    prereq_ids = []\n    unknowns = set()\n    for prereq in prereq_names:\n        (n, u) = self._find_dependencies(prereq, **dfilter)\n        if u:\n            unknowns.update(u)\n            if skip:\n                u_str = ', '.join([str(x) for x in u])\n                LOG.debug('Skipping optional %s: Unknown dataset %s', str(prereq), u_str)\n        else:\n            prereq_ids.append(n)\n            self.add_child(parent, n)\n    return (prereq_ids, unknowns)", "docstring": "Determine prerequisite Nodes for a composite.\n\nArgs:\nparent (Node): Compositor node to add these prerequisites under\nprereq_names (sequence): Strings (names), floats (wavelengths), or\nDatasetIDs to analyze.\nskip (bool, optional): If True, prerequisites are considered\noptional if they can't be found and a\ndebug message is logged. If False (default),\nthe missing prerequisites are not logged\nand are expected to be handled by the\ncaller.", "source": "codesearchnet"}
{"code": "def substructure(mol, query, largest_only=True, ignore_hydrogen=True):\n    \n    def subset_filter(cnt1, cnt2):\n        diff = cnt2\n        diff.subtract(cnt1)\n        if any(v < 0 for v in diff.values()):\n            return True\n\n    if not (len(mol) and len(query)):\n        return False  \n    m = molutil.clone(mol)\n    q = molutil.clone(query)\n    if largest_only:\n        m = molutil.largest_graph(m)\n        q = molutil.largest_graph(q)\n    if ignore_hydrogen:\n        m = molutil.make_Hs_implicit(m)\n        q = molutil.make_Hs_implicit(q)\n    if filter_(m, q, f=subset_filter):\n        gm = GraphMatcher(q.graph, m.graph, node_match=atom_match)\n        return gm.subgraph_is_isomorphic()\n    return False", "docstring": "if mol is a substructure of the query, return True\nArgs:\nmol: Compound\nquery: Compound\nlargest_only: compare only largest graph molecule", "source": "juraj-google-style"}
{"code": "def ones_like(x, dtype=None):\n    if any_symbolic_tensors((x,)):\n        return OnesLike(dtype=dtype).symbolic_call(x)\n    return backend.numpy.ones_like(x, dtype=dtype)", "docstring": "Return a tensor of ones with the same shape and type of `x`.\n\nArgs:\nx: Input tensor.\ndtype: Overrides the data type of the result.\n\nReturns:\nA tensor of ones with the same shape and type as `x`.", "source": "github-repos"}
{"code": "def sort(self, by=None, reverse=False):\n    if (by is None):\n        by = self.kdims\n    elif (not isinstance(by, list)):\n        by = [by]\n    sorted_columns = self.interface.sort(self, by, reverse)\n    return self.clone(sorted_columns)", "docstring": "Sorts the data by the values along the supplied dimensions.\n\nArgs:\nby: Dimension(s) to sort by\nreverse (bool, optional): Reverse sort order\n\nReturns:\nSorted Dataset", "source": "codesearchnet"}
{"code": "def get_power_state(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/powerState\"\n        return self._client.get(uri)", "docstring": "Gets the power state (on, off or unknown) of the specified power delivery device that supports power control.\nThe device must be an HP Intelligent Outlet.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\n\nReturns:\nstr: The power state", "source": "juraj-google-style"}
{"code": "def union(self, streamSet):\n        \n        if(not isinstance(streamSet,set)) :\n            raise TypeError(\"The union operator parameter must be a set object\")\n        if(len(streamSet) == 0):\n            return self        \n        op = self.topology.graph.addOperator(\"$Union$\")\n        op.addInputPort(outputPort=self.oport)\n        for stream in streamSet:\n            op.addInputPort(outputPort=stream.oport)\n        oport = op.addOutputPort(schema=self.oport.schema)\n        return Stream(self.topology, oport)", "docstring": "Creates a stream that is a union of this stream and other streams\n\nArgs:\nstreamSet: a set of Stream objects to merge with this stream\nReturns:\nStream:", "source": "juraj-google-style"}
{"code": "def _viscounts2radiance(counts, slope, offset):\n    rad = ((counts * slope) + offset)\n    return rad.clip(min=0)", "docstring": "Convert VIS counts to radiance\n\nReferences: [VIS]\n\nArgs:\ncounts: Raw detector counts\nslope: Slope [W m-2 um-1 sr-1]\noffset: Offset [W m-2 um-1 sr-1]\nReturns:\nRadiance [W m-2 um-1 sr-1]", "source": "codesearchnet"}
{"code": "def remove_item(name, system_wide=False):\n    desktop_env = system.get_name()\n    if (desktop_env == 'windows'):\n        import winreg\n        if system_wide:\n            startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n        else:\n            startup_dir = os.path.join(directories.get_config_dir()[0], 'Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n        for startup_file in os.path.listdir(start_dir):\n            if ((startup_file == name) or (startup_file.split('.')[0] == name)):\n                os.remove(os.path.join(startup_dir, startup_file))\n    elif (desktop_env == 'mac'):\n        sp.Popen(['launchctl', 'remove', name])\n    elif (desktop_env == 'unknown'):\n        if system_wide:\n            login_file = '/etc/profile'\n        else:\n            login_file = os.path.expanduser('~/.profile')\n        with open(login_file) as f:\n            login_file_contents = f.read()\n        final_login_file_contents = ''\n        for line in login_file_contents.split('\\n'):\n            if (line.split(' ')[0] != name):\n                final_login_file_contents += line\n        with open(login_file, 'w') as f:\n            f.write(final_login_file_contents)\n    else:\n        try:\n            desktop_file_name = (name + '.desktop')\n            startup_file = os.path.join(directories.get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name)\n            if (not os.path.isfile(startup_file)):\n                for possible_startup_file in os.listdir(directories.get_config_dir('autostart', system_wide=system_wide)[0]):\n                    possible_startup_file_parsed = desktopfile.parse(possible_startup_file)\n                    if (possible_startup_file_parsed['Name'] == name):\n                        startup_file = possible_startup_file\n            os.remove(startup_file)\n        except IndexError:\n            pass", "docstring": "Removes a program from startup.\n\nRemoves a program from startup.\n\nArgs:\nname        (str) : The name of the program (as known to the system) to remove. See :func:``list_items``.\nsystem_wide (bool): Remove it from system-wide startup.\n\nNote:\n``system_wide`` requires superuser/admin privileges.", "source": "codesearchnet"}
{"code": "def _process_req_body(self, body):\n    try:\n        return json.loads(body)\n    except ValueError:\n        return urlparse.parse_qs(body, keep_blank_values=True)", "docstring": "Process the body of the HTTP request.\n\nIf the body is valid JSON, return the JSON as a dict.\nElse, convert the key=value format to a dict and return that.\n\nArgs:\nbody: The body of the HTTP request.", "source": "codesearchnet"}
{"code": "def get_value_at_field(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> Any:\n    if isinstance(field, str):\n        field = _field_descriptor_for_name(msg, field)\n    return getattr(msg, field.name)", "docstring": "Returns the value at the field desribed by field.\n\nArgs:\nmsg: The message whose fields to examine.\nfield: The FieldDescriptor or name of the field to retrieve.\n\nReturns:\nThe value of msg at field.", "source": "github-repos"}
{"code": "def isconst(cls, val):\n    return (isinstance(val, string_types) and (((len(val) == 7) and (val[0] == '", "docstring": "Whether the value is a string color literal.\n\nChecks for a well-formed hexadecimal color value or a named color.\n\nArgs:\nval (str) : the value to check\n\nReturns:\nTrue, if the value is a string color literal", "source": "codesearchnet"}
{"code": "def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None):\n    assert ns_uri\n    if (ns_uri in self.__ns_uri_map):\n        ni = self.__lookup_uri(ns_uri)\n        new_ni = copy.deepcopy(ni)\n        if prefix:\n            self.__check_prefix_conflict(ni, prefix)\n            new_ni.prefixes.add(prefix)\n        self.__merge_schema_locations(new_ni, schema_location)\n        for p in new_ni.prefixes:\n            self.__prefix_map[p] = new_ni\n        self.__ns_uri_map[new_ni.uri] = new_ni\n    else:\n        if prefix:\n            self.__check_prefix_conflict(ns_uri, prefix)\n        ni = _NamespaceInfo(ns_uri, prefix, schema_location)\n        self.__add_namespaceinfo(ni)", "docstring": "Adds a new namespace to this set, optionally with a prefix and\nschema location URI.\n\nIf the namespace already exists, the given prefix and schema location\nare merged with the existing entry:\n* If non-None, ``prefix`` is added to the set.  The preferred\nprefix is not modified.\n* If a schema location is not already associated with the\nnamespace, it is set to ``schema_location`` (if given).\n\nIf the namespace doesn't already exist in this set (so a new one is\nbeing created) and a prefix is given, that prefix becomes preferred.\nIf not given, a preference as a default namespace is used.\n\nArgs:\nns_uri (str): The URI of the new namespace\nprefix (str): The desired prefix for the new namespace (optional)\nschema_location (str): The desired schema location for the new\nnamespace (optional).\n\nRaises:\nDuplicatePrefixError: If a prefix is given which already maps to a\ndifferent namespace\nConflictingSchemaLocationError: If a schema location is given and\nthe namespace already exists in this set with a different\nschema location.", "source": "codesearchnet"}
{"code": "def __init__(self, was_reversed=False, was_copy=False):\n    \n    self._was_reversed = was_reversed\n    self._was_copy = was_copy\n    self._encoders = None\n    self._hparams = None\n    self._feature_info = None\n    self._task_id = -1", "docstring": "Create a Problem.\n\nArgs:\nwas_reversed: bool, whether to reverse inputs and targets.\nwas_copy: bool, whether to copy inputs to targets. Can be composed with\nwas_reversed so that if both are true, the targets become the inputs,\nwhich are then copied to targets so that the task is targets->targets.", "source": "juraj-google-style"}
{"code": "def convert_fields_for_spec(fields, field_values):\n    _convert_fields(fields, field_values, context=_ConversionContext.SPEC)", "docstring": "Type-checks and converts field values for a TypeSpec (in place).\n\nThis is similar to `convert_fields`, except that we expect a `TypeSpec` for\ntensor-like types.  In particular, if the `value_type` of a field is\n`tf.Tensor` or a `CompositeTensor` subclass, then the corresponding value in\n`fields` is expected to contain a `TypeSpec` (rather than a value described by\nthat `TypeSpec`).\n\nArgs:\nfields: A list of `ExtensionTypeField` objects.\nfield_values: A `dict` mapping field names to values.  Must contain an entry\nfor each field.  I.e., `set(field_values.keys())` must be equal to\n`set([f.name for f in fields])`.\n\nRaises:\nValueError: If the keys of `field_values` do not match the names of\nthe fields in `fields`.\nTypeError: If any value in `field_values` does not have the type indicated\nby the corresponding `ExtensionTypeField` object.", "source": "github-repos"}
{"code": "def read_hdf(cls, path_or_buf, **kwargs):\n    if (cls.read_hdf_remote_task is None):\n        return super(RayIO, cls).read_hdf(path_or_buf, **kwargs)\n    format = cls._validate_hdf_format(path_or_buf=path_or_buf)\n    if (format is None):\n        ErrorMessage.default_to_pandas('File format seems to be `fixed`. For better distribution consider saving the file in `table` format. df.to_hdf(format=`table`).')\n        return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs))\n    columns = kwargs.get('columns', None)\n    if (not columns):\n        empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0)\n        columns = empty_pd_df.columns\n    num_partitions = cls.frame_mgr_cls._compute_num_partitions()\n    num_splits = min(len(columns), num_partitions)\n    column_splits = ((len(columns) \n    col_partitions = [columns[i:(i + column_splits)] for i in range(0, len(columns), column_splits)]\n    blk_partitions = np.array([cls.read_hdf_remote_task._remote(args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=(num_splits + 1)) for cols in col_partitions]).T\n    remote_partitions = np.array([[cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:(- 1)]])\n    index_len = ray.get(blk_partitions[(- 1)][0])\n    index = pandas.RangeIndex(index_len)\n    new_query_compiler = cls.query_compiler_cls(cls.frame_mgr_cls(remote_partitions), index, columns)\n    return new_query_compiler", "docstring": "Load a h5 file from the file path or buffer, returning a DataFrame.\n\nArgs:\npath_or_buf: string, buffer or path object\nPath to the file to open, or an open :class:`pandas.HDFStore` object.\nkwargs: Pass into pandas.read_hdf function.\n\nReturns:\nDataFrame constructed from the h5 file.", "source": "codesearchnet"}
{"code": "def to_b58check(self, testnet=False):\n    b = (self.testnet_bytes if testnet else bytes(self))\n    return base58.b58encode_check(b)", "docstring": "Generates a Base58Check encoding of this key.\n\nArgs:\ntestnet (bool): True if the key is to be used with\ntestnet, False otherwise.\nReturns:\nstr: A Base58Check encoded string representing the key.", "source": "codesearchnet"}
{"code": "def adversary_assets(self, main_type, sub_type, unique_id, params=None):\n        \n        params = params or {}\n\n        url = '/v2/{}/{}/{}/adversaryAssets'.format(main_type, sub_type, unique_id)\n        for aa in self._iterate(url, params, 'adversaryAsset'):\n            yield aa", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def set_long_features(self, features, columns_to_set=[], partition=2):\n    features_long = self.set_features(partition=(2 * partition))\n    unwanted_features = [f for f in features.columns if (f not in columns_to_set)]\n    features_long = features_long.drop(unwanted_features, axis=1)\n    features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]\n    skip = partition\n    return pd.concat([features[skip:].reset_index(drop=True), features_long], axis=1)", "docstring": "Sets features of double the duration\n\nExample: Setting 14 day RSIs to longer will create add a\nfeature column of a 28 day RSIs.\n\nArgs:\nfeatures: Pandas DataFrame instance with columns as numpy.float32 features.\ncolumns_to_set: List of strings of feature names to make longer\npartition: Int of how many dates to take into consideration\nwhen evaluating technical analysis indicators.\n\nReturns:\nPandas DataFrame instance with columns as numpy.float32 features.", "source": "codesearchnet"}
{"code": "def get_listed_projects():\n    index_path = ((Path().resolve() / 'docs') / 'index.md')\n    with open(index_path, 'r') as index_file:\n        lines = index_file.readlines()\n    listed_projects = set()\n    project_section = False\n    for (_, l) in enumerate(lines):\n        idx = l.find(PROJECT_KEY)\n        if (idx >= 0):\n            project_section = True\n        if project_section:\n            start = l.find('](')\n            if (start > 0):\n                closing_parenthesis = sorted([m.start() for m in re.finditer('\\\\)', l) if (m.start() > start)])[0]\n                project = l[(start + 2):closing_parenthesis]\n                listed_projects.add(project)\n        if ((len(listed_projects) > 0) and l.startswith('\n            return listed_projects\n    return listed_projects", "docstring": "Find the projects listed in the Home Documentation's\nindex.md file\n\nReturns:\nset(str): projects' names, with the '/' in their beginings", "source": "codesearchnet"}
{"code": "def exists(self) -> 'Builder':\n    return self._to_builder(_evaluation.ExistsFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath exists() function.\n\nReturns:\nAn expression that returns True if the parent expression evaluates\nto one or more values.", "source": "github-repos"}
{"code": "def zero_or_more(e, delimiter=None):\n    \n    if delimiter is None:\n        delimiter = lambda s, grm, pos: (s, Ignore, (pos, pos))\n    def match_zero_or_more(s, grm=None, pos=0):\n        start = pos\n        try:\n            s, obj, span = e(s, grm, pos)\n            pos = span[1]\n            data = [] if obj is Ignore else [obj]\n        except PegreError:\n            return PegreResult(s, [], (pos, pos))\n        try:\n            while True:\n                s, obj, span = delimiter(s, grm, pos)\n                pos = span[1]\n                if obj is not Ignore:\n                    data.append(obj)\n                s, obj, span = e(s, grm, pos)\n                pos = span[1]\n                if obj is not Ignore:\n                    data.append(obj)\n        except PegreError:\n            pass\n        return PegreResult(s, data, (start, pos))\n    return match_zero_or_more", "docstring": "Create a PEG function to match zero or more expressions.\n\nArgs:\ne: the expression to match\ndelimiter: an optional expression to match between the\nprimary *e* matches.", "source": "juraj-google-style"}
{"code": "def observe_timestamp(self, timestamp: timestamp.Timestamp) -> None:\n    raise NotImplementedError(type(self))", "docstring": "Update tracking  watermark with latest output timestamp.\n\nArgs:\ntimestamp: the `timestamp.Timestamp` of current output element.\n\nThis is called with the timestamp of every element output from the DoFn.", "source": "github-repos"}
{"code": "def _GenApiConfigCallback(args, api_func=GenApiConfig):\n    service_configs = api_func(args.service, hostname=args.hostname, application_path=args.application)\n    for (api_name_version, config) in service_configs.iteritems():\n        _WriteFile(args.output, (api_name_version + '.api'), config)", "docstring": "Generate an api file.\n\nArgs:\nargs: An argparse.Namespace object to extract parameters from.\napi_func: A function that generates and returns an API configuration\nfor a list of services.", "source": "codesearchnet"}
{"code": "def compare_config(self, target, init=True, indent_level=0):\n    if init:\n        fwd = self.full_path_fwd\n        bwd = self.full_path_bwd\n    else:\n        fwd = self.rel_path_fwd\n        bwd = self.rel_path_bwd\n    indent = ((4 * indent_level) * ' ')\n    if ((indent_level == 0) and (self.vdom is not None)):\n        if (self.vdom == 'global'):\n            pre = 'conf global\\n'\n        else:\n            pre = ('conf vdom\\n  edit %s\\n' % self.vdom)\n        post = 'end'\n    else:\n        pre = ''\n        post = ''\n    pre_block = ('%s%s' % (indent, fwd))\n    post_block = ('%s%s' % (indent, bwd))\n    my_params = self.parameters.keys()\n    ot_params = target.parameters.keys()\n    text = ''\n    for param in my_params:\n        if (param not in ot_params):\n            text += ('  %sunset %s\\n' % (indent, param))\n        elif (str(self.get_param(param)).replace('\"', '') != str(target.get_param(param)).replace('\"', '')):\n            text += ('  %sset %s %s\\n' % (indent, param, target.get_param(param)))\n    for param in ot_params:\n        if (param not in my_params):\n            text += ('  %sset %s %s\\n' % (indent, param, target.get_param(param)))\n    my_blocks = self.sub_blocks.keys()\n    ot_blocks = target.sub_blocks.keys()\n    for block_name in my_blocks:\n        if (block_name not in ot_blocks):\n            text += ('    %sdelete %s\\n' % (indent, block_name))\n        else:\n            text += self[block_name].compare_config(target[block_name], False, (indent_level + 1))\n    for block_name in ot_blocks:\n        if (block_name not in my_blocks):\n            text += target[block_name].to_text(True, (indent_level + 1), True)\n    if (text == ''):\n        return ''\n    else:\n        return ('%s%s%s%s%s' % (pre, pre_block, text, post_block, post))", "docstring": "This method will return all the necessary commands to get from the config we are in to the target\nconfig.\n\nArgs:\n* **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config.\n* **init** (bool) - This tells to the method if this is the first call to the method or if we are inside\\\nthe recursion. You can ignore this parameter.\n* **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it.\n\nReturns:\nA string containing all the necessary commands to reach the target config.", "source": "codesearchnet"}
{"code": "def get_pourbaix_entries(self, chemsys):\n    from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, IonEntry\n    from pymatgen.analysis.phase_diagram import PhaseDiagram\n    from pymatgen.core.ion import Ion\n    from pymatgen.entries.compatibility import MaterialsProjectAqueousCompatibility\n    pbx_entries = []\n    url = ('/pourbaix_diagram/reference_data/' + '-'.join(chemsys))\n    ion_data = self._make_request(url)\n    ion_ref_comps = [Composition(d['Reference Solid']) for d in ion_data]\n    ion_ref_elts = list(itertools.chain.from_iterable((i.elements for i in ion_ref_comps)))\n    ion_ref_entries = self.get_entries_in_chemsys(list(set(([str(e) for e in ion_ref_elts] + ['O', 'H']))), property_data=['e_above_hull'], compatible_only=False)\n    compat = MaterialsProjectAqueousCompatibility('Advanced')\n    ion_ref_entries = compat.process_entries(ion_ref_entries)\n    ion_ref_pd = PhaseDiagram(ion_ref_entries)\n    for (n, i_d) in enumerate(ion_data):\n        ion_entry = IonEntry(Ion.from_formula(i_d['Name']), i_d['Energy'])\n        refs = [e for e in ion_ref_entries if (e.composition.reduced_formula == i_d['Reference Solid'])]\n        if (not refs):\n            raise ValueError('Reference solid not contained in entry list')\n        stable_ref = sorted(refs, key=(lambda x: x.data['e_above_hull']))[0]\n        rf = stable_ref.composition.get_reduced_composition_and_factor()[1]\n        solid_diff = (ion_ref_pd.get_form_energy(stable_ref) - (i_d['Reference solid energy'] * rf))\n        elt = i_d['Major_Elements'][0]\n        correction_factor = (ion_entry.ion.composition[elt] / stable_ref.composition[elt])\n        ion_entry.energy += (solid_diff * correction_factor)\n        pbx_entries.append(PourbaixEntry(ion_entry, 'ion-{}'.format(n)))\n    extra_elts = ((set(ion_ref_elts) - {Element(s) for s in chemsys}) - {Element('H'), Element('O')})\n    for entry in ion_ref_entries:\n        entry_elts = set(entry.composition.elements)\n        if (not ((entry_elts <= {Element('H'), Element('O')}) or extra_elts.intersection(entry_elts))):\n            form_e = ion_ref_pd.get_form_energy(entry)\n            new_entry = deepcopy(entry)\n            new_entry.uncorrected_energy = form_e\n            new_entry.correction = 0.0\n            pbx_entry = PourbaixEntry(new_entry)\n            pbx_entries.append(pbx_entry)\n    return pbx_entries", "docstring": "A helper function to get all entries necessary to generate\na pourbaix diagram from the rest interface.\n\nArgs:\nchemsys ([str]): A list of elements comprising the chemical\nsystem, e.g. ['Li', 'Fe']", "source": "codesearchnet"}
{"code": "def _register_array_types(self, objects):\n    \n    \n    types = [o for o in objects if isinstance(o, VhdlType) and o.type_of == 'array_type']\n    for t in types:\n      self.array_types.add(t.name)\n\n    subtypes = {o.name:o.base_type for o in objects if isinstance(o, VhdlSubtype)}\n\n    \n    for k,v in subtypes.iteritems():\n      while v in subtypes: \n        v = subtypes[v]\n      if v in self.array_types:\n        self.array_types.add(k)", "docstring": "Add array type definitions to internal registry\n\nArgs:\nobjects (list of VhdlType or VhdlSubtype): Array types to track", "source": "juraj-google-style"}
{"code": "def get_job(self, id):\n        \n        return self._get_element_by_id(self.jobs, 'jobs', Job, str(id))", "docstring": "Retrieves a job matching the given `id`\n\nArgs:\nid (str): Job `id` to match.\n\nReturns:\nJob: Job matching the given `id`\n\nRaises:\nValueError: No resource matches given `id` or multiple resources matching given `id`", "source": "juraj-google-style"}
{"code": "def update_compounds(self, variants):\n    LOG.debug('Updating compound objects')\n    for var_id in variants:\n        variant_obj = variants[var_id]\n        if (not variant_obj.get('compounds')):\n            continue\n        updated_compounds = self.update_variant_compounds(variant_obj, variants)\n        variant_obj['compounds'] = updated_compounds\n    LOG.debug('Compounds updated')\n    return variants", "docstring": "Update the compounds for a set of variants.\n\nArgs:\nvariants(dict): A dictionary with _ids as keys and variant objs as values", "source": "codesearchnet"}
{"code": "def get_metadata(changeset):\n    \n    url = 'https:\n    return ET.fromstring(requests.get(url).content).getchildren()[0]", "docstring": "Get the metadata of a changeset using the OSM API and return it as a XML\nElementTree.\n\nArgs:\nchangeset: the id of the changeset.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, bbox: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor:\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    if bbox is None:\n        bbox = bbox = tf.fill(input_shape + [4], value=0)\n    try:\n        left_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 0])\n        upper_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 1])\n        right_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 2])\n        lower_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 3])\n    except IndexError as e:\n        raise IndexError('The `bbox`coordinate values should be within 0-1000 range.') from e\n    h_position_embeddings = tf.gather(self.h_position_embeddings, bbox[:, :, 3] - bbox[:, :, 1])\n    w_position_embeddings = tf.gather(self.w_position_embeddings, bbox[:, :, 2] - bbox[:, :, 0])\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + position_embeds + token_type_embeds + left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def _SetSELinuxContext(path):\n    restorecon = '/sbin/restorecon'\n    if (os.path.isfile(restorecon) and os.access(restorecon, os.X_OK)):\n        subprocess.call([restorecon, path])", "docstring": "Set the appropriate SELinux context, if SELinux tools are installed.\n\nCalls /sbin/restorecon on the provided path to set the SELinux context as\nspecified by policy. This call does not operate recursively.\n\nOnly some OS configurations use SELinux. It is therefore acceptable for\nrestorecon to be missing, in which case we do nothing.\n\nArgs:\npath: string, the path on which to fix the SELinux context.", "source": "codesearchnet"}
{"code": "def GetWarnings(self):\n    if self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_ERROR):\n        return self._GetExtractionErrorsAsWarnings()\n    return self._GetAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING)", "docstring": "Retrieves the warnings.\n\nReturns:\ngenerator(ExtractionWarning): warning generator.", "source": "codesearchnet"}
{"code": "def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500):\n    offsets = []\n    if 'torch' in str(type(token_ids)) and (hasattr(token_ids, 'cpu') and callable(token_ids.cpu)):\n        token_ids = token_ids.cpu()\n    token_ids = np.array(token_ids)\n    if token_ids.shape[0] > 1 and len(token_ids.shape) > 1:\n        raise ValueError('Can only process a single input at a time')\n    timestamp_begin = self.all_special_ids[-1] + 1\n    timestamp_tokens = token_ids >= timestamp_begin\n    consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1\n    if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1:\n        return []\n    elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive:\n        consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1)\n    last_slice = np.where(timestamp_tokens)[0][0]\n    cur_max_timestamp = 0\n    prev_segments_len = 0\n    for current_slice in consecutive:\n        sliced_tokens = token_ids[last_slice:current_slice]\n        if len(sliced_tokens) > 1:\n            start_timestamp_position = sliced_tokens[0].item() - timestamp_begin\n            end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin\n            if start_timestamp_position < cur_max_timestamp:\n                is_single_ending = last_slice >= 2 and (not (token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin))\n                if is_single_ending:\n                    prev_segments_len += segment_size\n                else:\n                    prev_segments_len += cur_max_timestamp\n            cur_max_timestamp = end_timestamp_position\n            sliced_tokens = self._preprocess_token_ids(sliced_tokens)\n            text = self._decode(sliced_tokens)\n            text = self._filter_timestamp_ids(text)\n            offsets.append({'text': text, 'timestamp': (start_timestamp_position * time_precision + prev_segments_len * time_precision, end_timestamp_position * time_precision + prev_segments_len * time_precision)})\n        last_slice = current_slice\n    return offsets", "docstring": "Compute offsets for a given tokenized input\n\nArgs:\ntoken_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\nList of tokenized input ids. Can be obtained using the `__call__` method.\ntime_precision (`float`, *optional*, defaults to 0.02):\nThe time ratio to convert from token to time.\nsegment_size (`int`, *optional*, defaults to 1500):\nThe number of features in the input mel spectrogram.", "source": "github-repos"}
{"code": "def validate_checksum(filename, md5sum):\n    filename = match_filename(filename)\n    md5_hash = file_md5(filename=filename)\n    if (md5_hash != md5sum):\n        raise ValueError('md5 checksums are inconsistent: {}'.format(filename))", "docstring": "Compares the md5 checksum of a file with an expected value.\nIf the calculated and expected checksum values are not equal,\nValueError is raised.\nIf the filename `foo` is not found, will try to read a gzipped file named\n`foo.gz`. In this case, the checksum is calculated for the unzipped file.\n\nArgs:\nfilename (str): Path for the file to be checksummed.\nmd5sum (str):  The expected hex checksum.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def insert(self, parts, leaf_value, update=False):\n    tree = self\n    if (not parts):\n        return tree\n    cur = tree\n    last = (len(parts) - 1)\n    for (i, part) in enumerate(parts):\n        if (part not in cur):\n            cur[part] = (TreeMap() if (i != last) else leaf_value)\n        elif (i == last):\n            if update:\n                cur[part].update(leaf_value)\n            else:\n                cur[part] = leaf_value\n        cur = cur[part]\n    return self", "docstring": "Add a list of nodes into the tree.\n\nThe list will be converted into a TreeMap (chain) and then\nmerged with the current TreeMap.\n\nFor example, this method would insert `['a','b','c']` as\n`{'a':{'b':{'c':{}}}}`.\n\nArguments:\nparts: List of nodes representing a chain.\nleaf_value: Value to insert into the leaf of the chain.\nupdate: Whether or not to update the leaf with the given value or\nto replace the value.\n\nReturns:\nself", "source": "codesearchnet"}
{"code": "def plots_html_page(query_module):\n    \n\n    \n    template = jenv.get_template(\"analysis.html\")\n\n    \n    context = dict(extended=config.EXTENDED)\n\n    \n    cl = client.get_client()\n    session = cl.create_session()\n\n    \n    seaborn.set_style('whitegrid')\n\n    \n    \n    \n\n    decade_df = query_module.decade_query()\n\n    pix_size = pixels_to_inches((600, 400))\n    ax = seaborn.lmplot(x='decade', y='area', data=decade_df,\n                        size=pix_size[1], aspect=pix_size[0] / pix_size[1],\n                        scatter_kws={\"s\": 30, \"alpha\": 0.3})\n    ax.set(xlabel='Decade', ylabel='Area, m^2')\n    context['area_by_decade_svg'] = fig_to_svg(plt.gcf())\n    plt.close('all')\n\n    \n    \n    \n\n    if config.EXTENDED:\n        gender_df = query_module.gender_query()\n\n        pix_size = pixels_to_inches((600, 400))\n        g = seaborn.FacetGrid(gender_df, hue=\"gender\", margin_titles=True,\n                              size=pix_size[1], aspect=pix_size[0] / pix_size[1])\n        bins = np.linspace(0, 5, 30)\n        g.map(plt.hist, \"area\", bins=bins, lw=0, alpha=0.5, normed=True)\n        g.axes[0, 0].set_xlabel('Area, m^2')\n        g.axes[0, 0].set_ylabel('Percentage of paintings')\n        context['area_by_gender_svg'] = fig_to_svg(plt.gcf())\n        plt.close('all')\n\n    \n    \n    \n\n    out_file = path.join(out_dir, \"analysis.html\")\n    html_content = template.render(**context)\n    with open(out_file, 'w') as f:\n        f.write(html_content)\n\n    \n    plt.close('all')\n    session.close()", "docstring": "Generate analysis output as html page\n\nArgs:\nquery_module (module): module to use for querying data for the\ndesired model/pipeline variant, e.g. leonardo.standard.queries", "source": "juraj-google-style"}
{"code": "def is_treshold_reached(self, scraped_request):\n        \n\n        for route in self.__routing_options.routes:\n            if re.compile(route).match(scraped_request.url):\n                count_key = str(route) + scraped_request.method\n\n                if count_key in self.__routing_count.keys():\n                    return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold\n                \n        return False", "docstring": "Check if similar requests to the given requests have already been crawled X times. Where X is the\nminimum treshold amount from the options.\n\nArgs:\nscraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold.\n\nReturns:\nbool: True if treshold reached, false otherwise.", "source": "juraj-google-style"}
{"code": "def __floordiv__(self, other: Self | Processor) -> PartProcessor | Processor:\n    if isinstance(other, _ParallelPartProcessor):\n        return _ParallelPartProcessor([self] + other._processor_list)\n    elif isinstance(other, PartProcessor):\n        return _ParallelPartProcessor([self, other])\n    else:\n        raise ValueError(f'Parallel operator not valid between a PartProcessor and {type(other)}.')", "docstring": "Make `other` be computed in parallel to this processor.\n\nArgs:\nother: a processor to compute in parallel to `self`.\n\nReturns:\nThe parallel computation of this process with `other`.", "source": "github-repos"}
{"code": "def __init__(self, underlying_result, pipeline_instrument):\n    super().__init__(underlying_result.state)\n    self._underlying_result = underlying_result\n    self._pipeline_instrument = pipeline_instrument", "docstring": "Constructor of PipelineResult.\n\nArgs:\nunderlying_result: (PipelineResult) the result returned by the underlying\nrunner running the pipeline.\npipeline_instrument: (PipelineInstrument) pipeline instrument describing\nthe pipeline being executed with interactivity applied and related\nmetadata including where the interactivity-backing cache lies.", "source": "github-repos"}
{"code": "def _parse_lambda(lam):\n    mod = inspect.getmodule(lam)\n    f = inspect.getsourcefile(lam)\n    def_line = lam.__code__.co_firstlineno\n    lines = linecache.getlines(f, mod.__dict__)\n    source = ''.join(lines)\n    all_nodes = parse(source, preamble_len=0, single_node=False)\n    search_nodes = []\n    for node in all_nodes:\n        if getattr(node, 'lineno', def_line) <= def_line:\n            search_nodes.append(node)\n        else:\n            break\n    lambda_nodes = []\n    for node in search_nodes:\n        lambda_nodes.extend((n for n in gast.walk(node) if isinstance(n, gast.Lambda)))\n    candidates = []\n    for ln in lambda_nodes:\n        minl, maxl = (MAX_SIZE, 0)\n        for n in gast.walk(ln):\n            minl = min(minl, getattr(n, 'lineno', minl))\n            lineno = getattr(n, 'lineno', maxl)\n            end_lineno = getattr(n, 'end_lineno', None)\n            if end_lineno is not None:\n                lineno = end_lineno\n            maxl = max(maxl, lineno)\n        if minl <= def_line <= maxl:\n            candidates.append((ln, minl, maxl))\n    if len(candidates) == 1:\n        (node, minl, maxl), = candidates\n        return _without_context(node, lines, minl, maxl)\n    elif not candidates:\n        lambda_codes = '\\n'.join([unparse(l) for l in lambda_nodes])\n        raise errors.UnsupportedLanguageElementError(f'could not parse the source code of {lam}: no matching AST found among candidates:\\n{lambda_codes}')\n    matches = [v for v in candidates if _node_matches_argspec(v[0], lam)]\n    if len(matches) == 1:\n        (node, minl, maxl), = matches\n        return _without_context(node, lines, minl, maxl)\n    matches = '\\n'.join(('Match {}:\\n{}\\n'.format(i, unparse(node, include_encoding_marker=False)) for i, (node, _, _) in enumerate(matches)))\n    raise errors.UnsupportedLanguageElementError(f'could not parse the source code of {lam}: found multiple definitions with identical signatures at the location. This error may be avoided by defining each lambda on a single line and with unique argument names. The matching definitions were:\\n{matches}')", "docstring": "Returns the AST and source code of given lambda function.\n\nArgs:\nlam: types.LambdaType, Python function/method/class\n\nReturns:\ngast.AST, Text: the parsed AST node; the source code that was parsed to\ngenerate the AST (including any prefixes that this function may have added).", "source": "github-repos"}
{"code": "def isdisjoint(self, other):\n        \n        other = self._cast_to_frameset(other)\n        if other is NotImplemented:\n            return NotImplemented\n        return self.items.isdisjoint(other.items)", "docstring": "Check if the contents of :class:self has no common intersection with the\ncontents of :class:other.\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def load(self, profile_args):\n    for (key, value) in profile_args.items():\n        self.add(key, value)", "docstring": "Load provided CLI Args.\n\nArgs:\nargs (dict): Dictionary of args in key/value format.", "source": "codesearchnet"}
{"code": "def res_name(self, ns, types_ns, name):\n    raise NotImplementedError('subclasses must implement')", "docstring": "Resolves the type/value an external (e.g. closure, global) variable.\n\nArgs:\nns: namespace\ntypes_ns: types namespace\nname: symbol name\n\nReturns:\nTuple (type, static_value). The first element is the type to use for\ninference. The second is the static value to use. Return None to treat it\nas unknown.", "source": "github-repos"}
{"code": "def reminders_complete(self, *, reminder: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'reminder': reminder})\n    return self.api_call('reminders.complete', json=kwargs)", "docstring": "Marks a reminder as complete.\n\nArgs:\nreminder (str): The ID of the reminder to be marked as complete.\ne.g. 'Rm12345678'", "source": "codesearchnet"}
{"code": "def calculate_elem_per_kb(max_chunk_kb, matrix_dtype):\n    if (matrix_dtype == numpy.float32):\n        return ((max_chunk_kb * 8) / 32)\n    elif (matrix_dtype == numpy.float64):\n        return ((max_chunk_kb * 8) / 64)\n    else:\n        msg = 'Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported'.format(matrix_dtype)\n        logger.error(msg)\n        raise Exception(('write_gctx.calculate_elem_per_kb ' + msg))", "docstring": "Calculates the number of elem per kb depending on the max chunk size set.\n\nInput:\n- max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy\n- matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix.\nCurrently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype).\n\nReturns:\nelem_per_kb (int), the number of elements per kb for matrix dtype specified.", "source": "codesearchnet"}
{"code": "def is_transcript_available(video_id, language_code=None):\n    \n    filter_attrs = {'video__edx_video_id': video_id}\n    if language_code:\n        filter_attrs['language_code'] = language_code\n\n    transcript_set = VideoTranscript.objects.filter(**filter_attrs)\n    return transcript_set.exists()", "docstring": "Returns whether the transcripts are available for a video.\n\nArguments:\nvideo_id: it can be an edx_video_id or an external_id extracted from external sources in a video component.\nlanguage_code: it will the language code of the requested transcript.", "source": "juraj-google-style"}
{"code": "def _get_feed(self):\n    if self.feed_name in self._feed_name_tab_map:\n        for tab_name in self._feed_name_tab_map[self.feed_name]:\n            for sheet in self.spreadsheet['sheets']:\n                if sheet['properties']['title'] == tab_name:\n                    self.tab_name = tab_name\n                    return sheets_read(self.config, self.auth, self.trix_id, tab_name, self.trix_range)\n    return [[]]", "docstring": "Fetches the feed based on initialization parameters.\n\nReturns:\nList of lists that represents the rows and columns of the feed. If the\nfeed isn't found returns a list with an empty list.", "source": "github-repos"}
{"code": "def make_target(url, extra_opts=None):\n    \n    \n    parts = compat.urlparse(url, allow_fragments=False)\n    \n    scheme = parts.scheme.lower()\n    if scheme in [\"ftp\", \"ftps\"]:\n        creds = parts.username, parts.password\n        tls = scheme == \"ftps\"\n        from ftpsync import ftp_target\n\n        target = ftp_target.FtpTarget(\n            parts.path,\n            parts.hostname,\n            parts.port,\n            username=creds[0],\n            password=creds[1],\n            tls=tls,\n            timeout=None,\n            extra_opts=extra_opts,\n        )\n    else:\n        target = FsTarget(url, extra_opts)\n\n    return target", "docstring": "Factory that creates `_Target` objects from URLs.\n\nFTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.\n\nNote:\nTLS is only supported on Python 2.7/3.2+.\nArgs:\nurl (str):\nextra_opts (dict, optional): Passed to Target constructor. Default: None.\nReturns:\n:class:`_Target`", "source": "juraj-google-style"}
{"code": "def default(fields=None, count=5):\n    \n    projection = Sampling._create_projection(fields)\n    return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count)", "docstring": "Provides a simple default sampling strategy which limits the result set by a count.\n\nArgs:\nfields: an optional list of field names to retrieve.\ncount: optional number of rows to limit the sampled results to.\nReturns:\nA sampling function that can be applied to get a random sampling.", "source": "juraj-google-style"}
{"code": "def resolve(self, pid, vendorSpecific=None):\n        \n        response = self.resolveResponse(pid, vendorSpecific)\n        return self._read_dataone_type_response(\n            response, 'ObjectLocationList', response_is_303_redirect=True\n        )", "docstring": "See Also: resolveResponse()\n\nArgs:\npid:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def __init__(self, context=None):\n    \n    self._context = context or google.datalab.Context.default()\n    self._client = _utils.make_client(self._context)\n    self._group_dict = None", "docstring": "Initializes the Groups for a Stackdriver project.\n\nArgs:\ncontext: An optional Context object to use instead of the global default.", "source": "juraj-google-style"}
{"code": "def map_indices_in_shard(num_sparse_cores: int, offset_in_shard: int, shard_rotation: int, row_indices: tensor.Tensor) -> tuple[tensor.Tensor, tensor.Tensor]:\n    shard_index = (row_indices % num_sparse_cores + shard_rotation) % num_sparse_cores\n    position_in_shard = offset_in_shard + row_indices \n    return (shard_index, position_in_shard)", "docstring": "Maps a row of a given table to its sparse core shard and position.\n\nMaps a given a row index of a logical table and its layout in sparse core,\nreturns the index of the shard where the row is placed and its relative\nposition within\nthat sparse core shard.\nArgs:\nnum_sparse_cores: The number of sparsecores, this determines the number of\nshards present.\noffset_in_shard: Offset within a shard where the queried table starts.\nshard_rotation: The rotation of this table's shards.\nrow_indices: row indices of the embedding table being looked up.\n\nReturns:\nA Tuple representing shard_index and position of the row in that shard.", "source": "github-repos"}
{"code": "def enter_diff_mode(self, context_model=None):\n        \n        assert not self.diff_mode\n        self.diff_mode = True\n\n        if context_model is None:\n            self.diff_from_source = True\n            self.diff_context_model = self.context_model.copy()\n        else:\n            self.diff_from_source = False\n            self.diff_context_model = context_model\n\n        self.clear()\n        self.setColumnCount(5)\n        self.refresh()", "docstring": "Enter diff mode.\n\nArgs:\ncontext_model (`ContextModel`): Context to diff against. If None, a\ncopy of the current context is used.", "source": "juraj-google-style"}
{"code": "def convert_to_tensor_or_composite(value, dtype=None, name=None) -> Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor]:\n    return internal_convert_to_tensor_or_composite(value=value, dtype=dtype, name=name, as_ref=False)", "docstring": "Converts the given object to a `Tensor` or `CompositeTensor`.\n\nIf `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it\nis converted to a `Tensor` using `convert_to_tensor()`.\n\nArgs:\nvalue: A `CompositeTensor` or an object that can be consumed by\n`convert_to_tensor()`.\ndtype: (Optional.) The required `DType` of the returned `Tensor` or\n`CompositeTensor`.\nname: (Optional.) A name to use if a new `Tensor` is created.\n\nReturns:\nA `Tensor` or `CompositeTensor`, based on `value`.\n\nRaises:\nValueError: If `dtype` does not match the element type of `value`.", "source": "github-repos"}
{"code": "def Close(self):\n    if (self.locked and (self.CheckLease() == 0)):\n        raise LockError('Can not update lease that has already expired.')\n    self._WriteAttributes()\n    if self.locked:\n        self.transaction.Release()\n    if self.parent:\n        self.parent.Close()\n    self.mode = ''", "docstring": "Close and destroy the object.\n\nThis is similar to Flush, but does not maintain object validity. Hence the\nobject should not be interacted with after Close().\n\nRaises:\nLockError: The lease for this object has expired.", "source": "codesearchnet"}
{"code": "def deserialize_feature_columns(configs, custom_objects=None):\n    columns_by_name = {}\n    return [deserialize_feature_column(c, custom_objects, columns_by_name) for c in configs]", "docstring": "Deserializes a list of FeatureColumns configs.\n\nReturns a list of FeatureColumns given a list of config dicts acquired by\n`serialize_feature_columns`.\n\nArgs:\nconfigs: A list of Dicts with the serialization of feature columns acquired\nby `serialize_feature_columns`.\ncustom_objects: A Dict from custom_object name to the associated keras\nserializable objects (FeatureColumns, classes or functions).\n\nReturns:\nFeatureColumn objects corresponding to the input configs.\n\nRaises:\nValueError if called with input that is not a list of FeatureColumns.", "source": "github-repos"}
{"code": "def get_params(self, deep=True):\n\t\t\n\t\tparams = {'weights':self.coef_, 'bias':self.intercept_}\n\t\tif deep:\n\t\t\tfor key, value in self.B.items():\n\t\t\t\tparams['b_'+str(key)] = value\n\t\t\n\t\treturn params", "docstring": "Get parameters for the estimator.\n\nArgs:\ndeep (boolean, optional) : If True, will return the parameters for this estimator and contained subobjects that are estimators.\n\nReturns:\nparams : mapping of string to any contained subobjects that are estimators.", "source": "juraj-google-style"}
{"code": "def TransferFrom(self, wallet, from_addr, to_addr, amount):\n    invoke_args = [self.ScriptHash.ToString(), 'transferFrom', [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)]]\n    (tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True)\n    return (tx, fee, results)", "docstring": "Transfer a specified amount of a token from the wallet specified in the `from_addr` to the `to_addr`\nif the originator `wallet` has been approved to do so.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\nfrom_addr (str): public address of the account to transfer the given amount from.\nto_addr (str): public address of the account to transfer the given amount to.\namount (int): quantity to send.\n\nReturns:\ntuple:\nInvocationTransaction: the transaction.\nint: the transaction fee.\nlist: the neo VM evaluation stack results.", "source": "codesearchnet"}
{"code": "def run(self, circuit):\n        \n        name = circuit.name\n        dag = circuit_to_dag(circuit)\n        del circuit\n        for passset in self.working_list:\n            for pass_ in passset:\n                dag = self._do_pass(pass_, dag, passset.options)\n        circuit = dag_to_circuit(dag)\n        circuit.name = name\n        return circuit", "docstring": "Run all the passes on a QuantumCircuit\n\nArgs:\ncircuit (QuantumCircuit): circuit to transform via all the registered passes\n\nReturns:\nQuantumCircuit: Transformed circuit.", "source": "juraj-google-style"}
{"code": "def should_invoke_op_callbacks():\n    ctx = context.context()\n    return ctx.op_callbacks and (not ctx.invoking_op_callbacks)", "docstring": "Determine if op callbacks are present and should be invoked.\n\nReturns:\nA thread-local result (boolean) indicating whether any op callback(s) exist\nand should be invoked.", "source": "github-repos"}
{"code": "def _output_types(self) -> list[int]:\n    num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n    output_types = [int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i))) for i in range(num_outputs)]\n    return output_types", "docstring": "List this operation's output types.\n\nReturns:\nList of the types of the Tensors computed by this operation.\nEach element in the list is an integer whose value is one of\nthe TF_DataType enums defined in pywrap_tf_session.h\nThe length of this list indicates the number of output endpoints\nof the operation.", "source": "github-repos"}
{"code": "def set_cache_policy(self, func):\n    \n    if func is None:\n      func = self.default_cache_policy\n    elif isinstance(func, bool):\n      func = lambda unused_key, flag=func: flag\n    self._cache_policy = func", "docstring": "Set the context cache policy function.\n\nArgs:\nfunc: A function that accepts a Key instance as argument and returns\na bool indicating if it should be cached.  May be None.", "source": "juraj-google-style"}
{"code": "def Encode(self):\n    assert self.value_dict_or_array is not None\n    logging.log(1, 'Encoding ' + self.name)\n    resolved = MessageValue._ResolveVars(self.value_dict_or_array)\n    logging.debug('Resolved: ' + str(resolved))\n    return self.msg.encoding.SerializeToString(resolved, self.msg)", "docstring": "Encode this message instance into actual data stream.\n\nThe supported encoding methods are: json, protobuf, and user-defined\nencodings.\n\nReturns:\nA string encoded.", "source": "github-repos"}
{"code": "def get_organisations(self, **query_params):\n    organisations = self.get_organisations_json(self.base_uri, query_params=query_params)\n    organisations_list = []\n    for organisation_json in organisations:\n        organisations_list.append(self.create_organisation(organisation_json))\n    return organisations_list", "docstring": "Get all organisations this member is attached to. Return a list of\nOrganisation objects.\n\nReturns:\nlist(Organisation): Return all organisations this member is\nattached to", "source": "codesearchnet"}
{"code": "def observe(self, terminal, reward, index=0):\n        \n        self.current_terminal = terminal\n        self.current_reward = reward\n\n        if self.batched_observe:\n            \n            self.observe_terminal[index].append(self.current_terminal)\n            self.observe_reward[index].append(self.current_reward)\n\n            if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity:\n                self.episode = self.model.observe(\n                    terminal=self.observe_terminal[index],\n                    reward=self.observe_reward[index],\n                    index=index\n                )\n                self.observe_terminal[index] = list()\n                self.observe_reward[index] = list()\n\n        else:\n            self.episode = self.model.observe(\n                terminal=self.current_terminal,\n                reward=self.current_reward\n            )", "docstring": "Observe experience from the environment to learn from. Optionally pre-processes rewards\nChild classes should call super to get the processed reward\nEX: terminal, reward = super()...\n\nArgs:\nterminal (bool): boolean indicating if the episode terminated after the observation.\nreward (float): scalar reward that resulted from executing the action.", "source": "juraj-google-style"}
{"code": "def parts(path):\n    \n    \n    _path = normpath(path)\n    components = _path.strip(\"/\")\n\n    _parts = [\"/\" if _path.startswith(\"/\") else \"./\"]\n    if components:\n        _parts += components.split(\"/\")\n    return _parts", "docstring": "Split a path in to its component parts.\n\nArguments:\npath (str): Path to split in to parts.\n\nReturns:\nlist: List of components\n\nExample:\n>>> parts('/foo/bar/baz')\n['/', 'foo', 'bar', 'baz']", "source": "juraj-google-style"}
{"code": "def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):\n    with warnings.catch_warnings():\n        warnings.simplefilter('ignore')\n        if (cloud is None):\n            return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint)\n        if (not isinstance(cloud, dict)):\n            cloud = {}\n        return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset, checkpoint, cloud)", "docstring": "Preprocess data. Produce output that can be used by training efficiently.\n\nArgs:\ntrain_dataset: training data source to preprocess. Can be CsvDataset or BigQueryDataSet.\nIf eval_dataset is None, the pipeline will randomly split train_dataset into\ntrain/eval set with 7:3 ratio.\noutput_dir: The output directory to use. Preprocessing will create a sub directory under\nit for each run, and also update \"latest\" file which points to the latest preprocessed\ndirectory. Users are responsible for cleanup. Can be local or GCS path.\neval_dataset: evaluation data source to preprocess. Can be CsvDataset or BigQueryDataSet.\nIf specified, it will be used for evaluation during training, and train_dataset will be\ncompletely used for training.\ncheckpoint: the Inception checkpoint to use. If None, a default checkpoint is used.\ncloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If anything but\nnot None, it will run in cloud. Otherwise, it runs locally.\n\nReturns:\nA google.datalab.utils.Job object that can be used to query state from or wait.", "source": "codesearchnet"}
{"code": "def draw(self):\n        \n        for age, level in enumerate(self.tree.get_branches()):\n            if age in self.ages:\n                thickness = self._get_thickness(age)\n                color = self._get_color(age)\n                for branch in level:\n                    self._draw_branch(branch, color, thickness, age)", "docstring": "Draws the tree.\n\nArgs:\nages (array): Contains the ages you want to draw.", "source": "juraj-google-style"}
{"code": "def binfiles_set(self, isnap):\n        \n        possible_files = set(self.filename(fstem, isnap, force_legacy=True)\n                             for fstem in phyvars.FIELD_FILES)\n        return possible_files & self.files", "docstring": "Set of existing binary files at a given snap.\n\nArgs:\nisnap (int): snapshot index.\nReturns:\nset of pathlib.Path: the set of output files available for this\nsnapshot number.", "source": "juraj-google-style"}
{"code": "def register_token(\n            self,\n            registry_address_hex: typing.AddressHex,\n            token_address_hex: typing.AddressHex,\n            retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT,\n    ) -> TokenNetwork:\n        \n        registry_address = decode_hex(registry_address_hex)\n        token_address = decode_hex(token_address_hex)\n\n        registry = self._raiden.chain.token_network_registry(registry_address)\n        contracts_version = self._raiden.contract_manager.contracts_version\n\n        if contracts_version == DEVELOPMENT_CONTRACT_VERSION:\n            token_network_address = registry.add_token_with_limits(\n                token_address=token_address,\n                channel_participant_deposit_limit=UINT256_MAX,\n                token_network_deposit_limit=UINT256_MAX,\n            )\n        else:\n            token_network_address = registry.add_token_without_limits(\n                token_address=token_address,\n            )\n\n        \n        waiting.wait_for_payment_network(\n            self._raiden,\n            registry.address,\n            token_address,\n            retry_timeout,\n        )\n\n        return self._raiden.chain.token_network(token_network_address)", "docstring": "Register a token with the raiden token manager.\n\nArgs:\nregistry_address: registry address\ntoken_address_hex (string): a hex encoded token address.\n\nReturns:\n\nThe token network proxy.", "source": "juraj-google-style"}
{"code": "def find_site_python(module_name, paths=None):\n    from rez.packages_ import iter_packages\n    import subprocess\n    import ast\n    import os\n    py_cmd = 'import {x}; print {x}.__path__'.format(x=module_name)\n    p = popen(['python', '-c', py_cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    (out, err) = p.communicate()\n    if p.returncode:\n        raise InvalidPackageError((\"Failed to find installed python module '%s':\\n%s\" % (module_name, err)))\n    module_paths = ast.literal_eval(out.strip())\n\n    def issubdir(path, parent_path):\n        return path.startswith((parent_path + os.sep))\n    for package in iter_packages('python', paths=paths):\n        if (not hasattr(package, '_site_paths')):\n            continue\n        contained = True\n        for module_path in module_paths:\n            if (not any((issubdir(module_path, x) for x in package._site_paths))):\n                contained = False\n        if contained:\n            return package\n    raise InvalidPackageError((\"Failed to find python installation containing the module '%s'. Has python been installed as a rez package?\" % module_name))", "docstring": "Find the rez native python package that contains the given module.\n\nThis function is used by python 'native' rez installers to find the native\nrez python package that represents the python installation that this module\nis installed into.\n\nNote:\nThis function is dependent on the behavior found in the python '_native'\npackage found in the 'rez-recipes' repository. Specifically, it expects\nto find a python package with a '_site_paths' list attribute listing\nthe site directories associated with the python installation.\n\nArgs:\nmodule_name (str): Target python module.\npaths (list of str, optional): paths to search for packages,\ndefaults to `config.packages_path`.\n\nReturns:\n`Package`: Native python package containing the named module.", "source": "codesearchnet"}
{"code": "def lines_from_string(string, as_interned=False):\n    if as_interned:\n        return [sys.intern(line) for line in string.splitlines()]\n    return string.splitlines()", "docstring": "Create a list of file lines from a given string.\n\nArgs:\nstring (str): File string\nas_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\nstrings (list): File line list", "source": "codesearchnet"}
{"code": "def create_temp(node, namer):\n    if isinstance(node, gast.Name):\n        name = node.id\n    elif isinstance(node, (gast.Attribute, gast.Subscript)):\n        name = node.value.id\n    else:\n        raise TypeError\n    temp_node = gast.Name(id=namer.temp(name), annotation=None, ctx=None)\n    anno.setanno(temp_node, 'temp_var', node)\n    return temp_node", "docstring": "Create a temporary variable.\n\nArgs:\nnode: Create a temporary variable to store this variable in.\nnamer: A naming object that guarantees the names are unique.\n\nReturns:\nnode: See `create_grad`. Returns a temporary variable, which is always a\nsimple variable annotated with `temp_var`.", "source": "codesearchnet"}
{"code": "def _controller_name(self, objtype):\n        \n        \n        if objtype.endswith('y'):\n            return objtype[:-1] + 'ies'\n\n        if objtype[-1] in 'sx' or objtype[-2:] in ['sh', 'ch']:\n            return objtype + 'es'\n\n        if objtype.endswith('an'):\n            return objtype[:-2] + 'en'\n\n        return objtype + 's'", "docstring": "Determines the controller name for the object's type\n\nArgs:\nobjtype (str): The object type\n\nReturns:\nA string with the controller name", "source": "juraj-google-style"}
{"code": "def pprint_cell(self, row, col):\n    ndims = self.ndims\n    if (col >= self.cols):\n        raise Exception((('Maximum column index is %d' % self.cols) - 1))\n    elif (row >= self.rows):\n        raise Exception((('Maximum row index is %d' % self.rows) - 1))\n    elif (row == 0):\n        if (col >= ndims):\n            if self.vdims:\n                return self.vdims[(col - ndims)].pprint_label\n            else:\n                return ''\n        return self.kdims[col].pprint_label\n    else:\n        dim = self.get_dimension(col)\n        return dim.pprint_value(self.iloc[((row - 1), col)])", "docstring": "Formatted contents of table cell.\n\nArgs:\nrow (int): Integer index of table row\ncol (int): Integer index of table column\n\nReturns:\nFormatted table cell contents", "source": "codesearchnet"}
{"code": "def generate_nb_state_data(means, weights, R):\n    cells = weights.shape[1]\n    x_true = np.dot(means, weights)\n    R_ = np.tile(R, (cells, 1)).T\n    P_true = (x_true / (R_ + x_true))\n    sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)\n    return sample.astype(float)", "docstring": "Generates data according to the Negative Binomial Convex Mixture Model.\n\nArgs:\nmeans (array): Cell types- genes x clusters\nweights (array): Cell cluster assignments- clusters x cells\nR (array): dispersion parameter - 1 x genes\n\nReturns:\ndata matrix - genes x cells", "source": "codesearchnet"}
{"code": "def analyze(self) -> Sequence[_HasReturnT]:", "docstring": "Calls every signature of this function with appropriate fake arguments.\n\nReturns:\nA sequence of objects with information about the result of calling the\nfunction with each of its signatures, with get_return_value() methods\nthat retrieve the return values.", "source": "github-repos"}
{"code": "def validate_and_copy_one_submission(self, submission_path):\n    \n    if os.path.exists(self.download_dir):\n      shutil.rmtree(self.download_dir)\n    os.makedirs(self.download_dir)\n    if os.path.exists(self.validate_dir):\n      shutil.rmtree(self.validate_dir)\n    os.makedirs(self.validate_dir)\n    logging.info('\\n' + ('\n                 + '\n    local_path = self.copy_submission_locally(submission_path)\n    metadata = self.base_validator.validate_submission(local_path)\n    if not metadata:\n      logging.error('Submission \"%s\" is INVALID', submission_path)\n      self.stats.add_failure()\n      return\n    submission_type = metadata['type']\n    container_name = metadata['container_gpu']\n    logging.info('Submission \"%s\" is VALID', submission_path)\n    self.list_of_containers.add(container_name)\n    self.stats.add_success(submission_type)\n    if self.do_copy:\n      submission_id = '{0:04}'.format(self.cur_submission_idx)\n      self.cur_submission_idx += 1\n      self.copy_submission_to_destination(submission_path,\n                                          TYPE_TO_DIR[submission_type],\n                                          submission_id)\n      self.id_to_path_mapping[submission_id] = submission_path", "docstring": "Validates one submission and copies it to target directory.\n\nArgs:\nsubmission_path: path in Google Cloud Storage of the submission file", "source": "juraj-google-style"}
{"code": "def get_user(self, username):\n    response = self._get((self.rest_url + '/user'), params={'username': username, 'expand': 'attributes'})\n    if (not response.ok):\n        return None\n    return response.json()", "docstring": "Retrieve information about a user\n\nReturns:\ndict: User information\n\nNone: If no user or failure occurred", "source": "codesearchnet"}
{"code": "def create_route53_zone(client, zone_name):\n    if (not zone_name.endswith('.')):\n        zone_name += '.'\n    zone_id = get_or_create_hosted_zone(client, zone_name)\n    old_soa = get_soa_record(client, zone_id, zone_name)\n    if (old_soa.text.min_ttl == '300'):\n        return zone_id\n    new_soa = copy.deepcopy(old_soa)\n    logger.debug('Updating negative caching value on zone %s to 300.', zone_name)\n    new_soa.text.min_ttl = '300'\n    client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch={'Comment': 'Update SOA min_ttl to 300.', 'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': {'Name': zone_name, 'Type': 'SOA', 'TTL': old_soa.ttl, 'ResourceRecords': [{'Value': str(new_soa.text)}]}}]})\n    return zone_id", "docstring": "Creates the given zone_name if it doesn't already exists.\n\nAlso sets the SOA negative caching TTL to something short (300 seconds).\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\nstring: The zone id returned from AWS for the existing, or newly\ncreated zone.", "source": "codesearchnet"}
{"code": "def configuration_check(config):\n    log_level = config.get('daemon', 'loglevel')\n    num_level = getattr(logging, log_level.upper(), None)\n    pidfile = config.get('daemon', 'pidfile')\n    if (not os.path.isdir(os.path.dirname(pidfile))):\n        raise ValueError(\"{d} doesn't exit\".format(d=os.path.dirname(pidfile)))\n    if (not isinstance(num_level, int)):\n        raise ValueError('Invalid log level: {}'.format(log_level))\n    for _file in ('log_file', 'stderr_file'):\n        if config.has_option('daemon', _file):\n            try:\n                touch(config.get('daemon', _file))\n            except OSError as exc:\n                raise ValueError(exc)\n    for (option, getter) in DAEMON_OPTIONS_TYPE.items():\n        try:\n            getattr(config, getter)('daemon', option)\n        except configparser.NoOptionError as error:\n            if (option not in DAEMON_OPTIONAL_OPTIONS):\n                raise ValueError(error)\n        except configparser.Error as error:\n            raise ValueError(error)\n        except ValueError as exc:\n            msg = \"invalid data for '{opt}' option in daemon section: {err}\".format(opt=option, err=exc)\n            raise ValueError(msg)\n    service_configuration_check(config)", "docstring": "Perform a sanity check on configuration.\n\nFirst it performs a sanity check against settings for daemon\nand then against settings for each service check.\n\nArguments:\nconfig (obj): A configparser object which holds our configuration.\n\nReturns:\nNone if all checks are successfully passed otherwise raises a\nValueError exception.", "source": "codesearchnet"}
{"code": "def _GetSanitizedEventValues(self, event):\n    \n    data_type = getattr(event, 'data_type', 'UNKNOWN')\n\n    event_formatter = self._output_mediator.GetEventFormatter(event)\n    if not event_formatter:\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    message, _ = self._output_mediator.GetFormattedMessages(event)\n    if message is None:\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    source_short, source = self._output_mediator.GetFormattedSources(event)\n    if source is None or source_short is None:\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    datetime_string = self._FormatDateTime(event)\n\n    format_variables = self._output_mediator.GetFormatStringAttributeNames(\n        event)\n    if format_variables is None:\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    extra_attributes = []\n    for attribute_name, attribute_value in sorted(event.GetAttributes()):\n      if (attribute_name in definitions.RESERVED_VARIABLE_NAMES or\n          attribute_name in format_variables):\n        continue\n      extra_attributes.append(\n          '{0:s}: {1!s} '.format(attribute_name, attribute_value))\n\n    extra_attributes = ' '.join(extra_attributes)\n\n    inode = event.inode\n    if inode is None and hasattr(event, 'pathspec'):\n      inode = getattr(event.pathspec, 'inode', '-')\n    if inode is None:\n      inode = '-'\n\n    tags = None\n    if getattr(event, 'tag', None):\n      tags = getattr(event.tag, 'tags', None)\n\n    taglist = ''\n    if isinstance(tags, (list, tuple)):\n      taglist = ','.join(tags)\n\n    offset = event.offset\n    if offset is None:\n      offset = 0\n\n    row = {\n        'timezone': '{0!s}'.format(self._output_mediator.timezone),\n        'MACB': self._output_mediator.GetMACBRepresentation(event),\n        'source': source_short,\n        'sourcetype': source,\n        'type': event.timestamp_desc or '-',\n        'user': getattr(event, 'username', '-'),\n        'host': getattr(event, 'hostname', '-'),\n        'description': message,\n        'filename': getattr(event, 'filename', '-'),\n        'inode': inode,\n        'notes': getattr(event, 'notes', '-'),\n        'format': getattr(event, 'parser', '-'),\n        'extra': extra_attributes,\n        'datetime': datetime_string,\n        'reportnotes': '',\n        'inreport': '',\n        'tag': taglist,\n        'offset': offset,\n        'vss_store_number': self._GetVSSNumber(event),\n        'URL': getattr(event, 'url', '-'),\n        'record_number': getattr(event, 'record_number', 0),\n        'event_identifier': getattr(event, 'event_identifier', '-'),\n        'event_type': getattr(event, 'event_type', '-'),\n        'source_name': getattr(event, 'source_name', '-'),\n        'user_sid': getattr(event, 'user_sid', '-'),\n        'computer_name': getattr(event, 'computer_name', '-'),\n        'evidence': self._evidence}\n\n    return row", "docstring": "Sanitizes the event for use in 4n6time.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ndict[str, object]: dictionary containing the sanitized event values.\n\nRaises:\nNoFormatterFound: If no event formatter can be found to match the data\ntype in the event object.", "source": "juraj-google-style"}
{"code": "def _load_config_include(self, include_directory):\n        \n        include_directory = os.path.join(self.app_path, include_directory)\n        if not os.path.isdir(include_directory):\n            msg = 'Provided include directory does not exist ({}).'.format(include_directory)\n            sys.exit(msg)\n\n        profiles = []\n        for filename in sorted(os.listdir(include_directory)):\n            if filename.endswith('.json'):\n                self.log.info('Loading config: {}'.format(filename))\n                print('Include File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, filename))\n                config_file = os.path.join(include_directory, filename)\n                with open(config_file) as data_file:\n                    try:\n                        profiles.extend(json.load(data_file))\n                    except ValueError as e:\n                        print('Invalid JSON file: {}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))\n                        sys.exit(1)\n        return profiles", "docstring": "Load included configuration files.\n\nArgs:\ninclude_directory (str): The name of the config include directory.\n\nReturns:\nlist: A list of all profiles for the current App.", "source": "juraj-google-style"}
{"code": "def stop(self, timeout=5):\n        \n        \n        \n        for worker in self._threads:\n            self._queue.put(_SHUTDOWNREQUEST)\n\n        \n        current = threading.currentThread()\n        if timeout is not None and timeout >= 0:\n            endtime = time.time() + timeout\n        while self._threads:\n            worker = self._threads.pop()\n            if worker is not current and worker.isAlive():\n                try:\n                    if timeout is None or timeout < 0:\n                        worker.join()\n                    else:\n                        remaining_time = endtime - time.time()\n                        if remaining_time > 0:\n                            worker.join(remaining_time)\n                        if worker.isAlive():\n                            \n                            \n                            c = worker.conn\n                            if c and not c.rfile.closed:\n                                try:\n                                    c.socket.shutdown(socket.SHUT_RD)\n                                except TypeError:\n                                    \n                                    c.socket.shutdown()\n                            worker.join()\n                except (\n                    AssertionError,\n                    \n                    \n                    \n                    KeyboardInterrupt,\n                ):\n                    pass", "docstring": "Terminate all worker threads.\n\nArgs:\ntimeout (int): time to wait for threads to stop gracefully", "source": "juraj-google-style"}
{"code": "def withdraw(self, amount, currency, payment_method_id):\n    params = {'amount': amount, 'currency': currency, 'payment_method_id': payment_method_id}\n    return self._send_message('post', '/withdrawals/payment-method', data=json.dumps(params))", "docstring": "Withdraw funds to a payment method.\n\nSee AuthenticatedClient.get_payment_methods() to receive\ninformation regarding payment methods.\n\nArgs:\namount (Decimal): The amount to withdraw.\ncurrency (str): Currency type (eg. 'BTC')\npayment_method_id (str): ID of the payment method.\n\nReturns:\ndict: Withdraw details. Example::\n{\n\"id\":\"593533d2-ff31-46e0-b22e-ca754147a96a\",\n\"amount\": \"10.00\",\n\"currency\": \"USD\",\n\"payout_at\": \"2016-08-20T00:31:09Z\"\n}", "source": "codesearchnet"}
{"code": "def DeserializeUnsigned(self, reader):\n    self.Version = reader.ReadUInt32()\n    self.PrevHash = reader.ReadUInt256()\n    self.MerkleRoot = reader.ReadUInt256()\n    self.Timestamp = reader.ReadUInt32()\n    self.Index = reader.ReadUInt32()\n    self.ConsensusData = reader.ReadUInt64()\n    self.NextConsensus = reader.ReadUInt160()", "docstring": "Deserialize unsigned data only.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "codesearchnet"}
{"code": "def partition_or_replicate_on_host(tensor, dims):\n    if dims is None:\n        return itertools.repeat(tensor)\n    dims = np.array(dims)\n    output = [tensor]\n    shape_list = np.array(tensor.shape.as_list())\n    quotients, remainders = np.divmod(shape_list, dims)\n    for axis, (quotient, remainder, dim, original_size) in enumerate(zip(quotients, remainders, dims, shape_list)):\n        if dim <= 1:\n            continue\n        if remainder > 0:\n            ceil_ratio = quotient + 1\n            num_full_slots, left_over = np.divmod(original_size, ceil_ratio)\n            num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]\n            if len(num_or_size_splits) < dim:\n                num_or_size_splits += [0] * (dim - len(num_or_size_splits))\n            new_output = []\n            for x in output:\n                new_output.append(array_ops.split(x, num_or_size_splits=num_or_size_splits, axis=axis))\n            output = new_output\n        else:\n            output = [array_ops.split(x, int(dim), axis=axis) for x in output]\n        output = nest.flatten(output)\n    return output", "docstring": "Partitions or replicates the input tensor.\n\nThe ops inside this function are placed on the host side.\n\nArgs:\ntensor: The input tensor which will be partitioned or replicated.\ndims: A list of integer describes how to partition the input tensor.\n\nReturns:\nAn iterator of `Tensor`s or a list of partitioned tensors.", "source": "github-repos"}
{"code": "def to_hour(num) -> str:\n    to_str = str(int(num))\n    return pd.Timestamp(f'{to_str[:(- 2)]}:{to_str[(- 2):]}').strftime('%H:%M')", "docstring": "Convert YAML input to hours\n\nArgs:\nnum: number in YMAL file, e.g., 900, 1700, etc.\n\nReturns:\nstr\n\nExamples:\n>>> to_hour(900)\n'09:00'\n>>> to_hour(1700)\n'17:00'", "source": "codesearchnet"}
{"code": "def add(self, text, checked=False, sort=None):\n    if (self.parent is None):\n        raise exception.InvalidException('Item has no parent')\n    node = self.parent.add(text, checked, sort)\n    self.indent(node)\n    return node", "docstring": "Add a new sub item to the list. This item must already be attached to a list.\n\nArgs:\ntext (str): The text.\nchecked (bool): Whether this item is checked.\nsort (int): Item id for sorting.", "source": "codesearchnet"}
{"code": "def heightmap_count_cells(hm: np.ndarray, mi: float, ma: float) -> int:\n    return int(lib.TCOD_heightmap_count_cells(_heightmap_cdata(hm), mi, ma))", "docstring": "Return the number of map cells which value is between ``mi`` and ``ma``.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nmi (float): The lower bound.\nma (float): The upper bound.\n\nReturns:\nint: The count of values which fall between ``mi`` and ``ma``.\n\n.. deprecated:: 8.1\nCan be replaced by an equivalent NumPy function such as:\n``numpy.count_nonzero((mi <= hm) & (hm < ma))``", "source": "codesearchnet"}
{"code": "def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):\n    \n    key_flags = self._GetKeyFlagsForModule(module)\n    if key_flags:\n      self.__RenderModuleFlags(module, key_flags, output_lines, prefix)", "docstring": "Generates a help string for the key flags of a given module.\n\nArgs:\nmodule: A module object or a module name (a string).\noutput_lines: A list of strings.  The generated help message\nlines will be appended to this list.\nprefix: A string that is prepended to each generated help line.", "source": "juraj-google-style"}
{"code": "def format_sec_to_dhm(sec):\n    \n    rem_int, s_int = divmod(int(sec), 60)\n    rem_int, m_int, = divmod(rem_int, 60)\n    d_int, h_int, = divmod(rem_int, 24)\n    return '{}d{:02d}h{:02d}m'.format(d_int, h_int, m_int)", "docstring": "Format seconds to days, hours, minutes.\n\nArgs:\nsec: float or int\nNumber of seconds in a period of time\n\nReturns:\nPeriod of time represented as a string on the form ``0d:00h:00m``.", "source": "juraj-google-style"}
{"code": "def copy(self, src, dst, other_system=None):\n        \n        with _handle_azure_exception():\n            self.client.copy_file(\n                copy_source=(other_system or self)._format_src_url(src, self),\n                **self.get_client_kwargs(dst))", "docstring": "Copy object of the same storage.\n\nArgs:\nsrc (str): Path or URL.\ndst (str): Path or URL.\nother_system (pycosio.storage.azure._AzureBaseSystem subclass):\nThe source storage system.", "source": "juraj-google-style"}
{"code": "def __matches(s1, s2, ngrams_fn, n=3):\n    (ngrams1, ngrams2) = (set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n)))\n    return ngrams1.intersection(ngrams2)", "docstring": "Returns the n-grams that match between two sequences\n\nSee also: SequenceMatcher.get_matching_blocks\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nset:", "source": "codesearchnet"}
{"code": "def pandas_dataframe(self, start, stop, ncol, **kwargs):\n    try:\n        int(start)\n        int(stop)\n    except TypeError:\n        print('start and stop must be ints')\n    try:\n        ncol = int(ncol)\n        return pd.read_csv(six.StringIO('\\n'.join(self[start:stop])), delim_whitespace=True, names=range(ncol), **kwargs)\n    except TypeError:\n        try:\n            ncol = list(ncol)\n            return pd.read_csv(six.StringIO('\\n'.join(self[start:stop])), delim_whitespace=True, names=ncol, **kwargs)\n        except TypeError:\n            print('Cannot pandas_dataframe if ncol is {}, must be int or list'.format(type(ncol)))", "docstring": "Returns the result of tab-separated pandas.read_csv on\na subset of the file.\n\nArgs:\nstart (int): line number where structured data starts\nstop (int): line number where structured data stops\nncol (int or list): the number of columns in the structured\ndata or a list of that length with column names\n\nReturns:\npd.DataFrame: structured data", "source": "codesearchnet"}
{"code": "def update_snmp_configuration(self, configuration, timeout=(- 1)):\n    data = configuration.copy()\n    if ('type' not in data):\n        data['type'] = 'snmp-configuration'\n    uri = '{}{}'.format(self.data['uri'], self.SNMP_CONFIGURATION_PATH)\n    return self._helper.update(data, uri=uri, timeout=timeout)", "docstring": "Updates the SNMP configuration of a logical interconnect. Changes to the SNMP configuration are asynchronously\napplied to all managed interconnects.\n\nArgs:\nconfiguration: snmp configuration.\n\nReturns:\ndict: The Logical Interconnect.", "source": "codesearchnet"}
{"code": "def GetTopLevel(self, file_object):\n    try:\n        top_level_object = biplist.readPlist(file_object)\n    except (biplist.InvalidPlistException, biplist.NotBinaryPlistException) as exception:\n        raise errors.UnableToParseFile('Unable to parse plist with error: {0!s}'.format(exception))\n    return top_level_object", "docstring": "Returns the deserialized content of a plist as a dictionary object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nReturns:\ndict[str, object]: contents of the plist.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def parse_machine_listing(text: str, convert: bool=True, strict: bool=True) -> List[dict]:\n    listing = []\n    for line in text.splitlines(False):\n        facts = line.split(';')\n        row = {}\n        filename = None\n        for fact in facts:\n            (name, sep, value) = fact.partition('=')\n            if sep:\n                name = name.strip().lower()\n                value = value.strip().lower()\n                if convert:\n                    try:\n                        value = convert_machine_list_value(name, value)\n                    except ValueError:\n                        if strict:\n                            raise\n                row[name] = value\n            elif (name[0:1] == ' '):\n                filename = name[1:]\n            else:\n                name = name.strip().lower()\n                row[name] = ''\n        if filename:\n            row['name'] = filename\n            listing.append(row)\n        elif strict:\n            raise ValueError('Missing filename.')\n    return listing", "docstring": "Parse machine listing.\n\nArgs:\ntext: The listing.\nconvert: Convert sizes and dates.\nstrict: Method of handling errors. ``True`` will raise\n``ValueError``. ``False`` will ignore rows with errors.\n\nReturns:\nlist: A list of dict of the facts defined in RFC 3659.\nThe key names must be lowercase. The filename uses the key\n``name``.", "source": "codesearchnet"}
{"code": "def ReadLine(self, file_object):\n    \n    line, _, self.lines = self.lines.partition('\\n')\n    if not line:\n      self.ReadLines(file_object)\n      line, _, self.lines = self.lines.partition('\\n')\n\n    return line", "docstring": "Reads a line.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nstr: line read from the lines buffer.", "source": "juraj-google-style"}
{"code": "def _init_params(self, amplitude, length_scale, validate_args):\n    \n    dtype = util.maybe_get_common_dtype(\n        [amplitude, length_scale])\n    if amplitude is not None:\n      amplitude = tf.convert_to_tensor(\n          value=amplitude, name='amplitude', dtype=dtype)\n    self._amplitude = _validate_arg_if_not_none(\n        amplitude, tf.compat.v1.assert_positive, validate_args)\n    if length_scale is not None:\n      length_scale = tf.convert_to_tensor(\n          value=length_scale, name='length_scale', dtype=dtype)\n    self._length_scale = _validate_arg_if_not_none(\n        length_scale, tf.compat.v1.assert_positive, validate_args)\n    return dtype", "docstring": "Shared init logic for `amplitude` and `length_scale` params.\n\nArgs:\namplitude: `Tensor` (or convertible) or `None` to convert, validate.\nlength_scale: `Tensor` (or convertible) or `None` to convert, validate.\nvalidate_args: If `True`, parameters are checked for validity despite\npossibly degrading runtime performance\n\nReturns:\ndtype: The common `DType` of the parameters.", "source": "juraj-google-style"}
{"code": "def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs):\n    return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)", "docstring": "Post-process the output of the model to decode the text.\n\nArgs:\ngenerated_outputs (`torch.Tensor` or `np.ndarray`):\nThe output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`\nor `(sequence_length,)`.\nskip_special_tokens (`bool`, *optional*, defaults to `True`):\nWhether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.\nclean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\nWhether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.\n**kwargs:\nAdditional arguments to be passed to the tokenizer's `batch_decode method`.\n\nReturns:\n`List[str]`: The decoded text.", "source": "github-repos"}
{"code": "def save_image(imager, grid_data, grid_norm, output_file):\n    imager.finalise_plane(grid_data, grid_norm)\n    grid_data = numpy.real(grid_data)\n    border = ((imager.plane_size - imager.image_size) \n    if (border > 0):\n        end = (border + imager.image_size)\n        grid_data = grid_data[(border:end, border:end)]\n    hdr = fits.header.Header()\n    fits.writeto(output_file, grid_data, hdr, clobber=True)", "docstring": "Makes an image from gridded visibilities and saves it to a FITS file.\n\nArgs:\nimager (oskar.Imager):          Handle to configured imager.\ngrid_data (numpy.ndarray):      Final visibility grid.\ngrid_norm (float):              Grid normalisation to apply.\noutput_file (str):              Name of output FITS file to write.", "source": "codesearchnet"}
{"code": "def DeserializeTX(buffer):\n        \n        mstream = MemoryStream(buffer)\n        reader = BinaryReader(mstream)\n\n        tx = Transaction.DeserializeFrom(reader)\n\n        return tx", "docstring": "Deserialize the stream into a Transaction object.\n\nArgs:\nbuffer (BytesIO): stream to deserialize the Transaction from.\n\nReturns:\nneo.Core.TX.Transaction:", "source": "juraj-google-style"}
{"code": "def _get_ops_from_nodedefs(node_defs):\n    ops = set()\n    for node_def in node_defs:\n        op_and_kernel = get_ops_from_nodedef(node_def)\n        if op_and_kernel:\n            ops.add(op_and_kernel)\n    return ops", "docstring": "Gets the ops and kernels needed from the list of NodeDef.\n\nIf a NodeDef's op is not in the allowlist of ops without kernel and there is\nno kernel found for this NodeDef, then skip that NodeDef and proceed to the\nnext one.\n\nArgs:\nnode_defs: list of NodeDef's to get op/kernel information.\n\nReturns:\nA set of (op_name, kernel_name) tuples.", "source": "github-repos"}
{"code": "def load_dict(self, db_key: str, hierarchical: bool=False) -> dict:\n    if (not hierarchical):\n        db_values = self._db.hgetall(db_key)\n        for (_key, _value) in db_values.items():\n            if isinstance(_value, str):\n                db_values[_key] = ast.literal_eval(_value)\n        my_dict = db_values\n    else:\n        my_dict = self._load_dict_hierarchical(db_key)\n    return my_dict", "docstring": "Load the dictionary at the specified key.\n\nHierarchically stored dictionaries use a ':' separator to expand\nthe dictionary into a set of Redis hashes.\n\nArgs:\ndb_key (str): Key at which the dictionary is stored in the db.\nhierarchical (bool): If True, expect the dictionary to have been\nstored hierarchically. If False, expect the dictionary to have\nbeen stored flat.\n\nReturns:\ndict, the dictionary stored at key", "source": "codesearchnet"}
{"code": "def prepare_to_run_task(context, claim_task):\n    current_task_info = {}\n    context.claim_task = claim_task\n    current_task_info['taskId'] = get_task_id(claim_task)\n    current_task_info['runId'] = get_run_id(claim_task)\n    log.info('Going to run taskId {taskId} runId {runId}!'.format(**current_task_info))\n    context.write_json(os.path.join(context.config['work_dir'], 'current_task_info.json'), current_task_info, 'Writing current task info to {path}...')\n    return current_task_info", "docstring": "Given a `claim_task` json dict, prepare the `context` and `work_dir`.\n\nSet `context.claim_task`, and write a `work_dir/current_task_info.json`\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nclaim_task (dict): the claim_task dict.\n\nReturns:\ndict: the contents of `current_task_info.json`", "source": "codesearchnet"}
{"code": "def sendCommand(self, command):\n        \n        command_data = [ord(x) for x in buffer(command)]\n        self.hid.write(command_data)\n        response_data = ''.join(chr(x) for x in self.hid.read(64))\n        response = command.RESPONSE.from_buffer_copy(response_data)\n        if response.status != 0:\n            raise CommandException(response.status)\n        return response", "docstring": "Sends a Command object to the MCP2210 and returns its response.\n\nArguments:\nA commands.Command instance\n\nReturns:\nA commands.Response instance, or raises a CommandException on error.", "source": "juraj-google-style"}
{"code": "def unbatch(self, spec):\n    raise NotImplementedError(f'{type(self).__name__}.unbatch')", "docstring": "Returns the TypeSpec for a single unbatched element in `spec`.\n\nArgs:\nspec: The `TypeSpec` for a batch of values.\n\nReturns:\nA `TypeSpec` for an individual value.", "source": "github-repos"}
{"code": "def edit_distance_2(self, word):\n    word = word.lower()\n    return [e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)]", "docstring": "Compute all strings that are two edits away from `word` using only\nthe letters in the corpus\n\nArgs:\nword (str): The word for which to calculate the edit distance\nReturns:\nset: The set of strings that are edit distance two from the \\\nprovided word", "source": "codesearchnet"}
{"code": "def gene_panels(self, panel_id=None, institute_id=None, version=None):\n        \n        query = {}\n        if panel_id:\n            query['panel_name'] = panel_id\n            if version:\n                query['version'] = version\n        if institute_id:\n            query['institute'] = institute_id\n\n        return self.panel_collection.find(query)", "docstring": "Return all gene panels\n\nIf panel_id return all versions of panels by that panel name\n\nArgs:\npanel_id(str)\n\nReturns:\ncursor(pymongo.cursor)", "source": "juraj-google-style"}
{"code": "def profile_stats(adapter, threshold=0.9):\n    profiles = []\n    samples = []\n    distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}\n    for case in adapter.cases():\n        for individual in case['individuals']:\n            if individual.get('profile'):\n                sample_id = f\"{case['case_id']}.{individual['ind_id']}\"\n                ind_profile = individual['profile']\n                distance_array = np.array([], dtype=np.float)\n                for (sample, profile) in zip(samples, profiles):\n                    distance = compare_profiles(ind_profile, profile)\n                    distance_array = np.append(distance_array, distance)\n                    if (distance >= threshold):\n                        LOG.warning(f'{sample_id} is {distance} similar to {sample}')\n                for (key, range) in HAMMING_RANGES.items():\n                    distance_dict[key] += np.sum(((distance_array >= range[0]) & (distance_array < range[1])))\n                profiles.append(ind_profile)\n                samples.append(sample_id)\n    return distance_dict", "docstring": "Compares the pairwise hamming distances for all the sample profiles in\nthe database. Returns a table of the number of distances within given\nranges.\n\nArgs:\nadapter (MongoAdapter): Adapter to mongodb\nthreshold (float): If any distance is found above this threshold\na warning will be given, stating the two matching samples.\n\nReturns:\ndistance_dict (dict): dictionary with ranges as keys, and the number\nof distances that are within these ranges as values.", "source": "codesearchnet"}
{"code": "def resize_bilinear_nd(t, target_shape):\n    shape = t.get_shape().as_list()\n    target_shape = list(target_shape)\n    assert (len(shape) == len(target_shape))\n    d = 0\n    while (d < len(shape)):\n        if (shape[d] == target_shape[d]):\n            d += 1\n            continue\n        new_shape = shape[:]\n        new_shape[d:(d + 2)] = target_shape[d:(d + 2)]\n        shape_ = collapse_shape(shape, d, (d + 2))\n        new_shape_ = collapse_shape(new_shape, d, (d + 2))\n        t_ = tf.reshape(t, shape_)\n        t_ = tf.image.resize_bilinear(t_, new_shape_[1:3])\n        t = tf.reshape(t_, new_shape)\n        shape = new_shape\n        d += 2\n    return t", "docstring": "Bilinear resizes a tensor t to have shape target_shape.\n\nThis function bilinearly resizes a n-dimensional tensor by iteratively\napplying tf.image.resize_bilinear (which can only resize 2 dimensions).\nFor bilinear interpolation, the order in which it is applied does not matter.\n\nArgs:\nt: tensor to be resized\ntarget_shape: the desired shape of the new tensor.\n\nReturns:\nThe resized tensor", "source": "codesearchnet"}
{"code": "def preprocess_GIF(self, image, **kwargs):\n        \n        if 'transparency' in image.info:\n            save_kwargs = {'transparency': image.info['transparency']}\n        else:\n            save_kwargs = {}\n        return (image, save_kwargs)", "docstring": "Receive a PIL Image instance of a GIF and return 2-tuple.\n\nArgs:\n* [0]: Original Image instance (passed to `image`)\n* [1]: Dict with a transparency key (to GIF transparency layer)", "source": "juraj-google-style"}
{"code": "def set_config(config):\n    \n    \n    bigchaindb.config = copy.deepcopy(bigchaindb._config)\n    \n    update(bigchaindb.config, update_types(config, bigchaindb.config))\n    bigchaindb.config['CONFIGURED'] = True", "docstring": "Set bigchaindb.config equal to the default config dict,\nthen update that with whatever is in the provided config dict,\nand then set bigchaindb.config['CONFIGURED'] = True\n\nArgs:\nconfig (dict): the config dict to read for changes\nto the default config\n\nNote:\nAny previous changes made to ``bigchaindb.config`` will be lost.", "source": "juraj-google-style"}
{"code": "def pick_unused_port(pid=None, portserver_address=None):\n    try:\n        port = _free_ports.pop()\n    except KeyError:\n        pass\n    else:\n        _owned_ports.add(port)\n        return port\n    if portserver_address:\n        port = get_port_from_port_server(portserver_address, pid=pid)\n        if port:\n            return port\n    if ('PORTSERVER_ADDRESS' in os.environ):\n        port = get_port_from_port_server(os.environ['PORTSERVER_ADDRESS'], pid=pid)\n        if port:\n            return port\n    return _pick_unused_port_without_server()", "docstring": "A pure python implementation of PickUnusedPort.\n\nArgs:\npid: PID to tell the portserver to associate the reservation with. If\nNone, the current process's PID is used.\nportserver_address: The address (path) of a unix domain socket\nwith which to connect to a portserver, a leading '@'\ncharacter indicates an address in the \"abstract namespace\".  OR\nOn systems without socket.AF_UNIX, this is an AF_INET address.\nIf None, or no port is returned by the portserver at the provided\naddress, the environment will be checked for a PORTSERVER_ADDRESS\nvariable.  If that is not set, no port server will be used.\n\nReturns:\nA port number that is unused on both TCP and UDP.\n\nRaises:\nNoFreePortFoundError: No free port could be found.", "source": "codesearchnet"}
{"code": "def compress(content, method='gzip'):\n    if (method == True):\n        method = 'gzip'\n    method = (method or '').lower()\n    if (method == ''):\n        return content\n    elif (method == 'gzip'):\n        return gzip_compress(content)\n    raise NotImplementedError((str(method) + ' is not currently supported. Supported Options: None, gzip'))", "docstring": "Compresses file content.\n\nRequired:\ncontent (bytes): The information to be compressed\nmethod (str, default: 'gzip'): None or gzip\nRaises:\nNotImplementedError if an unsupported codec is specified.\ncompression.DecodeError if the encoder has an issue\n\nReturn: compressed content", "source": "codesearchnet"}
{"code": "def decode_message(self, message_type, encoded_message):\n        \n        encoded_message = six.ensure_str(encoded_message)\n        if not encoded_message.strip():\n            return message_type()\n\n        dictionary = json.loads(encoded_message)\n        message = self.__decode_dictionary(message_type, dictionary)\n        message.check_initialized()\n        return message", "docstring": "Merge JSON structure to Message instance.\n\nArgs:\nmessage_type: Message to decode data to.\nencoded_message: JSON encoded version of message.\n\nReturns:\nDecoded instance of message_type.\n\nRaises:\nValueError: If encoded_message is not valid JSON.\nmessages.ValidationError if merged message is not initialized.", "source": "juraj-google-style"}
{"code": "def __init__(self, database_config: VectorDatabaseWriteConfig):\n    if not isinstance(database_config, VectorDatabaseWriteConfig):\n        raise TypeError(f'database_config must be VectorDatabaseWriteConfig, got {type(database_config)}')\n    self.database_config = database_config", "docstring": "Initialize transform with database config.\n\nArgs:\ndatabase_config: Configuration for target vector database.", "source": "github-repos"}
{"code": "class DonutFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n    do_thumbnail: Optional[bool]\n    do_align_long_axis: Optional[bool]\n    do_pad: Optional[bool]", "docstring": "Args:\ndo_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):\nWhether to resize the image using thumbnail method.\ndo_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):\nWhether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.\ndo_pad (`bool`, *optional*, defaults to `self.do_pad`):\nWhether to pad the image. If `random_padding` is set to `True`, each image is padded with a random\namount of padding on each size, up to the largest image size in the batch. Otherwise, all images are\npadded to the largest image size in the batch.", "source": "github-repos"}
{"code": "def process(self, element):\n    (text, uid), prediction = element\n    cluster = prediction.inference.item()\n    if cluster == -1:\n        body = f'Tweet-Id is {uid} and text is {text}'\n        self.yag_smtp_client.send(to=cfg.EMAIL_ADDRESS, subject='Anomaly Detected', contents=body)", "docstring": "Takes a tuple of (text, id) and a prediction, and if the prediction is -1,\nit sends an email to the specified address\n\nArgs:\nelement: The element that is being processed.", "source": "github-repos"}
{"code": "def get_storage(self, contract_hash, storage_key, id=None, endpoint=None):\n        \n        result = self._call_endpoint(GET_STORAGE, params=[contract_hash, binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')], id=id, endpoint=endpoint)\n        try:\n\n            return bytearray(binascii.unhexlify(result.encode('utf-8')))\n        except Exception as e:\n            raise NEORPCException(\"could not decode result %s \" % e)", "docstring": "Returns a storage item of a specified contract\nArgs:\ncontract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'\nstorage_key: (str) storage key to lookup, for example 'totalSupply'\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\nbytearray: bytearray value of the storage item", "source": "juraj-google-style"}
{"code": "def _setup(self):\n    if isinstance(self.module, torch.nn.RNNBase):\n        self.module.flatten_parameters = noop\n    for name_w in self.weights:\n        w = getattr(self.module, name_w)\n        del self.module._parameters[name_w]\n        self.module.register_parameter((name_w + '_raw'), nn.Parameter(w.data))", "docstring": "for each string defined in self.weights, the corresponding\nattribute in the wrapped module is referenced, then deleted, and subsequently\nregistered as a new parameter with a slightly modified name.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def max_csi(self):\n    csi = (self.contingency_tables['TP'] / ((self.contingency_tables['TP'] + self.contingency_tables['FN']) + self.contingency_tables['FP']))\n    return csi.max()", "docstring": "Calculate the maximum Critical Success Index across all probability thresholds\n\nReturns:\nThe maximum CSI as a float", "source": "codesearchnet"}
{"code": "def create_labels(ptransform=None, namespace=None, name=None, pcollection=None):\n    labels = {}\n    if ptransform:\n        labels[PTRANSFORM_LABEL] = ptransform\n    if namespace:\n        labels[NAMESPACE_LABEL] = namespace\n    if name:\n        labels[NAME_LABEL] = name\n    if pcollection:\n        labels[PCOLLECTION_LABEL] = pcollection\n    return labels", "docstring": "Create the label dictionary based on the provided values.\n\nArgs:\nptransform: The ptransform id used as a label.\npcollection: The pcollection id used as a label.", "source": "github-repos"}
{"code": "def traverse_preorder(self, leaves=True, internal=True):\n        \n        for node in self.root.traverse_preorder(leaves=leaves, internal=internal):\n            yield node", "docstring": "Perform a preorder traversal of the ``Node`` objects in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def update_config_data(msg, cfg):\n    \n    for attr in msg:\n        if attr in cfg.data[msg.profile] and attr is not \"auth\":\n            cfg.data[msg.profile][attr] = getattr(msg, attr)", "docstring": "Updates the profile's config entry with values set in each attr by the\nuser.  This will overwrite existing values.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "juraj-google-style"}
{"code": "def assert_is_fully_defined(self):\n    if not self.is_fully_defined():\n        raise ValueError('Shape %s is not fully defined' % self)", "docstring": "Raises an exception if `self` is not fully defined in every dimension.\n\nRaises:\nValueError: If `self` does not have a known value for every dimension.", "source": "github-repos"}
{"code": "def _validate_pos_args_syntax(alias_name, alias_command):\n    pos_args_from_alias = get_placeholders(alias_name)\n    pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]\n    if (set(pos_args_from_alias) != set(pos_args_from_command)):\n        arg_diff = (set(pos_args_from_alias) ^ set(pos_args_from_command))\n        raise CLIError(INCONSISTENT_ARG_ERROR.format(('' if (len(arg_diff) == 1) else 's'), arg_diff, ('is' if (len(arg_diff) == 1) else 'are')))", "docstring": "Check if the positional argument syntax is valid in alias name and alias command.\n\nArgs:\nalias_name: The name of the alias to validate.\nalias_command: The command to validate.", "source": "codesearchnet"}
{"code": "def add_toolkit(topology, location):\n    import streamsx.topology.topology\n    assert isinstance(topology, streamsx.topology.topology.Topology)\n    tkinfo = dict()\n    tkinfo['root'] = os.path.abspath(location)\n    topology.graph._spl_toolkits.append(tkinfo)", "docstring": "Add an SPL toolkit to a topology.\n\nArgs:\ntopology(Topology): Topology to include toolkit in.\nlocation(str): Location of the toolkit directory.", "source": "codesearchnet"}
{"code": "def compute_mask(self, inputs, mask=None):\n    if not self._supports_masking:\n        if any((m is not None for m in nest.flatten(mask))):\n            raise TypeError('Layer ' + self.name + ' does not support masking, but was passed an input_mask: ' + str(mask))\n        return None\n    return mask", "docstring": "Computes an output mask tensor.\n\nArgs:\ninputs: Tensor or list of tensors.\nmask: Tensor or list of tensors.\n\nReturns:\nNone or a tensor (or list of tensors,\none per output tensor of the layer).", "source": "github-repos"}
{"code": "def wait_for_postgres(database, host, port, username, password):\n        \n        \n        connecting_string = 'Checking for PostgreSQL...'\n        if port is not None:\n            port = int(port)\n        while True:\n            try:\n                logger.info(connecting_string)\n                connection = psycopg2.connect(\n                    database=database,\n                    host=host,\n                    port=port,\n                    user=username,\n                    password=password,\n                    connect_timeout=3\n                )\n                connection.close()\n                logger.info('PostgreSQL is running!')\n                break\n            except psycopg2.OperationalError:\n                time.sleep(1)", "docstring": "Waits for PostgreSQL database to be up\n\nArgs:\ndatabase (Optional[str]): Database name\nhost (Optional[str]): Host where database is located\nport (Union[int, str, None]): Database port\nusername (Optional[str]): Username to log into database\npassword (Optional[str]): Password to log into database\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(ConstantDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.value = None", "docstring": "Initializes an enumeration data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def override_parent_subgraph(self, parent_subgraph, invisible_edges=None):\n    with transaction.atomic():\n        if (invisible_edges is None):\n            invisible_edges = set()\n        children = list(parent_subgraph.keys())\n        all_old_relations = dict(proso.list.group_by(list(ItemRelation.objects.filter(child_id__in=children)), by=(lambda relation: relation.child_id)))\n        to_delete = set()\n        for (child_id, parents) in parent_subgraph.items():\n            old_relations = {relation.parent_id: relation for relation in all_old_relations.get(child_id, [])}\n            for parent_id in parents:\n                if (parent_id not in old_relations):\n                    ItemRelation.objects.create(parent_id=parent_id, child_id=child_id, visible=((child_id, parent_id) not in invisible_edges))\n                elif (old_relations[parent_id].visible != ((child_id, parent_id) not in invisible_edges)):\n                    old_relations[parent_id].visible = ((child_id, parent_id) not in invisible_edges)\n                    old_relations[parent_id].save()\n            to_delete |= {old_relations[parent_id].pk for parent_id in (set(old_relations.keys()) - set(parents))}\n        ItemRelation.objects.filter(pk__in=to_delete).delete()", "docstring": "Get all items with outcoming edges from the given subgraph, drop all\ntheir parent relations, and then add parents according to the given\nsubgraph.\n\nArgs:\nparent_subgraph (dict): item id -> list of parents(item ids)\ninvisible_edges (list|set): set of (from, to) tuples specifying\ninvisible edges", "source": "codesearchnet"}
{"code": "def restore_site_properties(self, site_property=\"ff_map\", filename=None):\n        \n\n        \n        if not self.control_params[\"filetype\"] == \"pdb\":\n            raise ValueError()\n\n        filename = filename or self.control_params[\"output\"]\n        bma = BabelMolAdaptor.from_file(filename, \"pdb\")\n        pbm = pb.Molecule(bma._obmol)\n\n        assert len(pbm.residues) == sum([x[\"number\"]\n                                         for x in self.param_list])\n\n        packed_mol = self.convert_obatoms_to_molecule(\n            pbm.residues[0].atoms, residue_name=pbm.residues[0].name,\n            site_property=site_property)\n\n        for resid in pbm.residues[1:]:\n            mol = self.convert_obatoms_to_molecule(\n                resid.atoms, residue_name=resid.name,\n                site_property=site_property)\n            for site in mol:\n                packed_mol.append(site.species, site.coords,\n                                  properties=site.properties)\n\n        return packed_mol", "docstring": "Restore the site properties for the final packed molecule.\n\nArgs:\nsite_property (str):\nfilename (str): path to the final packed molecule.\n\nReturns:\nMolecule", "source": "juraj-google-style"}
{"code": "def getctime(self, path):\n        \n        try:\n            file_obj = self.filesystem.resolve(path)\n        except IOError:\n            self.filesystem.raise_os_error(errno.ENOENT)\n        return file_obj.st_ctime", "docstring": "Returns the creation time of the fake file.\n\nArgs:\npath: the path to fake file.\n\nReturns:\n(int, float) the creation time of the fake file in number of\nseconds since the epoch.\n\nRaises:\nOSError: if the file does not exist.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):\n    outputs = self.layernorm_before_attention(hidden_states)\n    outputs = self.self_attention(outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache)\n    outputs, attn_weights, current_key_value = outputs\n    if self.dropout is not None:\n        outputs = self.dropout(outputs)\n    hidden_states = hidden_states + outputs\n    return (hidden_states, attn_weights, current_key_value)", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):\nInput of transformer block(self-attention block). It can be the raw embedding of a batch of sequences.\nattention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):\nAvoid invalid areas to participate in the calculation of self-attention.\nposition_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):\nProvide positional information to self-attention block.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers.\npast_key_values (`Tuple(torch.FloatTensor)`, *optional*):\nCached past key and value projection states.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).", "source": "github-repos"}
{"code": "def create_route53_zone(client, zone_name):\n    \n    if not zone_name.endswith(\".\"):\n        zone_name += \".\"\n    zone_id = get_or_create_hosted_zone(client, zone_name)\n    old_soa = get_soa_record(client, zone_id, zone_name)\n\n    \n    if old_soa.text.min_ttl == \"300\":\n        return zone_id\n\n    new_soa = copy.deepcopy(old_soa)\n    logger.debug(\"Updating negative caching value on zone %s to 300.\",\n                 zone_name)\n    new_soa.text.min_ttl = \"300\"\n    client.change_resource_record_sets(\n        HostedZoneId=zone_id,\n        ChangeBatch={\n            \"Comment\": \"Update SOA min_ttl to 300.\",\n            \"Changes\": [\n                {\n                    \"Action\": \"UPSERT\",\n                    \"ResourceRecordSet\": {\n                        \"Name\": zone_name,\n                        \"Type\": \"SOA\",\n                        \"TTL\": old_soa.ttl,\n                        \"ResourceRecords\": [\n                            {\n                                \"Value\": str(new_soa.text)\n                            }\n                        ]\n                    }\n                },\n            ]\n        }\n    )\n    return zone_id", "docstring": "Creates the given zone_name if it doesn't already exists.\n\nAlso sets the SOA negative caching TTL to something short (300 seconds).\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\nstring: The zone id returned from AWS for the existing, or newly\ncreated zone.", "source": "juraj-google-style"}
{"code": "def test_sample_paths_1d(self, use_batch, watch_params, supply_normal_draws, random_type):\n    dtype = tf.float64\n    mu = 0.2\n    a = 0.4\n    b = 0.33\n\n    def drift_fn(t, x):\n        drift = mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)\n        return drift\n\n    def vol_fn(t, x):\n        del x\n        if not use_batch:\n            return (a * t + b) * tf.ones([1, 1], dtype=t.dtype)\n        else:\n            return (a * t + b) * tf.ones([2, 1, 1, 1], dtype=t.dtype)\n    times = np.array([0.0, 0.1, 0.21, 0.32, 0.43, 0.55])\n    num_samples = 10000\n    if supply_normal_draws:\n        normal_draws = tf.random.stateless_normal(shape=[2, 5000, 55, 1], seed=[1, 42], dtype=dtype)\n        normal_draws = tf.concat([normal_draws, -normal_draws], axis=1)\n    else:\n        normal_draws = None\n    if use_batch:\n        x0 = np.array([[[0.1]], [[0.1]]])\n    else:\n        x0 = np.array([0.1])\n    paths = self.evaluate(euler_sampling.sample(dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, random_type=random_type, normal_draws=normal_draws, watch_params=watch_params, time_step=0.01, seed=[1, 42], dtype=dtype))\n    paths_no_zero = self.evaluate(euler_sampling.sample(dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, times=times[1:], num_samples=num_samples, initial_state=x0, random_type=random_type, normal_draws=normal_draws, time_step=0.01, seed=[1, 42], dtype=dtype))\n    with self.subTest('CorrectShape'):\n        if not use_batch:\n            self.assertAllClose(paths.shape, (num_samples, 6, 1), atol=0)\n        else:\n            self.assertAllClose(paths.shape, (2, num_samples, 6, 1), atol=0)\n    if not use_batch:\n        means = np.mean(paths, axis=0).reshape(-1)\n    else:\n        means = np.mean(paths, axis=1).reshape([2, 1, 6])\n    expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)\n    with self.subTest('ExpectedResult'):\n        self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)\n    if not use_batch:\n        with self.subTest('IncludeInitialState'):\n            self.assertAllClose(paths[:, 1:, :], paths_no_zero)", "docstring": "Tests path properties for 1-dimentional Ito process.\n\nWe construct the following Ito process.\n\n````\ndX = mu * sqrt(t) * dt + (a * t + b) dW\n````\n\nFor this process expected value at time t is x_0 + 2/3 * mu * t^1.5 .\nArgs:\nuse_batch: Test parameter to specify if we are testing the batch of Euler\nsampling.\nwatch_params: Triggers custom for loop.\nsupply_normal_draws: Supply normal draws.\nrandom_type: `RandomType` of the sampled normal draws.", "source": "github-repos"}
{"code": "def sync(self, since=None, timeout_ms=30000, filter=None, full_state=None, set_presence=None):\n    request = {'timeout': int(timeout_ms)}\n    if since:\n        request['since'] = since\n    if filter:\n        request['filter'] = filter\n    if full_state:\n        request['full_state'] = json.dumps(full_state)\n    if set_presence:\n        request['set_presence'] = set_presence\n    return self._send('GET', '/sync', query_params=request, api_path=MATRIX_V2_API_PATH)", "docstring": "Perform a sync request.\n\nArgs:\nsince (str): Optional. A token which specifies where to continue a sync from.\ntimeout_ms (int): Optional. The time in milliseconds to wait.\nfilter (int|str): Either a Filter ID or a JSON string.\nfull_state (bool): Return the full state for every room the user has joined\nDefaults to false.\nset_presence (str): Should the client be marked as \"online\" or\" offline\"", "source": "codesearchnet"}
{"code": "def has_abiext(self, ext, single_file=True):\n    if (ext != 'abo'):\n        ext = (ext if ext.startswith('_') else ('_' + ext))\n    files = []\n    for f in self.list_filepaths():\n        if ((ext == '_DDB') and f.endswith('.nc')):\n            continue\n        if ((ext == '_MDF') and (not f.endswith('.nc'))):\n            continue\n        if ((ext == '_DDK') and f.endswith('.nc')):\n            continue\n        if (f.endswith(ext) or f.endswith((ext + '.nc'))):\n            files.append(f)\n    if (not files):\n        files = [f for f in self.list_filepaths() if fnmatch(f, ('*%s*' % ext))]\n    if (not files):\n        return ''\n    if ((len(files) > 1) and single_file):\n        raise ValueError((('Found multiple files with the same extensions:\\n %s\\n' % files) + 'Please avoid using multiple datasets!'))\n    return (files[0] if single_file else files)", "docstring": "Returns the absolute path of the ABINIT file with extension ext.\nSupport both Fortran files and netcdf files. In the later case,\nwe check whether a file with extension ext + \".nc\" is present\nin the directory. Returns empty string is file is not present.\n\nRaises:\n`ValueError` if multiple files with the given ext are found.\nThis implies that this method is not compatible with multiple datasets.", "source": "codesearchnet"}
{"code": "def truediv(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"truediv\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Divides this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the divide against this.\naxis: The axis to divide over.\nlevel: The Multilevel index level to apply divide over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the Divide applied.", "source": "juraj-google-style"}
{"code": "def deepcopy(original_obj):\n    if isinstance(original_obj, list):\n        return list((deepcopy(item) for item in original_obj))\n    elif isinstance(original_obj, dict):\n        return dict(((key, deepcopy(val)) for (key, val) in original_obj.items()))\n    else:\n        return original_obj", "docstring": "Creates a deep copy of an object with no crossed referenced lists or dicts,\nuseful when loading from yaml as anchors generate those cross-referenced\ndicts and lists\n\nArgs:\noriginal_obj(object): Object to deep copy\n\nReturn:\nobject: deep copy of the object", "source": "codesearchnet"}
{"code": "def value_to_pytd_def(self, node, v, name):\n    if isinstance(v, abstract.Module):\n        return pytd.Alias(name, pytd.Module(name, module_name=v.full_name))\n    elif isinstance(v, abstract.BoundFunction):\n        d = self.value_to_pytd_def(node, v.underlying, name)\n        assert isinstance(d, pytd.Function)\n        sigs = tuple((sig.Replace(params=sig.params[1:]) for sig in d.signatures))\n        return d.Replace(signatures=sigs)\n    elif isinstance(v, attr_overlay.AttrsBase):\n        ret = pytd.NamedType('typing.Callable')\n        md = metadata.to_pytd(v.to_metadata())\n        return pytd.Annotated(ret, (\"'pytype_metadata'\", md))\n    elif isinstance(v, abstract.PyTDFunction) and (not isinstance(v, typing_overlay.TypeVar)):\n        return pytd.Function(name=name, signatures=tuple((sig.pytd_sig for sig in v.signatures)), kind=v.kind, flags=pytd.MethodFlag.abstract_flag(v.is_abstract))\n    elif isinstance(v, abstract.InterpreterFunction):\n        return self._function_to_def(node, v, name)\n    elif isinstance(v, abstract.SimpleFunction):\n        return self._simple_func_to_def(node, v, name)\n    elif isinstance(v, (abstract.ParameterizedClass, abstract.Union)):\n        return pytd.Alias(name, v.to_pytd_type_of_instance(node))\n    elif isinstance(v, abstract.PyTDClass) and v.module:\n        return v.to_pytd_type(node)\n    elif isinstance(v, typed_dict.TypedDictClass):\n        return self._typed_dict_to_def(node, v, name)\n    elif isinstance(v, abstract.PyTDClass):\n        assert name != v.name\n        return pytd.Alias(name, pytd.NamedType(v.name))\n    elif isinstance(v, abstract.InterpreterClass):\n        if (v.official_name is None or name == v.official_name or v.official_name.endswith(f'.{name}')) and (not v.module):\n            return self._class_to_def(node, v, name)\n        else:\n            type_name = v.full_name if v.module else v.official_name\n            return pytd.Constant(name, pytd.GenericType(pytd.NamedType('builtins.type'), (pytd.NamedType(type_name),)))\n    elif isinstance(v, abstract.TYPE_VARIABLE_TYPES):\n        return self._type_variable_to_def(node, v, name)\n    elif isinstance(v, abstract.Unsolvable):\n        return pytd.Constant(name, v.to_pytd_type(node))\n    else:\n        raise NotImplementedError(v.__class__.__name__)", "docstring": "Get a PyTD definition for this object.\n\nArgs:\nnode: The node.\nv: The object.\nname: The object name.\n\nReturns:\nA PyTD definition.", "source": "github-repos"}
{"code": "def final_bearing(self, format='numeric'):\n    bearings = []\n    for segment in self:\n        if (len(segment) < 2):\n            bearings.append([])\n        else:\n            bearings.append(segment.final_bearing(format))\n    return bearings", "docstring": "Calculate final bearing between locations in segments.\n\nArgs:\nformat (str): Format of the bearing string to return\n\nReturns:\nlist of list of float: Groups of bearings between points in\nsegments", "source": "codesearchnet"}
{"code": "def difference(self, *others):\n    result = self.__copy__()\n    _elements = result._elements\n    _total = result._total\n    for other in map(self._as_multiset, others):\n        for (element, multiplicity) in other.items():\n            if (element in _elements):\n                old_multiplicity = _elements[element]\n                new_multiplicity = (old_multiplicity - multiplicity)\n                if (new_multiplicity > 0):\n                    _elements[element] = new_multiplicity\n                    _total -= multiplicity\n                else:\n                    del _elements[element]\n                    _total -= old_multiplicity\n    result._total = _total\n    return result", "docstring": "r\"\"\"Return a new multiset with all elements from the others removed.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms.difference('bc'))\n['a', 'a']\n\nYou can also use the ``-`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aabbbc')\n>>> sorted(ms - Multiset('abd'))\n['a', 'b', 'b', 'c']\n\nFor a variant of the operation which modifies the multiset in place see\n:meth:`difference_update`.\n\nArgs:\nothers: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].\n\nReturns:\nThe resulting difference multiset.", "source": "codesearchnet"}
{"code": "def power(self, n):\n        \n        if n > 0:\n            return super().power(n)\n        return PTM(SuperOp(self).power(n))", "docstring": "The matrix power of the channel.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nPTM: the matrix power of the SuperOp converted to a PTM channel.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "juraj-google-style"}
{"code": "def ddel_tasks(provider, user_ids=None, job_ids=None, task_ids=None, labels=None, create_time_min=None, create_time_max=None):\n    (deleted_tasks, error_messages) = provider.delete_jobs(user_ids, job_ids, task_ids, labels, create_time_min, create_time_max)\n    for msg in error_messages:\n        print(msg)\n    return deleted_tasks", "docstring": "Kill jobs or job tasks.\n\nThis function separates ddel logic from flag parsing and user output. Users\nof ddel who intend to access the data programmatically should use this.\n\nArgs:\nprovider: an instantiated dsub provider.\nuser_ids: a set of user ids who \"own\" the job(s) to delete.\njob_ids: a set of job ids to delete.\ntask_ids: a set of task ids to delete.\nlabels: a set of LabelParam, each must match the job(s) to be cancelled.\ncreate_time_min: a timezone-aware datetime value for the earliest create\ntime of a task, inclusive.\ncreate_time_max: a timezone-aware datetime value for the most recent create\ntime of a task, inclusive.\n\nReturns:\nlist of job ids which were deleted.", "source": "codesearchnet"}
{"code": "def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool=False):\n    group_tokens = self.norm_tokens(group_tokens)\n    image_tokens = self.norm_x(image_tokens)\n    projected_group_tokens = self.project_group_token(group_tokens)\n    projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)\n    new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)\n    new_image_tokens += projected_group_tokens\n    new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))\n    return (new_image_tokens, attention)", "docstring": "Args:\nimage_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels]\ngroup_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels]", "source": "github-repos"}
{"code": "def after_create_session(self, session, coord):\n    pass", "docstring": "Called when new TensorFlow session is created.\n\nThis is called to signal the hooks that a new session has been created. This\nhas two essential differences with the situation in which `begin` is called:\n\n* When this is called, the graph is finalized and ops can no longer be added\nto the graph.\n* This method will also be called as a result of recovering a wrapped\nsession, not only at the beginning of the overall session.\n\nArgs:\nsession: A TensorFlow Session that has been created.\ncoord: A Coordinator object which keeps track of all threads.", "source": "github-repos"}
{"code": "def _md5_file(fn, block_size=1048576):\n    \n    h = hashlib.md5()\n    with open(fn) as fp:\n        d = 1\n        while d:\n            d = fp.read(block_size)\n            h.update(d)\n    return h.hexdigest()", "docstring": "Builds the MD5 of a file block by block\n\nArgs:\nfn: File path\nblock_size: Size of the blocks to consider (default 1048576)\n\nReturns:\nFile MD5", "source": "juraj-google-style"}
{"code": "def tag(name, message, author=None):\n    cmd = 'git -c \"user.name={author.name}\" -c \"user.email={author.email}\" tag -a \"{name}\" -m \"{message}\"'.format(author=(author or latest_commit().author), name=name, message=message.replace('\"', '\\\\\"').replace('`', '\\\\`'))\n    shell.run(cmd)", "docstring": "Tag the current commit.\n\nArgs:\nname (str):\nThe tag name.\nmessage (str):\nThe tag message. Same as ``-m`` parameter in ``git tag``.\nauthor (Author):\nThe commit author. Will default to the author of the commit.\npretend (bool):\nIf set to **True** it will print the full ``git tag`` command\ninstead of actually executing it.", "source": "codesearchnet"}
{"code": "def _get_validator(name, schema=None, check_schema=True, validator_class=None, **validator_kwargs):\n    if (schema is None):\n        try:\n            schema = _SCHEMAS[name]\n        except KeyError:\n            raise SchemaValidationError('Valid schema name or schema must be provided.')\n    if (name not in _VALIDATORS):\n        if (validator_class is None):\n            validator_class = jsonschema.validators.validator_for(schema)\n        _VALIDATORS[name] = validator_class(schema, **validator_kwargs)\n    validator = _VALIDATORS[name]\n    if check_schema:\n        validator.check_schema(schema)\n    return validator", "docstring": "Generate validator for JSON schema.\n\nArgs:\nname (str): Name for validator. Will be validator key in\n`_VALIDATORS` dict.\nschema (dict): JSON schema `dict`. If not provided searches for schema\nin `_SCHEMAS`.\ncheck_schema (bool): Verify schema is valid.\nvalidator_class (jsonschema.IValidator): jsonschema IValidator instance.\nDefault behavior is to determine this from the schema `$schema`\nfield.\n**validator_kwargs (dict): Additional keyword arguments for validator.\n\nReturn:\njsonschema.IValidator: Validator for JSON schema.\n\nRaises:\nSchemaValidationError: Raised if validation fails.", "source": "codesearchnet"}
{"code": "def deprecate_entity(self, ilx_id: str, note=None) -> None:\n    (term_id, term_version) = [(d['id'], d['version']) for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0]\n    annotations = [{'tid': term_id, 'annotation_tid': '306375', 'value': 'True', 'term_version': term_version, 'annotation_term_version': '1'}]\n    if note:\n        editor_note = {'tid': term_id, 'annotation_tid': '306378', 'value': note, 'term_version': term_version, 'annotation_term_version': '1'}\n        annotations.append(editor_note)\n    self.addAnnotations(annotations, crawl=True, _print=False)\n    print(annotations)", "docstring": "Tagged term in interlex to warn this term is no longer used\n\nThere isn't an proper way to delete a term and so we have to mark it so I can\nextrapolate that in mysql/ttl loads.\n\nArgs:\nterm_id: id of the term of which to be deprecated\nterm_version: version of the term of which to be deprecated\n\nExample: deprecateTerm('ilx_0101431', '6')", "source": "codesearchnet"}
{"code": "def _create_session(self, username, password):\n    session = requests.Session()\n    session.verify = False\n    try:\n        response = session.get(self.host_url)\n    except requests.exceptions.ConnectionError:\n        return False\n    soup = BeautifulSoup(response.text, 'html.parser')\n    csrf_token = soup.find('input', dict(name='csrf_token'))['value']\n    login_data = dict(username=username, password=password)\n    session.headers.update({'x-csrftoken': csrf_token, 'referer': self.host_url})\n    _ = session.post('{0:s}/login/'.format(self.host_url), data=login_data)\n    return session", "docstring": "Create HTTP session.\n\nArgs:\nusername (str): Timesketch username\npassword (str): Timesketch password\n\nReturns:\nrequests.Session: Session object.", "source": "codesearchnet"}
{"code": "def _check_condition(self, name, condition):\n        \n        \n        if condition is not None and condition[0].name not in self.cregs:\n            raise DAGCircuitError(\"invalid creg in condition for %s\" % name)", "docstring": "Verify that the condition is valid.\n\nArgs:\nname (string): used for error reporting\ncondition (tuple or None): a condition tuple (ClassicalRegister,int)\n\nRaises:\nDAGCircuitError: if conditioning on an invalid register", "source": "juraj-google-style"}
{"code": "def _build_mac_signature_key_information(self, value):\n        \n        if value is None:\n            return None\n        if not isinstance(value, dict):\n            raise TypeError(\n                \"MAC/signature key information must be a dictionary.\"\n            )\n\n        cryptographic_parameters = value.get('cryptographic_parameters')\n        if cryptographic_parameters:\n            cryptographic_parameters = self._build_cryptographic_parameters(\n                cryptographic_parameters\n            )\n        mac_signature_key_information = cobjects.MACSignatureKeyInformation(\n            unique_identifier=value.get('unique_identifier'),\n            cryptographic_parameters=cryptographic_parameters\n        )\n        return mac_signature_key_information", "docstring": "Build an MACSignatureKeyInformation struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nMACSignatureKeyInformation struct.\n\nReturns:\nMACSignatureInformation: a MACSignatureKeyInformation struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "juraj-google-style"}
{"code": "def new_type(self, name: str | pytd_node.Node, parameters: list[pytd.Type] | None=None) -> pytd.Type:\n    base_type = self.resolve_type(name)\n    if not isinstance(base_type, pytd.NamedType):\n        type_params = self.type_params + [pytd.TypeParameter('typing.AnyStr')]\n        base_type = base_type.Visit(_InsertTypeParameters(type_params))\n        try:\n            resolved_type = visitors.MaybeSubstituteParameters(base_type, parameters)\n        except ValueError as e:\n            raise _ParseError(str(e)) from e\n        if resolved_type:\n            return resolved_type\n    if parameters is not None:\n        if len(parameters) > 1 and isinstance(base_type, pytd.NamedType) and (base_type.name == 'typing.Optional'):\n            raise _ParseError(f'Too many options to {base_type.name}')\n        return self._parameterized_type(base_type, parameters)\n    else:\n        if isinstance(base_type, pytd.NamedType) and base_type.name in _TYPING_SETS:\n            raise _ParseError(f'Missing options to {base_type.name}')\n        return base_type", "docstring": "Return the AST for a type.\n\nArgs:\nname: The name of the type.\nparameters: Sequence of type parameters.\n\nReturns:\nA pytd type node.\n\nRaises:\nParseError: if the wrong number of parameters is supplied for the\nbase_type - e.g., 2 parameters to Optional or no parameters to Union.", "source": "github-repos"}
{"code": "def _add_thousand_g(self, variant_obj, info_dict):\n        \n        thousand_g = info_dict.get('1000GAF')\n        if thousand_g:\n            logger.debug(\"Updating thousand_g to: {0}\".format(\n                thousand_g))\n            variant_obj.thousand_g = float(thousand_g)\n            variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))", "docstring": "Add the thousand genomes frequency\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ninfo_dict (dict): A info dictionary", "source": "juraj-google-style"}
{"code": "def find(pattern, path=os.path.curdir, recursive=False):\n    \n    root = realpath(path)\n\n    Finder = lambda item: regex.is_regex(pattern) \\\n                    and pattern.match(item) or (pattern == item)\n\n    if recursive:\n        for base, dirs, files in os.walk(root, topdown=True):\n            for segment in itertools.chain(filter(Finder, files), filter(Finder, dirs)):\n                yield FS(os.path.join(base, segment))\n\n    else:\n        for segment in filter(Finder, os.listdir(root)):\n            yield(os.path.join(root, segment))", "docstring": "Find absolute file/folder paths with the given ``re`` pattern.\n\nArgs:\n* pattern: search pattern, support both string (exact match) and `re` pattern.\n* path: root path to start searching, default is current working directory.\n* recursive: whether to recursively find the matched items from `path`, False by default\n\nReturns:\nGenerator of the matched items of Files/Folders.", "source": "juraj-google-style"}
{"code": "def write_data(num_lines, no_data=False, directory=None, prefix=tempfile.template, eol=EOL.LF):\n    all_data = []\n    with tempfile.NamedTemporaryFile(delete=False, dir=directory, prefix=prefix) as f:\n        sep_values = [b'\\n', b'\\r\\n']\n        for i in range(num_lines):\n            data = b'' if no_data else b'line' + str(i).encode()\n            all_data.append(data)\n            if eol == EOL.LF:\n                sep = sep_values[0]\n            elif eol == EOL.CRLF:\n                sep = sep_values[1]\n            elif eol == EOL.MIXED:\n                sep = sep_values[i % len(sep_values)]\n            elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:\n                sep = b'' if i == num_lines - 1 else sep_values[0]\n            else:\n                raise ValueError('Received unknown value %s for eol.' % eol)\n            f.write(data + sep)\n        return (f.name, all_data)", "docstring": "Writes test data to a temporary file.\n\nArgs:\nnum_lines (int): The number of lines to write.\nno_data (bool): If :data:`True`, empty lines will be written, otherwise\neach line will contain a concatenation of b'line' and the line number.\ndirectory (str): The name of the directory to create the temporary file in.\nprefix (str): The prefix to use for the temporary file.\neol (int): The line ending to use when writing.\n:class:`~apache_beam.io.filebasedsource_test.EOL` exposes attributes that\ncan be used here to define the eol.\n\nReturns:\nTuple[str, List[bytes]]: A tuple of the filename and a list of the written\ndata.", "source": "github-repos"}
{"code": "def repr_names(self, callself_repr: 'Callable[[cfg.Variable], str] | None'=None) -> Sequence[str]:\n    callself_repr = callself_repr or (lambda v: v.name)\n    if self._callself and self._callself.bindings:\n        callself_names = [callself_repr(v) for v in self._callself.data]\n    else:\n        callself_names = ['<class>']\n    underlying = self.underlying.name\n    if underlying.count('.') > 0:\n        underlying = underlying.split('.', 1)[-1]\n    return [callself + '.' + underlying for callself in callself_names]", "docstring": "Names to use in the bound function's string representation.\n\nThis function can return multiple names because there may be multiple\nbindings in callself.\n\nArgs:\ncallself_repr: Optionally, a repr function for callself.\n\nReturns:\nA non-empty iterable of string names.", "source": "github-repos"}
{"code": "def GetFilename(self):\n    if (not self._file_entry):\n        return None\n    data_stream = getattr(self._file_entry.path_spec, 'data_stream', None)\n    if data_stream:\n        return '{0:s}:{1:s}'.format(self._file_entry.name, data_stream)\n    return self._file_entry.name", "docstring": "Retrieves the name of the active file entry.\n\nReturns:\nstr: name of the active file entry or None.", "source": "codesearchnet"}
{"code": "def get_shape(self) -> tensor_shape.TensorShape:\n    return self._dense_shape_default", "docstring": "Get the `TensorShape` representing the shape of the dense tensor.\n\nReturns:\nA `TensorShape` object.", "source": "github-repos"}
{"code": "def labelset_heads(self, label):\n        \n        _eps = self._eps\n        _vars = self._vars\n        _hcons = self._hcons\n        nodeids = {nodeid: _eps[nodeid][3].get(IVARG_ROLE, None)\n                for nodeid in _vars[label]['refs']['LBL']}\n        if len(nodeids) <= 1:\n            return list(nodeids)\n\n        scope_sets = {}\n        for nid in nodeids:\n            scope_sets[nid] = _ivs_in_scope(nid, _eps, _vars, _hcons)\n\n        out = {}\n        for n in nodeids:\n            out[n] = 0\n            for role, val in _eps[n][3].items():\n                if role == IVARG_ROLE or role == CONSTARG_ROLE:\n                    continue\n                elif any(val in s for n2, s in scope_sets.items() if n2 != n):\n                    out[n] += 1\n\n        candidates = [n for n, out_deg in out.items() if out_deg == 0]\n        rank = {}\n        for n in candidates:\n            iv = nodeids[n]\n            pred = _eps[n][1]\n            if iv in _vars and self.nodeid(iv, quantifier=True) is not None:\n                rank[n] = 0\n            elif pred.is_quantifier():\n                rank[n] = 0\n            elif pred.type == Pred.ABSTRACT:\n                rank[n] = 2\n            else:\n                rank[n] = 1\n\n        return sorted(candidates, key=lambda n: rank[n])", "docstring": "Return the heads of the labelset selected by *label*.\n\nArgs:\nlabel: the label from which to find head nodes/EPs.\nReturns:\nAn iterable of nodeids.", "source": "juraj-google-style"}
{"code": "class ReadAllFromBigQuery(PTransform):\n    COUNTER = 0\n\n    def __init__(self, gcs_location: Union[str, ValueProvider]=None, validate: bool=False, kms_key: str=None, temp_dataset: Union[str, DatasetReference]=None, bigquery_job_labels: Dict[str, str]=None, query_priority: str=BigQueryQueryPriority.BATCH):\n        if gcs_location:\n            if not isinstance(gcs_location, (str, ValueProvider)):\n                raise TypeError('%s: gcs_location must be of type string or ValueProvider; got %r instead' % (self.__class__.__name__, type(gcs_location)))\n        self.gcs_location = gcs_location\n        self.validate = validate\n        self.kms_key = kms_key\n        self.bigquery_job_labels = bigquery_job_labels\n        self.temp_dataset = temp_dataset\n        self.query_priority = query_priority\n\n    def expand(self, pcoll):\n        job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name\n        project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project\n        unique_id = str(uuid.uuid4())[0:10]\n        try:\n            step_name = self.label\n        except AttributeError:\n            step_name = 'ReadAllFromBigQuery_%d' % ReadAllFromBigQuery.COUNTER\n            ReadAllFromBigQuery.COUNTER += 1\n        sources_to_read, cleanup_locations = pcoll | beam.ParDo(_BigQueryReadSplit(options=pcoll.pipeline.options, gcs_location=self.gcs_location, validate=self.validate, bigquery_job_labels=self.bigquery_job_labels, job_name=job_name, step_name=step_name, unique_id=unique_id, kms_key=self.kms_key, project=project, temp_dataset=self.temp_dataset, query_priority=self.query_priority)).with_outputs('location_to_cleanup', main='files_to_read')\n        return sources_to_read | SDFBoundedSourceReader(data_to_display=self.display_data()) | _PassThroughThenCleanup(beam.pvalue.AsIter(cleanup_locations))", "docstring": "Read data from BigQuery.\n\nPTransform:ReadFromBigQueryRequest->Rows\n\nThis PTransform uses a BigQuery export job to take a snapshot of the table\non GCS, and then reads from each produced file. Data is exported into\na new subdirectory for each export using UUIDs generated in\n`ReadFromBigQueryRequest` objects.\n\nIt is recommended not to use this PTransform for streaming jobs on\nGlobalWindow, since it will not be able to cleanup snapshots.\n\nArgs:\ngcs_location (str): The name of the Google Cloud Storage\nbucket where the extracted table should be written as a string. If\n:data:`None`, then the temp_location parameter is used.\nvalidate (bool): If :data:`True`, various checks will be done when source\ngets initialized (e.g., is table present?). Set this to :data:`False`\nif the BigQuery export method is slow due to checking file existence.\nkms_key (str): Experimental. Optional Cloud KMS key name for use when\ncreating new temporary tables.", "source": "github-repos"}
{"code": "def has_checked_field(self, locator, **kwargs):\n    kwargs['checked'] = True\n    return self.has_selector('field', locator, **kwargs)", "docstring": "Checks if the page or current node has a radio button or checkbox with the given label,\nvalue, or id, that is currently checked.\n\nArgs:\nlocator (str): The label, name, or id of a checked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it exists.", "source": "codesearchnet"}
{"code": "def pick(self, connections):\n    if (len(connections) == 1):\n        return connections[0]\n\n    def key(conn):\n        return (datetime.min if (conn.backoff_time is None) else conn.backoff_time)\n    return min(*connections, key=key)", "docstring": "Picks a connection with the earliest backoff time.\n\nAs a result, the first connection is picked\nfor as long as it has no backoff time.\nOtherwise, the connections are tried in a round robin fashion.\n\nArgs:\nconnections (:obj:list): List of\n:class:`~bigchaindb_driver.connection.Connection` instances.", "source": "codesearchnet"}
{"code": "def _prepare_np_fun_name_and_fun(np_fun_name, np_fun):\n    if np_fun_name is not None:\n        assert isinstance(np_fun_name, str)\n    if np_fun is not None:\n        assert not isinstance(np_fun, str)\n    if np_fun is None:\n        assert np_fun_name is not None\n        try:\n            np_fun = getattr(np, str(np_fun_name))\n        except AttributeError:\n            np_fun = None\n    if np_fun_name is None:\n        assert np_fun is not None\n        np_fun_name = np_fun.__name__\n    return (np_fun_name, np_fun)", "docstring": "Mutually propagates information between `np_fun_name` and `np_fun`.\n\nIf one is None and the other is not, we'll try to make the former not None in\na best effort.\n\nArgs:\nnp_fun_name: name for the np_fun symbol. At least one of np_fun or\nnp_fun_name shoud be set.\nnp_fun: the numpy function whose docstring will be used.\n\nReturns:\nProcessed `np_fun_name` and `np_fun`.", "source": "github-repos"}
{"code": "def get_variables(self, include_nontrainable=False):\n    if include_nontrainable:\n        return [self.all_variables[key] for key in sorted(self.all_variables)]\n    else:\n        return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the baseline.\n\nReturns:\nList of variables", "source": "codesearchnet"}
{"code": "def _add_partition(self, connection, partition):\n    logger.debug('Creating foreign table for partition.\\n    partition: {}'.format(partition.name))\n    with connection.cursor() as cursor:\n        postgres_med.add_partition(cursor, partition.datafile, partition.vid)", "docstring": "Creates FDW for the partition.\n\nArgs:\nconnection:\npartition (orm.Partition):", "source": "codesearchnet"}
{"code": "def _Parse(self, template):\n    \n\n    if not template:\n      raise TextFSMTemplateError('Null template.')\n\n    \n    self._ParseFSMVariables(template)\n\n    \n    while self._ParseFSMState(template):\n      pass\n\n    \n    self._ValidateFSM()", "docstring": "Parses template file for FSM structure.\n\nArgs:\ntemplate: Valid template file.\n\nRaises:\nTextFSMTemplateError: If template file syntax is invalid.", "source": "juraj-google-style"}
{"code": "def train(cluster_info, cluster_meta, feed_timeout=600, qname='input'):\n\n    def _train(iter):\n        mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())\n        try:\n            queue = mgr.get_queue(qname)\n            equeue = mgr.get_queue('error')\n        except (AttributeError, KeyError):\n            msg = \"Queue '{}' not found on this node, check for exceptions on other nodes.\".format(qname)\n            raise Exception(msg)\n        state = str(mgr.get('state'))\n        logging.info('mgr.state={0}'.format(state))\n        terminating = (state == \"'terminating'\")\n        if terminating:\n            logging.info('mgr is terminating, skipping partition')\n            count = sum((1 for item in iter))\n            logging.info('Skipped {0} items from partition'.format(count))\n        else:\n            logging.info('Feeding partition {0} into {1} queue {2}'.format(iter, qname, queue))\n            count = 0\n            for item in iter:\n                count += 1\n                queue.put(item, block=True)\n            joinThr = Thread(target=queue.join)\n            joinThr.start()\n            timeout = feed_timeout\n            while joinThr.isAlive():\n                if (not equeue.empty()):\n                    e_str = equeue.get()\n                    equeue.task_done()\n                    raise Exception(('exception in worker:\\n' + e_str))\n                time.sleep(1)\n                timeout -= 1\n                if (timeout <= 0):\n                    raise Exception('Timeout while feeding partition')\n            logging.info('Processed {0} items in partition'.format(count))\n        if (not terminating):\n            state = str(mgr.get('state'))\n            terminating = (state == \"'terminating'\")\n            if terminating:\n                try:\n                    logging.info('TFSparkNode: requesting stop')\n                    client = reservation.Client(cluster_meta['server_addr'])\n                    client.request_stop()\n                    client.close()\n                except Exception as e:\n                    logging.debug('Error while requesting stop: {0}'.format(e))\n        return [terminating]\n    return _train", "docstring": "Feeds Spark partitions into the shared multiprocessing.Queue.\n\nArgs:\n:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)\n:cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc)\n:feed_timeout: number of seconds after which data feeding times out (600 sec default)\n:qname: *INTERNAL_USE*\n\nReturns:\nA dataRDD.mapPartitions() function", "source": "codesearchnet"}
{"code": "def check_graph_consistency(tensor=None, method='add_loss', force_raise=False):\n    if force_raise or (ops.executing_eagerly_outside_functions() and hasattr(tensor, 'graph') and tensor.graph.is_control_flow_graph):\n        if method == 'activity_regularizer':\n            bad_example = \"\\n      class TestModel(tf.keras.Model):\\n\\n        def __init__(self):\\n          super(TestModel, self).__init__(name='test_model')\\n          self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')\\n\\n        def call(self, x, training=None):\\n          if training:\\n            return self.dense(x)\\n          else:\\n            return self.dense(x)\\n      \"\n            correct_example = \"\\n      class TestModel(tf.keras.Model):\\n\\n        def __init__(self):\\n          super(TestModel, self).__init__(name='test_model')\\n          self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')\\n\\n        def call(self, x, training=None):\\n          return self.dense(x)\\n      \"\n            raise RuntimeError('You are using a layer with `activity_regularizer` in a control flow branch, e.g.:\\n{bad_example}\\nThis is currently not supported. Please move your call to the layer with `activity_regularizer` out of the control flow branch, e.g.:\\n{correct_example}\\nYou can also resolve this by marking your outer model/layer dynamic (eager-only) by passing `dynamic=True` to the layer constructor. Any kind of control flow is supported with dynamic layers. Note that using `dynamic=True` requires you to implement static shape inference in the `compute_output_shape(input_shape)` method.'.format(bad_example=bad_example, correct_example=correct_example))\n        if method == 'add_metric':\n            bad_example = \"\\n      def call(self, inputs, training=None):\\n        if training:\\n          metric = compute_metric(inputs)\\n          self.add_metric(metric, name='my_metric', aggregation='mean')\\n        return inputs\\n      \"\n            correct_example = \"\\n      def call(self, inputs, training=None):\\n        if training:\\n          metric = compute_metric(inputs)\\n        else:\\n          metric = 0.\\n        self.add_metric(metric, name='my_metric', aggregation='mean')\\n        return inputs\\n      \"\n        elif method == 'add_loss':\n            bad_example = '\\n      def call(self, inputs, training=None):\\n        if training:\\n          loss = compute_loss(inputs)\\n          self.add_loss(loss)\\n        return inputs\\n      '\n            correct_example = '\\n      def call(self, inputs, training=None):\\n        if training:\\n          loss = compute_loss(inputs)\\n        else:\\n          loss = 0.\\n        self.add_loss(loss)\\n        return inputs\\n      '\n        else:\n            bad_example = '\\n      def call(self, inputs, training=None):\\n        if training:\\n          self.add_update(self.w.assign_add(1))\\n        return inputs\\n      '\n            correct_example = '\\n      def call(self, inputs, training=None):\\n        if training:\\n          increment = 1\\n        else:\\n          increment = 0\\n        self.add_update(self.w.assign_add(increment))\\n        return inputs\\n      '\n        raise RuntimeError('You are using the method `{method}` in a control flow branch in your layer, e.g.:\\n{bad_example}\\nThis is not currently supported. 
Please move your call to {method} out of the control flow branch, e.g.:\\n{correct_example}\\nYou can also resolve this by marking your layer as dynamic (eager-only) by passing `dynamic=True` to the layer constructor. Any kind of control flow is supported with dynamic layers. Note that using `dynamic=True` requires you to implement static shape inference in the `compute_output_shape(input_shape)` method.'.format(method=method, bad_example=bad_example, correct_example=correct_example))", "docstring": "Checks that tensors passed to `add_*` method match the Keras graph.\n\nWhen one of the `add_*` method is called inside a V2 conditional branch,\nthe underlying tensor gets created in a FuncGraph managed by control_flow_v2.\nWe need to raise clear error messages in such cases.\n\nArgs:\ntensor: Tensor to check, or `False` if it is known that an error\nshould be raised.\nmethod: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.\nforce_raise: If an error should be raised regardless of `tensor`.\n\nRaises:\nRuntimeError: In case of an out-of-graph tensor.", "source": "github-repos"}
{"code": "def _SoftmaxGrad(op: ops.Operation, grad_softmax):\n    softmax = op.outputs[0]\n    sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)\n    return (grad_softmax - sum_channels) * softmax", "docstring": "The derivative of the softmax nonlinearity.\n\nWe assume that probs is of shape [batch_size * dim]\nThe formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').\nThis matrix is diagonal minus a rank one matrix, so it is easy to implement\nas follows:\n\ngrad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax\n\nArgs:\nop: the Softmax op.\ngrad_softmax:  the tensor representing the gradient w.r.t. the softmax\noutput.\n\nReturns:\ngradient w.r.t the input to the softmax", "source": "github-repos"}
{"code": "def process_file(self, path):\n        \n        if self._config.verbose:\n            self._logger.info('Processing file \"%s\"', path)\n\n        output_path = '%s%s' % (path, BATCH_EXTENSION)\n\n        with open(output_path, 'w') as file:\n            for line in lines_generator(path):\n                file.write('%s\\n' % self._cucco.normalize(\n                           line.encode().decode('utf-8')))\n\n        self._logger.debug('Created file \"%s\"', output_path)", "docstring": "Process a file applying normalizations.\n\nGet a file as input and generate a new file with the\nresult of applying normalizations to every single line\nin the original file. The extension for the new file\nwill be the one defined in BATCH_EXTENSION.\n\nArgs:\npath: Path to the file.", "source": "juraj-google-style"}
{"code": "def ParseMessage(self, parser_mediator, key, date_time, tokens):\n    if (key != 'task_run'):\n        raise ValueError('Unknown grammar key: {0:s}'.format(key))\n    event_data = CronTaskRunEventData()\n    event_data.body = tokens.get('body', None)\n    event_data.command = tokens.get('command', None)\n    event_data.hostname = tokens.get('hostname', None)\n    event_data.offset = 0\n    event_data.pid = tokens.get('pid', None)\n    event_data.reporter = tokens.get('reporter', None)\n    event_data.severity = tokens.get('severity', None)\n    event_data.username = tokens.get('username', None)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a syslog body that matched one of defined grammars.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the matching grammar.\ndate_time (dfdatetime.DateTimeValues): date and time values.\ntokens (dict[str, str]): tokens derived from a syslog message based on\nthe defined grammar.\n\nRaises:\nValueError: If an unknown key is provided.", "source": "codesearchnet"}
{"code": "def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):\n        \n        if shutterLevel is None:\n            shutterLevel = self.shutterLevel\n        data = {\n            \"channelIndex\": 1,\n            \"deviceId\": self.id,\n            \"slatsLevel\": slatsLevel,\n            \"shutterLevel\": shutterLevel,\n        }\n        return self._restCall(\"device/control/setSlatsLevel\", json.dumps(data))", "docstring": "sets the slats and shutter level\n\nArgs:\nslatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed,\nshutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value\nReturns:\nthe result of the _restCall", "source": "juraj-google-style"}
{"code": "def AddService(self, new_service):\n    \n    for service in self._services:\n      if new_service == service:\n        \n        \n        service.sources.append(new_service.sources[0])\n        return\n\n    \n    \n    self._services.append(new_service)", "docstring": "Add a new service to the list of ones we know about.\n\nArgs:\nnew_service (WindowsService): the service to add.", "source": "juraj-google-style"}
{"code": "def ParseGenericRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = WindowsTimelineGenericEventData()\n    payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))\n    payload_json_string = payload_json_bytes.decode('utf-8')\n    appid_entries_string = self._GetRowValue(query_hash, row, 'AppId')\n    payload = json.loads(payload_json_string)\n    appid_entries = json.loads(appid_entries_string)\n    package_id_locations = ['packageId', 'x_exe_path', 'windows_win32', 'windows_universal', 'alternateId']\n    for location in package_id_locations:\n        for entry in appid_entries:\n            if ((entry['platform'] == location) and (entry['application'] != '')):\n                event_data.package_identifier = entry['application']\n                break\n        if (event_data.package_identifier is None):\n            break\n    if ('description' in payload):\n        event_data.description = payload['description']\n    else:\n        event_data.description = ''\n    if (('appDisplayName' in payload) and (payload['appDisplayName'] != '')):\n        event_data.application_display_name = payload['appDisplayName']\n    elif (('displayText' in payload) and (payload['displayText'] != '')):\n        event_data.application_display_name = payload['displayText']\n    timestamp = self._GetRowValue(query_hash, row, 'StartTime')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a generic windows timeline row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def plot_predictions_histogram(Y_ph, Y, title=None):\n    labels = list(set(Y).union(set(Y_ph)))\n    edges = [(x - 0.5) for x in range(min(labels), (max(labels) + 2))]\n    plt.hist([Y_ph, Y], bins=edges, label=['Predicted', 'Gold'])\n    ax = plt.gca()\n    ax.set_xticks(labels)\n    plt.xlabel('Label')\n    plt.ylabel('\n    plt.legend(loc='upper right')\n    if isinstance(title, str):\n        plt.title(title)\n    plt.show()", "docstring": "Plot a histogram comparing int predictions vs true labels by class\n\nArgs:\nY_ph: An [n] or [n, 1] np.ndarray of predicted int labels\nY: An [n] or [n, 1] np.ndarray of gold labels", "source": "codesearchnet"}
{"code": "def download_file(self, remote_filename, local_filename=None):\n        \n        status = 'Failed'\n        if local_filename is None:\n            local_filename = remote_filename\n\n        if not self.args.force and os.access(local_filename, os.F_OK):\n            if not self._confirm_overwrite(local_filename):\n                self._print_results(local_filename, 'Skipped')\n                return\n\n        url = '{}{}'.format(self.base_url, remote_filename)\n        r = requests.get(url, allow_redirects=True)\n        if r.ok:\n            open(local_filename, 'wb').write(r.content)\n            status = 'Success'\n        else:\n            self.handle_error('Error requesting: {}'.format(url), False)\n\n        \n        self._print_results(local_filename, status)", "docstring": "Download file from github.\n\nArgs:\nremote_filename (str): The name of the file as defined in git repository.\nlocal_filename (str, optional): Defaults to None. The name of the file as it should be\nbe written to local filesystem.", "source": "juraj-google-style"}
{"code": "def _get_valid_formats():\n    if NO_SOX:\n        return []\n    so = subprocess.check_output(['sox', '-h'])\n    if (type(so) is not str):\n        so = str(so, encoding='UTF-8')\n    so = so.split('\\n')\n    idx = [i for i in range(len(so)) if ('AUDIO FILE FORMATS:' in so[i])][0]\n    formats = so[idx].split(' ')[3:]\n    return formats", "docstring": "Calls SoX help for a lists of audio formats available with the current\ninstall of SoX.\n\nReturns:\n--------\nformats : list\nList of audio file extensions that SoX can process.", "source": "codesearchnet"}
{"code": "def _clean_query_string(q):\n    \n    q = q.replace(\"()\", \"\").strip()\n    if q.endswith(\"(\"):\n        q = q[:-1].strip()\n    \n    if q[-3:] == \"AND\" or q[-3:] == \"NOT\":\n        q = q[:-3]\n    elif q[-2:] == \"OR\":\n        q = q[:-2]\n\n    \n    while q.count(\"(\") > q.count(\")\"):\n        q += \")\"\n    while q.count(\")\") > q.count(\"(\"):\n        q = \"(\" + q\n\n    return q.strip()", "docstring": "Clean up a query string for searching.\n\nRemoves unmatched parentheses and joining operators.\n\nArguments:\nq (str): Query string to be cleaned\n\nReturns:\nstr: The clean query string.", "source": "juraj-google-style"}
{"code": "def __cloudflare_list_zones(self, *, account, **kwargs):\n    done = False\n    zones = []\n    page = 1\n    while (not done):\n        kwargs['page'] = page\n        response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)\n        info = response['result_info']\n        if (('total_pages' not in info) or (page == info['total_pages'])):\n            done = True\n        else:\n            page += 1\n        zones += response['result']\n    return zones", "docstring": "Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones\n\nArgs:\naccount (:obj:`CloudFlareAccount`): A CloudFlare Account object\n**kwargs (`dict`): Extra arguments to pass to the API endpoint\n\nReturns:\n`list` of `dict`", "source": "codesearchnet"}
{"code": "def _call(callable_obj, arg_names, namespace):\n    \n    arguments = {arg_name: getattr(namespace, arg_name)\n                 for arg_name in arg_names}\n    return callable_obj(**arguments)", "docstring": "Actually calls the callable with the namespace parsed from the command\nline.\n\nArgs:\ncallable_obj: a callable object\narg_names: name of the function arguments\nnamespace: the namespace object parsed from the command line", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    if not self.LINE_STRUCTURES:\n      raise errors.UnableToParseFile('Missing line structures.')\n\n    encoding = self._ENCODING or parser_mediator.codepage\n    text_reader = EncodedTextReader(\n        encoding, buffer_size=self.BUFFER_SIZE)\n\n    text_reader.Reset()\n\n    try:\n      text_reader.ReadLines(file_object)\n    except UnicodeDecodeError as exception:\n      raise errors.UnableToParseFile(\n          'Not a text file, with error: {0!s}'.format(exception))\n\n    if not self.VerifyStructure(parser_mediator, text_reader.lines):\n      raise errors.UnableToParseFile('Wrong file structure.')\n\n    \n    \n    for key, structure in self.LINE_STRUCTURES:\n      structure.parseWithTabs()\n\n\n    consecutive_line_failures = 0\n    \n    while text_reader.lines:\n      if parser_mediator.abort:\n        break\n\n      \n      tokens = None\n      start = 0\n      end = 0\n\n      key = None\n\n      index = None\n\n      \n      for index, (key, structure) in enumerate(self._line_structures):\n        try:\n          structure_generator = structure.scanString(\n              text_reader.lines, maxMatches=1)\n          parsed_structure = next(structure_generator, None)\n        except pyparsing.ParseException:\n          parsed_structure = None\n\n        if not parsed_structure:\n          continue\n\n        tokens, start, end = parsed_structure\n\n        \n        \n        if start == 0:\n          break\n\n      if tokens and start == 0:\n        \n        \n        if index is not None and index != 0:\n          key_structure = self._line_structures.pop(index)\n          self._line_structures.insert(0, key_structure)\n\n        try:\n          self.ParseRecord(parser_mediator, key, tokens)\n          consecutive_line_failures = 0\n        except (errors.ParseError, errors.TimestampError) as exception:\n          parser_mediator.ProduceExtractionWarning(\n              'unable to parse record: {0:s} with error: {1!s}'.format(\n                  key, exception))\n\n        text_reader.SkipAhead(file_object, end)\n\n      else:\n        odd_line = text_reader.ReadLine(file_object)\n        if odd_line:\n          if len(odd_line) > 80:\n            odd_line = '{0:s}...'.format(odd_line[:77])\n          parser_mediator.ProduceExtractionWarning(\n              'unable to parse log line: {0:s}'.format(repr(odd_line)))\n          consecutive_line_failures += 1\n          if (consecutive_line_failures >\n              self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):\n            raise errors.UnableToParseFile(\n                'more than {0:d} consecutive failures to parse lines.'.format(\n                    self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))\n      try:\n        text_reader.ReadLines(file_object)\n      except UnicodeDecodeError as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to read lines with error: {0!s}'.format(exception))", "docstring": "Parses a text file-like object using a pyparsing definition.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def validate(message, ssldir=None, **config):\n    for field in ['signature', 'certificate']:\n        if (field not in message):\n            _log.warn('No %s field found.', field)\n            return False\n        if (not isinstance(message[field], six.text_type)):\n            _log.error(('msg[%r] is not a unicode string' % field))\n            try:\n                message[field] = message[field].decode('utf-8')\n            except UnicodeError as e:\n                _log.error(\"Unable to decode the message '%s' field: %s\", field, str(e))\n                return False\n    signature = base64.b64decode(message['signature'])\n    certificate = base64.b64decode(message['certificate'])\n    message = fedmsg.crypto.strip_credentials(message)\n    ca_location = config.get('ca_cert_location', 'https:\n    crl_location = config.get('crl_location', 'https:\n    try:\n        (ca_certificate, crl) = utils.load_certificates(ca_location, crl_location)\n        _validate_signing_cert(ca_certificate, certificate, crl)\n    except (IOError, RequestException, X509StoreContextError) as e:\n        try:\n            (ca_certificate, crl) = utils.load_certificates(ca_location, crl_location, invalidate_cache=True)\n            _validate_signing_cert(ca_certificate, certificate, crl)\n        except (IOError, RequestException, X509StoreContextError) as e:\n            _log.error(str(e))\n            return False\n    try:\n        crypto_certificate = x509.load_pem_x509_certificate(certificate, default_backend())\n        crypto_certificate.public_key().verify(signature, fedmsg.encoding.dumps(message).encode('utf-8'), asymmetric.padding.PKCS1v15(), hashes.SHA1())\n    except InvalidSignature as e:\n        _log.error('message [{m}] has an invalid signature: {e}'.format(m=message, e=str(e)))\n        return False\n    common_name = crypto_certificate.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)\n    common_name = common_name[0]\n    routing_policy = config.get('routing_policy', {})\n    nitpicky = config.get('routing_nitpicky', False)\n    return utils.validate_policy(message.get('topic'), common_name.value, routing_policy, nitpicky=nitpicky)", "docstring": "Validate the signature on the given message.\n\nFour things must be true for the signature to be valid:\n\n1) The X.509 cert must be signed by our CA\n2) The cert must not be in our CRL.\n3) We must be able to verify the signature using the RSA public key\ncontained in the X.509 cert.\n4) The topic of the message and the CN on the cert must appear in the\n:ref:`conf-routing-policy` dict.\n\nArgs:\nmessage (dict): A signed message in need of validation. A signed message\ncontains the 'signature' and 'certificate' keys.\nssldir (str): The path to the directory containing PEM-encoded X.509\nkey pairs.\n\nReturns:\nbool: True of the message passes validation, False otherwise.", "source": "codesearchnet"}
{"code": "def Cleanse(obj, encoding='utf-8'):\n  \n  if isinstance(obj, int):\n    return obj\n  elif isinstance(obj, float):\n    if obj == _INFINITY:\n      return 'Infinity'\n    elif obj == _NEGATIVE_INFINITY:\n      return '-Infinity'\n    elif math.isnan(obj):\n      return 'NaN'\n    else:\n      return obj\n  elif isinstance(obj, bytes):\n    return tf.compat.as_text(obj, encoding)\n  elif isinstance(obj, (list, tuple)):\n    return [Cleanse(i, encoding) for i in obj]\n  elif isinstance(obj, set):\n    return [Cleanse(i, encoding) for i in sorted(obj)]\n  elif isinstance(obj, dict):\n    return {Cleanse(k, encoding): Cleanse(v, encoding) for k, v in obj.items()}\n  else:\n    return obj", "docstring": "Makes Python object appropriate for JSON serialization.\n\n- Replaces instances of Infinity/-Infinity/NaN with strings.\n- Turns byte strings into unicode strings.\n- Turns sets into sorted lists.\n- Turns tuples into lists.\n\nArgs:\nobj: Python data structure.\nencoding: Charset used to decode byte strings.\n\nReturns:\nUnicode JSON data structure.", "source": "juraj-google-style"}
{"code": "def _one_body_mapping(a_i, a_j, threshold=0.000001):\n        \n        pauli_list = []\n        for alpha in range(2):\n            for beta in range(2):\n                pauli_prod = Pauli.sgn_prod(a_i[alpha], a_j[beta])\n                coeff = 1.0/4 * pauli_prod[1] * np.power(-1j, alpha) * np.power(1j, beta)\n                pauli_term = [coeff, pauli_prod[0]]\n                if np.absolute(pauli_term[0]) > threshold:\n                    pauli_list.append(pauli_term)\n        return Operator(paulis=pauli_list)", "docstring": "Subroutine for one body mapping.\nArgs:\na_i (Pauli): pauli at index i\na_j (Pauli): pauli at index j\nthreshold: (float): threshold to remove a pauli\nReturns:\nOperator: Operator for those paulis", "source": "juraj-google-style"}
{"code": "def _Upgrade0To1(self, data):\n    subgraph = {}\n    for key_to_promote in ['tensors', 'operators', 'inputs', 'outputs']:\n        subgraph[key_to_promote] = data[key_to_promote]\n        del data[key_to_promote]\n    data['subgraphs'] = [subgraph]", "docstring": "Upgrade data from Version 0 to Version 1.\n\nChanges: Added subgraphs (which contains a subset of formally global\nentries).\n\nArgs:\ndata: Dictionary representing the TensorFlow lite data to be upgraded.\nThis will be modified in-place to be an upgraded version.", "source": "github-repos"}
{"code": "def check_web_config(config_fname):\n    print('Looking for config file at {0} ...'.format(config_fname))\n    config = RawConfigParser()\n    try:\n        config.readfp(open(config_fname))\n        return config\n    except IOError:\n        print(\"ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.\")\n        return None", "docstring": "Try to load the Django settings.\nIf this does not work, than settings file does not exist.\n\nReturns:\nLoaded configuration, or None.", "source": "codesearchnet"}
{"code": "def resolve_revision(self, dest, url, rev_options):\n    rev = rev_options.arg_rev\n    (sha, is_branch) = self.get_revision_sha(dest, rev)\n    if (sha is not None):\n        rev_options = rev_options.make_new(sha)\n        rev_options.branch_name = (rev if is_branch else None)\n        return rev_options\n    if (not looks_like_hash(rev)):\n        logger.warning(\"Did not find branch or tag '%s', assuming revision or ref.\", rev)\n    if (not rev.startswith('refs/')):\n        return rev_options\n    self.run_command((['fetch', '-q', url] + rev_options.to_args()), cwd=dest)\n    sha = self.get_revision(dest, rev='FETCH_HEAD')\n    rev_options = rev_options.make_new(sha)\n    return rev_options", "docstring": "Resolve a revision to a new RevOptions object with the SHA1 of the\nbranch, tag, or ref if found.\n\nArgs:\nrev_options: a RevOptions object.", "source": "codesearchnet"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates a `BuildTrigger` by its project ID and trigger ID. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsTriggersPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BuildTrigger) The response message.", "source": "github-repos"}
{"code": "def save_libsvm(X, y, path):\n    dump_svmlight_file(X, y, path, zero_based=False)", "docstring": "Save data as a LibSVM file.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector.\npath (str): Path to the CSV file to save data.", "source": "codesearchnet"}
{"code": "def _FormatSizeInUnitsOf1024(self, size):\n    \n    magnitude_1024 = 0\n    used_memory_1024 = float(size)\n    while used_memory_1024 >= 1024:\n      used_memory_1024 /= 1024\n      magnitude_1024 += 1\n\n    if 0 < magnitude_1024 <= 7:\n      return '{0:.1f} {1:s}'.format(\n          used_memory_1024, self._UNITS_1024[magnitude_1024])\n\n    return '{0:d} B'.format(size)", "docstring": "Represents a number of bytes in units of 1024.\n\nArgs:\nsize (int): size in bytes.\n\nReturns:\nstr: human readable string of the size.", "source": "juraj-google-style"}
{"code": "def _add_to_schema(self, field_name, schema):\n    super(ForeignKeyField, self)._add_to_schema(field_name, schema)\n    if self.get_field_value('convert_fks', default=True):\n        self.attribute = field_name.replace('_id', '')", "docstring": "Set the ``attribute`` attr to the field in question so this always\ngets deserialzed into the field name without ``_id``.\n\nArgs:\nfield_name (str): The name of the field (the attribute name being\nset in the schema).\nschema (marshmallow.Schema): The actual parent schema this field\nbelongs to.", "source": "codesearchnet"}
{"code": "def hum44(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[49] == '0':\n        return None\n\n    hm = bin2int(d[50:56]) * 100.0 / 64    \n\n    return round(hm, 1)", "docstring": "humidity\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nfloat: percentage of humidity, [0 - 100] %", "source": "juraj-google-style"}
{"code": "def _CheckPythonModuleVersion(self, module_name, module_object, version_property, minimum_version, maximum_version):\n    module_version = None\n    if (not version_property.endswith('()')):\n        module_version = getattr(module_object, version_property, None)\n    else:\n        version_method = getattr(module_object, version_property[:(- 2)], None)\n        if version_method:\n            module_version = version_method()\n    if (not module_version):\n        status_message = 'unable to determine version information for: {0:s}'.format(module_name)\n        return (False, status_message)\n    module_version = '{0!s}'.format(module_version)\n    module_version = self._VERSION_NUMBERS_REGEX.findall(module_version)[0]\n    if (module_version[(- 1)] == '.'):\n        module_version = module_version[:(- 1)]\n    try:\n        module_version_map = list(map(int, self._VERSION_SPLIT_REGEX.split(module_version)))\n    except ValueError:\n        status_message = 'unable to parse module version: {0:s} {1:s}'.format(module_name, module_version)\n        return (False, status_message)\n    if minimum_version:\n        try:\n            minimum_version_map = list(map(int, self._VERSION_SPLIT_REGEX.split(minimum_version)))\n        except ValueError:\n            status_message = 'unable to parse minimum version: {0:s} {1:s}'.format(module_name, minimum_version)\n            return (False, status_message)\n        if (module_version_map < minimum_version_map):\n            status_message = '{0:s} version: {1!s} is too old, {2!s} or later required'.format(module_name, module_version, minimum_version)\n            return (False, status_message)\n    if maximum_version:\n        try:\n            maximum_version_map = list(map(int, self._VERSION_SPLIT_REGEX.split(maximum_version)))\n        except ValueError:\n            status_message = 'unable to parse maximum version: {0:s} {1:s}'.format(module_name, maximum_version)\n            return (False, status_message)\n        if (module_version_map > maximum_version_map):\n            status_message = '{0:s} version: {1!s} is too recent, {2!s} or earlier required'.format(module_name, module_version, maximum_version)\n            return (False, status_message)\n    status_message = '{0:s} version: {1!s}'.format(module_name, module_version)\n    return (True, status_message)", "docstring": "Checks the version of a Python module.\n\nArgs:\nmodule_object (module): Python module.\nmodule_name (str): name of the Python module.\nversion_property (str): version attribute or function.\nminimum_version (str): minimum version.\nmaximum_version (str): maximum version.\n\nReturns:\ntuple: consists:\n\nbool: True if the Python module is available and conforms to\nthe minimum required version, False otherwise.\nstr: status message.", "source": "codesearchnet"}
{"code": "def load_disease_term(self, disease_obj):\n    LOG.debug('Loading disease term %s into database', disease_obj['_id'])\n    try:\n        self.disease_term_collection.insert_one(disease_obj)\n    except DuplicateKeyError as err:\n        raise IntegrityError('Disease term %s already exists in database'.format(disease_obj['_id']))\n    LOG.debug('Disease term saved')", "docstring": "Load a disease term into the database\n\nArgs:\ndisease_obj(dict)", "source": "codesearchnet"}
{"code": "def get_tag(self, name, params=None):\n        \n        return self.tag(name, action='GET', params=params)", "docstring": "Gets a tag from a Indicator/Group/Victim/Security Label\nArgs:\nname: The name of the tag\nparams:", "source": "juraj-google-style"}
{"code": "def from_features(cls, features, types):\n    params = cls()\n    if features:\n        for key in sorted(features.keys()):\n            feature = features[key]\n            if not isinstance(feature, tuple(types)):\n                raise ValueError(f\"Unsupported {type(feature).__name__} {feature} for key '{key}'\")\n            params._add_feature(key, feature)\n    params._validate()\n    return params", "docstring": "Builds _ParseOpParams for a given set of features and allowed types.\n\nArgs:\nfeatures: A `dict` mapping feature keys to objects of a type in `types`.\ntypes: Type of features to allow, among `FixedLenFeature`,\n`VarLenFeature`, `SparseFeature`, and `FixedLenSequenceFeature`.\n\nReturns:\nA `_ParseOpParams` containing the raw parameters for `gen_parsing_ops`.\n\nRaises:\nValueError: if `features` contains an item not in `types`, or an invalid\nfeature.\nValueError: if sparse and dense key sets intersect.\nValueError: if input lengths do not match up.", "source": "github-repos"}
{"code": "def download(self, folder=None):\n        \n        \n        \n        url = self.data.get('url', None)\n        if not url:\n            raise HDXError('No URL to download!')\n        logger.debug('Downloading %s' % url)\n        filename = self.data['name']\n        format = '.%s' % self.data['format']\n        if format not in filename:\n            filename = '%s%s' % (filename, format)\n        with Download(full_agent=self.configuration.get_user_agent()) as downloader:\n            path = downloader.download_file(url, folder, filename)\n            return url, path", "docstring": "Download resource store to provided folder or temporary folder if no folder supplied\n\nArgs:\nfolder (Optional[str]): Folder to download resource to. Defaults to None.\n\nReturns:\nTuple[str, str]: (URL downloaded, Path to downloaded file)", "source": "juraj-google-style"}
{"code": "def abort_collective_ops(self, code, message):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message)", "docstring": "Abort the collective ops.\n\nThis is intended to be used when a peer failure is detected, which allows\nthe user to handle the case instead of hanging. This aborts all on-going\ncollectives. After all subsequent collectives error immediately, and you\nneed to reset_context() to use collectives again.\n\nArgs:\ncode: a `tf.errors` error code.\nmessage: a string. The error message.", "source": "github-repos"}
{"code": "def base256_encode(n, minwidth=0):  \n    \n    if n > 0:\n        arr = []\n        while n:\n            n, rem = divmod(n, 256)\n            arr.append(rem)\n        b = bytearray(reversed(arr))\n    elif n == 0:\n        b = bytearray(b'\\x00')\n    else:\n        raise ValueError(\"Negative numbers not supported\")\n\n    if minwidth > 0 and len(b) < minwidth:  \n        padding = (minwidth - len(b)) * b'\\x00'\n        b = bytearray(padding) + b\n    b.reverse()\n\n    return b", "docstring": "Encode the input with base256.\n\nArgs:\nn (int): input value.\nminwidth: minimum return value length.\n\nRaises:\nValueError: if a negative number is provided.\n\nReturns:\nbytearray:", "source": "juraj-google-style"}
{"code": "def search_track(self, artist, album=None, track=None,\n                     full_album_art_uri=False):\n        \n        subcategories = [artist]\n        subcategories.append(album or '')\n\n        \n        result = self.get_album_artists(\n            full_album_art_uri=full_album_art_uri,\n            subcategories=subcategories, search_term=track,\n            complete_result=True)\n        result._metadata['search_type'] = 'search_track'\n        return result", "docstring": "Search for an artist, an artist's albums, or specific track.\n\nArgs:\nartist (str): an artist's name.\nalbum (str, optional): an album name. Default `None`.\ntrack (str, optional): a track name. Default `None`.\nfull_album_art_uri (bool): whether the album art URI should be\nabsolute (i.e. including the IP address). Default `False`.\n\nReturns:\nA `SearchResult` instance.", "source": "juraj-google-style"}
{"code": "def diff_compute(self, text1, text2, checklines, deadline):\n    if (not text1):\n        return [(self.DIFF_INSERT, text2)]\n    if (not text2):\n        return [(self.DIFF_DELETE, text1)]\n    if (len(text1) > len(text2)):\n        (longtext, shorttext) = (text1, text2)\n    else:\n        (shorttext, longtext) = (text1, text2)\n    i = longtext.find(shorttext)\n    if (i != (- 1)):\n        diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext), (self.DIFF_INSERT, longtext[(i + len(shorttext)):])]\n        if (len(text1) > len(text2)):\n            diffs[0] = (self.DIFF_DELETE, diffs[0][1])\n            diffs[2] = (self.DIFF_DELETE, diffs[2][1])\n        return diffs\n    if (len(shorttext) == 1):\n        return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]\n    hm = self.diff_halfMatch(text1, text2)\n    if hm:\n        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm\n        diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)\n        diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)\n        return ((diffs_a + [(self.DIFF_EQUAL, mid_common)]) + diffs_b)\n    if (checklines and (len(text1) > 100) and (len(text2) > 100)):\n        return self.diff_lineMode(text1, text2, deadline)\n    return self.diff_bisect(text1, text2, deadline)", "docstring": "Find the differences between two texts.  Assumes that the texts do not\nhave any common prefix or suffix.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\nchecklines: Speedup flag.  If false, then don't run a line-level diff\nfirst to identify the changed areas.\nIf true, then run a faster, slightly less optimal diff.\ndeadline: Time when the diff should be complete by.\n\nReturns:\nArray of changes.", "source": "codesearchnet"}
{"code": "def __init__(self, exclude_columns, coder=coders.registry.get_coder(Any)):\n    self.coder = coder\n    self.exclude_columns = exclude_columns", "docstring": "Encodes/decodes items of a dictionary into a single element.\nArgs:\nexclude_columns: list of columns to exclude from the encoding.", "source": "github-repos"}
{"code": "def old_format(self, content: BeautifulSoup) -> List[str]:\n\n        \n        \n        b = content.find('body')\n        sender, date, nxt, rep_to = None, None, None, None\n        strongs = b.findAll('strong', recursive=False)\n        for s in strongs:\n            field = str(s).split(\">\")[1].split(\"<\")[0]\n            if 'From' in field:\n                sender = s.next_sibling.split(\"(\")[0].strip()\n            elif 'Date' in field:\n                date_str = s.next_sibling.strip().replace(\"-\",\"\").replace(\"  \",\" \").strip()\n                try:\n                    date = parsedate_to_datetime(date_str).isoformat()[:19]\n                except:\n                    date = None\n        sender = b.find('b').text if sender == None else sender\n        sender = b.find('a').text if len(sender) == 0 else sender\n        date = b.find('i').text[:19] if date == None else date\n\n        try:\n            nav = content.find('ul').findAll('li')\n        except:\n            nav = None\n        if nav != None:\n            for l in nav:\n                s = l.text\n                if 'Next in thread' in s:\n                    nxt = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']\n                    nxt = nxt[1:] if nxt[0] == '/' else nxt\n                elif 'reply to' in s:\n                    rep_to = '/'.join(self.email_url.split('/')[:-1]) + '/' + l.find('a')['href']\n                    rep_to = rep_to[1:] if rep_to[0] == '/' else rep_to\n        body = content.find('pre')\n        body = body.text.strip() if body != None else None\n        return [str(i) for i in [sender, date, body, nxt, rep_to]]", "docstring": "Extracts email message information if it uses the old Mailman format\nArgs:\ncontent: BeautifulSoup\n\nReturns: List[str]", "source": "juraj-google-style"}
{"code": "def CompileReport(self, mediator):\n    \n    lines_of_text = ['Listing domains visited by all users']\n    for domain in sorted(self._domains):\n      lines_of_text.append(domain)\n\n    lines_of_text.append('')\n    report_text = '\\n'.join(lines_of_text)\n    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: the analysis report.", "source": "juraj-google-style"}
{"code": "def subprogram_prototype(vo):\n  \n\n  plist = '; '.join(str(p) for p in vo.parameters)\n\n  if isinstance(vo, VhdlFunction):\n    if len(vo.parameters) > 0:\n      proto = 'function {}({}) return {};'.format(vo.name, plist, vo.return_type)\n    else:\n      proto = 'function {} return {};'.format(vo.name, vo.return_type)\n\n  else: \n    proto = 'procedure {}({});'.format(vo.name, plist)\n\n  return proto", "docstring": "Generate a canonical prototype string\n\nArgs:\nvo (VhdlFunction, VhdlProcedure): Subprogram object\nReturns:\nPrototype string.", "source": "juraj-google-style"}
{"code": "def _pretty_print(data_item, summarize):\n    if isinstance(data_item, tensor_lib.Tensor):\n        arr = data_item.numpy()\n        if np.isscalar(arr):\n            return str(arr)\n        else:\n            flat = arr.reshape((-1,))\n            lst = [str(x) for x in flat[:summarize]]\n            if len(lst) < flat.size:\n                lst.append('...')\n            return str(lst)\n    else:\n        return str(data_item)", "docstring": "Format a data item for use in an error message in eager mode.\n\nArgs:\ndata_item: One of the items in the \"data\" argument to an assert_* function.\nCan be a Tensor or a scalar value.\nsummarize: How many elements to retain of each tensor-valued entry in data.\n\nReturns:\nAn appropriate string representation of data_item", "source": "github-repos"}
{"code": "def transform_kernels(kernels, func, n_gates):\n    return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])", "docstring": "Transforms kernel for each gate separately using given function.\n\nArgs:\nkernels: Stacked array of kernels for individual gates.\nfunc: Function applied to kernel of each gate.\nn_gates: Number of gates (4 for LSTM, 3 for GRU).\n\nReturns:\nStacked array of transformed kernels.", "source": "github-repos"}
{"code": "def create_asset_accesspolicy(access_token, name, duration, permission='1'):\n    path = '/AccessPolicies'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = (((((('{ \\t\\t\"Name\": \"' + str(name)) + '\", \\t\\t\"DurationInMinutes\": \"') + duration) + '\", \\t\\t\"Permissions\": \"') + permission) + '\" \\t}')\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Asset Access Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nname (str): A Media Service Asset Access Policy Name.\nduration (str): A Media Service duration.\npermission (str): A Media Service permission.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def _infer_hints_allowing_override(op1, op2, hints):\n    hints = hints or _Hints()\n    if hints.is_self_adjoint is None:\n        is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint\n    else:\n        is_self_adjoint = hints.is_self_adjoint\n    if hints.is_positive_definite is None:\n        is_positive_definite = op1.is_positive_definite and op2.is_positive_definite\n    else:\n        is_positive_definite = hints.is_positive_definite\n    if is_positive_definite and hints.is_positive_definite is None:\n        is_non_singular = True\n    else:\n        is_non_singular = hints.is_non_singular\n    return _Hints(is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite)", "docstring": "Infer hints from op1 and op2.  hints argument is an override.\n\nArgs:\nop1:  LinearOperator\nop2:  LinearOperator\nhints:  _Hints object holding \"is_X\" boolean hints to use for returned\noperator.\nIf some hint is None, try to set using op1 and op2.  If the\nhint is provided, ignore op1 and op2 hints.  This allows an override\nof previous hints, but does not allow forbidden hints (e.g. you still\ncannot say a real diagonal operator is not self-adjoint.\n\nReturns:\n_Hints object.", "source": "github-repos"}
{"code": "def visible(self):\n    query_results = self.map((lambda el: el.is_displayed()), 'visible').results\n    if query_results:\n        return all(query_results)\n    return False", "docstring": "Check whether all matched elements are visible.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def export_json(data, status, headers):\n    dumped = json.dumps(data, ensure_ascii=False)\n    resp = current_app.response_class(dumped, status=status, headers=headers, content_type='application/json; charset=utf-8')\n    return resp", "docstring": "Creates a JSON response\n\nJSON content is encoded by utf-8, not unicode escape.\n\nArgs:\ndata: any type object that can dump to json\nstatus (int): http status code\nheaders (dict): http headers", "source": "codesearchnet"}
{"code": "def handle_enterprise_logistration(backend, user, **kwargs):\n    request = backend.strategy.request\n    enterprise_customer = get_enterprise_customer_for_running_pipeline(request, {'backend': backend.name, 'kwargs': kwargs})\n    if (enterprise_customer is None):\n        return\n    (enterprise_customer_user, _) = EnterpriseCustomerUser.objects.update_or_create(enterprise_customer=enterprise_customer, user_id=user.id)\n    enterprise_customer_user.update_session(request)", "docstring": "Perform the linking of user in the process of logging to the Enterprise Customer.\n\nArgs:\nbackend: The class handling the SSO interaction (SAML, OAuth, etc)\nuser: The user object in the process of being logged in with\n**kwargs: Any remaining pipeline variables", "source": "codesearchnet"}
{"code": "def add(self, text, checked=False, sort=None):\n        \n        node = ListItem(parent_id=self.id, parent_server_id=self.server_id)\n        node.checked = checked\n        node.text = text\n        if sort is not None:\n            node.sort = sort\n        self.append(node, True)\n        self.touch(True)\n        return node", "docstring": "Add a new item to the list.\n\nArgs:\ntext (str): The text.\nchecked (bool): Whether this item is checked.\nsort (int): Item id for sorting.", "source": "juraj-google-style"}
{"code": "def discount_bond_price(self, state: types.RealTensor, times: types.RealTensor, maturities: types.RealTensor, name: str=None) -> types.RealTensor:\n    name = name or self._name + '_discount_bond_prices'\n    with tf.name_scope(name):\n        x_t = tf.convert_to_tensor(state, self._dtype)\n        times = tf.convert_to_tensor(times, self._dtype)\n        maturities = tf.convert_to_tensor(maturities, self._dtype)\n        input_shape_times = tf.shape(times)\n        mean_reversion = self._mean_reversion\n        y_t = self.state_y(times)\n        y_t = tf.reshape(tf.transpose(y_t), tf.concat([input_shape_times, [self._dim, self._dim]], axis=0))\n        values = self._bond_reconstitution(times, maturities, mean_reversion, x_t, y_t, 1, tf.shape(times)[0])\n        return values[0][0]", "docstring": "Returns zero-coupon bond prices `P(t,T)` conditional on `x(t)`.\n\nArgs:\nstate: A `Tensor` of real dtype and shape compatible with\n`(num_times, dim)` specifying the state `x(t)`.\ntimes: A `Tensor` of real dtype and shape `(num_times,)`. The time `t`\nat which discount bond prices are computed.\nmaturities: A `Tensor` of real dtype and shape `(num_times,)`. The time\nto maturity of the discount bonds.\nname: Str. The name to give this op.\nDefault value: `discount_bond_prices`.\n\nReturns:\nA `Tensor` of real dtype and the same shape as `(num_times,)`\ncontaining the price of zero-coupon bonds.", "source": "github-repos"}
{"code": "def get_summary(result):\n    summary = {'success': result.wasSuccessful(), 'stat': {'total': result.testsRun, 'failures': len(result.failures), 'errors': len(result.errors), 'skipped': len(result.skipped), 'expectedFailures': len(result.expectedFailures), 'unexpectedSuccesses': len(result.unexpectedSuccesses)}}\n    summary['stat']['successes'] = (((((summary['stat']['total'] - summary['stat']['failures']) - summary['stat']['errors']) - summary['stat']['skipped']) - summary['stat']['expectedFailures']) - summary['stat']['unexpectedSuccesses'])\n    summary['time'] = {'start_at': result.start_at, 'duration': result.duration}\n    summary['records'] = result.records\n    return summary", "docstring": "get summary from test result\n\nArgs:\nresult (instance): HtmlTestResult() instance\n\nReturns:\ndict: summary extracted from result.\n\n{\n\"success\": True,\n\"stat\": {},\n\"time\": {},\n\"records\": []\n}", "source": "codesearchnet"}
{"code": "def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):\n    audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0)\n    music_tokens_list = []\n    for chunk_i in audio_chunks:\n        music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level)\n        music_tokens_list.append(music_tokens_i)\n    music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)]\n    return music_tokens", "docstring": "Transforms the `input_audio` to a discrete representation made out of `music_tokens`.\n\nArgs:\ninput_audio (`torch.Tensor`):\nRaw audio which will be encoded to its discrete representation using the codebook. The closest `code`\nform the codebook will be computed for each sequence of samples.\nstart_level (`int`, *optional*, defaults to 0):\nLevel at which the encoding process will start. Default to 0.\nend_level (`int`, *optional*):\nLevel at which the encoding process will start. Default to None.\nbs_chunks (int, *optional*, defaults to 1):\nNumber of chunks of raw audio to process at the same time.", "source": "github-repos"}
{"code": "def __init__(self, time_elements_tuple=None):\n    \n    fraction_of_second = None\n    if time_elements_tuple:\n      if len(time_elements_tuple) < 7:\n        raise ValueError((\n            'Invalid time elements tuple at least 7 elements required,'\n            'got: {0:d}').format(len(time_elements_tuple)))\n\n      milliseconds = time_elements_tuple[6]\n      time_elements_tuple = time_elements_tuple[:6]\n\n      if (milliseconds < 0 or\n          milliseconds >= definitions.MILLISECONDS_PER_SECOND):\n        raise ValueError('Invalid number of milliseconds.')\n\n      fraction_of_second = (\n          decimal.Decimal(milliseconds) / definitions.MILLISECONDS_PER_SECOND)\n\n    super(TimeElementsInMilliseconds, self).__init__(\n        fraction_of_second=fraction_of_second,\n        time_elements_tuple=time_elements_tuple)\n    self._precision = definitions.PRECISION_1_MILLISECOND", "docstring": "Initializes time elements.\n\nArgs:\ntime_elements_tuple (Optional[tuple[int, int, int, int, int, int, int]]):\ntime elements, contains year, month, day of month, hours, minutes,\nseconds and milliseconds.\n\nRaises:\nValueError: if the time elements tuple is invalid.", "source": "juraj-google-style"}
{"code": "def useQt(qtLib: str = 'PyQt5', period: float = 0.01):\n    \n    def qt_step():\n        loop.call_later(period, qt_step)\n        if not stack:\n            qloop = QEventLoop()\n            timer = QTimer()\n            timer.timeout.connect(qloop.quit)\n            stack.append((qloop, timer))\n        qloop, timer = stack.pop()\n        timer.start(0)\n        qloop.exec_()\n        timer.stop()\n        stack.append((qloop, timer))\n\n    if qtLib not in ('PyQt5', 'PySide2'):\n        raise RuntimeError(f'Unknown Qt library: {qtLib}')\n    if qtLib == 'PyQt5':\n        from PyQt5.Qt import QApplication, QTimer, QEventLoop\n    else:\n        from PySide2.QtWidgets import QApplication\n        from PySide2.QtCore import QTimer, QEventLoop\n    global qApp\n    qApp = QApplication.instance() or QApplication(sys.argv)\n    loop = asyncio.get_event_loop()\n    stack: list = []\n    qt_step()", "docstring": "Run combined Qt5/asyncio event loop.\n\nArgs:\nqtLib: Name of Qt library to use, can be 'PyQt5' or 'PySide2'.\nperiod: Period in seconds to poll Qt.", "source": "juraj-google-style"}
{"code": "def percentile(self, percent):\n        \n        \n        if percent >= 100:\n            percent = 100\n\n        \n        target = len(self) - len(self) * (percent / 100)\n\n        \n        \n        \n        for k in reversed(sorted(self._data.keys())):\n            target -= self._data[k]\n            if target < 0:\n                return k\n\n        \n        \n        return 10", "docstring": "Return the value that is the Nth precentile in the histogram.\n\nArgs:\npercent (Union[int, float]): The precentile being sought. The\ndefault consumer implementations use consistently use ``99``.\n\nReturns:\nint: The value corresponding to the requested percentile.", "source": "juraj-google-style"}
{"code": "def _new_population_genalg(population, fitnesses, mutation_chance=0.02, crossover_chance=0.7, selection_function=gaoperators.tournament_selection, crossover_function=gaoperators.one_point_crossover):\n    intermediate_population = selection_function(population, fitnesses)\n    new_population = _crossover(intermediate_population, crossover_chance, crossover_function)\n    gaoperators.random_flip_mutate(new_population, mutation_chance)\n    return new_population", "docstring": "Perform all genetic algorithm operations on a population, and return a new population.\n\npopulation must have an even number of chromosomes.\n\nArgs:\npopulation: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]\nfitness: A list of fitnesses that correspond with chromosomes in the population,\nex. [1.2, 10.8]\nmutation_chance: the chance that a bit will be flipped during mutation\ncrossover_chance: the chance that two parents will be crossed during crossover\nselection_function: A function that will select parents for crossover and mutation\ncrossover_function: A function that will cross two parents\n\nReturns:\nlist; A new population of chromosomes, that should be more fit.", "source": "codesearchnet"}
{"code": "def Unregister(self, name):\n    precondition.AssertType(name, Text)\n    try:\n        del self._constructors[name]\n    except KeyError:\n        raise ValueError((\"Constructor with name '%s' is not registered\" % name))", "docstring": "Unregisters a constructor.\n\nArgs:\nname: A name of the constructor to unregister.\n\nRaises:\nValueError: If constructor with specified name has never been registered.", "source": "codesearchnet"}
{"code": "class MaxTimeCriteria(StoppingCriteria):\n\n    def __init__(self, max_time: float, initial_timestamp: Optional[float]=None):\n        self.max_time = max_time\n        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp\n\n    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:\n        is_done = time.time() - self.initial_timestamp > self.max_time\n        return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)", "docstring": "This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the\ntime will start being counted when you initialize this function. You can override this by passing an\n`initial_time`.\n\nArgs:\nmax_time (`float`):\nThe maximum allowed time in seconds for the generation.\ninitial_time (`float`, *optional*, defaults to `time.time()`):\nThe start of the generation allowed time.", "source": "github-repos"}
{"code": "def ch_duration(self, *channels: List[Channel]) -> int:\n        \n        return self.timeslots.ch_duration(*channels)", "docstring": "Return duration of supplied channels.\n\nArgs:\n*channels: Supplied channels", "source": "juraj-google-style"}
{"code": "def add_arguments(cls, parser):\n        \n\n        parser.add_argument(\n            '-c', '--create-missing-tasks',\n            action='store_true',\n            dest='create_missing_tasks',\n            help=\"[sync] create asana tasks for issues without tasks\"\n            )\n\n        parser.add_argument(\n            '-l', '--sync-labels',\n            action='store_true',\n            dest='sync_labels',\n            help=\"[sync] sync labels and milestones for each issue\"\n            )", "docstring": "Add arguments to the parser for collection in app.args.\n\nArgs:\nparser:\n`argparse.ArgumentParser`. Parser.\nArguments added here are server on\nself.args.", "source": "juraj-google-style"}
{"code": "def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):\n    device_name = ctx.device_name\n    try:\n        ctx.ensure_initialized()\n        tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name, inputs, attrs, num_outputs)\n    except core._NotOkStatusException as e:\n        if name is not None:\n            e.message += ' name: ' + name\n        raise core._status_to_exception(e) from None\n    except TypeError as e:\n        keras_symbolic_tensors = [x for x in inputs if _is_keras_symbolic_tensor(x)]\n        if keras_symbolic_tensors:\n            raise core._SymbolicException('Inputs to eager execution function cannot be Keras symbolic tensors, but found {}'.format(keras_symbolic_tensors))\n        raise e\n    return tensors", "docstring": "Execute a TensorFlow operation.\n\nArgs:\nop_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to\nexecute.\nnum_outputs: The number of outputs of the operation to fetch. (Explicitly\nprovided instead of being inferred for performance reasons).\ninputs: A list of inputs to the operation. Each entry should be a Tensor, or\na value which can be passed to the Tensor constructor to create one.\nattrs: A tuple with alternating string attr names and attr values for this\noperation.\nctx: The value of context.context().\nname: Customized name for the operation.\n\nReturns:\nList of output Tensor objects. The list is empty if there are no outputs\n\nRaises:\nAn exception on error.", "source": "github-repos"}
{"code": "def load_object(obj) -> object:\n    if isinstance(obj, str):\n        if (':' in obj):\n            (module_name, obj_name) = obj.split(':')\n            if (not module_name):\n                module_name = '.'\n        else:\n            module_name = obj\n        obj = importlib.import_module(module_name)\n        if obj_name:\n            attrs = obj_name.split('.')\n            for attr in attrs:\n                obj = getattr(obj, attr)\n    return obj", "docstring": "Load an object.\n\nArgs:\nobj (str|object): Load the indicated object if this is a string;\notherwise, return the object as is.\n\nTo load a module, pass a dotted path like 'package.module';\nto load an an object from a module pass a path like\n'package.module:name'.\n\nReturns:\nobject", "source": "codesearchnet"}
{"code": "def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':\n        \n        return self if self._is_ok else cast(\n            'Result[T, F]',\n            self._type.Err(op(cast(E, self._val)))\n        )", "docstring": "Applies a function to the contained :meth:`Result.Err` value.\n\nArgs:\nop: The function to apply to the :meth:`Result.Err` value.\n\nReturns:\nA :class:`Result` with its error value as the function result\nif `self` is a :meth:`Result.Err` value, otherwise returns\n`self`.\n\nExamples:\n>>> Ok(1).map_err(lambda x: x * 2)\nOk(1)\n>>> Err(1).map_err(lambda x: x * 2)\nErr(2)", "source": "juraj-google-style"}
{"code": "def cv_squared(x):\n    epsilon = 1e-10\n    float_size = (tf.to_float(tf.size(x)) + epsilon)\n    mean = (tf.reduce_sum(x) / float_size)\n    variance = (tf.reduce_sum(tf.squared_difference(x, mean)) / float_size)\n    return (variance / (tf.square(mean) + epsilon))", "docstring": "The squared coefficient of variation of a sample.\n\nUseful as a loss to encourage a positive distribution to be more uniform.\nEpsilons added for numerical stability.\nReturns 0 for an empty Tensor.\n\nArgs:\nx: a `Tensor`.\n\nReturns:\na `Scalar`.", "source": "codesearchnet"}
{"code": "def load_module(self, namespace, module_name):\n    try:\n        filename, src = self.typeshed.get_module_file(namespace, module_name, self.options.python_version)\n    except OSError:\n        return (None, None)\n    ast = parser.parse_string(src, filename=filename, name=module_name, options=self.options)\n    return (filename, ast)", "docstring": "Load and parse a *.pyi from typeshed.\n\nArgs:\nnamespace: one of \"stdlib\" or \"third_party\"\nmodule_name: the module name (without any file extension or \"__init__\"\nsuffix).\n\nReturns:\n(None, None) if the module doesn't have a definition.\nElse a tuple of the filename and the AST of the module.", "source": "github-repos"}
{"code": "def add_spin_by_site(self, spins):\n    if (len(spins) != len(self.sites)):\n        raise ValueError('Spin of all sites must be specified in the dictionary.')\n    for (site, spin) in zip(self.sites, spins):\n        new_sp = {}\n        for (sp, occu) in site.species.items():\n            sym = sp.symbol\n            oxi_state = getattr(sp, 'oxi_state', None)\n            new_sp[Specie(sym, oxidation_state=oxi_state, properties={'spin': spin})] = occu\n        site.species = new_sp", "docstring": "Add spin states to a structure by site.\n\nArgs:\nspins (list): List of spins\nE.g., [+5, -5, 0, 0]", "source": "codesearchnet"}
{"code": "def get_flights(self, search_key):\n    url = AIRLINE_FLT_BASE.format(search_key, 100)\n    return self._fr24.get_airline_flight_data(url)", "docstring": "Get the flights for a particular airline.\n\nGiven a full or partial flight number string, this method returns the first 100 flights matching that string.\n\nPlease note this method was different in earlier versions. The older versions took an airline code and returned all scheduled flights for that airline\n\nArgs:\nsearch_key (str): Full or partial flight number for any airline e.g. MI47 to get all SilkAir flights starting with MI47\n\nReturns:\nA list of dicts, one for each scheduled flight in the airlines network\n\nExample::\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_flights('MI47')", "source": "codesearchnet"}
{"code": "def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha, use_tpu=False):\n    return self._beam_decode_slow(features, decode_length, beam_size, top_beams, alpha, use_tpu)", "docstring": "Beam search decoding.\n\nModels should ideally implement a more efficient version of this function.\n\nArgs:\nfeatures: an map of string to `Tensor`\ndecode_length: an integer.  How many additional timesteps to decode.\nbeam_size: number of beams.\ntop_beams: an integer. How many of the beams to return.\nalpha: Float that controls the length penalty. larger the alpha, stronger\nthe preference for longer translations.\nuse_tpu: A bool, whether to do beam decode on TPU.\n\nReturns:\nsamples: an integer `Tensor`. Top samples from the beam search", "source": "codesearchnet"}
{"code": "def _verify_setup(self):\n    if not self._is_chief:\n        for op in self._graph.get_operations():\n            if op.type in ['Variable', 'VariableV2'] and (not op.device):\n                raise ValueError('When using replicas, all Variables must have their device set: %s' % op)", "docstring": "Check that all is good.\n\nRaises:\nValueError: If something is not good.", "source": "github-repos"}
{"code": "def delete_customer(self, customer_id):\n        \n        return self.client._delete(self.url + 'customers/{}'.format(customer_id), headers=self.get_headers())", "docstring": "Removes a user from the system.\n\nArgs:\ncustomer_id: Identifier of the client to be deleted.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def __init__(self, latitude, longitude, comment=None):\n        \n        super(Xearth, self).__init__(latitude, longitude)\n        self.comment = comment", "docstring": "Initialise a new ``Xearth`` object.\n\nArgs:\nlatitude (float): Location's latitude\nlongitude (float): Location's longitude\ncomment (str): Comment for location", "source": "juraj-google-style"}
{"code": "def list_files(file_directory,\n               file_extensions=None,\n               include_subfolders=True,\n               include_root=True,\n               root_dir=None):\n    \n\n    log = logging.getLogger(\"%s\" % (inspect.stack()[0][3]))\n    log.setLevel(__LOG_LEVEL__)\n\n    rtn_list = []\n    if not root_dir:\n        root_dir = file_directory\n    root_dir = root_dir.strip()\n    if root_dir.endswith(os.path.sep):\n        root_dir = root_dir.strip()[:-1]\n    dir_parts_len = len(root_dir.split(os.path.sep))\n    level = 0\n    for root, dirnames, filenames in os.walk(file_directory):\n        root_str = root\n        if level > 0 and not include_subfolders:\n            break\n        if not include_root:\n            root_str = os.path.sep.join(root.split(os.path.sep)[dir_parts_len:])\n        if file_extensions:\n            files = [(x,\n                      os.path.join(root_str, x),\n                      os.path.getmtime(os.path.join(root, x)),\n                      os.path.join(root, x))\n                     for x in filenames \\\n                     if \".\" in x \\\n                     and x.split(\".\")[len(x.split(\".\"))-1] in file_extensions]\n        else:\n            files = [(x,\n                      os.path.join(root_str, x),\n                      os.path.getmtime(os.path.join(root, x)),\n                      os.path.join(root, x))\n                     for x in filenames]\n        rtn_list += files\n        level += 1\n    rtn_list.sort(key=lambda tup: tup[0], reverse=True)\n    return rtn_list", "docstring": "Returns a list of files\n\nargs:\nfile_directory: a string path to the file directory\nfile_extensions: a list of file extensions to filter example\n['xml', 'rdf']. If None, include all files\ninclude_subfolders: as implied\ninclude_root: whether to include the root in the path\nroot_dir: the root directory to remove if include_root is False\n\nreturns:\n(tuple) (file_name, file_path_with_root_mod, modified_time, full_path)", "source": "juraj-google-style"}
{"code": "def dict_load(self, ns_dict):\n    for (prefix, uri) in ns_dict.items():\n        self.bind(prefix, uri, override=False, calc=False)\n    self.__make_dicts__", "docstring": "Reads a dictionary of namespaces and binds them to the manager\n\nArgs:\nns_dict: dictionary with the key as the prefix and the value\nas the uri", "source": "codesearchnet"}
{"code": "def nth(series, n, order_by=None):\n    if (order_by is not None):\n        series = order_series_by(series, order_by)\n    try:\n        return series.iloc[n]\n    except:\n        return np.nan", "docstring": "Returns the nth value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.\nn (integer): position of desired value. Returns `NaN` if out of range.\n\nKwargs:\norder_by: a pandas.Series or list of series (can be symbolic) to order\nthe input series by before summarization.", "source": "codesearchnet"}
{"code": "def get_log_file_timestamp(delta=None):\n    return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)", "docstring": "Returns a timestamp in the format used for log file names.\n\nDefault is current time. If a delta is set, the return value will be\nthe current time offset by delta seconds.\n\nArgs:\ndelta: Number of seconds to offset from current time; can be negative.\n\nReturns:\nA timestamp in log file name format with an offset.", "source": "github-repos"}
{"code": "def Decompress(self, compressed_data):\n    \n    try:\n      if hasattr(lzma, 'LZMA_VERSION'):\n        \n        \n        uncompressed_data = self._lzma_decompressor.decompress(\n            compressed_data, 0)\n      else:\n        uncompressed_data = self._lzma_decompressor.decompress(compressed_data)\n\n      remaining_compressed_data = getattr(\n          self._lzma_decompressor, 'unused_data', b'')\n\n    except (EOFError, IOError, LZMAError) as exception:\n      raise errors.BackEndError((\n          'Unable to decompress XZ compressed stream with error: '\n          '{0!s}.').format(exception))\n\n    return uncompressed_data, remaining_compressed_data", "docstring": "Decompresses the compressed data.\n\nArgs:\ncompressed_data (bytes): compressed data.\n\nReturns:\ntuple(bytes, bytes): uncompressed data and remaining compressed data.\n\nRaises:\nBackEndError: if the XZ compressed stream cannot be decompressed.", "source": "juraj-google-style"}
{"code": "def map_kegg_all_genes(organism_code, target_db):\n    mapping = bs_kegg.conv(target_db, organism_code)\n    new_mapping = {}\n    for (k, v) in mapping.items():\n        new_mapping[k.replace((organism_code + ':'), '')] = str(v.split(':')[1])\n    return new_mapping", "docstring": "Map all of an organism's gene IDs to the target database.\n\nThis is faster than supplying a specific list of genes to map,\nplus there seems to be a limit on the number you can map with a manual REST query anyway.\n\nArgs:\norganism_code: the three letter KEGG code of your organism\ntarget_db: ncbi-proteinid | ncbi-geneid | uniprot\n\nReturns:\nDictionary of ID mapping", "source": "codesearchnet"}
{"code": "def __init__(self, option=None, default=None, *args, **kwargs):\n        \n        super(ListOption, self).__init__(*args, **kwargs)\n        if not isinstance(option, opt.Option):\n\n            raise TypeError(\"Option must be an option type.\")\n\n        self._option = option\n        self._default = default\n\n        if default is not None:\n\n            self._value = self.coerce(default)", "docstring": "Initialize the option with an option type.\n\nArgs:\noption (option.Option): The option which is used to validate all\nlist options.\n\nRaises:\nTypeError: If the given option is not an instance of option.Option.\nTypeError: If the default value is set but not an iterable.", "source": "juraj-google-style"}
{"code": "def copy(self, dest):\n    if os.path.isfile(self.path):\n        shutil.copy2(self.path, dest)\n    else:\n        shutil.copytree(self.path, dest, symlinks=False, ignore=None)", "docstring": "Copy item to the given `dest` path.\n\nArgs:\n* dest: destination path to copy.", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    size = get_size_dict(size, default_to_square=False)\n    if 'shortest_edge' in size:\n        output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False, input_data_format=input_data_format)\n    elif 'height' in size and 'width' in size:\n        output_size = (size['height'], size['width'])\n    else:\n        raise ValueError(f\"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}\")\n    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\nhave the size `(h, w)`. If `size` is of the form `{\"shortest_edge\": s}`, the output image will have its\nshortest edge of length `s` while keeping the aspect ratio of the original image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\nResampling filter to use when resizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _group_chunks_by_entities(self, chunks, entities):\n    \n    for entity in entities:\n      chunks_to_concat = chunks.get_overlaps(\n          entity['beginOffset'], len(entity['content']))\n      if not chunks_to_concat:\n        continue\n      new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])\n      new_chunk = Chunk(new_chunk_word)\n      chunks.swap(chunks_to_concat, new_chunk)\n    return chunks", "docstring": "Groups chunks by entities retrieved from NL API Entity Analysis.\n\nArgs:\nchunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.\nentities (:obj:`list` of :obj:`dict`): List of entities.\n\nReturns:\nA chunk list. (:obj:`budou.chunk.ChunkList`)", "source": "juraj-google-style"}
{"code": "def get_dropout_mask_for_cell(self, inputs, training, count=1):\n    if self.dropout == 0:\n        return None\n    init_kwargs = dict(inputs=inputs, training=training, count=count)\n    return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)", "docstring": "Get the dropout mask for RNN cell's input.\n\nIt will create mask based on context if there isn't any existing cached\nmask. If a new mask is generated, it will update the cache in the cell.\n\nArgs:\ninputs: The input tensor whose shape will be used to generate dropout\nmask.\ntraining: Boolean tensor, whether it's in training mode, dropout will be\nignored in non-training mode.\ncount: Int, how many dropout mask will be generated. It is useful for cell\nthat has internal weights fused together.\nReturns:\nList of mask tensor, generated or cached mask based on context.", "source": "github-repos"}
{"code": "def _SanitizedMRO(obj):\n    return_list = []\n    for cls in tf_inspect.getmro(obj):\n        if cls.__name__ == '_NewClass':\n            continue\n        str_repr = _NormalizeType(str(cls))\n        return_list.append(str_repr)\n        if 'tensorflow' not in str_repr and 'keras' not in str_repr:\n            break\n        if 'StubOutForTesting' in str_repr:\n            break\n    return return_list", "docstring": "Get a list of superclasses with minimal amount of non-TF classes.\n\nBased on many parameters like python version, OS, protobuf implementation\nor changes in google core libraries the list of superclasses of a class\ncan change. We only return the first non-TF class to be robust to non API\naffecting changes. The Method Resolution Order returned by `tf_inspect.getmro`\nis still maintained in the return value.\n\nArgs:\nobj: A python object for which to compute the sanitized MRO.\n\nReturns:\nlist of strings, string representation of the class names.", "source": "github-repos"}
{"code": "def SetTimelineOwner(self, username):\n    self._timeline_owner = username\n    logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner))", "docstring": "Sets the username of the user that should own the timeline.\n\nArgs:\nusername (str): username.", "source": "codesearchnet"}
{"code": "def add_property(self, set_property, name, starting_value, tag_name=None):\n\n    def del_property(self, tag_name):\n        try:\n            del self._content[tag_name]\n        except KeyError:\n            pass\n\n    def get_property(self, tag_name):\n        try:\n            return self._content[tag_name]\n        except KeyError:\n            return None\n    tag_name = (name if (tag_name is None) else tag_name)\n    fget = (lambda self: get_property(self, tag_name))\n    fdel = (lambda self: del_property(self, tag_name))\n    fset = (lambda self, value: set_property(value))\n    setattr(self.__class__, name, property(fget, fset, fdel))\n    set_property(starting_value)", "docstring": "Set properties of attributes stored in content using stored common fdel and fget and given fset.\n\nArgs:\nset_property -- Function that sets given property.\nname -- Name of the attribute this property must simulate. Used as key in content dict by default.\nstarting_value -- Starting value of given property.\n\nKeyword args:\ntag_name -- The tag name stored in content dict as a key if different from name.", "source": "codesearchnet"}
{"code": "def setup_prefix_logging(logdir):\n    \n    if not os.path.exists(logdir):\n        os.mkdir(logdir)\n\n    file_handler = logging.FileHandler(\n        filename=os.path.join(logdir, 'lago.log'),\n    )\n    file_formatter = get_default_log_formatter()\n    file_handler.setFormatter(file_formatter)\n    logging.root.addHandler(file_handler)\n    hide_paramiko_logs()\n    hide_stevedore_logs()", "docstring": "Sets up a file logger that will create a log in the given logdir (usually a\nlago prefix)\n\nArgs:\nlogdir (str): path to create the log into, will be created if it does\nnot exist\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def query(  \n        self,\n        url: Union[str, methods],\n        data: Optional[MutableMapping] = None,\n        headers: Optional[MutableMapping] = None,\n        as_json: Optional[bool] = None,\n    ) -> dict:\n        \n        url, body, headers = sansio.prepare_request(\n            url=url,\n            data=data,\n            headers=headers,\n            global_headers=self._headers,\n            token=self._token,\n        )\n        return self._make_query(url, body, headers)", "docstring": "Query the slack API\n\nWhen using :class:`slack.methods` the request is made `as_json` if available\n\nArgs:\nurl: :class:`slack.methods` or url string\ndata: JSON encodable MutableMapping\nheaders: Custom headers\nas_json: Post JSON to the slack API\nReturns:\ndictionary of slack API response data", "source": "juraj-google-style"}
{"code": "async def msetup(self, text_channel):\n    if self.mready:\n        logger.warning('Attempt to init music when already initialised')\n        return\n    if (self.state != 'starting'):\n        logger.error(\"Attempt to init from wrong state ('{}'), must be 'starting'.\".format(self.state))\n        return\n    self.logger.debug('Setting up gui')\n    self.mchannel = text_channel\n    self.new_embed_ui()\n    (await self.embed.send())\n    (await self.embed.usend())\n    (await self.add_reactions())\n    self.mready = True", "docstring": "Creates the gui\n\nArgs:\ntext_channel (discord.Channel): The channel for the embed ui to run in", "source": "codesearchnet"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n        return\n\n    \n    if server is not None and author != channel.server.me:\n        \n        prefix = data[\"discord\"][\"servers\"][server.id][\"prefix\"]\n        if content.startswith(prefix):\n            \n            package = content.split(\" \")\n            command = package[0][len(prefix):]\n            args = package[1:]\n\n            alias_steam = [\"steam\", \"pc\"]\n            alias_ps = [\"ps\", \"psn\", \"playstation\", \"ps4\", \"playstation 4\"]\n            alias_xbox = [\"xbox\", \"xb\", \"xb1\", \"xbone\", \"xbox one\", \"xbox one\"]\n\n            platform = \"steam\"\n            if len(args) > 0:\n                player_name = args[0]\n            else:\n                return\n\n            if len(args) > 1:\n                platform = ' '.join(args[1:]).lower()\n\n            if platform in alias_steam:\n                platform = \"steam\"\n            elif platform in alias_ps:\n                platform = \"ps\"\n            elif platform in alias_xbox:\n                platform = \"xbox\"\n\n            \n            if command == 'rlstats':\n                await client.send_typing(channel)\n\n                \n                success, rldata = api_rocketleaguestats.check_rank(player_name, platform)\n                \n                if success:\n                    embed = ui_embed.success(channel, rldata[0], rldata[1], rldata[2], rldata[3])\n                else:\n                    embed = ui_embed.fail_api(channel)\n\n                await embed.send()", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def Matches(self, registry_key, search_depth):\n    if (self._key_path_segments is None):\n        key_path_match = None\n    else:\n        key_path_match = self._CheckKeyPath(registry_key, search_depth)\n        if (not key_path_match):\n            return (False, key_path_match)\n        if (search_depth != self._number_of_key_path_segments):\n            return (False, key_path_match)\n    return (True, key_path_match)", "docstring": "Determines if the Windows Registry key matches the find specification.\n\nArgs:\nregistry_key (WinRegistryKey): Windows Registry key.\nsearch_depth (int): number of key path segments to compare.\n\nReturns:\ntuple: contains:\n\nbool: True if the Windows Registry key matches the find specification,\nFalse otherwise.\nbool: True if the key path matches, False if not or None if no key path\nspecified.", "source": "codesearchnet"}
{"code": "def get_data_dirs(__pkg: str) -> List[str]:\n    dirs = [user_data(__pkg)]\n    dirs.extend((path.expanduser(path.sep.join([d, __pkg])) for d in getenv('XDG_DATA_DIRS', '/usr/local/share/:/usr/share/').split(':')))\n    return [d for d in dirs if path.isdir(d)]", "docstring": "Return all data directories for given package.\n\nArgs:\n__pkg: Package name", "source": "codesearchnet"}
{"code": "def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs, inputs_stacked):\n    num_outputs = len(self._outputs)\n    assert len(inputs) == len(self._enters)\n    assert len(inputs_stacked) == len(self._enters)\n    loop_var = parent_pfor.loop_var\n    loop_len = array_ops.size(indices)\n    pfor = PFor(loop_var, loop_len, pfor_ops=self._pfor_ops, all_indices=indices, all_indices_partitioned=cond_stacked, fallback_to_while_loop=self._fallback_to_while_loop, pfor_config=self._pfor_config)\n    for enter in self._direct_enters:\n        enter_input = enter.op.inputs[0]\n        converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(enter_input)\n        assert not stacked and (not is_sparse_stacked), (enter, converted_enter)\n        pfor._add_conversion(enter, wrap(converted_enter, False))\n    for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):\n        pfor._add_conversion(enter, wrap(inp, stacked))\n    for i in range(num_outputs):\n        wrapped_inp = wrap(inputs[i], inputs_stacked[i])\n        merge = self._enter_merges[i]\n        pfor._add_conversion(merge.outputs[0], wrapped_inp)\n        pfor._add_conversion(merge.outputs[1], wrap(constant_op.constant(-1.0), False))\n        switch = self._exit_switches[i]\n        pfor._add_conversion(switch.outputs[1], wrapped_inp)\n    return pfor", "docstring": "Create a PFor object for converting parts of the while_loop.\n\nArgs:\nparent_pfor: PFor object being used for converting the while_loop.\nindices: int32 Tensor of ids for the iterations that are still active\n(i.e. did not exit the while_loop).\ncond_stacked: True if the while_loop condition is stacked.\ninputs: list of input Tensors corresponding 1-to-1 with self._enters. Note\nthat these Tensors are a subset of the loop variables for the generated\nwhile_loop.\ninputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,\nindicating if the value is stacked or not.\n\nReturns:\nA PFor instance. The instance is initialized by adding conversion mappings\nof nodes that will be external to the conversion that the returned\ninstance will be used for. e.g. Enter nodes as well as Merge and Switch\noutputs are mapped to converted values.", "source": "github-repos"}
{"code": "def stop_on_exception(self):\n    return self._coord.stop_on_exception()", "docstring": "Context handler to stop the supervisor when an exception is raised.\n\nSee `Coordinator.stop_on_exception()`.\n\nReturns:\nA context handler.", "source": "github-repos"}
{"code": "def observations(self, main_type, sub_type, unique_id, owner=None, params=None):\n        \n        params = params or {}\n\n        if owner:\n            params['owner'] = owner\n\n        if not sub_type:\n            url = '/v2/{}/{}/observations'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}/observations'.format(main_type, sub_type, unique_id)\n\n        return self.tcex.session.get(url, json=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nowner:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def _pick_unused_port_without_server():\n    rng = random.Random()\n    for _ in range(10):\n        port = int(rng.randrange(15000, 25000))\n        if is_port_free(port):\n            _random_ports.add(port)\n            return port\n    for _ in range(10):\n        port = bind(0, _PROTOS[0][0], _PROTOS[0][1])\n        if (port and bind(port, _PROTOS[1][0], _PROTOS[1][1])):\n            _random_ports.add(port)\n            return port\n    raise NoFreePortFoundError()", "docstring": "Pick an available network port without the help of a port server.\n\nThis code ensures that the port is available on both TCP and UDP.\n\nThis function is an implementation detail of PickUnusedPort(), and\nshould not be called by code outside of this module.\n\nReturns:\nA port number that is unused on both TCP and UDP.\n\nRaises:\nNoFreePortFoundError: No free port could be found.", "source": "codesearchnet"}
{"code": "def send_log_message(self, message: LogMessage) -> None:\n    print(message)", "docstring": "Prints the log message to be captured by cloud logging.\n\nArgs:\n* message: LogMessage dictionary\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def write_worksheets(workbook, data_list, result_info_key, identifier_keys):\n    worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)\n    for key in worksheet_keys:\n        title = key.split('/')[1]\n        title = utilities.convert_snake_to_title_case(title)\n        title = KEY_TO_WORKSHEET_MAP.get(title, title)\n        if (key == 'property/nod'):\n            create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)\n        else:\n            worksheet = workbook.create_sheet(title=title[:31])\n            processed_data = process_data(key, data_list, result_info_key, identifier_keys)\n            write_data(worksheet, processed_data)\n    workbook.remove_sheet(workbook.active)", "docstring": "Writes rest of the worksheets to workbook.\n\nArgs:\nworkbook: workbook to write into\ndata_list: Analytics API data as a list of dicts\nresult_info_key: the key in api_data dicts that contains the data results\nidentifier_keys: the list of keys used as requested identifiers\n(address, zipcode, block_id, etc)", "source": "codesearchnet"}
{"code": "def _ParseFileData(self, knowledge_base, file_object):\n    \n    text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')\n\n    system_product = text_file_object.readline()\n\n    \n    if system_product.startswith('Debian GNU/Linux '):\n      system_product, _, _ = system_product.partition('\\\\')\n      system_product = system_product.rstrip()\n\n    else:\n      system_product = None\n\n    if not knowledge_base.GetValue('operating_system_product'):\n      if system_product:\n        knowledge_base.SetValue('operating_system_product', system_product)", "docstring": "Parses file content (data) for system product preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_object (dfvfs.FileIO): file-like object that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def read_binary_array(self, key, b64decode=True, decode=False):\n    data = None\n    if (key is not None):\n        data = self.db.read(key.strip())\n        if (data is not None):\n            data_decoded = []\n            for d in json.loads(data, object_pairs_hook=OrderedDict):\n                if b64decode:\n                    dd = base64.b64decode(d)\n                    if decode:\n                        try:\n                            dd = dd.decode('utf-8')\n                        except UnicodeDecodeError:\n                            dd = dd.decode('latin-1')\n                    data_decoded.append(dd)\n                else:\n                    data_decoded.append(d)\n            data = data_decoded\n    else:\n        self.tcex.log.warning(u'The key field was None.')\n    return data", "docstring": "Read method of CRUD operation for binary array data.\n\nArgs:\nkey (string): The variable to read from the DB.\nb64decode (bool): If true the data will be base64 decoded.\ndecode (bool): If true the data will be decoded to a String.\n\nReturns:\n(list): Results retrieved from DB.", "source": "codesearchnet"}
{"code": "def js_adaptor(buffer):\n    \n    buffer = re.sub('true', 'True', buffer)\n    buffer = re.sub('false', 'False', buffer)\n    buffer = re.sub('none', 'None', buffer)\n    buffer = re.sub('NaN', '\"NaN\"', buffer)\n    return buffer", "docstring": "Convert javascript literals like true, false, none and NaN to\ntheir Python-compatible equivalents.\n\nArguments:\nbuffer: string to be converted\n\nReturns:\nstring after conversion", "source": "juraj-google-style"}
{"code": "def extraterrestrial_horizontal_radiation(self, value=9999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `extraterrestrial_horizontal_radiation`'.format(value))\n            if value < 0.0:\n                raise ValueError(\n                    'value need to be greater or equal 0.0 '\n                    'for field `extraterrestrial_horizontal_radiation`')\n\n        self._extraterrestrial_horizontal_radiation = value", "docstring": "Corresponds to IDD Field `extraterrestrial_horizontal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `extraterrestrial_horizontal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_elements_iterable(self, make_copy: bool=False) -> Iterable[WindowedValue]:\n    if not self._stacked:\n        elements = cast('List[WindowedValue]', self._elements)\n        if self._committed and (not make_copy):\n            return elements\n        return list(elements)\n\n    def iterable_stacked_or_elements(elements):\n        for e in elements:\n            if isinstance(e, _Bundle._StackedWindowedValues):\n                for w in e.windowed_values():\n                    yield w\n            else:\n                yield e\n    if self._committed and (not make_copy):\n        return iterable_stacked_or_elements(self._elements)\n    return [e for e in iterable_stacked_or_elements(self._elements)]", "docstring": "Returns iterable elements.\n\nArgs:\nmake_copy: whether to force returning copy or yielded iterable.\n\nReturns:\nunstacked elements,\nin the form of iterable if committed and make_copy is not True,\nor as a list of copied WindowedValues.", "source": "github-repos"}
{"code": "def find_labels(model_class):\n    model_name = model_class.__name__\n    framework = infer_framework(model_class)\n    if framework == 'tf':\n        signature = inspect.signature(model_class.call)\n    elif framework == 'pt':\n        signature = inspect.signature(model_class.forward)\n    else:\n        signature = inspect.signature(model_class.__call__)\n    if 'QuestionAnswering' in model_name:\n        return [p for p in signature.parameters if 'label' in p or p in ('start_positions', 'end_positions')]\n    else:\n        return [p for p in signature.parameters if 'label' in p]", "docstring": "Find the labels used by a given model.\n\nArgs:\nmodel_class (`type`): The class of the model.", "source": "github-repos"}
{"code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = _assert_float_dtype(dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point types are\nsupported.\n**kwargs: Additional keyword arguments.\n\nRaises:\nValueError: If the dtype is not floating point", "source": "github-repos"}
{"code": "def get_form_energy(self, entry):\n        \n        c = entry.composition\n        return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom\n                                   for el in c.elements])", "docstring": "Returns the formation energy for an entry (NOT normalized) from the\nelemental references.\n\nArgs:\nentry: A PDEntry-like object.\n\nReturns:\nFormation energy from the elemental references.", "source": "juraj-google-style"}
{"code": "def files_upload(self, *, file: Union[(str, IOBase)]=None, content: str=None, **kwargs) -> SlackResponse:\n    if ((file is None) and (content is None)):\n        raise e.SlackRequestError('The file or content argument must be specified.')\n    if ((file is not None) and (content is not None)):\n        raise e.SlackRequestError('You cannot specify both the file and the content argument.')\n    if file:\n        return self.api_call('files.upload', files={'file': file}, data=kwargs)\n    elif content:\n        data = kwargs.copy()\n        data.update({'content': content})\n        return self.api_call('files.upload', data=data)", "docstring": "Uploads or creates a file.\n\nArgs:\nfile (str): Supply a file path.\nwhen you'd like to upload a specific file. e.g. 'dramacat.gif'\ncontent (str): Supply content when you'd like to create an\neditable text file containing the specified text. e.g. 'launch plan'\nRaises:\nSlackRequestError: If neither or both the `file` and `content` args are specified.", "source": "codesearchnet"}
{"code": "def _build(self, inputs, is_training=True, dropout_keep_prob=0.5):\n    self._input_shape = tuple(inputs.get_shape().as_list())\n    net = inputs\n    final_index = (self._num_layers - 1)\n    for layer_id in xrange(self._num_layers):\n        net = self._layers[layer_id](net)\n        if ((final_index != layer_id) or self._activate_final):\n            if self._use_dropout:\n                keep_prob = utils.smart_cond(is_training, true_fn=(lambda : dropout_keep_prob), false_fn=(lambda : tf.constant(1.0)))\n                net = tf.nn.dropout(net, keep_prob=keep_prob)\n            net = self._activation(net)\n    return net", "docstring": "Assembles the `MLP` and connects it to the graph.\n\nArgs:\ninputs: A 2D Tensor of size `[batch_size, input_size]`.\nis_training: A bool or tf.Bool Tensor. Indicates whether we are\ncurrently training. Defaults to `True`.\ndropout_keep_prob: The probability that each element is kept when\nboth `use_dropout` and `is_training` are True. Defaults to 0.5.\nReturns:\nA 2D Tensor of size `[batch_size, output_sizes[-1]]`.", "source": "codesearchnet"}
{"code": "def insert(cls, cur, table: str, values: dict):\n        \n        keys = cls._COMMA.join(values.keys())\n        value_place_holder = cls._PLACEHOLDER * len(values)\n        query = cls._insert_string.format(table, keys, value_place_holder[:-1])\n        yield from cur.execute(query, tuple(values.values()))\n        return (yield from cur.fetchone())", "docstring": "Creates an insert statement with only chosen fields\n\nArgs:\ntable: a string indicating the name of the table\nvalues: a dict of fields and values to be inserted\n\nReturns:\nA 'Record' object with table columns as properties", "source": "juraj-google-style"}
{"code": "def _get_corrupted_example(self, x):\n    \n    corruption_type = self.builder_config.corruption_type\n    severity = self.builder_config.severity\n\n    return {\n        'gaussian_noise': corruptions.gaussian_noise,\n        'shot_noise': corruptions.shot_noise,\n        'impulse_noise': corruptions.impulse_noise,\n        'defocus_blur': corruptions.defocus_blur,\n        'frosted_glass_blur': corruptions.frosted_glass_blur,\n        'zoom_blur': corruptions.zoom_blur,\n        'fog': corruptions.fog,\n        'brightness': corruptions.brightness,\n        'contrast': corruptions.contrast,\n        'elastic': corruptions.elastic,\n        'pixelate': corruptions.pixelate,\n        'jpeg_compression': corruptions.jpeg_compression,\n    }[corruption_type](x, severity)", "docstring": "Return corrupted images.\n\nArgs:\nx: numpy array, uncorrupted image.\n\nReturns:\nnumpy array, corrupted images.", "source": "juraj-google-style"}
{"code": "def mme_nodes(mme_base_url, token):\n    \n    nodes = []\n    if not mme_base_url or not token:\n        return nodes\n    url = ''.join([mme_base_url, '/nodes'])\n    nodes = matchmaker_request(url=url, token=token, method='GET')\n    LOG.info('Matchmaker has the following connected nodes:{}'.format(nodes))\n    return nodes", "docstring": "Return the available MatchMaker nodes\n\nArgs:\nmme_base_url(str): base URL of MME service\ntoken(str): MME server authorization token\n\nReturns:\nnodes(list): a list of node dictionaries", "source": "juraj-google-style"}
{"code": "def get_vcs_root():\n    for vcs in (git, hg):\n        repo_root = vcs.repository_root()\n        if repo_root:\n            return (vcs, repo_root)\n    return (None, None)", "docstring": "Returns the vcs module and the root of the repo.\n\nReturns:\nA tuple containing the vcs module to use (git, hg) and the root of the\nrepository. If no repository exists, (None, None) is returned.", "source": "codesearchnet"}
{"code": "class GraniteMoeSharedMLP(nn.Module):\n\n    def __init__(self, config: GraniteMoeSharedConfig):\n        super(GraniteMoeSharedMLP, self).__init__()\n        self.input_size = config.hidden_size\n        self.hidden_size = config.shared_intermediate_size\n        self.activation = ACT2FN[config.hidden_act]\n        self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False)\n        self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False)\n\n    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n        hidden_states = self.input_linear(hidden_states)\n        chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n        hidden_states = self.output_linear(hidden_states)\n        return hidden_states", "docstring": "MLP layer for shared experts\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def get_system_memory():\n    docker_limit = None\n    memory_limit_filename = '/sys/fs/cgroup/memory/memory.limit_in_bytes'\n    if os.path.exists(memory_limit_filename):\n        with open(memory_limit_filename, 'r') as f:\n            docker_limit = int(f.read())\n    psutil_memory_in_bytes = None\n    try:\n        import psutil\n        psutil_memory_in_bytes = psutil.virtual_memory().total\n    except ImportError:\n        pass\n    if (psutil_memory_in_bytes is not None):\n        memory_in_bytes = psutil_memory_in_bytes\n    elif ((sys.platform == 'linux') or (sys.platform == 'linux2')):\n        bytes_in_kilobyte = 1024\n        memory_in_bytes = (vmstat('total memory') * bytes_in_kilobyte)\n    else:\n        memory_in_bytes = sysctl(['sysctl', 'hw.memsize'])\n    if (docker_limit is not None):\n        return min(docker_limit, memory_in_bytes)\n    else:\n        return memory_in_bytes", "docstring": "Return the total amount of system memory in bytes.\n\nReturns:\nThe total amount of system memory in bytes.", "source": "codesearchnet"}
{"code": "def split(pcoll, regex, outputEmpty=False):\n    regex = Regex._regex_compile(regex)\n    outputEmpty = bool(outputEmpty)\n\n    def _process(element):\n        r = regex.split(element)\n        if r and (not outputEmpty):\n            r = list(filter(None, r))\n        yield r\n    return pcoll | FlatMap(_process)", "docstring": "Returns the list of strings obtained by splitting the input on the given\nregular expression. It will not output empty items (by default).\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\noutputEmpty: (optional) Should empty be output. True to output empties\nand false if not.", "source": "github-repos"}
{"code": "def write_graph(graph_or_graph_def, logdir, name, as_text=True):\n    if isinstance(graph_or_graph_def, ops.Graph):\n        graph_def = graph_or_graph_def.as_graph_def()\n    else:\n        graph_def = graph_or_graph_def\n    if sys.byteorder == 'big':\n        if hasattr(graph_def, 'node'):\n            byte_swap_tensor.swap_tensor_content_in_graph_node(graph_def, 'big', 'little')\n        else:\n            byte_swap_tensor.swap_tensor_content_in_graph_function(graph_def, 'big', 'little')\n    if not logdir.startswith('gs:'):\n        file_io.recursive_create_dir(logdir)\n    path = os.path.join(logdir, name)\n    if as_text:\n        file_io.atomic_write_string_to_file(path, text_format.MessageToString(graph_def, float_format=''))\n    else:\n        file_io.atomic_write_string_to_file(path, graph_def.SerializeToString(deterministic=True))\n    return path", "docstring": "Writes a graph proto to a file.\n\nThe graph is written as a text proto unless `as_text` is `False`.\n\n```python\nv = tf.Variable(0, name='my_variable')\nsess = tf.compat.v1.Session()\ntf.io.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')\n```\n\nor\n\n```python\nv = tf.Variable(0, name='my_variable')\nsess = tf.compat.v1.Session()\ntf.io.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')\n```\n\nArgs:\ngraph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.\nlogdir: Directory where to write the graph. This can refer to remote\nfilesystems, such as Google Cloud Storage (GCS).\nname: Filename for the graph.\nas_text: If `True`, writes the graph as an ASCII proto.\n\nReturns:\nThe path of the output proto file.", "source": "github-repos"}
{"code": "class FlaxTopPLogitsWarper(FlaxLogitsWarper):\n\n    def __init__(self, top_p: float, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):\n        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):\n            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')\n        if not isinstance(min_tokens_to_keep, int) or min_tokens_to_keep < 1:\n            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')\n        self.top_p = top_p\n        self.filter_value = filter_value\n        self.min_tokens_to_keep = min_tokens_to_keep\n\n    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])\n        mask_scores = jnp.full_like(scores, self.filter_value)\n        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)\n        score_mask = cumulative_probs < self.top_p\n        score_mask = jnp.roll(score_mask, 1)\n        score_mask |= score_mask.at[:, 0].set(True)\n        score_mask = score_mask.at[:, :self.min_tokens_to_keep].set(True)\n        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)\n        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]\n        return next_scores", "docstring": "[`FlaxLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off.\n\nArgs:\ntop_p (`float`):\nIf set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\nhigher are kept for generation.\nfilter_value (`float`, *optional*, defaults to -inf):\nAll filtered values will be set to this float value.\nmin_tokens_to_keep (`int`, *optional*, defaults to 1):\nMinimum number of tokens that cannot be filtered.", "source": "github-repos"}
{"code": "def cpu_halt_reasons(self):\n    buf_size = self.MAX_NUM_MOES\n    buf = (structs.JLinkMOEInfo * buf_size)()\n    num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size)\n    if (num_reasons < 0):\n        raise errors.JLinkException(num_reasons)\n    return list(buf)[:num_reasons]", "docstring": "Retrieves the reasons that the CPU was halted.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of ``JLinkMOEInfo`` instances specifying the reasons for which\nthe CPU was halted.  This list may be empty in the case that the CPU\nis not halted.\n\nRaises:\nJLinkException: on hardware error.", "source": "codesearchnet"}
{"code": "def split_image(self, image: np.ndarray, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n    height, width = get_image_size(image, input_data_format)\n    mid_width = width // 2\n    mid_height = height // 2\n    return [self._crop(image, 0, 0, mid_width, mid_height, input_data_format), self._crop(image, mid_width, 0, width, mid_height, input_data_format), self._crop(image, 0, mid_height, mid_width, height, input_data_format), self._crop(image, mid_width, mid_height, width, height, input_data_format), image]", "docstring": "Split an image into 4 equal sub-images, and then concatenate that sequence with the original image.\nThat means that a single image becomes a sequence of 5 images.\nThis is a \"trick\" to spend more compute on each image with no changes in the vision encoder.\n\nArgs:\nimage (`np.ndarray`):\nImages to split.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _disconnect_from_device(self, uuid, key, client, unsolicited=False):\n    conn_id = self._validate_connection('disconnect', uuid, key)\n    if (conn_id is None):\n        return\n    conn_data = self._connections[uuid]\n    slug = self._build_device_slug(uuid)\n    message = {'client': client, 'type': 'response', 'operation': 'disconnect'}\n    self.client.reset_sequence(self.topics.gateway_topic(slug, 'control/connect'))\n    self.client.reset_sequence(self.topics.gateway_topic(slug, 'control/action'))\n    try:\n        resp = (yield self._manager.disconnect(conn_id))\n    except Exception as exc:\n        self._logger.exception('Error in manager disconnect')\n        resp = {'success': False, 'reason': ('Internal error: %s' % str(exc))}\n    self._manager.remove_monitor(conn_data['report_monitor'])\n    self._manager.remove_monitor(conn_data['trace_monitor'])\n    if resp['success']:\n        del self._connections[uuid]\n        message['success'] = True\n    else:\n        message['success'] = False\n        message['failure_reason'] = resp['reason']\n    self._logger.info('Client %s disconnected from device 0x%X', client, uuid)\n    if (unsolicited and resp['success']):\n        self._publish_response(slug, {'client': client, 'type': 'notification', 'operation': 'disconnect'})\n    elif (not unsolicited):\n        self._publish_response(slug, message)", "docstring": "Disconnect from a device that we have previously connected to.\n\nArgs:\nuuid (int): The unique id of the device\nkey (string): A 64 byte string used to secure this connection\nclient (string): The client id for who is trying to connect\nto the device.\nunsolicited (bool): Whether the client asked us to disconnect or we\nare forcibly doing it.  Forcible disconnections are sent as notifications\ninstead of responses.", "source": "codesearchnet"}
{"code": "def fetches(self):\n    return self._final_fetches", "docstring": "Return the unique names of tensors to fetch.\n\nReturns:\nA list of strings.", "source": "github-repos"}
{"code": "def parse_table_name(name, project_id=None, dataset_id=None):\n    _project_id = _dataset_id = _table_id = _decorator = None\n    if isinstance(name, basestring):\n        m = re.match(_ABS_TABLE_NAME_PATTERN, name, re.IGNORECASE)\n        if (m is not None):\n            (_project_id, _dataset_id, _table_id, _decorator) = m.groups()\n        else:\n            m = re.match(_REL_TABLE_NAME_PATTERN, name)\n            if (m is not None):\n                groups = m.groups()\n                (_project_id, _dataset_id, _table_id, _decorator) = (project_id, groups[0], groups[1], groups[2])\n            else:\n                m = re.match(_TABLE_NAME_PATTERN, name)\n                if (m is not None):\n                    groups = m.groups()\n                    (_project_id, _dataset_id, _table_id, _decorator) = (project_id, dataset_id, groups[0], groups[1])\n    elif isinstance(name, dict):\n        try:\n            _table_id = name['table_id']\n            _dataset_id = name['dataset_id']\n            _project_id = name['project_id']\n        except KeyError:\n            pass\n    elif (len(name) == 4):\n        (_project_id, _dataset_id, _table_id, _decorator) = name\n    elif (len(name) == 3):\n        (_project_id, _dataset_id, _table_id) = name\n    elif (len(name) == 2):\n        (_dataset_id, _table_id) = name\n    if (not _table_id):\n        raise Exception(('Invalid table name: ' + str(name)))\n    if (not _project_id):\n        _project_id = project_id\n    if (not _dataset_id):\n        _dataset_id = dataset_id\n    if (not _decorator):\n        _decorator = ''\n    return TableName(_project_id, _dataset_id, _table_id, _decorator)", "docstring": "Parses a table name into its individual parts.\n\nArgs:\nname: the name to parse, or a tuple, dictionary or array containing the parts.\nproject_id: the expected project ID. If the name does not contain a project ID,\nthis will be used; if the name does contain a project ID and it does not match\nthis, an exception will be thrown.\ndataset_id: the expected dataset ID. If the name does not contain a dataset ID,\nthis will be used; if the name does contain a dataset ID and it does not match\nthis, an exception will be thrown.\nReturns:\nA TableName named tuple consisting of the full name and individual name parts.\nRaises:\nException: raised if the name doesn't match the expected formats, or a project_id and/or\ndataset_id was provided that does not match that in the name.", "source": "codesearchnet"}
{"code": "def read_handler(Model, name=None, **kwds):\n    \n    async def action_handler(service, action_type, payload, props, **kwds):\n        \n        if action_type == get_crud_action('read', name or Model):\n            \n            message_props = {}\n            \n            if 'correlation_id' in props:\n                \n                message_props['correlation_id'] = props['correlation_id']\n\n            try:\n                \n                resolved = service.schema.execute(payload)\n                \n                response = json.dumps({\n                    'data': {key:value for key,value in resolved.data.items()},\n                    'errors': resolved.errors\n                })\n\n                \n                await service.event_broker.send(\n                    payload=response,\n                    action_type=change_action_status(action_type, success_status()),\n                    **message_props\n                )\n\n            \n            except Exception as err:\n                \n                await service.event_broker.send(\n                    payload=str(err),\n                    action_type=change_action_status(action_type, error_status()),\n                    **message_props\n                )\n\n\n    \n    return action_handler", "docstring": "This factory returns an action handler that responds to read requests\nby resolving the payload as a graphql query against the internal schema.\n\n\nArgs:\nModel (nautilus.BaseModel): The model to delete when the action\nreceived.\n\nReturns:\nfunction(type, payload): The action handler for this model", "source": "juraj-google-style"}
{"code": "def SetValue(self, identifier, value):\n    if (not isinstance(identifier, py2to3.STRING_TYPES)):\n        raise TypeError('Identifier not a string type.')\n    identifier = identifier.lower()\n    self._values[identifier] = value", "docstring": "Sets a value by identifier.\n\nArgs:\nidentifier (str): case insensitive unique identifier for the value.\nvalue (object): value.\n\nRaises:\nTypeError: if the identifier is not a string type.", "source": "codesearchnet"}
{"code": "def find_equivalent_sites(self, site):\n    for sites in self.equivalent_sites:\n        if (site in sites):\n            return sites\n    raise ValueError('Site not in structure')", "docstring": "Finds all symmetrically equivalent sites for a particular site\n\nArgs:\nsite (PeriodicSite): A site in the structure\n\nReturns:\n([PeriodicSite]): List of all symmetrically equivalent sites.", "source": "codesearchnet"}
{"code": "def substitute(expr, var_map):\n    \n    try:\n        if isinstance(expr, SympyBasic):\n            sympy_var_map = {\n                k: v for (k, v) in var_map.items()\n                if isinstance(k, SympyBasic)}\n            return expr.subs(sympy_var_map)\n        else:\n            return expr.substitute(var_map)\n    except AttributeError:\n        if expr in var_map:\n            return var_map[expr]\n        return expr", "docstring": "Substitute symbols or (sub-)expressions with the given replacements and\nre-evaluate the result\n\nArgs:\nexpr: The expression in which to perform the substitution\nvar_map (dict): The substitution dictionary.", "source": "juraj-google-style"}
{"code": "def get_concepts_to_recalculate(self, users, lang, concepts=None):\n    only_one_user = False\n    if (not isinstance(users, list)):\n        only_one_user = True\n        users = [users]\n    mapping = self.get_item_concept_mapping(lang)\n    current_user_stats = defaultdict((lambda : {}))\n    user_stats_qs = UserStat.objects.filter(user__in=users, stat='answer_count')\n    if (concepts is not None):\n        user_stats_qs = user_stats_qs.filter(concept__in=concepts)\n    for user_stat in user_stats_qs:\n        current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat\n    concepts_to_recalculate = defaultdict((lambda : set()))\n    for (user, item, time) in Answer.objects.filter(user__in=users).values_list('user_id', 'item').annotate(Max('time')):\n        if (item not in mapping):\n            continue\n        time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4)\n        time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2)\n        for concept in mapping[item]:\n            if ((user in current_user_stats) and (concept in current_user_stats[user]) and (current_user_stats[user][concept].time > time)):\n                if (not self.has_time_expired(current_user_stats[user][concept].time, time, time_expiration_lower_bound, time_expiration_factor)):\n                    continue\n            if ((concepts is None) or (concept in ([c.pk for c in concepts] if (type(concepts[0]) == Concept) else concepts))):\n                concepts_to_recalculate[user].add(concept)\n    if only_one_user:\n        return concepts_to_recalculate[users[0]]\n    return concepts_to_recalculate", "docstring": "Get concepts which have some changes and have to be recalculated\n\nArgs:\nusers (list of users or user): users whose user stats we are interested in\nlang (str): language of used concepts\nconcepts (Optional[list of concepts]): list of primary keys of concepts or concepts\nDefaults to None meaning all concepts.\n\nReturns:\ndict: user -> set of concepts (int) - in case of list of users\nlist of stats (str) - in case of one user", "source": "codesearchnet"}
{"code": "def _or_join(self, terms):\n        \n\n        if isinstance(terms, (tuple, list)):\n            if len(terms) > 1:\n                return '(' + ' OR '.join(terms) + ')'\n            else:\n                return terms[0]\n        else:\n            return terms", "docstring": "Joins terms using OR operator.\n\nArgs:\nterms (list): terms to join\n\nExamples:\nself._or_join(['term1', 'term2']) -> 'term1 OR term2'\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def wait_for_jobs(jobs):\n    all_running = False\n    while (not all_running):\n        all_running = True\n        time.sleep(5)\n        for job in jobs:\n            job.refresh()\n            scheduled = getattr(job, 'scheduled_at', None)\n            if (scheduled is not None):\n                logger.info(('Waiting for %s on %s [%s]' % (job.uid, job.site, _date2h(scheduled))))\n            all_running = (all_running and (job.state == 'running'))\n            if (job.state == 'error'):\n                raise Exception(('The job %s is in error state' % job))\n    logger.info('All jobs are Running !')", "docstring": "Waits for all the jobs to be running.\n\nArgs:\njobs(list): list of the python-grid5000 jobs to wait for\n\n\nRaises:\nException: if one of the jobs gets in error state.", "source": "codesearchnet"}
{"code": "def get_pluggable_module_information(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/pluggableModuleInformation\"\n        return self._client.get(uri)", "docstring": "Gets all the pluggable module information.\n\nArgs:\nid_or_uri: Can be either the interconnect id or uri.\n\nReturns:\narray: dicts of the pluggable module information.", "source": "juraj-google-style"}
{"code": "def _batch_prepare_for_model_boxes(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n    batch_outputs = {}\n    for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):\n        batch_text_or_text_pair, boxes_example = example\n        outputs = self.prepare_for_model_boxes(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs", "source": "github-repos"}
{"code": "def iter_package_families(paths=None):\n    \n    for path in (paths or config.packages_path):\n        repo = package_repository_manager.get_repository(path)\n        for resource in repo.iter_package_families():\n            yield PackageFamily(resource)", "docstring": "Iterate over package families, in no particular order.\n\nNote that multiple package families with the same name can be returned.\nUnlike packages, families later in the searchpath are not hidden by earlier\nfamilies.\n\nArgs:\npaths (list of str, optional): paths to search for package families,\ndefaults to `config.packages_path`.\n\nReturns:\n`PackageFamily` iterator.", "source": "juraj-google-style"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, token_type_ids=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1]\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`AltCLIPTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoProcessor, AltCLIPModel\n\n>>> model = AltCLIPModel.from_pretrained(\"BAAI/AltCLIP\")\n>>> processor = AutoProcessor.from_pretrained(\"BAAI/AltCLIP\")\n>>> inputs = processor(text=[\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def ChangePassword(self, password_old, password_new):\n        \n        if not self.ValidatePassword(password_old):\n            return False\n\n        if isinstance(password_new, str):\n            password_new = password_new.encode('utf-8')\n\n        password_key = hashlib.sha256(password_new)\n        self.SaveStoredData(\"PasswordHash\", password_key)\n        self.SaveStoredData(\"MasterKey\", AES.new(self._master_key, AES.MODE_CBC, self._iv))\n\n        return True", "docstring": "Change the password used to protect the private key.\n\nArgs:\npassword_old (str): the current password used to encrypt the private key.\npassword_new (str): the new to be used password to encrypt the private key.\n\nReturns:\nbool: whether the password has been changed", "source": "juraj-google-style"}
{"code": "def update_location_centroid(point, cluster, max_distance, min_samples):\n    cluster.append(point)\n    points = [p.gen2arr() for p in cluster]\n    eps = estimate_meters_to_deg(max_distance, precision=6)\n    p_cluster = DBSCAN(eps=eps, min_samples=min_samples)\n    p_cluster.fit(points)\n    clusters = {}\n    for (i, label) in enumerate(p_cluster.labels_):\n        if (label in clusters.keys()):\n            clusters[label].append(points[i])\n        else:\n            clusters[label] = [points[i]]\n    centroids = []\n    biggest_centroid_l = (- float('inf'))\n    biggest_centroid = None\n    for (label, n_cluster) in clusters.items():\n        centroid = compute_centroid(n_cluster)\n        centroids.append(centroid)\n        if ((label >= 0) and (len(n_cluster) >= biggest_centroid_l)):\n            biggest_centroid_l = len(n_cluster)\n            biggest_centroid = centroid\n    if (biggest_centroid is None):\n        biggest_centroid = compute_centroid(points)\n    return (biggest_centroid, cluster)", "docstring": "Updates the centroid of a location cluster with another point\n\nArgs:\npoint (:obj:`Point`): Point to add to the cluster\ncluster (:obj:`list` of :obj:`Point`): Location cluster\nmax_distance (float): Max neighbour distance\nmin_samples (int): Minimum number of samples\nReturns:\n(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid\nand new point cluster (given cluster + given point)", "source": "codesearchnet"}
{"code": "def write_file(self, filename):\n        \n        with open(filename, \"w\") as f:\n            f.write(self.__str__())", "docstring": "Write the PWSCF input file.\n\nArgs:\nfilename (str): The string filename to output to.", "source": "juraj-google-style"}
{"code": "def is_compatible_with(self, spec_or_value):\n    if not isinstance(spec_or_value, TypeSpec):\n        spec_or_value = type_spec_from_value(spec_or_value)\n    if type(self) is not type(spec_or_value):\n        return False\n    return self.__is_compatible(self._serialize(), spec_or_value._serialize())", "docstring": "Returns true if `spec_or_value` is compatible with this TypeSpec.\n\nPrefer using \"is_subtype_of\" and \"most_specific_common_supertype\" wherever\npossible.\n\nArgs:\nspec_or_value: A TypeSpec or TypeSpec associated value to compare against.", "source": "github-repos"}
{"code": "def get_config(self):\n    raise NotImplementedError(str(self) + ' does not implement get_config()')", "docstring": "Returns the config of the regularizer.\n\nAn regularizer config is a Python dictionary (serializable)\ncontaining all configuration parameters of the regularizer.\nThe same regularizer can be reinstantiated later\n(without any saved state) from this configuration.\n\nThis method is optional if you are just training and executing models,\nexporting to and from SavedModels, or using weight checkpoints.\n\nThis method is required for saving and loading models to HDF5 formats,\nKeras model cloning, some visualization utilities,\nand exporting models to and from JSON.\n\nReturns:\nPython dictionary.", "source": "github-repos"}
{"code": "def ws_db004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `ws_db004`'.format(value))\n\n        self._ws_db004 = value", "docstring": "Corresponds to IDD Field `ws_db004`\nMean wind speed coincident with 0.4% dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `ws_db004`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _DetectStaticBatchSize(node_def):\n    shapes = node_def.attr['_output_shapes'].list.shape\n    batch_size = set((list(s.dim)[0].size if len(s.dim) >= 2 else None for s in shapes))\n    if len(batch_size) == 1 and list(batch_size)[0] >= 1:\n        return list(batch_size)[0]\n    return None", "docstring": "Returns the static batch size of an operation or None.\n\nIt is incorrect to use the output shapes to find the batch size of an\noperation, as the segmenter actually uses the input shapes. However, it is\na simplification and works for most of the cases for the test purposes.\n\nArgs:\nnode_def: `tf.NodeDef`. The target node for analysis.\n\nReturns:\nIf all the outputs of the node have the same static batch size, returns\nthe int value for the batch size. Otherwise returns None.", "source": "github-repos"}
{"code": "def convert(data, in_format, out_format, name=None, pretty=False):\n    dumps = (json.dumps if pretty else json.compress)\n    if ((not has_ob) and (in_format == 'json') and (out_format == 'json')):\n        return dumps((json.loads(data) if is_string(data) else data))\n    elif (not has_ob):\n        raise ImportError('Chemical file format conversion requires pybel.')\n    if (in_format == 'json'):\n        mol = json_to_pybel((json.loads(data) if is_string(data) else data))\n    elif (in_format == 'pybel'):\n        mol = data\n    else:\n        mol = pybel.readstring(in_format, data)\n    if (not mol.OBMol.HasNonZeroCoords()):\n        mol.make3D()\n    if ((in_format == 'mmcif') and hasattr(mol, 'unitcell')):\n        mol.unitcell.FillUnitCell(mol.OBMol)\n        mol.OBMol.ConnectTheDots()\n        mol.OBMol.PerceiveBondOrders()\n    mol.OBMol.Center()\n    if (out_format == 'pybel'):\n        return mol\n    elif (out_format == 'object'):\n        return pybel_to_json(mol, name)\n    elif (out_format == 'json'):\n        return dumps(pybel_to_json(mol, name))\n    else:\n        return mol.write(out_format)", "docstring": "Converts between two inputted chemical formats.\n\nArgs:\ndata: A string representing the chemical file to be converted. If the\n`in_format` is \"json\", this can also be a Python object\nin_format: The format of the `data` string. Can be \"json\" or any format\nrecognized by Open Babel\nout_format: The format to convert to. Can be \"json\" or any format\nrecognized by Open Babel\nname: (Optional) If `out_format` is \"json\", will save the specified\nvalue in a \"name\" property\npretty: (Optional) If True and `out_format` is \"json\", will pretty-\nprint the output for human readability\nReturns:\nA string representing the inputted `data` in the specified `out_format`", "source": "codesearchnet"}
{"code": "def airborne_position(msg0, msg1, t0, t1):\n    mb0 = common.hex2bin(msg0)[32:]\n    mb1 = common.hex2bin(msg1)[32:]\n    cprlat_even = (common.bin2int(mb0[22:39]) / 131072.0)\n    cprlon_even = (common.bin2int(mb0[39:56]) / 131072.0)\n    cprlat_odd = (common.bin2int(mb1[22:39]) / 131072.0)\n    cprlon_odd = (common.bin2int(mb1[39:56]) / 131072.0)\n    air_d_lat_even = (360.0 / 60)\n    air_d_lat_odd = (360.0 / 59)\n    j = common.floor((((59 * cprlat_even) - (60 * cprlat_odd)) + 0.5))\n    lat_even = float((air_d_lat_even * ((j % 60) + cprlat_even)))\n    lat_odd = float((air_d_lat_odd * ((j % 59) + cprlat_odd)))\n    if (lat_even >= 270):\n        lat_even = (lat_even - 360)\n    if (lat_odd >= 270):\n        lat_odd = (lat_odd - 360)\n    if (common.cprNL(lat_even) != common.cprNL(lat_odd)):\n        return None\n    if (t0 > t1):\n        lat = lat_even\n        nl = common.cprNL(lat)\n        ni = max((common.cprNL(lat) - 0), 1)\n        m = common.floor((((cprlon_even * (nl - 1)) - (cprlon_odd * nl)) + 0.5))\n        lon = ((360.0 / ni) * ((m % ni) + cprlon_even))\n    else:\n        lat = lat_odd\n        nl = common.cprNL(lat)\n        ni = max((common.cprNL(lat) - 1), 1)\n        m = common.floor((((cprlon_even * (nl - 1)) - (cprlon_odd * nl)) + 0.5))\n        lon = ((360.0 / ni) * ((m % ni) + cprlon_odd))\n    if (lon > 180):\n        lon = (lon - 360)\n    return (round(lat, 5), round(lon, 5))", "docstring": "Decode airborn position from a pair of even and odd position message\n\nArgs:\nmsg0 (string): even message (28 bytes hexadecimal string)\nmsg1 (string): odd message (28 bytes hexadecimal string)\nt0 (int): timestamps for the even message\nt1 (int): timestamps for the odd message\n\nReturns:\n(float, float): (latitude, longitude) of the aircraft", "source": "codesearchnet"}
{"code": "def getitem_row_array(self, key):\n    key = list(key)\n\n    def getitem(df, internal_indices=[]):\n        return df.iloc[internal_indices]\n    result = self.data.apply_func_to_select_indices(1, getitem, key, keep_remaining=False)\n    new_index = self.index[key]\n    return self.__constructor__(result, new_index, self.columns, self._dtype_cache)", "docstring": "Get row data for target labels.\n\nArgs:\nkey: Target numeric indices by which to retrieve data.\n\nReturns:\nA new QueryCompiler.", "source": "codesearchnet"}
{"code": "def datetimeobj_epoch(value):\n    \n    return datetime.datetime.utcfromtimestamp(int(value)).replace(tzinfo=TZ_GMT)", "docstring": "Convert timestamp string to a datetime object.\n\nTimestamps strings like '1383470155' are able to be converted by this\nfunction.\n\nArgs:\nvalue: A timestamp string as seconds since epoch.\n\nReturns:\nA datetime object.\n\nRaises:\nValueError: If timestamp is invalid.", "source": "juraj-google-style"}
{"code": "def get_sns_subscriptions(app_name, env, region):\n    session = boto3.Session(profile_name=env, region_name=region)\n    sns_client = session.client('sns')\n    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)\n    lambda_subscriptions = []\n    subscriptions = sns_client.list_subscriptions()\n    for subscription in subscriptions['Subscriptions']:\n        if ((subscription['Protocol'] == 'lambda') and (subscription['Endpoint'] == lambda_alias_arn)):\n            lambda_subscriptions.append(subscription['SubscriptionArn'])\n    if (not lambda_subscriptions):\n        LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)\n    return lambda_subscriptions", "docstring": "List SNS lambda subscriptions.\n\nReturns:\nlist: List of Lambda subscribed SNS ARNs.", "source": "codesearchnet"}
{"code": "def _instantiate(class_, type_, __value, *args, **kwargs):\n    try:\n        return class_(__value, *args, **kwargs)\n    except TypeError:\n        try:\n            return type_(__value, *args, **kwargs)\n        except Exception:\n            return __value", "docstring": "Instantiate the object if possible.\n\nArgs:\nclass_: The class to instantiate.\ntype_: The the class is uninstantiable, attempt to cast to a base\ntype.\n__value: The value to return if the class and type are\nuninstantiable.\n*args: The positional arguments to pass to the class.\n**kwargs: The keyword arguments to pass to the class.\n\nReturns:\nThe class or base type instantiated using the arguments. If it is\nnot possible to instantiate either, returns __value.", "source": "codesearchnet"}
{"code": "def is_tensor_final(self, tensor_name):\n    tensor = self._name_to_tensor(tensor_name)\n    return (tensor in self._final_tensors)", "docstring": "Whether a tensor is a final output of the computation.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na boolean indicating whether the tensor was a final output.", "source": "codesearchnet"}
{"code": "def parse_conf(self, keys=[]):\n        \n        confs = self.app.config.get('WAFFLE_CONFS', {})\n        if not keys:\n            keys = confs.keys()\n\n        result = {}\n\n        for key in keys:\n            \n            if key.startswith('WAFFLE_'):\n                continue\n\n            \n            if key not in confs.keys():\n                continue\n\n            stored_conf = self.configstore.get(key)\n\n            if not stored_conf:\n                \n                value = confs[key].get('default', '')\n                stored_conf = self.configstore.put(key, util.serialize(value))\n                self.configstore.commit()\n\n            else:\n                \n                value = util.deserialize(stored_conf.get_value())\n\n            result[stored_conf.get_key()] = value\n\n        return result", "docstring": "Parse configuration values from the database.\n\nThe extension must have been previously initialized.\n\nIf a key is not found in the database, it will be created with the\ndefault value specified.\n\nArguments:\nkeys (list[str]): list of keys to parse. If the list is empty, then\nall the keys known to the application will be used.\n\nReturns:\ndict of the parsed config values.", "source": "juraj-google-style"}
{"code": "def set_features(self, partition=1):\n        \n        if len(self.json) < partition + 1:\n            raise ValueError('Not enough dates for the specified partition size: {0}.  Try a smaller partition.'.format(partition))\n\n        data = []\n        for offset in range(len(self.json) - partition):\n            json = self.json[offset : offset + partition]\n            data.append(eval_features(json))\n        return pd.DataFrame(data=data, dtype=np.float32)", "docstring": "Parses market data JSON for technical analysis indicators\n\nArgs:\npartition: Int of how many dates to take into consideration\nwhen evaluating technical analysis indicators.\n\nReturns:\nPandas DataFrame instance with columns as numpy.float32 features.", "source": "juraj-google-style"}
{"code": "def call(self, decision_points: List[pg.geno.DecisionPoint], global_state: Optional[pg.geno.AttributeDict]=None, step: int=0) -> List[pg.geno.DecisionPoint]:", "docstring": "Implementation of filtering logic. Subclass to override.\n\nArgs:\ndecision_points: A list of decision points as candidates for filtering.\nglobal_state: An optional keyword argument as the global state.\nstep: An optional keyword argument as current step of evolution.\n\nReturns:\nA list of decision points that should be kept.", "source": "github-repos"}
{"code": "def normalize(input_tensor, output_tensor):\n    image_dims = utils.get_img_shape(input_tensor)[1:]\n    return (output_tensor / np.prod(image_dims))", "docstring": "Normalizes the `output_tensor` with respect to `input_tensor` dimensions.\nThis makes regularizer weight factor more or less uniform across various input image dimensions.\n\nArgs:\ninput_tensor: An tensor of shape: `(samples, channels, image_dims...)` if `image_data_format=\nchannels_first` or `(samples, image_dims..., channels)` if `image_data_format=channels_last`.\noutput_tensor: The tensor to normalize.\n\nReturns:\nThe normalized tensor.", "source": "codesearchnet"}
{"code": "def ef_plugin(service_name):\n  \n  def class_rebuilder(cls):\n\n    class EFPlugin(cls):\n      \n\n      def __init__(self, context, clients):\n        self.service = service_name\n        self.context = context\n        self.clients = clients\n        self.oInstance = cls()\n\n      def __getattribute__(self, s):\n        \n        try:\n          x = super(EFPlugin, self).__getattribute__(s)\n        except AttributeError:\n          pass\n        else:\n          return x\n        return self.oInstance.__getattribute__(s)\n\n    return EFPlugin\n\n  return class_rebuilder", "docstring": "Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code.\n\nArgs:\nservice_name (str): The name of the service being extended.\n\nExample:\n@ef_plugin('ef-generate')\nclass NewRelicPlugin(object):\n\ndef run(self):\nexec_code()", "source": "juraj-google-style"}
{"code": "def wc(filename, contents, parsed=None, is_jekyll=False):\n    if is_jekyll:\n        fmt = 'jekyll'\n    else:\n        fmt = 'md/txt'\n    body = (parsed.strip() if parsed else contents.strip())\n    words = re.sub('\\\\s+', ' ', body, re.MULTILINE)\n    for punctuation in INTERSTITIAL_PUNCTUATION:\n        words = re.sub(punctuation, ' ', words)\n    punct = re.compile('[^\\\\w\\\\s]', re.U)\n    words = punct.sub('', words)\n    real_characters = re.sub('\\\\s', '', words)\n    paragraphs = [(1 if (len(x) == 0) else 0) for x in contents.strip().splitlines()]\n    for (index, paragraph) in enumerate(paragraphs):\n        if ((paragraph == 1) and (paragraphs[(index + 1)] == 1)):\n            paragraphs[index] = 0\n    return {'counts': {'file': filename, 'type': fmt, 'paragraphs': (sum(paragraphs) + 1), 'words': len(re.split('\\\\s+', words)), 'characters_real': len(real_characters), 'characters_total': len(words)}}", "docstring": "Count the words, characters, and paragraphs in a string.\n\nArgs:\ncontents: the original string to count\nfilename (optional): the filename as provided to the CLI\nparsed (optional): a parsed string, expected to be plaintext only\nis_jekyll: whether the original contents were from a Jekyll file\n\nReturns:\nAn object containing the various counts", "source": "codesearchnet"}
{"code": "def fn(x: int, y: int):\n    pass", "docstring": "Test function\n\nArgs:\nx: The first input\n\ny: The second input. This is a longer description\nthat spans multiple lines with indentation and stuff.\n\nReturns:\nGod knows what", "source": "github-repos"}
{"code": "def cancel(self):\n    if (not self.id):\n        raise WorkflowError('Workflow is not running.  Cannot cancel.')\n    if self.batch_values:\n        self.workflow.batch_workflow_cancel(self.id)\n    else:\n        self.workflow.cancel(self.id)", "docstring": "Cancel a running workflow.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def filter_lines(lines, filter_regex, groups=None):\n    pattern = re.compile(filter_regex)\n    for line in lines:\n        match = pattern.search(line)\n        if match:\n            if (groups is None):\n                (yield line)\n            elif (len(groups) == 1):\n                (yield match.group(groups[0]))\n            else:\n                matched_groups = match.groupdict()\n                (yield tuple((matched_groups.get(group) for group in groups)))", "docstring": "Filters out the lines not matching the pattern.\n\nArgs:\nlines: list[string]: lines to filter.\npattern: string: regular expression to filter out lines.\n\nReturns: list[string]: the list of filtered lines.", "source": "codesearchnet"}
{"code": "async def report_winner(self, winner: Participant, scores_csv: str):\n    (await self._report(scores_csv, winner._id))", "docstring": "report scores and give a winner\n\n|methcoro|\n\nArgs:\nwinner: :class:Participant instance\nscores_csv: Comma separated set/game scores with player 1 score first (e.g. \"1-3,3-0,3-2\")\n\nRaises:\nValueError: scores_csv has a wrong format\nAPIException", "source": "codesearchnet"}
{"code": "def load_op_library(library_filename):\n    lib_handle = py_tf.TF_LoadLibrary(library_filename)\n    try:\n        wrappers = _pywrap_python_op_gen.GetPythonWrappers(py_tf.TF_GetOpList(lib_handle))\n    finally:\n        py_tf.TF_DeleteLibraryHandle(lib_handle)\n    module_name = hashlib.sha1(wrappers).hexdigest()\n    if module_name in sys.modules:\n        return sys.modules[module_name]\n    module_spec = importlib.machinery.ModuleSpec(module_name, None)\n    module = importlib.util.module_from_spec(module_spec)\n    exec(wrappers, module.__dict__)\n    setattr(module, '_IS_TENSORFLOW_PLUGIN', True)\n    sys.modules[module_name] = module\n    return module", "docstring": "Loads a TensorFlow plugin, containing custom ops and kernels.\n\nPass \"library_filename\" to a platform-specific mechanism for dynamically\nloading a library. The rules for determining the exact location of the\nlibrary are platform-specific and are not documented here. When the\nlibrary is loaded, ops and kernels registered in the library via the\n`REGISTER_*` macros are made available in the TensorFlow process. Note\nthat ops with the same name as an existing op are rejected and not\nregistered with the process.\n\nArgs:\nlibrary_filename: Path to the plugin.\nRelative or absolute filesystem path to a dynamic library file.\n\nReturns:\nA python module containing the Python wrappers for Ops defined in\nthe plugin.\n\nRaises:\nRuntimeError: when unable to load the library or get the python wrappers.", "source": "github-repos"}
{"code": "def assert_files_same(path1, path2):\n    difflines = compare_files(path1, path2)\n    assert (len(difflines) == 0), ''.join((['\\n'] + difflines))", "docstring": "Asserts that two files are the same and returns delta using\n-, ?, + format if not\n\nArgs:\npath1 (str): Path to first file\npath2 (str): Path to second file\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def languages(self, **kwargs):\n        \n        path = '/projects/%s/languages' % self.get_id()\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get languages used in the project with percentage value.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request", "source": "juraj-google-style"}
{"code": "def get_kerberos_ticket(username, password):\n        \n\n        cache = \"/tmp/ion-%s\" % uuid.uuid4()\n\n        logger.debug(\"Setting KRB5CCNAME to 'FILE:{}'\".format(cache))\n        os.environ[\"KRB5CCNAME\"] = \"FILE:\" + cache\n\n        try:\n            realm = settings.CSL_REALM\n            kinit = pexpect.spawnu(\"/usr/bin/kinit {}@{}\".format(username, realm), timeout=settings.KINIT_TIMEOUT)\n            kinit.expect(\":\")\n            kinit.sendline(password)\n            returned = kinit.expect([pexpect.EOF, \"password:\"])\n            if returned == 1:\n                logger.debug(\"Password for {}@{} expired, needs reset\".format(username, realm))\n                return \"reset\"\n            kinit.close()\n            exitstatus = kinit.exitstatus\n        except pexpect.TIMEOUT:\n            KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)\n            exitstatus = 1\n\n        if exitstatus != 0:\n            try:\n                realm = settings.AD_REALM\n                kinit = pexpect.spawnu(\"/usr/bin/kinit {}@{}\".format(username, realm), timeout=settings.KINIT_TIMEOUT)\n                kinit.expect(\":\")\n                kinit.sendline(password)\n                returned = kinit.expect([pexpect.EOF, \"password:\"])\n                if returned == 1:\n                    return False\n                kinit.close()\n                exitstatus = kinit.exitstatus\n            except pexpect.TIMEOUT:\n                KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)\n                exitstatus = 1\n\n        if \"KRB5CCNAME\" in os.environ:\n            subprocess.check_call(['kdestroy', '-c', os.environ[\"KRB5CCNAME\"]])\n            del os.environ[\"KRB5CCNAME\"]\n\n        if exitstatus == 0:\n            logger.debug(\"Kerberos authorized {}@{}\".format(username, realm))\n            return True\n        else:\n            logger.debug(\"Kerberos failed to authorize {}\".format(username))\n            return False", "docstring": "Attempts to create a Kerberos ticket for a user.\n\nArgs:\nusername\nThe username.\npassword\nThe password.\n\nReturns:\nBoolean indicating success or failure of ticket creation", "source": "juraj-google-style"}
{"code": "def cancel_signature_request(self, signature_request_id):\n    request = self._get_request()\n    request.post(url=(self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id), get_json=False)", "docstring": "Cancels a SignatureRequest\n\nCancels a SignatureRequest. After canceling, no one will be able to sign\nor access the SignatureRequest or its documents. Only the requester can\ncancel and only before everyone has signed.\n\nArgs:\n\nsigning_request_id (str): The id of the signature request to cancel\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _PrepareAttributeContainer(self, attribute_container):\n    attribute_values_hash = hash(attribute_container.GetAttributeValuesString())\n    identifier = identifiers.FakeIdentifier(attribute_values_hash)\n    attribute_container.SetIdentifier(identifier)\n    return copy.deepcopy(attribute_container)", "docstring": "Prepares an attribute container for storage.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.\n\nReturns:\nAttributeContainer: copy of the attribute container to store in\nthe fake storage.", "source": "codesearchnet"}
{"code": "def window_partition(hidden_state, window_size):\n    batch_size, height, width, num_channels = hidden_state.shape\n    pad_height = (window_size - height % window_size) % window_size\n    pad_width = (window_size - width % window_size) % window_size\n    hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height))\n    padded_height, padded_width = (height + pad_height, width + pad_width)\n    hidden_state = hidden_state.view(batch_size, padded_height \n    windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)\n    return (windows, (padded_height, padded_width))", "docstring": "Partition into non-overlapping windows with padding if needed.\n\nArgs:\nhidden_state (`torch.Tensor`):\nInput tokens with [batch_size, height, width, num_channels].\nwindow_size (`int`):\nWindow size.\n\nReturns:\n`tuple(torch.FloatTensor)` comprising various elements:\n- windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels].\n- (padded_height, padded_width): padded height and width before partition", "source": "github-repos"}
{"code": "def escape_meta(self, string, pos):\n\n\t\t\n\n\t\t\n\t\tif pos > 0 and string[pos - 1] == \"\\\\\":\n\t\t\tstring = string[:pos - 1] + string[pos:]\n\t\telse:\n\t\t\twarnings.warn(\"Un-escaped meta-character: '{0}' (Escape\"\n\t\t\t\t\t\t  \" it with a '\\\\')\".format(string[pos]),\n\t\t\t\t\t\t  Warning)\n\t\t\tpos += 1\n\n\t\tmeta = self.meta.search(string, pos)\n\n\t\treturn string, meta", "docstring": "Checks if a meta character is escaped or else warns about it.\n\nIf the meta character has an escape character ('\\') preceding it,\nthe meta character is escaped. If it does not, a warning is emitted\nthat the user should escape it.\n\nArguments:\nstring (str): The relevant string in which the character was found.\npos (int): The index of the meta character within the string.\n\nReturns:\nThe possibly escaped string and the next meta match.", "source": "juraj-google-style"}
{"code": "def save_state(self, out_path):\n    state = self.dump_state()\n    state = _clean_intenum(state)\n    with open(out_path, 'w') as outfile:\n        json.dump(state, outfile, indent=4)", "docstring": "Save the current state of this emulated object to a file.\n\nArgs:\nout_path (str): The path to save the dumped state of this emulated\nobject.", "source": "codesearchnet"}
{"code": "def build_java_worker_command(java_worker_options, redis_address, plasma_store_name, raylet_name, redis_password, temp_dir):\n    assert (java_worker_options is not None)\n    command = 'java '.format(java_worker_options)\n    if (redis_address is not None):\n        command += '-Dray.redis.address={} '.format(redis_address)\n    if (plasma_store_name is not None):\n        command += '-Dray.object-store.socket-name={} '.format(plasma_store_name)\n    if (raylet_name is not None):\n        command += '-Dray.raylet.socket-name={} '.format(raylet_name)\n    if (redis_password is not None):\n        command += '-Dray.redis.password={} '.format(redis_password)\n    command += '-Dray.home={} '.format(RAY_HOME)\n    command += '-Dray.log-dir={} '.format(os.path.join(temp_dir, 'sockets'))\n    if java_worker_options:\n        command += (java_worker_options + ' ')\n    command += 'org.ray.runtime.runner.worker.DefaultWorker'\n    return command", "docstring": "This method assembles the command used to start a Java worker.\n\nArgs:\njava_worker_options (str): The command options for Java worker.\nredis_address (str): Redis address of GCS.\nplasma_store_name (str): The name of the plasma store socket to connect\nto.\nraylet_name (str): The name of the raylet socket to create.\nredis_password (str): The password of connect to redis.\ntemp_dir (str): The path of the temporary directory Ray will use.\nReturns:\nThe command string for starting Java worker.", "source": "codesearchnet"}
{"code": "def completion(self, device, folder):\n        \n        return self.get(\n            'completion',\n            params={'folder': folder, 'device': device}\n        ).get('completion', None)", "docstring": "Returns the completion percentage (0 to 100) for a given device\nand folder.\n\nArgs:\ndevice (str): The Syncthing device the folder is syncing to.\nfolder (str): The folder that is being synced.\n\nReturs:\nint", "source": "juraj-google-style"}
{"code": "def _PrintEventLabelsCounter(self, event_labels_counter, session_identifier=None):\n    if (not event_labels_counter):\n        return\n    title = 'Event tags generated per label'\n    if session_identifier:\n        title = '{0:s}: {1:s}'.format(title, session_identifier)\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, column_names=['Label', 'Number of event tags'], title=title)\n    for (key, value) in sorted(event_labels_counter.items()):\n        if (key == 'total'):\n            continue\n        table_view.AddRow([key, value])\n    try:\n        total = event_labels_counter['total']\n    except KeyError:\n        total = 'N/A'\n    table_view.AddRow(['Total', total])\n    table_view.Write(self._output_writer)", "docstring": "Prints the event labels counter.\n\nArgs:\nevent_labels_counter (collections.Counter): number of event tags per\nlabel.\nsession_identifier (Optional[str]): session identifier.", "source": "codesearchnet"}
{"code": "def is_commit_id_equal(self, dest, name):\n    if (not name):\n        return False\n    return (self.get_revision(dest) == name)", "docstring": "Return whether the current commit hash equals the given name.\n\nArgs:\ndest: the repository directory.\nname: a string name.", "source": "codesearchnet"}
{"code": "def size(self, url):\n    return self.metadata(url).size_in_bytes", "docstring": "Fetches file size for a URL.\n\nReturns:\nint size of path according to the FileSystem.\n\nRaises:\n``BeamIOError``: if url doesn't exist.", "source": "github-repos"}
{"code": "def msgconvert(email):\n    \n    log.debug(\"Started converting Outlook email\")\n    temph, temp = tempfile.mkstemp(prefix=\"outlook_\")\n    command = [\"msgconvert\", \"--outfile\", temp, email]\n\n    try:\n        if six.PY2:\n            with open(os.devnull, \"w\") as devnull:\n                out = subprocess.Popen(\n                    command, stdin=subprocess.PIPE,\n                    stdout=subprocess.PIPE, stderr=devnull)\n        elif six.PY3:\n            out = subprocess.Popen(\n                command, stdin=subprocess.PIPE,\n                stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n    except OSError:\n        message = \"To use this function you must install 'msgconvert' tool\"\n        log.exception(message)\n        raise MailParserOSError(message)\n\n    else:\n        stdoutdata, _ = out.communicate()\n        return temp, stdoutdata.decode(\"utf-8\").strip()\n\n    finally:\n        os.close(temph)", "docstring": "Exec msgconvert tool, to convert msg Outlook\nmail in eml mail format\n\nArgs:\nemail (string): file path of Outlook msg mail\n\nReturns:\ntuple with file path of mail converted and\nstandard output data (unicode Python 2, str Python 3)", "source": "juraj-google-style"}
{"code": "def disaggregate_humidity(data_daily, method='equal', temp=None, a0=None, a1=None, kr=None, month_hour_precip_mean=None, preserve_daily_mean=False):\n    assert (method in ('equal', 'minimal', 'dewpoint_regression', 'min_max', 'linear_dewpoint_variation', 'month_hour_precip_mean')), 'Invalid option'\n    if (method == 'equal'):\n        hum_disagg = melodist.distribute_equally(data_daily.hum)\n    elif (method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation')):\n        if (method == 'minimal'):\n            a0 = 0\n            a1 = 1\n        assert ((a0 is not None) and (a1 is not None)), 'a0 and a1 must be specified'\n        tdew_daily = (a0 + (a1 * data_daily.tmin))\n        tdew = melodist.distribute_equally(tdew_daily)\n        if (method == 'linear_dewpoint_variation'):\n            assert (kr is not None), 'kr must be specified'\n            assert (kr in (6, 12)), 'kr must be 6 or 12'\n            tdew_delta = (0.5 * np.sin(((((temp.index.hour + 1) * np.pi) / kr) - ((3.0 * np.pi) / 4.0))))\n            tdew_nextday = tdew.shift((- 24))\n            tdew_nextday.iloc[(- 24):] = tdew.iloc[(- 24):]\n            tdew += (((temp.index.hour / 24.0) * (tdew_nextday - tdew)) + tdew_delta)\n        sat_vap_press_tdew = util.vapor_pressure(tdew, 100)\n        sat_vap_press_t = util.vapor_pressure(temp, 100)\n        hum_disagg = pd.Series(index=temp.index, data=((100 * sat_vap_press_tdew) / sat_vap_press_t))\n    elif (method == 'min_max'):\n        assert (('hum_min' in data_daily.columns) and ('hum_max' in data_daily.columns)), 'Minimum and maximum humidity must be present in data frame'\n        hmin = melodist.distribute_equally(data_daily.hum_min)\n        hmax = melodist.distribute_equally(data_daily.hum_max)\n        tmin = melodist.distribute_equally(data_daily.tmin)\n        tmax = melodist.distribute_equally(data_daily.tmax)\n        hum_disagg = (hmax + (((temp - tmin) / (tmax - tmin)) * (hmin - hmax)))\n    elif (method == 'month_hour_precip_mean'):\n        assert (month_hour_precip_mean is not None)\n        precip_equal = melodist.distribute_equally(data_daily.precip)\n        hum_disagg = pd.Series(index=precip_equal.index)\n        locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, (precip_equal > 0)))\n        hum_disagg[:] = month_hour_precip_mean.loc[locs].values\n    if preserve_daily_mean:\n        daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean()))\n        bias = melodist.util.distribute_equally((daily_mean_df.disagg - daily_mean_df.obs))\n        bias = bias.fillna(0)\n        hum_disagg -= bias\n    return hum_disagg.clip(0, 100)", "docstring": "general function for humidity disaggregation\n\nArgs:\ndaily_data: daily values\nmethod: keyword specifying the disaggregation method to be used\ntemp: hourly temperature time series (necessary for some methods)\nkr: parameter for linear_dewpoint_variation method (6 or 12)\nmonth_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values\npreserve_daily_mean: if True, correct the daily mean values of the disaggregated\ndata with the observed daily means.\n\nReturns:\nDisaggregated hourly values of relative humidity.", "source": "codesearchnet"}
{"code": "def register_instances(name, instances, region=None, key=None, keyid=None, profile=None):\n    if (isinstance(instances, six.string_types) or isinstance(instances, six.text_type)):\n        instances = [instances]\n    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n    try:\n        registered_instances = conn.register_instances(name, instances)\n    except boto.exception.BotoServerError as error:\n        log.warning(error)\n        return False\n    registered_instance_ids = [instance.id for instance in registered_instances]\n    register_failures = set(instances).difference(set(registered_instance_ids))\n    if register_failures:\n        log.warning('Instance(s): %s not registered with ELB %s.', list(register_failures), name)\n        register_result = False\n    else:\n        register_result = True\n    return register_result", "docstring": "Register instances with an ELB.  Instances is either a string\ninstance id or a list of string instance id's.\n\nReturns:\n\n- ``True``: instance(s) registered successfully\n- ``False``: instance(s) failed to be registered\n\nCLI example:\n\n.. code-block:: bash\n\nsalt myminion boto_elb.register_instances myelb instance_id\nsalt myminion boto_elb.register_instances myelb \"[instance_id,instance_id]\"", "source": "codesearchnet"}
{"code": "def _functions(self) -> list[StructuredFunctionWrapper]:\n    return []", "docstring": "Returns a list of functions associated with this dataset.\n\nReturns:\nA list of `StructuredFunctionWrapper` objects.", "source": "github-repos"}
{"code": "def orient_directed_graph(self, data, graph):\n    warnings.warn('The algorithm is ran on the skeleton of the given graph.')\n    return self.orient_undirected_graph(data, nx.Graph(graph))", "docstring": "Run the algorithm on a directed_graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.DiGraph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution on the given skeleton.\n\n.. warning::\nThe algorithm is ran on the skeleton of the given graph.", "source": "codesearchnet"}
{"code": "def get_airport_metars(self, iata, page=1, limit=100):\n    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)\n    w = self._fr24.get_airport_weather(url)\n    return w['metar']", "docstring": "Retrieve the metar data at the current time\n\nGiven the IATA code of an airport, this method returns the metar information.\n\nArgs:\niata (str): The IATA code for an airport, e.g. HYD\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nThe metar data for the airport\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_airport_metars('HYD')", "source": "codesearchnet"}
{"code": "def HandleAccounts(self, result):\n    \n    self.logger.debug('Checking for changes to user accounts.')\n    configured_users = self.utils.GetConfiguredUsers()\n    enable_oslogin = self._GetEnableOsLoginValue(result)\n    enable_two_factor = self._GetEnableTwoFactorValue(result)\n    if enable_oslogin:\n      desired_users = {}\n      self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)\n    else:\n      desired_users = self._GetAccountsData(result)\n      self.oslogin.UpdateOsLogin(False)\n    remove_users = sorted(set(configured_users) - set(desired_users.keys()))\n    self._UpdateUsers(desired_users)\n    self._RemoveUsers(remove_users)\n    self.utils.SetConfiguredUsers(desired_users.keys())", "docstring": "Called when there are changes to the contents of the metadata server.\n\nArgs:\nresult: json, the deserialized contents of the metadata server.", "source": "juraj-google-style"}
{"code": "def pull_full_properties(self):\n    full_properties = self.manager.session.get(self._uri)\n    self._properties = dict(full_properties)\n    self._properties_timestamp = int(time.time())\n    self._full_properties = True", "docstring": "Retrieve the full set of resource properties and cache them in this\nobject.\n\nAuthorization requirements:\n\n* Object-access permission to this resource.\n\nRaises:\n\n:exc:`~zhmcclient.HTTPError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.AuthError`\n:exc:`~zhmcclient.ConnectionError`", "source": "codesearchnet"}
{"code": "def list_files(d, extension=None):\n    \n    if os.path.isdir(d):\n        expanded_dir = os.path.expanduser(d)\n        files = sorted(glob.glob(expanded_dir + '/*'))\n    else:\n        files = [d, ]\n    if extension is not None:\n        if type(extension) in STR_TYPES:\n            extension = [extension, ]\n        files = [f for f in files if any([f.split('.')[-1] in extension,\n                                          f.split('.')[-1].upper() in extension,\n                                          f.split('.')[-1].lower() in extension])]\n    return files", "docstring": "Lists files in a given directory.\n\nArgs:\n\nd (str): Path to a directory.\n\nextension (str): If supplied, only files that contain the\nspecificied extension will be returned. Default is ``False``,\nwhich returns all files in ``d``.\n\nReturns:\n\nlist: A sorted list of file paths.", "source": "juraj-google-style"}
{"code": "def search_next(self, obj):\n    if (('meta' in obj) and ('next' in obj['meta']) and (obj['meta']['next'] != None)):\n        uri = (self.api_url % obj['meta']['next'])\n        (header, content) = self._http_uri_request(uri)\n        resp = json.loads(content)\n        if (not self._is_http_response_ok(header)):\n            error = resp.get('error_message', 'Unknown Error')\n            raise HttpException(header.status, header.reason, error)\n        return resp\n    return {}", "docstring": "Takes the dictionary that is returned by 'search' or 'search_next' function and gets the next batch of results\n\nArgs:\nobj: dictionary returned by the 'search' or 'search_next' function\n\nReturns:\nA dictionary with a data returned by the server\n\nRaises:\nHttpException with the error message from the server", "source": "codesearchnet"}
{"code": "def make_elb_json(self):\n    env = self.env\n    region = self.region\n    elb_settings = self.properties['elb']\n    LOG.debug('Block ELB Settings:\\n%s', pformat(elb_settings))\n    health_settings = elb_settings['health']\n    elb_subnet_purpose = elb_settings.get('subnet_purpose', 'internal')\n    region_subnets = get_subnets(target='elb', purpose=elb_subnet_purpose, env=env, region=region)\n    region_subnets.pop('subnet_ids', None)\n    if (elb_subnet_purpose == 'internal'):\n        is_internal = 'true'\n    else:\n        is_internal = 'false'\n    target = elb_settings.get('target', 'HTTP:80/health')\n    health = splay_health(target)\n    listeners = format_listeners(elb_settings=elb_settings, env=self.env, region=region)\n    idle_timeout = elb_settings.get('idle_timeout', None)\n    access_log = elb_settings.get('access_log', {})\n    connection_draining_timeout = elb_settings.get('connection_draining_timeout', None)\n    security_groups = DEFAULT_ELB_SECURITYGROUPS[env]\n    security_groups.append(self.app)\n    security_groups.extend(self.properties['security_group']['elb_extras'])\n    security_groups = remove_duplicate_sg(security_groups)\n    template_kwargs = {'access_log': json.dumps(access_log), 'app_name': self.app, 'availability_zones': json.dumps(region_subnets), 'connection_draining_timeout': json.dumps(connection_draining_timeout), 'env': env, 'hc_string': target, 'health_interval': health_settings['interval'], 'health_path': health.path, 'health_port': health.port, 'health_protocol': health.proto, 'health_timeout': health_settings['timeout'], 'healthy_threshold': health_settings['threshold'], 'idle_timeout': json.dumps(idle_timeout), 'isInternal': is_internal, 'listeners': json.dumps(listeners), 'region_zones': json.dumps(region_subnets[region]), 'region': region, 'security_groups': json.dumps(security_groups), 'subnet_type': elb_subnet_purpose, 'unhealthy_threshold': health_settings['unhealthy_threshold'], 'vpc_id': get_vpc_id(env, region)}\n    rendered_template = get_template(template_file='infrastructure/elb_data.json.j2', **template_kwargs)\n    return rendered_template", "docstring": "Render the JSON template with arguments.\n\nReturns:\nstr: Rendered ELB template.", "source": "codesearchnet"}
{"code": "def get_coding_intervals(self, build='37', genes=None):\n    intervals = {}\n    if (not genes):\n        genes = self.all_genes(build=build)\n    LOG.info('Building interval trees...')\n    for (i, hgnc_obj) in enumerate(genes):\n        chrom = hgnc_obj['chromosome']\n        start = max((hgnc_obj['start'] - 5000), 1)\n        end = (hgnc_obj['end'] + 5000)\n        if (chrom not in intervals):\n            intervals[chrom] = intervaltree.IntervalTree()\n            intervals[chrom].addi(start, end, i)\n            continue\n        res = intervals[chrom].search(start, end)\n        if (not res):\n            intervals[chrom].addi(start, end, i)\n            continue\n        for interval in res:\n            if (interval.begin < start):\n                start = interval.begin\n            if (interval.end > end):\n                end = interval.end\n            intervals[chrom].remove(interval)\n        intervals[chrom].addi(start, end, i)\n    return intervals", "docstring": "Return a dictionary with chromosomes as keys and interval trees as values\n\nEach interval represents a coding region of overlapping genes.\n\nArgs:\nbuild(str): The genome build\ngenes(iterable(scout.models.HgncGene)):\n\nReturns:\nintervals(dict): A dictionary with chromosomes as keys and overlapping genomic intervals as values", "source": "codesearchnet"}
{"code": "def target(self, value):\n        \n        if value == self._defaults['target'] and 'target' in self._values:\n            del self._values['target']\n        else:\n            self._values['target'] = value", "docstring": "The target property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def get_cache_index_key(resource):\n    if isinstance(resource, APIResource):\n        (attr, attr_value) = list(resource.get_cache_index_keys().items())[0]\n        key = (type(resource), attr, attr_value)\n    else:\n        key = tuple(resource)\n    if (len(key) != 3):\n        raise TypeError('Cache key must be tuple of (class, key, value), got `{!r}` instead'.format(key))\n    if (not issubclass(key[0], APIResource)):\n        raise TypeError('First value of cache key must be a subclass of APIResource, got `{!r}` instead'.format(key[0]))\n    return key", "docstring": "Return a usable cache lookup key for an already initialized resource\n\nArgs:\nresource (APIResource|tuple): APIResource instance or 3-length tuple key returned from this function\n\nRaises:\nTypeError: If resource is not an APIResource instance or acceptable 3-length tuple cache key", "source": "codesearchnet"}
{"code": "def GetFileObject(self, data_stream_name=''):\n    \n    if (not data_stream_name and\n        not self._fsntfs_file_entry.has_default_data_stream()):\n      return None\n\n    \n    \n    path_spec = copy.deepcopy(self.path_spec)\n    if data_stream_name:\n      setattr(path_spec, 'data_stream', data_stream_name)\n\n    return resolver.Resolver.OpenFileObject(\n        path_spec, resolver_context=self._resolver_context)", "docstring": "Retrieves the file-like object.\n\nArgs:\ndata_stream_name (Optional[str]): data stream name, where an empty\nstring represents the default data stream.\n\nReturns:\nNTFSFileIO: file-like object or None.", "source": "juraj-google-style"}
{"code": "def _partitions_list(N):\n    if (N < _NUM_PRECOMPUTED_PARTITION_LISTS):\n        return list(_partition_lists[N])\n    else:\n        raise ValueError('Partition lists not yet available for system with {} nodes or more'.format(_NUM_PRECOMPUTED_PARTITION_LISTS))", "docstring": "Return a list of partitions of the |N| binary nodes.\n\nArgs:\nN (int): The number of nodes under consideration.\n\nReturns:\nlist[list]: A list of lists, where each inner list is the set of\nmicro-elements corresponding to a macro-element.\n\nExample:\n>>> _partitions_list(3)\n[[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]], [[0], [1], [2]]]", "source": "codesearchnet"}
{"code": "def categorize(self, categories, default=None):\n    return dim(self, categorize, categories=categories, default=default)", "docstring": "Replaces discrete values with supplied categories\n\nReplaces discrete values in input array into a fixed set of\ncategories defined either as a list or dictionary.\n\nArgs:\ncategories: List or dict of categories to map inputs to\ndefault: Default value to assign if value not in categories", "source": "codesearchnet"}
{"code": "def _restore_resources(resources):\n    \n    resources = deepcopy(resources)\n    for resource in resources:\n        schema = resource['schema']\n        for fk in schema.get('foreignKeys', []):\n            _, name = _restore_path(fk['reference']['resource'])\n            fk['reference']['resource'] = name\n    return resources", "docstring": "Restore schemas from being compatible with storage schemas.\n\nForeign keys related operations.\n\nArgs:\nlist: resources from storage\n\nReturns:\nlist: restored resources", "source": "juraj-google-style"}
{"code": "def remove_service(self, service):\n    url = self._url('/services/{0}', service)\n    resp = self._delete(url)\n    self._raise_for_status(resp)\n    return True", "docstring": "Stop and remove a service.\n\nArgs:\nservice (str): Service name or ID\n\nReturns:\n``True`` if successful.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def summary_writer_initializer_op():\n    if context.executing_eagerly():\n        raise RuntimeError('tf.contrib.summary.summary_writer_initializer_op is only supported in graph mode.')\n    return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)", "docstring": "Graph-mode only. Returns the list of ops to create all summary writers.\n\nReturns:\nThe initializer ops.\n\nRaises:\nRuntimeError: If in Eager mode.", "source": "github-repos"}
{"code": "def repeat(count, max_consecutive_error=None):\n    if count <= 1:\n        raise ValueError(f'The `count` for `repeat` must be larger than 1, got \"{count}\".')\n    if max_consecutive_error is not None and max_consecutive_error > count:\n        raise ValueError(f'The `max_consecutive_error` ({max_consecutive_error}) for `repeat` must be smaller than `count` ({count}).')\n\n    def _outer_decorator(func):\n        setattr(func, ATTR_REPEAT_CNT, count)\n        setattr(func, ATTR_MAX_CONSEC_ERROR, max_consecutive_error)\n\n        @functools.wraps(func)\n        def _wrapper(*args):\n            func(*args)\n        return _wrapper\n    return _outer_decorator", "docstring": "Decorator for repeating a test case multiple times.\n\nThe BaseTestClass will execute the test cases annotated with this decorator\nthe specified number of time.\n\nThis decorator only stores the information needed for the repeat. It does not\nexecute the repeat.\n\nArgs:\ncount: int, the total number of times to execute the decorated test case.\nmax_consecutive_error: int, the maximum number of consecutively failed\niterations allowed. If reached, the remaining iterations is abandoned.\nBy default this is not enabled.\n\nReturns:\nThe wrapped test function.\n\nRaises:\nValueError, if the user input is invalid.", "source": "github-repos"}
{"code": "def seek_to_end(self, *partitions):\n        \n        if not all([isinstance(p, TopicPartition) for p in partitions]):\n            raise TypeError('partitions must be TopicPartition namedtuples')\n        if not partitions:\n            partitions = self._subscription.assigned_partitions()\n            assert partitions, 'No partitions are currently assigned'\n        else:\n            for p in partitions:\n                assert p in self._subscription.assigned_partitions(), 'Unassigned partition'\n\n        for tp in partitions:\n            log.debug(\"Seeking to end of partition %s\", tp)\n            self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)", "docstring": "Seek to the most recent available offset for partitions.\n\nArguments:\n*partitions: Optionally provide specific TopicPartitions, otherwise\ndefault to all assigned partitions.\n\nRaises:\nAssertionError: If any partition is not currently assigned, or if\nno partitions are assigned.", "source": "juraj-google-style"}
{"code": "def MessageReceived(self, m):\n        \n        if m.Command == 'verack':\n            \n            \n            if self.incoming_client:\n                if self.expect_verack_next:\n                    self.expect_verack_next = False\n            else:\n                self.HandleVerack()\n        elif m.Command == 'version':\n            self.HandleVersion(m.Payload)\n        elif m.Command == 'getaddr':\n            self.SendPeerInfo()\n        elif m.Command == 'getdata':\n            self.HandleGetDataMessageReceived(m.Payload)\n        elif m.Command == 'getblocks':\n            self.HandleGetBlocksMessageReceived(m.Payload)\n        elif m.Command == 'inv':\n            self.HandleInvMessage(m.Payload)\n        elif m.Command == 'block':\n            self.HandleBlockReceived(m.Payload)\n        elif m.Command == 'getheaders':\n            self.HandleGetHeadersMessageReceived(m.Payload)\n        elif m.Command == 'headers':\n            self.HandleBlockHeadersReceived(m.Payload)\n        elif m.Command == 'addr':\n            self.HandlePeerInfoReceived(m.Payload)\n        else:\n            logger.debug(f\"{self.prefix} Command not implemented: {m.Command}\")", "docstring": "Process a message.\n\nArgs:\nm (neo.Network.Message):", "source": "juraj-google-style"}
{"code": "def __init__(self, value=KeyFormatTypeEnum.RAW):\n        \n        super(KeyFormatType, self).__init__(\n            KeyFormatTypeEnum, value, Tags.KEY_FORMAT_TYPE)", "docstring": "Construct a KeyFormatType object.\n\nArgs:\nvalue (KeyFormatType): A KeyFormatType enumeration value,\n(e.g., KeyFormatType.PKCS_1). Optional, default to\nKeyFormatType.RAW.", "source": "juraj-google-style"}
{"code": "def _tokenize_table(self, table=None):\n    tokenized_rows = []\n    tokenized_row = []\n    for column in table:\n        if self.strip_column_names:\n            tokenized_row.append(self.tokenize(''))\n        else:\n            tokenized_row.append(self.tokenize(column))\n    tokenized_rows.append(tokenized_row)\n    for idx, row in table.iterrows():\n        tokenized_row = []\n        for cell in row:\n            tokenized_row.append(self.tokenize(cell))\n        tokenized_rows.append(tokenized_row)\n    token_coordinates = []\n    for row_index, row in enumerate(tokenized_rows):\n        for column_index, cell in enumerate(row):\n            for token_index, _ in enumerate(cell):\n                token_coordinates.append(TokenCoordinates(row_index=row_index, column_index=column_index, token_index=token_index))\n    return TokenizedTable(rows=tokenized_rows, selected_tokens=token_coordinates)", "docstring": "Tokenizes column headers and cell texts of a table.\n\nArgs:\ntable (`pd.Dataframe`):\nTable. Returns: `TokenizedTable`: TokenizedTable object.", "source": "github-repos"}
{"code": "def attach_template(self, _template, _key, **unbound_var_values):\n    \n    if _key in unbound_var_values:\n      raise ValueError('%s specified twice.' % _key)\n    unbound_var_values[_key] = self\n    return _DeferredLayer(self.bookkeeper,\n                          _template.as_layer().construct,\n                          [],\n                          unbound_var_values,\n                          scope=self._scope,\n                          defaults=self._defaults,\n                          partial_context=self._partial_context)", "docstring": "Attaches the template to this with the _key is supplied with this layer.\n\nNote: names were chosen to avoid conflicts.\n\nArgs:\n_template: The template to construct.\n_key: The key that this layer should replace.\n**unbound_var_values: The values for the unbound_vars.\nReturns:\nA new layer with operation applied.\nRaises:\nValueError: If _key is specified twice or there is a problem computing the\ntemplate.", "source": "juraj-google-style"}
{"code": "def load_pip_addons(_globals):\n    \n    for package_name in known_pip_addons:\n        _, username = package_username(package_name)\n        try:\n            load_addon(username, package_name.replace('-', '_'), _globals)\n        except ImportError:\n            pass", "docstring": "Load all known fabsetup addons which are installed as pypi pip-packages.\n\nArgs:\n_globals(dict): the globals() namespace of the fabric script.\n\nReturn: None", "source": "juraj-google-style"}
{"code": "def stop(self, timeout_s=None):\n    \n    self.stopped.set()\n    if self.thread:\n      self.thread.join(timeout_s)\n      return not self.thread.isAlive()\n    else:\n      return True", "docstring": "Stops the interval.\n\nIf a timeout is provided and stop returns False then the thread is\neffectively abandoned in whatever state it was in (presumably dead-locked).\n\nArgs:\ntimeout_s: The time in seconds to wait on the thread to finish.  By\ndefault it's forever.\nReturns:\nFalse if a timeout was provided and we timed out.", "source": "juraj-google-style"}
{"code": "def compute_writer_results(results):\n    if (not results):\n        return\n    (sources, targets, delayeds) = split_results(results)\n    if targets:\n        delayeds.append(da.store(sources, targets, compute=False))\n    if delayeds:\n        da.compute(delayeds)\n    if targets:\n        for target in targets:\n            if hasattr(target, 'close'):\n                target.close()", "docstring": "Compute all the given dask graphs `results` so that the files are\nsaved.\n\nArgs:\nresults (iterable): Iterable of dask graphs resulting from calls to\n`scn.save_datasets(..., compute=False)`", "source": "codesearchnet"}
{"code": "def __init__(self, mapping, record=None):\n    super().__init__()\n    self.mapping = mapping\n    self.record = record", "docstring": "Initialize this visitor.\n\nArgs:\nmapping: A dictionary, mapping strings to node instances. Any NamedType or\nClassType with a name in this dictionary will be replaced with the\ncorresponding value.\nrecord: Optional. A set. If given, this records which entries in the map\nwere used.", "source": "github-repos"}
{"code": "def __send_smtp_email(self, recipients, subject, html_body, text_body):\n        \n        smtp = smtplib.SMTP(\n            dbconfig.get('smtp_server', NS_EMAIL, 'localhost'),\n            dbconfig.get('smtp_port', NS_EMAIL, 25)\n        )\n        source_arn = dbconfig.get('source_arn', NS_EMAIL)\n        return_arn = dbconfig.get('return_path_arn', NS_EMAIL)\n        from_arn = dbconfig.get('from_arn', NS_EMAIL)\n        msg = MIMEMultipart('alternative')\n\n        \n        if source_arn and from_arn and return_arn:\n            msg['X-SES-SOURCE-ARN'] = source_arn\n            msg['X-SES-FROM-ARN'] = from_arn\n            msg['X-SES-RETURN-PATH-ARN'] = return_arn\n\n        msg['Subject'] = subject\n        msg['To'] = ','.join(recipients)\n        msg['From'] = self.sender\n\n        \n        if html_body:\n            html_part = MIMEText(html_body, 'html')\n            msg.attach(html_part)\n        if text_body:\n            text_part = MIMEText(text_body, 'plain')\n            msg.attach(text_part)\n\n        \n        if dbconfig.get('smtp_tls', NS_EMAIL, False):\n            smtp.starttls()\n\n        \n        username = dbconfig.get('smtp_username', NS_EMAIL)\n        password = dbconfig.get('smtp_password', NS_EMAIL)\n        if username and password:\n            smtp.login(username, password)\n\n        smtp.sendmail(self.sender, recipients, msg.as_string())\n        smtp.quit()", "docstring": "Send an email using SMTP\n\nArgs:\nrecipients (`list` of `str`): List of recipient email addresses\nsubject (str): Subject of the email\nhtml_body (str): HTML body of the email\ntext_body (str): Text body of the email\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def parse_arguments(\n        argv: Optional[Sequence[str]] = None) -> argparse.Namespace:\n    \n    parser = argparse.ArgumentParser(\n        description='Git credential helper using pass as the data source.',\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\n        '-m', '--mapping',\n        type=argparse.FileType('r'),\n        metavar='MAPPING_FILE',\n        default=None,\n        help='A mapping file to be used, specifying how hosts '\n             'map to pass entries. Overrides the default mapping files from '\n             'XDG config locations, usually: {config_file}'.format(\n                 config_file=DEFAULT_CONFIG_FILE))\n    parser.add_argument(\n        '-l', '--logging',\n        action='store_true',\n        default=False,\n        help='Print debug messages on stderr. '\n             'Might include sensitive information')\n    parser.add_argument(\n        'action',\n        type=str,\n        metavar='ACTION',\n        help='Action to preform as specified in the git credential API')\n\n    args = parser.parse_args(argv)\n\n    return args", "docstring": "Parse the command line arguments.\n\nArgs:\nargv:\nIf not ``None``, use the provided command line arguments for\nparsing. Otherwise, extract them automatically.\n\nReturns:\nThe argparse object representing the parsed arguments.", "source": "juraj-google-style"}
{"code": "def get_report_zip(results):\n\n    def add_subdir(root_path, subdir):\n        subdir_path = os.path.join(root_path, subdir)\n        for (subdir_root, subdir_dirs, subdir_files) in os.walk(subdir_path):\n            for subdir_file in subdir_files:\n                subdir_file_path = os.path.join(root_path, subdir, subdir_file)\n                if os.path.isfile(subdir_file_path):\n                    rel_path = os.path.relpath(subdir_root, subdir_file_path)\n                    subdir_arc_name = os.path.join(rel_path, subdir_file)\n                    zip_file.write(subdir_file_path, subdir_arc_name)\n            for subdir in subdir_dirs:\n                add_subdir(subdir_path, subdir)\n    storage = BytesIO()\n    tmp_dir = tempfile.mkdtemp()\n    try:\n        save_output(results, tmp_dir)\n        with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n            for (root, dirs, files) in os.walk(tmp_dir):\n                for file in files:\n                    file_path = os.path.join(root, file)\n                    if os.path.isfile(file_path):\n                        arcname = os.path.join(os.path.relpath(root, tmp_dir), file)\n                        zip_file.write(file_path, arcname)\n                for directory in dirs:\n                    dir_path = os.path.join(root, directory)\n                    if os.path.isdir(dir_path):\n                        zip_file.write(dir_path, directory)\n                        add_subdir(root, directory)\n    finally:\n        shutil.rmtree(tmp_dir)\n    return storage.getvalue()", "docstring": "Creates a zip file of parsed report output\n\nArgs:\nresults (OrderedDict): The parsed results\n\nReturns:\nbytes: zip file bytes", "source": "codesearchnet"}
{"code": "def post_pipeline(self, pipeline):\n        \n        if isinstance(pipeline, str):\n            pipeline_str = pipeline\n        else:\n            pipeline_str = json.dumps(pipeline)\n\n        pipeline_json = json.loads(pipeline_str)\n\n        \n        name = '{0} (onetime-{1})'.format(pipeline_json['name'], self.environments[0])\n        pipeline_json['name'] = name\n\n        \n        pipeline_id = super().compare_with_existing(onetime=True)\n        if pipeline_id:\n            pipeline_json['id'] = pipeline_id\n        else:\n            del pipeline_json['id']\n\n        \n        for trigger in pipeline_json['triggers']:\n            trigger['enabled'] = False\n\n        self.log.debug('Manual Pipeline JSON:\\n%s', pipeline_json)\n        super().post_pipeline(pipeline_json)", "docstring": "Send Pipeline JSON to Spinnaker.\n\nArgs:\npipeline (dict, str): New Pipeline to create.", "source": "juraj-google-style"}
{"code": "def dump(self):\n    walker = self.dump_walker\n    if (walker is not None):\n        walker = walker.dump()\n    state = {'storage': self.storage.dump(), 'dump_walker': walker, 'next_id': self.next_id}\n    return state", "docstring": "Serialize the state of this subsystem into a dict.\n\nReturns:\ndict: The serialized state", "source": "codesearchnet"}
{"code": "def Logger(name, debug=False, facility=None):\n    logger = logging.getLogger(name)\n    logger.handlers = []\n    logger.addHandler(logging.NullHandler())\n    logger.propagate = False\n    logger.setLevel(logging.DEBUG)\n    formatter = logging.Formatter((name + ': %(levelname)s %(message)s'))\n    if debug:\n        console_handler = logging.StreamHandler()\n        console_handler.setLevel(logging.DEBUG)\n        console_handler.setFormatter(formatter)\n        logger.addHandler(console_handler)\n    if facility:\n        syslog_handler = logging.handlers.SysLogHandler(address=constants.SYSLOG_SOCKET, facility=facility)\n        syslog_handler.setLevel(logging.INFO)\n        syslog_handler.setFormatter(formatter)\n        logger.addHandler(syslog_handler)\n    return logger", "docstring": "Get a logging object with handlers for sending logs to SysLog.\n\nArgs:\nname: string, the name of the logger which will be added to log entries.\ndebug: bool, True if debug output should write to the console.\nfacility: int, an encoding of the SysLog handler's facility and priority.\n\nReturns:\nlogging object, an object for logging entries.", "source": "codesearchnet"}
{"code": "def zip_(*structures, **kwargs):\n    flatten = kwargs.pop('flatten', False)\n    assert (not kwargs), 'zip() got unexpected keyword arguments.'\n    return map((lambda *x: (x if (len(x) > 1) else x[0])), *structures, flatten=flatten)", "docstring": "Combine corresponding elements in multiple nested structure to tuples.\n\nThe nested structures can consist of any combination of lists, tuples, and\ndicts. All provided structures must have the same nesting.\n\nArgs:\n*structures: Nested structures.\nflatten: Whether to flatten the resulting structure into a tuple. Keys of\ndictionaries will be discarded.\n\nReturns:\nNested structure.", "source": "codesearchnet"}
{"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n    super(DocumentSummaryInformationOLECFPlugin, self).Process(parser_mediator, **kwargs)\n    if (not root_item):\n        raise ValueError('Root item not set.')\n    (root_creation_time, root_modification_time) = self._GetTimestamps(root_item)\n    for item_name in self.REQUIRED_ITEMS:\n        item = root_item.get_sub_item_by_name(item_name)\n        if (not item):\n            continue\n        summary_information = OLECFDocumentSummaryInformation(item)\n        event_data = summary_information.GetEventData(data_type='olecf:document_summary_info')\n        event_data.name = 'Document Summary Information'\n        if root_creation_time:\n            date_time = dfdatetime_filetime.Filetime(timestamp=root_creation_time)\n            event = OLECFDocumentSummaryInformationEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        if root_modification_time:\n            date_time = dfdatetime_filetime.Filetime(timestamp=root_modification_time)\n            event = OLECFDocumentSummaryInformationEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a document summary information OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nroot_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\nValueError: If the root item is not set.", "source": "codesearchnet"}
{"code": "def parse_conservation(variant, info_key):\n    \n    raw_score = variant.INFO.get(info_key)\n    conservations = []\n\n    if raw_score:\n        if isinstance(raw_score, numbers.Number):\n            raw_score = (raw_score,)\n\n        for score in raw_score:\n            if score >= CONSERVATION[info_key]['conserved_min']:\n                conservations.append('Conserved')\n            else:\n                conservations.append('NotConserved')\n\n    return conservations", "docstring": "Get the conservation prediction\n\nArgs:\nvariant(dict): A variant dictionary\ninfo_key(str)\n\nReturns:\nconservations(list): List of censervation terms", "source": "juraj-google-style"}
{"code": "def get_coordination_sphere(self, index_of_atom, n_sphere=1, give_only_index=False, only_surface=True, exclude=None, use_lookup=None):\n    if (use_lookup is None):\n        use_lookup = settings['defaults']['use_lookup']\n    exclude = (set() if (exclude is None) else exclude)\n    bond_dict = self.get_bonds(use_lookup=use_lookup)\n    i = index_of_atom\n    if (n_sphere != 0):\n        visited = (set([i]) | exclude)\n        try:\n            tmp_bond_dict = {j: (bond_dict[j] - visited) for j in bond_dict[i]}\n        except KeyError:\n            tmp_bond_dict = {}\n        n = 0\n        while (tmp_bond_dict and ((n + 1) < n_sphere)):\n            new_tmp_bond_dict = {}\n            for i in tmp_bond_dict:\n                if (i in visited):\n                    continue\n                visited.add(i)\n                for j in tmp_bond_dict[i]:\n                    new_tmp_bond_dict[j] = (bond_dict[j] - visited)\n            tmp_bond_dict = new_tmp_bond_dict\n            n += 1\n        if only_surface:\n            index_out = set(tmp_bond_dict.keys())\n        else:\n            index_out = (visited | set(tmp_bond_dict.keys()))\n    else:\n        index_out = {i}\n    if give_only_index:\n        return (index_out - exclude)\n    else:\n        return self.loc[(index_out - exclude)]", "docstring": "Return a Cartesian of atoms in the n-th coordination sphere.\n\nConnected means that a path along covalent bonds exists.\n\nArgs:\nindex_of_atom (int):\ngive_only_index (bool): If ``True`` a set of indices is\nreturned. Otherwise a new Cartesian instance.\nn_sphere (int): Determines the number of the coordination sphere.\nonly_surface (bool): Return only the surface of the coordination\nsphere.\nexclude (set): A set of indices that should be ignored\nfor the path finding.\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\nA set of indices or a new Cartesian instance.", "source": "codesearchnet"}
{"code": "def cosine(w, A=1, phi=0, offset=0):\n    \n    from math import cos\n    def f(i):\n        return A * cos(w*i + phi) + offset\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a sequence of cosine values.\n\n.. code-block:: none\n\nvalue = A * cos(w*i + phi) + offset\n\nArgs:\nw (float) : a frequency for the cosine driver\nA (float) : an amplitude for the cosine driver\nphi (float) : a phase offset to start the cosine driver with\noffset (float) : a global offset to add to the driver values", "source": "juraj-google-style"}
{"code": "def plot(self, ax=None, return_fig=False, **kwargs):\n        \n        if ax is None:\n            fig = plt.figure(figsize=(2, 10))\n            ax = fig.add_subplot(111)\n            return_ax = False\n        else:\n            return_ax = True\n\n        hypertime = np.linspace(self.start, self.stop, (10 * self.size - 1) + 1)\n        hyperamp = np.interp(hypertime, self.basis, self)\n\n        ax.plot(hyperamp, hypertime, 'k')\n        ax.fill_betweenx(hypertime, hyperamp, 0, hyperamp > 0.0, facecolor='k', lw=0)\n        ax.invert_yaxis()\n        ax.set_title(self.name)\n\n        if return_ax:\n            return ax\n        elif return_fig:\n            return fig\n        else:\n            return None", "docstring": "Plot a synthetic.\n\nArgs:\nax (ax): A matplotlib axis.\nlegend (Legend): For now, only here to match API for other plot\nmethods.\nreturn_fig (bool): whether to return the matplotlib figure.\nDefault False.\n\nReturns:\nax. If you passed in an ax, otherwise None.", "source": "juraj-google-style"}
{"code": "def median(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Median(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.median(x, axis=axis, keepdims=keepdims)", "docstring": "Compute the median along the specified axis.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which the medians are computed. Defaults to\n`axis=None` which is to compute the median(s) along a flattened\nversion of the array.\nkeepdims: If this is set to `True`, the axes which are reduce\nare left in the result as dimensions with size one.\n\nReturns:\nThe output tensor.", "source": "github-repos"}
{"code": "def _caching_device(rnn_cell):\n    if context.executing_eagerly():\n        return None\n    if not getattr(rnn_cell, '_enable_caching_device', False):\n        return None\n    if control_flow_util.IsInWhileLoop(ops.get_default_graph()):\n        logging.warning('Variable read device caching has been disabled because the RNN is in tf.while_loop loop context, which will cause reading stalled value in forward path. This could slow down the training due to duplicated variable reads. Please consider updating your code to remove tf.while_loop if possible.')\n        return None\n    if rnn_cell._dtype_policy.compute_dtype != rnn_cell._dtype_policy.variable_dtype:\n        logging.warning(\"Variable read device caching has been disabled since it doesn't work with the mixed precision API. This is likely to cause a slowdown for RNN training due to duplicated read of variable for each timestep, which will be significant in a multi remote worker setting. Please consider disabling mixed precision API if the performance has been affected.\")\n        return None\n    return lambda op: op.device", "docstring": "Returns the caching device for the RNN variable.\n\nThis is useful for distributed training, when variable is not located as same\ndevice as the training worker. By enabling the device cache, this allows\nworker to read the variable once and cache locally, rather than read it every\ntime step from remote when it is needed.\n\nNote that this is assuming the variable that cell needs for each time step is\nhaving the same value in the forward path, and only gets updated in the\nbackprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the\ncell body relies on any variable that gets updated every time step, then\ncaching device will cause it to read the stall value.\n\nArgs:\nrnn_cell: the rnn cell instance.", "source": "github-repos"}
{"code": "def limit_replace(self, accountID, orderID, **kwargs):\n    return self.replace(accountID, orderID, order=LimitOrderRequest(**kwargs))", "docstring": "Shortcut to replace a pending Limit Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Limit Order to replace\nkwargs : The arguments to create a LimitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def p40baro(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[26] == '0':\n        return None\n\n    p = bin2int(d[27:39]) * 0.1 + 800    \n    return p", "docstring": "Barometric pressure setting\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS40) string\n\nReturns:\nfloat: pressure in millibar", "source": "juraj-google-style"}
{"code": "def __init__(self, client_id='', service_account_email='', service_account_key='',\n               widget_url='', cookie_name='gtoken', http=None, project_id=''):\n    \n    self.client_id = client_id\n    self.widget_url = widget_url\n    self.cookie_name = cookie_name\n    self.project_id = project_id\n    self.rpc_helper = rpchelper.RpcHelper(service_account_email,\n                                          service_account_key,\n                                          GitkitClient.GOOGLE_API_BASE,\n                                          http)\n    self.config_data_cached = None\n    if not self.client_id:\n      self.client_id = self.GetClientId()", "docstring": "Inits the Gitkit client library.\n\nArgs:\nclient_id: string, developer's Google oauth2 web client id.\nservice_account_email: string, Google service account email.\nservice_account_key: string, Google service account private key.\nwidget_url: string, Gitkit widget URL.\ncookie_name: string, Gitkit cookie name.\nhttp: Http, http client which support cache.\nproject_id: string, developer console's project id.", "source": "juraj-google-style"}
{"code": "def CheckNextIncludeOrder(self, header_type):\n    \n    error_message = ('Found %s after %s' %\n                     (self._TYPE_NAMES[header_type],\n                      self._SECTION_NAMES[self._section]))\n\n    last_section = self._section\n\n    if header_type == _C_SYS_HEADER:\n      if self._section <= self._C_SECTION:\n        self._section = self._C_SECTION\n      else:\n        self._last_header = ''\n        return error_message\n    elif header_type == _CPP_SYS_HEADER:\n      if self._section <= self._CPP_SECTION:\n        self._section = self._CPP_SECTION\n      else:\n        self._last_header = ''\n        return error_message\n    elif header_type == _LIKELY_MY_HEADER:\n      if self._section <= self._MY_H_SECTION:\n        self._section = self._MY_H_SECTION\n      else:\n        self._section = self._OTHER_H_SECTION\n    elif header_type == _POSSIBLE_MY_HEADER:\n      if self._section <= self._MY_H_SECTION:\n        self._section = self._MY_H_SECTION\n      else:\n        \n        \n        self._section = self._OTHER_H_SECTION\n    else:\n      assert header_type == _OTHER_HEADER\n      self._section = self._OTHER_H_SECTION\n\n    if last_section != self._section:\n      self._last_header = ''\n\n    return ''", "docstring": "Returns a non-empty error message if the next header is out of order.\n\nThis function also updates the internal state to be ready to check\nthe next include.\n\nArgs:\nheader_type: One of the _XXX_HEADER constants defined above.\n\nReturns:\nThe empty string if the header is in the right order, or an\nerror message describing what's wrong.", "source": "juraj-google-style"}
{"code": "def visit_Import(self, node):\n    new_aliases = []\n    import_updated = False\n    import_renames = getattr(self._api_change_spec, 'import_renames', {})\n    max_submodule_depth = getattr(self._api_change_spec, 'max_submodule_depth', 1)\n    inserts_after_imports = getattr(self._api_change_spec, 'inserts_after_imports', {})\n    for import_alias in node.names:\n        all_import_components = import_alias.name.split('.')\n        found_update = False\n        for i in reversed(list(range(1, max_submodule_depth + 1))):\n            import_component = all_import_components[0]\n            for j in range(1, min(i, len(all_import_components))):\n                import_component += '.' + all_import_components[j]\n            import_rename_spec = import_renames.get(import_component, None)\n            if not import_rename_spec or excluded_from_module_rename(import_alias.name, import_rename_spec):\n                continue\n            new_name = import_rename_spec.new_name + import_alias.name[len(import_component):]\n            new_asname = import_alias.asname\n            if not new_asname and '.' not in import_alias.name:\n                new_asname = import_alias.name\n            new_alias = ast.alias(name=new_name, asname=new_asname)\n            new_aliases.append(new_alias)\n            import_updated = True\n            found_update = True\n            full_import = (import_alias.name, import_alias.asname)\n            insert_offset = 1\n            for line_to_insert in inserts_after_imports.get(full_import, []):\n                assert self._stack[-1] is node\n                parent = self._stack[-2]\n                new_line_node = pasta.parse(line_to_insert)\n                ast.copy_location(new_line_node, node)\n                parent.body.insert(parent.body.index(node) + insert_offset, new_line_node)\n                insert_offset += 1\n                old_suffix = pasta.base.formatting.get(node, 'suffix')\n                if old_suffix is None:\n                    old_suffix = os.linesep\n                if os.linesep not in old_suffix:\n                    pasta.base.formatting.set(node, 'suffix', old_suffix + os.linesep)\n                pasta.base.formatting.set(new_line_node, 'prefix', pasta.base.formatting.get(node, 'prefix'))\n                pasta.base.formatting.set(new_line_node, 'suffix', os.linesep)\n                self.add_log(INFO, node.lineno, node.col_offset, 'Adding `%s` after import of %s' % (new_line_node, import_alias.name))\n            if found_update:\n                break\n        if not found_update:\n            new_aliases.append(import_alias)\n    if import_updated:\n        assert self._stack[-1] is node\n        parent = self._stack[-2]\n        new_node = ast.Import(new_aliases)\n        ast.copy_location(new_node, node)\n        pasta.ast_utils.replace_child(parent, node, new_node)\n        self.add_log(INFO, node.lineno, node.col_offset, 'Changed import from %r to %r.' % (pasta.dump(node), pasta.dump(new_node)))\n    self.generic_visit(node)", "docstring": "Handle visiting an import node in the AST.\n\nArgs:\nnode: Current Node", "source": "github-repos"}
{"code": "def clear_config(clear_constants=False):\n  \n  _set_config_is_locked(False)\n  _CONFIG.clear()\n  _SINGLETONS.clear()\n  if clear_constants:\n    _CONSTANTS.clear()\n  else:\n    saved_constants = _CONSTANTS.copy()\n    _CONSTANTS.clear()  \n    for name, value in six.iteritems(saved_constants):\n      constant(name, value)\n  _IMPORTED_MODULES.clear()\n  _OPERATIVE_CONFIG.clear()", "docstring": "Clears the global configuration.\n\nThis clears any parameter values set by `bind_parameter` or `parse_config`, as\nwell as the set of dynamically imported modules. It does not remove any\nconfigurable functions or classes from the registry of configurables.\n\nArgs:\nclear_constants: Whether to clear constants created by `constant`. Defaults\nto False.", "source": "juraj-google-style"}
{"code": "def get_feature_data(self, ids=None, features=None, dense=True):\n    result = self.data\n    if (ids is not None):\n        result = result.ix[ids]\n    if (features is not None):\n        result = result.ix[(:, features)]\n    return (result.to_dense() if dense else result)", "docstring": "Slices and returns a subset of feature data.\n\nArgs:\nids (list, array): A list or 1D numpy array of study ids to\nreturn rows for. If None, returns data for all studies\n(i.e., all rows in array).\nfeatures (list, array): A list or 1D numpy array of named features\nto return. If None, returns data for all features (i.e., all\ncolumns in array).\ndense (bool): Optional boolean. When True (default), convert the\nresult to a dense array before returning. When False, keep as\nsparse matrix. Note that if ids is not None, the returned array\nwill always be dense.\nReturns:\nA pandas DataFrame with study IDs in rows and features incolumns.", "source": "codesearchnet"}
{"code": "def fill_treewidget(self, tree, parameters):\n        \n\n        tree.clear()\n        assert isinstance(parameters, (dict, Parameter))\n\n        for key, value in parameters.items():\n            if isinstance(value, Parameter):\n                B26QTreeItem(tree, key, value, parameters.valid_values[key], parameters.info[key])\n            else:\n                B26QTreeItem(tree, key, value, type(value), '')", "docstring": "fills a QTreeWidget with nested parameters, in future replace QTreeWidget with QTreeView and call fill_treeview\nArgs:\ntree: QtWidgets.QTreeWidget\nparameters: dictionary or Parameter object\nshow_all: boolean if true show all parameters, if false only selected ones\nReturns:", "source": "juraj-google-style"}
{"code": "def shift_time(start_time, mins) -> str:\n    \n    s_time = pd.Timestamp(start_time)\n    e_time = s_time + np.sign(mins) * pd.Timedelta(f'00:{abs(mins)}:00')\n    return e_time.strftime('%H:%M')", "docstring": "Shift start time by mins\n\nArgs:\nstart_time: start time in terms of HH:MM string\nmins: number of minutes (+ / -)\n\nReturns:\nend time in terms of HH:MM string", "source": "juraj-google-style"}
{"code": "def __init__(self, client_id, client_secret, refresh_token,\n               manager_account_id, dev_token):\n    \n    credentials = GoogleRefreshTokenClient(client_id, client_secret,\n                                           refresh_token)\n    self.client = AdWordsClient(dev_token, credentials, self._USER_AGENT,\n                                client_customer_id=manager_account_id,\n                                cache=ZeepServiceProxy.NO_CACHE)", "docstring": "Initializes an APIHandler.\n\nArgs:\nclient_id: The client customer id retrieved from the Developers Console.\nclient_secret: The client secret retrieved from the Developers Console.\nrefresh_token: The refresh token retrieved with generate_refresh_token.py.\nmanager_account_id: The AdWords manager account Id.\ndev_token: The AdWords Developer Token.", "source": "juraj-google-style"}
{"code": "def remove_extra_presentations(self, resource, timeout=-1):\n        \n        uri = self.URI + \"/repair\"\n        custom_headers = {'Accept-Language': 'en_US'}\n        return self._client.create(resource, uri=uri, timeout=timeout, custom_headers=custom_headers)", "docstring": "Removes extra presentations from a specified server profile.\n\nArgs:\nresource (dict):\nObject to create\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nReturns:\ndict: Associated storage attachment resource.", "source": "juraj-google-style"}
{"code": "def startDrag(self, index):\n        \n\n        if not index.isValid():\n            return\n        \n        dataFrame = self.model().dataFrame()\n\n        \n        dfindex = dataFrame.iloc[[index.row()]].index\n        columnName = dataFrame.columns[index.column()]\n        dtype = dataFrame[columnName].dtype\n        value = dataFrame[columnName][dfindex]\n\n        \n        mimePayload = PandasCellPayload(\n            dfindex,\n            columnName,\n            value,\n            dtype,\n            hex(id(self.model()))\n        )\n        mimeData = MimeData()\n        mimeData.setData(mimePayload)\n                \n        \n        drag = QtGui.QDrag(self)\n        drag.setMimeData(mimeData)\n        pixmap = QtGui.QPixmap(\":/icons/insert-table.png\")\n        drag.setHotSpot(QtCore.QPoint(pixmap.width()/3, pixmap.height()/3))\n        drag.setPixmap(pixmap)\n        result = drag.start(Qt.MoveAction)", "docstring": "start a drag operation with a PandasCellPayload on defined index.\n\nArgs:\nindex (QModelIndex): model index you want to start the drag operation.", "source": "juraj-google-style"}
{"code": "def inspect_virtual(self, stream_id):\n        \n\n        stream = DataStream.FromEncoded(stream_id)\n\n        if stream.buffered:\n            return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]\n\n        try:\n            reading = self.storage.inspect_last(stream, only_allocated=True)\n            return [Error.NO_ERROR, reading.value]\n        except StreamEmptyError:\n            return [Error.NO_ERROR, 0]\n        except UnresolvedIdentifierError:\n            return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]", "docstring": "Inspect the last value written into a virtual stream.\n\nArgs:\nstream_id (int): The virtual stream was want to inspect.\n\nReturns:\n(int, int): An error code and the stream value.", "source": "juraj-google-style"}
{"code": "def open_jsonl(path: str, mode: str='r', **kwargs) -> pg_io.Sequence:\n    return pg_io.open_sequence(path, mode, serializer=to_json_str, deserializer=from_json_str, **kwargs)", "docstring": "Open a JSONL file for reading or writing.\n\nExample::\n\nwith pg.open_jsonl('my_file.jsonl', 'w') as f:\nf.add(1)\nf.add('foo')\nf.add(dict(x=1))\n\nwith pg.open_jsonl('my_file.jsonl', 'r') as f:\nfor value in f:\nprint(value)\n\nArgs:\npath: The path to the file.\nmode: The mode of the file.\n**kwargs: Additional keyword arguments that will be passed to\n``pg_io.open_sequence``.\n\nReturns:\nA sequence for PyGlove objects.", "source": "github-repos"}
{"code": "def CheckSupportedFormat(cls, path, check_readable_only=False):\n    try:\n        connection = sqlite3.connect(path, detect_types=(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES))\n        cursor = connection.cursor()\n        query = 'SELECT * FROM metadata'\n        cursor.execute(query)\n        metadata_values = {row[0]: row[1] for row in cursor.fetchall()}\n        cls._CheckStorageMetadata(metadata_values, check_readable_only=check_readable_only)\n        connection.close()\n        result = True\n    except (IOError, sqlite3.DatabaseError):\n        result = False\n    return result", "docstring": "Checks if the storage file format is supported.\n\nArgs:\npath (str): path to the storage file.\ncheck_readable_only (Optional[bool]): whether the store should only be\nchecked to see if it can be read. If False, the store will be checked\nto see if it can be read and written to.\n\nReturns:\nbool: True if the format is supported.", "source": "codesearchnet"}
{"code": "def install_time(self):\n    time1970 = self.__mod_time1970\n    try:\n        (date_string, item_type) = win32api.RegQueryValueEx(self.__reg_uninstall_handle, 'InstallDate')\n    except pywintypes.error as exc:\n        if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):\n            return time1970\n        else:\n            raise\n    if (item_type == win32con.REG_SZ):\n        try:\n            date_object = datetime.datetime.strptime(date_string, '%Y%m%d')\n            time1970 = time.mktime(date_object.timetuple())\n        except ValueError:\n            pass\n    return time1970", "docstring": "Return the install time, or provide an estimate of install time.\n\nInstallers or even self upgrading software must/should update the date\nheld within InstallDate field when they change versions. Some installers\ndo not set ``InstallDate`` at all so we use the last modified time on the\nregistry key.\n\nReturns:\nint: Seconds since 1970 UTC.", "source": "codesearchnet"}
{"code": "def to_set(self):\n    if self.closed():\n        raise ValueError('Attempt to call to_set() on a closed Queryable.')\n    if isinstance(self._iterable, set):\n        return self._iterable\n    s = set()\n    for item in self:\n        if (item in s):\n            raise ValueError('Duplicate item value {0} in sequence during to_set()'.format(repr(item)))\n        s.add(item)\n    return s", "docstring": "Convert the source sequence to a set.\n\nNote: This method uses immediate execution.\n\nRaises:\nValueError: If duplicate keys are in the projected source sequence.\nValueError: If the Queryable is closed().", "source": "codesearchnet"}
{"code": "def add_delegate(self, callback):\n        \n\n        if callback in self._delegate_methods:\n            return\n\n        self._delegate_methods.append(callback)", "docstring": "Registers a new delegate callback\n\nThe prototype should be function(data), where data will be the decoded json push\n\nArgs:\ncallback (function): method to trigger when push center receives events", "source": "juraj-google-style"}
{"code": "def readMonthTariffs(self, months_type):\n    self.setContext('readMonthTariffs')\n    try:\n        req_type = binascii.hexlify(str(months_type).zfill(1))\n        req_str = (('01523102303031' + req_type) + '282903')\n        work_table = self.m_mons\n        if (months_type == ReadMonths.kWhReverse):\n            work_table = self.m_rev_mons\n        self.request(False)\n        req_crc = self.calc_crc16(req_str[2:].decode('hex'))\n        req_str += req_crc\n        self.m_serial_port.write(req_str.decode('hex'))\n        raw_ret = self.m_serial_port.getResponse(self.getContext())\n        self.serialPostEnd()\n        unpacked_read = self.unpackStruct(raw_ret, work_table)\n        self.convertData(unpacked_read, work_table, self.m_kwh_precision)\n        return_crc = self.calc_crc16(raw_ret[1:(- 2)])\n        if (str(return_crc) == str(work_table['crc16'][MeterData.StringValue])):\n            ekm_log(('Months CRC success, type = ' + str(req_type)))\n            self.setContext('')\n            return True\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return False", "docstring": "Serial call to read month tariffs block into meter object buffer.\n\nArgs:\nmonths_type (int): A :class:`~ekmmeters.ReadMonths` value.\n\nReturns:\nbool: True on completion.", "source": "codesearchnet"}
{"code": "def generate_token():\n    length = 50\n    stringset = (string.ascii_letters + string.digits)\n    token = ''.join([stringset[(i % len(stringset))] for i in [ord(x) for x in os.urandom(length)]])\n    return token", "docstring": "Generate a new random security token.\n\n>>> len(generate_token()) == 50\nTrue\n\nReturns:\nstring", "source": "codesearchnet"}
{"code": "def update(self, resource, timeout=-1):\n        \n        return self._client.update(resource, timeout=timeout)", "docstring": "Updates the specified data center resource.\n\nArgs:\nresource (dict): Object to update.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated data center.", "source": "juraj-google-style"}
{"code": "def _create_output_from_match(self, match_result):\n        \n        if isinstance(match_result, dict):\n            return LinterOutput(self.name, **match_result)\n        return LinterOutput(self.name, *match_result)", "docstring": "Create Result instance from pattern match results.\n\nArgs:\nmatch: Pattern match.", "source": "juraj-google-style"}
{"code": "def get_classes_in_module(module, superclass=object):\n    \n\n    ret = []\n    for classname in dir(module):\n        attr = module.__getattribute__(classname)\n        try:\n            if issubclass(attr, superclass) and (attr != superclass):\n                ret.append(attr)\n        except TypeError:\n            \n            pass\n        except RuntimeError:\n            \n            \n            pass\n    return ret", "docstring": "Returns a list with all classes in module that descend from parent\n\nArgs:\nmodule: builtins.module\nsuperclass: a class\n\nReturns: list", "source": "juraj-google-style"}
{"code": "def assemble(self, ops):\n    return pwnypack.asm.asm(self.compile(ops), target=self.target)", "docstring": "Assemble a list of operations into executable code.\n\nArguments:\nops(list): A list of shellcode operations.\n\nReturns:\nbytes: The executable code that implements the shellcode.", "source": "codesearchnet"}
{"code": "def cumall(series):\n    \n\n    alls = series.expanding().apply(np.all).astype(bool)\n    return alls", "docstring": "Calculates cumulative all of values. Equivalent to\n`series.expanding().apply(np.all).astype(bool)`.\n\nArgs:\nseries: column to compute cumulative all for.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    message_type = event_values.get('message_type', None)\n    if message_type is not None:\n      event_values['message_type'] = (\n          self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))\n\n    message_status = event_values.get('message_status', None)\n    if message_status is not None:\n      event_values['message_status'] = (\n          self._MESSAGE_STATUS.get(message_status, 'UNKNOWN'))\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def as_dense_types(types, classes):\n    ret = nest.pack_sequence_as(types, [dtypes.variant if c is sparse_tensor.SparseTensor else ty for ty, c in zip(nest.flatten(types), nest.flatten(classes))])\n    return ret", "docstring": "Converts sparse tensor types to `dtypes.variant`.\n\nArgs:\ntypes: a structure of types to convert.\nclasses: a structure of objects that identify the dataset item classes\n\nReturns:\na structure matching the nested structure of `types`, containing\n`dtypes.variant` at positions where `classes` contains\n`tf.sparse.SparseTensor` and matching contents of `types` otherwise", "source": "github-repos"}
{"code": "def escape(inp, quote='\"'):\n    output = ''\n    for c in inp:\n        if (c == quote):\n            output += '\\\\'\n        output += c\n    return output", "docstring": "Escape `quote` in string `inp`.\n\nExample usage::\n\n>>> escape('hello \"')\n'hello \\\\\"'\n>>> escape('hello \\\\\"')\n'hello \\\\\\\\\"'\n\nArgs:\ninp (str): String in which `quote` will be escaped.\nquote (char, default \"): Specify which character will be escaped.\n\nReturns:\nstr: Escaped string.", "source": "codesearchnet"}
{"code": "def AddArg(self, arg):\n    self.args.append(arg)\n    if (len(self.args) > self.number_of_args):\n        raise ParseError('Too many args for this expression.')\n    elif (len(self.args) == self.number_of_args):\n        return True\n    return False", "docstring": "Adds a new arg to this expression.\n\nArgs:\narg: The argument to add (string).\n\nReturns:\nTrue if this arg is the last arg, False otherwise.\n\nRaises:\nParseError: If there are too many args.", "source": "codesearchnet"}
{"code": "def _evaluate_barycentric(nodes, degree, lambda1, lambda2, lambda3):\n    (dimension, num_nodes) = nodes.shape\n    binom_val = 1.0\n    result = np.zeros((dimension, 1), order='F')\n    index = (num_nodes - 1)\n    result[(:, 0)] += nodes[(:, index)]\n    lambda1 = np.asfortranarray([lambda1])\n    lambda2 = np.asfortranarray([lambda2])\n    for k in six.moves.xrange((degree - 1), (- 1), (- 1)):\n        binom_val = ((binom_val * (k + 1)) / (degree - k))\n        index -= 1\n        new_index = ((index - degree) + k)\n        col_nodes = nodes[(:, new_index:(index + 1))]\n        col_nodes = np.asfortranarray(col_nodes)\n        col_result = _curve_helpers.evaluate_multi_barycentric(col_nodes, lambda1, lambda2)\n        result *= lambda3\n        result += (binom_val * col_result)\n        index = new_index\n    return result", "docstring": "r\"\"\"Compute a point on a surface.\n\nEvaluates :math:`B\\left(\\lambda_1, \\lambda_2, \\lambda_3\\right)` for a\nB |eacute| zier surface / triangle defined by ``nodes``.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): Control point nodes that define the surface.\ndegree (int): The degree of the surface define by ``nodes``.\nlambda1 (float): Parameter along the reference triangle.\nlambda2 (float): Parameter along the reference triangle.\nlambda3 (float): Parameter along the reference triangle.\n\nReturns:\nnumpy.ndarray: The evaluated point as a ``D x 1`` array (where ``D``\nis the ambient dimension where ``nodes`` reside).", "source": "codesearchnet"}
{"code": "def one_of_keyword_only(*valid_keywords):\n\n    def decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            sentinel = object()\n            values = {}\n            for key in valid_keywords:\n                kwarg_value = kwargs.pop(key, sentinel)\n                if (kwarg_value is not sentinel):\n                    values[key] = kwarg_value\n            if kwargs:\n                raise TypeError('Unexpected arguments: {}'.format(kwargs))\n            if (not values):\n                raise TypeError('Must provide one of {} as keyword argument'.format(', '.join(valid_keywords)))\n            if (len(values) > 1):\n                raise TypeError('Must provide only one of {} as keyword argument. Received {}'.format(', '.join(valid_keywords), values))\n            return func(*(args + values.popitem()))\n        return wrapper\n    return decorator", "docstring": "Decorator to help make one-and-only-one keyword-only argument functions more reusable\n\nNotes:\nDecorated function should take 2 arguments, the first for the key, the second the value\n\nExamples:\n\n::\n\n@one_of_keyword_only('a', 'b', 'c')\ndef func(key, value):\nif key == 'a':\n...\nelif key == 'b':\n...\nelse:\n# key = 'c'\n...\n\n...\n\nfunc(a=1)\nfunc(b=2)\nfunc(c=3)\n\ntry:\nfunc(d=4)\nexcept TypeError:\n...\n\ntry:\nfunc(a=1, b=2)\nexcept TypeError:\n...\n\nArgs:\n*valid_keywords (str): All allowed keyword argument names\n\nRaises:\nTypeError: On decorated call, if 0 or 2+ arguments are provided or kwargs contains a key not in valid_keywords", "source": "codesearchnet"}
{"code": "def add_number_parameters(self, number):\n    if isinstance(number, list):\n        for x in number:\n            self.add_number_parameters(x)\n        return\n    self._parameters.append((('{ \"value\": ' + str(number)) + ' }'))", "docstring": "Add given number parameters to the internal list.\n\nArgs:\nnumber (list of int or list of float): A number or list of numbers to add to the parameters.", "source": "codesearchnet"}
{"code": "def submit_bsub_job(command, job_id=None, dependent_id=None, memory=None, requeue_code=None, logfile=None):\n    \n    \n    if job_id is None:\n        job_id = get_random_string()\n    \n    job = \"-J \\\"{0}\\\"\".format(job_id)\n    \n    mem = \"\"\n    if memory is not None:\n        mem = \"-R 'select[mem>{0}] rusage[mem={0}]' -M {0}\".format(memory)\n    \n    requeue = \"\"\n    if requeue_code is not None:\n        requeue = \"-Q 'EXCLUDE({0})'\".format(requeue_code)\n    \n    dependent = \"\"\n    if dependent_id is not None:\n        if type(dependent_id) == list:\n            dependent_id = \" && \".join(dependent_id)\n        dependent = \"-w '{0}'\".format(dependent_id)\n    \n    log = \"bjob_output.txt\"\n    if logfile is not None:\n        log = logfile\n    \n    preamble = [\"bsub\", job, dependent, requeue, \"-q\", \"normal\", \"-o\", log, mem]\n    command = [\"bash\", \"-c\", \"\\\"\"] + command + [\"\\\"\"]\n    \n    command = \" \".join(preamble + command)\n    subprocess.call(command, shell=True)", "docstring": "construct a bsub job submission command\n\nArgs:\ncommand: list of strings that forma unix command\njob_id: string for job ID for submission\ndependent_id: job ID, or list of job IDs which the current command needs\nto have finished before the current command will start. Note that\nthe list can be empty, in which case there are no dependencies.\nmemory: minimum memory requirements (in megabytes)\n\nReturns:\nnothing", "source": "juraj-google-style"}
{"code": "def intersection(self, other, recursive=True):\n    if (not isinstance(other, composite)):\n        raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))\n    if (self.meta_type != other.meta_type):\n        return composite({})\n    if (self.meta_type == 'list'):\n        keep = []\n        for item in self._list:\n            if (item in other._list):\n                if (recursive and isinstance(item, composite)):\n                    keep.extend(item.intersection(other.index(item), recursive=True))\n                else:\n                    keep.append(item)\n        return composite(keep)\n    elif (self.meta_type == 'dict'):\n        keep = {}\n        for key in self._dict:\n            item = self._dict[key]\n            if (key in other._dict):\n                if (recursive and isinstance(item, composite) and isinstance(other.get(key), composite)):\n                    keep[key] = item.intersection(other.get(key), recursive=True)\n                elif (item == other[key]):\n                    keep[key] = item\n        return composite(keep)\n    return", "docstring": "Recursively compute intersection of data. For dictionaries, items\nfor specific keys will be reduced to unique items. For lists, items\nwill be reduced to unique items. This method is meant to be analogous\nto set.intersection for composite objects.\n\nArgs:\nother (composite): Other composite object to intersect with.\nrecursive (bool): Whether or not to perform the operation recursively,\nfor all nested composite objects.", "source": "codesearchnet"}
{"code": "def _object_url(self, objtype, objid):\n    return '{base_url}/api/{api_version}/{controller}/{obj_id}'.format(base_url=self._base_url(), api_version=self.api_version, controller=self._controller_name(objtype), obj_id=objid)", "docstring": "Generate the URL for the specified object\n\nArgs:\nobjtype (str): The object's type\nobjid (int): The objects ID\n\nReturns:\nA string containing the URL of the object", "source": "codesearchnet"}
{"code": "def getnamespace(f):\n    namespace = dict(f.__globals__)\n    closure = f.__closure__\n    freevars = f.__code__.co_freevars\n    if freevars and closure:\n        for name, cell in zip(freevars, closure):\n            try:\n                namespace[name] = cell.cell_contents\n            except ValueError:\n                pass\n    return namespace", "docstring": "Returns the complete namespace of a function.\n\nNamespace is defined here as the mapping of all non-local variables to values.\nThis includes the globals and the closure variables. Note that this captures\nthe entire globals collection of the function, and may contain extra symbols\nthat it does not actually use.\n\nArgs:\nf: User defined function.\n\nReturns:\nA dict mapping symbol names to values.", "source": "github-repos"}
{"code": "def __init__(self, index: Optional[int]=None):\n    super().__init__()\n    self._index = index", "docstring": "Constructor.\n\nArgs:\nindex: index of the tuple field that this key spec applies to.\nIf None, this tuple value spec applies to all elements of a\nvariable-length tuple.", "source": "github-repos"}
{"code": "def copy_handle_data(source_t, target_t):\n    if target_t.dtype == dtypes.resource or target_t.dtype == dtypes.variant:\n        handle_data = get_handle_data(source_t)\n        set_handle_data(target_t, handle_data)", "docstring": "Copies HandleData for variant and resource type tensors if available.\n\nThe CppShapeInferenceResult::HandleData proto contains information about the\nshapes and types of the element tensors of resource/variant type tensors.\nWe need to copy this across function boundaries, i.e., when capturing a\nplaceholder or when returning a function tensor as output. If we don't do this\nthe element tensors will have unknown shapes, e.g., if a TensorList variant\ntensor is captured as a placeholder, elements popped from that list would have\nunknown shape.\n\nArgs:\nsource_t: The tensor to copy HandleData from.\ntarget_t: The tensor to copy HandleData to.", "source": "github-repos"}
{"code": "def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):\n        \n        def populate_sweep_param(scripts, parameter_list, trace=''):\n            \n\n            def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):\n                \n                if valid_values is None and isinstance(dic, Parameter):\n                    valid_values = dic.valid_values\n\n                for key, value in dic.items():\n                    if isinstance(value, dict):  \n                        parameter_list = get_parameter_from_dict(trace + '.' + key, value, parameter_list,\n                                                                 dic.valid_values[key])\n                    elif (valid_values[key] in (float, int)) or \\\n                            (isinstance(valid_values[key], list) and valid_values[key][0] in (float, int)):\n                        parameter_list.append(trace + '.' + key)\n                    else:  \n                        \n                        print(('ignoring sweep parameter', key))\n\n                return parameter_list\n\n            for script_name in list(scripts.keys()):\n                from pylabcontrol.core import ScriptIterator\n                script_trace = trace\n                if script_trace == '':\n                    script_trace = script_name\n                else:\n                    script_trace = script_trace + '->' + script_name\n                if issubclass(scripts[script_name], ScriptIterator):  \n                    populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list,\n                                         trace=script_trace)\n                else:\n                    \n                    for setting in \\\n                    [elem[1] for elem in inspect.getmembers(scripts[script_name]) if elem[0] == '_DEFAULT_SETTINGS'][0]:\n                        parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)\n\n            return parameter_list\n\n        if iterator_type == 'loop':\n            script_default_settings = [\n                Parameter('script_order', script_order),\n                Parameter('script_execution_freq', script_execution_freq),\n                Parameter('num_loops', 0, int, 'times the subscripts will be executed'),\n                Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')\n            ]\n\n        elif iterator_type == 'sweep':\n\n            sweep_params = populate_sweep_param(sub_scripts, [])\n\n            script_default_settings = [\n                Parameter('script_order', script_order),\n                Parameter('script_execution_freq', script_execution_freq),\n                Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'),\n                Parameter('sweep_range',\n                          [Parameter('min_value', 0, float, 'min parameter value'),\n                           Parameter('max_value', 0, float, 'max parameter value'),\n                           Parameter('N/value_step', 0, float,\n                                     'either number of steps or parameter value step, depending on mode')]),\n                Parameter('stepping_mode', 'N', ['N', 'value_step'],\n                          'Switch between number of steps and step amount'),\n                Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')\n            ]\n    
    else:\n            print(('unknown iterator type ' + iterator_type))\n            raise TypeError('unknown iterator type ' + iterator_type)\n\n        return script_default_settings", "docstring": "assigning the actual script settings depending on the iterator type\n\nthis might be overwritten by classes that inherit form ScriptIterator\n\nArgs:\nsub_scripts: dictionary with the subscripts\nscript_order: execution order of subscripts\nscript_execution_freq: execution frequency of subscripts\n\nReturns:\nthe default setting for the iterator", "source": "juraj-google-style"}
{"code": "def validate_queues(queues):\n    if (not isinstance(queues, dict)):\n        raise exceptions.ConfigurationException(\"'queues' must be a dictionary mapping queue names to settings.\")\n    for (queue, settings) in queues.items():\n        if (not isinstance(settings, dict)):\n            raise exceptions.ConfigurationException(\"the {} queue in the 'queues' setting has a value of type {}, but it should be a dictionary of settings.\".format(queue, type(settings)))\n        missing_keys = []\n        for key in ('durable', 'auto_delete', 'exclusive', 'arguments'):\n            if (key not in settings):\n                missing_keys.append(key)\n        if missing_keys:\n            raise exceptions.ConfigurationException('the {} queue is missing the following keys from its settings value: {}'.format(queue, missing_keys))", "docstring": "Validate the queues configuration.\n\nRaises:\nexceptions.ConfigurationException: If the configuration provided is of an\ninvalid format.", "source": "codesearchnet"}
{"code": "def _is_univariate_marginal(self, index_points):\n    \n    num_index_points = tf.compat.dimension_value(\n        index_points.shape[-(self.kernel.feature_ndims + 1)])\n    if num_index_points is None:\n      warnings.warn(\n          'Unable to detect statically whether the number of index_points is '\n          '1. As a result, defaulting to treating the marginal GP at '\n          '`index_points` as a multivariate Gaussian. This makes some methods, '\n          'like `cdf` unavailable.')\n    return num_index_points == 1", "docstring": "True if the given index_points would yield a univariate marginal.\n\nArgs:\nindex_points: the set of index set locations at which to compute the\nmarginal Gaussian distribution. If this set is of size 1, the marginal is\nunivariate.\n\nReturns:\nis_univariate: Boolean indicating whether the marginal is univariate or\nmultivariate. In the case of dynamic shape in the number of index points,\ndefaults to \"multivariate\" since that's the best we can do.", "source": "juraj-google-style"}
{"code": "def matches(self, node, value):\n    if self.skip(value):\n        return True\n    if (not self._valid_value(value)):\n        msg = 'Invalid value {value} passed to filter {name} - '.format(value=repr(value), name=self.name)\n        if (self.default is not None):\n            warn((msg + 'defaulting to {}'.format(self.default)))\n            value = self.default\n        else:\n            warn((msg + 'skipping'))\n            return True\n    return self.func(node, value)", "docstring": "Returns whether the given node matches the filter rule with the given value.\n\nArgs:\nnode (Element): The node to filter.\nvalue (object): The desired value with which the node should be evaluated.\n\nReturns:\nbool: Whether the given node matches.", "source": "codesearchnet"}
{"code": "def optimize(self, sensor_graph, model):\n    passes = self._order_pases(self._known_passes.keys())\n    for opt_name in passes:\n        rerun = True\n        pass_instance = self._known_passes[opt_name][0]()\n        while rerun:\n            rerun = pass_instance.run(sensor_graph, model=model)", "docstring": "Optimize a sensor graph by running optimization passes.\n\nThe passes are run one at a time and modify the sensor graph\nfor future passes.\n\nArgs:\nsensor_graph (SensorGraph): The graph to be optimized\nmodel (DeviceModel): The device that we are optimizing\nfor, that OptimizationPass objects are free to use\nto guide their optimizations.", "source": "codesearchnet"}
{"code": "def get_doc_id(document_pb, expected_prefix):\n    \n    prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)\n    if prefix != expected_prefix:\n        raise ValueError(\n            \"Unexpected document name\",\n            document_pb.name,\n            \"Expected to begin with\",\n            expected_prefix,\n        )\n\n    return document_id", "docstring": "Parse a document ID from a document protobuf.\n\nArgs:\ndocument_pb (google.cloud.proto.firestore.v1beta1.\\\ndocument_pb2.Document): A protobuf for a document that\nwas created in a ``CreateDocument`` RPC.\nexpected_prefix (str): The expected collection prefix for the\nfully-qualified document name.\n\nReturns:\nstr: The document ID from the protobuf.\n\nRaises:\nValueError: If the name does not begin with the prefix.", "source": "juraj-google-style"}
{"code": "def list_objects(self, path='', relative=False, first_level=False,\n                     max_request_entries=None):\n        \n        entries = 0\n        next_values = []\n        max_request_entries_arg = None\n\n        if not relative:\n            path = self.relpath(path)\n\n        \n        if not path:\n            objects = self._list_locators()\n\n        \n        else:\n            objects = self._list_objects(\n                self.get_client_kwargs(path), max_request_entries)\n\n        \n        for obj in objects:\n            \n            try:\n                name, header, is_directory = obj\n            except ValueError:\n                \n                name, header = obj\n                is_directory = True\n\n            \n            if is_directory and not first_level:\n                name = next_path = name.rstrip('/') + '/'\n\n                if path:\n                    next_path = '/'.join((path.rstrip('/'), name))\n\n                if max_request_entries is not None:\n                    max_request_entries_arg = max_request_entries - entries\n\n                next_values.append((\n                    name, self._generate_async(self.list_objects(\n                        next_path, relative=True,\n                        max_request_entries=max_request_entries_arg))))\n\n            entries += 1\n            yield name, header\n            if entries == max_request_entries:\n                return\n\n        for next_name, generator in next_values:\n            \n            for name, header in generator:\n\n                entries += 1\n                yield '/'.join((next_name.rstrip('/'), name)), header\n                if entries == max_request_entries:\n                    return", "docstring": "List objects.\n\nArgs:\npath (str): Path or URL.\nrelative (bool): Path is relative to current root.\nfirst_level (bool): It True, returns only first level objects.\nElse, returns full tree.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ngenerator of tuple: object name str, object header dict", "source": "juraj-google-style"}
{"code": "def post_process_single(self, generation: str, fix_markdown: bool=True) -> str:\n    generation = re.sub('(?:\\\\n|^)\n    generation = generation.strip()\n    generation = generation.replace('\\n* [leftmargin=*]\\n', '\\n')\n    generation = re.sub('^\n    lines = generation.split('\\n')\n    if lines[-1].startswith('\n        logger.info('Likely hallucinated title at the end of the page: ' + lines[-1])\n        generation = '\\n'.join(lines[:-1])\n    generation = truncate_repetitions(generation)\n    generation = self.remove_hallucinated_references(generation)\n    generation = re.sub('^\\\\* \\\\[\\\\d+\\\\](\\\\s?[A-W]\\\\.+\\\\s?){10,}.*$', '', generation, flags=re.M)\n    generation = re.sub('^(\\\\* \\\\[\\\\d+\\\\])\\\\[\\\\](.*)$', '\\\\1\\\\2', generation, flags=re.M)\n    generation = re.sub('(^\\\\w\\\\n\\\\n|\\\\n\\\\n\\\\w$)', '', generation)\n    generation = re.sub('([\\\\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\\\\s.,:()])', '\\\\1\\\\(\\\\2_{\\\\3}\\\\)\\\\4', generation)\n    generation = re.sub('([\\\\s.,\\\\d])_([a-zA-Z0-9])_([\\\\s.,\\\\d;])', '\\\\1\\\\(\\\\2\\\\)\\\\3', generation)\n    generation = re.sub('(\\\\nFootnote .*?:) (?:footnotetext|thanks):\\\\W*(.*(?:\\\\n\\\\n|$))', '\\\\1 \\\\2', generation)\n    generation = re.sub('\\\\[FOOTNOTE:.+?\\\\](.*?)\\\\[ENDFOOTNOTE\\\\]', '', generation)\n    generation = normalize_list_like_lines(generation)\n    if generation.endswith(('.', '}')):\n        generation += '\\n\\n'\n    if re.match('[A-Z0-9,;:]$', generation):\n        generation += ' '\n    elif generation.startswith(('\n        generation = '\\n\\n' + generation\n    elif generation.split('\\n')[-1].startswith(('\n        generation = generation + '\\n\\n'\n    else:\n        try:\n            last_word = generation.split(' ')[-1]\n            if last_word in nltk.corpus.words.words():\n                generation += ' '\n        except LookupError:\n            generation += ' '\n    generation = self.correct_tables(generation)\n    generation = generation.replace('\\\\begin{array}[]{', '\\\\begin{array}{')\n    generation = re.sub('\\\\\\\\begin{tabular}{([clr ]){2,}}\\\\s*[& ]*\\\\s*(\\\\\\\\\\\\\\\\)? \\\\\\\\end{tabular}', '', generation)\n    generation = re.sub('(\\\\*\\\\*S\\\\. A\\\\. B\\\\.\\\\*\\\\*\\\\n+){2,}', '', generation)\n    generation = re.sub('^\n    generation = re.sub('^\\\\.\\\\s*$', '', generation, flags=re.M)\n    generation = re.sub('\\\\n{3,}', '\\n\\n', generation)\n    if fix_markdown:\n        return markdown_compatible(generation)\n    else:\n        return generation", "docstring": "Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article\nauthors. These expressions are commented for clarity and tested end-to-end in most cases.\n\nArgs:\ngeneration (str): The generated text to be postprocessed.\nfix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True.\n\nReturns:\nstr: The postprocessed text.", "source": "github-repos"}
{"code": "def set(self, name, value, autodeclare=False):\n    if ((not autodeclare) and (name not in self._data)):\n        raise KeyError('Key {} has not been declared and autodeclare=False'.format(name))\n    self._ensure_declared(name)\n    self._data[name].set_result(value)", "docstring": "Set the value of a key.\n\nThis method will cause anyone waiting on a key (and any future\nwaiters) to unblock and be returned the value you pass here.\n\nIf the key has not been declared previously, a KeyError() is\nraised unless you pass ``autodeclare=True`` which will cause\nthe key to be declared.  Normally you don't want to autodeclare.\n\nThis method is not a coroutine and does not block.\n\nArgs:\nname (str): The key to set\nvalue (object): The value to set\nautodeclare (bool): Whether to automatically declare the\nkey if is has not already been declared.  Defaults to\nFalse.", "source": "codesearchnet"}
{"code": "def convert_args_to_laid_out_tensors(xs):\n  \n  ret = []\n  for x in xs:\n    if hasattr(x, \"to_laid_out_tensor\"):\n      ret.append(x.to_laid_out_tensor())\n    else:\n      ret.append(x)\n  return ret", "docstring": "Convert list elements to laid-out-tensors when possible.\n\nArgs:\nxs: a list\nReturns:\na list", "source": "juraj-google-style"}
{"code": "def _bbox_intersect(nodes1, nodes2):\n    (left1, right1, bottom1, top1) = _helpers.bbox(nodes1)\n    (left2, right2, bottom2, top2) = _helpers.bbox(nodes2)\n    if ((right2 < left1) or (right1 < left2) or (top2 < bottom1) or (top1 < bottom2)):\n        return BoxIntersectionType.DISJOINT\n    if ((right2 == left1) or (right1 == left2) or (top2 == bottom1) or (top1 == bottom2)):\n        return BoxIntersectionType.TANGENT\n    else:\n        return BoxIntersectionType.INTERSECTION", "docstring": "r\"\"\"Bounding box intersection predicate.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nDetermines if the bounding box of two sets of control points\nintersects in :math:`\\mathbf{R}^2` with non-trivial\nintersection (i.e. tangent bounding boxes are insufficient).\n\n.. note::\n\nThough we assume (and the code relies on this fact) that\nthe nodes are two-dimensional, we don't check it.\n\nArgs:\nnodes1 (numpy.ndarray): Set of control points for a\nB |eacute| zier shape.\nnodes2 (numpy.ndarray): Set of control points for a\nB |eacute| zier shape.\n\nReturns:\nint: Enum from ``BoxIntersectionType`` indicating the type of\nbounding box intersection.", "source": "codesearchnet"}
{"code": "def remove_tag(self, tag):\n    return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')", "docstring": "Remove a tag\n\nArgs:\ntag (str): Tag to remove\n\nReturns:\nbool: True if tag removed or False if not", "source": "codesearchnet"}
{"code": "def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n    batch_outputs = {}\n    for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):\n        batch_text_or_text_pair, boxes_example = example\n        outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens.\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs", "source": "github-repos"}
{"code": "def _initialize_mesh_dimension_name_to_size(self, mesh_shape):\n    mesh_dimension_name_to_size = {}\n    for mesh_dimension in mesh_shape.dims:\n        mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size\n    return mesh_dimension_name_to_size", "docstring": "Initializer for self._mesh_dimension_name_to_size.\n\nArgs:\nmesh_shape: an mtf.Shape.\n\nReturns:\nA {string: int} mapping mesh dimension names to their sizes.", "source": "codesearchnet"}
{"code": "def to_json(self):\n    return {'resourceType': self.resource.resource_type_id, 'resourceId': self.id, 'accountId': self.resource.account_id, 'account': self.account, 'location': self.resource.location, 'properties': {to_camelcase(prop.name): prop.value for prop in self.resource.properties}, 'tags': [{'key': t.key, 'value': t.value} for t in self.resource.tags]}", "docstring": "Return a `dict` representation of the resource, including all properties and tags\n\nReturns:\n`dict`", "source": "codesearchnet"}
{"code": "def _validate_children_inputs_mappings(self, children_inputs_mappings):\n    assert isinstance(children_inputs_mappings, dict)\n    assert 'parent_first_child_input' in children_inputs_mappings\n    assert 'parent_last_child_output' in children_inputs_mappings\n    assert 'internal_children_input_output' in children_inputs_mappings\n\n    def assert_dictlist_has_keys(dictlist, keys):\n        for dikt in dictlist:\n            assert isinstance(dikt, dict)\n            for key in keys:\n                assert key in dikt\n    assert_dictlist_has_keys(children_inputs_mappings['parent_first_child_input'], ['parent_ophint_input_index', 'first_child_ophint_input_index'])\n    assert_dictlist_has_keys(children_inputs_mappings['parent_last_child_output'], ['parent_output_index', 'child_output_index'])\n    assert_dictlist_has_keys(children_inputs_mappings['internal_children_input_output'], ['child_input_index', 'child_output_index'])", "docstring": "Validate children inputs mappings is in the right format.\n\nArgs:\nchildren_inputs_mappings: the Children ophint inputs/outputs mapping.", "source": "github-repos"}
{"code": "def get_synchronous_execution():\n    return context.context().execution_mode == context.SYNC", "docstring": "Gets whether operations are executed synchronously or asynchronously.\n\nTensorFlow can execute operations synchronously or asynchronously. If\nasynchronous execution is enabled, operations may return \"non-ready\" handles.\n\nReturns:\nCurrent thread execution mode", "source": "github-repos"}
{"code": "def shape(self) -> torch.Size:\n    return self._trans.shape[:-1]", "docstring": "Returns the shape of the shared dimensions of the rotation and the translation.\n\nReturns:\nThe shape of the transformation", "source": "github-repos"}
{"code": "def _abstractify_value(val: '_instances.ConcreteValue', ctx: 'context.Context', seen: 'set[_base.BaseValue] | None'=None) -> '_instances.ConcreteValue':\n    if seen is None:\n        seen = set()\n    if not val.is_concrete or val in seen:\n        return val\n    seen = seen | {val}\n    if not isinstance(val.pyval, (list, tuple)):\n        return ctx.convert.get_maybe_abstract_instance(val)\n    new_content = []\n    for elem in val.pyval:\n        new_elem_data = [_abstractify_value(v, ctx, seen) for v in elem.data]\n        if any((v != new_v for v, new_v in zip(elem.data, new_elem_data))):\n            new_elem = ctx.program.NewVariable()\n            for b, new_data in zip(elem.bindings, new_elem_data):\n                new_elem.PasteBindingWithNewData(b, new_data)\n            new_content.append(new_elem)\n        else:\n            new_content.append(elem)\n    if any((elem != new_elem for elem, new_elem in zip(val.pyval, new_content))):\n        return type(val)(type(val.pyval)(new_content), ctx)\n    else:\n        return val", "docstring": "Converts a maybe-abstract value to a concrete one.\n\nArgs:\nval: A value.\nctx: The context.\nseen: Optionally, a seen values set.\n\nUnlike ctx.convert.get_maybe_abstract_instance, this method recursively\ndescends into lists and tuples.\n\nReturns:\nA concrete value.", "source": "github-repos"}
{"code": "def organize_models(self, outdir, force_rerun=False):\n    uniprot_to_swissmodel = defaultdict(list)\n    for (u, models) in self.all_models.items():\n        for m in models:\n            original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])\n            file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:], 'swissmodel', '{}.pdb'.format(original_filename))\n            if op.exists(file_path):\n                new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])\n                shutil.copy(file_path, op.join(outdir, new_filename))\n                uniprot_to_swissmodel[u].append(new_filename)\n            else:\n                log.warning('{}: no file {} found for model'.format(u, file_path))\n    return uniprot_to_swissmodel", "docstring": "Organize and rename SWISS-MODEL models to a single folder with a name containing template information.\n\nArgs:\noutdir (str): New directory to copy renamed models to\nforce_rerun (bool): If models should be copied again even if they already exist\n\nReturns:\ndict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values", "source": "codesearchnet"}
{"code": "def alltoall(self, x, mesh_axis, split_axis, concat_axis):\n    \n    return self._collective_with_groups(\n        x, [mesh_axis],\n        functools.partial(\n            alltoall_ring, split_axis=split_axis, concat_axis=concat_axis))", "docstring": "Grouped alltoall.\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer the mesh axis along which to group\nsplit_axis: an integer (the Tensor axis along which to split)\nconcat_axis: an integer (the Tensor axis along which to concatenate)\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def model_fn_sharded(self, sharded_features):\n    \n    dp = self._data_parallelism\n\n    \n    datashard_to_features = self._to_features_per_datashard(sharded_features)\n    if self.use_body_sharded():\n      if  self.hparams.scheduled_sampling_prob > 0.0:\n        raise NotImplementedError(\n            \"Scheduled sampling for non-sharded body only.\")\n\n      \n      transformed_features = dp(self.bottom, datashard_to_features)\n      body_out = self.body_sharded(\n          self._to_single_features_dict(transformed_features))\n      body_out, losses = self._normalize_body_output(body_out)\n      if \"training\" in losses:\n        log_info(\"Skipping T2TModel top and loss because training loss \"\n                 \"returned from body\")\n        sharded_logits = body_out\n      else:\n        if isinstance(body_out, dict):\n          sharded_logits = collections.OrderedDict()\n          sharded_losses = collections.OrderedDict()\n          for k, v in sorted(six.iteritems(body_out)):\n            sharded_logits[k] = dp(self.top, v, datashard_to_features)\n            sharded_losses[k] = dp(self.loss, sharded_logits[k],\n                                   datashard_to_features)\n          training_loss_dict = average_sharded_losses([({\n              \"training\": l\n          } for l in loss) for loss in sharded_losses.values()])\n          losses.update(training_loss_dict)\n        else:\n          sharded_logits = dp(self.top, body_out, datashard_to_features)\n          sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)\n          if isinstance(sharded_losses, tuple):\n            nums, dens = sharded_losses\n            sharded_losses = zip(nums, dens)\n          training_loss_dict = average_sharded_losses([{\n              \"training\": loss\n          } for loss in sharded_losses])\n          losses.update(training_loss_dict)\n    else:\n      sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)\n      sharded_logits, sharded_losses = dp(\n          self.maybe_scheduled_sampling,\n          datashard_to_features, sharded_logits, sharded_losses)\n      if isinstance(sharded_logits[0], dict):\n        temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}\n        for k, _ in six.iteritems(sharded_logits[0]):\n          for l in sharded_logits:\n            temp_dict[k].append(l[k])\n        sharded_logits = temp_dict\n      losses = average_sharded_losses(sharded_losses)\n\n    return sharded_logits, losses", "docstring": "Estimator model_fn sharded along batch dimension.\n\nArgs:\nsharded_features: {str: [Tensor]}. Features sharded along batch dimension.\nEach list is the same length (== number of shards).\n\nReturns:\nsharded_logits: [Tensor]. Logits for each shard of examples.\nlosses: {str: 0-D Tensor}. Loss averaged across shards.", "source": "juraj-google-style"}
{"code": "def take_bug_report(self, test_name, begin_time, timeout=300, destination=None):\n    new_br = True\n    try:\n        stdout = self.adb.shell('bugreportz -v').decode('utf-8')\n        if ('not found' in stdout):\n            new_br = False\n    except adb.AdbError:\n        new_br = False\n    if destination:\n        br_path = utils.abs_path(destination)\n    else:\n        br_path = os.path.join(self.log_path, 'BugReports')\n    utils.create_dir(br_path)\n    base_name = (',%s,%s.txt' % (begin_time, self._normalized_serial))\n    if new_br:\n        base_name = base_name.replace('.txt', '.zip')\n    test_name_len = (utils.MAX_FILENAME_LEN - len(base_name))\n    out_name = (test_name[:test_name_len] + base_name)\n    full_out_path = os.path.join(br_path, out_name.replace(' ', '\\\\ '))\n    self.wait_for_boot_completion()\n    self.log.info('Taking bugreport for %s.', test_name)\n    if new_br:\n        out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')\n        if (not out.startswith('OK')):\n            raise DeviceError(self, ('Failed to take bugreport: %s' % out))\n        br_out_path = out.split(':')[1].strip()\n        self.adb.pull([br_out_path, full_out_path])\n    else:\n        self.adb.bugreport((' > \"%s\"' % full_out_path), shell=True, timeout=timeout)\n    self.log.info('Bugreport for %s taken at %s.', test_name, full_out_path)", "docstring": "Takes a bug report on the device and stores it in a file.\n\nArgs:\ntest_name: Name of the test method that triggered this bug report.\nbegin_time: Timestamp of when the test started.\ntimeout: float, the number of seconds to wait for bugreport to\ncomplete, default is 5min.\ndestination: string, path to the directory where the bugreport\nshould be saved.", "source": "codesearchnet"}
{"code": "def get_version():\n    if PackageHelper.__version:\n        return PackageHelper.__version\n    PackageHelper.__version = 'Unknown'\n    file = os.path.realpath(__file__)\n    folder = os.path.dirname(file)\n    try:\n        semver = open((folder + '/../../.semver'), 'r')\n        PackageHelper.__version = semver.read().rstrip()\n        semver.close()\n        return PackageHelper.__version\n    except:\n        pass\n    try:\n        distribution = pkg_resources.get_distribution(PackageHelper.get_alias())\n        if distribution.version:\n            PackageHelper.__version = distribution.version\n        return PackageHelper.__version\n    except:\n        pass\n    return PackageHelper.__version", "docstring": "Get the version number of this package.\n\nReturns:\nstr: The version number (marjor.minor.patch).\n\nNote:\nWhen this package is installed, the version number will be available through the\npackage resource details. Otherwise this method will look for a ``.semver`` file.\n\nNote:\nIn rare cases corrupt installs can cause the version number to be unknown. In this case\nthe version number will be set to the string \"Unknown\".", "source": "codesearchnet"}
{"code": "def add_or_update(data, item, value):\n    \n    data = data.splitlines()\n\n    \n    \n    \n    data = map(lambda x: bytearray(x), data)\n\n    \n    conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data)\n\n    if conf:\n        conf[0][:] = conf[0].strip().split()[0] + \" \" + value\n    else:\n        \n        comments = filter(\n            lambda x: x.strip().startswith(\"\n                      and len(x.split(\"\n                      and x.split(\"\n                      and x.split(\"\n            data\n        )\n\n        if comments:\n            comments[0][:] = comments[0].split(\"\n        else:\n            \n            data.append(item + \" \" + value + \"\\n\")\n\n    return \"\\n\".join(map(lambda x: str(x), data))", "docstring": "Add or update value in configuration file format used by proftpd.\n\nArgs:\ndata (str): Configuration file as string.\nitem (str): What option will be added/updated.\nvalue (str): Value of option.\n\nReturns:\nstr: updated configuration", "source": "juraj-google-style"}
{"code": "def download(self):\n    self.downloaded_paths = list()\n    for path in self.paths_for_download:\n        downloaded_path = list()\n        utils.mkdir_p(os.path.abspath(self.directory))\n        sra_run = path.split('/')[(- 1)]\n        logger.info(('Analysing %s' % sra_run))\n        url = type(self).FTP_ADDRESS_TPL.format(range_subdir=sra_run[:6], file_dir=sra_run)\n        logger.debug('URL: %s', url)\n        filepath = os.path.abspath(os.path.join(self.directory, ('%s.sra' % sra_run)))\n        utils.download_from_url(url, filepath, aspera=self.aspera, silent=self.silent, force=self.force)\n        if (self.filetype in ('fasta', 'fastq')):\n            if (utils.which('fastq-dump') is None):\n                logger.error('fastq-dump command not found')\n            ftype = ''\n            if (self.filetype == 'fasta'):\n                ftype = ' --fasta '\n            cmd = 'fastq-dump'\n            if (utils.which('parallel-fastq-dump') is None):\n                cmd += ' %s --outdir %s %s'\n            else:\n                logger.debug('Using parallel fastq-dump')\n                cmd = ' parallel-fastq-dump --threads %s'\n                cmd = (cmd % self.threads)\n                cmd += ' %s --outdir %s -s %s'\n            cmd = (cmd % (ftype, self.directory, filepath))\n            for (fqoption, fqvalue) in iteritems(self.fastq_dump_options):\n                if fqvalue:\n                    cmd += (' --%s %s' % (fqoption, fqvalue))\n                elif (fqvalue is None):\n                    cmd += (' --%s' % fqoption)\n            logger.debug(cmd)\n            process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n            logger.info(('Converting to %s/%s*.%s.gz\\n' % (self.directory, sra_run, self.filetype)))\n            (pout, perr) = process.communicate()\n            downloaded_path = glob.glob(os.path.join(self.directory, ('%s*.%s.gz' % (sra_run, self.filetype))))\n        elif (self.filetype == 'sra'):\n            downloaded_path = glob.glob(os.path.join(self.directory, ('%s*.%s' % (sra_run, self.filetype))))\n        else:\n            downloaded_path = glob.glob(os.path.join(self.directory, ('%s*' % sra_run)))\n            logger.error(('Filetype %s not supported.' % self.filetype))\n        if ((not self.keep_sra) and (self.filetype != 'sra')):\n            os.unlink(filepath)\n        self.downloaded_paths += downloaded_path\n    return self.downloaded_paths", "docstring": "Download SRA files.\n\nReturns:\n:obj:`list` of :obj:`str`: List of downloaded files.", "source": "codesearchnet"}
{"code": "def run(self, **kwargs):\n    super().run(**kwargs)\n    scheduler = self.scheduler_plugins[self.active_scheduler]()\n    if (not kwargs['no_daemon']):\n        self.log.info('Starting {} worker with {} threads checking for new messages every {} seconds'.format(scheduler.name, kwargs['threads'], kwargs['delay']))\n        for i in range(kwargs['threads']):\n            thd = threading.Thread(target=self.execute_worker_thread, args=(scheduler.execute_worker, kwargs['delay']))\n            thd.start()\n    else:\n        self.log.info('Starting {} worker for a single non-daemon execution'.format(scheduler.name))\n        scheduler.execute_worker()", "docstring": "Execute the worker thread.\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def load_state_dict(module, state_dict, strict=False, logger=None):\n    unexpected_keys = []\n    own_state = module.state_dict()\n    for (name, param) in state_dict.items():\n        if (name not in own_state):\n            unexpected_keys.append(name)\n            continue\n        if isinstance(param, torch.nn.Parameter):\n            param = param.data\n        try:\n            own_state[name].copy_(param)\n        except Exception:\n            raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))\n    missing_keys = (set(own_state.keys()) - set(state_dict.keys()))\n    err_msg = []\n    if unexpected_keys:\n        err_msg.append('unexpected key in source state_dict: {}\\n'.format(', '.join(unexpected_keys)))\n    if missing_keys:\n        err_msg.append('missing keys in source state_dict: {}\\n'.format(', '.join(missing_keys)))\n    err_msg = '\\n'.join(err_msg)\n    if err_msg:\n        if strict:\n            raise RuntimeError(err_msg)\n        elif (logger is not None):\n            logger.warn(err_msg)\n        else:\n            print(err_msg)", "docstring": "Load state_dict to a module.\n\nThis method is modified from :meth:`torch.nn.Module.load_state_dict`.\nDefault value for ``strict`` is set to ``False`` and the message for\nparam mismatch will be shown even if strict is False.\n\nArgs:\nmodule (Module): Module that receives the state_dict.\nstate_dict (OrderedDict): Weights.\nstrict (bool): whether to strictly enforce that the keys\nin :attr:`state_dict` match the keys returned by this module's\n:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.\nlogger (:obj:`logging.Logger`, optional): Logger to log the error\nmessage. If not specified, print function will be used.", "source": "codesearchnet"}
{"code": "def convert_variables_to_tensors(values):\n\n    def _convert_resource_variable_to_tensor(x):\n        if _pywrap_utils.IsResourceVariable(x):\n            return ops.convert_to_tensor(x)\n        elif isinstance(x, composite_tensor.CompositeTensor):\n            return composite_tensor.convert_variables_to_tensors(x)\n        else:\n            return x\n    return nest.map_structure(_convert_resource_variable_to_tensor, values)", "docstring": "Converts `ResourceVariable`s in `values` to `Tensor`s.\n\nIf an object is a `CompositeTensor` and overrides its\n`_convert_variables_to_tensors` method, its `ResourceVariable` components\nwill also be converted to `Tensor`s. Objects other than `ResourceVariable`s\nin `values` will be returned unchanged.\n\nArgs:\nvalues: A nested structure of `ResourceVariable`s, or any other objects.\n\nReturns:\nA new structure with `ResourceVariable`s in `values` converted to `Tensor`s.", "source": "github-repos"}
{"code": "def exists(self, path):\n    try:\n        return self._blobstorageIO().exists(path)\n    except Exception as e:\n        raise BeamIOError('Exists operation failed', {path: e})", "docstring": "Check if the provided path exists on the FileSystem.\n\nArgs:\npath: string path that needs to be checked.\n\nReturns: boolean flag indicating if path exists", "source": "github-repos"}
{"code": "def verify_password(self, password, password_hash):\n    if isinstance(password_hash, self.user_manager.db_manager.UserClass):\n        print('Deprecation warning: verify_password(password, user) has been changed to: verify_password(password, password_hash). The user param will be deprecated. Please change your call with verify_password(password, user) into a call with verify_password(password, user.password) as soon as possible.')\n        password_hash = password_hash.password\n    return self.password_crypt_context.verify(password, password_hash)", "docstring": "Verify plaintext ``password`` against ``hashed password``.\n\nArgs:\npassword(str): Plaintext password that the user types in.\npassword_hash(str): Password hash generated by a previous call to ``hash_password()``.\nReturns:\n| True when ``password`` matches ``password_hash``.\n| False otherwise.\nExample:\n\n::\n\nif verify_password('mypassword', user.password):\nlogin_user(user)", "source": "codesearchnet"}
{"code": "def stepEnabled(self):\n    if ((self.value() > self.minimum()) and (self.value() < self.maximum())):\n        return (self.StepUpEnabled | self.StepDownEnabled)\n    elif (self.value() <= self.minimum()):\n        return self.StepUpEnabled\n    elif (self.value() >= self.maximum()):\n        return self.StepDownEnabled", "docstring": "Virtual function that determines whether stepping up and down is legal at any given time.\n\nReturns:\nored combination of StepUpEnabled | StepDownEnabled", "source": "codesearchnet"}
{"code": "def _add_new_ide_controller_helper(ide_controller_label,\n                                   controller_key,\n                                   bus_number):\n    \n    if controller_key is None:\n        controller_key = randint(-200, 250)\n\n    ide_spec = vim.vm.device.VirtualDeviceSpec()\n    ide_spec.device = vim.vm.device.VirtualIDEController()\n\n    ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n\n    ide_spec.device.key = controller_key\n    ide_spec.device.busNumber = bus_number\n    ide_spec.device.deviceInfo = vim.Description()\n    ide_spec.device.deviceInfo.label = ide_controller_label\n    ide_spec.device.deviceInfo.summary = ide_controller_label\n\n    return ide_spec", "docstring": "Helper function for adding new IDE controllers\n\n.. versionadded:: 2016.3.0\n\nArgs:\nide_controller_label: label of the IDE controller\ncontroller_key: if not None, the controller key to use; otherwise it is randomly generated\nbus_number: bus number\n\nReturns: created device spec for an IDE controller", "source": "juraj-google-style"}
{"code": "def get_data(__pkg: str, __name: str) -> str:\n    for dname in get_data_dirs(__pkg):\n        test_path = path.join(dname, __name)\n        if path.exists(test_path):\n            return test_path\n    raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))", "docstring": "Return top-most data file for given package.\n\nArgs:\n__pkg: Package name\n__name: Data file name", "source": "codesearchnet"}
{"code": "def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None,\n                is_training=True):\n  \n  with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse):\n\n    \n    x *= 256\n    x = x - COLOR_NORMALIZATION_VECTOR\n\n    with arg_scope(vgg.vgg_arg_scope()):\n      \n      x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH],\n                     [0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]])\n      _, end_points = vgg.vgg_16(\n          x,\n          num_classes=enc_final_size,\n          is_training=is_training)\n      pool5_key = [key for key in end_points.keys() if 'pool5' in key]\n      assert len(pool5_key) == 1\n      enc = end_points[pool5_key[0]]\n      \n      enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])\n\n    enc_shape = enc.get_shape().as_list()\n    enc_shape[0] = -1\n    enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3]\n\n    enc_flat = tf.reshape(enc, (-1, enc_size))\n    enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)\n\n    enc_flat = tf.layers.dense(\n        enc_flat,\n        enc_final_size,\n        kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4,))\n\n    if hparams.enc_pred_use_l2norm:\n      enc_flat = tf.nn.l2_normalize(enc_flat, 1)\n\n  return enc_flat", "docstring": "VGG network to use as encoder without the top few layers.\n\nCan be pretrained.\n\nArgs:\nx: The image to encode. In the range 0 to 1.\nenc_final_size: The desired size of the encoding.\nreuse: To reuse in variable scope or not.\nscope_prefix: The prefix before the scope name.\nhparams: The python hparams.\nis_training: boolean value indicating if training is happening.\n\nReturns:\nThe generated image.", "source": "juraj-google-style"}
{"code": "def get_timestamped_export_dir(export_dir_base):\n    attempts = 0\n    while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n        timestamp = int(time.time())\n        result_dir = file_io.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))\n        if not gfile.Exists(result_dir):\n            return result_dir\n        time.sleep(1)\n        attempts += 1\n        logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))\n    raise RuntimeError(f'Failed to obtain a unique export directory name after {MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')", "docstring": "Builds a path to a new subdirectory within the base directory.\n\nEach export is written into a new subdirectory named using the\ncurrent time.  This guarantees monotonically increasing version\nnumbers even across multiple runs of the pipeline.\nThe timestamp used is the number of seconds since epoch UTC.\n\nArgs:\nexport_dir_base: A string containing a directory to write the exported\ngraph and checkpoints.\nReturns:\nThe full path of the new subdirectory (which is not actually created yet).\n\nRaises:\nRuntimeError: if repeated attempts fail to obtain a unique timestamped\ndirectory name.", "source": "github-repos"}
{"code": "def get_all_supported_aspect_ratios(min_image_tiles: int, max_image_tiles: int) -> List[Tuple[int, int]]:\n    aspect_ratios = []\n    for width in range(1, max_image_tiles + 1):\n        for height in range(1, max_image_tiles + 1):\n            if width * height <= max_image_tiles and width * height >= min_image_tiles:\n                aspect_ratios.append((width, height))\n    aspect_ratios = sorted(aspect_ratios, key=lambda x: x[0] * x[1])\n    return aspect_ratios", "docstring": "Computes all allowed aspect ratios for a given minimum and maximum number of input tiles.\n\nThis function calculates all possible arrangements of tiles that can be formed\nwithin the constraint of the minimum and maximum number of tiles. Each arrangement is\nrepresented by its aspect ratio (width/height) and the corresponding tile configuration.\n\nArgs:\nmin_image_tiles (`int`):\nThe minimum number of tiles allowed.\nmax_image_tiles (`int`):\nThe maximum number of tiles allowed.\n\nReturns:\n`List[Tuple[int, int]]`: A list of tuples, each tuple representing a valid (width, height)\nconfiguration in terms of number of tiles.\n\nExample:\n>>> get_all_supported_aspect_ratios(1, 4)\n[(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (2, 2), (4, 1)]", "source": "github-repos"}
{"code": "def from_string(cls, key, password='notasecret'):\n    key = _helpers._from_bytes(key)\n    (marker_id, key_bytes) = pem.readPemBlocksFromFile(six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)\n    if (marker_id == 0):\n        pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER')\n    elif (marker_id == 1):\n        (key_info, remaining) = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)\n        if (remaining != b''):\n            raise ValueError('Unused bytes', remaining)\n        pkey_info = key_info.getComponentByName('privateKey')\n        pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(), format='DER')\n    else:\n        raise ValueError('No key could be detected.')\n    return cls(pkey)", "docstring": "Construct an RsaSigner instance from a string.\n\nArgs:\nkey: string, private key in PEM format.\npassword: string, password for private key file. Unused for PEM\nfiles.\n\nReturns:\nRsaSigner instance.\n\nRaises:\nValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in\nPEM format.", "source": "codesearchnet"}
{"code": "def serialize_skycoord(o):\n    \n    representation = o.representation.get_name()\n    frame = o.frame.name\n\n    r = o.represent_as('spherical')\n\n    d = dict(\n        _type='astropy.coordinates.SkyCoord',\n        frame=frame,\n        representation=representation,\n        lon=r.lon,\n        lat=r.lat)\n\n    if len(o.distance.unit.to_string()):\n        d['distance'] = r.distance\n\n    return d", "docstring": "Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.\n\nArgs:\no (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.\n\nReturns:\nA dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj-google-style"}
{"code": "def get_user_information(self):\n    url = 'https:\n    headers = self.__gen_headers()\n    headers['Content-Type'] = 'application/json'\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Gets the current user information, including sensor ID\n\nArgs:\nNone\n\nReturns:\ndictionary object containing information about the current user", "source": "codesearchnet"}
{"code": "def __init__(self, topic_path, add_uuids=None, expansion_service=None):\n    if add_uuids is None:\n        add_uuids = False\n    if expansion_service is None:\n        expansion_service = _default_io_expansion_service()\n    super().__init__('beam:transform:org.apache.beam:pubsublite_write:v1', NamedTupleBasedPayloadBuilder(_WriteSchema(topic_path=topic_path, add_uuids=add_uuids)), expansion_service)", "docstring": "Initializes a write operation to Pub/Sub Lite, writing the serialized bytes\nof PubSubMessage protos.\n\nArgs:\ntopic_path: A Pub/Sub Lite Topic path.\nadd_uuids: Whether to add uuids to the 'x-goog-pubsublite-dataflow-uuid'\nuuid attribute.", "source": "github-repos"}
{"code": "def load_op_from_signature_def(signature_def, key, import_scope=None):\n    tensor_info = signature_def.outputs[key]\n    try:\n        return utils.get_element_from_tensor_info(tensor_info, import_scope=import_scope)\n    except KeyError:\n        raise errors.NotFoundError(None, None, f'The key \"{key}\" could not be found in the graph. Please make sure the SavedModel was created by the internal _SavedModelBuilder. If you are using the public API, please make sure the SignatureDef in the SavedModel does not contain the key \"{key}\".')", "docstring": "Load an Op from a SignatureDef created by op_signature_def().\n\nArgs:\nsignature_def: a SignatureDef proto\nkey: string key to op in the SignatureDef outputs.\nimport_scope: Scope used to import the op\n\nReturns:\nOp (or possibly Tensor) in the graph with the same name as saved in the\nSignatureDef.\n\nRaises:\nNotFoundError: If the op could not be found in the graph.", "source": "github-repos"}
{"code": "def matchall(text, patterns):\n    \n\n    ret = []\n    for pattern in patterns:\n        match = re.findall(pattern, text)\n        ret += match\n\n    return ret", "docstring": "Scans through a string for substrings matched some patterns.\n\nArgs:\ntext: A string to be scanned.\npatterns: a list of regex pattern.\n\nReturns:\na list if matched. empty if not.", "source": "juraj-google-style"}
{"code": "def _WsdlHasMethod(self, method_name):\n    try:\n        self._method_bindings.get(method_name)\n        return True\n    except ValueError:\n        return False", "docstring": "Determine if a method is in the wsdl.\n\nArgs:\nmethod_name: The name of the method.\n\nReturns:\nTrue if the method is in the wsdl, otherwise False.", "source": "codesearchnet"}
{"code": "def learn_one(self, x: beam.Row) -> None:\n    raise NotImplementedError", "docstring": "Trains the detector on a single data instance.\n\nArgs:\nx: A `beam.Row` representing the data instance.", "source": "github-repos"}
{"code": "def Where(self, field):\n    \n    where_builder = _WhereBuilder(self, field)\n    self.where_builders.append(where_builder)\n    return where_builder", "docstring": "Creates a WHERE builder using a provided field.\n\nArgs:\nfield: the field to be added as an argument in the WHERE clause.\n\nReturns:\nThe created WHERE builder.", "source": "juraj-google-style"}
{"code": "def _Operations(self, rule, line):\n    if (rule.record_op == 'Record'):\n        self._AppendRecord()\n    elif (rule.record_op == 'Clear'):\n        self._ClearRecord()\n    elif (rule.record_op == 'Clearall'):\n        self._ClearAllRecord()\n    if (rule.line_op == 'Error'):\n        if rule.new_state:\n            raise TextFSMError(('Error: %s. Rule Line: %s. Input Line: %s.' % (rule.new_state, rule.line_num, line)))\n        raise TextFSMError(('State Error raised. Rule Line: %s. Input Line: %s' % (rule.line_num, line)))\n    elif (rule.line_op == 'Continue'):\n        return False\n    return True", "docstring": "Operators on the data record.\n\nOperators come in two parts and are a '.' separated pair:\n\nOperators that effect the input line or the current state (line_op).\n'Next'      Get next input line and restart parsing (default).\n'Continue'  Keep current input line and continue resume parsing.\n'Error'     Unrecoverable input discard result and raise Error.\n\nOperators that affect the record being built for output (record_op).\n'NoRecord'  Does nothing (default)\n'Record'    Adds the current record to the result.\n'Clear'     Clears non-Filldown data from the record.\n'Clearall'  Clears all data from the record.\n\nArgs:\nrule: FSMRule object.\nline: A string, the current input line.\n\nReturns:\nTrue if state machine should restart state with new line.\n\nRaises:\nTextFSMError: If Error state is encountered.", "source": "codesearchnet"}
{"code": "def convertData(self, contents, def_buf, kwh_scale=ScaleKWH.EmptyScale):\n    log_str = ''\n    count = 0\n    if (kwh_scale == ScaleKWH.EmptyScale):\n        scale_offset = int(def_buf.keys().index(Field.kWh_Scale))\n        self.m_kwh_precision = kwh_scale = int(contents[scale_offset])\n    for fld in def_buf:\n        if def_buf[fld][MeterData.CalculatedFlag]:\n            count += 1\n            continue\n        if (len(contents) == 0):\n            count += 1\n            continue\n        try:\n            raw_data = contents[count]\n            fld_type = def_buf[fld][MeterData.TypeValue]\n            fld_scale = def_buf[fld][MeterData.ScaleValue]\n            if (fld_type == FieldType.Float):\n                float_data = float(str(raw_data))\n                divisor = 1\n                if (fld_scale == ScaleType.KWH):\n                    divisor = 1\n                    if (kwh_scale == ScaleKWH.Scale10):\n                        divisor = 10\n                    elif (kwh_scale == ScaleKWH.Scale100):\n                        divisor = 100\n                    elif ((kwh_scale != ScaleKWH.NoScale) and (kwh_scale != ScaleKWH.EmptyScale)):\n                        ekm_log('Unrecognized kwh scale.')\n                elif (fld_scale == ScaleType.Div10):\n                    divisor = 10\n                elif (fld_scale == ScaleType.Div100):\n                    divisor = 100\n                elif (fld_scale != ScaleType.No):\n                    ekm_log('Unrecognized float scale.')\n                float_data /= divisor\n                float_data_str = str(float_data)\n                def_buf[fld][MeterData.StringValue] = float_data_str\n                def_buf[fld][MeterData.NativeValue] = float_data\n            elif (fld_type == FieldType.Hex):\n                hex_data = raw_data.encode('hex')\n                def_buf[fld][MeterData.StringValue] = hex_data\n                def_buf[fld][MeterData.NativeValue] = hex_data\n            elif (fld_type == FieldType.Int):\n                integer_data = int(raw_data)\n                integer_data_str = str(integer_data)\n                if (len(integer_data_str) == 0):\n                    integer_data_str = str(0)\n                def_buf[fld][MeterData.StringValue] = integer_data_str\n                def_buf[fld][MeterData.NativeValue] = integer_data\n            elif (fld_type == FieldType.String):\n                string_data = str(raw_data)\n                def_buf[fld][MeterData.StringValue] = string_data\n                def_buf[fld][MeterData.NativeValue] = string_data\n            elif (fld_type == FieldType.PowerFactor):\n                def_buf[fld][MeterData.StringValue] = str(raw_data)\n                def_buf[fld][MeterData.NativeValue] = str(raw_data)\n            else:\n                ekm_log('Unrecognized field type')\n            log_str = (((((log_str + '\"') + fld) + '\":  \"') + def_buf[fld][MeterData.StringValue]) + '\"\\n')\n        except:\n            ekm_log(('Exception on Field:' + str(fld)))\n            ekm_log(traceback.format_exc(sys.exc_info()))\n            self.writeCmdMsg(('Exception on Field:' + str(fld)))\n        count += 1\n    return True", "docstring": "Move data from raw tuple into scaled and conveted values.\n\nArgs:\ncontents (tuple): Breakout of passed block from unpackStruct().\ndef_buf (): Read buffer destination.\nkwh_scale (int):  :class:`~ekmmeters.ScaleKWH` as int, from Field.kWhScale`\n\nReturns:\nbool: True on completion.", "source": "codesearchnet"}
{"code": "def _unflatten_beam_dim(tensor, batch_size, beam_size):\n  \n  shape = _shape_list(tensor)\n  new_shape = [batch_size, beam_size] + shape[1:]\n  return tf.reshape(tensor, new_shape)", "docstring": "Reshapes first dimension back to [batch_size, beam_size].\n\nArgs:\ntensor: Tensor to reshape of shape [batch_size*beam_size, ...]\nbatch_size: Tensor, original batch size.\nbeam_size: int, original beam size.\n\nReturns:\nReshaped tensor of shape [batch_size, beam_size, ...]", "source": "juraj-google-style"}
{"code": "def _open_rpc_interface(self, connection_id, callback):\n        \n\n        try:\n            context = self.connections.get_context(connection_id)\n        except ArgumentError:\n            callback(connection_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))\n\n        try:\n            service = context['services'][TileBusService]\n            header_characteristic = service[ReceiveHeaderChar]\n            payload_characteristic = service[ReceivePayloadChar]\n        except KeyError:\n            self.connections.finish_operation(connection_id, False, \"Can't find characteristics to open rpc interface\")\n            return\n\n        \n        self.bable.set_notification(\n            enabled=True,\n            connection_handle=context['connection_handle'],\n            characteristic=header_characteristic,\n            on_notification_set=[self._on_interface_opened, context, payload_characteristic],\n            on_notification_received=self._on_notification_received,\n            sync=False\n        )", "docstring": "Enable RPC interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def get_shannon_radius(self, cn: str, spin: str='', radius_type: str='ionic'):\n    radii = self._el.data['Shannon radii']\n    if (len(radii[str(int(self._oxi_state))][cn]) == 1):\n        (k, data) = list(radii[str(int(self._oxi_state))][cn].items())[0]\n        if (k != spin):\n            warnings.warn(('Specified spin state of %s not consistent with database spin of %s. Only one spin data available, and that value is returned.' % (spin, k)))\n    else:\n        data = radii[str(int(self._oxi_state))][cn][spin]\n    return data[('%s_radius' % radius_type)]", "docstring": "Get the local environment specific ionic radius for species.\n\nArgs:\ncn (str): Coordination using roman letters. Supported values are\nI-IX, as well as IIIPY, IVPY and IVSQ.\nspin (str): Some species have different radii for different\nspins. You can get specific values using \"High Spin\" or\n\"Low Spin\". Leave it as \"\" if not available. If only one spin\ndata is available, it is returned and this spin parameter is\nignored.\nradius_type (str): Either \"crystal\" or \"ionic\" (default).\n\nReturns:\nShannon radius for specie in the specified environment.", "source": "codesearchnet"}
{"code": "def from_raw(self, file_names=None, **kwargs):\n        \n        \n        \n        \n\n        if file_names:\n            self.file_names = file_names\n\n        if not isinstance(file_names, (list, tuple)):\n            self.file_names = [file_names, ]\n\n        \n        raw_file_loader = self.loader\n        set_number = 0\n        test = None\n        counter = 0\n        self.logger.debug(\"start iterating through file(s)\")\n        for f in self.file_names:\n            self.logger.debug(\"loading raw file:\")\n            self.logger.debug(f\"{f}\")\n            new_tests = raw_file_loader(f, **kwargs)\n            if new_tests:\n                if test is not None:\n                    self.logger.debug(\"continuing reading files...\")\n                    _test = self._append(test[set_number], new_tests[set_number])\n                    if not _test:\n                        self.logger.warning(f\"EMPTY TEST: {f}\")\n                        continue\n                    test[set_number] = _test\n                    self.logger.debug(\"added this test - started merging\")\n                    for j in range(len(new_tests[set_number].raw_data_files)):\n                        raw_data_file = new_tests[set_number].raw_data_files[j]\n                        file_size = new_tests[set_number].raw_data_files_length[j]\n                        test[set_number].raw_data_files.append(raw_data_file)\n                        test[set_number].raw_data_files_length.append(file_size)\n                        counter += 1\n                        if counter > 10:\n                            self.logger.debug(\"ERROR? Too many files to merge\")\n                            raise ValueError(\"Too many files to merge - \"\n                                             \"could be a p2-p3 zip thing\")\n                else:\n                    self.logger.debug(\"getting data from first file\")\n                    if new_tests[set_number].no_data:\n                        self.logger.debug(\"NO DATA\")\n                    else:\n                        test = new_tests\n            else:\n                self.logger.debug(\"NOTHING LOADED\")\n\n        self.logger.debug(\"finished loading the raw-files\")\n\n        test_exists = False\n        if test:\n            if test[0].no_data:\n                self.logging.debug(\"the first dataset (or only dataset) loaded from the raw data file is empty\")\n            else:\n                test_exists = True\n\n        if test_exists:\n            if not prms.Reader.sorted_data:\n                self.logger.debug(\"sorting data\")\n                test[set_number] = self._sort_data(test[set_number])\n\n            self.datasets.append(test[set_number])\n        else:\n            self.logger.warning(\"No new datasets added!\")\n        self.number_of_datasets = len(self.datasets)\n        self.status_datasets = self._validate_datasets()\n        self._invent_a_name()\n        return self", "docstring": "Load a raw data-file.\n\nArgs:\nfile_names (list of raw-file names): uses CellpyData.file_names if\nNone. If the list contains more than one file name, then the\nruns will be merged together.", "source": "juraj-google-style"}
{"code": "def DotProductAttention(query, key, value, mask, dropout, mode, rng):\n  \n  depth = np.shape(query)[-1]\n  dots = np.matmul(query, np.swapaxes(key, -1, -2)) / np.sqrt(depth)\n  if mask is not None:\n    dots = np.where(mask, dots, -1e9)\n  \n  dots = np.exp(dots - backend.logsumexp(dots, axis=-1, keepdims=True))\n  if dropout >= 1.0:\n    raise ValueError('Dropout rates must be lower than 1.')\n  if dropout is not None and dropout > 0.0 and mode == 'train':\n    keep = backend.random.bernoulli(rng, 1.0 - dropout, dots.shape)\n    dots = np.where(keep, dots / (1.0 - dropout), 0)\n  out = np.matmul(dots, value)\n  return out", "docstring": "Core dot product self-attention.\n\nArgs:\nquery: array of representations\nkey: array of representations\nvalue: array of representations\nmask: attention-mask, gates attention\ndropout: float: dropout rate\nmode: 'eval' or 'train': whether to use dropout\nrng: JAX PRNGKey: subkey for disposable use\n\nReturns:\nSelf attention for q, k, v arrays.", "source": "juraj-google-style"}
{"code": "def autodiscover(self, autoregister=True):\n    logger.debug(('<%s> Sending autodiscover message to broadcast address' % str(self.cuuid)))\n    if (not self.listener.listening):\n        logger.warning('Neteria client is not listening. The client will not be able to process responses from the server')\n    message = serialize_data({'method': 'OHAI', 'version': self.version, 'cuuid': str(self.cuuid)}, self.compression, encryption=False)\n    if autoregister:\n        self.autoregistering = True\n    self.listener.send_datagram(message, ('<broadcast>', self.server_port), message_type='broadcast')", "docstring": "This function will send out an autodiscover broadcast to find a\nNeteria server. Any servers that respond with an \"OHAI CLIENT\"\npacket are servers that we can connect to. Servers that respond are\nstored in the \"discovered_servers\" list.\n\nArgs:\nautoregister (boolean): Whether or not to automatically register\nwith any responding servers. Defaults to True.\n\nReturns:\nNone\n\nExamples:\n>>> myclient = neteria.client.NeteriaClient()\n>>> myclient.listen()\n>>> myclient.autodiscover()\n>>> myclient.discovered_servers\n{('192.168.0.20', 40080): u'1.0', ('192.168.0.82', 40080): '2.0'}", "source": "codesearchnet"}
{"code": "def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n    return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,\n`__call__` should be used instead.\n\nArgs:\ntext (`str`, `List[str]`, `List[List[str]]`):\nThe first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.\ntext_pair (`List[str]` or `List[int]`, *optional*):\nOptional second sequence to be encoded. This can be a list of strings (words of a single example) or a\nlist of list of strings (words of a batch of examples).", "source": "github-repos"}
{"code": "def map_or(self, callback: Callable[([T], U)], default: A) -> Union[(U, A)]:\n    return (callback(self._val) if self._is_some else default)", "docstring": "Applies the ``callback`` to the contained value or returns ``default``.\n\nArgs:\ncallback: The callback to apply to the contained value.\ndefault: The default value.\n\nReturns:\nThe ``callback`` result if the contained value is ``Some``,\notherwise ``default``.\n\nNotes:\nIf you wish to use the result of a function call as ``default``,\nit is recommended to use :py:meth:`map_or_else` instead.\n\nExamples:\n>>> Some(0).map_or(lambda x: x + 1, 1000)\n1\n>>> NONE.map_or(lambda x: x * x, 1)\n1", "source": "codesearchnet"}
{"code": "def get_description(self, description_type=DescriptionTypeEnum.FULL):\n        \n        try:\n            if self._parsed is False:\n                parser = ExpressionParser(self._expression, self._options)\n                self._expression_parts = parser.parse()\n                self._parsed = True\n\n            choices = {\n                DescriptionTypeEnum.FULL: self.get_full_description,\n                DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,\n                DescriptionTypeEnum.HOURS: self.get_hours_description,\n                DescriptionTypeEnum.MINUTES: self.get_minutes_description,\n                DescriptionTypeEnum.SECONDS: self.get_seconds_description,\n                DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,\n                DescriptionTypeEnum.MONTH: self.get_month_description,\n                DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,\n                DescriptionTypeEnum.YEAR: self.get_year_description,\n            }\n\n            description = choices.get(description_type, self.get_seconds_description)()\n\n        except Exception as ex:\n            if self._options.throw_exception_on_parse_error:\n                raise\n            else:\n                description = str(ex)\n        return description", "docstring": "Generates a human readable string for the Cron Expression\n\nArgs:\ndescription_type: Which part(s) of the expression to describe\nReturns:\nThe cron expression description\nRaises:\nException: if throw_exception_on_parse_error is True", "source": "juraj-google-style"}
{"code": "def get_dev_examples(self, data_dir, filename=None):\n    if data_dir is None:\n        data_dir = ''\n    if self.dev_file is None:\n        raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')\n    with open(os.path.join(data_dir, self.dev_file if filename is None else filename), 'r', encoding='utf-8') as reader:\n        input_data = json.load(reader)['data']\n    return self._create_examples(input_data, 'dev')", "docstring": "Returns the evaluation example from the data directory.\n\nArgs:\ndata_dir: Directory containing the data files used for training and evaluating.\nfilename: None by default, specify this if the evaluation file has a different name than the original one\nwhich is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.", "source": "github-repos"}
{"code": "def payment(self, origin, destination, amount):\n    if (type(amount) != Decimal):\n        amount = Decimal(amount)\n    if (amount <= 0):\n        raise Exception('Amount must be a positive number')\n    all_addresses = []\n    accounts = self.listaccounts()\n    if (origin in accounts):\n        if (destination in accounts):\n            with self.openwallet():\n                result = self.move(origin, destination, amount)\n            return self.record_tx(origin, None, amount, result, destination)\n        for account in accounts:\n            addresses = self.getaddressesbyaccount(account)\n            if (destination in addresses):\n                with self.openwallet():\n                    result = self.move(origin, account, amount)\n                return self.record_tx(origin, destination, amount, result, account)\n        else:\n            with self.openwallet():\n                txhash = self.sendfrom(origin, destination, amount)\n            return self.record_tx(origin, destination, amount, txhash)", "docstring": "Convenience method for sending Bitcoins.\n\nSend coins from origin to destination. Calls record_tx to log the\ntransaction to database.  Uses free, instant \"move\" transfers\nif addresses are both local (in the same wallet), and standard\n\"sendfrom\" transactions otherwise.\n\nThe sender is required to be specified by user_id (account label);\nhowever, the recipient can be specified either by Bitcoin address\n(anyone) or user_id (if the user is local).\n\nPayment tries sending Bitcoins in this order:\n1. \"move\" from account to account (local)\n2. \"move\" from account to address (local)\n3. \"sendfrom\" account to address (broadcast)\n\nArgs:\norigin (str): user_id of the sender\ndestination (str): coin address or user_id of the recipient\namount (str, Decimal, number): amount to send\n\nReturns:\nbool: True if successful, False otherwise", "source": "codesearchnet"}
{"code": "def get_log_id(cls, id):\n    conn = Qubole.agent()\n    r = conn.get_raw((cls.element_path(id) + '/logs'))\n    return r.text", "docstring": "Fetches log for the command represented by this id\n\nArgs:\n`id`: command id", "source": "codesearchnet"}
{"code": "def mark_complex(self, name, serializer, deserializer):\n    self._complex_properties[name] = (serializer, deserializer)", "docstring": "Mark a property as complex with serializer and deserializer functions.\n\nArgs:\nname (str): The name of the complex property.\nserializer (callable): The function to call to serialize the property's\nvalue to something that can be saved in a json.\ndeserializer (callable): The function to call to unserialize the property\nfrom a dict loaded by a json back to the original value.", "source": "codesearchnet"}
{"code": "def get(self, url, params=None, **kwargs):\n    check_type(url, basestring, may_be_none=False)\n    check_type(params, dict)\n    erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])\n    response = self.request('GET', url, erc, params=params, **kwargs)\n    return extract_and_parse_json(response)", "docstring": "Sends a GET request.\n\nArgs:\nurl(basestring): The URL of the API endpoint.\nparams(dict): The parameters for the HTTP GET request.\n**kwargs:\nerc(int): The expected (success) response code for the request.\nothers: Passed on to the requests package.\n\nRaises:\nApiError: If anything other than the expected response code is\nreturned by the Webex Teams API endpoint.", "source": "codesearchnet"}
{"code": "def __convertChannelMask(self, channelsArray):\n        \n        maskSet = 0\n\n        for eachChannel in channelsArray:\n            mask = 1 << eachChannel\n            maskSet = (maskSet | mask)\n\n        return maskSet", "docstring": "convert channelsArray to bitmask format\n\nArgs:\nchannelsArray: channel array (i.e. [21, 22])\n\nReturns:\nbitmask format corresponding to a given channel array", "source": "juraj-google-style"}
{"code": "def insert(self, meter_db):\n        \n        if meter_db:\n            meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b)\n        else:\n            ekm_log(\"Attempt to insert when no MeterDB assigned.\")\n        pass", "docstring": "Insert to :class:`~ekmmeters.MeterDB`  subclass.\n\nPlease note MeterDB subclassing is only for simplest-case.\n\nArgs:\nmeter_db (MeterDB): Instance of subclass of MeterDB.", "source": "juraj-google-style"}
{"code": "def run(xml_report_dir, xml_report_filter='TEST-', html_report_path='.', generate_exec_time_graphs=True, html_report_dir='report.th', initial_java_heap_size=None, maximum_java_heap_size=None):\n    cmd = []\n    cmd.append('java')\n    if initial_java_heap_size:\n        cmd.append('-Xms{}'.format(initial_java_heap_size))\n    if maximum_java_heap_size:\n        cmd.append('-Xmx{}'.format(maximum_java_heap_size))\n    cmd.append('-Dunitth.xml.report.filter={}'.format(xml_report_filter))\n    cmd.append('-Dunitth.html.report.path={}'.format(html_report_path))\n    cmd.append('-Dunitth.generate.exectimegraphs={}'.format('{}'.format(generate_exec_time_graphs).lower()))\n    cmd.append('-Dunitth.report.dir={}'.format(html_report_dir))\n    cmd.append('-jar')\n    cmd.append('\"{}\"'.format(resource_filename('unitth', 'lib/unitth/unitth.jar')))\n    cmd.append(xml_report_dir)\n    subprocess.check_call(' '.join(cmd), shell=True)", "docstring": "Use UnitTH to generate a test history report\n\nArgs:\nxml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of\nxml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should\nbe included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history\nreport.\nhtml_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of\nindividual builds)\ngenerate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated\nhtml_report_dir (:obj:`str`, optional): directory to store generated HTML history report\ninitial_java_heap_size (:obj:`str`, optional): initial Java heap size\nmaximum_java_heap_size (:obj:`str`, optional): maximum Java heap size", "source": "codesearchnet"}
{"code": "def FlashFromFile(self, partition, source_file, source_len=0,\n                      info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):\n        \n        if source_len == 0:\n            \n            source_len = os.stat(source_file).st_size\n        download_response = self.Download(\n            source_file, source_len=source_len, info_cb=info_cb,\n            progress_callback=progress_callback)\n        flash_response = self.Flash(partition, info_cb=info_cb)\n        return download_response + flash_response", "docstring": "Flashes a partition from the file on disk.\n\nArgs:\npartition: Partition name to flash to.\nsource_file: Filename to download to the device.\nsource_len: Optional length of source_file, uses os.stat if not provided.\ninfo_cb: See Download.\nprogress_callback: See Download.\n\nReturns:\nDownload and flash responses, normally nothing.", "source": "juraj-google-style"}
{"code": "def update_clinvar_submission_status(self, user_id, submission_id, status):\n        \n        LOG.info('closing clinvar submission \"%s\"', submission_id)\n\n        if status == 'open': \n            \n            self.clinvar_submission_collection.update_many(\n                {'user_id' : user_id},\n                {'$set' :\n                    {'status' : 'closed', 'updated_at' : datetime.now()}\n                }\n            )\n        updated_submission = self.clinvar_submission_collection.find_one_and_update(\n            {'_id'  : ObjectId(submission_id)},\n            {'$set' :\n                {'status' : status, 'updated_at' : datetime.now()}\n            },\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n\n        return updated_submission", "docstring": "Set a clinvar submission ID to 'closed'\n\nArgs:\nsubmission_id(str): the ID of the clinvar submission to close\n\nReturn\nupdated_submission(obj): the submission object with a 'closed' status", "source": "juraj-google-style"}
{"code": "def sigmoid_cross_entropy_with_logits(logits, targets):\n    if (logits.shape != targets.shape):\n        raise ValueError(('logits shape must equal targets shapelogits=%s targets=%s' % (logits.to_string, targets.to_string)))\n    x = logits\n    z = targets\n    return ((mtf.relu(x) - (x * z)) + mtf.log((1 + mtf.exp((- mtf.abs(x))))))", "docstring": "Sigmoid cross-entropy loss.\n\nArgs:\nlogits: a mtf.Tensor\ntargets: a mtf.Tensor with the same shape as logits\n\nReturns:\na mtf.Tensor whose shape is equal to logits.shape\n\nRaises:\nValueError: if the shapes do not match.", "source": "codesearchnet"}
{"code": "def shape_rb_data(raw_rb):\n    \n    rb_data = []\n    rb_data.append(np.mean(raw_rb, 0))\n    rb_data.append(np.std(raw_rb, 0))\n\n    return rb_data", "docstring": "Take the raw rb data and convert it into averages and std dev\n\nArgs:\nraw_rb (numpy.array): m x n x l list where m is the number of seeds, n\nis the number of Clifford sequences and l is the number of qubits\n\nReturn:\nnumpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is\nthe std dev overseeds", "source": "juraj-google-style"}
{"code": "def hide_stevedore_logs():\n    stevedore_logger = logging.getLogger('stevedore.extension')\n    stevedore_logger.propagate = False\n    stevedore_logger.setLevel(logging.ERROR)\n    stevedore_logger.addHandler(logging.NullHandler())", "docstring": "Hides the logs of stevedore, this function was\nadded in order to support older versions of stevedore\n\nWe are using the NullHandler in order to get rid from\n'No handlers could be found for logger...' msg\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_state_event(self, room_id, event_type):\n        \n        return self._send(\"GET\", \"/rooms/{}/state/{}\".format(quote(room_id), event_type))", "docstring": "Perform GET /rooms/$room_id/state/$event_type\n\nArgs:\nroom_id(str): The room ID.\nevent_type (str): The type of the event.\n\nRaises:\nMatrixRequestError(code=404) if the state event is not found.", "source": "juraj-google-style"}
{"code": "def bind(self, **bindings):\n    new_context = dict(self._partial_context)\n    unknown_keys = []\n    for (k, v) in six.iteritems(bindings):\n        if (k not in self._unbound_vars):\n            unknown_keys.append(k)\n        new_context[self._unbound_vars[k]] = v\n    if unknown_keys:\n        raise ValueError(('The following keys are not associated with any unbound vars: %s, legal values are %s' % (unknown_keys, list(self._unbound_vars.keys()))))\n    return _DeferredLayer(self.bookkeeper, None, (), {}, scope=self._scope, defaults=self._defaults, pass_through=self, partial_context=new_context)", "docstring": "Creates a new template with the given unbound variables bound.\n\nArgs:\n**bindings: Arguments for every deferred parameter.\nReturns:\nA new template with the given bindings.\nRaises:\nValueError: If any of the bindings do not correspond to unbound variables.", "source": "codesearchnet"}
{"code": "def _calc_digest(self, origin):\n        \n        if hasattr(origin, 'read') and hasattr(origin, 'seek'):\n            pos = origin.tell()\n            digest = hashtools.calc_digest(origin, algorithm=self._conf['hash_alg'])\n            origin.seek(pos)\n        else:\n            digest = hashtools.calc_file_digest(origin, algorithm=self._conf['hash_alg'])\n        return digest", "docstring": "calculate digest for the given file or readable/seekable object\n\nArgs:\norigin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...)\nReturns:\nString rapresenting the digest for the given origin", "source": "juraj-google-style"}
{"code": "def add_vcenter(self, **kwargs):\n        \n        config = ET.Element(\"config\")\n        vcenter = ET.SubElement(config, \"vcenter\",\n                                xmlns=\"urn:brocade.com:mgmt:brocade-vswitch\")\n        id = ET.SubElement(vcenter, \"id\")\n        id.text = kwargs.pop('id')\n        credentials = ET.SubElement(vcenter, \"credentials\")\n        url = ET.SubElement(credentials, \"url\")\n        url.text = kwargs.pop('url')\n        username = ET.SubElement(credentials, \"username\")\n        username.text = kwargs.pop('username')\n        password = ET.SubElement(credentials, \"password\")\n        password.text = kwargs.pop('password')\n\n        try:\n            self._callback(config)\n            return True\n\n        except Exception as error:\n            logging.error(error)\n            return False", "docstring": "Add vCenter on the switch\n\nArgs:\nid(str) : Name of an established vCenter\nurl (bool) : vCenter URL\nusername (str): Username of the vCenter\npassword (str): Password of the vCenter\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, unresponsive_kill_period):\n    \n    super(NannyThread, self).__init__(name=\"Nanny\")\n    self.last_heart_beat_time = time.time()\n    self.unresponsive_kill_period = unresponsive_kill_period\n    self.running = True\n    self.daemon = True\n    self.proc = psutil.Process()\n    self.memory_quota = config.CONFIG[\"Client.rss_max_hard\"] * 1024 * 1024", "docstring": "Constructor.\n\nArgs:\nunresponsive_kill_period: The time in seconds which we wait for a\nheartbeat.", "source": "juraj-google-style"}
{"code": "def remove_forwarding_rules(self, forwarding_rules):\n    rules_dict = [rule.__dict__ for rule in forwarding_rules]\n    return self.get_data(('load_balancers/%s/forwarding_rules/' % self.id), type=DELETE, params={'forwarding_rules': rules_dict})", "docstring": "Removes existing forwarding rules from a LoadBalancer.\n\nArgs:\nforwarding_rules (obj:`list`): A list of `ForwrdingRules` objects", "source": "codesearchnet"}
{"code": "def alloc_data(self, value):\n    if isinstance(value, six.binary_type):\n        return self._alloc_data(value)\n    elif isinstance(value, six.text_type):\n        return self._alloc_data((value.encode('utf-8') + b'\\x00'))\n    else:\n        raise TypeError(('No idea how to encode %s' % repr(value)))", "docstring": "Allocate a piece of data that will be included in the shellcode body.\n\nArguments:\nvalue(...): The value to add to the shellcode. Can be bytes or\nstring type.\n\nReturns:\n~pwnypack.types.Offset: The offset used to address the data.", "source": "codesearchnet"}
{"code": "def get_capabilities(image=None):\n    if (salt.utils.versions.version_cmp(__grains__['osversion'], '10') == (- 1)):\n        raise NotImplementedError('`installed_capabilities` is not available on this version of Windows: {0}'.format(__grains__['osversion']))\n    cmd = ['DISM', '/English', ('/Image:{0}'.format(image) if image else '/Online'), '/Get-Capabilities']\n    out = __salt__['cmd.run'](cmd)\n    pattern = 'Capability Identity : (.*)\\\\r\\\\n'\n    capabilities = re.findall(pattern, out, re.MULTILINE)\n    capabilities.sort()\n    return capabilities", "docstring": "List all capabilities on the system\n\nArgs:\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\n\nRaises:\nNotImplementedError: For all versions of Windows that are not Windows 10\nand later. Server editions of Windows use ServerManager instead.\n\nReturns:\nlist: A list of capabilities\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.get_capabilities", "source": "codesearchnet"}
{"code": "def console(discord_token, discord_client_id):\n    (state, response) = datatools.get_compare_version()\n    logger.info('Starting Modis in console')\n    logger.info(response)\n    import threading\n    import asyncio\n    logger.debug('Loading packages')\n    from modis.discord_modis import main as discord_modis_console\n    from modis.reddit_modis import main as reddit_modis_console\n    from modis.facebook_modis import main as facebook_modis_console\n    logger.debug('Initiating threads')\n    loop = asyncio.get_event_loop()\n    discord_thread = threading.Thread(target=discord_modis_console.start, args=[discord_token, discord_client_id, loop])\n    reddit_thread = threading.Thread(target=reddit_modis_console.start, args=[])\n    facebook_thread = threading.Thread(target=facebook_modis_console.start, args=[])\n    logger.debug('Starting threads')\n    discord_thread.start()\n    reddit_thread.start()\n    facebook_thread.start()\n    logger.debug('Root startup completed')", "docstring": "Start Modis in console format.\n\nArgs:\ndiscord_token (str): The bot token for your Discord application\ndiscord_client_id: The bot's client ID", "source": "codesearchnet"}
{"code": "def getNext(self, dataset, requires_initialization=False, shared_name=None):\n\n    def ta_wrapper(gn):\n\n        def _wrapper():\n            r = gn()\n            if isinstance(r, tensor_array_ops.TensorArray):\n                return r.stack()\n            else:\n                return r\n        return _wrapper\n    if context.executing_eagerly() or ops.inside_function():\n        iterator = iter(dataset)\n        return ta_wrapper(iterator._next_internal)\n    else:\n        if requires_initialization:\n            iterator = dataset_ops.make_initializable_iterator(dataset, shared_name)\n            self.evaluate(iterator.initializer)\n        else:\n            iterator = dataset_ops.make_one_shot_iterator(dataset)\n        get_next = iterator.get_next()\n        return ta_wrapper(lambda: get_next)", "docstring": "Returns a callable that returns the next element of the dataset.\n\nExample use:\n```python\n# In both graph and eager modes\ndataset = ...\nget_next = self.getNext(dataset)\nresult = self.evaluate(get_next())\n```\n\nArgs:\ndataset: A dataset whose elements will be returned.\nrequires_initialization: Indicates that when the test is executed in graph\nmode, it should use an initializable iterator to iterate through the\ndataset (e.g. when it contains stateful nodes). Defaults to False.\nshared_name: (Optional.) If non-empty, the returned iterator will be\nshared under the given name across multiple sessions that share the same\ndevices (e.g. when using a remote server).\nReturns:\nA callable that returns the next element of `dataset`. Any `TensorArray`\nobjects `dataset` outputs are stacked.", "source": "github-repos"}
{"code": "def gray2bgr(img):\n    \n    img = img[..., None] if img.ndim == 2 else img\n    out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n    return out_img", "docstring": "Convert a grayscale image to BGR image.\n\nArgs:\nimg (ndarray or str): The input image.\n\nReturns:\nndarray: The converted BGR image.", "source": "juraj-google-style"}
{"code": "def monitoring(line, cell=None):\n  \n  parser = datalab.utils.commands.CommandParser(prog='monitoring', description=(\n      'Execute various Monitoring-related operations. Use \"%monitoring '\n      '<command> -h\" for help on a specific command.'))\n\n  list_parser = parser.subcommand(\n      'list', 'List the metrics or resource types in a monitored project.')\n\n  list_metric_parser = list_parser.subcommand(\n      'metrics',\n      'List the metrics that are available through the Monitoring API.')\n  list_metric_parser.add_argument(\n      '-t', '--type',\n      help='The type of metric(s) to list; can include wildchars.')\n  list_metric_parser.add_argument(\n      '-p', '--project', help='The project on which to execute the request.')\n  list_metric_parser.set_defaults(func=_list_metric_descriptors)\n\n  list_resource_parser = list_parser.subcommand(\n      'resource_types',\n      ('List the monitored resource types that are available through the '\n       'Monitoring API.'))\n  list_resource_parser.add_argument(\n      '-p', '--project', help='The project on which to execute the request.')\n  list_resource_parser.add_argument(\n      '-t', '--type',\n      help='The resource type(s) to list; can include wildchars.')\n  list_resource_parser.set_defaults(func=_list_resource_descriptors)\n\n  list_group_parser = list_parser.subcommand(\n      'groups',\n      ('List the Stackdriver groups in this project.'))\n  list_group_parser.add_argument(\n      '-p', '--project', help='The project on which to execute the request.')\n  list_group_parser.add_argument(\n      '-n', '--name',\n      help='The name of the group(s) to list; can include wildchars.')\n  list_group_parser.set_defaults(func=_list_groups)\n\n  return datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the monitoring cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the storage line.\nReturns:\nThe results of executing the cell.", "source": "juraj-google-style"}
{"code": "def _build_request_factory(cls, session: AppSession):\n\n    def request_factory(*args, **kwargs):\n        request = session.factory.class_map['Request'](*args, **kwargs)\n        user_agent = (session.args.user_agent or session.default_user_agent)\n        request.fields['User-Agent'] = user_agent\n        if session.args.referer:\n            request.fields['Referer'] = session.args.referer\n        for header_string in session.args.header:\n            request.fields.parse(header_string)\n        if session.args.http_compression:\n            request.fields['Accept-Encoding'] = 'gzip, deflate'\n        if session.args.no_cache:\n            request.fields['Cache-Control'] = 'no-cache, must-revalidate'\n            request.fields['Pragma'] = 'no-cache'\n        return request\n    return request_factory", "docstring": "Create the request factory.\n\nA request factory is any callable object that returns a\n:class:`.http.Request`. The callable must accept the same\narguments to Request.\n\nReturns:\nA callable object", "source": "codesearchnet"}
{"code": "def get(self):\n    if len(self._queue) == 0:\n        return float('nan')\n    with warnings.catch_warnings(record=False):\n        warnings.simplefilter('ignore')\n        return np.nanmean(self._queue)", "docstring": "Calculates and returns the mean of the current sliding window.\n\nReturns:\nfloat: The mean of the values in the current sliding window.\nReturns NaN if the window is empty.", "source": "github-repos"}
{"code": "def save_graph(graph_str, dest_file, fmt=None, image_ratio=None):\n    g = pydot.graph_from_dot_data(graph_str)\n    if (fmt is None):\n        fmt = (os.path.splitext(dest_file)[1].lower().strip('.') or 'png')\n    if hasattr(g, ('write_' + fmt)):\n        write_fn = getattr(g, ('write_' + fmt))\n    else:\n        raise Exception((\"Unsupported graph format: '%s'\" % fmt))\n    if image_ratio:\n        g.set_ratio(str(image_ratio))\n    write_fn(dest_file)\n    return fmt", "docstring": "Render a graph to an image file.\n\nArgs:\ngraph_str (str): Dot-language graph string.\ndest_file (str): Filepath to save the graph to.\nfmt (str): Format, eg \"png\", \"jpg\".\nimage_ratio (float): Image ratio.\n\nReturns:\nString representing format that was written, such as 'png'.", "source": "codesearchnet"}
{"code": "def scale(reader, writer, column, start, stop, multiple):\n  \n  for i, row in enumerate(reader):\n    if i >= start and i <= stop:\n      row[column] = type(multiple)(row[column]) * multiple\n    writer.appendRecord(row)", "docstring": "Multiplies a value over a range of rows.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream  object to write output data to.\ncolumn: The column of data to modify.\nstart: The first row in the range to modify.\nend: The last row in the range to modify.\nmultiple: The value to scale/multiply by.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A Funnel sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def StartProfiling(self, configuration, identifier, process_information):\n    \n    if not configuration:\n      return\n\n    if configuration.HaveProfileParsers():\n      identifier = '{0:s}-parsers'.format(identifier)\n\n      self._cpu_time_profiler = profilers.CPUTimeProfiler(\n          identifier, configuration)\n      self._cpu_time_profiler.Start()\n\n      self._memory_profiler = profilers.MemoryProfiler(\n          identifier, configuration)\n      self._memory_profiler.Start()\n\n    self._process_information = process_information", "docstring": "Starts profiling.\n\nArgs:\nconfiguration (ProfilingConfiguration): profiling configuration.\nidentifier (str): identifier of the profiling session used to create\nthe sample filename.\nprocess_information (ProcessInfo): process information.", "source": "juraj-google-style"}
{"code": "def _patch_expand_paths(self, settings, name, value):\n        \n        return [self._patch_expand_path(settings, name, item)\n                for item in value]", "docstring": "Apply ``SettingsPostProcessor._patch_expand_path`` to each element in\nlist.\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (list): List of paths to patch.\n\nReturns:\nlist: Patched path list to an absolute path.", "source": "juraj-google-style"}
{"code": "def dbmin_mean(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmin_mean`'.format(value))\n\n        self._dbmin_mean = value", "docstring": "Corresponds to IDD Field `dbmin_mean`\nMean of extreme annual minimum dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin_mean`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               receive_port,\n               logdir,\n               always_flush=False):\n    \n    \n    \n    \n    \n    \n    \n    debugger_directory = os.path.join(\n        os.path.expanduser(logdir), constants.DEBUGGER_DATA_DIRECTORY_NAME)\n\n    if not tf.io.gfile.exists(debugger_directory):\n      try:\n        tf.io.gfile.makedirs(debugger_directory)\n        logger.info(\"Created directory for debugger data: %s\",\n                        debugger_directory)\n      except tf.errors.OpError as e:\n        logger.fatal(\n            \"Could not make directory for debugger data: %s. Error: %s\",\n            debugger_directory, e)\n\n    self._events_writer_manager = events_writer_manager_lib.EventsWriterManager(\n        events_directory=debugger_directory,\n        always_flush=always_flush)\n\n    \n    \n    \n    \n    \n    try:\n      self._events_writer_manager.write_event(\n          tf.compat.v1.Event(\n              wall_time=0, step=0, file_version=constants.EVENTS_VERSION))\n    except IOError as e:\n      logger.error(\n          \"Writing to %s failed: %s\",\n          self._events_writer_manager.get_current_file_name(), e)\n\n    \n    self._registry_backup_file_path = os.path.join(\n        debugger_directory, constants.ALERT_REGISTRY_BACKUP_FILE_NAME)\n    initial_data = None\n\n    if tf.io.gfile.exists(self._registry_backup_file_path):\n      \n      with tf.io.gfile.GFile(self._registry_backup_file_path, \"r\") as backup_file:\n        try:\n          \n          initial_data = json.load(backup_file)\n        except ValueError as err:\n          \n          logger.error(\n              \"Could not parse contents of %s: %s\",\n              self._registry_backup_file_path, err)\n\n    self._numerics_alert_registry = numerics_alert.NumericsAlertRegistry(\n        initialization_list=initial_data)\n\n    self._numerics_alert_lock = threading.Lock()\n    curried_handler_constructor = functools.partial(\n        DebuggerDataStreamHandler,\n        self._events_writer_manager,\n        self._numerics_alert_callback)\n    grpc_debug_server.EventListenerBaseServicer.__init__(\n        self, receive_port, curried_handler_constructor)", "docstring": "Receives health pills from a debugger and writes them to disk.\n\nArgs:\nreceive_port: The port at which to receive health pills from the\nTensorFlow debugger.\nlogdir: The directory in which to write events files that TensorBoard will\nread.\nalways_flush: A boolean indicating whether the EventsWriter will be\nflushed after every write. Can be used for testing.", "source": "juraj-google-style"}
{"code": "def add_graph(self, graph, global_step=None, graph_def=None):\n    if graph is not None and graph_def is not None:\n        raise ValueError('Please pass only graph, or graph_def (deprecated), but not both.')\n    if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):\n        if not isinstance(graph, ops.Graph):\n            logging.warning('When passing a `Graph` object, please use the `graph` named argument instead of `graph_def`.')\n            graph = graph_def\n        true_graph_def = graph.as_graph_def(add_shapes=True)\n        self._write_plugin_assets(graph)\n    elif isinstance(graph, graph_pb2.GraphDef) or isinstance(graph_def, graph_pb2.GraphDef):\n        logging.warning('Passing a `GraphDef` to the SummaryWriter is deprecated. Pass a `Graph` object instead, such as `sess.graph`.')\n        if isinstance(graph, graph_pb2.GraphDef):\n            true_graph_def = graph\n        else:\n            true_graph_def = graph_def\n    else:\n        raise TypeError('The passed graph must be an instance of `Graph` or the deprecated `GraphDef`')\n    self._add_graph_def(true_graph_def, global_step)", "docstring": "Adds a `Graph` to the event file.\n\nThe graph described by the protocol buffer will be displayed by\nTensorBoard. Most users pass a graph in the constructor instead.\n\nArgs:\ngraph: A `Graph` object, such as `sess.graph`.\nglobal_step: Number. Optional global step counter to record with the\ngraph.\ngraph_def: DEPRECATED. Use the `graph` parameter instead.\n\nRaises:\nValueError: If both graph and graph_def are passed to the method.", "source": "github-repos"}
{"code": "def _AddDependencyEdges(self, rdf_artifact):\n    artifact_dependencies = artifact_registry.GetArtifactPathDependencies(rdf_artifact)\n    if artifact_dependencies:\n        for attribute in artifact_dependencies:\n            self._AddEdge(attribute, rdf_artifact.name)\n    else:\n        self.reachable_nodes.add(rdf_artifact.name)\n        self.graph[rdf_artifact.name].is_provided = True", "docstring": "Add an edge for every dependency of the given artifact.\n\nThis method gets the attribute names for a given artifact and for every\nattribute it adds a directed edge from the attribute node to the artifact\nnode. If an artifact does not have any dependencies it is added to the set\nof reachable nodes.\n\nArgs:\nrdf_artifact: The artifact object.", "source": "codesearchnet"}
{"code": "def _serialize_linear_biases(linear, nodelist):\n    linear_bytes = struct.pack(('<' + ('d' * len(linear))), *[linear[i] for i in nodelist])\n    return base64.b64encode(linear_bytes).decode('utf-8')", "docstring": "Serializes the linear biases.\n\nArgs:\nlinear: a interable object where linear[v] is the bias\nassociated with v.\nnodelist (list): an ordered iterable containing the nodes.\n\nReturns:\nstr: base 64 encoded string of little endian 8 byte floats,\none for each of the biases in linear. Ordered according\nto nodelist.\n\nExamples:\n>>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [1, 2, 3])\n'AAAAAAAA8L8AAAAAAADwPwAAAAAAAAAA'\n>>> _serialize_linear_biases({1: -1, 2: 1, 3: 0}, [3, 2, 1])\n'AAAAAAAAAAAAAAAAAADwPwAAAAAAAPC/'", "source": "codesearchnet"}
{"code": "def setMeterPassword(self, new_pwd, pwd='00000000'):\n    result = False\n    self.setContext('setMeterPassword')\n    try:\n        if ((len(new_pwd) != 8) or (len(pwd) != 8)):\n            self.writeCmdMsg('Passwords must be exactly eight characters.')\n            self.setContext('')\n            return result\n        if (not self.request(False)):\n            self.writeCmdMsg('Pre command read failed: check serial line.')\n        elif (not self.serialCmdPwdAuth(pwd)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_pwd = binascii.hexlify(new_pwd.zfill(8))\n            req_str = (('015731023030323028' + req_pwd) + '2903')\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success(setMeterPassword): 06 returned.')\n                result = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return result", "docstring": "Serial Call to set meter password.  USE WITH CAUTION.\n\nArgs:\nnew_pwd (str): 8 digit numeric password to set\npwd (str): Old 8 digit numeric password.\n\nReturns:\nbool: True on completion with ACK.", "source": "codesearchnet"}
{"code": "def write_filter(script, filter_xml):\n    if isinstance(script, mlx.FilterScript):\n        script.filters.append(filter_xml)\n    elif isinstance(script, str):\n        script_file = open(script, 'a')\n        script_file.write(filter_xml)\n        script_file.close()\n    else:\n        print(filter_xml)\n    return None", "docstring": "Write filter to FilterScript object or filename\n\nArgs:\nscript (FilterScript object or filename str): the FilterScript object\nor script filename to write the filter to.\nfilter_xml (str): the xml filter string", "source": "codesearchnet"}
{"code": "def escalatee(self, main_type, sub_type, unique_id, escalatee_id, action='GET', params=None):\n        \n        params = params or {}\n\n        url = '/v2/{}/{}/{}/escalatees/{}'.format(main_type, sub_type, unique_id, escalatee_id)\n        if action == 'GET':\n            return self.tcex.session.get(url, params=params)\n        if action == 'DELETE':\n            return self.tcex.session.delete(url)\n        if action == 'ADD':\n            return self.tcex.session.post(url)\n        return None", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nescalatee_id:\naction:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def CacheFileSystem(self, path_spec, file_system):\n    \n    identifier = self._GetFileSystemCacheIdentifier(path_spec)\n    self._file_system_cache.CacheObject(identifier, file_system)", "docstring": "Caches a file system object based on a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nfile_system (FileSystem): file system object.", "source": "juraj-google-style"}
{"code": "def concatenate(inputs, axis=-1, **kwargs):\n    return Concatenate(axis=axis, **kwargs)(inputs)", "docstring": "Functional interface to the `Concatenate` layer.\n\nArgs:\ninputs: A list of input tensors.\naxis: Concatenation axis.\n**kwargs: Standard layer keyword arguments.\n\nReturns:\nA tensor, the concatenation of the inputs alongside axis `axis`.", "source": "github-repos"}
{"code": "def _build_watermark_updates(runner_execution_context: execution.FnApiRunnerExecutionContext, stage_inputs: Iterable[str], expected_timers: Iterable[translations.TimerFamilyId], pcolls_with_da: Set[str], transforms_w_splits: Set[str], watermarks_by_transform_and_timer_family: Dict[translations.TimerFamilyId, timestamp.Timestamp]) -> Dict[Union[str, translations.TimerFamilyId], timestamp.Timestamp]:\n    updates: Dict[Union[str, translations.TimerFamilyId], timestamp.Timestamp] = {}\n\n    def get_pcoll_id(transform_id):\n        buffer_id = runner_execution_context.input_transform_to_buffer_id[transform_id]\n        if buffer_id == translations.IMPULSE_BUFFER:\n            pcollection_id = transform_id\n        else:\n            _, pcollection_id = translations.split_buffer_id(buffer_id)\n        return pcollection_id\n    for pcoll in pcolls_with_da:\n        updates[pcoll] = timestamp.MIN_TIMESTAMP\n    for tr in transforms_w_splits:\n        pcoll_id = get_pcoll_id(tr)\n        updates[pcoll_id] = timestamp.MIN_TIMESTAMP\n    for timer_pcoll_id in expected_timers:\n        updates[timer_pcoll_id] = watermarks_by_transform_and_timer_family.get(timer_pcoll_id, timestamp.MAX_TIMESTAMP)\n    for transform_id in stage_inputs:\n        pcoll_id = get_pcoll_id(transform_id)\n        if pcoll_id not in updates:\n            updates[pcoll_id] = timestamp.MAX_TIMESTAMP\n    return updates", "docstring": "Builds a dictionary of PCollection (or TimerFamilyId) to timestamp.\n\nArgs:\nstage_inputs: represent the set of expected input PCollections for a\nstage. These do not include timers.\nexpected_timers: represent the set of TimerFamilyIds that the stage can\nexpect to receive as inputs.\npcolls_with_da: represent the set of stage input PCollections that had\ndelayed applications.\ntransforms_w_splits: represent the set of transforms in the stage that had\ninput splits.\nwatermarks_by_transform_and_timer_family: represent the set of watermark\nholds to be added for each timer family.", "source": "github-repos"}
{"code": "def start_router(router_class, router_name):\n    handle = router_class.remote(router_name)\n    ray.experimental.register_actor(router_name, handle)\n    handle.start.remote()\n    return handle", "docstring": "Wrapper for starting a router and register it.\n\nArgs:\nrouter_class: The router class to instantiate.\nrouter_name: The name to give to the router.\n\nReturns:\nA handle to newly started router actor.", "source": "codesearchnet"}
{"code": "def authenticate_credentials(self, token):\n        \n\n        try:\n            user_info = self.get_user_info(token)\n        except UserInfoRetrievalFailed:\n            msg = 'Failed to retrieve user info. Unable to authenticate.'\n            logger.error(msg)\n            raise exceptions.AuthenticationFailed(msg)\n\n        user, __ = get_user_model().objects.get_or_create(username=user_info['username'], defaults=user_info)\n\n        if not user.is_active:\n            raise exceptions.AuthenticationFailed('User inactive or deleted.')\n\n        return user, token", "docstring": "Validate the bearer token against the OAuth provider.\n\nArguments:\ntoken (str): Access token to validate\n\nReturns:\n(tuple): tuple containing:\n\nuser (User): User associated with the access token\naccess_token (str): Access token\n\nRaises:\nAuthenticationFailed: The user is inactive, or retrieval of user info failed.", "source": "juraj-google-style"}
{"code": "def get_angle_degrees(self, indices):\n    coords = ['x', 'y', 'z']\n    if isinstance(indices, pd.DataFrame):\n        i_pos = self.loc[(indices.index, coords)].values\n        b_pos = self.loc[(indices.loc[(:, 'b')], coords)].values\n        a_pos = self.loc[(indices.loc[(:, 'a')], coords)].values\n    else:\n        indices = np.array(indices)\n        if (len(indices.shape) == 1):\n            indices = indices[(None, :)]\n        i_pos = self.loc[(indices[(:, 0)], coords)].values\n        b_pos = self.loc[(indices[(:, 1)], coords)].values\n        a_pos = self.loc[(indices[(:, 2)], coords)].values\n    (BI, BA) = ((i_pos - b_pos), (a_pos - b_pos))\n    (bi, ba) = [(v / np.linalg.norm(v, axis=1)[(:, None)]) for v in (BI, BA)]\n    dot_product = np.sum((bi * ba), axis=1)\n    dot_product[(dot_product > 1)] = 1\n    dot_product[(dot_product < (- 1))] = (- 1)\n    angles = np.degrees(np.arccos(dot_product))\n    return angles", "docstring": "Return the angles between given atoms.\n\nCalculates the angle in degrees between the atoms with\nindices ``i, b, a``.\nThe indices can be given in three ways:\n\n* As simple list ``[i, b, a]``\n* As list of lists: ``[[i1, b1, a1], [i2, b2, a2]...]``\n* As :class:`pd.DataFrame` where ``i`` is taken from the index and\n``b`` and ``a`` from the respective columns ``'b'`` and ``'a'``.\n\nArgs:\nindices (list):\n\nReturns:\n:class:`numpy.ndarray`: Vector of angles in degrees.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_value: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n    residual = hidden_states\n    hidden_states = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_value, attention_mask=attention_mask)\n    self_attn_weights = None\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (past_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n`(batch, sequence_length)` where padding elements are indicated by 0.\npast_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\ncache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\nIndices depicting the position of the input sequence tokens in the sequence.", "source": "github-repos"}
{"code": "def _in_place_subclassed_model_reset(model):\n    assert not model._is_graph_network\n    version_utils.swap_class(model.__class__, training.Model, training_v1.Model, ops.executing_eagerly_outside_functions())\n    attributes_cache = {}\n    for name in dir(model):\n        if name == 'submodules' or name == '_self_tracked_trackables':\n            continue\n        try:\n            value = getattr(model, name)\n        except (AttributeError, ValueError, TypeError):\n            continue\n        if isinstance(value, Layer):\n            attributes_cache[name] = value\n            assert value in model.layers\n            if hasattr(value, 'layers') and value.layers:\n                raise ValueError('We do not support the use of nested layers in `model_to_estimator` at this time. Found nested layer: %s' % value)\n        elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers', 'metrics', '_compile_metric_functions', '_output_loss_metrics'):\n            if value and all((isinstance(val, Layer) for val in value)):\n                raise ValueError('We do not support the use of list-of-layers attributes in subclassed models used with `model_to_estimator` at this time. Found list model: %s' % name)\n    layers_to_names = {value: key for key, value in attributes_cache.items()}\n    original_layers = list(model._flatten_layers(include_self=False, recursive=False))\n    setattr_tracking = model._setattr_tracking\n    model._setattr_tracking = False\n    model._self_tracked_trackables = []\n    for layer in original_layers:\n        config = layer.get_config()\n        if isinstance(layer, training.Model) and (not layer._is_graph_network):\n            raise ValueError('We do not support the use of nested subclassed models in `model_to_estimator` at this time. Found nested model: %s' % layer)\n        fresh_layer = layer.__class__.from_config(config)\n        name = layers_to_names[layer]\n        setattr(model, name, fresh_layer)\n        model._self_tracked_trackables.append(fresh_layer)\n    if not hasattr(model, '_original_attributes_cache') or model._original_attributes_cache is None:\n        if model.built:\n            attributes_to_cache = ['inputs', 'outputs', 'total_loss', 'optimizer', 'train_function', 'test_function', 'predict_function', '_training_endpoints', '_collected_trainable_weights', '_feed_inputs', '_feed_input_names', '_feed_input_shapes']\n            for name in attributes_to_cache:\n                attributes_cache[name] = getattr(model, name)\n    model._original_attributes_cache = attributes_cache\n    _reset_build_compile_trackers(model)\n    model._setattr_tracking = setattr_tracking", "docstring": "Substitute for model cloning that works for subclassed models.\n\nSubclassed models cannot be cloned because their topology is not serializable.\nTo \"instantiate\" an identical model in a new TF graph, we reuse the original\nmodel object, but we clear its state.\n\nAfter calling this function on a model instance, you can use the model\ninstance as if it were a model clone (in particular you can use it in a new\ngraph).\n\nThis method clears the state of the input model. It is thus destructive.\nHowever the original state can be restored fully by calling\n`_in_place_subclassed_model_state_restoration`.\n\nArgs:\nmodel: Instance of a Keras model created via subclassing.\n\nRaises:\nValueError: In case the model uses a subclassed model as inner layer.", "source": "github-repos"}
{"code": "def position(self, partition):\n        \n        if not isinstance(partition, TopicPartition):\n            raise TypeError('partition must be a TopicPartition namedtuple')\n        assert self._subscription.is_assigned(partition), 'Partition is not assigned'\n        offset = self._subscription.assignment[partition].position\n        if offset is None:\n            self._update_fetch_positions([partition])\n            offset = self._subscription.assignment[partition].position\n        return offset", "docstring": "Get the offset of the next record that will be fetched\n\nArguments:\npartition (TopicPartition): Partition to check\n\nReturns:\nint: Offset", "source": "juraj-google-style"}
{"code": "def notify(\n        self,\n        method_name: str,\n        *args: Any,\n        trim_log_values: Optional[bool] = None,\n        validate_against_schema: Optional[bool] = None,\n        **kwargs: Any\n    ) -> Response:\n        \n        return self.send(\n            Notification(method_name, *args, **kwargs),\n            trim_log_values=trim_log_values,\n            validate_against_schema=validate_against_schema,\n        )", "docstring": "Send a JSON-RPC request, without expecting a response.\n\nArgs:\nmethod_name: The remote procedure's method name.\nargs: Positional arguments passed to the remote procedure.\nkwargs: Keyword arguments passed to the remote procedure.\ntrim_log_values: Abbreviate the log entries of requests and responses.\nvalidate_against_schema: Validate response against the JSON-RPC schema.", "source": "juraj-google-style"}
{"code": "def tomography_data(results, name, tomoset):\n    \n\n    labels = tomography_circuit_names(tomoset, name)\n    circuits = tomoset['circuits']\n    data = []\n    prep = None\n    for j, _ in enumerate(labels):\n        counts = marginal_counts(results.get_counts(labels[j]),\n                                 tomoset['qubits'])\n        shots = sum(counts.values())\n        meas = circuits[j]['meas']\n        prep = circuits[j].get('prep', None)\n        meas_qubits = sorted(meas.keys())\n        if prep:\n            prep_qubits = sorted(prep.keys())\n        circuit = {}\n        for c in counts.keys():\n            circuit[c] = {}\n            circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[-1 - k]))\n                                  for k in range(len(meas_qubits))]\n            if prep:\n                circuit[c]['prep'] = [prep[prep_qubits[k]]\n                                      for k in range(len(prep_qubits))]\n        data.append({'counts': counts, 'shots': shots, 'circuit': circuit})\n\n    ret = {'data': data, 'meas_basis': tomoset['meas_basis']}\n    if prep:\n        ret['prep_basis'] = tomoset['prep_basis']\n    return ret", "docstring": "Return a results dict for a state or process tomography experiment.\n\nArgs:\nresults (Result): Results from execution of a process tomography\ncircuits on a backend.\nname (string): The name of the circuit being reconstructed.\ntomoset (tomography_set): the dict of tomography configurations.\n\nReturns:\nlist: A list of dicts for the outcome of each process tomography\nmeasurement circuit.", "source": "juraj-google-style"}
{"code": "def as_matrix(self, depth=0):\n    if (depth in self._matrix_cache):\n        return self._matrix_cache[depth]\n    self._matrix_cache[depth] = matrix = Matrix(self, depth=depth)\n    return matrix", "docstring": "Create a matrix with self as node, cache it, return it.\n\nArgs:\ndepth (int): depth of the matrix.\n\nReturns:\nMatrix: an instance of Matrix.", "source": "codesearchnet"}
{"code": "def DeregisterDefinition(self, data_type_definition):\n    name = data_type_definition.name.lower()\n    if (name not in self._definitions):\n        raise KeyError('Definition not set for name: {0:s}.'.format(data_type_definition.name))\n    del self._definitions[name]", "docstring": "Deregisters a data type definition.\n\nThe data type definitions are identified based on their lower case name.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nKeyError: if a data type definition is not set for the corresponding\nname.", "source": "codesearchnet"}
{"code": "def vibrational_free_energy(self, temperature, volume):\n        \n        y = self.debye_temperature(volume) / temperature\n        return self.kb * self.natoms * temperature * (\n            9./8. * y + 3 * np.log(1 - np.exp(-y)) - self.debye_integral(y))", "docstring": "Vibrational Helmholtz free energy, A_vib(V, T).\nEq(4) in doi.org/10.1016/j.comphy.2003.12.001\n\nArgs:\ntemperature (float): temperature in K\nvolume (float)\n\nReturns:\nfloat: vibrational free energy in eV", "source": "juraj-google-style"}
{"code": "def _inter_df_op_handler(self, func, other, **kwargs):\n    axis = kwargs.get('axis', 0)\n    axis = (pandas.DataFrame()._get_axis_number(axis) if (axis is not None) else 0)\n    if isinstance(other, type(self)):\n        return self._inter_manager_operations(other, 'outer', (lambda x, y: func(x, y, **kwargs)))\n    else:\n        return self._scalar_operations(axis, other, (lambda df: func(df, other, **kwargs)))", "docstring": "Helper method for inter-manager and scalar operations.\n\nArgs:\nfunc: The function to use on the Manager/scalar.\nother: The other Manager/scalar.\n\nReturns:\nNew DataManager with new data and index.", "source": "codesearchnet"}
{"code": "def __init__(\n            self,\n            name: str,\n            dtype: type,\n            unique: bool,\n            validators: t.List[VALIDATOR_FUNCTION],\n            recoders: t.List[RECODER_FUNCTION],) -> None:\n        \n        if validators is None:\n            validators = []\n        if recoders is None:\n            recoders = []\n\n        self.name = name\n        self.dtype = dtype\n        self.unique = unique\n        self.validators = self._dict_of_funcs(validators)\n        self.recoders = self._dict_of_funcs(recoders)", "docstring": "Construct a new `Column` object.\n\nArgs:\nname (str): The exact name of the column in a ``pd.DataFrame``.\ndtype (type): The type that each member of the recoded column must belong to.\nunique (bool): Whether values are allowed to recur in this column.\nvalidators (list): A list of validator functions.\nrecoders (list): A list of recoder functions.", "source": "juraj-google-style"}
{"code": "def select_top_predictions(self, predictions):\n    scores = predictions.get_field('scores')\n    keep = torch.nonzero((scores > self.confidence_threshold)).squeeze(1)\n    predictions = predictions[keep]\n    scores = predictions.get_field('scores')\n    (_, idx) = scores.sort(0, descending=True)\n    return predictions[idx]", "docstring": "Select only predictions which have a `score` > self.confidence_threshold,\nand returns the predictions in descending order of score\n\nArguments:\npredictions (BoxList): the result of the computation by the model.\nIt should contain the field `scores`.\n\nReturns:\nprediction (BoxList): the detected objects. Additional information\nof the detection properties can be found in the fields of\nthe BoxList via `prediction.fields()`", "source": "codesearchnet"}
{"code": "def stop(pid):\n    if psutil.pid_exists(pid):\n        try:\n            p = psutil.Process(pid)\n            p.kill()\n        except Exception:\n            pass", "docstring": "Shut down a specific process.\n\nArgs:\npid: the pid of the process to shutdown.", "source": "codesearchnet"}
{"code": "def add_stream(self, stream, path, compress, flags):\n        \n        self.data_fileobj.seek(self.last_offset)\n\n        if compress == 'bz2':\n            stream = bz2_compress_stream(stream)\n        elif compress == 'xz':\n            stream = xz_compress_stream(stream)\n        elif compress is None:\n            pass\n        else:\n            raise ValueError('Unsupported compression type: {}'.format(compress))\n\n        size = write_to_file(stream, self.data_fileobj)\n\n        \n        \n        if os.sep == '\\\\':  \n            path = path.replace('\\\\', '/')\n\n        e = dict(\n            name=six.u(path),\n            offset=self.last_offset,\n            size=size,\n            flags=flags,\n        )\n        self.entries.append(e)\n        self.last_offset += e['size']", "docstring": "Add the contents of an iterable to the MAR file.\n\nArgs:\nstream (iterable): yields blocks of data\npath (str): name of this file in the MAR file\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.\nflags (int): permission of this file in the MAR file", "source": "juraj-google-style"}
{"code": "def visualize_conv_activations(activation, name):\n    import math\n    with tf.name_scope(('visualize_act_' + name)):\n        (_, h, w, c) = activation.get_shape().as_list()\n        rows = []\n        c_per_row = int(math.sqrt(c))\n        for y in range(0, (c - c_per_row), c_per_row):\n            row = activation[(:, :, :, y:(y + c_per_row))]\n            cols = tf.unstack(row, axis=3)\n            row = tf.concat(cols, 1)\n            rows.append(row)\n        viz = tf.concat(rows, 2)\n    tf.summary.image(('visualize_act_' + name), tf.expand_dims(viz, (- 1)))", "docstring": "Visualize activations for convolution layers.\n\nRemarks:\nThis tries to place all activations into a square.\n\nArgs:\nactivation: tensor with the activation [B,H,W,C]\nname: label for tensorboard\n\nReturns:\nimage of almost all activations", "source": "codesearchnet"}
{"code": "def get_data(self, columns, type='ndarray', with_index=False):\n        \n\n        res = self.select_columns(columns)\n        if type == 'ndarray':\n            if with_index:\n                return res.reset_index().values\n            else:\n                return res.values\n        elif type == 'list':\n            if with_index:\n                return res.reset_index().values.tolist()\n            else:\n                return res.values.tolist()\n        elif type == 'dataframe':\n            if with_index:\n                return res.reset_index()\n            else:\n                return res", "docstring": "获取不同格式的数据\n\nArguments:\ncolumns {[type]} -- [description]\n\nKeyword Arguments:\ntype {str} -- [description] (default: {'ndarray'})\nwith_index {bool} -- [description] (default: {False})\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def _parse(json_str: str, primitive_cls: Type[Time]) -> Time:\n    try:\n        time = datetime.datetime.strptime(json_str, '%H:%M:%S').time()\n        return _primitive_time_utils.build_time(time, _primitive_time_utils.TimePrecision.MICROSECOND.SECOND, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        time = datetime.datetime.strptime(json_str, '%H:%M:%S.%f').time()\n        if _primitive_time_utils.PRECISION_PATTERN_MILLISECOND.search(json_str) is not None:\n            return _primitive_time_utils.build_time(time, _primitive_time_utils.TimePrecision.MILLISECOND, primitive_cls)\n        elif _primitive_time_utils.PRECISION_PATTERN_MICROSECOND.search(json_str) is not None:\n            return _primitive_time_utils.build_time(time, _primitive_time_utils.TimePrecision.MICROSECOND, primitive_cls)\n    except ValueError:\n        pass\n    raise fhir_errors.InvalidFhirError(f'Invalid Time: {json_str!r}.')", "docstring": "Parses the json_str into a Time FHIR primitive.\n\nArgs:\njson_str: The raw JSON string to parse.\nprimitive_cls: The FHIR primitive to parse into.\n\nReturns:\nA FHIR primitive Time instance.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that no FHIR primitive Time\nformat was able to properly parse the json_str.", "source": "github-repos"}
{"code": "def _gauss(mean: int, sigma: int) -> int:\n        \n        return int(random.gauss(mean, sigma))", "docstring": "Creates a variation from a base value\n\nArgs:\nmean: base value\nsigma: gaussian sigma\n\nReturns: random value", "source": "juraj-google-style"}
{"code": "def _validate_required(self, settings, name, value):\n        \n        if not value:\n            raise SettingsInvalidError((\"Required value from setting '{name}' \"\n                                        \"must not be \"\n                                        \"empty.\").format(name=name))\n\n        return value", "docstring": "Validate a required setting (value can not be empty)\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (str): Required value to validate.\n\nRaises:\nboussole.exceptions.SettingsInvalidError: If value is empty.\n\nReturns:\nstr: Validated value.", "source": "juraj-google-style"}
{"code": "def add_error(self, position, e):\n    if self.result != TestResultEnums.TEST_RESULT_FAIL:\n        self.result = TestResultEnums.TEST_RESULT_ERROR\n    if position in self.extra_errors:\n        raise Error('An exception is already recorded with position \"%s\", cannot reuse.' % position)\n    if isinstance(e, ExceptionRecord):\n        self.extra_errors[position] = e\n    else:\n        self.extra_errors[position] = ExceptionRecord(e, position=position)", "docstring": "Add extra error happened during a test.\n\nIf the test has passed or skipped, this will mark the test result as\nERROR.\n\nIf an error is added the test record, the record's result is equivalent\nto the case where an uncaught exception happened.\n\nIf the test record has not recorded any error, the newly added error\nwould be the main error of the test record. Otherwise the newly added\nerror is added to the record's extra errors.\n\nArgs:\nposition: string, where this error occurred, e.g. 'teardown_test'.\ne: An exception or a `signals.ExceptionRecord` object.", "source": "github-repos"}
{"code": "def process_function_type_comment(node, op, func, ctx):\n    if not op.annotation:\n        return\n    comment, line = op.annotation\n    if func.signature.annotations:\n        ctx.errorlog.redundant_function_type_comment(op.code.filename, line)\n        return\n    fake_stack = ctx.vm.simple_stack(op.at_line(line))\n    m = _FUNCTION_TYPE_COMMENT_RE.match(comment)\n    if not m:\n        ctx.errorlog.invalid_function_type_comment(fake_stack, comment)\n        return\n    args, return_type = m.groups()\n    assert args is not None and return_type is not None\n    if args != '...':\n        annot = args.strip()\n        try:\n            ctx.annotation_utils.eval_multi_arg_annotation(node, func, annot, fake_stack)\n        except abstract_utils.ConversionError:\n            ctx.errorlog.invalid_function_type_comment(fake_stack, annot, details='Must be constant.')\n    ret = ctx.convert.build_string(None, return_type)\n    func.signature.set_annotation('return', ctx.annotation_utils.extract_annotation(node, ret, 'return', fake_stack))", "docstring": "Modifies annotations from a function type comment.\n\nChecks if a type comment is present for the function.  If so, the type\ncomment is used to populate annotations.  It is an error to have\na type comment when annotations is not empty.\n\nArgs:\nnode: The current node.\nop: An opcode (used to determine filename and line number).\nfunc: An abstract.InterpreterFunction.\nctx: The current context.", "source": "github-repos"}
{"code": "def is_collection_aligned(self, data_collection):\n        \n        if self._collection_type != data_collection._collection_type:\n            return False\n        elif len(self.values) != len(data_collection.values):\n            return False\n        elif self.datetimes != data_collection.datetimes:\n            return False\n        else:\n            return True", "docstring": "Check if this Data Collection is aligned with another.\n\nAligned Data Collections are of the same Data Collection class, have the\nsame number of values and have matching datetimes.\n\nArgs:\ndata_collection: The Data Collection which you want to test if this\ncollection is aligned with.\n\nReturn:\nTrue if collections are aligned, False if not aligned", "source": "juraj-google-style"}
{"code": "def get_metadata(self, resource, keys):\n        \n        self.metadata_service.set_auth(self._token_metadata)\n        return self.metadata_service.get(resource, keys)", "docstring": "Gets the values for given keys associated with the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys (list)\n\nReturns:\n(dictionary)\n\nRaises:\nHTTPErrorList on failure.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, data):\n        \n        try:\n            fulfillment = _fulfillment_from_details(data['condition']['details'])\n        except KeyError:\n            \n            fulfillment = data['condition']['uri']\n        try:\n            amount = int(data['amount'])\n        except ValueError:\n            raise AmountError('Invalid amount: %s' % data['amount'])\n        return cls(fulfillment, data['public_keys'], amount)", "docstring": "Transforms a Python dictionary to an Output object.\n\nNote:\nTo pass a serialization cycle multiple times, a\nCryptoconditions Fulfillment needs to be present in the\npassed-in dictionary, as Condition URIs are not serializable\nanymore.\n\nArgs:\ndata (dict): The dict to be transformed.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Output`", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)\n\n    \n    \n    if volume_index is None:\n      location = getattr(path_spec, 'location', None)\n      if location is None or location != self.LOCATION_ROOT:\n        return None\n\n      return apfs_container_file_entry.APFSContainerFileEntry(\n          self._resolver_context, self, path_spec, is_root=True,\n          is_virtual=True)\n\n    if (volume_index < 0 or\n        volume_index >= self._fsapfs_container.number_of_volumes):\n      return None\n\n    return apfs_container_file_entry.APFSContainerFileEntry(\n        self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nAPFSContainerFileEntry: a file entry or None if not exists.", "source": "juraj-google-style"}
{"code": "def GetName(self, number):\n    \n    value = self._data_type_definition.values_per_number.get(number, None)\n    if not value:\n      return None\n\n    return value.name", "docstring": "Retrieves the name of an enumeration value by number.\n\nArgs:\nnumber (int): number.\n\nReturns:\nstr: name of the enumeration value or None if no corresponding\nenumeration value was found.", "source": "juraj-google-style"}
{"code": "def _get_sync(self, url):\n    response = self.session.get(url)\n    if (response.status_code == requests.codes.ok):\n        return response.json()\n    else:\n        raise HTTPError", "docstring": "Internal method used for GET requests\n\nArgs:\nurl (str): URL to fetch\n\nReturns:\nIndividual URL request's response\n\nRaises:\nHTTPError: If HTTP request failed.", "source": "codesearchnet"}
{"code": "def get_modname_from_modpath(module_fpath):\n    modsubdir_list = get_module_subdir_list(module_fpath)\n    modname = '.'.join(modsubdir_list)\n    modname = modname.replace('.__init__', '').strip()\n    modname = modname.replace('.__main__', '').strip()\n    return modname", "docstring": "returns importable name from file path\n\nget_modname_from_modpath\n\nArgs:\nmodule_fpath (str): module filepath\n\nReturns:\nstr: modname\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> import utool as ut\n>>> module_fpath = ut.util_path.__file__\n>>> modname = ut.get_modname_from_modpath(module_fpath)\n>>> result = modname\n>>> print(result)\nutool.util_path", "source": "codesearchnet"}
{"code": "def post_request(profile, resource, payload):\n    url = get_url(profile, resource)\n    headers = get_headers(profile)\n    response = requests.post(url, json=payload, headers=headers)\n    return response.json()", "docstring": "Do a POST request to Github's API.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nresource\nThe part of a Github API URL that comes after ``.../:repo/git``.\nFor instance, for ``.../:repo/git/commits``, it's ``/commits``.\n\npayload\nA dict of values to send as the payload of the POST request.\nThe data will be JSON-encoded.\n\nReturns:\nThe body of the response, converted from JSON into a Python dict.", "source": "codesearchnet"}
{"code": "def create_identity_with_nan_gradients_fn(have_nan_gradients):\n\n    @custom_gradient.custom_gradient\n    def _identity_with_nan_gradients(x):\n        \n        x = array_ops.identity(x)\n\n        def grad(dx):\n            return cond.cond(have_nan_gradients, lambda: dx * float('NaN'), lambda: dx)\n        return (x, grad)\n\n    def identity_with_nan_gradients(x):\n        return _identity_with_nan_gradients(x)\n    return identity_with_nan_gradients", "docstring": "Returns a function that optionally has NaN gradients.\n\nThis serves as a hook to introduce NaN gradients to a model. This returns an\nidentity function. The identity's gradient function will check if the boolean\ntensor `have_nan_gradients` is True. If so, the gradient will be NaN.\nOtherwise, the gradient will also be the identity.\n\nArgs:\nhave_nan_gradients: A scalar boolean tensor. If True, gradients will be NaN.\nOtherwise, the gradient function is the identity function.\n\nReturns:\nAn identity function whose gradient function will return NaNs, if\n`have_nan_gradients` is True.", "source": "github-repos"}
{"code": "def update_config_pwd(msg, cfg):\n    \n    msg_type = msg.__class__.__name__.lower()\n    key_fmt = msg.profile + \"_\" + msg_type\n    if isinstance(msg._auth, (MutableSequence, tuple)):\n        cfg.pwd[key_fmt] = \" :: \".join(msg._auth)\n    else:\n        cfg.pwd[key_fmt] = msg._auth", "docstring": "Updates the profile's auth entry with values set by the user.\nThis will overwrite existing values.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "juraj-google-style"}
{"code": "def pylint_check(files):\n    \n    \n    files = fs.wrap_paths(files)\n    cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini')\n    pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files)\n\n    return shell.run(pylint_cmd, exit_on_error=False).return_code", "docstring": "Run code checks using pylint.\n\nArgs:\nfiles (list[str]):\nA list of files to check\n\nReturns:\nbool: **True** if all files passed the checks, **False** otherwise.", "source": "juraj-google-style"}
{"code": "def read_undone_from_datastore(self, shard_id=None, num_shards=None):\n    if (shard_id is not None):\n        shards_list = [((i + shard_id) % num_shards) for i in range(num_shards)]\n    else:\n        shards_list = []\n    shards_list.append(None)\n    for shard in shards_list:\n        self._read_undone_shard_from_datastore(shard)\n        if self._work:\n            return shard\n    return None", "docstring": "Reads undone work from the datastore.\n\nIf shard_id and num_shards are specified then this method will attempt\nto read undone work for shard with id shard_id. If no undone work was found\nthen it will try to read shard (shard_id+1) and so on until either found\nshard with undone work or all shards are read.\n\nArgs:\nshard_id: Id of the start shard\nnum_shards: total number of shards\n\nReturns:\nid of the shard with undone work which was read. None means that work\nfrom all datastore was read.", "source": "codesearchnet"}
{"code": "def zopen(filename, *args, **kwargs):\n    if ((Path is not None) and isinstance(filename, Path)):\n        filename = str(filename)\n    (name, ext) = os.path.splitext(filename)\n    ext = ext.upper()\n    if (ext == '.BZ2'):\n        if (PY_VERSION[0] >= 3):\n            return bz2.open(filename, *args, **kwargs)\n        else:\n            args = list(args)\n            if (len(args) > 0):\n                args[0] = ''.join([c for c in args[0] if (c != 't')])\n            if ('mode' in kwargs):\n                kwargs['mode'] = ''.join([c for c in kwargs['mode'] if (c != 't')])\n            return bz2.BZ2File(filename, *args, **kwargs)\n    elif (ext in ('.GZ', '.Z')):\n        return gzip.open(filename, *args, **kwargs)\n    else:\n        return io.open(filename, *args, **kwargs)", "docstring": "This function wraps around the bz2, gzip and standard python's open\nfunction to deal intelligently with bzipped, gzipped or standard text\nfiles.\n\nArgs:\nfilename (str/Path): filename or pathlib.Path.\n\\*args: Standard args for python open(..). E.g., 'r' for read, 'w' for\nwrite.\n\\*\\*kwargs: Standard kwargs for python open(..).\n\nReturns:\nFile-like object. Supports with context.", "source": "codesearchnet"}
{"code": "def update_note(self, note):\n    if ('key' in note):\n        noteid = note.pop('key', None)\n    else:\n        noteid = uuid.uuid4().hex\n    if ('version' in note):\n        version = note.pop('version', None)\n        url = ('%s/i/%s/v/%s?response=1' % (DATA_URL, noteid, version))\n    else:\n        url = ('%s/i/%s?response=1' % (DATA_URL, noteid))\n    note = self.__remove_simplenote_api_fields(note)\n    request = Request(url, data=json.dumps(note).encode('utf-8'))\n    request.add_header(self.header, self.get_token())\n    request.add_header('Content-Type', 'application/json')\n    response = ''\n    try:\n        response = urllib2.urlopen(request)\n    except HTTPError as e:\n        if (e.code == 401):\n            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n        else:\n            return (e, (- 1))\n    except IOError as e:\n        return (e, (- 1))\n    note = json.loads(response.read().decode('utf-8'))\n    note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get('X-Simperium-Version')))\n    return (note, 0)", "docstring": "Method to update a specific note object, if the note object does not\nhave a \"key\" field, a new note is created\n\nArguments\n- note (dict): note object to update\n\nReturns:\nA tuple `(note, status)`\n- note (dict): note object\n- status (int): 0 on success and -1 otherwise", "source": "codesearchnet"}
{"code": "def find_all(self, collection):\n        \n        obj = getattr(self.db, collection)\n        result = obj.find()\n        return result", "docstring": "Search a collection for all available items.\n\nArgs:\ncollection: The db collection. See main class documentation.\nReturns:\nList of all items in the collection.", "source": "juraj-google-style"}
{"code": "def active_futures(ticker: str, dt) -> str:\n    t_info = ticker.split()\n    (prefix, asset) = (' '.join(t_info[:(- 1)]), t_info[(- 1)])\n    info = const.market_info(f'{prefix[:(- 1)]}1 {asset}')\n    (f1, f2) = (f'{prefix[:(- 1)]}1 {asset}', f'{prefix[:(- 1)]}2 {asset}')\n    fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq'])\n    fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq'])\n    fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True)\n    if (pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month):\n        return fut_1\n    d1 = bdib(ticker=f1, dt=dt)\n    d2 = bdib(ticker=f2, dt=dt)\n    return (fut_1 if (d1[f1].volume.sum() > d2[f2].volume.sum()) else fut_2)", "docstring": "Active futures contract\n\nArgs:\nticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.\ndt: date\n\nReturns:\nstr: ticker name", "source": "codesearchnet"}
{"code": "def _list(self, request, start_response):\n    configs = []\n    generator = directory_list_generator.DirectoryListGenerator(request)\n    for config in self._config_manager.configs.itervalues():\n        if (config != self.API_CONFIG):\n            configs.append(config)\n    directory = generator.pretty_print_config_to_json(configs)\n    if (not directory):\n        _logger.error('Failed to get API directory')\n        return util.send_wsgi_not_found_response(start_response)\n    return self._send_success_response(directory, start_response)", "docstring": "Sends HTTP response containing the API directory.\n\nThis calls start_response and returns the response body.\n\nArgs:\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body.", "source": "codesearchnet"}
{"code": "def wrap_callable(cls, uri, methods, callable_obj):\n    if isinstance(callable_obj, HandlerMeta):\n        callable_obj.base_endpoint = uri\n        callable_obj.is_valid = True\n        return callable_obj\n    if isinstance(callable_obj, types.FunctionType):\n        return cls(uri=uri, methods=methods, callable_obj=callable_obj)\n    raise RouteError('Invalid handler type.')", "docstring": "Wraps function-based callable_obj into a `Route` instance, else\nproxies a `bottle_neck.handlers.BaseHandler` subclass instance.\n\nArgs:\nuri (str):  The uri relative path.\nmethods (tuple): A tuple of valid method strings.\ncallable_obj (instance): The callable object.\n\nReturns:\nA route instance.\n\nRaises:\nRouteError for invalid callable object type.", "source": "codesearchnet"}
{"code": "def get_data_layout(self, data_shape):\n    raise NotImplementedError()", "docstring": "Retrieve the `TensorLayout` for the input data.\n\nArgs:\ndata_shape: shape for the input data in list or tuple format.\n\nReturns:\nThe `TensorLayout` for the data, which can be used by\n`backend.distribute_value()` to redistribute a input data.", "source": "github-repos"}
{"code": "def __init__(self, event_count=0, first_timestamp=-1, last_timestamp=-1):\n    \n\n    \n    \n    \n    self.event_count = event_count\n    self.first_timestamp = first_timestamp\n    self.last_timestamp = last_timestamp", "docstring": "Tracks events for a single category of values.\n\nArgs:\nevent_count: The initial event count to use.\nfirst_timestamp: The timestamp of the first event with this value.\nlast_timestamp: The timestamp of the last event with this category of\nvalues.", "source": "juraj-google-style"}
{"code": "def get_content_metadata(self, enterprise_customer):\n        \n        content_metadata = OrderedDict()\n\n        \n        if enterprise_customer.catalog:\n            response = self._load_data(\n                self.ENTERPRISE_CUSTOMER_ENDPOINT,\n                detail_resource='courses',\n                resource_id=str(enterprise_customer.uuid),\n                traverse_pagination=True,\n            )\n            for course in response['results']:\n                for course_run in course['course_runs']:\n                    course_run['content_type'] = 'courserun'  \n                    content_metadata[course_run['key']] = course_run\n\n        for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():\n            response = self._load_data(\n                self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT,\n                resource_id=str(enterprise_customer_catalog.uuid),\n                traverse_pagination=True,\n                querystring={'page_size': 1000},\n            )\n\n            for item in response['results']:\n                content_id = utils.get_content_metadata_item_id(item)\n                content_metadata[content_id] = item\n\n        return content_metadata.values()", "docstring": "Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.\n\nArguments:\nenterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.\n\nReturns:\nlist: List of dicts containing content metadata.", "source": "juraj-google-style"}
{"code": "def _get_populate_values(self, instance) -> Tuple[str, str]:\n        \n\n        return [\n            (\n                lang_code,\n                self._get_populate_from_value(\n                    instance,\n                    self.populate_from,\n                    lang_code\n                ),\n            )\n            for lang_code, _ in settings.LANGUAGES\n        ]", "docstring": "Gets all values (for each language) from the\nspecified's instance's `populate_from` field.\n\nArguments:\ninstance:\nThe instance to get the values from.\n\nReturns:\nA list of (lang_code, value) tuples.", "source": "juraj-google-style"}
{"code": "def filter_sequences(self, seq_type):\n    return DictList((x for x in self.sequences if isinstance(x, seq_type)))", "docstring": "Return a DictList of only specified types in the sequences attribute.\n\nArgs:\nseq_type (SeqProp): Object type\n\nReturns:\nDictList: A filtered DictList of specified object type only", "source": "codesearchnet"}
{"code": "def softplus_inverse(x, name=None):\n  \n  with tf.name_scope(name or \"softplus_inverse\"):\n    x = tf.convert_to_tensor(value=x, name=\"x\")\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    threshold = np.log(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) + 2.\n    is_too_small = tf.less(x, np.exp(threshold))\n    is_too_large = tf.greater(x, -threshold)\n    too_small_value = tf.math.log(x)\n    too_large_value = x\n    \n    \n    x = tf.where(tf.logical_or(is_too_small, is_too_large), tf.ones_like(x), x)\n    y = x + tf.math.log(-tf.math.expm1(-x))  \n    return tf.where(is_too_small, too_small_value,\n                    tf.where(is_too_large, too_large_value, y))", "docstring": "Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).\n\nMathematically this op is equivalent to:\n\n```none\nsoftplus_inverse = log(exp(x) - 1.)\n```\n\nArgs:\nx: `Tensor`. Non-negative (not enforced), floating-point.\nname: A name for the operation (optional).\n\nReturns:\n`Tensor`. Has the same type/shape as input `x`.", "source": "juraj-google-style"}
{"code": "def __call__(self, *args, **kwargs):\n        \n        for loop, m in self.iter_methods():\n            coro = m(*args, **kwargs)\n            self.submit_coroutine(coro, loop)", "docstring": "Triggers all stored callbacks (coroutines)\n\nArgs:\n*args: Positional arguments to pass to callbacks\n**kwargs: Keyword arguments to pass to callbacks", "source": "juraj-google-style"}
{"code": "def map_resources(self):\n    assert not context.executing_eagerly()\n    object_map = object_identity.ObjectIdentityDictionary()\n    tensor_map = object_identity.ObjectIdentityDictionary()\n    asset_info = _AssetInfo(asset_defs=[], asset_initializers_by_resource=object_identity.ObjectIdentityDictionary(), asset_filename_map={}, asset_index={})\n    for node_id in _dependency_sorted_node_ids(self):\n        obj = self.nodes[node_id]\n        tensors = obj._export_to_saved_model_graph(object_map=object_map, tensor_map=tensor_map, options=self.options)\n        if isinstance(obj, asset.Asset):\n            _add_asset_info(obj, asset_info, tensor_map[obj.asset_path])\n        if tensors:\n            for tensor in tensors:\n                self.captured_tensor_node_ids[tensor] = node_id\n    return (object_map, tensor_map, asset_info)", "docstring": "Makes new resource handle ops corresponding to existing resource tensors.\n\nCreates resource handle ops in the current default graph, whereas\n`accessible_objects` will be from an eager context. Resource mapping adds\nresource handle ops to the main GraphDef of a SavedModel, which allows the\nC++ loader API to interact with resources.\n\nReturns:\nA tuple of (object_map, tensor_map, asset_info):\nobject_map: A dictionary mapping from object in `accessible_objects` to\nreplacement objects created to hold the new resource tensors.\ntensor_map: A dictionary mapping from resource tensors extracted from\n`accessible_objects` to newly created resource tensors.\nasset_info: An _AssetInfo tuple describing external assets referenced\nfrom accessible_objects.", "source": "github-repos"}
{"code": "def filter(self, predicates):\n        \n        tys = []\n        for col_name, raw_column in self.raw_columns.items():\n            dtype = str(raw_column.dtype)\n            if dtype == 'object' or dtype == '|S64':\n                weld_type = WeldVec(WeldChar())\n            else:\n                weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]\n            tys.append(weld_type)\n\n        if len(tys) == 1:\n            weld_type = tys[0]\n        else:\n            weld_type = WeldStruct(tys)\n\n        if isinstance(predicates, SeriesWeld):\n            predicates = predicates.expr\n\n        return DataFrameWeldExpr(\n            grizzly_impl.filter(\n                grizzly_impl.zip_columns(\n                    self.raw_columns.values(),\n                ),\n                predicates\n            ),\n            self.raw_columns.keys(),\n            weld_type\n        )", "docstring": "Summary\n\nArgs:\ngrouping_column_name (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def sampler_to_iterator(dataset, sampler):\n    for sample in sampler:\n        if isinstance(sample, (list, tuple)):\n            (yield [dataset[i] for i in sample])\n        else:\n            (yield dataset[sample])", "docstring": "Given a batch sampler or sampler returns examples instead of indices\n\nArgs:\ndataset (torch.utils.data.Dataset): Dataset to sample from.\nsampler (torch.utils.data.sampler.Sampler): Sampler over the dataset.\n\nReturns:\ngenerator over dataset examples", "source": "codesearchnet"}
{"code": "def get_device_name(self, cached=True):\n    if (cached and (self.name is not None)):\n        return self.name\n    device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n    if (device_name is None):\n        logger.warn('Failed to find handle for device name')\n        return None\n    self.name = self.dongle._read_attribute(self.conn_handle, device_name)\n    return self.name", "docstring": "Returns the SK8 device BLE name.\n\nArgs:\ncached (bool): if True, returns the locally cached copy of the name. If this is\nset to False, or the name is not cached, it will read from the device instead.\n\nReturns:\nstr. The current device name. May be `None` if an error occurs.", "source": "codesearchnet"}
{"code": "def save_collection(png_filename_base, numpy_data, start_layers_at=1):\n    \n    file_ext = png_filename_base.split('.')[-1]\n    if file_ext in ['png']:\n        \n        file_base = '.'.join(png_filename_base.split('.')[:-1])\n    else:\n        \n        \n        file_base = png_filename_base\n        file_ext = \".png\"\n\n    file_base_array = file_base.split('*')\n\n    \n    output_files = []\n\n    \n    i = start_layers_at\n    for layer in numpy_data:\n        layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext\n        output_files.append(save(layer_filename, layer))\n        i += 1\n\n    return output_files", "docstring": "Export a numpy array to a set of png files, with each Z-index 2D\narray as its own 2D file.\n\nArguments:\npng_filename_base:     A filename template, such as \"my-image-*.png\"\nwhich will lead to a collection of files named\n\"my-image-0.png\", \"my-image-1.png\", etc.\nnumpy_data:             The numpy array data to save to png.\n\nReturns:\nArray. A list of expanded filenames that hold png data.", "source": "juraj-google-style"}
{"code": "def get_program(self, program_resource_name: str) -> Dict:\n    return self.service.projects().programs().get(name=program_resource_name).execute()", "docstring": "Returns the previously created quantum program.\n\nParams:\nprogram_resource_name: A string of the form\n`projects/project_id/programs/program_id`.\n\nReturns:\nA dictionary containing the metadata and the program.", "source": "codesearchnet"}
{"code": "def auth_middleware(policy):\n    assert isinstance(policy, AbstractAuthentication)\n\n    async def _auth_middleware_factory(app, handler):\n\n        async def _middleware_handler(request):\n            request[POLICY_KEY] = policy\n            response = (await handler(request))\n            (await policy.process_response(request, response))\n            return response\n        return _middleware_handler\n    return _auth_middleware_factory", "docstring": "Returns a aiohttp_auth middleware factory for use by the aiohttp\napplication object.\n\nArgs:\npolicy: A authentication policy with a base class of\nAbstractAuthentication.", "source": "codesearchnet"}
{"code": "def insert_json(table=None, bulk_size=1000, concurrency=25, hosts=None, output_fmt=None):\n    if (not hosts):\n        return print_only(table)\n    queries = (to_insert(table, d) for d in dicts_from_stdin())\n    bulk_queries = as_bulk_queries(queries, bulk_size)\n    print('Executing inserts: bulk_size={} concurrency={}'.format(bulk_size, concurrency), file=sys.stderr)\n    stats = Stats()\n    with clients.client(hosts, concurrency=concurrency) as client:\n        f = partial(aio.measure, stats, client.execute_many)\n        try:\n            aio.run_many(f, bulk_queries, concurrency)\n        except clients.SqlException as e:\n            raise SystemExit(str(e))\n    try:\n        print(format_stats(stats.get(), output_fmt))\n    except KeyError:\n        if (not stats.sampler.values):\n            raise SystemExit('No data received via stdin')\n        raise", "docstring": "Insert JSON lines fed into stdin into a Crate cluster.\n\nIf no hosts are specified the statements will be printed.\n\nArgs:\ntable: Target table name.\nbulk_size: Bulk size of the insert statements.\nconcurrency: Number of operations to run concurrently.\nhosts: hostname:port pairs of the Crate nodes", "source": "codesearchnet"}
{"code": "def ensure_app_cache_dir(appname, *args):\n    from ubelt import util_path\n    dpath = get_app_cache_dir(appname, *args)\n    util_path.ensuredir(dpath)\n    return dpath", "docstring": "Calls `get_app_cache_dir` but ensures the directory exists.\n\nArgs:\nappname (str): the name of the application\n*args: any other subdirectories may be specified\n\nSeeAlso:\nget_app_cache_dir\n\nExample:\n>>> import ubelt as ub\n>>> dpath = ub.ensure_app_cache_dir('ubelt')\n>>> assert exists(dpath)", "source": "codesearchnet"}
{"code": "def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'):\n  \n  with tf.variable_scope(scope):\n    if stride > 1:\n      avg_pool = CONFIG[dim]['avg_pool']\n      x = avg_pool(x,\n                   pool_size=(stride, stride),\n                   strides=(stride, stride),\n                   padding='VALID')\n\n    input_channels = tf.shape(x)[3]\n    diff = output_channels - input_channels\n    x = tf.pad(\n        x, [[0, 0], [0, 0], [0, 0],\n            [diff \n    return x", "docstring": "Downsamples 'x' by `stride` using average pooling.\n\nArgs:\nx: input tensor of size [N, H, W, C]\noutput_channels: Desired number of output channels.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\nstride: What stride to use. Usually 1 or 2.\nscope: Optional variable scope.\n\nReturns:\nA downsampled tensor of size [N, H/2, W/2, output_channels] if stride\nis 2, else returns a tensor of size [N, H, W, output_channels] if\nstride is 1.", "source": "juraj-google-style"}
{"code": "def make_agent() -> EcommerceAgent:\n    config_path = find_config('tfidf_retrieve')\n    skill = build_model(config_path)\n    agent = EcommerceAgent(skills=[skill])\n    return agent", "docstring": "Make an agent\n\nReturns:\nagent: created Ecommerce agent", "source": "codesearchnet"}
{"code": "def appliance_device_read_community(self):\n    if (not self.__appliance_device_read_community):\n        self.__appliance_device_read_community = ApplianceDeviceReadCommunity(self.__connection)\n    return self.__appliance_device_read_community", "docstring": "Gets the ApplianceDeviceReadCommunity API client.\n\nReturns:\nApplianceDeviceReadCommunity:", "source": "codesearchnet"}
{"code": "def __random_density_hs(N, rank=None, seed=None):\n    G = __ginibre_matrix(N, rank, seed)\n    G = G.dot(G.conj().T)\n    return (G / np.trace(G))", "docstring": "Generate a random density matrix from the Hilbert-Schmidt metric.\n\nArgs:\nN (int): the length of the density matrix.\nrank (int or None): the rank of the density matrix. The default\nvalue is full-rank.\nseed (int): Optional. To set a random seed.\nReturns:\nndarray: rho (N,N  a density matrix.", "source": "codesearchnet"}
{"code": "def Write(self, map_data):\n    self._Begin()\n    written_keys = set()\n    write_offset = 0\n    try:\n        while 1:\n            entry = map_data.PopItem()\n            for index in self._indices:\n                self._indices[index][str(getattr(entry, index))] = str(write_offset)\n            write_offset += self._WriteData(self.temp_cache_file, entry)\n            written_keys.update(self._ExpectedKeysForEntry(entry))\n    except KeyError:\n        self.temp_cache_file.flush()\n    except:\n        self._Rollback()\n        raise\n    return written_keys", "docstring": "Write the map to the cache.\n\nWarning -- this destroys map_data as it is written.  This is done to save\nmemory and keep our peak footprint smaller.  We consume memory again\non Verify() as we read a new copy of the entries back in.\n\nArgs:\nmap_data: A Map subclass containing the entire map to be written.\n\nReturns:\na set of keys written or None on failure.", "source": "github-repos"}
{"code": "def __call__(self, fn):\n        \n\n        def debug(app, *args, **kwargs):\n            \n\n            data = fn(app, *args, **kwargs)\n            app.tcex.log.debug(\n                'function: \"{}\", args: \"{}\", kwargs: \"{}\"'.format(\n                    self.__class__.__name__, vars(args), kwargs\n                )\n            )\n            return data\n\n        return debug", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def random_strings(self, string_length=1):\n        \n        str_list = []\n        for path in self.uniform_generate(string_length):\n            str_list.append(self._path_to_str(path))\n        return str_list", "docstring": "Generate string_length random strings that belong to the automaton.\nArgs:\nstring_length (integer): The size of the random string\nReturns:\nstr: The generated string", "source": "juraj-google-style"}
{"code": "def __getitem__(self, index: Any) -> Rotation:\n    if type(index) is not tuple:\n        index = (index,)\n    if self._rot_mats is not None:\n        rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n        return Rotation(rot_mats=rot_mats)\n    elif self._quats is not None:\n        quats = self._quats[index + (slice(None),)]\n        return Rotation(quats=quats, normalize_quats=False)\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape\nproperty.\n\nArgs:\nindex:\nA torch index. E.g. (1, 3, 2), or (slice(None,))\nReturns:\nThe indexed rotation", "source": "github-repos"}
{"code": "def stack_residual_blocks_v1(x, filters, blocks, stride1=2, name=None):\n    x = residual_block_v1(x, filters, stride=stride1, name=name + '_block1')\n    for i in range(2, blocks + 1):\n        x = residual_block_v1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))\n    return x", "docstring": "A set of stacked residual blocks.\n\nArgs:\nx: Input tensor.\nfilters: Number of filters in the bottleneck layer in a block.\nblocks: Number of blocks in the stacked blocks.\nstride1: Stride of the first layer in the first block. Defaults to `2`.\nname: Stack label.\n\nReturns:\nOutput tensor for the stacked blocks.", "source": "github-repos"}
{"code": "def _get_ngrams_with_counter(segment, max_order):\n  \n  ngram_counts = collections.Counter()\n  for order in xrange(1, max_order + 1):\n    for i in xrange(0, len(segment) - order + 1):\n      ngram = tuple(segment[i:i + order])\n      ngram_counts[ngram] += 1\n  return ngram_counts", "docstring": "Extracts all n-grams up to a given maximum order from an input segment.\n\nArgs:\nsegment: text segment from which n-grams will be extracted.\nmax_order: maximum length in tokens of the n-grams returned by this\nmethods.\n\nReturns:\nThe Counter containing all n-grams upto max_order in segment\nwith a count of how many times each n-gram occurred.", "source": "juraj-google-style"}
{"code": "def schedule(cls, mapreduce_spec):\n    \n    task_name = mapreduce_spec.mapreduce_id + \"-finalize\"\n    finalize_task = taskqueue.Task(\n        name=task_name,\n        url=(mapreduce_spec.params[\"base_path\"] + \"/finalizejob_callback/\" +\n             mapreduce_spec.mapreduce_id),\n        params={\"mapreduce_id\": mapreduce_spec.mapreduce_id},\n        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))\n    queue_name = util.get_queue_name(None)\n    if not _run_task_hook(mapreduce_spec.get_hooks(),\n                          \"enqueue_controller_task\",\n                          finalize_task,\n                          queue_name):\n      try:\n        finalize_task.add(queue_name)\n      except (taskqueue.TombstonedTaskError,\n              taskqueue.TaskAlreadyExistsError), e:\n        logging.warning(\"Task %r already exists. %s: %s\",\n                        task_name, e.__class__, e)", "docstring": "Schedule finalize task.\n\nArgs:\nmapreduce_spec: mapreduce specification as MapreduceSpec.", "source": "juraj-google-style"}
{"code": "def write(self, save_path, options=None):\n    return self._write(save_path, options)", "docstring": "Save the checkpointed variables.\n\nArgs:\nsave_path: The file prefix of the checkpoint file.\noptions: Optional CheckpointOption instance.\n\nReturns:\nThe full path of the checkpoint file.", "source": "github-repos"}
{"code": "def match(self, f, *args):\n        \n        try:\n            match = f(self.tokenizer, *args)\n        except StopIteration:\n            \n            \n            \n            return\n\n        if match is None:\n            return\n\n        if not isinstance(match, grammar.TokenMatch):\n            raise TypeError(\"Invalid grammar function %r returned %r.\"\n                            % (f, match))\n\n        self.matched = match\n        return match", "docstring": "Match grammar function 'f' against next token and set 'self.matched'.\n\nArguments:\nf: A grammar function - see efilter.parsers.common.grammar. Must\nreturn TokenMatch or None.\nargs: Passed to 'f', if any.\n\nReturns:\nInstance of efilter.parsers.common.grammar.TokenMatch or None.\n\nComment:\nIf a match is returned, it will also be stored in self.matched.", "source": "juraj-google-style"}
{"code": "def __init__(self, max_size=10, max_age=600):\n    \n    super(TimeBasedCache, self).__init__(max_size)\n    self.max_age = max_age\n\n    def HouseKeeper():\n      \n      if not time:\n        \n        return\n\n      now = time.time()\n\n      for cache in TimeBasedCache.active_caches:\n        \n        with cache.lock:\n          \n          \n          \n          for node in list(itervalues(cache._hash)):\n            timestamp, obj = node.data\n\n            \n            if timestamp + cache.max_age < now:\n              cache.KillObject(obj)\n\n              cache._age.Unlink(node)\n              cache._hash.pop(node.key, None)\n          \n\n    if not TimeBasedCache.house_keeper_thread:\n      TimeBasedCache.active_caches = weakref.WeakSet()\n      \n      TimeBasedCache.house_keeper_thread = InterruptableThread(\n          name=\"HouseKeeperThread\", target=HouseKeeper)\n      TimeBasedCache.house_keeper_thread.start()\n    TimeBasedCache.active_caches.add(self)", "docstring": "Constructor.\n\nThis cache will refresh the age of the cached object as long as they are\naccessed within the allowed age. The age refers to the time since it was\nlast touched.\n\nArgs:\nmax_size: The maximum number of objects held in cache.\nmax_age: The maximum length of time an object is considered alive.", "source": "juraj-google-style"}
{"code": "def register_entry(self, navbar_kwargs):\n    path = navbar_kwargs.pop('path')\n    if ((not hasattr(path, '__iter__')) or isinstance(path, basestring)):\n        path = [path]\n    entry_group = self.navbar_entries\n    for (name, is_last) in iter_islast(path):\n        kwargs = deepcopy(navbar_kwargs)\n        kwargs['name'] = name\n        for existing_entry in entry_group:\n            if (existing_entry.name == name):\n                entry = existing_entry\n                if is_last:\n                    entry.endpoint = kwargs['endpoint']\n                break\n        else:\n            if (not is_last):\n                kwargs['endpoint'] = None\n            entry = NavbarEntry(**kwargs)\n            entry_group.add(entry)\n        entry_group = entry.children", "docstring": "Register a navbar entry with the copilot.\n\nArgs:\nnavbar_kwargs (dict): Arguments passed to the\n:class:`NavbarEntry` instance.", "source": "codesearchnet"}
{"code": "def _model_source_dir(self):\n    return (self.source_dir if self.sagemaker_session.local_mode else self.uploaded_code.s3_prefix)", "docstring": "Get the appropriate value to pass as source_dir to model constructor on deploying\n\nReturns:\nstr: Either a local or an S3 path pointing to the source_dir to be used for code by the model to be deployed", "source": "codesearchnet"}
{"code": "def __init__(self, array):\n    self.array = array", "docstring": "Specify a NumPy array to wrap.\n\nArgs:\narray: The NumPy array to save and restore (may be overwritten).", "source": "github-repos"}
{"code": "def GetMetadata(\n      self, metadata_key='', recursive=True, timeout=None, retry=True):\n    \n    return self._HandleMetadataUpdate(\n        metadata_key=metadata_key, recursive=recursive, wait=False,\n        timeout=timeout, retry=retry)", "docstring": "Retrieve the contents of metadata server for a metadata key.\n\nArgs:\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\ntimeout: int, timeout in seconds for returning metadata output.\nretry: bool, True if we should retry on failure.\n\nReturns:\njson, the deserialized contents of the metadata server or None if error.", "source": "juraj-google-style"}
{"code": "def _GenClientLibCallback(args, client_func=_GenClientLib):\n  \n  client_path = client_func(args.discovery_doc[0], args.language, args.output,\n                            args.build_system)\n  print 'API client library written to %s' % client_path", "docstring": "Generate a client library to file.\n\nArgs:\nargs: An argparse.Namespace object to extract parameters from\nclient_func: A function that generates client libraries and stores them to\nfiles, accepting a path to a discovery doc, a client library language, an\noutput directory, and a build system for the client library language.", "source": "juraj-google-style"}
{"code": "def exists(self, vars_list: List[str]) -> 'TensorFluent':\n        \n        return self._aggregation_op(tf.reduce_any, self, vars_list)", "docstring": "Returns the TensorFluent for the exists aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the exists aggregation function.", "source": "juraj-google-style"}
{"code": "def __send_smtp_email(self, recipients, subject, html_body, text_body):\n    smtp = smtplib.SMTP(dbconfig.get('smtp_server', NS_EMAIL, 'localhost'), dbconfig.get('smtp_port', NS_EMAIL, 25))\n    source_arn = dbconfig.get('source_arn', NS_EMAIL)\n    return_arn = dbconfig.get('return_path_arn', NS_EMAIL)\n    from_arn = dbconfig.get('from_arn', NS_EMAIL)\n    msg = MIMEMultipart('alternative')\n    if (source_arn and from_arn and return_arn):\n        msg['X-SES-SOURCE-ARN'] = source_arn\n        msg['X-SES-FROM-ARN'] = from_arn\n        msg['X-SES-RETURN-PATH-ARN'] = return_arn\n    msg['Subject'] = subject\n    msg['To'] = ','.join(recipients)\n    msg['From'] = self.sender\n    if html_body:\n        html_part = MIMEText(html_body, 'html')\n        msg.attach(html_part)\n    if text_body:\n        text_part = MIMEText(text_body, 'plain')\n        msg.attach(text_part)\n    if dbconfig.get('smtp_tls', NS_EMAIL, False):\n        smtp.starttls()\n    username = dbconfig.get('smtp_username', NS_EMAIL)\n    password = dbconfig.get('smtp_password', NS_EMAIL)\n    if (username and password):\n        smtp.login(username, password)\n    smtp.sendmail(self.sender, recipients, msg.as_string())\n    smtp.quit()", "docstring": "Send an email using SMTP\n\nArgs:\nrecipients (`list` of `str`): List of recipient email addresses\nsubject (str): Subject of the email\nhtml_body (str): HTML body of the email\ntext_body (str): Text body of the email\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def convert_to_qutip(expr, full_space=None, mapping=None):\n    if (full_space is None):\n        full_space = expr.space\n    if (not expr.space.is_tensor_factor_of(full_space)):\n        raise ValueError((\"expr '%s' must be in full_space %s\" % (expr, full_space)))\n    if (full_space == TrivialSpace):\n        raise AlgebraError('Cannot convert object in TrivialSpace to qutip. You may pass a non-trivial `full_space`')\n    if (mapping is not None):\n        if (expr in mapping):\n            ret = mapping[expr]\n            if isinstance(ret, qutip.Qobj):\n                return ret\n            else:\n                assert callable(ret)\n                return ret(expr)\n    if (expr is IdentityOperator):\n        local_spaces = full_space.local_factors\n        if (len(local_spaces) == 0):\n            raise ValueError(('full_space %s does not have local factors' % full_space))\n        else:\n            return qutip.tensor(*[qutip.qeye(s.dimension) for s in local_spaces])\n    elif (expr is ZeroOperator):\n        return qutip.tensor(*[qutip.Qobj(csr_matrix((s.dimension, s.dimension))) for s in full_space.local_factors])\n    elif isinstance(expr, LocalOperator):\n        return _convert_local_operator_to_qutip(expr, full_space, mapping)\n    elif (isinstance(expr, Operator) and isinstance(expr, Operation)):\n        return _convert_operator_operation_to_qutip(expr, full_space, mapping)\n    elif isinstance(expr, OperatorTrace):\n        raise NotImplementedError('Cannot convert OperatorTrace to qutip')\n    elif isinstance(expr, State):\n        return _convert_ket_to_qutip(expr, full_space, mapping)\n    elif isinstance(expr, SuperOperator):\n        return _convert_superoperator_to_qutip(expr, full_space, mapping)\n    elif isinstance(expr, Operation):\n        return _convert_state_operation_to_qutip(expr, full_space, mapping)\n    elif isinstance(expr, SLH):\n        raise ValueError('SLH objects can only be converted using SLH_to_qutip routine')\n    else:\n        raise ValueError((\"Cannot convert '%s' of type %s\" % (str(expr), type(expr))))", "docstring": "Convert a QNET expression to a qutip object\n\nArgs:\nexpr: a QNET expression\nfull_space (HilbertSpace): The\nHilbert space in which `expr` is defined. If not given,\n``expr.space`` is used. The Hilbert space must have a well-defined\nbasis.\nmapping (dict): A mapping of any (sub-)expression to either a\n`quip.Qobj` directly, or to a callable that will convert the\nexpression into a `qutip.Qobj`. Useful for e.g. supplying objects\nfor symbols\nRaises:\nValueError: if `expr` is not in `full_space`, or if `expr` cannot be\nconverted.", "source": "codesearchnet"}
{"code": "def from_dict(cls, metadata):\n        \n        hyperparameters = metadata.get('hyperparameters')\n        tunable = metadata.get('tunable_hyperparameters')\n\n        pipeline = cls(\n            metadata['primitives'],\n            metadata.get('init_params'),\n            metadata.get('input_names'),\n            metadata.get('output_names'),\n        )\n\n        if hyperparameters:\n            pipeline.set_hyperparameters(hyperparameters)\n\n        if tunable is not None:\n            pipeline._tunable_hyperparameters = tunable\n\n        return pipeline", "docstring": "Create a new MLPipeline from a dict specification.\n\nThe dict structure is the same as the one created by the `to_dict` method.\n\nArgs:\nmetadata (dict): Dictionary containing the pipeline specification.\n\nReturns:\nMLPipeline:\nA new MLPipeline instance with the details found in the\ngiven specification dictionary.", "source": "juraj-google-style"}
{"code": "def parse(self, sentence, params=None, headers=None):\n        \n        if params is None:\n            params = {}\n        params['input'] = sentence\n\n        hdrs = {'Accept': 'application/json'}\n        if headers is not None:\n            hdrs.update(headers)\n\n        url = urljoin(self.server, 'parse')\n        r = requests.get(url, params=params, headers=hdrs)\n        if r.status_code == 200:\n            return _RestResponse(r.json())\n        else:\n            r.raise_for_status()", "docstring": "Request a parse of *sentence* and return the response.\n\nArgs:\nsentence (str): sentence to be parsed\nparams (dict): a dictionary of request parameters\nheaders (dict): a dictionary of additional request headers\nReturns:\nA ParseResponse containing the results, if the request was\nsuccessful.\nRaises:\nrequests.HTTPError: if the status code was not 200", "source": "juraj-google-style"}
{"code": "def search_groups(self, group):\n    group_url = ('%s/%s/%s' % (self.url, 'group', group))\n    response = self.jss.get(group_url)\n    return LDAPGroupsResults(self.jss, response)", "docstring": "Search for LDAP groups.\n\nArgs:\ngroup: Group to search for. It is not entirely clear how the\nJSS determines the results- are regexes allowed, or\nglobbing?\n\nReturns:\nLDAPGroupsResult object.\n\nRaises:\nJSSGetError if no results are found.", "source": "codesearchnet"}
{"code": "def _ExtractWindowingInfo(pcoll, fields: Optional[Union[Mapping[str, str], Iterable[str]]]=None):\n    if fields is None:\n        fields = ['timestamp', 'window_start', 'window_end']\n    if not isinstance(fields, Mapping):\n        if isinstance(fields, Iterable) and (not isinstance(fields, str)):\n            fields = {fld: fld for fld in fields}\n        else:\n            raise TypeError('Fields must be a mapping or iterable of strings, got {fields}')\n    existing_fields = named_fields_from_element_type(pcoll.element_type)\n    new_fields = []\n    for field, value in fields.items():\n        if value not in _WINDOWING_INFO_TYPES:\n            raise ValueError(f'{value} is not a valid windowing parameter; must be one of {list(_WINDOWING_INFO_TYPES.keys())}')\n        elif field in existing_fields:\n            raise ValueError(f'Input schema already has a field named {field}.')\n        else:\n            new_fields.append((field, _WINDOWING_INFO_TYPES[value]))\n\n    def augment_row(row, timestamp=beam.DoFn.TimestampParam, window=beam.DoFn.WindowParam, pane_info=beam.DoFn.PaneInfoParam):\n        as_dict = row._asdict()\n        for field, value in fields.items():\n            as_dict[field] = _WINDOWING_INFO_EXTRACTORS[value](locals())\n        return beam.Row(**as_dict)\n    return pcoll | beam.Map(augment_row).with_output_types(row_type.RowTypeConstraint.from_fields(existing_fields + new_fields))", "docstring": "Extracts the implicit windowing information from an element and makes it\nexplicit as field(s) in the element itself.\n\nThe following windowing parameter values are supported:\n\n* `timestamp`: The event timestamp of the current element.\n* `window_start`: The start of the window iff it is an interval window.\n* `window_end`: The (exclusive) end of the window.\n* `window_string`: The string representation of the window.\n* `window_type`: The type of the window as a string.\n* `winodw_object`: The actual window object itself,\nas a Java or Python object.\n* `pane_info`: A schema'd representation of the current pane info, including\nits index, whether it was the last firing, etc.\n\nAs a convenience, a list rather than a mapping of fields may be provided,\nin which case the fields will be named according to the requested values.\n\nArgs:\nfields: A mapping of new field names to various windowing parameters,\nas documented above.  If omitted, defaults to\n`[timestamp, window_start, window_end]`.", "source": "github-repos"}
{"code": "def activate(self, user):\n    org_user = self.organization.add_user(user, **self.activation_kwargs())\n    self.invitee = user\n    self.save()\n    return org_user", "docstring": "Updates the `invitee` value and saves the instance\n\nProvided as a way of extending the behavior.\n\nArgs:\nuser: the newly created user\n\nReturns:\nthe linking organization user", "source": "codesearchnet"}
{"code": "def _set_graph_parents(self, graph_parents):\n    graph_parents = [] if graph_parents is None else graph_parents\n    for i, t in enumerate(graph_parents):\n        if t is None or not (linear_operator_util.is_ref(t) or tensor_util.is_tf_type(t)):\n            raise ValueError('Graph parent item %d is not a Tensor; %s.' % (i, t))\n    self._graph_parents = graph_parents", "docstring": "Set self._graph_parents.  Called during derived class init.\n\nThis method allows derived classes to set graph_parents, without triggering\na deprecation warning (which is invoked if `graph_parents` is passed during\n`__init__`.\n\nArgs:\ngraph_parents: Iterable over Tensors.", "source": "github-repos"}
{"code": "def severity_level(self, value):\n        \n        if value == self._defaults['severityLevel'] and 'severityLevel' in self._values:\n            del self._values['severityLevel']\n        else:\n            self._values['severityLevel'] = value", "docstring": "The severity_level property.\n\nArgs:\nvalue (int). the property value.", "source": "juraj-google-style"}
{"code": "def post_process(self, outputs, target_sizes):\n    warnings.warn('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.', FutureWarning)\n    logits, boxes = (outputs.logits, outputs.pred_boxes)\n    if len(logits) != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    if target_sizes.shape[1] != 2:\n        raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n    probs = torch.max(logits, dim=-1)\n    scores = torch.sigmoid(probs.values)\n    labels = probs.indices\n    boxes = center_to_corners_format(boxes)\n    img_h, img_w = target_sizes.unbind(1)\n    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n    boxes = boxes * scale_fct[:, None, :]\n    results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n    return results", "docstring": "Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\nbottom_right_x, bottom_right_y) format.\n\nArgs:\noutputs ([`OwlViTObjectDetectionOutput`]):\nRaw outputs of the model.\ntarget_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\nTensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original\nimage size (before any data augmentation). For visualization, this should be the image size after data\naugment, but before padding.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def registration_backend(backend=None, namespace=None):\n    backend = (backend or ORGS_REGISTRATION_BACKEND)\n    (class_module, class_name) = backend.rsplit('.', 1)\n    mod = import_module(class_module)\n    return getattr(mod, class_name)(namespace=namespace)", "docstring": "Returns a specified registration backend\n\nArgs:\nbackend: dotted path to the registration backend class\nnamespace: URL namespace to use\n\nReturns:\nan instance of an RegistrationBackend", "source": "codesearchnet"}
{"code": "def ratio_split(amount, ratios):\n    ratio_total = sum(ratios)\n    divided_value = (amount / ratio_total)\n    values = []\n    for ratio in ratios:\n        value = (divided_value * ratio)\n        values.append(value)\n    rounded = [v.quantize(Decimal('0.01')) for v in values]\n    remainders = [(v - rounded[i]) for (i, v) in enumerate(values)]\n    remainder = sum(remainders)\n    rounded[(- 1)] = (rounded[(- 1)] + remainder).quantize(Decimal('0.01'))\n    assert (sum(rounded) == amount)\n    return rounded", "docstring": "Split in_value according to the ratios specified in `ratios`\n\nThis is special in that it ensures the returned values always sum to\nin_value (i.e. we avoid losses or gains due to rounding errors). As a\nresult, this method returns a list of `Decimal` values with length equal\nto that of `ratios`.\n\nExamples:\n\n.. code-block:: python\n\n>>> from hordak.utilities.money import ratio_split\n>>> from decimal import Decimal\n>>> ratio_split(Decimal('10'), [Decimal('1'), Decimal('2')])\n[Decimal('3.33'), Decimal('6.67')]\n\nNote the returned values sum to the original input of ``10``. If we were to\ndo this calculation in a naive fashion then the returned values would likely\nbe ``3.33`` and ``6.66``, which would sum to ``9.99``, thereby loosing\n``0.01``.\n\nArgs:\namount (Decimal): The amount to be split\nratios (list[Decimal]): The ratios that will determine the split\n\nReturns: list(Decimal)", "source": "codesearchnet"}
{"code": "def _check_boolean(parameter_name, value, parameter_config):\n    if (parameter_config.get('type') != 'boolean'):\n        return\n    if (value.lower() not in ('1', 'true', '0', 'false')):\n        raise errors.BasicTypeParameterError(parameter_name, value, 'boolean')", "docstring": "Checks if a boolean value is valid.\n\nThis is called by the transform_parameter_value function and shouldn't be\ncalled directly.\n\nThis checks that the string value passed in can be converted to a valid\nboolean value.\n\nArgs:\nparameter_name: A string containing the name of the parameter, which is\neither just a variable name or the name with the index appended. For\nexample 'var' or 'var[2]'.\nvalue: A string containing the value passed in for the parameter.\nparameter_config: The dictionary containing information specific to the\nparameter in question. This is retrieved from request.parameters in\nthe method config.\n\nRaises:\nBasicTypeParameterError: If the given value is not a valid boolean\nvalue.", "source": "codesearchnet"}
{"code": "def make_choice_type_function(choices: list) -> Callable[[str], Any]:\n    str_to_choice = {str(choice): choice for choice in choices}\n    return lambda arg: str_to_choice.get(arg, arg)", "docstring": "Creates a mapping function from each choices string representation to the actual value. Used to support multiple\nvalue types for a single argument.\n\nArgs:\nchoices (list): List of choices.\n\nReturns:\nCallable[[str], Any]: Mapping function from string representation to actual value for each choice.", "source": "github-repos"}
{"code": "def find_vulnerabilities(\n    cfg_list,\n    blackbox_mapping_file,\n    sources_and_sinks_file,\n    interactive=False,\n    nosec_lines=defaultdict(set)\n):\n    \n    vulnerabilities = list()\n    definitions = parse(sources_and_sinks_file)\n\n    with open(blackbox_mapping_file) as infile:\n        blackbox_mapping = json.load(infile)\n    for cfg in cfg_list:\n        find_vulnerabilities_in_cfg(\n            cfg,\n            definitions,\n            Lattice(cfg.nodes),\n            blackbox_mapping,\n            vulnerabilities,\n            interactive,\n            nosec_lines\n        )\n\n    if interactive:\n        with open(blackbox_mapping_file, 'w') as outfile:\n            json.dump(blackbox_mapping, outfile, indent=4)\n\n    return vulnerabilities", "docstring": "Find vulnerabilities in a list of CFGs from a trigger_word_file.\n\nArgs:\ncfg_list(list[CFG]): the list of CFGs to scan.\nblackbox_mapping_file(str)\nsources_and_sinks_file(str)\ninteractive(bool): determines if we ask the user about blackbox functions not in the mapping file.\nReturns:\nA list of vulnerabilities.", "source": "juraj-google-style"}
{"code": "def _get_nadir_pixel(earth_mask, sector):\n        \n        if sector == FULL_DISC:\n            logger.debug('Computing nadir pixel')\n\n            \n            \n            rmin, rmax, cmin, cmax = bbox(earth_mask)\n\n            \n            nadir_row = rmin + (rmax - rmin) \n            nadir_col = cmin + (cmax - cmin) \n\n            return nadir_row, nadir_col\n\n        return None, None", "docstring": "Find the nadir pixel\n\nArgs:\nearth_mask: Mask identifying earth and space pixels\nsector: Specifies the scanned sector\nReturns:\nnadir row, nadir column", "source": "juraj-google-style"}
{"code": "def export(self, path, variables_saver=None):\n    \n    \n    proto = saved_model_pb2.SavedModel()\n    proto.CopyFrom(self._proto)\n    assets_map = _make_assets_key_collection(proto, path)\n\n    self._save_all_assets(path, assets_map)\n    self._save_variables(path, variables_saver)\n    self._save_proto(path, proto)", "docstring": "Exports to SavedModel directory.\n\nArgs:\npath: path where to export the SavedModel to.\nvariables_saver: lambda that receives a directory path where to\nexport checkpoints of variables.", "source": "juraj-google-style"}
{"code": "def _shape_tuple(self) -> NoReturn:\n    raise NotImplementedError()", "docstring": "The shape of this Tensor, as a tuple.\n\nThis is more performant than tuple(shape().as_list()) as it avoids\ntwo list and one object creation. Marked private for now as from an API\nperspective, it would be better to have a single performant way of\ngetting a shape rather than exposing shape() and shape_tuple()\n(and heaven forbid, shape_list() etc. as well!). Punting on that for now,\nbut ideally one would work things out and remove the need for this method.\n\nReturns:\ntuple with the shape.", "source": "github-repos"}
{"code": "def cast(self, value, cast_context) -> Any:\n    del cast_context\n    assert value == self.placeholder_value(PlaceholderContext()), f'Can not cast {value!r} to type {self!r}'\n    return value", "docstring": "Cast value to this type.\n\nArgs:\nvalue: An input value belonging to this TraceType.\ncast_context: A context reserved for internal/future usage.\n\nReturns:\nThe value casted to this TraceType.\n\nRaises:\nAssertionError: When _cast is not overloaded in subclass,\nthe value is returned directly, and it should be the same to\nself.placeholder_value().", "source": "github-repos"}
{"code": "def cuts_connections(self, a, b):\n    n = (max(self.indices) + 1)\n    return self.cut_matrix(n)[np.ix_(a, b)].any()", "docstring": "Check if this cut severs any connections from ``a`` to ``b``.\n\nArgs:\na (tuple[int]): A set of nodes.\nb (tuple[int]): A set of nodes.", "source": "codesearchnet"}
{"code": "def __init__(self, get_media_files_func, media_cls, extra_files):\n        \n        self._get_media_files_func = get_media_files_func\n        self._media_cls = media_cls\n        self._extra_files = extra_files", "docstring": "Initialize the property.\n\nArgs:\nget_media_files_func (callable):\nThe function to call to generate the media files.\n\nmedia_cls (type):\nThe Media class owning the property.\n\nextra_files (object):\nFiles listed in the original ``css`` or ``js`` attribute on\nthe Media class.", "source": "juraj-google-style"}
{"code": "def ensure_dir(path):\n    os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True)", "docstring": "Create all parent directories of path if they don't exist.\n\nArgs:\npath. Path-like object. Create parent dirs to this path.\n\nReturn:\nNone.", "source": "codesearchnet"}
{"code": "def get_street_from_xy(self, **kwargs):\n    params = {'coordinateX': kwargs.get('longitude'), 'coordinateY': kwargs.get('latitude'), 'Radius': kwargs.get('radius'), 'cultureInfo': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('geo', 'get_street_from_xy', **params)\n    if (not util.check_result(result, 'site')):\n        return (False, 'UNKNOWN ERROR')\n    values = util.response_list(result, 'site')\n    return (True, [emtype.Street(**a) for a in values])", "docstring": "Obtain a list of streets around the specified point.\n\nArgs:\nlatitude (double): Latitude in decimal degrees.\nlongitude (double): Longitude in decimal degrees.\nradius (int): Radius (in meters) of the search.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Street]), or message string\nin case of error.", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    saved_model_spec = model_spec_pb2.SavedModelSpec(model_path=known_args.model_path)\n    inferece_spec_type = model_spec_pb2.InferenceSpecType(saved_model_spec=saved_model_spec)\n    model_handler = CreateModelHandler(inferece_spec_type)\n    keyed_model_handler = KeyedModelHandler(model_handler)\n    if not pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    filename_value_pair = pipeline | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input) | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines) | 'ProcessImageData' >> beam.Map(lambda image_name: read_and_process_image(image_file_name=image_name, path_to_dir=known_args.images_dir))\n    predictions = filename_value_pair | 'ConvertToExampleProto' >> beam.Map(lambda x: (x[0], convert_image_to_example_proto(x[1]))) | 'TFXRunInference' >> RunInference(keyed_model_handler) | 'PostProcess' >> beam.ParDo(ProcessInferenceToString())\n    _ = predictions | 'WriteOutputToGCS' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def import_object_from_path(path, object):\n    \n    with open(path) as f:\n        return import_object_from_string_code(f.read(), object)", "docstring": "Used to import an object from an absolute path.\n\nThis function takes an absolute path and imports it as a Python module.\nIt then returns the object with name `object` from the imported module.\n\nArgs:\npath (string): Absolute file path of .py file to import\n\nobject (string): Name of object to extract from imported module", "source": "juraj-google-style"}
{"code": "def analyze(fqdn, result, argl, argd):\n    \n    package = fqdn.split('.')[0]\n    if package not in _methods:\n        _load_methods(package)\n        \n    if _methods[package] is not None and fqdn in _methods[package]:\n        return _methods[package][fqdn](fqdn, result, *argl, **argd)", "docstring": "Analyzes the result from calling the method with the specified FQDN.\n\nArgs:\nfqdn (str): full-qualified name of the method that was called.\nresult: result of calling the method with `fqdn`.\nargl (tuple): positional arguments passed to the method call.\nargd (dict): keyword arguments passed to the method call.", "source": "juraj-google-style"}
{"code": "def Options(items, name):\n    options = {}\n    option_re = re.compile('^%s_(.+)' % name)\n    for item in items:\n        match = option_re.match(item[0])\n        if match:\n            options[match.group(1)] = FixValue(item[1])\n    return options", "docstring": "Returns a dict of options specific to an implementation.\n\nThis is used to retrieve a dict of options for a given\nimplementation.  We look for configuration options in the form of\nname_option and ignore the rest.\n\nArgs:\nitems: [('key1', 'value1'), ('key2, 'value2'), ...]\nname: 'foo'\nReturns:\ndictionary of option:value pairs", "source": "github-repos"}
{"code": "def __call__(self, *args, **kwargs):\n        \n        \n        self.kwargs.update(kwargs)\n\n        if self.data_flow_kernel is None:\n            dfk = DataFlowKernelLoader.dfk()\n        else:\n            dfk = self.data_flow_kernel\n\n        app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,\n                             executors=self.executors,\n                             fn_hash=self.func_hash,\n                             cache=self.cache,\n                             **self.kwargs)\n\n        out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)\n                    for o in kwargs.get('outputs', [])]\n        app_fut._outputs = out_futs\n\n        return app_fut", "docstring": "Handle the call to a Bash app.\n\nArgs:\n- Arbitrary\n\nKwargs:\n- Arbitrary\n\nReturns:\nIf outputs=[...] was a kwarg then:\nApp_fut, [Data_Futures...]\nelse:\nApp_fut", "source": "juraj-google-style"}
{"code": "def _get_credentials(self):\n    site = self.data[self.hdx_site]\n    username = site.get('username')\n    if username:\n        return (b64decode(username).decode('utf-8'), b64decode(site['password']).decode('utf-8'))\n    else:\n        return None", "docstring": "Return HDX site username and password\n\nReturns:\nOptional[Tuple[str, str]]: HDX site username and password or None", "source": "codesearchnet"}
{"code": "async def refresh_token(self):\n    (url, headers, body) = self._setup_token_request()\n    request_id = uuid.uuid4()\n    logging.debug(_utils.REQ_LOG_FMT.format(request_id=request_id, method='POST', url=url, kwargs=None))\n    async with self._session.post(url, headers=headers, data=body) as resp:\n        log_kw = {'request_id': request_id, 'method': 'POST', 'url': resp.url, 'status': resp.status, 'reason': resp.reason}\n        logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))\n        try:\n            resp.raise_for_status()\n        except aiohttp.ClientResponseError as e:\n            msg = f'[{request_id}] Issue connecting to {resp.url}: {e}'\n            logging.error(msg, exc_info=e)\n            raise exceptions.GCPHTTPResponseError(msg, resp.status)\n        response = (await resp.json())\n        try:\n            self.token = response['access_token']\n        except KeyError:\n            msg = '[{request_id}] No access token in response.'\n            logging.error(msg)\n            raise exceptions.GCPAuthError(msg)\n    self.expiry = _client._parse_expiry(response)", "docstring": "Refresh oauth access token attached to this HTTP session.\n\nRaises:\n:exc:`.GCPAuthError`: if no token was found in the\nresponse.\n:exc:`.GCPHTTPError`: if any exception occurred,\nspecifically a :exc:`.GCPHTTPResponseError`, if the\nexception is associated with a response status code.", "source": "codesearchnet"}
{"code": "def get_metadata(self, path, include_entities=False, **kwargs):\n    f = self.get_file(path)\n    self.metadata_index.index_file(f.path)\n    if include_entities:\n        entities = f.entities\n        results = entities\n    else:\n        results = {}\n    results.update(self.metadata_index.file_index[path])\n    return results", "docstring": "Return metadata found in JSON sidecars for the specified file.\n\nArgs:\npath (str): Path to the file to get metadata for.\ninclude_entities (bool): If True, all available entities extracted\nfrom the filename (rather than JSON sidecars) are included in\nthe returned metadata dictionary.\nkwargs (dict): Optional keyword arguments to pass onto\nget_nearest().\n\nReturns: A dictionary of key/value pairs extracted from all of the\ntarget file's associated JSON sidecars.\n\nNotes:\nA dictionary containing metadata extracted from all matching .json\nfiles is returned. In cases where the same key is found in multiple\nfiles, the values in files closer to the input filename will take\nprecedence, per the inheritance rules in the BIDS specification.", "source": "codesearchnet"}
{"code": "def load(fh, single=False):\n    \n    if isinstance(fh, stringtypes):\n        s = open(fh, 'r').read()\n    else:\n        s = fh.read()\n    return loads(s, single=single)", "docstring": "Deserialize :class:`Eds` from a file (handle or filename)\n\nArgs:\nfh (str, file): input filename or file object\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of :class:`Eds` objects (unless the *single* option\nis `True`)", "source": "juraj-google-style"}
{"code": "def bestfit_func(self, bestfit_x):\n        \n        bestfit_x = np.array(bestfit_x)\n        if not self.done_bestfit:\n            raise KeyError(\"Do do_bestfit first\")\n        bestfit_y = 0\n        for idx, val in enumerate(self.fit_args):\n            bestfit_y += val * (bestfit_x **\n                                (self.args.get(\"degree\", 1) - idx))\n        return bestfit_y", "docstring": "Returns bestfit_y value\n\nargs:\nbestfit_x: scalar, array_like\nx value\nreturn: scalar, array_like\nbestfit y value", "source": "juraj-google-style"}
{"code": "def matrix_product(mat1, mat2):\n    return np.dot(mat2.T, mat1.T).T", "docstring": "Compute the product of two Fortran contiguous matrices.\n\nThis is to avoid the overhead of NumPy converting to C-contiguous\nbefore computing a matrix product.\n\nDoes so via ``A B = (B^T A^T)^T`` since ``B^T`` and ``A^T`` will be\nC-contiguous without a copy, then the product ``P = B^T A^T`` will\nbe C-contiguous and we can return the view ``P^T`` without a copy.\n\nArgs:\nmat1 (numpy.ndarray): The left-hand side matrix.\nmat2 (numpy.ndarray): The right-hand side matrix.\n\nReturns:\nnumpy.ndarray: The product of the two matrices.", "source": "codesearchnet"}
{"code": "def migrate(belstr: str) -> str:\n    \n\n    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, \"2.0.0\")\n\n    return migrate_ast(bo.ast).to_string()", "docstring": "Migrate BEL 1 to 2.0.0\n\nArgs:\nbel: BEL 1\n\nReturns:\nbel: BEL 2", "source": "juraj-google-style"}
{"code": "def get_nested_plot_frame(obj, key_map, cached=False):\n    clone = obj.map((lambda x: x))\n    for (it1, it2) in zip(obj.traverse((lambda x: x)), clone.traverse((lambda x: x))):\n        if isinstance(it1, DynamicMap):\n            with disable_constant(it2.callback):\n                it2.callback.inputs = it1.callback.inputs\n    with item_check(False):\n        return clone.map((lambda x: get_plot_frame(x, key_map, cached=cached)), [DynamicMap, HoloMap], clone=False)", "docstring": "Extracts a single frame from a nested object.\n\nReplaces any HoloMap or DynamicMap in the nested data structure,\nwith the item corresponding to the supplied key.\n\nArgs:\nobj: Nested Dimensioned object\nkey_map: Dictionary mapping between dimensions and key value\ncached: Whether to allow looking up key in cache\n\nReturns:\nNested datastructure where maps are replaced with single frames", "source": "codesearchnet"}
{"code": "def area_difference(item_a, time_a, item_b, time_b, max_value):\n    size_a = item_a.size(time_a)\n    size_b = item_b.size(time_b)\n    diff = np.sqrt(((size_a - size_b) ** 2))\n    return (np.minimum(diff, max_value) / float(max_value))", "docstring": "RMS Difference in object areas.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def parse_objective_coefficient(entry):\n    for parameter in entry.kinetic_law_reaction_parameters:\n        (pid, name, value, units) = parameter\n        if ((pid == 'OBJECTIVE_COEFFICIENT') or (name == 'OBJECTIVE_COEFFICIENT')):\n            return value\n    return None", "docstring": "Return objective value for reaction entry.\n\nDetect objectives that are specified using the non-standardized\nkinetic law parameters which are used by many pre-FBC SBML models. The\nobjective coefficient is returned for the given reaction, or None if\nundefined.\n\nArgs:\nentry: :class:`SBMLReactionEntry`.", "source": "codesearchnet"}
{"code": "def __add_kickoff_task(cls, job_config, mapreduce_spec):\n    params = {'mapreduce_id': job_config.job_id}\n    kickoff_task = taskqueue.Task(url=((job_config._base_path + '/kickoffjob_callback/') + job_config.job_id), headers=util._get_task_headers(job_config.job_id), params=params)\n    if job_config._hooks_cls:\n        hooks = job_config._hooks_cls(mapreduce_spec)\n        try:\n            hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)\n            return\n        except NotImplementedError:\n            pass\n    kickoff_task.add(job_config.queue_name, transactional=True)", "docstring": "Add kickoff task to taskqueue.\n\nArgs:\njob_config: map_job.JobConfig.\nmapreduce_spec: model.MapreduceSpec,", "source": "codesearchnet"}
{"code": "def _preload_simple_restoration(self, name):\n    deferred_dependencies_list = self._deferred_dependencies.get(name, ())\n    if not deferred_dependencies_list:\n        return\n    for checkpoint_position in deferred_dependencies_list:\n        if not checkpoint_position.is_simple_variable():\n            return None\n    checkpoint_position = max(deferred_dependencies_list, key=lambda restore: restore.checkpoint.restore_uid)\n    return CheckpointInitialValueCallable(checkpoint_position=checkpoint_position)", "docstring": "Return a dependency's value for restore-on-create.\n\nNote the restoration is not deleted; if for some reason preload is called\nand then not assigned to the variable (for example because a custom getter\noverrides the initializer), the assignment will still happen once the\nvariable is tracked (determined based on checkpoint.restore_uid).\n\nArgs:\nname: The object-local name of the dependency holding the variable's\nvalue.\n\nReturns:\nAn callable for use as a variable's initializer/initial_value, or None if\none should not be set (either because there was no variable with this name\nin the checkpoint or because it needs more complex deserialization). Any\nnon-trivial deserialization will happen when the variable object is\ntracked.", "source": "github-repos"}
{"code": "def predict_on_batch(self, x):\n    self._check_call_args('predict_on_batch')\n    _disallow_inside_tf_function('predict_on_batch')\n    with self.distribute_strategy.scope():\n        iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x)\n        self.predict_function = self.make_predict_function()\n        outputs = self.predict_function(iterator)\n    return tf_utils.sync_to_numpy_or_python_type(outputs)", "docstring": "Returns predictions for a single batch of samples.\n\nArgs:\nx: Input data. It could be:\n- A Numpy array (or array-like), or a list of arrays (in case the\nmodel has multiple inputs).\n- A TensorFlow tensor, or a list of tensors (in case the model has\nmultiple inputs).\n\nReturns:\nNumpy array(s) of predictions.\n\nRaises:\nRuntimeError: If `model.predict_on_batch` is wrapped in `tf.function`.\nValueError: In case of mismatch between given number of inputs and\nexpectations of the model.", "source": "github-repos"}
{"code": "def send(self, config, log, obs_id, beam_id):\n        \n        log.info('Starting Pulsar Data Transfer...')\n        socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id))\n        socket.send(json.dumps(config).encode())\n        socket.send(bytearray(1000 * 1000))\n\n        \n        \n        config['metadata']['name'] = 'candidate_two'\n        socket.send(json.dumps(config).encode())\n        socket.send(bytearray(1000 * 1000))\n        socket.close()\n        log.info('Pulsar Data Transfer Completed...')", "docstring": "Send the pulsar data to the ftp server\n\nArgs:\nconfig (dict): Dictionary of settings\nlog (logging.Logger): Python logging object\nobs_id: observation id\nbeam_id: beam id", "source": "juraj-google-style"}
{"code": "def load_orthologs(fo: IO, metadata: dict):\n    \n\n    version = metadata[\"metadata\"][\"version\"]\n\n    \n    with timy.Timer(\"Load Orthologs\") as timer:\n        arango_client = arangodb.get_client()\n        belns_db = arangodb.get_belns_handle(arango_client)\n        arangodb.batch_load_docs(\n            belns_db, orthologs_iterator(fo, version), on_duplicate=\"update\"\n        )\n\n        log.info(\n            \"Load orthologs\",\n            elapsed=timer.elapsed,\n            source=metadata[\"metadata\"][\"source\"],\n        )\n\n        \n        remove_old_ortholog_edges = f\n        remove_old_ortholog_nodes = f\n        arangodb.aql_query(belns_db, remove_old_ortholog_edges)\n        arangodb.aql_query(belns_db, remove_old_ortholog_nodes)\n\n    \n    metadata[\"_key\"] = f\"Orthologs_{metadata['metadata']['source']}\"\n    try:\n        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)\n    except ArangoError as ae:\n        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)", "docstring": "Load orthologs into ArangoDB\n\nArgs:\nfo: file obj - orthologs file\nmetadata: dict containing the metadata for orthologs", "source": "juraj-google-style"}
{"code": "def set_hash_value(self, key, field, value, pipeline=False):\n        \n        \n        if pipeline:\n            self._pipeline.hset(key, field, str(value))\n        else:\n            self._db.hset(key, field, str(value))", "docstring": "Set the value of field in a hash stored at key.\n\nArgs:\nkey (str): key (name) of the hash\nfield (str): Field within the hash to set\nvalue: Value to set\npipeline (bool): True, start a transaction block. Default false.", "source": "juraj-google-style"}
{"code": "def __init__(self, processor_configuration):\n    \n    \n    transformer_config = processor_configuration[\"transformer\"]\n    FLAGS.output_dir = transformer_config[\"model_dir\"]\n    usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n    data_dir = os.path.expanduser(transformer_config[\"data_dir\"])\n\n    \n    self.hparams = trainer_lib.create_hparams(\n        transformer_config[\"hparams_set\"],\n        transformer_config[\"hparams\"],\n        data_dir=data_dir,\n        problem_name=transformer_config[\"problem\"])\n\n    decode_hp = decoding.decode_hparams()\n    decode_hp.add_hparam(\"shards\", 1)\n    decode_hp.add_hparam(\"shard_id\", 0)\n\n    \n    self.estimator = trainer_lib.create_estimator(\n        transformer_config[\"model\"],\n        self.hparams,\n        t2t_trainer.create_run_config(self.hparams),\n        decode_hparams=decode_hp, use_tpu=False)\n\n    \n    self.source_vocab = self.hparams.problem_hparams.vocabulary[\"inputs\"]\n    self.targets_vocab = self.hparams.problem_hparams.vocabulary[\"targets\"]\n    self.const_array_size = 10000\n\n    \n    run_dirs = sorted(glob.glob(os.path.join(\"/tmp/t2t_server_dump\", \"run_*\")))\n    for run_dir in run_dirs:\n      shutil.rmtree(run_dir)", "docstring": "Creates the Transformer estimator.\n\nArgs:\nprocessor_configuration: A ProcessorConfiguration protobuffer with the\ntransformer fields populated.", "source": "juraj-google-style"}
{"code": "def get_dispatcher_event(self, name):\n        \n        e = self.__property_events.get(name)\n        if e is None:\n            e = self.__events[name]\n        return e", "docstring": "Retrieves an Event object by name\n\nArgs:\nname (str): The name of the :class:`Event` or\n:class:`~pydispatch.properties.Property` object to retrieve\n\nReturns:\nThe :class:`Event` instance for the event or property definition\n\n.. versionadded:: 0.1.0", "source": "juraj-google-style"}
{"code": "def _get_longest(value_lst: List) -> List:\n        \n\n        value_lst.sort()\n        result = []\n        pivot = value_lst[0]\n        start, end = pivot[0], pivot[1]\n        pivot_e = end\n        pivot_s = start\n        for idx, (s, e, v, rule_id, _) in enumerate(value_lst):\n            if s == pivot_s and pivot_e < e:\n                pivot_e = e\n                pivot = value_lst[idx]\n            elif s != pivot_s and pivot_e < e:\n                result.append(pivot)\n                pivot = value_lst[idx]\n                pivot_e = e\n                pivot_s = s\n        result.append(pivot)\n        return result", "docstring": "Get the longest match for overlap\nArgs:\nvalue_lst: List\n\nReturns: List", "source": "juraj-google-style"}
{"code": "def _unittest_template(config):\n    \n    output = \"def test_parsers():\\n\"\n\n    links = dict(map(lambda x: (x[\"link\"], x[\"vars\"]), config))\n\n    for link in links.keys():\n        output += IND + \"\n        output += IND + \"html = handle_encodnig(\\n\"\n        output += IND + IND + \"_get_source(%s)\\n\" % repr(link)\n        output += IND + \")\\n\"\n        output += IND + \"dom = dhtmlparser.parseString(html)\\n\"\n        output += IND + \"dhtmlparser.makeDoubleLinked(dom)\\n\\n\"\n\n        for var in links[link]:\n            content = links[link][var][\"data\"].strip()\n\n            output += IND + \"%s = %s(dom)\\n\" % (var, _get_parser_name(var))\n\n            if \"\\n\" in content:\n                output += IND\n                output += \"assert %s.getContent().strip().split() == %s\" % (\n                    var,\n                    repr(content.split())\n                )\n            else:\n                output += IND + \"assert %s.getContent().strip() == %s\" % (\n                    var,\n                    repr(content)\n                )\n\n            output += \"\\n\\n\"\n\n    return output + \"\\n\"", "docstring": "Generate unittests for all of the generated code.\n\nArgs:\nconfig (dict): Original configuration dictionary. See\n:mod:`~harvester.autoparser.conf_reader` for details.\n\nReturns:\nstr: Python code.", "source": "juraj-google-style"}
{"code": "def get_meta_graph_def(saved_model_dir, tag_set):\n    return saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)", "docstring": "DEPRECATED: Use saved_model_utils.get_meta_graph_def instead.\n\nGets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given\ntag-set and SavedModel directory.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect or execute.\ntag_set: Group of tag(s) of the MetaGraphDef to load, in string format,\nseparated by ','. For tag-set contains multiple tags, all tags must be\npassed in.\n\nRaises:\nRuntimeError: An error when the given tag-set does not exist in the\nSavedModel.\n\nReturns:\nA MetaGraphDef corresponding to the tag-set.", "source": "github-repos"}
{"code": "def get_organizations(self, permission='read'):\n        \n        \n        success, result = self._read_from_hdx('user', self.data['name'], 'id', self.actions()['listorgs'],\n                                              permission=permission)\n        organizations = list()\n        if success:\n            for organizationdict in result:\n                organization = hdx.data.organization.Organization.read_from_hdx(organizationdict['id'])\n                organizations.append(organization)\n        return organizations", "docstring": "Get organizations in HDX that this user is a member of.\n\nArgs:\npermission (str): Permission to check for. Defaults to 'read'.\n\nReturns:\nList[Organization]: List of organizations in HDX that this user is a member of", "source": "juraj-google-style"}
{"code": "def sil(msg, version):\n    tc = typecode(msg)\n    if (tc not in [29, 31]):\n        raise RuntimeError(('%s: Not a target state and status messag,                            or operation status message, expecting TC = 29 or 31' % msg))\n    msgbin = common.hex2bin(msg)\n    if (tc == 29):\n        SIL = common.bin2int(msgbin[76:78])\n    elif (tc == 31):\n        SIL = common.bin2int(msgbin[82:84])\n    try:\n        PE_RCu = uncertainty.SIL[SIL]['PE_RCu']\n        PE_VPL = uncertainty.SIL[SIL]['PE_VPL']\n    except KeyError:\n        (PE_RCu, PE_VPL) = (uncertainty.NA, uncertainty.NA)\n    base = 'unknown'\n    if (version == 2):\n        if (tc == 29):\n            SIL_SUP = common.bin2int(msgbin[39])\n        elif (tc == 31):\n            SIL_SUP = common.bin2int(msgbin[86])\n        if (SIL_SUP == 0):\n            base = 'hour'\n        elif (SIL_SUP == 1):\n            base = 'sample'\n    return (PE_RCu, PE_VPL, base)", "docstring": "Calculate SIL, Surveillance Integrity Level\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string with TC = 29, 31\n\nReturns:\nint or string: Probability of exceeding Horizontal Radius of Containment RCu\nint or string: Probability of exceeding Vertical Integrity Containment Region VPL\nstring: SIL supplement based on per \"hour\" or \"sample\", or 'unknown'", "source": "codesearchnet"}
{"code": "def callEventGetAllRpc(self, callback_id, event_name):", "docstring": "Calls snippet lib's RPC to get all existing snippet events.\n\nOverride this method to use this class with various snippet lib\nimplementations.\n\nThis function gets all existing events in the server with the specified\nidentifier without waiting.\n\nArgs:\ncallback_id: str, the callback identifier.\nevent_name: str, the callback name.\n\nReturns:\nA list of event dictionaries.", "source": "github-repos"}
{"code": "def compute_fov(self, x, y, fov='PERMISSIVE', radius=None, light_walls=True, sphere=True, cumulative=False):\n    if (radius is None):\n        radius = 0\n    if cumulative:\n        fov_copy = self.fov.copy()\n    lib.TCOD_map_compute_fov(self.map_c, x, y, radius, light_walls, _get_fov_type(fov))\n    if cumulative:\n        self.fov[:] |= fov_copy\n    return zip(*np.where(self.fov))", "docstring": "Compute the field-of-view of this Map and return an iterator of the\npoints touched.\n\nArgs:\nx (int): Point of view, x-coordinate.\ny (int): Point of view, y-coordinate.\nfov (Text): The type of field-of-view to be used.\n\nAvailable types are:\n'BASIC', 'DIAMOND', 'SHADOW', 'RESTRICTIVE', 'PERMISSIVE',\n'PERMISSIVE0', 'PERMISSIVE1', ..., 'PERMISSIVE8'\nradius (Optional[int]): Maximum view distance from the point of\nview.\n\nA value of 0 will give an infinite distance.\nlight_walls (bool): Light up walls, or only the floor.\nsphere (bool): If True the lit area will be round instead of\nsquare.\ncumulative (bool): If True the lit cells will accumulate instead\nof being cleared before the computation.\n\nReturns:\nIterator[Tuple[int, int]]: An iterator of (x, y) points of tiles\ntouched by the field-of-view.", "source": "codesearchnet"}
{"code": "def getall(self):\n    vrrps = dict()\n    interfaces = re.findall('^interface\\\\s(\\\\S+)', self.config, re.M)\n    for interface in interfaces:\n        vrrp = self.get(interface)\n        if vrrp:\n            vrrps.update({interface: vrrp})\n    return vrrps", "docstring": "Get the vrrp configurations for all interfaces on a node\n\nReturns:\nA dictionary containing the vrrp configurations on the node,\nkeyed by interface.", "source": "codesearchnet"}
{"code": "def get_headline(self, name):\n        \n\n        return self._loop.run_coroutine(self._client.get_headline(name))", "docstring": "Get stored messages for a service.\n\nArgs:\nname (string): The name of the service to get messages from.\n\nReturns:\nServiceMessage: the headline or None if no headline has been set", "source": "juraj-google-style"}
{"code": "def parse_device_list(device_list_str, key):\n    clean_lines = new_str(device_list_str, 'utf-8').strip().split('\\n')\n    results = []\n    for line in clean_lines:\n        tokens = line.strip().split('\\t')\n        if ((len(tokens) == 2) and (tokens[1] == key)):\n            results.append(tokens[0])\n    return results", "docstring": "Parses a byte string representing a list of devices.\n\nThe string is generated by calling either adb or fastboot. The tokens in\neach string is tab-separated.\n\nArgs:\ndevice_list_str: Output of adb or fastboot.\nkey: The token that signifies a device in device_list_str.\n\nReturns:\nA list of android device serial numbers.", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    filter_expression = cls._ParseStringOption(options, 'filter')\n\n    filter_object = None\n    if filter_expression:\n      filter_object = event_filter.EventObjectFilter()\n\n      try:\n        filter_object.CompileFilter(filter_expression)\n      except errors.ParseError as exception:\n        raise errors.BadConfigOption((\n            'Unable to compile filter expression with error: '\n            '{0!s}').format(exception))\n\n    time_slice_event_time_string = getattr(options, 'slice', None)\n    time_slice_duration = getattr(options, 'slice_size', 5)\n    use_time_slicer = getattr(options, 'slicer', False)\n\n    \n    if time_slice_event_time_string and use_time_slicer:\n      raise errors.BadConfigOption(\n          'Time slice and slicer cannot be used at the same time.')\n\n    time_slice_event_timestamp = None\n    if time_slice_event_time_string:\n      \n      preferred_time_zone = getattr(\n          configuration_object, '_preferred_time_zone', None) or 'UTC'\n      timezone = pytz.timezone(preferred_time_zone)\n      time_slice_event_timestamp = timelib.Timestamp.FromTimeString(\n          time_slice_event_time_string, timezone=timezone)\n      if time_slice_event_timestamp is None:\n        raise errors.BadConfigOption(\n            'Unsupported time slice event date and time: {0:s}'.format(\n                time_slice_event_time_string))\n\n    setattr(configuration_object, '_event_filter_expression', filter_expression)\n\n    if filter_object:\n      setattr(configuration_object, '_event_filter', filter_object)\n\n    setattr(configuration_object, '_use_time_slicer', use_time_slicer)\n\n    if time_slice_event_timestamp is not None or use_time_slicer:\n      \n      \n      time_slice = time_slices.TimeSlice(\n          time_slice_event_timestamp, duration=time_slice_duration)\n      setattr(configuration_object, '_time_slice', time_slice)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"}
{"code": "def from_networkx(graph, layout_function, **kwargs):\n    from ..models.renderers import GraphRenderer\n    from ..models.graphs import StaticLayoutProvider\n    node_dict = dict()\n    node_attr_keys = [attr_key for node in list(graph.nodes(data=True)) for attr_key in node[1].keys()]\n    node_attr_keys = list(set(node_attr_keys))\n    for attr_key in node_attr_keys:\n        values = [(node_attr[attr_key] if (attr_key in node_attr.keys()) else None) for (_, node_attr) in graph.nodes(data=True)]\n        values = _handle_sublists(values)\n        node_dict[attr_key] = values\n    if ('index' in node_attr_keys):\n        from warnings import warn\n        warn(\"Converting node attributes labeled 'index' are skipped. If you want to convert these attributes, please re-label with other names.\")\n    node_dict['index'] = list(graph.nodes())\n    edge_dict = dict()\n    edge_attr_keys = [attr_key for edge in graph.edges(data=True) for attr_key in edge[2].keys()]\n    edge_attr_keys = list(set(edge_attr_keys))\n    for attr_key in edge_attr_keys:\n        values = [(edge_attr[attr_key] if (attr_key in edge_attr.keys()) else None) for (_, _, edge_attr) in graph.edges(data=True)]\n        values = _handle_sublists(values)\n        edge_dict[attr_key] = values\n    if (('start' in edge_attr_keys) or ('end' in edge_attr_keys)):\n        from warnings import warn\n        warn(\"Converting edge attributes labeled 'start' or 'end' are skipped. If you want to convert these attributes, please re-label them with other names.\")\n    edge_dict['start'] = [x[0] for x in graph.edges()]\n    edge_dict['end'] = [x[1] for x in graph.edges()]\n    node_source = ColumnDataSource(data=node_dict)\n    edge_source = ColumnDataSource(data=edge_dict)\n    graph_renderer = GraphRenderer()\n    graph_renderer.node_renderer.data_source.data = node_source.data\n    graph_renderer.edge_renderer.data_source.data = edge_source.data\n    if callable(layout_function):\n        graph_layout = layout_function(graph, **kwargs)\n    else:\n        graph_layout = layout_function\n        node_keys = graph_renderer.node_renderer.data_source.data['index']\n        if (set(node_keys) != set(layout_function.keys())):\n            from warnings import warn\n            warn(\"Node keys in 'layout_function' don't match node keys in the graph. These nodes may not be displayed correctly.\")\n    graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)\n    return graph_renderer", "docstring": "Generate a ``GraphRenderer`` from a ``networkx.Graph`` object and networkx\nlayout function. Any keyword arguments will be passed to the\nlayout function.\n\nOnly two dimensional layouts are supported.\n\nArgs:\ngraph (networkx.Graph) : a networkx graph to render\nlayout_function (function or dict) : a networkx layout function or mapping of node keys to positions.\nThe position is a two element sequence containing the x and y coordinate.\n\nReturns:\ninstance (GraphRenderer)\n\n.. note::\nNode and edge attributes may be lists or tuples. However, a given\nattribute must either have *all* lists or tuple values, or *all*\nscalar values, for nodes or edges it is defined on.\n\n.. warning::\nNode attributes labeled 'index' and edge attributes labeled 'start' or 'end' are ignored.\nIf you want to convert these attributes, please re-label them to other names.\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def showRemoveColumnDialog(self, triggered):\n        \n        if triggered:\n            model = self.tableView.model()\n            if model is not None:\n                columns = model.dataFrameColumns()\n                dialog = RemoveAttributesDialog(columns, self)\n                dialog.accepted.connect(self.removeColumns)\n                dialog.rejected.connect(self.uncheckButton)\n                dialog.show()", "docstring": "Display the dialog to remove column(s) from the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the dialog will be created and shown.", "source": "juraj-google-style"}
{"code": "def _prepare_variables(self):\n    self._moving_averager = tf.train.ExponentialMovingAverage(decay=self._beta, zero_debias=self._zero_debias)\n    prepare_variables_op = []\n    self._grad_squared = []\n    self._grad_norm_squared = []\n    for (v, g) in zip(self._vars, self._grad):\n        if (g is None):\n            continue\n        with tf.colocate_with(v):\n            self._grad_squared.append(tf.square(g))\n    self._grad_norm_squared = [tf.reduce_sum(g_sq) for g_sq in self._grad_squared]\n    if self._sparsity_debias:\n        avg_op_sparsity = self._grad_sparsity()\n        prepare_variables_op.append(avg_op_sparsity)\n    avg_op = self._moving_averager.apply(self._grad_norm_squared)\n    with tf.control_dependencies([avg_op]):\n        self._grad_norm_squared_avg = [self._moving_averager.average(val) for val in self._grad_norm_squared]\n        self._grad_norm_squared = tf.add_n(self._grad_norm_squared)\n        self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)\n    prepare_variables_op.append(avg_op)\n    return tf.group(*prepare_variables_op)", "docstring": "Prepare Variables for YellowFin.\n\nReturns:\nGrad**2, Norm, Norm**2, Mean(Norm**2) ops", "source": "codesearchnet"}
{"code": "def groups_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"channel\": channel, \"thread_ts\": thread_ts})\n        return self.api_call(\"groups.replies\", http_verb=\"GET\", params=kwargs)", "docstring": "Retrieve a thread of messages posted to a private channel\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nthread_ts (str): The timestamp of an existing message with 0 or more replies.\ne.g. '1234567890.123456'", "source": "juraj-google-style"}
{"code": "def compute_fans(shape):\n    shape = tuple(shape)\n    if len(shape) < 1:\n        fan_in = fan_out = 1\n    elif len(shape) == 1:\n        fan_in = fan_out = shape[0]\n    elif len(shape) == 2:\n        fan_in = shape[0]\n        fan_out = shape[1]\n    else:\n        receptive_field_size = 1\n        for dim in shape[:-2]:\n            receptive_field_size *= dim\n        fan_in = shape[-2] * receptive_field_size\n        fan_out = shape[-1] * receptive_field_size\n    return (int(fan_in), int(fan_out))", "docstring": "Computes the number of input and output units for a weight shape.\n\nArgs:\nshape: Integer shape tuple.\n\nReturns:\nA tuple of integer scalars: `(fan_in, fan_out)`.", "source": "github-repos"}
{"code": "def _best_effort_input_batch_size(flat_input):\n    for input_ in flat_input:\n        shape = input_.shape\n        if shape.rank is None:\n            continue\n        if shape.rank < 2:\n            raise ValueError(f'Input tensor should have rank >= 2. Received input={input_} of rank {shape.rank}')\n        batch_size = shape.dims[1].value\n        if batch_size is not None:\n            return batch_size\n    return array_ops.shape(flat_input[0])[1]", "docstring": "Get static input batch size if available, with fallback to the dynamic one.\n\nArgs:\nflat_input: An iterable of time major input Tensors of shape `[max_time,\nbatch_size, ...]`. All inputs should have compatible batch sizes.\n\nReturns:\nThe batch size in Python integer if available, or a scalar Tensor otherwise.\n\nRaises:\nValueError: if there is any input with an invalid shape.", "source": "github-repos"}
{"code": "def _parse_redistribution(self, config):\n    redistributions = list()\n    regexp = 'redistribute .*'\n    matches = re.findall(regexp, config)\n    for line in matches:\n        ospf_redist = line.split()\n        if (len(ospf_redist) == 2):\n            protocol = ospf_redist[1]\n            redistributions.append(dict(protocol=protocol))\n        if (len(ospf_redist) == 4):\n            protocol = ospf_redist[1]\n            route_map_name = ospf_redist[3]\n            redistributions.append(dict(protocol=protocol, route_map=route_map_name))\n    return dict(redistributions=redistributions)", "docstring": "Parses config file for the OSPF router ID\n\nArgs:\nconfig (str):  Running configuration\nReturns:\nlist: dict:\nkeys: protocol (str)\nroute-map (optional) (str)", "source": "codesearchnet"}
{"code": "def _update_example(self, request):\n    \n    if request.method != 'POST':\n      return http_util.Respond(request, {'error': 'invalid non-POST request'},\n                               'application/json', code=405)\n    example_json = request.form['example']\n    index = int(request.form['index'])\n    if index >= len(self.examples):\n      return http_util.Respond(request, {'error': 'invalid index provided'},\n                               'application/json', code=400)\n    new_example = self.example_class()\n    json_format.Parse(example_json, new_example)\n    self.examples[index] = new_example\n    self.updated_example_indices.add(index)\n    self.generate_sprite([ex.SerializeToString() for ex in self.examples])\n    return http_util.Respond(request, {}, 'application/json')", "docstring": "Updates the specified example.\n\nArgs:\nrequest: A request that should contain 'index' and 'example'.\n\nReturns:\nAn empty response.", "source": "juraj-google-style"}
{"code": "def reset(self):\n    self._reset_ptr[0] = True\n    self._commands.clear()\n    for _ in range((self._pre_start_steps + 1)):\n        self.tick()\n    return self._default_state_fn()", "docstring": "Resets the environment, and returns the state.\nIf it is a single agent environment, it returns that state for that agent. Otherwise, it returns a dict from\nagent name to state.\n\nReturns:\ntuple or dict: For single agent environment, returns the same as `step`.\nFor multi-agent environment, returns the same as `tick`.", "source": "codesearchnet"}
{"code": "def isset(name):\n    \n    def wrapped(func):\n        @functools.wraps(func)\n        def _decorator(*args, **kwargs):\n            if core.isset(name):\n                return func(*args, **kwargs)\n        return _decorator\n    return wrapped", "docstring": "Only execute the function if the variable is set.\n\nArgs:\nname: The name of the environment variable\n\nReturns:\nThe function return value or `None` if the function was skipped.", "source": "juraj-google-style"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n        hidden_states = outputs[0]\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.model.variables['params']['shared']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n        else:\n            lm_logits = module.lm_head(hidden_states)\n        lm_logits += module.final_logits_bias.astype(self.dtype)\n        return (lm_logits, outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = 
unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration\n\n>>> model = FlaxBartForConditionalGeneration.from_pretrained(\"facebook/bart-large-cnn\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/bart-large-cnn\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def allDecisions(self, result, **values):\n    data = self.__getDecision(result, multiple=True, **values)\n    data = [data[value] for value in result]\n    if (len(data) == 1):\n        return data[0]\n    else:\n        return data", "docstring": "Joust like self.decision but for multiple finded values.\n\nReturns:\nArrays of arrays of finded elements or if finds only one mach, array of strings.", "source": "codesearchnet"}
{"code": "def check(schema, data, trace=False):\n    \n    if trace == True:\n        trace = 1\n    else:\n        trace = None\n    return _check(schema, data, trace=trace)", "docstring": "Verify some json.\n\nArgs:\nschema - the description of a general-case 'valid' json object.\ndata - the json data to verify.\n\nReturns:\nbool: True if data matches the schema, False otherwise.\n\nRaises:\nTypeError:\nIf the schema is of an unknown data type.\nValueError:\nIf the schema contains a string with an invalid value.\nIf the schema attempts to reference a non-existent named schema.", "source": "juraj-google-style"}
{"code": "def rewrite_grad_indexed_slices(grads, body_grad_graph, loop_vars, forward_inputs):\n    inputs_with_grads = [t for g, t in zip(grads, forward_inputs) if g is not None]\n    structured_outputs = body_grad_graph.structured_outputs[3:]\n    for forward_input, output in zip(inputs_with_grads, structured_outputs):\n        if not isinstance(output, indexed_slices.IndexedSlices):\n            continue\n        if forward_input.dtype == dtypes.resource:\n            loop_vars = _rewrite_input_as_indexed_slices(body_grad_graph, output, forward_input, loop_vars)\n        else:\n            _rewrite_output_as_tensor(body_grad_graph, output)\n    return loop_vars", "docstring": "Handles special case of IndexedSlices returned from while gradient.\n\nSome gradient functions return IndexedSlices instead of a Tensor (e.g. the\ngradient of Gather ops). When this happens in the gradient of a while body,\nthe resulting gradient body function will have mismatched inputs and outputs,\nsince the input is a single Tensor, but the IndexedSlices gets unnested into\nthree output Tensors.\n\nThis function fixes this by rewriting the gradient body to have three inputs\nto match the three outputs, i.e., it effectively converts the input Tensor\ninto an input IndexedSlices. It also returns new `loop_vars` to reflect the\nnew inputs.\n\nArgs:\ngrads: the input gradient Tensors to the while gradient computation.\nbody_grad_graph: _WhileBodyGradFuncGraph.\nloop_vars: list of Tensors. The inputs to body_grad_graph.\nforward_inputs: list of Tensors. The (flat) inputs to the forward-pass While\nop.\n\nReturns:\nThe new loop_vars to pass to body_grad_graph.", "source": "github-repos"}
{"code": "def rollaxis(vari, axis, start=0):\n    if isinstance(vari, Poly):\n        core_old = vari.A.copy()\n        core_new = {}\n        for key in vari.keys:\n            core_new[key] = rollaxis(core_old[key], axis, start)\n        return Poly(core_new, vari.dim, None, vari.dtype)\n    return numpy.rollaxis(vari, axis, start)", "docstring": "Roll the specified axis backwards, until it lies in a given position.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nInput array or polynomial.\naxis (int):\nThe axis to roll backwards. The positions of the other axes do not\nchange relative to one another.\nstart (int):\nThe axis is rolled until it lies before thes position.", "source": "codesearchnet"}
{"code": "async def register(*address_list, cluster=None, loop=None):\n    \n\n    loop = loop or asyncio.get_event_loop()\n    for address in address_list:\n        host, port = address.rsplit(':', 1)\n        node = Node(address=(host, int(port)), loop=loop)\n        await node.start()\n\n        for address in cluster:\n            host, port = address.rsplit(':', 1)\n            port = int(port)\n\n            if (host, port) != (node.host, node.port):\n                node.update_cluster((host, port))", "docstring": "Start Raft node (server)\nArgs:\naddress_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]\ncluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]", "source": "juraj-google-style"}
{"code": "def parse_tddft(self):\n    start_tag = 'Convergence criterion met'\n    end_tag = 'Excited state energy'\n    singlet_tag = 'singlet excited'\n    triplet_tag = 'triplet excited'\n    state = 'singlet'\n    inside = False\n    lines = self.raw.split('\\n')\n    roots = {'singlet': [], 'triplet': []}\n    while lines:\n        line = lines.pop(0).strip()\n        if (start_tag in line):\n            inside = True\n        elif (end_tag in line):\n            inside = False\n        elif (singlet_tag in line):\n            state = 'singlet'\n        elif (triplet_tag in line):\n            state = 'triplet'\n        elif (inside and ('Root' in line) and ('eV' in line)):\n            toks = line.split()\n            roots[state].append({'energy': float(toks[(- 2)])})\n        elif (inside and ('Dipole Oscillator Strength' in line)):\n            osc = float(line.split()[(- 1)])\n            roots[state][(- 1)]['osc_strength'] = osc\n    return roots", "docstring": "Parses TDDFT roots. Adapted from nw_spectrum.py script.\n\nReturns:\n{\n\"singlet\": [\n{\n\"energy\": float,\n\"osc_strength: float\n}\n],\n\"triplet\": [\n{\n\"energy\": float\n}\n]\n}", "source": "codesearchnet"}
{"code": "def install(self, package: str, option: str = '-r') -> None:\n        \n        if not os.path.isfile(package):\n            raise FileNotFoundError(f'{package!r} does not exist.')\n        for i in option:\n            if i not in '-lrtsdg':\n                raise ValueError(f'There is no option named: {option!r}.')\n        self._execute('-s', self.device_sn, 'install', option, package)", "docstring": "Push package to the device and install it.\n\nArgs:\noption:\n-l: forward lock application\n-r: replace existing application\n-t: allow test packages\n-s: install application on sdcard\n-d: allow version code downgrade (debuggable packages only)\n-g: grant all runtime permissions", "source": "juraj-google-style"}
{"code": "def __init__(self, details):\n\t\t\n\n\t\t\n\t\tif not isinstance(details, dict):\n\t\t\traise ValueError('details')\n\n\t\t\n\t\tif '__array__' not in details:\n\t\t\traise KeyError('__array__')\n\n\t\t\n\t\tif not isinstance(details['__array__'], dict):\n\t\t\tdetails['__array__'] = {\n\t\t\t\t\"type\": details['__array__']\n\t\t\t}\n\n\t\t\n\t\tif not 'type' in details['__array__']:\n\t\t\tself._type = 'unique'\n\n\t\t\n\t\telif details['__array__']['type'] not in self._VALID_ARRAY:\n\t\t\tself._type\t= 'unique'\n\t\t\tsys.stderr.write('\"' + str(details['__array__']['type']) + '\" is not a valid type for __array__, assuming \"unique\"')\n\n\t\t\n\t\telse:\n\t\t\tself._type = details['__array__']['type']\n\n\t\t\n\t\tself._minimum = None\n\t\tself._maximum = None\n\n\t\t\n\t\tif 'minimum' in details['__array__'] \\\n\t\t\tor 'maximum' in details['__array__']:\n\t\t\tself.minmax(\n\t\t\t\t('minimum' in details['__array__'] and details['__array__']['minimum'] or None),\n\t\t\t\t('maximum' in details['__array__'] and details['__array__']['maximum'] or None)\n\t\t\t)\n\n\t\t\n\t\tif '__optional__' in details:\n\t\t\tbOptional = details['__optional__']\n\t\t\tdel details['__optional__']\n\t\telif 'optional' in details['__array__']:\n\t\t\tbOptional = details['__array__']['optional']\n\t\telse:\n\t\t\tbOptional = None\n\n\t\t\n\t\tdel details['__array__']\n\n\t\t\n\t\tself._node = _child(details)\n\n\t\t\n\t\tif bOptional:\n\t\t\tdetails['__optional__'] = sOptional\n\n\t\t\n\t\tsuper(ArrayNode, self).__init__(details, 'ArrayNode')", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of values allowed for\nthe node\n\nRaises:\nKeyError\nValueError\n\nReturns:\nArrayNode", "source": "juraj-google-style"}
{"code": "def _dms_formatter(latitude, longitude, mode, unistr=False):\n    \n    if unistr:\n        chars = ('°', '′', '″')\n    else:\n        chars = ('°', \"'\", '\"')\n\n    latitude_dms = tuple(map(abs, utils.to_dms(latitude, mode)))\n    longitude_dms = tuple(map(abs, utils.to_dms(longitude, mode)))\n    text = []\n    if mode == 'dms':\n        text.append('%%02i%s%%02i%s%%02i%s' % chars % latitude_dms)\n    else:\n        text.append('%%02i%s%%05.2f%s' % chars[:2] % latitude_dms)\n    text.append('S' if latitude < 0 else 'N')\n    if mode == 'dms':\n        text.append(', %%03i%s%%02i%s%%02i%s' % chars % longitude_dms)\n    else:\n        text.append(', %%03i%s%%05.2f%s' % chars[:2] % longitude_dms)\n    text.append('W' if longitude < 0 else 'E')\n    return text", "docstring": "Generate a human readable DM/DMS location string.\n\nArgs:\nlatitude (float): Location's latitude\nlongitude (float): Location's longitude\nmode (str): Coordinate formatting system to use\nunistr (bool): Whether to use extended character set", "source": "juraj-google-style"}
{"code": "def exit(self, code=None, msg=None):\n        \n        if code is None:\n            code = self.tcex.exit_code\n            if code == 3:\n                self.tcex.log.info(u'Changing exit code from 3 to 0.')\n                code = 0  \n        elif code not in [0, 1]:\n            code = 1\n        self.tcex.exit(code, msg)", "docstring": "Playbook wrapper on TcEx exit method\n\nPlaybooks do not support partial failures so we change the exit method from 3 to 1 and call\nit a partial success instead.\n\nArgs:\ncode (Optional [integer]): The exit code value for the app.", "source": "juraj-google-style"}
{"code": "def random_get_int(rnd: Optional[tcod.random.Random], mi: int, ma: int) -> int:\n    \n    return int(\n        lib.TCOD_random_get_int(rnd.random_c if rnd else ffi.NULL, mi, ma)\n    )", "docstring": "Return a random integer in the range: ``mi`` <= n <= ``ma``.\n\nThe result is affected by calls to :any:`random_set_distribution`.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\nlow (int): The lower bound of the random range, inclusive.\nhigh (int): The upper bound of the random range, inclusive.\n\nReturns:\nint: A random integer in the range ``mi`` <= n <= ``ma``.", "source": "juraj-google-style"}
{"code": "def configuration_from_paths(paths, strict=True):\n    for path in paths:\n        cfg = configfile_from_path(path, strict=strict).config\n    return cfg", "docstring": "Get a Configuration object based on multiple file paths.\n\nArgs:\npaths (iter of str): An iterable of file paths which identify config\nfiles on the system.\nstrict (bool): Whether or not to parse the files in strict mode.\n\nReturns:\nconfpy.core.config.Configuration: The loaded configuration object.\n\nRaises:\nNamespaceNotRegistered: If a file contains a namespace which is not\ndefined.\nOptionNotRegistered: If a file contains an option which is not defined\nbut resides under a valid namespace.\nUnrecognizedFileExtension: If there is no loader for a path.", "source": "codesearchnet"}
{"code": "def cache_json(filename):\n\n    def cache_decorator(cacheable_function):\n\n        @wraps(cacheable_function)\n        def cache_wrapper(*args, **kwargs):\n            path = (CACHE_DIRECTORY + filename)\n            check_create_folder(path)\n            if os.path.exists(path):\n                with open(path) as infile:\n                    return json.load(infile)\n            else:\n                function_output = cacheable_function(*args, **kwargs)\n                with open(path, 'w') as outfile:\n                    json.dump(function_output, outfile)\n                return function_output\n        return cache_wrapper\n    return cache_decorator", "docstring": "Caches the JSON-serializable output of the function to a given file\n\nArgs:\nfilename (str) The filename (sans directory) to store the output\n\nReturns: decorator, applicable to a function that produces JSON-serializable output", "source": "codesearchnet"}
{"code": "def get_tf_dtype(self, allowed_set=None):\n    if allowed_set:\n        index = self.get_int(0, len(allowed_set) - 1)\n        if allowed_set[index] not in _TF_DTYPES:\n            raise tf.errors.InvalidArgumentError(None, None, 'Given dtype {} is not accepted.'.format(allowed_set[index]))\n        return allowed_set[index]\n    else:\n        index = self.get_int(0, len(_TF_DTYPES) - 1)\n        return _TF_DTYPES[index]", "docstring": "Return a random tensorflow dtype.\n\nArgs:\nallowed_set: An allowlisted set of dtypes to choose from instead of all of\nthem.\n\nReturns:\nA random type from the list containing all TensorFlow types.", "source": "github-repos"}
{"code": "def deploy_ray_func(func, partition, kwargs):\n    \n    try:\n        result = func(partition, **kwargs)\n    \n    \n    \n    except Exception:\n        result = func(partition.to_pandas(), **kwargs)\n        if isinstance(result, pandas.Series):\n            result = pandas.DataFrame(result).T\n        if isinstance(result, pandas.DataFrame):\n            return pyarrow.Table.from_pandas(result)\n    return result", "docstring": "Deploy a function to a partition in Ray.\n\nArgs:\nfunc: The function to apply.\npartition: The partition to apply the function to.\nkwargs: A dictionary of keyword arguments for the function.\n\nReturns:\nThe result of the function.", "source": "juraj-google-style"}
{"code": "def _flush(self, buffer, start, end):\n    buffer_size = len(buffer)\n    if (not buffer_size):\n        return\n    with self._size_lock:\n        if (end > self._size):\n            with _handle_azure_exception():\n                self._resize(content_length=end, **self._client_kwargs)\n            self._reset_head()\n    if (buffer_size > self.MAX_FLUSH_SIZE):\n        futures = []\n        for part_start in range(0, buffer_size, self.MAX_FLUSH_SIZE):\n            buffer_part = buffer[part_start:(part_start + self.MAX_FLUSH_SIZE)]\n            if (not len(buffer_part)):\n                break\n            start_range = (start + part_start)\n            futures.append(self._workers.submit(self._update_range, data=buffer_part.tobytes(), start_range=start_range, end_range=((start_range + len(buffer_part)) - 1), **self._client_kwargs))\n        with _handle_azure_exception():\n            for future in _as_completed(futures):\n                future.result()\n    else:\n        with _handle_azure_exception():\n            self._update_range(data=buffer.tobytes(), start_range=start, end_range=(end - 1), **self._client_kwargs)", "docstring": "Flush the write buffer of the stream if applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.\nstart (int): Start of buffer position to flush.\nSupported only with page blobs.\nend (int): End of buffer position to flush.\nSupported only with page blobs.", "source": "codesearchnet"}
{"code": "def ConsumeRange(self, start, end):\n    old = self.CurrentRange()\n    if (old is None):\n        return\n    if (old.start > start):\n        if (old.start < end):\n            raise RuntimeError('Block end too high.')\n        return\n    if (old.start < start):\n        raise RuntimeError('Block start too high.')\n    if (old.end == end):\n        del self.ranges[0]\n    elif (old.end > end):\n        self.ranges[0] = Range(end, old.end)\n    else:\n        raise RuntimeError('Block length exceeds range.')", "docstring": "Consumes an entire range, or part thereof.\n\nIf the finger has no ranges left, or the curent range start is higher\nthan the end of the consumed block, nothing happens. Otherwise,\nthe current range is adjusted for the consumed block, or removed,\nif the entire block is consumed. For things to work, the consumed\nrange and the current finger starts must be equal, and the length\nof the consumed range may not exceed the length of the current range.\n\nArgs:\nstart: Beginning of range to be consumed.\nend: First offset after the consumed range (end + 1).\n\nRaises:\nRuntimeError: if the start position of the consumed range is\nhigher than the start of the current range in the finger, or if\nthe consumed range cuts accross block boundaries.", "source": "codesearchnet"}
{"code": "def get_ssh_client(ip_addr, ssh_key=None, host_name=None, ssh_tries=None, propagate_fail=True, username='root', password='123456'):\n    host_name = (host_name or ip_addr)\n    with LogTask(('Get ssh client for %s' % host_name), level='debug', propagate_fail=propagate_fail):\n        ssh_timeout = int(config.get('ssh_timeout'))\n        if (ssh_tries is None):\n            ssh_tries = int(config.get('ssh_tries', 10))\n        start_time = time.time()\n        client = paramiko.SSHClient()\n        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n        while (ssh_tries > 0):\n            try:\n                client.connect(ip_addr, username=username, password=password, key_filename=ssh_key, timeout=ssh_timeout)\n                break\n            except (socket.error, socket.timeout) as err:\n                LOGGER.debug('Socket error connecting to %s: %s', host_name, err)\n            except paramiko.ssh_exception.SSHException as err:\n                LOGGER.debug('SSH error connecting to %s: %s', host_name, err)\n            except EOFError as err:\n                LOGGER.debug('EOFError connecting to %s: %s', host_name, err)\n            ssh_tries -= 1\n            LOGGER.debug('Still got %d tries for %s', ssh_tries, host_name)\n            time.sleep(1)\n        else:\n            end_time = time.time()\n            raise LagoSSHTimeoutException(('Timed out (in %d s) trying to ssh to %s' % ((end_time - start_time), host_name)))\n    return client", "docstring": "Get a connected SSH client\n\nArgs:\nip_addr(str): IP address of the endpoint\nssh_key(str or list of str): Path to a file which\ncontains the private key\nhotname(str): The hostname of the endpoint\nssh_tries(int): The number of attempts to connect to the endpoint\npropagate_fail(bool): If set to true, this event will be in the log\nand fail the outer stage. Otherwise, it will be discarded.\nusername(str): The username to authenticate with\npassword(str): Used for password authentication\nor for private key decryption\n\nRaises:\n:exc:`~LagoSSHTimeoutException`: If the client failed to connect after\n\"ssh_tries\"", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_handler = GeminiModelHandler(model_name='gemini-2.0-flash-001', request_fn=generate_from_string, api_key=known_args.api_key, project=known_args.project, location=known_args.location)\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    prompts = ['What is 5+2?', 'Who is the protagonist of Lord of the Rings?', 'What is the air-speed velocity of a laden swallow?']\n    read_prompts = pipeline | 'Get prompt' >> beam.Create(prompts)\n    predictions = read_prompts | 'RunInference' >> RunInference(model_handler)\n    processed = predictions | 'PostProcess' >> beam.ParDo(PostProcessor())\n    _ = processed | 'PrintOutput' >> beam.Map(print)\n    _ = processed | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def ExamineEvent(self, mediator, event):\n    self._EnsureRequesterStarted()\n    path_spec = event.pathspec\n    event_identifiers = self._event_identifiers_by_pathspec[path_spec]\n    event_identifier = event.GetIdentifier()\n    event_identifiers.append(event_identifier)\n    if ((event.data_type not in self.DATA_TYPES) or (not self._analyzer.lookup_hash)):\n        return\n    lookup_hash = '{0:s}_hash'.format(self._analyzer.lookup_hash)\n    lookup_hash = getattr(event, lookup_hash, None)\n    if (not lookup_hash):\n        display_name = mediator.GetDisplayNameForPathSpec(path_spec)\n        logger.warning('Lookup hash attribute: {0:s}_hash missing from event that originated from: {1:s}.'.format(self._analyzer.lookup_hash, display_name))\n        return\n    path_specs = self._hash_pathspecs[lookup_hash]\n    path_specs.append(path_spec)\n    if (len(path_specs) == 1):\n        self.hash_queue.put(lookup_hash)", "docstring": "Evaluates whether an event contains the right data for a hash lookup.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event.", "source": "codesearchnet"}
{"code": "def all_tokens(self, delimiter=' ', label_list_ids=None):\n    tokens = set()\n    for utterance in self.utterances.values():\n        tokens = tokens.union(utterance.all_tokens(delimiter=delimiter, label_list_ids=label_list_ids))\n    return tokens", "docstring": "Return a list of all tokens occurring in one of the labels in the corpus.\n\nArgs:\ndelimiter (str): The delimiter used to split labels into tokens\n(see :meth:`audiomate.annotations.Label.tokenized`).\nlabel_list_ids (list): If not None, only labels from label-lists with an idx contained in this list\nare considered.\n\nReturns:\n:class:`set`: A set of distinct tokens.", "source": "codesearchnet"}
{"code": "def node(self, name, label=None, _attributes=None, **attrs):\n        \n        name = self._quote(name)\n        attr_list = self._attr_list(label, attrs, _attributes)\n        line = self._node % (name, attr_list)\n        self.body.append(line)", "docstring": "Create a node.\n\nArgs:\nname: Unique identifier for the node inside the source.\nlabel: Caption to be displayed (defaults to the node ``name``).\nattrs: Any additional node attributes (must be strings).", "source": "juraj-google-style"}
{"code": "def begin_run_group(project):\n    \n    from benchbuild.utils.db import create_run_group\n    from datetime import datetime\n\n    group, session = create_run_group(project)\n    group.begin = datetime.now()\n    group.status = 'running'\n\n    session.commit()\n    return group, session", "docstring": "Begin a run_group in the database.\n\nA run_group groups a set of runs for a given project. This models a series\nof runs that form a complete binary runtime test.\n\nArgs:\nproject: The project we begin a new run_group for.\n\nReturns:\n``(group, session)`` where group is the created group in the\ndatabase and session is the database session this group lives in.", "source": "juraj-google-style"}
{"code": "def seek_to_beginning(self, *partitions):\n        \n        if not all([isinstance(p, TopicPartition) for p in partitions]):\n            raise TypeError('partitions must be TopicPartition namedtuples')\n        if not partitions:\n            partitions = self._subscription.assigned_partitions()\n            assert partitions, 'No partitions are currently assigned'\n        else:\n            for p in partitions:\n                assert p in self._subscription.assigned_partitions(), 'Unassigned partition'\n\n        for tp in partitions:\n            log.debug(\"Seeking to beginning of partition %s\", tp)\n            self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)", "docstring": "Seek to the oldest available offset for partitions.\n\nArguments:\n*partitions: Optionally provide specific TopicPartitions, otherwise\ndefault to all assigned partitions.\n\nRaises:\nAssertionError: If any partition is not currently assigned, or if\nno partitions are assigned.", "source": "juraj-google-style"}
{"code": "def filter_framework_files(files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]]=None) -> List[Union[str, os.PathLike]]:\n    if frameworks is None:\n        frameworks = get_default_frameworks()\n    framework_to_file = {}\n    others = []\n    for f in files:\n        parts = Path(f).name.split('_')\n        if 'modeling' not in parts:\n            others.append(f)\n            continue\n        if 'tf' in parts:\n            framework_to_file['tf'] = f\n        elif 'flax' in parts:\n            framework_to_file['flax'] = f\n        else:\n            framework_to_file['pt'] = f\n    return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others", "docstring": "Filter a list of files to only keep the ones corresponding to a list of frameworks.\n\nArgs:\nfiles (`List[Union[str, os.PathLike]]`): The list of files to filter.\nframeworks (`List[str]`, *optional*): The list of allowed frameworks.\n\nReturns:\n`List[Union[str, os.PathLike]]`: The list of filtered files.", "source": "github-repos"}
{"code": "def _CropAndResizeGrad(op: ops.Operation, grad):\n    image = op.inputs[0]\n    if image.get_shape().is_fully_defined():\n        image_shape = image.get_shape().as_list()\n    else:\n        image_shape = array_ops.shape(image)\n    allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]\n    if op.inputs[0].dtype in allowed_types:\n        grad0 = gen_image_ops.crop_and_resize_grad_image(grad, op.inputs[1], op.inputs[2], image_shape, T=op.get_attr('T'), method=op.get_attr('method'))\n    else:\n        grad0 = None\n    grad1 = gen_image_ops.crop_and_resize_grad_boxes(grad, op.inputs[0], op.inputs[1], op.inputs[2])\n    return [grad0, grad1, None, None]", "docstring": "The derivatives for crop_and_resize.\n\nWe back-propagate to the image only when the input image tensor has floating\npoint dtype but we always back-propagate to the input boxes tensor.\n\nArgs:\nop: The CropAndResize op.\ngrad: The tensor representing the gradient w.r.t. the output.\n\nReturns:\nThe gradients w.r.t. the input image, boxes, as well as the always-None\ngradients w.r.t. box_ind and crop_size.", "source": "github-repos"}
{"code": "def is_polar(self, tol_dipole_per_unit_area=0.001):\n    dip_per_unit_area = (self.dipole / self.surface_area)\n    return (np.linalg.norm(dip_per_unit_area) > tol_dipole_per_unit_area)", "docstring": "Checks whether the surface is polar by computing the dipole per unit\narea. Note that the Slab must be oxidation state-decorated for this\nto work properly. Otherwise, the Slab will always be non-polar.\n\nArgs:\ntol_dipole_per_unit_area (float): A tolerance. If the dipole\nmagnitude per unit area is less than this value, the Slab is\nconsidered non-polar. Defaults to 1e-3, which is usually\npretty good. Normalized dipole per unit area is used as it is\nmore reliable than using the total, which tends to be larger for\nslabs with larger surface areas.", "source": "codesearchnet"}
{"code": "def correct_absolute_refs(self, construction_table):\n        \n        c_table = construction_table.copy()\n        abs_refs = constants.absolute_refs\n        problem_index = self.check_absolute_refs(c_table)\n        for i in problem_index:\n            order_of_refs = iter(permutations(abs_refs.keys()))\n            finished = False\n            while not finished:\n                if self._has_valid_abs_ref(i, c_table):\n                    finished = True\n                else:\n                    row = c_table.index.get_loc(i)\n                    c_table.iloc[row, row:] = next(order_of_refs)[row:3]\n        return c_table", "docstring": "Reindexe construction_table if linear reference in first three rows\npresent.\n\nUses :meth:`~Cartesian.check_absolute_refs` to obtain the problematic\nindices.\n\nArgs:\nconstruction_table (pd.DataFrame):\n\nReturns:\npd.DataFrame: Appropiately renamed construction table.", "source": "juraj-google-style"}
{"code": "def _get_gcc_major_version(path_to_gcc: str) -> int:\n    logging.info('Running echo __GNUC__ | %s -E -P -', path_to_gcc)\n    gcc_version_proc = subprocess.run([path_to_gcc, '-E', '-P', '-'], input='__GNUC__', check=True, capture_output=True, text=True)\n    major_version = int(gcc_version_proc.stdout)\n    logging.info('%s reports major version %s.', path_to_gcc, major_version)\n    return major_version", "docstring": "Gets the major version of the gcc at `path_to_gcc`.\n\nArgs:\npath_to_gcc: Path to a gcc executable\n\nReturns:\nThe major version.", "source": "github-repos"}
{"code": "def search(pattern):\n        \n        def match(napp):\n            \n            \n            \n            username = napp.get('username', napp.get('author'))\n\n            strings = ['{}/{}'.format(username, napp.get('name')),\n                       napp.get('description')] + napp.get('tags')\n            return any(pattern.match(string) for string in strings)\n\n        napps = NAppsClient().get_napps()\n        return [napp for napp in napps if match(napp)]", "docstring": "Search all server NApps matching pattern.\n\nArgs:\npattern (str): Python regular expression.", "source": "juraj-google-style"}
{"code": "def get_push_pop():\n    push = copy.deepcopy(PUSH)\n    pop = copy.deepcopy(POP)\n    anno.setanno(push, 'pop', pop)\n    anno.setanno(push, 'gen_push', True)\n    anno.setanno(pop, 'push', push)\n    op_id = _generate_op_id()\n    return (push, pop, op_id)", "docstring": "Create pop and push nodes that are linked.\n\nReturns:\nA push and pop node which have `push_func` and `pop_func` annotations\nrespectively, identifying them as such. They also have a `pop` and\n`push` annotation respectively, which links the push node to the pop\nnode and vice versa.", "source": "codesearchnet"}
{"code": "def process_file(self, in_filename, out_filename, no_change_to_outfile_on_error=False):\n    with open(in_filename, 'r') as in_file, tempfile.NamedTemporaryFile('w', delete=False) as temp_file:\n        ret = self.process_opened_file(in_filename, in_file, out_filename, temp_file)\n    if no_change_to_outfile_on_error and ret[0] == 0:\n        os.remove(temp_file.name)\n    else:\n        shutil.move(temp_file.name, out_filename)\n    return ret", "docstring": "Process the given python file for incompatible changes.\n\nArgs:\nin_filename: filename to parse\nout_filename: output file to write to\nno_change_to_outfile_on_error: not modify the output file on errors\nReturns:\nA tuple representing number of files processed, log of actions, errors", "source": "github-repos"}
{"code": "def add_messages(self, validation):\n        \n        if not isinstance(validation, Validation):\n            raise TypeError(\"Argument must be of type Validation\")\n\n        self.messages.extend(validation.messages)", "docstring": "Adds all the messages in the specified `Validation` object to this instance's\nmessages array.\n\nArgs:\nvalidation (Validation): An object containing the messages to add to this instance's messages.", "source": "juraj-google-style"}
{"code": "def cache(self, domain, data_type, ttl_minutes=None, mapping=None):\n    from .tcex_cache import TcExCache\n    return TcExCache(self, domain, data_type, ttl_minutes, mapping)", "docstring": "Get instance of the Cache module.\n\nArgs:\ndomain (str): The domain can be either \"system\", \"organization\", or \"local\". When using\n\"organization\" the data store can be accessed by any Application in the entire org,\nwhile \"local\" access is restricted to the App writing the data. The \"system\" option\nshould not be used in almost all cases.\ndata_type (str): The data type descriptor (e.g., tc:whois:cache).\nttl_minutes (int): The number of minutes the cache is valid.\n\nReturns:\nobject: An instance of the Cache Class.", "source": "codesearchnet"}
{"code": "def __init__(self, object_local_name: str, from_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout], to_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]):\n    logging.info('Creating EmbeddingReshardCallback for %s', object_local_name)\n    self._object_local_name = object_local_name\n    self._from_shard_layouts = from_shard_layouts\n    self._to_shard_layouts = to_shard_layouts", "docstring": "Initializes  Reshard callback.\n\nArgs:\nobject_local_name:  The local name of the object being restored.\nfrom_shard_layouts: layouts as in checkpoint being restored from.\nto_shard_layouts: target layouts as specified in the embedding being\nrestored.", "source": "github-repos"}
{"code": "def igmpize(self):\n    gaddr = (self.gaddr if (hasattr(self, 'gaddr') and self.gaddr) else '0.0.0.0')\n    underlayer = self.underlayer\n    if (self.type not in [17, 48]):\n        self.mrcode = 0\n    if isinstance(underlayer, IP):\n        if (self.type == 17):\n            if (gaddr == '0.0.0.0'):\n                underlayer.dst = '224.0.0.1'\n            elif isValidMCAddr(gaddr):\n                underlayer.dst = gaddr\n            else:\n                warning('Invalid IGMP Group Address detected !')\n                return False\n        elif ((self.type == 23) and isValidMCAddr(gaddr)):\n            underlayer.dst = '224.0.0.2'\n        elif (((self.type == 18) or (self.type == 22)) and isValidMCAddr(gaddr)):\n            underlayer.dst = gaddr\n        else:\n            warning('Invalid IGMP Type detected !')\n            return False\n        if (not any((isinstance(x, IPOption_Router_Alert) for x in underlayer.options))):\n            underlayer.options.append(IPOption_Router_Alert())\n        underlayer.ttl = 1\n        _root = self.firstlayer()\n        if _root.haslayer(Ether):\n            _root[Ether].dst = getmacbyip(underlayer.dst)\n    from scapy.contrib.igmpv3 import IGMPv3\n    if isinstance(self, IGMPv3):\n        self.encode_maxrespcode()\n    return True", "docstring": "Called to explicitly fixup the packet according to the IGMP RFC\n\nThe rules are:\nGeneral:\n1.  the Max Response time is meaningful only in Membership Queries and should be zero\nIP:\n1. Send General Group Query to 224.0.0.1 (all systems)\n2. Send Leave Group to 224.0.0.2 (all routers)\n3a.Otherwise send the packet to the group address\n3b.Send reports/joins to the group address\n4. ttl = 1 (RFC 2236, section 2)\n5. send the packet with the router alert IP option (RFC 2236, section 2)\nEther:\n1. Recalculate destination\n\nReturns:\nTrue    The tuple ether/ip/self passed all check and represents\na proper IGMP packet.\nFalse   One of more validation checks failed and no fields\nwere adjusted.\n\nThe function will examine the IGMP message to assure proper format.\nCorrections will be attempted if possible. The IP header is then properly\nadjusted to ensure correct formatting and assignment. The Ethernet header\nis then adjusted to the proper IGMP packet format.", "source": "codesearchnet"}
{"code": "def slideshow(self, **kwargs):\n        \n        for i, cycle in enumerate(self.cycles):\n            cycle.plot(title=\"Relaxation step %s\" % (i + 1),\n                       tight_layout=kwargs.pop(\"tight_layout\", True),\n                       show=kwargs.pop(\"show\", True))", "docstring": "Uses matplotlib to plot the evolution of the structural relaxation.\n\nArgs:\nax_list: List of axes. If None a new figure is produced.\n\nReturns:\n`matplotlib` figure", "source": "juraj-google-style"}
{"code": "def apply(self, func, num_splits=None, other_axis_partition=None, **kwargs):\n        \n        if num_splits is None:\n            num_splits = len(self.list_of_blocks)\n\n        if other_axis_partition is not None:\n            return [\n                PyarrowOnRayFramePartition(obj)\n                for obj in deploy_ray_func_between_two_axis_partitions._remote(\n                    args=(self.axis, func, num_splits, len(self.list_of_blocks), kwargs)\n                    + tuple(self.list_of_blocks + other_axis_partition.list_of_blocks),\n                    num_return_vals=num_splits,\n                )\n            ]\n\n        args = [self.axis, func, num_splits, kwargs]\n        args.extend(self.list_of_blocks)\n        return [\n            PyarrowOnRayFramePartition(obj)\n            for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits)\n        ]", "docstring": "Applies func to the object in the plasma store.\n\nSee notes in Parent class about this method.\n\nArgs:\nfunc: The function to apply.\nnum_splits: The number of times to split the result object.\nother_axis_partition: Another `PyarrowOnRayFrameAxisPartition` object to apply to\nfunc with this one.\n\nReturns:\nA list of `RayRemotePartition` objects.", "source": "juraj-google-style"}
{"code": "def __init__(self, wildcard, sep=\"|\"):\n        \n        self.pats = [\"*\"]\n        if wildcard:\n            self.pats = wildcard.split(sep)", "docstring": "Initializes a WildCard.\n\nArgs:\nwildcard (str): String of tokens separated by sep. Each token\nrepresents a pattern.\nsep (str): Separator for shell patterns.", "source": "juraj-google-style"}
{"code": "def alpha_blend(self, other):\n    fa = ((self.__a + other.__a) - (self.__a * other.__a))\n    if (fa == 0):\n        sa = 0\n    else:\n        sa = min(1.0, (self.__a / other.__a))\n    da = (1.0 - sa)\n    (sr, sg, sb) = [(v * sa) for v in self.__rgb]\n    (dr, dg, db) = [(v * da) for v in other.__rgb]\n    return Color(((sr + dr), (sg + dg), (sb + db)), 'rgb', fa, self.__wref)", "docstring": "Alpha-blend this color on the other one.\n\nArgs:\n:other:\nThe grapefruit.Color to alpha-blend with this one.\n\nReturns:\nA grapefruit.Color instance which is the result of alpha-blending\nthis color on the other one.\n\n>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)\n>>> c2 = Color.from_rgb(1, 1, 1, 0.8)\n>>> c3 = c1.alpha_blend(c2)\n>>> c3\nColor(1.0, 0.875, 0.75, 0.84)", "source": "codesearchnet"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None):\n    class_queries_logits = outputs.logits\n    masks_queries_logits = outputs.pred_masks\n    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n    masks_probs = masks_queries_logits.sigmoid()\n    segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n    batch_size = class_queries_logits.shape[0]\n    if target_sizes is not None:\n        if batch_size != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        semantic_segmentation = []\n        for idx in range(batch_size):\n            resized_logits = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = segmentation.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrForSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple[int, int]]`, *optional*):\nA list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the\nbatch. If unset, predictions will not be resized.\nReturns:\n`List[torch.Tensor]`:\nA list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\ncorresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each\n`torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def daylight_saving_end_day(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `daylight_saving_end_day`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `daylight_saving_end_day`')\n\n        self._daylight_saving_end_day = value", "docstring": "Corresponds to IDD Field `daylight_saving_end_day`\n\nArgs:\nvalue (str): value for IDD Field `daylight_saving_end_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.DetectIntent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Sessions/DetectIntent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.DetectIntentRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.DetectIntentResponse.FromString,\n        )\n    self.StreamingDetectIntent = channel.stream_stream(\n        '/google.cloud.dialogflow.v2.Sessions/StreamingDetectIntent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.StreamingDetectIntentRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__pb2.StreamingDetectIntentResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _create_session(self, username, password):\n    \n    session = requests.Session()\n    session.verify = False  \n    try:\n      response = session.get(self.host_url)\n    except requests.exceptions.ConnectionError:\n      return False\n    \n    soup = BeautifulSoup(response.text, 'html.parser')\n    csrf_token = soup.find('input', dict(name='csrf_token'))['value']\n    login_data = dict(username=username, password=password)\n    session.headers.update({\n        'x-csrftoken': csrf_token,\n        'referer': self.host_url\n    })\n    _ = session.post('{0:s}/login/'.format(self.host_url), data=login_data)\n    return session", "docstring": "Create HTTP session.\n\nArgs:\nusername (str): Timesketch username\npassword (str): Timesketch password\n\nReturns:\nrequests.Session: Session object.", "source": "juraj-google-style"}
{"code": "def _ConvertMapFieldValue(self, value, message, field):\n    if (not isinstance(value, dict)):\n        raise ParseError('Map field {0} must be in a dict which is {1}.'.format(field.name, value))\n    key_field = field.message_type.fields_by_name['key']\n    value_field = field.message_type.fields_by_name['value']\n    for key in value:\n        key_value = _ConvertScalarFieldValue(key, key_field, True)\n        if (value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE):\n            self.ConvertMessage(value[key], getattr(message, field.name)[key_value])\n        else:\n            getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(value[key], value_field)", "docstring": "Convert map field value for a message map field.\n\nArgs:\nvalue: A JSON object to convert the map field value.\nmessage: A protocol message to record the converted data.\nfield: The descriptor of the map field to be converted.\n\nRaises:\nParseError: In case of convert problems.", "source": "codesearchnet"}
{"code": "def BatchConvert(self, metadata_value_pairs, token=None):\n    msg_dict = {}\n    for (metadata, msg) in metadata_value_pairs:\n        msg_dict.setdefault(msg.source, []).append((metadata, msg))\n    metadata_objects = []\n    metadata_to_fetch = []\n    for client_urn in msg_dict:\n        try:\n            metadata_objects.append(self.cached_metadata[client_urn])\n        except KeyError:\n            metadata_to_fetch.append(client_urn)\n    if metadata_to_fetch:\n        if data_store.RelationalDBEnabled():\n            client_ids = set((urn.Basename() for urn in metadata_to_fetch))\n            infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)\n            fetched_metadata = [GetMetadata(client_id, info) for (client_id, info) in infos.items()]\n        else:\n            client_fds = aff4.FACTORY.MultiOpen(metadata_to_fetch, mode='r', token=token)\n            fetched_metadata = [GetMetadataLegacy(client_fd, token=token) for client_fd in client_fds]\n        for metadata in fetched_metadata:\n            self.cached_metadata[metadata.client_urn] = metadata\n        metadata_objects.extend(fetched_metadata)\n    data_by_type = {}\n    for metadata in metadata_objects:\n        try:\n            for (original_metadata, message) in msg_dict[metadata.client_urn]:\n                new_metadata = ExportedMetadata(metadata)\n                new_metadata.source_urn = original_metadata.source_urn\n                new_metadata.annotations = original_metadata.annotations\n                new_metadata.original_timestamp = message.payload.age\n                cls_name = message.payload.__class__.__name__\n                if (cls_name not in data_by_type):\n                    converters_classes = ExportConverter.GetConvertersByValue(message.payload)\n                    data_by_type[cls_name] = {'converters': [cls(self.options) for cls in converters_classes], 'batch_data': [(new_metadata, message.payload)]}\n                else:\n                    data_by_type[cls_name]['batch_data'].append((new_metadata, message.payload))\n        except KeyError:\n            pass\n    converted_batch = []\n    for dataset in itervalues(data_by_type):\n        for converter in dataset['converters']:\n            converted_batch.extend(converter.BatchConvert(dataset['batch_data'], token=token))\n    return converted_batch", "docstring": "Converts a batch of GrrMessages into a set of RDFValues at once.\n\nArgs:\nmetadata_value_pairs: a list or a generator of tuples (metadata, value),\nwhere metadata is ExportedMetadata to be used for conversion and value\nis a GrrMessage to be converted.\ntoken: Security token.\n\nReturns:\nResulting RDFValues. Empty list is a valid result and means that\nconversion wasn't possible.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.DeletePosixAccount = channel.unary_unary(\n            \"/google.cloud.oslogin.v1.OsLoginService/DeletePosixAccount\",\n            request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeletePosixAccountRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.DeleteSshPublicKey = channel.unary_unary(\n            \"/google.cloud.oslogin.v1.OsLoginService/DeleteSshPublicKey\",\n            request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.DeleteSshPublicKeyRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.GetLoginProfile = channel.unary_unary(\n            \"/google.cloud.oslogin.v1.OsLoginService/GetLoginProfile\",\n            request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetLoginProfileRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.LoginProfile.FromString,\n        )\n        self.GetSshPublicKey = channel.unary_unary(\n            \"/google.cloud.oslogin.v1.OsLoginService/GetSshPublicKey\",\n            request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.GetSshPublicKeyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString,\n        )\n        self.ImportSshPublicKey = channel.unary_unary(\n            \"/google.cloud.oslogin.v1.OsLoginService/ImportSshPublicKey\",\n            request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.ImportSshPublicKeyResponse.FromString,\n        )\n        self.UpdateSshPublicKey = channel.unary_unary(\n            \"/google.cloud.oslogin.v1.OsLoginService/UpdateSshPublicKey\",\n            request_serializer=google_dot_cloud_dot_oslogin__v1_dot_proto_dot_oslogin__pb2.UpdateSshPublicKeyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_oslogin_dot_common_dot_common__pb2.SshPublicKey.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):\n    return os.path.join(appdirs.user_config_dir, file_name)", "docstring": "Return the path where the config file is stored.\n\nArgs:\napp_name (text_type, optional): Name of the application, defaults to\n``'projecthamster``. Allows you to use your own application specific\nnamespace if you wish.\nfile_name (text_type, optional): Name of the config file. Defaults to\n``config.conf``.\n\nReturns:\nstr: Fully qualified path (dir & filename) where we expect the config file.", "source": "codesearchnet"}
{"code": "def _training(self):\n    with tf.device(('/gpu:0' if self._use_gpu else '/cpu:0')):\n        with tf.name_scope('training'):\n            assert_full = tf.assert_equal(self._num_finished_episodes, self._config.update_every)\n            with tf.control_dependencies([assert_full]):\n                data = self._finished_episodes.data()\n            ((observ, action, old_policy_params, reward), length) = data\n            old_policy_params = tools.nested.map((lambda param: self._mask(param, length, 1)), old_policy_params)\n            with tf.control_dependencies([tf.assert_greater(length, 0)]):\n                length = tf.identity(length)\n            observ = self._observ_filter.transform(observ)\n            reward = self._reward_filter.transform(reward)\n            update_summary = self._perform_update_steps(observ, action, old_policy_params, reward, length)\n            with tf.control_dependencies([update_summary]):\n                penalty_summary = self._adjust_penalty(observ, old_policy_params, length)\n            with tf.control_dependencies([penalty_summary]):\n                clear_memory = tf.group(self._finished_episodes.clear(), self._num_finished_episodes.assign(0))\n            with tf.control_dependencies([clear_memory]):\n                weight_summary = utility.variable_summaries(tf.trainable_variables(), self._config.weight_summaries)\n                return tf.summary.merge([update_summary, penalty_summary, weight_summary])", "docstring": "Perform multiple training iterations of both policy and value baseline.\n\nTraining on the episodes collected in the memory. Reset the memory\nafterwards. Always returns a summary string.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def get(self, type: Type[T], query: Mapping[(str, Any)]) -> T:\n    LOGGER.info('Getting SourceHandlers for \"{type}\"'.format(type=type.__name__))\n    try:\n        handlers = self._get_types[type]\n    except KeyError:\n        try:\n            LOGGER.info('Building new SourceHandlers for \"{type}\"'.format(type=type.__name__))\n            handlers = self._get_handlers(type)\n        except NoConversionError:\n            handlers = None\n        self._get_types[type] = handlers\n    if (handlers is None):\n        raise NoConversionError('No source can provide \"{type}\"'.format(type=type.__name__))\n    LOGGER.info('Creating new PipelineContext')\n    context = self._new_context()\n    LOGGER.info('Querying SourceHandlers for \"{type}\"'.format(type=type.__name__))\n    for handler in handlers:\n        try:\n            return handler.get(query, context)\n        except NotFoundError:\n            pass\n    raise NotFoundError('No source returned a query result!')", "docstring": "Gets a query from the data pipeline.\n\n1) Extracts the query the sequence of data sources.\n2) Inserts the result into the data sinks (if appropriate).\n3) Transforms the result into the requested type if it wasn't already.\n4) Inserts the transformed result into any data sinks.\n\nArgs:\nquery: The query being requested.\ncontext: The context for the extraction (mutable).\n\nReturns:\nThe requested object.", "source": "codesearchnet"}
{"code": "def keep_file(self, task, response, min_size=None, max_size=None):\n        \n        try:\n            img = Image.open(BytesIO(response.content))\n        except (IOError, OSError):\n            return False\n        task['img_size'] = img.size\n        if min_size and not self._size_gt(img.size, min_size):\n            return False\n        if max_size and not self._size_lt(img.size, max_size):\n            return False\n        return True", "docstring": "Decide whether to keep the image\n\nCompare image size with ``min_size`` and ``max_size`` to decide.\n\nArgs:\nresponse (Response): response of requests.\nmin_size (tuple or None): minimum size of required images.\nmax_size (tuple or None): maximum size of required images.\nReturns:\nbool: whether to keep the image.", "source": "juraj-google-style"}
{"code": "def min_edit_distance(source: Sequence[T], target: Sequence[T], ins_cost: Callable[(..., int)]=(lambda _x: 1), del_cost: Callable[(..., int)]=(lambda _x: 1), sub_cost: Callable[(..., int)]=(lambda x, y: (0 if (x == y) else 1))) -> int:\n    n = len(target)\n    m = len(source)\n    distance = np.zeros(((m + 1), (n + 1)), dtype=np.int16)\n    for i in range(1, (m + 1)):\n        distance[(i, 0)] = (distance[((i - 1), 0)] + ins_cost(source[(i - 1)]))\n    for j in range(1, (n + 1)):\n        distance[(0, j)] = (distance[(0, (j - 1))] + ins_cost(target[(j - 1)]))\n    for j in range(1, (n + 1)):\n        for i in range(1, (m + 1)):\n            distance[(i, j)] = min((distance[((i - 1), j)] + ins_cost(source[(i - 1)])), (distance[((i - 1), (j - 1))] + sub_cost(source[(i - 1)], target[(j - 1)])), (distance[(i, (j - 1))] + del_cost(target[(j - 1)])))\n    return int(distance[(len(source), len(target))])", "docstring": "Calculates the minimum edit distance between two sequences.\n\nUses the Levenshtein weighting as a default, but offers keyword arguments\nto supply functions to measure the costs for editing with different\nelements.\n\nArgs:\nins_cost: A function describing the cost of inserting a given char\ndel_cost: A function describing the cost of deleting a given char\nsub_cost: A function describing the cost of substituting one char for\n\nReturns:\nThe edit distance between the two input sequences.", "source": "codesearchnet"}
{"code": "def update_one_time_key_counts(self, counts):\n        \n        self.one_time_keys_manager.server_counts = counts\n        if self.one_time_keys_manager.should_upload():\n            logger.info('Uploading new one-time keys.')\n            self.upload_one_time_keys()", "docstring": "Update data on one-time keys count and upload new ones if necessary.\n\nArgs:\ncounts (dict): Counts of keys currently on the HS for each key type.", "source": "juraj-google-style"}
{"code": "def batch_shape_tensor(self):\n    batch_shape = tf.constant([], dtype=tf.int32)\n    for param in self.parameters:\n        batch_shape = tf.broadcast_dynamic_shape(batch_shape, param.prior.batch_shape_tensor())\n    return batch_shape", "docstring": "Runtime batch shape of models represented by this component.\n\nReturns:\nbatch_shape: `int` `Tensor` giving the broadcast batch shape of\nall model parameters. This should match the batch shape of\nderived state space models, i.e.,\n`self.make_state_space_model(...).batch_shape_tensor()`.", "source": "codesearchnet"}
{"code": "def decode(data):\n    \n    decoded = None\n    try:\n        decoded = json.loads(data)\n    except Exception, e:\n        raise MetaParsingException(\"Can't parse your JSON data: %s\" % e.message)\n\n    decoded = validator.check_structure(decoded)\n\n    return decoded", "docstring": "Handles decoding of the JSON `data`.\n\nArgs:\ndata (str): Data which will be decoded.\n\nReturns:\ndict: Dictionary with decoded data.", "source": "juraj-google-style"}
{"code": "def check_publish_block(self, block_header):\n    if any(((publisher_key != block_header.signer_public_key) for publisher_key in self._valid_block_publishers)):\n        return False\n    if (self._min_wait_time == 0):\n        return True\n    if (self._min_wait_time < 0):\n        return False\n    assert (self._min_wait_time > 0)\n    if (self._max_wait_time <= 0):\n        return ((self._start_time + self._min_wait_time) <= time.time())\n    assert (self._max_wait_time > 0)\n    if (self._max_wait_time <= self._min_wait_time):\n        return False\n    assert (0 < self._min_wait_time < self._max_wait_time)\n    return ((self._start_time + self._wait_time) <= time.time())", "docstring": "Check if a candidate block is ready to be claimed.\n\nblock_header (BlockHeader): the block_header to be checked if it\nshould be claimed\nReturns:\nBoolean: True if the candidate block_header should be claimed.", "source": "codesearchnet"}
{"code": "def PreparePairedSequenceBatch(source, target_in, pad=0):\n  \n  target = target_in[:, :-1]\n  target_y = target_in[:, 1:]\n  source_mask = np.reshape(source != pad,\n                           (source.shape[0], 1, 1, source.shape[-1]))\n  target_mask = MakeTargetMask(target, pad)\n  memory_mask = (\n      np.reshape(np.arange(target.shape[-1]) < source.shape[-1], [-1, 1]))\n  ntokens = np.sum(target_y != pad)\n  return (source, target, target_y,\n          source_mask, target_mask, memory_mask, ntokens)", "docstring": "Build masks for this batch.\n\nArgs:\nsource: (batch, source_len) array of integer-coded symbols for inputs\ntarget_in: (batch, batch_len) array of integer-coded symbols for targets\npad: int: the padding symbol used to pad the above\n\nReturns:\nPrepared batch of tuple of arrays: source, input-target, shifted-target,\nsource mask, target mask, source-target \"memory\" mask, minibatch token count", "source": "juraj-google-style"}
{"code": "def _build_node_error_message(op):\n    node_error_message = [f'Detected at node {op.name!r} defined at (most recent call last):']\n    field_dict = _compute_field_dict(op)\n    for frame in field_dict['definition_traceback']:\n        if '<embedded' not in frame:\n            node_error_message.extend([f'  {line}' for line in frame.split('\\n') if line.strip()])\n    node_error_message.append(f'Node: {op.name!r}')\n    return '\\n'.join(node_error_message)", "docstring": "Returns the formatted error message for the given op.\n\nArgs:\nop: The node.\n\nReturns:\nThe formatted error message for the given op with traceback.", "source": "github-repos"}
{"code": "def read_configs(__pkg: str, __name: str='config', *, local: bool=True) -> ConfigParser:\n    configs = get_configs(__pkg, __name)\n    if local:\n        localrc = path.abspath('.{}rc'.format(__pkg))\n        if path.exists(localrc):\n            configs.append(localrc)\n    cfg = ConfigParser(converters={'datetime': parse_datetime, 'humandelta': parse_timedelta, 'timedelta': parse_delta})\n    cfg.read(configs, 'utf-8')\n    cfg.configs = configs\n    if (('NO_COLOUR' in environ) or ('NO_COLOR' in environ)):\n        cfg.colour = False\n    elif (__pkg in cfg):\n        if ('colour' in cfg[__pkg]):\n            cfg.colour = cfg[__pkg].getboolean('colour')\n        if ('color' in cfg[__pkg]):\n            cfg.colour = cfg[__pkg].getboolean('color')\n    else:\n        cfg.colour = True\n    return cfg", "docstring": "Process configuration file stack.\n\nWe export the time parsing functionality of ``jnrbase`` as custom\nconverters for :class:`configparser.ConfigParser`:\n\n===================  ===========================================\nMethod               Function\n===================  ===========================================\n``.getdatetime()``   :func:`~jnrbase.iso_8601.parse_datetime`\n``.gethumantime()``  :func:`~jnrbase.human_time.parse_timedelta`\n``.gettimedelta()``  :func:`~jnrbase.iso_8601.parse_delta`\n===================  ===========================================\n\nArgs:\n__pkg: Package name to use as base for config files\n__name: File name to search for within config directories\nlocal: Whether to include config files from current directory\nReturns:\nParsed configuration files", "source": "codesearchnet"}
{"code": "def get_fixture(self, fixture_id, head2head=None):\n        \n        filters = []\n        if head2head is not None and int(head2head) > 0:\n            self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')\n            filters.append(self.__createFilter('head2head', head2head))\n        else:\n            self.logger.debug(f'Getting fixture {fixture_id}.')\n\n        return self._request('fixtures', fixture_id, filters=filters)", "docstring": "Loads a single fixture.\n\nArgs:\n* fixture_id (str): the id of the fixture\n* head2head (int, optional): load the previous n fixture of the two teams\n\nReturns:\n* :obj: json: the fixture-json", "source": "juraj-google-style"}
{"code": "def id_pools_vwwn_ranges(self):\n    if (not self.__id_pools_vwwn_ranges):\n        self.__id_pools_vwwn_ranges = IdPoolsRanges('vwwn', self.__connection)\n    return self.__id_pools_vwwn_ranges", "docstring": "Gets the IdPoolsRanges API Client for VWWN Ranges.\n\nReturns:\nIdPoolsRanges:", "source": "codesearchnet"}
{"code": "def cluster_spec(self):\n    if self._tpu != 'local':\n        network_endpoints = self._cloud_tpu_client.network_endpoints()\n        worker_list = ['%s:%s' % (endpoint['ipAddress'], endpoint['port']) for endpoint in network_endpoints]\n        cluster_spec = {self.task_type: worker_list}\n        if self._coordinator_address:\n            cluster_spec[self._coordinator_name] = [self._coordinator_address]\n        return server_lib.ClusterSpec(cluster_spec)\n    else:\n        return server_lib.ClusterSpec({})", "docstring": "Returns a ClusterSpec object based on the latest TPU information.\n\nWe retrieve the information from the GCE APIs every time this method is\ncalled.\n\nReturns:\nA ClusterSpec containing host information returned from Cloud TPUs,\nor None.\n\nRaises:\nRuntimeError: If the provided TPU is not healthy.", "source": "github-repos"}
{"code": "def _preprocess_tensor_input(x, data_format, mode):\n    ndim = len(x.shape)\n    if mode == 'tf':\n        x /= 127.5\n        x -= 1.0\n        return x\n    elif mode == 'torch':\n        x /= 255.0\n        mean = [0.485, 0.456, 0.406]\n        std = [0.229, 0.224, 0.225]\n    else:\n        if data_format == 'channels_first':\n            if len(x.shape) == 3:\n                x = ops.stack([x[i, ...] for i in (2, 1, 0)], axis=0)\n            else:\n                x = ops.stack([x[:, i, :] for i in (2, 1, 0)], axis=1)\n        else:\n            x = ops.stack([x[..., i] for i in (2, 1, 0)], axis=-1)\n        mean = [103.939, 116.779, 123.68]\n        std = None\n    mean_tensor = ops.convert_to_tensor(-np.array(mean), dtype=x.dtype)\n    if data_format == 'channels_first':\n        mean_tensor = ops.reshape(mean_tensor, (1, 3) + (1,) * (ndim - 2))\n    else:\n        mean_tensor = ops.reshape(mean_tensor, (1,) * (ndim - 1) + (3,))\n    x += mean_tensor\n    if std is not None:\n        std_tensor = ops.convert_to_tensor(np.array(std), dtype=x.dtype)\n        if data_format == 'channels_first':\n            std_tensor = ops.reshape(std_tensor, (-1, 1, 1))\n        x /= std_tensor\n    return x", "docstring": "Preprocesses a tensor encoding a batch of images.\n\nArgs:\nx: Input tensor, 3D or 4D.\ndata_format: Data format of the image tensor.\nmode: One of \"caffe\", \"tf\" or \"torch\".\n- caffe: will convert the images from RGB to BGR,\nthen will zero-center each color channel with\nrespect to the ImageNet dataset,\nwithout scaling.\n- tf: will scale pixels between -1 and 1,\nsample-wise.\n- torch: will scale pixels between 0 and 1 and then\nwill normalize each channel with respect to the\nImageNet dataset.\n\nReturns:\nPreprocessed tensor.", "source": "github-repos"}
{"code": "def is_experimental_feature_activated(feature_name):\n    return feature_name in os.environ.get('TF_TRT_EXPERIMENTAL_FEATURES', default='').split(',')", "docstring": "Determines if a TF-TRT experimental feature is enabled.\n\nThis helper function checks if an experimental feature was enabled using\nthe environment variable `TF_TRT_EXPERIMENTAL_FEATURES=feature_1,feature_2`.\n\nArgs:\nfeature_name: Name of the feature being tested for activation.", "source": "github-repos"}
{"code": "def one_or_more(e, delimiter=None):\n    if (delimiter is None):\n        delimiter = (lambda s, grm, pos: (s, Ignore, (pos, pos)))\n    msg = 'Expected one or more of: {}'.format(repr(e))\n\n    def match_one_or_more(s, grm=None, pos=0):\n        start = pos\n        (s, obj, span) = e(s, grm, pos)\n        pos = span[1]\n        data = ([] if (obj is Ignore) else [obj])\n        try:\n            while True:\n                (s, obj, span) = delimiter(s, grm, pos)\n                pos = span[1]\n                if (obj is not Ignore):\n                    data.append(obj)\n                (s, obj, span) = e(s, grm, pos)\n                pos = span[1]\n                if (obj is not Ignore):\n                    data.append(obj)\n        except PegreError:\n            pass\n        return PegreResult(s, data, (start, pos))\n    return match_one_or_more", "docstring": "Create a PEG function to match one or more expressions.\n\nArgs:\ne: the expression to match\ndelimiter: an optional expression to match between the\nprimary *e* matches.", "source": "codesearchnet"}
{"code": "def int64_user_counter(namespace, name, metric, ptransform=None) -> metrics_pb2.MonitoringInfo:\n    labels = create_labels(ptransform=ptransform, namespace=namespace, name=name)\n    if isinstance(metric, int):\n        metric = coders.VarIntCoder().encode(metric)\n    return create_monitoring_info(USER_COUNTER_URN, SUM_INT64_TYPE, metric, labels)", "docstring": "Return the counter monitoring info for the specifed URN, metric and labels.\n\nArgs:\nurn: The URN of the monitoring info/metric.\nmetric: The payload field to use in the monitoring info or an int value.\nptransform: The ptransform id used as a label.", "source": "github-repos"}
{"code": "def onWith(self, evnt, func):\n    self.on(evnt, func)\n    try:\n        (yield self)\n    finally:\n        self.off(evnt, func)", "docstring": "A context manager which can be used to add a callback and remove it when\nusing a ``with`` statement.\n\nArgs:\nevnt (str):         An event name\nfunc (function):    A callback function to receive event tufo", "source": "codesearchnet"}
{"code": "def update(self, instance, validated_data):\n        \n        is_primary = validated_data.pop(\"is_primary\", False)\n\n        instance = super(EmailSerializer, self).update(\n            instance, validated_data\n        )\n\n        if is_primary:\n            instance.set_primary()\n\n        return instance", "docstring": "Update the instance the serializer is bound to.\n\nArgs:\ninstance:\nThe instance the serializer is bound to.\nvalidated_data:\nThe data to update the serializer with.\n\nReturns:\nThe updated instance.", "source": "juraj-google-style"}
{"code": "def join(self, other):\n        r\n        assert self._load == other._load, 'loads must be the same'\n        self._lists.extend(other._lists)\n        self._cumlen.extend([c + self._len for c in other._cumlen])\n        self._len += other._len", "docstring": "r\"\"\"\nArgs:\nother (?):\n\nCommandLine:\npython -m sortedcontainers.sortedlist join2\n\nExample:\n>>> from utool.experimental.dynamic_connectivity import *  # NOQA\n>>> self = EulerTourList([1, 2, 3, 2, 4, 2, 1], load=3)\n>>> other = EulerTourList([0, 5, 9, 5, 0], load=3)\n>>> result = self.join(other)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def _prepare_sample_data(self, submission_type):\n    \n    \n    images = np.random.randint(0, 256,\n                               size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8)\n    for i in range(BATCH_SIZE):\n      Image.fromarray(images[i, :, :, :]).save(\n          os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i)))\n    \n    if submission_type == 'targeted_attack':\n      target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE])\n      target_class_filename = os.path.join(self._sample_input_dir,\n                                           'target_class.csv')\n      with open(target_class_filename, 'w') as f:\n        for i in range(BATCH_SIZE):\n          f.write((IMAGE_NAME_PATTERN + ',{1}\\n').format(i, target_classes[i]))", "docstring": "Prepares sample data for the submission.\n\nArgs:\nsubmission_type: type of the submission.", "source": "juraj-google-style"}
{"code": "def find_layer_idx(model, layer_name):\n    \n    layer_idx = None\n    for idx, layer in enumerate(model.layers):\n        if layer.name == layer_name:\n            layer_idx = idx\n            break\n\n    if layer_idx is None:\n        raise ValueError(\"No layer with name '{}' within the model\".format(layer_name))\n    return layer_idx", "docstring": "Looks up the layer index corresponding to `layer_name` from `model`.\n\nArgs:\nmodel: The `keras.models.Model` instance.\nlayer_name: The name of the layer to lookup.\n\nReturns:\nThe layer index if found. Raises an exception otherwise.", "source": "juraj-google-style"}
{"code": "def ensure_dir(path):\n    \n    os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True)", "docstring": "Create all parent directories of path if they don't exist.\n\nArgs:\npath. Path-like object. Create parent dirs to this path.\n\nReturn:\nNone.", "source": "juraj-google-style"}
{"code": "def _get_num_inputs_outputs(op_type):\n\n    def _is_list_arg(arg):\n        return arg.number_attr or arg.type_list_attr\n\n    def _count_args(arg_defs):\n        for arg in arg_defs:\n            if _is_list_arg(arg):\n                return -1\n        return len(arg_defs)\n    op_def = op_def_registry.get(op_type)\n    if not op_def:\n        return (-1, -1)\n    return (_count_args(op_def.input_arg), _count_args(op_def.output_arg))", "docstring": "Returns (num_inputs, num_outputs).\n\nArgs:\nop_type: String. The type of the Operation. Used to lookup the op in the\nregistry.\n\nReturns:\n(num_inputs, num_outputs), for either num_inputs or num_outputs if the value\ncan't be statically inferred from the OpDef alone or of the OpDef lookup\nfails, -1 is returned.", "source": "github-repos"}
{"code": "def build_sanitiser_node_dict(cfg, sinks_in_file):\n    sanitisers = list()\n    for sink in sinks_in_file:\n        sanitisers.extend(sink.sanitisers)\n    sanitisers_in_file = list()\n    for sanitiser in sanitisers:\n        for cfg_node in cfg.nodes:\n            if (sanitiser in cfg_node.label):\n                sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node))\n    sanitiser_node_dict = dict()\n    for sanitiser in sanitisers:\n        sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes(sanitiser, sanitisers_in_file))\n    return sanitiser_node_dict", "docstring": "Build a dict of string -> TriggerNode pairs, where the string\nis the sanitiser and the TriggerNode is a TriggerNode of the sanitiser.\n\nArgs:\ncfg(CFG): cfg to traverse.\nsinks_in_file(list[TriggerNode]): list of TriggerNodes containing\nthe sinks in the file.\n\nReturns:\nA string -> TriggerNode dict.", "source": "codesearchnet"}
{"code": "def scheduled_sample_prob(ground_truth_x, generated_x, batch_size, scheduled_sample_var):\n    probability_threshold = scheduled_sample_var\n    probability_of_generated = tf.random_uniform([batch_size])\n    return tf.where((probability_of_generated > probability_threshold), generated_x, ground_truth_x)", "docstring": "Probability based scheduled sampling.\n\nArgs:\nground_truth_x: tensor of ground-truth data points.\ngenerated_x: tensor of generated data points.\nbatch_size: batch size\nscheduled_sample_var: probability of choosing from ground_truth.\nReturns:\nNew batch with randomly selected data points.", "source": "codesearchnet"}
{"code": "def fetcher(date=datetime.today(), url_pattern=URL_PATTERN):\n    api_url = (url_pattern % date.strftime('%Y-%m-%d'))\n    headers = {'Referer': 'http:\n    raw_result = requests.get(api_url, headers=headers).json()\n    return raw_result", "docstring": "Fetch json data from n.pl\n\nArgs:\ndate (date) - default today\nurl_patter (string) - default URL_PATTERN\n\nReturns:\ndict - data from api", "source": "codesearchnet"}
{"code": "def connect(filename: str, mode: str='r+', *, validate: bool=True, spec_version: str='2.0.1') -> LoomConnection:\n    return LoomConnection(filename, mode, validate=validate, spec_version=spec_version)", "docstring": "Establish a connection to a .loom file.\n\nArgs:\nfilename:\t\tPath to the Loom file to open\nmode:\t\t\tRead/write mode, 'r+' (read/write) or 'r' (read-only), defaults to 'r+'\nvalidate:\t\tValidate the file structure against the Loom file format specification\nspec_version:\tThe loom file spec version to validate against (e.g. \"2.0.1\" or \"old\")\nReturns:\nA LoomConnection instance.\n\nRemarks:\nThis function should typically be used as a context manager (i.e. inside a ``with``-block):\n\n.. highlight:: python\n.. code-block:: python\n\nimport loompy\nwith loompy.connect(\"mydata.loom\") as ds:\nprint(ds.ca.keys())\n\nThis ensures that the file will be closed automatically when the context block ends\n\nNote: if validation is requested, an exception is raised if validation fails.", "source": "codesearchnet"}
{"code": "def __init__(self, name=None, description=None, hint=None,\n                 allow_failure=False, passes=None, arguments=None):\n        \n        if name:\n            self.name = name\n        if description:\n            self.description = description\n        if hint:\n            self.hint = hint\n\n        self.allow_failure = allow_failure\n        self.passes = passes\n        self.arguments = arguments or {}\n        self.result = None", "docstring": "Initialization method.\n\nArgs:\nallow_failure (bool): still pass if failed or not.\narguments (dict): arguments passed to the check method when run.", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)", "docstring": "Accumulates the metric statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool=False, _out: Any=None, _add_into_out: bool=False) -> Any:\n    if not len(inputs) > 0:\n        raise ValueError('Must provide at least one input')\n    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]\n    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])\n\n    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:\n        if not low_mem:\n            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:\n                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])\n            t = t.reshape(-1, *t.shape[no_batch_dims:])\n        else:\n            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])\n        return t\n    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)\n    prepped_outputs = None\n    if _out is not None:\n        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)\n    flat_batch_dim = 1\n    for d in orig_batch_dims:\n        flat_batch_dim *= d\n    no_chunks = flat_batch_dim \n\n    def _select_chunk(t: torch.Tensor) -> torch.Tensor:\n        return t[i:i + chunk_size] if t.shape[0] != 1 else t\n    i = 0\n    out = prepped_outputs\n    for _ in range(no_chunks):\n        if not low_mem:\n            select_chunk = _select_chunk\n        else:\n            select_chunk = partial(_chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims))\n        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)\n        output_chunk = layer(**chunks)\n        if out is None:\n            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)\n        if isinstance(output_chunk, dict):\n\n            def assign(d1: dict, d2: dict) -> None:\n                for k, v in d1.items():\n                    if isinstance(v, dict):\n                        assign(v, d2[k])\n                    elif _add_into_out:\n                        v[i:i + chunk_size] += d2[k]\n                    else:\n                        v[i:i + chunk_size] = d2[k]\n            assign(out, output_chunk)\n        elif isinstance(output_chunk, tuple):\n            for x1, x2 in zip(out, output_chunk):\n                if _add_into_out:\n                    x1[i:i + chunk_size] += x2\n                else:\n                    x1[i:i + chunk_size] = x2\n        elif isinstance(output_chunk, torch.Tensor):\n            if _add_into_out:\n                out[i:i + chunk_size] += output_chunk\n            else:\n                out[i:i + chunk_size] = output_chunk\n        else:\n            raise TypeError('Not supported')\n        i += chunk_size\n    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)\n    return out", "docstring": "Implements the \"chunking\" procedure described in section 1.11.8.\n\nLayer outputs and inputs are assumed to be simple \"pytrees,\" consisting only of (arbitrarily nested) lists, tuples,\nand dicts with torch.Tensor leaves.\n\nArgs:\nlayer:\nThe layer to be applied chunk-wise\ninputs:\nA (non-nested) dictionary of keyworded inputs. All leaves must be tensors and must share the same batch\ndimensions.\nchunk_size:\nThe number of sub-batches per chunk. If multiple batch dimensions are specified, a \"sub-batch\" is defined\nas a single indexing of all batch dimensions simultaneously (s.t. 
the number of sub-batches is the product\nof the batch dimensions).\nno_batch_dims:\nHow many of the initial dimensions of each input tensor can be considered batch dimensions.\nlow_mem:\nAvoids flattening potentially large input tensors. Unnecessary in most cases, and is ever so slightly\nslower than the default setting.\nReturns:\nThe reassembled output of the layer on the inputs.", "source": "github-repos"}
{"code": "def __init__(self, correction_limit=88., **kwargs):\n        \n        self.correction_limit = correction_limit\n        super(SunZenithCorrector, self).__init__(**kwargs)", "docstring": "Collect custom configuration values.\n\nArgs:\ncorrection_limit (float): Maximum solar zenith angle to apply the\ncorrection in degrees. Pixels beyond this limit have a\nconstant correction applied. Default 88.\nmax_sza (float): Maximum solar zenith angle in degrees that is\nconsidered valid and correctable. Default 95.0.", "source": "juraj-google-style"}
{"code": "def read(self, uri):\n    read_response = self.connect(uri)\n    fedora_graph = rdflib.Graph().parse(data=read_response.read(), format='turtle')\n    return fedora_graph", "docstring": "Method takes uri and creates a RDF graph from Fedora Repository\n\nArgs:\nuri(str): URI of Fedora URI\n\nReturns:\nrdflib.Graph", "source": "codesearchnet"}
{"code": "def get_output_details(self):\n    return [self._get_tensor_details(i, subgraph_index=0) for i in self._interpreter.OutputIndices()]", "docstring": "Gets model output tensor details.\n\nReturns:\nA list in which each item is a dictionary with details about\nan output tensor. The dictionary contains the same fields as\ndescribed for `get_input_details()`.", "source": "github-repos"}
{"code": "def get_containers(self, container_class):\n    with self._store_lock:\n        return self.store.get(container_class.CONTAINER_TYPE, [])", "docstring": "Thread-safe method to retrieve data from the state's store.\n\nArgs:\ncontainer_class: AttributeContainer class used to filter data.\n\nReturns:\nA list of AttributeContainer objects of matching CONTAINER_TYPE.", "source": "codesearchnet"}
{"code": "def load(path):\n    \n\n    importpath = path.replace(\"/\", \".\").replace(\"\\\\\", \".\")\n    if importpath[-3:] == \".py\":\n        importpath = importpath[:-3]\n\n    try:\n        importlib.import_module(importpath)\n    except (ModuleNotFoundError, TypeError):\n        exec(open(path).read())", "docstring": "Helper function that tries to load a filepath (or python module notation)\nas a python module and on failure `exec` it.\n\nArgs:\npath (str): Path or module to load\n\nThe function tries to import `example.module` when either `example.module`,\n`example/module` or `example/module.py` is given.", "source": "juraj-google-style"}
{"code": "def get_workflow(workflow_id: str, workflow_version: str) -> dict:\n    name = 'workflow_definitions:{}:{}'.format(workflow_id, workflow_version)\n    workflow = DB.get_hash_dict(name)\n    workflow['stages'] = ast.literal_eval(workflow['stages'])\n    return workflow", "docstring": "Get a workflow definition from the Configuration Database.\n\nArgs:\nworkflow_id (str): Workflow identifier\nworkflow_version (str): Workflow version\n\nReturns:\ndict, Workflow definition dictionary", "source": "codesearchnet"}
{"code": "def normalize_partial_name(decl):\n    \n    if decl.cache.normalized_partial_name is None:\n        decl.cache.normalized_partial_name = normalize(decl.partial_name)\n    return decl.cache.normalized_partial_name", "docstring": "Cached variant of normalize\n\nArgs:\ndecl (declaration.declaration_t): the declaration\n\nReturns:\nstr: normalized name", "source": "juraj-google-style"}
{"code": "def restore(self, directory=None, file=None):\n        \n        if file is None:\n            file = tf.train.latest_checkpoint(\n                checkpoint_dir=(self.saver_directory if directory is None else directory),\n                \n            )\n        elif directory is None:\n            file = os.path.join(self.saver_directory, file)\n        elif not os.path.isfile(file):\n            file = os.path.join(directory, file)\n\n        \n        \n\n        self.saver.restore(sess=self.session, save_path=file)\n        self.session.run(fetches=self.list_buffer_index_reset_op)", "docstring": "Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is\nrestored. If no checkpoint directory is given, the model's default saver directory is\nused (unless file specifies the entire path).\n\nArgs:\ndirectory: Optional checkpoint directory.\nfile: Optional checkpoint file, or path if directory not given.", "source": "juraj-google-style"}
{"code": "class LabelAggregation(AggregationFn, _AggModelIdMixin, _SourcePredictionMixin):\n\n    def __init__(self, agg_func: Callable[[Iterable[int]], int], agg_model_id: Optional[str]=None, include_source_predictions: bool=False, normal_label: int=DEFAULT_NORMAL_LABEL, outlier_label: int=DEFAULT_OUTLIER_LABEL, missing_label: int=DEFAULT_MISSING_LABEL):\n        self._agg = agg_func\n        self._normal_label = normal_label\n        self._outlier_label = outlier_label\n        self._missing_label = missing_label\n        _AggModelIdMixin.__init__(self, agg_model_id)\n        _SourcePredictionMixin.__init__(self, include_source_predictions)\n\n    def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:\n        \n        result_dict: dict[str, Any] = {}\n        _AggModelIdMixin.add_model_id(self, result_dict)\n        _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions)\n        labels = [prediction.label for prediction in predictions if prediction.label is not None and prediction.label != self._missing_label]\n        if len(labels) > 0:\n            result_dict['label'] = self._agg(labels)\n        elif all(map(lambda x: x.label is None, predictions)):\n            result_dict['label'] = None\n        else:\n            result_dict['label'] = self._missing_label\n        return AnomalyPrediction(**result_dict)", "docstring": "Aggregates anomaly predictions based on their labels.\n\nThis is an abstract base class for `AggregationFn`s that combine multiple\n`AnomalyPrediction` objects into a single `AnomalyPrediction` based on\nthe labels of the input predictions.\n\nArgs:\nagg_func (Callable[[Iterable[int]], int]): A function that aggregates\na collection of anomaly labels (integers) into a single label.\nagg_model_id (Optional[str]): The model id used in aggregated predictions.\nDefaults to None.\ninclude_source_predictions (bool): If True, include the input predictions in\nthe `source_predictions` of the output. Defaults to False.", "source": "github-repos"}
{"code": "def restore_ops(self, reader=None):\n    if self._has_registered_saver():\n        raise ValueError('Unable to run individual checkpoint restore for objects with registered savers.')\n    restore_ops, tensor_saveables, python_positions, _ = self.gather_ops_or_named_saveables()\n    restore_ops.extend(self._checkpoint.restore_saveables(tensor_saveables, python_positions, reader=reader))\n    return restore_ops", "docstring": "Create or fetch restore ops for this object's attributes.\n\nRequires that the `Trackable` Python object has been bound to an object\nID in the checkpoint.\n\nArgs:\nreader: A `CheckpointReader`. If None, a new instance will be created.\n\nReturns:\nA list of operations when graph building, or an empty list when executing\neagerly.", "source": "github-repos"}
{"code": "def get_object_errors(self):\n    if (self._object_errors is None):\n        self._object_errors = [{str(o): o.get_errors()} for o in self.objects() if o.has_error()]\n    return self._object_errors", "docstring": "Gets a list of business error message strings\nfor each of the requested objects that had a business error.\nIf there was no error, returns an empty list\n\nReturns:\nList of strings", "source": "codesearchnet"}
{"code": "def on_epoch_begin(self, epoch, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_epoch_begin(epoch, logs)", "docstring": "Calls the `on_epoch_begin` methods of its callbacks.\n\nThis function should only be called during TRAIN mode.\n\nArgs:\nepoch: Integer, index of epoch.\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def build_album_art_full_uri(self, url):\n        \n        \n        \n        if not url.startswith(('http:', 'https:')):\n            url = 'http:\n        return url", "docstring": "Ensure an Album Art URI is an absolute URI.\n\nArgs:\nurl (str): the album art URI.\n\nReturns:\nstr: An absolute URI.", "source": "juraj-google-style"}
{"code": "def delete(self, roomId):\n    check_type(roomId, basestring, may_be_none=False)\n    self._session.delete(((API_ENDPOINT + '/') + roomId))", "docstring": "Delete a room.\n\nArgs:\nroomId(basestring): The ID of the room to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def list_of_vars(arg_plot):\n    \n    lovs = [[[var for var in svars.split(',') if var]\n             for svars in pvars.split('.') if svars]\n            for pvars in arg_plot.split('-') if pvars]\n    lovs = [[slov for slov in lov if slov] for lov in lovs if lov]\n    return [lov for lov in lovs if lov]", "docstring": "Construct list of variables per plot.\n\nArgs:\narg_plot (str): string with variable names separated with\n``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).\nReturns:\nthree nested lists of str\n\n- variables on the same subplot;\n- subplots on the same figure;\n- figures.", "source": "juraj-google-style"}
{"code": "def confusion_matrix(\n    gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True\n):\n    \n    conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold)\n    gold = arraylike_to_numpy(gold)\n    pred = arraylike_to_numpy(pred)\n    conf.add(gold, pred)\n    mat = conf.compile()\n\n    if normalize:\n        mat = mat / len(gold)\n\n    if pretty_print:\n        conf.display(normalize=normalize)\n\n    return mat", "docstring": "A shortcut method for building a confusion matrix all at once.\n\nArgs:\ngold: an array-like of gold labels (ints)\npred: an array-like of predictions (ints)\nnull_pred: If True, include the row corresponding to null predictions\nnull_gold: If True, include the col corresponding to null gold labels\nnormalize: if True, divide counts by the total number of items\npretty_print: if True, pretty-print the matrix before returning", "source": "juraj-google-style"}
{"code": "def less_equal(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return LessEqual().symbolic_call(x1, x2)\n    return backend.numpy.less_equal(x1, x2)", "docstring": "Return the truth value of `x1 <= x2` element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, element-wise comparison of `x1` and `x2`.", "source": "github-repos"}
{"code": "def rescale(self, image: 'torch.Tensor', scale: float, **kwargs) -> 'torch.Tensor':\n    return image * scale", "docstring": "Rescale an image by a scale factor. image = image * scale.\n\nArgs:\nimage (`torch.Tensor`):\nImage to rescale.\nscale (`float`):\nThe scaling factor to rescale pixel values by.\n\nReturns:\n`torch.Tensor`: The rescaled image.", "source": "github-repos"}
{"code": "def _delete(self, url, data, scope):\n    self._create_session(scope)\n    response = self.session.delete(url, data=data)\n    return (response.status_code, response.text)", "docstring": "Make a DELETE request using the session object to a Degreed endpoint.\n\nArgs:\nurl (str): The url to send a DELETE request to.\ndata (str): The json encoded payload to DELETE.\nscope (str): Must be one of the scopes Degreed expects:\n- `CONTENT_PROVIDER_SCOPE`\n- `COMPLETION_PROVIDER_SCOPE`", "source": "codesearchnet"}
{"code": "def copy_to_device(target_device, source_device='/cpu:0'):\n\n    def _apply_fn(dataset):\n        return _CopyToDeviceDataset(dataset, target_device=target_device, source_device=source_device)\n    return _apply_fn", "docstring": "A transformation that copies dataset elements to the given `target_device`.\n\nArgs:\ntarget_device: The name of a device to which elements will be copied.\nsource_device: The original device on which `input_dataset` will be placed.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def convert_to_layout_rules(x):\n  \n  if isinstance(x, LayoutRules):\n    return x\n  if isinstance(x, str):\n    x = _parse_string_to_list_of_pairs(x)\n  return LayoutRules(x)", "docstring": "Converts input to a LayoutRules.\n\nArgs:\nx: LayoutRules, str, or set-like of string pairs.\n\nReturns:\nLayoutRules.", "source": "juraj-google-style"}
{"code": "def get_experiment_in_group(self, group, bucketing_id):\n    experiment_id = self.bucketer.find_bucket(bucketing_id, group.id, group.trafficAllocation)\n    if experiment_id:\n        experiment = self.config.get_experiment_from_id(experiment_id)\n        if experiment:\n            self.logger.info(('User with bucketing ID \"%s\" is in experiment %s of group %s.' % (bucketing_id, experiment.key, group.id)))\n            return experiment\n    self.logger.info(('User with bucketing ID \"%s\" is not in any experiments of group %s.' % (bucketing_id, group.id)))\n    return None", "docstring": "Determine which experiment in the group the user is bucketed into.\n\nArgs:\ngroup: The group to bucket the user into.\nbucketing_id: ID to be used for bucketing the user.\n\nReturns:\nExperiment if the user is bucketed into an experiment in the specified group. None otherwise.", "source": "codesearchnet"}
{"code": "def oauth2_callback(request):\n    if ('error' in request.GET):\n        reason = request.GET.get('error_description', request.GET.get('error', ''))\n        reason = html.escape(reason)\n        return http.HttpResponseBadRequest('Authorization failed {0}'.format(reason))\n    try:\n        encoded_state = request.GET['state']\n        code = request.GET['code']\n    except KeyError:\n        return http.HttpResponseBadRequest('Request missing state or authorization code')\n    try:\n        server_csrf = request.session[_CSRF_KEY]\n    except KeyError:\n        return http.HttpResponseBadRequest('No existing session for this flow.')\n    try:\n        state = json.loads(encoded_state)\n        client_csrf = state['csrf_token']\n        return_url = state['return_url']\n    except (ValueError, KeyError):\n        return http.HttpResponseBadRequest('Invalid state parameter.')\n    if (client_csrf != server_csrf):\n        return http.HttpResponseBadRequest('Invalid CSRF token.')\n    flow = _get_flow_for_token(client_csrf, request)\n    if (not flow):\n        return http.HttpResponseBadRequest('Missing Oauth2 flow.')\n    try:\n        credentials = flow.step2_exchange(code)\n    except client.FlowExchangeError as exchange_error:\n        return http.HttpResponseBadRequest('An error has occurred: {0}'.format(exchange_error))\n    get_storage(request).put(credentials)\n    signals.oauth2_authorized.send(sender=signals.oauth2_authorized, request=request, credentials=credentials)\n    return shortcuts.redirect(return_url)", "docstring": "View that handles the user's return from OAuth2 provider.\n\nThis view verifies the CSRF state and OAuth authorization code, and on\nsuccess stores the credentials obtained in the storage provider,\nand redirects to the return_url specified in the authorize view and\nstored in the session.\n\nArgs:\nrequest: Django request.\n\nReturns:\nA redirect response back to the return_url.", "source": "codesearchnet"}
{"code": "def run(self, host='localhost', port=8000, shutdown_timeout=60.0, **kwargs):\n    print((('Running service on http:\n    self.config.port = port\n    self.config.host = host\n    try:\n        if self.event_broker:\n            self.event_broker.start()\n            self.loop.run_until_complete(self.announce())\n        http_handler = self.app.make_handler()\n        self._http_server = self.loop.create_server(http_handler, host, port)\n        self._server_handler = self.loop.run_until_complete(self._http_server)\n        self.loop.run_forever()\n    except KeyboardInterrupt:\n        pass\n    finally:\n        try:\n            self.cleanup()\n        except UnboundLocalError:\n            pass\n        self.loop.close()", "docstring": "This function starts the service's network intefaces.\n\nArgs:\nport (int): The port for the http server.", "source": "codesearchnet"}
{"code": "def _FormatExpression(self, frame, expression):\n    (rc, value) = _EvaluateExpression(frame, expression)\n    if (not rc):\n        message = _FormatMessage(value['description']['format'], value['description'].get('parameters'))\n        return (('<' + message) + '>')\n    return self._FormatValue(value)", "docstring": "Evaluates a single watched expression and formats it into a string form.\n\nIf expression evaluation fails, returns error message string.\n\nArgs:\nframe: Python stack frame in which the expression is evaluated.\nexpression: string expression to evaluate.\n\nReturns:\nFormatted expression value that can be used in the log message.", "source": "codesearchnet"}
{"code": "def get_metric_group_infos(self):\n    mg_defs = self.get_metric_group_definitions()\n    mg_infos = []\n    for mg_def in mg_defs:\n        metric_infos = []\n        for (metric_name, metric_type) in mg_def.types:\n            metric_infos.append({'metric-name': metric_name, 'metric-type': metric_type})\n        mg_info = {'group-name': mg_def.name, 'metric-infos': metric_infos}\n        mg_infos.append(mg_info)\n    return mg_infos", "docstring": "Get the faked metric group definitions for this context object\nthat are to be returned from its create operation, in the format\nneeded for the \"Create Metrics Context\" operation response.\n\nReturns:\n\n\"metric-group-infos\" JSON object as described for the \"Create Metrics\nContext \"operation response.", "source": "codesearchnet"}
{"code": "def hwvtep_attach_vlan_vid(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        mac = kwargs.pop('mac')\n        vlan = kwargs.pop('vlan')\n        name_args = dict(name=name, vid=vlan, mac=mac)\n        method_name = 'overlay_gateway_attach_vlan_mac'\n        method_class = self._brocade_tunnels\n        gw_attr = getattr(method_class, method_name)\n        config = gw_attr(**name_args)\n        output = self._callback(config)\n        return output", "docstring": "Identifies exported VLANs in VXLAN gateway configurations.\n\nArgs:\nname (str): overlay_gateway name\nvlan(str):  vlan_id range\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def _allocate_subnets(self, conf):\n        \n        allocated_subnets = []\n        try:\n            for net_spec in conf.get('nets', {}).itervalues():\n                if net_spec['type'] != 'nat':\n                    continue\n\n                gateway = net_spec.get('gw')\n                if gateway:\n                    allocated_subnet = self._subnet_store.acquire(\n                        self.paths.uuid(), gateway\n                    )\n                else:\n                    allocated_subnet = self._subnet_store.acquire(\n                        self.paths.uuid()\n                    )\n                    net_spec['gw'] = str(allocated_subnet.iter_hosts().next())\n\n                allocated_subnets.append(allocated_subnet)\n        except:\n            self._subnet_store.release(allocated_subnets)\n            raise\n        return allocated_subnets, conf", "docstring": "Allocate all the subnets needed by the given configuration spec\n\nArgs:\nconf (dict): Configuration spec where to get the nets definitions\nfrom\n\nReturns:\ntuple(list, dict): allocated subnets and modified conf", "source": "juraj-google-style"}
{"code": "def show_error(self, message):\n    assert isinstance(message, string_types)\n    self.post('error', data=message)", "docstring": "Send an error message to the active client. The new error will be\ndisplayed on any active GUI clients.\n\nArgs:\nmessage (str): Plain-text message to display.\n\nReturns:\nNone\n\n>>> s = _syncthing()\n>>> s.system.show_error('my error msg')\n>>> s.system.errors()[0]\n... # doctest: +ELLIPSIS\nErrorEvent(when=datetime.datetime(...), message='\"my error msg\"')\n>>> s.system.clear_errors()\n>>> s.system.errors()\n[]", "source": "codesearchnet"}
{"code": "def user(self, email):\n        \n        LOG.info(\"Fetching user %s\", email)\n        user_obj = self.user_collection.find_one({'_id': email})\n\n        return user_obj", "docstring": "Fetch a user from the database.\n\nArgs:\nemail(str)\n\nReturns:\nuser_obj(dict)", "source": "juraj-google-style"}
{"code": "def __init__(self, fn):\n    if not callable(fn):\n        raise TypeError('Expected a callable object instead of: %r' % fn)\n    self._fn = fn", "docstring": "Initializes a PartitionFn object wrapping a callable.\n\nArgs:\nfn: A callable object, which should accept the following arguments:\nelement - element to assign to a partition.\nnum_partitions - number of output partitions.\nand may accept additional arguments and side inputs.\n\nRaises:\nTypeError: if fn is not a callable type.", "source": "github-repos"}
{"code": "def find_previous(a, value, index=False, return_distance=False):\n    b = (a - value)\n    i = np.where((b > 0))[0][0]\n    d = ((value - a[(i - 1)]) / (a[i] - a[(i - 1)]))\n    if index:\n        if return_distance:\n            return ((i - 1), d)\n        else:\n            return (i - 1)\n    elif return_distance:\n        return (a[(i - 1)], d)\n    else:\n        return a[(i - 1)]", "docstring": "Find the nearest array value, or index of the array value, before some\ngiven value. Optionally also return the fractional distance of the given\nvalue from that previous value.\n\nArgs:\na (ndarray)\nvalue (float)\nindex (bool): whether to return the index instead of the array value.\nDefault: False.\nreturn_distance(bool): whether to return the fractional distance from\nthe nearest value to the specified value. Default: False.\n\nReturns:\nfloat. The array value (or index, as int) before the specified value.\nIf ``return_distance==True`` then a tuple is returned, where the\nsecond value is the distance.", "source": "codesearchnet"}
{"code": "def bfloat16_activations_var_getter(getter, *args, **kwargs):\n    requested_dtype = kwargs['dtype']\n    if (requested_dtype == tf.bfloat16):\n        kwargs['dtype'] = tf.float32\n    var = getter(*args, **kwargs)\n    if (var.dtype.base_dtype != requested_dtype):\n        var = tf.cast(var, requested_dtype)\n    return var", "docstring": "A custom getter function for float32 parameters and bfloat16 activations.\n\nArgs:\ngetter: custom getter\n*args: arguments\n**kwargs: keyword arguments\nReturns:\nvariables with the correct dtype.\nRaises:\nKeyError: if \"dtype\" is not provided as a kwarg.", "source": "codesearchnet"}
{"code": "def create_view(self, state_root_hash=None):\n        \n        \n        \n        if state_root_hash is None:\n            state_root_hash = INIT_ROOT_KEY\n\n        merkle_db = MerkleDatabase(self._database,\n                                   merkle_root=state_root_hash)\n\n        return StateView(merkle_db)", "docstring": "Creates a StateView for the given state root hash.\n\nArgs:\nstate_root_hash (str): The state root hash of the state view\nto return.  If None, returns the state view for the\nReturns:\nStateView: state view locked to the given root hash.", "source": "juraj-google-style"}
{"code": "def traverse_data(obj, use_numpy=True, buffers=None):\n    if (use_numpy and all((isinstance(el, np.ndarray) for el in obj))):\n        return [transform_array(el, buffers=buffers) for el in obj]\n    obj_copy = []\n    for item in obj:\n        if (type(item) is float):\n            if math.isnan(item):\n                item = 'NaN'\n            elif math.isinf(item):\n                if (item > 0):\n                    item = 'Infinity'\n                else:\n                    item = '-Infinity'\n            obj_copy.append(item)\n        elif isinstance(item, (list, tuple)):\n            obj_copy.append(traverse_data(item))\n        else:\n            obj_copy.append(item)\n    return obj_copy", "docstring": "Recursively traverse an object until a flat list is found.\n\nIf NumPy is available, the flat list is converted to a numpy array\nand passed to transform_array() to handle ``nan``, ``inf``, and\n``-inf``.\n\nOtherwise, iterate through all items, converting non-JSON items\n\nArgs:\nobj (list) : a list of values or lists\nuse_numpy (bool, optional) toggle NumPy as a dependency for testing\nThis argument is only useful for testing (default: True)", "source": "codesearchnet"}
{"code": "def recipe_floodlight_monitor(config, auth_read, dcm_account, sheet):\n    floodlight_monitor(config, {'auth': auth_read, 'account': dcm_account, 'template': {'template': {'sheet': 'https:", "docstring": "Monitor floodlight impressions specified in sheet and send email alerts.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\ndcm_account (string) - Specify an account_id as a number.\nsheet (string) - Full Name or URL to Google Sheet, Floodlight Monitor tab will be added.", "source": "github-repos"}
{"code": "def from_gpx(gpx_track_point):\n        \n        return Point(\n            lat=gpx_track_point.latitude,\n            lon=gpx_track_point.longitude,\n            time=gpx_track_point.time\n        )", "docstring": "Creates a point from GPX representation\n\nArguments:\ngpx_track_point (:obj:`gpxpy.GPXTrackPoint`)\nReturns:\n:obj:`Point`", "source": "juraj-google-style"}
{"code": "def _transform_cur_commands(cur_commands, alias_table=None):\n    transformed = []\n    alias_table = (alias_table if alias_table else get_alias_table())\n    for cmd in cur_commands:\n        if ((cmd in alias_table.sections()) and alias_table.has_option(cmd, 'command')):\n            transformed += alias_table.get(cmd, 'command').split()\n        else:\n            transformed.append(cmd)\n    cur_commands[:] = transformed", "docstring": "Transform any aliases in cur_commands into their respective commands.\n\nArgs:\nalias_table: The alias table.\ncur_commands: current commands typed in the console.", "source": "codesearchnet"}
{"code": "def print_info(self, buf=None, format_=FileFormat.yaml, skip_attributes=None, include_release=False):\n    data = self.validated_data().copy()\n    data.pop('config', None)\n    if self.config:\n        if isinstance(self, Package):\n            config_dict = self.data.get('config')\n        else:\n            config_dict = self.parent.data.get('config')\n        data['config'] = config_dict\n    if (not include_release):\n        skip_attributes = (list((skip_attributes or [])) + list(package_release_keys))\n    buf = (buf or sys.stdout)\n    dump_package_data(data, buf=buf, format_=format_, skip_attributes=skip_attributes)", "docstring": "Print the contents of the package.\n\nArgs:\nbuf (file-like object): Stream to write to.\nformat_ (`FileFormat`): Format to write in.\nskip_attributes (list of str): List of attributes to not print.\ninclude_release (bool): If True, include release-related attributes,\nsuch as 'timestamp' and 'changelog'", "source": "codesearchnet"}
{"code": "def _subsample_labels(self, label):\n    pos_idx, neg_idx = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0)\n    label.fill_(-1)\n    label.scatter_(0, pos_idx, 1)\n    label.scatter_(0, neg_idx, 0)\n    return label", "docstring": "Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value\n(-1) for all elements that are not included in the sample.\n\nArgs:\nlabels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned.", "source": "github-repos"}
{"code": "def sign(x):\n    return math_ops.sign(x)", "docstring": "Element-wise sign.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def reconstruct_text(tokens: List[Token]) -> str:\n    return ''.join([x.text_with_ws for x in tokens])", "docstring": "Given a list of tokens, reconstruct the original text with as much fidelity as possible.\n\nArgs:\n[tokens]:\n\nReturns: a string.", "source": "codesearchnet"}
{"code": "def initialize_plugs(self, plug_types=None):\n    types = (plug_types if (plug_types is not None) else self._plug_types)\n    for plug_type in types:\n        plug_logger = self.logger.getChild(plug_type.__name__)\n        if (plug_type in self._plugs_by_type):\n            continue\n        try:\n            if (not issubclass(plug_type, BasePlug)):\n                raise InvalidPlugError(('Plug type \"%s\" is not an instance of BasePlug' % plug_type))\n            if (plug_type.logger != _LOG):\n                raise InvalidPlugError('Do not override \"logger\" in your plugs.', plug_type)\n            plug_type.logger = plug_logger\n            try:\n                plug_instance = plug_type()\n            finally:\n                plug_type.logger = _LOG\n            if (plug_instance.logger != _LOG):\n                raise InvalidPlugError('Do not set \"self.logger\" in __init__ in your plugs', plug_type)\n            else:\n                plug_instance.logger = plug_logger\n        except Exception:\n            plug_logger.exception('Exception instantiating plug type %s', plug_type)\n            self.tear_down_plugs()\n            raise\n        self.update_plug(plug_type, plug_instance)", "docstring": "Instantiate required plugs.\n\nInstantiates plug types and saves the instances in self._plugs_by_type for\nuse in provide_plugs().\n\nArgs:\nplug_types: Plug types may be specified here rather than passed\ninto the constructor (this is used primarily for unit testing\nphases).", "source": "codesearchnet"}
{"code": "def Instance(reactor=None):\n        \n        if NodeLeader._LEAD is None:\n            NodeLeader._LEAD = NodeLeader(reactor)\n        return NodeLeader._LEAD", "docstring": "Get the local node instance.\n\nArgs:\nreactor: (optional) custom reactor to use in NodeLeader.\n\nReturns:\nNodeLeader: instance.", "source": "juraj-google-style"}
{"code": "def write(self, noautocmd=False):\n    cmd = ('noautocmd write' if noautocmd else 'write')\n    self._vim.command(cmd)", "docstring": "Writes the file of the current buffer.\n\nArgs:\nnoautocmd (bool): If true, write will skip autocommands.\n\nTodo:\nWe should consider whether ``SourceFileInfo`` can replace most\nusage of noautocmd. See #298", "source": "codesearchnet"}
{"code": "def __init__(self, types=None, capabilities=None, max_groups1=None,\n                 max_groups2=None, max_groups3=None, max_groups4=None,\n                 actions1=None, actions2=None, actions3=None, actions4=None):\n        \n        super().__init__()\n        self.types = types\n        self.capabilities = capabilities\n        self.max_groups1 = max_groups1\n        self.max_groups2 = max_groups2\n        self.max_groups3 = max_groups3\n        self.max_groups4 = max_groups4\n        self.actions1 = actions1\n        self.actions2 = actions2\n        self.actions3 = actions3\n        self.actions4 = actions4", "docstring": "Create a GroupFeatures with the optional parameters below.\n\nArgs:\ntypes: Bitmap of OFPGT_* values supported.\ncapabilities: Bitmap of OFPGFC_* capability supported.\nmax_groups: 4-position array; Maximum number of groups for each\ntype.\nactions: 4-position array; Bitmaps of OFPAT_* that are supported.", "source": "juraj-google-style"}
{"code": "def visit_membership(self, relation: _evaluation.MembershipRelationNode) -> Any:\n    lhs_result = self.visit(relation.left)\n    rhs_result = self.visit(relation.right)\n    in_lhs = lhs_result if isinstance(relation, _evaluation.InNode) else rhs_result\n    in_rhs = rhs_result if isinstance(relation, _evaluation.InNode) else lhs_result\n    sql_expr = f'({in_lhs.as_operand()})\\nIN ({in_rhs.as_operand()})'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_expr, _sql_data_type=_sql_data_types.Boolean, _sql_alias='mem_'), from_part=None)", "docstring": "Translates a FHIRPath membership relation to Standard SQL.\n\nFor the `IN` relation, the LHS operand is assumed to be a collection of a\nsingle value. For 'CONTAINS', the RHS operand is assumed to be a collection\nof a single value.\n\nArgs:\nrelation: The FHIRPath AST `MembershipRelation` node.\n\nReturns:\nA compiled Standard SQL expression.", "source": "github-repos"}
{"code": "def load_checkpoint(ckpt_dir_or_file):\n    filename = _get_checkpoint_filename(ckpt_dir_or_file)\n    if filename is None:\n        raise ValueError(\"Couldn't find 'checkpoint' file or checkpoints in given directory %s\" % ckpt_dir_or_file)\n    return py_checkpoint_reader.NewCheckpointReader(filename)", "docstring": "Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.\n\nIf `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,\nreader for the latest checkpoint is returned.\n\nExample usage:\n\n```python\nimport tensorflow as tf\na = tf.Variable(1.0)\nb = tf.Variable(2.0)\nckpt = tf.train.Checkpoint(var_list={'a': a, 'b': b})\nckpt_path = ckpt.save('tmp-ckpt')\nreader= tf.train.load_checkpoint(ckpt_path)\nprint(reader.get_tensor('var_list/a/.ATTRIBUTES/VARIABLE_VALUE'))  # 1.0\n```\n\nArgs:\nckpt_dir_or_file: Directory with checkpoints file or path to checkpoint\nfile.\n\nReturns:\n`CheckpointReader` object.\n\nRaises:\nValueError: If `ckpt_dir_or_file` resolves to a directory with no\ncheckpoints.", "source": "github-repos"}
{"code": "def SetHeaders(self, soap_headers, http_headers):\n    \n    self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers)", "docstring": "Set the headers for the underlying client.\n\nArgs:\nsoap_headers: A SOAP element for the SOAP headers.\nhttp_headers: A dictionary for the http headers.", "source": "juraj-google-style"}
{"code": "def setup_modules(self, args):\n\n    def _setup_module_thread(module_description):\n        \"Calls the module's setup() function and sets an Event object for it.\\n\\n      Args:\\n        module_description (dict): Corresponding recipe module description.\\n      \"\n        new_args = utils.import_args_from_dict(module_description['args'], vars(args), self.config)\n        module = self._module_pool[module_description['name']]\n        try:\n            module.setup(**new_args)\n        except Exception as error:\n            self.add_error('An unknown error occurred: {0!s}\\nFull traceback:\\n{1:s}'.format(error, traceback.format_exc()), critical=True)\n        self.events[module_description['name']] = threading.Event()\n        self.cleanup()\n    threads = []\n    for module_description in self.recipe['modules']:\n        t = threading.Thread(target=_setup_module_thread, args=(module_description,))\n        threads.append(t)\n        t.start()\n    for t in threads:\n        t.join()\n    self.check_errors(is_global=True)", "docstring": "Performs setup tasks for each module in the module pool.\n\nThreads declared modules' setup() functions. Takes CLI arguments into\naccount when replacing recipe parameters for each module.\n\nArgs:\nargs: Command line arguments that will be used to replace the parameters\ndeclared in the recipe.", "source": "codesearchnet"}
{"code": "def haversine(px, py, r=r_mm):\n    \n    lat1, lon1 = px\n    lat2, lon2 = py\n\n    dlat = math.radians(lat2 - lat1)\n    dlon = math.radians(lon2 - lon1)\n    lat1 = math.radians(lat1)\n    lat2 = math.radians(lat2)\n\n    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n    c = 2 * math.asin(math.sqrt(a))\n\n    return c * r", "docstring": "Calculate the haversine distance between two points\ndefined by (lat,lon) tuples.\n\nArgs:\npx ((float,float)): lat/long position 1\npy ((float,float)): lat/long position 2\nr (float): Radius of sphere\n\nReturns:\n(int):  Distance in mm.", "source": "juraj-google-style"}
{"code": "def __init__(self, exit_node: tensor_lib.Tensor, pfor_ops: List[ops.Operation], fallback_to_while_loop: bool, pfor_config: 'PForConfig'):\n    self._fallback_to_while_loop = fallback_to_while_loop\n    self._pfor_config = pfor_config\n    self._pfor_ops = set(pfor_ops)\n    self._pfor_op_ids = set((x._id for x in pfor_ops))\n    assert isinstance(exit_node, tensor_lib.Tensor)\n    self._while_context = exit_node.op._get_control_flow_context()\n    assert isinstance(self._while_context, control_flow_ops.WhileContext)\n    self._context_name = self._while_context.name\n    self._condition = self._while_context.pivot.op.inputs[0]\n    self._is_inside_loop = self.op_is_inside_loop(self._condition.op)\n    if self._is_inside_loop:\n        for e in self._while_context.loop_exits:\n            assert self.op_is_inside_loop(e.op)\n    self._exit_switches = []\n    self._body_outputs = []\n    self._next_iter_control_inputs = []\n    self._enter_merges = []\n    self._outputs = []\n    self._enters = []\n    self._direct_enters = []\n    for e in self._while_context.loop_exits:\n        self._outputs.append(e.op.outputs[0])\n        switch = e.op.inputs[0].op\n        assert switch.type == 'Switch', switch\n        self._exit_switches.append(switch)\n        merge = switch.inputs[0].op\n        assert merge.type == 'Merge', merge\n        self._enter_merges.append(merge)\n        enter = merge.inputs[0].op\n        assert enter.type == 'Enter', enter\n        self._enters.append(enter.outputs[0])\n        next_iter = merge.inputs[1].op\n        assert next_iter.type == 'NextIteration', next_iter\n        self._body_outputs.append(next_iter.inputs[0])\n        self._next_iter_control_inputs.append(next_iter.control_inputs)\n    self._is_stateful = False\n    for op in ops.get_default_graph().get_operations():\n        control_flow_context = op._get_control_flow_context()\n        if control_flow_context is None:\n            continue\n        if control_flow_context.name == self._context_name:\n            self._is_stateful |= _is_stateful_pfor_op(op)\n            if op.type == 'Enter':\n                output = op.outputs[0]\n                if output not in self._enters:\n                    if output.dtype in (dtypes.resource, dtypes.variant):\n                        if output not in self._direct_enters:\n                            self._direct_enters.append(output)\n                    else:\n                        self._enters.append(output)", "docstring": "Initializer.\n\nArgs:\nexit_node: A tensor output from the while_loop.\npfor_ops: list of ops inside the current pfor loop.\nfallback_to_while_loop: If True, fallback to while loop when conversion of\nan op is not supported\npfor_config: PForConfig object used while constructing loop body.", "source": "github-repos"}
{"code": "def __init__(self, key_value_pairs):\n        \n        \n        self._dict = OrderedDict()\n        for key, value in key_value_pairs:\n            if key not in self._dict:\n                self._dict[key] = []\n            self._dict[key].append(value)\n\n        \n        for key, value in iteritems(self._dict):\n            grouping = Grouping(key, value)\n            self._dict[key] = grouping\n            \n        super(Lookup, self).__init__(self._dict)", "docstring": "Construct a Lookup with a sequence of (key, value) tuples.\n\nArgs:\nkey_value_pairs:\nAn iterable over 2-tuples each containing a key, value pair.", "source": "juraj-google-style"}
{"code": "def clone(self, *args, **overrides):\n    clone = super(Layout, self).clone(*args, **overrides)\n    clone._max_cols = self._max_cols\n    return clone", "docstring": "Clones the Layout, overriding data and parameters.\n\nArgs:\ndata: New data replacing the existing data\nshared_data (bool, optional): Whether to use existing data\nnew_type (optional): Type to cast object to\n*args: Additional arguments to pass to constructor\n**overrides: New keyword arguments to pass to constructor\n\nReturns:\nCloned Layout object", "source": "codesearchnet"}
{"code": "def list_fastboot_devices():\n    out = fastboot.FastbootProxy().devices()\n    return parse_device_list(out)", "docstring": "List all android devices connected to the computer that are in in\nfastboot mode. These are detected by fastboot.\n\nThis function doesn't raise any error if `fastboot` binary doesn't exist,\nbecause `FastbootProxy` itself doesn't raise any error.\n\nReturns:\nA list of android device serials. Empty if there's none.", "source": "github-repos"}
{"code": "def set_user(self, user):\n        \n        self.session['user_id'] = user.key\n        self.session['user_data'] = user.clean_value()\n        role = self.get_role()\n        \n        \n        self.session['role_id'] = role.key\n        self.current.role_id = role.key\n        self.current.user_id = user.key\n        \n        self.session['permissions'] = role.get_permissions()", "docstring": "Writes user data to session.\n\nArgs:\nuser: User object", "source": "juraj-google-style"}
{"code": "def get_search_space(ss_indicator):\n    info = nats_bench.search_space_info('nats-bench', ss_indicator)\n    if ss_indicator == 'tss':\n        total = info['num_nodes'] * (info['num_nodes'] - 1) \n        return model_tss_spc(pg.sublist_of(total, info['op_names'], choices_distinct=False), info['num_nodes'])\n    elif ss_indicator == 'sss':\n        return model_sss_spc(pg.sublist_of(info['num_layers'], info['candidates'], choices_distinct=False))", "docstring": "The default search space in NATS-Bench.\n\nArgs:\nss_indicator: tss or sss, indicating the topology or size search space.\n\nReturns:\nA hyper model object that repesents a search space.", "source": "github-repos"}
{"code": "def find_existing_record(env, zone_id, dns_name, check_key=None, check_value=None):\n    client = boto3.Session(profile_name=env).client('route53')\n    pager = client.get_paginator('list_resource_record_sets')\n    existingrecord = None\n    for rset in pager.paginate(HostedZoneId=zone_id):\n        for record in rset['ResourceRecordSets']:\n            if check_key:\n                if ((record['Name'].rstrip('.') == dns_name) and (record.get(check_key) == check_value)):\n                    LOG.info('Found existing record: %s', record)\n                    existingrecord = record\n                    break\n    return existingrecord", "docstring": "Check if a specific DNS record exists.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\ndns_name (str): FQDN of application's dns entry to add/update.\ncheck_key(str): Key to look for in record. Example: \"Type\"\ncheck_value(str): Value to look for with check_key. Example: \"CNAME\"\n\nReturns:\njson: Found Record. Returns None if no record found", "source": "codesearchnet"}
{"code": "def __init__(self, parameter_name, value, type_name):\n    \n    super(BasicTypeParameterError, self).__init__(parameter_name, value)\n    self.type_name = type_name", "docstring": "Constructor for BasicTypeParameterError.\n\nArgs:\nparameter_name: String; the name of the parameter which had a value\nrejected.\nvalue: The actual value passed in for the enum. Usually string.\ntype_name: Descriptive name of the data type expected.", "source": "juraj-google-style"}
{"code": "def __init__(self, query_builder, field):\n    \n    self._field = field\n    self._query_builder = query_builder\n    self._awql = None", "docstring": "Creates the WHERE builder with specified query builder and field.\n\nThis class should be instantiated through _QueryBuilder.Where. Don't call\nthis constructor directly.\n\nArgs:\nquery_builder: The query builder that this WHERE builder links to.\nfield: The field to be used in the WHERE condition.\n\nReturns:\nThe WHERE builder.", "source": "juraj-google-style"}
{"code": "def _get_css_files(cls, extra_files):\n        \n        packager = Packager()\n        css_packages = getattr(cls, 'css_packages', {})\n\n        return dict(\n            (media_target,\n             cls._get_media_files(packager=packager,\n                                  media_packages=media_packages,\n                                  media_type='css',\n                                  extra_files=extra_files.get(media_target,\n                                                              [])))\n            for media_target, media_packages in six.iteritems(css_packages)\n        )", "docstring": "Return all CSS files from the Media class.\n\nArgs:\nextra_files (dict):\nThe contents of the Media class's original :py:attr:`css`\nattribute, if one was provided.\n\nReturns:\ndict:\nThe CSS media types and files to return for the :py:attr:`css`\nattribute.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, session, output_file,\n      storage_type=definitions.STORAGE_TYPE_SESSION, task=None):\n    \n    super(StorageFileWriter, self).__init__(\n        session, storage_type=storage_type, task=task)\n    self._merge_task_storage_path = ''\n    self._output_file = output_file\n    self._processed_task_storage_path = ''\n    self._storage_file = None\n    self._task_storage_path = None", "docstring": "Initializes a storage writer.\n\nArgs:\nsession (Session): session the storage changes are part of.\noutput_file (str): path to the output file.\nstorage_type (Optional[str]): storage type.\ntask(Optional[Task]): task.", "source": "juraj-google-style"}
{"code": "def plugin_privileges(self, name):\n    params = {'remote': name}\n    headers = {}\n    (registry, repo_name) = auth.resolve_repository_name(name)\n    header = auth.get_config_header(self, registry)\n    if header:\n        headers['X-Registry-Auth'] = header\n    url = self._url('/plugins/privileges')\n    return self._result(self._get(url, params=params, headers=headers), True)", "docstring": "Retrieve list of privileges to be granted to a plugin.\n\nArgs:\nname (string): Name of the remote plugin to examine. The\n``:latest`` tag is optional, and is the default if omitted.\n\nReturns:\nA list of dictionaries representing the plugin's\npermissions", "source": "codesearchnet"}
{"code": "def save_image(figure, filename):\n    \n    path = os.path.join(IMAGES_DIR, filename)\n    figure.savefig(path, bbox_inches=\"tight\")\n    plt.close(figure)", "docstring": "Save an image to the docs images directory.\n\nArgs:\nfilename (str): The name of the file (not containing\ndirectory info).", "source": "juraj-google-style"}
{"code": "def _ParseOrMerge(self, lines, message):\n    tokenizer = Tokenizer(lines)\n    while (not tokenizer.AtEnd()):\n        self._MergeField(tokenizer, message)", "docstring": "Converts a text representation of a protocol message into a message.\n\nArgs:\nlines: Lines of a message's text representation.\nmessage: A protocol buffer message to merge into.\n\nRaises:\nParseError: On text parsing problems.", "source": "codesearchnet"}
{"code": "def is_greater(a,b):\n    \n    \n    a_chrom = CHROM_TO_INT.get(a.chrom,0)\n    b_chrom = CHROM_TO_INT.get(b.chrom,0)\n    \n    if (a_chrom == 0 or b_chrom == 0):\n        return False\n    \n    if a_chrom > b_chrom:\n        return True\n    \n    if a_chrom == b_chrom:\n        if a.pos > b.pos:\n            return True\n    \n    return False", "docstring": "Check if position a is greater than position b\nThis will look at chromosome and position.\n\nFor example a position where chrom = 2 and pos = 300 is greater than a position where\nchrom = 1 and pos = 1000\n\nIf any of the chromosomes is outside [1-22,X,Y,MT] we can not say which is biggest.\n\nArgs:\na,b(Position)\n\nReturns:\nbool: True if a is greater than b", "source": "juraj-google-style"}
{"code": "def log_first_n(level, msg, n, *args):\n    count = _GetNextLogCountPerToken(_GetFileAndLine())\n    log_if(level, msg, count < n, *args)", "docstring": "Log 'msg % args' at level 'level' only first 'n' times.\n\nNot threadsafe.\n\nArgs:\nlevel: The level at which to log.\nmsg: The message to be logged.\nn: The number of times this should be called before it is logged.\n*args: The args to be substituted into the msg.", "source": "github-repos"}
{"code": "def _uniquify_fetches(fetch_mappers):\n    unique_fetches = []\n    value_indices = []\n    seen_fetches = {}\n    for m in fetch_mappers:\n        m_value_indices = []\n        for f in m.unique_fetches():\n            j = seen_fetches.get(id(f))\n            if j is None:\n                j = len(seen_fetches)\n                seen_fetches[id(f)] = j\n                unique_fetches.append(f)\n            m_value_indices.append(j)\n        value_indices.append(m_value_indices)\n    return (unique_fetches, value_indices)", "docstring": "Uniquifies fetches from a list of fetch_mappers.\n\nThis is a utility function used by _ListFetchMapper and _DictFetchMapper.  It\ngathers all the unique fetches from a list of mappers and builds a list\ncontaining all of them but without duplicates (unique_fetches).\n\nIt also returns a 2-D list of integers (values_indices) indicating at which\nindex in unique_fetches the fetches of the mappers are located.\n\nThis list is as follows:\nvalues_indices[mapper_index][mapper_fetch_index] = unique_fetches_index\n\nArgs:\nfetch_mappers: list of fetch mappers.\n\nReturns:\nA list of fetches.\nA 2-D list of integers.", "source": "github-repos"}
{"code": "def Log(self, frame):\n    \n    \n    if not self._log_message:\n      return {'isError': True,\n              'description': {'format': LOG_ACTION_NOT_SUPPORTED}}\n\n    if self._quota_recovery_start_time:\n      ms_elapsed = (time.time() - self._quota_recovery_start_time) * 1000\n      if ms_elapsed > self.quota_recovery_ms:\n        \n        self._quota_recovery_start_time = None\n      else:\n        \n        return\n\n    \n    message = 'LOGPOINT: ' + _FormatMessage(\n        self._definition.get('logMessageFormat', ''),\n        self._EvaluateExpressions(frame))\n\n    line = self._definition['location']['line']\n    cdbg_logging_location = (NormalizePath(frame.f_code.co_filename), line,\n                             _GetFrameCodeObjectName(frame))\n\n    if native.ApplyDynamicLogsQuota(len(message)):\n      self._log_message(message)\n    else:\n      self._quota_recovery_start_time = time.time()\n      self._log_message(DYNAMIC_LOG_OUT_OF_QUOTA)\n    del cdbg_logging_location\n    return None", "docstring": "Captures the minimal application states, formats it and logs the message.\n\nArgs:\nframe: Python stack frame of breakpoint hit.\n\nReturns:\nNone on success or status message on error.", "source": "juraj-google-style"}
{"code": "def no_results(channel):\n    \n\n    gui = ui_embed.UI(\n        channel,\n        \"No results\",\n        \":c\",\n        modulename=modulename,\n        colour=0xFF8800\n    )\n\n    return gui", "docstring": "Creates an embed UI for when there were no results\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def FlatbufferToDict(fb, preserve_as_numpy):\n    if isinstance(fb, int) or isinstance(fb, float) or isinstance(fb, str):\n        return fb\n    elif hasattr(fb, '__dict__'):\n        result = {}\n        for attribute_name in dir(fb):\n            attribute = fb.__getattribute__(attribute_name)\n            if not callable(attribute) and attribute_name[0] != '_':\n                snake_name = CamelCaseToSnakeCase(attribute_name)\n                preserve = True if attribute_name == 'buffers' else preserve_as_numpy\n                result[snake_name] = FlatbufferToDict(attribute, preserve)\n        return result\n    elif isinstance(fb, np.ndarray):\n        return fb if preserve_as_numpy else fb.tolist()\n    elif hasattr(fb, '__len__'):\n        return [FlatbufferToDict(entry, preserve_as_numpy) for entry in fb]\n    else:\n        return fb", "docstring": "Converts a hierarchy of FB objects into a nested dict.\n\nWe avoid transforming big parts of the flat buffer into python arrays. This\nspeeds conversion from ten minutes to a few seconds on big graphs.\n\nArgs:\nfb: a flat buffer structure. (i.e. ModelT)\npreserve_as_numpy: true if all downstream np.arrays should be preserved.\nfalse if all downstream np.array should become python arrays\nReturns:\nA dictionary representing the flatbuffer rather than a flatbuffer object.", "source": "github-repos"}
{"code": "def can_handle(x, y=None):\n    raise NotImplementedError", "docstring": "Whether the current DataAdapter could handle the input x and y.\n\nStructure wise, x and y can be single object, or list of objects if there\nmultiple input/output, or dictionary of objects when the intput/output are\nnamed.\n\nArgs:\nx: input features.\ny: target labels. Note that y could be None in the case of prediction.\n\nReturns:\nboolean", "source": "github-repos"}
{"code": "def run_local(self, commands):\n        \n        process = subprocess.Popen(\n            commands.get('cli_command'),\n            shell=self.shell,\n            stdin=subprocess.PIPE,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n        )\n\n        out, err = process.communicate()\n\n        \n        self.run_display_app_output(out)\n        self.run_display_app_errors(err)\n\n        \n        return self.run_exit_code(process.returncode)", "docstring": "Run the App on local system.\n\nArgs:\ncommands (dict): A dictionary of the CLI commands.\n\nReturns:\nint: The exit code of the subprocess command.", "source": "juraj-google-style"}
{"code": "def add_subtask(self, subtask):\n    if self.stopped:\n        raise InternalError('Cannot add a subtask to a parent that is already stopped')\n    if (not isinstance(subtask, BackgroundTask)):\n        raise ArgumentError('Subtasks must inherit from BackgroundTask, task={}'.format(subtask))\n    if (subtask._loop != self._loop):\n        raise ArgumentError('Subtasks must run in the same BackgroundEventLoop as their parent', subtask=subtask, parent=self)\n    self.subtasks.append(subtask)", "docstring": "Link a subtask to this parent task.\n\nThis will cause stop() to block until the subtask has also\nfinished.  Calling stop will not directly cancel the subtask.\nIt is expected that your finalizer for this parent task will\ncancel or otherwise stop the subtask.\n\nArgs:\nsubtask (BackgroundTask): Another task that will be stopped\nwhen this task is stopped.", "source": "codesearchnet"}
{"code": "def init(module_paths, work_db, config):\n    \n    operator_names = cosmic_ray.plugins.operator_names()\n    work_db.set_config(config=config)\n\n    work_db.clear()\n\n    for module_path in module_paths:\n        module_ast = get_ast(\n            module_path, python_version=config.python_version)\n\n        for op_name in operator_names:\n            operator = get_operator(op_name)(config.python_version)\n            visitor = WorkDBInitVisitor(module_path, op_name, work_db,\n                                        operator)\n            visitor.walk(module_ast)\n\n    apply_interceptors(work_db, config.sub('interceptors').get('enabled', ()))", "docstring": "Clear and initialize a work-db with work items.\n\nAny existing data in the work-db will be cleared and replaced with entirely\nnew work orders. In particular, this means that any results in the db are\nremoved.\n\nArgs:\nmodule_paths: iterable of pathlib.Paths of modules to mutate.\nwork_db: A `WorkDB` instance into which the work orders will be saved.\nconfig: The configuration for the new session.", "source": "juraj-google-style"}
{"code": "def delete(self, project_id):\n        \n        self.logger.debug('Deleting project by id: ' + project_id)\n        url = '%(base_url)s/%(project_id)s' % {\n            'base_url': self.base_url, 'project_id': project_id\n        }\n        r = self.gbdx_connection.delete(url)\n        r.raise_for_status()", "docstring": "Deletes a project by id\n\nArgs:\nproject_id: The project id to delete\n\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def enum_from_yaml(cls: Type[T_EnumFromYAML], constructor: Constructor, node: ruamel.yaml.nodes.ScalarNode) -> T_EnumFromYAML:\n    return cls[node.value]", "docstring": "Decode YAML representation.\n\nThis is a mixin method for reading enum values from YAML. It needs to be added to the enum\nas a classmethod. See the module docstring for further information on this approach and how\nto implement it.\n\nNote:\nThis method assumes that the name of the enumeration value was stored as a scalar node.\n\nArgs:\nconstructor: Constructor from the YAML object.\nnode: Scalar node extracted from the YAML being read.\nReturns:\nThe constructed YAML value from the name of the enumerated value.", "source": "codesearchnet"}
{"code": "def _validate_state_spec(cell_state_sizes, init_state_specs):\n    validation_error = ValueError('An `initial_state` was passed that is not compatible with `cell.state_size`. Received `state_spec`={}; however `cell.state_size` is {}'.format(init_state_specs, cell_state_sizes))\n    flat_cell_state_sizes = nest.flatten(cell_state_sizes)\n    flat_state_specs = nest.flatten(init_state_specs)\n    if len(flat_cell_state_sizes) != len(flat_state_specs):\n        raise validation_error\n    for cell_state_spec, cell_state_size in zip(flat_state_specs, flat_cell_state_sizes):\n        if not tensor_shape.TensorShape(cell_state_spec.shape[1:]).is_compatible_with(tensor_shape.TensorShape(cell_state_size)):\n            raise validation_error", "docstring": "Validate the state spec between the initial_state and the state_size.\n\nArgs:\ncell_state_sizes: list, the `state_size` attribute from the cell.\ninit_state_specs: list, the `state_spec` from the initial_state that is\npassed in `call()`.\n\nRaises:\nValueError: When initial state spec is not compatible with the state size.", "source": "github-repos"}
{"code": "def create_chebyshev_samples(order, dim=1):\n    x_data = ((0.5 * numpy.cos(((numpy.arange(order, 0, (- 1)) * numpy.pi) / (order + 1)))) + 0.5)\n    x_data = chaospy.quad.combine(([x_data] * dim))\n    return x_data.T", "docstring": "Chebyshev sampling function.\n\nArgs:\norder (int):\nThe number of samples to create along each axis.\ndim (int):\nThe number of dimensions to create samples for.\n\nReturns:\nsamples following Chebyshev sampling scheme mapped to the\n``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.", "source": "codesearchnet"}
{"code": "def dump(voevent, file, pretty_print=True, xml_declaration=True):\n    \n    file.write(dumps(voevent, pretty_print, xml_declaration))", "docstring": "Writes the voevent to the file object.\n\ne.g.::\n\nwith open('/tmp/myvoevent.xml','wb') as f:\nvoeventparse.dump(v, f)\n\nArgs:\nvoevent(:class:`Voevent`): Root node of the VOevent etree.\nfile (io.IOBase): An open (binary mode) file object for writing.\npretty_print\npretty_print(bool): See :func:`dumps`\nxml_declaration(bool): See :func:`dumps`", "source": "juraj-google-style"}
{"code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    if task_type is not None and task_id is not None:\n        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)\n    return ''", "docstring": "Returns the master address to use when creating a session.\n\nYou must have set the task_type and task_id object properties before\ncalling this function, or pass in the `task_type` and `task_id`\nparameters when using this function. If you do both, the function parameters\nwill override the object properties.\n\nNote: this is only useful for TensorFlow 1.x.\n\nArgs:\ntask_type: (Optional) The type of the TensorFlow task of the master.\ntask_id: (Optional) The index of the TensorFlow task of the master.\nrpc_layer: (Optional) The RPC protocol for the given cluster.\n\nReturns:\nThe name or URL of the session master.", "source": "github-repos"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--viper-hash', '--viper_hash', dest='viper_hash', type=str,\n        action='store', choices=viper.ViperAnalyzer.SUPPORTED_HASHES,\n        default=cls._DEFAULT_HASH, metavar='HASH', help=(\n            'Type of hash to use to query the Viper server, the default is: '\n            '{0:s}. Supported options: {1:s}').format(\n                cls._DEFAULT_HASH, ', '.join(\n                    viper.ViperAnalyzer.SUPPORTED_HASHES)))\n\n    argument_group.add_argument(\n        '--viper-host', '--viper_host', dest='viper_host', type=str,\n        action='store', default=cls._DEFAULT_HOST, metavar='HOST',\n        help=(\n            'Hostname of the Viper server to query, the default is: '\n            '{0:s}'.format(cls._DEFAULT_HOST)))\n\n    argument_group.add_argument(\n        '--viper-port', '--viper_port', dest='viper_port', type=int,\n        action='store', default=cls._DEFAULT_PORT, metavar='PORT', help=(\n            'Port of the Viper server to query, the default is: {0:d}.'.format(\n                cls._DEFAULT_PORT)))\n\n    argument_group.add_argument(\n        '--viper-protocol', '--viper_protocol', dest='viper_protocol',\n        type=str, choices=viper.ViperAnalyzer.SUPPORTED_PROTOCOLS,\n        action='store', default=cls._DEFAULT_PROTOCOL, metavar='PROTOCOL',\n        help=(\n            'Protocol to use to query Viper, the default is: {0:s}. '\n            'Supported options: {1:s}').format(\n                cls._DEFAULT_PROTOCOL, ', '.join(\n                    viper.ViperAnalyzer.SUPPORTED_PROTOCOLS)))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):\n    batch_size, num_patches = (key_value_states.shape[0], key_value_states.shape[1])\n    if num_patches not in self.patch_to_query_dict.keys():\n        raise KeyError(f'Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}.')\n    query_num = self.patch_to_query_dict[num_patches]\n    queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)\n    if attn_mask is not None:\n        attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)\n        attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)\n    attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)\n    out = self.feed_forward(self.layer_norm(attention_out))\n    return out", "docstring": "Forward pass of the Projector module.\n\nArgs:\nkey_value_states (`torch.Tensor`):\nInput tensor of shape (batch_size, num_patches, kv_dim).\nattn_mask (`torch.Tensor`, *optional*, default is None):\nAttention mask.\n\nReturns:\n`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).", "source": "github-repos"}
{"code": "def download_links(self, dir_path):\n    \n    links = self.links\n    if not path.exists(dir_path):\n      makedirs(dir_path)\n    for i, url in enumerate(links):\n      if 'start' in self.cseargs:\n        i += int(self.cseargs['start'])\n      ext = self.cseargs['fileType']\n      ext = '.html' if ext == '' else '.' + ext\n      file_name = self.cseargs['q'].replace(' ', '_') + '_' + str(i) + ext\n      file_path = path.join(dir_path, file_name)\n      r = requests.get(url, stream=True)\n      if r.status_code == 200:\n        with open(file_path, 'wb') as f:\n          r.raw.decode_content = True\n          shutil.copyfileobj(r.raw, f)", "docstring": "Download web pages or images from search result links.\n\nArgs:\ndir_path (str):\nPath of directory to save downloads of :class:`api.results`.links", "source": "juraj-google-style"}
{"code": "def write_libraries(dir, libraries):\n    files = [open(os.path.join(dir, k), 'w') for (k, _) in libraries]\n    for (f, (_, v)) in zip(files, libraries):\n        v.write_markdown_to_file(f)\n    for (f, (_, v)) in zip(files, libraries):\n        v.write_other_members(f)\n        f.close()", "docstring": "Write a list of libraries to disk.\n\nArgs:\ndir: Output directory.\nlibraries: List of (filename, library) pairs.", "source": "codesearchnet"}
{"code": "def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API])\n    body = (('{\"sku\":{\"capacity\":\"' + str(capacity)) + '\"}}')\n    return do_patch(endpoint, body, access_token)", "docstring": "Change the instance count of an existing VM Scale Set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\ncapacity (int): New number of VMs.\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def set_job(self, key, func, args):\n    (res, pk) = key\n    (jobs, lock) = self._jobs\n    task = _tasks.UpdateTask(func(*args), key)\n    with lock:\n        job = jobs[res].get(pk)\n        had = bool(job)\n        if (not job):\n            job = task\n            jobs[res][pk] = job\n        else:\n            task.cancel()\n    self._log.debug('Scheduling: %s-%s (%s)', res.tag, pk, ('new task' if (not had) else 'dup'))\n    return job", "docstring": "Get a scheduled task or set if none exists.\n\nReturns:\n- task coroutine/continuation", "source": "codesearchnet"}
{"code": "def get_uniform_frame_indices(total_num_frames: int, num_frames: Optional[int]=None):\n    if num_frames is not None:\n        indices = np.arange(0, total_num_frames, total_num_frames / num_frames).astype(int)\n    else:\n        indices = np.arange(0, total_num_frames).astype(int)\n    return indices", "docstring": "Creates a numpy array for uniform sampling of `num_frame` frames from `total_num_frames`\nwhen loading a video.\n\nArgs:\ntotal_num_frames (`int`):\nTotal number of frames that a video has.\nnum_frames (`int`, *optional*):\nNumber of frames to sample uniformly. If not specified, all frames are sampled.\n\nReturns:\nnp.ndarray: np array of frame indices that will be sampled.", "source": "github-repos"}
{"code": "def add_class_error(self, test_record):\n    test_record.update_record()\n    self.error.append(test_record)", "docstring": "Add a record to indicate a test class has failed before any test\ncould execute.\n\nThis is only called before any test is actually executed. So it only\nadds an error entry that describes why the class failed to the tally\nand does not affect the total number of tests requrested or exedcuted.\n\nArgs:\ntest_record: A TestResultRecord object for the test class.", "source": "github-repos"}
{"code": "def guarantee_const(input, name=None):\n    return gen_array_ops.guarantee_const(input=input, name=name)", "docstring": "Promise to the TF runtime that the input tensor is a constant.\n\nThe runtime is then free to make optimizations based on this.\n\nReturns the input tensor without modification.\n\nArgs:\ninput: A `Tensor`.\nname: A name for this operation.\n\nReturns:\nA `Tensor`. Has the same dtype as `input`.", "source": "github-repos"}
{"code": "def mv(src, dst):\n    if (not exists(src)):\n        raise File404(src)\n    try:\n        shutil.move(src, dst)\n    except Exception as e:\n        raise IOError(str(e))", "docstring": "Move a file or directory.\n\nIf the destination already exists, this will attempt to overwrite\nit.\n\nArguments:\n\nsrc (string): path to the source file or directory.\ndst (string): path to the destination file or directory.\n\nRaises:\n\nFile404: if source does not exist.\nIOError: in case of error.", "source": "codesearchnet"}
{"code": "def add_continue_node(self, ast_node, section_id, guards):\n    node = self._add_jump_node(ast_node, guards)\n    self.continues[section_id].add(node)", "docstring": "Grows the graph by adding a reentry node.\n\nThis node causes control flow to go back to the loop section's entry.\n\nArgs:\nast_node: ast.AST\nsection_id: Hashable, the node for which ast_node should be considered to\nbe an exit node\nguards: Tuple[ast.AST, ...], the finally sections that guard ast_node", "source": "github-repos"}
{"code": "def ComputeRoot(hashes):\n        \n        if not len(hashes):\n            raise Exception('Hashes must have length')\n        if len(hashes) == 1:\n            return hashes[0]\n\n        tree = MerkleTree(hashes)\n        return tree.Root.Hash", "docstring": "Compute the root hash.\n\nArgs:\nhashes (list): the list of hashes to build the root from.\n\nReturns:\nbytes: the root hash.", "source": "juraj-google-style"}
{"code": "def make_list_of_images(images, expected_ndims: int=3) -> list[ImageInput]:\n    if is_batched(images):\n        return images\n    if is_pil_image(images):\n        return [images]\n    if is_valid_image(images):\n        if images.ndim == expected_ndims + 1:\n            images = list(images)\n        elif images.ndim == expected_ndims:\n            images = [images]\n        else:\n            raise ValueError(f'Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got {images.ndim} dimensions.')\n        return images\n    raise ValueError(f'Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray, but got {type(images)}.')", "docstring": "Ensure that the output is a list of images. If the input is a single image, it is converted to a list of length 1.\nIf the input is a batch of images, it is converted to a list of images.\n\nArgs:\nimages (`ImageInput`):\nImage of images to turn into a list of images.\nexpected_ndims (`int`, *optional*, defaults to 3):\nExpected number of dimensions for a single input image. If the input image has a different number of\ndimensions, an error is raised.", "source": "github-repos"}
{"code": "def get_weights_of_nn_sites(self, structure, n):\n        \n\n        return [e['weight'] for e in self.get_nn_info(structure, n)]", "docstring": "Get weight associated with each near neighbor of site with\nindex n in structure.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site for which to determine the weights.\nReturns:\nweights (list of floats): near-neighbor weights.", "source": "juraj-google-style"}
{"code": "def _exec_query(self):\n    if (not self._solr_locked):\n        if (not self.compiled_query):\n            self._compile_query()\n        try:\n            solr_params = self._process_params()\n            if settings.DEBUG:\n                t1 = time.time()\n            self._solr_cache = self.bucket.search(self.compiled_query, self.index_name, **solr_params)\n            if (settings.DEBUG and (settings.DEBUG_LEVEL >= 5)):\n                print(('QRY => %s\\nSOLR_PARAMS => %s' % (self.compiled_query, solr_params)))\n        except riak.RiakError as err:\n            err.value += self._get_debug_data()\n            raise\n        self._solr_locked = True\n        return self._solr_cache['docs']", "docstring": "Executes solr query if it hasn't already executed.\n\nReturns:\nSelf.", "source": "codesearchnet"}
{"code": "def update_reorders_v2(output_file_path):\n    spec = tf_upgrade_v2.TFAPIChangeSpec()\n    reordered_function_names = spec.reordered_function_names\n    need_kwargs_function_names = spec.function_transformers.keys()\n    function_renames = spec.symbol_renames\n    all_reorders = collect_function_arg_names(reordered_function_names, need_kwargs_function_names, function_renames)\n    rename_lines = [get_reorder_line(name, arg_names) for name, arg_names in all_reorders.items()]\n    renames_file_text = '%sreorders = {\\n%s\\n}\\n' % (_FILE_HEADER, ',\\n'.join(sorted(rename_lines)))\n    file_io.write_string_to_file(output_file_path, renames_file_text)", "docstring": "Writes a Python dictionary mapping function name to argument order.\n\nArgs:\noutput_file_path: File path to write output to. Any existing contents\nwould be replaced.", "source": "github-repos"}
{"code": "def from_string(cls, string_input):\n        \n\n        correlation_grid = {}\n        Exc_DFT_option = {}\n        COHSEX_options = {}\n        GW_options = {}\n        BSE_TDDFT_options = {}\n\n        lines = string_input.strip().split(\"\\n\")\n\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        nat = toks[0]\n        nsp = toks[1]\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        nvbands = toks[0]\n\n        \n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        correlation_grid['n_grid'] = toks[0]\n        correlation_grid['dE_grid'] = toks[1]\n\n        \n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        Exc_DFT_option['rdVxcpsi'] = toks[0]\n\n        \n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        COHSEX_options['nv_cohsex'] = toks[0]\n        COHSEX_options['nc_cohsex'] = toks[1]\n        COHSEX_options['eigMethod'] = toks[2]\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        COHSEX_options['nit_cohsex'] = toks[0]\n        COHSEX_options['resMethod'] = toks[1]\n        COHSEX_options['scf_cohsex_wf'] = toks[2]\n        COHSEX_options['mix_cohsex'] = toks[3]\n\n        \n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        GW_options['nv_corr'] = toks[0]\n        GW_options['nc_corr'] = toks[1]\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        GW_options['nit_gw'] = toks[0]\n\n        \n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        BSE_TDDFT_options['do_bse'] = toks[0]\n        BSE_TDDFT_options['do_tddft'] = toks[1]\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        BSE_TDDFT_options['nv_bse'] = toks[0]\n        BSE_TDDFT_options['nc_bse'] = toks[1]\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        BSE_TDDFT_options['npsi_bse'] = toks[0]\n        BSE_TDDFT_options['nit_bse'] = toks[1]\n\n        \n        \n        lines.pop(0)\n        atname = []\n        i = int(nsp)\n        while i != 0:\n            l = lines.pop(0).strip()\n            toks = l.split()\n            atname.append(toks[0])\n            i -= 1\n\n        \n        lines.pop(0)\n        l = lines.pop(0).strip()\n        toks = l.split()\n        scale = toks[0]\n        \n        lines.pop(0)\n        \n        species = []\n        coords = []\n        i = int(nat)\n        while i != 0:\n            l = lines.pop(0).strip()\n            toks = l.split()\n            coords.append([float(j) for j in toks[0:3]])\n            species.append(atname[int(toks[3]) - 1])\n            i -= 1\n\n        mol = Molecule(species, coords)\n\n        return FiestaInput(mol=mol, correlation_grid=correlation_grid,\n                           Exc_DFT_option=Exc_DFT_option,\n                           COHSEX_options=COHSEX_options,\n                           GW_options=GW_options,\n                           BSE_TDDFT_options=BSE_TDDFT_options)", "docstring": "Read an FiestaInput from a string. 
Currently tested to work with\nfiles generated from this class itself.\n\nArgs:\nstring_input: string_input to parse.\nReturns:\nFiestaInput object", "source": "juraj-google-style"}
{"code": "def _ParseLine(self, parser_mediator, structure):\n    \n    \n    month, day_of_month, year, hours, minutes, seconds, milliseconds = (\n        structure.date_time)\n\n    year += 2000\n    time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds, milliseconds)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    event_data = SkyDriveLogEventData()\n    \n    \n    event_data.detail = structure.detail.replace('\\n', ' ')\n    event_data.log_level = structure.log_level\n    event_data.module = structure.module\n    event_data.source_code = structure.source_code\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a logline and store appropriate attributes.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def insert(self, i, species, coords, validate_proximity=False, properties=None):\n    new_site = Site(species, coords, properties=properties)\n    if validate_proximity:\n        for site in self:\n            if (site.distance(new_site) < self.DISTANCE_TOLERANCE):\n                raise ValueError('New site is too close to an existing site!')\n    self._sites.insert(i, new_site)", "docstring": "Insert a site to the molecule.\n\nArgs:\ni (int): Index to insert site\nspecies: species of inserted site\ncoords (3x1 array): coordinates of inserted site\nvalidate_proximity (bool): Whether to check if inserted site is\ntoo close to an existing site. Defaults to True.\nproperties (dict): Dict of properties for the Site.\n\nReturns:\nNew molecule with inserted site.", "source": "codesearchnet"}
{"code": "def get_phonopy_structure(pmg_structure):\n    symbols = [site.specie.symbol for site in pmg_structure]\n    return PhonopyAtoms(symbols=symbols, cell=pmg_structure.lattice.matrix, scaled_positions=pmg_structure.frac_coords)", "docstring": "Convert a pymatgen Structure object to a PhonopyAtoms object.\n\nArgs:\npmg_structure (pymatgen Structure): A Pymatgen structure object.", "source": "codesearchnet"}
{"code": "def get_remote_info(url_id):\n    try:\n        data = _send_request(url_id)\n    except Exception as e:\n        sys.stderr.write('Seeder GET error: ')\n        sys.stderr.write(str(e.message))\n        return None\n    return _convert_to_wakat_format(data)", "docstring": "Download data and convert them to dict used in frontend.\n\nArgs:\nurl_id (str): ID used as identification in Seeder.\n\nReturns:\ndict: Dict with data for frontend or None in case of error.", "source": "codesearchnet"}
{"code": "def validate(self, tags, confidence):\n    (intent, tags) = self.validate_with_tags(tags, confidence)\n    return intent", "docstring": "Using this method removes tags from the result of validate_with_tags\n\nReturns:\nintent(intent): Resuts from validate_with_tags", "source": "codesearchnet"}
{"code": "def CredibleInterval(self, percentage=90):\n        \n        prob = (1 - percentage / 100.0) / 2\n        interval = self.Value(prob), self.Value(1 - prob)\n        return interval", "docstring": "Computes the central credible interval.\n\nIf percentage=90, computes the 90% CI.\n\nArgs:\npercentage: float between 0 and 100\n\nReturns:\nsequence of two floats, low and high", "source": "juraj-google-style"}
{"code": "def create_course_completion(self, user_id, payload):\n    url = (self.enterprise_configuration.sapsf_base_url + self.global_sap_config.completion_status_api_path)\n    return self._call_post_with_user_override(user_id, url, payload)", "docstring": "Send a completion status payload to the SuccessFactors OCN Completion Status endpoint\n\nArgs:\nuser_id (str): The sap user id that the completion status is being sent for.\npayload (str): JSON encoded object (serialized from SapSuccessFactorsLearnerDataTransmissionAudit)\ncontaining completion status fields per SuccessFactors documentation.\n\nReturns:\nThe body of the response from SAP SuccessFactors, if successful\nRaises:\nHTTPError: if we received a failure response code from SAP SuccessFactors", "source": "codesearchnet"}
{"code": "def lunr(ref, fields, documents, languages=None):\n    if ((languages is not None) and lang.LANGUAGE_SUPPORT):\n        if isinstance(languages, basestring):\n            languages = [languages]\n        unsupported_languages = (set(languages) - set(lang.SUPPORTED_LANGUAGES))\n        if unsupported_languages:\n            raise RuntimeError('The specified languages {} are not supported, please choose one of {}'.format(', '.join(unsupported_languages), ', '.join(lang.SUPPORTED_LANGUAGES.keys())))\n        builder = lang.get_nltk_builder(languages)\n    else:\n        builder = Builder()\n        builder.pipeline.add(trimmer, stop_word_filter, stemmer)\n        builder.search_pipeline.add(stemmer)\n    builder.ref(ref)\n    for field in fields:\n        if isinstance(field, dict):\n            builder.field(**field)\n        else:\n            builder.field(field)\n    for document in documents:\n        if isinstance(document, (tuple, list)):\n            builder.add(document[0], attributes=document[1])\n        else:\n            builder.add(document)\n    return builder.build()", "docstring": "A convenience function to configure and construct a lunr.Index.\n\nArgs:\nref (str): The key in the documents to be used a the reference.\nfields (list): A list of strings defining fields in the documents to\nindex. Optionally a list of dictionaries with three keys:\n`field_name` defining the document's field, `boost` an integer\ndefining a boost to be applied to the field, and `extractor`\na callable taking the document as a single argument and returning\na string located in the document in a particular way.\ndocuments (list): The list of dictonaries representing the documents\nto index. Optionally a 2-tuple of dicts, the first one being\nthe document and the second the associated attributes to it.\nlanguages (str or list, optional): The languages to use if using\nNLTK language support, ignored if NLTK is not available.\n\nReturns:\nIndex: The populated Index ready to search against.", "source": "codesearchnet"}
{"code": "def convert_attribute_name_to_tag(value):\n    \n    if not isinstance(value, six.string_types):\n        raise ValueError(\"The attribute name must be a string.\")\n\n    for entry in attribute_name_tag_table:\n        if value == entry[0]:\n            return entry[1]\n\n    raise ValueError(\"Unrecognized attribute name: '{}'\".format(value))", "docstring": "A utility function that converts an attribute name string into the\ncorresponding attribute tag.\n\nFor example: 'State' -> enums.Tags.STATE\n\nArgs:\nvalue (string): The string name of the attribute.\n\nReturns:\nenum: The Tags enumeration value that corresponds to the attribute\nname string.\n\nRaises:\nValueError: if the attribute name string is not a string or if it is\nan unrecognized attribute name", "source": "juraj-google-style"}
{"code": "async def get_auth(request):\n    auth_val = request.get(AUTH_KEY)\n    if auth_val:\n        return auth_val\n    auth_policy = request.get(POLICY_KEY)\n    if (auth_policy is None):\n        raise RuntimeError('auth_middleware not installed')\n    request[AUTH_KEY] = (await auth_policy.get(request))\n    return request[AUTH_KEY]", "docstring": "Returns the user_id associated with a particular request.\n\nArgs:\nrequest: aiohttp Request object.\n\nReturns:\nThe user_id associated with the request, or None if no user is\nassociated with the request.\n\nRaises:\nRuntimeError: Middleware is not installed", "source": "codesearchnet"}
{"code": "def spherical_vert(script, radius=1.0, center_pt=(0.0, 0.0, 0.0)):\n    \n    function = 'sqrt((x-{})^2+(y-{})^2+(z-{})^2)<={}'.format(\n        center_pt[0], center_pt[1], center_pt[2], radius)\n    vert_function(script, function=function)\n    return None", "docstring": "Select all vertices within a spherical radius\n\nArgs:\nradius (float): radius of the sphere\ncenter_pt (3 coordinate tuple or list): center point of the sphere\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def input_mask(self):\n    inputs = self.input\n    if isinstance(inputs, list):\n        return [getattr(x, '_keras_mask', None) for x in inputs]\n    else:\n        return getattr(inputs, '_keras_mask', None)", "docstring": "Retrieves the input mask tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one inbound node,\ni.e. if it is connected to one incoming layer.\n\nReturns:\nInput mask tensor (potentially None) or list of input\nmask tensors.\n\nRaises:\nAttributeError: if the layer is connected to\nmore than one incoming layers.", "source": "github-repos"}
{"code": "def transform(self, col):\n        \n\n        out = pd.DataFrame()\n\n        \n        column = col[self.col_name].replace({np.nan: None})\n        out[self.col_name] = column.apply(self.get_val)\n\n        return out", "docstring": "Prepare the transformer to convert data and return the processed table.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def get_values(self, field_name: str) -> List[object]:\n        \n        result = list()\n        if self.validate_field(field_name):\n            for value_key in self._kg.get(field_name):\n                result.append(value_key[\"value\"])\n        return result", "docstring": "Get a list of all the values of a field.\n\nArgs:\nfield_name:\n\nReturns: the list of values (not the keys)", "source": "juraj-google-style"}
{"code": "def get_commands_in_namespace(namespace=None, level=1):\n    from ..command import Command\n    commands = {}\n    if (namespace is None):\n        frame = inspect.stack()[level][0]\n        namespace = frame.f_globals\n    elif inspect.ismodule(namespace):\n        namespace = vars(namespace)\n    for name in namespace:\n        obj = namespace[name]\n        if isinstance(obj, Command):\n            commands[name] = obj\n    return OrderedDict(((name, commands[name]) for name in sorted(commands)))", "docstring": "Get commands in namespace.\n\nArgs:\nnamespace (dict|module): Typically a module. If not passed, the\nglobals from the call site will be used.\nlevel (int): If not called from the global scope, set this\nappropriately to account for the call stack.\n\nReturns:\nOrderedDict: The commands found in the namespace, ordered by\nname.\n\nCan be used to create ``__all__`` lists::\n\n__all__ = list(get_commands_in_namespace())", "source": "codesearchnet"}
{"code": "def make_batched_videos(videos) -> List[Union['np.ndarray', 'torch.Tensor']]:\n    if not valid_videos:\n        raise ValueError(f'Invalid video input. Expected either a list of video frames or an input of 4 or 5 dimensions, but got type {type(videos)}.')\n    if is_batched_video(videos):\n        pass\n    elif is_valid_video(videos):\n        videos = [videos]\n    elif is_valid_image(videos):\n        videos = [np.array(videos)[None, ...]]\n    elif isinstance(videos[0], (list, tuple)) and is_valid_video(videos[0][0]):\n        videos = [video for sublist in videos for video in sublist]\n    return convert_pil_frames_to_video(videos)", "docstring": "Ensure that the input is a list of videos. If the input is a single video, it is converted to a list of length 1.\nIf the input is a batch of videos, it is converted to a list of 4D video arrays. Videos passed as list `PIL.Image`\nframes are converted to 4D arrays.\n\nWe assume that all inputs in the list are in the same format, based on the type of the first element.\n\nArgs:\nvideos (`VideoInput`):\nVideo inputs to turn into a list of videos.", "source": "github-repos"}
{"code": "def SetCACertificatesPath(self, ca_certificates_path):\n    \n    if not ca_certificates_path:\n      return\n\n    if not os.path.exists(ca_certificates_path):\n      raise errors.BadConfigOption(\n          'No such certificate file: {0:s}.'.format(ca_certificates_path))\n\n    self._ca_certs = ca_certificates_path\n    logger.debug('Elasticsearch ca_certs: {0!s}'.format(ca_certificates_path))", "docstring": "Sets the path to the CA certificates.\n\nArgs:\nca_certificates_path (str): path to file containing a list of root\ncertificates to trust.\n\nRaises:\nBadConfigOption: if the CA certificates file does not exist.", "source": "juraj-google-style"}
{"code": "def _ReadAppJsonFile(self, relative_path):\n    \n    try:\n      with open(os.path.join(sys.path[0], relative_path), 'r') as f:\n        return json.load(f)\n    except (IOError, ValueError):\n      return None", "docstring": "Reads JSON file from an application directory.\n\nArgs:\nrelative_path: file name relative to application root directory.\n\nReturns:\nParsed JSON data or None if the file does not exist, can't be read or\nnot a valid JSON file.", "source": "juraj-google-style"}
{"code": "def resolve_trust_remote_code(trust_remote_code, model_name, has_local_code, has_remote_code, error_message=None, upstream_repo=None):\n    if error_message is None:\n        if upstream_repo is not None:\n            error_message = f'The repository {model_name} references custom code contained in {upstream_repo} which must be executed to correctly load the model. You can inspect the repository content at https:\n        elif os.path.isdir(model_name):\n            error_message = f'The repository {model_name} contains custom code which must be executed to correctly load the model. You can inspect the repository content at {os.path.abspath(model_name)} .\\n'\n        else:\n            error_message = f'The repository {model_name} contains custom code which must be executed to correctly load the model. You can inspect the repository content at https:\n    if trust_remote_code is None:\n        if has_local_code:\n            trust_remote_code = False\n        elif has_remote_code and TIME_OUT_REMOTE_CODE > 0:\n            prev_sig_handler = None\n            try:\n                prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error)\n                signal.alarm(TIME_OUT_REMOTE_CODE)\n                while trust_remote_code is None:\n                    answer = input(f'{error_message} You can inspect the repository content at https:\n                    if answer.lower() in ['yes', 'y', '1']:\n                        trust_remote_code = True\n                    elif answer.lower() in ['no', 'n', '0', '']:\n                        trust_remote_code = False\n                signal.alarm(0)\n            except Exception:\n                raise ValueError(f'{error_message} You can inspect the repository content at https:\n            finally:\n                if prev_sig_handler is not None:\n                    signal.signal(signal.SIGALRM, prev_sig_handler)\n                    signal.alarm(0)\n        elif has_remote_code:\n            _raise_timeout_error(None, None)\n    if has_remote_code and (not has_local_code) and (not trust_remote_code):\n        raise ValueError(f'{error_message} You can inspect the repository content at https:\n    return trust_remote_code", "docstring": "Resolves the `trust_remote_code` argument. If there is remote code to be loaded, the user must opt-in to loading\nit.\n\nArgs:\ntrust_remote_code (`bool` or `None`):\nUser-defined `trust_remote_code` value.\nmodel_name (`str`):\nThe name of the model repository in huggingface.co.\nhas_local_code (`bool`):\nWhether the model has local code.\nhas_remote_code (`bool`):\nWhether the model has remote code.\nerror_message (`str`, *optional*):\nCustom error message to display if there is remote code to load and the user didn't opt-in. If unset, the error\nmessage will be regarding loading a model with custom code.\n\nReturns:\nThe resolved `trust_remote_code` value.", "source": "github-repos"}
{"code": "def get_config_path():\n    try:\n        return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n    except KeyError:\n        pass\n    if (os.name != 'nt'):\n        return os.path.join(os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n    else:\n        try:\n            return os.path.join(os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR], _CONFIG_DIRECTORY)\n        except KeyError:\n            drive = os.environ.get('SystemDrive', 'C:')\n            return os.path.join(drive, '\\\\', _CONFIG_DIRECTORY)", "docstring": "Returns the absolute path the the Cloud SDK's configuration directory.\n\nReturns:\nstr: The Cloud SDK config path.", "source": "codesearchnet"}
{"code": "def api_server(connection, server_class):\n    return server_class(link=xbahn.connection.link.Link(receive=connection, respond=connection))", "docstring": "Establishes an API Server on the supplied connection\n\nArguments:\n- connection (xbahn.connection.Connection)\n- server_class (xbahn.api.Server)\n\nReturns:\n- server_class: server instance", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        super(StorageItem, self).Deserialize(reader)\n        self.Value = reader.ReadVarBytes()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def tetragonal(a: float, c: float):\n        \n        return Lattice.from_parameters(a, a, c, 90, 90, 90)", "docstring": "Convenience constructor for a tetragonal lattice.\n\nArgs:\na (float): *a* lattice parameter of the tetragonal cell.\nc (float): *c* lattice parameter of the tetragonal cell.\n\nReturns:\nTetragonal lattice of dimensions a x a x c.", "source": "juraj-google-style"}
{"code": "def unsubscribe(self, subscription, max=None):\n        \n        if max is None:\n            self._send('UNSUB %d' % subscription.sid)\n            self._subscriptions.pop(subscription.sid)\n        else:\n            subscription.max = max\n            self._send('UNSUB %d %s' % (subscription.sid, max))", "docstring": "Unsubscribe will remove interest in the given subject. If max is\nprovided an automatic Unsubscribe that is processed by the server\nwhen max messages have been received\n\nArgs:\nsubscription (pynats.Subscription): a Subscription object\nmax (int=None): number of messages", "source": "juraj-google-style"}
{"code": "def build_individual(ind):\n    try:\n        ind_obj = dict(individual_id=ind['individual_id'])\n        log.info('Building Individual with id:{0}'.format(ind['individual_id']))\n    except KeyError as err:\n        raise PedigreeError('Individual is missing individual_id')\n    ind_obj['display_name'] = ind.get('display_name', ind_obj['individual_id'])\n    sex = ind.get('sex', 'unknown')\n    try:\n        int(sex)\n        ind_obj['sex'] = str(sex)\n    except ValueError as err:\n        try:\n            ind_obj['sex'] = REV_SEX_MAP[sex]\n        except KeyError as err:\n            raise PedigreeError(('Unknown sex: %s' % sex))\n    phenotype = ind.get('phenotype', 'unknown')\n    try:\n        ped_phenotype = REV_PHENOTYPE_MAP[phenotype]\n        if (ped_phenotype == (- 9)):\n            ped_phenotype = 0\n        ind_obj['phenotype'] = ped_phenotype\n    except KeyError as err:\n        raise PedigreeError(('Unknown phenotype: %s' % phenotype))\n    ind_obj['father'] = ind.get('father')\n    ind_obj['mother'] = ind.get('mother')\n    ind_obj['capture_kits'] = ind.get('capture_kits', [])\n    ind_obj['bam_file'] = ind.get('bam_file')\n    ind_obj['mt_bam'] = ind.get('mt_bam')\n    ind_obj['vcf2cytosure'] = ind.get('vcf2cytosure')\n    ind_obj['confirmed_sex'] = ind.get('confirmed_sex')\n    ind_obj['confirmed_parent'] = ind.get('confirmed_parent')\n    ind_obj['predicted_ancestry'] = ind.get('predicted_ancestry')\n    analysis_type = ind.get('analysis_type', 'unknown')\n    if (not (analysis_type in ANALYSIS_TYPES)):\n        raise PedigreeError('Analysis type %s not allowed', analysis_type)\n    ind_obj['analysis_type'] = analysis_type\n    if ('tmb' in ind):\n        ind_obj['tmb'] = ind['tmb']\n    if ('msi' in ind):\n        ind_obj['msi'] = ind['msi']\n    if ('tumor_purity' in ind):\n        ind_obj['tumor_purity'] = ind['tumor_purity']\n    if ('tumor_type' in ind):\n        ind_obj['tumor_type'] = ind['tumor_type']\n    return ind_obj", "docstring": "Build a Individual object\n\nArgs:\nind (dict): A dictionary with individual information\n\nReturns:\nind_obj (dict): A Individual object\n\ndict(\nindividual_id = str, # required\ndisplay_name = str,\nsex = str,\nphenotype = int,\nfather = str, # Individual id of father\nmother = str, # Individual id of mother\ncapture_kits = list, # List of names of capture kits\nbam_file = str, # Path to bam file\nvcf2cytosure = str, # Path to CGH file\nanalysis_type = str, # choices=ANALYSIS_TYPES\n)", "source": "codesearchnet"}
{"code": "def raster_dilation(rasterfile):\n        \n        if is_string(rasterfile):\n            origin_raster = RasterUtilClass.read_raster(str(rasterfile))\n        elif isinstance(rasterfile, Raster):\n            origin_raster = rasterfile.data\n        elif isinstance(rasterfile, numpy.ndarray):\n            origin_raster = rasterfile\n        else:\n            return 'Your rasterfile has a wrong type. Type must be string or ' \\\n                   'numpy.array or class Raster in pygeoc.'\n        min_value_raster = origin_raster.min()\n        dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))\n        \n        \n        \n        add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster)\n        temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))\n        add_col = numpy.full((origin_raster.shape[0] + 2, 1), min_value_raster)\n        expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))\n        \n        for i in range(origin_raster.shape[0]):\n            for j in range(origin_raster.shape[1]):\n                max_pixel_value = min_value_raster\n                \n                for k in range(3):\n                    for l in range(3):\n                        if expand_origin_raster[i + k, j + l] >= max_pixel_value:\n                            max_pixel_value = expand_origin_raster[i + k, j + l]\n                            \n                            \n                            \n                    dilation_raster[i, j] = max_pixel_value\n        \n        return dilation_raster", "docstring": "Dilate the raster image.\n\nFind the max pixel's value in 8-neighborhood. Then change the compute\npixel's value into the max pixel's value.\n\nArgs:\nrasterfile: input original raster image, type can be filename(string,\nlike \"test1.tif\"), rasterfile(class Raster) or numpy.ndarray.\n\nReturns:\ndilation_raster: raster image after dilation, type is numpy.ndarray.", "source": "juraj-google-style"}
{"code": "def add_dir(self, path, compress):\n        \n        if not os.path.isdir(path):\n            raise ValueError('{} is not a directory'.format(path))\n        for root, dirs, files in os.walk(path):\n            for f in files:\n                self.add_file(os.path.join(root, f), compress)", "docstring": "Add all files under directory `path` to the MAR file.\n\nArgs:\npath (str): path to directory to add to this MAR file\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.", "source": "juraj-google-style"}
{"code": "def __call__(self, utterances: list, batch_history: list, *responses: list) -> list:\n        \n        result = [random.choice([t for t, sc in r if t]) for r in zip(*responses)]\n        return result", "docstring": "Selects result of a random skill for each utterance.\n\nArgs:\nutterances_batch: Not used.\nhistory_batch: Not used.\nresponses: Each response positional argument corresponds to\nresponse of one of Agent skills and is represented by\nbatch (list) of (response, confidence) tuple structures.\n\nReturns:\nresult: A batch of responses corresponding to the utterance\nbatch received by agent.", "source": "juraj-google-style"}
{"code": "def split_input(cls, mapper_spec):\n    params = _get_params(mapper_spec)\n    entity_kind_name = params[cls.ENTITY_KIND_PARAM]\n    batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))\n    shard_count = mapper_spec.shard_count\n    namespace = params.get(cls.NAMESPACE_PARAM)\n    app = params.get(cls._APP_PARAM)\n    filters = params.get(cls.FILTERS_PARAM)\n    if (namespace is None):\n        namespace_query = datastore.Query('__namespace__', keys_only=True, _app=app)\n        namespace_keys = namespace_query.Get(limit=(cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1))\n        if (len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD):\n            ns_ranges = namespace_range.NamespaceRange.split(n=shard_count, contiguous=True, _app=app)\n            return [cls(entity_kind_name, key_ranges=None, ns_range=ns_range, batch_size=batch_size, filters=filters) for ns_range in ns_ranges]\n        elif (not namespace_keys):\n            return [cls(entity_kind_name, key_ranges=None, ns_range=namespace_range.NamespaceRange(_app=app), batch_size=shard_count, filters=filters)]\n        else:\n            namespaces = [(namespace_key.name() or '') for namespace_key in namespace_keys]\n    else:\n        namespaces = [namespace]\n    readers = cls._split_input_from_params(app, namespaces, entity_kind_name, params, shard_count)\n    if filters:\n        for reader in readers:\n            reader._filters = filters\n    return readers", "docstring": "Splits query into shards without fetching query results.\n\nTries as best as it can to split the whole query result set into equal\nshards. Due to difficulty of making the perfect split, resulting shards'\nsizes might differ significantly from each other.\n\nArgs:\nmapper_spec: MapperSpec with params containing 'entity_kind'.\nMay have 'namespace' in the params as a string containing a single\nnamespace. If specified then the input reader will only yield values\nin the given namespace. If 'namespace' is not given then values from\nall namespaces will be yielded. May also have 'batch_size' in the params\nto specify the number of entities to process in each batch.\n\nReturns:\nA list of InputReader objects. If the query results are empty then the\nempty list will be returned. Otherwise, the list will always have a length\nequal to number_of_shards but may be padded with Nones if there are too\nfew results for effective sharding.", "source": "codesearchnet"}
{"code": "def get_appliances(self, location_id):\n    url = 'https:\n    headers = self.__gen_headers()\n    headers['Content-Type'] = 'application/json'\n    params = {'locationId': location_id}\n    url = self.__append_url_params(url, params)\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get the appliances added for a specified location.\n\nArgs:\nlocation_id (string): identifiying string of appliance\n\nReturns:\nlist: dictionary objects containing appliances data", "source": "codesearchnet"}
{"code": "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n        \n        estimator = SVR(kernel='linear')\n        selector = RFECV(estimator, step=1)\n        selector = selector.fit(df_features.values, df_target.values[:, 0])\n\n        return selector.grid_scores_", "docstring": "For one variable, predict its neighbouring nodes.\n\nArgs:\ndf_features (pandas.DataFrame):\ndf_target (pandas.Series):\nidx (int): (optional) for printing purposes\nkwargs (dict): additional options for algorithms\n\nReturns:\nlist: scores of each feature relatively to the target", "source": "juraj-google-style"}
{"code": "def __init__(self, campaign_db, campaign_runner, check_repo=True):\n        \n        self.db = campaign_db\n        self.runner = campaign_runner\n        self.check_repo = check_repo\n\n        \n        \n        if self.check_repo:\n            self.check_repo_ok()", "docstring": "Initialize the Simulation Execution Manager, using the provided\nCampaignManager and SimulationRunner instances.\n\nThis method should never be used on its own, but only as a constructor\nfrom the new and load @classmethods.\n\nArgs:\ncampaign_db (DatabaseManager): the DatabaseManager object to\nassociate to this campaign.\ncampaign_runner (SimulationRunner): the SimulationRunner object to\nassociate to this campaign.", "source": "juraj-google-style"}
{"code": "def __init__(self, init_args, init_func, next_func, finalize_func, output_signature, name=None):\n    self._init_args = init_args\n    self._init_structure = structure.type_spec_from_value(init_args)\n    self._init_func = structured_function.StructuredFunctionWrapper(init_func, self._transformation_name(), input_structure=self._init_structure)\n    self._next_func = structured_function.StructuredFunctionWrapper(next_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n    self._finalize_func = structured_function.StructuredFunctionWrapper(finalize_func, self._transformation_name(), input_structure=self._init_func.output_structure)\n    self._output_signature = output_signature\n    self._name = name\n    variant_tensor = gen_dataset_ops.generator_dataset(structure.to_tensor_list(self._init_structure, self._init_args) + self._init_func.function.captured_inputs, self._next_func.function.captured_inputs, self._finalize_func.function.captured_inputs, init_func=self._init_func.function, next_func=self._next_func.function, finalize_func=self._finalize_func.function, **self._common_args)\n    super().__init__(variant_tensor)", "docstring": "Constructs a `_GeneratorDataset`.\n\nArgs:\ninit_args: A (nested) structure representing the arguments to `init_func`.\ninit_func: A TensorFlow function that will be called on `init_args` each\ntime a C++ iterator over this dataset is constructed. Returns a (nested)\nstructure representing the \"state\" of the dataset.\nnext_func: A TensorFlow function that will be called on the result of\n`init_func` to produce each element, and that raises `OutOfRangeError`\nto terminate iteration.\nfinalize_func: A TensorFlow function that will be called on the result of\n`init_func` immediately before a C++ iterator over this dataset is\ndestroyed. The return value is ignored.\noutput_signature: A (nested) structure of `tf.TypeSpec` objects describing\nthe output of `next_func`.\nname: Optional. A name for the tf.data transformation.", "source": "github-repos"}
{"code": "def _generate_G_points(self, kpoint):\n    gpoints = []\n    for i in range(((2 * self._nbmax[2]) + 1)):\n        i3 = (((i - (2 * self._nbmax[2])) - 1) if (i > self._nbmax[2]) else i)\n        for j in range(((2 * self._nbmax[1]) + 1)):\n            j2 = (((j - (2 * self._nbmax[1])) - 1) if (j > self._nbmax[1]) else j)\n            for k in range(((2 * self._nbmax[0]) + 1)):\n                k1 = (((k - (2 * self._nbmax[0])) - 1) if (k > self._nbmax[0]) else k)\n                G = np.array([k1, j2, i3])\n                v = (kpoint + G)\n                g = np.linalg.norm(np.dot(v, self.b))\n                E = ((g ** 2) / self._C)\n                if (E < self.encut):\n                    gpoints.append(G)\n    return np.array(gpoints, dtype=np.float64)", "docstring": "Helper function to generate G-points based on nbmax.\n\nThis function iterates over possible G-point values and determines\nif the energy is less than G_{cut}. Valid values are appended to\nthe output array. This function should not be called outside of\ninitialization.\n\nArgs:\nkpoint (np.array): the array containing the current k-point value\n\nReturns:\na list containing valid G-points", "source": "codesearchnet"}
{"code": "def openning(input_rasterfilename, times):\n        \n        input_raster = RasterUtilClass.read_raster(input_rasterfilename)\n        openning_raster = input_raster\n        for i in range(times):\n            openning_raster = RasterUtilClass.raster_erosion(openning_raster)\n        for i in range(times):\n            openning_raster = RasterUtilClass.raster_dilation(openning_raster)\n        return openning_raster", "docstring": "Do openning.\n\nOpenning: Erode firstly, then Dilate.\n\nArgs:\ninput_rasterfilename: input original raster image filename.\ntimes: Erode and Dilate times.\n\nReturns:\nopenning_raster: raster image after open.", "source": "juraj-google-style"}
{"code": "def __write_to_hdf5_light(self, filename_out, *args, **kwargs):\n        \n\n        block_size = 0\n\n        with h5py.File(filename_out, 'w') as h5:\n\n            h5.attrs[b'CLASS']   = b'FILTERBANK'\n            h5.attrs[b'VERSION'] = b'1.0'\n\n            if HAS_BITSHUFFLE:\n                bs_compression = bitshuffle.h5.H5FILTER\n                bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)\n            else:\n                bs_compression = None\n                bs_compression_opts = None\n                logger.warning(\"Warning: bitshuffle not found. No compression applied.\")\n\n\n            dset = h5.create_dataset('data',\n                        data=self.data,\n\n                        compression=bs_compression,\n                        compression_opts=bs_compression_opts)\n\n            dset_mask = h5.create_dataset('mask',\n                        shape=self.file_shape,\n\n                        compression=bs_compression,\n                        compression_opts=bs_compression_opts,\n                        dtype='uint8')\n\n            dset.dims[0].label = b\"frequency\"\n            dset.dims[1].label = b\"feed_id\"\n            dset.dims[2].label = b\"time\"\n\n            dset_mask.dims[0].label = b\"frequency\"\n            dset_mask.dims[1].label = b\"feed_id\"\n            dset_mask.dims[2].label = b\"time\"\n\n            \n            for key, value in self.header.items():\n                dset.attrs[key] = value", "docstring": "Write data to HDF5 file in one go.\n\nArgs:\nfilename_out (str): Name of output file", "source": "juraj-google-style"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(ExtractionTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._artifacts_registry = None\n    self._buffer_size = 0\n    self._mount_path = None\n    self._operating_system = None\n    self._parser_filter_expression = None\n    self._preferred_year = None\n    self._presets_file = None\n    self._process_archives = False\n    self._process_compressed_streams = True\n    self._process_memory_limit = None\n    self._queue_size = self._DEFAULT_QUEUE_SIZE\n    self._resolver_context = dfvfs_context.Context()\n    self._single_process_mode = False\n    self._storage_file_path = None\n    self._storage_format = definitions.STORAGE_FORMAT_SQLITE\n    self._temporary_directory = None\n    self._text_prepend = None\n    self._use_zeromq = True\n    self._yara_rules_string = None", "docstring": "Initializes an CLI tool.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def write(self, output):\n        \n        view_str = output.encode('ascii', 'ignore')\n        if (len(view_str) > 0):\n            self.m_ser.write(view_str)\n            self.m_ser.flush()\n            self.m_ser.reset_input_buffer()\n            time.sleep(self.m_force_wait)\n        pass", "docstring": "Passthrough for pyserial Serial.write().\n\nArgs:\noutput (str): Block to write to port", "source": "juraj-google-style"}
{"code": "def decode_prob(self, class_probabilities):\n    results = []\n    for row in class_probabilities:\n        entries = []\n        for (i, prob) in enumerate(row):\n            entries.append({'index': i, 'name': str(i), 'prob': prob})\n        entries = sorted(entries, key=itemgetter('prob'), reverse=True)[:self.top_probs]\n        for entry in entries:\n            entry['prob'] = '{:.3f}'.format(entry['prob'])\n        results.append(entries)\n    return results", "docstring": "Given predicted class probabilites for a set of examples, annotate\neach logit with a class name.\n\nBy default, we name each class using its index in the logits array.\n\nArgs:\nclass_probabilities (array): Class probabilities as output by\n`self.predict`, i.e., a numpy array of shape (num_examples,\nnum_classes).\n\nReturns:\nAnnotated class probabilities for each input example, as a list of\ndicts where each dict is formatted as:\n{\n'index': class_index,\n'name': class_name,\n'prob': class_probability\n}", "source": "codesearchnet"}
{"code": "def execute(self, triple_map, output, **kwargs):\n    sparql = (PREFIX + triple_map.logicalSource.query.format(**kwargs))\n    bindings = self.__get_bindings__(sparql)\n    iterator = str(triple_map.logicalSource.iterator)\n    for binding in bindings:\n        entity_dict = binding.get(iterator)\n        if isinstance(entity_dict, rdflib.term.Node):\n            entity = entity_dict\n        elif isinstance(entity_dict, dict):\n            raw_value = entity_dict.get('value')\n            if entity_dict.get('type').startswith('bnode'):\n                entity = rdflib.BNode(raw_value)\n            else:\n                entity = rdflib.URIRef(raw_value)\n        if (triple_map.subjectMap.class_ is not None):\n            output.add((entity, rdflib.RDF.type, triple_map.subjectMap.class_))\n        sparql_query = self.__construct_compound_query__(triple_map).format(**kwargs)\n        properties = self.__get_bindings__(sparql_query)\n        for pred_obj_map in triple_map.predicateObjectMap:\n            predicate = pred_obj_map.predicate\n            if (pred_obj_map.constant is not None):\n                output.add((entity, predicate, pred_obj_map.constant))\n                continue\n            if ('\n                key = str(predicate).split('\n            else:\n                key = str(predicate).split('/')[(- 1)]\n            for property_ in properties:\n                if (key in property_.keys()):\n                    info = {'about': property_.get(key)}\n                    object_ = __get_object__(info)\n                    output.add((entity, predicate, object_))", "docstring": "Method iterates through triple map's predicate object maps\nand processes query.\n\nArgs:\ntriple_map(SimpleNamespace): Triple Map", "source": "codesearchnet"}
{"code": "def pytd_type_to_value(self, typ: pytd.Type) -> abstract.BaseValue:\n    if typ not in self._cache.types:\n        self._cache.types[typ] = self._pytd_type_to_value(typ)\n    return self._cache.types[typ]", "docstring": "Converts a pytd type to an abstract value.\n\nArgs:\ntyp: The type.\n\nReturns:\nThe abstract representation of the type. For example, when passed\n`pytd.ClassType(pytd.Class(int))`, this function returns\n`abstract.SimpleClass(int)`.", "source": "github-repos"}
{"code": "def write_dict_to_new_file(file_name, localization_key_to_comment):\n    output_file_descriptor = open_strings_file(file_name, 'w')\n    for (entry_key, entry_comment) in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):\n        write_entry_to_file(output_file_descriptor, entry_comment, entry_key)\n        output_file_descriptor.write(u'\\n')\n    output_file_descriptor.close()", "docstring": "Writes dictionary of localization keys and comments to a file.\n\nArgs:\nlocalization_key_to_comment (dict): A mapping between localization keys and comments.\nfile_name (str): The path of the file to append to.", "source": "codesearchnet"}
{"code": "def set_property_filter(filter_proto, name, op, value):\n    filter_proto.Clear()\n    pf = filter_proto.property_filter\n    pf.property.name = name\n    pf.op = op\n    set_value(pf.value, value)\n    return filter_proto", "docstring": "Set property filter contraint in the given datastore.Filter proto message.\n\nArgs:\nfilter_proto: datastore.Filter proto message\nname: property name\nop: datastore.PropertyFilter.Operation\nvalue: property value\n\nReturns:\nthe same datastore.Filter.\n\nUsage:\n>>> set_property_filter(filter_proto, 'foo',\n...   datastore.PropertyFilter.EQUAL, 'a')  # WHERE 'foo' = 'a'", "source": "codesearchnet"}
{"code": "def meas_gate(self, circuit, qreg, op):\n        \n        if self.meas_fun is None:\n            pass\n        else:\n            self.meas_fun(circuit, qreg, op)", "docstring": "Add measurement gates to a circuit.\n\nArgs:\ncircuit (QuantumCircuit): circuit to add measurement to.\nqreg (tuple(QuantumRegister,int)): quantum register being measured.\nop (str): the basis label for the measurement.", "source": "juraj-google-style"}
{"code": "def add_op(self, graph_op_creation_digest):\n    if graph_op_creation_digest.op_name in self._op_by_name:\n        raise ValueError('Duplicate op name: %s (op type: %s)' % (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type))\n    self._op_by_name[graph_op_creation_digest.op_name] = graph_op_creation_digest", "docstring": "Add an op creation data object.\n\nArgs:\ngraph_op_creation_digest: A GraphOpCreationDigest data object describing\nthe creation of an op inside this graph.", "source": "github-repos"}
{"code": "def __init__(self, mimeType=PandasCellMimeType):\n        \n        super(MimeData, self).__init__()\n        self._mimeType = mimeType", "docstring": "create a new MimeData object.\n\nArgs:\nmimeType (str): the mime type.", "source": "juraj-google-style"}
{"code": "def aggr(array, op, initial_value, ty):\n    weld_obj = WeldObject(encoder_, decoder_)\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n    weld_template = '\\n      result(\\n        for(\\n          %(array)s,\\n          merger[%(ty)s,%(op)s],\\n          |b, i, e| merge(b, e)\\n        )\\n      )\\n    '\n    weld_obj.weld_code = (weld_template % {'array': array_var, 'ty': ty, 'op': op})\n    return weld_obj", "docstring": "Computes the aggregate of elements in the array.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array to aggregate\nop (str): Op string used to aggregate the array (+ / *)\ninitial_value (int): Initial value for aggregation\nty (WeldType): Type of each element in the input array\n\n\nReturns:\nA WeldObject representing this computation", "source": "codesearchnet"}
{"code": "def get_parents_graph(self, item_ids, language=None):\n        \n        def _parents(item_ids):\n            if item_ids is None:\n                items = Item.objects.filter(active=True).prefetch_related('parents')\n            else:\n                item_ids = [ii for iis in item_ids.values() for ii in iis]\n                items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents')\n            return {item.id: sorted([_item.id for _item in item.parents.all()]) for item in items}\n        return self._reachable_graph(item_ids, _parents, language=language)\n\n        if item_ids is None:\n            return self._reachable_graph(None, _parents, language=language)\n        else:\n            graph = self.get_parents_graph(None, language)\n            return self._subset_graph(graph, item_ids)", "docstring": "Get a subgraph of items reachable from the given set of items through\nthe 'parent' relation.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\ndict: item id -> list of items (parent items), root items are\nreferenced by None key", "source": "juraj-google-style"}
{"code": "def _CheckSignature(self, value_data):\n    \n    signature_map = self._GetDataTypeMap('uint32le')\n\n    try:\n      signature = self._ReadStructureFromByteStream(\n          value_data, 0, signature_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse signature value with error: {0!s}'.format(\n              exception))\n\n    format_type = self._HEADER_SIGNATURES.get(signature, None)\n\n    if format_type == self._FORMAT_TYPE_2003:\n      \n      return self._FORMAT_TYPE_2003\n\n    if format_type == self._FORMAT_TYPE_8:\n      cached_entry_signature = value_data[signature:signature + 4]\n      if cached_entry_signature in (\n          self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1):\n        return self._FORMAT_TYPE_8\n\n    elif format_type == self._FORMAT_TYPE_10:\n      \n      cached_entry_signature = value_data[signature:signature + 4]\n      if cached_entry_signature == self._CACHED_ENTRY_SIGNATURE_8_1:\n        return self._FORMAT_TYPE_10\n\n    return format_type", "docstring": "Parses and validates the signature.\n\nArgs:\nvalue_data (bytes): value data.\n\nReturns:\nint: format type or None if format could not be determined.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def remove_bucket_list_item(self, id, collection, item):\n    if (type(id) is not ObjectId):\n        id = ObjectId(id)\n    obj = getattr(self.db, collection)\n    result = obj.update({'_id': id}, {'$pull': {'bucket_list': item}})\n    return result", "docstring": "Removes an item from the bucket list\n\nArgs:\nid: the CRITs object id of the TLO\ncollection: The db collection. See main class documentation.\nitem: the bucket list item to remove\nReturns:\nThe mongodb result", "source": "codesearchnet"}
{"code": "def _create_table_init_from_file_model_tf1(self, sess: session.Session) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:\n    asset_dir = self.create_tempdir('assets').full_path\n    asset_file = os.path.join(asset_dir, 'vocab_file.txt')\n    content = '\\n'.join(['static', 'range', 'quantization'])\n    file_io.write_string_to_file(filename=asset_file, file_content=content)\n    init = lookup_ops.TextFileInitializer(filename=asset_file, key_dtype=dtypes.string, key_index=lookup_ops.TextFileIndex.WHOLE_LINE, value_dtype=dtypes.int64, value_index=lookup_ops.TextFileIndex.LINE_NUMBER)\n    table = lookup_ops.StaticHashTable(init, default_value=-1)\n    input_vocabs_placeholder = array_ops.placeholder(dtypes.string, shape=(None,), name='input_vocabs')\n    lookup_vals = math_ops.cast(table.lookup(input_vocabs_placeholder), dtypes.float32)\n    matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])\n    weight_row = array_ops.ones(shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32)\n    weight = array_ops.transpose_v2(array_ops_stack.stack([weight_row, weight_row]))\n    output_tensor = math_ops.matmul(matmul_input, weight)\n    return (input_vocabs_placeholder, lookup_vals, output_tensor)", "docstring": "Creates a simple model that initializes a table from an asset file.\n\nThis model creates an asset file at \"vocab_file.txt\" containing\ncomma-separated vocabularies and uses it to initialize a\n`StaticVocabularyTable`. For inference, the model performs a lookup with a\n1D string tensor input vocabs.\n\nArgs:\nsess: Tensorflow Session to create the model in.\n\nReturns:\n(input_vocabs_placeholder, lookup_vals, output_tensor), where\n* input_vocabs_placeholder is a placeholder tensor of 1D strings\n* lookup_vals is an output tensor that is a direct result of table lookup\n* output_tensor is a float 2x2 matrix", "source": "github-repos"}
{"code": "def gaussian_square(times: np.ndarray, amp: complex, center: float, width: float,\n                    sigma: float, zeroed_width: Union[None, float] = None) -> np.ndarray:\n    r\n    square_start = center-width/2\n    square_stop = center+width/2\n    if zeroed_width:\n        zeroed_width = min(width, zeroed_width)\n        gauss_zeroed_width = zeroed_width-width\n    else:\n        gauss_zeroed_width = None\n\n    funclist = [functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma,\n                                  zeroed_width=gauss_zeroed_width, rescale_amp=True),\n                functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma,\n                                  zeroed_width=gauss_zeroed_width, rescale_amp=True),\n                functools.partial(constant, amp=amp)]\n    condlist = [times <= square_start, times >= square_stop]\n    return np.piecewise(times.astype(np.complex_), condlist, funclist)", "docstring": "r\"\"\"Continuous gaussian square pulse.\n\nArgs:\ntimes: Times to output pulse for.\namp: Pulse amplitude.\ncenter: Center of the square pulse component.\nwidth: Width of the square pulse component.\nsigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.\nzeroed_width: Subtract baseline of gaussian square pulse\nto enforce $\\OmegaSquare(center \\pm zeroed_width/2)=0$.", "source": "juraj-google-style"}
{"code": "class ChineseCLIPVisionEncoder(nn.Module):\n\n    def __init__(self, config: ChineseCLIPConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`ChineseCLIPVisionEncoderLayer`].\n\nArgs:\nconfig: ChineseCLIPConfig", "source": "github-repos"}
{"code": "def _GetArgType(arg, spec):\n    if arg in spec.annotations:\n        arg_type = spec.annotations[arg]\n        try:\n            return arg_type.__qualname__\n        except AttributeError:\n            return repr(arg_type)\n    return ''", "docstring": "Returns a string describing the type of an argument.\n\nArgs:\narg: The name of the argument.\nspec: An instance of fire.inspectutils.FullArgSpec, containing type and\ndefault information about the arguments to a callable.\nReturns:\nA string to be used in constructing the help screen for the function, the\nempty string if the argument type is not available.", "source": "github-repos"}
{"code": "def _from_dict_record(data):\n    return [Schema._get_field_entry(name, value) for (name, value) in list(data.items())]", "docstring": "Infer a BigQuery table schema from a dictionary. If the dictionary has entries that\nare in turn OrderedDicts these will be turned into RECORD types. Ideally this will\nbe an OrderedDict but it is not required.\n\nArgs:\ndata: The dict to infer a schema from.\nReturns:\nA list of dictionaries containing field 'name' and 'type' entries, suitable for use in a\nBigQuery Tables resource schema.", "source": "codesearchnet"}
{"code": "def concat(self, axis, other, **kwargs):\n        \n        return self._append_list_of_managers(other, axis, **kwargs)", "docstring": "Concatenates two objects together.\n\nArgs:\naxis: The axis index object to join (0 for columns, 1 for index).\nother: The other_index to concat with.\n\nReturns:\nConcatenated objects.", "source": "juraj-google-style"}
{"code": "def _on_disconnect(self):\n    self._logger.info('Connection to device %s was interrupted', self.connection_string)\n    self.connection_interrupted = True", "docstring": "Callback when a device is disconnected unexpectedly.\n\nArgs:\nadapter_id (int): An ID for the adapter that was connected to the device\nconnection_id (int): An ID for the connection that has become disconnected", "source": "codesearchnet"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key != 'line':\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=structure.date_time)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    body_text = structure.body\n    if not body_text:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid body {0:s}'.format(structure.body))\n      return\n\n    event_data = DpkgEventData()\n    event_data.body = body_text\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a structure of tokens derived from a line of a text file.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nkey (str): identifier of the structure of tokens.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def _ExtractDataStream(\n      self, file_entry, data_stream_name, destination_path, output_writer,\n      skip_duplicates=True):\n    \n    if not data_stream_name and not file_entry.IsFile():\n      return\n\n    display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(\n        file_entry.path_spec)\n\n    if skip_duplicates:\n      try:\n        digest = self._CalculateDigestHash(file_entry, data_stream_name)\n      except (IOError, dfvfs_errors.BackEndError) as exception:\n        output_writer.Write((\n            '[skipping] unable to read content of file entry: {0:s} '\n            'with error: {1!s}\\n').format(display_name, exception))\n        return\n\n      if not digest:\n        output_writer.Write(\n            '[skipping] unable to read content of file entry: {0:s}\\n'.format(\n                display_name))\n        return\n\n      duplicate_display_name = self._digests.get(digest, None)\n      if duplicate_display_name:\n        output_writer.Write((\n            '[skipping] file entry: {0:s} is a duplicate of: {1:s} with '\n            'digest: {2:s}\\n').format(\n                display_name, duplicate_display_name, digest))\n        return\n\n      self._digests[digest] = display_name\n\n    target_directory, target_filename = self._CreateSanitizedDestination(\n        file_entry, file_entry.path_spec, data_stream_name, destination_path)\n\n    if not os.path.isdir(target_directory):\n      os.makedirs(target_directory)\n\n    target_path = os.path.join(target_directory, target_filename)\n\n    if os.path.exists(target_path):\n      output_writer.Write((\n          '[skipping] unable to export contents of file entry: {0:s} '\n          'because exported file: {1:s} already exists.\\n').format(\n              display_name, target_path))\n      return\n\n    try:\n      self._WriteFileEntry(file_entry, data_stream_name, target_path)\n    except (IOError, dfvfs_errors.BackEndError) as exception:\n      output_writer.Write((\n          '[skipping] unable to export contents of file entry: {0:s} '\n          'with error: {1!s}\\n').format(display_name, exception))\n\n      try:\n        os.remove(target_path)\n      except (IOError, OSError):\n        pass", "docstring": "Extracts a data stream.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry containing the data stream.\ndata_stream_name (str): name of the data stream.\ndestination_path (str): path where the extracted files should be stored.\noutput_writer (CLIOutputWriter): output writer.\nskip_duplicates (Optional[bool]): True if files with duplicate content\nshould be skipped.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    hostname = output_mediator.GetStoredHostname()\n    if hostname:\n      logger.debug('Hostname: {0:s}'.format(hostname))\n\n    super(TimesketchOutputModule, self).__init__(output_mediator)\n    self._timeline_name = hostname\n    self._timeline_owner = None\n    self._timesketch = timesketch.create_app()", "docstring": "Initializes a Timesketch output module.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def _ParseHeader(self, parser_mediator, file_object):\n    \n    header_map = self._GetDataTypeMap('cups_ipp_header')\n\n    try:\n      header, _ = self._ReadStructureFromFileObject(file_object, 0, header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          '[{0:s}] Unable to parse header with error: {1!s}'.format(\n              self.NAME, exception))\n\n    format_version = '{0:d}.{1:d}'.format(\n        header.major_version, header.minor_version)\n    if format_version not in self._SUPPORTED_FORMAT_VERSIONS:\n      raise errors.UnableToParseFile(\n          '[{0:s}] Unsupported format version {1:s}.'.format(\n              self.NAME, format_version))\n\n    if header.operation_identifier != 5:\n      \n      display_name = parser_mediator.GetDisplayName()\n      logger.debug((\n          '[{0:s}] Non-standard operation identifier: 0x{1:08x} in file header '\n          'of: {2:s}.').format(\n              self.NAME, header.operation_identifier, display_name))", "docstring": "Parses a CUPS IPP header from a file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the header cannot be parsed.", "source": "juraj-google-style"}
{"code": "def SampleMemoryUsage(self, parser_name):\n    \n    if self._memory_profiler:\n      used_memory = self._process_information.GetUsedMemory() or 0\n      self._memory_profiler.Sample(parser_name, used_memory)", "docstring": "Takes a sample of the memory usage for profiling.\n\nArgs:\nparser_name (str): name of the parser.", "source": "juraj-google-style"}
{"code": "def get_average_along_axis(self, ind):\n    m = self.data['total']\n    ng = self.dim\n    if (ind == 0):\n        total = np.sum(np.sum(m, axis=1), 1)\n    elif (ind == 1):\n        total = np.sum(np.sum(m, axis=0), 1)\n    else:\n        total = np.sum(np.sum(m, axis=0), 0)\n    return ((total / ng[((ind + 1) % 3)]) / ng[((ind + 2) % 3)])", "docstring": "Get the averaged total of the volumetric data a certain axis direction.\nFor example, useful for visualizing Hartree Potentials from a LOCPOT\nfile.\n\nArgs:\nind (int): Index of axis.\n\nReturns:\nAverage total along axis", "source": "codesearchnet"}
{"code": "def with_hot_key_fanout(self, fanout):\n    from apache_beam.transforms.combiners import curry_combine_fn\n    if fanout is None:\n        return self\n    else:\n        return _CombinePerKeyWithHotKeyFanout(curry_combine_fn(self.fn, self.args, self.kwargs), fanout)", "docstring": "A per-key combine operation like self but with two levels of aggregation.\n\nIf a given key is produced by too many upstream bundles, the final\nreduction can become a bottleneck despite partial combining being lifted\npre-GroupByKey.  In these cases it can be helpful to perform intermediate\npartial aggregations in parallel and then re-group to peform a final\n(per-key) combine.  This is also useful for high-volume keys in streaming\nwhere combiners are not generally lifted for latency reasons.\n\nNote that a fanout greater than 1 requires the data to be sent through\ntwo GroupByKeys, and a high fanout can also result in more shuffle data\ndue to less per-bundle combining. Setting the fanout for a key at 1 or less\nplaces values on the \"cold key\" path that skip the intermediate level of\naggregation.\n\nArgs:\nfanout: either None, for no fanout, an int, for a constant-degree fanout,\nor a callable mapping keys to a key-specific degree of fanout.\n\nReturns:\nA per-key combining PTransform with the specified fanout.", "source": "github-repos"}
{"code": "def _serialize(self, entity, pb, prefix='', parent_repeated=False, projection=None):\n    values = self._get_base_value_unwrapped_as_list(entity)\n    name = (prefix + self._name)\n    if (projection and (name not in projection)):\n        return\n    if self._indexed:\n        create_prop = (lambda : pb.add_property())\n    else:\n        create_prop = (lambda : pb.add_raw_property())\n    if (self._repeated and (not values) and self._write_empty_list):\n        p = create_prop()\n        p.set_name(name)\n        p.set_multiple(False)\n        p.set_meaning(entity_pb.Property.EMPTY_LIST)\n        p.mutable_value()\n    else:\n        for val in values:\n            p = create_prop()\n            p.set_name(name)\n            p.set_multiple((self._repeated or parent_repeated))\n            v = p.mutable_value()\n            if (val is not None):\n                self._db_set_value(v, p, val)\n                if projection:\n                    new_p = entity_pb.Property()\n                    new_p.set_name(p.name())\n                    new_p.set_meaning(entity_pb.Property.INDEX_VALUE)\n                    new_p.set_multiple(False)\n                    new_p.mutable_value().CopyFrom(v)\n                    p.CopyFrom(new_p)", "docstring": "Internal helper to serialize this property to a protocol buffer.\n\nSubclasses may override this method.\n\nArgs:\nentity: The entity, a Model (subclass) instance.\npb: The protocol buffer, an EntityProto instance.\nprefix: Optional name prefix used for StructuredProperty\n(if present, must end in '.').\nparent_repeated: True if the parent (or an earlier ancestor)\nis a repeated Property.\nprojection: A list or tuple of strings representing the projection for\nthe model instance, or None if the instance is not a projection.", "source": "codesearchnet"}
{"code": "def set_working_directory(working_directory):\n    logger.debug('starting')\n    logger.debug(f'adding {working_directory} to sys.paths')\n    sys.path.append(working_directory)\n    logger.debug('done')", "docstring": "Add working_directory to sys.paths.\n\nThis allows dynamic loading of arbitrary python modules in cwd.\n\nArgs:\nworking_directory: string. path to add to sys.paths", "source": "codesearchnet"}
{"code": "def visualize(G, settings, filename=\"dependencies\", no_graphviz=False):\n    \n    error = settings[\"error\"]\n    if no_graphviz:\n        write_dot_file(G, filename)\n        return 0\n    write_dot_file(G, \"tempdot\")\n    renderer = \"svg\"\n    if re.search(\"\\.jpg$\", filename, re.IGNORECASE):\n        renderer = \"jpg\"\n    elif re.search(\"\\.jpeg$\", filename, re.IGNORECASE):\n        renderer = \"jpg\"\n    elif re.search(\"\\.svg$\", filename, re.IGNORECASE):\n        renderer = \"svg\"\n    elif re.search(\"\\.png$\", filename, re.IGNORECASE):\n        renderer = \"png\"\n    elif re.search(\"\\.gif$\", filename, re.IGNORECASE):\n        renderer = \"gif\"\n    elif re.search(\"\\.ps$\", filename, re.IGNORECASE):\n        renderer = \"ps\"\n    elif re.search(\"\\.pdf$\", filename, re.IGNORECASE):\n        renderer = \"pdf\"\n    else:\n        renderer = \"svg\"\n        filename += \".svg\"\n    command = \"dot -T{} tempdot -o {}\".format(renderer, filename)\n    p = Popen(command, shell=True)\n    p.communicate()\n    if p.returncode:\n        errmes = \"Either graphviz is not installed, or its not on PATH\"\n        os.remove(\"tempdot\")\n        error(errmes)\n        sys.exit(1)\n    os.remove(\"tempdot\")\n    return 0", "docstring": "Uses networkX to draw a graphviz dot file either (a) calls the\ngraphviz command \"dot\" to turn it into a SVG and remove the\ndotfile (default), or (b) if no_graphviz is True, just output\nthe graphviz dot file\n\nArgs:\na NetworkX DiGraph\nthe settings dictionary\na filename (a default is provided\na flag indicating whether graphviz should *not* be called\n\nReturns:\n0 if everything worked\nwill cause fatal error on failure", "source": "juraj-google-style"}
{"code": "def export_as_code(self, cv_source):\n    rand_value = ''.join((random.choice((string.ascii_uppercase + string.digits)) for _ in range(25)))\n    base_learner_code = ''\n    base_learner_code += 'base_learner_list_{} = []\\n'.format(rand_value)\n    base_learner_code += 'meta_feature_generators_list_{} = []\\n\\n'.format(rand_value)\n    for (idx, base_learner) in enumerate(self.base_learners):\n        base_learner_code += '\n        base_learner_code += '\n        base_learner_code += '\n        base_learner_code += base_learner.base_learner_origin.source\n        base_learner_code += '\\n\\n'\n        base_learner_code += 'base_learner.set_params(**{})\\n'.format(base_learner.hyperparameters)\n        base_learner_code += 'base_learner_list_{}.append(base_learner)\\n'.format(rand_value)\n        base_learner_code += 'meta_feature_generators_list_{}.append(\"{}\")\\n'.format(rand_value, base_learner.base_learner_origin.meta_feature_generator)\n        base_learner_code += '\\n\\n'\n    base_learner_code += '\n    base_learner_code += '\n    base_learner_code += '\n    base_learner_code += self.base_learner_origin.source\n    base_learner_code += '\\n\\n'\n    base_learner_code += 'base_learner.set_params(**{})\\n'.format(self.secondary_learner_hyperparameters)\n    base_learner_code += 'secondary_learner_{} = base_learner\\n'.format(rand_value)\n    base_learner_code += '\\n\\n'\n    base_learner_code += '\n    base_learner_code += '\n    base_learner_code += '\n    base_learner_code += cv_source\n    base_learner_code += '\\n\\n'\n    base_learner_code += '\n    base_learner_code += '\n    base_learner_code += '\n    stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')\n    with open(stacker_file_loc) as f2:\n        base_learner_code += f2.read()\n    base_learner_code += '\\n\\n    def {}(self, X):\\n        return self._process_using_meta_feature_generator(X, \"{}\")\\n\\n'.format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator)\n    base_learner_code += '\\n\\n'\n    base_learner_code += 'base_learner = XcessivStackedEnsemble(base_learners=base_learner_list_{}, meta_feature_generators=meta_feature_generators_list_{}, secondary_learner=secondary_learner_{}, cv_function=return_splits_iterable)\\n'.format(rand_value, rand_value, rand_value)\n    return base_learner_code", "docstring": "Returns a string value that contains the Python code for the ensemble\n\nArgs:\ncv_source (str, unicode): String containing actual code for base learner\ncross-validation used to generate secondary meta-features.\n\nReturns:\nbase_learner_code (str, unicode): String that can be used as Python code", "source": "codesearchnet"}
{"code": "def writeTable(self, tableName):\n        \n        lock_and_call(\n            lambda: self._impl.writeTable(tableName),\n            self._lock\n        )", "docstring": "Write the table corresponding to the specified name, equivalent to the\nAMPL statement\n\n.. code-block:: ampl\n\nwrite table tableName;\n\nArgs:\ntableName: Name of the table to be written.", "source": "juraj-google-style"}
{"code": "def _add_variable_proxy_methods(var, proxy_tensor):\n  \n  proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)\n  proxy_tensor.assign_sub = var.assign_sub\n  proxy_tensor.assign = var.assign\n  proxy_tensor.initialized_value = var.initialized_value", "docstring": "Proxy methods of underlying variable.\n\nThis enables our custom getters to still work with, e.g., batch norm.\n\nArgs:\nvar: Variable to proxy\nproxy_tensor: Tensor that is identity of var", "source": "juraj-google-style"}
{"code": "def events(config):\n    celery_app = create_app(config)\n    for event in event_stream(celery_app, filter_by_prefix='task'):\n        try:\n            (yield create_event_model(event))\n        except JobEventTypeUnsupported:\n            pass", "docstring": "Return a generator that yields workflow events.\n\nFor every workflow event that is sent from celery this generator yields an event\nobject.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings are retrieved.\n\nReturns:\ngenerator: A generator that returns workflow events.", "source": "codesearchnet"}
{"code": "def sum(x, axis=None, keepdims=False):\n    from .function_bases import sum as sum_base\n    if (axis is None):\n        axis = range(x.ndim)\n    elif (not hasattr(axis, '__iter__')):\n        axis = [axis]\n    return sum_base(x, axis, keepdims)", "docstring": "Reduction along axes with sum operation.\n\nArgs:\nx (Variable): An input variable.\naxis (None, int or tuple of ints): Axis or axes along which the sum is\ncalculated. Passing the default value `None` will reduce all dimensions.\nkeepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.\n\nReturns:\n~nnabla.Variable: N-D array.", "source": "codesearchnet"}
{"code": "def matches(self, address, name=None):\n    if self.controller:\n        return (address == 8)\n    return (self.address == address)", "docstring": "Check if this slot identifier matches the given tile.\n\nMatching can happen either by address or by module name (not currently implemented).\n\nReturns:\nbool: True if there is a match, otherwise False.", "source": "codesearchnet"}
{"code": "def create_nanopubs_fh(output_fn: str):\n    \n\n    \n    \n    json_flag, jsonl_flag, yaml_flag = False, False, False\n    if output_fn:\n        if re.search(\"gz$\", output_fn):\n            out_fh = gzip.open(output_fn, \"wt\")\n        else:\n            out_fh = click.open_file(output_fn, mode=\"wt\")\n\n        if re.search(\"ya?ml\", output_fn):\n            yaml_flag = True\n        elif \"jsonl\" in output_fn or \"-\" == output_fn:\n            jsonl_flag = True\n        elif \"json\" in output_fn:\n            json_flag = True\n\n    else:\n        out_fh = sys.stdout\n\n    return (out_fh, yaml_flag, jsonl_flag, json_flag)", "docstring": "Create Nanopubs output filehandle\n\n\\b\nIf output fn is '-' will write JSONlines to STDOUT\nIf output fn has *.gz, will written as a gzip file\nIf output fn has *.jsonl*, will written as a JSONLines file\nIF output fn has *.json*, will be written as a JSON file\nIf output fn has *.yaml* or *.yml*,  will be written as a YAML file\n\nArgs:\noutput_fn: Name of output file\n\nReturns:\n(filehandle, yaml_flag, jsonl_flag, json_flag)", "source": "juraj-google-style"}
{"code": "def emit(self, record):\n    record.task = self.cur_task\n    if ((record.levelno >= self.dump_level) and self.cur_task):\n        self.tasks[self.cur_task].failed = True\n        self.tasks[self.cur_task].force_show = True\n    is_start = START_TASK_REG.match(str(record.msg))\n    if is_start:\n        self.handle_new_task(is_start.groupdict()['task_name'], record)\n        return\n    is_end = END_TASK_REG.match(str(record.msg))\n    if is_end:\n        self.handle_closed_task(is_end.groupdict()['task_name'], record)\n        return\n    force_show_record = ALWAYS_SHOW_REG.match(str(record.msg))\n    if force_show_record:\n        record.msg = force_show_record.groupdict()['message']\n        self.pretty_emit(record)\n    if ((not force_show_record) and self.should_show_by_level(record) and self.should_show_by_depth()):\n        self.pretty_emit(record)\n        return\n    if self.cur_task:\n        self.tasks[self.cur_task].append(record)", "docstring": "Handle the given record, this is the entry point from the python\nlogging facility\n\nParams:\nrecord (logging.LogRecord): log record to handle\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def attach_bytes(key, the_bytes):\n  \n  tf_v1.add_to_collection(\n      _ATTACHMENT_COLLECTION_INTERNAL,\n      module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))", "docstring": "Adds a ModuleAttachment to the current graph.\n\nArgs:\nkey: A string with the unique key of the attachment.\nthe_bytes: A bytes object with the serialized attachment.", "source": "juraj-google-style"}
{"code": "def delete(self, url, params=None, **kwargs):\n    return self.call_api('DELETE', url, params=params, **kwargs)", "docstring": "Call the API with a DELETE request.\n\nArgs:\nurl (str): Resource location relative to the base URL.\nparams (dict or None): Query-string parameters.\n\nReturns:\nResultParser or ErrorParser.", "source": "codesearchnet"}
{"code": "def fetch_credential(self, credential=None, profile=None):\n    q = self.db.get((self.query.profile == profile))\n    if (q is not None):\n        return q.get(credential)", "docstring": "Fetch credential from credentials file.\n\nArgs:\ncredential (str): Credential to fetch.\nprofile (str): Credentials profile. Defaults to ``'default'``.\n\nReturns:\nstr, None: Fetched credential or ``None``.", "source": "codesearchnet"}
{"code": "def resize_video(in_file,\n                 out_file,\n                 size=None,\n                 ratio=None,\n                 keep_ar=False,\n                 log_level='info',\n                 print_cmd=False,\n                 **kwargs):\n    \n    if size is None and ratio is None:\n        raise ValueError('expected size or ratio must be specified')\n    elif size is not None and ratio is not None:\n        raise ValueError('size and ratio cannot be specified at the same time')\n    options = {'log_level': log_level}\n    if size:\n        if not keep_ar:\n            options['vf'] = 'scale={}:{}'.format(size[0], size[1])\n        else:\n            options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio'\n                             '=decrease'.format(size[0], size[1]))\n    else:\n        if not isinstance(ratio, tuple):\n            ratio = (ratio, ratio)\n        options['vf'] = 'scale=\"trunc(iw*{}):trunc(ih*{})\"'.format(\n            ratio[0], ratio[1])\n    convert_video(in_file, out_file, print_cmd, **options)", "docstring": "Resize a video.\n\nArgs:\nin_file (str): Input video filename.\nout_file (str): Output video filename.\nsize (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).\nratio (tuple or float): Expected resize ratio, (2, 0.5) means\n(w*2, h*0.5).\nkeep_ar (bool): Whether to keep original aspect ratio.\nlog_level (str): Logging level of ffmpeg.\nprint_cmd (bool): Whether to print the final ffmpeg command.", "source": "juraj-google-style"}
{"code": "def _generate_image_and_label_batch(image, label, min_queue_examples, batch_size, shuffle):\n    num_preprocess_threads = 16\n    if shuffle:\n        (images, label_batch) = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples)\n    else:\n        (images, label_batch) = tf.train.batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)))\n    tf.summary.image('images', images)\n    return (images, tf.reshape(label_batch, [batch_size]))", "docstring": "Construct a queued batch of images and labels.\n\nArgs:\nimage: 3-D Tensor of [height, width, 3] of type.float32.\nlabel: 1-D Tensor of type.int32\nmin_queue_examples: int32, minimum number of samples to retain\nin the queue that provides of batches of examples.\nbatch_size: Number of images per batch.\nshuffle: boolean indicating whether to use a shuffling queue.\n\nReturns:\nimages: Images. 4D tensor of [batch_size, height, width, 3] size.\nlabels: Labels. 1D tensor of [batch_size] size.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, name):\n        \n        CodeStatement.__init__(self, scope, parent)\n        self.name = name\n        self.value = None", "docstring": "Constructor for jump statements.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the statement in the program.", "source": "juraj-google-style"}
{"code": "def get_group(self, group_id):\n    group = self.group_id_map.get(group_id)\n    if group:\n        return group\n    self.logger.error(('Group ID \"%s\" is not in datafile.' % group_id))\n    self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID_ERROR))\n    return None", "docstring": "Get group for the provided group ID.\n\nArgs:\ngroup_id: Group ID for which group is to be determined.\n\nReturns:\nGroup corresponding to the provided group ID.", "source": "codesearchnet"}
{"code": "def NotEqualTo(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, '!=')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"not equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def instantiate(self, input_types):\n    key = _type_list_to_str(input_types)\n    defined = self._overload.get(key)\n    if not defined:\n        name = self._func_name\n        if name is not None:\n            name = '_'.join([name, key])\n        defined = _DefinedFunction(self._func, self._argnames, input_types, name, None, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs)\n        _ = defined.name\n        if self._grad_func:\n            output_types = [dtypes.DType(_.type) for _ in defined._signature.output_arg]\n            defined._grad_func = self._grad_func.instantiate(input_types + output_types)\n        self._overload[key] = defined\n    return defined", "docstring": "Instantiate this function given input argument types.\n\nArgs:\ninput_types: A list of data types for the inputs.\n\nReturns:\n_DefinedFunction for the given input types.", "source": "github-repos"}
{"code": "def broadcast_dimension(self, axis, lengths):\n    lengths = ragged_util.convert_to_int_tensor(lengths, name='lengths', dtype=self.dim_size_dtype)\n    if lengths.shape.ndims is None:\n        raise ValueError('lengths must have a known rank.')\n    elif lengths.shape.ndims > 1:\n        raise ValueError('lengths must be a scalar or vector')\n    else:\n        lengths_is_scalar = lengths.shape.ndims == 0\n    if self.is_ragged(axis):\n        if lengths_is_scalar:\n            condition = math_ops.equal(lengths, 1)\n        else:\n            condition = math_ops.reduce_all(math_ops.equal(lengths, self.dimension_size(axis)))\n    else:\n        axis_dim_size = self.dimension_size(axis)\n        if lengths_is_scalar:\n            condition = math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1) | math_ops.equal(axis_dim_size, lengths)\n        else:\n            condition = math_ops.equal(axis_dim_size, 1)\n    broadcast_err = ['Unable to broadcast: dimension size mismatch in dimension', axis, 'lengths=', lengths, 'dim_size=', self.dimension_size(axis)]\n    broadcast_check = control_flow_assert.Assert(condition, data=broadcast_err, summarize=10)\n    with ops.control_dependencies([broadcast_check]):\n        if axis < self.num_partitioned_dimensions:\n            if self.is_ragged(axis):\n                return RaggedTensorDynamicShape(self._partitioned_dim_sizes, array_ops.identity(self.inner_dim_sizes), self.dim_size_dtype)\n            else:\n                return self._broadcast_uniform_partitioned_dimension(axis, lengths)\n        elif lengths_is_scalar:\n            return self._broadcast_inner_dimension_to_uniform(axis, lengths)\n        else:\n            if axis == 0:\n                raise ValueError('Unable to broadcast: outermost dimension must be uniform.')\n            return self._broadcast_inner_dimension_to_ragged(axis, lengths)", "docstring": "Returns a shape that is broadcast-compatible with self & lengths.\n\n* If dimension[axis] is uniform and lengths is a scalar, the check\nthat either lengths==1 or axis==1 or lengths==axis, and tile\ndimension[axis] with tf.where(lengths==axis, 1, axis) repeats.\n\n* If dimension[axis] is uniform and lengths is a vector, then check\nthat dimension[axis]==1, and raggedly tile dimension[axis] with\nlengths repeats.  (we can skip tiling if we statically know that\nslice_lengths == 1??)\n\n* If dimension[axis] is ragged and lengths is a scalar, then check\nthat lengths==1.\n\n* If dimension[axis] is ragged and lengths is a vector, then check\nthat self.dimension_size(axis) == lengths.\n\nArgs:\naxis: `int`.  The dimension to broadcast.\nlengths: 0-D or 1-D integer `Tensor`.\n\nReturns:\nA `RaggedTensorDynamicShape`.", "source": "github-repos"}
{"code": "def write_info_file(resource, path, dataset_name, original_fname):\n    info_path = _get_info_path(path)\n    info = (_read_info(info_path) or {})\n    urls = set((info.get('urls', []) + [resource.url]))\n    dataset_names = info.get('dataset_names', [])\n    if dataset_name:\n        dataset_names.append(dataset_name)\n    if (('original_fname' in info) and (info['original_fname'] != original_fname)):\n        raise AssertionError(('`original_fname` \"%s\" stored in %s does NOT match \"%s\".' % (info['original_fname'], info_path, original_fname)))\n    info = dict(urls=list(urls), dataset_names=list(set(dataset_names)), original_fname=original_fname)\n    with py_utils.atomic_write(info_path, 'w') as info_f:\n        json.dump(info, info_f, sort_keys=True)", "docstring": "Write the INFO file next to local file.\n\nAlthough the method is synchronized, there is still a risk two processes\nrunning at the same time overlap here. Risk accepted, since potentially lost\ndata (`dataset_name`) is only for human consumption.\n\nArgs:\nresource: resource for which to write the INFO file.\npath: path of downloaded file.\ndataset_name: data used to dl the file.\noriginal_fname: name of file as downloaded.", "source": "codesearchnet"}
{"code": "def _einsum_helper(input_shapes, output_shape, mesh_impl):\n  \n  input_shape_union = _shape_union(input_shapes)\n  total_num_dims = input_shape_union.ndims\n  \n  full_shapes = [\n      s for s in input_shapes + [output_shape] if s.ndims == total_num_dims]\n  full_shape = full_shapes[0] if full_shapes else input_shape_union\n  reduce_slice_fn, reduced_mesh_axes = _reduce_helper(\n      full_shape, output_shape, mesh_impl.tensor_layout(full_shape))\n  def einsum_slice_fn_naive(*slices):\n    \n    \n    return reduce_slice_fn(functools.reduce(tf.multiply, [\n        _expand_dims(x, input_shape, full_shape)\n        for x, input_shape in zip(slices, input_shapes)]))\n  if full_shapes:\n    \n    \n    einsum_slice_fn = einsum_slice_fn_naive\n  else:\n    \n    equation = _einsum_equation(input_shapes, output_shape)\n    def einsum_slice_fn(*slices):\n      if slices[0].dtype.is_floating:\n        return mesh_impl.einsum(equation, *slices)\n      else:\n        return einsum_slice_fn_naive(*slices)\n  return einsum_slice_fn, reduced_mesh_axes", "docstring": "Returns slicewise function and reduced mesh dimensions.\n\nAssumes the output shape contains no new dimensions.\n\nArgs:\ninput_shapes: a list of Shapes\noutput_shape: a Shape\nmesh_impl: a MeshImpl\nReturns:\neinsum_slice_fn: a function from tf.Tensors to tf.Tensor\nreduced_mesh_axes: a list of integers", "source": "juraj-google-style"}
{"code": "def decode(self, tx):\n        \n        if not isinstance(self._service, BitcoinBlockrService):\n            raise NotImplementedError('Currently only supported for \"blockr.io\"')\n        return self._service.decode(tx)", "docstring": "Decodes the given transaction.\n\nArgs:\ntx: hex of transaction\nReturns:\ndecoded transaction\n\n.. note:: Only supported for blockr.io at the moment.", "source": "juraj-google-style"}
{"code": "def _bitResponseToValue(bytestring):\n    \n    _checkString(bytestring, description='bytestring', minlength=1, maxlength=1)\n\n    RESPONSE_ON  = '\\x01'\n    RESPONSE_OFF = '\\x00'\n\n    if bytestring == RESPONSE_ON:\n        return 1\n    elif bytestring == RESPONSE_OFF:\n        return 0\n    else:\n        raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))", "docstring": "Convert a response string to a numerical value.\n\nArgs:\nbytestring (str): A string of length 1. Can be for example ``\\\\x01``.\n\nReturns:\nThe converted value (int).\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def FetchBlobsForSignedBinary(\n    binary_urn,\n    token = None\n):\n  \n  if _ShouldUseLegacyDatastore():\n    try:\n      aff4_stream = aff4.FACTORY.Open(\n          binary_urn, aff4_type=collects.GRRSignedBlob, mode=\"r\", token=token)\n    except aff4.InstantiationError:\n      raise SignedBinaryNotFoundError(binary_urn)\n    timestamp = aff4_stream.Get(aff4_stream.Schema.TYPE).age\n    return (blob for blob in aff4_stream), timestamp\n  else:\n    try:\n      references, timestamp = data_store.REL_DB.ReadSignedBinaryReferences(\n          _SignedBinaryIDFromURN(binary_urn))\n    except db.UnknownSignedBinaryError:\n      raise SignedBinaryNotFoundError(binary_urn)\n    blob_ids = [r.blob_id for r in references.items]\n    raw_blobs = (data_store.BLOBS.ReadBlob(blob_id) for blob_id in blob_ids)\n    blobs = (\n        rdf_crypto.SignedBlob.FromSerializedString(raw_blob)\n        for raw_blob in raw_blobs)\n    return blobs, timestamp", "docstring": "Retrieves blobs for the given binary from the datastore.\n\nArgs:\nbinary_urn: RDFURN that uniquely identifies the binary.\ntoken: ACL token to use with the legacy (non-relational) datastore.\n\nReturns:\nA tuple containing an iterator for all the binary's blobs and an\nRDFDatetime representing when the binary's contents were saved\nto the datastore.\n\nRaises:\nSignedBinaryNotFoundError: If no signed binary with the given URN exists.", "source": "juraj-google-style"}
{"code": "def minimize(self, time, variables, **kwargs):\n        \n        loss = kwargs[\"fn_loss\"]\n        sampled_loss = kwargs[\"sampled_loss\"]\n\n        min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)\n        return min_op", "docstring": "Performs an optimization step.\n\nArgs:\ntime: Time tensor. Not used for this\nvariables: List of variables to optimize.\n**kwargs:\nfn_loss : loss function tensor that is differentiated\nsampled_loss : the sampled loss from running the model.\n\nReturns:\nThe optimization operation.", "source": "juraj-google-style"}
{"code": "def reraise_modify(caught_exc, append_msg, prepend=False):\n    ExceptClass = type(caught_exc)\n    traceback = sys.exc_info()[2]\n    if (not caught_exc.args):\n        arg_list = [append_msg]\n    else:\n        arg_list = list(caught_exc.args[:(- 1)])\n        last_arg = caught_exc.args[(- 1)]\n        if isinstance(last_arg, str):\n            if prepend:\n                arg_list.append((append_msg + last_arg))\n            else:\n                arg_list.append((last_arg + append_msg))\n        else:\n            arg_list += [last_arg, append_msg]\n    caught_exc.args = tuple(arg_list)\n    six.reraise(ExceptClass, caught_exc, traceback)", "docstring": "Append message to exception while preserving attributes.\n\nPreserves exception class, and exception traceback.\n\nNote:\nThis function needs to be called inside an except because\n`sys.exc_info()` requires the exception context.\n\nArgs:\ncaught_exc(Exception): The caught exception object\nappend_msg(str): The message to append to the caught exception\nprepend(bool): If True prepend the message to args instead of appending\n\nReturns:\nNone\n\nSide Effects:\nRe-raises the exception with the preserved data / trace but\nmodified message", "source": "codesearchnet"}
{"code": "def __init__(self, name, filterString='', dataFrame=pd.DataFrame()):\n        \n        self._filterString = filterString\n        self._dataFrame = dataFrame\n        self.name = name", "docstring": "Constructs a `DataSearch` object from the given attributes.\n\nArgs:\nname (str): The name of the filter.\nfilterString (str, optional): A python expression as string.\nDefaults to an empty string.\ndataFrame (pandas.DataFrame, optional): The object to filter.\nDefaults to an empty `DataFrame`.", "source": "juraj-google-style"}
{"code": "def truncated_normal_ll_gradient(params, low, high, data):\n    if (params[1] == 0):\n        return np.array([np.inf, np.inf])\n    return np.array([_TruncatedNormalFitter.partial_derivative_mu(params[0], params[1], low, high, data), _TruncatedNormalFitter.partial_derivative_sigma(params[0], params[1], low, high, data)])", "docstring": "Return the gradient of the log likelihood of the truncated normal at the given position.\n\nArgs:\nparams: tuple with (mean, std), the parameters under which we evaluate the model\nlow (float): the lower truncation bound\nhigh (float): the upper truncation bound\ndata (ndarray): the one dimension list of data points for which we want to calculate the likelihood\n\nReturns:\ntuple: the gradient of the log likelihood given as a tuple with (mean, std)", "source": "codesearchnet"}
{"code": "def handle_no_document(self, item_session: ItemSession) -> Actions:\n    self._waiter.reset()\n    action = self.handle_response(item_session)\n    if (action == Actions.NORMAL):\n        item_session.set_status(Status.skipped)\n    return action", "docstring": "Callback for successful responses containing no useful document.\n\nReturns:\nA value from :class:`.hook.Actions`.", "source": "codesearchnet"}
{"code": "def tf_solve(self, fn_x, x_init, b):\n        \n        return super(ConjugateGradient, self).tf_solve(fn_x, x_init, b)", "docstring": "Iteratively solves the system of linear equations $A x = b$.\n\nArgs:\nfn_x: A callable returning the left-hand side $A x$ of the system of linear equations.\nx_init: Initial solution guess $x_0$, zero vector if None.\nb: The right-hand side $b$ of the system of linear equations.\n\nReturns:\nA solution $x$ to the problem as given by the solver.", "source": "juraj-google-style"}
{"code": "def when(self, key):\n    ctx = Context(key, self)\n    self.context.append(ctx)\n    return ctx", "docstring": "Specify context, i.e. condition that must be met.\n\nArguments:\nkey (str): Name of the context whose value you want to query.\nReturns:\nContext:", "source": "codesearchnet"}
{"code": "def _serve_plugins_listing(self, request):\n    \n    response = {}\n    for plugin in self._plugins:\n      start = time.time()\n      response[plugin.plugin_name] = plugin.is_active()\n      elapsed = time.time() - start\n      logger.info(\n          'Plugin listing: is_active() for %s took %0.3f seconds',\n          plugin.plugin_name, elapsed)\n    return http_util.Respond(request, response, 'application/json')", "docstring": "Serves an object mapping plugin name to whether it is enabled.\n\nArgs:\nrequest: The werkzeug.Request object.\n\nReturns:\nA werkzeug.Response object.", "source": "juraj-google-style"}
{"code": "def _bond_option_variance(model, option_expiry, bond_maturity):\n    if model._sample_with_generic:\n        raise ValueError('The paramerization of `mean_reversion` and/or `volatility` does not support analytic computation of bond option variance.')\n    mean_reversion = model.mean_reversion(option_expiry)\n    volatility = model.volatility(option_expiry)\n    var_between_vol_knots = model._variance_int(model._padded_knots, model._jump_locations, model._jump_values_vol, model._jump_values_mr)[0]\n    varx_at_vol_knots = tf.concat([tf.zeros([1], dtype=var_between_vol_knots.dtype), utils.cumsum_using_matvec(var_between_vol_knots)], axis=-1)\n    time_index = tf.searchsorted(model._jump_locations[0], option_expiry)\n    vn = tf.concat([model._zero_padding, model._jump_locations], axis=-1)\n    var_expiry = model._variance_int(tf.gather(vn, time_index, axis=-1), option_expiry, volatility, mean_reversion)[0]\n    var_expiry = var_expiry + tf.gather(varx_at_vol_knots, time_index)\n    var_expiry = var_expiry * (tf.math.exp(-mean_reversion * option_expiry) - tf.math.exp(-mean_reversion * bond_maturity)) ** 2 / mean_reversion ** 2\n    return var_expiry", "docstring": "Computes black equivalent variance for bond options.\n\nBlack equivalent variance is defined as the variance to use in the Black\nformula to obtain the model implied price of European bond options.\n\nArgs:\nmodel: An instance of `VectorHullWhiteModel`.\noption_expiry: A rank 1 `Tensor` of real dtype specifying the time to\nexpiry of each option.\nbond_maturity: A rank 1 `Tensor` of real dtype specifying the time to\nmaturity of underlying zero coupon bonds.\n\nReturns:\nA rank 1 `Tensor` of same dtype and shape as the inputs with computed\nBlack-equivalent variance for the underlying options.", "source": "github-repos"}
{"code": "def run(self, *args, **kwargs):\n        \n        accounts = list(AWSAccount.get_all(include_disabled=False).values())\n        self.manage_policies(accounts)", "docstring": "Iterate through all AWS accounts and apply roles and policies from Github\n\nArgs:\n*args: Optional list of arguments\n**kwargs: Optional list of keyword arguments\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def traverse_data(obj, use_numpy=True, buffers=None):\n    \n    if use_numpy and all(isinstance(el, np.ndarray) for el in obj):\n        return [transform_array(el, buffers=buffers) for el in obj]\n    obj_copy = []\n    for item in obj:\n        \n        \n        if type(item) is float:\n            if math.isnan(item):\n                item = 'NaN'\n            elif math.isinf(item):\n                if item > 0:\n                    item = 'Infinity'\n                else:\n                    item = '-Infinity'\n            obj_copy.append(item)\n        elif isinstance(item, (list, tuple)):  \n            obj_copy.append(traverse_data(item))\n        else:\n            obj_copy.append(item)\n    return obj_copy", "docstring": "Recursively traverse an object until a flat list is found.\n\nIf NumPy is available, the flat list is converted to a numpy array\nand passed to transform_array() to handle ``nan``, ``inf``, and\n``-inf``.\n\nOtherwise, iterate through all items, converting non-JSON items\n\nArgs:\nobj (list) : a list of values or lists\nuse_numpy (bool, optional) toggle NumPy as a dependency for testing\nThis argument is only useful for testing (default: True)", "source": "juraj-google-style"}
{"code": "def handle_incoming_message(self, msg):\n    if (msg.type == MessageType.START_JOB):\n        job = msg.message['job']\n        self.schedule_job(job)\n    elif (msg.type == MessageType.CANCEL_JOB):\n        job_id = msg.message['job_id']\n        self.cancel(job_id)", "docstring": "Start or cancel a job, based on the msg.\n\nIf msg.type == MessageType.START_JOB, then start the job given by msg.job.\n\nIf msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id.\n\nArgs:\nmsg (barbequeue.messaging.classes.Message):\n\nReturns: None", "source": "codesearchnet"}
{"code": "def __init__(self, ctx):\n    member_map = collections_overlay.copy()\n    ast = ctx.loader.import_name('collections')\n    super().__init__(ctx, 'collections', member_map, ast)", "docstring": "Initializes the CollectionsOverlay.\n\nThis function loads the AST for the collections module, which is used to\naccess type information for any members that are not explicitly provided by\nthe overlay. See get_attribute in attribute.py for how it's used.\n\nArgs:\nctx: An instance of context.Context.", "source": "github-repos"}
{"code": "def get_assistants_from_file_hierarchy(cls, file_hierarchy, superassistant, role=settings.DEFAULT_ASSISTANT_ROLE):\n    result = []\n    warn_msg = 'Failed to load assistant {source}, skipping subassistants.'\n    for (name, attrs) in file_hierarchy.items():\n        loaded_yaml = yaml_loader.YamlLoader.load_yaml_by_path(attrs['source'])\n        if (loaded_yaml is None):\n            logger.warning(warn_msg.format(source=attrs['source']))\n            continue\n        try:\n            ass = cls.assistant_from_yaml(attrs['source'], loaded_yaml, superassistant, role=role)\n        except exceptions.YamlError as e:\n            logger.warning(e)\n            continue\n        ass._subassistants = cls.get_assistants_from_file_hierarchy(attrs['subhierarchy'], ass, role=role)\n        result.append(ass)\n    return result", "docstring": "Accepts file_hierarch as returned by cls.get_assistant_file_hierarchy and returns\ninstances of YamlAssistant for loaded files\n\nArgs:\nfile_hierarchy: structure as described in cls.get_assistants_file_hierarchy\nrole: role of all assistants in this hierarchy (we could find\nthis out dynamically but it's not worth the pain)\nReturns:\nlist of top level assistants from given hierarchy; these assistants contain\nreferences to instances of their subassistants (and their subassistants, ...)", "source": "codesearchnet"}
{"code": "def _get_cl_dependency_code(self):\n    code = ''\n    for d in self._dependencies:\n        code += (d.get_cl_code() + '\\n')\n    return code", "docstring": "Get the CL code for all the CL code for all the dependencies.\n\nReturns:\nstr: The CL code with the actual code.", "source": "codesearchnet"}
{"code": "def _isbn_cleanse(isbn, checksum=True):\n    if (not isinstance(isbn, string_types)):\n        raise TypeError(('ISBN must be a string, received %r' % isbn))\n    if (PY2 and isinstance(isbn, str)):\n        isbn = unicode(isbn)\n        uni_input = False\n    else:\n        uni_input = True\n    for dash in DASHES:\n        isbn = isbn.replace(dash, unicode())\n    if checksum:\n        if (not isbn[:(- 1)].isdigit()):\n            raise IsbnError('non-digit parts')\n        if (len(isbn) == 9):\n            isbn = ('0' + isbn)\n        if (len(isbn) == 10):\n            if (not (isbn[(- 1)].isdigit() or (isbn[(- 1)] in 'Xx'))):\n                raise IsbnError('non-digit or X checksum')\n        elif (len(isbn) == 13):\n            if (not isbn[(- 1)].isdigit()):\n                raise IsbnError('non-digit checksum')\n            if (not isbn.startswith(('978', '979'))):\n                raise IsbnError('invalid Bookland region')\n        else:\n            raise IsbnError('ISBN must be either 10 or 13 characters long')\n    else:\n        if (len(isbn) == 8):\n            isbn = ('0' + isbn)\n        elif ((len(isbn) == 12) and (not isbn[:3].startswith(('978', '979')))):\n            raise IsbnError('invalid Bookland region')\n        if (not isbn.isdigit()):\n            raise IsbnError('non-digit parts')\n        if (not (len(isbn) in (9, 12))):\n            raise IsbnError('ISBN must be either 9 or 12 characters long without checksum')\n    if (PY2 and (not uni_input)):\n        return str(isbn)\n    else:\n        return isbn", "docstring": "Check ISBN is a string, and passes basic sanity checks.\n\nArgs:\nisbn (str): SBN, ISBN-10 or ISBN-13\nchecksum (bool): ``True`` if ``isbn`` includes checksum character\n\nReturns:\n``str``: ISBN with hyphenation removed, including when called with a\nSBN\n\nRaises:\nTypeError: ``isbn`` is not a ``str`` type\nIsbnError: Incorrect length for ``isbn``\nIsbnError: Incorrect SBN or ISBN formatting", "source": "codesearchnet"}
{"code": "def summarize_variables(variables=None):\n    variable_counts = count_variables_by_type(variables=variables)\n    total_num_scalars = 0\n    total_num_bytes = 0\n    for dtype in sorted(variable_counts, key=(lambda dtype: ('%r' % dtype))):\n        var_info_for_type = variable_counts[dtype]\n        num_bytes = (var_info_for_type['num_scalars'] * dtype.size)\n        total_num_scalars += var_info_for_type['num_scalars']\n        total_num_bytes += num_bytes\n        tf.logging.info('%r: %d variables comprising %d scalars, %s', dtype, var_info_for_type['num_variables'], var_info_for_type['num_scalars'], _num_bytes_to_human_readable(num_bytes))", "docstring": "Logs a summary of variable information.\n\nThis function groups Variables by dtype and prints out the number of Variables\nand the total number of scalar values for each datatype, as well as the total\nmemory consumed.\n\nFor Variables of type tf.string, the memory usage cannot be accurately\ncalculated from the Graph as the memory requirements change based on what\nstrings are actually stored, which can only be determined inside a session.\nIn this case, the amount of memory used to stored the pointers to the strings\nis logged, along with a warning.\n\nArgs:\nvariables: iterable of variables; if not provided, then all variables\n(in the default graph) are summarized.", "source": "codesearchnet"}
{"code": "def add_listener_policy(self, json_data):\n        \n        env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n        elbclient = env.client('elb')\n\n        \n        stickiness = {}\n        elb_settings = self.properties['elb']\n        if elb_settings.get('ports'):\n            ports = elb_settings['ports']\n            for listener in ports:\n                if listener.get(\"stickiness\"):\n                    stickiness = self.add_stickiness()\n                    LOG.info('Stickiness Found: %s', stickiness)\n                    break\n\n        \n        for job in json.loads(json_data)['job']:\n            for listener in job['listeners']:\n                policies = []\n                ext_port = listener['externalPort']\n                if listener['listenerPolicies']:\n                    policies.extend(listener['listenerPolicies'])\n                if stickiness.get(ext_port):\n                    policies.append(stickiness.get(ext_port))\n                if policies:\n                    LOG.info('Adding listener policies: %s', policies)\n                    elbclient.set_load_balancer_policies_of_listener(\n                        LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)", "docstring": "Attaches listerner policies to an ELB\n\nArgs:\njson_data (json): return data from ELB upsert", "source": "juraj-google-style"}
{"code": "def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):\n    \n    self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(\n        path_spec)\n\n    try:\n      extraction_worker.ProcessPathSpec(parser_mediator, path_spec)\n\n    except dfvfs_errors.CacheFullError:\n      \n      self._abort = True\n      logger.error((\n          'ABORT: detected cache full error while processing path spec: '\n          '{0:s}').format(self._current_display_name))\n\n    except Exception as exception:  \n      parser_mediator.ProduceExtractionWarning((\n          'unable to process path specification with error: '\n          '{0!s}').format(exception), path_spec=path_spec)\n\n      if self._processing_configuration.debug_output:\n        logger.warning((\n            'Unhandled exception while processing path specification: '\n            '{0:s}.').format(self._current_display_name))\n        logger.exception(exception)", "docstring": "Processes a path specification.\n\nArgs:\nextraction_worker (worker.ExtractionWorker): extraction worker.\nparser_mediator (ParserMediator): parser mediator.\npath_spec (dfvfs.PathSpec): path specification.", "source": "juraj-google-style"}
{"code": "def triangle(duration: int, amp: complex, period: float = None,\n             phase: float = 0, name: str = None) -> SamplePulse:\n    \n    if period is None:\n        period = duration\n\n    return _sampled_triangle_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates triangle wave `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def register_ops_if_needed(graph_ops):\n  \n  missing_ops = graph_ops - set(op_def_registry.get_registered_ops().keys())\n\n  if not missing_ops:\n    return\n\n  p_buffer = c_api.TF_GetAllOpList()\n  cpp_op_list = op_def_pb2.OpList()\n  cpp_op_list.ParseFromString(c_api.TF_GetBuffer(p_buffer))\n  cpp_registry_ops = {op.name: op for op in cpp_op_list.op}\n\n  missing_op_list = op_def_pb2.OpList()\n  for missing_op in missing_ops:\n    if missing_op not in cpp_registry_ops:\n      logging.info(\n          \"Op %s is missing from both the python and C++ registry.\",\n          missing_op)\n    else:\n      missing_op_list.op.extend([cpp_registry_ops[missing_op]])\n      logging.info(\n          \"Adding op %s from c++ registry to python registry.\",\n          missing_op)\n\n  op_def_registry.register_op_list(missing_op_list)\n\n  \n  \n  \n  if not missing_ops <= set(cpp_registry_ops.keys()):\n    raise RuntimeError(\n        \"Graph ops missing from the python registry (%s) are also absent from \"\n        \"the c++ registry.\"\n        % missing_ops.difference(set(cpp_registry_ops.keys())))", "docstring": "Register graph ops absent in op_def_registry, if present in c++ registry.\n\nArgs:\ngraph_ops: set with graph op names to register.\n\nRaises:\nRuntimeError: if `graph_ops` contains ops that are not in either python or\nc++ registry.", "source": "juraj-google-style"}
{"code": "def RemoveUser(self, user):\n    self.logger.info('Removing user %s.', user)\n    if self.remove:\n        command = self.userdel_cmd.format(user=user)\n        try:\n            subprocess.check_call(command.split(' '))\n        except subprocess.CalledProcessError as e:\n            self.logger.warning('Could not remove user %s. %s.', user, str(e))\n        else:\n            self.logger.info('Removed user account %s.', user)\n    self._RemoveAuthorizedKeys(user)\n    self._UpdateSudoer(user, sudoer=False)", "docstring": "Remove a Linux user account.\n\nArgs:\nuser: string, the Linux user account to remove.", "source": "codesearchnet"}
{"code": "def CheckCheck(filename, clean_lines, linenum, error):\n  \n\n  \n  lines = clean_lines.elided\n  (check_macro, start_pos) = FindCheckMacro(lines[linenum])\n  if not check_macro:\n    return\n\n  \n  (last_line, end_line, end_pos) = CloseExpression(\n      clean_lines, linenum, start_pos)\n  if end_pos < 0:\n    return\n\n  \n  \n  \n  if not Match(r'\\s*;', last_line[end_pos:]):\n    return\n\n  if linenum == end_line:\n    expression = lines[linenum][start_pos + 1:end_pos - 1]\n  else:\n    expression = lines[linenum][start_pos + 1:]\n    for i in xrange(linenum + 1, end_line):\n      expression += lines[i]\n    expression += last_line[0:end_pos - 1]\n\n  \n  \n  \n  lhs = ''\n  rhs = ''\n  operator = None\n  while expression:\n    matched = Match(r'^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||'\n                    r'==|!=|>=|>|<=|<|\\()(.*)$', expression)\n    if matched:\n      token = matched.group(1)\n      if token == '(':\n        \n        expression = matched.group(2)\n        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])\n        if end < 0:\n          return  \n        lhs += '(' + expression[0:end]\n        expression = expression[end:]\n      elif token in ('&&', '||'):\n        \n        \n        \n        \n        \n        return\n      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):\n        \n        lhs += token\n        expression = matched.group(2)\n      else:\n        \n        operator = token\n        rhs = matched.group(2)\n        break\n    else:\n      \n      \n      \n      \n      \n      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)\n      if not matched:\n        matched = Match(r'^(\\s*\\S)(.*)$', expression)\n        if not matched:\n          break\n      lhs += matched.group(1)\n      expression = matched.group(2)\n\n  \n  if not (lhs and operator and rhs):\n    return\n\n  \n  \n  if rhs.find('&&') > -1 or rhs.find('||') > -1:\n    return\n\n  \n  \n  \n  \n  \n  \n  lhs = lhs.strip()\n  rhs = rhs.strip()\n  match_constant = r'^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|\".*\"|\\'.*\\')$'\n  if Match(match_constant, lhs) or Match(match_constant, rhs):\n    \n    \n    \n    \n    \n    \n    \n    \n    error(filename, linenum, 'readability/check', 2,\n          'Consider using %s instead of %s(a %s b)' % (\n              _CHECK_REPLACEMENT[check_macro][operator],\n              check_macro, operator))", "docstring": "Checks the use of CHECK and EXPECT macros.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def create(self, name, description='', private=False, runs_executable_tasks=True, runs_docker_container_tasks=True, runs_singularity_container_tasks=True, active=True, whitelists=None):\n    if (whitelists is None):\n        whitelists = []\n    request_url = (self._client.base_api_url + self.list_url)\n    data_to_post = {'name': name, 'description': description, 'private': private, 'runs_executable_tasks': runs_executable_tasks, 'runs_docker_container_tasks': runs_docker_container_tasks, 'runs_singularity_container_tasks': runs_singularity_container_tasks, 'active': active, 'whitelists': whitelists}\n    response = self._client.session.post(request_url, data=data_to_post)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Create a task queue.\n\nArgs:\nname (str): The name of the task queue.\ndescription (str, optional): A description of the task queue.\nprivate (bool, optional): A boolean specifying whether the\nqueue is exclusive to its creator. Defaults to False.\nruns_executable_tasks (bool, optional): A Boolean specifying\nwhether the queue runs executable tasks. Defaults to\nTrue.\nruns_docker_container_tasks (bool, optional): A Boolean\nspecifying whether the queue runs container tasks that\nrun in Docker containers. Defaults to True.\nruns_singularity_container_tasks (bool, optional): A Boolean\nspecifying whether the queue runs container tasks that\nrun in Singularity containers. Defaults to True.\nactive (bool, optional): A boolean specifying whether the\nqueue is active. Default to True.\nwhitelists (list, optional): A list of task whitelist IDs.\nDefaults to None (which gets translated to []).\n\nReturns:\n:class:`saltant.models.task_queue.TaskQueue`:\nA task queue model instance representing the task queue\njust created.", "source": "codesearchnet"}
{"code": "def is_node_inside_try_except(node: astroid.Raise) -> bool:\n    \n    context = find_try_except_wrapper_node(node)\n    return isinstance(context, astroid.TryExcept)", "docstring": "Check if the node is directly under a Try/Except statement.\n(but not under an ExceptHandler!)\n\nArgs:\nnode (astroid.Raise): the node raising the exception.\n\nReturns:\nbool: True if the node is inside a try/except statement, False otherwise.", "source": "juraj-google-style"}
{"code": "def LogHttpFrontendAccess(self, request, source=None, message_count=None):\n    event_id = self.GetNewEventId()\n    log_msg = ('%s-%s [%s]: %s %s %s %s (%d)' % (event_id, request.source_ip, (source or '<unknown>'), request.method, request.url, request.user_agent, request.user, (message_count or 0)))\n    logging.info(log_msg)", "docstring": "Write a log entry for a Frontend or UI Request.\n\nArgs:\nrequest: A HttpRequest protobuf.\nsource: Client id of the client initiating the request. Optional.\nmessage_count: Number of messages received from the client. Optional.", "source": "codesearchnet"}
{"code": "def to_pb(self):\n    return policy_pb2.Policy(etag=self.etag, version=(self.version or 0), bindings=[policy_pb2.Binding(role=role, members=sorted(self[role])) for role in self])", "docstring": "Render a protobuf message.\n\nReturns:\ngoogle.iam.policy_pb2.Policy: a message to be passed to the\n``set_iam_policy`` gRPC API.", "source": "codesearchnet"}
{"code": "def remove_overlap(self, also_remove_contiguous: bool=False) -> None:\n    overlap = True\n    while overlap:\n        overlap = self._remove_overlap_sub(also_remove_contiguous)\n    self._sort()", "docstring": "Merges any overlapping intervals.\n\nArgs:\nalso_remove_contiguous: treat contiguous (as well as overlapping)\nintervals as worthy of merging?", "source": "codesearchnet"}
{"code": "def _lookup_global(self, symbol):\n    assert symbol.parts\n    namespace = self.namespaces\n    if (len(symbol.parts) == 1):\n        namespace = self.namespaces[None]\n    try:\n        return self._lookup_namespace(symbol, namespace)\n    except Error as orig_exc:\n        try:\n            namespace = self.namespaces[None]\n            return self._lookup_namespace(symbol, namespace)\n        except Error:\n            raise orig_exc", "docstring": "Helper for lookup_symbol that only looks up global variables.\n\nArgs:\nsymbol: Symbol", "source": "codesearchnet"}
{"code": "def get_mailcap_entry(self, url):\n    for parser in mime_parsers.parsers:\n        if parser.pattern.match(url):\n            try:\n                (modified_url, content_type) = parser.get_mimetype(url)\n            except Exception as e:\n                _logger.warning('parser %s raised an exception', parser)\n                _logger.exception(e)\n                raise exceptions.MailcapEntryNotFound()\n            if (not content_type):\n                _logger.info('Content type could not be determined')\n                raise exceptions.MailcapEntryNotFound()\n            elif (content_type == 'text/html'):\n                _logger.info('Content type text/html, deferring to browser')\n                raise exceptions.MailcapEntryNotFound()\n            (command, entry) = mailcap.findmatch(self._mailcap_dict, content_type, filename=modified_url)\n            if (not entry):\n                _logger.info('Could not find a valid mailcap entry')\n                raise exceptions.MailcapEntryNotFound()\n            return (command, entry)\n    raise exceptions.MailcapEntryNotFound()", "docstring": "Search through the mime handlers list and attempt to find the\nappropriate command to open the provided url with.\n\nWill raise a MailcapEntryNotFound exception if no valid command exists.\n\nParams:\nurl (text): URL that will be checked\n\nReturns:\ncommand (text): The string of the command that should be executed\nin a subprocess to open the resource.\nentry (dict): The full mailcap entry for the corresponding command", "source": "codesearchnet"}
{"code": "def inquire_by_mech(self, mech, name=True, init_lifetime=True, accept_lifetime=True, usage=True):\n    res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime, accept_lifetime, usage)\n    if (res.name is not None):\n        res_name = names.Name(res.name)\n    else:\n        res_name = None\n    return tuples.InquireCredByMechResult(res_name, res.init_lifetime, res.accept_lifetime, res.usage)", "docstring": "Inspect these credentials for per-mechanism information\n\nThis method inspects these credentials for per-mechanism information\nabout them.\n\nArgs:\nmech (OID): the mechanism for which to retrive the information\nname (bool): get the name associated with the credentials\ninit_lifetime (bool): get the remaining initiate lifetime for\nthe credentials\naccept_lifetime (bool): get the remaining accept lifetime for\nthe credentials\nusage (bool): get the usage for the credentials\n\nReturns:\nInquireCredByMechResult: the information about the credentials,\nwith None used when the corresponding argument was False", "source": "codesearchnet"}
{"code": "def update(self, resource, id_or_uri):\n        \n        return self._client.update(resource=resource, uri=id_or_uri)", "docstring": "Updates a registered Device Manager.\n\nArgs:\nresource (dict): Object to update.\nid_or_uri: Can be either the Device manager ID or URI.\n\nReturns:\ndict: The device manager resource.", "source": "juraj-google-style"}
{"code": "def __init__(self, corpus):\n        \n        self.words = corpus\n        self.floor = log10(0.01 / len(self.words))", "docstring": "Build function with set of words from a corpus.\n\nArgs:\ncorpus (collection): collection of words to use", "source": "juraj-google-style"}
{"code": "def get_range_tracker(self, start_position: Optional[Any], stop_position: Optional[Any]) -> 'RangeTracker':\n    raise NotImplementedError", "docstring": "Returns a RangeTracker for a given position range.\n\nFramework may invoke ``read()`` method with the RangeTracker object returned\nhere to read data from the source.\n\nArgs:\nstart_position: starting position of the range. If 'None' default start\nposition of the source must be used.\nstop_position:  ending position of the range. If 'None' default stop\nposition of the source must be used.\nReturns:\na ``RangeTracker`` for the given position range.", "source": "github-repos"}
{"code": "def save(self, filething=None, deleteid3=False, padding=None):\n    self._save(filething, self.metadata_blocks, deleteid3, padding)", "docstring": "Save metadata blocks to a file.\n\nArgs:\nfilething (filething)\ndeleteid3 (bool): delete id3 tags while at it\npadding (:obj:`mutagen.PaddingFunction`)\n\nIf no filename is given, the one most recently loaded is used.", "source": "codesearchnet"}
{"code": "def hugepage_support(user, group='hugetlb', nr_hugepages=256, max_map_count=65536, mnt_point='/run/hugepages/kvm', pagesize='2MB', mount=True, set_shmmax=False):\n    group_info = add_group(group)\n    gid = group_info.gr_gid\n    add_user_to_group(user, group)\n    if (max_map_count < (2 * nr_hugepages)):\n        max_map_count = (2 * nr_hugepages)\n    sysctl_settings = {'vm.nr_hugepages': nr_hugepages, 'vm.max_map_count': max_map_count, 'vm.hugetlb_shm_group': gid}\n    if set_shmmax:\n        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))\n        shmmax_minsize = (bytes_from_string(pagesize) * nr_hugepages)\n        if (shmmax_minsize > shmmax_current):\n            sysctl_settings['kernel.shmmax'] = shmmax_minsize\n    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')\n    mkdir(mnt_point, owner='root', group='root', perms=493, force=False)\n    lfstab = fstab.Fstab()\n    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)\n    if fstab_entry:\n        lfstab.remove_entry(fstab_entry)\n    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)\n    lfstab.add_entry(entry)\n    if mount:\n        fstab_mount(mnt_point)", "docstring": "Enable hugepages on system.\n\nArgs:\nuser (str)  -- Username to allow access to hugepages to\ngroup (str) -- Group name to own hugepages\nnr_hugepages (int) -- Number of pages to reserve\nmax_map_count (int) -- Number of Virtual Memory Areas a process can own\nmnt_point (str) -- Directory to mount hugepages on\npagesize (str) -- Size of hugepages\nmount (bool) -- Whether to Mount hugepages", "source": "codesearchnet"}
{"code": "def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):\n    flow = cls(workdir, manager=manager)\n    flow.register_scf_task(scf_input)\n    scf_task = flow[0][0]\n    nl_work = DteWork.from_scf_task(scf_task)\n    flow.register_work(nl_work)\n    if allocate:\n        flow.allocate()\n    return flow", "docstring": "Create a `NonlinearFlow` for second order susceptibility calculations from\nan `AbinitInput` defining a ground-state run.\n\nArgs:\nworkdir: Working directory of the flow.\nscf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.\nmanager: :class:`TaskManager` object. Read from `manager.yml` if None.\nallocate: True if the flow should be allocated before returning.\n\nReturn:\n:class:`NonlinearFlow` object.", "source": "codesearchnet"}
{"code": "def soft_shrink(x, threshold=0.5):\n    return ops.soft_shrink(x, threshold=threshold)", "docstring": "Soft Shrink activation function.\n\nIt is defined as:\n\n`soft_shrink(x) = x - threshold` if `x > threshold`,\n`soft_shrink(x) = x + threshold` if `x < -threshold`,\n`soft_shrink(x) = 0` otherwise.\n\nArgs:\nx: Input tensor.\nthreshold: Threshold value. Defaults to 0.5.", "source": "github-repos"}
{"code": "def image_needs_pushing(image):\n    \n    d = docker_client()\n    try:\n        d.images.get_registry_data(image)\n    except docker.errors.APIError:\n        \n        return True\n    else:\n        return False", "docstring": "Return whether an image needs pushing\n\nArgs:\n\nimage (str): the `repository:tag` image to be build.\n\nReturns:\n\nTrue: if image needs to be pushed (not on registry)\nFalse: if not (already present on registry)", "source": "juraj-google-style"}
{"code": "def is_scalar_event(self, name='is_scalar_event'):\n    with self._name_scope(name):\n        return ops.convert_to_tensor(self._is_scalar_helper(self.event_shape, self.event_shape_tensor), name='is_scalar_event')", "docstring": "Indicates that `event_shape == []`.\n\nArgs:\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nis_scalar_event: `bool` scalar `Tensor`.", "source": "github-repos"}
{"code": "class _PruneReindexingLMHead(nn.Module):\n\n    def __init__(self, original_lm_head, assistant_overlap_token_ids):\n        super().__init__()\n        self.pruned_lm_head = prune_linear_layer(original_lm_head, assistant_overlap_token_ids).to(original_lm_head.weight.dtype)\n\n    def forward(self, hidden_states):\n        pruned_logits = self.pruned_lm_head(hidden_states)\n        return pruned_logits", "docstring": "A class to prune and reindex the language model head.\n\nThis class prunes the language model head to only include the specified token IDs and reindexes the logits\nto map back to the original vocabulary.\n\nArgs:\noriginal_lm_head (nn.Module): The original language model head.\ntoken_ids (list[int]): The list of token IDs to keep.", "source": "github-repos"}
{"code": "def QA_fetch_risk(message={}, params={\"_id\": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE):\n    \n    collection = DATABASE.risk\n    return [res for res in collection.find(message, params)]", "docstring": "get the risk message\n\nArguments:\nquery_mes {[type]} -- [description]\n\nKeyword Arguments:\ncollection {[type]} -- [description] (default: {DATABASE})\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def flatten_dict(x):\n    \n    out = {}\n    for k, v in x.items():\n        out = _recur_flatten(k, v, out)\n    return out", "docstring": "Flatten a dict\n\nFlatten an arbitrarily nested dict as output by to_dict\n\n.. note::\n\nKeys in the flattened dict may get very long.\n\nArgs:\nx (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values\n\nReturns:\ndict: flattened 1D dict", "source": "juraj-google-style"}
{"code": "def ncx2cdf_and_gradient(x, k, l, truncation=10):\n    g = 0.0\n    dg = 0.0\n    factorial = 1.0\n    for j in range(truncation + 1):\n        factorial *= j if j > 0 else 1\n        h = (1 - tf.math.igammac((k + 2 * j) / 2.0, x / 2.0)) / factorial\n        g += h * (l * 0.5) ** j\n        dg += h * 0.5 * j * (l * 0.5) ** (j - 1)\n    f = tf.math.exp(-0.5 * l)\n    df = -0.5 * f\n    p = f * g\n    dp = df * g + f * dg\n    return (p, dp)", "docstring": "Returns the CDF of noncentral X2 distribution and its gradient over l.\n\nArgs:\nx: Values of the random variable following a noncentral X2 distribution. A\nreal `Tensor`.\nk: Degrees of freedom. A positive real `Tensor` of same shape as `x`.\nl: Non-centrality parameter. A positive real `Tensor` of same shape as `x`.\ntruncation: A positive integer. When computing the CDF of a noncentral X2\ndistribution, it needs to calculate the sum of an expression from 0 to\ninfinity. In practice, it needs to be truncated to compute an approximate\nvalue. This argument is the index of the last\nterm that will be included in the sum. Default value: 10.\n\nReturns:\nA tuple of two `Tensor`s. The first `Tensor` is the CDF. The second\n`Tensor` is the gradient of the CDF over l. Both of the `Tensors` are of\nsame shape as `x`.", "source": "github-repos"}
{"code": "def defer(target, args=None, kwargs=None, callback=None):\n    obj = _defer(target, args, kwargs, callback)\n    obj.finished.connect((lambda : _defer_cleanup(obj)))\n    obj.start()\n    _defer_threads.append(obj)\n    return obj", "docstring": "Perform operation in thread with callback\n\nInstances are cached until finished, at which point\nthey are garbage collected. If we didn't do this,\nPython would step in and garbage collect the thread\nbefore having had time to finish, resulting in an\nexception.\n\nArguments:\ntarget (callable): Method or function to call\ncallback (callable, optional): Method or function to call\nonce `target` has finished.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def on_persist_completed(self, block):\n        \n        if len(self._events_to_write):\n\n            addr_db = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR)\n            block_db = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK)\n            contract_db = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT)\n\n            block_write_batch = block_db.write_batch()\n            contract_write_batch = contract_db.write_batch()\n\n            block_count = 0\n            block_bytes = self._events_to_write[0].block_number.to_bytes(4, 'little')\n\n            for evt in self._events_to_write:  \n\n                \n                write_both = True\n                hash_data = evt.ToByteArray()\n\n                bytes_to = bytes(evt.addr_to.Data)\n                bytes_from = bytes(evt.addr_from.Data)\n\n                if bytes_to == bytes_from:\n                    write_both = False\n\n                total_bytes_to = addr_db.get(bytes_to + NotificationPrefix.PREFIX_COUNT)\n                total_bytes_from = addr_db.get(bytes_from + NotificationPrefix.PREFIX_COUNT)\n\n                if not total_bytes_to:\n                    total_bytes_to = b'\\x00'\n\n                if not total_bytes_from:\n                    total_bytes_from = b'x\\00'\n\n                addr_to_key = bytes_to + total_bytes_to\n                addr_from_key = bytes_from + total_bytes_from\n\n                with addr_db.write_batch() as b:\n                    b.put(addr_to_key, hash_data)\n                    if write_both:\n                        b.put(addr_from_key, hash_data)\n                    total_bytes_to = int.from_bytes(total_bytes_to, 'little') + 1\n                    total_bytes_from = int.from_bytes(total_bytes_from, 'little') + 1\n                    new_bytes_to = total_bytes_to.to_bytes(4, 'little')\n                    new_bytes_from = total_bytes_from.to_bytes(4, 'little')\n                    b.put(bytes_to + NotificationPrefix.PREFIX_COUNT, new_bytes_to)\n                    if write_both:\n                        b.put(bytes_from + NotificationPrefix.PREFIX_COUNT, new_bytes_from)\n\n                \n                per_block_key = block_bytes + block_count.to_bytes(4, 'little')\n                block_write_batch.put(per_block_key, hash_data)\n                block_count += 1\n\n                \n                contract_bytes = bytes(evt.contract_hash.Data)\n                count_for_contract = contract_db.get(contract_bytes + NotificationPrefix.PREFIX_COUNT)\n                if not count_for_contract:\n                    count_for_contract = b'\\x00'\n                contract_event_key = contract_bytes + count_for_contract\n                contract_count_int = int.from_bytes(count_for_contract, 'little') + 1\n                new_contract_count = contract_count_int.to_bytes(4, 'little')\n                contract_write_batch.put(contract_bytes + NotificationPrefix.PREFIX_COUNT, new_contract_count)\n                contract_write_batch.put(contract_event_key, hash_data)\n\n            \n            block_write_batch.write()\n            contract_write_batch.write()\n\n        self._events_to_write = []\n\n        if len(self._new_contracts_to_write):\n\n            token_db = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN)\n\n            token_write_batch = token_db.write_batch()\n\n            for token_event in self._new_contracts_to_write:\n                try:\n                    hash_data = token_event.ToByteArray()  \n                    hash_key = 
token_event.contract.Code.ScriptHash().ToBytes()\n                    token_write_batch.put(hash_key, hash_data)\n                except Exception as e:\n                    logger.debug(f\"Failed to write new contract, reason: {e}\")\n\n            token_write_batch.write()\n\n        self._new_contracts_to_write = []", "docstring": "Called when a block has been persisted to disk.  Used as a hook to persist notification data.\nArgs:\nblock (neo.Core.Block): the currently persisting block", "source": "juraj-google-style"}
{"code": "def get_variant(self, index=None):\n    for variant in self.iter_variants():\n        if (variant.index == index):\n            return variant", "docstring": "Get the variant with the associated index.\n\nReturns:\n`Variant` object, or None if no variant with the given index exists.", "source": "codesearchnet"}
{"code": "def _ParseRecordExtraField(self, byte_stream, file_offset):\n    \n    extra_field_map = self._GetDataTypeMap('asl_record_extra_field')\n\n    try:\n      record_extra_field = self._ReadStructureFromByteStream(\n          byte_stream, file_offset, extra_field_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to parse record extra field at offset: 0x{0:08x} with error: '\n          '{1!s}').format(file_offset, exception))\n\n    return record_extra_field", "docstring": "Parses a record extra field.\n\nArgs:\nbyte_stream (bytes): byte stream.\nfile_offset (int): offset of the record extra field relative to\nthe start of the file.\n\nReturns:\nasl_record_extra_field: record extra field.\n\nRaises:\nParseError: if the record extra field cannot be parsed.", "source": "juraj-google-style"}
{"code": "def assert_iter(**kw):\n    for (name, value) in kw.items():\n        if (not isiter(value)):\n            raise TypeError('paco: {} must be an iterable object'.format(name))", "docstring": "Asserts if a given values implements a valid iterable interface.\n\nArguments:\n**kw (mixed): value to check if it is an iterable.\n\nRaises:\nTypeError: if assertion fails.", "source": "codesearchnet"}
{"code": "def nne(dim_red, true_labels):\n    \n    \n    bt = BallTree(dim_red.T)\n    correct = 0\n    for i, l in enumerate(true_labels):\n        dist, ind = bt.query([dim_red[:,i]], k=2)\n        closest_cell = ind[0, 1]\n        if true_labels[closest_cell] == l:\n            correct += 1\n    return float(correct)/len(true_labels)", "docstring": "Calculates the nearest neighbor accuracy (basically leave-one-out cross\nvalidation with a 1NN classifier).\n\nArgs:\ndim_red (array): dimensions (k, cells)\ntrue_labels (array): 1d array of integers\n\nReturns:\nNearest neighbor accuracy - fraction of points for which the 1NN\n1NN classifier returns the correct value.", "source": "juraj-google-style"}
{"code": "def export_json(data, status, headers):\n    \n    dumped = json.dumps(data, ensure_ascii=False)\n    resp = current_app.response_class(\n        dumped, status=status, headers=headers,\n        content_type='application/json; charset=utf-8')\n    return resp", "docstring": "Creates a JSON response\n\nJSON content is encoded by utf-8, not unicode escape.\n\nArgs:\ndata: any type object that can dump to json\nstatus (int): http status code\nheaders (dict): http headers", "source": "juraj-google-style"}
{"code": "def walk_dependencies(root, visitor):\n\n    def visit(parent, visitor):\n        for d in get_dependencies(parent):\n            visitor(d, parent)\n            visit(d, visitor)\n    visitor(root, None)\n    visit(root, visitor)", "docstring": "Call visitor on root and all dependencies reachable from it in breadth\nfirst order.\n\nArgs:\nroot (component): component function or class\nvisitor (function): signature is `func(component, parent)`.  The\ncall on root is `visitor(root, None)`.", "source": "codesearchnet"}
{"code": "def get(url, max_backoff=32, verbose=False, **kwargs):\n    \n    sleep_seconds = 1\n    while sleep_seconds <= max_backoff:\n        try:\n            \n            response = requests.get(url, **{**{'timeout': 30}, **kwargs})\n            \n            if 400 <= response.status_code < 500:\n                return None\n            \n            if 200 <= response.status_code < 400:\n                return response\n            \n        except RequestException as e:\n            if verbose:\n                print(str(e))\n\n        time.sleep(sleep_seconds)\n        sleep_seconds *= 2\n    return None", "docstring": "Adding retries to requests.get with exponential backoff.\n\nArgs:\nurl (str): The URL to fetch\nmax_backoff (int): The number of seconds to sleep at maximums\nverbose (bool): Whether to print exceptions.\n\nReturns:\nResponse: For successful requests return requests' response. `None` otherwise.", "source": "juraj-google-style"}
{"code": "def from_rfc3339(cls, rfc3339: str) -> 'Timestamp':\n    try:\n        dt = dateutil.parser.isoparse(rfc3339).astimezone(pytz.UTC)\n    except ValueError as e:\n        raise ValueError(\"Could not parse RFC 3339 string '{}' due to error: '{}'.\".format(rfc3339, e))\n    return cls.from_utc_datetime(dt)", "docstring": "Create a ``Timestamp`` instance from an RFC 3339 compliant string.\n\n.. note::\nAll timezones are implicitly converted to UTC.\n\nArgs:\nrfc3339: String in RFC 3339 form.", "source": "github-repos"}
{"code": "def set_cn_energies(self, cn_energies):\n    for site in self.sites:\n        site.set_cn_occupation_energies(cn_energies[site.label])\n    self.cn_energies = cn_energies", "docstring": "Set the coordination number dependent energies for this lattice.\n\nArgs:\ncn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.::\n\n{ 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } }\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _actor_method_call(self, method_name, args=None, kwargs=None, num_return_vals=None):\n    worker = ray.worker.get_global_worker()\n    worker.check_connected()\n    function_signature = self._ray_method_signatures[method_name]\n    if (args is None):\n        args = []\n    if (kwargs is None):\n        kwargs = {}\n    args = signature.extend_args(function_signature, args, kwargs)\n    if (worker.mode == ray.LOCAL_MODE):\n        return getattr(worker.actors[self._ray_actor_id], method_name)(*copy.deepcopy(args))\n    function_descriptor = FunctionDescriptor(self._ray_module_name, method_name, self._ray_class_name)\n    with self._ray_actor_lock:\n        object_ids = worker.submit_task(function_descriptor, args, actor_id=self._ray_actor_id, actor_handle_id=self._ray_actor_handle_id, actor_counter=self._ray_actor_counter, actor_creation_dummy_object_id=self._ray_actor_creation_dummy_object_id, execution_dependencies=[self._ray_actor_cursor], new_actor_handles=self._ray_new_actor_handles, num_return_vals=(num_return_vals + 1), resources={'CPU': self._ray_actor_method_cpus}, placement_resources={}, driver_id=self._ray_actor_driver_id)\n        self._ray_actor_counter += 1\n        self._ray_actor_cursor = object_ids.pop()\n        self._ray_new_actor_handles = []\n    if (len(object_ids) == 1):\n        object_ids = object_ids[0]\n    elif (len(object_ids) == 0):\n        object_ids = None\n    return object_ids", "docstring": "Method execution stub for an actor handle.\n\nThis is the function that executes when\n`actor.method_name.remote(*args, **kwargs)` is called. Instead of\nexecuting locally, the method is packaged as a task and scheduled\nto the remote actor instance.\n\nArgs:\nmethod_name: The name of the actor method to execute.\nargs: A list of arguments for the actor method.\nkwargs: A dictionary of keyword arguments for the actor method.\nnum_return_vals (int): The number of return values for the method.\n\nReturns:\nobject_ids: A list of object IDs returned by the remote actor\nmethod.", "source": "codesearchnet"}
{"code": "def __parse_entry(entry_line):\n    \n    if entry_line.startswith(\"!\"):\n        entry_line = sub(r\"!\\w*?_\", '', entry_line)\n    else:\n        entry_line = entry_line.strip()[1:]\n    try:\n        entry_type, entry_name = [i.strip() for i in entry_line.split(\"=\", 1)]\n    except ValueError:\n        entry_type = [i.strip() for i in entry_line.split(\"=\", 1)][0]\n        entry_name = ''\n    return entry_type, entry_name", "docstring": "Parse the SOFT file entry name line that starts with '^', '!' or '#'.\n\nArgs:\nentry_line (:obj:`str`): Line from SOFT  to be parsed.\n\nReturns:\n:obj:`2-tuple`: Type of entry, value of entry.", "source": "juraj-google-style"}
{"code": "def build(self, input_shape):\n    if self._is_graph_network:\n        super(Model, self).build(input_shape)\n        return\n    if input_shape is None:\n        raise ValueError('Input shape must be defined when calling build on a model subclass network.')\n    valid_types = (tuple, list, tensor_shape.TensorShape, dict)\n    if not isinstance(input_shape, valid_types):\n        raise ValueError('Specified input shape is not one of the valid types. Please specify a batch input shape of type tuple or list of input shapes. User provided input type: {}'.format(type(input_shape)))\n    if input_shape and (not self.inputs):\n        if context.executing_eagerly():\n            graph = func_graph.FuncGraph('build_graph')\n        else:\n            graph = backend.get_graph()\n        with graph.as_default():\n            if isinstance(input_shape, list) and all((d is None or isinstance(d, int) for d in input_shape)):\n                input_shape = tuple(input_shape)\n            if isinstance(input_shape, list):\n                x = [base_layer_utils.generate_placeholders_from_shape(shape) for shape in input_shape]\n            elif isinstance(input_shape, dict):\n                x = {k: base_layer_utils.generate_placeholders_from_shape(shape) for k, shape in input_shape.items()}\n            else:\n                x = base_layer_utils.generate_placeholders_from_shape(input_shape)\n            kwargs = {}\n            call_signature = self._call_full_argspec\n            call_args = call_signature.args\n            if len(call_args) > 2:\n                if call_signature.defaults:\n                    call_args = call_args[2:-len(call_signature.defaults)]\n                else:\n                    call_args = call_args[2:]\n                for arg in call_args:\n                    if arg == 'training':\n                        kwargs['training'] = False\n                    else:\n                        raise ValueError('Currently, you cannot build your model if it has positional or keyword arguments that are not inputs to the model, but are required for its `call` method. Instead, in order to instantiate and build your model, `call` your model on real tensor data with all expected call arguments.')\n            elif len(call_args) < 2:\n                raise ValueError('You can only call `build` on a model if its `call` method accepts an `inputs` argument.')\n            try:\n                self.call(x, **kwargs)\n            except (errors.InvalidArgumentError, TypeError):\n                raise ValueError('You cannot build your model by calling `build` if your layers do not support float type inputs. Instead, in order to instantiate and build your model, `call` your model on real tensor data (of the correct dtype).')\n    super(Model, self).build(input_shape)", "docstring": "Builds the model based on input shapes received.\n\nThis is to be used for subclassed models, which do not know at instantiation\ntime what their inputs look like.\n\nThis method only exists for users who want to call `model.build()` in a\nstandalone way (as a substitute for calling the model on real data to\nbuild it). It will never be called by the framework (and thus it will\nnever throw unexpected errors in an unrelated workflow).\n\nArgs:\ninput_shape: Single tuple, TensorShape, or list/dict of shapes, where\nshapes are tuples, integers, or TensorShapes.\n\nRaises:\nValueError:\n1. In case of invalid user-provided data (not of type tuple,\nlist, TensorShape, or dict).\n2. 
If the model requires call arguments that are agnostic\nto the input shapes (positional or kwarg in call signature).\n3. If not all layers were properly built.\n4. If float type inputs are not supported within the layers.\n\nIn each of these cases, the user should build their model by calling it\non real tensor data.", "source": "github-repos"}
{"code": "def handle_closed_task(self, task_name, record):\n        \n        if task_name not in self.tasks:\n            return\n\n        if self.main_failed:\n            self.mark_parent_tasks_as_failed(self.cur_task)\n\n        if self.tasks[task_name].failed:\n            record.msg = ColorFormatter.colored('red', END_TASK_ON_ERROR_MSG)\n        else:\n            record.msg = ColorFormatter.colored('green', END_TASK_MSG)\n\n        record.msg += ' (in %s)' % self.tasks[task_name].elapsed_time()\n\n        if self.should_show_by_depth() or self.tasks[task_name].force_show:\n            if self.tasks[task_name].force_show:\n                self.handle_error()\n\n            self.pretty_emit(record, is_header=True)\n\n        self.close_children_tasks(task_name)\n        self.tasks.pop(task_name)", "docstring": "Do everything needed when a task is closed\n\nParams:\ntask_name (str): name of the task that is finishing\nrecord (logging.LogRecord): log record with all the info\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def add_vtep(self, name, vtep, vlan=None):\n    if (not vlan):\n        cmd = 'vxlan flood vtep add {}'.format(vtep)\n    else:\n        cmd = 'vxlan vlan {} flood vtep add {}'.format(vlan, vtep)\n    return self.configure_interface(name, cmd)", "docstring": "Adds a new VTEP endpoint to the global or local flood list\n\nEosVersion:\n4.13.7M\n\nArgs:\nname (str): The name of the interface to configure\nvtep (str): The IP address of the remote VTEP endpoint to add\nvlan (str): The VLAN ID associated with this VTEP.  If the VLAN\nkeyword is used, then the VTEP is configured as a local flood\nendpoing\n\nReturns:\nTrue if the command completes successfully", "source": "codesearchnet"}
{"code": "def hrs_84_and_db12_8_or_20_6(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `hrs_84_and_db12_8_or_20_6`'.format(value))\n\n        self._hrs_84_and_db12_8_or_20_6 = value", "docstring": "Corresponds to IDD Field `hrs_84_and_db12_8_or_20_6`\nNumber of hours between 8 AM and 4 PM (inclusive) with dry-bulb temperature between 12.8 and 20.6 C\n\nArgs:\nvalue (float): value for IDD Field `hrs_84_and_db12_8_or_20_6`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def trade_day(dt, cal='US'):\n    from xone import calendar\n    dt = pd.Timestamp(dt).date()\n    return calendar.trading_dates(start=(dt - pd.Timedelta('10D')), end=dt, calendar=cal)[(- 1)]", "docstring": "Latest trading day w.r.t given dt\n\nArgs:\ndt: date of reference\ncal: trading calendar\n\nReturns:\npd.Timestamp: last trading day\n\nExamples:\n>>> trade_day('2018-12-25').strftime('%Y-%m-%d')\n'2018-12-24'", "source": "codesearchnet"}
{"code": "def plot(self, freq=None, figsize=(15, 5), title=None,\n             logy=False, **kwargs):\n        \n\n        if title is None:\n            title = self._get_default_plot_title(\n                freq, 'Equity Progression')\n\n        ser = self._get_series(freq).rebase()\n        return ser.plot(figsize=figsize, logy=logy,\n                        title=title, **kwargs)", "docstring": "Helper function for plotting the series.\n\nArgs:\n* freq (str): Data frequency used for display purposes.\nRefer to pandas docs for valid freq strings.\n* figsize ((x,y)): figure size\n* title (str): Title if default not appropriate\n* logy (bool): log-scale for y axis\n* kwargs: passed to pandas' plot method", "source": "juraj-google-style"}
{"code": "def add_variants(self, variants):\n        \n        \n        operations = []\n        nr_inserted = 0\n        for i,variant in enumerate(variants, 1):\n            \n            \n            \n            if not variant:\n                continue\n            nr_inserted += 1\n            update = self._get_update(variant)\n            operations.append(\n                UpdateOne(\n                    {'_id': variant['_id']},\n                    update,\n                    upsert=True\n                )\n            )\n            if i % 10000 == 0:\n                self.db.variant.bulk_write(operations, ordered=False)\n                operations = []\n        \n        if len(operations) > 0:\n            self.db.variant.bulk_write(operations, ordered=False)\n        \n        return nr_inserted", "docstring": "Add a bulk of variants\n\nThis could be used for faster inserts\n\nArgs:\nvariants(iterable(dict))", "source": "juraj-google-style"}
{"code": "def from_response(self, response_data):\n    return HSAccessTokenAuth(response_data['access_token'], response_data['token_type'], response_data['refresh_token'], response_data['expires_in'], response_data.get('state'))", "docstring": "Builds a new HSAccessTokenAuth straight from response data\n\nArgs:\nresponse_data (dict): Response data to use\n\nReturns:\nA HSAccessTokenAuth objet", "source": "codesearchnet"}
{"code": "def escalation_date(self, escalation_date):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        escalation_date = self._utils.format_datetime(\n            escalation_date, date_format='%Y-%m-%dT%H:%M:%SZ'\n        )\n        self._data['escalationDate'] = escalation_date\n        request = {'escalationDate': escalation_date}\n        return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Sets the task escalation_date\nArgs:\nescalation_date: Converted to %Y-%m-%dT%H:%M:%SZ date format", "source": "juraj-google-style"}
{"code": "def image(request, data):\n    try:\n        width = int(request.GET.get('w', PYDENTICON_WIDTH))\n    except ValueError:\n        raise SuspiciousOperation('Identicon width must be a positive integer.')\n    try:\n        height = int(request.GET.get('h', PYDENTICON_HEIGHT))\n    except ValueError:\n        raise SuspiciousOperation('Identicon height must be a positive integer.')\n    output_format = request.GET.get('f', PYDENTICON_FORMAT)\n    try:\n        padding = [int(p) for p in request.GET['p'].split(',')]\n    except KeyError:\n        padding = PYDENTICON_PADDING\n    except ValueError:\n        raise SuspiciousOperation('Identicon padding must consist out of 4 positive integers separated with commas.')\n    if ('i' in request.GET):\n        inverted = request.GET.get('i')\n        if (inverted.lower() == 'true'):\n            inverted = True\n        elif (inverted.lower() == 'false'):\n            inverted = False\n        else:\n            raise SuspiciousOperation('Inversion parameter must be a boolean (true/false).')\n    else:\n        inverted = PYDENTICON_INVERT\n    if ((not isinstance(width, int)) or (width <= 0)):\n        raise SuspiciousOperation('Identicon width must be a positive integer.')\n    if ((not isinstance(height, int)) or (height <= 0)):\n        raise SuspiciousOperation('Identicon height must be a positive integer.')\n    if ((not all([(isinstance(p, int) and (p >= 0)) for p in padding])) or (len(padding) != 4)):\n        raise SuspiciousOperation('Padding must be a 4-element tuple consisting out of positive integers.')\n    if (output_format == 'png'):\n        content_type = 'image/png'\n    elif (output_format == 'ascii'):\n        content_type = 'text/plain'\n    else:\n        raise SuspiciousOperation(\"Unsupported identicon format requested - '%s' % output_format\")\n    generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS, foreground=PYDENTICON_FOREGROUND, background=PYDENTICON_BACKGROUND, digest=PYDENTICON_DIGEST)\n    content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted)\n    response = HttpResponse(content, content_type=content_type)\n    return response", "docstring": "Generates identicon image based on passed data.\n\nArguments:\n\ndata - Data which should be used for generating an identicon. This data\nwill be used in order to create a digest which is used for generating the\nidenticon. If the data passed is a hex digest already, the digest will be\nused as-is.\n\nReturns:\n\nIdenticon image in raw format.", "source": "codesearchnet"}
{"code": "def _load_schema_for_record(data, schema=None):\n    if (schema is None):\n        if ('$schema' not in data):\n            raise SchemaKeyNotFound(data=data)\n        schema = data['$schema']\n    if isinstance(schema, six.string_types):\n        schema = load_schema(schema_name=schema)\n    return schema", "docstring": "Load the schema from a given record.\n\nArgs:\ndata (dict): record data.\nschema (Union[dict, str]): schema to validate against.\n\nReturns:\ndict: the loaded schema.\n\nRaises:\nSchemaNotFound: if the given schema was not found.\nSchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was\nfound in ``data``.\njsonschema.SchemaError: if the schema is invalid.", "source": "codesearchnet"}
{"code": "def format_diff_xml(a_xml, b_xml):\n    return '\\n'.join(difflib.ndiff(reformat_to_pretty_xml(a_xml).splitlines(), reformat_to_pretty_xml(b_xml).splitlines()))", "docstring": "Create a diff between two XML documents.\n\nArgs:\na_xml: str\nb_xml: str\n\nReturns:\nstr : `Differ`-style delta", "source": "codesearchnet"}
{"code": "def remove_all_servers(self):\n    cmd = self.command_builder('ntp', disable=True)\n    return self.configure(cmd)", "docstring": "Remove all NTP server entries from the node config\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def get_all_status(self, only_min=False):\n    if (len(self) == 0):\n        if only_min:\n            return self.S_INIT\n        else:\n            return [self.S_INIT]\n    self.check_status()\n    status_list = [task.status for task in self]\n    if only_min:\n        return min(status_list)\n    else:\n        return status_list", "docstring": "Returns a list with the status of the tasks in self.\n\nArgs:\nonly_min: If True, the minimum of the status is returned.", "source": "codesearchnet"}
{"code": "def get_cbm_vbm(self, tol=0.001, abs_tol=False, spin=None):\n        \n        \n        tdos = self.get_densities(spin)\n        if not abs_tol:\n            tol = tol * tdos.sum() / tdos.shape[0]\n\n        \n        i_fermi = 0\n        while self.energies[i_fermi] <= self.efermi:\n            i_fermi += 1\n\n        \n        i_gap_start = i_fermi\n        while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:\n            i_gap_start -= 1\n\n        \n        i_gap_end = i_gap_start\n        while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:\n            i_gap_end += 1\n        i_gap_end -= 1\n        return self.energies[i_gap_end], self.energies[i_gap_start]", "docstring": "Expects a DOS object and finds the cbm and vbm.\n\nArgs:\ntol: tolerance in occupations for determining the gap\nabs_tol: An absolute tolerance (True) and a relative one (False)\nspin: Possible values are None - finds the gap in the summed\ndensities, Up - finds the gap in the up spin channel,\nDown - finds the gap in the down spin channel.\n\nReturns:\n(cbm, vbm): float in eV corresponding to the gap", "source": "juraj-google-style"}
{"code": "def patch(self, id_or_uri, operation, path, value, timeout=(- 1)):\n    return self._client.patch(id_or_uri, operation, path, value, timeout=timeout)", "docstring": "Uses the PATCH to update a resource for a given logical switch group.\n\nOnly one operation can be performed in each PATCH call.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\noperation: Patch operation\npath: Path\nvalue: Value\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "codesearchnet"}
{"code": "def match(pattern, name):\n    \n    \n    try:\n        re_pat = _PATTERN_CACHE[(pattern, True)]\n    except KeyError:\n        res = \"(?ms)\" + _translate(pattern) + r'\\Z'\n        _PATTERN_CACHE[(pattern, True)] = re_pat = re.compile(res)\n    return re_pat.match(name) is not None", "docstring": "Test whether a name matches a wildcard pattern.\n\nArguments:\npattern (str): A wildcard pattern, e.g. ``\"*.py\"``.\nname (str): A filename.\n\nReturns:\nbool: `True` if the filename matches the pattern.", "source": "juraj-google-style"}
{"code": "def trainGP(self,fast=False,scales0=None,fixed0=None,lambd=None):\n        \n        assert self.n_terms>0, 'CVarianceDecomposition:: No variance component terms'\n\n        if not self.init:\t\tself.initGP(fast=fast)\n\n        \n        if lambd!=None:\t\tself.gp.setLambda(lambd)\n\n        \n        if scales0!=None:\n            self.setScales(scales0)\n        \n        self.vd.initGPparams()\n        \n        if fixed0!=None:\n            params = self.gp.getParams()\n            params['dataTerm'] = fixed0\n            self.gp.setParams(params)\n\n        \n        conv =self.vd.trainGP()\n        \n        self.cache['Sigma']   = None\n        self.cache['Hessian'] = None\n            \n        return conv", "docstring": "Train the gp\n\nArgs:\nfast:       if true and the gp has not been initialized, initializes a kronSum gp\nscales0:\tinitial variance components params\nfixed0:     initial fixed effect params", "source": "juraj-google-style"}
{"code": "def grad(f, has_aux=False):\n\n    def check_loss_shape(np_loss):\n        if not isinstance(np_loss, tf_np.ndarray):\n            raise ValueError('The result of the function to take gradient must be an ndarray.')\n        if not np_loss.shape.is_compatible_with([]):\n            raise ValueError('The result of the function to take gradient must be a scalar.')\n\n    def _f(params, *args):\n        \n        with backprop.GradientTape() as g:\n            g.watch(nest.flatten(params))\n            outputs = f(params, *args)\n            if has_aux:\n                np_loss, aux = outputs\n            else:\n                np_loss = outputs\n            check_loss_shape(np_loss)\n            tf_grads = g.gradient(np_loss, params)\n            if has_aux:\n                res = (tf_grads, aux)\n            else:\n                res = tf_grads\n            return _tf_to_np(res)\n    return _f", "docstring": "Returns a function that computes gradient of f.\n\nGradients can only be computed through numpy and tensorflow operations and not\nthrough python float operations and values.\n\nArgs:\nf: a function of type (params, *args) -> scalar. 'params' can be a nested\nstructure (made of lists and tuples) of ndarrays and the gradient is\nevaluated against it. `scalar` is a scalar ndarray.\nhas_aux: bool, indicates whether fun returns a pair where the first element\nis considered the output of the mathematical function to be differentiated\nand the second element is auxiliary data.\n\nReturns:\nA gradient function of type (params, *args) -> gradients, where the result\n'gradients' has the same structure and shapes as 'params'.", "source": "github-repos"}
{"code": "def execute(self, data_dict, callback, group=None, trace=None):\n        \n        \n        \n        group = group or self.group\n        context = _ScopedContext(data_dict, self.undefined_str, group=group)\n        _Execute(self._program.Statements(), context, callback, trace)", "docstring": "Low level method to expand the template piece by piece.\n\nArgs:\ndata_dict: The JSON data dictionary.\ncallback: A callback which should be called with each expanded token.\ngroup: Dictionary of name -> Template instance (for styles)\n\nExample: You can pass 'f.write' as the callback to write directly to a file\nhandle.", "source": "juraj-google-style"}
{"code": "def allocate(self, size, max_time_to_block_ms):\n    with self._lock:\n        if self._free:\n            return self._free.popleft()\n        elif (self._poolable_size == 0):\n            return io.BytesIO()\n        else:\n            buf = None\n            more_memory = threading.Condition(self._lock)\n            self._waiters.append(more_memory)\n            while (buf is None):\n                start_wait = time.time()\n                more_memory.wait((max_time_to_block_ms / 1000.0))\n                end_wait = time.time()\n                if self.wait_time:\n                    self.wait_time.record((end_wait - start_wait))\n                if self._free:\n                    buf = self._free.popleft()\n                else:\n                    self._waiters.remove(more_memory)\n                    raise Errors.KafkaTimeoutError('Failed to allocate memory within the configured max blocking time')\n            removed = self._waiters.popleft()\n            assert (removed is more_memory), 'Wrong condition'\n            if (self._free and self._waiters):\n                self._waiters[0].notify()\n            return buf", "docstring": "Allocate a buffer of the given size. This method blocks if there is not\nenough memory and the buffer pool is configured with blocking mode.\n\nArguments:\nsize (int): The buffer size to allocate in bytes [ignored]\nmax_time_to_block_ms (int): The maximum time in milliseconds to\nblock for buffer memory to be available\n\nReturns:\nio.BytesIO", "source": "codesearchnet"}
{"code": "def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:\n        \n        return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))", "docstring": "Returns a partial scope with current next state-fluents.\n\nArgs:\nnext_state_fluents (Sequence[tf.Tensor]): The next state fluents.\n\nReturns:\nA mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def listen(self, log, noprint=True):\n        \n        try:\n            result = self.decode_event(log.topics, log.data)\n        except ValueError:\n            return  \n\n        if not noprint:\n            print(result)\n\n        return result", "docstring": "Return a dictionary representation of the Log instance.\n\nNote:\nThis function won't work with anonymous events.\n\nArgs:\nlog (processblock.Log): The Log instance that needs to be parsed.\nnoprint (bool): Flag to turn off priting of the decoded log instance.", "source": "juraj-google-style"}
{"code": "def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):\n    my_structures = []\n    gene = ssbio.utils.request_json(link='http:\n    uniprots = []\n    if ('database_links' in gene):\n        if ('UniProt' in gene['database_links']):\n            uniprots = [x['id'] for x in gene['database_links']['UniProt']]\n        elif ('NCBI GI' in gene['database_links']):\n            uniprots = []\n            gis = [x['id'] for x in gene['database_links']['NCBI GI']]\n            gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()\n            uniprots.extend(gi_uniprots)\n            uniprots = ssbio.utils.flatlist_dropdup(uniprots)\n            uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]\n    if uniprots:\n        for u in uniprots:\n            get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)\n            if get_best_structure:\n                for best_structure in get_best_structure:\n                    my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))\n    return my_structures", "docstring": "Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.\n\nArgs:\nbigg_model: BiGG Model ID\nbigg_gene: BiGG Gene ID\n\nReturns:\nlist: rank-ordered list of tuples of (pdb_id, chain_id)", "source": "codesearchnet"}
{"code": "def get_members(self, name):\n        \n        grpid = re.search(r'(\\d+)', name).group()\n        command = 'show port-channel %s all-ports' % grpid\n        config = self.node.enable(command, 'text')\n        return re.findall(r'\\b(?!Peer)Ethernet[\\d/]*\\b',\n                          config[0]['result']['output'])", "docstring": "Returns the member interfaces for the specified Port-Channel\n\nArgs:\nname(str): The Port-channel interface name to return the member\ninterfaces for\n\nReturns:\nA list of physical interface names that belong to the specified\ninterface", "source": "juraj-google-style"}
{"code": "def _MatchValue(expected, actual):\n    if isinstance(expected, dict):\n        if not isinstance(actual, dict):\n            return False\n        for k, v in expected.items():\n            if k not in actual:\n                logging.log(1, 'Not exist: field=' + k)\n                return False\n            if not MessageValue._MatchValue(v, actual[k]):\n                logging.log(1, 'Different: field=%s, expected=%s, actual=%s', k, v, actual[k])\n                return False\n        return True\n    if isinstance(expected, list):\n        if not isinstance(actual, list):\n            return False\n        for e in expected:\n            found = False\n            for a in actual:\n                if MessageValue._MatchValue(e, a):\n                    found = True\n                    break\n            if not found:\n                return False\n        return True\n    if isinstance(expected, stl.base.QualifierValue.Resolved):\n        return expected.ValidateAndSet(actual)\n    if isinstance(expected, stl.base.FuncSet):\n        expected.SetValue(actual)\n        return True\n    if isinstance(expected, stl.base.LocalVar):\n        return expected.value == actual\n    if isinstance(expected, stl.base.Func):\n        return expected.Run() == actual\n    if isinstance(expected, MessageValue):\n        return expected._MatchFromString(actual)\n    return expected == actual", "docstring": "Whether or not |expected| is same value of |actual|.\n\nArgs:\nexpected: Expected value.\nactual: Actual value.\n\nReturns:\nTrue if:\n1) Type of |expected| and of |actual| must be same.\n2) If type of |expected| is dictionary or sub-message, all fields\nspecified in |expected| must have same value in |actual|.\n3) If type of |expected| is array, all entries specified in |expected|\nmust exist in |actual| in any order.\n4) If type of |expected| is either integer or string, |expected| must\nbe same to |actual|.", "source": "github-repos"}
{"code": "def MatchBuildContext(self, target_os, target_arch, target_package, context=None):\n    for spec in self.Get('ClientBuilder.target_platforms', context=context):\n        (spec_os, arch, package_name) = spec.split('_')\n        if ((spec_os == target_os) and (arch == target_arch) and (package_name == target_package)):\n            return True\n    return False", "docstring": "Return true if target_platforms matches the supplied parameters.\n\nUsed by buildanddeploy to determine what clients need to be built.\n\nArgs:\ntarget_os: which os we are building for in this run (linux, windows,\ndarwin)\ntarget_arch: which arch we are building for in this run (i386, amd64)\ntarget_package: which package type we are building (exe, dmg, deb, rpm)\ncontext: config_lib context\n\nReturns:\nbool: True if target_platforms spec matches parameters.", "source": "codesearchnet"}
{"code": "def validate(bo, error_level: str='WARNING') -> Tuple[(bool, List[Tuple[(str, str)]])]:\n    if bo.ast:\n        bo = validate_functions(bo.ast, bo)\n        if (error_level == 'WARNING'):\n            bo = validate_arg_values(bo.ast, bo)\n    else:\n        bo.validation_messages.append(('ERROR', 'Invalid BEL Statement - cannot parse'))\n    for msg in bo.validation_messages:\n        if (msg[0] == 'ERROR'):\n            bo.parse_valid = False\n            break\n    return bo", "docstring": "Semantically validate BEL AST\n\nAdd errors and warnings to bel_obj.validation_messages\n\nError Levels are similar to log levels - selecting WARNING includes both\nWARNING and ERROR, selecting ERROR just includes ERROR\n\nArgs:\nbo: main BEL language object\nerror_level: return ERRORs only or also WARNINGs\n\nReturns:\nTuple[bool, List[Tuple[str, str]]]: (is_valid, messages)", "source": "codesearchnet"}
{"code": "def metamodel_from_file(file_name, **kwargs):\n    with codecs.open(file_name, 'r', 'utf-8') as f:\n        lang_desc = f.read()\n    metamodel = metamodel_from_str(lang_desc=lang_desc, file_name=file_name, **kwargs)\n    return metamodel", "docstring": "Creates new metamodel from the given file.\n\nArgs:\nfile_name(str): The name of the file with textX language description.\nother params: See metamodel_from_str.", "source": "codesearchnet"}
{"code": "def __init__(self, real_env, world_model_dir, hparams, random_starts,\n               setable_initial_frames=False):\n    \n\n    self._setable_initial_frames = setable_initial_frames\n\n    if self._setable_initial_frames:\n      real_obs_shape = real_env.observation_space.shape\n      shape = (1, hparams.frame_stack_size) + real_obs_shape\n      self._initial_frames = np.zeros(shape=shape, dtype=np.uint8)\n      def initial_frame_chooser(batch_size):\n        assert batch_size == 1\n        return self._initial_frames\n\n    else:\n      initial_frame_chooser = rl_utils.make_initial_frame_chooser(\n          real_env, hparams.frame_stack_size,\n          simulation_random_starts=random_starts,\n          simulation_flip_first_random_for_beginning=False\n      )\n    env_fn = make_simulated_env_fn_from_hparams(\n        real_env, hparams,\n        batch_size=1,\n        initial_frame_chooser=initial_frame_chooser,\n        model_dir=world_model_dir,\n    )\n\n    env = env_fn(in_graph=False)\n    self.env = FlatBatchEnv(env)\n\n    self.observation_space = self.env.observation_space\n    self.action_space = self.env.action_space", "docstring": "Init.\n\nArgs:\nreal_env: gym environment.\nworld_model_dir: path to world model checkpoint directory.\nhparams: hparams for rlmb pipeline.\nrandom_starts: if restart world model from random frames, or only\nfrom initial ones (from beginning of episodes). Valid only when\n`setable_initial_fames` set to False.\nsetable_initial_frames: if True, initial_frames for world model should be\nset by `add_to_initial_stack`.", "source": "juraj-google-style"}
{"code": "def mean_absolute_error(y_true, y_pred):\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)\n    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n    return ops.mean(ops.abs(y_true - y_pred), axis=-1)", "docstring": "Computes the mean absolute error between labels and predictions.\n\n```python\nloss = mean(abs(y_true - y_pred), axis=-1)\n```\n\nArgs:\ny_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.\ny_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.\n\nReturns:\nMean absolute error values with shape = `[batch_size, d0, .. dN-1]`.\n\nExample:\n\n>>> y_true = np.random.randint(0, 2, size=(2, 3))\n>>> y_pred = np.random.random(size=(2, 3))\n>>> loss = keras.losses.mean_absolute_error(y_true, y_pred)", "source": "github-repos"}
{"code": "def alerts(self):\n    if (not self.__alerts):\n        self.__alerts = Alerts(self.__connection)\n    return self.__alerts", "docstring": "Gets the Alerts API client.\n\nReturns:\nAlerts:", "source": "codesearchnet"}
{"code": "def get_job(self, jobid):\n    import shlex\n    from pyccc.job import Job\n    job = Job(engine=self)\n    job.jobid = job.rundata.containerid = jobid\n    try:\n        jobdata = self.client.inspect_container(job.jobid)\n    except docker.errors.NotFound:\n        raise exceptions.JobNotFound(('The daemon could not find containter \"%s\"' % job.jobid))\n    cmd = jobdata['Config']['Cmd']\n    entrypoint = jobdata['Config']['Entrypoint']\n    if ((len(cmd) == 3) and (cmd[0:2] == ['sh', '-c'])):\n        cmd = cmd[2]\n    elif (entrypoint is not None):\n        cmd = (entrypoint + cmd)\n    if isinstance(cmd, list):\n        cmd = ' '.join((shlex.quote(x) for x in cmd))\n    job.command = cmd\n    job.env = jobdata['Config']['Env']\n    job.workingdir = jobdata['Config']['WorkingDir']\n    job.rundata.container = jobdata\n    return job", "docstring": "Return a Job object for the requested job id.\n\nThe returned object will be suitable for retrieving output, but depending on the engine,\nmay not populate all fields used at launch time (such as `job.inputs`, `job.commands`, etc.)\n\nArgs:\njobid (str): container id\n\nReturns:\npyccc.job.Job: job object for this container\n\nRaises:\npyccc.exceptions.JobNotFound: if no job could be located for this jobid", "source": "codesearchnet"}
{"code": "def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):\n    num_joints = len(joint_positions)\n    if (not sync_last):\n        num_joints -= 1\n    for i in range(num_joints):\n        if simulate:\n            p.setJointMotorControl2(self.ik_robot, self.actual[i], p.POSITION_CONTROL, targetVelocity=0, targetPosition=joint_positions[i], force=500, positionGain=0.5, velocityGain=1.0)\n        else:\n            p.resetJointState(self.ik_robot, self.actual[i], joint_positions[i])", "docstring": "Force the internal robot model to match the provided joint angles.\n\nArgs:\njoint_positions (list): a list or flat numpy array of joint positions.\nsimulate (bool): If True, actually use physics simulation, else\nwrite to physics state directly.\nsync_last (bool): If False, don't sync the last joint angle. This\nis useful for directly controlling the roll at the end effector.", "source": "codesearchnet"}
{"code": "def _joint_mean(self):\n    with tf.name_scope('mean_joint'):\n        with tf.control_dependencies(self.runtime_assertions):\n            initial_latent_mean = _broadcast_to_shape(self.initial_state_prior.mean()[(..., tf.newaxis)], tf.concat([self.batch_shape_tensor(), [self.latent_size, 1]], axis=0))\n        initial_observation_mean = _propagate_mean(initial_latent_mean, self.get_observation_matrix_for_timestep(self.initial_step), self.get_observation_noise_for_timestep(self.initial_step))\n        mean_step = build_kalman_mean_step(self.get_transition_matrix_for_timestep, self.get_transition_noise_for_timestep, self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep)\n        (latent_means, observation_means) = tf.scan(mean_step, elems=tf.range((self.initial_step + 1), self.final_step), initializer=(initial_latent_mean, initial_observation_mean))\n        latent_means = tf.concat([initial_latent_mean[(tf.newaxis, ...)], latent_means], axis=0)\n        observation_means = tf.concat([initial_observation_mean[(tf.newaxis, ...)], observation_means], axis=0)\n        latent_means = tf.squeeze(latent_means, (- 1))\n        latent_means = distribution_util.move_dimension(latent_means, 0, (- 2))\n        observation_means = tf.squeeze(observation_means, (- 1))\n        observation_means = distribution_util.move_dimension(observation_means, 0, (- 2))\n        return (latent_means, observation_means)", "docstring": "Compute prior means for all variables via dynamic programming.\n\nReturns:\nlatent_means: Prior means of latent states `z_t`, as a `Tensor`\nof shape `batch_shape + [num_timesteps, latent_size]`\nobservation_means: Prior covariance matrices of observations\n`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,\nobservation_size]`", "source": "codesearchnet"}
{"code": "def _add_case(self, case_obj):\n        \n        if self.case(case_obj['_id']):\n            raise IntegrityError(\"Case %s already exists in database\" % case_obj['_id'])\n\n        return self.case_collection.insert_one(case_obj)", "docstring": "Add a case to the database\nIf the case already exists exception is raised\n\nArgs:\ncase_obj(Case)", "source": "juraj-google-style"}
{"code": "def dump(self, output, close_after_write=True):\n    self.open(output)\n    try:\n        self.make_worksheet(self.table_name)\n        self.write_table()\n    finally:\n        if close_after_write:\n            self.close()", "docstring": "Write a worksheet to the current workbook.\n\nArgs:\noutput (str):\nPath to the workbook file to write.\nclose_after_write (bool, optional):\nClose the workbook after write.\nDefaults to |True|.", "source": "codesearchnet"}
{"code": "def query_with_attributes(type_to_query, client):\n        \n        session = client.create_session()\n\n        \n        query = session.query(Attribute.name,\n                              Attribute.value,\n                              Entity.id) \\\n            .join(Entity) \\\n            .filter(Entity.type == type_to_query)\n\n        df = client.df_query(query)\n\n        session.close()\n\n        \n        df = df.dropna(how='any')\n\n        \n        \n        df = df.set_index(['id', 'name']).unstack().reset_index()\n        \n        df.columns = ['id'] + list(df.columns.get_level_values(1)[1:])\n\n        return df", "docstring": "Query all entities of a specific type, with their attributes\n\nArgs:\ntype_to_query (str): type of entity to query\nclient: DB client to perform query with\n\nReturns:\npandas.DataFrame: table of entities, with attributes as columns", "source": "juraj-google-style"}
{"code": "def set_column_count(self, count):\n        \n        current_row_count = self.row_count()\n        current_column_count = self.column_count()\n        if count > current_column_count:\n            cl = TableEditableItem if self._editable else TableItem\n            for r_key in self.children.keys():\n                row = self.children[r_key]\n                for i in range(current_column_count, count):\n                    row.append(cl(), str(i))\n                    if self._editable:\n                        row.children[str(i)].onchange.connect(\n                            self.on_item_changed, int(r_key), int(i))\n            self._update_first_row()\n        elif count < current_column_count:\n            for row in self.children.values():\n                for i in range(count, current_column_count):\n                    row.remove_child(row.children[str(i)])\n        self._column_count = count", "docstring": "Sets the table column count.\n\nArgs:\ncount (int): column of rows", "source": "juraj-google-style"}
{"code": "def getRowByIndex(self, index):\n        \n        assert isinstance(index, int)\n        return Row(self._impl.getRowByIndex(index))", "docstring": "Get row by numeric index.\n\nArgs:\nindex: Zero-based index of the row to get.\n\nReturns:\nThe corresponding row.", "source": "juraj-google-style"}
{"code": "def add_to_tensor(self, x, name='add_to_tensor'):\n    with self._name_scope(name):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        return self._add_to_tensor(x)", "docstring": "Add matrix represented by this operator to `x`.  Equivalent to `A + x`.\n\nArgs:\nx:  `Tensor` with same `dtype` and shape broadcastable to `self.shape`.\nname:  A name to give this `Op`.\n\nReturns:\nA `Tensor` with broadcast shape and same `dtype` as `self`.", "source": "github-repos"}
{"code": "def compare_names(first, second):\n    \n    first = name_to_vector(first)\n    second = name_to_vector(second)\n\n    zipped = zip(first, second)\n\n    if not zipped:\n        return 0\n\n    similarity_factor = 0\n    for fitem, _ in zipped:\n        if fitem in second:\n            similarity_factor += 1\n\n    return (float(similarity_factor) / len(zipped)) * 100", "docstring": "Compare two names in complicated, but more error prone way.\n\nAlgorithm is using vector comparison.\n\nExample:\n>>> compare_names(\"Franta Putšálek\", \"ing. Franta Putšálek\")\n100.0\n>>> compare_names(\"F. Putšálek\", \"ing. Franta Putšálek\")\n50.0\n\nArgs:\nfirst (str): Fisst name as string.\nsecond (str): Second name as string.\n\nReturns:\nfloat: Percentage of the similarity.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    return (hidden_states, attn_weights)", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nInput to the layer.\nattention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nAttention mask.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings, to be added to `hidden_states`.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes of the backbone feature maps.\nspatial_shapes_list (`List[Tuple[int, int]]`, *optional*):\nSpatial shapes of the backbone feature maps (but as list for export compatibility).\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def initialize(self):\n    self.log.info('Initializing the snippet package %s.', self.package)\n    start_time = time.perf_counter()\n    self.log.debug('Preparing to start the snippet server of %s.', self.package)\n    self.before_starting_server()\n    try:\n        self.log.debug('Starting the snippet server of %s.', self.package)\n        self.start_server()\n        self.log.debug('Making a connection to the snippet server of %s.', self.package)\n        self._make_connection()\n    except Exception:\n        self.log.error('Error occurred trying to start and connect to the snippet server of %s.', self.package)\n        try:\n            self.stop()\n        except Exception:\n            self.log.exception('Failed to stop the snippet package %s after failure to start and connect.', self.package)\n        raise\n    self.log.debug('Snippet package %s initialized after %.1fs.', self.package, time.perf_counter() - start_time)", "docstring": "Initializes the snippet client to interact with the remote device.\n\nThis function contains following stages:\n1. before starting server: preparing to start the snippet server.\n2. start server: starting the snippet server on the remote device.\n3. make connection: making a connection to the snippet server.\n\nAn error occurring at any stage will abort the initialization. Only errors\nat the `start_server` and `make_connection` stages will trigger `stop` to\nclean up.\n\nRaises:\nerrors.ProtocolError: something went wrong when exchanging data with the\nserver.\nerrors.ServerStartPreCheckError: when prechecks for starting the server\nfailed.\nerrors.ServerStartError: when failed to start the snippet server.", "source": "github-repos"}
{"code": "def scalar(name, data, step=None, description=None):\n    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)\n    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope)\n    with summary_scope(name, 'scalar_summary', values=[data, step]) as (tag, _):\n        tf.debugging.assert_scalar(data)\n        return tf.summary.write(tag=tag, tensor=tf.cast(data, tf.float32), step=step, metadata=summary_metadata)", "docstring": "Write a scalar summary.\n\nArguments:\nname: A name for this summary. The summary tag used for TensorBoard will\nbe this name prefixed by any active name scopes.\ndata: A real numeric scalar value, convertible to a `float32` Tensor.\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\ndescription: Optional long-form description for this summary, as a\nconstant `str`. Markdown is supported. Defaults to empty.\n\nReturns:\nTrue on success, or false if no summary was written because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "codesearchnet"}
{"code": "def _collect_metrics(repo, path, recursive, typ, xpath, branch):\n    outs = [out for stage in repo.stages() for out in stage.outs]\n    if path:\n        try:\n            outs = repo.find_outs_by_path(path, outs=outs, recursive=recursive)\n        except OutputNotFoundError:\n            logger.debug(\"stage file not for found for '{}' in branch '{}'\".format(path, branch))\n            return []\n    res = []\n    for o in outs:\n        if (not o.metric):\n            continue\n        if ((not typ) and isinstance(o.metric, dict)):\n            t = o.metric.get(o.PARAM_METRIC_TYPE, typ)\n            x = o.metric.get(o.PARAM_METRIC_XPATH, xpath)\n        else:\n            t = typ\n            x = xpath\n        res.append((o, t, x))\n    return res", "docstring": "Gather all the metric outputs.\n\nArgs:\npath (str): Path to a metric file or a directory.\nrecursive (bool): If path is a directory, do a recursive search for\nmetrics on the given path.\ntyp (str): The type of metric to search for, could be one of the\nfollowing (raw|json|tsv|htsv|csv|hcsv).\nxpath (str): Path to search for.\nbranch (str): Branch to look up for metrics.\n\nReturns:\nlist(tuple): (output, typ, xpath)\n- output:\n- typ:\n- xpath:", "source": "codesearchnet"}
{"code": "def matrix(self):\n    matrix = (c_float * 6)()\n    rc = self._libinput.libinput_device_config_calibration_get_matrix(self._handle, matrix)\n    return (rc, tuple(matrix))", "docstring": "The current calibration matrix for this device.\n\nReturns:\n(bool, (float, float, float, float, float, float)): :obj:`False` if\nno calibration is set and\nthe returned matrix is the identity matrix, :obj:`True`\notherwise. :obj:`tuple` representing the first two rows of\na 3x3 matrix as described in :meth:`set_matrix`.", "source": "codesearchnet"}
{"code": "def _order_code(dis_code: pycnite.types.DisassembledCode) -> OrderedCode:\n    ops = opcodes.build_opcodes(dis_code)\n    add_pop_block_targets(ops)\n    blocks = compute_order(ops, dis_code.python_version)\n    return OrderedCode(dis_code.code, ops, blocks)", "docstring": "Split a CodeType object into ordered blocks.\n\nThis takes a CodeType object (i.e., a piece of compiled Python code) and\nsplits it into ordered basic blocks.\n\nArgs:\ndis_code: A pycnite.types.DisassembledCode object.\n\nReturns:\nAn OrderedCode instance.", "source": "github-repos"}
{"code": "def __validate_args(self, func_name, args, kwargs):\n        \n        from pyvalid.validators import Validator\n        for i, (arg_name, accepted_values) in enumerate(self.accepted_args):\n            if i < len(args):\n                value = args[i]\n            else:\n                if arg_name in kwargs:\n                    value = kwargs[arg_name]\n                elif i in self.optional_args:\n                    continue\n                else:\n                    raise InvalidArgumentNumberError(func_name)\n            is_valid = False\n            for accepted_val in accepted_values:\n                is_validator = (\n                    isinstance(accepted_val, Validator) or\n                    (\n                        isinstance(accepted_val, MethodType) and\n                        hasattr(accepted_val, '__func__') and\n                        isinstance(accepted_val.__func__, Validator)\n                    )\n                )\n                if is_validator:\n                    is_valid = accepted_val(value)\n                elif isinstance(accepted_val, type):\n                    is_valid = isinstance(value, accepted_val)\n                else:\n                    is_valid = value == accepted_val\n                if is_valid:\n                    break\n            if not is_valid:\n                ord_num = self.__ordinal(i + 1)\n                raise ArgumentValidationError(\n                    ord_num,\n                    func_name,\n                    value,\n                    accepted_values\n                )", "docstring": "Compare value of each required argument with list of\naccepted values.\n\nArgs:\nfunc_name (str): Function name.\nargs (list): Collection of the position arguments.\nkwargs (dict): Collection of the keyword arguments.\n\nRaises:\nInvalidArgumentNumberError: When position or count of the arguments\nis incorrect.\nArgumentValidationError: When encountered unexpected argument\nvalue.", "source": "juraj-google-style"}
{"code": "def load(self, context):\n    \n    try:\n      \n      import tensorflow\n    except ImportError:\n      return\n    \n    from tensorboard.plugins.beholder.beholder_plugin import BeholderPlugin\n    return BeholderPlugin(context)", "docstring": "Returns the plugin, if possible.\n\nArgs:\ncontext: The TBContext flags.\n\nReturns:\nA BeholderPlugin instance or None if it couldn't be loaded.", "source": "juraj-google-style"}
{"code": "def _get_condition_json(self, index):\n    \n    condition = self.condition_data[index]\n    condition_log = {\n      'name': condition[0],\n      'value': condition[1],\n      'type': condition[2],\n      'match': condition[3]\n    }\n\n    return json.dumps(condition_log)", "docstring": "Method to generate json for logging audience condition.\n\nArgs:\nindex: Index of the condition.\n\nReturns:\nString: Audience condition JSON.", "source": "juraj-google-style"}
{"code": "def get_size(self, value=None):\n        \n        if value is None:\n            if not self:\n                \n                return 0\n            elif issubclass(type(self[0]), GenericType):\n                \n                \n                return len(self) * self[0].get_size()\n\n            \n            return sum(item.get_size() for item in self)\n\n        return type(self)(value).get_size()", "docstring": "Return the size in bytes.\n\nArgs:\nvalue: In structs, the user can assign other value instead of\nthis class' instance. Here, in such cases, ``self`` is a class\nattribute of the struct.\n\nReturns:\nint: The size in bytes.", "source": "juraj-google-style"}
{"code": "def Map(self, function):\n    new_table = self.__class__()\n    new_table._table = [self.header]\n    for row in self:\n        filtered_row = function(row)\n        if filtered_row:\n            new_table.Append(filtered_row)\n    return new_table", "docstring": "Applies the function to every row in the table.\n\nArgs:\nfunction: A function applied to each row.\n\nReturns:\nA new TextTable()\n\nRaises:\nTableError: When transform is not invalid row entry. The transform\nmust be compatible with Append().", "source": "codesearchnet"}
{"code": "def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):\n    if (embedding_path is not None):\n        embedding_type = embedding_path\n    embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)\n    if (embeddings_index is not None):\n        return embeddings_index\n    if (embedding_path is None):\n        embedding_type_obj = get_embedding_type(embedding_type)\n        extract = embedding_type_obj.get('extract', True)\n        file_path = get_file(embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash'))\n        if ('file_in_zip' in embedding_type_obj):\n            zip_folder = file_path.split('.zip')[0]\n            with ZipFile(file_path, 'r') as zf:\n                zf.extractall(zip_folder)\n            file_path = os.path.join(zip_folder, embedding_type_obj['file_in_zip'])\n        elif extract:\n            if file_path.endswith('.zip'):\n                file_path = file_path.split('.zip')[0]\n    else:\n        file_path = embedding_path\n    embeddings_index = _build_embeddings_index(file_path, embedding_dims)\n    if cache:\n        _EMBEDDINGS_CACHE[embedding_type] = embeddings_index\n    return embeddings_index", "docstring": "Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.\n\nArgs:\nembedding_type: The embedding type to load.\nembedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.\n\nReturns:\nThe embeddings indexed by word.", "source": "codesearchnet"}
{"code": "def _read_mode_utopt(self, size, kind):\n    temp = self._read_fileng(size)\n    data = dict(kind=kind, length=size, granularity=('minutes' if int(temp[0]) else 'seconds'), timeout=bytes(chr(int(temp[0:], base=2)), encoding='utf-8'))\n    return data", "docstring": "Read User Timeout option.\n\nPositional arguments:\n* size - int, length of option\n* kind - int, 28 (User Timeout Option)\n\nReturns:\n* dict -- extracted User Timeout (TIMEOUT) option\n\nStructure of TCP TIMEOUT [RFC 5482]:\n0                   1                   2                   3\n0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n|   Kind = 28   |   Length = 4  |G|        User Timeout         |\n+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\nOctets      Bits        Name                    Description\n0           0     tcp.timeout.kind        Kind (28)\n1           8     tcp.timeout.length      Length (4)\n2          16     tcp.timeout.granularity Granularity\n2          17     tcp.timeout.timeout     User Timeout", "source": "codesearchnet"}
{"code": "def make_datastore_query(self, cursor=None):\n    filters = {}\n    filters['__key__ >= '] = _key_for_namespace(self.namespace_start, self.app)\n    filters['__key__ <= '] = _key_for_namespace(self.namespace_end, self.app)\n    return datastore.Query('__namespace__', filters=filters, keys_only=True, cursor=cursor, _app=self.app)", "docstring": "Returns a datastore.Query that generates all namespaces in the range.\n\nArgs:\ncursor: start cursor for the query.\n\nReturns:\nA datastore.Query instance that generates db.Keys for each namespace in\nthe NamespaceRange.", "source": "codesearchnet"}
{"code": "def set_metadata(self, key: str, value: Any, cloneable: bool=False) -> 'DNA':\n    self.metadata.rebind({key: value}, raise_on_no_change=False, skip_notification=True)\n    if cloneable:\n        self._cloneable_metadata_keys.add(key)\n    return self", "docstring": "Set metadata associated with a key.\n\nMetadata associated with the DNA will be persisted and carried over across\nprocesses, which is different the `userdata`. (See `set_userdata` for more\ndetails.)\n\nArgs:\nkey: Key for the metadata.\nvalue: Value for the metadata.\ncloneable: If True, the key/value will be propagated during clone.\n\nReturns:\nSelf.", "source": "github-repos"}
{"code": "def db_ws004c(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_ws004c`'.format(value))\n\n        self._db_ws004c = value", "docstring": "Corresponds to IDD Field `db_ws004c`\nMean coincident dry-bulb temperature to wind speed corresponding to 0.40% cumulative frequency for coldest month\n\nArgs:\nvalue (float): value for IDD Field `db_ws004c`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def AddIndex(self, path_segment_index):\n    \n    if path_segment_index in self._weight_per_index:\n      raise ValueError('Path segment index already set.')\n\n    self._weight_per_index[path_segment_index] = 0", "docstring": "Adds a path segment index and sets its weight to 0.\n\nArgs:\npath_segment_index: an integer containing the path segment index.\n\nRaises:\nValueError: if the path segment weights already contains\nthe path segment index.", "source": "juraj-google-style"}
{"code": "def _ValidateDataTypeDefinition(cls, data_type_definition):\n    if (not cls._IsIdentifier(data_type_definition.name)):\n        raise ValueError('Data type definition name: {0!s} not a valid identifier'.format(data_type_definition.name))\n    if keyword.iskeyword(data_type_definition.name):\n        raise ValueError('Data type definition name: {0!s} matches keyword'.format(data_type_definition.name))\n    members = getattr(data_type_definition, 'members', None)\n    if (not members):\n        raise ValueError('Data type definition name: {0!s} missing members'.format(data_type_definition.name))\n    defined_attribute_names = set()\n    for member_definition in members:\n        attribute_name = member_definition.name\n        if (not cls._IsIdentifier(attribute_name)):\n            raise ValueError('Attribute name: {0!s} not a valid identifier'.format(attribute_name))\n        if attribute_name.startswith('_'):\n            raise ValueError('Attribute name: {0!s} starts with underscore'.format(attribute_name))\n        if keyword.iskeyword(attribute_name):\n            raise ValueError('Attribute name: {0!s} matches keyword'.format(attribute_name))\n        if (attribute_name in defined_attribute_names):\n            raise ValueError('Attribute name: {0!s} already defined'.format(attribute_name))\n        defined_attribute_names.add(attribute_name)", "docstring": "Validates the data type definition.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nValueError: if the data type definition is not considered valid.", "source": "codesearchnet"}
{"code": "def get_container_list(self) -> list:\n    containers = []\n    containers_list = self._client.containers.list()\n    for c_list in containers_list:\n        containers.append(c_list.short_id)\n    return containers", "docstring": "Get list of containers.\n\nReturns:\nlist, all the ids of containers", "source": "codesearchnet"}
{"code": "def update(self, forecasts, observations):\n        \n        for t, threshold in enumerate(self.thresholds[:-1]):\n            self.frequencies.loc[t, \"Positive_Freq\"] += np.count_nonzero((threshold <= forecasts) &\n                                                                         (forecasts < self.thresholds[t+1]) &\n                                                                         (observations >= self.obs_threshold))\n            self.frequencies.loc[t, \"Total_Freq\"] += np.count_nonzero((threshold <= forecasts) &\n                                                                      (forecasts < self.thresholds[t+1]))", "docstring": "Update the statistics with a set of forecasts and observations.\n\nArgs:\nforecasts (numpy.ndarray): Array of forecast probability values\nobservations (numpy.ndarray): Array of observation values", "source": "juraj-google-style"}
{"code": "def delete(adapter, case_obj, update=False, existing_case=False):\n    \n    \n    if update:\n        adapter.add_case(existing_case)\n    else:\n        adapter.delete_case(case_obj)\n\n    for file_type in ['vcf_path','vcf_sv_path']:\n        if not case_obj.get(file_type):\n            continue\n        variant_file = case_obj[file_type]\n        \n        vcf_obj = get_vcf(variant_file)\n\n        delete_variants(\n            adapter=adapter,\n            vcf_obj=vcf_obj,\n            case_obj=case_obj,\n        )", "docstring": "Delete a case and all of it's variants from the database.\n\nArgs:\nadapter: Connection to database\ncase_obj(models.Case)\nupdate(bool): If we are in the middle of an update\nexisting_case(models.Case): If something failed during an update we need to revert\nto the original case", "source": "juraj-google-style"}
{"code": "def get(self, name):\n    config = self.get_block(('interface %s' % name))\n    if ((name[0:2] in ['Et', 'Po']) and (not SWITCHPORT_RE.search(config, re.M))):\n        return None\n    resource = dict(name=name)\n    resource.update(self._parse_address(config))\n    resource.update(self._parse_mtu(config))\n    return resource", "docstring": "Returns the specific IP interface properties\n\nThe Ipinterface resource returns the following:\n\n* name (str): The name of the interface\n* address (str): The IP address of the interface in the form\nof A.B.C.D/E\n* mtu (int): The configured value for IP MTU.\n\n\nArgs:\nname (string): The interface identifier to retrieve the\nconfiguration for\n\nReturn:\nA Python dictionary object of key/value pairs that represents\nthe current configuration of the node.  If the specified\ninterface does not exist then None is returned.", "source": "codesearchnet"}
{"code": "def model_inference_fn(features, training, params):\n    \n\n    mg_batchn = functools.partial(\n        tf.layers.batch_normalization,\n        axis=-1,\n        momentum=.95,\n        epsilon=1e-5,\n        center=True,\n        scale=True,\n        fused=True,\n        training=training)\n\n    mg_conv2d = functools.partial(\n        tf.layers.conv2d,\n        filters=params['conv_width'],\n        kernel_size=3,\n        padding=\"same\",\n        data_format=\"channels_last\",\n        use_bias=False)\n\n    mg_global_avgpool2d = functools.partial(\n        tf.layers.average_pooling2d,\n        pool_size=go.N,\n        strides=1,\n        padding=\"valid\",\n        data_format=\"channels_last\")\n\n    def mg_activation(inputs):\n        if FLAGS.use_swish:\n            return tf.nn.swish(inputs)\n\n        return tf.nn.relu(inputs)\n\n\n    def residual_inner(inputs):\n        conv_layer1 = mg_batchn(mg_conv2d(inputs))\n        initial_output = mg_activation(conv_layer1)\n        conv_layer2 = mg_batchn(mg_conv2d(initial_output))\n        return conv_layer2\n\n    def mg_res_layer(inputs):\n        residual = residual_inner(inputs)\n        output = mg_activation(inputs + residual)\n        return output\n\n    def mg_squeeze_excitation_layer(inputs):\n        \n        \n        \n\n        channels = params['conv_width']\n        ratio = FLAGS.SE_ratio\n        assert channels % ratio == 0\n\n        residual = residual_inner(inputs)\n        pool = mg_global_avgpool2d(residual)\n        fc1 = tf.layers.dense(pool, units=channels \n        squeeze = mg_activation(fc1)\n\n        if FLAGS.use_SE_bias:\n            fc2 = tf.layers.dense(squeeze, units=2*channels)\n            \n            gamma, bias = tf.split(fc2, 2, axis=3)\n        else:\n            gamma = tf.layers.dense(squeeze, units=channels)\n            bias = 0\n\n        sig = tf.nn.sigmoid(gamma)\n        \n        scale = tf.reshape(sig, [-1, 1, 1, channels])\n\n        excitation = tf.multiply(scale, residual) + bias\n        return mg_activation(inputs + excitation)\n\n    initial_block = mg_activation(mg_batchn(mg_conv2d(features)))\n\n    \n    shared_output = initial_block\n    for _ in range(params['trunk_layers']):\n        if FLAGS.use_SE or FLAGS.use_SE_bias:\n            shared_output = mg_squeeze_excitation_layer(shared_output)\n        else:\n            shared_output = mg_res_layer(shared_output)\n\n    \n    policy_conv = mg_conv2d(\n        shared_output, filters=params['policy_conv_width'], kernel_size=1)\n    policy_conv = mg_activation(mg_batchn(policy_conv, center=False, scale=False))\n    logits = tf.layers.dense(\n        tf.reshape(\n            policy_conv, [-1, params['policy_conv_width'] * go.N * go.N]),\n        go.N * go.N + 1)\n\n    policy_output = tf.nn.softmax(logits, name='policy_output')\n\n    \n    value_conv = mg_conv2d(\n        shared_output, filters=params['value_conv_width'], kernel_size=1)\n    value_conv = mg_activation(mg_batchn(value_conv, center=False, scale=False))\n\n    value_fc_hidden = mg_activation(tf.layers.dense(\n        tf.reshape(value_conv, [-1, params['value_conv_width'] * go.N * go.N]),\n        params['fc_width']))\n    value_output = tf.nn.tanh(\n        tf.reshape(tf.layers.dense(value_fc_hidden, 1), [-1]),\n        name='value_output')\n\n    return policy_output, value_output, logits", "docstring": "Builds just the inference part of the model graph.\n\nArgs:\nfeatures: input features tensor.\ntraining: True if the model is training.\nparams: A 
dictionary\n\nReturns:\n(policy_output, value_output, logits) tuple of tensors.", "source": "juraj-google-style"}
{"code": "async def check_record(self, record, timeout=60):\n        \n        start_time = time.time()\n\n        name, rr_data, r_type, ttl = self._extract_record_data(record)\n        r_type_code = async_dns.types.get_code(r_type)\n\n        resolvable_record = False\n        retries = 0\n        sleep_time = 5\n\n        while not resolvable_record and \\\n                timeout > retries * sleep_time:\n\n            retries += 1\n            resolver_res = await self._resolver.query(name, r_type_code)\n            possible_ans = resolver_res.an\n\n            resolvable_record = \\\n                await self._check_resolver_ans(possible_ans, name,\n                                               rr_data, ttl, r_type_code)\n\n            if not resolvable_record:\n                await asyncio.sleep(sleep_time)\n\n        if not resolvable_record:\n            logging.info(\n                f'Sending metric record-checker-failed: {record}.')\n        else:\n            final_time = float(time.time() - start_time)\n            success_msg = (f'This record: {record} took {final_time} to '\n                           'register.')\n            logging.info(success_msg)", "docstring": "Measures the time for a DNS record to become available.\n\nQuery a provided DNS server multiple times until the reply matches the\ninformation in the record or until timeout is reached.\n\nArgs:\nrecord (dict): DNS record as a dict with record properties.\ntimeout (int): Time threshold to query the DNS server.", "source": "juraj-google-style"}
{"code": "def _file_size(self, field):\n    size = 0\n    try:\n        handle = open(self._files[field], 'r')\n        size = os.fstat(handle.fileno()).st_size\n        handle.close()\n    except:\n        size = 0\n    self._file_lengths[field] = size\n    return self._file_lengths[field]", "docstring": "Returns the file size for given file field.\n\nArgs:\nfield (str): File field\n\nReturns:\nint. File size", "source": "codesearchnet"}
{"code": "def list(self, path, timeout=None):\n    transport = DentFilesyncTransport(self.stream)\n    transport.write_data('LIST', path, timeout)\n    return (DeviceFileStat(dent_msg.name, dent_msg.mode, dent_msg.size, dent_msg.time) for dent_msg in transport.read_until_done('DENT', timeout))", "docstring": "List directory contents on the device.\n\nArgs:\npath: List the contents of this directory.\ntimeout: Timeout to use for this operation.\n\nReturns:\nGenerator yielding DeviceFileStat tuples representing the contents of\nthe requested path.", "source": "codesearchnet"}
{"code": "def fillup_layer(layer, first_clbit):\n        \n        for nones in [i for i, x in enumerate(layer) if x is None]:\n            layer[nones] = EmptyWire('═') if nones >= first_clbit else EmptyWire('─')\n        return layer", "docstring": "Given a layer, replace the Nones in it with EmptyWire elements.\nArgs:\nlayer (list): The layer that contains Nones.\nfirst_clbit (int): The first wire that is classic.\n\nReturns:\nlist: The new layer, with no Nones.", "source": "juraj-google-style"}
{"code": "def mark_job_as_failed(self, job_id, exception, traceback):\n        \n        session = self.sessionmaker()\n        job, orm_job = self._update_job_state(\n            job_id, State.FAILED, session=session)\n\n        \n        \n        \n        \n        \n        \n        \n        \n        job = copy(job)\n\n        job.exception = exception\n        job.traceback = traceback\n        orm_job.obj = job\n\n        session.add(orm_job)\n        session.commit()\n        session.close()", "docstring": "Mark the job as failed, and record the traceback and exception.\nArgs:\njob_id: The job_id of the job that failed.\nexception: The exception object thrown by the job.\ntraceback: The traceback, if any. Note (aron): Not implemented yet. We need to find a way\nfor the conncurrent.futures workers to throw back the error to us.\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def sample(reader, writer, n, start=None, stop=None, tsCol=None,\n           writeSampleOnly=True):\n  \n  rows = list(reader)\n  if tsCol is not None:\n    ts = rows[0][tsCol]\n    inc = rows[1][tsCol] - ts\n  if start is None:\n    start = 0\n  if stop is None:\n    stop = len(rows) - 1\n  initialN = stop - start + 1\n  \n  \n  numDeletes =  initialN - n\n  for i in xrange(numDeletes):\n    delIndex = random.randint(start, stop - i)\n    del rows[delIndex]\n  \n  if writeSampleOnly:\n    rows = rows[start:start + n]\n  \n  if tsCol is not None:\n    ts = rows[0][tsCol]\n  \n  for row in rows:\n    if tsCol is not None:\n      row[tsCol] = ts\n      ts += inc\n    writer.appendRecord(row)", "docstring": "Samples n rows.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream object to write output data to.\nn: The number of elements to sample.\nstart: The first row in the range to sample from.\nstop: The last row in the range to sample from.\ntsCol: If specified, the timestamp column to update.\nwriteSampleOnly: If False, the rows before start are written before the\nsample and the rows after stop are written after the sample.", "source": "juraj-google-style"}
{"code": "def posix_to_dt_str(posix):\n  \n  dt = datetime.datetime.utcfromtimestamp(posix)\n  dt_str = dt.strftime(_DT_FORMAT)\n  return dt_str + '.000Z'", "docstring": "Reverse of str_to_datetime.\n\nThis is used by GCS stub to generate GET bucket XML response.\n\nArgs:\nposix: A float of secs from unix epoch.\n\nReturns:\nA datetime str.", "source": "juraj-google-style"}
{"code": "def select(self, attr, default=None):\n        \n        return List([_select(item, attr, default) for item in self])", "docstring": "Select a given attribute (or chain or attributes) from the objects within the\nlist.\n\nArgs:\nattr (str): attributes to be selected (with initial `.` omitted)\ndefault (any): value to return if given element in list doesn't contain\ndesired attribute\n\nReturns:\nnhl.List: list of selected attribute values", "source": "juraj-google-style"}
{"code": "def profile_python(self, options):\n    opts = _build_options(options)\n    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()\n    try:\n        tfprof_node.ParseFromString(print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))\n    except message.DecodeError as e:\n        sys.stderr.write('Cannot parse returned proto: %s.\\n' % e)\n    return tfprof_node", "docstring": "Profile the statistics of the Python codes.\n\nBy default, it shows the call stack from root. To avoid\nredundant output, you may use options to filter as below\noptions['show_name_regexes'] = ['.*my_code.py.*']\n\nArgs:\noptions: A dict of options. See core/profiler/g3doc/options.md.\n\nReturns:\na MultiGraphNodeProto that records the results.", "source": "github-repos"}
{"code": "def __init__(self, stack_name, region, cf_client):\n        \n        try:\n            self._stack_name = stack_name\n            self._region = region\n            self._cf_client = cf_client\n        except Exception:\n            raise SystemError", "docstring": "StackTool is a simple tool to print some specific data about a\nCloudFormation stack.\n\nArgs:\nstack_name - name of the stack of interest\nregion - AWS region where the stack was created\n\nReturns:\nnot a damn thing\n\nRaises:\nSystemError - if everything isn't just right", "source": "juraj-google-style"}
{"code": "def sas_logical_jbods(self):\n    if (not self.__sas_logical_jbods):\n        self.__sas_logical_jbods = SasLogicalJbods(self.__connection)\n    return self.__sas_logical_jbods", "docstring": "Gets the SAS Logical JBODs API client.\n\nReturns:\nSasLogicalJbod:", "source": "codesearchnet"}
{"code": "def get_num_bytes(self, batch: Sequence[torch.Tensor]) -> int:\n    return sum((el.element_size() for tensor in batch for el in tensor))", "docstring": "Returns:\nThe number of bytes of data for a batch of Tensors.", "source": "github-repos"}
{"code": "def __init__(self, ca_cert=None, worker_cls=None, private_key=None):\n    \n    self.ca_cert = ca_cert\n    if private_key is None:\n      private_key = config.CONFIG.Get(\"Client.private_key\", default=None)\n\n    \n    self.server_certificate = None\n\n    \n    \n    \n    \n    self.http_manager = self.http_manager_class()\n\n    \n    self.communicator = ClientCommunicator(private_key=private_key)\n\n    \n    self.timer = Timer()\n\n    \n    \n    self.last_enrollment_time = 0\n\n    \n    self.last_foreman_check = 0\n\n    \n    if worker_cls:\n      self.client_worker = worker_cls(client=self)\n    else:\n      self.client_worker = GRRClientWorker(client=self)\n    \n    \n    \n    self.client_worker.start()", "docstring": "Constructor.\n\nArgs:\nca_cert: String representation of a CA certificate to use for checking\nserver certificate.\nworker_cls: The client worker class to use. Defaults to GRRClientWorker.\nprivate_key: The private key for this client. Defaults to config\nClient.private_key.", "source": "juraj-google-style"}
{"code": "def save(model, filepath, overwrite, include_optimizer, signatures=None, options=None, save_traces=True):\n    if not overwrite and os.path.exists(filepath):\n        proceed = ask_to_proceed_with_overwrite(filepath)\n        if not proceed:\n            return\n    if save_traces:\n        if save_impl.should_skip_serialization(model):\n            saving_utils.raise_model_input_error(model)\n    if not include_optimizer:\n        orig_optimizer = model.optimizer\n        model.optimizer = None\n        model._delete_tracking('optimizer')\n    with K.deprecated_internal_learning_phase_scope(0):\n        with utils.keras_option_scope(save_traces):\n            saved_nodes, node_paths = save_lib.save_and_return_nodes(model, filepath, signatures, options)\n        metadata = generate_keras_metadata(saved_nodes, node_paths)\n    with gfile.GFile(os.path.join(filepath, constants.SAVED_METADATA_PATH), 'wb') as w:\n        w.write(metadata.SerializeToString(deterministic=True))\n    if not include_optimizer:\n        model.optimizer = orig_optimizer", "docstring": "Saves a model as a SavedModel to the filepath.\n\nArgs:\nmodel: Keras model instance to be saved.\nfilepath: String path to save the model.\noverwrite: whether to overwrite the existing filepath.\ninclude_optimizer: If True, save the model's optimizer state.\nsignatures: Signatures to save with the SavedModel. Applicable to the 'tf'\nformat only. Please see the `signatures` argument in `tf.saved_model.save`\nfor details.\noptions: (only applies to SavedModel format) `tf.saved_model.SaveOptions`\nobject that specifies options for saving to SavedModel.\nsave_traces: (only applies to SavedModel format) When enabled, the\nSavedModel will store the function traces for each layer. This\ncan be disabled, so that only the configs of each layer are stored.\nDefaults to `True`. Disabling this will decrease serialization time\nand reduce file size, but it requires that all custom layers/models\nimplement a `get_config()` method.\n\nRaises:\nValueError: if the model's inputs have not been defined.", "source": "github-repos"}
{"code": "def reserveIdentifier(self, pid, vendorSpecific=None):\n        \n        response = self.reserveIdentifierResponse(pid, vendorSpecific)\n        return self._read_dataone_type_response(response, 'Identifier', vendorSpecific)", "docstring": "See Also: reserveIdentifierResponse()\n\nArgs:\npid:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _BroadcastMul(vec, mat):\n    vec = array_ops.expand_dims(vec, -1)\n    return vec * mat", "docstring": "Multiply after broadcasting vec to match dimensions of mat.\n\nArgs:\nvec: A 1-D tensor of dimension [D0]\nmat: A 2-D tensor of dimension [D0, D1]\n\nReturns:\nA tensor of dimension [D0, D1], the result of vec * mat", "source": "github-repos"}
{"code": "def _process_using_meta_feature_generator(self, X, meta_feature_generator):\n    all_learner_meta_features = []\n    for (idx, base_learner) in enumerate(self.base_learners):\n        single_learner_meta_features = getattr(base_learner, self.meta_feature_generators[idx])(X)\n        if (len(single_learner_meta_features.shape) == 1):\n            single_learner_meta_features = single_learner_meta_features.reshape((- 1), 1)\n        all_learner_meta_features.append(single_learner_meta_features)\n    all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)\n    out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)\n    return out", "docstring": "Process using secondary learner meta-feature generator\n\nSince secondary learner meta-feature generator can be anything e.g. predict, predict_proba,\nthis internal method gives the ability to use any string. Just make sure secondary learner\nhas the method.\n\nArgs:\nX (array-like): Features array\n\nmeta_feature_generator (str, unicode): Method for use by secondary learner", "source": "codesearchnet"}
{"code": "def serialized_tensors_to_saveable_cache(serialized_tensors):\n    saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n    for obj, tensor_dict in serialized_tensors.items():\n        if not tensor_dict:\n            continue\n        if isinstance(obj, SaveableCompatibilityConverter):\n            trackable_obj = obj.obj\n            saveables_cache[trackable_obj] = {}\n            for saveable in obj.saveables:\n                local_name = trackable_utils.extract_local_name(saveable.name)\n                saveables_cache[trackable_obj][local_name] = [saveable]\n            continue\n        specs = []\n        local_names = []\n        prefix = saveable_compat.get_saveable_name(obj) or ''\n        for checkpoint_key, maybe_tensor in tensor_dict.items():\n            if not isinstance(maybe_tensor, dict):\n                maybe_tensor = {'': maybe_tensor}\n            for slice_spec, tensor in maybe_tensor.items():\n                if isinstance(tensor, saveable_object.SaveSpec):\n                    specs.append(tensor)\n                else:\n                    specs.append(saveable_object.SaveSpec(tensor, slice_spec, checkpoint_key))\n            local_names.append(trackable_utils.extract_local_name(checkpoint_key, prefix))\n        object_name = trackable_utils.extract_object_name(next(iter(tensor_dict.keys())))\n        saveables_cache[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: [TrackableSaveable(obj, specs, object_name, local_names=local_names, prefix=prefix)]}\n    return saveables_cache", "docstring": "Converts a tensor dict to a SaveableObject cache.\n\nArgs:\nserialized_tensors: Map from Trackable to a tensor dict. The tensor dict\nmaps checkpoint key (-> slice_spec) -> Tensor\n\nReturns:\nA dict mapping Trackable objects to a map from local savable name to\nSaveableObject.", "source": "github-repos"}
{"code": "def __init__(self, *args, exit_code=1, **kwargs):\n        \n        self.exit_code = exit_code\n        super(ScriptWorkerTaskException, self).__init__(*args, **kwargs)", "docstring": "Initialize ScriptWorkerTaskException.\n\nArgs:\n*args: These are passed on via super().\nexit_code (int, optional): The exit_code we should exit with when\nthis exception is raised.  Defaults to 1 (failure).\n**kwargs: These are passed on via super().", "source": "juraj-google-style"}
{"code": "def address_to_ip(address):\n    \n    address_parts = address.split(\":\")\n    ip_address = socket.gethostbyname(address_parts[0])\n    \n    if ip_address == \"127.0.0.1\":\n        ip_address = get_node_ip_address()\n    return \":\".join([ip_address] + address_parts[1:])", "docstring": "Convert a hostname to a numerical IP addresses in an address.\n\nThis should be a no-op if address already contains an actual numerical IP\naddress.\n\nArgs:\naddress: This can be either a string containing a hostname (or an IP\naddress) and a port or it can be just an IP address.\n\nReturns:\nThe same address but with the hostname replaced by a numerical IP\naddress.", "source": "juraj-google-style"}
{"code": "def _DisableNetworkManager(self, interfaces, logger):\n    for interface in interfaces:\n        interface_config = os.path.join(self.network_path, ('ifcfg-%s' % interface))\n        if os.path.exists(interface_config):\n            self._ModifyInterface(interface_config, 'DEVICE', interface, replace=False)\n            self._ModifyInterface(interface_config, 'NM_CONTROLLED', 'no', replace=True)\n        else:\n            with open(interface_config, 'w') as interface_file:\n                interface_content = ['\n                interface_file.write('\\n'.join(interface_content))\n            logger.info('Created config file for interface %s.', interface)", "docstring": "Disable network manager management on a list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"}
{"code": "def get_grouping_from_attentions(attentions, hw_shape):\n    attn_maps = []\n    with torch.no_grad():\n        prev_attn_masks = None\n        for attn_masks in attentions:\n            attn_masks = attn_masks.permute(0, 2, 1).contiguous()\n            if prev_attn_masks is None:\n                prev_attn_masks = attn_masks\n            else:\n                prev_attn_masks = prev_attn_masks @ attn_masks\n            cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape)\n            attn_maps.append(cur_attn_map)\n    final_grouping = attn_maps[-1]\n    return final_grouping", "docstring": "Args:\nattentions (`tuple(torch.FloatTensor)`: tuple of attention maps returned by `GroupViTVisionTransformer`\nhw_shape (`tuple(int)`): height and width of the output attention map\nReturns:\n`torch.Tensor`: the attention map of shape [batch_size, groups, height, width]", "source": "github-repos"}
{"code": "def setUdpJoinerPort(self, portNumber):\n        \n        print '%s call setUdpJoinerPort' % self.port\n        cmd = 'joinerport %d' % portNumber\n        print cmd\n        return self.__sendCommand(cmd)[0] == 'Done'", "docstring": "set Joiner UDP Port\n\nArgs:\nportNumber: Joiner UDP Port number\n\nReturns:\nTrue: successful to set Joiner UDP Port\nFalse: fail to set Joiner UDP Port", "source": "juraj-google-style"}
{"code": "def read_string(self, key, embedded=True):\n        \n        data = None\n        if key is not None:\n            key_type = self.variable_type(key)\n            data = self.db.read(key.strip())\n            if data is not None:\n                \n                try:\n                    data = json.loads(data)\n                    if embedded:\n                        data = self.read_embedded(data, key_type)\n                    if data is not None:\n                        \n                        \n                        \n                        data = u'{}'.format(data)\n                except ValueError as e:\n                    err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)\n                    self.tcex.log.error(err)\n        else:\n            self.tcex.log.warning(u'The key field was None.')\n        return data", "docstring": "Read method of CRUD operation for string data.\n\nArgs:\nkey (string): The variable to read from the DB.\nembedded (boolean): Resolve embedded variables.\n\nReturns:\n(string): Results retrieved from DB.", "source": "juraj-google-style"}
{"code": "def cancelMktData(self, contract: Contract):\n        \n        ticker = self.ticker(contract)\n        reqId = self.wrapper.endTicker(ticker, 'mktData')\n        if reqId:\n            self.client.cancelMktData(reqId)\n        else:\n            self._logger.error(\n                'cancelMktData: ' f'No reqId found for contract {contract}')", "docstring": "Unsubscribe from realtime streaming tick data.\n\nArgs:\ncontract: The exact contract object that was used to\nsubscribe with.", "source": "juraj-google-style"}
{"code": "def is_rotation(self, tol=0.001, include_improper=True):\n    det = np.abs(np.linalg.det(self))\n    if include_improper:\n        det = np.abs(det)\n    return ((np.abs((self.inv - self.trans)) < tol).all() and (np.abs((det - 1.0)) < tol))", "docstring": "Test to see if tensor is a valid rotation matrix, performs a\ntest to check whether the inverse is equal to the transpose\nand if the determinant is equal to one within the specified\ntolerance\n\nArgs:\ntol (float): tolerance to both tests of whether the\nthe determinant is one and the inverse is equal\nto the transpose\ninclude_improper (bool): whether to include improper\nrotations in the determination of validity", "source": "codesearchnet"}
{"code": "def update(self, puts, deletes):\n    with self._lmdb.begin(write=True, buffers=True) as txn:\n        cursor = txn.cursor(self._main_db)\n        for key in deletes:\n            if (not cursor.set_key(key.encode())):\n                continue\n            value = self._deserializer(bytes(cursor.value()))\n            cursor.delete()\n            for (index_db, index_key_fn) in self._indexes.values():\n                index_keys = index_key_fn(value)\n                index_cursor = txn.cursor(index_db)\n                for idx_key in index_keys:\n                    if index_cursor.set_key(idx_key):\n                        index_cursor.delete()\n        for (key, value) in puts:\n            packed = self._serializer(value)\n            cursor.put(key.encode(), packed, overwrite=True)\n            for (index_db, index_key_fn) in self._indexes.values():\n                index_keys = index_key_fn(value)\n                index_cursor = txn.cursor(index_db)\n                for idx_key in index_keys:\n                    index_cursor.put(idx_key, key.encode())\n    self.sync()", "docstring": "Applies the given puts and deletes atomically.\n\nArgs:\nputs (:iterable:`tuple`): an iterable of key/value pairs to insert\ndeletes (:iterable:str:) an iterable of keys to delete", "source": "codesearchnet"}
{"code": "def Group(params, name=None, type=None):\n    atts = {}\n    if name:\n        atts['name'] = name\n    if type:\n        atts['type'] = type\n    g = objectify.Element('Group', attrib=atts)\n    for p in params:\n        g.append(p)\n    return g", "docstring": "Groups together Params for adding under the 'What' section.\n\nArgs:\nparams(list of :func:`Param`): Parameter elements to go in this group.\nname(str): Group name. NB ``None`` is valid, since the group may be\nbest identified by its type.\ntype(str): Type of group, e.g. 'complex' (for real and imaginary).", "source": "codesearchnet"}
{"code": "def _remove_curly_braces(text):\n    current_pos = 0\n    depth = 0\n    ret = ''\n    for match in re.finditer('[{}]', text):\n        if (depth == 0):\n            ret += text[current_pos:match.start()]\n        depth += (1 if (text[match.start()] == '{') else (- 1))\n        current_pos = match.end()\n    if (depth != 0):\n        pass\n    else:\n        ret += text[current_pos:]\n    return ret", "docstring": "Remove everything in curly braces.\n\nCurly braces may be nested, so we keep track of depth.\n\nArgs:\ntext: a string\nReturns:\na string", "source": "codesearchnet"}
{"code": "def delete(self):\n    headers = self.headers\n    endpoint = ('https:\n    r = requests.delete(endpoint, headers=headers)\n    check_response(r)", "docstring": "Deletes this Folder.\n\nRaises:\nAuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.", "source": "codesearchnet"}
{"code": "def __add__(self, other):\n    try:\n        other = as_dimension(other)\n    except (TypeError, ValueError):\n        return NotImplemented\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(self._value + other.value)", "docstring": "Returns the sum of `self` and `other`.\n\nDimensions are summed as follows:\n\n```python\ntf.compat.v1.Dimension(m)    + tf.compat.v1.Dimension(n)     ==\ntf.compat.v1.Dimension(m + n)\ntf.compat.v1.Dimension(m)    + tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n)     # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\n```\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is the sum of `self` and `other`.", "source": "github-repos"}
{"code": "def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name,\n                                container_name=None):\n    \n    if container_name is None:\n        container_name = container_group_name\n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', resource_group,\n                        '/providers/Microsoft.ContainerInstance/ContainerGroups/',\n                        container_group_name,\n                        '/containers/', container_name, '/logs?api-version=', CONTAINER_API])\n    return do_get(endpoint, access_token)", "docstring": "Get the container logs for containers in a container group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ncontainer_group_name (str): Name of container instance group.\ncontainer_name (str): Optional name of a container in the group.\n\nReturns:\nHTTP response. Container logs.", "source": "juraj-google-style"}
{"code": "def apply_grad(self, grad, local_step=0, name=None):\n    grad = ops.convert_to_tensor(grad, self._dtype)\n    grad.get_shape().assert_is_compatible_with(self._shape)\n    local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)\n    return gen_data_flow_ops.resource_accumulator_apply_gradient(self._accumulator_ref, local_step=local_step, gradient=grad, name=name)", "docstring": "Attempts to apply a gradient to the accumulator.\n\nThe attempt is silently dropped if the gradient is stale, i.e., local_step\nis less than the accumulator's global time step.\n\nArgs:\ngrad: The gradient tensor to be applied.\nlocal_step: Time step at which the gradient was computed.\nname: Optional name for the operation.\n\nReturns:\nThe operation that (conditionally) applies a gradient to the accumulator.\n\nRaises:\nValueError: If grad is of the wrong shape", "source": "github-repos"}
{"code": "def _add_impact_severity(self, variant_obj, gemini_variant):\n        \n        gemini_impact = gemini_variant['impact_severity']\n        if gemini_impact == 'MED':\n            gemini_impact = 'MEDIUM'\n        variant_obj.impact_severity = gemini_impact", "docstring": "Add the impact severity for the most severe consequence\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ngemini_variant (GeminiQueryRow)", "source": "juraj-google-style"}
{"code": "def extract_response(self, extractors):\n    if (not extractors):\n        return {}\n    logger.log_debug('start to extract from response object.')\n    extracted_variables_mapping = OrderedDict()\n    extract_binds_order_dict = utils.ensure_mapping_format(extractors)\n    for (key, field) in extract_binds_order_dict.items():\n        extracted_variables_mapping[key] = self.extract_field(field)\n    return extracted_variables_mapping", "docstring": "extract value from requests.Response and store in OrderedDict.\n\nArgs:\nextractors (list):\n\n[\n{\"resp_status_code\": \"status_code\"},\n{\"resp_headers_content_type\": \"headers.content-type\"},\n{\"resp_content\": \"content\"},\n{\"resp_content_person_first_name\": \"content.person.name.first_name\"}\n]\n\nReturns:\nOrderDict: variable binds ordered dict", "source": "codesearchnet"}
{"code": "def from_str(text):\n        \n        segment_list = chat_message_parser.parse(text)\n        return [ChatMessageSegment(segment.text, **segment.params)\n                for segment in segment_list]", "docstring": "Construct :class:`ChatMessageSegment` list parsed from a string.\n\nArgs:\ntext (str): Text to parse. May contain line breaks, URLs and\nformatting markup (simplified Markdown and HTML) to be\nconverted into equivalent segments.\n\nReturns:\nList of :class:`ChatMessageSegment` objects.", "source": "juraj-google-style"}
{"code": "def similar(self, **kwargs):\n        \n        path = self._get_id_path('similar')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the similar TV series for a specific TV series id.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any TV method.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category, module_name):\n    module_name = module_name.replace(':\n    return (((get_image_path(image_lists, label_name, index, bottleneck_dir, category) + '_') + module_name) + '.txt')", "docstring": "Returns a path to a bottleneck file for a label at the given index.\n\nArgs:\nimage_lists: OrderedDict of training images for each label.\nlabel_name: Label string we want to get an image for.\nindex: Integer offset of the image we want. This will be moduloed by the\navailable number of images for the label, so it can be arbitrarily large.\nbottleneck_dir: Folder string holding cached files of bottleneck values.\ncategory: Name string of set to pull images from - training, testing, or\nvalidation.\nmodule_name: The name of the image module being used.\n\nReturns:\nFile system path string to an image that meets the requested parameters.", "source": "codesearchnet"}
{"code": "def touch(path, content=\"\", encoding=\"utf-8\", overwrite=False):\n    \n    path = os.path.abspath(path)\n    if not overwrite and os.path.exists(path):\n        logger.warning('touch: \"%s\" already exists', path)\n        return False\n    try:\n        logger.info(\"touch: %s\", path)\n        with io.open(path, \"wb\") as f:\n            if not isinstance(content, six.binary_type):\n                content = content.encode(encoding)\n            f.write(content)\n        return True\n    except Exception as e:\n        logger.error(\"touch: %s failed. Error: %s\", path, e)\n        return False", "docstring": "Create a file at the given path if it does not already exists.\n\nArgs:\npath (str): Path to the file.\ncontent (str): Optional content that will be written in the file.\nencoding (str): Encoding in which to write the content.\nDefault: ``utf-8``\noverwrite (bool): Overwrite the file if exists.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def get_individual_positions(individuals):\n    \n    ind_pos = {}\n    if individuals:\n        for i, ind in enumerate(individuals):\n            ind_pos[ind] = i\n    return ind_pos", "docstring": "Return a dictionary with individual positions\n\nArgs:\nindividuals(list): A list with vcf individuals in correct order\n\nReturns:\nind_pos(dict): Map from ind_id -> index position", "source": "juraj-google-style"}
{"code": "def alias_tool(self, context_name, tool_name, tool_alias):\n    data = self._context(context_name)\n    aliases = data['tool_aliases']\n    if (tool_name in aliases):\n        raise SuiteError(('Tool %r in context %r is already aliased to %r' % (tool_name, context_name, aliases[tool_name])))\n    self._validate_tool(context_name, tool_name)\n    aliases[tool_name] = tool_alias\n    self._flush_tools()", "docstring": "Register an alias for a specific tool.\n\nNote that a tool alias takes precedence over a context prefix/suffix.\n\nArgs:\ncontext_name (str): Context containing the tool.\ntool_name (str): Name of tool to alias.\ntool_alias (str): Alias to give the tool.", "source": "codesearchnet"}
{"code": "def prepend(self, line, font_attr_segs=None):\n    other = RichTextLines(line)\n    if font_attr_segs:\n        other.font_attr_segs[0] = font_attr_segs\n    self._extend_before(other)", "docstring": "Prepend (i.e., add to the front) a single line of text.\n\nArgs:\nline: (str) The text to be added to the front.\nfont_attr_segs: (list of tuples) Font attribute segments of the appended\nline.", "source": "github-repos"}
{"code": "def add_answer_for_student(student_item, vote, rationale):\n    answers = get_answers_for_student(student_item)\n    answers.add_answer(vote, rationale)\n    sub_api.create_submission(student_item, {ANSWER_LIST_KEY: answers.get_answers_as_list()})", "docstring": "Add an answer for a student to the backend\n\nArgs:\nstudent_item (dict): The location of the problem this submission is\nassociated with, as defined by a course, student, and item.\nvote (int): the option that student voted for\nrationale (str): the reason why the student vote for the option", "source": "codesearchnet"}
{"code": "def sysctl(command):\n    \n    out = subprocess.check_output(command)\n    result = out.split(b\" \")[1]\n    try:\n        return int(result)\n    except ValueError:\n        return result", "docstring": "Run a sysctl command and parse the output.\n\nArgs:\ncommand: A sysctl command with an argument, for example,\n[\"sysctl\", \"hw.memsize\"].\n\nReturns:\nThe parsed output.", "source": "juraj-google-style"}
{"code": "def get_next(self):\n    raise NotImplementedError('Iterator.get_next()')", "docstring": "Returns the next element.\n\n>>> dataset = tf.data.Dataset.from_tensors(42)\n>>> iterator = iter(dataset)\n>>> print(iterator.get_next())\ntf.Tensor(42, shape=(), dtype=int32)\n\nReturns:\nA (nested) structure of values matching `tf.data.Iterator.element_spec`.\n\nRaises:\n`tf.errors.OutOfRangeError`: If the end of the iterator has been reached.", "source": "github-repos"}
{"code": "async def get_json(self, url, json_callback=None, **kwargs):\n        \n        if not json_callback:\n            json_callback = json.loads\n        response = await self.request(method='get', url=url, **kwargs)\n        return json_callback(response)", "docstring": "Get a URL and return its JSON response.\n\nArgs:\nurl (str): URL to be requested.\njson_callback (func): Custom JSON loader function. Defaults\nto :meth:`json.loads`.\nkwargs (dict): Additional arguments to pass through to the\nrequest.\nReturns:\nresponse body returned by :func:`json_callback` function.", "source": "juraj-google-style"}
{"code": "def _controller_buffer(self, port):\n    address = _LIB.Controller(self._env, port)\n    buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents\n    return np.frombuffer(buffer_, dtype='uint8')", "docstring": "Find the pointer to a controller and setup a NumPy buffer.\n\nArgs:\nport: the port of the controller to setup\n\nReturns:\na NumPy buffer with the controller's binary data", "source": "codesearchnet"}
{"code": "def _flatten_dict(original_dict):\n  \n  flat_dict = {}\n  for key, value in original_dict.items():\n    if isinstance(value, dict):\n      for name, tensor in value.items():\n        if isinstance(tensor, dict):\n          raise ValueError(\"flatten_dict only handles 2 levels of nesting.\")\n        flat_key = \"__\" + key + \"_\" + name\n        flat_dict[flat_key] = tensor\n    else:\n      flat_dict[key] = value\n\n  return flat_dict", "docstring": "Flatten dict of dicts into a single dict with appropriate prefixes.\n\nHandles only 2 levels of nesting in the original dict.\n\nArgs:\noriginal_dict: Dict which may contain one or more dicts.\nReturns:\nflat_dict: Dict without any nesting. Any dicts in the original dict have\ntheir keys as prefixes in the new dict.\nRaises:\nValueError if the original dict has more than two levels of nesting.", "source": "juraj-google-style"}
{"code": "def stats_per_utterance(self):\n    all_stats = {}\n    for utterance in self.utterances.values():\n        data = utterance.read_samples()\n        all_stats[utterance.idx] = stats.DataStats(float(np.mean(data)), float(np.var(data)), np.min(data), np.max(data), data.size)\n    return all_stats", "docstring": "Return statistics calculated for all samples of each utterance in the corpus.\n\nReturns:\ndict: A dictionary containing a DataStats object for each utt.", "source": "codesearchnet"}
{"code": "def __init__(self, runner_results):\n    \n    super(DataflowJob, self).__init__(runner_results._job.name)\n    self._runner_results = runner_results", "docstring": "Initializes an instance of a DataFlow Job.\n\nArgs:\nrunner_results: a DataflowPipelineResult returned from Pipeline.run().", "source": "juraj-google-style"}
{"code": "def _pull_out_perm_lhs(lhs, rest, out_port, in_port):\n    \n    out_inv, lhs_red = lhs._factor_lhs(out_port)\n    return lhs_red << Feedback.create(SeriesProduct.create(*rest),\n                                      out_port=out_inv, in_port=in_port)", "docstring": "Pull out a permutation from the Feedback of a SeriesProduct with itself.\n\nArgs:\nlhs (CPermutation): The permutation circuit\nrest (tuple): The other SeriesProduct operands\nout_port (int): The feedback output port index\nin_port (int): The feedback input port index\n\nReturns:\nCircuit: The simplified circuit", "source": "juraj-google-style"}
{"code": "def __init__(self, tpu_hardware_feature_proto):\n    self.tpu_hardware_feature_proto = tpu_hardware_feature_proto", "docstring": "Store TPU hardware feature info.\n\nArgs:\ntpu_hardware_feature_proto: protobuf which describe the tpu hardware\nfeature.", "source": "github-repos"}
{"code": "def unsafe_peek(init):\n    \n    def peek(store, container, _stack=None):\n        return init(*[ store.peek(attr, container, _stack=_stack) for attr in container ])\n    return peek", "docstring": "Deserialize all the attributes available in the container and pass them in the same order\nas they come in the container.\n\nThis is a factory function; returns the actual `peek` routine.\n\nArguments:\n\ninit: type constructor.\n\nReturns:\n\ncallable: deserializer (`peek` routine).", "source": "juraj-google-style"}
{"code": "def cross_entropy_loss(weights: Array, x: Array, y: Array) -> Array:\n    pred = 1 / (1 + jnp.exp(-x.dot(weights)))\n    return -jnp.mean(y * jnp.log(pred) + (1 - y) * jnp.log(1 - pred))", "docstring": "Calcurates a cross entropy loss with a prediction by a sigmoid function.\n\nArgs:\nweights: A weight vector.\nx: An input array.\ny: A target output array.\n\nReturns:\nA cross entropy loss.", "source": "github-repos"}
{"code": "def _load_yaml_credentials(filename=None, yaml_key=None):\n    try:\n        with open(os.path.expanduser(filename)) as f:\n            search_creds = yaml.safe_load(f)[yaml_key]\n    except FileNotFoundError:\n        logger.error('cannot read file {}'.format(filename))\n        search_creds = {}\n    except KeyError:\n        logger.error('{} is missing the provided key: {}'.format(filename, yaml_key))\n        search_creds = {}\n    return search_creds", "docstring": "Loads and parses credentials in a YAML file. Catches common exceptions\nand returns an empty dict on error, which will be handled downstream.\n\nReturns:\ndict: parsed credentials or {}", "source": "codesearchnet"}
{"code": "def dbmin05years(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmin05years`'.format(value))\n    self._dbmin05years = value", "docstring": "Corresponds to IDD Field `dbmin05years`\n5-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin05years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def trace(self, predicate):\n    self._handler = predicate\n    if ((self.threading_support is None) or self.threading_support):\n        self._threading_previous = getattr(threading, '_trace_hook', None)\n        threading.settrace(self)\n    self._previous = sys.gettrace()\n    sys.settrace(self)\n    return self", "docstring": "Starts tracing with the given callable.\n\nArgs:\npredicate (callable that accepts a single :obj:`hunter.Event` argument):\nReturn:\nself", "source": "codesearchnet"}
{"code": "def _parse_package(cls, package_string):\n        \n        pkg, arch = rsplit(package_string, cls._arch_sep(package_string))\n        if arch not in KNOWN_ARCHITECTURES:\n            pkg, arch = (package_string, None)\n        pkg, release = rsplit(pkg, '-')\n        name, version = rsplit(pkg, '-')\n        epoch, version = version.split(':', 1) if \":\" in version else ['0', version]\n        \n        if name.startswith('oracleasm') and name.endswith('.el5'):\n            name, version2 = name.split('-', 1)\n            version = version2 + '-' + version\n        return {\n            'name': name,\n            'version': version,\n            'release': release,\n            'arch': arch,\n            'epoch': epoch\n        }", "docstring": "Helper method for parsing package string.\n\nArgs:\npackage_string (str): dash separated package string such as 'bash-4.2.39-3.el7'\n\nReturns:\ndict: dictionary containing 'name', 'version', 'release' and 'arch' keys", "source": "juraj-google-style"}
{"code": "def _old_init(cls, fields, shape, nrows, row_partitions, internal=False):\n    assert isinstance(fields, dict), fields\n    assert isinstance(shape, tensor_shape.TensorShape), shape\n    assert nrows is None or isinstance(nrows, tensor.Tensor), nrows\n    assert row_partitions is None or isinstance(row_partitions, tuple), row_partitions\n    return StructuredTensor(fields=fields, ragged_shape=_dynamic_ragged_shape_init(fields, shape, nrows, row_partitions))", "docstring": "Private constructor -- use factory methods to create StructuredTensors.\n\nThis constructor builds a `StructuredTensor` from the given attributes,\nperforming minimal validation.\n\nArgs:\nfields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or\n`StructuredTensor`.  (This dict is not copied, so the caller must ensure\nthat it does not get mutated via leaked references.)\nshape: `tf.TensorShape` with statically known rank.\nnrows: scalar integer `tf.Tensor`, or `None` if `shape.rank==0`.\nrow_partitions: tuple of `RowPartition`s, with length `shape.rank-1`.\ninternal: ignored argument.\n\nReturns:\na StructuredTensor.", "source": "github-repos"}
{"code": "def mme_match(case_obj, match_type, mme_base_url, mme_token, nodes=None, mme_accepts=None):\n    \n    query_patients = []\n    server_responses = []\n    url = None\n    \n    query_patients = case_obj['mme_submission']['patients']\n    if match_type=='internal':\n        url = ''.join([mme_base_url,'/match'])\n        for patient in query_patients:\n            json_resp = matchmaker_request(url=url, token=mme_token, method='POST',\n                content_type=mme_accepts, accept=mme_accepts, data={'patient':patient})\n            resp_obj = {\n                'server' : 'Local MatchMaker node',\n                'patient_id' : patient['id'],\n                'results' : json_resp.get('results'),\n                'status_code' : json_resp.get('status_code'),\n                'message' : json_resp.get('message') \n            }\n            server_responses.append(resp_obj)\n    else: \n        \n        query_patients = [ patient['id'] for patient in query_patients]\n        node_ids = [ node['id'] for node in nodes ]\n        if match_type in node_ids: \n            node_ids = [match_type]\n\n        \n        for patient in query_patients:\n            \n            for node in node_ids:\n                url = ''.join([mme_base_url,'/match/external/', patient, '?node=', node])\n                json_resp = matchmaker_request(url=url, token=mme_token, method='POST')\n                resp_obj = {\n                    'server' : node,\n                    'patient_id' : patient,\n                    'results' : json_resp.get('results'),\n                    'status_code' : json_resp.get('status_code'),\n                    'message' : json_resp.get('message') \n                }\n                server_responses.append(resp_obj)\n    return server_responses", "docstring": "Initiate a MatchMaker match against either other Scout patients or external nodes\n\nArgs:\ncase_obj(dict): a scout case object already submitted to MME\nmatch_type(str): 'internal' or 'external'\nmme_base_url(str): base url of the MME server\nmme_token(str): auth token of the MME server\nmme_accepts(str): request content accepted by MME server (only for internal matches)\n\nReturns:\nmatches(list): a list of eventual matches", "source": "juraj-google-style"}
{"code": "def extract(self, tokens: List[Token]) -> List[Extraction]:\n        \n        results = list()\n\n        if len(tokens) > 0:\n            if self._case_sensitive:\n                new_tokens = [x.orth_ if isinstance(x, Token) else x for x in tokens]\n            else:\n                new_tokens = [x.lower_ if isinstance(x, Token) else x.lower() for x in tokens]\n        else:\n            return results\n\n        try:\n            ngrams_iter = self._generate_ngrams_with_context(new_tokens)\n            results.extend(map(lambda term: self._wrap_value_with_context(tokens, term[1], term[2]),\n                               filter(lambda term: isinstance(term[0], str),\n                                      map(lambda term: (self._glossary.get(term[0]), term[1], term[2]),\n                                          map(lambda term: (\n                                              self._combine_ngrams(term[0], self._joiner), term[1], term[2]),\n                                              ngrams_iter)))))\n        except Exception as e:\n            raise ExtractorError('GlossaryExtractor: Failed to extract with ' + self.name + '. Catch ' + str(e) + '. ')\n        return results", "docstring": "Extracts information from a string(TEXT) with the GlossaryExtractor instance\n\nArgs:\ntoken (List[Token]): list of spaCy token to be processed.\n\nReturns:\nList[Extraction]: the list of extraction or the empty list if there are no matches.", "source": "juraj-google-style"}
{"code": "def backup(self, backup_name, folder_key=None, folder_name=None):\n        \n\n        folder = self._find_or_create_folder(folder_key, folder_name)\n        drive_service = self.drive_service\n        try:\n            source_rsrc = drive_service.files().get(fileId=self.document_key).execute()\n        except Exception, e:\n            logger.exception(\"Google API error. %s\", e)\n            raise e\n\n        backup = self._create_new_or_copy(source_doc=source_rsrc, \n                                        target_name=backup_name, \n                                        folder=folder,\n                                        sheet_description=\"backup\")\n\n        backup_key = backup['id']\n        return backup_key", "docstring": "Copies the google spreadsheet to the backup_name and folder specified.\n\nArgs:\nbackup_name (str): The name of the backup document to create.\n\nfolder_key (Optional) (str): The key of a folder that the new copy will\nbe moved to.\n\nfolder_name (Optional) (str): Like folder_key, references the folder to move a\nbackup to. If the folder can't be found, sheetsync will create it.", "source": "juraj-google-style"}
{"code": "def __call__(self,\n            state: Sequence[tf.Tensor],\n            timestep: tf.Tensor) -> Sequence[tf.Tensor]:\n        \n        action, _, _ = self._sample_actions(state)\n        return action", "docstring": "Returns sampled action fluents for the current `state` and `timestep`.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\ntimestep (tf.Tensor): The current timestep.\n\nReturns:\nSequence[tf.Tensor]: A tuple of action fluents.", "source": "juraj-google-style"}
{"code": "def connect(self, **kwargs):\n        \n        self.app = self._app.connect(**kwargs)\n        try:\n            self._top_window = self.app.top_window().wrapper_object()\n            self.set_foreground()\n        except RuntimeError:\n            self._top_window = None", "docstring": "Connect to window and set it foreground\n\nArgs:\n**kwargs: optional arguments\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _find_methods(cls, *names, **kwds):\n    reverse = kwds.pop('reverse', False)\n    assert (not kwds), repr(kwds)\n    cache = cls.__dict__.get('_find_methods_cache')\n    if cache:\n        hit = cache.get(names)\n        if (hit is not None):\n            return hit\n    else:\n        cls._find_methods_cache = cache = {}\n    methods = []\n    for c in cls.__mro__:\n        for name in names:\n            method = c.__dict__.get(name)\n            if (method is not None):\n                methods.append(method)\n    if reverse:\n        methods.reverse()\n    cache[names] = methods\n    return methods", "docstring": "Compute a list of composable methods.\n\nBecause this is a common operation and the class hierarchy is\nstatic, the outcome is cached (assuming that for a particular list\nof names the reversed flag is either always on, or always off).\n\nArgs:\n*names: One or more method names.\nreverse: Optional flag, default False; if True, the list is\nreversed.\n\nReturns:\nA list of callable class method objects.", "source": "codesearchnet"}
{"code": "def getAsGrassAsciiGrid(self, session):\n    if (type(self.raster) != type(None)):\n        converter = RasterConverter(sqlAlchemyEngineOrSession=session)\n        return converter.getAsGrassAsciiRaster(tableName=self.tableName, rasterIdFieldName='id', rasterId=self.id, rasterFieldName=self.rasterColumnName)", "docstring": "Retrieve the raster in the GRASS ASCII Grid format.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\n\nReturns:\nstr: GRASS ASCII string.", "source": "codesearchnet"}
{"code": "def Match(self, registry_key):\n    \n    key_path = registry_key.path.upper()\n    if self._key_path_prefix and self._key_path_suffix:\n      if (key_path.startswith(self._key_path_prefix) and\n          key_path.endswith(self._key_path_suffix)):\n\n        key_path_segment = key_path[\n            len(self._key_path_prefix):-len(self._key_path_suffix)]\n        if key_path_segment.startswith('ControlSet'.upper()):\n          try:\n            control_set = int(key_path_segment[10:], 10)\n          except ValueError:\n            control_set = None\n\n          \n          return control_set is not None\n\n    return key_path in (self._key_path_upper, self._wow64_key_path_upper)", "docstring": "Determines if a Windows Registry key matches the filter.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nReturns:\nbool: True if the keys match.", "source": "juraj-google-style"}
{"code": "def _add_weight(self, name, initial_value, dtype=None):\n    variable = variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=dtype, trainable=False, use_resource=True, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE)\n    if context.executing_eagerly():\n        graph_key = None\n    else:\n        graph = ops.get_default_graph()\n        graph_key = graph._graph_key\n    key = (name, graph_key)\n    self._weights[key] = variable\n    self._handle_deferred_dependencies(name=name, trackable=variable)\n    backend.track_variable(variable)\n    return variable", "docstring": "Adds a weight to this loss scale.\n\nArgs:\nname: Variable name.\ninitial_value: The variable's initial value.\ndtype: The type of the variable.\n\nReturns:\nA variable.\n\nRaises:\nRuntimeError: If a weight with `name` has already been added.", "source": "github-repos"}
{"code": "def find(self, collection, query):\n    obj = getattr(self.db, collection)\n    result = obj.find(query)\n    return result", "docstring": "Search a collection for the query provided. Just a raw interface to\nmongo to do any query you want.\n\nArgs:\ncollection: The db collection. See main class documentation.\nquery: A mongo find query.\nReturns:\npymongo Cursor object with the results.", "source": "codesearchnet"}
{"code": "def get_help(sakefile):\n    \n    full_string = \"You can 'sake' one of the following...\\n\\n\"\n    errmes = \"target '{}' is not allowed to not have help message\\n\"\n    outerlines = []\n    for target in sakefile:\n        if target == \"all\":\n            \n            continue\n        middle_lines = []\n        if \"formula\" not in sakefile[target]:\n            \n            innerstr = \"{}:\\n  - {}\\n\\n\".format(escp(target),\n                                                sakefile[target][\"help\"])\n            inner = []\n            for atom_target in sakefile[target]:\n                if atom_target == \"help\":\n                    continue\n                inner.append(\"    {}:\\n      -  {}\\n\\n\".format(escp(atom_target),\n                                                               sakefile[target][atom_target][\"help\"]))\n            if inner:\n                innerstr += '\\n'.join(sorted(inner))\n            middle_lines.append(innerstr)\n        else:\n            middle_lines.append(\"{}:\\n  - {}\\n\\n\".format(escp(target),\n                                                         sakefile[target][\"help\"]))\n        if middle_lines:\n            outerlines.append('\\n'.join(sorted(middle_lines)))\n\n    if outerlines:\n        full_string += '\\n'.join(sorted(outerlines))\n    what_clean_does = \"remove all targets' outputs and start from scratch\"\n    full_string += \"\\nclean:\\n  -  {}\\n\\n\".format(what_clean_does)\n    what_visual_does = \"output visual representation of project's dependencies\"\n    full_string += \"visual:\\n  -  {}\\n\".format(what_visual_does)\n    full_string = re.sub(\"\\n{3,}\", \"\\n\\n\", full_string)\n    return full_string", "docstring": "Returns the prettily formatted help strings (for printing)\n\nArgs:\nA dictionary that is the parsed Sakefile (from sake.py)\n\nNOTE:\nthe list sorting in this function is required for this\nfunction to be deterministic", "source": "juraj-google-style"}
{"code": "def asdim(dimension):\n    \n    if isinstance(dimension, Dimension):\n        return dimension\n    elif isinstance(dimension, (tuple, dict, basestring)):\n        return Dimension(dimension)\n    else:\n        raise ValueError('%s type could not be interpreted as Dimension. '\n                         'Dimensions must be declared as a string, tuple, '\n                         'dictionary or Dimension type.')", "docstring": "Convert the input to a Dimension.\n\nArgs:\ndimension: tuple, dict or string type to convert to Dimension\n\nReturns:\nA Dimension object constructed from the dimension spec. No\ncopy is performed if the input is already a Dimension.", "source": "juraj-google-style"}
{"code": "def _query(queue_name=None, build_id=None, release_id=None, run_id=None,\n           count=None):\n    \n    assert queue_name or build_id or release_id or run_id\n\n    q = WorkQueue.query\n    if queue_name:\n        q = q.filter_by(queue_name=queue_name)\n    if build_id:\n        q = q.filter_by(build_id=build_id)\n    if release_id:\n        q = q.filter_by(release_id=release_id)\n    if run_id:\n        q = q.filter_by(run_id=run_id)\n\n    q = q.order_by(WorkQueue.created.desc())\n\n    if count is not None:\n        q = q.limit(count)\n\n    return q.all()", "docstring": "Queries for work items based on their criteria.\n\nArgs:\nqueue_name: Optional queue name to restrict to.\nbuild_id: Optional build ID to restrict to.\nrelease_id: Optional release ID to restrict to.\nrun_id: Optional run ID to restrict to.\ncount: How many tasks to fetch. Defaults to None, which means all\ntasks are fetch that match the query.\n\nReturns:\nList of WorkQueue items.", "source": "juraj-google-style"}
{"code": "def sanitize_git_path(self, uri, ref=None):\n        \n        if uri.endswith('.git'):\n            dir_name = uri[:-4]  \n        else:\n            dir_name = uri\n        dir_name = self.sanitize_uri_path(dir_name)\n        if ref is not None:\n            dir_name += \"-%s\" % ref\n        return dir_name", "docstring": "Take a git URI and ref and converts it to a directory safe path.\n\nArgs:\nuri (string): git URI\n(e.g. git@github.com:foo/bar.git)\nref (string): optional git ref to be appended to the path\n\nReturns:\nstr: Directory name for the supplied uri", "source": "juraj-google-style"}
{"code": "def get_fba_flux(self, objective):\n    flux_result = self.solve_fba(objective)\n    fba_fluxes = {}\n    for key in self._model.reactions:\n        fba_fluxes[key] = flux_result.get_value(self._v_wt[key])\n    return fba_fluxes", "docstring": "Return a dictionary of all the fluxes solved by FBA.\n\nDictionary of fluxes is used in :meth:`.lin_moma` and :meth:`.moma`\nto minimize changes in the flux distributions following model\nperturbation.\n\nArgs:\nobjective: The objective reaction that is maximized.\n\nReturns:\nDictionary of fluxes for each reaction in the model.", "source": "codesearchnet"}
{"code": "def trailing_stop_loss_replace(self, accountID, orderID, **kwargs):\n        \n        return self.replace(\n            accountID,\n            orderID,\n            order=TrailingStopLossOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to replace a pending Trailing Stop Loss Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Take Profit Order to replace\nkwargs : The arguments to create a TrailingStopLossOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def __init__(self, host_url, username, password):\n    \n    self.host_url = host_url\n    self.api_base_url = '{0:s}/api/v1'.format(self.host_url)\n    self.username = username\n    self.session = self._create_session(username, password)", "docstring": "Initialize the Timesketch API client object.\n\nArgs:\nhost_url (str): URL of Timesketch instance\nusername (str): Timesketch username\npassword (str): Timesketch password", "source": "juraj-google-style"}
{"code": "def download_software_version(version=None, synch=False):\n    if (not version):\n        raise CommandExecutionError('Version option must not be none.')\n    if (not isinstance(synch, bool)):\n        raise CommandExecutionError('Synch option must be boolean..')\n    if (synch is True):\n        query = {'type': 'op', 'cmd': '<request><system><software><download><version>{0}</version></download></software></system></request>'.format(version)}\n    else:\n        query = {'type': 'op', 'cmd': '<request><system><software><download><sync-to-peer>yes</sync-to-peer><version>{0}</version></download></software></system></request>'.format(version)}\n    return _get_job_results(query)", "docstring": "Download software packages by version number.\n\nArgs:\nversion(str): The version of the PANOS file to download.\n\nsynch (bool): If true then the file will synch to the peer unit.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' panos.download_software_version 8.0.0\nsalt '*' panos.download_software_version 8.0.0 True", "source": "codesearchnet"}
{"code": "def _covert_to_hashable(data):\n    r\n    if isinstance(data, six.binary_type):\n        hashable = data\n        prefix = b'TXT'\n    elif util_type.HAVE_NUMPY and isinstance(data, np.ndarray):\n        if data.dtype.kind == 'O':\n            msg = '[ut] hashing ndarrays with dtype=object is unstable'\n            warnings.warn(msg, RuntimeWarning)\n            hashable = data.dumps()\n        else:\n            hashable = data.tobytes()\n        prefix = b'NDARR'\n    elif isinstance(data, six.text_type):\n        \n        hashable = data.encode('utf-8')\n        prefix = b'TXT'\n    elif isinstance(data, uuid.UUID):\n        hashable = data.bytes\n        prefix = b'UUID'\n    elif isinstance(data, int):\n        \n        hashable = _int_to_bytes(data)\n        \n        prefix = b'INT'\n    \n    \n    \n    elif util_type.HAVE_NUMPY and isinstance(data, np.int64):\n        return _covert_to_hashable(int(data))\n    elif util_type.HAVE_NUMPY and isinstance(data, np.float64):\n        a, b = float(data).as_integer_ratio()\n        hashable = (a.to_bytes(8, byteorder='big') +\n                    b.to_bytes(8, byteorder='big'))\n        prefix = b'FLOAT'\n    else:\n        raise TypeError('unknown hashable type=%r' % (type(data)))\n        \n        \n        \n    prefix = b''\n    return prefix, hashable", "docstring": "r\"\"\"\nArgs:\ndata (?):\n\nReturns:\n?:\n\nCommandLine:\npython -m utool.util_hash _covert_to_hashable\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_hash import *  # NOQA\n>>> from utool.util_hash import _covert_to_hashable  # NOQA\n>>> import utool as ut\n>>> data = np.array([1], dtype=np.int64)\n>>> result = _covert_to_hashable(data)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def get_users_by_email(cls, emails):\n    users = User.objects.filter(email__in=emails)\n    present_emails = users.values_list('email', flat=True)\n    missing_emails = list((set(emails) - set(present_emails)))\n    return (users, missing_emails)", "docstring": "Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.\n\nArgs:\nemails: An iterable of email addresses to split between existing and nonexisting\n\nReturns:\nusers: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses\nmissing_emails: List of unique emails which were in the original list, but do not yet exist as users", "source": "codesearchnet"}
{"code": "def sk_log_loss(y_true: Union[(List[List[float]], List[List[int]], np.ndarray)], y_predicted: Union[(List[List[float]], List[List[int]], np.ndarray)]) -> float:\n    return log_loss(y_true, y_predicted)", "docstring": "Calculates log loss.\n\nArgs:\ny_true: list or array of true values\ny_predicted: list or array of predicted values\n\nReturns:\nLog loss", "source": "codesearchnet"}
{"code": "def _create_pseudo_names(tensors, prefix):\n\n    def one_index(ele):\n        if isinstance(ele, int):\n            return ele + 1\n        return ele\n    flat_paths = list(nest.yield_flat_paths(tensors))\n    flat_paths = nest.map_structure(one_index, flat_paths)\n    names = []\n    for path in flat_paths:\n        if not path:\n            name = prefix + '1'\n        else:\n            name = '_'.join((str(p) for p in path))\n            if isinstance(path[0], int):\n                name = prefix + name\n        names.append(name)\n    return names", "docstring": "Creates pseudo {input | output} names for subclassed Models.\n\nWarning: this function should only be used to define default\nnames for `Metrics` and `SavedModel`. No other use cases should\nrely on a `Model`'s input or output names.\n\nExample with dict:\n\n`{'a': [x1, x2], 'b': x3}` becomes:\n`['a_1', 'a_2', 'b']`\n\nExample with list:\n\n`[x, y]` becomes:\n`['output_1', 'output_2']`\n\nArgs:\ntensors: `Model`'s outputs or inputs.\nprefix: 'output_' for outputs, 'input_' for inputs.\n\nReturns:\nFlattened list of pseudo names.", "source": "github-repos"}
{"code": "def variants_export_header(case_obj):\n    header = []\n    header = (header + EXPORT_HEADER)\n    for individual in case_obj['individuals']:\n        display_name = str(individual['display_name'])\n        header.append(('AD_reference_' + display_name))\n        header.append(('AD_alternate_' + display_name))\n        header.append(('GT_quality_' + display_name))\n    return header", "docstring": "Returns a header for the CSV file with the filtered variants to be exported.\n\nArgs:\ncase_obj(scout.models.Case)\n\nReturns:\nheader: includes the fields defined in scout.constants.variants_export EXPORT_HEADER\n+ AD_reference, AD_alternate, GT_quality for each sample analysed for a case", "source": "codesearchnet"}
{"code": "def group_entities(self, entities: List[dict]) -> List[dict]:\n    entity_groups = []\n    entity_group_disagg = []\n    for entity in entities:\n        if not entity_group_disagg:\n            entity_group_disagg.append(entity)\n            continue\n        bi, tag = self.get_tag(entity['entity'])\n        last_bi, last_tag = self.get_tag(entity_group_disagg[-1]['entity'])\n        if tag == last_tag and bi != 'B':\n            entity_group_disagg.append(entity)\n        else:\n            entity_groups.append(self.group_sub_entities(entity_group_disagg))\n            entity_group_disagg = [entity]\n    if entity_group_disagg:\n        entity_groups.append(self.group_sub_entities(entity_group_disagg))\n    return entity_groups", "docstring": "Find and group together the adjacent tokens with the same entity predicted.\n\nArgs:\nentities (`dict`): The entities predicted by the pipeline.", "source": "github-repos"}
{"code": "def clear_redis(self, variable, clear_type):\n    if (variable is None):\n        return\n    if (variable in self._clear_redis_tracker):\n        return\n    if (not re.match(self._vars_match, variable)):\n        return\n    self.log.info('[{}] Deleting redis variable: {}.'.format(clear_type, variable))\n    print('Clearing Variables: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, variable))\n    self.tcex.playbook.delete(variable)\n    self._clear_redis_tracker.append(variable)", "docstring": "Delete Redis data for provided variable.\n\nArgs:\nvariable (str): The Redis variable to delete.\nclear_type (str): The type of clear action.", "source": "codesearchnet"}
{"code": "def _bfd_multiplier(self, **kwargs):\n        \n        int_type = kwargs['int_type']\n        method_name = 'interface_%s_bfd_interval_multiplier' % int_type\n        bfd_multiplier = getattr(self._interface, method_name)\n        config = bfd_multiplier(**kwargs)\n        if kwargs['delete']:\n            tag = 'multiplier'\n            config.find('.\n        return config", "docstring": "Return the BFD multiplier XML.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\nmin_tx (str): BFD transmit interval in milliseconds (300, 500, etc)\ndelete (bool): Remove the configuration if ``True``.\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def get_stream_action_type(stream_arn):\n    \n\n    stream_type_map = {\n        \"kinesis\": awacs.kinesis.Action,\n        \"dynamodb\": awacs.dynamodb.Action,\n    }\n\n    stream_type = stream_arn.split(\":\")[2]\n    try:\n        return stream_type_map[stream_type]\n    except KeyError:\n        raise ValueError(\n            \"Invalid stream type '%s' in arn '%s'\" % (stream_type, stream_arn)\n        )", "docstring": "Returns the awacs Action for a stream type given an arn\n\nArgs:\nstream_arn (str): The Arn of the stream.\n\nReturns:\n:class:`awacs.aws.Action`: The appropriate stream type awacs Action\nclass\n\nRaises:\nValueError: If the stream type doesn't match kinesis or dynamodb.", "source": "juraj-google-style"}
{"code": "def status(self, workflow_id):\n        \n        self.logger.debug('Get status of workflow: ' + workflow_id)\n        url = '%(wf_url)s/%(wf_id)s' % {\n            'wf_url': self.workflows_url, 'wf_id': workflow_id\n        }\n        r = self.gbdx_connection.get(url)\n        r.raise_for_status()\n        return r.json()['state']", "docstring": "Checks workflow status.\n\nArgs:\nworkflow_id (str): Workflow id.\n\nReturns:\nWorkflow status (str).", "source": "juraj-google-style"}
{"code": "def is_placeholder(x):\n    try:\n        if ops.executing_eagerly_outside_functions():\n            return hasattr(x, '_is_backend_placeholder')\n        from tensorflow.python.keras.utils import tf_utils\n        if tf_utils.is_extension_type(x):\n            flat_components = nest.flatten(x, expand_composites=True)\n            return py_any((is_placeholder(c) for c in flat_components))\n        else:\n            return x.op.type == 'Placeholder'\n    except AttributeError:\n        return False", "docstring": "Returns whether `x` is a placeholder.\n\nArgs:\nx: A candidate placeholder.\n\nReturns:\nBoolean.", "source": "github-repos"}
{"code": "def isloaded(self, name):\n        \n        if name is None:\n            return True\n\n        if isinstance(name, str):\n            return (name in [x.__module__ for x in self])\n\n        if isinstance(name, Iterable):\n            return set(name).issubset([x.__module__ for x in self])\n\n        return False", "docstring": "Checks if given hook module has been loaded\n\nArgs:\nname (str): The name of the module to check\n\nReturns:\nbool.  The return code::\n\nTrue -- Loaded\nFalse -- Not Loaded", "source": "juraj-google-style"}
{"code": "def get_key(self, key, request_only=False):\n        \n        values = {}\n        requested_names = [x.name for x in self._package_requests\n                           if not x.conflict]\n\n        for pkg in self.resolved_packages:\n            if (not request_only) or (pkg.name in requested_names):\n                value = getattr(pkg, key)\n                if value is not None:\n                    values[pkg.name] = (pkg, value)\n\n        return values", "docstring": "Get a data key value for each resolved package.\n\nArgs:\nkey (str): String key of property, eg 'tools'.\nrequest_only (bool): If True, only return the key from resolved\npackages that were also present in the request.\n\nReturns:\nDict of {pkg-name: (variant, value)}.", "source": "juraj-google-style"}
{"code": "def Get(self, key):\n    if (key not in self._hash):\n        raise KeyError(key)\n    node = self._hash[key]\n    self._age.Unlink(node)\n    self._age.AppendNode(node)\n    return node.data", "docstring": "Fetch the object from cache.\n\nObjects may be flushed from cache at any time. Callers must always\nhandle the possibility of KeyError raised here.\n\nArgs:\nkey: The key used to access the object.\n\nReturns:\nCached object.\n\nRaises:\nKeyError: If the object is not present in the cache.", "source": "codesearchnet"}
{"code": "def require(self, entity_type, attribute_name=None):\n        \n        if not attribute_name:\n            attribute_name = entity_type\n        self.requires += [(entity_type, attribute_name)]\n        return self", "docstring": "The intent parser should require an entity of the provided type.\n\nArgs:\nentity_type(str): an entity type\nattribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.\n\nReturns:\nself: to continue modifications.", "source": "juraj-google-style"}
{"code": "def unique_bitstrings_with_counts(bitstrings, out_idx=tf.dtypes.int32):\n    y, idx, count = tf.raw_ops.UniqueWithCountsV2(x=bitstrings, axis=[0], out_idx=out_idx)\n    return (y, idx, count)", "docstring": "Extract the unique bitstrings in the given bitstring tensor.\n\nArgs:\nbitstrings: 2-D `tf.Tensor`, interpreted as a list of bitstrings.\nout_idx: An optional `tf.DType` from: `tf.int32`, `tf.int64`. Defaults to\n`tf.int32`.  Specifies the type of `count` output.\n\nReturns:\ny: 2-D `tf.Tensor` of same dtype as `bitstrings`, containing the unique\n0-axis entries of `bitstrings`.\nidx: The index of each value of the input in the unique output `y`.\ncount: 1-D `tf.Tensor` of dtype `out_idx` such that `count[i]` is the\nnumber of occurences of `y[i]` in `bitstrings`.", "source": "github-repos"}
{"code": "def _add_namespace(marc_xml):\n    dom = marc_xml\n    if isinstance(dom, basestring):\n        dom = dhtmlparser.parseString(marc_xml)\n    root = dom.find('root')\n    if root:\n        root[0].params = {}\n    for record in dom.find('record'):\n        record.params = {}\n    collections = dom.find('collection')\n    if (not collections):\n        record = dom.find('record')[0]\n        return XML_TEMPLATE.replace('$CONTENT', str(record))\n    for col in collections:\n        col.params['xmlns'] = 'http:\n        col.params['xmlns:xsi'] = 'http:\n        col.params['xsi:schemaLocation'] = ('http:\n    return str(dom)", "docstring": "Add proper XML namespace to the `marc_xml` record.\n\nArgs:\nmarc_xml (str): String representation of the XML record.\n\nReturns:\nstr: XML with namespace.", "source": "codesearchnet"}
{"code": "async def do_cmd(self, *args, success=None):\n        \n        if success is None:\n            success = (250,)\n\n        cmd = \" \".join(args)\n\n        await self.writer.send_command(cmd)\n        code, message = await self.reader.read_reply()\n\n        if code not in success:\n            raise SMTPCommandFailedError(code, message, cmd)\n\n        return code, message", "docstring": "Sends the given command to the server.\n\nArgs:\n*args: Command and arguments to be sent to the server.\n\nRaises:\nConnectionResetError: If the connection with the server is\nunexpectedely lost.\nSMTPCommandFailedError: If the command fails.\n\nReturns:\n(int, str): A (code, message) 2-tuple containing the server\nresponse.", "source": "juraj-google-style"}
{"code": "def Deserialize(self, reader):\n        \n        usage = reader.ReadByte()\n        self.Usage = usage\n\n        if usage == TransactionAttributeUsage.ContractHash or usage == TransactionAttributeUsage.Vote or \\\n                (usage >= TransactionAttributeUsage.Hash1 and usage <= TransactionAttributeUsage.Hash15):\n            self.Data = reader.ReadBytes(32)\n\n        elif usage == TransactionAttributeUsage.ECDH02 or usage == TransactionAttributeUsage.ECDH03:\n            self.Data = bytearray(usage) + bytearray(reader.ReadBytes(32))\n\n        elif usage == TransactionAttributeUsage.Script:\n            self.Data = reader.ReadBytes(20)\n\n        elif usage == TransactionAttributeUsage.DescriptionUrl:\n\n            self.Data = reader.ReadBytes(reader.ReadByte())\n\n        elif usage == TransactionAttributeUsage.Description or usage >= TransactionAttributeUsage.Remark:\n            self.Data = reader.ReadVarBytes(max=self.MAX_ATTR_DATA_SIZE)\n        else:\n            logger.error(\"format error!!!\")", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def format_level_1_memory(memory):\n    \n    formatted_memory = _list_to_complex_array(memory)\n    \n    if not 1 <= len(formatted_memory.shape) <= 2:\n        raise QiskitError('Level one memory is not of correct shape.')\n    return formatted_memory", "docstring": "Format an experiment result memory object for measurement level 1.\n\nArgs:\nmemory (list): Memory from experiment with `meas_level==1`. `avg` or\n`single` will be inferred from shape of result memory.\n\nReturns:\nnp.ndarray: Measurement level 1 complex numpy array\n\nRaises:\nQiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)\nindicies.", "source": "juraj-google-style"}
{"code": "def snapped_slice(size, frac, n):\n    if (size < n):\n        n = size\n    start = (int(((size * frac) - ceil((n / 2)))) + 1)\n    stop = (int(((size * frac) + floor((n / 2)))) + 1)\n    buf = 0\n    if (stop >= size):\n        buf = (size - stop)\n    elif (start < 0):\n        buf = (0 - start)\n    stop += buf\n    start += buf\n    assert (stop <= size), ('out of bounds [%r, %r]' % (stop, start))\n    sl = slice(start, stop)\n    return sl", "docstring": "r\"\"\"\nCreates a slice spanning `n` items in a list of length `size` at position\n`frac`.\n\nArgs:\nsize (int): length of the list\nfrac (float): position in the range [0, 1]\nn (int): number of items in the slice\n\nReturns:\nslice: slice object that best fits the criteria\n\nSeeAlso:\ntake_percentile_parts\n\nExample:\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> import utool as ut\n>>> print(snapped_slice(0, 0, 10))\n>>> print(snapped_slice(1, 0, 10))\n>>> print(snapped_slice(100, 0, 10))\n>>> print(snapped_slice(9, 0, 10))\n>>> print(snapped_slice(100, 1, 10))\npass", "source": "codesearchnet"}
{"code": "def num_lineages_at(self, distance):\n    if ((not isinstance(distance, float)) and (not isinstance(distance, int))):\n        raise TypeError('distance must be an int or a float')\n    if (distance < 0):\n        raise RuntimeError('distance cannot be negative')\n    d = dict()\n    q = deque()\n    q.append(self.root)\n    count = 0\n    while (len(q) != 0):\n        node = q.popleft()\n        if node.is_root():\n            d[node] = 0\n        else:\n            d[node] = d[node.parent]\n        if (node.edge_length is not None):\n            d[node] += node.edge_length\n        if (d[node] < distance):\n            q.extend(node.children)\n        elif ((node.parent is None) or (d[node.parent] < distance)):\n            count += 1\n    return count", "docstring": "Returns the number of lineages of this ``Tree`` that exist ``distance`` away from the root\n\nArgs:\n``distance`` (``float``): The distance away from the root\n\nReturns:\n``int``: The number of lineages that exist ``distance`` away from the root", "source": "codesearchnet"}
{"code": "def _createBitpattern(functioncode, value):\n    _checkFunctioncode(functioncode, [5, 15])\n    _checkInt(value, minvalue=0, maxvalue=1, description='inputvalue')\n    if (functioncode == 5):\n        if (value == 0):\n            return '\\x00\\x00'\n        else:\n            return 'ÿ\\x00'\n    elif (functioncode == 15):\n        if (value == 0):\n            return '\\x00'\n        else:\n            return '\\x01'", "docstring": "Create the bit pattern that is used for writing single bits.\n\nThis is basically a storage of numerical constants.\n\nArgs:\n* functioncode (int): can be 5 or 15\n* value (int): can be 0 or 1\n\nReturns:\nThe bit pattern (string).\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def _pack_sequence_as(structured_outputs, op_outputs):\n    outputs_with_nones = []\n    counter = 0\n    for output in nest.flatten(structured_outputs, expand_composites=True):\n        if output is None:\n            outputs_with_nones.append(None)\n        else:\n            outputs_with_nones.append(op_outputs[counter])\n            counter += 1\n    return func_graph_module.pack_sequence_as(structured_outputs, outputs_with_nones)", "docstring": "Packs the outputs of the gradient If/Case op.\n\nThe branch functions may contain None's in the list of `structured_outputs`.\n`op_outputs` has those outputs missing. So we need to add those Nones to the\nlist of `op_outputs` and then pack it in the same structure as\n`structured_outputs`.\n\nArgs:\nstructured_outputs: structured_outputs from one of the branch functions.\nop_outputs: List of output tensors of the op.\n\nReturns:\n`op_outputs` packed like `structured_outputs`.", "source": "github-repos"}
{"code": "def parse_GPL(filepath, entry_name=None, partial=None):\n    gsms = {}\n    gses = {}\n    gpl_soft = []\n    has_table = False\n    gpl_name = entry_name\n    database = None\n    if isinstance(filepath, str):\n        with utils.smart_open(filepath) as soft:\n            groupper = groupby(soft, (lambda x: x.startswith('^')))\n            for (is_new_entry, group) in groupper:\n                if is_new_entry:\n                    (entry_type, entry_name) = __parse_entry(next(group))\n                    logger.debug(('%s: %s' % (entry_type.upper(), entry_name)))\n                    if (entry_type == 'SERIES'):\n                        (is_data, data_group) = next(groupper)\n                        gse_metadata = parse_metadata(data_group)\n                        gses[entry_name] = GSE(name=entry_name, metadata=gse_metadata)\n                    elif (entry_type == 'SAMPLE'):\n                        if (partial and (entry_name not in partial)):\n                            continue\n                        (is_data, data_group) = next(groupper)\n                        gsms[entry_name] = parse_GSM(data_group, entry_name)\n                    elif (entry_type == 'DATABASE'):\n                        (is_data, data_group) = next(groupper)\n                        database_metadata = parse_metadata(data_group)\n                        database = GEODatabase(name=entry_name, metadata=database_metadata)\n                    elif ((entry_type == 'PLATFORM') or (entry_type == 'Annotation')):\n                        gpl_name = entry_name\n                        (is_data, data_group) = next(groupper)\n                        has_gpl_name = (gpl_name or (gpl_name is None))\n                        for line in data_group:\n                            if (('_table_begin' in line) or (not line.startswith(('^', '!', '\n                                has_table = True\n                            if (not has_gpl_name):\n                                if match('!Annotation_platform\\\\s*=\\\\s*', line):\n                                    gpl_name = split('\\\\s*=\\\\s*', line)[(- 1)].strip()\n                                    has_gpl_name = True\n                            gpl_soft.append(line)\n                    else:\n                        raise RuntimeError('Cannot parse {etype}. Unknown for GPL.'.format(etype=entry_type))\n    else:\n        for line in filepath:\n            if (('_table_begin' in line) or (not line.startswith(('^', '!', '\n                has_table = True\n            gpl_soft.append(line.rstrip())\n    columns = None\n    try:\n        columns = parse_columns(gpl_soft)\n    except Exception:\n        pass\n    metadata = parse_metadata(gpl_soft)\n    if has_table:\n        table_data = parse_table_data(gpl_soft)\n    else:\n        table_data = DataFrame()\n    gpl = GPL(name=gpl_name, gses=gses, gsms=gsms, table=table_data, metadata=metadata, columns=columns, database=database)\n    for (gse_id, gse) in gpl.gses.items():\n        for gsm_id in gse.metadata.get('sample_id', []):\n            if (gsm_id in gpl.gsms):\n                gpl.gses[gse_id].gsms[gsm_id] = gpl.gsms[gsm_id]\n    return gpl", "docstring": "Parse GPL entry from SOFT file.\n\nArgs:\nfilepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL entry\nor list of lines representing GPL from GSE file.\nentry_name (:obj:`str`, optional): Name of the entry. 
By default it is\ninferred from the data.\npartial (:obj:'iterable', optional): A list of accession IDs of GSMs\nto be partially extracted from GPL, works only if a file/accession\nis a GPL.\n\nReturns:\n:obj:`GEOparse.GPL`: A GPL object.", "source": "codesearchnet"}
{"code": "def _VerifyExplicitPaddings(self, tensor_in_sizes, filter_in_sizes, strides, padding, data_format, dtype, use_gpu, op_name, dilations=(1, 1), test_grappler_layout_optimizer=False, tol=1e-05):\n    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)\n    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)\n    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])\n    dilations = list(dilations)\n    conv2d_result = nn_ops.conv2d(input_tensor, filter_tensor, [1] + list(strides) + [1], 'VALID', dilations=[1] + dilations + [1])\n    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))\n    self._VerifyValuesParameters(tensor_in_sizes, filter_in_sizes, strides, padding, expected, data_format, dtype, use_gpu, op_name, dilations, test_grappler_layout_optimizer=test_grappler_layout_optimizer, tol=tol)", "docstring": "Verifies Conv2D with explicit padding generates correct values.\n\nIt does this by comparing with Conv2D without explicit padding. This\nfunction assumes Conv2D without explicit padding works correctly.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\nstrides: [row_stride, col_stride] for the convolution;\npadding: Explicit padding amounts.\ndata_format: \"NCHW\" or \"NHWC\"\ndtype: data type to perform test\nuse_gpu: True if testing on the GPU\nop_name: \"Conv\" or \"Conv2D\"\ndilations: Dilation values\ntest_grappler_layout_optimizer: If True, allow the Grappler layout\noptimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.\ntol: The absolute and relative tolerance.", "source": "github-repos"}
{"code": "def anonymize_column(self, col):\n    column = col[self.col_name]\n    generator = self.get_generator()\n    original_values = column[(~ pd.isnull(column))].unique()\n    new_values = [generator() for x in range(len(original_values))]\n    if (len(new_values) != len(set(new_values))):\n        raise ValueError('There are not enought different values on faker providerfor category {}'.format(self.category))\n    value_map = dict(zip(original_values, new_values))\n    column = column.apply(value_map.get)\n    return column.to_frame()", "docstring": "Map the values of column to new ones of the same type.\n\nIt replaces the values from others generated using `faker`. It will however,\nkeep the original distribution. That mean that the generated `probability_map` for both\nwill have the same values, but different keys.\n\nArgs:\ncol (pandas.DataFrame): Dataframe containing the column to anonymize.\n\nReturns:\npd.DataFrame: DataFrame with its values mapped to new ones,\nkeeping the original distribution.\n\nRaises:\nValueError: A `ValueError` is raised if faker is not able to provide enought\ndifferent values.", "source": "codesearchnet"}
{"code": "def __init__(self, filenames, index=0, buffer_size=None, _account_id=None,\n               delimiter=None, path_filter=None):\n    \n    super(GCSInputReader, self).__init__()\n    self._filenames = filenames\n    self._index = index\n    self._buffer_size = buffer_size\n    self._account_id = _account_id\n    self._delimiter = delimiter\n    self._bucket = None\n    self._bucket_iter = None\n    self._path_filter = path_filter\n    self._slice_ctx = None", "docstring": "Initialize a GoogleCloudStorageInputReader instance.\n\nArgs:\nfilenames: A list of Google Cloud Storage filenames of the form\n'/bucket/objectname'.\nindex: Index of the next filename to read.\nbuffer_size: The size of the read buffer, None to use default.\n_account_id: Internal use only. See cloudstorage documentation.\ndelimiter: Delimiter used as path separator. See class doc.\npath_filter: An instance of PathFilter.", "source": "juraj-google-style"}
{"code": "def stream_stderr(self, processes, print_only_first=False):\n\n    def _stream_stderr_single_process(process, type_string, index, print_to_stdout):\n        \n        while True:\n            output = process.stderr.readline()\n            if not output and process.poll() is not None:\n                break\n            if output and print_to_stdout:\n                print('{}{} {}'.format(type_string, index, output.strip()))\n                sys.stdout.flush()\n    stream_threads = []\n    for process_type, process_list in six.iteritems(processes):\n        for i in range(len(process_list)):\n            print_to_stdout = not print_only_first or i == 0\n            thread = threading.Thread(target=_stream_stderr_single_process, args=(process_list[i], process_type, i, print_to_stdout))\n            thread.start()\n            stream_threads.append(thread)\n    for thread in stream_threads:\n        thread.join()", "docstring": "Consume stderr of all processes and print to stdout.\n\nTo reduce the amount of logging, caller can set print_only_first to True.\nIn that case, this function only prints stderr from the first process of\neach type.\n\nArgs:\nprocesses: A dictionary from process type string -> list of processes.\nprint_only_first: If true, only print output from first process of each\ntype.", "source": "github-repos"}
{"code": "def hdfs_path(ctx, path):\n  \n  \n  HADOOP_SCHEMES = ['adl:\n                    'file:\n                    'hdfs:\n                    'oss:\n                    's3:\n                    's3a:\n                    's3n:\n                    'swift:\n                    'viewfs:\n                    'wasb:\n  if (any(path.startswith(scheme) for scheme in HADOOP_SCHEMES)):\n    \n    return path\n  elif path.startswith(\"/\"):\n    \n    return ctx.defaultFS + path\n  else:\n    \n    if ctx.defaultFS.startswith(\"hdfs:\n      return \"{0}/user/{1}/{2}\".format(ctx.defaultFS, getpass.getuser(), path)\n    elif ctx.defaultFS.startswith(\"file:\n      return \"{0}/{1}/{2}\".format(ctx.defaultFS, ctx.working_dir[1:], path)\n    else:\n      logging.warn(\"Unknown scheme {0} with relative path: {1}\".format(ctx.defaultFS, path))\n      return \"{0}/{1}\".format(ctx.defaultFS, path)", "docstring": "Convenience function to create a Tensorflow-compatible absolute HDFS path from relative paths\n\nArgs:\n:ctx: TFNodeContext containing the metadata specific to this node in the cluster.\n:path: path to convert\n\nReturns:\nAn absolute path prefixed with the correct filesystem scheme.", "source": "juraj-google-style"}
{"code": "def probe(filename, cmd='ffprobe', **kwargs):\n    args = [cmd, '-show_format', '-show_streams', '-of', 'json']\n    args += convert_kwargs_to_cmd_line_args(kwargs)\n    args += [filename]\n    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    (out, err) = p.communicate()\n    if (p.returncode != 0):\n        raise Error('ffprobe', out, err)\n    return json.loads(out.decode('utf-8'))", "docstring": "Run ffprobe on the specified file and return a JSON representation of the output.\n\nRaises:\n:class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code,\nan :class:`Error` is returned with a generic error message.\nThe stderr output can be retrieved by accessing the\n``stderr`` property of the exception.", "source": "codesearchnet"}
{"code": "def get_info_line(self, **kwargs):\n    select_date = ('%02d/%02d/%d' % (kwargs.get('day', '01'), kwargs.get('month', '01'), kwargs.get('year', '1970')))\n    params = {'fecha': select_date, 'line': util.ints_to_string(kwargs.get('lines', [])), 'cultureInfo': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('geo', 'get_info_line', **params)\n    if (not util.check_result(result, 'Line')):\n        return (False, 'UNKNOWN ERROR')\n    values = util.response_list(result, 'Line')\n    return (True, [emtype.Line(**a) for a in values])", "docstring": "Obtain basic information on a bus line on a given date.\n\nArgs:\nday (int): Day of the month in format DD.\nThe number is automatically padded if it only has one digit.\nmonth (int): Month number in format MM.\nThe number is automatically padded if it only has one digit.\nyear (int): Year number in format YYYY.\nlines (list[int] | int): Lines to query, may be empty to get\nall the lines.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Line]), or message string\nin case of error.", "source": "codesearchnet"}
{"code": "def call(self, x):\n    return ops.rms_normalization(x, scale=self.scale, axis=self.axis, epsilon=self.epsilon)", "docstring": "Applies RMS normalization to the input tensor.\n\nArgs:\nx: Input tensor of shape (batch_size, input_dim).\n\nReturns:\nThe RMS-normalized tensor of the same shape (batch_size, input_dim),\nscaled by the learned `scale` parameter.", "source": "github-repos"}
{"code": "def extract(self, file_path, is_drum=False):\n        \n        midi_data = pretty_midi.PrettyMIDI(file_path)\n        note_tuple_list = []\n        for instrument in midi_data.instruments:\n            if (is_drum is False and instrument.is_drum is False) or (is_drum is True and instrument.is_drum is True):\n                for note in instrument.notes:\n                    note_tuple_list.append((instrument.program, note.start, note.end, note.pitch, note.velocity))\n        note_df = pd.DataFrame(note_tuple_list, columns=[\"program\", \"start\", \"end\", \"pitch\", \"velocity\"])\n        note_df = note_df.sort_values(by=[\"program\", \"start\", \"end\"])\n        note_df[\"duration\"] = note_df.end - note_df.start\n\n        return note_df", "docstring": "Extract MIDI file.\n\nArgs:\nfile_path:    File path of MIDI.\nis_drum:      Extract drum data or not.\n\nReturns:\npd.DataFrame(columns=[\"program\", \"start\", \"end\", \"pitch\", \"velocity\", \"duration\"])", "source": "juraj-google-style"}
{"code": "def get_num_bytes(self, batch: Sequence[ExampleT]) -> int:\n    return len(pickle.dumps(batch))", "docstring": "Returns:\nThe number of bytes of data for a batch.", "source": "github-repos"}
{"code": "def getVarianceComps(self, univariance=False):\n    RV = sp.zeros((self.P, self.n_randEffs))\n    for term_i in range(self.n_randEffs):\n        RV[(:, term_i)] = self.getTraitCovar(term_i).diagonal()\n    if univariance:\n        RV /= RV.sum(1)[(:, sp.newaxis)]\n    return RV", "docstring": "Return the estimated variance components\n\nArgs:\nunivariance:   Boolean indicator, if True variance components are normalized to sum up to 1 for each trait\nReturns:\nvariance components of all random effects on all phenotypes [P, n_randEffs matrix]", "source": "codesearchnet"}
{"code": "def history(self, hash):\n    txs = self._t.get(hash, max_transactions=10000)['transactions']\n    tree = defaultdict(list)\n    number_editions = 0\n    for tx in txs:\n        _tx = self._t.get(tx['txid'])\n        txid = _tx['txid']\n        verb_str = BlockchainSpider.check_script(_tx['vouts'])\n        verb = Spoolverb.from_verb(verb_str)\n        (from_address, to_address, piece_address) = BlockchainSpider._get_addresses(_tx)\n        timestamp_utc = _tx['time']\n        action = verb.action\n        edition_number = 0\n        if (action != 'EDITIONS'):\n            edition_number = verb.edition_number\n        else:\n            number_editions = verb.num_editions\n        tree[edition_number].append({'txid': txid, 'verb': verb_str, 'from_address': from_address, 'to_address': to_address, 'piece_address': piece_address, 'timestamp_utc': timestamp_utc, 'action': action, 'number_editions': number_editions, 'edition_number': edition_number})\n    for (edition, chain) in tree.items():\n        [d.update({'number_editions': number_editions}) for d in chain]\n    return dict(tree)", "docstring": "Retrieve the ownership tree of all editions of a piece given the hash.\n\nArgs:\nhash (str): Hash of the file to check. Can be created with the\n:class:`File` class\n\nReturns:\ndict: Ownsership tree of all editions of a piece.\n\n.. note:: For now we only support searching the blockchain by\nthe piece hash.", "source": "codesearchnet"}
{"code": "def assert_current_path(self, path, **kwargs):\n        \n\n        query = CurrentPathQuery(path, **kwargs)\n\n        @self.document.synchronize\n        def assert_current_path():\n            if not query.resolves_for(self):\n                raise ExpectationNotMet(query.failure_message)\n        assert_current_path()\n\n        return True", "docstring": "Asserts that the page has the given path. By default this will compare against the\npath+query portion of the full URL.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj-google-style"}
{"code": "def _create_outbound_stream(self, config=None):\n        \n        if config is None:\n            raise ValueError('No stream config to create stream from.')\n\n        name = self._get_stream_name(config)\n        stream_handlers = self._get_stream_handlers(config, name)\n        stream_input = config.get('input', None)\n        stream_output = config.get('output', None)\n\n        if type(stream_output) is int:\n            return PortOutputStream(name,\n                                    stream_input,\n                                    stream_output,\n                                    stream_handlers,\n                                    zmq_args={'zmq_context': self.broker.context,\n                                              'zmq_proxy_xsub_url': self.broker.XSUB_URL,\n                                              'zmq_proxy_xpub_url': self.broker.XPUB_URL})\n        else:\n            if stream_output is not None:\n                log.warn(\"Output of stream {} is not an integer port. \"\n                         \"Stream outputs can only be ports.\".format(name))\n            return ZMQStream(name,\n                             stream_input,\n                             stream_handlers,\n                             zmq_args={'zmq_context': self.broker.context,\n                                       'zmq_proxy_xsub_url': self.broker.XSUB_URL,\n                                       'zmq_proxy_xpub_url': self.broker.XPUB_URL})", "docstring": "Creates an outbound stream from its config.\n\nParams:\nconfig:       stream configuration as read by ait.config\nReturns:\nstream:       a Stream\nRaises:\nValueError:   if any of the required config values are missing", "source": "juraj-google-style"}
{"code": "def _write_class_markdown_to_file(self, f, name, cls):\n    \n    \n    methods = dict(self.get_class_members(name, cls))\n    \n    \n    num_methods = len(methods)\n    try:\n      self._write_docstring_markdown_to_file(f, \"\n                                             methods, {})\n    except ValueError as e:\n      raise ValueError(str(e) + \" in class `%s`\" % cls.__name__)\n\n    \n    \n    \n    \n    \n    \n    any_method_called_out = (len(methods) != num_methods)\n    if any_method_called_out:\n      other_methods = {n: m for n, m in methods.items() if n in cls.__dict__}\n      if other_methods:\n        print(\"\\n\n    else:\n      other_methods = methods\n    for name in sorted(other_methods):\n      self._write_member_markdown_to_file(f, \"", "docstring": "Write the class doc to `f`.\n\nArgs:\nf: File to write to.\nprefix: Prefix for names.\ncls: class object.\nname: name to use.", "source": "juraj-google-style"}
{"code": "async def send_rpc_command(self, short_name, rpc_id, payload, sender_client, timeout=1.0):\n    rpc_tag = str(uuid.uuid4())\n    self.rpc_results.declare(rpc_tag)\n    if ((short_name in self.services) and (short_name in self.agents)):\n        agent_tag = self.agents[short_name]\n        rpc_message = {'rpc_id': rpc_id, 'payload': payload, 'response_uuid': rpc_tag}\n        self.in_flight_rpcs[rpc_tag] = InFlightRPC(sender_client, short_name, monotonic(), timeout)\n        (await self._notify_update(short_name, 'rpc_command', rpc_message, directed_client=agent_tag))\n    else:\n        response = dict(result='service_not_found', response=b'')\n        self.rpc_results.set(rpc_tag, response)\n    return rpc_tag", "docstring": "Send an RPC to a service using its registered agent.\n\nArgs:\nshort_name (str): The name of the service we would like to send\nand RPC to\nrpc_id (int): The rpc id that we would like to call\npayload (bytes): The raw bytes that we would like to send as an\nargument\nsender_client (str): The uuid of the sending client\ntimeout (float): The maximum number of seconds before we signal a timeout\nof the RPC\n\nReturns:\nstr: A unique id that can used to identify the notified response of this\nRPC.", "source": "codesearchnet"}
{"code": "def read_var_bytes(self, max_size=sys.maxsize) -> bytes:\n        \n        length = self.read_var_int(max_size)\n        return self.read_bytes(length)", "docstring": "Read a variable length of bytes from the stream.\n\nArgs:\nmax_size (int): (Optional) maximum number of bytes to read.\n\nReturns:\nbytes:", "source": "juraj-google-style"}
{"code": "def GetEventTagByIdentifier(self, storage_file, event_identifier):\n    \n    if not self._index:\n      self._Build(storage_file)\n\n    lookup_key = event_identifier.CopyToString()\n    event_tag_identifier = self._index.get(lookup_key, None)\n    if not event_tag_identifier:\n      return None\n\n    return storage_file.GetEventTagByIdentifier(event_tag_identifier)", "docstring": "Retrieves the most recently updated event tag for an event.\n\nArgs:\nstorage_file (BaseStorageFile): storage file.\nevent_identifier (AttributeContainerIdentifier): event attribute\ncontainer identifier.\n\nReturns:\nEventTag: event tag or None if the event has no event tag.", "source": "juraj-google-style"}
{"code": "def _BuildOobLink(self, param, mode):\n    \n    code = self.rpc_helper.GetOobCode(param)\n    if code:\n      parsed = list(parse.urlparse(self.widget_url))\n\n      query = dict(parse.parse_qsl(parsed[4]))\n      query.update({'mode': mode, 'oobCode': code})\n\n      try:\n        parsed[4] = parse.urlencode(query)\n      except AttributeError:\n        parsed[4] = urllib.urlencode(query)\n\n      return code, parse.urlunparse(parsed)\n    raise errors.GitkitClientError('invalid request')", "docstring": "Builds out-of-band URL.\n\nGitkit API GetOobCode() is called and the returning code is combined\nwith Gitkit widget URL to building the out-of-band url.\n\nArgs:\nparam: dict of request.\nmode: string, Gitkit widget mode to handle the oob action after user\nclicks the oob url in the email.\n\nRaises:\nGitkitClientError: if oob code is not returned.\n\nReturns:\nA string of oob url.", "source": "juraj-google-style"}
{"code": "def determinant(self, name='det'):\n    if self.is_square is False:\n        raise NotImplementedError('Determinant not implemented for an operator that is expected to not be square.')\n    with self._name_scope(name):\n        return self._determinant()", "docstring": "Determinant for every batch member.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\nRaises:\nNotImplementedError:  If `self.is_square` is `False`.", "source": "github-repos"}
{"code": "def get_service_alias_by_class(self, service_class):\n    aliases = []\n    for alias, service_object in self._service_objects.items():\n        if isinstance(service_object, service_class):\n            aliases.append(alias)\n    return aliases", "docstring": "Gets the aslias name of a registered service.\n\nThe same service class can be registered multiple times with different\naliases. When not well managed, duplication and race conditions can arise.\nOne can use this API to de-duplicate as needed.\n\nArgs:\nservice_class: class, the class of a service type.\n\nReturns:\nlist of strings, the aliases the service is registered with.", "source": "github-repos"}
{"code": "def __init__(self, statediag=[], thebiggestid=None):\n        \n        self.statediag = []\n        self.quickresponse = {}\n        self.quickresponse_types = {}\n        self.toadd = []\n        self.biggestid = 0\n\n        if thebiggestid is None:\n            for state in statediag:\n                if statediag[state].id > self.biggestid:\n                    self.biggestid = statediag[state].id\n        else:\n            self.biggestid = thebiggestid\n        self.statediag = statediag", "docstring": "Find the biggest State ID\nArgs:\nstatediag (list): The states of the PDA\nthebiggestid (int): The binggest state identifier\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _list_certs(certificate_store='My'):\n    ret = dict()\n    blacklist_keys = ['DnsNameList', 'Thumbprint']\n    ps_cmd = ['Get-ChildItem', '-Path', \"'Cert:\\\\LocalMachine\\\\{0}'\".format(certificate_store), '|', 'Select-Object DnsNameList, SerialNumber, Subject, Thumbprint, Version']\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n    for item in items:\n        cert_info = dict()\n        for key in item:\n            if (key not in blacklist_keys):\n                cert_info[key.lower()] = item[key]\n        cert_info['dnsnames'] = []\n        if item['DnsNameList']:\n            cert_info['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']]\n        ret[item['Thumbprint']] = cert_info\n    return ret", "docstring": "List details of available certificates in the LocalMachine certificate\nstore.\n\nArgs:\ncertificate_store (str): The name of the certificate store on the local\nmachine.\n\nReturns:\ndict: A dictionary of certificates found in the store", "source": "codesearchnet"}
{"code": "def __init__(self, num_participants):\n    self._num_participants = num_participants\n    self._counter = 0\n    self._flag = False\n    self._local_sense = threading.local()\n    self._lock = threading.Lock()\n    self._condition = threading.Condition()", "docstring": "Initializes the barrier object.\n\nArgs:\nnum_participants: an integer which is the expected number of calls of\n`wait` pass to through this barrier.", "source": "github-repos"}
{"code": "def resize(self, image: 'torch.Tensor', size: Dict[str, int], crop_pct: float, interpolation: PILImageResampling=PILImageResampling.BICUBIC, **kwargs) -> 'torch.Tensor':\n    if not size.shortest_edge:\n        raise ValueError(f\"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}\")\n    shortest_edge = size['shortest_edge']\n    if shortest_edge < 384:\n        resize_shortest_edge = int(shortest_edge / crop_pct)\n        resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST)\n        image = F.resize(image, resize_size, interpolation=interpolation, **kwargs)\n        return F.center_crop(image, (shortest_edge, shortest_edge), **kwargs)\n    else:\n        return F.resize(image, (shortest_edge, shortest_edge), interpolation=interpolation, **kwargs)", "docstring": "Resize an image.\n\nArgs:\nimage (`torch.Tensor`):\nImage to resize.\nsize (`Dict[str, int]`):\nDictionary of the form `{\"shortest_edge\": int}`, specifying the size of the output image. If\n`size[\"shortest_edge\"]` >= 384 image is resized to `(size[\"shortest_edge\"], size[\"shortest_edge\"])`.\nOtherwise, the smaller edge of the image will be matched to `int(size[\"shortest_edge\"] / crop_pct)`,\nafter which the image is cropped to `(size[\"shortest_edge\"], size[\"shortest_edge\"])`.\ncrop_pct (`float`):\nPercentage of the image to crop. Only has an effect if size < 384.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resizing the image.\n\nReturns:\n`torch.Tensor`: Resized image.", "source": "github-repos"}
{"code": "def downstream_index_dinf(dinfdir_value, i, j):\n        \n        down_dirs = DinfUtil.dinf_downslope_direction(dinfdir_value)\n        down_coors = []\n        for dir_code in down_dirs:\n            row, col = D8Util.downstream_index(dir_code, i, j)\n            down_coors.append([row, col])\n        return down_coors", "docstring": "find downslope coordinate for Dinf of TauDEM\nArgs:\ndinfdir_value: dinf direction value\ni: current row\nj: current col\n\nReturns:\ndownstream (row, col)s", "source": "juraj-google-style"}
{"code": "def _CreateExpandedDSA(client, ad_group_id):\n  \n  \n  ad_group_ad_service = client.GetService('AdGroupAdService')\n\n  \n  operations = [{\n      'operator': 'ADD',\n      'operand': {\n          'xsi_type': 'AdGroupAd',\n          'adGroupId': ad_group_id,\n          \n          \n          \n          \n          'ad': {\n              'xsi_type': 'ExpandedDynamicSearchAd',\n              \n              'description': 'Buy your tickets now!',\n              'description2': 'Discount ends soon'\n          },\n          \n          'status': 'PAUSED',\n      }\n  }]\n\n  \n  ad = ad_group_ad_service.mutate(operations)['value'][0]['ad']\n\n  \n  print ('Expanded dynamic search ad with ID \"%d\", description \"%s\", and '\n         'description 2 \"%s\" was added'\n         % (ad['id'], ad['description'], ad['description2']))", "docstring": "Creates the expanded Dynamic Search Ad.\n\nArgs:\nclient: an AdwordsClient instance.\nad_group_id: an integer ID of the ad group in which the DSA is added.", "source": "juraj-google-style"}
{"code": "def log_value(self, name, value, step=None):\n    if isinstance(value, six.string_types):\n        raise TypeError('\"value\" should be a number, got {}'.format(type(value)))\n    value = float(value)\n    self._check_step(step)\n    tf_name = self._ensure_tf_name(name)\n    summary = self._scalar_summary(tf_name, value, step)\n    self._log_summary(tf_name, summary, value, step=step)", "docstring": "Log new value for given name on given step.\n\nArgs:\nname (str): name of the variable (it will be converted to a valid\ntensorflow summary name).\nvalue (float): this is a real number to be logged as a scalar.\nstep (int): non-negative integer used for visualization: you can\nlog several different variables on one step, but should not log\ndifferent values of the same variable on the same step (this is\nnot checked).", "source": "codesearchnet"}
{"code": "def _ParseShVariables(self, lines):\n    \n    paths = {}\n    for line in lines:\n      for entry in line:\n        if \"=\" in entry:\n          \n          \n          target, vals = (entry.split(\"=\", 1) + [\"\"])[:2]\n          if vals:\n            path_vals = vals.split(\":\")\n          else:\n            path_vals = []\n          self._ExpandPath(target, path_vals, paths)\n        elif entry not in self._SH_CONTINUATION:\n          \n          \n          \n          \n          break\n    return paths", "docstring": "Extract env_var and path values from sh derivative shells.\n\nIterates over each line, word by word searching for statements that set the\npath. These are either variables, or conditions that would allow a variable\nto be set later in the line (e.g. export).\n\nArgs:\nlines: A list of lines, each of which is a list of space separated words.\n\nReturns:\na dictionary of path names and values.", "source": "juraj-google-style"}
{"code": "def deroot(self, label='OLDROOT'):\n        \n        if self.root.edge_length is not None:\n            self.root.add_child(Node(edge_length=self.root.edge_length,label=label))\n            self.root.edge_length = None", "docstring": "If the tree has a root edge, drop the edge to be a child of the root node\n\nArgs:\n``label`` (``str``): The desired label of the new child", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n    \n    self.args = args\n    self.kwargs = kwargs\n    self.outputs = None\n    self.backoff_seconds = _DEFAULT_BACKOFF_SECONDS\n    self.backoff_factor = _DEFAULT_BACKOFF_FACTOR\n    self.max_attempts = _DEFAULT_MAX_ATTEMPTS\n    self.target = None\n    self.task_retry = False\n    self._current_attempt = 0\n    self._root_pipeline_key = None\n    self._pipeline_key = None\n    self._context = None\n    self._result_status = None\n    self._set_class_path()\n    \n    \n    self.target = mr_util._get_task_target()\n\n    if _TEST_MODE:\n      self._context = _PipelineContext('', 'default', '')\n      self._root_pipeline_key = _TEST_ROOT_PIPELINE_KEY\n      self._pipeline_key = db.Key.from_path(\n          _PipelineRecord.kind(), uuid.uuid4().hex)\n      self.outputs = PipelineFuture(self.output_names)\n      self._context.evaluate_test(self)", "docstring": "Initializer.\n\nArgs:\n*args: The positional arguments for this function-object.\n**kwargs: The keyword arguments for this function-object.", "source": "juraj-google-style"}
{"code": "def implemented(cls, for_type):\n    for function in cls.required():\n        if (not function.implemented_for_type(for_type)):\n            raise TypeError((\"%r doesn't implement %r so it cannot participate in the protocol %r.\" % (for_type, function.func.__name__, cls)))\n    cls.register(for_type)", "docstring": "Assert that protocol 'cls' is implemented for type 'for_type'.\n\nThis will cause 'for_type' to be registered with the protocol 'cls'.\nSubsequently, protocol.isa(for_type, cls) will return True, as will\nisinstance, issubclass and others.\n\nRaises:\nTypeError if 'for_type' doesn't implement all required functions.", "source": "codesearchnet"}
{"code": "def from_music_service(cls, music_service, content_dict):\n        \n        \n        quoted_id = quote_url(content_dict['id'].encode('utf-8'))\n        \n        item_id = '0fffffff{}'.format(quoted_id)\n        \n        is_track = cls == get_class('MediaMetadataTrack')\n        uri = form_uri(item_id, music_service, is_track)\n        \n        resources = [DidlResource(uri=uri, protocol_info=\"DUMMY\")]\n        desc = music_service.desc\n        return cls(item_id, desc, resources, uri, content_dict,\n                   music_service=music_service)", "docstring": "Return an element instantiated from the information that a music\nservice has (alternative constructor)\n\nArgs:\nmusic_service (MusicService): The music service that content_dict\noriginated from\ncontent_dict (OrderedDict): The data to instantiate the music\nservice item from\n\nReturns:\nMusicServiceItem: A MusicServiceItem instance", "source": "juraj-google-style"}
{"code": "def delete_endpoint(self, endpoint_name):\n        \n        LOGGER.info('Deleting endpoint with name: {}'.format(endpoint_name))\n        self.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)", "docstring": "Delete an Amazon SageMaker ``Endpoint``.\n\nArgs:\nendpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to delete.", "source": "juraj-google-style"}
{"code": "def put_rpc(self, address, rpc_id, arg_payload, response):\n    self._rpc_queue.put_nowait((address, rpc_id, arg_payload, response))", "docstring": "Place an RPC onto the RPC queue.\n\nThe rpc will be dispatched asynchronously by the background dispatch\ntask.  This method must be called from the event loop.  This method\ndoes not block.\n\nArgs:\naddress (int): The address of the tile with the RPC\nrpc_id (int): The id of the rpc you want to call\narg_payload (bytes): The RPC payload\nrespones (GenericResponse): The object to use to signal the result.", "source": "codesearchnet"}
{"code": "def read_value(self):\n    raise NotImplementedError", "docstring": "Returns the value of this variable, read in the current context.\n\nCan be different from value() if it's on another device, with control\ndependencies, etc.\n\nReturns:\nA `Tensor` containing the value of the variable.", "source": "github-repos"}
{"code": "def _example_short_number(region_code):\n    \n    metadata = PhoneMetadata.short_metadata_for_region(region_code)\n    if metadata is None:\n        return U_EMPTY_STRING\n    desc = metadata.short_code\n    if desc.example_number is not None:\n        return desc.example_number\n    return U_EMPTY_STRING", "docstring": "Gets a valid short number for the specified region.\n\nArguments:\nregion_code -- the region for which an example short number is needed.\n\nReturns a valid short number for the specified region. Returns an empty\nstring when the metadata does not contain such information.", "source": "juraj-google-style"}
{"code": "def get_smeared_densities(self, sigma):\n    from scipy.ndimage.filters import gaussian_filter1d\n    diff = [(self.frequencies[(i + 1)] - self.frequencies[i]) for i in range((len(self.frequencies) - 1))]\n    avgdiff = (sum(diff) / len(diff))\n    smeared_dens = gaussian_filter1d(self.densities, (sigma / avgdiff))\n    return smeared_dens", "docstring": "Returns the densities, but with a Gaussian smearing of\nstd dev sigma applied.\n\nArgs:\nsigma: Std dev of Gaussian smearing function.\n\nReturns:\nGaussian-smeared densities.", "source": "codesearchnet"}
{"code": "def process_opened_file(self, in_filename, in_file, out_filename, out_file):\n    lines = in_file.readlines()\n    processed_file, new_file_content, log, process_errors = self.update_string_pasta(''.join(lines), in_filename)\n    if out_file and processed_file:\n        out_file.write(new_file_content)\n    return (processed_file, self._format_log(log, in_filename, out_filename), process_errors)", "docstring": "Process the given python file for incompatible changes.\n\nThis function is split out to facilitate StringIO testing from\ntf_upgrade_test.py.\n\nArgs:\nin_filename: filename to parse\nin_file: opened file (or StringIO)\nout_filename: output file to write to\nout_file: opened file (or StringIO)\nReturns:\nA tuple representing number of files processed, log of actions, errors", "source": "github-repos"}
{"code": "def _executeMassiveMethod(path, method, args=None, classArgs = None):\n\t\t\n\t\tresponse = {}\n\n\t\tif args is None:\n\t\t\targs = {}\n\n\t\tif classArgs is None:\n\t\t\tclassArgs = {}\n\n\t\tsys.path.append(path)\n\t\texclude = [\"__init__.py\", \"base.py\"]\n\t\tfor f in AtomShieldsScanner._getFiles(path, \"*.py\", exclude=exclude):\n\t\t\ttry:\n\t\t\t\tinstance = AtomShieldsScanner._getClassInstance(path = f, args = classArgs)\n\t\t\t\tif instance is not None:\n\t\t\t\t\tif callable(method):\n\t\t\t\t\t\targs[\"instance\"] = instance\n\t\t\t\t\t\toutput = method(**args)\n\t\t\t\t\t\tresponse[instance.__class__.NAME] = output\n\t\t\t\t\telse:\n\t\t\t\t\t\tif hasattr(instance, method):\n\t\t\t\t\t\t\toutput = getattr(instance, method)(**args)\n\t\t\t\t\t\t\tresponse[instance.__class__.NAME] = output\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\n\t\t\texcept Exception as e:\n\t\t\t\tAtomShieldsScanner._debug(\"[!] %s\" % e)\n\t\tsys.path.remove(path)\n\t\treturn response", "docstring": "Execute an specific method for each class instance located in path\n\nArgs:\npath (str): Absolute path which contains the .py files\nmethod (str): Method to execute into class instance\n\nReturns:\ndict: Dictionary which contains the response for every class instance.\nThe dictionary keys are the value of 'NAME' class variable.", "source": "juraj-google-style"}
{"code": "def flatten(repertoire, big_endian=False):\n    if (repertoire is None):\n        return None\n    order = ('C' if big_endian else 'F')\n    return repertoire.squeeze().ravel(order=order)", "docstring": "Flatten a repertoire, removing empty dimensions.\n\nBy default, the flattened repertoire is returned in little-endian order.\n\nArgs:\nrepertoire (np.ndarray or None): A repertoire.\n\nKeyword Args:\nbig_endian (boolean): If ``True``, flatten the repertoire in big-endian\norder.\n\nReturns:\nnp.ndarray: The flattened repertoire.", "source": "codesearchnet"}
{"code": "def get_compatibility_log(self):\n    if not self._verified:\n        raise RuntimeError(\"target compatibility isn't verified yet\")\n    return self._log_messages", "docstring": "Returns list of compatibility log messages.\n\nWARNING: This method should only be used for unit tests.\n\nReturns:\nThe list of log messages by the recent compatibility check.\nRaises:\nRuntimeError: when the compatibility was NOT checked.", "source": "github-repos"}
{"code": "def item_to_mrc(code, val):\n    if isinstance(val, basestring):\n        return [val_to_mrc(code, val)]\n    if isinstance(val, dict):\n        val = [val]\n    return dicts_to_mrc(code, val)", "docstring": "Convert `val` to MRC, whether it is dict or string.\n\nArgs:\ncode (str): Code of the field.\nval (str or dict): Value of the field.\n\nReturns:\nlist: MRC lines for output template.", "source": "codesearchnet"}
{"code": "def from_hising(cls, h, J, offset=None):\n    poly = {(k,): v for (k, v) in h.items()}\n    poly.update(J)\n    if (offset is not None):\n        poly[frozenset([])] = offset\n    return cls(poly, Vartype.SPIN)", "docstring": "Construct a binary polynomial from a higher-order Ising problem.\n\nArgs:\nh (dict):\nThe linear biases.\n\nJ (dict):\nThe higher-order biases.\n\noffset (optional, default=0.0):\nConstant offset applied to the model.\n\nReturns:\n:obj:`.BinaryPolynomial`\n\nExamples:\n>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)", "source": "codesearchnet"}
{"code": "def auto_batch_size(sequence_length,\n                    mesh_shape,\n                    layout_rules,\n                    tokens_per_split=2048):\n  \n  num_splits = mtf.tensor_dim_to_mesh_dim_size(\n      layout_rules, mesh_shape, mtf.Dimension(\"batch\", 0))\n  ret = max(1, tokens_per_split \n  tf.logging.info(\n      \"AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s\"\n      \" sequence_length=%s batch_size=%s\"\n      % (tokens_per_split, num_splits, sequence_length, ret))\n  return ret", "docstring": "Automatically compute batch size.\n\nArgs:\nsequence_length: an integer\nmesh_shape: an input to mtf.convert_to_shape()\nlayout_rules: an input to mtf.convert_to_layout_rules()\ntokens_per_split: an integer\nReturns:\nan integer", "source": "juraj-google-style"}
{"code": "def get_global_vars(func):\n    \n    closure = getclosurevars(func)\n    if closure['nonlocal']:\n        raise TypeError(\"Can't launch a job with closure variables: %s\" %\n                        closure['nonlocals'].keys())\n    globalvars = dict(modules={},\n                      functions={},\n                      vars={})\n    for name, value in closure['global'].items():\n        if inspect.ismodule(value):  \n            globalvars['modules'][name] = value.__name__\n        elif inspect.isfunction(value) or inspect.ismethod(value):\n            globalvars['functions'][name] = value\n        else:\n            globalvars['vars'][name] = value\n\n    return globalvars", "docstring": "Store any methods or variables bound from the function's closure\n\nArgs:\nfunc (function): function to inspect\n\nReturns:\ndict: mapping of variable names to globally bound VARIABLES", "source": "juraj-google-style"}
{"code": "def usufyToGmlExport(d, fPath):\n    try:\n        oldData = nx.read_gml(fPath)\n    except UnicodeDecodeError as e:\n        print(('UnicodeDecodeError:\\t' + str(e)))\n        print('Something went wrong when reading the .gml file relating to the decoding of UNICODE.')\n        import time as time\n        fPath += ('_' + str(time.time()))\n        print(((('To avoid losing data, the output file will be renamed to use the timestamp as:\\n' + fPath) + '_') + str(time.time())))\n        print()\n        oldData = nx.Graph()\n    except Exception as e:\n        oldData = nx.Graph()\n    newGraph = _generateGraphData(d, oldData)\n    nx.write_gml(newGraph, fPath)", "docstring": "Workaround to export data to a .gml file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"}
{"code": "def unflatten1(flat_list, reverse_list):\n    \n    unflat_list2 = [[flat_list[index] for index in tup]\n                    for tup in reverse_list]\n    return unflat_list2", "docstring": "Rebuilds unflat list from invertible_flatten1\n\nArgs:\nflat_list (list): the flattened list\nreverse_list (list): the list which undoes flattenting\n\nReturns:\nunflat_list2: original nested list\n\n\nSeeAlso:\ninvertible_flatten1\ninvertible_flatten2\nunflatten2", "source": "juraj-google-style"}
{"code": "def connect(self, host='localhost'):\n        \n\n        \n\n        get_logger().info(\"Connecting to RabbitMQ server...\")\n\n        self._conn = pika.BlockingConnection(\n            pika.ConnectionParameters(host=host))\n        self._channel = self._conn.channel()\n\n        \n\n        get_logger().info(\"Declaring topic exchanger {}...\".format(\n            self.exchange))\n\n        self._channel.exchange_declare(exchange=self.exchange, type='topic')\n\n        \n\n        get_logger().info(\"Creating RabbitMQ queue...\")\n        result = self._channel.queue_declare(exclusive=True)\n\n        self._queue_name = result.method.queue\n\n        \n\n        if self.listen_all:\n            get_logger().info(\n                \"Binding queue to exchanger {} (listen all)...\".format(\n                    self.exchange\n                )\n            )\n            self._channel.queue_bind(\n                exchange=self.exchange,\n                queue=self._queue_name,\n                routing_key='*'\n            )\n        else:\n            for routing_key in self.topics:\n                get_logger().info(\n                    \"Binding queue to exchanger {} \"\n                    \"with routing key {}...\".format(\n                        self.exchange, routing_key)\n                )\n\n                self._channel.queue_bind(\n                    exchange=self.exchange,\n                    queue=self._queue_name,\n                    routing_key=routing_key\n                )\n\n        \n\n        get_logger().info(\"Binding callback...\")\n        self._channel.basic_consume(\n            self._callback, queue=self._queue_name, no_ack=True)", "docstring": "Connect to the server and set everything up.\n\nArgs:\nhost: hostname to connect to", "source": "juraj-google-style"}
{"code": "def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:\n        \n        if len(self.sites) == 1:\n            return True\n        all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]\n        return bool(np.min(all_dists) > tol)", "docstring": "True if SiteCollection does not contain atoms that are too close\ntogether. Note that the distance definition is based on type of\nSiteCollection. Cartesian distances are used for non-periodic\nMolecules, while PBC is taken into account for periodic structures.\n\nArgs:\ntol (float): Distance tolerance. Default is 0.5A.\n\nReturns:\n(bool) True if SiteCollection does not contain atoms that are too\nclose together.", "source": "juraj-google-style"}
{"code": "def relaxed_value_for_var(value, var):\n        \n        assert isinstance(var, tf.Variable)\n        name = var.op.name\n\n        \n        varshape = tuple(var.get_shape().as_list())\n        if varshape != value.shape:\n            \n            if np.prod(varshape) != np.prod(value.shape):\n                raise ValueError(\n                    \"Trying to load a tensor of shape {} into the variable '{}' whose shape is {}.\".format(\n                        value.shape, name, varshape))\n            logger.warn(\"The tensor is reshaped from {} to {} when assigned to '{}'\".format(\n                value.shape, varshape, name))\n            value = value.reshape(varshape)\n\n        \n        def upcast(vartype, valtype):\n            \n            \n            \n            if vartype == tf.float64 and valtype == np.float32:\n                return np.float64\n            if vartype in [tf.int64, tf.int32] and valtype in [np.int32, np.int16, np.int8]:\n                return np.int64 if vartype == tf.int64 else np.int32\n            return None\n\n        if hasattr(value, 'dtype'):\n            vartype = var.dtype.as_numpy_dtype\n            if vartype != value.dtype:\n                msg = \"Variable {} has dtype {} but was given a value of dtype {}.\".format(name, vartype, value.dtype)\n                newtype = upcast(var.dtype.base_dtype, value.dtype)\n                if newtype is not None:\n                    value = newtype(value)\n                    logger.warn(msg + \" Load it after casting!\")\n                else:\n                    assert vartype == value.dtype, msg\n        return value", "docstring": "Returns a relaxed (possibly reshaped/upcast-ed) version of value,\nto be loaded to the given variable.\n\nArgs:\nvalue (ndarray): an numpy array to be loaded to var\nvar (tf.Variable):\n\nReturns:\nndarray: a possibly reshaped or casted version of value", "source": "juraj-google-style"}
{"code": "def _CheckStorageFile(self, storage_file_path):\n    if os.path.exists(storage_file_path):\n        if (not os.path.isfile(storage_file_path)):\n            raise errors.BadConfigOption('Storage file: {0:s} already exists and is not a file.'.format(storage_file_path))\n        logger.warning('Appending to an already existing storage file.')\n    dirname = os.path.dirname(storage_file_path)\n    if (not dirname):\n        dirname = '.'\n    if (not os.access(dirname, os.W_OK)):\n        raise errors.BadConfigOption('Unable to write to storage file: {0:s}'.format(storage_file_path))", "docstring": "Checks if the storage file path is valid.\n\nArgs:\nstorage_file_path (str): path of the storage file.\n\nRaises:\nBadConfigOption: if the storage file path is invalid.", "source": "codesearchnet"}
{"code": "def listdir(*paths, glob=None):\n    \n    path = genpath(*paths)\n\n    names = os.listdir(path)\n    if glob is not None:\n        names = fnmatch.filter(names, glob)\n\n    retn = [os.path.join(path, name) for name in names]\n    return retn", "docstring": "List the (optionally glob filtered) full paths from a dir.\n\nArgs:\n*paths ([str,...]): A list of path elements\nglob (str): An optional fnmatch glob str", "source": "juraj-google-style"}
{"code": "def __getitem__(self, thing: Any) -> np.ndarray:\n\t\t\n\t\tif type(thing) is slice or type(thing) is np.ndarray or type(thing) is int:\n\t\t\tam = AttributeManager(None, axis=self.axis)\n\t\t\tfor key, val in self.items():\n\t\t\t\tam[key] = val[thing]\n\t\t\treturn am\n\t\telif type(thing) is tuple:\n\t\t\t\n\t\t\tresult: np.ndarray = None\n\t\t\tfor t in thing:\n\t\t\t\tif t in self.__dict__[\"storage\"]:\n\t\t\t\t\tif result is None:\n\t\t\t\t\t\tresult = self.__getattr__(t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tvals = self.__getattr__(t)\n\t\t\t\t\t\tif vals.dtype != result.dtype:\n\t\t\t\t\t\t\traise AttributeError(f\"Cannot stack attributes of different types ({vals.dtype} and {result.dtype})\")\n\t\t\t\t\t\tresult = np.vstack((result, vals)).transpose()\n\t\t\tif result is None:\n\t\t\t\traise AttributeError(f\"'{type(self)}' object has no attribute {thing}\")\n\t\t\telse:\n\t\t\t\treturn result\n\t\telse:\n\t\t\treturn self.__getattr__(thing)", "docstring": "Return a named attribute, or a slice through all the attributes\n\nArgs:\nthing:\t\tif string, return the named attribute\nif slice, np.ndarray or int, return a slice through all the attributes", "source": "juraj-google-style"}
{"code": "def fit(self, X, y):\n        \n        self.X = X\n        self.y = y", "docstring": "Fit\n\nArgs:\nX (np.array): Array of hyperparameter values with shape (n_samples, len(tunables))\ny (np.array): Array of scores with shape (n_samples, )", "source": "juraj-google-style"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes the model specified by modelId from the dataset.\n\nArgs:\nrequest: (BigqueryModelsDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BigqueryModelsDeleteResponse) The response message.", "source": "github-repos"}
{"code": "def is_flash_attention_enabled():\n    from keras.src.backend.common import global_state\n    return global_state.get_global_attribute('flash_attention', default=None)", "docstring": "Checks whether flash attention is globally enabled in Keras.\n\nFlash attention is a performance-optimized method for computing attention\nin large models, such as transformers, allowing for faster and more\nmemory-efficient operations. This function checks the global Keras\nconfiguration to determine if flash attention is enabled for compatible\nlayers (e.g., `MultiHeadAttention`).\n\nNote that enabling flash attention does not guarantee it will always be\nused. Typically, the inputs must be in `float16` or `bfloat16` dtype, and\ninput layout requirements may vary depending on the backend.\n\nReturns:\n`False` if disabled; otherwise, it indicates that it is enabled.", "source": "github-repos"}
{"code": "def cancel_all(self, product_id=None):\n    if (product_id is not None):\n        params = {'product_id': product_id}\n    else:\n        params = None\n    return self._send_message('delete', '/orders', params=params)", "docstring": "With best effort, cancel all open orders.\n\nArgs:\nproduct_id (Optional[str]): Only cancel orders for this\nproduct_id\n\nReturns:\nlist: A list of ids of the canceled orders. Example::\n[\n\"144c6f8e-713f-4682-8435-5280fbe8b2b4\",\n\"debe4907-95dc-442f-af3b-cec12f42ebda\",\n\"cf7aceee-7b08-4227-a76c-3858144323ab\",\n\"dfc5ae27-cadb-4c0c-beef-8994936fde8a\",\n\"34fecfbf-de33-4273-b2c6-baf8e8948be4\"\n]", "source": "codesearchnet"}
{"code": "def add_import(self, symbol, source_module_name, source_name, dest_module_name, dest_name):\n    if source_module_name.endswith('python.modules_with_exports'):\n        source_module_name = symbol.__module__\n    import_str = self.format_import(source_module_name, source_name, dest_name)\n    full_api_name = dest_name\n    if dest_module_name:\n        full_api_name = dest_module_name + '.' + full_api_name\n    symbol_id = -1 if not symbol else id(symbol)\n    self._check_already_imported(symbol_id, full_api_name)\n    if not dest_module_name and dest_name.startswith('_'):\n        self._underscore_names_in_root.add(dest_name)\n    priority = 0\n    if symbol:\n        if hasattr(symbol, '__module__'):\n            priority = int(source_module_name == symbol.__module__)\n        if hasattr(symbol, '__name__'):\n            priority += int(source_name == symbol.__name__)\n    self._module_imports[dest_module_name][full_api_name].add((import_str, priority))", "docstring": "Adds this import to module_imports.\n\nArgs:\nsymbol: TensorFlow Python symbol.\nsource_module_name: (string) Module to import from.\nsource_name: (string) Name of the symbol to import.\ndest_module_name: (string) Module name to add import to.\ndest_name: (string) Import the symbol using this name.\n\nRaises:\nSymbolExposedTwiceError: Raised when an import with the same\ndest_name has already been added to dest_module_name.", "source": "github-repos"}
{"code": "def get_list(self, **query_params):\n    list_json = self.get_list_json(self.base_uri, query_params=query_params)\n    return self.create_list(list_json)", "docstring": "Get list information for this card. Returns a List object.\n\nReturns:\nList: The list this card is attached to", "source": "codesearchnet"}
{"code": "def get_sonos_playlist_by_attr(self, attr_name, match):\n    for sonos_playlist in self.get_sonos_playlists():\n        if (getattr(sonos_playlist, attr_name) == match):\n            return sonos_playlist\n    raise ValueError('No match on \"{0}\" for value \"{1}\"'.format(attr_name, match))", "docstring": "Return the first Sonos Playlist DidlPlaylistContainer that\nmatches the attribute specified.\n\nArgs:\nattr_name (str): DidlPlaylistContainer attribute to compare. The\nmost useful being: 'title' and 'item_id'.\nmatch (str): Value to match.\n\nReturns:\n(:class:`~.soco.data_structures.DidlPlaylistContainer`): The\nfirst matching playlist object.\n\nRaises:\n(AttributeError): If indicated attribute name does not exist.\n(ValueError): If a match can not be found.\n\nExample::\n\ndevice.get_sonos_playlist_by_attr('title', 'Foo')\ndevice.get_sonos_playlist_by_attr('item_id', 'SQ:3')", "source": "codesearchnet"}
{"code": "def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files):\n    \n    \n    for cache_address in index_table:\n      cache_address_chain_length = 0\n      while cache_address.value != 0:\n        if cache_address_chain_length >= 64:\n          parser_mediator.ProduceExtractionWarning(\n              'Maximum allowed cache address chain length reached.')\n          break\n\n        data_block_file_object = data_block_files.get(\n            cache_address.filename, None)\n        if not data_block_file_object:\n          message = 'Cache address: 0x{0:08x} missing data file.'.format(\n              cache_address.value)\n          parser_mediator.ProduceExtractionWarning(message)\n          break\n\n        try:\n          cache_entry = self._data_block_file_parser.ParseCacheEntry(\n              data_block_file_object, cache_address.block_offset)\n        except (IOError, errors.ParseError) as exception:\n          parser_mediator.ProduceExtractionWarning(\n              'Unable to parse cache entry with error: {0!s}'.format(\n                  exception))\n          break\n\n        event_data = ChromeCacheEntryEventData()\n        event_data.original_url = cache_entry.original_url\n\n        date_time = dfdatetime_webkit_time.WebKitTime(\n            timestamp=cache_entry.creation_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        cache_address = cache_entry.next\n        cache_address_chain_length += 1", "docstring": "Parses Chrome Cache file entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nindex_table (list[CacheAddress]): the cache addresses which are stored in\nthe index file.\ndata_block_files (dict[str: file]): look up table for the data block\nfile-like object handles.", "source": "juraj-google-style"}
{"code": "def get_storage(request):\n    \n    storage_model = oauth2_settings.storage_model\n    user_property = oauth2_settings.storage_model_user_property\n    credentials_property = oauth2_settings.storage_model_credentials_property\n\n    if storage_model:\n        module_name, class_name = storage_model.rsplit('.', 1)\n        module = importlib.import_module(module_name)\n        storage_model_class = getattr(module, class_name)\n        return storage.DjangoORMStorage(storage_model_class,\n                                        user_property,\n                                        request.user,\n                                        credentials_property)\n    else:\n        \n        return dictionary_storage.DictionaryStorage(\n            request.session, key=_CREDENTIALS_KEY)", "docstring": "Gets a Credentials storage object provided by the Django OAuth2 Helper\nobject.\n\nArgs:\nrequest: Reference to the current request object.\n\nReturns:\nAn :class:`oauth2.client.Storage` object.", "source": "juraj-google-style"}
{"code": "def save(self, resource):\n        \n        resource_type = None\n        xid = None\n        if isinstance(resource, dict):\n            resource_type = resource.get('type')\n            xid = resource.get('xid')\n        else:\n            resource_type = resource.type\n            xid = resource.xid\n\n        if resource_type is not None and xid is not None:\n            saved = True\n            if resource_type in self.tcex.group_types:\n                try:\n                    \n                    self.groups_shelf[xid] = resource\n                except Exception:\n                    saved = False\n\n                if saved:\n                    try:\n                        del self._groups[xid]\n                    except KeyError:\n                        \n                        pass\n            elif resource_type in self.tcex.indicator_types_data.keys():\n                try:\n                    \n                    self.indicators_shelf[xid] = resource\n                except Exception:\n                    saved = False\n\n                if saved:\n                    try:\n                        del self._indicators[xid]\n                    except KeyError:\n                        \n                        pass", "docstring": "Save group|indicator dict or object to shelve.\n\nBest effort to save group/indicator data to disk.  If for any reason the save fails\nthe data will still be accessible from list in memory.\n\nArgs:\nresource (dict|obj): The Group or Indicator dict or object.", "source": "juraj-google-style"}
{"code": "def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,\n                                scope=None, syntax=None):\n    \n\n    if package:\n      desc_name = '.'.join((package, desc_proto.name))\n    else:\n      desc_name = desc_proto.name\n\n    if file_desc is None:\n      file_name = None\n    else:\n      file_name = file_desc.name\n\n    if scope is None:\n      scope = {}\n\n    nested = [\n        self._ConvertMessageDescriptor(\n            nested, desc_name, file_desc, scope, syntax)\n        for nested in desc_proto.nested_type]\n    enums = [\n        self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope)\n        for enum in desc_proto.enum_type]\n    fields = [self._MakeFieldDescriptor(field, desc_name, index)\n              for index, field in enumerate(desc_proto.field)]\n    extensions = [\n        self._MakeFieldDescriptor(extension, desc_name, index,\n                                  is_extension=True)\n        for index, extension in enumerate(desc_proto.extension)]\n    oneofs = [\n        descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)),\n                                   index, None, [], desc.options)\n        for index, desc in enumerate(desc_proto.oneof_decl)]\n    extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]\n    if extension_ranges:\n      is_extendable = True\n    else:\n      is_extendable = False\n    desc = descriptor.Descriptor(\n        name=desc_proto.name,\n        full_name=desc_name,\n        filename=file_name,\n        containing_type=None,\n        fields=fields,\n        oneofs=oneofs,\n        nested_types=nested,\n        enum_types=enums,\n        extensions=extensions,\n        options=_OptionsOrNone(desc_proto),\n        is_extendable=is_extendable,\n        extension_ranges=extension_ranges,\n        file=file_desc,\n        serialized_start=None,\n        serialized_end=None,\n        syntax=syntax)\n    for nested in desc.nested_types:\n      nested.containing_type = desc\n    for enum in desc.enum_types:\n      enum.containing_type = desc\n    for field_index, field_desc in enumerate(desc_proto.field):\n      if field_desc.HasField('oneof_index'):\n        oneof_index = field_desc.oneof_index\n        oneofs[oneof_index].fields.append(fields[field_index])\n        fields[field_index].containing_oneof = oneofs[oneof_index]\n\n    scope[_PrefixWithDot(desc_name)] = desc\n    self._descriptors[desc_name] = desc\n    return desc", "docstring": "Adds the proto to the pool in the specified package.\n\nArgs:\ndesc_proto: The descriptor_pb2.DescriptorProto protobuf message.\npackage: The package the proto should be located in.\nfile_desc: The file containing this message.\nscope: Dict mapping short and full symbols to message and enum types.\nsyntax: string indicating syntax of the file (\"proto2\" or \"proto3\")\n\nReturns:\nThe added descriptor.", "source": "juraj-google-style"}
{"code": "def build_url(self):\n    url = '{protocol}/{url}/{rest}/{version}/{restapi}/{rscpath}/{query}'.format(protocol=self.schema.protocol, url=self.schema.main_url, rest=self.schema.rest, version=self.schema.version, restapi=self.schema.restApi, rscpath=self.schema.resourcePath, query=self.schema.query)\n    return url.replace('/None/', '/')", "docstring": "Builds the URL for elevations API services based on the data given\nby the user.\n\nReturns:\nurl (str): URL for the elevations API services", "source": "codesearchnet"}
{"code": "def _check_sleep(self, op):\n    delay = 0.3\n    start_t = time.time()\n    func = tf.function(lambda: op(delay))\n    results = self.evaluate(func())\n    end_t = time.time()\n    delta_t = end_t - start_t\n    self.assertEqual(results.shape, tuple())\n    self.assertGreater(delta_t, 0.9 * delay)", "docstring": "Check that one sleep op works in isolation.\n\nSee sleep_bin.py for an example of how the synchronous and asynchronous\nsleep ops differ in behavior.\n\nArgs:\nop: The sleep op, either sleep_op.SyncSleep or sleep_op.AsyncSleep.", "source": "github-repos"}
{"code": "def setOutBoundLinkQuality(self, LinkQuality):\n        \n        print '%s call setOutBoundLinkQuality' % self.port\n        print LinkQuality\n        try:\n            cmd = 'macfilter rss add-lqi * %s' % str(LinkQuality)\n            print cmd\n            return self.__sendCommand(cmd)[0] == 'Done'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"setOutBoundLinkQuality() Error: \" + str(e))", "docstring": "set custom LinkQualityIn for all receiving messages from the any address\n\nArgs:\nLinkQuality: a given custom link quality\nlink quality/link margin mapping table\n3: 21 - 255 (dB)\n2: 11 - 20 (dB)\n1: 3 - 9 (dB)\n0: 0 - 2 (dB)\n\nReturns:\nTrue: successful to set the link quality\nFalse: fail to set the link quality", "source": "juraj-google-style"}
{"code": "def get_processed_events(self) -> List[Event]:\n    event_ids = DB.get_list(self._processed_key)\n    events = []\n    for event_id in event_ids:\n        event_str = DB.get_hash_value(self._data_key, event_id)\n        event_dict = ast.literal_eval(event_str)\n        event_dict['id'] = event_id\n        event_dict['subscriber'] = self._subscriber\n        events.append(Event.from_config(event_dict))\n    return events", "docstring": "Get all processed events.\n\nThis method is intended to be used to recover events stuck in the\nprocessed state which could happen if an event handling processing\nan processed event goes down before completing the event processing.\n\nReturns:\nlist[Events], list of event objects.", "source": "codesearchnet"}
{"code": "def read_xyz(cls, buf, start_index=0, get_bonds=True,\n                 nrows=None, engine=None):\n        \n        frame = pd.read_table(buf, skiprows=2, comment='\n                              nrows=nrows,\n                              delim_whitespace=True,\n                              names=['atom', 'x', 'y', 'z'], engine=engine)\n\n        remove_digits = partial(re.sub, r'[0-9]+', '')\n        frame['atom'] = frame['atom'].apply(remove_digits)\n\n        molecule = cls(frame)\n        molecule.index = range(start_index, start_index + len(molecule))\n\n        if get_bonds:\n            molecule.get_bonds(use_lookup=False, set_lookup=True)\n        return molecule", "docstring": "Read a file of coordinate information.\n\nReads xyz-files.\n\nArgs:\ninputfile (str):\nstart_index (int):\nget_bonds (bool):\nnrows (int): Number of rows of file to read.\nNote that the first two rows are implicitly excluded.\nengine (str): Wrapper for argument of :func:`pandas.read_csv`.\n\nReturns:\nCartesian:", "source": "juraj-google-style"}
{"code": "def read_windows_environ():\n    res = winapi.GetEnvironmentStringsW()\n    if (not res):\n        raise ctypes.WinError()\n    res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))\n    done = []\n    current = u''\n    i = 0\n    while 1:\n        c = res[i]\n        i += 1\n        if (c == u'\\x00'):\n            if (not current):\n                break\n            done.append(current)\n            current = u''\n            continue\n        current += c\n    dict_ = {}\n    for entry in done:\n        try:\n            (key, value) = entry.split(u'=', 1)\n        except ValueError:\n            continue\n        key = _norm_key(key)\n        dict_[key] = value\n    status = winapi.FreeEnvironmentStringsW(res)\n    if (status == 0):\n        raise ctypes.WinError()\n    return dict_", "docstring": "Returns a unicode dict of the Windows environment.\n\nRaises:\nWindowsEnvironError", "source": "codesearchnet"}
{"code": "def convert_upsample(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting upsample...')\n\n    if params['mode'] != 'nearest':\n        raise AssertionError('Cannot convert non-nearest upsampling')\n\n    if names == 'short':\n        tf_name = 'UPSL' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    if 'height_scale' in params:\n        scale = (params['height_scale'], params['width_scale'])\n    elif len(inputs) == 2:\n        scale = layers[inputs[-1] + '_np'][-2:]\n\n    upsampling = keras.layers.UpSampling2D(\n        size=scale, name=tf_name\n    )\n    layers[scope_name] = upsampling(layers[inputs[0]])", "docstring": "Convert nearest upsampling layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    login_type = event_values.get('type', None)\n    if login_type is None:\n      status = 'N/A'\n    else:\n      status = self._STATUS_TYPES.get(login_type, 'UNKNOWN')\n\n    event_values['status'] = status\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def average(numbers, numtype='float'):\n    \n    if type == 'decimal':\n        return Decimal(sum(numbers)) / len(numbers)\n    else:\n        return float(sum(numbers)) / len(numbers)", "docstring": "Calculates the average or mean of a list of numbers\n\nArgs:\nnumbers: a list of integers or floating point numbers.\n\nnumtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\nThe average (mean) of the numbers as a floating point number\nor a Decimal object.\n\nRequires:\nThe math module", "source": "juraj-google-style"}
{"code": "def _compile_pvariable_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:\n    etype = expr.etype\n    args = expr.args\n    name = expr._pvar_to_name(args)\n    if (name not in scope):\n        raise ValueError('Variable {} not in scope.'.format(name))\n    fluent = scope[name]\n    scope = (args[1] if (args[1] is not None) else [])\n    if isinstance(fluent, TensorFluent):\n        fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch)\n    elif isinstance(fluent, tf.Tensor):\n        fluent = TensorFluent(fluent, scope, batch=self.batch_mode)\n    else:\n        raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent))\n    return fluent", "docstring": "Compile a pvariable expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "codesearchnet"}
{"code": "def preview(self, n=10, k='items', kheader='displayLink', klink='link', kdescription='snippet'):\n    \n    if 'searchType' in self.cseargs:\n      searchType = self.cseargs['searchType']\n    else:\n      searchType = None\n    items = self.metadata[k]\n  \n    \n    for i, kv in enumerate(items[:n]):\n      if 'start' in self.cseargs:\n        i += int(self.cseargs['start'])\n      \n      \n      header = '\\n[' + str(i) + '] ' + kv[kheader]\n      print(header)\n      print('=' * len(header))\n      \n      \n      if searchType == 'image':\n        link = '\\n' + path.basename(kv[klink])\n        print(link)\n        \n      \n      description = '\\n' + kv[kdescription]\n      print(description)", "docstring": "Print a preview of the search results.\n\nArgs:\nn (int):\nMaximum number of search results to preview\nk (str):\nKey in :class:`api.results`.metadata to preview\nkheader (str):\nKey in :class:`api.results`.metadata[``k``] to use as the header\nklink (str):\nKey in :class:`api.results`.metadata[``k``] to use as the link if image search\nkdescription (str):\nKey in :class:`api.results`.metadata[``k``] to use as the description", "source": "juraj-google-style"}
{"code": "def _full_shape_filter(t: List, shapes: List) -> bool:\n        \n\n        if shapes:\n            for a_token in t:\n                if a_token._.full_shape not in shapes:\n                    return False\n\n        return True", "docstring": "Shape filter\nArgs:\nt: List, list of tokens\nshapes: List\n\nReturns: bool", "source": "juraj-google-style"}
{"code": "def isbase(path1, path2):\n    _path1 = forcedir(abspath(path1))\n    _path2 = forcedir(abspath(path2))\n    return _path2.startswith(_path1)", "docstring": "Check if ``path1`` is a base of ``path2``.\n\nArguments:\npath1 (str): A PyFilesytem path.\npath2 (str): A PyFilesytem path.\n\nReturns:\nbool: `True` if ``path2`` starts with ``path1``\n\nExample:\n>>> isbase('foo/bar', 'foo/bar/baz/egg.txt')\nTrue", "source": "codesearchnet"}
{"code": "def _is_quantized_input_stats_required(conversion_flags: _conversion_flags_pb2.ConverterFlags) -> bool:\n    quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.QUANTIZED_INT8]\n    return (conversion_flags.inference_type in quantized_inference_types or conversion_flags.inference_input_type in quantized_inference_types) and (not conversion_flags.post_training_quantize)", "docstring": "Checks if the `quantized_input_stats` flag is required for conversion.\n\nArgs:\nconversion_flags: A protocol buffer describing the conversion process.\n\nReturns:\nTrue, if the `inference_type` or the `inference_input_type` is a quantized\ntype and it is not post training quantization, else False.", "source": "github-repos"}
{"code": "def set_default(self, default: Any, use_default_apply: bool=True, root_path: Optional[utils.KeyPath]=None) -> 'ValueSpec':", "docstring": "Sets the default value and returns `self`.\n\nArgs:\ndefault: Default value.\nuse_default_apply: If True, invoke `apply` to the value, otherwise use\ndefault value as is.\nroot_path: (Optional) The path of the field.\n\nReturns:\nValueSpec itself.\n\nRaises:\nValueError: If default value cannot be applied when use_default_apply\nis set to True.", "source": "github-repos"}
{"code": "def load_data(self, data, datatype='ttl', namespace=None, graph=None, is_file=False, **kwargs):\n    log.setLevel(kwargs.get('log_level', self.log_level))\n    time_start = datetime.datetime.now()\n    datatype_map = {'ttl': 'text/turtle', 'xml': 'application/rdf+xml', 'rdf': 'application/rdf+xml', 'nt': 'text/plain'}\n    if is_file:\n        datatype = data.split(os.path.extsep)[(- 1)]\n        file_name = data\n        log.debug('starting data load of %s', file_name)\n        data = open(data, 'rb').read()\n    else:\n        try:\n            data = data.encode('utf-8')\n        except AttributeError:\n            pass\n    try:\n        content_type = datatype_map[datatype]\n    except KeyError:\n        raise NotImplementedError(\"'%s' is not an implemented data format\", datatype)\n    context_uri = pick(graph, self.graph)\n    result = requests.post(url=self._make_url(namespace), headers={'Content-Type': content_type}, params={'context-uri': context_uri}, data=data)\n    if (result.status_code == 200):\n        if is_file:\n            log.info(' loaded %s into blazegraph - %s', file_name, self.format_response(result.text))\n        else:\n            log.info(' loaded data - %s', self.format_response(result.text))\n        log.setLevel(self.log_level)\n        return result\n    else:\n        raise SyntaxError(result.text)", "docstring": "Loads data via file stream from python to triplestore\n\nArgs:\n-----\ndata: The data or filepath to load\ndatatype(['ttl', 'xml', 'rdf']): the type of data to load\nnamespace: the namespace to use\ngraph: the graph to load the data to.\nis_file(False): If true python will read the data argument as a\nfilepath, determine the datatype from the file extension,\nread the file and send it to blazegraph as a datastream", "source": "codesearchnet"}
{"code": "def group(text, size):\n    if (size <= 0):\n        raise ValueError('n must be a positive integer')\n    return [text[i:(i + size)] for i in range(0, len(text), size)]", "docstring": "Group ``text`` into blocks of ``size``.\n\nExample:\n>>> group(\"test\", 2)\n['te', 'st']\n\nArgs:\ntext (str): text to separate\nsize (int): size of groups to split the text into\n\nReturns:\nList of n-sized groups of text\n\nRaises:\nValueError: If n is non positive", "source": "codesearchnet"}
{"code": "def coupling(self, source_y, target_y, weight):\n    return ((np.ones_like(target_y) * np.mean(source_y)) * weight)", "docstring": "How to couple the output of one subsystem to the input of another.\n\nThis is a fallback default coupling function that should usually be\nreplaced with your own.\n\nThis example coupling function takes the mean of all variables of the\nsource subsystem and uses that value weighted by the connection\nstrength to drive all variables of the target subsystem.\n\nArguments:\nsource_y (array of shape (d,)): State of the source subsystem.\ntarget_y (array of shape (d,)): State of target subsystem.\nweight (float): the connection strength for this connection.\n\nReturns:\ninput (array of shape (d,)): Values to drive each variable of the\ntarget system.", "source": "codesearchnet"}
{"code": "def coalescence_waiting_times(self, backward=True):\n        \n        if not isinstance(backward, bool):\n            raise TypeError(\"backward must be a bool\")\n        times = list(); lowest_leaf_dist = float('-inf')\n        for n,d in self.distances_from_root():\n            if len(n.children) > 1:\n                times.append(d)\n            elif len(n.children) == 0 and d > lowest_leaf_dist:\n                lowest_leaf_dist = d\n        times.append(lowest_leaf_dist)\n        times.sort(reverse=backward)\n        for i in range(len(times)-1):\n            yield abs(times[i]-times[i+1])", "docstring": "Generator over the waiting times of successive coalescence events\n\nArgs:\n``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``", "source": "juraj-google-style"}
{"code": "def decode_payload(cls, request):\n    if (request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION):\n        raise DeprecationWarning('Task is generated by an older incompatible version of mapreduce. Please kill this job manually')\n    return cls._decode_payload(request.body)", "docstring": "Decode task payload.\n\nHugeTask controls its own payload entirely including urlencoding.\nIt doesn't depend on any particular web framework.\n\nArgs:\nrequest: a webapp Request instance.\n\nReturns:\nA dict of str to str. The same as the params argument to __init__.\n\nRaises:\nDeprecationWarning: When task payload constructed from an older\nincompatible version of mapreduce.", "source": "codesearchnet"}
{"code": "def _transform_indices(self, key):\n        \n        ndims = self.ndims\n        if all(not (isinstance(el, slice) or callable(el)) for el in key):\n            dim_inds = []\n            for dim in self.kdims:\n                dim_type = self.get_dimension_type(dim)\n                if isinstance(dim_type, type) and issubclass(dim_type, Number):\n                    dim_inds.append(self.get_dimension_index(dim))\n            str_keys = iter(key[i] for i in range(self.ndims)\n                            if i not in dim_inds)\n            num_keys = []\n            if len(dim_inds):\n                keys = list({tuple(k[i] if ndims > 1 else k for i in dim_inds)\n                             for k in self.keys()})\n                q = np.array([tuple(key[i] if ndims > 1 else key for i in dim_inds)])\n                idx = np.argmin([np.inner(q - np.array(x), q - np.array(x))\n                                 if len(dim_inds) == 2 else np.abs(q-x)\n                                     for x in keys])\n                num_keys = iter(keys[idx])\n            key = tuple(next(num_keys) if i in dim_inds else next(str_keys)\n                        for i in range(self.ndims))\n        elif any(not (isinstance(el, slice) or callable(el)) for el in key):\n            keys = self.keys()\n            for i, k in enumerate(key):\n                if isinstance(k, slice):\n                    continue\n                dim_keys = np.array([ke[i] for ke in keys])\n                if dim_keys.dtype.kind in 'OSU':\n                    continue\n                snapped_val = dim_keys[np.argmin(np.abs(dim_keys-k))]\n                key = list(key)\n                key[i] = snapped_val\n            key = tuple(key)\n        return key", "docstring": "Snaps indices into the GridSpace to the closest coordinate.\n\nArgs:\nkey: Tuple index into the GridSpace\n\nReturns:\nTransformed key snapped to closest numeric coordinates", "source": "juraj-google-style"}
{"code": "def serialize_to_list(self, name, datas):\n    items = datas.get('items', None)\n    splitter = datas.get('splitter', self._DEFAULT_SPLITTER)\n    if (items is None):\n        msg = \"List reference '{}' lacks of required 'items' variable or is empty\"\n        raise SerializerError(msg.format(name))\n    else:\n        items = self.value_splitter(name, 'items', items, mode=splitter)\n    return items", "docstring": "Serialize given datas to a list structure.\n\nList structure is very simple and only require a variable ``--items``\nwhich is a string of values separated with an empty space. Every other\nproperties are ignored.\n\nArguments:\nname (string): Name only used inside possible exception message.\ndatas (dict): Datas to serialize.\n\nReturns:\nlist: List of serialized reference datas.", "source": "codesearchnet"}
{"code": "def get_load_balancer(self, id):\n    return LoadBalancer.get_object(api_token=self.token, id=id)", "docstring": "Returns a Load Balancer object by its ID.\n\nArgs:\nid (str): Load Balancer ID", "source": "codesearchnet"}
{"code": "def from_sub_model_configs(cls, text_config: ClvpEncoderConfig, speech_config: ClvpEncoderConfig, decoder_config: ClvpDecoderConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), speech_config=speech_config.to_dict(), decoder_config=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model\nconfiguration and CLVP decoder model configuration.\n\nArgs:\ntext_config (`ClvpEncoderConfig`):\nText model configuration of type [`ClvpEncoderConfig`].\nspeech_config (`ClvpEncoderConfig`):\nSpeech model configuration of type [`ClvpEncoderConfig`].\ndecoder_config (`ClvpDecoderConfig`):\nDecoder model configuration of type [`ClvpDecoderConfig`].\n\nReturns:\n[`ClvpConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def pop(stack, op_id):\n  \n  if __debug__:\n    pushed_value, pushed_op_id = stack.pop()\n    assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)\n  else:\n    pushed_value = stack.pop()\n  return pushed_value", "docstring": "Pop a value from the stack (i.e. read it from the tape).\n\nArgs:\nstack: The stack to pop from.\nop_id: A unique variable that is also passed into the matching push.\nAllows optimization passes to track pairs of pushes and pops.\n\nReturns:\nThe last value.", "source": "juraj-google-style"}
{"code": "def __init__(self, graph, canonical_device=None):\n    \n    self._graph = graph\n    self.canonical_device = canonical_device\n    self._operations = self._initialize_operations()\n    self._operation_name_to_id = self._initialize_operation_name_to_id()\n    self._tensor_name_to_ids = self._initialize_tensor_name_to_ids()\n    self._final_tensors = set()", "docstring": "Initializer.\n\nArgs:\ngraph: either a tf.Graph or mtf.Graph.\ncanonical_device: optional string, the name of the canonical device for\nIsTensoronCanonicalDevice.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    for subkey in registry_key.GetSubkeys():\n      values_dict = {}\n      values_dict['subkey_name'] = subkey.name\n\n      vendor_identification = None\n      product_identification = None\n      try:\n        subkey_name_parts = subkey.name.split('&')\n        if len(subkey_name_parts) >= 2:\n          vendor_identification = subkey_name_parts[0]\n          product_identification = subkey_name_parts[1]\n      except ValueError as exception:\n        logger.warning(\n            'Unable to split string: {0:s} with error: {1!s}'.format(\n                subkey.name, exception))\n\n      if vendor_identification and product_identification:\n        values_dict['vendor'] = vendor_identification\n        values_dict['product'] = product_identification\n\n      for devicekey in subkey.GetSubkeys():\n        values_dict['serial'] = devicekey.name\n\n        event_data = windows_events.WindowsRegistryEventData()\n        event_data.key_path = registry_key.path\n        event_data.offset = registry_key.offset\n        event_data.regvalue = values_dict\n        event_data.source_append = self._SOURCE_APPEND\n\n        \n        event = time_events.DateTimeValuesEvent(\n            devicekey.last_written_time,\n            definitions.TIME_DESCRIPTION_LAST_CONNECTED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def remove(path, force=False):\n    path = os.path.expanduser(path)\n    if (not os.path.isabs(path)):\n        raise SaltInvocationError('File path must be absolute: {0}'.format(path))\n    if ((not os.path.exists(path)) and (not is_link(path))):\n        raise CommandExecutionError('Path not found: {0}'.format(path))\n    if force:\n        file_attributes = win32api.GetFileAttributes(path)\n        win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)\n    try:\n        if os.path.isfile(path):\n            os.remove(path)\n        elif is_link(path):\n            os.rmdir(path)\n        else:\n            for name in os.listdir(path):\n                item = '{0}\\\\{1}'.format(path, name)\n                remove(item, force)\n            os.rmdir(path)\n    except (OSError, IOError) as exc:\n        if force:\n            win32api.SetFileAttributes(path, file_attributes)\n        raise CommandExecutionError(\"Could not remove '{0}': {1}\".format(path, exc))\n    return True", "docstring": "Remove the named file or directory\n\nArgs:\npath (str): The path to the file or directory to remove.\nforce (bool): Remove even if marked Read-Only. Default is False\n\nReturns:\nbool: True if successful, False if unsuccessful\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.remove C:\\\\Temp", "source": "codesearchnet"}
{"code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self.name.startswith(export_scope):\n        context_def = control_flow_pb2.WhileContextDef()\n        context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n        context_def.parallel_iterations = self._parallel_iterations\n        if self._maximum_iterations is not None:\n            context_def.maximum_iterations_name = ops.strip_name_scope(self._maximum_iterations.name, export_scope)\n        context_def.back_prop = self._back_prop\n        context_def.swap_memory = self._swap_memory\n        context_def.pivot_for_pred_name = ops.strip_name_scope(self._pivot_for_pred.name, export_scope)\n        context_def.pivot_for_body_name = ops.strip_name_scope(self._pivot_for_body.name, export_scope)\n        context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)\n        context_def.loop_exit_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits])\n        context_def.loop_enter_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters])\n        context_def.values_def.MergeFrom(super(WhileContext, self)._to_values_def(export_scope=export_scope))\n        for nested in self._nested_contexts:\n            nested_def = context_def.nested_contexts.add()\n            nested.to_control_flow_context_def(nested_def)\n        return context_def\n    else:\n        return None", "docstring": "Converts a `WhileContext` to a `WhileContextDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `WhileContextDef` protocol buffer.", "source": "github-repos"}
{"code": "def __init__(self, scope, parent, id, name, result):\n        \n        CodeEntity.__init__(self, scope, parent)\n        self.id = id\n        self.name = name\n        self.result = result\n        self.value = None\n        self.member_of = None\n        self.references = []\n        self.writes = []", "docstring": "Constructor for variables.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nid: An unique identifier for this variable.\nname (str): The name of the variable in the program.\nresult (str): The type of the variable in the program.", "source": "juraj-google-style"}
{"code": "def InsertAll(self, request, global_params=None):\n    config = self.GetMethodConfig('InsertAll')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.\n\nArgs:\nrequest: (BigqueryTabledataInsertAllRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(TableDataInsertAllResponse) The response message.", "source": "github-repos"}
{"code": "def refactor_tree(self, tree, name):\n        \n\n        for fixer in chain(self.pre_order, self.post_order):\n            fixer.start_tree(tree, name)\n\n        \n        self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())\n        self.traverse_by(self.bmi_post_order_heads, tree.post_order())\n\n        \n        match_set = self.BM.run(tree.leaves())\n\n        while any(match_set.values()):\n            for fixer in self.BM.fixers:\n                if fixer in match_set and match_set[fixer]:\n                    \n                    match_set[fixer].sort(key=pytree.Base.depth, reverse=True)\n\n                    if fixer.keep_line_order:\n                        \n                        \n                        match_set[fixer].sort(key=pytree.Base.get_lineno)\n\n                    for node in list(match_set[fixer]):\n                        if node in match_set[fixer]:\n                            match_set[fixer].remove(node)\n\n                        try:\n                            find_root(node)\n                        except ValueError:\n                            \n                            \n                            continue\n\n                        if node.fixers_applied and fixer in node.fixers_applied:\n                            \n                            continue\n\n                        results = fixer.match(node)\n\n                        if results:\n                            new = fixer.transform(node, results)\n                            if new is not None:\n                                node.replace(new)\n                                \n                                for node in new.post_order():\n                                    \n                                    \n                                    if not node.fixers_applied:\n                                        node.fixers_applied = []\n                                    node.fixers_applied.append(fixer)\n\n                                \n                                \n                                new_matches = self.BM.run(new.leaves())\n                                for fxr in new_matches:\n                                    if not fxr in match_set:\n                                        match_set[fxr]=[]\n\n                                    match_set[fxr].extend(new_matches[fxr])\n\n        for fixer in chain(self.pre_order, self.post_order):\n            fixer.finish_tree(tree, name)\n        return tree.was_changed", "docstring": "Refactors a parse tree (modifying the tree in place).\n\nFor compatible patterns the bottom matcher module is\nused. Otherwise the tree is traversed node-to-node for\nmatches.\n\nArgs:\ntree: a pytree.Node instance representing the root of the tree\nto be refactored.\nname: a human-readable name for this tree.\n\nReturns:\nTrue if the tree was modified, False otherwise.", "source": "juraj-google-style"}
{"code": "def Append(self, new_values):\n        \n        newrow = self.NewRow()\n        newrow.values = new_values\n        self._table.append(newrow)", "docstring": "Adds a new row (list) to the table.\n\nArgs:\nnew_values: Tuple, dict, or Row() of new values to append as a row.\n\nRaises:\nTableError: Supplied tuple not equal to table width.", "source": "juraj-google-style"}
{"code": "def _signal_handler(self, signal_interupt, frame):  \n        \n        if self.container is not None:\n            print('{}{}Stopping docker container.'.format(c.Style.BRIGHT, c.Fore.YELLOW))\n            self.container.stop()\n        print('{}{}Interrupt signal received.'.format(c.Style.BRIGHT, c.Fore.RED))\n        self.log.error('tcrun received an interrupt signal and will now exit.')\n        sys.exit(1)", "docstring": "Handle singal interrupt.\n\nArgs:\nsignal_interupt ([type]): [Description]\nframe ([type]): [Description]", "source": "juraj-google-style"}
{"code": "def open_sequence(path: Union[str, os.PathLike[str]], mode: str='r', *, perms: Optional[int]=436, serializer: Optional[Callable[[Any], Union[bytes, str]]]=None, deserializer: Optional[Callable[[Union[bytes, str]], Any]]=None, make_dirs_if_not_exist: bool=True) -> Sequence:\n    if 'w' in mode or 'a' in mode:\n        parent_dir = os.path.dirname(path)\n        if make_dirs_if_not_exist:\n            file_system.mkdirs(parent_dir, exist_ok=True)\n    return _registry.get(path).open(path, mode, perms=perms, serializer=serializer, deserializer=deserializer)", "docstring": "Open sequence for reading or writing.\n\nArgs:\npath: The path to the sequence.\nmode: The mode of the sequence.\nperms: (Optional) The permissions of the sequence.\nserializer: (Optional) A serializer function for converting a structured\nobject to a string or bytes.\ndeserializer: (Optional) A deserializer function for converting a string or\nbytes to a structured object.\nmake_dirs_if_not_exist: (Optional) Whether to create the directories\nif they do not exist. Applicable when opening in write or append mode.\n\nReturns:\nA sequence for reading or writing.", "source": "github-repos"}
{"code": "def build_input_pipeline(x, y, batch_size):\n    training_dataset = tf.data.Dataset.from_tensor_slices((x, y))\n    training_batches = training_dataset.repeat().batch(batch_size)\n    training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)\n    (batch_features, batch_labels) = training_iterator.get_next()\n    return (batch_features, batch_labels)", "docstring": "Build a Dataset iterator for supervised classification.\n\nArgs:\nx: Numpy `array` of features, indexed by the first dimension.\ny: Numpy `array` of labels, with the same first dimension as `x`.\nbatch_size: Number of elements in each training batch.\n\nReturns:\nbatch_features: `Tensor` feed  features, of shape\n`[batch_size] + x.shape[1:]`.\nbatch_labels: `Tensor` feed of labels, of shape\n`[batch_size] + y.shape[1:]`.", "source": "codesearchnet"}
{"code": "def gene_by_alias(self, symbol, build='37'):\n        \n        res = self.hgnc_collection.find({'hgnc_symbol': symbol, 'build':build})\n        if res.count() == 0:\n            res = self.hgnc_collection.find({'aliases': symbol, 'build':build})\n\n        return res", "docstring": "Return a iterable with hgnc_genes.\n\nIf the gene symbol is listed as primary the iterable will only have\none result. If not the iterable will include all hgnc genes that have\nthe symbol as an alias.\n\nArgs:\nsymbol(str)\nbuild(str)\n\nReturns:\nres(pymongo.Cursor(dict))", "source": "juraj-google-style"}
{"code": "def read(url, encoding=None, cache=None, mode='rb'):\n    with read_handle(url, cache, mode=mode) as handle:\n        data = handle.read()\n    if encoding:\n        data = data.decode(encoding)\n    return data", "docstring": "Read from any URL.\n\nInternally differentiates between URLs supported by tf.gfile, such as URLs\nwith the Google Cloud Storage scheme ('gs://...') or local paths, and HTTP\nURLs. This way users don't need to know about the underlying fetch mechanism.\n\nArgs:\nurl: a URL including scheme or a local path\nmode: mode in which to open the file. defaults to binary ('rb')\nencoding: if specified, encoding that should be used to decode read data\nif mode is specified to be text ('r'), this defaults to 'utf-8'.\ncache: whether to attempt caching the resource. Defaults to True only if\nthe given URL specifies a remote resource.\nReturns:\nAll bytes form the specified resource, or a decoded string of those.", "source": "codesearchnet"}
{"code": "def unmanaged_devices(self):\n    if (not self.__unmanaged_devices):\n        self.__unmanaged_devices = UnmanagedDevices(self.__connection)\n    return self.__unmanaged_devices", "docstring": "Gets the Unmanaged Devices API client.\n\nReturns:\nUnmanagedDevices:", "source": "codesearchnet"}
{"code": "def percentile_nearest(self, percentile):\n    \n\n    if self._input_csv_files:\n      df = self._get_data_from_csv_files()\n      if 'target' not in df or 'predicted' not in df:\n        raise ValueError('Cannot find \"target\" or \"predicted\" column')\n\n      df = df[['target', 'predicted']].apply(pd.to_numeric)\n      abs_errors = np.array((df['target'] - df['predicted']).apply(abs))\n      return np.percentile(abs_errors, percentile, interpolation='nearest')\n    elif self._bigquery:\n      query = bq.Query( % (float(percentile) / 100, self._bigquery))\n      df = self._get_data_from_bigquery([query])\n      if df.empty:\n        return None\n      return df['percentile'][0]", "docstring": "Get nearest percentile from regression model evaluation results.\n\nArgs:\npercentile: a 0~100 float number.\n\nReturns:\nthe percentile float number.\n\nRaises:\nException if the CSV headers do not include 'target' or 'predicted', or BigQuery\ndoes not return 'target' or 'predicted' column, or if target or predicted is not\nnumber.", "source": "juraj-google-style"}
{"code": "def get_duration_h_m(start: Union[str, DateTime],\n                     end: Union[str, DateTime],\n                     default: str = \"N/A\") -> str:\n    \n    start = coerce_to_pendulum(start)\n    end = coerce_to_pendulum(end)\n    if start is None or end is None:\n        return default\n    duration = end - start\n    minutes = duration.in_minutes()\n    (hours, minutes) = divmod(minutes, 60)\n    if hours < 0:\n        \n        \n        \n        hours += 1\n        minutes = 60 - minutes\n        return \"-{}:{}\".format(hours, \"00\" if minutes == 0 else minutes)\n    else:\n        return \"{}:{}\".format(hours, \"00\" if minutes == 0 else minutes)", "docstring": "Calculate the time between two dates/times expressed as strings.\n\nArgs:\nstart: start date/time\nend: end date/time\ndefault: string value to return in case either of the inputs is\n``None``\n\nReturns:\na string that is one of\n\n.. code-block:\n\n'hh:mm'\n'-hh:mm'\ndefault", "source": "juraj-google-style"}
{"code": "def kmip_version(self, value):\n        \n        if isinstance(value, enums.KMIPVersion):\n            self.proxy.kmip_version = value\n        else:\n            raise ValueError(\"KMIP version must be a KMIPVersion enumeration\")", "docstring": "Set the KMIP version for the client.\n\nArgs:\nvalue (KMIPVersion): A KMIPVersion enumeration\n\nReturn:\nNone\n\nRaises:\nValueError: if value is not a KMIPVersion enumeration\n\nExample:\n>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1\n>>>", "source": "juraj-google-style"}
{"code": "def createCategoryFilter(self, filterName, positiveExamples, negativeExamples=[]):\n        \n        samples = {\"positiveExamples\": [{\"text\": s} for s in positiveExamples],\n                   \"negativeExamples\": [{\"text\": s} for s in negativeExamples]}\n        body = json.dumps(samples)\n        return self._classify.createCategoryFilter(self._retina, filterName, body)", "docstring": "Get a classifier filter (fingerprint) for positive and negative text samples\nArgs:\nfilterName, str: A unique name for the filter. (required)\npositiveExamples, list(str): The list of positive example texts. (required)\nnegativeExamples, list(str): The list of negative example texts. (optional)\nReturns:\nCategoryFilter\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def expand_abbreviations(self, text):\n        \n        if not self.abbreviations:\n            raise LexiconError(\"No abbreviations in lexicon.\")\n\n        def chunks(data, SIZE=25):\n            \n            it = iter(data)\n            for i in range(0, len(data), SIZE):\n                yield {k: data[k] for k in islice(it, SIZE)}\n\n        def cb(g):\n            \n            return self.abbreviations.get(g.group(0)) or g.group(0)\n\n        \n\n        \n        \n        text = re.sub(r'w/', r'wi', text)\n\n        \n        for subdict in chunks(self.abbreviations):\n            regex = r'(\\b' + r'\\b)|(\\b'.join(subdict.keys()) + r'\\b)'\n            text = re.sub(regex, cb, text)\n\n        return text", "docstring": "Parse a piece of text and replace any abbreviations with their full\nword equivalents. Uses the lexicon.abbreviations dictionary to find\nabbreviations.\n\nArgs:\ntext (str): The text to parse.\n\nReturns:\nstr: The text with abbreviations replaced.", "source": "juraj-google-style"}
{"code": "def pubsub_pop_message(self, deadline=None):\n    if (not self.subscribed):\n        excep = ClientError('you must subscribe before using pubsub_pop_message')\n        raise tornado.gen.Return(excep)\n    reply = None\n    try:\n        reply = self._reply_list.pop(0)\n        raise tornado.gen.Return(reply)\n    except IndexError:\n        pass\n    if (deadline is not None):\n        td = timedelta(seconds=deadline)\n        (yield self._condition.wait(timeout=td))\n    else:\n        (yield self._condition.wait())\n    try:\n        reply = self._reply_list.pop(0)\n    except IndexError:\n        pass\n    raise tornado.gen.Return(reply)", "docstring": "Pops a message for a subscribed client.\n\nArgs:\ndeadline (int): max number of seconds to wait (None => no timeout)\n\nReturns:\nFuture with the popped message as result (or None if timeout\nor ConnectionError object in case of connection errors\nor ClientError object if you are not subscribed)", "source": "codesearchnet"}
{"code": "def get_orbital_resolved_cohp(self, label, orbitals):\n    if (self.orb_res_cohp is None):\n        return None\n    elif (isinstance(orbitals, list) or isinstance(orbitals, tuple)):\n        cohp_orbs = [d['orbitals'] for d in self.orb_res_cohp[label].values()]\n        orbs = []\n        for orbital in orbitals:\n            if isinstance(orbital[1], int):\n                orbs.append(tuple((orbital[0], Orbital(orbital[1]))))\n            elif isinstance(orbital[1], Orbital):\n                orbs.append(tuple((orbital[0], orbital[1])))\n            elif isinstance(orbital[1], str):\n                orbs.append(tuple((orbital[0], Orbital[orbital[1]])))\n            else:\n                raise TypeError('Orbital must be str, int, or Orbital.')\n        orb_index = cohp_orbs.index(orbs)\n        orb_label = list(self.orb_res_cohp[label].keys())[orb_index]\n    elif isinstance(orbitals, str):\n        orb_label = orbitals\n    else:\n        raise TypeError('Orbitals must be str, list, or tuple.')\n    try:\n        icohp = self.orb_res_cohp[label][orb_label]['ICOHP']\n    except KeyError:\n        icohp = None\n    return Cohp(self.efermi, self.energies, self.orb_res_cohp[label][orb_label]['COHP'], icohp=icohp, are_coops=self.are_coops)", "docstring": "Get orbital-resolved COHP.\n\nArgs:\nlabel: bond label (Lobster: labels as in ICOHPLIST/ICOOPLIST.lobster).\n\norbitals: The orbitals as a label, or list or tuple of the form\n[(n1, orbital1), (n2, orbital2)]. Orbitals can either be str,\nint, or Orbital.\n\nReturns:\nA Cohp object if CompleteCohp contains orbital-resolved cohp,\nor None if it doesn't.\n\nNote: It currently assumes that orbitals are str if they aren't the\nother valid types. This is not ideal, but the easiest way to\navoid unicode issues between python 2 and python 3.", "source": "codesearchnet"}
{"code": "def subscribe(self, requested_timeout=None, auto_renew=False, event_queue=None):\n    subscription = Subscription(self, event_queue)\n    subscription.subscribe(requested_timeout=requested_timeout, auto_renew=auto_renew)\n    return subscription", "docstring": "Subscribe to the service's events.\n\nArgs:\nrequested_timeout (int, optional): If requested_timeout is\nprovided, a subscription valid for that\nnumber of seconds will be requested, but not guaranteed. Check\n`Subscription.timeout` on return to find out what period of\nvalidity is actually allocated.\nauto_renew (bool): If auto_renew is `True`, the subscription will\nautomatically be renewed just before it expires, if possible.\nDefault is `False`.\n\nevent_queue (:class:`~queue.Queue`): a thread-safe queue object on\nwhich received events will be put. If not specified,\na (:class:`~queue.Queue`) will be created and used.\n\nReturns:\n`Subscription`: an insance of `Subscription`, representing\nthe new subscription.\n\nTo unsubscribe, call the `unsubscribe` method on the returned object.", "source": "codesearchnet"}
{"code": "def _process_update(self, item, feed_item):\n    item['name'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None)\n    item['url'] = feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None)", "docstring": "Updates an landing page based on the values from the feed.\n\nArgs:\nitem: Object representing the landing page to be updated, this object is\nupdated directly.\nfeed_item: Feed item representing landing page values from the Bulkdozer\nfeed.", "source": "github-repos"}
{"code": "def list(self, resource):\n        \n        return self.service.list(\n            resource, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "List metadata keys associated with the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource): List keys associated with this resource.\n\nReturns:\n(list): List of key names.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def find_first(self, *args, **kwargs):\n    if capybara.wait_on_first_by_default:\n        kwargs.setdefault('minimum', 1)\n    try:\n        result = self.find_all(*args, **kwargs)\n        return (result[0] if (len(result) > 0) else None)\n    except ExpectationNotMet:\n        return None", "docstring": "Find the first element on the page matching the given selector and options, or None if no\nelement matches.\n\nBy default, no waiting behavior occurs. However, if ``capybara.wait_on_first_by_default``\nis set to true, it will trigger Capybara's waiting behavior for a minimum of 1 matching\nelement to be found.\n\nArgs:\n*args: Variable length argument list for :class:`SelectorQuery`.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nElement: The found element or None.", "source": "codesearchnet"}
{"code": "def known(self, words):\n    tmp = [w.lower() for w in words]\n    return set((w for w in tmp if ((w in self._word_frequency.dictionary) or (not self._check_if_should_check(w)))))", "docstring": "The subset of `words` that appear in the dictionary of words\n\nArgs:\nwords (list): List of words to determine which are in the \\\ncorpus\nReturns:\nset: The set of those words from the input that are in the \\\ncorpus", "source": "codesearchnet"}
{"code": "def matching_args(fn, dictionary):\n    arg_spec = getargspec(fn)\n    if arg_spec.keywords:\n        return dictionary\n    return _mapping.split_by_criteria(dictionary, arg_spec.args).included", "docstring": "Given a function fn and a dict dictionary, returns the function arguments that match the dict keys.\n\nExample:\n\ndef train(channel_dirs, model_dir): pass\n\ndictionary = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}\n\nargs = functions.matching_args(train, dictionary) # {'channel_dirs': {}, 'model_dir': '/opt/ml/model'}\n\ntrain(**args)\nArgs:\nfn (function): a function\ndictionary (dict): the dictionary with the keys\n\nReturns:\n(dict) a dictionary with only matching arguments.", "source": "codesearchnet"}
{"code": "def check_codes_match(observed_code: str, theoretical_code: str) -> Optional[int]:\n    observed_code_header = observed_code.split('\\n')[0]\n    theoretical_code_header = theoretical_code.split('\\n')[0]\n    _re_class_match = re.compile('class\\\\s+([^\\\\(:]+)(?:\\\\(|:)')\n    _re_func_match = re.compile('def\\\\s+([^\\\\(]+)\\\\(')\n    for re_pattern in [_re_class_match, _re_func_match]:\n        if re_pattern.match(observed_code_header) is not None:\n            try:\n                observed_obj_name = re_pattern.search(observed_code_header).groups()[0]\n            except Exception:\n                raise ValueError('Tried to split a class or function. It did not work. Error comes from: \\n```\\n' + observed_code_header + '\\n```\\n')\n            try:\n                theoretical_name = re_pattern.search(theoretical_code_header).groups()[0]\n            except Exception:\n                raise ValueError('Tried to split a class or function. It did not work. Error comes from: \\n```\\n' + theoretical_code_header + '\\n```\\n')\n            theoretical_code_header = theoretical_code_header.replace(theoretical_name, observed_obj_name)\n    diff_index = 0\n    if theoretical_code_header != observed_code_header:\n        return 0\n    diff_index = 1\n    for observed_line, theoretical_line in zip(observed_code.split('\\n')[1:], theoretical_code.split('\\n')[1:]):\n        if observed_line != theoretical_line:\n            return diff_index\n        diff_index += 1", "docstring": "Checks if two version of a code match with the exception of the class/function name.\n\nArgs:\nobserved_code (`str`): The code found.\ntheoretical_code (`str`): The code to match.\n\nReturns:\n`Optional[int]`: The index of the first line where there is a difference (if any) and `None` if the codes\nmatch.", "source": "github-repos"}
{"code": "def swo_enable(self, cpu_speed, swo_speed=9600, port_mask=1):\n    if self.swo_enabled():\n        self.swo_stop()\n    res = self._dll.JLINKARM_SWO_EnableTarget(cpu_speed, swo_speed, enums.JLinkSWOInterfaces.UART, port_mask)\n    if (res != 0):\n        raise errors.JLinkException(res)\n    self._swo_enabled = True\n    return None", "docstring": "Enables SWO output on the target device.\n\nConfigures the output protocol, the SWO output speed, and enables any\nITM & stimulus ports.\n\nThis is equivalent to calling ``.swo_start()``.\n\nNote:\nIf SWO is already enabled, it will first stop SWO before enabling it\nagain.\n\nArgs:\nself (JLink): the ``JLink`` instance\ncpu_speed (int): the target CPU frequency in Hz\nswo_speed (int): the frequency in Hz used by the target to communicate\nport_mask (int): port mask specifying which stimulus ports to enable\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def allreduce(self, x, mesh_axes, reduction_fn_string):\n    \n    return self._collective_with_groups(\n        x, mesh_axes, functools.partial(\n            allreduce_ring, reduction_fn_string=reduction_fn_string))", "docstring": "Grouped allreduce, (across the given dimensions).\n\nArgs:\nx: a LaidOutTensor\nmesh_axes: a list of integers - the mesh dimensions to be reduced\nreduction_fn_string: \"SUM\" or \"MAX\"\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def replace_characters(self, text, characters, replacement=''):\n        \n        if not characters:\n            return text\n\n        characters = ''.join(sorted(characters))\n        if characters in self._characters_regexes:\n            characters_regex = self._characters_regexes[characters]\n        else:\n            characters_regex = re.compile(\"[%s]\" % re.escape(characters))\n            self._characters_regexes[characters] = characters_regex\n\n        return characters_regex.sub(replacement, text)", "docstring": "Remove characters from text.\n\nRemoves custom characters from input text or replaces them\nwith a string if specified.\n\nArgs:\ntext: The text to be processed.\ncharacters: Characters that will be replaced.\nreplacement: New text that will replace the custom characters.\n\nReturns:\nThe text without the given characters.", "source": "juraj-google-style"}
{"code": "def __init__(self, project: str, location: str, api_endpoint: str, feature_store_name: str, feature_view_name: str, row_key: str, *, exception_level: ExceptionLevel=ExceptionLevel.WARN, **kwargs):\n    self.project = project\n    self.location = location\n    self.api_endpoint = api_endpoint\n    self.feature_store_name = feature_store_name\n    self.feature_view_name = feature_view_name\n    self.row_key = row_key\n    self.exception_level = exception_level\n    self.kwargs = kwargs if kwargs else {}\n    if 'client_options' in self.kwargs:\n        if not self.kwargs['client_options']['api_endpoint']:\n            self.kwargs['client_options']['api_endpoint'] = self.api_endpoint\n        elif self.kwargs['client_options']['api_endpoint'] != self.api_endpoint:\n            raise ValueError('Multiple values received for api_endpoint in api_endpoint and client_options parameters.')\n    else:\n        self.kwargs['client_options'] = {'api_endpoint': self.api_endpoint}\n    try:\n        admin_client = aiplatform.gapic.FeatureOnlineStoreAdminServiceClient(**self.kwargs)\n    except Exception:\n        _LOGGER.warning('Due to insufficient admin permission, could not verify the existence of feature store. If the `exception_level` is set to WARN then make sure the feature store exists otherwise the data enrichment will not happen without throwing an error.')\n    else:\n        location_path = admin_client.common_location_path(project=self.project, location=self.location)\n        feature_store_path = admin_client.feature_online_store_path(project=self.project, location=self.location, feature_online_store=self.feature_store_name)\n        feature_store = admin_client.get_feature_online_store(name=feature_store_path)\n        if not feature_store:\n            raise NotFound('Vertex AI Feature Store %s does not exists in %s' % (self.feature_store_name, location_path))", "docstring": "Initializes an instance of `VertexAIFeatureStoreEnrichmentHandler`.\n\nArgs:\nproject (str): The GCP project-id for the Vertex AI Feature Store.\nlocation (str): The region for the Vertex AI Feature Store.\napi_endpoint (str): The API endpoint for the Vertex AI Feature Store.\nfeature_store_name (str): The name of the Vertex AI Feature Store.\nfeature_view_name (str): The name of the feature view within the\nFeature Store.\nrow_key (str): The row key field name containing the unique id\nfor the feature values.\nexception_level: a `enum.Enum` value from\n`apache_beam.transforms.enrichment_handlers.utils.ExceptionLevel`\nto set the level when an empty row is returned from the BigTable query.\nDefaults to `ExceptionLevel.WARN`.\nkwargs: Optional keyword arguments to configure the\n`aiplatform.gapic.FeatureOnlineStoreServiceClient`.", "source": "github-repos"}
{"code": "def __init__(self, details):\n\t\t\n\n\t\t\n\t\tif not isinstance(details, dict):\n\t\t\traise ValueError('details in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a dict')\n\n\t\t\n\t\tif '__name__' not in details:\n\t\t\traise KeyError('__name__')\n\n\t\t\n\t\tif not _standardField.match(details['__name__']):\n\t\t\traise ValueError('__name__')\n\n\t\t\n\t\tself._name = details['__name__']\n\t\tdel details['__name__']\n\n\t\t\n\t\tif '__array__' in details:\n\t\t\tdel details['__array__']\n\n\t\t\n\t\tsuper(Tree, self).__init__(details)\n\n\t\t\n\t\tself._class = 'Tree'", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of values allowed for\nthe node\n\nRaises:\nKeyError\nValueError\n\nReturns:\nTree", "source": "juraj-google-style"}
{"code": "def initial_value_of_masked_time_series(time_series_tensor, broadcast_mask):\n  \n\n  num_timesteps = tf.shape(input=time_series_tensor)[-1]\n\n  \n  unmasked_negindices = (\n      tf.cast(~broadcast_mask, tf.int32) *\n      tf.range(num_timesteps, 0, -1))\n  first_unmasked_indices = num_timesteps - tf.reduce_max(\n      input_tensor=unmasked_negindices, axis=-1)\n\n  if first_unmasked_indices.shape.ndims is None:\n    raise NotImplementedError(\n        'Cannot compute initial values of a masked time series with'\n        'dynamic rank.')  \n\n  \n  return tf.squeeze(tf.compat.v1.batch_gather(\n      params=time_series_tensor,\n      indices=first_unmasked_indices[..., tf.newaxis]), axis=-1)", "docstring": "Get the first unmasked entry of each time series in the batch.\n\nArgs:\ntime_series_tensor: float `Tensor` of shape [..., num_timesteps].\nbroadcast_mask: bool `Tensor` of same shape as `time_series`.", "source": "juraj-google-style"}
{"code": "def put(self, data):", "docstring": "Write data to file sequentially.\n\nArgs:\ndata: (memoryview) Data to write.", "source": "github-repos"}
{"code": "def certificate_authority(self):\n    if (not self.__certificate_authority):\n        self.__certificate_authority = CertificateAuthority(self.__connection)\n    return self.__certificate_authority", "docstring": "Gets the Certificate Authority API client.\n\nReturns:\nCertificateAuthority:", "source": "codesearchnet"}
{"code": "def fromTFExample(iter, binary_features=[]):\n\n    def _get_value(k, v):\n        if v.int64_list.value:\n            result = v.int64_list.value\n        elif v.float_list.value:\n            result = v.float_list.value\n        elif (k in binary_features):\n            return bytearray(v.bytes_list.value[0])\n        else:\n            return v.bytes_list.value[0].decode('utf-8')\n        if (len(result) > 1):\n            return list(result)\n        elif (len(result) == 1):\n            return result[0]\n        else:\n            return None\n    results = []\n    for record in iter:\n        example = tf.train.Example()\n        example.ParseFromString(bytes(record[0]))\n        d = {k: _get_value(k, v) for (k, v) in sorted(example.features.feature.items())}\n        row = Row(**d)\n        results.append(row)\n    return results", "docstring": "mapPartition function to convert an RDD of serialized tf.train.Example bytestring into an RDD of Row.\n\nNote: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to\ndisambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a \"hint\"\nfrom the caller in the ``binary_features`` argument.\n\nArgs:\n:iter: the RDD partition iterator\n:binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays.\n\nReturns:\nAn array/iterator of DataFrame Row with features converted into columns.", "source": "codesearchnet"}
{"code": "def get_symbol_list(rank, dim=6):\n    indices = list(itertools.combinations_with_replacement(range(dim), r=rank))\n    c_vec = np.zeros(len(indices), dtype=object)\n    c_arr = np.zeros(([dim] * rank), dtype=object)\n    for (n, idx) in enumerate(indices):\n        c_vec[n] = sp.Symbol(('c_' + ''.join([str(i) for i in idx])))\n        for perm in itertools.permutations(idx):\n            c_arr[perm] = c_vec[n]\n    return (c_vec, c_arr)", "docstring": "Returns a symbolic representation of the voigt-notation\ntensor that places identical symbols for entries related\nby index transposition, i. e. C_1121 = C_1211 etc.\n\nArgs:\ndim (int): dimension of matrix/tensor, e. g. 6 for\nvoigt notation and 3 for standard\nrank (int): rank of tensor, e. g. 3 for third-order ECs\n\nReturns:\nc_vec (array): array representing distinct indices\nc_arr (array): array representing tensor with equivalent\nindices assigned as above", "source": "codesearchnet"}
{"code": "def aggregate_repo(repo, args, sem, err_queue):\n    \n    try:\n        logger.debug('%s' % repo)\n        dirmatch = args.dirmatch\n        if not match_dir(repo.cwd, dirmatch):\n            logger.info(\"Skip %s\", repo.cwd)\n            return\n        if args.command == 'aggregate':\n            repo.aggregate()\n            if args.do_push:\n                repo.push()\n        elif args.command == 'show-closed-prs':\n            repo.show_closed_prs()\n        elif args.command == 'show-all-prs':\n            repo.show_all_prs()\n    except Exception:\n        err_queue.put_nowait(sys.exc_info())\n    finally:\n        sem.release()", "docstring": "Aggregate one repo according to the args.\n\nArgs:\nrepo (Repo): The repository to aggregate.\nargs (argparse.Namespace): CLI arguments.", "source": "juraj-google-style"}
{"code": "def intersection(L1, L2):\n    D = ((L1[0] * L2[1]) - (L1[1] * L2[0]))\n    Dx = ((L1[2] * L2[1]) - (L1[1] * L2[2]))\n    Dy = ((L1[0] * L2[2]) - (L1[2] * L2[0]))\n    if (D != 0):\n        x = (Dx / D)\n        y = (Dy / D)\n        return (x, y)\n    else:\n        return False", "docstring": "Intersects two line segments\n\nArgs:\nL1 ([float, float]): x and y coordinates\nL2 ([float, float]): x and y coordinates\nReturns:\nbool: if they intersect\n(float, float): x and y of intersection, if they do", "source": "codesearchnet"}
{"code": "def __init__(self, filename, mode='a', encoding='utf-8'):\n    \n    if 't' not in mode and encoding and py2to3.PY_3:\n      mode = '{0:s}t'.format(mode)\n    super(CompressedFileHandler, self).__init__(\n        filename, mode=mode, encoding=encoding, delay=True)", "docstring": "Initializes a compressed file logging handler.\n\nArgs:\nfilename (str): name of the log file.\nmode (Optional[str]): file access mode.\nencoding (Optional[str]): encoding of the log lines.", "source": "juraj-google-style"}
{"code": "def get_control_outputs(self, op):\n    if op.graph not in self.cache:\n        control_outputs = self.calc_control_outputs(op.graph)\n        self.cache[op.graph] = control_outputs\n    else:\n        control_outputs = self.cache[op.graph]\n    return control_outputs.get(op, [])", "docstring": "Return the control outputs for a given op.\n\nArgs:\nop: The op to fetch control outputs for.\n\nReturns:\nIterable of control output ops.", "source": "github-repos"}
{"code": "def create_writer_of_type(type_name):\n    writers = available_writers()\n    if (type_name not in writers.keys()):\n        raise UnknownWriterException(('Unknown writer: %s' % (type_name,)))\n    return writers[type_name]()", "docstring": "Create an instance of the writer with the given name.\n\nArgs:\ntype_name: The name of a writer.\n\nReturns:\nAn instance of the writer with the given type.", "source": "codesearchnet"}
{"code": "def get_top_coins(tsym, limit=20):\n\t\n\t\n\t\n\turl = build_url('volumes', tsym=tsym, limit=limit)\n\tdata = load_data(url)\n\n\treturn data['Data']", "docstring": "Get top coins by 24 hour trading volume value in the requested currency.\n\nArgs:\ntsym: TO symbol.\nlimit: Number of results. Default value returns top 20 coins.\n\nReturns:\nFunction returns a list containing a dictionary for each result:\n\n[{'SUPPLY': ..., 'SYMBOL': ..., 'VOLUME24HOURTO': ...},\n{...},\n...]\n\nThe list is ordered based on the volume of the TO currency starting with\nthe highest value.", "source": "juraj-google-style"}
{"code": "def validate(self, ticket, client_ip=None, now=None, encoding='utf-8'):\n    parts = self.parse(ticket)\n    new_ticket = self.new(*parts[1:], client_ip=client_ip, encoding=encoding)\n    if (new_ticket[:(self._hash.digest_size * 2)] != parts.digest):\n        raise TicketDigestError(ticket)\n    if (now is None):\n        now = time.time()\n    if (parts.valid_until <= now):\n        raise TicketExpired(ticket)\n    return parts", "docstring": "Validates the passed ticket, , raises a TicketError\non failure\n\nArgs:\nticket: String value (possibly generated by new function)\nclient_ip: Optional IPAddress of client, should be passed if the\nip address was passed on ticket creation.\nnow: Optional (defaults to time.time()) time to use when\nvalidating ticket date\n\nReturns:\nTicket a TicketInfo tuple containing the users authentication details on\nsuccess\n\nRaises:\nTicketParseError: Invalid ticket format\nTicketDigestError: Digest is incorrect (ticket data was modified)\nTicketExpired: Ticket has passed expiration date", "source": "codesearchnet"}
{"code": "def set_authentication_profile(profile=None, deploy=False):\n    if (not profile):\n        raise CommandExecutionError('Profile name option must not be none.')\n    ret = {}\n    query = {'type': 'config', 'action': 'set', 'xpath': \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/authentication-profile\", 'element': '<authentication-profile>{0}</authentication-profile>'.format(profile)}\n    ret.update(__proxy__['panos.call'](query))\n    if (deploy is True):\n        ret.update(commit())\n    return ret", "docstring": "Set the authentication profile of the Palo Alto proxy minion. A commit will be required before this is processed.\n\nCLI Example:\n\nArgs:\nprofile (str): The name of the authentication profile to set.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_authentication_profile foo\nsalt '*' panos.set_authentication_profile foo deploy=True", "source": "codesearchnet"}
{"code": "def get_file(self, file_name, local_destination=None, **kwargs):\n    if (not local_destination):\n        local_destination = file_name\n    return SubprocessTask((self._rsync_cmd() + ['-ut', ('%s:%s' % (self.hostname, file_name)), local_destination]), **kwargs)", "docstring": "Get a file from a remote host with rsync.\n\nArgs:\nfile_name (str): The relative location of the file on the remote\nhost.\n\nlocal_destination (str): The destination for the file on the local\nhost. If `None`, will be assumed to be the same as\n**file_name**. Default `None`.\n\n**kwargs: Passed to ``SubprocessTask``'s init method.\n\nReturn:\n``pyrem.task.SubprocessTask``: The resulting task.", "source": "codesearchnet"}
{"code": "def _decode_crop_and_flip(image_buffer, num_channels):\n    min_object_covered = 0.1\n    aspect_ratio_range = [0.75, 1.33]\n    area_range = [0.05, 1.0]\n    max_attempts = 100\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MIN_OBJ_COV, value=min_object_covered)\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_RATIO_RANGE, value=aspect_ratio_range)\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_AREA_RANGE, value=area_range)\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MAX_ATTEMPTS, value=max_attempts)\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_CROP_USES_BBOXES, value=False)\n    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(tf.image.extract_jpeg_shape(image_buffer), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)\n    (bbox_begin, bbox_size, _) = sample_distorted_bounding_box\n    (offset_y, offset_x, _) = tf.unstack(bbox_begin)\n    (target_height, target_width, _) = tf.unstack(bbox_size)\n    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])\n    cropped = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=num_channels)\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_RANDOM_FLIP)\n    cropped = tf.image.random_flip_left_right(cropped)\n    return cropped", "docstring": "Crops the given image to a random part of the image, and randomly flips.\n\nWe use the fused decode_and_crop op, which performs better than the two ops\nused separately in series, but note that this requires that the image be\npassed in as an un-decoded string Tensor.\n\nArgs:\nimage_buffer: scalar string Tensor representing the raw JPEG image buffer.\nnum_channels: Integer depth of the image buffer for decoding.\n\nReturns:\n3-D tensor with cropped image.", "source": "codesearchnet"}
{"code": "def create_policy(self, account, client, document, name, arn=None):\n    if ((not arn) and (not name)):\n        raise ValueError('create_policy must be called with either arn or name in the argument list')\n    if arn:\n        response = client.list_policy_versions(PolicyArn=arn)\n        if (len(response['Versions']) >= 5):\n            version = [x for x in sorted(response['Versions'], key=(lambda k: k['CreateDate'])) if (not x['IsDefaultVersion'])][0]\n            self.log.info('Deleting oldest IAM Policy version {}/{}'.format(arn, version['VersionId']))\n            client.delete_policy_version(PolicyArn=arn, VersionId=version['VersionId'])\n            auditlog(event='iam.check_roles.delete_policy_version', actor=self.ns, data={'account': account.account_name, 'policyName': name, 'policyArn': arn, 'versionId': version['VersionId']})\n        res = client.create_policy_version(PolicyArn=arn, PolicyDocument=document, SetAsDefault=True)\n    else:\n        res = client.create_policy(PolicyName=name, PolicyDocument=document)\n    auditlog(event='iam.check_roles.create_policy', actor=self.ns, data={'account': account.account_name, 'policyName': name, 'policyArn': arn})\n    return res", "docstring": "Create a new IAM policy.\n\nIf the policy already exists, a new version will be added and if needed the oldest policy version not in use\nwill be removed. Returns a dictionary containing the policy or version information\n\nArgs:\naccount (:obj:`Account`): Account to create the policy on\nclient (:obj:`boto3.client`): A boto3 client object\ndocument (`str`): Policy document\nname (`str`): Name of the policy to create / update\narn (`str`): Optional ARN for the policy to update\n\nReturns:\n`dict`", "source": "codesearchnet"}
{"code": "def model(self, inputs, mode='train'):\n        \n        \n        training = (mode == 'train')\n        with tf.variable_scope('conv1') as scope:\n            conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')\n            bn = tf.layers.batch_normalization(inputs=conv, training=training)\n            bn = tf.nn.relu(bn)\n            conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')\n            bn = tf.layers.batch_normalization(inputs=conv, training=training)\n            bn = tf.nn.relu(bn)\n            pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)\n\n        with tf.variable_scope('conv2') as scope:\n            conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')\n            bn = tf.layers.batch_normalization(inputs=conv, training=training)\n            bn = tf.nn.relu(bn)\n            conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')\n            bn = tf.layers.batch_normalization(inputs=conv, training=training)\n            bn = tf.nn.relu(bn)\n            pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)\n\n        with tf.variable_scope('conv3') as scope:\n            conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')\n            bn = tf.layers.batch_normalization(inputs=conv, training=training)\n            bn = tf.nn.relu(bn)\n            conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')\n            bn = tf.layers.batch_normalization(inputs=conv, training=training)\n            bn = tf.nn.relu(bn)\n            pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)\n\n        \n        with tf.variable_scope('fc') as scope:\n            flat = tf.layers.flatten(pool)\n            fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)\n            softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)\n\n        return softmax", "docstring": "Build a simple convnet (BN before ReLU).\nArgs:\ninputs: a tensor of size [batch_size, height, width, channels]\nmode: string in ['train', 'test']\nReturns:\nthe last op containing the predictions\nNote:\nBest score\nStep:  7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656\nWorst score\nStep:  7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874", "source": "juraj-google-style"}
{"code": "def is_initialized(self, name=None):\n    return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)", "docstring": "Checks whether a resource variable has been initialized.\n\nOutputs boolean scalar indicating whether the tensor has been initialized.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `bool`.", "source": "github-repos"}
{"code": "def ascii2h5(dat_fname, h5_fname):\n    \n    table = np.loadtxt(dat_fname, skiprows=1, dtype='f4')\n\n    filter_kwargs = dict(\n        chunks=True,\n        compression='gzip',\n        compression_opts=3)\n\n    \n    idx = ~np.all(table[:,2:32] < 1.e-5, axis=1)\n\n    with h5py.File(h5_fname, 'w') as f:\n        d = np.arange(0., 4.351, 0.15).astype('f4')\n\n        dset = f.create_dataset('dists', data=d, **filter_kwargs)\n        dset.attrs['description'] = 'Distances at which extinction is measured'\n        dset.attrs['units'] = 'kpc'\n\n        dset = f.create_dataset('pix_lb', data=table[idx,0:2], **filter_kwargs)\n        dset.attrs['description'] = 'Galactic (l, b) of each pixel'\n        dset.attrs['units'] = 'deg'\n\n        dset = f.create_dataset('A_r', data=table[idx,2:32], **filter_kwargs)\n        dset.attrs['description'] = 'Extinction'\n        dset.attrs['shape'] = '(pixel, distance)'\n        dset.attrs['band'] = 'r'\n        dset.attrs['units'] = 'mag'\n\n        dset = f.create_dataset('A_r_err', data=table[idx,32:], **filter_kwargs)\n        dset.attrs['description'] = 'Gaussian uncertainty in extinction'\n        dset.attrs['shape'] = '(pixel, distance)'\n        dset.attrs['band'] = 'r'\n        dset.attrs['units'] = 'mag'", "docstring": "Converts from the original ASCII format of the Chen+ (2014) 3D dust map to\nthe HDF5 format.\n\nArgs:\ndat_fname (:obj:`str`): Filename of the original ASCII .dat file.\nh5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to.", "source": "juraj-google-style"}
{"code": "def create_app(*, debug=False, threads=1, bigchaindb_factory=None):\n    \n\n    if not bigchaindb_factory:\n        bigchaindb_factory = BigchainDB\n\n    app = Flask(__name__)\n    app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)\n\n    CORS(app)\n\n    app.debug = debug\n\n    app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)\n\n    add_routes(app)\n\n    return app", "docstring": "Return an instance of the Flask application.\n\nArgs:\ndebug (bool): a flag to activate the debug mode for the app\n(default: False).\nthreads (int): number of threads to use\nReturn:\nan instance of the Flask application.", "source": "juraj-google-style"}
{"code": "def softmax_classifier(input_, num_classes, labels=None, loss_weight=None, per_example_weights=None, weights=None, bias=tf.zeros_initializer(), parameter_modifier=parameters.identity, name=PROVIDED):\n    full = input_.fully_connected(num_classes, activation_fn=None, name=name, weights=weights, bias=bias, parameter_modifier=parameter_modifier)\n    return full.softmax(labels=labels, loss_weight=loss_weight, per_example_weights=per_example_weights, name=name)", "docstring": "Creates a fully-connected linear layer followed by a softmax.\n\nThis returns `(softmax, loss)` where `loss` is the cross entropy loss.\n\nArgs:\ninput_: A rank 2 Tensor or a Pretty Tensor holding the activation before\nthe logits (penultimate layer).\nnum_classes: The number of classes.\nlabels: The target labels to learn as a float tensor.  Use None to not\ninclude a training loss.\nloss_weight: A scalar multiplier for the loss.\nper_example_weights: A Tensor with a weight per example.\nweights: The initializer for the weights (see `fully_connected`).\nbias: The initializer for the bias (see `fully_connected`).\nparameter_modifier: A modifier for the parameters that compute the logits.\nname: The optional name.\nReturns:\nA named tuple holding:\n\nsoftmax: The result of this layer with softmax normalization.\nloss: The cross entropy loss.\nRaises:\nValueError: If the datatype is wrong.", "source": "codesearchnet"}
{"code": "def initialize(self):\n    if eager_context.executing_eagerly():\n        self._iterator = self._dataset.make_one_shot_iterator()\n        return []\n    else:\n        return [self._iterator.initializer]", "docstring": "Initialize underlying iterators.\n\nReturns:\nA list of any initializer ops that should be run.", "source": "github-repos"}
{"code": "def bespoke_md5(self, md5):\n        \n        r = requests.post('http:\n        self._output(r.text)", "docstring": "Performs Bespoke MD5 lookup on an MD5.\n\nArgs:\nmd5 - A hash.", "source": "juraj-google-style"}
{"code": "def upload(self, file_path, golden_image_info):\n    uri = '{0}?name={1}&description={2}'.format(self.URI, quote(golden_image_info.get('name', '')), quote(golden_image_info.get('description', '')))\n    return self._client.upload(file_path, uri)", "docstring": "Adds a Golden Image resource from the file that is uploaded from a local drive. Only the .zip format file can\nbe used for the upload.\n\nArgs:\nfile_path (str): File name to upload.\ngolden_image_info (dict): Golden Image information.\n\nReturns:\ndict: Golden Image.", "source": "codesearchnet"}
{"code": "def alias_inplace_sub(x, i, v):\n    return _inplace_helper(x, i, v, gen_array_ops.inplace_sub)", "docstring": "Applies an inplace sub on input x at index i with value v. Aliases x.\n\nIf i is None, x and v must be the same shape. Computes\nx -= v;\nIf i is a scalar, x has a rank 1 higher than v's. Computes\nx[i, :] -= v;\nOtherwise, x and v must have the same rank. Computes\nx[i, :] -= v;\n\nArgs:\nx: A Tensor.\ni: None, a scalar or a vector.\nv: A Tensor.\n\nReturns:\nReturns x.", "source": "github-repos"}
{"code": "def __init__(\n      self, credential_data=None, credential_type=None, path_spec=None):\n    \n    super(CredentialConfiguration, self).__init__()\n    self.credential_data = credential_data\n    self.credential_type = credential_type\n    self.path_spec = path_spec", "docstring": "Initializes a credential configuration object.\n\nArgs:\ncredential_data (Optional[bytes]): credential data.\ncredential_type (Optional[str]): credential type.\npath_spec (Optional[dfvfs.PathSpec]): path specification.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_dtype, value_dtype, default_value, name='SimpleHashTable'):\n    super(SimpleHashTable, self).__init__()\n    self._default_value = tf.convert_to_tensor(default_value, dtype=value_dtype)\n    self._value_shape = self._default_value.get_shape()\n    self._key_dtype = key_dtype\n    self._value_dtype = value_dtype\n    self._name = name\n    self._resource_handle = self._create_resource()", "docstring": "Creates an empty `SimpleHashTable` object.\n\nCreates a table, the type of its keys and values are specified by key_dtype\nand value_dtype, respectively.\n\nArgs:\nkey_dtype: the type of the key tensors.\nvalue_dtype: the type of the value tensors.\ndefault_value: The value to use if a key is missing in the table.\nname: A name for the operation (optional).\n\nReturns:\nA `SimpleHashTable` object.", "source": "github-repos"}
{"code": "def _make_columnar(self, x):\n    if (tensorshape_util.rank(x.shape) is not None):\n        if (tensorshape_util.rank(x.shape) == 1):\n            x = x[(tf.newaxis, :)]\n        return x\n    shape = tf.shape(input=x)\n    maybe_expanded_shape = tf.concat([shape[:(- 1)], distribution_util.pick_vector(tf.equal(tf.rank(x), 1), [1], np.array([], dtype=np.int32)), shape[(- 1):]], 0)\n    return tf.reshape(x, maybe_expanded_shape)", "docstring": "Ensures non-scalar input has at least one column.\n\nExample:\nIf `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`.\n\nIf `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged.\n\nIf `x = 1` then the output is unchanged.\n\nArgs:\nx: `Tensor`.\n\nReturns:\ncolumnar_x: `Tensor` with at least two dimensions.", "source": "codesearchnet"}
{"code": "def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):\n        \n        num_launched, do_exit, launched = 0, False, []\n\n        for count in range(max_loops):\n            if do_exit:\n                break\n            if count > 0:\n                time.sleep(sleep_time)\n\n            tasks = self.fetch_tasks_to_run()\n\n            \n            if any(task in launched for task in tasks):\n                logger.critical(\"numtasks %d already in launched list:\\n%s\" % (len(tasks), launched))\n\n            \n            tasks = [t for t in tasks if t not in launched]\n\n            if not tasks:\n                continue\n\n            for task in tasks:\n                fired = task.start()\n                if fired:\n                    launched.append(task)\n                    num_launched += 1\n\n                if num_launched >= max_nlaunch > 0:\n                    logger.info('num_launched >= max_nlaunch, going back to sleep')\n                    do_exit = True\n                    break\n\n        \n        self.flow.pickle_dump()\n\n        return num_launched", "docstring": "Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.\n\nArgs:\nmax_nlaunch: Maximum number of launches. default: no limit.\nmax_loops: Maximum number of loops\nsleep_time: seconds to sleep between rapidfire loop iterations\n\nReturns:\nThe number of tasks launched.", "source": "juraj-google-style"}
{"code": "def create_sas_locator(access_token, asset_id, accesspolicy_id):\n    \n    path = '/Locators'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = '{ \\\n\t\t\"AccessPolicyId\":\"' + accesspolicy_id + '\", \\\n\t\t\"AssetId\":\"' + asset_id + '\", \\\n\t\t\"Type\":1 \\\n\t}'\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service SAS Locator.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nasset_id (str): Media Service Asset ID.\naccesspolicy_id (str): Media Service Access Policy ID.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def max_intensity(item_a, time_a, item_b, time_b, max_value):\n    intensity_a = item_a.max_intensity(time_a)\n    intensity_b = item_b.max_intensity(time_b)\n    diff = np.sqrt(((intensity_a - intensity_b) ** 2))\n    return (np.minimum(diff, max_value) / float(max_value))", "docstring": "RMS difference in maximum intensity\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def SignMessage(self, message, script_hash):\n    keypair = self.GetKeyByScriptHash(script_hash)\n    prikey = bytes(keypair.PrivateKey)\n    res = Crypto.Default().Sign(message, prikey)\n    return (res, keypair.PublicKey)", "docstring": "Sign a message with a specified script_hash.\n\nArgs:\nmessage (str): a hex encoded message to sign\nscript_hash (UInt160): a bytearray (len 20).\n\nReturns:\nstr: the signed message", "source": "codesearchnet"}
{"code": "def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):\n    return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`RTDetrConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model\nconfiguration.\n\nArgs:\nbackbone_config ([`PretrainedConfig`]):\nThe backbone configuration.\n\nReturns:\n[`RTDetrConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "async def send_rpc(self, conn_id, address, rpc_id, payload, timeout):\n        \n\n        self._ensure_connection(conn_id, True)\n        dev = self._get_property(conn_id, 'device')\n\n        try:\n            res = dev.call_rpc(address, rpc_id, bytes(payload))\n            if inspect.iscoroutine(res):\n                return await res\n            else:\n                return res\n        except (RPCInvalidIDError, RPCNotFoundError, TileNotFoundError, RPCErrorCode, BusyRPCResponse):\n            raise\n        except Exception:\n            self._logger.exception(\"Exception inside rpc %d:0x%04X, payload=%s\",\n                                   address, rpc_id, payload)\n            raise", "docstring": "Asynchronously send an RPC to this IOTile device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\naddress (int): the address of the tile that we wish to send the RPC to\nrpc_id (int): the 16-bit id of the RPC we want to call\npayload (bytearray): the payload of the command\ntimeout (float): the number of seconds to wait for the RPC to execute", "source": "juraj-google-style"}
{"code": "def _load_dataset_clipping(self, dataset_dir, epsilon):\n    self.dataset_max_clip = {}\n    self.dataset_min_clip = {}\n    self._dataset_image_count = 0\n    for fname in os.listdir(dataset_dir):\n        if (not fname.endswith('.png')):\n            continue\n        image_id = fname[:(- 4)]\n        image = np.array(Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))\n        image = image.astype('int32')\n        self._dataset_image_count += 1\n        self.dataset_max_clip[image_id] = np.clip((image + epsilon), 0, 255).astype('uint8')\n        self.dataset_min_clip[image_id] = np.clip((image - epsilon), 0, 255).astype('uint8')", "docstring": "Helper method which loads dataset and determines clipping range.\n\nArgs:\ndataset_dir: location of the dataset.\nepsilon: maximum allowed size of adversarial perturbation.", "source": "codesearchnet"}
{"code": "def read(self, key, array=False, embedded=True):\n    self.tcex.log.debug('read variable {}'.format(key))\n    data = key\n    if (key is not None):\n        key = key.strip()\n        key_type = self.variable_type(key)\n        if re.match(self._variable_match, key):\n            if (key_type in self.read_data_types):\n                if (key_type in ['Binary', 'BinaryArray']):\n                    data = self.read_data_types[key_type](key)\n                else:\n                    data = self.read_data_types[key_type](key, embedded)\n            else:\n                data = self.read_raw(key)\n        else:\n            if (key_type == 'String'):\n                data = re.sub('(?<!\\\\\\\\)\\\\\\\\s', ' ', data)\n                data = re.sub('\\\\\\\\\\\\\\\\s', '\\\\s', data)\n            if embedded:\n                data = self.read_embedded(data, key_type)\n    if (array and (not isinstance(data, list))):\n        if (data is not None):\n            data = [data]\n        else:\n            data = []\n    return data", "docstring": "Read method of CRUD operation for working with KeyValue DB.\n\nThis method will automatically check to see if a single variable is passed\nor if \"mixed\" data is passed and return the results from the DB. It will also\nautomatically determine the variable type to read.\n\nArgs:\nkey (string): The variable to read from the DB.\narray (boolean): Convert string/dict to Array/List before returning.\nembedded (boolean): Resolve embedded variables.\n\nReturns:\n(any): Results retrieved from DB", "source": "codesearchnet"}
{"code": "def make_encoder(activation, latent_size, base_depth):\n  \n  conv = functools.partial(\n      tf.keras.layers.Conv2D, padding=\"SAME\", activation=activation)\n\n  encoder_net = tf.keras.Sequential([\n      conv(base_depth, 5, 1),\n      conv(base_depth, 5, 2),\n      conv(2 * base_depth, 5, 1),\n      conv(2 * base_depth, 5, 2),\n      conv(4 * latent_size, 7, padding=\"VALID\"),\n      tf.keras.layers.Flatten(),\n      tf.keras.layers.Dense(2 * latent_size, activation=None),\n  ])\n\n  def encoder(images):\n    images = 2 * tf.cast(images, dtype=tf.float32) - 1\n    net = encoder_net(images)\n    return tfd.MultivariateNormalDiag(\n        loc=net[..., :latent_size],\n        scale_diag=tf.nn.softplus(net[..., latent_size:] +\n                                  _softplus_inverse(1.0)),\n        name=\"code\")\n\n  return encoder", "docstring": "Creates the encoder function.\n\nArgs:\nactivation: Activation function in hidden layers.\nlatent_size: The dimensionality of the encoding.\nbase_depth: The lowest depth for a layer.\n\nReturns:\nencoder: A `callable` mapping a `Tensor` of images to a\n`tfd.Distribution` instance over encodings.", "source": "juraj-google-style"}
{"code": "def __init__(self, workdir, prefix):\n        \n\n        self._workdir = workdir\n        self._prefix = prefix\n        \n        self._pprefix = SDKWrapper(weakref.proxy(self._prefix))", "docstring": "__init__\n\nArgs:\nworkdir(:class:`~lago.workdir.Workdir`): The enviornment\nworkdir.\nprefix(:class:~lago.prefix.Prefix): The enviornment Prefix.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def choices_validator(choices):\n    \n    def validator(value):\n        if value not in choices:\n            \n            raise ValidationError(\n                \"{} is not in {}\".format(value, list(choices))\n            )\n\n    return validator", "docstring": "Return validator function that will check if ``value in choices``.\n\nArgs:\nmax_value (list, set, tuple): allowed choices for new validator", "source": "juraj-google-style"}
{"code": "def get_samples_live(self, sensor_id, last=None):\n    \n    url = \"https:\n\n    headers = self.__gen_headers()\n    headers[\"Content-Type\"] = \"application/json\"\n\n    params = { \"sensorId\": sensor_id }\n    if last:\n      params[\"last\"] = last\n    url = self.__append_url_params(url, params)\n\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get recent samples, one sample per second for up to the last 2 minutes.\n\nArgs:\nsensor_id (string): hexadecimal id of the sensor to query, e.g.\n``0x0013A20040B65FAD``\nlast (string): starting range, as ISO8601 timestamp\n\nReturns:\nlist: dictionary objects containing sample data", "source": "juraj-google-style"}
{"code": "def copy(self, source_file_names, destination_file_names):\n    err_msg = 'source_file_names and destination_file_names should be equal in length'\n    assert len(source_file_names) == len(destination_file_names), err_msg\n\n    def _copy_path(source, destination):\n        \n        if not destination.startswith(GCSFileSystem.GCS_PREFIX):\n            raise ValueError('Destination %r must be GCS path.' % destination)\n        if source.endswith('/'):\n            self._gcsIO().copytree(source, destination)\n        else:\n            self._gcsIO().copy(source, destination)\n    exceptions = {}\n    for source, destination in zip(source_file_names, destination_file_names):\n        try:\n            _copy_path(source, destination)\n        except Exception as e:\n            exceptions[source, destination] = e\n    if exceptions:\n        raise BeamIOError('Copy operation failed', exceptions)", "docstring": "Recursively copy the file tree from the source to the destination\n\nArgs:\nsource_file_names: list of source file objects that needs to be copied\ndestination_file_names: list of destination of the new object\n\nRaises:\n``BeamIOError``: if any of the copy operations fail", "source": "github-repos"}
{"code": "def _list_node_dumps(self, node_name):\n    lines = []\n    font_attr_segs = {}\n    watch_keys = self._debug_dump.debug_watch_keys(node_name)\n    dump_count = 0\n    for watch_key in watch_keys:\n        debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n        for datum in debug_tensor_data:\n            line = '  Slot %d @ %s @ %.3f ms' % (datum.output_slot, datum.debug_op, (datum.timestamp - self._debug_dump.t0) / 1000.0)\n            lines.append(line)\n            command = 'pt %s:%d -n %d' % (node_name, datum.output_slot, dump_count)\n            font_attr_segs[len(lines) - 1] = [(2, len(line), debugger_cli_common.MenuItem(None, command))]\n            dump_count += 1\n    output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)\n    output_with_header = debugger_cli_common.RichTextLines(['%d dumped tensor(s):' % dump_count, ''])\n    output_with_header.extend(output)\n    return output_with_header", "docstring": "List dumped tensor data from a node.\n\nArgs:\nnode_name: Name of the node of which the attributes are to be listed.\n\nReturns:\nA RichTextLines object.", "source": "github-repos"}
{"code": "def add_string_pairs_from_text_field_element(xib_file, results, text_field, special_ui_components_prefix):\n    text_field_entry_comment = extract_element_internationalized_comment(text_field)\n    if (text_field_entry_comment is None):\n        return\n    if (text_field.hasAttribute('usesAttributedText') and (text_field.attributes['usesAttributedText'].value == 'YES')):\n        add_string_pairs_from_attributed_ui_element(results, text_field, text_field_entry_comment)\n    else:\n        try:\n            text_field_entry_key = text_field.attributes['text'].value\n            results.append((text_field_entry_key, (text_field_entry_comment + ' default text value')))\n        except KeyError:\n            pass\n    try:\n        text_field_entry_key = text_field.attributes['placeholder'].value\n        results.append((text_field_entry_key, (text_field_entry_comment + ' placeholder text value')))\n    except KeyError:\n        pass\n    warn_if_element_not_of_class(text_field, 'TextField', special_ui_components_prefix)", "docstring": "Adds string pairs from a textfield element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\ntext_field(element): The textfield element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix (str):\nIf not None, extraction will not warn about internationalized UI components with this class prefix.", "source": "codesearchnet"}
{"code": "def draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):\n    _check_pil()\n    font_files = _find_font_file(font)\n    if (len(font_files) == 0):\n        logger.warn(\"Failed to lookup font '{}', falling back to default\".format(font))\n        font = ImageFont.load_default()\n    else:\n        font = ImageFont.truetype(font_files[0], font_size)\n    img = Image.fromarray(img)\n    draw = ImageDraw.Draw(img)\n    draw.text(position, text, fill=color, font=font)\n    return np.asarray(img)", "docstring": "Draws text over the image. Requires PIL.\n\nArgs:\nimg: The image to use.\ntext: The text string to overlay.\nposition: The text (x, y) position. (Default value = (10, 10))\nfont: The ttf or open type font to use. (Default value = 'FreeSans.ttf')\nfont_size: The text font size. (Default value = 12)\ncolor: The (r, g, b) values for text color. (Default value = (0, 0, 0))\n\nReturns: Image overlayed with text.", "source": "codesearchnet"}
{"code": "def pymmh3_hash128(key: Union[bytes, bytearray],\n                   seed: int = 0,\n                   x64arch: bool = True) -> int:\n    \n    if x64arch:\n        return pymmh3_hash128_x64(key, seed)\n    else:\n        return pymmh3_hash128_x86(key, seed)", "docstring": "Implements 128bit murmur3 hash, as per ``pymmh3``.\n\nArgs:\nkey: data to hash\nseed: seed\nx64arch: is a 64-bit architecture available?\n\nReturns:\ninteger hash", "source": "juraj-google-style"}
{"code": "def save(self, sess, save_path, timestep=None):\n        \n\n        if self._saver is None:\n            raise TensorForceError(\"register_saver_ops should be called before save\")\n        return self._saver.save(\n            sess=sess,\n            save_path=save_path,\n            global_step=timestep,\n            write_meta_graph=False,\n            write_state=True,  \n        )", "docstring": "Saves this component's managed variables.\n\nArgs:\nsess: The session for which to save the managed variables.\nsave_path: The path to save data to.\ntimestep: Optional, the timestep to append to the file name.\n\nReturns:\nCheckpoint path where the model was saved.", "source": "juraj-google-style"}
{"code": "async def datacenters(self):\n    response = (await self._api.get('/v1/coordinate/datacenters'))\n    return {data['Datacenter']: data for data in response.body}", "docstring": "Queries for WAN coordinates of Consul servers\n\nReturns:\nMapping: WAN network coordinates for all Consul\nservers, organized by DCs.\n\nIt returns a body like this::\n\n{\n\"dc1\": {\n\"Datacenter\": \"dc1\",\n\"Coordinates\": [\n{\n\"Node\": \"agent-one\",\n\"Coord\": {\n\"Adjustment\": 0,\n\"Error\": 1.5,\n\"Height\": 0,\n\"Vec\": [0,0,0,0,0,0,0,0]\n}\n}\n]\n}\n}\n\nThis endpoint serves data out of the server's local Serf data about\nthe WAN, so its results may vary as requests are handled by different\nservers in the cluster.\n\nAlso, it does not support blocking queries or any consistency modes.", "source": "codesearchnet"}
{"code": "def try_evaluate_constant(tensor):\n    with tensor.graph._c_graph.get() as c_graph:\n        return c_api.TF_TryEvaluateConstant_wrapper(c_graph, tensor._as_tf_output())", "docstring": "Evaluates a symbolic tensor as a constant.\n\nArgs:\ntensor: a symbolic Tensor.\n\nReturns:\nndarray if the evaluation succeeds, or None if it fails.", "source": "github-repos"}
{"code": "def read(cls, data):\n        \n        if isinstance(data, pd.DataFrame):\n            return cls((json.loads(\n                to_json_stat(data, output='dict', version='2.0'),\n                object_pairs_hook=OrderedDict)))\n        elif isinstance(data, OrderedDict):\n            return cls(data)\n        elif (isinstance(data, basestring)\n              and data.startswith((\"http:\n                                   \"ftp:\n            \n            return cls(request(data))\n        elif isinstance(data, basestring):\n            try:\n                json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n                return cls(json_dict)\n            except ValueError:\n                raise\n        else:\n            try:\n                json_dict = json.load(data, object_pairs_hook=OrderedDict)\n                return cls(json_dict)\n            except ValueError:\n                raise", "docstring": "Reads data from URL, Dataframe, JSON string, JSON file or\nOrderedDict.\nArgs:\ndata: can be a Pandas Dataframe, a JSON file, a JSON string,\nan OrderedDict or a URL pointing to a JSONstat file.\n\nReturns:\nAn object of class Dataset populated with data.", "source": "juraj-google-style"}
{"code": "def exists_evaluator(self, index):\n    attr_name = self.condition_data[index][0]\n    return (self.attributes.get(attr_name) is not None)", "docstring": "Evaluate the given exists match condition for the user attributes.\n\nArgs:\nindex: Index of the condition to be evaluated.\n\nReturns:\nBoolean: True if the user attributes have a non-null value for the given condition,\notherwise False.", "source": "codesearchnet"}
{"code": "def site_occupation_statistics( self ):\n        \n        if self.time == 0.0:\n            return None\n        occupation_stats = { label : 0.0 for label in self.site_labels }\n        for site in self.sites:\n            occupation_stats[ site.label ] += site.time_occupied\n        for label in self.site_labels:\n            occupation_stats[ label ] /= self.time\n        return occupation_stats", "docstring": "Average site occupation for each site type\n\nArgs:\nNone\n\nReturns:\n(Dict(Str:Float)): Dictionary of occupation statistics, e.g.::\n\n{ 'A' : 2.5, 'B' : 25.3 }", "source": "juraj-google-style"}
{"code": "def get_flat_tensor_shapes(element_spec):\n    return [spec.shape for spec in get_flat_tensor_specs(element_spec)]", "docstring": "Returns a list `tf.TensorShapes`s for the element tensor representation.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\n\nReturns:\nA list `tf.TensorShapes`s for the element tensor representation.", "source": "github-repos"}
{"code": "def get_execution_info(self, driver_id, function_descriptor):\n    if self._worker.load_code_from_local:\n        driver_id = ray.DriverID.nil()\n        if (not function_descriptor.is_actor_method()):\n            self._load_function_from_local(driver_id, function_descriptor)\n    else:\n        with profiling.profile('wait_for_function'):\n            self._wait_for_function(function_descriptor, driver_id)\n    try:\n        function_id = function_descriptor.function_id\n        info = self._function_execution_info[driver_id][function_id]\n    except KeyError as e:\n        message = ('Error occurs in get_execution_info: driver_id: %s, function_descriptor: %s. Message: %s' % (driver_id, function_descriptor, e))\n        raise KeyError(message)\n    return info", "docstring": "Get the FunctionExecutionInfo of a remote function.\n\nArgs:\ndriver_id: ID of the driver that the function belongs to.\nfunction_descriptor: The FunctionDescriptor of the function to get.\n\nReturns:\nA FunctionExecutionInfo object.", "source": "codesearchnet"}
{"code": "def l1_regression_loss(y, target, name=None):\n  \n  with tf.name_scope(name, 'l1_regression', [y, target]) as scope:\n    y = tf.convert_to_tensor(y, name='y')\n    target = tf.convert_to_tensor(target, name='target')\n    return reduce_batch_sum(tf.abs(y - target), name=scope)", "docstring": "Calculates the sum of absolute errors between y and target.\n\nArgs:\ny: the calculated values.\ntarget: the desired values.\nname: the name for this op, defaults to l1_regression\nReturns:\nA tensorflow op.", "source": "juraj-google-style"}
{"code": "def _BuildFindSpecsFromGroupName(self, group_name, environment_variables):\n    \n    definition = self._artifacts_registry.GetDefinitionByName(group_name)\n    if not definition:\n      return None\n\n    return self._BuildFindSpecsFromArtifact(definition, environment_variables)", "docstring": "Builds find specifications from a artifact group name.\n\nArgs:\ngroup_name (str): artifact group name.\nenvironment_variables (list[str]): environment variable attributes used to\ndynamically populate environment variables in file and registry\nartifacts.\n\nReturns:\nlist[dfwinreg.FindSpec|dfvfs.FindSpec]: find specifications or None if no\nartifact with the given name can be retrieved.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.GetModel = channel.unary_unary(\n            \"/google.cloud.bigquery.v2.ModelService/GetModel\",\n            request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.GetModelRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.Model.FromString,\n        )\n        self.ListModels = channel.unary_unary(\n            \"/google.cloud.bigquery.v2.ModelService/ListModels\",\n            request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.ListModelsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.ListModelsResponse.FromString,\n        )\n        self.PatchModel = channel.unary_unary(\n            \"/google.cloud.bigquery.v2.ModelService/PatchModel\",\n            request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.PatchModelRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.Model.FromString,\n        )\n        self.DeleteModel = channel.unary_unary(\n            \"/google.cloud.bigquery.v2.ModelService/DeleteModel\",\n            request_serializer=google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__pb2.DeleteModelRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _extract_defaults(self, defaults_var: 'cfg.Variable') -> 'tuple[cfg.Variable, ...] | None':\n    if all((isinstance(d, _instances.Tuple) for d in defaults_var.data)):\n        return max((d.pyval for d in defaults_var.data), key=len)\n    else:\n        if not (all((isinstance(d, (_instance_base.Instance, _singletons.Unknown, _singletons.Unsolvable)) for d in defaults_var.data)) and all((d.full_name == 'builtins.tuple' for d in defaults_var.data if isinstance(d, _instance_base.Instance)))):\n            self.ctx.errorlog.bad_function_defaults(self.ctx.vm.frames, self.name)\n        return None", "docstring": "Extracts defaults from a Variable, used by set_function_defaults.\n\nArgs:\ndefaults_var: Variable containing potential default values.\n\nReturns:\nA tuple of default values, if one could be extracted, or None otherwise.", "source": "github-repos"}
{"code": "def convert_seeded_answers(answers):\n    \n    converted = {}\n    for index, answer in enumerate(answers):\n        converted.setdefault(answer['answer'], {})\n        converted[answer['answer']]['seeded' + str(index)] = answer['rationale']\n\n    return converted", "docstring": "Convert seeded answers into the format that can be merged into student answers.\n\nArgs:\nanswers (list): seeded answers\n\nReturns:\ndict: seeded answers with student answers format:\n{\n0: {\n'seeded0': 'rationaleA'\n}\n1: {\n'seeded1': 'rationaleB'\n}\n}", "source": "juraj-google-style"}
{"code": "def parse_datetime(__string: str) -> datetime.datetime:\n    \n    if not __string:\n        datetime_ = datetime.datetime.now(datetime.timezone.utc)\n    else:\n        \n        datetime_ = ciso8601.parse_datetime(__string)\n    if datetime_.tzinfo is None:\n        datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc)\n    return datetime_", "docstring": "Parse ISO-8601 datetime string.\n\nArgs:\n__string: Datetime string to parse\nReturns:\nParsed datetime object", "source": "juraj-google-style"}
{"code": "class Multimodal2VisionEncoder(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    @can_return_tuple\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Multimodal2VisionEncoderLayer`].\n\nArgs:\nconfig: Multimodal2VisionConfig", "source": "github-repos"}
{"code": "def claim(self, file_readers):\n        \n        unclaimed_readers = []\n        vcf_readers = []\n        for file_reader in file_readers:\n            if self._is_mutect_vcf(file_reader):\n                vcf_reader = vcf.VcfReader(file_reader)\n                vcf_readers.append(_MutectVcfReader(vcf_reader))\n            else:\n                unclaimed_readers.append(file_reader)\n        return (unclaimed_readers, vcf_readers)", "docstring": "Recognizes and claims MuTect VCFs form the set of all input VCFs.\n\nEach defined caller has a chance to evaluate and claim all the incoming\nfiles as something that it can process.\n\nArgs:\nfile_readers: the collection of currently unclaimed files\n\nReturns:\nA tuple of unclaimed readers and MuTectVcfReaders.", "source": "juraj-google-style"}
{"code": "def add_graph(self, y, x_label=None, y_label='', title='', x_run=None, y_run=None, svg_size_px=None, key_position='bottom right'):\n    if (x_run is None):\n        x_run = self.default_x_run\n    if (y_run is None):\n        y_run = self.default_y_run\n    if (svg_size_px is None):\n        svg_size_px = self.default_svg_size_px\n    for panel in self.panels:\n        x_run = self._load_x_run(x_run)\n        y_run = self._load_y_run(y_run)\n        svg_size_px = self._load_svg_size_px(svg_size_px)\n        panel.add_graph(y=y, x_run=x_run, y_run=y_run, svg_size_px=svg_size_px, y_label=y_label, x_label=(x_label if (x_label is not None) else self.default_x_label), title=title, key_position=key_position)", "docstring": "Add a new graph to the overlap report.\n\nArgs:\ny (str): Value plotted on y-axis.\nx_label (str): Label on x-axis.\ny_label (str): Label on y-axis.\ntitle (str): Title of the plot.\nx_run ((float,float)): x-range.\ny_run ((int,int)): y-rang.\nsvg_size_px ((int,int): Size of SVG image in pixels.\nkey_position (str): GnuPlot position of the legend.", "source": "codesearchnet"}
{"code": "def get_nltk_builder(languages):\n    all_stemmers = []\n    all_stopwords_filters = []\n    all_word_characters = set()\n    for language in languages:\n        if (language == 'en'):\n            all_stemmers.append(lunr.stemmer.stemmer)\n            all_stopwords_filters.append(stop_word_filter)\n            all_word_characters.update({'\\\\w'})\n        else:\n            (stopwords, word_characters) = _get_stopwords_and_word_characters(language)\n            all_stemmers.append(Pipeline.registered_functions['stemmer-{}'.format(language)])\n            all_stopwords_filters.append(generate_stop_word_filter(stopwords, language=language))\n            all_word_characters.update(word_characters)\n    builder = Builder()\n    multi_trimmer = generate_trimmer(''.join(sorted(all_word_characters)))\n    Pipeline.register_function(multi_trimmer, 'lunr-multi-trimmer-{}'.format('-'.join(languages)))\n    builder.pipeline.reset()\n    for fn in chain([multi_trimmer], all_stopwords_filters, all_stemmers):\n        builder.pipeline.add(fn)\n    for fn in all_stemmers:\n        builder.search_pipeline.add(fn)\n    return builder", "docstring": "Returns a builder with stemmers for all languages added to it.\n\nArgs:\nlanguages (list): A list of supported languages.", "source": "codesearchnet"}
{"code": "def get_structures(self, chemsys_formula_id, final=True):\n        \n        prop = \"final_structure\" if final else \"initial_structure\"\n        data = self.get_data(chemsys_formula_id, prop=prop)\n        return [d[prop] for d in data]", "docstring": "Get a list of Structures corresponding to a chemical system, formula,\nor materials_id.\n\nArgs:\nchemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),\nor formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).\nfinal (bool): Whether to get the final structure, or the initial\n(pre-relaxation) structure. Defaults to True.\n\nReturns:\nList of Structure objects.", "source": "juraj-google-style"}
{"code": "class JetMoeMoE(nn.Module):\n\n    def __init__(self, config: JetMoeConfig):\n        super(JetMoeMoE, self).__init__()\n        self.input_size = config.hidden_size\n        self.hidden_size = config.intermediate_size\n        self.activation = ACT2FN[config.activation_function]\n        self.bias = torch.nn.Parameter(torch.empty(self.input_size))\n        self.input_linear = JetMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)\n        self.output_linear = JetMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)\n        self.router = JetMoeTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)\n\n    def forward(self, layer_input):\n        \n        bsz, length, emb_size = layer_input.size()\n        layer_input = layer_input.reshape(-1, emb_size)\n        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n        expert_inputs = layer_input[batch_index]\n        hidden_states = self.input_linear(expert_inputs, expert_size)\n        chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n        expert_outputs = self.output_linear(hidden_states, expert_size)\n        expert_outputs = expert_outputs * batch_gates[:, None]\n        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n        layer_output = zeros.index_add(0, batch_index, expert_outputs)\n        layer_output = layer_output.view(bsz, length, self.input_size)\n        layer_output = layer_output + self.bias\n        return (layer_output, router_logits)", "docstring": "A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def get_outputs_filtered(self, owner, spent=None):\n    outputs = self.fastquery.get_outputs_by_public_key(owner)\n    if (spent is None):\n        return outputs\n    elif (spent is True):\n        return self.fastquery.filter_unspent_outputs(outputs)\n    elif (spent is False):\n        return self.fastquery.filter_spent_outputs(outputs)", "docstring": "Get a list of output links filtered on some criteria\n\nArgs:\nowner (str): base58 encoded public_key.\nspent (bool): If ``True`` return only the spent outputs. If\n``False`` return only unspent outputs. If spent is\nnot specified (``None``) return all outputs.\n\nReturns:\n:obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s\npointing to another transaction's condition", "source": "codesearchnet"}
{"code": "def _ensure_proper_types(struct, encoding, force_types):\n    if (force_types is None):\n        return struct\n    res = None\n    if isinstance(struct, (dict, collections.OrderedDict)):\n        res = type(struct)()\n        for (k, v) in struct.items():\n            res[_ensure_proper_types(k, encoding, force_types)] = _ensure_proper_types(v, encoding, force_types)\n    elif isinstance(struct, list):\n        res = []\n        for i in struct:\n            res.append(_ensure_proper_types(i, encoding, force_types))\n    elif isinstance(struct, six.binary_type):\n        res = struct.decode(encoding)\n    elif isinstance(struct, (six.text_type, type(None), type(True), six.integer_types, float)):\n        res = struct\n    elif isinstance(struct, datetime.datetime):\n        res = struct\n    else:\n        raise AnyMarkupError('internal error - unexpected type {0} in parsed markup'.format(type(struct)))\n    if (force_types and isinstance(res, six.text_type)):\n        res = _recognize_basic_types(res)\n    elif (not (force_types or isinstance(res, (dict, collections.OrderedDict, list, six.text_type)))):\n        res = six.text_type(res)\n    return res", "docstring": "A convenience function that recursively makes sure the given structure\ncontains proper types according to value of `force_types`.\n\nArgs:\nstruct: a structure to check and fix\nencoding: encoding to use on found bytestrings\nforce_types:\nif `True`, integers, floats, booleans and none/null\nare recognized and returned as proper types instead of strings;\nif `False`, everything is converted to strings\nif `None`, unmodified `struct` is returned\nReturns:\na fully decoded copy of given structure", "source": "codesearchnet"}
{"code": "def combine_samples(self, md5_list, filename, type_tag):\n        \n        total_bytes = \"\"\n        for md5 in md5_list:\n            total_bytes += self.get_sample(md5)['sample']['raw_bytes']\n            self.remove_sample(md5)\n\n        \n        return self.store_sample(total_bytes, filename, type_tag)", "docstring": "Combine samples together. This may have various use cases the most significant\ninvolving a bunch of sample 'chunks' got uploaded and now we combine them together\n\nArgs:\nmd5_list: The list of md5s to combine, order matters!\nfilename: name of the file (used purely as meta data not for lookup)\ntype_tag: ('exe','pcap','pdf','json','swf', or ...)\nReturns:\nthe computed md5 of the combined samples", "source": "juraj-google-style"}
{"code": "def pprint_value_string(self, value):\n        \n        unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit)\n        value = self.pprint_value(value)\n        return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit)", "docstring": "Pretty print the dimension value and unit.\n\nArgs:\nvalue: Dimension value to format\n\nReturns:\nFormatted dimension value string with unit", "source": "juraj-google-style"}
{"code": "def layer_preprocess(layer_input, hparams, layer_collection=None):\n    assert ('a' not in hparams.layer_preprocess_sequence), 'No residual connections allowed in hparams.layer_preprocess_sequence'\n    assert ('z' not in hparams.layer_preprocess_sequence), 'No residual connections allowed in hparams.layer_preprocess_sequence'\n    return layer_prepostprocess(None, layer_input, sequence=hparams.layer_preprocess_sequence, dropout_rate=hparams.layer_prepostprocess_dropout, norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, dropout_broadcast_dims=comma_separated_string_to_integer_list(getattr(hparams, 'layer_prepostprocess_dropout_broadcast_dims', '')), default_name='layer_prepostprocess', layer_collection=layer_collection)", "docstring": "Apply layer preprocessing.\n\nSee layer_prepostprocess() for details.\n\nA hyperparameters object is passed for convenience.  The hyperparameters\nthat may be used are:\n\nlayer_preprocess_sequence\nlayer_prepostprocess_dropout\nnorm_type\nhidden_size\nnorm_epsilon\n\nArgs:\nlayer_input: a Tensor\nhparams: a hyperparameters object.\nlayer_collection: A tensorflow_kfac.LayerCollection. Only used by the\nKFAC optimizer. Default is None.\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def os_volumes(self):\n    if (not self.__os_volumes):\n        self.__os_volumes = OsVolumes(self.__connection)\n    return self.__os_volumes", "docstring": "Gets the OS Volumes API client.\n\nReturns:\nOsVolumes:", "source": "codesearchnet"}
{"code": "def _finish_parsing(self, instrumentation_block):\n    formatter = _InstrumentationBlockFormatter(instrumentation_block)\n    return formatter.has_completed_result_block_format(self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)", "docstring": "Finishes parsing the instrumentation result block for the final\ninstrumentation run status.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, the instrumentation\nresult block for the instrumenation run. Potentially, thisi\ncould actually be method block if the instrumentation outputi\nis malformed.\n\nReturns:\nA boolean indicating whether the instrumentation run completed\nwith all the tests passing.\n\nRaises:\nsignals.TestError: Error raised if the instrumentation failed to\ncomplete with either a pass or fail status.", "source": "github-repos"}
{"code": "def delete_container_instance_group(access_token, subscription_id, resource_group,\n                                    container_group_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', resource_group,\n                        '/providers/Microsoft.ContainerInstance/ContainerGroups/',\n                        container_group_name,\n                        '?api-version=', CONTAINER_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a container group from a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ncontainer_group_name (str): Name of container instance group.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def _GetSignatureMatchParserNames(self, file_object):\n    \n    parser_names = []\n    scan_state = pysigscan.scan_state()\n    self._file_scanner.scan_file_object(scan_state, file_object)\n\n    for scan_result in iter(scan_state.scan_results):\n      format_specification = (\n          self._formats_with_signatures.GetSpecificationBySignature(\n              scan_result.identifier))\n\n      if format_specification.identifier not in parser_names:\n        parser_names.append(format_specification.identifier)\n\n    return parser_names", "docstring": "Determines if a file-like object matches one of the known signatures.\n\nArgs:\nfile_object (file): file-like object whose contents will be checked\nfor known signatures.\n\nReturns:\nlist[str]: parser names for which the contents of the file-like object\nmatches their known signatures.", "source": "juraj-google-style"}
{"code": "def _call_api(self, method, params=None):\n    url = self.url.format(method=method)\n    if (not params):\n        params = {'token': self.token}\n    else:\n        params['token'] = self.token\n    logger.debug('Send request to %s', url)\n    response = requests.get(url, params=params).json()\n    if self.verify:\n        if (not response['ok']):\n            msg = 'For {url} API returned this bad response {response}'\n            raise Exception(msg.format(url=url, response=response))\n    return response", "docstring": "Low-level method to call the Slack API.\n\nArgs:\nmethod: {str} method name to call\nparams: {dict} GET parameters\nThe token will always be added", "source": "codesearchnet"}
{"code": "def GetUnicodeString(value):\n  \n  if isinstance(value, list):\n    value = [GetUnicodeString(item) for item in value]\n    return ''.join(value)\n\n  if isinstance(value, py2to3.INTEGER_TYPES):\n    value = '{0:d}'.format(value)\n\n  if not isinstance(value, py2to3.UNICODE_TYPE):\n    return codecs.decode(value, 'utf8', 'ignore')\n  return value", "docstring": "Attempts to convert the argument to a Unicode string.\n\nArgs:\nvalue (list|int|bytes|str): value to convert.\n\nReturns:\nstr: string representation of the argument.", "source": "juraj-google-style"}
{"code": "def create_alias(alias_name, alias_command):\n    \n    alias_name, alias_command = alias_name.strip(), alias_command.strip()\n    alias_table = get_alias_table()\n    if alias_name not in alias_table.sections():\n        alias_table.add_section(alias_name)\n\n    alias_table.set(alias_name, 'command', alias_command)\n    _commit_change(alias_table)", "docstring": "Create an alias.\n\nArgs:\nalias_name: The name of the alias.\nalias_command: The command that the alias points to.", "source": "juraj-google-style"}
{"code": "def lint_fileset(*dirnames, **kwargs):\n    try:\n        rc_filename = kwargs['rc_filename']\n        description = kwargs['description']\n        if (len(kwargs) != 2):\n            raise KeyError\n    except KeyError:\n        raise KeyError(_LINT_FILESET_MSG)\n    pylint_shell_command = ['pylint', '--rcfile', rc_filename]\n    pylint_shell_command.extend(dirnames)\n    status_code = subprocess.call(pylint_shell_command)\n    if (status_code != 0):\n        error_message = _ERROR_TEMPLATE.format(description, status_code)\n        print(error_message, file=sys.stderr)\n        sys.exit(status_code)", "docstring": "Lints a group of files using a given rcfile.\n\nKeyword arguments are\n\n* ``rc_filename`` (``str``): The name of the Pylint config RC file.\n* ``description`` (``str``): A description of the files and configuration\ncurrently being run.\n\nArgs:\ndirnames (tuple): Directories to run Pylint in.\nkwargs: The keyword arguments. The only keyword arguments\nare ``rc_filename`` and ``description`` and both\nare required.\n\nRaises:\nKeyError: If the wrong keyword arguments are used.", "source": "codesearchnet"}
{"code": "def get_plugin(cls, name: str) -> Type[ConnectionPlugin]:\n        \n        if name not in cls.available:\n            raise ConnectionPluginNotRegistered(\n                f\"Connection {name!r} is not registered\"\n            )\n        return cls.available[name]", "docstring": "Fetches the connection plugin by name if already registered\n\nArgs:\nname: name of the connection plugin\n\nRaises:\n:obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`", "source": "juraj-google-style"}
{"code": "def descriptors(package):\n    \n    from os import path\n    dpath = _descriptor_path(package)\n    if path.isfile(dpath):\n        import json\n        with open(dpath) as f:\n            jdb = json.load(f)\n        return jdb\n    else:\n        return None", "docstring": "Returns a dictionary of descriptors deserialized from JSON for the\nspecified package.\n\nArgs:\npackage (str): name of the python package to get settings for.", "source": "juraj-google-style"}
{"code": "def import_demonstrations(self, demonstrations):\n    if isinstance(demonstrations, dict):\n        if self.unique_state:\n            demonstrations['states'] = dict(state=demonstrations['states'])\n        if self.unique_action:\n            demonstrations['actions'] = dict(action=demonstrations['actions'])\n        self.model.import_demo_experience(**demonstrations)\n    else:\n        if self.unique_state:\n            states = dict(state=list())\n        else:\n            states = {name: list() for name in demonstrations[0]['states']}\n        internals = {name: list() for name in demonstrations[0]['internals']}\n        if self.unique_action:\n            actions = dict(action=list())\n        else:\n            actions = {name: list() for name in demonstrations[0]['actions']}\n        terminal = list()\n        reward = list()\n        for demonstration in demonstrations:\n            if self.unique_state:\n                states['state'].append(demonstration['states'])\n            else:\n                for (name, state) in states.items():\n                    state.append(demonstration['states'][name])\n            for (name, internal) in internals.items():\n                internal.append(demonstration['internals'][name])\n            if self.unique_action:\n                actions['action'].append(demonstration['actions'])\n            else:\n                for (name, action) in actions.items():\n                    action.append(demonstration['actions'][name])\n            terminal.append(demonstration['terminal'])\n            reward.append(demonstration['reward'])\n        self.model.import_demo_experience(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward)", "docstring": "Imports demonstrations, i.e. expert observations. Note that for large numbers of observations,\nset_demonstrations is more appropriate, which directly sets memory contents to an array an expects\na different layout.\n\nArgs:\ndemonstrations: List of observation dicts", "source": "codesearchnet"}
{"code": "def _build_endpoint(self, endpoint_name):\n        \n        endpoint_relative = settings.get('asmaster_endpoints', endpoint_name)\n        return '%s%s' % (self.host, endpoint_relative)", "docstring": "Generate an enpoint url from a setting name.\n\nArgs:\nendpoint_name(str): setting name for the enpoint to build\n\nReturns:\n(str) url enpoint", "source": "juraj-google-style"}
{"code": "def ExpandWindowsUserEnvironmentVariables(data_string, knowledge_base, sid=None, username=None):\n    win_environ_regex = re.compile('%([^%]+?)%')\n    components = []\n    offset = 0\n    for match in win_environ_regex.finditer(data_string):\n        components.append(data_string[offset:match.start()])\n        kb_user = knowledge_base.GetUser(sid=sid, username=username)\n        kb_value = None\n        if kb_user:\n            kb_value = getattr(kb_user, match.group(1).lower(), None)\n        if (isinstance(kb_value, string_types) and kb_value):\n            components.append(kb_value)\n        else:\n            components.append(('%%%s%%' % match.group(1)))\n        offset = match.end()\n    components.append(data_string[offset:])\n    return ''.join(components)", "docstring": "r\"\"\"Take a string and expand windows user environment variables based.\n\nArgs:\ndata_string: A string, e.g. \"%TEMP%\\\\LogFiles\"\nknowledge_base: A knowledgebase object.\nsid: A Windows SID for a user to expand for.\nusername: A Windows user name to expand for.\n\nReturns:\nA string with available environment variables expanded.", "source": "codesearchnet"}
{"code": "def set_element_dt(self, el_name, dt, tz=None, el_idx=0):\n    dt = d1_common.date_time.cast_naive_datetime_to_tz(dt, tz)\n    self.get_element_by_name(el_name, el_idx).text = dt.isoformat()", "docstring": "Set the text of the selected element to an ISO8601 formatted datetime.\n\nArgs:\nel_name : str\nName of element to update.\n\ndt : datetime.datetime\nDate and time to set\n\ntz : datetime.tzinfo\nTimezone to set\n\n- Without a timezone, other contextual information is required in order to\ndetermine the exact represented time.\n- If dt has timezone: The ``tz`` parameter is ignored.\n- If dt is naive (without timezone): The timezone is set to ``tz``.\n- ``tz=None``: Prevent naive dt from being set to a timezone. Without a\ntimezone, other contextual information is required in order to determine\nthe exact represented time.\n- ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.\n\nel_idx : int\nIndex of element to use in the event that there are multiple sibling\nelements with the same name.", "source": "codesearchnet"}
{"code": "def traverse_ancestors(self, include_self=True):\n        \n        if not isinstance(include_self, bool):\n            raise TypeError(\"include_self must be a bool\")\n        if include_self:\n            c = self\n        else:\n            c = self.parent\n        while c is not None:\n            yield c; c = c.parent", "docstring": "Traverse over the ancestors of this ``Node``\n\nArgs:\n``include_self`` (``bool``): ``True`` to include self in the traversal, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def get_json(filename):\n    check_if_this_file_exist(filename)\n    filename = os.path.abspath(filename)\n    s = command_line(['exiftool', '-G', '-j', '-sort', filename])\n    if s:\n        s = s.decode('utf-8').rstrip('\\r\\n')\n        return json.loads(s)\n    else:\n        return s", "docstring": "Return a json value of the exif\n\nGet a filename and return a JSON object\n\nArguments:\nfilename {string} -- your filename\n\nReturns:\n[JSON] -- Return a JSON object", "source": "codesearchnet"}
{"code": "def AnalyzeClient(self, client):\n    \n\n    \n    \n    \n    \n    \n\n    keywords = set([\".\"])\n\n    def TryAppend(prefix, keyword):\n      precondition.AssertType(prefix, Text)\n      precondition.AssertType(keyword, Text)\n      if keyword:\n        keyword_string = self._NormalizeKeyword(keyword)\n        keywords.add(keyword_string)\n        if prefix:\n          keywords.add(prefix + \":\" + keyword_string)\n\n    def TryAppendPrefixes(prefix, keyword, delimiter):\n      TryAppend(prefix, keyword)\n      segments = keyword.split(delimiter)\n      for i in range(1, len(segments)):\n        TryAppend(prefix, delimiter.join(segments[0:i]))\n      return len(segments)\n\n    def TryAppendIP(ip):\n      TryAppend(\"ip\", ip)\n      \n      if TryAppendPrefixes(\"ip\", Text(ip), \".\") == 4:\n        return\n      \n      TryAppendPrefixes(\"ip\", Text(ip), \":\")\n\n    def TryAppendMac(mac):\n      TryAppend(\"mac\", mac)\n      if len(mac) == 12:\n        \n        \n        TryAppend(\"mac\", \":\".join([mac[i:i + 2] for i in range(0, 12, 2)]))\n\n    TryAppend(\"host\", client.knowledge_base.fqdn)\n    host = client.knowledge_base.fqdn.split(\".\", 1)[0]\n    TryAppendPrefixes(\"host\", host, \"-\")\n    TryAppendPrefixes(\"host\", client.knowledge_base.fqdn, \".\")\n    TryAppend(\"\", client.knowledge_base.os)\n    TryAppend(\"\", client.Uname())\n    TryAppend(\"\", client.os_release)\n    TryAppend(\"\", client.os_version)\n    TryAppend(\"\", client.kernel)\n    TryAppend(\"\", client.arch)\n\n    kb = client.knowledge_base\n    if kb:\n      for user in kb.users:\n        TryAppend(\"user\", user.username)\n        TryAppend(\"\", user.full_name)\n        if user.full_name:\n          for name in user.full_name.split():\n            \n            \n            \n            TryAppend(\"\", name.strip(\"\\\"'()\"))\n\n    for ip in client.GetIPAddresses():\n      TryAppendIP(ip)\n    for mac in client.GetMacAddresses():\n      TryAppendMac(mac)\n\n    client_info = client.startup_info.client_info\n    if client_info:\n      TryAppend(\"client\", client_info.client_name)\n      TryAppend(\"client\", Text(client_info.client_version))\n      if client_info.labels:\n        for label in client_info.labels:\n          TryAppend(\"label\", label)\n\n    return keywords", "docstring": "Finds the client_id and keywords for a client.\n\nArgs:\nclient: A Client object record to find keywords for.\n\nReturns:\nA list of keywords related to client.", "source": "juraj-google-style"}
{"code": "def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format='channels_first'):\n    if init_zero:\n        gamma_initializer = tf.zeros_initializer()\n    else:\n        gamma_initializer = tf.ones_initializer()\n    if (data_format == 'channels_first'):\n        axis = 1\n    else:\n        axis = 3\n    inputs = layers().BatchNormalization(axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training)\n    if relu:\n        inputs = tf.nn.relu(inputs)\n    return inputs", "docstring": "Performs a batch normalization followed by a ReLU.\n\nArgs:\ninputs: `Tensor` of shape `[batch, channels, ...]`.\nis_training: `bool` for whether the model is training.\nrelu: `bool` if False, omits the ReLU operation.\ninit_zero: `bool` if True, initializes scale parameter of batch\nnormalization with 0 instead of 1 (default).\ndata_format: `str` either \"channels_first\" for `[batch, channels, height,\nwidth]` or \"channels_last for `[batch, height, width, channels]`.\n\nReturns:\nA normalized `Tensor` with the same `data_format`.", "source": "codesearchnet"}
{"code": "def qn_to_qubo(expr):\n    \n    try:\n        import sympy\n    except ImportError:\n        raise ImportError(\"This function requires sympy. Please install it.\")\n    assert type(expr) == sympy.Add\n    to_i = lambda s: int(str(s)[1:])\n    max_i = max(map(to_i, expr.free_symbols)) + 1\n    qubo = [[0.] * max_i for _ in range(max_i)]\n    for arg in expr.args:\n        syms = arg.free_symbols\n        assert len(syms) <= 2\n        if len(syms) == 2:\n            assert type(arg) == sympy.Mul\n            i, j = list(map(to_i, syms))\n            if i > j:\n                i, j = j, i\n            if i == j:\n                if len(arg.args) == 2:\n                    qubo[i][i] = float(arg.args[0])\n                elif len(arg.args) == 1:\n                    qubo[i][i] = 1.0\n                else:\n                    raise ValueError(f\"Too many args! arg.args = {arg.args}\")\n                continue\n            if len(arg.args) == 3:\n                qubo[i][j] = float(arg.args[0])\n            elif len(arg.args) == 2:\n                qubo[i][j]\n        if len(syms) == 1:\n            if len(arg.args) == 2:\n                assert type(arg) == sympy.Mul\n                i = to_i(next(iter(syms)))\n                qubo[i][i] = float(arg.args[0])\n            elif len(arg.args) == 1:\n                qubo[i][i] = 1.0\n            else:\n                raise ValueError(f\"Too many args! arg.args = {arg.args}\")\n    return qubo", "docstring": "Convert Sympy's expr to QUBO.\n\nArgs:\nexpr: Sympy's quadratic expression with variable `q0`, `q1`, ...\nReturns:\n[[float]]: Returns QUBO matrix.", "source": "juraj-google-style"}
{"code": "def _SparseReorderGrad(op: ops.Operation, unused_output_indices_grad, output_values_grad):\n    input_indices = op.inputs[0]\n    input_shape = op.inputs[2]\n    num_entries = array_ops.shape(input_indices)[0]\n    entry_indices = math_ops.range(num_entries)\n    sp_unordered = sparse_tensor.SparseTensor(input_indices, entry_indices, input_shape)\n    sp_ordered = sparse_ops.sparse_reorder(sp_unordered)\n    inverted_permutation = array_ops.invert_permutation(sp_ordered.values)\n    return (None, array_ops.gather(output_values_grad, inverted_permutation), None)", "docstring": "Gradients for the SparseReorder op.\n\nArgs:\nop: the SparseReorder op\nunused_output_indices_grad: the incoming gradients of the output indices\noutput_values_grad: the incoming gradients of the output values\n\nReturns:\nGradient for each of the 3 input tensors:\n(input_indices, input_values, input_shape)\nThe gradients for input_indices and input_shape is None.", "source": "github-repos"}
{"code": "def PmfProbLess(pmf1, pmf2):\n    total = 0.0\n    for (v1, p1) in pmf1.Items():\n        for (v2, p2) in pmf2.Items():\n            if (v1 < v2):\n                total += (p1 * p2)\n    return total", "docstring": "Probability that a value from pmf1 is less than a value from pmf2.\n\nArgs:\npmf1: Pmf object\npmf2: Pmf object\n\nReturns:\nfloat probability", "source": "codesearchnet"}
{"code": "def minimum(x1, x2, output_shape=None, name=None):\n  \n  output_shape = convert_to_shape(output_shape)\n  with tf.name_scope(name, default_name=\"minimum\"):\n    x1, x2 = binary_arguments_to_tensors(x1, x2)\n    return MinMaxOperation(\n        tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape(\n            x1.shape, x2.shape, output_shape)).outputs[0]", "docstring": "Binary minimum with broadcsting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def _create_moving_sequence(image, pad_lefts, total_padding):\n  \n\n  with tf.name_scope(\"moving_sequence\"):\n    def get_padded_image(args):\n      pad_left, = args\n      pad_right = total_padding - pad_left\n      padding = tf.stack([pad_left, pad_right], axis=-1)\n      z = tf.zeros((1, 2), dtype=pad_left.dtype)\n      padding = tf.concat([padding, z], axis=0)\n      return tf.pad(image, padding)\n\n    padded_images = tf.map_fn(\n        get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False,\n        back_prop=False)\n\n  return padded_images", "docstring": "Create a moving image sequence from the given image a left padding values.\n\nArgs:\nimage: [in_h, in_w, n_channels] uint8 array\npad_lefts: [sequence_length, 2] int32 array of left padding values\ntotal_padding: tensor of padding values, (pad_h, pad_w)\n\nReturns:\n[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where\nout_h = in_h + pad_h, out_w = in_w + out_w", "source": "juraj-google-style"}
{"code": "def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):\n    if axis != -1 and axis != len(output.shape) - 1:\n        raise ValueError(f'Only axis=-1 is currently supported. Received: axis={axis}')\n    output, from_logits = _get_logits(output, from_logits, 'Softmax', 'sparse_categorical_crossentropy')\n    target = tf.convert_to_tensor(target)\n    target = tf.cast(target, dtype='int64')\n    output = tf.convert_to_tensor(output)\n    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:\n        target = tf.squeeze(target, axis=-1)\n    if len(output.shape) < 1:\n        raise ValueError(f'Argument `output` must be at least rank 1. Received: output.shape={output.shape}')\n    if len(target.shape) != len(output.shape[:-1]):\n        raise ValueError(f'Argument `output` must have rank (ndim) `target.ndim - 1`. Received: target.shape={target.shape}, output.shape={output.shape}')\n    for e1, e2 in zip(target.shape, output.shape[:-1]):\n        if e1 is not None and e2 is not None and (e1 != e2):\n            raise ValueError(f'Arguments `target` and `output` must have the same shape up until the last dimension: target.shape={target.shape}, output.shape={output.shape}')\n    if not from_logits:\n        output = tf.clip_by_value(output, backend.epsilon(), 1 - backend.epsilon())\n        output = tf.math.log(output)\n    result = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)\n    return result", "docstring": "Categorical crossentropy with integer targets.\n\nArgs:\ntarget: An integer tensor.\noutput: A tensor resulting from a softmax\n(unless `from_logits` is True, in which\ncase `output` is expected to be the logits).\nfrom_logits: Boolean, whether `output` is the\nresult of a softmax, or is a tensor of logits.\naxis: Int specifying the channels axis. `axis=-1` corresponds to data\nformat `channels_last`, and `axis=1` corresponds to data format\n`channels_first`.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def WriteSourceFile(self, source_file):\n    debug_event = debug_event_pb2.DebugEvent(source_file=source_file)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteSourceFile(self._dump_root, debug_event)", "docstring": "Write a SourceFile proto with the writer.\n\nArgs:\nsource_file: A SourceFile proto, describing the content of a source file\ninvolved in the execution of the debugged TensorFlow program.", "source": "github-repos"}
{"code": "def DeleteSnapshots(self, request, global_params=None):\n    config = self.GetMethodConfig('DeleteSnapshots')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes a snapshot.\n\nArgs:\nrequest: (DataflowProjectsDeleteSnapshotsRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(DeleteSnapshotResponse) The response message.", "source": "github-repos"}
{"code": "def Getattr(self, path, fh=None):\n    \n    del fh\n\n    if not path:\n      raise fuse.FuseOSError(errno.ENOENT)\n\n    if path != self.root:\n      full_path = self.root.Add(path)\n    else:\n      full_path = path\n\n    fd = aff4.FACTORY.Open(full_path, token=self.token)\n\n    \n    \n    if full_path == \"/\":\n      return self.MakePartialStat(fd)\n\n    fd = aff4.FACTORY.Open(full_path, token=self.token)\n    \n    aff4_stat = fd.Get(fd.Schema.STAT)\n\n    \n    \n    if aff4_stat:\n      return aff4_stat.AsDict()\n\n    \n    \n\n    \n    \n    \n    \n    \n    elif fd.Get(fd.Schema.LAST) is None:\n      \n      raise fuse.FuseOSError(errno.ENOENT)\n    else:\n      \n      \n      pass\n\n    \n    \n    return self.MakePartialStat(fd)", "docstring": "Performs a stat on a file or directory.\n\nArgs:\npath: The path to stat.\nfh: A file handler. Not used.\n\nReturns:\nA dictionary mapping st_ names to their values.\n\nRaises:\nFuseOSError: When a path is supplied that grr doesn't know about, ie an\ninvalid file path.\nValueError: If an empty path is passed. (The empty string, when passed to\nself.root.Add, returns a path for aff4:/, the root directory, which is not\nthe behaviour we want.)", "source": "juraj-google-style"}
{"code": "def stddev(self, name='stddev'):\n    with self._name_scope(name):\n        try:\n            return self._stddev()\n        except NotImplementedError as original_exception:\n            try:\n                return math_ops.sqrt(self._variance())\n            except NotImplementedError:\n                raise original_exception", "docstring": "Standard deviation.\n\nStandard deviation is defined as,\n\n```none\nstddev = E[(X - E[X])**2]**0.5\n```\n\nwhere `X` is the random variable associated with this distribution, `E`\ndenotes expectation, and `stddev.shape = batch_shape + event_shape`.\n\nArgs:\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nstddev: Floating-point `Tensor` with shape identical to\n`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.", "source": "github-repos"}
{"code": "def _check_validity(cls, text):\n    if ((not text[0].lstrip().startswith('1 ')) or (not text[1].lstrip().startswith('2 '))):\n        raise ValueError('Line number check failed')\n    for line in text:\n        line = line.strip()\n        if (str(cls._checksum(line)) != line[(- 1)]):\n            raise ValueError('Checksum validation failed')", "docstring": "Check the validity of a TLE\n\nArgs:\ntext (tuple of str)\nRaise:\nValueError", "source": "codesearchnet"}
{"code": "def remove_plugin(self, name, force=False):\n    url = self._url('/plugins/{0}', name)\n    res = self._delete(url, params={'force': force})\n    self._raise_for_status(res)\n    return True", "docstring": "Remove an installed plugin.\n\nArgs:\nname (string): Name of the plugin to remove. The ``:latest``\ntag is optional, and is the default if omitted.\nforce (bool): Disable the plugin before removing. This may\nresult in issues if the plugin is in use by a container.\n\nReturns:\n``True`` if successful", "source": "codesearchnet"}
{"code": "def create_xml_dom_element(doc, name, value):\n    s = str_or_unicode(value)\n    if (six.PY2 and (not isinstance(s, unicode))):\n        s = s.decode('utf-8', 'ignore')\n    if isinstance(value, bool):\n        s = s.lower()\n    s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s)\n    e = doc.createElement(name)\n    e.appendChild(doc.createTextNode(s))\n    return e", "docstring": "Returns an XML DOM element with name and text value.\n\nArgs:\ndoc: minidom.Document, the DOM document it should create nodes from.\nname: str, the tag of XML element.\nvalue: object, whose string representation will be used\nas the value of the XML element. Illegal or highly discouraged xml 1.0\ncharacters are stripped.\n\nReturns:\nAn instance of minidom.Element.", "source": "codesearchnet"}
{"code": "def _create_dag_op(self, name, params, qargs):\n        \n        if name == \"u0\":\n            op_class = U0Gate\n        elif name == \"u1\":\n            op_class = U1Gate\n        elif name == \"u2\":\n            op_class = U2Gate\n        elif name == \"u3\":\n            op_class = U3Gate\n        elif name == \"x\":\n            op_class = XGate\n        elif name == \"y\":\n            op_class = YGate\n        elif name == \"z\":\n            op_class = ZGate\n        elif name == \"t\":\n            op_class = TGate\n        elif name == \"tdg\":\n            op_class = TdgGate\n        elif name == \"s\":\n            op_class = SGate\n        elif name == \"sdg\":\n            op_class = SdgGate\n        elif name == \"swap\":\n            op_class = SwapGate\n        elif name == \"rx\":\n            op_class = RXGate\n        elif name == \"ry\":\n            op_class = RYGate\n        elif name == \"rz\":\n            op_class = RZGate\n        elif name == \"rzz\":\n            op_class = RZZGate\n        elif name == \"id\":\n            op_class = IdGate\n        elif name == \"h\":\n            op_class = HGate\n        elif name == \"cx\":\n            op_class = CnotGate\n        elif name == \"cy\":\n            op_class = CyGate\n        elif name == \"cz\":\n            op_class = CzGate\n        elif name == \"ch\":\n            op_class = CHGate\n        elif name == \"crz\":\n            op_class = CrzGate\n        elif name == \"cu1\":\n            op_class = Cu1Gate\n        elif name == \"cu3\":\n            op_class = Cu3Gate\n        elif name == \"ccx\":\n            op_class = ToffoliGate\n        elif name == \"cswap\":\n            op_class = FredkinGate\n        else:\n            raise QiskitError(\"unknown operation for ast node name %s\" % name)\n\n        op = op_class(*params)\n\n        self.dag.apply_operation_back(op, qargs, [], condition=self.condition)", "docstring": "Create a DAG node out of a parsed AST op node.\n\nArgs:\nname (str): operation name to apply to the dag.\nparams (list): op parameters\nqargs (list(QuantumRegister, int)): qubits to attach to\n\nRaises:\nQiskitError: if encountering a non-basis opaque gate", "source": "juraj-google-style"}
{"code": "async def send_script(self, conn_id, data):\n        \n\n        self._ensure_connection(conn_id, True)\n        connection_string = self._get_property(conn_id, \"connection_string\")\n\n        msg = dict(connection_string=connection_string, fragment_count=1, fragment_index=0,\n                   script=base64.b64encode(data))\n        await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse)", "docstring": "Send a a script to this IOTile device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\ndata (bytes): the script to send to the device", "source": "juraj-google-style"}
{"code": "def broadcast(tensor):\n    _check_device(tensor)\n    with ops.device(tensor.device):\n        return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)", "docstring": "Returns a tensor that can be efficiently transferred to other devices.\n\nArgs:\ntensor: The tensor to send; must be assigned to a GPU device.\n\nReturns:\nA tensor with the value of `src_tensor`, which can be used as input to\nops on other GPU devices.", "source": "github-repos"}
{"code": "def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None):\n    if name in self._cols_to_vars_map[feature_column]:\n        raise ValueError('Variable already exists.')\n    with trackable.no_manual_dependency_tracking_scope(self._layer):\n        var = self._layer.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, trainable=self._trainable and trainable, use_resource=use_resource, getter=variable_scope.get_variable)\n    if isinstance(var, variables.PartitionedVariable):\n        for v in var:\n            part_name = name + '/' + str(v._get_save_slice_info().var_offset[0])\n            self._layer._track_trackable(v, feature_column.name + '/' + part_name)\n    elif isinstance(var, trackable.Trackable):\n        self._layer._track_trackable(var, feature_column.name + '/' + name)\n    self._cols_to_vars_map[feature_column][name] = var\n    return var", "docstring": "Creates a new variable.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nname: variable name.\nshape: variable shape.\ndtype: The type of the variable. Defaults to `self.dtype` or `float32`.\ntrainable: Whether this variable is trainable or not.\nuse_resource: If true, we use resource variables. Otherwise we use\nRefVariable.\ninitializer: initializer instance (callable).\n\nReturns:\nThe created variable.", "source": "github-repos"}
{"code": "def _rpc(self, method, *args):\n    with self._lock:\n        apiid = next(self._counter)\n        data = {'id': apiid, 'method': method, 'params': args}\n        request = json.dumps(data)\n        self._client_send(request)\n        response = self._client_receive()\n    if (not response):\n        raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_SERVER)\n    result = json.loads(str(response, encoding='utf8'))\n    if result['error']:\n        raise ApiError(self._ad, result['error'])\n    if (result['id'] != apiid):\n        raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)\n    if (result.get('callback') is not None):\n        if (self._event_client is None):\n            self._event_client = self._start_event_client()\n        return callback_handler.CallbackHandler(callback_id=result['callback'], event_client=self._event_client, ret_value=result['result'], method_name=method, ad=self._ad)\n    return result['result']", "docstring": "Sends an rpc to the app.\n\nArgs:\nmethod: str, The name of the method to execute.\nargs: any, The args of the method.\n\nReturns:\nThe result of the rpc.\n\nRaises:\nProtocolError: Something went wrong with the protocol.\nApiError: The rpc went through, however executed with errors.", "source": "codesearchnet"}
{"code": "def _get_user_command_string(self):\n    sdk_int = int(self._ad.build_info['build_version_sdk'])\n    if sdk_int < 24:\n        return ''\n    return f'--user {self.user_id}'", "docstring": "Gets the appropriate command argument for specifying user IDs.\n\nBy default, `SnippetClient` operates within the current user.\n\nWe don't add the `--user {ID}` arg when Android's SDK is below 24,\nwhere multi-user support is not well implemented.\n\nReturns:\nString, the command param section to be formatted into the adb\ncommands.", "source": "github-repos"}
{"code": "def verify_fileobj(fileobj, writable=False):\n    try:\n        data = fileobj.read(0)\n    except Exception:\n        if (not hasattr(fileobj, 'read')):\n            raise ValueError(('%r not a valid file object' % fileobj))\n        raise ValueError((\"Can't read from file object %r\" % fileobj))\n    if (not isinstance(data, bytes)):\n        raise ValueError(('file object %r not opened in binary mode' % fileobj))\n    if writable:\n        try:\n            fileobj.write(b'')\n        except Exception:\n            if (not hasattr(fileobj, 'write')):\n                raise ValueError(('%r not a valid file object' % fileobj))\n            raise ValueError((\"Can't write to file object %r\" % fileobj))", "docstring": "Verifies that the passed fileobj is a file like object which\nwe can use.\n\nArgs:\nwritable (bool): verify that the file object is writable as well\n\nRaises:\nValueError: In case the object is not a file object that is readable\n(or writable if required) or is not opened in bytes mode.", "source": "codesearchnet"}
{"code": "def match_bitap(self, text, pattern, loc):\n    s = self.match_alphabet(pattern)\n\n    def match_bitapScore(e, x):\n        'Compute and return the score for a match with e errors and x location.\\n      Accesses loc and pattern through being a closure.\\n\\n      Args:\\n        e: Number of errors in match.\\n        x: Location of match.\\n\\n      Returns:\\n        Overall score for match (0.0 = good, 1.0 = bad).\\n      '\n        accuracy = (float(e) / len(pattern))\n        proximity = abs((loc - x))\n        if (not self.Match_Distance):\n            return ((proximity and 1.0) or accuracy)\n        return (accuracy + (proximity / float(self.Match_Distance)))\n    score_threshold = self.Match_Threshold\n    best_loc = text.find(pattern, loc)\n    if (best_loc != (- 1)):\n        score_threshold = min(match_bitapScore(0, best_loc), score_threshold)\n        best_loc = text.rfind(pattern, (loc + len(pattern)))\n        if (best_loc != (- 1)):\n            score_threshold = min(match_bitapScore(0, best_loc), score_threshold)\n    matchmask = (1 << (len(pattern) - 1))\n    best_loc = (- 1)\n    bin_max = (len(pattern) + len(text))\n    last_rd = None\n    for d in range(len(pattern)):\n        bin_min = 0\n        bin_mid = bin_max\n        while (bin_min < bin_mid):\n            if (match_bitapScore(d, (loc + bin_mid)) <= score_threshold):\n                bin_min = bin_mid\n            else:\n                bin_max = bin_mid\n            bin_mid = (((bin_max - bin_min) \n        bin_max = bin_mid\n        start = max(1, ((loc - bin_mid) + 1))\n        finish = (min((loc + bin_mid), len(text)) + len(pattern))\n        rd = ([0] * (finish + 2))\n        rd[(finish + 1)] = ((1 << d) - 1)\n        for j in range(finish, (start - 1), (- 1)):\n            if (len(text) <= (j - 1)):\n                charMatch = 0\n            else:\n                charMatch = s.get(text[(j - 1)], 0)\n            if (d == 0):\n                rd[j] = (((rd[(j + 1)] << 1) | 1) & charMatch)\n            else:\n                rd[j] = (((((rd[(j + 1)] << 1) | 1) & charMatch) | (((last_rd[(j + 1)] | last_rd[j]) << 1) | 1)) | last_rd[(j + 1)])\n            if (rd[j] & matchmask):\n                score = match_bitapScore(d, (j - 1))\n                if (score <= score_threshold):\n                    score_threshold = score\n                    best_loc = (j - 1)\n                    if (best_loc > loc):\n                        start = max(1, ((2 * loc) - best_loc))\n                    else:\n                        break\n        if (match_bitapScore((d + 1), loc) > score_threshold):\n            break\n        last_rd = rd\n    return best_loc", "docstring": "Locate the best instance of 'pattern' in 'text' near 'loc' using the\nBitap algorithm.\n\nArgs:\ntext: The text to search.\npattern: The pattern to search for.\nloc: The location to search around.\n\nReturns:\nBest match index or -1.", "source": "codesearchnet"}
{"code": "def do_operation_update(self, info, an_op):\n        \n        self.update_op_func(self.metric_name, info, an_op)", "docstring": "Updates an operation using the assigned update_op_func\n\nArgs:\ninfo: (:class:`endpoints_management.control.report_request.Info`): the\ninfo instance to update\nan_op: (:class:`endpoints_management.control.report_request.Info`):\nthe info instance to update\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "juraj-google-style"}
{"code": "def _IsValidUrl(self, url):\n    parsed_url = urlparse.urlparse(url)\n    return (parsed_url.scheme in self._SUPPORTED_URL_SCHEMES)", "docstring": "Checks if an URL is considered valid.\n\nReturns:\nbool: True if the URL is valid.", "source": "codesearchnet"}
{"code": "def find_copy_constructor(type_):\n    copy_ = type_.constructors((lambda x: is_copy_constructor(x)), recursive=False, allow_empty=True)\n    if copy_:\n        return copy_[0]\n    return None", "docstring": "Returns reference to copy constructor.\n\nArgs:\ntype_ (declarations.class_t): the class to be searched.\n\nReturns:\ndeclarations.constructor_t: the copy constructor", "source": "codesearchnet"}
{"code": "def splitdrive(self, path):\n    path = make_string_path(path)\n    if self.is_windows_fs:\n        if (len(path) >= 2):\n            path = self.normcase(path)\n            sep = self._path_separator(path)\n            if (sys.version_info >= (2, 7, 8)):\n                if ((path[0:2] == (sep * 2)) and (path[2:3] != sep)):\n                    sep_index = path.find(sep, 2)\n                    if (sep_index == (- 1)):\n                        return (path[:0], path)\n                    sep_index2 = path.find(sep, (sep_index + 1))\n                    if (sep_index2 == (sep_index + 1)):\n                        return (path[:0], path)\n                    if (sep_index2 == (- 1)):\n                        sep_index2 = len(path)\n                    return (path[:sep_index2], path[sep_index2:])\n            if (path[1:2] == self._matching_string(path, ':')):\n                return (path[:2], path[2:])\n    return (path[:0], path)", "docstring": "Splits the path into the drive part and the rest of the path.\n\nTaken from Windows specific implementation in Python 3.5\nand slightly adapted.\n\nArgs:\npath: the full path to be splitpath.\n\nReturns:\nA tuple of the drive part and the rest of the path, or of\nan empty string and the full path if drive letters are\nnot supported or no drive is present.", "source": "codesearchnet"}
{"code": "def get_plot(self, xlim=None, ylim=None, units=\"thz\"):\n        \n\n        u = freq_units(units)\n\n        ncolors = max(3, len(self._doses))\n        ncolors = min(9, ncolors)\n\n        import palettable\n\n        colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors\n\n        y = None\n        alldensities = []\n        allfrequencies = []\n        plt = pretty_plot(12, 8)\n\n        \n        \n        for key, dos in self._doses.items():\n            frequencies = dos['frequencies'] * u.factor\n            densities = dos['densities']\n            if y is None:\n                y = np.zeros(frequencies.shape)\n            if self.stack:\n                y += densities\n                newdens = y.copy()\n            else:\n                newdens = densities\n            allfrequencies.append(frequencies)\n            alldensities.append(newdens)\n\n        keys = list(self._doses.keys())\n        keys.reverse()\n        alldensities.reverse()\n        allfrequencies.reverse()\n        allpts = []\n        for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):\n            allpts.extend(list(zip(frequencies, densities)))\n            if self.stack:\n                plt.fill(frequencies, densities, color=colors[i % ncolors],\n                         label=str(key))\n            else:\n                plt.plot(frequencies, densities, color=colors[i % ncolors],\n                         label=str(key), linewidth=3)\n\n        if xlim:\n            plt.xlim(xlim)\n        if ylim:\n            plt.ylim(ylim)\n        else:\n            xlim = plt.xlim()\n            relevanty = [p[1] for p in allpts\n                         if xlim[0] < p[0] < xlim[1]]\n            plt.ylim((min(relevanty), max(relevanty)))\n\n        ylim = plt.ylim()\n        plt.plot([0, 0], ylim, 'k--', linewidth=2)\n\n        plt.xlabel(r'$\\mathrm{{Frequencies\\ ({})}}$'.format(u.label))\n        plt.ylabel(r'$\\mathrm{Density\\ of\\ states}$')\n\n        plt.legend()\n        leg = plt.gca().get_legend()\n        ltext = leg.get_texts()  \n        plt.setp(ltext, fontsize=30)\n        plt.tight_layout()\n        return plt", "docstring": "Get a matplotlib plot showing the DOS.\n\nArgs:\nxlim: Specifies the x-axis limits. Set to None for automatic\ndetermination.\nylim: Specifies the y-axis limits.\nunits: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.", "source": "juraj-google-style"}
{"code": "def UninstallDriver(bundle_name):\n    km = objc.KextManager()\n    cf_bundle_name = km.PyStringToCFString(bundle_name)\n    status = km.iokit.KextManagerUnloadKextWithIdentifier(cf_bundle_name)\n    km.dll.CFRelease(cf_bundle_name)\n    return status", "docstring": "Calls into the IOKit to unload a kext by its name.\n\nArgs:\nbundle_name: The bundle identifier of the kernel extension as defined in\nInfo.plist field CFBundleIdentifier.\nReturns:\nThe error code from the library call. objc.OS_SUCCESS if successfull.", "source": "codesearchnet"}
{"code": "def case_report_content(store, institute_obj, case_obj):\n    \n    variant_types = {\n        'causatives_detailed': 'causatives',\n        'suspects_detailed': 'suspects',\n        'classified_detailed': 'acmg_classification',\n        'tagged_detailed': 'manual_rank',\n        'dismissed_detailed': 'dismiss_variant',\n        'commented_detailed': 'is_commented',\n    }\n    data = case_obj\n\n    for individual in data['individuals']:\n        try:\n            sex = int(individual.get('sex', 0))\n        except ValueError as err:\n            sex = 0\n        individual['sex_human'] = SEX_MAP[sex]\n        individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype'])\n\n    \n    data['comments'] = store.events(institute_obj, case=case_obj, comments=True)\n\n    data['manual_rank_options'] = MANUAL_RANK_OPTIONS\n    data['dismissed_options'] = DISMISS_VARIANT_OPTIONS\n    data['genetic_models'] = dict(GENETIC_MODELS)\n    data['report_created_at'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n\n    evaluated_variants = {}\n    for vt in variant_types:\n        evaluated_variants[vt] = []\n    \n    \n    for var_type in ['causatives', 'suspects']:\n        \n        vt = '_'.join([var_type, 'detailed'])\n        for var_id in case_obj.get(var_type,[]):\n            variant_obj = store.variant(var_id)\n            if not variant_obj:\n                continue\n            \n            evaluated_variants[vt].append(variant_obj)\n\n    \n    for var_obj in store.evaluated_variants(case_id=case_obj['_id']):\n        \n        for vt in variant_types:\n            keyword = variant_types[vt]\n            \n            \n            if keyword in var_obj:\n                evaluated_variants[vt].append(var_obj)\n\n    for var_type in evaluated_variants:\n        decorated_variants = []\n        for var_obj in evaluated_variants[var_type]:\n        \n            if var_obj['category'] == 'snv':\n                decorated_info = variant_decorator(\n                        store=store,\n                        institute_obj=institute_obj,\n                        case_obj=case_obj,\n                        variant_id=None,\n                        variant_obj=var_obj,\n                        add_case=False,\n                        add_other=False,\n                        get_overlapping=False\n                    )\n            else:\n                decorated_info = sv_variant(\n                    store=store,\n                    institute_id=institute_obj['_id'],\n                    case_name=case_obj['display_name'],\n                    variant_obj=var_obj,\n                    add_case=False,\n                    get_overlapping=False\n                    )\n            decorated_variants.append(decorated_info['variant'])\n        \n        data[var_type] = decorated_variants\n\n    return data", "docstring": "Gather contents to be visualized in a case report\n\nArgs:\nstore(adapter.MongoAdapter)\ninstitute_obj(models.Institute)\ncase_obj(models.Case)\n\nReturns:\ndata(dict)", "source": "juraj-google-style"}
{"code": "def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):\n    if any_symbolic_tensors((x,)):\n        return ConvertToTensor(dtype=dtype, sparse=sparse, ragged=ragged)(x)\n    return backend.core.convert_to_tensor(x, dtype=dtype, sparse=sparse, ragged=ragged)", "docstring": "Convert a NumPy array or Python array to a tensor.\n\nNative tensors for the current backend or left unchanged unless the `dtype`,\n`sparse` or `ragged` arguments are set.\n\nArgs:\nx: A NumPy array, Python array (can be nested) or a backend tensor.\ndtype: The target type. If `None`, the type of `x` is used.\nsparse: Whether to keep sparse tensors. `False` will cause sparse\ntensors to be densified. The default value of `None` means that\nsparse tensors are kept only if the backend supports them.\nragged: Whether to keep ragged tensors. `False` will cause ragged\ntensors to be densified. The default value of `None` means that\nragged tensors are kept only if the backend supports them.\n\nReturns:\nA backend tensor of the specified `dtype` and sparseness.\n\nExample:\n\n>>> x = np.array([1, 2, 3])\n>>> y = keras.ops.convert_to_tensor(x)", "source": "github-repos"}
{"code": "def _calculate_scores(self, query, key):\n    scores = math_ops.matmul(query, key, transpose_b=True)\n    if self.scale is not None:\n        scores *= self.scale\n    return scores", "docstring": "Calculates attention scores as a query-key dot product.\n\nArgs:\nquery: Query tensor of shape `[batch_size, Tq, dim]`.\nkey: Key tensor of shape `[batch_size, Tv, dim]`.\nReturns:\nTensor of shape `[batch_size, Tq, Tv]`.", "source": "github-repos"}
{"code": "def get_scan_stats(self):\n    time_spent = time.time()\n    return (self._scan_event_count, self._v1_scan_count, self._v1_scan_response_count, self._v2_scan_count, self._device_scan_counts.copy(), (time_spent - self._last_reset_time))", "docstring": "Return the scan event statistics for this adapter\n\nReturns:\nint : total scan events\nint : total v1 scan count\nint : total v1 scan response count\nint : total v2 scan count\ndict : device-specific scan counts\nfloat : seconds since last reset", "source": "codesearchnet"}
{"code": "def assert_is_compatible_with(self, other):\n    if not self.is_compatible_with(other):\n        raise ValueError('Shapes %s and %s are incompatible' % (self, other))", "docstring": "Raises exception if `self` and `other` do not represent the same shape.\n\nThis method can be used to assert that there exists a shape that both\n`self` and `other` represent.\n\nArgs:\nother: Another TensorShape.\n\nRaises:\nValueError: If `self` and `other` do not represent the same shape.", "source": "github-repos"}
{"code": "def eye(size, dtype=None, name=None):\n    if dtype is None:\n        dtype = floatx()\n    tf_dtype = dtypes_module.as_dtype(dtype)\n    return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)", "docstring": "Instantiate an identity matrix and returns it.\n\nArgs:\nsize: Integer, number of rows/columns.\ndtype: String, data type of returned Keras variable.\nname: String, name of returned Keras variable.\n\nReturns:\nA Keras variable, an identity matrix.\n\nExample:\n\n\n>>> kvar = tf.keras.backend.eye(3)\n>>> tf.keras.backend.eval(kvar)\narray([[1.,  0.,  0.],\n[0.,  1.,  0.],\n[0.,  0.,  1.]], dtype=float32)", "source": "github-repos"}
{"code": "def compare_python_to_reference_murmur3_32(data: Any, seed: int=0) -> None:\n    assert mmh3, 'Need mmh3 module'\n    c_data = to_str(data)\n    c_signed = mmh3.hash(c_data, seed=seed)\n    py_data = to_bytes(c_data)\n    py_unsigned = murmur3_x86_32(py_data, seed=seed)\n    py_signed = twos_comp_to_signed(py_unsigned, n_bits=32)\n    preamble = 'Hashing {data} with MurmurHash3/32-bit/seed={seed}'.format(data=repr(data), seed=seed)\n    if (c_signed == py_signed):\n        print((preamble + ' -> {result}: OK'.format(result=c_signed)))\n    else:\n        raise AssertionError((preamble + '; mmh3 says {c_data} -> {c_signed}, Python version says {py_data} -> {py_unsigned} = {py_signed}'.format(c_data=repr(c_data), c_signed=c_signed, py_data=repr(py_data), py_unsigned=py_unsigned, py_signed=py_signed)))", "docstring": "Checks the pure Python implementation of 32-bit murmur3 against the\n``mmh3`` C-based module.\n\nArgs:\ndata: data to hash\nseed: seed\n\nRaises:\nAssertionError: if the two calculations don't match", "source": "codesearchnet"}
{"code": "def context(self, name):\n    data = self._context(name)\n    context = data.get('context')\n    if context:\n        return context\n    assert self.load_path\n    context_path = os.path.join(self.load_path, 'contexts', ('%s.rxt' % name))\n    context = ResolvedContext.load(context_path)\n    data['context'] = context\n    data['loaded'] = True\n    return context", "docstring": "Get a context.\n\nArgs:\nname (str): Name to store the context under.\n\nReturns:\n`ResolvedContext` object.", "source": "codesearchnet"}
{"code": "def __mul__(self, other):\n    try:\n        other = as_dimension(other)\n    except (TypeError, ValueError):\n        return NotImplemented\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(self._value * other.value)", "docstring": "Returns the product of `self` and `other`.\n\nDimensions are summed as follows:\n\n```python\ntf.compat.v1.Dimension(m)    * tf.compat.v1.Dimension(n)     ==\ntf.compat.v1.Dimension(m * n)\ntf.compat.v1.Dimension(m)    * tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n)     # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\n```\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is the product of `self` and `other`.", "source": "github-repos"}
{"code": "def valid(self, value, level=[]):\n\t\t\n\n\t\t\n\t\tself.validation_failures = []\n\n\t\t\n\t\tif value is None and self._optional:\n\t\t\treturn True\n\n\t\t\n\t\tfor i in range(len(self._nodes)):\n\n\t\t\t\n\t\t\tif self._nodes[i].valid(value):\n\n\t\t\t\t\n\t\t\t\treturn True\n\n\t\t\n\t\tself.validation_failures.append(('.'.join(level), 'no valid option'))\n\t\treturn False", "docstring": "Valid\n\nChecks if a value is valid based on the instance's values\n\nArguments:\nvalue {mixed} -- The value to validate\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def complement(self, alphabet):\n        \n        states = sorted(self.states, key=attrgetter('initial'), reverse=True)\n        for state in states:\n            if state.final:\n                state.final = False\n            else:\n                state.final = True", "docstring": "Returns the complement of DFA\nArgs:\nalphabet (list): The input alphabet\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def close(self, channel_identifier: ChannelID, partner: Address, balance_hash: BalanceHash, nonce: Nonce, additional_hash: AdditionalHash, signature: Signature, given_block_identifier: BlockSpecification):\n    log_details = {'token_network': pex(self.address), 'node': pex(self.node_address), 'partner': pex(partner), 'nonce': nonce, 'balance_hash': encode_hex(balance_hash), 'additional_hash': encode_hex(additional_hash), 'signature': encode_hex(signature)}\n    log.debug('closeChannel called', **log_details)\n    checking_block = self.client.get_checking_block()\n    try:\n        self._close_preconditions(channel_identifier, partner=partner, block_identifier=given_block_identifier)\n    except NoStateForBlockIdentifier:\n        pass\n    error_prefix = 'closeChannel call will fail'\n    with self.channel_operations_lock[partner]:\n        gas_limit = self.proxy.estimate_gas(checking_block, 'closeChannel', channel_identifier=channel_identifier, partner=partner, balance_hash=balance_hash, nonce=nonce, additional_hash=additional_hash, signature=signature)\n        if gas_limit:\n            error_prefix = 'closeChannel call failed'\n            transaction_hash = self.proxy.transact('closeChannel', safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_CLOSE_CHANNEL), channel_identifier=channel_identifier, partner=partner, balance_hash=balance_hash, nonce=nonce, additional_hash=additional_hash, signature=signature)\n            self.client.poll(transaction_hash)\n            receipt_or_none = check_transaction_threw(self.client, transaction_hash)\n        transaction_executed = (gas_limit is not None)\n        if ((not transaction_executed) or receipt_or_none):\n            if transaction_executed:\n                block = receipt_or_none['blockNumber']\n            else:\n                block = checking_block\n            self.proxy.jsonrpc_client.check_for_insufficient_eth(transaction_name='closeChannel', transaction_executed=transaction_executed, required_gas=GAS_REQUIRED_FOR_CLOSE_CHANNEL, block_identifier=block)\n            (error_type, msg) = self._check_channel_state_for_close(participant1=self.node_address, participant2=partner, block_identifier=block, channel_identifier=channel_identifier)\n            if (not error_type):\n                error_type = RaidenUnrecoverableError\n            error_msg = f'{error_prefix}. {msg}'\n            if (error_type == RaidenRecoverableError):\n                log.warning(error_msg, **log_details)\n            else:\n                log.critical(error_msg, **log_details)\n            raise error_type(error_msg)\n    log.info('closeChannel successful', **log_details)", "docstring": "Close the channel using the provided balance proof.\n\nNote:\nThis method must *not* be called without updating the application\nstate, otherwise the node may accept new transfers which cannot be\nused, because the closer is not allowed to update the balance proof\nsubmitted on chain after closing\n\nRaises:\nRaidenRecoverableError: If the channel is already closed.\nRaidenUnrecoverableError: If the channel does not exist or is settled.", "source": "codesearchnet"}
{"code": "def _handle_error_response(response_body):\n    \n    try:\n        error_data = json.loads(response_body)\n        error_details = '{}: {}'.format(\n            error_data['error'],\n            error_data.get('error_description'))\n    \n    except (KeyError, ValueError):\n        error_details = response_body\n\n    raise exceptions.RefreshError(\n        error_details, response_body)", "docstring": "Translates an error response into an exception.\n\nArgs:\nresponse_body (str): The decoded response data.\n\nRaises:\ngoogle.auth.exceptions.RefreshError", "source": "juraj-google-style"}
{"code": "async def read_reply(self):\n    code = 500\n    messages = []\n    go_on = True\n    while go_on:\n        try:\n            line = (await self.readline())\n        except ValueError as e:\n            code = 500\n            go_on = False\n        else:\n            try:\n                code = int(line[:3])\n            except ValueError as e:\n                raise ConnectionResetError('Connection lost.') from e\n            else:\n                go_on = (line[3:4] == b'-')\n        message = line[4:].strip(b' \\t\\r\\n').decode('ascii')\n        messages.append(message)\n    full_message = '\\n'.join(messages)\n    return (code, full_message)", "docstring": "Reads a reply from the server.\n\nRaises:\nConnectionResetError: If the connection with the server is lost\n(we can't read any response anymore). Or if the server\nreplies without a proper return code.\n\nReturns:\n(int, str): A (code, full_message) 2-tuple consisting of:\n\n- server response code ;\n- server response string corresponding to response code\n(multiline responses are returned in a single string).", "source": "codesearchnet"}
{"code": "def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):\n  \n  assignments = []\n  for assignment_size in six.moves.xrange(\n      1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))):\n    for s_dims_chosen in itertools.combinations(splittable_dimensions,\n                                                assignment_size):\n      for m_dims_chosen in itertools.permutations(mesh_dimension_to_size,\n                                                  assignment_size):\n        assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))\n  return assignments", "docstring": "Generates all ways to map splittable dimensions to mesh dimensions.\n\nArgs:\nsplittable_dimensions: a frozenset of the names of splittable dimensions.\nmesh_dimension_to_size: a dictionary from mesh dimension name to size.\n\nReturns:\nA list of the valid assignments. Each assignment is a dict keyed by every\nsplittable dimension, whose value is either a mesh dimension or None.", "source": "juraj-google-style"}
{"code": "def check_call(state, callstr, argstr=None, expand_msg=None):\n    state.assert_is(['function_defs', 'lambda_functions'], 'check_call', ['check_function_def', 'check_lambda_function'])\n    if (expand_msg is None):\n        expand_msg = 'To verify it, we reran {{argstr}}. '\n    (stu_part, _argstr) = build_call(callstr, state.student_parts['node'])\n    (sol_part, _) = build_call(callstr, state.solution_parts['node'])\n    append_message = {'msg': expand_msg, 'kwargs': {'argstr': (argstr or _argstr)}}\n    child = part_to_child(stu_part, sol_part, append_message, state)\n    return child", "docstring": "When checking a function definition of lambda function,\nprepare has_equal_x for checking the call of a user-defined function.\n\nArgs:\ncallstr (str): call string that specifies how the function should be called, e.g. `f(1, a = 2)`.\n``check_call()`` will replace ``f`` with the function/lambda you're targeting.\nargstr (str): If specified, this overrides the way the function call is refered to in the expand message.\nexpand_msg (str): If specified, this overrides any messages that are prepended by previous SCT chains.\nstate (State): state object that is chained from.\n\n:Example:\n\nStudent and solution code::\n\ndef my_power(x):\nprint(\"calculating sqrt...\")\nreturn(x * x)\n\nSCT::\n\nEx().check_function_def('my_power').multi(\ncheck_call(\"f(3)\").has_equal_value()\ncheck_call(\"f(3)\").has_equal_output()\n)", "source": "codesearchnet"}
{"code": "def get_by(self, field, value):\n        \n        firmwares = self.get_all()\n        matches = []\n        for item in firmwares:\n            if item.get(field) == value:\n                matches.append(item)\n        return matches", "docstring": "Gets the list of firmware baseline resources managed by the appliance. Optional parameters can be used to\nfilter the list of resources returned.\n\nThe search is case-insensitive.\n\nArgs:\nfield: Field name to filter.\nvalue: Value to filter.\n\nReturns:\nlist: List of firmware baseline resources.", "source": "juraj-google-style"}
{"code": "def fit(self, mol1, mol2):\n        \n        return self.get_rmsd(mol1, mol2) < self._tolerance", "docstring": "Fit two molecules.\n\nArgs:\nmol1: First molecule. OpenBabel OBMol or pymatgen Molecule object\nmol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object\n\nReturns:\nA boolean value indicates whether two molecules are the same.", "source": "juraj-google-style"}
{"code": "def write_profile(name, repo, token):\n    make_sure_folder_exists(CONFIG_FOLDER)\n    config = configparser.ConfigParser()\n    config.read(CONFIG_FILE)\n    profile = {'repo': repo, 'token': token}\n    config[name] = profile\n    with open(CONFIG_FILE, 'w') as configfile:\n        config.write(configfile)\n    return profile", "docstring": "Save a profile to the CONFIG_FILE.\n\nAfter you use this method to save a profile, you can load it anytime\nlater with the ``read_profile()`` function defined above.\n\nArgs:\n\nname\nThe name of the profile to save.\n\nrepo\nThe Github repo you want to connect to. For instance,\nthis repo is ``jtpaasch/simplygithub``.\n\ntoken\nA personal access token to connect to the repo. It is\na hash that looks something like ``ff20ae42dc...``\n\nReturns:\nA dictionary with the profile's ``repo`` and ``token`` values.", "source": "codesearchnet"}
{"code": "def OpenFile(client_path, max_timestamp=None):\n    path_info = data_store.REL_DB.ReadLatestPathInfosWithHashBlobReferences([client_path], max_timestamp=max_timestamp)[client_path]\n    if (path_info is None):\n        raise FileHasNoContentError(client_path)\n    hash_id = rdf_objects.SHA256HashID.FromBytes(path_info.hash_entry.sha256.AsBytes())\n    blob_references = data_store.REL_DB.ReadHashBlobReferences([hash_id])[hash_id]\n    if (blob_references is None):\n        raise MissingBlobReferencesError(('File hash was expected to have corresponding blob references, but they were not found: %r' % hash_id))\n    return BlobStream(client_path, blob_references, hash_id)", "docstring": "Opens latest content of a given file for reading.\n\nArgs:\nclient_path: A db.ClientPath object describing path to a file.\nmax_timestamp: If specified, will open the last collected version with a\ntimestamp equal or lower than max_timestamp. If not specified, will simply\nopen the latest version.\n\nReturns:\nA file like object with random access support.\n\nRaises:\nFileHasNoContentError: if the file was never collected.\nMissingBlobReferencesError: if one of the blobs was not found.", "source": "codesearchnet"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1] if return_dict is not None else text_outputs.pooler_output\n    text_features = self.text_projection(pooled_output)\n    text_features = F.normalize(text_features, dim=-1)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`ClapTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, ClapModel\n\n>>> model = ClapModel.from_pretrained(\"laion/clap-htsat-unfused\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"laion/clap-htsat-unfused\")\n\n>>> inputs = tokenizer([\"the sound of a cat\", \"the sound of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    return self._vslvm_logical_volume.read(size)", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def FindProxies():\n    proxies = []\n    for i in range(0, 100):\n        try:\n            sid = winreg.EnumKey(winreg.HKEY_USERS, i)\n        except OSError:\n            break\n        try:\n            subkey = (sid + '\\\\Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Internet Settings')\n            internet_settings = winreg.OpenKey(winreg.HKEY_USERS, subkey)\n            proxy_enable = winreg.QueryValueEx(internet_settings, 'ProxyEnable')[0]\n            if proxy_enable:\n                proxy_server = str(winreg.QueryValueEx(internet_settings, 'ProxyServer')[0])\n                if ('=' in proxy_server):\n                    for p in proxy_server.split(';'):\n                        (protocol, address) = p.split('=', 1)\n                        if (not re.match('^([^/:]+):\n                            address = ('%s:\n                        proxies.append(address)\n                elif (proxy_server[:5] == 'http:'):\n                    proxies.append(proxy_server)\n                else:\n                    proxies.append(('http:\n            internet_settings.Close()\n        except (OSError, ValueError, TypeError):\n            continue\n    logging.debug('Found proxy servers: %s', proxies)\n    return proxies", "docstring": "Tries to find proxies by interrogating all the user's settings.\n\nThis function is a modified urillib.getproxies_registry() from the\nstandard library. We just store the proxy value in the environment\nfor urllib to find it.\n\nTODO(user): Iterate through all the possible values if one proxy\nfails, in case more than one proxy is specified in different users\nprofiles.\n\nReturns:\nA list of proxies.", "source": "codesearchnet"}
{"code": "def GetTARInfoByPathSpec(self, path_spec):\n    location = getattr(path_spec, 'location', None)\n    if (location is None):\n        raise errors.PathSpecError('Path specification missing location.')\n    if (not location.startswith(self.LOCATION_ROOT)):\n        raise errors.PathSpecError('Invalid location in path specification.')\n    if (len(location) == 1):\n        return None\n    try:\n        return self._tar_file.getmember(location[1:])\n    except KeyError:\n        pass", "docstring": "Retrieves the TAR info for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\ntarfile.TARInfo: TAR info or None if it does not exist.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def import_args_from_dict(value, args, config):\n    if isinstance(value, six.string_types):\n        for match in TOKEN_REGEX.finditer(str(value)):\n            token = match.group(1)\n            if (token in args):\n                actual_param = args[token]\n                if isinstance(actual_param, six.string_types):\n                    value = value.replace(('@' + token), args[token])\n                else:\n                    value = actual_param\n    elif isinstance(value, list):\n        return [import_args_from_dict(item, args, config) for item in value]\n    elif isinstance(value, dict):\n        return {key: import_args_from_dict(val, args, config) for (key, val) in value.items()}\n    elif isinstance(value, tuple):\n        return tuple((import_args_from_dict(val, args, config) for val in value))\n    return value", "docstring": "Replaces some arguments by those specified by a key-value dictionary.\n\nThis function will be recursively called on a dictionary looking for any\nvalue containing a \"$\" variable. If found, the value will be replaced\nby the attribute in \"args\" of the same name.\n\nIt is used to load arguments from the CLI and any extra configuration\nparameters passed in recipes.\n\nArgs:\nvalue: The value of a {key: value} dictionary. This is passed recursively\nand may change in nature: string, list, or dict. The top-level variable\nshould be the dictionary that is supposed to be recursively traversed.\nargs: A {key: value} dictionary used to do replacements.\nconfig: A dftimewolf.Config class containing configuration information\n\nReturns:\nThe first caller of the function will receive a dictionary in which strings\nstarting with \"@\" are replaced by the parameters in args.", "source": "codesearchnet"}
{"code": "def _create_grad_indexed_slices_init(grad_output_slices, forward_input):\n    assert isinstance(grad_output_slices, indexed_slices.IndexedSlices)\n    assert isinstance(forward_input, tensor.Tensor)\n    values_out = grad_output_slices.values\n    indices_out = grad_output_slices.indices\n    if values_out.shape.is_fully_defined():\n        values_shape = tensor_shape.TensorShape([0] + values_out.shape.as_list()[1:])\n        values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')\n    else:\n        if forward_input.dtype == dtypes.resource:\n            forward_shape = gen_resource_variable_ops.variable_shape(forward_input)\n        else:\n            forward_shape = array_ops.shape(forward_input)\n        values_shape = array_ops.concat([[0], forward_shape[1:]], 0)\n        values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')\n    indices = constant_op.constant([], indices_out.dtype, name='indices_init')\n    if forward_input.dtype == dtypes.resource:\n        shape = gen_resource_variable_ops.variable_shape(forward_input, name='shape_init')\n    else:\n        shape = array_ops.shape(forward_input, name='shape_init')\n    return indexed_slices.IndexedSlices(values=values, indices=indices, dense_shape=shape)", "docstring": "Creates an IndexedSlices to pass as input to the while grad function.\n\nArgs:\ngrad_output_slices: IndexedSlices. The corresponding while grad function\noutput.\nforward_input: Tensor. The corresponding input to the forward while op.\n\nReturns:\nZeros IndexedSlices, created in current Graph.", "source": "github-repos"}
{"code": "def translations(self, **kwargs):\n        \n        path = self._get_id_path('translations')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the translations for a specific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _numpy_section(line_info):\n    next_line_is_hyphens = _line_is_hyphens(line_info.next.stripped)\n    if next_line_is_hyphens:\n        possible_title = line_info.remaining\n        return _section_from_possible_title(possible_title)\n    else:\n        return None", "docstring": "Checks whether the current line is the start of a new numpy-style section.\n\nNumpy style sections are followed by a full line of hyphens, for example:\n\nSection Name\n------------\nSection body goes here.\n\nArgs:\nline_info: Information about the current line.\nReturns:\nA Section type if one matches, or None if no section type matches.", "source": "github-repos"}
{"code": "def get_metrics_result(self):\n    return_metrics = {}\n    for metric in self.metrics:\n        result = metric.result()\n        if isinstance(result, dict):\n            return_metrics.update(result)\n        else:\n            return_metrics[metric.name] = result\n    return python_utils.pythonify_logs(return_metrics)", "docstring": "Returns the model's metrics values as a dict.\n\nIf any of the metric result is a dict (containing multiple metrics),\neach of them gets added to the top level returned dict of this method.\n\nReturns:\nA `dict` containing values of the metrics listed in `self.metrics`.\nExample: `{'loss': 0.2, 'accuracy': 0.7}`.", "source": "github-repos"}
{"code": "def contrast(x, severity=1):\n  \n  c = [0.4, .3, .2, .1, .05][severity - 1]\n\n  x = np.array(x) / 255.\n  means = np.mean(x, axis=(0, 1), keepdims=True)\n  x_clip = np.clip((x - means) * c + means, 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Change contrast of images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Changed contrast.", "source": "juraj-google-style"}
{"code": "def get(self, report_id):\n        \n        return Report(\n            self._app,\n            self._swimlane.request('get', \"reports/{0}\".format(report_id)).json()\n        )", "docstring": "Retrieve report by ID\n\nArgs:\nreport_id (str): Full report ID\n\nReturns:\nReport: Corresponding Report instance", "source": "juraj-google-style"}
{"code": "def state(self):\n    if not self.has_job:\n        return PipelineState.DONE\n    self._update_job()\n    return self._get_job_state()", "docstring": "Return the current state of the remote job.\n\nReturns:\nA PipelineState object.", "source": "github-repos"}
{"code": "def _ParseValueData(self, knowledge_base, value_data):\n    \n    if not isinstance(value_data, py2to3.UNICODE_TYPE):\n      raise errors.PreProcessFail(\n          'Unsupported Windows Registry value type: {0:s} for '\n          'artifact: {1:s}.'.format(\n              type(value_data), self.ARTIFACT_DEFINITION_NAME))\n\n    environment_variable = artifacts.EnvironmentVariableArtifact(\n        case_sensitive=False, name=self._NAME, value=value_data)\n\n    try:\n      logger.debug('setting environment variable: {0:s} to: \"{1:s}\"'.format(\n          self._NAME, value_data))\n      knowledge_base.AddEnvironmentVariable(environment_variable)\n    except KeyError:\n      \n      pass", "docstring": "Parses Windows Registry value data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nvalue_data (object): Windows Registry value data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def write_file(self, filename, file_format='xyz'):\n    mol = pb.Molecule(self._obmol)\n    return mol.write(file_format, filename, overwrite=True)", "docstring": "Uses OpenBabel to output all supported formats.\n\nArgs:\nfilename: Filename of file to output\nfile_format: String specifying any OpenBabel supported formats.", "source": "codesearchnet"}
{"code": "def stitch_map(tiles, width, height, bbox, dpi):\n    \n    size = (int(width * dpi_to_dpmm(dpi)), int(height * dpi_to_dpmm(dpi)))\n    background = Image.new('RGBA', size, (255, 255, 255))\n    for layer in tiles:\n        layer_img = Image.new(\"RGBA\", size)\n        for (x, y), tile_path in layer.items():\n            tile = Image.open(tile_path)\n            layer_img.paste(tile, ((x - bbox.min.x) * TILE_SIZE, (y - bbox.min.y) * TILE_SIZE))\n        background = Image.alpha_composite(background, layer_img)\n    add_scales_bar(background, bbox)\n    return background.convert(\"RGB\")", "docstring": "Merge tiles together into one image.\n\nArgs:\ntiles (list of dict of file): tiles for each layer\nwidth (float): page width in mm\nheight (height): page height in mm\ndpi (dpi): resolution in dots per inch\n\nReturns:\nPIL.Image: merged map.", "source": "juraj-google-style"}
{"code": "def _InitializeURL(self, upload_url, current_content_length):\n    \n    \n    if current_content_length != 0:\n      return upload_url\n\n    headers = {\n        'Content-Type': 'application/xml',\n        'Content-Length': 0,\n        'x-goog-resumable': 'start'\n    }\n\n    \n    req = urllib2.Request(upload_url, data={}, headers=headers)\n    resp = self._url_opener.open(req)\n\n    return resp.headers['location']", "docstring": "Ensures that the URL used to upload operations is properly initialized.\n\nArgs:\nupload_url: a string url.\ncurrent_content_length: an integer identifying the current content length\nof data uploaded to the Batch Job.\n\nReturns:\nAn initialized string URL, or the provided string URL if the URL has\nalready been initialized.", "source": "juraj-google-style"}
{"code": "def set_colour(self, r, g, b):\n    if (not (0 <= r <= 255)):\n        raise ValueError('The value for red needs to be between 0 and 255.')\n    if (not (0 <= g <= 255)):\n        raise ValueError('The value for green needs to be between 0 and 255.')\n    if (not (0 <= b <= 255)):\n        raise ValueError('The value for blue needs to be between 0 and 255.')\n    hexvalue = BulbDevice._rgb_to_hexvalue(r, g, b)\n    payload = self.generate_payload(SET, {self.DPS_INDEX_MODE: self.DPS_MODE_COLOUR, self.DPS_INDEX_COLOUR: hexvalue})\n    data = self._send_receive(payload)\n    return data", "docstring": "Set colour of an rgb bulb.\n\nArgs:\nr(int): Value for the colour red as int from 0-255.\ng(int): Value for the colour green as int from 0-255.\nb(int): Value for the colour blue as int from 0-255.", "source": "codesearchnet"}
{"code": "async def upload_image(self, image_file, filename=None, *, return_uploaded_image=False):\n    image_filename = (filename or os.path.basename(image_file.name))\n    image_data = image_file.read()\n    res = (await self._base_request(IMAGE_UPLOAD_URL, 'application/x-www-form-urlencoded;charset=UTF-8', 'json', json.dumps({'protocolVersion': '0.8', 'createSessionRequest': {'fields': [{'external': {'name': 'file', 'filename': image_filename, 'put': {}, 'size': len(image_data)}}]}})))\n    try:\n        upload_url = self._get_upload_session_status(res)['externalFieldTransfers'][0]['putInfo']['url']\n    except KeyError:\n        raise exceptions.NetworkError('image upload failed: can not acquire an upload url')\n    res = (await self._base_request(upload_url, 'application/octet-stream', 'json', image_data))\n    try:\n        raw_info = self._get_upload_session_status(res)['additionalInfo']['uploader_service.GoogleRupioAdditionalInfo']['completionInfo']['customerSpecificInfo']\n        image_id = raw_info['photoid']\n        url = raw_info['url']\n    except KeyError:\n        raise exceptions.NetworkError('image upload failed: can not fetch upload info')\n    result = UploadedImage(image_id=image_id, url=url)\n    return (result if return_uploaded_image else result.image_id)", "docstring": "Upload an image that can be later attached to a chat message.\n\nArgs:\nimage_file: A file-like object containing an image.\nfilename (str): (optional) Custom name for the uploaded file.\nreturn_uploaded_image (bool): (optional) If True, return\n:class:`.UploadedImage` instead of image ID. Defaults to False.\n\nRaises:\nhangups.NetworkError: If the upload request failed.\n\nReturns:\n:class:`.UploadedImage` instance, or ID of the uploaded image.", "source": "codesearchnet"}
{"code": "def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):\n    traj = {}\n    trajsize = len(rollout[SampleBatch.ACTIONS])\n    for key in rollout:\n        traj[key] = np.stack(rollout[key])\n    if use_gae:\n        assert (SampleBatch.VF_PREDS in rollout), 'Values not found!'\n        vpred_t = np.concatenate([rollout[SampleBatch.VF_PREDS], np.array([last_r])])\n        delta_t = ((traj[SampleBatch.REWARDS] + (gamma * vpred_t[1:])) - vpred_t[:(- 1)])\n        traj[Postprocessing.ADVANTAGES] = discount(delta_t, (gamma * lambda_))\n        traj[Postprocessing.VALUE_TARGETS] = (traj[Postprocessing.ADVANTAGES] + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)\n    else:\n        rewards_plus_v = np.concatenate([rollout[SampleBatch.REWARDS], np.array([last_r])])\n        traj[Postprocessing.ADVANTAGES] = discount(rewards_plus_v, gamma)[:(- 1)]\n        traj[Postprocessing.VALUE_TARGETS] = np.zeros_like(traj[Postprocessing.ADVANTAGES])\n    traj[Postprocessing.ADVANTAGES] = traj[Postprocessing.ADVANTAGES].copy().astype(np.float32)\n    assert all(((val.shape[0] == trajsize) for val in traj.values())), 'Rollout stacked incorrectly!'\n    return SampleBatch(traj)", "docstring": "Given a rollout, compute its value targets and the advantage.\n\nArgs:\nrollout (SampleBatch): SampleBatch of a single trajectory\nlast_r (float): Value estimation for last observation\ngamma (float): Discount factor.\nlambda_ (float): Parameter for GAE\nuse_gae (bool): Using Generalized Advantage Estamation\n\nReturns:\nSampleBatch (SampleBatch): Object with experience from rollout and\nprocessed rewards.", "source": "codesearchnet"}
{"code": "def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup):\n    \n    logger = a99.get_python_logger()\n    xmin, xmax, ymin_, ymax, xspan, yspan = calc_max_min(ss)\n    ymin = ymin_ if setup.ymin is None else setup.ymin\n    num_pages = len(ss)\n    a99.format_BLB()\n    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)\n    for i, s in enumerate(ss):\n        title = s.title\n        fig = plt.figure()\n        plt.plot(s.x, s.y, c=_FAV_COLOR)\n        if setup.flag_xlabel and setup.fmt_xlabel:\n            _set_plot(plt.xlabel, setup.fmt_xlabel, s)\n        if setup.flag_ylabel and setup.fmt_ylabel:\n            _set_plot(plt.ylabel, setup.fmt_ylabel, s)\n        _set_plot(plt.title, setup.fmt_title, s)\n        plt.xlim([xmin-xspan*_T, xmax+xspan*_T])\n        plt.ylim([ymin-yspan*_T, ymax+yspan*_T])\n        plt.tight_layout()\n        plt.subplots_adjust(top=0.94) \n        logger.info(\"Printing page {0:d}/{1:d} ('{2!s}')\".format(i+1, num_pages, title))\n        pdf.savefig(fig)\n        plt.close()\n    pdf.close()\n    logger.info(\"File {0!s} successfully created.\".format(pdf_filename))", "docstring": "Plots spectra into a PDF file, one spectrum per page.\n\nSplits into several pieces of width\n\nArgs:\nss: list of Spectrum objects\npdf_filename: name of output file", "source": "juraj-google-style"}
{"code": "def address(self, ip, owner=None, **kwargs):\n        \n        return Address(self.tcex, ip, owner=owner, **kwargs)", "docstring": "Create the Address TI object.\n\nArgs:\nowner:\nip:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def __set_proxy(self, config):\n        \n        if \"proxy\" in config and config[\"proxy\"]:\n            proxy = config[\"proxy\"]\n            splitted = proxy.split(':')\n            if len(splitted) != 2:\n                raise ValueError(ONEVIEW_CLIENT_INVALID_PROXY)\n\n            proxy_host = splitted[0]\n            proxy_port = int(splitted[1])\n            self.__connection.set_proxy(proxy_host, proxy_port)", "docstring": "Set proxy if needed\nArgs:\nconfig: Config dict", "source": "juraj-google-style"}
{"code": "def get_static_batch_size(layer):\n    batch_input_shape, _ = get_input_shape_and_dtype(layer)\n    if batch_input_shape is not None:\n        return tensor_shape.Dimension(batch_input_shape[0]).value\n    return None", "docstring": "Gets the static batch size of a Layer.\n\nArgs:\nlayer: a `Layer` instance.\n\nReturns:\nThe static batch size of a Layer.", "source": "github-repos"}
{"code": "def FindUnspentCoinsByAsset(self, asset_id, from_addr=None, use_standard=False, watch_only_val=0):\n    coins = self.FindUnspentCoins(from_addr=from_addr, use_standard=use_standard, watch_only_val=watch_only_val)\n    return [coin for coin in coins if (coin.Output.AssetId == asset_id)]", "docstring": "Finds unspent coin objects in the wallet limited to those of a certain asset type.\n\nArgs:\nasset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain.\nfrom_addr (UInt160): a bytearray (len 20) representing an address.\nuse_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ).\nwatch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.\n\nReturns:\nlist: a list of ``neo.Wallet.Coin`` in the wallet that are not spent", "source": "codesearchnet"}
{"code": "def run(self, samples=1000, chains=1, **kwargs):\n        \n        self.fit = self.stan_model.sampling(data=self.X, iter=samples,\n                                            chains=chains, **kwargs)\n        return self._convert_to_results()", "docstring": "Run the Stan sampler.\nArgs:\nsamples (int): Number of samples to obtain (in each chain).\nchains (int): Number of chains to use.\nkwargs (dict): Optional keyword arguments passed onto the PyStan\nStanModel.sampling() call.\nReturns: A PyMC3ModelResults instance.", "source": "juraj-google-style"}
{"code": "def pad_image(self, image: np.ndarray, pad_size: Optional[Dict[str, int]]=None, constant_values: Union[float, Iterable[float]]=0, pad_mode: PaddingMode=PaddingMode.CONSTANT, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):\n    height, width = get_image_size(image, channel_dim=input_data_format)\n    max_height = pad_size.get('height', height)\n    max_width = pad_size.get('width', width)\n    pad_right, pad_bottom = (max_width - width, max_height - height)\n    if pad_right < 0 or pad_bottom < 0:\n        raise ValueError('The padding size must be greater than image size')\n    padding = ((0, pad_bottom), (0, pad_right))\n    padded_image = pad(image, padding, mode=pad_mode, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)\n    return padded_image", "docstring": "Pad an image with zeros to the given size.\n\nArgs:\nimage (`np.ndarray`):\nImage to pad.\npad_size (`Dict[str, int]`)\nSize of the output image with pad.\nconstant_values (`Union[float, Iterable[float]]`)\nThe fill value to use when padding the image.\npad_mode (`PaddingMode`)\nThe pad mode, default to PaddingMode.CONSTANT\ndata_format (`ChannelDimension` or `str`, *optional*)\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _linop_inverse(self) -> 'LinearOperatorBlockLowerTriangular':\n    if len(self.operators) == 1:\n        return LinearOperatorBlockLowerTriangular([[self.operators[0][0].inverse()]], is_non_singular=self.is_non_singular, is_self_adjoint=self.is_self_adjoint, is_positive_definite=self.is_positive_definite, is_square=True)\n    blockwise_dim = len(self.operators)\n    upper_left_inverse = LinearOperatorBlockLowerTriangular(self.operators[:-1]).inverse()\n    bottom_row = self.operators[-1]\n    bottom_right_inverse = bottom_row[-1].inverse()\n    inverse_bottom_row = []\n    for i in range(blockwise_dim - 1):\n        blocks = []\n        for j in range(i, blockwise_dim - 1):\n            result = bottom_row[j].matmul(upper_left_inverse.operators[j][i])\n            if not any((isinstance(result, op_type) for op_type in linear_operator_addition.SUPPORTED_OPERATORS)):\n                result = linear_operator_full_matrix.LinearOperatorFullMatrix(result.to_dense())\n            blocks.append(result)\n        summed_blocks = linear_operator_addition.add_operators(blocks)\n        assert len(summed_blocks) == 1\n        block = summed_blocks[0]\n        block = bottom_right_inverse.matmul(block)\n        block = linear_operator_identity.LinearOperatorScaledIdentity(num_rows=bottom_right_inverse.domain_dimension_tensor(), multiplier=math_ops.cast(-1, dtype=block.dtype)).matmul(block)\n        inverse_bottom_row.append(block)\n    inverse_bottom_row.append(bottom_right_inverse)\n    return LinearOperatorBlockLowerTriangular(upper_left_inverse.operators + [inverse_bottom_row], is_non_singular=self.is_non_singular, is_self_adjoint=self.is_self_adjoint, is_positive_definite=self.is_positive_definite, is_square=True)", "docstring": "Inverse of LinearOperatorBlockLowerTriangular.\n\nWe recursively apply the identity:\n\n```none\n|A 0|'  =  |    A'  0|\n|B C|      |-C'BA' C'|\n```\n\nwhere `A` is n-by-n, `B` is m-by-n,\n`C` is m-by-m, and `'` denotes inverse.\n\nThis identity can be verified through multiplication:\n\n```none\n|A 0||    A'  0|\n|B C||-C'BA' C'|\n\n= |       AA'   0|\n|BA'-CC'BA' CC'|\n\n= |I 0|\n|0 I|\n```\nReturns:\nA 'LinearOperatorBlockLowerTriangular'.", "source": "github-repos"}
{"code": "def run(inputs, program, outputs):\n  \n  root = tempfile.mkdtemp()\n  try:\n    cwd = os.getcwd()\n    for fake, real in inputs:\n      parent = os.path.join(root, os.path.dirname(fake))\n      if not os.path.exists(parent):\n        os.makedirs(parent)\n      \n      \n      if hasattr(os, 'symlink') and not os.name == 'nt':\n        os.symlink(os.path.join(cwd, real), os.path.join(root, fake))\n      else:\n        shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake))\n    if subprocess.call(program + [root]) != 0:\n      return 1\n    for fake, real in outputs:\n      shutil.copyfile(os.path.join(root, fake), real)\n    return 0\n  finally:\n    try:\n      shutil.rmtree(root)\n    except EnvironmentError:\n      \n      pass", "docstring": "Creates temp symlink tree, runs program, and copies back outputs.\n\nArgs:\ninputs: List of fake paths to real paths, which are used for symlink tree.\nprogram: List containing real path of program and its arguments. The\nexecroot directory will be appended as the last argument.\noutputs: List of fake outputted paths to copy back to real paths.\nReturns:\n0 if succeeded or nonzero if failed.", "source": "juraj-google-style"}
{"code": "def play_from_queue(self, index, start=True):\n        \n        \n        \n        if not self.speaker_info:\n            self.get_speaker_info()\n\n        \n        uri = 'x-rincon-queue:{0}\n        self.avTransport.SetAVTransportURI([\n            ('InstanceID', 0),\n            ('CurrentURI', uri),\n            ('CurrentURIMetaData', '')\n        ])\n\n        \n        self.avTransport.Seek([\n            ('InstanceID', 0),\n            ('Unit', 'TRACK_NR'),\n            ('Target', index + 1)\n        ])\n\n        \n        if start:\n            self.play()", "docstring": "Play a track from the queue by index.\n\nThe index number is required as an argument, where the first index\nis 0.\n\nArgs:\nindex (int): 0-based index of the track to play\nstart (bool): If the item that has been set should start playing", "source": "juraj-google-style"}
{"code": "def add(self, information, timeout=-1):\n        \n        return self._client.create(information, timeout=timeout)", "docstring": "Adds a data center resource based upon the attributes specified.\n\nArgs:\ninformation: Data center information\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Added data center.", "source": "juraj-google-style"}
{"code": "def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None):\n    \n    url = \"%s/%s/%s\" % (_BOKEH_GH, api_type, id)\n    options = options or {}\n    set_classes(options)\n    node = nodes.reference(\n        rawtext, kind + utils.unescape(id), refuri=url, **options)\n    return node", "docstring": "Return a link to a Bokeh Github resource.\n\nArgs:\napp (Sphinx app) : current app\nrawtext (str) : text being replaced with link node.\nrole (str) : role name\nkind (str) : resource type (issue, pull, etc.)\napi_type (str) : type for api link\nid : (str) : id of the resource to link to\noptions (dict) : options dictionary passed to role function", "source": "juraj-google-style"}
{"code": "def __gt__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value > other.value", "docstring": "Returns True if `self` is known to be greater than `other`.\n\nDimensions are compared as follows:\n\n```python\n(tf.compat.v1.Dimension(m)    > tf.compat.v1.Dimension(n))    == (m > n)\n(tf.compat.v1.Dimension(m)    > tf.compat.v1.Dimension(None)) == None\n(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n))    == None\n(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None\n```\n\nArgs:\nother: Another Dimension.\n\nReturns:\nThe value of `self.value > other.value` if both are known, otherwise\nNone.", "source": "github-repos"}
{"code": "def _Execute(self, http):\n    message = mime_multipart.MIMEMultipart('mixed')\n    setattr(message, '_write_headers', (lambda self: None))\n    for key in self.__request_response_handlers:\n        msg = mime_nonmultipart.MIMENonMultipart('application', 'http')\n        msg['Content-Transfer-Encoding'] = 'binary'\n        msg['Content-ID'] = self._ConvertIdToHeader(key)\n        body = self._SerializeRequest(self.__request_response_handlers[key].request)\n        msg.set_payload(body)\n        message.attach(msg)\n    request = http_wrapper.Request(self.__batch_url, 'POST')\n    request.body = message.as_string()\n    request.headers['content-type'] = ('multipart/mixed; boundary=\"%s\"' % message.get_boundary())\n    response = http_wrapper.MakeRequest(http, request)\n    if (response.status_code >= 300):\n        raise exceptions.HttpError.FromResponse(response)\n    header = ('content-type: %s\\r\\n\\r\\n' % response.info['content-type'])\n    content = response.content\n    if (isinstance(content, bytes) and self.__response_encoding):\n        content = response.content.decode(self.__response_encoding)\n    parser = email_parser.Parser()\n    mime_response = parser.parsestr((header + content))\n    if (not mime_response.is_multipart()):\n        raise exceptions.BatchError('Response not in multipart/mixed format.')\n    for part in mime_response.get_payload():\n        request_id = self._ConvertHeaderToId(part['Content-ID'])\n        response = self._DeserializeResponse(part.get_payload())\n        self.__request_response_handlers[request_id] = self.__request_response_handlers[request_id]._replace(response=response)", "docstring": "Serialize batch request, send to server, process response.\n\nArgs:\nhttp: A httplib2.Http object to be used to make the request with.\n\nRaises:\nhttplib2.HttpLib2Error if a transport error has occured.\napiclient.errors.BatchError if the response is the wrong format.", "source": "codesearchnet"}
{"code": "def register_magics(store_name='_ampl_cells', ampl_object=None):\n    \n    from IPython.core.magic import  (\n        Magics, magics_class, cell_magic, line_magic\n    )\n\n    @magics_class\n    class StoreAMPL(Magics):\n        def __init__(self, shell=None,  **kwargs):\n            Magics.__init__(self, shell=shell, **kwargs)\n            self._store = []\n            shell.user_ns[store_name] = self._store\n\n        @cell_magic\n        def ampl(self, line, cell):\n            \n            self._store.append(cell)\n\n        @cell_magic\n        def ampl_eval(self, line, cell):\n            \n            ampl_object.eval(cell)\n\n        @line_magic\n        def get_ampl(self, line):\n            \n            return self._store\n\n    get_ipython().register_magics(StoreAMPL)", "docstring": "Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.\n\nArgs:\nstore_name: Name of the store where ``%%ampl cells`` will be stored.\nampl_object: Object used to evaluate ``%%ampl_eval`` cells.", "source": "juraj-google-style"}
{"code": "def get_plugins(package_name, paths=None):\n    pkg = get_latest_package(package_name, paths=paths, error=True)\n    if (not pkg.has_plugins):\n        return []\n    it = iter_package_families(paths)\n    package_names = set((x.name for x in it))\n    bar = ProgressBar('Searching', len(package_names))\n    plugin_pkgs = []\n    for package_name_ in package_names:\n        bar.next()\n        if (package_name_ == package_name):\n            continue\n        plugin_pkg = get_latest_package(package_name_, paths=paths)\n        if (not plugin_pkg.plugin_for):\n            continue\n        for plugin_for in plugin_pkg.plugin_for:\n            if (plugin_for == pkg.name):\n                plugin_pkgs.append(package_name_)\n    bar.finish()\n    return plugin_pkgs", "docstring": "Find packages that are plugins of the given package.\n\nArgs:\npackage_name (str): Name of the package.\npaths (list of str): Paths to search for packages, defaults to\n`config.packages_path`.\n\nReturns:\nlist of str: The packages that are plugins of the given package.", "source": "codesearchnet"}
{"code": "def create_new_board(self, query_params=None):\n    board_json = self.fetch_json(uri_path='/boards', http_method='POST', query_params=(query_params or {}))\n    return self.create_board(board_json)", "docstring": "Create a new board. name is required in query_params. Returns a Board\nobject.\n\nReturns:\nBoard: Returns the created board", "source": "codesearchnet"}
{"code": "def run_eval(interpreter, input_image):\n    input_details = interpreter.get_input_details()\n    output_details = interpreter.get_output_details()\n    input_image = np.reshape(input_image, input_details[0]['shape'])\n    interpreter.set_tensor(input_details[0]['index'], input_image)\n    interpreter.invoke()\n    output_data = interpreter.get_tensor(output_details[0]['index'])\n    output = np.squeeze(output_data)\n    return output", "docstring": "Performs evaluation for input image over specified model.\n\nArgs:\ninterpreter: TFLite interpreter initialized with model to execute.\ninput_image: Image input to the model.\n\nReturns:\noutput: output tensor of model being executed.", "source": "github-repos"}
{"code": "def get(self, name):\n        \n        return self.prepare_model(self.client.api.inspect_plugin(name))", "docstring": "Gets a plugin.\n\nArgs:\nname (str): The name of the plugin.\n\nReturns:\n(:py:class:`Plugin`): The plugin.\n\nRaises:\n:py:class:`docker.errors.NotFound` If the plugin does not\nexist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def segment(self, source, language=None):\n    \n    if language and not language in self.supported_languages:\n      raise ValueError(\n          'Language {} is not supported by NLAPI segmenter'.format(language))\n\n    chunks, language = self._get_source_chunks(source, language=language)\n    if self.use_entity:\n      entities = self._get_entities(source, language=language)\n      chunks = self._group_chunks_by_entities(chunks, entities)\n    chunks.resolve_dependencies()\n    return chunks", "docstring": "Returns a chunk list from the given sentence.\n\nArgs:\nsource (str): Source string to segment.\nlanguage (:obj:`str`, optional): A language code.\n\nReturns:\nA chunk list. (:obj:`budou.chunk.ChunkList`)\n\nRaises:\nValueError: If :obj:`language` is given and it is not included in\n:obj:`supported_languages`.", "source": "juraj-google-style"}
{"code": "def __init__(self, working_directory, emulator_zip, java=None):\n    \n    self._working_directory = working_directory\n\n    self._emulators = {}\n\n    \n    zipped_file = zipfile.ZipFile(emulator_zip)\n    if not os.path.isdir(self._working_directory):\n      os.mkdir(self._working_directory)\n    zipped_file.extractall(self._working_directory)\n\n    self._emulator_dir = os.path.join(self._working_directory,\n                                      'cloud-datastore-emulator')\n    self._emulator_cmd = os.path.join(self._emulator_dir,\n                                      'cloud_datastore_emulator')\n    os.chmod(self._emulator_cmd, 0700)  \n\n    \n    if java:\n      os.environ['JAVA'] = java", "docstring": "Constructs a factory for building datastore emulator instances.\n\nArgs:\nworking_directory: path to a directory where temporary files will be\nstored\nemulator_zip: path to the emulator zip file\njava: path to a java executable", "source": "juraj-google-style"}
{"code": "def update(self, **kwargs):\n    if ('image' not in kwargs):\n        spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']\n        kwargs['image'] = spec['Image']\n    if (kwargs.get('force_update') is True):\n        task_template = self.attrs['Spec']['TaskTemplate']\n        current_value = int(task_template.get('ForceUpdate', 0))\n        kwargs['force_update'] = (current_value + 1)\n    create_kwargs = _get_create_service_kwargs('update', kwargs)\n    return self.client.api.update_service(self.id, self.version, **create_kwargs)", "docstring": "Update a service's configuration. Similar to the ``docker service\nupdate`` command.\n\nTakes the same parameters as :py:meth:`~ServiceCollection.create`.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def moma2(self, objective, wt_obj):\n    obj_expr = 0\n    for reaction in self._adjustment_reactions():\n        v_wt = self._v_wt[reaction]\n        v = self._v[reaction]\n        obj_expr += ((v_wt - v) ** 2)\n    self._prob.set_objective(obj_expr)\n    with self.constraints((self._v_wt[objective] >= wt_obj)):\n        self._solve(lp.ObjectiveSense.Minimize)", "docstring": "Find the smallest redistribution vector using Euclidean distance.\n\nMinimizing the redistribution of fluxes using a quadratic objective\nfunction. The distance is minimized by minimizing the sum of\n(wild type - knockout)^2.\n\nCreates the constraint that the we select the optimal flux vector that\nis closest to the wildtype. This might still return an arbitrary flux\nvector the maximizes the objective function.\n\nArgs:\nobjective: Objective reaction for the model.\nwt_obj: The flux value for your wild type objective reactions.\nCan either use an expiremental value or on determined by FBA\nby using :meth:`.get_fba_obj_flux(objective)`.", "source": "codesearchnet"}
{"code": "def validate_layout_display(self, table, display_condition):\n        \n        display = False\n        if display_condition is None:\n            display = True\n        else:\n            display_query = 'select count(*) from {} where {}'.format(table, display_condition)\n            try:\n                cur = self.db_conn.cursor()\n                cur.execute(display_query.replace('\"', ''))\n                rows = cur.fetchall()\n                if rows[0][0] > 0:\n                    display = True\n            except sqlite3.Error as e:\n                print('\"{}\" query returned an error: ({}).'.format(display_query, e))\n                sys.exit(1)\n        return display", "docstring": "Check to see if the display condition passes.\n\nArgs:\ntable (str): The name of the DB table which hold the App data.\ndisplay_condition (str): The \"where\" clause of the DB SQL statement.\n\nReturns:\nbool: True if the row count is greater than 0.", "source": "juraj-google-style"}
{"code": "def get_attribute(json, attr):\n    res = [json[entry][attr] for (entry, _) in enumerate(json)]\n    logger.debug('{0}s (from JSON):\\n{1}'.format(attr, res))\n    return res", "docstring": "Gets the values of an attribute from JSON\n\nArgs:\njson: JSON data as a list of dict dates, where the keys are\nthe raw market statistics.\nattr: String of attribute in JSON file to collect.\n\nReturns:\nList of values of specified attribute from JSON", "source": "codesearchnet"}
{"code": "def values(self):\n    return self._values", "docstring": "The non-zero values in the represented dense tensor.\n\nReturns:\nA 1-D Tensor of any data type.", "source": "github-repos"}
{"code": "def encode_field(self, field, value):\n        \n        for encoder in _GetFieldCodecs(field, 'encoder'):\n            result = encoder(field, value)\n            value = result.value\n            if result.complete:\n                return value\n        if isinstance(field, messages.EnumField):\n            if field.repeated:\n                remapped_value = [GetCustomJsonEnumMapping(\n                    field.type, python_name=e.name) or e.name for e in value]\n            else:\n                remapped_value = GetCustomJsonEnumMapping(\n                    field.type, python_name=value.name)\n            if remapped_value:\n                return remapped_value\n        if (isinstance(field, messages.MessageField) and\n                not isinstance(field, message_types.DateTimeField)):\n            value = json.loads(self.encode_message(value))\n        return super(_ProtoJsonApiTools, self).encode_field(field, value)", "docstring": "Encode the given value as JSON.\n\nArgs:\nfield: a messages.Field for the field we're encoding.\nvalue: a value for field.\n\nReturns:\nA python value suitable for json.dumps.", "source": "juraj-google-style"}
{"code": "def init_from_wave_file(wavpath):\n    try:\n        (samplerate, data) = SW.read(wavpath)\n        nframes = data.shape[0]\n    except:\n        try:\n            w = wave.open(wavpath)\n            samplerate = w.getframerate()\n            nframes = w.getnframes()\n        except:\n            raise Exception(('Cannot decode wavefile ' + wavpath))\n    return SVEnv(samplerate, nframes, wavpath)", "docstring": "Init a sonic visualiser environment structure based the analysis\nof the main audio file. The audio file have to be encoded in wave\n\nArgs:\nwavpath(str): the full path to the wavfile", "source": "codesearchnet"}
{"code": "def GetUsernameByIdentifier(\n      self, user_identifier, session_identifier=CURRENT_SESSION):\n    \n    user_accounts = self._user_accounts.get(session_identifier, {})\n    user_account = user_accounts.get(user_identifier, None)\n    if not user_account:\n      return ''\n\n    return user_account.username or ''", "docstring": "Retrieves the username based on an user identifier.\n\nArgs:\nuser_identifier (str): user identifier, either a UID or SID.\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nReturns:\nstr: username.", "source": "juraj-google-style"}
{"code": "def _has_attr(self, node, obj, attr):\n    if isinstance(obj, abstract.AMBIGUOUS_OR_EMPTY):\n        return (node, None)\n    if not isinstance(attr, abstract.PythonConstant) or not isinstance(attr.pyval, str):\n        return (node, None)\n    node, ret = self.ctx.attribute_handler.get_attribute(node, obj, attr.pyval)\n    return (node, ret is not None)", "docstring": "Check if the object has attribute attr.\n\nArgs:\nnode: The given node.\nobj: A BaseValue, generally the left hand side of a hasattr() call.\nattr: A BaseValue, generally the right hand side of a hasattr() call.\n\nReturns:\n(node, result) where result = True if the object has attribute attr, False\nif it does not, and None if it is ambiguous.", "source": "github-repos"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        \n        \n        args = deepcopy(args)\n        kwargs = deepcopy(kwargs)\n        super(Metadata, self).__init__(*args, **kwargs)\n        self._ensure_id()\n        self._ensure_version()\n        self._validate()\n        self._normalize_dates()\n        self._validate_interval()", "docstring": "prepare compliant, normalized metadata from inputs\n\nArgs:\n\nkwargs: key-value pairs for metadata fields.\n\nRaises:\n\nInvalidDatalakeMetadata if required fields are missing and cannot\nbe inferred.", "source": "juraj-google-style"}
{"code": "def get_ytvideos(query, ilogger):\n    \n\n    queue = []\n\n    \n    search_result = ytdiscoveryapi.search().list(\n            q=query,\n            part=\"id,snippet\",\n            maxResults=1,\n            type=\"video,playlist\"\n    ).execute()\n\n    if not search_result[\"items\"]:\n        return []\n\n    \n    title = search_result[\"items\"][0][\"snippet\"][\"title\"]\n    ilogger.info(\"Queueing {}\".format(title))\n\n    \n    if search_result[\"items\"][0][\"id\"][\"kind\"] == \"youtube\n        \n        videoid = search_result[\"items\"][0][\"id\"][\"videoId\"]\n\n        \n        queue.append([\"https:\n\n    \n    elif search_result[\"items\"][0][\"id\"][\"kind\"] == \"youtube\n        queue = get_queue_from_playlist(search_result[\"items\"][0][\"id\"][\"playlistId\"])\n\n    return queue", "docstring": "Gets either a list of videos from a playlist or a single video, using the\nfirst result of a YouTube search\n\nArgs:\nquery (str): The YouTube search query\nilogger (logging.logger): The logger to log API calls to\n\nReturns:\nqueue (list): The items obtained from the YouTube search", "source": "juraj-google-style"}
{"code": "def setMeterPassword(self, new_pwd, pwd=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setMeterPassword\")\n        try:\n            if len(new_pwd) != 8 or len(pwd) != 8:\n                self.writeCmdMsg(\"Passwords must be exactly eight characters.\")\n                self.setContext(\"\")\n                return result\n\n            if not self.request(False):\n                self.writeCmdMsg(\"Pre command read failed: check serial line.\")\n            else:\n                if not self.serialCmdPwdAuth(pwd):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_pwd = binascii.hexlify(new_pwd.zfill(8))\n                    req_str = \"015731023030323028\" + req_pwd + \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success(setMeterPassword): 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial Call to set meter password.  USE WITH CAUTION.\n\nArgs:\nnew_pwd (str): 8 digit numeric password to set\npwd (str): Old 8 digit numeric password.\n\nReturns:\nbool: True on completion with ACK.", "source": "juraj-google-style"}
{"code": "def FetchMostRecentGraphSeries(label, report_type, token=None):\n    if _ShouldUseLegacyDatastore():\n        return _FetchMostRecentGraphSeriesFromTheLegacyDB(label, report_type, token=token)\n    return data_store.REL_DB.ReadMostRecentClientGraphSeries(label, report_type)", "docstring": "Fetches the latest graph series for a client label from the DB.\n\nArgs:\nlabel: Client label to fetch data for.\nreport_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.\ntoken: ACL token to use for reading from the legacy (non-relational)\ndatastore.\n\nRaises:\nAFF4AttributeTypeError: If, when reading to the legacy DB, an unexpected\nreport-data type is encountered.\n\nReturns:\nThe graph series for the given label and report type that was last\nwritten to the DB, or None if no series for that label and report-type\nexist.", "source": "codesearchnet"}
{"code": "def __setitem__(self, complete_selector, value):\n    \n    if not SELECTOR_RE.match(complete_selector):\n      raise ValueError(\"Invalid selector '{}'.\".format(complete_selector))\n\n    selector_components = complete_selector.split('.')\n    node = self._selector_tree\n\n    \n    for component in selector_components[::-1]:\n      node = node.setdefault(component, {})\n    node[_TERMINAL_KEY] = complete_selector\n    self._selector_map[complete_selector] = value", "docstring": "Associates a value with `complete_selector`.\n\nThis function also performs some additional bookkeeping to facilitate\npartial matching of selectors.\n\nArgs:\ncomplete_selector: The (complete) selector to associate a value with.\nvalue: The value to associate.\n\nRaises:\nValueError: If `complete_selector` isn't a string consisting of valid\nPython identifiers separated by periods.", "source": "juraj-google-style"}
{"code": "def intersect(self, other):\n    if (not isinstance(other, self.__class__)):\n        m = 'You can only intersect striplogs with each other.'\n        raise StriplogError(m)\n    result = []\n    for iv in self:\n        for jv in other:\n            try:\n                result.append(iv.intersect(jv))\n            except IntervalError:\n                pass\n    return Striplog(result)", "docstring": "Makes a striplog of all intersections.\n\nArgs:\nStriplog. The striplog instance to intersect with.\n\nReturns:\nStriplog. The result of the intersection.", "source": "codesearchnet"}
{"code": "def list_tasks(target=None):\n    \n    from os import getcwd, chdir\n    from glob import glob\n    original = getcwd()\n    if target is None:\n        target = _dbdir()\n        \n    chdir(target)\n    result = {}\n    for filename in glob(\"*.*.json\"):\n        project, task = filename.split('.')[0:2]\n        if project not in result:\n            result[project] = []\n        result[project].append(task)\n\n    \n    chdir(original)\n        \n    return result", "docstring": "Returns a list of all the projects and tasks available in the `acorn`\ndatabase directory.\n\nArgs:\ntarget (str): directory to list the projects for. Defaults to the configured\ndatabase directory.\n\nReturns:\ndict: keys are project names; values are lists of tasks associated with the\nproject.", "source": "juraj-google-style"}
{"code": "def __init__(self, item_class, expected_class):\n        \n        super().__init__()\n        self.item_class = item_class\n        self.expected_class = expected_class", "docstring": "Take the parameters to inform the user about the error.\n\nArgs:\nitem_class (:obj:`type`): The class of the item that was being\ninserted in the list when the exception was raised.\nexpected_class (:obj:`type`): The expected type that didn't match\nagainst the item to be inserted.", "source": "juraj-google-style"}
{"code": "def _is_failed(self):\n    if self._status_code in _InstrumentationStatusCodeCategories.FAIL:\n        return True\n    elif self._known_keys[_InstrumentationKnownStatusKeys.STACK] and self._status_code != _InstrumentationStatusCodes.ASSUMPTION_FAILURE:\n        return True\n    elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:\n        return True\n    elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:\n        return True\n    elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:\n        return True\n    else:\n        return False", "docstring": "Determines if the test corresponding to the instrumentation block\nfailed.\n\nThis method can not be used to tell if a test method passed and\nshould not be used for such a purpose.\n\nReturns:\nA boolean indicating if the test method failed.", "source": "github-repos"}
{"code": "def select_by_key(self, key):\n        \n        for item in self.children.values():\n            if 'selected' in item.attributes:\n                del item.attributes['selected']\n        self.children[key].attributes['selected'] = 'selected'\n        self._selected_key = key\n        self._selected_item = self.children[key]", "docstring": "Selects an item by its unique string identifier.\n\nArgs:\nkey (str): Unique string identifier of the DropDownItem that have to be selected.", "source": "juraj-google-style"}
{"code": "def to_csv(self, sep=',', path=None):\n        \n        stats = self._stats()\n\n        data = []\n        first_row = ['Stat', self.name]\n        data.append(sep.join(first_row))\n\n        for stat in stats:\n            k, n, f = stat\n\n            \n            if k is None:\n                row = [''] * len(data[0])\n                data.append(sep.join(row))\n                continue\n            elif k == 'rf' and not type(self.rf) == float:\n                continue\n\n            row = [n]\n            raw = getattr(self, k)\n            if f is None:\n                row.append(raw)\n            elif f == 'p':\n                row.append(fmtp(raw))\n            elif f == 'n':\n                row.append(fmtn(raw))\n            elif f == 'dt':\n                row.append(raw.strftime('%Y-%m-%d'))\n            else:\n                raise NotImplementedError('unsupported format %s' % f)\n\n            data.append(sep.join(row))\n\n        res = '\\n'.join(data)\n\n        if path is not None:\n            with open(path, 'w') as fl:\n                fl.write(res)\n        else:\n            return res", "docstring": "Returns a CSV string with appropriate formatting.\nIf path is not None, the string will be saved to file\nat path.\n\nArgs:\n* sep (char): Separator\n* path (str): If None, CSV string returned. Else file written\nto specified path.", "source": "juraj-google-style"}
{"code": "def _ReadEventDataIntoEvent(self, event):\n    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n        return\n    event_data_identifier = event.GetEventDataIdentifier()\n    if event_data_identifier:\n        lookup_key = event_data_identifier.CopyToString()\n        event_data = self._event_data[lookup_key]\n        for (attribute_name, attribute_value) in event_data.GetAttributes():\n            setattr(event, attribute_name, attribute_value)", "docstring": "Reads the data into the event.\n\nThis function is intended to offer backwards compatible event behavior.\n\nArgs:\nevent (EventObject): event.", "source": "codesearchnet"}
{"code": "def setModelData(self, editor, model, index):\n        \n        if index.isValid():\n            value = editor.text()\n            model.setData(index, value, QtCore.Qt.EditRole)", "docstring": "Gets data from the editor widget and stores it in the specified model at the item index.\n\nArgs:\neditor (QtGui.QLineEdit): editor widget.\nmodel (QAbstractItemModel): parent model.\nindex (QModelIndex): model data index.", "source": "juraj-google-style"}
{"code": "def get_params(img, scale, ratio):\n    area = (img.size[0] * img.size[1])\n    for attempt in range(10):\n        target_area = (random.uniform(*scale) * area)\n        log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n        aspect_ratio = math.exp(random.uniform(*log_ratio))\n        w = int(round(math.sqrt((target_area * aspect_ratio))))\n        h = int(round(math.sqrt((target_area / aspect_ratio))))\n        if ((w <= img.size[0]) and (h <= img.size[1])):\n            i = random.randint(0, (img.size[1] - h))\n            j = random.randint(0, (img.size[0] - w))\n            return (i, j, h, w)\n    in_ratio = (img.size[0] / img.size[1])\n    if (in_ratio < min(ratio)):\n        w = img.size[0]\n        h = (w / min(ratio))\n    elif (in_ratio > max(ratio)):\n        h = img.size[1]\n        w = (h * max(ratio))\n    else:\n        w = img.size[0]\n        h = img.size[1]\n    i = ((img.size[1] - h) \n    j = ((img.size[0] - w) \n    return (i, j, h, w)", "docstring": "Get parameters for ``crop`` for a random sized crop.\n\nArgs:\nimg (PIL Image): Image to be cropped.\nscale (tuple): range of size of the origin size cropped\nratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\nReturns:\ntuple: params (i, j, h, w) to be passed to ``crop`` for a random\nsized crop.", "source": "codesearchnet"}
{"code": "def memory_read8(self, addr, num_bytes, zone=None):\n        \n        return self.memory_read(addr, num_bytes, zone=zone, nbits=8)", "docstring": "Reads memory from the target system in units of bytes.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_bytes (int): number of bytes to read\nzone (str): memory zone to read from\n\nReturns:\nList of bytes read from the target system.\n\nRaises:\nJLinkException: if memory could not be read.", "source": "juraj-google-style"}
{"code": "def _get_group_object(name):\n    \n    with salt.utils.winapi.Com():\n        nt = win32com.client.Dispatch('AdsNameSpaces')\n    return nt.GetObject('', 'WinNT:", "docstring": "A helper function to get a specified group object\n\nArgs:\n\nname (str): The name of the object\n\nReturns:\nobject: The specified group object", "source": "juraj-google-style"}
{"code": "def save_local_scope(\n        self,\n        line_number,\n        saved_function_call_index\n    ):\n        \n        saved_variables = list()\n        saved_variables_so_far = set()\n        first_node = None\n\n        \n        for assignment in [node for node in self.nodes\n                           if (type(node) == AssignmentNode or\n                               type(node) == AssignmentCallNode or\n                               type(Node) == BBorBInode)]:  \n            if assignment.left_hand_side in saved_variables_so_far:\n                continue\n            saved_variables_so_far.add(assignment.left_hand_side)\n            save_name = 'save_{}_{}'.format(saved_function_call_index, assignment.left_hand_side)\n\n            previous_node = self.nodes[-1]\n\n            saved_scope_node = RestoreNode(\n                save_name + ' = ' + assignment.left_hand_side,\n                save_name,\n                [assignment.left_hand_side],\n                line_number=line_number,\n                path=self.filenames[-1]\n            )\n            if not first_node:\n                first_node = saved_scope_node\n\n            self.nodes.append(saved_scope_node)\n            \n            saved_variables.append(SavedVariable(LHS=save_name,\n                                                 RHS=assignment.left_hand_side))\n            self.connect_if_allowed(previous_node, saved_scope_node)\n\n        return (saved_variables, first_node)", "docstring": "Save the local scope before entering a function call by saving all the LHS's of assignments so far.\n\nArgs:\nline_number(int): Of the def of the function call about to be entered into.\nsaved_function_call_index(int): Unique number for each call.\n\nReturns:\nsaved_variables(list[SavedVariable])\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.", "source": "juraj-google-style"}
{"code": "def binomial_coefficient(n, k):\n\n    \n\n    if not isinstance(k, int) or not isinstance(n, int):\n        raise TypeError(\"Expecting positive integers\")\n    if k > n:\n        raise ValueError(\"k must be lower or equal than n\")\n    if k < 0 or n < 0:\n        raise ValueError(\"Expecting positive integers\")\n\n    return factorial(n)", "docstring": "Calculate the binomial coefficient indexed by n and k.\n\nArgs:\nn (int): positive integer\nk (int): positive integer\n\nReturns:\nThe binomial coefficient indexed by n and k\n\nRaises:\nTypeError: If either n or k is not an integer\nValueError: If either n or k is negative, or if k is strictly greater than n", "source": "juraj-google-style"}
{"code": "def _start_services_on_ads(ads):\n    for ad in ads:\n        start_logcat = not getattr(ad, KEY_SKIP_LOGCAT, DEFAULT_VALUE_SKIP_LOGCAT)\n        try:\n            if start_logcat:\n                ad.services.logcat.start()\n        except Exception:\n            is_required = getattr(ad, KEY_DEVICE_REQUIRED, DEFAULT_VALUE_DEVICE_REQUIRED)\n            if is_required:\n                ad.log.exception('Failed to start some services, abort!')\n                destroy(ads)\n                raise\n            else:\n                ad.log.exception('Skipping this optional device because some services failed to start.')", "docstring": "Starts long running services on multiple AndroidDevice objects.\n\nIf any one AndroidDevice object fails to start services, cleans up all\nAndroidDevice objects and their services.\n\nArgs:\nads: A list of AndroidDevice objects whose services to start.", "source": "github-repos"}
{"code": "def add_gene(self, gene):\n    logger.debug('Adding gene {0} to variant {1}'.format(gene, self['variant_id']))\n    self['genes'].append(gene)", "docstring": "Add the information of a gene\n\nThis adds a gene dict to variant['genes']\n\nArgs:\ngene (dict): A gene dictionary", "source": "codesearchnet"}
{"code": "def _AssertProtoEquals(self, a, b, msg=None, relative_tolerance=None):\n    if not compare.ProtoEq(a, b):\n        compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg, relative_tolerance=relative_tolerance)", "docstring": "Asserts that a and b are the same proto.\n\nUses ProtoEq() first, as it returns correct results\nfor floating point attributes, and then use assertProtoEqual()\nin case of failure as it provides good error messages.\n\nArgs:\na: a proto.\nb: another proto.\nmsg: Optional message to report on failure.\nrelative_tolerance: float. The allowable difference between the two values\nbeing compared is determined by multiplying the relative tolerance by\nthe maximum of the two values. If this is not provided, then all floats\nare compared using string comparison.", "source": "github-repos"}
{"code": "def __le__(self, other):\n        \n        other = self._cast_to_frameset(other)\n        if other is NotImplemented:\n            return NotImplemented\n        return self.items <= other.items", "docstring": "Check if `self` <= `other` via a comparison of the contents.\nIf `other` is not a :class:`FrameSet`, but is a set, frozenset, or\nis iterable, it will be cast to a :class:`FrameSet`.\n\nArgs:\nother (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet`\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def info(msg: str, *args, **kwargs) -> None:\n    _DEFAULT_LOGGER.info(msg, *args, **kwargs)", "docstring": "Logs info message.\n\nArgs:\nmsg: Message with possible format string.\n*args: Values for variables in the format string.\n**kwargs: Keyword arguments for the logger.", "source": "github-repos"}
{"code": "def join(input_files, output_file):\n    \n\n    \n    final_features = []\n    for file in input_files:\n        with open(file) as f:\n            feat_collection = geojson.load(f)\n            final_features += feat_collection['features']\n\n    feat_collection['features'] = final_features\n\n    \n    with open(output_file, 'w') as f:\n        geojson.dump(feat_collection, f)", "docstring": "Join geojsons into one. The spatial reference system of the output file is the same\nas the one of the last file in the list.\n\nArgs:\ninput_files (list): List of file name strings.\noutput_file (str): Output file name.", "source": "juraj-google-style"}
{"code": "def freeze_parameter(self, name):\n    i = self.get_parameter_names(include_frozen=True).index(name)\n    self.unfrozen_mask[i] = False", "docstring": "Freeze a parameter by name\n\nArgs:\nname: The name of the parameter", "source": "codesearchnet"}
{"code": "def create_tc_entity(self, key, value):\n    data = None\n    if ((key is not None) and (value is not None)):\n        data = self.db.create(key.strip(), json.dumps(value))\n    else:\n        self.tcex.log.warning(u'The key or value field was None.')\n    return data", "docstring": "Create method of CRUD operation for TC entity data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"}
{"code": "def unpack_grad_tuple(gv, gpt):\n    \n    elt_widths = [x.num_elements() for x in gpt.shapes]\n    with tf.device(gv[0][0].device):\n        with tf.name_scope(\"unpack\"):\n            splits = tf.split(gv[0], elt_widths)\n            unpacked_gv = []\n            for idx, s in enumerate(splits):\n                unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),\n                                    gpt.vars[idx]))\n    return unpacked_gv", "docstring": "Unpack a previously packed collection of gradient tensors.\n\nArgs:\ngv: A (grad, var) pair to be unpacked.\ngpt: A GradPackTuple describing the packing operation that produced gv.\n\nReturns:\nA list of (grad, var) pairs corresponding to the values that were\noriginally packed into gv, maybe following subsequent operations like\nreduction.", "source": "juraj-google-style"}
{"code": "def render_latex(latex: str) -> PIL.Image:      \n    \n    tmpfilename = 'circ'\n    with tempfile.TemporaryDirectory() as tmpdirname:\n        tmppath = os.path.join(tmpdirname, tmpfilename)\n        with open(tmppath + '.tex', 'w') as latex_file:\n            latex_file.write(latex)\n\n        subprocess.run([\"pdflatex\",\n                        \"-halt-on-error\",\n                        \"-output-directory={}\".format(tmpdirname),\n                        \"{}\".format(tmpfilename+'.tex')],\n                       stdout=subprocess.PIPE,\n                       stderr=subprocess.DEVNULL,\n                       check=True)\n\n        subprocess.run(['pdftocairo',\n                        '-singlefile',\n                        '-png',\n                        '-q',\n                        tmppath + '.pdf',\n                        tmppath])\n        img = PIL.Image.open(tmppath + '.png')\n\n    return img", "docstring": "Convert a single page LaTeX document into an image.\n\nTo display the returned image, `img.show()`\n\n\nRequired external dependencies: `pdflatex` (with `qcircuit` package),\nand `poppler` (for `pdftocairo`).\n\nArgs:\nA LaTeX document as a string.\n\nReturns:\nA PIL Image\n\nRaises:\nOSError: If an external dependency is not installed.", "source": "juraj-google-style"}
{"code": "def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):\n        \n        \n\n        if ssbio.utils.is_ipynb():\n            import nglview as nv\n        else:\n            raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')\n\n        if not self.structure_file:\n            raise ValueError(\"Structure file not loaded\")\n\n        only_chains = ssbio.utils.force_list(only_chains)\n        to_show_chains = '( '\n        for c in only_chains:\n            to_show_chains += ':{} or'.format(c)\n        to_show_chains = to_show_chains.strip(' or ')\n        to_show_chains += ' )'\n\n        if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':\n            view = nv.NGLWidget()\n            view.add_component(self.structure_path)\n        else:\n            view = nv.show_structure_file(self.structure_path, gui=gui)\n\n        if recolor:\n            view.clear_representations()\n            if only_chains:\n                view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)\n            else:\n                view.add_cartoon(selection='protein', color='silver', opacity=opacity)\n        elif only_chains:\n            view.clear_representations()\n            view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)\n\n        return view", "docstring": "Use NGLviewer to display a structure in a Jupyter notebook\n\nArgs:\nonly_chains (str, list): Chain ID or IDs to display\nopacity (float): Opacity of the structure\nrecolor (bool): If structure should be cleaned and recolored to silver\ngui (bool): If the NGLview GUI should show up\n\nReturns:\nNGLviewer object", "source": "juraj-google-style"}
{"code": "def process_status_queue(self):\n    self.log.debug('Start processing status queue')\n    while True:\n        messages = self.status_queue.receive_messages(MaxNumberOfMessages=10)\n        if (not messages):\n            break\n        for message in messages:\n            data = json.loads(message.body)\n            job = SchedulerJob.get(data['id'])\n            try:\n                if (job and job.update_status(data['status'])):\n                    db.session.commit()\n            except SchedulerError as ex:\n                if (hasattr(ex, 'message') and (ex.message == 'Attempting to update already completed job')):\n                    pass\n            message.delete()\n    open_batches = db.SchedulerBatch.find((SchedulerBatch.status < SchedulerStatus.COMPLETED))\n    for batch in open_batches:\n        open_jobs = list(filter((lambda x: (x.status < SchedulerStatus.COMPLETED)), batch.jobs))\n        if (not open_jobs):\n            open_batches.remove(batch)\n            batch.update_status(SchedulerStatus.COMPLETED)\n            self.log.debug('Closed completed batch {}'.format(batch.batch_id))\n        else:\n            started_jobs = list(filter((lambda x: (x.status > SchedulerStatus.PENDING)), open_jobs))\n            if ((batch.status == SchedulerStatus.PENDING) and (len(started_jobs) > 0)):\n                batch.update_status(SchedulerStatus.STARTED)\n                self.log.debug('Started batch manually {}'.format(batch.batch_id))\n    for batch in open_batches:\n        if (batch.started < (datetime.now() - timedelta(hours=2))):\n            self.log.warning('Closing a stale scheduler batch: {}'.format(batch.batch_id))\n            for job in batch.jobs:\n                if (job.status < SchedulerStatus.COMPLETED):\n                    job.update_status(SchedulerStatus.ABORTED)\n            batch.update_status(SchedulerStatus.ABORTED)\n    db.session.commit()", "docstring": "Process all messages in the `status_queue` and check for any batches that needs to change status\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def __init__(self, fileobj):\n        \n        self.fileobj = fileobj\n\n        self.mardata = mar.parse_stream(self.fileobj)", "docstring": "Initialize a new MarReader object.\n\nNote:\nFiles should always be opened in binary mode.\n\nArgs:\nfileobj (file object): A file-like object open in read mode where\nthe MAR data will be read from. This object must also be\nseekable (i.e.  support .seek() and .tell()).", "source": "juraj-google-style"}
{"code": "def __init__(self, a_file=None):\n    \n    self._macros = dict()\n    if a_file:\n      self.ParseInput(a_file)", "docstring": "Initializes the collection.\n\nArgs:\na_file: The file like stream to parse.\n\nRaises:\nPDDMError if there are any issues.", "source": "juraj-google-style"}
{"code": "def update_compliance_all(self, information, timeout=(- 1)):\n    uri = (self.URI + '/compliance')\n    result = self._helper.update(information, uri, timeout=timeout)\n    return result", "docstring": "Returns SAS Logical Interconnects to a consistent state. The current SAS Logical Interconnect state is\ncompared to the associated SAS Logical Interconnect group.\n\nArgs:\ninformation: Can be either the resource ID or URI.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: SAS Logical Interconnect.", "source": "codesearchnet"}
{"code": "def add_word(self, word):\n        \n        word = word.lower()\n\n        if not (word.isascii() and word.isalpha()):\n            raise ValueError(\"Invalid character in word '{}'\".format(word))\n\n        word = word.encode(encoding=\"ascii\")\n        result = cgaddag.gdg_add_word(self.gdg, word)\n        if result == 1:\n            raise ValueError(\"Invalid character in word '{}'\".format(word))\n        elif result == 2:\n            raise MemoryError(\"Out of memory, GADDAG is in an undefined state\")", "docstring": "Add a word to the GADDAG.\n\nArgs:\nword: A word to be added to the GADDAG.", "source": "juraj-google-style"}
{"code": "def sys_save_screenshot(name: Optional[str]=None) -> None:\n    lib.TCOD_sys_save_screenshot((_bytes(name) if (name is not None) else ffi.NULL))", "docstring": "Save a screenshot to a file.\n\nBy default this will automatically save screenshots in the working\ndirectory.\n\nThe automatic names are formatted as screenshotNNN.png.  For example:\nscreenshot000.png, screenshot001.png, etc.  Whichever is available first.\n\nArgs:\nfile Optional[AnyStr]: File path to save screenshot.", "source": "codesearchnet"}
{"code": "def extract_value_from_output(canary, split_offset, kal_out):\n    \n    retval = \"\"\n    while retval == \"\":\n        for line in kal_out.splitlines():\n            if canary in line:\n                retval = str(line.split()[split_offset])\n        if retval == \"\":\n            retval = None\n    return retval", "docstring": "Return value parsed from output.\n\nArgs:\ncanary(str): This string must exist in the target line.\nsplit_offset(int): Split offset for target value in string.\nkal_out(int): Output from kal.", "source": "juraj-google-style"}
{"code": "def parse_isoformat(timestamp):\n        \n        if len(timestamp) == 20:\n            zone = TzOffset('+00:00')\n            timestamp = timestamp[:-1]\n        elif len(timestamp) == 24:\n            zone = TzOffset('%s:%s' % (timestamp[-5:-2], timestamp[-2:]))\n            timestamp = timestamp[:-5]\n        elif len(timestamp) == 25:\n            zone = TzOffset(timestamp[-6:])\n            timestamp = timestamp[:-6]\n        timestamp = Timestamp.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n        timestamp = timestamp.replace(tzinfo=zone)\n        return timestamp", "docstring": "Parse an ISO 8601 formatted time stamp.\n\nArgs:\ntimestamp (str): Timestamp to parse\n\nReturns:\nTimestamp: Parsed timestamp", "source": "juraj-google-style"}
{"code": "def decodes(self, s: str) -> BioCCollection:\n        \n        tree = etree.parse(io.BytesIO(bytes(s, encoding='UTF-8')))\n        collection = self.__parse_collection(tree.getroot())\n        collection.encoding = tree.docinfo.encoding\n        collection.standalone = tree.docinfo.standalone\n        collection.version = tree.docinfo.xml_version\n        return collection", "docstring": "Deserialize ``s`` to a BioC collection object.\n\nArgs:\ns: a \"str\" instance containing a BioC collection\n\nReturns:\nan object of BioCollection", "source": "juraj-google-style"}
{"code": "def from_text_file(file_path):\n    results = []\n    with io.open(file_path, 'r', encoding='utf-8') as f:\n        data_strs = f.read().split(MonsoonData.delimiter)\n        for data_str in data_strs:\n            results.append(MonsoonData.from_string(data_str))\n    return results", "docstring": "Load MonsoonData objects from a text file generated by\nMonsoonData.save_to_text_file.\n\nArgs:\nfile_path: The full path of the file load from, including the file\nname.\n\nReturns:\nA list of MonsoonData objects.", "source": "codesearchnet"}
{"code": "def convert_error(exc_src, exc_dest):\n    \n\n    def wrap(func):\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                return func(*args, **kwargs)\n            except exc_dest:\n                raise\n            except exc_src as err:\n                reraise(exc_dest, err, sys.exc_info()[2])\n\n        return wrapper\n\n    return wrap", "docstring": "A decorator for reraising exceptions with a different type.\nMostly useful for IOError.\n\nArgs:\nexc_src (type): The source exception type\nexc_dest (type): The target exception type.", "source": "juraj-google-style"}
{"code": "def publish(self, event_type: str, event_data: dict = None):\n        \n        import inspect\n        import os.path\n        _stack = inspect.stack()\n        _origin = os.path.basename(_stack[3][1]) + '::' + \\\n            _stack[3][3]+'::L{}'.format(_stack[3][2])\n\n        publish(event_type=event_type,\n                event_data=event_data,\n                object_type=self._type,\n                object_id=self._id,\n                object_key=self._key,\n                origin=_origin)", "docstring": "Publish an event associated with the scheduling object.\n\nNote:\nIdeally publish should not be used directly but by other methods\nwhich perform actions on the object.\n\nArgs:\nevent_type (str): Type of event.\nevent_data (dict, optional): Event data.", "source": "juraj-google-style"}
{"code": "def __init__(self, idx, name=\"select_input\"):\n    \n    super(SelectInput, self).__init__(name=name)\n    self._check_type(idx)\n    self._idx = idx", "docstring": "Module constructor.\n\nArgs:\nidx: Indexes of the tensors to select. If `idx` is an integer, then\na `Tensor` is returned. If `idx` is a (nested) list/tuple, then a\n(nested) tuple of `Tensor` is returned.\nname: Name of the module.\n\nRaises:\nTypeError: If `idx` is not an list, tuple or integer.", "source": "juraj-google-style"}
{"code": "def calc_padding(fmt, align):\n    \n    remain = struct.calcsize(fmt) % align\n    if remain == 0:\n        return \"\"\n    return 'x' * (align - remain)", "docstring": "Calculate how many padding bytes needed for ``fmt`` to be aligned to\n``align``.\n\nArgs:\nfmt (str): :mod:`struct` format.\nalign (int): alignment (2, 4, 8, etc.)\n\nReturns:\nstr: padding format (e.g., various number of 'x').\n\n>>> calc_padding('b', 2)\n'x'\n\n>>> calc_padding('b', 3)\n'xx'", "source": "juraj-google-style"}
{"code": "def tracking_metadata(self):\n    return json_utils.Encoder().encode(self.python_properties)", "docstring": "String stored in metadata field in the SavedModel proto.\n\nReturns:\nA serialized JSON storing information necessary for recreating this layer.", "source": "github-repos"}
{"code": "class Llama4VisionEncoder(nn.Module):\n\n    def __init__(self, config: Llama4VisionConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Llama4VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n        self.config = config\n\n    def forward(self, hidden_states: torch.Tensor, freqs_ci: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, freqs_ci, attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_state=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, freqs_ci=freqs_ci)\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n            hidden_states = layer_outputs[0]\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Llama4VisionEncoderLayer`].\n\nArgs:\nconfig: Llama4VisionConfig", "source": "github-repos"}
{"code": "def set_forced_variation(self, experiment_key, user_id, variation_key):\n    \n\n    if not self.is_valid:\n      self.logger.error(enums.Errors.INVALID_DATAFILE.format('set_forced_variation'))\n      return False\n\n    if not validator.is_non_empty_string(experiment_key):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n      return False\n\n    if not isinstance(user_id, string_types):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n      return False\n\n    return self.config.set_forced_variation(experiment_key, user_id, variation_key)", "docstring": "Force a user into a variation for a given experiment.\n\nArgs:\nexperiment_key: A string key identifying the experiment.\nuser_id: The user ID.\nvariation_key: A string variation key that specifies the variation which the user.\nwill be forced into. If null, then clear the existing experiment-to-variation mapping.\n\nReturns:\nA boolean value that indicates if the set completed successfully.", "source": "juraj-google-style"}
{"code": "def AddAnalysisReport(self, analysis_report):\n    \n    self._RaiseIfNotWritable()\n\n    self._storage_file.AddAnalysisReport(analysis_report)\n\n    report_identifier = analysis_report.plugin_name\n    self._session.analysis_reports_counter['total'] += 1\n    self._session.analysis_reports_counter[report_identifier] += 1\n    self.number_of_analysis_reports += 1", "docstring": "Adds an analysis report.\n\nArgs:\nanalysis_report (AnalysisReport): analysis report.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def parse_package_string(path):\n    parts = path.split('.')\n    if parts[(- 1)][0].isupper():\n        return ('.'.join(parts[:(- 1)]), parts[(- 1)])\n    return (path, '')", "docstring": "Parse the effect package string.\nCan contain the package python path or path to effect class in an effect package.\n\nExamples::\n\n# Path to effect pacakge\nexamples.cubes\n\n# Path to effect class\nexamples.cubes.Cubes\n\nArgs:\npath: python path to effect package. May also include effect class name.\n\nReturns:\ntuple: (package_path, effect_class)", "source": "codesearchnet"}
{"code": "def get_likelihood(self, uni_matrix):\n        \n        if self.parents is None:\n            left_u = uni_matrix[:, self.L]\n            right_u = uni_matrix[:, self.R]\n\n        else:\n            left_ing = list(self.D - self.parents[0].D)[0]\n            right_ing = list(self.D - self.parents[1].D)[0]\n            left_u = uni_matrix[self.L, left_ing]\n            right_u = uni_matrix[self.R, right_ing]\n\n        copula = Bivariate(self.name)\n        copula.theta = self.theta\n\n        X_left_right = np.array([[left_u, right_u]])\n        X_right_left = np.array([[right_u, left_u]])\n\n        value = np.sum(copula.probability_density(X_left_right))\n        left_given_right = copula.partial_derivative(X_left_right)\n        right_given_left = copula.partial_derivative(X_right_left)\n\n        return value, left_given_right, right_given_left", "docstring": "Compute likelihood given a U matrix.\n\nArgs:\nuni_matrix(numpy.array): Matrix to compute the likelihood.\n\nReturn:\ntuple(np.ndarray, np.ndarray, np.array): likelihood and conditional values.", "source": "juraj-google-style"}
{"code": "def delete_idx_status(self, rdf_class):\n    sparql_template = '\\n            DELETE\\n            {{\\n                ?s kds:esIndexTime ?esTime .\\n                ?s kds:esIndexError ?esError .\\n            }}\\n            WHERE\\n            {{\\n\\n                VALUES ?rdftypes {{\\n\\t\\t{} }} .\\n                ?s a ?rdftypes .\\n                OPTIONAL {{\\n                    ?s kds:esIndexTime ?esTime\\n                }}\\n                OPTIONAL {{\\n                    ?s kds:esIndexError ?esError\\n                }}\\n                FILTER(bound(?esTime)||bound(?esError))\\n            }}\\n            '\n    rdf_types = ([rdf_class.uri] + [item.uri for item in rdf_class.subclasses])\n    sparql = sparql_template.format('\\n\\t\\t'.join(rdf_types))\n    log.warn('Deleting index status for %s', rdf_class.uri)\n    return self.tstore_conn.update_query(sparql)", "docstring": "Removes all of the index status triples from the datastore\n\nArgs:\n-----\nrdf_class: The class of items to remove the status from", "source": "codesearchnet"}
{"code": "def info(self, collector_id):\n        \n        cid = self.collector_id\n        if collector_id:\n            cid = collector_id\n\n        url = '{0}/{1}'.format(self.url, cid)\n        request = requests.get(url, auth=self.auth)\n        return request.json()", "docstring": "Return a dict of collector.\n\nArgs:\ncollector_id (int): id of collector (optional)", "source": "juraj-google-style"}
{"code": "def get_structure_property_dict(self, structure, include_base_props=True,\n                                    ignore_errors=False):\n        \n        s_props = [\"trans_v\", \"long_v\", \"snyder_ac\", \"snyder_opt\",\n                   \"snyder_total\", \"clarke_thermalcond\", \"cahill_thermalcond\",\n                   \"debye_temperature\"]\n        if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):\n            sp_dict = {prop: None for prop in s_props}\n        else:\n            sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}\n        sp_dict[\"structure\"] = structure\n        if include_base_props:\n            sp_dict.update(self.property_dict)\n        return sp_dict", "docstring": "returns a dictionary of properties derived from the elastic tensor\nand an associated structure\n\nArgs:\nstructure (Structure): structure object for which to calculate\nassociated properties\ninclude_base_props (bool): whether to include base properties,\nlike k_vrh, etc.\nignore_errors (bool): if set to true, will set problem properties\nthat depend on a physical tensor to None, defaults to False", "source": "juraj-google-style"}
{"code": "def with_row(self, row):\n        \n        self = self.copy()\n        self.append(row)\n        return self", "docstring": "Return a table with an additional row.\n\nArgs:\n``row`` (sequence): A value for each column.\n\nRaises:\n``ValueError``: If the row length differs from the column count.\n\n>>> tiles = Table(make_array('letter', 'count', 'points'))\n>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])\nletter | count | points\nc      | 2     | 3\nd      | 4     | 2", "source": "juraj-google-style"}
{"code": "def parse_args(test: typing.Optional[typing.List[str]]=None) -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n    parser.add_argument('train_data', help='File path for the encoded training data.')\n    parser.add_argument('base_model', help='File path for the base model file.')\n    parser.add_argument('-o', '--output', help=f'File path for the output weights. (default: {DEFAULT_OUTPUT_NAME})', type=str, default=DEFAULT_OUTPUT_NAME)\n    parser.add_argument('--val-data', help='File path for the encoded validation data.', type=str)\n    parser.add_argument('--iters', help=f'Number of iterations for training. (default: {DEFAULT_NUM_ITERS})', type=int, default=DEFAULT_NUM_ITERS)\n    parser.add_argument('--log-span', help=f'Iteration span to print metrics. (default: {DEFAULT_LOG_SPAN})', type=int, default=DEFAULT_LOG_SPAN)\n    parser.add_argument('--learning-rate', help=f'Learning rate. (default: {DEFAULT_LEARNING_RATE})', type=float, default=DEFAULT_LEARNING_RATE)\n    if test is None:\n        return parser.parse_args()\n    else:\n        return parser.parse_args(test)", "docstring": "Parses commandline arguments.\n\nArgs:\ntest (typing.Optional[typing.List[str]], optional): Commandline args for\ntesting. Defaults to None.\n\nReturns:\nParsed arguments (argparse.Namespace).", "source": "github-repos"}
{"code": "def Matches(self, file_entry):\n    \n    if not self._names or not file_entry.IsFile():\n      return False\n\n    return file_entry.name.lower() in self._names", "docstring": "Compares the file entry against the filter.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to compare.\n\nReturns:\nbool: True if the file entry matches the filter.", "source": "juraj-google-style"}
{"code": "def keyword(self, **kwargs):\n        \n        path = self._get_path('keyword')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Search for keywords by name.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def get_aligned_collection(self, value=0, data_type=None, unit=None, mutable=None):\n    header = self._check_aligned_header(data_type, unit)\n    values = self._check_aligned_value(value)\n    if (mutable is None):\n        collection = self.__class__(header, values, self.datetimes)\n    else:\n        if (self._enumeration is None):\n            self._get_mutable_enumeration()\n        if (mutable is False):\n            col_obj = self._enumeration['immutable'][self._collection_type]\n        else:\n            col_obj = self._enumeration['mutable'][self._collection_type]\n        collection = col_obj(header, values, self.datetimes)\n    collection._validated_a_period = self._validated_a_period\n    return collection", "docstring": "Return a Collection aligned with this one composed of one repeated value.\n\nAligned Data Collections are of the same Data Collection class, have the same\nnumber of values and have matching datetimes.\n\nArgs:\nvalue: A value to be repeated in the aliged collection values or\nA list of values that has the same length as this collection.\nDefault: 0.\ndata_type: The data type of the aligned collection. Default is to\nuse the data type of this collection.\nunit: The unit of the aligned collection. Default is to\nuse the unit of this collection or the base unit of the\ninput data_type (if it exists).\nmutable: An optional Boolean to set whether the returned aligned\ncollection is mutable (True) or immutable (False). The default is\nNone, which will simply set the aligned collection to have the\nsame mutability as the starting collection.", "source": "codesearchnet"}
{"code": "def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):\n    return internal_convert_n_to_tensor_or_indexed_slices(values=values, dtype=dtype, name=name, as_ref=False)", "docstring": "Converts `values` to a list of `Output` or `IndexedSlices` objects.\n\nAny `IndexedSlices` or `SparseTensor` objects in `values` are returned\nunmodified.\n\nArgs:\nvalues: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that\ncan be consumed by `convert_to_tensor()`.\ndtype: (Optional.) The required `DType` of the returned `Tensor`\n`IndexedSlices`.\nname: (Optional.) A name prefix to used when a new `Tensor` is created, in\nwhich case element `i` will be given the name `name + '_' + i`.\n\nReturns:\nA list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.\n\nRaises:\nTypeError: If no conversion function is registered for an element in\n`values`.\nRuntimeError: If a registered conversion function returns an invalid\nvalue.", "source": "github-repos"}
{"code": "def get_course_modes(self, course_id):\n        \n        details = self.get_course_details(course_id)\n        modes = details.get('course_modes', [])\n        return self._sort_course_modes([mode for mode in modes if mode['slug'] not in EXCLUDED_COURSE_MODES])", "docstring": "Query the Enrollment API for the specific course modes that are available for the given course_id.\n\nArguments:\ncourse_id (str): The string value of the course's unique identifier\n\nReturns:\nlist: A list of course mode dictionaries.", "source": "juraj-google-style"}
{"code": "def AppendSource(self, type_indicator, attributes):\n    if (not type_indicator):\n        raise errors.FormatError('Missing type indicator.')\n    try:\n        source_object = registry.ArtifactDefinitionsRegistry.CreateSourceType(type_indicator, attributes)\n    except (AttributeError, TypeError) as exception:\n        raise errors.FormatError('Unable to create source type: {0:s} for artifact definition: {1:s} with error: {2!s}'.format(type_indicator, self.name, exception))\n    self.sources.append(source_object)\n    return source_object", "docstring": "Appends a source.\n\nIf you want to implement your own source type you should create a subclass\nin source_type.py and change the AppendSource method to handle the new\nsubclass. This function raises FormatError if an unsupported source type\nindicator is encountered.\n\nArgs:\ntype_indicator (str): source type indicator.\nattributes (dict[str, object]): source attributes.\n\nReturns:\nSourceType: a source type.\n\nRaises:\nFormatError: if the type indicator is not set or unsupported,\nor if required attributes are missing.", "source": "codesearchnet"}
{"code": "def get_reaction(self, reactants, products):\n        \n        return self._make_request(\"/reaction\",\n                                  payload={\"reactants[]\": reactants,\n                                           \"products[]\": products}, mp_decode=False)", "docstring": "Gets a reaction from the Materials Project.\n\nArgs:\nreactants ([str]): List of formulas\nproducts ([str]): List of formulas\n\nReturns:\nrxn", "source": "juraj-google-style"}
{"code": "def _create_warm_start_tuner(self, additional_parents, warm_start_type, estimator=None):\n    all_parents = {self.latest_tuning_job.name}\n    if additional_parents:\n        all_parents = all_parents.union(additional_parents)\n    return HyperparameterTuner(estimator=(estimator if estimator else self.estimator), objective_metric_name=self.objective_metric_name, hyperparameter_ranges=self._hyperparameter_ranges, objective_type=self.objective_type, max_jobs=self.max_jobs, max_parallel_jobs=self.max_parallel_jobs, warm_start_config=WarmStartConfig(warm_start_type=warm_start_type, parents=all_parents))", "docstring": "Creates a new ``HyperparameterTuner`` with ``WarmStartConfig``, where type will be equal to\n``warm_start_type`` and``parents`` would be equal to union of ``additional_parents`` and self.\n\nArgs:\nadditional_parents (set{str}): Additional parents along with self, to be used for warm starting.\nwarm_start_type (sagemaker.tuner.WarmStartTypes): Type of warm start job.\n\nReturns:\nsagemaker.tuner.HyperparameterTuner: Instance with the request fields copied from self along with the\nwarm start configuration", "source": "codesearchnet"}
{"code": "def _GenerateSection(self, problem_type):\n    \n    if problem_type == transitfeed.TYPE_WARNING:\n      dataset_problems = self._dataset_warnings\n      heading = 'Warnings'\n    else:\n      dataset_problems = self._dataset_errors\n      heading = 'Errors'\n\n    if not dataset_problems:\n      return ''\n\n    prefix = '<h2 class=\"issueHeader\">%s:</h2>' % heading\n    dataset_sections = []\n    for dataset_merger, problems in dataset_problems.items():\n      dataset_sections.append('<h3>%s</h3><ol>%s</ol>' % (\n          dataset_merger.FILE_NAME, '\\n'.join(problems)))\n    body = '\\n'.join(dataset_sections)\n    return prefix + body", "docstring": "Generate a listing of the given type of problems.\n\nArgs:\nproblem_type: The type of problem. This is one of the problem type\nconstants from transitfeed.\n\nReturns:\nThe generated HTML as a string.", "source": "juraj-google-style"}
{"code": "def VisitParameter(self, p):\n    if not self.class_types:\n        return p\n    if not self.force and (not isinstance(p.type, pytd.AnythingType)):\n        return p\n    if p.name == 'self' and self.method_kind in (pytd.MethodKind.METHOD, pytd.MethodKind.PROPERTY):\n        return p.Replace(type=self.class_types[-1])\n    elif p.name == 'cls' and self.method_kind == pytd.MethodKind.CLASSMETHOD:\n        cls_type = pytd.GenericType(pytd.NamedType('builtins.type'), parameters=(self.class_types[-1],))\n        return p.Replace(type=cls_type)\n    else:\n        return p", "docstring": "Adjust all parameters called \"self\" to have their base class type.\n\nBut do this only if their original type is unoccupied (\"Any\").\n\nArgs:\np: pytd.Parameter instance.\n\nReturns:\nAdjusted pytd.Parameter instance.", "source": "github-repos"}
{"code": "def _get_condition_json(self, index):\n    condition = self.condition_data[index]\n    condition_log = {'name': condition[0], 'value': condition[1], 'type': condition[2], 'match': condition[3]}\n    return json.dumps(condition_log)", "docstring": "Method to generate json for logging audience condition.\n\nArgs:\nindex: Index of the condition.\n\nReturns:\nString: Audience condition JSON.", "source": "codesearchnet"}
{"code": "def JsonDumpAndFlush(data, fp):\n    json.dump(data, fp)\n    fp.flush()", "docstring": "Write the dictionary `data` to a JSON file `fp` (and flush).\n\nArgs:\ndata: in a dictionary that is JSON serializable.\nfp: File-like object", "source": "github-repos"}
{"code": "def get_modifier_from_signature(self, modifier_signature):\n        \n        return next((m for m in self.modifiers if m.full_name == modifier_signature), None)", "docstring": "Return a modifier from a signature\nArgs:\nmodifier_name (str): signature of the modifier\nReturns:\nModifier", "source": "juraj-google-style"}
{"code": "def ion_or_solid_comp_object(formula):\n    m = re.search('\\\\[([^\\\\[\\\\]]+)\\\\]|\\\\(aq\\\\)', formula)\n    if m:\n        comp_obj = Ion.from_formula(formula)\n    elif re.search('\\\\(s\\\\)', formula):\n        comp_obj = Composition(formula[:(- 3)])\n    else:\n        comp_obj = Composition(formula)\n    return comp_obj", "docstring": "Returns either an ion object or composition object given\na formula.\n\nArgs:\nformula: String formula. Eg. of ion: NaOH(aq), Na[+];\nEg. of solid: Fe2O3(s), Fe(s), Na2O\n\nReturns:\nComposition/Ion object", "source": "codesearchnet"}
{"code": "def get_hgnc_id(gene_info, adapter):\n    hgnc_id = gene_info.get('hgnc_id')\n    hgnc_symbol = gene_info.get('hgnc_symbol')\n    true_id = None\n    if hgnc_id:\n        true_id = int(hgnc_id)\n    else:\n        gene_result = adapter.hgnc_genes(hgnc_symbol)\n        if (gene_result.count() == 0):\n            raise Exception('No gene could be found for {}'.format(hgnc_symbol))\n        for gene in gene_result:\n            if (hgnc_symbol.upper() == gene.hgnc_symbol.upper()):\n                true_id = gene.hgnc_id\n        if (not gene_info['hgnc_id']):\n            true_id = gene.hgnc_id\n    return true_id", "docstring": "Get the hgnc id for a gene\n\nThe proprity order will be\n1. if there is a hgnc id this one will be choosen\n2. if the hgnc symbol matches a genes proper hgnc symbol\n3. if the symbol ony matches aliases on several genes one will be\nchoosen at random\n\nArgs:\ngene_info(dict)\nadapter\n\nReturns:\ntrue_id(int)", "source": "codesearchnet"}
{"code": "def get_min_max_value(self) -> tuple[float, float]:\n    return self._get_min_max_value_by_expanding_range(self._num_bins", "docstring": "Finds min and max starting from the center index.\n\nThe HistogramMseSymmetric method starts from the center bin and expands the\nrange to both sides. This works better when the data is well-centered.\n\nReturns:\n(min_value, max_value): Min and max calculated using the method starting\nfrom center and expanding.", "source": "github-repos"}
{"code": "def set_atten(self, value):\n    self.attenuation_device.set_atten(self.idx, value)", "docstring": "This function sets the attenuation of Attenuator.\n\nArgs:\nvalue: This is a floating point value for nominal attenuation to be\nset. Unit is db.", "source": "github-repos"}
{"code": "def __init__(self, empty=True):\n        \n        super(ObjectTypeChecker, self).__init__(empty=empty)", "docstring": "Initialization method.\n\nArgs:\nempty (bool):", "source": "juraj-google-style"}
{"code": "def delete_document(project_id, knowledge_base_id, document_id):\n    \n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.DocumentsClient()\n    document_path = client.document_path(project_id, knowledge_base_id,\n                                         document_id)\n\n    response = client.delete_document(document_path)\n    print('operation running:\\n {}'.format(response.operation))\n    print('Waiting for results...')\n    print('Done.\\n {}'.format(response.result()))", "docstring": "Deletes a Document.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.\ndocument_id: Id of the Document.", "source": "juraj-google-style"}
{"code": "def install_json_params(self, ij=None):\n        \n        if self._install_json_params is None or ij is not None:\n            self._install_json_params = {}\n            \n            if ij is None:\n                ij = self.install_json\n            for p in ij.get('params') or []:\n                self._install_json_params.setdefault(p.get('name'), p)\n        return self._install_json_params", "docstring": "Return install.json params in a dict with name param as key.\n\nArgs:\nij (dict, optional): Defaults to None. The install.json contents.\n\nReturns:\ndict: A dictionary containing the install.json input params with name as key.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self,\n      hparams,\n      metrics,\n      user=None,\n      description=None,\n      time_created_secs=None,\n  ):\n    \n    self._hparams = list(hparams)\n    self._metrics = list(metrics)\n    self._user = user\n    self._description = description\n    if time_created_secs is None:\n      time_created_secs = time.time()\n    self._time_created_secs = time_created_secs", "docstring": "Create an experiment object.\n\nArgs:\nhparams: A list of `HParam` values.\nmetrics: A list of `Metric` values.\nuser: An optional string denoting the user or group that owns this\nexperiment.\ndescription: An optional Markdown string describing this\nexperiment.\ntime_created_secs: The time that this experiment was created, as\nseconds since epoch. Defaults to the current time.", "source": "juraj-google-style"}
{"code": "def autorotate(image, orientation=None):\n    orientation_value = (orientation if orientation else image._getexif().get(EXIF_KEYS.get('Orientation')))\n    if (orientation_value is None):\n        raise ImDirectException('No orientation available in Exif tag or given explicitly.')\n    if (orientation_value in (1, 2)):\n        i = image\n    elif (orientation_value in (3, 4)):\n        i = image.transpose(Image.ROTATE_180)\n    elif (orientation_value in (5, 6)):\n        i = image.transpose(Image.ROTATE_270)\n    elif (orientation_value in (7, 8)):\n        i = image.transpose(Image.ROTATE_90)\n    else:\n        i = image\n    if (orientation_value in (2, 4, 5, 7)):\n        i = i.transpose(Image.FLIP_LEFT_RIGHT)\n    return i", "docstring": "Rotate and return an image according to its Exif information.\n\nROTATION_NEEDED = {\n1: 0,\n2: 0 (Mirrored),\n3: 180,\n4: 180 (Mirrored),\n5: -90 (Mirrored),\n6: -90,\n7: 90 (Mirrored),\n8: 90,\n}\n\nArgs:\nimage (PIL.Image.Image): PIL image to rotate\norientation (): Optional orientation value in [1, 8]\n\nReturns:\nA :py:class:`~PIL.Image.Image` image.", "source": "codesearchnet"}
{"code": "def message_index(index_url):\n    \n    idx = csv.reader(urllib2.urlopen(index_url), delimiter=':')\n    messages = []\n    for line in idx:\n        messages.append(line)\n    return messages", "docstring": "get message index of components for urllib2.\n\nArgs:\nurl(string):\n\nReturns:\nlist: messages", "source": "juraj-google-style"}
{"code": "def _generate_input_signature(self, layer):\n    if isinstance(layer.call, def_function.Function) and layer.call.input_signature is not None:\n        return layer.call.input_signature\n    elif isinstance(layer, training_lib.Model):\n        return saving_utils.model_input_signature(layer)\n    elif layer.input_spec is not None and layer._use_input_spec_as_call_signature:\n\n        def to_tensor_spec_or_none(x):\n            spec = input_spec.to_tensor_spec(x, layer._compute_dtype)\n            if spec.shape == tensor_shape.TensorShape(None):\n                return None\n            return spec\n        input_signature = [nest.map_structure(to_tensor_spec_or_none, layer.input_spec)]\n        return input_signature\n    else:\n        return None", "docstring": "Inspects layer object and returns the inferred input signature.\n\nArgs:\nlayer: Layer object.\n\nReturns:\nList of possibly nested TensorSpecs of the layer call function inputs.\nThe list does not contain the `training` argument.", "source": "github-repos"}
{"code": "def _ParseValueData(self, knowledge_base, value_data):\n    \n    if not isinstance(value_data, py2to3.UNICODE_TYPE):\n      raise errors.PreProcessFail(\n          'Unsupported Windows Registry value type: {0:s} for '\n          'artifact: {1:s}.'.format(\n              type(value_data), self.ARTIFACT_DEFINITION_NAME))\n\n    \n    codepage = 'cp{0:s}'.format(value_data)\n\n    if not knowledge_base.codepage:\n      try:\n        knowledge_base.SetCodepage(codepage)\n      except ValueError:\n        \n        pass", "docstring": "Parses Windows Registry value data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nvalue_data (object): Windows Registry value data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def set_config_files_(self, *config_files):\n    self._config_files = tuple((pathlib.Path(path) for path in config_files))", "docstring": "Set the list of config files.\n\nArgs:\nconfig_files (pathlike): path of config files, given in the order\nof reading.", "source": "codesearchnet"}
{"code": "def main_process_first(self, local=True, desc='work'):\n    if is_torch_available() and self.world_size > 1:\n        main_process_desc = 'main local process' if local else 'main process'\n        if self.distributed_state is not None:\n            is_main_process = self.distributed_state.is_local_main_process if local else self.distributed_state.is_main_process\n        elif is_sagemaker_mp_enabled():\n            is_main_process = smp.rank() == 0\n        try:\n            if not is_main_process:\n                logger.debug(f'{self.process_index}: waiting for the {main_process_desc} to perform {desc}')\n                if is_torch_xla_available():\n                    xm.rendezvous(desc)\n                else:\n                    dist.barrier()\n            yield\n        finally:\n            if is_main_process:\n                logger.debug(f'{self.process_index}: {main_process_desc} completed {desc}, releasing all replicas')\n                if is_torch_xla_available():\n                    xm.rendezvous(desc)\n                else:\n                    dist.barrier()\n    else:\n        yield", "docstring": "A context manager for torch distributed environment where on needs to do something on the main process, while\nblocking replicas, and when it's finished releasing the replicas.\n\nOne such use is for `datasets`'s `map` feature which to be efficient should be run once on the main process,\nwhich upon completion saves a cached version of results and which then automatically gets loaded by the\nreplicas.\n\nArgs:\nlocal (`bool`, *optional*, defaults to `True`):\nif `True` first means process of rank 0 of each node if `False` first means process of rank 0 of node\nrank 0 In multi-node environment with a shared filesystem you most likely will want to use\n`local=False` so that only the main process of the first node will do the processing. If however, the\nfilesystem is not shared, then the main process of each node will need to do the processing, which is\nthe default behavior.\ndesc (`str`, *optional*, defaults to `\"work\"`):\na work description to be used in debug logs", "source": "github-repos"}
{"code": "def peek(self, size=-1):\n        \n        if not self._readable:\n            raise UnsupportedOperation('read')\n\n        with self._seek_lock:\n            self._raw.seek(self._seek)\n            return self._raw._peek(size)", "docstring": "Return bytes from the stream without advancing the position.\n\nArgs:\nsize (int): Number of bytes to read. -1 to read the full\nstream.\n\nReturns:\nbytes: bytes read", "source": "juraj-google-style"}
{"code": "def joinNetwork(self, eRoleId):\n        \n        print '%s call joinNetwork' % self.port\n        print eRoleId\n\n        self.deviceRole = eRoleId\n        mode = 15\n        try:\n            if ModuleHelper.LeaderDutChannelFound:\n                self.channel = ModuleHelper.Default_Channel\n\n            \n            \n            if eRoleId == Thread_Device_Role.Leader:\n                print 'join as leader'\n                \n                mode = 15\n                if self.AutoDUTEnable is False:\n                    \n                    self.__setRouterDowngradeThreshold(33)\n            elif eRoleId == Thread_Device_Role.Router:\n                print 'join as router'\n                \n                mode = 15\n                if self.AutoDUTEnable is False:\n                    \n                    self.__setRouterDowngradeThreshold(33)\n            elif eRoleId == Thread_Device_Role.SED:\n                print 'join as sleepy end device'\n                \n                mode = 4\n                self.setPollingRate(self.sedPollingRate)\n            elif eRoleId == Thread_Device_Role.EndDevice:\n                print 'join as end device'\n                \n                mode = 13\n            elif eRoleId == Thread_Device_Role.REED:\n                print 'join as REED'\n                \n                mode = 15\n                \n                self.__setRouterUpgradeThreshold(0)\n            elif eRoleId == Thread_Device_Role.EndDevice_FED:\n                \n                print 'join as FED'\n                \n                mode = 15\n                \n                self.__setRouterUpgradeThreshold(0)\n            elif eRoleId == Thread_Device_Role.EndDevice_MED:\n                print 'join as MED'\n                \n                mode = 13\n            else:\n                pass\n\n            \n            self.__setDeviceMode(mode)\n            self.__setKeySwitchGuardTime(0)  \n            time.sleep(0.1)\n            \n            self.__startOpenThreadWpan()\n            time.sleep(3)\n            return True\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('joinNetwork() Error: ' + str(e))", "docstring": "make device ready to join the Thread Network with a given role\n\nArgs:\neRoleId: a given device role id\n\nReturns:\nTrue: ready to set Thread Network parameter for joining desired Network", "source": "juraj-google-style"}
{"code": "def get_host(self):\n    if (hasattr(self, 'host') and self.host):\n        return Host(self.rest_client.make_request(self.host), self.rest_client)", "docstring": "Get resource this operator is currently executing in.\nIf the operator is running on an externally\nmanaged resource ``None`` is returned.\n\nReturns:\nHost: Resource this operator is running on.\n\n.. versionadded:: 1.9", "source": "codesearchnet"}
{"code": "def clean(self, settings):\n    return {k: v for (k, v) in settings.items() if (k in DEFAULT_SETTINGS)}", "docstring": "Filter given settings to keep only key names available in\n``DEFAULT_SETTINGS``.\n\nArgs:\nsettings (dict): Loaded settings.\n\nReturns:\ndict: Settings object filtered.", "source": "codesearchnet"}
{"code": "def erase(self):\n        \n        try:\n            \n            \n            if not self.halted():\n                self.halt()\n        except errors.JLinkException:\n            \n            pass\n\n        res = self._dll.JLINK_EraseChip()\n        if res < 0:\n            raise errors.JLinkEraseException(res)\n\n        return res", "docstring": "Erases the flash contents of the device.\n\nThis erases the flash memory of the target device.  If this method\nfails, the device may be left in an inoperable state.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nNumber of bytes erased.", "source": "juraj-google-style"}
{"code": "def __init__(self, default_alpha=1, default_beta=1):\n        \n        if isinstance(default_alpha, int) is False:\n            if isinstance(default_alpha, float) is False:\n                raise TypeError()\n        if isinstance(default_beta, int) is False:\n            if isinstance(default_beta, float) is False:\n                raise TypeError()\n\n        if default_alpha <= 0:\n            raise ValueError()\n        if default_beta <= 0:\n            raise ValueError()\n\n        self.__success += 0\n        self.__failure += 0\n        self.__default_alpha = default_alpha\n        self.__default_beta = default_beta", "docstring": "Initialization\n\nArgs:\ndefault_alpha:      Alpha\ndefault_beta:       Beta", "source": "juraj-google-style"}
{"code": "def get_position_encoding(\n    length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):\n  \n  position = tf.to_float(tf.range(length))\n  num_timescales = hidden_size \n  log_timescale_increment = (\n      math.log(float(max_timescale) / float(min_timescale)) /\n      (tf.to_float(num_timescales) - 1))\n  inv_timescales = min_timescale * tf.exp(\n      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n  scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)\n  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\n  return signal", "docstring": "Return positional encoding.\n\nCalculates the position encoding as a mix of sine and cosine functions with\ngeometrically increasing wavelengths.\nDefined and formulized in Attention is All You Need, section 3.5.\n\nArgs:\nlength: Sequence length.\nhidden_size: Size of the\nmin_timescale: Minimum scale that will be applied at each position\nmax_timescale: Maximum scale that will be applied at each position\n\nReturns:\nTensor with shape [length, hidden_size]", "source": "juraj-google-style"}
{"code": "def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS, module_name=None, **args):\n    DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), flag_values, module_name)", "docstring": "Registers a generic MultiFlag that parses its args with a given parser.\n\nAuxiliary function.  Normal users should NOT use it directly.\n\nDevelopers who need to create their own 'Parser' classes for options\nwhich can appear multiple times can call this module function to\nregister their flags.\n\nArgs:\nparser: ArgumentParser that is used to parse the flag arguments.\nserializer: ArgumentSerializer that serializes the flag value.\nname: A string, the flag name.\ndefault: The default value of the flag.\nhelp: A help string.\nflag_values: FlagValues object with which the flag will be registered.\nmodule_name: A string, the name of the Python module declaring this flag.\nIf not provided, it will be computed using the stack trace of this call.\n**args: Dictionary with extra keyword args that are passed to the\nFlag __init__.", "source": "codesearchnet"}
{"code": "def db_dp020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_dp020`'.format(value))\n    self._db_dp020 = value", "docstring": "Corresponds to IDD Field `db_dp020`\nmean coincident dry-bulb temperature to\nDew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_dp020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def padding_to_length(padding):\n  \n  non_padding = 1.0 - padding\n  return tf.to_int32(tf.reduce_sum(non_padding, axis=-1))", "docstring": "Calculate the length of mask based on padding.\n\nArgs:\npadding: a Tensor with shape [..., length].\nReturns:\na Tensor with shape [...].", "source": "juraj-google-style"}
{"code": "def write_data(worksheet, data):\n    if (not data):\n        return\n    if isinstance(data, list):\n        rows = data\n    else:\n        rows = [data]\n    if isinstance(rows[0], dict):\n        keys = get_keys(rows)\n        worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])\n        for row in rows:\n            values = [get_value_from_row(row, key) for key in keys]\n            worksheet.append(values)\n    elif isinstance(rows[0], list):\n        for row in rows:\n            values = [utilities.normalize_cell_value(value) for value in row]\n            worksheet.append(values)\n    else:\n        for row in rows:\n            worksheet.append([utilities.normalize_cell_value(row)])", "docstring": "Writes data into worksheet.\n\nArgs:\nworksheet: worksheet to write into\ndata: data to be written", "source": "codesearchnet"}
{"code": "def get_sql_statement_with_environment(item, args=None):\n    \n    if isinstance(item, basestring):\n      item = _sql_statement.SqlStatement(item)\n    elif not isinstance(item, _sql_statement.SqlStatement):\n      item = SqlModule.get_default_query_from_module(item)\n      if not item:\n        raise Exception('Expected a SQL statement or module but got %s' % str(item))\n\n    env = {}\n    if item.module:\n      env.update(item.module.__dict__)\n      parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)\n      if parser:\n        args = SqlModule._get_sql_args(parser, args=args)\n      else:\n        args = None\n\n    if isinstance(args, dict):\n      env.update(args)\n\n    return item, env", "docstring": "Given a SQLStatement, string or module plus command line args or a dictionary,\nreturn a SqlStatement and final dictionary for variable resolution.\n\nArgs:\nitem: a SqlStatement, %%sql module, or string containing a query.\nargs: a string of command line arguments or a dictionary of values.\n\nReturns:\nA SqlStatement for the query or module, plus a dictionary of variable values to use.", "source": "juraj-google-style"}
{"code": "class EfficientNetFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n    rescale_offset: bool\n    include_top: bool", "docstring": "Args:\nrescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):\nWhether to rescale the image between [-max_range/2, scale_range/2] instead of [0, scale_range].\ninclude_top (`bool`, *optional*, defaults to `self.include_top`):\nNormalize the image again with the standard deviation only for image classification if set to True.", "source": "github-repos"}
{"code": "def _merge_partition_lists(partition_lists):\n    dst = list(partition_lists[0])\n    for src in partition_lists[1:]:\n        if len(src) != len(dst):\n            raise ValueError('All ragged inputs must have the same ragged_rank.')\n        for i in range(len(dst)):\n            dst[i] = dst[i]._merge_precomputed_encodings(src[i])\n    return dst", "docstring": "Merges the given list of lists of RowPartitions.\n\nArgs:\npartition_lists: A list of lists of RowPartition.\n\nReturns:\nA list of RowPartitions, where `result[i]` is formed by merging\n`partition_lists[j][i]` for all `j`, using\n`RowPartition._merge_precomputed_encodings`.", "source": "github-repos"}
{"code": "def available_readers(as_dict=False):\n    readers = []\n    for reader_configs in configs_for_reader():\n        try:\n            reader_info = read_reader_config(reader_configs)\n        except (KeyError, IOError, yaml.YAMLError):\n            LOG.warning('Could not import reader config from: %s', reader_configs)\n            LOG.debug('Error loading YAML', exc_info=True)\n            continue\n        readers.append((reader_info if as_dict else reader_info['name']))\n    return readers", "docstring": "Available readers based on current configuration.\n\nArgs:\nas_dict (bool): Optionally return reader information as a dictionary.\nDefault: False\n\nReturns: List of available reader names. If `as_dict` is `True` then\na list of dictionaries including additionally reader information\nis returned.", "source": "codesearchnet"}
{"code": "def _satisfied_at_timestamp(self, device_name, pending, timestamp, start_i=0):\n    if not pending:\n        return True\n    for datum in self._dump_tensor_data[device_name][start_i:]:\n        if datum.timestamp > timestamp:\n            break\n        if datum.timestamp == timestamp and (datum.node_name, datum.output_slot) in pending:\n            pending.remove((datum.node_name, datum.output_slot))\n            if not pending:\n                return True\n    return not pending", "docstring": "Determine whether pending inputs are satisfied at given timestamp.\n\nNote: This method mutates the input argument \"pending\".\n\nArgs:\ndevice_name: (str) device name.\npending: A list of 2-tuple (node_name, output_slot): the dependencies to\ncheck.\ntimestamp: (int) the timestamp in question.\nstart_i: (int) the index in self._dump_tensor_data to start searching for\nthe timestamp.\n\nReturns:\n(bool) Whether all the dependencies in pending are satisfied at the\ntimestamp. If pending is empty to begin with, return True.", "source": "github-repos"}
{"code": "def exec_inspect(self, exec_id):\n        \n        if isinstance(exec_id, dict):\n            exec_id = exec_id.get('Id')\n        res = self._get(self._url(\"/exec/{0}/json\", exec_id))\n        return self._result(res, True)", "docstring": "Return low-level information about an exec command.\n\nArgs:\nexec_id (str): ID of the exec instance\n\nReturns:\n(dict): Dictionary of values returned by the endpoint.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def kms_encrypt(value, key, aws_config=None):\n    \n    aws_config = aws_config or {}\n    aws = boto3.session.Session(**aws_config)\n    client = aws.client('kms')\n    enc_res = client.encrypt(KeyId=key,\n                             Plaintext=value)\n    return n(b64encode(enc_res['CiphertextBlob']))", "docstring": "Encrypt and value with KMS key.\n\nArgs:\nvalue (str): value to encrypt\nkey (str): key id or alias\naws_config (optional[dict]): aws credentials\ndict of arguments passed into boto3 session\nexample:\naws_creds = {'aws_access_key_id': aws_access_key_id,\n'aws_secret_access_key': aws_secret_access_key,\n'region_name': 'us-east-1'}\n\nReturns:\nstr: encrypted cipher text", "source": "juraj-google-style"}
{"code": "class TimesFmOutputForPrediction(BaseModelOutput):\n    mean_predictions: Optional[torch.Tensor] = None\n    full_predictions: Optional[torch.Tensor] = None\n    loss: Optional[Union[torch.Tensor, float]] = None", "docstring": "Args:\nmean_predictions (`torch.Tensor` of shape `(batch_size, sequence_length)`):\nThe mean predictions of the time series.\nfull_predictions (`torch.Tensor` of shape `(batch_size, sequence_length)`):\nThe full predictions of the time series including the mean and the quantiles.\nloss (`torch.Tensor` of shape `(1,)`, *optional*, returned when `future_values` is provided):\nThe loss of the TimesFM model.", "source": "github-repos"}
{"code": "def write_xls(data, file_name, worksheet_names=None):\n    workbook = xlwt.Workbook()\n    for (sheet_index, sheet_data) in enumerate(data):\n        if (worksheet_names and (sheet_index < len(worksheet_names)) and worksheet_names[sheet_index]):\n            name = worksheet_names[sheet_index]\n        else:\n            name = 'Worksheet {}'.format(sheet_index)\n        sheet = workbook.add_sheet(name)\n        for (row_index, row) in enumerate(sheet_data):\n            for (col_index, value) in enumerate(row):\n                sheet.write(row_index, col_index, value)\n    workbook.save(file_name)", "docstring": "Writes out to old excel format.\n\nArgs:\ndata: 2D list of tables/worksheets.\nfile_name: Name of the output file.\nworksheet_names: A list of worksheet names (optional).", "source": "codesearchnet"}
{"code": "def on_binlog(event, stream):\n    \n    rows, meta = _rows_event_to_dict(event, stream)\n\n    table_name = '%s.%s' % (meta['schema'], meta['table'])\n\n    if meta['action'] == 'insert':\n        sig = signals.rows_inserted\n    elif meta['action'] == 'update':\n        sig = signals.rows_updated\n    elif meta['action'] == 'delete':\n        sig = signals.rows_deleted\n    else:\n        raise RuntimeError('Invalid action \"%s\"' % meta['action'])\n\n    sig.send(table_name, rows=rows, meta=meta)", "docstring": "Process on a binlog event\n\n1. Convert event instance into a dict\n2. Send corresponding schema/table/signals\n\nArgs:\nevent (pymysqlreplication.row_event.RowsEvent): the event", "source": "juraj-google-style"}
{"code": "def conv_stack(name, x, mid_channels, output_channels, dilations=None, activation='relu', dropout=0.0):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        x = conv_block('conv_block', x, mid_channels=mid_channels, dilations=dilations, activation=activation, dropout=dropout)\n        x = conv('zeros', x, apply_actnorm=False, conv_init='zeros', output_channels=output_channels, dilations=dilations)\n    return x", "docstring": "3-layer convolutional stack.\n\nArgs:\nname: variable scope.\nx: 5-D Tensor.\nmid_channels: Number of output channels of the first layer.\noutput_channels: Number of output channels.\ndilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.\nBy default, apply no dilations.\nactivation: relu or gatu.\nIf relu, the second layer is relu(W*x)\nIf gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)\ndropout: float, 0.0\nReturns:\noutput: output of 3 layer conv network.", "source": "codesearchnet"}
{"code": "def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':\n    scope = x.scope.as_list()\n    batch = x.batch\n    return TensorFluent(tf.stop_gradient(x.tensor), scope, batch)", "docstring": "Returns a copy of the input fluent with stop_gradient at tensor level.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent that stops backpropagation of gradient computations.", "source": "codesearchnet"}
{"code": "def from_model_config(cls, model_config: PretrainedConfig) -> 'GenerationConfig':\n    config_dict = model_config.to_dict()\n    config_dict.pop('_from_model_config', None)\n    config_dict = {key: value for key, value in config_dict.items() if value is not None}\n    generation_config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)\n    decoder_config = model_config.get_text_config(decoder=True)\n    if decoder_config is not model_config:\n        default_generation_config = GenerationConfig()\n        decoder_config_dict = decoder_config.to_dict()\n        for attr in generation_config.to_dict().keys():\n            is_unset = getattr(generation_config, attr) == getattr(default_generation_config, attr)\n            if attr in decoder_config_dict and is_unset:\n                setattr(generation_config, attr, decoder_config_dict[attr])\n    if generation_config.return_dict_in_generate is False:\n        if any((getattr(generation_config, extra_output_flag, False) for extra_output_flag in generation_config.extra_output_flags)):\n            generation_config.return_dict_in_generate = True\n    generation_config._original_object_hash = hash(generation_config)\n    return generation_config", "docstring": "Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy\n[`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].\n\nArgs:\nmodel_config (`PretrainedConfig`):\nThe model config that will be used to instantiate the generation config.\n\nReturns:\n[`GenerationConfig`]: The configuration object instantiated from those parameters.", "source": "github-repos"}
{"code": "def _refresh(self, http):\n    if (not self.store):\n        self._do_refresh_request(http)\n    else:\n        self.store.acquire_lock()\n        try:\n            new_cred = self.store.locked_get()\n            if (new_cred and (not new_cred.invalid) and (new_cred.access_token != self.access_token) and (not new_cred.access_token_expired)):\n                logger.info('Updated access_token read from Storage')\n                self._updateFromCredential(new_cred)\n            else:\n                self._do_refresh_request(http)\n        finally:\n            self.store.release_lock()", "docstring": "Refreshes the access_token.\n\nThis method first checks by reading the Storage object if available.\nIf a refresh is still needed, it holds the Storage lock until the\nrefresh is completed.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\n\nRaises:\nHttpAccessTokenRefreshError: When the refresh fails.", "source": "codesearchnet"}
{"code": "def __clone_function(f, name=None):\n    \n    if not isinstance(f, types.FunctionType):\n        raise SimTypeError('Given parameter is not a function.')\n    if name is None:\n        name = f.__name__\n    newglobals = f.__globals__.copy()\n    globals_used = [x for x in f.__globals__ if x in f.__code__.co_names]\n    for x in globals_used:\n        gv = f.__globals__[x]\n        if isinstance(gv, types.FunctionType):\n            \n            newglobals[x] = __clone_function(gv)\n        elif isinstance(gv, types.ModuleType):\n            newglobals[x] = gv\n        else:\n            \n            newglobals[x] = copy.deepcopy(gv)\n    newfunc = types.FunctionType(\n        f.__code__, newglobals, name, f.__defaults__, f.__closure__)\n    return newfunc", "docstring": "Make a new version of a function that has its own independent copy\nof any globals that it uses directly, and has its own name.\nAll other attributes are assigned from the original function.\n\nArgs:\nf: the function to clone\nname (str):  the name for the new function (if None, keep the same name)\n\nReturns:\nA copy of the function f, having its own copy of any globals used\n\nRaises:\nSimValueError", "source": "juraj-google-style"}
{"code": "def supply(self, issuer):\n        \n        def _retrieve_jwks():\n            \n            jwks_uri = self._key_uri_supplier.supply(issuer)\n\n            if not jwks_uri:\n                raise UnauthenticatedException(u\"Cannot find the `jwks_uri` for issuer \"\n                                               u\"%s: either the issuer is unknown or \"\n                                               u\"the OpenID discovery failed\" % issuer)\n\n            try:\n                response = requests.get(jwks_uri)\n                json_response = response.json()\n            except Exception as exception:\n                message = u\"Cannot retrieve valid verification keys from the `jwks_uri`\"\n                raise UnauthenticatedException(message, exception)\n\n            if u\"keys\" in json_response:\n                \n                jwks_keys = jwk.KEYS()\n                jwks_keys.load_jwks(response.text)\n                return jwks_keys._keys\n            else:\n                \n                \n                \n                return _extract_x509_certificates(json_response)\n\n        return self._jwks_cache.get_or_create(issuer, _retrieve_jwks)", "docstring": "Supplies the `Json Web Key Set` for the given issuer.\n\nArgs:\nissuer: the issuer.\n\nReturns:\nThe successfully retrieved Json Web Key Set. None is returned if the\nissuer is unknown or the retrieval process fails.\n\nRaises:\nUnauthenticatedException: When this method cannot supply JWKS for the\ngiven issuer (e.g. unknown issuer, HTTP request error).", "source": "juraj-google-style"}
{"code": "def _get_attribute(self, offset):\n    attr_type = self.get_uint_le(offset)\n    length = self.get_uint_le((offset + 4))\n    data = self.get_chunk(offset, length)\n    return MftAttr.factory(attr_type, data)", "docstring": "Determines attribute type at the offset and returns \\\ninitialized attribute object.\n\nReturns:\nMftAttr: One of the attribute objects \\\n(eg. :class:`~.mft_attribute.MftAttrFilename`).\nNone: If atttribute type does not mach any one of the supported \\\nattribute types.", "source": "codesearchnet"}
{"code": "def _pyval_update_fields(pyval, fields, depth):\n    if not isinstance(pyval, (dict, list, tuple)):\n        raise ValueError('Expected dict or nested list/tuple of dict')\n    for key, target in fields.items():\n        for _ in range(1, depth):\n            target = target[-1]\n        target.append(pyval[key] if isinstance(pyval, dict) else [])\n    if isinstance(pyval, (list, tuple)):\n        for child in pyval:\n            _pyval_update_fields(child, fields, depth + 1)", "docstring": "Append the field values from `pyval` to `fields`.\n\nArgs:\npyval: A python `dict`, or nested list/tuple of `dict`, whose value(s)\nshould be appended to `fields`.\nfields: A dictionary mapping string keys to field values.  Field values\nextracted from `pyval` are appended to this dictionary's values.\ndepth: The depth at which `pyval` should be appended to the field values.", "source": "github-repos"}
{"code": "def forward(ctx, x, k, percentile_mode, scale):\n    zero_point = torch.tensor(0.0, device=scale.device)\n    n = 2 ** (k - 1) - 1\n    new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)\n    new_quant_x = torch.clamp(new_quant_x, -n, n - 1)\n    ctx.scale = scale\n    return new_quant_x", "docstring": "Args:\nx (`torch.Tensor`):\nFloating point tensor to be quantized.\nk (`int`):\nQuantization bitwidth.\npercentile_mode (`bool`):\nWhether or not to use percentile calibration.\nscale (`torch.Tensor`):\nPre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction\nrequires pre-calculated scaling factor.\n\nReturns:\n`torch.Tensor`: Symmetric-quantized value of *input*.", "source": "github-repos"}
{"code": "def list_classes(mod_name):\n    mod = sys.modules[mod_name]\n    return [cls.__name__ for cls in mod.__dict__.values() if is_mod_class(mod, cls)]", "docstring": "Lists all classes declared in a module.\n\nArgs:\nmod_name: the module name\nReturns:\nA list of functions declared in that module.", "source": "codesearchnet"}
{"code": "def data(self, value):\n        \n        if value == self._defaults['data'] and 'data' in self._values:\n            del self._values['data']\n        else:\n            self._values['data'] = value", "docstring": "The data property.\n\nArgs:\nvalue (object). the property value.", "source": "juraj-google-style"}
{"code": "def charge_balance(model):\n    compound_charge = {}\n    for compound in model.compounds:\n        if (compound.charge is not None):\n            compound_charge[compound.id] = compound.charge\n    for reaction in model.reactions:\n        charge = reaction_charge(reaction.equation, compound_charge)\n        (yield (reaction, charge))", "docstring": "Calculate the overall charge for all reactions in the model.\n\nYield (reaction, charge) pairs.\n\nArgs:\nmodel: :class:`psamm.datasource.native.NativeModel`.", "source": "codesearchnet"}
{"code": "def _parse_single_sequence_example_raw(serialized, context, feature_list, debug_name, name=None):\n    with ops.name_scope(name, 'ParseSingleExample', [serialized, debug_name]):\n        serialized = ops.convert_to_tensor(serialized, name='serialized')\n        serialized = _assert_scalar(serialized, 'serialized')\n    return _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name)[:2]", "docstring": "Parses a single `SequenceExample` proto.\n\nArgs:\nserialized: A scalar (0-D Tensor) of type string, a single binary serialized\n`SequenceExample` proto.\ncontext: A `ParseOpParams` containing the parameters for the parse op for\nthe context features.\nfeature_list: A `ParseOpParams` containing the parameters for the parse op\nfor the feature_list features.\ndebug_name: A scalar (0-D Tensor) of strings (optional), the name of the\nserialized proto.\nname: A name for this operation (optional).\n\nReturns:\nA tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.\nThe first dict contains the context key/values.\nThe second dict contains the feature_list key/values.\n\nRaises:\nTypeError: if feature_list.dense_defaults is not either None or a dict.", "source": "github-repos"}
{"code": "def settings_view_for_block(block_wrapper, settings_view_factory):\n    state_root_hash = (block_wrapper.state_root_hash if (block_wrapper is not None) else None)\n    return settings_view_factory.create_settings_view(state_root_hash)", "docstring": "Returns the settings view for an arbitrary block.\n\nArgs:\nblock_wrapper (BlockWrapper): The block for which a settings\nview is to be returned\nsettings_view_factory (SettingsViewFactory): The settings\nview factory used to create the SettingsView object\n\nReturns:\nSettingsView object associated with the block", "source": "codesearchnet"}
{"code": "def get_servo_status(self):\n        \n        data = []\n        data.append(0x09)\n        data.append(self.servoid)\n        data.append(RAM_READ_REQ)\n        data.append(STATUS_ERROR_RAM)\n        data.append(BYTE1)\n        send_data(data)\n\n        rxdata = []\n        try:\n            rxdata = SERPORT.read(12)\n            return ord(rxdata[9])&0xFF\n        except:\n            raise HerkulexError(\"could not communicate with motors\")", "docstring": "Get the error status of servo\n\nThis function gets the  error status (if any) of the servo\n\nArgs:\nnone\n\nReturns:\nint:  an integer corresponding to the servo status\n* refer datasheet", "source": "juraj-google-style"}
{"code": "def _load_client_secrets(filename):\n    \n    client_type, client_info = clientsecrets.loadfile(filename)\n\n    if client_type != clientsecrets.TYPE_WEB:\n        raise ValueError(\n            'The flow specified in {} is not supported, only the WEB flow '\n            'type  is supported.'.format(client_type))\n    return client_info['client_id'], client_info['client_secret']", "docstring": "Loads client secrets from the given filename.\n\nArgs:\nfilename: The name of the file containing the JSON secret key.\n\nReturns:\nA 2-tuple, the first item containing the client id, and the second\nitem containing a client secret.", "source": "juraj-google-style"}
{"code": "def to_insert(table, d):\n    \n\n    columns = []\n    args = []\n    for key, val in d.items():\n        columns.append('\"{}\"'.format(key))\n        args.append(val)\n    stmt = 'insert into {table} ({columns}) values ({params})'.format(\n        table=table,\n        columns=', '.join(columns),\n        params=', '.join(['?'] * len(columns)))\n    return (stmt, args)", "docstring": "Generate an insert statement using the given table and dictionary.\n\nArgs:\ntable (str): table name\nd (dict): dictionary with column names as keys and values as values.\nReturns:\ntuple of statement and arguments\n\n>>> to_insert('doc.foobar', {'name': 'Marvin'})\n('insert into doc.foobar (\"name\") values (?)', ['Marvin'])", "source": "juraj-google-style"}
{"code": "class TFConv1D(keras.layers.Layer):\n\n    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):\n        super().__init__(**kwargs)\n        self.nf = nf\n        self.nx = nx\n        self.initializer_range = initializer_range\n\n    def build(self, input_shape):\n        if self.built:\n            return\n        self.built = True\n        self.weight = self.add_weight('weight', shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range))\n        self.bias = self.add_weight('bias', shape=[1, self.nf], initializer=tf.zeros_initializer())\n\n    def call(self, x):\n        bz, sl = shape_list(x)[:2]\n        x = tf.reshape(x, [-1, self.nx])\n        x = tf.matmul(x, self.weight) + self.bias\n        x = tf.reshape(x, [bz, sl, self.nf])\n        return x", "docstring": "1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).\n\nBasically works like a linear layer but the weights are transposed.\n\nArgs:\nnf (`int`):\nThe number of output features.\nnx (`int`):\nThe number of input features.\ninitializer_range (`float`, *optional*, defaults to 0.02):\nThe standard deviation to use to initialize the weights.\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional keyword arguments passed along to the `__init__` of `keras.layers.Layer`.", "source": "github-repos"}
{"code": "def forward(self, x, name='forward'):\n    return self._call_forward(x, name)", "docstring": "Returns the forward `Bijector` evaluation, i.e., X = g(Y).\n\nArgs:\nx: `Tensor`. The input to the \"forward\" evaluation.\nname: The name to give this op.\n\nReturns:\n`Tensor`.\n\nRaises:\nTypeError: if `self.dtype` is specified and `x.dtype` is not\n`self.dtype`.\nNotImplementedError: if `_forward` is not implemented.", "source": "github-repos"}
{"code": "def __init__(self, client, dag_name):\n        \n        self._client = client\n        self._dag_name = dag_name", "docstring": "Initialise the task signal convenience class.\n\nArgs:\nclient (Client): A reference to a signal client object.\ndag_name (str): The name of the dag the task belongs to.", "source": "juraj-google-style"}
{"code": "def _testCompareToExplicitDerivative(self, dtype):\n    delta = 0.001\n    np_dtype = dtype.as_numpy_dtype\n    try:\n        from scipy import differentiate\n        from scipy import special\n        alpha_val = np.logspace(-2, 3, dtype=np_dtype)\n        alpha = constant_op.constant(alpha_val)\n        sample = random_ops.random_gamma([], alpha, np_dtype(1.0), dtype=dtype, seed=12345)\n        actual = gradients_impl.gradients(sample, alpha)[0]\n        sample_val, actual_val = self.evaluate((sample, actual))\n        u = special.gammainc(alpha_val, sample_val)\n        expected_val = differentiate.derivative(special.gammaincinv, alpha_val, args=(u,), initial_step=delta * alpha_val, order=2, preserve_shape=True).df\n        self.assertAllClose(actual_val, expected_val, rtol=0.001, atol=0.001)\n    except ImportError as e:\n        tf_logging.warn('Cannot use special functions in a test: %s' % str(e))", "docstring": "Compare to the explicit reparameterization derivative.\n\nVerifies that the computed derivative satisfies\ndsample / dalpha = d igammainv(alpha, u) / dalpha,\nwhere u = igamma(alpha, sample).\n\nArgs:\ndtype: TensorFlow dtype to perform the computations in.", "source": "github-repos"}
{"code": "def send_client_cmd(self, data, cmd=None, via_queue=None):\n        \n        mq_channel = self._connect_mq()\n        if cmd:\n            data['cmd'] = cmd\n        if via_queue:\n            mq_channel.basic_publish(exchange='',\n                                     routing_key=via_queue,\n                                     body=json.dumps(data))\n        else:\n            mq_channel.basic_publish(exchange=self.prv_exchange,\n                                     routing_key='',\n                                     body=json.dumps(data))", "docstring": "Send arbitrary cmd and data to client\n\nif queue name passed by \"via_queue\" parameter,\nthat queue will be used instead of users private exchange.\nArgs:\ndata: dict\ncmd: string\nvia_queue: queue name,", "source": "juraj-google-style"}
{"code": "def get_type_from_api_entity(self, api_entity):\n        \n        merged = self.group_types_data.copy()\n        merged.update(self.indicator_types_data)\n        print(merged)\n        for (key, value) in merged.items():\n            if value.get('apiEntity') == api_entity:\n                return key\n        return None", "docstring": "Returns the object type as a string given a api entity.\n\nArgs:\napi_entity:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _map_free_gates(layout, gates, coupling_map):\n    \n\n    blocked_qubits = set()\n\n    mapped_gates = []\n    remaining_gates = []\n\n    for gate in gates:\n        \n        \n        if not gate['partition']:\n            qubits = [n for n in gate['graph'].nodes() if n.type == 'op'][0].qargs\n\n            if not qubits:\n                continue\n\n            if blocked_qubits.intersection(qubits):\n                blocked_qubits.update(qubits)\n                remaining_gates.append(gate)\n            else:\n                mapped_gate = _transform_gate_for_layout(gate, layout)\n                mapped_gates.append(mapped_gate)\n            continue\n\n        qubits = gate['partition'][0]\n\n        if blocked_qubits.intersection(qubits):\n            blocked_qubits.update(qubits)\n            remaining_gates.append(gate)\n        elif len(qubits) == 1:\n            mapped_gate = _transform_gate_for_layout(gate, layout)\n            mapped_gates.append(mapped_gate)\n        elif coupling_map.distance(*[layout[q] for q in qubits]) == 1:\n            mapped_gate = _transform_gate_for_layout(gate, layout)\n            mapped_gates.append(mapped_gate)\n        else:\n            blocked_qubits.update(qubits)\n            remaining_gates.append(gate)\n\n    return mapped_gates, remaining_gates", "docstring": "Map all gates that can be executed with the current layout.\n\nArgs:\nlayout (Layout): Map from virtual qubit index to physical qubit index.\ngates (list): Gates to be mapped.\ncoupling_map (CouplingMap): CouplingMap for target device topology.\n\nReturns:\ntuple:\nmapped_gates (list): ops for gates that can be executed, mapped onto layout.\nremaining_gates (list): gates that cannot be executed on the layout.", "source": "juraj-google-style"}
{"code": "def get_template_parameters_file(template_full_path):\n    \n    for suffix in EFConfig.PARAMETER_FILE_SUFFIXES:\n      parameters_file = template_full_path.replace(\"/templates\", \"/parameters\") + suffix\n      if exists(parameters_file):\n        return parameters_file\n      else:\n        continue\n    return None", "docstring": "Checks for existance of parameters file against supported suffixes and returns parameters file path if found\nArgs:\ntemplate_full_path: full filepath for template file\nReturns:\nfilename of parameters file if it exists", "source": "juraj-google-style"}
{"code": "def write_uint32(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sI' % endian, value)", "docstring": "Pack the value as an unsigned integer and write 4 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def set_dataset_year_range(self, dataset_year, dataset_end_year=None):\n    if isinstance(dataset_year, int):\n        dataset_date = ('01/01/%d' % dataset_year)\n    elif isinstance(dataset_year, str):\n        dataset_date = ('01/01/%s' % dataset_year)\n    else:\n        raise hdx.data.hdxobject.HDXError(('dataset_year has type %s which is not supported!' % type(dataset_year).__name__))\n    if (dataset_end_year is None):\n        dataset_end_year = dataset_year\n    if isinstance(dataset_end_year, int):\n        dataset_end_date = ('31/12/%d' % dataset_end_year)\n    elif isinstance(dataset_end_year, str):\n        dataset_end_date = ('31/12/%s' % dataset_end_year)\n    else:\n        raise hdx.data.hdxobject.HDXError(('dataset_end_year has type %s which is not supported!' % type(dataset_end_year).__name__))\n    self.set_dataset_date(dataset_date, dataset_end_date)", "docstring": "Set dataset date as a range from year or start and end year.\n\nArgs:\ndataset_year (Union[str, int]): Dataset year given as string or int\ndataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def nic_b(msg):\n    tc = typecode(msg)\n    if ((tc < 9) or (tc > 18)):\n        raise RuntimeError(('%s: Not a airborne position message, expecting 8<TC<19' % msg))\n    msgbin = common.hex2bin(msg)\n    nic_b = int(msgbin[39])\n    return nic_b", "docstring": "Obtain NICb, navigation integrity category supplement-b\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: NICb number (0 or 1)", "source": "codesearchnet"}
{"code": "def _config_net_topology(self, conf):\n        \n        conf = self._init_net_specs(conf)\n        mgmts = self._select_mgmt_networks(conf)\n        self._validate_netconfig(conf)\n        allocated_subnets, conf = self._allocate_subnets(conf)\n        try:\n            self._add_mgmt_to_domains(conf, mgmts)\n            self._register_preallocated_ips(conf)\n            self._allocate_ips_to_nics(conf)\n            self._set_mtu_to_nics(conf)\n            self._add_dns_records(conf, mgmts)\n        except:\n            self._subnet_store.release(allocated_subnets)\n            raise\n        return conf", "docstring": "Initialize and populate all the network related elements, like\nreserving ips and populating network specs of the given confiiguration\nspec\n\nArgs:\nconf (dict): Configuration spec to initalize\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def getIndexGrid(self, name):\n    index_map = self.mapTableFile.indexMaps.filter_by(name=name).one()\n    gssha_pro_card = self.getCard('\n    if (gssha_pro_card is None):\n        raise ValueError('\n    with tmp_chdir(self.project_directory):\n        return GDALGrid(index_map.filename, gssha_pro_card.value.strip('\"').strip(\"'\"))", "docstring": "Returns GDALGrid object of index map\n\nParamters:\nname(str): Name of index map in 'cmt' file.\n\nReturns:\nGDALGrid", "source": "codesearchnet"}
{"code": "def set_countriesdata(cls, countries):\n        \n        \n        cls._countriesdata = dict()\n        cls._countriesdata['countries'] = dict()\n        cls._countriesdata['iso2iso3'] = dict()\n        cls._countriesdata['m49iso3'] = dict()\n        cls._countriesdata['countrynames2iso3'] = dict()\n        cls._countriesdata['regioncodes2countries'] = dict()\n        cls._countriesdata['regioncodes2names'] = dict()\n        cls._countriesdata['regionnames2codes'] = dict()\n        cls._countriesdata['aliases'] = dict()\n\n        for country in countries:\n            iso3 = country.get('\n            if not iso3:\n                continue\n            iso3 = iso3.upper()\n            cls._add_countriesdata(iso3, country)\n            cls._countriesdata['countries'][iso3] = country.dictionary\n\n        def sort_list(colname):\n            for idval in cls._countriesdata[colname]:\n                cls._countriesdata[colname][idval] = \\\n                    sorted(list(cls._countriesdata[colname][idval]))\n\n        sort_list('regioncodes2countries')", "docstring": "Set up countries data from data in form provided by UNStats and World Bank\n\nArgs:\ncountries (str): Countries data in HTML format provided by UNStats\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def qc_data(self, tests, alias=None):\n    r = {m: c.quality(tests, alias) for (m, c) in self.data.items()}\n    s = self.qc_curve_group(tests, alias=alias)\n    for (m, results) in r.items():\n        if (m in s):\n            results.update(s[m])\n    return r", "docstring": "Run a series of tests against the data and return the corresponding\nresults.\n\nArgs:\ntests (list): a list of functions.\n\nReturns:\nlist. The results. Stick to booleans (True = pass) or ints.", "source": "codesearchnet"}
{"code": "def project(self, n):\n    n = get_uvec(n)\n    return self.einsum_sequence(([n] * self.rank))", "docstring": "Convenience method for projection of a tensor into a\nvector.  Returns the tensor dotted into a unit vector\nalong the input n.\n\nArgs:\nn (3x1 array-like): direction to project onto\n\nReturns (float):\nscalar value corresponding to the projection of\nthe tensor into the vector", "source": "codesearchnet"}
{"code": "def _getScalesDiag(self,termx=0):\n        \n        assert self.P>1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'\n        assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'\n        assert termx<self.n_randEffs-1, 'VarianceDecomposition:: termx>=n_randEffs-1'\n        assert self.trait_covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initializaiton not posible for such a parametrization'\n        assert self.trait_covar_type[termx] not in ['lowrank','block','fixed'], 'VarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'\n        scales = []\n        res = self._getH2singleTrait(self.vd.getTerm(termx).getK())\n        scaleg = sp.sqrt(res['varg'].mean())\n        scalen = sp.sqrt(res['varn'].mean())\n        for term_i in range(self.n_randEffs):\n            if term_i==termx:\n                _scales = scaleg*self.diag[term_i]\n            elif term_i==self.noisPos:\n                _scales = scalen*self.diag[term_i]\n            else:\n                _scales = 0.*self.diag[term_i]\n            if self.jitter[term_i]>0:\n                _scales = sp.concatenate((_scales,sp.array([sp.sqrt(self.jitter[term_i])])))\n            scales.append(_scales)\n        return sp.concatenate(scales)", "docstring": "Internal function for parameter initialization\nUses 2 term single trait model to get covar params for initialization\n\nArgs:\ntermx:      non-noise term terms that is used for initialization", "source": "juraj-google-style"}
{"code": "def set(self, key, value):\n        \n        self._check_limit()\n        _expire = time.time() + self._timeout if self._timeout else None\n        self._store[key] = (value, _expire)", "docstring": "Add an item to the cache\nArgs:\nkey: item key\nvalue: the value associated with this key", "source": "juraj-google-style"}
{"code": "def Bernoulli(cls,\n        mean: 'TensorFluent',\n        batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:\n        \n        probs = mean.tensor\n        dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)\n        batch = mean.batch\n        if not batch and batch_size is not None:\n            t = dist.sample(batch_size)\n            batch = True\n        else:\n            t = dist.sample()\n        scope = mean.scope.as_list()\n        return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Bernoulli sampling op with given mean parameter.\n\nArgs:\nmean: The mean parameter of the Bernoulli distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Bernoulli distribution and a TensorFluent sample drawn from the distribution.", "source": "juraj-google-style"}
{"code": "def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):\n    plt = pretty_plot(12, 8)\n    scale = (1 if (not normalize_rxn_coordinate) else (1 / self.r[(- 1)]))\n    x = np.arange(0, np.max(self.r), 0.01)\n    y = (self.spline(x) * 1000)\n    relative_energies = (self.energies - self.energies[0])\n    plt.plot((self.r * scale), (relative_energies * 1000), 'ro', (x * scale), y, 'k-', linewidth=2, markersize=10)\n    plt.xlabel('Reaction coordinate')\n    plt.ylabel('Energy (meV)')\n    plt.ylim(((np.min(y) - 10), ((np.max(y) * 1.02) + 20)))\n    if label_barrier:\n        data = zip((x * scale), y)\n        barrier = max(data, key=(lambda d: d[1]))\n        plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')\n        plt.annotate(('%.0f meV' % (np.max(y) - np.min(y))), xy=((barrier[0] / 2), (barrier[1] * 1.02)), xytext=((barrier[0] / 2), (barrier[1] * 1.02)), horizontalalignment='center')\n    plt.tight_layout()\n    return plt", "docstring": "Returns the NEB plot. Uses Henkelman's approach of spline fitting\neach section of the reaction path based on tangent force and energies.\n\nArgs:\nnormalize_rxn_coordinate (bool): Whether to normalize the\nreaction coordinate to between 0 and 1. Defaults to True.\nlabel_barrier (bool): Whether to label the maximum barrier.\n\nReturns:\nmatplotlib.pyplot object.", "source": "codesearchnet"}
{"code": "def AddEventSource(self, event_source):\n    \n    self._RaiseIfNotWritable()\n\n    event_source = self._PrepareAttributeContainer(event_source)\n\n    self._event_sources.append(event_source)\n    self.number_of_event_sources += 1", "docstring": "Adds an event source.\n\nArgs:\nevent_source (EventSource): event source.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name, container_name=None):\n    if (container_name is None):\n        container_name = container_group_name\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '/containers/', container_name, '/logs?api-version=', CONTAINER_API])\n    return do_get(endpoint, access_token)", "docstring": "Get the container logs for containers in a container group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ncontainer_group_name (str): Name of container instance group.\ncontainer_name (str): Optional name of a container in the group.\n\nReturns:\nHTTP response. Container logs.", "source": "codesearchnet"}
{"code": "def _maybe_strip_extension(number):\n    \n    match = _EXTN_PATTERN.search(number)\n    \n    \n    if match and _is_viable_phone_number(number[:match.start()]):\n        \n        for group in match.groups():\n            \n            \n            \n            if group is not None:\n                return (group, number[:match.start()])\n    return (\"\", number)", "docstring": "Strip extension from the end of a number string.\n\nStrips any extension (as in, the part of the number dialled after the\ncall is connected, usually indicated with extn, ext, x or similar) from\nthe end of the number, and returns it.\n\nArguments:\nnumber -- the non-normalized telephone number that we wish to strip the extension from.\n\nReturns a 2-tuple of:\n- the phone extension (or \"\" or not present)\n- the number before the extension.", "source": "juraj-google-style"}
{"code": "def addResource(self, pid):\n        \n        self._check_initialized()\n        try:\n            \n            self.getObjectByPid(pid)\n            return\n        except IndexError:\n            pass\n        \n        oid = self._pid_to_id(pid)\n        obj = rdflib.URIRef(oid)\n        ag = self.getAggregation()\n        self.add((ag, ORE.aggregates, obj))\n        self.add((obj, ORE.isAggregatedBy, ag))\n        self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid)))", "docstring": "Add a resource to the Resource Map.\n\nArgs:\npid : str", "source": "juraj-google-style"}
{"code": "def module_set_id(self) -> str:\n    fnames = sorted(['@'.join(m) for m in self.schema_data.modules])\n    return hashlib.sha1(''.join(fnames).encode('ascii')).hexdigest()", "docstring": "Compute unique id of YANG modules comprising the data model.\n\nReturns:\nString consisting of hexadecimal digits.", "source": "codesearchnet"}
{"code": "def get_domain(self):\n    if hasattr(self, 'domain'):\n        return Domain(self.rest_client.make_request(self.domain), self.rest_client)", "docstring": "Get the Streams domain for the instance that owns this view.\n\nReturns:\nDomain: Streams domain for the instance owning this view.", "source": "codesearchnet"}
{"code": "def RemoveUser(self, user):\n    \n    self.logger.info('Removing user %s.', user)\n    if self.remove:\n      command = self.userdel_cmd.format(user=user)\n      try:\n        subprocess.check_call(command.split(' '))\n      except subprocess.CalledProcessError as e:\n        self.logger.warning('Could not remove user %s. %s.', user, str(e))\n      else:\n        self.logger.info('Removed user account %s.', user)\n    self._RemoveAuthorizedKeys(user)\n    self._UpdateSudoer(user, sudoer=False)", "docstring": "Remove a Linux user account.\n\nArgs:\nuser: string, the Linux user account to remove.", "source": "juraj-google-style"}
{"code": "def lowpass_filter(data: FLOATS_TYPE,\n                   sampling_freq_hz: float,\n                   cutoff_freq_hz: float,\n                   numtaps: int) -> FLOATS_TYPE:\n    \n    coeffs = firwin(\n        numtaps=numtaps,\n        cutoff=normalized_frequency(cutoff_freq_hz, sampling_freq_hz),\n        pass_zero=True\n    )  \n    filtered_data = lfilter(b=coeffs, a=1.0, x=data)\n    return filtered_data", "docstring": "Apply a low-pass filter to the data.\n\nArgs:\ndata: time series of the data\nsampling_freq_hz: sampling frequency :math:`f_s`, in Hz\n(or other consistent units)\ncutoff_freq_hz: filter cutoff frequency in Hz\n(or other consistent units)\nnumtaps: number of filter taps\n\nReturns:\nfiltered data\n\nNote: number of filter taps = filter order + 1", "source": "juraj-google-style"}
{"code": "def estimate_motion(self, time, intensity_grid, max_u, max_v):\n        \n        ti = np.where(time == self.times)[0][0]\n        mask_vals = np.where(self.masks[ti].ravel() == 1)\n        i_vals = self.i[ti].ravel()[mask_vals]\n        j_vals = self.j[ti].ravel()[mask_vals]\n        obj_vals = self.timesteps[ti].ravel()[mask_vals]\n        u_shifts = np.arange(-max_u, max_u + 1)\n        v_shifts = np.arange(-max_v, max_v + 1)\n        min_error = 99999999999.0\n        best_u = 0\n        best_v = 0\n        for u in u_shifts:\n            j_shift = j_vals - u\n            for v in v_shifts:\n                i_shift = i_vals - v\n                if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) &\n                                  (0 <= j_shift) & (j_shift < intensity_grid.shape[1])):\n                    shift_vals = intensity_grid[i_shift, j_shift]\n                else:\n                    shift_vals = np.zeros(i_shift.shape)\n                \n                error = np.abs(shift_vals - obj_vals).mean()\n                if error < min_error:\n                    min_error = error\n                    best_u = u * self.dx\n                    best_v = v * self.dx\n        \n        \n        \n        \n        self.u[ti] = best_u\n        self.v[ti] = best_v\n        return best_u, best_v, min_error", "docstring": "Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.\n\nArgs:\ntime: time being evaluated.\nintensity_grid: 2D array of intensities used in cross correlation.\nmax_u: Maximum x-component of motion. Used to limit search area.\nmax_v: Maximum y-component of motion. Used to limit search area\n\nReturns:\nu, v, and the minimum error.", "source": "juraj-google-style"}
{"code": "def getRowByIndex(self, index):\n    assert isinstance(index, int)\n    return Row(self._impl.getRowByIndex(index))", "docstring": "Get row by numeric index.\n\nArgs:\nindex: Zero-based index of the row to get.\n\nReturns:\nThe corresponding row.", "source": "codesearchnet"}
{"code": "def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):\n    if (not isinstance(description, str)):\n        raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n    if (not isinstance(inputvalue, (int, long))):\n        raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))\n    if (not isinstance(minvalue, (int, long, type(None)))):\n        raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))\n    if (not isinstance(maxvalue, (int, long, type(None)))):\n        raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))\n    _checkNumerical(inputvalue, minvalue, maxvalue, description)", "docstring": "Check that the given integer is valid.\n\nArgs:\n* inputvalue (int or long): The integer to be checked\n* minvalue (int or long, or None): Minimum value of the integer\n* maxvalue (int or long, or None): Maximum value of the integer\n* description (string): Used in error messages for the checked inputvalue\n\nRaises:\nTypeError, ValueError\n\nNote: Can not use the function :func:`_checkString`, as that function uses this function internally.", "source": "codesearchnet"}
{"code": "def greedy_set_cover(universe, subsets, costs):\n    elements = set((e for s in subsets.keys() for e in subsets[s]))\n    if (elements != universe):\n        return None\n    covered = set()\n    cover_sets = []\n    while (covered != universe):\n        min_cost_elem_ratio = float('inf')\n        min_set = None\n        for (s, elements) in subsets.items():\n            new_elements = len((elements - covered))\n            if (new_elements != 0):\n                cost_elem_ratio = (costs[s] / new_elements)\n                if (cost_elem_ratio < min_cost_elem_ratio):\n                    min_cost_elem_ratio = cost_elem_ratio\n                    min_set = s\n        cover_sets.append(min_set)\n        covered |= subsets[min_set]\n    return cover_sets", "docstring": "Approximate greedy algorithm for set-covering. Can be used on large\ninputs - though not an optimal solution.\n\nArgs:\nuniverse (list): Universe of elements\nsubsets (dict): Subsets of U {S1:elements,S2:elements}\ncosts (dict): Costs of each subset in S - {S1:cost, S2:cost...}", "source": "codesearchnet"}
{"code": "def is_namedtuple(x) -> bool:\n    return isinstance(x, tuple) and hasattr(type(x), '_fields')", "docstring": "Returns `True` if the value is an instance of `NamedTuple`.\n\nThis uses a heuristic, checking for a `._fields` attribute.\n\nArgs:\nx: Object to check\n\nReturns:\n`True` if the object is a `namedtuple`", "source": "github-repos"}
{"code": "def set_string(self, option, value):\n        \n        if not isinstance(value, str):\n            raise TypeError(\"%s must be a string\" % option)\n\n        self.options[option] = value", "docstring": "Set a string option.\n\nArgs:\noption (str): name of option.\nvalue (str): value of the option.\n\nRaises:\nTypeError: Value must be a string.", "source": "juraj-google-style"}
{"code": "def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):\n    device_name = tf.compat.as_str(device_name)\n    if (run_key not in self._run_key_to_original_graphs):\n        raise ValueError(('Unknown run_key: %s' % run_key))\n    if (device_name not in self._run_key_to_original_graphs[run_key]):\n        raise ValueError(('Unknown device for run key \"%s\": %s' % (run_key, device_name)))\n    return self._run_key_to_original_graphs[run_key][device_name].maybe_base_expanded_node_name(node_name)", "docstring": "Obtain possibly base-expanded node name.\n\nBase-expansion is the transformation of a node name which happens to be the\nname scope of other nodes in the same graph. For example, if there are two\nnodes called 'a/b' and 'a/b/read' in a graph, the name of the first node will\nbe base-expanded to 'a/b/(b)'.\n\nThis method uses caching to avoid unnecessary recomputation.\n\nArgs:\nnode_name: Name of the node.\nrun_key: The run key to which the node belongs.\ndevice_name: Name of the device on which the node's graph resides.\n\nReturns:\nThe node name, base-expanded if necessary.\n\nRaises:\nValueError: If `run_key` and/or `device_name` do not exist in the record.", "source": "codesearchnet"}
{"code": "def validate(self, value):\n    \n    if value is not None and not isinstance(value, self.data_type):\n      raise datastore_errors.BadValueError(\n          \"Property %s must be convertible to a %s instance (%s)\" %\n          (self.name, self.data_type, value))\n    return super(JsonProperty, self).validate(value)", "docstring": "Validate value.\n\nArgs:\nvalue: model value.\n\nReturns:\nWhether the specified value is valid data type value.\n\nRaises:\nBadValueError: when value is not of self.data_type type.", "source": "juraj-google-style"}
{"code": "def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n    assign = state_ops.assign_sub(self._variable, delta, use_locking=use_locking, name=name)\n    if read_value:\n        return assign\n    return assign.op", "docstring": "Subtracts a value from this variable.\n\nThis is essentially a shortcut for `assign_sub(self, delta)`.\n\nArgs:\ndelta: A `Tensor`. The value to subtract from this variable.\nuse_locking: If `True`, use locking during the operation.\nname: The name of the operation to be created\nread_value: if True, will return something which evaluates to the new\nvalue of the variable; if False will return the assign op.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe subtraction has completed.", "source": "github-repos"}
{"code": "def dbmax10years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmax10years`'.format(value))\n\n        self._dbmax10years = value", "docstring": "Corresponds to IDD Field `dbmax10years`\n10-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax10years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __setitem__(self, key, value):\n    \n    setitem = self._class_to_mock.__dict__.get('__setitem__', None)\n\n    \n    if setitem is None:\n      raise TypeError('object does not support item assignment')\n\n    \n    if self._replay_mode:\n      return MockMethod('__setitem__', self._expected_calls_queue,\n                        self._replay_mode)(key, value)\n\n\n    \n    return self._CreateMockMethod('__setitem__')(key, value)", "docstring": "Provide custom logic for mocking classes that support item assignment.\n\nArgs:\nkey: Key to set the value for.\nvalue: Value to set.\n\nReturns:\nExpected return value in replay mode.  A MockMethod object for the\n__setitem__ method that has already been called if not in replay mode.\n\nRaises:\nTypeError if the underlying class does not support item assignment.\nUnexpectedMethodCallError if the object does not expect the call to\n__setitem__.", "source": "juraj-google-style"}
{"code": "def dump_credibilities(self, output):\n    for p in self.products:\n        json.dump({'product_id': p.name, 'credibility': self.credibility(p)}, output)\n        output.write('\\n')", "docstring": "Dump credibilities of all products.\n\nArgs:\noutput: a writable object.", "source": "codesearchnet"}
{"code": "def revnet_step(name, x, hparams, reverse=True):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        if (hparams.coupling == 'additive'):\n            coupling_layer = functools.partial(additive_coupling, name='additive', reverse=reverse, mid_channels=hparams.coupling_width, activation=hparams.activation, dropout=hparams.coupling_dropout)\n        else:\n            coupling_layer = functools.partial(affine_coupling, name='affine', reverse=reverse, mid_channels=hparams.coupling_width, activation=hparams.activation, dropout=hparams.coupling_dropout)\n        ops = [functools.partial(actnorm, name='actnorm', reverse=reverse), functools.partial(invertible_1x1_conv, name='invertible', reverse=reverse), coupling_layer]\n        if reverse:\n            ops = ops[::(- 1)]\n        objective = 0.0\n        for op in ops:\n            (x, curr_obj) = op(x=x)\n            objective += curr_obj\n        return (x, objective)", "docstring": "One step of glow generative flow.\n\nActnorm + invertible 1X1 conv + affine_coupling.\n\nArgs:\nname: used for variable scope.\nx: input\nhparams: coupling_width is the only hparam that is being used in\nthis function.\nreverse: forward or reverse pass.\nReturns:\nz: Output of one step of reversible flow.", "source": "codesearchnet"}
{"code": "def prod(self, vars_list: List[str]) -> 'TensorFluent':\n        \n        operand = self\n        if operand.dtype == tf.bool:\n            operand = operand.cast(tf.float32)\n        return self._aggregation_op(tf.reduce_prod, operand, vars_list)", "docstring": "Returns the TensorFluent for the prod aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the prod aggregation function.", "source": "juraj-google-style"}
{"code": "def collection(iterable=None, mutable=True, ordered=False, unique=False):\n\t\n\tif iterable is None:\n\t\titerable = tuple()\n\tif unique:\n\t\tif ordered:\n\t\t\tif mutable:\n\t\t\t\treturn setlist(iterable)\n\t\t\telse:\n\t\t\t\treturn frozensetlist(iterable)\n\t\telse:\n\t\t\tif mutable:\n\t\t\t\treturn set(iterable)\n\t\t\telse:\n\t\t\t\treturn frozenset(iterable)\n\telse:\n\t\tif ordered:\n\t\t\tif mutable:\n\t\t\t\treturn list(iterable)\n\t\t\telse:\n\t\t\t\treturn tuple(iterable)\n\t\telse:\n\t\t\tif mutable:\n\t\t\t\treturn bag(iterable)\n\t\t\telse:\n\t\t\t\treturn frozenbag(iterable)", "docstring": "Return a :class:`Collection` with the specified properties.\n\nArgs:\niterable (Iterable): collection to instantiate new collection from.\nmutable (bool): Whether or not the new collection is mutable.\nordered (bool): Whether or not the new collection is ordered.\nunique (bool): Whether or not the new collection contains only unique values.", "source": "juraj-google-style"}
{"code": "def extractDates(self, inp):\n        \n        def merge(param):\n            day, time = param\n            if not (day or time):\n                return None\n\n            if not day:\n                return time\n            if not time:\n                return day\n\n            return datetime.datetime(\n                day.year, day.month, day.day, time.hour, time.minute\n            )\n\n        days = self.extractDays(inp)\n        times = self.extractTimes(inp)\n        return map(merge, zip_longest(days, times, fillvalue=None))", "docstring": "Extract semantic date information from an input string.\nIn effect, runs both extractDays and extractTimes on the input\nstring and merges the results to produce a comprehensive\ndatetime object.\n\nArgs:\ninp (str): Input string to be parsed.\n\nReturns:\nA list of datetime objects containing the extracted dates from the\ninput snippet, or an empty list if not found.", "source": "juraj-google-style"}
{"code": "def get_wallet_height(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_WALLET_HEIGHT, id=id, endpoint=endpoint)", "docstring": "Get the current wallet index height.\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def Shell(self, command, timeout_ms=None):\n    return self.protocol_handler.Command(self._handle, service=b'shell', command=command, timeout_ms=timeout_ms)", "docstring": "Run command on the device, returning the output.\n\nArgs:\ncommand: Shell command to run\ntimeout_ms: Maximum time to allow the command to run.", "source": "codesearchnet"}
{"code": "def get_sharding_tile_shape(sharding):\n    if sharding is None:\n        return None\n    sharding_message = xla_data_pb2.OpSharding()\n    sharding_message.ParseFromString(sharding)\n    if sharding_message.tile_assignment_dimensions:\n        return sharding_message.tile_assignment_dimensions\n    else:\n        return None", "docstring": "Returns the tile assignment shape for a sharded Tensor.\n\nArgs:\nsharding: a serialized OpSharding message describing the layout of a\nsharded Tensor.\n\nReturns:\nA list, for each dimension of the sharded Tensor, of the number of shards\ninto which it has been split. Returns None if the input indicates no tile\nassignments.", "source": "github-repos"}
{"code": "def _build_list_of_Intervals(cls, data_dict, stop=None, points=False, include=None, exclude=None, ignore=None, lexicon=None):\n    include = (include or {})\n    exclude = (exclude or {})\n    ignore = (ignore or [])\n    all_data = []\n    for data in zip(*data_dict.values()):\n        all_data.append({k: v for (k, v) in zip(data_dict.keys(), data)})\n    all_data = sorted(all_data, key=(lambda x: x['top']))\n    wanted_data = []\n    for dictionary in all_data:\n        keep = True\n        delete = []\n        for (k, v) in dictionary.items():\n            incl = include.get(k, utils.null_default(True))\n            excl = exclude.get(k, utils.null_default(False))\n            if (k in ignore):\n                delete.append(k)\n            if (not incl(v)):\n                keep = False\n            if excl(v):\n                keep = False\n        if delete:\n            for key in delete:\n                _ = dictionary.pop(key, None)\n        if keep:\n            wanted_data.append(dictionary)\n    if (not points):\n        for (i, iv) in enumerate(wanted_data):\n            if (iv.get('base', None) is None):\n                try:\n                    iv['base'] = wanted_data[(i + 1)]['top']\n                except (IndexError, KeyError):\n                    if (stop is not None):\n                        thick = (stop - iv['top'])\n                    else:\n                        thick = 1\n                    iv['base'] = (iv['top'] + thick)\n    list_of_Intervals = []\n    for iv in wanted_data:\n        top = iv.pop('top')\n        base = iv.pop('base', None)\n        descr = iv.pop('description', '')\n        if iv:\n            (c, d) = ({}, {})\n            for (k, v) in iv.items():\n                if ((k[:5].lower() == 'comp ') or (k[:9].lower() == 'component')):\n                    k = re.sub('comp(?:onent)? ', '', k, flags=re.I)\n                    c[k] = v\n                elif (v is not None):\n                    d[k] = v\n            comp = ([Component(c)] if c else None)\n            this = Interval(**{'top': top, 'base': base, 'description': descr, 'data': d, 'components': comp})\n        else:\n            this = Interval(**{'top': top, 'base': base, 'description': descr, 'lexicon': lexicon})\n        list_of_Intervals.append(this)\n    return list_of_Intervals", "docstring": "Private function. Takes a data dictionary and reconstructs a list\nof Intervals from it.\n\nArgs:\ndata_dict (dict)\nstop (float): Where to end the last interval.\npoints (bool)\ninclude (dict)\nexclude (dict)\nignore (list)\nlexicon (Lexicon)\n\nReturns:\nlist.", "source": "codesearchnet"}
{"code": "def typical_or_extreme_period_type(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `typical_or_extreme_period_type`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `typical_or_extreme_period_type`')\n    self._typical_or_extreme_period_type = value", "docstring": "Corresponds to IDD Field `typical_or_extreme_period_type`\n\nArgs:\nvalue (str): value for IDD Field `typical_or_extreme_period_type`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def speed_difference(points):\n    data = [0]\n    for (before, after) in pairwise(points):\n        data.append((before.vel - after.vel))\n    return data", "docstring": "Computes the speed difference between each pair of adjacent points\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nReturns:\n:obj:`list` of float: Speed difference between each pair of adjacent points; the first element is 0", "source": "codesearchnet"}
{"code": "def tfidf_corpus(docs=CORPUS):\n    \n    vectorizer = TfidfVectorizer()\n    vectorizer = vectorizer.fit(docs)\n    return vectorizer, vectorizer.transform(docs)", "docstring": "Count the words in a corpus and return a TfidfVectorizer() as well as all the TFIDF vectors for the corpus\n\nArgs:\ndocs (iterable of strs): a sequence of documents (strings)\n\nReturns:\n(TfidfVectorizer, tfidf_vectors)", "source": "juraj-google-style"}
{"code": "def get_relavent_flags(self):\n    relavent_flags = {}\n    for (code, flags_list) in self.flags.items():\n        relavent_flags[code] = []\n        for flag in flags_list:\n            if self.flag_is_related(flag):\n                relavent_flags[code].append(flag)\n        if (not relavent_flags[code]):\n            del relavent_flags[code]\n    return relavent_flags", "docstring": "Retrieves the relevant flags for this data block.\n\nReturns:\nAll flags related to this block.", "source": "codesearchnet"}
{"code": "def first(series, order_by=None):\n    if (order_by is not None):\n        series = order_series_by(series, order_by)\n    first_s = series.iloc[0]\n    return first_s", "docstring": "Returns the first value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.\n\nKwargs:\norder_by: a pandas.Series or list of series (can be symbolic) to order\nthe input series by before summarization.", "source": "codesearchnet"}
{"code": "def generate_encoded_user_data(env='dev', region='us-east-1', generated=None, group_name='', pipeline_type='', canary=False):\n    if (env in ['prod', 'prodp', 'prods']):\n        (env_c, env_p, env_s) = ('prod', 'prodp', 'prods')\n    else:\n        (env_c, env_p, env_s) = (env, env, env)\n    user_data = get_template(template_file='infrastructure/user_data.sh.j2', env=env, env_c=env_c, env_p=env_p, env_s=env_s, region=region, app_name=generated.app_name(), group_name=group_name, pipeline_type=pipeline_type, canary=canary, formats=generated)\n    return base64.b64encode(user_data.encode()).decode()", "docstring": "Generate base64 encoded User Data.\n\nArgs:\nenv (str): Deployment environment, e.g. dev, stage.\nregion (str): AWS Region, e.g. us-east-1.\ngenerated (gogoutils.Generator): Generated naming formats.\ngroup_name (str): Application group name, e.g. core.\npipeline_type (str): Type of Foremast Pipeline to configure.\ncanary (bool): Whether this is a canary deployment.\n\nReturns:\nstr: base64 encoded User Data script.\n\n#!/bin/bash\nexport CLOUD_ENVIRONMENT=dev\nexport CLOUD_ENVIRONMENT_C=dev\nexport CLOUD_ENVIRONMENT_P=dev\nexport CLOUD_ENVIRONMENT_S=dev\nexport CLOUD_APP=coreforrest\nexport CLOUD_APP_GROUP=forrest\nexport CLOUD_STACK=forrest\nexport EC2_REGION=us-east-1\nexport CLOUD_DOMAIN=dev.example.com\nprintenv | grep 'CLOUD\\|EC2' | awk '$0=\"export \"$0'>> /etc/gogo/cloud_env", "source": "codesearchnet"}
{"code": "def line(self, value):\n        \n        if value == self._defaults['line'] and 'line' in self._values:\n            del self._values['line']\n        else:\n            self._values['line'] = value", "docstring": "The line property.\n\nArgs:\nvalue (int). the property value.", "source": "juraj-google-style"}
{"code": "def elastic(x, severity=1):\n  \n  c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),\n       (244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01,\n                                              244 * 0.02),\n       (244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]\n\n  image = np.array(x, dtype=np.float32) / 255.\n  shape = image.shape\n  shape_size = shape[:2]\n\n  \n  center_square = np.float32(shape_size) // 2\n  square_size = min(shape_size) // 3\n  pts1 = np.float32([\n      center_square + square_size,\n      [center_square[0] + square_size, center_square[1] - square_size],\n      center_square - square_size\n  ])\n  pts2 = pts1 + np.random.uniform(\n      -c[2], c[2], size=pts1.shape).astype(np.float32)\n  affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)\n  image = tfds.core.lazy_imports.cv2.warpAffine(\n      image,\n      affine_trans,\n      shape_size[::-1],\n      borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)\n\n  dx = (tfds.core.lazy_imports.skimage.filters.gaussian(\n      np.random.uniform(-1, 1, size=shape[:2]),\n      c[1],\n      mode='reflect',\n      truncate=3) * c[0]).astype(np.float32)\n  dy = (tfds.core.lazy_imports.skimage.filters.gaussian(\n      np.random.uniform(-1, 1, size=shape[:2]),\n      c[1],\n      mode='reflect',\n      truncate=3) * c[0]).astype(np.float32)\n  dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]\n\n  x, y, z = np.meshgrid(\n      np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n  indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx,\n                                                    (-1, 1)), np.reshape(\n                                                        z, (-1, 1))\n  x_clip = np.clip(\n      tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(\n          image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Applies elastic transform to images.\n\nElastic transform is performed on small patches of the images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Applied elastic transform.", "source": "juraj-google-style"}
{"code": "def _try_to_compute_deterministic_class_id(cls, depth=5):\n    class_id = pickle.dumps(cls)\n    for _ in range(depth):\n        new_class_id = pickle.dumps(pickle.loads(class_id))\n        if (new_class_id == class_id):\n            return hashlib.sha1(new_class_id).digest()\n        class_id = new_class_id\n    logger.warning('WARNING: Could not produce a deterministic class ID for class {}'.format(cls))\n    return hashlib.sha1(new_class_id).digest()", "docstring": "Attempt to produce a deterministic class ID for a given class.\n\nThe goal here is for the class ID to be the same when this is run on\ndifferent worker processes. Pickling, loading, and pickling again seems to\nproduce more consistent results than simply pickling. This is a bit crazy\nand could cause problems, in which case we should revert it and figure out\nsomething better.\n\nArgs:\ncls: The class to produce an ID for.\ndepth: The number of times to repeatedly try to load and dump the\nstring while trying to reach a fixed point.\n\nReturns:\nA class ID for this class. We attempt to make the class ID the same\nwhen this function is run on different workers, but that is not\nguaranteed.\n\nRaises:\nException: This could raise an exception if cloudpickle raises an\nexception.", "source": "codesearchnet"}
{"code": "def get(self, url, headers=None, parameters=None, get_json=True):\n    if self.debug:\n        print(('GET: %s, headers=%s' % (url, headers)))\n    self.headers = self._get_default_headers()\n    get_parameters = self.parameters\n    if (get_parameters is None):\n        get_parameters = {}\n    if (headers is not None):\n        self.headers.update(headers)\n    if (parameters is not None):\n        get_parameters.update(parameters)\n    response = requests.get(url, headers=self.headers, params=get_parameters, auth=self.auth, verify=self.verify_ssl)\n    json_response = self._process_json_response(response)\n    return (json_response if (get_json is True) else response)", "docstring": "Send a GET request with custom headers and parameters\n\nArgs:\nurl (str): URL to send the request to\nheaders (dict, optional): custom headers\nparameters (dict, optional): optional parameters\n\nReturns:\nA JSON object of the returned response if `get_json` is True,\nRequests' response object otherwise", "source": "codesearchnet"}
{"code": "def remove_op_callback(self, callback):\n    if callback not in self._thread_local_data.op_callbacks:\n        raise KeyError('The specified op callback has not been registered, and hence cannot be removed.')\n    del self._thread_local_data.op_callbacks[self._thread_local_data.op_callbacks.index(callback)]", "docstring": "Remove an already-registered op callback.\n\nArgs:\ncallback: The op callback to be removed.\n\nRaises:\nKeyError: If `callback` is not already registered.", "source": "github-repos"}
{"code": "def return_type(type_name, formatter=None):\n\n    def _returns(func):\n        annotated(func)\n        func.metadata.typed_returnvalue(type_name, formatter)\n        return func\n    return _returns", "docstring": "Specify that this function returns a typed value.\n\nArgs:\ntype_name (str): A type name known to the global typedargs type system\nformatter (str): An optional name of a formatting function specified\nfor the type given in type_name.", "source": "codesearchnet"}
{"code": "def resource_struct(self, resource: str) -> str:\n    resource = self.api_document['schemas'][resource]['properties']\n    return self.to_struct(from_api=resource)", "docstring": "Return BigQuery STRUCT for a Discovery API resource.\n\nArgs:\nresource: the name of the Google API resource\n\nReturns:\nA string STRUCT of the resource ready to be used in a query.", "source": "github-repos"}
{"code": "def create_config(self, name, data, labels=None):\n        \n        if not isinstance(data, bytes):\n            data = data.encode('utf-8')\n\n        data = base64.b64encode(data)\n        if six.PY3:\n            data = data.decode('ascii')\n        body = {\n            'Data': data,\n            'Name': name,\n            'Labels': labels\n        }\n\n        url = self._url('/configs/create')\n        return self._result(\n            self._post_json(url, data=body), True\n        )", "docstring": "Create a config\n\nArgs:\nname (string): Name of the config\ndata (bytes): Config data to be stored\nlabels (dict): A mapping of labels to assign to the config\n\nReturns (dict): ID of the newly created config", "source": "juraj-google-style"}
{"code": "def _legacy_weights(layer):\n    weights = layer.trainable_weights + layer.non_trainable_weights\n    if any((not isinstance(w, variables_module.Variable) for w in weights)):\n        raise NotImplementedError(\"Save or restore weights that is not an instance of `tf.Variable` is not supported in h5, use `save_format='tf'` instead. Got a model or layer {} with weights {}\".format(layer.__class__.__name__, weights))\n    return weights", "docstring": "DO NOT USE.\n\nFor legacy reason, the layer.weights was in the order of\n[self.trainable_weights + self.non_trainable_weights], and this order was\nused for preserving the weights in h5 format. The new order of layer.weights\nare the same as layer.get_weights() which is more intuitive for user. To\nkeep supporting the existing saved h5 file, this method should be used to\nsave/load weights. In future version, we will delete this method and\nintroduce a breaking change for h5 and stay with the new order for weights.\n\nArgs:\nlayer: a `tf.keras.Model` or `tf.keras.layers.Layer` instance.\n\nReturns:\nA list of variables with the order of trainable_weights, followed by\nnon_trainable_weights.", "source": "github-repos"}
{"code": "def SetCredentials(self, password=None, username=None):\n    if password:\n        self._password = password\n    if username:\n        self._user = username", "docstring": "Sets the database credentials.\n\nArgs:\npassword (Optional[str]): password to access the database.\nusername (Optional[str]): username to access the database.", "source": "codesearchnet"}
{"code": "def _NormalizeKeyPath(self, key_path):\n    normalized_key_path = key_path.lower()\n    if ((len(normalized_key_path) < 39) or (not normalized_key_path.startswith(self._CONTROL_SET_PREFIX))):\n        return normalized_key_path\n    return ''.join([self._NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])", "docstring": "Normalizes a Windows Registry key path.\n\nArgs:\nkey_path (str): Windows Registry key path.\n\nReturns:\nstr: normalized Windows Registry key path.", "source": "codesearchnet"}
{"code": "def __init__(self, correction_limit=88., **kwargs):\n        \n        self.correction_limit = correction_limit\n        super(EffectiveSolarPathLengthCorrector, self).__init__(**kwargs)", "docstring": "Collect custom configuration values.\n\nArgs:\ncorrection_limit (float): Maximum solar zenith angle to apply the\ncorrection in degrees. Pixels beyond this limit have a\nconstant correction applied. Default 88.\nmax_sza (float): Maximum solar zenith angle in degrees that is\nconsidered valid and correctable. Default 95.0.", "source": "juraj-google-style"}
{"code": "def eval_in_new(cls, expr, *args, **kwargs):\n        \n        ctx = cls(*args, **kwargs)\n        ctx.env.rec_new(expr)\n        return ctx.eval(expr)", "docstring": ":meth:`eval` an expression in a new, temporary :class:`Context`.\n\nThis should be safe to use directly on user input.\n\nArgs:\nexpr (LispVal): The expression to evaluate.\n*args: Args for the :class:`Context` constructor.\n**kwargs: Kwargs for the :class:`Context` constructor.", "source": "juraj-google-style"}
{"code": "def _UpdateCounters(self, event):\n    self._session.parsers_counter['total'] += 1\n    parser_name = getattr(event, 'parser', '')\n    (_, _, parser_name) = parser_name.rpartition('/')\n    if (not parser_name):\n        parser_name = 'N/A'\n    self._session.parsers_counter[parser_name] += 1", "docstring": "Updates the counters.\n\nArgs:\nevent (EventObject): event.", "source": "codesearchnet"}
{"code": "def __init__(self, dataset, worker, devices, options=None):\n    self._dataset = dataset\n    self._worker = worker\n    self._devices = devices\n    self._element_spec = dataset.element_spec\n    self._options = options\n    self._make_iterator()", "docstring": "Create iterator for the `dataset` to fetch data to worker's `devices` .\n\nA `MultiDeviceIterator`  or `OwnedMultiDeviceIterator` is used to prefetch\ninput to the devices on the given worker.\n\nArgs:\ndataset: A `tf.data.Dataset` instance.\nworker: Worker on which ops should be created.\ndevices: Distribute data from `dataset` to these devices.\noptions: options.", "source": "github-repos"}
{"code": "def l1_loss(tensor, weight=1.0, scope=None):\n    with tf.name_scope(scope, 'L1Loss', [tensor]):\n        weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight')\n        loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')\n        tf.add_to_collection(LOSSES_COLLECTION, loss)\n        return loss", "docstring": "Define a L1Loss, useful for regularize, i.e. lasso.\n\nArgs:\ntensor: tensor to regularize.\nweight: scale the loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\nthe L1 loss op.", "source": "codesearchnet"}
{"code": "def load_yaml(task: Task, file: str) -> Result:\n    \n    with open(file, \"r\") as f:\n        yml = ruamel.yaml.YAML(typ=\"safe\")\n        data = yml.load(f)\n\n    return Result(host=task.host, result=data)", "docstring": "Loads a yaml file.\n\nArguments:\nfile: path to the file containing the yaml file to load\n\nExamples:\n\nSimple example::\n\n> nr.run(task=load_yaml,\nfile=\"mydata.yaml\")\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): dictionary with the contents of the file", "source": "juraj-google-style"}
{"code": "def get_first_model_with_resource_name(cls, resource_name):\n    models = cls.get_models_with_resource_name(resource_name)\n    if (len(models) > 0):\n        return models[0]\n    return None", "docstring": "Get the first model corresponding to a resource_name\n\nArgs:\nresource_name: the resource name", "source": "codesearchnet"}
{"code": "def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):\n  \n  \n  embedded_input_seq = snt.BatchApply(\n      embed_layer, name=\"input_embed_seq\")(data_ops.sparse_obs)\n\n  \n  initial_rnn_state = nest.map_structure(\n      lambda t: tf.get_local_variable(  \n          \"{}/rnn_state/{}\".format(name_prefix, t.op.name), initializer=t),\n      rnn_core.initial_state(FLAGS.batch_size))\n  assign_zero_rnn_state = nest.map_structure(\n      lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)\n  assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))\n\n  \n  rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(\n      cell=rnn_core,\n      inputs=embedded_input_seq,\n      initial_state=initial_rnn_state,\n      time_major=True)\n\n  \n  update_rnn_state = nest.map_structure(\n      tf.assign, initial_rnn_state, rnn_final_state)\n  with tf.control_dependencies(nest.flatten(update_rnn_state)):\n    rnn_output_seq = tf.identity(rnn_output_seq, name=\"rnn_output_seq\")\n  output_logits = snt.BatchApply(\n      output_linear, name=\"output_embed_seq\")(rnn_output_seq)\n  return output_logits, assign_zero_rnn_state", "docstring": "This is the core model logic.\n\nUnrolls a Bayesian RNN over the given sequence.\n\nArgs:\ndata_ops: A `sequence_data.SequenceDataOps` namedtuple.\nembed_layer: A `snt.Embed` instance.\nrnn_core: A `snt.RNNCore` instance.\noutput_linear: A `snt.Linear` instance.\nname_prefix: A string to use to prefix local variable names.\n\nReturns:\nA 3D time-major tensor representing the model's logits for a sequence of\npredictions. Shape `[time_steps, batch_size, vocab_size]`.", "source": "juraj-google-style"}
{"code": "def NotIn(self, *values):\n    \n    self._awql = self._CreateMultipleValuesCondition(values, 'NOT_IN')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"not in\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def get_sources(self, prefix=''):\n    prefix = prefix.replace('-', '_')\n    prefixed = ('%s_sources' % prefix)\n    if (prefixed in self.__cli):\n        sources = self.__cli.get(prefixed)\n        from_conf = False\n    else:\n        sources = self.__config.get(prefixed)\n        from_conf = True\n    if (sources is None):\n        return OrderedSet()\n    sources = self.__resolve_patterns(sources, from_conf)\n    prefixed = ('%s_source_filters' % prefix)\n    if (prefixed in self.__cli):\n        filters = self.__cli.get(prefixed)\n        from_conf = False\n    else:\n        filters = self.__config.get(prefixed)\n        from_conf = True\n    if (filters is None):\n        return sources\n    sources -= self.__resolve_patterns(filters, from_conf)\n    return sources", "docstring": "Retrieve a set of absolute paths to sources, according to `prefix`\n\n`ConfigParser` will perform wildcard expansion and\nfiltering.\n\nArgs:\nprefix: str, the desired prefix.\n\nReturns:\nutils.utils.OrderedSet: The set of sources for the given\n`prefix`.", "source": "codesearchnet"}
{"code": "def get(self, uid: int) -> FrozenSet[Flag]:\n        \n        recent = _recent_set if uid in self._recent else frozenset()\n        flags = self._flags.get(uid)\n        return recent if flags is None else (flags | recent)", "docstring": "Return the session flags for the mailbox session.\n\nArgs:\nuid: The message UID value.", "source": "juraj-google-style"}
{"code": "def _format_parameter_error_message(name: str, sig: Signature,\n                                    num_params: int) -> str:\n    \n    if num_params == 0:\n        plural = 's'\n        missing = 2\n        arguments = \"'slack' and 'event'\"\n    else:\n        plural = ''\n        missing = 1\n        arguments = \"'event'\"\n\n    return (f\"{name}{sig} missing {missing} required positional \"\n            f\"argument{plural}: {arguments}\")", "docstring": "Format an error message for missing positional arguments.\n\nArgs:\nname: The function name.\nsig: The function's signature.\nnum_params: The number of function parameters.\n\nReturns:\nstr: A formatted error message.", "source": "juraj-google-style"}
{"code": "def inject_params(self, params):\n    for (arg, value) in params.items():\n        cli_arg = '--{}'.format(arg)\n        if (cli_arg in sys.argv):\n            self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg))\n            continue\n        param_data = (self.tcex.install_json_params.get(arg) or {})\n        if (param_data.get('type', '').lower() == 'multichoice'):\n            value = value.split('|')\n        elif (param_data.get('type', '').lower() == 'boolean'):\n            value = self.tcex.utils.to_bool(value)\n        elif (arg in self.tc_bool_args):\n            value = self.tcex.utils.to_bool(value)\n        if isinstance(value, bool):\n            if (value is True):\n                sys.argv.append(cli_arg)\n        elif isinstance(value, list):\n            for mcv in value:\n                sys.argv.append('{}={}'.format(cli_arg, mcv))\n        else:\n            sys.argv.append('{}={}'.format(cli_arg, value))\n    (self._default_args, unknown) = self.parser.parse_known_args()\n    self.tcex._logger()", "docstring": "Inject params into sys.argv from secureParams API, AOT, or user provided.\n\nArgs:\nparams (dict): A dictionary containing all parameters that need to be injected as args.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self._remote_execute = channel.unary_unary(\n        '/OnlineActionHandler/_remote_execute',\n        request_serializer=actions__pb2.OnlineActionRequest.SerializeToString,\n        response_deserializer=actions__pb2.OnlineActionResponse.FromString,\n        )\n    self._remote_reload = channel.unary_unary(\n        '/OnlineActionHandler/_remote_reload',\n        request_serializer=actions__pb2.ReloadRequest.SerializeToString,\n        response_deserializer=actions__pb2.ReloadResponse.FromString,\n        )\n    self._health_check = channel.unary_unary(\n        '/OnlineActionHandler/_health_check',\n        request_serializer=actions__pb2.HealthCheckRequest.SerializeToString,\n        response_deserializer=actions__pb2.HealthCheckResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates confusion matrix statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def dbmin10years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmin10years`'.format(value))\n\n        self._dbmin10years = value", "docstring": "Corresponds to IDD Field `dbmin10years`\n10-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin10years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def append_from_list(self, content, fill_title=False):\n    row_index = 0\n    for row in content:\n        tr = TableRow()\n        column_index = 0\n        for item in row:\n            if ((row_index == 0) and fill_title):\n                ti = TableTitle(item)\n            else:\n                ti = TableItem(item)\n            tr.append(ti, str(column_index))\n            column_index = (column_index + 1)\n        self.append(tr, str(row_index))\n        row_index = (row_index + 1)", "docstring": "Appends rows created from the data contained in the provided\nlist of tuples of strings. The first tuple of the list can be\nset as table title.\n\nArgs:\ncontent (list): list of tuples of strings. Each tuple is a row.\nfill_title (bool): if true, the first tuple in the list will\nbe set as title.", "source": "codesearchnet"}
{"code": "def _count_righthand_zero_bits(number, bits):\n    \n    if number == 0:\n        return bits\n    for i in range(bits):\n        if (number >> i) & 1:\n            return i\n    \n    return bits", "docstring": "Count the number of zero bits on the right hand side.\n\nArgs:\nnumber: an integer.\nbits: maximum number of bits to count.\n\nReturns:\nThe number of zero bits on the right hand side of the number.", "source": "juraj-google-style"}
{"code": "def set_shutdown(self, default=False, disable=True):\n    return self._configure_mlag('shutdown', True, default, disable)", "docstring": "Configures the mlag shutdown value\n\nDefault setting for set_shutdown is disable=True, meaning\n'no shutdown'. Setting both default and disable to False will\neffectively enable shutdown.\n\nArgs:\ndefault (bool): Configures the shutdown using the\ndefault keyword\ndisable (bool): Negates shutdown using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.SayHello = channel.unary_unary(\n            '/helloworld.Greeter/SayHello',\n            request_serializer=hello__world__pb2.HelloRequest.\n            SerializeToString,\n            response_deserializer=hello__world__pb2.HelloReply.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "async def items(self):\n    response = (await self._api.get('/v1/acl/list'))\n    results = [decode_token(r) for r in response.body]\n    return consul(results, meta=extract_meta(response.headers))", "docstring": "Lists all the active tokens\n\nReturns:\nObjectMeta: where value is a list of tokens\n\nIt returns a body like this::\n\n[\n{\n\"CreateIndex\": 3,\n\"ModifyIndex\": 3,\n\"ID\": \"8f246b77-f3e1-ff88-5b48-8ec93abf3e05\",\n\"Name\": \"Client Token\",\n\"Type\": \"client\",\n\"Rules\": {\n\"key\": {\n\"\": { \"policy\": \"read\" },\n\"private/\": { \"policy\": \"deny\" }\n}\n}\n}\n]", "source": "codesearchnet"}
{"code": "def __init__(self, record_bytes, header_bytes=None, footer_bytes=None, hop_bytes=None, name=None, encoding=None):\n    rr = gen_io_ops.fixed_length_record_reader_v2(record_bytes=record_bytes, header_bytes=header_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, encoding=encoding, name=name)\n    super(FixedLengthRecordReader, self).__init__(rr)", "docstring": "Create a FixedLengthRecordReader.\n\nArgs:\nrecord_bytes: An int.\nheader_bytes: An optional int. Defaults to 0.\nfooter_bytes: An optional int. Defaults to 0.\nhop_bytes: An optional int. Defaults to 0.\nname: A name for the operation (optional).\nencoding: The type of encoding for the file. Defaults to none.", "source": "github-repos"}
{"code": "def baredoc(obj):\n    doc = getdoc(obj)\n    if (not doc):\n        return ''\n    doc = doc.splitlines()[0]\n    return doc.rstrip(' .').lstrip()", "docstring": "Return the first line of the docstring of an object.\n\nTrailing periods and spaces as well as leading spaces are removed from the\noutput.\n\nArgs:\nobj: any Python object.\nReturns:\nstr: the first line of the docstring of obj.", "source": "codesearchnet"}
{"code": "def validate_config_must_have(config, required_keys):\n  \n  missing_keys = set(required_keys) - set(config)\n  if len(missing_keys) > 0:\n    raise Exception('Invalid config with missing keys \"%s\"' % ', '.join(missing_keys))", "docstring": "Validate a config dictionary to make sure it has all of the specified keys\n\nArgs:\nconfig: the config to validate.\nrequired_keys: the list of keys that config must include.\n\nRaises:\nException if the config is missing any of them.", "source": "juraj-google-style"}
{"code": "def complement(self):\n    if self.complementary:\n        try:\n            return self._instances[self.complementary]\n        except KeyError:\n            raise ValueError('{} has a complementary but it was not defined!'.format(self.complementary))\n    else:\n        return None", "docstring": "Return the complementary relationship of self.\n\nRaises:\nValueError: if the relationship has a complementary\nwhich was not defined.\n\nReturns:\ncomplementary (Relationship): the complementary relationship.\n\nExample:\n\n>>> from pronto.relationship import Relationship\n>>> print(Relationship('has_part').complement())\nRelationship('part_of')\n>>> print(Relationship('has_units').complement())\nNone", "source": "codesearchnet"}
{"code": "def sort_by_modified(files_or_folders: list) -> list:\n    return sorted(files_or_folders, key=os.path.getmtime, reverse=True)", "docstring": "Sort files or folders by modified time\n\nArgs:\nfiles_or_folders: list of files or folders\n\nReturns:\nlist", "source": "codesearchnet"}
{"code": "def movie_lists(self, **kwargs):\n    path = self._get_path('movie_lists')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Gets the movie lists available from the API.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def layer_prepostprocess(previous_value, x, sequence, dropout_rate, norm_type, depth, epsilon, default_name, name=None, dropout_broadcast_dims=None, layer_collection=None):\n    with tf.variable_scope(name, default_name=default_name):\n        if (sequence == 'none'):\n            return x\n        for c in sequence:\n            if (c == 'a'):\n                x += previous_value\n            elif (c == 'z'):\n                x = zero_add(previous_value, x)\n            elif (c == 'n'):\n                x = apply_norm(x, norm_type, depth, epsilon, layer_collection=layer_collection)\n            else:\n                assert (c == 'd'), ('Unknown sequence step %s' % c)\n                x = dropout_with_broadcast_dims(x, (1.0 - dropout_rate), broadcast_dims=dropout_broadcast_dims)\n        return x", "docstring": "Apply a sequence of functions to the input or output of a layer.\n\nThe sequence is specified as a string which may contain the following\ncharacters:\na: add previous_value\nn: apply normalization\nd: apply dropout\nz: zero add\n\nFor example, if sequence==\"dna\", then the output is\nprevious_value + normalize(dropout(x))\n\nArgs:\nprevious_value: A Tensor, to be added as a residual connection ('a')\nx: A Tensor to be transformed.\nsequence: a string.\ndropout_rate: a float\nnorm_type: a string (see apply_norm())\ndepth: an integer (size of last dimension of x).\nepsilon: a float (parameter for normalization)\ndefault_name: a string\nname: a string\ndropout_broadcast_dims:  an optional list of integers less than 3\nspecifying in which dimensions to broadcast the dropout decisions.\nsaves memory.\nlayer_collection: A tensorflow_kfac.LayerCollection. Only used by the\nKFAC optimizer. Default is None.\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def main(argv=None):\n    args = None\n    cmd = None\n    try:\n        args = parse_args(argv)\n        if args.quiet:\n            logger.setLevel(logging.CRITICAL)\n        elif args.verbose:\n            logger.setLevel(logging.DEBUG)\n        cmd = args.func(args)\n        ret = cmd.run_cmd()\n    except KeyboardInterrupt:\n        logger.exception('interrupted by the user')\n        ret = 252\n    except NotDvcRepoError:\n        logger.exception('')\n        ret = 253\n    except DvcParserError:\n        ret = 254\n    except Exception:\n        logger.exception('unexpected error')\n        ret = 255\n    Analytics().send_cmd(cmd, args, ret)\n    return ret", "docstring": "Run dvc CLI command.\n\nArgs:\nargv: optional list of arguments to parse. sys.argv is used by default.\n\nReturns:\nint: command's return code.", "source": "codesearchnet"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new `BuildTrigger`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsTriggersCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BuildTrigger) The response message.", "source": "github-repos"}
{"code": "def initialize(self, emt_id, emt_pass):\n        \n        self._emt_id = emt_id\n        self._emt_pass = emt_pass\n\n        \n        self.bus = BusApi(self)\n        self.geo = GeoApi(self)\n        self.parking = ParkingApi(self)", "docstring": "Manual initialization of the interface attributes.\n\nThis is useful when the interface must be declared but initialized later\non with parsed configuration values.\n\nArgs:\nemt_id (str): ID given by the server upon registration\nemt_pass (str): Token given by the server upon registration", "source": "juraj-google-style"}
{"code": "def merge(self, options):\n    return options_lib.merge_options(self, options)", "docstring": "Merges itself with the given `tf.data.Options`.\n\nIf this object and the `options` to merge set an option differently, a\nwarning is generated and this object's value is updated with the `options`\nobject's value.\n\nArgs:\noptions: The `tf.data.Options` to merge with.\n\nReturns:\nNew `tf.data.Options` object which is the result of merging self with\nthe input `tf.data.Options`.", "source": "github-repos"}
{"code": "def list(self, cat, ctr=None, nb_results=None, offset=None):\n    path = (LIST_URL + '?c=3&cat={}'.format(requests.utils.quote(cat)))\n    if (ctr is not None):\n        path += '&ctr={}'.format(requests.utils.quote(ctr))\n    if (nb_results is not None):\n        path += '&n={}'.format(requests.utils.quote(str(nb_results)))\n    if (offset is not None):\n        path += '&o={}'.format(requests.utils.quote(str(offset)))\n    data = self.executeRequestApi2(path)\n    clusters = []\n    docs = []\n    if (ctr is None):\n        for pf in data.preFetch:\n            for cluster in pf.response.payload.listResponse.doc:\n                clusters.extend(cluster.child)\n        return [c.docid for c in clusters]\n    else:\n        apps = []\n        for d in data.payload.listResponse.doc:\n            for c in d.child:\n                for a in c.child:\n                    apps.append(utils.parseProtobufObj(a))\n        return apps", "docstring": "List all possible subcategories for a specific category. If\nalso a subcategory is provided, list apps from this category.\n\nArgs:\ncat (str): category id\nctr (str): subcategory id\nnb_results (int): if a subcategory is specified, limit number\nof results to this number\noffset (int): if a subcategory is specified, start counting from this\nresult\nReturns:\nA list of categories. If subcategory is specified, a list of apps in this\ncategory.", "source": "codesearchnet"}
{"code": "def empty(cls, labels=None):\n        \n        warnings.warn(\"Table.empty(labels) is deprecated. Use Table(labels)\", FutureWarning)\n        if labels is None:\n            return cls()\n        values = [[] for label in labels]\n        return cls(values, labels)", "docstring": "Creates an empty table. Column labels are optional. [Deprecated]\n\nArgs:\n``labels`` (None or list): If ``None``, a table with 0\ncolumns is created.\nIf a list, each element is a column label in a table with\n0 rows.\n\nReturns:\nA new instance of ``Table``.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        super(Header, self).Serialize(writer)\n        writer.WriteByte(0)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def _render_batch(self,\n            non_fluents: NonFluents,\n            states: Fluents, actions: Fluents, interms: Fluents,\n            rewards: np.array,\n            horizon: Optional[int] = None) -> None:\n        \n        if horizon is None:\n            horizon = len(states[0][1])\n        self._render_round_init(horizon, non_fluents)\n        for t in range(horizon):\n            s = [(s[0], s[1][t]) for s in states]\n            f = [(f[0], f[1][t]) for f in interms]\n            a = [(a[0], a[1][t]) for a in actions]\n            r = rewards[t]\n            self._render_timestep(t, s, a, f, r)\n        self._render_round_end(rewards)", "docstring": "Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`\nfor the given `horizon`.\n\nArgs:\nnon_fluents (NonFluents): The non-fluent values.\nstates (Sequence[Tuple[str, np.array]]): A state trajectory.\nactions (Sequence[Tuple[str, np.array]]): An action trajectory.\ninterms (Sequence[Tuple[str, np.array]]): An interm state trajectory.\nrewards (np.array): Sequence of rewards (1-dimensional array).\nhorizon (Optional[int]): Number of timesteps.", "source": "juraj-google-style"}
{"code": "def __init__(self, num_steps=None, last_step=None):\n    if num_steps is None and last_step is None:\n        raise ValueError('One of num_steps or last_step must be specified.')\n    if num_steps is not None and last_step is not None:\n        raise ValueError('Only one of num_steps or last_step can be specified.')\n    self._num_steps = num_steps\n    self._last_step = last_step", "docstring": "Initializes a `StopAtStepHook`.\n\nThis hook requests stop after either a number of steps have been\nexecuted or a last step has been reached. Only one of the two options can be\nspecified.\n\nif `num_steps` is specified, it indicates the number of steps to execute\nafter `begin()` is called. If instead `last_step` is specified, it\nindicates the last step we want to execute, as passed to the `after_run()`\ncall.\n\nArgs:\nnum_steps: Number of steps to execute.\nlast_step: Step after which to stop.\n\nRaises:\nValueError: If one of the arguments is invalid.", "source": "github-repos"}
{"code": "def chdir(directory):\n    directory = os.path.abspath(directory)\n    logger.info(('chdir -> %s' % directory))\n    try:\n        if (not os.path.isdir(directory)):\n            logger.error('chdir -> %s failed! Directory does not exist!', directory)\n            return False\n        os.chdir(directory)\n        return True\n    except Exception as e:\n        logger.error(('chdir -> %s failed! %s' % (directory, e)))\n        return False", "docstring": "Change the current working directory.\n\nArgs:\ndirectory (str): Directory to go to.", "source": "codesearchnet"}
{"code": "def __init__(self, checkpoint, proto_id):\n    self._checkpoint = checkpoint\n    self._proto_id = proto_id\n    self.skip_restore = False\n    self.callback = checkpoint_adapter.ReshardCallback()", "docstring": "Specify an object within a checkpoint.\n\nArgs:\ncheckpoint: A _CheckpointRestoreCoordinator object.\nproto_id: The index of this object in TrackableObjectGraph.nodes.", "source": "github-repos"}
{"code": "def NotEqualTo(self, value):\n    self._awql = self._CreateSingleValueCondition(value, '!=')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"not equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def _parse_path(self):\n    if (self.engine == ENGINE_DROPBOX):\n        path = get_dropbox_folder_location()\n    elif (self.engine == ENGINE_GDRIVE):\n        path = get_google_drive_folder_location()\n    elif (self.engine == ENGINE_COPY):\n        path = get_copy_folder_location()\n    elif (self.engine == ENGINE_ICLOUD):\n        path = get_icloud_folder_location()\n    elif (self.engine == ENGINE_BOX):\n        path = get_box_folder_location()\n    elif (self.engine == ENGINE_FS):\n        if self._parser.has_option('storage', 'path'):\n            cfg_path = self._parser.get('storage', 'path')\n            path = os.path.join(os.environ['HOME'], cfg_path)\n        else:\n            raise ConfigError(\"The required 'path' can't be found while the 'file_system' engine is used.\")\n    return str(path)", "docstring": "Parse the storage path in the config.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def read_string(self, registeraddress, numberOfRegisters=16, functioncode=3):\n    _checkFunctioncode(functioncode, [3, 4])\n    _checkInt(numberOfRegisters, minvalue=1, description='number of registers for read string')\n    return self._genericCommand(functioncode, registeraddress, numberOfRegisters=numberOfRegisters, payloadformat='string')", "docstring": "Read a string from the slave.\n\nEach 16-bit register in the slave are interpreted as two characters (1 byte = 8 bits).\nFor example 16 consecutive registers can hold 32 characters (32 bytes).\n\nArgs:\n* registeraddress (int): The slave register start address (use decimal numbers, not hex).\n* numberOfRegisters (int): The number of registers allocated for the string.\n* functioncode (int): Modbus function code. Can be 3 or 4.\n\nReturns:\nThe string (str).\n\nRaises:\nValueError, TypeError, IOError", "source": "codesearchnet"}
{"code": "def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=None, keepdims=None):\n    keep_dims = deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    if keep_dims is None:\n        keep_dims = False\n    with ops.name_scope(name, 'weighted_moments', [x, frequency_weights, axes]):\n        x = ops.convert_to_tensor(x, name='x')\n        frequency_weights = ops.convert_to_tensor(frequency_weights, name='frequency_weights')\n        needs_cast = x.dtype == dtypes.float16\n        if needs_cast:\n            x = math_ops.cast(x, dtypes.float32)\n        if frequency_weights.dtype != x.dtype:\n            frequency_weights = math_ops.cast(frequency_weights, x.dtype)\n        weighted_input_sum = math_ops.reduce_sum(frequency_weights * x, axes, name='weighted_input_sum', keepdims=True)\n        broadcasted_weights = frequency_weights + array_ops.zeros_like(x)\n        sum_of_weights = math_ops.reduce_sum(broadcasted_weights, axes, name='sum_of_weights', keepdims=True)\n        weighted_mean = math_ops.div_no_nan(weighted_input_sum, sum_of_weights)\n        weighted_distsq = math_ops.reduce_sum(frequency_weights * math_ops.squared_difference(x, weighted_mean), axes, name='weighted_distsq', keepdims=True)\n        weighted_variance = math_ops.div_no_nan(weighted_distsq, sum_of_weights)\n        if not keep_dims:\n            weighted_mean = array_ops.squeeze(weighted_mean, axis=axes)\n            weighted_variance = array_ops.squeeze(weighted_variance, axis=axes)\n        if needs_cast:\n            weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)\n            weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)\n        return (weighted_mean, weighted_variance)", "docstring": "Returns the frequency-weighted mean and variance of `x`.\n\nArgs:\nx: A tensor.\naxes: 1-d tensor of int32 values; these are the axes along which\nto compute mean and variance.\nfrequency_weights: A tensor of positive weights which can be\nbroadcast with x.\nname: Name used to scope the operation.\nkeep_dims: Produce moments with the same dimensionality as the input.\nkeepdims: Alias of keep_dims.\n\nReturns:\nTwo tensors: `weighted_mean` and `weighted_variance`.", "source": "github-repos"}
{"code": "def _add_tests(self, testcases):\n        \n        def _add_test(test_runner, test_dict):\n            \n            def test(self):\n                try:\n                    test_runner.run_test(test_dict)\n                except exceptions.MyBaseFailure as ex:\n                    self.fail(str(ex))\n                finally:\n                    self.meta_datas = test_runner.meta_datas\n\n            if \"config\" in test_dict:\n                \n                test.__doc__ = test_dict[\"config\"].get(\"name\")\n                variables = test_dict[\"config\"].get(\"variables\", {})\n            else:\n                \n                test.__doc__ = test_dict.get(\"name\")\n                variables = test_dict.get(\"variables\", {})\n\n            if isinstance(test.__doc__, parser.LazyString):\n                parsed_variables = parser.parse_variables_mapping(variables, ignore=True)\n                test.__doc__ = parser.parse_lazy_data(\n                    test.__doc__, parsed_variables)\n\n            return test\n\n        test_suite = unittest.TestSuite()\n        for testcase in testcases:\n            config = testcase.get(\"config\", {})\n            test_runner = runner.Runner(config)\n            TestSequense = type('TestSequense', (unittest.TestCase,), {})\n\n            tests = testcase.get(\"teststeps\", [])\n            for index, test_dict in enumerate(tests):\n                for times_index in range(int(test_dict.get(\"times\", 1))):\n                    \n                    \n                    test_method_name = 'test_{:04}_{:03}'.format(index, times_index)\n                    test_method = _add_test(test_runner, test_dict)\n                    setattr(TestSequense, test_method_name, test_method)\n\n            loaded_testcase = self.test_loader.loadTestsFromTestCase(TestSequense)\n            setattr(loaded_testcase, \"config\", config)\n            setattr(loaded_testcase, \"teststeps\", tests)\n            setattr(loaded_testcase, \"runner\", test_runner)\n            test_suite.addTest(loaded_testcase)\n\n        return test_suite", "docstring": "initialize testcase with Runner() and add to test suite.\n\nArgs:\ntestcases (list): testcases list.\n\nReturns:\nunittest.TestSuite()", "source": "juraj-google-style"}
{"code": "def index_of(self, value_str):\n    if value_str is None:\n        value_str = ''\n    if value_str in self._string_to_index:\n        return self._string_to_index[value_str]\n    index = len(self._string_table)\n    self._string_table.append(value_str)\n    self._string_to_index[value_str] = index\n    return index", "docstring": "Get index of value_str in the string table.\n\nIf value_str is not in the string table, we will add it at the end\nand then return the new index.\nArgs:\nvalue_str: (string) Value to lookup/add in/to the string table.\n\nReturns:\nIndex of value_str in the string table.", "source": "github-repos"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if (self._common_template_attribute is not None):\n            self._common_template_attribute.write(local_buffer, kmip_version=kmip_version)\n    elif (self._common_template_attribute is not None):\n        attributes = objects.convert_template_attribute_to_attributes(self._common_template_attribute)\n        attributes.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if (self._private_key_template_attribute is not None):\n            self._private_key_template_attribute.write(local_buffer, kmip_version=kmip_version)\n    elif (self._private_key_template_attribute is not None):\n        attributes = objects.convert_template_attribute_to_attributes(self._private_key_template_attribute)\n        attributes.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if (self._public_key_template_attribute is not None):\n            self._public_key_template_attribute.write(local_buffer, kmip_version=kmip_version)\n    elif (self._public_key_template_attribute is not None):\n        attributes = objects.convert_template_attribute_to_attributes(self._public_key_template_attribute)\n        attributes.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(CreateKeyPairRequestPayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the CreateKeyPair request payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def get_tag_html(tag_id):\n    \n    tag_data = get_lazy_tag_data(tag_id)\n    tag = tag_data['tag']\n    args = tag_data['args']\n    kwargs = tag_data['kwargs']\n    lib, tag_name = get_lib_and_tag_name(tag)\n\n    args_str = ''\n    if args:\n        for arg in args:\n            if isinstance(arg, six.string_types):\n                args_str += \"'{0}' \".format(arg)\n            else:\n                args_str += \"{0} \".format(arg)\n\n    kwargs_str = ''\n    if kwargs:\n        for name, value in kwargs.items():\n            if isinstance(value, six.string_types):\n                kwargs_str += \"{0}='{1}' \".format(name, value)\n            else:\n                kwargs_str += \"{0}={1} \".format(name, value)\n\n    html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(\n        lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)\n\n    return html", "docstring": "Returns the Django HTML to load the tag library and render the tag.\n\nArgs:\ntag_id (str): The tag id for the to return the HTML for.", "source": "juraj-google-style"}
{"code": "def encrypt(self, plainText):\n    encryptedResult = ''\n    for index in range(0, len(plainText), BLOCK_SIZE):\n        block = plainText[index:(index + BLOCK_SIZE)]\n        if (len(block) < BLOCK_SIZE):\n            block = zero_pad(block, BLOCK_SIZE)\n        encryptedResult += self.encrypt_block(block)\n    return encryptedResult", "docstring": "Encrypt an arbitrary-length block of data.\n\nNOTE: This function formerly worked only on 16-byte blocks of `plainText`.\ncode that assumed this should still work fine, but can optionally be\nmodified to call `encrypt_block` instead.\n\nArgs:\nplainText (str): data to encrypt. If the data is not a multiple of 16\nbytes long, it will be padded with null (0x00) bytes until it is.\n\nReturns:\nencrypted data. Note that this will always be a multiple of 16 bytes\nlong.", "source": "codesearchnet"}
{"code": "def setup(self, dna_spec: geno.DNASpec) -> None:\n    self._dna_spec = dna_spec", "docstring": "Setup states of an early stopping policy based on dna_spec.\n\nArgs:\ndna_spec: DNASpec for DNA to propose.\n\nRaises:\nRuntimeError: if dna_spec is not supported.", "source": "github-repos"}
{"code": "def match(self, url):\n        \n        try:\n            urlSchemes = self._urlSchemes.itervalues() \n        except AttributeError:\n            urlSchemes = self._urlSchemes.values() \n\n        for urlScheme in urlSchemes:\n            if urlScheme.match(url):\n                return True\n        return False", "docstring": "Try to find if url matches against any of the schemes within this\nendpoint.\n\nArgs:\nurl: The url to match against each scheme\n\nReturns:\nTrue if a matching scheme was found for the url, False otherwise", "source": "juraj-google-style"}
{"code": "def first_paragraph_indent(indent_texts):\n    opening_indent = determine_opening_indent(indent_texts)\n    result = []\n    input = iter(indent_texts)\n    for (indent, text) in input:\n        if (indent == 0):\n            result.append((opening_indent, text))\n        else:\n            result.append((indent, text))\n            break\n    for (indent, text) in input:\n        result.append((indent, text))\n    return result", "docstring": "Fix the indentation on the first paragraph.\n\nThis occurs because the first line of a multi-line docstring following the\nopening quote usually has no indent.\n\nArgs:\nindent_texts: The lines of the docstring as an iterable over 2-tuples\neach containing an integer indent level as the first element and\nthe text as the second element.\n\nReturn:\nA list of 2-tuples, each containing an integer indent level as the\nfirst element and the text as the second element.", "source": "codesearchnet"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(inputs=hidden_states)\n    attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training)\n    hidden_states = attention_outputs[0]\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(inputs=hidden_states)\n    hidden_states = self.mlp(hidden_states=hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,) + attention_outputs[1:]\n    return outputs", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\ncausal_attention_mask (`tf.Tensor`): causal attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`):\nWhether or not to return the attentions tensors of all attention layers. See `outputs` under returned\ntensors for more detail.", "source": "github-repos"}
{"code": "def __init__(self, input_energy: energy.BitstringEnergy, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None):\n    super().__init__(name=name)\n    self._energy = input_energy\n    self._energy.build([None, self._energy.num_bits])\n    self._tracked_variables = input_energy.variables\n    if len(self._tracked_variables) == 0:\n        self._checkpoint = False\n    else:\n        self._tracked_variables_checkpoint = [tf.Variable(v.read_value(), trainable=False) for v in self._tracked_variables]\n        self._checkpoint = True\n    if initial_seed is None:\n        self._update_seed = tf.Variable(True, trainable=False)\n    else:\n        self._update_seed = tf.Variable(False, trainable=False)\n    self._seed = tf.Variable(tfp.random.sanitize_seed(initial_seed), trainable=False)\n    self._first_inference = tf.Variable(True, trainable=False)", "docstring": "Initializes an EnergyInferenceBase.\n\nArgs:\ninput_energy: The parameterized energy function which defines this\ndistribution via the equations of an energy based model.  This class\nassumes that all parameters of `energy` are `tf.Variable`s and that\nthey are all returned by `energy.variables`.\ninitial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This\nseed will be used in the `sample` method.  If None, the seed is updated\nafter every inference call.  Otherwise, the seed is fixed.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "async def set_typing(self, typing=hangouts_pb2.TYPING_TYPE_STARTED):\n    try:\n        (await self._client.set_typing(hangouts_pb2.SetTypingRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), type=typing)))\n    except exceptions.NetworkError as e:\n        logger.warning('Failed to set typing status: {}'.format(e))\n        raise", "docstring": "Set your typing status in this conversation.\n\nArgs:\ntyping: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``,\nor ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing,\nrespectively. Defaults to ``TYPING_TYPE_STARTED``.\n\nRaises:\n.NetworkError: If typing status cannot be set.", "source": "codesearchnet"}
{"code": "def show_qouts(self, nids=None, stream=sys.stdout):\n        \n        lines = []\n\n        for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):\n            header = \"=== \" + task.qout_file.path + \"===\"\n            lines.append(header)\n            if task.qout_file.exists:\n                with open(task.qout_file.path, \"rt\") as fh:\n                    lines += fh.readlines()\n            else:\n                lines.append(\"File does not exist!\")\n\n            lines.append(\"=\" * len(header) + 2*\"\\n\")\n\n        return stream.writelines(lines)", "docstring": "Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.\n\nArgs:\nnids: optional list of node identifiers used to filter the tasks.\nstream: File-like object. Default: sys.stdout", "source": "juraj-google-style"}
{"code": "def add_backdoor(self, backdoor_name, source, reference, method='', aliases=[], version='', campaign='', confidence='', description='', bucket_list=[]):\n    data = {'api_key': self.api_key, 'username': self.username, 'source': source, 'reference': reference, 'method': method, 'name': backdoor_name, 'aliases': ','.join(aliases), 'version': version, 'campaign': campaign, 'confidence': confidence, 'bucket_list': bucket_list, 'description': description}\n    r = requests.post('{0}/backdoors/'.format(self.url), data=data, verify=self.verify, proxies=self.proxies)\n    if (r.status_code == 200):\n        result_data = json.loads(r.text)\n        return result_data\n    else:\n        log.error('Error with status code {0} and message {1}'.format(r.status_code, r.text))\n    return None", "docstring": "Add a backdoor object to CRITs.\n\nArgs:\nbackdoor_name: The primary name of the backdoor\nsource: Source of the information\nreference: A reference where more information can be found\nmethod: The method for obtaining the backdoor information.\naliases: List of aliases for the backdoor.\nversion: Version\ncampaign: An associated campaign\nconfidence: The campaign confidence\ndescription: A description of the email\nbucket_list: A list of bucket list items to add", "source": "codesearchnet"}
{"code": "class Wav2Vec2CTCTokenizerOutput(ModelOutput):\n    text: Union[List[str], str]\n    char_offsets: Union[List[ListOfDict], ListOfDict] = None\n    word_offsets: Union[List[ListOfDict], ListOfDict] = None", "docstring": "Output type of [` Wav2Vec2CTCTokenizer`], with transcription.\n\nArgs:\ntext (list of `str` or `str`):\nDecoded logits in text from. Usually the speech transcription.\nchar_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):\nOffsets of the decoded characters. In combination with sampling rate and model downsampling rate char\noffsets can be used to compute time stamps for each character. Total logit score of the beam associated with\nproduced text.\nword_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):\nOffsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets\ncan be used to compute time stamps for each word.", "source": "github-repos"}
{"code": "def _is_trivial_angle(rad: float, atol: float) -> bool:\n    \n    return abs(rad) < atol or abs(abs(rad) - np.pi / 4) < atol", "docstring": "Tests if a circuit for an operator exp(i*rad*XX) (or YY, or ZZ) can\nbe performed with a whole CZ.\n\nArgs:\nrad: The angle in radians, assumed to be in the range [-pi/4, pi/4]", "source": "juraj-google-style"}
{"code": "def airborne_position_with_ref(msg, lat_ref, lon_ref):\n    \n\n\n    mb = common.hex2bin(msg)[32:]\n\n    cprlat = common.bin2int(mb[22:39]) / 131072.0\n    cprlon = common.bin2int(mb[39:56]) / 131072.0\n\n    i = int(mb[21])\n    d_lat = 360.0/59 if i else 360.0/60\n\n    j = common.floor(lat_ref / d_lat) \\\n        + common.floor(0.5 + ((lat_ref % d_lat) / d_lat) - cprlat)\n\n    lat = d_lat * (j + cprlat)\n\n    ni = common.cprNL(lat) - i\n\n    if ni > 0:\n        d_lon = 360.0 / ni\n    else:\n        d_lon = 360.0\n\n    m = common.floor(lon_ref / d_lon) \\\n        + common.floor(0.5 + ((lon_ref % d_lon) / d_lon) - cprlon)\n\n    lon = d_lon * (m + cprlon)\n\n    return round(lat, 5), round(lon, 5)", "docstring": "Decode airborne position with only one message,\nknowing reference nearby location, such as previously calculated location,\nground station, or airport location, etc. The reference position shall\nbe with in 180NM of the true position.\n\nArgs:\nmsg (string): even message (28 bytes hexadecimal string)\nlat_ref: previous known latitude\nlon_ref: previous known longitude\n\nReturns:\n(float, float): (latitude, longitude) of the aircraft", "source": "juraj-google-style"}
{"code": "def schedule(time: Union[(datetime.time, datetime.datetime)], callback: Callable, *args):\n    dt = _fillDate(time)\n    now = datetime.datetime.now(dt.tzinfo)\n    delay = (dt - now).total_seconds()\n    loop = asyncio.get_event_loop()\n    loop.call_later(delay, callback, *args)", "docstring": "Schedule the callback to be run at the given time with\nthe given arguments.\n\nArgs:\ntime: Time to run callback. If given as :py:class:`datetime.time`\nthen use today as date.\ncallback: Callable scheduled to run.\nargs: Arguments for to call callback with.", "source": "codesearchnet"}
{"code": "def absolute_distance(cls, q0, q1):\n    q0_minus_q1 = (q0 - q1)\n    q0_plus_q1 = (q0 + q1)\n    d_minus = q0_minus_q1.norm\n    d_plus = q0_plus_q1.norm\n    if (d_minus < d_plus):\n        return d_minus\n    else:\n        return d_plus", "docstring": "Quaternion absolute distance.\n\nFind the distance between two quaternions accounting for the sign ambiguity.\n\nParams:\nq0: the first quaternion\nq1: the second quaternion\n\nReturns:\nA positive scalar corresponding to the chord of the shortest path/arc that\nconnects q0 to q1.\n\nNote:\nThis function does not measure the distance on the hypersphere, but\nit takes into account the fact that q and -q encode the same rotation.\nIt is thus a good indicator for rotation similarities.", "source": "codesearchnet"}
{"code": "def create_string(self, key, value):\n    data = None\n    if ((key is not None) and (value is not None)):\n        if isinstance(value, (bool, list, int, dict)):\n            value = u'{}'.format(value)\n        data = self.db.create(key.strip(), u'{}'.format(json.dumps(value)))\n    else:\n        self.tcex.log.warning(u'The key or value field was None.')\n    return data", "docstring": "Create method of CRUD operation for string data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"}
{"code": "def get(self, report_id):\n    return Report(self._app, self._swimlane.request('get', 'reports/{0}'.format(report_id)).json())", "docstring": "Retrieve report by ID\n\nArgs:\nreport_id (str): Full report ID\n\nReturns:\nReport: Corresponding Report instance", "source": "codesearchnet"}
{"code": "def reset_logger(name, level=None, handler=None):\n    if (level is None):\n        level = logging.INFO\n    logger = logging.getLogger(name)\n    logger.setLevel(level)\n    handler = (handler or logging.StreamHandler())\n    handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT))\n    logger.handlers = [handler]\n    return logger", "docstring": "Make a standard python logger object with default formatter, handler, etc.\n\nDefaults are:\n- level == logging.INFO\n- handler == logging.StreamHandler()\n\nArgs:\nname: a logger name.\nlevel: an optional initial log level for this logger.\nhandler: an optional initial handler for this logger.\n\nReturns: a standard python logger with a single handler.", "source": "codesearchnet"}
{"code": "def add_lambda_permissions(function='', statement_id='', action='lambda:InvokeFunction', principal='', source_arn='', env='', region='us-east-1'):\n    session = boto3.Session(profile_name=env, region_name=region)\n    lambda_client = session.client('lambda')\n    response_action = None\n    prefixed_sid = (FOREMAST_PREFIX + statement_id)\n    add_permissions_kwargs = {'FunctionName': function, 'StatementId': prefixed_sid, 'Action': action, 'Principal': principal}\n    if source_arn:\n        add_permissions_kwargs['SourceArn'] = source_arn\n    try:\n        lambda_client.add_permission(**add_permissions_kwargs)\n        response_action = 'Add permission with Sid: {}'.format(prefixed_sid)\n    except boto3.exceptions.botocore.exceptions.ClientError as error:\n        LOG.debug('Add permission error: %s', error)\n        response_action = 'Did not add permissions'\n    LOG.debug('Related StatementId (SID): %s', prefixed_sid)\n    LOG.info(response_action)", "docstring": "Add permission to Lambda for the event trigger.\n\nArgs:\nfunction (str): Lambda function name\nstatement_id (str): IAM policy statement (principal) id\naction (str): Lambda action to allow\nprincipal (str): AWS principal to add permissions\nsource_arn (str): ARN of the source of the event. Only needed for S3\nenv (str): Environment/account of function\nregion (str): AWS region of function", "source": "codesearchnet"}
{"code": "def Serialize(self, writer):\n        \n        try:\n            writer.WriteByte(self.Type)\n            writer.WriteHashes(self.Hashes)\n        except Exception as e:\n            logger.error(f\"COULD NOT WRITE INVENTORY HASHES ({self.Type} {self.Hashes}) {e}\")", "docstring": "Serialize object.\n\nRaises:\nException: if hash writing fails.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def ParseAccountInformation(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    display_name = self._GetRowValue(query_hash, row, 'given_displayname')\n    fullname = self._GetRowValue(query_hash, row, 'fullname')\n    username = '{0!s} <{1!s}>'.format(fullname, display_name)\n    event_data = SkypeAccountEventData()\n    event_data.country = self._GetRowValue(query_hash, row, 'country')\n    event_data.display_name = display_name\n    event_data.email = self._GetRowValue(query_hash, row, 'emails')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.username = username\n    timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Authenticate Request')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Last Online')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Mood Event')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Last Used')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses account information.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row with account information.", "source": "codesearchnet"}
{"code": "def index_impute2(fn):\n    logger.info('Indexing {} (IMPUTE2)'.format(fn))\n    impute2_index(fn, cols=[0, 1, 2], names=['chrom', 'name', 'pos'], sep=' ')\n    logger.info('Index generated')", "docstring": "Indexes an IMPUTE2 file.\n\nArgs:\nfn (str): The name of the IMPUTE2 file.", "source": "codesearchnet"}
{"code": "def display(self, *amplExpressions):\n        \n        exprs = list(map(str, amplExpressions))\n        lock_and_call(\n            lambda: self._impl.displayLst(exprs, len(exprs)),\n            self._lock\n        )", "docstring": "Writes on the current OutputHandler the outcome of the AMPL statement.\n\n.. code-block:: ampl\n\ndisplay e1, e2, .., en;\n\nwhere e1, ..., en are the strings passed to the procedure.\n\nArgs:\namplExpressions: Expressions to be evaluated.", "source": "juraj-google-style"}
{"code": "def delete_box_comment(self, box_key, comment_key):\n\t\t\n\t\t\n\t\tself._raise_unimplemented_error()\n\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.comments_suffix,\n\t\t\t\t\t\tcomment_key\n\t\t\t\t\t\t])\n\t\treturn self._req('delete', uri)", "docstring": "Deletes comment in a box with the comment_key\nArgs:\nbox_key\t\t\tkey for box\nreturn\t\t\t(status code, list of comment dicts)", "source": "juraj-google-style"}
{"code": "def _set_xml_from_keys(self, root, item, **kwargs):\n    (key, val) = item\n    target_key = root.find(key)\n    if (target_key is None):\n        target_key = ElementTree.SubElement(root, key)\n    if isinstance(val, dict):\n        for dict_item in val.items():\n            self._set_xml_from_keys(target_key, dict_item, **kwargs)\n        return\n    if (key in kwargs):\n        kwarg = kwargs[key]\n        if isinstance(kwarg, bool):\n            kwargs[key] = str(kwargs[key]).lower()\n        elif (kwarg is None):\n            kwargs[key] = ''\n        elif isinstance(kwarg, int):\n            kwargs[key] = str(kwargs[key])\n        elif isinstance(kwarg, JSSObject):\n            kwargs[key] = kwargs[key].name\n    target_key.text = kwargs.get(key, val)", "docstring": "Create SubElements of root with kwargs.\n\nArgs:\nroot: Element to add SubElements to.\nitem: Tuple key/value pair from self.data_keys to add.\nkwargs:\nFor each item in self.data_keys, if it has a\ncorresponding kwarg, create a SubElement at root with\nthe kwarg's value.\n\nInt and bool values will be cast to string. (Int 10,\nbool False become string values \"10\" and \"false\").\n\nDicts will be recursively added to their key's Element.", "source": "codesearchnet"}
{"code": "def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: PretrainedConfig, **kwargs):\n    return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`VisionTextDualEncoderConfig`] (or a derived class) from text model configuration and vision\nmodel configuration.\n\nReturns:\n[`VisionTextDualEncoderConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def RetrieveAsset(logdir, plugin_name, asset_name):\n    asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)\n    try:\n        with tf.io.gfile.GFile(asset_path, 'r') as f:\n            return f.read()\n    except tf.errors.NotFoundError:\n        raise KeyError(('Asset path %s not found' % asset_path))\n    except tf.errors.OpError as e:\n        raise KeyError((\"Couldn't read asset path: %s, OpError %s\" % (asset_path, e)))", "docstring": "Retrieve a particular plugin asset from a logdir.\n\nArgs:\nlogdir: A directory that was created by a TensorFlow summary.FileWriter.\nplugin_name: The plugin we want an asset from.\nasset_name: The name of the requested asset.\n\nReturns:\nstring contents of the plugin asset.\n\nRaises:\nKeyError: if the asset does not exist.", "source": "codesearchnet"}
{"code": "def __init__(self, data=''):\n    \n    super(Lexer, self).__init__()\n    self.buffer = data\n    self.error = 0\n    self.flags = 0\n    self.processed = 0\n    self.processed_buffer = ''\n    self.state = self._INITIAL_STATE\n    self.state_stack = []\n    self.verbose = 0", "docstring": "Initializes the lexer object.\n\nArgs:\ndata: optional initial data to be processed by the lexer.", "source": "juraj-google-style"}
{"code": "def look_up(self, **keys: Dict[(InstanceName, ScalarValue)]) -> 'ArrayEntry':\n    if (not isinstance(self.schema_node, ListNode)):\n        raise InstanceValueError(self.json_pointer(), 'lookup on non-list')\n    try:\n        for i in range(len(self.value)):\n            en = self.value[i]\n            flag = True\n            for k in keys:\n                if (en[k] != keys[k]):\n                    flag = False\n                    break\n            if flag:\n                return self._entry(i)\n        raise NonexistentInstance(self.json_pointer(), 'entry lookup failed')\n    except KeyError:\n        raise NonexistentInstance(self.json_pointer(), 'entry lookup failed') from None\n    except TypeError:\n        raise InstanceValueError(self.json_pointer(), 'lookup on non-list') from None", "docstring": "Return the entry with matching keys.\n\nArgs:\nkeys: Keys and values specified as keyword arguments.\n\nRaises:\nInstanceValueError: If the receiver's value is not a YANG list.\nNonexistentInstance: If no entry with matching keys exists.", "source": "codesearchnet"}
{"code": "def update_current_state(self, value: str, force: bool=False) -> datetime:\n    value = value.lower()\n    if (not force):\n        current_state = self.current_state\n        if (current_state == 'unknown'):\n            allowed_transitions = self._allowed_states\n        else:\n            allowed_transitions = self._allowed_transitions[current_state]\n            allowed_transitions.append(current_state)\n        LOG.debug('Updating current state of %s to %s', self._id, value)\n        if (value not in allowed_transitions):\n            raise ValueError(\"Invalid current state update: '{}'. '{}' can be transitioned to states: {}\".format(value, current_state, allowed_transitions))\n    return self._update_state('current', value)", "docstring": "Update the current state.\n\nArgs:\nvalue (str): New value for sdp state\nforce (bool): If true, ignore allowed transitions\n\nReturns:\ndatetime, update timestamp\n\nRaises:\nValueError: If the specified current state is not allowed.", "source": "codesearchnet"}
{"code": "def is_compatible_with(self, spec_or_tensor):\n    return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and self._shape.is_compatible_with(spec_or_tensor.shape))", "docstring": "Returns True if spec_or_tensor is compatible with this TensorSpec.\n\nTwo tensors are considered compatible if they have the same dtype\nand their shapes are compatible (see `tf.TensorShape.is_compatible_with`).\n\nArgs:\nspec_or_tensor: A tf.TensorSpec or a tf.Tensor\n\nReturns:\nTrue if spec_or_tensor is compatible with self.", "source": "codesearchnet"}
{"code": "def datasets_update(self, dataset_name, dataset_info):\n    url = (Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name))\n    return datalab.utils.Http.request(url, method='PUT', data=dataset_info, credentials=self._credentials)", "docstring": "Updates the Dataset info.\n\nArgs:\ndataset_name: the name of the dataset to update as a tuple of components.\ndataset_info: the Dataset resource with updated fields.", "source": "codesearchnet"}
{"code": "def from_ase_atoms(cls, atoms):\n        \n        return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions)", "docstring": "Create an instance of the own class from an ase molecule\n\nArgs:\nmolecule (:class:`ase.atoms.Atoms`):\n\nReturns:\nCartesian:", "source": "juraj-google-style"}
{"code": "def _flush(self, buffer, start, end):\n        \n        buffer_size = len(buffer)\n        if not buffer_size:\n            return\n\n        \n        with self._size_lock:\n            if end > self._size:\n                \n                with _handle_azure_exception():\n                    self._resize(content_length=end, **self._client_kwargs)\n                self._reset_head()\n\n        if buffer_size > self.MAX_FLUSH_SIZE:\n            \n            futures = []\n            for part_start in range(0, buffer_size, self.MAX_FLUSH_SIZE):\n\n                \n                buffer_part = buffer[\n                      part_start:part_start + self.MAX_FLUSH_SIZE]\n                if not len(buffer_part):\n                    \n                    break\n\n                \n                start_range = start + part_start\n                futures.append(self._workers.submit(\n                    self._update_range, data=buffer_part.tobytes(),\n                    start_range=start_range,\n                    end_range=start_range + len(buffer_part) - 1,\n                    **self._client_kwargs))\n\n            with _handle_azure_exception():\n                \n                for future in _as_completed(futures):\n                    future.result()\n\n        else:\n            \n            with _handle_azure_exception():\n                self._update_range(\n                    data=buffer.tobytes(), start_range=start,\n                    end_range=end - 1, **self._client_kwargs)", "docstring": "Flush the write buffer of the stream if applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.\nstart (int): Start of buffer position to flush.\nSupported only with page blobs.\nend (int): End of buffer position to flush.\nSupported only with page blobs.", "source": "juraj-google-style"}
{"code": "def _step(time, output_ta_t, prev_output, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = nest.pack_sequence_as(inputs, current_input)\n    mask_t = masking_fn(time)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_output = nest.flatten(output)\n    flat_mask_output = flat_zero_output if zero_output_for_mask else nest.flatten(prev_output)\n    flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n    flat_state = nest.flatten(states)\n    flat_new_state = nest.flatten(new_states)\n    for state, new_state in zip(flat_state, flat_new_state):\n        if isinstance(new_state, tensor_lib.Tensor):\n            new_state.set_shape(state.shape)\n    flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n    new_states = nest.pack_sequence_as(new_states, flat_final_state)\n    output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_new_output)))\n    return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\nprev_output: tuple of outputs from time - 1.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1, output_ta_t, output) + tuple(new_states)`", "source": "github-repos"}
{"code": "def __field_to_parameter_type_and_format(self, field):\n    \n    \n    variant = field.variant\n    if variant == messages.Variant.MESSAGE:\n      raise TypeError('A message variant cannot be used in a parameter.')\n\n    \n    \n    \n\n    return CUSTOM_VARIANT_MAP.get(variant) or (variant.name.lower(), None)", "docstring": "Converts the field variant type into a tuple describing the parameter.\n\nArgs:\nfield: An instance of a subclass of messages.Field.\n\nReturns:\nA tuple with the type and format of the field, respectively.\n\nRaises:\nTypeError: if the field variant is a message variant.", "source": "juraj-google-style"}
{"code": "def try_claim(self, position):\n    raise NotImplementedError", "docstring": "Attempts to claim the block of work in the current restriction\nidentified by the given position. Each claimed position MUST be a valid\nsplit point.\n\nIf this succeeds, the DoFn MUST execute the entire block of work. If it\nfails, the ``DoFn.process()`` MUST return ``None`` without performing any\nadditional work or emitting output (note that emitting output or performing\nwork from ``DoFn.process()`` is also not allowed before the first call of\nthis method).\n\nThe API is required to be implemented.\n\nArgs:\nposition: current position that wants to be claimed.\n\nReturns: ``True`` if the position can be claimed as current_position.\nOtherwise, returns ``False``.", "source": "github-repos"}
{"code": "def unnest_collection(collection, df_list):\n    for item in collection['link']['item']:\n        if (item['class'] == 'dataset'):\n            df_list.append(Dataset.read(item['href']).write('dataframe'))\n        elif (item['class'] == 'collection'):\n            nested_collection = request(item['href'])\n            unnest_collection(nested_collection, df_list)", "docstring": "Unnest collection structure extracting all its datasets and converting \\\nthem to Pandas Dataframes.\n\nArgs:\ncollection (OrderedDict): data in JSON-stat format, previously \\\ndeserialized to a python object by \\\njson.load() or json.loads(),\ndf_list (list): list variable which will contain the converted \\\ndatasets.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def _get_model_reference(self, model_id):\n    return ModelReference.from_api_repr({'projectId': self.project, 'datasetId': self.dataset_id, 'modelId': model_id})", "docstring": "Constructs a ModelReference.\n\nArgs:\nmodel_id (str): the ID of the model.\n\nReturns:\ngoogle.cloud.bigquery.model.ModelReference:\nA ModelReference for a model in this dataset.", "source": "codesearchnet"}
{"code": "def from_string(contents):\n    lines = contents.split('\\n')\n    num_sites = int(lines[0])\n    coords = []\n    sp = []\n    prop = []\n    coord_patt = re.compile(('(\\\\w+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+' + '([0-9\\\\-\\\\.]+)'))\n    for i in range(2, (2 + num_sites)):\n        m = coord_patt.search(lines[i])\n        if m:\n            sp.append(m.group(1))\n            coords.append([float(j) for j in [m.group(i) for i in [3, 4, 2]]])\n            prop.append(float(m.group(5)))\n    return ZeoVoronoiXYZ(Molecule(sp, coords, site_properties={'voronoi_radius': prop}))", "docstring": "Creates Zeo++ Voronoi XYZ object from a string.\nfrom_string method of XYZ class is being redefined.\n\nArgs:\ncontents: String representing Zeo++ Voronoi XYZ file.\n\nReturns:\nZeoVoronoiXYZ object", "source": "codesearchnet"}
{"code": "def _step(time, output_ta_t, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = nest.pack_sequence_as(inputs, current_input)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_state = nest.flatten(states)\n    flat_new_state = nest.flatten(new_states)\n    for state, new_state in zip(flat_state, flat_new_state):\n        if isinstance(new_state, tensor_lib.Tensor):\n            new_state.set_shape(state.shape)\n    flat_output = nest.flatten(output)\n    output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, flat_output)))\n    new_states = nest.pack_sequence_as(initial_states, flat_new_state)\n    return (time + 1, output_ta_t) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1,output_ta_t) + tuple(new_states)`", "source": "github-repos"}
{"code": "def annotate_source(source, ast_module, pytype_options):\n    source_code = infer_types(source, pytype_options)\n    module = ast_module.parse(source, pytype_options.input)\n    visitor = AnnotateAstVisitor(source_code, ast_module)\n    visitor.visit(module)\n    return module", "docstring": "Infer types for `source`, and return an AST of it with types added.\n\nArgs:\nsource: Text, the source code to type-infer and parse to an AST.\nast_module: An ast-module like object used to parse the source to an AST\nand traverse the created ast.Module object.\npytype_options: pytype.config.Options, the options to pass onto Pytype.\n\nReturns:\nThe created Module object from what `ast_factory` returned.", "source": "github-repos"}
{"code": "def broadcast_weights(weights, values):\n    with ops.name_scope(None, 'broadcast_weights', (weights, values)) as scope:\n        values = ops.convert_to_tensor(values, name='values')\n        weights = ops.convert_to_tensor(weights, dtype=values.dtype.base_dtype, name='weights')\n        weights_shape = weights.get_shape()\n        values_shape = values.get_shape()\n        if weights_shape.is_fully_defined() and values_shape.is_fully_defined() and weights_shape.is_compatible_with(values_shape):\n            return weights\n        if control_flow_ops.get_enclosing_xla_context() is not None:\n            return math_ops.multiply(weights, array_ops.ones_like(values), name=scope)\n        with ops.control_dependencies((assert_broadcastable(weights, values),)):\n            return math_ops.multiply(weights, array_ops.ones_like(values), name=scope)", "docstring": "Broadcast `weights` to the same shape as `values`.\n\nThis returns a version of `weights` following the same broadcast rules as\n`mul(weights, values)`, but limited to the weights shapes allowed by\n`assert_broadcastable`. When computing a weighted average, use this function\nto broadcast `weights` before summing them; e.g.,\n`reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.\n\nArgs:\nweights: `Tensor` whose shape is broadcastable to `values` according to the\nrules of `assert_broadcastable`.\nvalues: `Tensor` of any shape.\n\nReturns:\n`weights` broadcast to `values` shape according to the rules of\n`assert_broadcastable`.", "source": "github-repos"}
{"code": "def to_dense_one_hot(labels, class_count):\n  \n  if not isinstance(class_count, tf.compat.integral_types):\n    raise TypeError('class_count must be an integer type.')\n  if labels.dtype.base_dtype not in (tf.int32, tf.int64):\n    raise TypeError('Labels must be an integer: %s' % labels.dtype)\n  if labels.get_shape().ndims != 1:\n    raise ValueError('Labels must be a rank 1 tensor: %s' % labels.get_shape())\n\n  dtype = labels.dtype.base_dtype\n  class_tensor = tf.convert_to_tensor(\n      class_count, dtype=dtype, name='class_count')\n\n  \n  batch = tf.gather(tf.shape(labels), 0)\n  count = tf.expand_dims(tf.range(0, limit=batch), 1)\n  labels = tf.expand_dims(labels, 1)\n  batch = tf.gather(tf.shape(labels), 0)\n\n  if dtype != tf.int32:\n    count = tf.cast(count, dtype)\n    batch = tf.cast(batch, dtype)\n\n  result = tf.sparse_to_dense(\n      tf.concat([count, labels], 1),\n      tf.concat([tf.expand_dims(batch, 0), tf.expand_dims(class_tensor, 0)], 0),\n      1.0, 0.0)\n  result.set_shape([labels.get_shape().dims[0], class_count])\n  return result", "docstring": "Converts a vector that specified one-hot per batch into a dense version.\n\nArgs:\nlabels: The labels input.\nclass_count: The number of classes as an int.\nReturns:\nOne dense vector for each item in the batch.\nRaises:\nValueError: If labels is not rank 1.\nTypeError: If class_count is not an integer or labels is not an integer\nTensor.", "source": "juraj-google-style"}
{"code": "def pretty_printer_for_analytics(cls, primitive_handler_: primitive_handler.PrimitiveHandler, indent_size: int) -> 'JsonPrinter':\n    return cls(primitive_handler_, _PrettyJsonTextGenerator(indent_size), _FhirJsonFormat.ANALYTIC)", "docstring": "Returns a printer for Analytic FHIR JSON with spaces and newlines.\n\nArgs:\nprimitive_handler_: Responsible for returning PrimitiveWrappers.\nindent_size: The size of space indentation for lexical scoping.", "source": "github-repos"}
{"code": "def seek_to_beginning(self, *partitions):\n    if (not all([isinstance(p, TopicPartition) for p in partitions])):\n        raise TypeError('partitions must be TopicPartition namedtuples')\n    if (not partitions):\n        partitions = self._subscription.assigned_partitions()\n        assert partitions, 'No partitions are currently assigned'\n    else:\n        for p in partitions:\n            assert (p in self._subscription.assigned_partitions()), 'Unassigned partition'\n    for tp in partitions:\n        log.debug('Seeking to beginning of partition %s', tp)\n        self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)", "docstring": "Seek to the oldest available offset for partitions.\n\nArguments:\n*partitions: Optionally provide specific TopicPartitions, otherwise\ndefault to all assigned partitions.\n\nRaises:\nAssertionError: If any partition is not currently assigned, or if\nno partitions are assigned.", "source": "codesearchnet"}
{"code": "def find_contour_yaml(config_file=__file__, names=None):\n    \n    checked = set()\n    contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked,\n                                       names=names)\n\n    if not contour_yaml:\n        contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names)\n\n    return contour_yaml", "docstring": "Traverse directory trees to find a contour.yaml file\n\nBegins with the location of this file then checks the\nworking directory if not found\n\nArgs:\nconfig_file: location of this file, override for\ntesting\nReturns:\nthe path of contour.yaml or None if not found", "source": "juraj-google-style"}
{"code": "def sg_prod(tensor, opt):\n    return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the product of elements across axis of a tensor.\n\nSee `tf.reduce_prod()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "codesearchnet"}
{"code": "def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):\n    self._filenames = filenames\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n    self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)\n    self._name = name\n    variant_tensor = gen_dataset_ops.text_line_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString())\n    super(_TextLineDataset, self).__init__(variant_tensor)", "docstring": "Creates a `TextLineDataset`.\n\nArgs:\nfilenames: A `tf.string` tensor containing one or more filenames.\ncompression_type: (Optional.) A `tf.string` scalar evaluating to one of\n`\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\nbuffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes\nto buffer. A value of 0 results in the default buffering values chosen\nbased on the compression type.\nname: (Optional.) A name for the tf.data operation.", "source": "github-repos"}
{"code": "def moves_from_last_n_games(self, n, moves, shuffle, column_family, column):\n    self.wait_for_fresh_games()\n    latest_game = self.latest_game_number\n    utils.dbg(('Latest game in %s: %s' % (self.btspec.table, latest_game)))\n    if (latest_game == 0):\n        raise ValueError('Cannot find a latest game in the table')\n    start = int(max(0, (latest_game - n)))\n    ds = self.moves_from_games(start, latest_game, moves, shuffle, column_family, column)\n    return ds", "docstring": "Randomly choose a given number of moves from the last n games.\n\nArgs:\nn:  number of games at the end of this GameQueue to source.\nmoves:  number of moves to be sampled from `n` games.\nshuffle:  if True, shuffle the selected moves.\ncolumn_family:  name of the column family containing move examples.\ncolumn:  name of the column containing move examples.\n\nReturns:\na dataset containing the selected moves.", "source": "codesearchnet"}
{"code": "def get_value_set(self, value_set_url: str) -> Optional[_ValueSetT]:", "docstring": "Returns the ValueSet identified by the given URL.\n\nArgs:\nvalue_set_url: The URL for the FHIR ValueSet to be returned.\n\nReturns:\nThe corresponding value set, or None if no such value set exists.", "source": "github-repos"}
{"code": "def mirror_pull(self, **kwargs):\n    path = ('/projects/%s/mirror/pull' % self.get_id())\n    self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Start the pull mirroring process for the project.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def get_package_from_string(txt, paths=None):\n    \n    o = VersionedObject(txt)\n    return get_package(o.name, o.version, paths=paths)", "docstring": "Get a package given a string.\n\nArgs:\ntxt (str): String such as 'foo', 'bah-1.3'.\npaths (list of str, optional): paths to search for package, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` instance, or None if no package was found.", "source": "juraj-google-style"}
{"code": "def fail_api(channel):\n    gui = ui_embed.UI(channel, \"Couldn't get stats off RLTrackerNetwork.\", 'Maybe the API changed, please tell Infraxion.', modulename=modulename, colour=35071)\n    return gui", "docstring": "Creates an embed UI for when the API call didn't work\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "codesearchnet"}
{"code": "def require_meta_and_content(self, content_handler, params, **kwargs):\n    meta = {'params': params}\n    content = content_handler(params, meta, **kwargs)\n    meta['params'] = params\n    return (meta, content)", "docstring": "Require 'meta' and 'content' dictionaries using proper hander.\n\nArgs:\ncontent_handler (callable): function that accepts\n``params, meta, **kwargs`` argument and returns dictionary\nfor ``content`` response section\nparams (dict): dictionary of parsed resource parameters\nkwargs (dict): dictionary of values created from resource url\ntemplate\n\nReturns:\ntuple (meta, content): two-tuple with dictionaries of ``meta`` and\n``content`` response sections", "source": "codesearchnet"}
{"code": "def CheckVersion(problems, latest_version=None):\n    if (not latest_version):\n        timeout = 20\n        socket.setdefaulttimeout(timeout)\n        request = urllib2.Request(LATEST_RELEASE_VERSION_URL)\n        try:\n            response = urllib2.urlopen(request)\n            content = response.read()\n            m = re.search('version=(\\\\d+\\\\.\\\\d+\\\\.\\\\d+)', content)\n            if m:\n                latest_version = m.group(1)\n        except urllib2.HTTPError as e:\n            description = ('During the new-version check, we failed to reach transitfeed server: Reason: %s [%s].' % (e.reason, e.code))\n            problems.OtherProblem(description=description, type=errors.TYPE_NOTICE)\n            return\n        except urllib2.URLError as e:\n            description = ('During the new-version check, we failed to reach transitfeed server. Reason: %s.' % e.reason)\n            problems.OtherProblem(description=description, type=errors.TYPE_NOTICE)\n            return\n    if (not latest_version):\n        description = ('During the new-version check, we had trouble parsing the contents of %s.' % LATEST_RELEASE_VERSION_URL)\n        problems.OtherProblem(description=description, type=errors.TYPE_NOTICE)\n        return\n    newest_version = _MaxVersion([latest_version, __version__])\n    if (__version__ != newest_version):\n        problems.NewVersionAvailable(newest_version)", "docstring": "Check if there is a newer version of transitfeed available.\n\nArgs:\nproblems: if a new version is available, a NewVersionAvailable problem will\nbe added\nlatest_version: if specified, override the latest version read from the\nproject page", "source": "codesearchnet"}
{"code": "def run_defense_work(self, work_id):\n    class_batch_id = self.defense_work.work[work_id]['output_classification_batch_id']\n    class_batch = self.class_batches.read_batch_from_datastore(class_batch_id)\n    adversarial_batch_id = class_batch['adversarial_batch_id']\n    submission_id = class_batch['submission_id']\n    cloud_result_path = class_batch['result_path']\n    logging.info('Defense work piece: adversarial_batch_id=\"%s\" submission_id=\"%s\"', adversarial_batch_id, submission_id)\n    if (submission_id in self.blacklisted_submissions):\n        raise WorkerError('Blacklisted submission')\n    defense = DefenseSubmission(submission_id, self.submissions, self.storage_bucket)\n    defense.download()\n    input_dir = os.path.join(LOCAL_INPUT_DIR, adversarial_batch_id)\n    if os.path.exists(input_dir):\n        sudo_remove_dirtree(input_dir)\n    os.makedirs(input_dir)\n    try:\n        shell_call(['gsutil', '-m', 'cp', os.path.join('gs:\n        adv_images_files = os.listdir(input_dir)\n        if ((len(adv_images_files) == 1) and adv_images_files[0].endswith('.zip')):\n            logging.info('Adversarial batch is in zip archive %s', adv_images_files[0])\n            shell_call(['unzip', os.path.join(input_dir, adv_images_files[0]), '-d', input_dir])\n            os.remove(os.path.join(input_dir, adv_images_files[0]))\n            adv_images_files = os.listdir(input_dir)\n        logging.info('%d adversarial images copied', len(adv_images_files))\n    except (subprocess.CalledProcessError, IOError) as e:\n        raise WorkerError('Cant copy adversarial batch locally', e)\n    if os.path.exists(LOCAL_OUTPUT_DIR):\n        sudo_remove_dirtree(LOCAL_OUTPUT_DIR)\n    os.mkdir(LOCAL_OUTPUT_DIR)\n    output_filname = os.path.join(LOCAL_OUTPUT_DIR, 'result.csv')\n    elapsed_time_sec = defense.run(input_dir, output_filname)\n    batch_result = eval_lib.analyze_one_classification_result(storage_client=None, file_path=output_filname, adv_batch=self.adv_batches.data[adversarial_batch_id], dataset_batches=self.dataset_batches, dataset_meta=self.dataset_meta)\n    try:\n        shell_call(['gsutil', 'cp', output_filname, os.path.join('gs:\n    except subprocess.CalledProcessError as e:\n        raise WorkerError('Cant result to Cloud Storage', e)\n    return (elapsed_time_sec, submission_id, batch_result)", "docstring": "Runs one defense work.\n\nArgs:\nwork_id: ID of the piece of work to run\n\nReturns:\nelapsed_time_sec, submission_id - elapsed time and id of the submission\n\nRaises:\nWorkerError: if error occurred during execution.", "source": "codesearchnet"}
{"code": "def verify_key_in_shelve(file_name, save_key, file_location):\n    \n    file = __os.path.join(file_location, file_name)\n    shelve_store = __shelve.open(file)\n    exists = shelve_store.get(save_key)\n    shelve_store.close()\n    if exists:\n        return True\n\n    elif not exists:\n        return False", "docstring": "Function to check for a key in a shelve\nArgs:\nfile_name: Shelve storage file name\nsave_key: The name of the key the item is stored in\nfile_location: The location of the file, derive from the os module\n\nReturns: returns true or false", "source": "juraj-google-style"}
{"code": "def __init__(self, config: Dict) -> None:\n        \n\n        self.fields_dict = dict()\n        try:\n            for field in config[\"fields\"]:\n                if config[\"fields\"][field][\"type\"] == \"kg_id\":\n                    self.fields_dict[field] = FieldType.KG_ID\n                elif config[\"fields\"][field][\"type\"] == \"number\":\n                    self.fields_dict[field] = FieldType.NUMBER\n                elif config[\"fields\"][field][\"type\"] == \"date\":\n                    self.fields_dict[field] = FieldType.DATE\n                elif config[\"fields\"][field][\"type\"] == \"location\":\n                    self.fields_dict[field] = FieldType.LOCATION\n                else:\n                    self.fields_dict[field] = FieldType.STRING\n\n        except KeyError as key:\n            print(str(key) + \" not in config\")", "docstring": "Record a mapping about each fields and its type from config file\n\nArgs:\nconfig: Dict", "source": "juraj-google-style"}
{"code": "def dependency_of_targets(targets, op):\n    if isinstance(op, tf.Tensor):\n        op = op.op\n    assert isinstance(op, tf.Operation), op\n    from tensorflow.contrib.graph_editor import get_backward_walk_ops\n    dependent_ops = get_backward_walk_ops(targets, control_inputs=True)\n    return (op in dependent_ops)", "docstring": "Check that op is in the subgraph induced by the dependencies of targets.\nThe result is memoized.\n\nThis is useful if some SessionRunHooks should be run only together with certain ops.\n\nArgs:\ntargets: a tuple of ops or tensors. The targets to find dependencies of.\nop (tf.Operation or tf.Tensor):\n\nReturns:\nbool: True if any one of `targets` depend on `op`.", "source": "codesearchnet"}
{"code": "def get_model_indexes(model):\n    \n    indexes = []\n    for index in get_index_names():\n        for app_model in get_index_models(index):\n            if app_model == model:\n                indexes.append(index)\n    return indexes", "docstring": "Return list of all indexes in which a model is configured.\n\nA model may be configured to appear in multiple indexes. This function\nwill return the names of the indexes as a list of strings. This is\nuseful if you want to know which indexes need updating when a model\nis saved.\n\nArgs:\nmodel: a Django model class.", "source": "juraj-google-style"}
{"code": "def get(self, container_id):\n        \n        resp = self.client.api.inspect_container(container_id)\n        return self.prepare_model(resp)", "docstring": "Get a container by name or ID.\n\nArgs:\ncontainer_id (str): Container name or ID.\n\nReturns:\nA :py:class:`Container` object.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the container does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _kl_half_normal_half_normal(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_half_normal_half_normal\"):\n    \n    \n    return (tf.math.log(b.scale) - tf.math.log(a.scale) +\n            (a.scale**2 - b.scale**2) / (2 * b.scale**2))", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.\n\nArgs:\na: Instance of a `HalfNormal` distribution object.\nb: Instance of a `HalfNormal` distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_half_normal_half_normal\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "async def do_upload(context, files):\n    \n    status = 0\n    try:\n        await upload_artifacts(context, files)\n    except ScriptWorkerException as e:\n        status = worst_level(status, e.exit_code)\n        log.error(\"Hit ScriptWorkerException: {}\".format(e))\n    except aiohttp.ClientError as e:\n        status = worst_level(status, STATUSES['intermittent-task'])\n        log.error(\"Hit aiohttp error: {}\".format(e))\n    except Exception as e:\n        log.exception(\"SCRIPTWORKER_UNEXPECTED_EXCEPTION upload {}\".format(e))\n        raise\n    return status", "docstring": "Upload artifacts and return status.\n\nReturns the integer status of the upload.\n\nargs:\ncontext (scriptworker.context.Context): the scriptworker context.\nfiles (list of str): list of files to be uploaded as artifacts\n\nRaises:\nException: on unexpected exception.\n\nReturns:\nint: exit status", "source": "juraj-google-style"}
{"code": "def export_ply(filename, cutout, level=0):\n    \n    if \".ply\" not in filename:\n        filename = filename + \".ply\"\n\n    vs, fs = mcubes.marching_cubes(cutout, level)\n\n    with open(filename, 'w') as fh:\n        lines = [\n            \"ply\"\n            \"format ascii 1.0\",\n            \"comment generated by ndio\",\n            \"element vertex \" + str(len(vs)),\n            \"property float32 x\",\n            \"property float32 y\",\n            \"property float32 z\",\n            \"element face \" + str(len(fs)),\n            \"property list uint8 int32 vertex_index\",\n            \"end_header\"\n        ]\n        fh.writelines(lines)\n        for v in vs:\n            fh.write(\"{} {} {}\".format(v[0], v[1], v[2]))\n        for f in fs:\n            fh.write(\"3 {} {} {}\".format(f[0], f[1], f[2]))", "docstring": "Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).\n\nArguments:\nfilename (str): The filename to write out to\ncutout (numpy.ndarray): The dense annotation\nlevel (int): The level at which to run mcubes\n\nReturns:\nboolean success", "source": "juraj-google-style"}
{"code": "def send(self, content_type='HTML'):\n    payload = self.api_representation(content_type)\n    endpoint = 'https:\n    self._make_api_call('post', endpoint=endpoint, data=json.dumps(payload))", "docstring": "Takes the recipients, body, and attachments of the Message and sends.\n\nArgs:\ncontent_type: Can either be 'HTML' or 'Text', defaults to HTML.", "source": "codesearchnet"}
{"code": "def assert_array_lines_close(test, expected_array, array_lines):\n    elements = []\n    for line in array_lines:\n        line = re.sub(_ARRAY_VALUE_SEPARATOR_REGEX, ' ', line)\n        elements.extend((float(s) for s in line.split()))\n    test.assertAllClose(np.array(expected_array).flatten(), elements)", "docstring": "Assert that the array value represented by lines is close to expected.\n\nNote that the shape of the array represented by the `array_lines` is ignored.\n\nArgs:\ntest: An instance of TensorFlowTestCase.\nexpected_array: Expected value of the array.\narray_lines: A list of strings representing the array.\nE.g., \"array([[ 1.0, 2.0 ], [ 3.0, 4.0 ]])\"\nAssumes that values are separated by commas, parentheses, brackets, \"|\"\ncharacters and whitespace.", "source": "github-repos"}
{"code": "def extract(self, destdir, decompress='auto'):\n        \n        for e in self.mardata.index.entries:\n            name = e.name\n            entry_path = safejoin(destdir, name)\n            entry_dir = os.path.dirname(entry_path)\n            mkdir(entry_dir)\n            with open(entry_path, 'wb') as f:\n                write_to_file(self.extract_entry(e, decompress), f)\n                os.chmod(entry_path, e.flags)", "docstring": "Extract the entire MAR file into a directory.\n\nArgs:\ndestdir (str): A local directory on disk into which the contents of\nthis MAR file will be extracted. Required parent directories\nwill be created as necessary.\ndecompress (obj, optional): Controls whether files are decompressed\nwhen extracted. Must be one of 'auto' or None. Defaults to\n'auto'.", "source": "juraj-google-style"}
{"code": "def fabrics(self):\n    if (not self.__fabrics):\n        self.__fabrics = Fabrics(self.__connection)\n    return self.__fabrics", "docstring": "Gets the Fabrics API client.\n\nReturns:\nFabrics:", "source": "codesearchnet"}
{"code": "def is_line_in_file(filename: str, line: str) -> bool:\n    assert ('\\n' not in line)\n    with open(filename, 'r') as file:\n        for fileline in file:\n            if (fileline == line):\n                return True\n        return False", "docstring": "Detects whether a line is present within a file.\n\nArgs:\nfilename: file to check\nline: line to search for (as an exact match)", "source": "codesearchnet"}
{"code": "def _seconds_have_elapsed(token, num_seconds):\n  \n  now = timeit.default_timer()\n  then = _log_timer_per_token.get(token, None)\n  if then is None or (now - then) >= num_seconds:\n    _log_timer_per_token[token] = now\n    return True\n  else:\n    return False", "docstring": "Tests if 'num_seconds' have passed since 'token' was requested.\n\nNot strictly thread-safe - may log with the wrong frequency if called\nconcurrently from multiple threads. Accuracy depends on resolution of\n'timeit.default_timer()'.\n\nAlways returns True on the first call for a given 'token'.\n\nArgs:\ntoken: The token for which to look up the count.\nnum_seconds: The number of seconds to test for.\n\nReturns:\nWhether it has been >= 'num_seconds' since 'token' was last requested.", "source": "juraj-google-style"}
{"code": "def master_key_from_entropy(passphrase='', strength=128):\n        \n        if strength % 32 != 0:\n            raise ValueError(\"strength must be a multiple of 32\")\n        if strength < 128 or strength > 256:\n            raise ValueError(\"strength should be >= 128 and <= 256\")\n        entropy = rand_bytes(strength \n        m = Mnemonic(language='english')\n        n = m.to_mnemonic(entropy)\n        return HDPrivateKey.master_key_from_seed(\n            Mnemonic.to_seed(n, passphrase)), n", "docstring": "Generates a master key from system entropy.\n\nArgs:\nstrength (int): Amount of entropy desired. This should be\na multiple of 32 between 128 and 256.\npassphrase (str): An optional passphrase for the generated\nmnemonic string.\n\nReturns:\nHDPrivateKey, str:\na tuple consisting of the master\nprivate key and a mnemonic string from which the seed\ncan be recovered.", "source": "juraj-google-style"}
{"code": "def __init__(self, sharding_specs: List[str], mesh: Mesh):\n    if not isinstance(mesh, Mesh):\n        raise ValueError('mesh is not a valid Mesh object.')\n    for _, dim_sharding in enumerate(sharding_specs):\n        if dim_sharding == UNSHARDED or dim_sharding == MATCH:\n            continue\n        if sharding_specs.count(dim_sharding) > 1:\n            raise ValueError(('Mesh dimension {mesh_dim} was repeated in sharding ' + 'specification {sharding_specs}. Mesh dimensions must be unique ' + 'in a layout.').format(mesh_dim=dim_sharding, sharding_specs=sharding_specs))\n        if dim_sharding not in mesh:\n            raise ValueError(('{dim_sharding}: A dimension sharding must either be a ' + 'valid mesh dimension or UNSHARDED.').format(dim_sharding=dim_sharding))\n    super().__init__(type=LayoutType.STATIC, sharding_specs=sharding_specs, mesh=mesh)", "docstring": "Builds a Layout from a list of dimension names and a Mesh.\n\nArgs:\nsharding_specs: List of sharding specifications, each corresponding to a\ntensor axis. Each specification (dim_sharding) can either be a mesh\ndimension or the special value UNSHARDED.\nmesh: A mesh configuration for the Tensor.\n\nReturns:\nA valid Layout built with given layout & mesh.", "source": "github-repos"}
{"code": "def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):\n    infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)\n    tf.logging.info('Loading examples')\n    all_examples = []\n    for (i, d) in enumerate(csv.DictReader(gzip.open(infile), delimiter='\\t')):\n        if ((i % 100000) == 0):\n            tf.logging.info(('%d examples have been loaded....' % i))\n        ex = {x: (int(y) if y.isdigit() else y) for (x, y) in d.items()}\n        all_examples.append(ex)\n    random.seed(1)\n    random.shuffle(all_examples)\n    n_train = int((len(all_examples) * prop_train))\n    n_val = (n_train + int((len(all_examples) * prop_val)))\n    train = all_examples[:n_train]\n    val = all_examples[n_train:n_val]\n    test = []\n    for e in all_examples[n_val:]:\n        if (e['n_intervening'] == e['n_diff_intervening']):\n            test.append(e)\n    return (all_examples, train, val, test)", "docstring": "Loads exampls from the tsv file.\n\nArgs:\ntmp_dir: temp directory.\nprop_train: proportion of the train data\nprop_val: proportion of the validation data\n\nReturns:\nAll examples in the dataset pluse train, test, and development splits.", "source": "codesearchnet"}
{"code": "def start(self, device):\n    super(NativeBLEVirtualInterface, self).start(device)\n    self.set_advertising(True)", "docstring": "Start serving access to this VirtualIOTileDevice\n\nArgs:\ndevice (VirtualIOTileDevice): The device we will be providing access to", "source": "codesearchnet"}
{"code": "def add_densities(density1, density2):\n    \n    return {spin: np.array(density1[spin]) + np.array(density2[spin])\n            for spin in density1.keys()}", "docstring": "Method to sum two densities.\n\nArgs:\ndensity1: First density.\ndensity2: Second density.\n\nReturns:\nDict of {spin: density}.", "source": "juraj-google-style"}
{"code": "def min(self):\n    if (len(self._data) == 0):\n        return 10\n    return next(iter(sorted(self._data.keys())))", "docstring": "Return the minimum value in this histogram.\n\nIf there are no values in the histogram at all, return 10.\n\nReturns:\nint: The minimum value in the histogram.", "source": "codesearchnet"}
{"code": "def begin_abort(self, root_pipeline_key, abort_message):\n\n    def txn():\n        pipeline_record = db.get(root_pipeline_key)\n        if (pipeline_record is None):\n            logging.warning('Tried to abort root pipeline ID \"%s\" but it does not exist.', root_pipeline_key.name())\n            raise db.Rollback()\n        if (pipeline_record.status == _PipelineRecord.ABORTED):\n            logging.warning('Tried to abort root pipeline ID \"%s\"; already in state: %s', root_pipeline_key.name(), pipeline_record.status)\n            raise db.Rollback()\n        if pipeline_record.abort_requested:\n            logging.warning('Tried to abort root pipeline ID \"%s\"; abort signal already sent.', root_pipeline_key.name())\n            raise db.Rollback()\n        pipeline_record.abort_requested = True\n        pipeline_record.abort_message = abort_message\n        pipeline_record.put()\n        task = taskqueue.Task(url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key))\n        task.add(queue_name=self.queue_name, transactional=True)\n        return True\n    return db.run_in_transaction(txn)", "docstring": "Kicks off the abort process for a root pipeline and all its children.\n\nArgs:\nroot_pipeline_key: db.Key of the root pipeline to abort.\nabort_message: Message explaining why the abort happened, only saved\ninto the root pipeline.\n\nReturns:\nTrue if the abort signal was sent successfully; False otherwise.", "source": "codesearchnet"}
{"code": "def search(self, searchAreaWkt=None, filters=None, startDate=None, endDate=None, types=None):\n    if (not types):\n        types = ['Acquisition']\n    if startDate:\n        startDateTime = datetime.datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S.%fZ')\n    if endDate:\n        endDateTime = datetime.datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S.%fZ')\n    if (startDate and endDate):\n        diff = (endDateTime - startDateTime)\n        if (diff.days < 0):\n            raise Exception('startDate must come before endDate.')\n    postdata = {'searchAreaWkt': searchAreaWkt, 'types': types, 'startDate': startDate, 'endDate': endDate}\n    if filters:\n        postdata['filters'] = filters\n    if searchAreaWkt:\n        postdata['searchAreaWkt'] = searchAreaWkt\n    url = ('%(base_url)s/search' % {'base_url': self.base_url})\n    headers = {'Content-Type': 'application/json'}\n    r = self.gbdx_connection.post(url, headers=headers, data=json.dumps(postdata))\n    r.raise_for_status()\n    results = r.json()['results']\n    return results", "docstring": "Perform a catalog search\n\nArgs:\nsearchAreaWkt: WKT Polygon of area to search.  Optional.\nfilters: Array of filters.  Optional.  Example:\n[\n\"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')\",\n\"cloudCover < 10\",\n\"offNadirAngle < 10\"\n]\nstartDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\nendDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\ntypes: Array of types to search for.  Optional.  Example (and default):  [\"Acquisition\"]\n\nReturns:\ncatalog search resultset", "source": "codesearchnet"}
{"code": "def lookup_symbol(self, name, namespace_stack):\n        \n        \n        \n        \n        \n        \n\n        \n        symbol = Symbol(name, name.split('::'), namespace_stack)\n        assert symbol.parts\n        if symbol.parts[0] == '':\n            \n            symbol.parts = symbol.parts[1:]\n        elif namespace_stack is not None:\n            result = self._lookup_in_all_namespaces(symbol)\n            if result:\n                return result\n\n        return self._lookup_global(symbol)", "docstring": "Returns AST node and module for symbol if found.\n\nArgs:\nname: 'name of the symbol to lookup'\nnamespace_stack: None or ['namespaces', 'in', 'current', 'scope']\n\nReturns:\n(ast.Node, module (ie, any object stored with symbol)) if found\n\nRaises:\nError if the symbol cannot be found.", "source": "juraj-google-style"}
{"code": "def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int):\n    generated_ngrams = [{} for _ in range(num_hypos)]\n    for idx in range(num_hypos):\n        gen_tokens = prev_input_ids[idx].tolist()\n        generated_ngram = generated_ngrams[idx]\n        for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]):\n            prev_ngram_tuple = tuple(ngram[:-1])\n            generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]\n    return generated_ngrams", "docstring": "Assume ngram_size=2 and prev_input_ids=tensor([[40, 2883, 2712, 4346]]). The output of generated ngrams look like\nthis {(40,): [2883], (2883,): [2712], (2712,): [4346]}.\n\nArgs:\nngram_size (`int`):\nThe number sequential tokens taken as a group which may only occur once before being banned.\nprev_input_ids (`torch.Tensor`):\nGenerated token ids for the current hypothesis.\nnum_hypos (`int`):\nThe number of hypotheses for which n-grams need to be generated.\n\nReturns:\ngenerated_ngrams (`dict`):\nDictionary of generated ngrams.", "source": "github-repos"}
{"code": "def _rmsprop(self, grads, cache=None, decay_rate=0.95):\n    if (cache is None):\n        cache = np.zeros_like(grads)\n    cache = ((decay_rate * cache) + ((1 - decay_rate) * (grads ** 2)))\n    step = ((- grads) / np.sqrt((cache + K.epsilon())))\n    return (step, cache)", "docstring": "Uses RMSProp to compute step from gradients.\n\nArgs:\ngrads: numpy array of gradients.\ncache: numpy array of same shape as `grads` as RMSProp cache\ndecay_rate: How fast to decay cache\n\nReturns:\nA tuple of\nstep: numpy array of the same shape as `grads` giving the step.\nNote that this does not yet take the learning rate into account.\ncache: Updated RMSProp cache.", "source": "codesearchnet"}
{"code": "def _create_and_save_file_init_hash_table_qat_model_tf1(self, output_path: str, tags: Collection[str], signature_def_key: str) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:\n    with session.Session(graph=ops.Graph()) as sess:\n        input_vocabs_placeholder, lookup_tensor, output_tensor = self._create_table_init_from_file_qat_model_tf1(sess)\n        inputs = {'input_vocabs': input_vocabs_placeholder}\n        outputs = {'lookup': lookup_tensor, 'output': output_tensor}\n        self._save_tf1_model(sess, output_path, signature_def_key, tags, inputs=inputs, outputs=outputs, init_op=lookup_ops.tables_initializer(), assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))\n    return (inputs, outputs)", "docstring": "Creates and saves a QAT model that uses a file-initialized table.\n\nThe asset file \"vocab_file.txt\" is used to initialize a hash table.\n\nArgs:\noutput_path: Path to the directory to save the created model.\ntags: Set of strings that identifies the saved meta graph.\nsignature_def_key: Name of the SignatureDef. Used to identify the\nSignatureDef within the meta graph.\n\nReturns:\ninputs: A mapping of input_key -> input_tensor (placeholder). The input\nkey is \"input_vocabs\".\noutputs: A mapping of output_key -> output_tensor. The output keys are\n\"lookup\" and \"output\".", "source": "github-repos"}
{"code": "def get_string(self):\n        \n        return_string = None\n        if not self.mmc:\n            return \"\"\n        method = 'PDASTRING'\n        if method == 'PDASTRING':\n            stringgen = PdaString()\n            print '* Reduce PDA using DFA BFS (remove unreachable states):'\n            newpda = self.mmc.s\n            handle = IntersectionHandling()\n            newpda = handle.get(newpda, self.mmc.accepted)\n            reduce_b = ReducePDA()\n            newpda = reduce_b.get(newpda)\n            \n            \n            \n            print \"- Total PDA states after reduction are \" + repr(len(newpda))\n            return_string = stringgen.init(newpda, self.mmc.accepted)\n            if return_string is not None:\n                return_string = return_string[0]\n        elif method == 'PDACFGSTRING':\n\n            optimized = 1\n            dt1 = datetime.datetime.fromtimestamp(time.time())\n            print '* Initiating PDA simplification'\n            print ' - Total PDA states are ' + repr(len(self.mmc.s))\n            handle = IntersectionHandling()\n            newpda = handle.get(self.mmc.s, self.mmc.accepted)\n            newpda = self.mmc.s\n            simply = SimplifyStateIDs()\n            newpda, biggestid, newaccepted = simply.get(\n                newpda, self.mmc.accepted)\n            print ' - Total PDA states after id clearence are ' + repr(len(newpda))\n            replace = ReadReplace(newpda, biggestid)\n            newpda = replace.replace_read()\n            print ' - Total PDA states after read elimination are ' + repr(len(newpda))\n            maxstate = replace.nextstate() - 1\n            print '* Reduce PDA using DFA BFS (remove unreachable states):'\n            reduce_b = ReducePDA()\n            newpda = reduce_b.get(newpda)\n            print \"- Total PDA states after reduction are \" + repr(len(newpda))\n\n            dt2 = datetime.datetime.fromtimestamp(time.time())\n            rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)\n            print \"* PDA was simplyfied in %d days, %d hours, %d minutes and %d seconds\" % (\n                rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)\n            dt1 = datetime.datetime.fromtimestamp(time.time())\n            print '* Initiating CNF from PDA generation'\n            cnfgenerator = PdaCnf(newpda, newaccepted)\n            dt2 = datetime.datetime.fromtimestamp(time.time())\n            rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)\n            print \"* CNF was generated in %d days, %d hours, %d minutes and %d seconds\" % (\n                rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)\n            dt1 = datetime.datetime.fromtimestamp(time.time())\n            print '* Initiating string from CFG generation'\n            grammar = cnfgenerator.get_rules(optimized)\n            print ' - Total grammar rules are ' + repr(len(grammar))\n            gen = CFGGenerator(CNFGenerator(grammar),\n                               optimized=optimized,\n                               splitstring=0,\n                               maxstate=maxstate)\n            return_string = gen.generate()\n            dt2 = datetime.datetime.fromtimestamp(time.time())\n            rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)\n            print \"* A string was generated in %d days, %d hours, %d minutes and %d seconds\" % (\n                rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)\n\n            print return_string\n        
else:\n            return_string = None\n        return return_string", "docstring": "Returns a string from the Diff resutl.\nDepending on the method, either the string will\nbe generated directly from the PDA using the state\nremoval method, or the PDA will be first translated to\na CFG and then a string will be generated from the CFG\nArgs:\nNone\nReturns:\nA string from the Diff", "source": "juraj-google-style"}
{"code": "def load(cls, fh):\n    dat = fh.read()\n    try:\n        ret = cls.from_json(dat)\n    except:\n        ret = cls.from_yaml(dat)\n    return ret", "docstring": "Load json or yaml data from file handle.\n\nArgs:\nfh (file): File handle to load from.\n\nExamlple:\n>>> with open('data.json', 'r') as json:\n>>>    jsdata = composite.load(json)\n>>>\n>>> with open('data.yml', 'r') as yml:\n>>>    ymldata = composite.load(yml)", "source": "codesearchnet"}
{"code": "def check(self, dsm, **kwargs):\n    layered_architecture = True\n    messages = []\n    categories = dsm.categories\n    dsm_size = dsm.size[0]\n    if (not categories):\n        categories = (['appmodule'] * dsm_size)\n    for i in range(0, (dsm_size - 1)):\n        for j in range((i + 1), dsm_size):\n            if ((categories[i] != 'broker') and (categories[j] != 'broker') and (dsm.entities[i].split('.')[0] != dsm.entities[j].split('.')[0])):\n                if (dsm.data[i][j] > 0):\n                    layered_architecture = False\n                    messages.append(('Dependency from %s to %s breaks the layered architecture.' % (dsm.entities[i], dsm.entities[j])))\n    return (layered_architecture, '\\n'.join(messages))", "docstring": "Check layered architecture.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\n\nReturns:\nbool, str: True if layered architecture else False, messages", "source": "codesearchnet"}
{"code": "def _process_output_source_directive(schema, current_schema_type, ast, location, context, local_unique_directives):\n    output_source_directive = local_unique_directives.get('output_source', None)\n    if output_source_directive:\n        if has_encountered_output_source(context):\n            raise GraphQLCompilationError(u'Cannot have more than one output source!')\n        if is_in_optional_scope(context):\n            raise GraphQLCompilationError(u'Cannot have the output source in an optional block!')\n        set_output_source_data(context, location)\n        return blocks.OutputSource()\n    else:\n        return None", "docstring": "Process the output_source directive, modifying the context as appropriate.\n\nArgs:\nschema: GraphQL schema object, obtained from the graphql library\ncurrent_schema_type: GraphQLType, the schema type at the current location\nast: GraphQL AST node, obtained from the graphql library\nlocation: Location object representing the current location in the query\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nlocal_unique_directives: dict, directive name string -> directive object, containing\nunique directives present on the current AST node *only*\n\nReturns:\nan OutputSource block, if one should be emitted, or None otherwise", "source": "codesearchnet"}
{"code": "def facade(projectmainfn, **kwargs):\n    \n    \n\n    \n    \n    \n    site_url = Configuration._create(**kwargs)\n\n    logger.info('--------------------------------------------------')\n    logger.info('> Using HDX Python API Library %s' % Configuration.apiversion)\n    logger.info('> HDX Site: %s' % site_url)\n\n    UserAgent.user_agent = Configuration.read().user_agent\n\n    projectmainfn()", "docstring": "Facade to simplify project setup that calls project main function\n\nArgs:\nprojectmainfn ((None) -> None): main function of project\n**kwargs: configuration parameters to pass to HDX Configuration class\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def hub_retry(max_attempts: int=5, wait_before_retry: Optional[float]=2):\n\n    def decorator(test_func_ref):\n\n        @functools.wraps(test_func_ref)\n        def wrapper(*args, **kwargs):\n            retry_count = 1\n            while retry_count < max_attempts:\n                try:\n                    return test_func_ref(*args, **kwargs)\n                except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, requests.exceptions.ReadTimeout, requests.exceptions.HTTPError, requests.exceptions.RequestException) as err:\n                    logger.error(f\"Test failed with {err} at try {retry_count}/{max_attempts} as it couldn't connect to the specified Hub repository.\")\n                    if wait_before_retry is not None:\n                        time.sleep(wait_before_retry)\n                    retry_count += 1\n            return test_func_ref(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "To decorate tests that download from the Hub. They can fail due to a\nvariety of network issues such as timeouts, connection resets, etc.\n\nArgs:\nmax_attempts (`int`, *optional*, defaults to 5):\nThe maximum number of attempts to retry the flaky test.\nwait_before_retry (`float`, *optional*, defaults to 2):\nIf provided, will wait that number of seconds before retrying the test.", "source": "github-repos"}
{"code": "def Feed(self, size=512):\n    \n    data = self.file_object.read(size)\n    Lexer.Feed(self, data)\n    return len(data)", "docstring": "Feed data into the buffer.\n\nArgs:\nsize: optional data size to read form the file-like object.", "source": "juraj-google-style"}
{"code": "def _create_security_group(self, ingress):\n    template_kwargs = {'app': self.app_name, 'env': self.env, 'region': self.region, 'vpc': get_vpc_id(self.env, self.region), 'description': self.properties['security_group']['description'], 'ingress': ingress}\n    secgroup_json = get_template(template_file='infrastructure/securitygroup_data.json.j2', formats=self.generated, **template_kwargs)\n    wait_for_task(secgroup_json)\n    return True", "docstring": "Send a POST to spinnaker to create a new security group.\n\nReturns:\nboolean: True if created successfully", "source": "codesearchnet"}
{"code": "def get_measurements(region, core_info, data, extra_offset=0):\n    \n    measurements = []\n    clean_core_info = [x for x in core_info if x]\n    cores = len(clean_core_info)\n    for k in data:\n        if k not in [\"1\", \"Region Info\", \"Event\", \"Metric\", \"CPU clock\"]:\n            slot = data[k]\n            for i in range(cores):\n                core = core_info[i]\n                idx = extra_offset + i\n                if core and slot[idx]:\n                    measurements.append((region, k, core, slot[idx]))\n\n    return measurements", "docstring": "Get the complete measurement info from likwid's region info.\n\nArgs:\nregion: The region we took a measurement in.\ncore_info: The core information.\ndata: The raw data.\nextra_offset (int): default = 0\n\nReturns (list((region, metric, core, value))):\nA list of measurement tuples, a tuple contains the information about\nthe region, the metric, the core and the actual value.", "source": "juraj-google-style"}
{"code": "def _GetValueAsObject(self, property_value):\n    if (property_value.type == pyolecf.value_types.BOOLEAN):\n        return property_value.data_as_boolean\n    if (property_value.type in self._INTEGER_TYPES):\n        return property_value.data_as_integer\n    if (property_value.type in self._STRING_TYPES):\n        return property_value.data_as_string\n    try:\n        data = property_value.data\n    except IOError:\n        data = None\n    return data", "docstring": "Retrieves the property value as a Python object.\n\nArgs:\nproperty_value (pyolecf.property_value): OLECF property value.\n\nReturns:\nobject: property value as a Python object.", "source": "codesearchnet"}
{"code": "def clear_config(clear_constants=False):\n    _set_config_is_locked(False)\n    _CONFIG.clear()\n    _SINGLETONS.clear()\n    if clear_constants:\n        _CONSTANTS.clear()\n    else:\n        saved_constants = _CONSTANTS.copy()\n        _CONSTANTS.clear()\n        for (name, value) in six.iteritems(saved_constants):\n            constant(name, value)\n    _IMPORTED_MODULES.clear()\n    _OPERATIVE_CONFIG.clear()", "docstring": "Clears the global configuration.\n\nThis clears any parameter values set by `bind_parameter` or `parse_config`, as\nwell as the set of dynamically imported modules. It does not remove any\nconfigurable functions or classes from the registry of configurables.\n\nArgs:\nclear_constants: Whether to clear constants created by `constant`. Defaults\nto False.", "source": "codesearchnet"}
{"code": "def restrict_bond_dict(self, bond_dict):\n    return {j: (bond_dict[j] & set(self.index)) for j in self.index}", "docstring": "Restrict a bond dictionary to self.\n\nArgs:\nbond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,\nto see examples for a bond_dict.\n\nReturns:\nbond dictionary", "source": "codesearchnet"}
{"code": "def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n    shape = input_tensor.get_shape().as_list()\n    if ((shape[1] is None) or (shape[2] is None)):\n        kernel_size_out = kernel_size\n    else:\n        kernel_size_out = [min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])]\n    return kernel_size_out", "docstring": "Define kernel size which is automatically reduced for small input.\n\nIf the shape of the input images is unknown at graph construction time this\nfunction assumes that the input images are is large enough.\n\nArgs:\ninput_tensor: input tensor of size [batch_size, height, width, channels].\nkernel_size: desired kernel size of length 2: [kernel_height, kernel_width]\n\nReturns:\na tensor with the kernel size.\n\nTODO(jrru): Make this function work with unknown shapes. Theoretically, this\ncan be done with the code below. Problems are two-fold: (1) If the shape was\nknown, it will be lost. (2) inception.slim.ops._two_element_tuple cannot\nhandle tensors that define the kernel size.\nshape = tf.shape(input_tensor)\nreturn = tf.stack([tf.minimum(shape[1], kernel_size[0]),\ntf.minimum(shape[2], kernel_size[1])])", "source": "codesearchnet"}
{"code": "def default_if_empty(self, default):\n    if self.closed():\n        raise ValueError('Attempt to call default_if_empty() on a closed Queryable.')\n    return self._create(self._generate_default_if_empty_result(default))", "docstring": "If the source sequence is empty return a single element sequence\ncontaining the supplied default value, otherwise return the source\nsequence unchanged.\n\nNote: This method uses deferred execution.\n\nArgs:\ndefault: The element to be returned if the source sequence is empty.\n\nReturns:\nThe source sequence, or if the source sequence is empty an sequence\ncontaining a single element with the supplied default value.\n\nRaises:\nValueError: If the Queryable has been closed.", "source": "codesearchnet"}
{"code": "def build_from_file(self, path: str | None) -> imports_map.ImportsMap | None:\n    if not path:\n        return None\n    items = self._read_from_file(path)\n    return self.build_from_items(items)", "docstring": "Create an ImportsMap from a .imports_info file.\n\nBuilds a dict of short_path to full name\n(e.g. \"path/to/file.py\" =>\n\"$GENDIR/rulename~~pytype-gen/path_to_file.py~~pytype\"\nArgs:\npath: The file with the info (may be None, for do-nothing)\n\nReturns:\nDict of .py short_path to list of .pytd path or None if no path", "source": "github-repos"}
{"code": "def objects_ids_and_slot_variables_and_paths(graph_view, skip_slot_variables=False):\n    trackable_objects, node_paths = graph_view.breadth_first_traversal()\n    object_names = object_identity.ObjectIdentityDictionary()\n    for obj, path in node_paths.items():\n        object_names[obj] = trackable_utils.object_path_to_string(path)\n    node_ids = object_identity.ObjectIdentityDictionary()\n    for node_id, node in enumerate(trackable_objects):\n        node_ids[node] = node_id\n    if skip_slot_variables:\n        slot_variables = object_identity.ObjectIdentityDictionary()\n    else:\n        slot_variables = serialize_slot_variables(trackable_objects=trackable_objects, node_ids=node_ids, object_names=object_names)\n    return (trackable_objects, node_paths, node_ids, slot_variables, object_names)", "docstring": "Traverse the object graph and list all accessible objects.\n\nLooks for `Trackable` objects which are dependencies of\n`root_trackable`. Includes slot variables only if the variable they are\nslotting for and the optimizer are dependencies of `root_trackable`\n(i.e. if they would be saved with a checkpoint).\n\nArgs:\ngraph_view: A GraphView object.\nskip_slot_variables: If True does not return trackables for slot variable.\nDefault False.\n\nReturns:\nA tuple of (trackable objects, paths from root for each object,\nobject -> node id, slot variables, object_names)", "source": "github-repos"}
{"code": "def default_logger(name):\n    logger = logging.getLogger(name)\n    logger_handler = logging.StreamHandler()\n    formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n    logger_handler.setFormatter(formatter)\n    logger.addHandler(logger_handler)\n    return logger", "docstring": "Return a toplevel logger.\n\nThis should be used only in the toplevel file.\nFiles deeper in the hierarchy should use\n``logger = logging.getLogger(__name__)``,\nin order to considered as children of the toplevel logger.\n\nBeware that without a setLevel() somewhere,\nthe default value (warning) will be used, so no debug message will be shown.\n\nArgs:\nname (str): usually `__name__` in the package toplevel __init__.py, or\n`__file__` in a script file\n(because __name__ would be \"__main__\" in this case).", "source": "codesearchnet"}
{"code": "def body(self, body):\n    self._request.body = body\n    self.add_matcher(matcher('BodyMatcher', body))", "docstring": "Defines the body data to match.\n\n``body`` argument can be a ``str``, ``binary`` or a regular expression.\n\nArguments:\nbody (str|binary|regex): body data to match.\n\nReturns:\nself: current Mock instance.", "source": "codesearchnet"}
{"code": "def get_minimizer_options(method):\n    \n    if method == 'Powell':\n        return {'patience': 2,\n                'patience_line_search': None,\n                'reset_method': 'EXTRAPOLATED_POINT'}\n\n    elif method == 'Nelder-Mead':\n        return {'patience': 200,\n                'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 0.1,\n                'adaptive_scales': True}\n\n    elif method == 'Levenberg-Marquardt':\n        return {'patience': 250, 'step_bound': 100.0, 'scale_diag': 1, 'usertol_mult': 30}\n\n    elif method == 'Subplex':\n        return {'patience': 10,\n                'patience_nmsimplex': 100,\n                'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 1.0, 'psi': 0.0001, 'omega': 0.01,\n                'adaptive_scales': True,\n                'min_subspace_length': 'auto',\n                'max_subspace_length': 'auto'}\n\n    raise ValueError('Could not find the specified method \"{}\".'.format(method))", "docstring": "Return a dictionary with the default options for the given minimization method.\n\nArgs:\nmethod (str): the name of the method we want the options off\n\nReturns:\ndict: a dictionary with the default options", "source": "juraj-google-style"}
{"code": "def send_state_event(self, event_type, content, state_key=''):\n    return self.client.api.send_state_event(self.room_id, event_type, content, state_key)", "docstring": "Send a state event to the room.\n\nArgs:\nevent_type (str): The type of event that you are sending.\ncontent (): An object with the content of the message.\nstate_key (str, optional): A unique key to identify the state.", "source": "codesearchnet"}
{"code": "def _get_by_name(self, feed_item):\n    key = ''\n    if self._search_field:\n        key = feed_item[self._search_field].strip()\n        search_string = feed_item[self._search_field].strip()\n        args = self._get_base_search_args(search_string)\n        if self._parent_filter_name:\n            if feed_item.get(self._parent_filter_field_name, None):\n                args[self._parent_filter_name] = feed_item.get(self._parent_filter_field_name, None)\n            elif self._parent_dao:\n                parent = self._parent_dao.get(feed_item, required=True)\n                if parent:\n                    args[self._parent_filter_name] = parent.get('id', None)\n            key = str(args.get(self._parent_filter_name, '')) + key\n        print('hitting the api to search for %s, %s' % (self._entity, search_string))\n        search_result = self._api().list(**args).execute()\n        items = search_result[self._list_name]\n        if items and len(items) > 0:\n            item = items[0]\n            if search_string == item['name']:\n                if len(items) > 1 and items[1]['name'] == search_string:\n                    raise Exception('ERROR: More than one item found with %s %s' % (self._search_field, feed_item[self._search_field]))\n                else:\n                    return (item, key)\n    return (None, key)", "docstring": "Searches CM for an item of name defined in the search field of the DAO class.\n\nIf more than one item is returned an error is raised, e.g. if there are more\nthan one item with the same name.\n\nArgs:\nfeed_item: The Bulkdozer feed item with the name to search for.\n\nReturns:\nIf found, the CM entity object that matches the search string.", "source": "github-repos"}
{"code": "def appliance_device_snmp_v3_users(self):\n    if (not self.__appliance_device_snmp_v3_users):\n        self.__appliance_device_snmp_v3_users = ApplianceDeviceSNMPv3Users(self.__connection)\n    return self.__appliance_device_snmp_v3_users", "docstring": "Gets the ApplianceDeviceSNMPv3Users API client.\n\nReturns:\nApplianceDeviceSNMPv3Users:", "source": "codesearchnet"}
{"code": "def get_ignored_files(self):\n    return [os.path.join(self.path, p) for p in self.run('ls-files', '--ignored', '--exclude-standard', '--others').strip().split()]", "docstring": "Returns the list of files being ignored in this repository.\n\nNote that file names, not directories, are returned.\n\nSo, we will get the following:\n\na/b.txt\na/c.txt\n\ninstead of just:\n\na/\n\nReturns:\nList[str] - list of ignored files. The paths are absolute.", "source": "codesearchnet"}
{"code": "def MakeZip(self, xar_file, output_file):\n    logging.info('Generating zip template file at %s', output_file)\n    with zipfile.ZipFile(output_file, mode='a') as zf:\n        build_yaml = io.BytesIO()\n        self.WriteBuildYaml(build_yaml)\n        build_yaml.seek(0)\n        zf.writestr('build.yaml', build_yaml.read())", "docstring": "Add a zip to the end of the .xar containing build.yaml.\n\nThe build.yaml is already inside the .xar file, but we can't easily open\nthis on linux. To make repacking easier we add a zip to the end of the .xar\nand add in the build.yaml. The repack step will then look at the build.yaml\nand insert the config.yaml. We end up storing the build.yaml twice but it is\ntiny, so this doesn't matter.\n\nArgs:\nxar_file: the name of the xar file.\noutput_file: the name of the output ZIP archive.", "source": "codesearchnet"}
{"code": "def IsDecltype(clean_lines, linenum, column):\n  \n  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)\n  if start_col < 0:\n    return False\n  if Search(r'\\bdecltype\\s*$', text[0:start_col]):\n    return True\n  return False", "docstring": "Check if the token ending on (linenum, column) is decltype().\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: the number of the line to check.\ncolumn: end column of the token to check.\nReturns:\nTrue if this token is decltype() expression, False otherwise.", "source": "juraj-google-style"}
{"code": "def bsp_new_with_size(x: int, y: int, w: int, h: int) -> tcod.bsp.BSP:\n    return Bsp(x, y, w, h)", "docstring": "Create a new BSP instance with the given rectangle.\n\nArgs:\nx (int): Rectangle left coordinate.\ny (int): Rectangle top coordinate.\nw (int): Rectangle width.\nh (int): Rectangle height.\n\nReturns:\nBSP: A new BSP instance.\n\n.. deprecated:: 2.0\nCall the :any:`BSP` class instead.", "source": "codesearchnet"}
{"code": "def addColumn(self, columnName, dtype, defaultValue):\n        \n        model = self.tableView.model()\n\n        if model is not None:\n            model.addDataFrameColumn(columnName, dtype, defaultValue)\n\n        self.addColumnButton.setChecked(False)", "docstring": "Adds a column with the given parameters to the underlying model\n\nThis method is also a slot.\nIf no model is set, nothing happens.\n\nArgs:\ncolumnName (str): The name of the new column.\ndtype (numpy.dtype): The datatype of the new column.\ndefaultValue (object): Fill the column with this value.", "source": "juraj-google-style"}
{"code": "def send_put(self, mri, attribute_name, value):\n        \n        path = attribute_name + \".value\"\n        typ, value = convert_to_type_tuple_value(serialize_object(value))\n        if isinstance(typ, tuple):\n            \n            _, typeid, fields = typ\n            value = Value(Type(fields, typeid), value)\n        try:\n            self._ctxt.put(mri, {path: value}, path)\n        except RemoteError:\n            if attribute_name == \"exports\":\n                \n                \n                \n                self._queues[mri].get(timeout=DEFAULT_TIMEOUT)\n            else:\n                \n                raise", "docstring": "Abstract method to dispatch a Put to the server\n\nArgs:\nmri (str): The mri of the Block\nattribute_name (str): The name of the Attribute within the Block\nvalue: The value to put", "source": "juraj-google-style"}
{"code": "def route(self, method, pattern):\n\n    def decorator(callback):\n        self._router.add(method, pattern, callback)\n        return callback\n    return decorator", "docstring": "Decorator to add route for a request with any HTTP method.\n\nArguments:\nmethod (str): HTTP method name, e.g. GET, POST, etc.\npattern (str): Routing pattern the path must match.\n\nReturns:\nfunction: Decorator function to add route.", "source": "codesearchnet"}
{"code": "def get_service_state_object_id(subsystem: str, name: str, version: str) -> str:\n    return '{}:{}:{}'.format(subsystem, name, version)", "docstring": "Return service state data object key.\n\nArgs:\nsubsystem (str): Subsystem the service belongs to\nname (str): Name of the Service\nversion (str): Version of the Service\n\nReturns:\nstr, Key used to store the service state data object", "source": "codesearchnet"}
{"code": "def integer_key_convert(dictin, dropfailedkeys=False):\n    return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)", "docstring": "Convert keys of dictionary to integers\n\nArgs:\ndictin (DictUpperBound): Input dictionary\ndropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.\n\nReturns:\nDict: Dictionary with keys converted to integers", "source": "codesearchnet"}
{"code": "def ParseCode(unformatted_source, filename='<unknown>'):\n    if not unformatted_source.endswith(os.linesep):\n        unformatted_source += os.linesep\n    try:\n        ast_tree = ast.parse(unformatted_source, filename)\n        ast.fix_missing_locations(ast_tree)\n        readline = StringIO(unformatted_source).readline\n        tokens = tokenize.generate_tokens(readline)\n    except Exception:\n        raise\n    logical_lines = _CreateLogicalLines(tokens)\n    split_penalty_visitor.SplitPenalty(logical_lines).visit(ast_tree)\n    return logical_lines", "docstring": "Parse a string of Python code into logical lines.\n\nThis provides an alternative entry point to YAPF.\n\nArguments:\nunformatted_source: (unicode) The code to format.\nfilename: (unicode) The name of the file being reformatted.\n\nReturns:\nA list of LogicalLines.\n\nRaises:\nAn exception is raised if there's an error during AST parsing.", "source": "github-repos"}
{"code": "def get_resources(minify=False):\n    all_resources = dict()\n    subclasses = (resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__())\n    for resource in subclasses:\n        obj = resource(minify)\n        all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))\n    return all_resources", "docstring": "Find all resources which subclass ResourceBase.\n\nKeyword arguments:\nminify -- select minified resources if available.\n\nReturns:\nDictionary of available resources. Keys are resource names (part of the config variable names), values are dicts\nwith css and js keys, and tuples of resources as values.", "source": "codesearchnet"}
{"code": "def db_dict(c):\n    db_d = {}\n    c.execute('SELECT * FROM library_spectra')\n    db_d['library_spectra'] = [list(row) for row in c]\n    c.execute('SELECT * FROM library_spectra_meta')\n    db_d['library_spectra_meta'] = [list(row) for row in c]\n    c.execute('SELECT * FROM library_spectra_annotation')\n    db_d['library_spectra_annotations'] = [list(row) for row in c]\n    c.execute('SELECT * FROM library_spectra_source')\n    db_d['library_spectra_source'] = [list(row) for row in c]\n    c.execute('SELECT * FROM metab_compound')\n    db_d['metab_compound'] = [list(row) for row in c]\n    return db_d", "docstring": "Get a dictionary of the library spectra from a database\n\nExample:\n>>> from msp2db.db import get_connection\n>>> conn = get_connection('sqlite', 'library.db')\n>>> test_db_d = db_dict(conn.cursor())\n\nIf using a large database the resulting dictionary will be very large!\n\nArgs:\nc (cursor): SQL database connection cursor\n\nReturns:\nA dictionary with the following keys 'library_spectra', 'library_spectra_meta', 'library_spectra_annotations',\n'library_spectra_source' and 'metab_compound'. Where corresponding values for each key are list of list containing\nall the rows in the database.", "source": "codesearchnet"}
{"code": "def show(self, view: View, request: Request):\n        \n        return view.render('welcome', {\n            'app': request.app().make('Application')\n        })", "docstring": "Show the welcome page.\n\nArguments:\nview {masonite.view.View} -- The Masonite view class.\nApplication {config.application} -- The application config module.\n\nReturns:\nmasonite.view.View -- The Masonite view class.", "source": "juraj-google-style"}
{"code": "def _convert_values_and_partition(cls, values, row_partition, name):\n    if not isinstance(row_partition, RowPartition):\n        raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')\n    if isinstance(values, RaggedTensor):\n        if values._row_partition.dtype != row_partition.dtype:\n            if not ragged_config.auto_cast_partition_dtype():\n                raise ValueError(f'Argument `row_partition` of RaggedTensor with name: {name} must have same dtype as Argument `values`. ({row_partition.dtype} vs. {values._row_partition.dtype}).')\n            values = values.with_row_splits_dtype(row_partition.dtype)\n    else:\n        values = _convert_to_ragged_tensor_values(values)\n    return (values, row_partition)", "docstring": "Converts `values` and `partition` to Tensors.\n\nIf `values` is a `RaggedTensor`, then converts `values` and `partition`\nto have compatible row-partitioning dtypes.  In particular, if any of the\nrow partitioning tensors are `int64`, then all of the other row\npartitioning tensors will be cast to `int64` (if auto_cast_partition_dtype()\nis true) or an error will be raised (if auto_cast_partition_dtype() is\nfalse).\n\nArgs:\nvalues: The `values` for the `RaggedTensor` being constructed.\nrow_partition: A RowPartition object for the `RaggedTensor` being\nconstructed.\nname: The name of the RowPartition object.\n\nReturns:\nA tuple (values, partition).", "source": "github-repos"}
{"code": "def wait_for_transform_job(self, job, poll=5):\n    desc = _wait_until((lambda : _transform_job_status(self.sagemaker_client, job)), poll)\n    self._check_job_status(job, desc, 'TransformJobStatus')\n    return desc", "docstring": "Wait for an Amazon SageMaker transform job to complete.\n\nArgs:\njob (str): Name of the transform job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeTransformJob`` API.\n\nRaises:\nValueError: If the transform job fails.", "source": "codesearchnet"}
{"code": "def color(self, color):\n        \n        self._data['color'] = color\n        request = self._base_request\n        request['color'] = color\n        return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels color.\n\nArgs:\ncolor:", "source": "juraj-google-style"}
{"code": "def url(self, url):\n    if (url and url.endswith('/')):\n        url = url[:(- 1)]\n    self._url = url", "docstring": "Set API URL endpoint\n\nArgs:\nurl: the url of the API endpoint", "source": "codesearchnet"}
{"code": "def generate_version(max_major: int = 1, max_minor: int = 7,\n                     max_patch: int = 15) -> str:\n    \n    major = randint(0, max_major)\n    minor = randint(0, max_minor)\n    patch = randint(0, max_patch)\n    return '{:d}.{:d}.{:d}'.format(major, minor, patch)", "docstring": "Select a random version.\n\nArgs:\nmax_major (int, optional) maximum major version\nmax_minor (int, optional) maximum minor version\nmax_patch (int, optional) maximum patch version\n\nReturns:\nstr, Version String", "source": "juraj-google-style"}
{"code": "def EnqueueBreakpointUpdate(self, breakpoint):\n    with self._transmission_thread_startup_lock:\n        if (self._transmission_thread is None):\n            self._transmission_thread = threading.Thread(target=self._TransmissionThreadProc)\n            self._transmission_thread.name = 'Cloud Debugger transmission thread'\n            self._transmission_thread.daemon = True\n            self._transmission_thread.start()\n    self._transmission_queue.append((breakpoint, 0))\n    self._new_updates.set()", "docstring": "Asynchronously updates the specified breakpoint on the backend.\n\nThis function returns immediately. The worker thread is actually doing\nall the work. The worker thread is responsible to retry the transmission\nin case of transient errors.\n\nArgs:\nbreakpoint: breakpoint in either final or non-final state.", "source": "codesearchnet"}
{"code": "def cmd_ssh(options):\n    \n    import os\n    import subprocess\n    from os.path import expanduser\n    options.inst_state = \"running\"\n    (i_info, param_str) = gather_data(options)\n    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)\n    home_dir = expanduser(\"~\")\n    if options.user is None:\n        tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami'])\n        options.user = cmd_ssh_user(tar_aminame,\n                                    i_info[tar_idx]['tag']['Name'])\n    else:\n        debg.dprint(\"LoginUser set by user: \", options.user)\n    os_spec = {\"nt\": [\"powershell plink\", \"\\\\\", \"ppk\"]}\n    c_itm = os_spec.get(os.name, [\"ssh\", \"/\", \"pem\"])\n    cmd_ssh_run = c_itm[0]\n    if not options.nopem:\n        cmd_ssh_run += (\" -i {0}{1}.aws{1}{2}.{3}\".\n                        format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'],\n                               c_itm[2]))\n    else:\n        debg.dprint(\"Connect string: \", \"ssh {}@{}\".\n                    format(options.user, i_info[tar_idx]['pub_dns_name']))\n    cmd_ssh_run += \" {0}@{1}\".format(options.user,\n                                     i_info[tar_idx]['pub_dns_name'])\n    print(cmd_ssh_run)\n    subprocess.call(cmd_ssh_run, shell=True)", "docstring": "Connect to the specified instance via ssh.\n\nFinds instances that match the user specified args that are also\nin the 'running' state.  The target instance is determined, the\nrequired connection information is retreived (IP, key and ssh\nuser-name), then an 'ssh' connection is made to the instance.\n\nArgs:\noptions (object): contains args and data from parser", "source": "juraj-google-style"}
{"code": "def GetUserById(self, local_id):\n    \n    user = self.rpc_helper.GetAccountInfoById(local_id)\n    return GitkitUser.FromApiResponse(user)", "docstring": "Gets user info by id.\n\nArgs:\nlocal_id: string, the user id at Gitkit server.\n\nReturns:\nGitkitUser, containing the user info.", "source": "juraj-google-style"}
{"code": "def create_constructor_args(cls, proto_list: List[american_option_pb2.AmericanEquityOption], config: AmericanOptionConfig=None) -> Dict[str, Any]:\n    am_option_data = proto_utils.from_protos(proto_list, config)\n    res = {}\n    for key in am_option_data:\n        tensor_repr = proto_utils.tensor_repr(am_option_data[key])\n        res[key] = tensor_repr\n    return res", "docstring": "Creates a dictionary to initialize AmericanEquityOption.\n\nThe output dictionary is such that the instruments can be initialized\nas follows:\n```\ninitializer = create_constructor_args(proto_list, config)\namerican_options = [AmericanEquityOption(**data)\nfor data in initializer.values()]\n```\n\nThe keys of the output dictionary are unique identifiers of the batched\ninstruments. This is useful for identifying an existing graph that could be\nreused for the instruments without the need of rebuilding the graph.\n\nArgs:\nproto_list: A list of protos for which the initialization arguments are\nconstructed.\nconfig: An instance of `AmericanOptionConfig`.\n\nReturns:\nA possibly nested dictionary such that each value provides initialization\narguments for the AmericanEquityOption.", "source": "github-repos"}
{"code": "def is_initialised(self):\n    if (not self.lattice):\n        raise AttributeError('Running a simulation needs the lattice to be initialised')\n    if (not self.atoms):\n        raise AttributeError('Running a simulation needs the atoms to be initialised')\n    if ((not self.number_of_jumps) and (not self.for_time)):\n        raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set')", "docstring": "Check whether the simulation has been initialised.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _set_all_lims(self, which, lim, d, scale, fontsize=None):\n    setattr(self.general, (which + 'lims'), lim)\n    setattr(self.general, ('d' + which), d)\n    setattr(self.general, (which + 'scale'), scale)\n    if (fontsize is not None):\n        setattr(self.general, (which + '_tick_label_fontsize'), fontsize)\n    return", "docstring": "Set limits and ticks for an axis for whole figure.\n\nThis will set axis limits and tick marks for the entire figure.\nIt can be overridden in the SinglePlot class.\n\nArgs:\nwhich (str): The indicator of which part of the plots\nto adjust. This currently handles `x` and `y`.\nlim (len-2 list of floats): The limits for the axis.\nd (float): Amount to increment by between the limits.\nscale (str): Scale of the axis. Either `log` or `lin`.\nfontsize (int, optional): Set fontsize for associated axis tick marks.\nDefault is None.", "source": "codesearchnet"}
{"code": "def vflip(img):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    return img.transpose(Image.FLIP_TOP_BOTTOM)", "docstring": "Vertically flip the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be flipped.\n\nReturns:\nPIL Image:  Vertically flipped image.", "source": "codesearchnet"}
{"code": "def _resolve_credential(self, credential):\n    if self._credentials_found_in_instance:\n        return\n    elif self._credentials_found_in_envars():\n        return os.getenv(('PAN_' + credential.upper()))\n    else:\n        return self.storage.fetch_credential(credential=credential, profile=self.profile)", "docstring": "Resolve credential from envars or credentials store.\n\nArgs:\ncredential (str): Credential to resolve.\n\nReturns:\nstr or None: Resolved credential or ``None``.", "source": "codesearchnet"}
{"code": "def _get_data_by_field(self, field_number):\n        \n        if not self.is_data_loaded:\n            self._import_data()\n\n        \n        if not 0 <= field_number < self._num_of_fields:\n            raise ValueError(\"Field number should be between 0-%d\" % self._num_of_fields)\n\n        return self._data[field_number]", "docstring": "Return a data field by field number.\n\nThis is a useful method to get the values for fields that Ladybug\ncurrently doesn't import by default. You can find list of fields by typing\nEPWFields.fields\n\nArgs:\nfield_number: a value between 0 to 34 for different available epw fields.\n\nReturns:\nAn annual Ladybug list", "source": "juraj-google-style"}
{"code": "def add_cohp_dict(self, cohp_dict, key_sort_func=None):\n        \n        if key_sort_func:\n            keys = sorted(cohp_dict.keys(), key=key_sort_func)\n        else:\n            keys = cohp_dict.keys()\n        for label in keys:\n            self.add_cohp(label, cohp_dict[label])", "docstring": "Adds a dictionary of COHPs with an optional sorting function\nfor the keys.\n\nArgs:\ncohp_dict: dict of the form {label: Cohp}\n\nkey_sort_func: function used to sort the cohp_dict keys.", "source": "juraj-google-style"}
{"code": "def get_files_in_branch(profile, branch_sha):\n    \n    tree_sha = get_commit_tree(profile, branch_sha)\n    files = get_files_in_tree(profile, tree_sha)\n    tree = [prepare(x) for x in files]\n    return tree", "docstring": "Get all files in a branch's tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch_sha\nThe SHA a branch's HEAD points to.\n\nReturns:\nA list of dicts containing info about each blob in the tree.", "source": "juraj-google-style"}
{"code": "def __init__(self, url, conn=None, user=None, password=None, verify=True,\n                 proxies=None):\n        \n        if conn and (user or password):\n            raise InvalidArgumentsError(\"A connection and user/password may\"\n                                        \" not both be provided.\")\n        elif conn:\n            self._conn = conn\n        else:\n            self._conn = _HTTPConnection(user, password, verify, proxies)\n\n        \n        \n        if url[-1] == \"/\":\n            self.url = url\n        else:\n            self.url = url + \"/\"", "docstring": "Create a TAXII endpoint.\n\nArgs:\nuser (str): username for authentication (optional)\npassword (str): password for authentication (optional)\nverify (bool): validate the entity credentials (default: True)\nconn (_HTTPConnection): A connection to reuse (optional)\nproxies (dict): key/value pair for http/https proxy settings.\n(optional)", "source": "juraj-google-style"}
{"code": "def DeserializeUnsignedWithoutType(self, reader):\n        \n        self.Version = reader.ReadByte()\n        self.DeserializeExclusiveData(reader)\n        self.Attributes = reader.ReadSerializableArray('neo.Core.TX.TransactionAttribute.TransactionAttribute',\n                                                       max=self.MAX_TX_ATTRIBUTES)\n        self.inputs = reader.ReadSerializableArray('neo.Core.CoinReference.CoinReference')\n        self.outputs = reader.ReadSerializableArray('neo.Core.TX.Transaction.TransactionOutput')", "docstring": "Deserialize object without reading transaction type data.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "async def get_data(self, url):\n        \n        logger.debug('making request to %r', url)\n        with aiohttp.ClientSession() as session:\n            async with session.get(url, headers=self.headers) as response:\n                body = json.loads((await response.read()).decode('utf-8'))\n                if response.status == HTTPStatus.OK:\n                    if url != self.url_builder('configuration'):\n                        await self._update_config()\n                    return body\n                elif response.status == HTTPStatus.TOO_MANY_REQUESTS:\n                    timeout = self.calculate_timeout(\n                        response.headers['Retry-After'],\n                    )\n                    logger.warning(\n                        'Request limit exceeded, waiting %s seconds',\n                        timeout,\n                    )\n                    await asyncio.sleep(timeout)\n                    return await self.get_data(url)\n                logger.warning(\n                    'request failed %s: %r',\n                    response.status,\n                    body.get('status_message', '<no message>')\n                )", "docstring": "Get data from the TMDb API via :py:func:`aiohttp.get`.\n\nNotes:\nUpdates configuration (if required) on successful requests.\n\nArguments:\nurl (:py:class:`str`): The endpoint URL and params.\n\nReturns:\n:py:class:`dict`: The parsed JSON result.", "source": "juraj-google-style"}
{"code": "def get_referenced_object_as_list(\n        prev_obj, obj, dot_separated_name, desired_type=None):\n    \n    res = get_referenced_object(prev_obj, obj, dot_separated_name,\n                                desired_type)\n    if res is None:\n        return []\n    elif type(res) is list:\n        return res\n    else:\n        return [res]", "docstring": "Same as get_referenced_object, but always returns a list.\n\nArgs:\nprev_obj: see get_referenced_object\nobj: see get_referenced_object\ndot_separated_name: see get_referenced_object\ndesired_type: see get_referenced_object\n\nReturns:\nsame as get_referenced_object, but always returns a list", "source": "juraj-google-style"}
{"code": "def gen_permutations(self, index=0, args=None):\n        \n        if args is None:\n            args = []\n        try:\n            name = self.layout_json_names[index]\n            display = self.layout_json_params.get(name, {}).get('display')\n            input_type = self.install_json_params().get(name, {}).get('type')\n            if self.validate_layout_display(self.input_table, display):\n                if input_type.lower() == 'boolean':\n                    for val in [True, False]:\n                        args.append({'name': name, 'value': val})\n                        self.db_update_record(self.input_table, name, val)\n                        self.gen_permutations(index + 1, list(args))\n                        \n                        args.pop()\n                elif input_type.lower() == 'choice':\n                    valid_values = self.expand_valid_values(\n                        self.install_json_params().get(name, {}).get('validValues', [])\n                    )\n                    for val in valid_values:\n                        args.append({'name': name, 'value': val})\n                        self.db_update_record(self.input_table, name, val)\n                        self.gen_permutations(index + 1, list(args))\n                        \n                        args.pop()\n                else:\n                    args.append({'name': name, 'value': None})\n                    self.gen_permutations(index + 1, list(args))\n            else:\n                self.gen_permutations(index + 1, list(args))\n\n        except IndexError:\n            \n            self._input_permutations.append(args)\n            outputs = []\n\n            for o_name in self.install_json_output_variables():\n                if self.layout_json_outputs.get(o_name) is not None:\n                    display = self.layout_json_outputs.get(o_name, {}).get('display')\n                    valid = self.validate_layout_display(self.input_table, display)\n                    if display is None or not valid:\n                        continue\n                for ov in self.install_json_output_variables().get(o_name):\n                    outputs.append(ov)\n            self._output_permutations.append(outputs)", "docstring": "Iterate recursively over layout.json parameter names.\n\nTODO: Add indicator values.\n\nArgs:\nindex (int, optional): The current index position in the layout names list.\nargs (list, optional): Defaults to None. The current list of args.", "source": "juraj-google-style"}
{"code": "def save_metadata(self, file_path):\n    \n    data = self.metadata\n    with open(file_path, 'w')  as out_file:\n      json.dump(data, out_file)", "docstring": "Saves a json file of the search result metadata.\n\nSaves a json file of the search result metadata from :class:`api.results`.metadata.\n\nArgs:\nfile_path (str):\nPath to the json file to save metadata to.", "source": "juraj-google-style"}
{"code": "def zero_add(previous_value, x, name=None, reuse=None):\n    with tf.variable_scope(name, default_name='zero_add', reuse=reuse):\n        gamma = tf.get_variable('gamma', (), initializer=tf.zeros_initializer())\n        return (previous_value + (gamma * x))", "docstring": "Resnet connection with zero initialization.\n\nAnother type of resnet connection which returns previous_value + gamma * x.\ngamma is a trainable scalar and initialized with zero. It is useful when a\nmodule is plugged into a trained model and we want to make sure it matches the\noriginal model's performance.\n\nArgs:\nprevious_value:  A tensor.\nx: A tensor.\nname: name of variable scope; defaults to zero_add.\nreuse: reuse scope.\n\nReturns:\nprevious_value + gamma * x.", "source": "codesearchnet"}
{"code": "def window_partition(hidden_states, window_size):\n    batch_size, height, width, num_channels = hidden_states.shape\n    hidden_states = hidden_states.view(batch_size, height \n    windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)\n    return windows", "docstring": "Returns the resized hidden states. The output shape should be `(batch_size * num_windows, window_size, window_size,\nnum_channels)`\n\nArgs:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`):\nInput hidden states\nwindow_size (`int`):\nWindow size", "source": "github-repos"}
{"code": "def get_random_url(ltd=\"com\"):\n        \n\n        url = [\n            \"https:\n            RandomInputHelper.get_random_value(8, [string.ascii_lowercase]),\n            \".\",\n            ltd\n        ]\n\n        return \"\".join(url)", "docstring": "Get a random url with the given ltd.\n\nArgs:\nltd (str): The ltd to use (e.g. com).\n\nReturns:\nstr: The random url.", "source": "juraj-google-style"}
{"code": "def run_cell(self, cell):\n        \n        globals = self.ipy_shell.user_global_ns\n        locals = self.ipy_shell.user_ns\n        globals.update({\n            \"__ipy_scope__\": None,\n        })\n        try:\n            with redirect_stdout(self.stdout):\n                self.run(cell, globals, locals)\n        except:\n            self.code_error = True\n            if self.options.debug:\n                raise BdbQuit\n        finally:\n            self.finalize()", "docstring": "Run the Cell code using the IPython globals and locals\n\nArgs:\ncell (str): Python code to be executed", "source": "juraj-google-style"}
{"code": "def _get_iam_rest_api_url_from_creds(rest_client, credentials):\n    \n    res = rest_client.make_request(credentials[_IAMConstants.V2_REST_URL])\n    base = res['streams_self']\n    end = base.find('/instances')\n    return base[:end] + '/resources'", "docstring": "Retrieves the Streams REST API URL from the provided credentials using iam authentication.\nArgs:\nrest_client (:py:class:`rest_primitives._IAMStreamsRestClient`): A client  for making REST calls using IAM authentication\ncredentials (dict): A dict representation of the credentials.\nReturns:\nstr: The remote Streams REST API URL.", "source": "juraj-google-style"}
{"code": "def union(self, second_iterable, selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call union() on a closed Queryable.')\n    if (not is_iterable(second_iterable)):\n        raise TypeError('Cannot compute union() with second_iterable of non-iterable {0}'.format(str(type(second_iterable))[7:(- 1)]))\n    return self._create(itertools.chain(self, second_iterable)).distinct(selector)", "docstring": "Returns those elements which are either in the source sequence or in\nthe second_iterable, or in both.\n\nNote: This method uses deferred execution.\n\nArgs:\nsecond_iterable: Elements from this sequence are returns if they\nare not also in the source sequence.\n\nselector: An optional single argument function which is used to\nproject the elements in the source and second_iterables prior\nto comparing them. If omitted the identity function will be\nused.\n\nReturns:\nA sequence containing all elements in the source sequence and second\nsequence.\n\nRaises:\nValueError: If the Queryable has been closed.\nTypeError: If the second_iterable is not in fact iterable.\nTypeError: If the selector is not callable.", "source": "codesearchnet"}
{"code": "def guess_content_type_and_encoding(path):\n    \n    for ext, content_type in _EXTENSION_TO_MIME_TYPE.items():\n        if path.endswith(ext):\n            return content_type\n\n    content_type, encoding = mimetypes.guess_type(path)\n    content_type = content_type or \"application/binary\"\n    return content_type, encoding", "docstring": "Guess the content type of a path, using ``mimetypes``.\n\nFalls back to \"application/binary\" if no content type is found.\n\nArgs:\npath (str): the path to guess the mimetype of\n\nReturns:\nstr: the content type of the file", "source": "juraj-google-style"}
{"code": "def calibrate(self, fetch_names, num_runs, feed_dict_fn=None, input_map_fn=None):\n    assert self._converted\n    assert self._need_calibration\n    assert not self._calibration_data_collected\n    if feed_dict_fn and input_map_fn or (not feed_dict_fn and (not input_map_fn)):\n        raise ValueError('Should specify one and only one of feed_dict_fn and input_map_fn.')\n    if input_map_fn:\n        for k, v in input_map_fn().items():\n            if not isinstance(k, str):\n                raise ValueError('Keys of input_map_fn must be of type str')\n            if not isinstance(v, tensor.Tensor):\n                raise ValueError('Values of input_map_fn must be of type tf.Tensor')\n    self._calibration_graph = ops.Graph()\n    with self._calibration_graph.as_default():\n        fetches = importer.import_graph_def(self._converted_graph_def, input_map=input_map_fn() if input_map_fn else None, return_elements=fetch_names, name='')\n    calibrate_rewriter_cfg = rewriter_config_pb2.RewriterConfig()\n    if self._test_only_disable_non_trt_optimizers:\n        trt_utils.disable_non_trt_optimizers_in_rewriter_config(calibrate_rewriter_cfg)\n    calibrate_config = config_pb2.ConfigProto(allow_soft_placement=True, graph_options=config_pb2.GraphOptions(rewrite_options=calibrate_rewriter_cfg))\n    with session.Session(graph=self._calibration_graph, config=calibrate_config) as calibration_sess:\n        for _ in range(num_runs):\n            calibration_sess.run(fetches, feed_dict=feed_dict_fn() if feed_dict_fn else None)\n        device_to_get_resource_op_map = {}\n        with self._calibration_graph.as_default():\n            resource_name_input = array_ops.placeholder(dtypes.string)\n            for node in self._converted_graph_def.node:\n                if node.op == _TRT_ENGINE_OP_NAME:\n                    if node.device not in device_to_get_resource_op_map:\n                        with self._calibration_graph.device(node.device):\n                            serialized_resources_output = gen_trt_ops.get_calibration_data_op(resource_name_input)\n                        device_to_get_resource_op_map[node.device] = serialized_resources_output\n                    calibration_result = calibration_sess.run(device_to_get_resource_op_map[node.device], feed_dict={resource_name_input: _get_canonical_engine_name(node.name)})\n                    node.attr['calibration_data'].s = calibration_result\n        self._calibration_data_collected = True\n    return self._converted_graph_def", "docstring": "Run the calibration and return the calibrated GraphDef.\n\nArgs:\nfetch_names: a list of output tensor name to fetch during calibration.\nnum_runs: number of runs of the graph during calibration.\nfeed_dict_fn: a function that returns a dictionary mapping input names (as\nstrings) in the GraphDef to be calibrated to values (e.g. Python list,\nnumpy arrays, etc). One and only one of `feed_dict_fn` and\n`input_map_fn` should be specified.\ninput_map_fn: a function that returns a dictionary mapping input names (as\nstrings) in the GraphDef to be calibrated to Tensor objects. The values\nof the named input tensors in the GraphDef to be calibrated will be\nre-mapped to the respective `Tensor` values during calibration. One and\nonly one of `feed_dict_fn` and `input_map_fn` should be specified.\n\nRaises:\nValueError: if the input combination is invalid.\nRuntimeError: if this method is called in eager mode.\n\nReturns:\nThe GraphDef after the calibration.", "source": "github-repos"}
{"code": "def copy_table(self, src, dst):\n        \n        \n        self.create_table_from(dst, src)\n\n        \n        self.execute(\"INSERT INTO {dst} SELECT * FROM {src}\"\n                     .format(dst=dst, src=src))\n\n        \n        self.commit()", "docstring": "Create a carbon copy of the source table.\n\nArguments:\n\nsrc (str): The name of the table to copy.\ndst (str): The name of the target duplicate table.\n\nRaises:\n\nsql.OperationalError: If source table does not exist.", "source": "juraj-google-style"}
{"code": "def schema(self) -> Schema:\n    return self._schema", "docstring": "Schema of the EventSetNode.\n\nThe schema defines the name and dtype of the features and the index.\n\nReturns:\nSchema of the EventSetNode.", "source": "github-repos"}
{"code": "def uid(uid):\n    if uid is None:\n        raise ValueError('UID cannot be None.')\n\n    def decorate(test_func):\n\n        @functools.wraps(test_func)\n        def wrapper(*args, **kwargs):\n            return test_func(*args, **kwargs)\n        setattr(wrapper, 'uid', uid)\n        return wrapper\n    return decorate", "docstring": "Decorator specifying the unique identifier (UID) of a test case.\n\nThe UID will be recorded in the test's record when executed by Mobly.\n\nIf you use any other decorator for the test method, you may want to use\nthis as the outer-most one.\n\nNote a common UID system is the Universal Unitque Identifier (UUID), but\nwe are not limiting people to use UUID, hence the more generic name `UID`.\n\nArgs:\nuid: string, the uid for the decorated test function.", "source": "github-repos"}
{"code": "def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer=None):\n    if self.lr_scheduler is None:\n        self.lr_scheduler = get_scheduler(self.args.lr_scheduler_type, optimizer=self.optimizer if optimizer is None else optimizer, num_warmup_steps=self.args.get_warmup_steps(num_training_steps), num_training_steps=num_training_steps, scheduler_specific_kwargs=self.args.lr_scheduler_kwargs)\n        self._created_lr_scheduler = True\n    return self.lr_scheduler", "docstring": "Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or\npassed as an argument.\n\nArgs:\nnum_training_steps (int): The number of training steps to do.", "source": "github-repos"}
{"code": "def get(self, name):\n        \n\n        name = str(name)\n        if name not in self._properties:\n            raise ArgumentError(\"Unknown property in DeviceModel\", name=name)\n\n        return self._properties[name]", "docstring": "Get a device model property.\n\nArgs:\nname (str): The name of the property to get", "source": "juraj-google-style"}
{"code": "def log_cdf(self, value, name='log_cdf'):\n    return self._call_log_cdf(value, name)", "docstring": "Log cumulative distribution function.\n\nGiven random variable `X`, the cumulative distribution function `cdf` is:\n\n```none\nlog_cdf(x) := Log[ P[X <= x] ]\n```\n\nOften, a numerical approximation can be used for `log_cdf(x)` that yields\na more accurate answer than simply taking the logarithm of the `cdf` when\n`x << -1`.\n\nArgs:\nvalue: `float` or `double` `Tensor`.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nlogcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with\nvalues of type `self.dtype`.", "source": "github-repos"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        writer.write(self.to_json_string())", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (`str` or `os.PathLike`):\nPath to the JSON file in which this feature_extractor instance's parameters will be saved.", "source": "github-repos"}
{"code": "def onTagAdd(self, name, func):\n        \n        \n        if '*' in name:\n            self.ontagaddglobs.add(name, func)\n        else:\n            self.ontagadds[name].append(func)", "docstring": "Register a callback for tag addition.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "juraj-google-style"}
{"code": "def get_clinvar_submission(store, institute_id, case_name, variant_id, submission_id):\n    \n\n    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n    pinned = [store.variant(variant_id) or variant_id for variant_id in\n                  case_obj.get('suspects', [])]\n    variant_obj = store.variant(variant_id)\n    clinvar_submission_objs = store.clinvars(submission_id=submission_id)\n    return dict(\n        today = str(date.today()),\n        institute=institute_obj,\n        case=case_obj,\n        variant=variant_obj,\n        pinned_vars=pinned,\n        clinvars = clinvar_submission_objs\n    )", "docstring": "Collects all variants from the clinvar submission collection with a specific submission_id\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str): Institute ID\ncase_name(str): case ID\nvariant_id(str): variant._id\nsubmission_id(str): clinvar submission id, i.e. SUB76578\n\nReturns:\nA dictionary with all the data to display the clinvar_update.html template page", "source": "juraj-google-style"}
{"code": "def _get_name_and_module(full_name):\n    name_segments = full_name.split('.')\n    return ('.'.join(name_segments[:-1]), name_segments[-1])", "docstring": "Split full_name into module and short name.\n\nArgs:\nfull_name: Full name of symbol that includes module.\n\nReturns:\nFull module name and short symbol name.", "source": "github-repos"}
{"code": "def list(cls, session, mailbox):\n        \n        endpoint = '/mailboxes/%d/conversations.json' % mailbox.id\n        return super(Conversations, cls).list(session, endpoint)", "docstring": "Return conversations in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox to list.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "juraj-google-style"}
{"code": "def put(self, filename, encoding=None):\n        \n        from . import LocalFile\n\n        if os.path.isdir(filename) and self.source is None:\n            raise ValueError(\"Cannot write this object to \"\n                             \"directory %s without an explicit filename.\" % filename)\n\n        target = get_target_path(filename, self.source)\n\n        if encoding is None:\n            encoding = self.encoding\n\n        if self._isbytes:\n            kwargs = {'mode': 'wb'}\n        else:\n            kwargs = {'mode': 'w', 'encoding': encoding}\n\n        with open(target, **kwargs) as outfile:\n            outfile.write(self._contents)\n\n        return LocalFile(target, encoded_with=encoding)", "docstring": "Write the file to the given path\n\nArgs:\nfilename (str): path to write this file to\nencoding (str): file encoding (default: system default)\n\nReturns:\nLocalFile: reference to the copy of the file stored at ``filename``", "source": "juraj-google-style"}
{"code": "def print_colored_columns(printer, rows, padding=2):\n    \n    rows_ = [x[:-1] for x in rows]\n    colors = [x[-1] for x in rows]\n    for col, line in zip(colors, columnise(rows_, padding=padding)):\n        printer(line, col)", "docstring": "Like `columnise`, but with colored rows.\n\nArgs:\nprinter (`colorize.Printer`): Printer object.\n\nNote:\nThe last entry in each row is the row color, or None for no coloring.", "source": "juraj-google-style"}
{"code": "def bit_to_int(x_bit, num_bits, base=2):\n  \n  x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))\n  x_labels = [\n      x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)]\n  res = sum(x_labels)\n  return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1]))", "docstring": "Turn x_bit representing numbers bitwise (lower-endian) to int tensor.\n\nArgs:\nx_bit: Tensor containing numbers in a particular base to be converted to\nint.\nnum_bits: Number of bits in the representation.\nbase: Base of the representation.\n\nReturns:\nInteger representation of this number.", "source": "juraj-google-style"}
{"code": "def merge(self, obj):\n        \n        if obj.id in self.cache:\n            self.cache[obj.id].merge(obj)\n        else:\n            self.cache[obj.id] = obj\n        return self.cache[obj.id]", "docstring": "Add a given object to the cache, or update an existing entry to include more fields.\n\nArgs:\nobj (SkypeObj): object to add to the cache", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    \n    tsk_file = None\n    inode = getattr(path_spec, 'inode', None)\n    location = getattr(path_spec, 'location', None)\n\n    root_inode = self.GetRootInode()\n    if (location == self.LOCATION_ROOT or\n        (inode is not None and root_inode is not None and inode == root_inode)):\n      tsk_file = self._tsk_file_system.open(self.LOCATION_ROOT)\n      return tsk_file_entry.TSKFileEntry(\n          self._resolver_context, self, path_spec, tsk_file=tsk_file,\n          is_root=True)\n\n    try:\n      if inode is not None:\n        tsk_file = self._tsk_file_system.open_meta(inode=inode)\n      elif location is not None:\n        tsk_file = self._tsk_file_system.open(location)\n\n    except IOError:\n      pass\n\n    if tsk_file is None:\n      return None\n\n    \n    return tsk_file_entry.TSKFileEntry(\n        self._resolver_context, self, path_spec, tsk_file=tsk_file)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nTSKFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def supply(self, issuer):\n        \n        issuer_uri_config = self._issuer_uri_configs.get(issuer)\n\n        if not issuer_uri_config:\n            \n            return\n\n        jwks_uri = issuer_uri_config.jwks_uri\n        if jwks_uri:\n            \n            return jwks_uri\n\n        \n        \n        open_id_valid = issuer_uri_config.open_id_valid\n        if open_id_valid:\n            discovered_jwks_uri = _discover_jwks_uri(issuer)\n            self._issuer_uri_configs[issuer] = IssuerUriConfig(False,\n                                                               discovered_jwks_uri)\n            return discovered_jwks_uri", "docstring": "Supplies the `jwks_uri` for the given issuer.\n\nArgs:\nissuer: the issuer.\n\nReturns:\nThe `jwks_uri` that is either statically configured or retrieved via\nOpenId discovery. None is returned when the issuer is unknown or the\nOpenId discovery fails.", "source": "juraj-google-style"}
{"code": "def cancelTickByTickData(self, contract: Contract, tickType: str):\n        \n        ticker = self.ticker(contract)\n        reqId = self.wrapper.endTicker(ticker, tickType)\n        if reqId:\n            self.client.cancelTickByTickData(reqId)\n        else:\n            self._logger.error(\n                f'cancelMktData: No reqId found for contract {contract}')", "docstring": "Unsubscribe from tick-by-tick data\n\nArgs:\ncontract: The exact contract object that was used to\nsubscribe with.", "source": "juraj-google-style"}
{"code": "def convertData(self, contents, def_buf, kwh_scale=ScaleKWH.EmptyScale):\n        \n        log_str = \"\"\n        count = 0\n        \n        \n        \n        \n        if kwh_scale == ScaleKWH.EmptyScale:\n            scale_offset = int(def_buf.keys().index(Field.kWh_Scale))\n            self.m_kwh_precision = kwh_scale = int(contents[scale_offset])\n\n        for fld in def_buf:\n\n            if def_buf[fld][MeterData.CalculatedFlag]:\n                count += 1\n                continue\n\n            if len(contents) == 0:\n                count += 1\n                continue\n\n            try:  \n                raw_data = contents[count]\n                fld_type = def_buf[fld][MeterData.TypeValue]\n                fld_scale = def_buf[fld][MeterData.ScaleValue]\n\n                if fld_type == FieldType.Float:\n                    float_data = float(str(raw_data))\n                    divisor = 1\n                    if fld_scale == ScaleType.KWH:\n                        divisor = 1\n                        if kwh_scale == ScaleKWH.Scale10:\n                            divisor = 10\n                        elif kwh_scale == ScaleKWH.Scale100:\n                            divisor = 100\n                        elif (kwh_scale != ScaleKWH.NoScale) and (kwh_scale != ScaleKWH.EmptyScale):\n                            ekm_log(\"Unrecognized kwh scale.\")\n                    elif fld_scale == ScaleType.Div10:\n                        divisor = 10\n                    elif fld_scale == ScaleType.Div100:\n                        divisor = 100\n                    elif fld_scale != ScaleType.No:\n                        ekm_log(\"Unrecognized float scale.\")\n                    float_data /= divisor\n                    float_data_str = str(float_data)\n                    def_buf[fld][MeterData.StringValue] = float_data_str\n                    def_buf[fld][MeterData.NativeValue] = float_data\n\n                elif fld_type == FieldType.Hex:\n                    hex_data = raw_data.encode('hex')\n                    def_buf[fld][MeterData.StringValue] = hex_data\n                    def_buf[fld][MeterData.NativeValue] = hex_data\n\n                elif fld_type == FieldType.Int:\n                    integer_data = int(raw_data)\n                    integer_data_str = str(integer_data)\n                    if len(integer_data_str) == 0:\n                        integer_data_str = str(0)\n                    def_buf[fld][MeterData.StringValue] = integer_data_str\n                    def_buf[fld][MeterData.NativeValue] = integer_data\n\n                elif fld_type == FieldType.String:\n                    string_data = str(raw_data)\n                    def_buf[fld][MeterData.StringValue] = string_data\n                    def_buf[fld][MeterData.NativeValue] = string_data\n\n                elif fld_type == FieldType.PowerFactor:\n                    def_buf[fld][MeterData.StringValue] = str(raw_data)\n                    def_buf[fld][MeterData.NativeValue] = str(raw_data)\n\n                else:\n                    ekm_log(\"Unrecognized field type\")\n\n                log_str = log_str + '\"' + fld + '\":  \"' + def_buf[fld][MeterData.StringValue] + '\"\\n'\n\n            except:\n                ekm_log(\"Exception on Field:\" + str(fld))\n                ekm_log(traceback.format_exc(sys.exc_info()))\n                self.writeCmdMsg(\"Exception on Field:\" + str(fld))\n\n            count += 1\n\n        return True", "docstring": "Move data from raw tuple into 
scaled and conveted values.\n\nArgs:\ncontents (tuple): Breakout of passed block from unpackStruct().\ndef_buf (): Read buffer destination.\nkwh_scale (int):  :class:`~ekmmeters.ScaleKWH` as int, from Field.kWhScale`\n\nReturns:\nbool: True on completion.", "source": "juraj-google-style"}
{"code": "def _setup_mock_socket_file(mock_socket_create_conn, resp):\n    fake_file = mock.Mock()\n    fake_file.readline.side_effect = resp\n    fake_conn = mock.Mock()\n    fake_conn.makefile.return_value = fake_file\n    mock_socket_create_conn.return_value = fake_conn\n    return fake_file", "docstring": "Sets up a mock socket file from the mock connection.\n\nArgs:\nmock_socket_create_conn: The mock method for creating a socket connection.\nresp: iterable, the side effect of the `readline` function of the mock\nsocket file.\n\nReturns:\nThe mock socket file that will be injected into the code.", "source": "github-repos"}
{"code": "def get_paginated_catalog_courses(self, catalog_id, querystring=None):\n    return self._load_data(self.CATALOGS_COURSES_ENDPOINT.format(catalog_id), default=[], querystring=querystring, traverse_pagination=False, many=False)", "docstring": "Return paginated response for all catalog courses.\n\nReturns:\ndict: API response with links to next and previous pages.", "source": "codesearchnet"}
{"code": "def price(\n        self,\n        instrument,\n        **kwargs\n    ):\n        \n\n        request = Request(\n            'GET',\n            '/v3/instruments/{instrument}/price'\n        )\n\n        request.set_path_param(\n            'instrument',\n            instrument\n        )\n\n        request.set_param(\n            'time',\n            kwargs.get('time')\n        )\n\n        response = self.ctx.request(request)\n\n\n        if response.content_type is None:\n            return response\n\n        if not response.content_type.startswith(\"application/json\"):\n            return response\n\n        jbody = json.loads(response.raw_body)\n\n        parsed_body = {}\n\n        \n        \n        \n        if str(response.status) == \"200\":\n            if jbody.get('price') is not None:\n                parsed_body['price'] = \\\n                    self.ctx.pricing_common.Price.from_dict(\n                        jbody['price'],\n                        self.ctx\n                    )\n\n        elif str(response.status) == \"400\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"401\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"404\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"405\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        \n        \n        \n        else:\n            parsed_body = jbody\n\n        response.body = parsed_body\n\n        return response", "docstring": "Fetch a price for an instrument. Accounts are not associated in any way\nwith this endpoint.\n\nArgs:\ninstrument:\nName of the Instrument\ntime:\nThe time at which the desired price is in effect. The current\nprice is returned if no time is provided.\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "juraj-google-style"}
{"code": "def peek(self, iroute: 'InstanceRoute') -> Optional[Value]:\n    val = self.value\n    sn = self.schema_node\n    for sel in iroute:\n        (val, sn) = sel.peek_step(val, sn)\n        if (val is None):\n            return None\n    return val", "docstring": "Return a value within the receiver's subtree.\n\nArgs:\niroute: Instance route (relative to the receiver).", "source": "codesearchnet"}
{"code": "def add_payload(self, key, val, append=True):\n        \n        if append:\n            self._params.setdefault(key, []).append(val)\n        else:\n            self._params[key] = val", "docstring": "Add a key value pair to payload for this request.\n\n.. Note:: For ``_search`` you can pass a search argument. (e.g. _search?summary=1.1.1.1).\n\nArgs:\nkey (string): The payload key\nval (string): The payload value\nappend (bool): Indicates whether the value should be appended or overwritten.", "source": "juraj-google-style"}
{"code": "def segments(seg_type=None):\n    \n\n    for index in xrange(idaapi.get_segm_qty()):\n        seg = Segment(index=index)\n        if (seg_type is None) or (seg.type == seg_type):\n            yield Segment(index=index)", "docstring": "Iterate segments based on type\n\nArgs:\nseg_type: type of segment e.g. SEG_CODE\n\nReturns:\niterator of `Segment` objects. if seg_type is None , returns all segments\notherwise returns only the relevant ones", "source": "juraj-google-style"}
{"code": "def _placement_points_generator(self, skyline, width):\n         \n        skyline_r = skyline[-1].right\n        skyline_l = skyline[0].left\n\n        \n        ppointsl = (s.left for s in skyline if s.left+width <= skyline_r)\n\n        \n        ppointsr = (s.right-width for s in skyline if s.right-width >= skyline_l)\n\n        \n        return heapq.merge(ppointsl, ppointsr)", "docstring": "Returns a generator for the x coordinates of all the placement\npoints on the skyline for a given rectangle.\n\nWARNING: In some cases could be duplicated points, but it is faster\nto compute them twice than to remove them.\n\nArguments:\nskyline (list): Skyline HSegment list\nwidth (int, float): Rectangle width\n\nReturns:\ngenerator", "source": "juraj-google-style"}
{"code": "def getTraitCovar(self, term_i=None):\n        \n        assert term_i < self.n_randEffs, 'VarianceDecomposition:: specied term out of range'\n\n        if term_i is None:\n            RV = sp.zeros((self.P,self.P))\n            for term_i in range(self.n_randEffs):\n                RV += self.getTraitCovarFun().K()\n        else:\n            assert term_i<self.n_randEffs, 'Term index non valid'\n            RV = self.getTraitCovarFun(term_i).K()\n        return RV", "docstring": "Return the estimated trait covariance matrix for term_i (or the total if term_i is None)\nTo retrieve the matrix of correlation coefficient use \\see getTraitCorrCoef\n\nArgs:\nterm_i:     index of the random effect term we want to retrieve the covariance matrix\nReturns:\nestimated trait covariance", "source": "juraj-google-style"}
{"code": "def get_repo_config(self, repo='default'):\n        \n        for repo_config in self.repositories:\n            if repo_config.name == repo or repo_config.url in RepositoryURL(repo):\n                return repo_config\n\n        return None", "docstring": "Retrieve configuration for a given repository.\n\nArgs:\nrepo (str): a repository \"realm\" (alias) or its URL\n\nReturns:\nRepositoryConfig: if there is configuration for that repository\nNone: otherwise", "source": "juraj-google-style"}
{"code": "def to_dict(ramons, flatten=False):\n    \n    if type(ramons) is not list:\n        ramons = [ramons]\n\n    out_ramons = {}\n    for r in ramons:\n        out_ramons[r.id] = {\n            \"id\": r.id,\n            \"type\": _reverse_ramon_types[type(r)],\n            \"metadata\": vars(r)\n        }\n    return out_ramons", "docstring": "Converts a RAMON object list to a JSON-style dictionary. Useful for going\nfrom an array of RAMONs to a dictionary, indexed by ID.\n\nArguments:\nramons (RAMON[]): A list of RAMON objects\nflatten (boolean: False): Not implemented\n\nReturns:\ndict: A python dictionary of RAMON objects.", "source": "juraj-google-style"}
{"code": "def add(self, predicted, target):\n    predicted = predicted.cpu().numpy()\n    target = target.cpu().numpy()\n    assert (predicted.shape[0] == target.shape[0]), 'number of targets and predicted outputs do not match'\n    if (np.ndim(predicted) != 1):\n        assert (predicted.shape[1] == self.k), 'number of predictions does not match size of confusion matrix'\n        predicted = np.argmax(predicted, 1)\n    else:\n        assert ((predicted.max() < self.k) and (predicted.min() >= 0)), 'predicted values are not between 1 and k'\n    onehot_target = (np.ndim(target) != 1)\n    if onehot_target:\n        assert (target.shape[1] == self.k), 'Onehot target does not match size of confusion matrix'\n        assert ((target >= 0).all() and (target <= 1).all()), 'in one-hot encoding, target values should be 0 or 1'\n        assert (target.sum(1) == 1).all(), 'multi-label setting is not supported'\n        target = np.argmax(target, 1)\n    else:\n        assert ((predicted.max() < self.k) and (predicted.min() >= 0)), 'predicted values are not between 0 and k-1'\n    x = (predicted + (self.k * target))\n    bincount_2d = np.bincount(x.astype(np.int32), minlength=(self.k ** 2))\n    assert (bincount_2d.size == (self.k ** 2))\n    conf = bincount_2d.reshape((self.k, self.k))\n    self.conf += conf", "docstring": "Computes the confusion matrix of K x K size where K is no of classes\n\nArgs:\npredicted (tensor): Can be an N x K tensor of predicted scores obtained from\nthe model for N examples and K classes or an N-tensor of\ninteger values between 0 and K-1.\ntarget (tensor): Can be a N-tensor of integer values assumed to be integer\nvalues between 0 and K-1 or N x K tensor, where targets are\nassumed to be provided as one-hot vectors", "source": "codesearchnet"}
{"code": "def get_attribute(self, main_type, sub_type, unique_id, attribute_id, owner=None, params=None):\n        \n        return self.attribute(\n            main_type, sub_type, unique_id, attribute_id, action='GET', owner=owner, params=params\n        )", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nattribute_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def update_panel(store, panel_name, csv_lines, option):\n    new_genes = []\n    panel_obj = store.gene_panel(panel_name)\n    if (panel_obj is None):\n        return None\n    try:\n        new_genes = parse_genes(csv_lines)\n    except SyntaxError as error:\n        flash(error.args[0], 'danger')\n        return None\n    if (option == 'replace'):\n        for gene in panel_obj['genes']:\n            gene['hgnc_symbol'] = gene['symbol']\n            store.add_pending(panel_obj, gene, action='delete', info=None)\n    for new_gene in new_genes:\n        if (not new_gene['hgnc_id']):\n            flash('gene missing hgnc id: {}'.format(new_gene['hgnc_symbol']), 'danger')\n            continue\n        gene_obj = store.hgnc_gene(new_gene['hgnc_id'])\n        if (gene_obj is None):\n            flash('gene not found: {} - {}'.format(new_gene['hgnc_id'], new_gene['hgnc_symbol']), 'danger')\n            continue\n        if (new_gene['hgnc_symbol'] and (gene_obj['hgnc_symbol'] != new_gene['hgnc_symbol'])):\n            flash('symbol mis-match: {0} | {1}'.format(gene_obj['hgnc_symbol'], new_gene['hgnc_symbol']), 'warning')\n        info_data = {'disease_associated_transcripts': new_gene['transcripts'], 'reduced_penetrance': new_gene['reduced_penetrance'], 'mosaicism': new_gene['mosaicism'], 'inheritance_models': new_gene['inheritance_models'], 'database_entry_version': new_gene['database_entry_version']}\n        if (option == 'replace'):\n            action = 'add'\n        else:\n            existing_genes = {gene['hgnc_id'] for gene in panel_obj['genes']}\n            action = ('edit' if (gene_obj['hgnc_id'] in existing_genes) else 'add')\n        store.add_pending(panel_obj, gene_obj, action=action, info=info_data)\n    return panel_obj", "docstring": "Update an existing gene panel with genes.\n\nArgs:\nstore(scout.adapter.MongoAdapter)\npanel_name(str)\ncsv_lines(iterable(str)): Stream with genes\noption(str): 'add' or 'replace'\n\nReturns:\npanel_obj(dict)", "source": "codesearchnet"}
{"code": "def add_group_member(self, grp_name, user):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.add_group_member(grp_name, user)", "docstring": "Add the given user to the named group.\n\nBoth group and user must already exist for this to succeed.\n\nArgs:\nname (string): Name of group.\nuser_name (string): User to add to group.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def execute(self, command, data={}):\n    (method, uri) = command\n    try:\n        path = self._formatter.format_map(uri, data)\n        body = self._formatter.get_unused_kwargs()\n        url = '{0}{1}'.format(self._url, path)\n        return self._request(method, url, body)\n    except KeyError as err:\n        LOGGER.debug('Endpoint {0} is missing argument {1}'.format(uri, err))\n        raise", "docstring": "Format the endpoint url by data and then request the remote server.\n\nArgs:\ncommand(Command): WebDriver command to be executed.\ndata(dict): Data fulfill the uri template and json body.\n\nReturns:\nA dict represent the json body from server response.\n\nRaises:\nKeyError: Data cannot fulfill the variable which command needed.\nConnectionError: Meet network problem (e.g. DNS failure,\nrefused connection, etc).\nTimeout: A request times out.\nHTTPError: HTTP request returned an unsuccessful status code.", "source": "codesearchnet"}
{"code": "def __contains__(self, item):\n        \n        if self is item:\n            return True\n        elif self.package is item and self.name == '__init__':\n            return True\n        return False", "docstring": "Whether given item is contained inside this module.\n\nArgs:\nitem (Package/Module): a package or module.\n\nReturns:\nbool:\nTrue if self is item or item is self's package and\nself if an ``__init__`` module.", "source": "juraj-google-style"}
{"code": "def reduce_and_verify(self, inputs, expect, options):\n\n    def replica_fn():\n        CollectiveReplicaLauncher._prefer_unique_instance_key = options.prefer_unique_instance_key\n        collective, devices, pid = self.make_collective(options.num_processes, options.gpus_per_process)\n\n        def reduce_fn():\n            value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]\n            per_replica_value = make_per_replica_value(value_fn, devices)\n            reduced_values = collective.reduce(options.reduce_op, per_replica_value, per_replica_value, options.communication_options)\n            if options.gpus_per_process > 1:\n                self.assertIsInstance(reduced_values, value_lib.Mirrored)\n            reduced_values = self.as_list(reduced_values)\n            self.assertAllEqual(devices, [v.device for v in reduced_values])\n            return [ops.convert_to_tensor(v) for v in reduced_values]\n        per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)\n        if 'eager' in options.mode:\n            got = reduce_fn()\n            self.assertAllClose(got, per_replica_expect)\n        if 'func_graph' in options.mode:\n            got = def_function.function(reduce_fn)()\n            self.assertAllClose(got, per_replica_expect)\n    get_global_mpr(options.num_processes).run(replica_fn)", "docstring": "Reduce the given `inputs` and verify the output matches `expect`.\n\nArgs:\ninputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be\nfed to i-th replica.\nexpect: a `Tensor` or `IndexedSlices`. This should be the expected value\nfor one replica.\noptions: a `RunOpotions` instance.", "source": "github-repos"}
{"code": "def shift(self, time: int) -> 'Interval':\n    return Interval((self._begin + time), (self._end + time))", "docstring": "Return a new interval shifted by `time` from self\n\nArgs:\ntime: time to be shifted\n\nReturns:\nInterval: interval shifted by `time`", "source": "codesearchnet"}
{"code": "def _create_grad_func(ys, xs, grads, cond_graph, body_graph, name, while_op, maximum_iterations):\n    assert len(ys) == len(grads)\n    total_iters = while_op.outputs[0]\n    counter = constant_op.constant(0, dtype=total_iters.dtype, name='grad_counter')\n    body_graph_inputs = object_identity.ObjectIdentitySet(body_graph.inputs)\n    body_graph_outputs = object_identity.ObjectIdentitySet(body_graph.outputs)\n    args = [counter, maximum_iterations, total_iters] + list(grads)\n    grad_func_graph = func_graph_module.func_graph_from_py_func(name, lambda *args: _grad_fn(ys, xs, args, body_graph), args, {}, func_graph=_WhileBodyGradFuncGraph(name, cond_graph, body_graph, maximum_iterations, while_op, body_graph_inputs, body_graph_outputs))\n    for external_capture, internal_capture in grad_func_graph.captures:\n        if ops.tensor_id(internal_capture) in grad_func_graph.internal_capture_to_output:\n            new_output = grad_func_graph.internal_capture_to_output[ops.tensor_id(internal_capture)]\n        else:\n            raise ValueError(f'Tensor {str(internal_capture)} which captures {str(external_capture)} is in list of internal_captures but not in internal_capture_to_output.')\n        grad_func_graph.outputs.append(new_output)\n        grad_func_graph.structured_outputs.append(new_output)\n    return (grad_func_graph, args)", "docstring": "Builds and returns the gradient FuncGraph of `func_graph` and its args.\n\nThe returned grad_func_graph must be called with the returned\nargs + grad_func_graph.captures.\n\nArgs:\nys: A `Tensor` or list of tensors to be differentiated.\nxs: A `Tensor` or list of tensors to be used for differentiation.\ngrads: The incoming grads for `ys`.\ncond_graph: FuncGraph for the forward cond function.\nbody_graph: FuncGraph for the forward body function.\nname: Name of the returned gradient function.\nwhile_op: The forward While op.\nmaximum_iterations: Tensor. The maximum number of iterations.\n\nReturns:\n2-tuple of (grad_func_graph, args).", "source": "github-repos"}
{"code": "def update(self, data):\n        \n        \n        if data.state['Name'] == 'terminated':\n            self.delete(auto_commit=False)\n            return True\n\n        updated = self.set_property('launch_date', to_utc_date(data.launch_time).isoformat())\n        updated |= self.set_property('state', data.state['Name'])\n        updated |= self.set_property('instance_type', data.instance_type)\n        updated |= self.set_property('public_ip', data.public_ip_address or None)\n        updated |= self.set_property('public_dns', data.public_dns_name or None)\n\n        tags = {x['Key']: x['Value'] for x in data.tags or {}}\n        existing_tags = {x.key: x for x in self.tags}\n\n        \n        for key, value in list(tags.items()):\n            updated |= self.set_tag(key, value)\n\n        \n        for key in list(existing_tags.keys()):\n            if key not in tags:\n                updated |= self.delete_tag(key)\n\n        return updated", "docstring": "Updates the object information based on live data, if there were any changes made. Any changes will be\nautomatically applied to the object, but will not be automatically persisted. You must manually call\n`db.session.add(instance)` on the object.\n\nArgs:\ndata (:obj:): AWS API Resource object fetched from AWS API\n\nReturns:\nTrue if there were any changes to the object, else false", "source": "juraj-google-style"}
{"code": "def create_seq(character, action_metadata, direction, length=8, start=0):\n    sprite_start = ((action_metadata[0] + direction) * FRAME_SIZE)\n    sprite_end = (((action_metadata[0] + direction) + 1) * FRAME_SIZE)\n    sprite_line = character[(sprite_start:sprite_end, ...)]\n    frames = tf.stack(tf.split(sprite_line, 13, axis=1))\n    frames = frames[0:action_metadata[1]]\n    frames = tf.roll(frames, shift=(- start), axis=0)\n    frames = tf.tile(frames, [2, 1, 1, 1])\n    frames = frames[:length]\n    frames = tf.cast(frames, dtype=tf.float32)\n    frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])\n    return frames", "docstring": "Creates a sequence.\n\nArgs:\ncharacter: A character sprite tensor.\naction_metadata: An action metadata tuple.\ndirection: An integer representing the direction, i.e., the row\noffset within each action group corresponding to a particular\ndirection.\nlength: Desired length of the sequence. If this is longer than\nthe number of available frames, it will roll over to the\nbeginning.\nstart: Index of possible frames at which to start the sequence.\n\nReturns:\nA sequence tensor.", "source": "codesearchnet"}
{"code": "def from_filename(filename, require=None):\n    with io.open(filename, 'r', encoding='utf-8') as json_file:\n        data = json.load(json_file)\n        return (data, from_dict(data, require=require))", "docstring": "Reads a Google service account JSON file and returns its parsed info.\n\nArgs:\nfilename (str): The path to the service account .json file.\nrequire (Sequence[str]): List of keys required to be present in the\ninfo.\n\nReturns:\nTuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified\ninfo and a signer instance.", "source": "codesearchnet"}
{"code": "def get_nowait(self, name, default=_MISSING, autoremove=False):\n    self._ensure_declared(name)\n    try:\n        future = self._data[name]\n        if future.done():\n            return future.result()\n        if (default is _MISSING):\n            raise KeyError('Key {} has not been assigned a value and no default given'.format(name))\n        return default\n    finally:\n        if autoremove:\n            self._data[name].cancel()\n            del self._data[name]", "docstring": "Get the value of a key if it is already set.\n\nThis method allows you to check if a key has already been set\nwithout blocking.  If the key has not been set you will get the\ndefault value you pass in or KeyError() if no default is passed.\n\nWhen this method returns the key is automatically removed unless\nyou pass ``autoremove=False``.\n\nThis method is not a coroutine and does not block.\n\nArgs:\nname (str): The name of the key to wait on.\ndefault (object): The default value to return if the key\nhas not yet been set.  Defaults to raising KeyError().\nautoremove (bool): Whether to automatically remove the\nkey when get() returns.\n\nReturns:\nobject: Whatever was set in the key by :meth:`set`.", "source": "codesearchnet"}
{"code": "def __init__(self, env):\n    \n    self._env = env\n    self._observation_space = self._env.observation_space\n    self._action_space = self._env.action_space", "docstring": "Cache observation and action space to not recompute them repeatedly.\n\nArgs:\nenv: OpenAI Gym environment.", "source": "juraj-google-style"}
{"code": "def __init__(self, prefs, g, divPressureValues, kappa=2.0, omega=0.5,\n            beta=1.0, mu=1.0,omega2=0.0,\n            freeparams=['kappa', 'omega', 'beta', 'mu', 'omega2']):\n        \n        _checkParam('omega2',omega2, self.PARAMLIMITS, self.PARAMTYPES)\n        self.omega2 = omega2\n        self.deltar = scipy.array(divPressureValues.copy())\n        assert (max(scipy.absolute(self.deltar))) <= 1, (\n                \"A scaled deltar value is > 1 or < -1.\")\n        super(ExpCM_empirical_phi_divpressure, self).__init__(prefs, g,\n                kappa=kappa, omega=omega, beta=beta, mu=mu,\n                freeparams=freeparams)", "docstring": "Initialize an `ExpCM_empirical_phi_divpressure` object.\n\nArgs:\n`prefs`, `kappa`, `omega`, `beta`, `mu`, `g`, `freeparams`\nSame meaning as for an `ExpCM_empirical_phi`\n`divPressureValues`, `omega2`\nMeaning described in the main class doc string.", "source": "juraj-google-style"}
{"code": "def unwrap(data_type):\n    \n    unwrapped_nullable = False\n    unwrapped_alias = False\n    while is_alias(data_type) or is_nullable_type(data_type):\n        if is_nullable_type(data_type):\n            unwrapped_nullable = True\n        if is_alias(data_type):\n            unwrapped_alias = True\n        data_type = data_type.data_type\n    return data_type, unwrapped_nullable, unwrapped_alias", "docstring": "Convenience method to unwrap all Aliases and Nullables from around a\nDataType. This checks for nullable wrapping aliases, as well as aliases\nwrapping nullables.\n\nArgs:\ndata_type (DataType): The target to unwrap.\n\nReturn:\nTuple[DataType, bool, bool]: The underlying data type; a bool that is\nset if a nullable was present; a bool that is set if an alias was\npresent.", "source": "juraj-google-style"}
{"code": "def add(self, command, *args):\n    cmd = Command(command, args)\n    self.commands.append(cmd)", "docstring": "Add a command to this command file.\n\nArgs:\ncommand (str): The command to add\n*args (str): The parameters to call the command with", "source": "codesearchnet"}
{"code": "def create(self, ip_dest, next_hop, **kwargs):\n    return self._set_route(ip_dest, next_hop, **kwargs)", "docstring": "Create a static route\n\nArgs:\nip_dest (string): The ip address of the destination in the\nform of A.B.C.D/E\nnext_hop (string): The next hop interface or ip address\n**kwargs['next_hop_ip'] (string): The next hop address on\ndestination interface\n**kwargs['distance'] (string): Administrative distance for this\nroute\n**kwargs['tag'] (string): Route tag\n**kwargs['route_name'] (string): Route name\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def spawn(self, function, *args, **kwargs):\n        \n        \n        assert self.state != STOPPED, \"Can't spawn when process stopped\"\n        spawned = Spawned(function, args, kwargs)\n        self._spawned.append(spawned)\n        self._spawn_count += 1\n        \n        if self._spawn_count > SPAWN_CLEAR_COUNT:\n            self._clear_spawn_list()\n        return spawned", "docstring": "Runs the function in a worker thread, returning a Result object\n\nArgs:\nfunction: Function to run\nargs: Positional arguments to run the function with\nkwargs: Keyword arguments to run the function with\n\nReturns:\nSpawned: Something you can call wait(timeout) on to see when it's\nfinished executing", "source": "juraj-google-style"}
{"code": "def GetSizeHint(self, context=None, **unused_kwargs):\n    \n    context_state = getattr(context, 'state', {})\n\n    elements_data_size = self.GetByteSize()\n    if elements_data_size:\n      return elements_data_size\n\n    try:\n      elements_data_size = self._CalculateElementsDataSize(context)\n    except errors.MappingError:\n      pass\n\n    if elements_data_size is None and self._HasElementsTerminator():\n      size_hints = context_state.get('size_hints', {})\n      size_hint = size_hints.get(self._data_type_definition.name, None)\n\n      elements_data_size = 0\n\n      if size_hint:\n        elements_data_size = size_hint.byte_size\n\n      if not size_hint or not size_hint.is_complete:\n        elements_data_size += self._element_data_type_definition.GetByteSize()\n\n    return elements_data_size", "docstring": "Retrieves a hint about the size.\n\nArgs:\ncontext (Optional[DataTypeMapContext]): data type map context, used to\ndetermine the size hint.\n\nReturns:\nint: hint of the number of bytes needed from the byte stream or None.", "source": "juraj-google-style"}
{"code": "def course_blocks(self, course_id, username):\n    resp = self.requester.get(urljoin(self.base_url, '/api/courses/v1/blocks/'), params={'depth': 'all', 'username': username, 'course_id': course_id, 'requested_fields': 'children,display_name,id,type,visible_to_staff_only'})\n    resp.raise_for_status()\n    return Structure(resp.json())", "docstring": "Fetches course blocks.\n\nArgs:\ncourse_id (str): An edx course id.\nusername (str): username of the user to query for (can reveal hidden\nmodules)\n\nReturns:\nStructure", "source": "codesearchnet"}
{"code": "def tuplesorted(items, *keys):\n    tuple_keys = [Key(func=(lambda t, i=index, k=key: k.func(t[i])), reverse=key.reverse) for (index, key) in enumerate(keys)]\n    return multisorted(items, *tuple_keys)", "docstring": "Sort by tuples with a different key for each item.\n\nArgs:\nitems: An iterable series of sequences (typically tuples)\n\n*keys: Key objects which transform individual elements of\neach tuple into sort keys. The zeroth object\ntransforms the zeroth element of each tuple, the first\nkey object transforms the first element of each tuple,\nand so on.\nReturns:\nA list of items sorted according to keys.", "source": "codesearchnet"}
{"code": "def GetKeyByPath(self, key_path):\n    \n    key_path_upper = key_path.upper()\n    if key_path_upper.startswith(self._key_path_prefix_upper):\n      relative_key_path = key_path[self._key_path_prefix_length:]\n    elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):\n      relative_key_path = key_path\n      key_path = ''.join([self._key_path_prefix, key_path])\n    else:\n      return None\n\n    path_segments = key_paths.SplitKeyPath(relative_key_path)\n    registry_key = self._root_key\n    if not registry_key:\n      return None\n\n    for path_segment in path_segments:\n      registry_key = registry_key.GetSubkeyByName(path_segment)\n      if not registry_key:\n        return None\n\n    return registry_key", "docstring": "Retrieves the key for a specific path.\n\nArgs:\nkey_path (str): Windows Registry key path.\n\nReturns:\nWinRegistryKey: Windows Registry key or None if not available.", "source": "juraj-google-style"}
{"code": "def message_factory(msg_type, msg_types=MESSAGE_TYPES, *args, **kwargs):\n    \n    try:\n        return msg_types[msg_type.lower()](*args, **kwargs)\n    except (UnknownProfileError, InvalidMessageInputError) as e:\n        err_exit(\"Unable to send message: \", e)\n    except KeyError:\n        raise UnsupportedMessageTypeError(msg_type, msg_types)", "docstring": "Factory function to return the specified message instance.\n\nArgs:\n:msg_type: (str) the type of message to send, i.e. 'Email'\n:msg_types: (str, list, or set) the supported message types\n:kwargs: (dict) keywords arguments that are required for the\nvarious message types.  See docstrings for each type.\ni.e. help(messages.Email), help(messages.Twilio), etc.", "source": "juraj-google-style"}
{"code": "def getPixmap(page, matrix = None, colorspace = csRGB, clip = None,\n              alpha = True):\n    \n    CheckParent(page)\n\n    \n    cs = colorspace\n    if type(colorspace) is str:\n        if colorspace.upper() == \"GRAY\":\n            cs = csGRAY\n        elif colorspace.upper() == \"CMYK\":\n            cs = csCMYK\n        else:\n            cs = csRGB\n    if cs.n not in (1,3,4):\n        raise ValueError(\"unsupported colorspace\")\n\n    dl = page.getDisplayList()               \n    if clip:\n        scissor = Rect(clip)\n    else:\n        scissor = None\n    pix = dl.getPixmap(matrix = matrix,\n                       colorspace = cs,\n                       alpha = alpha,\n                       clip = scissor)\n    del dl\n    return pix", "docstring": "Create pixmap of page.\n\nArgs:\nmatrix: Matrix for transformation (default: Identity).\ncolorspace: (str/Colorspace) rgb, rgb, gray - case ignored, default csRGB.\nclip: (irect-like) restrict rendering to this area.\nalpha: (bool) include alpha channel", "source": "juraj-google-style"}
{"code": "def _log_effective_mass_data(data, is_spin_polarized, mass_type='m_e'):\n    s = (' ({})'.format(data['spin'].name) if is_spin_polarized else '')\n    band_str = 'band {}{}'.format((data['band_id'] + 1), s)\n    start_kpoint = data['start_kpoint']\n    end_kpoint = data['end_kpoint']\n    eff_mass = data['effective_mass']\n    kpoint_str = kpt_str.format(k=start_kpoint.frac_coords)\n    if start_kpoint.label:\n        kpoint_str += ' ({})'.format(start_kpoint.label)\n    kpoint_str += ' -> '\n    kpoint_str += kpt_str.format(k=end_kpoint.frac_coords)\n    if end_kpoint.label:\n        kpoint_str += ' ({})'.format(end_kpoint.label)\n    logging.info('  {}: {:.3f} | {} | {}'.format(mass_type, eff_mass, band_str, kpoint_str))", "docstring": "Log data about the effective masses and their directions.\n\nArgs:\ndata (dict): The effective mass data. Formatted as a :obj:`dict` with\nthe keys:\n\n'effective_mass' (:obj:`float`)\nThe effective mass in units of electron rest mass, :math:`m_0`.\n\n'energies' (:obj:`numpy.ndarray`)\nBand eigenvalues in eV.\n\n'band_id' (:obj:`int`)\nThe index of the band,\n\n'spin' (:obj:`~pymatgen.electronic_structure.core.Spin`)\nThe spin channel\n\n'start_kpoint' (:obj:`int`)\nThe index of the k-point at which the band extrema occurs\n\n'end_kpoint' (:obj:`int`)\nThe k-point towards which the data has been sampled.\n\nis_spin_polarized (bool): Whether the system is spin polarized.", "source": "codesearchnet"}
{"code": "def stations(self, station, limit=10):\n        \n        query = {\n            'start': 1,\n            'S': station + '?',\n            'REQ0JourneyStopsB': limit\n        }\n        rsp = requests.get('http:\n        return parse_stations(rsp.text)", "docstring": "Find stations for given queries\n\nArgs:\nstation (str): search query\nlimit (int): limit number of results", "source": "juraj-google-style"}
{"code": "def generate_srt_from_sjson(sjson_subs):\n        \n\n        output = ''\n\n        equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])\n        if not equal_len:\n            return output\n\n        for i in range(len(sjson_subs['start'])):\n            item = SubRipItem(\n                index=i,\n                start=SubRipTime(milliseconds=sjson_subs['start'][i]),\n                end=SubRipTime(milliseconds=sjson_subs['end'][i]),\n                text=sjson_subs['text'][i]\n            )\n            output += (six.text_type(item))\n            output += '\\n'\n        return output", "docstring": "Generate transcripts from sjson to SubRip (*.srt).\n\nArguments:\nsjson_subs (dict): `sjson` subs.\n\nReturns:\nSubtitles in SRT format.", "source": "juraj-google-style"}
{"code": "def _process_celeba_config_file(self, file_path):\n    with tf.io.gfile.GFile(file_path) as f:\n        data_raw = f.read()\n    lines = data_raw.split('\\n')\n    keys = lines[1].strip().split()\n    values = {}\n    for line in lines[2:(- 1)]:\n        row_values = line.strip().split()\n        values[row_values[0]] = [int(v) for v in row_values[1:]]\n    return (keys, values)", "docstring": "Unpack the celeba config file.\n\nThe file starts with the number of lines, and a header.\nAfterwards, there is a configuration for each file: one per line.\n\nArgs:\nfile_path: Path to the file with the configuration.\n\nReturns:\nkeys: names of the attributes\nvalues: map from the file name to the list of attribute values for\nthis file.", "source": "codesearchnet"}
{"code": "def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):\n    if (num_levels < 1):\n        raise ValueError('num_levels {} is less than one'.format(num_levels))\n    all_levels = SortedFrozenSet(range(num_levels))\n    if ((h_level_indexes is None) and (v_level_indexes is None)):\n        v_level_indexes = range(0, num_levels, 2)\n        h_level_indexes = range(1, num_levels, 2)\n    h_level_set = SortedFrozenSet(h_level_indexes)\n    v_level_set = SortedFrozenSet(v_level_indexes)\n    if (h_level_indexes is None):\n        h_level_indexes = (all_levels - v_level_set)\n    if (v_level_indexes is None):\n        v_level_indexes = (all_levels - h_level_set)\n    if (len(h_level_indexes) != len(h_level_set)):\n        raise ValueError('h_level_indexes contains duplicate values')\n    if (h_level_set and ((h_level_set[0] < 0) or (h_level_set[(- 1)] >= num_levels))):\n        raise ValueError('h_level_indexes contains out of range values')\n    if (len(v_level_indexes) != len(v_level_set)):\n        raise ValueError('v_level_indexes contains duplicate values')\n    if (v_level_set and ((v_level_set[0] < 0) or (v_level_set[(- 1)] >= num_levels))):\n        raise ValueError('v_level_indexes contains out of range values')\n    unmentioned_levels = ((all_levels - v_level_set) - h_level_set)\n    if (len(unmentioned_levels) > 0):\n        raise ValueError('v_level_indexes and h_level_indexes do not together include levels {}'.format(', '.join(map(str, unmentioned_levels))))\n    if (not h_level_set.isdisjoint(v_level_set)):\n        raise ValueError('h_level_indexes and v_level_indexes are not disjoint')\n    v_level_indexes = list(v_level_indexes)\n    h_level_indexes = list(h_level_indexes)\n    return (v_level_indexes, h_level_indexes)", "docstring": "Ensure that v_level_indexes and h_level_indexes are consistent.\n\nArgs:\nnum_levels: The number of levels of keys in the data structure being tabulated.\nv_level_indexes: A sequence of level indexes between zero and num_levels for\nthe vertical axis, or None.\nh_level_indexes: A sequence of level indexes between zero and num_levels for for\nthe horizontal axis, or None.\n\nReturns:\nA 2-tuple containing v_level_indexes and h_level_indexes sequences.\n\nRaises:\nValueError: If v_level_indexes contains duplicate values.\nValueError: If h_level_indexes contains duplicate values.\nValueError: If v_level_indexes contains out of range values.\nValueError: If h_level_indexes contains out of range values.\nValueError: If taken together v_level_indexes and h_level_indexes\ndo not include all levels from zero to up to, but not including\nnum_levels.\nValueError: If v_level_indexes and h_level_indexes have items in\ncommon.", "source": "codesearchnet"}
{"code": "def gradient_summaries(grad_vars, groups=None, scope='gradients'):\n  \n  groups = groups or {r'all': r'.*'}\n  grouped = collections.defaultdict(list)\n  for grad, var in grad_vars:\n    if grad is None:\n      continue\n    for name, pattern in groups.items():\n      if re.match(pattern, var.name):\n        name = re.sub(pattern, name, var.name)\n        grouped[name].append(grad)\n  for name in groups:\n    if name not in grouped:\n      tf.logging.warn(\"No variables matching '{}' group.\".format(name))\n  summaries = []\n  for name, grads in grouped.items():\n    grads = [tf.reshape(grad, [-1]) for grad in grads]\n    grads = tf.concat(grads, 0)\n    summaries.append(tf.summary.histogram(scope + '/' + name, grads))\n  return tf.summary.merge(summaries)", "docstring": "Create histogram summaries of the gradient.\n\nSummaries can be grouped via regexes matching variables names.\n\nArgs:\ngrad_vars: List of (gradient, variable) tuples as returned by optimizers.\ngroups: Mapping of name to regex for grouping summaries.\nscope: Name scope for this operation.\n\nReturns:\nSummary tensor.", "source": "juraj-google-style"}
{"code": "def parse_hgnc_line(line, header):\n    \n    hgnc_gene = {}\n    line = line.rstrip().split('\\t')\n    raw_info = dict(zip(header, line))\n    \n    if 'Withdrawn' in raw_info['status']:\n        return hgnc_gene\n    \n    hgnc_symbol = raw_info['symbol']\n    hgnc_gene['hgnc_symbol'] = hgnc_symbol\n    hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[-1])\n    hgnc_gene['description'] = raw_info['name']\n    \n    \n    aliases = set([hgnc_symbol, hgnc_symbol.upper()])\n    \n    \n    previous_names = raw_info['prev_symbol']\n    if previous_names:\n        for alias in previous_names.strip('\"').split('|'):\n            aliases.add(alias)\n\n    alias_symbols = raw_info['alias_symbol']\n    if alias_symbols:\n        for alias in alias_symbols.strip('\"').split('|'):\n            aliases.add(alias)\n\n    hgnc_gene['previous_symbols'] = list(aliases)\n\n    \n    hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')\n\n    omim_id = raw_info.get('omim_id')\n    if omim_id:\n        hgnc_gene['omim_id'] = int(omim_id.strip('\"').split('|')[0])\n    else:\n        hgnc_gene['omim_id'] = None\n\n    entrez_id = hgnc_gene['entrez_id'] = raw_info.get('entrez_id')\n    if entrez_id:\n        hgnc_gene['entrez_id'] = int(entrez_id)\n    else:\n        hgnc_gene['entrez_id'] = None\n\n    \n    ref_seq = raw_info.get('refseq_accession')\n    if ref_seq:\n        hgnc_gene['ref_seq'] = ref_seq.strip('\"').split('|')\n    else:\n        hgnc_gene['ref_seq'] = []\n\n    uniprot_ids = raw_info.get('uniprot_ids')\n    if uniprot_ids:\n        hgnc_gene['uniprot_ids'] = uniprot_ids.strip('\"\"').split('|')\n    else:\n        hgnc_gene['uniprot_ids'] = []\n\n    ucsc_id = raw_info.get('ucsc_id')\n    if ucsc_id:\n        hgnc_gene['ucsc_id'] = ucsc_id\n    else:\n        hgnc_gene['ucsc_id'] = None\n\n    vega_id = raw_info.get('vega_id')\n    if vega_id:\n        hgnc_gene['vega_id'] = vega_id\n    else:\n        hgnc_gene['vega_id'] = None\n\n    return hgnc_gene", "docstring": "Parse an hgnc formated line\n\nArgs:\nline(list): A list with hgnc gene info\nheader(list): A list with the header info\n\nReturns:\nhgnc_info(dict): A dictionary with the relevant info", "source": "juraj-google-style"}
{"code": "def recompute_grad(fn):\n\n    @functools.wraps(fn)\n    def wrapped(*args):\n        return _recompute_grad(fn, args)\n    return wrapped", "docstring": "Decorator that recomputes the function on the backwards pass.\n\nArgs:\nfn: a function that takes Tensors (all as positional arguments) and returns\na tuple of Tensors.\n\nReturns:\nA wrapped fn that is identical to fn when called, but its activations will\nbe discarded and recomputed on the backwards pass (i.e. on a call to\ntf.gradients).", "source": "codesearchnet"}
{"code": "def make_qs(n, m=None):\n    \n    try:\n        import sympy\n    except ImportError:\n        raise ImportError(\"This function requires sympy. Please install it.\")\n    if m is None:\n        syms = sympy.symbols(\" \".join(f\"q{i}\" for i in range(n)))\n        if isinstance(syms, tuple):\n            return syms\n        else:\n            return (syms,)\n    syms = sympy.symbols(\" \".join(f\"q{i}\" for i in range(n, m)))\n    if isinstance(syms, tuple):\n        return syms\n    else:\n        return (syms,)", "docstring": "Make sympy symbols q0, q1, ...\n\nArgs:\nn(int), m(int, optional):\nIf specified both n and m, returns [qn, q(n+1), ..., qm],\nOnly n is specified, returns[q0, q1, ..., qn].\n\nReturn:\ntuple(Symbol): Tuple of sympy symbols.", "source": "juraj-google-style"}
{"code": "def CheckFile(self, filename):\n    \n    result = True\n    artifact_reader = reader.YamlArtifactsReader()\n\n    try:\n      for artifact_definition in artifact_reader.ReadFile(filename):\n        try:\n          self._artifact_registry.RegisterDefinition(artifact_definition)\n        except KeyError:\n          logging.warning(\n              'Duplicate artifact definition: {0:s} in file: {1:s}'.format(\n                  artifact_definition.name, filename))\n          result = False\n\n        artifact_definition_supports_macos = (\n            definitions.SUPPORTED_OS_DARWIN in (\n                artifact_definition.supported_os))\n        artifact_definition_supports_windows = (\n            definitions.SUPPORTED_OS_WINDOWS in (\n                artifact_definition.supported_os))\n\n        for source in artifact_definition.sources:\n          if source.type_indicator in (\n              definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH):\n\n            if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or (\n                artifact_definition_supports_macos and\n                not source.supported_os)):\n              if not self._CheckMacOSPaths(\n                  filename, artifact_definition, source, source.paths):\n                result = False\n\n            elif (artifact_definition_supports_windows or\n                  definitions.SUPPORTED_OS_WINDOWS in source.supported_os):\n              for path in source.paths:\n                if not self._CheckWindowsPath(\n                    filename, artifact_definition, source, path):\n                  result = False\n\n          elif source.type_indicator == (\n              definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n\n            \n            \n            if (filename != self.LEGACY_PATH and\n                self._HasDuplicateRegistryKeyPaths(\n                    filename, artifact_definition, source)):\n              result = False\n\n            for key_path in source.keys:\n              if not self._CheckWindowsRegistryKeyPath(\n                  filename, artifact_definition, key_path):\n                result = False\n\n          elif source.type_indicator == (\n              definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):\n\n            for key_value_pair in source.key_value_pairs:\n              if not self._CheckWindowsRegistryKeyPath(\n                  filename, artifact_definition, key_value_pair['key']):\n                result = False\n\n    except errors.FormatError as exception:\n      logging.warning(\n          'Unable to validate file: {0:s} with error: {1!s}'.format(\n              filename, exception))\n      result = False\n\n    return result", "docstring": "Validates the artifacts definition in a specific file.\n\nArgs:\nfilename (str): name of the artifacts definition file.\n\nReturns:\nbool: True if the file contains valid artifacts definitions.", "source": "juraj-google-style"}
{"code": "def _AddAttribute(self, attribute):\n    if (attribute.identifier in self._attributes):\n        raise KeyError('Volume attribute object already set for volume attribute identifier: {0:s}.'.format(attribute.identifier))\n    self._attributes[attribute.identifier] = attribute", "docstring": "Adds an attribute.\n\nArgs:\nattribute (VolumeAttribute): a volume attribute.\n\nRaises:\nKeyError: if volume attribute is already set for the corresponding volume\nattribute identifier.", "source": "codesearchnet"}
{"code": "def set_colourtemp(self, colourtemp):\n    if (not (0 <= colourtemp <= 255)):\n        raise ValueError('The colour temperature needs to be between 0 and 255.')\n    payload = self.generate_payload(SET, {self.DPS_INDEX_COLOURTEMP: colourtemp})\n    data = self._send_receive(payload)\n    return data", "docstring": "Set the colour temperature of an rgb bulb.\n\nArgs:\ncolourtemp(int): Value for the colour temperature (0-255).", "source": "codesearchnet"}
{"code": "def _remove_one_redundant_stack_unstack(in_graph_def):\n    name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(in_graph_def)\n    del name_to_seq_num\n    do_generic_pack_unpack = True\n    out = _graph_pb2.GraphDef()\n    out.library.CopyFrom(in_graph_def.library)\n    out.versions.CopyFrom(in_graph_def.versions)\n    for n in in_graph_def.node:\n        node_name = _tensor_name_base(n.name)\n        if not node_name.startswith('OpHintStack') and (not n.op.startswith('Pack')):\n            continue\n        next_to_visit = [node_name]\n        visited = set()\n        unpack_nodes = set()\n        pack_node = node_name\n        matches_pattern = True\n        is_hint_created_stack = False\n        while next_to_visit:\n            current_node_name = next_to_visit[0]\n            visited.add(current_node_name)\n            del next_to_visit[0]\n            node = name_to_node[current_node_name]\n            is_op_hint_stack = node.name.startswith('OpHintStack')\n            is_op_hint_unstack = node.name.startswith('OpHintUnstack')\n            if node.op == 'Identity' or is_op_hint_stack or (do_generic_pack_unpack and node.op == 'Pack'):\n                is_hint_created_stack |= is_op_hint_stack\n                next_to_visit += [input_node for input_node in name_to_input_name[current_node_name] if input_node not in visited]\n            elif is_op_hint_unstack or (do_generic_pack_unpack and node.op == 'Unpack'):\n                unpack_nodes.add(node.name)\n                is_hint_created_stack &= is_op_hint_unstack\n            else:\n                matches_pattern = False\n                break\n            visited.add(node.name)\n        if matches_pattern and len(unpack_nodes) == 1:\n            pack_node = node_name\n            no_external_dependency = True\n            for other_n in in_graph_def.node:\n                if other_n.name in visited:\n                    continue\n                for input_tensor in name_to_input_name[other_n.name]:\n                    input_op = _tensor_name_base(input_tensor)\n                    if input_op in visited and input_op != pack_node:\n                        no_external_dependency = False\n            if is_hint_created_stack or no_external_dependency:\n                end = unpack_nodes.pop()\n                end_input = name_to_node[end].input[0]\n                for other_n in in_graph_def.node:\n                    node_name = _tensor_name_base(other_n.name)\n                    if node_name not in visited:\n                        new_node = _copy.deepcopy(other_n)\n                        new_node.input[:] = [end_input if stripped == pack_node else non_stripped for stripped, non_stripped in zip(name_to_input_name[node_name], new_node.input[:])]\n                        out.node.extend([new_node])\n                return (out, True)\n    return (in_graph_def, False)", "docstring": "Removes a stack->unstack pattern from in_graph_def in a returned graph.\n\nArgs:\nin_graph_def: Graph def to use as input.\n\nReturns:\nSimplified tuple (graph_def, changed_something) where changed_something\nis true if anything was done.", "source": "github-repos"}
{"code": "def __init__(self, value_type, value):\n    self.value_type = value_type\n    self.value = value_type(value)", "docstring": "Args:\nvalue_type: Type of the static value\nvalue: Static value", "source": "github-repos"}
{"code": "def scatter(indices, values, shape):\n    if any_symbolic_tensors((indices, values)):\n        return Scatter(shape=shape).symbolic_call(indices, values)\n    return backend.core.scatter(indices, values, shape)", "docstring": "Returns a tensor of shape `shape` where `indices` are set to `values`.\n\nAt a high level, this operation does `zeros[indices] = updates` and\nreturns the output. It is equivalent to:\n\n```python\nzeros = keras.ops.zeros(shape)\noutput = keras.ops.scatter_update(zeros, indices, values)\n```\n\nArgs:\nindices: A tensor or list/tuple specifying\nindices for the values in `values`.\nvalues: A tensor, the values to be set at `indices`.\nshape: Shape of the output tensor.\n\nExample:\n\n>>> indices = [[0, 1], [1, 1]]\n>>> values = np.array([1., 1.])\n>>> keras.ops.scatter(indices, values, shape=(2, 2))\narray([[0., 1.],\n[0., 1.]])", "source": "github-repos"}
{"code": "def __init__(\n      self, processing_configuration, enable_sigsegv_handler=False, **kwargs):\n    \n    super(MultiProcessBaseProcess, self).__init__(**kwargs)\n    self._debug_output = False\n    self._enable_sigsegv_handler = enable_sigsegv_handler\n    self._guppy_memory_profiler = None\n    self._log_filename = None\n    self._memory_profiler = None\n    self._original_sigsegv_handler = None\n    \n    \n    self._pid = None\n    self._processing_configuration = processing_configuration\n    self._process_information = None\n    self._processing_profiler = None\n    self._quiet_mode = False\n    self._rpc_server = None\n    self._serializers_profiler = None\n    self._status_is_running = False\n    self._storage_profiler = None\n    self._tasks_profiler = None\n\n    if self._processing_configuration:\n      self._debug_output = self._processing_configuration.debug_output\n\n      if processing_configuration.log_filename:\n        log_path = os.path.dirname(self._processing_configuration.log_filename)\n        log_filename = os.path.basename(\n            self._processing_configuration.log_filename)\n        log_filename = '{0:s}_{1:s}'.format(self._name, log_filename)\n        self._log_filename = os.path.join(log_path, log_filename)\n\n    \n    self.rpc_port = multiprocessing.Value('I', 0)", "docstring": "Initializes a process.\n\nArgs:\nprocessing_configuration (ProcessingConfiguration): processing\nconfiguration.\nenable_sigsegv_handler (Optional[bool]): True if the SIGSEGV handler\nshould be enabled.\nkwargs (dict[str,object]): keyword arguments to pass to\nmultiprocessing.Process.", "source": "juraj-google-style"}
{"code": "def learn(self, state_arr, limit=1000):\n        \n        while self.t <= limit:\n            \n            next_action_arr = self.extract_possible_actions(state_arr)\n            \n            predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)\n            \n            reward_value_arr = np.empty((next_action_arr.shape[0], 1))\n            next_max_q_arr = np.empty((next_action_arr.shape[0], 1))\n            for i in range(reward_value_arr.shape[0]):\n                \n                reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])\n                \n                next_next_action_arr = self.extract_possible_actions(next_action_arr[i])\n                next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()\n\n            \n            action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)\n            \n            real_q_arr = self.update_q(\n                predicted_q_arr,\n                reward_value_arr,\n                next_max_q_arr\n            )\n\n            \n            real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]\n            if self.__q_logs_arr.shape[0] > 0:\n                self.__q_logs_arr = np.r_[\n                    self.__q_logs_arr,\n                    np.array([predicted_q, real_q]).reshape(1, 2)\n                ]\n            else:\n                self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)\n\n            \n            self.learn_q(predicted_q_arr, real_q_arr)\n            \n            state_arr = self.update_state(state_arr, action_arr)\n            \n            self.t += 1\n            \n            end_flag = self.check_the_end_flag(state_arr)\n            if end_flag is True:\n                break", "docstring": "Learning and searching the optimal solution.\n\nArgs:\nstate_arr:      `np.ndarray` of initial state.\nlimit:          The maximum number of iterative updates based on value iteration algorithms.", "source": "juraj-google-style"}
{"code": "def __init__(self, group_key_start=1):\n    self._group_key = group_key_start\n    self._instance_key_table = {}\n    self._lock = threading.Lock()\n    self._known_groups = {}", "docstring": "Initializes the object.\n\nArgs:\ngroup_key_start: the starting integer of group key.", "source": "github-repos"}
{"code": "def _serialize_tensor_like_io(value, debug_path: Optional[str]=None, use_repr: bool=True, path_to_value: Optional[str]=None):\n    torch.set_printoptions(sci_mode=True)\n    if use_repr:\n        value_out = _repr_to_list(value)\n    elif path_to_value:\n        if not path_to_value.endswith('.safetensors'):\n            path_to_value += '.safetensors'\n        filepath = os.path.join(debug_path, path_to_value) if debug_path else path_to_value\n        save_file({'data': value.contiguous().detach().cpu()}, filepath)\n        value_out = f'./{path_to_value}'\n    else:\n        raise ValueError(f'use_repr={use_repr!r} and path_to_value={path_to_value!r} cannot both be falsy.')\n    out = {'shape': repr(value.shape), 'dtype': repr(value.dtype), 'value': value_out}\n    if value.dtype in {torch.float16, torch.float32, torch.bfloat16}:\n        out.update({'mean': _sanitize_repr_for_diff(repr(value.mean())), 'std': _sanitize_repr_for_diff(repr(value.std())), 'min': _sanitize_repr_for_diff(repr(value.min())), 'max': _sanitize_repr_for_diff(repr(value.max()))})\n    return out", "docstring": "Converts Tensors and DTensors to a JSON-serializable dictionary representation.\n\nArgs:\nvalue: Any Python object, often including torch Tensors, lists, dicts, etc.\ndebug_path (`str`, *optional*, defaults to `None`): Directory to dump debug JSON and SafeTensors files.\nuse_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensor as the\n`value` property in the asscoiated FULL_TENSORS.json file, or to store the full tensors in separate\nSafeTensors file and store the relative path to that file in the `value` property in the dictionary.\npath_to_value (`str`, *optional*, defaults to `None`): The file name for the SafeTensors file holding the full\ntensor value if `use_repr=False`.\n\nReturns:\nA nested Python structure (list, dict, or sanitized string) that is safe to json.dump.", "source": "github-repos"}
{"code": "def _QueryHash(self, nsrl_socket, digest):\n    \n    try:\n      query = 'QUERY {0:s}\\n'.format(digest).encode('ascii')\n    except UnicodeDecodeError:\n      logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))\n      return False\n\n    response = None\n\n    try:\n      nsrl_socket.sendall(query)\n      response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)\n\n    except socket.error as exception:\n      logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(\n          exception))\n\n    if not response:\n      return False\n\n    \n    \n    response = response.strip()\n    \n    return response == b'OK 1'", "docstring": "Queries nsrlsvr for a specific hash.\n\nArgs:\nnsrl_socket (socket._socketobject): socket of connection to nsrlsvr.\ndigest (str): hash to look up.\n\nReturns:\nbool: True if the hash was found, False if not or None on error.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(JLinkDeviceInfo, self).__init__(*args, **kwargs)\n        self.SizeofStruct = ctypes.sizeof(self)", "docstring": "Initializes the instance.\n\nPopulates the ``.SizeofStruct`` parameter to the size of the instance.\n\nArgs:\nself (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance\nargs: list of arguments\nkwargs: key-word arguments dictionary\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def open_channel_url(channel, staging=False):\n    \n    return OPEN_CHANNEL_URL.format(domain=DOMAIN, channel_id=channel, access='staging' if staging or STAGE else 'edit')", "docstring": "open_channel_url: returns url to uploaded channel\nArgs:\nchannel (str): channel id of uploaded channel\nReturns: string url to open channel", "source": "juraj-google-style"}
{"code": "def condition_details_has_owner(condition_details, owner):\n    \n    if 'subconditions' in condition_details:\n        result = condition_details_has_owner(condition_details['subconditions'], owner)\n        if result:\n            return True\n\n    elif isinstance(condition_details, list):\n        for subcondition in condition_details:\n            result = condition_details_has_owner(subcondition, owner)\n            if result:\n                return True\n    else:\n        if 'public_key' in condition_details \\\n                and owner == condition_details['public_key']:\n            return True\n    return False", "docstring": "Check if the public_key of owner is in the condition details\nas an Ed25519Fulfillment.public_key\n\nArgs:\ncondition_details (dict): dict with condition details\nowner (str): base58 public key of owner\n\nReturns:\nbool: True if the public key is found in the condition details, False otherwise", "source": "juraj-google-style"}
{"code": "def random( self ):\n        \n        j = np.searchsorted( self.cumulative_probabilities(), random.random() )\n        return self.jumps[ j ]", "docstring": "Select a jump at random with appropriate relative probabilities.\n\nArgs:\nNone\n\nReturns:\n(Jump): The randomly selected Jump.", "source": "juraj-google-style"}
{"code": "def get_functions_overridden_by(self, function):\n        \n        candidates = [c.functions_not_inherited for c in self.inheritance]\n        candidates = [candidate for sublist in candidates for candidate in sublist]\n        return [f for f in candidates if f.full_name == function.full_name]", "docstring": "Return the list of functions overriden by the function\nArgs:\n(core.Function)\nReturns:\nlist(core.Function)", "source": "juraj-google-style"}
{"code": "def pipe(engine, format, data, renderer=None, formatter=None, quiet=False):\n    (cmd, _) = command(engine, format, None, renderer, formatter)\n    (out, _) = run(cmd, input=data, capture_output=True, check=True, quiet=quiet)\n    return out", "docstring": "Return ``data`` piped through Graphviz ``engine`` into ``format``.\n\nArgs:\nengine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...).\nformat: The output format used for rendering (``'pdf'``, ``'png'``, ...).\ndata: The binary (encoded) DOT source string to render.\nrenderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).\nformatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).\nquiet (bool): Suppress ``stderr`` output.\nReturns:\nBinary (encoded) stdout of the layout command.\nRaises:\nValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.\ngraphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.\ngraphviz.ExecutableNotFound: If the Graphviz executable is not found.\nsubprocess.CalledProcessError: If the exit status is non-zero.", "source": "codesearchnet"}
{"code": "def create_position_ids_from_input_ids(input_ids, padding_idx):\n    mask = input_ids.ne(padding_idx).int()\n    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask\n    return incremental_indices.long() + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\nx: torch.Tensor x:\n\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def get_layer_vis_square(data,\n                         allow_heatmap=True,\n                         normalize=True,\n                         min_img_dim=100,\n                         max_width=1200,\n                         channel_order='RGB',\n                         colormap='jet',\n                         ):\n    \n    if channel_order not in ['RGB', 'BGR']:\n        raise ValueError('Unsupported channel_order %s' % channel_order)\n    if data.ndim == 1:\n        \n        \n        data = data[:, np.newaxis, np.newaxis]\n    elif data.ndim == 2:\n        \n        \n        data = data.reshape((data.shape[0] * data.shape[1], 1, 1))\n    elif data.ndim == 3:\n        if data.shape[0] == 3:\n            \n            \n            if channel_order == 'BGR':\n                data = data[[2, 1, 0], ...]  \n            data = data.transpose(1, 2, 0)\n            data = data[np.newaxis, ...]\n        else:\n            \n            \n            pass\n    elif data.ndim == 4:\n        if data.shape[0] == 3:\n            \n            \n            data = data.transpose(1, 2, 3, 0)\n            if channel_order == 'BGR':\n                data = data[:, :, :, [2, 1, 0]]  \n        elif data.shape[1] == 3:\n            \n            \n            data = data.transpose(0, 2, 3, 1)\n            if channel_order == 'BGR':\n                data = data[:, :, :, [2, 1, 0]]  \n        else:\n            \n            \n            data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))\n    else:\n        raise RuntimeError('unrecognized data shape: %s' % (data.shape,))\n\n    return get_layer_vis_square_raw(data,\n                         allow_heatmap,\n                         normalize,\n                         min_img_dim,\n                         max_width,\n                         colormap,\n                         )", "docstring": "Returns a vis_square for the given layer data\nArguments:\ndata -- a np.ndarray\nKeyword arguments:\nallow_heatmap -- if True, convert single channel images to heatmaps\nnormalize -- whether to normalize the data when visualizing\nmax_width -- maximum width for the vis_square", "source": "juraj-google-style"}
{"code": "def MakeHistFromList(t, name=''):\n    hist = Hist(name=name)\n    [hist.Incr(x) for x in t]\n    return hist", "docstring": "Makes a histogram from an unsorted sequence of values.\n\nArgs:\nt: sequence of numbers\nname: string name for this histogram\n\nReturns:\nHist object", "source": "codesearchnet"}
{"code": "async def get_ticket(self, request):\n        \n        session = await get_session(request)\n        return session.get(self.cookie_name)", "docstring": "Called to return the ticket for a request.\n\nArgs:\nrequest: aiohttp Request object.\n\nReturns:\nA ticket (string like) object, or None if no ticket is available\nfor the passed request.", "source": "juraj-google-style"}
{"code": "def system_repertoire_distance(r1, r2):\n    if (config.MEASURE in measures.asymmetric()):\n        raise ValueError('{} is asymmetric and cannot be used as a system-level irreducibility measure.'.format(config.MEASURE))\n    return measures[config.MEASURE](r1, r2)", "docstring": "Compute the distance between two repertoires of a system.\n\nArgs:\nr1 (np.ndarray): The first repertoire.\nr2 (np.ndarray): The second repertoire.\n\nReturns:\nfloat: The distance between ``r1`` and ``r2``.", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    return max([parser.matches(line) for (_, parser) in self.LINE_STRUCTURES])", "docstring": "Verifies that this is an apache access log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "codesearchnet"}
{"code": "def __init__(self, napp_path, tpl_path):\n        \n        self._napp_path = napp_path\n        self._template = tpl_path / 'openapi.yml.template'\n        self._api_file = napp_path / 'openapi.yml'\n\n        metadata = napp_path / 'kytos.json'\n        self._napp = NApp.create_from_json(metadata)\n\n        \n        self._summary = None\n        self._description = None\n\n        \n        self._paths = {}", "docstring": "Instantiate an OpenAPI object.\n\nArgs:\nnapp_path (string): Napp directory\ntlp_path (string): File name from template", "source": "juraj-google-style"}
{"code": "def build(self, text, matrix, skim_depth=10, d_weights=False):\n\n        \n\n        for anchor in bar(matrix.keys):\n\n            n1 = text.unstem(anchor)\n\n            \n            pairs = matrix.anchored_pairs(anchor).items()\n            for term, weight in list(pairs)[:skim_depth]:\n\n                \n                \n                if d_weights: weight = 1-weight\n\n                n2 = text.unstem(term)\n\n                \n                \n                self.graph.add_edge(n1, n2, weight=float(weight))", "docstring": "1. For each term in the passed matrix, score its KDE similarity with\nall other indexed terms.\n\n2. With the ordered stack of similarities in hand, skim off the top X\npairs and add them as edges.\n\nArgs:\ntext (Text): The source text instance.\nmatrix (Matrix): An indexed term matrix.\nskim_depth (int): The number of siblings for each term.\nd_weights (bool): If true, give \"close\" words low edge weights.", "source": "juraj-google-style"}
{"code": "def __response_message_descriptor(self, message_type, method_id):\n    \n\n    \n    descriptor = {'200': {'description': 'A successful response'}}\n\n    if message_type != message_types.VoidMessage():\n      self.__parser.add_message(message_type.__class__)\n      self.__response_schema[method_id] = self.__parser.ref_for_message_type(\n          message_type.__class__)\n      descriptor['200']['schema'] = {'$ref': '\n          self.__response_schema[method_id])}\n\n    return dict(descriptor)", "docstring": "Describes the response.\n\nArgs:\nmessage_type: messages.Message class, The message to describe.\nmethod_id: string, Unique method identifier (e.g. 'myapi.items.method')\n\nReturns:\nDictionary describing the response.", "source": "juraj-google-style"}
{"code": "def MakeSuiteFromList(t, name=''):\n    \n    hist = MakeHistFromList(t)\n    d = hist.GetDict()\n    return MakeSuiteFromDict(d)", "docstring": "Makes a suite from an unsorted sequence of values.\n\nArgs:\nt: sequence of numbers\nname: string name for this suite\n\nReturns:\nSuite object", "source": "juraj-google-style"}
{"code": "def get_course_duration(self, obj):\n        \n        duration = obj.end - obj.start if obj.start and obj.end else None\n        if duration:\n            return strfdelta(duration, '{W} weeks {D} days.')\n        return ''", "docstring": "Get course's duration as a timedelta.\n\nArguments:\nobj (CourseOverview): CourseOverview object\n\nReturns:\n(timedelta): Duration of a course.", "source": "juraj-google-style"}
{"code": "def next_trials(self):\n    trials = []\n    for trial in self._trial_generator:\n        if (trial is None):\n            return trials\n        trials += [trial]\n    self._finished = True\n    return trials", "docstring": "Provides a batch of Trial objects to be queued into the TrialRunner.\n\nA batch ends when self._trial_generator returns None.\n\nReturns:\ntrials (list): Returns a list of trials.", "source": "codesearchnet"}
{"code": "def remove(self, *l):\n        \n        for a in flatten(l):\n            self._remove([self.Inner(a)], self.l)", "docstring": "remove inner from outer\n\nArgs:\n*l element that is passes into Inner init", "source": "juraj-google-style"}
{"code": "def random_data(line_count=1, chars_per_line=80):\n    \n    divide_lines = chars_per_line * line_count\n    return '\\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))", "docstring": "Function to creates lines of random string data\nArgs:\nline_count: An integer that says how many lines to return\nchars_per_line: An integer that says how many characters per line to return\n\nReturns:\nA String", "source": "juraj-google-style"}
{"code": "def get_response(response: Dict[str, Any]) -> JSONRPCResponse:\n    \n    if \"error\" in response:\n        return ErrorResponse(**response)\n    return SuccessResponse(**response)", "docstring": "Converts a deserialized response into a JSONRPCResponse object.\n\nThe dictionary be either an error or success response, never a notification.\n\nArgs:\nresponse: Deserialized response dictionary. We can assume the response is valid\nJSON-RPC here, since it passed the jsonschema validation.", "source": "juraj-google-style"}
{"code": "def check_list_type(objects, allowed_type, name, allow_none=True):\n  \n  if objects is None:\n    if not allow_none:\n      raise TypeError('%s is None, which is not allowed.' % name)\n    return objects\n  if not isinstance(objects, (tuple, list)):\n    raise TypeError('%s is not a list.' % name)\n  if not all(isinstance(i, allowed_type) for i in objects):\n    type_list = sorted(list(set(type(obj) for obj in objects)))\n    raise TypeError('%s contains types that don\\'t match %s: %s' %\n                    (name, allowed_type.__name__, type_list))\n  return objects", "docstring": "Verify that objects in list are of the allowed type or raise TypeError.\n\nArgs:\nobjects: The list of objects to check.\nallowed_type: The allowed type of items in 'settings'.\nname: Name of the list of objects, added to the exception.\nallow_none: If set, None is also allowed.\n\nRaises:\nTypeError: if object is not of the allowed type.\n\nReturns:\nThe list of objects, for convenient use in assignment.", "source": "juraj-google-style"}
{"code": "def __ne__(self, other):\n        \n        if isinstance(other, DocumentReference):\n            return self._client != other._client or self._path != other._path\n        else:\n            return NotImplemented", "docstring": "Inequality check against another instance.\n\nArgs:\nother (Any): A value to compare against.\n\nReturns:\nUnion[bool, NotImplementedType]: Indicating if the values are\nnot equal.", "source": "juraj-google-style"}
{"code": "def _g(self, z):\n    return (np.exp(np.multiply((- self.theta), z)) - 1)", "docstring": "Helper function to solve Frank copula.\n\nThis functions encapsulates :math:`g_z = e^{-\\\\theta z} - 1` used on Frank copulas.\n\nArgument:\nz: np.ndarray\n\nReturns:\nnp.ndarray", "source": "codesearchnet"}
{"code": "def plots_html_page(query_module):\n    template = jenv.get_template('analysis.html')\n    context = dict(extended=config.EXTENDED)\n    cl = client.get_client()\n    session = cl.create_session()\n    seaborn.set_style('whitegrid')\n    decade_df = query_module.decade_query()\n    pix_size = pixels_to_inches((600, 400))\n    ax = seaborn.lmplot(x='decade', y='area', data=decade_df, size=pix_size[1], aspect=(pix_size[0] / pix_size[1]), scatter_kws={'s': 30, 'alpha': 0.3})\n    ax.set(xlabel='Decade', ylabel='Area, m^2')\n    context['area_by_decade_svg'] = fig_to_svg(plt.gcf())\n    plt.close('all')\n    if config.EXTENDED:\n        gender_df = query_module.gender_query()\n        pix_size = pixels_to_inches((600, 400))\n        g = seaborn.FacetGrid(gender_df, hue='gender', margin_titles=True, size=pix_size[1], aspect=(pix_size[0] / pix_size[1]))\n        bins = np.linspace(0, 5, 30)\n        g.map(plt.hist, 'area', bins=bins, lw=0, alpha=0.5, normed=True)\n        g.axes[(0, 0)].set_xlabel('Area, m^2')\n        g.axes[(0, 0)].set_ylabel('Percentage of paintings')\n        context['area_by_gender_svg'] = fig_to_svg(plt.gcf())\n        plt.close('all')\n    out_file = path.join(out_dir, 'analysis.html')\n    html_content = template.render(**context)\n    with open(out_file, 'w') as f:\n        f.write(html_content)\n    plt.close('all')\n    session.close()", "docstring": "Generate analysis output as html page\n\nArgs:\nquery_module (module): module to use for querying data for the\ndesired model/pipeline variant, e.g. leonardo.standard.queries", "source": "codesearchnet"}
{"code": "def as_objective(obj):\n  \n  if isinstance(obj, Objective):\n    return obj\n  elif callable(obj):\n    return obj\n  elif isinstance(obj, str):\n    layer, n = obj.split(\":\")\n    layer, n = layer.strip(), int(n)\n    return channel(layer, n)", "docstring": "Convert obj into Objective class.\n\nStrings of the form \"layer:n\" become the Objective channel(layer, n).\nObjectives are returned unchanged.\n\nArgs:\nobj: string or Objective.\n\nReturns:\nObjective", "source": "juraj-google-style"}
{"code": "def add_token(self, token):\n        \n        token = self.process_token(token)\n        self._token_count.update([token])", "docstring": "Add token to vocabulary.\n\nArgs:\ntoken (str): token to add.", "source": "juraj-google-style"}
{"code": "def _parse_services(self, service_config: dict, service_name: str, service_list: dict) -> dict:\n    for (key, value) in service_list['services'][service_name].items():\n        service_config[key] = value\n        if ('command' in key):\n            key = 'args'\n            service_config['args'] = value\n            service_config.pop('command')\n        if ('ports' in key):\n            endpoint_spec = self._parse_ports(value)\n            service_config['endpoint_spec'] = endpoint_spec\n            service_config.pop('ports')\n        if ('volumes' in key):\n            volume_spec = self._parse_volumes(value)\n            service_config['mounts'] = volume_spec\n            service_config.pop('volumes')\n        if ('deploy' in key):\n            self._parse_deploy(value, service_config)\n            service_config.pop('deploy')\n        if ('networks' in key):\n            network_spec = self._parse_networks(service_list)\n            service_config['networks'] = network_spec\n        if ('logging' in key):\n            self._parse_logging(value, service_config)\n            service_config.pop('logging')\n        if ('environment' in key):\n            service_config['env'] = value\n            service_config.pop('environment')\n    return service_config", "docstring": "Parse the docker compose file.\n\nArgs:\nservice_config (dict): Service configurations from the compose file\nservice_name (string): Name of the services\nservice_list (dict): Service configuration list\n\nReturns:\ndict, service specifications extracted from the compose file", "source": "codesearchnet"}
{"code": "def track_storms(storm_objects, times, distance_components, distance_maxima, distance_weights, tracked_objects=None):\n    obj_matcher = ObjectMatcher(distance_components, distance_weights, distance_maxima)\n    if (tracked_objects is None):\n        tracked_objects = []\n    for (t, time) in enumerate(times):\n        past_time_objects = []\n        for obj in tracked_objects:\n            if (obj.end_time == (time - obj.step)):\n                past_time_objects.append(obj)\n        if (len(past_time_objects) == 0):\n            tracked_objects.extend(storm_objects[t])\n        elif ((len(past_time_objects) > 0) and (len(storm_objects[t]) > 0)):\n            assignments = obj_matcher.match_objects(past_time_objects, storm_objects[t], times[(t - 1)], times[t])\n            unpaired = list(range(len(storm_objects[t])))\n            for pair in assignments:\n                past_time_objects[pair[0]].extend(storm_objects[t][pair[1]])\n                unpaired.remove(pair[1])\n            if (len(unpaired) > 0):\n                for up in unpaired:\n                    tracked_objects.append(storm_objects[t][up])\n    return tracked_objects", "docstring": "Given the output of extract_storm_objects, this method tracks storms through time and merges individual\nSTObjects into a set of tracks.\n\nArgs:\nstorm_objects: list of list of STObjects that have not been tracked.\ntimes: List of times associated with each set of STObjects\ndistance_components: list of function objects that make up components of distance function\ndistance_maxima: array of maximum values for each distance for normalization purposes\ndistance_weights: weight given to each component of the distance function. Should add to 1.\ntracked_objects: List of STObjects that have already been tracked.\nReturns:\ntracked_objects:", "source": "codesearchnet"}
{"code": "def select_action(self, next_action_arr, next_q_arr):\n        \n        key_arr = self.select_action_key(next_action_arr, next_q_arr)\n        return next_action_arr[key_arr], next_q_arr[key_arr]", "docstring": "Select action by Q(state, action).\n\nArgs:\nnext_action_arr:        `np.ndarray` of actions.\nnext_q_arr:             `np.ndarray` of Q-Values.\n\nRetruns:\nTuple(`np.ndarray` of action., Q-Value)", "source": "juraj-google-style"}
{"code": "def template_string(task: Task, template: str, jinja_filters: FiltersDict=None, **kwargs: Any) -> Result:\n    jinja_filters = (jinja_filters or {} or task.nornir.config.jinja2.filters)\n    text = jinja_helper.render_from_string(template=template, host=task.host, jinja_filters=jinja_filters, **kwargs)\n    return Result(host=task.host, result=text)", "docstring": "Renders a string with jinja2. All the host data is available in the template\n\nArguments:\ntemplate (string): template string\njinja_filters (dict): jinja filters to enable. Defaults to nornir.config.jinja2.filters\n**kwargs: additional data to pass to the template\n\nReturns:\nResult object with the following attributes set:\n* result (``string``): rendered string", "source": "codesearchnet"}
{"code": "def locate_module(module_id: str, module_type: str = None):\n    \n\n    entry_point = None\n\n    if module_type:\n        entry_point = 'ehforwarderbot.%s' % module_type\n\n    module_id = module_id.split('\n\n    if entry_point:\n        for i in pkg_resources.iter_entry_points(entry_point):\n            if i.name == module_id:\n                return i.load()\n\n    return pydoc.locate(module_id)", "docstring": "Locate module by module ID\n\nArgs:\nmodule_id: Module ID\nmodule_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'``", "source": "juraj-google-style"}
{"code": "def verify_account(self, email_address):\n        \n        request = self._get_request()\n        resp = request.post(self.ACCOUNT_VERIFY_URL, {\n            'email_address': email_address\n        })\n        return ('account' in resp)", "docstring": "Verify whether a HelloSign Account exists\n\nArgs:\n\nemail_address (str): Email address for the account to verify\n\nReturns:\nTrue or False", "source": "juraj-google-style"}
{"code": "def install_package(self, name, index=None, force=False, update=False):\n        \n        cmd = 'install'\n        if force:\n\n            cmd = '{0} {1}'.format(cmd, '--force-reinstall')\n\n        if update:\n\n            cmd = '{0} {1}'.format(cmd, '--update')\n\n        if index:\n\n            cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))\n\n        self.pip('{0} {1}'.format(cmd, name))", "docstring": "Install a given package.\n\nArgs:\nname (str): The package name to install. This can be any valid\npip package specification.\nindex (str): The URL for a pypi index to use.\nforce (bool): For the reinstall of packages during updates.\nupdate (bool): Update the package if it is out of date.", "source": "juraj-google-style"}
{"code": "def receive(self, event_type, signature, data_str):\n    if (not self.validate_signature(signature, data_str)):\n        raise HelpScoutSecurityException('The signature provided by this request was invalid.')\n    return HelpScoutWebHookEvent(event_type=event_type, record=json.loads(data_str))", "docstring": "Receive a web hook for the event and signature.\n\nArgs:\nevent_type (str): Name of the event that was received (from the\nrequest ``X-HelpScout-Event`` header).\nsignature (str): The signature that was received, which serves as\nauthentication (from the request ``X-HelpScout-Signature``\nheader).\ndata_str (str): The raw data that was posted by HelpScout\nto the web hook. This must be the raw string, because if it\nis parsed with JSON it will lose its ordering and not pass\nsignature validation.\n\nRaises:\nhelpscout.exceptions.HelpScoutSecurityException: If an invalid\nsignature is provided, and ``raise_if_invalid`` is ``True``.\n\nReturns:\nhelpscout.web_hook.WebHookEvent: The authenticated web hook\nrequest.", "source": "codesearchnet"}
{"code": "def get_energy_relax_structure_buckingham(structure, gulp_cmd='gulp', keywords=('optimise', 'conp'), valence_dict=None):\n    gio = GulpIO()\n    gc = GulpCaller(gulp_cmd)\n    gin = gio.buckingham_input(structure, keywords, valence_dict=valence_dict)\n    gout = gc.run(gin)\n    energy = gio.get_energy(gout)\n    relax_structure = gio.get_relaxed_structure(gout)\n    return (energy, relax_structure)", "docstring": "Relax a structure and compute the energy using Buckingham potential.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\ngulp_cmd: GULP command if not in standard place\nkeywords: GULP first line keywords\nvalence_dict: {El: valence}. Needed if the structure is not charge\nneutral.", "source": "codesearchnet"}
{"code": "def read(self, length, timeout=None):\n    data = b''\n    while True:\n        if (timeout is not None):\n            (rlist, _, _) = select.select([self._fd], [], [], timeout)\n            if (self._fd not in rlist):\n                break\n        try:\n            data += os.read(self._fd, (length - len(data)))\n        except OSError as e:\n            raise SerialError(e.errno, ('Reading serial port: ' + e.strerror))\n        if (len(data) == length):\n            break\n    return data", "docstring": "Read up to `length` number of bytes from the serial port with an\noptional timeout.\n\n`timeout` can be positive for a timeout in seconds, 0 for a\nnon-blocking read, or negative or None for a blocking read that will\nblock until `length` number of bytes are read. Default is a blocking\nread.\n\nFor a non-blocking or timeout-bound read, read() may return data whose\nlength is less than or equal to the requested length.\n\nArgs:\nlength (int): length in bytes.\ntimeout (int, float, None): timeout duration in seconds.\n\nReturns:\nbytes: data read.\n\nRaises:\nSerialError: if an I/O or OS error occurs.", "source": "codesearchnet"}
{"code": "def CreateTaskStorage(self, task):\n    \n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Unsupported storage type.')\n\n    storage_file_path = self._GetTaskStorageFilePath(task)\n    return self._CreateTaskStorageWriter(storage_file_path, task)", "docstring": "Creates a task storage.\n\nThe task storage is used to store attributes created by the task.\n\nArgs:\ntask(Task): task.\n\nReturns:\nStorageWriter: storage writer.\n\nRaises:\nIOError: if the storage type is not supported.\nOSError: if the storage type is not supported.", "source": "juraj-google-style"}
{"code": "def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], kwargs: Optional[Dict[str, Any]]=None, source_caller: Optional[Caller]=None, mode: _RedisMode):\n    self.host, self.port = (host, port)\n    self.time_to_live = time_to_live\n    self.request_coder = request_coder\n    self.response_coder = response_coder\n    self.kwargs = kwargs\n    self.source_caller = source_caller\n    self.mode = mode", "docstring": "Args:\nhost (str): The hostname or IP address of the Redis server.\nport (int): The port number of the Redis server.\ntime_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for\nrecords stored in Redis. Provide an integer (in seconds) or a\n`datetime.timedelta` object.\nrequest_coder: (Optional[`coders.Coder`]) coder for requests stored\nin Redis.\nresponse_coder: (Optional[`coders.Coder`]) coder for decoding responses\nreceived from Redis.\nkwargs: Optional(Dict[str, Any]) additional keyword arguments that\nare required to connect to your redis server. Same as `redis.Redis()`.\nsource_caller: (Optional[`Caller`]): The source caller using this Redis\ncache in case of fetching the cache request to store in Redis.\nmode: `_RedisMode` An enum type specifying the operational mode of\nthe `_RedisCaller`.", "source": "github-repos"}
{"code": "def get_section_by_name(self, section_name):\n    sections = self.unravel_sections(self.get_sections())\n    for section in sections:\n        if (section['name'] == section_name):\n            return (section['groupId'], section)\n    return (None, None)", "docstring": "Get a section by its name.\n\nGet a list of sections for a given gradebook,\nspecified by a gradebookid.\n\nArgs:\nsection_name (str): The section's name.\n\nRaises:\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\ntuple: tuple of group id, and section dictionary\n\nAn example return value is:\n\n.. code-block:: python\n\n(\n1327565,\n{\nu'editable': True,\nu'groupId': 1327565,\nu'groupingScheme': u'Recitation',\nu'members': None,\nu'name': u'r01',\nu'shortName': u'r01',\nu'staffs': None\n}\n)", "source": "codesearchnet"}
{"code": "def generate_message_doc(message_descriptor, locations, path, name_prefix=''):\n    \n    \n    prefixed_name = name_prefix + message_descriptor.name\n    print(make_subsection(prefixed_name))\n    location = locations[path]\n    if location.HasField('leading_comments'):\n        print(textwrap.dedent(location.leading_comments))\n\n    row_tuples = []\n    for field_index, field in enumerate(message_descriptor.field):\n        field_location = locations[path + (2, field_index)]\n        if field.type not in [11, 14]:\n            type_str = TYPE_TO_STR[field.type]\n        else:\n            type_str = make_link(field.type_name.lstrip('.'))\n        row_tuples.append((\n            make_code(field.name),\n            field.number,\n            type_str,\n            LABEL_TO_STR[field.label],\n            textwrap.fill(get_comment_from_location(field_location), INFINITY),\n        ))\n    print_table(('Field', 'Number', 'Type', 'Label', 'Description'),\n                row_tuples)\n\n    \n    nested_types = enumerate(message_descriptor.nested_type)\n    for index, nested_message_desc in nested_types:\n        generate_message_doc(nested_message_desc, locations,\n                             path + (3, index),\n                             name_prefix=prefixed_name + '.')\n\n    \n    for index, nested_enum_desc in enumerate(message_descriptor.enum_type):\n        generate_enum_doc(nested_enum_desc, locations, path + (4, index),\n                          name_prefix=prefixed_name + '.')", "docstring": "Generate docs for message and nested messages and enums.\n\nArgs:\nmessage_descriptor: descriptor_pb2.DescriptorProto instance for message\nto generate docs for.\nlocations: Dictionary of location paths tuples to\ndescriptor_pb2.SourceCodeInfo.Location instances.\npath: Path tuple to the message definition.\nname_prefix: Optional prefix for this message's name.", "source": "juraj-google-style"}
{"code": "def cancel(self, job_ids):\n    statuses = []\n    for job_id in job_ids:\n        try:\n            self.delete_instance(job_id)\n            statuses.append(True)\n            self.provisioned_blocks -= 1\n        except Exception:\n            statuses.append(False)\n    return statuses", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\nArgs:\n- job_ids (list): A list of job identifiers\n\nReturns:\n- A list of status from cancelling the job which can be True, False\n\nRaises:\n- ExecutionProviderException or its subclasses", "source": "codesearchnet"}
{"code": "def get_losses_for(self, inputs):\n    warnings.warn('`layer.get_losses_for` is deprecated and will be removed in a future version. Please use `layer.losses` instead.')\n    return self.losses", "docstring": "Deprecated, do NOT use!\n\nRetrieves losses relevant to a specific set of inputs.\n\nArgs:\ninputs: Input tensor or list/tuple of input tensors.\n\nReturns:\nList of loss tensors of the layer that depend on `inputs`.", "source": "github-repos"}
{"code": "def _verify_output(self, submission_type):\n    result = True\n    if (submission_type == 'defense'):\n        try:\n            image_classification = load_defense_output(os.path.join(self._sample_output_dir, 'result.csv'))\n            expected_keys = [IMAGE_NAME_PATTERN.format(i) for i in range(BATCH_SIZE)]\n            if (set(image_classification.keys()) != set(expected_keys)):\n                logging.error('Classification results are not saved for all images')\n                result = False\n        except IOError as e:\n            logging.error('Failed to read defense output file: %s', e)\n            result = False\n    else:\n        for i in range(BATCH_SIZE):\n            image_filename = os.path.join(self._sample_output_dir, IMAGE_NAME_PATTERN.format(i))\n            try:\n                img = np.array(Image.open(image_filename).convert('RGB'))\n                if (list(img.shape) != [299, 299, 3]):\n                    logging.error('Invalid image size %s for image %s', str(img.shape), image_filename)\n                    result = False\n            except IOError as e:\n                result = False\n    return result", "docstring": "Verifies correctness of the submission output.\n\nArgs:\nsubmission_type: type of the submission\n\nReturns:\nTrue if output looks valid", "source": "codesearchnet"}
{"code": "def dumpfile(item, path):\n    \n    with io.open(path, 'wb') as fd:\n        fd.write(en(item))", "docstring": "Dump an object to a file by path.\n\nArgs:\nitem (object): The object to serialize.\npath (str): The file path to save.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def squared_hinge(y_true, y_pred):\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true = ops.cast(y_true, y_pred.dtype)\n    y_true = convert_binary_labels_to_hinge(y_true)\n    return ops.mean(ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1)", "docstring": "Computes the squared hinge loss between `y_true` & `y_pred`.\n\nFormula:\n\n```python\nloss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)\n```\n\nArgs:\ny_true: The ground truth values. `y_true` values are expected to be -1\nor 1. If binary (0 or 1) labels are provided we will convert them\nto -1 or 1 with shape = `[batch_size, d0, .. dN]`.\ny_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.\n\nReturns:\nSquared hinge loss values with shape = `[batch_size, d0, .. dN-1]`.\n\nExample:\n\n>>> y_true = np.random.choice([-1, 1], size=(2, 3))\n>>> y_pred = np.random.random(size=(2, 3))\n>>> loss = keras.losses.squared_hinge(y_true, y_pred)", "source": "github-repos"}
{"code": "def install(name, dst, capture_error=False):\n    \n    if dst not in sys.path:\n        sys.path.insert(0, dst)\n\n    entrypoint_type = _entry_point_type.get(dst, name)\n    if entrypoint_type is _entry_point_type.PYTHON_PACKAGE:\n        _modules.install(dst, capture_error)\n    if entrypoint_type is _entry_point_type.COMMAND:\n        os.chmod(os.path.join(dst, name), 511)", "docstring": "Install the user provided entry point to be executed as follow:\n- add the path to sys path\n- if the user entry point is a command, gives exec permissions to the script\n\nArgs:\nname (str): name of the script or module.\ndst (str): path to directory with the script or module.\ncapture_error (bool): Default false. If True, the running process captures the\nstderr, and appends it to the returned Exception message in case of errors.", "source": "juraj-google-style"}
{"code": "def _confirm_overwrite(filename):\n    message = '{}Would you like to overwrite the contents of {} (y/[n])? '.format(c.Fore.MAGENTA, filename)\n    response = raw_input(message)\n    response = response.lower()\n    if (response in ['y', 'yes']):\n        return True\n    return False", "docstring": "Confirm overwrite of template files.\n\nMake sure the user would like to continue downloading a file which will overwrite a file\nin the current directory.\n\nArgs:\nfilename (str): The name of the file to overwrite.\n\nReturns:\nbool: True if the user specifies a \"yes\" response.", "source": "codesearchnet"}
{"code": "def log_error(self, msg):\n    if self.__logger:\n        self.__logger.error(msg)\n    raise RuntimeError(msg)", "docstring": "Log an error and raise an exception.\n\nArgs:\nmsg: Error message to log.\n\nRaises:\nRuntimeError: With the message.", "source": "codesearchnet"}
{"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n    \n    \n    super(DefaultOLECFPlugin, self).Process(parser_mediator, **kwargs)\n\n    if not root_item:\n      raise ValueError('Root item not set.')\n\n    if not self._ParseItem(parser_mediator, root_item):\n      event_data = OLECFItemEventData()\n      event_data.name = root_item.name\n      event_data.offset = 0\n      event_data.size = root_item.size\n\n      \n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an OLECF file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nroot_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\nValueError: If the root item is not set.", "source": "juraj-google-style"}
{"code": "def sample_from_discretized_mix_logistic(pred, seed=None):\n  \n\n  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(\n      pred)\n\n  \n  num_mixtures = shape_list(logits)[-1]\n  gumbel_noise = -tf.log(-tf.log(\n      tf.random_uniform(\n          tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))\n  sel = tf.one_hot(\n      tf.argmax(logits + gumbel_noise, -1),\n      depth=num_mixtures,\n      dtype=tf.float32)\n\n  \n  sel = tf.expand_dims(sel, -1)\n  locs = tf.reduce_sum(locs * sel, 3)\n  log_scales = tf.reduce_sum(log_scales * sel, 3)\n  coeffs = tf.reduce_sum(coeffs * sel, 3)\n\n  \n  \n  uniform_noise = tf.random_uniform(\n      tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)\n  logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise)\n  x = locs + tf.exp(log_scales) * logistic_noise\n  x0 = x[..., 0]\n  x1 = x[..., 1] + coeffs[..., 0] * x0\n  x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1\n  x = tf.stack([x0, x1, x2], axis=-1)\n  x = tf.clip_by_value(x, -1., 1.)\n  return x", "docstring": "Sampling from a discretized mixture of logistics.\n\nArgs:\npred: A [batch, height, width, num_mixtures*10] tensor of floats\ncomprising one unconstrained mixture probability, three means\n(one per channel), three standard deviations (one per channel),\nand three coefficients which linearly parameterize dependence across\nchannels.\nseed: Random seed.\n\nReturns:\nA tensor of shape [batch, height, width, 3] with real intensities scaled\nbetween -1 and 1.", "source": "juraj-google-style"}
{"code": "def _read_protocol_line(self):\n    while True:\n        line = self._proc.stdout.readline().decode('utf-8')\n        if (not line):\n            raise jsonrpc_client_base.AppStartError(self._ad, 'Unexpected EOF waiting for app to start')\n        line = line.strip()\n        if (line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET ')):\n            self.log.debug('Accepted line from instrumentation output: \"%s\"', line)\n            return line\n        self.log.debug('Discarded line from instrumentation output: \"%s\"', line)", "docstring": "Reads the next line of instrumentation output relevant to snippets.\n\nThis method will skip over lines that don't start with 'SNIPPET' or\n'INSTRUMENTATION_RESULT'.\n\nReturns:\n(str) Next line of snippet-related instrumentation output, stripped.\n\nRaises:\njsonrpc_client_base.AppStartError: If EOF is reached without any\nprotocol lines being read.", "source": "codesearchnet"}
{"code": "def profile(self, profile):\n        \n        \n        self._staging_data = None\n        \n        lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON')\n        \n        profile_args = ArgBuilder(lang, self.profile_args(profile.get('args')))\n        \n        self._profile = profile\n        \n        self._profile['profile_args'] = profile_args\n        \n        self.load_tcex()\n        \n        self.reports.profile(profile.get('profile_name'))\n        \n        self._create_tc_dirs()", "docstring": "Set the current profile.\n\nArgs:\nprofile (dict): The profile data.", "source": "juraj-google-style"}
{"code": "def sg_int(tensor, opt):\n    r\n    return tf.cast(tensor, tf.sg_intx, name=opt.name)", "docstring": "r\"\"\"Casts a tensor to intx.\n\nSee `tf.cast()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` or `SparseTensor` (automatically given by chain).\nopt:\nname: If provided, it replaces current tensor's name.\n\nReturns:\nA `Tensor` or `SparseTensor` with same shape as `tensor`.", "source": "juraj-google-style"}
{"code": "def __init__(self, _max_size, _random=None, always_keep_last=True):\n    \n    if _max_size < 0 or _max_size != round(_max_size):\n      raise ValueError('_max_size must be nonnegative int, was %s' % _max_size)\n    self.items = []\n    \n    \n    self._mutex = threading.Lock()\n    self._max_size = _max_size\n    self._num_items_seen = 0\n    if _random is not None:\n      self._random = _random\n    else:\n      self._random = random.Random(0)\n    self.always_keep_last = always_keep_last", "docstring": "Create the _ReservoirBucket.\n\nArgs:\n_max_size: The maximum size the reservoir bucket may grow to. If size is\nzero, the bucket has unbounded size.\n_random: The random number generator to use. If not specified, defaults to\nrandom.Random(0).\nalways_keep_last: Whether the latest seen item should always be included\nin the end of the bucket.\n\nRaises:\nValueError: if the size is not a nonnegative integer.", "source": "juraj-google-style"}
{"code": "def set_all_tiers(key, value, django_cache_timeout=DEFAULT_TIMEOUT):\n        \n        DEFAULT_REQUEST_CACHE.set(key, value)\n        django_cache.set(key, value, django_cache_timeout)", "docstring": "Caches the value for the provided key in both the request cache and the\ndjango cache.\n\nArgs:\nkey (string)\nvalue (object)\ndjango_cache_timeout (int): (Optional) Timeout used to determine\nif and for how long to cache in the django cache. A timeout of\n0 will skip the django cache. If timeout is provided, use that\ntimeout for the key; otherwise use the default cache timeout.", "source": "juraj-google-style"}
{"code": "def random_uniform(mesh, shape, **kwargs):\n  \n  shape = convert_to_shape(shape)\n  return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]", "docstring": "Random uniform.\n\nArgs:\nmesh: a Mesh\nshape: a Shape\n**kwargs: keyword args for tf.random.uniform, except seed\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def unicode(self, b, encoding=None):\n    if (encoding is None):\n        encoding = self.string_encoding\n    return unicode(b, encoding, self.decode_errors)", "docstring": "Convert a byte string to unicode, using string_encoding and decode_errors.\n\nArguments:\n\nb: a byte string.\n\nencoding: the name of an encoding.  Defaults to the string_encoding\nattribute for this instance.\n\nRaises:\n\nTypeError: Because this method calls Python's built-in unicode()\nfunction, this method raises the following exception if the\ngiven string is already unicode:\n\nTypeError: decoding Unicode is not supported", "source": "codesearchnet"}
{"code": "def run(self, *args, **kwargs):\n    accounts = list(AWSAccount.get_all(include_disabled=False).values())\n    self.manage_policies(accounts)", "docstring": "Iterate through all AWS accounts and apply roles and policies from Github\n\nArgs:\n*args: Optional list of arguments\n**kwargs: Optional list of keyword arguments\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def convert_elementwise_div(\n    params, w_name, scope_name, inputs, layers, weights, names\n):\n    \n    print('Converting elementwise_div ...')\n\n    if names == 'short':\n        tf_name = 'D' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    def target_layer(x):\n        layer = tf.div(\n            x[0],\n            x[1]\n        )\n        return layer\n\n    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n    layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])", "docstring": "Convert elementwise multiplication.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:\n    pred_boxes = self.box_head(image_feats)\n    if interpolate_pos_encoding:\n        _, num_patches_height, num_patches_width, _ = feature_map.shape\n        box_bias = self.compute_box_bias(num_patches_height, num_patches_width)\n    else:\n        box_bias = self.box_bias\n    box_bias = box_bias.to(feature_map.device)\n    pred_boxes += box_bias\n    pred_boxes = self.sigmoid(pred_boxes)\n    return pred_boxes", "docstring": "Args:\nimage_feats:\nFeatures extracted from the image, returned by the `image_text_embedder` method.\nfeature_map:\nA spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.\ninterpolate_pos_encoding:\nWhether to interpolate the pre-trained position encodings.\nReturns:\npred_boxes:\nList of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.", "source": "github-repos"}
{"code": "def make_class(node, props, ctx):\n    name = abstract_utils.get_atomic_python_constant(props.name_var)\n    log.info('Declaring class %s', name)\n    try:\n        class_dict = abstract_utils.get_atomic_value(props.class_dict_var)\n    except abstract_utils.ConversionError:\n        log.error('Error initializing class %r', name)\n        return ctx.convert.create_new_unknown(node)\n    metacls, bases = _filter_out_metaclasses(props.bases, ctx)\n    cls_var = metacls if metacls else props.metaclass_var\n    bases = [_process_base_class(node, base, ctx) for base in bases]\n    bases = _expand_generic_protocols(node, bases, ctx)\n    if not bases:\n        base = ctx.convert.object_type\n        bases = [base.to_variable(ctx.root_node)]\n    if isinstance(class_dict, abstract.Unsolvable) or not isinstance(class_dict, abstract.PythonConstant):\n        var = ctx.new_unsolvable(node)\n    else:\n        if cls_var is None:\n            cls_var = class_dict.members.get('__metaclass__')\n            if cls_var:\n                ctx.errorlog.ignored_metaclass(ctx.vm.frames, name, cls_var.data[0].full_name if cls_var.bindings else 'Any')\n        if cls_var and all((v.data.full_name == 'builtins.type' for v in cls_var.bindings)):\n            cls_var = None\n        cls = abstract_utils.get_atomic_value(cls_var, default=ctx.convert.unsolvable) if cls_var else None\n        if '__annotations__' not in class_dict.members and name in ctx.vm.annotated_locals:\n            annotations_dict = ctx.vm.annotated_locals[name]\n            if any((local.typ for local in annotations_dict.values())):\n                annotations_member = abstract.AnnotationsDict(annotations_dict, ctx).to_variable(node)\n                class_dict.members['__annotations__'] = annotations_member\n                class_dict.pyval['__annotations__'] = annotations_member\n        if '__init_subclass__' in class_dict.members:\n            underlying = class_dict.pyval['__init_subclass__']\n            _, method = ctx.vm.load_special_builtin('classmethod').call(node, func=None, args=function.Args(posargs=(underlying,)))\n            class_dict.pyval['__init_subclass__'] = method\n        try:\n            class_type = props.class_type or abstract.InterpreterClass\n            assert issubclass(class_type, abstract.InterpreterClass)\n            val = class_type(name, bases, class_dict.pyval, cls, ctx.vm.current_opcode, props.undecorated_methods, ctx)\n            _check_final_members(val, class_dict.pyval, ctx)\n            overriding_checks.check_overriding_members(val, bases, class_dict.pyval, ctx.matcher(node), ctx)\n            val.decorators = props.decorators or []\n        except mro.MROError as e:\n            ctx.errorlog.mro_error(ctx.vm.frames, name, e.mro_seqs)\n            var = ctx.new_unsolvable(node)\n        except abstract_utils.GenericTypeError as e:\n            ctx.errorlog.invalid_annotation(ctx.vm.frames, e.annot, e.error)\n            var = ctx.new_unsolvable(node)\n        else:\n            var = props.new_class_var or ctx.program.NewVariable()\n            var.AddBinding(val, props.class_dict_var.bindings, node)\n            node = val.call_metaclass_init(node)\n            node = val.call_init_subclass(node)\n    ctx.vm.trace_opcode(None, name, var)\n    return (node, var)", "docstring": "Create a class with the name, bases and methods given.\n\nArgs:\nnode: The current CFG node.\nprops: class_mixin.ClassBuilderProperties required to build the class\nctx: The current context.\n\nReturns:\nA node 
and an instance of class_type.", "source": "github-repos"}
{"code": "def _AbortJoin(self, timeout=None):\n    \n    for pid, process in iter(self._processes_per_pid.items()):\n      logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(\n          process.name, pid))\n      process.join(timeout=timeout)\n      if not process.is_alive():\n        logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(\n            process.name, pid))", "docstring": "Aborts all registered processes by joining with the parent process.\n\nArgs:\ntimeout (int): number of seconds to wait for processes to join, where\nNone represents no timeout.", "source": "juraj-google-style"}
{"code": "def handle_error(err, halt=True):\n        \n        print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))\n        if halt:\n            sys.exit(1)", "docstring": "Print errors message and optionally exit.\n\nArgs:\nerr (str): The error message to print.\nhalt (bool, optional): Defaults to True. If True the script will exit.", "source": "juraj-google-style"}
{"code": "def _decode_helper(obj, deserialize=False, module_objects=None, custom_objects=None):\n    if isinstance(obj, dict) and 'class_name' in obj:\n        if tf.available:\n            if obj['class_name'] == 'TensorShape':\n                return tf.TensorShape(obj['items'])\n            elif obj['class_name'] == 'TypeSpec':\n                from tensorflow.python.framework import type_spec_registry\n                return type_spec_registry.lookup(obj['type_spec'])._deserialize(_decode_helper(obj['serialized']))\n            elif obj['class_name'] == 'CompositeTensor':\n                spec = obj['spec']\n                tensors = []\n                for dtype, tensor in obj['tensors']:\n                    tensors.append(tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype)))\n                return tf.nest.pack_sequence_as(_decode_helper(spec), tensors, expand_composites=True)\n        if obj['class_name'] == '__tuple__':\n            return tuple((_decode_helper(i) for i in obj['items']))\n        elif obj['class_name'] == '__ellipsis__':\n            return Ellipsis\n        elif deserialize and '__passive_serialization__' in obj:\n            try:\n                if 'module' not in obj:\n                    return serialization.deserialize_keras_object(obj, module_objects=module_objects, custom_objects=custom_objects)\n                else:\n                    return serialization_lib.deserialize_keras_object(obj, module_objects=module_objects, custom_objects=custom_objects)\n            except ValueError:\n                pass\n        elif obj['class_name'] == '__bytes__':\n            return obj['value'].encode('utf-8')\n    return obj", "docstring": "A decoding helper that is TF-object aware.\n\nArgs:\nobj: A decoded dictionary that may represent an object.\ndeserialize: Boolean. When True, deserializes any Keras\nobjects found in `obj`. Defaults to `False`.\nmodule_objects: A dictionary of built-in objects to look the name up in.\nGenerally, `module_objects` is provided by midlevel library\nimplementers.\ncustom_objects: A dictionary of custom objects to look the name up in.\nGenerally, `custom_objects` is provided by the end user.\n\nReturns:\nThe decoded object.", "source": "github-repos"}
{"code": "def get_vnet(access_token, subscription_id, resource_group, vnet_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/virtualNetworks/', vnet_name, '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about the named virtual network.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvnet_name (str): Name of the VNet.\n\nReturns:\nHTTP response. VNet JSON body.", "source": "codesearchnet"}
{"code": "def register_rpc(self, address, rpc_id, func):\n    if ((rpc_id < 0) or (rpc_id > 65535)):\n        raise RPCInvalidIDError('Invalid RPC ID: {}'.format(rpc_id))\n    if (address not in self._rpc_overlays):\n        self._rpc_overlays[address] = RPCDispatcher()\n    self._rpc_overlays[address].add_rpc(rpc_id, func)", "docstring": "Register a single RPC handler with the given info.\n\nThis function can be used to directly register individual RPCs,\nrather than delegating all RPCs at a given address to a virtual\nTile.\n\nIf calls to this function are mixed with calls to add_tile for\nthe same address, these RPCs will take precedence over what is\ndefined in the tiles.\n\nArgs:\naddress (int): The address of the mock tile this RPC is for\nrpc_id (int): The number of the RPC\nfunc (callable): The function that should be called to handle the\nRPC.  func is called as func(payload) and must return a single\nstring object of up to 20 bytes with its response", "source": "codesearchnet"}
{"code": "def image_section(image, title):\n    \n    \n    img = yield marv.pull(image)\n    if img is None:\n        return\n\n    \n    widget = {'title': image.title, 'image': {'src': img.relpath}}\n    section = {'title': title, 'widgets': [widget]}\n    yield marv.push(section)", "docstring": "Create detail section with one image.\n\nArgs:\ntitle (str): Title to be displayed for detail section.\nimage: marv image file.\n\nReturns\nOne detail section.", "source": "juraj-google-style"}
{"code": "def _minimize_peak_memory_list(graph):\n  \n  schedule = []\n  bytes_freed = {}  \n  users_of = collections.defaultdict(set)  \n  in_degree = collections.defaultdict(int)  \n  operation_id = {}  \n  \n  \n  priority_queue = []  \n\n  \n  for i, operation_name in enumerate(graph.get_all_operation_names()):\n    operation_id[operation_name] = i\n\n    for input_name in graph.get_operation_input_names(operation_name):\n      \n      \n      \n      if operation_name in users_of[input_name]:\n        continue\n      users_of[input_name].add(operation_name)\n      in_degree[operation_name] += 1\n\n  for operation_name in graph.get_all_operation_names():\n    bytes_freed[operation_name] = 0\n    \n    for input_name in graph.get_operation_input_names(operation_name):\n      if len(users_of[input_name]) == 1 and not graph.is_tensor_final(\n          input_name):\n        bytes_freed[operation_name] += graph.get_tensor_size(input_name)\n    \n    \n    for output_name in graph.get_operation_output_names(operation_name):\n      \n      if users_of[output_name] or graph.is_tensor_final(output_name):\n        bytes_freed[operation_name] -= graph.get_tensor_size(output_name)\n\n  for operation_name in graph.get_all_operation_names():\n    if in_degree[operation_name] == 0:\n      heapq.heappush(priority_queue,\n                     (-bytes_freed[operation_name], operation_name))\n\n  \n  while priority_queue:\n    neg_bytes_freed, operation_name = heapq.heappop(priority_queue)\n    if bytes_freed[operation_name] != -neg_bytes_freed:\n      continue\n    schedule.append(operation_id[operation_name])\n    bytes_freed[operation_name] = None\n\n    for output_name in graph.get_operation_output_names(operation_name):\n      for other_operation_name in users_of[output_name]:\n        in_degree[other_operation_name] -= 1\n        if in_degree[other_operation_name] == 0:\n          heapq.heappush(priority_queue,\n                         (-bytes_freed[other_operation_name],\n                          other_operation_name))\n\n    for input_name in graph.get_operation_input_names(operation_name):\n      if operation_name not in users_of[input_name]:\n        \n        continue\n      users_of[input_name].remove(operation_name)\n      if len(users_of[input_name]) != 1 or graph.is_tensor_final(output_name):\n        continue\n      (other_operation_name,) = users_of[input_name]\n      bytes_freed[other_operation_name] += graph.get_tensor_size(\n          input_name)\n      if in_degree[other_operation_name] > 0:\n        continue\n      \n      \n      heapq.heappush(priority_queue, (-bytes_freed[other_operation_name],\n                                      other_operation_name))\n\n  return schedule", "docstring": "Computes schedule according to the greedy list heuristic.\n\nGreedy list heuristic: schedule the operation which results in the most bytes\nof memory being (immediately) freed.\nTODO(joshuawang): Experiment with tiebreaking by preferring more successors.\n\nArgs:\ngraph: an mtf.auto_mtf.graph_interface.GraphInterface.\n\nReturns:\nan iterable of integers representing the schedule.", "source": "juraj-google-style"}
{"code": "def deepcopy(original_obj):\n    \n    if isinstance(original_obj, list):\n        return list(deepcopy(item) for item in original_obj)\n    elif isinstance(original_obj, dict):\n        return dict((key, deepcopy(val)) for key, val in original_obj.items())\n    else:\n        return original_obj", "docstring": "Creates a deep copy of an object with no crossed referenced lists or dicts,\nuseful when loading from yaml as anchors generate those cross-referenced\ndicts and lists\n\nArgs:\noriginal_obj(object): Object to deep copy\n\nReturn:\nobject: deep copy of the object", "source": "juraj-google-style"}
{"code": "def db990(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db990`'.format(value))\n\n        self._db990 = value", "docstring": "Corresponds to IDD Field `db990`\nDry-bulb temperature corresponding to 90.0% annual cumulative\nfrequency of occurrence (cold conditions)\n\nArgs:\nvalue (float): value for IDD Field `db990`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def add_mutually_exclusive_groups(self, groups):\n    all_params = set.union(*groups)\n    for group in groups:\n        mutually_exclusive = all_params - group\n        for name in group:\n            self._mutually_exclusive[name].update(mutually_exclusive)", "docstring": "Adds groups of mutually exclusive type parameters.\n\nFor example, [{\"T1\", \"T2\"}, {\"T3\", \"T4\"}] would mean that the following\npairs are mutually exclusive: (T1, T3), (T1, T4), (T2, T3), (T2, T4).\n\nArgs:\ngroups: The mutually exclusive groups.", "source": "github-repos"}
{"code": "def listdir(self, target_directory):\n    target_directory = self.resolve_path(target_directory, allow_fd=True)\n    directory = self.confirmdir(target_directory)\n    directory_contents = directory.contents\n    return list(directory_contents.keys())", "docstring": "Return a list of file names in target_directory.\n\nArgs:\ntarget_directory: Path to the target directory within the\nfake filesystem.\n\nReturns:\nA list of file names within the target directory in arbitrary\norder.\n\nRaises:\nOSError: if the target is not a directory.", "source": "codesearchnet"}
{"code": "def add_history(self, filename, color_scheme, font, wrap):\n    filename = encoding.to_unicode_from_fs(filename)\n    if (filename in self.filenames):\n        return\n    editor = codeeditor.CodeEditor(self)\n    if (osp.splitext(filename)[1] == '.py'):\n        language = 'py'\n    else:\n        language = 'bat'\n    editor.setup_editor(linenumbers=False, language=language, scrollflagarea=False, show_class_func_dropdown=False)\n    editor.focus_changed.connect((lambda : self.focus_changed.emit()))\n    editor.setReadOnly(True)\n    editor.set_font(font, color_scheme)\n    editor.toggle_wrap_mode(wrap)\n    (text, _) = encoding.read(filename)\n    editor.set_text(text)\n    editor.set_cursor_position('eof')\n    self.editors.append(editor)\n    self.filenames.append(filename)\n    index = self.tabwidget.addTab(editor, osp.basename(filename))\n    self.find_widget.set_editor(editor)\n    self.tabwidget.setTabToolTip(index, filename)\n    self.tabwidget.setCurrentIndex(index)", "docstring": "Add new history tab.\n\nArgs:\nfilename (str): file to be loaded in a new tab.", "source": "codesearchnet"}
{"code": "def resolve(self, context, provider):\n    resolve_variables(self.variables, context, provider)\n    self.blueprint.resolve_variables(self.variables)", "docstring": "Resolve the Stack variables.\n\nThis resolves the Stack variables and then prepares the Blueprint for\nrendering by passing the resolved variables to the Blueprint.\n\nArgs:\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of\nthe base provider", "source": "codesearchnet"}
{"code": "def layer_norm(x, dim, epsilon=1e-6, name=\"layer_prepostprocess\"):\n  \n  with tf.variable_scope(name + \"/layer_norm\"):\n    scale = mtf.get_variable(\n        x.mesh,\n        \"layer_norm_scale\",\n        mtf.Shape([dim]),\n        initializer=tf.ones_initializer(),\n        activation_dtype=x.dtype)\n    bias = mtf.get_variable(\n        x.mesh,\n        \"layer_norm_bias\",\n        mtf.Shape([dim]),\n        initializer=tf.zeros_initializer(),\n        activation_dtype=x.dtype)\n    reduced_shape = x.shape - dim\n    mean = mtf.reduce_mean(x, output_shape=reduced_shape)\n    variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)\n    norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)\n    return norm_x * scale + bias", "docstring": "Layer normalization over dimension dim.\n\nArgs:\nx: a mtf.Tensor whose shape contains dim.\ndim: a mtf.Dimension\nepsilon: a floating point number\nname: a string. variable scope.\n\nReturns:\na mtf.Tensor with same shape as x.", "source": "juraj-google-style"}
{"code": "def __init__(self, key: Key, exclude_from_indexes: Iterable[str]=()):\n    self.key = key\n    self.exclude_from_indexes = set(exclude_from_indexes)\n    self.properties = {}", "docstring": "Represents a Datastore entity.\n\nDoes not support the property value \"meaning\" field.\n\nArgs:\nkey: (Key) A complete Key representing this Entity.\nexclude_from_indexes: (iterable of str) List of property keys whose values\nshould not be indexed for this entity.", "source": "github-repos"}
{"code": "def stop_threadsafe(self):\n    if self.stopped:\n        return\n    try:\n        self._loop.run_coroutine(self.stop())\n    except asyncio.TimeoutError:\n        raise TimeoutExpiredError('Timeout stopping task {} with {} subtasks'.format(self.name, len(self.subtasks)))", "docstring": "Stop this task from another thread and wait for it to finish.\n\nThis method must not be called from within the BackgroundEventLoop but\nwill inject self.stop() into the event loop and block until it\nreturns.\n\nRaises:\nTimeoutExpiredError: If the task does not stop in the given\ntimeout specified in __init__()", "source": "codesearchnet"}
{"code": "def lu_solve(LU, b):\n    from scipy.linalg import lu_solve as sp_lu_solve\n    LU = (asarray(LU[0], float), asarray(LU[1], float))\n    b = asarray(b, float)\n    return sp_lu_solve(LU, b, check_finite=False)", "docstring": "r\"\"\"Solve for LU decomposition.\n\nSolve the linear equations :math:`\\mathrm A \\mathbf x = \\mathbf b`,\ngiven the LU factorization of :math:`\\mathrm A`.\n\nArgs:\nLU (array_like): LU decomposition.\nb (array_like): Right-hand side.\n\nReturns:\n:class:`numpy.ndarray`: The solution to the system\n:math:`\\mathrm A \\mathbf x = \\mathbf b`.\n\nSee Also\n--------\nscipy.linalg.lu_factor : LU decomposition.\nscipy.linalg.lu_solve : Solve linear equations given LU factorization.", "source": "codesearchnet"}
{"code": "def set_site_energies(self, energies):\n    self.site_energies = energies\n    for site_label in energies:\n        for site in self.sites:\n            if (site.label == site_label):\n                site.energy = energies[site_label]", "docstring": "Set the energies for every site in the lattice according to the site labels.\n\nArgs:\nenergies (Dict(Str:Float): Dictionary of energies for each site label, e.g.::\n\n{ 'A' : 1.0, 'B', 0.0 }\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the ObjectDefaults object.'.format(kmip_version.value))\n    local_buffer = BytearrayStream()\n    if self._object_type:\n        self._object_type.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The ObjectDefaults structure is missing the object type field.')\n    if self._attributes:\n        self._attributes.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The ObjectDefaults structure is missing the attributes field.')\n    self.length = local_buffer.length()\n    super(ObjectDefaults, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the ObjectDefaults structure encoding to the data stream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode\nAttributes structure data, supporting a write method.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidField: Raised if the object type or attributes fields are\nnot defined.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the ObjectDefaults structure.", "source": "codesearchnet"}
{"code": "def subCell2DSlices(arr, shape, d01=None, p01=None):\n    \n    if p01 is not None:\n        yinit, xinit = p01\n    else:\n        xinit, yinit = 0, 0\n\n    x, y = xinit, yinit\n    g0, g1 = shape\n    s0, s1 = arr.shape[:2]\n\n    if d01 is not None:\n        d0, d1 = d01\n    else:\n        d0, d1 = s0 / g0, s1 / g1\n\n    y1 = d0 + yinit\n    for i in range(g0):\n        for j in range(g1):\n            x1 = x + d1\n            yield (i, j, slice(max(0, _rint(y)),\n                               max(0, _rint(y1))),\n                   slice(max(0, _rint(x)),\n                         max(0, _rint(x1))))\n            x = x1\n        y = y1\n        y1 = y + d0\n        x = xinit", "docstring": "Generator to access evenly sized sub-cells in a 2d array\n\nArgs:\nshape (tuple): number of sub-cells in y,x e.g. (10,15)\nd01 (tuple, optional): cell size in y and x\np01 (tuple, optional): position of top left edge\n\nReturns:\nint: 1st index\nint: 2nd index\nslice: first dimension\nslice: 1st dimension", "source": "juraj-google-style"}
{"code": "def create_secret(self, name, data, labels=None, driver=None):\n    if (not isinstance(data, bytes)):\n        data = data.encode('utf-8')\n    data = base64.b64encode(data)\n    if six.PY3:\n        data = data.decode('ascii')\n    body = {'Data': data, 'Name': name, 'Labels': labels}\n    if (driver is not None):\n        if utils.version_lt(self._version, '1.31'):\n            raise errors.InvalidVersion('Secret driver is only available for API version > 1.31')\n        body['Driver'] = driver\n    url = self._url('/secrets/create')\n    return self._result(self._post_json(url, data=body), True)", "docstring": "Create a secret\n\nArgs:\nname (string): Name of the secret\ndata (bytes): Secret data to be stored\nlabels (dict): A mapping of labels to assign to the secret\ndriver (DriverConfig): A custom driver configuration. If\nunspecified, the default ``internal`` driver will be used\n\nReturns (dict): ID of the newly created secret", "source": "codesearchnet"}
{"code": "def range(self, start_date=None, stop_date=None, field=lambda x: x.xfer):\n        \n        assert start_date <= stop_date, \\\n            \"Start date must be earlier than end date.\"\n\n        out = Transactions()\n\n        for t in self.trans:\n            date = field(t)\n            if (start_date is not None) and not (date >= start_date):\n                continue\n            if (stop_date is not None) and not (date <= stop_date):\n                continue\n            out.append(t)\n\n        return out", "docstring": "Return a ``Transactions`` object in an inclusive date range.\n\nArgs:\nstart_date: A ``datetime.Date`` object that marks the inclusive\nstart date for the range.\n\nstop_date: A ``datetime.Date`` object that marks the inclusive end\ndate for the range.\n\nfield: The field to compare start and end dates to. Default is the\n``xfer`` field.\n\nReturns:\nA ``Transactions`` object.", "source": "juraj-google-style"}
{"code": "def set_room_alias(self, room_id, room_alias):\n        \n        data = {\n            \"room_id\": room_id\n        }\n\n        return self._send(\"PUT\", \"/directory/room/{}\".format(quote(room_alias)),\n                          content=data)", "docstring": "Set alias to room id\n\nArgs:\nroom_id (str): The room id.\nroom_alias (str): The room wanted alias name.", "source": "juraj-google-style"}
{"code": "def func2md(self, func, clsname=None, names=None, depth=3):\n        \n        section = \"\n        if names is None:\n            names = [func.__name__]\n\n        funcname = \", \".join(names)\n        escfuncname = \", \".join([\"`%s`\" % funcname if funcname.startswith(\"_\") else funcname for funcname in names])\n        header = \"%s%s\" % (\"%s.\" % clsname if clsname else \"\", escfuncname)\n\n        path = self.get_src_path(func)\n        doc = self.doc2md(func)\n\n        args, kwargs = [], []\n        spec = getargspec(func)\n        vargsname, kwargsname = spec.varargs, spec.keywords\n        vargs = list(make_iter(spec.args)) if spec.args else []\n        defaults = list(make_iter(spec.defaults)) if spec.defaults else []\n\n        while vargs:\n            if vargs and vargs[0] == \"self\":\n                args.append(vargs.pop(0))\n            elif len(vargs) > len(defaults):\n                args.append(vargs.pop(0))\n            else:\n                default = defaults.pop(0)\n                if isinstance(default, str):\n                    default = \"\\\"%s\\\"\" % default\n                else:\n                    default = \"%s\" % str(default)\n\n                kwargs.append((vargs.pop(0), default))\n\n        if args:\n            args = \", \".join(\"%s\" % arg for arg in args)\n        if kwargs:\n            kwargs = \", \".join(\"%s=%s\" % kwarg for kwarg in kwargs)\n            if args:\n                kwargs = \", \" + kwargs\n        if vargsname:\n            vargsname = \"*%s\" % vargsname\n            if args or kwargs:\n                vargsname = \", \" + vargsname\n        if kwargsname:\n            kwargsname = \"**%s\" % kwargsname\n            if args or kwargs or vargsname:\n                kwargsname = \", \" + kwargsname\n\n        _FUNCDEF = \"{funcname}({args}{kwargs}{vargs}{vkwargs})\"\n        funcdef = _FUNCDEF.format(funcname=funcname,\n                                  args=args or \"\",\n                                  kwargs=kwargs or \"\",\n                                  vargs=vargsname or \"\",\n                                  vkwargs=kwargsname or \"\")\n\n        \n        lmax = 90\n        if len(funcdef) > lmax:\n            \n            split = funcdef.split(\"(\", 1)\n            \n            rest = split[1]\n            args = rest.split(\", \")\n\n            funcname = \"(\".join(split[:1]) + \"(\"\n            lline = len(funcname)\n            parts = []\n            for arg in args:\n                larg = len(arg)\n                if larg > lmax - 5:\n                    \n                    parts.append(arg)\n                elif lline + larg > lmax:\n                    \n                    parts.append(\"\\\\\\n    \" + arg)\n                    lline = 0\n                else:\n                    parts.append(arg)\n                lline += len(parts[-1])\n            funcdef = funcname + \", \".join(parts)\n\n        \n        string = FUNC_TEMPLATE.format(section=section,\n                                      header=header,\n                                      funcdef=funcdef,\n                                      path=path,\n                                      doc=doc if doc else \"*No documentation found.*\")\n        return string", "docstring": "Takes a function (or method) and documents it.\n\nArgs:\nclsname (str, optional): class name to prepend to funcname.\ndepth (int, optional): number of ### to append to function name", "source": "juraj-google-style"}
{"code": "def _ParseFileData(self, knowledge_base, file_object):\n    \n    text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')\n\n    system_product = text_file_object.readline()\n    system_product = system_product.strip()\n\n    if not knowledge_base.GetValue('operating_system_product'):\n      if system_product:\n        knowledge_base.SetValue('operating_system_product', system_product)", "docstring": "Parses file content (data) for system product preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_object (dfvfs.FileIO): file-like object that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def __init__(self, replica_id_in_sync_group=0, num_replicas_in_sync=1):\n    self._replica_id_in_sync_group = replica_id_in_sync_group\n    self._num_replicas_in_sync = num_replicas_in_sync", "docstring": "Initializes a ValueContext object.\n\nArgs:\nreplica_id_in_sync_group: the current replica_id, should be an int in\n[0,`num_replicas_in_sync`).\nnum_replicas_in_sync: the number of replicas that are in sync.", "source": "github-repos"}
{"code": "def __init__(self, resolver_context):\n    \n    super(SQLiteBlobFile, self).__init__(resolver_context)\n    self._blob = None\n    self._current_offset = 0\n    self._database_object = None\n    self._number_of_rows = None\n    self._size = 0\n    self._table_name = None", "docstring": "Initializes the file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def merge_results(inputs, arguments=None):\n        \n        if arguments is None:\n            arguments = Arguments()\n\n        args = arguments.args\n        kwargs = arguments.kwargs\n\n        for i in inputs:\n            \n            \n            if isinstance(i.result, dict):\n                \n                kwargs.update({k: v for k, v in i.result.items() if k not in kwargs})\n            elif isinstance(i.result, list):\n                args.extend(i.result)\n            elif isinstance(i.result, Arguments):\n                args.extend(i.result.args)\n                kwargs.update({k: v for k, v in i.result.kwargs.items() if k not in kwargs})\n            \n            else:\n                args.append(i.result)\n\n        return arguments", "docstring": "Merges results to form arguments to run(). There are two cases for each result:\n- dictionary: dictionaries get merged and passed as keyword arguments\n- list: lists get concatenated to positional arguments\n- Arguments: kwargs gets merged and args gets appended\n- else: concatenated and passed as postitional arguments\nArgs:\ninputs: the inputs whose results to merge\narguments: an optional existing Arguments object to merge into", "source": "juraj-google-style"}
{"code": "def _dispatch_coroutine(self, event, listener, *args, **kwargs):\n    try:\n        coro = listener(*args, **kwargs)\n    except Exception as exc:\n        if (event == self.LISTENER_ERROR_EVENT):\n            raise\n        return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)\n    asyncio.ensure_future(_try_catch_coro(self, event, listener, coro), loop=self._loop)", "docstring": "Schedule a coroutine for execution.\n\nArgs:\nevent (str): The name of the event that triggered this call.\nlistener (async def): The async def that needs to be executed.\n*args: Any number of positional arguments.\n**kwargs: Any number of keyword arguments.\n\nThe values of *args and **kwargs are passed, unaltered, to the async\ndef when generating the coro. If there is an exception generating the\ncoro, such as the wrong number of arguments, the emitter's error event\nis triggered. If the triggering event _is_ the emitter's error event\nthen the exception is reraised. The reraised exception may show in\ndebug mode for the event loop but is otherwise silently dropped.", "source": "codesearchnet"}
{"code": "def diff_linesToChars(self, text1, text2):\n    \n    lineArray = []  \n    lineHash = {}   \n\n    \n    \n    lineArray.append('')\n\n    def diff_linesToCharsMunge(text):\n      \n      chars = []\n      \n      \n      \n      lineStart = 0\n      lineEnd = -1\n      while lineEnd < len(text) - 1:\n        lineEnd = text.find('\\n', lineStart)\n        if lineEnd == -1:\n          lineEnd = len(text) - 1\n        line = text[lineStart:lineEnd + 1]\n\n        if line in lineHash:\n          chars.append(chr(lineHash[line]))\n        else:\n          if len(lineArray) == maxLines:\n            \n            line = text[lineStart:]\n            lineEnd = len(text)\n          lineArray.append(line)\n          lineHash[line] = len(lineArray) - 1\n          chars.append(chr(len(lineArray) - 1))\n        lineStart = lineEnd + 1\n      return \"\".join(chars)\n\n    \n    maxLines = 666666\n    chars1 = diff_linesToCharsMunge(text1)\n    maxLines = 1114111\n    chars2 = diff_linesToCharsMunge(text2)\n    return (chars1, chars2, lineArray)", "docstring": "Split two texts into an array of strings.  Reduce the texts to a string\nof hashes where each Unicode character represents one line.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nThree element tuple, containing the encoded text1, the encoded text2 and\nthe array of unique strings.  The zeroth element of the array of unique\nstrings is intentionally blank.", "source": "juraj-google-style"}
{"code": "def get_certificate(self, id):\n    return Certificate.get_object(api_token=self.token, cert_id=id)", "docstring": "Returns a Certificate object by its ID.\n\nArgs:\nid (str): Certificate ID", "source": "codesearchnet"}
{"code": "def decode(self, codes):\n        \n        \n        return self.pq.decode(codes) @ self.R.T", "docstring": "Given PQ-codes, reconstruct original D-dimensional vectors via :func:`PQ.decode`,\nand applying an inverse-rotation.\n\nArgs:\ncodes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype.\nEach row is a PQ-code\n\nReturns:\nnp.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32", "source": "juraj-google-style"}
{"code": "def create_binary_descriptor(descriptor):\n    func_names = {0: 'copy_latest_a', 1: 'average_a', 2: 'copy_all_a', 3: 'sum_a', 4: 'copy_count_a', 5: 'trigger_streamer', 6: 'call_rpc', 7: 'subtract_afromb'}\n    func_codes = {y: x for (x, y) in func_names.items()}\n    (node, inputs, processing) = parse_node_descriptor(descriptor, DeviceModel())\n    func_code = func_codes.get(processing)\n    if (func_code is None):\n        raise ArgumentError('Unknown processing function', function=processing)\n    (stream_a, trigger_a) = inputs[0]\n    stream_a = stream_a.encode()\n    if (len(inputs) == 2):\n        (stream_b, trigger_b) = inputs[1]\n        stream_b = stream_b.encode()\n    else:\n        (stream_b, trigger_b) = (65535, None)\n    if (trigger_a is None):\n        trigger_a = TrueTrigger()\n    if (trigger_b is None):\n        trigger_b = TrueTrigger()\n    ref_a = 0\n    if isinstance(trigger_a, InputTrigger):\n        ref_a = trigger_a.reference\n    ref_b = 0\n    if isinstance(trigger_b, InputTrigger):\n        ref_b = trigger_b.reference\n    trigger_a = _create_binary_trigger(trigger_a)\n    trigger_b = _create_binary_trigger(trigger_b)\n    combiner = node.trigger_combiner\n    bin_desc = struct.pack('<LLHHHBBBB2x', ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner)\n    return bin_desc", "docstring": "Convert a string node descriptor into a 20-byte binary descriptor.\n\nThis is the inverse operation of parse_binary_descriptor and composing\nthe two operations is a noop.\n\nArgs:\ndescriptor (str): A string node descriptor\n\nReturns:\nbytes: A 20-byte binary node descriptor.", "source": "codesearchnet"}
{"code": "def transform_ast(self, node, ctx):\n    raise NotImplementedError('subclasses must override this')", "docstring": "Performs an actual transformation of a function's AST.\n\nSubclasses must implement this method, and do not usually call it.\n\nArgs:\nnode: One or more ast.AST nodes representing the AST to be transformed.\nctx: transformer.Context.", "source": "github-repos"}
{"code": "def loss(probs, labels):\n    diff = -labels * tf.math.log(probs)\n    loss = tf.reduce_mean(diff)\n    return loss", "docstring": "Calculates cross entropy loss.\n\nArgs:\nprobs: Class probabilities predicted by the model. The shape is expected\nto be (?, 10).\nlabels: Truth labels for the classes, as one-hot encoded vectors. The\nshape is expected to be the same as `probs`.\n\nReturns:\nA scalar loss tensor.", "source": "github-repos"}
{"code": "def put(self, block_id, priority, pb_type='offline'):\n        \n        if pb_type not in ('offline', 'realtime'):\n            raise ValueError('Invalid PB type.')\n\n        with self._mutex:\n            added_time = datetime.datetime.utcnow().isoformat()\n            entry = (priority, sys.maxsize-self._index, block_id, pb_type,\n                     added_time)\n            self._index += 1\n            if self._block_map.get(block_id) is not None:\n                raise KeyError('ERROR: Block id \"{}\" already exists in '\n                               'PC PB queue!'.\n                               format(block_id))\n            self._block_map[block_id] = entry\n            LOG.debug(\"Adding PB %s to queue\", block_id)\n            self._queue.append(entry)\n            self._queue.sort()  \n            self._queue.reverse()", "docstring": "Add a Processing Block to the queue.\n\nWhen a new entry it added, the queue is (re-)sorted by priority\nfollowed by insertion order (older blocks with equal priority are\nfirst).\n\nArgs:\nblock_id (str): Processing Block Identifier\npriority (int): Processing Block scheduling priority\n(higher values = higher priority)\npb_type (str): Processing Block type (offline, realtime)", "source": "juraj-google-style"}
{"code": "def create(options, timer=None, use_deque=True):\n    if (options is None):\n        return None\n    if (not isinstance(options, (CheckOptions, QuotaOptions, ReportOptions))):\n        _logger.error(u'make_cache(): bad options %s', options)\n        raise ValueError(u'Invalid options')\n    if (options.num_entries <= 0):\n        _logger.debug(u'did not create cache, options was %s', options)\n        return None\n    _logger.debug(u'creating a cache from %s', options)\n    if (options.flush_interval > ZERO_INTERVAL):\n        ttl = getattr(options, u'expiration', options.flush_interval)\n        cache_cls = (DequeOutTTLCache if use_deque else cachetools.TTLCache)\n        return LockedObject(cache_cls(options.num_entries, ttl=ttl.total_seconds(), timer=to_cache_timer(timer)))\n    cache_cls = (DequeOutLRUCache if use_deque else cachetools.LRUCache)\n    return LockedObject(cache_cls(options.num_entries))", "docstring": "Create a cache specified by ``options``\n\n``options`` is an instance of either\n:class:`endpoints_management.control.caches.CheckOptions` or\n:class:`endpoints_management.control.caches.ReportOptions`\n\nThe returned cache is wrapped in a :class:`LockedObject`, requiring it to\nbe accessed in a with statement that gives synchronized access\n\nExample:\n>>> options = CheckOptions()\n>>> synced_cache = make_cache(options)\n>>> with synced_cache as cache:  #  acquire the lock\n...    cache['a_key'] = 'a_value'\n\nArgs:\noptions (object): an instance of either of the options classes\n\nReturns:\n:class:`cachetools.Cache`: the cache implementation specified by options\nor None: if options is ``None`` or if options.num_entries < 0\n\nRaises:\nValueError: if options is not a support type", "source": "codesearchnet"}
{"code": "def last_updated(self, path):\n    try:\n        return s3io.S3IO(options=self._options).last_updated(path)\n    except Exception as e:\n        raise BeamIOError('last_updated operation failed', {path: e})", "docstring": "Get UNIX Epoch time in seconds on the FileSystem.\n\nArgs:\npath: string path of file.\n\nReturns: float UNIX Epoch time\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def resize(self, video: 'torch.Tensor', size: SizeDict, interpolation: 'F.InterpolationMode'=None, antialias: bool=True, **kwargs) -> 'torch.Tensor':\n    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR\n    if interpolation == F.InterpolationMode.LANCZOS:\n        logger.warning_once('You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. BICUBIC resample will be used as an alternative. Please fall back to image processor if you want full consistency with the original model.')\n        interpolation = F.InterpolationMode.BICUBIC\n    if size.longest_edge:\n        new_size = get_resize_output_image_size(video, resolution_max_side=size.longest_edge)\n    elif size.height and size.width:\n        new_size = (size.height, size.width)\n    else:\n        raise ValueError(f\"Size must contain 'height' and 'width' keys, or 'longest_edge' key. Got {size}.\")\n    video = F.resize(video, new_size, interpolation=interpolation, antialias=antialias)\n    max_size = (self.max_image_size['longest_edge'], self.max_image_size['longest_edge'])\n    video = F.resize(video, max_size, interpolation=interpolation, antialias=antialias)\n    return video", "docstring": "Resize an video to `(size[\"height\"], size[\"width\"])`.\nArgs:\nvideo (`torch.Tensor`):\nVideo to resize.\nsize (`SizeDict`):\nDictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output video.\nresample (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):\n`InterpolationMode` filter to use when resizing the video e.g. `InterpolationMode.BICUBIC`.\nReturns:\n`torch.Tensor`: The resized video.", "source": "github-repos"}
{"code": "def get_content_metadata(self, enterprise_customer):\n    content_metadata = OrderedDict()\n    if enterprise_customer.catalog:\n        response = self._load_data(self.ENTERPRISE_CUSTOMER_ENDPOINT, detail_resource='courses', resource_id=str(enterprise_customer.uuid), traverse_pagination=True)\n        for course in response['results']:\n            for course_run in course['course_runs']:\n                course_run['content_type'] = 'courserun'\n                content_metadata[course_run['key']] = course_run\n    for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():\n        response = self._load_data(self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT, resource_id=str(enterprise_customer_catalog.uuid), traverse_pagination=True, querystring={'page_size': 1000})\n        for item in response['results']:\n            content_id = utils.get_content_metadata_item_id(item)\n            content_metadata[content_id] = item\n    return content_metadata.values()", "docstring": "Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.\n\nArguments:\nenterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.\n\nReturns:\nlist: List of dicts containing content metadata.", "source": "codesearchnet"}
{"code": "def exists(self, path):\n    try:\n        return s3io.S3IO(options=self._options).exists(path)\n    except Exception as e:\n        raise BeamIOError('exists() operation failed', {path: e})", "docstring": "Check if the provided path exists on the FileSystem.\n\nArgs:\npath: string path that needs to be checked.\n\nReturns: boolean flag indicating if path exists", "source": "github-repos"}
{"code": "def get_cmd_handler(self, cmd):\n        \n        cmd = cmd.replace('-', '_')\n        handler = getattr(self, cmd, None)\n        if not handler:\n            raise BuildException(\n                'Command {} is not supported as a '\n                'build command'.format(cmd)\n            )\n        return handler", "docstring": "Return an handler for cmd.\nThe handler and the command should have the same name.\nSee class description for more info about handlers.\n\nArgs:\ncmd (str): The name of the command\n\nReturns:\ncallable: which handles cmd\n\nRaises:\nlago.build.BuildException: If an handler for cmd doesn't exist", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key):\n    \n    if not isinstance(key, tuple) or len(key) != 2:\n      raise IndexError('Invalid index: {0}'.format(key))\n    return self._items.get(key, self._default_value)", "docstring": "Returns element of the matrix indexed by given key.\n\nArgs:\nkey: tuple of (row_idx, column_idx)\n\nReturns:\nElement of the matrix\n\nRaises:\nIndexError: if key is invalid.", "source": "juraj-google-style"}
{"code": "def ResolvePrefix(self, subject, attribute_prefix, timestamp=None, limit=None):\n    for (_, values) in self.MultiResolvePrefix([subject], attribute_prefix, timestamp=timestamp, limit=limit):\n        values.sort(key=(lambda a: a[0]))\n        return values\n    return []", "docstring": "Retrieve a set of value matching for this subject's attribute.\n\nArgs:\nsubject: The subject that we will search.\nattribute_prefix: The attribute prefix.\ntimestamp: A range of times for consideration (In microseconds). Can be a\nconstant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints\n(start, end).\nlimit: The number of results to fetch.\n\nReturns:\nA list of (attribute, value string, timestamp).\n\nValues with the same attribute (happens when timestamp is not\nNEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed\nto be ordered in the decreasing timestamp order.\n\nRaises:\nAccessError: if anything goes wrong.", "source": "codesearchnet"}
{"code": "def delete_ldap_group_link(self, cn, provider=None, **kwargs):\n    path = ('/groups/%s/ldap_group_links' % self.get_id())\n    if (provider is not None):\n        path += ('/%s' % provider)\n    path += ('/%s' % cn)\n    self.manager.gitlab.http_delete(path)", "docstring": "Delete an LDAP group link.\n\nArgs:\ncn (str): CN of the LDAP group\nprovider (str): LDAP provider for the LDAP group\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def write(self, *pb2_obj):\n    base = len(self._write_buff)\n    for (idx, obj) in enumerate(pb2_obj):\n        if ((self._buffer_size > 0) and ((idx + base) != 0) and (((idx + base) % self._buffer_size) == 0)):\n            self.flush()\n        self._write_buff.append(obj)\n    if (self._buffer_size == 0):\n        self.flush()", "docstring": "Write a group of one or more protobuf objects to the file. Multiple\nobject groups can be written by calling this method several times\nbefore closing stream or exiting the runtime context.\n\nThe input protobuf objects get buffered and will be written down when\nthe number of buffered objects exceed the `self._buffer_size`.\n\nArgs:\npb2_obj (*protobuf.message.Message): list of protobuf messages.", "source": "codesearchnet"}
{"code": "def freeze_to_tar(script_path, freeze_fn, extra_files=None):\n    if (not extra_files):\n        extra_files = []\n    freeze_dir = tempfile.mkdtemp()\n    try:\n        cmds = freeze(script_path, target_dir=freeze_dir)\n        if freeze_fn.endswith('.tar.gz'):\n            mode = 'w|gz'\n        elif freeze_fn.endswith('.tar'):\n            mode = 'w'\n        else:\n            raise NameError(('[%s] must end in .tar or .tar.gz' % freeze_fn))\n        fp = tarfile.open(freeze_fn, mode)\n        proj_name = os.path.basename(script_path)\n        proj_name = proj_name[:proj_name.rfind('.')]\n        for x in (glob.glob(('%s/dist/%s/*' % (freeze_dir, proj_name))) + extra_files):\n            fp.add(x, arcname=os.path.basename(x))\n        fp.close()\n    finally:\n        shutil.rmtree(freeze_dir)\n    return cmds", "docstring": "Freezes a script to a .tar or .tar.gz file\n\nThe script contains all of the files at the root of the tar\n\nArgs:\nscript_path: Path to python script to be frozen.\nfreeze_fn: Tar filename (must end in .tar or .tar.gz)\nextra_files: List of paths to add to the tar (default is None)\n\nReturns:\nList of freeze commands ran\n\nRaises:\nsubprocess.CalledProcessError: freeze error.\nOSError: freeze not found.\nNameError: Tar must end in .tar or .tar.gz", "source": "codesearchnet"}
{"code": "def _get_ip_unnumbered(self, unnumbered_type, unnumbered_name):\n        \n        unnumbered_type = self._callback(unnumbered_type, handler='get_config')\n        unnumbered_name = self._callback(unnumbered_name, handler='get_config')\n        unnumbered_type = pynos.utilities.return_xml(str(unnumbered_type))\n        unnumbered_name = pynos.utilities.return_xml(str(unnumbered_name))\n        return pynos.utilities.merge_xml(unnumbered_type, unnumbered_name)", "docstring": "Get and merge the `ip unnumbered` config from an interface.\n\nYou should not use this method.\nYou probably want `Interface.ip_unnumbered`.\n\nArgs:\nunnumbered_type: XML document with the XML to get the donor type.\nunnumbered_name: XML document with the XML to get the donor name.\n\nReturns:\nMerged XML document.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def is_finite_number(value):\n  \n  if not isinstance(value, (numbers.Integral, float)):\n      \n    return False\n\n  if isinstance(value, bool):\n    \n    return False\n\n  if isinstance(value, float):\n    if math.isnan(value) or math.isinf(value):\n      return False\n\n  if abs(value) > (2**53):\n    return False\n\n  return True", "docstring": "Validates if the given value is a number, enforces\nabsolute limit of 2^53 and restricts NAN, INF, -INF.\n\nArgs:\nvalue: Value to be validated.\n\nReturns:\nBoolean: True if value is a number and not NAN, INF, -INF or\ngreater than absolute limit of 2^53 else False.", "source": "juraj-google-style"}
{"code": "def random_init_mapping(candidate_mapping):\n    \n    \n    random.seed()\n    matched_dict = {}\n    result = []\n    for c in candidate_mapping:\n        candidates = list(c)\n        if not candidates:\n            \n            result.append(-1)\n            continue\n        found = False\n        while candidates:\n            \n            rid = random.randint(0, len(candidates) - 1)\n            candidate = candidates[rid]\n            \n            if candidate in matched_dict:\n                candidates.pop(rid)\n            else:\n                matched_dict[candidate] = 1\n                result.append(candidate)\n                found = True\n                break\n        if not found:\n            result.append(-1)\n    return result", "docstring": "Generate a random node mapping.\nArgs:\ncandidate_mapping: candidate_mapping: candidate node match list\nReturns:\nrandomly-generated node mapping between two AMRs", "source": "juraj-google-style"}
{"code": "def init_from_acceptor_bycopying(self, acceptor):\n        \n        for state in acceptor.states:\n            for arc in state.arcs:\n                self.add_arc(state.stateid, arc.nextstate, acceptor.isyms.find(arc.ilabel))\n            if state.final:\n                print state.stateid,' is final'\n                self[state.stateid].final = True;", "docstring": "Adds a sink state\nArgs:\nalphabet (list): The input alphabet\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(GetRequestPayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.KEY_FORMAT_TYPE, local_stream):\n        self._key_format_type = primitives.Enumeration(enum=enums.KeyFormatType, tag=enums.Tags.KEY_FORMAT_TYPE)\n        self._key_format_type.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.KEY_COMPRESSION_TYPE, local_stream):\n        self._key_compression_type = primitives.Enumeration(enum=enums.KeyCompressionType, tag=enums.Tags.KEY_COMPRESSION_TYPE)\n        self._key_compression_type.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.KEY_WRAPPING_SPECIFICATION, local_stream):\n        self._key_wrapping_specification = objects.KeyWrappingSpecification()\n        self._key_wrapping_specification.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Get request payload and decode it into its\nconstituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def list_instances(i_info, param_str, numbered=False):\n    print(param_str)\n    for i in i_info:\n        if numbered:\n            print('Instance {}\n        print('  {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}'.format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2))\n        print('  AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}'.format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame']))\n        list_tags(i_info[i]['tag'])\n    debg.dprintx('All Data')\n    debg.dprintx(i_info, True)", "docstring": "Display a list of all instances and their details.\n\nIterates through all the instances in the dict, and displays\ninformation for each instance.\n\nArgs:\ni_info (dict): information on instances and details.\nparam_str (str): the title to display before the list.\nnumbered (bool): optional - indicates wheter the list should be\ndisplayed with numbers before each instance.\nThis is used when called from user_picklist.", "source": "codesearchnet"}
{"code": "def bool(name, default=None, allow_none=False, fallback=None):\n    \n    value = read(name, default, allow_none, fallback=fallback)\n    if isinstance(value, builtins.bool):\n        return value\n    elif isinstance(value, builtins.int):\n        return True if value > 0 else False\n    elif value is None and allow_none:\n        return None\n    else:\n        value_str = builtins.str(value).lower().strip()\n        return _strtobool(value_str)", "docstring": "Get a boolean based environment value or the default.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)", "source": "juraj-google-style"}
{"code": "def calc_intent(self, query):\n        \n        matches = self.calc_intents(query)\n        if len(matches) == 0:\n            return MatchData('', '')\n        best_match = max(matches, key=lambda x: x.conf)\n        best_matches = (match for match in matches if match.conf == best_match.conf)\n        return min(best_matches, key=lambda x: sum(map(len, x.matches.values())))", "docstring": "Tests all the intents against the query and returns\nmatch data of the best intent\n\nArgs:\nquery (str): Input sentence to test against intents\nReturns:\nMatchData: Best intent match", "source": "juraj-google-style"}
{"code": "def set_permitted_ip(address=None, deploy=False):\n    if (not address):\n        raise CommandExecutionError('Address option must not be empty.')\n    ret = {}\n    query = {'type': 'config', 'action': 'set', 'xpath': \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip\", 'element': \"<entry name='{0}'></entry>\".format(address)}\n    ret.update(__proxy__['panos.call'](query))\n    if (deploy is True):\n        ret.update(commit())\n    return ret", "docstring": "Add an IPv4 address or network to the permitted IP list.\n\nCLI Example:\n\nArgs:\naddress (str): The IPv4 address or network to allow access to add to the Palo Alto device.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_permitted_ip 10.0.0.1\nsalt '*' panos.set_permitted_ip 10.0.0.0/24\nsalt '*' panos.set_permitted_ip 10.0.0.1 deploy=True", "source": "codesearchnet"}
{"code": "def unpack_binary(self, offset, length=False):\n        \n        if not length:\n            return bytes(\"\".encode(\"ascii\"))\n        o = self._offset + offset\n        try:\n            return bytes(struct.unpack_from(\"<{}s\".format(length), self._buf, o)[0])\n        except struct.error:\n            raise OverrunBufferException(o, len(self._buf))", "docstring": "Returns raw binary data from the relative offset with the given length.\nArguments:\n- `offset`: The relative offset from the start of the block.\n- `length`: The length of the binary blob. If zero, the empty string\nzero length is returned.\nThrows:\n- `OverrunBufferException`", "source": "juraj-google-style"}
{"code": "def _get_what_to_read_next(fp, previously_read_position, chunk_size):\n    \n    seek_position = max(previously_read_position - chunk_size, 0)\n    read_size = chunk_size\n\n    \n    \n    \n    \n    \n    \n    while seek_position > 0:\n        fp.seek(seek_position)\n        if _is_partially_read_new_line(fp.read(1)):\n            seek_position -= 1\n            read_size += 1  \n        else:\n            break\n\n    \n    read_size = min(previously_read_position - seek_position, read_size)\n    return seek_position, read_size", "docstring": "Return information on which file pointer position to read from and how many bytes.\n\nArgs:\nfp\npast_read_positon (int): The file pointer position that has been read previously\nchunk_size(int): ideal io chunk_size\n\nReturns:\n(int, int): The next seek position, how many bytes to read next", "source": "juraj-google-style"}
{"code": "def AssertDictType(dct, expected_key_type, expected_value_type):\n    AssertType(dct, dict)\n    for (key, value) in iteritems(dct):\n        AssertType(key, expected_key_type)\n        AssertType(value, expected_value_type)", "docstring": "Ensures that given dictionary is actually a dictionary of specified type.\n\nArgs:\ndct: A dictionary to assert the type for.\nexpected_key_type: An expected type for dictionary keys.\nexpected_value_type: An expected type for dictionary values.\n\nRaises:\nTypeError: If given dictionary is not really a dictionary or not all its\nkeys and values have the expected type.", "source": "codesearchnet"}
{"code": "def is40(msg):\n    if allzeros(msg):\n        return False\n    d = hex2bin(data(msg))\n    if wrongstatus(d, 1, 2, 13):\n        return False\n    if wrongstatus(d, 14, 15, 26):\n        return False\n    if wrongstatus(d, 27, 28, 39):\n        return False\n    if wrongstatus(d, 48, 49, 51):\n        return False\n    if wrongstatus(d, 54, 55, 56):\n        return False\n    if (bin2int(d[39:47]) != 0):\n        return False\n    if (bin2int(d[51:53]) != 0):\n        return False\n    return True", "docstring": "Check if a message is likely to be BDS code 4,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"}
{"code": "def join(self, basepath, *paths):\n    if not basepath.startswith(GCSFileSystem.GCS_PREFIX):\n        raise ValueError('Basepath %r must be GCS path.' % basepath)\n    path = basepath\n    for p in paths:\n        path = path.rstrip('/') + '/' + p.lstrip('/')\n    return path", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\nbasepath: string path of the first component of the path\npaths: path components to be added\n\nReturns: full path after combining all the passed components", "source": "github-repos"}
{"code": "def to_microseconds(value):\n    \n    if not value.tzinfo:\n        value = value.replace(tzinfo=pytz.utc)\n    \n    value = value.astimezone(pytz.utc)\n    \n    return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond", "docstring": "Convert a datetime to microseconds since the unix epoch.\n\nArgs:\nvalue (datetime.datetime): The datetime to covert.\n\nReturns:\nint: Microseconds since the unix epoch.", "source": "juraj-google-style"}
{"code": "def edge_length_sum(self, terminal=True, internal=True):\n    if (not isinstance(terminal, bool)):\n        raise TypeError('leaves must be a bool')\n    if (not isinstance(internal, bool)):\n        raise TypeError('internal must be a bool')\n    return sum((node.edge_length for node in self.traverse_preorder() if ((node.edge_length is not None) and ((terminal and node.is_leaf()) or (internal and (not node.is_leaf()))))))", "docstring": "Compute the sum of all selected edge lengths in this ``Tree``\n\nArgs:\n``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``\n\nReturns:\n``float``: Sum of all selected edge lengths in this ``Tree``", "source": "codesearchnet"}
{"code": "def maybe_download_from_drive(directory, filename, url):\n    if (not tf.gfile.Exists(directory)):\n        tf.logging.info(('Creating directory %s' % directory))\n        tf.gfile.MakeDirs(directory)\n    filepath = os.path.join(directory, filename)\n    confirm_token = None\n    if tf.gfile.Exists(filepath):\n        tf.logging.info(('Not downloading, file already found: %s' % filepath))\n        return filepath\n    confirm_token = None\n    session = requests.Session()\n    response = session.get(url, stream=True)\n    for (k, v) in response.cookies.items():\n        if k.startswith('download_warning'):\n            confirm_token = v\n    if confirm_token:\n        url = ((url + '&confirm=') + confirm_token)\n    tf.logging.info(('Downloading %s to %s' % (url, filepath)))\n    response = session.get(url, stream=True)\n    chunk_size = (16 * 1024)\n    with open(filepath, 'wb') as f:\n        for chunk in response.iter_content(chunk_size):\n            if chunk:\n                f.write(chunk)\n    print()\n    statinfo = os.stat(filepath)\n    tf.logging.info(('Successfully downloaded %s, %s bytes.' % (filename, statinfo.st_size)))\n    return filepath", "docstring": "Download filename from Google drive unless it's already in directory.\n\nArgs:\ndirectory: path to the directory that will be used.\nfilename: name of the file to download to (do nothing if it already exists).\nurl: URL to download from.\n\nReturns:\nThe path to the downloaded file.", "source": "codesearchnet"}
{"code": "def check_python_requirements(path_or_repo_id, requirements_file='requirements.txt', **kwargs):\n    failed = []\n    try:\n        requirements = cached_file(path_or_repo_id=path_or_repo_id, filename=requirements_file, **kwargs)\n        with open(requirements, 'r') as f:\n            requirements = f.readlines()\n        for requirement in requirements:\n            requirement = requirement.strip()\n            if not requirement or requirement.startswith('\n                continue\n            try:\n                package_name, delimiter, version_number = split_package_version(requirement)\n            except ValueError:\n                package_name = requirement\n                delimiter, version_number = (None, None)\n            try:\n                local_package_version = importlib.metadata.version(package_name)\n            except importlib.metadata.PackageNotFoundError:\n                failed.append(f'{requirement} (installed: None)')\n                continue\n            if delimiter is not None and version_number is not None:\n                is_satisfied = VersionComparison.from_string(delimiter)(version.parse(local_package_version), version.parse(version_number))\n            else:\n                is_satisfied = True\n            if not is_satisfied:\n                failed.append(f'{requirement} (installed: {local_package_version})')\n    except OSError:\n        pass\n    if failed:\n        raise ImportError(f'Missing requirements in your local environment for `{path_or_repo_id}`:\\n' + '\\n'.join(failed))", "docstring": "Tries to locate `requirements_file` in a local folder or repo, and confirms that the environment has all the\npython dependencies installed.\n\nArgs:\npath_or_repo_id (`str` or `os.PathLike`):\nThis can be either:\n- a string, the *model id* of a model repo on huggingface.co.\n- a path to a *directory* potentially containing the file.\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional arguments to pass to `cached_file`.", "source": "github-repos"}
{"code": "def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):\n    logging.info(u'Downloading: %s to: %s', file_obj.urn, target_path)\n    target_file = open(target_path, 'wb')\n    file_obj.Seek(0)\n    count = 0\n    data_buffer = file_obj.Read(buffer_size)\n    while data_buffer:\n        target_file.write(data_buffer)\n        data_buffer = file_obj.Read(buffer_size)\n        count += 1\n        if (not (count % 3)):\n            logging.debug(u'Downloading: %s: %s done', file_obj.urn, utils.FormatNumberAsString((count * buffer_size)))\n    target_file.close()", "docstring": "Download an aff4 file to the local filesystem overwriting it if it exists.\n\nArgs:\nfile_obj: An aff4 object that supports the file interface (Read, Seek)\ntarget_path: Full path of file to write to.\nbuffer_size: Read in chunks this size.", "source": "codesearchnet"}
{"code": "def VerifyRow(self, parser_mediator, row):\n    \n    if len(row) != 8:\n      return False\n\n    \n    \n    \n    row_bytes = codecs.encode(row['date'], parser_mediator.codepage)\n    if row_bytes.startswith(b'\\xef\\xbb\\xbf'):\n      row['date'] = row['date'][3:]\n      self._encoding = 'utf-8'\n\n    \n    \n    try:\n      timestamp = self._ConvertToTimestamp(\n          row['date'], row['time'], parser_mediator.timezone)\n    except errors.TimestampError:\n      return False\n\n    if timestamp is None:\n      return False\n\n    \n    if (not 'Access Protection' in row['status'] and\n        not 'Would be blocked' in row['status']):\n      return False\n\n    return True", "docstring": "Verifies if a line of the file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def __parameter_default(self, field):\n    \n    if field.default:\n      if isinstance(field, messages.EnumField):\n        return field.default.name\n      elif isinstance(field, messages.BooleanField):\n        \n        \n        return 'true' if field.default else 'false'\n      else:\n        return str(field.default)", "docstring": "Returns default value of field if it has one.\n\nArgs:\nfield: A simple field.\n\nReturns:\nThe default value of the field, if any exists, with the exception of an\nenum field, which will have its value cast to a string.", "source": "juraj-google-style"}
{"code": "def snakecase(string):\n    string = re.sub('[\\\\-\\\\.\\\\s]', '_', str(string))\n    if (not string):\n        return string\n    return (lowercase(string[0]) + re.sub('[A-Z]', (lambda matched: ('_' + lowercase(matched.group(0)))), string[1:]))", "docstring": "Convert string into snake case.\nJoin punctuation with underscore\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Snake cased string.", "source": "codesearchnet"}
{"code": "def merge_strings_files(old_strings_file, new_strings_file):\n    old_localizable_dict = generate_localization_key_to_entry_dictionary_from_file(old_strings_file)\n    output_file_elements = []\n    f = open_strings_file(new_strings_file, 'r+')\n    for (header_comment, comments, key, value) in extract_header_comment_key_value_tuples_from_file(f):\n        if (len(header_comment) > 0):\n            output_file_elements.append(Comment(header_comment))\n        localize_value = value\n        if (key in old_localizable_dict):\n            localize_value = old_localizable_dict[key].value\n        output_file_elements.append(LocalizationEntry(comments, key, localize_value))\n    f.close()\n    write_file_elements_to_strings_file(old_strings_file, output_file_elements)", "docstring": "Merges the old strings file with the new one.\n\nArgs:\nold_strings_file (str): The path to the old strings file (previously produced, and possibly altered)\nnew_strings_file (str): The path to the new strings file (newly produced).", "source": "codesearchnet"}
{"code": "def contains(self, key):\n    path = self.object_path(key)\n    return (os.path.exists(path) and os.path.isfile(path))", "docstring": "Returns whether the object named by `key` exists.\nOptimized to only check whether the file object exists.\n\nArgs:\nkey: Key naming the object to check.\n\nReturns:\nboalean whether the object exists", "source": "codesearchnet"}
{"code": "def scalarize(function):\n\n    def decorated(self, X, *args, **kwargs):\n        scalar = (not isinstance(X, np.ndarray))\n        if scalar:\n            X = np.array([X])\n        result = function(self, X, *args, **kwargs)\n        if scalar:\n            result = result[0]\n        return result\n    decorated.__doc__ = function.__doc__\n    return decorated", "docstring": "Allow methods that only accepts 1-d vectors to work with scalars.\n\nArgs:\nfunction(callable): Function that accepts and returns vectors.\n\nReturns:\ncallable: Decorated function that accepts and returns scalars.", "source": "codesearchnet"}
{"code": "def __init__(self, start, stop, value):\n\t\t\n\t\tself.start = start\n\t\tself.stop = stop\n\t\tself.value = value", "docstring": "Create a mapped range.\n\nArgs:\nstart: The start of the range, inclusive.\nstop: The end of the range, exclusive.\nvalue: The mapped value.", "source": "juraj-google-style"}
{"code": "def learn_transportation_mode(track, clf):\n    for segment in track.segments:\n        tmodes = segment.transportation_modes\n        points = segment.points\n        features = []\n        labels = []\n        for tmode in tmodes:\n            points_part = points[tmode['from']:tmode['to']]\n            if (len(points_part) > 0):\n                features.append(extract_features_2(points_part))\n                labels.append(tmode['label'])\n        clf.learn(features, labels)", "docstring": "Inserts transportation modes of a track into a classifier\n\nArgs:\ntrack (:obj:`Track`)\nclf (:obj:`Classifier`)", "source": "codesearchnet"}
{"code": "def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose'):\n    with tf.variable_scope('upconv'):\n        net_shape = tf.shape(net)\n        height = net_shape[1]\n        width = net_shape[2]\n        spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])\n        if (method == 'nn_upsample_conv'):\n            net = tf.image.resize_nearest_neighbor(net, [(stride[0] * height), (stride[1] * width)])\n            net = tf.pad(net, spatial_pad_1, 'REFLECT')\n            net = layers().Conv2D(num_outputs, (3, 3), activation=tf.nn.relu)(net)\n        elif (method == 'bilinear_upsample_conv'):\n            net = tf.image.resize_bilinear(net, [(stride[0] * height), (stride[1] * width)])\n            net = tf.pad(net, spatial_pad_1, 'REFLECT')\n            net = layers().Conv2D(num_outputs, (3, 3), activation=tf.nn.relu)(net)\n        elif (method == 'conv2d_transpose'):\n            net = layers().Conv2DTranspose(num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net)\n            net = net[(:, 1:, 1:, :)]\n        else:\n            raise ValueError(('Unknown method: [%s]' % method))\n        return net", "docstring": "Upsamples the given inputs.\n\nArgs:\nnet: A Tensor of size [batch_size, height, width, filters].\nnum_outputs: The number of output filters.\nstride: A list of 2 scalars or a 1x2 Tensor indicating the scale,\nrelative to the inputs, of the output dimensions. For example, if kernel\nsize is [2, 3], then the output height and width will be twice and three\ntimes the input size.\nmethod: The upsampling method: 'nn_upsample_conv',\n'bilinear_upsample_conv', or 'conv2d_transpose'.\n\nReturns:\nA Tensor which was upsampled using the specified method.\n\nRaises:\nValueError: if `method` is not recognized.", "source": "codesearchnet"}
{"code": "def register(self, name):\n        \n        def register_func(func):\n            self.store[name] = func\n            return func\n        return register_func", "docstring": "Decorator for registering a function with PyPhi.\n\nArgs:\nname (string): The name of the function", "source": "juraj-google-style"}
{"code": "def set_approvers(self, approver_ids=[], approver_group_ids=[], **kwargs):\n        \n        path = '%s/%s/approvers' % (self._parent.manager.path,\n                                    self._parent.get_id())\n        data = {'approver_ids': approver_ids,\n                'approver_group_ids': approver_group_ids}\n        self.gitlab.http_put(path, post_data=data, **kwargs)", "docstring": "Change MR-level allowed approvers and approver groups.\n\nArgs:\napprover_ids (list): User IDs that can approve MRs\napprover_group_ids (list): Group IDs whose members can approve MRs\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server failed to perform the request", "source": "juraj-google-style"}
{"code": "def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=None):\n    lines = ((['\n    include_state = _IncludeState()\n    function_state = _FunctionState()\n    nesting_state = NestingState()\n    ResetNolintSuppressions()\n    CheckForCopyright(filename, lines, error)\n    ProcessGlobalSuppresions(lines)\n    RemoveMultiLineComments(filename, lines, error)\n    clean_lines = CleansedLines(lines)\n    if (file_extension in GetHeaderExtensions()):\n        CheckForHeaderGuard(filename, clean_lines, error)\n    for line in range(clean_lines.NumLines()):\n        ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions)\n        FlagCxx11Features(filename, clean_lines, line, error)\n    nesting_state.CheckCompletedBlocks(filename, error)\n    CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)\n    if _IsSourceExtension(file_extension):\n        CheckHeaderFileIncluded(filename, include_state, error)\n    CheckForBadCharacters(filename, lines, error)\n    CheckForNewlineAtEOF(filename, lines, error)", "docstring": "Performs lint checks and reports any errors to the given error function.\n\nArgs:\nfilename: Filename of the file that is being processed.\nfile_extension: The extension (dot not included) of the file.\nlines: An array of strings, each representing a line of the file, with the\nlast element being empty if the file is terminated with a newline.\nerror: A callable to which errors are reported, which takes 4 arguments:\nfilename, line number, error level, and message\nextra_check_functions: An array of additional check functions that will be\nrun on each source line. Each function takes 4\narguments: filename, clean_lines, line, error", "source": "codesearchnet"}
{"code": "def ask_to_proceed_with_overwrite(filepath):\n    overwrite = input(f'[WARNING] {filepath} already exists - overwrite? [y/n]').strip().lower()\n    while overwrite not in ('y', 'n'):\n        overwrite = input('Enter \"y\" (overwrite) or \"n\" (cancel).').strip().lower()\n    if overwrite == 'n':\n        return False\n    print_msg('[TIP] Next time specify overwrite=True!')\n    return True", "docstring": "Produces a prompt asking about overwriting a file.\n\nArgs:\nfilepath: the path to the file to be overwritten.\n\nReturns:\nTrue if we can proceed with overwrite, False otherwise.", "source": "github-repos"}
{"code": "def _get_bit(self, n, hash_bytes):\n    if (((hash_bytes[(n \n        return True\n    return False", "docstring": "Determines if the n-th bit of passed bytes is 1 or 0.\n\nArguments:\n\nhash_bytes - List of hash byte values for which the n-th bit value\nshould be checked. Each element of the list should be an integer from\n0 to 255.\n\nReturns:\n\nTrue if the bit is 1. False if the bit is 0.", "source": "codesearchnet"}
{"code": "def dumps(messages):\n    serialized_messages = []\n    try:\n        for message in messages:\n            message_dict = message._dump()\n            serialized_messages.append(message_dict)\n    except AttributeError:\n        _log.error('Improper object for messages serialization.')\n        raise TypeError('Message have to be instance of Message class or subclass.')\n    return json.dumps(serialized_messages, sort_keys=True)", "docstring": "Serialize messages to a JSON formatted str\n\nArgs:\nmessages (list): The list of messages to serialize. Each message in\nthe messages is subclass of Messge.\n\nReturns:\nstr: Serialized messages.\n\nRaises:\nTypeError: If at least one message is not instance of Message class or subclass.", "source": "codesearchnet"}
{"code": "def gradient(poly):\n    \n    return differential(poly, chaospy.poly.collection.basis(1, 1, poly.dim))", "docstring": "Gradient of a polynomial.\n\nArgs:\npoly (Poly) : polynomial to take gradient of.\n\nReturns:\n(Poly) : The resulting gradient.\n\nExamples:\n>>> q0, q1, q2 = chaospy.variable(3)\n>>> poly = 2*q0 + q1*q2\n>>> print(chaospy.gradient(poly))\n[2, q2, q1]", "source": "juraj-google-style"}
{"code": "def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):\n    resnums = ssbio.utils.force_list(resnums)\n    alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1, seqprop2=seqprop2)\n    mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(resnums=resnums, a_aln=alignment[0], b_aln=alignment[1])\n    return mapped", "docstring": "Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.\n\nArgs:\nresnums (int, list): Residue numbers in seqprop1\nseqprop1 (SeqProp): SeqProp object the resnums match to\nseqprop2 (SeqProp): SeqProp object you want to map the resnums to\n\nReturns:\ndict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this\ndictionary, that means the residue number cannot be mapped according to alignment!", "source": "codesearchnet"}
{"code": "def add_import(self, from_package, import_list):\n    if from_package:\n        for item in import_list:\n            t = self.module_info.process_from_import(from_package, item)\n            self.type_map[t.new_name] = t.pytd_node\n            if isinstance(item, tuple) or from_package != 'typing' or self.module_info.module_name == 'protocols':\n                self.aliases[t.new_name] = t.pytd_alias()\n                if t.new_name != 'typing':\n                    self.module_path_map[t.new_name] = t.qualified_name\n    else:\n        for item in import_list:\n            t = self.module_info.process_import(item)\n            if t:\n                self.aliases[t.new_name] = t.pytd_alias()", "docstring": "Add an import.\n\nArgs:\nfrom_package: A dotted package name if this is a \"from\" statement, or None\nif it is an \"import\" statement.\nimport_list: A list of imported items, which are either strings or pairs\nof strings.  Pairs are used when items are renamed during import using\n\"as\".", "source": "github-repos"}
{"code": "def configure(access_key=None, secret_key=None, logger=None):\n    \n    if not logger:\n        logger = log.get_logger('s3')\n    if not all([access_key, secret_key]):\n        logger.info('')\n        access_key = input('AWS Access Key: ')\n        secret_key = input('AWS Secret Key: ')\n    _write_config(access_key, secret_key)\n    logger.info('')\n    logger.info('Completed writing S3 config file.')\n    logger.info('')", "docstring": "Configures s3cmd prior to first use.\n\nIf no arguments are provided, you will be prompted to enter\nthe access key and secret key interactively.\n\nArgs:\n\naccess_key (str): AWS access key\n\nsecret_key (str): AWS secret key", "source": "juraj-google-style"}
{"code": "def _debug_run_and_get_dump(self, sess, fetches, feed_dict=None, debug_ops='DebugIdentity', tolerate_debug_op_creation_failures=False, global_step=-1, validate=True, expected_partition_graph_count=None):\n    run_options = config_pb2.RunOptions(output_partition_graphs=True)\n    debug_utils.watch_graph(run_options, sess.graph, debug_ops=debug_ops, debug_urls=self._debug_urls(), tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures, global_step=global_step)\n    run_metadata = config_pb2.RunMetadata()\n    run_output = sess.run(fetches, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)\n    if expected_partition_graph_count is not None:\n        self.assertEqual(expected_partition_graph_count, len(run_metadata.partition_graphs))\n    return (run_output, debug_data.DebugDumpDir(self._dump_root, partition_graphs=run_metadata.partition_graphs, validate=validate))", "docstring": "Run fetches with debugging and obtain DebugDumpDir.\n\nArgs:\nsess: the tf.compat.v1.Session to be used.\nfetches: fetches of the Session.run().\nfeed_dict: feed dict for the Session.run().\ndebug_ops: name(s) of the debug ops to be used.\ntolerate_debug_op_creation_failures: whether to tolerate debug op\ncreation failures.\nglobal_step: Optional global step.\nvalidate: whether to validate dumped tensors against graph.\nexpected_partition_graph_count: optional count of partition graphs to\nassert on.\n\nReturns:\n1. Return values of the Session.run().\n2. The DebugDumpDir object from the debugged run().", "source": "github-repos"}
{"code": "def verify(self, verify_locations: str) -> None:\n    with open(verify_locations):\n        pass\n    try:\n        self._ocsp_response.basic_verify(verify_locations)\n    except _nassl.OpenSSLError as e:\n        if ('certificate verify error' in str(e)):\n            raise OcspResponseNotTrustedError(verify_locations)\n        raise", "docstring": "Verify that the OCSP response is trusted.\n\nArgs:\nverify_locations: The file path to a trust store containing pem-formatted certificates, to be used for\nvalidating the OCSP response.\n\nRaises OcspResponseNotTrustedError if the validation failed ie. the OCSP response is not trusted.", "source": "codesearchnet"}
{"code": "def _generate_multielement_entries(self, entries, forced_include=None, nproc=None):\n    N = len(self._elt_comp)\n    total_comp = Composition(self._elt_comp)\n    forced_include = (forced_include or [])\n    entry_combos = [itertools.combinations(entries, ((j + 1) - len(forced_include))) for j in range(N)]\n    entry_combos = itertools.chain.from_iterable(entry_combos)\n    if forced_include:\n        entry_combos = [(forced_include + list(ec)) for ec in entry_combos]\n    entry_combos = filter((lambda x: (total_comp < MultiEntry(x).composition)), entry_combos)\n    processed_entries = []\n    total = sum([comb(len(entries), ((j + 1) - len(forced_include))) for j in range(N)])\n    if (total > 1000000.0):\n        warnings.warn('Your pourbaix diagram includes {} entries and may take a long time to generate.'.format(total))\n    if (nproc is not None):\n        f = partial(self.process_multientry, prod_comp=total_comp)\n        with Pool(nproc) as p:\n            processed_entries = list(tqdm(p.imap(f, entry_combos), total=total))\n        processed_entries = list(filter(bool, processed_entries))\n    else:\n        for entry_combo in entry_combos:\n            processed_entry = self.process_multientry(entry_combo, total_comp)\n            if (processed_entry is not None):\n                processed_entries.append(processed_entry)\n    return processed_entries", "docstring": "Create entries for multi-element Pourbaix construction.\n\nThis works by finding all possible linear combinations\nof entries that can result in the specified composition\nfrom the initialized comp_dict.\n\nArgs:\nentries ([PourbaixEntries]): list of pourbaix entries\nto process into MultiEntries\nforced_include ([PourbaixEntries]) list of pourbaix entries\nthat must be included in multielement entries\nnproc (int): number of processes to be used in parallel\ntreatment of entry combos", "source": "codesearchnet"}
{"code": "def _compute_elemwise_op_output_shape(self, shape1, shape2):\n    if None in [shape1, shape2]:\n        return None\n    elif len(shape1) < len(shape2):\n        return self._compute_elemwise_op_output_shape(shape2, shape1)\n    elif not shape2:\n        return shape1\n    output_shape = list(shape1[:-len(shape2)])\n    for i, j in zip(shape1[-len(shape2):], shape2):\n        if i is None or j is None:\n            output_shape.append(None)\n        elif i == 1:\n            output_shape.append(j)\n        elif j == 1:\n            output_shape.append(i)\n        else:\n            if i != j:\n                raise ValueError(f'Inputs have incompatible shapes. Received shapes {shape1} and {shape2}')\n            output_shape.append(i)\n    return tuple(output_shape)", "docstring": "Computes the shape of the resultant of an elementwise operation.\n\nArgs:\nshape1: Tuple or None. Shape of the first tensor\nshape2: Tuple or None. Shape of the second tensor\n\nReturns:\nExpected output shape when an element-wise operation is\ncarried out on 2 tensors with shapes shape1 and shape2.\ntuple or None.\n\nRaises:\nValueError: If shape1 and shape2 are not compatible for\nelement-wise operations.", "source": "github-repos"}
{"code": "def set_shard_dimension(self, shard_dimension):\n    if self._frozen:\n        if self._shard_dimension != shard_dimension:\n            raise ValueError(\"Can't set shard dimension to %d since it has been frozen to use %d.\" % (shard_dimension, self._shard_dimension))\n    else:\n        self._shard_dimension = tensor_shape.as_dimension(shard_dimension)", "docstring": "Sets the shard dimension for the current policy.\n\nIf the policy has been frozen then shard_dimension must match the\nexisting setting.\n\nArgs:\nshard_dimension: The shard dimension to use in the policy.\n\nRaises:\nValueError: If the policy has been frozen and shard_dimension\ndiffers from the frozen value, or shard_dimension can't be\ninterpreted as a Dimension.", "source": "github-repos"}
{"code": "def _parse_expiry(response_data):\n    expires_in = response_data.get('expires_in', None)\n    if (expires_in is not None):\n        return (_helpers.utcnow() + datetime.timedelta(seconds=expires_in))\n    else:\n        return None", "docstring": "Parses the expiry field from a response into a datetime.\n\nArgs:\nresponse_data (Mapping): The JSON-parsed response data.\n\nReturns:\nOptional[datetime]: The expiration or ``None`` if no expiration was\nspecified.", "source": "codesearchnet"}
{"code": "def fit(self, documents, labels, weights=None):\n        \n        block_groups = np.array([self.blockifier.blockify(doc) for doc in documents])\n        mask = [self._has_enough_blocks(blocks) for blocks in block_groups]\n        block_groups = block_groups[mask]\n        labels = np.concatenate(np.array(labels)[mask])\n\n        \n        \n        features_mat = np.concatenate([self.features.fit_transform(blocks)\n                                       for blocks in block_groups])\n        if weights is None:\n            self.model.fit(features_mat, labels)\n        else:\n            weights = np.concatenate(np.array(weights)[mask])\n            self.model.fit(features_mat, labels, sample_weight=weights)\n        return self", "docstring": "Fit :class`Extractor` features and model to a training dataset.\n\nArgs:\nblocks (List[Block])\nlabels (``np.ndarray``)\nweights (``np.ndarray``)\n\nReturns:\n:class`Extractor`", "source": "juraj-google-style"}
{"code": "def address(self, num):\n        \n        url_root = \"company/{}/registered-office-address\"\n        baseuri = self._BASE_URI + url_root.format(num)\n        res = self.session.get(baseuri)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for company addresses by company number.\n\nArgs:\nnum (str): Company number to search on.", "source": "juraj-google-style"}
{"code": "def _SymbolStackEndsWith(self, parser_symbol_stack, stack_pattern):\n    parser_symbol_stack_str = ' '.join((s.type for s in parser_symbol_stack))\n    stack_pattern_str = ' '.join(stack_pattern)\n    return parser_symbol_stack_str.endswith(stack_pattern_str)", "docstring": "Determines if |stack| matches against |symbol_stack|.\n\nArgs:\nsymbol_stack: The symbol stack from parser.symstack left on th parser\nwhen an error was generarted.\nstack: A list of strings to match against the token 'type' in\n|symbol_stack|. (e.g. ['TRANSITION', 'NAME', 'params', '=']", "source": "github-repos"}
{"code": "def _as_row_partitions(self):\n    rank = self.rank\n    if rank is None:\n        raise ValueError('rank must be known for _as_row_partitions')\n    elif rank < 1:\n        raise ValueError('rank must be >= 1 for _as_row_partitions')\n    fully_ragged = self._with_num_row_partitions(rank - 1)\n    return fully_ragged.row_partitions", "docstring": "Returns row partitions representing this shape.\n\nIn order to represent a shape as row partitions, the rank of the shape\nmust be known, and the shape must have rank at least one.\n\nReturns:\nA list of RowPartition objects.\nRaises:\nValueError, if the shape cannot be represented by RowPartitions.", "source": "github-repos"}
{"code": "def AddMapping(self, filename, new_mapping):\n    for field in self._REQUIRED_MAPPING_FIELDS:\n        if (field not in new_mapping):\n            raise problems.InvalidMapping(field)\n    if (filename in self.GetKnownFilenames()):\n        raise problems.DuplicateMapping(filename)\n    self._file_mapping[filename] = new_mapping", "docstring": "Adds an entry to the list of known filenames.\n\nArgs:\nfilename: The filename whose mapping is being added.\nnew_mapping: A dictionary with the mapping to add. Must contain all\nfields in _REQUIRED_MAPPING_FIELDS.\nRaises:\nDuplicateMapping if the filename already exists in the mapping\nInvalidMapping if not all required fields are present", "source": "codesearchnet"}
{"code": "def _compute_edge_nodes(nodes, degree):\n    \n    dimension, _ = np.shape(nodes)\n    nodes1 = np.empty((dimension, degree + 1), order=\"F\")\n    nodes2 = np.empty((dimension, degree + 1), order=\"F\")\n    nodes3 = np.empty((dimension, degree + 1), order=\"F\")\n    curr2 = degree\n    curr3 = -1\n    for i in six.moves.xrange(degree + 1):\n        nodes1[:, i] = nodes[:, i]\n        nodes2[:, i] = nodes[:, curr2]\n        nodes3[:, i] = nodes[:, curr3]\n        \n        curr2 += degree - i\n        curr3 -= i + 2\n    return nodes1, nodes2, nodes3", "docstring": "Compute the nodes of each edges of a surface.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): Control point nodes that define the surface.\ndegree (int): The degree of the surface define by ``nodes``.\n\nReturns:\nTuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes in\nthe edges of the surface.", "source": "juraj-google-style"}
{"code": "def read_tree_nexus(nexus):\n    if (not isinstance(nexus, str)):\n        raise TypeError('nexus must be a str')\n    if nexus.lower().endswith('.gz'):\n        f = gopen(expanduser(nexus))\n    elif isfile(expanduser(nexus)):\n        f = open(expanduser(nexus))\n    else:\n        f = nexus.splitlines()\n    trees = dict()\n    for line in f:\n        if isinstance(line, bytes):\n            l = line.decode().strip()\n        else:\n            l = line.strip()\n        if l.lower().startswith('tree '):\n            i = l.index('=')\n            left = l[:i].strip()\n            right = l[(i + 1):].strip()\n            name = ' '.join(left.split(' ')[1:])\n            trees[name] = read_tree_newick(right)\n    if hasattr(f, 'close'):\n        f.close()\n    return trees", "docstring": "Read a tree from a Nexus string or file\n\nArgs:\n``nexus`` (``str``): Either a Nexus string or the path to a Nexus file (plain-text or gzipped)\n\nReturns:\n``dict`` of ``Tree``: A dictionary of the trees represented by ``nexus``, where keys are tree names (``str``) and values are ``Tree`` objects", "source": "codesearchnet"}
{"code": "def chempot_plot_addons(self, plt, xrange, ref_el, axes, pad=2.4,\n                            rect=[-0.047, 0, 0.84, 1], ylim=[]):\n\n        \n\n        \n        plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)\n        axes.set_xlabel(r\"Chemical potential $\\Delta\\mu_{%s}$ (eV)\" % (ref_el))\n\n        ylim = ylim if ylim else axes.get_ylim()\n        plt.xticks(rotation=60)\n        plt.ylim(ylim)\n        xlim = axes.get_xlim()\n        plt.xlim(xlim)\n        plt.tight_layout(pad=pad, rect=rect)\n        plt.plot([xrange[0], xrange[0]], ylim, '--k')\n        plt.plot([xrange[1], xrange[1]], ylim, '--k')\n        xy = [np.mean([xrange[1]]), np.mean(ylim)]\n        plt.annotate(\"%s-rich\" % (ref_el), xy=xy,\n                     xytext=xy, rotation=90, fontsize=17)\n        xy = [np.mean([xlim[0]]), np.mean(ylim)]\n        plt.annotate(\"%s-poor\" % (ref_el), xy=xy,\n                     xytext=xy, rotation=90, fontsize=17)\n\n        return plt", "docstring": "Helper function to a chempot plot look nicer.\n\nArgs:\nplt (Plot) Plot to add things to.\nxrange (list): xlim parameter\nref_el (str): Element of the referenced chempot.\naxes(axes) Axes object from matplotlib\npad (float) For tight layout\nrect (list): For tight layout\nylim (ylim parameter):\n\nreturn (Plot): Modified plot with addons.\nreturn (Plot): Modified plot with addons.", "source": "juraj-google-style"}
{"code": "def update_configuration(self, configuration):\n        \n        return self._client.update(configuration, uri=self.URI + \"/configuration\")", "docstring": "Updates the metrics configuration with the new values. Overwrites the existing configuration.\n\nArgs:\nconfiguration (dict):\nDictionary with a list of objects which contain frequency, sample interval, and source type for each\nresource-type.\n\nReturns:\ndict: The current configuration for which metrics are being relayed.", "source": "juraj-google-style"}
{"code": "def notify_txn_invalid(self, txn_id, message=None, extended_data=None):\n        \n        invalid_txn_info = {'id': txn_id}\n        if message is not None:\n            invalid_txn_info['message'] = message\n        if extended_data is not None:\n            invalid_txn_info['extended_data'] = extended_data\n\n        with self._lock:\n            for batch_id, txn_ids in self._batch_info.items():\n                if txn_id in txn_ids:\n                    if batch_id not in self._invalid:\n                        self._invalid[batch_id] = [invalid_txn_info]\n                    else:\n                        self._invalid[batch_id].append(invalid_txn_info)\n                    self._pending.discard(batch_id)\n                    self._update_observers(batch_id, ClientBatchStatus.INVALID)\n                    return", "docstring": "Adds a batch id to the invalid cache along with the id of the\ntransaction that was rejected and any error message or extended data.\nRemoves that batch id from the pending set. The cache is only\ntemporary, and the batch info will be purged after one hour.\n\nArgs:\ntxn_id (str): The id of the invalid batch\nmessage (str, optional): Message explaining why batch is invalid\nextended_data (bytes, optional): Additional error data", "source": "juraj-google-style"}
{"code": "def _group_centroid(mol, ilabels, group_atoms):\n    (c1x, c1y, c1z) = (0.0, 0.0, 0.0)\n    for i in group_atoms:\n        orig_idx = ilabels[(i - 1)]\n        oa1 = mol.GetAtom(orig_idx)\n        c1x += float(oa1.x())\n        c1y += float(oa1.y())\n        c1z += float(oa1.z())\n    num_atoms = len(group_atoms)\n    c1x /= num_atoms\n    c1y /= num_atoms\n    c1z /= num_atoms\n    return (c1x, c1y, c1z)", "docstring": "Calculate the centroids of a group atoms indexed by the labels of inchi\n\nArgs:\nmol: The molecule. OpenBabel OBMol object\nilabel: inchi label map\n\nReturns:\nCentroid. Tuple (x, y, z)", "source": "codesearchnet"}
{"code": "def read(self, bands=None, **kwargs):\n    arr = self\n    if (bands is not None):\n        arr = self[(bands, ...)]\n    return arr.compute(scheduler=threaded_get)", "docstring": "Reads data from a dask array and returns the computed ndarray matching the given bands\n\nArgs:\nbands (list): band indices to read from the image. Returns bands in the order specified in the list of bands.\n\nReturns:\nndarray: a numpy array of image data", "source": "codesearchnet"}
{"code": "def slice_constant(data, batch_size=32, name='constant_data', global_step=None):\n  \n  with tf.name_scope(name):\n    all_data = tf.convert_to_tensor(data)\n    global_step = global_step or bookkeeper.global_step()\n\n    count = len(data) / batch_size\n    extra = len(data) - count * batch_size\n\n    if extra:\n      offset = tf.mod(global_step, count)\n      return tf.slice(all_data, offset * batch_size, batch_size)\n    else:\n      offset = tf.mod(global_step, count + 1)\n      return tf.slice(all_data, offset * batch_size,\n                      tf.where(tf.equal(offset, count), extra, batch_size))", "docstring": "Provide a slice based on the global_step.\n\nThis is useful when the entire data array can be stored in memory because it\nallows you to feed the data very efficiently.\n\nArgs:\ndata: A numpy array or tensor.\nbatch_size: The batch size for the produced data.\nname: An optional name for this data.\nglobal_step: A global step variable that is used to read the data. If None\nthen the default prettytensor global_step is used.\nReturns:\nA tensor that produces the given data.", "source": "juraj-google-style"}
{"code": "def get(self, identifier, default=None):\n    if isinstance(identifier, int):\n        values = list(self.data.values())\n        if (0 <= identifier < len(values)):\n            return values[identifier]\n        else:\n            return default\n    return super(Overlay, self).get(identifier, default)", "docstring": "Get a layer in the Overlay.\n\nGet a particular layer in the Overlay using its path string\nor an integer index.\n\nArgs:\nidentifier: Index or path string of the item to return\ndefault: Value to return if no item is found\n\nReturns:\nThe indexed layer of the Overlay", "source": "codesearchnet"}
{"code": "def parallel(devices, fn, *args, **kwargs):\n  \n  if not isinstance(devices, list):\n    raise ValueError(\"devices must be a list\")\n  for x in list(args) + list(six.itervalues(kwargs)):\n    if not isinstance(x, list) or len(x) != len(devices):\n      raise ValueError(\n          \"Argument not a list with same length as devices \"\n          \"arg=%s devices=%s\" % (x, devices))\n  ret = []\n  for i, device in enumerate(devices):\n    with tf.device(device):\n      with tf.variable_scope(\"parallel_%d\" % i):\n        my_args = [x[i] for x in args]\n        my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)}\n        ret.append(fn(*my_args, **my_kwargs))\n  return ret", "docstring": "Call a function once on each device.\n\nArgs:\ndevices: a list of n devices\nfn: a function\n*args: arguments, each of which is a list of length n\n**kwargs: keyword-args, each of which is a list of length n\nReturns:\na list of length n\nRaises:\nValueError: if the arguments are not all lists of length n", "source": "juraj-google-style"}
{"code": "def _ConvertToTimestamp(self, date, time):\n    if (len(date) != 8):\n        raise ValueError('Unsupported length of date string: {0!s}'.format(repr(date)))\n    if ((len(time) < 3) or (len(time) > 4)):\n        raise ValueError('Unsupported length of time string: {0!s}'.format(repr(time)))\n    try:\n        year = int(date[:4], 10)\n        month = int(date[4:6], 10)\n        day = int(date[6:8], 10)\n    except (TypeError, ValueError):\n        raise ValueError('Unable to parse date string: {0!s}'.format(repr(date)))\n    try:\n        hour = int(time[:(- 2)], 10)\n        minutes = int(time[(- 2):], 10)\n    except (TypeError, ValueError):\n        raise ValueError('Unable to parse time string: {0!s}'.format(repr(date)))\n    time_elements_tuple = (year, month, day, hour, minutes, 0)\n    date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n    date_time.is_local_time = True\n    date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE\n    return date_time", "docstring": "Converts date and time strings into a timestamp.\n\nRecent versions of Office Scan write a log field with a Unix timestamp.\nOlder versions may not write this field; their logs only provide a date and\na time expressed in the local time zone. This functions handles the latter\ncase.\n\nArgs:\ndate (str): date as an 8-character string in the YYYYMMDD format.\ntime (str): time as a 3 or 4-character string in the [H]HMM format or a\n6-character string in the HHMMSS format.\n\nReturns:\ndfdatetime_time_elements.TimestampElements: the parsed timestamp.\n\nRaises:\nValueError: if the date and time values cannot be parsed.", "source": "codesearchnet"}
{"code": "def from_yang(self, text: str) -> ScalarValue:\n        \n        res = self.parse_value(text)\n        if res is None:\n            raise InvalidArgument(text)\n        return res", "docstring": "Parse value specified in a YANG module.\n\nArgs:\ntext: String representation of the value.\n\nRaises:\nInvalidArgument: If the receiver type cannot parse the text.", "source": "juraj-google-style"}
{"code": "def set_name(self, vid, name=None, default=False, disable=False):\n    cmds = self.command_builder('name', value=name, default=default, disable=disable)\n    return self.configure_vlan(vid, cmds)", "docstring": "Configures the VLAN name\n\nEosVersion:\n4.13.7M\n\nArgs:\nvid (str): The VLAN ID to Configures\nname (str): The value to configure the vlan name\ndefault (bool): Defaults the VLAN ID name\ndisable (bool): Negates the VLAN ID name\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "codesearchnet"}
{"code": "def join(self, delimiter=' ', overlap_threshold=0.1):\n    sorted_by_start = sorted(self.labels)\n    concat_values = []\n    last_label_end = None\n    for label in sorted_by_start:\n        if ((last_label_end is None) or (((last_label_end - label.start) < overlap_threshold) and (last_label_end > 0))):\n            concat_values.append(label.value)\n            last_label_end = label.end\n        else:\n            raise ValueError('Labels overlap, not able to define the correct order')\n    return delimiter.join(concat_values)", "docstring": "Return a string with all labels concatenated together.\nThe order of the labels is defined by the start of the label.\nIf the overlapping between two labels is greater than ``overlap_threshold``,\nan Exception is thrown.\n\nArgs:\ndelimiter (str): A string to join two consecutive labels.\noverlap_threshold (float): Maximum overlap between two consecutive labels.\n\nReturns:\nstr: A string with all labels concatenated together.\n\nExample:\n>>> ll = LabelList(idx='some', labels=[\n>>>     Label('a', start=0, end=4),\n>>>     Label('b', start=3.95, end=6.0),\n>>>     Label('c', start=7.0, end=10.2),\n>>>     Label('d', start=10.3, end=14.0)\n>>> ])\n>>> ll.join(' - ')\n'a - b - c - d'", "source": "codesearchnet"}
{"code": "def get_tick(self, index):\n    name = self.tick_name(index)\n    if (name is None):\n        return [pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY), 0]\n    return [Error.NO_ERROR, self.ticks[name]]", "docstring": "Get a tick's interval.\n\nArgs:\nindex (int): The index of the tick that you want to fetch.\n\nReturns:\nint, int: Error code and The tick's interval in seconds.\n\nA value of 0 means that the tick is disabled.", "source": "codesearchnet"}
{"code": "def _Enum(docstring, *names):\n  \n  enums = dict(zip(names, range(len(names))))\n  reverse = dict((value, key) for key, value in enums.iteritems())\n  enums['reverse_mapping'] = reverse\n  enums['__doc__'] = docstring\n  return type('Enum', (object,), enums)", "docstring": "Utility to generate enum classes used by annotations.\n\nArgs:\ndocstring: Docstring for the generated enum class.\n*names: Enum names.\n\nReturns:\nA class that contains enum names as attributes.", "source": "juraj-google-style"}
{"code": "def adjust_saturation(img, saturation_factor):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    enhancer = ImageEnhance.Color(img)\n    img = enhancer.enhance(saturation_factor)\n    return img", "docstring": "Adjust color saturation of an image.\n\nArgs:\nimg (PIL Image): PIL Image to be adjusted.\nsaturation_factor (float):  How much to adjust the saturation. 0 will\ngive a black and white image, 1 will give the original image while\n2 will enhance the saturation by a factor of 2.\n\nReturns:\nPIL Image: Saturation adjusted image.", "source": "codesearchnet"}
{"code": "def parse_config(file_path):\n    \n    if not os.path.isfile(file_path):\n        return {}\n    parser = ConfigParser()\n    parser.read(file_path)\n    \n    for s in parser._sections:\n        for v in six.iterkeys(parser._sections[s]):\n            parser._sections[s][v] = parser._sections[s][v].split(\"\n    return parser._sections", "docstring": "Convert the CISM configuration file to a python dictionary\n\nArgs:\nfile_path: absolute path to the configuration file\n\nReturns:\nA dictionary representation of the given file", "source": "juraj-google-style"}
{"code": "def managed(name, table, data, record=None):\n    \n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n    if record is None:\n        record = name\n    current_data = {\n        column: __salt__['openvswitch.db_get'](table, record, column)\n        for column in data\n    }\n\n    \n    comment_changes = 'Columns have been updated.'\n    comment_no_changes = 'All columns are already up to date.'\n    comment_error = 'Error while updating column {0}: {1}'\n\n    \n    if __opts__['test']:\n        for column in data:\n            if data[column] != current_data[column]:\n                ret['changes'][column] = {'old': current_data[column],\n                                          'new': data[column]}\n        if ret['changes']:\n            ret['result'] = None\n            ret['comment'] = comment_changes\n        else:\n            ret['result'] = True\n            ret['comment'] = comment_no_changes\n        return ret\n\n    for column in data:\n        if data[column] != current_data[column]:\n            result = __salt__['openvswitch.db_set'](table, record, column,\n                                                    data[column])\n            if result is not None:\n                ret['comment'] = comment_error.format(column, result)\n                ret['result'] = False\n                return ret\n            ret['changes'][column] = {'old': current_data[column],\n                                      'new': data[column]}\n    ret['result'] = True\n    ret['comment'] = comment_no_changes\n    return ret", "docstring": "Ensures that the specified columns of the named record have the specified\nvalues.\n\nArgs:\nname: The name of the record.\ntable: The name of the table to which the record belongs.\ndata: Dictionary containing a mapping from column names to the desired\nvalues. Columns that exist, but are not specified in this\ndictionary are not touched.\nrecord: The name of the record (optional). Replaces name if specified.", "source": "juraj-google-style"}
{"code": "def device(self, idx):\n        \n\n        class GpuDevice(Structure):\n            pass\n\n        c_nvmlDevice_t = POINTER(GpuDevice)\n\n        c_index = c_uint(idx)\n        device = c_nvmlDevice_t()\n        _check_return(_NVML.get_function(\n            \"nvmlDeviceGetHandleByIndex_v2\")(c_index, byref(device)))\n        return NvidiaDevice(device)", "docstring": "Get a specific GPU device\n\nArgs:\nidx: index of device\n\nReturns:\nNvidiaDevice: single GPU device", "source": "juraj-google-style"}
{"code": "def get_backdoor(self, name, version=''):\n        \n        params = {}\n        params['or'] = 1\n        params['c-name'] = name\n        params['c-aliases__in'] = name\n        r = requests.get('{0}/backdoors/'.format(self.url),\n                         params=params,\n                         verify=self.verify,\n                         proxies=self.proxies)\n        if r.status_code == 200:\n            result_data = json.loads(r.text)\n            if 'meta' not in result_data:\n                return None\n            if 'total_count' not in result_data['meta']:\n                return None\n            if result_data['meta']['total_count'] <= 0:\n                return None\n            if 'objects' not in result_data:\n                return None\n            for backdoor in result_data['objects']:\n                if 'version' in backdoor:\n                    if backdoor['version'] == version:\n                        return backdoor\n        else:\n            log.error('Non-200 status code: {}'.format(r.status_code))\n        return None", "docstring": "Searches for the backdoor based on name and version.\n\nArgs:\nname: The name of the backdoor. This can be an alias.\nversion: The version.\nReturns:\nReturns a JSON object contain one or more backdoor results or\nNone if not found.", "source": "juraj-google-style"}
{"code": "def _RecAnnotate(tree, annotate_name, annotate_value):\n    for child in tree.children:\n        _RecAnnotate(child, annotate_name, annotate_value)\n    if isinstance(tree, pytree.Leaf):\n        cur_annotate = pytree_utils.GetNodeAnnotation(tree, annotate_name, default=0)\n        if cur_annotate < annotate_value:\n            pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value)", "docstring": "Recursively set the given annotation on all leafs of the subtree.\n\nTakes care to only increase the penalty. If the node already has a higher\nor equal penalty associated with it, this is a no-op.\n\nArgs:\ntree: subtree to annotate\nannotate_name: name of the annotation to set\nannotate_value: value of the annotation to set", "source": "github-repos"}
{"code": "def __init__(self, fn, buffer_size=_DEFAULT_BUFFER_SIZE):\n    if not callable(fn):\n        raise TypeError('Expected a callable object instead of: %r' % fn)\n    super().__init__()\n    self._fn = fn\n    self._buffer_size = buffer_size", "docstring": "Initializes a CallableFn object wrapping a callable.\n\nArgs:\nfn: A callable object that reduces elements of an iterable to a single\nvalue (like the builtins sum and max). This callable must be capable of\nreceiving the kind of values it generates as output in its input, and\nfor best results, its operation must be commutative and associative.\n\nRaises:\nTypeError: if fn parameter is not a callable type.", "source": "github-repos"}
{"code": "def debug_string(self, indent: int=0) -> str:\n    self_repr = f'{'| ' * indent}{self.__class__.__name__}<{repr(self)}>'\n    if self._children:\n        child_repr = '\\n'.join((child.debug_string(indent=indent + 1) for child in self._children))\n        self_repr = f'{self_repr}\\n{child_repr}'\n    return self_repr", "docstring": "Returns a debug string for the tree rooted at this node.\n\nArgs:\nindent: The level of indentation to begin printing the tree.\n\nReturns:\nA string representing this node and its descendant nodes.", "source": "github-repos"}
{"code": "def logical_not(x):\n    if any_symbolic_tensors((x,)):\n        return LogicalNot().symbolic_call(x)\n    return backend.numpy.logical_not(x)", "docstring": "Computes the element-wise NOT of the given input tensor.\n\nZeros are treated as `False` and non-zeros are treated as `True`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise logical NOT of the input.", "source": "github-repos"}
{"code": "def build_graph(device, input_shape, perm, datatype, num_iters):\n    with ops.device('/%s:0' % device):\n        total_size = np.prod(input_shape)\n        inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)\n        t = constant_op.constant(inp, shape=input_shape)\n        outputs = []\n        transpose_op = array_ops.transpose(t, perm)\n        outputs.append(transpose_op)\n        for _ in range(1, num_iters):\n            with ops.control_dependencies([transpose_op]):\n                transpose_op = array_ops.transpose(t, perm)\n                outputs.append(transpose_op)\n        return control_flow_ops.group(*outputs)", "docstring": "builds a graph containing a sequence of conv2d operations.\n\nArgs:\ndevice: String, the device to run on.\ninput_shape: Shape of the input tensor.\nperm: A list of ints with the same length as input tensor's dimension.\ndatatype: numpy data type of the input tensor.\nnum_iters: number of iterations to run transpose.\n\nReturns:\nAn array of tensors to run()", "source": "github-repos"}
{"code": "def _compile_ast_node_to_ir(schema, current_schema_type, ast, location, context):\n    basic_blocks = []\n    local_unique_directives = get_unique_directives(ast)\n    fields = _get_fields(ast)\n    (vertex_fields, property_fields) = fields\n    fragment = _get_inline_fragment(ast)\n    filter_operations = get_local_filter_directives(ast, current_schema_type, vertex_fields)\n    fragment_exists = (fragment is not None)\n    fields_exist = (vertex_fields or property_fields)\n    if (fragment_exists and fields_exist):\n        raise GraphQLCompilationError(u'Cannot compile GraphQL that has inline fragment and selected fields in the same selection. Please move the selected fields inside the inline fragment.')\n    if (location.field is not None):\n        if fragment_exists:\n            raise AssertionError(u'Found inline fragment at a property field: {} {}'.format(location, fragment))\n        if (len(property_fields) > 0):\n            raise AssertionError(u'Found property fields on a property field: {} {}'.format(location, property_fields))\n    for filter_operation_info in filter_operations:\n        filter_block = process_filter_directive(filter_operation_info, location, context)\n        if (isinstance(location, FoldScopeLocation) and (location.field == COUNT_META_FIELD_NAME)):\n            set_fold_innermost_scope(context)\n            expected_field = expressions.LocalField(COUNT_META_FIELD_NAME)\n            replacement_field = expressions.FoldedContextField(location, GraphQLInt)\n            visitor_fn = expressions.make_replacement_visitor(expected_field, replacement_field)\n            filter_block = filter_block.visit_and_update_expressions(visitor_fn)\n            visitor_fn = expressions.make_type_replacement_visitor(expressions.ContextField, (lambda context_field: expressions.GlobalContextField(context_field.location, context_field.field_type)))\n            filter_block = filter_block.visit_and_update_expressions(visitor_fn)\n            set_fold_count_filter(context)\n            context['global_filters'].append(filter_block)\n        else:\n            basic_blocks.append(filter_block)\n    if (location.field is not None):\n        _compile_property_ast(schema, current_schema_type, ast, location, context, local_unique_directives)\n    elif fragment_exists:\n        basic_blocks.extend(_compile_fragment_ast(schema, current_schema_type, fragment, location, context))\n    else:\n        basic_blocks.extend(_compile_vertex_ast(schema, current_schema_type, ast, location, context, local_unique_directives, fields))\n    return basic_blocks", "docstring": "Compile the given GraphQL AST node into a list of basic blocks.\n\nArgs:\nschema: GraphQL schema object, obtained from the graphql library\ncurrent_schema_type: GraphQLType, the schema type at the current location\nast: the current GraphQL AST node, obtained from the graphql library\nlocation: Location object representing the current location in the query\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\n\nReturns:\nlist of basic blocks corresponding to this GraphQL AST node", "source": "codesearchnet"}
{"code": "def clean_dataset_tags(self):\n    (tags_dict, wildcard_tags) = Tags.tagscleanupdicts()\n\n    def delete_tag(tag):\n        logger.info(('%s - Deleting tag %s!' % (self.data['name'], tag)))\n        return (self.remove_tag(tag), False)\n\n    def update_tag(tag, final_tags, wording, remove_existing=True):\n        text = ('%s - %s: %s -> ' % (self.data['name'], wording, tag))\n        if (not final_tags):\n            logger.error(('%snothing!' % text))\n            return (False, True)\n        tags_lower_five = final_tags[:5].lower()\n        if ((tags_lower_five == 'merge') or (tags_lower_five == 'split') or ((';' not in final_tags) and (len(final_tags) > 50))):\n            logger.error(('%s%s - Invalid final tag!' % (text, final_tags)))\n            return (False, True)\n        if remove_existing:\n            self.remove_tag(tag)\n        tags = ', '.join(self.get_tags())\n        if self.add_tags(final_tags.split(';')):\n            logger.info(('%s%s! Dataset tags: %s' % (text, final_tags, tags)))\n        else:\n            logger.warning(('%s%s - At least one of the tags already exists! Dataset tags: %s' % (text, final_tags, tags)))\n        return (True, False)\n\n    def do_action(tag, tags_dict_key):\n        whattodo = tags_dict[tags_dict_key]\n        action = whattodo[u'action']\n        final_tags = whattodo[u'final tags (semicolon separated)']\n        if (action == u'Delete'):\n            (changed, error) = delete_tag(tag)\n        elif (action == u'Merge'):\n            (changed, error) = update_tag(tag, final_tags, 'Merging')\n        elif (action == u'Fix spelling'):\n            (changed, error) = update_tag(tag, final_tags, 'Fixing spelling')\n        elif (action == u'Non English'):\n            (changed, error) = update_tag(tag, final_tags, 'Anglicising', remove_existing=False)\n        else:\n            changed = False\n            error = False\n        return (changed, error)\n\n    def process_tag(tag):\n        changed = False\n        error = False\n        if (tag in tags_dict.keys()):\n            (changed, error) = do_action(tag, tag)\n        else:\n            for wildcard_tag in wildcard_tags:\n                if fnmatch.fnmatch(tag, wildcard_tag):\n                    (changed, error) = do_action(tag, wildcard_tag)\n                    break\n        return (changed, error)\n    anychange = False\n    anyerror = False\n    for tag in self.get_tags():\n        (changed, error) = process_tag(tag)\n        if changed:\n            anychange = True\n        if error:\n            anyerror = True\n    return (anychange, anyerror)", "docstring": "Clean dataset tags according to tags cleanup spreadsheet and return if any changes occurred\n\nReturns:\nTuple[bool, bool]: Returns (True if tags changed or False if not, True if error or False if not)", "source": "codesearchnet"}
{"code": "def get_videos_for_ids(\n        edx_video_ids,\n        sort_field=None,\n        sort_dir=SortDirection.asc\n):\n    \n    videos, __ = _get_videos_for_filter(\n        {\"edx_video_id__in\":edx_video_ids},\n        sort_field,\n        sort_dir,\n    )\n    return videos", "docstring": "Returns an iterator of videos that match the given list of ids.\n\nArgs:\nedx_video_ids (list)\nsort_field (VideoSortField)\nsort_dir (SortDirection)\n\nReturns:\nA generator expression that contains the videos found, sorted by the\ngiven field and direction, with ties broken by edx_video_id to ensure a\ntotal order", "source": "juraj-google-style"}
{"code": "def __leastsq_fit(tomo_data, weights=None, trace=None, beta=None):\n    if (trace is None):\n        trace = 1.0\n    data = tomo_data['data']\n    keys = data[0]['circuit'].keys()\n    counts = []\n    shots = []\n    ops = []\n    for dat in data:\n        for key in keys:\n            counts.append(dat['counts'][key])\n            shots.append(dat['shots'])\n            projectors = dat['circuit'][key]\n            op = __projector(projectors['meas'], tomo_data['meas_basis'])\n            if ('prep' in projectors):\n                op_prep = __projector(projectors['prep'], tomo_data['prep_basis'])\n                op = np.kron(op_prep.conj(), op)\n            ops.append(op)\n    counts = np.array(counts)\n    shots = np.array(shots)\n    freqs = (counts / shots)\n    if (weights is None):\n        if (beta is None):\n            beta = 0.50922\n        K = len(keys)\n        freqs_hedged = ((counts + beta) / (shots + (K * beta)))\n        weights = np.sqrt((shots / (freqs_hedged * (1 - freqs_hedged))))\n    return __tomo_linear_inv(freqs, ops, weights, trace=trace)", "docstring": "Reconstruct a state from unconstrained least-squares fitting.\n\nArgs:\ntomo_data (list[dict]): state or process tomography data.\nweights (list or array or None): weights to use for least squares\nfitting. The default is standard deviation from a binomial\ndistribution.\ntrace (float or None): trace of returned operator. The default is 1.\nbeta (float or None): hedge parameter (>=0) for computing frequencies\nfrom zero-count data. The default value is 0.50922.\n\nReturns:\nnumpy.array: A numpy array of the reconstructed operator.", "source": "codesearchnet"}
{"code": "def site_specific_coordination_numbers(self):\n    specific_coordination_numbers = {}\n    for site in self.sites:\n        specific_coordination_numbers[site.label] = site.site_specific_neighbours()\n    return specific_coordination_numbers", "docstring": "Returns a dictionary of coordination numbers for each site type.\n\nArgs:\nNone\n\nReturns:\n(Dict(Str:List(Int))) : Dictionary of coordination numbers for each site type, e.g.::\n\n{ 'A' : [ 2, 4 ], 'B' : [ 2 ] }", "source": "codesearchnet"}
{"code": "def get_settable_properties(cls):\n    results = []\n    for attr, value in vars(cls).items():\n        if isinstance(value, property) and value.fset is not None:\n            results.append(attr)\n    return results", "docstring": "Gets the settable properties of a class.\n\nOnly returns the explicitly defined properties with setters.\n\nArgs:\ncls: A class in Python.", "source": "github-repos"}
{"code": "def get_job(self, job_resource_name: str) -> Dict:\n        \n        return self.service.projects().programs().jobs().get(\n            name=job_resource_name).execute()", "docstring": "Returns metadata about a previously created job.\n\nSee get_job_result if you want the results of the job and not just\nmetadata about the job.\n\nParams:\njob_resource_name: A string of the form\n`projects/project_id/programs/program_id/jobs/job_id`.\n\nReturns:\nA dictionary containing the metadata.", "source": "juraj-google-style"}
{"code": "def _get_formatted_date(dataset_date, date_format=None):\n        \n        \n        if dataset_date:\n            if date_format:\n                return dataset_date.strftime(date_format)\n            else:\n                return dataset_date.date().isoformat()\n        else:\n            return None", "docstring": "Get supplied dataset date as string in specified format.\nIf no format is supplied, an ISO 8601 string is returned.\n\nArgs:\ndataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format\ndate_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.\n\nReturns:\nOptional[str]: Dataset date string or None if no date is set", "source": "juraj-google-style"}
{"code": "def _GetValueAsObject(self, property_value):\n    \n    if property_value.type == pyolecf.value_types.BOOLEAN:\n      return property_value.data_as_boolean\n\n    if property_value.type in self._INTEGER_TYPES:\n      return property_value.data_as_integer\n\n    if property_value.type in self._STRING_TYPES:\n      return property_value.data_as_string\n\n    try:\n      data = property_value.data\n    except IOError:\n      data = None\n\n    return data", "docstring": "Retrieves the property value as a Python object.\n\nArgs:\nproperty_value (pyolecf.property_value): OLECF property value.\n\nReturns:\nobject: property value as a Python object.", "source": "juraj-google-style"}
{"code": "def all(self, customer_id, data={}, **kwargs):\n    url = '{}/{}/tokens'.format(self.base_url, customer_id)\n    return self.get_url(url, data, **kwargs)", "docstring": "Get all tokens for given customer Id\n\nArgs:\ncustomer_id : Customer Id for which tokens have to be fetched\n\nReturns:\nToken dicts for given cutomer Id", "source": "codesearchnet"}
{"code": "def _StartProfiling(self, configuration):\n    \n    if not configuration:\n      return\n\n    if configuration.HaveProfileMemoryGuppy():\n      self._guppy_memory_profiler = profilers.GuppyMemoryProfiler(\n          self._name, configuration)\n      self._guppy_memory_profiler.Start()\n\n    if configuration.HaveProfileMemory():\n      self._memory_profiler = profilers.MemoryProfiler(\n          self._name, configuration)\n      self._memory_profiler.Start()\n\n    if configuration.HaveProfileProcessing():\n      identifier = '{0:s}-processing'.format(self._name)\n      self._processing_profiler = profilers.ProcessingProfiler(\n          identifier, configuration)\n      self._processing_profiler.Start()\n\n    if configuration.HaveProfileSerializers():\n      identifier = '{0:s}-serializers'.format(self._name)\n      self._serializers_profiler = profilers.SerializersProfiler(\n          identifier, configuration)\n      self._serializers_profiler.Start()\n\n    if configuration.HaveProfileStorage():\n      self._storage_profiler = profilers.StorageProfiler(\n          self._name, configuration)\n      self._storage_profiler.Start()\n\n    if configuration.HaveProfileTaskQueue():\n      self._task_queue_profiler = profilers.TaskQueueProfiler(\n          self._name, configuration)\n      self._task_queue_profiler.Start()", "docstring": "Starts profiling.\n\nArgs:\nconfiguration (ProfilingConfiguration): profiling configuration.", "source": "juraj-google-style"}
{"code": "def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):\n        \n        if len(self.segments) > 0:\n            return self.segments[0].points[0].time.strftime(name_format) + \".gpx\"\n        else:\n            return \"EmptyTrack\"", "docstring": "Generates a name for the track\n\nThe name is generated based on the date of the first point of the\ntrack, or in case it doesn't exist, \"EmptyTrack\"\n\nArgs:\nname_format (str, optional): Name formar to give to the track, based on\nits start time. Defaults to DEFAULT_FILE_NAME_FORMAT\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def create_walker(self, selector, skip_all=True):\n    if selector.buffered:\n        walker = BufferedStreamWalker(selector, self._engine, skip_all=skip_all)\n        self._queue_walkers.append(walker)\n        return walker\n    if (selector.match_type == DataStream.CounterType):\n        walker = CounterStreamWalker(selector)\n    else:\n        walker = VirtualStreamWalker(selector)\n    self._virtual_walkers.append(walker)\n    return walker", "docstring": "Create a stream walker based on the given selector.\n\nThis function returns a StreamWalker subclass that will\nremain up to date and allow iterating over and popping readings\nfrom the stream(s) specified by the selector.\n\nWhen the stream walker is done, it should be passed to\ndestroy_walker so that it is removed from internal lists that\nare used to always keep it in sync.\n\nArgs:\nselector (DataStreamSelector): The selector describing the\nstreams that we want to iterate over.\nskip_all (bool): Whether to start at the beginning of the data\nor to skip everything and start at the end.  Defaults\nto skipping everything.  This parameter only has any\neffect on buffered stream selectors.\n\nReturns:\nStreamWalker: A properly updating stream walker with the given selector.", "source": "codesearchnet"}
{"code": "def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:\n    words = []\n    token_idx = char_start_idx = char_end_idx = chars_idx = 0\n    for i, word in enumerate(text.split(' ')):\n        token = self.tokenizer.tokenize(word)\n        if start <= token_idx <= end:\n            if token_idx == start:\n                char_start_idx = chars_idx\n            if token_idx == end:\n                char_end_idx = chars_idx + len(word)\n            words += [word]\n        if token_idx > end:\n            break\n        token_idx += len(token)\n        chars_idx += len(word) + 1\n    return {'answer': ' '.join(words), 'start': max(0, char_start_idx), 'end': min(len(text), char_end_idx)}", "docstring": "When decoding from token probabilities, this method maps token indexes to actual word in the initial context.\n\nArgs:\ntext (`str`): The actual context to extract the answer from.\nstart (`int`): The answer starting token index.\nend (`int`): The answer end token index.\n\nReturns:\nDictionary like `{'answer': str, 'start': int, 'end': int}`", "source": "github-repos"}
{"code": "def _ComputeUniquifier(self, debuggee):\n    uniquifier = hashlib.sha1()\n    if (('minorversion' not in debuggee.get('labels', [])) and ('sourceContexts' not in debuggee)):\n        uniquifier_computer.ComputeApplicationUniquifier(uniquifier)\n    return uniquifier.hexdigest()", "docstring": "Computes debuggee uniquifier.\n\nThe debuggee uniquifier has to be identical on all instances. Therefore the\nuniquifier should not include any random numbers and should only be based\non inputs that are guaranteed to be the same on all instances.\n\nArgs:\ndebuggee: complete debuggee message without the uniquifier\n\nReturns:\nHex string of SHA1 hash of project information, debuggee labels and\ndebuglet version.", "source": "codesearchnet"}
{"code": "def CreateDynamicDisplayAdSettings(client, opener):\n    media_service = client.GetService('MediaService', 'v201809')\n    logo = {'xsi_type': 'Image', 'mediaId': _CreateImage(media_service, opener, 'https:\n    dynamic_settings = {'landscapeLogoImage': logo, 'pricePrefix': 'as low as', 'promoText': 'Free shipping!'}\n    return dynamic_settings", "docstring": "Creates dynamic display ad settings.\n\nArgs:\nclient: an AdWordsClient instance.\nopener: an OpenerDirector instance.\n\nReturns:\nA dict containing the dynamic display ad settings.", "source": "codesearchnet"}
{"code": "def update_config_pwd(msg, cfg):\n    msg_type = msg.__class__.__name__.lower()\n    key_fmt = ((msg.profile + '_') + msg_type)\n    if isinstance(msg._auth, (MutableSequence, tuple)):\n        cfg.pwd[key_fmt] = ' :: '.join(msg._auth)\n    else:\n        cfg.pwd[key_fmt] = msg._auth", "docstring": "Updates the profile's auth entry with values set by the user.\nThis will overwrite existing values.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def wrap_inference_results(inference_result_proto):\n  \n  inference_proto = inference_pb2.InferenceResult()\n  if isinstance(inference_result_proto,\n                classification_pb2.ClassificationResponse):\n    inference_proto.classification_result.CopyFrom(\n        inference_result_proto.result)\n  elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):\n    inference_proto.regression_result.CopyFrom(inference_result_proto.result)\n  return inference_proto", "docstring": "Returns packaged inference results from the provided proto.\n\nArgs:\ninference_result_proto: The classification or regression response proto.\n\nReturns:\nAn InferenceResult proto with the result from the response.", "source": "juraj-google-style"}
{"code": "def _analemma_position(self, hour):\n    low = self.calculate_sun(12, 21, hour).is_during_day\n    high = self.calculate_sun(6, 21, hour).is_during_day\n    if (low and high):\n        return 1\n    elif (low or high):\n        return 0\n    else:\n        return (- 1)", "docstring": "Check what the analemma position is for an hour.\n\nThis is useful for calculating hours of analemma curves.\n\nReturns:\n-1 if always night,\n0 if both day and night,\n1 if always day.", "source": "codesearchnet"}
{"code": "def collect_hunt_results(self, hunt):\n    \n    if not os.path.isdir(self.output_path):\n      os.makedirs(self.output_path)\n\n    output_file_path = os.path.join(\n        self.output_path, '.'.join((self.hunt_id, 'zip')))\n\n    if os.path.exists(output_file_path):\n      print('{0:s} already exists: Skipping'.format(output_file_path))\n      return None\n\n    self._check_approval_wrapper(\n        hunt, self._get_and_write_archive, hunt, output_file_path)\n\n    results = self._extract_hunt_results(output_file_path)\n    print('Wrote results of {0:s} to {1:s}'.format(\n        hunt.hunt_id, output_file_path))\n    return results", "docstring": "Download current set of files in results.\n\nArgs:\nhunt: The GRR hunt object to download files from.\n\nReturns:\nlist: tuples containing:\nstr: human-readable description of the source of the collection. For\nexample, the name of the source host.\nstr: path to the collected data.\nRaises:\nValueError: if approval is needed and approvers were not specified.", "source": "juraj-google-style"}
{"code": "def hugepage_support(user, group='hugetlb', nr_hugepages=256,\n                     max_map_count=65536, mnt_point='/run/hugepages/kvm',\n                     pagesize='2MB', mount=True, set_shmmax=False):\n    \n    group_info = add_group(group)\n    gid = group_info.gr_gid\n    add_user_to_group(user, group)\n    if max_map_count < 2 * nr_hugepages:\n        max_map_count = 2 * nr_hugepages\n    sysctl_settings = {\n        'vm.nr_hugepages': nr_hugepages,\n        'vm.max_map_count': max_map_count,\n        'vm.hugetlb_shm_group': gid,\n    }\n    if set_shmmax:\n        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))\n        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages\n        if shmmax_minsize > shmmax_current:\n            sysctl_settings['kernel.shmmax'] = shmmax_minsize\n    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')\n    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)\n    lfstab = fstab.Fstab()\n    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)\n    if fstab_entry:\n        lfstab.remove_entry(fstab_entry)\n    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',\n                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)\n    lfstab.add_entry(entry)\n    if mount:\n        fstab_mount(mnt_point)", "docstring": "Enable hugepages on system.\n\nArgs:\nuser (str)  -- Username to allow access to hugepages to\ngroup (str) -- Group name to own hugepages\nnr_hugepages (int) -- Number of pages to reserve\nmax_map_count (int) -- Number of Virtual Memory Areas a process can own\nmnt_point (str) -- Directory to mount hugepages on\npagesize (str) -- Size of hugepages\nmount (bool) -- Whether to Mount hugepages", "source": "juraj-google-style"}
{"code": "def get_HDX_code_from_location_partial(location, locations=None, configuration=None):\n        \n        \n        hdx_code = Locations.get_HDX_code_from_location(location, locations, configuration)\n\n        if hdx_code is not None:\n            return hdx_code, True\n\n        if locations is None:\n            locations = Locations.validlocations(configuration)\n        locationupper = location.upper()\n        for locdict in locations:\n            locationname = locdict['title'].upper()\n            if locationupper in locationname or locationname in locationupper:\n                return locdict['name'].upper(), False\n\n        return None, False", "docstring": "Get HDX code for location\n\nArgs:\nlocation (str): Location for which to get HDX code\nlocations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nTuple[Optional[str], bool]: HDX code and if the match is exact or (None, False) for no match", "source": "juraj-google-style"}
{"code": "def validate(self, message):\n    if hasattr(message, '__json__'):\n        message = message.__json__()\n        if isinstance(message['body'], six.text_type):\n            message['body'] = json.loads(message['body'])\n        elif isinstance(message['body'], six.binary_type):\n            warnings.warn('Message body is not unicode', DeprecationWarning)\n            message['body'] = json.loads(message['body'].decode('utf-8'))\n    if ('topic' not in message['body']):\n        message['body'] = {'topic': message.get('topic'), 'msg': message['body']}\n    if (not self.validate_signatures):\n        return\n    if (not (message['topic'] == message['body']['topic'])):\n        raise RuntimeWarning('Topic envelope mismatch.')\n    if (not fedmsg.crypto.validate(message['body'], **self.hub.config)):\n        raise RuntimeWarning('Failed to authn message.')", "docstring": "Validate the message before the consumer processes it.\n\nThis needs to raise an exception, caught by moksha.\n\nArgs:\nmessage (dict): The message as a dictionary. This must, at a minimum,\ncontain the 'topic' key with a unicode string value and 'body' key\nwith a dictionary value. However, the message might also be an object\nwith a ``__json__`` method that returns a dict with a 'body' key that\ncan be a unicode string that is JSON-encoded.\n\nRaises:\nRuntimeWarning: If the message is not valid.\nUnicodeDecodeError: If the message body is not unicode or UTF-8 and also\nhappens to contain invalid UTF-8 binary.", "source": "codesearchnet"}
{"code": "def AddWarning(self, warning):\n    \n    self._RaiseIfNotWritable()\n\n    self._AddAttributeContainer(\n        self._CONTAINER_TYPE_EXTRACTION_WARNING, warning)", "docstring": "Adds an warning.\n\nArgs:\nwarning (ExtractionWarning): warning.\n\nRaises:\nIOError: when the storage file is closed or read-only.\nOSError: when the storage file is closed or read-only.", "source": "juraj-google-style"}
{"code": "def windows(self):\n    from foxpuppet.windows import BrowserWindow\n    return [BrowserWindow(self.selenium, handle) for handle in self.selenium.window_handles]", "docstring": "Return a list of all open windows.\n\nReturns:\nlist: List of FoxPuppet BrowserWindow objects.", "source": "codesearchnet"}
{"code": "def objects_delete(self, bucket, key):\n    \n    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))\n    datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,\n                               raw_response=True)", "docstring": "Deletes the specified object.\n\nArgs:\nbucket: the name of the bucket.\nkey: the key of the object within the bucket.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def notify(self, notices):\n        \n        tmpl_html = get_template('required_tags_notice.html')\n        tmpl_text = get_template('required_tags_notice.txt')\n        for recipient, data in list(notices.items()):\n            body_html = tmpl_html.render(data=data)\n            body_text = tmpl_text.render(data=data)\n\n            send_notification(\n                subsystem=self.ns,\n                recipients=[recipient],\n                subject=self.email_subject,\n                body_html=body_html,\n                body_text=body_text\n            )", "docstring": "Send notifications to the recipients provided\n\nArgs:\nnotices (:obj:`dict` of `str`: `list`): A dictionary mapping notification messages to the recipient.\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def _PromptUserForEncryptedVolumeCredential(self, scan_context, locked_scan_node, output_writer):\n    credentials = credentials_manager.CredentialsManager.GetCredentials(locked_scan_node.path_spec)\n    if (locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):\n        line = 'Found an APFS encrypted volume.'\n    elif (locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE):\n        line = 'Found a BitLocker encrypted volume.'\n    elif (locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE):\n        line = 'Found a CoreStorage (FVDE) encrypted volume.'\n    else:\n        line = 'Found an encrypted volume.'\n    output_writer.WriteLine(line)\n    credentials_list = list(credentials.CREDENTIALS)\n    credentials_list.append('skip')\n    output_writer.WriteLine('Supported credentials:')\n    output_writer.WriteLine('')\n    for (index, name) in enumerate(credentials_list):\n        output_writer.WriteLine('  {0:d}. {1:s}'.format((index + 1), name))\n    output_writer.WriteLine('')\n    result = False\n    while (not result):\n        output_writer.WriteString('Select a credential to unlock the volume: ')\n        input_line = sys.stdin.readline()\n        input_line = input_line.strip()\n        if (input_line in credentials_list):\n            credential_identifier = input_line\n        else:\n            try:\n                credential_identifier = int(input_line, 10)\n                credential_identifier = credentials_list[(credential_identifier - 1)]\n            except (IndexError, ValueError):\n                output_writer.WriteLine('Unsupported credential: {0:s}'.format(input_line))\n                continue\n        if (credential_identifier == 'skip'):\n            break\n        getpass_string = 'Enter credential data: '\n        if (sys.platform.startswith('win') and (sys.version_info[0] < 3)):\n            getpass_string = self._EncodeString(getpass_string)\n        credential_data = getpass.getpass(getpass_string)\n        output_writer.WriteLine('')\n        result = self._source_scanner.Unlock(scan_context, locked_scan_node.path_spec, credential_identifier, credential_data)\n        if (not result):\n            output_writer.WriteLine('Unable to unlock volume.')\n            output_writer.WriteLine('')", "docstring": "Prompts the user to provide a credential for an encrypted volume.\n\nArgs:\nscan_context (SourceScannerContext): the source scanner context.\nlocked_scan_node (SourceScanNode): the locked scan node.\noutput_writer (StdoutWriter): the output writer.", "source": "codesearchnet"}
{"code": "def write_bottom_half(f, row_metadata_df, data_df, data_null, data_float_format, metadata_null):\n    \n    \n    size_of_left_bottom_half_df = (row_metadata_df.shape[0],\n                              1 + row_metadata_df.shape[1])\n    left_bottom_half_df = pd.DataFrame(np.full(size_of_left_bottom_half_df, metadata_null, dtype=object))\n\n    \n    bottom_half_df = pd.concat([left_bottom_half_df, data_df.reset_index(drop=True)], axis=1)\n    bottom_half_df.columns = range(bottom_half_df.shape[1])\n\n    \n    bottom_half_df.iloc[:, 0] = row_metadata_df.index.values\n\n    \n    row_metadata_col_indices = range(1, 1 + row_metadata_df.shape[1])\n    bottom_half_df.iloc[:, row_metadata_col_indices] = (\n        row_metadata_df.astype(str).replace(\"nan\", value=metadata_null).values)\n\n    \n    bottom_half_df.to_csv(f, header=False, index=False, sep=\"\\t\",\n                          na_rep=data_null,\n                          float_format=data_float_format)", "docstring": "Write the bottom half of the gct file: row metadata and data.\n\nArgs:\nf (file handle): handle for output file\nrow_metadata_df (pandas df)\ndata_df (pandas df)\ndata_null (string): how to represent missing values in the data\nmetadata_null (string): how to represent missing values in the metadata\ndata_float_format (string): how many decimal points to keep in representing data\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def read_value(self):\n    return array_ops.identity(self._variable, name='read')", "docstring": "Returns the value of this variable, read in the current context.\n\nCan be different from value() if it's on another device, with control\ndependencies, etc.\n\nReturns:\nA `Tensor` containing the value of the variable.", "source": "github-repos"}
{"code": "def getHostCaPath(self, name):\n    cert = self.getHostCert(name)\n    if (cert is None):\n        return None\n    return self._getCaPath(cert)", "docstring": "Gets the path to the CA certificate that issued a given host keypair.\n\nArgs:\nname (str): The name of the host keypair.\n\nExamples:\nGet the path to the CA cert which issue the cert for \"myhost\":\n\nmypath = cdir.getHostCaPath('myhost')\n\nReturns:\nstr: The path if exists.", "source": "codesearchnet"}
{"code": "def decode(message, pblite, ignore_first_item=False):\n    if (not isinstance(pblite, list)):\n        logger.warning('Ignoring invalid message: expected list, got %r', type(pblite))\n        return\n    if ignore_first_item:\n        pblite = pblite[1:]\n    if (pblite and isinstance(pblite[(- 1)], dict)):\n        extra_fields = {int(field_number): value for (field_number, value) in pblite[(- 1)].items()}\n        pblite = pblite[:(- 1)]\n    else:\n        extra_fields = {}\n    fields_values = itertools.chain(enumerate(pblite, start=1), extra_fields.items())\n    for (field_number, value) in fields_values:\n        if (value is None):\n            continue\n        try:\n            field = message.DESCRIPTOR.fields_by_number[field_number]\n        except KeyError:\n            if (value not in [[], '', 0]):\n                logger.debug('Message %r contains unknown field %s with value %r', message.__class__.__name__, field_number, value)\n            continue\n        if (field.label == FieldDescriptor.LABEL_REPEATED):\n            _decode_repeated_field(message, field, value)\n        else:\n            _decode_field(message, field, value)", "docstring": "Decode pblite to Protocol Buffer message.\n\nThis method is permissive of decoding errors and will log them as warnings\nand continue decoding where possible.\n\nThe first element of the outer pblite list must often be ignored using the\nignore_first_item parameter because it contains an abbreviation of the name\nof the protobuf message (eg.  cscmrp for ClientSendChatMessageResponseP)\nthat's not part of the protobuf.\n\nArgs:\nmessage: protocol buffer message instance to decode into.\npblite: list representing a pblite-serialized message.\nignore_first_item: If True, ignore the item at index 0 in the pblite\nlist, making the item at index 1 correspond to field 1 in the\nmessage.", "source": "codesearchnet"}
{"code": "def errorhandler_callback(cls, exc):\n    if exc.flash_message:\n        flash(exc.flash_message, exc.flash_level)\n    if (exc.redirect is not MISSING):\n        return redirect(url_for(exc.redirect, **exc.redirect_args))\n    error_result = exc.error_page()\n    if (error_result is not None):\n        return (error_result, (exc.status_code or 500))", "docstring": "This function should be called in the global error handlers. This\nwill allow for consolidating of cleanup tasks if the exception\nbubbles all the way to the top of the stack.\n\nFor example, this method will automatically rollback the database\nsession if the exception bubbles to the top.\n\nThis is the method that :meth:`register_errorhandler` adds as an\nerrorhandler. See the documentation there for more info.\n\nArgs:\nexc (FleakerBaseException):\nThe exception that was thrown that we are to handle.", "source": "codesearchnet"}
{"code": "def format_snippet(sensor_graph):\n    \n\n    output = []\n\n    \n    output.append(\"disable\")\n    output.append(\"clear\")\n    output.append(\"reset\")\n\n    \n    for node in sensor_graph.dump_nodes():\n        output.append('add_node \"{}\"'.format(node))\n\n    \n    for streamer in sensor_graph.streamers:\n        line = \"add_streamer '{}' '{}' {} {} {}\".format(streamer.selector, streamer.dest, streamer.automatic, streamer.format, streamer.report_type)\n\n        if streamer.with_other is not None:\n            line += ' --withother {}'.format(streamer.with_other)\n\n        output.append(line)\n\n    \n    for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()):\n        output.append(\"set_constant '{}' {}\".format(stream, value))\n\n    \n    output.append(\"persist\")\n\n    output.append(\"back\")\n\n    \n    app_tag = sensor_graph.metadata_database.get('app_tag')\n    app_version = sensor_graph.metadata_database.get('app_version')\n\n    if app_tag is not None:\n        if app_version is None:\n            app_version = \"0.0\"\n\n        output.append(\"test_interface\")\n        output.append(\"set_version app %d --version '%s'\" % (app_tag, app_version))\n        output.append(\"back\")\n\n    \n    output.append(\"config_database\")\n    output.append(\"clear_variables\")\n\n    for slot, conf_vars in sensor_graph.config_database.items():\n        for conf_var, conf_def in conf_vars.items():\n            conf_type, conf_val = conf_def\n\n            if conf_type == 'binary':\n                conf_val = 'hex:' + hexlify(conf_val)\n            elif isinstance(conf_val, str):\n                conf_val = '\"%s\"' % conf_val\n\n            output.append(\"set_variable '{}' {} {} {}\".format(slot, conf_var, conf_type, conf_val))\n\n    \n    output.append(\"back\")\n    output.append(\"reset\")\n\n    return \"\\n\".join(output) + '\\n'", "docstring": "Format this sensor graph as iotile command snippets.\n\nThis includes commands to reset and clear previously stored\nsensor graphs.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format", "source": "juraj-google-style"}
{"code": "def run_metadata_graphs(name, data, step=None):\n    summary_metadata = summary_pb2.SummaryMetadata()\n    summary_metadata.plugin_data.plugin_name = 'graph_run_metadata_graph'\n    summary_metadata.plugin_data.content = b'1'\n    data = config_pb2.RunMetadata(function_graphs=data.function_graphs, partition_graphs=data.partition_graphs)\n    with summary_scope(name, 'graph_run_metadata_graph_summary', [data, step]) as (tag, _):\n        with ops.device('cpu:0'):\n            tensor = constant_op.constant(data.SerializeToString(), dtype=dtypes.string)\n        return write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "docstring": "Writes graphs from a RunMetadata summary.\n\nArgs:\nname: A name for this summary. The summary tag used for TensorBoard will be\nthis name prefixed by any active name scopes.\ndata: A RunMetadata proto to write.\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\n\nReturns:\nTrue on success, or false if no summary was written because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "github-repos"}
{"code": "def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation:\n    if self._rot_mats is not None:\n        return Rotation(rot_mats=self._rot_mats.to(device=device, dtype=dtype), quats=None)\n    elif self._quats is not None:\n        return Rotation(rot_mats=None, quats=self._quats.to(device=device, dtype=dtype), normalize_quats=False)\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Analogous to the to() method of torch Tensors\n\nArgs:\ndevice:\nA torch device\ndtype:\nA torch dtype\nReturns:\nA copy of the Rotation using the new device and dtype", "source": "github-repos"}
{"code": "def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n    exp_blocked_to_pad = torch.cat([to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2)\n    band_mask = torch.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad)\n    band_mask.unsqueeze_(1)\n    return band_mask", "docstring": "Create 3D attention mask from a 2D tensor mask.\n\nArgs:\nfrom_blocked_mask: 2D Tensor of shape [batch_size,\nfrom_seq_length//from_block_size, from_block_size].\nto_blocked_mask: int32 Tensor of shape [batch_size,\nto_seq_length//to_block_size, to_block_size].\n\nReturns:\nfloat Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,\n3*to_block_size].", "source": "github-repos"}
{"code": "def build_from_config(self, config):\n    if config:\n        if 'input_shape' in config:\n            self.build(config['input_shape'])\n        elif 'shapes_dict' in config:\n            self.build(**config['shapes_dict'])", "docstring": "Builds the layer's states with the supplied config dict.\n\nBy default, this method calls the `build(config[\"input_shape\"])` method,\nwhich creates weights based on the layer's input shape in the supplied\nconfig. If your config contains other information needed to load the\nlayer's state, you should override this method.\n\nArgs:\nconfig: Dict containing the input shape associated with this layer.", "source": "github-repos"}
{"code": "def __init__(self, length=None, experimenter=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_EXPERIMENTER)\n        self.length = length\n        self.experimenter = experimenter", "docstring": "Create ActionExperimenterHeader with the optional parameters below.\n\nArgs:\nexperimenter (int): The experimenter field is the Experimenter ID,\nwhich takes the same form as in struct ofp_experimenter.", "source": "juraj-google-style"}
{"code": "def _get_new_alive_state(self, new_seq, new_log_probs, new_cache):\n    new_finished_flags = tf.equal(new_seq[(:, :, (- 1))], self.eos_id)\n    new_log_probs += (tf.to_float(new_finished_flags) * (- INF))\n    (top_alive_seq, top_alive_log_probs, top_alive_cache) = _gather_topk_beams([new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size, self.beam_size)\n    return {_StateKeys.ALIVE_SEQ: top_alive_seq, _StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs, _StateKeys.ALIVE_CACHE: top_alive_cache}", "docstring": "Gather the top k sequences that are still alive.\n\nArgs:\nnew_seq: New sequences generated by growing the current alive sequences\nint32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]\nnew_log_probs: Log probabilities of new sequences\nfloat32 tensor with shape [batch_size, beam_size]\nnew_cache: Dict of cached values for each sequence.\n\nReturns:\nDictionary with alive keys from _StateKeys:\n{Top beam_size sequences that are still alive (don't end with eos_id)\nLog probabilities of top alive sequences\nDict cache storing decoder states for top alive sequences}", "source": "codesearchnet"}
{"code": "def _process_for_docstring(self, node, node_type):\n    if (node.doc is not None):\n        if (node_type == 'module'):\n            if (not node.body):\n                for key in list(self._tokenized_triple_quotes.keys()):\n                    quote_record = self._tokenized_triple_quotes.get(key)\n                    if quote_record:\n                        self._check_docstring_quotes(quote_record)\n                        del self._tokenized_triple_quotes[key]\n            else:\n                for i in range(0, node.body[0].lineno):\n                    quote_record = self._tokenized_triple_quotes.get(i)\n                    if quote_record:\n                        self._check_docstring_quotes(quote_record)\n                        del self._tokenized_triple_quotes[i]\n                        break\n        elif (not node.body):\n            lineno = self._find_docstring_line_for_no_body(node.fromlineno)\n            quote_record = self._tokenized_triple_quotes.get(lineno)\n            if quote_record:\n                self._check_docstring_quotes(quote_record)\n                del self._tokenized_triple_quotes[lineno]\n        else:\n            doc_row = self._find_docstring_line(node.fromlineno, node.tolineno)\n            quote_record = self._tokenized_triple_quotes.get(doc_row)\n            if quote_record:\n                self._check_docstring_quotes(quote_record)\n                del self._tokenized_triple_quotes[doc_row]", "docstring": "Check for docstring quote consistency.\n\nArgs:\nnode: the AST node being visited.\nnode_type: the type of node being operated on.", "source": "codesearchnet"}
{"code": "def run_exit_code(self, returncode):\n        \n        exit_status = False\n        self.log.info('[run] Exit Code {}'.format(returncode))\n\n        self.reports.increment_total()  \n        valid_exit_codes = self.profile.get('exit_codes', [0])\n        self.reports.exit_code(returncode)\n\n        if returncode in valid_exit_codes:\n            exit_status = True\n            self.reports.profile_execution(True)\n            print('App Exit Code: {}{}{}'.format(c.Style.BRIGHT, c.Fore.GREEN, returncode))\n        else:\n            print(\n                'App Exit Code: {}{}{}{} (Valid Exit Codes: {})'.format(\n                    c.Style.BRIGHT,\n                    c.Fore.RED,\n                    returncode,\n                    c.Fore.RESET,\n                    self.profile.get('exit_codes', [0]),\n                )\n            )\n\n            self.reports.profile_execution(False)\n            self.exit_code = 1\n            if self.args.halt_on_fail:\n                raise RuntimeError('App exited with invalid exit code {}'.format(returncode))\n        return exit_status", "docstring": "Handle the exit code for the current run.\n\nArgs:\nreturncode (int): The return exit code.\n\nRaises:\nRuntimeError: Raise on invalid exit code if halt_on_fail is True.\n\nReturns:\nbool: True if exit code is a valid exit code, else False.", "source": "juraj-google-style"}
{"code": "def _add_property(self, name, default_value):\n        \n\n        name = str(name)\n        self._properties[name] = default_value", "docstring": "Add a device property with a given default value.\n\nArgs:\nname (str): The name of the property to add\ndefault_value (int, bool): The value of the property", "source": "juraj-google-style"}
{"code": "def non_transactional(func, args, kwds, allow_existing=True):\n    from . import tasklets\n    ctx = tasklets.get_context()\n    if (not ctx.in_transaction()):\n        return func(*args, **kwds)\n    if (not allow_existing):\n        raise datastore_errors.BadRequestError(('%s cannot be called within a transaction.' % func.__name__))\n    save_ctx = ctx\n    while ctx.in_transaction():\n        ctx = ctx._parent_context\n        if (ctx is None):\n            raise datastore_errors.BadRequestError('Context without non-transactional ancestor')\n    save_ds_conn = datastore._GetConnection()\n    try:\n        if hasattr(save_ctx, '_old_ds_conn'):\n            datastore._SetConnection(save_ctx._old_ds_conn)\n        tasklets.set_context(ctx)\n        return func(*args, **kwds)\n    finally:\n        tasklets.set_context(save_ctx)\n        datastore._SetConnection(save_ds_conn)", "docstring": "A decorator that ensures a function is run outside a transaction.\n\nIf there is an existing transaction (and allow_existing=True), the\nexisting transaction is paused while the function is executed.\n\nArgs:\nallow_existing: If false, throw an exception if called from within\na transaction.  If true, temporarily re-establish the\nprevious non-transactional context.  Defaults to True.\n\nThis supports two forms, similar to transactional().\n\nReturns:\nA wrapper for the decorated function that ensures it runs outside a\ntransaction.", "source": "codesearchnet"}
{"code": "def __chunk(segment, abbr=False):\n    names = ('north', 'east', 'south', 'west', 'north')\n    if (not abbr):\n        sjoin = '-'\n    else:\n        names = [s[0].upper() for s in names]\n        sjoin = ''\n    if ((segment % 2) == 0):\n        return (names[segment].capitalize(), sjoin.join((names[segment].capitalize(), names[segment], names[(segment + 1)])), sjoin.join((names[segment].capitalize(), names[(segment + 1)])), sjoin.join((names[(segment + 1)].capitalize(), names[segment], names[(segment + 1)])))\n    else:\n        return (names[segment].capitalize(), sjoin.join((names[segment].capitalize(), names[(segment + 1)], names[segment])), sjoin.join((names[(segment + 1)].capitalize(), names[segment])), sjoin.join((names[(segment + 1)].capitalize(), names[(segment + 1)], names[segment])))", "docstring": "Generate a ``tuple`` of compass direction names.\n\nArgs:\nsegment (list): Compass segment to generate names for\nabbr (bool): Names should use single letter abbreviations\n\nReturns:\nbool: Direction names for compass segment", "source": "codesearchnet"}
{"code": "def fts_intersection(self, segs):\n        \n        fts_vecs = [self.fts(s) for s in self.filter_segs(segs)]\n        return reduce(lambda a, b: a & b, fts_vecs)", "docstring": "Return the features shared by `segs`\n\nArgs:\nsegs (list): list of Unicode IPA segments\n\nReturns:\nset: set of (value, feature) tuples shared by the valid segments in\n`segs`", "source": "juraj-google-style"}
{"code": "def get_imported_namespaces(self, must_have_imported_data_type=False, consider_annotations=False, consider_annotation_types=False):\n    imported_namespaces = []\n    for (imported_namespace, reason) in self._imported_namespaces.items():\n        if (must_have_imported_data_type and (not reason.data_type)):\n            continue\n        if ((not consider_annotations) and (not (reason.data_type or reason.alias or reason.annotation_type))):\n            continue\n        if ((not consider_annotation_types) and (not (reason.data_type or reason.alias or reason.annotation))):\n            continue\n        imported_namespaces.append(imported_namespace)\n    imported_namespaces.sort(key=(lambda n: n.name))\n    return imported_namespaces", "docstring": "Returns a list of Namespace objects. A namespace is a member of this\nlist if it is imported by the current namespace and a data type is\nreferenced from it. Namespaces are in ASCII order by name.\n\nArgs:\nmust_have_imported_data_type (bool): If true, result does not\ninclude namespaces that were not imported for data types.\nconsider_annotations (bool): If false, result does not include\nnamespaces that were only imported for annotations\nconsider_annotation_types (bool): If false, result does not\ninclude namespaces that were only imported for annotation types.\n\nReturns:\nList[Namespace]: A list of imported namespaces.", "source": "codesearchnet"}
{"code": "def ldr(scatterer, h_pol=True):\n    \n    Z = scatterer.get_Z()\n    if h_pol:\n        return (Z[0,0] - Z[0,1] + Z[1,0] - Z[1,1]) / \\\n               (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n    else:\n        return (Z[0,0] + Z[0,1] - Z[1,0] - Z[1,1]) / \\\n               (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])", "docstring": "Linear depolarizarion ratio (LDR) for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), return LDR_h.\nIf False, return LDR_v.\n\nReturns:\nThe LDR.", "source": "juraj-google-style"}
{"code": "def __batch_update(self, train_events, test_events, n_epoch):\n    for epoch in range(n_epoch):\n        if (n_epoch != 1):\n            np.random.shuffle(train_events)\n        for e in train_events:\n            self.rec.update(e, batch_train=True)\n        MPR = self.__batch_evaluate(test_events)\n        if self.debug:\n            logger.debug(('epoch %2d: MPR = %f' % ((epoch + 1), MPR)))", "docstring": "Batch update called by the fitting method.\n\nArgs:\ntrain_events (list of Event): Positive training events.\ntest_events (list of Event): Test events.\nn_epoch (int): Number of epochs for the batch training.", "source": "codesearchnet"}
{"code": "def update_exif_for_rotated_image(exif):\n    \n    orientation_value = exif.get('0th', ).get(\n        piexif.ImageIFD.Orientation, exif.get('1st', ).get(\n            piexif.ImageIFD.Orientation, None))\n\n    if orientation_value is not None:\n        \n        exif['0th'][piexif.ImageIFD.Orientation] = 1\n        if exif.get('1st', {}).get(piexif.ImageIFD.Orientation) is not None:\n            exif['1st'][piexif.ImageIFD.Orientation] = 1\n\n        \n        \n        if orientation_value > 4:\n            for exif_tag in ['0th', '1st']:\n                if exif.get(exif_tag) is not None:\n                    x, y = (exif.get(exif_tag).get(piexif.ImageIFD.ImageWidth),\n                            exif.get(exif_tag).get(piexif.ImageIFD.ImageLength))\n                    if x is not None and y is not None:\n                        exif[exif_tag][piexif.ImageIFD.ImageWidth] = y\n                        exif[exif_tag][piexif.ImageIFD.ImageLength] = x\n\n                    x, y = (exif.get(exif_tag).get(piexif.ImageIFD.XResolution),\n                            exif.get(exif_tag).get(piexif.ImageIFD.YResolution))\n                    if x is not None and y is not None:\n                        exif[exif_tag][piexif.ImageIFD.XResolution] = y\n                        exif[exif_tag][piexif.ImageIFD.YResolution] = x\n\n                    x, y = (exif.get(exif_tag).get(piexif.ImageIFD.TileWidth),\n                            exif.get(exif_tag).get(piexif.ImageIFD.TileLength))\n                    if x is not None and y is not None:\n                        exif[exif_tag][piexif.ImageIFD.TileWidth] = y\n                        exif[exif_tag][piexif.ImageIFD.TileLength] = x\n            if exif.get('Exif') is not None:\n                x, y = (exif.get('Exif').get(piexif.ExifIFD.PixelXDimension),\n                        exif.get('Exif').get(piexif.ExifIFD.PixelYDimension))\n                if x is not None and y is not None:\n                    exif['Exif'][piexif.ExifIFD.PixelXDimension] = y\n                    exif['Exif'][piexif.ExifIFD.PixelYDimension] = x\n        if exif.get('thumbnail') is not None:\n            try:\n                thumbnail = pil_open(io.BytesIO(exif.get('thumbnail')))\n                thumbnail = autorotate(thumbnail, orientation=orientation_value)\n                with io.BytesIO() as bio:\n                    thumbnail.save(bio, format='jpeg')\n                    bio.seek(0)\n                    exif['thumbnail'] = bio.read()\n            except Exception as e:\n                warnings.warn(\"deprecated\", DeprecationWarning)\n\n    return exif", "docstring": "Modifies the Exif tag if rotation has been performed.\n\n0th, 1st\n--------\nImageWidth = 256\nImageLength = 257\nXResolution = 282\nYResolution = 283\nTileWidth = 322\nTileLength = 323\n\nExif\n----\nPixelXDimension = 40962\nPixelYDimension = 40963\n\nArgs:\nexif (dict): The parsed Exif tag\n\nReturns:\nThe modified Exif dict.", "source": "juraj-google-style"}
{"code": "class Siglip2Encoder(nn.Module):\n\n    def __init__(self, config: Siglip2Config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Siglip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    @can_return_tuple\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Siglip2EncoderLayer`].\n\nArgs:\nconfig: Siglip2Config", "source": "github-repos"}
{"code": "def swd_sync(self, pad=False):\n        \n        if pad:\n            self._dll.JLINK_SWD_SyncBytes()\n        else:\n            self._dll.JLINK_SWD_SyncBits()\n        return None", "docstring": "Causes a flush to write all data remaining in output buffers to SWD\ndevice.\n\nArgs:\nself (JLink): the ``JLink`` instance\npad (bool): ``True`` if should pad the data to full byte size\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    \n    event_data_type = getattr(event, 'data_type', '')\n    if event_data_type == 'windows:registry:service':\n      \n      service = WindowsService.FromEvent(event)\n      self._service_collection.AddService(service)", "docstring": "Analyzes an event and creates Windows Services as required.\n\nAt present, this method only handles events extracted from the Registry.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"}
{"code": "def execute(desktop_file, files=None, return_cmd=False, background=False):\n\t\n\n\t\n\n\tdesktop_file_exec = parse(desktop_file)['Exec']\n\n\tfor i in desktop_file_exec.split():\n\t\tif i.startswith('%'):\n\t\t\tdesktop_file_exec = desktop_file_exec.replace(i, '')\n\n\tdesktop_file_exec = desktop_file_exec.replace(r'%F', '')\n\tdesktop_file_exec = desktop_file_exec.replace(r'%f', '')\n\n\tif files:\n\t\tfor i in files:\n\t\t\tdesktop_file_exec += ' ' + i\n\n\tif parse(desktop_file)['Terminal']:\n\t\t\n\t\tdesktop_file_exec = eval(\n\t\t\t\t('__import__(\"libdesktop\").applications.terminal(exec_=\"%s\",'\n\t\t\t\t ' keep_open_after_cmd_exec=True, return_cmd=True)') %\n\t\t\tdesktop_file_exec)\n\n\tif return_cmd:\n\t\treturn desktop_file_exec\n\n\tdesktop_file_proc = sp.Popen([desktop_file_exec], shell=True)\n\n\tif not background:\n\t\tdesktop_file_proc.wait()", "docstring": "Execute a .desktop file.\nExecutes a given .desktop file path properly.\nArgs:\ndesktop_file (str) : The path to the .desktop file.\nfiles\t\t(list): Any files to be launched by the .desktop. Defaults to empty list.\nreturn_cmd   (bool): Return the command (as ``str``) instead of executing. Defaults to ``False``.\nbackground   (bool): Run command in background. Defaults to ``False``.\nReturns:\nstr: Only if ``return_cmd``. Returns command instead of running it. Else returns nothing.", "source": "juraj-google-style"}
{"code": "def __init__(self, n_output_node, input_shape):\n        \n        super(MlpGenerator, self).__init__(n_output_node, input_shape)\n        if len(self.input_shape) > 1:\n            raise ValueError(\"The input dimension is too high.\")", "docstring": "Initialize the instance.\nArgs:\nn_output_node: An integer. Number of output nodes in the network.\ninput_shape: A tuple. Input shape of the network. If it is 1D, ensure the value is appended by a comma\nin the tuple.", "source": "juraj-google-style"}
{"code": "def get_error_intro(tf_error):\n    if hasattr(tf_error, 'op') and hasattr(tf_error.op, 'name'):\n        op_name = tf_error.op.name\n    else:\n        op_name = None\n    intro_lines = ['--------------------------------------', RL('!!! An error occurred during the run !!!', 'blink'), '']\n    out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)\n    if op_name is not None:\n        out.extend(debugger_cli_common.RichTextLines(['You may use the following commands to debug:']))\n        out.extend(_recommend_command('ni -a -d -t %s' % op_name, 'Inspect information about the failing op.', create_link=True))\n        out.extend(_recommend_command('li -r %s' % op_name, 'List inputs to the failing op, recursively.', create_link=True))\n        out.extend(_recommend_command('lt', 'List all tensors dumped during the failing run() call.', create_link=True))\n    else:\n        out.extend(debugger_cli_common.RichTextLines(['WARNING: Cannot determine the name of the op that caused the error.']))\n    more_lines = ['', 'Op name:    %s' % op_name, 'Error type: ' + str(type(tf_error)), '', 'Details:', str(tf_error), '', '--------------------------------------', '']\n    out.extend(debugger_cli_common.RichTextLines(more_lines))\n    return out", "docstring": "Generate formatted intro for TensorFlow run-time error.\n\nArgs:\ntf_error: (errors.OpError) TensorFlow run-time error object.\n\nReturns:\n(RichTextLines) Formatted intro message about the run-time OpError, with\nsample commands for debugging.", "source": "github-repos"}
{"code": "def im_open(self, *, user: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"user\": user})\n        return self.api_call(\"im.open\", json=kwargs)", "docstring": "Opens a direct message channel.\n\nArgs:\nuser (str): The user id to open a DM with. e.g. 'W1234567890'", "source": "juraj-google-style"}
{"code": "def __rtruediv__(self, other):\n    raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use", "docstring": "Use `__floordiv__` via `x // y` instead.\n\nThis function exists only to have a better error message. Instead of:\n`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,\nthis function will explicitly call for usage of `//` instead.\n\nArgs:\nother: Another `Dimension`.\n\nRaises:\nTypeError.", "source": "github-repos"}
{"code": "def user_has_access(self, user):\n        \n        if ROLE_ADMIN in user.roles:\n            return True\n\n        \n        if self.enabled:\n            if not self.required_roles:\n                return True\n\n            for role in self.required_roles:\n                if role in user.roles:\n                    return True\n\n        return False", "docstring": "Check if a user has access to view information for the account\n\nArgs:\nuser (:obj:`User`): User object to check\n\nReturns:\nTrue if user has access to the account, else false", "source": "juraj-google-style"}
{"code": "def get_unique_tags(field_to_obs):\n  \n  return {field: sorted(set([x.get('tag', '') for x in observations]))\n          for field, observations in field_to_obs.items()\n          if field in TAG_FIELDS}", "docstring": "Returns a dictionary of tags that a user could query over.\n\nArgs:\nfield_to_obs: Dict that maps string field to `Observation` list.\n\nReturns:\nA dict that maps keys in `TAG_FIELDS` to a list of string tags present in\nthe event files. If the dict does not have any observations of the type,\nmaps to an empty list so that we can render this to console.", "source": "juraj-google-style"}
{"code": "def validate_bindings(bindings):\n    if (not isinstance(bindings, (list, tuple))):\n        raise exceptions.ConfigurationException('bindings must be a list or tuple of dictionaries, but was a {}'.format(type(bindings)))\n    for binding in bindings:\n        missing_keys = []\n        for key in ('queue', 'exchange', 'routing_keys'):\n            if (key not in binding):\n                missing_keys.append(key)\n        if missing_keys:\n            raise exceptions.ConfigurationException('a binding is missing the following keys from its settings value: {}'.format(missing_keys))\n        if (not isinstance(binding['routing_keys'], (list, tuple))):\n            raise exceptions.ConfigurationException('routing_keys must be a list or tuple, but was a {}'.format(type(binding['routing_keys'])))", "docstring": "Validate the bindings configuration.\n\nRaises:\nexceptions.ConfigurationException: If the configuration provided is of an\ninvalid format.", "source": "codesearchnet"}
{"code": "def load_data_and_labels(filename, encoding='utf-8'):\n    (sents, labels) = ([], [])\n    (words, tags) = ([], [])\n    with open(filename, encoding=encoding) as f:\n        for line in f:\n            line = line.rstrip()\n            if line:\n                (word, tag) = line.split('\\t')\n                words.append(word)\n                tags.append(tag)\n            else:\n                sents.append(words)\n                labels.append(tags)\n                (words, tags) = ([], [])\n    return (sents, labels)", "docstring": "Loads data and label from a file.\n\nArgs:\nfilename (str): path to the file.\nencoding (str): file encoding format.\n\nThe file format is tab-separated values.\nA blank line is required at the end of a sentence.\n\nFor example:\n```\nEU\tB-ORG\nrejects\tO\nGerman\tB-MISC\ncall\tO\nto\tO\nboycott\tO\nBritish\tB-MISC\nlamb\tO\n.\tO\n\nPeter\tB-PER\nBlackburn\tI-PER\n...\n```\n\nReturns:\ntuple(numpy array, numpy array): data and labels.\n\nExample:\n>>> filename = 'conll2003/en/ner/train.txt'\n>>> data, labels = load_data_and_labels(filename)", "source": "codesearchnet"}
{"code": "def __init__(self, encoding, buffer_size=2048):\n    \n    super(EncodedTextReader, self).__init__()\n    self._buffer = ''\n    self._buffer_size = buffer_size\n    self._current_offset = 0\n    self._encoding = encoding\n    self.lines = ''", "docstring": "Initializes the encoded text reader object.\n\nArgs:\nencoding (str): encoding.\nbuffer_size (Optional[int]): buffer size.", "source": "juraj-google-style"}
{"code": "def beta_to_uni(text, strict=False):\n    \n    \n    \n    param_key = (strict,)\n    try:\n       t = _BETA_CONVERSION_TRIES[param_key]\n    except KeyError:\n        t = _create_conversion_trie(*param_key)\n        _BETA_CONVERSION_TRIES[param_key] = t\n\n    transform = []\n    idx = 0\n    possible_word_boundary = False\n\n    while idx < len(text):\n        if possible_word_boundary and _penultimate_sigma_word_final(transform):\n            transform[-2] = _FINAL_LC_SIGMA\n\n        step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])\n\n        if step:\n            possible_word_boundary = text[idx] in _BETA_PUNCTUATION\n\n            key, value = step\n            transform.append(value)\n            idx += len(key)\n        else:\n            possible_word_boundary = True\n\n            transform.append(text[idx])\n            idx += 1\n\n    \n    \n    if possible_word_boundary and _penultimate_sigma_word_final(transform):\n        transform[-2] = _FINAL_LC_SIGMA\n    elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:\n        transform[-1] = _FINAL_LC_SIGMA\n\n    converted = ''.join(transform)\n    return converted", "docstring": "Converts the given text from betacode to unicode.\n\nArgs:\ntext: The beta code text to convert. All of this text must be betacode.\nstrict: Flag to allow for flexible diacritic order on input.\n\nReturns:\nThe converted text.", "source": "juraj-google-style"}
{"code": "def read_user_data(self, user_data_path):\n        \n        raw_user_data = read_value_from_path(user_data_path)\n\n        variables = self.get_variables()\n\n        return parse_user_data(variables, raw_user_data, self.name)", "docstring": "Reads and parses a user_data file.\n\nArgs:\nuser_data_path (str):\npath to the userdata file\n\nReturns:\nstr: the parsed user data file", "source": "juraj-google-style"}
{"code": "def update_user_groups(self, user, claims):\n        \n        if settings.GROUPS_CLAIM is not None:\n            \n            django_groups = [group.name for group in user.groups.all()]\n\n            if settings.GROUPS_CLAIM in claims:\n                claim_groups = claims[settings.GROUPS_CLAIM]\n                if not isinstance(claim_groups, list):\n                    claim_groups = [claim_groups, ]\n            else:\n                logger.debug(\n                    \"The configured groups claim '{}' was not found in the access token\".format(settings.GROUPS_CLAIM))\n                claim_groups = []\n\n            \n            \n            \n            \n            groups_to_remove = set(django_groups) - set(claim_groups)\n            groups_to_add = set(claim_groups) - set(django_groups)\n\n            \n            \n            for group_name in groups_to_remove:\n                group = Group.objects.get(name=group_name)\n                user.groups.remove(group)\n                logger.debug(\"User removed from group '{}'\".format(group_name))\n\n            for group_name in groups_to_add:\n                try:\n                    if settings.MIRROR_GROUPS:\n                        group, _ = Group.objects.get_or_create(name=group_name)\n                        logger.debug(\"Created group '{}'\".format(group_name))\n                    else:\n                        group = Group.objects.get(name=group_name)\n                    user.groups.add(group)\n                    logger.debug(\"User added to group '{}'\".format(group_name))\n                except ObjectDoesNotExist:\n                    \n                    pass", "docstring": "Updates user group memberships based on the GROUPS_CLAIM setting.\n\nArgs:\nuser (django.contrib.auth.models.User): User model instance\nclaims (dict): Claims from the access token", "source": "juraj-google-style"}
{"code": "def from_dict(cls, d):\n    for cat in ['HEADER', 'VERS']:\n        if (cat not in d):\n            d[cat] = None\n    alat = (d['ALAT'] * bohr_to_angstrom)\n    plat = (d['PLAT'] * alat)\n    species = []\n    positions = []\n    for site in d['SITE']:\n        species.append(re.split('[0-9*]', site['ATOM'])[0])\n        positions.append((site['POS'] * alat))\n    if (('CLASS' in d) and ('SPCGRP' in d) and (len(d['SITE']) == len(d['CLASS']))):\n        try:\n            structure = Structure.from_spacegroup(d['SPCGRP'], plat, species, positions, coords_are_cartesian=True)\n        except ValueError:\n            structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True)\n    else:\n        structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True)\n    return cls(structure, header=d['HEADER'], version=d['VERS'])", "docstring": "Creates a CTRL file object from a dictionary. The dictionary\nmust contain the items \"ALAT\", PLAT\" and \"SITE\".\n\nValid dictionary items are:\nALAT: the a-lattice parameter\nPLAT: (3x3) array for the lattice vectors\nSITE: list of dictionaries: {'ATOM': class label,\n'POS': (3x1) array of fractional\ncoordinates}\nCLASS (optional): list of unique atom labels as str\nSPCGRP (optional): space group symbol (str) or number (int)\nHEADER (optional): HEADER text as a str\nVERS (optional): LMTO version as a str\n\nArgs:\nd: The CTRL file as a dictionary.\n\nReturns:\nAn LMTOCtrl object.", "source": "codesearchnet"}
{"code": "def execute_before(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n        new_scope = TriggerScope(sensor_graph, scope_stack, parent.clock(self.interval, basis=self.basis))\n        scope_stack.append(new_scope)", "docstring": "Execute statement before children are executed.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def tagged(pode, tag):\n    if tag.startswith('\n        tag = tag[1:]\n    return (pode[1]['tags'].get(tag) is not None)", "docstring": "Check if a packed node has a given tag.\n\nArgs:\npode (tuple): A packed node.\ntag (str): The tag to check.\n\nExamples:\nCheck if a node is tagged with \"woot\" and dostuff if it is.\n\nif s_node.tagged(node,'woot'):\ndostuff()\n\nNotes:\nIf the tag starts with `#`, this is removed prior to checking.\n\nReturns:\nbool: True if the tag is present. False otherwise.", "source": "codesearchnet"}
{"code": "def raise_not_enough_arguments(self, string):\n    requested = errors.number((self.counter + 1))\n    number = len(self.positional)\n    verb = ('was' if (number == 1) else 'were')\n    what = \"Requested {} formatting argument for '{}' but only {} {} supplied!\"\n    what = what.format(requested, string, number, verb)\n    raise errors.ArgumentError(what)", "docstring": "Raises an errors.ArgumentError if not enough arguments were supplied.\n\nTakes care of formatting for detailed error messages.\n\nArguments:\nstring (str): The string of the phrase for which there weren't enough\narguments.\n\nRaises:\nerrors.ArgumentError with a detailed error message.", "source": "codesearchnet"}
{"code": "def determine_encoding(path, default=None):\n    \n    byte_order_marks = (\n        ('utf-8-sig', (codecs.BOM_UTF8, )),\n        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),\n        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),\n    )\n\n    try:\n        with open(path, 'rb') as infile:\n            raw = infile.read(4)\n    except IOError:\n        return default\n\n    for encoding, boms in byte_order_marks:\n        if any(raw.startswith(bom) for bom in boms):\n            return encoding\n\n    return default", "docstring": "Determines the encoding of a file based on byte order marks.\n\nArguments:\npath (str): The path to the file.\ndefault (str, optional): The encoding to return if the byte-order-mark\nlookup does not return an answer.\n\nReturns:\nstr: The encoding of the file.", "source": "juraj-google-style"}
{"code": "def certificate_rabbitmq(self):\n    if (not self.__certificate_rabbitmq):\n        self.__certificate_rabbitmq = CertificateRabbitMQ(self.__connection)\n    return self.__certificate_rabbitmq", "docstring": "Gets the Certificate RabbitMQ API client.\n\nReturns:\nCertificateRabbitMQ:", "source": "codesearchnet"}
{"code": "def generate_lars_path(weighted_data, weighted_labels):\n        \n        x_vector = weighted_data\n        alphas, _, coefs = lars_path(x_vector,\n                                     weighted_labels,\n                                     method='lasso',\n                                     verbose=False)\n        return alphas, coefs", "docstring": "Generates the lars path for weighted data.\n\nArgs:\nweighted_data: data that has been weighted by kernel\nweighted_label: labels, weighted by kernel\n\nReturns:\n(alphas, coefs), both are arrays corresponding to the\nregularization parameter and coefficients, respectively", "source": "juraj-google-style"}
{"code": "def register_items(self, items):\n        \n        for item in items:\n            item.set_parent(self)\n        self.items.extend(items)", "docstring": "Bulk ``register_item``.\n\nArgs:\nitems (iterable[Tree]):\nSequence of nodes to be registered as children.", "source": "juraj-google-style"}
{"code": "def numpy(self) -> npt.ArrayLike:\n    maybe_arr = self._numpy()\n    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr", "docstring": "Copy of the contents of this Tensor into a NumPy array or scalar.\n\nUnlike NumPy arrays, Tensors are immutable, so this method has to copy\nthe contents to ensure safety. Use `memoryview` to get a readonly\nview of the contents without doing a copy:\n\n>>> t = tf.constant([42])\n>>> np.asarray(memoryview(t))\narray([42], dtype=int32)\n\nNote that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor\nis on GPU, it will have to be transferred to CPU first in order for\n`memoryview` to work.\n\nReturns:\nA NumPy array of the same shape and dtype or a NumPy scalar, if this\nTensor has rank 0.\n\nRaises:\nValueError: If the dtype of this Tensor does not have a compatible\nNumPy dtype.", "source": "github-repos"}
{"code": "def __init__(self, text: str, name: YangIdentifier = None, rev: str = None):\n        \n        super().__init__(text)\n        self.name = name\n        self.rev = rev", "docstring": "Initialize the parser instance.\n\nArgs:\nname: Expected module name.\nrev: Expected revision date.", "source": "juraj-google-style"}
{"code": "def show_events(self, status=None, nids=None):\n        \n        nrows, ncols = get_terminal_size()\n\n        for task in self.iflat_tasks(status=status, nids=nids):\n            report = task.get_event_report()\n            if report:\n                print(make_banner(str(task), width=ncols, mark=\"=\"))\n                print(report)", "docstring": "Print the Abinit events (ERRORS, WARNIING, COMMENTS) to stdout\n\nArgs:\nstatus: if not None, only the tasks with this status are select\nnids: optional list of node identifiers used to filter the tasks.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListMonitoredResourceDescriptors = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMonitoredResourceDescriptorsResponse.FromString,\n        )\n        self.GetMonitoredResourceDescriptor = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMonitoredResourceDescriptorRequest.SerializeToString,\n            response_deserializer=google_dot_api_dot_monitored__resource__pb2.MonitoredResourceDescriptor.FromString,\n        )\n        self.ListMetricDescriptors = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/ListMetricDescriptors\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListMetricDescriptorsResponse.FromString,\n        )\n        self.GetMetricDescriptor = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/GetMetricDescriptor\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.GetMetricDescriptorRequest.SerializeToString,\n            response_deserializer=google_dot_api_dot_metric__pb2.MetricDescriptor.FromString,\n        )\n        self.CreateMetricDescriptor = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/CreateMetricDescriptor\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateMetricDescriptorRequest.SerializeToString,\n            response_deserializer=google_dot_api_dot_metric__pb2.MetricDescriptor.FromString,\n        )\n        self.DeleteMetricDescriptor = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/DeleteMetricDescriptor\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.DeleteMetricDescriptorRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.ListTimeSeries = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/ListTimeSeries\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.ListTimeSeriesResponse.FromString,\n        )\n        self.CreateTimeSeries = channel.unary_unary(\n            \"/google.monitoring.v3.MetricService/CreateTimeSeries\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_metric__service__pb2.CreateTimeSeriesRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _request_reports(self, domains):\n        \n        params = [{'url': domain} for domain in domains]\n        responses = self._requests.multi_get(\n            self.BASE_URL, query_params=params, to_json=False)\n        return responses", "docstring": "Sends multiples requests for the resources to a particular endpoint.\n\nArgs:\nresource_param_name: a string name of the resource parameter.\nresources: list of of the resources.\nendpoint_name: AlexaRankingApi endpoint URL suffix.\nReturns:\nA list of the responses.", "source": "juraj-google-style"}
{"code": "def traverse_nodes(self, node_set, depth=0):\n        \n\n        tab = \"  \"\n        result = list()\n        for n in node_set:\n            repr = (\n                n\n                if self.nodes[n][\"type\"] == \"variable\"\n                else f\"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}\"\n            )\n\n            result.append(f\"{tab * depth}{repr}\")\n            result.extend(\n                self.traverse_nodes(self.successors(n), depth=depth + 1)\n            )\n        return result", "docstring": "BFS traversal of nodes that returns name traversal as large string.\n\nArgs:\nnode_set: Set of input nodes to begin traversal.\ndepth: Current traversal depth for child node viewing.\n\nReturns:\ntype: String containing tabbed traversal view.", "source": "juraj-google-style"}
{"code": "def _kl_pareto_pareto(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_pareto_pareto\"):\n    \n    \n    \n    \n    \n    final_batch_shape = distribution_util.get_broadcast_shape(\n        a.concentration, b.concentration, a.scale, b.scale)\n    common_type = dtype_util.common_dtype(\n        [a.concentration, b.concentration, a.scale, b.scale], tf.float32)\n    return tf.where(\n        a.scale >= b.scale,\n        b.concentration * (tf.math.log(a.scale) - tf.math.log(b.scale)) +\n        tf.math.log(a.concentration) - tf.math.log(b.concentration) +\n        b.concentration / a.concentration - 1.0,\n        tf.broadcast_to(tf.cast(np.inf, common_type), final_batch_shape))", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Pareto.\n\nArgs:\na: instance of a Pareto distribution object.\nb: instance of a Pareto distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_pareto_pareto\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "def get_forced_variation(self, experiment_key, user_id):\n    \n\n    if not self.is_valid:\n      self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation'))\n      return None\n\n    if not validator.is_non_empty_string(experiment_key):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n      return None\n\n    if not isinstance(user_id, string_types):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n      return None\n\n    forced_variation = self.config.get_forced_variation(experiment_key, user_id)\n    return forced_variation.key if forced_variation else None", "docstring": "Gets the forced variation for a given user and experiment.\n\nArgs:\nexperiment_key: A string key identifying the experiment.\nuser_id: The user ID.\n\nReturns:\nThe forced variation key. None if no forced variation key.", "source": "juraj-google-style"}
{"code": "def calculate_sun_from_date_time(self, datetime, is_solar_time=False):\n        \n        \n        if datetime.year != 2016 and self.is_leap_year:\n            datetime = DateTime(datetime.month, datetime.day, datetime.hour,\n                                datetime.minute, True)\n\n        sol_dec, eq_of_time = self._calculate_solar_geometry(datetime)\n\n        hour = datetime.float_hour\n\n        is_daylight_saving = self.is_daylight_saving_hour(datetime.hoy)\n\n        hour = hour + 1 if self.is_daylight_saving_hour(datetime.hoy) else hour\n\n        \n        sol_time = self._calculate_solar_time(hour, eq_of_time, is_solar_time) * 60\n\n        \n        if sol_time / 4 < 0:\n            hour_angle = sol_time / 4 + 180\n        else:\n            hour_angle = sol_time / 4 - 180\n\n        \n        zenith = math.degrees(math.acos\n                              (math.sin(self._latitude) *\n                               math.sin(math.radians(sol_dec)) +\n                               math.cos(self._latitude) *\n                               math.cos(math.radians(sol_dec)) *\n                               math.cos(math.radians(hour_angle))))\n\n        altitude = 90 - zenith\n\n        \n        if altitude > 85:\n            atmos_refraction = 0\n        else:\n            if altitude > 5:\n                atmos_refraction = 58.1 / math.tan(math.radians(altitude))\n\n                - 0.07 / (math.tan(math.radians(altitude)))**3\n                + 0.000086 / (math.tan(math.radians(altitude)))**5\n            else:\n                if altitude > -0.575:\n                    atmos_refraction = 1735\n\n                    + altitude * (-518.2 + altitude *\n                                  (103.4 + altitude *\n                                   (-12.79 + altitude * 0.711)))\n                else:\n\n                    atmos_refraction = -20.772 / math.tan(\n                        math.radians(altitude))\n\n        atmos_refraction /= 3600\n\n        altitude += atmos_refraction\n\n        \n        if hour_angle > 0:\n            azimuth = (math.degrees(\n                math.acos(\n                    (\n                        (math.sin(self._latitude) *\n                         math.cos(math.radians(zenith))) -\n                        math.sin(math.radians(sol_dec))) /\n                    (math.cos(self._latitude) *\n                     math.sin(math.radians(zenith)))\n                )\n            ) + 180) % 360\n        else:\n            azimuth = (540 - math.degrees(math.acos((\n                (math.sin(self._latitude) *\n                 math.cos(math.radians(zenith))) -\n                math.sin(math.radians(sol_dec))) /\n                (math.cos(self._latitude) *\n                 math.sin(math.radians(zenith))))\n            )) % 360\n\n        altitude = math.radians(altitude)\n        azimuth = math.radians(azimuth)\n        \n        return Sun(datetime, altitude, azimuth, is_solar_time, is_daylight_saving,\n                   self.north_angle)", "docstring": "Get Sun for an hour of the year.\n\nThis code is originally written by Trygve Wastvedt \\\n(Trygve.Wastvedt@gmail.com)\nbased on (NOAA) and modified by Chris Mackey and Mostapha Roudsari\n\nArgs:\ndatetime: Ladybug datetime\nis_solar_time: A boolean to indicate if the input hour is solar time.\n(Default: False)\n\nReturns:\nA sun object for this particular time", "source": "juraj-google-style"}
{"code": "def from_cif_string(cif_string, transformations=None, primitive=True, occupancy_tolerance=1.0):\n    parser = CifParser.from_string(cif_string, occupancy_tolerance)\n    raw_string = re.sub(\"'\", '\"', cif_string)\n    cif_dict = parser.as_dict()\n    cif_keys = list(cif_dict.keys())\n    s = parser.get_structures(primitive)[0]\n    partial_cif = cif_dict[cif_keys[0]]\n    if ('_database_code_ICSD' in partial_cif):\n        source = (partial_cif['_database_code_ICSD'] + '-ICSD')\n    else:\n        source = 'uploaded cif'\n    source_info = {'source': source, 'datetime': str(datetime.datetime.now()), 'original_file': raw_string, 'cif_data': cif_dict[cif_keys[0]]}\n    return TransformedStructure(s, transformations, history=[source_info])", "docstring": "Generates TransformedStructure from a cif string.\n\nArgs:\ncif_string (str): Input cif string. Should contain only one\nstructure. For cifs containing multiple structures, please use\nCifTransmuter.\ntransformations ([Transformations]): Sequence of transformations\nto be applied to the input structure.\nprimitive (bool): Option to set if the primitive cell should be\nextracted. Defaults to True. However, there are certain\ninstances where you might want to use a non-primitive cell,\ne.g., if you are trying to generate all possible orderings of\npartial removals or order a disordered structure.\noccupancy_tolerance (float): If total occupancy of a site is\nbetween 1 and occupancy_tolerance, the occupancies will be\nscaled down to 1.\n\nReturns:\nTransformedStructure", "source": "codesearchnet"}
{"code": "def get_numeric_feature_names(example):\n    numeric_features = ('float_list', 'int64_list')\n    features = get_example_features(example)\n    return sorted([feature_name for feature_name in features if (features[feature_name].WhichOneof('kind') in numeric_features)])", "docstring": "Returns a list of feature names for float and int64 type features.\n\nArgs:\nexample: An example.\n\nReturns:\nA list of strings of the names of numeric features.", "source": "codesearchnet"}
{"code": "def init(self, force_deploy=False):\n        \n        machines = self.provider_conf.machines\n        networks = self.provider_conf.networks\n        _networks = []\n        for network in networks:\n            ipnet = IPNetwork(network.cidr)\n            _networks.append({\n                \"netpool\": list(ipnet)[10:-10],\n                \"cidr\": network.cidr,\n                \"roles\": network.roles,\n                \"gateway\": ipnet.ip\n            })\n\n        vagrant_machines = []\n        vagrant_roles = {}\n        j = 0\n        for machine in machines:\n            for _ in range(machine.number):\n                vagrant_machine = {\n                    \"name\": \"enos-%s\" % j,\n                    \"cpu\": machine.flavour_desc[\"core\"],\n                    \"mem\": machine.flavour_desc[\"mem\"],\n                    \"ips\": [n[\"netpool\"].pop() for n in _networks],\n                }\n                vagrant_machines.append(vagrant_machine)\n                \n                for role in machine.roles:\n                    vagrant_roles.setdefault(role, []).append(vagrant_machine)\n                j = j + 1\n\n        logger.debug(vagrant_roles)\n\n        loader = FileSystemLoader(searchpath=TEMPLATE_DIR)\n        env = Environment(loader=loader, autoescape=True)\n        template = env.get_template('Vagrantfile.j2')\n        vagrantfile = template.render(machines=vagrant_machines,\n                                      provider_conf=self.provider_conf)\n        vagrantfile_path = os.path.join(os.getcwd(), \"Vagrantfile\")\n        with open(vagrantfile_path, 'w') as f:\n            f.write(vagrantfile)\n\n        \n        \n        v_env = dict(os.environ)\n        v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend\n\n        v = vagrant.Vagrant(root=os.getcwd(),\n                            quiet_stdout=False,\n                            quiet_stderr=False,\n                            env=v_env)\n        if force_deploy:\n            v.destroy()\n        v.up()\n        v.provision()\n        roles = {}\n        for role, machines in vagrant_roles.items():\n            for machine in machines:\n                keyfile = v.keyfile(vm_name=machine['name'])\n                port = v.port(vm_name=machine['name'])\n                address = v.hostname(vm_name=machine['name'])\n                roles.setdefault(role, []).append(\n                    Host(address,\n                         alias=machine['name'],\n                         user=self.provider_conf.user,\n                         port=port,\n                         keyfile=keyfile))\n\n        networks = [{\n            'cidr': str(n[\"cidr\"]),\n            'start': str(n[\"netpool\"][0]),\n            'end': str(n[\"netpool\"][-1]),\n            'dns': '8.8.8.8',\n            'gateway': str(n[\"gateway\"]),\n            'roles': n[\"roles\"]\n            } for n in _networks]\n        logger.debug(roles)\n        logger.debug(networks)\n\n        return (roles, networks)", "docstring": "Reserve and deploys the vagrant boxes.\n\nArgs:\nforce_deploy (bool): True iff new machines should be started", "source": "juraj-google-style"}
{"code": "def initialize_logger():\n    logger = logging.getLogger('steppy')\n    logger.setLevel(logging.INFO)\n    message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n    console_handler = logging.StreamHandler(sys.stdout)\n    console_handler.setLevel(logging.INFO)\n    console_handler.setFormatter(fmt=message_format)\n    logger.addHandler(console_handler)\n    return logger", "docstring": "Initialize steppy logger.\n\nThis logger is used throughout the steppy library to report computation progress.\n\nExample:\n\nSimple use of steppy logger:\n\n.. code-block:: python\n\ninitialize_logger()\nlogger = get_logger()\nlogger.info('My message inside pipeline')\n\nresult looks like this:\n\n.. code::\n\n2018-06-02 12:33:48 steppy >>> My message inside pipeline\n\nReturns:\nlogging.Logger: logger object formatted in the steppy style", "source": "codesearchnet"}
{"code": "def interpolate(self, x: types.RealTensor, y: types.RealTensor, name: str=None):\n    name = name or self._name + '_interpolate'\n    with tf.name_scope(name):\n        x = tf.convert_to_tensor(x, dtype=self._dtype, name='x')\n        y = tf.convert_to_tensor(y, dtype=self._dtype, name='y')\n        y = tf.expand_dims(y, axis=-2)\n        xy = cubic.interpolate(y, self._spline_yz, name='interpolation_in_y_direction')\n        xy_rank = xy.shape.rank\n        perm = [xy_rank - 1] + list(range(xy_rank - 1))\n        yx = tf.transpose(xy, perm=perm)\n        perm_original = list(range(1, xy_rank)) + [0]\n        x = tf.expand_dims(tf.transpose(x, [xy_rank - 2] + list(range(xy_rank - 2))), axis=-1)\n        z_values = linear.interpolate(x, self._xdata, yx)\n        return tf.squeeze(tf.transpose(z_values, perm=perm_original), axis=-2)", "docstring": "Performs 2-D interpolation on a specified set of points.\n\nArgs:\nx: Real-valued `Tensor` of shape `batch_shape + [num_points]`.\nDefines the x-coordinates at which the interpolation should be\nperformed. Note that `batch_shape` should be the same as in the\nunderlying data.\ny: A `Tensor` of the same shape and `dtype` as `x`.\nDefines the y-coordinates at which the interpolation should be\nperformed.\nname: Python `str` name prefixed to ops created by this function.\nDefault value: `None` which is mapped to the default name\n`interpolate`.\n\nReturns:\nA `Tensor` of the same shape and `dtype` as `x`. Represents the\ninterpolated values of the function on for the coordinates\n`(x, y)`.", "source": "github-repos"}
{"code": "def delete(self, filename):\n        \n        if is_package(filename):\n            self.connection[\"jss\"].Package(filename).delete()\n        else:\n            self.connection[\"jss\"].Script(filename).delete()", "docstring": "Delete a package or script from the distribution server.\n\nThis method simply finds the Package or Script object from the\ndatabase with the API GET call and then deletes it. This will\nremove the file from the database blob.\n\nFor setups which have file share distribution points, you will\nneed to delete the files on the shares also.\n\nArgs:\nfilename: Filename (no path) to delete.", "source": "juraj-google-style"}
{"code": "def read_record(cls, file_handle):\n    buf_length_expected = 12\n    buf = file_handle.read(buf_length_expected)\n    if not buf:\n        return None\n    if len(buf) != buf_length_expected:\n        raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' % (buf_length_expected, codecs.encode(buf, 'hex')))\n    length, length_mask_expected = struct.unpack('<QI', buf)\n    length_mask_actual = cls._masked_crc32c(buf[:8])\n    if length_mask_actual != length_mask_expected:\n        raise ValueError('Not a valid TFRecord. Mismatch of length mask: %s' % codecs.encode(buf, 'hex'))\n    buf_length_expected = length + 4\n    buf = file_handle.read(buf_length_expected)\n    if len(buf) != buf_length_expected:\n        raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' % (buf_length_expected, codecs.encode(buf, 'hex')))\n    data, data_mask_expected = struct.unpack('<%dsI' % length, buf)\n    data_mask_actual = cls._masked_crc32c(data)\n    if data_mask_actual != data_mask_expected:\n        raise ValueError('Not a valid TFRecord. Mismatch of data mask: %s' % codecs.encode(buf, 'hex'))\n    return data", "docstring": "Read a record from a TFRecords file.\n\nArgs:\nfile_handle: The file to read from.\nReturns:\nNone if EOF is reached; the paylod of the record otherwise.\nRaises:\nValueError: If file appears to not be a valid TFRecords file.", "source": "github-repos"}
{"code": "def _ParseFiletime(self, byte_stream):\n    filetime_map = self._GetDataTypeMap('filetime')\n    try:\n        filetime = self._ReadStructureFromByteStream(byte_stream, 0, filetime_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse FILETIME value with error: {0!s}'.format(exception))\n    if (filetime == 0):\n        return None\n    try:\n        return dfdatetime_filetime.Filetime(timestamp=filetime)\n    except ValueError:\n        raise errors.ParseError('Invalid FILETIME value: 0x{0:08x}'.format(filetime))", "docstring": "Parses a FILETIME date and time value from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\ndfdatetime.Filetime: FILETIME date and time value or None if no\nvalue is set.\n\nRaises:\nParseError: if the FILETIME could not be parsed.", "source": "codesearchnet"}
{"code": "def detect_gpt(self, filename, offset, fs_guid):\n    self.logger.debug('Detecting GPT partition type')\n    if (fs_guid not in self.__gpt_plugins):\n        return None\n    else:\n        plugins = self.__gpt_plugins.get(fs_guid)\n        for plugin in plugins:\n            if plugin.detect(filename, offset):\n                return plugin.get_volume_object()\n    return None", "docstring": "Used by rawdisk.session.Session to match gpt partitions agains\nfilesystem plugins.\n\nArgs:\nfilename: device or file that it will read in order to detect the\nfilesystem\nfs_id: filesystem guid to match\n(ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7})\noffset: offset for the filesystem that is being matched\n\nReturns:\nVolume object supplied by matched plugin.\nIf there is no match, None is returned", "source": "codesearchnet"}
{"code": "def E(poly, dist=None, **kws):\n    if (not isinstance(poly, (distributions.Dist, polynomials.Poly))):\n        print(type(poly))\n        print('Approximating expected value...')\n        out = quadrature.quad(poly, dist, veceval=True, **kws)\n        print('done')\n        return out\n    if isinstance(poly, distributions.Dist):\n        (dist, poly) = (poly, polynomials.variable(len(poly)))\n    if (not poly.keys):\n        return numpy.zeros(poly.shape, dtype=int)\n    if isinstance(poly, (list, tuple, numpy.ndarray)):\n        return [E(_, dist, **kws) for _ in poly]\n    if (poly.dim < len(dist)):\n        poly = polynomials.setdim(poly, len(dist))\n    shape = poly.shape\n    poly = polynomials.flatten(poly)\n    keys = poly.keys\n    mom = dist.mom(numpy.array(keys).T, **kws)\n    A = poly.A\n    if (len(dist) == 1):\n        mom = mom[0]\n    out = numpy.zeros(poly.shape)\n    for i in range(len(keys)):\n        out += (A[keys[i]] * mom[i])\n    out = numpy.reshape(out, shape)\n    return out", "docstring": "Expected value operator.\n\n1st order statistics of a probability distribution or polynomial on a given\nprobability space.\n\nArgs:\npoly (Poly, Dist):\nInput to take expected value on.\ndist (Dist):\nDefines the space the expected value is taken on. It is ignored if\n``poly`` is a distribution.\n\nReturns:\n(numpy.ndarray):\nThe expected value of the polynomial or distribution, where\n``expected.shape == poly.shape``.\n\nExamples:\n>>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))\n>>> print(chaospy.E(dist))\n[1. 0.]\n>>> x, y = chaospy.variable(2)\n>>> poly = chaospy.Poly([1, x, y, 10*x*y])\n>>> print(chaospy.E(poly, dist))\n[1. 1. 0. 0.]", "source": "codesearchnet"}
{"code": "def parse_args(self, argv: list[str]) -> ParsedArgs:\n    tool_args = self._parser.parse_args(argv)\n    return self.process_parsed_args(tool_args)", "docstring": "Parses argv.\n\nArgs:\nargv: sys.argv[1:]\n\nReturns:\nA ParsedArgs object", "source": "github-repos"}
{"code": "def _CheckStorageFile(self, storage_file_path):  \n    \n    if os.path.exists(storage_file_path):\n      if not os.path.isfile(storage_file_path):\n        raise errors.BadConfigOption(\n            'Storage file: {0:s} already exists and is not a file.'.format(\n                storage_file_path))\n      logger.warning('Appending to an already existing storage file.')\n\n    dirname = os.path.dirname(storage_file_path)\n    if not dirname:\n      dirname = '.'\n\n    \n    \n\n    if not os.access(dirname, os.W_OK):\n      raise errors.BadConfigOption(\n          'Unable to write to storage file: {0:s}'.format(storage_file_path))", "docstring": "Checks if the storage file path is valid.\n\nArgs:\nstorage_file_path (str): path of the storage file.\n\nRaises:\nBadConfigOption: if the storage file path is invalid.", "source": "juraj-google-style"}
{"code": "def json_compare(self, db_data, user_data):\n        \n        if isinstance(db_data, (string_types)):\n            db_data = json.loads(db_data)\n        if isinstance(user_data, (string_types)):\n            user_data = json.loads(user_data)\n        return self.deep_diff(db_data, user_data)", "docstring": "Validate data in user data.\n\nArgs:\ndb_data (str): The data store in Redis.\nuser_data (str): The user provided data.\n\nReturns:\nbool: True if the data passed validation.", "source": "juraj-google-style"}
{"code": "def write_supercells_with_displacements(supercell, cells_with_disps, filename='geo.gen'):\n    write_dftbp((filename + 'S'), supercell)\n    for ii in range(len(cells_with_disps)):\n        write_dftbp((filename + 'S-{:03d}'.format((ii + 1))), cells_with_disps[ii])", "docstring": "Writes perfect supercell and supercells with displacements\n\nArgs:\nsupercell: perfect supercell\ncells_with_disps: supercells with displaced atoms\nfilename: root-filename", "source": "codesearchnet"}
{"code": "def umask(self, new_mask):\n    if (not is_int_type(new_mask)):\n        raise TypeError('an integer is required')\n    old_umask = self.filesystem.umask\n    self.filesystem.umask = new_mask\n    return old_umask", "docstring": "Change the current umask.\n\nArgs:\nnew_mask: (int) The new umask value.\n\nReturns:\nThe old umask.\n\nRaises:\nTypeError: if new_mask is of an invalid type.", "source": "codesearchnet"}
{"code": "def _faster_to_representation(self, instance):\n    ret = {}\n    fields = self._readable_fields\n    is_fast = isinstance(instance, prefetch.FastObject)\n    id_fields = self._readable_id_fields\n    for field in fields:\n        attribute = None\n        if (is_fast and (not isinstance(field, (DynamicGenericRelationField, DynamicRelationField)))):\n            if ((field in id_fields) and (field.source not in instance)):\n                attribute = instance.get((field.source + '_id'))\n                ret[field.field_name] = attribute\n                continue\n            else:\n                try:\n                    attribute = instance[field.source]\n                except KeyError:\n                    if hasattr(instance, field.source):\n                        attribute = getattr(instance, field.source)\n                    else:\n                        attribute = field.get_attribute(instance)\n                        print(('Missing %s from %s' % (field.field_name, self.__class__.__name__)))\n        else:\n            try:\n                attribute = field.get_attribute(instance)\n            except SkipField:\n                continue\n        if (attribute is None):\n            ret[field.field_name] = None\n        else:\n            ret[field.field_name] = field.to_representation(attribute)\n    return ret", "docstring": "Modified to_representation with optimizations.\n\n1) Returns a plain old dict as opposed to OrderedDict.\n(Constructing ordered dict is ~100x slower than `{}`.)\n2) Ensure we use a cached list of fields\n(this optimization exists in DRF 3.2 but not 3.1)\n\nArguments:\ninstance: a model instance or data object\nReturns:\nDict of primitive datatypes.", "source": "codesearchnet"}
{"code": "def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):\n    _logger.debug('Saving file to {0}, mode={1}.', filename, mode)\n    dir_path = os.path.dirname(filename)\n    if (dir_path and (not os.path.exists(dir_path))):\n        os.makedirs(dir_path)\n    response.body = Body(open(filename, mode))", "docstring": "Open a file object on to the Response Body.\n\nArgs:\nfilename: The path where the file is to be saved\nresponse: Response\nmode: The file mode\n\nThis function will create the directories if not exist.", "source": "codesearchnet"}
{"code": "def _check_consistent_returns(self, node):\n    explicit_returns = [_node for _node in self._return_nodes[node.name] if (_node.value is not None)]\n    if (not explicit_returns):\n        return\n    if ((len(explicit_returns) == len(self._return_nodes[node.name])) and self._is_node_return_ended(node)):\n        return\n    self.add_message('inconsistent-return-statements', node=node)", "docstring": "Check that all return statements inside a function are consistent.\n\nReturn statements are consistent if:\n- all returns are explicit and if there is no implicit return;\n- all returns are empty and if there is, possibly, an implicit return.\n\nArgs:\nnode (astroid.FunctionDef): the function holding the return statements.", "source": "codesearchnet"}
{"code": "def lattice_2_lmpbox(lattice, origin=(0, 0, 0)):\n    (a, b, c) = lattice.abc\n    (xlo, ylo, zlo) = origin\n    xhi = (a + xlo)\n    m = lattice.matrix\n    xy = np.dot(m[1], (m[0] / a))\n    yhi = (np.sqrt(((b ** 2) - (xy ** 2))) + ylo)\n    xz = np.dot(m[2], (m[0] / a))\n    yz = ((np.dot(m[1], m[2]) - (xy * xz)) / (yhi - ylo))\n    zhi = (np.sqrt((((c ** 2) - (xz ** 2)) - (yz ** 2))) + zlo)\n    tilt = (None if lattice.is_orthogonal else [xy, xz, yz])\n    rot_matrix = np.linalg.solve([[(xhi - xlo), 0, 0], [xy, (yhi - ylo), 0], [xz, yz, (zhi - zlo)]], m)\n    bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]\n    symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin)\n    return (LammpsBox(bounds, tilt), symmop)", "docstring": "Converts a lattice object to LammpsBox, and calculates the symmetry\noperation used.\n\nArgs:\nlattice (Lattice): Input lattice.\norigin: A (3,) array/list of floats setting lower bounds of\nsimulation box. Default to (0, 0, 0).\n\nReturns:\nLammpsBox, SymmOp", "source": "codesearchnet"}
{"code": "def expected_error(self, expected: str) -> str:\n        \n\n        if self.finished:\n            return super().expected_error(expected)\n        else:\n            line_index, character_index, line, pointer = self.current_line()\n\n            return 'Expected {} but found {}\\nLine {}, character {}\\n\\n{}{}'.format(\n                expected, repr(self.next_token()), line_index, character_index, line, pointer)", "docstring": "Generate a basic error to include the current state.\n\nA parser can supply only a representation of what it is expecting to\nthis method and the reader will provide the context, including the line\nand character positions.\n\nArgs:\nexpected: A representation of what the parser is currently expecting\n\nReturns:\nA full error message", "source": "juraj-google-style"}
{"code": "def get(self, key, default_value=__NoDefaultSpecified__):\n    os_env_string = (ConfigReader.ENV_PREFIX + key)\n    os_env_string = os_env_string.replace('.', '_')\n    if (type(os.getenv(os_env_string)) != NoneType):\n        return os.getenv(os_env_string)\n    for data_map in self._dataMaps:\n        try:\n            if ('.' in key):\n                namespaces = key.split('.')\n                temp_var = data_map\n                for name in namespaces:\n                    temp_var = temp_var[name]\n                return temp_var\n            else:\n                value = data_map[key]\n                return value\n        except (AttributeError, TypeError, KeyError):\n            pass\n    if (default_value == self.__NoDefaultSpecified__):\n        raise KeyError(u(\"Key '{0}' does not exist\").format(key))\n    else:\n        return default_value", "docstring": "Gets the value from the yaml config based on the key.\n\nNo type casting is performed, any type casting should be\nperformed by the caller.\n\nArgs:\nkey (str) - Config setting key.\n\nKwargs:\ndefault_value - Default value to return if config is not specified.\n\nReturns:\nReturns value stored in config file.", "source": "codesearchnet"}
{"code": "def load_template(filename):\n    template_file = os.path.join(PKG_DIR, 'templates', filename)\n    with open(template_file) as fp:\n        return fp.read()", "docstring": "Load template from file.\n\nThe templates are part of the package and must be included as\n``package_data`` in project ``setup.py``.\n\nArgs:\nfilename (str):\nThe template path. Relative to `peltak` package directory.\n\nReturns:\nstr: The content of the chosen template.", "source": "codesearchnet"}
{"code": "def _add_work_if_necessary(self, timers_fired):\n    if timers_fired:\n        return\n    if self._is_executing():\n        return\n    for applied_ptransform in self._executor.all_nodes:\n        if not self._executor.evaluation_context.is_done(applied_ptransform):\n            pending_bundles = self._executor.node_to_pending_bundles.get(applied_ptransform, [])\n            for bundle in pending_bundles:\n                self._executor.schedule_consumption(applied_ptransform, bundle, [], self._executor.default_completion_callback)\n            self._executor.node_to_pending_bundles[applied_ptransform] = []", "docstring": "Adds more work from the roots if pipeline requires more input.\n\nIf all active TransformExecutors are in a blocked state, add more work\nfrom root nodes that may have additional work. This ensures that if a\npipeline has elements available from the root nodes it will add those\nelements when necessary.\n\nArgs:\ntimers_fired: True if any timers fired prior to this call.", "source": "github-repos"}
{"code": "def get_contrib_features(project_root):\n    project = Project(project_root)\n    contrib = project._resolve('.features.contrib')\n    return _get_contrib_features(contrib)", "docstring": "Get contributed features for a project at project_root\n\nFor a project ``foo``, walks modules within the ``foo.features.contrib``\nsubpackage. A single object that is an instance of ``ballet.Feature`` is\nimported if present in each module. The resulting ``Feature`` objects are\ncollected.\n\nArgs:\nproject_root (str, path-like): Path to project root\n\nReturns:\nList[ballet.Feature]: list of Feature objects", "source": "codesearchnet"}
{"code": "def _code_search(query, github_user=None):\n    \n    github_client = temple.utils.GithubClient()\n    headers = {'Accept': 'application/vnd.github.v3.text-match+json'}\n\n    resp = github_client.get('/search/code',\n                             params={'q': query, 'per_page': 100},\n                             headers=headers)\n\n    if resp.status_code == requests.codes.unprocessable_entity and github_user:\n        raise temple.exceptions.InvalidGithubUserError(\n            'Invalid Github user or org - \"{}\"'.format(github_user))\n    resp.raise_for_status()\n\n    resp_data = resp.json()\n\n    repositories = collections.defaultdict(dict)\n    while True:\n        repositories.update({\n            'git@github.com:{}.git'.format(repo['repository']['full_name']): repo['repository']\n            for repo in resp_data['items']\n        })\n\n        next_url = _parse_link_header(resp.headers).get('next')\n        if next_url:\n            resp = requests.get(next_url, headers=headers)\n            resp.raise_for_status()\n            resp_data = resp.json()\n        else:\n            break\n\n    return repositories", "docstring": "Performs a Github API code search\n\nArgs:\nquery (str): The query sent to Github's code search\ngithub_user (str, optional): The Github user being searched in the query string\n\nReturns:\ndict: A dictionary of repository information keyed on the git SSH url\n\nRaises:\n`InvalidGithubUserError`: When ``github_user`` is invalid", "source": "juraj-google-style"}
{"code": "def lstm(inputs, sequence_length, hparams, train, name, initial_state=None):\n    layers = [_dropout_lstm_cell(hparams, train) for _ in range(hparams.num_hidden_layers)]\n    with tf.variable_scope(name):\n        return tf.nn.dynamic_rnn(tf.nn.rnn_cell.MultiRNNCell(layers), inputs, sequence_length, initial_state=initial_state, dtype=tf.float32, time_major=False)", "docstring": "Adds a stack of LSTM layers on top of input.\n\nArgs:\ninputs: The input `Tensor`, shaped `[batch_size, time_steps, hidden_size]`.\nsequence_length: Lengths of the actual input sequence, excluding padding; a\n`Tensor` shaped `[batch_size]`.\nhparams: HParams; hyperparameters.\ntrain: bool; `True` when constructing training graph to enable dropout.\nname: string; Create variable names under this scope.\ninitial_state: tuple of `LSTMStateTuple`s; the initial state of each layer.\n\nReturns:\nA tuple (outputs, states), where:\noutputs: The output `Tensor`, shaped `[batch_size, time_steps,\nhidden_size]`.\nstates: A tuple of `LSTMStateTuple`s; the final state of each layer.\nBidirectional LSTM returns a concatenation of last forward and backward\nstate, reduced to the original dimensionality.", "source": "codesearchnet"}
{"code": "def movies_box_office(self, **kwargs):\n        \n        path = self._get_path('movies_box_office')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the top box office earning movies from the API.\nSorted by most recent weekend gross ticket sales.\n\nArgs:\nlimit (optional): limits the number of movies returned, default=10\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def text_array_to_html(text_arr):\n    if (not text_arr.shape):\n        return plugin_util.markdown_to_safe_html(np.asscalar(text_arr))\n    warning = ''\n    if (len(text_arr.shape) > 2):\n        warning = plugin_util.markdown_to_safe_html((WARNING_TEMPLATE % len(text_arr.shape)))\n        text_arr = reduce_to_2d(text_arr)\n    html_arr = [plugin_util.markdown_to_safe_html(x) for x in text_arr.reshape((- 1))]\n    html_arr = np.array(html_arr).reshape(text_arr.shape)\n    return (warning + make_table(html_arr))", "docstring": "Take a numpy.ndarray containing strings, and convert it into html.\n\nIf the ndarray contains a single scalar string, that string is converted to\nhtml via our sanitized markdown parser. If it contains an array of strings,\nthe strings are individually converted to html and then composed into a table\nusing make_table. If the array contains dimensionality greater than 2,\nall but two of the dimensions are removed, and a warning message is prefixed\nto the table.\n\nArgs:\ntext_arr: A numpy.ndarray containing strings.\n\nReturns:\nThe array converted to html.", "source": "codesearchnet"}
{"code": "def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'):\n    current_instance_profiles = resource_action(client, action='list_instance_profiles_for_role', log_format='Found Instance Profiles for %(RoleName)s.', RoleName=role_name)['InstanceProfiles']\n    for profile in current_instance_profiles:\n        if (profile['InstanceProfileName'] == profile_name):\n            LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name)\n            break\n    else:\n        for remove_profile in current_instance_profiles:\n            resource_action(client, action='remove_role_from_instance_profile', log_format='Removed Instance Profile from Role: %(InstanceProfileName)s -> %(RoleName)s', InstanceProfileName=remove_profile['InstanceProfileName'], RoleName=role_name)\n        resource_action(client, action='add_role_to_instance_profile', log_format='Added Instance Profile to Role: %(InstanceProfileName)s -> %(RoleName)s', InstanceProfileName=profile_name, RoleName=role_name)\n    return True", "docstring": "Attach an IAM Instance Profile _profile_name_ to Role _role_name_.\n\nArgs:\nrole_name (str): Name of Role.\nprofile_name (str): Name of Instance Profile.\n\nReturns:\nTrue upon successful completion.", "source": "codesearchnet"}
{"code": "def _get_all_trackables(root, exclude_set):\n    all_trackables = trackable_view.TrackableView(root=root).descendants()\n    trackable_index = 0\n    while trackable_index < len(all_trackables) and exclude_set:\n        if all_trackables[trackable_index] in exclude_set:\n            exclude_set.discard(all_trackables[trackable_index])\n            all_trackables.pop(trackable_index)\n        else:\n            trackable_index += 1\n\n    def _trackable_needs_to_be_saved(obj):\n        \n        if hasattr(obj, '__dict__'):\n            if '_serialize_to_tensors' in obj.__dict__ or '_gather_saveables_for_checkpoint' in obj.__dict__ or '_copy_trackable_to_cpu' in obj.__dict__:\n                return True\n        for t in type(obj).mro():\n            if t is base.Trackable:\n                continue\n            elif '_serialize_to_tensors' in t.__dict__ or '_gather_saveables_for_checkpoint' in t.__dict__ or '_copy_trackable_to_cpu' in t.__dict__:\n                return True\n        return False\n    saveable_trackables = [x for x in all_trackables if _trackable_needs_to_be_saved(x)]\n    return (saveable_trackables, all_trackables)", "docstring": "Return the list of checkpointable trackables dependent on `root`.\n\nArgs:\nroot: The root trackable from where we get all its dependent trackables.\nexclude_set: An ObjectIdentitySet of Trackables to exclude before returning.\nEach element in `exclude_set` is a specific instance of a `Trackable`\nand appears precisely once in `TrackableView(root).descendants()`.\n\nReturns:\nsaveable_trackables: All trackables that are saveable in `all_trackables`\n(see definition of \"saveable\" in `_trackable_needs_to_be_saved()`). A\nsubset of `all_trackables`.\nall_trackables: All trackables returned by `TrackableView`'s `descendants()`\nafter excluding `exclude_set`. A superset of `saveable_trackables`.", "source": "github-repos"}
{"code": "def load_kegg(self, kegg_id, kegg_organism_code=None, kegg_seq_file=None, kegg_metadata_file=None, set_as_representative=False, download=False, outdir=None, force_rerun=False):\n    if download:\n        if (not outdir):\n            outdir = self.sequence_dir\n            if (not outdir):\n                raise ValueError('Output directory must be specified')\n    if kegg_organism_code:\n        kegg_id = ((kegg_organism_code + ':') + kegg_id)\n    if self.sequences.has_id(kegg_id):\n        if force_rerun:\n            existing = self.sequences.get_by_id(kegg_id)\n            self.sequences.remove(existing)\n        else:\n            log.debug('{}: KEGG ID already present in list of sequences'.format(kegg_id))\n            kegg_prop = self.sequences.get_by_id(kegg_id)\n    if (not self.sequences.has_id(kegg_id)):\n        kegg_prop = KEGGProp(id=kegg_id, seq=None, fasta_path=kegg_seq_file, txt_path=kegg_metadata_file)\n        if download:\n            kegg_prop.download_seq_file(outdir, force_rerun)\n            kegg_prop.download_metadata_file(outdir, force_rerun)\n        if self.representative_sequence:\n            if (not self.representative_sequence.uniprot):\n                if kegg_prop.equal_to(self.representative_sequence):\n                    self.representative_sequence.update(kegg_prop.get_dict(), only_keys=['sequence_path', 'metadata_path', 'kegg', 'description', 'taxonomy', 'id', 'pdbs', 'uniprot', 'seq_record', 'gene_name', 'refseq'])\n                else:\n                    log.warning('{}: representative sequence does not match mapped KEGG sequence.'.format(self.id))\n        self.sequences.append(kegg_prop)\n    if set_as_representative:\n        self.representative_sequence = kegg_prop\n    return self.sequences.get_by_id(kegg_id)", "docstring": "Load a KEGG ID, sequence, and metadata files into the sequences attribute.\n\nArgs:\nkegg_id (str): KEGG ID\nkegg_organism_code (str): KEGG organism code to prepend to the kegg_id if not part of it already.\nExample: ``eco:b1244``, ``eco`` is the organism code\nkegg_seq_file (str): Path to KEGG FASTA file\nkegg_metadata_file (str): Path to KEGG metadata file (raw KEGG format)\nset_as_representative (bool): If this KEGG ID should be set as the representative sequence\ndownload (bool): If the KEGG sequence and metadata files should be downloaded if not provided\noutdir (str): Where the sequence and metadata files should be downloaded to\nforce_rerun (bool): If ID should be reloaded and files redownloaded\n\nReturns:\nKEGGProp: object contained in the sequences attribute", "source": "codesearchnet"}
{"code": "def get_attached_bytes_map(meta_graph):\n    result = {}\n    if (ATTACHMENT_COLLECTION_SAVED not in meta_graph.collection_def):\n        return result\n    collection_def = meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED]\n    if (collection_def.WhichOneof('kind') != 'bytes_list'):\n        raise ValueError(('Internal CollectionDef for attached messages has kind %s, expected bytes_list' % collection_def.WhichOneof('kind')))\n    attachment = module_attachment_pb2.ModuleAttachment()\n    for value in collection_def.bytes_list.value:\n        attachment.ParseFromString(value)\n        result[attachment.key] = attachment.value\n    return result", "docstring": "Returns the dict of ModuleAttachments stored in `meta_graph`.\n\nArgs:\nmeta_graph: A MetaGraphDef, as built by SavedModelHandler.add_graph_copy()\nfrom some graph.\n\nReturns:\nA dict, containing the `(key, bytes)` items passed to `attach_bytes()`\nwhen the graph had been built.\n\nRaises:\nValueError: if `meta-graph` is malformed.", "source": "codesearchnet"}
{"code": "def parse_napp(napp_id):\n    regex = '([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'\n    compiled_regex = re.compile(regex)\n    matched = compiled_regex.fullmatch(napp_id)\n    if (not matched):\n        msg = '\"{}\" NApp has not the form username/napp_name[:version].'\n        raise KytosException(msg.format(napp_id))\n    return matched.groups()", "docstring": "Convert a napp_id in tuple with username, napp name and version.\n\nArgs:\nnapp_id: String with the form 'username/napp[:version]' (version is\noptional). If no version is found, it will be None.\n\nReturns:\ntuple: A tuple with (username, napp, version)\n\nRaises:\nKytosException: If a NApp has not the form _username/name_.", "source": "codesearchnet"}
{"code": "def createTemplate(data):\n        \n        conn = Qubole.agent()\n        return conn.post(Template.rest_entity_path, data)", "docstring": "Create a new template.\n\nArgs:\n`data`: json data required for creating a template\nReturns:\nDictionary containing the details of the template with its ID.", "source": "juraj-google-style"}
{"code": "def average_precision(truth, recommend):\n    \n    if len(truth) == 0:\n        if len(recommend) == 0:\n            return 1.\n        return 0.\n\n    tp = accum = 0.\n    for n in range(recommend.size):\n        if recommend[n] in truth:\n            tp += 1.\n            accum += (tp / (n + 1.))\n    return accum / truth.size", "docstring": "Average Precision (AP).\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\n\nReturns:\nfloat: AP.", "source": "juraj-google-style"}
{"code": "def HasDateExceptionOn(self, date, exception_type=_EXCEPTION_TYPE_ADD):\n    if (date in self.date_exceptions):\n        return (exception_type == self.date_exceptions[date][0])\n    return False", "docstring": "Test if this service period has a date exception of the given type.\n\nArgs:\ndate: a string of form \"YYYYMMDD\"\nexception_type: the exception type the date should have. Defaults to\n_EXCEPTION_TYPE_ADD\n\nReturns:\nTrue iff this service has service exception of specified type at date.", "source": "codesearchnet"}
{"code": "def get_replacement_inputs(self, applied_ptransform):\n    return tuple(applied_ptransform.inputs) + tuple((side_input.pvalue for side_input in applied_ptransform.side_inputs))", "docstring": "Provides inputs that will be passed to the replacement PTransform.\n\nArgs:\napplied_ptransform: Original AppliedPTransform containing the PTransform\nto be replaced.\n\nReturns:\nAn iterable of PValues that will be passed to the expand() method of the\nreplacement PTransform.", "source": "github-repos"}
{"code": "def squeeze(x, axis=None):\n    if any_symbolic_tensors((x,)):\n        return Squeeze(axis=axis).symbolic_call(x)\n    return backend.numpy.squeeze(x, axis=axis)", "docstring": "Remove axes of length one from `x`.\n\nArgs:\nx: Input tensor.\naxis: Select a subset of the entries of length one in the shape.\n\nReturns:\nThe input tensor with all or a subset of the dimensions of\nlength 1 removed.", "source": "github-repos"}
{"code": "def __init__(\n      self, session, storage_type=definitions.STORAGE_TYPE_SESSION, task=None):\n    \n    super(FakeStorageWriter, self).__init__(\n        session, storage_type=storage_type, task=task)\n    self._event_data = {}\n    self._event_sources = []\n    self._event_tags = []\n    self._events = []\n    self._warnings = []\n    self._is_open = False\n    self._task_storage_writers = {}\n    self.analysis_reports = []\n    self.session_completion = None\n    self.session_start = None\n    self.task_completion = None\n    self.task_start = None", "docstring": "Initializes a storage writer object.\n\nArgs:\nsession (Session): session the storage changes are part of.\nstorage_type (Optional[str]): storage type.\ntask(Optional[Task]): task.", "source": "juraj-google-style"}
{"code": "def get_registered_name(obj):\n    if obj in GLOBAL_CUSTOM_NAMES:\n        return GLOBAL_CUSTOM_NAMES[obj]\n    else:\n        return obj.__name__", "docstring": "Returns the name registered to an object within the Keras framework.\n\nThis function is part of the Keras serialization and deserialization\nframework. It maps objects to the string names associated with those objects\nfor serialization/deserialization.\n\nArgs:\nobj: The object to look up.\n\nReturns:\nThe name associated with the object, or the default Python name if the\nobject is not registered.", "source": "github-repos"}
{"code": "def tree_type_checker(*ref):\n    ref = tuple(ref)\n    if (NeuriteType.all in ref):\n\n        def check_tree_type(_):\n            'Always returns true'\n            return True\n    else:\n\n        def check_tree_type(tree):\n            'Check whether tree has the same type as ref\\n\\n            Returns:\\n                True if ref in the same type as tree.type or ref is NeuriteType.all\\n            '\n            return (tree.type in ref)\n    return check_tree_type", "docstring": "Tree type checker functor\n\nReturns:\nFunctor that takes a tree, and returns true if that tree matches any of\nNeuriteTypes in ref\n\nEx:\n>>> from neurom.core.types import NeuriteType, tree_type_checker\n>>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)\n>>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)", "source": "codesearchnet"}
{"code": "def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):\n    if isinstance(dimension, (util.basestring, tuple)):\n        dimension = Dimension(dimension)\n    if (dimension.name in self.kdims):\n        raise Exception('{dim} dimension already defined'.format(dim=dimension.name))\n    if vdim:\n        dims = self.vdims[:]\n        dims.insert(dim_pos, dimension)\n        dimensions = dict(vdims=dims)\n        dim_pos += self.ndims\n    else:\n        dims = self.kdims[:]\n        dims.insert(dim_pos, dimension)\n        dimensions = dict(kdims=dims)\n    if (issubclass(self.interface, ArrayInterface) and (np.asarray(dim_val).dtype != self.data.dtype)):\n        element = self.clone(datatype=[default_datatype])\n        data = element.interface.add_dimension(element, dimension, dim_pos, dim_val, vdim)\n    else:\n        data = self.interface.add_dimension(self, dimension, dim_pos, dim_val, vdim)\n    return self.clone(data, **dimensions)", "docstring": "Adds a dimension and its values to the Dataset\n\nRequires the dimension name or object, the desired position in\nthe key dimensions and a key value scalar or array of values,\nmatching the length o shape of the Dataset.\n\nArgs:\ndimension: Dimension or dimension spec to add\ndim_pos (int) Integer index to insert dimension at\ndim_val (scalar or ndarray): Dimension value(s) to add\nvdim: Disabled, this type does not have value dimensions\n**kwargs: Keyword arguments passed to the cloned element\n\nReturns:\nCloned object containing the new dimension", "source": "codesearchnet"}
{"code": "def _PackArgumentsHelper(self, elem, data, set_type_attrs):\n    \n    if self._packer:\n      data = self._packer.Pack(data, self._version)\n\n    if isinstance(data, dict):  \n      \n      type_override = data.get('xsi_type')\n      if type_override:\n        elem_type = self._DiscoverElementTypeFromLocalname(type_override)\n      else:\n        elem_type = elem.type\n\n      data_formatted = data.iteritems()\n      packed_result = self._CreateComplexTypeFromData(\n          elem_type, type_override is not None, data_formatted, set_type_attrs)\n    elif isinstance(data, zeep.xsd.CompoundValue):\n      \n      \n      elem_type = data._xsd_type\n      data_formatted = zip(dir(data), [data[k] for k in dir(data)])\n      packed_result = self._CreateComplexTypeFromData(\n          elem_type, False, data_formatted, set_type_attrs)\n    elif isinstance(data, (list, tuple)):\n      packed_result = [self._PackArgumentsHelper(elem, item, set_type_attrs)\n                       for item in data]\n    else:\n      if elem.type.name == 'base64Binary' and self._IsBase64(data):\n        _logger.warn('Passing data to base64 field %s that may '\n                     'already be encoded. Do not pre-encode base64 '\n                     'fields with zeep.', elem.name)\n      packed_result = data\n\n    return packed_result", "docstring": "Recursive helper for PackArguments.\n\nArgs:\nelem: The element type we are creating.\ndata: The data to instantiate it with.\nset_type_attrs: A boolean indicating whether or not attributes that end\nin .Type should be set. This is only necessary for batch job service.\n\nReturns:\nAn instance of type 'elem'.", "source": "juraj-google-style"}
{"code": "def listen(self):\n        \n\n        logger.info(\"Listening on port \" + str(self.listener.listen_port))\n        self.listener.listen()", "docstring": "Starts the client listener to listen for server responses.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get(self, webfont_name, webfont_settings):\n    try:\n        webfont_settings = extend_webfont_settings(webfont_settings)\n    except IcomoonSettingsError as e:\n        msg = \"Invalid webfont settings for '{}': {}\"\n        self.errors[webfont_name] = msg.format(webfont_name, e.value)\n        return\n    filepath = os.path.join(webfont_settings['fontdir_path'], self.manifest_filename)\n    if os.path.exists(filepath):\n        self.manifests[webfont_name] = self.parse_manifest(filepath)\n    else:\n        msg = 'Filepath for webfont <strong>{name}</strong> does not exists: <code>{filepath}</code>'\n        self.errors[webfont_name] = msg.format(name=webfont_name, filepath=filepath)", "docstring": "Get a manifest file, parse and store it.\n\nArgs:\nwebfont_name (string): Webfont key name. Used to store manifest\nand potentially its parser error.\nwebfont_settings (dict): Webfont settings (an item value from\n``settings.ICOMOON_WEBFONTS``).", "source": "codesearchnet"}
{"code": "def validate_email_to_link(email, raw_email=None, message_template=None, ignore_existing=False):\n    raw_email = (raw_email if (raw_email is not None) else email)\n    message_template = (message_template if (message_template is not None) else ValidationMessages.INVALID_EMAIL)\n    try:\n        validate_email(email)\n    except ValidationError:\n        raise ValidationError(message_template.format(argument=raw_email))\n    existing_record = EnterpriseCustomerUser.objects.get_link_by_email(email)\n    if (existing_record and (not ignore_existing)):\n        raise ValidationError(ValidationMessages.USER_ALREADY_REGISTERED.format(email=email, ec_name=existing_record.enterprise_customer.name))\n    return (existing_record or False)", "docstring": "Validate email to be linked to Enterprise Customer.\n\nPerforms two checks:\n* Checks that email is valid\n* Checks that it is not already linked to any Enterprise Customer\n\nArguments:\nemail (str): user email to link\nraw_email (str): raw value as it was passed by user - used in error message.\nmessage_template (str): Validation error template string.\nignore_existing (bool): If True to skip the check for an existing Enterprise Customer\n\nRaises:\nValidationError: if email is invalid or already linked to Enterprise Customer.\n\nReturns:\nbool: Whether or not there is an existing record with the same email address.", "source": "codesearchnet"}
{"code": "def Field(\n    dagster_type,\n    default_value=FIELD_NO_DEFAULT_PROVIDED,\n    is_optional=INFER_OPTIONAL_COMPOSITE_FIELD,\n    is_secret=False,\n    description=None,\n):\n    \n    config_type = resolve_to_config_type(dagster_type)\n    if not config_type:\n        raise DagsterInvalidDefinitionError(\n            (\n                'Attempted to pass {value_repr} to a Field that expects a valid '\n                'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).'\n            ).format(value_repr=repr(dagster_type))\n        )\n    return FieldImpl(\n        config_type=resolve_to_config_type(dagster_type),\n        default_value=default_value,\n        is_optional=is_optional,\n        is_secret=is_secret,\n        description=description,\n    )", "docstring": "The schema for configuration data that describes the type, optionality, defaults, and description.\n\nArgs:\ndagster_type (DagsterType):\nA ``DagsterType`` describing the schema of this field, ie `Dict({'example': Field(String)})`\ndefault_value (Any):\nA default value to use that respects the schema provided via dagster_type\nis_optional (bool): Whether the presence of this field is optional\ndespcription (str):", "source": "juraj-google-style"}
{"code": "def batch_decode(self, sequences, **kwargs):\n    return super().batch_decode(sequences, **kwargs)", "docstring": "Convert a list of lists of token ids into a list of strings by calling decode.\n\nArgs:\nsequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):\nList of tokenized input ids. Can be obtained using the `__call__` method.\nskip_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not to remove special tokens in the decoding.\nclean_up_tokenization_spaces (`bool`, *optional*):\nWhether or not to clean up the tokenization spaces. If `None`, will default to\n`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).\nuse_source_tokenizer (`bool`, *optional*, defaults to `False`):\nWhether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence\nproblems).\nkwargs (additional keyword arguments, *optional*):\nWill be passed to the underlying model specific decode method.\n\nReturns:\n`List[str]`: The list of decoded sentences.", "source": "github-repos"}
{"code": "def get_size(self, value=None):\n    if isinstance(value, type(self)):\n        return value.get_size()\n    return (2 + self.length)", "docstring": "Return struct size.\n\nReturns:\nint: Returns the struct size based on inner attributes.", "source": "codesearchnet"}
{"code": "def sin(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.sin, tf.float32)", "docstring": "Returns a TensorFluent for the sin function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the sin function.", "source": "juraj-google-style"}
{"code": "def from_dir(cls, top, workdir=None, name=None, manager=None, max_depth=2):\n    from .flows import Flow\n\n    def find_pickles(dirtop):\n        paths = []\n        for (dirpath, dirnames, filenames) in os.walk(dirtop):\n            fnames = [f for f in filenames if (f == Flow.PICKLE_FNAME)]\n            paths.extend([os.path.join(dirpath, f) for f in fnames])\n        return paths\n    if is_string(top):\n        pickle_paths = find_pickles(top)\n    else:\n        pickle_paths = []\n        for p in top:\n            pickle_paths.extend(find_pickles(p))\n    workdir = ('batch' if (workdir is None) else workdir)\n    new = cls(workdir, name=name, manager=manager)\n    for path in pickle_paths:\n        new.add_flow(path)\n    return new", "docstring": "Find all flows located withing the directory `top` and build the `BatchLauncher`.\n\nArgs:\ntop: Top level directory or list of directories.\nworkdir: Batch workdir.\nname:\nmanager: :class:`TaskManager` object. If None, the manager is read from `manager.yml`\nIn this case the YAML file must provide the entry `batch_manager` that defined\nthe queue adapter used to submit the batch script.\nmax_depth: Search in directory only if it is N or fewer levels below top", "source": "codesearchnet"}
{"code": "def setY(self,Y,standardize=False):\n        \n        assert Y.shape[0]==self.N, 'CVarianceDecomposition:: Incompatible shape'\n        assert Y.shape[1]==self.P, 'CVarianceDecomposition:: Incompatible shape'\n\n        \n        if standardize:\n            Y=preprocess.standardize(Y)\n\n        \n        assert (~(SP.isnan(Y).any(axis=1))==self.Iok).all(), 'CVarianceDecomposition:: pattern of missing values needs to match Y given at initialization'\n\n        self.Y = Y\n        self.vd.setPheno(Y)\n\n        self.optimum = None\n    \n        self.cache['Sigma']   = None\n        self.cache['Hessian'] = None\n        self.cache['Lparams'] = None\n        self.cache['paramsST']= None", "docstring": "Set phenotype matrix\n\nArgs:\nY:              phenotype matrix [N, P]\nstandardize:\tif True, phenotype is standardized (zero mean, unit variance)", "source": "juraj-google-style"}
{"code": "def write_layout(_path):\n    \n\n    path.mkdir_uchroot(\"/etc/portage/metadata\")\n    path.mkfile_uchroot(\"/etc/portage/metadata/layout.conf\")\n    with open(_path, 'w') as layoutconf:\n        lines = \n        layoutconf.write(lines)", "docstring": "Write a valid gentoo layout file to :path:.\n\nArgs:\npath - The output path of the layout.conf", "source": "juraj-google-style"}
{"code": "def AddDatastore(self, urn):\n    \n    if urn not in self._datastores:\n      self._datastores.add(urn)\n      return True\n    return False", "docstring": "Adds a datastore URN as a source.\n\nArgs:\nurn: an RDF URN value of the datastore.\n\nReturns:\nTrue if the datastore is not an already existing source.", "source": "juraj-google-style"}
{"code": "def from_specification(specification, env_prefix=None, separator='.',\n                       parent_names=None):\n    \n    items = {}\n    for item_name, item_info in six.iteritems(specification):\n        names = copy.copy(parent_names) if parent_names else []\n        items[item_name] = _generate_item(item_name,\n                                          item_info,\n                                          env_prefix,\n                                          separator,\n                                          names)\n    return items", "docstring": "Used to create YapconfItems from a specification dictionary.\n\nArgs:\nspecification (dict): The specification used to\ninitialize ``YapconfSpec``\nenv_prefix (str): Prefix to add to environment names\nseparator (str): Separator for nested items\nparent_names (list): Parents names of any given item\n\nReturns:\nA dictionary of names to YapconfItems", "source": "juraj-google-style"}
{"code": "def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    if input_ids is not None and inputs_embeds is not None:\n        raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n    elif input_ids is not None:\n        input_txt_shape = input_ids.size()\n    elif inputs_embeds is not None:\n        input_txt_shape = inputs_embeds.size()[:-1]\n    else:\n        raise ValueError('You have to specify either input_ids or inputs_embeds')\n    device = input_ids.device if input_ids is not None else inputs_embeds.device\n    modal_embeddings = self.modal_encoder(input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids)\n    input_modal_shape = modal_embeddings.size()[:-1]\n    if token_type_ids is None:\n        token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)\n    txt_embeddings = self.transformer.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)\n    embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)\n    input_shape = embedding_output.size()[:-1]\n    if attention_mask is None:\n        attention_mask = torch.ones(input_shape, device=device)\n    else:\n        attention_mask = torch.cat([torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1)\n    if encoder_attention_mask is None:\n        encoder_attention_mask = torch.ones(input_shape, device=device)\n    else:\n        encoder_attention_mask = torch.cat([torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1)\n    extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)\n    encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n    head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n    encoder_outputs = self.transformer.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    sequence_output = encoder_outputs[0]\n    pooled_output = self.transformer.pooler(sequence_output)\n    if not return_dict:\n        return (sequence_output, pooled_output) + encoder_outputs[1:]\n    return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n# For example purposes. 
Not runnable.\ntransformer = BertModel.from_pretrained(\"google-bert/bert-base-uncased\")\nencoder = ImageEncoder(args)\nmmbt = MMBTModel(config, transformer, encoder)\n```", "source": "github-repos"}
{"code": "def __move(self, current_pos):\n    if (self.__move_range is not None):\n        next_pos = np.random.randint((current_pos - self.__move_range), (current_pos + self.__move_range))\n        if (next_pos < 0):\n            next_pos = 0\n        elif (next_pos >= (self.var_arr.shape[0] - 1)):\n            next_pos = (self.var_arr.shape[0] - 1)\n        return next_pos\n    else:\n        next_pos = np.random.randint((self.var_arr.shape[0] - 1))\n        return next_pos", "docstring": "Move in the feature map.\n\nArgs:\ncurrent_pos:    The now position.\n\nReturns:\nThe next position.", "source": "codesearchnet"}
{"code": "def getMonthsBuffer(self, direction):\n        \n        if direction == ReadMonths.kWhReverse:\n            return self.m_rev_mons\n\n        \n        return self.m_mons", "docstring": "Get the months tariff SerialBlock for meter.\n\nArgs:\ndirection (int): A :class:`~ekmmeters.ReadMonths` value.\n\nReturns:\nSerialBlock: Requested months tariffs buffer.", "source": "juraj-google-style"}
{"code": "def lookupSpatialReferenceID(cls, directory, filename):\n    path = os.path.join(directory, filename)\n    with open(path, 'r') as f:\n        srid = lookupSpatialReferenceID(f.read())\n    return srid", "docstring": "Look up spatial reference system using the projection file.\n\nArgs:\ndirectory (str):\nfilename (str):\n\nReturn:\nint: Spatial Reference ID", "source": "codesearchnet"}
{"code": "def GetCommandLineArguments(self):\n    command_line_arguments = sys.argv\n    if (not command_line_arguments):\n        return ''\n    if isinstance(command_line_arguments[0], py2to3.BYTES_TYPE):\n        encoding = sys.stdin.encoding\n        if (not encoding):\n            encoding = self.preferred_encoding\n        try:\n            command_line_arguments = [argument.decode(encoding) for argument in command_line_arguments]\n        except UnicodeDecodeError:\n            logger.error('Unable to properly read command line input due to encoding error. Replacing non Basic Latin (C0) characters with \"?\" or \"\\\\ufffd\".')\n            command_line_arguments = [argument.decode(encoding, errors='replace') for argument in command_line_arguments]\n    return ' '.join(command_line_arguments)", "docstring": "Retrieves the command line arguments.\n\nReturns:\nstr: command line arguments.", "source": "codesearchnet"}
{"code": "def GetTableView(cls, format_type, column_names=None, title=None):\n    view_class = cls._TABLE_VIEW_FORMAT_CLASSES.get(format_type, None)\n    if (not view_class):\n        raise ValueError('Unsupported format type: {0:s}'.format(format_type))\n    return view_class(column_names=column_names, title=title)", "docstring": "Retrieves a table view.\n\nArgs:\nformat_type (str): table view format type.\ncolumn_names (Optional[list[str]]): column names.\ntitle (Optional[str]): title.\n\nReturns:\nBaseTableView: table view.\n\nRaises:\nValueError: if the format type is not supported.", "source": "codesearchnet"}
{"code": "def parse(file_or_string):\n    \n    from mysqlparse.grammar.sql_file import sql_file_syntax\n\n    if hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__'):\n        return sql_file_syntax.parseString(file_or_string.read())\n    elif isinstance(file_or_string, six.string_types):\n        return sql_file_syntax.parseString(file_or_string)\n    else:\n        raise TypeError(\"Expected file-like or string object, but got '{type_name}' instead.\".format(\n            type_name=type(file_or_string).__name__,\n        ))", "docstring": "Parse a file-like object or string.\n\nArgs:\nfile_or_string (file, str): File-like object or string.\n\nReturns:\nParseResults: instance of pyparsing parse results.", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists snapshots.\n\nArgs:\nrequest: (DataflowProjectsSnapshotsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListSnapshotsResponse) The response message.", "source": "github-repos"}
{"code": "def consume(self, msg):\n        \n        msg['body'] = crypto.sign(msg['body'], **self.hub.config)\n        super(SigningRelayConsumer, self).consume(msg)", "docstring": "Sign the message prior to sending the message.\n\nArgs:\nmsg (dict): The message to sign and relay.", "source": "juraj-google-style"}
{"code": "def is_legal_object(self, data_type: str) -> bool:\n    data_type = str(data_type)\n    ranges = self.included_ranges()\n    return ((not ranges) or (data_type in ranges) or (self.super_properties() and any((x.is_legal_object(data_type) for x in self.super_properties()))))", "docstring": "Do data_type validation according to the rules of the XML xsd schema.\n\nArgs:\ndata_type:\n\nReturns:", "source": "codesearchnet"}
{"code": "def list_nsgs_all(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                '/subscriptions/', subscription_id,\n                '/providers/Microsoft.Network/',\n                'networkSEcurityGroups?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List all network security groups in a subscription.\nArgs:\naccess_token (str): a valid Azure Authentication token.\nsubscription_id (str): Azure subscription id.\nReturns:\nHTTP response. JSON body of all network security groups in a subscription.", "source": "juraj-google-style"}
{"code": "def files_from_list(*paths):\n    \n    ret = []\n    for path in paths:\n        if isfile(path):\n            ret.append(abspath(path))\n        elif isdir(path):\n            ret += [f for f in ls(path, abspaths=True, recursive=True)\n                    if isfile(f)]\n        else:\n            raise File404(path)\n    return ret", "docstring": "Return a list of all file paths from a list of files or directories.\n\nFor each path in the input: if it is a file, return it; if it is a\ndirectory, return a list of files in the directory.\n\nArguments:\npaths (list of str): List of file and directory paths.\n\nReturns:\nlist of str: Absolute file paths.\n\nRaises:\nFile404: If any of the paths do not exist.", "source": "juraj-google-style"}
{"code": "def handle_unexpected_exception(exc):\n    try:\n        write_logfile()\n        addendum = 'Please see the log file for more information.'\n    except IOError:\n        addendum = 'Unable to write log file.'\n    try:\n        message = str(exc)\n        return '{}{}{}'.format(message, ('\\n' if message else ''), addendum)\n    except Exception:\n        return str(exc)", "docstring": "Return an error message and write a log file if logging was not enabled.\n\nArgs:\nexc: The unexpected exception.\n\nReturns:\nA message to display to the user concerning the unexpected exception.", "source": "codesearchnet"}
{"code": "def __init__(self, campfire, data=None):\n        \n        super(CampfireEntity, self).__init__(data)\n        self._campfire = campfire\n        self._connection = None\n        if self._campfire:\n            self._connection = self._campfire.get_connection()", "docstring": "Initialize.\n\nArgs:\ncampfire (:class:`Campfire`): Campfire Instance\n\nKwargs:\ndata (dict): Entity data", "source": "juraj-google-style"}
{"code": "def GetBudget(self, client_customer_id, budget_id):\n    self.client.SetClientCustomerId(client_customer_id)\n    selector = {'fields': ['BudgetId', 'BudgetName', 'BudgetStatus', 'Amount', 'DeliveryMethod', 'BudgetReferenceCount', 'IsBudgetExplicitlyShared'], 'predicates': [{'field': 'BudgetId', 'operator': 'EQUALS', 'values': [budget_id]}]}\n    budgets = self.client.GetService('BudgetService').get(selector)\n    if (int(budgets['totalNumEntries']) > 0):\n        return budgets['entries'][0]\n    else:\n        return None", "docstring": "Return a Budget with the associated budgetId.\n\nArgs:\nclient_customer_id: str Client Customer Id to which the budget belongs.\nbudget_id: str id of the budget we want to examine.\n\nReturns:\nBudget A Budget data object.", "source": "codesearchnet"}
{"code": "def configure(\n        self,\n        accountID,\n        **kwargs\n    ):\n        \n\n        request = Request(\n            'PATCH',\n            '/v3/accounts/{accountID}/configuration'\n        )\n\n        request.set_path_param(\n            'accountID',\n            accountID\n        )\n\n        body = EntityDict()\n\n        if 'alias' in kwargs:\n            body.set('alias', kwargs['alias'])\n\n        if 'marginRate' in kwargs:\n            body.set('marginRate', kwargs['marginRate'])\n\n        request.set_body_dict(body.dict)\n\n        response = self.ctx.request(request)\n\n\n        if response.content_type is None:\n            return response\n\n        if not response.content_type.startswith(\"application/json\"):\n            return response\n\n        jbody = json.loads(response.raw_body)\n\n        parsed_body = {}\n\n        \n        \n        \n        if str(response.status) == \"200\":\n            if jbody.get('clientConfigureTransaction') is not None:\n                parsed_body['clientConfigureTransaction'] = \\\n                    self.ctx.transaction.ClientConfigureTransaction.from_dict(\n                        jbody['clientConfigureTransaction'],\n                        self.ctx\n                    )\n\n            if jbody.get('lastTransactionID') is not None:\n                parsed_body['lastTransactionID'] = \\\n                    jbody.get('lastTransactionID')\n\n        elif str(response.status) == \"400\":\n            if jbody.get('clientConfigureRejectTransaction') is not None:\n                parsed_body['clientConfigureRejectTransaction'] = \\\n                    self.ctx.transaction.ClientConfigureRejectTransaction.from_dict(\n                        jbody['clientConfigureRejectTransaction'],\n                        self.ctx\n                    )\n\n            if jbody.get('lastTransactionID') is not None:\n                parsed_body['lastTransactionID'] = \\\n                    jbody.get('lastTransactionID')\n\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"403\":\n            if jbody.get('clientConfigureRejectTransaction') is not None:\n                parsed_body['clientConfigureRejectTransaction'] = \\\n                    self.ctx.transaction.ClientConfigureRejectTransaction.from_dict(\n                        jbody['clientConfigureRejectTransaction'],\n                        self.ctx\n                    )\n\n            if jbody.get('lastTransactionID') is not None:\n                parsed_body['lastTransactionID'] = \\\n                    jbody.get('lastTransactionID')\n\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"401\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif 
str(response.status) == \"404\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"405\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        \n        \n        \n        else:\n            parsed_body = jbody\n\n        response.body = parsed_body\n\n        return response", "docstring": "Set the client-configurable portions of an Account.\n\nArgs:\naccountID:\nAccount Identifier\nalias:\nClient-defined alias (name) for the Account\nmarginRate:\nThe string representation of a decimal number.\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "juraj-google-style"}
{"code": "def new(arg_name, annotated_with=None):\n    if arg_name.startswith(_PROVIDE_PREFIX):\n        binding_key_name = arg_name[_PROVIDE_PREFIX_LEN:]\n        provider_indirection = provider_indirections.INDIRECTION\n    else:\n        binding_key_name = arg_name\n        provider_indirection = provider_indirections.NO_INDIRECTION\n    binding_key = binding_keys.new(binding_key_name, annotated_with)\n    return ArgBindingKey(arg_name, binding_key, provider_indirection)", "docstring": "Creates an ArgBindingKey.\n\nArgs:\narg_name: the name of the bound arg\nannotation: an Annotation, or None to create an unannotated arg binding\nkey\nReturns:\na new ArgBindingKey", "source": "codesearchnet"}
{"code": "def WriteBytes(self, value, unhex=True):\n    if unhex:\n        try:\n            value = binascii.unhexlify(value)\n        except binascii.Error:\n            pass\n    return self.stream.write(value)", "docstring": "Write a `bytes` type to the stream.\n\nArgs:\nvalue (bytes): array of bytes to write to the stream.\nunhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor':\n    class_queries_logits = outputs.class_queries_logits\n    masks_queries_logits = outputs.masks_queries_logits\n    masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=(384, 384), mode='bilinear', align_corners=False)\n    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n    masks_probs = masks_queries_logits.sigmoid()\n    segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n    batch_size = class_queries_logits.shape[0]\n    if target_sizes is not None:\n        if batch_size != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        semantic_segmentation = []\n        for idx in range(batch_size):\n            resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = segmentation.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports\nPyTorch.\n\nArgs:\noutputs ([`Mask2FormerForUniversalSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple[int, int]]`, *optional*):\nList of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested\nfinal size (height, width) of each prediction. If left to None, predictions will not be resized.\nReturns:\n`List[torch.Tensor]`:\nA list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\ncorresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each\n`torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout):\n    status = job.status()\n    msg = status.value\n    prev_msg = msg\n    msg_len = len(msg)\n    if (not quiet):\n        print(('\\r%s: %s' % ('Job Status', msg)), end='', file=output)\n    while (status.name not in ['DONE', 'CANCELLED', 'ERROR']):\n        time.sleep(interval)\n        status = job.status()\n        msg = status.value\n        if (status.name == 'QUEUED'):\n            msg += (' (%s)' % job.queue_position())\n            if (not _interval_set):\n                interval = max(job.queue_position(), 2)\n        elif (not _interval_set):\n            interval = 2\n        if (len(msg) < msg_len):\n            msg += (' ' * (msg_len - len(msg)))\n        elif (len(msg) > msg_len):\n            msg_len = len(msg)\n        if ((msg != prev_msg) and (not quiet)):\n            print(('\\r%s: %s' % ('Job Status', msg)), end='', file=output)\n            prev_msg = msg\n    if (not quiet):\n        print('', file=output)", "docstring": "A text-based job status checker\n\nArgs:\njob (BaseJob): The job to check.\ninterval (int): The interval at which to check.\n_interval_set (bool): Was interval time set by user?\nquiet (bool): If True, do not print status messages.\noutput (file): The file like object to write status messages to.\nBy default this is sys.stdout.", "source": "codesearchnet"}
{"code": "def halted(self):\n        \n        result = int(self._dll.JLINKARM_IsHalted())\n        if result < 0:\n            raise errors.JLinkException(result)\n\n        return (result > 0)", "docstring": "Returns whether the CPU core was halted.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``True`` if the CPU core is halted, otherwise ``False``.\n\nRaises:\nJLinkException: on device errors.", "source": "juraj-google-style"}
{"code": "def claim(self, unclaimed_file_readers):\n    claimed_vcf_readers = []\n    for caller in self._callers:\n        (unclaimed_file_readers, translated_vcf_readers) = caller.claim(unclaimed_file_readers)\n        claimed_vcf_readers.extend(translated_vcf_readers)\n    return (unclaimed_file_readers, claimed_vcf_readers)", "docstring": "Allows each caller to claim incoming files as they are recognized.\n\nArgs:\nunclaimed_file_readers: Usually, all files in the input dir.\n\nReturns:\nA tuple of unclaimed file readers and claimed VcfReaders. The\npresence of any unclaimed file readers could indicate stray files\nin the input dir.", "source": "codesearchnet"}
{"code": "def __setstate__(self, state):\n    \n    superstate, localstate = state\n    super(_StorageApi, self).__setstate__(superstate)\n    self.api_url = localstate['api_url']", "docstring": "Restore state as part of deserialization/unpickling.\n\nArgs:\nstate: the tuple from a __getstate__ call", "source": "juraj-google-style"}
{"code": "def read_folder(directory):\n    res = []\n    for filename in os.listdir(directory):\n        with io.open(os.path.join(directory, filename), encoding='utf-8') as f:\n            content = f.read()\n            res.append(content)\n    return res", "docstring": "read text files in directory and returns them as array\n\nArgs:\ndirectory: where the text files are\n\nReturns:\nArray of text", "source": "codesearchnet"}
{"code": "def reference_value_to_document(reference_value, client):\n    parts = reference_value.split(DOCUMENT_PATH_DELIMITER, 5)\n    if (len(parts) != 6):\n        msg = BAD_REFERENCE_ERROR.format(reference_value)\n        raise ValueError(msg)\n    document = client.document(parts[(- 1)])\n    if (document._document_path != reference_value):\n        msg = WRONG_APP_REFERENCE.format(reference_value, client._database_string)\n        raise ValueError(msg)\n    return document", "docstring": "Convert a reference value string to a document.\n\nArgs:\nreference_value (str): A document reference value.\nclient (~.firestore_v1beta1.client.Client): A client that has\na document factory.\n\nReturns:\n~.firestore_v1beta1.document.DocumentReference: The document\ncorresponding to ``reference_value``.\n\nRaises:\nValueError: If the ``reference_value`` is not of the expected\nformat: ``projects/{project}/databases/{database}/documents/...``.\nValueError: If the ``reference_value`` does not come from the same\nproject / database combination as the ``client``.", "source": "codesearchnet"}
{"code": "def matvec(self, x, adjoint=False, name='matvec'):\n    with self._name_scope(name):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        self._check_input_dtype(x)\n        self_dim = -2 if adjoint else -1\n        tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(x.shape[-1])\n        return self._matvec(x, adjoint=adjoint)", "docstring": "Transform [batch] vector `x` with left multiplication:  `x --> Ax`.\n\n```python\n# Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]\noperator = LinearOperator(...)\n\nX = ... # shape [..., N], batch vector\n\nY = operator.matvec(X)\nY.shape\n==> [..., M]\n\nY[..., :] = sum_j A[..., :, j] X[..., j]\n```\n\nArgs:\nx: `Tensor` with compatible shape and same `dtype` as `self`.\n`x` is treated as a [batch] vector meaning for every set of leading\ndimensions, the last dimension defines a vector.\nSee class docstring for definition of compatibility.\nadjoint: Python `bool`.  If `True`, left multiply by the adjoint: `A^H x`.\nname:  A name for this `Op`.\n\nReturns:\nA `Tensor` with shape `[..., M]` and same `dtype` as `self`.", "source": "github-repos"}
{"code": "def generate(self, cache_root):\n        \n        generator_cwd = os.path.join(cache_root, 'generated', self.vlnv.sanitized_name)\n        generator_input_file  = os.path.join(generator_cwd, self.name+'_input.yml')\n\n        logger.info('Generating ' + str(self.vlnv))\n        if not os.path.exists(generator_cwd):\n            os.makedirs(generator_cwd)\n        with open(generator_input_file, 'w') as f:\n            f.write(yaml.dump(self.generator_input))\n\n        args = [os.path.join(os.path.abspath(self.generator.root), self.generator.command),\n                generator_input_file]\n\n        if self.generator.interpreter:\n            args[0:0] = [self.generator.interpreter]\n\n        Launcher(args[0], args[1:],\n                 cwd=generator_cwd).run()\n\n        cores = []\n        logger.debug(\"Looking for generated cores in \" + generator_cwd)\n        for root, dirs, files in os.walk(generator_cwd):\n            for f in files:\n                if f.endswith('.core'):\n                    try:\n                        cores.append(Core(os.path.join(root, f)))\n                    except SyntaxError as e:\n                        w = \"Failed to parse generated core file \" + f + \": \" + e.msg\n                        raise RuntimeError(w)\n        logger.debug(\"Found \" + ', '.join(str(c.name) for c in cores))\n        return cores", "docstring": "Run a parametrized generator\n\nArgs:\ncache_root (str): The directory where to store the generated cores\n\nReturns:\nlist: Cores created by the generator", "source": "juraj-google-style"}
{"code": "def run(self, args):\n    jlink = self.create_jlink(args)\n    mcu = args.name[0].lower()\n    if pylink.unlock(jlink, mcu):\n        print('Successfully unlocked device!')\n    else:\n        print('Failed to unlock device!')", "docstring": "Unlocks the target device.\n\nArgs:\nself (UnlockCommand): the ``UnlockCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    default_to_square = True\n    if 'shortest_edge' in size:\n        size = size['shortest_edge']\n        default_to_square = False\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n    else:\n        raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n    output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def parse_rank_score(rank_score_entry, case_id):\n    \n    rank_score = None\n    if rank_score_entry:\n        for family_info in rank_score_entry.split(','):\n            splitted_info = family_info.split(':')\n            if case_id == splitted_info[0]:\n                rank_score = float(splitted_info[1])\n    return rank_score", "docstring": "Parse the rank score\n\nArgs:\nrank_score_entry(str): The raw rank score entry\ncase_id(str)\n\nReturns:\nrank_score(float)", "source": "juraj-google-style"}
{"code": "def reverse_transform_table(self, table, table_meta, missing=None):\n    if (missing is None):\n        missing = self.missing\n    else:\n        self.missing = missing\n        warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning)\n    result = pd.DataFrame(index=table.index)\n    table_name = table_meta['name']\n    for field in table_meta['fields']:\n        new_column = self._reverse_transform_column(table, field, table_name)\n        if (new_column is not None):\n            result[field['name']] = new_column\n    return result", "docstring": "Transform a `table` back to its original format.\n\nArgs:\ntable(pandas.DataFrame):     Contents of the table to be transformed.\n\ntable_meta(dict):   Metadata for the given table.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\npandas.DataFrame: Table in original format.", "source": "codesearchnet"}
{"code": "def assertAllLess(self, a, comparison_target):\n    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)\n    a = self._GetNdArray(a)\n    self.assertLess(np.max(a), comparison_target)", "docstring": "Assert element values are all less than a target value.\n\nArgs:\na: The numpy `ndarray`, or anything that can be converted into a numpy\n`ndarray` (including Tensor).\ncomparison_target: The target value of comparison.", "source": "github-repos"}
{"code": "def _init_tag_params(self, tag, params):\n        \n        self._element = tag\n        self.params = params\n        self._parseTagName()\n        self._istag = True\n        self._isendtag = False\n        self._isnonpairtag = False\n\n        self._element = self.tagToString()", "docstring": "Alternative constructor used when the tag parameters are added to the\nHTMLElement (HTMLElement(tag, params)).\n\nThis method just creates string and then pass it to the\n:meth:`_init_tag`.\n\nArgs:\ntag (str): HTML tag as string.\nparams (dict): HTML tag parameters as dictionary.", "source": "juraj-google-style"}
{"code": "def _get_model_reference(self, model_id):\n    \n    return ModelReference.from_api_repr(\n        {\"projectId\": self.project, \"datasetId\": self.dataset_id, \"modelId\": model_id}\n    )", "docstring": "Constructs a ModelReference.\n\nArgs:\nmodel_id (str): the ID of the model.\n\nReturns:\ngoogle.cloud.bigquery.model.ModelReference:\nA ModelReference for a model in this dataset.", "source": "juraj-google-style"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    date_time_values = self._CopyDateTimeFromString(time_string)\n\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n\n    self._timestamp = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n\n    self.is_local_time = False", "docstring": "Copies a POSIX timestamp from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.", "source": "juraj-google-style"}
{"code": "def set_description(self, name, action, seqno, value=None, default=False, disable=False):\n    commands = [('route-map %s %s %s' % (name, action, seqno))]\n    if (value is not None):\n        commands.append(self.command_builder('description', disable=True))\n    commands.append(self.command_builder('description', value=value, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the routemap description\n\nArgs:\nname (string): The full name of the routemap.\naction (string): The action to take for this routemap clause.\nseqno (integer): The sequence number for the routemap clause.\nvalue (string): The value to configure for the routemap description\ndefault (bool): Specifies to default the routemap description value\ndisable (bool): Specifies to negate the routemap description\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def get_value_index(self, indices):\n        \n        size = self['size'] if self.get('size') else self['dimension']['size']\n        ndims = len(size)\n        mult = 1\n        num = 0\n        for idx, dim in enumerate(size):\n            mult *= size[ndims - idx] if (idx > 0) else 1\n            num += mult * indices[ndims - idx - 1]\n        return num", "docstring": "Converts a list of dimensions’ indices into a numeric value index.\n\nArgs:\nindices(list): list of dimension's indices.\n\nReturns:\nnum(int): numeric value index.", "source": "juraj-google-style"}
{"code": "def build(self, var_list):\n    if self.built:\n        return\n    super().build(var_list)\n    self._r = []\n    self._c = []\n    self._v = []\n    for var in var_list:\n        if len(var.shape) < 2:\n            self._r.append(backend.Variable(0, name=var.name, trainable=False))\n            self._c.append(backend.Variable(0, name=var.name, trainable=False))\n        elif self._overwrite_variable_with_gradient(var):\n            self._r.append(None)\n            self._c.append(None)\n        else:\n            r_shape = var.shape[:-1]\n            c_shape = var.shape[:-2] + (var.shape[-1],)\n            self._r.append(self.add_variable(shape=r_shape, dtype=var.dtype, name=var.name))\n            self._c.append(self.add_variable(shape=c_shape, dtype=var.dtype, name=var.name))\n        if self._overwrite_variable_with_gradient(var):\n            self._v.append(None)\n        else:\n            self._v.append(self.add_variable_from_reference(reference_variable=var, name='velocity'))", "docstring": "Initialize optimizer variables.\n\nAdam optimizer has 3 types of variables: momentums, velocities and\nvelocity_hat (only set when amsgrad is applied),\n\nArgs:\nvar_list: list of model variables to build Adam variables on.", "source": "github-repos"}
{"code": "def get_func(func_ea):\n    \n    if isinstance(func_ea, idaapi.func_t):\n        return func_ea\n    func = idaapi.get_func(func_ea)\n    if func is None:\n        raise exceptions.SarkNoFunction(\"No function at 0x{:08X}\".format(func_ea))\n\n    return func", "docstring": "get_func(func_t or ea) -> func_t\n\nTake an IDA function (``idaapi.func_t``) or an address (EA) and return\nan IDA function object.\n\nUse this when APIs can take either a function or an address.\n\nArgs:\nfunc_ea: ``idaapi.func_t`` or ea of the function.\n\nReturns:\nAn ``idaapi.func_t`` object for the given address. If a ``func_t`` is\nprovided, it is returned.", "source": "juraj-google-style"}
{"code": "def GetParent(self):\n    if self.root:\n        return None\n    return PathInfo(components=self.components[:(- 1)], path_type=self.path_type, directory=True)", "docstring": "Constructs a path info corresponding to the parent of current path.\n\nThe root path (represented by an empty list of components, corresponds to\n`/` on Unix-like systems) does not have a parent.\n\nReturns:\nInstance of `rdf_objects.PathInfo` or `None` if parent does not exist.", "source": "codesearchnet"}
{"code": "def leak(self: EventSetOrNode, duration: Duration) -> EventSetOrNode:\n    from temporian.core.operators.leak import leak\n    return leak(self, duration=duration)", "docstring": "Subtracts a duration from an [`EventSet`][temporian.EventSet]'s\ntimestamps.\n\nIn other words, shifts the timestamp values backward in time.\n\nNote that this operator moves future data into the past, and should be used\nwith caution to prevent unwanted future leakage. For instance, this op\nshould generally not be used to compute the input features of a model.\n\nUsage example:\n```python\n>>> a = tp.event_set(\n...     timestamps=[0, 1, 5, 6],\n...     features={\"value\": [0, 1, 5, 6]},\n... )\n\n>>> b = a.leak(tp.duration.seconds(2))\n>>> b\nindexes: ...\n(4 events):\ntimestamps: [-2. -1. 3. 4.]\n'value': [0 1 5 6]\n...\n\n```\n\nArgs:\nduration: Duration to leak by.\n\nReturns:\nLeaked EventSet.", "source": "github-repos"}
{"code": "def handle_response_for_connection(self, should_post=False):\n    status_code = self._response.status_code\n    data = self._response.data\n    if (data and ('errors' in data)):\n        self._response.errors = data['errors']\n    if (status_code in [HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY]):\n        return True\n    if (status_code == HTTP_CODE_MULTIPLE_CHOICES):\n        return False\n    if (status_code in [HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_UNAUTHORIZED]):\n        if (not should_post):\n            return True\n        return False\n    if (status_code in [HTTP_CODE_CONFLICT, HTTP_CODE_NOT_FOUND, HTTP_CODE_BAD_REQUEST, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_SERVICE_UNAVAILABLE]):\n        if (not should_post):\n            return True\n        return False\n    if (status_code == HTTP_CODE_INTERNAL_SERVER_ERROR):\n        return False\n    if (status_code == HTTP_CODE_ZERO):\n        bambou_logger.error('NURESTConnection: Connection error with code 0. Sending NUNURESTConnectionFailureNotification notification and exiting.')\n        return False\n    bambou_logger.error(('NURESTConnection: Report this error, because this should not happen: %s' % self._response))\n    return False", "docstring": "Check if the response succeed or not.\n\nIn case of error, this method also print messages and set\nan array of errors in the response object.\n\nReturns:\nReturns True if the response has succeed, False otherwise", "source": "codesearchnet"}
{"code": "def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):\n    hmet_card_file_path_temp = '{0}_tmp'.format(hmet_card_file_path)\n    try:\n        remove(hmet_card_file_path_temp)\n    except OSError:\n        pass\n    copy(hmet_card_file_path, hmet_card_file_path_temp)\n    with io_open(hmet_card_file_path_temp, 'w', newline='\\r\\n') as out_hmet_list_file:\n        with open(hmet_card_file_path) as old_hmet_list_file:\n            for date_path in old_hmet_list_file:\n                out_hmet_list_file.write(u'{0}\\n'.format(path.join(new_hmet_data_path, path.basename(date_path))))\n    try:\n        remove(hmet_card_file_path)\n    except OSError:\n        pass\n    rename(hmet_card_file_path_temp, hmet_card_file_path)", "docstring": "This function updates the paths in the HMET card file to the new\nlocation of the HMET data. This is necessary because the file paths\nare absolute and will need to be updated if moved.\n\nArgs:\nhmet_card_file_path(str): Location of the file used for the HMET_ASCII card.\nnew_hmet_data_path(str): Location where the HMET ASCII files are currently.\n\nExample::\n\nnew_hmet_data_path = \"E:\\\\GSSHA\\\\new_hmet_directory\"\nhmet_card_file_path = \"E:\\\\GSSHA\\\\hmet_card_file.txt\"\n\nupdate_hmet_card_file(hmet_card_file_path, new_hmet_data_path)", "source": "codesearchnet"}
{"code": "def NextToken(self):\n    if (len(self.buffer) < 512):\n        if ((self.Feed() == 0) and (not self.buffer)):\n            return None\n    return Lexer.NextToken(self)", "docstring": "Retrieves the next token.\n\nReturns:\nThe next token (instance of Token) or None.", "source": "codesearchnet"}
{"code": "def minimum(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Minimum().symbolic_call(x1, x2)\n    return backend.numpy.minimum(x1, x2)", "docstring": "Element-wise minimum of `x1` and `x2`.\n\nArgs:\nx1: First tensor.\nx2: Second tensor.\n\nReturns:\nOutput tensor, element-wise minimum of `x1` and `x2`.", "source": "github-repos"}
{"code": "def make_lda_variational(activation, num_topics, layer_sizes):\n  \n  encoder_net = tf.keras.Sequential()\n  for num_hidden_units in layer_sizes:\n    encoder_net.add(\n        tf.keras.layers.Dense(\n            num_hidden_units,\n            activation=activation,\n            kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n  encoder_net.add(\n      tf.keras.layers.Dense(\n          num_topics,\n          activation=tf.nn.softplus,\n          kernel_initializer=tf.compat.v1.glorot_normal_initializer()))\n\n  def lda_variational(bag_of_words):\n    concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))\n    return ed.Dirichlet(concentration=concentration, name=\"topics_posterior\")\n\n  return lda_variational", "docstring": "Creates the variational distribution for LDA.\n\nArgs:\nactivation: Activation function to use.\nnum_topics: The number of topics.\nlayer_sizes: The number of hidden units per layer in the encoder.\n\nReturns:\nlda_variational: A function that takes a bag-of-words Tensor as\ninput and returns a distribution over topics.", "source": "juraj-google-style"}
{"code": "def _ReadCompressedData(self, read_size):\n    \n    compressed_data = self._file_object.read(read_size)\n\n    read_count = len(compressed_data)\n\n    self._compressed_data = b''.join([self._compressed_data, compressed_data])\n\n    self._uncompressed_data, self._compressed_data = (\n        self._decompressor.Decompress(self._compressed_data))\n\n    self._uncompressed_data_size = len(self._uncompressed_data)\n\n    return read_count", "docstring": "Reads compressed data from the file-like object.\n\nArgs:\nread_size (int): number of bytes of compressed data to read.\n\nReturns:\nint: number of bytes of compressed data read.", "source": "juraj-google-style"}
{"code": "def to_hg_scheme_url(cls, url):\n    regexes = cls._get_url_scheme_regexes()\n    for (scheme_key, pattern, regex) in regexes:\n        match = regex.match(url)\n        if (match is not None):\n            groups = match.groups()\n            if (len(groups) == 2):\n                return u''.join(scheme_key, ':\n            elif (len(groups) == 1):\n                return u''.join(scheme_key, ':", "docstring": "Convert a URL to local mercurial URL schemes\n\nArgs:\nurl (str): URL to map to local mercurial URL schemes\n\nexample::\n\n# schemes.gh = git://github.com/\n>> remote_url = git://github.com/westurner/dotfiles'\n>> to_hg_scheme_url(remote_url)\n<< gh://westurner/dotfiles", "source": "codesearchnet"}
{"code": "def update_add(x, increment):\n    return state_ops.assign_add(x, increment)", "docstring": "Update the value of `x` by adding `increment`.\n\nArgs:\nx: A Variable.\nincrement: A tensor of same shape as `x`.\n\nReturns:\nThe variable `x` updated.", "source": "github-repos"}
{"code": "def has_no_jumps(neuron, max_distance=30.0, axis='z'):\n    \n    bad_ids = []\n    axis = {'x': COLS.X, 'y': COLS.Y, 'z': COLS.Z, }[axis.lower()]\n    for neurite in iter_neurites(neuron):\n        section_segment = ((sec, seg) for sec in iter_sections(neurite)\n                           for seg in iter_segments(sec))\n        for sec, (p0, p1) in islice(section_segment, 1, None):  \n            if max_distance < abs(p0[axis] - p1[axis]):\n                bad_ids.append((sec.id, [p0, p1]))\n    return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check if there are jumps (large movements in the `axis`)\n\nArguments:\nneuron(Neuron): The neuron object to test\nmax_distance(float): value above which consecutive z-values are\nconsidered a jump\naxis(str): one of x/y/z, which axis to check for jumps\n\nReturns:\nCheckResult with result list of ids of bad sections", "source": "juraj-google-style"}
{"code": "def set_flowcontrol_send(self, name, value=None, default=False, disable=False):\n    return self.set_flowcontrol(name, 'send', value, default, disable)", "docstring": "Configures the interface flowcontrol send value\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\n\nvalue (boolean): True if the interface should enable sending flow\ncontrol packets, otherwise False\n\ndefault (boolean): Specifies to default the interface flow\ncontrol send value\n\ndisable (boolean): Specifies to disable the interface flow\ncontrol send value\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def send(self, message_type, task_id, message):\n    x = 0\n    try:\n        buffer = pickle.dumps((self.source_id, int(time.time()), message_type, message))\n    except Exception as e:\n        print('Exception during pickling {}'.format(e))\n        return\n    try:\n        x = self.sock.sendto(buffer, (self.ip, self.port))\n    except socket.timeout:\n        print('Could not send message within timeout limit')\n        return False\n    return x", "docstring": "Sends a message to the UDP receiver\n\nParameter\n---------\n\nmessage_type: monitoring.MessageType (enum)\nIn this case message type is RESOURCE_INFO most often\ntask_id: int\nTask identifier of the task for which resource monitoring is being reported\nmessage: object\nArbitrary pickle-able object that is to be sent\n\nReturns:\n# bytes sent", "source": "codesearchnet"}
{"code": "def __init__(self, target='', graph=None, config=None):\n    if not config:\n        gpu_options = config_pb2.GPUOptions(allow_growth=True)\n        config = config_pb2.ConfigProto(gpu_options=gpu_options)\n    config.graph_options.place_pruned_graph = True\n    super(InteractiveSession, self).__init__(target, graph, config)\n    with InteractiveSession._count_lock:\n        if InteractiveSession._active_session_count > 0:\n            logging.error('An interactive session is already active. This can cause out-of-memory errors or some other unexpected errors (due to the unpredictable timing of garbage collection) in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s). Please use `tf.Session()` if you intend to productionize.')\n        InteractiveSession._active_session_count += 1\n    self._explicitly_closed = False\n    self._default_session = self.as_default()\n    self._default_session.enforce_nesting = False\n    self._default_session.__enter__()\n    self._explicit_graph = graph\n    if self._explicit_graph is not None:\n        self._default_graph = graph.as_default()\n        self._default_graph.enforce_nesting = False\n        self._default_graph.__enter__()", "docstring": "Creates a new interactive TensorFlow session.\n\nIf no `graph` argument is specified when constructing the session,\nthe default graph will be launched in the session. If you are\nusing more than one graph (created with `tf.Graph()`) in the same\nprocess, you will have to use different sessions for each graph,\nbut each graph can be used in multiple sessions. In this case, it\nis often clearer to pass the graph to be launched explicitly to\nthe session constructor.\n\nArgs:\ntarget: (Optional.) The execution engine to connect to. Defaults to using\nan in-process engine.\ngraph: (Optional.) The `Graph` to be launched (described above).\nconfig: (Optional) `ConfigProto` proto used to configure the session.", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        if not isinstance(other, FrameSet):\n            if not hasattr(other, '__iter__'):\n                return NotImplemented\n            other = self.from_iterable(other)\n        this = hash(self.items) | hash(self.order)\n        that = hash(other.items) | hash(other.order)\n        return this == that", "docstring": "Check if `self` == `other` via a comparison of the hash of\ntheir contents.\nIf `other` is not a :class:`FrameSet`, but is a set, frozenset, or\nis iterable, it will be cast to a :class:`FrameSet`.\n\nArgs:\nother (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet`\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def parse(cls, args):\n        \n\n        try:\n            (options, args) = cls.optparser.parse_args(args)\n            if options.db_tap_id is None:\n                raise ParseError(\"db_tap_id is required\",\n                                 cls.optparser.format_help())\n            if options.query is None and options.script_location is None:\n                raise ParseError(\"query or script location is required\",\n                                 cls.optparser.format_help())\n\n            if options.script_location is not None:\n                if options.query is not None:\n                    raise ParseError(\n                        \"Both query and script_location cannot be specified\",\n                        cls.optparser.format_help())\n\n                if ((options.script_location.find(\"s3:\n                        (options.script_location.find(\"s3n:\n\n                    \n\n                    try:\n                        q = open(options.script_location).read()\n                    except IOError as e:\n                        raise ParseError(\"Unable to open script location: %s\" %\n                                         str(e),\n                                         cls.optparser.format_help())\n                    options.script_location = None\n                    options.query = q\n\n        except OptionParsingError as e:\n            raise ParseError(e.msg, cls.optparser.format_help())\n        except OptionParsingExit as e:\n            return None\n\n        if options.macros is not None:\n            options.macros = json.loads(options.macros)\n        v = vars(options)\n        v[\"command_type\"] = \"DbTapQueryCommand\"\n        return v", "docstring": "Parse command line arguments to construct a dictionary of command\nparameters that can be used to create a command\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used in create method\n\nRaises:\nParseError: when the arguments are not correct", "source": "juraj-google-style"}
{"code": "def skip_if(expr, reason, extras=None):\n    if expr:\n        skip(reason, extras)", "docstring": "Skip a test if expression evaluates to True.\n\nArgs:\nexpr: The expression that is evaluated.\nreason: The reason this test is skipped.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def FromTrimmedData(data, index):\n    header = Header()\n    ms = StreamManager.GetStream(data)\n    reader = BinaryReader(ms)\n    header.DeserializeUnsigned(reader)\n    reader.ReadByte()\n    witness = Witness()\n    witness.Deserialize(reader)\n    header.Script = witness\n    StreamManager.ReleaseStream(ms)\n    return header", "docstring": "Deserialize into a Header object from the provided data.\n\nArgs:\ndata (bytes):\nindex: UNUSED\n\nReturns:\nHeader:", "source": "codesearchnet"}
{"code": "def Reboot(self, target_mode=b'', timeout_ms=None):\n        \n        return self._SimpleCommand(\n            b'reboot', arg=target_mode or None, timeout_ms=timeout_ms)", "docstring": "Reboots the device.\n\nArgs:\ntarget_mode: Normal reboot when unspecified. Can specify other target\nmodes such as 'recovery' or 'bootloader'.\ntimeout_ms: Optional timeout in milliseconds to wait for a response.\n\nReturns:\nUsually the empty string. Depends on the bootloader and the target_mode.", "source": "juraj-google-style"}
{"code": "def __init__(self, source, lineno, target, what=None):\n        \n        self.source = source\n        self.lineno = lineno\n        self.target = target\n        self.what = what", "docstring": "Initialization method.\n\nArgs:\nsource (Module): source Module.\nlineno (int): number of line at which import statement occurs.\ntarget (str/Module/Package): the target node.\nwhat (str): what is imported (optional).", "source": "juraj-google-style"}
{"code": "def delete_field(self, field_name):\n    \n    self._whoosh.remove_field(field_name.strip())\n    return self._whoosh.schema", "docstring": "This function deletes one determined field using the command MODEL.pw.delete_field(FIELD)\n\nArgs:\nfield_name (string): This argument let you delete some field for some model registered in the index.\n\nReturns:\n(WhooshSchema): The new schema after deleted is returned.", "source": "juraj-google-style"}
{"code": "def read_value(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    try:\n        value = unpack('!Q', istream.read(self.LENGTH))[0]\n    except Exception:\n        self.logger.error('Error reading boolean value from buffer')\n        raise\n    if (value == 1):\n        self.value = True\n    elif (value == 0):\n        self.value = False\n    else:\n        raise ValueError('expected: 0 or 1, observed: {0}'.format(value))\n    self.validate()", "docstring": "Read the value of the Boolean object from the input stream.\n\nArgs:\nistream (Stream): A buffer containing the encoded bytes of the\nvalue of a Boolean object. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: if the read boolean value is not a 0 or 1.", "source": "codesearchnet"}
{"code": "def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):\n    init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE, dtype=dtypes.float32, shape=[num_signatures])\n    self._temp_cache_var[graph] = [init_value for _ in range(num_traced_tensors)]", "docstring": "Creates a temporary cache with the given dimensions.\n\nFills the self._temp_cache_var with num_traced_tensors tf.constant() ops\nthat have shape of [num_signatures].\nArgs:\nnum_traced_tensors: Int, denoting total number of traced tensors.\nnum_signatures: Int, denoting the number of statistics collected per\ntensors.\ngraph: TensorFlow graph.", "source": "github-repos"}
{"code": "def __iter__(self):\n    raise NotImplementedError('Must be implemented in descendants')", "docstring": "Creates an iterator for the `tf.distribute.DistributedDataset`.\n\nThe returned iterator implements the Python Iterator protocol.\n\nExample usage:\n\n>>> global_batch_size = 4\n>>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size)\n>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))\n>>> print(next(distributed_iterator))\nPerReplica:{\n0: tf.Tensor([1 2], shape=(2,), dtype=int32),\n1: tf.Tensor([3 4], shape=(2,), dtype=int32)\n}\n\nReturns:\nAn `tf.distribute.DistributedIterator` instance for the given\n`tf.distribute.DistributedDataset` object to enumerate over the\ndistributed data.", "source": "github-repos"}
{"code": "def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None, ignored_terms=None, **parameters):\n    if (ignored_terms is None):\n        ignored_terms = set()\n    else:\n        ignored_terms = {frozenset(term) for term in ignored_terms}\n    (original, poly) = (poly, poly.copy())\n    if (scalar is not None):\n        poly.scale(scalar, ignored_terms=ignored_terms)\n    else:\n        poly.normalize(bias_range=bias_range, poly_range=poly_range, ignored_terms=ignored_terms)\n        try:\n            v = next((v for (v, bias) in original.items() if (bias and (v not in ignored_terms))))\n        except StopIteration:\n            scalar = 1\n        else:\n            scalar = (poly[v] / original[v])\n    sampleset = self.child.sample_poly(poly, **parameters)\n    if ignored_terms:\n        sampleset.record.energy = original.energies((sampleset.record.sample, sampleset.variables))\n    else:\n        sampleset.record.energy /= scalar\n    return sampleset", "docstring": "Scale and sample from the given binary polynomial.\n\nIf scalar is not given, problem is scaled based on bias and polynomial\nranges. See :meth:`.BinaryPolynomial.scale` and\n:meth:`.BinaryPolynomial.normalize`\n\nArgs:\npoly (obj:`.BinaryPolynomial`): A binary polynomial.\n\nscalar (number, optional):\nValue by which to scale the energy range of the binary polynomial.\n\nbias_range (number/pair, optional, default=1):\nValue/range by which to normalize the all the biases, or if\n`poly_range` is provided, just the linear biases.\n\npoly_range (number/pair, optional):\nValue/range by which to normalize the higher order biases.\n\nignored_terms (iterable, optional):\nBiases associated with these terms are not scaled.\n\n**parameters:\nOther parameters for the sampling method, specified by\nthe child sampler.", "source": "codesearchnet"}
{"code": "def register_model(cls, model):\n        \n\n        rest_name = model.rest_name\n        resource_name = model.resource_name\n\n        if rest_name not in cls._model_rest_name_registry:\n            cls._model_rest_name_registry[rest_name] = [model]\n            cls._model_resource_name_registry[resource_name] = [model]\n\n        elif model not in cls._model_rest_name_registry[rest_name]:\n            cls._model_rest_name_registry[rest_name].append(model)\n            cls._model_resource_name_registry[resource_name].append(model)", "docstring": "Register a model class according to its remote name\n\nArgs:\nmodel: the model to register", "source": "juraj-google-style"}
{"code": "def add_permission_by_name(self, code, save=False):\n    if (not save):\n        return [('%s | %s' % (p.name, p.code)) for p in Permission.objects.filter(code__contains=code)]\n    for p in Permission.objects.filter(code__contains=code):\n        if (p not in self.Permissions):\n            self.Permissions(permission=p)\n    if p:\n        self.save()", "docstring": "Adds a permission with given name.\n\nArgs:\ncode (str): Code name of the permission.\nsave (bool): If False, does nothing.", "source": "codesearchnet"}
{"code": "def generate_output_network(self, json_data=None, hr=True, show_name=False, colorize=True):\n    if (json_data is None):\n        json_data = {}\n    output = generate_output(line='0', short=(HR_RDAP['network']['_short'] if hr else 'network'), name=(HR_RDAP['network']['_name'] if (hr and show_name) else None), is_parent=True, colorize=colorize)\n    for (key, val) in json_data['network'].items():\n        if (key in ['links', 'status']):\n            output += self.generate_output_list(source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize)\n        elif (key in ['notices', 'remarks']):\n            output += self.generate_output_notices(source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize)\n        elif (key == 'events'):\n            output += self.generate_output_events(source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize)\n        elif (key not in ['raw']):\n            output += generate_output(line='1', short=(HR_RDAP['network'][key]['_short'] if hr else key), name=(HR_RDAP['network'][key]['_name'] if (hr and show_name) else None), value=val, colorize=colorize)\n    return output", "docstring": "The function for generating CLI output RDAP network results.\n\nArgs:\njson_data (:obj:`dict`): The data to process. Defaults to None.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def write_jsonl_file(fname, data):\n    \n    if not isinstance(data, list):\n        print('warning: malformed json data for file', fname)\n        return\n    with open(fname, 'w') as of:\n        for row in data:\n            \n            if row.strip():\n                of.write('%s\\n' % row.strip())", "docstring": "Writes a jsonl file.\n\nArgs:\ndata: list of json encoded data", "source": "juraj-google-style"}
{"code": "def get_volume_details(self, volume_name: str) -> dict:\n    if (volume_name not in self.volumes):\n        raise RuntimeError('No such volume found: ', volume_name)\n    volume = self._client.volumes.get(volume_name)\n    return volume.attrs", "docstring": "Get details of the volume.\n\nArgs:\nvolume_name (str): Name of the volume\n\nReturns:\ndict, details of the volume", "source": "codesearchnet"}
{"code": "def use_db(path, mode=WorkDB.Mode.create):\n    \n    database = WorkDB(path, mode)\n    try:\n        yield database\n    finally:\n        database.close()", "docstring": "Open a DB in file `path` in mode `mode` as a context manager.\n\nOn exiting the context the DB will be automatically closed.\n\nArgs:\npath: The path to the DB file.\nmode: The mode in which to open the DB. See the `Mode` enum for\ndetails.\n\nRaises:\nFileNotFoundError: If `mode` is `Mode.open` and `path` does not\nexist.", "source": "juraj-google-style"}
{"code": "def converted_self(self):\n    if self._converted_self is None:\n        old_name = self.function.signature.name\n        new_name = self._enclosing_graph.converted_function_names[old_name]\n        self.converted_enclosing_graph.rename_function(old_name, new_name)\n        self._converted_self = self.converted_enclosing_graph.functions[new_name]\n    return self._converted_self", "docstring": "The Function copy to be converted.\n\nThe copy will be renamed according to the graph's converted_function_name\nmap, to ensure the name does not match anything currently in TensorFlow's\nfunction cache.\n\nReturns:\nThe function instance to be converted.", "source": "github-repos"}
{"code": "def usufyToXlsxExport(d, fPath):\n    from pyexcel_xlsx import get_data\n    try:\n        oldData = {'OSRFramework': get_data(fPath)}\n    except:\n        oldData = {'OSRFramework': []}\n    tabularData = _generateTabularData(d, oldData)\n    from pyexcel_xlsx import save_data\n    save_data(fPath, tabularData)", "docstring": "Workaround to export to a .xlsx file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"}
{"code": "def flatten(value: '_instance_base.SimpleValue', classes: 'list[class_mixin.Class]') -> bool:\n    if isinstance(value, _abstract.AnnotationClass):\n        value = value.base_cls\n    if isinstance(value, _abstract.Class):\n        classes.append(value)\n        return False\n    elif isinstance(value, _abstract.Tuple):\n        ambiguous = False\n        for var in value.pyval:\n            if len(var.bindings) != 1 or flatten(var.bindings[0].data, classes):\n                ambiguous = True\n        return ambiguous\n    elif isinstance(value, _abstract.Union):\n        ambiguous = False\n        for val in value.options:\n            if flatten(val, classes):\n                ambiguous = True\n        return ambiguous\n    else:\n        return True", "docstring": "Flatten the contents of value into classes.\n\nIf value is a Class, it is appended to classes.\nIf value is a PythonConstant of type tuple, then each element of the tuple\nthat has a single binding is also flattened.\nAny other type of value, or tuple elements that have multiple bindings are\nignored.\n\nArgs:\nvalue: An abstract value.\nclasses: A list to be modified.\n\nReturns:\nTrue iff a value was ignored during flattening.", "source": "github-repos"}
{"code": "def __init__(self, data, label=None):\n        \n        if hasattr(data, 'to_matrix'):\n            \n            \n            data = data.to_matrix()\n        elif hasattr(data, 'to_operator'):\n            \n            \n            \n            data = data.to_operator().data\n        \n        data = numpy.array(data, dtype=complex)\n        \n        if not is_unitary_matrix(data):\n            raise ExtensionError(\"Input matrix is not unitary.\")\n        \n        input_dim, output_dim = data.shape\n        n_qubits = int(numpy.log2(input_dim))\n        if input_dim != output_dim or 2**n_qubits != input_dim:\n            raise ExtensionError(\n                \"Input matrix is not an N-qubit operator.\")\n        \n        super().__init__('unitary', n_qubits, [data], label=label)", "docstring": "Create a gate from a numeric unitary matrix.\n\nArgs:\ndata (matrix or Operator): unitary operator.\nlabel (str): unitary name for backend [Default: None].\n\nRaises:\nExtensionError: if input data is not an N-qubit unitary operator.", "source": "juraj-google-style"}
{"code": "def monkey_patch(enabled=True):\n    \n\n    if enabled:\n        Image.open = imdirect_open\n    else:\n        Image.open = pil_open", "docstring": "Monkey patching PIL.Image.open method\n\nArgs:\nenabled (bool): If the monkey patch should be activated or deactivated.", "source": "juraj-google-style"}
{"code": "def _fill_shape(x, n):\n    if ((not isinstance(n, numbers.Integral)) or (n < 1)):\n        raise TypeError('n must be a positive integer')\n    if ((isinstance(x, numbers.Integral) or isinstance(x, tf.Dimension)) and (x > 0)):\n        return ((x,) * n)\n    try:\n        if ((len(x) == n) and all(((v > 0) for v in x))):\n            return tuple(x)\n    except TypeError:\n        pass\n    raise TypeError('x is {}, must be either a positive integer or an iterable of positive integers of size {}'.format(x, n))", "docstring": "Converts a dimension to a tuple of dimensions of a given size.\n\nThis is used to allow shorthand notation for various configuration parameters.\nA user can provide either, for example, `2` or `[2, 2]` as a kernel shape, and\nthis function returns `(2, 2)` in both cases. Passing `[1, 2]` will return\n`(1, 2)`.\n\nArgs:\nx: An integer, tf.Dimension, or an iterable of them.\nn: An integer, the size of the desired output list\n\nReturns:\nIf `x` is an integer, a tuple of size `n` containing `n` copies of `x`.\nIf `x` is an iterable of integers or tf.Dimension of size `n`, it returns\n`tuple(x)`.\n\nRaises:\nTypeError: If n is not a positive integer;\nor if x is neither integer nor an iterable of size n.", "source": "codesearchnet"}
{"code": "def _create_produce_requests(self, collated):\n        \n        requests = {}\n        for node_id, batches in six.iteritems(collated):\n            requests[node_id] = self._produce_request(\n                node_id, self.config['acks'],\n                self.config['request_timeout_ms'], batches)\n        return requests", "docstring": "Transfer the record batches into a list of produce requests on a\nper-node basis.\n\nArguments:\ncollated: {node_id: [RecordBatch]}\n\nReturns:\ndict: {node_id: ProduceRequest} (version depends on api_version)", "source": "juraj-google-style"}
{"code": "def get_var(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK):\n    \n    return self._simple_command('getvar', arg=var, info_cb=info_cb)", "docstring": "Returns the given variable's definition.\n\nArgs:\nvar: A variable the bootloader tracks, such as version.\ninfo_cb: See Download. Usually no messages.\nReturns:\nValue of var according to the current bootloader.", "source": "juraj-google-style"}
{"code": "def get_default_connection_info(self, provider_name):\n        \n        provider = self._provider_client.get_by_name(provider_name)\n        if provider:\n            return provider['defaultConnectionInfo']\n        else:\n            return {}", "docstring": "Gets default connection info for a specific provider.\n\nArgs:\nprovider_name: Name of the provider.\n\nReturns:\ndict: Default connection information.", "source": "juraj-google-style"}
{"code": "def add_dataset(self, dataset, datasets_to_check=None):\n        \n        \n        showcase_dataset = self._get_showcase_dataset_dict(dataset)\n        if datasets_to_check is None:\n            datasets_to_check = self.get_datasets()\n        for dataset in datasets_to_check:\n            if showcase_dataset['package_id'] == dataset['id']:\n                return False\n        self._write_to_hdx('associate', showcase_dataset, 'package_id')\n        return True", "docstring": "Add a dataset\n\nArgs:\ndataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary\ndatasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.\n\nReturns:\nbool: True if the dataset was added, False if already present", "source": "juraj-google-style"}
{"code": "def set_volume(percentage):\n\t\n\n\tif percentage > 100 or percentage < 0:\n\t\traise ValueError('percentage must be an integer between 0 and 100')\n\n\tif system.get_name() == 'windows':\n\t\t\n\t\t\n\t\tpass\n\n\telif system.get_name() == 'mac':\n\t\t\n\t\tvolume_int = percentage / 10\n\n\t\tsp.Popen(['osascript', '-e', 'set Volume %d' % volume_int]).wait()\n\n\telse:\n\t\t\n\t\tformatted = str(percentage) + '%'\n\t\tsp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()", "docstring": "Set the volume.\n\nSets the volume to a given percentage (integer between 0 and 100).\n\nArgs:\npercentage (int): The percentage (as a 0 to 100 integer) to set the volume to.\n\nRaises:\nValueError: if the percentage is >100 or <0.", "source": "juraj-google-style"}
{"code": "def _build_command(self, python_executable, lib_dir_fq, proxy_enabled):\n        \n        exe_command = [\n            os.path.expanduser(python_executable),\n            '-m',\n            'pip',\n            'install',\n            '-r',\n            self.requirements_file,\n            '--ignore-installed',\n            '--quiet',\n            '--target',\n            lib_dir_fq,\n        ]\n        if self.args.no_cache_dir:\n            exe_command.append('--no-cache-dir')\n\n        if proxy_enabled:\n            \n            trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']\n\n            for host in trusted_hosts:\n                exe_command.append('--trusted-host')\n                exe_command.append(host)\n\n        return exe_command", "docstring": "Build the pip command for installing dependencies.\n\nArgs:\npython_executable (str): The fully qualified path of the Python executable.\nlib_dir_fq (str): The fully qualified path of the lib directory.\n\nReturns:\nlist: The Python pip command with all required args.", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in self._SUPPORTED_KEYS:\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    if key == 'chromeos_syslog_line':\n      date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n\n      try:\n        date_time.CopyFromStringISO8601(structure.chromeos_date)\n      except ValueError:\n        parser_mediator.ProduceExtractionWarning(\n            'invalid date time value: {0:s}'.format(structure.chromeos_date))\n        return\n\n    else:\n      \n\n      month = timelib.MONTH_DICT.get(structure.month.lower(), 0)\n      if month != 0:\n        self._UpdateYear(parser_mediator, month)\n\n      time_elements_tuple = (\n          self._year_use, month, structure.day, structure.hour,\n          structure.minute, structure.second)\n\n      try:\n        date_time = dfdatetime_time_elements.TimeElements(\n            time_elements_tuple=time_elements_tuple)\n        date_time.is_local_time = True\n      except ValueError:\n        parser_mediator.ProduceExtractionWarning(\n            'invalid date time value: {0!s}'.format(time_elements_tuple))\n        return\n\n    plugin = None\n    if key == 'syslog_comment':\n      event_data = SyslogCommentEventData()\n      event_data.body = structure.body\n      \n      event_data.offset = 0\n\n    else:\n      event_data = SyslogLineEventData()\n      event_data.body = structure.body\n      event_data.hostname = structure.hostname or None\n      \n      event_data.offset = 0\n      event_data.pid = structure.pid\n      event_data.reporter = structure.reporter\n      event_data.severity = structure.severity\n\n      plugin = self._plugin_by_reporter.get(structure.reporter, None)\n      if plugin:\n        attributes = {\n            'hostname': structure.hostname,\n            'severity': structure.severity,\n            'reporter': structure.reporter,\n            'pid': structure.pid,\n            'body': structure.body}\n\n        try:\n          \n          plugin.Process(parser_mediator, date_time, attributes)\n\n        except errors.WrongPlugin:\n          plugin = None\n\n    if not plugin:\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a matching entry.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): elements parsed from the file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def gather_initializers(root_trackable):\n    trackable_objects = list_objects(root_trackable)\n    return [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None]", "docstring": "Traverse the object graph and find initialization ops.\n\nLooks for `Trackable` objects which are dependencies of\n`root_trackable` and which have an `initializer` property. Includes\ninitializers for slot variables only if the variable they are slotting for and\nthe optimizer are dependencies of `root_trackable` (i.e. if they would be\nsaved with a checkpoint).\n\nArgs:\nroot_trackable: A `Trackable` object to gather initializers for.\n\nReturns:\nA list of initialization ops.", "source": "github-repos"}
{"code": "def get_service_account_token(request, service_account='default'):\n    token_json = get(request, 'instance/service-accounts/{0}/token'.format(service_account))\n    token_expiry = (_helpers.utcnow() + datetime.timedelta(seconds=token_json['expires_in']))\n    return (token_json['access_token'], token_expiry)", "docstring": "Get the OAuth 2.0 access token for a service account.\n\nArgs:\nrequest (google.auth.transport.Request): A callable used to make\nHTTP requests.\nservice_account (str): The string 'default' or a service account email\naddress. The determines which service account for which to acquire\nan access token.\n\nReturns:\nUnion[str, datetime]: The access token and its expiration.\n\nRaises:\ngoogle.auth.exceptions.TransportError: if an error occurred while\nretrieving metadata.", "source": "codesearchnet"}
{"code": "def GetAttribute(self, identifier):\n    if (not self._is_parsed):\n        self._Parse()\n        self._is_parsed = True\n    if (identifier not in self._attributes):\n        return None\n    return self._attributes[identifier]", "docstring": "Retrieves a specific attribute.\n\nArgs:\nidentifier (str): identifier of the attribute within the volume.\n\nReturns:\nVolumeAttribute: volume attribute or None if not available.", "source": "codesearchnet"}
{"code": "def visit_ImportFrom(self, node):\n    if not node.module:\n        self.generic_visit(node)\n        return\n    from_import = node.module\n    from_import_first_component = from_import.split('.')[0]\n    import_renames = getattr(self._api_change_spec, 'import_renames', {})\n    import_rename_spec = import_renames.get(from_import_first_component, None)\n    if not import_rename_spec:\n        self.generic_visit(node)\n        return\n    updated_aliases = []\n    same_aliases = []\n    for import_alias in node.names:\n        full_module_name = '%s.%s' % (from_import, import_alias.name)\n        if excluded_from_module_rename(full_module_name, import_rename_spec):\n            same_aliases.append(import_alias)\n        else:\n            updated_aliases.append(import_alias)\n    if not updated_aliases:\n        self.generic_visit(node)\n        return\n    assert self._stack[-1] is node\n    parent = self._stack[-2]\n    new_from_import = import_rename_spec.new_name + from_import[len(from_import_first_component):]\n    updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)\n    ast.copy_location(updated_node, node)\n    pasta.ast_utils.replace_child(parent, node, updated_node)\n    additional_import_log = ''\n    if same_aliases:\n        same_node = ast.ImportFrom(from_import, same_aliases, node.level, col_offset=node.col_offset, lineno=node.lineno)\n        ast.copy_location(same_node, node)\n        parent.body.insert(parent.body.index(updated_node), same_node)\n        pasta.base.formatting.set(same_node, 'prefix', pasta.base.formatting.get(updated_node, 'prefix'))\n        additional_import_log = ' and %r' % pasta.dump(same_node)\n    self.add_log(INFO, node.lineno, node.col_offset, 'Changed import from %r to %r%s.' % (pasta.dump(node), pasta.dump(updated_node), additional_import_log))\n    self.generic_visit(node)", "docstring": "Handle visiting an import-from node in the AST.\n\nArgs:\nnode: Current Node", "source": "github-repos"}
{"code": "def simple_balance(self, as_of=None, raw=False, leg_query=None, **kwargs):\n    legs = self.legs\n    if as_of:\n        legs = legs.filter(transaction__date__lte=as_of)\n    if (leg_query or kwargs):\n        leg_query = (leg_query or models.Q())\n        legs = legs.filter(leg_query, **kwargs)\n    return ((legs.sum_to_balance() * (1 if raw else self.sign)) + self._zero_balance())", "docstring": "Get the balance for this account, ignoring all child accounts\n\nArgs:\nas_of (Date): Only include transactions on or before this date\nraw (bool): If true the returned balance should not have its sign\nadjusted for display purposes.\nleg_query (models.Q): Django Q-expression, will be used to filter the transaction legs.\nallows for more complex filtering than that provided by **kwargs.\nkwargs (dict): Will be used to filter the transaction legs\n\nReturns:\nBalance", "source": "codesearchnet"}
{"code": "def from_api_repr(cls, resource):\n        \n        from google.cloud.bigquery.dataset import DatasetReference\n\n        project = resource[\"projectId\"]\n        dataset_id = resource[\"datasetId\"]\n        table_id = resource[\"tableId\"]\n        return cls(DatasetReference(project, dataset_id), table_id)", "docstring": "Factory:  construct a table reference given its API representation\n\nArgs:\nresource (Dict[str, object]):\nTable reference representation returned from the API\n\nReturns:\ngoogle.cloud.bigquery.table.TableReference:\nTable reference parsed from ``resource``.", "source": "juraj-google-style"}
{"code": "def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None):\n    self._func = func\n    graph_def = _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining)\n    super(_FunctionConverterData, self).__init__(graph_def, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist)\n    self._build_tensor_data()", "docstring": "Creates the conversion data for the given function.\n\nArgs:\nfunc: ConcreteFunction.\nlower_control_flow: Boolean indicating whether or not to lower control\nflow ops such as If and While.\naggressive_inlining: Boolean indicating whether or not to do aggressive\nfunction inlining (might be unsafe if function has stateful ops, not\nproperly connected to control outputs).\nvariable_names_allowlist: The set of variable names to convert (by\ndefault, all variables are converted).\nvariable_names_denylist: The set of variable names to omit converting to\nconstants.", "source": "github-repos"}
{"code": "def delete_public_ip(access_token, subscription_id, resource_group, public_ip_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name, '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a public ip addresses associated with a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\npublic_ip_name (str): Name of the public ip address resource.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def get_available_storage_system(self, **kwargs):\n    uri = self._helper.build_uri_with_query_string(kwargs, '/available-storage-system')\n    return self._helper.do_get(uri)", "docstring": "Retrieves a specific storage system and its associated volumes available to the server profile based\non the given server hardware type and enclosure group.\n\nArgs:\nenclosureGroupUri (str):\nThe URI of the enclosure group associated with the resource.\nserverHardwareTypeUri (str):\nThe URI of the server hardware type associated with the resource.\nstorageSystemId (str):\nThe storage system ID associated with the resource.\n\nReturns:\ndict: Available storage system.", "source": "codesearchnet"}
{"code": "def __init__(self, minimum=None, maximum=None):\n        \n        super(FloatTypeChecker, self).__init__(base_type=float)\n        self.minimum = minimum\n        self.maximum = maximum", "docstring": "Initialization method.\n\nArgs:\nminimum (float): a minimum value (included).\nmaximum (float): a maximum value (included).", "source": "juraj-google-style"}
{"code": "def _update_job_info(cls, job_dir):\n    meta_file = os.path.join(job_dir, JOB_META_FILE)\n    meta = parse_json(meta_file)\n    if meta:\n        logging.debug(('Update job info for %s' % meta['job_id']))\n        JobRecord.objects.filter(job_id=meta['job_id']).update(end_time=timestamp2date(meta['end_time']))", "docstring": "Update information for given job.\n\nMeta file will be loaded if exists, and the job information in\nin db backend will be updated.\n\nArgs:\njob_dir (str): Directory path of the job.\n\nReturn:\nUpdated dict of job meta info", "source": "codesearchnet"}
{"code": "def load_profile_include(self, include_directory):\n        \n\n        include_directory = os.path.join(self.app_path, include_directory)\n        if not os.path.isdir(include_directory):\n            msg = 'Provided include directory does not exist ({}).'.format(include_directory)\n            sys.exit(msg)\n\n        for filename in sorted(os.listdir(include_directory)):\n            if filename.endswith('.json'):\n                fqfn = os.path.join(include_directory, filename)\n                self.load_profiles_from_file(fqfn)", "docstring": "Load included configuration files.\n\nArgs:\ninclude_directory (str): The path of the profile include directory.", "source": "juraj-google-style"}
{"code": "def _GetAxisFromLabel(subscripts, label):\n    splits = subscripts.split(ellipsis)\n    index = splits[0].find(label)\n    if index != -1:\n        return index\n    if len(splits) < 2:\n        return None\n    index = splits[1].find(label)\n    if index != -1:\n        return index - len(splits[1])\n    return None", "docstring": "Returns the axis (possibly negative) corresponding to a label.\n\nReturns the axis index of the axis label if it is before an ellipsis (or if\nthe ellipsis is not present), and the negative index if it occurs after the\nellipsis. E.g. index of `b` in `ab...cd`, is `1`, but that of `c` is `-2`.\n\nFor multiple occurrences, returns the leftmost one. If not found, returns\nNone.\n\nArgs:\nsubscripts: A string denoting the einsum subscript (e.g. `ab...cd`)\nlabel: The single character axis label.", "source": "github-repos"}
{"code": "def setup(self, host, flow_id, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):\n    super(GRRFlowCollector, self).setup(reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify)\n    self.flow_id = flow_id\n    self.host = host", "docstring": "Initializes a GRR flow collector.\n\nArgs:\nhost: hostname of machine.\nflow_id: ID of GRR flow to retrieve.\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "codesearchnet"}
{"code": "def ContainsAll(self, *values):\n    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ALL')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains all\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def Write(self, output_writer):\n    \n    if self._title and len(self._title) > self._MAXIMUM_WIDTH:\n      raise RuntimeError('Title length out of bounds.')\n\n    if self._number_of_columns not in (0, 2):\n      raise RuntimeError('Unsupported number of columns: {0:d}.'.format(\n          self._number_of_columns))\n\n    if self._column_width < 0 or self._column_width >= self._MAXIMUM_WIDTH:\n      raise RuntimeError('Column width out of bounds.')\n\n    output_writer.Write('\\n')\n\n    self._WriteHeader(output_writer)\n\n    if self._columns:\n      self._WriteRow(output_writer, self._columns)\n      self._WriteSeparatorLine(output_writer)\n\n    for values in self._rows:\n      self._WriteRow(output_writer, values)\n\n    self._WriteSeparatorLine(output_writer)", "docstring": "Writes the table to the output writer.\n\nArgs:\noutput_writer (OutputWriter): output writer.\n\nRaises:\nRuntimeError: if the title exceeds the maximum width or\nif the table has more than 2 columns or\nif the column width is out of bounds.", "source": "juraj-google-style"}
{"code": "def convert_unicode(value):\n    if isinstance(value, dict):\n        return {convert_unicode(key): convert_unicode(value) for (key, value) in value.iteritems()}\n    elif isinstance(value, list):\n        return [convert_unicode(item) for item in value]\n    elif isinstance(value, unicode):\n        return value.encode('utf-8')\n    else:\n        return value", "docstring": "Resolves python 2 issue with json loading in unicode instead of string\n\nArgs:\nvalue (str): Unicode value to be converted\n\nReturns:\n(str): converted string", "source": "codesearchnet"}
{"code": "def get_average_along_axis(self, ind):\n        \n        m = self.data[\"total\"]\n        ng = self.dim\n        if ind == 0:\n            total = np.sum(np.sum(m, axis=1), 1)\n        elif ind == 1:\n            total = np.sum(np.sum(m, axis=0), 1)\n        else:\n            total = np.sum(np.sum(m, axis=0), 0)\n        return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]", "docstring": "Get the averaged total of the volumetric data a certain axis direction.\nFor example, useful for visualizing Hartree Potentials from a LOCPOT\nfile.\n\nArgs:\nind (int): Index of axis.\n\nReturns:\nAverage total along axis", "source": "juraj-google-style"}
{"code": "def __init__(self, builtins, full_names=True, allow_singletons=False):\n    super().__init__(allow_singletons)\n    self._builtins = builtins\n    self._full_names = full_names", "docstring": "Create this visitor.\n\nArgs:\nbuiltins: The builtins module.\nfull_names: Whether to use fully qualified names for lookup.\nallow_singletons: Whether to allow singleton types like Ellipsis.", "source": "github-repos"}
{"code": "def _aggregate_gradients(self, grads_and_vars):\n    return self.gradient_aggregator(grads_and_vars)", "docstring": "Called in `apply_gradients` to aggregate gradients across devices.\n\nNote that user subclasses may override this, so the interface should not be\nchanged.\n\nArgs:\ngrads_and_vars: List of (gradient, variable) pairs.\n\nReturns:\nA list of (aggregated_gradient, variable) pairs. By default, this calls\n`self.gradient_aggregator`.", "source": "github-repos"}
{"code": "def format_plugins(plugins):\n    formatted = []\n    for plugin_ in plugins:\n        formatted_plugin = format_plugin(plugin_)\n        formatted.append(formatted_plugin)\n    return formatted", "docstring": "Serialise multiple plug-in\n\nReturns:\nList of JSON-compatible plug-ins", "source": "codesearchnet"}
{"code": "def getFilepaths(self, filename):\n    return (os.path.join(os.environ['HOME'], filename), os.path.join(self.mackup.mackup_folder, filename))", "docstring": "Get home and mackup filepaths for given file\n\nArgs:\nfilename (str)\n\nReturns:\nhome_filepath, mackup_filepath (str, str)", "source": "codesearchnet"}
{"code": "def check_syntax(self, app_path=None):\n    app_path = (app_path or '.')\n    for filename in sorted(os.listdir(app_path)):\n        error = None\n        status = True\n        if filename.endswith('.py'):\n            try:\n                with open(filename, 'rb') as f:\n                    ast.parse(f.read(), filename=filename)\n            except SyntaxError:\n                status = False\n                e = []\n                for line in traceback.format_exc().split('\\n')[(- 5):(- 2)]:\n                    e.append(line.strip())\n                error = ' '.join(e)\n        elif filename.endswith('.json'):\n            try:\n                with open(filename, 'r') as fh:\n                    json.load(fh)\n            except ValueError as e:\n                status = False\n                error = e\n        else:\n            continue\n        if error:\n            self.validation_data['errors'].append('Syntax validation failed for {} ({}).'.format(filename, error))\n        self.validation_data['fileSyntax'].append({'filename': filename, 'status': status})", "docstring": "Run syntax on each \".py\" and \".json\" file.\n\nArgs:\napp_path (str, optional): Defaults to None. The path of Python files.", "source": "codesearchnet"}
{"code": "def _filter_and_bucket_subtokens(subtoken_counts, min_count):\n    subtoken_buckets = []\n    for (subtoken, count) in six.iteritems(subtoken_counts):\n        if (count < min_count):\n            continue\n        while (len(subtoken_buckets) <= len(subtoken)):\n            subtoken_buckets.append(set())\n        subtoken_buckets[len(subtoken)].add(subtoken)\n    return subtoken_buckets", "docstring": "Return a bucketed list of subtokens that are filtered by count.\n\nArgs:\nsubtoken_counts: defaultdict mapping subtokens to their counts\nmin_count: int count used to filter subtokens\n\nReturns:\nList of subtoken sets, where subtokens in set i have the same length=i.", "source": "codesearchnet"}
{"code": "def normalize_collaboration(collaboration):\n    \n    if not collaboration:\n        return []\n\n    collaboration = collaboration.strip()\n    if collaboration.startswith('(') and collaboration.endswith(')'):\n        collaboration = collaboration[1:-1]\n\n    collaborations = _RE_AND.split(collaboration)\n    collaborations = (_RE_COLLABORATION_LEADING.sub('', collab)\n                      for collab in collaborations)\n    collaborations = (_RE_COLLABORATION_TRAILING.sub('', collab)\n                      for collab in collaborations)\n\n    return [collab.strip() for collab in collaborations]", "docstring": "Normalize collaboration string.\n\nArgs:\ncollaboration: a string containing collaboration(s) or None\n\nReturns:\nlist: List of extracted and normalized collaborations\n\nExamples:\n>>> from inspire_schemas.utils import normalize_collaboration\n>>> normalize_collaboration('for the CMS and ATLAS Collaborations')\n['CMS', 'ATLAS']", "source": "juraj-google-style"}
{"code": "def CreateSourceType(cls, type_indicator, attributes):\n    if (type_indicator not in cls._source_type_classes):\n        raise errors.FormatError('Unsupported type indicator: {0:s}.'.format(type_indicator))\n    return cls._source_type_classes[type_indicator](**attributes)", "docstring": "Creates a source type.\n\nArgs:\ntype_indicator (str): source type indicator.\nattributes (dict[str, object]): source type attributes.\n\nReturns:\nSourceType: a source type.\n\nRaises:\nFormatError: if the type indicator is not set or unsupported,\nor if required attributes are missing.", "source": "codesearchnet"}
{"code": "def get_dimension_index(self, dimension):\n        \n        if isinstance(dimension, int):\n            if (dimension < (self.ndims + len(self.vdims)) or\n                dimension < len(self.dimensions())):\n                return dimension\n            else:\n                return IndexError('Dimension index out of bounds')\n        dim = dimension_name(dimension)\n        try:\n            dimensions = self.kdims+self.vdims\n            return [i for i, d in enumerate(dimensions) if d == dim][0]\n        except IndexError:\n            raise Exception(\"Dimension %s not found in %s.\" %\n                            (dim, self.__class__.__name__))", "docstring": "Get the index of the requested dimension.\n\nArgs:\ndimension: Dimension to look up by name or by index\n\nReturns:\nInteger index of the requested dimension", "source": "juraj-google-style"}
{"code": "def extract_lookups_from_string(value):\n    \n    lookups = set()\n    for match in LOOKUP_REGEX.finditer(value):\n        groupdict = match.groupdict()\n        raw = match.groups()[0]\n        lookup_type = groupdict[\"type\"]\n        lookup_input = groupdict[\"input\"]\n        lookups.add(Lookup(lookup_type, lookup_input, raw))\n    return lookups", "docstring": "Extract any lookups within a string.\n\nArgs:\nvalue (str): string value we're extracting lookups from\n\nReturns:\nlist: list of :class:`stacker.lookups.Lookup` if any", "source": "juraj-google-style"}
{"code": "def verify_ed25519_signature(public_key, contents, signature, message):\n    try:\n        public_key.verify(signature, contents)\n    except InvalidSignature as exc:\n        raise ScriptWorkerEd25519Error((message % {'exc': str(exc)}))", "docstring": "Verify that ``signature`` comes from ``public_key`` and ``contents``.\n\nArgs:\npublic_key (Ed25519PublicKey): the key to verify the signature\ncontents (bytes): the contents that was signed\nsignature (bytes): the signature to verify\nmessage (str): the error message to raise.\n\nRaises:\nScriptWorkerEd25519Error: on failure", "source": "codesearchnet"}
{"code": "def ch_stop_time(self, *channels: List[Channel]) -> int:\n    intervals = list(itertools.chain(*(self._table[chan] for chan in channels if (chan in self._table))))\n    if intervals:\n        return max((interval.end for interval in intervals))\n    return 0", "docstring": "Return maximum time of timeslots over all channels.\n\nArgs:\n*channels: Channels over which to obtain stop time.", "source": "codesearchnet"}
{"code": "def create_test_record(self, mobly_test_class):\n    details = self._get_details()\n    extras = self._get_extras()\n    tr_record = records.TestResultRecord(t_name=self._get_full_name(), t_class=mobly_test_class)\n    if self._begin_time:\n        tr_record.begin_time = self._begin_time\n    if self._is_failed():\n        tr_record.test_fail(e=signals.TestFailure(details=details, extras=extras))\n    elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:\n        tr_record.test_skip(e=signals.TestSkip(details=details, extras=extras))\n    elif self._status_code in _InstrumentationStatusCodeCategories.PASS:\n        tr_record.test_pass(e=signals.TestPass(details=details, extras=extras))\n    elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:\n        if self._error_message:\n            tr_record.test_error(e=signals.TestError(details=details, extras=extras))\n        else:\n            tr_record = None\n    else:\n        tr_record.test_error(e=signals.TestError(details=details, extras=extras))\n    if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:\n        tr_record.termination_signal.stacktrace = self._known_keys[_InstrumentationKnownStatusKeys.STACK]\n    return tr_record", "docstring": "Creates a TestResultRecord for the instrumentation block.\n\nArgs:\nmobly_test_class: string, the name of the Mobly test case\nexecuting the instrumentation run.\n\nReturns:\nA TestResultRecord with an appropriate signals exception\nrepresenting the instrumentation test method's result status.", "source": "github-repos"}
{"code": "def do_keygen(args):\n    \n    if args.key_name is not None:\n        key_name = args.key_name\n    else:\n        key_name = 'validator'\n\n    key_dir = get_key_dir()\n\n    if not os.path.exists(key_dir):\n        raise CliException(\"Key directory does not exist: {}\".format(key_dir))\n\n    priv_filename = os.path.join(key_dir, key_name + '.priv')\n    pub_filename = os.path.join(key_dir, key_name + '.pub')\n\n    if not args.force:\n        file_exists = False\n        for filename in [priv_filename, pub_filename]:\n            if os.path.exists(filename):\n                file_exists = True\n                print('file exists: {}'.format(filename), file=sys.stderr)\n        if file_exists:\n            raise CliException(\n                'files exist, rerun with --force to overwrite existing files')\n\n    context = create_context('secp256k1')\n\n    private_key = context.new_random_private_key()\n    public_key = context.get_public_key(private_key)\n\n    try:\n        priv_exists = os.path.exists(priv_filename)\n        with open(priv_filename, 'w') as priv_fd:\n            if not args.quiet:\n                if priv_exists:\n                    print('overwriting file: {}'.format(priv_filename))\n                else:\n                    print('writing file: {}'.format(priv_filename))\n            priv_fd.write(private_key.as_hex())\n            priv_fd.write('\\n')\n            \n            keydir_info = os.stat(key_dir)\n            keydir_gid = keydir_info.st_gid\n            keydir_uid = keydir_info.st_uid\n            \n            os.chown(priv_filename, keydir_uid, keydir_gid)\n            \n            os.chmod(priv_filename, 0o640)\n\n        pub_exists = os.path.exists(pub_filename)\n        with open(pub_filename, 'w') as pub_fd:\n            if not args.quiet:\n                if pub_exists:\n                    print('overwriting file: {}'.format(pub_filename))\n                else:\n                    print('writing file: {}'.format(pub_filename))\n            pub_fd.write(public_key.as_hex())\n            pub_fd.write('\\n')\n            \n            os.chown(pub_filename, keydir_uid, keydir_gid)\n            \n            os.chmod(pub_filename, 0o644)\n\n    except IOError as ioe:\n        raise CliException('IOError: {}'.format(str(ioe)))", "docstring": "Executes the key generation operation, given the parsed arguments.\n\nArgs:\nargs (:obj:`Namespace`): The parsed args.", "source": "juraj-google-style"}
{"code": "def pop_chunk(self, chunk_max_size):\n    if (self._total_length < chunk_max_size):\n        res = self._tobytes()\n        self.clear()\n        return res\n    first_iteration = True\n    while True:\n        try:\n            data = self._deque.popleft()\n            data_length = len(data)\n            self._total_length -= data_length\n            if first_iteration:\n                if (data_length == chunk_max_size):\n                    return data\n                elif (data_length > chunk_max_size):\n                    view = self._get_pointer_or_memoryview(data, data_length)\n                    self.appendleft(view[chunk_max_size:])\n                    return view[:chunk_max_size]\n                else:\n                    chunk_write_buffer = WriteBuffer()\n            elif ((chunk_write_buffer._total_length + data_length) > chunk_max_size):\n                view = self._get_pointer_or_memoryview(data, data_length)\n                limit = ((chunk_max_size - chunk_write_buffer._total_length) - data_length)\n                self.appendleft(view[limit:])\n                data = view[:limit]\n            chunk_write_buffer.append(data)\n            if (chunk_write_buffer._total_length >= chunk_max_size):\n                break\n        except IndexError:\n            self._has_view = False\n            break\n        first_iteration = False\n    return chunk_write_buffer._tobytes()", "docstring": "Pops a chunk of the given max size.\n\nOptimized to avoid too much string copies.\n\nArgs:\nchunk_max_size (int): max size of the returned chunk.\n\nReturns:\nstring (bytes) with a size <= chunk_max_size.", "source": "codesearchnet"}
{"code": "def random(length: int=8, chars: str=(digits + ascii_lowercase)) -> Iterator[str]:\n    while True:\n        (yield ''.join([choice(chars) for _ in range(length)]))", "docstring": "A random string.\n\nNot unique, but has around 1 in a million chance of collision (with the default 8\ncharacter length). e.g. 'fubui5e6'\n\nArgs:\nlength: Length of the random string.\nchars: The characters to randomly choose from.", "source": "codesearchnet"}
{"code": "def _get_row_partition_type_tensor_pairs_tail(partition):\n    if partition._has_precomputed_value_rowids():\n        return ('VALUE_ROWIDS', partition.value_rowids())\n    else:\n        return ('ROW_SPLITS', partition.row_splits())", "docstring": "Gets a row partition type tensor pair for the tail.\n\nIf value_rowid is defined, then it is used. Otherwise, row_splits\nare used.\n\nArgs:\npartition: a RowPartition.\n\nReturns:\nA list of (row_partition_type, row_partition_tensor) pairs.", "source": "github-repos"}
{"code": "def _check_zero_size(self):\n    block_zero = ((self.end[0] <= self.start[0]) or (self.end[1] <= self.start[1]))\n    if block_zero:\n        self.flag_change(self.flags, 'fatal', worksheet=self.worksheet, message=self.FLAGS['0-size'])\n    return block_zero", "docstring": "Checks for zero height or zero width blocks and flags the occurrence.\n\nReturns:\nTrue if the block is size 0.", "source": "codesearchnet"}
{"code": "def add_to_cache(cls, remote_info, container):  \n    \n    if not isinstance(container, cls):\n      raise TypeError('%r not an instance of %r, could not be added to cache.' %\n                      (container, cls))\n    if remote_info in cls.__remote_info_cache:\n      raise KeyError('Cache has collision but should not.')\n    cls.__remote_info_cache[remote_info] = container", "docstring": "Adds a ResourceContainer to a cache tying it to a protorpc method.\n\nArgs:\nremote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding\nto a method.\ncontainer: An instance of ResourceContainer.\n\nRaises:\nTypeError: if the container is not an instance of cls.\nKeyError: if the remote method has been reference by a container before.\nThis created remote method should never occur because a remote method\nis created once.", "source": "juraj-google-style"}
{"code": "def __init__(self, permissive=True):\n        \n        self._journal_contents = ''\n        self._init_journal(permissive=permissive)", "docstring": "Inititalize the journal maker object.\n\nAppends the first lines in the journal (JrnObj variable and timestamp)\nto the _journal_contents.\n\nArgs:\npermissive (bool): if True most errors in journal will not\ncause Revit to stop journal execution.\nSome still do.", "source": "juraj-google-style"}
{"code": "def transmute(df, *keep_columns, **kwargs):\n    keep_cols = []\n    for col in flatten(keep_columns):\n        try:\n            keep_cols.append(col.name)\n        except:\n            if isinstance(col, str):\n                keep_cols.append(col)\n            elif isinstance(col, int):\n                keep_cols.append(df.columns[col])\n    df = df.assign(**kwargs)\n    columns = ([k for k in kwargs.keys()] + list(keep_cols))\n    return df[columns]", "docstring": "Creates columns and then returns those new columns and optionally specified\noriginal columns from the DataFrame.\n\nThis works like `mutate`, but designed to discard the original columns used\nto create the new ones.\n\nArgs:\n*keep_columns: Column labels to keep. Can be string, symbolic, or\ninteger position.\n\nKwargs:\n**kwargs: keys are the names of the new columns, values indicate\nwhat the new column values will be.\n\nExample:\ndiamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)\n\ny_div_z  x_plus_y\n0  1.637860      7.93\n1  1.662338      7.73\n2  1.761905      8.12", "source": "codesearchnet"}
{"code": "def _build_rule_message(self, column: str, rule: str, error: str, value: Any, rule_params: dict={}) -> LogMessage:\n    return self._base_log.copy() | LogMessage(log_type=LogType.RULE.value, column=column, rule=rule, error=error, value=value, rule_params=json.dumps(rule_params))", "docstring": "Adds rule error information to base log message.\n\nArgs:\n* column: column where the rule is applied\n* rule: rule that is violated and raises this message\n* value: value that violates the rule\n* rule_params: optional, parameters set for the rule\n\nReturns:\n* log: LogMessage dictionary", "source": "github-repos"}
{"code": "def _enter_scope_uncached(self):\n    if self._auxiliary_name_scope:\n        current_name_scope = None\n    else:\n        name_scope = ops.get_name_scope()\n        if name_scope:\n            name_scope += '/'\n            current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)\n        else:\n            current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)\n    if self._name_or_scope is not None:\n        if not isinstance(self._name_or_scope, (VariableScope, str)):\n            raise TypeError('VariableScope: name_or_scope must be a string or VariableScope.')\n        if isinstance(self._name_or_scope, str):\n            name_scope = self._name_or_scope\n        else:\n            name_scope = self._name_or_scope.name.split('/')[-1]\n        if name_scope or current_name_scope:\n            current_name_scope = current_name_scope or ops.name_scope(name_scope, skip_on_eager=False)\n            try:\n                current_name_scope_name = current_name_scope.__enter__()\n            except:\n                current_name_scope.__exit__(*sys.exc_info())\n                raise\n            self._current_name_scope = current_name_scope\n            if isinstance(self._name_or_scope, str):\n                old_name_scope = current_name_scope_name\n            else:\n                old_name_scope = self._name_or_scope.original_name_scope\n            pure_variable_scope = _pure_variable_scope(self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=old_name_scope, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)\n            try:\n                entered_pure_variable_scope = pure_variable_scope.__enter__()\n            except:\n                pure_variable_scope.__exit__(*sys.exc_info())\n                raise\n            self._cached_pure_variable_scope = pure_variable_scope\n            return entered_pure_variable_scope\n        else:\n            self._current_name_scope = None\n            pure_variable_scope = _pure_variable_scope(self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)\n            try:\n                entered_pure_variable_scope = pure_variable_scope.__enter__()\n            except:\n                pure_variable_scope.__exit__(*sys.exc_info())\n                raise\n            self._cached_pure_variable_scope = pure_variable_scope\n            return entered_pure_variable_scope\n    else:\n        if self._reuse:\n            raise ValueError('reuse=True cannot be used without a name_or_scope')\n        current_name_scope = current_name_scope or ops.name_scope(self._default_name, skip_on_eager=False)\n        try:\n            current_name_scope_name = current_name_scope.__enter__()\n        except:\n            current_name_scope.__exit__(*sys.exc_info())\n            raise\n        self._current_name_scope = current_name_scope\n        unique_default_name = _get_unique_variable_scope(self._default_name)\n        pure_variable_scope = _pure_variable_scope(unique_default_name, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, 
custom_getter=self._custom_getter, old_name_scope=current_name_scope_name, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)\n        try:\n            entered_pure_variable_scope = pure_variable_scope.__enter__()\n        except:\n            pure_variable_scope.__exit__(*sys.exc_info())\n            raise\n        self._cached_pure_variable_scope = pure_variable_scope\n        return entered_pure_variable_scope", "docstring": "Enters the context manager when there is no cached scope yet.\n\nReturns:\nThe entered variable scope.\n\nRaises:\nTypeError: A wrong type is passed as `scope` at __init__().\nValueError: `reuse` is incorrectly set at __init__().", "source": "github-repos"}
{"code": "def evaluate(estimator, eval_args):\n    values = {}\n    checkpoint_path = estimator.latest_checkpoint()\n    if (not checkpoint_path):\n        return values\n    tf.logging.info('Starting evaluation on checkpoint %s', checkpoint_path)\n    for eval_name in eval_args:\n        (input_fn, eval_steps) = eval_args[eval_name]\n        metric_values = estimator.evaluate(input_fn, steps=eval_steps, name=eval_name, checkpoint_path=checkpoint_path)\n        for (key, val) in metric_values.iteritems():\n            values[((eval_name + '/') + key)] = val\n    tf.logging.info(values)\n    return values", "docstring": "Runs evaluation on the latest model checkpoint & logs to tensorboard.\n\nArgs:\nestimator: A tf.Estimator object.\neval_args: Dictionary of {eval_name: (input_fn, eval_steps)} where eval_name\nis the name of the evaluation set, e.g. \"train\" or \"val\", input_fn is an\ninput function returning a tuple (features, labels), and eval_steps is the\nnumber of steps for which to evaluate the model. If None, evaluates until\ninput_fn raises an end-of-input exception.\n\nReturns:\nA dict of metric values from the evaluation. May be empty, e.g. if the\ntraining job has not yet saved a checkpoint or the checkpoint is deleted by\nthe time the TPU worker initializes.", "source": "codesearchnet"}
{"code": "def train_fn(data_dir=None, output_dir=None, model_class=gin.REQUIRED, dataset=gin.REQUIRED, input_names=None, target_names=None, train_steps=1000, eval_steps=1, eval_frequency=100):\n    (train_data, eval_data, features_info, keys) = train_and_eval_dataset(dataset, data_dir)\n    if (input_names is None):\n        input_names = keys[0]\n    if (target_names is None):\n        target_names = keys[1]\n    model = model_class(features_info=features_info, input_names=input_names, target_names=target_names)\n    optimize_fn(model)\n    train_batches = shuffle_and_batch_data(train_data, target_names, features_info, training=True)\n    eval_batches = shuffle_and_batch_data(eval_data, target_names, features_info, training=False)\n    model.fit(train_batches, epochs=1, steps_per_epoch=1)\n    callbacks = []\n    callbacks.append(tf.keras.callbacks.History())\n    callbacks.append(tf.keras.callbacks.BaseLogger())\n    last_epoch = 0\n    if (output_dir is not None):\n        callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))\n        output_format = os.path.join(output_dir, 'model-{epoch:05d}')\n        callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=output_format, save_weights_only=True))\n        checkpoints = tf.gfile.Glob(os.path.join(output_dir, 'model-*'))\n        checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]\n        epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if (len(ckpt) > 4)]\n        epoch_numbers.sort()\n        if epoch_numbers:\n            last_epoch = epoch_numbers[(- 1)]\n            saved_path = os.path.join(output_dir, ('model-%05d' % last_epoch))\n            model.load_weights(saved_path)\n    model.fit(train_batches, epochs=(train_steps", "docstring": "Train the given model on the given dataset.\n\nArgs:\ndata_dir: Directory where the data is located.\noutput_dir: Directory where to put the logs and checkpoints.\nmodel_class: The model class to train.\ndataset: The name of the dataset to train on.\ninput_names: List of strings with the names of the features on input.\ntarget_names: List of strings with the names of the target features.\ntrain_steps: for how many steps to train.\neval_steps: for how many steps to do evaluation.\neval_frequency: how often (every this many steps) to run evaluation.", "source": "codesearchnet"}
{"code": "def internal_link_sets(self):\n    if (not self.__internal_link_sets):\n        self.__internal_link_sets = InternalLinkSets(self.__connection)\n    return self.__internal_link_sets", "docstring": "Gets the InternalLinkSets API client.\n\nReturns:\nInternalLinkSets:", "source": "codesearchnet"}
{"code": "def parse_section_links(self, section_title):\n        \n        soup = BeautifulSoup(self.html, \"html.parser\")\n        headlines = soup.find_all(\"span\", {\"class\": \"mw-headline\"})\n        tmp_soup = BeautifulSoup(section_title, \"html.parser\")\n        tmp_sec_title = tmp_soup.get_text().lower()\n        id_tag = None\n        for headline in headlines:\n            tmp_id = headline.text\n            if tmp_id.lower() == tmp_sec_title:\n                id_tag = headline.get(\"id\")\n                break\n\n        if id_tag is not None:\n            return self._parse_section_links(id_tag)\n        return None", "docstring": "Parse all links within a section\n\nArgs:\nsection_title (str): Name of the section to pull\nReturns:\nlist: List of (title, url) tuples\nNote:\nReturns **None** if section title is not found\nNote:\nSide effect is to also pull the html which can be slow\nNote:\nThis is a parsing operation and not part of the standard API", "source": "juraj-google-style"}
{"code": "def init_variable(v, init, name='init'):\n    with ops.name_scope(None, v.op.name + '/', [v, init]):\n        with ops.name_scope(name) as scope:\n            with ops.colocate_with(v):\n                if callable(init):\n                    assert v.get_shape().is_fully_defined(), 'Variable shape unknown.'\n                    value = init(v.get_shape().as_list(), v.dtype.base_dtype)\n                    value = ops.convert_to_tensor(value, name='value')\n                    return gen_state_ops.assign(v, value, name=scope)\n                else:\n                    init = ops.convert_to_tensor(init, name='init')\n                    return gen_state_ops.assign(v, init, name=scope)", "docstring": "Initializes variable with \"init\".\n\nThis op does the following:\nif init is a Tensor, v = init\nif callable(init): v = init(VariableShape(v), v.dtype)\n\nArgs:\nv: Variable to initialize\ninit: Tensor to assign to v,\nOr an object convertible to Tensor e.g. nparray,\nOr an Initializer that generates a tensor given the shape and type of v.\nAn \"Initializer\" is a callable that returns a tensor that \"v\" should be\nset to. It will be called as init(shape, dtype).\nname: Optional name for the op.\n\nReturns:\nThe operation that initializes v.", "source": "github-repos"}
{"code": "def rouge_l_fscore(predictions, labels):\n  \n  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),\n                               tf.float32)\n  return rouge_l_f_score, tf.constant(1.0)", "docstring": "ROUGE scores computation between labels and predictions.\n\nThis is an approximate ROUGE scoring method since we do not glue word pieces\nor decode the ids and tokenize the output.\n\nArgs:\npredictions: tensor, model predictions\nlabels: tensor, gold output.\n\nReturns:\nrouge_l_fscore: approx rouge-l f1 score.", "source": "juraj-google-style"}
{"code": "def blend(self, other, percent=0.5):\n    \n    dest = 1.0 - percent\n    rgb = tuple(((u * percent) + (v * dest) for u, v in zip(self.__rgb, other.__rgb)))\n    a = (self.__a * percent) + (other.__a * dest)\n    return Color(rgb, 'rgb', a, self.__wref)", "docstring": "blend this color with the other one.\n\nArgs:\n:other:\nthe grapefruit.Color to blend with this one.\n\nReturns:\nA grapefruit.Color instance which is the result of blending\nthis color on the other one.\n\n>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)\n>>> c2 = Color.from_rgb(1, 1, 1, 0.6)\n>>> c3 = c1.blend(c2)\n>>> c3\nColor(1.0, 0.75, 0.5, 0.4)", "source": "juraj-google-style"}
{"code": "def _add_to_quick_menu(self, key, wf):\n        \n        if key in settings.QUICK_MENU:\n            self.output['quick_menu'].append(wf)", "docstring": "Appends menu entries to dashboard quickmenu according\nto :attr:`zengine.settings.QUICK_MENU`\n\nArgs:\nkey: workflow name\nwf: workflow menu entry", "source": "juraj-google-style"}
{"code": "def hr_dp004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `hr_dp004`'.format(value))\n\n        self._hr_dp004 = value", "docstring": "Corresponds to IDD Field `hr_dp004`\nhumidity ratio corresponding to\nDew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `hr_dp004`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def variable_created_in_scope(self, v):\n    return v._distribute_strategy == self._container_strategy_weakref()", "docstring": "Tests whether `v` was created while this strategy scope was active.\n\nVariables created inside the strategy scope are \"owned\" by it:\n\n>>> strategy = tf.distribute.MirroredStrategy()\n>>> with strategy.scope():\n...   v = tf.Variable(1.)\n>>> strategy.extended.variable_created_in_scope(v)\nTrue\n\nVariables created outside the strategy are not owned by it:\n\n>>> strategy = tf.distribute.MirroredStrategy()\n>>> v = tf.Variable(1.)\n>>> strategy.extended.variable_created_in_scope(v)\nFalse\n\nArgs:\nv: A `tf.Variable` instance.\n\nReturns:\nTrue if `v` was created inside the scope, False if not.", "source": "github-repos"}
{"code": "def register_rml(self, filepath, **kwargs):\n    name = os.path.split(filepath)[(- 1)]\n    if ((name in self.rml_maps) and (self.rml_maps[name] != filepath)):\n        raise Exception('RML name already registered. Filenames must be unique.', (self.rml_maps[name], filepath))\n    self.rml_maps[name] = filepath", "docstring": "Registers the filepath for an rml mapping\n\nArgs:\n-----\nfilepath: the path the rml file", "source": "codesearchnet"}
{"code": "def config_conf_section():\n    config_dict = OrderedDict((('create', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'create most global config file')), ('create_local', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'create most local config file')), ('update', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'add missing entries to config file')), ('edit', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'open config file in a text editor')), ('editor', ConfOpt('vim', False, None, {}, True, 'text editor'))))\n    return config_dict", "docstring": "Define a configuration section handling config file.\n\nReturns:\ndict of ConfOpt: it defines the 'create', 'update', 'edit' and 'editor'\nconfiguration options.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    if hidden_states.dtype == torch.float16:\n        clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def build_transcript(transcript_info, build='37'):\n    \n    try:\n        transcript_id = transcript_info['ensembl_transcript_id']\n    except KeyError:\n        raise KeyError(\"Transcript has to have ensembl id\")\n    \n    build = build\n    is_primary = transcript_info.get('is_primary', False)\n    \n    refseq_id = transcript_info.get('refseq_id')\n    refseq_identifiers = transcript_info.get('refseq_identifiers')\n\n    try:\n        chrom = transcript_info['chrom']\n    except KeyError:\n        raise KeyError(\"Transcript has to have a chromosome\")\n    \n    try:\n        start = int(transcript_info['transcript_start'])\n    except KeyError:\n        raise KeyError(\"Transcript has to have start\")\n    except TypeError:\n        raise TypeError(\"Transcript start has to be integer\")\n\n    try:\n        end = int(transcript_info['transcript_end'])\n    except KeyError:\n        raise KeyError(\"Transcript has to have end\")\n    except TypeError:\n        raise TypeError(\"Transcript end has to be integer\")\n\n    try:\n        hgnc_id = int(transcript_info['hgnc_id'])\n    except KeyError:\n        raise KeyError(\"Transcript has to have a hgnc id\")\n    except TypeError:\n        raise TypeError(\"hgnc id has to be integer\")\n\n    transcript_obj = HgncTranscript(\n        transcript_id=transcript_id, \n        hgnc_id=hgnc_id, \n        chrom=chrom, \n        start=start, \n        end=end, \n        is_primary=is_primary, \n        refseq_id=refseq_id,\n        refseq_identifiers=refseq_identifiers,\n        build=build\n    )\n    \n    for key in list(transcript_obj):\n        if transcript_obj[key] is None:\n            transcript_obj.pop(key)\n\n    return transcript_obj", "docstring": "Build a hgnc_transcript object\n\nArgs:\ntranscript_info(dict): Transcript information\n\nReturns:\ntranscript_obj(HgncTranscript)\n{\ntranscript_id: str, required\nhgnc_id: int, required\nbuild: str, required\nrefseq_id: str,\nchrom: str, required\nstart: int, required\nend: int, required\nis_primary: bool\n}", "source": "juraj-google-style"}
{"code": "def Or(exprs):\n    return simplify_exprs(exprs, _Or, TRUE, FALSE)", "docstring": "Create a disjunction or its simplified equivalent.\n\nThis will ensure that, when an _Or is returned, none of its immediate\nsubterms is TRUE, FALSE, or another disjunction.\n\nArgs:\nexprs: An iterable. The subterms.\n\nReturns:\nA BooleanTerm.", "source": "github-repos"}
{"code": "def build_exon(exon_info, build='37'):\n    \n\n    try:\n        chrom = exon_info['chrom']\n    except KeyError:\n        raise KeyError(\"Exons has to have a chromosome\")\n\n    try:\n        start = int(exon_info['start'])\n    except KeyError:\n        raise KeyError(\"Exon has to have a start\")\n    except TypeError:\n        raise TypeError(\"Exon start has to be integer\")\n    \n    try:\n        end = int(exon_info['end'])\n    except KeyError:\n        raise KeyError(\"Exon has to have a end\")\n    except TypeError:\n        raise TypeError(\"Exon end has to be integer\")\n\n    try:\n        rank = int(exon_info['rank'])\n    except KeyError:\n        raise KeyError(\"Exon has to have a rank\")\n    except TypeError:\n        raise TypeError(\"Exon rank has to be integer\")\n\n    try:\n        exon_id = exon_info['exon_id']\n    except KeyError:\n        raise KeyError(\"Exons has to have a id\")\n\n    try:\n        transcript = exon_info['transcript']\n    except KeyError:\n        raise KeyError(\"Exons has to have a transcript\")\n\n    try:\n        hgnc_id = int(exon_info['hgnc_id'])\n    except KeyError:\n        raise KeyError(\"Exons has to have a hgnc_id\")\n    except TypeError:\n        raise TypeError(\"hgnc_id has to be integer\")\n\n    exon_obj = Exon(\n        exon_id = exon_id,\n        chrom = chrom,\n        start = start,\n        end = end,\n        rank = rank,\n        transcript = transcript,\n        hgnc_id = hgnc_id,\n        build = build,\n    )\n\n    return exon_obj", "docstring": "Build a Exon object object\n\nArgs:\nexon_info(dict): Exon information\n\nReturns:\nexon_obj(Exon)\n\n\"exon_id\": str, # str(chrom-start-end)\n\"chrom\": str,\n\"start\": int,\n\"end\": int,\n\"transcript\": str, # ENST ID\n\"hgnc_id\": int,      # HGNC_id\n\"rank\": int, # Order of exon in transcript\n\"build\": str, # Genome build", "source": "juraj-google-style"}
{"code": "def ignore_errors(self, log_warning=False, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import ignore_errors_op\n    return ignore_errors_op._ignore_errors(self, log_warning, name)", "docstring": "Drops elements that cause errors.\n\n>>> dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])\n>>> dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, \"\"))\n>>> list(dataset.as_numpy_iterator())\nTraceback (most recent call last):\n...\nInvalidArgumentError: ... Tensor had Inf values\n>>> dataset = dataset.ignore_errors()\n>>> list(dataset.as_numpy_iterator())\n[1.0, 0.5, 0.25]\n\nArgs:\nlog_warning: (Optional.) A bool indicating whether or not ignored errors\nshould be logged to stderr. Defaults to `False`.\nname: (Optional.) A string indicating a name for the `tf.data` operation.\n\nReturns:\nA new `Dataset` with the transformation applied as described above.", "source": "github-repos"}
{"code": "def unique(seen, *iterables):\n    \n    _add = seen.add\n    \n    \n    return (i for i in chain(*iterables) if i not in seen and not _add(i))", "docstring": "Get the unique items in iterables while preserving order.  Note that this\nmutates the seen set provided only when the returned generator is used.\n\nArgs:\nseen (set): either an empty set, or the set of things already seen\n*iterables: one or more iterable lists to chain together\n\nReturns:\ngenerator:", "source": "juraj-google-style"}
{"code": "def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):\n    self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot))", "docstring": "Add a consuming op for this op.\n\nArgs:\nsrc_op_name: Name of the op of which the output tensor is being consumed.\nsrc_slot: 0-based output slot of the op being consumed.\ndst_op_name: Name of the consuming op (e.g., \"Conv2D_3/BiasAdd\")\ndst_slot: 0-based input slot of the consuming op that receives the tensor\nfrom this op.", "source": "github-repos"}
{"code": "async def join(self, *, remote_addrs: Iterable[str], listen_addr: str='0.0.0.0:2377', join_token: str, advertise_addr: str=None, data_path_addr: str=None) -> bool:\n    data = {'RemoteAddrs': list(remote_addrs), 'JoinToken': join_token, 'ListenAddr': listen_addr, 'AdvertiseAddr': advertise_addr, 'DataPathAddr': data_path_addr}\n    (await self.docker._query('swarm/join', method='POST', data=clean_map(data)))\n    return True", "docstring": "Join a swarm.\n\nArgs:\nlisten_addr\nUsed for inter-manager communication\n\nadvertise_addr\nExternally reachable address advertised to other nodes.\n\ndata_path_addr\nAddress or interface to use for data path traffic.\n\nremote_addrs\nAddresses of manager nodes already participating in the swarm.\n\njoin_token\nSecret token for joining this swarm.", "source": "codesearchnet"}
{"code": "def _make_patterns(patterns):\n    field_registry = display_fields.FieldRegistry()\n    pattern_list = display_pattern.ScreenPatternList(field_registry=field_registry)\n    for pattern in patterns:\n        pattern_list.add(pattern.split('\\n'))\n    return pattern_list", "docstring": "Create a ScreenPatternList from a given pattern text.\n\nArgs:\npattern_txt (str list): the patterns\n\nReturns:\nmpdlcd.display_pattern.ScreenPatternList: a list of patterns from the\ngiven entries.", "source": "codesearchnet"}
{"code": "def deserialize(json, cls=None):\n    LOGGER.debug('deserialize(%s)', json)\n    out = simplejson.loads(json)\n    if (isinstance(out, dict) and (cls is not None)):\n        return cls(**out)\n    return out", "docstring": "Deserialize a JSON string into a Python object.\n\nArgs:\njson (str): the JSON string.\ncls (:py:class:`object`):\nif the ``json`` is deserialized into a ``dict`` and\nthis argument is set,\nthe ``dict`` keys are passed as keyword arguments to the\ngiven ``cls`` initializer.\n\nReturns:\nPython object representation of the given JSON string.", "source": "codesearchnet"}
{"code": "def remove_user_from_template(self, template_id, account_id=None, email_address=None):\n        \n        return self._add_remove_user_template(self.TEMPLATE_REMOVE_USER_URL, template_id, account_id, email_address)", "docstring": "Removes the specified Account's access to the specified Template\n\nArgs:\n\ntemplate_id (str):      The id of the template to remove the account's access from.\n\naccount_id (str):       The id of the account to remove access from the template. The account id prevails if both account_id and email_address are provided.\n\nemail_address (str):    The email address of the account to remove access from.\n\nReturns:\nAn Template object", "source": "juraj-google-style"}
{"code": "def key_exists(hive, key, use_32bit_registry=False):\n    r\n    return __utils__['reg.key_exists'](hive=hive,\n                                       key=key,\n                                       use_32bit_registry=use_32bit_registry)", "docstring": "r'''\nCheck that the key is found in the registry. This refers to keys and not\nvalue/data pairs.\n\nArgs:\n\nhive (str): The hive to connect to\n\nkey (str): The key to check\n\nuse_32bit_registry (bool): Look in the 32bit portion of the registry\n\nReturns:\nbool: True if exists, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' reg.key_exists HKLM SOFTWARE\\Microsoft", "source": "juraj-google-style"}
{"code": "def log_device_compatibility_check(policy_name):\n    global _logged_compatibility_check\n    if _logged_compatibility_check:\n        return\n    _logged_compatibility_check = True\n    gpus = config.list_physical_devices('GPU')\n    gpu_details_list = [config.get_device_details(g) for g in gpus]\n    _log_device_compatibility_check(policy_name, gpu_details_list)", "docstring": "Logs a compatibility check if the devices support the policy.\n\nCurrently only logs for the policy mixed_float16. A log is shown only the\nfirst time this function is called.\n\nArgs:\npolicy_name: The name of the dtype policy.", "source": "github-repos"}
{"code": "class CLIPSegImageSegmentationOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    logits: Optional[torch.FloatTensor] = None\n    conditional_embeddings: Optional[torch.FloatTensor] = None\n    pooled_output: Optional[torch.FloatTensor] = None\n    vision_model_output: BaseModelOutputWithPooling = None\n    decoder_output: CLIPSegDecoderOutput = None\n\n    def to_tuple(self) -> Tuple[Any]:\n        return tuple((self[k] if k not in ['vision_model_output', 'decoder_output'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Args:\nloss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):\nContrastive loss for image-text similarity.\n...\nvision_model_output (`BaseModelOutputWithPooling`):\nThe output of the [`CLIPSegVisionModel`].", "source": "github-repos"}
{"code": "def send_notifications(self, notification_type, *args):\n    if (notification_type in self.notifications):\n        for (notification_id, callback) in self.notifications[notification_type]:\n            try:\n                callback(*args)\n            except:\n                self.logger.exception('Problem calling notify callback!')", "docstring": "Fires off the notification for the specific event.  Uses var args to pass in a\narbitrary list of parameter according to which notification type was fired.\n\nArgs:\nnotification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)\nargs: variable list of arguments to the callback.", "source": "codesearchnet"}
{"code": "def _TensorArrayWriteGrad(op: ops.Operation, flow):\n    handle = op.inputs[0]\n    index = op.inputs[1]\n    dtype = op.get_attr('T')\n    grad_source = _GetGradSource(flow)\n    flow_out = array_ops.identity(op.outputs[0], 'flow_out')\n    with ops.control_dependencies([flow_out]):\n        flow = array_ops.identity(flow, 'write_barrier')\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    grad = g.read(index)\n    return [None, None, grad, flow]", "docstring": "Gradient for TensorArrayWrite.\n\nArgs:\nop: Forward TensorArrayWrite op.\nflow: Gradient `Tensor` flow to TensorArrayWrite.\n\nReturns:\nA grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.", "source": "github-repos"}
{"code": "def ints_to_string(ints):\n    \n    if not isinstance(ints, list):\n        return six.u(str(ints))\n\n    return '|'.join(six.u(str(l)) for l in ints)", "docstring": "Convert a list of integers to a *|* separated string.\n\nArgs:\nints (list[int]|int): List of integer items to convert or single\ninteger to convert.\n\nReturns:\nstr: Formatted string", "source": "juraj-google-style"}
{"code": "def hexstr(text):\n    \n    text = text.strip().lower()\n    if text.startswith(('0x', '0X')):\n        text = text[2:]\n\n    if not text:\n        raise s_exc.BadTypeValu(valu=text, name='hexstr',\n                                mesg='No string left after stripping')\n\n    try:\n        \n        \n        s_common.uhex(text)\n    except (binascii.Error, ValueError) as e:\n        raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg=str(e))\n    return text", "docstring": "Ensure a string is valid hex.\n\nArgs:\ntext (str): String to normalize.\n\nExamples:\nNorm a few strings:\n\nhexstr('0xff00')\nhexstr('ff00')\n\nNotes:\nWill accept strings prefixed by '0x' or '0X' and remove them.\n\nReturns:\nstr: Normalized hex string.", "source": "juraj-google-style"}
{"code": "def predecesors_pattern(element, root):\n    \n    def is_root_container(el):\n        return el.parent.parent.getTagName() == \"\"\n\n    if not element.parent or not element.parent.parent or \\\n       is_root_container(element):\n        return []\n\n    trail = [\n        [\n            element.parent.parent.getTagName(),\n            _params_or_none(element.parent.parent.params)\n        ],\n        [\n            element.parent.getTagName(),\n            _params_or_none(element.parent.params)\n        ],\n        [element.getTagName(), _params_or_none(element.params)],\n    ]\n\n    match = root.match(*trail)\n    if element in match:\n        return [\n            PathCall(\"match\", match.index(element), trail)\n        ]", "docstring": "Look for `element` by its predecesors.\n\nArgs:\nelement (obj): HTMLElement instance of the object you are looking for.\nroot (obj): Root of the `DOM`.\n\nReturns:\nlist: ``[PathCall()]`` - list with one :class:`PathCall` object (to \\\nallow use with ``.extend(predecesors_pattern())``).", "source": "juraj-google-style"}
{"code": "def AddFiles(self, hash_id_metadatas):\n    for (hash_id, metadata) in iteritems(hash_id_metadatas):\n        self.AddFile(hash_id, metadata)", "docstring": "Adds multiple files to the file store.\n\nArgs:\nhash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple\nof hash client path and blob references).", "source": "codesearchnet"}
{"code": "def Dump(self, output):\n    \n    data = {\n        'current_content_length': self._current_content_length,\n        'is_last': self._is_last,\n        'server': self._request_builder.GetServer(),\n        'upload_url': self._upload_url,\n        'version': self._request_builder.GetVersion()\n    }\n\n    try:\n      yaml.dump(data, output)\n    except yaml.YAMLError as e:\n      raise googleads.errors.GoogleAdsError(\n          'Error dumping IncrementalUploadHelper to file: %s' % str(e))", "docstring": "Serialize the IncrementalUploadHelper and store in file-like object.\n\nArgs:\noutput: a file-like object where the status of the IncrementalUploadHelper\nwill be written.\n\nRaises:\nGoogleAdsError: If a YAMLError occurs while writing to the file.", "source": "juraj-google-style"}
{"code": "def add_subscription(self, channel, callback_function):\n        \n        if channel not in CHANNELS:\n            CHANNELS.append(channel)\n            SUBSCRIPTIONS[channel] = [callback_function]\n        else:\n            SUBSCRIPTIONS[channel].append(callback_function)\n        \n        \n        if self._subscribed:\n            _LOGGER.info(\"New channel added after main subscribe call.\")\n            self._pubnub.subscribe().channels(channel).execute()", "docstring": "Add a channel to subscribe to and a callback function to\nrun when the channel receives an update.\nIf channel already exists, create a new \"subscription\"\nand append another callback function.\n\nArgs:\nchannel (str): The channel to add a subscription too.\ncallback_function (func): The function to run on an\nupdate to the passed in channel.", "source": "juraj-google-style"}
{"code": "def swapaxes(x, axis1, axis2):\n    if any_symbolic_tensors((x,)):\n        return Swapaxes(axis1, axis2).symbolic_call(x)\n    return backend.numpy.swapaxes(x, axis1=axis1, axis2=axis2)", "docstring": "Interchange two axes of a tensor.\n\nArgs:\nx: Input tensor.\naxis1: First axis.\naxis2: Second axis.\n\nReturns:\nA tensor with the axes swapped.", "source": "github-repos"}
{"code": "def floatx():\n    return _FLOATX", "docstring": "Return the default float type, as a string.\n\nE.g. `'bfloat16'`, `'float16'`, `'float32'`, `'float64'`.\n\nReturns:\nString, the current default float type.\n\nExample:\n\n>>> keras.config.floatx()\n'float32'", "source": "github-repos"}
{"code": "def sparse_grid(func, order, dim=None, skew=None):\n    if (not isinstance(order, int)):\n        orders = numpy.array(order).flatten()\n        dim = orders.size\n        m_order = int(numpy.min(orders))\n        skew = [(order - m_order) for order in orders]\n        return sparse_grid(func, m_order, dim, skew)\n    (abscissas, weights) = ([], [])\n    bindex = chaospy.bertran.bindex(((order - dim) + 1), order, dim)\n    if (skew is None):\n        skew = numpy.zeros(dim, dtype=int)\n    else:\n        skew = numpy.array(skew, dtype=int)\n        assert (len(skew) == dim)\n    for idx in range((chaospy.bertran.terms(order, dim) - chaospy.bertran.terms((order - dim), dim))):\n        idb = bindex[idx]\n        (abscissa, weight) = func((skew + idb))\n        weight *= (((- 1) ** (order - sum(idb))) * comb((dim - 1), (order - sum(idb))))\n        abscissas.append(abscissa)\n        weights.append(weight)\n    abscissas = numpy.concatenate(abscissas, 1)\n    weights = numpy.concatenate(weights, 0)\n    abscissas = numpy.around(abscissas, 15)\n    order = numpy.lexsort(tuple(abscissas))\n    abscissas = abscissas.T[order].T\n    weights = weights[order]\n    diff = numpy.diff(abscissas.T, axis=0)\n    unique = numpy.ones(len(abscissas.T), bool)\n    unique[1:] = (diff != 0).any(axis=1)\n    length = len(weights)\n    idx = 1\n    while (idx < length):\n        while ((idx < length) and unique[idx]):\n            idx += 1\n        idy = (idx + 1)\n        while ((idy < length) and (not unique[idy])):\n            idy += 1\n        if ((idy - idx) > 1):\n            weights[(idx - 1)] = numpy.sum(weights[(idx - 1):idy])\n        idx = (idy + 1)\n    abscissas = abscissas[(:, unique)]\n    weights = weights[unique]\n    return (abscissas, weights)", "docstring": "Smolyak sparse grid constructor.\n\nArgs:\nfunc (:py:data:typing.Callable):\nFunction that takes a single argument ``order`` of type\n``numpy.ndarray`` and with ``order.shape = (dim,)``\norder (int, numpy.ndarray):\nThe order of the grid. If ``numpy.ndarray``, it overrides both\n``dim`` and ``skew``.\ndim (int):\nNumber of dimension.\nskew (list):\nOrder skewness.", "source": "codesearchnet"}
{"code": "def allowed_methods(self):\n    return [method for (method, allowed) in (('GET', hasattr(self, 'on_get')), ('POST', hasattr(self, 'on_post')), ('PUT', hasattr(self, 'on_put')), ('PATCH', hasattr(self, 'on_patch')), ('DELETE', hasattr(self, 'on_delete')), ('HEAD', hasattr(self, 'on_head')), ('OPTIONS', hasattr(self, 'on_options'))) if allowed]", "docstring": "Return list of allowed HTTP methods on this resource.\n\nThis is only for purpose of making resource description.\n\nReturns:\nlist: list of allowed HTTP method names (uppercase)", "source": "codesearchnet"}
{"code": "def ParseDataStream(self, parser_mediator, file_entry, data_stream_name):\n    file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n    if (not file_object):\n        raise RuntimeError('Unable to retrieve file-like object from file entry.')\n    try:\n        parser_names = self._GetSignatureMatchParserNames(file_object)\n        parse_with_non_sigscan_parsers = True\n        if parser_names:\n            parse_result = self._ParseFileEntryWithParsers(parser_mediator, parser_names, file_entry, file_object=file_object)\n            if (parse_result in (self._PARSE_RESULT_FAILURE, self._PARSE_RESULT_SUCCESS)):\n                parse_with_non_sigscan_parsers = False\n        if parse_with_non_sigscan_parsers:\n            self._ParseFileEntryWithParsers(parser_mediator, self._non_sigscan_parser_names, file_entry, file_object=file_object)\n    finally:\n        file_object.close()", "docstring": "Parses a data stream of a file entry with the enabled parsers.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): data stream name.\n\nRaises:\nRuntimeError: if the file-like object or the parser object is missing.", "source": "codesearchnet"}
{"code": "def plot_spectra_overlapped(ss, title=None, setup=_default_setup):\n    \n\n    plt.figure()\n    draw_spectra_overlapped(ss, title, setup)\n    plt.show()", "docstring": "Plots one or more spectra in the same plot.\n\nArgs:\nss: list of Spectrum objects\ntitle=None: window title\nsetup: PlotSpectrumSetup object", "source": "juraj-google-style"}
{"code": "def potcar_spec(filename):\n    p_spec = {}\n    with open(filename, 'r') as f:\n        potcars = re.split('(End of Dataset\\n)', f.read())\n    potcar_md5sums = [md5sum(''.join(pair)) for pair in zip(potcars[::2], potcars[1:(- 1):2])]\n    for this_md5sum in potcar_md5sums:\n        for ps in potcar_sets:\n            for (p, p_md5sum) in potcar_md5sum_data[ps].items():\n                if (this_md5sum == p_md5sum):\n                    p_spec[p] = ps\n    if (len(p_spec) != len(potcar_md5sums)):\n        raise ValueError('One or more POTCARs did not have matching md5 hashes')\n    return p_spec", "docstring": "Returns a dictionary specifying the pseudopotentials contained in a POTCAR file.\n\nArgs:\nfilename (Str): The name of the POTCAR file to process.\n\nReturns:\n(Dict): A dictionary of pseudopotential filename: dataset pairs, e.g.\n{ 'Fe_pv': 'PBE_54', 'O', 'PBE_54' }", "source": "codesearchnet"}
{"code": "def set_standby_timeout(timeout, power='ac', scheme=None):\n    return _set_powercfg_value(scheme=scheme, sub_group='SUB_SLEEP', setting_guid='STANDBYIDLE', power=power, value=timeout)", "docstring": "Set the standby timeout in minutes for the given power scheme\n\nArgs:\ntimeout (int):\nThe amount of time in minutes before the computer sleeps\n\npower (str):\nSet the value for AC or DC power. Default is ``ac``. Valid options\nare:\n\n- ``ac`` (AC Power)\n- ``dc`` (Battery)\n\nscheme (str):\nThe scheme to use, leave as ``None`` to use the current. Default is\n``None``. This can be the GUID or the Alias for the Scheme. Known\nAliases are:\n\n- ``SCHEME_BALANCED`` - Balanced\n- ``SCHEME_MAX`` - Power saver\n- ``SCHEME_MIN`` - High performance\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\n# Sets the system standby timeout to 30 minutes on Battery\nsalt '*' powercfg.set_standby_timeout 30 power=dc", "source": "codesearchnet"}
{"code": "def config_insync(self):\n    status = self.get('config/insync').get('configInSync', False)\n    if (status is None):\n        status = False\n    return status", "docstring": "Returns whether the config is in sync, i.e. whether the running\nconfiguration is the same as that on disk.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def get_time_evolution(self):\n    term = self.simplify()\n    coeff = term.coeff\n    if coeff.imag:\n        raise ValueError('Not a real coefficient.')\n    ops = term.ops\n\n    def append_to_circuit(circuit, t):\n        if (not ops):\n            return\n        for op in ops:\n            n = op.n\n            if (op.op == 'X'):\n                circuit.h[n]\n            elif (op.op == 'Y'):\n                circuit.rx((- half_pi))[n]\n        for i in range(1, len(ops)):\n            circuit.cx[(ops[(i - 1)].n, ops[i].n)]\n        circuit.rz((((- 2) * coeff) * t))[ops[(- 1)].n]\n        for i in range((len(ops) - 1), 0, (- 1)):\n            circuit.cx[(ops[(i - 1)].n, ops[i].n)]\n        for op in ops:\n            n = op.n\n            if (op.op == 'X'):\n                circuit.h[n]\n            elif (op.op == 'Y'):\n                circuit.rx(half_pi)[n]\n    return append_to_circuit", "docstring": "Get the function to append the time evolution of this term.\n\nReturns:\nfunction(circuit: Circuit, t: float):\nAdd gates for time evolution to `circuit` with time `t`", "source": "codesearchnet"}
{"code": "def _str_to_ord(content, weights):\n  \n  ordinal = 0\n  for i, c in enumerate(content):\n    ordinal += weights[i] * _ALPHABET.index(c) + 1\n  return ordinal", "docstring": "Converts a string to its lexicographical order.\n\nArgs:\ncontent: the string to convert. Of type str.\nweights: weights from _get_weights.\n\nReturns:\nan int or long that represents the order of this string. \"\" has order 0.", "source": "juraj-google-style"}
{"code": "def _call(callable_obj, arg_names, namespace):\n    arguments = {arg_name: getattr(namespace, arg_name) for arg_name in arg_names}\n    return callable_obj(**arguments)", "docstring": "Actually calls the callable with the namespace parsed from the command\nline.\n\nArgs:\ncallable_obj: a callable object\narg_names: name of the function arguments\nnamespace: the namespace object parsed from the command line", "source": "codesearchnet"}
{"code": "def upload_timeline(self, timeline_name, plaso_storage_path):\n    resource_url = '{0:s}/upload/'.format(self.api_base_url)\n    files = {'file': open(plaso_storage_path, 'rb')}\n    data = {'name': timeline_name}\n    response = self.session.post(resource_url, files=files, data=data)\n    try:\n        response_dict = response.json()\n    except ValueError:\n        raise RuntimeError('Could not decode JSON response from Timesketch (Status {0:d}):\\n{1:s}'.format(response.status_code, response.content))\n    index_id = response_dict['objects'][0]['id']\n    return index_id", "docstring": "Create a timeline with the specified name from the given plaso file.\n\nArgs:\ntimeline_name (str): Name of timeline\nplaso_storage_path (str): Local path of plaso file to be uploaded\n\nReturns:\nint: ID of uploaded timeline\n\nRaises:\nRuntimeError: When the JSON response from Timesketch cannot be decoded.", "source": "codesearchnet"}
{"code": "def _HasDuplicateRegistryKeyPaths(self, filename, artifact_definition, source):\n    result = False\n    intersection = self._artifact_registry_key_paths.intersection(set(source.keys))\n    if intersection:\n        duplicate_key_paths = '\\n'.join(intersection)\n        logging.warning('Artifact definition: {0:s} in file: {1:s} has duplicate Registry key paths:\\n{2:s}'.format(artifact_definition.name, filename, duplicate_key_paths))\n        result = True\n    self._artifact_registry_key_paths.update(source.keys)\n    return result", "docstring": "Checks if Registry key paths are not already defined by other artifacts.\n\nNote that at the moment this function will only find exact duplicate\nRegistry key paths.\n\nArgs:\nfilename (str): name of the artifacts definition file.\nartifact_definition (ArtifactDefinition): artifact definition.\nsource (SourceType): source definition.\n\nReturns:\nbool: True if the Registry key paths defined by the source type\nare used in other artifacts.", "source": "codesearchnet"}
{"code": "def weighted_moments_v2(x, axes, frequency_weights, keepdims=False, name=None):\n    return weighted_moments(x=x, axes=axes, frequency_weights=frequency_weights, name=name, keep_dims=keepdims)", "docstring": "Returns the frequency-weighted mean and variance of `x`.\n\nArgs:\nx: A tensor.\naxes: 1-d tensor of int32 values; these are the axes along which\nto compute mean and variance.\nfrequency_weights: A tensor of positive weights which can be\nbroadcast with x.\nkeepdims: Produce moments with the same dimensionality as the input.\nname: Name used to scope the operation.\n\nReturns:\nTwo tensors: `weighted_mean` and `weighted_variance`.", "source": "github-repos"}
{"code": "def _acquire(self, uuid_path):\n        \n        for index in range(self._min_third_octet, self._max_third_octet + 1):\n            lease = self.create_lease_object_from_idx(index)\n            if self._lease_valid(lease):\n                continue\n            self._take_lease(lease, uuid_path, safe=False)\n            return lease.to_ip_network()\n\n        raise LagoSubnetLeaseStoreFullException(self.get_allowed_range())", "docstring": "Lease a free network for the given uuid path\n\nArgs:\nuuid_path (str): Path to the uuid file of a :class:`lago.Prefix`\n\nReturns:\nnetaddr.IPNetwork: Which represents the selected subnet\n\nRaises:\nLagoSubnetLeaseException: If the store is full", "source": "juraj-google-style"}
{"code": "def handle_discovery_request(self, path, request, start_response):\n    if (path == self._GET_REST_API):\n        return self._get_rest_doc(request, start_response)\n    elif (path == self._GET_RPC_API):\n        error_msg = 'RPC format documents are no longer supported with the Endpoints Framework for Python. Please use the REST format.'\n        _logger.error('%s', error_msg)\n        return util.send_wsgi_error_response(error_msg, start_response)\n    elif (path == self._LIST_API):\n        return self._list(request, start_response)\n    return False", "docstring": "Returns the result of a discovery service request.\n\nThis calls start_response and returns the response body.\n\nArgs:\npath: A string containing the API path (the portion of the path\nafter /_ah/api/).\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nThe response body.  Or returns False if the request wasn't handled by\nDiscoveryService.", "source": "codesearchnet"}
{"code": "def date_to_epoch(year, month, day):\n    return int(date_to_delorean(year, month, day).epoch)", "docstring": "Converts a date to epoch in UTC\n\nArgs:\nyear: int between 1 and 9999.\nmonth: int between 1 and 12.\nday: int between 1 and 31.\n\nReturns:\nInt epoch in UTC from date.", "source": "codesearchnet"}
{"code": "def _commit_change(alias_table, export_path=None, post_commit=True):\n    with open((export_path or GLOBAL_ALIAS_PATH), 'w+') as alias_config_file:\n        alias_table.write(alias_config_file)\n        if post_commit:\n            alias_config_file.seek(0)\n            alias_config_hash = hashlib.sha1(alias_config_file.read().encode('utf-8')).hexdigest()\n            AliasManager.write_alias_config_hash(alias_config_hash)\n            collided_alias = AliasManager.build_collision_table(alias_table.sections())\n            AliasManager.write_collided_alias(collided_alias)\n            build_tab_completion_table(alias_table)", "docstring": "Record changes to the alias table.\nAlso write new alias config hash and collided alias, if any.\n\nArgs:\nalias_table: The alias table to commit.\nexport_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH.\npost_commit: True if we want to perform some extra actions after writing alias to file.", "source": "codesearchnet"}
{"code": "def uni_to_beta(text):\n    u = _UNICODE_MAP\n    transform = []\n    for ch in text:\n        try:\n            conv = u[ch]\n        except KeyError:\n            conv = ch\n        transform.append(conv)\n    converted = ''.join(transform)\n    return converted", "docstring": "Convert unicode text to a betacode equivalent.\n\nThis method can handle tónos or oxeîa characters in the input.\n\nArgs:\ntext: The text to convert to betacode. This text does not have to all be\nGreek polytonic text, and only Greek characters will be converted. Note\nthat in this case, you cannot convert to beta and then back to unicode.\n\nReturns:\nThe betacode equivalent of the inputted text where applicable.", "source": "codesearchnet"}
{"code": "def _html_tree_view_render(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:\n    return self._html_tree_view(view=view, name=name, parent=parent, root_path=root_path, **view.get_kwargs(kwargs, self._html_tree_view_config(), root_path or KeyPath())).add_style(*self._html_tree_view_css_styles())", "docstring": "The entrypoint of rendering the subtree represented by this extension.\n\nArgs:\nview: The view to render the object.\nname: The name of the object.\nparent: The parent of the object.\nroot_path: The key path of the object relative to the root.\n**kwargs: kwargs to pass to `view.render()` on this extension.\n\nReturns:\nThe rendered HTML.", "source": "github-repos"}
{"code": "def _open_rpc_interface(self, connection_id, callback):\n    try:\n        context = self.connections.get_context(connection_id)\n    except ArgumentError:\n        callback(connection_id, self.id, False, 'Could not find connection information')\n        return\n    self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))\n    try:\n        service = context['services'][TileBusService]\n        header_characteristic = service[ReceiveHeaderChar]\n        payload_characteristic = service[ReceivePayloadChar]\n    except KeyError:\n        self.connections.finish_operation(connection_id, False, \"Can't find characteristics to open rpc interface\")\n        return\n    self.bable.set_notification(enabled=True, connection_handle=context['connection_handle'], characteristic=header_characteristic, on_notification_set=[self._on_interface_opened, context, payload_characteristic], on_notification_received=self._on_notification_received, sync=False)", "docstring": "Enable RPC interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "codesearchnet"}
{"code": "def get_variation_for_rollout(self, rollout, user_id, attributes=None):\n    \n\n    \n    if rollout and len(rollout.experiments) > 0:\n      for idx in range(len(rollout.experiments) - 1):\n        experiment = self.config.get_experiment_from_key(rollout.experiments[idx].get('key'))\n\n        \n        if not audience_helper.is_user_in_experiment(self.config, experiment, attributes, self.logger):\n          self.logger.debug('User \"%s\" does not meet conditions for targeting rule %s.' % (\n            user_id,\n            idx + 1\n          ))\n          continue\n\n        self.logger.debug('User \"%s\" meets conditions for targeting rule %s.' % (user_id, idx + 1))\n        \n        bucketing_id = self._get_bucketing_id(user_id, attributes)\n        variation = self.bucketer.bucket(experiment, user_id, bucketing_id)\n        if variation:\n          self.logger.debug('User \"%s\" is in variation %s of experiment %s.' % (\n            user_id,\n            variation.key,\n            experiment.key\n          ))\n          return Decision(experiment, variation, enums.DecisionSources.ROLLOUT)\n        else:\n          \n          self.logger.debug('User \"%s\" is not in the traffic group for the targeting else. '\n                            'Checking \"Everyone Else\" rule now.' % user_id)\n          break\n\n      \n      everyone_else_experiment = self.config.get_experiment_from_key(rollout.experiments[-1].get('key'))\n      if audience_helper.is_user_in_experiment(self.config,\n                                               self.config.get_experiment_from_key(rollout.experiments[-1].get('key')),\n                                               attributes,\n                                               self.logger):\n        \n        bucketing_id = self._get_bucketing_id(user_id, attributes)\n        variation = self.bucketer.bucket(everyone_else_experiment, user_id, bucketing_id)\n        if variation:\n          self.logger.debug('User \"%s\" meets conditions for targeting rule \"Everyone Else\".' % user_id)\n          return Decision(everyone_else_experiment, variation, enums.DecisionSources.ROLLOUT)\n\n    return Decision(None, None, enums.DecisionSources.ROLLOUT)", "docstring": "Determine which experiment/variation the user is in for a given rollout.\nReturns the variation of the first experiment the user qualifies for.\n\nArgs:\nrollout: Rollout for which we are getting the variation.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nDecision namedtuple consisting of experiment and variation for the user.", "source": "juraj-google-style"}
{"code": "def to_grayscale(img):\n    gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float)\n    imbands = img.getbands()\n    alpha = None\n    if ('A' in imbands):\n        alpha = numpy.asarray(img.split()[(- 1)]).astype(numpy.float)\n    return (gray, alpha)", "docstring": "Convert PIL image to numpy grayscale array and numpy alpha array.\n\nArgs:\nimg (PIL.Image): PIL Image object.\n\nReturns:\n(gray, alpha): both numpy arrays.", "source": "codesearchnet"}
{"code": "def GetTaskPendingMerge(self, current_task):\n    \n    next_task = self._tasks_pending_merge.PeekTask()\n    if not next_task:\n      return None\n\n    if current_task and next_task.merge_priority > current_task.merge_priority:\n      return None\n\n    with self._lock:\n      next_task = self._tasks_pending_merge.PopTask()\n\n    self._tasks_merging[next_task.identifier] = next_task\n    return next_task", "docstring": "Retrieves the first task that is pending merge or has a higher priority.\n\nThis function will check if there is a task with a higher merge priority\nthan the current_task being merged. If so, that task with the higher\npriority is returned.\n\nArgs:\ncurrent_task (Task): current task being merged or None if no such task.\n\nReturns:\nTask: the next task to merge or None if there is no task pending merge or\nwith a higher priority.", "source": "juraj-google-style"}
{"code": "def get_box_threads(self, box_key):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.threads_suffix\n\t\t\t\t\t\t])\n\t\treturn self._req('get', uri)", "docstring": "Gets all threads in a specified box\nArgs:\nbox_key \t\tbox to look in\nreturns \t\ta list of thread dicts", "source": "juraj-google-style"}
{"code": "def set_guest_access(self, room_id, guest_access):\n        \n        content = {\n            \"guest_access\": guest_access\n        }\n        return self.send_state_event(room_id, \"m.room.guest_access\", content)", "docstring": "Set the guest access policy of the room.\n\nArgs:\nroom_id(str): The room to set the rules for.\nguest_access(str): Wether guests can join. One of: [\"can_join\",\n\"forbidden\"]", "source": "juraj-google-style"}
{"code": "def compute_recall(self, result_neighbors, ground_truth_neighbors):\n    self.assertLen(result_neighbors.shape, 2)\n    self.assertLen(ground_truth_neighbors.shape, 2)\n    self.assertEqual(result_neighbors.shape[0], ground_truth_neighbors.shape[0])\n    gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors]\n\n    def hits_per_q(q, nn_per_q):\n        return len(list((x for x in nn_per_q if x.item() in gt_sets[q])))\n    hits = sum((hits_per_q(q, nn_per_q) for q, nn_per_q in enumerate(result_neighbors)))\n    return hits / ground_truth_neighbors.size", "docstring": "Computes the recall of an approximate nearest neighbor search.\n\nArgs:\nresult_neighbors: int32 numpy array of the shape [num_queries,\nneighbors_per_query] where the values are the indices of the dataset.\nground_truth_neighbors: int32 numpy array of with shape [num_queries,\nground_truth_neighbors_per_query] where the values are the indices of\nthe dataset.\n\nReturns:\nThe recall.", "source": "github-repos"}
{"code": "def set_parent(self, parent):\n        \n        if not isinstance(parent, Node):\n            raise TypeError(\"parent must be a Node\")\n        self.parent = parent", "docstring": "Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object.\n\nArgs:\n``Node``: The new parent of this ``Node``", "source": "juraj-google-style"}
{"code": "def get_whois_tags(ip_address):\n    whois = IPWhois(ip_address).lookup_whois()\n    nets = whois.get('nets', None)\n    if (not nets):\n        return []\n    cities = [net['city'] for net in nets if net.get('city', None)]\n    address_list = []\n    for net in nets:\n        address = net.get('address', None)\n        if (not address):\n            continue\n        if (('description' in net) and net['description']):\n            address = address.replace(net['description'], '').strip()\n        if ('\\n' in address):\n            address = ', '.join(address.splitlines())\n        address_list.append(address)\n    return [SourceString(val, source='Whois') for val in set((cities + address_list))]", "docstring": "Get list of tags with `address` for given `ip_address`.\n\nArgs:\nindex_page (str): HTML content of the page you wisht to analyze.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "codesearchnet"}
{"code": "def parse_header(line):\n    if ((not line) or (line == '\\r\\n')):\n        return None\n    if (line[0] in ' \\t'):\n        return line[1:].rstrip()\n    (name, value) = line.split(':', 1)\n    return (name.strip(), value.strip())", "docstring": "Parse a header line.\n\nArgs:\nline: A header line as a string.\n\nReturns:\nNone if end of headers is found. A string giving the continuation line\nif a continuation is found. A tuple of name, value when a header line is\nfound.\n\nRaises:\nValueError: If the line cannot be parsed as a header.", "source": "codesearchnet"}
{"code": "def _try_to_clean_garbage(self, writer_spec, exclude_list=()):\n    tmpl = string.Template(self._TMPFILE_PREFIX)\n    prefix = tmpl.substitute(id=self.status.mapreduce_id, shard=self.status.shard)\n    bucket = self._get_tmp_gcs_bucket(writer_spec)\n    account_id = self._get_tmp_account_id(writer_spec)\n    for f in cloudstorage.listbucket(('/%s/%s' % (bucket, prefix)), _account_id=account_id):\n        if (f.filename not in exclude_list):\n            self._remove_tmpfile(f.filename, self.status.writer_spec)", "docstring": "Tries to remove any files created by this shard that aren't needed.\n\nArgs:\nwriter_spec: writer_spec for the MR.\nexclude_list: A list of filenames (strings) that should not be\nremoved.", "source": "codesearchnet"}
{"code": "def qubits_tab(backend):\n    props = backend.properties().to_dict()\n    header_html = \"<div><font style='font-weight:bold'>{key}</font>: {value}</div>\"\n    header_html = header_html.format(key='last_update_date', value=props['last_update_date'])\n    update_date_widget = widgets.HTML(value=header_html)\n    qubit_html = '<table>'\n    qubit_html += '<style>\\ntable {\\n    border-collapse: collapse;\\n    width: auto;\\n}\\n\\nth, td {\\n    text-align: left;\\n    padding: 8px;\\n}\\n\\ntr:nth-child(even) {background-color: \n    qubit_html += '<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>'\n    qubit_html += '<th>U1 gate error</th><th>U2 gate error</th><th>U3 gate error</th>'\n    qubit_html += '<th>Readout error</th></tr>'\n    qubit_footer = '</table>'\n    for qub in range(len(props['qubits'])):\n        name = ('Q%s' % qub)\n        qubit_data = props['qubits'][qub]\n        gate_data = props['gates'][(3 * qub):((3 * qub) + 3)]\n        t1_info = qubit_data[0]\n        t2_info = qubit_data[1]\n        freq_info = qubit_data[2]\n        readout_info = qubit_data[3]\n        freq = ((str(round(freq_info['value'], 5)) + ' ') + freq_info['unit'])\n        T1 = ((str(round(t1_info['value'], 5)) + ' ') + t1_info['unit'])\n        T2 = ((str(round(t2_info['value'], 5)) + ' ') + t2_info['unit'])\n        U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))\n        U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))\n        U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))\n        readout_error = round(readout_info['value'], 5)\n        qubit_html += \"<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td>\"\n        qubit_html += '<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>'\n        qubit_html = (qubit_html % (name, freq, T1, T2, U1, U2, U3, readout_error))\n    qubit_html += qubit_footer\n    qubit_widget = widgets.HTML(value=qubit_html)\n    out = widgets.VBox([update_date_widget, qubit_widget])\n    return out", "docstring": "The qubits properties widget\n\nArgs:\nbackend (IBMQbackend): The backend.\n\nReturns:\nVBox: A VBox widget.", "source": "codesearchnet"}
{"code": "def calculate_signatures(self):\n    if (not self.signing_algorithm):\n        return []\n    algo_id = {'sha1': 1, 'sha384': 2}[self.signing_algorithm]\n    hashers = [(algo_id, make_hasher(algo_id))]\n    for block in get_signature_data(self.fileobj, self.filesize):\n        [h.update(block) for (_, h) in hashers]\n    signatures = [(algo_id, sign_hash(self.signing_key, h.finalize(), h.algorithm.name)) for (algo_id, h) in hashers]\n    return signatures", "docstring": "Calculate the signatures for this MAR file.\n\nReturns:\nA list of signature tuples: [(algorithm_id, signature_data), ...]", "source": "codesearchnet"}
{"code": "def get_revisions(page):\n    start_string = '    <revision>\\n'\n    end_string = '    </revision>\\n'\n    ret = []\n    current_pos = 0\n    while True:\n        start_pos = page.find(start_string, current_pos)\n        if (start_pos == (- 1)):\n            break\n        end_pos = page.find(end_string, start_pos)\n        assert (end_pos != (- 1))\n        ret.append(page[(start_pos + len(start_string)):end_pos])\n        current_pos = (end_pos + len(end_string))\n    return ret", "docstring": "Extract the revisions of a page.\n\nArgs:\npage: a string\nReturns:\na list of strings", "source": "codesearchnet"}
{"code": "def main(args=None):\n    if (args is None):\n        args = sys.argv[1:]\n    parser = create_parser()\n    args = parser.parse_args(args)\n    if (args.verbose >= 2):\n        level = logging.DEBUG\n    elif (args.verbose >= 1):\n        level = logging.INFO\n    else:\n        level = logging.WARNING\n    logging.basicConfig(level=level)\n    try:\n        args.command(args)\n    except pylink.JLinkException as e:\n        sys.stderr.write(('Error: %s%s' % (str(e), os.linesep)))\n        return 1\n    return 0", "docstring": "Main command-line interface entrypoint.\n\nRuns the given subcommand or argument that were specified.  If not given a\n``args`` parameter, assumes the arguments are passed on the command-line.\n\nArgs:\nargs (list): list of command-line arguments\n\nReturns:\nZero on success, non-zero otherwise.", "source": "codesearchnet"}
{"code": "def key_changes(self, from_token, to_token):\n        \n        params = {\"from\": from_token, \"to\": to_token}\n        return self._send(\"GET\", \"/keys/changes\", query_params=params)", "docstring": "Gets a list of users who have updated their device identity keys.\n\nArgs:\nfrom_token (str): The desired start point of the list. Should be the\nnext_batch field from a response to an earlier call to /sync.\nto_token (str): The desired end point of the list. Should be the next_batch\nfield from a recent call to /sync - typically the most recent such call.", "source": "juraj-google-style"}
{"code": "def main(jlink_serial, device):\n    \n    buf = StringIO.StringIO()\n    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)\n    jlink.open(serial_no=jlink_serial)\n\n    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)\n    jlink.connect(device, verbose=True)\n\n    \n    big_endian = jlink.set_little_endian()\n    if big_endian:\n        jlink.set_big_endian()\n\n    print('Target Endian Mode: %s Endian' % ('Big' if big_endian else 'Little'))", "docstring": "Main function.\n\nArgs:\njlink_serial (str): the J-Link serial number\ndevice (str): the target CPU\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "class UnitNorm(Constraint):\n\n    def __init__(self, axis=0):\n        self.axis = axis\n\n    def __call__(self, w):\n        w = backend.convert_to_tensor(w)\n        return w / (backend.epsilon() + ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True)))\n\n    def get_config(self):\n        return {'axis': self.axis}", "docstring": "Constrains the weights incident to each hidden unit to have unit norm.\n\nArgs:\naxis: integer, axis along which to calculate weight norms.\nFor instance, in a `Dense` layer the weight matrix\nhas shape `(input_dim, output_dim)`,\nset `axis` to `0` to constrain each weight vector\nof length `(input_dim,)`.\nIn a `Conv2D` layer with `data_format=\"channels_last\"`,\nthe weight tensor has shape\n`(rows, cols, input_depth, output_depth)`,\nset `axis` to `[0, 1, 2]`\nto constrain the weights of each filter tensor of size\n`(rows, cols, input_depth)`.", "source": "github-repos"}
{"code": "def altitude(msg):\n    \n\n    tc = common.typecode(msg)\n\n    if tc<9 or tc==19 or tc>22:\n        raise RuntimeError(\"%s: Not a airborn position message\" % msg)\n\n    mb = common.hex2bin(msg)[32:]\n\n    if tc < 19:\n        \n        q = mb[15]\n        if q:\n            n = common.bin2int(mb[8:15]+mb[16:20])\n            alt = n * 25 - 1000\n        else:\n            alt = None\n    else:\n        \n        alt = common.bin2int(mb[8:20]) * 3.28084\n\n    return alt", "docstring": "Decode aircraft altitude\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: altitude in feet", "source": "juraj-google-style"}
{"code": "def from_input(cls, input, workdir=None, manager=None):\n    return cls(input, workdir=workdir, manager=manager)", "docstring": "Create an instance of `AbinitTask` from an ABINIT input.\n\nArgs:\nainput: `AbinitInput` object.\nworkdir: Path to the working directory.\nmanager: :class:`TaskManager` object.", "source": "codesearchnet"}
{"code": "def split_vert_on_nonmanifold_face(script, vert_displacement_ratio=0.0):\n    filter_xml = ''.join(['  <filter name=\"Split Vertexes Incident on Non Manifold Faces\">\\n', '    <Param name=\"VertDispRatio\" ', 'value=\"{}\" '.format(vert_displacement_ratio), 'description=\"Vertex Displacement Ratio\" ', 'type=\"RichFloat\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Split non-manifold vertices until it becomes two-manifold.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nvert_displacement_ratio (float): When a vertex is split it is moved\nalong the average vector going from its position to the centroid\nof the FF connected faces sharing it.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def nearest_neighbour_delta_E(self):\n    delta_nn = ((self.final_site.nn_occupation() - self.initial_site.nn_occupation()) - 1)\n    return (delta_nn * self.nearest_neighbour_energy)", "docstring": "Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted.\n\nArgs:\nNone\n\nReturns:\n(Float): delta E (nearest-neighbour)", "source": "codesearchnet"}
{"code": "def parse_results(self, data):\n        \n        results = []\n        if len(data[\"Records\"]) < 1:\n            return -1\n\n        codes = data[\"Records\"][0][\"Results\"]\n        for code in codes.split(\",\"):\n            results.append(str(code))\n\n        self.addr1 = data[\"Records\"][0][\"AddressLine1\"]\n        self.addr2 = data[\"Records\"][0][\"AddressLine2\"]\n        self.city = data[\"Records\"][0][\"City\"]\n        self.name = data[\"Records\"][0][\"NameFull\"]\n        self.phone = data[\"Records\"][0][\"PhoneNumber\"]\n        self.province = data[\"Records\"][0][\"State\"]\n        self.postal = data[\"Records\"][0][\"PostalCode\"]\n        self.recordID = data[\"Records\"][0][\"RecordID\"]\n        return results", "docstring": "parse_results\n\nParses the MelissaData response.\n\nArgs:\ndata (dict): Contains MelissaData response\n\nReturns:\nresults, either contains a dict with corrected address info or -1 for an invalid address.", "source": "juraj-google-style"}
{"code": "def peek(init, exposes, debug=False):\n    \n    def _peek(store, container, _stack=None):\n        args = [ store.peek(objname, container, _stack=_stack) \\\n            for objname in exposes ]\n        if debug:\n            print(args)\n        return init(*args)\n    return _peek", "docstring": "Default deserializer factory.\n\nArguments:\n\ninit (callable): type constructor.\n\nexposes (iterable): attributes to be peeked and passed to `init`.\n\nReturns:\n\ncallable: deserializer (`peek` routine).", "source": "juraj-google-style"}
{"code": "def __init__(self, default_environment: Optional[environments.Environment]=None, bundle_repeat: int=0, use_state_iterables: bool=False, provision_info: Optional['ExtendedProvisionInfo']=None, progress_request_frequency: Optional[float]=None, is_drain: bool=False) -> None:\n    super().__init__()\n    self._default_environment = default_environment or environments.EmbeddedPythonEnvironment.default()\n    self._bundle_repeat = bundle_repeat\n    self._num_workers = 1\n    self._progress_frequency = progress_request_frequency\n    self._profiler_factory: Optional[Callable[..., Profile]] = None\n    self._use_state_iterables = use_state_iterables\n    self._is_drain = is_drain\n    self._provision_info = provision_info or ExtendedProvisionInfo(beam_provision_api_pb2.ProvisionInfo(retrieval_token='unused-retrieval-token'))", "docstring": "Creates a new Fn API Runner.\n\nArgs:\ndefault_environment: the default environment to use for UserFns.\nbundle_repeat: replay every bundle this many extra times, for profiling\nand debugging\nuse_state_iterables: Intentionally split gbk iterables over state API\n(for testing)\nprovision_info: provisioning info to make available to workers, or None\nprogress_request_frequency: The frequency (in seconds) that the runner\nwaits before requesting progress from the SDK.\nis_drain: identify whether expand the sdf graph in the drain mode.", "source": "github-repos"}
{"code": "def scripthash_to_address(scripthash):\n    sb = (bytearray([ADDRESS_VERSION]) + scripthash)\n    c256 = bin_dbl_sha256(sb)[0:4]\n    outb = (sb + bytearray(c256))\n    return base58.b58encode(bytes(outb)).decode('utf-8')", "docstring": "Convert a script hash to a public address.\n\nArgs:\nscripthash (bytes):\n\nReturns:\nstr: base58 encoded string representing the wallet address.", "source": "codesearchnet"}
{"code": "def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):\n    q_ids = tf.range(query_size, dtype=tf.int32)\n    k_ids = tf.range(key_size, dtype=tf.int32)\n    rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])\n    if bucket_size > 0 and max_position > 0:\n        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n    rel_pos_ids = rel_pos_ids[:query_size, :]\n    rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)\n    return tf.cast(rel_pos_ids, tf.int64)", "docstring": "Build relative position according to the query and key\n\nWe assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\nP_k\\)\n\nArgs:\nquery_size (int): the length of query\nkey_size (int): the length of key\nbucket_size (int): the size of position bucket\nmax_position (int): the maximum allowed absolute position\n\nReturn:\n`tf.Tensor`: A tensor with shape [1, query_size, key_size]", "source": "github-repos"}
{"code": "def virt_env(self):\n    if (self._virt_env is None):\n        self._virt_env = self._create_virt_env()\n    return self._virt_env", "docstring": "Getter for this instance's virt env, creates it if needed\n\nReturns:\nlago.virt.VirtEnv: virt env instance used by this prefix", "source": "codesearchnet"}
{"code": "def observe(self, value):\n    self._buffer.append(value)\n    if (len(self._buffer) == _BUFFER_SIZE):\n        self._flush()", "docstring": "Samples an observation's value.\n\nArgs:\nvalue: A numeric value signifying the value to be sampled.", "source": "codesearchnet"}
{"code": "def get_subnets(target='ec2', purpose='internal', env='', region=''):\n    account_az_dict = defaultdict(defaultdict)\n    subnet_id_dict = defaultdict(defaultdict)\n    subnet_url = '{0}/subnets/aws'.format(API_URL)\n    subnet_response = requests.get(subnet_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    if (not subnet_response.ok):\n        raise SpinnakerTimeout(subnet_response.text)\n    subnet_list = subnet_response.json()\n    for subnet in subnet_list:\n        LOG.debug('Subnet: %(account)s\\t%(region)s\\t%(target)s\\t%(vpcId)s\\t%(availabilityZone)s', subnet)\n        if (subnet.get('target', '') == target):\n            availability_zone = subnet['availabilityZone']\n            account = subnet['account']\n            subnet_region = subnet['region']\n            subnet_id = subnet['id']\n            try:\n                if (availability_zone not in account_az_dict[account][subnet_region]):\n                    account_az_dict[account][subnet_region].append(availability_zone)\n            except KeyError:\n                account_az_dict[account][subnet_region] = [availability_zone]\n            if (subnet['purpose'] == purpose):\n                try:\n                    subnet_id_dict[account][subnet_region].append(subnet_id)\n                except KeyError:\n                    subnet_id_dict[account][subnet_region] = [subnet_id]\n            LOG.debug('%s regions: %s', account, list(account_az_dict[account].keys()))\n    if all([env, region]):\n        try:\n            region_dict = {region: account_az_dict[env][region]}\n            region_dict['subnet_ids'] = {region: subnet_id_dict[env][region]}\n            LOG.debug('Region dict: %s', region_dict)\n            return region_dict\n        except KeyError:\n            raise SpinnakerSubnetError(env=env, region=region)\n    LOG.debug('AZ dict:\\n%s', pformat(dict(account_az_dict)))\n    return account_az_dict", "docstring": "Get all availability zones for a given target.\n\nArgs:\ntarget (str): Type of subnets to look up (ec2 or elb).\nenv (str): Environment to look up.\nregion (str): AWS Region to find Subnets for.\n\nReturns:\naz_dict: dictionary of  availbility zones, structured like\n{ $region: [ $avaibilityzones ] }\nor\n{ $account: $region: [ $availabilityzone] }", "source": "codesearchnet"}
{"code": "def _preprocess(self, inputs: Sequence[torch.Tensor], freq: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    input_ts, input_padding, inp_freq = ([], [], [])\n    for i, ts in enumerate(inputs):\n        input_len = ts.shape[0]\n        padding = torch.zeros(input_len + self.horizon_len, dtype=ts.dtype, device=ts.device)\n        if input_len < self.context_len:\n            num_front_pad = self.context_len - input_len\n            ts = torch.cat([torch.zeros(num_front_pad, dtype=ts.dtype, device=ts.device), ts], dim=0)\n            padding = torch.cat([torch.ones(num_front_pad, dtype=ts.dtype, device=padding.device), padding], dim=0)\n        elif input_len > self.context_len:\n            ts = ts[-self.context_len:]\n            padding = padding[-(self.context_len + self.horizon_len):]\n        input_ts.append(ts)\n        input_padding.append(padding)\n        inp_freq.append(freq[i])\n    return (torch.stack(input_ts, dim=0), torch.stack(input_padding, dim=0), torch.tensor(inp_freq, dtype=torch.int32).reshape(-1, 1))", "docstring": "Formats and pads raw inputs to feed into the model.\n\nThis function both pads each time series to match the context length, and\npads the inputs to meet the SPMD shape requirement.\n\nArgs:\ninputs: A list of 1d Tensors. Each Tensor is the context time series of\na single forecast task.\nfreq: list of frequencies\n\nReturns:\nA tuple of:\n- the padded input time series to meet the model required context.\n- the padding indicator.\n- the number of padded examples for SPMD so that each core has the same\nnumber (a multiple of `batch_size`) of examples.", "source": "github-repos"}
{"code": "def dbmax10years(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmax10years`'.format(value))\n    self._dbmax10years = value", "docstring": "Corresponds to IDD Field `dbmax10years`\n10-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax10years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _locate_point(nodes, point):\n    candidates = [(0.0, 1.0, nodes)]\n    for _ in six.moves.xrange((_MAX_LOCATE_SUBDIVISIONS + 1)):\n        next_candidates = []\n        for (start, end, candidate) in candidates:\n            if _helpers.contains_nd(candidate, point.ravel(order='F')):\n                midpoint = (0.5 * (start + end))\n                (left, right) = subdivide_nodes(candidate)\n                next_candidates.extend(((start, midpoint, left), (midpoint, end, right)))\n        candidates = next_candidates\n    if (not candidates):\n        return None\n    params = [(start, end) for (start, end, _) in candidates]\n    if (np.std(params) > _LOCATE_STD_CAP):\n        raise ValueError('Parameters not close enough to one another', params)\n    s_approx = np.mean(params)\n    s_approx = newton_refine(nodes, point, s_approx)\n    if (s_approx < 0.0):\n        return 0.0\n    elif (s_approx > 1.0):\n        return 1.0\n    else:\n        return s_approx", "docstring": "r\"\"\"Locate a point on a curve.\n\nDoes so by recursively subdividing the curve and rejecting\nsub-curves with bounding boxes that don't contain the point.\nAfter the sub-curves are sufficiently small, uses Newton's\nmethod to zoom in on the parameter value.\n\n.. note::\n\nThis assumes, but does not check, that ``point`` is ``D x 1``,\nwhere ``D`` is the dimension that ``curve`` is in.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.\npoint (numpy.ndarray): The point to locate.\n\nReturns:\nOptional[float]: The parameter value (:math:`s`) corresponding\nto ``point`` or :data:`None` if the point is not on the ``curve``.\n\nRaises:\nValueError: If the standard deviation of the remaining start / end\nparameters among the subdivided intervals exceeds a given\nthreshold (e.g. :math:`2^{-20}`).", "source": "codesearchnet"}
{"code": "def flatten_rules(self, declarations):\n    rules = []\n    for (protocole, paths) in declarations:\n        if protocole:\n            continue\n        rules.extend([self.strip_quotes(v.strip()) for v in paths.split(',')])\n    return list(filter(self.filter_rules, rules))", "docstring": "Flatten returned import rules from regex.\n\nBecause import rules can contains multiple items in the same rule\n(called multiline import rule), the regex ``REGEX_IMPORT_RULE``\nreturn a list of unquoted items for each rule.\n\nArgs:\ndeclarations (list): A SCSS source.\n\nReturns:\nlist: Given SCSS source with all comments removed.", "source": "codesearchnet"}
{"code": "def piece_to_id(input, model_file=None, model_proto=None, name=None):\n    return _gen_sentencepiece_processor_op.sentencepiece_piece_to_id(input, model_file=model_file, model_proto=model_proto, name=name)", "docstring": "Converts piece into vocabulary id.\n\nArgs:\ninput: An arbitrary tensor of string.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of int32 with the same shape as input.", "source": "codesearchnet"}
{"code": "def pages(self):\n    if (self._owner_id is None):\n        it = ProfileIterator.from_username(self._username, self.session)\n        self._owner_id = it.owner_id\n        return it\n    return ProfileIterator(self._owner_id, self.session, self.rhx)", "docstring": "Obtain an iterator over Instagram post pages.\n\nReturns:\nPageIterator: an iterator over the instagram post pages.\n\nRaises:\nValueError: when the requested user does not exist.\nRuntimeError: when the user is a private account\nand there is no logged user (or the logged user\ndoes not follow that account).", "source": "codesearchnet"}
{"code": "def createThread(parent, worker, deleteWorkerLater=False):\n    thread = QtCore.QThread(parent)\n    thread.started.connect(worker.doWork)\n    worker.finished.connect(thread.quit)\n    if deleteWorkerLater:\n        thread.finished.connect(worker.deleteLater)\n    worker.moveToThread(thread)\n    worker.setParent(parent)\n    return thread", "docstring": "Create a new thread for given worker.\n\nArgs:\nparent (QObject): parent of thread and worker.\nworker (ProgressWorker): worker to use in thread.\ndeleteWorkerLater (bool, optional): delete the worker if thread finishes.\n\nReturns:\nQThread", "source": "codesearchnet"}
{"code": "def _check_suffix(self, w_string, access_string, index):\n        \n        prefix_as = self._membership_query(access_string)\n        full_as = self._membership_query(access_string + w_string[index:])\n\n        prefix_w = self._membership_query(w_string[:index])\n        full_w = self._membership_query(w_string)\n\n        length = len(commonprefix([prefix_as, full_as]))\n        as_suffix = full_as[length:]\n\n        length = len(commonprefix([prefix_w, full_w]))\n        w_suffix = full_w[length:]\n\n        if as_suffix != w_suffix:\n            logging.debug('Access string state incorrect')\n            return True\n        logging.debug('Access string state correct.')\n        return False", "docstring": "Checks if access string suffix matches with the examined string suffix\nArgs:\nw_string (str): The examined string to be consumed\naccess_string (str): The access string for the state\nindex (int): The index value for selecting the prefix of w\nReturns:\nbool: A boolean valuei indicating if matching was successful", "source": "juraj-google-style"}
{"code": "def serialize_to_string(self, name, datas):\n    value = datas.get('value', None)\n    if (value is None):\n        msg = \"String reference '{}' lacks of required 'value' variable or is empty\"\n        raise SerializerError(msg.format(name))\n    return value", "docstring": "Serialize given datas to a string.\n\nSimply return the value from required variable``value``.\n\nArguments:\nname (string): Name only used inside possible exception message.\ndatas (dict): Datas to serialize.\n\nReturns:\nstring: Value.", "source": "codesearchnet"}
{"code": "def __le__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value <= other.value", "docstring": "Returns True if `self` is known to be less than or equal to `other`.\n\nDimensions are compared as follows:\n\n```python\n(tf.compat.v1.Dimension(m)    <= tf.compat.v1.Dimension(n))    == (m <= n)\n(tf.compat.v1.Dimension(m)    <= tf.compat.v1.Dimension(None)) == None\n(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n))    == None\n(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None\n```\n\nArgs:\nother: Another Dimension.\n\nReturns:\nThe value of `self.value <= other.value` if both are known, otherwise\nNone.", "source": "github-repos"}
{"code": "class GitProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = 'AutoImageProcessor'\n    tokenizer_class = 'AutoTokenizer'\n\n    def __init__(self, image_processor, tokenizer):\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n\n    def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[GitProcessorKwargs]) -> BatchFeature:\n        \n        if text is None and images is None:\n            raise ValueError('You have to specify either text or images. Both cannot be none.')\n        images, text = _validate_images_text_input_order(images, text)\n        output_kwargs = self._merge_kwargs(GitProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n        data = {}\n        if text is not None:\n            text_features = self.tokenizer(text, **output_kwargs['text_kwargs'])\n            data.update(text_features)\n        if images is not None:\n            image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n            data.update(image_features)\n        return BatchFeature(data=data, tensor_type=output_kwargs['common_kwargs'].get('return_tensors'))\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        return ['input_ids', 'attention_mask', 'pixel_values']", "docstring": "Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.\n\n[`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the\n[`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.\n\nArgs:\nimage_processor ([`AutoImageProcessor`]):\nThe image processor is a required input.\ntokenizer ([`AutoTokenizer`]):\nThe tokenizer is a required input.", "source": "github-repos"}
{"code": "def get_logger(name):\n    \n    logger = logging.getLogger(name)\n    logger.addHandler(logging.NullHandler())\n    return logger", "docstring": "Gets a logger\n\nArguments:\nname - the name you wish to log as\n\nReturns:\nA logger!", "source": "juraj-google-style"}
{"code": "def CacheObject(self, identifier, vfs_object):\n    \n    if identifier in self._values:\n      raise KeyError('Object already cached for identifier: {0:s}'.format(\n          identifier))\n\n    if len(self._values) == self._maximum_number_of_cached_values:\n      raise errors.CacheFullError('Maximum number of cached values reached.')\n\n    self._values[identifier] = ObjectsCacheValue(vfs_object)", "docstring": "Caches a VFS object.\n\nThis method ignores the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\nvfs_object (object): VFS object to cache.\n\nRaises:\nCacheFullError: if he maximum number of cached values is reached.\nKeyError: if the VFS object already is cached.", "source": "juraj-google-style"}
{"code": "def get_or_create(session, model, **kwargs):\n    instance = session.query(model).filter_by(**kwargs).first()\n    if instance:\n        return (instance, False)\n    else:\n        instance = model(**kwargs)\n        if ('dataset' in kwargs):\n            instance.update_sequence_id(session, kwargs['dataset'])\n        session.add(instance)\n        session.commit()\n        return (instance, True)", "docstring": "Get or create sqlalchemy instance.\n\nArgs:\nsession (Sqlalchemy session):\nmodel (sqlalchemy model):\nkwargs (dict): kwargs to lookup or create instance.\n\nReturns:\nTuple: first element is found or created instance, second is boolean - True if instance created,\nFalse if instance found.", "source": "codesearchnet"}
{"code": "def nested_row_lengths(self, name=None):\n    with ops.name_scope(name, 'RaggedNestedRowLengths', [self]):\n        rt_nested_row_lengths = []\n        rt = self\n        while isinstance(rt, RaggedTensor):\n            rt_nested_row_lengths.append(rt.row_lengths())\n            rt = rt.values\n        return tuple(rt_nested_row_lengths)", "docstring": "Returns a tuple containing the row_lengths for all ragged dimensions.\n\n`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors\nfor all ragged dimensions in `rt`, ordered from outermost to innermost.\n\nArgs:\nname: A name prefix for the returned tensors (optional).\n\nReturns:\nA `tuple` of 1-D integer `Tensors`.  The length of the tuple is equal to\n`self.ragged_rank`.", "source": "github-repos"}
{"code": "def converted_self(self):\n    raise NotImplementedError", "docstring": "A copy of this Convertible to be modified during conversion.\n\nReturns:\nImplementations should return the copied instance, which in turn should\nbe contained in converted_enclosing_graph(). This instance is the one that\nwill be modified during conversion. Its main use will be in the\nimplementations of convert_variable_to_constant().", "source": "github-repos"}
{"code": "def monitored_timer(cell):\n\n    def actual_decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            with MonitoredTimer(cell):\n                return func(*args, **kwargs)\n        return wrapper\n    return actual_decorator", "docstring": "A function decorator for adding MonitoredTimer support.\n\nArgs:\ncell: the cell associated with the time metric that will be inremented.\nReturns:\nA decorator that measure the function runtime and increment the specified\ncounter cell.", "source": "github-repos"}
{"code": "def convert(credentials):\n    credentials_class = type(credentials)\n    try:\n        return _CLASS_CONVERSION_MAP[credentials_class](credentials)\n    except KeyError as caught_exc:\n        new_exc = ValueError(_CONVERT_ERROR_TMPL.format(credentials_class))\n        six.raise_from(new_exc, caught_exc)", "docstring": "Convert oauth2client credentials to google-auth credentials.\n\nThis class converts:\n\n- :class:`oauth2client.client.OAuth2Credentials` to\n:class:`google.oauth2.credentials.Credentials`.\n- :class:`oauth2client.client.GoogleCredentials` to\n:class:`google.oauth2.credentials.Credentials`.\n- :class:`oauth2client.service_account.ServiceAccountCredentials` to\n:class:`google.oauth2.service_account.Credentials`.\n- :class:`oauth2client.service_account._JWTAccessCredentials` to\n:class:`google.oauth2.service_account.Credentials`.\n- :class:`oauth2client.contrib.gce.AppAssertionCredentials` to\n:class:`google.auth.compute_engine.Credentials`.\n- :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to\n:class:`google.auth.app_engine.Credentials`.\n\nReturns:\ngoogle.auth.credentials.Credentials: The converted credentials.\n\nRaises:\nValueError: If the credentials could not be converted.", "source": "codesearchnet"}
{"code": "def clone(self, to_namespace, to_name):\n    r = fapi.clone_workspace(self.namespace, self.name, to_namespace, to_name, self.api_url)\n    fapi._check_response_code(r, 201)\n    return Workspace(to_namespace, to_name, self.api_url)", "docstring": "Clone this workspace.\n\nArgs:\nto_namespace (str): Target workspace namespace\nto_name (str): Target workspace name", "source": "codesearchnet"}
{"code": "def issue(self, invoice_id, **kwargs):\n        \n        url = \"{}/{}/issue\".format(self.base_url, invoice_id)\n        return self.post_url(url, {}, **kwargs)", "docstring": "Issues an invoice in draft state\n\nArgs:\ninvoice_id : Id for delete the invoice\nReturns:\nIts response is the invoice entity, similar to create/update API response. Its status now would be issued.", "source": "juraj-google-style"}
{"code": "def _GetBetweenQEqualsAndAmpersand(self, url):\n    \n    \n    _, _, url = url.partition('?')\n    \n    _, _, url = url.partition('q=')\n    if not url:\n      return ''\n\n    \n    url, _, _ = url.partition('&')\n    return url", "docstring": "Retrieves the substring between the substrings 'q=' and '&'.\n\nArgs:\nurl (str): URL.\n\nReturns:\nstr: search query, the value between 'q=' and '&'  or None if no query\nwas found.", "source": "juraj-google-style"}
{"code": "def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    return backend.sparse_categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis)", "docstring": "Computes the sparse categorical crossentropy loss.\n\nStandalone usage:\n\n>>> y_true = [1, 2]\n>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n>>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)\n>>> assert loss.shape == (2,)\n>>> loss.numpy()\narray([0.0513, 2.303], dtype=float32)\n\nArgs:\ny_true: Ground truth values.\ny_pred: The predicted values.\nfrom_logits: Whether `y_pred` is expected to be a logits tensor. By default,\nwe assume that `y_pred` encodes a probability distribution.\naxis: Defaults to -1. The dimension along which the entropy is\ncomputed.\n\nReturns:\nSparse categorical crossentropy loss value.", "source": "github-repos"}
{"code": "def as_dict_summary(self, print_subelectrodes=True):\n        \n        chg_comp = self.fully_charged_entry.composition\n        dischg_comp = self.fully_discharged_entry.composition\n\n        ion = self.working_ion\n        d = {\"average_voltage\": self.get_average_voltage(),\n             \"max_voltage\": self.max_voltage,\n             \"min_voltage\": self.min_voltage,\n             \"max_delta_volume\": self.max_delta_volume,\n             \"max_voltage_step\": self.max_voltage_step,\n             \"capacity_grav\": self.get_capacity_grav(),\n             \"capacity_vol\": self.get_capacity_vol(),\n             \"energy_grav\": self.get_specific_energy(),\n             \"energy_vol\": self.get_energy_density(),\n             \"working_ion\": self._working_ion.symbol,\n             \"nsteps\": self.num_steps,\n             \"framework\": self._vpairs[0].framework.to_data_dict,\n             \"formula_charge\": chg_comp.reduced_formula,\n             \"id_charge\": self.fully_charged_entry.entry_id,\n             \"formula_discharge\": dischg_comp.reduced_formula,\n             \"id_discharge\": self.fully_discharged_entry.entry_id,\n             \"fracA_charge\": chg_comp.get_atomic_fraction(ion),\n             \"fracA_discharge\": dischg_comp.get_atomic_fraction(ion),\n             \"max_instability\": self.get_max_instability(),\n             \"min_instability\": self.get_min_instability(),\n             \"material_ids\" : [itr_ent.entry_id for itr_ent in self._entries],\n             \"stable_material_ids\" : [itr_ent.entry_id for itr_ent in self.get_stable_entries()],\n             \"unstable_material_ids\": [itr_ent.entry_id for itr_ent in self.get_unstable_entries()],\n             }\n\n        if all(['decomposition_energy' in itr_ent.data for itr_ent in self._entries]):\n            d.update({\"stability_charge\": self.fully_charged_entry.data['decomposition_energy'],\n                 \"stability_discharge\": self.fully_discharged_entry.data['decomposition_energy'],\n                 \"stability_data\":{itr_ent.entry_id: itr_ent.data['decomposition_energy'] for itr_ent in self._entries},\n                 })\n\n        if all(['muO2' in itr_ent.data for itr_ent in self._entries]):\n            d.update({\"muO2_data\" : {itr_ent.entry_id: itr_ent.data['muO2'] for itr_ent in self._entries}})\n\n        if print_subelectrodes:\n            f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)\n            d[\"adj_pairs\"] = list(map(f_dict,\n                                 self.get_sub_electrodes(adjacent_only=True)))\n            d[\"all_pairs\"] = list(map(f_dict,\n                                 self.get_sub_electrodes(adjacent_only=False)))\n        return d", "docstring": "Generate a summary dict.\n\nArgs:\nprint_subelectrodes: Also print data on all the possible\nsubelectrodes.\n\nReturns:\nA summary of this electrode\"s properties in dict format.", "source": "juraj-google-style"}
{"code": "def remove_results(vcs, signature):\n    results_directory = _get_results_directory(vcs, signature)\n    if (not os.path.exists(results_directory)):\n        raise ResultsNotFoundError\n    shutil.rmtree(results_directory)", "docstring": "Removed saved results for this signature\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (str)\nRaises:\nResultsNotFoundError", "source": "codesearchnet"}
{"code": "def get_creator_by_name(name):\n    return {'docker(container)': Container.creator, 'shell': Bash.creator, 'docker(image)': Image.creator, 'python': Script.creator, 'packer': Packer.creator, 'ansible(simple)': Ansible.creator}[name]", "docstring": "Get creator function by name.\n\nArgs:\nname (str): name of the creator function.\n\nReturns:\nfunction: creater function.", "source": "codesearchnet"}
{"code": "def get_links(self, **kw):\n    links = [a for a in dir(self) if (isinstance(getattr(self, a), Model) and (not a.startswith('_model')))]\n    return [{'field': l, 'mdl': getattr(self, l).__class__} for l in links]", "docstring": "Prepare links of form by mimicing pyoko's get_links method's result\n\nArgs:\n**kw:\n\nReturns: list of link dicts", "source": "codesearchnet"}
{"code": "def is_os(name, version_id=None):\n    \n    result = False\n    os_release_infos = _fetch_os_release_infos()\n\n    if name == os_release_infos.get('name', None):\n\n        if version_id is None:\n            result = True\n        elif version_id == os_release_infos.get('version_id', None):\n            result = True\n\n    return result", "docstring": "Return True if OS name in /etc/lsb-release of host given by fabric param\n`-H` is the same as given by argument, False else.\n\nIf arg version_id is not None only return True if it is the same as in\n/etc/lsb-release, too.\n\nArgs:\nname: 'Debian GNU/Linux', 'Ubuntu'\nversion_id(None or str): None,\n'14.04', (Ubuntu)\n'16.04', (Ubuntu)\n'8', (Debian)", "source": "juraj-google-style"}
{"code": "def __stripValue(self, value):\n        \n        if isinstance(value, str):\n            if ( value[0] == '\"' and value[-1] == '\"' ) or ( value[0] == '[' and value[-1] == ']' ):\n                return value[1:-1]\n        return value", "docstring": "strip the special characters in the value\n\nArgs:\nvalue: value string\n\nReturns:\nvalue string without special characters", "source": "juraj-google-style"}
{"code": "def create_iam_resources(env='dev', app='', **_):\n    session = boto3.session.Session(profile_name=env)\n    client = session.client('iam')\n    app_properties = get_properties(env='pipeline')\n    generated = get_details(env=env, app=app)\n    generated_iam = generated.iam()\n    app_details = collections.namedtuple('AppDetails', generated_iam.keys())\n    details = app_details(**generated_iam)\n    LOG.debug('Application details: %s', details)\n    deployment_type = app_properties['type']\n    role_trust_template = get_template('infrastructure/iam/trust/{0}_role.json.j2'.format(deployment_type), formats=generated)\n    resource_action(client, action='create_role', log_format='Created Role: %(RoleName)s', RoleName=details.role, AssumeRolePolicyDocument=role_trust_template)\n    resource_action(client, action='create_instance_profile', log_format='Created Instance Profile: %(InstanceProfileName)s', InstanceProfileName=details.profile)\n    attach_profile_to_role(client, role_name=details.role, profile_name=details.profile)\n    iam_policy = construct_policy(app=app, group=details.group, env=env, pipeline_settings=app_properties)\n    if iam_policy:\n        resource_action(client, action='put_role_policy', log_format='Added IAM Policy: %(PolicyName)s', RoleName=details.role, PolicyName=details.policy, PolicyDocument=iam_policy)\n    resource_action(client, action='create_user', log_format='Created User: %(UserName)s', UserName=details.user)\n    resource_action(client, action='create_group', log_format='Created Group: %(GroupName)s', GroupName=details.group)\n    resource_action(client, action='add_user_to_group', log_format='Added User to Group: %(UserName)s -> %(GroupName)s', GroupName=details.group, UserName=details.user)\n    return True", "docstring": "Create the IAM Resources for the application.\n\nArgs:\nenv (str): Deployment environment/account, i.e. dev, stage, prod.\napp (str): Spinnaker Application name.\n\nReturns:\nTrue upon successful completion.", "source": "codesearchnet"}
{"code": "def drop(self, items):\n        \n        self._manager.leaser.remove(items)\n        self._manager.maybe_resume_consumer()", "docstring": "Remove the given messages from lease management.\n\nArgs:\nitems(Sequence[DropRequest]): The items to drop.", "source": "juraj-google-style"}
{"code": "def get_default_description(arg: inspect.Parameter) -> str:\n    if arg.annotation is inspect._empty:\n        arg_type = '<fill_type>'\n    elif hasattr(arg.annotation, '__name__'):\n        arg_type = arg.annotation.__name__\n    else:\n        arg_type = str(arg.annotation)\n    if arg.default is inspect._empty:\n        return f'`{arg_type}`'\n    elif arg.default is None:\n        return f'`{arg_type}`, {OPTIONAL_KEYWORD}'\n    else:\n        str_default = stringify_default(arg.default)\n        return f'`{arg_type}`, {OPTIONAL_KEYWORD}, defaults to {str_default}'", "docstring": "Builds a default description for a parameter that was not documented.\n\nArgs:\narg (`inspect.Parameter`): The argument in the signature to generate a description for.\n\nReturns:\n`str`: The description.", "source": "github-repos"}
{"code": "def _verify_ops(graph_def: graph_pb2.GraphDef, namespace_whitelist):\n    if namespace_whitelist is None:\n        return\n    invalid_ops = []\n    invalid_namespaces = set()\n    all_operations = []\n    all_operations.extend(meta_graph.ops_used_by_graph_def(graph_def))\n    for op in all_operations:\n        if '>' in op:\n            namespace = op.split('>')[0]\n            if namespace not in namespace_whitelist:\n                invalid_ops.append(op)\n                invalid_namespaces.add(namespace)\n    if invalid_ops:\n        raise ValueError(f\"Attempted to save ops from non-whitelisted namespaces to SavedModel: {invalid_ops}.\\nPlease verify that these ops should be saved, since they must be available when loading the SavedModel. If loading from Python, you must import the library defining these ops. From C++, link the custom ops to the serving binary. Once you've confirmed this, add the following namespaces to the `namespace_whitelist` argument in tf.saved_model.SaveOptions: {invalid_namespaces}.\")", "docstring": "Verifies that all namespaced ops in the graph are whitelisted.\n\nArgs:\ngraph_def: the GraphDef to validate.\nnamespace_whitelist: a list of namespaces to allow. If `None`, all will be\nallowed. If an op does not have a namespace, it will be allowed.\n\nRaises:\nValueError: If the graph contains ops that violate the whitelist.", "source": "github-repos"}
{"code": "def get(self, uid: int) -> Optional[CachedMessage]:\n        \n        return self._cache.get(uid)", "docstring": "Return the given cached message.\n\nArgs:\nuid: The message UID.", "source": "juraj-google-style"}
{"code": "def parseConfig(cls, value):\n\t\t\n\t\tif 'enabled' in value:\n\t\t\tvalue['enabled'] = bool(value['enabled'])\n\n\t\tif 'exclude_paths' in value:\n\t\t\tvalue['exclude_paths'] = [n.strip() for n in ast.literal_eval(value['exclude_paths'])]\n\n\t\treturn value", "docstring": "Parse the config values\n\nArgs:\nvalue (dict): Dictionary which contains the checker config\n\nReturns:\ndict: The checker config with parsed values", "source": "juraj-google-style"}
{"code": "def leave(self):\n    try:\n        self.client.api.leave_room(self.room_id)\n        del self.client.rooms[self.room_id]\n        return True\n    except MatrixRequestError:\n        return False", "docstring": "Leave the room.\n\nReturns:\nboolean: Leaving the room was successful.", "source": "codesearchnet"}
{"code": "def _getFieldStats(self):\n    fieldStats = dict()\n    fieldNames = self._inputSource.getFieldNames()\n    for field in fieldNames:\n        curStats = dict()\n        curStats['min'] = self._inputSource.getFieldMin(field)\n        curStats['max'] = self._inputSource.getFieldMax(field)\n        fieldStats[field] = curStats\n    return fieldStats", "docstring": "Method which returns a dictionary of field statistics received from the\ninput source.\n\nReturns:\n\nfieldStats: dict of dicts where the first level is the field name and\nthe second level is the statistic. ie. fieldStats['pounds']['min']", "source": "codesearchnet"}
{"code": "def get_np_doc_form():\n    return _np_doc_form", "docstring": "Gets the form of the original numpy docstrings.\n\nReturns:\nSee `set_np_doc_form` for the list of valid values.", "source": "github-repos"}
{"code": "def list_apps(site):\n    \n    ret = dict()\n    ps_cmd = list()\n    ps_cmd.append(\"Get-WebApplication -Site '{0}'\".format(site))\n    ps_cmd.append(r\"| Select-Object applicationPool, path, PhysicalPath, preloadEnabled,\")\n    ps_cmd.append(r\"@{ Name='name'; Expression={ $_.path.Split('/', 2)[-1] } },\")\n    ps_cmd.append(r\"@{ Name='protocols'; Expression={ @( $_.enabledProtocols.Split(',')\")\n    ps_cmd.append(r\"| Foreach-Object { $_.Trim() } ) } }\")\n\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n\n    for item in items:\n        protocols = list()\n\n        \n        \n        \n\n        if isinstance(item['protocols'], dict):\n            if 'value' in item['protocols']:\n                protocols += item['protocols']['value']\n        else:\n            protocols.append(item['protocols'])\n\n        ret[item['name']] = {'apppool': item['applicationPool'],\n                             'path': item['path'],\n                             'preload': item['preloadEnabled'],\n                             'protocols': protocols,\n                             'sourcepath': item['PhysicalPath']}\n\n    if not ret:\n        log.warning('No apps found in output: %s', cmd_ret)\n\n    return ret", "docstring": "Get all configured IIS applications for the specified site.\n\nArgs:\nsite (str): The IIS site name.\n\nReturns: A dictionary of the application names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_apps site", "source": "juraj-google-style"}
{"code": "def get_total_mass(self):\n        \n        try:\n            mass = self.loc[:, 'mass'].sum()\n        except KeyError:\n            mass_molecule = self.add_data('mass')\n            mass = mass_molecule.loc[:, 'mass'].sum()\n        return mass", "docstring": "Returns the total mass in g/mol.\n\nArgs:\nNone\n\nReturns:\nfloat:", "source": "juraj-google-style"}
{"code": "def parse(self, utterance, context=None, N=1):\n    start = time.time()\n    context_trie = None\n    if (context and isinstance(context, list)):\n        context.sort(key=(lambda x: x.get('confidence')))\n        context_trie = Trie()\n        for entity in context:\n            (entity_value, entity_type) = entity.get('data')[0]\n            context_trie.insert(entity_value.lower(), data=(entity_value, entity_type), weight=entity.get('confidence'))\n    tagged = self._tagger.tag(utterance.lower(), context_trie=context_trie)\n    self.emit('tagged_entities', {'utterance': utterance, 'tags': list(tagged), 'time': (time.time() - start)})\n    start = time.time()\n    bke = BronKerboschExpander(self._tokenizer)\n\n    def score_clique(clique):\n        score = 0.0\n        for tagged_entity in clique:\n            ec = tagged_entity.get('entities', [{'confidence': 0.0}])[0].get('confidence')\n            score += ((ec * len(tagged_entity.get('entities', [{'match': ''}])[0].get('match'))) / (len(utterance) + 1))\n        return score\n    parse_results = bke.expand(tagged, clique_scoring_func=score_clique)\n    count = 0\n    for result in parse_results:\n        count += 1\n        parse_confidence = 0.0\n        for tag in result:\n            sample_entity = tag['entities'][0]\n            entity_confidence = ((sample_entity.get('confidence', 0.0) * float(len(sample_entity.get('match')))) / len(utterance))\n            parse_confidence += entity_confidence\n        (yield {'utterance': utterance, 'tags': result, 'time': (time.time() - start), 'confidence': parse_confidence})\n        if (count >= N):\n            break", "docstring": "Used to find tags within utterance with a given confidence\n\nArgs:\nutterance(str): conversational piece given by the user\ncontext(list): a list of entities\nN(int): number of results\nReturns: yield an object with the following fields\nutterance(str): the value passed in\ntags(list) : a list of tags found in utterance\ntime(time) : duration since call of function\nconfidence(float) : float indicating how confident of a match to the\nutterance. This might be used to determan the most likely intent.", "source": "codesearchnet"}
{"code": "def set_query(self, value):\n        \n        if isinstance(value, basestring) or value is None:\n            self._content['query'] = value\n        elif hasattr(value, 'keys'):\n            self._content['query'] = query.terms_from_dict(value)\n        else:\n            raise TypeError(\"Query must be a string or dict. Got: \" + type(value) + \" insted!\")", "docstring": "Convert a dict form of query in a string of needed and store the query string.\n\nArgs:\nvalue -- A query string or a dict with query xpaths as keys and text or\nnested query dicts as values.", "source": "juraj-google-style"}
{"code": "def locked_get(self):\n    filters = {self.key_name: self.key_value}\n    query = self.session.query(self.model_class).filter_by(**filters)\n    entity = query.first()\n    if entity:\n        credential = getattr(entity, self.property_name)\n        if (credential and hasattr(credential, 'set_store')):\n            credential.set_store(self)\n        return credential\n    else:\n        return None", "docstring": "Retrieve stored credential.\n\nReturns:\nA :class:`oauth2client.Credentials` instance or `None`.", "source": "codesearchnet"}
{"code": "def CheckFlowCanBeStartedOnClient(flow_name):\n  \n  flow_cls = flow.GRRFlow.GetPlugin(flow_name)\n\n  if flow_cls.category:\n    return True\n  else:\n    raise access_control.UnauthorizedAccess(\n        \"Flow %s can't be started on a client by non-suid users.\" % flow_name)", "docstring": "Checks if flow can be started on a particular client.\n\nOnly flows with a category can bestarted. Having a category means that the\nflow will be accessible from the UI.\n\nArgs:\nflow_name: Name of the flow to check access for.\n\nReturns:\nTrue if flow is externally accessible.\nRaises:\naccess_control.UnauthorizedAccess: if flow is not externally accessible.", "source": "juraj-google-style"}
{"code": "def RegisterPlugin(cls, plugin_class):\n    \n    name = getattr(\n        plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)\n    name = name.lower()\n    if name in cls._plugins:\n      raise KeyError(\n          'Artifact plugin class already set for name: {0:s}.'.format(name))\n\n    preprocess_plugin = plugin_class()\n\n    cls._plugins[name] = preprocess_plugin\n\n    if isinstance(\n        preprocess_plugin, interface.FileSystemArtifactPreprocessorPlugin):\n      cls._file_system_plugins[name] = preprocess_plugin\n\n    elif isinstance(\n        preprocess_plugin, interface.KnowledgeBasePreprocessorPlugin):\n      cls._knowledge_base_plugins[name] = preprocess_plugin\n\n    elif isinstance(\n        preprocess_plugin,\n        interface.WindowsRegistryKeyArtifactPreprocessorPlugin):\n      cls._windows_registry_plugins[name] = preprocess_plugin", "docstring": "Registers an preprocess plugin class.\n\nArgs:\nplugin_class (type): preprocess plugin class.\n\nRaises:\nKeyError: if plugin class is already set for the corresponding name.\nTypeError: if the source type of the plugin class is not supported.", "source": "juraj-google-style"}
{"code": "def angle(self, deg=False):\n    if (self.dtype.str[1] != 'c'):\n        warnings.warn('angle() is intended for complex-valued timeseries', RuntimeWarning, 1)\n    da = distob.vectorize(np.angle)(self, deg)\n    return _dts_from_da(da, self.tspan, self.labels)", "docstring": "Return the angle of a complex Timeseries\n\nArgs:\ndeg (bool, optional):\nReturn angle in degrees if True, radians if False (default).\n\nReturns:\nangle (Timeseries):\nThe counterclockwise angle from the positive real axis on\nthe complex plane, with dtype as numpy.float64.", "source": "codesearchnet"}
{"code": "def print_periodic_table(filter_function: callable = None):\n        \n        for row in range(1, 10):\n            rowstr = []\n            for group in range(1, 19):\n                try:\n                    el = Element.from_row_and_group(row, group)\n                except ValueError:\n                    el = None\n                if el and ((not filter_function) or filter_function(el)):\n                    rowstr.append(\"{:3s}\".format(el.symbol))\n                else:\n                    rowstr.append(\"   \")\n            print(\" \".join(rowstr))", "docstring": "A pretty ASCII printer for the periodic table, based on some\nfilter_function.\n\nArgs:\nfilter_function: A filtering function taking an Element as input\nand returning a boolean. For example, setting\nfilter_function = lambda el: el.X > 2 will print a periodic\ntable containing only elements with electronegativity > 2.", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists snapshots.\n\nArgs:\nrequest: (DataflowProjectsLocationsJobsSnapshotsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListSnapshotsResponse) The response message.", "source": "github-repos"}
{"code": "def __init__(self, data_parallelism, expert_parallelism, gates):\n    \n    self._gates = gates\n    self._dp = data_parallelism\n    self._ep = expert_parallelism\n    assert len(gates) == self._dp.n\n    self._dispatchers = self._dp(SparseDispatcher, self._ep.n, gates)", "docstring": "Create a DistributedSparseDispatcher.\n\nArgs:\ndata_parallelism: a Parallelism object.\nexpert_parallelism: a Parallelism object.\ngates: a list of datashard_parallelism.n `Tensor`s of shapes\n`[batch_size[d], num_experts]`.\n\nReturns:\na DistributedSparseDispatcher", "source": "juraj-google-style"}
{"code": "def find_nearest(a, value, index=False):\n    \n    i = np.abs(a - value).argmin()\n    if index:\n        return i\n    else:\n        return a[i]", "docstring": "Find the array value, or index of the array value, closest to some given\nvalue.\n\nArgs:\na (ndarray)\nvalue (float)\nindex (bool): whether to return the index instead of the array value.\n\nReturns:\nfloat. The array value (or index, as int) nearest the specified value.", "source": "juraj-google-style"}
{"code": "def vocab_token_counts(text_filepattern, max_lines):\n    ret = {}\n    for (i, line) in enumerate(_read_filepattern(text_filepattern, max_lines=max_lines)):\n        if (',' not in line):\n            tf.logging.warning(\"Malformed vocab line \n            continue\n        (token, count) = line.rsplit(',', 1)\n        ret[_native_to_unicode(token)] = int(count)\n    return ret", "docstring": "Read a vocab file and return a dictionary of token counts.\n\nReads a two-column CSV file of tokens and their frequency in a dataset. The\ntokens are presumed to be generated by encode() or the equivalent.\n\nArgs:\ntext_filepattern: A pattern matching one or more files.\nmax_lines: An integer; maximum total lines to read.\n\nReturns:\na dictionary mapping token to count.", "source": "codesearchnet"}
{"code": "def _save_private_file(filename, json_contents):\n    temp_filename = tempfile.mktemp()\n    file_desc = os.open(temp_filename, (os.O_WRONLY | os.O_CREAT), 384)\n    with os.fdopen(file_desc, 'w') as file_handle:\n        json.dump(json_contents, file_handle, sort_keys=True, indent=2, separators=(',', ': '))\n    shutil.move(temp_filename, filename)", "docstring": "Saves a file with read-write permissions on for the owner.\n\nArgs:\nfilename: String. Absolute path to file.\njson_contents: JSON serializable object to be saved.", "source": "codesearchnet"}
{"code": "def summarize_tensors(tensor_dict, tag=None):\n  \n  if tag is None:\n    tag = \"tensors/\"\n\n  for t_name in list(tensor_dict):\n    t = tensor_dict[t_name]\n    tf.summary.histogram(tag + t_name, t)", "docstring": "Summarize the tensors.\n\nArgs:\ntensor_dict: a dictionary of tensors.\ntag: name scope of the summary; defaults to tensors/.", "source": "juraj-google-style"}
{"code": "def create_downloader_of_type(type_name):\n    downloaders = available_downloaders()\n    if (type_name not in downloaders.keys()):\n        raise UnknownDownloaderException(('Unknown downloader: %s' % (type_name,)))\n    return downloaders[type_name]()", "docstring": "Create an instance of the downloader with the given name.\n\nArgs:\ntype_name: The name of a downloader.\n\nReturns:\nAn instance of the downloader with the given type.", "source": "codesearchnet"}
{"code": "def fetch_tuples(self, max_tuples=20, timeout=None):\n    tuples = list()\n    if (timeout is None):\n        while (len(tuples) < max_tuples):\n            fetcher = self._data_fetcher\n            if (not fetcher):\n                break\n            tuples.append(fetcher.items.get())\n        return tuples\n    timeout = float(timeout)\n    end = (time.time() + timeout)\n    while (len(tuples) < max_tuples):\n        qto = (end - time.time())\n        if (qto <= 0):\n            break\n        try:\n            fetcher = self._data_fetcher\n            if (not fetcher):\n                break\n            tuples.append(fetcher.items.get(timeout=qto))\n        except queue.Empty:\n            break\n    return tuples", "docstring": "Fetch a number of tuples from this view.\n\nFetching of data must have been started with\n:py:meth:`start_data_fetch` before calling this method.\n\nIf ``timeout`` is ``None`` then the returned list will\ncontain ``max_tuples`` tuples. Otherwise if the timeout is reached\nthe list may contain less than ``max_tuples`` tuples.\n\nArgs:\nmax_tuples(int): Maximum number of tuples to fetch.\ntimeout(float): Maximum time to wait for ``max_tuples`` tuples.\n\nReturns:\nlist: List of fetched tuples.\n.. versionadded:: 1.12", "source": "codesearchnet"}
{"code": "def CheckForBadCharacters(filename, lines, error):\n    for (linenum, line) in enumerate(lines):\n        if (unicode_escape_decode('�') in line):\n            error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).')\n        if ('\\x00' in line):\n            error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')", "docstring": "Logs an error for each line containing bad characters.\n\nTwo kinds of bad characters:\n\n1. Unicode replacement characters: These indicate that either the file\ncontained invalid UTF-8 (likely) or Unicode replacement characters (which\nit shouldn't).  Note that it's possible for this to throw off line\nnumbering if the invalid UTF-8 occurred adjacent to a newline.\n\n2. NUL bytes.  These are problematic for some tools.\n\nArgs:\nfilename: The name of the current file.\nlines: An array of strings, each representing a line of the file.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def prose_wc(args):\n    if (args.file is None):\n        return 1\n    if args.split_hyphens:\n        INTERSTITIAL_PUNCTUATION.append(re.compile('-'))\n    content = args.file.read().decode('utf-8')\n    filename = args.file.name\n    body = strip_frontmatter(content)\n    parsed = markdown_to_text(body)\n    result = wc(filename, body, parsed=parsed, is_jekyll=(body != content))\n    if (args.update and (filename != '_stdin_') and (result['counts']['type'] == 'jekyll')):\n        update_file(filename, result, content, args.indent)\n    else:\n        _mockable_print({'yaml': yaml.safe_dump(result, default_flow_style=False, indent=args.indent), 'json': json.dumps(result, indent=args.indent), 'default': default_dump(result)}[args.format])\n    return 0", "docstring": "Processes data provided to print a count object, or update a file.\n\nArgs:\nargs: an ArgumentParser object returned by setup()", "source": "codesearchnet"}
{"code": "def _check_middleware_dependencies(concerned_object, required_middleware):\n    declared_middleware = getattr(settings, 'MIDDLEWARE', None)\n    if (declared_middleware is None):\n        declared_middleware = settings.MIDDLEWARE_CLASSES\n    matching_middleware = [mw for mw in declared_middleware if (mw in required_middleware)]\n    if (required_middleware != matching_middleware):\n        raise AssertionError('{} requires middleware order {} but matching middleware was {}'.format(concerned_object, required_middleware, matching_middleware))", "docstring": "Check required middleware dependencies exist and in the correct order.\n\nArgs:\nconcerned_object (object): The object for which the required\nmiddleware is being checked. This is used for error messages only.\nrequired_middleware (list of String): An ordered list representing the\nrequired middleware to be checked.\n\nUsage:\nAdd in __init__ method to a Middleware class to have its dependencies\nchecked on startup.\n\ndef __init__(self):\nsuper(SomeMiddleware, self).__init__()\n_check_middleware_dependencies(self, required_middleware=[\n'edx_django_utils.cache.middleware.RequestCacheMiddleware',\n])\n\nRaises:\nAssertionError if the provided dependencies don't appear in\nMIDDLEWARE_CLASSES in the correct order.", "source": "codesearchnet"}
{"code": "def cardinal(self, to):\n        \n        return sum(1 for _ in filter(\n            lambda d: not d.external and d.target in to, self.dependencies))", "docstring": "Return the number of dependencies of this module to the given node.\n\nArgs:\nto (Package/Module): the target node.\n\nReturns:\nint: number of dependencies.", "source": "juraj-google-style"}
{"code": "def vert_tab_pos(self, positions):\n        \n        if positions == 'clear':\n            self.send(chr(27)+'B'+chr(0))\n            return\n        if positions.min < 1 or positions.max >255:\n                raise RuntimeError('Invalid position parameter in function horzTabPos')\n        sendstr = chr(27) + 'D'\n        if len(positions)<=16:\n            for position in positions:\n                sendstr += chr(position)\n            self.send(sendstr + chr(0))\n        else:\n            raise RuntimeError('Too many positions in function vertTabPos')", "docstring": "Sets tab positions, up to a maximum of 32 positions. Also can clear tab positions.\n\nArgs:\npositions -- Either a list of tab positions (between 1 and 255), or 'clear'.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid position parameter.\nRuntimeError: Too many positions.", "source": "juraj-google-style"}
{"code": "def update_or_create_all(cls, list_of_kwargs, keys=[]):\n    objs = []\n    for kwargs in list_of_kwargs:\n        filter_kwargs = subdict(kwargs, keys)\n        if (filter_kwargs == {}):\n            obj = None\n        else:\n            obj = cls.first(**filter_kwargs)\n        if (obj is not None):\n            for (key, value) in kwargs.iteritems():\n                if ((key not in keys) and (key not in cls._no_overwrite_)):\n                    setattr(obj, key, value)\n        else:\n            obj = cls.new(**kwargs)\n        objs.append(obj)\n    try:\n        return cls.add_all(objs)\n    except:\n        cls.session.rollback()\n        raise", "docstring": "Batch method for updating a list of instances and\ncreating them if required\n\nArgs:\nlist_of_kwargs(list of dicts): A list of dicts where\neach dict denotes the keyword args that you would pass\nto the create method separately\n\nkeys (list, optional): A list of keys to use for the\ninitial finding step. Matching is done only on these\nattributes.\n\nExamples:\n\n>>> Customer.update_or_create_all([\n... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34},\n... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com',\n... 'gender': 'Male'}], keys=['name', 'email'])", "source": "codesearchnet"}
{"code": "def _find_variables(graph_def: graph_pb2.GraphDef) -> Mapping[str, node_def_pb2.NodeDef]:\n    variable_nodes = {}\n    for var_node in filter(_is_variable, graph_def.node):\n        shared_name = str(var_node.attr['shared_name'].s, encoding='utf-8')\n        variable_nodes[shared_name] = var_node\n    for func in graph_def.library.function:\n        for var_node in filter(_is_variable, func.node_def):\n            variable_nodes[shared_name] = var_node\n    return variable_nodes", "docstring": "Finds all variables within `graph_def`.\n\nThis function makes sense for TF 1 graphs only, as it depends on\n`shared_name`.\n\nArgs:\ngraph_def: `GraphDef` to find variables from.\n\nReturns:\nA mapping of `shared_name` -> `NodeDef` corresponding to a variable op.", "source": "github-repos"}
{"code": "def _head(self, client_kwargs):\n        \n        with _handle_azure_exception():\n            \n            if 'file_name' in client_kwargs:\n                result = self.client.get_file_properties(**client_kwargs)\n\n            \n            elif 'directory_name' in client_kwargs:\n                result = self.client.get_directory_properties(**client_kwargs)\n\n            \n            else:\n                result = self.client.get_share_properties(**client_kwargs)\n\n        return self._model_to_dict(result)", "docstring": "Returns object or bucket HTTP header.\n\nArgs:\nclient_kwargs (dict): Client arguments.\n\nReturns:\ndict: HTTP header.", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = BytearrayStream()\n\n        if self._device_serial_number is not None:\n            self._device_serial_number.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._password is not None:\n            self._password.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._device_identifier is not None:\n            self._device_identifier.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._network_identifier is not None:\n            self._network_identifier.write(\n                local_stream,\n                kmip_version=kmip_version)\n        if self._machine_identifier is not None:\n            self._machine_identifier.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._media_identifier is not None:\n            self._media_identifier.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(DeviceCredential, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the DeviceCredential struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def _construct(configdict, prefix, ua):\n    if (not ua):\n        raise UserAgentError(\"User_agent parameter missing. It can be your project's name for example.\")\n    preprefix = configdict.get('preprefix')\n    if preprefix:\n        user_agent = ('%s:' % preprefix)\n    else:\n        user_agent = ''\n    if prefix:\n        user_agent = ('%s%s-' % (user_agent, prefix))\n    user_agent = ('%s%s' % (user_agent, ua))\n    return user_agent", "docstring": "Construct user agent\n\nArgs:\nconfigdict (str): Additional configuration for user agent\nprefix (str): Text to put at start of user agent\nua (str): Custom user agent text\n\nReturns:\nstr: Full user agent string", "source": "codesearchnet"}
{"code": "def round(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.round, tf.float32)", "docstring": "Returns a TensorFluent for the round function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the round function.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               api,\n               path,\n               buffer_size=DEFAULT_BUFFER_SIZE,\n               max_request_size=MAX_REQUEST_SIZE,\n               offset=0):\n    \n    self._api = api\n    self._path = path\n    self.name = api_utils._unquote_filename(path)\n    self.closed = False\n\n    assert buffer_size <= max_request_size\n    self._buffer_size = buffer_size\n    self._max_request_size = max_request_size\n    self._offset = offset\n\n    self._buffer = _Buffer()\n    self._etag = None\n\n    get_future = self._get_segment(offset, self._buffer_size, check_response=False)\n\n    status, headers, content = self._api.head_object(path)\n    errors.check_status(status, [200], path, resp_headers=headers, body=content)\n    self._file_size = long(common.get_stored_content_length(headers))\n    self._check_etag(headers.get('etag'))\n\n    self._buffer_future = None\n\n    if self._file_size != 0:\n      content, check_response_closure = get_future.get_result()\n      check_response_closure()\n      self._buffer.reset(content)\n      self._request_next_buffer()", "docstring": "Constructor.\n\nArgs:\napi: A StorageApi instance.\npath: Quoted/escaped path to the object, e.g. /mybucket/myfile\nbuffer_size: buffer size. The ReadBuffer keeps\none buffer. But there may be a pending future that contains\na second buffer. This size must be less than max_request_size.\nmax_request_size: Max bytes to request in one urlfetch.\noffset: Number of bytes to skip at the start of the file. If None, 0 is\nused.", "source": "juraj-google-style"}
{"code": "def dict_setdiff(dict_, negative_keys):\n    keys = [key for key in six.iterkeys(dict_) if (key not in set(negative_keys))]\n    subdict_ = dict_subset(dict_, keys)\n    return subdict_", "docstring": "r\"\"\"\nreturns a copy of dict_ without keys in the negative_keys list\n\nArgs:\ndict_ (dict):\nnegative_keys (list):", "source": "codesearchnet"}
{"code": "def _assert_same_base_type(items, expected_type=None):\n  r\n  original_expected_type = expected_type\n  mismatch = False\n  for item in items:\n    if item is not None:\n      item_type = base_dtype(item.dtype)\n      if not expected_type:\n        expected_type = item_type\n      elif expected_type != item_type:\n        mismatch = True\n        break\n  if mismatch:\n    \n    \n    expected_type = original_expected_type\n    original_item_str = None\n    get_name = lambda x: x.name if hasattr(x, 'name') else str(x)\n    for item in items:\n      if item is not None:\n        item_type = base_dtype(item.dtype)\n        if not expected_type:\n          expected_type = item_type\n          original_item_str = get_name(item)\n        elif expected_type != item_type:\n          raise ValueError(\n              '{}, type={}, must be of the same type ({}){}.'.format(\n                  get_name(item),\n                  item_type,\n                  expected_type,\n                  ((' as {}'.format(original_item_str))\n                   if original_item_str else '')))\n    return expected_type  \n  else:\n    return expected_type", "docstring": "r\"\"\"Asserts all items are of the same base type.\n\nArgs:\nitems: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,\n`Operation`, or `IndexedSlices`). Can include `None` elements, which\nwill be ignored.\nexpected_type: Expected type. If not specified, assert all items are\nof the same base type.\n\nReturns:\nValidated type, or none if neither expected_type nor items provided.\n\nRaises:\nValueError: If any types do not match.", "source": "juraj-google-style"}
{"code": "def _perform_action(self, params, return_dict=True):\n        \n        action = self.get_data(\n            \"droplets/%s/actions/\" % self.id,\n            type=POST,\n            params=params\n        )\n        if return_dict:\n            return action\n        else:\n            action = action[u'action']\n            return_action = Action(token=self.token)\n            \n            for attr in action.keys():\n                setattr(return_action, attr, action[attr])\n            return return_action", "docstring": "Perform a droplet action.\n\nArgs:\nparams (dict): parameters of the action\n\nOptional Args:\nreturn_dict (bool): Return a dict when True (default),\notherwise return an Action.\n\nReturns dict or Action", "source": "juraj-google-style"}
{"code": "def remove_pos_arg_placeholders(alias_command):\n    \n    \n    split_command = shlex.split(alias_command)\n    boundary_index = len(split_command)\n    for i, subcommand in enumerate(split_command):\n        if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:\n            boundary_index = i\n            break\n\n    return ' '.join(split_command[:boundary_index]).lower()", "docstring": "Remove positional argument placeholders from alias_command.\n\nArgs:\nalias_command: The alias command to remove from.\n\nReturns:\nThe alias command string without positional argument placeholder.", "source": "juraj-google-style"}
{"code": "def ChiSquared(target_frequency):\n\n    def inner(text):\n        text = ''.join(text)\n        return (- chi_squared(frequency_analyze(text), target_frequency))\n    return inner", "docstring": "Score a text by comparing its frequency distribution against another.\n\nNote:\nIt is easy to be penalised without knowing it when using this scorer.\nEnglish frequency ngrams are capital letters, meaning when using it\nany text you score against must be all capitals for it to give correct results.\nI am aware of the issue and will work on a fix.\n\nTodo:\nMaybe include paramter for ngram size. Havent had a use case for this yet.\nOnce there is evidence it is needed, I will add it.\n\nExample:\n>>> fitness = ChiSquared(english.unigrams)\n>>> fitness(\"ABC\")\n-32.2\n\nArgs:\ntarget_frequency (dict): symbol to frequency mapping of the distribution to compare with", "source": "codesearchnet"}
{"code": "def __init__(self, analyzer_class):\n    \n    super(HashTaggingAnalysisPlugin, self).__init__()\n    self._analysis_queue_timeout = self.DEFAULT_QUEUE_TIMEOUT\n    self._analyzer_started = False\n    self._comment = 'Tag applied by {0:s} analysis plugin'.format(self.NAME)\n    self._event_identifiers_by_pathspec = collections.defaultdict(list)\n    self._hash_pathspecs = collections.defaultdict(list)\n    self._requester_class = None\n    self._time_of_last_status_log = time.time()\n    self.hash_analysis_queue = Queue.Queue()\n    self.hash_queue = Queue.Queue()\n\n    self._analyzer = analyzer_class(self.hash_queue, self.hash_analysis_queue)", "docstring": "Initializes a hash tagging analysis plugin.\n\nArgs:\nanalyzer_class (type): a subclass of HashAnalyzer that will be\ninstantiated by the plugin.", "source": "juraj-google-style"}
{"code": "def match(self, path):\n        \n        this = self.segments\n        that = path.split('/')\n        current_var = None\n        bindings = {}\n        segment_count = self.segment_count\n        j = 0\n        for i in range(0, len(this)):\n            if j >= len(that):\n                break\n            if this[i].kind == _TERMINAL:\n                if this[i].literal == '*':\n                    bindings[current_var] = that[j]\n                    j += 1\n                elif this[i].literal == '**':\n                    until = j + len(that) - segment_count + 1\n                    segment_count += len(that) - segment_count\n                    bindings[current_var] = '/'.join(that[j:until])\n                    j = until\n                elif this[i].literal != that[j]:\n                    raise ValidationException(\n                        'mismatched literal: \\'%s\\' != \\'%s\\'' % (\n                            this[i].literal, that[j]))\n                else:\n                    j += 1\n            elif this[i].kind == _BINDING:\n                current_var = this[i].literal\n        if j != len(that) or j != segment_count:\n            raise ValidationException(\n                'match error: could not render from the path template: {}'\n                .format(path))\n        return bindings", "docstring": "Matches a fully qualified path template string.\n\nArgs:\npath (str): A fully qualified path template string.\n\nReturns:\ndict: Var names to matched binding values.\n\nRaises:\nValidationException: If path can't be matched to the template.", "source": "juraj-google-style"}
{"code": "def from_cif_file(cif_file, source='', comment=''):\n        \n        r = CifParser(cif_file)\n        structure = r.get_structures()[0]\n        return Header(structure, source, comment)", "docstring": "Static method to create Header object from cif_file\n\nArgs:\ncif_file: cif_file path and name\nsource: User supplied identifier, i.e. for Materials Project this\nwould be the material ID number\ncomment: User comment that goes in header\n\nReturns:\nHeader Object", "source": "juraj-google-style"}
{"code": "def argmin(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Argmin(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.argmin(x, axis=axis, keepdims=keepdims)", "docstring": "Returns the indices of the minimum values along an axis.\n\nArgs:\nx: Input tensor.\naxis: By default, the index is into the flattened tensor, otherwise\nalong the specified axis.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one. Defaults to `False`.\n\nReturns:\nTensor of indices. It has the same shape as `x`, with the dimension\nalong `axis` removed.\n\nExample:\n>>> x = keras.ops.arange(6).reshape(2, 3) + 10\n>>> x\narray([[10, 11, 12],\n[13, 14, 15]], dtype=int32)\n>>> keras.ops.argmin(x)\narray(0, dtype=int32)\n>>> keras.ops.argmin(x, axis=0)\narray([0, 0, 0], dtype=int32)\n>>> keras.ops.argmin(x, axis=1)\narray([0, 0], dtype=int32)", "source": "github-repos"}
{"code": "def user_agent_detail(self, **kwargs):\n    path = ('%s/%s/user_agent_detail' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get the user agent detail.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def inverse_removing(self, words_to_remove):\n        \n        mask = np.ones(self.as_np.shape[0], dtype='bool')\n        mask[self.__get_idxs(words_to_remove)] = False\n        if not self.bow:\n            return ''.join([self.as_list[i] if mask[i]\n                            else 'UNKWORDZ' for i in range(mask.shape[0])])\n        return ''.join([self.as_list[v] for v in mask.nonzero()[0]])", "docstring": "Returns a string after removing the appropriate words.\n\nIf self.bow is false, replaces word with UNKWORDZ instead of removing\nit.\n\nArgs:\nwords_to_remove: list of ids (ints) to remove\n\nReturns:\noriginal raw string with appropriate words removed.", "source": "juraj-google-style"}
{"code": "def mod_replace(match, sphinx_modules):\n    \n    sphinx_modules.append(match.group(\"module\"))\n    return \"`{}`_\".format(match.group(\"value\"))", "docstring": "Convert Sphinx ``:mod:`` to plain reST link.\n\nArgs:\nmatch (_sre.SRE_Match): A match (from ``re``) to be used\nin substitution.\nsphinx_modules (list): List to be track the modules that have been\nencountered.\n\nReturns:\nstr: The ``match`` converted to a link.", "source": "juraj-google-style"}
{"code": "def clean_title(title):\n    date_pattern = re.compile('\\\\W*\\\\d{1,2}[/\\\\-.]\\\\d{1,2}[/\\\\-.](?=\\\\d*)(?:.{4}|.{2})\\\\W*')\n    title = date_pattern.sub(' ', title)\n    title = re.sub('\\\\s{2,}', ' ', title)\n    title = title.strip()\n    return title", "docstring": "Clean title -> remove dates, remove duplicated spaces and strip title.\n\nArgs:\ntitle (str): Title.\n\nReturns:\nstr: Clean title without dates, duplicated, trailing and leading spaces.", "source": "codesearchnet"}
{"code": "def __call__(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.matmul(input_tensor, self.filters)\n    return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\ninput_tensor: Input tensor to matmul with the filter.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def header(self, key, value):\n        \n        if type(key) is tuple:\n            key, value = str(key[0]), key[1]\n\n        headers = {key: value}\n        self._headers.extend(headers)", "docstring": "Defines a new response header.\nAlias to ``Response.header()``.\n\nArguments:\nheader (str): header name.\nvalue (str): header value.\n\nReturns:\nself: ``pook.Response`` current instance.", "source": "juraj-google-style"}
{"code": "def localopt(self, forcefield='mmff94', steps=500):\n        \n        pbmol = pb.Molecule(self._obmol)\n        pbmol.localopt(forcefield=forcefield, steps=steps)\n        self._obmol = pbmol.OBMol", "docstring": "A wrapper to pybel's localopt method to optimize a Molecule.\n\nArgs:\nforcefield: Default is mmff94. Options are 'gaff', 'ghemical',\n'mmff94', 'mmff94s', and 'uff'.\nsteps: Default is 500.", "source": "juraj-google-style"}
{"code": "def _detect(self):\n    results = []\n    self.results = []\n    self.visited_all_paths = {}\n    for contract in self.slither.contracts:\n        for function in contract.functions:\n            if (function.is_implemented and (function.contract == contract)):\n                if function.contains_assembly:\n                    continue\n                uninitialized_local_variables = [v for v in function.local_variables if ((not v.is_storage) and v.uninitialized)]\n                function.entry_point.context[self.key] = uninitialized_local_variables\n                self._detect_uninitialized(function, function.entry_point, [])\n    all_results = list(set(self.results))\n    for (function, uninitialized_local_variable) in all_results:\n        var_name = uninitialized_local_variable.name\n        info = '{} in {}.{} ({}) is a local variable never initialiazed\\n'\n        info = info.format(var_name, function.contract.name, function.name, uninitialized_local_variable.source_mapping_str)\n        json = self.generate_json_result(info)\n        self.add_variable_to_json(uninitialized_local_variable, json)\n        self.add_function_to_json(function, json)\n        results.append(json)\n    return results", "docstring": "Detect uninitialized local variables\n\nRecursively visit the calls\nReturns:\ndict: [contract name] = set(local variable uninitialized)", "source": "codesearchnet"}
{"code": "def test_correctness_2_factor_hull_white_consistency(self, valuation_method, error_tol):\n    dtype = tf.float64\n    expiries = np.array([1.0])\n    fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])\n    fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)\n    fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)\n    zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\n    mu = 0.03\n    vol1 = 0.02\n    vol2 = 0.01\n    eff_vol = np.sqrt(vol1 ** 2 + vol2 ** 2)\n    hjm_price = tff.models.hjm.swaption_price(expiries=expiries, fixed_leg_payment_times=fixed_leg_payment_times, fixed_leg_daycount_fractions=fixed_leg_daycount_fractions, fixed_leg_coupon=fixed_leg_coupon, reference_rate_fn=zero_rate_fn, notional=100.0, num_hjm_factors=2, mean_reversion=[mu, mu], volatility=[vol1, vol2], num_samples=25000, valuation_method=valuation_method, time_step_finite_difference=0.05, num_grid_points_finite_difference=251, time_step=0.1, random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC, seed=[1, 2], dtype=dtype)\n    hjm_price = self.evaluate(hjm_price)\n    hw_price = tff.models.hull_white.swaption_price(expiries=expiries, floating_leg_start_times=[0], floating_leg_end_times=[0], floating_leg_daycount_fractions=[0], fixed_leg_payment_times=fixed_leg_payment_times, fixed_leg_daycount_fractions=fixed_leg_daycount_fractions, fixed_leg_coupon=fixed_leg_coupon, reference_rate_fn=zero_rate_fn, notional=100.0, mean_reversion=[mu], volatility=[eff_vol], use_analytic_pricing=True, dtype=dtype)\n    hw_price = self.evaluate(hw_price)\n    self.assertNear(hjm_price, hw_price, error_tol)", "docstring": "Test that under certain conditions HJM matches analytic HW results.\n\nArgs:\nvaluation_method: The valuation method used.\nerror_tol: Test error tolerance.\n\nFor the two factor model, when both mean reversions are equivalent, then\nthe HJM model matches that of a HW one-factor model with the same mean\nreversion, and effective volatility:\neff_vol = sqrt(vol1^2 + vol2^2 + 2 rho(vol1 * vol2)\nwhere rho is the cross correlation between the two factors. In this\nspecific test, we assume rho = 0.0.", "source": "github-repos"}
{"code": "def get_program_by_title(self, program_title):\n        \n        all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[])\n        matching_programs = [program for program in all_programs if program.get('title') == program_title]\n        if len(matching_programs) > 1:\n            raise MultipleProgramMatchError(len(matching_programs))\n        elif len(matching_programs) == 1:\n            return matching_programs[0]\n        else:\n            return None", "docstring": "Return single program by name, or None if not found.\n\nArguments:\nprogram_title(string): Program title as seen by students and in Course Catalog Admin\n\nReturns:\ndict: Program data provided by Course Catalog API", "source": "juraj-google-style"}
{"code": "def log(self, level, msg, *args, **kwargs):\n    \n    if level >= logging.FATAL:\n      \n      \n      \n      extra = kwargs.setdefault('extra', {})\n      extra[_ABSL_LOG_FATAL] = True\n    super(ABSLLogger, self).log(level, msg, *args, **kwargs)", "docstring": "Logs a message at a cetain level substituting in the supplied arguments.\n\nThis method behaves differently in python and c++ modes.\n\nArgs:\nlevel: int, the standard logging level at which to log the message.\nmsg: str, the text of the message to log.\n*args: The arguments to substitute in the message.\n**kwargs: The keyword arguments to substitute in the message.", "source": "juraj-google-style"}
{"code": "def inverse(self, name: str='inverse') -> 'LinearOperator':\n    if self.is_square is False:\n        raise ValueError('Cannot take the Inverse: This operator represents a non square matrix.')\n    if self.is_non_singular is False:\n        raise ValueError('Cannot take the Inverse: This operator represents a singular matrix.')\n    with self._name_scope(name):\n        return self._linop_inverse()", "docstring": "Returns the Inverse of this `LinearOperator`.\n\nGiven `A` representing this `LinearOperator`, return a `LinearOperator`\nrepresenting `A^-1`.\n\nArgs:\nname: A name scope to use for ops added by this method.\n\nReturns:\n`LinearOperator` representing inverse of this matrix.\n\nRaises:\nValueError: When the `LinearOperator` is not hinted to be `non_singular`.", "source": "github-repos"}
{"code": "def experimental_from_jax(cls, serving_funcs, inputs):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.JAX)\n    return TFLiteJaxConverterV2(serving_funcs, inputs)", "docstring": "Creates a TFLiteConverter object from a Jax model with its inputs.\n\nArgs:\nserving_funcs: An array of Jax functions with all the weights applied\nalready.\ninputs: An array of Jax input placeholders tuples list, e.g.,\njnp.zeros(INPUT_SHAPE). Each tuple list should correspond with the\nserving function.\n\nReturns:\nTFLiteConverter object.", "source": "github-repos"}
{"code": "def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs):\n    (_, tmp_filename) = tempfile.mkstemp(suffix='.txt', text=True)\n    with open(tmp_filename, 'w') as f:\n        for filename in video_list:\n            f.write('file {}\\n'.format(osp.abspath(filename)))\n    options = {'log_level': log_level}\n    if (vcodec is None):\n        options['vcodec'] = 'copy'\n    if (acodec is None):\n        options['acodec'] = 'copy'\n    convert_video(tmp_filename, out_file, print_cmd, pre_options='-f concat -safe 0', **options)\n    os.remove(tmp_filename)", "docstring": "Concatenate multiple videos into a single one.\n\nArgs:\nvideo_list (list): A list of video filenames\nout_file (str): Output video filename\nvcodec (None or str): Output video codec, None for unchanged\nacodec (None or str): Output audio codec, None for unchanged\nlog_level (str): Logging level of ffmpeg.\nprint_cmd (bool): Whether to print the final ffmpeg command.", "source": "codesearchnet"}
{"code": "def _CheckPythonModuleVersion(\n      self, module_name, module_object, version_property, minimum_version,\n      maximum_version):\n    \n    module_version = None\n    if not version_property.endswith('()'):\n      module_version = getattr(module_object, version_property, None)\n    else:\n      version_method = getattr(\n          module_object, version_property[:-2], None)\n      if version_method:\n        module_version = version_method()\n\n    if not module_version:\n      status_message = (\n          'unable to determine version information for: {0:s}').format(\n              module_name)\n      return False, status_message\n\n    \n    module_version = '{0!s}'.format(module_version)\n\n    \n    \n\n    \n    module_version = self._VERSION_NUMBERS_REGEX.findall(module_version)[0]\n\n    if module_version[-1] == '.':\n      module_version = module_version[:-1]\n\n    try:\n      module_version_map = list(\n          map(int, self._VERSION_SPLIT_REGEX.split(module_version)))\n    except ValueError:\n      status_message = 'unable to parse module version: {0:s} {1:s}'.format(\n          module_name, module_version)\n      return False, status_message\n\n    if minimum_version:\n      try:\n        minimum_version_map = list(\n            map(int, self._VERSION_SPLIT_REGEX.split(minimum_version)))\n      except ValueError:\n        status_message = 'unable to parse minimum version: {0:s} {1:s}'.format(\n            module_name, minimum_version)\n        return False, status_message\n\n      if module_version_map < minimum_version_map:\n        status_message = (\n            '{0:s} version: {1!s} is too old, {2!s} or later required').format(\n                module_name, module_version, minimum_version)\n        return False, status_message\n\n    if maximum_version:\n      try:\n        maximum_version_map = list(\n            map(int, self._VERSION_SPLIT_REGEX.split(maximum_version)))\n      except ValueError:\n        status_message = 'unable to parse maximum version: {0:s} {1:s}'.format(\n            module_name, maximum_version)\n        return False, status_message\n\n      if module_version_map > maximum_version_map:\n        status_message = (\n            '{0:s} version: {1!s} is too recent, {2!s} or earlier '\n            'required').format(module_name, module_version, maximum_version)\n        return False, status_message\n\n    status_message = '{0:s} version: {1!s}'.format(module_name, module_version)\n    return True, status_message", "docstring": "Checks the version of a Python module.\n\nArgs:\nmodule_object (module): Python module.\nmodule_name (str): name of the Python module.\nversion_property (str): version attribute or function.\nminimum_version (str): minimum version.\nmaximum_version (str): maximum version.\n\nReturns:\ntuple: consists:\n\nbool: True if the Python module is available and conforms to\nthe minimum required version, False otherwise.\nstr: status message.", "source": "juraj-google-style"}
{"code": "def get_ast_dict(belstr, component_type: str=''):\n    errors = []\n    parsed = {}\n    bels = list(belstr)\n    (char_locs, errors) = parse_chars(bels, errors)\n    (parsed, errors) = parse_functions(belstr, char_locs, parsed, errors)\n    (parsed, errors) = parse_args(bels, char_locs, parsed, errors)\n    (parsed, errors) = arg_types(parsed, errors)\n    (parsed, errors) = parse_relations(belstr, char_locs, parsed, errors)\n    (parsed, errors) = parse_nested(bels, char_locs, parsed, errors)\n    errors = parsed_top_level_errors(parsed, errors)\n    (ast, errors) = parsed_to_ast(parsed, errors, component_type=component_type)\n    return (ast, errors)", "docstring": "Convert BEL string to AST dictionary\n\nArgs:\nbelstr: BEL string\ncomponent_type: Empty string or 'subject' or 'object' to indicate that we\nare parsing the subject or object field input", "source": "codesearchnet"}
{"code": "def query_orders(self, accounts, status='filled'):\n        \n        try:\n            data = self.call(\"orders\", {'client': accounts, 'status': status})\n\n            if data is not None:\n                orders = data.get('dataTable', False)\n\n                order_headers = orders['columns']\n                if ('成交状态' in order_headers\n                        or '状态说明' in order_headers) and ('备注' in order_headers):\n                    order_headers[order_headers.index('备注')] = '废弃'\n\n                order_headers = [cn_en_compare[item] for item in order_headers]\n                order_all = pd.DataFrame(\n                    orders['rows'],\n                    columns=order_headers\n                ).assign(account_cookie=accounts)\n\n                order_all.towards = order_all.towards.apply(\n                    lambda x: trade_towards_cn_en[x]\n                )\n                if 'order_time' in order_headers:\n                    \n                    order_all['status'] = order_all.status.apply(\n                        lambda x: order_status_cn_en[x]\n                    )\n                    if 'order_date' not in order_headers:\n                        order_all.order_time = order_all.order_time.apply(\n                            lambda x: QA_util_get_order_datetime(\n                                dt='{} {}'.format(datetime.date.today(),\n                                                  x)\n                            )\n                        )\n                    else:\n                        order_all = order_all.assign(\n                            order_time=order_all.order_date\n                            .apply(QA_util_date_int2str) + ' ' +\n                            order_all.order_time\n                        )\n\n                if 'trade_time' in order_headers:\n\n                    order_all.trade_time = order_all.trade_time.apply(\n                        lambda x: '{} {}'.format(datetime.date.today(),\n                                                 x)\n                    )\n\n                if status is 'filled':\n                    return order_all.loc[:,\n                                         self.dealstatus_headers].set_index(\n                                             ['account_cookie',\n                                              'realorder_id']\n                    ).sort_index()\n                else:\n                    return order_all.loc[:,\n                                         self.orderstatus_headers].set_index(\n                                             ['account_cookie',\n                                              'realorder_id']\n                    ).sort_index()\n            else:\n                print('response is None')\n                return False\n        except Exception as e:\n            print(e)\n            return False", "docstring": "查询订单\n\nArguments:\naccounts {[type]} -- [description]\n\nKeyword Arguments:\nstatus {str} -- 'open' 待成交 'filled' 成交 (default: {'filled'})\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def _copy_source(s, graph, op_map, handle_captures, inverse_captures, base_graph):\n    if handle_captures and s in inverse_captures:\n        copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name)\n    elif s.op.type == 'PlaceholderWithDefault' and _constant_inputs(s):\n        default_value = s.op.inputs[0]\n        unavailable_inputs, unavailable_control_inputs = _copy_non_source(op=default_value.op, graph=graph, op_map=op_map, base_graph=base_graph)\n        if unavailable_inputs or unavailable_control_inputs:\n            raise AssertionError('Could not copy source node {} because it has inputs.'.format(default_value))\n        with ops.device(s.op.device):\n            copied_placeholder = array_ops.placeholder_with_default(input=op_map[default_value], shape=s.shape, name=s.op.name)\n    else:\n        with ops.device(s.op.device):\n            copied_placeholder = array_ops.placeholder(dtype=s.dtype, shape=s.shape, name=s.op.name)\n    base_handle = resource_variable_ops.get_resource_handle_data(s)\n    if base_handle.shape_and_type:\n        resource_variable_ops._set_handle_shapes_and_types(copied_placeholder, base_handle, graph_mode=True)\n    op_map[s] = copied_placeholder\n    op_map[s.op] = copied_placeholder.op", "docstring": "Create a source in a graph based on a Tensor from a different graph.\n\nThis function creates a placeholder analog of `s` in a graph with the\nfollowing behavior:\n\n1) If s is a captured Tensor or Variable and handle_captures is set to True,\nsimply capture it in the new graph as well.\n\n2) If s is a PlaceholderWithDefault whose default is a constant, preserve\nsaid default in the new graph.\n\n3) When applicable, copy resource variable metadata from `s` to the newly\ncreated placeholder.\n\nArgs:\ns: The source of interest.\ngraph: The destination graph.\nop_map: A dict mapping ops and tensors in the old graph to the new one.\nhandle_captures: A boolean indicating whether to re-capture s in the new\ngraph or simply create a vanilla placeholder.\ninverse_captures: A dict mapping s back to the Tensor or Variable that it\ncaptures.\nbase_graph: The graph being copied from.", "source": "github-repos"}
{"code": "def setExtension(self, ext):\n        \n        if ext[0] != \".\":\n            ext = \".\" + ext\n        self._ext = utils.asString(ext)", "docstring": "Set a new file extension for the sequence.\n\nNote:\nA leading period will be added if none is provided.\n\nArgs:\next (str): the new file extension", "source": "juraj-google-style"}
{"code": "def center_label(self, input_length, order):\n        \n        location_in_the_box = '*'.center(input_length * 2 - 1).index('*') + 1\n        top_limit = order * 2 + 2\n        bot_limit = top_limit + 2\n        if top_limit <= location_in_the_box < bot_limit:\n            if location_in_the_box == top_limit:\n                self.top_connect = self.label\n            elif location_in_the_box == top_limit + 1:\n                self.mid_content = self.label\n            else:\n                self.bot_connect = self.label", "docstring": "In multi-bit elements, the label is centered vertically.\nArgs:\ninput_length (int): Rhe amount of wires affected.\norder (int): Which middle element is this one?", "source": "juraj-google-style"}
{"code": "def patch_addPadding(self, patches):\n    paddingLength = self.Patch_Margin\n    nullPadding = ''\n    for x in range(1, (paddingLength + 1)):\n        nullPadding += chr(x)\n    for patch in patches:\n        patch.start1 += paddingLength\n        patch.start2 += paddingLength\n    patch = patches[0]\n    diffs = patch.diffs\n    if ((not diffs) or (diffs[0][0] != self.DIFF_EQUAL)):\n        diffs.insert(0, (self.DIFF_EQUAL, nullPadding))\n        patch.start1 -= paddingLength\n        patch.start2 -= paddingLength\n        patch.length1 += paddingLength\n        patch.length2 += paddingLength\n    elif (paddingLength > len(diffs[0][1])):\n        extraLength = (paddingLength - len(diffs[0][1]))\n        newText = (nullPadding[len(diffs[0][1]):] + diffs[0][1])\n        diffs[0] = (diffs[0][0], newText)\n        patch.start1 -= extraLength\n        patch.start2 -= extraLength\n        patch.length1 += extraLength\n        patch.length2 += extraLength\n    patch = patches[(- 1)]\n    diffs = patch.diffs\n    if ((not diffs) or (diffs[(- 1)][0] != self.DIFF_EQUAL)):\n        diffs.append((self.DIFF_EQUAL, nullPadding))\n        patch.length1 += paddingLength\n        patch.length2 += paddingLength\n    elif (paddingLength > len(diffs[(- 1)][1])):\n        extraLength = (paddingLength - len(diffs[(- 1)][1]))\n        newText = (diffs[(- 1)][1] + nullPadding[:extraLength])\n        diffs[(- 1)] = (diffs[(- 1)][0], newText)\n        patch.length1 += extraLength\n        patch.length2 += extraLength\n    return nullPadding", "docstring": "Add some padding on text start and end so that edges can match\nsomething.  Intended to be called only from within patch_apply.\n\nArgs:\npatches: Array of Patch objects.\n\nReturns:\nThe padding string added to each side.", "source": "codesearchnet"}
{"code": "def download_report_hook(count, block_size, total_size):\n  \n  percent = int(count * block_size * 100 / total_size)\n  print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "docstring": "Report hook for download progress.\n\nArgs:\ncount: current block number\nblock_size: block size\ntotal_size: total size", "source": "juraj-google-style"}
{"code": "def get_counters(counter_list):\n    if (not isinstance(counter_list, list)):\n        raise CommandExecutionError('counter_list must be a list of tuples')\n    try:\n        query = win32pdh.OpenQuery()\n        counters = build_counter_list(counter_list)\n        for counter in counters:\n            counter.add_to_query(query)\n        win32pdh.CollectQueryData(query)\n        time.sleep(1)\n        win32pdh.CollectQueryData(query)\n        ret = {}\n        for counter in counters:\n            try:\n                ret.update({counter.path: counter.value()})\n            except pywintypes.error as exc:\n                if (exc.strerror == 'No data to return.'):\n                    continue\n                else:\n                    raise\n    finally:\n        win32pdh.CloseQuery(query)\n    return ret", "docstring": "Get the values for the passes list of counters\n\nArgs:\ncounter_list (list):\nA list of counters to lookup\n\nReturns:\ndict: A dictionary of counters and their values", "source": "codesearchnet"}
{"code": "def get_apps_to_backup(self):\n    app_db = appsdb.ApplicationsDatabase()\n    apps_to_backup = (self._config.apps_to_sync or app_db.get_app_names())\n    for app_name in self._config.apps_to_ignore:\n        apps_to_backup.discard(app_name)\n    return apps_to_backup", "docstring": "Get the list of applications that should be backed up by Mackup.\n\nIt's the list of allowed apps minus the list of ignored apps.\n\nReturns:\n(set) List of application names to back up", "source": "codesearchnet"}
{"code": "def split_into_batches(input_list, batch_size, batch_storage_dir, checkpoint=False):\n    if (checkpoint and (not os.path.exists(batch_storage_dir))):\n        os.mkdir(batch_storage_dir)\n    batches = [{'index': batch_index, 'data': input_list[start_index:(start_index + batch_size)], 'input_filename': os.path.join(batch_storage_dir, 'batch-{:05d}-input.pickle'.format(batch_index)), 'result_filename': os.path.join(batch_storage_dir, 'batch-{:05d}-output.pickle'.format(batch_index))} for (batch_index, start_index) in enumerate(range(0, len(input_list), batch_size))]\n    if checkpoint:\n        for batch in batches:\n            save(batch['data'], batch['input_filename'])\n    return batches", "docstring": "Break the input data into smaller batches, optionally saving each one to disk.\n\nArgs:\ninput_list: An input object that has a list-like interface (indexing and slicing).\nbatch_size: The maximum number of input items in each batch.\nbatch_storage_dir: The directory to save the checkpoints to.\ncheckpoint: Whether to save each batch to a file.\n\nReturns:\nA list of batch objects with the following structure:\n{'index', 'data', 'input_filename', 'result_filename'}", "source": "codesearchnet"}
{"code": "def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):\n    \n    ncdata, closeit = as_ncreader(ncdata)\n\n    \n    lattice = ArrayWithUnit(ncdata.read_value(\"primitive_vectors\"), \"bohr\").to(\"ang\")\n\n    red_coords = ncdata.read_value(\"reduced_atom_positions\")\n    natom = len(red_coords)\n\n    znucl_type = ncdata.read_value(\"atomic_numbers\")\n\n    \n    type_atom = ncdata.read_value(\"atom_species\")\n\n    \n    species = natom * [None]\n    for atom in range(natom):\n        type_idx = type_atom[atom] - 1\n        species[atom] = int(znucl_type[type_idx])\n\n    d = {}\n    if site_properties is not None:\n        for prop in site_properties:\n            d[property] = ncdata.read_value(prop)\n\n    structure = cls(lattice, species, red_coords, site_properties=d)\n\n    \n    \n    try:\n        from abipy.core.structure import Structure as AbipyStructure\n        structure.__class__ = AbipyStructure\n    except ImportError:\n        pass\n\n    if closeit:\n        ncdata.close()\n\n    return structure", "docstring": "Reads and returns a pymatgen structure from a NetCDF file\ncontaining crystallographic data in the ETSF-IO format.\n\nArgs:\nncdata: filename or NetcdfReader instance.\nsite_properties: Dictionary with site properties.\ncls: The Structure class to instanciate.", "source": "juraj-google-style"}
{"code": "def __init__(self, default: typing.Optional[str]=MISSING_VALUE, regex: typing.Optional[str]=None, is_noneable: bool=False, frozen: bool=False):\n    self._regex = re.compile(regex) if regex else None\n    super().__init__(str, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\ndefault: Default value for this value spec.\nregex: Optional regular expression for acceptable value.\nis_noneable: If True, None is acceptable.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def _read(cls, filepath_or_buffer, **kwargs):\n        \n        \n        \n        \n        \n        \n        \n        try:\n            args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv)\n            defaults = dict(zip(args[2:], defaults))\n            filtered_kwargs = {\n                kw: kwargs[kw]\n                for kw in kwargs\n                if kw in defaults\n                and not isinstance(kwargs[kw], type(defaults[kw]))\n                or kwargs[kw] != defaults[kw]\n            }\n        \n        except AttributeError:\n            filtered_kwargs = kwargs\n\n        if isinstance(filepath_or_buffer, str):\n            if not file_exists(filepath_or_buffer):\n                ErrorMessage.default_to_pandas(\"File path could not be resolved\")\n                return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)\n        elif not isinstance(filepath_or_buffer, py.path.local):\n            read_from_pandas = True\n            \n            try:\n                import pathlib\n\n                if isinstance(filepath_or_buffer, pathlib.Path):\n                    read_from_pandas = False\n            except ImportError:  \n                pass\n            if read_from_pandas:\n                ErrorMessage.default_to_pandas(\"Reading from buffer.\")\n                return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)\n        if (\n            _infer_compression(filepath_or_buffer, kwargs.get(\"compression\"))\n            is not None\n        ):\n            ErrorMessage.default_to_pandas(\"Compression detected.\")\n            return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)\n\n        chunksize = kwargs.get(\"chunksize\")\n        if chunksize is not None:\n            ErrorMessage.default_to_pandas(\"Reading chunks from a file.\")\n            return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)\n\n        skiprows = kwargs.get(\"skiprows\")\n        if skiprows is not None and not isinstance(skiprows, int):\n            ErrorMessage.default_to_pandas(\"skiprows parameter not optimized yet.\")\n            return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)\n        \n        if kwargs.get(\"nrows\") is not None:\n            ErrorMessage.default_to_pandas(\"`read_csv` with `nrows`\")\n            return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)\n        else:\n            return cls._read_csv_from_file_pandas_on_ray(\n                filepath_or_buffer, filtered_kwargs\n            )", "docstring": "Read csv file from local disk.\nArgs:\nfilepath_or_buffer:\nThe filepath of the csv file.\nWe only support local files for now.\nkwargs: Keyword arguments in pandas.read_csv", "source": "juraj-google-style"}
{"code": "def compression_type(self):\n    best_compression = None\n    for e in self.mardata.index.entries:\n        self.fileobj.seek(e.offset)\n        magic = self.fileobj.read(10)\n        compression = guess_compression(magic)\n        if (compression == 'xz'):\n            best_compression = 'xz'\n            break\n        elif ((compression == 'bz2') and (best_compression is None)):\n            best_compression = 'bz2'\n    return best_compression", "docstring": "Return the latest compresion type used in this MAR.\n\nReturns:\nOne of None, 'bz2', or 'xz'", "source": "codesearchnet"}
{"code": "def unpack(value):\n    if not is_packed(value):\n        return value\n    variant = value._tf_extension_type_packed_variant\n    spec = value._tf_extension_type_cached_type_spec\n    spec = spec._tf_extension_type_with_packed(False)\n    return composite_tensor_ops.composite_tensor_from_variant(variant, spec)", "docstring": "Returns a copy of `value` with individual fields stored in __dict__.\n\nArgs:\nvalue: An `ExtensionType` object.\n\nReturns:\nAn `ExtensionType` object.", "source": "github-repos"}
{"code": "def load_data_split(proc_data_dir):\n    \n    ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin'))\n    ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin'))\n    ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin'))\n    return ds_train, ds_val, ds_test", "docstring": "Loads a split dataset\n\nArgs:\nproc_data_dir: Directory with the split and processed data\n\nReturns:\n(Training Data, Validation Data, Test Data)", "source": "juraj-google-style"}
{"code": "def dagify_min_edge(g):\n    \n    while not nx.is_directed_acyclic_graph(g):\n        cycle = next(nx.simple_cycles(g))\n        scores = []\n        edges = []\n        for i, j in zip(cycle[:1], cycle[:1]):\n            edges.append((i, j))\n            scores.append(g[i][j]['weight'])\n\n        i, j = edges[scores.index(min(scores))]\n        gc = deepcopy(g)\n        gc.remove_edge(i, j)\n        gc.add_edge(j, i)\n\n        if len(list(nx.simple_cycles(gc))) < len(list(nx.simple_cycles(g))):\n            g.add_edge(j, i, weight=min(scores))\n        g.remove_edge(i, j)\n    return g", "docstring": "Input a graph and output a DAG.\n\nThe heuristic is to reverse the edge with the lowest score of the cycle\nif possible, else remove it.\n\nArgs:\ng (networkx.DiGraph): Graph to modify to output a DAG\n\nReturns:\nnetworkx.DiGraph: DAG made out of the input graph.", "source": "juraj-google-style"}
{"code": "def _CreateFlagItem(flag, docstring_info, spec, required=False, flag_string=None, short_arg=False):\n    max_str_length = LINE_LENGTH - SECTION_INDENTATION - SUBSECTION_INDENTATION\n    description = _GetArgDescription(flag, docstring_info)\n    if not flag_string:\n        flag_name_upper = formatting.Underline(flag.upper())\n        flag_string = f'--{flag}={flag_name_upper}'\n    if required:\n        flag_string += ' (required)'\n    if short_arg:\n        short_flag = flag[0]\n        flag_string = f'-{short_flag}, {flag_string}'\n    arg_type = _GetArgType(flag, spec)\n    arg_default = _GetArgDefault(flag, spec)\n    if arg_default == 'None':\n        arg_type = f'Optional[{arg_type}]'\n    arg_type = f'Type: {arg_type}' if arg_type else ''\n    available_space = max_str_length - len(arg_type)\n    arg_type = formatting.EllipsisTruncate(arg_type, available_space, max_str_length)\n    arg_default = f'Default: {arg_default}' if arg_default else ''\n    available_space = max_str_length - len(arg_default)\n    arg_default = formatting.EllipsisTruncate(arg_default, available_space, max_str_length)\n    description = '\\n'.join((part for part in (arg_type, arg_default, description) if part))\n    return _CreateItem(flag_string, description, indent=SUBSECTION_INDENTATION)", "docstring": "Returns a string describing a flag using docstring and FullArgSpec info.\n\nArgs:\nflag: The name of the flag.\ndocstring_info: A docstrings.DocstringInfo namedtuple with information about\nthe containing function's docstring.\nspec: An instance of fire.inspectutils.FullArgSpec, containing type and\ndefault information about the arguments to a callable.\nrequired: Whether the flag is required.\nflag_string: If provided, use this string for the flag, rather than\nconstructing one from the flag name.\nshort_arg: Whether the flag has a short variation or not.\nReturns:\nA string to be used in constructing the help screen for the function.", "source": "github-repos"}
{"code": "def read(self, vals):\n        \n        i = 0\n        if len(vals[i]) == 0:\n            self.comments_2 = None\n        else:\n            self.comments_2 = vals[i]\n        i += 1", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def __init__(self, order_dict, default_order=None):\n        \n        self.order_dict = order_dict.copy()\n        self.default_order = default_order", "docstring": "Create a reorderer.\n\nArgs:\norder_dict (dict of (str, `PackageOrder`): Orderers to apply to\neach package family.\ndefault_order (`PackageOrder`): Orderer to apply to any packages\nnot specified in `order_dict`.", "source": "juraj-google-style"}
{"code": "def find_exception_by_code(code):\n    \n    errorName = None\n    for error in WebDriverError:\n        if error.value.code == code:\n            errorName = error\n            break\n    return errorName", "docstring": "Find name of exception by WebDriver defined error code.\n\nArgs:\ncode(str): Error code defined in protocol.\n\nReturns:\nThe error name defined in protocol.", "source": "juraj-google-style"}
{"code": "def get_property_dict(entity_proto):\n  \n  return dict((p.key, p.value) for p in entity_proto.property)", "docstring": "Convert datastore.Entity to a dict of property name -> datastore.Value.\n\nArgs:\nentity_proto: datastore.Entity proto message.\n\nUsage:\n>>> get_property_dict(entity_proto)\n{'foo': {string_value='a'}, 'bar': {integer_value=2}}\n\nReturns:\ndict of entity properties.", "source": "juraj-google-style"}
{"code": "def merge_annotations(code, annotations, param_annotations):\n    if param_annotations:\n        visitor = FunctionDefVisitor(param_annotations)\n        pyc.visit(code, visitor)\n    visitor = CollectAnnotationTargetsVisitor()\n    code = pyc.visit(code, visitor)\n    for line, op in visitor.store_ops.items():\n        if line in annotations:\n            annot = annotations[line]\n            if annot.name in (None, op.argval):\n                op.annotation = annot.annotation\n    for start, (end, op) in sorted(visitor.make_function_ops.items(), reverse=True):\n        for i in range(start, end):\n            if i in annotations:\n                op.annotation = (annotations[i].annotation, i)\n                break\n    return code", "docstring": "Merges type comments into their associated opcodes.\n\nModifies code in place.\n\nArgs:\ncode: An OrderedCode object.\nannotations: A map of lines to annotations.\nparam_annotations: A list of _ParamAnnotations from the director\n\nReturns:\nThe code with annotations added to the relevant opcodes.", "source": "github-repos"}
{"code": "def convert(model_path: str, out_file: str):\n    \n    print('Converting', model_path, 'to', out_file, '...')\n\n    import tensorflow as tf\n    from precise.model import load_precise_model\n    from keras import backend as K\n\n    out_dir, filename = split(out_file)\n    out_dir = out_dir or '.'\n    os.makedirs(out_dir, exist_ok=True)\n\n    K.set_learning_phase(0)\n    model = load_precise_model(model_path)\n\n    out_name = 'net_output'\n    tf.identity(model.output, name=out_name)\n    print('Output node name:', out_name)\n    print('Output folder:', out_dir)\n\n    sess = K.get_session()\n\n    \n    tf.train.write_graph(sess.graph.as_graph_def(), out_dir, filename + 'txt', as_text=True)\n    print('Saved readable graph to:', filename + 'txt')\n\n    \n    from tensorflow.python.framework import graph_util\n    from tensorflow.python.framework import graph_io\n\n    cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name])\n    graph_io.write_graph(cgraph, out_dir, filename, as_text=False)\n\n    if isfile(model_path + '.params'):\n        copyfile(model_path + '.params', out_file + '.params')\n\n    print('Saved graph to:', filename)\n\n    del sess", "docstring": "Converts an HD5F file from Keras to a .pb for use with TensorFlow\n\nArgs:\nmodel_path: location of Keras model\nout_file: location to write protobuf", "source": "juraj-google-style"}
{"code": "def blit_2x(self, console: tcod.console.Console, dest_x: int, dest_y: int, img_x: int=0, img_y: int=0, img_width: int=(- 1), img_height: int=(- 1)) -> None:\n    lib.TCOD_image_blit_2x(self.image_c, _console(console), dest_x, dest_y, img_x, img_y, img_width, img_height)", "docstring": "Blit onto a Console with double resolution.\n\nArgs:\nconsole (Console): Blit destination Console.\ndest_x (int): Console tile X position starting from the left at 0.\ndest_y (int): Console tile Y position starting from the top at 0.\nimg_x (int): Left corner pixel of the Image to blit\nimg_y (int): Top corner pixel of the Image to blit\nimg_width (int): Width of the Image to blit.\nUse -1 for the full Image width.\nimg_height (int): Height of the Image to blit.\nUse -1 for the full Image height.", "source": "codesearchnet"}
{"code": "def create_log(log_file, uid):\n    \n    if not os.path.exists(log_file):  \n        dir_name = os.path.dirname(log_file)\n        if not os.path.exists(dir_name):\n            os.makedirs(dir_name, 0755)\n            os.chown(dir_name, uid, -1)\n\n        with open(log_file, \"w\") as f:\n            f.write(\"\")\n\n    os.chown(log_file, uid, -1)\n    os.chmod(log_file, 0640)", "docstring": "Create log file and set necessary permissions.\n\nArgs:\nlog_file (str): Path to the log file.\nuid (int): User ID - will be used for chown.", "source": "juraj-google-style"}
{"code": "def update(self, data):\n        \n        updated = False\n        if 'missing_tags' in data:\n            updated |= self.set_property('missing_tags', data['missing_tags'])\n\n        if 'notes' in data:\n            updated |= self.set_property('notes', data['notes'])\n\n        if 'state' in data:\n            updated |= self.set_property('state', data['state'])\n\n        if 'last_alert' in data:\n            updated |= self.set_property('last_alert', data['last_alert'])\n\n        if updated:\n            now = datetime.now()\n            self.set_property('last_change', now)\n\n        return updated", "docstring": "Updates the object information based on live data, if there were any changes made. Any changes will be\nautomatically applied to the object, but will not be automatically persisted. You must manually call\n`db.session.add(instance)` on the object.\n\nArgs:\ndata (:obj:): AWS API Resource object fetched from AWS API\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def get_lattice_type(number):\n    f = (lambda i, j: (i <= number <= j))\n    cs = {'triclinic': (1, 2), 'monoclinic': (3, 15), 'orthorhombic': (16, 74), 'tetragonal': (75, 142), 'trigonal': (143, 167), 'hexagonal': (168, 194), 'cubic': (195, 230)}\n    crystal_system = None\n    for (k, v) in cs.items():\n        if f(*v):\n            crystal_system = k\n            break\n    if (number in [146, 148, 155, 160, 161, 166, 167]):\n        return 'rhombohedral'\n    elif (crystal_system == 'trigonal'):\n        return 'hexagonal'\n    else:\n        return crystal_system", "docstring": "Return the lattice crystal system.\n\nHexagonal cells are differentiated into rhombohedral and hexagonal\nlattices.\n\nArgs:\nnumber (int): The international space group number.\n\nReturns:\nstr: The lattice crystal system.", "source": "codesearchnet"}
{"code": "def update_z(self, z, indices=None):\n    z = _make_np_bool(z)\n    if (indices is None):\n        if (len(self._z) != len(z)):\n            raise QiskitError('During updating whole z, you can not change the number of qubits.')\n        self._z = z\n    else:\n        if ((not isinstance(indices, list)) and (not isinstance(indices, np.ndarray))):\n            indices = [indices]\n        for (p, idx) in enumerate(indices):\n            self._z[idx] = z[p]\n    return self", "docstring": "Update partial or entire z.\n\nArgs:\nz (numpy.ndarray or list): to-be-updated z\nindices (numpy.ndarray or list or optional): to-be-updated qubit indices\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: when updating whole z, the number of qubits must be the same.", "source": "codesearchnet"}
{"code": "def run_iperf_client(self, server_host, extra_args=''):\n    out = self.adb.shell(('iperf3 -c %s %s' % (server_host, extra_args)))\n    clean_out = new_str(out, 'utf-8').strip().split('\\n')\n    if ('error' in clean_out[0].lower()):\n        return (False, clean_out)\n    return (True, clean_out)", "docstring": "Start iperf client on the device.\n\nReturn status as true if iperf client start successfully.\nAnd data flow information as results.\n\nArgs:\nserver_host: Address of the iperf server.\nextra_args: A string representing extra arguments for iperf client,\ne.g. '-i 1 -t 30'.\n\nReturns:\nstatus: true if iperf client start successfully.\nresults: results have data flow information", "source": "codesearchnet"}
{"code": "def downloadMARCXML(doc_id, library, base='nkc'):\n    downer = Downloader()\n    data = downer.download((ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(DOC_ID=doc_id, LIBRARY=library)))\n    dom = dhtmlparser.parseString(data)\n    error = dom.find('login')\n    if error:\n        error_msg = error[0].find('error')\n        if error_msg:\n            raise LibraryNotFoundException(((((((\"Can't download document doc_id: '\" + str(doc_id)) + \"' \") + \"(probably bad library: '\") + library) + \"')!\\nMessage: \") + '\\n'.join(map((lambda x: x.getContent()), error_msg))))\n    error = dom.find('ill-get-doc')\n    if error:\n        error_msg = error[0].find('error')\n        if error_msg:\n            raise DocumentNotFoundException('\\n'.join(map((lambda x: x.getContent()), error_msg)))\n    return data", "docstring": "Download MARC XML document with given `doc_id` from given `library`.\n\nArgs:\ndoc_id (DocumentID): You will get this from :func:`getDocumentIDs`.\nlibrary (str): \"``NKC01``\" in our case, but don't worry,\n:func:`getDocumentIDs` adds library specification into\n:class:`DocumentID` named tuple.\n\nReturns:\nstr: MARC XML unicode string.\n\nRaises:\nLibraryNotFoundException\nDocumentNotFoundException", "source": "codesearchnet"}
{"code": "def _load_from_file(path):\n    config = []\n    try:\n        with open(path, 'r') as config_file:\n            config = yaml.load(config_file)['normalizations']\n    except EnvironmentError as e:\n        raise ConfigError((('Problem while loading file: %s' % e.args[1]) if (len(e.args) > 1) else e))\n    except (TypeError, KeyError) as e:\n        raise ConfigError(('Config file has an unexpected structure: %s' % e))\n    except yaml.YAMLError:\n        raise ConfigError('Invalid YAML file syntax')\n    return config", "docstring": "Load a config file from the given path.\n\nLoad all normalizations from the config file received as\nargument. It expects to find a YAML file with a list of\nnormalizations and arguments under the key 'normalizations'.\n\nArgs:\npath: Path to YAML file.", "source": "codesearchnet"}
{"code": "def _ReadRecordHeader(self, file_object, record_header_offset):\n    \n    data_type_map = self._GetDataTypeMap('keychain_record_header')\n\n    record_header, _ = self._ReadStructureFromFileObject(\n        file_object, record_header_offset, data_type_map)\n\n    return record_header", "docstring": "Reads the record header.\n\nArgs:\nfile_object (file): file-like object.\nrecord_header_offset (int): offset of the record header relative to\nthe start of the file.\n\nReturns:\nkeychain_record_header: record header.\n\nRaises:\nParseError: if the record header cannot be read.", "source": "juraj-google-style"}
{"code": "def restore(self, request):\n        \n        self._connection.connection.rpush(self._request_key, pickle.dumps(request))", "docstring": "Push the request back onto the queue.\n\nArgs:\nrequest (Request): Reference to a request object that should be pushed back\nonto the request queue.", "source": "juraj-google-style"}
{"code": "def add_context(self, name, context, prefix_char=None):\n        \n        if name in self.contexts:\n            raise SuiteError(\"Context already in suite: %r\" % name)\n        if not context.success:\n            raise SuiteError(\"Context is not resolved: %r\" % name)\n\n        self.contexts[name] = dict(name=name,\n                                   context=context.copy(),\n                                   tool_aliases={},\n                                   hidden_tools=set(),\n                                   priority=self._next_priority,\n                                   prefix_char=prefix_char)\n        self._flush_tools()", "docstring": "Add a context to the suite.\n\nArgs:\nname (str): Name to store the context under.\ncontext (ResolvedContext): Context to add.", "source": "juraj-google-style"}
{"code": "def get(self, column, default_value=None):\n        \n        if isinstance(column, (list, tuple)):\n            ret = []\n            for col in column:\n                ret.append(self.get(col, default_value))\n            return ret\n        \n        try:\n            return self._values[column]\n        except (IndexError, TypeError):\n            pass\n        try:\n            return self[column]\n        except IndexError:\n            return default_value", "docstring": "Get an item from the Row by column name.\n\nArgs:\ncolumn: Tuple of column names, or a (str) column name, or positional\ncolumn number, 0-indexed.\ndefault_value: The value to use if the key is not found.\n\nReturns:\nA list or string with column value(s) or default_value if not found.", "source": "juraj-google-style"}
{"code": "def from_esri_code(code):\n    code = str(code)\n    proj4 = utils.crscode_to_string('esri', code, 'proj4')\n    crs = from_proj4(proj4)\n    return crs", "docstring": "Load crs object from esri code, via spatialreference.org.\nParses based on the proj4 representation.\n\nArguments:\n\n- *code*: The ESRI code as an integer.\n\nReturns:\n\n- A CS instance of the indicated type.", "source": "codesearchnet"}
{"code": "def is_comparable_type(var, type_):\n    other_types = COMPARABLE_TYPES.get(type_, type_)\n    return isinstance(var, other_types)", "docstring": "Check to see if `var` is an instance of known compatible types for `type_`\n\nArgs:\nvar (?):\ntype_ (?):\n\nReturns:\nbool:\n\nCommandLine:\npython -m utool.util_type is_comparable_type --show\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_type import *  # NOQA\n>>> import utool as ut\n>>> flags = []\n>>> flags += [is_comparable_type(0, float)]\n>>> flags += [is_comparable_type(0, np.float32)]\n>>> flags += [is_comparable_type(0, np.int32)]\n>>> flags += [is_comparable_type(0, int)]\n>>> flags += [is_comparable_type(0.0, int)]\n>>> result = ut.repr2(flags)\n>>> print(result)\n[True, True, True, True, False]", "source": "codesearchnet"}
{"code": "def draw_vr_anaglyph(cube_fbo, vr_scene, active_scene, eye_poses=(.035, -.035)):\n    \n    color_masks = [(True, False, False, True), (False, True, True, True)]\n    cam = vr_scene.camera\n    orig_cam_position = cam.position.xyz\n\n    for color_mask, eye_pos in zip(color_masks, eye_poses):\n        gl.glColorMask(*color_mask)\n        cam.position.xyz = cam.model_matrix.dot([eye_pos, 0., 0., 1.])[:3]  \n        cam.uniforms['playerPos'] = cam.position.xyz\n        with cube_fbo as fbo:\n            vr_scene.draw360_to_texture(fbo.texture)\n        cam.position.xyz = orig_cam_position\n        active_scene.draw()", "docstring": "Experimental anaglyph drawing function for VR system with red/blue glasses, used in Sirota lab.\nDraws a virtual scene in red and blue, from subject's (heda trackers) perspective in active scene.\n\nNote: assumes shader uses playerPos like ratcave's default shader\n\nArgs:\ncube_fbo: texture frameBuffer object.\nvr_scene: virtual scene object\nactive_scene: active scene object\neye_poses: the eye positions\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _CreateCampaignGroup(client):\n  \n  \n  campaign_group_service = client.GetService('CampaignGroupService',\n                                             version='v201809')\n\n  \n  operations = [{\n      'operator': 'ADD',\n      \n      'operand': {\n          'name': 'Mars campaign group \n      }\n  }]\n\n  campaign_group = campaign_group_service.mutate(operations)['value'][0]\n  campaign_group_id = campaign_group['id']\n\n  \n  print 'Campaign group with ID \"%d\" and name \"%s\" was created.' % (\n      campaign_group_id, campaign_group['name'])\n\n  return campaign_group_id", "docstring": "Create a campaign group.\n\nArgs:\nclient: an AdWordsClient instance.\n\nReturns:\nThe integer ID of the created campaign group.", "source": "juraj-google-style"}
{"code": "def run_validate_program_main(self, program_main):\n        \n        program_language = self.profile.get('install_json').get('programLanguage', 'python').lower()\n        if program_language == 'python' and not os.path.isfile('{}.py'.format(program_main)):\n            print(\n                '{}{}Could not find program main file ({}).'.format(\n                    c.Style.BRIGHT, c.Fore.RED, program_main\n                )\n            )\n            sys.exit(1)", "docstring": "Validate the program main file exists.\n\nArgs:\nprogram_main (str): The executable name.", "source": "juraj-google-style"}
{"code": "def _decode_crop_and_flip(image_buffer, num_channels):\n  \n  \n  \n  \n  \n  \n  \n  \n\n  min_object_covered=0.1\n  aspect_ratio_range=[0.75, 1.33]\n  area_range=[0.05, 1.0]\n  max_attempts=100\n\n  mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MIN_OBJ_COV,\n                          value=min_object_covered)\n  mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_RATIO_RANGE,\n                          value=aspect_ratio_range)\n  mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_AREA_RANGE,\n                          value=area_range)\n  mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MAX_ATTEMPTS,\n                          value=max_attempts)\n  mlperf_log.resnet_print(key=mlperf_log.INPUT_CROP_USES_BBOXES, value=False)\n\n  bbox = tf.constant([0.0, 0.0, 1.0, 1.0],\n                     dtype=tf.float32, shape=[1, 1, 4])   \n  sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n      tf.image.extract_jpeg_shape(image_buffer),\n      bounding_boxes=bbox,\n      min_object_covered=min_object_covered,\n      aspect_ratio_range=aspect_ratio_range,\n      area_range=area_range,\n      max_attempts=max_attempts,\n      use_image_if_no_bounding_boxes=True)\n  bbox_begin, bbox_size, _ = sample_distorted_bounding_box\n\n  \n  offset_y, offset_x, _ = tf.unstack(bbox_begin)\n  target_height, target_width, _ = tf.unstack(bbox_size)\n  crop_window = tf.stack([offset_y, offset_x, target_height, target_width])\n\n  \n  cropped = tf.image.decode_and_crop_jpeg(\n      image_buffer, crop_window, channels=num_channels)\n\n  \n  mlperf_log.resnet_print(key=mlperf_log.INPUT_RANDOM_FLIP)\n  cropped = tf.image.random_flip_left_right(cropped)\n  return cropped", "docstring": "Crops the given image to a random part of the image, and randomly flips.\n\nWe use the fused decode_and_crop op, which performs better than the two ops\nused separately in series, but note that this requires that the image be\npassed in as an un-decoded string Tensor.\n\nArgs:\nimage_buffer: scalar string Tensor representing the raw JPEG image buffer.\nnum_channels: Integer depth of the image buffer for decoding.\n\nReturns:\n3-D tensor with cropped image.", "source": "juraj-google-style"}
{"code": "def get_dict_definition(self, dict, get_list=False):\n        \n        list_def_candidate = []\n        for definition_name in self.specification['definitions'].keys():\n            if self.validate_definition(definition_name, dict):\n                if not get_list:\n                    return definition_name\n                list_def_candidate.append(definition_name)\n        if get_list:\n            return list_def_candidate\n        return None", "docstring": "Get the definition name of the given dict.\n\nArgs:\ndict: dict to test.\nget_list: if set to true, return a list of definition that match the body.\nif False, only return the first.\n\nReturns:\nThe definition name or None if the dict does not match any definition.\nIf get_list is True, return a list of definition_name.", "source": "juraj-google-style"}
{"code": "def form_to_params(fn=None, return_json=True):\n\n    def forms_to_params_decorator(fn):\n\n        @handle_type_error\n        @wraps(fn)\n        def forms_to_params_wrapper(*args, **kwargs):\n            kwargs.update(dict(request.forms))\n            if (not return_json):\n                return fn(*args, **kwargs)\n            return encode_json_body(fn(*args, **kwargs))\n        return forms_to_params_wrapper\n    if fn:\n        return forms_to_params_decorator(fn)\n    return forms_to_params_decorator", "docstring": "Convert bottle forms request to parameters for the wrapped function.\n\nArgs:\nreturn_json (bool, default True): Should the decorator automatically\nconvert returned value to JSON?", "source": "codesearchnet"}
{"code": "def in_place_subclassed_model_state_restoration(model):\n    assert not model._is_graph_network\n    if hasattr(model, '_original_attributes_cache') and model._original_attributes_cache is not None:\n        setattr_tracking = model._setattr_tracking\n        model._setattr_tracking = False\n        model._self_tracked_trackables = []\n        for name, value in model._original_attributes_cache.items():\n            setattr(model, name, value)\n            if isinstance(value, Layer):\n                model._self_tracked_trackables.append(value)\n        model._original_attributes_cache = None\n        model._setattr_tracking = setattr_tracking\n    else:\n        _reset_build_compile_trackers(model)", "docstring": "Restores the original state of a model after it was \"reset\".\n\nThis undoes this action of `_in_place_subclassed_model_reset`, which is called\nin `clone_and_build_model` if `in_place_reset` is set to True.\n\nArgs:\nmodel: Instance of a Keras model created via subclassing, on which\n`_in_place_subclassed_model_reset` was previously called.", "source": "github-repos"}
{"code": "def get_vm(access_token, subscription_id, resource_group, vm_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachines/', vm_name,\n                        '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get virtual machine details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\n\nReturns:\nHTTP response. JSON body of VM properties.", "source": "juraj-google-style"}
{"code": "def export_with_dynamic_cache(model: PreTrainedModel, example_input_ids: Optional[torch.Tensor]=None, example_attention_mask: Optional[torch.Tensor]=None):\n    if not is_torch_greater_or_equal_than_2_3:\n        raise ImportError('torch >= 2.3 is required.')\n    ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap)\n    ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa'])\n    model.config._attn_implementation = 'sdpa_without_vmap'\n    with torch.no_grad():\n        exported_program = torch.export.export(model, (), {'input_ids': example_input_ids, 'attention_mask': example_attention_mask, 'past_key_values': DynamicCache(), 'use_cache': True}, strict=False)\n        return exported_program", "docstring": "Export a model with DynamicCache using `torch.export`, ensuring the exported model is compatible with `ExecuTorch`.\n\nArgs:\nmodel (`PreTrainedModel`): The pretrained model to be exported.\nexample_input_ids (`Optional[torch.Tensor]`): Example input token id used by `torch.export`.\nexample_attention_mask (`Optional[torch.Tensor]`): Example attention mask used by `torch.export`.\n\nReturns:\nExported program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`.", "source": "github-repos"}
{"code": "def list_children(self, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):\n    children = []\n    for name, ref in super(ObjectGraphView, self).children(obj, save_type, **kwargs).items():\n        children.append(base.TrackableReference(name, ref))\n    if obj is self.root and self._attached_dependencies:\n        children.extend(self._attached_dependencies)\n    return children", "docstring": "Returns list of all child trackables attached to obj.\n\nArgs:\nobj: A `Trackable` object.\nsave_type: A string, can be 'savedmodel' or 'checkpoint'.\n**kwargs: kwargs to use when retrieving the object's children.\n\nReturns:\nList of all children attached to the object.", "source": "github-repos"}
{"code": "def _ParseLogFileOptions(self, options):\n    self._log_file = self.ParseStringOption(options, 'log_file')\n    if (not self._log_file):\n        local_date_time = datetime.datetime.now()\n        self._log_file = '{0:s}-{1:04d}{2:02d}{3:02d}T{4:02d}{5:02d}{6:02d}.log.gz'.format(self.NAME, local_date_time.year, local_date_time.month, local_date_time.day, local_date_time.hour, local_date_time.minute, local_date_time.second)", "docstring": "Parses the log file options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.", "source": "codesearchnet"}
{"code": "def insert_or_update(table, columns, values):\n    rows = len(values)\n    cells = len(columns) * len(values)\n    return _Mutator(mutation=Mutation(insert_or_update=batch._make_write_pb(table, columns, values)), operation=WriteMutation._OPERATION_INSERT_OR_UPDATE, rows=rows, cells=cells, kwargs={'table': table, 'columns': columns, 'values': values})", "docstring": "Insert/update one or more table rows.\nArgs:\ntable: Name of the table to be modified.\ncolumns: Name of the table columns to be modified.\nvalues: Values to be modified.", "source": "github-repos"}
{"code": "def binary_op(self, op, other, **kwargs):\n    func = getattr(pandas.DataFrame, op)\n    return self._inter_df_op_handler(func, other, **kwargs)", "docstring": "Perform an operation between two objects.\n\nNote: The list of operations is as follows:\n- add\n- eq\n- floordiv\n- ge\n- gt\n- le\n- lt\n- mod\n- mul\n- ne\n- pow\n- rfloordiv\n- rmod\n- rpow\n- rsub\n- rtruediv\n- sub\n- truediv\n- __and__\n- __or__\n- __xor__\nArgs:\nop: The operation. See list of operations above\nother: The object to operate against.\n\nReturns:\nA new QueryCompiler object.", "source": "codesearchnet"}
{"code": "def _check_element_shape(self, shape):\n    if not shape.is_compatible_with(self.element_shape):\n        raise ValueError('Inconsistent shapes: saw %s but expected %s ' % (shape, self.element_shape))\n    if self._infer_shape:\n        self._element_shape[0] = self.element_shape.merge_with(shape)", "docstring": "Changes the element shape of the array given a shape to merge with.\n\nArgs:\nshape: A `TensorShape` object to merge with.\n\nRaises:\nValueError: if the provided shape is incompatible with the current\nelement shape of the `TensorArray`.", "source": "github-repos"}
{"code": "def resize_images(x, height_factor, width_factor, data_format, interpolation='nearest'):\n    if data_format == 'channels_first':\n        rows, cols = (2, 3)\n    elif data_format == 'channels_last':\n        rows, cols = (1, 2)\n    else:\n        raise ValueError('Invalid `data_format` argument: %s' % (data_format,))\n    new_shape = x.shape[rows:cols + 1]\n    if new_shape.is_fully_defined():\n        new_shape = constant_op.constant(new_shape.as_list(), dtype='int32')\n    else:\n        new_shape = array_ops.shape_v2(x)[rows:cols + 1]\n    new_shape *= constant_op.constant(np.array([height_factor, width_factor], dtype='int32'))\n    if data_format == 'channels_first':\n        x = permute_dimensions(x, [0, 2, 3, 1])\n    if interpolation == 'nearest':\n        x = image_ops.resize_images_v2(x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)\n    elif interpolation == 'bilinear':\n        x = image_ops.resize_images_v2(x, new_shape, method=image_ops.ResizeMethod.BILINEAR)\n    else:\n        raise ValueError('interpolation should be one of \"nearest\" or \"bilinear\".')\n    if data_format == 'channels_first':\n        x = permute_dimensions(x, [0, 3, 1, 2])\n    return x", "docstring": "Resizes the images contained in a 4D tensor.\n\nArgs:\nx: Tensor or variable to resize.\nheight_factor: Positive integer.\nwidth_factor: Positive integer.\ndata_format: One of `\"channels_first\"`, `\"channels_last\"`.\ninterpolation: A string, one of `nearest` or `bilinear`.\n\nReturns:\nA tensor.\n\nRaises:\nValueError: in case of incorrect value for\n`data_format` or `interpolation`.", "source": "github-repos"}
{"code": "def symbol(name: str=None, symbol_type: Type[Symbol]=Symbol) -> 'SymbolWildcard':\n    if (isinstance(name, type) and issubclass(name, Symbol) and (symbol_type is Symbol)):\n        return SymbolWildcard(name)\n    return SymbolWildcard(symbol_type, variable_name=name)", "docstring": "Create a `SymbolWildcard` that matches a single `Symbol` argument.\n\nArgs:\nname:\nOptional variable name for the wildcard.\nsymbol_type:\nAn optional subclass of `Symbol` to further limit which kind of symbols are\nmatched by the wildcard.\n\nReturns:\nA `SymbolWildcard` that matches the *symbol_type*.", "source": "codesearchnet"}
{"code": "def get_group_id(self, uuid=None):\n    group_data = self.get_group(uuid)\n    try:\n        return group_data['response']['docs'][0]['id']\n    except (KeyError, IndexError):\n        failure_message = 'Error in get_group response data - got {0}'.format(group_data)\n        log.exception(failure_message)\n        raise PyLmodUnexpectedData(failure_message)", "docstring": "Get group id based on uuid.\n\nArgs:\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: No group data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\nint: numeric group id", "source": "codesearchnet"}
{"code": "def replaceFA(self, faDataType: int, xml: str):\n    self.client.replaceFA(faDataType, xml)", "docstring": "Replaces Financial Advisor's settings.\n\nArgs:\nfaDataType: See :meth:`.requestFA`.\nxml: The XML-formatted configuration string.", "source": "codesearchnet"}
{"code": "def download(self, temp_ver, store_metadata=True):\n        \n        dest = self._prefixed(temp_ver.name)\n        temp_dest = '%s.tmp' % dest\n\n        with utils.LockFile(dest + '.lock'):\n            \n            if os.path.exists(dest):\n                return\n\n            temp_ver.download(temp_dest)\n            if store_metadata:\n                with open('%s.metadata' % dest, 'w') as f:\n                    utils.json_dump(temp_ver.get_metadata(), f)\n\n            sha1 = utils.get_hash(temp_dest)\n            if temp_ver.get_hash() != sha1:\n                raise RuntimeError(\n                    'Image %s does not match the expected hash %s' % (\n                        temp_ver.name,\n                        sha1,\n                    )\n                )\n\n            with open('%s.hash' % dest, 'w') as f:\n                f.write(sha1)\n\n            with log_utils.LogTask('Convert image', logger=LOGGER):\n                result = utils.run_command(\n                    [\n                        'qemu-img',\n                        'convert',\n                        '-O',\n                        'raw',\n                        temp_dest,\n                        dest,\n                    ],\n                )\n\n                os.unlink(temp_dest)\n                if result:\n                    raise RuntimeError(result.err)", "docstring": "Retrieve the given template version\n\nArgs:\ntemp_ver (TemplateVersion): template version to retrieve\nstore_metadata (bool): If set to ``False``, will not refresh the\nlocal metadata with the retrieved one\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def describe_message(message_definition):\n    message_descriptor = MessageDescriptor()\n    message_descriptor.name = message_definition.definition_name().split('.')[(- 1)]\n    fields = sorted(message_definition.all_fields(), key=(lambda v: v.number))\n    if fields:\n        message_descriptor.fields = [describe_field(field) for field in fields]\n    try:\n        nested_messages = message_definition.__messages__\n    except AttributeError:\n        pass\n    else:\n        message_descriptors = []\n        for name in nested_messages:\n            value = getattr(message_definition, name)\n            message_descriptors.append(describe_message(value))\n        message_descriptor.message_types = message_descriptors\n    try:\n        nested_enums = message_definition.__enums__\n    except AttributeError:\n        pass\n    else:\n        enum_descriptors = []\n        for name in nested_enums:\n            value = getattr(message_definition, name)\n            enum_descriptors.append(describe_enum(value))\n        message_descriptor.enum_types = enum_descriptors\n    return message_descriptor", "docstring": "Build descriptor for Message class.\n\nArgs:\nmessage_definition: Message class to provide descriptor for.\n\nReturns:\nInitialized MessageDescriptor instance describing the Message class.", "source": "codesearchnet"}
{"code": "def prune_graph(graph_str, package_name):\n    \n    \n    g = read_dot(graph_str)\n    nodes = set()\n\n    for node, attrs in g.node_attr.iteritems():\n        attr = [x for x in attrs if x[0] == \"label\"]\n        if attr:\n            label = attr[0][1]\n            try:\n                req_str = _request_from_label(label)\n                request = PackageRequest(req_str)\n            except PackageRequestError:\n                continue\n\n            if request.name == package_name:\n                nodes.add(node)\n\n    if not nodes:\n        raise ValueError(\"The package %r does not appear in the graph.\"\n                         % package_name)\n\n    \n    g_rev = g.reverse()\n    accessible_nodes = set()\n    access = accessibility(g_rev)\n    for node in nodes:\n        nodes_ = access.get(node, [])\n        accessible_nodes |= set(nodes_)\n\n    \n    inaccessible_nodes = set(g.nodes()) - accessible_nodes\n    for node in inaccessible_nodes:\n        g.del_node(node)\n\n    return write_dot(g)", "docstring": "Prune a package graph so it only contains nodes accessible from the\ngiven package.\n\nArgs:\ngraph_str (str): Dot-language graph string.\npackage_name (str): Name of package of interest.\n\nReturns:\nPruned graph, as a string.", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context, file_object=None):\n    \n    if file_object:\n      raise ValueError('File object value set.')\n\n    super(EWFFile, self).__init__(resolver_context)\n    self._file_objects = []", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_object (Optional[FileIO]): file-like object.\n\nRaises:\nValueError: when file_object is set.", "source": "juraj-google-style"}
{"code": "def add_answer_for_student(student_item, vote, rationale):\n    \n    answers = get_answers_for_student(student_item)\n    answers.add_answer(vote, rationale)\n\n    sub_api.create_submission(student_item, {\n        ANSWER_LIST_KEY: answers.get_answers_as_list()\n    })", "docstring": "Add an answer for a student to the backend\n\nArgs:\nstudent_item (dict): The location of the problem this submission is\nassociated with, as defined by a course, student, and item.\nvote (int): the option that student voted for\nrationale (str): the reason why the student vote for the option", "source": "juraj-google-style"}
{"code": "def parse(self, sentence: str) -> typing.List[str]:\n    if sentence == '':\n        return []\n    chunks = [sentence[0]]\n    base_score = -sum((sum(g.values()) for g in self.model.values())) * 0.5\n    for i in range(1, len(sentence)):\n        score = base_score\n        if i > 2:\n            score += self.model.get('UW1', {}).get(sentence[i - 3], 0)\n        if i > 1:\n            score += self.model.get('UW2', {}).get(sentence[i - 2], 0)\n        score += self.model.get('UW3', {}).get(sentence[i - 1], 0)\n        score += self.model.get('UW4', {}).get(sentence[i], 0)\n        if i + 1 < len(sentence):\n            score += self.model.get('UW5', {}).get(sentence[i + 1], 0)\n        if i + 2 < len(sentence):\n            score += self.model.get('UW6', {}).get(sentence[i + 2], 0)\n        if i > 1:\n            score += self.model.get('BW1', {}).get(sentence[i - 2:i], 0)\n        score += self.model.get('BW2', {}).get(sentence[i - 1:i + 1], 0)\n        if i + 1 < len(sentence):\n            score += self.model.get('BW3', {}).get(sentence[i:i + 2], 0)\n        if i > 2:\n            score += self.model.get('TW1', {}).get(sentence[i - 3:i], 0)\n        if i > 1:\n            score += self.model.get('TW2', {}).get(sentence[i - 2:i + 1], 0)\n        if i + 1 < len(sentence):\n            score += self.model.get('TW3', {}).get(sentence[i - 1:i + 2], 0)\n        if i + 2 < len(sentence):\n            score += self.model.get('TW4', {}).get(sentence[i:i + 3], 0)\n        if score > 0:\n            chunks.append(sentence[i])\n        else:\n            chunks[-1] += sentence[i]\n    return chunks", "docstring": "Parses the input sentence and returns a list of semantic chunks.\n\nArgs:\nsentence (str): An input sentence.\n\nReturns:\nA list of semantic chunks (List[str]).", "source": "github-repos"}
{"code": "def ValidateDependencies(rdf_artifact):\n    for dependency in GetArtifactDependencies(rdf_artifact):\n        try:\n            dependency_obj = REGISTRY.GetArtifact(dependency)\n        except rdf_artifacts.ArtifactNotRegisteredError as e:\n            raise rdf_artifacts.ArtifactDependencyError(rdf_artifact, 'missing dependency', cause=e)\n        message = dependency_obj.error_message\n        if message:\n            raise rdf_artifacts.ArtifactDependencyError(rdf_artifact, 'dependency error', cause=message)", "docstring": "Validates artifact dependencies.\n\nThis method checks whether all dependencies of the artifact are present\nand contain no errors.\n\nThis method can be called only after all other artifacts have been loaded.\n\nArgs:\nrdf_artifact: RDF object artifact.\n\nRaises:\nArtifactDependencyError: If a dependency is missing or contains errors.", "source": "codesearchnet"}
{"code": "def plot_val_with_title(self, idxs, y):\n        \n        \n        if len(idxs) > 0:\n            imgs = np.stack([self.ds[x][0] for x in idxs])\n            title_probs = [self.probs[x,y] for x in idxs]\n\n            return plots(self.ds.denorm(imgs), rows=1, titles=title_probs)\n        \n        else:\n            return False;", "docstring": "Displays the images and their probabilities of belonging to a certain class\n\nArguments:\nidxs (numpy.ndarray): indexes of the image samples from the dataset\ny (int): the selected class\n\nReturns:\nPlots the images in n rows [rows = n]", "source": "juraj-google-style"}
{"code": "def _cast_to_frameset(cls, other):\n        \n        if isinstance(other, FrameSet):\n            return other\n        try:\n            return FrameSet(other)\n        except Exception:\n            return NotImplemented", "docstring": "Private method to simplify comparison operations.\n\nArgs:\nother (:class:`FrameSet` or set or frozenset or or iterable): item to be compared\n\nReturns:\n:class:`FrameSet`\n\nRaises:\n:class:`NotImplemented`: if a comparison is impossible", "source": "juraj-google-style"}
{"code": "def tracer_diffusion_coefficient( self ):\n        \n        if self.has_run:\n            return self.atoms.sum_dr_squared() / ( 6.0 * float( self.number_of_atoms ) * self.lattice.time )\n        else:\n            return None", "docstring": "Tracer diffusion coefficient, D*.\n\nArgs:\nNone\n\nReturns:\n(Float): The tracer diffusion coefficient, D*.", "source": "juraj-google-style"}
{"code": "def import_family(self, rfa_file):\n    self._add_entry(templates.IMPORT_FAMILY.format(family_file=rfa_file))", "docstring": "Append a import family entry to the journal.\n\nThis instructs Revit to import a family into the opened model.\n\nArgs:\nrfa_file (str): full path of the family file", "source": "codesearchnet"}
{"code": "def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenDecoderConfig, **kwargs):\n    return cls(text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder\nconfigurations.\n\nReturns:\n[`MusicgenConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def find_link(self, target_node):\n    try:\n        return next((l for l in self.link_list if (l.target == target_node)))\n    except StopIteration:\n        return None", "docstring": "Find the link that points to ``target_node`` if it exists.\n\nIf no link in ``self`` points to ``target_node``, return None\n\nArgs:\ntarget_node (Node): The node to look for in ``self.link_list``\n\nReturns:\nLink: An existing link pointing to ``target_node`` if found\n\nNone: If no such link exists\n\nExample:\n>>> node_1 = Node('One')\n>>> node_2 = Node('Two')\n>>> node_1.add_link(node_2, 1)\n>>> link_1 = node_1.link_list[0]\n>>> found_link = node_1.find_link(node_2)\n>>> found_link == link_1\nTrue", "source": "codesearchnet"}
{"code": "def valid_vlan_id(vlan_id, extended=True):\n    minimum_vlan_id = 1\n    maximum_vlan_id = 4095\n    if extended:\n        maximum_vlan_id = 8191\n    return (minimum_vlan_id <= int(vlan_id) <= maximum_vlan_id)", "docstring": "Validates a VLAN ID.\n\nArgs:\nvlan_id (integer): VLAN ID to validate.  If passed as ``str``, it will\nbe cast to ``int``.\nextended (bool): If the VLAN ID range should be considered extended\nfor Virtual Fabrics.\n\nReturns:\nbool: ``True`` if it is a valid VLAN ID.  ``False`` if not.\n\nRaises:\nNone\n\nExamples:\n>>> import pynos.utilities\n>>> vlan = '565'\n>>> pynos.utilities.valid_vlan_id(vlan)\nTrue\n>>> extended = False\n>>> vlan = '6789'\n>>> pynos.utilities.valid_vlan_id(vlan, extended=extended)\nFalse\n>>> pynos.utilities.valid_vlan_id(vlan)\nTrue", "source": "codesearchnet"}
{"code": "def memory_write32(self, addr, data, zone=None):\n        \n        return self.memory_write(addr, data, zone, 32)", "docstring": "Writes words to memory of a target system.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of words to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of words written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "juraj-google-style"}
{"code": "def get_raw_data(self, url, *args, **kwargs):\n        \n        res = self._conn.get(url, headers=self._prepare_headers(**kwargs))\n        if res.status_code == 200:\n            return res.content\n        else:\n            return None", "docstring": "Gets data from url as bytes\n\nReturns content under the provided url as bytes\nie. for binary data\n\nArgs:\n**url**: address of the wanted data\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nbytes", "source": "juraj-google-style"}
{"code": "def __new__(cls: Type[_T], *args: PathLike) -> _T:\n    if cls == Path:\n        if not args:\n            return register.make_path('.')\n        root, *parts = args\n        return register.make_path(root).joinpath(*parts)\n    else:\n        return super().__new__(cls, *args)", "docstring": "Create a new path.\n\n```python\npath = abcpath.Path()\n```\n\nWe use __new__ instead of __init__ to allow subclassing, even though the\nusage of __init__ is possible from python>=3.12.\n\nArgs:\n*args: Paths to create\n\nReturns:\npath: The registered path", "source": "github-repos"}
{"code": "def count_tornadoes(input_data):\n    return input_data | 'months with tornadoes' >> beam.FlatMap(lambda row: [(int(row['month']), 1)] if row['tornado'] else []) | 'monthly count' >> beam.CombinePerKey(sum) | 'format' >> beam.Map(lambda k_v: {'month': k_v[0], 'tornado_count': k_v[1]})", "docstring": "Workflow computing the number of tornadoes for each month that had one.\n\nArgs:\ninput_data: a PCollection of dictionaries representing table rows. Each\ndictionary will have a 'month' and a 'tornado' key as described in the\nmodule comment.\n\nReturns:\nA PCollection of dictionaries containing 'month' and 'tornado_count' keys.\nMonths without tornadoes are skipped.", "source": "github-repos"}
{"code": "def _get_new_group_key(self, devices):\n    new_key = self._group_key\n    self._group_key += 1\n    self._instance_key_table[new_key] = {}\n    for device in devices:\n        self._instance_key_table[new_key][device] = INSTANCE_KEY_START_NUMBER\n    return new_key", "docstring": "Returns a new group key.\n\nThe caller should store and reuse the same group key for the same set of\ndevices. Calling this method always returns a new group key.\n\nThis method is not thread-safe.\n\nArgs:\ndevices: a list of canonical device strings in a collective group.\n\nReturns:\na new group key.", "source": "github-repos"}
{"code": "def reset_internal_states(self, record=None):\n    self._record = None\n    self._count = 0\n    self._record = record", "docstring": "Resets the internal state of the recorder.\n\nArgs:\nrecord: records.TestResultRecord, the test record for a test.", "source": "codesearchnet"}
{"code": "def error(msg):\n    return debugger_cli_common.rich_text_lines_from_rich_line_list([RL('ERROR: ' + msg, COLOR_RED)])", "docstring": "Generate a RichTextLines output for error.\n\nArgs:\nmsg: (str) The error message.\n\nReturns:\n(debugger_cli_common.RichTextLines) A representation of the error message\nfor screen output.", "source": "github-repos"}
{"code": "def start_time_distance(item_a, item_b, max_value):\n    \n    start_time_diff = np.abs(item_a.times[0] - item_b.times[0])\n    return np.minimum(start_time_diff, max_value) / float(max_value)", "docstring": "Absolute difference between the starting times of each item.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"}
{"code": "def generate_tests(self, test_logic, name_func, arg_sets, uid_func=None):\n    self._assert_function_names_in_stack([STAGE_NAME_PRE_RUN])\n    root_msg = 'During test generation of \"%s\":' % test_logic.__name__\n    for args in arg_sets:\n        test_name = name_func(*args)\n        if test_name in self.get_existing_test_names():\n            raise Error('%s Test name \"%s\" already exists, cannot be duplicated!' % (root_msg, test_name))\n        test_func = functools.partial(test_logic, *args)\n        for attr_name in (ATTR_MAX_RETRY_CNT, ATTR_MAX_CONSEC_ERROR, ATTR_REPEAT_CNT):\n            attr = getattr(test_logic, attr_name, None)\n            if attr is not None:\n                setattr(test_func, attr_name, attr)\n        if uid_func is not None:\n            uid = uid_func(*args)\n            if uid is None:\n                logging.warning('%s UID for arg set %s is None.', root_msg, args)\n            else:\n                setattr(test_func, 'uid', uid)\n        self._generated_test_table[test_name] = test_func", "docstring": "Generates tests in the test class.\n\nThis function has to be called inside a test class's `self.pre_run`.\n\nGenerated tests are not written down as methods, but as a list of\nparameter sets. This way we reduce code repetition and improve test\nscalability.\n\nUsers can provide an optional function to specify the UID of each test.\nNot all generated tests are required to have UID.\n\nArgs:\ntest_logic: function, the common logic shared by all the generated\ntests.\nname_func: function, generate a test name according to a set of\ntest arguments. This function should take the same arguments as\nthe test logic function.\narg_sets: a list of tuples, each tuple is a set of arguments to be\npassed to the test logic function and name function.\nuid_func: function, an optional function that takes the same\narguments as the test logic function and returns a string that\nis the corresponding UID.", "source": "github-repos"}
{"code": "def logsumexp(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Logsumexp(axis, keepdims).symbolic_call(x)\n    return backend.math.logsumexp(x, axis=axis, keepdims=keepdims)", "docstring": "Computes the logarithm of sum of exponentials of elements in a tensor.\n\nArgs:\nx: Input tensor.\naxis: An integer or a tuple of integers specifying the axis/axes\nalong which to compute the sum. If `None`, the sum is computed\nover all elements. Defaults to `None`.\nkeepdims: A boolean indicating whether to keep the dimensions of\nthe input tensor when computing the sum. Defaults to `False`.\n\nReturns:\nA tensor containing the logarithm of the sum of exponentials of\nelements in `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([1., 2., 3.])\n>>> logsumexp(x)\n3.407606", "source": "github-repos"}
{"code": "def add_query(self, query, join_with=AND):\n        \n        if not isinstance(query, DomainCondition):\n            query = DomainCondition.from_tuple(query)\n        if len(self.query):\n            self.query.append(join_with)\n        self.query.append(query)", "docstring": "Join a new query to existing queries on the stack.\n\nArgs:\nquery (tuple or list or DomainCondition): The condition for the\nquery. If a ``DomainCondition`` object is not provided, the\ninput should conform to the interface defined in\n:func:`~.domain.DomainCondition.from_tuple`.\njoin_with (str): The join string to apply, if other queries are\nalready on the stack.", "source": "juraj-google-style"}
{"code": "def get_inventory(self, keys=None):\n    inventory = defaultdict(list)\n    keys = (keys or ['vm-type', 'groups', 'vm-provider'])\n    vms = self.prefix.get_vms().values()\n    for vm in vms:\n        entry = self._generate_entry(vm)\n        vm_spec = vm.spec\n        for key in keys:\n            value = self.get_key(key, vm_spec)\n            if (value is None):\n                continue\n            if isinstance(value, list):\n                for sub_value in value:\n                    inventory['{}={}'.format(key, sub_value)].append(entry)\n            else:\n                inventory['{}={}'.format(key, value)].append(entry)\n        for group in vm_spec.get('groups', []):\n            inventory[group].append(entry)\n    return inventory", "docstring": "Create an Ansible inventory based on python dicts and lists.\nThe returned value is a dict in which every key represents a group\nand every value is a list of entries for that group.\n\nArgs:\nkeys (list of str): Path to the keys that will be used to\ncreate groups.\n\nReturns:\ndict: dict based Ansible inventory", "source": "codesearchnet"}
{"code": "def download_url(url, root, filename=None, md5=None):\n    \n    from six.moves import urllib\n\n    root = os.path.expanduser(root)\n    if not filename:\n        filename = os.path.basename(url)\n    fpath = os.path.join(root, filename)\n\n    makedir_exist_ok(root)\n\n    \n    if os.path.isfile(fpath) and check_integrity(fpath, md5):\n        print('Using downloaded and verified file: ' + fpath)\n    else:\n        try:\n            print('Downloading ' + url + ' to ' + fpath)\n            urllib.request.urlretrieve(\n                url, fpath,\n                reporthook=gen_bar_updater()\n            )\n        except OSError:\n            if url[:5] == 'https':\n                url = url.replace('https:', 'http:')\n                print('Failed download. Trying https -> http instead.'\n                      ' Downloading ' + url + ' to ' + fpath)\n                urllib.request.urlretrieve(\n                    url, fpath,\n                    reporthook=gen_bar_updater()\n                )", "docstring": "Download a file from a url and place it in root.\n\nArgs:\nurl (str): URL to download file from\nroot (str): Directory to place downloaded file in\nfilename (str, optional): Name to save the file under. If None, use the basename of the URL\nmd5 (str, optional): MD5 checksum of the download. If None, do not check", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    if (not self._encoding):\n        self._encoding = parser_mediator.codepage\n    try:\n        if (not self._HasExpectedLineLength(file_object)):\n            display_name = parser_mediator.GetDisplayName()\n            raise errors.UnableToParseFile('[{0:s}] Unable to parse DSV file: {1:s} with error: unexpected line length.'.format(self.NAME, display_name))\n    except UnicodeDecodeError as exception:\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile('[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(self.NAME, display_name, exception))\n    try:\n        line_reader = self._CreateLineReader(file_object)\n        reader = self._CreateDictReader(line_reader)\n        row_offset = line_reader.tell()\n        row = next(reader)\n    except (StopIteration, csv.Error, UnicodeDecodeError) as exception:\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile('[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(self.NAME, display_name, exception))\n    number_of_columns = len(self.COLUMNS)\n    number_of_records = len(row)\n    if (number_of_records != number_of_columns):\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile('[{0:s}] Unable to parse DSV file: {1:s}. Wrong number of records (expected: {2:d}, got: {3:d})'.format(self.NAME, display_name, number_of_columns, number_of_records))\n    for (key, value) in row.items():\n        if (self._MAGIC_TEST_STRING in (key, value)):\n            display_name = parser_mediator.GetDisplayName()\n            raise errors.UnableToParseFile('[{0:s}] Unable to parse DSV file: {1:s}. Signature mismatch.'.format(self.NAME, display_name))\n    row = self._ConvertRowToUnicode(parser_mediator, row)\n    if (not self.VerifyRow(parser_mediator, row)):\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile('[{0:s}] Unable to parse DSV file: {1:s}. Verification failed.'.format(self.NAME, display_name))\n    self.ParseRow(parser_mediator, row_offset, row)\n    row_offset = line_reader.tell()\n    for row in reader:\n        if parser_mediator.abort:\n            break\n        row = self._ConvertRowToUnicode(parser_mediator, row)\n        self.ParseRow(parser_mediator, row_offset, row)\n        row_offset = line_reader.tell()", "docstring": "Parses a DSV text file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def test_pass(self, e=None):\n    self._test_end(TestResultEnums.TEST_RESULT_PASS, e)", "docstring": "To mark the test as passed in this record.\n\nArgs:\ne: An instance of mobly.signals.TestPass.", "source": "github-repos"}
{"code": "def angle(self, deg=False):\n        \n        if self.dtype.str[1] != 'c':\n            warnings.warn('angle() is intended for complex-valued timeseries',\n                          RuntimeWarning, 1)\n        return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels)", "docstring": "Return the angle of the complex argument.\n\nArgs:\ndeg (bool, optional):\nReturn angle in degrees if True, radians if False (default).\n\nReturns:\nangle (Timeseries):\nThe counterclockwise angle from the positive real axis on\nthe complex plane, with dtype as numpy.float64.", "source": "juraj-google-style"}
{"code": "def is_valid(self, value):\n    try:\n        if validation_on():\n            self.validate(value, False)\n    except ValueError:\n        return False\n    else:\n        return True", "docstring": "Whether the value passes validation\n\nArgs:\nvalue (obj) : the value to validate against this property type\n\nReturns:\nTrue if valid, False otherwise", "source": "codesearchnet"}
{"code": "def _html_checker(job_var, interval, status, header, _interval_set=False):\n    job_status = job_var.status()\n    job_status_name = job_status.name\n    job_status_msg = job_status.value\n    status.value = (header % job_status_msg)\n    while (job_status_name not in ['DONE', 'CANCELLED']):\n        time.sleep(interval)\n        job_status = job_var.status()\n        job_status_name = job_status.name\n        job_status_msg = job_status.value\n        if (job_status_name == 'ERROR'):\n            break\n        else:\n            if (job_status_name == 'QUEUED'):\n                job_status_msg += (' (%s)' % job_var.queue_position())\n                if (not _interval_set):\n                    interval = max(job_var.queue_position(), 2)\n            elif (not _interval_set):\n                interval = 2\n            status.value = (header % job_status_msg)\n    status.value = (header % job_status_msg)", "docstring": "Internal function that updates the status\nof a HTML job monitor.\n\nArgs:\njob_var (BaseJob): The job to keep track of.\ninterval (int): The status check interval\nstatus (widget): HTML ipywidget for output ot screen\nheader (str): String representing HTML code for status.\n_interval_set (bool): Was interval set by user?", "source": "codesearchnet"}
{"code": "def __init__(self, property_type=TableFeaturePropType.OFPTFPT_NEXT_TABLES,\n                 next_table_ids=None):\n        \n        super().__init__(property_type)\n        self.next_table_ids = (ListOfInstruction() if next_table_ids is None\n                               else next_table_ids)\n        self.update_length()", "docstring": "Create a NextTablesProperty with the optional parameters below.\n\nArgs:\ntype(|TableFeaturePropType_v0x04|):\nProperty Type value of this instance.\nnext_table_ids (|ListOfInstruction_v0x04|):\nList of InstructionGotoTable instances.", "source": "juraj-google-style"}
{"code": "def lyap_e_len(**kwargs):\n    m = ((kwargs['emb_dim'] - 1) \n    min_len = kwargs['emb_dim']\n    min_len += m\n    min_len += (kwargs['min_tsep'] * 2)\n    min_len += kwargs['min_nb']\n    return min_len", "docstring": "Helper function that calculates the minimum number of data points required\nto use lyap_e.\n\nNote that none of the required parameters may be set to None.\n\nKwargs:\nkwargs(dict):\narguments used for lyap_e (required: emb_dim, matrix_dim, min_nb\nand min_tsep)\n\nReturns:\nminimum number of data points required to call lyap_e with the given\nparameters", "source": "codesearchnet"}
{"code": "def _subtoken_to_tokens(self, subtokens):\n    concatenated = ''.join(subtokens)\n    split = concatenated.split('_')\n    return [_unescape_token((t + '_')) for t in split if t]", "docstring": "Converts a list of subtoken to a list of tokens.\n\nArgs:\nsubtokens: a list of integers in the range [0, vocab_size)\n\nReturns:\na list of strings.", "source": "codesearchnet"}
{"code": "def get_heroku_connect_models():\n    from django.apps import apps\n    apps.check_models_ready()\n    from heroku_connect.db.models import HerokuConnectModel\n    return (model for models in apps.all_models.values() for model in models.values() if (issubclass(model, HerokuConnectModel) and (not model._meta.managed)))", "docstring": "Return all registered Heroku Connect Models.\n\nReturns:\n(Iterator):\nAll registered models that are subclasses of `.HerokuConnectModel`.\nAbstract models are excluded, since they are not registered.", "source": "codesearchnet"}
{"code": "def hamming_distance(str1, str2):\n    \n    if len(str1) != len(str2):\n        raise VisualizationError('Strings not same length.')\n    return sum(s1 != s2 for s1, s2 in zip(str1, str2))", "docstring": "Calculate the Hamming distance between two bit strings\n\nArgs:\nstr1 (str): First string.\nstr2 (str): Second string.\nReturns:\nint: Distance between strings.\nRaises:\nVisualizationError: Strings not same length", "source": "juraj-google-style"}
{"code": "def __init__(self, num_classes: int, matcher: OneFormerHungarianMatcher, weight_dict: Dict[str, float], eos_coef: float, num_points: int, oversample_ratio: float, importance_sample_ratio: float, contrastive_temperature: Optional[float]=None):\n    requires_backends(self, ['scipy'])\n    super().__init__()\n    self.num_classes = num_classes\n    self.matcher = matcher\n    self.weight_dict = weight_dict\n    self.eos_coef = eos_coef\n    empty_weight = torch.ones(self.num_classes + 1)\n    empty_weight[-1] = self.eos_coef\n    self.register_buffer('empty_weight', empty_weight)\n    self.num_points = num_points\n    self.oversample_ratio = oversample_ratio\n    self.importance_sample_ratio = importance_sample_ratio\n    self.contrastive_temperature = contrastive_temperature\n    if self.contrastive_temperature is not None:\n        self.logit_scale = nn.Parameter(torch.tensor(np.log(1 / contrastive_temperature)))", "docstring": "This class computes the losses using the class predictions, mask predictions and the contrastive queries.\n\nOneformer calculates the classification CE loss on the class predictions. Mask predictions are used for\ncalculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive\nloss.\n\nArgs:\nnum_labels (`int`):\nThe number of classes.\nmatcher (`OneFormerHungarianMatcher`):\nA torch module that computes the assignments between the predictions and labels.\nweight_dict (`Dict[str, float]`):\nA dictionary of weights to be applied to the different losses.\neos_coef (`float`):\nWeight to apply to the null class.\nnum_points (`int`):\nNumber of points to be sampled for dice and mask loss calculations.\noversample_ratio (`float`):\nRequired for pointwise loss calculation.\nimportance_sample_ratio (`float`):\nRequired for pointwise loss calculation.\ncontrastive_temperature (`float`):\nTemperature for scaling the contrastive logits.", "source": "github-repos"}
{"code": "def list_and_add(a, b):\n    \n    if not isinstance(b, list):\n        b = [b]\n    if not isinstance(a, list):\n        a = [a]\n    return a + b", "docstring": "Concatenate anything into a list.\n\nArgs:\na: the first thing\nb: the second thing\n\nReturns:\nlist. All the things in a list.", "source": "juraj-google-style"}
{"code": "def ParseNumericOption(self, options, name, base=10, default_value=None):\n    \n    numeric_value = getattr(options, name, None)\n    if not numeric_value:\n      return default_value\n\n    try:\n      return int(numeric_value, base)\n\n    except (TypeError, ValueError):\n      name = name.replace('_', ' ')\n      raise errors.BadConfigOption(\n          'Unsupported numeric value {0:s}: {1!s}.'.format(\n              name, numeric_value))", "docstring": "Parses a numeric option.\n\nIf the option is not set the default value is returned.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\nname (str): name of the numeric option.\nbase (Optional[int]): base of the numeric value.\ndefault_value (Optional[object]): default value.\n\nReturns:\nint: numeric value.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def swap(self, old_chunks, new_chunk):\n    \n    indexes = [self.index(chunk) for chunk in old_chunks]\n    del self[indexes[0]:indexes[-1] + 1]\n    self.insert(indexes[0], new_chunk)", "docstring": "Swaps old consecutive chunks with new chunk.\n\nArgs:\nold_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to\nbe removed.\nnew_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.", "source": "juraj-google-style"}
{"code": "def __init__(self, model_name: str, *, max_seq_length: Optional[int]=None, **kwargs):\n    if not SentenceTransformer:\n        raise ImportError('sentence-transformers is required to use HuggingfaceTextEmbeddings.Please install it with using `pip install sentence-transformers`.')\n    super().__init__(type_adapter=create_rag_adapter(), **kwargs)\n    self.model_name = model_name\n    self.max_seq_length = max_seq_length\n    self.model_class = SentenceTransformer", "docstring": "Utilizes huggingface SentenceTransformer embeddings for RAG pipeline.\n\nArgs:\nmodel_name: Name of the sentence-transformers model to use\nmax_seq_length: Maximum sequence length for the model\n**kwargs: Additional arguments passed to\n:class:`~apache_beam.ml.transforms.base.EmbeddingsManager`\nconstructor including ModelHandler arguments", "source": "github-repos"}
{"code": "def days_since_last_snowfall(self, value=99):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `days_since_last_snowfall`'.format(value))\n    self._days_since_last_snowfall = value", "docstring": "Corresponds to IDD Field `days_since_last_snowfall`\n\nArgs:\nvalue (int): value for IDD Field `days_since_last_snowfall`\nMissing value: 99\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def retry(func):\n    \n    def retried_func(*args, **kwargs):\n        max_tries = 3\n        tries = 0\n        while True:\n            try:\n                resp = func(*args, **kwargs)\n\n            except requests.exceptions.ConnectionError as exc:\n                exc.msg = \"Connection error for session; exiting\"\n                raise exc\n\n            except requests.exceptions.HTTPError as exc:\n                exc.msg = \"HTTP error for session; exiting\"\n                raise exc\n\n            if resp.status_code != 200 and tries < max_tries:\n                logger.warning(\"retrying request; current status code: {}\"\n                               .format(resp.status_code))\n                tries += 1\n                \n                time.sleep(tries ** 2)\n                continue\n\n            break\n\n        if resp.status_code != 200:\n            error_message = resp.json()[\"error\"][\"message\"]\n            logger.error(\"HTTP Error code: {}: {}\".format(resp.status_code, error_message))\n            logger.error(\"Rule payload: {}\".format(kwargs[\"rule_payload\"]))\n            raise requests.exceptions.HTTPError\n\n        return resp\n\n    return retried_func", "docstring": "Decorator to handle API retries and exceptions. Defaults to three retries.\n\nArgs:\nfunc (function): function for decoration\n\nReturns:\ndecorated function", "source": "juraj-google-style"}
{"code": "def read_log(self, logfile):\n    logfile.seek(0)\n    (field_names, _) = self._parse_bro_header(logfile)\n    while 1:\n        _line = next(logfile).strip()\n        if (not _line.startswith('\n            (yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter)))))\n        else:\n            time.sleep(0.1)\n            break", "docstring": "The read_log method returns a memory efficient generator for rows in a Bro log.\n\nUsage:\nrows = my_bro_reader.read_log(logfile)\nfor row in rows:\ndo something with row\n\nArgs:\nlogfile: The Bro Log file.", "source": "codesearchnet"}
{"code": "def tf_baseline_loss(self, states, internals, reward, update, reference=None):\n    if (self.baseline_mode == 'states'):\n        loss = self.baseline.loss(states=states, internals=internals, reward=reward, update=update, reference=reference)\n    elif (self.baseline_mode == 'network'):\n        loss = self.baseline.loss(states=self.network.apply(x=states, internals=internals, update=update), internals=internals, reward=reward, update=update, reference=reference)\n    regularization_loss = self.baseline.regularization_loss()\n    if (regularization_loss is not None):\n        loss += regularization_loss\n    return loss", "docstring": "Creates the TensorFlow operations for calculating the baseline loss of a batch.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nreward: Reward tensor.\nupdate: Boolean tensor indicating whether this call happens during an update.\nreference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\nLoss tensor.", "source": "codesearchnet"}
{"code": "def get_nic(access_token, subscription_id, resource_group, nic_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/networkInterfaces/', nic_name,\n                        '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about a network interface.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnic_name (str): Name of the NIC.\n\nReturns:\nHTTP response. NIC JSON body.", "source": "juraj-google-style"}
{"code": "def ListChildren(self, urn, limit=None, age=NEWEST_TIME):\n    \n    _, children_urns = list(\n        self.MultiListChildren([urn], limit=limit, age=age))[0]\n    return children_urns", "docstring": "Lists bunch of directories efficiently.\n\nArgs:\nurn: Urn to list children.\nlimit: Max number of children to list.\nage: The age of the items to retrieve. Should be one of ALL_TIMES,\nNEWEST_TIME or a range.\n\nReturns:\nRDFURNs instances of each child.", "source": "juraj-google-style"}
{"code": "def kms_key_arn(kms_client, alias):\n  \n  try:\n    response = kms_client.describe_key(KeyId=alias)\n    key_arn = response[\"KeyMetadata\"][\"Arn\"]\n  except ClientError as error:\n    raise RuntimeError(\"Failed to obtain key arn for alias {}, error: {}\".format(alias, error.response[\"Error\"][\"Message\"]))\n\n  return key_arn", "docstring": "Obtain the full key arn based on the key alias provided\nArgs:\nkms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients.\nalias (string): alias of key, example alias/proto0-evs-drm.\n\nReturns:\nstring of the full key arn", "source": "juraj-google-style"}
{"code": "def find_duplicates_in_array(array):\n    \n    duplicates = []\n    non_duplicates = []\n\n    if len(array) != len(set(array)):\n        for item in array:\n            if item not in non_duplicates:\n                non_duplicates.append(item)\n            elif item in non_duplicates and item not in duplicates:\n                duplicates.append(item)\n\n    return duplicates", "docstring": "Runs through the array and returns the elements that contain\nmore than one duplicate\n\nArgs:\narray: The array to check for duplicates.\n\nReturns:\nArray of the elements that are duplicates. Returns empty list if\nthere are no duplicates.", "source": "juraj-google-style"}
{"code": "def _parse_meta_info(self, line):\n    if self.mslevel:\n        self.meta_info['ms_level'] = self.mslevel\n    if self.polarity:\n        self.meta_info['polarity'] = self.polarity\n    for (k, regexes) in six.iteritems(self.meta_regex):\n        for reg in regexes:\n            m = re.search(reg, line, re.IGNORECASE)\n            if m:\n                self.meta_info[k] = m.group(1).strip()", "docstring": "Parse and extract all meta data by looping through the dictionary of meta_info regexs\n\nupdates self.meta_info\n\nArgs:\nline (str): line of the msp file", "source": "codesearchnet"}
{"code": "def add_values_to_bundle_safe(connection, bundle, values):\n    for value in values:\n        try:\n            connection.addValueToBundle(bundle, value)\n        except YouTrackException as e:\n            if (e.response.status == 409):\n                print(('Value with name [ %s ] already exists in bundle [ %s ]' % (utf8encode(value.name), utf8encode(bundle.name))))\n            else:\n                raise e", "docstring": "Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.\n\nArgs:\nconnection: An opened Connection instance.\nbundle: Bundle instance to add values in.\nvalues: Values, that should be added in bundle.\n\nRaises:\nYouTrackException: if something is wrong with queries.", "source": "codesearchnet"}
{"code": "def check(self, dsm, independence_factor=5, **kwargs):\n        \n        \n        least_common_mechanism = False\n        message = ''\n        \n        data = dsm.data\n        categories = dsm.categories\n        dsm_size = dsm.size[0]\n\n        if not categories:\n            categories = ['appmodule'] * dsm_size\n\n        dependent_module_number = []\n        \n        for j in range(0, dsm_size):\n            dependent_module_number.append(0)\n            for i in range(0, dsm_size):\n                if (categories[i] != 'framework' and\n                        categories[j] != 'framework' and\n                        data[i][j] > 0):\n                    dependent_module_number[j] += 1\n        \n        \n        \n        \n        for index, item in enumerate(dsm.categories):\n            if item == 'broker' or item == 'applib':\n                dependent_module_number[index] = 0\n        if max(dependent_module_number) <= dsm_size / independence_factor:\n            least_common_mechanism = True\n        else:\n            maximum = max(dependent_module_number)\n            message = (\n                'Dependencies to %s (%s) > matrix size (%s) / '\n                'independence factor (%s) = %s' % (\n                    dsm.entities[dependent_module_number.index(maximum)],\n                    maximum, dsm_size, independence_factor,\n                    dsm_size / independence_factor))\n\n        return least_common_mechanism, message", "docstring": "Check least common mechanism.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\nindependence_factor (int): if the maximum dependencies for one\nmodule is inferior or equal to the DSM size divided by the\nindependence factor, then this criterion is verified.\n\nReturns:\nbool: True if least common mechanism, else False", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.CreateCompany = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.CompanyService/CreateCompany\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__service__pb2.CreateCompanyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__pb2.Company.FromString,\n        )\n        self.GetCompany = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.CompanyService/GetCompany\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__service__pb2.GetCompanyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__pb2.Company.FromString,\n        )\n        self.UpdateCompany = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.CompanyService/UpdateCompany\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__service__pb2.UpdateCompanyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__pb2.Company.FromString,\n        )\n        self.DeleteCompany = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.CompanyService/DeleteCompany\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__service__pb2.DeleteCompanyRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.ListCompanies = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.CompanyService/ListCompanies\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__service__pb2.ListCompaniesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_company__service__pb2.ListCompaniesResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def set_configuration_from_input_tensors(self, input_tensors):\n    if len(input_tensors) != self.number_of_tuple_elements:\n        raise ValueError(f'input_tensors is {str(input_tensors)}, but should be a list of {self.number_of_tuple_elements} Tensors')\n    self.set_tuple_shapes([t.shape for t in input_tensors])\n    self.set_tuple_types([t.dtype for t in input_tensors])", "docstring": "Sets the shapes and types of the queue tuple elements.\n\ninput_tensors is a list of Tensors whose types and shapes are used\nto set the queue configuration.\n\nArgs:\ninput_tensors: list of Tensors of the same types and shapes as\nthe desired queue Tuple.\n\nRaises:\nValueError: if input_tensors is not a list of length\nself.number_of_tuple_elements", "source": "github-repos"}
{"code": "def _get_create_query(partition, tablename, include=None):\n        \n        TYPE_MAP = {\n            'int': 'INTEGER',\n            'float': 'REAL',\n            six.binary_type.__name__: 'TEXT',\n            six.text_type.__name__: 'TEXT',\n            'date': 'DATE',\n            'datetime': 'TIMESTAMP WITHOUT TIME ZONE'\n        }\n        columns_types = []\n        if not include:\n            include = []\n        for column in sorted(partition.datafile.reader.columns, key=lambda x: x['pos']):\n            if include and column['name'] not in include:\n                continue\n            sqlite_type = TYPE_MAP.get(column['type'])\n            if not sqlite_type:\n                raise Exception('Do not know how to convert {} to sql column.'.format(column['type']))\n            columns_types.append('    \"{}\" {}'.format(column['name'], sqlite_type))\n        columns_types_str = ',\\n'.join(columns_types)\n        query = 'CREATE TABLE IF NOT EXISTS {}(\\n{})'.format(tablename, columns_types_str)\n        return query", "docstring": "Creates and returns `CREATE TABLE ...` sql statement for given mprows.\n\nArgs:\npartition (orm.Partition):\ntablename (str): name of the table in the return create query.\ninclude (list of str, optional): list of columns to include to query.\n\nReturns:\nstr: create table query.", "source": "juraj-google-style"}
{"code": "def site_coordination_numbers( self ):\n        \n        coordination_numbers = {}\n        for l in self.site_labels:\n            coordination_numbers[ l ] = set( [ len( site.neighbours ) for site in self.sites if site.label is l ] ) \n        return coordination_numbers", "docstring": "Returns a dictionary of the coordination numbers for each site label. e.g.::\n\n{ 'A' : { 4 }, 'B' : { 2, 4 } }\n\nArgs:\nnone\n\nReturns:\ncoordination_numbers (Dict(Str:Set(Int))): dictionary of coordination\nnumbers for each site label.", "source": "juraj-google-style"}
{"code": "def make_subdivision_matrices(degree):\n    left = np.zeros(((degree + 1), (degree + 1)), order='F')\n    right = np.zeros(((degree + 1), (degree + 1)), order='F')\n    left[(0, 0)] = 1.0\n    right[((- 1), (- 1))] = 1.0\n    for col in six.moves.xrange(1, (degree + 1)):\n        half_prev = (0.5 * left[(:col, (col - 1))])\n        left[(:col, col)] = half_prev\n        left[(1:(col + 1), col)] += half_prev\n        complement = (degree - col)\n        right[((- (col + 1)):, complement)] = left[(:(col + 1), col)]\n    return (left, right)", "docstring": "Make the matrix used to subdivide a curve.\n\n.. note::\n\nThis is a helper for :func:`_subdivide_nodes`. It does not have a\nFortran speedup because it is **only** used by a function which has\na Fortran speedup.\n\nArgs:\ndegree (int): The degree of the curve.\n\nReturns:\nTuple[numpy.ndarray, numpy.ndarray]: The matrices used to convert\nthe nodes into left and right nodes, respectively.", "source": "codesearchnet"}
{"code": "def register_rml_def(self, location_type, location, filename=None, **kwargs):\n    if (location_type == 'directory'):\n        self.register_directory(location, **kwargs)\n    elif (location_type == 'filepath'):\n        if (not os.path.exists(location)):\n            raise OSError('File not found', location)\n        if os.path.isfile(location):\n            self.register_rml(location)\n        elif filename:\n            new_loc = os.path.join(location, filename)\n            if (not os.path.exists(new_loc)):\n                raise OSError('File not found', new_loc)\n            elif os.path.isfile(new_loc):\n                self.register_rml(new_loc)\n        else:\n            raise OSError('File not found', location)\n    elif location_type.startswith('package'):\n        pkg_path = importlib.util.find_spec(location).submodule_search_locations[0]\n        if location_type.endswith('_all'):\n            self.register_directory(pkg_path, **kwargs)\n        elif location_type.endswith('_file'):\n            filepath = os.path.join(pkg_path, filename)\n            self.register_rml(filepath, **kwargs)\n        else:\n            raise NotImplementedError", "docstring": "Registers the rml file locations for easy access\n\nArgs:\n-----\nlocation_type: ['package_all',\n'package_file',\n'directory',\n'filepath']\nlocation: The correlated location string based on the location_type\nfilename: Optional, associated with 'package_file' location_type\n\nkwargs:\n-------\ninclude_subfolders: Boolean", "source": "codesearchnet"}
{"code": "def BuildTypeDescriptor(self, value_cls):\n    result = ApiRDFValueDescriptor(name=value_cls.__name__, parents=[klass.__name__ for klass in value_cls.__mro__], doc=(value_cls.__doc__ or ''), kind='PRIMITIVE')\n    result.default = self.BuildDefaultValue(value_cls)\n    return result", "docstring": "Renders metadata of a given value class.\n\nArgs:\nvalue_cls: Metadata of this class will be rendered. This class has to be\n(or to be a subclass of) a self.value_class (i.e. a class that this\nrenderer is capable of rendering).\n\nReturns:\nDictionary with class metadata.", "source": "codesearchnet"}
{"code": "def get_container_service(access_token, subscription_id, resource_group, service_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerService/ContainerServices/', service_name, '?api-version=', ACS_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about an Azure Container Server\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nservice_name (str): Name of container service.\n\nReturns:\nHTTP response. JSON model.", "source": "codesearchnet"}
{"code": "def _resource_capture_helper(self, tensor):\n    assert tensor.dtype == dtypes.resource\n    forward_graph_input_names = [t.name for t in self._forward_graph.inputs]\n    forward_graph_name_to_opdef = {op.name: op.node_def for op in self._forward_graph.get_operations()}\n    index = util.resource_input_index(tensor.name, forward_graph_input_names, forward_graph_name_to_opdef, self._forward_graph._functions)\n    input_placeholder = self._forward_graph.inputs[index]\n    tensor_in_outer_graph = self._forward_graph._while.inputs[index]\n    assert input_placeholder.dtype == dtypes.resource\n    assert tensor_in_outer_graph.dtype == dtypes.resource\n    if index != util.resource_input_index(self._forward_graph.outputs[index].name, forward_graph_input_names, forward_graph_name_to_opdef, self._forward_graph._functions):\n        raise AssertionError(f'Resource tensors must be loop invariants {tensor_in_outer_graph}')\n    self._indirect_captures[ops.tensor_id(tensor)] = self.capture(tensor_in_outer_graph)\n    return self._indirect_captures[ops.tensor_id(tensor)]", "docstring": "Returns the captured resource tensor.\n\nResource-type tensors are not accumulated. If a resource tensor exists in\nthe loop body it must either be a loop input or an output of a nested While\nop inside the loop body which had captured the external resource.\n\nArgs:\ntensor: the external resource Tensor to be captured.\n\nReturns:\nTensor in this graph.", "source": "github-repos"}
{"code": "def cancelOrder(self, order: Order) -> Trade:\n        \n        self.client.cancelOrder(order.orderId)\n        now = datetime.datetime.now(datetime.timezone.utc)\n        key = self.wrapper.orderKey(\n            order.clientId, order.orderId, order.permId)\n        trade = self.wrapper.trades.get(key)\n        if trade:\n            if not trade.isDone():\n                status = trade.orderStatus.status\n                if (status == OrderStatus.PendingSubmit and not order.transmit\n                        or status == OrderStatus.Inactive):\n                    newStatus = OrderStatus.Cancelled\n                else:\n                    newStatus = OrderStatus.PendingCancel\n                logEntry = TradeLogEntry(now, newStatus, '')\n                trade.log.append(logEntry)\n                trade.orderStatus.status = newStatus\n                self._logger.info(f'cancelOrder: {trade}')\n                trade.cancelEvent.emit(trade)\n                trade.statusEvent.emit(trade)\n                self.cancelOrderEvent.emit(trade)\n                self.orderStatusEvent.emit(trade)\n                if newStatus == OrderStatus.Cancelled:\n                    trade.cancelledEvent.emit(trade)\n        else:\n            self._logger.error(f'cancelOrder: Unknown orderId {order.orderId}')\n        return trade", "docstring": "Cancel the order and return the Trade it belongs to.\n\nArgs:\norder: The order to be canceled.", "source": "juraj-google-style"}
{"code": "def document(self, document_id=None):\n    if (document_id is None):\n        document_id = _auto_id()\n    child_path = (self._path + (document_id,))\n    return self._client.document(*child_path)", "docstring": "Create a sub-document underneath the current collection.\n\nArgs:\ndocument_id (Optional[str]): The document identifier\nwithin the current collection. If not provided, will default\nto a random 20 character string composed of digits,\nuppercase and lowercase and letters.\n\nReturns:\n~.firestore_v1beta1.document.DocumentReference: The child\ndocument.", "source": "codesearchnet"}
{"code": "def get_ytvideos(query, ilogger):\n    queue = []\n    search_result = ytdiscoveryapi.search().list(q=query, part='id,snippet', maxResults=1, type='video,playlist').execute()\n    if (not search_result['items']):\n        return []\n    title = search_result['items'][0]['snippet']['title']\n    ilogger.info('Queueing {}'.format(title))\n    if (search_result['items'][0]['id']['kind'] == 'youtube\n        videoid = search_result['items'][0]['id']['videoId']\n        queue.append(['https:\n    elif (search_result['items'][0]['id']['kind'] == 'youtube\n        queue = get_queue_from_playlist(search_result['items'][0]['id']['playlistId'])\n    return queue", "docstring": "Gets either a list of videos from a playlist or a single video, using the\nfirst result of a YouTube search\n\nArgs:\nquery (str): The YouTube search query\nilogger (logging.logger): The logger to log API calls to\n\nReturns:\nqueue (list): The items obtained from the YouTube search", "source": "codesearchnet"}
{"code": "def from_deformation(cls, deformation):\n    dfm = Deformation(deformation)\n    return cls((0.5 * (np.dot(dfm.trans, dfm) - np.eye(3))))", "docstring": "Factory method that returns a Strain object from a deformation\ngradient\n\nArgs:\ndeformation (3x3 array-like):", "source": "codesearchnet"}
{"code": "def fit(self, X, y):\n        \n        self._word_vocab.add_documents(X)\n        self._label_vocab.add_documents(y)\n        if self._use_char:\n            for doc in X:\n                self._char_vocab.add_documents(doc)\n\n        self._word_vocab.build()\n        self._char_vocab.build()\n        self._label_vocab.build()\n\n        return self", "docstring": "Learn vocabulary from training set.\n\nArgs:\nX : iterable. An iterable which yields either str, unicode or file objects.\n\nReturns:\nself : IndexTransformer.", "source": "juraj-google-style"}
{"code": "def to_obj(self, ns_info=None):\n    if ns_info:\n        ns_info.collect(self)\n    if (not hasattr(self, '_binding_class')):\n        return None\n    entity_obj = self._binding_class()\n    for (field, val) in six.iteritems(self._fields):\n        if (isinstance(val, EntityList) and (len(val) == 0)):\n            val = None\n        elif field.multiple:\n            if val:\n                val = [_objectify(field, x, ns_info) for x in val]\n            else:\n                val = []\n        else:\n            val = _objectify(field, val, ns_info)\n        setattr(entity_obj, field.name, val)\n    self._finalize_obj(entity_obj)\n    return entity_obj", "docstring": "Convert to a GenerateDS binding object.\n\nSubclasses can override this function.\n\nReturns:\nAn instance of this Entity's ``_binding_class`` with properties\nset from this Entity.", "source": "codesearchnet"}
{"code": "def __use_cache__(self, cache):\n        \n        \n        try:\n            cache_mod = os.path.getmtime(self.cache_filepath)\n        except FileNotFoundError:\n            return False\n        last_file_mod = sorted( \\\n                self.conn.mgr.loaded_times.values())[-1].timestamp()\n        if last_file_mod > cache_mod:\n            return False\n        curr_load = set(self.conn.mgr.loaded)\n        \n        try:\n            with open(self.loaded_filepath, \"r\") as fo:\n                loaded_files = set(json.loads(fo.read()))\n            if curr_load != loaded_files:\n                return False\n        except FileNotFoundError:\n            return False\n        \n        return cache", "docstring": "checks for changes in the vocabulary and mod times of the files\nto see if the cache should be used.\n\nArgs:\ncache: the kwarg passed in to use the cache during __init__\n\nReturns:\nBool: True = use the cache files\nFalse = requery the triplestore", "source": "juraj-google-style"}
{"code": "def _example_from_definition(self, prop_spec):\n    definition_name = self.get_definition_name_from_ref(prop_spec['$ref'])\n    if self.build_one_definition_example(definition_name):\n        example_dict = self.definitions_example[definition_name]\n        if (not isinstance(example_dict, dict)):\n            return example_dict\n        example = dict(((example_name, example_value) for (example_name, example_value) in example_dict.items()))\n        return example", "docstring": "Get an example from a property specification linked to a definition.\n\nArgs:\nprop_spec: specification of the property you want an example of.\n\nReturns:\nAn example.", "source": "codesearchnet"}
{"code": "def _make_cluster_def(self):\n    self._cluster_def = cluster_pb2.ClusterDef()\n    for job_name, tasks in sorted(self._cluster_spec.items()):\n        try:\n            job_name = compat.as_bytes(job_name)\n        except TypeError:\n            raise TypeError('Job name %r must be bytes or unicode' % job_name)\n        job_def = self._cluster_def.job.add()\n        job_def.name = job_name\n        for i, task_address in sorted(tasks.items()):\n            try:\n                task_address = compat.as_bytes(task_address)\n            except TypeError:\n                raise TypeError('Task address %r must be bytes or unicode' % task_address)\n            job_def.tasks[i] = task_address", "docstring": "Creates a `tf.train.ClusterDef` based on the given `cluster_spec`.\n\nRaises:\nTypeError: If `cluster_spec` is not a dictionary mapping strings to lists\nof strings.", "source": "github-repos"}
{"code": "def from_json_file(cls, json_file: Union[str, os.PathLike]):\n    with open(json_file, encoding='utf-8') as reader:\n        text = reader.read()\n    image_processor_dict = json.loads(text)\n    return cls(**image_processor_dict)", "docstring": "Instantiates a image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON\nfile of parameters.\n\nArgs:\njson_file (`str` or `os.PathLike`):\nPath to the JSON file containing the parameters.\n\nReturns:\nA image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object\ninstantiated from that JSON file.", "source": "github-repos"}
{"code": "def _get_addresses(tx):\n        \n        from_address = set([vin['address'] for vin in tx['vins']])\n        if len(from_address) != 1:\n            raise InvalidTransactionError(\"Transaction should have inputs \" \\\n                                          \"from only one address {}\".format(from_address))\n\n        \n        vouts = sorted(tx['vouts'], key=lambda d: d['n'])[:-1]\n        piece_address = vouts[0]['address']\n        to_address = vouts[-1]['address']\n        from_address = from_address.pop()\n\n        return from_address, to_address, piece_address", "docstring": "Checks for the from, to, and piece address of a SPOOL transaction.\n\nArgs:\ntx (dict): Transaction payload, as returned by\n:meth:`transactions.Transactions.get()`.\n\n.. note:: Formats as returned by JSON-RPC API\n``decoderawtransaction`` have yet to be supported.\n\nReturns:\nTuple([str]): Sender, receiver, and piece addresses.", "source": "juraj-google-style"}
{"code": "def _cleanup_workflow(config, task_id, args, **kwargs):\n    from lightflow.models import Workflow\n    if isinstance(args[0], Workflow):\n        if (config.celery['result_expires'] == 0):\n            AsyncResult(task_id).forget()", "docstring": "Cleanup the results of a workflow when it finished.\n\nConnects to the postrun signal of Celery. If the signal was sent by a workflow,\nremove the result from the result backend.\n\nArgs:\ntask_id (str): The id of the task.\nargs (tuple): The arguments the task was started with.\n**kwargs: Keyword arguments from the hook.", "source": "codesearchnet"}
{"code": "def placeholder_symbol_table(name, version, max_id):\n    if (version <= 0):\n        raise ValueError(('Version must be grater than or equal to 1: %s' % version))\n    if (max_id < 0):\n        raise ValueError(('Max ID must be zero or positive: %s' % max_id))\n    return SymbolTable(table_type=SHARED_TABLE_TYPE, symbols=repeat(None, max_id), name=name, version=version, is_substitute=True)", "docstring": "Constructs a shared symbol table that consists symbols that all have no known text.\n\nThis is generally used for cases where a shared symbol table is not available by the\napplication.\n\nArgs:\nname (unicode): The name of the shared symbol table.\nversion (int): The version of the shared symbol table.\nmax_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``\n\nReturns:\nSymbolTable: The synthesized table.", "source": "codesearchnet"}
{"code": "def edgelist_to_adjacency(edgelist):\n    adjacency = dict()\n    for (u, v) in edgelist:\n        if (u in adjacency):\n            adjacency[u].add(v)\n        else:\n            adjacency[u] = {v}\n        if (v in adjacency):\n            adjacency[v].add(u)\n        else:\n            adjacency[v] = {u}\n    return adjacency", "docstring": "Converts an iterator of edges to an adjacency dict.\n\nArgs:\nedgelist (iterable):\nAn iterator over 2-tuples where each 2-tuple is an edge.\n\nReturns:\ndict: The adjacency dict. A dict of the form {v: Nv, ...} where v is a node in a graph and\nNv is the neighbors of v as an set.", "source": "codesearchnet"}
{"code": "def __init__(self, service):\n        \n        if not isinstance(service, sm_messages.Service):\n            raise ValueError(u'service should be an instance of Service')\n        if not service.name:\n            raise ValueError(u'Bad service: the name is missing')\n\n        self._service = service  \n        self._extracted_methods = {}  \n\n        self._auth_infos = self._extract_auth_config()\n        self._quota_infos = self._extract_quota_config()\n\n        \n        self._templates_method_infos = collections.defaultdict(list)\n        self._extract_methods()", "docstring": "Constructor.\n\nArgs:\nservice (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`):\na service instance", "source": "juraj-google-style"}
{"code": "def inference(cluster_info, feed_timeout=600, qname='input'):\n\n    def _inference(iter):\n        mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())\n        try:\n            queue_in = mgr.get_queue(qname)\n            equeue = mgr.get_queue('error')\n        except (AttributeError, KeyError):\n            msg = \"Queue '{}' not found on this node, check for exceptions on other nodes.\".format(qname)\n            raise Exception(msg)\n        logging.info('Feeding partition {0} into {1} queue {2}'.format(iter, qname, queue_in))\n        count = 0\n        for item in iter:\n            count += 1\n            queue_in.put(item, block=True)\n        queue_in.put(marker.EndPartition())\n        if (count == 0):\n            return []\n        joinThr = Thread(target=queue_in.join)\n        joinThr.start()\n        timeout = feed_timeout\n        while joinThr.isAlive():\n            if (not equeue.empty()):\n                e_str = equeue.get()\n                equeue.task_done()\n                raise Exception(('exception in worker:\\n' + e_str))\n            time.sleep(1)\n            timeout -= 1\n            if (timeout <= 0):\n                raise Exception('Timeout while feeding partition')\n        logging.info('Processed {0} items in partition'.format(count))\n        results = []\n        queue_out = mgr.get_queue('output')\n        while (count > 0):\n            result = queue_out.get(block=True)\n            results.append(result)\n            count -= 1\n            queue_out.task_done()\n        logging.info('Finished processing partition')\n        return results\n    return _inference", "docstring": "Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.\n\nArgs:\n:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)\n:feed_timeout: number of seconds after which data feeding times out (600 sec default)\n:qname: *INTERNAL_USE*\n\nReturns:\nA dataRDD.mapPartitions() function", "source": "codesearchnet"}
{"code": "def load_exons(adapter, exon_lines, build='37', ensembl_genes=None):\n    \n    \n    ensembl_genes = ensembl_genes or adapter.ensembl_genes(build)\n    hgnc_id_transcripts = adapter.id_transcripts_by_gene(build=build)\n    \n    if isinstance(exon_lines, DataFrame):\n        exons = parse_ensembl_exon_request(exon_lines)\n        nr_exons = exon_lines.shape[0]\n    else:\n        exons = parse_ensembl_exons(exon_lines)\n        nr_exons = 1000000\n    \n    start_insertion = datetime.now()\n    loaded_exons = 0\n    LOG.info(\"Loading exons...\")\n    with progressbar(exons, label=\"Loading exons\", length=nr_exons) as bar:\n        for exon in bar:\n            ensg_id = exon['gene']\n            enst_id = exon['transcript']\n            gene_obj = ensembl_genes.get(ensg_id)\n            \n            if not gene_obj:\n                continue\n            \n            hgnc_id = gene_obj['hgnc_id']\n\n            if not enst_id in hgnc_id_transcripts[hgnc_id]:\n                continue\n\n            exon['hgnc_id'] = hgnc_id\n\n            exon_obj = build_exon(exon, build)\n            adapter.load_exon(exon_obj)\n            loaded_exons += 1\n\n    LOG.info('Number of exons in build {0}: {1}'.format(build, nr_exons))\n    LOG.info('Number loaded: {0}'.format(loaded_exons))\n    LOG.info('Time to load exons: {0}'.format(datetime.now() - start_insertion))", "docstring": "Load all the exons\n\nTranscript information is from ensembl.\nCheck that the transcript that the exon belongs to exists in the database\n\nArgs:\nadapter(MongoAdapter)\nexon_lines(iterable): iterable with ensembl exon lines\nbuild(str)\nensembl_transcripts(dict): Existing ensembl transcripts", "source": "juraj-google-style"}
{"code": "def parse_args(args):\n    \n    parser = argparse.ArgumentParser(\n        description=\"Imports GramVaani data for Deep Speech\"\n    )\n    parser.add_argument(\n        \"--version\",\n        action=\"version\",\n        version=\"GramVaaniImporter {ver}\".format(ver=__version__),\n    )\n    parser.add_argument(\n        \"-v\",\n        \"--verbose\",\n        action=\"store_const\",\n        required=False,\n        help=\"set loglevel to INFO\",\n        dest=\"loglevel\",\n        const=logging.INFO,\n    )\n    parser.add_argument(\n        \"-vv\",\n        \"--very-verbose\",\n        action=\"store_const\",\n        required=False,\n        help=\"set loglevel to DEBUG\",\n        dest=\"loglevel\",\n        const=logging.DEBUG,\n    )\n    parser.add_argument(\n        \"-c\",\n        \"--csv_filename\",\n        required=True,\n        help=\"Path to the GramVaani csv\",\n        dest=\"csv_filename\",\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--target_dir\",\n        required=True,\n        help=\"Directory in which to save the importer GramVaani data\",\n        dest=\"target_dir\",\n    )\n    return parser.parse_args(args)", "docstring": "Parse command line parameters\nArgs:\nargs ([str]): Command line parameters as list of strings\nReturns:\n:obj:`argparse.Namespace`: command line parameters namespace", "source": "juraj-google-style"}
{"code": "def _sync_to_uri(self, uri):\n        \n        cmd_cp = 'aws s3 cp {} {} --recursive --profile {}'.format(self.s3_version_uri, uri, self.env)\n        \n        cmd_sync = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(\n            self.s3_version_uri, uri, self.env)\n\n        cp_result = subprocess.run(cmd_cp, check=True, shell=True, stdout=subprocess.PIPE)\n        LOG.debug(\"Copy to %s before sync output: %s\", uri, cp_result.stdout)\n        LOG.info(\"Copied version %s to %s\", self.version, uri)\n\n        sync_result = subprocess.run(cmd_sync, check=True, shell=True, stdout=subprocess.PIPE)\n        LOG.debug(\"Sync to %s command output: %s\", uri, sync_result.stdout)\n        LOG.info(\"Synced version %s to %s\", self.version, uri)", "docstring": "Copy and sync versioned directory to uri in S3.\n\nArgs:\nuri (str): S3 URI to sync version to.", "source": "juraj-google-style"}
{"code": "def add_timeout_arg(a_func, timeout, **kwargs):\n    \n\n    def inner(*args):\n        \n        updated_args = args + (timeout,)\n        return a_func(*updated_args, **kwargs)\n\n    return inner", "docstring": "Updates a_func so that it gets called with the timeout as its final arg.\n\nThis converts a callable, a_func, into another callable with an additional\npositional arg.\n\nArgs:\na_func (callable): a callable to be updated\ntimeout (int): to be added to the original callable as it final positional\narg.\nkwargs: Addtional arguments passed through to the callable.\n\nReturns:\ncallable: the original callable updated to the timeout arg", "source": "juraj-google-style"}
{"code": "def __init__(self, object_type: str, subscriber: str,\n                 callback_handler: Callable = None):\n        \n        self._queue = DB.pub_sub()\n        if callback_handler is None:\n            self._queue.subscribe(object_type)\n        else:\n            self._queue.subscribe(**{object_type: callback_handler})\n        self._pub_key = _keys.published(object_type, subscriber)\n        self._data_key = _keys.data(object_type, subscriber)\n        self._processed_key = _keys.processed_events(object_type, subscriber)\n        self._object_type = object_type\n        self._subscriber = subscriber", "docstring": "Initialise the event queue.\n\nSubscribes to Redis pub/sub events of the given object type.\n\nArgs:\nobject_type (str): Object type\nsubscriber (str): Subscriber name", "source": "juraj-google-style"}
{"code": "def __init__(self, env, directory, collect_freq=1, flush_freq=100):\n        \n        super().__init__(env)\n\n        \n        self.directory = directory\n\n        \n        self.states = []\n        self.action_infos = []  \n\n        \n        self.collect_freq = collect_freq\n\n        \n        self.flush_freq = flush_freq\n\n        if not os.path.exists(directory):\n            print(\"DataCollectionWrapper: making new directory at {}\".format(directory))\n            os.makedirs(directory)\n\n        \n        self.ep_directory = None\n\n        \n        self.has_interaction = False", "docstring": "Initializes the data collection wrapper.\n\nArgs:\nenv: The environment to monitor.\ndirectory: Where to store collected data.\ncollect_freq: How often to save simulation state, in terms of environment steps.\nflush_freq: How frequently to dump data to disk, in terms of environment steps.", "source": "juraj-google-style"}
{"code": "def objects_copy(self, source_bucket, source_key, target_bucket, target_key):\n    \n    url = Api._ENDPOINT + (Api._OBJECT_COPY_PATH % (source_bucket, Api._escape_key(source_key),\n                                                    target_bucket, Api._escape_key(target_key)))\n    return datalab.utils.Http.request(url, method='POST', credentials=self._credentials)", "docstring": "Updates the metadata associated with an object.\n\nArgs:\nsource_bucket: the name of the bucket containing the source object.\nsource_key: the key of the source object being copied.\ntarget_bucket: the name of the bucket that will contain the copied object.\ntarget_key: the key of the copied object.\nReturns:\nA parsed object information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def RemoveObject(self, identifier):\n    \n    if identifier not in self._values:\n      raise KeyError('Missing cached object for identifier: {0:s}'.format(\n          identifier))\n\n    del self._values[identifier]", "docstring": "Removes a cached object based on the identifier.\n\nThis method ignores the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nRaises:\nKeyError: if the VFS object is not found in the cache.", "source": "juraj-google-style"}
{"code": "def SetActiveBreakpoints(self, breakpoints_data):\n    \n    with self._lock:\n      ids = set([x['id'] for x in breakpoints_data])\n\n      \n      for breakpoint_id in six.viewkeys(self._active) - ids:\n        self._active.pop(breakpoint_id).Clear()\n\n      \n      self._active.update([\n          (x['id'],\n           python_breakpoint.PythonBreakpoint(\n               x,\n               self._hub_client,\n               self,\n               self.data_visibility_policy))\n          for x in breakpoints_data\n          if x['id'] in ids - six.viewkeys(self._active) - self._completed])\n\n      \n      \n      \n      \n      self._completed &= ids\n\n      if self._active:\n        self._next_expiration = datetime.min  \n      else:\n        self._next_expiration = datetime.max", "docstring": "Adds new breakpoints and removes missing ones.\n\nArgs:\nbreakpoints_data: updated list of active breakpoints.", "source": "juraj-google-style"}
{"code": "def update_remote_archive(self, save_uri, timeout=(- 1)):\n    return self._client.update_with_zero_body(uri=save_uri, timeout=timeout)", "docstring": "Saves a backup of the appliance to a previously-configured remote location.\n\nArgs:\nsave_uri (dict): The URI for saving the backup to a previously configured location.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Backup details.", "source": "codesearchnet"}
{"code": "def to_hg_scheme_url(cls, url):\n        \n        regexes = cls._get_url_scheme_regexes()\n        for scheme_key, pattern, regex in regexes:\n            match = regex.match(url)\n            if match is not None:\n                groups = match.groups()\n                if len(groups) == 2:\n                    return u''.join(\n                        scheme_key,\n                        ':\n                        pattern.replace('{1}', groups[0]),\n                        groups[1])\n                elif len(groups) == 1:\n                    return u''.join(\n                        scheme_key,\n                        ':\n                        pattern,\n                        groups[0])", "docstring": "Convert a URL to local mercurial URL schemes\n\nArgs:\nurl (str): URL to map to local mercurial URL schemes\n\nexample::\n\n# schemes.gh = git://github.com/\n>> remote_url = git://github.com/westurner/dotfiles'\n>> to_hg_scheme_url(remote_url)\n<< gh://westurner/dotfiles", "source": "juraj-google-style"}
{"code": "def send_location(self, room_id, geo_uri, name, thumb_url=None, thumb_info=None,\n                      timestamp=None):\n        \n        content_pack = {\n            \"geo_uri\": geo_uri,\n            \"msgtype\": \"m.location\",\n            \"body\": name,\n        }\n        if thumb_url:\n            content_pack[\"thumbnail_url\"] = thumb_url\n        if thumb_info:\n            content_pack[\"thumbnail_info\"] = thumb_info\n\n        return self.send_message_event(room_id, \"m.room.message\", content_pack,\n                                       timestamp=timestamp)", "docstring": "Send m.location message event\n\nArgs:\nroom_id (str): The room ID to send the event in.\ngeo_uri (str): The geo uri representing the location.\nname (str): Description for the location.\nthumb_url (str): URL to the thumbnail of the location.\nthumb_info (dict): Metadata about the thumbnail, type ImageInfo.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def parse_rule(cls, txt):\n        \n        types = {\"glob\": GlobRule,\n                 \"regex\": RegexRule,\n                 \"range\": RangeRule,\n                 \"before\": TimestampRule,\n                 \"after\": TimestampRule}\n\n        \n        label, txt = Rule._parse_label(txt)\n        if label is None:\n            if '*' in txt:\n                label = \"glob\"\n            else:\n                label = \"range\"\n        elif label not in types:\n            raise ConfigurationError(\n                \"'%s' is not a valid package filter type\" % label)\n\n        rule_cls = types[label]\n        txt_ = \"%s(%s)\" % (label, txt)\n\n        try:\n            rule = rule_cls._parse(txt_)\n        except Exception as e:\n            raise ConfigurationError(\"Error parsing package filter '%s': %s: %s\"\n                                     % (txt_, e.__class__.__name__, str(e)))\n        return rule", "docstring": "Parse a rule from a string.\n\nSee rezconfig.package_filter for an overview of valid strings.\n\nArgs:\ntxt (str): String to parse.\n\nReturns:\n`Rule` instance.", "source": "juraj-google-style"}
{"code": "def on_train_begin(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_train_begin(logs)", "docstring": "Calls the `on_train_begin` methods of its callbacks.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def parsed_forensic_reports_to_csv(reports):\n    fields = ['feedback_type', 'user_agent', 'version', 'original_envelope_id', 'original_mail_from', 'original_rcpt_to', 'arrival_date', 'arrival_date_utc', 'subject', 'message_id', 'authentication_results', 'dkim_domain', 'source_ip_address', 'source_country', 'source_reverse_dns', 'source_base_domain', 'delivery_result', 'auth_failure', 'reported_domain', 'authentication_mechanisms', 'sample_headers_only']\n    if (type(reports) == OrderedDict):\n        reports = [reports]\n    csv_file = StringIO()\n    csv_writer = DictWriter(csv_file, fieldnames=fields)\n    csv_writer.writeheader()\n    for report in reports:\n        row = report.copy()\n        row['source_ip_address'] = report['source']['ip_address']\n        row['source_reverse_dns'] = report['source']['reverse_dns']\n        row['source_base_domain'] = report['source']['base_domain']\n        row['source_country'] = report['source']['country']\n        del row['source']\n        row['subject'] = report['parsed_sample']['subject']\n        row['auth_failure'] = ','.join(report['auth_failure'])\n        authentication_mechanisms = report['authentication_mechanisms']\n        row['authentication_mechanisms'] = ','.join(authentication_mechanisms)\n        del row['sample']\n        del row['parsed_sample']\n        csv_writer.writerow(row)\n    return csv_file.getvalue()", "docstring": "Converts one or more parsed forensic reports to flat CSV format, including\nheaders\n\nArgs:\nreports: A parsed forensic report or list of parsed forensic reports\n\nReturns:\nstr: Parsed forensic report data in flat CSV format, including headers", "source": "codesearchnet"}
{"code": "def rescale(image: np.ndarray, scale: float, data_format: Optional[ChannelDimension]=None, dtype: np.dtype=np.float32, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    if not isinstance(image, np.ndarray):\n        raise TypeError(f'Input image must be of type np.ndarray, got {type(image)}')\n    rescaled_image = image.astype(np.float64) * scale\n    if data_format is not None:\n        rescaled_image = to_channel_dimension_format(rescaled_image, data_format, input_data_format)\n    rescaled_image = rescaled_image.astype(dtype)\n    return rescaled_image", "docstring": "Rescales `image` by `scale`.\n\nArgs:\nimage (`np.ndarray`):\nThe image to rescale.\nscale (`float`):\nThe scale to use for rescaling the image.\ndata_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ndtype (`np.dtype`, *optional*, defaults to `np.float32`):\nThe dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature\nextractors.\ninput_data_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input image.\n\nReturns:\n`np.ndarray`: The rescaled image.", "source": "github-repos"}
{"code": "def automatic_density_by_vol(structure, kppvol, force_gamma=False):\n    vol = structure.lattice.reciprocal_lattice.volume\n    kppa = ((kppvol * vol) * structure.num_sites)\n    return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)", "docstring": "Returns an automatic Kpoint object based on a structure and a kpoint\ndensity per inverse Angstrom^3 of reciprocal cell.\n\nAlgorithm:\nSame as automatic_density()\n\nArgs:\nstructure (Structure): Input structure\nkppvol (int): Grid density per Angstrom^(-3) of reciprocal cell\nforce_gamma (bool): Force a gamma centered mesh\n\nReturns:\nKpoints", "source": "codesearchnet"}
{"code": "def gini(y, p):\n    \n\n    \n    assert y.shape == p.shape\n\n    n_samples = y.shape[0]\n\n    \n    \n    arr = np.array([y, p]).transpose()\n    true_order = arr[arr[:,0].argsort()][::-1,0]\n    pred_order = arr[arr[:,1].argsort()][::-1,0]\n\n    \n    l_true = np.cumsum(true_order) / np.sum(true_order)\n    l_pred = np.cumsum(pred_order) / np.sum(pred_order)\n    l_ones = np.linspace(1/n_samples, 1, n_samples)\n\n    \n    g_true = np.sum(l_ones - l_true)\n    g_pred = np.sum(l_ones - l_pred)\n\n    \n    return g_pred / g_true", "docstring": "Normalized Gini Coefficient.\n\nArgs:\ny (numpy.array): target\np (numpy.array): prediction\n\nReturns:\ne (numpy.float64): normalized Gini coefficient", "source": "juraj-google-style"}
{"code": "def _add_train_op(self, train_op):\n    if train_op is not None:\n        if not isinstance(train_op, tensor.Tensor) and (not isinstance(train_op, ops.Operation)):\n            raise TypeError(f'`train_op` {train_op} needs to be a Tensor or Op.')\n        ops.add_to_collection(constants.TRAIN_OP_KEY, train_op)", "docstring": "Add train op to the SavedModel.\n\nNote that this functionality is in development, and liable to be\nmoved elsewhere.\n\nArgs:\ntrain_op: Op or group of ops that are used for training. These are stored\nas a collection with key TRAIN_OP_KEY, but not executed.\n\nRaises:\nTypeError if Train op is not of type `Operation`.", "source": "github-repos"}
{"code": "def pipeline(gcp_project_id: str, region: str, component_artifact_root: str, dataflow_staging_root: str, beam_runner: str):\n    ingest_data_task = DataIngestOp(base_artifact_path=component_artifact_root)\n    data_preprocessing_task = DataPreprocessingOp(ingested_dataset_path=ingest_data_task.outputs['ingested_dataset_path'], base_artifact_path=component_artifact_root, gcp_project_id=gcp_project_id, region=region, dataflow_staging_root=dataflow_staging_root, beam_runner=beam_runner)\n    train_model_task = TrainModelOp(preprocessed_dataset_path=data_preprocessing_task.outputs['preprocessed_dataset_path'], base_artifact_path=component_artifact_root)", "docstring": "KFP pipeline definition.\n\nArgs:\ngcp_project_id (str): ID for the google cloud project to deploy the pipeline to.\nregion (str): Region in which to deploy the pipeline.\ncomponent_artifact_root (str): Path to artifact repository where Kubeflow Pipelines\ncomponents can store artifacts.\ndataflow_staging_root (str): Path to staging directory for the dataflow runner.\nbeam_runner (str): Beam runner: DataflowRunner or DirectRunner.", "source": "github-repos"}
{"code": "def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices, **kwargs):\n    summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)\n    return self._resource_apply_sparse(summed_grad, handle, unique_indices, **kwargs)", "docstring": "Add ops to apply sparse gradients to `handle`, with repeated indices.\n\nOptimizers which override this method must deal with repeated indices. See\nthe docstring of `_apply_sparse_duplicate_indices` for details. By default\nthe correct behavior, to sum non-unique indices and their associated\ngradients, is enforced by first pre-processing `grad` and `indices` and\npassing them on to `_resource_apply_sparse`. Optimizers which deal correctly\nwith duplicate indices may instead override this method to avoid the\noverhead of summing.\n\nArgs:\ngrad: a `Tensor` representing the gradient for the affected indices.\nhandle: a `Tensor` of dtype `resource` which points to the variable to be\nupdated.\nindices: a `Tensor` of integral type representing the indices for which\nthe gradient is nonzero. Indices may be repeated.\n**kwargs: May optionally contain `apply_state`\n\nReturns:\nAn `Operation` which updates the value of the variable.", "source": "github-repos"}
{"code": "def __init__(self, origin='center', coords='relative', **kwargs):\n        \n        self._origin = origin\n        self._coords = coords\n        super(OrthoProjection, self).__init__(**kwargs)", "docstring": "Orthogonal Projection Object cretes projection Object that can be used in Camera\n\nArgs:\norigin (str): 'center' or 'corner'\ncoords (str): 'relative' or 'absolute'\n\nReturns:\nOrthoProjection instance", "source": "juraj-google-style"}
{"code": "def reconnect(self):\n    if (self._auth_method is 'userpass'):\n        self._mgr = manager.connect(host=self._conn[0], port=self._conn[1], username=self._auth[0], password=self._auth[1], hostkey_verify=self._hostkey_verify)\n    elif (self._auth_method is 'key'):\n        self._mgr = manager.connect(host=self._conn[0], port=self._conn[1], username=self._auth[0], key_filename=self._auth_key, hostkey_verify=self._hostkey_verify)\n    else:\n        raise ValueError('auth_method incorrect value.')\n    self._mgr.timeout = 600\n    return True", "docstring": "Reconnect session with device.\n\nArgs:\nNone\n\nReturns:\nbool: True if reconnect succeeds, False if not.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def get_effective_ecs(self, strain, order=2):\n        \n        ec_sum = 0\n        for n, ecs in enumerate(self[order-2:]):\n            ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)\n        return ec_sum", "docstring": "Returns the effective elastic constants\nfrom the elastic tensor expansion.\n\nArgs:\nstrain (Strain or 3x3 array-like): strain condition\nunder which to calculate the effective constants\norder (int): order of the ecs to be returned", "source": "juraj-google-style"}
{"code": "def sheets_create(config, auth, sheet_name, sheet_tab, template_sheet=None, template_tab=None):\n    created = False\n    sheet_id, tab_id = sheets_tab_id(config, auth, sheet_name, sheet_tab)\n    if sheet_id is None:\n        if config.verbose:\n            print('SHEET CREATE', sheet_name, sheet_tab)\n        body = {'properties': {'title': sheet_name}, 'sheets': [{'properties': {'title': sheet_tab}}]}\n        spreadsheet = API_Sheets(config, auth).spreadsheets().create(body=body).execute()\n        sheet_id = spreadsheet['spreadsheetId']\n        tab_id = spreadsheet['sheets'][0]['properties']['title']\n        created = True\n    if (created or tab_id is None) and template_sheet and template_tab:\n        if config.verbose:\n            print('SHEET TAB COPY', sheet_tab)\n        sheets_tab_copy(config, auth, template_sheet, template_tab, sheet_id, sheet_tab, True)\n    elif tab_id is None:\n        if config.verbose:\n            print('SHEET TAB CREATE', sheet_name, sheet_tab)\n        sheets_tab_create(config, auth, sheet_name, sheet_tab)\n    elif config.verbose:\n        print('SHEET EXISTS', sheet_name, sheet_tab)\n    return (sheet_id, tab_id, created)", "docstring": "Checks if sheet with name already exists ( outside of trash ) and\n\nif not, creates the sheet. Both sheet and tab must be provided or both must be\nomitted to create\na blank sheet and tab.\n\nArgs:\n* auth: (string) Either user or service.\n* sheet_name: (string) name of sheet to create, used as key to check if it\nexists in the future.\n* sheet_tab: (string) name of the tab to create.\n* template_sheet: (string) optional sheet to copy tempalte from.\n* template_tab: (string) optional tab to copy template from.\n* parent: (string) the Google Drive to upload the file to.\n\nReturns:\n* JSON specification of the file created or existing.", "source": "github-repos"}
{"code": "def compute_output(self, o, output_shape=None):\n    \n    if self.combine_dims:\n      o = mtf.transpose(o, o.shape - self.o_dims + self.o_dims)\n      o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[0])\n      reduced_dims = [self.wo.shape.dims[0]]\n    else:\n      reduced_dims = self.o_dims\n    return mtf.einsum(\n        [o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims)", "docstring": "Compute output of multihead attention.\n\nArgs:\no: a Tensor with dimensions\nquery_heads_dims + {value_dim} + other_dims\noutput_shape: an optional Shape\nReturns:\na Tensor with shape:\n{output_dim} + other_dims", "source": "juraj-google-style"}
{"code": "def _map_or_apply(input_layer, op, *args, **kwargs):\n    kwargs.pop('name')\n    right = kwargs.pop('right_', False)\n    if input_layer.is_sequence():\n        if right:\n            args += (input_layer,)\n        else:\n            args = ((input_layer,) + args)\n        result = [op(*x, **kwargs) for x in _zip_with_scalars(args)]\n        if (len(result) != len(input_layer)):\n            raise ValueError('Not all arguments were the same length.')\n        return result\n    else:\n        if right:\n            my_op = (lambda x: op(*(args + (x,)), **kwargs))\n        else:\n            my_op = (lambda x: op(x, *args, **kwargs))\n        return my_op(input_layer.tensor)", "docstring": "Map op across the input if it is a sequence; otherwise apply it.\n\nNote: This takes a keyword argument `right_` to right apply the op to this\ninput. The name is chosen to limit conflicts with other keyword arguments.\n\nArgs:\ninput_layer: The input_layer (self when chaining).\nop: The op to apply:\n*args: Positional arguments for op; if input is a list then any iterable is\ntreated as an argument to co-map (i.e. it zips across non-scalars).\n**kwargs: Keyword arguments for op; note that `right_` is used by this\nfunction.\nReturns:\nA new Pretty Tensor that is the result of applying the op to every internal\nTensor.\nRaises:\nValueError: If a sequence argument is not the same length as the\ninput_layer.", "source": "codesearchnet"}
{"code": "def search(self, query_string):\n    query = self.create_query()\n    parser = QueryParser(query_string, query)\n    parser.parse()\n    return self.query(query)", "docstring": "Performs a search against the index using lunr query syntax.\n\nResults will be returned sorted by their score, the most relevant\nresults will be returned first.\n\nFor more programmatic querying use `lunr.Index.query`.\n\nArgs:\nquery_string (str): A string to parse into a Query.\n\nReturns:\ndict: Results of executing the query.", "source": "codesearchnet"}
{"code": "def _write_module_descriptor_file(handle, module_dir):\n  \n  readme = _module_descriptor_file(module_dir)\n  readme_content = (\n      \"Module: %s\\nDownload Time: %s\\nDownloader Hostname: %s (PID:%d)\" %\n      (handle, str(datetime.datetime.today()), socket.gethostname(),\n       os.getpid()))\n  \n  \n  \n  tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)", "docstring": "Writes a descriptor file about the directory containing a module.\n\nArgs:\nhandle: Module name/handle.\nmodule_dir: Directory where a module was downloaded.", "source": "juraj-google-style"}
{"code": "def update(self, **kwargs):\n    return self.client.api.update_container(self.id, **kwargs)", "docstring": "Update resource configuration of the containers.\n\nArgs:\nblkio_weight (int): Block IO (relative weight), between 10 and 1000\ncpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period\ncpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota\ncpu_shares (int): CPU shares (relative weight)\ncpuset_cpus (str): CPUs in which to allow execution\ncpuset_mems (str): MEMs in which to allow execution\nmem_limit (int or str): Memory limit\nmem_reservation (int or str): Memory soft limit\nmemswap_limit (int or str): Total memory (memory + swap), -1 to\ndisable swap\nkernel_memory (int or str): Kernel memory limit\nrestart_policy (dict): Restart policy dictionary\n\nReturns:\n(dict): Dictionary containing a ``Warnings`` key.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def register_intent_parser(self, intent_parser):\n    if (hasattr(intent_parser, 'validate') and callable(intent_parser.validate)):\n        self.intent_parsers.append(intent_parser)\n    else:\n        raise ValueError(('%s is not an intent parser' % str(intent_parser)))", "docstring": "\"Enforce\" the intent parser interface at registration time.\n\nArgs:\nintent_parser(intent): Intent to be registered.\n\nRaises:\nValueError: on invalid intent", "source": "codesearchnet"}
{"code": "def __init__(self, connection_param, queue, output_exchange, output_key):\n        \n        super(PikaDaemon, self).__init__(queue)\n\n        self.connection_param = connection_param\n\n        self.queue = queue\n        self.output_exchange = output_exchange\n\n        self.content_type = \"application/json\"\n\n        self.output_key = output_key", "docstring": "Pika and Daemon wrapper for handling AMQP connections.\n\nArgs:\nconnection_param (pika.ConnectionParameters): object setting the\nconnection\nqueue (str): name of queue where the daemon should listen\noutput_exchange (str): name of exchange where the daemon should put\nresponses\noutput_key (str): routing key for output exchange", "source": "juraj-google-style"}
{"code": "def all_elements_equal(value):\n    \n    if is_scalar(value):\n        return True\n    return np.array(value == value.flatten()[0]).all()", "docstring": "Checks if all elements in the given value are equal to each other.\n\nIf the input is a single value the result is trivial. If not, we compare all the values to see\nif they are exactly the same.\n\nArgs:\nvalue (ndarray or number): a numpy array or a single number.\n\nReturns:\nbool: true if all elements are equal to each other, false otherwise", "source": "juraj-google-style"}
{"code": "def filter_by_hoys(self, hoys):\n        \n        _moys = tuple(int(hour * 60) for hour in hoys)\n        return self.filter_by_moys(_moys)", "docstring": "Filter the Data Collection based on an analysis period.\n\nArgs:\nhoys: A List of hours of the year 0..8759\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def qubits_tab(backend):\n    \n    props = backend.properties().to_dict()\n\n    header_html = \"<div><font style='font-weight:bold'>{key}</font>: {value}</div>\"\n    header_html = header_html.format(key='last_update_date',\n                                     value=props['last_update_date'])\n    update_date_widget = widgets.HTML(value=header_html)\n\n    qubit_html = \"<table>\"\n    qubit_html += \n\n    qubit_html += \"<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>\"\n    qubit_html += \"<th>U1 gate error</th><th>U2 gate error</th><th>U3 gate error</th>\"\n    qubit_html += \"<th>Readout error</th></tr>\"\n    qubit_footer = \"</table>\"\n\n    for qub in range(len(props['qubits'])):\n        name = 'Q%s' % qub\n        qubit_data = props['qubits'][qub]\n        gate_data = props['gates'][3*qub:3*qub+3]\n        t1_info = qubit_data[0]\n        t2_info = qubit_data[1]\n        freq_info = qubit_data[2]\n        readout_info = qubit_data[3]\n\n        freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']\n        T1 = str(round(t1_info['value'],  \n                       5))+' ' + t1_info['unit']\n        T2 = str(round(t2_info['value'],  \n                       5))+' ' + t2_info['unit']\n        \n        U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))\n        \n        U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))\n        \n        U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))\n\n        readout_error = round(readout_info['value'], 5)\n        qubit_html += \"<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td>\"\n        qubit_html += \"<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>\"\n        qubit_html = qubit_html % (name, freq, T1, T2, U1, U2, U3, readout_error)\n    qubit_html += qubit_footer\n\n    qubit_widget = widgets.HTML(value=qubit_html)\n\n    out = widgets.VBox([update_date_widget,\n                        qubit_widget])\n\n    return out", "docstring": "The qubits properties widget\n\nArgs:\nbackend (IBMQbackend): The backend.\n\nReturns:\nVBox: A VBox widget.", "source": "juraj-google-style"}
{"code": "def segment_max(data, segment_ids, num_segments=None, sorted=False):\n    _segment_reduce_validation(data, segment_ids)\n    if any_symbolic_tensors((data,)):\n        return SegmentMax(num_segments, sorted).symbolic_call(data, segment_ids)\n    return backend.math.segment_max(data, segment_ids, num_segments=num_segments, sorted=sorted)", "docstring": "Computes the max of segments in a tensor.\n\nArgs:\ndata: Input tensor.\nsegment_ids: A N-D tensor containing segment indices for each\nelement in `data`. data.shape[:len(segment_ids.shape)] should match.\nnum_segments: An integer representing the total number of\nsegments. If not specified, it is inferred from the maximum\nvalue in `segment_ids`.\nsorted: A boolean indicating whether `segment_ids` is sorted.\nDefaults to `False`.\n\nReturns:\nA tensor containing the max of segments, where each element\nrepresents the max of the corresponding segment in `data`.\n\nExample:\n\n>>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200])\n>>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2])\n>>> num_segments = 3\n>>> keras.ops.segment_max(data, segment_ids, num_segments)\narray([2, 20, 200], dtype=int32)", "source": "github-repos"}
{"code": "def __init__(self,\n               url=None,\n               extract_method=None,\n               path=None):\n    \n    self.url = url\n    self.path = path\n    self._extract_method = extract_method", "docstring": "Resource constructor.\n\nArgs:\nurl: `str`, the URL at which to download the resource.\nextract_method: `ExtractMethod` to be used to extract resource. If\nnot set, will be guessed from downloaded file name `original_fname`.\npath: `str`, path of resource on local disk. Can be None if resource has\nnot be downloaded yet. In such case, `url` must be set.", "source": "juraj-google-style"}
{"code": "def nb_ll(data, P, R):\n    (genes, cells) = data.shape\n    clusters = P.shape[1]\n    lls = np.zeros((cells, clusters))\n    for c in range(clusters):\n        P_c = P[(:, c)].reshape((genes, 1))\n        R_c = R[(:, c)].reshape((genes, 1))\n        ll = (gammaln((R_c + data)) - gammaln(R_c))\n        ll += ((data * np.log(P_c)) + xlog1py(R_c, (- P_c)))\n        lls[(:, c)] = ll.sum(0)\n    return lls", "docstring": "Returns the negative binomial log-likelihood of the data.\n\nArgs:\ndata (array): genes x cells\nP (array): NB success probability param - genes x clusters\nR (array): NB stopping param - genes x clusters\n\nReturns:\ncells x clusters array of log-likelihoods", "source": "codesearchnet"}
{"code": "def _create_outbound_stream(self, config=None):\n    if (config is None):\n        raise ValueError('No stream config to create stream from.')\n    name = self._get_stream_name(config)\n    stream_handlers = self._get_stream_handlers(config, name)\n    stream_input = config.get('input', None)\n    stream_output = config.get('output', None)\n    if (type(stream_output) is int):\n        return PortOutputStream(name, stream_input, stream_output, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})\n    else:\n        if (stream_output is not None):\n            log.warn('Output of stream {} is not an integer port. Stream outputs can only be ports.'.format(name))\n        return ZMQStream(name, stream_input, stream_handlers, zmq_args={'zmq_context': self.broker.context, 'zmq_proxy_xsub_url': self.broker.XSUB_URL, 'zmq_proxy_xpub_url': self.broker.XPUB_URL})", "docstring": "Creates an outbound stream from its config.\n\nParams:\nconfig:       stream configuration as read by ait.config\nReturns:\nstream:       a Stream\nRaises:\nValueError:   if any of the required config values are missing", "source": "codesearchnet"}
{"code": "def translate_item_ids(self, item_ids, language, is_nested=None):\n    if (is_nested is None):\n\n        def is_nested_fun(x):\n            return True\n    elif isinstance(is_nested, bool):\n\n        def is_nested_fun(x):\n            return is_nested\n    else:\n        is_nested_fun = is_nested\n    all_item_type_ids = ItemType.objects.get_all_item_type_ids()\n    groupped = proso.list.group_by(item_ids, by=(lambda item_id: all_item_type_ids[item_id]))\n    result = {}\n    for (item_type_id, items) in groupped.items():\n        with timeit('translating item type {}'.format(item_type_id)):\n            item_type = ItemType.objects.get_all_types()[item_type_id]\n            model = ItemType.objects.get_model(item_type_id)\n            kwargs = {'{}__in'.format(item_type['foreign_key']): items}\n            if ('language' in item_type):\n                kwargs[item_type['language']] = language\n            if (any([(not is_nested_fun(item_id)) for item_id in items]) and hasattr(model.objects, 'prepare_related')):\n                objs = model.objects.prepare_related()\n            elif hasattr(model.objects, 'prepare'):\n                objs = model.objects.prepare()\n            else:\n                objs = model.objects\n            for obj in objs.filter(**kwargs):\n                item_id = getattr(obj, item_type['foreign_key'])\n                result[item_id] = obj.to_json(nested=is_nested_fun(item_id))\n    return result", "docstring": "Translate a list of item ids to JSON objects which reference them.\n\nArgs:\nitem_ids (list[int]): item ids\nlanguage (str): language used for further filtering (some objects\nfor different languages share the same item)\nis_nested (function): mapping from item ids to booleans, where the\nboolean value indicates whether the item is nested\n\nReturns:\ndict: item id -> JSON object", "source": "codesearchnet"}
{"code": "def get_pkg_module_names(package_path):\n    \n    module_names = set()\n    for fobj, modname, _ in pkgutil.iter_modules(path=[package_path]):\n        filename = os.path.join(fobj.path, '%s.py' % modname)\n        if os.path.exists(filename):\n            module_names.add(os.path.abspath(filename))\n    return module_names", "docstring": "Returns module filenames from package.\n\nArgs:\npackage_path: Path to Python package.\nReturns:\nA set of module filenames.", "source": "juraj-google-style"}
{"code": "def get_attributes(path):\n    if (not os.path.exists(path)):\n        raise CommandExecutionError('Path not found: {0}'.format(path))\n    attributes = {}\n    intAttributes = win32file.GetFileAttributes(path)\n    attributes['archive'] = ((intAttributes & 32) == 32)\n    attributes['reparsePoint'] = ((intAttributes & 1024) == 1024)\n    attributes['compressed'] = ((intAttributes & 2048) == 2048)\n    attributes['directory'] = ((intAttributes & 16) == 16)\n    attributes['encrypted'] = ((intAttributes & 16384) == 16384)\n    attributes['hidden'] = ((intAttributes & 2) == 2)\n    attributes['normal'] = ((intAttributes & 128) == 128)\n    attributes['notIndexed'] = ((intAttributes & 8192) == 8192)\n    attributes['offline'] = ((intAttributes & 4096) == 4096)\n    attributes['readonly'] = ((intAttributes & 1) == 1)\n    attributes['system'] = ((intAttributes & 4) == 4)\n    attributes['temporary'] = ((intAttributes & 256) == 256)\n    attributes['mountedVolume'] = False\n    if ((attributes['reparsePoint'] is True) and (attributes['directory'] is True)):\n        fileIterator = win32file.FindFilesIterator(path)\n        findDataTuple = next(fileIterator)\n        if (findDataTuple[6] == 2684354563):\n            attributes['mountedVolume'] = True\n    attributes['symbolicLink'] = False\n    if (attributes['reparsePoint'] is True):\n        fileIterator = win32file.FindFilesIterator(path)\n        findDataTuple = next(fileIterator)\n        if (findDataTuple[6] == 2684354572):\n            attributes['symbolicLink'] = True\n    return attributes", "docstring": "Return a dictionary object with the Windows\nfile attributes for a file.\n\nArgs:\npath (str): The path to the file or directory\n\nReturns:\ndict: A dictionary of file attributes\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.get_attributes c:\\\\temp\\\\a.txt", "source": "codesearchnet"}
{"code": "def Matches(self, file_entry, search_depth):\n    \n    if self._location_segments is None:\n      location_match = None\n    else:\n      location_match = self._CheckLocation(file_entry, search_depth)\n      if not location_match:\n        return False, location_match\n\n      if search_depth != self._number_of_location_segments:\n        return False, location_match\n\n    match = self._CheckFileEntryType(file_entry)\n    if match is not None and not match:\n      return False, location_match\n\n    match = self._CheckIsAllocated(file_entry)\n    if match is not None and not match:\n      return False, location_match\n\n    return True, location_match", "docstring": "Determines if the file entry matches the find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\nsearch_depth (int): number of location path segments to compare.\n\nReturns:\ntuple: contains:\n\nbool: True if the file entry matches the find specification, False\notherwise.\nbool: True if the location matches, False if not or None if no location\nspecified.", "source": "juraj-google-style"}
{"code": "def __init__(self, profile_datum):\n    self.total_op_time = profile_datum.op_time\n    self.total_exec_time = profile_datum.exec_time\n    device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n    self._node_to_exec_count = {device_and_node: 1}", "docstring": "Constructor.\n\nArgs:\nprofile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to\ninitialize this object with.", "source": "github-repos"}
{"code": "def load_gene(ensembl, gene_id, de_novos=[]):\n    \n    \n    transcripts = minimise_transcripts(ensembl, gene_id, de_novos)\n    \n    genes = []\n    for transcript_id in transcripts:\n        gene = construct_gene_object(ensembl, transcript_id)\n        genes.append(gene)\n    \n    if len(genes) == 0:\n        raise IndexError(\"{0}: no suitable transcripts\".format(gene_id))\n    \n    return genes", "docstring": "sort out all the necessary sequences and positions for a gene\n\nArgs:\nensembl: EnsemblRequest object to request data from ensembl\ngene_id: HGNC symbol for gene\nde_novos: list of de novo positions, so we can check they all fit in\nthe gene transcript\n\nReturns:\nlist of Transcript objects for gene, including genomic ranges and sequences", "source": "juraj-google-style"}
{"code": "def mkdir(path, mode=511, dir_fd=None):\n    system = get_instance(path)\n    relative = system.relpath(path)\n    parent_dir = dirname(relative.rstrip('/'))\n    if parent_dir:\n        parent = ((path.rsplit(relative, 1)[0] + parent_dir) + '/')\n        if (not system.isdir(parent)):\n            raise ObjectNotFoundError((\"No such file or directory: '%s'\" % parent))\n    if system.isdir(system.ensure_dir_path(path)):\n        raise ObjectExistsError((\"File exists: '%s'\" % path))\n    system.make_dir(relative, relative=True)", "docstring": "Create a directory named path with numeric mode mode.\n\nEquivalent to \"os.mkdir\".\n\nArgs:\npath (path-like object): Path or URL.\nmode (int): The mode parameter is passed to os.mkdir();\nsee the os.mkdir() description for how it is interpreted.\nNot supported on cloud storage objects.\ndir_fd: directory descriptors;\nsee the os.remove() description for how it is interpreted.\nNot supported on cloud storage objects.\n\nRaises:\nFileExistsError : Directory already exists.\nFileNotFoundError: Parent directory not exists.", "source": "codesearchnet"}
{"code": "def remove_backup(name):\n    if (name not in list_backups()):\n        log.debug('Backup already removed: %s', name)\n        return True\n    ps_cmd = ['Remove-WebConfigurationBackup', '-Name', \"'{0}'\".format(name)]\n    cmd_ret = _srvmgr(ps_cmd)\n    if (cmd_ret['retcode'] != 0):\n        msg = 'Unable to remove web configuration: {0}\\nError: {1}'.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n    return (name not in list_backups())", "docstring": "Remove an IIS Configuration backup from the System.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the backup to remove\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.remove_backup backup_20170209", "source": "codesearchnet"}
{"code": "def get_password(request, mapping) -> None:\n    \n    LOGGER.debug('Received request \"%s\"', request)\n    if 'host' not in request:\n        LOGGER.error('host= entry missing in request. '\n                     'Cannot query without a host')\n        return\n\n    host = request['host']\n    if 'path' in request:\n        host = '/'.join([host, request['path']])\n\n    def skip(line, skip):\n        return line[skip:]\n\n    LOGGER.debug('Iterating mapping to match against host \"%s\"', host)\n    for section in mapping.sections():\n        if fnmatch.fnmatch(host, section):\n            LOGGER.debug('Section \"%s\" matches requested host \"%s\"',\n                         section, host)\n            \n            pass_target = mapping.get(section, 'target').replace(\n                \"${host}\", request['host'])\n\n            password_extractor = SpecificLineExtractor(\n                0, 0, option_suffix='_password')\n            password_extractor.configure(mapping[section])\n            \n            \n            username_extractor = _username_extractors[mapping[section].get(\n                'username_extractor', fallback=_line_extractor_name)]\n            username_extractor.configure(mapping[section])\n\n            LOGGER.debug('Requesting entry \"%s\" from pass', pass_target)\n            output = subprocess.check_output(\n                ['pass', 'show', pass_target]).decode('utf-8')\n            lines = output.splitlines()\n\n            password = password_extractor.get_value(pass_target, lines)\n            username = username_extractor.get_value(pass_target, lines)\n            if password:\n                print('password={password}'.format(  \n                    password=password))\n            if 'username' not in request and username:\n                print('username={username}'.format(  \n                    username=username))\n            return\n\n    LOGGER.warning('No mapping matched')\n    sys.exit(1)", "docstring": "Resolve the given credential request in the provided mapping definition.\n\nThe result is printed automatically.\n\nArgs:\nrequest:\nThe credential request specified as a dict of key-value pairs.\nmapping:\nThe mapping configuration as a ConfigParser instance.", "source": "juraj-google-style"}
{"code": "def _build_system_message(self, error: str) -> LogMessage:\n    return self._base_log.copy() | LogMessage(log_type=LogType.SYSTEM.value, error=error)", "docstring": "Adds system error information to base log message.\n\nArgs:\n* error: error that occurred\n\nReturns:\n* Log: dictionary containing log data", "source": "github-repos"}
{"code": "def find_runner(program):\n    if (os.path.isfile(program) and (not os.access(program, os.X_OK))):\n        try:\n            opened = open(program)\n        except PermissionError:\n            return None\n        first_line = opened.readline().strip()\n        if first_line.startswith('\n            return shlex.split(first_line[2:])\n        if program.endswith('.py'):\n            return [sys.executable]\n    return None", "docstring": "Return a command that will run program.\n\nArgs:\nprogram: The string name of the program to try to run.\nReturns:\ncommandline list of strings to run the program (eg. with subprocess.call()) or None", "source": "codesearchnet"}
{"code": "def purity(labels, true_labels):\n    purity = 0.0\n    for i in set(labels):\n        indices = (labels == i)\n        true_clusters = true_labels[indices]\n        if (len(true_clusters) == 0):\n            continue\n        counts = Counter(true_clusters)\n        (lab, count) = counts.most_common()[0]\n        purity += count\n    return (float(purity) / len(labels))", "docstring": "Calculates the purity score for the given labels.\n\nArgs:\nlabels (array): 1D array of integers\ntrue_labels (array): 1D array of integers - true labels\n\nReturns:\npurity score - a float bewteen 0 and 1. Closer to 1 is better.", "source": "codesearchnet"}
{"code": "def cut_sphere(\n            self,\n            radius=15.,\n            origin=None,\n            outside_sliced=True,\n            preserve_bonds=False):\n        \n        if origin is None:\n            origin = np.zeros(3)\n        elif pd.api.types.is_list_like(origin):\n            origin = np.array(origin, dtype='f8')\n        else:\n            origin = self.loc[origin, ['x', 'y', 'z']]\n\n        molecule = self.get_distance_to(origin)\n        if outside_sliced:\n            molecule = molecule[molecule['distance'] < radius]\n        else:\n            molecule = molecule[molecule['distance'] > radius]\n\n        if preserve_bonds:\n            molecule = self._preserve_bonds(molecule)\n\n        return molecule", "docstring": "Cut a sphere specified by origin and radius.\n\nArgs:\nradius (float):\norigin (list): Please note that you can also pass an\ninteger. In this case it is interpreted as the\nindex of the atom which is taken as origin.\noutside_sliced (bool): Atoms outside/inside the sphere\nare cut out.\npreserve_bonds (bool): Do not cut covalent bonds.\n\nReturns:\nCartesian:", "source": "juraj-google-style"}
{"code": "def to(self, jid: str):\n    if ((jid is not None) and (not isinstance(jid, str))):\n        raise TypeError(\"'to' MUST be a string\")\n    self._to = (aioxmpp.JID.fromstr(jid) if (jid is not None) else None)", "docstring": "Set jid of the receiver.\n\nArgs:\njid (str): the jid of the receiver.", "source": "codesearchnet"}
{"code": "def _setup_logger(self, logging_level: int, log_to_console: bool):\n        \n        self.logger = logging.getLogger('discord')\n        self.logger.handlers = []\n        self.logger.setLevel(logging_level)\n        formatter = logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%S')\n        file_handler = logging.FileHandler('pycord.log')\n        file_handler.setFormatter(formatter)\n        file_handler.setLevel(logging_level)\n        self.logger.addHandler(file_handler)\n        if log_to_console:\n            stream_handler = logging.StreamHandler(sys.stdout)\n            stream_handler.setFormatter(formatter)\n            stream_handler.setLevel(logging_level)\n            self.logger.addHandler(stream_handler)", "docstring": "Sets up the internal logger\n\nArgs:\nlogging_level: what logging level to use\nlog_to_console: whether or not to log to the console", "source": "juraj-google-style"}
{"code": "def _frame_advance(self, action):\n    self.controllers[0][:] = action\n    _LIB.Step(self._env)", "docstring": "Advance a frame in the emulator with an action.\n\nArgs:\naction (byte): the action to press on the joy-pad\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):\n  \n  if range_min == range_max:\n    return tensor\n\n  float_tensor = tf.to_float(tensor)\n  scaled_tensor = tf.divide((tf.subtract(float_tensor, range_min) *\n                             tf.constant(float(scale_max - scale_min))),\n                            tf.constant(float(range_max - range_min)))\n  shifted_tensor = scaled_tensor + tf.constant(float(scale_min))\n\n  return shifted_tensor", "docstring": "Scale a tensor to scale_min to scale_max.\n\nArgs:\ntensor: input tensor. Should be a numerical tensor.\nrange_min: min expected value for this feature/tensor.\nrange_max: max expected Value.\nscale_min: new expected min value.\nscale_max: new expected max value.\n\nReturns:\nscaled tensor.", "source": "juraj-google-style"}
{"code": "def __init__(self, names=None):\n    \n    if not names:\n      raise errors.FormatError('Missing names value.')\n\n    super(ArtifactGroupSourceType, self).__init__()\n    self.names = names", "docstring": "Initializes a source type.\n\nArgs:\nnames (Optional[str]): artifact definition names.\n\nRaises:\nFormatError: when artifact names is not set.", "source": "juraj-google-style"}
{"code": "def __call__(self, match_quality_matrix):\n    assert match_quality_matrix.dim() == 2\n    if match_quality_matrix.numel() == 0:\n        default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)\n        default_match_labels = match_quality_matrix.new_full((match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8)\n        return (default_matches, default_match_labels)\n    assert torch.all(match_quality_matrix >= 0)\n    matched_vals, matches = match_quality_matrix.max(dim=0)\n    match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\n    for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\n        low_high = (matched_vals >= low) & (matched_vals < high)\n        match_labels[low_high] = l\n    if self.allow_low_quality_matches:\n        self.set_low_quality_matches_(match_labels, match_quality_matrix)\n    return (matches, match_labels)", "docstring": "Args:\nmatch_quality_matrix (Tensor[float]): an MxN tensor, containing the\npairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0\n(due to the us of `torch.nonzero` for selecting indices in `set_low_quality_matches_`).\n\nReturns:\nmatches (Tensor[int64]): a vector of length N, where matches[i] is a matched\nground-truth index in [0, M)\nmatch_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates\nwhether a prediction is a true or false positive or ignored", "source": "github-repos"}
{"code": "def case_mme_delete(self, case_obj, user_obj):\n        \n        institute_obj = self.institute(case_obj['owner'])\n        \n        for individual in case_obj['individuals']:\n            if individual['phenotype'] == 2: \n                \n                self.create_event(institute=institute_obj, case=case_obj, user=user_obj,\n                    link='', category='case', verb='mme_remove', subject=individual['display_name'],\n                    level='specific')\n\n        \n        case_obj['mme_submission'] = None\n        updated_case = self.update_case(case_obj)\n        return updated_case", "docstring": "Delete a MatchMaker submission from a case record\nand creates the related event.\nArgs:\ncase_obj(dict): a scout case object\nuser_obj(dict): a scout user object\nReturns:\nupdated_case(dict): the updated scout case", "source": "juraj-google-style"}
{"code": "def _GetMaps(name, commands, default_options):\n    global_options = copy.copy(default_options)\n    options_map = collections.defaultdict(lambda: copy.copy(default_options))\n    subcommands_map = collections.defaultdict(set)\n    for command in commands:\n        if len(command) == 1:\n            if _IsOption(command[0]):\n                global_options.add(command[0])\n            else:\n                subcommands_map[name].add(command[0])\n        elif command:\n            subcommand = command[-2]\n            arg = _FormatForCommand(command[-1])\n            if _IsOption(arg):\n                args_map = options_map\n            else:\n                args_map = subcommands_map\n            args_map[subcommand].add(arg)\n            args_map[subcommand.replace('_', '-')].add(arg)\n    return (global_options, options_map, subcommands_map)", "docstring": "Returns sets of subcommands and options for each command.\n\nArgs:\nname: The first token in the commands, also the name of the command.\ncommands: A list of all possible commands that tab completion can complete\nto. Each command is a list or tuple of the string tokens that make up\nthat command.\ndefault_options: A dict of options that can be used with any command. Use\nthis if there are flags that can always be appended to a command.\nReturns:\nglobal_options: A set of all options of the first token of the command.\nsubcommands_map: A dict storing set of subcommands for each\ncommand/subcommand.\noptions_map: A dict storing set of options for each subcommand.", "source": "github-repos"}
{"code": "def get_content_type(content_type):\n    m = email.message.Message()\n    m['Content-Type'] = content_type\n    return m.get_content_type()", "docstring": "Extract the MIME type value from a content type string.\n\nRemoves any subtype and parameter values that may be present in the string.\n\nArgs:\ncontent_type: str\nString with content type and optional subtype and parameter fields.\n\nReturns:\nstr: String with only content type\n\nExample:\n\n::\n\nInput:   multipart/form-data; boundary=aBoundaryString\nReturns: multipart/form-data", "source": "codesearchnet"}
{"code": "def from_data(cls, data):\n        \n        obj = cls()\n        with contextlib.closing(BytesIO(data)) as file_handle:\n            obj.load_file(file_handle)\n        return obj", "docstring": "Load an FCS file from a bytes-like object.\n\nArgs:\ndata: buffer containing contents of an FCS file.\n\nReturns:\nFCSParser instance with data loaded", "source": "juraj-google-style"}
{"code": "def update_one_time_key_counts(self, counts):\n    self.one_time_keys_manager.server_counts = counts\n    if self.one_time_keys_manager.should_upload():\n        logger.info('Uploading new one-time keys.')\n        self.upload_one_time_keys()", "docstring": "Update data on one-time keys count and upload new ones if necessary.\n\nArgs:\ncounts (dict): Counts of keys currently on the HS for each key type.", "source": "codesearchnet"}
{"code": "def __init__(self, batch_size: int, ngram_len: int, context_history_size: int, device: torch.device):\n    self.context = torch.zeros((batch_size, ngram_len - 1), dtype=torch.int64, device=device)\n    self.context_history = torch.zeros((batch_size, context_history_size), dtype=torch.int64, device=device)\n    self.num_calls = 0", "docstring": "Initializes the state.\n\nArgs:\nbatch_size (`int`): Batch size.\nngram_len (`int`): Ngram length.\ncontext_history_size (`int`): Size of the tensor to keep track of seen contexts.\ndevice (`int`): Device to use.", "source": "github-repos"}
{"code": "def plot_correlation(self, freq=None, title=None,\n                         figsize=(12, 6), **kwargs):\n        \n        if title is None:\n            title = self._get_default_plot_title(\n                freq, 'Return Correlation Matrix')\n\n        rets = self._get_series(freq).to_returns().dropna()\n        return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)", "docstring": "Utility function to plot correlations.\n\nArgs:\n* freq (str): Pandas data frequency alias string\n* title (str): Plot title\n* figsize (tuple (x,y)): figure size\n* kwargs: passed to Pandas' plot_corr_heatmap function", "source": "juraj-google-style"}
{"code": "def _greedy_infer(self, features, decode_length, use_tpu=False):\n    if use_tpu:\n        return self._slow_greedy_infer_tpu(features, decode_length)\n    return self._slow_greedy_infer(features, decode_length)", "docstring": "A greedy inference method.\n\nModels should ideally implement a more efficient version of this function.\n\nArgs:\nfeatures: an map of string to `Tensor`\ndecode_length: an integer.  How many additional timesteps to decode.\nuse_tpu: A bool, whether to build the inference graph for TPU.\n\nReturns:\nA dict of decoding results {\n\"outputs\": integer `Tensor` of decoded ids of shape\n[batch_size, <= decode_length] if beam_size == 1 or\n[batch_size, top_beams, <= decode_length]\n\"scores\": None\n\"logits\": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].\n\"losses\": a dictionary: {loss-name (string): floating point `Scalar`}\n}", "source": "codesearchnet"}
{"code": "def test_moment_matching(samples, number_moments, dist, stride=0):\n    sample_moments = []\n    expected_moments = []\n    variance_sample_moments = []\n    for i in range(1, number_moments + 1):\n        if len(samples.shape) == 2:\n            strided_range = samples.flat[::(i - 1) * stride + 1]\n        else:\n            strided_range = samples[::(i - 1) * stride + 1, ...]\n        sample_moments.append(np.mean(strided_range ** i, axis=0))\n        expected_moments.append(dist.moment(i))\n        variance_sample_moments.append((dist.moment(2 * i) - dist.moment(i) ** 2) / len(strided_range))\n    z_test_scores = []\n    for i in range(1, number_moments + 1):\n        total_variance = variance_sample_moments[i - 1] + i * np.finfo(samples.dtype).eps\n        tiny = np.finfo(samples.dtype).tiny\n        assert np.all(total_variance > 0)\n        total_variance = np.where(total_variance < tiny, tiny, total_variance)\n        z_test_scores.append(abs((sample_moments[i - 1] - expected_moments[i - 1]) / np.sqrt(total_variance)))\n    return z_test_scores", "docstring": "Return z-test scores for sample moments to match analytic moments.\n\nGiven `samples`, check that the first sample `number_moments` match\nthe given  `dist` moments by doing a z-test.\n\nArgs:\nsamples: Samples from target distribution.\nnumber_moments: Python `int` describing how many sample moments to check.\ndist: SciPy distribution object that provides analytic moments.\nstride: Distance between samples to check for statistical properties.\nA stride of 0 means to use all samples, while other strides test for\nspatial correlation.\nReturns:\nArray of z_test scores.", "source": "github-repos"}
{"code": "def ListAttrs(cls):\n    precondition.AssertType(cls, type)\n    if PY2:\n        return [item.decode('ascii') for item in dir(cls)]\n    else:\n        return dir(cls)", "docstring": "A compatibility wrapper for listing class attributes.\n\nThis method solves similar Python 2 compatibility issues for `dir` function as\n`GetName` does for `__name__` invocations. See documentation for `GetName` for\nmore details.\n\nOnce support for Python 2 is dropped all invocations of this function should\nbe replaced with ordinary `dir` calls.\n\nArgs:\ncls: A class object to list the attributes for.\n\nReturns:\nA list of attribute names as unicode strings.", "source": "codesearchnet"}
{"code": "def parse_statement(self, statement, orig_contents):\n    children = []\n    is_block = False\n    name = statement.getName()\n    if (name == 'block'):\n        children_statements = statement[1]\n        for child in children_statements:\n            parsed = self.parse_statement(child, orig_contents=orig_contents)\n            children.append(parsed)\n        locn = statement[0]['location']\n        statement = statement[0][1]\n        name = statement.getName()\n        is_block = True\n    else:\n        stmt_language = get_statement()\n        locn = statement['location']\n        statement = statement['match']\n        statement_string = str(u''.join(statement.asList()))\n        try:\n            statement = stmt_language.parseString(statement_string)[0]\n        except (pyparsing.ParseException, pyparsing.ParseSyntaxException) as exc:\n            raise SensorGraphSyntaxError('Error parsing statement in sensor graph file', message=exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), column=pyparsing.col(locn, orig_contents))\n        except SensorGraphSemanticError as exc:\n            raise SensorGraphSemanticError(exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), **exc.params)\n        name = statement.getName()\n    if (name not in statement_map):\n        raise ArgumentError('Unknown statement in sensor graph file', parsed_statement=statement, name=name)\n    line = pyparsing.line(locn, orig_contents).strip()\n    line_number = pyparsing.lineno(locn, orig_contents)\n    column = pyparsing.col(locn, orig_contents)\n    location_info = LocationInfo(line, line_number, column)\n    if is_block:\n        return statement_map[name](statement, children=children, location=location_info)\n    return statement_map[name](statement, location_info)", "docstring": "Parse a statement, possibly called recursively.\n\nArgs:\nstatement (int, ParseResult): The pyparsing parse result that\ncontains one statement prepended with the match location\norig_contents (str): The original contents of the file that we're\nparsing in case we need to convert an index into a line, column\npair.\n\nReturns:\nSensorGraphStatement: The parsed statement.", "source": "codesearchnet"}
{"code": "def bit_flip(p: Optional[float]=None) -> Union[(common_gates.XPowGate, BitFlipChannel)]:\n    if (p is None):\n        return pauli_gates.X\n    return _bit_flip(p)", "docstring": "r\"\"\"\nConstruct a BitFlipChannel that flips a qubit state\nwith probability of a flip given by p. If p is None, return\na guaranteed flip in the form of an X operation.\n\nThis channel evolves a density matrix via\n\n$$\n\\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n$$\n\nWith\n\n$$\n\\begin{aligned}\nM_0 =& \\sqrt{p} \\begin{bmatrix}\n1 & 0 \\\\\n0 & 1\n\\end{bmatrix}\n\\\\\nM_1 =& \\sqrt{1-p} \\begin{bmatrix}\n0 & 1 \\\\\n1 & -0\n\\end{bmatrix}\n\\end{aligned}\n$$\n\nArgs:\np: the probability of a bit flip.\n\nRaises:\nValueError: if p is not a valid probability.", "source": "codesearchnet"}
{"code": "def _parse_graph(self):\n    if self.exists:\n        self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)\n    else:\n        self.rdf.graph = rdflib.Graph()\n    self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)\n    for (ns_prefix, ns_uri) in self.rdf.prefixes.__dict__.items():\n        self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)\n    for (ns_prefix, ns_uri) in self.rdf.graph.namespaces():\n        setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))\n        setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)\n    self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)\n    self.parse_object_like_triples()", "docstring": "use Content-Type from headers to determine parsing method\n\nArgs:\nNone\n\nReturn:\nNone: sets self.rdf by parsing data from GET request, or setting blank graph of resource does not yet exist", "source": "codesearchnet"}
{"code": "def parse_panel_app_gene(app_gene, hgnc_map):\n    \n    gene_info = {}\n    confidence_level = app_gene['LevelOfConfidence']\n    \n    if not confidence_level == 'HighEvidence':\n        return gene_info\n    \n    hgnc_symbol = app_gene['GeneSymbol']\n    \n    hgnc_ids = get_correct_ids(hgnc_symbol, hgnc_map)\n    if not hgnc_ids:\n        LOG.warning(\"Gene %s does not exist in database. Skipping gene...\", hgnc_symbol)\n        return gene_info\n    \n    if len(hgnc_ids) > 1:\n        LOG.warning(\"Gene %s has unclear identifier. Choose random id\", hgnc_symbol)\n\n    gene_info['hgnc_symbol'] = hgnc_symbol\n    for hgnc_id in hgnc_ids:\n        gene_info['hgnc_id'] = hgnc_id\n\n    gene_info['reduced_penetrance'] = INCOMPLETE_PENETRANCE_MAP.get(app_gene['Penetrance'])\n\n    inheritance_models = []\n    for model in MODELS_MAP.get(app_gene['ModeOfInheritance'],[]):\n        inheritance_models.append(model)\n    \n    gene_info['inheritance_models'] = inheritance_models\n    \n    return gene_info", "docstring": "Parse a panel app formated gene\n\nArgs:\napp_gene(dict): Dict with panel app info\nhgnc_map(dict): Map from hgnc_symbol to hgnc_id\n\nReturns:\ngene_info(dict): Scout infromation", "source": "juraj-google-style"}
{"code": "def index_normalize(index_val):\n    index_val = index_val.lower().strip()\n    index_val = re.sub('^\\\\W*', '', index_val)\n    index_val = re.sub('\\\\W*$', '', index_val)\n    index_val = re.sub('\\\\W+', '_', index_val)\n    index_val = re.sub('_+', '_', index_val)\n    return index_val", "docstring": "Normalize dictionary calculated key\n\nWhen parsing, keys within a dictionary may come from the input text. To ensure there is no\nspace or other special caracters, one should use this function. This is useful because\nDictExt dictionaries can be access with a dotted notation that only supports ``A-Za-z0-9_`` chars.\n\nArgs:\nindex_val (str): The candidate string to a dictionary key.\n\nReturns:\nstr: A normalized string with only ``A-Za-z0-9_`` chars\n\nExamples:\n>>> index_normalize('this my key')\n'this_my_key'\n>>> index_normalize('this -my- %key%')\n'this_my_key'", "source": "codesearchnet"}
{"code": "def decode_csv(csv_string, column_names):\n    import csv\n    r = next(csv.reader([csv_string]))\n    if (len(r) != len(column_names)):\n        raise ValueError(('csv line %s does not have %d columns' % (csv_string, len(column_names))))\n    return {k: v for (k, v) in zip(column_names, r)}", "docstring": "Parse a csv line into a dict.\n\nArgs:\ncsv_string: a csv string. May contain missing values \"a,,c\"\ncolumn_names: list of column names\n\nReturns:\nDict of {column_name, value_from_csv}. If there are missing values,\nvalue_from_csv will be ''.", "source": "codesearchnet"}
{"code": "def assignee(self, main_type, sub_type, unique_id, assignee_id, action='ADD', params=None):\n        \n        params = params or {}\n\n        url = '/v2/{}/{}/{}/assignees/{}'.format(main_type, sub_type, unique_id, assignee_id)\n        if action == 'GET':\n            return self.tcex.session.get(url, params=params)\n        if action == 'DELETE':\n            return self.tcex.session.delete(url)\n        if action == 'ADD':\n            return self.tcex.session.post(url)\n        return None", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nassignee_id:\naction:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def read_from_text(path: str):\n    return beam_io.ReadFromText(path) | beam.Map(lambda s: beam.Row(line=s))", "docstring": "Reads lines from a text files.\n\nThe resulting PCollection consists of rows with a single string field named\n\"line.\"\n\nArgs:\npath (str): The file path to read from.  The path can contain glob\ncharacters such as ``*`` and ``?``.", "source": "github-repos"}
{"code": "def _get_query_params(self):\n    result = {}\n    if (self.next_page_token is not None):\n        result[self._PAGE_TOKEN] = self.next_page_token\n    if (self.max_results is not None):\n        result[self._MAX_RESULTS] = (self.max_results - self.num_results)\n    result.update(self.extra_params)\n    return result", "docstring": "Getter for query parameters for the next request.\n\nReturns:\ndict: A dictionary of query parameters.", "source": "codesearchnet"}
{"code": "def _PathStripPrefix(self, path):\n    \n    if path.startswith('\\\\\\\\.\\\\') or path.startswith('\\\\\\\\?\\\\'):\n      if len(path) < 7 or path[5] != ':' or path[6] != self._PATH_SEPARATOR:\n        \n        return None\n\n      path = path[7:]\n\n    elif path.startswith('\\\\\\\\'):\n      \n      return None\n\n    elif len(path) >= 3 and path[1] == ':':\n      \n      if path[2] != self._PATH_SEPARATOR:\n        \n        return None\n\n      path = path[3:]\n\n    elif path.startswith('\\\\'):\n      path = path[1:]\n\n    else:\n      \n      return None\n\n    return path", "docstring": "Strips the prefix from a path.\n\nArgs:\npath (str): Windows path to strip the prefix from.\n\nReturns:\nstr: path without the prefix or None if the path is not supported.", "source": "juraj-google-style"}
{"code": "def from_directory(input_dir, optional_files=None):\n    sub_d = {}\n    for (fname, ftype) in [('INCAR', Incar), ('KPOINTS', Kpoints), ('POSCAR', Poscar), ('POTCAR', Potcar)]:\n        fullzpath = zpath(os.path.join(input_dir, fname))\n        sub_d[fname.lower()] = ftype.from_file(fullzpath)\n    sub_d['optional_files'] = {}\n    if (optional_files is not None):\n        for (fname, ftype) in optional_files.items():\n            sub_d['optional_files'][fname] = ftype.from_file(os.path.join(input_dir, fname))\n    return VaspInput(**sub_d)", "docstring": "Read in a set of VASP input from a directory. Note that only the\nstandard INCAR, POSCAR, POTCAR and KPOINTS files are read unless\noptional_filenames is specified.\n\nArgs:\ninput_dir (str): Directory to read VASP input from.\noptional_files (dict): Optional files to read in as well as a\ndict of {filename: Object type}. Object type must have a\nstatic method from_file.", "source": "codesearchnet"}
{"code": "def convert(self):\n    if not self._has_valid_tensors():\n        if not self._input_arrays_with_shape or not (self._output_arrays or self._control_output_arrays):\n            raise ValueError('If input_tensors and output_tensors are None, both input_arrays_with_shape and output_arrays|control_output_arrays must be defined.')\n    return super(TFLiteFrozenGraphConverter, self).convert()", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nReturns:\nThe converted data in serialized format, either a TFLite Flatbuffer or\na Graphviz graph depending on value in `output_format`.\n\nRaises:\nValueError:\nInput shape is not specified.\nNone value for dimension in input_tensor.", "source": "github-repos"}
{"code": "def add_tensor_summary(x, types, name=None, collections=None, main_tower_only=True):\n    types = set(types)\n    if (name is None):\n        name = x.op.name\n    ctx = get_current_tower_context()\n    if (main_tower_only and (ctx is not None) and (not ctx.is_main_training_tower)):\n        return\n    SUMMARY_TYPES_DIC = {'scalar': (lambda : tf.summary.scalar((name + '-summary'), x, collections=collections)), 'histogram': (lambda : tf.summary.histogram((name + '-histogram'), x, collections=collections)), 'sparsity': (lambda : tf.summary.scalar((name + '-sparsity'), tf.nn.zero_fraction(x), collections=collections)), 'mean': (lambda : tf.summary.scalar((name + '-mean'), tf.reduce_mean(x), collections=collections)), 'rms': (lambda : tf.summary.scalar((name + '-rms'), rms(x), collections=collections))}\n    for typ in types:\n        SUMMARY_TYPES_DIC[typ]()", "docstring": "Summarize a tensor by different methods.\n\nArgs:\nx (tf.Tensor): a tensor to summarize\ntypes (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms\nname (str): summary name. Defaults to be the op name.\ncollections (list[str]): collections of the summary ops.\nmain_tower_only (bool): Only run under main training tower. If\nset to True, calling this function under other TowerContext\nhas no effect.\n\nExample:\n\n.. code-block:: python\n\nwith tf.name_scope('mysummaries'):  # to not mess up tensorboard\nadd_tensor_summary(\ntensor, ['histogram', 'rms', 'sparsity'], name='mytensor')", "source": "codesearchnet"}
{"code": "def GetEventTaggingRules(self):\n    tagging_rules = {}\n    label_name = None\n    with io.open(self._path, 'r', encoding='utf-8') as tagging_file:\n        for line in tagging_file.readlines():\n            line = line.rstrip()\n            stripped_line = line.lstrip()\n            if ((not stripped_line) or (stripped_line[0] == '\n                continue\n            if (not line[0].isspace()):\n                label_name = line\n                tagging_rules[label_name] = []\n                continue\n            if (not label_name):\n                continue\n            filter_object = event_filter.EventObjectFilter()\n            try:\n                filter_object.CompileFilter(stripped_line)\n            except errors.ParseError as exception:\n                raise errors.TaggingFileError('Unable to compile filter for label: {0:s} with error: {1!s}'.format(label_name, exception))\n            if (filter_object not in tagging_rules[label_name]):\n                tagging_rules[label_name].append(filter_object)\n    return tagging_rules", "docstring": "Retrieves the event tagging rules from the tagging file.\n\nReturns:\ndict[str, FilterObject]: tagging rules, that consists of one or more\nfilter objects per label.\n\nRaises:\nTaggingFileError: if a filter expression cannot be compiled.", "source": "codesearchnet"}
{"code": "def parse_query_param(url, param):\n    \n\n    try:\n        return parse.parse_qs(parse.urlparse(url).query)[param][0]\n    except:\n        return None", "docstring": "Parses the query string of a URL and returns the value of a parameter.\n\nArgs:\nurl: A URL.\nparam: A string representing the name of the parameter.\n\nReturns:\nThe value of the parameter.", "source": "juraj-google-style"}
{"code": "def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None, newfname=None, ensure=False, prefix=None, suffix=None):\n    if (prefix is not None):\n        augpref = prefix\n    if (suffix is not None):\n        augsuf = suffix\n    (dpath, fname) = split(path)\n    (fname_noext, ext) = splitext(fname)\n    if (newfname is not None):\n        fname_noext = newfname\n    if (newext is None):\n        newext = ext\n    new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext))\n    if (augdir is not None):\n        new_dpath = join(dpath, augdir)\n        if ensure:\n            ensuredir(new_dpath)\n    else:\n        new_dpath = dpath\n    newpath = join(new_dpath, new_fname)\n    return newpath", "docstring": "augments end of path before the extension.\n\naugpath\n\nArgs:\npath (str):\naugsuf (str): augment filename before extension\n\nReturns:\nstr: newpath\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> path = 'somefile.txt'\n>>> augsuf = '_aug'\n>>> newpath = augpath(path, augsuf)\n>>> result = str(newpath)\n>>> print(result)\nsomefile_aug.txt\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> path = 'somefile.txt'\n>>> augsuf = '_aug2'\n>>> newext = '.bak'\n>>> augdir = 'backup'\n>>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)\n>>> result = str(newpath)\n>>> print(result)\nbackup/somefile_aug2.bak", "source": "codesearchnet"}
{"code": "def add_option(self, section, name, value):\n        \n\n        \n        if self._is_live():\n            raise RuntimeError('Submitted units cannot update their options')\n\n        option = {\n            'section': section,\n            'name': name,\n            'value': value\n        }\n\n        self._data['options'].append(option)\n\n        return True", "docstring": "Add an option to a section of the unit file\n\nArgs:\nsection (str): The name of the section, If it doesn't exist it will be created\nname (str): The name of the option to add\nvalue (str): The value of the option\n\nReturns:\nTrue: The item was added", "source": "juraj-google-style"}
{"code": "def get_metadata(self, entity_type, entity_id):\n    if (not is_valid_uuid(entity_id)):\n        raise StorageArgumentException('Invalid UUID for entity_id: {0}'.format(entity_id))\n    return self._authenticated_request.to_endpoint('{}/{}/metadata/'.format(entity_type, entity_id)).return_body().get()", "docstring": "Get metadata of an entity.\n\nArgs:\nentity_type (str): Type of the entity. Admitted values: ['project',\n'folder', 'file'].\nentity_id (str): The UUID of the entity to be modified.\n\nReturns:\nA dictionary of the metadata::\n\n{\nu'bar': u'200',\nu'foo': u'100'\n}\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def infer_annotation(type_comments):\n    assert type_comments\n    args = {}\n    returns = set()\n    for comment in type_comments:\n        (arg_types, return_type) = parse_type_comment(comment)\n        for (i, arg_type) in enumerate(arg_types):\n            args.setdefault(i, set()).add(arg_type)\n        returns.add(return_type)\n    combined_args = []\n    for i in sorted(args):\n        arg_infos = list(args[i])\n        kind = argument_kind(arg_infos)\n        if (kind is None):\n            raise InferError(('Ambiguous argument kinds:\\n' + '\\n'.join(type_comments)))\n        types = [arg.type for arg in arg_infos]\n        combined = combine_types(types)\n        if (str(combined) == 'None'):\n            combined = UnionType([ClassType('None'), AnyType()])\n        if ((kind != ARG_POS) and ((len(str(combined)) > 120) or isinstance(combined, UnionType))):\n            combined = AnyType()\n        combined_args.append(Argument(combined, kind))\n    combined_return = combine_types(returns)\n    return (combined_args, combined_return)", "docstring": "Given some type comments, return a single inferred signature.\n\nArgs:\ntype_comments: Strings of form '(arg1, ... argN) -> ret'\n\nReturns: Tuple of (argument types and kinds, return type).", "source": "codesearchnet"}
{"code": "def sagemaker_auth(overrides={}, path=\".\"):\n    \n\n    api_key = overrides.get(env.API_KEY, Api().api_key)\n    if api_key is None:\n        raise ValueError(\n            \"Can't find W&B ApiKey, set the WANDB_API_KEY env variable or run `wandb login`\")\n    overrides[env.API_KEY] = api_key\n    with open(os.path.join(path, \"secrets.env\"), \"w\") as file:\n        for k, v in six.iteritems(overrides):\n            file.write(\"{}={}\\n\".format(k, v))", "docstring": "Write a secrets.env file with the W&B ApiKey and any additional secrets passed.\n\nArgs:\noverrides (dict, optional): Additional environment variables to write to secrets.env\npath (str, optional): The path to write the secrets file.", "source": "juraj-google-style"}
{"code": "def set_expiration(self, key, ignore_missing=False, additional_seconds=None, seconds=None):\n    if ((key not in self.time_dict) and ignore_missing):\n        return\n    elif ((key not in self.time_dict) and (not ignore_missing)):\n        raise Exception('Key missing from `TimedDict` and `ignore_missing` is False.')\n    if (additional_seconds is not None):\n        self.time_dict[key] += additional_seconds\n    elif (seconds is not None):\n        self.time_dict[key] = (time.time() + seconds)", "docstring": "Alters the expiration time for a key. If the key is not\npresent, then raise an Exception unless `ignore_missing`\nis set to `True`.\n\nArgs:\nkey: The key whose expiration we are changing.\nignore_missing (bool): If set, then return silently\nif the key does not exist. Default is `False`.\nadditional_seonds (int): Add this many seconds to the\ncurrent expiration time.\nseconds (int): Expire the key this many seconds from now.", "source": "codesearchnet"}
{"code": "def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n    mask = tf.cast(input_ids != padding_idx, tf.int64)\n    incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask\n    return incremental_indices + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\nx: tf.Tensor x:\n\nReturns: tf.Tensor", "source": "github-repos"}
{"code": "def _get_head_block(self, request):\n        \n        if request.head_id:\n            if self._id_regex.fullmatch(request.head_id) is None:\n                LOGGER.debug('Invalid head id requested: %s', request.head_id)\n                raise _ResponseFailed(self._status.NO_ROOT)\n            try:\n                return self._block_store[request.head_id]\n            except KeyError as e:\n                LOGGER.debug('Unable to find block \"%s\" in store', e)\n                raise _ResponseFailed(self._status.NO_ROOT)\n\n        else:\n            return self._get_chain_head()", "docstring": "Fetches the request specified head block, or the chain head.\n\nNote:\nThis method will fail if `_block_store` has not been set\n\nArgs:\nrequest (object): The parsed protobuf request object\n\nReturns:\nBlock: the block object at the head of the requested chain\n\nRaises:\nResponseFailed: Failed to retrieve a head block", "source": "juraj-google-style"}
{"code": "def atomic_observe(self, states, actions, internals, reward, terminal):\n    self.current_terminal = terminal\n    self.current_reward = reward\n    if self.unique_state:\n        states = dict(state=states)\n    if self.unique_action:\n        actions = dict(action=actions)\n    self.episode = self.model.atomic_observe(states=states, actions=actions, internals=internals, terminal=self.current_terminal, reward=self.current_reward)", "docstring": "Utility method for unbuffered observing where each tuple is inserted into TensorFlow via\na single session call, thus avoiding race conditions in multi-threaded mode.\n\nObserve full experience  tuplefrom the environment to learn from. Optionally pre-processes rewards\nChild classes should call super to get the processed reward\nEX: terminal, reward = super()...\n\nArgs:\nstates (any): One state (usually a value tuple) or dict of states if multiple states are expected.\nactions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\ninternals (any): Internal list.\nterminal (bool): boolean indicating if the episode terminated after the observation.\nreward (float): scalar reward that resulted from executing the action.", "source": "codesearchnet"}
{"code": "def _process_debug_graph_node(self, node):\n    if is_debug_node(node.name):\n        return\n    if node.name in self._node_inputs:\n        raise ValueError(\"Duplicate node name on device %s: '%s'\" % (self._device_name, node.name))\n    self._node_attributes[node.name] = node.attr\n    self._node_inputs[node.name] = []\n    self._node_ctrl_inputs[node.name] = []\n    self._node_recipients[node.name] = []\n    self._node_ctrl_recipients[node.name] = []\n    if node.name not in self._node_devices:\n        self._node_devices[node.name] = set()\n    self._node_devices[node.name].add(node.device if node.device else self._device_name)\n    self._node_op_types[node.name] = node.op\n    self._ref_args[node.name] = self._get_ref_args(node)\n    for inp in node.input:\n        if is_copy_node(inp) and (node.op == '_Send' or node.op == '_Retval'):\n            self._copy_send_nodes.append(node.name)\n        if inp.startswith('^'):\n            cinp = inp[1:]\n            self._node_ctrl_inputs[node.name].append(cinp)\n        else:\n            self._node_inputs[node.name].append(inp)", "docstring": "Process a node from the debug GraphDef.\n\nArgs:\nnode: (NodeDef) A partition-graph node to be processed.\n\nRaises:\nValueError: If duplicate node names are encountered.", "source": "github-repos"}
{"code": "def get(self, id):\n        \n        \n        request_url = self._client.base_api_url + self.detail_url.format(id=id)\n\n        response = self._client.session.get(request_url)\n\n        \n        self.validate_request_success(\n            response_text=response.text,\n            request_url=request_url,\n            status_code=response.status_code,\n            expected_status_code=HTTP_200_OK,\n        )\n\n        \n        return self.response_data_to_model_instance(response.json())", "docstring": "Get the model instance with a given id.\n\nArgs:\nid (int or str): The primary identifier (e.g., pk or UUID)\nfor the task instance to get.\n\nReturns:\n:class:`saltant.models.resource.Model`:\nA :class:`saltant.models.resource.Model` subclass\ninstance representing the resource requested.", "source": "juraj-google-style"}
{"code": "def merge_transformers_sharded_states(path, num_checkpoints):\n    state_dict = {}\n    for i in range(1, num_checkpoints + 1):\n        checkpoint_path = os.path.join(path, f'pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin')\n        check_torch_load_is_safe()\n        current_chunk = torch.load(checkpoint_path, map_location='cpu', weights_only=True)\n        state_dict.update(current_chunk)\n    return state_dict", "docstring": "Merge sharded checkpoints from transformers into a single checkpoint.\n\nArgs:\npath (str): the path to the sharded checkpoints\nnum_checkpoints (int): the number of checkpoints to merge", "source": "github-repos"}
{"code": "def get(self, key, default=None):\n    index = self._xxx_field_to_index.get(key)\n    if (index is None):\n        return default\n    return self._xxx_values[index]", "docstring": "Return a value for key, with a default value if it does not exist.\n\nArgs:\nkey (str): The key of the column to access\ndefault (object):\nThe default value to use if the key does not exist. (Defaults\nto :data:`None`.)\n\nReturns:\nobject:\nThe value associated with the provided key, or a default value.\n\nExamples:\nWhen the key exists, the value associated with it is returned.\n\n>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')\n'a'\n\nThe default value is :data:`None` when the key does not exist.\n\n>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')\nNone\n\nThe default value can be overrided with the ``default`` parameter.\n\n>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')\n''\n\n>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')\n''", "source": "codesearchnet"}
{"code": "def list_files_in_directory(full_directory_path):\n    \n    files = list()\n    for file_name in __os.listdir(full_directory_path):\n        if __os.path.isfile(__os.path.join(full_directory_path, file_name)):\n            files.append(file_name)\n    return files", "docstring": "List the files in a specified directory\nArgs:\nfull_directory_path: The full directory path to check, derive from the os module\n\nReturns: returns a list of files", "source": "juraj-google-style"}
{"code": "def cast_losses_to_common_dtype(losses):\n    highest_float = None\n    for loss in losses:\n        if loss.dtype.is_floating:\n            if highest_float is None or loss.dtype.size > highest_float.size:\n                highest_float = loss.dtype\n            elif {loss.dtype, highest_float} == {'bfloat16', 'float16'}:\n                highest_float = 'float32'\n        if loss.dtype.is_complex:\n            return losses\n    if highest_float:\n        losses = [math_ops.cast(loss, highest_float) for loss in losses]\n    return losses", "docstring": "Cast a list of losses to a common dtype.\n\nIf any loss is floating-point, they will all be casted to the most-precise\nfloating-point loss. Otherwise the losses are not casted. We also skip casting\nlosses if there are any complex losses.\n\nArgs:\nlosses: A list of losses.\n\nReturns:\n`losses`, but they have been casted to a common dtype.", "source": "github-repos"}
{"code": "def add_device(self, path):\n    hdevice = self._libinput.libinput_path_add_device(self._li, path.encode())\n    if hdevice:\n        return Device(hdevice, self._libinput)\n    return None", "docstring": "Add a device to a libinput context.\n\nIf successful, the device will be added to the internal list and\nre-opened on :meth:`~libinput.LibInput.resume`. The device can be\nremoved with :meth:`remove_device`.\nIf the device was successfully initialized, it is returned.\n\nArgs:\npath (str): Path to an input device.\nReturns:\n~libinput.define.Device: A device object or :obj:`None`.", "source": "codesearchnet"}
{"code": "def IsTemplateParameterList(clean_lines, linenum, column):\n  \n  (_, startline, startpos) = ReverseCloseExpression(\n      clean_lines, linenum, column)\n  if (startpos > -1 and\n      Search(r'\\btemplate\\s*$', clean_lines.elided[startline][0:startpos])):\n    return True\n  return False", "docstring": "Check if the token ending on (linenum, column) is the end of template<>.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: the number of the line to check.\ncolumn: end column of the token to check.\nReturns:\nTrue if this token is end of a template parameter list, False otherwise.", "source": "juraj-google-style"}
{"code": "def get_inverse_sqrt_schedule(optimizer: Optimizer, num_warmup_steps: int, timescale: Optional[int]=None, last_epoch: int=-1):\n    if timescale is None:\n        timescale = num_warmup_steps or 10000\n    lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale)\n    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)", "docstring": "Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a\nwarmup period which increases lr linearly from 0 to the initial lr set in the optimizer.\n\nArgs:\noptimizer ([`~torch.optim.Optimizer`]):\nThe optimizer for which to schedule the learning rate.\nnum_warmup_steps (`int`):\nThe number of steps for the warmup phase.\ntimescale (`int`, *optional*, defaults to `num_warmup_steps`):\nTime scale.\nlast_epoch (`int`, *optional*, defaults to -1):\nThe index of the last epoch when resuming training.\n\nReturn:\n`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.", "source": "github-repos"}
{"code": "def _ParseExample(self, example_features, example_feature_lists, entries, index):\n    features_seen = set()\n    for (feature_list, is_feature) in zip([example_features, example_feature_lists], [True, False]):\n        sequence_length = None\n        for feature_name in feature_list:\n            if (feature_name not in entries):\n                entries[feature_name] = {'vals': [], 'counts': [], 'feat_lens': [], 'missing': index}\n            feature_entry = entries[feature_name]\n            feature = feature_list[feature_name]\n            value_type = None\n            value_list = []\n            if is_feature:\n                if feature.HasField('float_list'):\n                    value_list = feature.float_list.value\n                    value_type = self.fs_proto.FLOAT\n                elif feature.HasField('bytes_list'):\n                    value_list = feature.bytes_list.value\n                    value_type = self.fs_proto.STRING\n                elif feature.HasField('int64_list'):\n                    value_list = feature.int64_list.value\n                    value_type = self.fs_proto.INT\n            else:\n                sequence_length = len(feature.feature)\n                if ((sequence_length != 0) and feature.feature[0].HasField('float_list')):\n                    for feat in feature.feature:\n                        for value in feat.float_list.value:\n                            value_list.append(value)\n                    value_type = self.fs_proto.FLOAT\n                elif ((sequence_length != 0) and feature.feature[0].HasField('bytes_list')):\n                    for feat in feature.feature:\n                        for value in feat.bytes_list.value:\n                            value_list.append(value)\n                    value_type = self.fs_proto.STRING\n                elif ((sequence_length != 0) and feature.feature[0].HasField('int64_list')):\n                    for feat in feature.feature:\n                        for value in feat.int64_list.value:\n                            value_list.append(value)\n                    value_type = self.fs_proto.INT\n            if (value_type is not None):\n                if ('type' not in feature_entry):\n                    feature_entry['type'] = value_type\n                elif (feature_entry['type'] != value_type):\n                    raise TypeError(('type mismatch for feature ' + feature_name))\n            feature_entry['counts'].append(len(value_list))\n            feature_entry['vals'].extend(value_list)\n            if (sequence_length is not None):\n                feature_entry['feat_lens'].append(sequence_length)\n            if value_list:\n                features_seen.add(feature_name)\n    for f in entries:\n        fv = entries[f]\n        if (f not in features_seen):\n            fv['missing'] += 1", "docstring": "Parses data from an example, populating a dictionary of feature values.\n\nArgs:\nexample_features: A map of strings to tf.Features from the example.\nexample_feature_lists: A map of strings to tf.FeatureLists from the\nexample.\nentries: A dictionary of all features parsed thus far and arrays of their\nvalues. This is mutated by the function.\nindex: The index of the example to parse from a list of examples.\nRaises:\nTypeError: Raises an exception when a feature has inconsistent types\nacross\nexamples.", "source": "codesearchnet"}
{"code": "def invoice(request, invoice_id, access_code=None):\n    current_invoice = InvoiceController.for_id_or_404(invoice_id)\n    if (not current_invoice.can_view(user=request.user, access_code=access_code)):\n        raise Http404()\n    data = {'invoice': current_invoice.invoice}\n    return render(request, 'registrasion/invoice.html', data)", "docstring": "Displays an invoice.\n\nThis view is not authenticated, but it will only allow access to either:\nthe user the invoice belongs to; staff; or a request made with the correct\naccess code.\n\nArguments:\n\ninvoice_id (castable to int): The invoice_id for the invoice you want\nto view.\n\naccess_code (Optional[str]): The access code for the user who owns\nthis invoice.\n\nReturns:\nrender:\nRenders ``registrasion/invoice.html``, with the following\ndata::\n\n{\n\"invoice\": models.commerce.Invoice(),\n}\n\nRaises:\nHttp404: if the current user cannot view this invoice and the correct\naccess_code is not provided.", "source": "codesearchnet"}
{"code": "def _InitSSLContext(self, cafile=None, disable_ssl_certificate_validation=False):\n    try:\n        if disable_ssl_certificate_validation:\n            ssl._create_default_https_context = ssl._create_unverified_context\n            ssl_context = ssl.create_default_context()\n        else:\n            ssl_context = ssl.create_default_context(cafile=cafile)\n    except AttributeError:\n        return None\n    return ssl_context", "docstring": "Creates a ssl.SSLContext with the given settings.\n\nArgs:\ncafile: A str identifying the resolved path to the cafile. If not set,\nthis will use the system default cafile.\ndisable_ssl_certificate_validation: A boolean indicating whether\ncertificate verification is disabled. For security purposes, it is\nhighly recommended that certificate verification remain enabled.\n\nReturns:\nAn ssl.SSLContext instance, or None if the version of Python being used\ndoesn't support it.", "source": "codesearchnet"}
{"code": "def _benchmarkFeed(self, name, target, size, iters):\n    feed_val = np.random.rand(size).astype(np.float32)\n    times = []\n    with ops.Graph().as_default():\n        p = array_ops.placeholder(dtypes.float32, shape=[size])\n        no_op = array_ops.identity(p).op\n        with session.Session(target) as sess:\n            sess.run(no_op, feed_dict={p: feed_val})\n            for _ in range(iters):\n                start_time = time.time()\n                sess.run(no_op, feed_dict={p: feed_val})\n                end_time = time.time()\n                times.append(end_time - start_time)\n    print('%s %d %f' % (name, size, np.median(times)))\n    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", "docstring": "Runs a microbenchmark to measure the cost of feeding a tensor.\n\nReports the median cost of feeding a tensor of `size` * `sizeof(float)`\nbytes.\n\nArgs:\nname: A human-readable name for logging the output.\ntarget: The session target to use for the benchmark.\nsize: The number of floating-point numbers to be feed.\niters: The number of iterations to perform.", "source": "github-repos"}
{"code": "def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):\n  \n  with tf.variable_scope(\"sigmoid_cross_entropy_one_hot\",\n                         values=[logits, labels]):\n    del weights_fn\n    cross_entropy = tf.losses.sigmoid_cross_entropy(\n        multi_class_labels=labels, logits=logits)\n    return cross_entropy, tf.constant(1.0)", "docstring": "Calculate sigmoid cross entropy for one-hot lanels and logits.\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\ncross_entropy (scalar), weights", "source": "juraj-google-style"}
{"code": "def activate_nsxcontroller(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        name_args = dict(name=name)\n        method_name = 'nsx_controller_activate'\n        method_class = self._brocade_tunnels\n        nsxcontroller_attr = getattr(method_class, method_name)\n        config = nsxcontroller_attr(**name_args)\n        output = self._callback(config)\n        return output", "docstring": "Activate NSX Controller\n\nArgs:\nname (str): nsxcontroller name\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def set(self, key, data):\n        \n        self.raise_error_if_not_open()\n\n        if key in self._file:\n            del self._file[key]\n\n        self._file.create_dataset(key, data=data)", "docstring": "Set the given data to the container with the given key.\nAny existing data for the given key is discarded/overwritten.\n\nArgs:\nkey (str): A key to store the data for.\ndata (numpy.ndarray): Array-like data.\n\nNote:\nThe container has to be opened in advance.", "source": "juraj-google-style"}
{"code": "def find(entity, **kwargs):\n    \n    try:\n        typedfields = entity.typed_fields()\n    except AttributeError:\n        typedfields = iterfields(entity.__class__)\n\n    matching = [x for x in typedfields if _matches(x, kwargs)]\n    return matching", "docstring": "Return all TypedFields found on the input `Entity` that were initialized\nwith the input **kwargs.\n\nExample:\n>>> find(myentity, multiple=True, type_=Foo)\n\nNote:\nTypedFields.__init__() can accept a string or a class as a type_\nargument, but this method expects a class.\n\nArgs:\n**kwargs: TypedField __init__ **kwargs to search on.\n\nReturns:\nA list of TypedFields with matching **kwarg values.", "source": "juraj-google-style"}
{"code": "def preprocess_data(dataset_path: str, training_set_path: str, labels_path: str, test_set_path: str):\n    df = pandas.read_csv(dataset_path)\n    df['Grade'].replace(['low', 'medium', 'high'], [0, 1, 2], inplace=True)\n    x = df.drop(columns=['Grade'])\n    y = df['Grade']\n    x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.6, random_state=99)\n    x_train.to_csv(training_set_path, index=False)\n    y_train.to_csv(labels_path, index=False)\n    x_test.to_csv(test_set_path, index=False)", "docstring": "Helper function to split the dataset into a training set\nand its labels and a test set. The training set and\nits labels are used to train a lightweight model.\nThe test set is used to create a test streaming pipeline.\nArgs:\ndataset_path: path to csv file containing the Kaggle\nmilk quality dataset\ntraining_set_path: path to output the training samples\nlabels_path:  path to output the labels for the training set\ntest_set_path: path to output the test samples", "source": "github-repos"}
{"code": "def crc(msg, encode=False):\n    \n\n    \n    generator = np.array([1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,1,0,0,1])\n    ng = len(generator)\n\n    msgnpbin = bin2np(hex2bin(msg))\n\n    if encode:\n        msgnpbin[-24:] = [0] * 24\n\n    \n    for i in range(len(msgnpbin)-24):\n        if msgnpbin[i] == 0:\n            continue\n\n        \n        msgnpbin[i:i+ng] = np.bitwise_xor(msgnpbin[i:i+ng], generator)\n\n    \n    reminder = np2bin(msgnpbin[-24:])\n    return reminder", "docstring": "Mode-S Cyclic Redundancy Check\nDetect if bit error occurs in the Mode-S message\nArgs:\nmsg (string): 28 bytes hexadecimal message string\nencode (bool): True to encode the date only and return the checksum\nReturns:\nstring: message checksum, or partity bits (encoder)", "source": "juraj-google-style"}
{"code": "def __init__(self, element_shape=None, dtype=dtypes.float32, dynamic_size=False, infer_shape=True):\n    self._element_shape = tensor_shape.as_shape(element_shape)\n    self._dtype = dtypes.as_dtype(dtype)\n    self._dynamic_size = dynamic_size\n    self._infer_shape = infer_shape", "docstring": "Constructs a type specification for a `tf.TensorArray`.\n\nArgs:\nelement_shape: The shape of each element in the `TensorArray`.\ndtype: Data type of the `TensorArray`.\ndynamic_size: Whether the `TensorArray` can grow past its initial size.\ninfer_shape: Whether shape inference is enabled.", "source": "github-repos"}
{"code": "def analyze_async(output_dir, dataset, cloud=False, project_id=None):\n    import google.datalab.utils as du\n    with warnings.catch_warnings():\n        warnings.simplefilter('ignore')\n        fn = (lambda : _analyze(output_dir, dataset, cloud, project_id))\n        return du.LambdaJob(fn, job_id=None)", "docstring": "Analyze data locally or in the cloud with BigQuery.\n\nProduce analysis used by training. This can take a while, even for small\ndatasets. For small datasets, it may be faster to use local_analysis.\n\nArgs:\noutput_dir: The output directory to use.\ndataset: only CsvDataSet is supported currently.\ncloud: If False, runs analysis locally with Pandas. If Ture, runs analysis\nin the cloud with BigQuery.\nproject_id: Uses BigQuery with this project id. Default is datalab's\ndefault project id.\n\nReturns:\nA google.datalab.utils.Job object that can be used to query state from or wait.", "source": "codesearchnet"}
{"code": "def send_data(data):\n    \n    datalength = len(data)\n    csm1 = checksum1(data, datalength)\n    csm2 = checksum2(csm1)\n    data.insert(0, 0xFF)\n    data.insert(1, 0xFF)\n    data.insert(5, csm1)\n    data.insert(6, csm2)\n    stringtosend = \"\"\n    for i in range(len(data)):\n        byteformat = '%02X' % data[i]\n        stringtosend = stringtosend + \"\\\\x\" + byteformat\n\n    try:\n\n        SERPORT.write(stringtosend.decode('string-escape'))\n        \n\n    except:\n        raise HerkulexError(\"could not communicate with motors\")", "docstring": "Send data to herkulex\n\nPaketize & write the packet to serial port\n\nArgs:\ndata (list): the data to be sent\n\nRaises:\nSerialException: Error occured while opening serial port", "source": "juraj-google-style"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.\n\nArgs:\nrequest: (BigqueryTablesPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Table) The response message.", "source": "github-repos"}
{"code": "def _post_check(date_info: dict) -> bool:\n        \n        if date_info['pattern']:\n            return True\n        \n        if date_info['value'] == 'may' or re.match(illegal, date_info['value'])\\\n            or (re.match(possible_illegal, date_info['value']) and len([g for g in date_info['groups'] if g]) != 2) \\\n            or (re.match(possible_illegal_3, date_info['value']) and len([g for g in date_info['groups'] if g]) != 3) \\\n            or (re.match('^\\b?[0-9]{4}\\b?$', date_info['value']) and len([g for g in date_info['groups'] if g]) > 1):\n            return False\n        return True", "docstring": "Post check the extracted date string to filter out some false positives\n\nArgs:\ndate_info: dict - includes the extracted string, matching groups, patterns etc.\n\nReturns: bool - if the date extracted is valid", "source": "juraj-google-style"}
{"code": "def deserialize(config, custom_objects=None):\n    populate_deserializable_objects()\n    return generic_utils.deserialize_keras_object(config, module_objects=LOCAL.ALL_OBJECTS, custom_objects=custom_objects, printable_module_name='layer')", "docstring": "Instantiates a layer from a config dictionary.\n\nArgs:\nconfig: dict of the form {'class_name': str, 'config': dict}\ncustom_objects: dict mapping class names (or function names)\nof custom (non-Keras) objects to class/functions\n\nReturns:\nLayer instance (may be Model, Sequential, Network, Layer...)", "source": "github-repos"}
{"code": "def load_dictionary(self, filename, encoding='utf-8'):\n    with load_file(filename, encoding) as data:\n        self._dictionary.update(json.loads(data.lower(), encoding=encoding))\n        self._update_dictionary()", "docstring": "Load in a pre-built word frequency list\n\nArgs:\nfilename (str): The filepath to the json (optionally gzipped) \\\nfile to be loaded\nencoding (str): The encoding of the dictionary", "source": "codesearchnet"}
{"code": "def _init_boto3_clients(self, profile, region):\n    try:\n        session = None\n        if (profile and region):\n            session = boto3.session.Session(profile_name=profile, region_name=region)\n        elif profile:\n            session = boto3.session.Session(profile_name=profile)\n        elif region:\n            session = boto3.session.Session(region_name=region)\n        else:\n            session = boto3.session.Session()\n        self._cloud_formation = session.client('cloudformation')\n        return True\n    except Exception as wtf:\n        logging.error(wtf, exc_info=True)\n        return False", "docstring": "The utililty requires boto3 clients to CloudFormation.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False", "source": "codesearchnet"}
{"code": "def _parse_response(self, respond):\n        \n        \n        mobj = self._max_qubit_error_re.match(respond.text)\n        if mobj:\n            raise RegisterSizeError(\n                'device register size must be <= {}'.format(mobj.group(1)))\n        return True", "docstring": "parse text of response for HTTP errors\n\nThis parses the text of the response to decide whether to\nretry request or raise exception. At the moment this only\ndetects an exception condition.\n\nArgs:\nrespond (Response): requests.Response object\n\nReturns:\nbool: False if the request should be retried, True\nif not.\n\nRaises:\nRegisterSizeError", "source": "juraj-google-style"}
{"code": "def execute_interactive_code(elem, doc):\n    \n    code_lines = [l[4:] for l in elem.text.split('\\n')]\n\n    code_blocks = [[code_lines[0]]]\n    for line in code_lines[1:]:\n        if line.startswith(' ') or line == '':\n            code_blocks[-1].append(line)\n        else:\n            code_blocks.append([line])\n\n    final_code = []\n    try:\n        child = replwrap.REPLWrapper(\"python\", \">>> \", None)\n    except NameError:\n        pf.debug('Can not run interactive session. No output produced ' +\n                 '(Code was:\\n{!s}\\n)'\n                 .format(elem))\n        pf.debug('Please pip install pexpect.')\n        return ''\n    for code_block in code_blocks:\n        result = child.run_command('\\n'.join(code_block) + '\\n').rstrip('\\r\\n')\n        final_code += [('>>> ' if i == 0 else '... ') + l for i, l in\n                       enumerate(code_block)]\n        if result:\n            final_code += [r for r in result.split('\\n')\n                           if r.strip() not in code_block]\n    return '\\n'.join(final_code)", "docstring": "Executes code blocks for a python shell.\n\nParses the code in `elem.text` into blocks and\nexecutes them.\n\nArgs:\nelem The AST element.\ndoc  The document.\n\nReturn:\nThe code with inline results.", "source": "juraj-google-style"}
{"code": "def append(self, node, dirty=True):\n    self._children[node.id] = node\n    node.parent = self\n    if dirty:\n        self.touch()\n    return node", "docstring": "Add a new child node.\n\nArgs:\nnode (gkeepapi.Node): Node to add.\ndirty (bool): Whether this node should be marked dirty.", "source": "codesearchnet"}
{"code": "def pool(builder, size, timeout=None):\n    lock = threading.Lock()\n    local_pool = queue.Queue()\n    current_size = 0\n\n    @contextlib.contextmanager\n    def pooled():\n        nonlocal current_size\n        instance = None\n        if (current_size < size):\n            with lock:\n                if (current_size < size):\n                    current_size += 1\n                    instance = builder()\n        if (instance is None):\n            instance = local_pool.get(timeout=timeout)\n        (yield instance)\n        local_pool.put(instance)\n    return pooled", "docstring": "Create a pool that imposes a limit on the number of stored\ninstances.\n\nArgs:\nbuilder: a function to build an instance.\nsize: the size of the pool.\ntimeout(Optional[float]): the seconds to wait before raising\na ``queue.Empty`` exception if no instances are available\nwithin that time.\nRaises:\nIf ``timeout`` is defined but the request is taking longer\nthan the specified time, the context manager will raise\na ``queue.Empty`` exception.\n\nReturns:\nA context manager that can be used with the ``with``\nstatement.", "source": "codesearchnet"}
{"code": "def setup(self, input_nodes=None, drop_na=False, **kwargs):\n    self.output_nodes = []\n    input_nodes = (input_nodes or self.input_nodes or [])\n    if (self.level != 'run'):\n        kwargs = kwargs.copy()\n        kwargs.pop('scan_length', None)\n    collections = self.layout.get_collections(self.level, drop_na=drop_na, **kwargs)\n    objects = (collections + input_nodes)\n    (objects, kwargs) = self._filter_objects(objects, kwargs)\n    groups = self._group_objects(objects)\n    model = (self.model or {})\n    X = model.get('x', [])\n    for grp in groups:\n        input_nodes = [o for o in grp if isinstance(o, AnalysisNode)]\n        colls = list((set(grp) - set(input_nodes)))\n        if input_nodes:\n            node_coll = self._concatenate_input_nodes(input_nodes)\n            colls.append(node_coll)\n        coll = (merge_collections(colls) if (len(colls) > 1) else colls[0])\n        coll = apply_transformations(coll, self.transformations)\n        if X:\n            transform.Select(coll, X)\n        node = AnalysisNode(self.level, coll, self.contrasts, input_nodes, self.auto_contrasts)\n        self.output_nodes.append(node)", "docstring": "Set up the Step and construct the design matrix.\n\nArgs:\ninput_nodes (list): Optional list of Node objects produced by\nthe preceding Step in the analysis. If None, uses any inputs\npassed in at Step initialization.\ndrop_na (bool): Boolean indicating whether or not to automatically\ndrop events that have a n/a amplitude when reading in data\nfrom event files.\nkwargs: Optional keyword arguments to pass onto load_variables.", "source": "codesearchnet"}
{"code": "def __find_variant(self, value):\n        \n        if isinstance(value, bool):\n            return messages.Variant.BOOL\n        elif isinstance(value, six.integer_types):\n            return messages.Variant.INT64\n        elif isinstance(value, float):\n            return messages.Variant.DOUBLE\n        elif isinstance(value, six.string_types):\n            return messages.Variant.STRING\n        elif isinstance(value, (list, tuple)):\n            \n            variant_priority = [None,\n                                messages.Variant.INT64,\n                                messages.Variant.DOUBLE,\n                                messages.Variant.STRING]\n            chosen_priority = 0\n            for v in value:\n                variant = self.__find_variant(v)\n                try:\n                    priority = variant_priority.index(variant)\n                except IndexError:\n                    priority = -1\n                if priority > chosen_priority:\n                    chosen_priority = priority\n            return variant_priority[chosen_priority]\n        \n        return None", "docstring": "Find the messages.Variant type that describes this value.\n\nArgs:\nvalue: The value whose variant type is being determined.\n\nReturns:\nThe messages.Variant value that best describes value's type,\nor None if it's a type we don't know how to handle.", "source": "juraj-google-style"}
{"code": "def Read(self, file_object):\n    try:\n        self.root_key = biplist.readPlist(file_object)\n    except (biplist.NotBinaryPlistException, biplist.InvalidPlistException) as exception:\n        raise IOError(exception)", "docstring": "Reads a plist from a file-like object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object containing plist data.\n\nRaises:\nIOError: if the plist file-like object cannot be read.\nOSError: if the plist file-like object cannot be read.", "source": "codesearchnet"}
{"code": "def find_input(self, stream):\n        \n\n        for i, input_x in enumerate(self.inputs):\n            if input_x[0].matches(stream):\n                return i", "docstring": "Find the input that responds to this stream.\n\nArgs:\nstream (DataStream): The stream to find\n\nReturns:\n(index, None): The index if found or None", "source": "juraj-google-style"}
{"code": "def get_physical_server_hardware(self):\n    uri = '{}/physicalServerHardware'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Information describing an 'SDX' partition including a list of physical server blades represented by a server\nhardware. Used with SDX enclosures only.\n\nReturns:\nResource", "source": "codesearchnet"}
{"code": "def _get_event_id(object_type: str) -> str:\n    key = _keys.event_counter(object_type)\n    DB.watch(key, pipeline=True)\n    count = DB.get_value(key)\n    DB.increment(key)\n    DB.execute()\n    if (count is None):\n        count = 0\n    return '{}_event_{:08d}'.format(object_type, int(count))", "docstring": "Return an event key for the event on the object type.\n\nThis must be a unique event id for the object.\n\nArgs:\nobject_type (str): Type of object\n\nReturns:\nstr, event id", "source": "codesearchnet"}
{"code": "def _compile_output_step(outputs):\n    \n    if not outputs:\n        raise GraphQLCompilationError(u'No fields were selected for output! Please mark at least '\n                                      u'one field with the @output directive.')\n\n    output_fields = {}\n    for output_name, output_context in six.iteritems(outputs):\n        location = output_context['location']\n        optional = output_context['optional']\n        graphql_type = output_context['type']\n\n        expression = None\n        existence_check = None\n        \n        if isinstance(location, FoldScopeLocation):\n            if optional:\n                raise AssertionError(u'Unreachable state reached, optional in fold: '\n                                     u'{}'.format(output_context))\n\n            if location.field == COUNT_META_FIELD_NAME:\n                expression = expressions.FoldCountContextField(location)\n            else:\n                expression = expressions.FoldedContextField(location, graphql_type)\n        else:\n            expression = expressions.OutputContextField(location, graphql_type)\n\n            if optional:\n                existence_check = expressions.ContextFieldExistence(location.at_vertex())\n\n        if existence_check:\n            expression = expressions.TernaryConditional(\n                existence_check, expression, expressions.NullLiteral)\n        \n\n        output_fields[output_name] = expression\n\n    return blocks.ConstructResult(output_fields)", "docstring": "Construct the final ConstructResult basic block that defines the output format of the query.\n\nArgs:\noutputs: dict, output name (string) -> output data dict, specifying the location\nfrom where to get the data, and whether the data is optional (and therefore\nmay be missing); missing optional data is replaced with 'null'\n\nReturns:\na ConstructResult basic block that constructs appropriate outputs for the query", "source": "juraj-google-style"}
{"code": "def fasta_files_equal(seq_file1, seq_file2):\n    \n\n    \n    seq1 = SeqIO.read(open(seq_file1), 'fasta')\n\n    \n    seq2 = SeqIO.read(open(seq_file2), 'fasta')\n\n    \n    if str(seq1.seq) == str(seq2.seq):\n        return True\n    else:\n        return False", "docstring": "Check equality of a FASTA file to another FASTA file\n\nArgs:\nseq_file1: Path to a FASTA file\nseq_file2: Path to another FASTA file\n\nReturns:\nbool: If the sequences are the same", "source": "juraj-google-style"}
{"code": "def add_arguments(self, parser, bootstrap=False):\n        \n        [item.add_argument(parser, bootstrap)\n         for item in self._get_items(bootstrap=False)]", "docstring": "Adds all items to the parser passed in.\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add all items to.\nbootstrap (bool): Flag to indicate whether you only want to mark\nbootstrapped items as required on the command-line.", "source": "juraj-google-style"}
{"code": "def all_indices_partitioned(self):\n    return self._all_indices_partitioned", "docstring": "all_indices_partitioned property.\n\nReturns:\nTrue if we are inside a control flow construct and not all pfor iterations\nmay be active.", "source": "github-repos"}
{"code": "def add_nested_compat_imports(module_builder, compat_api_versions, output_package):\n    imported_modules = module_builder.get_destination_modules()\n    for v in compat_api_versions:\n        for sv in compat_api_versions:\n            subcompat_module = _SUBCOMPAT_MODULE_TEMPLATE % (v, sv)\n            compat_module = _COMPAT_MODULE_TEMPLATE % sv\n            module_builder.copy_imports(compat_module, subcompat_module)\n            module_builder.copy_imports('%s.compat' % compat_module, '%s.compat' % subcompat_module)\n    compat_prefixes = tuple((_COMPAT_MODULE_TEMPLATE % v + '.' for v in compat_api_versions))\n    for imported_module in imported_modules:\n        if not imported_module.startswith(compat_prefixes):\n            continue\n        module_split = imported_module.split('.')\n        if len(module_split) > 3 and module_split[2] == 'compat':\n            src_module = '.'.join(module_split[:3])\n            src_name = module_split[3]\n            assert src_name != 'v1' and src_name != 'v2', imported_module\n        else:\n            src_module = '.'.join(module_split[:2])\n            src_name = module_split[2]\n            if src_name == 'compat':\n                continue\n        for compat_api_version in compat_api_versions:\n            module_builder.add_import(symbol=None, source_module_name='%s.%s' % (output_package, src_module), source_name=src_name, dest_module_name='compat.v%d.%s' % (compat_api_version, src_module), dest_name=src_name)", "docstring": "Adds compat.vN.compat.vK modules to module builder.\n\nTo avoid circular imports, we want to add __init__.py files under\ncompat.vN.compat.vK and under compat.vN.compat.vK.compat. For all other\nimports, we point to corresponding modules under compat.vK.\n\nArgs:\nmodule_builder: `_ModuleInitCodeBuilder` instance.\ncompat_api_versions: Supported compatibility versions.\noutput_package: Base output python package where generated API will be\nadded.", "source": "github-repos"}
{"code": "def get_actions(self, issues):\n        \n        actions = []\n        try:\n            for issue in issues:\n                action_item = self.determine_action(issue)\n                if action_item['action'] != AuditActions.IGNORE:\n                    action_item['owners'] = self.get_contacts(issue)\n                    actions.append(action_item)\n        finally:\n            db.session.rollback()\n        return actions", "docstring": "Returns a list of actions to executed\n\nArgs:\nissues (`list` of :obj:`RequiredTagsIssue`): List of issues\n\nReturns:\n`list` of `dict`", "source": "juraj-google-style"}
{"code": "def verify_response(response, status_code, content_type=None):\n    status = int(response.status.split(' ', 1)[0])\n    if (status != status_code):\n        return False\n    if (content_type is None):\n        return True\n    for (header, value) in response.headers:\n        if (header.lower() == 'content-type'):\n            return (value == content_type)\n    return False", "docstring": "Verifies that a response has the expected status and content type.\n\nArgs:\nresponse: The ResponseTuple to be checked.\nstatus_code: An int, the HTTP status code to be compared with response\nstatus.\ncontent_type: A string with the acceptable Content-Type header value.\nNone allows any content type.\n\nReturns:\nTrue if both status_code and content_type match, else False.", "source": "codesearchnet"}
{"code": "def to_timestamp(self, data):\n        \n        result = pd.Series(index=data.index)\n        _slice = ~data[self.col_name].isnull()\n\n        result[_slice] = data[_slice][self.col_name].astype('int64')\n        return result", "docstring": "Transform a datetime series into linux epoch.\n\nArgs:\ndata(pandas.DataFrame): DataFrame containins a column named as `self.col_name`.\n\nReturns:\npandas.Series", "source": "juraj-google-style"}
{"code": "def _cleanup_keys_with_confirmation(self, keys_to_delete):\n    \n    print('Round name: ', self.round_name)\n    print('Number of entities to be deleted: ', len(keys_to_delete))\n    if not keys_to_delete:\n      return\n    if self.verbose:\n      print('Entities to delete:')\n      idx = 0\n      prev_key_prefix = None\n      dots_printed_after_same_prefix = False\n      for k in keys_to_delete:\n        if idx >= 20:\n          print('   ...')\n          print('   ...')\n          break\n        key_prefix = (k.flat_path[0:1]\n                      if k.flat_path[0] in [u'SubmissionType', u'WorkType']\n                      else k.flat_path[0])\n        if prev_key_prefix == key_prefix:\n          if not dots_printed_after_same_prefix:\n            print('   ...')\n          dots_printed_after_same_prefix = True\n        else:\n          print('  ', k)\n          dots_printed_after_same_prefix = False\n          idx += 1\n        prev_key_prefix = key_prefix\n    print()\n    inp = input_str('Are you sure? (type \"yes\" without quotes to confirm): ')\n    if inp != 'yes':\n      return\n    with self.datastore_client.no_transact_batch() as batch:\n      for k in keys_to_delete:\n        batch.delete(k)\n    print('Data deleted')", "docstring": "Asks confirmation and then deletes entries with keys.\n\nArgs:\nkeys_to_delete: list of datastore keys for which entries should be deleted", "source": "juraj-google-style"}
{"code": "def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id):\n    assert isinstance(actor_handle_id, ActorHandleID)\n    assert isinstance(current_task_id, TaskID)\n    handle_id_hash = hashlib.sha1()\n    handle_id_hash.update(actor_handle_id.binary())\n    handle_id_hash.update(current_task_id.binary())\n    handle_id = handle_id_hash.digest()\n    return ActorHandleID(handle_id)", "docstring": "Deterministically compute an actor handle ID in the non-forked case.\n\nThis code path is used whenever an actor handle is pickled and unpickled\n(for example, if a remote function closes over an actor handle). Then,\nwhenever the actor handle is used, a new actor handle ID will be generated\non the fly as a deterministic function of the actor ID, the previous actor\nhandle ID and the current task ID.\n\nTODO(rkn): It may be possible to cause problems by closing over multiple\nactor handles in a remote function, which then get unpickled and give rise\nto the same actor handle IDs.\n\nArgs:\nactor_handle_id: The original actor handle ID.\ncurrent_task_id: The ID of the task that is unpickling the handle.\n\nReturns:\nAn ID for the new actor handle.", "source": "codesearchnet"}
{"code": "def check_hardware(self, expected):\n    if (len(expected) < 10):\n        expected += ('\\x00' * (10 - len(expected)))\n    (err,) = self.rpc(0, 3, expected, result_format='L')\n    if (err == 0):\n        return True\n    return False", "docstring": "Make sure the hardware version is what we expect.\n\nThis convenience function is meant for ensuring that we are talking to\na tile that has the correct hardware version.\n\nArgs:\nexpected (str): The expected hardware string that is compared\nagainst what is reported by the hardware_version RPC.\n\nReturns:\nbool: true if the hardware is the expected version, false otherwise", "source": "codesearchnet"}
{"code": "def _ParseCachedEntry8(self, value_data, cached_entry_offset):\n    \n    try:\n      cached_entry = self._ReadStructureFromByteStream(\n          value_data[cached_entry_offset:], cached_entry_offset,\n          self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse cached entry value with error: {0!s}'.format(\n              exception))\n\n    if cached_entry.signature not in (\n        self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1):\n      raise errors.ParseError('Unsupported cache entry signature')\n\n    cached_entry_data = value_data[cached_entry_offset:]\n\n    if cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_0:\n      data_type_map_name = 'appcompatcache_cached_entry_body_8_0'\n    elif cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_1:\n      data_type_map_name = 'appcompatcache_cached_entry_body_8_1'\n\n    data_type_map = self._GetDataTypeMap(data_type_map_name)\n    context = dtfabric_data_maps.DataTypeMapContext()\n\n    try:\n      cached_entry_body = self._ReadStructureFromByteStream(\n          cached_entry_data[12:], cached_entry_offset + 12,\n          data_type_map, context=context)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse cached entry body with error: {0!s}'.format(\n              exception))\n\n    data_offset = context.byte_size\n    data_size = cached_entry_body.data_size\n\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = (\n        12 + cached_entry.cached_entry_data_size)\n    cached_entry_object.insertion_flags = cached_entry_body.insertion_flags\n    cached_entry_object.last_modification_time = (\n        cached_entry_body.last_modification_time)\n    cached_entry_object.path = cached_entry_body.path\n    cached_entry_object.shim_flags = cached_entry_body.shim_flags\n\n    if data_size > 0:\n      cached_entry_object.data = cached_entry_data[\n          data_offset:data_offset + data_size]\n\n    return cached_entry_object", "docstring": "Parses a Windows 8.0 or 8.1 cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def remove_handler(self, name):\n        \n        index = None\n        for i, h in enumerate(self.capture_handlers):\n            if h['name'] == name:\n                index = i\n\n        if index is not None:\n            self.capture_handlers[index]['logger'].close()\n            del self.capture_handlers[index]", "docstring": "Remove a handler given a name\n\nNote, if multiple handlers have the same name the last matching\ninstance in the handler list will be removed.\n\nArgs:\nname:\nThe name of the handler to remove", "source": "juraj-google-style"}
{"code": "def _refresh(self, http):\n        \n        self.devshell_response = _SendRecv()\n        self.access_token = self.devshell_response.access_token\n        expires_in = self.devshell_response.expires_in\n        if expires_in is not None:\n            delta = datetime.timedelta(seconds=expires_in)\n            self.token_expiry = client._UTCNOW() + delta\n        else:\n            self.token_expiry = None", "docstring": "Refreshes the access token.\n\nArgs:\nhttp: unused HTTP object", "source": "juraj-google-style"}
{"code": "def process(self):\n    print('Artifacts to be collected: {0!s}'.format(self.artifacts))\n    hunt_args = flows_pb2.ArtifactCollectorFlowArgs(artifact_list=self.artifacts, use_tsk=self.use_tsk, ignore_interpolation_errors=True, apply_parsers=False)\n    return self._create_hunt('ArtifactCollectorFlow', hunt_args)", "docstring": "Construct and start new Artifact Collection hunt.\n\nReturns:\nThe newly created GRR hunt object.\n\nRaises:\nRuntimeError: if no items specified for collection.", "source": "codesearchnet"}
{"code": "def __init__(self, context):\n    \n    self._event_multiplexer = context.multiplexer\n    self._logdir = context.logdir\n    self._debugger_data_server = None\n    self._grpc_port = None", "docstring": "Constructs a debugger plugin for TensorBoard.\n\nThis plugin adds handlers for retrieving debugger-related data. The plugin\nalso starts a debugger data server once the log directory is passed to the\nplugin via the call to get_plugin_apps.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def _add_query_parameter(url, name, value):\n    \n    if value is None:\n        return url\n    else:\n        return update_query_params(url, {name: value})", "docstring": "Adds a query parameter to a url.\n\nReplaces the current value if it already exists in the URL.\n\nArgs:\nurl: string, url to add the query parameter to.\nname: string, query parameter name.\nvalue: string, query parameter value.\n\nReturns:\nUpdated query parameter. Does not update the url if value is None.", "source": "juraj-google-style"}
{"code": "def copy_binary(directory, origin_tag, new_tag, version, package):\n    print('Rename and copy binaries with %s to %s.' % (origin_tag, new_tag))\n    origin_binary = BINARY_STRING_TEMPLATE % (package, version, origin_tag)\n    new_binary = BINARY_STRING_TEMPLATE % (package, version, new_tag)\n    zip_ref = zipfile.ZipFile(os.path.join(directory, origin_binary), 'r')\n    try:\n        tmpdir = tempfile.mkdtemp()\n        os.chdir(tmpdir)\n        zip_ref.extractall()\n        zip_ref.close()\n        old_py_ver = re.search('(cp\\\\d\\\\d-cp\\\\d\\\\d)', origin_tag).group(1)\n        new_py_ver = re.search('(cp\\\\d\\\\d-cp\\\\d\\\\d)', new_tag).group(1)\n        wheel_file = os.path.join(tmpdir, '%s-%s.dist-info' % (package, version), 'WHEEL')\n        with open(wheel_file, 'r') as f:\n            content = f.read()\n        with open(wheel_file, 'w') as f:\n            f.write(content.replace(old_py_ver, new_py_ver))\n        zout = zipfile.ZipFile(directory + new_binary, 'w', zipfile.ZIP_DEFLATED)\n        zip_these_files = ['%s-%s.dist-info' % (package, version), '%s-%s.data' % (package, version), 'tensorflow', 'tensorflow_core']\n        for dirname in zip_these_files:\n            for root, _, files in os.walk(dirname):\n                for filename in files:\n                    zout.write(os.path.join(root, filename))\n        zout.close()\n    finally:\n        shutil.rmtree(tmpdir)", "docstring": "Rename and copy binaries for different python versions.\n\nArgs:\ndirectory: string of directory\norigin_tag: str of the old python version tag\nnew_tag: str of the new tag\nversion: the version of the package\npackage: str, name of the package", "source": "github-repos"}
{"code": "def fast_tpu_gather(params, indices, name=None):\n    with tf.name_scope(name):\n        dtype = params.dtype\n\n        def _gather(params, indices):\n            'Fast gather using one_hot and batch matmul.'\n            if (dtype != tf.float32):\n                params = tf.to_float(params)\n            shape = common_layers.shape_list(params)\n            indices_shape = common_layers.shape_list(indices)\n            ndims = params.shape.ndims\n            if (ndims == 2):\n                params = tf.expand_dims(params, axis=(- 1))\n            if (ndims > 3):\n                params = tf.reshape(params, [shape[0], shape[1], (- 1)])\n            gather_result = tf.matmul(tf.one_hot(indices, shape[1], dtype=params.dtype), params)\n            if (ndims == 2):\n                gather_result = tf.squeeze(gather_result, axis=(- 1))\n            if (ndims > 3):\n                shape[1] = indices_shape[1]\n                gather_result = tf.reshape(gather_result, shape)\n            if (dtype != tf.float32):\n                gather_result = tf.cast(gather_result, dtype)\n            return gather_result\n        if dtype.is_integer:\n            gather_result = tf.batch_gather(params, indices)\n        else:\n            gather_result = _gather(params, indices)\n        return gather_result", "docstring": "Fast gather implementation for models running on TPU.\n\nThis function use one_hot and batch matmul to do gather, which is faster\nthan gather_nd on TPU. For params that have dtype of int32 (sequences to\ngather from), batch_gather is used to keep accuracy.\n\nArgs:\nparams: A tensor from which to gather values.\n[batch_size, original_size, ...]\nindices: A tensor used as the index to gather values.\n[batch_size, selected_size].\nname: A string, name of the operation (optional).\n\nReturns:\ngather_result: A tensor that has the same rank as params.\n[batch_size, selected_size, ...]", "source": "codesearchnet"}
{"code": "def available_readers(as_dict=False):\n    \n    readers = []\n    for reader_configs in configs_for_reader():\n        try:\n            reader_info = read_reader_config(reader_configs)\n        except (KeyError, IOError, yaml.YAMLError):\n            LOG.warning(\"Could not import reader config from: %s\", reader_configs)\n            LOG.debug(\"Error loading YAML\", exc_info=True)\n            continue\n        readers.append(reader_info if as_dict else reader_info['name'])\n    return readers", "docstring": "Available readers based on current configuration.\n\nArgs:\nas_dict (bool): Optionally return reader information as a dictionary.\nDefault: False\n\nReturns: List of available reader names. If `as_dict` is `True` then\na list of dictionaries including additionally reader information\nis returned.", "source": "juraj-google-style"}
{"code": "def get_subnets(\n        target='ec2',\n        purpose='internal',\n        env='',\n        region='', ):\n    \n    account_az_dict = defaultdict(defaultdict)\n    subnet_id_dict = defaultdict(defaultdict)\n\n    subnet_url = '{0}/subnets/aws'.format(API_URL)\n    subnet_response = requests.get(subnet_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n\n    if not subnet_response.ok:\n        raise SpinnakerTimeout(subnet_response.text)\n\n    subnet_list = subnet_response.json()\n    for subnet in subnet_list:\n        LOG.debug('Subnet: %(account)s\\t%(region)s\\t%(target)s\\t%(vpcId)s\\t' '%(availabilityZone)s', subnet)\n\n        if subnet.get('target', '') == target:\n            availability_zone = subnet['availabilityZone']\n            account = subnet['account']\n            subnet_region = subnet['region']\n            subnet_id = subnet['id']\n            try:\n                if availability_zone not in account_az_dict[account][subnet_region]:\n                    account_az_dict[account][subnet_region].append(availability_zone)\n            except KeyError:\n                account_az_dict[account][subnet_region] = [availability_zone]\n            \n            if subnet['purpose'] == purpose:\n                try:\n                    subnet_id_dict[account][subnet_region].append(subnet_id)\n                except KeyError:\n                    subnet_id_dict[account][subnet_region] = [subnet_id]\n\n            LOG.debug('%s regions: %s', account, list(account_az_dict[account].keys()))\n\n    if all([env, region]):\n        try:\n            region_dict = {region: account_az_dict[env][region]}\n            region_dict['subnet_ids'] = {region: subnet_id_dict[env][region]}\n            LOG.debug('Region dict: %s', region_dict)\n            return region_dict\n        except KeyError:\n            raise SpinnakerSubnetError(env=env, region=region)\n\n    LOG.debug('AZ dict:\\n%s', pformat(dict(account_az_dict)))\n\n    return account_az_dict", "docstring": "Get all availability zones for a given target.\n\nArgs:\ntarget (str): Type of subnets to look up (ec2 or elb).\nenv (str): Environment to look up.\nregion (str): AWS Region to find Subnets for.\n\nReturns:\naz_dict: dictionary of  availbility zones, structured like\n{ $region: [ $avaibilityzones ] }\nor\n{ $account: $region: [ $availabilityzone] }", "source": "juraj-google-style"}
{"code": "def fmtVersion(*vsnparts):\n    if (len(vsnparts) < 1):\n        raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion', mesg='Not enough version parts to form a version string with.')\n    ret = '.'.join([str(part).lower() for part in vsnparts])\n    return ret", "docstring": "Join a string of parts together with a . separator.\n\nArgs:\n*vsnparts:\n\nReturns:", "source": "codesearchnet"}
{"code": "def find_proxy_plugin(component, plugin_name):\n    \n\n    reg = ComponentRegistry()\n\n    plugins = reg.load_extensions('iotile.proxy_plugin', comp_filter=component, class_filter=TileBusProxyPlugin,\n                                  product_name='proxy_plugin')\n\n    for _name, plugin in plugins:\n        if plugin.__name__ == plugin_name:\n            return plugin\n\n    raise DataError(\"Could not find proxy plugin module in registered components or installed distributions\",\n                    component=component, name=plugin_name)", "docstring": "Attempt to find a proxy plugin provided by a specific component\n\nArgs:\ncomponent (string): The name of the component that provides the plugin\nplugin_name (string): The name of the plugin to load\n\nReturns:\nTileBuxProxyPlugin: The plugin, if found, otherwise raises DataError", "source": "juraj-google-style"}
{"code": "def pre_save(self, instance, add: bool):\n        \n\n        if not isinstance(instance, AtomicSlugRetryMixin):\n            raise ImproperlyConfigured((\n                'Model \\'%s\\' does not inherit from AtomicSlugRetryMixin. '\n                'Without this, the LocalizedUniqueSlugField will not work.'\n            ) % type(instance).__name__)\n\n        slugs = LocalizedValue()\n\n        for lang_code, value in self._get_populate_values(instance):\n            if not value:\n                continue\n\n            slug = slugify(value, allow_unicode=True)\n\n            \n            \n            if instance.pk is not None:\n                current_slug = getattr(instance, self.name).get(lang_code)\n                if current_slug is not None:\n                    stripped_slug = current_slug[0:current_slug.rfind('-')]\n                    if slug == stripped_slug:\n                        slugs.set(lang_code, current_slug)\n                        continue\n\n            if self.include_time:\n                slug += '-%d' % datetime.now().microsecond\n\n            retries = getattr(instance, 'retries', 0)\n            if retries > 0:\n                \n                if not self.include_time:\n                    slug += '-'\n                slug += '%d' % retries\n\n            slugs.set(lang_code, slug)\n\n        setattr(instance, self.name, slugs)\n        return slugs", "docstring": "Ran just before the model is saved, allows us to built\nthe slug.\n\nArguments:\ninstance:\nThe model that is being saved.\n\nadd:\nIndicates whether this is a new entry\nto the database or an update.\n\nReturns:\nThe localized slug that was generated.", "source": "juraj-google-style"}
{"code": "def __init__(self, layers, scope='layered-network', summary_labels=()):\n        \n        self.layers_spec = layers\n        super(LayeredNetwork, self).__init__(scope=scope, summary_labels=summary_labels)\n\n        self.parse_layer_spec(layer_spec=self.layers_spec, layer_counter=Counter())", "docstring": "Single-stack layered network.\n\nArgs:\nlayers: List of layer specification dicts.", "source": "juraj-google-style"}
{"code": "def save(self, filething=None, padding=None):\n        \n\n        try:\n            self.tags._inject(filething.fileobj, padding)\n        except (IOError, error) as e:\n            reraise(self._Error, e, sys.exc_info()[2])\n        except EOFError:\n            raise self._Error(\"no appropriate stream found\")", "docstring": "save(filething=None, padding=None)\n\nSave a tag to a file.\n\nIf no filename is given, the one most recently loaded is used.\n\nArgs:\nfilething (filething)\npadding (:obj:`mutagen.PaddingFunction`)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def __init__(self, preread=None, precompile=None, file_path=None):\n        \n        self.index = None\n        self.compiled = None\n        if file_path:\n            self._index_file = file_path\n            self._index_handle = open(self._index_file, \"r\")\n            self._ParseIndex(preread, precompile)", "docstring": "Create new IndexTable object.\nArgs:\npreread: func, Pre-processing, applied to each field as it is read.\nprecompile: func, Pre-compilation, applied to each field before compiling.\nfile_path: String, Location of file to use as input.", "source": "juraj-google-style"}
{"code": "def build_variant(variant, case_obj, case_id=None, gq_treshold=None):\n    \n    variant_obj = None\n\n    sv = False\n    \n    if variant.var_type == 'sv':\n        sv = True\n\n    \n    variant_id = get_variant_id(variant)\n\n    ref = variant.REF\n    \n    \n    alt = variant.ALT[0]\n\n    coordinates = get_coords(variant)\n    chrom = coordinates['chrom']\n    pos = coordinates['pos']\n\n    \n    found_homozygote = 0\n    found_hemizygote = 0\n\n    \n    if sv:\n        found_variant = True\n    else:\n        found_variant = False\n        for ind_obj in case_obj['individuals']:\n            ind_id = ind_obj['ind_id']\n            \n            ind_pos = ind_obj['ind_index']\n            gq = int(variant.gt_quals[ind_pos])\n            if (gq_treshold and gq < gq_treshold):\n                continue\n\n            genotype = GENOTYPE_MAP[variant.gt_types[ind_pos]]\n\n            if genotype in ['het', 'hom_alt']:\n                LOG.debug(\"Found variant\")\n                found_variant = True\n\n                \n                \n                if chrom in ['X','Y'] and ind_obj['sex'] == 1:\n                    if not check_par(chrom, pos):\n                        LOG.debug(\"Found hemizygous variant\")\n                        found_hemizygote = 1\n\n                if genotype == 'hom_alt':\n                    LOG.debug(\"Found homozygote alternative variant\")\n                    found_homozygote = 1\n\n    if found_variant:\n        \n        variant_obj = Variant(\n            variant_id=variant_id,\n            chrom=chrom,\n            pos=pos,\n            end=coordinates['end'],\n            ref=ref,\n            alt=alt,\n            end_chrom=coordinates['end_chrom'],\n            sv_type = coordinates['sv_type'],\n            sv_len = coordinates['sv_length'],\n            case_id = case_id,\n            homozygote = found_homozygote,\n            hemizygote = found_hemizygote,\n            is_sv = sv,\n            id_column = variant.ID,\n        )\n\n    return variant_obj", "docstring": "Return a Variant object\n\nTake a cyvcf2 formated variant line and return a models.Variant.\n\nIf criterias are not fullfilled, eg. variant have no gt call or quality\nis below gq treshold then return None.\n\nArgs:\nvariant(cyvcf2.Variant)\ncase_obj(Case): We need the case object to check individuals sex\ncase_id(str): The case id\ngq_treshold(int): Genotype Quality treshold\n\nReturn:\nformated_variant(models.Variant): A variant dictionary", "source": "juraj-google-style"}
{"code": "def _on_skip(self, record):\n    logging.info('Reason to skip: %s', record.details)\n    logging.info(RESULT_LINE_TEMPLATE, record.test_name, record.result)\n    self.on_skip(record)", "docstring": "Proxy function to guarantee the base implementation of on_skip is\ncalled.\n\nArgs:\nrecord: records.TestResultRecord, a copy of the test record for\nthis test, containing all information of the test execution\nincluding exception objects.", "source": "github-repos"}
{"code": "def from_preset(preset):\n    if (preset == 'vesta_2019'):\n        cut_offs = loadfn(os.path.join(_directory, 'vesta_cutoffs.yaml'))\n        return CutOffDictNN(cut_off_dict=cut_offs)\n    else:\n        raise ValueError('Unrecognised preset: {}'.format(preset))", "docstring": "Initialise a CutOffDictNN according to a preset set of cut-offs.\n\nArgs:\npreset (str): A preset name. The list of supported presets are:\n\n- \"vesta_2019\": The distance cut-offs used by the VESTA\nvisualisation program.\n\nReturns:\nA CutOffDictNN using the preset cut-off dictionary.", "source": "codesearchnet"}
{"code": "def expand_value_set_url(self, url: str) -> Optional[value_set_pb2.ValueSet]:\n    for resolver in self._resolvers:\n        expanded_value_set = resolver.expand_value_set_url(url)\n        if expanded_value_set is not None:\n            return expanded_value_set\n    return None", "docstring": "Retrieves the expanded value set definition for the given URL.\n\nAttempts to expand the value set using definitions available to the\ninstance's package manager. If the expansion can not be performed with\navailable resources, makes network calls to a terminology service to perform\nthe expansion.\n\nArgs:\nurl: The URL of the value set to expand.\n\nReturns:\nA value set protocol buffer expanded to include the codes it represents.", "source": "github-repos"}
{"code": "def run(self, sensor_graph, model):\n        \n\n        \n        \n\n        did_downgrade = False\n\n        for node, inputs, _outputs in sensor_graph.iterate_bfs():\n            can_downgrade = False\n\n            if node.func_name != u'copy_all_a':\n                continue\n\n            input_a, trigger_a = node.inputs[0]\n\n            \n            if input_a.selector.match_type in (DataStream.InputType, DataStream.UnbufferedType):\n                can_downgrade = True\n            elif isinstance(trigger_a, InputTrigger) and trigger_a.comp_string == u'==' and trigger_a.use_count and trigger_a.reference == 1:\n                can_downgrade = True\n            elif isinstance(trigger_a, TrueTrigger) and not input_a.selector.buffered:\n                \n                \n                \n                \n                \n                can_downgrade = True\n                for in_node in inputs:\n                    if input_a.matches(in_node.stream) and in_node.func_name == u'copy_all_a' and in_node.input_a.match_type not in (DataStream.InputType, DataStream.UnbufferedType):\n                        can_downgrade = False\n                        break\n\n            if can_downgrade:\n                did_downgrade = True\n                node.set_func(u'copy_latest_a', sensor_graph.find_processing_function(u'copy_latest_a'))\n\n        return did_downgrade", "docstring": "Run this optimization pass on the sensor graph\n\nIf necessary, information on the device model being targeted\ncan be found in the associated model argument.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph to optimize\nmodel (DeviceModel): The device model we're using", "source": "juraj-google-style"}
{"code": "def pseudos_with_symbols(self, symbols):\n    pseudos = self.select_symbols(symbols, ret_list=True)\n    found_symbols = [p.symbol for p in pseudos]\n    duplicated_elements = [s for (s, o) in collections.Counter(found_symbols).items() if (o > 1)]\n    if duplicated_elements:\n        raise ValueError(('Found multiple occurrences of symbol(s) %s' % ', '.join(duplicated_elements)))\n    missing_symbols = [s for s in symbols if (s not in found_symbols)]\n    if missing_symbols:\n        raise ValueError(('Missing data for symbol(s) %s' % ', '.join(missing_symbols)))\n    return pseudos", "docstring": "Return the pseudos with the given chemical symbols.\n\nRaises:\nValueError if one of the symbols is not found or multiple occurences are present.", "source": "codesearchnet"}
{"code": "def select(self, selector):\n    if self.closed():\n        raise ValueError('Attempt to call select() on a closed Queryable.')\n    try:\n        selector = make_selector(selector)\n    except ValueError:\n        raise TypeError('select() parameter selector={selector} cannot beconverted into a callable selector'.format(selector=repr(selector)))\n    if (selector is identity):\n        return self\n    return self._create(imap(selector, self))", "docstring": "Transforms each element of a sequence into a new form.\n\nEach element of the source is transformed through a selector function\nto produce a corresponding element in teh result sequence.\n\nIf the selector is identity the method will return self.\n\nNote: This method uses deferred execution.\n\nArgs:\nselector: A unary function mapping a value in the source sequence\nto the corresponding value in the generated generated sequence.\nThe single positional argument to the selector function is the\nelement value.  The return value of the selector function\nshould be the corresponding element of the result sequence.\n\nReturns:\nA Queryable over generated sequence whose elements are the result\nof invoking the selector function on each element of the source\nsequence.\n\nRaises:\nValueError: If this Queryable has been closed.\nTypeError: If selector is not callable.", "source": "codesearchnet"}
{"code": "def _load_config_file(path):\n    \n    with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:\n        conf = yaml.load(f)\n        return conf", "docstring": "Loads a test config file.\n\nThe test config file has to be in YAML format.\n\nArgs:\npath: A string that is the full path to the config file, including the\nfile name.\n\nReturns:\nA dict that represents info in the config file.", "source": "juraj-google-style"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    super(APFSTime, self)._CopyFromDateTimeString(time_string)\n\n    if (self._timestamp is None or self._timestamp < self._INT64_MIN or\n        self._timestamp > self._INT64_MAX):\n      raise ValueError('Date time value not supported.')", "docstring": "Copies a APFS timestamp from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the date and time value is not supported.", "source": "juraj-google-style"}
{"code": "def _find_dtype_iterable(iterable: Iterable[Any], dtype: Optional[dtypes.DType]) -> Optional[dtypes.DType]:\n    if dtype is not None:\n        return dtype\n    for x in iterable:\n        dtype = _find_dtype(x, dtype)\n    return dtype", "docstring": "Find the preferred dtype of a list of objects.\n\nThis will go over the iterable, and use the first object with a preferred\ndtype. The dtype passed has highest priority if it is not None.\n\nArgs:\niterable: an iterable with things that might have a dtype.\ndtype: an overriding dtype, or None.\n\nReturns:\nan optional dtype.", "source": "github-repos"}
{"code": "def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):\n    \n    my_structures = []\n\n    \n    gene = ssbio.utils.request_json(link='http:\n                                    outfile='{}_{}.json'.format(bigg_model, bigg_gene),\n                                    outdir=cache_dir,\n                                    force_rerun_flag=force_rerun)\n\n    uniprots = []\n    if 'database_links' in gene:\n        if 'UniProt' in gene['database_links']:\n            uniprots = [x['id'] for x in gene['database_links']['UniProt']]\n        elif 'NCBI GI' in gene['database_links']:\n            uniprots = []\n            gis = [x['id'] for x in gene['database_links']['NCBI GI']]\n            gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()\n            uniprots.extend(gi_uniprots)\n            uniprots = ssbio.utils.flatlist_dropdup(uniprots)\n            uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]\n\n    if uniprots:\n        for u in uniprots:\n            get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)\n            if get_best_structure:\n                for best_structure in get_best_structure:\n                    my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))\n\n    return my_structures", "docstring": "Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.\n\nArgs:\nbigg_model: BiGG Model ID\nbigg_gene: BiGG Gene ID\n\nReturns:\nlist: rank-ordered list of tuples of (pdb_id, chain_id)", "source": "juraj-google-style"}
{"code": "def __init__(self, bucket, key, info=None, context=None):\n    \n    if context is None:\n      context = google.datalab.Context.default()\n    self._context = context\n    self._api = _api.Api(context)\n    self._bucket = bucket\n    self._key = key\n    self._info = info", "docstring": "Initializes an instance of an Object.\n\nArgs:\nbucket: the name of the bucket containing the object.\nkey: the key of the object.\ninfo: the information about the object if available.\ncontext: an optional Context object providing project_id and credentials. If a specific\nproject id or credentials are unspecified, the default ones configured at the global\nlevel are used.", "source": "juraj-google-style"}
{"code": "def _GetTitleFromChromeWebStore(self, extension_identifier):\n    if (extension_identifier in self._extensions):\n        return self._extensions.get(extension_identifier)\n    page_content = self._GetChromeWebStorePage(extension_identifier)\n    if (not page_content):\n        logger.warning('[{0:s}] no data returned for extension identifier: {1:s}'.format(self.NAME, extension_identifier))\n        return None\n    (first_line, _, _) = page_content.partition('\\n')\n    match = self._TITLE_RE.search(first_line)\n    name = None\n    if match:\n        title = match.group(1)\n        if title.startswith('Chrome Web Store - '):\n            name = title[19:]\n        elif title.endswith('- Chrome Web Store'):\n            name = title[:(- 19)]\n    if (not name):\n        self._extensions[extension_identifier] = 'UNKNOWN'\n        return None\n    self._extensions[extension_identifier] = name\n    return name", "docstring": "Retrieves the name of the extension from the Chrome store website.\n\nArgs:\nextension_identifier (str): Chrome extension identifier.\n\nReturns:\nstr: name of the extension or None.", "source": "codesearchnet"}
{"code": "def __init__(self, max_batch_tokens: int):\n    self.max_batch_tokens = max_batch_tokens\n    self._setup_metrics()", "docstring": "Initialize metrics for continuous batch processor.\n\nArgs:\nmax_batch_tokens: Maximum number of tokens in a batch", "source": "github-repos"}
{"code": "def update(self, car_id=None, wake_if_asleep=False, force=False):\n    cur_time = time.time()\n    with self.__lock:\n        last_update = self._last_attempted_update_time\n        if (force or ((cur_time - last_update) > self.update_interval)):\n            cars = self.get_vehicles()\n            for car in cars:\n                self.car_online[car['id']] = (car['state'] == 'online')\n            self._last_attempted_update_time = cur_time\n        update_succeeded = False\n        for (id_, value) in self.car_online.items():\n            if ((car_id is not None) and (car_id != id_)):\n                continue\n            if (value and ((id_ in self.__update) and self.__update[id_]) and (force or (id_ not in self._last_update_time) or ((cur_time - self._last_update_time[id_]) > self.update_interval))):\n                try:\n                    data = self.get(id_, 'data', wake_if_asleep)\n                except TeslaException:\n                    data = None\n                if (data and data['response']):\n                    response = data['response']\n                    self.__climate[car_id] = response['climate_state']\n                    self.__charging[car_id] = response['charge_state']\n                    self.__state[car_id] = response['vehicle_state']\n                    self.__driving[car_id] = response['drive_state']\n                    self.__gui[car_id] = response['gui_settings']\n                    self.car_online[car_id] = (response['state'] == 'online')\n                    self._last_update_time[car_id] = time.time()\n                    update_succeeded = True\n        return update_succeeded", "docstring": "Update all vehicle attributes in the cache.\n\nThis command will connect to the Tesla API and first update the list of\nonline vehicles assuming no attempt for at least the [update_interval].\nIt will then update all the cached values for cars that are awake\nassuming no update has occurred for at least the [update_interval].\n\nArgs:\ninst (Controller): The instance of a controller\ncar_id (string): The vehicle to update. If None, all cars are updated.\nwake_if_asleep (bool): Keyword arg to force a vehicle awake. This is\nprocessed by the wake_up decorator.\nforce (bool): Keyword arg to force a vehicle update regardless of the\nupdate_interval\n\nReturns:\nTrue if any update succeeded for any vehicle else false\n\nThrows:\nRetryLimitError", "source": "codesearchnet"}
{"code": "def wiki_request(self, params):\n        \n\n        params[\"format\"] = \"json\"\n        if \"action\" not in params:\n            params[\"action\"] = \"query\"\n\n        limit = self._rate_limit\n        last_call = self._rate_limit_last_call\n        if limit and last_call and last_call + self._min_wait > datetime.now():\n            \n            wait_time = (last_call + self._min_wait) - datetime.now()\n            time.sleep(int(wait_time.total_seconds()))\n\n        req = self._get_response(params)\n\n        if self._rate_limit:\n            self._rate_limit_last_call = datetime.now()\n\n        return req", "docstring": "Make a request to the MediaWiki API using the given search\nparameters\n\nArgs:\nparams (dict): Request parameters\nReturns:\nA parsed dict of the JSON response\nNote:\nUseful when wanting to query the MediaWiki site for some \\\nvalue that is not part of the wrapper API", "source": "juraj-google-style"}
{"code": "def _get_password(params):\n        \n        user_name = params['user']\n        service_name = params['host'] + ':' + params['driver']\n        return keyring.get_password(service_name=service_name,\n                                    username=user_name)", "docstring": "Get the password for a database connection from :mod:`keyring`\n\nArgs:\nparams (dict): database configuration, as defined in :mod:`ozelot.config`\n\nReturns:\nstr: password", "source": "juraj-google-style"}
{"code": "class TFMinLengthLogitsProcessor(TFLogitsProcessor):\n\n    def __init__(self, min_length: int, eos_token_id: int):\n        if not isinstance(min_length, int) or min_length < 0:\n            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')\n        if not isinstance(eos_token_id, int) or eos_token_id < 0:\n            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')\n        self.min_length = min_length\n        self.eos_token_id = eos_token_id\n\n    def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:\n        eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id\n        scores = tf.where(eos_token_id_mask, float('-inf'), scores)\n        return scores\n\n    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:\n        scores = tf.cond(tf.less(cur_len, self.min_length), lambda: self._apply_eos_token_mask(scores), lambda: tf.identity(scores))\n        return scores", "docstring": "[`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.\n\nArgs:\nmin_length (`int`):\nThe minimum length below which the score of `eos_token_id` is set to `-float(\"Inf\")`.\neos_token_id (`int`):\nThe id of the *end-of-sequence* token.", "source": "github-repos"}
{"code": "def _extract_namespace_ast_node(self, desc):\n        \n        if len(desc) == 0 or not isinstance(desc[0], AstNamespace):\n            if self._debug:\n                self._logger.info('Description: %r', desc)\n            raise InvalidSpec('First declaration in a stone must be '\n                              'a namespace. Possibly caused by preceding '\n                              'errors.', desc[0].lineno, desc[0].path)\n        for item in desc[1:]:\n            if isinstance(item, AstNamespace):\n                raise InvalidSpec('Only one namespace declaration per file.',\n                                  item[0].lineno, item[0].path)\n        return desc.pop(0)", "docstring": "Checks that the namespace is declared first in the spec, and that only\none namespace is declared.\n\nArgs:\ndesc (List[stone.stone.parser.ASTNode]): All AST nodes in a spec\nfile in the order they were defined.\n\nReturn:\nstone.frontend.ast.AstNamespace: The namespace AST node.", "source": "juraj-google-style"}
{"code": "def get_functionalHome(self, functionalHomeType: type) -> FunctionalHome:\n        \n        for x in self.functionalHomes:\n            if isinstance(x, functionalHomeType):\n                return x\n\n        return None", "docstring": "gets the specified functionalHome\n\nArgs:\nfunctionalHome(type): the type of the functionalHome which should be returned\n\nReturns:\nthe FunctionalHome or None if it couldn't be found", "source": "juraj-google-style"}
{"code": "def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True):\n    mkdir_or_exist(frame_dir)\n    if (max_num == 0):\n        task_num = (self.frame_cnt - start)\n    else:\n        task_num = min((self.frame_cnt - start), max_num)\n    if (task_num <= 0):\n        raise ValueError('start must be less than total frame number')\n    if (start > 0):\n        self._set_real_position(start)\n\n    def write_frame(file_idx):\n        img = self.read()\n        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))\n        cv2.imwrite(filename, img)\n    if show_progress:\n        track_progress(write_frame, range(file_start, (file_start + task_num)))\n    else:\n        for i in range(task_num):\n            img = self.read()\n            if (img is None):\n                break\n            filename = osp.join(frame_dir, filename_tmpl.format((i + file_start)))\n            cv2.imwrite(filename, img)", "docstring": "Convert a video to frame images\n\nArgs:\nframe_dir (str): Output directory to store all the frame images.\nfile_start (int): Filenames will start from the specified number.\nfilename_tmpl (str): Filename template with the index as the\nplaceholder.\nstart (int): The starting frame index.\nmax_num (int): Maximum number of frames to be written.\nshow_progress (bool): Whether to show a progress bar.", "source": "codesearchnet"}
{"code": "def get_type(mime=None, ext=None):\n    \n    for kind in types:\n        if kind.extension is ext or kind.mime is mime:\n            return kind\n    return None", "docstring": "Returns the file type instance searching by\nMIME type or file extension.\n\nArgs:\next: file extension string. E.g: jpg, png, mp4, mp3\nmime: MIME string. E.g: image/jpeg, video/mpeg\n\nReturns:\nThe matched file type instance. Otherwise None.", "source": "juraj-google-style"}
{"code": "def add_record(self, record):\n        \n        record.update_record()\n        if record.result == TestResultEnums.TEST_RESULT_SKIP:\n            self.skipped.append(record)\n            return\n        self.executed.append(record)\n        if record.result == TestResultEnums.TEST_RESULT_FAIL:\n            self.failed.append(record)\n        elif record.result == TestResultEnums.TEST_RESULT_PASS:\n            self.passed.append(record)\n        else:\n            self.error.append(record)", "docstring": "Adds a test record to test result.\n\nA record is considered executed once it's added to the test result.\n\nAdding the record finalizes the content of a record, so no change\nshould be made to the record afterwards.\n\nArgs:\nrecord: A test record object to add.", "source": "juraj-google-style"}
{"code": "def create_downloader_of_type(type_name):\n    \n    downloaders = available_downloaders()\n\n    if type_name not in downloaders.keys():\n        raise UnknownDownloaderException('Unknown downloader: %s' % (type_name,))\n\n    return downloaders[type_name]()", "docstring": "Create an instance of the downloader with the given name.\n\nArgs:\ntype_name: The name of a downloader.\n\nReturns:\nAn instance of the downloader with the given type.", "source": "juraj-google-style"}
{"code": "def run_commands(self, program_language, program_main):\n        \n        \n        if program_language == 'python':\n            python_exe = sys.executable\n            ptvsd_host = 'localhost'\n            if self.args.docker:\n                \n                python_exe = 'python'\n                ptvsd_host = '0.0.0.0'\n\n            if self.args.vscd:\n                self.update_environment()  \n                command = [\n                    python_exe,\n                    '-m',\n                    'ptvsd',\n                    '--host',\n                    ptvsd_host,\n                    '--port',\n                    self.args.vscd_port,\n                    '--wait',\n                    '{}.py'.format(program_main),\n                ]\n            else:\n                command = [python_exe, '.', program_main]\n\n            \n            cli_command = [str(s) for s in command + self.profile.get('profile_args').standard]\n\n            \n            \n            print_command = ' '.join(\n                str(s) for s in command + self.profile.get('profile_args').masked\n            )\n            if self.args.unmask:\n                \n                print_command = ' '.join(\n                    str(s) for s in command + self.profile.get('profile_args').quoted\n                )\n\n        elif program_language == 'java':\n            if self.args.docker:\n                command = ['java', '-cp', self.tcex_json.get('class_path', './target/*')]\n            else:\n                command = [\n                    self.tcex_json.get('java_path', program_language),\n                    '-cp',\n                    self.tcex_json.get('class_path', './target/*'),\n                ]\n\n            \n            cli_command = command + self.profile.get('profile_args').standard + [program_main]\n\n            \n            print_command = ' '.join(\n                command + self.profile.get('profile_args').masked + [program_main]\n            )\n            if self.args.unmask:\n                print_command = ' '.join(\n                    command + self.profile.get('profile_args').quoted + [program_main]\n                )\n        return {'cli_command': cli_command, 'print_command': print_command}", "docstring": "Return the run Print Command.\n\nArgs:\nprogram_language (str): The language of the current App/Project.\nprogram_main (str): The executable name.\n\nReturns:\ndict: A dictionary containing the run command and a printable version of the command.", "source": "juraj-google-style"}
{"code": "def convert(model_flags: _model_flags_pb2.ModelFlags, conversion_flags: _conversion_flags_pb2.ConverterFlags, input_data_str: Optional[str]=None, debug_info_str: Optional[str]=None):\n    try:\n        return wrap_converter.wrapped_convert(model_flags.SerializeToString(), conversion_flags.SerializeToString(), input_data_str, debug_info_str)\n    except Exception as e:\n        converter_error = ConverterError(str(e))\n        for error_data in _metrics_wrapper.retrieve_collected_errors():\n            converter_error.append_error(error_data)\n            if error_data.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR and (not conversion_flags.guarantee_all_funcs_one_use):\n                conversion_flags.guarantee_all_funcs_one_use = True\n                return convert(model_flags, conversion_flags, input_data_str, debug_info_str)\n        raise converter_error", "docstring": "Converts `input_data_str` to a TFLite model.\n\nArgs:\nmodel_flags: Proto describing model properties, see `model_flags.proto`.\nconversion_flags: Proto describing conversion properties, see\n`compiler/mlir/lite/converter_flags.proto`.\ninput_data_str: Input data in serialized form (e.g. a graphdef is common, or\nit can be hlo text or proto)\ndebug_info_str: Serialized `GraphDebugInfo` proto describing logging\ninformation.\n\nReturns:\nConverted model in serialized form (e.g. a TFLITE model is common).\nRaises:\nConverterError: When conversion fails in TFLiteConverter, usually due to\nops not being supported.", "source": "github-repos"}
{"code": "def get_gcc_version():\n    key = 'gcc_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    if err and FLAGS.debug:\n        print('Error in detecting GCC version:\\n %s' % str(err))\n    return out.strip(b'\\n')", "docstring": "Retrieves version of GCC detected.\n\nReturns:\nString that is the version of GCC.\ne.g. '7.3.0'", "source": "github-repos"}
{"code": "def _CalculateHashesFileEntry(self, file_system, file_entry, parent_full_path, output_writer):\n    full_path = file_system.JoinPath([parent_full_path, file_entry.name])\n    for data_stream in file_entry.data_streams:\n        hash_value = self._CalculateHashDataStream(file_entry, data_stream.name)\n        display_path = self._GetDisplayPath(file_entry.path_spec, full_path, data_stream.name)\n        output_writer.WriteFileHash(display_path, (hash_value or 'N/A'))\n    for sub_file_entry in file_entry.sub_file_entries:\n        self._CalculateHashesFileEntry(file_system, sub_file_entry, full_path, output_writer)", "docstring": "Recursive calculates hashes starting with the file entry.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system.\nfile_entry (dfvfs.FileEntry): file entry.\nparent_full_path (str): full path of the parent file entry.\noutput_writer (StdoutWriter): output writer.", "source": "codesearchnet"}
{"code": "def Add(self, entry):\n    if not isinstance(entry, PasswdMapEntry):\n        raise TypeError\n    return super(PasswdMap, self).Add(entry)", "docstring": "Add a new object, verify it is a PasswdMapEntry instance.\n\nArgs:\nentry: A PasswdMapEntry instance.\n\nReturns:\nTrue if added successfully, False otherwise.\n\nRaises:\nTypeError: The argument is of the wrong type.", "source": "github-repos"}
{"code": "def collective_leader(cluster_spec, task_type, task_id):\n    cluster_spec = normalize_cluster_spec(cluster_spec)\n    if not cluster_spec.as_dict():\n        return ''\n    _validate_cluster_spec(cluster_spec, task_type, task_id)\n    if task_type == 'evaluator':\n        return ''\n    if 'chief' in cluster_spec.jobs:\n        return '/job:chief/replica:0/task:0'\n    assert 'worker' in cluster_spec.jobs\n    return '/job:worker/replica:0/task:0'", "docstring": "Return the job name for the leader of for collective ops.\n\nArgs:\ncluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the\ncluster configurations.\ntask_type: the task type in the cluster.\ntask_id: the task id in the cluster.\n\nReturns:\na string indicating the leader job name or empty string if no need to set\nleader job.", "source": "github-repos"}
{"code": "def split_by_criteria(dictionary, keys=None, prefix=None):\n    keys = (keys or [])\n    keys = set(keys)\n    included_items = {k: dictionary[k] for k in dictionary.keys() if ((k in keys) or (prefix and k.startswith(prefix)))}\n    excluded_items = {k: dictionary[k] for k in dictionary.keys() if (k not in included_items)}\n    return SplitResultSpec(included=included_items, excluded=excluded_items)", "docstring": "Split a dictionary in two by the provided keys.\n\nArgs:\ndictionary (dict[str, object]): A Python dictionary\nkeys (sequence [str]): A sequence of keys which will be added the split criteria\nprefix (str): A prefix which will be added the split criteria\n\nReturns:\n`SplitResultSpec` : A collections.namedtuple with the following attributes:\n\n* Args:\nincluded (dict[str, object]: A dictionary with the keys included in the criteria.\nexcluded (dict[str, object]: A dictionary with the keys not included in the criteria.", "source": "codesearchnet"}
{"code": "def RegisterDecompressor(cls, decompressor):\n    \n    compression_method = decompressor.COMPRESSION_METHOD.lower()\n    if compression_method in cls._decompressors:\n      raise KeyError(\n          'Decompressor for compression method: {0:s} already set.'.format(\n              decompressor.COMPRESSION_METHOD))\n\n    cls._decompressors[compression_method] = decompressor", "docstring": "Registers a decompressor for a specific compression method.\n\nArgs:\ndecompressor (type): decompressor class.\n\nRaises:\nKeyError: if the corresponding decompressor is already set.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.GetChanges = channel.unary_stream(\n        '/pb.Data/GetChanges',\n        request_serializer=lookout_dot_sdk_dot_service__data__pb2.ChangesRequest.SerializeToString,\n        response_deserializer=lookout_dot_sdk_dot_service__data__pb2.Change.FromString,\n        )\n    self.GetFiles = channel.unary_stream(\n        '/pb.Data/GetFiles',\n        request_serializer=lookout_dot_sdk_dot_service__data__pb2.FilesRequest.SerializeToString,\n        response_deserializer=lookout_dot_sdk_dot_service__data__pb2.File.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _invalid_string_quote(self, quote, row, correct_quote=None, col=None):\n        \n        if not correct_quote:\n            correct_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)\n\n        self.add_message(\n            'invalid-string-quote',\n            line=row,\n            args=(quote, correct_quote),\n            **self.get_offset(col)\n        )", "docstring": "Add a message for an invalid string literal quote.\n\nArgs:\nquote: The quote characters that were found.\nrow: The row number the quote character was found on.\ncorrect_quote: The quote characters that is required. If None\n(default), will use the one from the config.\ncol: The column the quote characters were found on.", "source": "juraj-google-style"}
{"code": "def square(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:\n    \n    x = times/period+phase/np.pi\n    return amp*(2*(2*np.floor(x) - np.floor(2*x)) + 1).astype(np.complex_)", "docstring": "Continuous square wave.\n\nArgs:\ntimes: Times to output wave for.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt.\nphase: Pulse phase.", "source": "juraj-google-style"}
{"code": "def all_label_values(self, label_list_ids=None):\n    values = set()\n    for label_list in self.label_lists.values():\n        if ((label_list_ids is None) or (label_list.idx in label_list_ids)):\n            values = values.union(label_list.label_values())\n    return values", "docstring": "Return a set of all label-values occurring in this utterance.\n\nArgs:\nlabel_list_ids (list): If not None, only label-values from\nlabel-lists with an id contained in this list\nare considered.\n\nReturns:\n:class:`set`: A set of distinct label-values.", "source": "codesearchnet"}
{"code": "def unset(entity, *types):\n    if (not types):\n        types = (TypedField,)\n    fields = list(entity._fields.keys())\n    remove = (x for x in fields if isinstance(x, types))\n    for field in remove:\n        del entity._fields[field]", "docstring": "Unset the TypedFields on the input `entity`.\n\nArgs:\nentity: A mixbox.Entity object.\n*types: A variable-length list of TypedField subclasses. If not\nprovided, defaults to TypedField.", "source": "codesearchnet"}
{"code": "def feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:\n    if self.needs_feedback:\n        if self.multi_objective and isinstance(reward, float):\n            reward = (reward,)\n        elif not self.multi_objective and isinstance(reward, tuple):\n            if len(reward) != 1:\n                raise ValueError(f'{self!r} is single objective, but the reward {reward!r} contains multiple objectives.')\n            reward = reward[0]\n        self._feedback(dna, reward)\n    self._num_feedbacks += 1", "docstring": "Feedback a completed trial to the algorithm.\n\nArgs:\ndna: a DNA object.\nreward: reward for the DNA. It is a float if `self.multi_objective`\nreturns False, otherwise it's a tuple of floats.", "source": "github-repos"}
{"code": "def __init__(self, plugin_name=None, text=None):\n    \n    super(AnalysisReport, self).__init__()\n    self.filter_string = None\n    self.plugin_name = plugin_name\n    self.report_array = None\n    self.report_dict = None\n    \n    self.text = text\n    self.time_compiled = None", "docstring": "Initializes the analysis report.\n\nArgs:\nplugin_name (Optional[str]): name of the analysis plugin that generated\nthe report.\ntext (Optional[str]): report text.", "source": "juraj-google-style"}
{"code": "def _add_strings_to_commastring(self, field, strings):\n    allstringsadded = True\n    for string in strings:\n        if (not self._add_string_to_commastring(field, string)):\n            allstringsadded = False\n    return allstringsadded", "docstring": "Add a list of strings to a comma separated list of strings\n\nArgs:\nfield (str): Field containing comma separated list\nstrings (List[str]): list of strings to add\n\nReturns:\nbool: True if all strings added or False if any already present.", "source": "codesearchnet"}
{"code": "def load(file_path, parse_line_fn):\n    vocabulary = []\n    embeddings = []\n    embeddings_dim = None\n    for line in tf.gfile.GFile(file_path):\n        (token, embedding) = parse_line_fn(line)\n        if (not embeddings_dim):\n            embeddings_dim = len(embedding)\n        elif (embeddings_dim != len(embedding)):\n            raise ValueError('Inconsistent embedding dimension detected, %d != %d for token %s', embeddings_dim, len(embedding), token)\n        vocabulary.append(token)\n        embeddings.append(embedding)\n    return (vocabulary, np.array(embeddings))", "docstring": "Loads a text embedding into memory as a numpy matrix.\n\nArgs:\nfile_path: Path to the text embedding file.\nparse_line_fn: callback function to parse each file line.\n\nReturns:\nA tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).\n\nRaises:\nValueError: if the data in the sstable is inconsistent.", "source": "codesearchnet"}
{"code": "def _fn(self, arg0, arg1):\n    return arg0 + arg1", "docstring": "fn doc.\n\nArgs:\narg0: Arg 0.\narg1: Arg 1.\n\nReturns:\nSum of args.", "source": "github-repos"}
{"code": "def ed25519_private_key_from_string(string):\n    try:\n        return Ed25519PrivateKey.from_private_bytes(base64.b64decode(string))\n    except (UnsupportedAlgorithm, Base64Error) as exc:\n        raise ScriptWorkerEd25519Error(\"Can't create Ed25519PrivateKey: {}!\".format(str(exc)))", "docstring": "Create an ed25519 private key from ``string``, which is a seed.\n\nArgs:\nstring (str): the string to use as a seed.\n\nReturns:\nEd25519PrivateKey: the private key", "source": "codesearchnet"}
{"code": "def CopyToDateTimeString(self):\n    if ((self._timestamp is None) or (self._timestamp < self._INT64_MIN) or (self._timestamp > self._INT64_MAX)):\n        return None\n    return super(APFSTime, self)._CopyToDateTimeString()", "docstring": "Copies the APFS timestamp to a date and time string.\n\nReturns:\nstr: date and time value formatted as: \"YYYY-MM-DD hh:mm:ss.#########\" or\nNone if the timestamp is missing or invalid.", "source": "codesearchnet"}
{"code": "async def _sync_all_conversations(client):\n    \n    conv_states = []\n    sync_timestamp = None\n    request = hangouts_pb2.SyncRecentConversationsRequest(\n        request_header=client.get_request_header(),\n        max_conversations=CONVERSATIONS_PER_REQUEST,\n        max_events_per_conversation=1,\n        sync_filter=[\n            hangouts_pb2.SYNC_FILTER_INBOX,\n            hangouts_pb2.SYNC_FILTER_ARCHIVED,\n        ]\n    )\n    for _ in range(MAX_CONVERSATION_PAGES):\n        logger.info(\n            'Requesting conversations page %s', request.last_event_timestamp\n        )\n        response = await client.sync_recent_conversations(request)\n        conv_states = list(response.conversation_state) + conv_states\n        sync_timestamp = parsers.from_timestamp(\n            \n            \n            \n            \n            response.response_header.current_server_time\n        )\n        if response.continuation_end_timestamp == 0:\n            logger.info('Reached final conversations page')\n            break\n        else:\n            request.last_event_timestamp = response.continuation_end_timestamp\n    else:\n        logger.warning('Exceeded maximum number of conversation pages')\n    logger.info('Synced %s total conversations', len(conv_states))\n    return conv_states, sync_timestamp", "docstring": "Sync all conversations by making paginated requests.\n\nConversations are ordered by ascending sort timestamp.\n\nArgs:\nclient (Client): Connected client.\n\nRaises:\nNetworkError: If the requests fail.\n\nReturns:\ntuple of list of ``ConversationState`` messages and sync timestamp", "source": "juraj-google-style"}
{"code": "def __init__(self, config_reader=None):\n        \n        if config_reader:\n            self._config = config_reader\n        else:\n            self._config = WTF_CONFIG_READER", "docstring": "Constructor\n\nArgs:\nconfig_reader (ConfigReader) - override default config reader.", "source": "juraj-google-style"}
{"code": "def from_string(cls, prjs):\n\n    def parse(v):\n        try:\n            return int(v)\n        except ValueError:\n            pass\n        try:\n            return float(v)\n        except ValueError:\n            return v\n    parts = [o.lstrip('+') for o in prjs.strip().split()]\n    items = map((lambda kv: (((len(kv) == 2) and (kv[0], parse(kv[1]))) or (kv[0], True))), (p.split('=') for p in parts))\n    return cls({k: v for (k, v) in items if (('+' + k) in PROJ4_PARAMS.keys())})", "docstring": "Turn a PROJ.4 string into a mapping of parameters. Bare parameters\nlike \"+no_defs\" are given a value of ``True``. All keys are checked\nagainst the ``all_proj_keys`` list.\n\nArgs:\nprjs (str): A PROJ4 string.", "source": "codesearchnet"}
{"code": "def create_asset_accesspolicy(access_token, name, duration, permission=\"1\"):\n    \n    path = '/AccessPolicies'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = '{ \\\n\t\t\"Name\": \"' + str(name) + '\", \\\n\t\t\"DurationInMinutes\": \"' + duration + '\", \\\n\t\t\"Permissions\": \"' + permission + '\" \\\n\t}'\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Asset Access Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nname (str): A Media Service Asset Access Policy Name.\nduration (str): A Media Service duration.\npermission (str): A Media Service permission.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def check_dummies(overwrite: bool=False):\n    dummy_files = create_dummy_files()\n    short_names = {'torch': 'pt'}\n    path = os.path.join(PATH_TO_TRANSFORMERS, 'utils')\n    dummy_file_paths = {backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py') for backend in dummy_files.keys()}\n    actual_dummies = {}\n    for backend, file_path in dummy_file_paths.items():\n        if os.path.isfile(file_path):\n            with open(file_path, 'r', encoding='utf-8', newline='\\n') as f:\n                actual_dummies[backend] = f.read()\n        else:\n            actual_dummies[backend] = ''\n    for backend in dummy_files.keys():\n        if dummy_files[backend] != actual_dummies[backend]:\n            if overwrite:\n                print(f'Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main __init__ has new objects.')\n                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\\n') as f:\n                    f.write(dummy_files[backend])\n            else:\n                found = False\n                for _actual, _dummy in zip(actual_dummies['torch'].split('class'), dummy_files['torch'].split('class')):\n                    if _actual != _dummy:\n                        actual_broken = _actual\n                        dummy_broken = _dummy\n                        found = True\n                        break\n                if not found:\n                    print('A transient error was found with the dummies, please investigate.')\n                    continue\n                raise ValueError(f'The main __init__ has objects that are not present in transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py.\\n It is likely the following objects are responsible, see these excerpts: \\n---------------------------------- Actual -------------------------------------\\n \\n {actual_broken} \\n---------------------------------- Dummy -------------------------------------\\n \\n {dummy_broken} \\nRun `make fix-copies` to fix this.')", "docstring": "Check if the dummy files are up to date and maybe `overwrite` with the right content.\n\nArgs:\noverwrite (`bool`, *optional*, default to `False`):\nWhether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date\nwhen `overwrite=False`.", "source": "github-repos"}
{"code": "def GetValueByPath(self, path_segments):\n    \n    key = self.root_key\n    for path_segment in path_segments:\n      if isinstance(key, dict):\n        try:\n          key = key[path_segment]\n        except KeyError:\n          return None\n\n      elif isinstance(key, list):\n        try:\n          list_index = int(path_segment, 10)\n        except ValueError:\n          return None\n\n        key = key[list_index]\n\n      else:\n        return None\n\n      if not key:\n        return None\n\n    return key", "docstring": "Retrieves a plist value by path.\n\nArgs:\npath_segments (list[str]): path segment strings relative to the root\nof the plist.\n\nReturns:\nobject: The value of the key specified by the path or None.", "source": "juraj-google-style"}
{"code": "def flash_write(self, addr, data, nbits=None, flags=0):\n        \n        \n        \n        self._dll.JLINKARM_BeginDownload(flags)\n\n        self.memory_write(addr, data, nbits=nbits)\n\n        \n        bytes_flashed = self._dll.JLINKARM_EndDownload()\n        if bytes_flashed < 0:\n            raise errors.JLinkFlashException(bytes_flashed)\n\n        return bytes_flashed", "docstring": "Writes data to the flash region of a device.\n\nThe given number of bits, if provided, must be either ``8``, ``16``, or\n``32``.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): starting flash address to write to\ndata (list): list of data units to write\nnbits (int): number of bits to use for each unit\n\nReturns:\nNumber of bytes written to flash.", "source": "juraj-google-style"}
{"code": "def layer_postprocess(layer_input, layer_output, hparams):\n    return layer_prepostprocess(layer_input, layer_output, sequence=hparams.layer_postprocess_sequence, dropout_rate=hparams.layer_prepostprocess_dropout, norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, dropout_broadcast_dims=comma_separated_string_to_integer_list(getattr(hparams, 'layer_prepostprocess_dropout_broadcast_dims', '')), default_name='layer_postprocess')", "docstring": "Apply layer postprocessing.\n\nSee layer_prepostprocess() for details.\n\nA hyperparameters object is passed for convenience.  The hyperparameters\nthat may be used are:\n\nlayer_postprocess_sequence\nlayer_prepostprocess_dropout\nnorm_type\nhidden_size\nnorm_epsilon\n\nArgs:\nlayer_input: a Tensor\nlayer_output: a Tensor\nhparams: a hyperparameters object.\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def software_breakpoint(self):\n        \n        software_types = [\n            enums.JLinkBreakpoint.SW_RAM,\n            enums.JLinkBreakpoint.SW_FLASH,\n            enums.JLinkBreakpoint.SW\n        ]\n        return any(self.Type & stype for stype in software_types)", "docstring": "Returns whether this is a software breakpoint.\n\nArgs:\nself (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance\n\nReturns:\n``True`` if the breakpoint is a software breakpoint, otherwise\n``False``.", "source": "juraj-google-style"}
{"code": "def inception_v3_arg_scope(weight_decay=4e-05, stddev=0.1, batch_norm_var_collection='moving_vars'):\n    batch_norm_params = {'decay': 0.9997, 'epsilon': 0.001, 'updates_collections': tf.GraphKeys.UPDATE_OPS, 'variables_collections': {'beta': None, 'gamma': None, 'moving_mean': [batch_norm_var_collection], 'moving_variance': [batch_norm_var_collection]}}\n    with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay)):\n        with slim.arg_scope([slim.conv2d], weights_initializer=tf.truncated_normal_initializer(stddev=stddev), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) as sc:\n            return sc", "docstring": "Defines the default InceptionV3 arg scope.\n\nArgs:\nweight_decay: The weight decay to use for regularizing the model.\nstddev: The standard deviation of the trunctated normal weight initializer.\nbatch_norm_var_collection: The name of the collection for the batch norm\nvariables.\n\nReturns:\nAn `arg_scope` to use for the inception v3 model.", "source": "codesearchnet"}
{"code": "def defer(coro, delay=1):\n    assert_corofunction(coro=coro)\n\n    @asyncio.coroutine\n    def wrapper(*args, **kw):\n        (yield from asyncio.sleep(delay))\n        return (yield from coro(*args, **kw))\n    return wrapper", "docstring": "Returns a coroutine function wrapper that will defer the given coroutine\nexecution for a certain amount of seconds in a non-blocking way.\n\nThis function can be used as decorator.\n\nArguments:\ncoro (coroutinefunction): coroutine function to defer.\ndelay (int/float): number of seconds to defer execution.\n\nRaises:\nTypeError: if coro argument is not a coroutine function.\n\nReturns:\nfiltered values (list): ordered list of resultant values.\n\nUsage::\n\n# Usage as function\nawait paco.defer(coro, delay=1)\nawait paco.defer(coro, delay=0.5)\n\n# Usage as decorator\n@paco.defer(delay=1)\nasync def mul_2(num):\nreturn num * 2\n\nawait mul_2(2)\n# => 4", "source": "codesearchnet"}
{"code": "def ReadPreprocessingInformation(self, knowledge_base):\n    \n    if not self._storage_file:\n      raise IOError('Unable to read from closed storage writer.')\n\n    self._storage_file.ReadPreprocessingInformation(knowledge_base)", "docstring": "Reads preprocessing information.\n\nThe preprocessing information contains the system configuration which\ncontains information about various system specific configuration data,\nfor example the user accounts.\n\nArgs:\nknowledge_base (KnowledgeBase): is used to store the preprocessing\ninformation.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def _convert_dynamic_dimension_to_zero(shape):\n    if shape.rank is None:\n        return shape\n    return tensor_shape.TensorShape([0 if d is None else d for d in shape.as_list()])", "docstring": "Converts dynamic dimensions in `shape` to zero.\n\nThe fake params created to match the intermediates captured in other branches\ncould have dynamic dimensions. But the XLA shape is not able to handle\ndynamic dimensions in TF TensorShape. Setting the dynamic dimensions to\nsize zero will help avoid failing safety checks in bridge. When XLA\nDynamicConditional op reconciles branch differences, XLA will replace the\ndimension size 0 with a bounded dimension determined from the shape of\nreal argument in the other branch.\n\nNote: Rank unknown shapes are returned as they are.\n\nArgs:\nshape: The TensorShape of fake param.\n\nReturns:\nThe new TensorShape with dynamic dimensions set to zero.", "source": "github-repos"}
{"code": "def strip_iterable(self) -> 'IOTypeHints':\n    if self.output_types is None or not self.has_simple_output_type():\n        return self\n    output_type = self.output_types[0][0]\n    if output_type is None or isinstance(output_type, type(None)):\n        return self\n    if isinstance(output_type, typehints.UnionConstraint):\n        types = list(output_type.union_types)\n        if len(types) == 2:\n            try:\n                types.remove(type(None))\n                output_type = types[0]\n            except ValueError:\n                pass\n    if isinstance(output_type, typehints.TypeVariable):\n        return self._replace(output_types=((typehints.Any,), {}), origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))\n    yielded_type = typehints.get_yielded_type(output_type)\n    return self._replace(output_types=((yielded_type,), {}), origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))", "docstring": "Removes outer Iterable (or equivalent) from output type.\n\nOnly affects instances with simple output types, otherwise is a no-op.\nDoes not modify self.\n\nDesigned to be used with type hints from callables of ParDo, FlatMap, DoFn.\nOutput type may be Optional[T], in which case the result of stripping T is\nused as the output type.\nOutput type may be None/NoneType, in which case nothing is done.\n\nExample: Generator[Tuple(int, int)] becomes Tuple(int, int)\n\nReturns:\nA copy of this instance with a possibly different output type.\n\nRaises:\nValueError if output type is simple and not iterable.", "source": "github-repos"}
{"code": "def add_timeout_callback(self, callback, timeout_milliseconds):\n    from ..server.callbacks import TimeoutCallback\n    cb = TimeoutCallback(self, None, timeout_milliseconds)\n    return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_timeout_callback)", "docstring": "Add callback to be invoked once, after a specified timeout passes.\n\nArgs:\ncallback (callable) :\nA callback function to execute after timeout\n\ntimeout_milliseconds (int) :\nNumber of milliseconds before callback execution.\n\nReturns:\nTimeoutCallback : can be used with ``remove_timeout_callback``\n\n.. note::\nTimeout callbacks only work within the context of a Bokeh server\nsession. This function will no effect when Bokeh outputs to\nstandalone HTML or Jupyter notebook cells.", "source": "codesearchnet"}
{"code": "def compose_object(self, file_list, destination_file, content_type):\n    \n\n    xml_setting_list = ['<ComposeRequest>']\n\n    for meta_data in file_list:\n      xml_setting_list.append('<Component>')\n      for key, val in meta_data.iteritems():\n        xml_setting_list.append('<%s>%s</%s>' % (key, val, key))\n      xml_setting_list.append('</Component>')\n    xml_setting_list.append('</ComposeRequest>')\n    xml = ''.join(xml_setting_list)\n\n    if content_type is not None:\n      headers = {'Content-Type': content_type}\n    else:\n      headers = None\n    status, resp_headers, content = self.put_object(\n        api_utils._quote_filename(destination_file) + '?compose',\n        payload=xml,\n        headers=headers)\n    errors.check_status(status, [200], destination_file, resp_headers,\n                        body=content)", "docstring": "COMPOSE multiple objects together.\n\nUsing the given list of files, calls the put object with the compose flag.\nThis call merges all the files into the destination file.\n\nArgs:\nfile_list: list of dicts with the file name.\ndestination_file: Path to the destination file.\ncontent_type: Content type for the destination file.", "source": "juraj-google-style"}
{"code": "def __convertRlocToRouterId(self, xRloc16):\n        \n        routerList = []\n        routerList = self.__sendCommand(WPANCTL_CMD + 'getprop -v Thread:RouterTable')\n        print routerList\n        print xRloc16\n\n        for line in routerList:\n            if re.match('\\[|\\]', line):\n                continue\n            if re.match(WPAN_CARRIER_PROMPT, line, re.M|re.I):\n                break\n            router = []\n            router = self.__stripValue(line).split(',')\n\n            for item in router:\n                if 'RouterId' in item:\n                    routerid = item.split(':')[1]\n                elif 'RLOC16' in line:\n                    rloc16 = line.split(':')[1]\n                else:\n                    pass\n\n            \n            if isinstance(xRloc16, str):\n                rloc16 = '0x' + rloc16\n                if rloc16 == xRloc16:\n                    return routerid\n            elif isinstance(xRloc16, int):\n                if int(rloc16, 16) == xRloc16:\n                    return routerid\n            else:\n                pass\n\n        return None", "docstring": "mapping Rloc16 to router id\n\nArgs:\nxRloc16: hex rloc16 short address\n\nReturns:\nactual router id allocated by leader", "source": "juraj-google-style"}
{"code": "def gradient_tensor(self, x_tensor):\n    x_tensor_name = self._get_tensor_name(x_tensor)\n    if x_tensor_name not in self._gradient_tensors:\n        raise LookupError('This GradientsDebugger has not received any gradient tensor for x-tensor %s' % x_tensor_name)\n    return self._gradient_tensors[x_tensor_name]", "docstring": "Get the gradient tensor of an x-tensor.\n\nArgs:\nx_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its\nname. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor\non the denominator of the differentiation.\n\nReturns:\nIf found, the gradient tensor.\n\nRaises:\nTypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.\nLookupError: If the `x_tensor` has not been registered with a gradient\ntensor.", "source": "github-repos"}
{"code": "def load(self, filename, bs=512):\n        \n        self.__filename = filename\n        self.__volumes = []\n\n        \n        self.__partition_scheme = rawdisk.scheme.common.detect_scheme(filename)\n\n        plugin_objects = [plugin.plugin_object for plugin in self.__fs_plugins]\n        fs_detector = FilesystemDetector(fs_plugins=plugin_objects)\n\n        if self.__partition_scheme == PartitionScheme.SCHEME_MBR:\n            self.__load_mbr_volumes(filename, fs_detector, bs)\n        elif self.__partition_scheme == PartitionScheme.SCHEME_GPT:\n            self.__load_gpt_volumes(filename, fs_detector, bs)\n        else:\n            self.logger.warning('Partitioning scheme could not be determined.')\n            \n            volume = fs_detector.detect_standalone(filename, offset=0)\n            if volume is not None:\n                volume.load(filename, offset=0)\n                self.__volumes.append(volume)\n            else:\n                self.logger.warning(\n                    'Were not able to detect standalone volume type')", "docstring": "Starts filesystem analysis. Detects supported filesystems and \\\nloads :attr:`partitions` array.\n\nArgs:\nfilename - Path to file or device for reading.\n\nRaises:\nIOError - File/device does not exist or is not readable.", "source": "juraj-google-style"}
{"code": "def compose_auth_header(\n    auth: Union[MutableMapping, str, bytes], registry_addr: str = None\n) -> str:\n    \n    if isinstance(auth, Mapping):\n        \n        if \"identitytoken\" in auth:\n            pass\n        elif \"auth\" in auth:\n            return compose_auth_header(auth[\"auth\"], registry_addr)\n        else:\n            if registry_addr:\n                auth[\"serveraddress\"] = registry_addr\n        auth_json = json.dumps(auth).encode(\"utf-8\")\n    elif isinstance(auth, (str, bytes)):\n        \n        \n        if isinstance(auth, bytes):\n            auth = auth.decode(\"utf-8\")\n        s = base64.b64decode(auth)\n        username, passwd = s.split(b\":\", 1)\n        config = {\n            \"username\": username.decode(\"utf-8\"),\n            \"password\": passwd.decode(\"utf-8\"),\n            \"email\": None,\n            \"serveraddress\": registry_addr,\n        }\n        auth_json = json.dumps(config).encode(\"utf-8\")\n    else:\n        raise TypeError(\"auth must be base64 encoded string/bytes or a dictionary\")\n    auth = base64.b64encode(auth_json).decode(\"ascii\")\n    return auth", "docstring": "Validate and compose base64-encoded authentication header\nwith an optional support for parsing legacy-style \"user:password\"\nstrings.\n\nArgs:\nauth: Authentication information\nregistry_addr: An address of the registry server\n\nReturns:\nA base64-encoded X-Registry-Auth header value", "source": "juraj-google-style"}
{"code": "def set_state_vector(self, state: Union[(int, np.ndarray)]):\n    self._stepper.reset_state(state)", "docstring": "Updates the state of the simulator to the given new state.\n\nArgs:\nstate: If this is an int, then this is the state to reset\nthe stepper to, expressed as an integer of the computational basis.\nInteger to bitwise indices is little endian. Otherwise if this is\na np.ndarray this must be the correct size and have dtype of\nnp.complex64.\n\nRaises:\nValueError if the state is incorrectly sized or not of the correct\ndtype.", "source": "codesearchnet"}
{"code": "def write(self, file_prefix: str) -> str:", "docstring": "Serializes proto to disk.\n\nArgs:\nfile_prefix: string prefix of the filepath.\n\nReturns:\nThe actual path the proto is written to.", "source": "github-repos"}
{"code": "def load_yaml(path):\n    \n    \n    with open(path, 'rt') as f:\n        yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)\n    if not yamldict:\n        raise (LoadError('YAML file: %s is empty!' % path))\n    return yamldict", "docstring": "Load YAML file into an ordered dictionary\n\nArgs:\npath (str): Path to YAML file\n\nReturns:\nOrderedDict: Ordered dictionary containing loaded YAML file", "source": "juraj-google-style"}
{"code": "def process(filename, args, detector_classes, printer_classes):\n    ast = '--ast-compact-json'\n    if args.legacy_ast:\n        ast = '--ast-json'\n    args.filter_paths = parse_filter_paths(args)\n    slither = Slither(filename, ast_format=ast, **vars(args))\n    return _process(slither, detector_classes, printer_classes)", "docstring": "The core high-level code for running Slither static analysis.\n\nReturns:\nlist(result), int: Result list and number of contracts analyzed", "source": "codesearchnet"}
{"code": "def parse_args(test: typing.Optional[typing.List[str]]=None) -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n    parser.add_argument('weight_file', help='A file path for the learned weights.')\n    parser.add_argument('-o', '--outfile', help='A file path to export a model file. (default: model.json)', default='model.json', type=str)\n    parser.add_argument('--scale', help='A scale factor for the output scores', default=1000, type=int)\n    if test is None:\n        return parser.parse_args()\n    else:\n        return parser.parse_args(test)", "docstring": "Parses commandline arguments.\n\nArgs:\ntest (typing.Optional[typing.List[str]], optional): Commandline args for\ntesting. Defaults to None.\n\nReturns:\nParsed arguments (argparse.Namespace).", "source": "github-repos"}
{"code": "def _add_state_variable(self, name, shape, dtype, initializer=None, partitioner=None, use_resource=None, **kwargs):\n    weight = self.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=None, trainable=False, constraint=None, partitioner=partitioner, use_resource=use_resource, **kwargs)\n    self.state_variables[name] = weight\n    return weight", "docstring": "Add a variable that can hold state which is updated during adapt().\n\nArgs:\nname: Variable name.\nshape: Variable shape. Defaults to scalar if unspecified.\ndtype: The type of the variable. Defaults to `self.dtype` or `float32`.\ninitializer: initializer instance (callable).\npartitioner: Partitioner to be passed to the `Trackable` API.\nuse_resource: Whether to use `ResourceVariable`\n**kwargs: Additional keyword arguments. Accepted values are `getter` and\n`collections`.\n\nReturns:\nThe created variable.", "source": "github-repos"}
{"code": "def select_segments(self, jsonpath: str) -> List[Segment]:\n    path = self.etk.parse_json_path(jsonpath)\n    matches = path.find(self.cdr_document)\n    segments = list()\n    for a_match in matches:\n        this_segment = Segment(str(a_match.full_path), a_match.value, self)\n        segments.append(this_segment)\n    return segments", "docstring": "Dereferences the json_path inside the document and returns the selected elements.\nThis method should compile and cache the compiled json_path in case the same path\nis reused by multiple extractors.\n\nArgs:\njsonpath (str): a valid JSON path.\n\nReturns: A list of Segments object that contains the elements selected by the json path.", "source": "codesearchnet"}
{"code": "def Sample(self, operation, description, data_size, compressed_data_size):\n    \n    sample_time = time.time()\n    sample = '{0:f}\\t{1:s}\\t{2:s}\\t{3:d}\\t{4:d}\\n'.format(\n        sample_time, operation, description, data_size, compressed_data_size)\n    self._WritesString(sample)", "docstring": "Takes a sample of data read or written for profiling.\n\nArgs:\noperation (str): operation, either 'read' or 'write'.\ndescription (str): description of the data read.\ndata_size (int): size of the data read in bytes.\ncompressed_data_size (int): size of the compressed data read in bytes.", "source": "juraj-google-style"}
{"code": "def validate(cls, mapper_spec):\n    \n    if mapper_spec.input_reader_class() != cls:\n      raise BadReaderParamsError(\"Input reader class mismatch\")\n    params = _get_params(mapper_spec)\n    if cls.ENTITY_KIND_PARAM not in params:\n      raise BadReaderParamsError(\"Missing mapper parameter 'entity_kind'\")\n    if cls.BATCH_SIZE_PARAM in params:\n      try:\n        batch_size = int(params[cls.BATCH_SIZE_PARAM])\n        if batch_size < 1:\n          raise BadReaderParamsError(\"Bad batch size: %s\" % batch_size)\n      except ValueError, e:\n        raise BadReaderParamsError(\"Bad batch size: %s\" % e)\n    if cls.NAMESPACE_PARAM in params:\n      if not isinstance(params[cls.NAMESPACE_PARAM],\n                        (str, unicode, type(None))):\n        raise BadReaderParamsError(\n            \"Expected a single namespace string\")\n    if cls.NAMESPACES_PARAM in params:\n      raise BadReaderParamsError(\"Multiple namespaces are no longer supported\")\n    if cls.FILTERS_PARAM in params:\n      filters = params[cls.FILTERS_PARAM]\n      if not isinstance(filters, list):\n        raise BadReaderParamsError(\"Expected list for filters parameter\")\n      for f in filters:\n        if not isinstance(f, (tuple, list)):\n          raise BadReaderParamsError(\"Filter should be a tuple or list: %s\", f)\n        if len(f) != 3:\n          raise BadReaderParamsError(\"Filter should be a 3-tuple: %s\", f)\n        if not isinstance(f[0], basestring):\n          raise BadReaderParamsError(\"First element should be string: %s\", f)\n        if f[1] != \"=\":\n          raise BadReaderParamsError(\n              \"Only equality filters are supported: %s\", f)", "docstring": "Validates mapper spec and all mapper parameters.\n\nArgs:\nmapper_spec: The MapperSpec for this InputReader.\n\nRaises:\nBadReaderParamsError: required parameters are missing or invalid.", "source": "juraj-google-style"}
{"code": "def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)", "docstring": "Flip the color channels from RGB to BGR or vice versa.\n\nArgs:\nimage (`np.ndarray`):\nThe image, represented as a numpy array.\ndata_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def infer(self, ob):\n    self._add_to_stack(ob)\n    (logits, vf) = self.infer_from_frame_stack(self._frame_stack)\n    return (logits, vf)", "docstring": "Add new observation to frame stack and infer policy.\n\nArgs:\nob: array of shape (height, width, channels)\n\nReturns:\nlogits and vf.", "source": "codesearchnet"}
{"code": "def get_selected_subassistant_path(self, **kwargs):\n        \n        path = [self]\n        previous_subas_list = None\n        currently_searching = self.get_subassistant_tree()[1]\n\n        \n        while settings.SUBASSISTANT_N_STRING.format(len(path) - 1) in kwargs and \\\n                kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:\n            for sa, subas_list in currently_searching:\n                if sa.name == kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:\n                    currently_searching = subas_list\n                    path.append(sa)\n                    break  \n\n            if subas_list == previous_subas_list:\n                raise exceptions.AssistantNotFoundException(\n                    'No assistant {n} after path {p}.'.format(\n                        n=kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)],\n                        p=path))\n            previous_subas_list = subas_list\n\n        return path", "docstring": "Recursively searches self._tree - has format of (Assistant: [list_of_subassistants]) -\nfor specific path from first to last selected subassistants.\n\nArgs:\nkwargs: arguments containing names of the given assistants in form of\nsubassistant_0 = 'name', subassistant_1 = 'another_name', ...\nReturns:\nlist of subassistants objects from tree sorted from first to last", "source": "juraj-google-style"}
{"code": "def __rfloordiv__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(other.value", "docstring": "Returns the quotient of `other` and `self` rounded down.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA `Dimension` whose value is the integer quotient of `self` and `other`.", "source": "github-repos"}
{"code": "def func(self, w, *args):\n    x0 = args[0]\n    x1 = args[1]\n    n0 = x0.shape[0]\n    n1 = x1.shape[0]\n    n = (max(n0, n1) * 10)\n    idx0 = np.random.choice(range(n0), size=n)\n    idx1 = np.random.choice(range(n1), size=n)\n    b0 = np.ones((n0, 1))\n    b1 = np.ones((n1, 1))\n    i1 = (self.i + 1)\n    h = self.h\n    h1 = (h + 1)\n    if sparse.issparse(x0):\n        p0 = np.hstack((sigm(sparse.hstack((x0, b0)).dot(w[:(- h1)].reshape(i1, h))), b0)).dot(w[(- h1):].reshape(h1, 1))\n        p1 = np.hstack((sigm(sparse.hstack((x1, b1)).dot(w[:(- h1)].reshape(i1, h))), b1)).dot(w[(- h1):].reshape(h1, 1))\n    else:\n        p0 = np.hstack((sigm(np.hstack((x0, b0)).dot(w[:(- h1)].reshape(i1, h))), b0)).dot(w[(- h1):].reshape(h1, 1))\n        p1 = np.hstack((sigm(np.hstack((x1, b1)).dot(w[:(- h1)].reshape(i1, h))), b1)).dot(w[(- h1):].reshape(h1, 1))\n    p0 = p0[idx0]\n    p1 = p1[idx1]\n    return (0.5 * (((sum((((1 - p1) + p0) ** 2)) / n) + ((self.l1 * sum((w[:(- h1)] ** 2))) / (i1 * h))) + ((self.l2 * sum((w[(- h1):] ** 2))) / h1)))", "docstring": "Return the costs of the neural network for predictions.\n\nArgs:\nw (array of float): weight vectors such that:\nw[:-h1] -- weights between the input and h layers\nw[-h1:] -- weights between the h and output layers\nargs: features (args[0]) and target (args[1])\n\nReturns:\ncombined cost of RMSE, L1, and L2 regularization", "source": "codesearchnet"}
{"code": "def normal_meanvar(data):\n    data = np.hstack(([0.0], np.array(data)))\n    cumm = np.cumsum(data)\n    cumm_sq = np.cumsum([(val ** 2) for val in data])\n\n    def cost(s, t):\n        ' Cost function for normal distribution with variable variance\\n\\n        Args:\\n            start (int): start index\\n            end (int): end index\\n        Returns:\\n            float: Cost, from start to end\\n        '\n        ts_i = (1.0 / (t - s))\n        mu = ((cumm[t] - cumm[s]) * ts_i)\n        sig = (((cumm_sq[t] - cumm_sq[s]) * ts_i) - (mu ** 2))\n        sig_i = (1.0 / sig)\n        return (((((t - s) * np.log(sig)) + ((cumm_sq[t] - cumm_sq[s]) * sig_i)) - (((2 * (cumm[t] - cumm[s])) * mu) * sig_i)) + (((t - s) * (mu ** 2)) * sig_i))\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nNormal distribution with changing mean and variance\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "codesearchnet"}
{"code": "def for_each(self, func):\n    aliases = list(self._service_objects.keys())\n    for alias in aliases:\n        with expects.expect_no_raises('Failed to execute \"%s\" for service \"%s\".' % (func.__name__, alias)):\n            func(self._service_objects[alias])", "docstring": "Executes a function with all registered services.\n\nArgs:\nfunc: function, the function to execute. This function should take\na service object as args.", "source": "github-repos"}
{"code": "def _inject(self, value, settings):\n    assert isinstance(value, string_types), 'Expected str; got {0.__class__}'.format(value)\n    (begin, end) = ('{{', '}}')\n    if (begin not in value):\n        return (value, False)\n    new_value = value\n    (begin_pos, end_pos) = (0, None)\n    (len_begin, len_end) = (len(begin), len(end))\n    len_value = len(new_value)\n    while (begin_pos < len_value):\n        begin_pos = new_value.find(begin, begin_pos)\n        if (begin_pos == (- 1)):\n            break\n        before = new_value[:begin_pos]\n        begin_pos += len_begin\n        end_pos = new_value.find(end, begin_pos)\n        if (end_pos == (- 1)):\n            raise ValueError('Unmatched {begin}...{end} in {value}'.format(**locals()))\n        name = new_value[begin_pos:end_pos]\n        name = name.strip()\n        if (not name):\n            raise ValueError('Empty name in {value}'.format(**locals()))\n        after_pos = (end_pos + len_end)\n        try:\n            after = new_value[after_pos:]\n        except IndexError:\n            after = ''\n        try:\n            injection_value = settings.get_dotted(name)\n        except KeyError:\n            raise KeyError('{name} not found in {settings}'.format(**locals()))\n        if (not isinstance(injection_value, string_types)):\n            injection_value = self.strategy.encode_value(injection_value)\n        new_value = ''.join((before, injection_value, after))\n        begin_pos = (len(before) + len(injection_value))\n        len_value = len(new_value)\n    return (new_value, (new_value != value))", "docstring": "Inject ``settings`` into ``value``.\n\nGo through ``value`` looking for ``{{NAME}}`` groups and replace\neach group with the value of the named item from ``settings``.\n\nArgs:\nvalue (str): The value to inject settings into\nsettings: An object that provides the dotted access interface\n\nReturns:\n(str, bool): The new value and whether the new value is\ndifferent from the original value", "source": "codesearchnet"}
{"code": "def interpolate_jagged(xyz,nseg):\n    \n    \n    \n    \n    (r,theta,phi) = sequential_spherical(xyz)\n    \n    \n    rcum = np.append(0,np.cumsum(r))\n\n    \n    breakpoints = np.linspace(0,rcum[-1],nseg+1)\n    np.delete(breakpoints,0)\n    \n    \n    seg_paths = []\n    for a in range(nseg):\n        path = []\n        \n        \n        if a == 0:\n            start_coord = xyz[0,:]\n        else:\n            start_coord = end_coord \n        path.append(start_coord)\n\n        \n        start_length = breakpoints[a]\n        end_length = breakpoints[a+1]\n        mid_boolean = (rcum > start_length) & (rcum < end_length)\n        mid_indices = np.nonzero(mid_boolean)[0]\n        for mi in mid_indices:\n            path.append(xyz[mi,:])\n\n        \n        end_coord = find_coord(end_length,xyz,rcum,theta,phi)\n        path.append(end_coord)\n\n        \n        seg_paths.append(np.array(path))\n    \n    \n    return seg_paths", "docstring": "Interpolates along a jagged path in 3D\n\nArgs:\nxyz = section path specified in cartesian coordinates\nnseg = number of segment paths in section path\n\nReturns:\ninterp_xyz = interpolated path", "source": "juraj-google-style"}
{"code": "def idxmax(self, **kwargs):\n    if self._is_transposed:\n        kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)\n        return self.transpose().idxmax(**kwargs)\n    axis = kwargs.get('axis', 0)\n    index = (self.index if (axis == 0) else self.columns)\n\n    def idxmax_builder(df, **kwargs):\n        if (axis == 0):\n            df.index = index\n        else:\n            df.columns = index\n        return df.idxmax(**kwargs)\n    func = self._build_mapreduce_func(idxmax_builder, **kwargs)\n    return self._full_axis_reduce(axis, func)", "docstring": "Returns the first occurrence of the maximum over requested axis.\n\nReturns:\nA new QueryCompiler object containing the maximum of each column or axis.", "source": "codesearchnet"}
{"code": "def contains_vasp_input(dir_name):\n    \n    for f in [\"INCAR\", \"POSCAR\", \"POTCAR\", \"KPOINTS\"]:\n        if not os.path.exists(os.path.join(dir_name, f)) and \\\n                not os.path.exists(os.path.join(dir_name, f + \".orig\")):\n            return False\n    return True", "docstring": "Checks if a directory contains valid VASP input.\n\nArgs:\ndir_name:\nDirectory name to check.\n\nReturns:\nTrue if directory contains all four VASP input files (INCAR, POSCAR,\nKPOINTS and POTCAR).", "source": "juraj-google-style"}
{"code": "def tf():\n    try:\n        from tensorboard.compat import notf\n    except ImportError:\n        try:\n            import tensorflow\n            return tensorflow\n        except ImportError:\n            pass\n    from tensorboard.compat import tensorflow_stub\n    return tensorflow_stub", "docstring": "Provide the root module of a TF-like API for use within TensorBoard.\n\nBy default this is equivalent to `import tensorflow as tf`, but it can be used\nin combination with //tensorboard/compat:tensorflow (to fall back to a stub TF\nAPI implementation if the real one is not available) or with\n//tensorboard/compat:no_tensorflow (to force unconditional use of the stub).\n\nReturns:\nThe root module of a TF-like API, if available.\n\nRaises:\nImportError: if a TF-like API is not available.", "source": "codesearchnet"}
{"code": "def tables_list(self, dataset_name, max_results=0, page_token=None):\n    \n    url = Api._ENDPOINT +\\\n        (Api._TABLES_PATH % (dataset_name.project_id, dataset_name.dataset_id, '', ''))\n\n    args = {}\n    if max_results != 0:\n      args['maxResults'] = max_results\n    if page_token is not None:\n      args['pageToken'] = page_token\n\n    return google.datalab.utils.Http.request(url, args=args, credentials=self.credentials)", "docstring": "Issues a request to retrieve a list of tables.\n\nArgs:\ndataset_name: the name of the dataset to enumerate.\nmax_results: an optional maximum number of tables to retrieve.\npage_token: an optional token to continue the retrieval.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def normal_meanvar(data):\n    \n    data = np.hstack(([0.0], np.array(data)))\n\n    cumm = np.cumsum(data)\n    cumm_sq = np.cumsum([val**2 for val in data])\n\n    def cost(s, t):\n        \n        ts_i = 1.0 / (t-s)\n        mu = (cumm[t] - cumm[s]) * ts_i\n        sig = (cumm_sq[t] - cumm_sq[s]) * ts_i - mu**2\n        sig_i = 1.0 / sig\n        return (t-s) * np.log(sig) + (cumm_sq[t] - cumm_sq[s]) * sig_i - 2*(cumm[t] - cumm[s])*mu*sig_i + ((t-s)*mu**2)*sig_i\n\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nNormal distribution with changing mean and variance\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "juraj-google-style"}
{"code": "def timezone(self, timezone=0):\n    tz_dt = timedelta(hours=timezone)\n    for segment in self.segments:\n        for point in segment.points:\n            point.time = (point.time + tz_dt)\n    return self", "docstring": "Sets the timezone of the entire track\n\nArgs:\ntimezone (int): Timezone hour delta", "source": "codesearchnet"}
{"code": "def extract(self, url=None, raw_html=None):\n        \n        crawl_candidate = CrawlCandidate(self.config, url, raw_html)\n        return self.__crawl(crawl_candidate)", "docstring": "Extract the most likely article content from the html page\n\nArgs:\nurl (str): URL to pull and parse\nraw_html (str): String representation of the HTML page\nReturns:\nArticle: Representation of the article contents \\\nincluding other parsed and extracted metadata", "source": "juraj-google-style"}
{"code": "def _eligible_features_from_example_handler(self, request):\n    \n    features_list = inference_utils.get_eligible_features(\n      self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)\n    return http_util.Respond(request, features_list, 'application/json')", "docstring": "Returns a list of JSON objects for each feature in the example.\n\nArgs:\nrequest: A request for features.\n\nReturns:\nA list with a JSON object for each feature.\nNumeric features are represented as {name: observedMin: observedMax:}.\nCategorical features are repesented as {name: samples:[]}.", "source": "juraj-google-style"}
{"code": "def help(self, print_output=True):\n    help_text = self._rpc('help')\n    if print_output:\n        print(help_text)\n    else:\n        return help_text", "docstring": "Calls the help RPC, which returns the list of RPC calls available.\n\nThis RPC should normally be used in an interactive console environment\nwhere the output should be printed instead of returned. Otherwise,\nnewlines will be escaped, which will make the output difficult to read.\n\nArgs:\nprint_output: bool, for whether the output should be printed.\n\nReturns:\nA string containing the help output otherwise None if `print_output`\nwasn't set.", "source": "github-repos"}
{"code": "def __getitem__(self, key):\n    if key in self._policy_map:\n        return self._policy_map[key]\n    matching_keys = []\n    for k in self._policy_map:\n        if re.search(k, key):\n            matching_keys.append(k)\n    if len(matching_keys) > 1:\n        raise ValueError(f\"Path '{key}' matches multiple dtype policy specification keys: {matching_keys}. Please make sure each path only matches at most one dtype policy specification key in the DTypePolicyMap.\")\n    elif len(matching_keys) == 1:\n        return self._policy_map[matching_keys[0]]\n    return self.default_policy", "docstring": "Retrieves the corresponding `DTypePolicy` by the string key.\n\nWhen there isn't an exact match, all the existing keys in the map\nwill be treated as a regex and map against the input key again. When\nthere are multiple matches for the regex, an `ValueError` will be\nraised. Returns `self.default_policy` if there isn't any match found.\n\nArgs:\nkey: String key to query a `DTypePolicy`.\n\nReturns:\nCorresponding `DTypePolicy` based on the query.", "source": "github-repos"}
{"code": "def frequency_to_probability(frequency_map, decorator=lambda f: f):\n    \n    total = sum(frequency_map.values())\n    return {k: decorator(v / total) for k, v in frequency_map.items()}", "docstring": "Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.\n\nExample:\n>>> frequency_to_probability({'a': 2, 'b': 2})\n{'a': 0.5, 'b': 0.5}\n\nArgs:\nfrequency_map (dict): The dictionary to transform\ndecorator (function): A function to manipulate the probability\n\nReturns:\nDictionary of ngrams to probability", "source": "juraj-google-style"}
{"code": "def start_new_feature(**cc_kwargs):\n    project = Project.from_path(pathlib.Path.cwd().resolve())\n    contrib_dir = project.get('contrib', 'module_path')\n    with tempfile.TemporaryDirectory() as tempdir:\n        output_dir = tempdir\n        cc_kwargs['output_dir'] = output_dir\n        rendered_dir = render_feature_template(**cc_kwargs)\n        src = rendered_dir\n        dst = contrib_dir\n        synctree(src, dst, onexist=_fail_if_feature_exists)\n    logger.info('Start new feature successful.')", "docstring": "Start a new feature within a ballet project\n\nRenders the feature template into a temporary directory, then copies the\nfeature files into the proper path within the contrib directory.\n\nArgs:\n**cc_kwargs: options for the cookiecutter template\n\nRaises:\nballet.exc.BalletError: the new feature has the same name as an\nexisting one", "source": "codesearchnet"}
{"code": "def set_cc_opt_flags(environ_cp):\n    if is_ppc64le():\n        default_cc_opt_flags = '-mcpu=native'\n    elif is_windows():\n        default_cc_opt_flags = '/arch:AVX'\n    else:\n        default_cc_opt_flags = '-Wno-sign-compare'\n    question = 'Please specify optimization flags to use during compilation when bazel option \"--config=opt\" is specified [Default is %s]: ' % default_cc_opt_flags\n    cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS', question, default_cc_opt_flags)\n    for opt in cc_opt_flags.split():\n        write_to_bazelrc('build:opt --copt=%s' % opt)\n        write_to_bazelrc('build:opt --host_copt=%s' % opt)", "docstring": "Set up architecture-dependent optimization flags.\n\nAlso append CC optimization flags to bazel.rc..\n\nArgs:\nenviron_cp: copy of the os.environ.", "source": "github-repos"}
{"code": "def get_actual_replica(self, service_id: str) -> str:\n        \n        \n        if not self._manager:\n            raise RuntimeError('Only the Swarm manager node can retrieve '\n                               'replication level of the service')\n\n        service_details = self.get_service_details(service_id)\n        actual_replica = service_details[\"Spec\"][\"Mode\"][\n            \"Replicated\"][\"Replicas\"]\n        return actual_replica", "docstring": "Get the actual replica level of a service.\n\nArgs:\nservice_id (str): docker swarm service id\n\nReturns:\nstr, replicated level of the service", "source": "juraj-google-style"}
{"code": "def get_model_class_for_feature(feature: str, framework: str='pt') -> Type:\n    task = FeaturesManager.feature_to_task(feature)\n    FeaturesManager._validate_framework_choice(framework)\n    if framework == 'pt':\n        task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS\n    else:\n        task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS\n    if task not in task_to_automodel:\n        raise KeyError(f'Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}')\n    return task_to_automodel[task]", "docstring": "Attempts to retrieve an AutoModel class from a feature name.\n\nArgs:\nfeature (`str`):\nThe feature required.\nframework (`str`, *optional*, defaults to `\"pt\"`):\nThe framework to use for the export.\n\nReturns:\nThe AutoModel class corresponding to the feature.", "source": "github-repos"}
{"code": "def GetOutputDir(self, base_dir, config_filename):\n    return os.path.join(base_dir, os.path.basename(config_filename.replace('.yaml', '')))", "docstring": "Add the repack config filename onto the base output directory.\n\nThis allows us to repack lots of different configs to the same installer\nname and still be able to distinguish them.\n\nArgs:\nbase_dir: output directory string\nconfig_filename: the secondary config filename string\n\nReturns:\nString to be used as output directory for this repack.", "source": "codesearchnet"}
{"code": "def SignBuffer(self, in_buffer):\n    \n    precondition.AssertType(in_buffer, bytes)\n    with tempfile.NamedTemporaryFile() as temp_in:\n      temp_in.write(in_buffer)\n      temp_in.seek(0)\n      outfile = self.SignFile(temp_in.name)\n      with io.open(outfile, \"rb\") as filedesc:\n        return filedesc.read()", "docstring": "Sign a buffer via temp files.\n\nOur signing tool can't sign a buffer, so we work around it using temporary\nfiles.\n\nArgs:\nin_buffer: data to sign\n\nReturns:\nsigned data", "source": "juraj-google-style"}
{"code": "def FlagCxx11Features(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  include = Match(r'\\s*\n  if include and include.group(1) in ('cfenv',\n                                      'condition_variable',\n                                      'fenv.h',\n                                      'future',\n                                      'mutex',\n                                      'thread',\n                                      'chrono',\n                                      'ratio',\n                                      'regex',\n                                      'system_error',\n                                     ):\n    error(filename, linenum, 'build/c++11', 5,\n          ('<%s> is an unapproved C++11 header.') % include.group(1))\n\n  \n  \n  if Match(r'\\s*\n\n  \n  \n  \n  for top_name in (\n      \n      'alignment_of',\n      'aligned_union',\n      ):\n    if Search(r'\\bstd::%s\\b' % top_name, line):\n      error(filename, linenum, 'build/c++11', 5,\n            ('std::%s is an unapproved C++11 class or function.  Send c-style '\n             'an example of where it would make your code more readable, and '\n             'they may let you use it.') % top_name)", "docstring": "Flag those c++11 features that we only allow in certain places.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def _parse_name(self, config):\n    value = NAME_RE.search(config).group('value')\n    return dict(name=value)", "docstring": "_parse_name scans the provided configuration block and extracts\nthe vlan name.  The config block is expected to always return the\nvlan name.  The return dict is intended to be merged into the response\ndict.\n\nArgs:\nconfig (str): The vlan configuration block from the nodes running\nconfiguration\n\nReturns:\ndict: resource dict attribute", "source": "codesearchnet"}
{"code": "def read_probes(self, key):\n        \n        assert key in list(self._PROBES.keys())\n\n        import random\n        if key == 'value1':\n            value = random.random()\n        elif key == 'value2':\n            value = self.settings['output probe2']\n        elif key == 'internal':\n            value = self._internal_state\n        elif key == 'deep_internal':\n            value = self._internal_state_deep\n\n        return value", "docstring": "requestes value from the instrument and returns it\nArgs:\nkey: name of requested value\n\nReturns: reads values from instrument", "source": "juraj-google-style"}
{"code": "def handle_api_explorer_request(self, request, start_response):\n    \n    redirect_url = self._get_explorer_redirect_url(\n        request.server, request.port, request.base_path)\n    return util.send_wsgi_redirect_response(redirect_url, start_response)", "docstring": "Handler for requests to {base_path}/explorer.\n\nThis calls start_response and returns the response body.\n\nArgs:\nrequest: An ApiRequest, the request from the user.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body (which is empty, in this case).", "source": "juraj-google-style"}
{"code": "def list_live_services(self):\n    aliases = []\n    self.for_each(lambda service: aliases.append(service.alias) if service.is_alive else None)\n    return aliases", "docstring": "Lists the aliases of all the services that are alive.\n\nOrder of this list is determined by the order the services are\nregistered in.\n\nReturns:\nlist of strings, the aliases of the services that are running.", "source": "github-repos"}
{"code": "def resize_bilinear_nd(t, target_shape):\n  \n  shape = t.get_shape().as_list()\n  target_shape = list(target_shape)\n  assert len(shape) == len(target_shape)\n\n  \n  d = 0\n  while d < len(shape):\n\n    \n    if shape[d] == target_shape[d]:\n      d += 1\n      continue\n\n    \n    \n    new_shape = shape[:]\n    new_shape[d : d+2] = target_shape[d : d+2]\n\n    \n    \n    shape_ = collapse_shape(shape, d, d+2)\n    new_shape_ = collapse_shape(new_shape, d, d+2)\n\n    \n    \n    t_ = tf.reshape(t, shape_)\n    t_ = tf.image.resize_bilinear(t_, new_shape_[1:3])\n\n    \n    \n    t = tf.reshape(t_, new_shape)\n    shape = new_shape\n    d += 2\n\n  return t", "docstring": "Bilinear resizes a tensor t to have shape target_shape.\n\nThis function bilinearly resizes a n-dimensional tensor by iteratively\napplying tf.image.resize_bilinear (which can only resize 2 dimensions).\nFor bilinear interpolation, the order in which it is applied does not matter.\n\nArgs:\nt: tensor to be resized\ntarget_shape: the desired shape of the new tensor.\n\nReturns:\nThe resized tensor", "source": "juraj-google-style"}
{"code": "def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):\n    return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`RTDetrV2Config`] (or a derived class) from a pre-trained backbone model configuration and DETR model\nconfiguration.\n\nArgs:\nbackbone_config ([`PretrainedConfig`]):\nThe backbone configuration.\n\nReturns:\n[`RTDetrV2Config`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def add_session_log(self, session_log, global_step=None):\n    event = event_pb2.Event(session_log=session_log)\n    self._add_event(event, global_step)", "docstring": "Adds a `SessionLog` protocol buffer to the event file.\n\nThis method wraps the provided session in an `Event` protocol buffer\nand adds it to the event file.\n\nArgs:\nsession_log: A `SessionLog` protocol buffer.\nglobal_step: Number. Optional global step value to record with the\nsummary.", "source": "github-repos"}
{"code": "def post_warning(self, name, message):\n        \n\n        self.post_command(OPERATIONS.CMD_POST_MESSAGE,\n                          _create_message(name, states.WARNING_LEVEL, message))", "docstring": "Asynchronously post a user facing warning message about a service.\n\nArgs:\nname (string): The name of the service\nmessage (string): The user facing warning message that will be stored\nfor the service and can be queried later.", "source": "juraj-google-style"}
{"code": "def _readline(sock, buf):\n    chunks = []\n    last_char = b''\n    while True:\n        if ((last_char == b'\\r') and (buf[0:1] == b'\\n')):\n            chunks[(- 1)] = chunks[(- 1)][:(- 1)]\n            return (buf[1:], b''.join(chunks))\n        elif (buf.find(b'\\r\\n') != (- 1)):\n            (before, sep, after) = buf.partition(b'\\r\\n')\n            chunks.append(before)\n            return (after, b''.join(chunks))\n        if buf:\n            chunks.append(buf)\n            last_char = buf[(- 1):]\n        buf = _recv(sock, RECV_SIZE)\n        if (not buf):\n            raise MemcacheUnexpectedCloseError()", "docstring": "Read line of text from the socket.\n\nRead a line of text (delimited by \"\\r\\n\") from the socket, and\nreturn that line along with any trailing characters read from the\nsocket.\n\nArgs:\nsock: Socket object, should be connected.\nbuf: String, zero or more characters, returned from an earlier\ncall to _readline or _readvalue (pass an empty string on the\nfirst call).\n\nReturns:\nA tuple of (buf, line) where line is the full line read from the\nsocket (minus the \"\\r\\n\" characters) and buf is any trailing\ncharacters read after the \"\\r\\n\" was found (which may be an empty\nstring).", "source": "codesearchnet"}
{"code": "def update_sub(x, decrement):\n    return state_ops.assign_sub(x, decrement)", "docstring": "Update the value of `x` by subtracting `decrement`.\n\nArgs:\nx: A Variable.\ndecrement: A tensor of same shape as `x`.\n\nReturns:\nThe variable `x` updated.", "source": "github-repos"}
{"code": "class HungarianMatcher(nn.Module):\n\n    def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1):\n        super().__init__()\n        requires_backends(self, ['scipy'])\n        self.class_cost = class_cost\n        self.bbox_cost = bbox_cost\n        self.giou_cost = giou_cost\n        if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0):\n            raise ValueError(\"All costs of the Matcher can't be 0\")\n\n    @torch.no_grad()\n    def forward(self, outputs, targets):\n        \n        batch_size, num_queries = outputs['logits'].shape[:2]\n        out_prob = outputs['logits'].flatten(0, 1).softmax(-1)\n        out_bbox = outputs['pred_boxes'].flatten(0, 1)\n        target_ids = torch.cat([v['class_labels'] for v in targets])\n        target_bbox = torch.cat([v['boxes'] for v in targets])\n        class_cost = -out_prob[:, target_ids]\n        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)\n        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))\n        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost\n        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()\n        sizes = [len(v['boxes']) for v in targets]\n        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]\n        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]", "docstring": "This class computes an assignment between the targets and the predictions of the network.\n\nFor efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more\npredictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are\nun-matched (and thus treated as non-objects).\n\nArgs:\nclass_cost:\nThe relative weight of the classification error in the matching cost.\nbbox_cost:\nThe relative weight of the L1 error of the bounding box coordinates in the matching cost.\ngiou_cost:\nThe relative weight of the giou loss of the bounding box in the matching cost.", "source": "github-repos"}
{"code": "def ParsePageVisitRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n    was_http_non_get = self._GetRowValue(query_hash, row, 'http_non_get')\n\n    event_data = SafariHistoryPageVisitedEventData()\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')\n    event_data.was_http_non_get = bool(was_http_non_get)\n\n    timestamp = self._GetRowValue(query_hash, row, 'visit_time')\n    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a visited row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def get_shape(self) -> tensor_shape.TensorShape:\n    return self.shape", "docstring": "The statically known shape of this ragged tensor.\n\nReturns:\nA `TensorShape` containing the statically known shape of this ragged\ntensor.  Ragged dimensions have a size of `None`.\n\nAlias for `shape` property.\n\nExamples:\n\n>>> tf.ragged.constant([[0], [1, 2]]).get_shape()\nTensorShape([2, None])\n\n>>> tf.ragged.constant(\n...    [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape()\nTensorShape([2, None, 2])", "source": "github-repos"}
{"code": "def get_file_path(self, digest):\n        \n        relPath = Fsdb.generate_tree_path(digest, self._conf['depth'])\n        return os.path.join(self.fsdbRoot, relPath)", "docstring": "Retrieve the absolute path to the file with the given digest\n\nArgs:\ndigest -- digest of the file\nReturns:\nString rapresenting the absolute path of the file", "source": "juraj-google-style"}
{"code": "def __init__(self, thunk):\n    self._thunk = thunk\n    self._master_tensor = thunk()", "docstring": "Initializes a _LazyEvalTensor object.\n\nArgs:\nthunk: A callable. A thunk which computes the value of the tensor.", "source": "github-repos"}
{"code": "def take_while(predicate):\n\n    def _apply_fn(dataset):\n        return dataset.take_while(predicate=predicate)\n    return _apply_fn", "docstring": "A transformation that stops dataset iteration based on a `predicate`.\n\nArgs:\npredicate: A function that maps a nested structure of tensors (having shapes\nand types defined by `self.output_shapes` and `self.output_types`) to a\nscalar `tf.bool` tensor.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def bandit(self, choice_rewards):\n    return max(choice_rewards, key=(lambda a: np.mean(choice_rewards[a])))", "docstring": "Return the choice to take next using multi-armed bandit\n\nMulti-armed bandit method. Accepts a mapping of choices to rewards which indicate their\nhistorical performance, and returns the choice that we should make next in order to\nmaximize expected reward in the long term.\n\nThe default implementation is to return the arm with the highest average score.\n\nArgs:\nchoice_rewards (Dict[object, List[float]]): maps choice IDs to lists of rewards.\n\nReturns:\nstr: the name of the choice to take next.", "source": "codesearchnet"}
{"code": "def color_val(color):\n    if is_str(color):\n        return Color[color].value\n    elif isinstance(color, Color):\n        return color.value\n    elif isinstance(color, tuple):\n        assert (len(color) == 3)\n        for channel in color:\n            assert ((channel >= 0) and (channel <= 255))\n        return color\n    elif isinstance(color, int):\n        assert ((color >= 0) and (color <= 255))\n        return (color, color, color)\n    elif isinstance(color, np.ndarray):\n        assert ((color.ndim == 1) and (color.size == 3))\n        assert np.all(((color >= 0) & (color <= 255)))\n        color = color.astype(np.uint8)\n        return tuple(color)\n    else:\n        raise TypeError('Invalid type for color: {}'.format(type(color)))", "docstring": "Convert various input to color tuples.\n\nArgs:\ncolor (:obj:`Color`/str/tuple/int/ndarray): Color inputs\n\nReturns:\ntuple[int]: A tuple of 3 integers indicating BGR channels.", "source": "codesearchnet"}
{"code": "def time_travel(self, datetime=None, timedelta=None, seconds=0, minutes=0, hours=0, days=0):\n    if (datetime is not None):\n        self.timedelta = (datetime - python_datetime.now())\n    if (timedelta is not None):\n        self.timedelta = (self.timedelta + timedelta)\n    self.timedelta = (self.timedelta + python_timedelta(seconds=seconds))\n    self.timedelta = (self.timedelta + python_timedelta(minutes=minutes))\n    self.timedelta = (self.timedelta + python_timedelta(hours=hours))\n    self.timedelta = (self.timedelta + python_timedelta(days=days))\n    log('Time traveling to {}\\n'.format(humanize.naturaltime(self.now())))\n    faketime.change_time(self.hitch_dir.faketime(), self.now())", "docstring": "Mock moving forward or backward in time by shifting the system clock fed to the services tested.\n\nNote that all of these arguments can be used together, individually or not at all. The time\ntraveled to will be the sum of all specified time deltas from datetime. If no datetime is specified,\nthe deltas will be added to the current time.\n\nArgs:\ndatetime (Optional[datetime]): Time travel to specific datetime.\ntimedelta (Optional[timedelta]): Time travel to 'timedelta' from now.\nseconds (Optional[number]): Time travel 'seconds' seconds from now.\nminutes (Optional[number]): Time travel 'minutes' minutes from now.\nhours (Optional[number]): Time travel 'hours' hours from now.\ndays (Optional[number]): Time travel 'days' days from now.", "source": "codesearchnet"}
{"code": "def __init__(self, pid_filename):\n        \n        self.stdin_path = '/dev/null'\n        self.stdout_path = '/dev/null'\n        self.stderr_path = '/dev/null'\n        self.pidfile_path = '/tmp/' + pid_filename + '.pid'\n        self.pidfile_timeout = 5\n        self.daemon_runner = runner.DaemonRunner(self)\n\n        \n        if self.isRunning() and \"stop\" not in sys.argv and \\\n           \"restart\" not in sys.argv:\n            self.onIsRunning()", "docstring": "Generic daemon class, which allows you to daemonize your script and\nreact to events in simple callbacks.\n\nArgs:\npid_filename (str): name of daemon's PID file, which is stored in\n``/tmp``. Class automatically adds ``.pid``\nsuffix.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_pattern, action_function):\n        \n        super(GeneratorAction, self).__init__()\n        self.__file_pattern = file_pattern\n        self.__action_function = action_function", "docstring": "Container to store an \"action\".\n\nEvery file(s) generation is considered as an action.\n\nArgs:\nfile_pattern: fnmatch pattern.\naction_function: Callback without argument. See documentation.", "source": "juraj-google-style"}
{"code": "def validate_resource(resource: message.Message, primitive_handler_: primitive_handler.PrimitiveHandler) -> None:\n    _validate_fhir_constraints(resource, resource.DESCRIPTOR.name, primitive_handler_)", "docstring": "Performs basic FHIR constraint validation on the provided resource.\n\nThis API works for all supported versions of FHIR, but requires a primitive\nhandler to be passed as an argument.\nIf the FHIR version being used is known ahead of time, version-specific APIs\nsuch as `google.fhir.r4 resource_validation` should be used instead.\n\nArgs:\nresource: the resource proto to validate\nprimitive_handler_: Version-specific logic", "source": "github-repos"}
{"code": "def load_hgnc_genes(adapter, genes=None, ensembl_lines=None, hgnc_lines=None, exac_lines=None, mim2gene_lines=None, genemap_lines=None, hpo_lines=None, build='37', omim_api_key=''):\n    gene_objects = list()\n    if (not genes):\n        if (ensembl_lines is None):\n            ensembl_lines = fetch_ensembl_genes(build=build)\n        hgnc_lines = (hgnc_lines or fetch_hgnc())\n        exac_lines = (exac_lines or fetch_exac_constraint())\n        if (not (mim2gene_lines and genemap_lines)):\n            if (not omim_api_key):\n                raise SyntaxError('Need to provide omim api key')\n            mim_files = fetch_mim_files(omim_api_key, mim2genes=True, genemap2=True)\n            mim2gene_lines = mim_files['mim2genes']\n            genemap_lines = mim_files['genemap2']\n        if (not hpo_lines):\n            hpo_files = fetch_hpo_files(hpogenes=True)\n            hpo_lines = hpo_files['hpogenes']\n        genes = link_genes(ensembl_lines=ensembl_lines, hgnc_lines=hgnc_lines, exac_lines=exac_lines, mim2gene_lines=mim2gene_lines, genemap_lines=genemap_lines, hpo_lines=hpo_lines)\n    non_existing = 0\n    nr_genes = len(genes)\n    with progressbar(genes.values(), label='Building genes', length=nr_genes) as bar:\n        for gene_data in bar:\n            if (not gene_data.get('chromosome')):\n                LOG.debug('skipping gene: %s. No coordinates found', gene_data.get('hgnc_symbol', '?'))\n                non_existing += 1\n                continue\n            gene_obj = build_hgnc_gene(gene_data, build=build)\n            gene_objects.append(gene_obj)\n    LOG.info('Loading genes build %s', build)\n    adapter.load_hgnc_bulk(gene_objects)\n    LOG.info('Loading done. %s genes loaded', len(gene_objects))\n    LOG.info('Nr of genes without coordinates in build %s: %s', build, non_existing)\n    return gene_objects", "docstring": "Load genes into the database\n\nlink_genes will collect information from all the different sources and\nmerge it into a dictionary with hgnc_id as key and gene information as values.\n\nArgs:\nadapter(scout.adapter.MongoAdapter)\ngenes(dict): If genes are already parsed\nensembl_lines(iterable(str)): Lines formated with ensembl gene information\nhgnc_lines(iterable(str)): Lines with gene information from genenames.org\nexac_lines(iterable(str)): Lines with information pLi-scores from ExAC\nmim2gene(iterable(str)): Lines with map from omim id to gene symbol\ngenemap_lines(iterable(str)): Lines with information of omim entries\nhpo_lines(iterable(str)): Lines information about map from hpo terms to genes\nbuild(str): What build to use. Defaults to '37'\n\nReturns:\ngene_objects(list): A list with all gene_objects that was loaded into database", "source": "codesearchnet"}
{"code": "def login_with_password_no_sync(self, username, password):\n        \n        warn(\"login_with_password_no_sync is deprecated. Use login with sync=False.\",\n             DeprecationWarning)\n        return self.login(username, password, sync=False)", "docstring": "Deprecated. Use ``login`` with ``sync=False``.\n\nLogin to the homeserver.\n\nArgs:\nusername (str): Account username\npassword (str): Account password\n\nReturns:\nstr: Access token\n\nRaises:\nMatrixRequestError", "source": "juraj-google-style"}
{"code": "def render_diagram(out_base):\n    import codecs\n    import subprocess\n    import sadisplay\n    desc = sadisplay.describe(list(model_registry.values()), show_methods=False, show_properties=True, show_indexes=True)\n    with codecs.open((out_base + '.dot'), 'w', encoding='utf-8') as f:\n        f.write(sadisplay.dot(desc))\n    if (not hasattr(config, 'DOT_EXECUTABLE')):\n        raise RuntimeError(\"Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'\")\n    if (not os.path.exists(config.DOT_EXECUTABLE)):\n        raise IOError((\"Could not find file pointed to by 'DOT_EXECUTABLE': \" + str(config.DOT_EXECUTABLE)))\n    subprocess.check_call([config.DOT_EXECUTABLE, '-T', 'png', '-o', (out_base + '.png'), (out_base + '.dot')])", "docstring": "Render a data model diagram\n\nIncluded in the diagram are all classes from the model registry.\nFor your project, write a small script that imports all models that you would like to\nhave included and then calls this function.\n\n.. note:: This function requires the 'dot' executable from the GraphViz package to be installed\nand its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.\n\nArgs:\nout_base (str): output base path (file endings will be appended)", "source": "codesearchnet"}
{"code": "def uses_keras_history(tensors):\n    checked_tensors = set()\n    tensors_to_check = nest.flatten(tensors)\n    while tensors_to_check:\n        new_tensors_to_check = []\n        for tensor in tensors_to_check:\n            if id(tensor) in checked_tensors:\n                continue\n            checked_tensors.add(id(tensor))\n            if getattr(tensor, '_keras_history_checked', None) is not None:\n                continue\n            if getattr(tensor, '_keras_history', None) is not None:\n                return True\n            try:\n                new_tensors_to_check.extend(tensor.op.inputs)\n            except AttributeError:\n                pass\n        tensors_to_check = new_tensors_to_check\n    mark_checked(tensors)\n    return False", "docstring": "Check if at least one Tensor originates from a `keras.Input`.\n\nThis is `True` if at least one Tensor has its origin in a `keras.Input`.\nAny Tensor that originates from a `keras.Input` will have a dependency\nTensor with a `_keras_history` attribute attached. Tensors that have\nalready been checked to not originate from a `keras.Input`\nare marked as `_keras_history_checked`.\n\nArgs:\ntensors: An arbitrary nested structure of Tensors.\n\nReturns:\nBool, whether at least one Tensor originates from a `keras.Input`.", "source": "github-repos"}
{"code": "def get_version(tool_name, tool_command):\n        \n        result = {}\n        for line in Bash(ShellConfig(script=tool_command, internal=True)).process():\n            if line.find(\"command not found\") >= 0:\n                VersionsCheck.LOGGER.error(\"Required tool '%s' not found (stopping pipeline)!\", tool_name)\n                sys.exit(1)\n            else:\n                version = list(re.findall(r'(\\d+(\\.\\d+)+)+', line))[0][0]\n                result = {tool_name: Version(str(version))}\n            break\n        return result", "docstring": "Get name and version of a tool defined by given command.\n\nArgs:\ntool_name (str): name of the tool.\ntool_command (str): Bash one line command to get the version of the tool.\n\nReturns:\ndict: tool name and version or empty when no line has been found", "source": "juraj-google-style"}
{"code": "def get(self, request):\n    code = request.GET.get('code')\n    if (not code):\n        return render(request, 'django_auth_adfs/login_failed.html', {'error_message': 'No authorization code was provided.'}, status=400)\n    redirect_to = request.GET.get('state')\n    user = authenticate(request=request, authorization_code=code)\n    if (user is not None):\n        if user.is_active:\n            login(request, user)\n            if redirect_to:\n                redirect_to = base64.urlsafe_b64decode(redirect_to.encode()).decode()\n            else:\n                redirect_to = django_settings.LOGIN_REDIRECT_URL\n            url_is_safe = is_safe_url(url=redirect_to, allowed_hosts=[request.get_host()], require_https=request.is_secure())\n            redirect_to = (redirect_to if url_is_safe else '/')\n            return redirect(redirect_to)\n        else:\n            return render(request, 'django_auth_adfs/login_failed.html', {'error_message': 'Your account is disabled.'}, status=403)\n    else:\n        return render(request, 'django_auth_adfs/login_failed.html', {'error_message': 'Login failed.'}, status=401)", "docstring": "Handles the redirect from ADFS to our site.\nWe try to process the passed authorization code and login the user.\n\nArgs:\nrequest (django.http.request.HttpRequest): A Django Request object", "source": "codesearchnet"}
{"code": "def get_transaction_id(transaction, read_operation=True):\n    if (transaction is None):\n        return None\n    else:\n        if (not transaction.in_progress):\n            raise ValueError(INACTIVE_TXN)\n        if (read_operation and (len(transaction._write_pbs) > 0)):\n            raise ReadAfterWriteError(READ_AFTER_WRITE_ERROR)\n        return transaction.id", "docstring": "Get the transaction ID from a ``Transaction`` object.\n\nArgs:\ntransaction (Optional[~.firestore_v1beta1.transaction.\\\nTransaction]): An existing transaction that this query will\nrun in.\nread_operation (Optional[bool]): Indicates if the transaction ID\nwill be used in a read operation. Defaults to :data:`True`.\n\nReturns:\nOptional[bytes]: The ID of the transaction, or :data:`None` if the\n``transaction`` is :data:`None`.\n\nRaises:\nValueError: If the ``transaction`` is not in progress (only if\n``transaction`` is not :data:`None`).\nReadAfterWriteError: If the ``transaction`` has writes stored on\nit and ``read_operation`` is :data:`True`.", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_loader = KeyedModelHandler(TFModelHandlerNumpy(model_uri=known_args.model_path, model_type=ModelType.SAVED_MODEL, large_model=known_args.large_model))\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    label_pixel_tuple = pipeline | 'ReadFromInput' >> beam.io.ReadFromText(known_args.input) | 'PreProcessInputs' >> beam.Map(process_input)\n    predictions = label_pixel_tuple | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n    _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def get_ip_reports(self, ips):\n        \n        api_name = 'virustotal-ip-address-reports'\n\n        (all_responses, ips) = self._bulk_cache_lookup(api_name, ips)\n        responses = self._request_reports(\"ip\", ips, 'ip-address/report')\n\n        for ip, response in zip(ips, responses):\n            if self._cache:\n                self._cache.cache_value(api_name, ip, response)\n            all_responses[ip] = response\n\n        return all_responses", "docstring": "Retrieves the most recent VT info for a set of ips.\n\nArgs:\nips: list of IPs.\nReturns:\nA dict with the IP as key and the VT report as value.", "source": "juraj-google-style"}
{"code": "def bridge_list():\n    cmd = 'ovs-vsctl list-br'\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    stdout = result['stdout']\n    return _stdout_list_split(retcode, stdout)", "docstring": "Lists all existing real and fake bridges.\n\nReturns:\nList of bridges (or empty list), False on failure.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_list", "source": "codesearchnet"}
{"code": "def request(self, send_terminator = False):\n        \n        self.m_a_crc = False\n        start_context = self.getContext()\n        self.setContext(\"request[v3A]\")\n        try:\n            self.m_serial_port.write(\"2f3f\".decode(\"hex\") +\n                                     self.m_meter_address +\n                                     \"210d0a\".decode(\"hex\"))\n            self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())\n            unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)\n            self.convertData(unpacked_read_a, self.m_blk_a, 1)\n            self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)\n            if send_terminator:\n                self.serialPostEnd()\n            self.calculateFields()\n            self.makeReturnFormat()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(start_context)\n        return self.m_a_crc", "docstring": "Required request() override for v3 and standard method to read meter.\n\nArgs:\nsend_terminator (bool): Send termination string at end of read.\n\nReturns:\nbool: CRC request flag result from most recent read", "source": "juraj-google-style"}
{"code": "def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):\n    ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type', custom_error_text='Please set file type to be downloaded from the PDB: pdb, mmCif, xml, or mmtf')\n    p = PDBList()\n    with ssbio.utils.suppress_stdout():\n        structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)\n    if (not op.exists(structure_file)):\n        log.debug('{}: {} file not available'.format(self.id, file_type))\n        raise URLError('{}.{}: file not available to download'.format(self.id, file_type))\n    else:\n        log.debug('{}: {} file saved'.format(self.id, file_type))\n        if (file_type == 'pdb'):\n            new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')\n            os.rename(structure_file, new_name)\n            structure_file = new_name\n        self.load_structure_path(structure_file, file_type)\n        if (load_header_metadata and (file_type == 'mmtf')):\n            self.update(parse_mmtf_header(structure_file))\n        if (load_header_metadata and (file_type != 'mmtf')):\n            self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun)))", "docstring": "Download a structure file from the PDB, specifying an output directory and a file type. Optionally download\nthe mmCIF header file and parse data from it to store within this object.\n\nArgs:\noutdir (str): Path to output directory\nfile_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB\nload_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files\nforce_rerun (bool): If structure file should be downloaded even if it already exists", "source": "codesearchnet"}
{"code": "class UperNetPyramidPoolingModule(nn.Module):\n\n    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:\n        super().__init__()\n        self.pool_scales = pool_scales\n        self.align_corners = align_corners\n        self.in_channels = in_channels\n        self.channels = channels\n        self.blocks = []\n        for i, pool_scale in enumerate(pool_scales):\n            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)\n            self.blocks.append(block)\n            self.add_module(str(i), block)\n\n    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n        ppm_outs = []\n        for ppm in self.blocks:\n            ppm_out = ppm(x)\n            upsampled_ppm_out = nn.functional.interpolate(ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)\n            ppm_outs.append(upsampled_ppm_out)\n        return ppm_outs", "docstring": "Pyramid Pooling Module (PPM) used in PSPNet.\n\nArgs:\npool_scales (`Tuple[int]`):\nPooling scales used in Pooling Pyramid Module.\nin_channels (`int`):\nInput channels.\nchannels (`int`):\nChannels after modules, before conv_seg.\nalign_corners (`bool`):\nalign_corners argument of F.interpolate.", "source": "github-repos"}
{"code": "def execute_phase(self, phase):\n    \n    repeat_count = 1\n    repeat_limit = phase.options.repeat_limit or sys.maxsize\n    while not self._stopping.is_set():\n      is_last_repeat = repeat_count >= repeat_limit\n      phase_execution_outcome = self._execute_phase_once(phase, is_last_repeat)\n\n      if phase_execution_outcome.is_repeat and not is_last_repeat:\n        repeat_count += 1\n        continue\n\n      return phase_execution_outcome\n    \n    return PhaseExecutionOutcome(None)", "docstring": "Executes a phase or skips it, yielding PhaseExecutionOutcome instances.\n\nArgs:\nphase: Phase to execute.\n\nReturns:\nThe final PhaseExecutionOutcome that wraps the phase return value\n(or exception) of the final phase run. All intermediary results, if any,\nare REPEAT and handled internally. Returning REPEAT here means the phase\nhit its limit for repetitions.", "source": "juraj-google-style"}
{"code": "def _git_fetch_for_comparison(remote: str, actual_branch: str, compare_branch: str, verbose: bool) -> prepared_env.PreparedEnv:\n    actual_id = ''\n    base_id = ''\n    for depth in [10, 100, 1000, None]:\n        depth_str = ('' if (depth is None) else '--depth={}'.format(depth))\n        shell_tools.run_cmd('git', 'fetch', (None if verbose else '--quiet'), remote, actual_branch, depth_str, log_run_to_stderr=verbose)\n        actual_id = shell_tools.output_of('git', 'rev-parse', 'FETCH_HEAD')\n        shell_tools.run_cmd('git', 'fetch', (None if verbose else '--quiet'), remote, compare_branch, depth_str, log_run_to_stderr=verbose)\n        base_id = shell_tools.output_of('git', 'rev-parse', 'FETCH_HEAD')\n        try:\n            base_id = shell_tools.output_of('git', 'merge-base', actual_id, base_id)\n            break\n        except subprocess.CalledProcessError:\n            pass\n    return prepared_env.PreparedEnv(None, actual_id, base_id, None, None)", "docstring": "Fetches two branches including their common ancestor.\n\nLimits the depth of the fetch to avoid unnecessary work. Scales up the\ndepth exponentially and tries again when the initial guess is not deep\nenough.\n\nArgs:\nremote: The location of the remote repository, in a format that the\ngit command will understand.\nactual_branch: A remote branch or ref to fetch,\ncompare_branch: Another remote branch or ref to fetch,\nverbose: When set, more progress output is produced.\n\nReturns:\nA ComparableCommits containing the commit id of the actual branch and\na the id of a commit to compare against (e.g. for when doing incremental\nchecks).", "source": "codesearchnet"}
{"code": "def get(self, webfont_name, webfont_settings):\n        \n        try:\n            webfont_settings = extend_webfont_settings(webfont_settings)\n        except IcomoonSettingsError as e:\n            msg = \"Invalid webfont settings for '{}': {}\"\n            self.errors[webfont_name] = msg.format(webfont_name, e.value)\n            return\n\n        filepath = os.path.join(webfont_settings['fontdir_path'],\n                                self.manifest_filename)\n\n        if os.path.exists(filepath):\n            self.manifests[webfont_name] = self.parse_manifest(filepath)\n        else:\n            msg = (\n                   )\n            self.errors[webfont_name] = msg.format(name=webfont_name,\n                                                   filepath=filepath)", "docstring": "Get a manifest file, parse and store it.\n\nArgs:\nwebfont_name (string): Webfont key name. Used to store manifest\nand potentially its parser error.\nwebfont_settings (dict): Webfont settings (an item value from\n``settings.ICOMOON_WEBFONTS``).", "source": "juraj-google-style"}
{"code": "def __init__(self, features, location_id=None, metadata=None, timeout=120):\n    super().__init__(features=features, location_id=location_id, metadata=metadata, timeout=timeout)", "docstring": "Args:\nfeatures: (List[``videointelligence_v1.Feature``]) Required.\nthe Video Intelligence API features to detect\nlocation_id: (str) Optional.\nCloud region where annotation should take place.\nIf no region is specified, a region will be determined\nbased on video file location.\nmetadata: (Sequence[Tuple[str, str]]) Optional.\nAdditional metadata that is provided to the method.\ntimeout: (int) Optional.\nThe time in seconds to wait for the response from the\nVideo Intelligence API", "source": "github-repos"}
{"code": "def set_colors(self, fg=None, bg=None):\n        \n        if fg is not None:\n            self._fg = _format_color(fg, self._fg)\n        if bg is not None:\n            self._bg = _format_color(bg, self._bg)", "docstring": "Sets the colors to be used with the L{print_str} and draw_* methods.\n\nValues of None will only leave the current values unchanged.\n\nArgs:\nfg (Optional[Union[Tuple[int, int, int], int, Ellipsis]])\nbg (Optional[Union[Tuple[int, int, int], int, Ellipsis]])\n.. seealso:: :any:`move`, :any:`print_str`", "source": "juraj-google-style"}
{"code": "def __init__(self, filename, temporary_directory=None):\n    \n    self._database = None\n    self._filename = filename\n    self._is_open = False\n    self._temp_db_file_path = ''\n    self._temporary_directory = temporary_directory\n    self._temp_wal_file_path = ''\n\n    self.schema = {}", "docstring": "Initializes the database object.\n\nArgs:\nfilename (str): name of the file entry.\ntemporary_directory (Optional[str]): path of the directory for temporary\nfiles.", "source": "juraj-google-style"}
{"code": "def and_terms(*args):\n    \n    args = [arg if not isinstance(arg, list) else ' '.join(arg) for arg in args]\n    return '({0})'.format(' '.join(args))", "docstring": "Connect given term strings or list(s) of term strings with an AND operator for querying.\n\nArgs:\nAn arbitrary number of either strings or lists of strings representing query terms.\n\nReturns\nA query string consisting of argument terms and'ed together.", "source": "juraj-google-style"}
{"code": "def Proxy(self, status, headers, exc_info=None):\n    self.call_context['status'] = status\n    self.call_context['headers'] = headers\n    self.call_context['exc_info'] = exc_info\n    return self.body_buffer.write", "docstring": "Save args, defer start_response until response body is parsed.\n\nCreate output buffer for body to be written into.\nNote: this is not quite WSGI compliant: The body should come back as an\niterator returned from calling service_app() but instead, StartResponse\nreturns a writer that will be later called to output the body.\nSee google/appengine/ext/webapp/__init__.py::Response.wsgi_write()\nwrite = start_response('%d %s' % self.__status, self.__wsgi_headers)\nwrite(body)\n\nArgs:\nstatus: Http status to be sent with this response\nheaders: Http headers to be sent with this response\nexc_info: Exception info to be displayed for this response\nReturns:\ncallable that takes as an argument the body content", "source": "codesearchnet"}
{"code": "def readlink(self, path):\n        \n        if path is None:\n            raise TypeError\n        try:\n            link_obj = self.lresolve(path)\n        except IOError as exc:\n            self.raise_os_error(exc.errno, path)\n        if S_IFMT(link_obj.st_mode) != S_IFLNK:\n            self.raise_os_error(errno.EINVAL, path)\n\n        if self.ends_with_path_separator(path):\n            if not self.is_windows_fs and self.exists(path):\n                self.raise_os_error(errno.EINVAL, path)\n            if not self.exists(link_obj.path):\n                if self.is_windows_fs:\n                    error = errno.EINVAL\n                elif self._is_circular_link(link_obj):\n                    if self.is_macos:\n                        return link_obj.path\n                    error = errno.ELOOP\n                else:\n                    error = errno.ENOENT\n                self.raise_os_error(error, link_obj.path)\n\n        return link_obj.contents", "docstring": "Read the target of a symlink.\n\nArgs:\npath:  symlink to read the target of.\n\nReturns:\nthe string representing the path to which the symbolic link points.\n\nRaises:\nTypeError: if path is None\nOSError: (with errno=ENOENT) if path is not a valid path, or\n(with errno=EINVAL) if path is valid, but is not a symlink,\nor if the path ends with a path separator (Posix only)", "source": "juraj-google-style"}
{"code": "def write_dftbp(filename, atoms):\n    scale_pos = dftbpToBohr\n    lines = ''\n    natoms = atoms.get_number_of_atoms()\n    lines += str(natoms)\n    lines += ' S \\n'\n    expaned_symbols = atoms.get_chemical_symbols()\n    symbols = get_reduced_symbols(expaned_symbols)\n    lines += (' '.join(symbols) + '\\n')\n    atom_numbers = []\n    for ss in expaned_symbols:\n        atom_numbers.append((symbols.index(ss) + 1))\n    positions = (atoms.get_positions() / scale_pos)\n    for ii in range(natoms):\n        pos = positions[ii]\n        pos_str = '{:3d} {:3d} {:20.15f} {:20.15f} {:20.15f}\\n'.format((ii + 1), atom_numbers[ii], pos[0], pos[1], pos[2])\n        lines += pos_str\n    lines += '0.0 0.0 0.0\\n'\n    cell = (atoms.get_cell() / scale_pos)\n    for ii in range(3):\n        cell_str = '{:20.15f} {:20.15f} {:20.15f}\\n'.format(cell[ii][0], cell[ii][1], cell[ii][2])\n        lines += cell_str\n    outfile = open(filename, 'w')\n    outfile.write(lines)", "docstring": "Writes DFTB+ readable, gen-formatted structure files\n\nArgs:\nfilename: name of the gen-file to be written\natoms: object containing information about structure", "source": "codesearchnet"}
{"code": "def get_max_position(self, chrom):\n        \n        res = self.db.variant.find({'chrom':chrom}, {'_id':0, 'end':1}).sort([('end', DESCENDING)]).limit(1)\n        end = 0\n        for variant in res:\n            end = variant['end']\n        return end", "docstring": "Get the last position observed on a chromosome in the database\n\nArgs:\nchrom(str)\n\nReturns:\nend(int): The largest end position found", "source": "juraj-google-style"}
{"code": "def banner(text, border='=', width=80):\n    \n    text_padding = '{0:^%d}' % (width)\n    LOG.info(border * width)\n    LOG.info(text_padding.format(text))\n    LOG.info(border * width)", "docstring": "Center _text_ in a banner _width_ wide with _border_ characters.\n\nArgs:\ntext (str): What to write in the banner\nborder (str): Border character\nwidth (int): How long the border should be", "source": "juraj-google-style"}
{"code": "def load_hpo_terms(adapter, hpo_lines=None, hpo_gene_lines=None, alias_genes=None):\n    \n    \n    \n    hpo_terms = {}\n    \n    \n    if not hpo_lines:\n        hpo_lines = fetch_hpo_terms()\n    \n    \n    if not hpo_gene_lines:\n        hpo_gene_lines = fetch_hpo_to_genes()\n\n    \n    \n    LOG.info(\"Parsing hpo terms\")\n    for term in parse_hpo_obo(hpo_lines):\n        hpo_terms[term['hpo_id']] = term\n    \n    \n    if not alias_genes:\n        alias_genes = adapter.genes_by_alias()\n\n    LOG.info(\"Adding gene information to hpo terms ...\")\n    for hpo_to_symbol in parse_hpo_to_genes(hpo_gene_lines):\n        hgnc_symbol = hpo_to_symbol['hgnc_symbol']\n        hpo_id = hpo_to_symbol['hpo_id']\n        \n        \n        gene_info = alias_genes.get(hgnc_symbol)\n        if not gene_info:\n            continue\n\n        hgnc_id = gene_info['true']\n\n        if hpo_id not in hpo_terms:\n            continue\n\n        hpo_term = hpo_terms[hpo_id]\n\n        if not 'genes' in hpo_term:\n            hpo_term['genes'] = set()\n\n        hpo_term['genes'].add(hgnc_id)\n\n    start_time = datetime.now()\n\n    LOG.info(\"Loading the hpo terms...\")\n    nr_terms = len(hpo_terms)\n    hpo_bulk = []\n    with progressbar(hpo_terms.values(), label=\"Loading hpo terms\", length=nr_terms) as bar:\n        \n        for hpo_info in bar:\n            hpo_bulk.append(build_hpo_term(hpo_info))\n        \n        if len(hpo_bulk) > 10000:\n            adapter.load_hpo_bulk(hpo_bulk)\n            hpo_bulk = []\n    \n    if hpo_bulk:\n        adapter.load_hpo_bulk(hpo_bulk)\n    \n    LOG.info(\"Loading done. Nr of terms loaded {0}\".format(nr_terms))\n    LOG.info(\"Time to load terms: {0}\".format(datetime.now() - start_time))", "docstring": "Load the hpo terms into the database\n\nParse the hpo lines, build the objects and add them to the database\n\nArgs:\nadapter(MongoAdapter)\nhpo_lines(iterable(str))\nhpo_gene_lines(iterable(str))", "source": "juraj-google-style"}
{"code": "def form_out(self, _form=None):\n        \n        _form = _form or self.object_form\n        self.output['forms'] = _form.serialize()\n        self._add_meta_props(_form)\n        self.output['forms']['grouping'] = _form.Meta.grouping\n        self.output['forms']['constraints'] = _form.Meta.constraints\n        self._patch_form(self.output['forms'])\n        self.set_client_cmd('form')", "docstring": "Renders form. Applies form modifiers, then writes\nresult to response payload. If supplied, given form\nobject instance will be used instead of view's\ndefault ObjectForm.\n\nArgs:\n_form (:py:attr:`~zengine.forms.json_form.JsonForm`):\nForm object to override `self.object_form`", "source": "juraj-google-style"}
{"code": "def call(self, inputs):\n    \n    image_shape = tf.shape(input=inputs)[-3:]\n    collapsed_shape = tf.concat(([-1], image_shape), axis=0)\n    out = tf.reshape(inputs, collapsed_shape)  \n    out = self.conv1(out)\n    out = self.conv2(out)\n    out = self.conv3(out)\n    out = self.conv4(out)\n    expanded_shape = tf.concat((tf.shape(input=inputs)[:-3], [-1]), axis=0)\n    return tf.reshape(out, expanded_shape)", "docstring": "Runs the model to generate an intermediate representation of x_t.\n\nArgs:\ninputs: A batch of image sequences `x_{1:T}` of shape\n`[sample_shape, batch_size, timesteps, height, width,\nchannels]`.\n\nReturns:\nA batch of intermediate representations of shape [sample_shape,\nbatch_size, timesteps, hidden_size].", "source": "juraj-google-style"}
{"code": "def minutes(start, end=None):\n    \n    return iterate.between(start, datetime.timedelta(minutes=1), end)", "docstring": "Iterate over the minutes between the given datetime_tzs.\n\nArgs:\nstart: datetime_tz to start from.\nend: (Optional) Date to end at, if not given the iterator will never\nterminate.\n\nReturns:\nAn iterator which generates datetime_tz objects a minute apart.", "source": "juraj-google-style"}
{"code": "def copy_and_move_messages(from_channel, to_channel):\n        \n        with BlockSave(Message, query_dict={'channel_id': to_channel.key}):\n            for message in Message.objects.filter(channel=from_channel, typ=15):\n                message.key = ''\n                message.channel = to_channel\n                message.save()", "docstring": "While splitting channel and moving chosen subscribers to new channel,\nold channel's messages are copied and moved to new channel.\n\nArgs:\nfrom_channel (Channel object): move messages from channel\nto_channel (Channel object): move messages to channel", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: Optional[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]]]:\n    output_attentions = False\n    bsz, q_len, hidden_size = hidden_states.size()\n    query_states, router_logits, topo_info = self.experts.map(hidden_states)\n    key_states, value_states = self.kv_proj(hidden_states).chunk(2, dim=-1)\n    query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n    key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n    value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n    cos, sin = self.rotary_emb(value_states, position_ids)\n    query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)\n    if past_key_value is not None:\n        cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}\n        key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)\n    key_states = key_states.repeat(1, self.top_k, 1, 1)\n    value_states = value_states.repeat(1, self.top_k, 1, 1)\n    query_states = query_states.transpose(1, 2)\n    key_states = key_states.transpose(1, 2)\n    value_states = value_states.transpose(1, 2)\n    dropout_rate = self.attention_dropout if self.training else 0.0\n    input_dtype = query_states.dtype\n    if input_dtype == torch.float32:\n        if torch.is_autocast_enabled():\n            target_dtype = torch.get_autocast_gpu_dtype()\n        elif hasattr(self.config, '_pre_quantization_dtype'):\n            target_dtype = self.config._pre_quantization_dtype\n        else:\n            target_dtype = self.kv_proj.weight.dtype\n        logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. 
We will cast back the input in {target_dtype}.')\n        query_states = query_states.to(target_dtype)\n        key_states = key_states.to(target_dtype)\n        value_states = value_states.to(target_dtype)\n    attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal).to(input_dtype)\n    attn_output = attn_output.reshape(bsz, q_len, self.top_k, self.kv_projection_size)\n    attn_output = self.experts.reduce(attn_output, topo_info)\n    attn_output = attn_output.view(bsz, q_len, hidden_size)\n    if not output_attentions:\n        attn_weights = None\n    return (attn_output, attn_weights, past_key_value, router_logits)", "docstring": "Forward pass of the JetMoeAttention module.\n\nArgs:\nhidden_states (Optional[torch.FloatTensor]): Input hidden states.\nattention_mask (Optional[torch.FloatTensor]): Attention mask.\nlayer_past (Optional[Tuple[torch.Tensor]]): Past layer state.\nuse_cache (Optional[bool]): Whether to use cached states.\noutput_attentions (Optional[bool]): Whether to output attention weights.\ncache_position (Optional[torch.LongTensor]): Position of the cache.\n\nReturns:\nUnion[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[...]]]: Tuple containing outputs.", "source": "github-repos"}
{"code": "def Read(f):\n    try:\n        yaml_data = yaml.load(f)\n    except yaml.YAMLError as e:\n        raise ParseError(('%s' % e))\n    except IOError as e:\n        raise YAMLLoadError(('%s' % e))\n    _CheckData(yaml_data)\n    try:\n        return Config(yaml_data.get('blacklist', ()), yaml_data.get('whitelist', '*'))\n    except UnicodeDecodeError as e:\n        raise YAMLLoadError(('%s' % e))", "docstring": "Reads and returns Config data from a yaml file.\n\nArgs:\nf: Yaml file to parse.\n\nReturns:\nConfig object as defined in this file.\n\nRaises:\nError (some subclass): If there is a problem loading or parsing the file.", "source": "codesearchnet"}
{"code": "def verify(self, obj):\n        \n\n        if obj is not None:\n            raise ValidationError(\"Object is not None\",\n                                  reason='%s is not None' % str(obj), object=obj)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def collect_per_output_metric_info(metrics, output_names, output_shapes, loss_fns, from_serialized=False, is_weighted=False):\n    if not metrics:\n        return [{} for _ in output_names]\n    if isinstance(metrics, list):\n        any_sub_list = any((isinstance(m, list) for m in metrics))\n        if any_sub_list:\n            if len(metrics) != len(output_names):\n                raise ValueError('When passing a list of lists as `metrics`, it should have one entry per model output. The model has ' + str(len(output_names)) + ' outputs, but you passed metrics=' + str(metrics))\n            nested_metrics = [generic_utils.to_list(m) for m in metrics]\n        elif len(output_names) > 1:\n            nested_metrics = []\n            for _ in output_names:\n                nested_metrics.append([metrics_module.clone_metric(m) for m in metrics])\n        else:\n            nested_metrics = [metrics]\n    elif isinstance(metrics, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n        nested_metrics = []\n        for name in output_names:\n            output_metrics = generic_utils.to_list(metrics.get(name, []))\n            nested_metrics.append(output_metrics)\n    else:\n        raise TypeError('Type of `metrics` argument not understood. Expected a list or dictionary, found: ' + str(metrics))\n    per_output_metrics = []\n    for i, metrics in enumerate(nested_metrics):\n        metrics_dict = collections.OrderedDict()\n        for metric in metrics:\n            metric_name = get_metric_name(metric, is_weighted)\n            metric_fn = get_metric_function(metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n            metric_fn._from_serialized = from_serialized\n            if not isinstance(metric_fn, metrics_module.Metric):\n                metric_fn = metrics_module.MeanMetricWrapper(metric_fn, name=metric_name)\n                metric_fn._from_serialized = False\n            metrics_dict[metric_name] = metric_fn\n        per_output_metrics.append(metrics_dict)\n    return per_output_metrics", "docstring": "Maps metric names and functions to model outputs.\n\nArgs:\nmetrics: a list or a list of lists or a dict of metric functions.\noutput_names: a list of the names (strings) of model outputs.\noutput_shapes: a list of the shapes (strings) of model outputs.\nloss_fns: a list of the loss functions corresponding to the model outputs.\nfrom_serialized: whether the model the metrics are being sourced from is\nbeing initialized from a serialized format.\nis_weighted: Boolean indicating whether the given metrics are weighted.\n\nReturns:\nA list (one entry per model output) of dicts.\nFor instance, if the model has 2 outputs, and for the first output\nwe want to compute \"binary_accuracy\" and \"binary_crossentropy\",\nand just \"binary_accuracy\" for the second output,\nthe list would look like: `[{\n'acc': binary_accuracy(),\n'ce': binary_crossentropy(),\n}, {\n'acc': binary_accuracy(),\n}]`\n\nRaises:\nTypeError: if an incorrect type is passed for the `metrics` argument.", "source": "github-repos"}
{"code": "def experimental_design(self) -> Any:\n    if (not self.samples):\n        raise ValueError('No samples in sample sheet')\n    markdown = tabulate([[getattr(s, h, '') for h in DESIGN_HEADER] for s in self.samples], headers=DESIGN_HEADER, tablefmt='pipe')\n    return maybe_render_markdown(markdown)", "docstring": "Return a markdown summary of the samples on this sample sheet.\n\nThis property supports displaying rendered markdown only when running\nwithin an IPython interpreter. If we are not running in an IPython\ninterpreter, then print out a nicely formatted ASCII table.\n\nReturns:\nMarkdown, str: A visual table of IDs and names for all samples.", "source": "codesearchnet"}
{"code": "def GenerateModelReport(metagraph, assume_valid_feeds=True, debug=False):\n    return tf_wrap.GenerateModelReport(metagraph.SerializeToString(), assume_valid_feeds, debug)", "docstring": "Report what's known statically about each node in the provided metagraph.\n\nArgs:\nmetagraph: A TensorFlow MetaGraphDef.\nassume_valid_feeds: If True, assume that the shape of the fed nodes is valid\ndebug: Add some information useful for debugging.\n\nReturns:\nA string containing the report.", "source": "github-repos"}
{"code": "def Parse(self, stat, file_object, knowledge_base):\n    (_, _) = (stat, knowledge_base)\n    lines = [l.strip() for l in utils.ReadFileBytesAsUnicode(file_object).splitlines()]\n    return self.ParseLines(lines)", "docstring": "Parse the netgroup file and return User objects.\n\nLines are of the form:\ngroup1 (-,user1,) (-,user2,) (-,user3,)\n\nGroups are ignored, we return users in lines that match the filter regexes,\nor all users in the file if no filters are specified.\n\nWe assume usernames are in the default regex format specified in the adduser\nman page.  Notably no non-ASCII characters.\n\nArgs:\nstat: unused statentry\nfile_object: netgroup VFSFile\nknowledge_base: unused\n\nReturns:\nrdf_client.User", "source": "codesearchnet"}
{"code": "def __delitem__(self, name: str) -> None:\n    if base.treats_as_sealed(self):\n        raise base.WritePermissionError('Cannot del item from a sealed Dict.')\n    if not base.writtable_via_accessors(self):\n        raise base.WritePermissionError(self._error_message(\"Cannot del Dict field by attribute or key while accessor_writable is set to False. Use 'rebind' method instead.\"))\n    if name not in self:\n        raise KeyError(self._error_message(f'Key does not exist in Dict: {name!r}.'))\n    update = self._set_item_without_permission_check(name, pg_typing.MISSING_VALUE)\n    if flags.is_change_notification_enabled() and update:\n        self._notify_field_updates([update])", "docstring": "Delete a key from the Dict.\n\nThis is used to delete a key which resolves to a pg.typing.NonConstKey.\n\nArgs:\nname: Key to delete.\n\nRaises:\nWritePermissionError: When Dict is sealed.\nKeyError: When key is not a NonConstKey.", "source": "github-repos"}
{"code": "def make_simulated_env_fn(**env_kwargs):\n\n    def env_fn(in_graph):\n        class_ = (SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv)\n        return class_(**env_kwargs)\n    return env_fn", "docstring": "Returns a function creating a simulated env, in or out of graph.\n\nArgs:\n**env_kwargs: kwargs to pass to the simulated env constructor.\n\nReturns:\nFunction in_graph -> env.", "source": "codesearchnet"}
{"code": "def segment_sum(data, segment_ids, num_segments=None, sorted=False):\n    _segment_reduce_validation(data, segment_ids)\n    if any_symbolic_tensors((data,)):\n        return SegmentSum(num_segments, sorted).symbolic_call(data, segment_ids)\n    return backend.math.segment_sum(data, segment_ids, num_segments=num_segments, sorted=sorted)", "docstring": "Computes the sum of segments in a tensor.\n\nArgs:\ndata: Input tensor.\nsegment_ids: A N-D tensor containing segment indices for each\nelement in `data`. Num dims for segment ids should be strictly\nsmaller or equal to number of dims in data.\nnum_segments: An integer representing the total number of\nsegments. If not specified, it is inferred from the maximum\nvalue in `segment_ids`.\nsorted: A boolean indicating whether `segment_ids` is sorted.\nDefaults to `False`.\n\nReturns:\nA tensor containing the sum of segments, where each element\nrepresents the sum of the corresponding segment in `data`.\n\nExample:\n\n>>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200])\n>>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2])\n>>> num_segments = 3\n>>> keras.ops.segment_sum(data, segment_ids,num_segments)\narray([3, 30, 300], dtype=int32)", "source": "github-repos"}
{"code": "def to_representation(self, instance):\n        \n        request = self.context['request']\n        enterprise_customer = instance.enterprise_customer\n\n        representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance)\n\n        \n        paginated_content = instance.get_paginated_content(request.GET)\n        count = paginated_content['count']\n        search_results = paginated_content['results']\n\n        for item in search_results:\n            content_type = item['content_type']\n            marketing_url = item.get('marketing_url')\n            if marketing_url:\n                item['marketing_url'] = utils.update_query_parameters(\n                    marketing_url, utils.get_enterprise_utm_context(enterprise_customer)\n                )\n            \n            if content_type == 'course':\n                item['enrollment_url'] = instance.get_course_enrollment_url(item['key'])\n            if content_type == 'courserun':\n                item['enrollment_url'] = instance.get_course_run_enrollment_url(item['key'])\n            if content_type == 'program':\n                item['enrollment_url'] = instance.get_program_enrollment_url(item['uuid'])\n\n        \n        previous_url = None\n        next_url = None\n        page = int(request.GET.get('page', '1'))\n        request_uri = request.build_absolute_uri()\n        if paginated_content['previous']:\n            previous_url = utils.update_query_parameters(request_uri, {'page': page - 1})\n        if paginated_content['next']:\n            next_url = utils.update_query_parameters(request_uri, {'page': page + 1})\n\n        representation['count'] = count\n        representation['previous'] = previous_url\n        representation['next'] = next_url\n        representation['results'] = search_results\n\n        return representation", "docstring": "Serialize the EnterpriseCustomerCatalog object.\n\nArguments:\ninstance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.\n\nReturns:\ndict: The EnterpriseCustomerCatalog converted to a dict.", "source": "juraj-google-style"}
{"code": "def pandas(self):\n    (names, prior, posterior) = ([], [], [])\n    for (iname, name) in enumerate(self.posterior_parameter.row_names):\n        names.append(name)\n        posterior.append(np.sqrt(float(self.posterior_parameter[(iname, iname)].x)))\n        iprior = self.parcov.row_names.index(name)\n        prior.append(np.sqrt(float(self.parcov[(iprior, iprior)].x)))\n    for (pred_name, pred_var) in self.posterior_prediction.items():\n        names.append(pred_name)\n        posterior.append(np.sqrt(pred_var))\n        prior.append(self.prior_prediction[pred_name])\n    return pd.DataFrame({'posterior': posterior, 'prior': prior}, index=names)", "docstring": "get a pandas dataframe of prior and posterior for all predictions\n\nReturns:\npandas.DataFrame : pandas.DataFrame\na dataframe with prior and posterior uncertainty estimates\nfor all forecasts (predictions)", "source": "codesearchnet"}
{"code": "def to_array(data):\n    try:\n        numpy_data = blosc.unpack_array(data)\n    except Exception as e:\n        raise ValueError('Could not load numpy data. {}'.format(e))\n    return numpy_data", "docstring": "Import a blosc array into a numpy array.\n\nArguments:\ndata: A blosc packed numpy array\n\nReturns:\nA numpy array with data from a blosc compressed array", "source": "codesearchnet"}
{"code": "def __init__(self, x, offset, dim, wrap, name=None):\n    \n    super(ShiftOperation, self).__init__([x], name=name or \"shift\")\n    self._dim = dim\n    self._axis = x.shape.dims.index(dim)\n    self._offset = offset\n    self._wrap = wrap\n    self._outputs = [Tensor(self, x.shape, x.dtype)]", "docstring": "Create a shift operation.\n\nShift x right by +offset in dimension dim.\nIf offset is negative, shift left.\nIf wrap is true then wrap-around.  Else, pad with zeros.\n\nArgs:\nx: a Tensor\noffset: an integer\ndim: a Dimension of x\nwrap: a boolean - whether to wrap or pad.\nname: an optional string", "source": "juraj-google-style"}
{"code": "def fastcc_is_consistent(model, epsilon, solver):\n    \n    for reaction in fastcc(model, epsilon, solver):\n        return False\n    return True", "docstring": "Quickly check whether model is consistent\n\nReturn true if the model is consistent. If it is only necessary to know\nwhether a model is consistent, this function is fast as it will return\nthe result as soon as it finds a single inconsistent reaction.\n\nArgs:\nmodel: :class:`MetabolicModel` to solve.\nepsilon: Flux threshold value.\nsolver: LP solver instance to use.", "source": "juraj-google-style"}
{"code": "def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n    self.input_dtype = hidden_states.dtype\n    hidden_states = hidden_states.to(self.dtype)\n    if self.training and self.jitter_noise > 0:\n        hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)\n    self._cast_classifier()\n    router_logits = self.classifier(hidden_states)\n    router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)\n    return (router_probabilities, router_logits)", "docstring": "Computes router probabilities from input hidden states.\n\nArgs:\nhidden_states (`torch.Tensor`):\n(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.\nReturns:\nrouter_probabilities (`torch.Tensor`):\nTensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each\ntoken and expert. Used for routing tokens to experts.\nrouter_logits (`torch.Tensor`):\nLogits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.\nThis is used later for computing router z-loss.", "source": "github-repos"}
{"code": "def cancel_job(self, job_id=None, job_name=None):\n        \n        payload = {}\n        if job_name is not None:\n            payload['job_name'] = job_name\n        if job_id is not None:\n            payload['job_id'] = job_id\n\n        jobs_url = self._get_url('jobs_path')\n        res = self.rest_client.session.delete(jobs_url, params=payload)\n        _handle_http_errors(res)\n        return res.json()", "docstring": "Cancel a running job.\n\nArgs:\njob_id (str, optional): Identifier of job to be canceled.\njob_name (str, optional): Name of job to be canceled.\n\nReturns:\ndict: JSON response for the job cancel operation.", "source": "juraj-google-style"}
{"code": "def __init__(self, primitive_handler_: primitive_handler.PrimitiveHandler, default_timezone: str) -> None:\n    self.primitive_handler = primitive_handler_\n    self.default_timezone = default_timezone\n    self._resource_type_mapping = {field.message_type.name: field for field in primitive_handler_.contained_resource_cls.DESCRIPTOR.fields}", "docstring": "Initializes an instance of the FHIR JSON parser.\n\nNote that this is for *internal-use* only. External clients should leverage\none of the available class constructors, such as:\n`JsonParser.json_parser_with_default_timezone(...)`.\n\nArgs:\nprimitive_handler_: Responsible for returning PrimitiveWrappers.\ndefault_timezone: The string representation of the timezone to default-to\nwhen parsing time-like values.", "source": "github-repos"}
{"code": "def apply(self, flag_set: AbstractSet[Flag], operand: AbstractSet[Flag]) -> FrozenSet[Flag]:\n    if (self == FlagOp.ADD):\n        return frozenset((flag_set | operand))\n    elif (self == FlagOp.DELETE):\n        return frozenset((flag_set - operand))\n    else:\n        return frozenset(operand)", "docstring": "Apply the flag operation on the two sets, returning the result.\n\nArgs:\nflag_set: The flag set being operated on.\noperand: The flags to use as the operand.", "source": "codesearchnet"}
{"code": "def DumpMany(objs):\n    precondition.AssertIterableType(objs, object)\n    text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True)\n    if compatibility.PY2:\n        text = text.decode('utf-8')\n    return text", "docstring": "Stringifies a sequence of Python objects to a multi-document YAML.\n\nArgs:\nobjs: An iterable of Python objects to convert to YAML.\n\nReturns:\nA multi-document YAML representation of the given objects.", "source": "codesearchnet"}
{"code": "def fetch(self, refund_id, data={}, **kwargs):\n        \n        return super(Refund, self).fetch(refund_id, data, **kwargs)", "docstring": "Refund object for given paymnet Id\n\nArgs:\nrefund_id : Refund Id for which refund has to be retrieved\n\nReturns:\nRefund dict for given refund Id", "source": "juraj-google-style"}
{"code": "def __init__(\n        self,\n        cls,\n        diff,\n    ):\n        \n        msg = \"\\n\".join([\n            \"\", \n            \"ctor: {}\".format(cls),\n            \"extras: {}\".format(diff)\n        ])\n        Exception.__init__(self, msg)\n        self.type = str(\n            type(self),\n        )\n        self.cls = str(cls)\n        self.diff = str(diff)\n        self.type = self.__class__.__name__", "docstring": "Note that type_assert can't be used because it would\ncreate a circular dependency.\n\nArgs:\ncls,  type, The type that was attempted to unmarshal into\ndiff: dict, The extra arguments that were passed to @cls", "source": "juraj-google-style"}
{"code": "def imread(img_or_path, flag='color'):\n    if isinstance(img_or_path, np.ndarray):\n        return img_or_path\n    elif is_str(img_or_path):\n        flag = (imread_flags[flag] if is_str(flag) else flag)\n        check_file_exist(img_or_path, 'img file does not exist: {}'.format(img_or_path))\n        return cv2.imread(img_or_path, flag)\n    else:\n        raise TypeError('\"img\" must be a numpy array or a filename')", "docstring": "Read an image.\n\nArgs:\nimg_or_path (ndarray or str): Either a numpy array or image path.\nIf it is a numpy array (loaded image), then it will be returned\nas is.\nflag (str): Flags specifying the color type of a loaded image,\ncandidates are `color`, `grayscale` and `unchanged`.\n\nReturns:\nndarray: Loaded image array.", "source": "codesearchnet"}
{"code": "def get(self, language: str=None, default: str=None) -> str:\n        \n\n        language = language or settings.LANGUAGE_CODE\n        value = super().get(language, default)\n        return value if value is not None else default", "docstring": "Gets the underlying value in the specified or\nprimary language.\n\nArguments:\nlanguage:\nThe language to get the value in.\n\nReturns:\nThe value in the current language, or\nthe primary language in case no language\nwas specified.", "source": "juraj-google-style"}
{"code": "def to_dataframe(self):\n    data = []\n    for (target_index, target_row) in enumerate(self._cm):\n        for (predicted_index, count) in enumerate(target_row):\n            data.append((self._labels[target_index], self._labels[predicted_index], count))\n    return pd.DataFrame(data, columns=['target', 'predicted', 'count'])", "docstring": "Convert the confusion matrix to a dataframe.\n\nReturns:\nA DataFrame with \"target\", \"predicted\", \"count\" columns.", "source": "codesearchnet"}
{"code": "def smart_device_selection(preferred_device_type=None):\n    cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type)\n    platform_names = [env.platform.name for env in cl_environments]\n    has_amd_pro_platform = any((('AMD Accelerated Parallel Processing' in name) for name in platform_names))\n    if has_amd_pro_platform:\n        return list(filter((lambda env: ('Clover' not in env.platform.name)), cl_environments))\n    if ((preferred_device_type is not None) and (not len(cl_environments))):\n        return CLEnvironmentFactory.all_devices()\n    return cl_environments", "docstring": "Get a list of device environments that is suitable for use in MOT.\n\nBasically this gets the total list of devices using all_devices() and applies a filter on it.\n\nThis filter does the following:\n1) if the 'AMD Accelerated Parallel Processing' is available remove all environments using the 'Clover'\nplatform.\n\nMore things may be implemented in the future.\n\nArgs:\npreferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'.\nIf no devices of this type can be found, we will use any other device available.\n\nReturns:\nlist of CLEnvironment: List with the CL device environments.", "source": "codesearchnet"}
{"code": "def _decrypt_asymmetric(self, decryption_algorithm, decryption_key, cipher_text, padding_method, hashing_algorithm=None):\n    if (decryption_algorithm == enums.CryptographicAlgorithm.RSA):\n        if (padding_method == enums.PaddingMethod.OAEP):\n            hash_algorithm = self._encryption_hash_algorithms.get(hashing_algorithm)\n            if (hash_algorithm is None):\n                raise exceptions.InvalidField(\"The hashing algorithm '{0}' is not supported for asymmetric decryption.\".format(hashing_algorithm))\n            padding_method = asymmetric_padding.OAEP(mgf=asymmetric_padding.MGF1(algorithm=hash_algorithm()), algorithm=hash_algorithm(), label=None)\n        elif (padding_method == enums.PaddingMethod.PKCS1v15):\n            padding_method = asymmetric_padding.PKCS1v15()\n        else:\n            raise exceptions.InvalidField(\"The padding method '{0}' is not supported for asymmetric decryption.\".format(padding_method))\n        backend = default_backend()\n        try:\n            private_key = backend.load_der_private_key(decryption_key, None)\n        except Exception:\n            try:\n                private_key = backend.load_pem_private_key(decryption_key, None)\n            except Exception:\n                raise exceptions.CryptographicFailure('The private key bytes could not be loaded.')\n        plain_text = private_key.decrypt(cipher_text, padding_method)\n        return plain_text\n    else:\n        raise exceptions.InvalidField(\"The cryptographic algorithm '{0}' is not supported for asymmetric decryption.\".format(decryption_algorithm))", "docstring": "Encrypt data using asymmetric decryption.\n\nArgs:\ndecryption_algorithm (CryptographicAlgorithm): An enumeration\nspecifying the asymmetric decryption algorithm to use for\ndecryption. Required.\ndecryption_key (bytes): The bytes of the private key to use for\ndecryption. Required.\ncipher_text (bytes): The bytes to be decrypted. Required.\npadding_method (PaddingMethod): An enumeration specifying the\npadding method to use with the asymmetric decryption\nalgorithm. Required.\nhashing_algorithm (HashingAlgorithm): An enumeration specifying\nthe hashing algorithm to use with the decryption padding\nmethod. Required, if the padding method is OAEP. Optional\notherwise, defaults to None.\n\nReturns:\ndict: A dictionary containing the decrypted data, with at least\nthe following key/value field:\n* plain_text - the bytes of the decrypted data\n\nRaises:\nInvalidField: Raised when the algorithm is unsupported or the\nlength is incompatible with the algorithm.\nCryptographicFailure: Raised when the key generation process\nfails.", "source": "codesearchnet"}
{"code": "def save_page(self, path=None):\n    path = _prepare_path(path, 'html')\n    with open(path, 'wb') as f:\n        f.write(encode_string(self.body))\n    return path", "docstring": "Save a snapshot of the page.\n\nIf invoked without arguments, it will save a file to :data:`capybara.save_path` and the\nfile will be given a randomly generated filename. If invoked with a relative path, the path\nwill be relative to :data:`capybara.save_path`.\n\nArgs:\npath (str, optional): The path to where it should be saved.\n\nReturns:\nstr: The path to which the file was saved.", "source": "codesearchnet"}
{"code": "def __init__(self, conf, conn=None):\n    super(HttpFilesSource, self).__init__(conf)\n    self._SetDefaults(conf)\n    if not conn:\n        conn = pycurl.Curl()\n        conn.setopt(pycurl.NOPROGRESS, 1)\n        conn.setopt(pycurl.NOSIGNAL, 1)\n        conn.setopt(pycurl.TIMEOUT, 60)\n        conn.setopt(pycurl.USERAGENT, 'nsscache')\n        if self.conf['http_proxy']:\n            conn.setopt(pycurl.PROXY, self.conf['http_proxy'])\n    self.conn = conn", "docstring": "Initialise the HTTP Data Source.\n\nArgs:\nconf: config.Config instance\nconn: pycurl Curl object", "source": "github-repos"}
{"code": "def _send_request(url_id, data=None, json=None, req_type=None):\n    url = (settings.SEEDER_INFO_URL % url_id)\n    if (not req_type):\n        req_type = requests.get\n    resp = req_type(url, data=data, json=json, timeout=settings.SEEDER_TIMEOUT, headers={'User-Agent': settings.USER_AGENT, 'Authorization': settings.SEEDER_TOKEN})\n    resp.raise_for_status()\n    data = resp.json()\n    return data", "docstring": "Send request to Seeder's API.\n\nArgs:\nurl_id (str): ID used as identification in Seeder.\ndata (obj, default None): Optional parameter for data.\njson (obj, default None): Optional parameter for JSON body.\nreq_type (fn, default None): Request method used to send/download the\ndata. If none, `requests.get` is used.\n\nReturns:\ndict: Data from Seeder.", "source": "codesearchnet"}
{"code": "def update_vm(access_token, subscription_id, resource_group, vm_name, body):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '?api-version=', COMP_API])\n    return do_put(endpoint, body, access_token)", "docstring": "Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\nbody (dict): JSON body of the VM.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def CheckTaskReadyForMerge(self, task):\n    \n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Unsupported storage type.')\n\n    if not self._processed_task_storage_path:\n      raise IOError('Missing processed task storage path.')\n\n    processed_storage_file_path = self._GetProcessedStorageFilePath(task)\n\n    try:\n      stat_info = os.stat(processed_storage_file_path)\n    except (IOError, OSError):\n      return False\n\n    task.storage_file_size = stat_info.st_size\n    return True", "docstring": "Checks if a task is ready for merging with this session storage.\n\nIf the task is ready to be merged, this method also sets the task's\nstorage file size.\n\nArgs:\ntask (Task): task.\n\nReturns:\nbool: True if the task is ready to be merged.\n\nRaises:\nIOError: if the storage type is not supported or\nOSError: if the storage type is not supported or\nif the temporary path for the task storage does not exist.", "source": "juraj-google-style"}
{"code": "def update_connection_endpoint(self, connection_id, endpoint):\n        \n        if connection_id in self._connections:\n            connection_info = self._connections[connection_id]\n            self._connections[connection_id] = \\\n                ConnectionInfo(connection_info.connection_type,\n                               connection_info.connection,\n                               endpoint,\n                               connection_info.status,\n                               connection_info.public_key)\n\n        else:\n            LOGGER.debug(\"Could not update the endpoint %s for \"\n                         \"connection_id %s. The connection does not \"\n                         \"exist.\",\n                         endpoint,\n                         connection_id)", "docstring": "Adds the endpoint to the connection definition. When the\nconnection is created by the send/receive thread, we do not\nyet have the endpoint of the remote node. That is not known\nuntil we process the incoming ConnectRequest.\n\nArgs:\nconnection_id (str): The identifier for the connection.\nendpoint (str): A zmq-style uri which identifies a publically\nreachable endpoint.", "source": "juraj-google-style"}
{"code": "def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'usergroup': usergroup})\n    return self.api_call('usergroups.disable', json=kwargs)", "docstring": "Disable an existing User Group\n\nArgs:\nusergroup (str): The encoded ID of the User Group to disable.\ne.g. 'S0604QSJC'", "source": "codesearchnet"}
{"code": "def msgBox(self, promptType, _timeout=(- 1), **options):\n    if (promptType == 'confirm'):\n        return self._sendConfirmPrompt(_timeout, options)\n    else:\n        raise ValueError('Unknown prompt type: {}'.format(promptType))", "docstring": "Send a user prompt request to the GUI\n\nArguments:\npromptType (string):\nThe prompt type to send to the GUI. Currently\nthe only type supported is 'confirm'.\n\n_timeout (int):\nThe optional amount of time for which the prompt\nshould be displayed to the user before a timeout occurs.\nDefaults to -1 which indicates there is no timeout limit.\n\noptions (dict):\nThe keyword arguments that should be passed to the requested\nprompt type. Check prompt specific sections below for information on what\narguments are expected to be present.\n\nRaises:\nValueError:\nIf the prompt type received is an unexpected value\n\n**Confirm Prompt**\n\nDisplay a message to the user and prompt them for a confirm/deny\nresponse to the message.\n\nArguments:\nmsg (string):\nThe message to display to the user\n\nReturns:\nTrue if the user picks 'Confirm', False if the user picks 'Deny'\n\nRaises:\nKeyError:\nIf the options passed to the prompt handler doesn't contain a\n`msg` attribute.\n\nAPITimeoutError:\nIf the timeout value is reached without receiving a response.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        self.Script = reader.ReadVarBytes()\n        self.ParameterList = reader.ReadVarBytes()\n        self.ReturnType = reader.ReadByte()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):\n    try:\n        tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)\n    except:\n        from .tokenization_bert_fast import BertTokenizerFast\n        tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)\n    return cls.from_tokenizer(tokenizer, **kwargs)", "docstring": "Instantiate a `TFBertTokenizer` from a pre-trained tokenizer.\n\nArgs:\npretrained_model_name_or_path (`str` or `os.PathLike`):\nThe name or path to the pre-trained tokenizer.\n\nExamples:\n\n```python\nfrom transformers import TFBertTokenizer\n\ntf_tokenizer = TFBertTokenizer.from_pretrained(\"google-bert/bert-base-uncased\")\n```", "source": "github-repos"}
{"code": "def _resolve_attribute_match(self, match):\n    if (match.group(1) == 'cluster'):\n        return str(self.cluster_id)\n    return self.get(match.group(1), match.group(0))", "docstring": "Replaces a reference to an attribute with the value of the attribute.\n\nArgs:\nmatch (re.match object): A match object containing a match to a reference to an attribute.", "source": "codesearchnet"}
{"code": "def closest_eere(latitude, longitude):\n    \n    with open(env.SRC_PATH + '/eere_meta.csv') as eere_meta:\n        stations = csv.DictReader(eere_meta)\n        d = 9999\n        station_code = ''\n        station_name = ''\n        for station in stations:\n            new_dist = great_circle((latitude, longitude),\n                                    (float(station['latitude']),\n                                     float(station['longitude']))).miles\n            if new_dist <= d:\n                d = new_dist\n                station_code = station['station_code']\n                station_name = station['weather_station']\n        return station_code, station_name\n    raise KeyError('station not found')", "docstring": "Find closest station from the new(er) list.\n\nWarning: There may be some errors with smaller non US stations.\n\nArgs:\nlatitude (float)\nlongitude (float)\n\nReturns:\ntuple (station_code (str), station_name (str))", "source": "juraj-google-style"}
{"code": "def set_pipeline_definition(self):\n    if (not self.pipeline_id):\n        self.get_pipeline_id()\n    json_def = self.datapipeline_data['json_definition']\n    try:\n        pipelineobjects = translator.definition_to_api_objects(json_def)\n        parameterobjects = translator.definition_to_api_parameters(json_def)\n        parametervalues = translator.definition_to_parameter_values(json_def)\n    except translator.PipelineDefinitionError as error:\n        LOG.warning(error)\n        raise DataPipelineDefinitionError\n    response = self.client.put_pipeline_definition(pipelineId=self.pipeline_id, pipelineObjects=pipelineobjects, parameterObjects=parameterobjects, parameterValues=parametervalues)\n    LOG.debug(response)\n    LOG.info('Successfully applied pipeline definition')\n    return response", "docstring": "Translates the json definition and puts it on created pipeline\n\nReturns:\ndict: the response of the Boto3 command", "source": "codesearchnet"}
{"code": "def login_with_password_no_sync(self, username, password):\n    warn('login_with_password_no_sync is deprecated. Use login with sync=False.', DeprecationWarning)\n    return self.login(username, password, sync=False)", "docstring": "Deprecated. Use ``login`` with ``sync=False``.\n\nLogin to the homeserver.\n\nArgs:\nusername (str): Account username\npassword (str): Account password\n\nReturns:\nstr: Access token\n\nRaises:\nMatrixRequestError", "source": "codesearchnet"}
{"code": "def get_attribute(self, obj, attr):\n    if (attr == '*'):\n        return obj\n    if isinstance(obj, Mapping):\n        return obj.get(attr, None)\n    return getattr(obj, attr, None)", "docstring": "Get attribute of given object instance.\n\nReason for existence of this method is the fact that  'attribute' can\nbe also object's key from if is a dict or any other kind of mapping.\n\nNote: it will return None if attribute key does not exist\n\nArgs:\nobj (object): internal object to retrieve data from\n\nReturns:\ninternal object's key value or attribute", "source": "codesearchnet"}
{"code": "async def remove(self, index=\"\"):\n        \n\n        if not self.state == 'ready':\n            logger.debug(\"Trying to remove from wrong state '{}'\".format(self.state))\n            return\n\n        if index == \"\":\n            self.statuslog.error(\"Must provide index to remove\")\n            return\n        elif index == \"all\":\n            self.queue = []\n            self.update_queue()\n            self.statuslog.info(\"Removed all songs\")\n            return\n\n        indexes = index.split(\"-\")\n        self.logger.debug(\"Removing {}\".format(indexes))\n\n        try:\n            if len(indexes) == 0:\n                self.statuslog.error(\"Remove must specify an index or range\")\n                return\n            elif len(indexes) == 1:\n                num_lower = int(indexes[0]) - 1\n                num_upper = num_lower + 1\n            elif len(indexes) == 2:\n                num_lower = int(indexes[0]) - 1\n                num_upper = int(indexes[1])\n            else:\n                self.statuslog.error(\"Cannot have more than 2 indexes for remove range\")\n                return\n        except TypeError:\n            self.statuslog.error(\"Remove index must be a number\")\n            return\n        except ValueError:\n            self.statuslog.error(\"Remove index must be a number\")\n            return\n\n        if num_lower < 0 or num_lower >= len(self.queue) or num_upper > len(self.queue):\n            if len(self.queue) == 0:\n                self.statuslog.warning(\"No songs in queue\")\n            elif len(self.queue) == 1:\n                self.statuslog.error(\"Remove index must be 1 (only 1 song in queue)\")\n            else:\n                self.statuslog.error(\"Remove index must be between 1 and {}\".format(len(self.queue)))\n            return\n\n        if num_upper <= num_lower:\n            self.statuslog.error(\"Second index in range must be greater than first\")\n            return\n\n        lower_songname = self.queue[num_lower][1]\n        for num in range(0, num_upper - num_lower):\n            self.logger.debug(\"Removed {}\".format(self.queue[num_lower][1]))\n            self.queue.pop(num_lower)\n\n        if len(indexes) == 1:\n            self.statuslog.info(\"Removed {}\".format(lower_songname))\n        else:\n            self.statuslog.info(\"Removed songs {}-{}\".format(num_lower + 1, num_upper))\n\n        self.update_queue()", "docstring": "The remove command\n\nArgs:\nindex (str): The index to remove, can be either a number, or a range in the for '##-##'", "source": "juraj-google-style"}
{"code": "def display_hierarchy(root_ad_unit, all_ad_units):\n    parent_id_to_children = collections.defaultdict(list)\n    for ad_unit in all_ad_units:\n        if ('parentId' in ad_unit):\n            parent_id_to_children[ad_unit['parentId']].append(ad_unit)\n    parent_id_to_children = dict(parent_id_to_children)\n    display_hierarchy_helper(root_ad_unit, parent_id_to_children, 0)", "docstring": "Display the ad units as a tree.\n\nArgs:\nroot_ad_unit: The root ad unit to begin from.\nall_ad_units: A list containing all ad units.", "source": "codesearchnet"}
{"code": "def from_paths(cls, path, bs=64, tfms=(None, None), trn_name='train', val_name='valid', test_name=None, test_with_labels=False, num_workers=8):\n    assert (not ((tfms[0] is None) or (tfms[1] is None))), 'please provide transformations for your train and validation sets'\n    (trn, val) = [folder_source(path, o) for o in (trn_name, val_name)]\n    if test_name:\n        test = (folder_source(path, test_name) if test_with_labels else read_dir(path, test_name))\n    else:\n        test = None\n    datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test)\n    return cls(path, datasets, bs, num_workers, classes=trn[2])", "docstring": "Read in images and their labels given as sub-folder names\n\nArguments:\npath: a root path of the data (used for storing trained models, precomputed values, etc)\nbs: batch size\ntfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\ntrn_name: a name of the folder that contains training images.\nval_name:  a name of the folder that contains validation images.\ntest_name:  a name of the folder that contains test images.\nnum_workers: number of workers\n\nReturns:\nImageClassifierData", "source": "codesearchnet"}
{"code": "def encode_schedule(schedule):\n    (interpolation, steps, pmfs) = schedule\n    return ((interpolation + ' ') + ' '.join((((('@' + str(s)) + ' ') + ' '.join(map(str, p))) for (s, p) in zip(steps, pmfs))))", "docstring": "Encodes a schedule tuple into a string.\n\nArgs:\nschedule: A tuple containing (interpolation, steps, pmfs), where\ninterpolation is a string specifying the interpolation strategy, steps\nis an int array_like of shape [N] specifying the global steps, and pmfs is\nan array_like of shape [N, M] where pmf[i] is the sampling distribution\nat global step steps[i]. N is the number of schedule requirements to\ninterpolate and M is the size of the probability space.\n\nReturns:\nThe string encoding of the schedule tuple.", "source": "codesearchnet"}
{"code": "def as_json_range(self, name):\n    return {'Name': name, 'Values': [json.dumps(v) for v in self.values]}", "docstring": "Represent the parameter range as a dictionary suitable for a request to\ncreate an Amazon SageMaker hyperparameter tuning job using one of the deep learning frameworks.\n\nThe deep learning framework images require that hyperparameters be serialized as JSON.\n\nArgs:\nname (str): The name of the hyperparameter.\n\nReturns:\ndict[str, list[str]]: A dictionary that contains the name and values of the hyperparameter,\nwhere the values are serialized as JSON.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      vshadow_volume = pyvshadow.volume()\n      vshadow_volume.open_file_object(file_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._vshadow_volume = vshadow_volume", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def less(x, y):\n    return math_ops.less(x, y)", "docstring": "Element-wise truth value of (x < y).\n\nArgs:\nx: Tensor or variable.\ny: Tensor or variable.\n\nReturns:\nA bool tensor.", "source": "github-repos"}
{"code": "def apply(self, func, num_splits=None, other_axis_partition=None, maintain_partitioning=True, **kwargs):\n    import dask\n    if (num_splits is None):\n        num_splits = len(self.list_of_blocks)\n    if (other_axis_partition is not None):\n        return [DaskFramePartition(dask.delayed(obj)) for obj in deploy_func_between_two_axis_partitions(self.axis, func, num_splits, len(self.list_of_blocks), kwargs, *dask.compute(*tuple((self.list_of_blocks + other_axis_partition.list_of_blocks))))]\n    args = [self.axis, func, num_splits, kwargs, maintain_partitioning]\n    args.extend(dask.compute(*self.list_of_blocks))\n    return [DaskFramePartition(dask.delayed(obj)) for obj in deploy_axis_func(*args)]", "docstring": "Applies func to the object.\n\nSee notes in Parent class about this method.\n\nArgs:\nfunc: The function to apply.\nnum_splits: The number of times to split the result object.\nother_axis_partition: Another `DaskFrameAxisPartition` object to apply to\nfunc with this one.\n\nReturns:\nA list of `DaskFramePartition` objects.", "source": "codesearchnet"}
{"code": "def make_adapt_function(self):\n    if self._adapt_function is not None:\n        return self._adapt_function\n\n    def adapt_step(iterator):\n        data = next(iterator)\n        self._adapt_maybe_build(data)\n        self.update_state(data)\n    if self._steps_per_execution.numpy().item() == 1:\n        adapt_fn = adapt_step\n    else:\n\n        def adapt_fn(iterator):\n            for _ in math_ops.range(self._steps_per_execution):\n                adapt_step(iterator)\n    if not self._run_eagerly:\n        adapt_fn = def_function.function(adapt_fn)\n    self._adapt_function = adapt_fn\n    return self._adapt_function", "docstring": "Creates a function to execute one step of `adapt`.\n\nThis method can be overridden to support custom adapt logic.\nThis method is called by `PreprocessingLayer.adapt`.\n\nTypically, this method directly controls `tf.function` settings,\nand delegates the actual state update logic to\n`PreprocessingLayer.update_state`.\n\nThis function is cached the first time `PreprocessingLayer.adapt`\nis called. The cache is cleared whenever `PreprocessingLayer.compile`\nis called.\n\nReturns:\nFunction. The function created by this method should accept a\n`tf.data.Iterator`, retrieve a batch, and update the state of the\nlayer.", "source": "github-repos"}
{"code": "def energy_upperbound(self, spins):\n    subtheta = self.theta.copy()\n    subtheta.fix_variables(spins)\n    trees = self._trees\n    if (not trees):\n        assert ((not subtheta.linear) and (not subtheta.quadratic))\n        return subtheta.offset\n    energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)\n    return energy", "docstring": "A formula for an upper bound on the energy of Theta with spins fixed.\n\nArgs:\nspins (dict): Spin values for a subset of the variables in Theta.\n\nReturns:\nFormula that upper bounds the energy with spins fixed.", "source": "codesearchnet"}
{"code": "def sync_proxy(self, mri, block):\n        \n        \n        subscribe = Subscribe(path=[mri], delta=True)\n        done_queue = Queue()\n\n        def handle_response(response):\n            \n            if not isinstance(response, Delta):\n                \n                self.log.debug(\"Proxy got response %r\", response)\n                done_queue.put(None)\n            else:\n                cothread.Callback(\n                    self._handle_response, response, block, done_queue)\n\n        subscribe.set_callback(handle_response)\n        IOLoopHelper.call(self._send_request, subscribe)\n        done_queue.get(timeout=DEFAULT_TIMEOUT)", "docstring": "Abstract method telling the ClientComms to sync this proxy Block\nwith its remote counterpart. Should wait until it is connected\n\nArgs:\nmri (str): The mri for the remote block\nblock (BlockModel): The local proxy Block to keep in sync", "source": "juraj-google-style"}
{"code": "def restore(self, fade=False):\n        \n\n        try:\n            if self.is_coordinator:\n                self._restore_coordinator()\n        finally:\n            self._restore_volume(fade)\n\n        \n        \n        if self.is_coordinator:\n            if self.transport_state == 'PLAYING':\n                self.device.play()\n            elif self.transport_state == 'STOPPED':\n                self.device.stop()", "docstring": "Restore the state of a device to that which was previously saved.\n\nFor coordinator devices restore everything. For slave devices\nonly restore volume etc., not transport info (transport info\ncomes from the slave's coordinator).\n\nArgs:\nfade (bool): Whether volume should be faded up on restore.", "source": "juraj-google-style"}
{"code": "def comments_1(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `comments_1`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `comments_1`')\n    self._comments_1 = value", "docstring": "Corresponds to IDD Field `comments_1`\n\nArgs:\nvalue (str): value for IDD Field `comments_1`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_aggregate(self):\n    return dict([(aggregate.find('query').text, [ET.tostring(data).lstrip('<data xmlns:cps=\"www.clusterpoint.com\" xmlns:cpse=\"www.clusterpoint.com\">').strip().rstrip('</data>') for data in aggregate.findall('data')]) for aggregate in self._content.findall('aggregate')])", "docstring": "Get aggregate data.\n\nReturns:\nA dict in with queries as keys and results as values.", "source": "codesearchnet"}
{"code": "def Add(self, rdf_value, timestamp=None, suffix=None, mutation_pool=None):\n    return self.StaticAdd(self.collection_id, rdf_value, timestamp=timestamp, suffix=suffix, mutation_pool=mutation_pool)", "docstring": "Adds an rdf value to the collection.\n\nAdds an rdf value to the collection. Does not require that the collection\nbe locked.\n\nArgs:\nrdf_value: The rdf value to add to the collection.\ntimestamp: The timestamp (in microseconds) to store the rdf value at.\nDefaults to the current time.\nsuffix: A 'fractional timestamp' suffix to reduce the chance of\ncollisions. Defaults to a random number.\nmutation_pool: A MutationPool object to write to.\n\nReturns:\nThe pair (timestamp, suffix) which identifies the value within the\ncollection.\n\nRaises:\nValueError: rdf_value has unexpected type.", "source": "codesearchnet"}
{"code": "def tf_initialize(self, x_init, base_value, target_value, estimated_improvement):\n        \n        self.base_value = base_value\n\n        if estimated_improvement is None:  \n            estimated_improvement = tf.abs(x=base_value)\n\n        first_step = super(LineSearch, self).tf_initialize(x_init)\n\n        improvement = tf.divide(\n            x=(target_value - self.base_value),\n            y=tf.maximum(x=estimated_improvement, y=util.epsilon)\n        )\n\n        last_improvement = improvement - 1.0\n\n        if self.mode == 'linear':\n            deltas = [-t * self.parameter for t in x_init]\n            self.estimated_incr = -estimated_improvement * self.parameter\n\n        elif self.mode == 'exponential':\n            deltas = [-t * self.parameter for t in x_init]\n\n        return first_step + (deltas, improvement, last_improvement, estimated_improvement)", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body.\n\nArgs:\nx_init: Initial solution guess $x_0$.\nbase_value: Value $f(x')$ at $x = x'$.\ntarget_value: Value $f(x_0)$ at $x = x_0$.\nestimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.\n\nReturns:\nInitial arguments for tf_step.", "source": "juraj-google-style"}
{"code": "def _extract_id(self) -> str:\n    match = re.match(self._VALID_URL, self.url)\n    if match:\n        return match.group('video_id')\n    else:\n        raise VideoIdNotMatchedError", "docstring": "Get video_id needed to obtain the real_url of the video.\n\nRaises:\nVideoIdNotMatchedError: If video_id is not matched with regular expression.", "source": "codesearchnet"}
{"code": "def __add__(self, other):\n        \n        if not all(np.equal(self.energies, other.energies)):\n            raise ValueError(\"Energies of both COHP are not compatible.\")\n        populations = {spin: self.populations[spin] + other.populations[spin]\n                       for spin in self.cohp}\n        if self.icohp is not None and other.icohp is not None:\n            int_pop = {spin: self.icohp[spin] + other.icohp[spin]\n                       for spin in self.icohp}\n        else:\n            if self.icohp is not None or other.icohp is not None:\n                warnings.warn(\"One of the COHP objects does not contain \"\n                              \"ICOHPs. Setting ICOHP to None.\")\n            int_pop = None\n        return Cohp(self.efermi, self.energies, populations, icohp=int_pop)", "docstring": "Adds two COHP together. Checks that energy scales are the same.\nOtherwise, it raises a ValueError. It also adds ICOHP if present.\nIf ICOHP is only present in one object, it displays a warning and\nwill not add ICOHP.\n\nArgs:\nother: Another COHP object.\n\nReturns:\nSum of the two COHPs as a COHP object.", "source": "juraj-google-style"}
{"code": "def create_clusters(provider, context, **kwargs):\n    conn = get_session(provider.region).client('ecs')\n    try:\n        clusters = kwargs['clusters']\n    except KeyError:\n        logger.error('setup_clusters hook missing \"clusters\" argument')\n        return False\n    if isinstance(clusters, basestring):\n        clusters = [clusters]\n    cluster_info = {}\n    for cluster in clusters:\n        logger.debug('Creating ECS cluster: %s', cluster)\n        r = conn.create_cluster(clusterName=cluster)\n        cluster_info[r['cluster']['clusterName']] = r\n    return {'clusters': cluster_info}", "docstring": "Creates ECS clusters.\n\nExpects a \"clusters\" argument, which should contain a list of cluster\nnames to create.\n\nArgs:\nprovider (:class:`stacker.providers.base.BaseProvider`): provider\ninstance\ncontext (:class:`stacker.context.Context`): context instance\n\nReturns: boolean for whether or not the hook succeeded.", "source": "codesearchnet"}
{"code": "def __to_plain_containers(self, container: Union[(CommentedSeq, CommentedMap)]) -> Union[(OrderedDict, list)]:\n    if isinstance(container, CommentedMap):\n        new_container = OrderedDict()\n        for (key, value_obj) in container.items():\n            if (isinstance(value_obj, CommentedMap) or isinstance(value_obj, CommentedSeq)):\n                new_container[key] = self.__to_plain_containers(value_obj)\n            else:\n                new_container[key] = value_obj\n    elif isinstance(container, CommentedSeq):\n        new_container = list()\n        for value_obj in container:\n            if (isinstance(value_obj, CommentedMap) or isinstance(value_obj, CommentedSeq)):\n                new_container.append(self.__to_plain_containers(value_obj))\n            else:\n                new_container.append(value_obj)\n    return new_container", "docstring": "Converts any sequence or mapping to list or OrderedDict\n\nStops at anything that isn't a sequence or a mapping.\n\nOne day, we'll extract the comments and formatting and store \\\nthem out-of-band.\n\nArgs:\nmapping: The mapping of constructed subobjects to edit", "source": "codesearchnet"}
{"code": "def designPrimers(seq_args, global_args=None, misprime_lib=None, mishyb_lib=None, debug=False):\n    if global_args:\n        primerdesign.setGlobals(global_args, misprime_lib, mishyb_lib)\n    primerdesign.setSeqArgs(seq_args)\n    return primerdesign.runDesign(debug)", "docstring": "Run the Primer3 design process.\n\nIf the global args have been previously set (either by a pervious\n`designPrimers` call or by a `setGlobals` call), `designPrimers` may be\ncalled with seqArgs alone (as a means of optimization).\n\nArgs:\nseq_args (dict)               : Primer3 sequence/design args as per\nPrimer3 docs\n\nglobal_args (dict, optional)  : Primer3 global args as per Primer3 docs\nmisprime_lib (dict, optional) : `Sequence name: sequence` dictionary\nfor mispriming checks.\nmishyb_lib (dict, optional)   : `Sequence name: sequence` dictionary\nfor mishybridization checks.\n\nReturns:\nA dictionary of Primer3 results (should be identical to the expected\nBoulderIO output from primer3_main)", "source": "codesearchnet"}
{"code": "def from_tensor(cls, tensor):\n    if isinstance(tensor, core.Value):\n        return EagerWeakTensor(tensor)\n    if isinstance(tensor, core.Symbol):\n        return GraphWeakTensor(tensor)\n    raise errors.InvalidArgumentError(None, None, f'WeakTensor can only be constructed from tf.Tensor or tf.WeakTensor, but {type(tensor)} was given.')", "docstring": "Converts a 'tf.Tensor' into a 'WeakTensor'.\n\nThis should be the standard way of creating a WeakTensor instead\nof directly calling the WeakTensor constructor.\n\nArgs:\ntensor: The `tf.Tensor` that should be converted into a 'WeakTensor'.\n\nReturns:\nA `EagerWeakTensor` or 'GraphWeakTensor' that holds the `tensor`.", "source": "github-repos"}
{"code": "def to_hashable_table_ref(table_ref_elem_kv: Tuple[Union[str, TableReference], V]) -> Tuple[str, V]:\n    table_ref = table_ref_elem_kv[0]\n    hashable_table_ref = get_hashable_destination(table_ref)\n    return (hashable_table_ref, table_ref_elem_kv[1])", "docstring": "Turns the key of the input tuple to its string representation. The key\nshould be either a string or a TableReference.\n\nArgs:\ntable_ref_elem_kv: A tuple of table reference and element.\n\nReturns:\nA tuple of string representation of input table and input element.", "source": "github-repos"}
{"code": "def push(self, x):\n    self._quantile_tracker.push(x)", "docstring": "Pushes a new value and updates the internal quantile tracker.\n\nArgs:\nx: The new value to be pushed.", "source": "github-repos"}
{"code": "def _lookup_model(cls, kind, default_model=None):\n    modelclass = cls._kind_map.get(kind, default_model)\n    if (modelclass is None):\n        raise KindError((\"No model class found for kind '%s'. Did you forget to import it?\" % kind))\n    return modelclass", "docstring": "Get the model class for the kind.\n\nArgs:\nkind: A string representing the name of the kind to lookup.\ndefault_model: The model class to use if the kind can't be found.\n\nReturns:\nThe model class for the requested kind.\nRaises:\nKindError: The kind was not found and no default_model was provided.", "source": "codesearchnet"}
{"code": "def _poll_once(self, timeout_ms, max_records):\n        \n        self._coordinator.poll()\n\n        \n        \n        if not self._subscription.has_all_fetch_positions():\n            self._update_fetch_positions(self._subscription.missing_fetch_positions())\n\n        \n        \n        records, partial = self._fetcher.fetched_records(max_records)\n        if records:\n            \n            \n            \n            \n            if not partial:\n                self._fetcher.send_fetches()\n            return records\n\n        \n        self._fetcher.send_fetches()\n\n        timeout_ms = min(timeout_ms, self._coordinator.time_to_next_poll() * 1000)\n        self._client.poll(timeout_ms=timeout_ms)\n        \n        \n        if self._coordinator.need_rejoin():\n            return {}\n\n        records, _ = self._fetcher.fetched_records(max_records)\n        return records", "docstring": "Do one round of polling. In addition to checking for new data, this does\nany needed heart-beating, auto-commits, and offset updates.\n\nArguments:\ntimeout_ms (int): The maximum time in milliseconds to block.\n\nReturns:\ndict: Map of topic to list of records (may be empty).", "source": "juraj-google-style"}
{"code": "def settings(package, reload_=False):\n    global packages\n    if ((package not in packages) or reload_):\n        from os import path\n        result = CaseConfigParser()\n        if (package != 'acorn'):\n            confpath = _package_path(package)\n            _read_single(result, confpath)\n        _read_single(result, _package_path('acorn'))\n        packages[package] = result\n    return packages[package]", "docstring": "Returns the config settings for the specified package.\n\nArgs:\npackage (str): name of the python package to get settings for.", "source": "codesearchnet"}
{"code": "def refresh(self, request):\n        \n        try:\n            self._retrieve_info(request)\n            self.token, self.expiry = _metadata.get_service_account_token(\n                request,\n                service_account=self._service_account_email)\n        except exceptions.TransportError as caught_exc:\n            new_exc = exceptions.RefreshError(caught_exc)\n            six.raise_from(new_exc, caught_exc)", "docstring": "Refresh the access token and scopes.\n\nArgs:\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\n\nRaises:\ngoogle.auth.exceptions.RefreshError: If the Compute Engine metadata\nservice can't be reached if if the instance has not\ncredentials.", "source": "juraj-google-style"}
{"code": "def unpack(self, buff=None, offset=0):\n        \n        instruction_type = UBInt16(enum_ref=InstructionType)\n        instruction_type.unpack(buff, offset)\n        self.__class__ = InstructionType(instruction_type.value).find_class()\n\n        length = UBInt16()\n        length.unpack(buff, offset=offset+2)\n\n        super().unpack(buff[:offset+length.value], offset)", "docstring": "Unpack *buff* into this object.\n\nThis method will convert a binary data into a readable value according\nto the attribute format.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def _MakeRequestNoRetry(http, http_request, redirections=5, check_response_func=CheckResponse):\n    connection_type = None\n    if getattr(http, 'connections', None):\n        url_scheme = parse.urlsplit(http_request.url).scheme\n        if (url_scheme and (url_scheme in http.connections)):\n            connection_type = http.connections[url_scheme]\n    new_debuglevel = (4 if (httplib2.debuglevel == 4) else 0)\n    with _Httplib2Debuglevel(http_request, new_debuglevel, http=http):\n        (info, content) = http.request(str(http_request.url), method=str(http_request.http_method), body=http_request.body, headers=http_request.headers, redirections=redirections, connection_type=connection_type)\n    if (info is None):\n        raise exceptions.RequestError()\n    response = Response(info, content, http_request.url)\n    check_response_func(response)\n    return response", "docstring": "Send http_request via the given http.\n\nThis wrapper exists to handle translation between the plain httplib2\nrequest/response types and the Request and Response types above.\n\nArgs:\nhttp: An httplib2.Http instance, or a http multiplexer that delegates to\nan underlying http, for example, HTTPMultiplexer.\nhttp_request: A Request to send.\nredirections: (int, default 5) Number of redirects to follow.\ncheck_response_func: Function to validate the HTTP response.\nArguments are (Response, response content, url).\n\nReturns:\nA Response object.\n\nRaises:\nRequestError if no response could be parsed.", "source": "codesearchnet"}
{"code": "def controlled_by(self, *control_qubits: Qid) -> 'Operation':\n        \n        \n        from cirq.ops import ControlledOperation\n        if control_qubits is None or len(control_qubits) is 0:\n            raise ValueError(\n                \"Can't get controlled operation without control qubit. Op: {}\"\n                .format(repr(self)))\n        else:\n            return ControlledOperation(control_qubits, self)", "docstring": "Returns a controlled version of this operation.\n\nArgs:\ncontrol_qubits: Qubits to control the operation by. Required.", "source": "juraj-google-style"}
{"code": "def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting reshape ...')\n    if names == 'short':\n        tf_name = 'RESH' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    if len(inputs) > 1:\n        if layers[inputs[1]][0] == -1:\n            print('Cannot deduct batch size! It will be omitted, but result may be wrong.')\n\n        reshape = keras.layers.Reshape(layers[inputs[1] + '_np'], name=tf_name)\n        layers[scope_name] = reshape(layers[inputs[0]])\n    else:\n        if inputs[0] in layers:\n            reshape = keras.layers.Reshape(params['shape'][1:], name=tf_name)\n            layers[scope_name] = reshape(layers[inputs[0]])\n        else:\n            print('Skip weight matrix transpose, but result may be wrong.')", "docstring": "Convert reshape layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):\n    assignments = []\n    for assignment_size in six.moves.xrange((1 + min(len(splittable_dimensions), len(mesh_dimension_to_size)))):\n        for s_dims_chosen in itertools.combinations(splittable_dimensions, assignment_size):\n            for m_dims_chosen in itertools.permutations(mesh_dimension_to_size, assignment_size):\n                assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))\n    return assignments", "docstring": "Generates all ways to map splittable dimensions to mesh dimensions.\n\nArgs:\nsplittable_dimensions: a frozenset of the names of splittable dimensions.\nmesh_dimension_to_size: a dictionary from mesh dimension name to size.\n\nReturns:\nA list of the valid assignments. Each assignment is a dict keyed by every\nsplittable dimension, whose value is either a mesh dimension or None.", "source": "codesearchnet"}
{"code": "def _print_args(arguments, argument_type='Argument', indent=0):\n    indent_str = '  ' * indent\n\n    def _maybe_add_quotes(value):\n        is_quotes = \"'\" * isinstance(value, str)\n        return is_quotes + str(value) + is_quotes\n\n    def in_print(s, end='\\n'):\n        print(indent_str + s, end=end)\n    for index, element in enumerate(arguments, 1):\n        if indent == 4:\n            in_print('%s \n        if isinstance(element, str):\n            in_print('  %s' % element)\n        elif isinstance(element, tensor_spec.TensorSpec):\n            print((indent + 1) * '  ' + '%s: %s' % (element.name, repr(element)))\n        elif isinstance(element, collections_abc.Iterable) and (not isinstance(element, dict)):\n            in_print('  DType: %s' % type(element).__name__)\n            in_print('  Value: [', end='')\n            for value in element:\n                print('%s' % _maybe_add_quotes(value), end=', ')\n            print('\\x08\\x08]')\n        elif isinstance(element, dict):\n            in_print('  DType: %s' % type(element).__name__)\n            in_print('  Value: {', end='')\n            for key, value in element.items():\n                print(\"'%s': %s\" % (str(key), _maybe_add_quotes(value)), end=', ')\n            print('\\x08\\x08}')\n        else:\n            in_print('  DType: %s' % type(element).__name__)\n            in_print('  Value: %s' % str(element))", "docstring": "Formats and prints the argument of the concrete functions defined in the model.\n\nArgs:\narguments: Arguments to format print.\nargument_type: Type of arguments.\nindent: How far (in increments of 2 spaces) to indent each line of\noutput.", "source": "github-repos"}
{"code": "def ParseCodeToTree(code):\n    if not code.endswith(os.linesep):\n        code += os.linesep\n    try:\n        parser_driver = driver.Driver(_PYTHON_GRAMMAR, convert=pytree.convert)\n        tree = parser_driver.parse_string(code, debug=False)\n    except parse.ParseError:\n        ast.parse(code)\n        raise\n    return _WrapEndMarker(tree)", "docstring": "Parse the given code to a lib2to3 pytree.\n\nArguments:\ncode: a string with the code to parse.\n\nRaises:\nSyntaxError if the code is invalid syntax.\nparse.ParseError if some other parsing failure.\n\nReturns:\nThe root node of the parsed tree.", "source": "github-repos"}
{"code": "def _build_all_reduce_ring(core_locations: List[_CoreLocation], rotate: bool=False) -> List[int]:\n    permutation = list(range(len(core_locations)))\n    if not permutation:\n        return permutation\n    logging.vlog(2, 'Core locations in: %s', core_locations)\n    first_column = min([l.x for l in core_locations])\n    first_row = min([l.y for l in core_locations])\n    same_z = len(set([l.z for l in core_locations])) == 1\n    logging.vlog(2, 'first_column: %d', first_column)\n    logging.vlog(2, 'first_row: %d', first_row)\n    logging.vlog(2, 'same_z: %s', same_z)\n\n    def _cmp_2d(ia: int, ib: int) -> int:\n        if not rotate:\n            a = core_locations[ia]\n            b = core_locations[ib]\n            a_first = a.x == first_column and a.y != first_row\n            b_first = b.x == first_column and b.y != first_row\n            if a_first != b_first:\n                return -1 if b_first else 1\n            if a.y != b.y:\n                return b.y - a.y if a_first else a.y - b.y\n            if a.x != b.x:\n                return a.x - b.x if a.y % 2 == 0 else b.x - a.x\n            return a.core - b.core\n        else:\n            a = core_locations[ia]\n            b = core_locations[ib]\n            a_first = a.y == first_row and a.x != first_column\n            b_first = b.y == first_row and b.x != first_column\n            if a_first != b_first:\n                return -1 if b_first else 1\n            if a.x != b.x:\n                return b.x - a.x if a_first else a.x - b.x\n            if a.y != b.y:\n                return a.y - b.y if a.x % 2 == 0 else b.y - a.y\n            return a.core - b.core\n\n    def _cmp_3d(ia: int, ib: int) -> int:\n        a = core_locations[ia]\n        b = core_locations[ib]\n        a_corner = a.x == first_column and a.y == first_row\n        b_corner = b.x == first_column and b.y == first_row\n        if a_corner and b_corner:\n            return b.z - a.z if a.z != b.z else a.core - b.core\n        if a_corner != b_corner:\n            return -1 if b_corner else 1\n        if a.z == b.z:\n            return _cmp_2d(ia, ib) if a.z % 2 == 0 else -_cmp_2d(ia, ib)\n        return a.z - b.z\n    if same_z:\n        permutation.sort(key=functools.cmp_to_key(_cmp_2d))\n    else:\n        permutation.sort(key=functools.cmp_to_key(_cmp_3d))\n    logging.vlog(2, 'Permutation out: %s', permutation)\n    return permutation", "docstring": "Reorders a list of TPU cores to optimize for AllReduce performance.\n\nThis is ported from the C++ tensorflow::BuildAllReduceRing function,\nmixed with some logic from TF TPU's device_assignment._ring_3d.\n\nArgs:\ncore_locations: A list of core locations expressed as [x, y, z, core].\nrotate: If true, scan the cores in a column-major order. False by default.\n\nReturns:\nA permutation of the input list such that neighbors in the sequence are\nnearby in the TPU topology.", "source": "github-repos"}
{"code": "def from_dict(cls, cls_dict, fallback_xsi_type=None):\n        \n        if not cls_dict:\n            return None\n        \n        if isinstance(cls_dict, six.string_types):\n            if not getattr(cls, \"_convert_strings\", False):\n                return cls_dict\n\n        try:\n            typekey = cls.dictkey(cls_dict)\n        except TypeError:\n            typekey = fallback_xsi_type\n        klass   = cls.entity_class(typekey)\n        return klass.from_dict(cls_dict)", "docstring": "Parse the dictionary and return an Entity instance.\n\nThis will attempt to extract type information from the input\ndictionary and pass it to entity_class to resolve the correct class\nfor the type.\n\nArgs:\ncls_dict: A dictionary representation of an Entity object.\nfallback_xsi_type: An xsi_type to use for string input, which\ndoesn't have properties\n\nReturns:\nAn Entity instance.", "source": "juraj-google-style"}
{"code": "def generate_link(flag, np_fun_name):\n    if flag == 'dev':\n        template = 'https:\n    elif flag == 'stable':\n        template = 'https:\n    elif re.match('\\\\d+(\\\\.\\\\d+(\\\\.\\\\d+)?)?$', flag):\n        template = f'https:\n    else:\n        return None\n    return template % np_fun_name", "docstring": "Generates link from numpy function name.\n\nArgs:\nflag: the flag to control link form. See `set_np_doc_form`.\nnp_fun_name: the numpy function name.\n\nReturns:\nA string.", "source": "github-repos"}
{"code": "def SetIamPolicy(self, request, global_params=None):\n    config = self.GetMethodConfig('SetIamPolicy')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.\n\nArgs:\nrequest: (BigqueryTablesSetIamPolicyRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Policy) The response message.", "source": "github-repos"}
{"code": "def prepare_http_request(self, method_type, params, **kwargs):\n        \n        prepared_request = self.session.prepare_request(\n            requests.Request(method=method_type, **params)\n        )\n        return prepared_request", "docstring": "Prepares the HTTP REQUEST and returns it.\n\nArgs:\nmethod_type: The HTTP method type\nparams: Additional parameters for the HTTP request.\nkwargs: Any extra keyword arguements passed into a client method.\n\nreturns:\nprepared_request: An HTTP request object.", "source": "juraj-google-style"}
{"code": "def find_usbserial(vendor, product):\n    if (platform.system() == 'Linux'):\n        (vendor, product) = [('%04x' % x).strip() for x in (vendor, product)]\n        return linux_find_usbserial(vendor, product)\n    elif (platform.system() == 'Darwin'):\n        return osx_find_usbserial(vendor, product)\n    else:\n        raise NotImplementedError(('Cannot find serial ports on %s' % platform.system()))", "docstring": "Find the tty device for a given usbserial devices identifiers.\n\nArgs:\nvendor: (int) something like 0x0000\nproduct: (int) something like 0x0000\n\nReturns:\nString, like /dev/ttyACM0 or /dev/tty.usb...", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    artifacts_path = getattr(options, 'artifact_definitions_path', None)\n\n    data_location = getattr(configuration_object, '_data_location', None)\n    if ((not artifacts_path or not os.path.exists(artifacts_path)) and\n        data_location):\n      artifacts_path = os.path.dirname(data_location)\n      artifacts_path = os.path.join(artifacts_path, 'artifacts')\n\n      if not os.path.exists(artifacts_path) and 'VIRTUAL_ENV' in os.environ:\n        artifacts_path = os.path.join(\n            os.environ['VIRTUAL_ENV'], 'share', 'artifacts')\n\n      if not os.path.exists(artifacts_path):\n        artifacts_path = os.path.join(sys.prefix, 'share', 'artifacts')\n      if not os.path.exists(artifacts_path):\n        artifacts_path = os.path.join(sys.prefix, 'local', 'share', 'artifacts')\n\n      if sys.prefix != '/usr':\n        if not os.path.exists(artifacts_path):\n          artifacts_path = os.path.join('/usr', 'share', 'artifacts')\n        if not os.path.exists(artifacts_path):\n          artifacts_path = os.path.join('/usr', 'local', 'share', 'artifacts')\n\n      if not os.path.exists(artifacts_path):\n        artifacts_path = None\n\n    if not artifacts_path or not os.path.exists(artifacts_path):\n      raise errors.BadConfigOption(\n          'Unable to determine path to artifact definitions.')\n\n    custom_artifacts_path = getattr(\n        options, 'custom_artifact_definitions_path', None)\n\n    if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):\n      raise errors.BadConfigOption(\n          'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))\n\n    if custom_artifacts_path:\n      logger.info(\n          'Custom artifact filter file: {0:s}'.format(custom_artifacts_path))\n\n    registry = artifacts_registry.ArtifactDefinitionsRegistry()\n    reader = artifacts_reader.YamlArtifactsReader()\n\n    logger.info(\n        'Determined artifact definitions path: {0:s}'.format(artifacts_path))\n\n    try:\n      registry.ReadFromDirectory(reader, artifacts_path)\n\n    except (KeyError, artifacts_errors.FormatError) as exception:\n      raise errors.BadConfigOption((\n          'Unable to read artifact definitions from: {0:s} with error: '\n          '{1!s}').format(artifacts_path, exception))\n\n    for name in preprocessors_manager.PreprocessPluginsManager.GetNames():\n      if not registry.GetDefinitionByName(name):\n        raise errors.BadConfigOption(\n            'Missing required artifact definition: {0:s}'.format(name))\n\n    if custom_artifacts_path:\n      try:\n        registry.ReadFromFile(reader, custom_artifacts_path)\n\n      except (KeyError, artifacts_errors.FormatError) as exception:\n        raise errors.BadConfigOption((\n            'Unable to read artifact definitions from: {0:s} with error: '\n            '{1!s}').format(custom_artifacts_path, exception))\n\n    setattr(configuration_object, '_artifact_definitions_path', artifacts_path)\n    setattr(\n        configuration_object, '_custom_artifacts_path', custom_artifacts_path)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong 
type.\nBadConfigOption: if the required artifact definitions are not defined.", "source": "juraj-google-style"}
{"code": "def download(self, file: Optional[IO]=None, rewind: bool=True,\n                 duration_timeout: Optional[float]=None) -> Response:\n        \n        if self._session_state != SessionState.file_request_sent:\n            raise RuntimeError('File request not sent')\n\n        if rewind and file and hasattr(file, 'seek'):\n            original_offset = file.tell()\n        else:\n            original_offset = None\n\n        if not hasattr(file, 'drain'):\n            self._response.body = file\n\n            if not isinstance(file, Body):\n                self._response.body = Body(file)\n\n        read_future = self._commander.read_stream(file, self._data_stream)\n\n        try:\n            reply = yield from \\\n                asyncio.wait_for(read_future, timeout=duration_timeout)\n        except asyncio.TimeoutError as error:\n            raise DurationTimeout(\n                'Did not finish reading after {} seconds.'\n                .format(duration_timeout)\n            ) from error\n\n        self._response.reply = reply\n\n        if original_offset is not None:\n            file.seek(original_offset)\n\n        self.event_dispatcher.notify(self.Event.end_transfer, self._response)\n\n        self._session_state = SessionState.response_received\n\n        return self._response", "docstring": "Read the response content into file.\n\nArgs:\nfile: A file object or asyncio stream.\nrewind: Seek the given file back to its original offset after\nreading is finished.\nduration_timeout: Maximum time in seconds of which the\nentire file must be read.\n\nReturns:\nA Response populated with the final data connection reply.\n\nBe sure to call :meth:`start` first.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:\n    raise NotImplementedError(type(self))", "docstring": "Runs inferences on a batch of examples.\n\nArgs:\nbatch: A sequence of examples or features.\nmodel: The model used to make inferences.\ninference_args: Extra arguments for models whose inference call requires\nextra parameters.\n\nReturns:\nAn Iterable of Predictions.", "source": "github-repos"}
{"code": "def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:\n    raise NotImplementedError", "docstring": "Applies the aggregation function to an iterable of predictions, either on\ntheir outlier scores or labels.\n\nArgs:\npredictions: An Iterable of `AnomalyPrediction` objects to aggregate.\n\nReturns:\nAn `AnomalyPrediction` object containing the aggregated result.", "source": "github-repos"}
{"code": "def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):\n        \n        self.set_status(status)\n        self.set_content('')\n        self.set_header(HttpResponseHeaders.LOCATION, url)", "docstring": "Helper method to set a redirect response.\n\nArgs:\nurl (:obj:`str`): URL to redirect to\nstatus (:obj:`str`, optional): Status code of the response", "source": "juraj-google-style"}
{"code": "def _PackArgumentsHelper(self, elem, data, set_type_attrs):\n    if self._packer:\n        data = self._packer.Pack(data, self._version)\n    if isinstance(data, dict):\n        type_override = data.get('xsi_type')\n        if type_override:\n            elem_type = self._DiscoverElementTypeFromLocalname(type_override)\n        else:\n            elem_type = elem.type\n        data_formatted = data.iteritems()\n        packed_result = self._CreateComplexTypeFromData(elem_type, (type_override is not None), data_formatted, set_type_attrs)\n    elif isinstance(data, zeep.xsd.CompoundValue):\n        elem_type = data._xsd_type\n        data_formatted = zip(dir(data), [data[k] for k in dir(data)])\n        packed_result = self._CreateComplexTypeFromData(elem_type, False, data_formatted, set_type_attrs)\n    elif isinstance(data, (list, tuple)):\n        packed_result = [self._PackArgumentsHelper(elem, item, set_type_attrs) for item in data]\n    else:\n        if ((elem.type.name == 'base64Binary') and self._IsBase64(data)):\n            _logger.warn('Passing data to base64 field %s that may already be encoded. Do not pre-encode base64 fields with zeep.', elem.name)\n        packed_result = data\n    return packed_result", "docstring": "Recursive helper for PackArguments.\n\nArgs:\nelem: The element type we are creating.\ndata: The data to instantiate it with.\nset_type_attrs: A boolean indicating whether or not attributes that end\nin .Type should be set. This is only necessary for batch job service.\n\nReturns:\nAn instance of type 'elem'.", "source": "codesearchnet"}
{"code": "def _piecewise_learning_rate(step, boundaries, values):\n  \n  values = [1.0] + values\n  boundaries = [float(x) for x in boundaries]\n  return tf.train.piecewise_constant(\n      step, boundaries, values, name=\"piecewise_lr\")", "docstring": "Scale learning rate according to the given schedule.\n\nMultipliers are not cumulative.\n\nArgs:\nstep: global step\nboundaries: List of steps to transition on.\nvalues: Multiplier to apply at each boundary transition.\n\nReturns:\nScaled value for the learning rate.", "source": "juraj-google-style"}
{"code": "def generate(self, information, timeout=-1):\n        \n        return self._client.create(information, timeout=timeout)", "docstring": "Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients.\n\nArgs:\ninformation (dict): Information to generate the certificate for RabbitMQ clients.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: RabbitMQ certificate generated", "source": "juraj-google-style"}
{"code": "def _validate_exp(claims, leeway=0):\n    if ('exp' not in claims):\n        return\n    try:\n        exp = int(claims['exp'])\n    except ValueError:\n        raise JWTClaimsError('Expiration Time claim (exp) must be an integer.')\n    now = timegm(datetime.utcnow().utctimetuple())\n    if (exp < (now - leeway)):\n        raise ExpiredSignatureError('Signature has expired.')", "docstring": "Validates that the 'exp' claim is valid.\n\nThe \"exp\" (expiration time) claim identifies the expiration time on\nor after which the JWT MUST NOT be accepted for processing.  The\nprocessing of the \"exp\" claim requires that the current date/time\nMUST be before the expiration date/time listed in the \"exp\" claim.\nImplementers MAY provide for some small leeway, usually no more than\na few minutes, to account for clock skew.  Its value MUST be a number\ncontaining a NumericDate value.  Use of this claim is OPTIONAL.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\nleeway (int): The number of seconds of skew that is allowed.", "source": "codesearchnet"}
{"code": "def list_tags(self, image_name):\n    tags_url = (self.registry_url + '/v2/{}/tags/list')\n    r = self.get(tags_url.format(image_name), auth=self.auth)\n    data = r.json()\n    if ('tags' in data):\n        return reversed(sorted(data['tags']))\n    return []", "docstring": "List all tags for the given image stored in the registry.\n\nArgs:\nimage_name (str):\nThe name of the image to query. The image must be present on the\nregistry for this call to return any values.\nReturns:\nlist[str]: List of tags for that image.", "source": "codesearchnet"}
{"code": "def open_tunnel(self, serial_no, port=19020):\n        \n        return self.open(ip_addr='tunnel:' + str(serial_no) + ':' + str(port))", "docstring": "Connects to the J-Link emulator (over SEGGER tunnel).\n\nArgs:\nself (JLink): the ``JLink`` instance\nserial_no (int): serial number of the J-Link\nport (int): optional port number (default to 19020).\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def find_element_by_id(self, id_, update=False) -> Elements:\n        \n        return self.find_element(by=By.ID, value=id_, update=update)", "docstring": "Finds an element by id.\n\nArgs:\nid_: The id of the element to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nThe element if it was found.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelement = driver.find_element_by_id('foo')", "source": "juraj-google-style"}
{"code": "def update_mim_version(self, new_genes, new_panel, old_version):\n        \n        LOG.info('Updating versions for new genes')\n        version = new_panel['version']\n        for gene in new_panel['genes']:\n            gene_symbol = gene['hgnc_id']\n            \n            if gene_symbol in new_genes:\n                gene['database_entry_version'] = version\n                continue\n            \n            gene['database_entry_version'] = old_version\n\n        return", "docstring": "Set the correct version for each gene\nLoop over the genes in the new panel\n\nArgs:\nnew_genes(set(str)): Set with the new gene symbols\nnew_panel(dict)", "source": "juraj-google-style"}
{"code": "def chain_to_quadratic(chain, target_adjacency, chain_strength):\n    quadratic = {}\n    seen = set()\n    try:\n        next_level = {next(iter(chain))}\n    except StopIteration:\n        raise ValueError('chain must have at least one variable')\n    while next_level:\n        this_level = next_level\n        next_level = set()\n        for v in this_level:\n            if (v not in seen):\n                seen.add(v)\n                for u in target_adjacency[v]:\n                    if (u not in chain):\n                        continue\n                    next_level.add(u)\n                    if ((u != v) and ((u, v) not in quadratic)):\n                        quadratic[(v, u)] = (- chain_strength)\n    if (len(chain) != len(seen)):\n        raise ValueError('{} is not a connected chain'.format(chain))\n    return quadratic", "docstring": "Determine the quadratic biases that induce the given chain.\n\nArgs:\nchain (iterable):\nThe variables that make up a chain.\n\ntarget_adjacency (dict/:class:`networkx.Graph`):\nShould be a dict of the form {s: Ns, ...} where s is a variable\nin the target graph and Ns is the set of neighbours of s.\n\nchain_strength (float):\nThe magnitude of the quadratic bias that should be used to create chains.\n\nReturns:\ndict[edge, float]: The quadratic biases that induce the given chain.\n\nRaises:\nValueError: If the variables in chain do not form a connected subgraph of target.\n\nExamples:\n>>> chain = {1, 2}\n>>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}\n>>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1)\n{(1, 2): -1}", "source": "codesearchnet"}
{"code": "def _CallMethod(self, srvc, method_descriptor, rpc_controller, request, callback):\n    if (method_descriptor.containing_service != self.descriptor):\n        raise RuntimeError('CallMethod() given method descriptor for wrong service type.')\n    method = getattr(srvc, method_descriptor.name)\n    return method(rpc_controller, request, callback)", "docstring": "Calls the method described by a given method descriptor.\n\nArgs:\nsrvc: Instance of the service for which this method is called.\nmethod_descriptor: Descriptor that represent the method to call.\nrpc_controller: RPC controller to use for this method's execution.\nrequest: Request protocol message.\ncallback: A callback to invoke after the method has completed.", "source": "codesearchnet"}
{"code": "def length_squared(x, keep_dims=False, name=None, reduction_dim=None):\n  \n  with tf.name_scope(name, 'length_squared', [x]) as scope:\n    x = tf.convert_to_tensor(x, name='x')\n    if not reduction_dim:\n      reduction_dim = _last_index(x, 1)\n    return tf.reduce_sum(\n        tf.square(x),\n        reduction_dim,\n        keep_dims=keep_dims,\n        name=scope)", "docstring": "Computes the squared length of x.\n\nArgs:\nx: A tensor.\nkeep_dims: If true, reduction does not change the rank of the input.\nname: Optional name for this op.\nreduction_dim: The dimension to reduce, by default choose the last one\nand if no shape is specified guess 1.\nReturns:\nThe squared length of x.", "source": "juraj-google-style"}
{"code": "def combine_columns(columns):\n    \n    columns_zipped = itertools.zip_longest(*columns)\n    return ''.join(x for zipped in columns_zipped for x in zipped if x)", "docstring": "Combine ``columns`` into a single string.\n\nExample:\n>>> combine_columns(['eape', 'xml'])\n'example'\n\nArgs:\ncolumns (iterable): ordered columns to combine\n\nReturns:\nString of combined columns", "source": "juraj-google-style"}
{"code": "def _gather_saveables_for_checkpoint(self):\n\n    def _saveable_factory(name=self._common_name):\n        return _SyncOnReadSaveable(self, name)\n    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}", "docstring": "Overrides Trackable method.\n\nThis allows both name-based and object-based save and restore of\n`SyncOnReadVariable`s.\n\nReturns:\nA dictionary mapping attribute names to `SaveableObject` factories.", "source": "github-repos"}
{"code": "def GetBlockHash(self, height):\n        \n        if self._current_block_height < height:\n            return\n\n        if len(self._header_index) <= height:\n            return\n\n        return self._header_index[height]", "docstring": "Get the block hash by its block height\nArgs:\nheight(int): height of the block to retrieve hash from.\n\nReturns:\nbytes: a non-raw block hash (e.g. b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\\x6d\\xd8...etc'", "source": "juraj-google-style"}
{"code": "def _AddParentDirectories(self, path):\n    path_segments = self.file_system.SplitPath(path)\n    for segment_index in range(len(path_segments)):\n        parent_path = self.file_system.JoinPath(path_segments[:segment_index])\n        file_entry = self.file_system.GetFileEntryByPath(parent_path)\n        if (file_entry and (not file_entry.IsDirectory())):\n            raise ValueError('Non-directory parent file entry: {0:s} already exists.'.format(parent_path))\n    for segment_index in range(len(path_segments)):\n        parent_path = self.file_system.JoinPath(path_segments[:segment_index])\n        if (not self.file_system.FileEntryExistsByPath(parent_path)):\n            self.file_system.AddFileEntry(parent_path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)", "docstring": "Adds the parent directories of a path to the fake file system.\n\nArgs:\npath (str): path of the file within the fake file system.\n\nRaises:\nValueError: if a parent directory is already set and is not a directory.", "source": "codesearchnet"}
{"code": "def parse_arguments(argv):\n    parser = argparse.ArgumentParser(description='write-to-pubsub')\n    parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')\n    parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)\n    args, _ = parser.parse_known_args(args=argv)\n    return args", "docstring": "It parses the arguments passed to the command line and returns them as an object\n\nArgs:\nargv: The arguments passed to the command line.\n\nReturns:\nThe arguments that are being passed in.", "source": "github-repos"}
{"code": "def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs):\n    df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs)\n    return (_split_result_for_readers(0, num_splits, df) + [len(df.index)])", "docstring": "Use a Ray task to read columns from HDF5 into a Pandas DataFrame.\n\nNote: Ray functions are not detected by codecov (thus pragma: no cover)\n\nArgs:\npath_or_buf: The path of the HDF5 file.\ncolumns: The list of column names to read.\nnum_splits: The number of partitions to split the column into.\n\nReturns:\nA list containing the split Pandas DataFrames and the Index as the last\nelement. If there is not `index_col` set, then we just return the length.\nThis is used to determine the total length of the DataFrame to build a\ndefault Index.", "source": "codesearchnet"}
{"code": "def edge(self, tail_name, head_name, label=None, _attributes=None, **attrs):\n    tail_name = self._quote_edge(tail_name)\n    head_name = self._quote_edge(head_name)\n    attr_list = self._attr_list(label, attrs, _attributes)\n    line = (self._edge % (tail_name, head_name, attr_list))\n    self.body.append(line)", "docstring": "Create an edge between two nodes.\n\nArgs:\ntail_name: Start node identifier.\nhead_name: End node identifier.\nlabel: Caption to be displayed near the edge.\nattrs: Any additional edge attributes (must be strings).", "source": "codesearchnet"}
{"code": "def request_stop(self, ex=None):\n    self._coord.request_stop(ex=ex)", "docstring": "Request that the coordinator stop the threads.\n\nSee `Coordinator.request_stop()`.\n\nArgs:\nex: Optional `Exception`, or Python `exc_info` tuple as returned by\n`sys.exc_info()`.  If this is the first call to `request_stop()` the\ncorresponding exception is recorded and re-raised from `join()`.", "source": "github-repos"}
{"code": "def _get_bonds(self, mol):\n        \n        num_atoms = len(mol)\n        \n        if self.ignore_ionic_bond:\n            covalent_atoms = [i for i in range(num_atoms) if mol.species[i].symbol not in self.ionic_element_list]\n        else:\n            covalent_atoms = list(range(num_atoms))\n        all_pairs = list(itertools.combinations(covalent_atoms, 2))\n        pair_dists = [mol.get_distance(*p) for p in all_pairs]\n        elements = mol.composition.as_dict().keys()\n        unavailable_elements = list(set(elements) -\n                                    set(self.covalent_radius.keys()))\n        if len(unavailable_elements) > 0:\n            raise ValueError(\"The covalent radius for element {} is not \"\n                             \"available\".format(unavailable_elements))\n        bond_13 = self.get_13_bonds(self.priority_bonds)\n        max_length = [(self.covalent_radius[mol.sites[p[0]].specie.symbol] +\n                       self.covalent_radius[mol.sites[p[1]].specie.symbol]) *\n                      (1 + (self.priority_cap if p in self.priority_bonds\n                            else (self.bond_length_cap if p not in bond_13\n                                  else self.bond_13_cap))) *\n                      (0.1 if (self.ignore_halogen_self_bond and p not in self.priority_bonds and\n                               mol.sites[p[0]].specie.symbol in self.halogen_list and\n                               mol.sites[p[1]].specie.symbol in self.halogen_list)\n                       else 1.0)\n                      for p in all_pairs]\n\n        bonds = [bond\n                 for bond, dist, cap in zip(all_pairs, pair_dists, max_length)\n                 if dist <= cap]\n        return bonds", "docstring": "Find all the bond in a molcule\n\nArgs:\nmol: the molecule. pymatgen Molecule object\n\nReturns:\nList of tuple. Each tuple correspond to a bond represented by the\nid of the two end atoms.", "source": "juraj-google-style"}
{"code": "def classify_coupling(coupling):\n    (lower, upper) = coupling\n    if ((lower is None) and (upper is None)):\n        return CouplingClass.Uncoupled\n    elif ((lower is None) or (upper is None)):\n        return CouplingClass.DirectionalReverse\n    elif ((lower == 0.0) and (upper == 0.0)):\n        return CouplingClass.Inconsistent\n    elif ((lower <= 0.0) and (upper >= 0.0)):\n        return CouplingClass.DirectionalForward\n    elif (abs((lower - upper)) < 1e-06):\n        return CouplingClass.Full\n    else:\n        return CouplingClass.Partial", "docstring": "Return a constant indicating the type of coupling.\n\nDepending on the type of coupling, one of the constants from\n:class:`.CouplingClass` is returned.\n\nArgs:\ncoupling: Tuple of minimum and maximum flux ratio", "source": "codesearchnet"}
{"code": "def __init__(self, option):\n        \n        self.option = option\n        super().__init__('invalid option name: {}'.format(option))", "docstring": "Initialization of instances:\n\nArgs:\noption (str): invalid option name.\n\nAttributes:\noption (str): invalid option name.", "source": "juraj-google-style"}
{"code": "def extend(self, step):\n        \n        self.timesteps.extend(step.timesteps)\n        self.masks.extend(step.masks)\n        self.x.extend(step.x)\n        self.y.extend(step.y)\n        self.i.extend(step.i)\n        self.j.extend(step.j)\n        self.end_time = step.end_time\n        self.times = np.arange(self.start_time, self.end_time + self.step, self.step)\n        self.u = np.concatenate((self.u, step.u))\n        self.v = np.concatenate((self.v, step.v))\n        for attr in self.attributes.keys():\n            if attr in step.attributes.keys():\n                self.attributes[attr].extend(step.attributes[attr])", "docstring": "Adds the data from another STObject to this object.\n\nArgs:\nstep: another STObject being added after the current one in time.", "source": "juraj-google-style"}
{"code": "def getFilepaths(self, filename):\n        \n        return (os.path.join(os.environ['HOME'], filename),\n                os.path.join(self.mackup.mackup_folder, filename))", "docstring": "Get home and mackup filepaths for given file\n\nArgs:\nfilename (str)\n\nReturns:\nhome_filepath, mackup_filepath (str, str)", "source": "juraj-google-style"}
{"code": "def align_segmentation(beat_times, song):\n    \n    try:\n        segment_times, segment_labels = msaf.io.read_references(song)\n    except:\n        return None, None, None\n    segment_times = np.asarray(segment_times)\n\n    \n    segment_intervals = msaf.utils.times_to_intervals(segment_times)\n\n    \n    beat_intervals = np.asarray(zip(beat_times[:-1], beat_times[1:]))\n\n    \n    beat_segment_ids = librosa.util.match_intervals(beat_intervals,\n                                                    segment_intervals)\n\n    segment_beats = []\n    segment_times_out = []\n    segment_labels_out = []\n\n    \n    \n    for i in range(segment_times.shape[0]):\n        hits = np.argwhere(beat_segment_ids == i)\n        if len(hits) > 0 and i < len(segment_intervals) and \\\n                i < len(segment_labels):\n            segment_beats.extend(hits[0])\n            segment_times_out.append(segment_intervals[i, :])\n            segment_labels_out.append(segment_labels[i])\n\n    \n    segment_beats = list(segment_beats)\n    \n    \n\n    \n    \n    segment_times_out = segment_times\n\n    return segment_beats, segment_times_out, segment_labels_out", "docstring": "Load a ground-truth segmentation, and align times to the nearest\ndetected beats.\n\nArguments:\nbeat_times -- array\nsong -- path to the audio file\n\nReturns:\nsegment_beats -- array\nbeat-aligned segment boundaries\n\nsegment_times -- array\ntrue segment times\n\nsegment_labels -- array\nlist of segment labels", "source": "juraj-google-style"}
{"code": "def get_capacity_vol(self, min_voltage=None, max_voltage=None, use_overall_normalization=True):\n    pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage)\n    normalization_vol = (self.normalization_volume if (use_overall_normalization or (len(pairs_in_range) == 0)) else pairs_in_range[(- 1)].vol_discharge)\n    return (((sum([pair.mAh for pair in pairs_in_range]) / normalization_vol) * 1e+24) / N_A)", "docstring": "Get the volumetric capacity of the electrode.\n\nArgs:\nmin_voltage (float): The minimum allowable voltage for a given\nstep.\nmax_voltage (float): The maximum allowable voltage allowable for a\ngiven step.\nuse_overall_normalization (booL): If False, normalize by the\ndischarged state of only the voltage pairs matching the voltage\ncriteria. if True, use default normalization of the full\nelectrode path.\n\nReturns:\nVolumetric capacity in mAh/cc across the insertion path (a subset\nof the path can be chosen by the optional arguments)", "source": "codesearchnet"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(CreateKeyPairResponsePayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.PRIVATE_KEY_UNIQUE_IDENTIFIER, local_buffer):\n        self._private_key_unique_identifier = primitives.TextString(tag=enums.Tags.PRIVATE_KEY_UNIQUE_IDENTIFIER)\n        self._private_key_unique_identifier.read(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidKmipEncoding('The CreateKeyPair response payload encoding is missing the private key unique identifier.')\n    if self.is_tag_next(enums.Tags.PUBLIC_KEY_UNIQUE_IDENTIFIER, local_buffer):\n        self._public_key_unique_identifier = primitives.TextString(tag=enums.Tags.PUBLIC_KEY_UNIQUE_IDENTIFIER)\n        self._public_key_unique_identifier.read(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidKmipEncoding('The CreateKeyPair response payload encoding is missing the public key unique identifier.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self.is_tag_next(enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE, local_buffer):\n            self._private_key_template_attribute = objects.TemplateAttribute(tag=enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE)\n            self._private_key_template_attribute.read(local_buffer, kmip_version=kmip_version)\n        if self.is_tag_next(enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE, local_buffer):\n            self._public_key_template_attribute = objects.TemplateAttribute(tag=enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE)\n            self._public_key_template_attribute.read(local_buffer, kmip_version=kmip_version)\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the CreateKeyPair response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data buffer containing encoded object\ndata, supporting a read method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the private key unique identifier or\nthe public key unique identifier is missing from the encoded\npayload.", "source": "codesearchnet"}
{"code": "def PopEventSource(self):\n    try:\n        (_, _, event_source) = heapq.heappop(self._heap)\n    except IndexError:\n        return None\n    return event_source", "docstring": "Pops an event source from the heap.\n\nReturns:\nEventSource: an event source or None on if no event source is available.", "source": "codesearchnet"}
{"code": "def _hexencode(bytestring, insert_spaces=False):\n    _checkString(bytestring, description='byte string')\n    separator = ('' if (not insert_spaces) else ' ')\n    byte_representions = []\n    for c in bytestring:\n        byte_representions.append('{0:02X}'.format(ord(c)))\n    return separator.join(byte_representions).strip()", "docstring": "Convert a byte string to a hex encoded string.\n\nFor example 'J' will return '4A', and ``'\\\\x04'`` will return '04'.\n\nArgs:\nbytestring (str): Can be for example ``'A\\\\x01B\\\\x45'``.\ninsert_spaces (bool): Insert space characters between pair of characters to increase readability.\n\nReturns:\nA string of twice the length, with characters in the range '0' to '9' and 'A' to 'F'.\nThe string will be longer if spaces are inserted.\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def GetTopLevel(self, file_object):\n    \n    try:\n      top_level_object = biplist.readPlist(file_object)\n\n    except (biplist.InvalidPlistException,\n            biplist.NotBinaryPlistException) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse plist with error: {0!s}'.format(exception))\n\n    return top_level_object", "docstring": "Returns the deserialized content of a plist as a dictionary object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nReturns:\ndict[str, object]: contents of the plist.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def retrieve_taf(station_icao) -> typing.Tuple[(typing.Union[(str, None)], typing.Union[(str, None)])]:\n    url = _BASE_TAF_URL.format(station=station_icao)\n    with requests.get(url) as resp:\n        if (not resp.ok):\n            return (f'''unable to obtain TAF for station {station_icao}\nGot to \"http:\n        return (None, resp.content.decode().split('\\n')[1])", "docstring": "Retrieves a TAF string from an online database\n\nArgs:\nstation_icao: ICAO of the station\n\nReturns:\ntuple of error, metar_str", "source": "codesearchnet"}
{"code": "def get_path_to_datafile(path):\n    if runfiles:\n        r = runfiles.Create()\n        new_fpath = r.Rlocation(_os.path.abspath(_os.path.join('tensorflow', path)))\n        if new_fpath is not None and _os.path.exists(new_fpath):\n            return new_fpath\n    old_filepath = _os.path.join(_os.path.dirname(_inspect.getfile(_sys._getframe(1))), path)\n    return old_filepath", "docstring": "Get the path to the specified file in the data dependencies.\n\nThe path is relative to tensorflow/\n\nArgs:\npath: a string resource path relative to tensorflow/\n\nReturns:\nThe path to the specified file present in the data attribute of py_test\nor py_binary.\n\nRaises:\nIOError: If the path is not found, or the resource can't be opened.", "source": "github-repos"}
{"code": "def publish(msg=\"checkpoint: publish package\"):\n    \n    test = check()\n    if test.succeeded:\n        \n        \n        sdist = local(\"python setup.py sdist\")\n        if sdist.succeeded:\n            build = local(\n                'python setup.py build && python setup.py bdist_egg')\n            if build.succeeded:\n                upload = local(\"twine upload dist/*\")\n                if upload.succeeded:\n                    tag()", "docstring": "Deploy the app to PYPI.\n\nArgs:\nmsg (str, optional): Description", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n    residual = hidden_states\n    hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    second_residual = hidden_states\n    cross_attn_weights = None\n    hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = second_residual + hidden_states\n    hidden_states = self.encoder_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights, cross_attn_weights)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(batch, seq_len, embed_dim)`.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings that are added to the queries and keys in the self-attention layer.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\nencoder_hidden_states (`torch.FloatTensor`):\ncross attention input to the layer of shape `(batch, seq_len, embed_dim)`\nencoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def get_model_indexes(model):\n    indexes = []\n    for index in get_index_names():\n        for app_model in get_index_models(index):\n            if (app_model == model):\n                indexes.append(index)\n    return indexes", "docstring": "Return list of all indexes in which a model is configured.\n\nA model may be configured to appear in multiple indexes. This function\nwill return the names of the indexes as a list of strings. This is\nuseful if you want to know which indexes need updating when a model\nis saved.\n\nArgs:\nmodel: a Django model class.", "source": "codesearchnet"}
{"code": "def _add_row_partitions(self, flat_values, validate=False):\n    if self.row_partitions:\n        if validate:\n            flat_values = self._validate_flat_values(flat_values)\n        return ragged_tensor.RaggedTensor._from_nested_row_partitions(flat_values, self.row_partitions, validate=False)\n    else:\n        return flat_values", "docstring": "Add row partitions to flat_values, if necessary.\n\nIf the shape is truly ragged, then this adds the row_partitions.\n\nThe shape is dense, then this just returns flat_values.\n\nArgs:\nflat_values: the flat_values of a ragged tensor with this shape, or a\ndense tensor with this shape.\nvalidate: validate the flat_values have the right first dimension.\n\nReturns:\nflat_values reshaped to have row_partitions.", "source": "github-repos"}
{"code": "def symbol_top(body_output, targets, model_hparams, vocab_size):\n  \n  del targets  \n  if model_hparams.shared_embedding_and_softmax_weights:\n    scope_name = \"shared\"\n    reuse = tf.AUTO_REUSE\n  else:\n    scope_name = \"softmax\"\n    reuse = False\n  with tf.variable_scope(scope_name, reuse=reuse):\n    body_output_shape = common_layers.shape_list(body_output)\n    var = get_weights(model_hparams, vocab_size, body_output_shape[-1])\n    if (model_hparams.factored_logits and\n        model_hparams.mode == tf.estimator.ModeKeys.TRAIN):\n      \n      body_output = tf.expand_dims(body_output, 3)\n      return common_layers.FactoredTensor(body_output, var)\n    else:\n      body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])\n      logits = tf.matmul(body_output, var, transpose_b=True)\n      return tf.reshape(logits,\n                        body_output_shape[:-1] + [1, vocab_size])", "docstring": "Generate logits.\n\nArgs:\nbody_output: A Tensor with shape\n[batch, p0, p1, model_hparams.hidden_size].\ntargets: Unused.\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nlogits: A Tensor with shape  [batch, p0, p1, ?, vocab_size].", "source": "juraj-google-style"}
{"code": "def _get_event_id(object_type: str) -> str:\n    \n    key = _keys.event_counter(object_type)\n    DB.watch(key, pipeline=True)\n    count = DB.get_value(key)\n    DB.increment(key)\n    DB.execute()\n    if count is None:\n        count = 0\n    return '{}_event_{:08d}'.format(object_type, int(count))", "docstring": "Return an event key for the event on the object type.\n\nThis must be a unique event id for the object.\n\nArgs:\nobject_type (str): Type of object\n\nReturns:\nstr, event id", "source": "juraj-google-style"}
{"code": "def convert(self, point):\n    (x, y) = point\n    (x1, y1) = ((x - self.x_offset), (y - self.y_offset))\n    logger.debug('converted {} {} ==> {} {}'.format(x, y, x1, y1))\n    return (x1, y1)", "docstring": "Convert a point from one coordinate system to another.\n\nArgs:\npoint: tuple(int x, int y)\nThe point in the original coordinate system.\n\nReturns:\nconverted_point: tuple(int x, int y)\nThe point in the new coordinate system.\n\nExample: convert coordinate from original image into a pixel location\nwithin a cutout image.\n\n@rtype: list(float,float)", "source": "codesearchnet"}
{"code": "def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"presence\": presence})\n        return self.api_call(\"users.setPresence\", json=kwargs)", "docstring": "Manually sets user presence.\n\nArgs:\npresence (str): Either 'auto' or 'away'.", "source": "juraj-google-style"}
{"code": "def convert_relu(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting relu ...')\n\n    if names == 'short':\n        tf_name = 'RELU' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    relu = keras.layers.Activation('relu', name=tf_name)\n    layers[scope_name] = relu(layers[inputs[0]])", "docstring": "Convert relu layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def aggregate_and_return_name_for_input(self, out_graphdef):\n    flattened = self.flatten_nodes()\n    if self.aggregation == OpHint.AGGREGATE_FIRST or self.aggregation == OpHint.AGGREGATE_LAST:\n        assert len(flattened) == 1\n    if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:\n        return _tensor_name_base(flattened[0].name)\n    else:\n        new_node = _node_def_pb2.NodeDef()\n        new_node.op = 'Pack'\n        new_node.name = 'OpHintStack-%s' % flattened[0].name\n        new_node.attr['N'].i = len(flattened)\n        new_node.attr['T'].type = flattened[0].attr['T'].type\n        for discrete in flattened:\n            new_node.input.append(_tensor_name_base(discrete.name))\n        out_graphdef.node.extend([new_node])\n        return new_node.name", "docstring": "This adds the nodes to out_graphdef and returns an aggregated output.\n\nIn particular, if you have 4 inputs to a hint stub, this will be the\nnode that you can use as an output. I.e. you have 4 timesteps from a\nstatic rnn, then a fused UnidirectionalLSTM will expect 1 input with\nall 4 time steps. So here we make a pack and return the output name of\nthat pack.\n\nArgs:\nout_graphdef: A graphdef that is ready to have this input added.\n\nReturns:\nThe name of a pack that aggregates this node.", "source": "github-repos"}
{"code": "def get(self, name):\n    return self.prepare_model(self.client.api.inspect_image(name))", "docstring": "Gets an image.\n\nArgs:\nname (str): The name of the image.\n\nReturns:\n(:py:class:`Image`): The image.\n\nRaises:\n:py:class:`docker.errors.ImageNotFound`\nIf the image does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _parse_rd(self, config):\n        \n        match = RD_RE.search(config)\n        if match:\n            value = match.group('value')\n        else:\n            value = match\n        return dict(rd=value)", "docstring": "_parse_rd scans the provided configuration block and extracts\nthe vrf rd. The return dict is intended to be merged into the response\ndict.\n\nArgs:\nconfig (str): The vrf configuration block from the nodes running\nconfiguration\n\nReturns:\ndict: resource dict attribute", "source": "juraj-google-style"}
{"code": "def _build_mac_signature_key_information(self, value):\n    if (value is None):\n        return None\n    if (not isinstance(value, dict)):\n        raise TypeError('MAC/signature key information must be a dictionary.')\n    cryptographic_parameters = value.get('cryptographic_parameters')\n    if cryptographic_parameters:\n        cryptographic_parameters = self._build_cryptographic_parameters(cryptographic_parameters)\n    mac_signature_key_information = cobjects.MACSignatureKeyInformation(unique_identifier=value.get('unique_identifier'), cryptographic_parameters=cryptographic_parameters)\n    return mac_signature_key_information", "docstring": "Build an MACSignatureKeyInformation struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nMACSignatureKeyInformation struct.\n\nReturns:\nMACSignatureInformation: a MACSignatureKeyInformation struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "codesearchnet"}
{"code": "def prefix(self, imod: YangIdentifier, mid: ModuleId) -> YangIdentifier:\n    try:\n        did = (imod, self.implement[imod])\n    except KeyError:\n        raise ModuleNotImplemented(imod) from None\n    try:\n        pmap = self.modules[mid].prefix_map\n    except KeyError:\n        raise ModuleNotRegistered(*mid) from None\n    for p in pmap:\n        if (pmap[p] == did):\n            return p\n    raise ModuleNotImported(imod, mid)", "docstring": "Return the prefix corresponding to an implemented module.\n\nArgs:\nimod: Name of an implemented module.\nmid: Identifier of the context module.\n\nRaises:\nModuleNotImplemented: If `imod` is not implemented.\nModuleNotRegistered: If `mid` is not registered in YANG library.\nModuleNotImported: If `imod` is not imported in `mid`.", "source": "codesearchnet"}
{"code": "def delete(self, key):\n    \n    path = self.object_path(key)\n    if os.path.exists(path):\n      os.remove(path)", "docstring": "Removes the object named by `key`.\n\nArgs:\nkey: Key naming the object to remove.", "source": "juraj-google-style"}
{"code": "def _import_and_bind(self, imp):\n    \n    \n    \n    with self.block.alloc_temp() as mod, \\\n        self.block.alloc_temp('[]*πg.Object') as mod_slice:\n      self.writer.write_checked_call2(\n          mod_slice, 'πg.ImportModule(πF, {})', util.go_str(imp.name))\n\n      \n      for binding in imp.bindings:\n        if binding.bind_type == imputil.Import.MODULE:\n          self.writer.write('{} = {}[{}]'.format(\n              mod.name, mod_slice.expr, binding.value))\n          self.block.bind_var(self.writer, binding.alias, mod.expr)\n        else:\n          self.writer.write('{} = {}[{}]'.format(\n              mod.name, mod_slice.expr, imp.name.count('.')))\n          \n          with self.block.alloc_temp() as member:\n            self.writer.write_checked_call2(\n                member, 'πg.GetAttr(πF, {}, {}, nil)',\n                mod.expr, self.block.root.intern(binding.value))\n            self.block.bind_var(self.writer, binding.alias, member.expr)", "docstring": "Generates code that imports a module and binds it to a variable.\n\nArgs:\nimp: Import object representing an import of the form \"import x.y.z\" or\n\"from x.y import z\". Expects only a single binding.", "source": "juraj-google-style"}
{"code": "def from_comm(cls, pub):\n        \n        filename = None\n        if pub.b64_data:\n            filename = cls._save_to_unique_filename(pub)\n\n        return cls(\n            title=pub.title,\n            author=pub.author,\n            pub_year=pub.pub_year,\n            isbn=pub.isbn,\n            urnnbn=pub.urnnbn,\n            uuid=pub.uuid,\n            aleph_id=pub.aleph_id,\n            producent_id=pub.producent_id,\n            is_public=pub.is_public,\n            filename=pub.filename,\n            is_periodical=pub.is_periodical,\n            path=pub.path,\n\n            file_pointer=filename\n        )", "docstring": "Convert communication namedtuple to this class.\n\nArgs:\npub (obj): :class:`.Publication` instance which will be converted.\n\nReturns:\nobj: :class:`DBPublication` instance.", "source": "juraj-google-style"}
{"code": "async def setup_round_robin_points(self, match_win: float = None, match_tie: float = None, game_win: float = None, game_tie: float = None):\n        \n        params = {}\n        if match_win is not None:\n            params['rr_pts_for_match_win'] = match_win\n        if match_win is not None:\n            params['rr_pts_for_match_tie'] = match_tie\n        if match_win is not None:\n            params['rr_pts_for_game_win'] = game_win\n        if match_win is not None:\n            params['rr_pts_for_game_tie'] = game_tie\n        assert_or_raise(len(params) > 0, ValueError, 'At least one of the points must be given')\n        await self.update(**params)", "docstring": "|methcoro|\n\nArgs:\nmatch_win\nmatch_tie\ngame_win\ngame_tie\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def value_instance_to_pytd_type(self, node, v, instance, seen, view):\n    if abstract_utils.is_recursive_annotation(v):\n        return pytd.LateType(v.unflatten_expr() if self._detailed else v.expr)\n    elif isinstance(v, abstract.Union):\n        return pytd.UnionType(tuple((self.value_instance_to_pytd_type(node, t, instance, seen, view) for t in v.options)))\n    elif isinstance(v, abstract.AnnotationContainer):\n        return self.value_instance_to_pytd_type(node, v.base_cls, instance, seen, view)\n    elif isinstance(v, abstract.LiteralClass):\n        if isinstance(v.value, abstract.Instance) and v.value.cls.is_enum:\n            typ = pytd_utils.NamedTypeWithModule(v.value.cls.official_name or v.value.cls.name, v.value.cls.module)\n            value = pytd.Constant(v.value.name, typ)\n        elif isinstance(v.value.pyval, (str, bytes)):\n            value = repr(v.value.pyval)\n        elif isinstance(v.value.pyval, bool):\n            value = self.ctx.loader.lookup_pytd('builtins', v.value.pyval)\n        else:\n            assert isinstance(v.value.pyval, int), v.value.pyval\n            value = v.value.pyval\n        return pytd.Literal(value)\n    elif isinstance(v, typed_dict.TypedDictClass):\n        return pytd.NamedType(v.name)\n    elif isinstance(v, fiddle_overlay.BuildableType):\n        param = self.value_instance_to_pytd_type(node, v.underlying, None, seen, view)\n        return pytd.GenericType(base_type=pytd.NamedType(f'fiddle.{v.fiddle_type_name}'), parameters=(param,))\n    elif isinstance(v, abstract.Class):\n        if not self._detailed and v.official_name is None:\n            return pytd.AnythingType()\n        if seen is None:\n            seen = frozenset()\n        if instance in seen:\n            type_params = ()\n        else:\n            type_params = tuple((t.name for t in v.template))\n        if instance is not None:\n            seen |= {instance}\n        type_arguments = self._value_to_parameter_types(node, v, instance, type_params, seen, view)\n        base = pytd_utils.NamedTypeWithModule(v.official_name or v.name, v.module)\n        if self._is_tuple(v, instance):\n            homogeneous = False\n        elif v.full_name == 'typing.Callable':\n            homogeneous = not isinstance(v, abstract.CallableClass)\n        else:\n            homogeneous = len(type_arguments) == 1\n        return pytd_utils.MakeClassOrContainerType(base, type_arguments, homogeneous)\n    elif isinstance(v, abstract.TYPE_VARIABLE_TYPES):\n        return self._type_variable_to_def(node, v, v.name)\n    elif isinstance(v, typing_overlay.Never):\n        return pytd.NothingType()\n    elif isinstance(v, abstract.Concatenate):\n        params = tuple((self.value_instance_to_pytd_type(node, t, instance, seen, view) for t in v.args + [v.paramspec]))\n        return pytd.Concatenate(pytd.NamedType('typing.Concatenate'), parameters=params)\n    else:\n        log.info('Using Any for instance of %s', v.name)\n        return pytd.AnythingType()", "docstring": "Get the PyTD type an instance of this object would have.\n\nArgs:\nnode: The node.\nv: The object.\ninstance: The instance.\nseen: Already seen instances.\nview: A Variable -> binding map.\n\nReturns:\nA PyTD type.", "source": "github-repos"}
{"code": "def bresenham(x1, y1, x2, y2):\n    points = []\n    issteep = (abs((y2 - y1)) > abs((x2 - x1)))\n    if issteep:\n        (x1, y1) = (y1, x1)\n        (x2, y2) = (y2, x2)\n    rev = False\n    if (x1 > x2):\n        (x1, x2) = (x2, x1)\n        (y1, y2) = (y2, y1)\n        rev = True\n    deltax = (x2 - x1)\n    deltay = abs((y2 - y1))\n    error = int((deltax / 2))\n    y = y1\n    ystep = None\n    if (y1 < y2):\n        ystep = 1\n    else:\n        ystep = (- 1)\n    for x in range(x1, (x2 + 1)):\n        if issteep:\n            points.append((y, x))\n        else:\n            points.append((x, y))\n        error -= deltay\n        if (error < 0):\n            y += ystep\n            error += deltax\n    if rev:\n        points.reverse()\n    return points", "docstring": "Return a list of points in a bresenham line.\n\nImplementation hastily copied from RogueBasin.\n\nReturns:\nList[Tuple[int, int]]: A list of (x, y) points,\nincluding both the start and end-points.", "source": "codesearchnet"}
{"code": "def ping(self, url, endpoint=''):\n        \n        r = self.get_url(url + \"/\" + endpoint)\n        return r.status_code", "docstring": "Ping the server to make sure that you can access the base URL.\n\nArguments:\nNone\nReturns:\n`boolean` Successful access of server (or status code)", "source": "juraj-google-style"}
{"code": "def _AddDSATargeting(client, ad_group_id, label_name):\n  \n  \n  ad_group_criterion_service = client.GetService('AdGroupCriterionService',\n                                                 version='v201809')\n\n  \n  operation = {\n      'operand': {\n          'xsi_type': 'BiddableAdGroupCriterion',\n          'adGroupId': ad_group_id,\n          \n          'criterion': {\n              'xsi_type': 'Webpage',\n              'parameter': {\n                  'criterionName': 'Test criterion',\n                  \n                  'conditions': [{\n                      'operand': 'CUSTOM_LABEL',\n                      'argument': label_name\n                  }],\n              }\n          },\n          \n          'biddingStrategyConfiguration': {\n              'bids': [{\n                  'xsi_type': 'CpcBid',\n                  'bid': {\n                      'microAmount': 1500000\n                  }\n              }]\n          }\n      },\n      'operator': 'ADD'\n  }\n\n  criterion = ad_group_criterion_service.mutate([operation])['value'][0]\n  print 'Web page criterion with ID \"%d\" and status \"%s\" was created.' % (\n      criterion['criterion']['id'], criterion['userStatus'])\n  return criterion", "docstring": "Set custom targeting for the page feed URLs based on a list of labels.\n\nArgs:\nclient: an AdWordsClient instance.\nad_group_id: a str AdGroup ID.\nlabel_name: a str label name.\n\nReturns:\nA suds.sudsobject.Object representing the newly created webpage criterion.", "source": "juraj-google-style"}
{"code": "def DeregisterSourceType(cls, source_type_class):\n    \n    if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:\n      raise KeyError(\n          'Source type not set for type: {0:s}.'.format(\n              source_type_class.TYPE_INDICATOR))\n\n    del cls._source_type_classes[source_type_class.TYPE_INDICATOR]", "docstring": "Deregisters a source type.\n\nSource types are identified based on their type indicator.\n\nArgs:\nsource_type_class (type): source type.\n\nRaises:\nKeyError: if a source type is not set for the corresponding type\nindicator.", "source": "juraj-google-style"}
{"code": "def GetDataByPath(self, path):\n    (_, path_data) = self._paths.get(path, (None, None))\n    return path_data", "docstring": "Retrieves the data associated to a path.\n\nArgs:\npath (str): path of the file entry.\n\nReturns:\nbytes: data or None if not available.", "source": "codesearchnet"}
{"code": "def on_modified(self, event):\n    if (not self._event_error):\n        self.logger.info(u'Change detected from an edit on: %s', event.src_path)\n        self.compile_dependencies(event.src_path)", "docstring": "Called when a file or directory is modified.\n\nArgs:\nevent: Watchdog event, ``watchdog.events.DirModifiedEvent`` or\n``watchdog.events.FileModifiedEvent``.", "source": "codesearchnet"}
{"code": "def skip(self, count=1):\n    if self.closed():\n        raise ValueError('Attempt to call skip() on a closed Queryable.')\n    count = max(0, count)\n    if (count == 0):\n        return self\n    if hasattr(self._iterable, '__getitem__'):\n        try:\n            stop = len(self._iterable)\n            return self._create(self._generate_optimized_skip_result(count, stop))\n        except TypeError:\n            pass\n    return self._create(self._generate_skip_result(count))", "docstring": "Skip the first count contiguous elements of the source sequence.\n\nIf the source sequence contains fewer than count elements returns an\nempty sequence and does not raise an exception.\n\nNote: This method uses deferred execution.\n\nArgs:\ncount: The number of elements to skip from the beginning of the\nsequence. If omitted defaults to one. If count is less than one\nthe result sequence will be empty.\n\nReturns:\nA Queryable over the elements of source excluding the first count\nelements.\n\nRaises:\nValueError: If the Queryable is closed().", "source": "codesearchnet"}
{"code": "def enum(cls):\n    \n\n    assert cls.__bases__ == (object,)\n\n    d = dict(cls.__dict__)\n    new_type = type(cls.__name__, (int,), d)\n    new_type.__module__ = cls.__module__\n\n    map_ = {}\n    for key, value in iteritems(d):\n        if key.upper() == key and isinstance(value, integer_types):\n            value_instance = new_type(value)\n            setattr(new_type, key, value_instance)\n            map_[value] = key\n\n    def str_(self):\n        if self in map_:\n            return \"%s.%s\" % (type(self).__name__, map_[self])\n        return \"%d\" % int(self)\n\n    def repr_(self):\n        if self in map_:\n            return \"<%s.%s: %d>\" % (type(self).__name__, map_[self], int(self))\n        return \"%d\" % int(self)\n\n    setattr(new_type, \"__repr__\", repr_)\n    setattr(new_type, \"__str__\", str_)\n\n    return new_type", "docstring": "A decorator for creating an int enum class.\n\nMakes the values a subclass of the type and implements repr/str.\nThe new class will be a subclass of int.\n\nArgs:\ncls (type): The class to convert to an enum\n\nReturns:\ntype: A new class\n\n::\n\n@enum\nclass Foo(object):\nFOO = 1\nBAR = 2", "source": "juraj-google-style"}
{"code": "def _callable_func(self, func, axis, *args, **kwargs):\n        \n\n        def callable_apply_builder(df, axis=0):\n            if not axis:\n                df.index = index\n                df.columns = pandas.RangeIndex(len(df.columns))\n            else:\n                df.columns = index\n                df.index = pandas.RangeIndex(len(df.index))\n            result = df.apply(func, axis=axis, *args, **kwargs)\n            return result\n\n        index = self.index if not axis else self.columns\n        func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)\n        result_data = self._map_across_full_axis(axis, func_prepared)\n        return self._post_process_apply(result_data, axis)", "docstring": "Apply callable functions across given axis.\n\nArgs:\nfunc: The functions to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "juraj-google-style"}
{"code": "def _read_hopopt_options(self, length):\n    counter = 0\n    optkind = list()\n    options = dict()\n    while (counter < length):\n        code = self._read_unpack(1)\n        if (not code):\n            break\n        (abbr, desc) = _HOPOPT_OPT.get(code, ('none', 'Unassigned'))\n        data = _HOPOPT_PROC(abbr)(self, code, desc=desc)\n        enum = _OPT_TYPE.get(code)\n        counter += data['length']\n        if (enum in optkind):\n            if isinstance(options[abbr], tuple):\n                options[abbr] += (Info(data),)\n            else:\n                options[abbr] = (Info(options[abbr]), Info(data))\n        else:\n            optkind.append(enum)\n            options[abbr] = data\n    if (counter != length):\n        raise ProtocolError(f'{self.alias}: invalid format')\n    return (tuple(optkind), options)", "docstring": "Read HOPOPT options.\n\nPositional arguments:\n* length -- int, length of options\n\nReturns:\n* dict -- extracted HOPOPT options", "source": "codesearchnet"}
{"code": "def bench(image, thread_count):\n    threads = [threading.Thread(target=(lambda : encoder.encode_png(image))) for _ in xrange(thread_count)]\n    start_time = datetime.datetime.now()\n    for thread in threads:\n        thread.start()\n    for thread in threads:\n        thread.join()\n    end_time = datetime.datetime.now()\n    delta = (end_time - start_time).total_seconds()\n    return delta", "docstring": "Encode `image` to PNG on `thread_count` threads in parallel.\n\nReturns:\nA `float` representing number of seconds that it takes all threads\nto finish encoding `image`.", "source": "codesearchnet"}
{"code": "def _get_manager(cluster_info, host, executor_id):\n  \n  for node in cluster_info:\n    if node['host'] == host and node['executor_id'] == executor_id:\n      addr = node['addr']\n      authkey = node['authkey']\n      TFSparkNode.mgr = TFManager.connect(addr, authkey)\n      break\n\n  if TFSparkNode.mgr is None:\n    msg = \"No TFManager found on this node, please ensure that:\\n\" + \\\n          \"1. Spark num_executors matches TensorFlow cluster_size\\n\" + \\\n          \"2. Spark cores/tasks per executor is 1.\\n\" + \\\n          \"3. Spark dynamic allocation is disabled.\"\n    raise Exception(msg)\n\n  logging.info(\"Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}\".format(host, executor_id, str(TFSparkNode.mgr.get('state'))))\n  return TFSparkNode.mgr", "docstring": "Returns this executor's \"singleton\" instance of the multiprocessing.Manager, reconnecting per python-worker if needed.\n\nArgs:\n:cluster_info: cluster node reservations\n:host: host IP address\n:executor_id: unique id per executor (created during initial call to run())\n\nReturns:\nTFManager instance for this executor/python-worker", "source": "juraj-google-style"}
{"code": "def output_mask(self):\n    output = self.output\n    if isinstance(output, list):\n        return [getattr(x, '_keras_mask', None) for x in output]\n    else:\n        return getattr(output, '_keras_mask', None)", "docstring": "Retrieves the output mask tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one inbound node,\ni.e. if it is connected to one incoming layer.\n\nReturns:\nOutput mask tensor (potentially None) or list of output\nmask tensors.\n\nRaises:\nAttributeError: if the layer is connected to\nmore than one incoming layers.", "source": "github-repos"}
{"code": "def _get_config_instance(group_or_term, session, **kwargs):\n    \n    path = group_or_term._get_path()\n    cached = group_or_term._top._cached_configs.get(path)\n    if cached:\n        config = cached\n        created = False\n    else:\n        \n        config, created = get_or_create(session, Config, **kwargs)\n    return config, created", "docstring": "Finds appropriate config instance and returns it.\n\nArgs:\ngroup_or_term (Group or Term):\nsession (Sqlalchemy session):\nkwargs (dict): kwargs to pass to get_or_create.\n\nReturns:\ntuple of (Config, bool):", "source": "juraj-google-style"}
{"code": "def __init__(self, conf_path=ZEO_CLIENT_PATH, project_key=PROJECT_KEY):\n        \n        super(self.__class__, self).__init__(\n            conf_path=conf_path,\n            project_key=project_key\n        )\n\n        \n        self.name_db_key = \"name_db\"\n        self.name_db = self._get_key_or_create(self.name_db_key)\n\n        \n        self.aleph_id_db_key = \"aleph_id_db\"\n        self.aleph_id_db = self._get_key_or_create(self.aleph_id_db_key)\n\n        \n        self.issn_db_key = \"issn_db\"\n        self.issn_db = self._get_key_or_create(self.issn_db_key)\n\n        \n        self.path_db_key = \"path_db\"\n        self.path_db = self._get_key_or_create(self.path_db_key)\n\n        \n        self.parent_db_key = \"parent_db\"\n        self.parent_db = self._get_key_or_create(self.parent_db_key)", "docstring": "Constructor.\n\nArgs:\nconf_path (str): Path to the ZEO configuration file. Default\n:attr:`~storage.settings.ZEO_CLIENT_PATH`.\nproject_key (str): Project key, which is used for lookups into ZEO.\nDefault :attr:`~storage.settings.TREE_PROJECT_KEY`.", "source": "juraj-google-style"}
{"code": "def _get_connection(self, conn_or_int_id):\n    key = conn_or_int_id\n    if isinstance(key, str):\n        table = self._int_connections\n    elif isinstance(key, int):\n        table = self._connections\n    else:\n        return None\n    try:\n        data = table[key]\n    except KeyError:\n        return None\n    return data", "docstring": "Get the data for a connection by either conn_id or internal_id\n\nArgs:\nconn_or_int_id (int, string): The external integer connection id or\nand internal string connection id\n\nReturns:\ndict: The context data associated with that connection or None if it cannot\nbe found.\n\nRaises:\nArgumentError: When the key is not found in the list of active connections\nor is invalid.", "source": "codesearchnet"}
{"code": "def WriteTimestamp(timestamp, filename):\n    if timestamp is None:\n        return True\n    timestamp_dir = os.path.dirname(filename)\n    filedesc, temp_filename = tempfile.mkstemp(prefix='nsscache-update-', dir=timestamp_dir)\n    time_string = time.strftime('%Y-%m-%dT%H:%M:%SZ', timestamp)\n    try:\n        os.write(filedesc, b'%s\\n' % time_string.encode())\n        os.fsync(filedesc)\n        os.close(filedesc)\n    except OSError:\n        os.unlink(temp_filename)\n        logging.warning('writing timestamp failed!')\n        return False\n    os.chmod(temp_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)\n    os.rename(temp_filename, filename)\n    logging.debug('wrote timestamp %s to file %r', time_string, filename)\n    return True", "docstring": "Write a given timestamp out to a file, converting to the ISO-8601\nformat.\n\nWe convert internal timestamp format (epoch) to ISO-8601 format, i.e.\nYYYY-MM-DDThh:mm:ssZ which is basically UTC time, then write it out to a\nfile.\n\nArgs:\ntimestamp: A struct time.struct_time or time tuple.\nfilename: A String naming the file to write to.\n\nReturns:\nA boolean indicating success of write.", "source": "github-repos"}
{"code": "def finish(self, end='\\n', dirty=False):\n        \n\n        if not dirty:\n            self.end_time = datetime.now()\n            self.update(self.max_value, force=True)\n\n        StdRedirectMixin.finish(self, end=end)\n        ResizableMixin.finish(self)\n        ProgressBarBase.finish(self)", "docstring": "Puts the ProgressBar bar in the finished state.\n\nAlso flushes and disables output buffering if this was the last\nprogressbar running.\n\nArgs:\nend (str): The string to end the progressbar with, defaults to a\nnewline\ndirty (bool): When True the progressbar kept the current state and\nwon't be set to 100 percent", "source": "juraj-google-style"}
{"code": "def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None, cancel_op=None, queue_closed_exception_types=None):\n    if not queue or not enqueue_ops:\n        raise ValueError('Must provide queue and enqueue_ops.')\n    self._queue = queue\n    self._enqueue_ops = enqueue_ops\n    self._close_op = close_op\n    self._cancel_op = cancel_op\n    if queue_closed_exception_types is not None:\n        if not isinstance(queue_closed_exception_types, tuple) or not queue_closed_exception_types or (not all((issubclass(t, errors.OpError) for t in queue_closed_exception_types))):\n            raise TypeError('queue_closed_exception_types, when provided, must be a tuple of tf.error types, but saw: %s' % queue_closed_exception_types)\n    self._queue_closed_exception_types = queue_closed_exception_types\n    if self._close_op is None:\n        self._close_op = self._queue.close()\n    if self._cancel_op is None:\n        self._cancel_op = self._queue.close(cancel_pending_enqueues=True)\n    if not self._queue_closed_exception_types:\n        self._queue_closed_exception_types = (errors.OutOfRangeError,)\n    else:\n        self._queue_closed_exception_types = tuple(self._queue_closed_exception_types)", "docstring": "Create a QueueRunner from arguments.\n\nArgs:\nqueue: A `Queue`.\nenqueue_ops: List of enqueue ops to run in threads later.\nclose_op: Op to close the queue. Pending enqueue ops are preserved.\ncancel_op: Op to close the queue and cancel pending enqueue ops.\nqueue_closed_exception_types: Tuple of exception types, which indicate\nthe queue has been safely closed.\n\nRaises:\nValueError: If `queue` or `enqueue_ops` are not provided when not\nrestoring from `queue_runner_def`.\nTypeError: If `queue_closed_exception_types` is provided, but is not\na non-empty tuple of error types (subclasses of `tf.errors.OpError`).", "source": "github-repos"}
{"code": "def _get_python_exe_version(python_exe: list[str]):\n    try:\n        python_exe_version = subprocess.check_output(python_exe + ['-V'], stderr=subprocess.STDOUT).decode()\n    except (subprocess.CalledProcessError, FileNotFoundError):\n        return None\n    return _parse_exe_version_string(python_exe_version)", "docstring": "Determine the major and minor version of given Python executable.\n\nArguments:\npython_exe: absolute path to the Python executable\n\nReturns:\nVersion as (major, minor) tuple, or None if it could not be determined.", "source": "github-repos"}
{"code": "def add(self, term):\n        \n        if isinstance(term, Conjunction):\n            for term_ in term.terms:\n                self.add(term_)\n        elif isinstance(term, Term):\n            self._terms.append(term)\n        else:\n            raise TypeError('Not a Term or Conjunction')", "docstring": "Add a term to the conjunction.\n\nArgs:\nterm (:class:`Term`, :class:`Conjunction`): term to add;\nif a :class:`Conjunction`, all of its terms are added\nto the current conjunction.\nRaises:\n:class:`TypeError`: when *term* is an invalid type", "source": "juraj-google-style"}
{"code": "def delete(self, *names: str, pipeline=False):\n    if pipeline:\n        self._pipeline.delete(*names)\n    else:\n        self._db.delete(*names)", "docstring": "Delete one or more keys specified by names.\n\nArgs:\nnames (str): Names of keys to delete\npipeline (bool): True, start a transaction block. Default false.", "source": "codesearchnet"}
{"code": "def _extract_cell_info(self, structure, site_idx, sites, targets, voro, compute_adj_neighbors=False):\n    all_vertices = voro.vertices\n    center_coords = sites[site_idx].coords\n    results = {}\n    for (nn, vind) in voro.ridge_dict.items():\n        if (site_idx in nn):\n            other_site = (nn[0] if (nn[1] == site_idx) else nn[1])\n            if ((- 1) in vind):\n                if self.allow_pathological:\n                    continue\n                else:\n                    raise RuntimeError('This structure is pathological, infinite vertex in the voronoi construction')\n            facets = [all_vertices[i] for i in vind]\n            angle = solid_angle(center_coords, facets)\n            volume = 0\n            for (j, k) in zip(vind[1:], vind[2:]):\n                volume += vol_tetra(center_coords, all_vertices[vind[0]], all_vertices[j], all_vertices[k])\n            face_dist = (np.linalg.norm((center_coords - sites[other_site].coords)) / 2)\n            face_area = ((3 * volume) / face_dist)\n            normal = np.subtract(sites[other_site].coords, center_coords)\n            normal /= np.linalg.norm(normal)\n            results[other_site] = {'site': sites[other_site], 'normal': normal, 'solid_angle': angle, 'volume': volume, 'face_dist': face_dist, 'area': face_area, 'n_verts': len(vind)}\n            if compute_adj_neighbors:\n                results[other_site]['verts'] = vind\n    resultweighted = {}\n    for (nn_index, nstats) in results.items():\n        nn = nstats['site']\n        if nn.is_ordered:\n            if (nn.specie in targets):\n                resultweighted[nn_index] = nstats\n        else:\n            for disordered_sp in nn.species.keys():\n                if (disordered_sp in targets):\n                    resultweighted[nn_index] = nstats\n    if compute_adj_neighbors:\n        adj_neighbors = dict(((i, []) for i in resultweighted.keys()))\n        for (a_ind, a_nninfo) in resultweighted.items():\n            a_verts = set(a_nninfo['verts'])\n            for (b_ind, b_nninfo) in resultweighted.items():\n                if (b_ind > a_ind):\n                    continue\n                if (len(a_verts.intersection(b_nninfo['verts'])) == 2):\n                    adj_neighbors[a_ind].append(b_ind)\n                    adj_neighbors[b_ind].append(a_ind)\n        for (key, neighbors) in adj_neighbors.items():\n            resultweighted[key]['adj_neighbors'] = neighbors\n    return resultweighted", "docstring": "Get the information about a certain atom from the results of a tessellation\n\nArgs:\nstructure (Structure) - Structure being assessed\nsite_idx (int) - Index of the atom in question\nsites ([Site]) - List of all sites in the tessellation\ntargets ([Element]) - Target elements\nvoro - Output of qvoronoi\ncompute_adj_neighbors (boolean) - Whether to compute which neighbors are adjacent\nReturns:\nA dict of sites sharing a common Voronoi facet. Key is facet id\n(not useful) and values are dictionaries containing statistics\nabout the facet:\n- site: Pymatgen site\n- solid_angle - Solid angle subtended by face\n- angle_normalized - Solid angle normalized such that the\nfaces with the largest\n- area - Area of the facet\n- face_dist - Distance between site n and the facet\n- volume - Volume of Voronoi cell for this face\n- n_verts - Number of vertices on the facet\n- adj_neighbors - Facet id's for the adjacent neighbors", "source": "codesearchnet"}
{"code": "def is_http_running_on(port):\n    try:\n        conn = httplib.HTTPConnection(('127.0.0.1:' + str(port)))\n        conn.connect()\n        conn.close()\n        return True\n    except Exception:\n        return False", "docstring": "Check if an http server runs on a given port.\n\nArgs:\nThe port to check.\nReturns:\nTrue if it is used by an http server. False otherwise.", "source": "codesearchnet"}
{"code": "def _to_dict(self, include=None, exclude=None):\n    \n    if (include is not None and\n        not isinstance(include, (list, tuple, set, frozenset))):\n      raise TypeError('include should be a list, tuple or set')\n    if (exclude is not None and\n        not isinstance(exclude, (list, tuple, set, frozenset))):\n      raise TypeError('exclude should be a list, tuple or set')\n    values = {}\n    for prop in self._properties.itervalues():\n      name = prop._code_name\n      if include is not None and name not in include:\n        continue\n      if exclude is not None and name in exclude:\n        continue\n      try:\n        values[name] = prop._get_for_dict(self)\n      except UnprojectedPropertyError:\n        pass  \n    return values", "docstring": "Return a dict containing the entity's property values.\n\nArgs:\ninclude: Optional set of property names to include, default all.\nexclude: Optional set of property names to skip, default none.\nA name contained in both include and exclude is excluded.", "source": "juraj-google-style"}
{"code": "def append_transformed_structures(self, tstructs_or_transmuter):\n    if isinstance(tstructs_or_transmuter, self.__class__):\n        self.transformed_structures.extend(tstructs_or_transmuter.transformed_structures)\n    else:\n        for ts in tstructs_or_transmuter:\n            assert isinstance(ts, TransformedStructure)\n        self.transformed_structures.extend(tstructs_or_transmuter)", "docstring": "Method is overloaded to accept either a list of transformed structures\nor transmuter, it which case it appends the second transmuter\"s\nstructures.\n\nArgs:\ntstructs_or_transmuter: A list of transformed structures or a\ntransmuter.", "source": "codesearchnet"}
{"code": "async def peers(self):\n    response = (await self._api.get('/v1/status/peers'))\n    if (response.status == 200):\n        return set(response.body)", "docstring": "Returns the current Raft peer set\n\nReturns:\nCollection: addresses of peers\n\nThis endpoint retrieves the Raft peers for the datacenter in which\nthe agent is running. It returns a collection of addresses, such as::\n\n[\n\"10.1.10.12:8300\",\n\"10.1.10.11:8300\",\n\"10.1.10.10:8300\"\n]\n\nThis list of peers is strongly consistent and can be useful in\ndetermining when a given server has successfully joined the cluster.", "source": "codesearchnet"}
{"code": "def ensemble_center(self, site_list, indices, cartesian=True):\n    if cartesian:\n        return np.average([site_list[i].coords for i in indices], axis=0)\n    else:\n        return np.average([site_list[i].frac_coords for i in indices], axis=0)", "docstring": "Finds the center of an ensemble of sites selected from\na list of sites.  Helper method for the find_adsorption_sites\nalgorithm.\n\nArgs:\nsite_list (list of sites): list of sites\nindices (list of ints): list of ints from which to select\nsites from site list\ncartesian (bool): whether to get average fractional or\ncartesian coordinate", "source": "codesearchnet"}
{"code": "def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '?api-version=', COMP_API])\n    body = '{\"sku\":{\"capacity\":\"' + str(capacity) + '\"}}'\n    return do_patch(endpoint, body, access_token)", "docstring": "Change the instance count of an existing VM Scale Set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\ncapacity (int): New number of VMs.\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def _make_output_composite_tensors_match(op_type, branch_graphs):\n    assert branch_graphs\n    branch_outputs = [g.structured_outputs for g in branch_graphs]\n    outputs_per_branch = list((len(outs) for outs in branch_outputs))\n    assert len(set(outputs_per_branch)) == 1, outputs_per_branch\n    for output_idx, branch_outs in enumerate(zip(*branch_outputs)):\n        if len(set((type(out) for out in branch_outs))) == 1:\n            continue\n        if not any((isinstance(out, indexed_slices.IndexedSlices) for out in branch_outs)):\n            continue\n        for branch_idx, branch_out in enumerate(branch_outs):\n            if isinstance(branch_out, indexed_slices.IndexedSlices):\n                continue\n            elif isinstance(branch_out, tensor_lib.Tensor):\n                with branch_graphs[branch_idx].as_default():\n                    branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices(branch_out)\n            else:\n                raise TypeError('Cannot reconcile {op_name} {output_idx}-th outputs:\\n  outputs from all branches: {outputs}'.format(op_name='tf.cond' if op_type == _COND else 'tf.switch_case', output_idx=output_idx, outputs=branch_outs))\n    for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):\n        branch_graph.structured_outputs = branch_outs\n        branch_graph.outputs = [t for t in func_graph_module.flatten(branch_outs) if t is not None]", "docstring": "Modifies each branch_graph's outputs to have the same output signature.\n\nCurrently the only transformation implemented is turning a Tensor into an\nequivalent IndexedSlices if the other branch returns an IndexedSlices.\nUpdates branch_graph.{outputs,structured_outputs} for each branch_graph in\nbranch_graphs.\n\nArgs:\nop_type: _COND or _CASE\nbranch_graphs: `list` of `FuncGraph`\n\nRaises:\nTypeError: if a set of outputs cannot be rewritten.", "source": "github-repos"}
{"code": "def get_arrays(self, type_img):\n    if (type_img.lower() == 'lola'):\n        return LolaMap(self.ppdlola, *self.window, path_pdsfile=self.path_pdsfiles).image()\n    elif (type_img.lower() == 'wac'):\n        return WacMap(self.ppdwac, *self.window, path_pdsfile=self.path_pdsfiles).image()\n    else:\n        raise ValueError('The img type has to be either \"Lola\" or \"Wac\"')", "docstring": "Return arrays the region of interest\n\nArgs:\ntype_img (str): Either lola or wac.\n\nReturns:\nA tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\nlongitudes, ``Y`` contains the latitude and ``Z`` the values\nextracted for the region of interest.\n\nNote:\nThe argument has to be either lola or wac. Note case sensitive.\nAll return arrays have the same size.\n\nAll coordinates are in degree.", "source": "codesearchnet"}
{"code": "def _validate_symbol_names(self) -> None:\n    all_symbol_names = set(self._names) | set(self._names_v1)\n    if self._api_name == TENSORFLOW_API_NAME:\n        for subpackage in SUBPACKAGE_NAMESPACES:\n            if any((n.startswith(subpackage) for n in all_symbol_names)):\n                raise InvalidSymbolNameError('@tf_export is not allowed to export symbols under %s.*' % subpackage)\n    elif not all((n.startswith(self._api_name) for n in all_symbol_names)):\n        raise InvalidSymbolNameError('Can only export symbols under package name of component.')", "docstring": "Validate you are exporting symbols under an allowed package.\n\nWe need to ensure things exported by tf_export, etc.\nexport symbols under disjoint top-level package names.\n\nFor TensorFlow, we check that it does not export anything under subpackage\nnames used by components (keras, etc.).\n\nFor each component, we check that it exports everything under its own\nsubpackage.\n\nRaises:\nInvalidSymbolNameError: If you try to export symbol under disallowed name.", "source": "github-repos"}
{"code": "def add_resource(self, resource, *class_args, **class_kwargs):\n        \n        name = resource.__name__.lower()\n        meta_resource = parse_docs(resource.__doc__, [\"$shared\"])\n        self.meta[name] = meta_resource\n        shared = self.meta[\"$shared\"].copy()\n        shared.update(meta_resource.get(\"$shared\", {}))\n        with MarkKey(\"%s.$shared\" % resource.__name__):\n            sp = SchemaParser(validators=self.validators, shared=shared)\n        with MarkKey(resource.__name__):\n            resource = resource(*class_args, **class_kwargs)\n            \n            \n            actions = defaultdict(lambda: {})\n            for action in dir(resource):\n                find = PATTERN_ACTION.findall(action)\n                if not find:\n                    continue\n                httpmethod, action_name = find[0]\n                action_group = actions[action_name]\n                fn = getattr(resource, action)\n                meta_action = parse_docs(\n                    fn.__doc__, [\"$input\", \"$output\", \"$error\"])\n                meta_resource[action] = meta_action\n                with MarkKey(fn.__name__):\n                    action_group[httpmethod] = \\\n                        self.make_action(fn, sp, meta_action)\n\n        for action_name in actions:\n            if action_name == \"\":\n                url = \"/\" + name\n                endpoint = name\n            else:\n                url = \"/{0}/{1}\".format(name, action_name)\n                endpoint = \"{0}@{1}\".format(name, action_name)\n            action_group = actions[action_name]\n            self.app.add_url_rule(\n                url, endpoint=endpoint,\n                view_func=self.make_view(action_group),\n                methods=set(action_group)\n            )", "docstring": "Add resource\n\nParse resource and it's actions, route actions by naming rule.\n\nArgs:\nresource: resource class\nclass_args: class_args\nclass_kwargs: class_kwargs", "source": "juraj-google-style"}
{"code": "def _ParseInternetPasswordRecord(self, parser_mediator, record):\n    key = record.get('_key_', None)\n    if ((not key) or (not key.startswith(b'ssgp'))):\n        raise errors.ParseError('Unsupported Internet password record key value does not start with: \"ssgp\".')\n    protocol_string = codecs.decode('{0:08x}'.format(record['ptcl']), 'hex')\n    protocol_string = codecs.decode(protocol_string, 'utf-8')\n    event_data = KeychainInternetRecordEventData()\n    event_data.account_name = self._ParseBinaryDataAsString(parser_mediator, record['acct'])\n    event_data.comments = self._ParseBinaryDataAsString(parser_mediator, record['crtr'])\n    event_data.entry_name = self._ParseBinaryDataAsString(parser_mediator, record['PrintName'])\n    event_data.protocol = self._PROTOCOL_TRANSLATION_DICT.get(protocol_string, protocol_string)\n    ssgp_hash = codecs.encode(key[4:], 'hex')\n    event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')\n    event_data.text_description = self._ParseBinaryDataAsString(parser_mediator, record['desc'])\n    event_data.type_protocol = self._ParseBinaryDataAsString(parser_mediator, record['atyp'])\n    event_data.where = self._ParseBinaryDataAsString(parser_mediator, record['srvr'])\n    date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])\n    if date_time:\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])\n    if date_time:\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts the information from an Internet password record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord (dict[str, object]): database record.\n\nRaises:\nParseError: if Internet password record cannot be parsed.", "source": "codesearchnet"}
{"code": "def normal_mean(data, variance):\n    \n    if not isinstance(data, np.ndarray):\n        data = np.array(data)\n\n    i_variance_2 = 1 / (variance ** 2)\n    cmm = [0.0]\n    cmm.extend(np.cumsum(data))\n\n    cmm2 = [0.0]\n    cmm2.extend(np.cumsum(np.abs(data)))\n\n    def cost(start, end):\n        \n        cmm2_diff = cmm2[end] - cmm2[start]\n        cmm_diff = pow(cmm[end] - cmm[start], 2)\n        i_diff = end - start\n        diff = cmm2_diff - cmm_diff\n        return (diff/i_diff) * i_variance_2\n\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nNormal distribution with changing mean\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nvariance (float): variance\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "juraj-google-style"}
{"code": "def read_passwd_file(pass_file):\n    with open(pass_file) as fin:\n        passwd = fin.read().strip()\n    return passwd", "docstring": "Read password from external file and retrun as string. The file should\ncontain just single line. Prevents hard-coding password anywhere in this\nscript. IMPORTANT! Password is stored as plain text! Do NOT use with your\npersonal account!\"\n\nArgs:\npass_file (str): /path/to/pass_file", "source": "codesearchnet"}
{"code": "def tpu_core_ids_to_locations(self, tpu_core_ids):\n    return _pywrap_dtensor_device.TPUCoreIDsToLocations(context.context()._handle, self._device_info, tpu_core_ids)", "docstring": "Translates TPU core IDs to TPU core locations.\n\nArgs:\ntpu_core_ids: A list of TPU core IDs. Each one is an unsigned integer.\n\nReturns:\nA list of corresponding TPU core locations.", "source": "github-repos"}
{"code": "def crossCombine(l):\n    resultList = []\n    firstList = l[0]\n    rest = l[1:]\n    if (len(rest) == 0):\n        return firstList\n    for e in firstList:\n        for e1 in crossCombine(rest):\n            resultList.append(combinteDict(e, e1))\n    return resultList", "docstring": "Taken a list of lists, returns a big list of lists contain all the possibilities of elements of sublist combining together.\n\nIt is basically a Combinatorics of list. For example:\n\n>>> crossCombine([[a,a1,a2,...], [b,b1,b2,...]])\n>>> [[a,b], [a,b1], [a,b2], [a1,b], [a1,b1], [a1, b2], [a2,b], [a2,b1], [a2,b2], ...]\n\nFor using in StartCalendarInterval, the syntax of ``l`` is like below:\n``l: [[dic of month], [dict of day]]``\n\nsuch as:\n``l: [[{'month': 1}, {'month': 2}], [{'day': 2}, {'day': 3}, {'day': 4}]]``\n\nArgs:\nl (list[list]): the list of lists you want to crossCombine with.\n\nReturns:\nlist: crossCombined list", "source": "codesearchnet"}
{"code": "def __init__(self,\n               retry_params,\n               retriable_exceptions=_RETRIABLE_EXCEPTIONS,\n               should_retry=lambda r: False):\n    \n    self.retry_params = retry_params\n    self.retriable_exceptions = retriable_exceptions\n    self.should_retry = should_retry", "docstring": "Init.\n\nArgs:\nretry_params: an RetryParams instance.\nretriable_exceptions: a list of exception classes that are retriable.\nshould_retry: a function that takes a result from the tasklet and returns\na boolean. True if the result should be retried.", "source": "juraj-google-style"}
{"code": "def get_task(config):\n    \n    path = os.path.join(config['work_dir'], \"task.json\")\n    message = \"Can't read task from {}!\\n%(exc)s\".format(path)\n    contents = load_json_or_yaml(path, is_path=True, message=message)\n    return contents", "docstring": "Read the task.json from work_dir.\n\nArgs:\nconfig (dict): the running config, to find work_dir.\n\nReturns:\ndict: the contents of task.json\n\nRaises:\nScriptWorkerTaskException: on error.", "source": "juraj-google-style"}
{"code": "def member_command(self, member_id, command):\n        \n        server_id = self._servers.host_to_server_id(\n            self.member_id_to_host(member_id))\n        return self._servers.command(server_id, command)", "docstring": "apply command (start/stop/restart) to member instance of replica set\nArgs:\nmember_id - member index\ncommand - string command (start/stop/restart)\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "def make_preprocessing_fn(frequency_threshold):\n\n    def preprocessing_fn(inputs):\n        \n        result = {'clicked': inputs['clicked']}\n        for name in _INTEGER_COLUMN_NAMES:\n            feature = inputs[name]\n            feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value=-1)\n            feature = tf.squeeze(feature, axis=1)\n            result[name] = feature\n            result[name + '_bucketized'] = tft.bucketize(feature, _NUM_BUCKETS)\n        for name in _CATEGORICAL_COLUMN_NAMES:\n            feature = inputs[name]\n            feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value='')\n            feature = tf.squeeze(feature, axis=1)\n            result[get_transformed_categorical_column_name(name)] = tft.compute_and_apply_vocabulary(feature, frequency_threshold=frequency_threshold)\n        return result\n    return preprocessing_fn", "docstring": "Creates a preprocessing function for criteo.\n\nArgs:\nfrequency_threshold: The frequency_threshold used when generating\nvocabularies for the categorical features.\n\nReturns:\nA preprocessing function.", "source": "github-repos"}
{"code": "def __init__(self, format_string):\n    \n    try:\n      struct_object = struct.Struct(format_string)\n    except (TypeError, struct.error) as exception:\n      raise errors.FormatError((\n          'Unable to create struct object from data type definition '\n          'with error: {0!s}').format(exception))\n\n    super(StructOperation, self).__init__()\n    self._struct = struct_object\n    self._struct_format_string = format_string", "docstring": "Initializes a Python struct-base byte stream operation.\n\nArgs:\nformat_string (str): format string as used by Python struct.\n\nRaises:\nFormatError: if the struct operation cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def copy_and_move_messages(from_channel, to_channel):\n    with BlockSave(Message, query_dict={'channel_id': to_channel.key}):\n        for message in Message.objects.filter(channel=from_channel, typ=15):\n            message.key = ''\n            message.channel = to_channel\n            message.save()", "docstring": "While splitting channel and moving chosen subscribers to new channel,\nold channel's messages are copied and moved to new channel.\n\nArgs:\nfrom_channel (Channel object): move messages from channel\nto_channel (Channel object): move messages to channel", "source": "codesearchnet"}
{"code": "def propagate(self, date):\n    if (type(date) is timedelta):\n        date = (self.orbit.date + date)\n    _date = [float(x) for x in '{:%Y %m %d %H %M %S.%f}'.format(date).split()]\n    (p, v) = self.tle.propagate(*_date)\n    result = [(x * 1000) for x in (p + v)]\n    return self.orbit.__class__(date, result, 'cartesian', 'TEME', self.__class__(), **self.orbit.complements)", "docstring": "Propagate the initialized orbit\n\nArgs:\ndate (Date or datetime.timedelta)\nReturn:\nOrbit", "source": "codesearchnet"}
{"code": "def insert(self, i, species, coords, validate_proximity=False,\n               properties=None):\n        \n        new_site = Site(species, coords, properties=properties)\n        if validate_proximity:\n            for site in self:\n                if site.distance(new_site) < self.DISTANCE_TOLERANCE:\n                    raise ValueError(\"New site is too close to an existing \"\n                                     \"site!\")\n        self._sites.insert(i, new_site)", "docstring": "Insert a site to the molecule.\n\nArgs:\ni (int): Index to insert site\nspecies: species of inserted site\ncoords (3x1 array): coordinates of inserted site\nvalidate_proximity (bool): Whether to check if inserted site is\ntoo close to an existing site. Defaults to True.\nproperties (dict): Dict of properties for the Site.\n\nReturns:\nNew molecule with inserted site.", "source": "juraj-google-style"}
{"code": "def with_step(self, step):\n    self._options['step'] = step\n    return self", "docstring": "Which profile step to use for profiling.\n\nThe 'step' here refers to the step defined by `Profiler.add_step()` API.\n\nArgs:\nstep: When multiple steps of profiles are available, select which step's\nprofile to use. If -1, use average of all available steps.\nReturns:\nself", "source": "github-repos"}
{"code": "def setup_low_rank_optimizer(optimizer_name: str, optimizer_mapping: dict[str, Any], optim_kwargs: dict[str, Any], is_layerwise_supported: bool=True) -> tuple[Any, Any]:\n    is_layerwise = optimizer_name.lower().endswith('layerwise')\n    if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported:\n        raise NotImplementedError(f'Layer-wise {optimizer_name} does not support DDP at this time')\n    optimizer_cls = optimizer_mapping[optimizer_name]\n    if args.optim_target_modules is None:\n        raise ValueError(f'You need to define `optim_target_modules` to use {optimizer_name} optimizers')\n    if not isinstance(args.optim_target_modules, (list, str)):\n        raise ValueError(f\"`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: {args.optim_target_modules}\")\n    if model is None:\n        raise ValueError(f'You need to pass a model to initialize {optimizer_name} optimizer.')\n    all_linear = isinstance(args.optim_target_modules, str) and args.optim_target_modules.replace('_', '-') == 'all-linear'\n    target_params_names = []\n    for module_name, module in model.named_modules():\n        target_module_exists, is_regex = check_target_module_exists(args.optim_target_modules, module_name, return_is_regex=True)\n        if not isinstance(module, nn.Linear):\n            if target_module_exists and (not is_regex):\n                logger.warning(f'{module_name} matched but ignored. {optimizer_name} only supports linear layers.')\n            continue\n        if not target_module_exists and (not all_linear):\n            continue\n        target_params_names.append(module_name + '.weight')\n    if len(target_params_names) == 0:\n        raise ValueError(f'No target modules found for {optimizer_name} ({args.optim_target_modules}).')\n    target_params = [p for n, p in model.named_parameters() if n in target_params_names]\n    non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names]\n    optim_kwargs.update(optim_args)\n    param_groups = [{'params': non_target_params}, {'params': target_params, **optim_kwargs}]\n    if is_layerwise:\n        if args.gradient_accumulation_steps != 1:\n            raise ValueError(f'Layerwise {optimizer_name} does not support gradient accumulation!')\n        optimizer_dict = {}\n        for param in non_target_params:\n            optimizer_dict[param] = optimizer_cls([{'params': [param]}], **optimizer_kwargs)\n        for param in target_params:\n            optimizer_dict[param] = optimizer_cls([{'params': [param], **optim_kwargs}], **optimizer_kwargs)\n\n        def optimizer_hook(param):\n            if param.grad is not None:\n                optimizer_dict[param].step()\n                optimizer_dict[param].zero_grad()\n        for param in model.parameters():\n            if param.requires_grad:\n                param.register_post_accumulate_grad_hook(optimizer_hook)\n        optimizer_cls = LayerWiseDummyOptimizer\n        optimizer_kwargs.update({'optimizer_dict': optimizer_dict})\n    optimizer_kwargs.update({'params': param_groups})\n    return (optimizer_cls, optimizer_kwargs)", "docstring": "Helper function to set up low-rank optimizers like GaLore and Apollo.\n\nArgs:\noptimizer_name (str): Name of the optimizer.\noptimizer_mapping (dict): Mapping of optimizer names to their classes.\noptim_kwargs (dict): Keyword arguments for the optimizer.\nis_layerwise_supported (bool): Whether layerwise optimization is 
supported.\n\nReturns:\nTuple[Any, Any]: Optimizer class and updated optimizer kwargs.", "source": "github-repos"}
{"code": "def indent(lines, amount=2, char=' '):\n    lines = str(lines)\n    padding = (amount * char)\n    return (padding + ('\\n' + padding).join(lines.split('\\n')))", "docstring": "r\"\"\"Indent a string.\n\nPrepends whitespace to every line in the passed string. (Lines are\nseparated by newline characters.)\n\nArgs:\nlines (str): The string to indent.\n\nKeyword Args:\namount (int): The number of columns to indent by.\nchar (str): The character to to use as the indentation.\n\nReturns:\nstr: The indented string.\n\nExample:\n>>> print(indent('line1\\nline2', char='*'))\n**line1\n**line2", "source": "codesearchnet"}
{"code": "def mat2quat(rmat, precise=False):\n    \n    M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]\n    if precise:\n        q = np.empty((4,))\n        t = np.trace(M)\n        if t > M[3, 3]:\n            q[0] = t\n            q[3] = M[1, 0] - M[0, 1]\n            q[2] = M[0, 2] - M[2, 0]\n            q[1] = M[2, 1] - M[1, 2]\n        else:\n            i, j, k = 0, 1, 2\n            if M[1, 1] > M[0, 0]:\n                i, j, k = 1, 2, 0\n            if M[2, 2] > M[i, i]:\n                i, j, k = 2, 0, 1\n            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]\n            q[i] = t\n            q[j] = M[i, j] + M[j, i]\n            q[k] = M[k, i] + M[i, k]\n            q[3] = M[k, j] - M[j, k]\n            q = q[[3, 0, 1, 2]]\n        q *= 0.5 / math.sqrt(t * M[3, 3])\n    else:\n        m00 = M[0, 0]\n        m01 = M[0, 1]\n        m02 = M[0, 2]\n        m10 = M[1, 0]\n        m11 = M[1, 1]\n        m12 = M[1, 2]\n        m20 = M[2, 0]\n        m21 = M[2, 1]\n        m22 = M[2, 2]\n        \n        K = np.array(\n            [\n                [m00 - m11 - m22, 0.0, 0.0, 0.0],\n                [m01 + m10, m11 - m00 - m22, 0.0, 0.0],\n                [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],\n                [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],\n            ]\n        )\n        K /= 3.0\n        \n        w, V = np.linalg.eigh(K)\n        q = V[[3, 0, 1, 2], np.argmax(w)]\n    if q[0] < 0.0:\n        np.negative(q, q)\n    return q[[1, 2, 3, 0]]", "docstring": "Converts given rotation matrix to quaternion.\n\nArgs:\nrmat: 3x3 rotation matrix\nprecise: If isprecise is True, the input matrix is assumed to be a precise\nrotation matrix and a faster algorithm is used.\n\nReturns:\nvec4 float quaternion angles", "source": "juraj-google-style"}
{"code": "def replace_iterable_params(args, kwargs, iterable_params):\n    args = list(args)\n    for name, index in iterable_params:\n        if index < len(args):\n            args[index] = list(args[index])\n        elif name in kwargs:\n            kwargs[name] = list(kwargs[name])\n    return (tuple(args), kwargs)", "docstring": "Returns (args, kwargs) with any iterable parameters converted to lists.\n\nArgs:\nargs: Positional rguments to a function\nkwargs: Keyword arguments to a function.\niterable_params: A list of (name, index) tuples for iterable parameters.\n\nReturns:\nA tuple (args, kwargs), where any positional or keyword parameters in\n`iterable_params` have their value converted to a `list`.", "source": "github-repos"}
{"code": "def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs):\n    setup_sdk_logging(logfile, loglevel)\n    defaults = lago_config.get_section('init')\n    if (workdir is None):\n        workdir = os.path.abspath('.lago')\n    defaults['workdir'] = workdir\n    defaults['virt_config'] = config\n    defaults.update(kwargs)\n    (workdir, prefix) = cmd.do_init(**defaults)\n    return SDK(workdir, prefix)", "docstring": "Initialize the Lago environment\n\nArgs:\nconfig(str): Path to LagoInitFile\nworkdir(str): Path to initalize the workdir, defaults to \"$PWD/.lago\"\n**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`\nlogfile(str): A path to setup a log file.\nloglevel(int): :mod:`logging` log level.\n\nReturns:\n:class:`~lago.sdk.SDK`: Initialized Lago enviornment\n\nRaises:\n:exc:`~lago.utils.LagoException`: If initialization failed", "source": "codesearchnet"}
{"code": "def adjoint(matrix, name=None):\n    with ops.name_scope(name, 'adjoint', [matrix]):\n        matrix = ops.convert_to_tensor(matrix, name='matrix')\n        return array_ops.matrix_transpose(matrix, conjugate=True)", "docstring": "Transposes the last two dimensions of and conjugates tensor `matrix`.\n\nFor example:\n\n```python\nx = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n[4 + 4j, 5 + 5j, 6 + 6j]])\ntf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],\n#  [2 - 2j, 5 - 5j],\n#  [3 - 3j, 6 - 6j]]\n```\n\nArgs:\nmatrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,\nor `complex128` with shape `[..., M, M]`.\nname:  A name to give this `Op` (optional).\n\nReturns:\nThe adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of\nmatrix.", "source": "github-repos"}
{"code": "def get_structure_by_id(self, cod_id, **kwargs):\n    r = requests.get(('http:\n    return Structure.from_str(r.text, fmt='cif', **kwargs)", "docstring": "Queries the COD for a structure by id.\n\nArgs:\ncod_id (int): COD id.\nkwargs: All kwargs supported by\n:func:`pymatgen.core.structure.Structure.from_str`.\n\nReturns:\nA Structure.", "source": "codesearchnet"}
{"code": "def fts_count(self, fts, inv):\n    return len(list(filter((lambda s: self.fts_match(fts, s)), inv)))", "docstring": "Return the count of segments in an inventory matching a given\nfeature mask.\n\nArgs:\nfts (set): feature mask given as a set of (value, feature) tuples\ninv (set): inventory of segments (as Unicode IPA strings)\n\nReturns:\nint: number of segments in `inv` that match feature mask `fts`", "source": "codesearchnet"}
{"code": "def load_default(self):\n    path = ctypes_util.find_library(self._sdk)\n    if (path is None):\n        if (self._windows or self._cygwin):\n            path = next(self.find_library_windows(), None)\n        elif sys.platform.startswith('linux'):\n            path = next(self.find_library_linux(), None)\n        elif sys.platform.startswith('darwin'):\n            path = next(self.find_library_darwin(), None)\n    if (path is not None):\n        return self.load(path)\n    return False", "docstring": "Loads the default J-Link SDK DLL.\n\nThe default J-Link SDK is determined by first checking if ``ctypes``\ncan find the DLL, then by searching the platform-specific paths.\n\nArgs:\nself (Library): the ``Library`` instance\n\nReturns:\n``True`` if the DLL was loaded, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def classify_format(f):\n    \n    l0, l1 = _get_two_lines(f)\n    if loader.glove.check_valid(l0, l1):\n        return _glove\n    elif loader.word2vec_text.check_valid(l0, l1):\n        return _word2vec_text\n    elif loader.word2vec_bin.check_valid(l0, l1):\n        return _word2vec_bin\n    else:\n        raise OSError(b\"Invalid format\")", "docstring": "Determine the format of word embedding file by their content. This operation\nonly looks at the first two lines and does not check the sanity of input\nfile.\n\nArgs:\nf (Filelike):\n\nReturns:\nclass", "source": "juraj-google-style"}
{"code": "def distinct(l):\n    seen = set()\n    seen_add = seen.add\n    return (_ for _ in l if (not ((_ in seen) or seen_add(_))))", "docstring": "Return a list where the duplicates have been removed.\n\nArgs:\nl (list): the list to filter.\n\nReturns:\nlist: the same list without duplicates.", "source": "codesearchnet"}
{"code": "def Search(self, search_base, search_filter, search_scope, attrs):\n    self._last_search_params = (search_base, search_filter, search_scope, attrs)\n    self.log.debug('searching for base=%r, filter=%r, scope=%r, attrs=%r', search_base, search_filter, search_scope, attrs)\n    if 'dn' in attrs:\n        self._dn_requested = True\n    self.message_id = self.conn.search_ext(base=search_base, filterstr=search_filter, scope=search_scope, attrlist=attrs, serverctrls=[self.ldap_controls])", "docstring": "Search the data source.\n\nThe search is asynchronous; data should be retrieved by iterating over\nthe source object itself (see __iter__() below).\n\nArgs:\nsearch_base: the base of the tree being searched\nsearch_filter: a filter on the objects to be returned\nsearch_scope: the scope of the search from ldap.SCOPE_*\nattrs: a list of attributes to be returned\n\nReturns:\nnothing.", "source": "github-repos"}
{"code": "def initializer(self):\n    if self._initializer is not None:\n        return self._initializer\n    else:\n        raise ValueError('The iterator does not have an initializer. This means it was likely created using `tf.data.Dataset.make_one_shot_iterator()`. For an initializable iterator, use `tf.data.Dataset.make_initializable_iterator()` instead.')", "docstring": "A `tf.Operation` that should be run to initialize this iterator.\n\nReturns:\nA `tf.Operation` that should be run to initialize this iterator\n\nRaises:\nValueError: If this iterator initializes itself automatically.", "source": "github-repos"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteUInt32(self.Version)\n        writer.WriteUInt64(self.Services)\n        writer.WriteUInt32(self.Timestamp)\n        writer.WriteUInt16(self.Port)\n        writer.WriteUInt32(self.Nonce)\n        writer.WriteVarString(self.UserAgent)\n        writer.WriteUInt32(self.StartHeight)\n        writer.WriteBool(self.Relay)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def is_artifact_optional(chain, task_id, path):\n    \n    upstream_artifacts = chain.task['payload'].get('upstreamArtifacts', [])\n    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)\n    return path in optional_artifacts_per_task_id.get(task_id, [])", "docstring": "Tells whether an artifact is flagged as optional or not.\n\nArgs:\nchain (ChainOfTrust): the chain of trust object\ntask_id (str): the id of the aforementioned task\n\nReturns:\nbool: True if artifact is optional", "source": "juraj-google-style"}
{"code": "async def import_image(self, data, stream: bool = False):\n        \n        headers = {\"Content-Type\": \"application/x-tar\"}\n        response = await self.docker._query_chunked_post(\n            \"images/load\", \"POST\", data=data, headers=headers\n        )\n        return await json_stream_result(response, stream=stream)", "docstring": "Import tarball of image to docker.\n\nArgs:\ndata: tarball data of image to be imported\n\nReturns:\nTarball of the image", "source": "juraj-google-style"}
{"code": "def list_datasets(self, get_global_public):\n        \n        appending = \"\"\n        if get_global_public:\n            appending = \"public\"\n        url = self.url() + \"/resource/{}dataset/\".format(appending)\n        req = self.remote_utils.get_url(url)\n\n        if req.status_code is not 200:\n            raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n        else:\n            return req.json()", "docstring": "Lists datasets in resources. Setting 'get_global_public' to 'True'\nwill retrieve all public datasets in cloud. 'False' will get user's\npublic datasets.\n\nArguments:\nget_global_public (bool): True if user wants all public datasets in\ncloud. False if user wants only their\npublic datasets.\n\nReturns:\ndict: Returns datasets in JSON format", "source": "juraj-google-style"}
{"code": "def _AddShardedRestoreOps(self, filename_tensor, per_device, restore_sequentially, reshape):\n    sharded_restores = []\n    for shard, (device, saveables) in enumerate(per_device):\n        with ops.device(device):\n            sharded_restores.append(self._AddRestoreOps(filename_tensor, saveables, restore_sequentially, reshape, preferred_shard=shard, name='restore_shard'))\n    return control_flow_ops.group(*sharded_restores, name='restore_all')", "docstring": "Add Ops to restore variables from multiple devices.\n\nArgs:\nfilename_tensor: Tensor for the path of the file to load.\nper_device: A list of (device, SaveableObject) pairs, as returned by\n_GroupByDevices().\nrestore_sequentially: True if we want to restore variables sequentially\nwithin a shard.\nreshape: True if we want to reshape loaded tensors to the shape of the\ncorresponding variable.\n\nReturns:\nAn Operation that restores the variables.", "source": "github-repos"}
{"code": "def desc_from_uri(uri):\n    if (':' in uri):\n        (_, uri) = uri.split(':', 1)\n    query_string = parse_qs(urlparse(uri, 'http').query)\n    if query_string.get('sn'):\n        account_serial_number = query_string['sn'][0]\n        try:\n            account = Account.get_accounts()[account_serial_number]\n            desc = 'SA_RINCON{}_{}'.format(account.service_type, account.username)\n            return desc\n        except KeyError:\n            pass\n    if query_string.get('sid'):\n        service_id = query_string['sid'][0]\n        for service in MusicService._get_music_services_data().values():\n            if (service_id == service['ServiceID']):\n                service_type = service['ServiceType']\n                account = Account.get_accounts_for_service(service_type)\n                if (not account):\n                    break\n                account = account[0]\n                desc = 'SA_RINCON{}_{}'.format(account.service_type, account.username)\n                return desc\n    desc = 'RINCON_AssociatedZPUDN'\n    return desc", "docstring": "Create the content of DIDL desc element from a uri.\n\nArgs:\nuri (str): A uri, eg:\n``'x-sonos-http:track%3a3402413.mp3?sid=2&amp;flags=32&amp;sn=4'``\n\nReturns:\nstr: The content of a desc element for that uri, eg\n``'SA_RINCON519_email@example.com'``", "source": "codesearchnet"}
{"code": "def getOption(self, name):\n    try:\n        value = lock_and_call((lambda : self._impl.getOption(name).value()), self._lock)\n    except RuntimeError:\n        return None\n    else:\n        try:\n            return int(value)\n        except ValueError:\n            try:\n                return float(value)\n            except ValueError:\n                return value", "docstring": "Get the current value of the specified option. If the option does not\nexist, returns None.\n\nArgs:\nname: Option name.\n\nReturns:\nValue of the option.\n\nRaises:\nInvalidArgumet: if the option name is not valid.", "source": "codesearchnet"}
{"code": "def get_query_columns(engine, query):\n    \n    con = engine.connect()\n    result = con.execute(query).fetchone()\n    values = list(result)\n    cols_names = result.keys()\n    cols = OrderedDict()\n    for i in range(len(cols_names)):\n        cols[cols_names[i]] = type(values[i]).__name__\n    return cols", "docstring": "Extract columns names and python typos from query\n\nArgs:\nengine: SQLAlchemy connection engine\nquery: SQL query\n\nReturns:\ndict with columns names and python types", "source": "juraj-google-style"}
{"code": "def heightmap_get_minmax(hm: np.ndarray) -> Tuple[(float, float)]:\n    mi = ffi.new('float *')\n    ma = ffi.new('float *')\n    lib.TCOD_heightmap_get_minmax(_heightmap_cdata(hm), mi, ma)\n    return (mi[0], ma[0])", "docstring": "Return the min and max values of this heightmap.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\n\nReturns:\nTuple[float, float]: The (min, max) values.\n\n.. deprecated:: 2.0\nUse ``hm.min()`` or ``hm.max()`` instead.", "source": "codesearchnet"}
{"code": "def stop_replace(self, accountID, orderID, **kwargs):\n        \n        return self.replace(\n            accountID,\n            orderID,\n            order=StopOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to replace a pending Stop Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Stop Order to replace\nkwargs : The arguments to create a StopOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def implement(self, implementation, for_type=None, for_types=None):\n        \n        unbound_implementation = self.__get_unbound_function(implementation)\n        for_types = self.__get_types(for_type, for_types)\n\n        for t in for_types:\n            self._write_lock.acquire()\n            try:\n                self.implementations.append((t, unbound_implementation))\n            finally:\n                self._write_lock.release()", "docstring": "Registers an implementing function for for_type.\n\nArguments:\nimplementation: Callable implementation for this type.\nfor_type: The type this implementation applies to.\nfor_types: Same as for_type, but takes a tuple of types.\n\nfor_type and for_types cannot both be passed (for obvious reasons.)\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def _to_map_job_config(cls, mr_spec, queue_name):\n    mapper_spec = mr_spec.mapper\n    api_version = mr_spec.params.get('api_version', 0)\n    old_api = (api_version == 0)\n    input_reader_cls = mapper_spec.input_reader_class()\n    input_reader_params = input_readers._get_params(mapper_spec)\n    if issubclass(input_reader_cls, input_reader.InputReader):\n        input_reader_params = input_reader_cls.params_from_json(input_reader_params)\n    output_writer_cls = mapper_spec.output_writer_class()\n    output_writer_params = output_writers._get_params(mapper_spec)\n    return cls(_lenient=old_api, job_name=mr_spec.name, job_id=mr_spec.mapreduce_id, mapper=util.for_name(mapper_spec.handler_spec), input_reader_cls=input_reader_cls, input_reader_params=input_reader_params, output_writer_cls=output_writer_cls, output_writer_params=output_writer_params, shard_count=mapper_spec.shard_count, queue_name=queue_name, user_params=mr_spec.params.get('user_params'), shard_max_attempts=mr_spec.params.get('shard_max_attempts'), done_callback_url=mr_spec.params.get('done_callback'), _force_writes=mr_spec.params.get('force_writes'), _base_path=mr_spec.params['base_path'], _task_max_attempts=mr_spec.params.get('task_max_attempts'), _task_max_data_processing_attempts=mr_spec.params.get('task_max_data_processing_attempts'), _hooks_cls=util.for_name(mr_spec.hooks_class_name), _app=mr_spec.params.get('app_id'), _api_version=api_version)", "docstring": "Converts model.MapreduceSpec back to JobConfig.\n\nThis method allows our internal methods to use JobConfig directly.\nThis method also allows us to expose JobConfig as an API during execution,\ndespite that it is not saved into datastore.\n\nArgs:\nmr_spec: model.MapreduceSpec.\nqueue_name: queue name.\n\nReturns:\nThe JobConfig object for this job.", "source": "codesearchnet"}
{"code": "def configure_interface(self, name, commands):\n    commands = make_iterable(commands)\n    commands.insert(0, ('interface %s' % name))\n    return self.configure(commands)", "docstring": "Configures the specified interface with the commands\n\nArgs:\nname (str): The interface name to configure\ncommands: The commands to configure in the interface\n\nReturns:\nTrue if the commands completed successfully", "source": "codesearchnet"}
{"code": "def query(self, query):\n    if (str(query.key) in self._items):\n        return query(self._items[str(query.key)].values())\n    else:\n        return query([])", "docstring": "Returns an iterable of objects matching criteria expressed in `query`\n\nNaively applies the query operations on the objects within the namespaced\ncollection corresponding to ``query.key.path``.\n\nArgs:\nquery: Query object describing the objects to return.\n\nRaturns:\niterable cursor with all objects matching criteria", "source": "codesearchnet"}
{"code": "def calculate_part_visibility(self, ports):\n    source_port_lookup = {}\n    for (part_name, port_infos) in SourcePortInfo.filter_parts(ports).items():\n        for port_info in port_infos:\n            source_port_lookup[port_info.connected_value] = (part_name, port_info.port)\n    for (part_name, port_infos) in SinkPortInfo.filter_parts(ports).items():\n        for port_info in port_infos:\n            if (port_info.value != port_info.disconnected_value):\n                (conn_part, port) = source_port_lookup.get(port_info.value, (None, None))\n                if (conn_part and (port == port_info.port)):\n                    if (conn_part not in self.part_visibility):\n                        self.part_visibility[conn_part] = True\n                    if (part_name not in self.part_visibility):\n                        self.part_visibility[part_name] = True", "docstring": "Calculate what is connected to what\n\nArgs:\nports: {part_name: [PortInfo]} from other ports", "source": "codesearchnet"}
{"code": "def passgen(length=12, punctuation=False, digits=True, letters=True, case='both', **kwargs):\n    p_min = punctuation\n    p_max = (0 if (punctuation is False) else length)\n    d_min = digits\n    d_max = (0 if (digits is False) else length)\n    a_min = letters\n    a_max = (0 if (letters is False) else length)\n    if (((d_min + p_min) + a_min) > length):\n        raise ValueError('Minimum punctuation and digits number cannot be greater than length')\n    if ((not digits) and (not letters)):\n        raise ValueError('digits and letters cannot be False at the same time')\n    if (length < 1):\n        raise ValueError('length must be greater than zero')\n    if letters:\n        if (case == 'both'):\n            alpha = (string.ascii_uppercase + string.ascii_lowercase)\n        elif (case == 'upper'):\n            alpha = string.ascii_uppercase\n        elif (case == 'lower'):\n            alpha = string.ascii_lowercase\n        else:\n            raise ValueError(\"case can only be 'both', 'upper' or 'lower'\")\n    else:\n        alpha = (string.ascii_uppercase + string.ascii_lowercase)\n    if punctuation:\n        limit_punctuation = kwargs.get('limit_punctuation', '')\n        if (limit_punctuation == ''):\n            punctuation_set = string.punctuation\n        else:\n            punctuation_set = ''.join([p for p in limit_punctuation if (p in string.punctuation)])\n    else:\n        punctuation_set = string.punctuation\n    srandom = random.SystemRandom()\n    p_generator = Generator(punctuation_set, srandom, p_min, p_max)\n    d_generator = Generator(string.digits, srandom, d_min, d_max)\n    a_generator = Generator(alpha, srandom, a_min, a_max)\n    main_generator = SuperGenerator(srandom, length, length)\n    main_generator.add(p_generator)\n    main_generator.add(a_generator)\n    main_generator.add(d_generator)\n    chars = []\n    for i in main_generator:\n        chars.append(i)\n    try:\n        srandom.shuffle(chars, srandom)\n    except:\n        random.shuffle(chars)\n    return ''.join(chars)", "docstring": "Generate random password.\n\nArgs:\nlength (int): The length of the password.  Must be greater than\nzero. Defaults to 12.\npunctuation (bool): Whether to use punctuation or not.  Defaults\nto False.\nlimit_punctuation (str): Limits the allowed puncturation to defined\ncharacters.\ndigits (bool): Whether to use digits or not.  Defaults to True.\nOne of *digits* and *letters* must be True.\nletters (bool): Whether to use letters or not.  Defaults to\nTrue. One of *digits* and *letters* must be True.\ncase (str): Letter case to use.  Accepts 'upper' for upper case,\n'lower' for lower case, and 'both' for both.  Defaults to\n'both'.\n\nReturns:\nstr. The generated password.\n\nRaises:\nValueError\n\nBelow are some basic examples.\n\n>>> passgen()\nz7GlutdEEbnk\n\n>>> passgen(case='upper')\nQ81J9DOAMBRN\n\n>>> passgen(length=6)\nEzJMRX", "source": "codesearchnet"}
{"code": "def results(self, use_cache=True, dialect=None, billing_tier=None):\n    if ((not use_cache) or (self._results is None)):\n        self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)\n    return self._results.results", "docstring": "Retrieves table of results for the query. May block if the query must be executed first.\n\nArgs:\nuse_cache: whether to use cached results or not. Ignored if append is specified.\ndialect : {'legacy', 'standard'}, default 'legacy'\n'legacy' : Use BigQuery's legacy SQL dialect.\n'standard' : Use BigQuery's standard SQL (beta), which is\ncompliant with the SQL 2011 standard.\nbilling_tier: Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge). If unspecified, this\nwill be set to your project default. This can also be used to override your\nproject-wide default billing tier on a per-query basis.\nReturns:\nA QueryResultsTable containing the result set.\nRaises:\nException if the query could not be executed or query response was malformed.", "source": "codesearchnet"}
{"code": "def get_log_file_name(level=INFO):\n  \n  if level not in converter.ABSL_LEVELS:\n    raise ValueError('Invalid absl.logging level {}'.format(level))\n  stream = get_absl_handler().python_handler.stream\n  if (stream == sys.stderr or stream == sys.stdout or\n      not hasattr(stream, 'name')):\n    return ''\n  else:\n    return stream.name", "docstring": "Returns the name of the log file.\n\nFor Python logging, only one file is used and level is ignored. And it returns\nempty string if it logs to stderr/stdout or the log stream has no `name`\nattribute.\n\nArgs:\nlevel: int, the absl.logging level.\n\nRaises:\nValueError: Raised when `level` has an invalid value.", "source": "juraj-google-style"}
{"code": "def rate_to_mcs(rate, bw=20, long_gi=True):\n    \n    if bw not in [20, 40, 80, 160]:\n        raise Exception(\"Unknown bandwidth: %d MHz\" % (bw))\n    idx = int((math.log(bw/10, 2)-1)*2)\n    if not long_gi:\n        idx += 1\n\n    for mcs, rates in MCS_TABLE.items():\n        if abs(rates[idx] - rate) < 1e-3:\n            return mcs\n\n    \n    for idx, r in enumerate(DOT11A_RATES):\n        if abs(r-rate) < 1e-3:\n            return idx\n\n    raise Exception(\"MCS not found: rate=%f, bw=%d, long_gi=%s\" %\n                    (rate, bw, long_gi))", "docstring": "Convert bit rate to MCS index.\n\nArgs:\nrate (float): bit rate in Mbps\nbw (int): bandwidth, 20, 40, 80, ...\nlong_gi (bool): True if long GI is used.\n\nReturns:\nmcs (int): MCS index\n\n>>> rate_to_mcs(120, bw=40, long_gi=False)\n5", "source": "juraj-google-style"}
{"code": "def call(self, inputs, training=None, mask=None):\n    raise NotImplementedError('When subclassing the `Model` class, you should implement a `call` method.')", "docstring": "Calls the model on new inputs.\n\nIn this case `call` just reapplies\nall ops in the graph to the new inputs\n(e.g. build a new computational graph from the provided inputs).\n\nNote: This method should not be called directly. It is only meant to be\noverridden when subclassing `tf.keras.Model`.\nTo call a model on an input, always use the `__call__` method,\ni.e. `model(inputs)`, which relies on the underlying `call` method.\n\nArgs:\ninputs: Input tensor, or dict/list/tuple of input tensors.\ntraining: Boolean or boolean scalar tensor, indicating whether to run\nthe `Network` in training mode or inference mode.\nmask: A mask or list of masks. A mask can be\neither a tensor or None (no mask).\n\nReturns:\nA tensor if there is a single output, or\na list of tensors if there are more than one outputs.", "source": "github-repos"}
{"code": "def _build_cryptographic_parameters(self, value):\n        \n        if value is None:\n            return None\n        elif not isinstance(value, dict):\n            raise TypeError(\"Cryptographic parameters must be a dictionary.\")\n\n        cryptographic_parameters = CryptographicParameters(\n            block_cipher_mode=value.get('block_cipher_mode'),\n            padding_method=value.get('padding_method'),\n            hashing_algorithm=value.get('hashing_algorithm'),\n            key_role_type=value.get('key_role_type'),\n            digital_signature_algorithm=value.get(\n                'digital_signature_algorithm'\n            ),\n            cryptographic_algorithm=value.get('cryptographic_algorithm'),\n            random_iv=value.get('random_iv'),\n            iv_length=value.get('iv_length'),\n            tag_length=value.get('tag_length'),\n            fixed_field_length=value.get('fixed_field_length'),\n            invocation_field_length=value.get('invocation_field_length'),\n            counter_length=value.get('counter_length'),\n            initial_counter_value=value.get('initial_counter_value')\n        )\n        return cryptographic_parameters", "docstring": "Build a CryptographicParameters struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nCryptographicParameters struct.\n\nReturns:\nNone: if value is None\nCryptographicParameters: a CryptographicParameters struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "juraj-google-style"}
{"code": "def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype):\n    tensor_in = self._CreateNumpyTensor(tensor_in_sizes)\n    filter_in = self._CreateNumpyTensor(filter_in_sizes)\n    num_groups = tensor_in_sizes[3] \n    assert num_groups > 1 and filter_in_sizes[2] * num_groups == tensor_in_sizes[3]\n    with test_util.device(True):\n        t1 = constant_op.constant(tensor_in, dtype=dtype)\n        t2 = constant_op.constant(filter_in, dtype=dtype)\n        strides = [1] + strides + [1]\n        dilations = [1] + dilations + [1]\n        if data_format == 'NCHW':\n            t1 = test_util.NHWCToNCHW(t1)\n            strides = test_util.NHWCToNCHW(strides)\n            dilations = test_util.NHWCToNCHW(dilations)\n            t1_splits = array_ops.split(t1, num_groups, axis=1)\n        else:\n            t1_splits = array_ops.split(t1, num_groups, axis=3)\n        t2_splits = array_ops.split(t2, num_groups, axis=3)\n\n        def MakeConv2d(inputs, filters):\n            return nn_ops.conv2d(inputs, filters, strides, padding, dilations=dilations, data_format=data_format)\n        group_conv = MakeConv2d(t1, t2)\n        group_conv_loop = array_ops.concat([MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)], axis=1 if data_format == 'NCHW' else 3)\n        results = self.evaluate([group_conv, group_conv_loop])\n        tol_to_use = 1e-05\n        self.assertAllClose(results[0], results[1], atol=tol_to_use, rtol=tol_to_use)", "docstring": "Verify the output of group convolution is equal to a for-loop implementation.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\ndilations: Dilated rate: [col_dilation, row_dilation]\nstrides: Stride: [col_stride, row_stride]\npadding: Padding type.\ndata_format: Format of the data tensors.\ndtype: Data type for inputs and outputs.", "source": "github-repos"}
{"code": "def create_runner(runner_name: str) -> 'PipelineRunner':\n    runner_name = _RUNNER_MAP.get(runner_name.lower(), _RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))\n    if '.' in runner_name:\n        module, runner = runner_name.rsplit('.', 1)\n        try:\n            return getattr(importlib.import_module(module), runner)()\n        except ImportError:\n            if 'dataflow' in runner_name.lower():\n                raise ImportError('Google Cloud Dataflow runner not available, please install apache_beam[gcp]')\n            elif 'interactive' in runner_name.lower():\n                raise ImportError('Interactive runner not available, please install apache_beam[interactive]')\n            else:\n                raise\n    else:\n        raise ValueError('Unexpected pipeline runner: %s. Valid values are %s or the fully qualified name of a PipelineRunner subclass.' % (runner_name, ', '.join(StandardOptions.KNOWN_RUNNER_NAMES)))", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nCreates a runner instance from a runner class name.\n\nArgs:\nrunner_name: Name of the pipeline runner. Possible values are listed in\n_RUNNER_MAP above.\n\nReturns:\nA runner object.\n\nRaises:\nRuntimeError: if an invalid runner name is used.", "source": "github-repos"}
{"code": "def _GetMetadataUpdate(self, metadata_key='', recursive=True, wait=True, timeout=None):\n    metadata_key = (os.path.join(metadata_key, '') if recursive else metadata_key)\n    metadata_url = os.path.join(METADATA_SERVER, metadata_key)\n    params = {'alt': 'json', 'last_etag': self.etag, 'recursive': recursive, 'timeout_sec': (timeout or self.timeout), 'wait_for_change': wait}\n    while True:\n        response = self._GetMetadataRequest(metadata_url, params=params, timeout=timeout)\n        etag_updated = self._UpdateEtag(response)\n        if (wait and (not etag_updated) and (not timeout)):\n            continue\n        else:\n            break\n    return json.loads(response.read().decode('utf-8'))", "docstring": "Request the contents of metadata server and deserialize the response.\n\nArgs:\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\nwait: bool, True if we should wait for a metadata change.\ntimeout: int, timeout in seconds for returning metadata output.\n\nReturns:\njson, the deserialized contents of the metadata server.", "source": "codesearchnet"}
{"code": "def get_box_newsfeeds(self, box_key, detail_level = None):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.newsfeed_suffix\n\t\t\t\t\t\t])\n\t\treturn self._get_newsfeeds(uri, detail_level)", "docstring": "Function to get newsfeed for a pipeline\nArgs:\nbox \t\t\tpipeline key\ndetail_level \targuments for req str ['ALL', 'CONDENSED']\nreturn \t\t\tlist of feed dicts parse at your convenience", "source": "juraj-google-style"}
{"code": "def setup_mock_socket_file(self, mock_create_connection, resp=MOCK_RESP):\n    fake_file = self.MockSocketFile(resp)\n    fake_conn = mock.MagicMock()\n    fake_conn.makefile.return_value = fake_file\n    mock_create_connection.return_value = fake_conn\n    return fake_file", "docstring": "Sets up a fake socket file from the mock connection.\n\nArgs:\nmock_create_connection: The mock method for creating a method.\nresp: (str) response to give. MOCK_RESP by default.\n\nReturns:\nThe mock file that will be injected into the code.", "source": "github-repos"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(CreateRequestPayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):\n        self._object_type = primitives.Enumeration(enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)\n        self._object_type.read(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the object type.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self.is_tag_next(enums.Tags.TEMPLATE_ATTRIBUTE, local_buffer):\n            self._template_attribute = objects.TemplateAttribute()\n            self._template_attribute.read(local_buffer, kmip_version=kmip_version)\n        else:\n            raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the template attribute.')\n    elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):\n        attributes = objects.Attributes()\n        attributes.read(local_buffer, kmip_version=kmip_version)\n        value = objects.convert_attributes_to_template_attribute(attributes)\n        self._template_attribute = value\n    else:\n        raise exceptions.InvalidKmipEncoding('The Create request payload encoding is missing the attributes structure.')\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the Create request payload and decode it into\nits constituent parts.\n\nArgs:\ninput_buffer (stream): A data buffer containing encoded object\ndata, supporting a read method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the object type or template\nattribute is missing from the encoded payload.", "source": "codesearchnet"}
{"code": "def ensure_files(self, filenames):\n        \n        logger.debug(\"Testing {0} for the following files: {1}\".format(\n            self.working_dir, filenames))\n        dircontent = os.listdir(self.working_dir)\n        for fname in filenames:\n            if fname not in dircontent:\n                return False\n        return True", "docstring": "Checks the student submission for specific files.\n\nArgs:\nfilenames (tuple): The list of file names to be cjecked for.\n\nReturns:\nbool: Indicator if all files are found in the student archive.", "source": "juraj-google-style"}
{"code": "async def retry_request(*args, retry_exceptions=(asyncio.TimeoutError,\n                                                 ScriptWorkerRetryException),\n                        retry_async_kwargs=None, **kwargs):\n    \n    retry_async_kwargs = retry_async_kwargs or {}\n    return await retry_async(request, retry_exceptions=retry_exceptions,\n                             args=args, kwargs=kwargs, **retry_async_kwargs)", "docstring": "Retry the ``request`` function.\n\nArgs:\n*args: the args to send to request() through retry_async().\nretry_exceptions (list, optional): the exceptions to retry on.\nDefaults to (ScriptWorkerRetryException, ).\nretry_async_kwargs (dict, optional): the kwargs for retry_async.\nIf None, use {}.  Defaults to None.\n**kwargs: the kwargs to send to request() through retry_async().\n\nReturns:\nobject: the value from request().", "source": "juraj-google-style"}
{"code": "def wait(self, timeout=None):\n        \n\n        flag = self._finished.wait(timeout=timeout)\n        if flag is False:\n            raise TimeoutExpiredError(\"Timeout waiting for response to event loop operation\")\n\n        if self._exception is not None:\n            self._raise_exception()\n\n        return self._result", "docstring": "Wait for this operation to finish.\n\nYou can specify an optional timeout that defaults to no timeout if\nNone is passed.  The result of the operation is returned from this\nmethod. If the operation raised an exception, it is reraised from this\nmethod.\n\nArgs:\ntimeout (float): The maximum number of seconds to wait before timing\nout.", "source": "juraj-google-style"}
{"code": "def _checkNumerical(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):\n    if (not isinstance(description, str)):\n        raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n    if (not isinstance(inputvalue, (int, long, float))):\n        raise TypeError('The {0} must be numerical. Given: {1!r}'.format(description, inputvalue))\n    if (not isinstance(minvalue, (int, float, long, type(None)))):\n        raise TypeError('The minvalue must be numeric or None. Given: {0!r}'.format(minvalue))\n    if (not isinstance(maxvalue, (int, float, long, type(None)))):\n        raise TypeError('The maxvalue must be numeric or None. Given: {0!r}'.format(maxvalue))\n    if ((not (minvalue is None)) and (not (maxvalue is None))):\n        if (maxvalue < minvalue):\n            raise ValueError('The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'.format(maxvalue, minvalue))\n    if (not (minvalue is None)):\n        if (inputvalue < minvalue):\n            raise ValueError('The {0} is too small: {1}, but minimum value is {2}.'.format(description, inputvalue, minvalue))\n    if (not (maxvalue is None)):\n        if (inputvalue > maxvalue):\n            raise ValueError('The {0} is too large: {1}, but maximum value is {2}.'.format(description, inputvalue, maxvalue))", "docstring": "Check that the given numerical value is valid.\n\nArgs:\n* inputvalue (numerical): The value to be checked.\n* minvalue (numerical): Minimum value  Use None to skip this part of the test.\n* maxvalue (numerical): Maximum value. Use None to skip this part of the test.\n* description (string): Used in error messages for the checked inputvalue\n\nRaises:\nTypeError, ValueError\n\nNote: Can not use the function :func:`_checkString`, as it uses this function internally.", "source": "codesearchnet"}
{"code": "def Default() -> 'Blockchain':\n    if (Blockchain._instance is None):\n        Blockchain._instance = Blockchain()\n        Blockchain.GenesisBlock().RebuildMerkleRoot()\n    return Blockchain._instance", "docstring": "Get the default registered blockchain instance.\n\nReturns:\nobj: Currently set to `neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain`.", "source": "codesearchnet"}
{"code": "def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):\n    min_pixels = images_kwargs.get('min_pixels', None) or self.size['shortest_edge']\n    max_pixels = images_kwargs.get('max_pixels', None) or self.size['longest_edge']\n    patch_size = images_kwargs.get('patch_size', None) or self.patch_size\n    merge_size = images_kwargs.get('merge_size', None) or self.merge_size\n    factor = patch_size * merge_size\n    resized_height, resized_width = smart_resize(height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels)\n    grid_h, grid_w = (resized_height \n    return grid_h * grid_w", "docstring": "A utility that returns number of image patches for a given image size.\n\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nimages_kwargs (`dict`, *optional*)\nAny kwargs to override defaults of the image processor.\nReturns:\n`int`: Number of image patches per image.", "source": "github-repos"}
{"code": "def create_slot_with_initializer(primary, initializer, shape, dtype, name, colocate_with_primary=True, *, copy_xla_sharding=False):\n    validate_shape = shape.is_fully_defined()\n    if isinstance(primary, variables.Variable):\n        prefix = primary._shared_name\n    else:\n        prefix = primary.op.name\n    with variable_scope.variable_scope(None, prefix + '/' + name):\n        if colocate_with_primary:\n            distribution_strategy = distribute_lib.get_strategy()\n            with distribution_strategy.extended.colocate_vars_with(primary):\n                return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)\n        else:\n            return _create_slot_var(primary, initializer, '', validate_shape, shape, dtype, copy_xla_sharding=copy_xla_sharding)", "docstring": "Creates a slot initialized using an `Initializer`.\n\nThe type of the slot is determined by the given value.\n\nArgs:\nprimary: The primary `Variable` or `Tensor`.\ninitializer: An `Initializer`.  The initial value of the slot.\nshape: Shape of the initial value of the slot.\ndtype: Type of the value of the slot.\nname: Name to use for the slot variable.\ncolocate_with_primary: Boolean.  If True the slot is located\non the same device as `primary`.\ncopy_xla_sharding: Boolean. If True also copies XLA sharding\nfrom primary.\n\nReturns:\nA `Variable` object.", "source": "github-repos"}
{"code": "def iter_geno_marker(self, markers, return_index=False):\n        \n        if self._mode != \"r\":\n            raise UnsupportedOperation(\"not available in 'w' mode\")\n\n        \n        if isinstance(markers, str):\n            markers = [markers]\n\n        \n        if return_index:\n            for marker in markers:\n                geno, seek = self.get_geno_marker(marker, return_index=True)\n                yield marker, geno, seek\n        else:\n            for marker in markers:\n                yield marker, self.get_geno_marker(marker)", "docstring": "Iterates over genotypes for a list of markers.\n\nArgs:\nmarkers (list): The list of markers to iterate onto.\nreturn_index (bool): Wether to return the marker's index or not.\n\nReturns:\ntuple: The name of the marker as a string, and its genotypes as a\n:py:class:`numpy.ndarray` (additive format).", "source": "juraj-google-style"}
{"code": "def count_params(self):\n    if not self.built:\n        raise ValueError(f\"You tried to call `count_params` on layer '{self.name}', but the layer isn't built. You can build it manually via: `layer.build(input_shape)`.\")\n    return summary_utils.count_params(self.weights)", "docstring": "Count the total number of scalars composing the weights.\n\nReturns:\nAn integer count.", "source": "github-repos"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    return fvde_file_entry.FVDEFileEntry(\n        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nFVDEFileEntry: file entry or None.", "source": "juraj-google-style"}
{"code": "def __init__(self, unicodeHexValue, block):\n        \n        if unicodeHexValue < 0 or unicodeHexValue > 0x10FFFF:\n            raise ValueError(\"numeric value outside Unicode range\")\n        self.unicodeHexValue = unicodeHexValue\n        \n        self.unichr = py23char(self.unicodeHexValue)\n        self.name = unicodedata.name(self.unichr)\n        self.equivalents = {}\n        self._block = block", "docstring": "Set up a unicode character.\n\nArguments:\nunicodeHexValue -- an integer that should correspond to a\nUnicode code point.\nblock -- the CharacterBlock this character belongs to.\n\nRaises:\nValueError -- if unicodeHexValue is not a valid code point.", "source": "juraj-google-style"}
{"code": "def unit_pos_to_spot(unit_pos) -> ParkingSpot:\n    \n    min_ = 50\n    res = None\n    for airport in parkings:\n        for spot in parkings[airport]:  \n            spot_pos = parkings[airport][spot]  \n            dist = math.hypot(unit_pos[0] - spot_pos[0], unit_pos[1] - spot_pos[1])\n            if dist < min_:\n                min_ = dist  \n                res = ParkingSpot(airport=airport, spot=spot)\n    return res", "docstring": "Translates a unit position to a known parking spot\n\nArgs:\nunit_pos: unit position as Vec2\n\nReturns: ParkingSpot object", "source": "juraj-google-style"}
{"code": "def AddValue(self, registry_value):\n    \n    name = registry_value.name.upper()\n    if name in self._values:\n      raise KeyError(\n          'Value: {0:s} already exists.'.format(registry_value.name))\n\n    self._values[name] = registry_value", "docstring": "Adds a value.\n\nArgs:\nregistry_value (WinRegistryValue): Windows Registry value.\n\nRaises:\nKeyError: if the value already exists.", "source": "juraj-google-style"}
{"code": "def todo(self, **kwargs):\n        \n        path = '%s/%s/todo' % (self.manager.path, self.get_id())\n        self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Create a todo associated to the object.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTodoError: If the todo cannot be set", "source": "juraj-google-style"}
{"code": "def list_folder(cls, session, mailbox, folder):\n    return cls(('/mailboxes/%d/folders/%s/conversations.json' % (mailbox.id, folder.id)), session=session)", "docstring": "Return conversations in a specific folder of a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox that folder is in.\nfolder (helpscout.models.Folder): Folder to list.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "codesearchnet"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Create an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsGithubEnterpriseConfigsCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def to_md_file(string, filename, out_path=\".\"):\n    \n    md_file = \"%s.md\" % filename\n    with open(os.path.join(out_path, md_file), \"w\") as f:\n        f.write(string)\n    print(\"wrote {}.\".format(md_file))", "docstring": "Import a module path and create an api doc from it\n\nArgs:\nstring (str): string with line breaks to write to file.\nfilename (str): filename without the .md\nout_path (str): The output directory", "source": "juraj-google-style"}
{"code": "def plot_iso(axis, step, var):\n    \n    xmesh, ymesh, fld = get_meshes_fld(step, var)\n    if conf.field.shift:\n        fld = np.roll(fld, conf.field.shift, axis=0)\n    axis.contour(xmesh, ymesh, fld, linewidths=1)", "docstring": "Plot isocontours of scalar field.\n\nArgs:\naxis (:class:`matplotlib.axes.Axes`): the axis handler of an\nexisting matplotlib figure where the isocontours should\nbe plotted.\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): the scalar field name.", "source": "juraj-google-style"}
{"code": "def get_repository(self, path):\n    parts = path.split('@', 1)\n    if (len(parts) == 1):\n        parts = ('filesystem', parts[0])\n    (repo_type, location) = parts\n    if (repo_type == 'filesystem'):\n        location = os.path.abspath(location)\n    normalised_path = ('%s@%s' % (repo_type, location))\n    return self._get_repository(normalised_path)", "docstring": "Get a package repository.\n\nArgs:\npath (str): Entry from the 'packages_path' config setting. This may\nsimply be a path (which is managed by the 'filesystem' package\nrepository plugin), or a string in the form \"type@location\",\nwhere 'type' identifies the repository plugin type to use.\n\nReturns:\n`PackageRepository` instance.", "source": "codesearchnet"}
{"code": "def _new_from_rft(self, base_template, rft_file):\n    self._add_entry(base_template)\n    self._add_entry(templates.NEW_FROM_RFT.format(rft_file_path=rft_file, rft_file_name=op.basename(rft_file)))", "docstring": "Append a new file from .rft entry to the journal.\n\nThis instructs Revit to create a new model based on\nthe provided .rft template.\n\nArgs:\nbase_template (str): new file journal template from rmj.templates\nrft_file (str): full path to .rft template to be used", "source": "codesearchnet"}
{"code": "def Group(items, key):\n    result = {}\n    for item in items:\n        result.setdefault(key(item), []).append(item)\n    return result", "docstring": "Groups items by given key function.\n\nArgs:\nitems: An iterable or an iterator of items.\nkey: A function which given each item will return the key.\n\nReturns:\nA dict with keys being each unique key and values being a list of items of\nthat key.", "source": "codesearchnet"}
{"code": "def append(self, future):\n        \n        future.prev = self.tail\n        if self.tail is None:\n            assert self.head is None\n            self.head = future\n        else:\n            self.tail.next = future\n        self.tail = future\n        \n        future.add_done_callback(self.remove)", "docstring": "Append an object to the linked list.\n\nArgs:\nfuture (PlasmaObjectFuture): A PlasmaObjectFuture instance.", "source": "juraj-google-style"}
{"code": "def set_api_url(self, api_url='https:\n    old_api_url = self._api_url\n    old_lang = self._lang\n    self._lang = lang.lower()\n    self._api_url = api_url.format(lang=self._lang)\n    try:\n        self._get_site_info()\n        self.__supported_languages = None\n    except MediaWikiException:\n        self._api_url = old_api_url\n        self._lang = old_lang\n        raise MediaWikiAPIURLError(api_url)\n    self.clear_memoized()", "docstring": "Set the API URL and language\n\nArgs:\napi_url (str): API URL to use\nlang (str): Language of the API URL\nRaises:\n:py:func:`mediawiki.exceptions.MediaWikiAPIURLError`: if the \\\nurl is not a valid MediaWiki site", "source": "codesearchnet"}
{"code": "def GetBullets(self):\n    return self._bullets", "docstring": "Returns the bullet characters list.\n\nUse the list elements in order for best appearance in nested bullet lists,\nwrapping back to the first element for deep nesting. The list size depends\non the console implementation.\n\nReturns:\nA tuple of bullet characters.", "source": "github-repos"}
{"code": "def __init__(self, dims, multiples, name=\"tile_by_dim\"):\n    \n    super(TileByDim, self).__init__(name=name)\n    self._dims = dims\n    self._multiples = multiples\n    if np.unique(dims).size != len(dims):\n      raise ValueError(\"dims must not have any repeated integers.\")\n    if len(multiples) != len(dims):\n      raise ValueError(\n          \"multiples must have the same length as dims: {}.\".format(len(dims)))", "docstring": "Constructs the `TileByDim` module.\n\nArgs:\ndims: The dimensions to tile along, as a list of unique integers.\nmultiples: The multiple of the tiling, as a list of integers. Must\nbe the same length as the `dims` list.\nname: The name of the module.\n\nRaises:\nValueError: If `dims` has non-unique integers, or if the size of\n`multiples` is different from the size of `dims`.", "source": "juraj-google-style"}
{"code": "def get_json(filename):\n    \n    check_if_this_file_exist(filename)\n\n    \n    filename = os.path.abspath(filename)\n    s = command_line(['exiftool', '-G', '-j', '-sort', filename])\n    if s:\n        \n        s = s.decode('utf-8').rstrip('\\r\\n')\n        return json.loads(s)\n    else:\n        return s", "docstring": "Return a json value of the exif\n\nGet a filename and return a JSON object\n\nArguments:\nfilename {string} -- your filename\n\nReturns:\n[JSON] -- Return a JSON object", "source": "juraj-google-style"}
{"code": "def total_cost_function(self, item_a, item_b, time_a, time_b):\n    distances = np.zeros(len(self.weights))\n    for (c, component) in enumerate(self.cost_function_components):\n        distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])\n    total_distance = np.sum((self.weights * distances))\n    return total_distance", "docstring": "Calculate total cost function between two items.\n\nArgs:\nitem_a: STObject\nitem_b: STObject\ntime_a: Timestep in item_a at which cost function is evaluated\ntime_b: Timestep in item_b at which cost function is evaluated\n\nReturns:\nThe total weighted distance between item_a and item_b", "source": "codesearchnet"}
{"code": "def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):\n    gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)\n    gs_blocks_filenames = get_filenames(gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))\n    gs_blocks_fileroots = (re.search(('(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT)), gs_blocks_filename).group(1) for gs_blocks_filename in gs_blocks_filenames)\n    return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh) for fileroot in gs_blocks_fileroots]", "docstring": "Prepare data for all HTML + gold standard blocks examples in ``data_dir``.\n\nArgs:\ndata_dir (str)\nblock_pct_tokens_thresh (float): must be in [0.0, 1.0]\n\nReturns:\nList[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]\n\nSee Also:\n:func:`prepare_data`", "source": "codesearchnet"}
{"code": "def _ReadTable(self, tables, file_object, table_offset):\n    table_header = self._ReadTableHeader(file_object, table_offset)\n    for record_offset in table_header.record_offsets:\n        if (record_offset == 0):\n            continue\n        record_offset += table_offset\n        if (table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO):\n            self._ReadRecordSchemaInformation(tables, file_object, record_offset)\n        elif (table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES):\n            self._ReadRecordSchemaIndexes(tables, file_object, record_offset)\n        elif (table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES):\n            self._ReadRecordSchemaAttributes(tables, file_object, record_offset)\n        else:\n            self._ReadRecord(tables, file_object, record_offset, table_header.record_type)", "docstring": "Reads the table.\n\nArgs:\ntables (dict[int, KeychainDatabaseTable]): tables per identifier.\nfile_object (file): file-like object.\ntable_offset (int): offset of the table relative to the start of\nthe file.\n\nRaises:\nParseError: if the table cannot be read.", "source": "codesearchnet"}
{"code": "def avl_join2(t1, t2):\n    if ((t1 is None) and (t2 is None)):\n        new_root = None\n    elif (t2 is None):\n        new_root = t1\n    elif (t1 is None):\n        new_root = t2\n    else:\n        (new_left, last_node) = avl_split_last(t1)\n        debug = 0\n        if debug:\n            EulerTourTree(root=new_left)._assert_nodes('new_left')\n            EulerTourTree(root=last_node)._assert_nodes('last_node')\n            EulerTourTree(root=t2)._assert_nodes('t2')\n            print('new_left')\n            EulerTourTree(root=new_left).print_tree()\n            print('last_node')\n            EulerTourTree(root=last_node).print_tree()\n            print('t2')\n            EulerTourTree(root=t2).print_tree()\n        new_root = avl_join(new_left, t2, last_node)\n        if debug:\n            print('new_root')\n            EulerTourTree(root=new_root).print_tree()\n            EulerTourTree(root=last_node)._assert_nodes('new_root')\n    return new_root", "docstring": "join two trees without any intermediate key\n\nReturns:\nNode: new_root\n\nO(log(n) + log(m)) = O(r(t1) + r(t2))\n\nFor AVL-Trees the rank r(t1) = height(t1) - 1", "source": "codesearchnet"}
{"code": "def _list_objects(self, client_kwargs, path, max_request_entries):\n        \n        client_kwargs = client_kwargs.copy()\n        if max_request_entries:\n            client_kwargs['MaxKeys'] = max_request_entries\n\n        while True:\n            with _handle_client_error():\n                response = self.client.list_objects_v2(\n                    Prefix=path, **client_kwargs)\n\n            try:\n                for obj in response['Contents']:\n                    yield obj.pop('Key'), obj\n            except KeyError:\n                raise _ObjectNotFoundError('Not found: %s' % path)\n\n            \n            try:\n                client_kwargs['ContinuationToken'] = response[\n                    'NextContinuationToken']\n            except KeyError:\n                \n                break", "docstring": "Lists objects.\n\nargs:\nclient_kwargs (dict): Client arguments.\npath (str): Path relative to current locator.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ngenerator of tuple: object name str, object header dict", "source": "juraj-google-style"}
{"code": "def evaluate(code: str, *, global_vars: Optional[Dict[str, Any]]=None, permission: Optional[permissions.CodePermission]=None, returns_stdout: bool=False, outputs_intermediate: bool=False) -> Union[Any, Dict[str, Any]]:\n    permission = permission or permissions.get_permission()\n    ctx = dict(get_context())\n    if global_vars:\n        ctx.update(global_vars)\n    code_block = parsing.parse(code, permission)\n    global_vars, orig_global_vars = (ctx, ctx.copy())\n    if not code_block.body:\n        return {} if outputs_intermediate else None\n    stdout = io.StringIO()\n    with contextlib.redirect_stdout(stdout):\n        if hasattr(code_block.body[-1], 'value'):\n            last_expr = code_block.body.pop()\n            result_vars = [RESULT_KEY]\n            if isinstance(last_expr, ast.Assign):\n                for name_node in last_expr.targets:\n                    if isinstance(name_node, ast.Name):\n                        result_vars.append(name_node.id)\n            last_expr = ast.Expression(last_expr.value)\n            try:\n                exec(compile(code_block, '', mode='exec'), global_vars)\n                result = eval(compile(last_expr, '', mode='eval'), global_vars)\n            except BaseException as e:\n                raise errors.CodeError(code, e) from e\n            for result_var in result_vars:\n                global_vars[result_var] = result\n        else:\n            try:\n                exec(compile(code_block, '', mode='exec'), global_vars)\n            except BaseException as e:\n                raise errors.CodeError(code, e) from e\n            global_vars[RESULT_KEY] = list(global_vars.values())[-1]\n    if returns_stdout:\n        return stdout.getvalue()\n    if outputs_intermediate:\n        outputs = {}\n        for k, v in global_vars.items():\n            if k == '__builtins__':\n                continue\n            if k not in orig_global_vars or v is not orig_global_vars[k]:\n                outputs[k] = v\n        outputs[STDOUT_KEY] = stdout.getvalue()\n        return outputs\n    return global_vars[RESULT_KEY]", "docstring": "Executes Python code.\n\nFeatures:\n* Fine-grained execution policy for limiting what APIs could be executed.\nThis eliminates the need for sandboxing.\n* It exposes both the final results and intermediate results (variables).\n\nArgs:\ncode: Python code to run.\nglobal_vars: An optional dict as the globals that could be referenced by the\ncode.\npermission: Permission for the Python code to run.\nreturns_stdout: If True, the stdout (a str) will be returned.\noutputs_intermediate: Applicable when returns_stdout is False. If True,\nintermediate output will be outputted as a dict, with the last line's\nvalue accessible by key '__result__' and the std output accessible by\nkey '__stdout__'. Otherwise the value of the last line will be returned.\n\nReturns:\nThe value of the last line of the code block. Or a dict of variable\nnames of all locals to their evaluated values as the output of the code to\nrun. The value for the last line can be accessed by key '__result__'. Or the\nstdout as a str.", "source": "github-repos"}
{"code": "def __init__(self, conf, conn=None):\n    super(LdapSource, self).__init__(conf)\n    self._dn_requested = False\n    self._SetDefaults(conf)\n    self._conf = conf\n    self.ldap_controls = makeSimplePagedResultsControl(self.PAGE_SIZE)\n    self._last_search_params = None\n    if conn is None:\n        rlo = ldap.ldapobject.ReconnectLDAPObject\n        self.conn = rlo(uri=conf['uri'], retry_max=conf['retry_max'], retry_delay=conf['retry_delay'])\n        if conf['tls_starttls'] == 1:\n            self.conn.start_tls_s()\n        if 'ldap_debug' in conf:\n            self.conn.set_option(ldap.OPT_DEBUG_LEVEL, conf['ldap_debug'])\n    else:\n        self.conn = conn\n    self.Bind(conf)", "docstring": "Initialise the LDAP Data Source.\n\nArgs:\nconf: config.Config instance\nconn: An instance of ldap.LDAPObject that'll be used as the connection.", "source": "github-repos"}
{"code": "def start_app(self, bundle_id):\n        \n        \n        \n        self._bundle_id = bundle_id\n        self._session = self._wda.session(bundle_id)\n        return self._session", "docstring": "Start an application\nArgs:\n- bundle_id: (string) apk bundle ID\n\nReturns:\nWDA session object", "source": "juraj-google-style"}
{"code": "def list_vnets(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Network/',\n                        '/virtualNetworks?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List the VNETs in a subscription\t.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of VNets list with properties.", "source": "juraj-google-style"}
{"code": "def segment(self, source, language=None):\n    \n    if language and not language in self.supported_languages:\n      raise ValueError(\n          'Language {} is not supported by MeCab segmenter'.format(language))\n\n    chunks = ChunkList()\n    seek = 0\n    source_str = source.encode('utf-8') if six.PY2 else source\n    results = self.tagger.parse(source_str).split('\\n')[:-2]\n    for row in results:\n      if six.PY2:\n        row = row.decode('utf-8')\n      token = row.split('\\t')\n      word = token[0]\n      labels = token[3].split('-')\n      pos = labels[0]\n      label = labels[1] if len(labels) > 1 else None\n      if source[seek: seek + len(word)] != word:\n        assert source[seek] == ' '\n        assert source[seek + 1: seek + len(word) + 1] == word\n        chunks.append(Chunk.space())\n        seek += 1\n\n      dependency = None\n      if pos in _DEPENDENT_POS_FORWARD:\n        dependency = True\n      elif pos in _DEPENDENT_POS_BACKWARD:\n        dependency = False\n      elif label in _DEPENDENT_LABEL_FORWARD:\n        dependency = True\n      elif label in _DEPENDENT_LABEL_BACKWARD:\n        dependency = False\n\n      chunk = Chunk(word, pos=pos, label=label, dependency=dependency)\n      if chunk.is_punct():\n        chunk.dependency = chunk.is_open_punct()\n      chunks.append(chunk)\n      seek += len(word)\n    chunks.resolve_dependencies()\n    return chunks", "docstring": "Returns a chunk list from the given sentence.\n\nArgs:\nsource (str): Source string to segment.\nlanguage (:obj:`str`, optional): A language code.\n\nReturns:\nA chunk list. (:obj:`budou.chunk.ChunkList`)\n\nRaises:\nValueError: If :obj:`language` is given and it is not included in\n:obj:`supported_languages`.", "source": "juraj-google-style"}
{"code": "def loss_labels(self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array]) -> Dict[str, Tensor]:\n    pred_logits = class_queries_logits\n    batch_size, num_queries, _ = pred_logits.shape\n    criterion = nn.CrossEntropyLoss(weight=self.empty_weight)\n    idx = self._get_predictions_permutation_indices(indices)\n    target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])\n    target_classes = torch.full((batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device)\n    target_classes[idx] = target_classes_o\n    pred_logits_transposed = pred_logits.transpose(1, 2)\n    loss_ce = criterion(pred_logits_transposed, target_classes)\n    losses = {'loss_cross_entropy': loss_ce}\n    return losses", "docstring": "Compute the losses related to the labels using cross entropy.\n\nArgs:\nclass_queries_logits (`torch.Tensor`):\nA tensor of shape `batch_size, num_queries, num_labels`\nclass_labels (`List[torch.Tensor]`):\nList of class labels of shape `(labels)`.\nindices (`Tuple[np.array])`:\nThe indices computed by the Hungarian matcher.\n\nReturns:\n`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:\n- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.", "source": "github-repos"}
{"code": "def guess_content_type_and_encoding(path):\n    for (ext, content_type) in _EXTENSION_TO_MIME_TYPE.items():\n        if path.endswith(ext):\n            return content_type\n    (content_type, encoding) = mimetypes.guess_type(path)\n    content_type = (content_type or 'application/binary')\n    return (content_type, encoding)", "docstring": "Guess the content type of a path, using ``mimetypes``.\n\nFalls back to \"application/binary\" if no content type is found.\n\nArgs:\npath (str): the path to guess the mimetype of\n\nReturns:\nstr: the content type of the file", "source": "codesearchnet"}
{"code": "def execute(self, method, **kwargs):\n        \n        payload = {\n            'id': 1,\n            'jsonrpc': '2.0',\n            'method': method,\n            'params': kwargs\n        }\n\n        credentials = base64.b64encode('{}:{}'.format(self._username, self._password).encode())\n        auth_header_prefix = 'Basic ' if self._auth_header == DEFAULT_AUTH_HEADER else ''\n        headers = {\n            self._auth_header: auth_header_prefix + credentials.decode(),\n            'Content-Type': 'application/json',\n        }\n\n        return self._do_request(headers, payload)", "docstring": "Call remote API procedure\n\nArgs:\nmethod: Procedure name\nkwargs: Procedure named arguments\n\nReturns:\nProcedure result\n\nRaises:\nurllib2.HTTPError: Any HTTP error (Python 2)\nurllib.error.HTTPError: Any HTTP error (Python 3)", "source": "juraj-google-style"}
{"code": "def __init__(self, text='', font_attr=None):\n    self.text = text\n    if font_attr:\n        self.font_attr_segs = [(0, len(text), font_attr)]\n    else:\n        self.font_attr_segs = []", "docstring": "Construct a RichLine with no rich attributes or a single attribute.\n\nArgs:\ntext: Raw text string\nfont_attr: If specified, a single font attribute to be applied to the\nentire text.  Extending this object via concatenation allows creation\nof text with varying attributes.", "source": "github-repos"}
{"code": "def __call__(self, observed_obj, *arg, **kw):\n        \n\n        if self.identify_observed:\n            return self.func_wr()(observed_obj, *arg, **kw)\n        else:\n            return self.func_wr()(*arg, **kw)", "docstring": "Call the function I wrap.\n\nArgs:\n*arg: The arguments passed to me by the observed object.\n**kw: The keyword args passed to me by the observed object.\nobserved_obj: The observed object which called me.\n\nReturns:\nWhatever the function I wrap returns.", "source": "juraj-google-style"}
{"code": "def get_all_disorder_predictions(self, iupred_path='/home/nathan/software/iupred/',\n                                          iupred_exec='iupred', disembl_cmd='/home/nathan/software/DisEMBL-1.4/DisEMBL.py',\n                                     representative_only=True):\n        \n        if representative_only:\n            \n            if not self.representative_sequence:\n                log.warning('{}: no representative sequence set, cannot get disorder properties'.format(self.id))\n                return\n\n            \n            if not self.representative_sequence.seq:\n                log.warning('{}: representative sequence {} set, but no sequence stored. '\n                            'Cannot get disorder properties.'.format(self.id, self.representative_sequence.id))\n                return\n\n            \n            \n            \n            \n            \n            \n            self.representative_sequence.store_disembl_disorder_predictions(disembl_cmd=disembl_cmd)\n\n        if not representative_only:\n            for s in self.sequences:\n                \n                if not s.seq:\n                    log.warning('{}: no sequence stored. '\n                                'Cannot get disorder properties.'.format(s.id))\n                    continue\n\n                else:\n                    \n                    \n                    \n                    \n                    \n                    \n                    s.store_disembl_disorder_predictions(disembl_cmd=disembl_cmd)", "docstring": "Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of the protein sequences.\nResults are stored in the protein's respective SeqProp objects at ``.annotations``\n\nArgs:\nrepresentative_only (bool): If analysis should only be run on the representative sequence", "source": "juraj-google-style"}
{"code": "def __init__(self, dataset, devices, max_buffer_size=1, prefetch_buffer_size=1, source_device='/cpu:0'):\n    options = options_lib.Options()\n    options.experimental_distribute.num_devices = len(devices)\n    if prefetch_buffer_size == 0:\n        options.experimental_optimization.inject_prefetch = False\n    dataset = dataset.with_options(options)\n    self._dataset = dataset._apply_debug_options()\n    self._experimental_slack = dataset.options().experimental_slack\n    self._devices = devices\n    self._source_device = source_device\n    self._source_device_tensor = ops.convert_to_tensor(source_device)\n    self._max_buffer_size = max_buffer_size\n    self._prefetch_buffer_size = prefetch_buffer_size\n    if self._prefetch_buffer_size > self._max_buffer_size:\n        self._max_buffer_size = self._prefetch_buffer_size\n    with ops.device(self._source_device):\n        shared_name = ''\n        if context.executing_eagerly():\n            shared_name = context.anonymous_name()\n        self._multi_device_iterator_resource = gen_dataset_ops.multi_device_iterator(devices=self._devices, shared_name=shared_name, container='', **self._dataset._flat_structure)\n        if context.executing_eagerly():\n            self._resource_deleter = resource_variable_ops.EagerResourceDeleter(handle=self._multi_device_iterator_resource, handle_device=self._source_device)\n        self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(self._dataset._variant_tensor, self._multi_device_iterator_resource, max_buffer_size=self._max_buffer_size)\n    self._prototype_device_datasets = []\n    for i, device in enumerate(self._devices):\n        with ops.device(device):\n            ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource, self._incarnation_id, self._source_device_tensor, self._dataset.element_spec, iterator_is_anonymous=False)\n            self._prototype_device_datasets.append(ds)\n    self._device_iterators = []\n    for i, device in enumerate(self._devices):\n        with ops.device(device):\n            ds = _create_device_dataset(self._prototype_device_datasets[i], self._incarnation_id, self._prefetch_buffer_size, self._experimental_slack)\n            if context.executing_eagerly():\n                self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))\n            else:\n                self._device_iterators.append(dataset_ops.make_initializable_iterator(ds))\n    if not context.executing_eagerly():\n        device_iterator_initializers = [iterator.initializer for iterator in self._device_iterators]\n        self._initializer = control_flow_ops.group(*device_iterator_initializers)", "docstring": "Constructs a MultiDeviceIterator.\n\nArgs:\ndataset: The input dataset to be iterated over.\ndevices: The list of devices to fetch data to.\nmax_buffer_size: Maximum size of the host side per device buffer to keep.\nprefetch_buffer_size: if > 0, then we setup a buffer on each device to\nprefetch into.\nsource_device: The host device to place the `dataset` on.  In order to\nprevent deadlocks, if the prefetch_buffer_size is greater than the\nmax_buffer_size, we set the max_buffer_size to prefetch_buffer_size.", "source": "github-repos"}
{"code": "def insert(parent: ScheduleComponent, time: int, child: ScheduleComponent,\n           name: str = None) -> Schedule:\n    \n    return union(parent, (time, child), name=name)", "docstring": "Return a new schedule with the `child` schedule inserted into the `parent` at `start_time`.\n\nArgs:\nparent: Schedule to be inserted into\ntime: Time to be inserted defined with respect to `parent`\nchild: Schedule to insert\nname: Name of the new schedule. Defaults to name of parent", "source": "juraj-google-style"}
{"code": "def setup_sdk_logging(logfile=None, loglevel=logging.INFO):\n    logging.root.setLevel(logging.DEBUG)\n    logging.root.addHandler(logging.NullHandler())\n    if logfile:\n        fh = logging.FileHandler(logfile)\n        fh.setLevel(loglevel)\n        fh.setFormatter(get_default_log_formatter())\n        logging.root.addHandler(fh)", "docstring": "Setup a NullHandler to the root logger. If ``logfile`` is passed,\nadditionally add a FileHandler in ``loglevel`` level.\n\nArgs:\nlogfile(str): A path to setup a log file.\nloglevel(int): :mod:`logging` log level.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def from_pb(cls, pb):\n        \n        obj = cls._from_pb(pb)\n        obj._pb = pb\n        return obj", "docstring": "Instantiate the object from a protocol buffer.\n\nArgs:\npb (protobuf)\n\nSave a reference to the protocol buffer on the object.", "source": "juraj-google-style"}
{"code": "def relaxng(filename=None):\n    \n    E = ElementMaker(namespace=\"http:\n    grammar = E.grammar( E.start( E.element( \n                E.attribute(name='id',ns=\"http:\n                E.optional( E.attribute(name='version') ),\n                E.optional( E.attribute(name='generator') ),\n                E.element( \n                    E.optional(E.attribute(name='type')),\n                    E.optional(E.attribute(name='src')),\n                    E.element( E.zeroOrMore( E.choice( *relaxng_declarations() ) ) ,name='annotations'),\n                    E.zeroOrMore(\n                        E.element(E.attribute(name='id'), E.text(), name='meta'),\n                    ),\n                    E.zeroOrMore(\n                        E.ref(name=\"foreign-data\"),\n                    ),\n                    E.zeroOrMore(\n                        E.element( \n                            E.attribute(name='id',ns=\"http:\n                            E.optional(E.attribute(name='type')),\n                            E.optional(E.attribute(name='src')),\n                            E.zeroOrMore(\n                                E.element(E.attribute(name='id'), E.text(), name='meta'),\n                            ),\n                            E.zeroOrMore(\n                                E.ref(name=\"foreign-data\"),\n                            ),\n                            name=\"submetadata\"\n                        )\n                    ),\n                    \n                    \n                    \n                    name='metadata',\n                    \n                ),\n                E.interleave(\n                    E.zeroOrMore(\n                        E.ref(name='text'),\n                    ),\n                    E.zeroOrMore(\n                        E.ref(name='speech'),\n                    ),\n                ),\n                name='FoLiA',\n                ns = NSFOLIA\n            ) ),\n            \n            E.define( E.interleave(E.zeroOrMore(E.ref(name=\"any_element\")),E.text()), name=\"any_content\"),\n            E.define( E.element(E.anyName(), E.zeroOrMore(E.ref(name=\"any_attribute\")), E.zeroOrMore(E.ref(name=\"any_content\"))), name=\"any_element\"),\n            E.define( E.attribute(E.anyName()), name=\"any_attribute\"),\n            \n            E.define( E.zeroOrMore(E.attribute(E.anyName(getattr(E,'except')(E.nsName(),E.nsName(ns=\"\"),E.nsName(ns=\"http:\n            datatypeLibrary=\"http:\n            )\n\n    done = {}\n    for c in globals().values():\n        if 'relaxng' in dir(c):\n            if c.relaxng and c.XMLTAG and not c.XMLTAG in done:\n                done[c.XMLTAG] = True\n                definition = c.relaxng()\n                grammar.append( definition )\n                if c.XMLTAG == 'item': \n                    definition_alias = c.relaxng()\n                    definition_alias.set('name','listitem')\n                    definition_alias[0].set('name','listitem')\n                    grammar.append( definition_alias )\n\n    \n    \n    if filename:\n        if sys.version < '3':\n            f = io.open(filename,'w',encoding='utf-8')\n        else:\n            f = io.open(filename,'wb')\n        if LXE:\n            if sys.version < '3':\n                f.write( ElementTree.tostring(relaxng(),pretty_print=True).replace(\"</define>\",\"</define>\\n\\n\") )\n            else:\n                f.write( 
ElementTree.tostring(relaxng(),pretty_print=True).replace(b\"</define>\",b\"</define>\\n\\n\") )\n        else:\n            f.write( ElementTree.tostring(relaxng()).replace(\"</define>\",\"</define>\\n\\n\") )\n        f.close()\n\n    return grammar", "docstring": "Generates a RelaxNG Schema for FoLiA. Optionally saves it to file.\n\nArgs:\nfilename (str): Save the schema to the following filename\n\nReturns:\nlxml.ElementTree: The schema", "source": "juraj-google-style"}
{"code": "def autopep8_diff(fpath):\n    r\n    import utool as ut\n    args = ('autopep8', fpath, '--diff')\n    res = ut.cmd(args, verbose=False)\n    out, err, ret = res\n    ut.print_difftext(out)", "docstring": "r\"\"\"\nArgs:\nfpath (str):  file path string\n\nCommandLine:\npython -m utool.util_dev --test-autopep8_diff --fpath ingest_data.py\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_dev import *  # NOQA\n>>> fpath = ut.get_argval('--fpath', type_=str, default='ingest_data.py')\n>>> result = autopep8_diff(fpath)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def flux_randomization(model, threshold, tfba, solver):\n    optimize = {}\n    for reaction_id in model.reactions:\n        if model.is_reversible(reaction_id):\n            optimize[reaction_id] = ((2 * random.random()) - 1.0)\n        else:\n            optimize[reaction_id] = random.random()\n    fba = _get_fba_problem(model, tfba, solver)\n    for (reaction_id, value) in iteritems(threshold):\n        fba.prob.add_linear_constraints((fba.get_flux_var(reaction_id) >= value))\n    fba.maximize(optimize)\n    for reaction_id in model.reactions:\n        (yield (reaction_id, fba.get_flux(reaction_id)))", "docstring": "Find a random flux solution on the boundary of the solution space.\n\nThe reactions in the threshold dictionary are constrained with the\nassociated lower bound.\n\nArgs:\nmodel: MetabolicModel to solve.\nthreshold: dict of additional lower bounds on reaction fluxes.\ntfba: If True enable thermodynamic constraints.\nsolver: LP solver instance to use.\n\nReturns:\nAn iterator of reaction ID and reaction flux pairs.", "source": "codesearchnet"}
{"code": "def delete(self, addon_id, data={}, **kwargs):\n        \n        return super(Addon, self).delete(addon_id, data, **kwargs)", "docstring": "Delete addon for given id\n\nArgs:\naddon_id : Id for which addon object has to be deleted", "source": "juraj-google-style"}
{"code": "def as_report_request(self, rules, timer=datetime.utcnow):\n    if (not self.service_name):\n        raise ValueError(u'the service name must be set')\n    op = super(Info, self).as_operation(timer=timer)\n    if (op.operationId and op.operationName):\n        labels = {}\n        for known_label in rules.labels:\n            known_label.do_labels_update(self, labels)\n        labels[_KNOWN_LABELS.SCC_PLATFORM.label_name] = self.platform.friendly_string()\n        labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT\n        labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT\n        if labels:\n            op.labels = encoding.PyValueToMessage(sc_messages.Operation.LabelsValue, labels)\n        for known_metric in rules.metrics:\n            known_metric.do_operation_update(self, op)\n    now = timer()\n    op.logEntries = [self._as_log_entry(l, now) for l in rules.logs]\n    return sc_messages.ServicecontrolServicesReportRequest(serviceName=self.service_name, reportRequest=sc_messages.ReportRequest(operations=[op]))", "docstring": "Makes a `ServicecontrolServicesReportRequest` from this instance\n\nArgs:\nrules (:class:`ReportingRules`): determines what labels, metrics and\nlogs to include in the report request.\ntimer: a function that determines the current time\n\nReturn:\na ``ServicecontrolServicesReportRequest`` generated from this instance\ngoverned by the provided ``rules``\n\nRaises:\nValueError: if the fields in this instance cannot be used to create\na valid ``ServicecontrolServicesReportRequest``", "source": "codesearchnet"}
{"code": "def from_ops(*operations: ops.OP_TREE, strategy: InsertStrategy=InsertStrategy.EARLIEST, device: devices.Device=devices.UnconstrainedDevice) -> 'Circuit':\n    result = Circuit(device=device)\n    result.append(operations, strategy)\n    return result", "docstring": "Creates an empty circuit and appends the given operations.\n\nArgs:\noperations: The operations to append to the new circuit.\nstrategy: How to append the operations.\ndevice: Hardware that the circuit should be able to run on.\n\nReturns:\nThe constructed circuit containing the operations.", "source": "codesearchnet"}
{"code": "def _apply_with_random_selector(x, func, num_cases):\n  \n  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n  \n  return control_flow_ops.merge([\n      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)\n      for case in range(num_cases)\n  ])[0]", "docstring": "Computes func(x, sel), with sel sampled from [0...num_cases-1].\n\nArgs:\nx: input Tensor.\nfunc: Python function to apply.\nnum_cases: Python int32, number of cases to sample sel from.\n\nReturns:\nThe result of func(x, sel), where func receives the value of the\nselector as a python integer, but sel is sampled dynamically.", "source": "juraj-google-style"}
{"code": "def diffs_prof(step):\n    (diff, rad) = diff_prof(step)\n    return (_scale_prof(step, diff, rad), rad)", "docstring": "Scaled diffusion.\n\nThis computation takes sphericity into account if necessary.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the diffusion and the radial position\nat which it is evaluated.", "source": "codesearchnet"}
{"code": "def correct_pad(kernel_size: Union[int, Tuple], adjust: bool=True):\n    if isinstance(kernel_size, int):\n        kernel_size = (kernel_size, kernel_size)\n    correct = (kernel_size[0] \n    if adjust:\n        return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])\n    else:\n        return (correct[1], correct[1], correct[0], correct[0])", "docstring": "Utility function to get the tuple padding value for the depthwise convolution.\n\nArgs:\nkernel_size (`int` or `tuple`):\nKernel size of the convolution layers.\nadjust (`bool`, *optional*, defaults to `True`):\nAdjusts padding value to apply to right and bottom sides of the input.", "source": "github-repos"}
{"code": "def set_evaluation_parameter(self, parameter_name, parameter_value):\n    if ('evaluation_parameters' not in self._expectations_config):\n        self._expectations_config['evaluation_parameters'] = {}\n    self._expectations_config['evaluation_parameters'].update({parameter_name: parameter_value})", "docstring": "Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\nparameterized expectations.\n\nArgs:\nparameter_name (string): The name of the kwarg to be replaced at evaluation time\nparameter_value (any): The value to be used", "source": "codesearchnet"}
{"code": "def __init__(self, cluster_id, variant_id, case_id):\n        \n        super(Identity, self).__init__(\n            cluster_id=cluster_id,\n            variant_id=variant_id,\n            case_id=case_id,\n        )", "docstring": "Construct a identity object\n\nArgs:\ncluster_id(str): Ref to a cluster\nvariant_id (str): ID from variant\ncase_id (str): What case it belongs to", "source": "juraj-google-style"}
{"code": "def format_level_2_memory(memory, header=None):\n    memory_list = []\n    for shot_memory in memory:\n        memory_list.append(format_counts_memory(shot_memory, header))\n    return memory_list", "docstring": "Format an experiment result memory object for measurement level 2.\n\nArgs:\nmemory (list): Memory from experiment with `meas_level==2` and `memory==True`.\nheader (dict): the experiment header dictionary containing\nuseful information for postprocessing.\n\nReturns:\nlist[str]: List of bitstrings", "source": "codesearchnet"}
{"code": "def formula_double_format(afloat, ignore_ones=True, tol=1e-8):\n    \n    if ignore_ones and afloat == 1:\n        return \"\"\n    elif abs(afloat - int(afloat)) < tol:\n        return str(int(afloat))\n    else:\n        return str(round(afloat, 8))", "docstring": "This function is used to make pretty formulas by formatting the amounts.\nInstead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.\n\nArgs:\nafloat (float): a float\nignore_ones (bool): if true, floats of 1 are ignored.\ntol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2\n\nReturns:\nA string representation of the float for formulas.", "source": "juraj-google-style"}
{"code": "def _write_object_proto(self, proto, options):\n    resource_variable_ops.write_object_proto_for_resource_variable(self, proto, options)\n    values_util.write_object_proto(self, proto, options)", "docstring": "Update a SavedObject proto for the caller.\n\nIf a DistributedVariable object supports this method, it will be called when\nsaving with a pre-built `SavedObject` proto representing the object, plus an\ninstance of `SaveOptions`. This method is then free to modify that proto\ninstance.\n\n`DistributedVariable` with `AUTO` or `ON_WRITE` synchronization optionally\nwrite out information about their components to the\n`experimental_distributed_variable_components` field of a\n`SavedVariable` (depending on the `SaveOptions` variable policy).\n\nArgs:\nproto: A pre-built `SavedObject` proto for this object. It is assumed this\nwill be a `SavedVariable` instance.\noptions: A `SaveOptions` instance.", "source": "github-repos"}
{"code": "def _parse_saved_model_args(self, always_enable_saved_model_import=False):\n    if not self.experimental_new_converter:\n        self.saved_model_dir = None\n        return\n    if self.saved_model_dir:\n        try:\n            saved_model_proto, _ = _parse_saved_model_with_debug_info(self.saved_model_dir)\n        except OSError:\n            self.saved_model_dir = None\n            return\n        if not always_enable_saved_model_import and (not self._contains_function_with_implements_attr(saved_model_proto)):\n            self.saved_model_dir = None\n            return\n        if not self._saved_model_exported_names:\n            self._saved_model_exported_names = []\n        self._saved_model_version = saved_model_proto.saved_model_schema_version\n        if self._saved_model_version == 0:\n            self.saved_model_dir = None\n            logging.warning('SavedModel schema version is zero.')\n            return\n        if self._saved_model_version not in [1, 2]:\n            raise ValueError('SavedModel file format({0}) is not supported'.format(self._saved_model_version))", "docstring": "Parses SavedModel arguments from the given Keras/RNN SavedModel.\n\nArgs:\nalways_enable_saved_model_import: Bool. When the value is true, it enables\nMLIR saved model import path regardless of checking the conditions.", "source": "github-repos"}
{"code": "def find_effect_class(self, path) -> Type[Effect]:\n        \n        package_name, class_name = parse_package_string(path)\n\n        if package_name:\n            package = self.get_package(package_name)\n            return package.find_effect_class(class_name, raise_for_error=True)\n\n        for package in self.packages:\n            effect_cls = package.find_effect_class(class_name)\n            if effect_cls:\n                return effect_cls\n\n        raise EffectError(\"No effect class '{}' found in any packages\".format(class_name))", "docstring": "Find an effect class by class name or full python path to class\n\nArgs:\npath (str): effect class name or full python path to effect class\n\nReturns:\nEffect class\n\nRaises:\nEffectError if no class is found", "source": "juraj-google-style"}
{"code": "def add_attribute_label(self, attribute_id, label):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        return self.tc_requests.add_attribute_label(\n            self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner\n        )", "docstring": "Adds a security labels to a attribute\n\nArgs:\nattribute_id:\nlabel:\n\nReturns: A response json", "source": "juraj-google-style"}
{"code": "def file_delete(filename, settings):\n        \n        if len(settings) != 1:\n            raise ValueError(\"Settings must only contain one item with key \"\n                             \"'mode'.\")\n        for k, v in settings.items():\n            if k == \"mode\" and v == \"actual\":\n                try:\n                    os.remove(filename)\n                except OSError:\n                    \n                    pass\n            elif k == \"mode\" and v == \"simulated\":\n                print(\"Simulated removal of {}\".format(filename))", "docstring": "Deletes a file. {'_file_delete': {'mode': \"actual\"}}\n\nArgs:\nfilename (str): Filename.\nsettings (dict): Must be {\"mode\": actual/simulated}. Simulated\nmode only prints the action without performing it.", "source": "juraj-google-style"}
{"code": "def get_student_certificates(self, username, course_ids=None):\n    if (course_ids is None):\n        enrollments_client = CourseEnrollments(self.requester, self.base_url)\n        enrollments = enrollments_client.get_student_enrollments()\n        course_ids = list(enrollments.get_enrolled_course_ids())\n    all_certificates = []\n    for course_id in course_ids:\n        try:\n            all_certificates.append(self.get_student_certificate(username, course_id))\n        except HTTPError as error:\n            if (error.response.status_code >= 500):\n                raise\n    return Certificates(all_certificates)", "docstring": "Returns an Certificates object with the user certificates\n\nArgs:\nusername (str): an edx user's username\ncourse_ids (list): a list of edX course ids.\n\nReturns:\nCertificates: object representing the student certificates for a course", "source": "codesearchnet"}
{"code": "def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):\n    for segment in self.segments:\n        segment.simplify(eps, max_dist_error, max_speed_error, topology_only)\n    return self", "docstring": "In-place simplification of segments\n\nArgs:\nmax_dist_error (float): Min distance error, in meters\nmax_speed_error (float): Min speed error, in km/h\ntopology_only: Boolean, optional. True to keep\nthe topology, neglecting velocity and time\naccuracy (use common Douglas-Ramen-Peucker).\nFalse (default) to simplify segments keeping\nthe velocity between points.\nReturns:\nThis track", "source": "codesearchnet"}
{"code": "def simplify(self, assignments):\n    raise NotImplementedError()", "docstring": "Simplify this term, given a list of possible values for each variable.\n\nArgs:\nassignments: A list of possible values for each variable. A dictionary\nmapping strings (variable name) to sets of strings (value names).\n\nReturns:\nA new BooleanTerm, potentially simplified.", "source": "github-repos"}
{"code": "def create_in_hdx(self, allow_no_resources=False, update_resources=True, update_resources_by_name=True, remove_additional_resources=False, create_default_views=True, hxl_update=True):\n    self.check_required_fields(allow_no_resources=allow_no_resources)\n    loadedid = None\n    if ('id' in self.data):\n        if self._dataset_load_from_hdx(self.data['id']):\n            loadedid = self.data['id']\n        else:\n            logger.warning(('Failed to load dataset with id %s' % self.data['id']))\n    if (not loadedid):\n        if self._dataset_load_from_hdx(self.data['name']):\n            loadedid = self.data['name']\n    if loadedid:\n        logger.warning(('Dataset exists. Updating %s' % loadedid))\n        self._dataset_merge_hdx_update(update_resources=update_resources, update_resources_by_name=update_resources_by_name, remove_additional_resources=remove_additional_resources, create_default_views=create_default_views, hxl_update=hxl_update)\n        return\n    filestore_resources = list()\n    if self.resources:\n        ignore_fields = ['package_id']\n        for resource in self.resources:\n            resource.check_required_fields(ignore_fields=ignore_fields)\n            if resource.get_file_to_upload():\n                filestore_resources.append(resource)\n                resource['url'] = Dataset.temporary_url\n        self.data['resources'] = self._convert_hdxobjects(self.resources)\n    self._save_to_hdx('create', 'name')\n    self._add_filestore_resources(filestore_resources, False, hxl_update)", "docstring": "Check if dataset exists in HDX and if so, update it, otherwise create it\n\nArgs:\nallow_no_resources (bool): Whether to allow no resources. Defaults to False.\nupdate_resources (bool): Whether to update resources (if updating). Defaults to True.\nupdate_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.\nremove_additional_resources (bool): Remove additional resources found in dataset (if updating). Defaults to False.\ncreate_default_views (bool): Whether to call package_create_default_resource_views (if updating). Defaults to True.\nhxl_update (bool): Whether to call package_hxl_update. Defaults to True.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_matrix(self, x1=None, x2=None, include_diagonal=None, include_general=None):\n    if ((x1 is None) and (x2 is None)):\n        if ((self._t is None) or (not self.computed)):\n            raise RuntimeError(\"you must call 'compute' first\")\n        K = self.kernel.get_value((self._t[(:, None)] - self._t[(None, :)]))\n        if ((include_diagonal is None) or include_diagonal):\n            K[np.diag_indices_from(K)] += ((self._yerr ** 2) + self.kernel.jitter)\n        if (((include_general is None) or include_general) and len(self._A)):\n            K[np.diag_indices_from(K)] += self._A\n            K += np.tril(np.dot(self._U.T, self._V), (- 1))\n            K += np.triu(np.dot(self._V.T, self._U), 1)\n        return K\n    incl = False\n    x1 = np.ascontiguousarray(x1, dtype=float)\n    if (x2 is None):\n        x2 = x1\n        incl = ((include_diagonal is not None) and include_diagonal)\n    K = self.kernel.get_value((x1[(:, None)] - x2[(None, :)]))\n    if incl:\n        K[np.diag_indices_from(K)] += self.kernel.jitter\n    return K", "docstring": "Get the covariance matrix at given independent coordinates\n\nArgs:\nx1 (Optional[array[n1]]): The first set of independent coordinates.\nIf this is omitted, ``x1`` will be assumed to be equal to ``x``\nfrom a previous call to :func:`GP.compute`.\nx2 (Optional[array[n2]]): The second set of independent\ncoordinates. If this is omitted, ``x2`` will be assumed to be\n``x1``.\ninclude_diagonal (Optional[bool]): Should the white noise and\n``yerr`` terms be included on the diagonal?\n(default: ``False``)", "source": "codesearchnet"}
{"code": "def get_pipeline_field(self, pipeline_key, field_key = None):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri, \n\t\t\t\t\t\tself.pipelines_suffix, \n\t\t\t\t\t\tpipeline_key, \n\t\t\t\t\t\tself.fields_suffix\n\t\t\t\t\t\t])\n\t\tif field_key:\n\t\t\turi = '/'.join([uri, field_key])\n\n\t\treturn self._req('get', uri)", "docstring": "Gets one/all field in a pipeline\nArgs:\npipeline_key \t\tkey for pipeline\nfield_key \t\t\tkey for field (default: None i.e. ALL)\nreturns\t\t\t\tstatus code, field dict or list thereof", "source": "juraj-google-style"}
{"code": "def guess_listing_type(lines, threshold=100):\n    scores = {'unix': 0, 'msdos': 0, 'nlst': 0}\n    for line in lines:\n        if (not line):\n            continue\n        if re.search('---|r--|rw-|rwx', line):\n            scores['unix'] += 1\n        if (('<DIR>' in line) or re.search('^.{0,4}\\\\d\\\\d', line)):\n            scores['msdos'] += 1\n        words = line.split(' ', 1)\n        if (len(words) == 1):\n            scores['nlst'] += 1\n        if (max(scores.values()) > threshold):\n            break\n    top = max(scores.items(), key=(lambda item: item[1]))\n    if top[1]:\n        return top[0]\n    else:\n        return 'unknown'", "docstring": "Guess the style of directory listing.\n\nReturns:\nstr: ``unix``, ``msdos``, ``nlst``, ``unknown``.", "source": "codesearchnet"}
{"code": "def __init__(self, cipher_suites=None):\n        \n        super(BasicAuthenticationSuite, self).__init__(cipher_suites)\n        self._protocol = ssl.PROTOCOL_TLSv1", "docstring": "Create a BasicAuthenticationSuite object.\n\nArgs:\ncipher_suites (list): A list of strings representing the names of\ncipher suites to use. Overrides the default set of cipher\nsuites. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def _count_ops(self, graphdef: graph_pb2.GraphDef, op_names: Collection[str], attr_name: str='', attr_val: _AttrValType=None, get_op_name: bool=False) -> int:\n    op_count = 0\n    for op_name in op_names:\n        op_count += self._count_op_with_name_and_attribute(nodes=graphdef.node, op_name=op_name, attr_name=attr_name, attr_val=attr_val, get_op_name=get_op_name)\n        for func in graphdef.library.function:\n            op_count += self._count_op_with_name_and_attribute(nodes=func.node_def, op_name=op_name, attr_name=attr_name, attr_val=attr_val, get_op_name=get_op_name)\n    return op_count", "docstring": "Returns the number of given ops in a graph def.\n\nArgs:\ngraphdef: A GraphDef object.\nop_names: Names of the operations to find within the graph.\nattr_name: Name of the attribute of the ops to match.\nattr_val: Value of the attr_name to check.\nget_op_name: If set True, checks node.name rather than node.op.\n\nReturns:\nThe number of occurrences of the given ops in a graph. The ops will be\ncounted only if the ops are named 'op_name' and has 'attr_val' if\n'attr_name' is specified.", "source": "github-repos"}
{"code": "def select_starts_ends(start, end, p_mask, attention_mask, min_null_score=1000000, top_k=1, handle_impossible_answer=False, max_answer_len=15):\n    undesired_tokens = np.abs(np.array(p_mask) - 1)\n    if attention_mask is not None:\n        undesired_tokens = undesired_tokens & attention_mask\n    undesired_tokens_mask = undesired_tokens == 0.0\n    start = np.where(undesired_tokens_mask, -10000.0, start)\n    end = np.where(undesired_tokens_mask, -10000.0, end)\n    start = np.exp(start - start.max(axis=-1, keepdims=True))\n    start = start / start.sum()\n    end = np.exp(end - end.max(axis=-1, keepdims=True))\n    end = end / end.sum()\n    if handle_impossible_answer:\n        min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item())\n    start[0, 0] = end[0, 0] = 0.0\n    starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens)\n    return (starts, ends, scores, min_null_score)", "docstring": "Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses\n`decode_spans()` to generate probabilities for each span to be the actual answer.\n\nArgs:\nstart (`np.ndarray`): Individual start logits for each token.\nend (`np.ndarray`): Individual end logits for each token.\np_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer\nattention_mask (`np.ndarray`): The attention mask generated by the tokenizer\nmin_null_score(`float`): The minimum null (empty) answer score seen so far.\ntopk (`int`): Indicates how many possible answer span(s) to extract from the model output.\nhandle_impossible_answer(`bool`): Whether to allow null (empty) answers\nmax_answer_len (`int`): Maximum size of the answer to extract from the model's output.", "source": "github-repos"}
{"code": "def CreateKey(self, prikey=None):\n    account = super(UserWallet, self).CreateKey(private_key=prikey)\n    self.OnCreateAccount(account)\n    contract = WalletContract.CreateSignatureContract(account.PublicKey)\n    self.AddContract(contract)\n    return account", "docstring": "Create a KeyPair and store it encrypted in the database.\n\nArgs:\nprivate_key (iterable_of_ints): (optional) 32 byte private key.\n\nReturns:\nKeyPair: a KeyPair instance.", "source": "codesearchnet"}
{"code": "def integer_fractional_parts(number):\n    \n    radix_point = number.index(\".\")\n    integer_part = number[:radix_point]\n    fractional_part = number[radix_point:]\n    return(integer_part, fractional_part)", "docstring": "Returns a tuple of the integer and fractional parts of a number.\n\nArgs:\nnumber(iterable container): A number in the following form:\n(..., \".\", int, int, int, ...)\n\nReturns:\n(integer_part, fractional_part): tuple.\n\nExample:\n>>> integer_fractional_parts((1,2,3,\".\",4,5,6))\n((1, 2, 3), ('.', 4, 5, 6))", "source": "juraj-google-style"}
{"code": "def find_item(self, fq_name):\n        \n        names = fq_name.split(self._separator)\n        current = self._yapconf_items\n        for name in names:\n            if isinstance(current, (YapconfDictItem, YapconfListItem)):\n                current = current.children\n\n            if name not in current:\n                return None\n            current = current[name]\n        return current", "docstring": "Find an item in the specification by fully qualified name.\n\nArgs:\nfq_name (str): Fully-qualified name of the item.\n\nReturns:\nThe item if it is in the specification. None otherwise", "source": "juraj-google-style"}
{"code": "def load_sklearn_iris_test_data(data_type: Callable, split: bool=True, seed: int=999) -> list[Union[numpy.array, pandas.DataFrame]]:\n    dataset = load_iris()\n    _, x_test, _, _ = train_test_split(dataset['data'], dataset['target'], test_size=0.2, random_state=seed)\n    if split:\n        return [(index, data_type(sample.reshape(1, -1))) for index, sample in enumerate(x_test)]\n    return [(0, data_type(x_test))]", "docstring": "Loads test data from the sklearn Iris dataset in a given format,\neither in a single or multiple batches.\nArgs:\ndata_type: Datatype of the iris test dataset.\nsplit: Split the dataset in different batches or return single batch.\nseed: Random state for splitting the train and test set.", "source": "github-repos"}
{"code": "def _get_eq_sets(self):\n    UNIT = np.eye(3)\n    (eq_sets, operations) = (defaultdict(set), defaultdict(dict))\n    symm_ops = [op.rotation_matrix for op in generate_full_symmops(self.symmops, self.tol)]\n\n    def get_clustered_indices():\n        indices = cluster_sites(self.centered_mol, self.tol, give_only_index=True)\n        out = list(indices[1].values())\n        if (indices[0] is not None):\n            out.append([indices[0]])\n        return out\n    for index in get_clustered_indices():\n        sites = self.centered_mol.cart_coords[index]\n        for (i, reference) in zip(index, sites):\n            for op in symm_ops:\n                rotated = np.dot(op, sites.T).T\n                matched_indices = find_in_coord_list(rotated, reference, self.tol)\n                matched_indices = {dict(enumerate(index))[i] for i in matched_indices}\n                eq_sets[i] |= matched_indices\n                if (i not in operations):\n                    operations[i] = {j: (op.T if (j != i) else UNIT) for j in matched_indices}\n                else:\n                    for j in matched_indices:\n                        if (j not in operations[i]):\n                            operations[i][j] = (op.T if (j != i) else UNIT)\n                for j in matched_indices:\n                    if (j not in operations):\n                        operations[j] = {i: (op if (j != i) else UNIT)}\n                    elif (i not in operations[j]):\n                        operations[j][i] = (op if (j != i) else UNIT)\n    return {'eq_sets': eq_sets, 'sym_ops': operations}", "docstring": "Calculates the dictionary for mapping equivalent atoms onto each other.\n\nArgs:\nNone\n\nReturns:\ndict: The returned dictionary has two possible keys:\n\n``eq_sets``:\nA dictionary of indices mapping to sets of indices,\neach key maps to indices of all equivalent atoms.\nThe keys are guaranteed to be not equivalent.\n\n``sym_ops``:\nTwofold nested dictionary.\n``operations[i][j]`` gives the symmetry operation\nthat maps atom ``i`` unto ``j``.", "source": "codesearchnet"}
{"code": "def get_current_user(with_domain=True):\n    try:\n        user_name = win32api.GetUserNameEx(win32api.NameSamCompatible)\n        if (user_name[(- 1)] == '$'):\n            test_user = win32api.GetUserName()\n            if (test_user == 'SYSTEM'):\n                user_name = 'SYSTEM'\n            elif (get_sid_from_name(test_user) == 'S-1-5-18'):\n                user_name = 'SYSTEM'\n        elif (not with_domain):\n            user_name = win32api.GetUserName()\n    except pywintypes.error as exc:\n        raise CommandExecutionError('Failed to get current user: {0}'.format(exc))\n    if (not user_name):\n        return False\n    return user_name", "docstring": "Gets the user executing the process\n\nArgs:\n\nwith_domain (bool):\n``True`` will prepend the user name with the machine name or domain\nseparated by a backslash\n\nReturns:\nstr: The user name", "source": "codesearchnet"}
{"code": "def fetch_local_package(self, config):\n    self.update_paths_and_config(config=config, pkg_dir_name=config['source'], pkg_cache_dir=os.getcwd())", "docstring": "Make a local path available to current stacker config.\n\nArgs:\nconfig (dict): 'local' path config dictionary", "source": "codesearchnet"}
{"code": "def __setitem__(self, key, value):\n        \n        if not fs.exists(value):\n            raise ValueError(value)\n\n        path = self.keypath(key)\n        fs.mkdir(self.path)\n        fs.mv(value, path)", "docstring": "Emplace file in cache.\n\nArguments:\nkey: Key.\nvalue (str): Path of file to insert in cache.\n\nRaises:\nValueError: If no \"value\" does nto exist.", "source": "juraj-google-style"}
{"code": "def run_ansible(playbooks, inventory_path=None, roles=None, extra_vars=None, tags=None, on_error_continue=False, basedir='.'):\n    (inventory, variable_manager, loader, options) = _load_defaults(inventory_path=inventory_path, roles=roles, extra_vars=extra_vars, tags=tags, basedir=basedir)\n    passwords = {}\n    for path in playbooks:\n        logger.info(('Running playbook %s with vars:\\n%s' % (path, extra_vars)))\n        pbex = PlaybookExecutor(playbooks=[path], inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=passwords)\n        code = pbex.run()\n        stats = pbex._tqm._stats\n        hosts = stats.processed.keys()\n        result = [{h: stats.summarize(h)} for h in hosts]\n        results = {'code': code, 'result': result, 'playbook': path}\n        print(results)\n        failed_hosts = []\n        unreachable_hosts = []\n        for h in hosts:\n            t = stats.summarize(h)\n            if (t['failures'] > 0):\n                failed_hosts.append(h)\n            if (t['unreachable'] > 0):\n                unreachable_hosts.append(h)\n        if (len(failed_hosts) > 0):\n            logger.error(('Failed hosts: %s' % failed_hosts))\n            if (not on_error_continue):\n                raise EnosFailedHostsError(failed_hosts)\n        if (len(unreachable_hosts) > 0):\n            logger.error(('Unreachable hosts: %s' % unreachable_hosts))\n            if (not on_error_continue):\n                raise EnosUnreachableHostsError(unreachable_hosts)", "docstring": "Run Ansible.\n\nArgs:\nplaybooks (list): list of paths to the playbooks to run\ninventory_path (str): path to the hosts file (inventory)\nextra_var (dict): extra vars to pass\ntags (list): list of tags to run\non_error_continue(bool): Don't throw any exception in case a host is\nunreachable or the playbooks run with errors\n\nRaises:\n:py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an\nerror on a host and ``on_error_continue==False``\n:py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is\nunreachable (through ssh) and ``on_error_continue==False``", "source": "codesearchnet"}
{"code": "def add_answer(self, vote, rationale):\n        \n        self.raw_answers.append({\n            VOTE_KEY: vote,\n            RATIONALE_KEY: rationale,\n        })", "docstring": "Add an answer\n\nArgs:\nvote (int): the option that student voted for\nrationale (str): the reason why the student vote for the option", "source": "juraj-google-style"}
{"code": "def _find_best_fit(self, pbin):\n    fit = ((pbin.fitness(r[0], r[1]), k) for (k, r) in self._sorted_rect.items())\n    fit = (f for f in fit if (f[0] is not None))\n    try:\n        (_, rect) = min(fit, key=self.first_item)\n        return rect\n    except ValueError:\n        return None", "docstring": "Return best fitness rectangle from rectangles packing _sorted_rect list\n\nArguments:\npbin (PackingAlgorithm): Packing bin\n\nReturns:\nkey of the rectangle with best fitness", "source": "codesearchnet"}
{"code": "def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):\n        \n        prefix = prefix or cls.argument_prefix\n\n        group.add_argument(\"--%s-sources\" % prefix,\n                           action=\"store\", nargs=\"+\",\n                           dest=\"%s_sources\" % prefix.replace('-', '_'),\n                           help=\"%s source files to parse\" % prefix)\n\n        if allow_filters:\n            group.add_argument(\"--%s-source-filters\" % prefix,\n                               action=\"store\", nargs=\"+\",\n                               dest=\"%s_source_filters\" % prefix.replace(\n                                   '-', '_'),\n                               help=\"%s source files to ignore\" % prefix)\n\n        if add_root_paths:\n            group.add_argument(\"--%s-source-roots\" % prefix,\n                               action=\"store\", nargs=\"+\",\n                               dest=\"%s_source_roots\" % prefix.replace(\n                                   '-', '_'),\n                               help=\"%s source root directories allowing files \"\n                                    \"to be referenced relatively to those\" % prefix)", "docstring": "Subclasses may call this to add sources and source_filters arguments.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nallow_filters: bool,  Whether the extension wishes to expose a\nsource_filters argument.\nprefix: str, arguments have to be namespaced.", "source": "juraj-google-style"}
{"code": "def Create(self, project_id, start_options=None, deadline=10):\n    return DatastoreEmulator(self._emulator_cmd, self._working_directory, project_id, deadline, start_options)", "docstring": "Creates an emulator instance.\n\nThis method will wait for up to 'deadline' seconds for the emulator to\nstart.\n\nArgs:\nproject_id: project ID\nstart_options: a list of additional command-line options to pass to the\nemulator 'start' command\ndeadline: number of seconds to wait for the datastore to respond\n\nReturns:\na DatastoreEmulator\n\nRaises:\nIOError: if the emulator could not be started within the deadline", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):\n    all_hidden_states = () if output_hidden_states else None\n    all_self_attns = () if output_attentions else None\n    current_key_values = () if use_cache else None\n    for i, layer in enumerate(self.layers):\n        if output_hidden_states:\n            all_hidden_states += (hidden_states,)\n        layer_outputs = layer(hidden_states, attention_mask, position_bias, output_attentions=output_attentions, past_key_values=past_key_values[i] if past_key_values else None, use_cache=use_cache)\n        hidden_states, attn_weights, current_key_value = layer_outputs\n        if output_attentions:\n            all_self_attns += (attn_weights,)\n        if current_key_value is not None:\n            current_key_values = current_key_values + (current_key_value,)\n    hidden_states = self.output_layernorm(hidden_states)\n    if output_hidden_states:\n        all_hidden_states += (hidden_states,)\n    return (hidden_states, current_key_values, all_hidden_states, all_self_attns)", "docstring": "Args:\nhidden_states (`torch.Tensor`):\nInput to the layer of shape `(batch, seq_len, dim_model)`\nattention_mask (`torch.Tensor`):\nAvoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)`\nposition_bias (`torch.Tensor`):\nProvides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)`\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers.\npast_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*):\nCached past key and value projection states\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).", "source": "github-repos"}
{"code": "def GetSshkeyMap(self, since=None):\n    return SshkeyUpdateGetter(self.conf).GetUpdates(source=self, search_base=self.conf['base'], search_filter=self.conf['filter'], search_scope=self.conf['scope'], since=since)", "docstring": "Return the sshkey map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of maps.SshkeyMap", "source": "github-repos"}
{"code": "def _build_parser_message(self, column: str, parser: str, error: str, value: Any) -> LogMessage:\n    return self._base_log.copy() | LogMessage(log_type=LogType.PARSER.value, column=column, parser=parser, error=error, value=value)", "docstring": "Adds parser error information to base log message.\n\nArgs:\n* column: column where the rule is applied\n* parser: parser function that failed and raises this message\n* value: value that fails to parse\n\nReturns:\n* log: LogMessage dictionary", "source": "github-repos"}
{"code": "def grep(regex, output):\n    lines = output.decode('utf-8').strip().splitlines()\n    results = []\n    for line in lines:\n        if re.search(regex, line):\n            results.append(line.strip())\n    return results", "docstring": "Similar to linux's `grep`, this returns the line in an output stream\nthat matches a given regex pattern.\n\nIt does not rely on the `grep` binary and is not sensitive to line endings,\nso it can be used cross-platform.\n\nArgs:\nregex: string, a regex that matches the expected pattern.\noutput: byte string, the raw output of the adb cmd.\n\nReturns:\nA list of strings, all of which are output lines that matches the\nregex pattern.", "source": "github-repos"}
{"code": "def get_image_size_for_max_num_patches(image_height: int, image_width: int, patch_size: int, max_num_patches: int, eps: float=1e-05) -> Tuple[int, int]:\n\n    def get_scaled_image_size(scale: float, size: int, patch_size: int) -> int:\n        scaled_size = size * scale\n        scaled_size = math.ceil(scaled_size / patch_size) * patch_size\n        scaled_size = max(patch_size, scaled_size)\n        return int(scaled_size)\n    scale_min, scale_max = (eps / 10, 100.0)\n    while scale_max - scale_min >= eps:\n        scale = (scale_min + scale_max) / 2\n        target_height = get_scaled_image_size(scale, image_height, patch_size)\n        target_width = get_scaled_image_size(scale, image_width, patch_size)\n        num_patches = target_height / patch_size * (target_width / patch_size)\n        if num_patches <= max_num_patches:\n            scale_min = scale\n        else:\n            scale_max = scale\n    scale = scale_min\n    target_height = get_scaled_image_size(scale, image_height, patch_size)\n    target_width = get_scaled_image_size(scale, image_width, patch_size)\n    return (target_height, target_width)", "docstring": "Determine image size based on max number of patches, ensure dimensions are divisible by patch size and image is at least 1 patch.\n\nArgs:\nimage_height (`int`):\nOriginal image height.\nimage_width (`int`):\nOriginal image width.\npatch_size (`int`):\nPatch size for processing.\nmax_num_patches (`int`):\nMaximum number of patches.\neps (`float`):\nSmall threshold for binary search.\n\nReturns:\nTuple: (target_height, target_width)", "source": "github-repos"}
{"code": "def lookup_imagenet_labels(indices):\n    \n    global _CLASS_INDEX\n    if _CLASS_INDEX is None:\n        with open(os.path.join(os.path.dirname(__file__), '../../resources/imagenet_class_index.json')) as f:\n            _CLASS_INDEX = json.load(f)\n\n    indices = listify(indices)\n    return [_CLASS_INDEX[str(idx)][1] for idx in indices]", "docstring": "Utility function to return the image net label for the final `dense` layer output index.\n\nArgs:\nindices: Could be a single value or an array of indices whose labels should be looked up.\n\nReturns:\nImage net label corresponding to the image category.", "source": "juraj-google-style"}
{"code": "def _check_status(cls, response_json):\n        \n        status = response_json['status']\n        msg = response_json['msg']\n\n        if status == 400:\n            raise BadRequestException(msg)\n        elif status == 403:\n            raise PermissionDeniedException(msg)\n        elif status == 404:\n            raise FileNotFoundException(msg)\n        elif status == 451:\n            raise UnavailableForLegalReasonsException(msg)\n        elif status == 509:\n            raise BandwidthUsageExceeded(msg)\n        elif status >= 500:\n            raise ServerErrorException(msg)", "docstring": "Check the status of the incoming response, raise exception if status is not 200.\n\nArgs:\nresponse_json (dict): results of the response of the GET request.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def remove_handler(self, handler: Handler, group: int=0):\n    if isinstance(handler, DisconnectHandler):\n        self.disconnect_handler = None\n    else:\n        self.dispatcher.remove_handler(handler, group)", "docstring": "Removes a previously-added update handler.\n\nMake sure to provide the right group that the handler was added in. You can use\nthe return value of the :meth:`add_handler` method, a tuple of (handler, group), and\npass it directly.\n\nArgs:\nhandler (``Handler``):\nThe handler to be removed.\n\ngroup (``int``, *optional*):\nThe group identifier, defaults to 0.", "source": "codesearchnet"}
{"code": "def getWindow(title, exact=False):\n    \n    titles = getWindows()\n    hwnd = titles.get(title, None)\n    if not hwnd and not exact:\n        for k, v in titles.items():\n            if title in k:\n                hwnd = v\n                break\n    if hwnd:\n        return Window(hwnd)\n    else:\n        return None", "docstring": "Return Window object if 'title' or its part found in visible windows titles, else return None\n\nReturn only 1 window found first\nArgs:\ntitle: unicode string\nexact (bool): True if search only exact match", "source": "juraj-google-style"}
{"code": "def concat(self, second_iterable):\n        \n        if self.closed():\n            raise ValueError(\"Attempt to call concat() on a closed Queryable.\")\n\n        if not is_iterable(second_iterable):\n            raise TypeError(\"Cannot compute concat() with second_iterable of \"\n                  \"non-iterable {0}\".format(str(type(second_iterable))[7: -1]))\n\n        return self._create(itertools.chain(self, second_iterable))", "docstring": "Concatenates two sequences.\n\nNote: This method uses deferred execution.\n\nArgs:\nsecond_iterable: The sequence to concatenate on to the sequence.\n\nReturns:\nA Queryable over the concatenated sequences.\n\nRaises:\nValueError: If the Queryable is closed().\nTypeError: If second_iterable is not in fact iterable.", "source": "juraj-google-style"}
{"code": "def ReadFile(self, definitions_registry, path):\n    with open(path, 'r') as file_object:\n        self.ReadFileObject(definitions_registry, file_object)", "docstring": "Reads data type definitions from a file into the registry.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\npath (str): path of the file to read from.", "source": "codesearchnet"}
{"code": "def _DrawHours(self):\n    tmpstrs = []\n    for i in range(0, self._gwidth, self._min_grid):\n        if ((i % self._hour_grid) == 0):\n            tmpstrs.append(('<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />' % (((i + 0.5) + 20), 20, ((i + 0.5) + 20), self._gheight)))\n            tmpstrs.append(('<text class=\"Label\" x=\"%d\" y=\"%d\">%d</text>' % ((i + 20), 20, (((i / self._hour_grid) + self._offset) % 24))))\n        else:\n            tmpstrs.append(('<polyline class=\"SubHour\" points=\"%d,%d,%d,%d\" />' % (((i + 0.5) + 20), 20, ((i + 0.5) + 20), self._gheight)))\n    return ''.join(tmpstrs)", "docstring": "Generates svg to show a vertical hour and sub-hour grid\n\nReturns:\n# A string containing a polyline tag for each grid line\n\" <polyline class=\"FullHour\" points=\"20,0 ...\"", "source": "codesearchnet"}
{"code": "def uninstalled(name):\n    \n    ret = {'name': name,\n           'changes': {},\n           'result': None,\n           'comment': ''}\n\n    old = __salt__['flatpak.is_installed'](name)\n    if not old:\n        ret['comment'] = 'Package {0} is not installed'.format(name)\n        ret['result'] = True\n        return ret\n    else:\n        if __opts__['test']:\n            ret['comment'] = 'Package {0} would have been uninstalled'.format(name)\n            ret['changes']['old'] = old[0]['version']\n            ret['changes']['new'] = None\n            ret['result'] = None\n            return ret\n\n        __salt__['flatpak.uninstall'](name)\n        if not __salt__['flatpak.is_installed'](name):\n            ret['comment'] = 'Package {0} uninstalled'.format(name)\n            ret['changes']['old'] = old[0]['version']\n            ret['changes']['new'] = None\n            ret['result'] = True\n            return ret", "docstring": "Ensure that the named package is not installed.\n\nArgs:\nname (str): The flatpak package.\n\nReturns:\ndict: The ``result`` and ``output``.\n\nExample:\n\n.. code-block:: yaml\n\nuninstall_package:\nflatpack.uninstalled:\n- name: gimp", "source": "juraj-google-style"}
{"code": "def deprecated(msg):\n    \n    def decorator(func):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            logging.getLogger(__name__).warning(msg)\n            return func(*args, **kwargs)\n\n        return wrapper\n\n    return decorator", "docstring": "Marks a function / method as deprecated.\n\nTakes one argument, a message to be logged with information on future usage of the function or alternative methods\nto call.\n\nArgs:\nmsg (str): Deprecation message to be logged\n\nReturns:\n`callable`", "source": "juraj-google-style"}
{"code": "def witness_tx(tx_ins, tx_outs, tx_witnesses, **kwargs):\n    \n\n    \n    deser = [script_ser.deserialize(tx_in.redeem_script) for tx_in in tx_ins\n             if tx_in is not None]\n    for w in tx_witnesses:\n        try:\n            deser.append(script_ser.deserialize(w.stack[-1].item))\n        except (NotImplementedError, ValueError):\n            pass\n    version = max([guess_version(d) for d in deser])\n    if 'lock_time' in kwargs:\n        lock_time = kwargs['lock_time']\n    else:\n        lock_time = max([guess_locktime(d) for d in deser])\n\n    return tb.make_tx(\n        version=version,\n        tx_ins=tx_ins,\n        tx_outs=tx_outs,\n        lock_time=lock_time,\n        tx_witnesses=tx_witnesses)", "docstring": "Construct a fully-signed segwit transaction\nArgs:\ntx_ins       list(TxIn instances): list of transaction inputs\ntx_outs      list(TxOut instances): list of transaction outputs\ntx_witnesses list(TxWitness instances): list of transaction witnsses\n**kwargs:\nversion     (int): transaction version number\nlocktime    (hex): transaction locktime\n\nReturns:\n(Tx instance): signed transaction with witnesses", "source": "juraj-google-style"}
{"code": "def defer(self, func: typing.Callable[([], typing.Any)], until: typing.Union[(int, float)]=(- 1)) -> typing.Any:\n    raise NotImplementedError()", "docstring": "Defer the execution of a function until some clock value.\n\nArgs:\nfunc (typing.Callable[[], typing.Any]): A callable that accepts no\narguments. All return values are ignored.\nuntil (typing.Union[int, float]): A numeric value that represents\nthe clock time when the callback becomes available for\nexecution. Values that are less than the current time result in\nthe function being called at the next opportunity.\n\nReturns:\ntyping.Any: An opaque identifier that represents the callback\nuniquely within the processor. This identifier is used to\nmodify the callback scheduling.\n\nNote:\nThe time given should not be considered absolute. It represents\nthe time when the callback becomes available to execute. It may\nbe much later than the given time value when the function actually\nexecutes depending on the implementation.", "source": "codesearchnet"}
{"code": "def process_fixed_issues(self, volumes, existing_issues):\n        \n        fixed_issues = []\n        for issue_id, issue in list(existing_issues.items()):\n            if issue_id not in volumes:\n                fixed_issues.append(issue)\n\n        return fixed_issues", "docstring": "Provided a list of volumes and existing issues, returns a list of fixed issues to be deleted\n\nArgs:\nvolumes (`dict`): A dictionary keyed on the issue id, with the :obj:`Volume` object as the value\nexisting_issues (`dict`): A dictionary keyed on the issue id, with the :obj:`EBSVolumeAuditIssue` object as\nthe value\n\nReturns:\n:obj:`list` of :obj:`EBSVolumeAuditIssue`", "source": "juraj-google-style"}
{"code": "def inspect_last(self, stream, only_allocated=False):\n    if only_allocated:\n        found = False\n        for walker in self._virtual_walkers:\n            if walker.matches(stream):\n                found = True\n                break\n        if (not found):\n            raise UnresolvedIdentifierError('inspect_last could not find an allocated virtual streamer for the desired stream', stream=stream)\n    if (stream in self._last_values):\n        return self._last_values[stream]\n    raise StreamEmptyError(u'inspect_last called on stream that has never been written to', stream=stream)", "docstring": "Return the last value pushed into a stream.\n\nThis function works even if the stream is virtual and no\nvirtual walker has been created for it.  It is primarily\nuseful to aid in debugging sensor graphs.\n\nArgs:\nstream (DataStream): The stream to inspect.\nonly_allocated (bool): Optional parameter to only allow inspection\nof allocated virtual streams.  This is useful for mimicking the\nbehavior of an embedded device that does not have a _last_values\narray.\n\nReturns:\nIOTileReading: The data in the stream\n\nRaises:\nStreamEmptyError: if there has never been data written to\nthe stream.\nUnresolvedIdentifierError: if only_allocated is True and there has not\nbeen a virtual stream walker allocated to listen to this stream.", "source": "codesearchnet"}
{"code": "def with_division(self, division):\n    if (division is None):\n        division = ''\n    division = slugify(division)\n    self._validate_division(division)\n    self.division = division\n    return self", "docstring": "Add a division segment\n\nArgs:\ndivision (str): Official name of an electoral division.\n\nReturns:\nIdBuilder\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def limit_epochs(tensor, num_epochs=None, name=None):\n    if num_epochs is None:\n        return tensor\n    if num_epochs <= 0:\n        raise ValueError('num_epochs must be > 0 not %d.' % num_epochs)\n    with ops.name_scope(name, 'limit_epochs', [tensor]) as name:\n        zero64 = constant_op.constant(0, dtype=dtypes.int64)\n        epochs = variable_v1.VariableV1(zero64, name='epochs', trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])\n        counter = epochs.count_up_to(num_epochs)\n        with ops.control_dependencies([counter]):\n            return array_ops.identity(tensor, name=name)", "docstring": "Returns tensor `num_epochs` times and then raises an `OutOfRange` error.\n\nNote: creates local counter `epochs`. Use `local_variables_initializer()` to\ninitialize local variables.\n\nArgs:\ntensor: Any `Tensor`.\nnum_epochs: A positive integer (optional).  If specified, limits the number\nof steps the output tensor may be evaluated.\nname: A name for the operations (optional).\n\nReturns:\ntensor or `OutOfRange`.\n\nRaises:\nValueError: if `num_epochs` is invalid.", "source": "github-repos"}
{"code": "def iter_predict(self, X, include_init=False):\n    utils.validation.check_is_fitted(self, 'init_estimator_')\n    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)\n    y_pred = self.init_estimator_.predict(X)\n    if include_init:\n        (yield y_pred)\n    for (estimators, line_searchers, cols) in itertools.zip_longest(self.estimators_, self.line_searchers_, self.columns_):\n        for (i, (estimator, line_searcher)) in enumerate(itertools.zip_longest(estimators, (line_searchers or []))):\n            if (cols is None):\n                direction = estimator.predict(X)\n            else:\n                direction = estimator.predict(X[(:, cols)])\n            if line_searcher:\n                direction = line_searcher.update(direction)\n            y_pred[(:, i)] += (self.learning_rate * direction)\n        (yield y_pred)", "docstring": "Returns the predictions for ``X`` at every stage of the boosting procedure.\n\nArgs:\nX (array-like or sparse matrix of shape (n_samples, n_features): The input samples.\nSparse matrices are accepted only if they are supported by the weak model.\ninclude_init (bool, default=False): If ``True`` then the prediction from\n``init_estimator`` will also be returned.\nReturns:\niterator of arrays of shape (n_samples,) containing the predicted values at each stage", "source": "codesearchnet"}
{"code": "def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'channel': channel, 'user': user})\n    return self.api_call('groups.invite', json=kwargs)", "docstring": "Invites a user to a private channel.\n\nArgs:\nchannel (str): The group id. e.g. 'G1234567890'\nuser (str): The user id. e.g. 'U1234567890'", "source": "codesearchnet"}
{"code": "def add_timing_signal_1d_given_position(x,\n                                        position,\n                                        min_timescale=1.0,\n                                        max_timescale=1.0e4):\n  \n  channels = common_layers.shape_list(x)[2]\n  num_timescales = channels \n  log_timescale_increment = (\n      math.log(float(max_timescale) / float(min_timescale)) /\n      (tf.to_float(num_timescales) - 1))\n  inv_timescales = min_timescale * tf.exp(\n      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n  scaled_time = (\n      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(\n          tf.expand_dims(inv_timescales, 0), 0))\n  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)\n  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])\n  signal = common_layers.cast_like(signal, x)\n  return x + signal", "docstring": "Adds sinusoids of diff frequencies to a Tensor, with timing position given.\n\nArgs:\nx: a Tensor with shape [batch, length, channels]\nposition: a Tensor with shape [batch, length]\nmin_timescale: a float\nmax_timescale: a float\n\nReturns:\na Tensor the same shape as x.", "source": "juraj-google-style"}
{"code": "def _get_saver_def_or_none(exported_model: exported_model_pb2.ExportedModel) -> Optional[saver_pb2.SaverDef]:\n    if exported_model.HasField('saver_def'):\n        return exported_model.saver_def\n    return None", "docstring": "Returns the SaverDef from ExportedModel, None otherwise.\n\nArgs:\nexported_model: ExportedModel to take the SaverDef from.\n\nReturns:\nSaverDef instance if the field `saver_def` is set. None otherwise.", "source": "github-repos"}
{"code": "def range(self, x_data=None):\n        \n        if x_data is None:\n            try:\n                x_data = evaluation.evaluate_inverse(\n                    self, numpy.array([[0.5]]*len(self)))\n            except StochasticallyDependentError:\n                x_data = approximation.find_interior_point(self)\n            shape = (len(self),)\n            if hasattr(self, \"_range\"):\n                return self._range(x_data, {})\n        else:\n            x_data = numpy.asfarray(x_data)\n            shape = x_data.shape\n            x_data = x_data.reshape(len(self), -1)\n\n        q_data = evaluation.evaluate_bound(self, x_data)\n        q_data = q_data.reshape((2,)+shape)\n        return q_data", "docstring": "Generate the upper and lower bounds of a distribution.\n\nArgs:\nx_data (numpy.ndarray) :\nThe bounds might vary over the sample space. By providing\nx_data you can specify where in the space the bound should be\ntaken.  If omitted, a (pseudo-)random sample is used.\n\nReturns:\n(numpy.ndarray):\nThe lower (out[0]) and upper (out[1]) bound where\nout.shape=(2,)+x_data.shape", "source": "juraj-google-style"}
{"code": "def _ip_int_from_string(self, ip_str):\n        \n        if not ip_str:\n            raise AddressValueError('Address cannot be empty')\n\n        octets = ip_str.split('.')\n        if len(octets) != 4:\n            raise AddressValueError(\"Expected 4 octets in %r\" % ip_str)\n\n        try:\n            return _int_from_bytes(map(self._parse_octet, octets), 'big')\n        except ValueError as exc:\n            raise AddressValueError(\"%s in %r\" % (exc, ip_str))", "docstring": "Turn the given IP string into an integer for comparison.\n\nArgs:\nip_str: A string, the IP ip_str.\n\nReturns:\nThe IP ip_str as an integer.\n\nRaises:\nAddressValueError: if ip_str isn't a valid IPv4 Address.", "source": "juraj-google-style"}
{"code": "def add_ipdu(self, information, timeout=(- 1)):\n    uri = (self.URI + '/discover')\n    return self._client.create(information, uri=uri, timeout=timeout)", "docstring": "Add an HP iPDU and bring all components under management by discovery of its management module. Bring the\nmanagement module under exclusive management by the appliance, configure any management or data collection\nsettings, and create a private set of administrative credentials to enable ongoing communication and management\nof the iPDU. Use \"force\" to claim the device, even if claimed by another management appliance\n\nArgs:\nresource: power device information\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: added power device.", "source": "codesearchnet"}
{"code": "def SetExtractionConfiguration(self, configuration):\n    \n    self._hasher_file_size_limit = configuration.hasher_file_size_limit\n    self._SetHashers(configuration.hasher_names_string)\n    self._process_archives = configuration.process_archives\n    self._process_compressed_streams = configuration.process_compressed_streams\n    self._SetYaraRules(configuration.yara_rules_string)", "docstring": "Sets the extraction configuration settings.\n\nArgs:\nconfiguration (ExtractionConfiguration): extraction configuration.", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, data=None):\n        \n        super().__init__(xid)\n        self.data = data", "docstring": "Create an EchoReply with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\ndata (bytes): arbitrary-length data field.", "source": "juraj-google-style"}
{"code": "def get_checklist(self, id, name=None):\n    return self.create_checklist(dict(id=id, name=name))", "docstring": "Get a checklist\n\nReturns:\nChecklist: The checklist with the given `id`", "source": "codesearchnet"}
{"code": "def warn_logging(logger):\n    \n    \n    def showwarning(message, category, filename, lineno, file=None, line=None):\n        logger.warning(message)\n    return showwarning", "docstring": "Create a `showwarning` function that uses the given logger.\n\nArguments:\nlogger (~logging.Logger): the logger to use.\n\nReturns:\nfunction: a function that can be used as the `warnings.showwarning`\ncallback.", "source": "juraj-google-style"}
{"code": "def staged_rewards(self):\n    cubeA_pos = self.sim.data.body_xpos[self.cubeA_body_id]\n    cubeB_pos = self.sim.data.body_xpos[self.cubeB_body_id]\n    gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]\n    dist = np.linalg.norm((gripper_site_pos - cubeA_pos))\n    r_reach = ((1 - np.tanh((10.0 * dist))) * 0.25)\n    touch_left_finger = False\n    touch_right_finger = False\n    touch_cubeA_cubeB = False\n    for i in range(self.sim.data.ncon):\n        c = self.sim.data.contact[i]\n        if ((c.geom1 in self.l_finger_geom_ids) and (c.geom2 == self.cubeA_geom_id)):\n            touch_left_finger = True\n        if ((c.geom1 == self.cubeA_geom_id) and (c.geom2 in self.l_finger_geom_ids)):\n            touch_left_finger = True\n        if ((c.geom1 in self.r_finger_geom_ids) and (c.geom2 == self.cubeA_geom_id)):\n            touch_right_finger = True\n        if ((c.geom1 == self.cubeA_geom_id) and (c.geom2 in self.r_finger_geom_ids)):\n            touch_right_finger = True\n        if ((c.geom1 == self.cubeA_geom_id) and (c.geom2 == self.cubeB_geom_id)):\n            touch_cubeA_cubeB = True\n        if ((c.geom1 == self.cubeB_geom_id) and (c.geom2 == self.cubeA_geom_id)):\n            touch_cubeA_cubeB = True\n    if (touch_left_finger and touch_right_finger):\n        r_reach += 0.25\n    cubeA_height = cubeA_pos[2]\n    table_height = self.table_full_size[2]\n    cubeA_lifted = (cubeA_height > (table_height + 0.04))\n    r_lift = (1.0 if cubeA_lifted else 0.0)\n    if cubeA_lifted:\n        horiz_dist = np.linalg.norm((np.array(cubeA_pos[:2]) - np.array(cubeB_pos[:2])))\n        r_lift += (0.5 * (1 - np.tanh(horiz_dist)))\n    r_stack = 0\n    not_touching = ((not touch_left_finger) and (not touch_right_finger))\n    if (not_touching and (r_lift > 0) and touch_cubeA_cubeB):\n        r_stack = 2.0\n    return (r_reach, r_lift, r_stack)", "docstring": "Helper function to return staged rewards based on current physical states.\n\nReturns:\nr_reach (float): reward for reaching and grasping\nr_lift (float): reward for lifting and aligning\nr_stack (float): reward for stacking", "source": "codesearchnet"}
{"code": "def do_REMOTE(self, target: str, remote_command: str, source: list, *args, **kwargs) -> None:\n    if (target == self.messaging._service_name):\n        info = 'target for remote command is the bot itself! Returning the function'\n        self.logger.info(info)\n        return self._handle_command(remote_command, source, *args, **kwargs)\n    try:\n        target = self.messaging._address_map[target]\n    except KeyError:\n        warn = ' Target %s, not found in addresses. Are you sure that %s sent an IDENT message?'\n        self.logger.warn(warn, target, target)\n        return\n    self.logger.info(' REMOTE %s, target: %s | %s, %s', remote_command, target, args, kwargs)\n    source = (target + source)\n    self.messaging.send_command_response(source, remote_command, *args, **kwargs)", "docstring": "Send a remote command to a service. Used\n\nArgs:\ntarget: The service that the command gets set to\nremote_command: The command to do remotely.\nsource: the binary source of the zmq_socket. Packed to send to the", "source": "codesearchnet"}
{"code": "def maybe_set_static_shape(tensor, shape):\n    if _ENABLE_MAYBE_SET_STATIC_SHAPE and (not context.executing_eagerly()) and ops.get_default_graph().building_function and (not tensor.shape.is_fully_defined()) and tensor_util.is_tensor(shape):\n        shape = shape_tensor(shape)\n        const_shape = tensor_util.constant_value_as_shape(shape)\n        tensor.set_shape(const_shape)", "docstring": "Sets the shape of `tensor` to the `shape`'s constant value, if inferrable.\n\nThis is a temporary workaround to fix shape inference across functional op\nboundaries. E.g.\n\n```python\nshape = tf.constant([3])\n@tf.function\ndef f():\nu = tf.random_uniform(shape)\nreturn u\n```\n\nIf we were to rely solely on C++ shape inference, the shape of `u` inside\n`f` would be unknown because C++ shape inference is not aware of the outer\ngraph and all it sees is a Placeholder node when backtracing the captured\ntensor for `shape`. `maybe_set_static_shape` computes the static shape value\nof `shape` by traversing the `FuncGraph` boundaries and sets the correct\nshape.\n\nA longer term solution would be to fix C++ shape inference.\n\nArgs:\ntensor: A tensor.\nshape: A shape tensor.", "source": "github-repos"}
{"code": "def get_page_artid(self, separator='-'):\n        \n        publication_info = get_value(\n            self.record,\n            'publication_info[0]',\n            default={}\n        )\n        return LiteratureReader.get_page_artid_for_publication_info(\n            publication_info,\n            separator\n        )", "docstring": "Return the page range or the article id of a record.\n\nArgs:\nseparator(basestring): optional page range symbol, defaults to a single dash\n\nReturns:\nstring: the page range or the article id of the record.\n\nExamples:\n>>> record = {\n...     'publication_info': [\n...         {'artid': '054021'},\n...     ],\n... }\n>>> LiteratureReader(record).get_page_artid()\n'054021'", "source": "juraj-google-style"}
{"code": "def _get_metrics_result_or_logs(self, logs):\n    metric_logs = self.get_metrics_result()\n    if isinstance(logs, dict) and set(logs.keys()) == set(metric_logs.keys()):\n        return metric_logs\n    return logs", "docstring": "Returns model metrics as a dict if the keys match with input logs.\n\nWhen the training / evaluation is performed with an asynchronous steps,\nthe last scheduled `train / test_step` may not give the latest metrics\nbecause it is not guaranteed to be executed the last. This method gets\nmetrics from the model directly instead of relying on the return from\nlast step function.\n\nWhen the user has custom train / test step functions, the metrics\nreturned may be different from `Model.metrics`. In those instances,\nthis function will be no-op and return the logs passed in.\n\nArgs:\nlogs: A `dict` of metrics returned by train / test step function.\n\nReturns:\nA `dict` containing values of the metrics listed in `self.metrics`\nwhen logs and model metrics keys match. Otherwise it returns input\n`logs`.", "source": "github-repos"}
{"code": "def record_kv_cache_memory_metrics(self, cache) -> None:\n    if not _has_opentelemetry:\n        return\n    try:\n        num_used_blocks = cache.num_blocks - len(cache._free_blocks)\n        num_layers = len(cache.key_cache)\n        bytes_per_parameter = 2 if cache.dtype in [torch.float16, torch.bfloat16] else 4\n        memory_bytes = num_layers * num_used_blocks * cache.block_size * cache.num_key_value_heads * cache.head_dim * 2 * bytes_per_parameter\n        free_memory_bytes = num_layers * len(cache._free_blocks) * cache.block_size * cache.num_key_value_heads * cache.head_dim * 2 * bytes_per_parameter\n        self.kv_cache_memory_gauge.set(memory_bytes)\n        self.kv_cache_free_memory_gauge.set(free_memory_bytes)\n        logger.debug(f'KV Cache memory: {memory_bytes / (1024 * 1024):.2f}MB, Used blocks: {num_used_blocks}/{cache.num_blocks} ({num_used_blocks / cache.num_blocks * 100:.1f}%)')\n    except Exception as e:\n        logger.warning(f'Failed to record KV cache memory metrics: {e}')", "docstring": "Record memory usage of the PagedAttentionCache without GPU synchronization.\n\nThis calculates the theoretical memory usage based on cache configuration\nand the number of blocks currently in use.\n\nArgs:\ncache: The PagedAttentionCache object to measure", "source": "github-repos"}
{"code": "def _checkFunctioncode(functioncode, listOfAllowedValues=[]):\n    FUNCTIONCODE_MIN = 1\n    FUNCTIONCODE_MAX = 127\n    _checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')\n    if (listOfAllowedValues is None):\n        return\n    if (not isinstance(listOfAllowedValues, list)):\n        raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))\n    for value in listOfAllowedValues:\n        _checkInt(value, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')\n    if (functioncode not in listOfAllowedValues):\n        raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))", "docstring": "Check that the given functioncode is in the listOfAllowedValues.\n\nAlso verifies that 1 <= function code <= 127.\n\nArgs:\n* functioncode (int): The function code\n* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:\n    \n    if isinstance(variables, dict):\n        if variables.get(\"times\"):\n            times = int(variables[\"times\"])\n            del variables[\"times\"]\n\n            yield list(variable_matrix(variables, parent, \"product\")) * times\n\n        else:\n            raise ValueError(f\"times is a required keyword for the repeat iterator.\")\n    else:\n        raise ValueError(\n            f\"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}\"\n        )", "docstring": "Cycle through a list of values a specified number of times\n\nArgs:\nvariables: The input variables for the creation of the range\nparent: The variable for which the values are being generated.\n\nReturns: A list of dictionaries mapping the parent to each value.", "source": "juraj-google-style"}
{"code": "def by_name(name):\n    \n    devices = discover(all_households=True)\n    for device in (devices or []):\n        if device.player_name == name:\n            return device\n    return None", "docstring": "Return a device by name.\n\nArgs:\nname (str): The name of the device to return.\n\nReturns:\n:class:`~.SoCo`: The first device encountered among all zone with the\ngiven player name. If none are found `None` is returned.", "source": "juraj-google-style"}
{"code": "def rotate_view(self, axis_ind=0, angle=0):\n        \n        camera = self.ren.GetActiveCamera()\n        if axis_ind == 0:\n            camera.Roll(angle)\n        elif axis_ind == 1:\n            camera.Azimuth(angle)\n        else:\n            camera.Pitch(angle)\n        self.ren_win.Render()", "docstring": "Rotate the camera view.\n\nArgs:\naxis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.\nangle: Angle to rotate by. Defaults to 0.", "source": "juraj-google-style"}
{"code": "def _get_shards_by_task(self, sharding_callback: sharding_util.ShardingCallback) -> Sequence[tuple[str, Sequence[sharding_util.Shard]]]:\n\n    def wrap_tensor(shardable_tensor):\n        tensor_val = shardable_tensor.tensor\n        tensor_shape = shardable_tensor.shape\n        save_spec = shardable_tensor._tensor_save_spec\n        with ops.device(shardable_tensor.device):\n            save_spec_tensor = save_spec.tensor\n        if tensor_val is None and save_spec_tensor is None:\n            return None\n        elif save_spec_tensor is not None:\n            tensor_val = save_spec_tensor\n            tensor_shape = save_spec_tensor.shape\n            if isinstance(save_spec.name, tensor_lib.Tensor):\n                tensor_val._wrapped_name = save_spec.name\n            if isinstance(shardable_tensor.slice_spec, tensor_lib.Tensor):\n                tensor_val._wrapped_slice_spec = save_spec.slice_spec\n        return dataclasses.replace(shardable_tensor, tensor=tensor_val, shape=tensor_shape)\n    shardable_tensors_by_task = {task: [shardable_tensor for shardable_tensor in map(wrap_tensor, shardable_tensors) if shardable_tensor is not None] for task, shardable_tensors in self._shardable_tensors_by_task.items()}\n    sharding_callback = sharding_callback or sharding_policies.ShardByTaskPolicy()\n    metrics.SetShardingCallbackDescription(description=sharding_callback.description)\n    callback_start_time = time.time() * 1000000.0\n    shards_by_task = []\n    for task, shardable_tensors in shardable_tensors_by_task.items():\n        shards_by_task.append((task, sharding_callback(shardable_tensors)))\n    callback_end_time = time.time() * 1000000.0\n    callback_duration = math.ceil(callback_end_time - callback_start_time)\n    metrics.AddShardingCallbackDuration(callback_duration=max(1, callback_duration))\n    logging.info('Sharding callback duration: %s microseconds', callback_duration)\n    return shards_by_task", "docstring": "Calls the sharding callback with shardable_tensors.\n\nArgs:\nsharding_callback: ShardingCallback. The callback function wrapper that\nsplits shardable_tensors into shards.\n\nReturns:\nA list of (task, shards) tuples.", "source": "github-repos"}
{"code": "def find_stacks(node, strict=False):\n    fso = FindStackOps()\n    fso.visit(node)\n    AnnotateStacks(fso.push_pop_pairs, strict).visit(node)\n    return node", "docstring": "Find pushes and pops to the stack and annotate them as such.\n\nArgs:\nnode: An AST node that might contain stack pushes and pops.\nstrict: A boolean indicating whether to stringently test whether each\npush and pop are matched. This is not always possible when taking\nhigher-order derivatives of code generated in split-motion.\n\nReturns:\nnode: The node passed in, but with pushes and pops annotated in AST nodes.", "source": "codesearchnet"}
{"code": "def sg_to_sparse(tensor, opt):\n    r\n    indices = tf.where(tf.not_equal(tensor.sg_float(), 0.))\n    return tf.SparseTensor(indices=indices,\n                           values=tf.gather_nd(tensor, indices) - 1,  \n                           dense_shape=tf.shape(tensor).sg_cast(dtype=tf.int64))", "docstring": "r\"\"\"Converts a dense tensor into a sparse tensor.\n\nSee `tf.SparseTensor()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` with zero-padding (automatically given by chain).\nopt:\nname: If provided, replace current tensor's name.\n\nReturns:\nA `SparseTensor`.", "source": "juraj-google-style"}
{"code": "def infer_inputs_from_restored_call_function(fn):\n\n    def common_spec(x, y):\n        common_shape = get_common_shape(x.shape, y.shape)\n        if isinstance(x, sparse_tensor.SparseTensorSpec):\n            return sparse_tensor.SparseTensorSpec(common_shape, x.dtype)\n        elif isinstance(x, ragged_tensor.RaggedTensorSpec):\n            return ragged_tensor.RaggedTensorSpec(common_shape, x.dtype)\n        return tensor_spec.TensorSpec(common_shape, x.dtype, x.name)\n    spec = fn.concrete_functions[0].structured_input_signature[0][0]\n    for concrete in fn.concrete_functions[1:]:\n        spec2 = concrete.structured_input_signature[0][0]\n        spec = nest.map_structure(common_spec, spec, spec2)\n    return spec", "docstring": "Returns TensorSpec of inputs from a restored call function.\n\nArgs:\nfn: Restored layer call function. It is assumed that `fn` has at least\none concrete function and that the inputs are in the first argument.\n\nReturns:\nTensorSpec of call function inputs.", "source": "github-repos"}
{"code": "def _Lock(self, path=None, force=False):\n    if self.lock is None:\n        self.lock = lock.PidFile(filename=path)\n    return self.lock.Lock(force=force)", "docstring": "Grab a system-wide lock for this command.\n\nCommands wishing to prevent concurrent operation can invoke this\nmethod to acquire a system-wide lock.  The lock will be\nautomatically released on object destruction, however an optional\nUnlock() method is provided for commands wishing a smaller scope\nof locking.\n\nArgs:\npath: optional path to lock file.\nforce: optional boolean to override existing locks.\nReturns:\nTrue if the lock was acquired.\nFalse if the lock was not.", "source": "github-repos"}
{"code": "def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]):\n    raw_lines = clean_lines.raw_lines\n    ParseNolintSuppressions(filename, raw_lines[line], line, error)\n    nesting_state.Update(filename, clean_lines, line, error)\n    CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error)\n    if nesting_state.InAsmBlock():\n        return\n    CheckForFunctionLengths(filename, clean_lines, line, function_state, error)\n    CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)\n    CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)\n    CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error)\n    CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)\n    CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error)\n    CheckVlogArguments(filename, clean_lines, line, error)\n    CheckPosixThreading(filename, clean_lines, line, error)\n    CheckInvalidIncrement(filename, clean_lines, line, error)\n    CheckMakePairUsesDeduction(filename, clean_lines, line, error)\n    CheckDefaultLambdaCaptures(filename, clean_lines, line, error)\n    CheckRedundantVirtual(filename, clean_lines, line, error)\n    CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)\n    for check_fn in extra_check_functions:\n        check_fn(filename, clean_lines, line, error)", "docstring": "Processes a single line in the file.\n\nArgs:\nfilename: Filename of the file that is being processed.\nfile_extension: The extension (dot not included) of the file.\nclean_lines: An array of strings, each representing a line of the file,\nwith comments stripped.\nline: Number of line being processed.\ninclude_state: An _IncludeState instance in which the headers are inserted.\nfunction_state: A _FunctionState instance which counts function lines, etc.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: A callable to which errors are reported, which takes 4 arguments:\nfilename, line number, error level, and message\nextra_check_functions: An array of additional check functions that will be\nrun on each source line. Each function takes 4\narguments: filename, clean_lines, line, error", "source": "codesearchnet"}
{"code": "def _ParsePlistKeyValue(self, knowledge_base, name, value):\n    \n    if not knowledge_base.GetValue('operating_system_version'):\n      if name in self._PLIST_KEYS:\n        knowledge_base.SetValue('operating_system_version', value)", "docstring": "Parses a plist key value.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nname (str): name of the plist key.\nvalue (str): value of the plist key.", "source": "juraj-google-style"}
{"code": "def AddKeywordsForName(self, name, keywords):\n    data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)", "docstring": "Associates keywords with name.\n\nRecords that keywords are associated with name.\n\nArgs:\nname: A name which should be associated with some keywords.\nkeywords: A collection of keywords to associate with name.", "source": "codesearchnet"}
{"code": "def get_class(schema_name):\n    global _registry_loaded\n    if (not _registry_loaded):\n        load_message_classes()\n    try:\n        return _schema_name_to_class[schema_name]\n    except KeyError:\n        _log.warning('The schema \"%s\" is not in the schema registry! Either install the package with its schema definition or define a schema. Falling back to the default schema...', schema_name)\n        return Message", "docstring": "Retrieve the message class associated with the schema name.\n\nIf no match is found, the default schema is returned and a warning is logged.\n\nArgs:\nschema_name (six.text_type): The name of the :class:`Message` sub-class;\nthis is typically the Python path.\n\nReturns:\nMessage: A sub-class of :class:`Message` to create the message from.", "source": "codesearchnet"}
{"code": "def _ProcessEvent(self, mediator, event):\n    \n    try:\n      self._analysis_plugin.ExamineEvent(mediator, event)\n\n    except Exception as exception:  \n      self.SignalAbort()\n\n      \n\n      if self._debug_output:\n        logger.warning('Unhandled exception while processing event object.')\n        logger.exception(exception)", "docstring": "Processes an event.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def convert_to_string(self, productions):\n    \n    symbols = []\n    for production in tf.unstack(productions, axis=1):\n      lhs, rhs = self.production_rules[tf.argmax(input=production, axis=-1)]\n      if not symbols:  \n        if lhs != self.start_symbol:\n          raise ValueError(\"`productions` must begin with `self.start_symbol`.\")\n        symbols = rhs\n      else:\n        \n        \n        index = symbols.index(lhs)\n        symbols = symbols[:index] + rhs + symbols[index + 1:]\n    string = \"\".join(symbols)\n    return string", "docstring": "Converts a sequence of productions into a string of terminal symbols.\n\nArgs:\nproductions: Tensor of shape [1, num_productions, num_production_rules].\nSlices along the `num_productions` dimension represent one-hot vectors.\n\nReturns:\nstr that concatenates all terminal symbols from `productions`.\n\nRaises:\nValueError: If the first production rule does not begin with\n`self.start_symbol`.", "source": "juraj-google-style"}
{"code": "def Page(self, text=None, show_percent=None):\n    if (text is not None):\n        self._text += text\n    if (show_percent is None):\n        show_percent = (text is None)\n    self._show_percent = show_percent\n    text = LineWrap(self._text).splitlines()\n    while True:\n        self._newlines = text[self._displayed:(self._displayed + self._lines_to_show)]\n        for line in self._newlines:\n            sys.stdout.write((line + '\\n'))\n            if (self._delay and (self._lastscroll > 0)):\n                time.sleep(0.005)\n        self._displayed += len(self._newlines)\n        self._currentpagelines += len(self._newlines)\n        if (self._currentpagelines >= self._lines_to_show):\n            self._currentpagelines = 0\n            wish = self._AskUser()\n            if (wish == 'q'):\n                return False\n            elif (wish == 'g'):\n                self._Scroll(((len(text) - self._displayed) + 1))\n            elif (wish == '\\r'):\n                self._Scroll(1)\n            elif (wish == '\\x1b[B'):\n                self._Scroll(1)\n            elif (wish == '\\x1b[A'):\n                self._Scroll((- 1))\n            elif (wish == 'b'):\n                self._Scroll((0 - self._cli_lines))\n            else:\n                self._Scroll()\n        if (self._displayed >= len(text)):\n            break\n    return True", "docstring": "Page text.\n\nContinues to page through any text supplied in the constructor. Also, any\ntext supplied to this method will be appended to the total text to be\ndisplayed. The method returns when all available text has been displayed to\nthe user, or the user quits the pager.\n\nArgs:\ntext: A string, extra text to be paged.\nshow_percent: A boolean, if True, indicate how much is displayed so far.\nIf None, this behaviour is 'text is None'.\n\nReturns:\nA boolean. If True, more data can be displayed to the user. False\nimplies that the user has quit the pager.", "source": "codesearchnet"}
{"code": "def _CreateRoutesFolder(self, schedule, doc, route_type=None):\n\n    def GetRouteName(route):\n        'Return a placemark name for the route.\\n\\n      Args:\\n        route: The transitfeed.Route instance.\\n\\n      Returns:\\n        The name as a string.\\n      '\n        name_parts = []\n        if route.route_short_name:\n            name_parts.append(('<b>%s</b>' % route.route_short_name))\n        if route.route_long_name:\n            name_parts.append(route.route_long_name)\n        return (' - '.join(name_parts) or route.route_id)\n\n    def GetRouteDescription(route):\n        'Return a placemark description for the route.\\n\\n      Args:\\n        route: The transitfeed.Route instance.\\n\\n      Returns:\\n        The description as a string.\\n      '\n        desc_items = []\n        if route.route_desc:\n            desc_items.append(route.route_desc)\n        if route.route_url:\n            desc_items.append(('Route info page: <a href=\"%s\">%s</a>' % (route.route_url, route.route_url)))\n        description = '<br/>'.join(desc_items)\n        return (description or None)\n    routes = [route for route in schedule.GetRouteList() if ((route_type is None) or (route.route_type == route_type))]\n    if (not routes):\n        return None\n    routes.sort(key=(lambda x: GetRouteName(x)))\n    if (route_type is not None):\n        route_type_names = {0: 'Tram, Streetcar or Light rail', 1: 'Subway or Metro', 2: 'Rail', 3: 'Bus', 4: 'Ferry', 5: 'Cable car', 6: 'Gondola or suspended cable car', 7: 'Funicular'}\n        type_name = route_type_names.get(route_type, str(route_type))\n        folder_name = ('Routes - %s' % type_name)\n    else:\n        folder_name = 'Routes'\n    routes_folder = self._CreateFolder(doc, folder_name, visible=False)\n    for route in routes:\n        style_id = self._CreateStyleForRoute(doc, route)\n        route_folder = self._CreateFolder(routes_folder, GetRouteName(route), description=GetRouteDescription(route))\n        self._CreateRouteShapesFolder(schedule, route_folder, route, style_id, False)\n        self._CreateRoutePatternsFolder(route_folder, route, style_id, False)\n        if self.show_trips:\n            self._CreateRouteTripsFolder(route_folder, route, style_id, schedule)\n    return routes_folder", "docstring": "Create a KML Folder containing routes in a schedule.\n\nThe folder contains a subfolder for each route in the schedule of type\nroute_type. If route_type is None, then all routes are selected. Each\nsubfolder contains a flattened graph placemark, a route shapes placemark\nand, if show_trips is True, a subfolder containing placemarks for each of\nthe trips in the route.\n\nIf there are no routes in the schedule then no folder is created and None\nis returned.\n\nArgs:\nschedule: The transitfeed.Schedule instance.\ndoc: The KML Document ElementTree.Element instance.\nroute_type: The route type integer or None.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "codesearchnet"}
{"code": "def sequence_path(self, fasta_path):\n        \n        if not fasta_path:\n            self.sequence_dir = None\n            self.sequence_file = None\n\n        else:\n            if not op.exists(fasta_path):\n                raise OSError('{}: file does not exist'.format(fasta_path))\n\n            if not op.dirname(fasta_path):\n                self.sequence_dir = '.'\n            else:\n                self.sequence_dir = op.dirname(fasta_path)\n            self.sequence_file = op.basename(fasta_path)\n\n            tmp_sr = SeqIO.read(fasta_path, 'fasta')\n            if self.name == '<unknown name>':\n                self.name = tmp_sr.name\n            if self.description == '<unknown description>':\n                self.description = tmp_sr.description\n            if not self.dbxrefs:\n                self.dbxrefs = tmp_sr.dbxrefs\n            if not self.features:\n                self.features = tmp_sr.features\n            if not self.annotations:\n                self.annotations = tmp_sr.annotations\n            if not self.letter_annotations:\n                self.letter_annotations = tmp_sr.letter_annotations", "docstring": "Provide pointers to the paths of the FASTA file\n\nArgs:\nfasta_path: Path to FASTA file", "source": "juraj-google-style"}
{"code": "def explicit_method_override(method):\n    setattr(method, '__explicit_override__', True)\n    return method", "docstring": "Decorator that marks a member method as explicitly overridden.\n\nIn PyGlove, many methods are managed by the framework - for example -\n``pg.Object.__init__``. It's easy for users to override these methods\nunconsciously. Therefore, we introduce this decorator to catch error at\nthe first place when such overrides incidentally take place, while allowing\nadvanced users to override them.\n\nUsage::\n\nclass Foo(pg.Object):\n\n@pg.explicit_method_override\ndef __init__(self, *args, **kwargs):\n...\n\nArgs:\nmethod: method to explicitly overriden.\n\nReturns:\nThe original method with an explicit overriden stamp.", "source": "github-repos"}
{"code": "def document(self, name, file_name, **kwargs):\n    group_obj = Document(name, file_name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Document data to Batch object.\n\nArgs:\nname (str): The name for this Group.\nfile_name (str): The name for the attached file for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nfile_content (str;method, kwargs): The file contents or callback method to retrieve\nfile content.\nmalware (bool, kwargs): If true the file is considered malware.\npassword (bool, kwargs): If malware is true a password for the zip archive is\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Document.", "source": "codesearchnet"}
{"code": "def rhombohedral(a: float, alpha: float):\n        \n        return Lattice.from_parameters(a, a, a, alpha, alpha, alpha)", "docstring": "Convenience constructor for a rhombohedral lattice.\n\nArgs:\na (float): *a* lattice parameter of the rhombohedral cell.\nalpha (float): Angle for the rhombohedral lattice in degrees.\n\nReturns:\nRhombohedral lattice of dimensions a x a x a.", "source": "juraj-google-style"}
{"code": "def setupSerialPort(loopback, port):\n    \n    if loopback:\n        \n        testSerial = SerialTestClass()\n        serialPort = testSerial.serialPort\n    else:\n        \n        serialPort = serial.Serial(port, 115200, timeout=0)\n\n    return serialPort", "docstring": "Sets up serial port by connecting to phsyical or software port.\n\nDepending on command line options, this function will either connect to a\nSerialTestClass() port for loopback testing or to the specified port from\nthe command line option. If loopback is True it overrides the physical port\nspecification.\n\nArgs:\nloopback: argparse option\nport: argparse option\n\nReturns:\nserialPort: Pyserial serial port instance", "source": "juraj-google-style"}
{"code": "def fit(self, volumes, energies):\n    eos_fit = self.model(np.array(volumes), np.array(energies))\n    eos_fit.fit()\n    return eos_fit", "docstring": "Fit energies as function of volumes.\n\nArgs:\nvolumes (list/np.array)\nenergies (list/np.array)\n\nReturns:\nEOSBase: EOSBase object", "source": "codesearchnet"}
{"code": "def _ModifyInterface(\n      self, interface_config, config_key, config_value, replace=False):\n    \n    config_entry = '%s=%s' % (config_key, config_value)\n    if not open(interface_config).read().count(config_key):\n      with open(interface_config, 'a') as config:\n        config.write('%s\\n' % config_entry)\n    elif replace:\n      for line in fileinput.input(interface_config, inplace=True):\n        print(re.sub(r'%s=.*' % config_key, config_entry, line.rstrip()))", "docstring": "Write a value to a config file if not already present.\n\nArgs:\ninterface_config: string, the path to a config file.\nconfig_key: string, the configuration key to set.\nconfig_value: string, the value to set for the configuration key.\nreplace: bool, replace the configuration option if already present.", "source": "juraj-google-style"}
{"code": "def square(duration: int, amp: complex, period: float = None,\n           phase: float = 0, name: str = None) -> SamplePulse:\n    \n    if period is None:\n        period = duration\n\n    return _sampled_square_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates square wave `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def scalar_projection(v1, v2):\n    return (np.dot(v1, v2) / np.linalg.norm(v2))", "docstring": "compute the scalar projection of v1 upon v2\n\nArgs:\nv1, v2: iterable\nindices 0, 1, 2 corresponding to cartesian coordinates\n\nReturns:\n3-vector of the projection of point p onto the direction of v", "source": "codesearchnet"}
{"code": "class CustomHFIndex(HFIndexBase):\n\n    def __init__(self, vector_size: int, dataset, index_path=None):\n        requires_backends(self, ['faiss'])\n        super().__init__(vector_size, dataset, index_initialized=index_path is None)\n        self.index_path = index_path\n\n    @classmethod\n    def load_from_disk(cls, vector_size, dataset_path, index_path):\n        logger.info(f'Loading passages from {dataset_path}')\n        if dataset_path is None or index_path is None:\n            raise ValueError(\"Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` and `dataset.get_index('embeddings').save(index_path)`.\")\n        dataset = load_from_disk(dataset_path)\n        return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)\n\n    def init_index(self):\n        if not self.is_initialized():\n            logger.info(f'Loading index from {self.index_path}')\n            self.dataset.load_faiss_index('embeddings', file=self.index_path)\n            self._index_initialized = True", "docstring": "A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the\nindicated paths on disk.\n\nArgs:\nvector_size (`int`): the dimension of the passages embeddings used by the index\ndataset_path (`str`):\nThe path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and\nembeddings (arrays of dimension vector_size)\nindex_path (`str`)\nThe path to the serialized faiss index on disk.", "source": "github-repos"}
{"code": "def score(self, data, metric='accuracy', break_ties='random', verbose=True, print_confusion_matrix=True, **kwargs):\n    (Y_p, Y, Y_s) = self._get_predictions(data, break_ties=break_ties, return_probs=True, **kwargs)\n    return_list = isinstance(metric, list)\n    metric_list = (metric if isinstance(metric, list) else [metric])\n    scores = []\n    for metric in metric_list:\n        score = metric_score(Y, Y_p, metric, probs=Y_s, ignore_in_gold=[0])\n        scores.append(score)\n        if verbose:\n            print(f'{metric.capitalize()}: {score:.3f}')\n    if (print_confusion_matrix and verbose):\n        confusion_matrix(Y, Y_p, pretty_print=True)\n    if ((len(scores) == 1) and (not return_list)):\n        return scores[0]\n    else:\n        return scores", "docstring": "Scores the predictive performance of the Classifier on all tasks\n\nArgs:\ndata: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y):\nX: The input for the predict method\nY: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels\nin {1,...,k}\nmetric: A metric (string) with which to score performance or a\nlist of such metrics\nbreak_ties: A tie-breaking policy (see Classifier._break_ties())\nverbose: The verbosity for just this score method; it will not\nupdate the class config.\nprint_confusion_matrix: Print confusion matrix (overwritten to False if\nverbose=False)\n\nReturns:\nscores: A (float) score or a list of such scores if kwarg metric\nis a list", "source": "codesearchnet"}
{"code": "def add_comment(self, comment):\n    if (not comment):\n        return\n    self.__comments[comment.name] = comment\n    self.comment_added_signal(self, comment)", "docstring": "Add a comment to the database.\n\nArgs:\ncomment (hotdoc.core.Comment): comment to add", "source": "codesearchnet"}
{"code": "def from_callable(cls, fn: Callable) -> Optional['IOTypeHints']:\n    if _disable_from_callable or getattr(fn, '_beam_no_annotations', False):\n        return None\n    signature = get_signature(fn)\n    if all((param.annotation == param.empty for param in signature.parameters.values())) and signature.return_annotation == signature.empty:\n        return None\n    input_args = []\n    input_kwargs = {}\n    for param in signature.parameters.values():\n        if param.annotation == param.empty:\n            if param.kind == param.VAR_POSITIONAL:\n                input_args.append(_ANY_VAR_POSITIONAL)\n            elif param.kind == param.VAR_KEYWORD:\n                input_kwargs[param.name] = _ANY_VAR_KEYWORD\n            elif param.kind == param.KEYWORD_ONLY:\n                input_kwargs[param.name] = typehints.Any\n            else:\n                input_args.append(typehints.Any)\n        elif param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:\n            input_kwargs[param.name] = convert_to_beam_type(param.annotation)\n        else:\n            assert param.kind in [param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD, param.VAR_POSITIONAL], 'Unsupported Parameter kind: %s' % param.kind\n            input_args.append(convert_to_beam_type(param.annotation))\n    output_args = []\n    if signature.return_annotation != signature.empty:\n        output_args.append(convert_to_beam_type(signature.return_annotation))\n    else:\n        output_args.append(typehints.Any)\n    name = getattr(fn, '__name__', '<unknown>')\n    msg = ['from_callable(%s)' % name, '  signature: %s' % signature]\n    if hasattr(fn, '__code__'):\n        msg.append('  File \"%s\", line %d' % (fn.__code__.co_filename, fn.__code__.co_firstlineno))\n    return IOTypeHints(input_types=(tuple(input_args), input_kwargs), output_types=(tuple(output_args), {}), origin=cls._make_origin([], tb=False, msg=msg))", "docstring": "Construct an IOTypeHints object from a callable's signature.\n\nSupports Python 3 annotations. For partial annotations, sets unknown types\nto Any, _ANY_VAR_POSITIONAL, or _ANY_VAR_KEYWORD.\n\nReturns:\nA new IOTypeHints or None if no annotations found.", "source": "github-repos"}
{"code": "def del_hparam(self, name):\n    \n    if hasattr(self, name):\n      delattr(self, name)\n      del self._hparam_types[name]", "docstring": "Removes the hyperparameter with key 'name'.\n\nDoes nothing if it isn't present.\n\nArgs:\nname: Name of the hyperparameter.", "source": "juraj-google-style"}
{"code": "def create_magic_packet(macaddress):\n    \n    if len(macaddress) == 12:\n        pass\n    elif len(macaddress) == 17:\n        sep = macaddress[2]\n        macaddress = macaddress.replace(sep, '')\n    else:\n        raise ValueError('Incorrect MAC address format')\n\n    \n    data = b'FFFFFFFFFFFF' + (macaddress * 16).encode()\n    send_data = b''\n\n    \n    for i in range(0, len(data), 2):\n        send_data += struct.pack(b'B', int(data[i: i + 2], 16))\n    return send_data", "docstring": "Create a magic packet.\n\nA magic packet is a packet that can be used with the for wake on lan\nprotocol to wake up a computer. The packet is constructed from the\nmac address given as a parameter.\n\nArgs:\nmacaddress (str): the mac address that should be parsed into a\nmagic packet.", "source": "juraj-google-style"}
{"code": "def trim_whitespace(self, text):\n        \n\n        lines = text.split('\\n')\n        new_lines = [x.lstrip() for x in lines]\n\n        return '\\n'.join(new_lines)", "docstring": "Remove leading whitespace from each line of a multiline string\n\nArgs:\ntext (string): The text to be unindented\n\nReturns:\nstring: The unindented block of text", "source": "juraj-google-style"}
{"code": "def MakeJoint(pmf1, pmf2):\n    joint = Joint()\n    for (v1, p1) in pmf1.Items():\n        for (v2, p2) in pmf2.Items():\n            joint.Set((v1, v2), (p1 * p2))\n    return joint", "docstring": "Joint distribution of values from pmf1 and pmf2.\n\nArgs:\npmf1: Pmf object\npmf2: Pmf object\n\nReturns:\nJoint pmf of value pairs", "source": "codesearchnet"}
{"code": "def get_snpeff_info(snpeff_string, snpeff_header):\n    \n    \n    snpeff_annotations = [\n        dict(zip(snpeff_header, snpeff_annotation.split('|'))) \n        for snpeff_annotation in snpeff_string.split(',')\n    ]\n    \n    return snpeff_annotations", "docstring": "Make the vep annotations into a dictionaries\n\nA snpeff dictionary will have the snpeff column names as keys and\nthe vep annotations as values.\nThe dictionaries are stored in a list.\nOne dictionary for each transcript.\n\nArgs:\nsnpeff_string (string): A string with the ANN annotation\nsnpeff_header (list): A list with the vep header\n\nReturn:\nsnpeff_annotations (list): A list of vep dicts", "source": "juraj-google-style"}
{"code": "def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):\n\n    def populate_sweep_param(scripts, parameter_list, trace=''):\n        \"\\n\\n            Args:\\n                scripts: a dict of {'class name': <class object>} pairs\\n\\n            Returns: A list of all parameters of the input scripts\\n\\n            \"\n\n        def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):\n            '\\n                appends keys in the dict to a list in the form trace.key.subkey.subsubkey...\\n                Args:\\n                    trace: initial prefix (path through scripts and parameters to current location)\\n                    dic: dictionary\\n                    parameter_list: list to which append the parameters\\n\\n                    valid_values: valid values of dictionary values if None dic should be a dictionary\\n\\n                Returns:\\n\\n                '\n            if ((valid_values is None) and isinstance(dic, Parameter)):\n                valid_values = dic.valid_values\n            for (key, value) in dic.items():\n                if isinstance(value, dict):\n                    parameter_list = get_parameter_from_dict(((trace + '.') + key), value, parameter_list, dic.valid_values[key])\n                elif ((valid_values[key] in (float, int)) or (isinstance(valid_values[key], list) and (valid_values[key][0] in (float, int)))):\n                    parameter_list.append(((trace + '.') + key))\n                else:\n                    print(('ignoring sweep parameter', key))\n            return parameter_list\n        for script_name in list(scripts.keys()):\n            from pylabcontrol.core import ScriptIterator\n            script_trace = trace\n            if (script_trace == ''):\n                script_trace = script_name\n            else:\n                script_trace = ((script_trace + '->') + script_name)\n            if issubclass(scripts[script_name], ScriptIterator):\n                populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list, trace=script_trace)\n            else:\n                for setting in [elem[1] for elem in inspect.getmembers(scripts[script_name]) if (elem[0] == '_DEFAULT_SETTINGS')][0]:\n                    parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)\n        return parameter_list\n    if (iterator_type == 'loop'):\n        script_default_settings = [Parameter('script_order', script_order), Parameter('script_execution_freq', script_execution_freq), Parameter('num_loops', 0, int, 'times the subscripts will be executed'), Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')]\n    elif (iterator_type == 'sweep'):\n        sweep_params = populate_sweep_param(sub_scripts, [])\n        script_default_settings = [Parameter('script_order', script_order), Parameter('script_execution_freq', script_execution_freq), Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'), Parameter('sweep_range', [Parameter('min_value', 0, float, 'min parameter value'), Parameter('max_value', 0, float, 'max parameter value'), Parameter('N/value_step', 0, float, 'either number of steps or parameter value step, depending on mode')]), Parameter('stepping_mode', 'N', ['N', 'value_step'], 'Switch between number of steps and step amount'), Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')]\n    
else:\n        print(('unknown iterator type ' + iterator_type))\n        raise TypeError(('unknown iterator type ' + iterator_type))\n    return script_default_settings", "docstring": "assigning the actual script settings depending on the iterator type\n\nthis might be overwritten by classes that inherit form ScriptIterator\n\nArgs:\nsub_scripts: dictionary with the subscripts\nscript_order: execution order of subscripts\nscript_execution_freq: execution frequency of subscripts\n\nReturns:\nthe default setting for the iterator", "source": "codesearchnet"}
{"code": "def get_ituz(self, callsign, timestamp=timestamp_now):\n        \n        return self.get_all(callsign, timestamp)[const.ITUZ]", "docstring": "Returns ITU Zone of a callsign\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: containing the callsign's CQ Zone\n\nRaises:\nKeyError: No ITU Zone found for callsign\n\nNote:\nCurrently, only Country-files.com lookup database contains ITU Zones", "source": "juraj-google-style"}
{"code": "def plot_hall_carriers(self, temp=300):\n    import matplotlib.pyplot as plt\n    hall_carriers = [abs(i) for i in self._bz.get_hall_carrier_concentration()[temp]]\n    plt.semilogy(self._bz.mu_steps, hall_carriers, linewidth=3.0, color='r')\n    self._plot_bg_limits()\n    self._plot_doping(temp)\n    plt.xlim((- 0.5), (self._bz.gap + 0.5))\n    plt.ylim(100000000000000.0, 1e+22)\n    plt.ylabel('Hall carrier concentration (cm-3)', fontsize=30.0)\n    plt.xlabel('E-E$_f$ (eV)', fontsize=30)\n    plt.xticks(fontsize=25)\n    plt.yticks(fontsize=25)\n    return plt", "docstring": "Plot the Hall carrier concentration in function of Fermi level\n\nArgs:\ntemp: the temperature\n\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def get_sources(self, prefix=''):\n        \n        prefix = prefix.replace('-', '_')\n        prefixed = '%s_sources' % prefix\n\n        if prefixed in self.__cli:\n            sources = self.__cli.get(prefixed)\n            from_conf = False\n        else:\n            sources = self.__config.get(prefixed)\n            from_conf = True\n\n        if sources is None:\n            return OrderedSet()\n\n        sources = self.__resolve_patterns(sources, from_conf)\n\n        prefixed = '%s_source_filters' % prefix\n        if prefixed in self.__cli:\n            filters = self.__cli.get(prefixed)\n            from_conf = False\n        else:\n            filters = self.__config.get(prefixed)\n            from_conf = True\n\n        if filters is None:\n            return sources\n\n        sources -= self.__resolve_patterns(filters, from_conf)\n\n        return sources", "docstring": "Retrieve a set of absolute paths to sources, according to `prefix`\n\n`ConfigParser` will perform wildcard expansion and\nfiltering.\n\nArgs:\nprefix: str, the desired prefix.\n\nReturns:\nutils.utils.OrderedSet: The set of sources for the given\n`prefix`.", "source": "juraj-google-style"}
{"code": "def get_memory_growth(device):\n    return context.context().get_memory_growth(device)", "docstring": "Get if memory growth is enabled for a `PhysicalDevice`.\n\nIf memory growth is enabled for a `PhysicalDevice`, the runtime initialization\nwill not allocate all memory on the device.\n\nFor example:\n\n>>> physical_devices = tf.config.list_physical_devices('GPU')\n>>> try:\n...   tf.config.experimental.set_memory_growth(physical_devices[0], True)\n...   assert tf.config.experimental.get_memory_growth(physical_devices[0])\n... except:\n...   # Invalid device or cannot modify virtual devices once initialized.\n...   pass\n\nArgs:\ndevice: `PhysicalDevice` to query\n\nReturns:\nA boolean indicating the memory growth setting for the `PhysicalDevice`.\n\nRaises:\nValueError: Invalid `PhysicalDevice` specified.", "source": "github-repos"}
{"code": "def seek_to_end(self, *partitions):\n    if (not all([isinstance(p, TopicPartition) for p in partitions])):\n        raise TypeError('partitions must be TopicPartition namedtuples')\n    if (not partitions):\n        partitions = self._subscription.assigned_partitions()\n        assert partitions, 'No partitions are currently assigned'\n    else:\n        for p in partitions:\n            assert (p in self._subscription.assigned_partitions()), 'Unassigned partition'\n    for tp in partitions:\n        log.debug('Seeking to end of partition %s', tp)\n        self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)", "docstring": "Seek to the most recent available offset for partitions.\n\nArguments:\n*partitions: Optionally provide specific TopicPartitions, otherwise\ndefault to all assigned partitions.\n\nRaises:\nAssertionError: If any partition is not currently assigned, or if\nno partitions are assigned.", "source": "codesearchnet"}
{"code": "def _get_shoulds(options):\n    \n    if options.version == '2.0':\n        return shoulds20.list_shoulds(options)\n    else:\n        return shoulds21.list_shoulds(options)", "docstring": "Return the list of 'SHOULD' validators for the correct version of STIX.\n\nArgs:\noptions: ValidationOptions instance with validation options for this\nvalidation run, including the STIX spec version.", "source": "juraj-google-style"}
{"code": "def __init__(\n            self,\n            input_columns: t.List[Column],\n            output_columns: t.List[Column],\n            column_transform,) -> None:\n        \n        self.input_columns = input_columns\n        self.output_columns = output_columns\n        self.column_transform = column_transform", "docstring": "Construct a new ``CompoundColumn`` object.\n\nArgs:\ninput_columns (list, Column): A list of ``Column`` objects representing column(s) from the SOURCE table.\noutput_columns (list, Column): A list of ``Column`` objects representing column(s) from the FINAL table.\ncolumn_transform (Callable): Function accepting the table object, performing transformations to it and returning a DataFrame containing the NEW columns only.", "source": "juraj-google-style"}
{"code": "def imfrombytes(content, flag='color'):\n    img_np = np.frombuffer(content, np.uint8)\n    flag = (imread_flags[flag] if is_str(flag) else flag)\n    img = cv2.imdecode(img_np, flag)\n    return img", "docstring": "Read an image from bytes.\n\nArgs:\ncontent (bytes): Image bytes got from files or other streams.\nflag (str): Same as :func:`imread`.\n\nReturns:\nndarray: Loaded image array.", "source": "codesearchnet"}
{"code": "def orth_chol(order, dist, normed=True, sort='GR', cross_truncation=1.0, **kws):\n    dim = len(dist)\n    basis = chaospy.poly.basis(start=1, stop=order, dim=dim, sort=sort, cross_truncation=cross_truncation)\n    length = len(basis)\n    cholmat = chaospy.chol.gill_king(chaospy.descriptives.Cov(basis, dist))\n    cholmat_inv = numpy.linalg.inv(cholmat.T).T\n    if (not normed):\n        diag_mesh = numpy.repeat(numpy.diag(cholmat_inv), len(cholmat_inv))\n        cholmat_inv /= diag_mesh.reshape(cholmat_inv.shape)\n    coefs = numpy.empty(((length + 1), (length + 1)))\n    coefs[(1:, 1:)] = cholmat_inv\n    coefs[(0, 0)] = 1\n    coefs[(0, 1:)] = 0\n    expected = (- numpy.sum((cholmat_inv * chaospy.descriptives.E(basis, dist, **kws)), (- 1)))\n    coefs[(1:, 0)] = expected\n    coefs = coefs.T\n    out = {}\n    out[((0,) * dim)] = coefs[0]\n    for idx in range(length):\n        index = basis[idx].keys[0]\n        out[index] = coefs[(idx + 1)]\n    polynomials = chaospy.poly.Poly(out, dim, coefs.shape[1:], float)\n    return polynomials", "docstring": "Create orthogonal polynomial expansion from Cholesky decomposition.\n\nArgs:\norder (int):\nOrder of polynomial expansion\ndist (Dist):\nDistribution space where polynomials are orthogonal\nnormed (bool):\nIf True orthonormal polynomials will be used instead of monic.\nsort (str):\nOrdering argument passed to poly.basis.  If custom basis is used,\nargument is ignored.\ncross_truncation (float):\nUse hyperbolic cross truncation scheme to reduce the number of\nterms in expansion.\n\nExamples:\n>>> Z = chaospy.Normal()\n>>> print(chaospy.around(chaospy.orth_chol(3, Z), 4))\n[1.0, q0, 0.7071q0^2-0.7071, 0.4082q0^3-1.2247q0]", "source": "codesearchnet"}
{"code": "def as_dict(self):\n    return {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'r': jsanitize(self.r), 'energies': jsanitize(self.energies), 'forces': jsanitize(self.forces), 'structures': [s.as_dict() for s in self.structures]}", "docstring": "Dict representation of NEBAnalysis.\n\nReturns:\nJSON serializable dict representation.", "source": "codesearchnet"}
{"code": "def ConvertStringToFilename(name):\n    return re.sub('\\\\W', (lambda x: ('%%%02X' % ord(x.group(0)))), name, flags=re.UNICODE).rstrip('/')", "docstring": "Converts an unicode string to a filesystem safe filename.\n\nFor maximum compatibility we escape all chars which are not alphanumeric (in\nthe unicode sense).\n\nArgs:\nname: a unicode string that is part of a subject.\n\nReturns:\nA safe filename with escaped special chars.", "source": "codesearchnet"}
{"code": "def __init__(self, config_files, use_tc=None, **kwargs):\n        \n        super(VIIRSSDRReader, self).__init__(config_files, **kwargs)\n        self.use_tc = use_tc", "docstring": "Initialize file reader and adjust geolocation preferences.\n\nArgs:\nconfig_files (iterable): yaml config files passed to base class\nuse_tc (boolean): If `True` use the terrain corrected\nfiles. If `False`, switch to non-TC files. If\n`None` (default), use TC if available, non-TC otherwise.", "source": "juraj-google-style"}
{"code": "def delete(filething):\n    \n\n    f = FLAC(filething)\n    filething.fileobj.seek(0)\n    f.delete(filething)", "docstring": "Remove tags from a file.\n\nArgs:\nfilething (filething)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def __init__(self, config: Dict[str, str], default_level: str):\n        \n        self._should_log: Dict[Tuple[str, str], bool] = {}\n        \n        self._default_level = config.get('', default_level)\n        self._log_rules = [\n            (logger.split('.') if logger else list(), level)\n            for logger, level in config.items()\n        ]", "docstring": "Initializes a new `LogFilter`\n\nArgs:\nconfig: Dictionary mapping module names to logging level\ndefault_level: The default logging level", "source": "juraj-google-style"}
{"code": "def make_json_formatted_for_single_chart(mutant_features, inference_result_proto, index_to_mutate):\n    x_label = 'step'\n    y_label = 'scalar'\n    if isinstance(inference_result_proto, classification_pb2.ClassificationResponse):\n        series = {}\n        for (idx, classification) in enumerate(inference_result_proto.result.classifications):\n            mutant_feature = mutant_features[(idx % len(mutant_features))]\n            for (class_index, classification_class) in enumerate(classification.classes):\n                if (classification_class.label == ''):\n                    classification_class.label = str(class_index)\n                if ((len(classification.classes) == 2) and (classification_class.label == '0')):\n                    continue\n                key = classification_class.label\n                if index_to_mutate:\n                    key += (' (index %d)' % index_to_mutate)\n                if (not (key in series)):\n                    series[key] = {}\n                if (not (mutant_feature.mutant_value in series[key])):\n                    series[key][mutant_feature.mutant_value] = []\n                series[key][mutant_feature.mutant_value].append(classification_class.score)\n        return_series = collections.defaultdict(list)\n        for (key, mutant_values) in iteritems(series):\n            for (value, y_list) in iteritems(mutant_values):\n                return_series[key].append({x_label: value, y_label: (sum(y_list) / float(len(y_list)))})\n            return_series[key].sort(key=(lambda p: p[x_label]))\n        return return_series\n    elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):\n        points = {}\n        for (idx, regression) in enumerate(inference_result_proto.result.regressions):\n            mutant_feature = mutant_features[(idx % len(mutant_features))]\n            if (not (mutant_feature.mutant_value in points)):\n                points[mutant_feature.mutant_value] = []\n            points[mutant_feature.mutant_value].append(regression.value)\n        key = 'value'\n        if (index_to_mutate != 0):\n            key += (' (index %d)' % index_to_mutate)\n        list_of_points = []\n        for (value, y_list) in iteritems(points):\n            list_of_points.append({x_label: value, y_label: (sum(y_list) / float(len(y_list)))})\n        list_of_points.sort(key=(lambda p: p[x_label]))\n        return {key: list_of_points}\n    else:\n        raise NotImplementedError('Only classification and regression implemented.')", "docstring": "Returns JSON formatted for a single mutant chart.\n\nArgs:\nmutant_features: An iterable of `MutantFeatureValue`s representing the\nX-axis.\ninference_result_proto: A ClassificationResponse or RegressionResponse\nreturned by Servo, representing the Y-axis.\nIt contains one 'classification' or 'regression' for every Example that\nwas sent for inference. The length of that field should be the same length\nof mutant_features.\nindex_to_mutate: The index of the feature being mutated for this chart.\n\nReturns:\nA JSON-able dict for rendering a single mutant chart, parseable by\n`vz-line-chart` or `vz-bar-chart`.", "source": "codesearchnet"}
{"code": "def decode(obj, content_type):\n    \n    \n    try:\n        decoder = _decoders_map[content_type]\n        return decoder(obj)\n    except KeyError:\n        raise _errors.UnsupportedFormatError(content_type)", "docstring": "Decode an object ton a one of the default content types to a numpy array.\n\nArgs:\nobj (object): to be decoded.\ncontent_type (str): content type to be used.\n\nReturns:\nnp.array: decoded object.", "source": "juraj-google-style"}
{"code": "def _process_assignments(self, feed_item, creative_assignments, placement_assignments, event_tag_assignments, campaign):\n    assigned_creatives = []\n    assigned_placements = []\n    assigned_event_tags = []\n    for assignment in feed_item['creative_assignment']:\n        creative = self._creative_dao.get(assignment, required=True)\n        assignment[FieldMap.CREATIVE_ID] = creative['id']\n        if not creative['id'] in assigned_creatives:\n            assigned_creatives.append(creative['id'])\n            sequence = assignment.get(FieldMap.CREATIVE_ROTATION_SEQUENCE, None)\n            weight = assignment.get(FieldMap.CREATIVE_ROTATION_WEIGHT, None)\n            sequence = sequence if type(sequence) is int else None\n            weight = weight if type(weight) is int else None\n            if assignment.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, ''):\n                startTime = assignment.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, '') if 'T' in assignment.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, '') else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, None))\n                assignment[FieldMap.AD_CREATIVE_ROTATION_START_TIME] = startTime\n            else:\n                startTime = None\n            if assignment.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, ''):\n                endTime = assignment.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, '') if 'T' in assignment.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, '') else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, None), '23:59:59')\n                assignment[FieldMap.AD_CREATIVE_ROTATION_END_TIME] = endTime\n            else:\n                endTime = None\n            lp = None\n            if assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT':\n                lp = self._landing_page_dao.get(assignment, required=True)\n            else:\n                lp = self._landing_page_dao.get({FieldMap.AD_LANDING_PAGE_ID: campaign['defaultLandingPageId']}, required=True)\n            creative_assignment = {'active': True, 'sequence': sequence, 'weight': weight, 'creativeId': assignment.get(FieldMap.CREATIVE_ID, None), 'startTime': startTime, 'endTime': endTime, 'clickThroughUrl': {'defaultLandingPage': False if (assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') or assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')) and assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT' else True, 'landingPageId': lp.get('id', None) if lp else None, 'customClickThroughUrl': assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')}}\n            if creative.get('exitCustomEvents'):\n                creative_assignment['richMediaExitOverrides'] = []\n                if assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') or assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, ''):\n                    for exit_custom_event in creative.get('exitCustomEvents', []):\n                        creative_assignment['richMediaExitOverrides'].append({'exitId': exit_custom_event['id'], 'enabled': True, 'clickThroughUrl': {'defaultLandingPage': False if (assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') or assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')) and assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT' else True, 'landingPageId': lp.get('id', None) if lp else None, 'customClickThroughUrl': assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')}})\n            
creative_assignments.append(creative_assignment)\n    for assignment in feed_item['placement_assignment']:\n        placement = self._placement_dao.get(assignment, required=True)\n        if placement:\n            assignment[FieldMap.PLACEMENT_ID] = placement['id']\n            if not placement['id'] in assigned_placements:\n                assigned_placements.append(placement['id'])\n                placement_assignments.append({'active': True, 'placementId': assignment.get(FieldMap.PLACEMENT_ID, None)})\n    event_tags = [{'assignment': item, 'event_tag': self._event_tag_dao.get(item, required=True)} for item in feed_item['event_tag_assignment']]\n    event_tags += [{'assignment': item, 'event_tag': self._event_tag_dao.get(item, required=True)} for item in feed_item['placement_event_tag_profile']]\n    for item in event_tags:\n        assignment = item['assignment']\n        event_tag = item['event_tag']\n        if event_tag:\n            assignment[FieldMap.EVENT_TAG_ID] = event_tag['id']\n            if not event_tag['id'] in assigned_event_tags:\n                assigned_event_tags.append(event_tag['id'])\n                event_tag_assignments.append({'id': event_tag['id'], 'enabled': assignment.get(FieldMap.EVENT_TAG_ENABLED, True)})", "docstring": "Updates the ad by setting the values of child objects based on secondary feeds.\n\nArgs:\nfeed_item: Feed item representing the ad from the Bulkdozer feed.\ncreative_assignments: Feed items representing creative assignments related\nwith the current ad.\nplacement_assignments: Feed items representing placement assignments\nrelated with the current ad.\nevent_tag_assignments: Feed items representing event tag assignments\nrelated with the current ad.", "source": "github-repos"}
{"code": "def initialize(self, table):\n    check_table_dtypes(table, self._keys.dtype, self._values.dtype)\n    with ops.name_scope(self._name, values=(table.resource_handle, self._keys, self._values)):\n        init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle, self._keys, self._values)\n    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n    return init_op", "docstring": "Initializes the given `table` with `keys` and `values` tensors.\n\nArgs:\ntable: The table to initialize.\n\nReturns:\nThe operation that initializes the table.\n\nRaises:\nTypeError: when the keys and values data types do not match the table\nkey and value data types.", "source": "github-repos"}
{"code": "def trigger(self, attr, old, new, hint=None, setter=None):\n\n    def invoke():\n        callbacks = self._callbacks.get(attr)\n        if callbacks:\n            for callback in callbacks:\n                callback(attr, old, new)\n    if (hasattr(self, '_document') and (self._document is not None)):\n        self._document._notify_change(self, attr, old, new, hint, setter, invoke)\n    else:\n        invoke()", "docstring": "Trigger callbacks for ``attr`` on this object.\n\nArgs:\nattr (str) :\nold (object) :\nnew (object) :\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def num_fmt(num, max_digits=None):\n    if (num is None):\n        return 'None'\n\n    def num_in_mag(num, mag):\n        return ((mag > num) and (num > ((- 1) * mag)))\n    if (max_digits is None):\n        if num_in_mag(num, 1):\n            if num_in_mag(num, 0.1):\n                max_digits = 4\n            else:\n                max_digits = 3\n        else:\n            max_digits = 1\n    if util_type.is_float(num):\n        num_str = ((('%.' + str(max_digits)) + 'f') % num)\n        num_str = num_str.rstrip('0').lstrip('0')\n        if num_str.startswith('.'):\n            num_str = ('0' + num_str)\n        if num_str.endswith('.'):\n            num_str = (num_str + '0')\n        return num_str\n    elif util_type.is_int(num):\n        return int_comma_str(num)\n    else:\n        return '%r'", "docstring": "r\"\"\"\nWeird function. Not very well written. Very special case-y\n\nArgs:\nnum (int or float):\nmax_digits (int):\n\nReturns:\nstr:\n\nCommandLine:\npython -m utool.util_num --test-num_fmt\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_num import *  # NOQA\n>>> # build test data\n>>> num_list = [0, 0.0, 1.2, 1003232, 41431232., .0000000343, -.443243]\n>>> max_digits = None\n>>> # execute function\n>>> result = [num_fmt(num, max_digits) for num in num_list]\n>>> # verify results\n>>> print(result)\n['0', '0.0', '1.2', '1,003,232', '41431232.0', '0.0', '-0.443']", "source": "codesearchnet"}
{"code": "def are_equal(self, mol1, mol2):\n    b1 = set(self._get_bonds(mol1))\n    b2 = set(self._get_bonds(mol2))\n    return (b1 == b2)", "docstring": "Compare the bond table of the two molecules.\n\nArgs:\nmol1: first molecule. pymatgen Molecule object.\nmol2: second moleculs. pymatgen Molecule objec.", "source": "codesearchnet"}
{"code": "def __init__(self, stream):\n        \n        super(BinaryWriter, self).__init__()\n        self.stream = stream", "docstring": "Create an instance.\n\nArgs:\nstream (BytesIO): a stream to operate on. i.e. a neo.IO.MemoryStream or raw BytesIO.", "source": "juraj-google-style"}
{"code": "def __init__(self, key, b64secret, passphrase,\n                 api_url=\"https:\n        \n        super(AuthenticatedClient, self).__init__(api_url)\n        self.auth = CBProAuth(key, b64secret, passphrase)\n        self.session = requests.Session()", "docstring": "Create an instance of the AuthenticatedClient class.\n\nArgs:\nkey (str): Your API key.\nb64secret (str): The secret key matching your API key.\npassphrase (str): Passphrase chosen when setting up key.\napi_url (Optional[str]): API URL. Defaults to cbpro API.", "source": "juraj-google-style"}
{"code": "def _to_proto_sparse_tensor(sparse_tensor, nested_proto, process_leafs, already_processed):\n    already_processed.add(id(sparse_tensor))\n    nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME\n    for str_key in _SPARSE_TENSOR_FIELD:\n        tensor = getattr(sparse_tensor, str_key)\n        nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)", "docstring": "Serializes a `tf.SparseTensor` into `nested_proto`.\n\nArgs:\nsparse_tensor: An instance of `tf.SparseTensor`.\nnested_proto: A `module_pb2.NestedData` instance to be filled from\n`sparse_tensor`.\nprocess_leafs: A function to be applied to the leaf valued of the nested\nstructure.\nalready_processed: Set of already processed objects (used to avoid\ninfinite recursion).", "source": "codesearchnet"}
{"code": "def create_runner(ns_path, script, runner_type='Auto', optimized=True):\n    if ((runner_type == 'Auto') and DRMAA_AVAILABLE):\n        runner_type = 'GridRunner'\n    elif (runner_type == 'Auto'):\n        runner_type = 'ParallelRunner'\n    return locals().get(runner_type, globals().get(runner_type))(ns_path, script, optimized=optimized)", "docstring": "Create a SimulationRunner from a string containing the desired\nclass implementation, and return it.\n\nArgs:\nns_path (str): path to the ns-3 installation to employ in this\nSimulationRunner.\nscript (str): ns-3 script that will be executed to run simulations.\nrunner_type (str): implementation of the SimulationRunner to use.\nValue can be: SimulationRunner (for running sequential\nsimulations locally), ParallelRunner (for running parallel\nsimulations locally), GridRunner (for running simulations using\na DRMAA-compatible parallel task scheduler). If Auto,\nautomatically pick the best available runner (GridRunner if\nDRMAA is available, ParallelRunner otherwise).\noptimized (bool): whether to configure the runner to employ an\noptimized ns-3 build.", "source": "codesearchnet"}
{"code": "def copy_default_config_to_user_directory(\n        basename,\n        clobber=False,\n        dst_dir='~/.config/scriptabit'):\n    \n    dst_dir = os.path.expanduser(dst_dir)\n    dst = os.path.join(dst_dir, basename)\n    src = resource_filename(\n        Requirement.parse(\"scriptabit\"),\n        os.path.join('scriptabit', basename))\n\n    if not os.path.exists(dst_dir):\n        os.makedirs(dst_dir)\n\n    if clobber or not os.path.isfile(dst):\n        shutil.copy(src, dst)", "docstring": "Copies the default configuration file into the user config directory.\n\nArgs:\nbasename (str): The base filename.\nclobber (bool): If True, the default will be written even if a user\nconfig already exists.\ndst_dir (str): The destination directory.", "source": "juraj-google-style"}
{"code": "def decode(model_path_prefix: Union[(str, Path)], input_paths: Sequence[Path], label_set: Set[str], *, feature_type: str='fbank', batch_size: int=64, feat_dir: Optional[Path]=None, batch_x_name: str='batch_x:0', batch_x_lens_name: str='batch_x_lens:0', output_name: str='hyp_dense_decoded:0') -> List[List[str]]:\n    if (not input_paths):\n        raise PersephoneException('No untranscribed WAVs to transcribe.')\n    model_path_prefix = str(model_path_prefix)\n    for p in input_paths:\n        if (not p.exists()):\n            raise PersephoneException('The WAV file path {} does not exist'.format(p))\n    preprocessed_file_paths = []\n    for p in input_paths:\n        prefix = p.stem\n        feature_file_ext = '.{}.npy'.format(feature_type)\n        conventional_npy_location = ((p.parent.parent / 'feat') / Path((prefix + feature_file_ext)))\n        if conventional_npy_location.exists():\n            preprocessed_file_paths.append(conventional_npy_location)\n        else:\n            if (not feat_dir):\n                feat_dir = (p.parent.parent / 'feat')\n            if (not feat_dir.is_dir()):\n                os.makedirs(str(feat_dir))\n            mono16k_wav_path = (feat_dir / '{}.wav'.format(prefix))\n            feat_path = (feat_dir / '{}.{}.npy'.format(prefix, feature_type))\n            feat_extract.convert_wav(p, mono16k_wav_path)\n            preprocessed_file_paths.append(feat_path)\n    if feat_dir:\n        feat_extract.from_dir(feat_dir, feature_type)\n    fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)\n    metagraph = load_metagraph(model_path_prefix)\n    with tf.Session() as sess:\n        metagraph.restore(sess, model_path_prefix)\n        for fn_batch in fn_batches:\n            (batch_x, batch_x_lens) = utils.load_batch_x(fn_batch)\n        feed_dict = {batch_x_name: batch_x, batch_x_lens_name: batch_x_lens}\n        dense_decoded = sess.run(output_name, feed_dict=feed_dict)\n    indices_to_labels = labels.make_indices_to_labels(label_set)\n    human_readable = dense_to_human_readable(dense_decoded, indices_to_labels)\n    return human_readable", "docstring": "Use an existing tensorflow model that exists on disk to decode\nWAV files.\n\nArgs:\nmodel_path_prefix: The path to the saved tensorflow model.\nThis is the full prefix to the \".ckpt\" file.\ninput_paths: A sequence of `pathlib.Path`s to WAV files to put through\nthe model provided.\nlabel_set: The set of all the labels this model uses.\nfeature_type: The type of features this model uses.\nNote that this MUST match the type of features that the\nmodel was trained on initially.\nfeat_dir: Any files that require preprocessing will be\nsaved to the path specified by this.\nbatch_x_name: The name of the tensorflow input for batch_x\nbatch_x_lens_name: The name of the tensorflow input for batch_x_lens\noutput_name: The name of the tensorflow output", "source": "codesearchnet"}
{"code": "def GetArtifactsInProperOrder(self):\n    artifact_list = []\n    while self.reachable_nodes:\n        node_name = self.reachable_nodes.pop()\n        node = self.graph[node_name]\n        if node.is_artifact:\n            artifact_list.append(node_name)\n        for next_node_name in node.outgoing:\n            if (next_node_name not in self.graph):\n                continue\n            next_node = self.graph[next_node_name]\n            if next_node.is_provided:\n                continue\n            next_node.incoming.remove(node_name)\n            if (not (next_node.is_artifact and next_node.incoming)):\n                next_node.is_provided = True\n                self.reachable_nodes.add(next_node_name)\n    return artifact_list", "docstring": "Bring the artifacts in a linear order that resolves dependencies.\n\nThis method obtains a linear ordering of the nodes and then returns the list\nof artifact names.\n\nReturns:\nA list of `ArtifactName` instances such that if they are collected in the\ngiven order their dependencies are resolved.", "source": "codesearchnet"}
{"code": "def getMonthsBuffer(self, direction):\n    if (direction == ReadMonths.kWhReverse):\n        return self.m_rev_mons\n    return self.m_mons", "docstring": "Get the months tariff SerialBlock for meter.\n\nArgs:\ndirection (int): A :class:`~ekmmeters.ReadMonths` value.\n\nReturns:\nSerialBlock: Requested months tariffs buffer.", "source": "codesearchnet"}
{"code": "def get_tokens(max_value):\n    vocab = [str(i) for i in range(max_value)]\n    vocab = set(vocab)\n    vocab.update(CodeOp.LITERALS)\n    vocab.update(CodeOp.KEYWORDS)\n    vocab |= set(''.join(vocab))\n    return sorted(vocab)", "docstring": "Defines tokens.\n\nArgs:\nmax_value: the maximum numeric range for the token.\n\nReturns:\nlist of string tokens in vocabulary.", "source": "codesearchnet"}
{"code": "def __init__(self, url, username, password, enterprise, apiversion, sdk_identifier, monolithe_config):\n        \n        self.url = url\n        self.username = username\n        self.password = password\n        self.enterprise = enterprise\n        self.apiversion = apiversion\n        self.monolithe_config = monolithe_config\n        self.sdk_identifier = sdk_identifier", "docstring": "Initializes Courgette\n\nArgs:\nurl (string): the url of the server with its port\nusername (string): the username to launch tests\npassword (string): the password to connect to the server\nenterprise (string): the name of the enterprise to connect to the server\napiversion (float): the version of the API to connect\nsdk (string): the full name of the SDK to use", "source": "juraj-google-style"}
{"code": "def _expand_and_tile(tensor, multiple, dim=0, name=None):\n    if multiple < 1:\n        raise ValueError(f'Invalid argument multiple={multiple} for expand_and_tile  call. `multiple` must be an integer > 0')\n    with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope:\n        tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)\n        if isinstance(tensor, sparse_tensor.SparseTensor):\n            if dim < 0:\n                expand_dims = array_ops.reshape(array_ops.size(tensor.dense_shape) + dim, [1])\n            else:\n                expand_dims = [dim]\n            expanded_shape = array_ops.concat((array_ops.slice(tensor.dense_shape, [0], expand_dims), [1], array_ops.slice(tensor.dense_shape, expand_dims, [-1])), 0, name='expanded_shape')\n            expanded = sparse_ops.sparse_reshape(tensor, shape=expanded_shape, name='expand')\n            if multiple == 1:\n                return expanded\n            return sparse_ops.sparse_concat(dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)\n        expanded = array_ops.expand_dims(tensor, dim if dim >= 0 else dim - 1, name='expand')\n        if multiple == 1:\n            return expanded\n        ones = array_ops.ones_like(array_ops.shape(tensor))\n        tile_multiples = array_ops.concat((ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')\n        return array_ops.tile(expanded, tile_multiples, name=scope)", "docstring": "Slice `tensor` shape in 2, then tile along the sliced dimension.\n\nA new dimension is inserted in shape of `tensor` before `dim`, then values are\ntiled `multiple` times along the new dimension.\n\nArgs:\ntensor: Input `Tensor` or `SparseTensor`.\nmultiple: Integer, number of times to tile.\ndim: Integer, dimension along which to tile.\nname: Name of operation.\n\nReturns:\n`Tensor` result of expanding and tiling `tensor`.\n\nRaises:\nValueError: if `multiple` is less than 1, or `dim` is not in\n`[-rank(tensor), rank(tensor)]`.", "source": "github-repos"}
{"code": "def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None):\n    if cluster_spec:\n        cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec), task_type=task_type, task_id=task_id, num_accelerators={'GPU': self._num_gpus_per_worker})\n        self._initialize_multi_worker(cluster_resolver)\n    if session_config:\n        session_config.CopyFrom(self._update_config_proto(session_config))", "docstring": "Configures the strategy class with `cluster_spec`.\n\nThe strategy object will be re-initialized if `cluster_spec` is passed to\n`configure` but was not passed when instantiating the strategy.\n\nArgs:\nsession_config: Session config object.\ncluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\ncluster configurations.\ntask_type: the current task type.\ntask_id: the current task id.\n\nRaises:\nValueError: if `cluster_spec` is given but `task_type` or `task_id` is\nnot.", "source": "github-repos"}
{"code": "def __init__(self, max_batch_size: int=5000, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog', event_store: str='default_event_store'):\n    self.max_batch_size = max_batch_size\n    self.project = project\n    self.retry = retry\n    self.timeout = timeout\n    self.metadata = metadata\n    self.catalog_name = catalog_name\n    self.event_store = event_store", "docstring": "Initializes a :class:`WriteUserEvent` transform.\n\nArgs:\nbatch_size (int): Required. Maximum number of catalogitems\nper request.\nproject (str): Optional. GCP project name in which the catalog\ndata will be imported.\nretry: Optional. Designation of what\nerrors, if any, should be retried.\ntimeout (float): Optional. The amount of time, in seconds, to wait\nfor the request to complete.\nmetadata: Optional. Strings which\nshould be sent along with the request as metadata.\ncatalog_name (str): Optional. Name of the catalog.\nDefault: 'default_catalog'\nevent_store (str): Optional. Name of the event store.\nDefault: 'default_event_store'", "source": "github-repos"}
{"code": "def add_site(self, site):\n    start_angle = 0\n    radius = 0\n    total_occu = 0\n    for (specie, occu) in site.species.items():\n        radius += (occu * (specie.ionic_radius if (isinstance(specie, Specie) and specie.ionic_radius) else specie.average_ionic_radius))\n        total_occu += occu\n    vis_radius = (0.2 + (0.002 * radius))\n    for (specie, occu) in site.species.items():\n        if (not specie):\n            color = (1, 1, 1)\n        elif (specie.symbol in self.el_color_mapping):\n            color = [(i / 255) for i in self.el_color_mapping[specie.symbol]]\n        mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, (start_angle + (360 * occu)))\n        self.mapper_map[mapper] = [site]\n        start_angle += (360 * occu)\n    if (total_occu < 1):\n        mapper = self.add_partial_sphere(site.coords, vis_radius, (1, 1, 1), start_angle, (start_angle + (360 * (1 - total_occu))))\n        self.mapper_map[mapper] = [site]", "docstring": "Add a site to the render window. The site is displayed as a sphere, the\ncolor of which is determined based on the element. Partially occupied\nsites are displayed as a single element color, though the site info\nstill shows the partial occupancy.\n\nArgs:\nsite: Site to add.", "source": "codesearchnet"}
{"code": "def __init__(self, physaddr, size):\n        \n        self.mapping = None\n        self._open(physaddr, size)", "docstring": "Instantiate an MMIO object and map the region of physical memory\nspecified by the address base `physaddr` and size `size` in bytes.\n\nArgs:\nphysaddr (int, long): base physical address of memory region.\nsize (int, long): size of memory region.\n\nReturns:\nMMIO: MMIO object.\n\nRaises:\nMMIOError: if an I/O or OS error occurs.\nTypeError: if `physaddr` or `size` types are invalid.", "source": "juraj-google-style"}
{"code": "def fts_match(self, features, segment):\n    features = set(features)\n    if self.seg_known(segment):\n        return (features <= self.fts(segment))\n    else:\n        return None", "docstring": "Answer question \"are `ft_mask`'s features a subset of ft_seg?\"\n\nThis is like `FeatureTable.match` except that it checks whether a\nsegment is valid and returns None if it is not.\n\nArgs:\nfeatures (set): pattern defined as set of (value, feature) tuples\nsegment (set): segment defined as a set of (value, feature) tuples\n\nReturns:\nbool: True iff all features in `ft_mask` are also in `ft_seg`; None\nif segment is not valid", "source": "codesearchnet"}
{"code": "def report_line(zipfilename: str, contentsfilename: str, line: str, show_inner_file: bool) -> None:\n    if show_inner_file:\n        print('{} [{}]: {}'.format(zipfilename, contentsfilename, line))\n    else:\n        print('{}: {}'.format(zipfilename, line))", "docstring": "Prints a line from a file, with the ``.zip`` filename and optionally also\nthe inner filename.\n\nArgs:\nzipfilename: filename of the ``.zip`` file\ncontentsfilename: filename of the inner file\nline: the line from the inner file\nshow_inner_file: if ``True``, show both filenames; if ``False``, show\njust the ``.zip`` filename", "source": "codesearchnet"}
{"code": "def _authenticate(secrets_file):\n    flow = oauthclient.flow_from_clientsecrets(secrets_file, scope=OAUTH_SCOPE, message=('Failed to initialized OAuth 2.0 flow with secrets file: %s' % secrets_file))\n    storage = oauthfile.Storage(OAUTH_CREDENTIALS_FILE)\n    credentials = storage.get()\n    if ((credentials is None) or credentials.invalid):\n        credentials = oauthtools.run_flow(flow, storage, oauthtools.argparser.parse_args(args=[]))\n    http = httplib2.Http()\n    return credentials.authorize(http)", "docstring": "Runs the OAuth 2.0 installed application flow.\n\nReturns:\nAn authorized httplib2.Http instance.", "source": "codesearchnet"}
{"code": "def match_rules(tree, rules, fun=None, multi=False):\n    if multi:\n        context = match_rules_context_multi(tree, rules)\n    else:\n        context = match_rules_context(tree, rules)\n        if (not context):\n            return None\n    if fun:\n        args = fun.__code__.co_varnames\n        if multi:\n            res = []\n            for c in context:\n                action_context = {}\n                for arg in args:\n                    if (arg in c):\n                        action_context[arg] = c[arg]\n                res.append(fun(**action_context))\n            return res\n        else:\n            action_context = {}\n            for arg in args:\n                if (arg in context):\n                    action_context[arg] = context[arg]\n            return fun(**action_context)\n    else:\n        return context", "docstring": "Matches a Tree structure with the given query rules.\n\nQuery rules are represented as a dictionary of template to action.\nAction is either a function, or a dictionary of subtemplate parameter to rules::\n\nrules = { 'template' : { 'key': rules } }\n| { 'template' : {} }\n\nArgs:\ntree (Tree): Parsed tree structure\nrules (dict): A dictionary of query rules\nfun (function): Function to call with context (set to None if you want to return context)\nmulti (Bool): If True, returns all matched contexts, else returns first matched context\nReturns:\nContexts from matched rules", "source": "codesearchnet"}
{"code": "def authenticate_identify(self, api_token, override=True):\n    if (self.context.has_auth_params('Gem-Identify') and (not override)):\n        raise OverrideError('Gem-Identify')\n    if ((not api_token) or (not self.context.authorize('Gem-Identify', api_token=api_token))):\n        raise AuthUsageError(self.context, 'Gem-Identify')\n    return True", "docstring": "Set credentials for Identify authentication.\n\nArgs:\napi_token (str): Token issued to your Application through the Gem\nDeveloper Console.\noverride (boolean): Replace existing Application credentials.", "source": "codesearchnet"}
{"code": "class MeanAbsoluteError(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='mean_absolute_error', dtype=None):\n        super().__init__(mean_absolute_error, name, dtype=dtype)\n        self._direction = 'down'\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the mean absolute error between the labels and predictions.\n\nFormula:\n\n```python\nloss = mean(abs(y_true - y_pred))\n```\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExamples:\n\n>>> m = keras.metrics.MeanAbsoluteError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result()\n0.25\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result()\n0.5\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[keras.metrics.MeanAbsoluteError()])\n```", "source": "github-repos"}
{"code": "def process_alias_create_namespace(namespace):\n    namespace = filter_alias_create_namespace(namespace)\n    _validate_alias_name(namespace.alias_name)\n    _validate_alias_command(namespace.alias_command)\n    _validate_alias_command_level(namespace.alias_name, namespace.alias_command)\n    _validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)", "docstring": "Validate input arguments when the user invokes 'az alias create'.\n\nArgs:\nnamespace: argparse namespace object.", "source": "codesearchnet"}
{"code": "def operator_driven(drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS):\n    return ConsistentRegionConfig(trigger=ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN, drain_timeout=drain_timeout, reset_timeout=reset_timeout, max_consecutive_attempts=max_consecutive_attempts)", "docstring": "Define an operator-driven consistent region configuration.\nThe source operator triggers drain and checkpoint cycles for the region.\n\nArgs:\ndrain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`.  If not specified, the default value is 180 seconds.\nreset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`.  If not specified, the default value is 180 seconds.\nmax_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region.  This must be an integer value between 1 and 2147483647, inclusive.  If not specified, the default value is 5.\n\nReturns:\nConsistentRegionConfig: the configuration.", "source": "codesearchnet"}
{"code": "def pprint_table(table, out=sys.stdout, rstrip=False):\n    \n    def max_width_col(table, col_idx):\n        \n        return max([len(row[col_idx]) for row in table])\n\n    if rstrip:\n        for row_idx, row in enumerate(table):\n            table[row_idx] = [c.rstrip() for c in row]\n\n    col_paddings = []\n    ncols = len(table[0])\n    for i in range(ncols):\n        col_paddings.append(max_width_col(table, i))\n\n    for row in table:\n        \n        out.write(row[0].ljust(col_paddings[0] + 1))\n        \n        for i in range(1, len(row)):\n            col = row[i].rjust(col_paddings[i] + 2)\n            out.write(col)\n        out.write(\"\\n\")", "docstring": "Prints out a table of data, padded for alignment\nEach row must have the same number of columns.\n\nArgs:\ntable: The table to print. A list of lists.\nout: Output stream (file-like object)\nrstrip: if True, trailing withespaces are removed from the entries.", "source": "juraj-google-style"}
{"code": "def _UnserializableObjectFallback(self, obj):\n    \n    if isinstance(obj, libpython.PyInstanceObjectPtr):\n      \n      \n      in_class = obj.pyop_field('in_class')\n      result_dict = in_class.pyop_field('cl_dict').proxyval(set())\n\n      \n      instanceproxy = obj.proxyval(set())\n      result_dict.update(instanceproxy.attrdict)\n      result_dict['__pyringe_type_name__'] = instanceproxy.cl_name\n      result_dict['__pyringe_address__'] = instanceproxy.address\n      return result_dict\n\n    if isinstance(obj, libpython.HeapTypeObjectPtr):\n      \n      \n      \n      \n      \n      \n\n      try:\n        \n        type_ptr = obj.field('ob_type')\n        tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)\n        result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())\n      except gdb.error:\n        \n        \n        result_dict = {}\n\n      try:\n        \n        result_dict.update(obj.get_attr_dict().proxyval(set()))\n        result_dict['__pyringe_type_name__'] = obj.safe_tp_name()\n        result_dict['__pyringe_address__'] = long(obj._gdbval)  \n        return result_dict\n      except TypeError:\n        \n        \n        \n        pass\n    \n    \n    \n    try:\n      proxy = obj.proxyval(set())\n      \n      if isinstance(proxy, dict):\n        return {str(key): val for key, val in proxy.iteritems()}\n      return proxy\n    except AttributeError:\n      return str(obj)", "docstring": "Handles sanitizing of unserializable objects for Json.\n\nFor instances of heap types, we take the class dict, augment it with the\ninstance's __dict__, tag it and transmit it over to the RPC client to be\nreconstructed there. (Works with both old and new style classes)\nArgs:\nobj: The object to Json-serialize\nReturns:\nA Json-serializable version of the parameter", "source": "juraj-google-style"}
{"code": "def change_window(self, size_window):\n    self.size_window = size_window\n    self.window = self.lambert_window(self.size_window, self.lat0, self.lon0)", "docstring": "Change the region of interest\n\nArgs:\nsize_window (float): Radius of the region of interest (km)\n\nNotes:\nChange the attributes ``size_window`` and ``window`` to\ncorrespond to the new region of interest.", "source": "codesearchnet"}
{"code": "def account_distance(A1, A2):\n    \n    return (sum([action.alpha for action in A1]) -\n            sum([action.alpha for action in A2]))", "docstring": "Return the distance between two accounts. Here that is just the\ndifference in sum(alpha)\n\nArgs:\nA1 (Account): The first account.\nA2 (Account): The second account\n\nReturns:\nfloat: The distance between the two accounts.", "source": "juraj-google-style"}
{"code": "def write_additional(self, productversion, channel):\n        \n        self.fileobj.seek(self.additional_offset)\n        extras = extras_header.build(dict(\n            count=1,\n            sections=[dict(\n                channel=six.u(channel),\n                productversion=six.u(productversion),\n                size=len(channel) + len(productversion) + 2 + 8,\n                padding=b'',\n            )],\n        ))\n\n        self.fileobj.write(extras)\n        self.last_offset = self.fileobj.tell()", "docstring": "Write the additional information to the MAR header.\n\nArgs:\nproductversion (str): product and version string\nchannel (str): channel string", "source": "juraj-google-style"}
{"code": "def _add_arg_python(self, key, value=None, mask=False):\n    self._data[key] = value\n    if (not value):\n        pass\n    elif (value is True):\n        self._args.append('--{}'.format(key))\n        self._args_quoted.append('--{}'.format(key))\n        self._args_masked.append('--{}'.format(key))\n    else:\n        self._args.append('--{}={}'.format(key, value))\n        if mask:\n            value = ('x' * len(str(value)))\n        else:\n            value = self.quote(value)\n        self._args_quoted.append('--{}={}'.format(key, value))\n        self._args_masked.append('--{}={}'.format(key, value))", "docstring": "Add CLI Arg formatted specifically for Python.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).\nmask (boolean, default:False): Indicates whether no mask value.", "source": "codesearchnet"}
{"code": "def __init__(self, client, search_query: str) -> None:\n    self._client = client\n    self._search_query = search_query", "docstring": "Initializer.\n\nInitializes the FhirSearchRunner with user provided FHIR Client, and search\nquery.\n\nArgs:\nclient: FHIR Client for the FHIR server where queries will be run against.\nViews will be created from the response.\nsearch_query: Query used to fetch the subset of data from the FHIR server.", "source": "github-repos"}
{"code": "def acknowledge(self, **kwargs):\n\n    \n\n    device_id = kwargs['device_id']\n    config = self.get_config()\n    \n    \n    if 'r_folder_id' in kwargs:\n      r_folder_id = kwargs['r_folder_id']\n      remote_folder = syncthing_adt.Folder(\n        id=r_folder_id,\n        label=kwargs['label'],\n        path=kwargs['local_path'],\n        deviceID=self.get_device_id(),\n        rescanIntervalS=kwargs['interval']\n      )\n      remote_folder.add_device(device_id)\n      remote_folder = remote_folder.obj\n    \n    else:\n      remote_folder = kwargs['folder_obj']\n      remote_folder['path'] = kwargs['local_path']\n\n      if kwargs['interval']:\n        remote_folder['rescanIntervalS'] = kwargs['interval']\n\n      r_folder_id = remote_folder['id']\n    \n    \n    if self.folder_exists({'path' : kwargs['local_path']}, config):\n      raise ValueError('This folder has already been added.')\n    \n    \n    config['folders'].append(remote_folder)\n    config['label'] = kwargs['label']\n\n    self.new_device(config=config, device_id=device_id)\n           \n    device = self.find_device(device_id, config)\n    \n    if device:\n      device['name'] = kwargs['hostname']\n        \n    \n    self.adapter.set_dir_config({\n      'device_id' : device_id,\n      'api_key' : kwargs['api_key'] if 'api_key' in kwargs else '',\n      'label' : kwargs['label'],\n      'local_path' : kwargs['local_path'],\n      'is_shared' : True,\n      'server' : kwargs['server'] if 'server' in kwargs else False,\n      'host' : kwargs['host'] if 'host' in kwargs else None,\n\n      'remote_path': kwargs['remote_path'] if 'remote_path' in kwargs else '',\n      'port' : kwargs['port'] if 'port' in kwargs else None\n    }) \n    \n    self.set_config(config)\n    self.restart()", "docstring": "Commit the shared remote folder data into local config.xml file\n1. Update the remote_folder path and label\n2. Append the remote_folder to config folders list\n\nArgs:\nremote_folder(folder): syncthing folder object\nlocal_path: existing local path", "source": "juraj-google-style"}
{"code": "def refresh(self, request):\n    try:\n        self._retrieve_info(request)\n        (self.token, self.expiry) = _metadata.get_service_account_token(request, service_account=self._service_account_email)\n    except exceptions.TransportError as caught_exc:\n        new_exc = exceptions.RefreshError(caught_exc)\n        six.raise_from(new_exc, caught_exc)", "docstring": "Refresh the access token and scopes.\n\nArgs:\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\n\nRaises:\ngoogle.auth.exceptions.RefreshError: If the Compute Engine metadata\nservice can't be reached if if the instance has not\ncredentials.", "source": "codesearchnet"}
{"code": "def pow(x, y, name=None):\n    with ops.name_scope(name, 'Pow', [x]) as name:\n        return gen_math_ops._pow(x, y, name=name)", "docstring": "Computes the power of one value to another.\n\nGiven a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\ncorresponding elements in `x` and `y`. For example:\n\n```python\nx = tf.constant([[2, 2], [3, 3]])\ny = tf.constant([[8, 16], [2, 3]])\ntf.pow(x, y)  # [[256, 65536], [9, 27]]\n```\n\nArgs:\nx: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n`complex64`, or `complex128`.\ny: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n`complex64`, or `complex128`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`.", "source": "github-repos"}
{"code": "def read_profile(name):\n    config = configparser.ConfigParser()\n    config.read(CONFIG_FILE)\n    profile = config[name]\n    repo = profile['repo']\n    token = profile['token']\n    return {'repo': repo, 'token': token}", "docstring": "Get a named profile from the CONFIG_FILE.\n\nArgs:\n\nname\nThe name of the profile to load.\n\nReturns:\nA dictionary with the profile's ``repo`` and ``token`` values.", "source": "codesearchnet"}
{"code": "def reset_logformat(logger: logging.Logger,\n                    fmt: str,\n                    datefmt: str = '%Y-%m-%d %H:%M:%S') -> None:\n    \n    handler = logging.StreamHandler()\n    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)\n    handler.setFormatter(formatter)\n    remove_all_logger_handlers(logger)\n    logger.addHandler(handler)\n    logger.propagate = False", "docstring": "Create a new formatter and apply it to the logger.\n\n:func:`logging.basicConfig` won't reset the formatter if another module\nhas called it, so always set the formatter like this.\n\nArgs:\nlogger: logger to modify\nfmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`\ndatefmt: passed to the ``datefmt=`` argument of\n:class:`logging.Formatter`", "source": "juraj-google-style"}
{"code": "def shebang(self, new_shebang):\n    if (not self.shebang):\n        raise ValueError('Cannot modify a shebang if it does not exist.')\n    if (not new_shebang.startswith('\n        raise ValueError('Invalid shebang.')\n    self.writeline(new_shebang, 0)", "docstring": "Write a new shebang to the file.\n\nRaises:\nValueError: If the file has no shebang to modify.\nValueError: If the new shebang is invalid.", "source": "codesearchnet"}
{"code": "def fingerprints(data):\n    \n\n    Hashes = namedtuple('Hashes', \"md5 sha1 sha256 sha512\")\n\n    if six.PY2:\n        if not isinstance(data, str):\n            data = data.encode(\"utf-8\")\n    elif six.PY3:\n        if not isinstance(data, bytes):\n            data = data.encode(\"utf-8\")\n\n    \n    md5 = hashlib.md5()\n    md5.update(data)\n    md5 = md5.hexdigest()\n\n    \n    sha1 = hashlib.sha1()\n    sha1.update(data)\n    sha1 = sha1.hexdigest()\n\n    \n    sha256 = hashlib.sha256()\n    sha256.update(data)\n    sha256 = sha256.hexdigest()\n\n    \n    sha512 = hashlib.sha512()\n    sha512.update(data)\n    sha512 = sha512.hexdigest()\n\n    return Hashes(md5, sha1, sha256, sha512)", "docstring": "This function return the fingerprints of data.\n\nArgs:\ndata (string): raw data\n\nReturns:\nnamedtuple: fingerprints md5, sha1, sha256, sha512", "source": "juraj-google-style"}
{"code": "def get_qemu_info(path, backing_chain=False, fail_on_error=True):\n    \n\n    cmd = ['qemu-img', 'info', '--output=json', path]\n\n    if backing_chain:\n        cmd.insert(-1, '--backing-chain')\n\n    result = run_command_with_validation(\n        cmd, fail_on_error, msg='Failed to get info for {}'.format(path)\n    )\n\n    return json.loads(result.out)", "docstring": "Get info on a given qemu disk\n\nArgs:\npath(str): Path to the required disk\nbacking_chain(boo): if true, include also info about\nthe image predecessors.\nReturn:\nobject: if backing_chain == True then a list of dicts else a dict", "source": "juraj-google-style"}
{"code": "def consume(self, data):\n        \n        try:\n            self._streamer.consume(data)\n        except YajlError as ye:\n            print(ye.value)\n            raise JSONStreamerException(ye.value)", "docstring": "Takes input that must be parsed\n\nNote:\nAttach all your listeners before calling this method\n\nArgs:\ndata (str): input json string", "source": "juraj-google-style"}
{"code": "def print_object_results(obj_result):\n    \n    print_results_header(obj_result.object_id, obj_result.is_valid)\n\n    if obj_result.warnings:\n        print_warning_results(obj_result, 1)\n    if obj_result.errors:\n        print_schema_results(obj_result, 1)", "docstring": "Print the results of validating an object.\n\nArgs:\nobj_result: An ObjectValidationResults instance.", "source": "juraj-google-style"}
{"code": "def blacken_code(code):\n    \n    if black is None:\n        raise NotImplementedError\n\n    major, minor, _ = platform.python_version_tuple()\n    pyversion = 'py{major}{minor}'.format(major=major, minor=minor)\n    target_versions = [black.TargetVersion[pyversion.upper()]]\n\n    line_length = black.DEFAULT_LINE_LENGTH\n    string_normalization = True\n\n    mode = black.FileMode(\n        target_versions=target_versions,\n        line_length=line_length,\n        string_normalization=string_normalization,\n    )\n\n    return black.format_file_contents(code, fast=False, mode=mode)", "docstring": "Format code content using Black\n\nArgs:\ncode (str): code as string\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def create_package(name, data, package_cls=None):\n    from rez.package_maker__ import PackageMaker\n    maker = PackageMaker(name, data, package_cls=package_cls)\n    return maker.get_package()", "docstring": "Create a package given package data.\n\nArgs:\nname (str): Package name.\ndata (dict): Package data. Must conform to `package_maker.package_schema`.\n\nReturns:\n`Package` object.", "source": "codesearchnet"}
{"code": "def tap_hold(self, x, y, duration=1.0):\n    data = {'x': x, 'y': y, 'duration': duration}\n    return self.http.post('/wda/touchAndHold', data=data)", "docstring": "Tap and hold for a moment\n\nArgs:\n- x, y(int): position\n- duration(float): seconds of hold time\n\n[[FBRoute POST:@\"/wda/touchAndHold\"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],", "source": "codesearchnet"}
{"code": "def __init__(self, window, root):\n        \n        self.root = root\n        self.selenium = window.selenium\n        self.wait = window.wait\n        self.window = window", "docstring": "Create a Region object.\n\nArgs:\nwindow (:py:class:`BaseWindow`): Window object this region appears\nin.\nroot\n(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):\nWebDriver element object that serves as the root for the\nregion.", "source": "juraj-google-style"}
{"code": "def request_unwatch(self, node_name, output_slot, debug_op):\n    self._debug_ops_state_change_queue.put(_state_change(debug_service_pb2.EventReply.DebugOpStateChange.DISABLED, node_name, output_slot, debug_op))", "docstring": "Request disabling a debug tensor watchpoint or breakpoint.\n\nThis is the opposite of `request_watch()`.\n\nArgs:\nnode_name: (`str`) name of the node that the to-be-watched tensor belongs\nto, e.g., \"hidden/Weights\".\noutput_slot: (`int`) output slot index of the tensor to watch.\ndebug_op: (`str`) name of the debug op to enable. This should not include\nany attribute substrings.", "source": "github-repos"}
{"code": "def read_tabular(filepath):\n    \n    _, fn, ext = splitext2(filepath)\n    if ext == '.h5':\n        return _read_tabular_h5(filepath)\n    elif ext == '.pkl':\n        return _read_tabular_pickle(filepath)\n    else:\n        raise NotImplementedError", "docstring": "Read tabular object in HDF5 or pickle format\n\nArgs:\nfilepath (path-like): path to read to; must end in '.h5' or '.pkl'", "source": "juraj-google-style"}
{"code": "def _processing_limit(self, spec):\n    \n    processing_rate = float(spec.mapper.params.get(\"processing_rate\", 0))\n    slice_processing_limit = -1\n    if processing_rate > 0:\n      slice_processing_limit = int(math.ceil(\n          parameters.config._SLICE_DURATION_SEC*processing_rate/\n          int(spec.mapper.shard_count)))\n    return slice_processing_limit", "docstring": "Get the limit on the number of map calls allowed by this slice.\n\nArgs:\nspec: a Mapreduce spec.\n\nReturns:\nThe limit as a positive int if specified by user. -1 otherwise.", "source": "juraj-google-style"}
{"code": "def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,\n                 serial_num=None, dp_desc=None):\n        \n        super().__init__()\n        self.mfr_desc = mfr_desc\n        self.hw_desc = hw_desc\n        self.sw_desc = sw_desc\n        self.serial_num = serial_num\n        self.dp_desc = dp_desc", "docstring": "Create a Desc with the optional parameters below.\n\nArgs:\nmfr_desc (str): Manufacturer description\nhw_desc (str): Hardware description\nsw_desc (str): Software description\nserial_num (str): Serial number\ndp_desc (str): Datapath description", "source": "juraj-google-style"}
{"code": "def compatible_firmware_version(self):\n        \n        identifier = self.firmware_version.split('compiled')[0]\n        buf_size = self.MAX_BUF_SIZE\n        buf = (ctypes.c_char * buf_size)()\n        res = self._dll.JLINKARM_GetEmbeddedFWString(identifier.encode(), buf, buf_size)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return ctypes.string_at(buf).decode()", "docstring": "Returns the DLL's compatible J-Link firmware version.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe firmware version of the J-Link that the DLL is compatible\nwith.\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"}
{"code": "def cube():\n    app = Ice()\n\n    @app.get('/')\n    def default_home_page():\n        'Return a default home page.'\n        return simple_html('It works!', '<h1>It works!</h1>\\n<p>This is the default ice web page.</p>')\n\n    @app.error()\n    def generic_error_page():\n        'Return a simple and generic error page.'\n        return simple_html(app.response.status_line, '<h1>{title}</h1>\\n<p>{description}</p>\\n<hr>\\n<address>Ice/{version}</address>'.format(title=app.response.status_line, description=app.response.status_detail, version=__version__))\n\n    def simple_html(title, body):\n        'Return a simple HTML page.'\n        return '<!DOCTYPE html>\\n<html>\\n<head><title>{title}</title></head>\\n<body>\\n{body}\\n</body>\\n</html>\\n'.format(title=title, body=body)\n    return app", "docstring": "Return an Ice application with a default home page.\n\nCreate :class:`Ice` object, add a route to return the default page\nwhen a client requests the server root, i.e. /, using HTTP GET\nmethod, add an error handler to return HTTP error pages when an\nerror occurs and return this object. The returned object can be used\nas a WSGI application.\n\nReturns:\nIce: WSGI application.", "source": "codesearchnet"}
{"code": "def poisson_ll(data, means):\n    \n    if sparse.issparse(data):\n        return sparse_poisson_ll(data, means)\n    genes, cells = data.shape\n    clusters = means.shape[1]\n    ll = np.zeros((cells, clusters))\n    for i in range(clusters):\n        means_i = np.tile(means[:,i], (cells, 1))\n        means_i = means_i.transpose() + eps\n        \n        ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0)\n    return ll", "docstring": "Calculates the Poisson log-likelihood.\n\nArgs:\ndata (array): 2d numpy array of genes x cells\nmeans (array): 2d numpy array of genes x k\n\nReturns:\ncells x k array of log-likelihood for each cell/cluster pair", "source": "juraj-google-style"}
{"code": "def __init__(self, assign_defaults=(), method_name=None, overwrite=False):\n    \n    if isinstance(assign_defaults, str):\n      self._assign_defaults = [assign_defaults]\n    else:\n      self._assign_defaults = assign_defaults\n    self._method_name = method_name\n    self._overwrite = overwrite\n    _valid_defaults.update(self._assign_defaults)\n    default_args = sorted(_valid_defaults)\n    default_values = [None] * len(_valid_defaults)\n    if six.PY2:\n      default_func = PrettyTensor.with_defaults.__func__\n    else:\n      default_func = PrettyTensor.with_defaults\n    _set_ipython_string(default_func, default_args, default_values,\n                        _original_set_defaults_doc)\n    _set_ipython_string(defaults_scope, default_args, default_values,\n                        _original_defaults_scope_doc)", "docstring": "Assigns arguments to the decorator.\n\nArgs:\nassign_defaults: A sequence of strings for the default values that should\nbe provided.\nmethod_name: If provided, use this as the method_name instead of the\nwrapped function's name.\noverwrite: If False, throw an exception if this method has already been\nregistered.  True should be used in interactive environments or with\ngreat care.", "source": "juraj-google-style"}
{"code": "def put_event(self, evt):\n        \n        evt.step = self.global_step\n        evt.wall_time = time.time()\n        self._dispatch(lambda m: m.process_event(evt))", "docstring": "Put an :class:`tf.Event`.\n`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.\n\nArgs:\nevt (tf.Event):", "source": "juraj-google-style"}
{"code": "def torch_distributed_zero_first(local_rank: int):\n    if local_rank not in [-1, 0]:\n        dist.barrier()\n    yield\n    if local_rank == 0:\n        dist.barrier()", "docstring": "Decorator to make all processes in distributed training wait for each local_master to do something.\n\nArgs:\nlocal_rank (`int`): The rank of the local process.", "source": "github-repos"}
{"code": "def transfer_project(self, to_project_id, **kwargs):\n        \n        path = '/groups/%s/projects/%s' % (self.id, to_project_id)\n        self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Transfer a project to this group.\n\nArgs:\nto_project_id (int): ID of the project to transfer\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTransferProjectError: If the project could not be transfered", "source": "juraj-google-style"}
{"code": "def _init_journal(self, permissive=True):\n    nowstamp = datetime.now().strftime('%d-%b-%Y %H:%M:%S.%f')[:(- 3)]\n    self._add_entry(templates.INIT.format(time_stamp=nowstamp))\n    if permissive:\n        self._add_entry(templates.INIT_DEBUG)", "docstring": "Add the initialization lines to the journal.\n\nBy default adds JrnObj variable and timestamp to the journal contents.\n\nArgs:\npermissive (bool): if True most errors in journal will not\ncause Revit to stop journal execution.\nSome still do.", "source": "codesearchnet"}
{"code": "def placeOrder(self, contract: Contract, order: Order) -> Trade:\n        \n        orderId = order.orderId or self.client.getReqId()\n        self.client.placeOrder(orderId, contract, order)\n        now = datetime.datetime.now(datetime.timezone.utc)\n        key = self.wrapper.orderKey(\n            self.wrapper.clientId, orderId, order.permId)\n        trade = self.wrapper.trades.get(key)\n        if trade:\n            \n            assert trade.orderStatus.status not in OrderStatus.DoneStates\n            logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')\n            trade.log.append(logEntry)\n            self._logger.info(f'placeOrder: Modify order {trade}')\n            trade.modifyEvent.emit(trade)\n            self.orderModifyEvent.emit(trade)\n        else:\n            \n            order.clientId = self.wrapper.clientId\n            order.orderId = orderId\n            orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)\n            logEntry = TradeLogEntry(now, orderStatus.status, '')\n            trade = Trade(\n                contract, order, orderStatus, [], [logEntry])\n            self.wrapper.trades[key] = trade\n            self._logger.info(f'placeOrder: New order {trade}')\n            self.newOrderEvent.emit(trade)\n        return trade", "docstring": "Place a new order or modify an existing order.\nReturns a Trade that is kept live updated with\nstatus changes, fills, etc.\n\nArgs:\ncontract: Contract to use for order.\norder: The order to be placed.", "source": "juraj-google-style"}
{"code": "def save_images(images, filenames, output_dir):\n  \n  for i, filename in enumerate(filenames):\n    \n    \n    with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n      imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')", "docstring": "Saves images to the output directory.\n\nArgs:\nimages: array with minibatch of images\nfilenames: list of filenames without path\nIf number of file names in this list less than number of images in\nthe minibatch then only first len(filenames) images will be saved.\noutput_dir: directory where to save images", "source": "juraj-google-style"}
{"code": "def _global_batch_size(self):\n    return True", "docstring": "`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n`make_input_fn_iterator` assumes per-replica batching.\n\nReturns:\nBoolean.", "source": "github-repos"}
{"code": "def pow(x, a):\n    return math_ops.pow(x, a)", "docstring": "Element-wise exponentiation.\n\nArgs:\nx: Tensor or variable.\na: Python integer.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def decode_fast(self, token_ids: Union[int, List[int]]) -> str:\n    return self.sp_model.decode(token_ids)", "docstring": "Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced\nfunctionality but is often much faster.\n\nArgs:\ntoken_ids (`int` or `List[int]`): Encoded token or text as token id(s).\n\nReturns:\n`str`: Decoded text", "source": "github-repos"}
{"code": "def path_is_empty(p: tcod.path.AStar) -> bool:\n    \n    return bool(lib.TCOD_path_is_empty(p._path_c))", "docstring": "Return True if a path is empty.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nbool: True if a path is empty.  Otherwise False.", "source": "juraj-google-style"}
{"code": "def returns_scalar(return_type: Optional[FhirPathDataType]) -> bool:\n    return not return_type or return_type.cardinality == Cardinality.SCALAR", "docstring": "Indicates if the return type evaluates to a scalar.\n\nArgs:\nreturn_type: The data type to describe.\n\nReturns:\nTrue if `return_type` represents an element with cardinality less than or\nequal to one whose parents are all also scalars.\nFalse otherwise. For example, the path Patient.name.use does not return a\nscalar, despite 'use' being a scalar, because it is a child of the\ncollection, 'name.'", "source": "github-repos"}
{"code": "def get_representations_of_kind(kind, start=None, end=None):\n    q = Property.query(ancestor=Property.key_for_kind(kind))\n    if ((start is not None) and (start != '')):\n        q = q.filter((Property.key >= Property.key_for_property(kind, start)))\n    if (end is not None):\n        if (end == ''):\n            return {}\n        q = q.filter((Property.key < Property.key_for_property(kind, end)))\n    result = {}\n    for property in q:\n        result[property.property_name] = property.property_representation\n    return result", "docstring": "Return all representations of properties of kind in the specified range.\n\nNOTE: This function does not return unindexed properties.\n\nArgs:\nkind: name of kind whose properties you want.\nstart: only return properties >= start if start is not None.\nend: only return properties < end if end is not None.\n\nReturns:\nA dictionary mapping property names to its list of representations.", "source": "codesearchnet"}
{"code": "def login(self, username, password, state=None, sync=True):\n    auth = APIAuth(self.OAUTH_SCOPES)\n    ret = auth.login(username, password, get_mac())\n    if ret:\n        self.load(auth, state, sync)\n    return ret", "docstring": "Authenticate to Google with the provided credentials & sync.\n\nArgs:\nemail (str): The account to use.\npassword (str): The account password.\nstate (dict): Serialized state to load.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "codesearchnet"}
{"code": "def setKstar(self,term_i,Ks):\n        \n        assert Ks.shape[0]==self.N\n    \n        \n            \n            \n\n        self.vd.getTerm(term_i).getKcf().setK0cross(Ks)", "docstring": "Set the kernel for predictions\n\nArgs:\nterm_i:     index of the term we are interested in\nKs:         (TODO: is this the covariance between train and test or the covariance between test points?)", "source": "juraj-google-style"}
{"code": "def to_hising(self):\n    if (self.vartype is Vartype.BINARY):\n        return self.to_spin().to_hising()\n    h = {}\n    J = {}\n    offset = 0\n    for (term, bias) in self.items():\n        if (len(term) == 0):\n            offset += bias\n        elif (len(term) == 1):\n            (v,) = term\n            h[v] = bias\n        else:\n            J[tuple(term)] = bias\n    return (h, J, offset)", "docstring": "Construct a higher-order Ising problem from a binary polynomial.\n\nReturns:\ntuple: A 3-tuple of the form (`h`, `J`, `offset`) where `h` includes\nthe linear biases, `J` has the higher-order biases and `offset` is\nthe linear offset.\n\nExamples:\n>>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1, 'abc': -1}, dimod.SPIN)\n>>> h, J, off = poly.to_hising()\n>>> h\n{'a': -1}", "source": "codesearchnet"}
{"code": "def _set_current(self, new_current):\n        \n        new_cur_full_path = self.join(new_current)\n        if not os.path.exists(new_cur_full_path):\n            raise PrefixNotFound(\n                'Prefix \"%s\" does not exist in workdir %s' %\n                (new_current, self.path)\n            )\n\n        if os.path.lexists(self.join('current')):\n            os.unlink(self.join('current'))\n\n        os.symlink(new_current, self.join('current'))\n        self.current = new_current", "docstring": "Change the current default prefix, for internal usage\n\nArgs:\nnew_current(str): Name of the new current prefix, it must already\nexist\n\nReturns:\nNone\n\nRaises:\nPrefixNotFound: if the given prefix name does not exist in the\nworkdir", "source": "juraj-google-style"}
{"code": "def _maybe_set_current_user_vars(method, api_info=None, request=None):\n  \n  if _is_auth_info_available():\n    return\n\n  \n  os.environ[_ENV_AUTH_EMAIL] = ''\n  os.environ[_ENV_AUTH_DOMAIN] = ''\n\n  \n  \n  \n  try:\n    api_info = api_info or method.im_self.api_info\n  except AttributeError:\n    \n    \n    \n    \n    \n    _logger.warning('AttributeError when accessing %s.im_self.  An unbound '\n                    'method was probably passed as an endpoints handler.',\n                    method.__name__)\n    scopes = method.method_info.scopes\n    audiences = method.method_info.audiences\n    allowed_client_ids = method.method_info.allowed_client_ids\n  else:\n    scopes = (method.method_info.scopes\n              if method.method_info.scopes is not None\n              else api_info.scopes)\n    audiences = (method.method_info.audiences\n                 if method.method_info.audiences is not None\n                 else api_info.audiences)\n    allowed_client_ids = (method.method_info.allowed_client_ids\n                          if method.method_info.allowed_client_ids is not None\n                          else api_info.allowed_client_ids)\n\n  if not scopes and not audiences and not allowed_client_ids:\n    \n    \n    \n    return\n\n  token = _get_token(request)\n  if not token:\n    return None\n\n  if allowed_client_ids and _is_local_dev():\n    allowed_client_ids = (constants.API_EXPLORER_CLIENT_ID,) + tuple(allowed_client_ids)\n\n  \n  \n  \n  \n  if ((scopes == [_EMAIL_SCOPE] or scopes == (_EMAIL_SCOPE,)) and\n      allowed_client_ids):\n    _logger.debug('Checking for id_token.')\n    issuers = api_info.issuers\n    if issuers is None:\n      issuers = _DEFAULT_GOOGLE_ISSUER\n    elif 'google_id_token' not in issuers:\n      issuers.update(_DEFAULT_GOOGLE_ISSUER)\n    time_now = long(time.time())\n    user = _get_id_token_user(token, issuers, audiences, allowed_client_ids,\n                              time_now, memcache)\n    if user:\n      os.environ[_ENV_AUTH_EMAIL] = user.email()\n      os.environ[_ENV_AUTH_DOMAIN] = user.auth_domain()\n      return\n\n  \n  if scopes:\n    _logger.debug('Checking for oauth token.')\n    if _is_local_dev():\n      _set_bearer_user_vars_local(token, allowed_client_ids, scopes)\n    else:\n      _set_bearer_user_vars(allowed_client_ids, scopes)", "docstring": "Get user information from the id_token or oauth token in the request.\n\nUsed internally by Endpoints to set up environment variables for user\nauthentication.\n\nArgs:\nmethod: The class method that's handling this request.  This method\nshould be annotated with @endpoints.method.\napi_info: An api_config._ApiInfo instance. Optional. If None, will attempt\nto parse api_info from the implicit instance of the method.\nrequest: The current request, or None.", "source": "juraj-google-style"}
{"code": "def _TransmitBreakpointUpdates(self, service):\n    reconnect = False\n    retry_list = []\n    while self._transmission_queue:\n        (breakpoint, retry_count) = self._transmission_queue.popleft()\n        try:\n            service.debuggees().breakpoints().update(debuggeeId=self._debuggee_id, id=breakpoint['id'], body={'breakpoint': breakpoint}).execute()\n            native.LogInfo(('Breakpoint %s update transmitted successfully' % breakpoint['id']))\n        except apiclient.errors.HttpError as err:\n            status = err.resp.status\n            is_transient = ((status >= 500) or (status == 408))\n            if (is_transient and (retry_count < (self.max_transmit_attempts - 1))):\n                native.LogInfo(('Failed to send breakpoint %s update: %s' % (breakpoint['id'], traceback.format_exc())))\n                retry_list.append((breakpoint, (retry_count + 1)))\n            elif is_transient:\n                native.LogWarning(('Breakpoint %s retry count exceeded maximum' % breakpoint['id']))\n            else:\n                native.LogInfo(('%s, breakpoint: %s' % (err, breakpoint['id'])))\n        except BaseException:\n            native.LogWarning(('Fatal error sending breakpoint %s update: %s' % (breakpoint['id'], traceback.format_exc())))\n            reconnect = True\n    self._transmission_queue.extend(retry_list)\n    if (not self._transmission_queue):\n        self.update_backoff.Succeeded()\n        return (reconnect, None)\n    else:\n        return (reconnect, self.update_backoff.Failed())", "docstring": "Tries to send pending breakpoint updates to the backend.\n\nSends all the pending breakpoint updates. In case of transient failures,\nthe breakpoint is inserted back to the top of the queue. Application\nfailures are not retried (for example updating breakpoint in a final\nstate).\n\nEach pending breakpoint maintains a retry counter. After repeated transient\nfailures the breakpoint is discarded and dropped from the queue.\n\nArgs:\nservice: client to use for API calls\n\nReturns:\n(reconnect, timeout) tuple. The first element (\"reconnect\") is set to\ntrue on unexpected HTTP responses. The caller should discard the HTTP\nconnection and create a new one. The second element (\"timeout\") is\nset to None if all pending breakpoints were sent successfully. Otherwise\nreturns time interval in seconds to stall before retrying.", "source": "codesearchnet"}
{"code": "def _check_registry_type(folder=None):\n    \n\n    folder = _registry_folder(folder)\n\n    default_file = os.path.join(folder, 'registry_type.txt')\n\n    try:\n        with open(default_file, \"r\") as infile:\n            data = infile.read()\n            data = data.strip()\n\n            ComponentRegistry.SetBackingStore(data)\n    except IOError:\n        pass", "docstring": "Check if the user has placed a registry_type.txt file to choose the registry type\n\nIf a default registry type file is found, the DefaultBackingType and DefaultBackingFile\nclass parameters in ComponentRegistry are updated accordingly.\n\nArgs:\nfolder (string): The folder that we should check for a default registry type", "source": "juraj-google-style"}
{"code": "def os_version(self, value):\n        \n        if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values:\n            del self._values['ai.device.osVersion']\n        else:\n            self._values['ai.device.osVersion'] = value", "docstring": "The os_version property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in self._SUPPORTED_KEYS:\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    date_time = dfdatetime_time_elements.TimeElements()\n\n    try:\n      iso_date_time = self._GetISO8601String(structure.date_time)\n      date_time.CopyFromStringISO8601(iso_date_time)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_RECORDED)\n\n    event_data = ApacheAccessEventData()\n    event_data.ip_address = structure.ip_address\n    event_data.remote_name = structure.remote_name\n    event_data.user_name = structure.user_name\n    event_data.http_request = structure.http_request\n    event_data.http_response_code = structure.response_code\n    event_data.http_response_bytes = structure.response_bytes\n\n    if key == 'combined_log_format':\n      event_data.http_request_referer = structure.referer\n      event_data.http_request_user_agent = structure.user_agent\n\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a matching entry.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): elements parsed from the file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T:\n        \n        result = self._source.get(self._source_type, deepcopy(query), context)\n        LOGGER.info(\"Got result \\\"{result}\\\" from query \\\"{query}\\\" of source \\\"{source}\\\"\".format(result=result, query=query, source=self._source))\n\n        LOGGER.info(\"Sending result \\\"{result}\\\" to sinks before converting\".format(result=result))\n        for sink in self._before_transform:\n            sink.put(result, context)\n\n        LOGGER.info(\"Converting result \\\"{result}\\\" to request type\".format(result=result))\n        result = self._transform(data=result, context=context)\n\n        LOGGER.info(\"Sending result \\\"{result}\\\" to sinks after converting\".format(result=result))\n        for sink in self._after_transform:\n            sink.put(result, context)\n\n        return result", "docstring": "Gets a query from the data source.\n\n1) Extracts the query from the data source.\n2) Inserts the result into any data sinks.\n3) Transforms the result into the requested type if it wasn't already.\n4) Inserts the transformed result into any data sinks.\n\nArgs:\nquery: The query being requested.\ncontext: The context for the extraction (mutable).\n\nReturns:\nThe requested object.", "source": "juraj-google-style"}
{"code": "def highwater(self, partition):\n    if (not isinstance(partition, TopicPartition)):\n        raise TypeError('partition must be a TopicPartition namedtuple')\n    assert self._subscription.is_assigned(partition), 'Partition is not assigned'\n    return self._subscription.assignment[partition].highwater", "docstring": "Last known highwater offset for a partition.\n\nA highwater offset is the offset that will be assigned to the next\nmessage that is produced. It may be useful for calculating lag, by\ncomparing with the reported position. Note that both position and\nhighwater refer to the *next* offset -- i.e., highwater offset is\none greater than the newest available message.\n\nHighwater offsets are returned in FetchResponse messages, so will\nnot be available if no FetchRequests have been sent for this partition\nyet.\n\nArguments:\npartition (TopicPartition): Partition to check\n\nReturns:\nint or None: Offset if available", "source": "codesearchnet"}
{"code": "def AddEventData(self, event_data):\n    \n    self._RaiseIfNotWritable()\n\n    self._AddAttributeContainer(self._CONTAINER_TYPE_EVENT_DATA, event_data)", "docstring": "Adds event data.\n\nArgs:\nevent_data (EventData): event data.\n\nRaises:\nIOError: when the storage file is closed or read-only.\nOSError: when the storage file is closed or read-only.", "source": "juraj-google-style"}
{"code": "def set_device_name(self, new_name):\n        \n\n        device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n        if device_name is None:\n            logger.warn('Failed to find handle for device name')\n            return False\n        \n        if len(new_name) > MAX_DEVICE_NAME_LEN:\n            logger.error('Device name exceeds maximum length ({} > {})'.format(len(new_name), MAX_DEVICE_NAME_LEN))\n            return False\n\n        if self.dongle._write_attribute(self.conn_handle, device_name, new_name.encode('ascii')):\n            self.name = new_name\n            return True\n\n        return False", "docstring": "Sets a new BLE device name for this SK8.\n\nArgs:\nnew_name (str): the new device name as an ASCII string, max 20 characters.\n\nReturns:\nTrue if the name was updated successfully, False otherwise.", "source": "juraj-google-style"}
{"code": "def youtube(keyword=None):\n    \n    if keyword is None:\n        web.open('https:\n    else:\n        web.open(quote('https:", "docstring": "Open youtube.\n\nArgs:\nkeyword (optional): Search word.", "source": "juraj-google-style"}
{"code": "def form_uri(item_id, service, is_track):\n    \n    if is_track:\n        uri = service.sonos_uri_from_id(item_id)\n    else:\n        uri = 'x-rincon-cpcontainer:' + item_id\n    return uri", "docstring": "Form and return a music service item uri\n\nArgs:\nitem_id (str): The item id\nservice (MusicService): The music service that the item originates from\nis_track (bool): Whether the item_id is from a track or not\n\nReturns:\nstr: The music service item uri", "source": "juraj-google-style"}
{"code": "def __init__(self, details):\n\t\t\n\n\t\t\n\t\tif not isinstance(details, list):\n\t\t\traise ValueError('details in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a list')\n\n\t\t\n\t\tself.validation_failures = {}\n\n\t\t\n\t\t\n\t\tself._optional = True\n\n\t\t\n\t\tself._nodes = []\n\n\t\t\n\t\tfor i in range(len(details)):\n\n\t\t\t\n\t\t\tif isinstance(details[i], _BaseNode):\n\t\t\t\tself._nodes.append(details[i])\n\t\t\t\tcontinue\n\n\t\t\t\n\t\t\telif isinstance(details[i], (dict, list)):\n\n\t\t\t\t\n\t\t\t\tself._nodes.append(_child(details[i]))\n\n\t\t\t\n\t\t\telse:\n\t\t\t\traise ValueError('details[' + str(i)  + '] in ' + self.__class__.__name__ + '.' + sys._getframe().f_code.co_name + ' must be a dict')\n\n\t\t\t\n\t\t\t\n\t\t\tif not self._nodes[-1]._optional:\n\t\t\t\tself._optional = False", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of values allowed for\nthe node\n\nRaises:\nValueError\n\nReturns:\nOptionsNode", "source": "juraj-google-style"}
{"code": "def __init__(self, pad_mask):\n    \n    self.nonpad_ids = None\n    self.dim_origin = None\n\n    with tf.name_scope(\"pad_reduce/get_ids\"):\n      pad_mask = tf.reshape(pad_mask, [-1])  \n      \n      \n      \n      \n      self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))\n      self.dim_origin = tf.shape(pad_mask)[:1]", "docstring": "Compute and store the location of the padding.\n\nArgs:\npad_mask (tf.Tensor): Reference padding tensor of shape\n[batch_size,length] or [dim_origin] (dim_origin=batch_size*length)\ncontaining non-zeros positive values to indicate padding location.", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: Optional[bool]=False):\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask)\n    tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    return (hidden_states, self_attn_weights)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*\nattention_mask (`tf.Tensor`): attention mask of size\n*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n*(encoder_attention_heads,)*", "source": "github-repos"}
{"code": "def ConvertGlobIntoPathComponents(self, pattern):\n    components = []\n    for path_component in pattern.split('/'):\n        m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component)\n        if m:\n            path_component = path_component.replace(m.group(0), '*')\n            component = rdf_paths.PathSpec(path=fnmatch.translate(path_component), pathtype=self.state.pathtype, path_options=rdf_paths.PathSpec.Options.RECURSIVE)\n            if m.group(1):\n                component.recursion_depth = int(m.group(1))\n        elif self.GLOB_MAGIC_CHECK.search(path_component):\n            component = rdf_paths.PathSpec(path=fnmatch.translate(path_component), pathtype=self.state.pathtype, path_options=rdf_paths.PathSpec.Options.REGEX)\n        else:\n            pathtype = self.state.pathtype\n            if ((pathtype == rdf_paths.PathSpec.PathType.TSK) and re.match('^.:$', path_component)):\n                path_component = ('%s\\\\' % path_component)\n            component = rdf_paths.PathSpec(path=path_component, pathtype=pathtype, path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE)\n        components.append(component)\n    return components", "docstring": "r\"\"\"Converts a glob pattern into a list of pathspec components.\n\nWildcards are also converted to regular expressions. The pathspec components\ndo not span directories, and are marked as a regex or a literal component.\n\nWe also support recursion into directories using the ** notation.  For\nexample, /home/**2/foo.txt will find all files named foo.txt recursed 2\ndirectories deep. If the directory depth is omitted, it defaults to 3.\n\nExample:\n/home/test/* -> ['home', 'test', '.*\\\\Z(?ms)']\n\nArgs:\npattern: A glob expression with wildcards.\n\nReturns:\nA list of PathSpec instances for each component.\n\nRaises:\nValueError: If the glob is invalid.", "source": "codesearchnet"}
{"code": "def output(self, _filename):\n        \n\n        txt = ''\n        for contract in self.slither.contracts_derived:\n            txt += '\\n{}:\\n'.format(contract.name)\n            table = PrettyTable(['Name', 'ID'])\n            for function in contract.functions:\n                if function.visibility in ['public', 'external']:\n                    table.add_row([function.full_name, hex(get_function_id(function.full_name))])\n            for variable in contract.state_variables:\n                if variable.visibility in ['public']:\n                    variable_getter_args = \"\"\n                    if type(variable.type) is ArrayType:\n                        length = 0\n                        v = variable\n                        while type(v.type) is ArrayType:\n                            length += 1\n                            v = v.type\n                        variable_getter_args = ','.join([\"uint256\"]*length)\n                    elif type(variable.type) is MappingType:\n                        variable_getter_args = variable.type.type_from\n\n                    table.add_row([f\"{variable.name}({variable_getter_args})\", hex(get_function_id(f\"{variable.name}({variable_getter_args})\"))])\n            txt += str(table) + '\\n'\n\n        self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def _compute_latents(self, g_values: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n    x = torch.repeat_interleave(torch.unsqueeze(g_values, dim=-2), self.watermarking_depth, axis=-2)\n    x = torch.tril(x, diagonal=-1)\n    logits = (self.delta[..., None, :] @ x.type(self.delta.dtype)[..., None]).squeeze() + self.beta\n    p_two_unique_tokens = torch.sigmoid(logits)\n    p_one_unique_token = 1 - p_two_unique_tokens\n    return (p_one_unique_token, p_two_unique_tokens)", "docstring": "Computes the unique token probability distribution given g-values.\n\nArgs:\ng_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`):\nPRF values.\n\nReturns:\np_one_unique_token and p_two_unique_tokens, both of shape\n[batch_size, seq_len, watermarking_depth]. p_one_unique_token[i,t,l]\ngives the probability of there being one unique token in a tournament\nmatch on layer l, on timestep t, for batch item i.\np_one_unique_token[i,t,l] + p_two_unique_token[i,t,l] = 1.", "source": "github-repos"}
{"code": "class PipelineDataFormat:\n    SUPPORTED_FORMATS = ['json', 'csv', 'pipe']\n\n    def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite: bool=False):\n        self.output_path = output_path\n        self.input_path = input_path\n        self.column = column.split(',') if column is not None else ['']\n        self.is_multi_columns = len(self.column) > 1\n        if self.is_multi_columns:\n            self.column = [tuple(c.split('=')) if '=' in c else (c, c) for c in self.column]\n        if output_path is not None and (not overwrite):\n            if exists(abspath(self.output_path)):\n                raise OSError(f'{self.output_path} already exists on disk')\n        if input_path is not None:\n            if not exists(abspath(self.input_path)):\n                raise OSError(f\"{self.input_path} doesn't exist on disk\")\n\n    @abstractmethod\n    def __iter__(self):\n        raise NotImplementedError()\n\n    @abstractmethod\n    def save(self, data: Union[dict, List[dict]]):\n        \n        raise NotImplementedError()\n\n    def save_binary(self, data: Union[dict, List[dict]]) -> str:\n        \n        path, _ = os.path.splitext(self.output_path)\n        binary_path = os.path.extsep.join((path, 'pickle'))\n        with open(binary_path, 'wb+') as f_output:\n            pickle.dump(data, f_output)\n        return binary_path\n\n    @staticmethod\n    def from_str(format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False) -> 'PipelineDataFormat':\n        \n        if format == 'json':\n            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)\n        elif format == 'csv':\n            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)\n        elif format == 'pipe':\n            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)\n        else:\n            raise KeyError(f'Unknown reader {format} (Available reader are json/csv/pipe)')", "docstring": "Base class for all the pipeline supported data format both for reading and writing. Supported data formats\ncurrently includes:\n\n- JSON\n- CSV\n- stdin/stdout (pipe)\n\n`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to\npipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.\n\nArgs:\noutput_path (`str`): Where to save the outgoing data.\ninput_path (`str`): Where to look for the input data.\ncolumn (`str`): The column to read.\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to overwrite the `output_path`.", "source": "github-repos"}
{"code": "def get_remaining_width(sample_string, max_terminal_width=None):\n    if (max_terminal_width is not None):\n        available_width = min(terminal_width(), max_terminal_width)\n    else:\n        available_width = terminal_width()\n    return (available_width - len(sample_string))", "docstring": "Returns the number of characters available if sample string were to be printed in the terminal.\n\nPositional arguments:\nsample_string -- gets the length of this string.\n\nKeyword arguments:\nmax_terminal_width -- limit the overall width of everything to these many characters.\n\nReturns:\nInteger.", "source": "codesearchnet"}
{"code": "def __init__(self, property_type=TableFeaturePropType.OFPTFPT_INSTRUCTIONS,\n                 instruction_ids=None):\n        \n        super().__init__(property_type=property_type)\n        self.instruction_ids = instruction_ids if instruction_ids else []\n        self.update_length()", "docstring": "Create a InstructionsProperty with the optional parameters below.\n\nArgs:\ntype(|TableFeaturePropType_v0x04|):\nProperty Type value of this instance.\nnext_table_ids(|ListOfInstruction_v0x04|):\nList of InstructionGotoTable instances.", "source": "juraj-google-style"}
{"code": "def files_comments_delete(self, *, file: str, id: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"file\": file, \"id\": id})\n        return self.api_call(\"files.comments.delete\", json=kwargs)", "docstring": "Deletes an existing comment on a file.\n\nArgs:\nfile (str): The file id. e.g. 'F1234467890'\nid (str): The file comment id. e.g. 'Fc1234567890'", "source": "juraj-google-style"}
{"code": "def start(backdate=None):\n    if f.s.cum:\n        raise StartError(\"Already have stamps, can't start again (must reset).\")\n    if (f.t.subdvsn_awaiting or f.t.par_subdvsn_awaiting):\n        raise StartError(\"Already have subdivisions, can't start again (must reset).\")\n    if f.t.stopped:\n        raise StoppedError('Timer already stopped (must open new or reset).')\n    t = timer()\n    if (backdate is None):\n        t_start = t\n    else:\n        if (f.t is f.root):\n            raise BackdateError('Cannot backdate start of root timer.')\n        if (not isinstance(backdate, float)):\n            raise TypeError('Backdate must be type float.')\n        if (backdate > t):\n            raise BackdateError('Cannot backdate to future time.')\n        if (backdate < f.tm1.last_t):\n            raise BackdateError('Cannot backdate start to time previous to latest stamp in parent timer.')\n        t_start = backdate\n    f.t.paused = False\n    f.t.tmp_total = 0.0\n    f.t.start_t = t_start\n    f.t.last_t = t_start\n    return t", "docstring": "Mark the start of timing, overwriting the automatic start data written on\nimport, or the automatic start at the beginning of a subdivision.\n\nNotes:\nBackdating: For subdivisions only.  Backdate time must be in the past\nbut more recent than the latest stamp in the parent timer.\n\nArgs:\nbackdate (float, optional): time to use for start instead of current.\n\nReturns:\nfloat: The current time.\n\nRaises:\nBackdateError: If given backdate time is out of range or used in root timer.\nStartError: If the timer is not in a pristine state (if any stamps or\nsubdivisions, must reset instead).\nStoppedError: If the timer is already stopped (must reset instead).\nTypeError: If given backdate value is not type float.", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    dictionary = dict()\n    for (local_name, attribute) in self._attributes.items():\n        remote_name = attribute.remote_name\n        if hasattr(self, local_name):\n            value = getattr(self, local_name)\n            if isinstance(value, NURESTObject):\n                value = value.to_dict()\n            if (isinstance(value, list) and (len(value) > 0) and isinstance(value[0], NURESTObject)):\n                tmp = list()\n                for obj in value:\n                    tmp.append(obj.to_dict())\n                value = tmp\n            dictionary[remote_name] = value\n        else:\n            pass\n    return dictionary", "docstring": "Converts the current object into a Dictionary using all exposed ReST attributes.\n\nReturns:\ndict: the dictionary containing all the exposed ReST attributes and their values.\n\nExample::\n>>> print entity.to_dict()\n{\"name\": \"my entity\", \"description\": \"Hello World\", \"ID\": \"xxxx-xxx-xxxx-xxx\", ...}", "source": "codesearchnet"}
{"code": "def get_template_name(env, pipeline_type):\n    \n    pipeline_base = 'pipeline/pipeline'\n    template_name_format = '{pipeline_base}'\n    if env.startswith('prod'):\n        template_name_format = template_name_format + '_{env}'\n    else:\n        template_name_format = template_name_format + '_stages'\n\n    if pipeline_type != 'ec2':\n        template_name_format = template_name_format + '_{pipeline_type}'\n\n    template_name_format = template_name_format + '.json.j2'\n    template_name = template_name_format.format(pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)\n\n    return template_name", "docstring": "Generates the correct template name based on pipeline type\n\nArgs:\nenv (str): environment to generate templates for\npipeline_type (str): Type of pipeline like ec2 or lambda\n\nReturns:\nstr: Name of template", "source": "juraj-google-style"}
{"code": "def absolute(x):\n    if any_symbolic_tensors((x,)):\n        return Absolute().symbolic_call(x)\n    return backend.numpy.absolute(x)", "docstring": "Compute the absolute value element-wise.\n\n`keras.ops.abs` is a shorthand for this function.\n\nArgs:\nx: Input tensor.\n\nReturns:\nAn array containing the absolute value of each element in `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-1.2, 1.2])\n>>> keras.ops.absolute(x)\narray([1.2, 1.2], dtype=float32)", "source": "github-repos"}
{"code": "def export_dae(filename, cutout, level=0):\n    \n    if \".dae\" not in filename:\n        filename = filename + \".dae\"\n\n    vs, fs = mcubes.marching_cubes(cutout, level)\n    mcubes.export_mesh(vs, fs, filename, \"ndioexport\")", "docstring": "Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes).\n\nArguments:\nfilename (str): The filename to write out to\ncutout (numpy.ndarray): The dense annotation\nlevel (int): The level at which to run mcubes\n\nReturns:\nboolean success", "source": "juraj-google-style"}
{"code": "def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):\n    return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`InstructBlipConfig`] (or a derived class) from a InstructBLIP vision model, Q-Former and\nlanguage model configurations.\n\nReturns:\n[`InstructBlipConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def transpose(x, axes=None):\n    if any_symbolic_tensors((x,)):\n        return Transpose(axes=axes).symbolic_call(x)\n    return backend.numpy.transpose(x, axes=axes)", "docstring": "Returns a tensor with `axes` transposed.\n\nArgs:\nx: Input tensor.\naxes: Sequence of integers. Permutation of the dimensions of `x`.\nBy default, the order of the axes are reversed.\n\nReturns:\n`x` with its axes permuted.", "source": "github-repos"}
{"code": "async def train(state, tf_records):\n  \n\n  model_path = os.path.join(fsdb.models_dir(), state.train_model_name)\n  await run(\n      'python3', 'train.py', *tf_records,\n      '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'train.flags')),\n      '--work_dir={}'.format(fsdb.working_dir()),\n      '--export_path={}'.format(model_path),\n      '--training_seed={}'.format(state.seed),\n      '--freeze=true')\n  \n  \n  elapsed = time.time() - state.start_time\n  timestamps_path = os.path.join(fsdb.models_dir(), 'train_times.txt')\n  with gfile.Open(timestamps_path, 'a') as f:\n    print('{:.3f} {}'.format(elapsed, state.train_model_name), file=f)", "docstring": "Run training and write a new model to the fsdb models_dir.\n\nArgs:\nstate: the RL loop State instance.\ntf_records: a list of paths to TensorFlow records to train on.", "source": "juraj-google-style"}
{"code": "def __init__(self, num_steps=None, last_step=None, steps_per_run=1):\n    if num_steps is None and last_step is None:\n        raise ValueError('One of num_steps or last_step must be specified.')\n    if num_steps is not None and last_step is not None:\n        raise ValueError('Only one of num_steps or last_step can be specified.')\n    if steps_per_run is None or steps_per_run < 1:\n        raise ValueError('steps_per_run should be greater than 0')\n    self._num_steps = num_steps\n    self._last_step = last_step\n    self._steps_per_run_initial_value = steps_per_run", "docstring": "Initializes a `MultiStepStopAtStepHook`.\n\nThis hook requests stop after either a number of steps have been\nexecuted or a last step has been reached. Only one of the two options can be\nspecified.\n\nif `num_steps` is specified, it indicates the number of steps to execute\nafter `begin()` is called. If instead `last_step` is specified, it\nindicates the last step we want to execute, as passed to the `after_run()`\ncall.\n\nIn Estimator, the user provided computation, the model_fn, is wrapped\ninside a tf.while_loop for peak performance. The steps_per_run variable\ndetermines the number of iterations of the loop before returning to the CPU.\n\nArgs:\nnum_steps: Number of steps to execute.\nlast_step: Step after which to stop.\nsteps_per_run: Number of steps executed per run call.\n\nRaises:\nValueError: If one of the arguments is invalid.", "source": "github-repos"}
{"code": "def _audience_condition_deserializer(obj_dict):\n    return [obj_dict.get('name'), obj_dict.get('value'), obj_dict.get('type'), obj_dict.get('match')]", "docstring": "Deserializer defining how dict objects need to be decoded for audience conditions.\n\nArgs:\nobj_dict: Dict representing one audience condition.\n\nReturns:\nList consisting of condition key with corresponding value, type and match.", "source": "codesearchnet"}
{"code": "def has_sample(self, md5):\n        \n\n        \n        \n        sample = self.get_sample(md5)\n        return True if sample else False", "docstring": "Checks if data store has this sample.\n\nArgs:\nmd5: The md5 digest of the required sample.\n\nReturns:\nTrue if sample with this md5 is present, else False.", "source": "juraj-google-style"}
{"code": "def load_ems(self, modules_paths: List[str]):\n    all_em_lst = []\n    if modules_paths:\n        for modules_path in modules_paths:\n            em_lst = []\n            try:\n                for file_name in os.listdir(modules_path):\n                    if (file_name.startswith('em_') and file_name.endswith('.py')):\n                        sys.path.append(modules_path)\n                        this_module = importlib.import_module(file_name[:(- 3)])\n                        for em in self.classes_in_module(this_module):\n                            em_lst.append(em(self))\n            except:\n                self.log(('Error when loading etk modules from ' + modules_path), 'error')\n                raise NotGetETKModuleError('Wrong file path for ETK modules')\n            all_em_lst += em_lst\n    try:\n        all_em_lst = self.topological_sort(all_em_lst)\n    except Exception:\n        self.log('Topological sort for ETK modules fails', 'error')\n        raise NotGetETKModuleError('Topological sort for ETK modules fails')\n    return all_em_lst", "docstring": "Load all extraction modules from the path\n\nArgs:\nmodules_path: str\n\nReturns:", "source": "codesearchnet"}
{"code": "def add_streamer(self, streamer):\n    if ((self._max_streamers is not None) and (len(self.streamers) >= self._max_streamers)):\n        raise ResourceUsageError('Maximum number of streamers exceeded', max_streamers=self._max_streamers)\n    streamer.link_to_storage(self.sensor_log)\n    streamer.index = len(self.streamers)\n    self.streamers.append(streamer)", "docstring": "Add a streamer to this sensor graph.\n\nArgs:\nstreamer (DataStreamer): The streamer we want to add", "source": "codesearchnet"}
{"code": "def _check_pattern_list(patterns, key, default=None):\n    if (not patterns):\n        return default\n    if isinstance(patterns, basestring):\n        return [patterns]\n    if isinstance(patterns, list):\n        if all((isinstance(p, basestring) for p in patterns)):\n            return patterns\n    raise ValueError(\"Invalid file patterns in key '{}': must be a string or list of strings\".format(key))", "docstring": "Validates file search patterns from user configuration.\n\nAcceptable input is a string (which will be converted to a singleton list),\na list of strings, or anything falsy (such as None or an empty dictionary).\nEmpty or unset input will be converted to a default.\n\nArgs:\npatterns: input from user configuration (YAML).\nkey (str): name of the configuration key the input came from,\nused for error display purposes.\n\nKeyword Args:\ndefault: value to return in case the input is empty or unset.\n\nReturns:\nlist[str]: validated list of patterns\n\nRaises:\nValueError: if the input is unacceptable.", "source": "codesearchnet"}
{"code": "def get_project_name(project_id, projects):\n    for project in projects:\n        if (project_id == project.id):\n            return project.name", "docstring": "Retrieves project name for given project id\n\nArgs:\nprojects: List of projects\nproject_id: project id\n\nReturns: Project name or None if there is no match", "source": "codesearchnet"}
{"code": "def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):\n    \n    return self._simple_command('flash', arg=partition, info_cb=info_cb,\n                                timeout_ms=timeout_ms)", "docstring": "Flashes the last downloaded file to the given partition.\n\nArgs:\npartition: Partition to flash.\ntimeout_ms: Optional timeout in milliseconds to wait for it to finish.\ninfo_cb: See Download. Usually no messages.\n\nReturns:\nResponse to a download request, normally nothing.", "source": "juraj-google-style"}
{"code": "def sync_to_numpy_or_python_type(tensors):\n    if isinstance(tensors, coordinator_lib.RemoteValue):\n        return tensors.fetch()\n\n    def _to_single_numpy_or_python_type(t):\n        if isinstance(t, tensor_lib.Tensor):\n            x = t.numpy()\n            return x.item() if np.ndim(x) == 0 else x\n        return t\n    return nest.map_structure(_to_single_numpy_or_python_type, tensors)", "docstring": "Syncs and converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.\n\nFor each tensor, it calls `tensor.numpy()`. If the result is a scalar value,\nit converts it to a Python type, such as a float or int, by calling\n`result.item()`.\n\nNumpy scalars are converted, as Python types are often more convenient to deal\nwith. This is especially useful for bfloat16 Numpy scalars, which don't\nsupport as many operations as other Numpy values.\n\nAsync strategies (such as `TPUStrategy` and `ParameterServerStrategy`) are\nforced to\nsync during this process.\n\nArgs:\ntensors: A structure of tensors.\n\nReturns:\n`tensors`, but scalar tensors are converted to Python types and non-scalar\ntensors are converted to Numpy arrays.", "source": "github-repos"}
{"code": "def get_file_download(self, resources):\n        \n        api_name = 'virustotal-file-download'\n        api_endpoint = 'file/download'\n        return self._extract_all_responses(resources, api_endpoint, api_name)", "docstring": "Retrieves a file from its a md5, sha1, and/or sha2 hash.\n\nArgs:\nresources: list of string hashes.\nReturns:\na file download", "source": "juraj-google-style"}
{"code": "def execute_command(self, command):\n        \n\n        self.runner.info_log(\"Executing command: %s\" % command)\n\n        process = Popen(\n                command,\n                stdout=open(os.devnull, 'w'),\n                stderr=open('runner.log', 'a'),\n        )\n\n        return process", "docstring": "Execute a command\n\nArgs:\ncommand (str)\n\nReturns:\nprocess (object)", "source": "juraj-google-style"}
{"code": "def get_relative_imports(module_file: Union[str, os.PathLike]) -> list[str]:\n    with open(module_file, encoding='utf-8') as f:\n        content = f.read()\n    relative_imports = re.findall('^\\\\s*import\\\\s+\\\\.(\\\\S+)\\\\s*$', content, flags=re.MULTILINE)\n    relative_imports += re.findall('^\\\\s*from\\\\s+\\\\.(\\\\S+)\\\\s+import', content, flags=re.MULTILINE)\n    return list(set(relative_imports))", "docstring": "Get the list of modules that are relatively imported in a module file.\n\nArgs:\nmodule_file (`str` or `os.PathLike`): The module file to inspect.\n\nReturns:\n`list[str]`: The list of relative imports in the module.", "source": "github-repos"}
{"code": "def emit(self, record):\n    try:\n        message = self.format(record)\n        log_record = LogRecord(record.levelno, record.name, os.path.basename(record.pathname), record.lineno, int((record.created * 1000)), message)\n        self._test_record.add_log_record(log_record)\n        self._notify_update()\n    except Exception:\n        self.handleError(record)", "docstring": "Save a logging.LogRecord to our test record.\n\nLogs carry useful metadata such as the logger name and level information.\nWe capture this in a structured format in the test record to enable\nfiltering by client applications.\n\nArgs:\nrecord: A logging.LogRecord to record.", "source": "codesearchnet"}
{"code": "def run(self, copy_to_current_on_exit=False, site_property=None):\n        \n        scratch = tempfile.gettempdir()\n        with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:\n            self._write_input(input_dir=scratch_dir)\n            packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r')\n            p = Popen(self.packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)\n            (stdout, stderr) = p.communicate()\n            output_file = os.path.join(scratch_dir, self.control_params[\"output\"])\n            if os.path.isfile(output_file):\n                packed_mol = BabelMolAdaptor.from_file(output_file,\n                                                       self.control_params[\"filetype\"])\n                packed_mol = packed_mol.pymatgen_mol\n                print(\"packed molecule written to {}\".format(\n                    self.control_params[\"output\"]))\n                if site_property:\n                    packed_mol = self.restore_site_properties(site_property=site_property, filename=output_file)\n                return packed_mol\n            else:\n                print(\"Packmol execution failed\")\n                print(stdout, stderr)\n                return None", "docstring": "Write the input file to the scratch directory, run packmol and return\nthe packed molecule.\n\nArgs:\ncopy_to_current_on_exit (bool): Whether or not to copy the packmol\ninput/output files from the scratch directory to the current\ndirectory.\nsite_property (str): if set then the specified site property\nfor the the final packed molecule will be restored.\n\nReturns:\nMolecule object", "source": "juraj-google-style"}
{"code": "def check_with_golden(filename):\n    path_to_file = PATH_TO_DIR + '/data/' + filename\n    if os.path.isfile(path_to_file) and os.path.isfile(CUDA_CC_GOLDEN_DIR):\n        with open(path_to_file, 'r') as f_new:\n            with open(CUDA_CC_GOLDEN_DIR, 'r') as f_golden:\n                diff = difflib.unified_diff(f_new.readlines(), f_golden.readlines(), fromfile=path_to_file, tofile=CUDA_CC_GOLDEN_DIR)\n                diff_list = []\n                for line in diff:\n                    diff_list.append(line)\n                if diff_list:\n                    print('WARNING: difference(s) found between new csv and golden csv.')\n                    print(diff_list)\n                else:\n                    print('No difference found between new csv and golen csv.')", "docstring": "Checks the newly created CUDA compute capability file with the golden.\n\nIf differences are found, then it prints a list of all mismatches as\na `WARNING`.\n\nGolden file must reside in `golden/` directory.\n\nArgs:\nfilename: String that is the name of the newly created file.", "source": "github-repos"}
{"code": "def ParseActivityLogUncompressedRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = ChromeExtensionActivityEventData()\n    event_data.action_type = self._GetRowValue(query_hash, row, 'action_type')\n    event_data.activity_id = self._GetRowValue(query_hash, row, 'activity_id')\n    event_data.api_name = self._GetRowValue(query_hash, row, 'api_name')\n    event_data.arg_url = self._GetRowValue(query_hash, row, 'arg_url')\n    event_data.args = self._GetRowValue(query_hash, row, 'args')\n    event_data.extension_id = self._GetRowValue(query_hash, row, 'extension_id')\n    event_data.other = self._GetRowValue(query_hash, row, 'other')\n    event_data.page_title = self._GetRowValue(query_hash, row, 'page_title')\n    event_data.page_url = self._GetRowValue(query_hash, row, 'page_url')\n    event_data.query = query\n    timestamp = self._GetRowValue(query_hash, row, 'time')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UNKNOWN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an activity log row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def GetMatchingTransitions(self, transitions):\n    return [t for t in transitions if self._MatchWithTransition(t)]", "docstring": "Return a list of state.Transition's compatible to this state.\n\nA transition is compatible to this state when the transition's pre_states\nis comparible with this state, i.e. the transition can be executed from\nthis state.\n\nArgs:\ntransitions: List of state.Transitions among which it finds ones\ncompatible to this state.\nReturns:\nList of state.Transition's compatible to this state.", "source": "github-repos"}
{"code": "def check_server_proc_running(self):", "docstring": "Checks whether the server is still running.\n\nIf the server is not running, it throws an error. As this function is called\neach time the client tries to send an RPC, this should be a quick check\nwithout affecting performance. Otherwise it is fine to not check anything.\n\nRaises:\nerrors.ServerDiedError: if the server died.", "source": "github-repos"}
{"code": "def parse_individuals(samples):\n    \n    individuals = []\n    if len(samples) == 0:\n        raise PedigreeError(\"No samples could be found\")\n\n    ind_ids = set()\n    for sample_info in samples:\n        parsed_ind = parse_individual(sample_info)\n        individuals.append(parsed_ind)\n        ind_ids.add(parsed_ind['individual_id'])\n\n    \n    for parsed_ind in individuals:\n        father = parsed_ind['father']\n        if (father and father != '0'):\n            if father not in ind_ids:\n                raise PedigreeError('father %s does not exist in family' % father)\n        mother = parsed_ind['mother']\n        if (mother and mother != '0'):\n            if mother not in ind_ids:\n                raise PedigreeError('mother %s does not exist in family' % mother)\n\n    return individuals", "docstring": "Parse the individual information\n\nReformat sample information to proper individuals\n\nArgs:\nsamples(list(dict))\n\nReturns:\nindividuals(list(dict))", "source": "juraj-google-style"}
{"code": "def combine(path1, path2):\n    if (not path1):\n        return path2.lstrip()\n    return '{}/{}'.format(path1.rstrip('/'), path2.lstrip('/'))", "docstring": "Join two paths together.\n\nThis is faster than :func:`~fs.path.join`, but only works when the\nsecond path is relative, and there are no back references in either\npath.\n\nArguments:\npath1 (str): A PyFilesytem path.\npath2 (str): A PyFilesytem path.\n\nReturns:\nstr: The joint path.\n\nExample:\n>>> combine(\"foo/bar\", \"baz\")\n'foo/bar/baz'", "source": "codesearchnet"}
{"code": "def notify_program_learners(cls, enterprise_customer, program_details, users):\n    program_name = program_details.get('title')\n    program_branding = program_details.get('type')\n    program_uuid = program_details.get('uuid')\n    lms_root_url = get_configuration_value_for_site(enterprise_customer.site, 'LMS_ROOT_URL', settings.LMS_ROOT_URL)\n    program_path = urlquote('/dashboard/programs/{program_uuid}/?tpa_hint={tpa_hint}'.format(program_uuid=program_uuid, tpa_hint=enterprise_customer.identity_provider))\n    destination_url = '{site}/{login_or_register}?next={program_path}'.format(site=lms_root_url, login_or_register='{login_or_register}', program_path=program_path)\n    program_type = 'program'\n    program_start = get_earliest_start_date_from_program(program_details)\n    with mail.get_connection() as email_conn:\n        for user in users:\n            login_or_register = ('register' if isinstance(user, PendingEnterpriseCustomerUser) else 'login')\n            destination_url = destination_url.format(login_or_register=login_or_register)\n            send_email_notification_message(user=user, enrolled_in={'name': program_name, 'url': destination_url, 'type': program_type, 'start': program_start, 'branding': program_branding}, enterprise_customer=enterprise_customer, email_connection=email_conn)", "docstring": "Notify learners about a program in which they've been enrolled.\n\nArgs:\nenterprise_customer: The EnterpriseCustomer being linked to\nprogram_details: Details about the specific program the learners were enrolled in\nusers: An iterable of the users or pending users who were enrolled", "source": "codesearchnet"}
{"code": "def format_import(self, source_module_name, source_name, dest_name):\n    if self._lazy_loading:\n        return \"  '%s': ('%s', '%s'),\" % (dest_name, source_module_name, source_name)\n    elif source_module_name:\n        if source_name == dest_name:\n            return 'from %s import %s' % (source_module_name, source_name)\n        else:\n            return 'from %s import %s as %s' % (source_module_name, source_name, dest_name)\n    elif source_name == dest_name:\n        return 'import %s' % source_name\n    else:\n        return 'import %s as %s' % (source_name, dest_name)", "docstring": "Formats import statement.\n\nArgs:\nsource_module_name: (string) Source module to import from.\nsource_name: (string) Source symbol name to import.\ndest_name: (string) Destination alias name.\n\nReturns:\nAn import statement string.", "source": "github-repos"}
{"code": "def find_customer(cls, session, mailbox, customer):\n        \n        return cls(\n            '/mailboxes/%d/customers/%s/conversations.json' % (\n                mailbox.id, customer.id,\n            ),\n            session=session,\n        )", "docstring": "Return conversations for a specific customer in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox to search.\ncustomer (helpscout.models.Customer): Customer to search for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "juraj-google-style"}
{"code": "def _GetVisitSource(self, visit_identifier, cache, database):\n    sync_cache_results = cache.GetResults('sync')\n    if (not sync_cache_results):\n        result_set = database.Query(self._SYNC_CACHE_QUERY)\n        cache.CacheQueryResults(result_set, 'sync', 'id', ('source',))\n        sync_cache_results = cache.GetResults('sync')\n    if (sync_cache_results and visit_identifier):\n        results = sync_cache_results.get(visit_identifier, None)\n        if results:\n            return results[0]\n    return None", "docstring": "Retrieves a visit source type based on the identifier.\n\nArgs:\nvisit_identifier (str): identifier from the visits table for the\nparticular record.\ncache (SQLiteCache): cache which contains cached results from querying\nthe visit_source table.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nint: visit source type or None if no visit source type was found for\nthe identifier.", "source": "codesearchnet"}
{"code": "def plot(self, event_names, x_axis='step'):\n    \n\n    if isinstance(event_names, six.string_types):\n      event_names = [event_names]\n\n    events_list = self.get_events(event_names)\n    for event_name, dir_event_dict in zip(event_names, events_list):\n      for dir, df in six.iteritems(dir_event_dict):\n        label = event_name + ':' + dir\n        x_column = df['step'] if x_axis == 'step' else df['time']\n        plt.plot(x_column, df['value'], label=label)\n    plt.legend(loc='best')\n    plt.show()", "docstring": "Plots a list of events. Each event (a dir+event_name) is represetented as a line\nin the graph.\nArgs:\nevent_names: A list of events to plot. Each event_name may correspond to multiple events,\neach in a different directory.\nx_axis: whether to use step or time as x axis.", "source": "juraj-google-style"}
{"code": "def update_environmental_configuration(self, configuration, timeout=-1):\n        \n        uri = '{}/environmentalConfiguration'.format(self.data['uri'])\n        return self._helper.do_put(uri, configuration, timeout, None)", "docstring": "Sets the calibrated max power of an unmanaged or unsupported enclosure.\n\nArgs:\nconfiguration: Configuration\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nSettings that describe the environmental configuration.", "source": "juraj-google-style"}
{"code": "def is_end_node(node):\n    return (isinstance(node, ast.Expr) and isinstance(node.value, ast.Name) and (node.value.id == 'end'))", "docstring": "Checks if a node is the \"end\" keyword.\n\nArgs:\nnode: AST node.\n\nReturns:\nTrue if the node is the \"end\" keyword, otherwise False.", "source": "codesearchnet"}
{"code": "def RegisterOutput(cls, output_class, disabled=False):\n    \n    output_name = output_class.NAME.lower()\n\n    if disabled:\n      class_dict = cls._disabled_output_classes\n    else:\n      class_dict = cls._output_classes\n\n    if output_name in class_dict:\n      raise KeyError((\n          'Output class already set for name: {0:s}.').format(\n              output_class.NAME))\n\n    class_dict[output_name] = output_class", "docstring": "Registers an output class.\n\nThe output classes are identified based on their NAME attribute.\n\nArgs:\noutput_class (type): output module class.\ndisabled (Optional[bool]): True if the output module is disabled due to\nthe module not loading correctly or not.\n\nRaises:\nKeyError: if output class is already set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def __savorize(self, node: yaml.Node, expected_type: Type) -> yaml.Node:\n        \n        logger.debug('Savorizing node assuming type {}'.format(\n            expected_type.__name__))\n\n        for base_class in expected_type.__bases__:\n            if base_class in self._registered_classes.values():\n                node = self.__savorize(node, base_class)\n\n        if hasattr(expected_type, 'yatiml_savorize'):\n            logger.debug('Calling {}.yatiml_savorize()'.format(\n                expected_type.__name__))\n            cnode = Node(node)\n            expected_type.yatiml_savorize(cnode)\n            node = cnode.yaml_node\n        return node", "docstring": "Removes syntactic sugar from the node.\n\nThis calls yatiml_savorize(), first on the class's base \\\nclasses, then on the class itself.\n\nArgs:\nnode: The node to modify.\nexpected_type: The type to assume this type is.", "source": "juraj-google-style"}
{"code": "def _ListFileEntry(\n      self, file_system, file_entry, parent_full_path, output_writer):\n    \n    \n    \n    \n    full_path = file_system.JoinPath([parent_full_path, file_entry.name])\n    if not self._list_only_files or file_entry.IsFile():\n      output_writer.WriteFileEntry(full_path)\n\n    for sub_file_entry in file_entry.sub_file_entries:\n      self._ListFileEntry(file_system, sub_file_entry, full_path, output_writer)", "docstring": "Lists a file entry.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system that contains the file entry.\nfile_entry (dfvfs.FileEntry): file entry to list.\nparent_full_path (str): full path of the parent file entry.\noutput_writer (StdoutWriter): output writer.", "source": "juraj-google-style"}
{"code": "def lattice_points_in_supercell(supercell_matrix):\n    \n    diagonals = np.array(\n        [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1],\n         [1, 1, 0], [1, 1, 1]])\n    d_points = np.dot(diagonals, supercell_matrix)\n\n    mins = np.min(d_points, axis=0)\n    maxes = np.max(d_points, axis=0) + 1\n\n    ar = np.arange(mins[0], maxes[0])[:, None] * \\\n         np.array([1, 0, 0])[None, :]\n    br = np.arange(mins[1], maxes[1])[:, None] * \\\n         np.array([0, 1, 0])[None, :]\n    cr = np.arange(mins[2], maxes[2])[:, None] * \\\n         np.array([0, 0, 1])[None, :]\n\n    all_points = ar[:, None, None] + br[None, :, None] + cr[None, None, :]\n    all_points = all_points.reshape((-1, 3))\n\n    frac_points = np.dot(all_points, np.linalg.inv(supercell_matrix))\n\n    tvects = frac_points[np.all(frac_points < 1 - 1e-10, axis=1)\n                         & np.all(frac_points >= -1e-10, axis=1)]\n    assert len(tvects) == round(abs(np.linalg.det(supercell_matrix)))\n    return tvects", "docstring": "Returns the list of points on the original lattice contained in the\nsupercell in fractional coordinates (with the supercell basis).\ne.g. [[2,0,0],[0,1,0],[0,0,1]] returns [[0,0,0],[0.5,0,0]]\n\nArgs:\nsupercell_matrix: 3x3 matrix describing the supercell\n\nReturns:\nnumpy array of the fractional coordinates", "source": "juraj-google-style"}
{"code": "def pair_wise_sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n    height_and_width = inputs.shape[1]\n    criterion = nn.BCEWithLogitsLoss(reduction='none')\n    cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs))\n    cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs))\n    loss_pos = torch.matmul(cross_entropy_loss_pos / height_and_width, labels.T)\n    loss_neg = torch.matmul(cross_entropy_loss_neg / height_and_width, (1 - labels).T)\n    loss = loss_pos + loss_neg\n    return loss", "docstring": "A pair wise version of the cross entropy loss, see `sigmoid_cross_entropy_loss` for usage.\n\nArgs:\ninputs (`torch.Tensor`):\nA tensor representing a mask.\nlabels (`torch.Tensor`):\nA tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs\n(0 for the negative class and 1 for the positive class).\n\nReturns:\nloss (`torch.Tensor`): The computed loss between each pairs.", "source": "github-repos"}
{"code": "def get(self, key):\n    if key in self._feature_tensors:\n        return self._feature_tensors[key]\n    if key in self._features:\n        feature_tensor = self._get_raw_feature_as_tensor(key)\n        self._feature_tensors[key] = feature_tensor\n        return feature_tensor\n    if isinstance(key, six.string_types):\n        raise ValueError('Feature {} is not in features dictionary.'.format(key))\n    if not isinstance(key, _FeatureColumn):\n        raise TypeError('\"key\" must be either a \"str\" or \"_FeatureColumn\". Provided: {}'.format(key))\n    column = key\n    logging.debug('Transforming feature_column %s.', column)\n    transformed = column._transform_feature(self)\n    if transformed is None:\n        raise ValueError('Column {} is not supported.'.format(column.name))\n    self._feature_tensors[column] = transformed\n    return transformed", "docstring": "Returns a `Tensor` for the given key.\n\nA `str` key is used to access a base feature (not-transformed). When a\n`_FeatureColumn` is passed, the transformed feature is returned if it\nalready exists, otherwise the given `_FeatureColumn` is asked to provide its\ntransformed output, which is then cached.\n\nArgs:\nkey: a `str` or a `_FeatureColumn`.\n\nReturns:\nThe transformed `Tensor` corresponding to the `key`.\n\nRaises:\nValueError: if key is not found or a transformed `Tensor` cannot be\ncomputed.", "source": "github-repos"}
{"code": "def DEFINE_list(name, default, help, flag_values=FLAGS, **args):\n    parser = ListParser()\n    serializer = CsvListSerializer(',')\n    DEFINE(parser, name, default, help, flag_values, serializer, **args)", "docstring": "Registers a flag whose value is a comma-separated list of strings.\n\nThe flag value is parsed with a CSV parser.\n\nArgs:\nname: A string, the flag name.\ndefault: The default value of the flag.\nhelp: A help string.\nflag_values: FlagValues object with which the flag will be registered.\n**args: Dictionary with extra keyword args that are passed to the\nFlag __init__.", "source": "codesearchnet"}
{"code": "def _build_rdf(self, data=None):\n\n\t\t\n\n\t\t\n\t\tself.rdf = SimpleNamespace()\n\t\tself.rdf.data = data\n\t\tself.rdf.prefixes = SimpleNamespace()\n\t\tself.rdf.uris = SimpleNamespace()\n\t\t\n\t\tfor prefix,uri in self.repo.context.items():\n\t\t\tsetattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))\n\t\t\n\t\tself._parse_graph()", "docstring": "Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph\n\nArgs:\ndata (): payload from GET request, expected RDF content in various serialization formats\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def LookupClients(self, keywords):\n    \n    if isinstance(keywords, string_types):\n      raise ValueError(\n          \"Keywords should be an iterable, not a string (got %s).\" % keywords)\n\n    start_time, filtered_keywords = self._AnalyzeKeywords(keywords)\n\n    keyword_map = data_store.REL_DB.ListClientsForKeywords(\n        list(map(self._NormalizeKeyword, filtered_keywords)),\n        start_time=start_time)\n\n    results = itervalues(keyword_map)\n    relevant_set = set(next(results))\n\n    for hits in results:\n      relevant_set &= set(hits)\n\n      if not relevant_set:\n        return []\n\n    return sorted(relevant_set)", "docstring": "Returns a list of client URNs associated with keywords.\n\nArgs:\nkeywords: The list of keywords to search by.\n\nReturns:\nA list of client URNs.\n\nRaises:\nValueError: A string (single keyword) was passed instead of an iterable.", "source": "juraj-google-style"}
{"code": "def guess_is_tensorflow_py_library(py_file_path):\n    if not is_extension_uncompiled_python_source(py_file_path) and (not is_extension_compiled_python_source(py_file_path)):\n        return False\n    py_file_path = _norm_abs_path(py_file_path)\n    return (py_file_path.startswith(_TENSORFLOW_BASEDIR) or py_file_path.startswith(_ABSL_BASEDIR)) and (not py_file_path.endswith('_test.py')) and (os.path.normpath('tensorflow/python/debug/examples') not in os.path.normpath(py_file_path))", "docstring": "Guess whether a Python source file is a part of the tensorflow library.\n\nSpecial cases:\n1) Returns False for unit-test files in the library (*_test.py),\n2) Returns False for files under python/debug/examples.\n\nArgs:\npy_file_path: full path of the Python source file in question.\n\nReturns:\n(`bool`) Whether the file is inferred to be a part of the tensorflow\nlibrary.", "source": "github-repos"}
{"code": "def reset(self, indices=None):\n    \n\n    if indices is None:\n      indices = np.arange(self.trajectories.batch_size)\n\n    \n    if indices.size == 0:\n      tf.logging.warning(\n          \"`reset` called with empty indices array, this is a no-op.\")\n      return None\n\n    observations = self._reset(indices)\n    processed_observations = self.process_observations(observations)\n\n    \n    self.trajectories.reset(indices, observations)\n\n    return processed_observations", "docstring": "Resets environments at given indices.\n\nSubclasses should override _reset to do the actual reset if something other\nthan the default implementation is desired.\n\nArgs:\nindices: Indices of environments to reset. If None all envs are reset.\n\nReturns:\nBatch of initial observations of reset environments.", "source": "juraj-google-style"}
{"code": "def bgr2gray(img, keepdim=False):\n    out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    if keepdim:\n        out_img = out_img[(..., None)]\n    return out_img", "docstring": "Convert a BGR image to grayscale image.\n\nArgs:\nimg (ndarray): The input image.\nkeepdim (bool): If False (by default), then return the grayscale image\nwith 2 dims, otherwise 3 dims.\n\nReturns:\nndarray: The converted grayscale image.", "source": "codesearchnet"}
{"code": "def glu(x, axis=-1):\n    if any_symbolic_tensors((x,)):\n        return Glu(axis).symbolic_call(x)\n    return backend.nn.glu(x, axis=axis)", "docstring": "Gated Linear Unit (GLU) activation function.\n\nIt is defined as:\n\n`f(x) = a * sigmoid(b)`\nwhere `x` is split into `a` and `b` along the given axis.\n\nArgs:\nx: Input tensor.\naxis: The axis along which to split the input tensor. Defaults to `-1`.\n\nReturns:\nA tensor with the same shape as half of the input.\n\nExample:\n\n>>> x = np.array([-1., 0., 1. , 1.])\n>>> x_glu = keras.ops.glu(x)\n>>> print(x_glu)\narray([-0.73105858, 0. ], shape=(2,), dtype=float64)", "source": "github-repos"}
{"code": "def swo_start(self, swo_speed=9600):\n    if self.swo_enabled():\n        self.swo_stop()\n    info = structs.JLinkSWOStartInfo()\n    info.Speed = swo_speed\n    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.START, ctypes.byref(info))\n    if (res < 0):\n        raise errors.JLinkException(res)\n    self._swo_enabled = True\n    return None", "docstring": "Starts collecting SWO data.\n\nNote:\nIf SWO is already enabled, it will first stop SWO before enabling it\nagain.\n\nArgs:\nself (JLink): the ``JLink`` instance\nswo_speed (int): the frequency in Hz used by the target to communicate\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def _fn(arg0, arg1):\n    return arg0 + arg1", "docstring": "fn doc.\n\nArgs:\narg0: Arg 0.\narg1: Arg 1.\n\nReturns:\nSum of args.", "source": "github-repos"}
{"code": "def format(self, record):\n    \n    if (not FLAGS['showprefixforinfo'].value and\n        FLAGS['verbosity'].value == converter.ABSL_INFO and\n        record.levelno == logging.INFO and\n        _absl_handler.python_handler.stream == sys.stderr):\n      prefix = ''\n    else:\n      prefix = get_absl_log_prefix(record)\n    return prefix + super(PythonFormatter, self).format(record)", "docstring": "Appends the message from the record to the results of the prefix.\n\nArgs:\nrecord: logging.LogRecord, the record to be formatted.\n\nReturns:\nThe formatted string representing the record.", "source": "juraj-google-style"}
{"code": "def dvd_lists(self, **kwargs):\n    path = self._get_path('dvd_lists')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Gets the dvd lists available from the API.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def box_draw_character(first: Optional[BoxDrawCharacterSet], second: BoxDrawCharacterSet, *, top: int=0, bottom: int=0, left: int=0, right: int=0) -> Optional[str]:\n    if (first is None):\n        first = second\n    sign = (+ 1)\n    combo = None\n    if ((first is NORMAL_BOX_CHARS) and (second is BOLD_BOX_CHARS)):\n        combo = NORMAL_THEN_BOLD_MIXED_BOX_CHARS\n    if ((first is BOLD_BOX_CHARS) and (second is NORMAL_BOX_CHARS)):\n        combo = NORMAL_THEN_BOLD_MIXED_BOX_CHARS\n        sign = (- 1)\n    if (combo is None):\n        choice = (second if ((+ 1) in [top, bottom, left, right]) else first)\n        return choice.char(top=bool(top), bottom=bool(bottom), left=bool(left), right=bool(right))\n    return combo.char(top=(top * sign), bottom=(bottom * sign), left=(left * sign), right=(right * sign))", "docstring": "Finds a box drawing character based on its connectivity.\n\nFor example:\n\nbox_draw_character(\nNORMAL_BOX_CHARS,\nBOLD_BOX_CHARS,\ntop=-1,\nright=+1)\n\nevaluates to '┕', which has a normal upward leg and bold rightward leg.\n\nArgs:\nfirst: The character set to use for legs set to -1. If set to None,\ndefaults to the same thing as the second character set.\nsecond: The character set to use for legs set to +1.\ntop: Whether the upward leg should be present.\nbottom: Whether the bottom leg should be present.\nleft: Whether the left leg should be present.\nright: Whether the right leg should be present.\n\nReturns:\nA box drawing character approximating the desired properties, or None\nif all legs are set to 0.", "source": "codesearchnet"}
{"code": "def get_reference_points(spatial_shapes, valid_ratios, device):\n    reference_points_list = []\n    for level, (height, width) in enumerate(spatial_shapes):\n        ref_y, ref_x = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing='ij')\n        ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)\n        ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)\n        ref = torch.stack((ref_x, ref_y), -1)\n        reference_points_list.append(ref)\n    reference_points = torch.cat(reference_points_list, 1)\n    reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n    return reference_points", "docstring": "Get reference points for each feature map.\n\nArgs:\nspatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):\nSpatial shapes of each feature map.\nvalid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):\nValid ratios of each feature map.\ndevice (`torch.device`):\nDevice on which to create the tensors.\nReturns:\n`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`", "source": "github-repos"}
{"code": "def read(self, size=-1):\n    \n    self._check_open()\n    if not self._remaining():\n      return ''\n\n    data_list = []\n    while True:\n      remaining = self._buffer.remaining()\n      if size >= 0 and size < remaining:\n        data_list.append(self._buffer.read(size))\n        self._offset += size\n        break\n      else:\n        size -= remaining\n        self._offset += remaining\n        data_list.append(self._buffer.read())\n\n        if self._buffer_future is None:\n          if size < 0 or size >= self._remaining():\n            needs = self._remaining()\n          else:\n            needs = size\n          data_list.extend(self._get_segments(self._offset, needs))\n          self._offset += needs\n          break\n\n        if self._buffer_future:\n          self._buffer.reset(self._buffer_future.get_result())\n          self._buffer_future = None\n\n    if self._buffer_future is None:\n      self._request_next_buffer()\n    return ''.join(data_list)", "docstring": "Read data from RAW file.\n\nArgs:\nsize: Number of bytes to read as integer. Actual number of bytes\nread is always equal to size unless EOF is reached. If size is\nnegative or unspecified, read the entire file.\n\nReturns:\ndata read as str.\n\nRaises:\nIOError: When this buffer is closed.", "source": "juraj-google-style"}
{"code": "def startProducing(self, consumer):\n        \n        self._consumer = consumer\n        self._current_deferred = defer.Deferred()\n        self._sent = 0\n        self._paused = False\n\n        if not hasattr(self, \"_chunk_headers\"):\n            self._build_chunk_headers()\n\n        if self._data:\n            block = \"\"\n            for field in self._data:\n                block += self._chunk_headers[field]\n                block += self._data[field]\n                block += \"\\r\\n\"\n\n            self._send_to_consumer(block)\n\n        if self._files:\n            self._files_iterator = self._files.iterkeys()\n            self._files_sent = 0\n            self._files_length = len(self._files)\n            self._current_file_path = None\n            self._current_file_handle = None\n            self._current_file_length = None\n            self._current_file_sent = 0\n\n            result = self._produce()\n            if result:\n                return result\n        else:\n            return defer.succeed(None)\n\n        return self._current_deferred", "docstring": "Start producing.\n\nArgs:\nconsumer: Consumer", "source": "juraj-google-style"}
{"code": "def _hide_parameters(self, file_name):\n        \n        try:\n            in_data = load_b26_file(file_name)\n        except:\n            in_data = {}\n\n        def set_item_visible(item, is_visible):\n            if isinstance(is_visible, dict):\n                for child_id in range(item.childCount()):\n                    child = item.child(child_id)\n                    if child.name in is_visible:\n                        set_item_visible(child, is_visible[child.name])\n            else:\n                item.visible = is_visible\n\n        if \"scripts_hidden_parameters\" in in_data:\n            \n            if len(list(in_data[\"scripts_hidden_parameters\"].keys())) == self.tree_scripts.topLevelItemCount():\n\n                for index in range(self.tree_scripts.topLevelItemCount()):\n                    item = self.tree_scripts.topLevelItem(index)\n                    \n                    set_item_visible(item, in_data[\"scripts_hidden_parameters\"][item.name])\n            else:\n                print('WARNING: settings for hiding parameters does\\'t seem to match other settings')", "docstring": "hide the parameters that had been hidden\nArgs:\nfile_name: config file that has the information about which parameters are hidden", "source": "juraj-google-style"}
{"code": "def pick_unused_port():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.bind(('localhost', 0))\n    (addr, port) = s.getsockname()\n    s.close()\n    return port", "docstring": "get an unused port on the VM.\n\nReturns:\nAn unused port.", "source": "codesearchnet"}
{"code": "def send_offset_commit_request(self, group, payloads=None, fail_on_error=True, callback=None, group_generation_id=(- 1), consumer_id=''):\n    group = _coerce_consumer_group(group)\n    encoder = partial(KafkaCodec.encode_offset_commit_request, group=group, group_generation_id=group_generation_id, consumer_id=consumer_id)\n    decoder = KafkaCodec.decode_offset_commit_response\n    resps = (yield self._send_broker_aware_request(payloads, encoder, decoder, consumer_group=group))\n    returnValue(self._handle_responses(resps, fail_on_error, callback, group))", "docstring": "Send a list of OffsetCommitRequests to the Kafka broker for the\ngiven consumer group.\n\nArgs:\ngroup (str): The consumer group to which to commit the offsets\npayloads ([OffsetCommitRequest]): List of topic, partition, offsets\nto commit.\nfail_on_error (bool): Whether to raise an exception if a response\nfrom the Kafka broker indicates an error\ncallback (callable): a function to call with each of the responses\nbefore returning the returned value to the caller.\ngroup_generation_id (int): Must currently always be -1\nconsumer_id (str): Must currently always be empty string\nReturns:\n[OffsetCommitResponse]: List of OffsetCommitResponse objects.\nWill raise KafkaError for failed requests if fail_on_error is True", "source": "codesearchnet"}
{"code": "def HandleInvMessage(self, payload):\n        \n\n        if self.sync_mode != MODE_MAINTAIN:\n            return\n\n        inventory = IOHelper.AsSerializableWithType(payload, 'neo.Network.Payloads.InvPayload.InvPayload')\n        if not inventory:\n            return\n\n        if inventory.Type == InventoryType.BlockInt:\n\n            ok_hashes = []\n            for hash in inventory.Hashes:\n                hash = hash.encode('utf-8')\n                if hash not in self.myblockrequests and hash not in BC.Default().BlockRequests:\n                    ok_hashes.append(hash)\n                    BC.Default().BlockRequests.add(hash)\n                    self.myblockrequests.add(hash)\n            if len(ok_hashes):\n                message = Message(\"getdata\", InvPayload(InventoryType.Block, ok_hashes))\n                self.SendSerializedMessage(message)\n\n        elif inventory.Type == InventoryType.TXInt:\n            pass\n        elif inventory.Type == InventoryType.ConsensusInt:\n            pass", "docstring": "Process a block header inventory payload.\n\nArgs:\ninventory (neo.Network.Payloads.InvPayload):", "source": "juraj-google-style"}
{"code": "def _GetAttributeContainerByIndex(self, container_type, index):\n    \n    sequence_number = index + 1\n    query = 'SELECT _data FROM {0:s} WHERE rowid = {1:d}'.format(\n        container_type, sequence_number)\n\n    try:\n      self._cursor.execute(query)\n    except sqlite3.OperationalError as exception:\n      raise IOError('Unable to query storage file with error: {0!s}'.format(\n          exception))\n\n    row = self._cursor.fetchone()\n    if row:\n      identifier = identifiers.SQLTableIdentifier(\n          container_type, sequence_number)\n\n      if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:\n        serialized_data = zlib.decompress(row[0])\n      else:\n        serialized_data = row[0]\n\n      if self._storage_profiler:\n        self._storage_profiler.Sample(\n            'read', container_type, len(serialized_data), len(row[0]))\n\n      attribute_container = self._DeserializeAttributeContainer(\n          container_type, serialized_data)\n      attribute_container.SetIdentifier(identifier)\n      return attribute_container\n\n    count = self._CountStoredAttributeContainers(container_type)\n    index -= count\n\n    serialized_data = self._GetSerializedAttributeContainerByIndex(\n        container_type, index)\n    attribute_container = self._DeserializeAttributeContainer(\n        container_type, serialized_data)\n\n    if attribute_container:\n      identifier = identifiers.SQLTableIdentifier(\n          container_type, sequence_number)\n      attribute_container.SetIdentifier(identifier)\n    return attribute_container", "docstring": "Retrieves a specific attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nindex (int): attribute container index.\n\nReturns:\nAttributeContainer: attribute container or None if not available.\n\nRaises:\nIOError: when there is an error querying the storage file.\nOSError: when there is an error querying the storage file.", "source": "juraj-google-style"}
{"code": "def render_build_args(options, ns):\n    \n    build_args = options.get('buildArgs', {})\n    for key, value in build_args.items():\n        build_args[key] = value.format(**ns)\n    return build_args", "docstring": "Get docker build args dict, rendering any templated args.\n\nArgs:\noptions (dict):\nThe dictionary for a given image from chartpress.yaml.\nFields in `options['buildArgs']` will be rendered and returned,\nif defined.\nns (dict): the namespace used when rendering templated arguments", "source": "juraj-google-style"}
{"code": "class PatchMixerBlock(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        self.norm = PatchTSMixerNormLayer(config)\n        self.self_attn = config.self_attn\n        self.gated_attn = config.gated_attn\n        self.mlp = PatchTSMixerMLP(in_features=config.num_patches, out_features=config.num_patches, config=config)\n        if config.gated_attn:\n            self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_patches, out_size=config.num_patches)\n        if config.self_attn:\n            self.self_attn_layer = PatchTSMixerAttention(embed_dim=config.d_model, num_heads=config.self_attn_heads, dropout=config.dropout, config=config)\n            self.norm_attn = PatchTSMixerNormLayer(config)\n\n    def forward(self, hidden_state):\n        \n        residual = hidden_state\n        hidden_state = self.norm(hidden_state)\n        if self.self_attn:\n            batch_size, n_vars, num_patches, d_model = hidden_state.shape\n            hidden_state_reshaped = hidden_state.reshape(batch_size * n_vars, num_patches, d_model)\n            x_attn, _, _ = self.self_attn_layer(hidden_state_reshaped, output_attentions=False)\n            x_attn = x_attn.reshape(batch_size, n_vars, num_patches, d_model)\n        hidden_state = hidden_state.transpose(2, 3)\n        hidden_state = self.mlp(hidden_state)\n        if self.gated_attn:\n            hidden_state = self.gating_block(hidden_state)\n        hidden_state = hidden_state.transpose(2, 3)\n        if self.self_attn:\n            hidden_state = self.norm_attn(hidden_state + x_attn)\n        out = hidden_state + residual\n        return out", "docstring": "This module mixes the patch dimension.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def build(self, var_list):\n    if self.built:\n        return\n    super().build(var_list)\n    self._momentums, self._velocities = self.add_optimizer_variables(var_list, ['momentum', 'velocity'])", "docstring": "Initialize optimizer variables.\n\nLamb optimizer has 2 types of variables: momentums and velocities\n\nArgs:\nvar_list: list of model variables to build Lamb variables on.", "source": "github-repos"}
{"code": "def clusters_sites_obj(clusters):\n    result = {}\n    all_clusters = get_all_clusters_sites()\n    clusters_sites = {c: s for (c, s) in all_clusters.items() if (c in clusters)}\n    for (cluster, site) in clusters_sites.items():\n        result.update({cluster: get_site_obj(site)})\n    return result", "docstring": "Get all the corresponding sites of the passed clusters.\n\nArgs:\nclusters(list): list of string uid of sites (e.g 'rennes')\n\nReturn:\ndict corresponding to the mapping cluster uid to python-grid5000 site", "source": "codesearchnet"}
{"code": "def ensure_scheme(url, default_scheme='http'):\n    \n    parsed = urlsplit(url, scheme=default_scheme)\n    if not parsed.netloc:\n        parsed = SplitResult(\n            scheme=parsed.scheme,\n            netloc=parsed.path,\n            path='',\n            query=parsed.query,\n            fragment=parsed.fragment\n        )\n\n    return urlunsplit(parsed)", "docstring": "Adds a scheme to a url if not present.\n\nArgs:\nurl (string): a url, assumed to start with netloc\ndefault_scheme (string): a scheme to be added\n\nReturns:\nstring: URL with a scheme", "source": "juraj-google-style"}
{"code": "def decode(self, codes):\n    assert (codes.ndim == 2)\n    (N, M) = codes.shape\n    assert (M == self.M)\n    assert (codes.dtype == self.code_dtype)\n    vecs = np.empty((N, (self.Ds * self.M)), dtype=np.float32)\n    for m in range(self.M):\n        vecs[(:, (m * self.Ds):((m + 1) * self.Ds))] = self.codewords[m][(codes[(:, m)], :)]\n    return vecs", "docstring": "Given PQ-codes, reconstruct original D-dimensional vectors\napproximately by fetching the codewords.\n\nArgs:\ncodes (np.ndarray): PQ-cdoes with shape=(N, M) and dtype=self.code_dtype.\nEach row is a PQ-code\n\nReturns:\nnp.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32", "source": "codesearchnet"}
{"code": "def getFilesFromAFolder(path):\n    \n    from os import listdir\n    from os.path import isfile, join\n    \n    onlyFiles = []\n    for f in listdir(path):\n        if isfile(join(path, f)):\n            onlyFiles.append(f)\n    return onlyFiles", "docstring": "Getting all the files in a folder.\n\nArgs:\n-----\npath: The path in which looking for the files\n\nReturns:\n--------\nlist: The list of filenames found.", "source": "juraj-google-style"}
{"code": "def parts(path):\n    _path = normpath(path)\n    components = _path.strip('/')\n    _parts = [('/' if _path.startswith('/') else './')]\n    if components:\n        _parts += components.split('/')\n    return _parts", "docstring": "Split a path in to its component parts.\n\nArguments:\npath (str): Path to split in to parts.\n\nReturns:\nlist: List of components\n\nExample:\n>>> parts('/foo/bar/baz')\n['/', 'foo', 'bar', 'baz']", "source": "codesearchnet"}
{"code": "async def run(self, state: ConnectionState) -> None:\n        \n        self._print('%d +++| %s', bytes(socket_info.get()))\n        bad_commands = 0\n        try:\n            greeting = await self._exec(state.do_greeting())\n        except ResponseError as exc:\n            resp = exc.get_response(b'*')\n            resp.condition = ResponseBye.condition\n            await self.write_response(resp)\n            return\n        else:\n            await self.write_response(greeting)\n        while True:\n            try:\n                cmd = await self.read_command()\n            except (ConnectionError, EOFError):\n                break\n            except CancelledError:\n                await self.send_error_disconnect()\n                break\n            except Exception:\n                await self.send_error_disconnect()\n                raise\n            else:\n                prev_cmd = current_command.set(cmd)\n                try:\n                    if isinstance(cmd, AuthenticateCommand):\n                        creds = await self.authenticate(state, cmd.mech_name)\n                        response, _ = await self._exec(\n                            state.do_authenticate(cmd, creds))\n                    elif isinstance(cmd, IdleCommand):\n                        response = await self.idle(state, cmd)\n                    else:\n                        response = await self._exec(state.do_command(cmd))\n                except ResponseError as exc:\n                    resp = exc.get_response(cmd.tag)\n                    await self.write_response(resp)\n                    if resp.is_terminal:\n                        break\n                except AuthenticationError as exc:\n                    msg = bytes(str(exc), 'utf-8', 'surrogateescape')\n                    resp = ResponseBad(cmd.tag, msg)\n                    await self.write_response(resp)\n                except TimeoutError:\n                    resp = ResponseNo(cmd.tag, b'Operation timed out.',\n                                      ResponseCode.of(b'TIMEOUT'))\n                    await self.write_response(resp)\n                except CancelledError:\n                    await self.send_error_disconnect()\n                    break\n                except Exception:\n                    await self.send_error_disconnect()\n                    raise\n                else:\n                    await self.write_response(response)\n                    if response.is_bad:\n                        bad_commands += 1\n                        if self.bad_command_limit \\\n                                and bad_commands >= self.bad_command_limit:\n                            msg = b'Too many errors, disconnecting.'\n                            response.add_untagged(ResponseBye(msg))\n                    else:\n                        bad_commands = 0\n                    if response.is_terminal:\n                        break\n                    if isinstance(cmd, StartTLSCommand) and state.ssl_context \\\n                            and isinstance(response, ResponseOk):\n                        await self.start_tls(state.ssl_context)\n                finally:\n                    await state.do_cleanup()\n                    current_command.reset(prev_cmd)\n        self._print('%d ---| %s', b'<disconnected>')", "docstring": "Start the socket communication with the IMAP greeting, and then\nenter the command/response cycle.\n\nArgs:\nstate: Defines the interaction with the backend plugin.", "source": 
"juraj-google-style"}
{"code": "async def send_heartbeat(self, short_name):\n        \n\n        if short_name not in self.services:\n            raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n        self.services[short_name]['state'].heartbeat()\n        await self._notify_update(short_name, 'heartbeat')", "docstring": "Post a heartbeat for a service.\n\nArgs:\nshort_name (string): The short name of the service to query", "source": "juraj-google-style"}
{"code": "def IsInitializerList(clean_lines, linenum):\n  \n  for i in xrange(linenum, 1, -1):\n    line = clean_lines.elided[i]\n    if i == linenum:\n      remove_function_body = Match(r'^(.*)\\{\\s*$', line)\n      if remove_function_body:\n        line = remove_function_body.group(1)\n\n    if Search(r'\\s:\\s*\\w+[({]', line):\n      \n      \n      \n      \n      return True\n    if Search(r'\\}\\s*,\\s*$', line):\n      \n      \n      return True\n    if Search(r'[{};]\\s*$', line):\n      \n      \n      \n      \n      \n      \n      \n      return False\n\n  \n  \n  return False", "docstring": "Check if current line is inside constructor initializer list.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if current line appears to be inside constructor initializer\nlist, False otherwise.", "source": "juraj-google-style"}
{"code": "def not_found(cls, errors=None):\n    if cls.expose_status:\n        cls.response.content_type = 'application/json'\n        cls.response._status_line = '404 Not Found'\n    return cls(404, None, errors).to_json", "docstring": "Shortcut API for HTTP 404 `Not found` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "codesearchnet"}
{"code": "def read_table(fstream):\n    pos = fstream.tell()\n    line = fstream.readline().strip()\n    fragments = line.split(',')\n    fragments = [x for x in fragments if (x is not None)]\n    partition = dict()\n    if (not (len(fragments) >= 4)):\n        return None\n    partition['table'] = fragments[0]\n    partition['group'] = fragments[1]\n    partition['set'] = fragments[2]\n    partition['num_lines'] = fragments[3]\n    struct = None\n    if ((partition is not None) and (partition['table'] == 'TABLE')):\n        num_lines = int(partition['num_lines'].strip())\n        struct = {}\n        header = fetch_cols(fstream)\n        struct.update({header[0]: header[1:]})\n        for _ in range(num_lines):\n            cols = fetch_cols(fstream)\n            struct.update({cols[0]: cols[1:]})\n    else:\n        fstream.seek(pos)\n    return struct", "docstring": "Read a likwid table info from the text stream.\n\nArgs:\nfstream: Likwid's filestream.\n\nReturns (dict(str: str)):\nA dict containing likwid's table info as key/value pairs.", "source": "codesearchnet"}
{"code": "def _init_trace_logging(self, app):\n    enabled = (not app.config.get(CONF_DISABLE_TRACE_LOGGING, False))\n    if (not enabled):\n        return\n    self._trace_log_handler = LoggingHandler(self._key, telemetry_channel=self._channel)\n    app.logger.addHandler(self._trace_log_handler)", "docstring": "Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is\nset in the Flask config.\n\nArgs:\napp (flask.Flask). the Flask application for which to initialize the extension.", "source": "codesearchnet"}
{"code": "def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:\n    with open(json_file, encoding='utf-8') as reader:\n        text = reader.read()\n    feature_extractor_dict = json.loads(text)\n    return cls(**feature_extractor_dict)", "docstring": "Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to\na JSON file of parameters.\n\nArgs:\njson_file (`str` or `os.PathLike`):\nPath to the JSON file containing the parameters.\n\nReturns:\nA feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor\nobject instantiated from that JSON file.", "source": "github-repos"}
{"code": "def unpack(self, buff, offset=0):\n        \n        header = UBInt16()\n        header.unpack(buff[offset:offset+2])\n        self.tlv_type = header.value >> 9\n        length = header.value & 511\n        begin, end = offset + 2, offset + 2 + length\n        sub_type = UBInt8()\n        sub_type.unpack(buff[begin:begin+1])\n        self.sub_type = sub_type.value\n        self.sub_value = BinaryData(buff[begin+1:end])", "docstring": "Unpack a binary message into this object's attributes.\n\nUnpack the binary value *buff* and update this object attributes based\non the results.\n\nArgs:\nbuff (bytes): Binary data package to be unpacked.\noffset (int): Where to begin unpacking.\n\nRaises:\nException: If there is a struct unpacking error.", "source": "juraj-google-style"}
{"code": "def set_result(self, result):\n        \n        \n        if self.done():\n            raise RuntimeError(\"set_result can only be called once.\")\n\n        \n        self._result = result\n        self._trigger()", "docstring": "Set the result of the future to the provided result.\n\nArgs:\nresult (Any): The result", "source": "juraj-google-style"}
{"code": "async def stop_tasks(self, address):\n    tasks = self._tasks.get(address, [])\n    for task in tasks:\n        task.cancel()\n    asyncio.gather(*tasks, return_exceptions=True)\n    self._tasks[address] = []", "docstring": "Clear all tasks pertaining to a tile.\n\nThis coroutine will synchronously cancel all running tasks that were\nattached to the given tile and wait for them to stop before returning.\n\nArgs:\naddress (int): The address of the tile we should stop.", "source": "codesearchnet"}
{"code": "def _to_tensor_list(self, value) -> List['core_types.Symbol']:\n    return nest.flatten(self._to_components(value), expand_composites=True)", "docstring": "Encodes `value` as a flat list of `tf.Tensor`.\n\nBy default, this just flattens `self._to_components(value)` using\n`nest.flatten`.  However, subclasses may override this to return a\ndifferent tensor encoding for values.  In particular, some subclasses\nof `BatchableTypeSpec` override this method to return a \"boxed\" encoding\nfor values, which then can be batched or unbatched.  See\n`BatchableTypeSpec` for more details.\n\nArgs:\nvalue: A value with compatible this `TypeSpec`.  (Caller is responsible\nfor ensuring compatibility.)\n\nReturns:\nA list of `tf.Tensor`, compatible with `self._flat_tensor_specs`, which\ncan be used to reconstruct `value`.", "source": "github-repos"}
{"code": "def create_view(operations, operation):\n    operations.execute(('CREATE VIEW %s AS %s' % (operation.target.name, operation.target.sqltext)))", "docstring": "Implements ``CREATE VIEW``.\n\nArgs:\noperations: instance of ``alembic.operations.base.Operations``\noperation: instance of :class:`.ReversibleOp`\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def data_group_type(self, group_data):\n        \n        if isinstance(group_data, dict):\n            \n            file_content = group_data.pop('fileContent', None)\n            if file_content is not None:\n                self._files[group_data.get('xid')] = {\n                    'fileContent': file_content,\n                    'type': group_data.get('type'),\n                }\n        else:\n            GROUPS_STRINGS_WITH_FILE_CONTENTS = ['Document', 'Report']\n            \n            if group_data.data.get('type') in GROUPS_STRINGS_WITH_FILE_CONTENTS:\n                self._files[group_data.data.get('xid')] = group_data.file_data\n            group_data = group_data.data\n        return group_data", "docstring": "Return dict representation of group data.\n\nArgs:\ngroup_data (dict|obj): The group data dict or object.\n\nReturns:\ndict: The group data in dict format.", "source": "juraj-google-style"}
{"code": "async def get_participants(self, force_update=False) -> list:\n        \n        if force_update or self.participants is None:\n            res = await self.connection('GET', 'tournaments/{}/participants'.format(self._id))\n            self._refresh_participants_from_json(res)\n        return self.participants or []", "docstring": "get all participants\n\n|methcoro|\n\nArgs:\nforce_update (default=False): True to force an update to the Challonge API\n\nReturns:\nlist[Participant]:\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def add_state(self, state_name, initial_state, batch_size=None):\n    state_shape = initial_state.get_shape().as_list()\n    full_shape = ([batch_size] + state_shape)\n    if (not batch_size):\n        shape_proto = self._as_shape_proto(([0] + state_shape))\n        batch_size = 1\n    else:\n        shape_proto = self._as_shape_proto(([batch_size] + state_shape))\n    tiles = ([batch_size] + ([1] * len(initial_state.get_shape())))\n    feed_op = tf.placeholder_with_default(tf.tile(tf.expand_dims(initial_state, [0]), tiles), shape=full_shape, name=('%s_feed' % state_name))\n    s = {'feed_op': feed_op, 'feed_type': initial_state.dtype, 'feed_shape': shape_proto}\n    self._states[state_name] = s", "docstring": "Adds a state to the state saver.\n\nArgs:\nstate_name: The name of this state.\ninitial_state: The initial state vector. Only zeros are supported.\nbatch_size: The batch_size or None for unknown.", "source": "codesearchnet"}
{"code": "def apply_configs(config):\n    default_enabled = config.get('default_component_enabled', False)\n    delegate_keys = sorted(dr.DELEGATES, key=dr.get_name)\n    for comp_cfg in config.get('configs', []):\n        name = comp_cfg.get('name')\n        for c in delegate_keys:\n            delegate = dr.DELEGATES[c]\n            cname = dr.get_name(c)\n            if cname.startswith(name):\n                dr.ENABLED[c] = comp_cfg.get('enabled', default_enabled)\n                delegate.metadata.update(comp_cfg.get('metadata', {}))\n                delegate.tags = set(comp_cfg.get('tags', delegate.tags))\n                for (k, v) in delegate.metadata.items():\n                    if hasattr(c, k):\n                        log.debug('Setting %s.%s to %s', cname, k, v)\n                        setattr(c, k, v)\n                if hasattr(c, 'timeout'):\n                    c.timeout = comp_cfg.get('timeout', c.timeout)\n            if (cname == name):\n                break", "docstring": "Configures components. They can be enabled or disabled, have timeouts set\nif applicable, and have metadata customized. Valid keys are name, enabled,\nmetadata, and timeout.\n\nArgs:\nconfig (list): a list of dictionaries with the following keys:\ndefault_component_enabled (bool): default value for whether compoments\nare enable if not specifically declared in the config section\n\npackages (list): a list of packages to be loaded. These will be in\naddition to any packages previosly loaded for the `-p` option\n\nconfigs:\nname, enabled, metadata, and timeout. All keys are optional except\nname.\n\nname is the prefix or exact name of any loaded component. Any\ncomponent starting with name will have the associated configuration\napplied.\n\nenabled is whether the matching components will execute even if\ntheir dependencies are met. Defaults to True.\n\ntimeout sets the class level timeout attribute of any component so\nlong as the attribute already exists.\n\nmetadata is any dictionary that you want to attach to the\ncomponent. The dictionary can be retrieved by the component at\nruntime.", "source": "codesearchnet"}
{"code": "def iter_intersecting(self, iterable, key=None, descending=False):\n    return _ContainsVersionIterator(self, iterable, key, descending, mode=_ContainsVersionIterator.MODE_INTERSECTING)", "docstring": "Like `iter_intersect_test`, but returns intersections only.\n\nReturns:\nAn iterator that returns items from `iterable` that intersect.", "source": "codesearchnet"}
{"code": "def parse(input_string, prefix=''):\n    tree = parser.parse(input_string)\n    visitor = ChatlVisitor(prefix)\n    visit_parse_tree(tree, visitor)\n    return visitor.parsed", "docstring": "Parses the given DSL string and returns parsed results.\n\nArgs:\ninput_string (str): DSL string\nprefix (str): Optional prefix to add to every element name, useful to namespace things\n\nReturns:\ndict: Parsed content", "source": "codesearchnet"}
{"code": "def is_field_remote(model, field_name):\n    if (not hasattr(model, '_meta')):\n        return False\n    model_field = get_model_field(model, field_name)\n    return isinstance(model_field, (ManyToManyField, RelatedObject))", "docstring": "Check whether a given model field is a remote field.\n\nA remote field is the inverse of a one-to-many or a\nmany-to-many relationship.\n\nArguments:\nmodel: a Django model\nfield_name: the name of a field\n\nReturns:\nTrue if `field_name` is a remote field, False otherwise.", "source": "codesearchnet"}
{"code": "def image_to_tf_summary_value(image, tag):\n  \n  curr_image = np.asarray(image, dtype=np.uint8)\n  height, width, n_channels = curr_image.shape\n  \n  if n_channels == 1:\n    curr_image = np.reshape(curr_image, [height, width])\n  s = io.BytesIO()\n  matplotlib_pyplot().imsave(s, curr_image, format=\"png\")\n  img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n                             height=height, width=width,\n                             colorspace=n_channels)\n  return tf.Summary.Value(tag=tag, image=img_sum)", "docstring": "Converts a NumPy image to a tf.Summary.Value object.\n\nArgs:\nimage: 3-D NumPy array.\ntag: name for tf.Summary.Value for display in tensorboard.\nReturns:\nimage_summary: A tf.Summary.Value object.", "source": "juraj-google-style"}
{"code": "def next_population(self, population, fitnesses):\n        \n        \n        self._probability_vec = _adjust_probability_vec_best(\n            population, fitnesses, self._probability_vec, self._adjust_rate)\n\n        \n        _mutate_probability_vec(self._probability_vec, self._mutation_chance,\n                                self._mutation_adjust_rate)\n\n        \n        return [\n            _sample(self._probability_vec)\n            for _ in range(self._population_size)\n        ]", "docstring": "Make a new population after each optimization iteration.\n\nArgs:\npopulation: The population current population of solutions.\nfitnesses: The fitness associated with each solution in the population\nReturns:\nlist; a list of solutions.", "source": "juraj-google-style"}
{"code": "def asymmetric_depolarize(p_x: float, p_y: float, p_z: float) -> AsymmetricDepolarizingChannel:\n    return AsymmetricDepolarizingChannel(p_x, p_y, p_z)", "docstring": "r\"\"\"Returns a AsymmetricDepolarizingChannel with given parameter.\n\nThis channel evolves a density matrix via\n\n$$\n\\rho \\rightarrow (1 - p_x - p_y - p_z) \\rho\n+ p_x X \\rho X + p_y Y \\rho Y + p_z Z \\rho Z\n$$\n\nArgs:\np_x: The probability that a Pauli X and no other gate occurs.\np_y: The probability that a Pauli Y and no other gate occurs.\np_z: The probability that a Pauli Z and no other gate occurs.\n\nRaises:\nValueError: if the args or the sum of the args are not probabilities.", "source": "codesearchnet"}
{"code": "def CreateStorageWriter(cls, storage_format, session, path):\n    if (storage_format == definitions.STORAGE_FORMAT_SQLITE):\n        return sqlite_writer.SQLiteStorageFileWriter(session, path)\n    return None", "docstring": "Creates a storage writer.\n\nArgs:\nsession (Session): session the storage changes are part of.\npath (str): path to the storage file.\nstorage_format (str): storage format.\n\nReturns:\nStorageWriter: a storage writer or None if the storage file cannot be\nopened or the storage format is not supported.", "source": "codesearchnet"}
{"code": "def remap_variables(fn):\n    \n    def custom_getter(getter, *args, **kwargs):\n        v = getter(*args, **kwargs)\n        return fn(v)\n    return custom_getter_scope(custom_getter)", "docstring": "Use fn to map the output of any variable getter.\n\nArgs:\nfn (tf.Variable -> tf.Tensor)\n\nReturns:\nThe current variable scope with a custom_getter that maps\nall the variables by fn.\n\nExample:\n.. code-block:: python\n\nwith varreplace.remap_variables(lambda var: quantize(var)):\nx = FullyConnected('fc', x, 1000)   # fc/{W,b} will be quantized", "source": "juraj-google-style"}
{"code": "def get_provider_uri(self, provider_display_name):\n        \n        providers = self._provider_client.get_by('displayName', provider_display_name)\n        return providers[0]['uri'] if providers else None", "docstring": "Gets uri for a specific provider.\n\nArgs:\nprovider_display_name: Display name of the provider.\n\nReturns:\nuri", "source": "juraj-google-style"}
{"code": "def get_product_order_book(self, product_id, level=1):\n    params = {'level': level}\n    return self._send_message('get', '/products/{}/book'.format(product_id), params=params)", "docstring": "Get a list of open orders for a product.\n\nThe amount of detail shown can be customized with the `level`\nparameter:\n* 1: Only the best bid and ask\n* 2: Top 50 bids and asks (aggregated)\n* 3: Full order book (non aggregated)\n\nLevel 1 and Level 2 are recommended for polling. For the most\nup-to-date data, consider using the websocket stream.\n\n**Caution**: Level 3 is only recommended for users wishing to\nmaintain a full real-time order book using the websocket\nstream. Abuse of Level 3 via polling will cause your access to\nbe limited or blocked.\n\nArgs:\nproduct_id (str): Product\nlevel (Optional[int]): Order book level (1, 2, or 3).\nDefault is 1.\n\nReturns:\ndict: Order book. Example for level 1::\n{\n\"sequence\": \"3\",\n\"bids\": [\n[ price, size, num-orders ],\n],\n\"asks\": [\n[ price, size, num-orders ],\n]\n}", "source": "codesearchnet"}
{"code": "def metadata_path(self, m_path):\n        \n        if not m_path:\n            self.metadata_dir = None\n            self.metadata_file = None\n\n        else:\n            if not op.exists(m_path):\n                raise OSError('{}: file does not exist!'.format(m_path))\n\n            if not op.dirname(m_path):\n                self.metadata_dir = '.'\n            else:\n                self.metadata_dir = op.dirname(m_path)\n            self.metadata_file = op.basename(m_path)", "docstring": "Provide pointers to the paths of the metadata file\n\nArgs:\nm_path: Path to metadata file", "source": "juraj-google-style"}
{"code": "def get_mapreduce_yaml(parse=parse_mapreduce_yaml):\n  \n  mr_yaml_path = find_mapreduce_yaml()\n  if not mr_yaml_path:\n    raise errors.MissingYamlError()\n  mr_yaml_file = open(mr_yaml_path)\n  try:\n    return parse(mr_yaml_file.read())\n  finally:\n    mr_yaml_file.close()", "docstring": "Locates mapreduce.yaml, loads and parses its info.\n\nArgs:\nparse: Used for testing.\n\nReturns:\nMapReduceYaml object.\n\nRaises:\nerrors.BadYamlError: when contents is not a valid mapreduce.yaml file or the\nfile is missing.", "source": "juraj-google-style"}
{"code": "def set_s3_bucket(self, region, name, bucketName):\n    ct = self.session.client('cloudtrail', region_name=region)\n    ct.update_trail(Name=name, S3BucketName=bucketName)\n    auditlog(event='cloudtrail.set_s3_bucket', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(bucketName, name, self.account.account_name, region))", "docstring": "Sets the S3 bucket location for logfile delivery\n\nArgs:\nregion (`str`): Name of the AWS region\nname (`str`): Name of the CloudTrail Trail\nbucketName (`str`): Name of the S3 bucket to deliver log files to\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def parent(self):\n    if (self._parent is not None):\n        return self._parent\n    try:\n        package = self.repository.get_parent_package(self.resource)\n        self._parent = Package(package, context=self.context)\n    except AttributeError as e:\n        reraise(e, ValueError)\n    return self._parent", "docstring": "Get the parent package.\n\nReturns:\n`Package`.", "source": "codesearchnet"}
{"code": "def get_image_embeddings(self, pixel_values, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):\n    vision_output = self.vision_encoder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    image_embeddings = vision_output[0]\n    intermediate_embeddings = vision_output[1]\n    return (image_embeddings, intermediate_embeddings)", "docstring": "Returns the image embeddings by passing the pixel values through the vision encoder.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\nInput pixel values\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def create_tasks(self, wfk_file, scr_input):\n    assert (len(self) == 0)\n    wfk_file = self.wfk_file = os.path.abspath(wfk_file)\n    shell_manager = self.manager.to_shell_manager(mpi_procs=1)\n    w = Work(workdir=self.tmpdir.path_join('_qptdm_run'), manager=shell_manager)\n    fake_input = scr_input.deepcopy()\n    fake_task = w.register(fake_input)\n    w.allocate()\n    w.build()\n    fake_task.inlink_file(wfk_file)\n    fake_task.set_vars({'nqptdm': (- 1)})\n    fake_task.start_and_wait()\n    with NetcdfReader(fake_task.outdir.has_abiext('qptdms.nc')) as reader:\n        qpoints = reader.read_value('reduced_coordinates_of_kpoints')\n    for qpoint in qpoints:\n        qptdm_input = scr_input.deepcopy()\n        qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)\n        new_task = self.register_scr_task(qptdm_input, manager=self.manager)\n        if (self.flow.gc is not None):\n            new_task.set_gc(self.flow.gc)\n    self.allocate()", "docstring": "Create the SCR tasks and register them in self.\n\nArgs:\nwfk_file: Path to the ABINIT WFK file to use for the computation of the screening.\nscr_input: Input for the screening calculation.", "source": "codesearchnet"}
{"code": "def install(self, ref, table_name=None, index_columns=None, logger=None):\n    try:\n        obj_number = ObjectNumber.parse(ref)\n        if isinstance(obj_number, TableNumber):\n            table = self._library.table(ref)\n            connection = self._backend._get_connection()\n            return self._backend.install_table(connection, table, logger=logger)\n        else:\n            raise NotObjectNumberError\n    except NotObjectNumberError:\n        partition = self._library.partition(ref)\n        connection = self._backend._get_connection()\n        return self._backend.install(connection, partition, table_name=table_name, index_columns=index_columns, logger=logger)", "docstring": "Finds partition by reference and installs it to warehouse db.\n\nArgs:\nref (str): id, vid (versioned id), name or vname (versioned name) of the partition.", "source": "codesearchnet"}
{"code": "def clinsig_query(self, query, mongo_query):\n        \n        LOG.debug('clinsig is a query parameter')\n        trusted_revision_level = ['mult', 'single', 'exp', 'guideline']\n        rank = []\n        str_rank = []\n        clnsig_query = {}\n\n        for item in query['clinsig']:\n            rank.append(int(item))\n            \n            rank.append(CLINSIG_MAP[int(item)])\n            str_rank.append(CLINSIG_MAP[int(item)])\n\n        if query.get('clinsig_confident_always_returned') == True:\n            LOG.debug(\"add CLINSIG filter with trusted_revision_level\")\n\n            clnsig_query = { \"clnsig\":\n                        {\n                            '$elemMatch': {\n                                '$or' : [\n                                    {\n                                        '$and' : [\n                                             {'value' : { '$in': rank }},\n                                             {'revstat': { '$in': trusted_revision_level }}\n                                        ]\n                                    },\n                                    {\n                                        '$and': [\n                                            {'value' : re.compile('|'.join(str_rank))},\n                                            {'revstat' : re.compile('|'.join(trusted_revision_level))}\n                                        ]\n                                    }\n                                ]\n                            }\n                         }\n                    }\n        else:\n            LOG.debug(\"add CLINSIG filter for rank: %s\" %\n                             ', '.join(str(query['clinsig'])))\n\n            clnsig_query = {\n                    \"clnsig\":\n                        {\n                            '$elemMatch': {\n                                '$or' : [\n                                    { 'value' : { '$in': rank }},\n                                    { 'value' : re.compile('|'.join(str_rank)) }\n                                ]\n                            }\n                        }\n                }\n\n        return clnsig_query", "docstring": "Add clinsig filter values to the mongo query object\n\nArgs:\nquery(dict): a dictionary of query filters specified by the users\nmongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\nclinsig_query(dict): a dictionary with clinsig key-values", "source": "juraj-google-style"}
{"code": "def parse(self, data):\n    self.binding_var_count = 0\n    self.segment_count = 0\n    segments = self.parser.parse(data)\n    path_wildcard = False\n    for segment in segments:\n        if ((segment.kind == _TERMINAL) and (segment.literal == '**')):\n            if path_wildcard:\n                raise ValidationException('validation error: path template cannot contain more than one path wildcard')\n            path_wildcard = True\n    return segments", "docstring": "Returns a list of path template segments parsed from data.\n\nArgs:\ndata: A path template string.\nReturns:\nA list of _Segment.", "source": "codesearchnet"}
{"code": "def _trychar(char, fallback, asciimode=None):\n    if (asciimode is True):\n        return fallback\n    if (hasattr(sys.stdout, 'encoding') and sys.stdout.encoding):\n        try:\n            char.encode(sys.stdout.encoding)\n        except Exception:\n            pass\n        else:\n            return char\n    return fallback", "docstring": "Logic from IPython timeit to handle terminals that cant show mu\n\nArgs:\nchar (str): character, typically unicode, to try to use\nfallback (str): ascii character to use if stdout cannot encode char\nasciimode (bool): if True, always use fallback\n\nExample:\n>>> char = _trychar('µs', 'us')\n>>> print('char = {}'.format(char))\n>>> assert _trychar('µs', 'us', asciimode=True) == 'us'", "source": "codesearchnet"}
{"code": "def serialize_to_string(self, name, datas):\n        \n        value = datas.get('value', None)\n\n        if value is None:\n            msg = (\"String reference '{}' lacks of required 'value' variable \"\n                   \"or is empty\")\n            raise SerializerError(msg.format(name))\n\n        return value", "docstring": "Serialize given datas to a string.\n\nSimply return the value from required variable``value``.\n\nArguments:\nname (string): Name only used inside possible exception message.\ndatas (dict): Datas to serialize.\n\nReturns:\nstring: Value.", "source": "juraj-google-style"}
{"code": "def eager_run(main=None, argv=None) -> NoReturn:\n    enable_eager_execution()\n    app.run(main, argv)", "docstring": "Runs the program with an optional main function and argv list.\n\nThe program will run with eager execution enabled.\n\nExample:\n```python\nimport tensorflow as tf\n# Import subject to future changes:\n\ndef main(_):\nu = tf.constant(6.0)\nv = tf.constant(7.0)\nprint(u * v)\n\nif __name__ == \"__main__\":\ntfe.run()\n```\n\nArgs:\nmain: the main function to run.\nargv: the arguments to pass to it.", "source": "github-repos"}
{"code": "def boxify(message, border_color=None):\n    \n    lines = message.split(\"\\n\")\n    max_width = max(_visual_width(line) for line in lines)\n\n    padding_horizontal = 5\n    padding_vertical = 1\n\n    box_size_horizontal = max_width + (padding_horizontal * 2)\n\n    chars = {\"corner\": \"+\", \"horizontal\": \"-\", \"vertical\": \"|\", \"empty\": \" \"}\n\n    margin = \"{corner}{line}{corner}\\n\".format(\n        corner=chars[\"corner\"], line=chars[\"horizontal\"] * box_size_horizontal\n    )\n\n    padding_lines = [\n        \"{border}{space}{border}\\n\".format(\n            border=colorize(chars[\"vertical\"], color=border_color),\n            space=chars[\"empty\"] * box_size_horizontal,\n        )\n        * padding_vertical\n    ]\n\n    content_lines = [\n        \"{border}{space}{content}{space}{border}\\n\".format(\n            border=colorize(chars[\"vertical\"], color=border_color),\n            space=chars[\"empty\"] * padding_horizontal,\n            content=_visual_center(line, max_width),\n        )\n        for line in lines\n    ]\n\n    box_str = \"{margin}{padding}{content}{padding}{margin}\".format(\n        margin=colorize(margin, color=border_color),\n        padding=\"\".join(padding_lines),\n        content=\"\".join(content_lines),\n    )\n\n    return box_str", "docstring": "Put a message inside a box.\n\nArgs:\nmessage (unicode): message to decorate.\nborder_color (unicode): name of the color to outline the box with.", "source": "juraj-google-style"}
{"code": "def _construct_operation_id(self, service_name, protorpc_method_name):\n    \n\n    \n    method_name_camel = util.snake_case_to_headless_camel_case(\n        protorpc_method_name)\n\n    return '{0}_{1}'.format(service_name, method_name_camel)", "docstring": "Return an operation id for a service method.\n\nArgs:\nservice_name: The name of the service.\nprotorpc_method_name: The ProtoRPC method name.\n\nReturns:\nA string representing the operation id.", "source": "juraj-google-style"}
{"code": "def add_object_to_scope(self, obj):\n        \n        if isinstance(obj, Computer):\n            self.add_object_to_path(obj, \"scope/computers\")\n        elif isinstance(obj, ComputerGroup):\n            self.add_object_to_path(obj, \"scope/computer_groups\")\n        elif isinstance(obj, Building):\n            self.add_object_to_path(obj, \"scope/buildings\")\n        elif isinstance(obj, Department):\n            self.add_object_to_path(obj, \"scope/departments\")\n        else:\n            raise TypeError", "docstring": "Add an object to the appropriate scope block.\n\nArgs:\nobj: JSSObject to add to scope. Accepted subclasses are:\nComputer\nComputerGroup\nBuilding\nDepartment\n\nRaises:\nTypeError if invalid obj type is provided.", "source": "juraj-google-style"}
{"code": "def CreateSitelinkFeedItem(feed_items, feed_item_id):\n  \n  site_link_from_feed = feed_items[feed_item_id]\n  site_link_feed_item = {\n      'sitelinkText': site_link_from_feed['text'],\n      'sitelinkLine2': site_link_from_feed['line2'],\n      'sitelinkLine3': site_link_from_feed['line3'],\n  }\n\n  if 'finalUrls' in site_link_from_feed and site_link_from_feed['finalUrls']:\n    site_link_feed_item['sitelinkFinalUrls'] = {\n        'urls': site_link_from_feed['finalUrls']\n    }\n\n    if 'finalMobileUrls' in site_link_from_feed:\n      site_link_feed_item['sitelinkFinalMobileUrls'] = {\n          'urls': site_link_from_feed['finalMobileUrls']\n      }\n\n    site_link_feed_item['sitelinkTrackingUrlTemplate'] = (\n        site_link_from_feed['trackingUrlTemplate'])\n  else:\n    site_link_feed_item['sitelinkUrl'] = site_link_from_feed['url']\n\n  return site_link_feed_item", "docstring": "Creates a Sitelink Feed Item.\n\nArgs:\nfeed_items: a list of all Feed Items.\nfeed_item_id: the Id of a specific Feed Item for which a Sitelink Feed Item\nshould be created.\n\nReturns:\nThe new Sitelink Feed Item.", "source": "juraj-google-style"}
{"code": "def _parse_address(self, val):\n    ret = {'type': None, 'value': None}\n    try:\n        ret['type'] = val[1]['type']\n    except (KeyError, ValueError, TypeError):\n        pass\n    try:\n        ret['value'] = val[1]['label']\n    except (KeyError, ValueError, TypeError):\n        ret['value'] = '\\n'.join(val[3]).strip()\n    try:\n        self.vars['address'].append(ret)\n    except AttributeError:\n        self.vars['address'] = []\n        self.vars['address'].append(ret)", "docstring": "The function for parsing the vcard address.\n\nArgs:\nval (:obj:`list`): The value to parse.", "source": "codesearchnet"}
{"code": "def ts_to_dt(jwt_dict):\n    d = jwt_dict.copy()\n    for (k, v) in [v[:2] for v in CLAIM_LIST if v[2]]:\n        if (k in jwt_dict):\n            d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])\n    return d", "docstring": "Convert timestamps in JWT to datetime objects.\n\nArgs:\njwt_dict: dict\nJWT with some keys containing timestamps.\n\nReturns:\ndict: Copy of input dict where timestamps have been replaced with\ndatetime.datetime() objects.", "source": "codesearchnet"}
{"code": "def add_parameter(self, name, min_val, max_val):\n    self.__parameters.append(Parameter(name, min_val, max_val))", "docstring": "Adds a paramber to the Population\n\nArgs:\nname (str): name of the parameter\nmin_val (int or float): minimum value for the parameter\nmax_val (int or float): maximum value for the parameter", "source": "codesearchnet"}
{"code": "def extract_annotation(data):\n    xlabel = None\n    xvalues = None\n    ylabel = None\n    yvalues = None\n    if hasattr(data, 'minor_axis'):\n        xvalues = data.minor_axis\n        if hasattr(data.minor_axis, 'name'):\n            xlabel = data.minor_axis.name\n    if hasattr(data, 'columns'):\n        xvalues = data.columns\n        if hasattr(data.columns, 'name'):\n            xlabel = data.columns.name\n    if hasattr(data, 'major_axis'):\n        yvalues = data.major_axis\n        if hasattr(data.major_axis, 'name'):\n            ylabel = data.major_axis.name\n    if hasattr(data, 'index'):\n        yvalues = data.index\n        if hasattr(data.index, 'name'):\n            ylabel = data.index.name\n    return (xlabel, xvalues, ylabel, yvalues)", "docstring": "Extract names and values of rows and columns.\n\nParameter:\ndata : DataFrame | Panel\n\nReturns:\ncol_name, col_values, row_name, row_values", "source": "codesearchnet"}
{"code": "def parse_config(file_path):\n    if (not os.path.isfile(file_path)):\n        return {}\n    parser = ConfigParser()\n    parser.read(file_path)\n    for s in parser._sections:\n        for v in six.iterkeys(parser._sections[s]):\n            parser._sections[s][v] = parser._sections[s][v].split('\n    return parser._sections", "docstring": "Convert the CISM configuration file to a python dictionary\n\nArgs:\nfile_path: absolute path to the configuration file\n\nReturns:\nA dictionary representation of the given file", "source": "codesearchnet"}
{"code": "class Poisson(MeanMetricWrapper):\n\n    def __init__(self, name='poisson', dtype=None):\n        super(Poisson, self).__init__(poisson, name, dtype=dtype)", "docstring": "Computes the Poisson metric between `y_true` and `y_pred`.\n\n`metric = y_pred - y_true * log(y_pred)`\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.Poisson()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result().numpy()\n0.49999997\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result().numpy()\n0.99999994\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(optimizer='sgd',\nloss='mse',\nmetrics=[tf.keras.metrics.Poisson()])\n```", "source": "github-repos"}
{"code": "def _build_element_shape(shape):\n    if isinstance(shape, tensor_lib.Tensor):\n        return shape\n    if isinstance(shape, tensor_shape.TensorShape):\n        shape = shape.as_list() if shape else None\n    if shape is None:\n        return -1\n    if isinstance(shape, (np.ndarray, np.generic)) or not shape:\n        return ops.convert_to_tensor(shape, dtype=dtypes.int32)\n\n    def convert(val):\n        if val is None:\n            return -1\n        if isinstance(val, tensor_lib.Tensor):\n            return val\n        if isinstance(val, tensor_shape.Dimension):\n            return val.value if val.value is not None else -1\n        return val\n    return [convert(d) for d in shape]", "docstring": "Converts shape to a format understood by list_ops for element_shape.\n\nIf `shape` is already a `Tensor` it is returned as-is. We do not perform a\ntype check here.\n\nIf shape is None or a TensorShape with unknown rank, -1 is returned.\n\nIf shape is a scalar, an int32 tensor with empty list is returned. Note we\ndo directly return an empty list since ops.convert_to_tensor would conver it\nto a float32 which is not a valid type for element_shape.\n\nIf shape is a sequence of dims, None's in the list are replaced with -1. We\ndo not check the dtype of the other dims.\n\nArgs:\nshape: Could be None, Tensor, TensorShape or a list of dims (each dim could\nbe a None, scalar or Tensor).\n\nReturns:\nA None-free shape that can be converted to a tensor.", "source": "github-repos"}
{"code": "def update_headers(self, response):\n    if (('expires' in response.headers) and ('cache-control' in response.headers)):\n        self.msg = self.server_cache_headers\n        return response.headers\n    else:\n        self.msg = self.default_cache_vars\n        date = parsedate(response.headers['date'])\n        expires = (datetime(*date[:6]) + timedelta(0, self.expire_after))\n        response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())), 'cache-control': 'public'})\n        return response.headers", "docstring": "Returns the updated caching headers.\n\nArgs:\nresponse (HttpResponse): The response from the remote service\n\nReturns:\nresponse:(HttpResponse.Headers): Http caching headers", "source": "codesearchnet"}
{"code": "def event(self, cuuid, host, euuid, event_data, timestamp, priority):\n    response = None\n    if (host in self.encrypted_hosts):\n        logger.debug('Encrypted!')\n        client_key = self.registry[cuuid]['encryption']\n    else:\n        logger.debug('Not encrypted :<')\n        client_key = None\n    port = host[1]\n    host = host[0]\n    if (not self.is_registered(cuuid, host)):\n        logger.warning(('<%s> Sending BYE EVENT: Client not registered.' % cuuid))\n        response = serialize_data({'method': 'BYE EVENT', 'data': 'Not registered'}, self.compression, self.encryption, client_key)\n        return response\n    if (euuid in self.event_uuids):\n        logger.warning(('<%s> Event ID is already being processed: %s' % (cuuid, euuid)))\n        return response\n    self.event_uuids[euuid] = 0\n    logger.debug(('<%s> <euuid:%s> Currently processing events: %s' % (cuuid, euuid, str(self.event_uuids))))\n    logger.debug(('<%s> <euuid:%s> New event being processed' % (cuuid, euuid)))\n    logger.debug(('<%s> <euuid:%s> Event Data: %s' % (cuuid, euuid, pformat(event_data))))\n    if self.middleware.event_legal(cuuid, euuid, event_data):\n        logger.debug(('<%s> <euuid:%s> Event LEGAL. Sending judgement to client.' % (cuuid, euuid)))\n        response = serialize_data({'method': 'LEGAL', 'euuid': euuid, 'priority': priority}, self.compression, self.encryption, client_key)\n        thread = threading.Thread(target=self.middleware.event_execute, args=(cuuid, euuid, event_data))\n        thread.start()\n    else:\n        logger.debug(('<%s> <euuid:%s> Event ILLEGAL. Sending judgement to client.' % (cuuid, euuid)))\n        response = serialize_data({'method': 'ILLEGAL', 'euuid': euuid, 'priority': priority}, self.compression, self.encryption, client_key)\n    self.listener.call_later(self.timeout, self.retransmit, {'euuid': euuid, 'response': response, 'cuuid': cuuid})\n    return response", "docstring": "This function will process event packets and send them to legal\nchecks.\n\nArgs:\ncuuid (string): The client uuid that the event came from.\nhost (tuple): The (address, port) tuple of the client.\neuuid (string): The event uuid of the specific event.\nevent_data (any): The event data that we will be sending to the\nmiddleware to be judged and executed.\ntimestamp (string): The client provided timestamp of when the event\nwas created.\npriority (string): The priority of the event. This is normally set to\neither \"normal\" or \"high\". If an event was sent with a high\npriority, then the client will not wait for a response from the\nserver before executing the event locally.\n\nReturns:\nA LEGAL/ILLEGAL response to be sent to the client.", "source": "codesearchnet"}
{"code": "def post_process(self, outputs, target_sizes):\n    logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if len(out_logits) != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    if target_sizes.shape[1] != 2:\n        raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n    prob = out_logits.sigmoid()\n    topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    img_h, img_w = target_sizes.unbind(1)\n    scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n    boxes = boxes * scale_fct[:, None, :]\n    results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n    return results", "docstring": "Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,\ntop_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`DeformableDetrObjectDetectionOutput`]):\nRaw outputs of the model.\ntarget_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\nTensor containing the size (height, width) of each image of the batch. For evaluation, this must be the\noriginal image size (before any data augmentation). For visualization, this should be the image size\nafter data augment, but before padding.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def expired(self, cfgstr=None, product=None):\n        \n        products = self._rectify_products(product)\n        certificate = self._get_certificate(cfgstr=cfgstr)\n        if certificate is None:\n            \n            is_expired = True\n        elif products is None:\n            \n            is_expired = False\n        elif not all(map(os.path.exists, products)):\n            \n            is_expired = True\n        else:\n            \n            \n            product_file_hash = self._product_file_hash(products)\n            certificate_hash = certificate.get('product_file_hash', None)\n            is_expired = product_file_hash != certificate_hash\n        return is_expired", "docstring": "Check to see if a previously existing stamp is still valid and if the\nexpected result of that computation still exists.\n\nArgs:\ncfgstr (str, optional): override the default cfgstr if specified\nproduct (PathLike or Sequence[PathLike], optional): override the\ndefault product if specified", "source": "juraj-google-style"}
{"code": "def on_enter(__msg: Optional[Union[Callable, str]] = None) -> Callable:\n    \n    \n    def decorator(__func):\n        @wraps(__func)\n        def wrapper(*args, **kwargs):\n            if __msg:\n                print(__msg)\n            else:\n                print('Entering {!r}({!r})'.format(__func.__name__, __func))\n            return __func(*args, **kwargs)\n        return wrapper\n    if callable(__msg):\n        return on_enter()(__msg)\n    return decorator", "docstring": "Decorator to display a message when entering a function.\n\nArgs:\n__msg: Message to display\nReturns:\nWrapped function", "source": "juraj-google-style"}
{"code": "def checkpoints(self):\n    return list(self._maybe_delete.keys())", "docstring": "A list of managed checkpoints.\n\nNote that checkpoints saved due to `keep_checkpoint_every_n_hours` will not\nshow up in this list (to avoid ever-growing filename lists).\n\nReturns:\nA list of filenames, sorted from oldest to newest.", "source": "github-repos"}
{"code": "def equals(self, other):\n        \n\n        \n        \n        \n        \n        \n        if not isinstance(other, self.__class__):\n            return False\n        else:\n            return self.properties_with_values() == other.properties_with_values()", "docstring": "Structural equality of models.\n\nArgs:\nother (HasProps) : the other instance to compare to\n\nReturns:\nTrue, if properties are structurally equal, otherwise False", "source": "juraj-google-style"}
{"code": "def __init__(self, fail_on_unset: bool = False, default: str = 'none', **_vars: Any):\n        \n        self.fail_on_unset = bool(fail_on_unset)\n        self.default = str(default)\n        self.vars = _vars", "docstring": "Initializer.\n\nArgs:\nfail_on_unset (bool): If set to True an exception will be raised when the environment\nvariable is unset; otherwise the default value (see next) will be used instead.\ndefault (str): If a environment variable is unset, it will get this value instead.", "source": "juraj-google-style"}
{"code": "def _AddEnumValues(descriptor, cls):\n  \n  for enum_type in descriptor.enum_types:\n    setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))\n    for enum_value in enum_type.values:\n      setattr(cls, enum_value.name, enum_value.number)", "docstring": "Sets class-level attributes for all enum fields defined in this message.\n\nAlso exporting a class-level object that can name enum values.\n\nArgs:\ndescriptor: Descriptor object for this message type.\ncls: Class we're constructing for this message type.", "source": "juraj-google-style"}
{"code": "class Activation(Layer):\n\n    def __init__(self, activation, **kwargs):\n        super().__init__(**kwargs)\n        self.supports_masking = True\n        self.activation = activations.get(activation)\n        self._build_at_init()\n\n    def call(self, inputs):\n        return self.activation(inputs)\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = {'activation': activations.serialize(self.activation)}\n        base_config = super().get_config()\n        return {**base_config, **config}", "docstring": "Applies an activation function to an output.\n\nArgs:\nactivation: Activation function. It could be a callable, or the name of\nan activation from the `keras.activations` namespace.\n**kwargs: Base layer keyword arguments, such as `name` and `dtype`.\n\nExample:\n\n>>> layer = keras.layers.Activation('relu')\n>>> layer(np.array([-3.0, -1.0, 0.0, 2.0]))\n[0.0, 0.0, 0.0, 2.0]\n>>> layer = keras.layers.Activation(keras.activations.relu)\n>>> layer(np.array([-3.0, -1.0, 0.0, 2.0]))\n[0.0, 0.0, 0.0, 2.0]", "source": "github-repos"}
{"code": "def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent:\n        \n        reward_expr = self.rddl.domain.reward\n        with self.graph.as_default():\n            with tf.name_scope('reward'):\n                return self._compile_expression(reward_expr, scope)", "docstring": "Compiles the reward function given the fluent `scope`.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.\n\nReturns:\nA :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.", "source": "juraj-google-style"}
{"code": "def download_from_url(path, url):\n  \n  filename = url.split(\"/\")[-1]\n  found_file = find_file(path, filename, max_depth=0)\n  if found_file is None:\n    filename = os.path.join(path, filename)\n    tf.logging.info(\"Downloading from %s to %s.\" % (url, filename))\n    inprogress_filepath = filename + \".incomplete\"\n    inprogress_filepath, _ = urllib.request.urlretrieve(\n        url, inprogress_filepath, reporthook=download_report_hook)\n    \n    print()\n    tf.gfile.Rename(inprogress_filepath, filename)\n    return filename\n  else:\n    tf.logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n    return found_file", "docstring": "Download content from a url.\n\nArgs:\npath: string directory where file will be downloaded\nurl: string url\n\nReturns:\nFull path to downloaded file", "source": "juraj-google-style"}
{"code": "def slice_constant(data, batch_size=32, name='constant_data', global_step=None):\n    with tf.name_scope(name):\n        all_data = tf.convert_to_tensor(data)\n        global_step = (global_step or bookkeeper.global_step())\n        count = (len(data) / batch_size)\n        extra = (len(data) - (count * batch_size))\n        if extra:\n            offset = tf.mod(global_step, count)\n            return tf.slice(all_data, (offset * batch_size), batch_size)\n        else:\n            offset = tf.mod(global_step, (count + 1))\n            return tf.slice(all_data, (offset * batch_size), tf.where(tf.equal(offset, count), extra, batch_size))", "docstring": "Provide a slice based on the global_step.\n\nThis is useful when the entire data array can be stored in memory because it\nallows you to feed the data very efficiently.\n\nArgs:\ndata: A numpy array or tensor.\nbatch_size: The batch size for the produced data.\nname: An optional name for this data.\nglobal_step: A global step variable that is used to read the data. If None\nthen the default prettytensor global_step is used.\nReturns:\nA tensor that produces the given data.", "source": "codesearchnet"}
{"code": "def UpdateMapping(self, filename, mapping_update):\n    \n    if filename not in self._file_mapping:\n      raise problems.NonexistentMapping(filename)\n    mapping = self._file_mapping[filename]\n    mapping.update(mapping_update)", "docstring": "Updates an entry in the list of known filenames.\nAn entry is identified by its filename.\n\nArgs:\nfilename: The filename whose mapping is to be updated\nmapping_update: A dictionary containing the fields to update and their\nnew values.\nRaises:\nInexistentMapping if the filename does not exist in the mapping", "source": "juraj-google-style"}
{"code": "def has_auth_params(self, scheme):\n    for (k, v) in iteritems(self.schemes[scheme][u'params']):\n        if (not v):\n            return False\n    return True", "docstring": "Check whether all information required for a given auth scheme have\nbeen supplied.\n\nArgs:\nscheme (str): Name of the authentication scheme to check. One of\nGem-Identify, Gem-Device, Gem-Application\n\nReturns:\nTrue if all required parameters for the specified scheme are present\nor False otherwise.", "source": "codesearchnet"}
{"code": "def load_addon(username, package_name, _globals):\n    \n    addon_module = get_or_create_module_r(username)\n    package_module = __import__(package_name)\n    add_tasks_r(addon_module, package_module, package_name)\n    _globals.update({username: addon_module})\n    del package_module\n    del addon_module", "docstring": "Load an fabsetup addon given by 'package_name' and hook it in the\nbase task namespace 'username'.\n\nArgs:\nusername(str)\npackage_name(str)\n_globals(dict): the globals() namespace of the fabric script.\n\nReturn: None", "source": "juraj-google-style"}
{"code": "class PerceiverDecoderOutput(ModelOutput):\n    logits: Optional[torch.FloatTensor] = None\n    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for Perceiver decoder outputs, with potential cross-attentions.\n\nArgs:\nlogits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):\nOutput of the basic decoder.\ncross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\nTuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\nsequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,\nused to compute the weighted average in the cross-attention heads.", "source": "github-repos"}
{"code": "def sendCommand(self, command):\n    command_data = [ord(x) for x in buffer(command)]\n    self.hid.write(command_data)\n    response_data = ''.join((chr(x) for x in self.hid.read(64)))\n    response = command.RESPONSE.from_buffer_copy(response_data)\n    if (response.status != 0):\n        raise CommandException(response.status)\n    return response", "docstring": "Sends a Command object to the MCP2210 and returns its response.\n\nArguments:\nA commands.Command instance\n\nReturns:\nA commands.Response instance, or raises a CommandException on error.", "source": "codesearchnet"}
{"code": "def spec_filled(self, pos_args, kw_args):\n        \n\n        req_names = self.arg_names\n        if len(self.arg_defaults) > 0:\n            req_names = req_names[:-len(self.arg_defaults)]\n\n        req = [x for x in req_names if x not in kw_args]\n        return len(req) <= len(pos_args)", "docstring": "Check if we have enough arguments to call this function.\n\nArgs:\npos_args (list): A list of all the positional values we have.\nkw_args (dict): A dict of all of the keyword args we have.\n\nReturns:\nbool: True if we have a filled spec, False otherwise.", "source": "juraj-google-style"}
{"code": "def my_solid_angle(center, coords):\n    \n    o = np.array(center)\n    r = [np.array(c) - o for c in coords]\n    r.append(r[0])\n    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]\n    n.append(np.cross(r[1], r[0]))\n    phi = 0.0\n    for i in range(len(n) - 1):\n        try:\n            value = math.acos(-np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])))\n        except ValueError:\n            mycos = -np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))\n            if 0.999999999999 < mycos < 1.000000000001:\n                value = math.acos(1.0)\n            elif -0.999999999999 > mycos > -1.000000000001:\n                value = math.acos(-1.0)\n            else:\n                raise SolidAngleError(mycos)\n        phi += value\n    return phi + (3 - len(r)) * math.pi", "docstring": "Helper method to calculate the solid angle of a set of coords from the\ncenter.\n\nArgs:\ncenter:\nCenter to measure solid angle from.\ncoords:\nList of coords to determine solid angle.\n\nReturns:\nThe solid angle.", "source": "juraj-google-style"}
{"code": "def extract_wavs(utterances: List[Utterance], tgt_dir: Path, lazy: bool) -> None:\n    tgt_dir.mkdir(parents=True, exist_ok=True)\n    for utter in utterances:\n        wav_fn = '{}.{}'.format(utter.prefix, 'wav')\n        out_wav_path = (tgt_dir / wav_fn)\n        if (lazy and out_wav_path.is_file()):\n            logger.info('File {} already exists and lazy == {}; not writing.'.format(out_wav_path, lazy))\n            continue\n        logger.info('File {} does not exist and lazy == {}; creating it.'.format(out_wav_path, lazy))\n        trim_wav_ms(utter.org_media_path, out_wav_path, utter.start_time, utter.end_time)", "docstring": "Extracts WAVs from the media files associated with a list of Utterance\nobjects and stores it in a target directory.\n\nArgs:\nutterances: A list of Utterance objects, which include information\nabout the source media file, and the offset of the utterance in the\nmedia_file.\ntgt_dir: The directory in which to write the output WAVs.\nlazy: If True, then existing WAVs will not be overwritten if they have\nthe same name", "source": "codesearchnet"}
{"code": "def get_serialization_context(self, driver_id):\n    with self.lock:\n        if (driver_id not in self.serialization_context_map):\n            _initialize_serialization(driver_id)\n        return self.serialization_context_map[driver_id]", "docstring": "Get the SerializationContext of the driver that this worker is processing.\n\nArgs:\ndriver_id: The ID of the driver that indicates which driver to get\nthe serialization context for.\n\nReturns:\nThe serialization context of the given driver.", "source": "codesearchnet"}
{"code": "def add_citations(voevent, event_ivorns):\n    \n    if not voevent.xpath('Citations'):\n        etree.SubElement(voevent, 'Citations')\n    voevent.Citations.extend(_listify(event_ivorns))", "docstring": "Add citations to other voevents.\n\nThe schema mandates that the 'Citations' section must either be entirely\nabsent, or non-empty - hence we require this wrapper function for its\ncreation prior to listing the first citation.\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\nevent_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn\nelements to add to citation list.", "source": "juraj-google-style"}
{"code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Subtracts `tf.IndexedSlices` from this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be subtracted from this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def getctime(self, path=None, client_kwargs=None, header=None):\n    return self._getctime_from_header(self.head(path, client_kwargs, header))", "docstring": "Return the creation time of path.\n\nArgs:\npath (str): File path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\nfloat: The number of seconds since the epoch\n(see the time module).", "source": "codesearchnet"}
{"code": "def _key2seed(a):\n\n    def int64_to_int32s(a):\n        \n        a = math_ops.cast(a, dtypes.uint64)\n        fst = math_ops.cast(a, dtypes.uint32)\n        snd = math_ops.cast(gen_bitwise_ops.right_shift(a, constant_op.constant(32, dtypes.uint64)), dtypes.uint32)\n        a = [fst, snd]\n        a = nest.map_structure(lambda x: math_ops.cast(x, dtypes.int32), a)\n        a = array_ops_stack.stack(a)\n        return a\n    return int64_to_int32s(a)", "docstring": "Converts an RNG key to an RNG seed.\n\nArgs:\na: an RNG key, an ndarray of shape [] and dtype `np.int64`.\n\nReturns:\nan RNG seed, a tensor of shape [2] and dtype `tf.int32`.", "source": "github-repos"}
{"code": "def get_gap(self, tol=0.001, abs_tol=False, spin=None):\n        \n        (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)\n        return max(cbm - vbm, 0.0)", "docstring": "Expects a DOS object and finds the gap.\n\nArgs:\ntol: tolerance in occupations for determining the gap\nabs_tol: An absolute tolerance (True) and a relative one (False)\nspin: Possible values are None - finds the gap in the summed\ndensities, Up - finds the gap in the up spin channel,\nDown - finds the gap in the down spin channel.\n\nReturns:\ngap in eV", "source": "juraj-google-style"}
{"code": "def cluster_from_file(filename):\n        \n        atoms_string = Atoms.atoms_string_from_file(filename)\n        line_list = [l.split() for l in atoms_string.splitlines()[3:]]\n        coords = []\n        symbols = []\n        for l in line_list:\n            if l:\n                coords.append([float(i) for i in l[:3]])\n                symbols.append(l[4])\n        return Molecule(symbols, coords)", "docstring": "Parse the feff input file and return the atomic cluster as a Molecule\nobject.\n\nArgs:\nfilename (str): path the feff input file\n\nReturns:\nMolecule: the atomic cluster as Molecule object. The absorbing atom\nis the one at the origin.", "source": "juraj-google-style"}
{"code": "def reset_partition_offset(self, partition):\n        \n        LATEST = -1\n        EARLIEST = -2\n        if self.auto_offset_reset == 'largest':\n            reqs = [OffsetRequestPayload(self.topic, partition, LATEST, 1)]\n        elif self.auto_offset_reset == 'smallest':\n            reqs = [OffsetRequestPayload(self.topic, partition, EARLIEST, 1)]\n        else:\n            \n            \n            if sys.exc_info() == (None, None, None):\n                raise OffsetOutOfRangeError('Cannot reset partition offsets without a '\n                                            'valid auto_offset_reset setting '\n                                            '(largest|smallest)')\n            \n            \n            \n            raise \n\n        \n        log.info('Resetting topic-partition offset to %s for %s:%d',\n                 self.auto_offset_reset, self.topic, partition)\n        try:\n            (resp, ) = self.client.send_offset_request(reqs)\n        except KafkaError as e:\n            log.error('%s sending offset request for %s:%d',\n                      e.__class__.__name__, self.topic, partition)\n        else:\n            self.offsets[partition] = resp.offsets[0]\n            self.fetch_offsets[partition] = resp.offsets[0]\n            return resp.offsets[0]", "docstring": "Update offsets using auto_offset_reset policy (smallest|largest)\n\nArguments:\npartition (int): the partition for which offsets should be updated\n\nReturns: Updated offset on success, None on failure", "source": "juraj-google-style"}
{"code": "def read_local_config(cfg):\n    \n    try:\n        if os.path.exists(cfg):\n            config = import_file_object(cfg)\n            return config\n        else:\n            logger.warning(\n                '%s: local config file (%s) not found, cannot be read' %\n                (inspect.stack()[0][3], str(cfg)))\n    except IOError as e:\n        logger.warning(\n            'import_file_object: %s error opening %s' % (str(e), str(cfg))\n        )\n    return {}", "docstring": "Parses local config file for override values\n\nArgs:\n:local_file (str):  filename of local config file\n\nReturns:\ndict object of values contained in local config file", "source": "juraj-google-style"}
{"code": "def create_app(config=None, config_obj=None):\n    app = Flask(__name__)\n    configure_app(app, config=config, config_obj=config_obj)\n    register_blueprints(app)\n    bind_extensions(app)\n    return app", "docstring": "Flask app factory function.\n\nArgs:\nconfig (Optional[path]): path to a Python module config file\nconfig_obj (Optional[class]): Python config object", "source": "codesearchnet"}
{"code": "def form_uri(item_id, service, is_track):\n    if is_track:\n        uri = service.sonos_uri_from_id(item_id)\n    else:\n        uri = ('x-rincon-cpcontainer:' + item_id)\n    return uri", "docstring": "Form and return a music service item uri\n\nArgs:\nitem_id (str): The item id\nservice (MusicService): The music service that the item originates from\nis_track (bool): Whether the item_id is from a track or not\n\nReturns:\nstr: The music service item uri", "source": "codesearchnet"}
{"code": "def __init__(self, host: str, port: int, command: Optional[str]=None, batch_size: int=100, embedded_columns: list=[]):\n    self._host = host\n    self._port = port\n    self._command = command\n    self._batch_size = batch_size\n    self.embedded_columns = embedded_columns", "docstring": "Args:\nhost (str): The redis host\nport (int): The redis port\ncommand (str): command to be executed with redis client\nbatch_size (int): Number of key, values pairs to write at once\nembedded_columns (list): list of column whose embedding needs to be generated\n\nReturns:\n:class:`~apache_beam.transforms.ptransform.PTransform`", "source": "github-repos"}
{"code": "def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):\n    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')\n    if os.path.exists(pkg_info_file):\n        return\n    with open('RELEASE_NOTES', 'wb') as releasenotes_fd:\n        releasenotes_fd.write((get_releasenotes(project_dir=project_dir, bugtracker_url=bugtracker_url).encode('utf-8') + b'\\n'))", "docstring": "Creates the release notes file, if not in a package.\n\nArgs:\nproject_dir(str): Path to the git repo of the project.\nbugtracker_url(str): Url to the bug tracker for the issues.\n\nReturns:\nNone\n\nRaises:\nRuntimeError: If the release notes could not be retrieved", "source": "codesearchnet"}
{"code": "def get_pull_request_number(task, source_env_prefix):\n    \n    pull_request = _extract_from_env_in_payload(task, source_env_prefix + '_PULL_REQUEST_NUMBER')\n    if pull_request is not None:\n        pull_request = int(pull_request)\n    return pull_request", "docstring": "Get what Github pull request created the graph.\n\nArgs:\nobj (ChainOfTrust or LinkOfTrust): the trust object to inspect\nsource_env_prefix (str): The environment variable prefix that is used\nto get repository information.\n\nReturns:\nint: the pull request number.\nNone: if not defined for this task.", "source": "juraj-google-style"}
{"code": "def remove_collisions(self, min_dist=0.5):\n    vfcoords = [v.frac_coords for v in self.vnodes]\n    sfcoords = self.structure.frac_coords\n    dist_matrix = self.structure.lattice.get_all_distances(vfcoords, sfcoords)\n    all_dist = np.min(dist_matrix, axis=1)\n    new_vnodes = []\n    for (i, v) in enumerate(self.vnodes):\n        if (all_dist[i] > min_dist):\n            new_vnodes.append(v)\n    self.vnodes = new_vnodes", "docstring": "Remove vnodes that are too close to existing atoms in the structure\n\nArgs:\nmin_dist(float): The minimum distance that a vertex needs to be\nfrom existing atoms.", "source": "codesearchnet"}
{"code": "def _GetParser(self):\n    usage = 'nsscache synchronises a local NSS cache against a remote data source.\\n\\nUsage: nsscache [global options] command [command options]\\n\\ncommands:\\n'\n    command_descriptions = []\n    for name, cls in list(command.__dict__.items()):\n        if name == 'Command':\n            continue\n        if hasattr(cls, 'Help'):\n            short_help = cls().Help(short=True)\n            command_descriptions.append('  %-21s %.40s' % (name.lower(), short_help.lower()))\n    usage += '\\n'.join(command_descriptions)\n    version_string = 'nsscache ' + nss_cache.__version__ + '\\n\\nCopyright (c) 2007 Google, Inc.\\nThis is free software; see the source for copying conditions.  There is NO\\nwarranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\\n\\nWritten by Jamie Wilkinson and Vasilios Hoffman.'\n    parser = optparse.OptionParser(usage, version=version_string)\n    parser.disable_interspersed_args()\n    parser.set_defaults(verbose=False, debug=False)\n    parser.add_option('-v', '--verbose', action='store_true', help='enable verbose output')\n    parser.add_option('-d', '--debug', action='store_true', help='enable debugging output')\n    parser.add_option('-c', '--config-file', type='string', help='read configuration from FILE', metavar='FILE')\n    old_get_usage = parser.get_usage\n\n    def get_usage():\n        return old_get_usage()[7:]\n    parser.get_usage = get_usage\n    return parser", "docstring": "Sets up our parser for global options.\n\nArgs:  None\nReturns:\n# OptionParser is from standard python module optparse\nOptionParser", "source": "github-repos"}
{"code": "def rApply(d, f):\n    remainingDicts = [(d, ())]\n    while (len(remainingDicts) > 0):\n        (current, prevKeys) = remainingDicts.pop()\n        for (k, v) in current.iteritems():\n            keys = (prevKeys + (k,))\n            if isinstance(v, dict):\n                remainingDicts.insert(0, (v, keys))\n            else:\n                f(v, keys)", "docstring": "Recursively applies f to the values in dict d.\n\nArgs:\nd: The dict to recurse over.\nf: A function to apply to values in d that takes the value and a list of\nkeys from the root of the dict to the value.", "source": "codesearchnet"}
{"code": "def delete(self, paths):\n    raise NotImplementedError", "docstring": "Deletes files or directories at the provided paths.\nDirectories will be deleted recursively.\n\nArgs:\npaths: list of paths that give the file objects to be deleted\n\nRaises:\n``BeamIOError``: if any of the delete operations fail", "source": "github-repos"}
{"code": "def create_new(mapreduce_id=None,\n                 gettime=datetime.datetime.now):\n    \n    if not mapreduce_id:\n      mapreduce_id = MapreduceState.new_mapreduce_id()\n    state = MapreduceState(key_name=mapreduce_id,\n                           last_poll_time=gettime())\n    state.set_processed_counts([], [])\n    return state", "docstring": "Create a new MapreduceState.\n\nArgs:\nmapreduce_id: Mapreduce id as string.\ngettime: Used for testing.", "source": "juraj-google-style"}
{"code": "def parse_args(\n    bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors\n) -> Tuple[Parsed, Errors]:\n    \n\n    commas = char_locs[\"commas\"]\n\n    \n    for span in parsed:\n        if parsed[span][\"type\"] != \"Function\" or \"parens_span\" not in parsed[span]:\n            continue  \n        sp, ep = parsed[span][\"parens_span\"]\n\n        \n        if ep == -1:  \n            args_end = len(bels) - 1  \n        else:\n            args_end = ep - 1  \n\n        \n        args = []\n        arg_start = sp + 1\n        each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end])\n        for arg_end in each_arg_end_list:\n            \n\n            \n            while arg_start < args_end and bels[arg_start] == \" \":\n                arg_start += 1\n\n            \n            trimmed_arg_end = arg_end\n            while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == \" \":\n                trimmed_arg_end -= 1\n\n            if trimmed_arg_end < arg_start:\n                trimmed_arg_end = arg_start\n\n            arg = \"\".join(bels[arg_start : trimmed_arg_end + 1])\n\n            \n            args.append({\"arg\": arg, \"span\": (arg_start, trimmed_arg_end)})\n            arg_start = arg_end + 2\n\n        parsed[span][\"args\"] = args\n\n    return parsed, errors", "docstring": "Parse arguments from functions\n\nArgs:\nbels: BEL string as list of chars\nchar_locs: char locations for parens, commas and quotes\nparsed: function locations\nerrors: error messages\n\nReturns:\n(functions, errors): function and arg locations plus error messages", "source": "juraj-google-style"}
{"code": "def from_surface(renderer, surface):\n    texture = object.__new__(Texture)\n    texture._ptr = check_ptr_err(lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))\n    return texture", "docstring": "Create a texture from an existing surface.\n\nArgs:\nsurface (Surface): The surface containing pixel data used to fill the texture.\n\nReturns:\nTexture: A texture containing the pixels from surface.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def update(self, resource, timeout=-1):\n        \n        return self._client.update(resource, timeout=timeout, default_values=self.DEFAULT_VALUES, uri=self.URI)", "docstring": "Updates a User.\n\nArgs:\nresource (dict): Object to update.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "juraj-google-style"}
{"code": "def run_callback(self):\n    if (self._callback_func is not None):\n        try:\n            self._callback_func(self._request, self._result)\n        except Exception:\n            LOGGER.exception('An unhandled error occurred while running future callback')", "docstring": "Calls the callback_func, passing in the two positional arguments,\nconditionally waiting if the callback function hasn't been set yet.\nMeant to be run in a threadpool owned by the FutureCollection.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def cancel(self, consumers):\n    for consumer in consumers:\n        del self._consumers[consumer.queue]\n        protocol = (yield self.when_connected())\n        (yield protocol.cancel(consumer))", "docstring": "Cancel a consumer that was previously started with consume.\n\nArgs:\nconsumer (list of fedora_messaging.api.Consumer): The consumers to cancel.", "source": "codesearchnet"}
{"code": "def _update_version(connection, version):\n    \n    if connection.engine.name == 'sqlite':\n        connection.execute('PRAGMA user_version = {}'.format(version))\n\n    elif connection.engine.name == 'postgresql':\n\n        connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME)))\n        connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME)))\n\n        connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'\n                           .format(POSTGRES_SCHEMA_NAME))\n\n        \n        if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone():\n            \n            connection.execute('UPDATE {}.user_version SET version = {};'\n                               .format(POSTGRES_SCHEMA_NAME, version))\n        else:\n            \n            connection.execute('INSERT INTO {}.user_version (version) VALUES ({})'\n                               .format(POSTGRES_SCHEMA_NAME, version))\n    else:\n        raise DatabaseMissingError('Do not know how to migrate {} engine.'\n                                   .format(connection.engine.driver))", "docstring": "Updates version in the db to the given version.\n\nArgs:\nconnection (sqlalchemy connection): sqlalchemy session where to update version.\nversion (int): version of the migration.", "source": "juraj-google-style"}
{"code": "def build_byte_align_buff(bits):\n    \n    bitmod = len(bits)%8\n    if bitmod == 0:\n        rdiff = bitarray()\n    else:\n        \n        rdiff = bitarray(8-bitmod)\n        rdiff.setall(False)\n    return rdiff+bits", "docstring": "Pad the left side of a bitarray with 0s to align its length with byte boundaries.\n\nArgs:\nbits: A bitarray to be padded and aligned.\n\nReturns:\nA newly aligned bitarray.", "source": "juraj-google-style"}
{"code": "def format_image(path, options):\n    image = Image.open(path)\n    image_pipeline_results = __pipeline_image(image, options)\n    return image_pipeline_results", "docstring": "Formats an image.\n\nArgs:\npath (str): Path to the image file.\noptions (dict): Options to apply to the image.\n\nReturns:\n(list) A list of PIL images. The list will always be of length\n1 unless resolutions for resizing are provided in the options.", "source": "codesearchnet"}
{"code": "def __init__(self):\n        \n        super(JLinkStraceEventInfo, self).__init__()\n        self.SizeOfStruct = ctypes.sizeof(self)", "docstring": "Initializes the ``JLinkStraceEventInfo`` instance.\n\nSets the size of the structure.\n\nArgs:\nself (JLinkStraceEventInfo): the ``JLinkStraceEventInfo`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def login(self, email, password):\n    r = requests.post('{0}/v2/session'.format(self._api_endpoint), json={'email': email, 'password': password})\n    r.raise_for_status()\n    return r.json()['accessToken']", "docstring": "Authenticate a user with SignalFx to acquire a session token.\n\nNote that data ingest can only be done with an organization or team API\naccess token, not with a user token obtained via this method.\n\nArgs:\nemail (string): the email login\npassword (string): the password\nReturns a new, immediately-usable session token for the logged in user.", "source": "codesearchnet"}
{"code": "def is_enrolled(self, username, course_run_id):\n        \n        enrollment = self.get_course_enrollment(username, course_run_id)\n        return enrollment is not None and enrollment.get('is_active', False)", "docstring": "Query the enrollment API and determine if a learner is enrolled in a course run.\n\nArgs:\nusername (str): The username by which the user goes on the OpenEdX platform\ncourse_run_id (str): The string value of the course's unique identifier\n\nReturns:\nbool: Indicating whether the user is enrolled in the course run. Returns False under any errors.", "source": "juraj-google-style"}
{"code": "def output(self, _filename):\n        \n\n        for contract in self.contracts:\n            txt = \"\\nContract %s\\n\"%contract.name\n            table = PrettyTable([\"Function\", \"State variables written\", \"Conditions on msg.sender\"])\n            for function in contract.functions:\n\n                state_variables_written = [v.name for v in function.all_state_variables_written()]\n                msg_sender_condition = self.get_msg_sender_checks(function)\n                table.add_row([function.name, str(state_variables_written), str(msg_sender_condition)])\n            self.info(txt + str(table))", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def empty(element_spec):\n    return _OptionalImpl(gen_optional_ops.optional_none(), element_spec)", "docstring": "Returns an `Optional` that has no value.\n\nNOTE: This method takes an argument that defines the structure of the value\nthat would be contained in the returned `Optional` if it had a value.\n\n>>> optional = tf.experimental.Optional.empty(\n...   tf.TensorSpec(shape=(), dtype=tf.int32, name=None))\n>>> print(optional.has_value())\ntf.Tensor(False, shape=(), dtype=bool)\n\nArgs:\nelement_spec: A (nested) structure of `tf.TypeSpec` objects matching the\nstructure of an element of this optional.\n\nReturns:\nA `tf.experimental.Optional` with no value.", "source": "github-repos"}
{"code": "def compstat(sdat, tstart=None, tend=None):\n    \n    data = sdat.tseries_between(tstart, tend)\n    time = data['t'].values\n    delta_time = time[-1] - time[0]\n    data = data.iloc[:, 1:].values  \n\n    mean = np.trapz(data, x=time, axis=0) / delta_time\n    rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)\n\n    with open(misc.out_name('statistics.dat'), 'w') as out_file:\n        mean.tofile(out_file, sep=' ', format=\"%10.5e\")\n        out_file.write('\\n')\n        rms.tofile(out_file, sep=' ', format=\"%10.5e\")\n        out_file.write('\\n')", "docstring": "Compute statistics from series output by StagYY.\n\nCreate a file 'statistics.dat' containing the mean and standard deviation\nof each series on the requested time span.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): starting time. Set to None to start at the beginning of\navailable data.\ntend (float): ending time. Set to None to stop at the end of available\ndata.", "source": "juraj-google-style"}
{"code": "def assert_processor_available(processor: str) -> None:\n    if (processor not in [Processors.XHTML2PDF, Processors.WEASYPRINT, Processors.PDFKIT]):\n        raise AssertionError('rnc_pdf.set_pdf_processor: invalid PDF processor specified')\n    if ((processor == Processors.WEASYPRINT) and (not weasyprint)):\n        raise RuntimeError('rnc_pdf: Weasyprint requested, but not available')\n    if ((processor == Processors.XHTML2PDF) and (not xhtml2pdf)):\n        raise RuntimeError('rnc_pdf: xhtml2pdf requested, but not available')\n    if ((processor == Processors.PDFKIT) and (not pdfkit)):\n        raise RuntimeError('rnc_pdf: pdfkit requested, but not available')", "docstring": "Assert that a specific PDF processor is available.\n\nArgs:\nprocessor: a PDF processor type from :class:`Processors`\n\nRaises:\nAssertionError: if bad ``processor``\nRuntimeError: if requested processor is unavailable", "source": "codesearchnet"}
{"code": "def validate_token(key, token, user_id, action_id='', current_time=None):\n    if (not token):\n        return False\n    try:\n        decoded = base64.urlsafe_b64decode(token)\n        token_time = int(decoded.split(DELIMITER)[(- 1)])\n    except (TypeError, ValueError, binascii.Error):\n        return False\n    if (current_time is None):\n        current_time = time.time()\n    if ((current_time - token_time) > DEFAULT_TIMEOUT_SECS):\n        return False\n    expected_token = generate_token(key, user_id, action_id=action_id, when=token_time)\n    if (len(token) != len(expected_token)):\n        return False\n    different = 0\n    for (x, y) in zip(bytearray(token), bytearray(expected_token)):\n        different |= (x ^ y)\n    return (not different)", "docstring": "Validates that the given token authorizes the user for the action.\n\nTokens are invalid if the time of issue is too old or if the token\ndoes not match what generateToken outputs (i.e. the token was forged).\n\nArgs:\nkey: secret key to use.\ntoken: a string of the token generated by generateToken.\nuser_id: the user ID of the authenticated user.\naction_id: a string identifier of the action they requested\nauthorization for.\n\nReturns:\nA boolean - True if the user is authorized for the action, False\notherwise.", "source": "codesearchnet"}
{"code": "def allreduce_grads_hierarchical(all_grads, devices, average=False):\n    \n    num_gpu = len(devices)\n    assert num_gpu == 8, num_gpu\n    assert len(all_grads) == num_gpu, len(all_grads)\n    group_size = num_gpu \n\n    agg_all_grads = []  \n    for varid, grads in enumerate(zip(*all_grads)):\n        \n        g0_main_gpu = varid % num_gpu\n        g1_main_gpu = (g0_main_gpu + group_size) % num_gpu\n        g0_start = 0 if g0_main_gpu < group_size else group_size\n        g1_start = 0 if g1_main_gpu < group_size else group_size\n        assert g0_start != g1_start\n        g0_grads = grads[g0_start: g0_start + group_size]\n        g1_grads = grads[g1_start: g1_start + group_size]\n\n        with tf.device(devices[g0_main_gpu]):\n            g0_agg = tf.add_n(g0_grads, name='group0_agg')\n\n        with tf.device(devices[g1_main_gpu]):\n            g1_agg = tf.add_n(g1_grads, name='group1_agg')\n            g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')\n\n        with tf.device(devices[g0_main_gpu]):\n            g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')\n\n        agg_grads = []  \n        for k in range(num_gpu):\n            if (k < group_size) == (g0_main_gpu < group_size):\n                main_gpu = g0_total_agg\n            else:\n                main_gpu = g1_total_agg\n            with tf.device(devices[k]):\n                if not average:\n                    device_total_agg = tf.identity(\n                        main_gpu, name='device{}_total_agg'.format(k))\n                else:\n                    \n                    device_total_agg = tf.multiply(\n                        main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k))\n                agg_grads.append(device_total_agg)\n\n        agg_all_grads.append(agg_grads)\n\n    \n    agg_all_grads = list(zip(*agg_all_grads))   \n    return agg_all_grads", "docstring": "Hierarchical allreduce for DGX-1 system.\n\nArgs:\nall_grads (K x N): List of list of gradients. N is the number of variables.\ndevices ([str]): K str for the K devices.\naverage (bool): average gradients or not.\n\nReturns:\n(K x N): same as input, but each grad is replaced by the average over K lists.", "source": "juraj-google-style"}
{"code": "def assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, left_fraction, left_result, right_fraction, right_result, stats, start_position=None, stop_position=None):\n    assert right_fraction > left_fraction\n    if right_fraction - left_fraction < 0.001:\n        return\n    middle_fraction = (left_fraction + right_fraction) / 2\n    if left_result is None:\n        left_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, left_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)\n    if right_result is None:\n        right_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, right_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)\n    middle_result = _assert_split_at_fraction_behavior(source, expected_items, num_items_to_read_before_split, middle_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)\n    if middle_result[1] != -1:\n        stats.successful_fractions.append(middle_fraction)\n    if middle_result[1] > 0:\n        stats.non_trivial_fractions.append(middle_fraction)\n    if left_result[0] != middle_result[0]:\n        assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, left_fraction, left_result, middle_fraction, middle_result, stats)\n    if right_fraction == 1.0 or middle_result[0] != right_result[0]:\n        assert_split_at_fraction_binary(source, expected_items, num_items_to_read_before_split, middle_fraction, middle_result, right_fraction, right_result, stats)", "docstring": "Performs dynamic work rebalancing for fractions within a given range.\n\nAsserts that given a start position, a source can be split at every\ninteresting fraction (halfway between two fractions that differ by at\nleast one item) and the results are consistent if a split succeeds.\n\nArgs:\nsource: source to perform dynamic splitting on.\nexpected_items: total set of items expected when reading the source.\nnum_items_to_read_before_split: number of items to read before splitting.\nleft_fraction: left fraction for binary splitting.\nleft_result: result received by splitting at left fraction.\nright_fraction: right fraction for binary splitting.\nright_result: result received by splitting at right fraction.\nstats: a ``SplitFractionStatistics`` for storing results.", "source": "github-repos"}
{"code": "def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):\n        \n        log.debug('{}: running protein receptor isolation...'.format(self.id))\n\n        if not self.dockprep_path:\n            return ValueError('Please run dockprep')\n\n        receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))\n        receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))\n\n        prly_com = op.join(self.dock_dir, \"prly.com\")\n\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):\n            with open(prly_com, \"w\") as f:\n                f.write('open {}\\n'.format(self.dockprep_path))\n\n                keep_str = 'delete ~protein'\n                if keep_ligands:\n                    keep_ligands = ssbio.utils.force_list(keep_ligands)\n                    for res in keep_ligands:\n                        keep_str += ' & ~:{} '.format(res)\n                keep_str = keep_str.strip() + '\\n'\n                f.write(keep_str)\n\n                f.write('write format mol2 0 {}\\n'.format(receptor_mol2))\n                f.write('delete element.H\\n')\n                f.write('write format pdb 0 {}\\n'.format(receptor_noh))\n\n            cmd = 'chimera --nogui {}'.format(prly_com)\n            os.system(cmd)\n            os.remove(prly_com)\n\n        if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):\n            self.receptormol2_path = receptor_mol2\n            self.receptorpdb_path = receptor_noh\n            log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))\n            log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))\n        else:\n            log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path))", "docstring": "Isolate the receptor by stripping everything except protein and specified ligands.\n\nArgs:\nkeep_ligands (str, list): Ligand(s) to keep in PDB file\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"}
{"code": "def __init__(self, mu, sigma, output_shape):\n        \n        self.__mu = mu\n        self.__sigma = sigma\n        self.__output_shape = output_shape", "docstring": "Init.\n\nArgs:\nmu:             `float` or `array_like of floats`.\nMean (`centre`) of the distribution.\n\nsigma:          `float` or `array_like of floats`.\nStandard deviation (spread or `width`) of the distribution.\n\noutput_shape:   Output shape.\nthe shape is `(batch size, d1, d2, d3, ...)`.", "source": "juraj-google-style"}
{"code": "def list_objects(self, path='', relative=False, first_level=False,\n                     max_request_entries=None):\n        \n        entries = 0\n        max_request_entries_arg = None\n\n        if not relative:\n            path = self.relpath(path)\n\n        \n        if not path:\n            locators = self._list_locators()\n\n            \n            if first_level:\n                for locator in locators:\n\n                    entries += 1\n                    yield locator\n                    if entries == max_request_entries:\n                        return\n                return\n\n            \n            for loc_path, loc_header in locators:\n\n                \n                loc_path = loc_path.strip('/')\n\n                entries += 1\n                yield loc_path, loc_header\n                if entries == max_request_entries:\n                    return\n\n                \n                if max_request_entries is not None:\n                    max_request_entries_arg = max_request_entries - entries\n                try:\n                    for obj_path, obj_header in self._list_objects(\n                            self.get_client_kwargs(loc_path), '',\n                            max_request_entries_arg):\n\n                        entries += 1\n                        yield ('/'.join((loc_path, obj_path.lstrip('/'))),\n                               obj_header)\n                        if entries == max_request_entries:\n                            return\n\n                except ObjectPermissionError:\n                    \n                    continue\n            return\n\n        \n        locator, path = self.split_locator(path)\n\n        if first_level:\n            seen = set()\n\n        if max_request_entries is not None:\n            max_request_entries_arg = max_request_entries - entries\n\n        for obj_path, header in self._list_objects(\n                self.get_client_kwargs(locator), path, max_request_entries_arg):\n\n            if path:\n                try:\n                    obj_path = obj_path.split(path, 1)[1]\n                except IndexError:\n                    \n                    continue\n            obj_path = obj_path.lstrip('/')\n\n            \n            if not obj_path:\n                continue\n\n            \n            if first_level:\n                \n                try:\n                    obj_path, _ = obj_path.strip('/').split('/', 1)\n                    obj_path += '/'\n\n                    \n                    \n                    \n                    header = dict()\n\n                \n                except ValueError:\n                    pass\n\n                if obj_path not in seen:\n                    entries += 1\n                    yield obj_path, header\n                    if entries == max_request_entries:\n                        return\n                    seen.add(obj_path)\n\n            \n            else:\n                entries += 1\n                yield obj_path, header\n                if entries == max_request_entries:\n                    return", "docstring": "List objects.\n\nArgs:\npath (str): Path or URL.\nrelative (bool): Path is relative to current root.\nfirst_level (bool): It True, returns only first level objects.\nElse, returns full tree.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ngenerator of tuple: object name str, object header dict", "source": "juraj-google-style"}
{"code": "def configure(cls, api_token,\n                  api_url=\"https:\n                  poll_interval=5, skip_ssl_cert_check=False, cloud_name=\"AWS\"):\n        \n\n        cls._auth = QuboleAuth(api_token)\n        cls.api_token = api_token\n        cls.version = version\n        cls.baseurl = api_url\n        if poll_interval < Qubole.MIN_POLL_INTERVAL:\n            log.warn(\"Poll interval cannot be less than %s seconds. Setting it to %s seconds.\\n\" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))\n            cls.poll_interval = Qubole.MIN_POLL_INTERVAL\n        else:\n            cls.poll_interval = poll_interval\n        cls.skip_ssl_cert_check = skip_ssl_cert_check\n        cls.cloud_name = cloud_name.lower()\n        cls.cached_agent = None", "docstring": "Set parameters governing interaction with QDS\n\nArgs:\n`api_token`: authorization token for QDS. required\n\n`api_url`: the base URL for QDS API. configurable for testing only\n\n`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)\n\n`poll_interval`: interval in secs when polling QDS for events", "source": "juraj-google-style"}
{"code": "def list_directory_v2(path):\n    if not is_directory(path):\n        raise errors.NotFoundError(node_def=None, op=None, message='Could not find directory {}'.format(path))\n    return [compat.as_str_any(filename) for filename in _pywrap_file_io.GetChildren(compat.path_to_bytes(path))]", "docstring": "Returns a list of entries contained within a directory.\n\nThe list is in arbitrary order. It does not contain the special entries \".\"\nand \"..\".\n\nArgs:\npath: string, path to a directory\n\nReturns:\n[filename1, filename2, ... filenameN] as strings\n\nRaises:\nerrors.NotFoundError if directory doesn't exist", "source": "github-repos"}
{"code": "def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\n    saved_weight_names_set = set()\n    saved_weights = {}\n    mismatched_keys = set()\n    unexpected_keys = set()\n    try:\n        with h5py.File(resolved_archive_file, 'r') as sharded_checkpoint_file:\n            saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, 'layer_names'))\n            weight_value_tuples = []\n            for layer_name in saved_h5_model_layers_name:\n                h5_layer_object = sharded_checkpoint_file[layer_name]\n                saved_weights[layer_name] = np.asarray(h5_layer_object)\n                saved_weight_names_set.add(layer_name)\n                if layer_name not in model_layer_map:\n                    unexpected_keys.add(layer_name)\n                else:\n                    symbolic_weight = model.weights[model_layer_map[layer_name]]\n                    saved_weight_value = saved_weights[layer_name]\n                    if saved_weight_value is not None:\n                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:\n                            try:\n                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))\n                            except ValueError as e:\n                                if ignore_mismatched_sizes:\n                                    mismatched_keys.add((layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight)))\n                                    continue\n                                else:\n                                    raise e\n                        else:\n                            array = saved_weight_value\n                    weight_value_tuples.append((symbolic_weight, array))\n        K.batch_set_value(weight_value_tuples)\n        return (saved_weight_names_set, unexpected_keys, mismatched_keys)\n    except Exception as e:\n        try:\n            with open(resolved_archive_file) as f:\n                if f.read().startswith('version'):\n                    raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.')\n                else:\n                    raise ValueError(f'Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e\n        except (UnicodeDecodeError, ValueError):\n            raise OSError(f\"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' at '{resolved_archive_file}'. If you tried to load a TF model from a sharded checkpoint, you should try converting the model by loading it in pytorch and saving it locally. A conversion script should be released soon.\")", "docstring": "Loads a shard from a sharded checkpoint file. 
Can be either H5 or Safetensors.\nHandles missing keys and unexpected keys.\n\nArgs:\nmodel (`keras.models.Model`): Model in which the weights are loaded\nmodel_layer_map (`Dict`): A dictionary mapping the layer name to the index of the layer in the model.\nresolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded\nignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys\n\nReturns:\n`keras.models.Model`: Three lists, one for the layers that were found and successfully restored (from the\nshard file), one for the mismatched layers, and another one for the unexpected layers.", "source": "github-repos"}
{"code": "def __init__(self, key, attributes):\n        \n        \n        self.key = key\n        self.attributes = attributes", "docstring": "Object initialization\n\nArgs:\nkey: String name of an attributes key that represents the unique identify of the request\nattributes: Dictionary whose keys match the string values of the request attribute's names and values correspond the the request attribute values", "source": "juraj-google-style"}
{"code": "def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:\n    decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes\n    speech_ids = speech_ids[:, 1:]\n    stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)\n    speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])\n    for i, each_seq_stop_token_index in enumerate(stop_token_indices):\n        if each_seq_stop_token_index.sum() == 0:\n            continue\n        stm = each_seq_stop_token_index.argmax()\n        speech_ids[i, stm:] = decoder_fixing_codes[0]\n        if stm - 3 < speech_ids.shape[1]:\n            speech_ids[i, -3:] = torch.tensor([decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long)\n    return speech_ids", "docstring": "This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the\nlast few tokens of each sequence.\n\nArgs:\nspeech_ids (`torch.LongTensor`):\nThis refers to the output of the decoder model.", "source": "github-repos"}
{"code": "def get_num_chunks(length, chunksize):\n    n_chunks = int(math.ceil((length / chunksize)))\n    return n_chunks", "docstring": "r\"\"\"\nReturns the number of chunks that a list will be split into given a\nchunksize.\n\nArgs:\nlength (int):\nchunksize (int):\n\nReturns:\nint: n_chunks\n\nCommandLine:\npython -m utool.util_progress --exec-get_num_chunks:0\n\nExample0:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_progress import *  # NOQA\n>>> length = 2000\n>>> chunksize = 256\n>>> n_chunks = get_num_chunks(length, chunksize)\n>>> result = ('n_chunks = %s' % (six.text_type(n_chunks),))\n>>> print(result)\nn_chunks = 8", "source": "codesearchnet"}
{"code": "def list_bindings(site):\n    ret = dict()\n    sites = list_sites()\n    if (site not in sites):\n        log.warning('Site not found: %s', site)\n        return ret\n    ret = sites[site]['bindings']\n    if (not ret):\n        log.warning('No bindings found for site: %s', site)\n    return ret", "docstring": "Get all configured IIS bindings for the specified site.\n\nArgs:\nsite (str): The name if the IIS Site\n\nReturns:\ndict: A dictionary of the binding names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_bindings site", "source": "codesearchnet"}
{"code": "def deserialize_report(self, serialized):\n        \n\n        type_map = self.known_formats\n\n        if serialized['report_format'] not in type_map:\n            raise ArgumentError(\"Unknown report format in DeserializeReport\", format=serialized['report_format'])\n\n        report = type_map[serialized['report_format']](serialized['encoded_report'])\n        report.received_time = serialized['received_time']\n\n        return report", "docstring": "Deserialize a report that has been serialized by calling report.serialize()\n\nArgs:\nserialized (dict): A serialized report object", "source": "juraj-google-style"}
{"code": "def register_piece(self, from_address, to_address, hash, password, min_confirmations=6, sync=False, ownership=True):\n    (file_hash, file_hash_metadata) = hash\n    (path, from_address) = from_address\n    verb = Spoolverb()\n    unsigned_tx = self.simple_spool_transaction(from_address, [file_hash, file_hash_metadata, to_address], op_return=verb.piece, min_confirmations=min_confirmations)\n    signed_tx = self._t.sign_transaction(unsigned_tx, password)\n    txid = self._t.push(signed_tx)\n    return txid", "docstring": "Register a piece\n\nArgs:\nfrom_address (Tuple[str]): Federation address. All register transactions\noriginate from the the Federation wallet\nto_address (str): Address registering the edition\nhash (Tuple[str]): Hash of the piece. (file_hash, file_hash_metadata)\npassword (str): Federation wallet password. For signing the transaction\nedition_num (int): The number of the edition to register. User\nedition_num=0 to register the master edition\nmin_confirmations (int): Override the number of confirmations when\nchosing the inputs of the transaction. Defaults to 6\nsync (bool): Perform the transaction in synchronous mode, the call to the\nfunction will block until there is at least on confirmation on\nthe blockchain. Defaults to False\nownership (bool): Check ownsership in the blockchain before pushing the\ntransaction. Defaults to True\n\nReturns:\nstr: transaction id", "source": "codesearchnet"}
{"code": "def is_broadcast_compatible(shape_x, shape_y):\n    if shape_x.ndims is None or shape_y.ndims is None:\n        return False\n    return _broadcast_shape_helper(shape_x, shape_y) is not None", "docstring": "Returns True if `shape_x` and `shape_y` are broadcast compatible.\n\nArgs:\nshape_x: A `TensorShape`\nshape_y: A `TensorShape`\n\nReturns:\nTrue if a shape exists that both `shape_x` and `shape_y` can be broadcasted\nto.  False otherwise.", "source": "github-repos"}
{"code": "def env(cls, separator=None, match=None, whitelist=None, parse_values=None, to_lower=None, convert_underscores=None):\n    cls.__hierarchy.append(env.Env(separator, match, whitelist, parse_values, to_lower, convert_underscores))", "docstring": "Set environment variables as a source.\n\nBy default all environment variables available to the process are used.\nThis can be narrowed by the args.\n\nArgs:\nseparator: Keys are split along this character, the resulting\nsplits are considered nested values.\nmatch: Regular expression for key matching. Keys matching the\nexpression are considered whitelisted.\nwhitelist: Only use environment variables that are listed in this\nlist.\nparse_values: Try to parse all variable for well-known types.\nto_lower: Convert all variable names to lower case.\nconvert_underscores: Convert all underscores in the name to dashes,\nthis takes place after separation via the separator option.", "source": "codesearchnet"}
{"code": "def alloc_buffer(self, length):\n        \n\n        buf = Buffer(sum(len(v) for v in six.iterkeys(self.data)) + sum(v.length for v in self.buffers), length)\n        self.buffers.append(buf)\n        return buf", "docstring": "Allocate a buffer (a range of uninitialized memory).\n\nArguments:\nlength(int): The length of the buffer to allocate.\n\nReturns:\n~pwnypack.types.Buffer: The object used to address this buffer.", "source": "juraj-google-style"}
{"code": "def end_container(self, header_buf):\n    if (not self.__container_nodes):\n        raise ValueError('Attempted to end container with none active.')\n    self.__container_node.add_leaf(_Node(header_buf))\n    self.__container_node = self.__container_nodes.pop()\n    parent_container_length = self.__container_lengths.pop()\n    self.current_container_length = ((parent_container_length + self.current_container_length) + len(header_buf))", "docstring": "Add a node containing the container's header to the current subtree.\n\nThis node will be added as the leftmost leaf of the subtree that was\nstarted by the matching call to start_container.\n\nArgs:\nheader_buf (bytearray): bytearray containing the container header.", "source": "codesearchnet"}
{"code": "def delete_permissions(self, grp_name, resource):\n        \n        self.service.delete_permissions(\n            grp_name, resource, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Removes permissions from the group for the given resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.BossResource): Identifies which data model object to operate on.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def received(self, limit=None):\n        \n        return list(itertools.islice((itertools.filterfalse(lambda x: x[1].sent, self.store)), limit))[::-1]", "docstring": "Returns all the events that have been received (excluding sent events), until a limit if defined\n\nArgs:\nlimit (int, optional): the max length of the events to return (Default value = None)\n\nReturns:\nlist: a list of received events", "source": "juraj-google-style"}
{"code": "def flatten_zip_dataset(*args):\n    flattened = tf.data.Dataset.from_tensors(args[0])\n    for ex in args[1:]:\n        flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex))\n    return flattened", "docstring": "A list of examples to a dataset containing mixed examples.\n\nGiven a list of `n` dataset examples, flatten them by converting\neach element into a dataset and concatenating them to convert into a\nsingle dataset.\n\nArgs:\n*args: A list containing one example each from `n` different datasets.\n\nReturns:\nflattened: A new dataset containing the examples from the list as part\nof a single dataset.", "source": "codesearchnet"}
{"code": "def set_number_of_atoms(self, n, selected_sites=None):\n    self.number_of_atoms = n\n    self.atoms = species.Species(self.lattice.populate_sites(self.number_of_atoms, selected_sites=selected_sites))", "docstring": "Set the number of atoms for the simulation, and populate the simulation lattice.\n\nArgs:\nn (Int): Number of atoms for this simulation.\nselected_sites (:obj:(List|Set|String), optional): Selects a subset of site types to be populated with atoms. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def delete_data(self, url, *args, **kwargs):\n    res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))\n    if ((res.status_code == 200) or (res.status_code == 202)):\n        return True\n    else:\n        return False", "docstring": "Deletes data under provided url\n\nReturns status as boolean.\n\nArgs:\n**url**: address of file to be deleted\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nBoolean. True if request was successful. False if not.", "source": "codesearchnet"}
{"code": "def write_variables(app_configs=None, out_file='', git_short=''):\n    generated = gogoutils.Generator(*gogoutils.Parser(git_short).parse_url(), formats=APP_FORMATS)\n    json_configs = {}\n    for (env, configs) in app_configs.items():\n        if (env != 'pipeline'):\n            instance_profile = generated.iam()['profile']\n            rendered_configs = json.loads(get_template('configs/configs.json.j2', env=env, app=generated.app_name(), profile=instance_profile, formats=generated))\n            json_configs[env] = dict(DeepChainMap(configs, rendered_configs))\n            region_list = configs.get('regions', rendered_configs['regions'])\n            json_configs[env]['regions'] = region_list\n            for region in region_list:\n                region_config = json_configs[env][region]\n                json_configs[env][region] = dict(DeepChainMap(region_config, rendered_configs))\n        else:\n            default_pipeline_json = json.loads(get_template('configs/pipeline.json.j2', formats=generated))\n            json_configs['pipeline'] = dict(DeepChainMap(configs, default_pipeline_json))\n    LOG.debug('Compiled configs:\\n%s', pformat(json_configs))\n    config_lines = convert_ini(json_configs)\n    with open(out_file, 'at') as jenkins_vars:\n        LOG.info('Appending variables to %s.', out_file)\n        jenkins_vars.write('\\n'.join(config_lines))\n    with open((out_file + '.exports'), 'wt') as export_vars:\n        LOG.info('Writing sourceable variables to %s.', export_vars.name)\n        export_vars.write('\\n'.join(('export {0}'.format(line) for line in config_lines)))\n    with open((out_file + '.json'), 'wt') as json_handle:\n        LOG.info('Writing JSON to %s.', json_handle.name)\n        LOG.debug('Total JSON dict:\\n%s', json_configs)\n        json.dump(json_configs, json_handle)\n    return json_configs", "docstring": "Append _application.json_ configs to _out_file_, .exports, and .json.\n\nVariables are written in INI style, e.g. UPPER_CASE=value. The .exports file\ncontains 'export' prepended to each line for easy sourcing. The .json file\nis a minified representation of the combined configurations.\n\nArgs:\napp_configs (dict): Environment configurations from _application.json_\nfiles, e.g. {'dev': {'elb': {'subnet_purpose': 'internal'}}}.\nout_file (str): Name of INI file to append to.\ngit_short (str): Short name of Git repository, e.g. forrest/core.\n\nReturns:\ndict: Configuration equivalent to the JSON output.", "source": "codesearchnet"}
{"code": "def export_constant(self, module_name: str, name: str) -> None:\n    module = sys.modules[module_name]\n    api_constants_attr = API_ATTRS[self._api_name].constants\n    api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants\n    if not hasattr(module, api_constants_attr):\n        setattr(module, api_constants_attr, [])\n    getattr(module, api_constants_attr).append((self._names, name))\n    if not hasattr(module, api_constants_attr_v1):\n        setattr(module, api_constants_attr_v1, [])\n    getattr(module, api_constants_attr_v1).append((self._names_v1, name))", "docstring": "Store export information for constants/string literals.\n\nExport information is stored in the module where constants/string literals\nare defined.\n\ne.g.\n```python\nfoo = 1\nbar = 2\ntf_export(\"consts.foo\").export_constant(__name__, 'foo')\ntf_export(\"consts.bar\").export_constant(__name__, 'bar')\n```\n\nArgs:\nmodule_name: (string) Name of the module to store constant at.\nname: (string) Current constant name.", "source": "github-repos"}
{"code": "def FirstEventTimestamp(self):\n    if (self._first_event_timestamp is not None):\n        return self._first_event_timestamp\n    with self._generator_mutex:\n        try:\n            event = next(self._generator.Load())\n            self._ProcessEvent(event)\n            return self._first_event_timestamp\n        except StopIteration:\n            raise ValueError('No event timestamp could be found')", "docstring": "Returns the timestamp in seconds of the first event.\n\nIf the first event has been loaded (either by this method or by `Reload`,\nthis returns immediately. Otherwise, it will load in the first event. Note\nthat this means that calling `Reload` will cause this to block until\n`Reload` has finished.\n\nReturns:\nThe timestamp in seconds of the first event that was loaded.\n\nRaises:\nValueError: If no events have been loaded and there were no events found\non disk.", "source": "codesearchnet"}
{"code": "def transform(self, X):\n        \n        assert np.shape(X)[0] == len(self._weights), (\n            'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '\n            'n_models={}, weights_len={}'.format(np.shape(X)[0], len(self._weights)))\n        blended_predictions = np.average(np.power(X, self._power),\n                                         weights=self._weights,\n                                         axis=0) ** (1.0 / self._power)\n\n        return {'y_pred': blended_predictions}", "docstring": "Performs predictions blending using the trained weights.\n\nArgs:\nX (array-like): Predictions of different models.\nReturns: dict with blended predictions (key is 'y_pred').", "source": "juraj-google-style"}
{"code": "def execute_status(args, root_dir=None):\n    \n    status = command_factory('status')({}, root_dir=root_dir)\n    \n    if status['status'] == 'running':\n        status['status'] = Color('{autogreen}' + '{}'.format(status['status']) + '{/autogreen}')\n    elif status['status'] in ['paused']:\n        status['status'] = Color('{autoyellow}' + '{}'.format(status['status']) + '{/autoyellow}')\n\n    print('Daemon: {}\\n'.format(status['status']))\n\n    \n    data = status['data']\n    if isinstance(data, str):\n        print(data)\n    elif isinstance(data, dict):\n        \n        formatted_data = []\n        formatted_data.append(['Index', 'Status', 'Code',\n                               'Command', 'Path', 'Start', 'End'])\n        for key, entry in sorted(data.items(), key=operator.itemgetter(0)):\n            formatted_data.append(\n                [\n                    '\n                    entry['status'],\n                    '{}'.format(entry['returncode']),\n                    entry['command'],\n                    entry['path'],\n                    entry['start'],\n                    entry['end']\n                ]\n            )\n\n        \n        table = AsciiTable(formatted_data)\n        table.outer_border = False\n        table.inner_column_border = False\n\n        terminal_width = terminal_size()\n        customWidth = table.column_widths\n        \n        \n        if (reduce(lambda a, b: a+b, table.column_widths) + 10) > terminal_width[0]:\n            \n            left_space = math.floor((terminal_width[0] - customWidth[0] - customWidth[1] - customWidth[2] - customWidth[5] - customWidth[6] - 14)/2)\n\n            if customWidth[3] < left_space:\n                customWidth[4] = 2*left_space - customWidth[3]\n            elif customWidth[4] < left_space:\n                customWidth[3] = 2*left_space - customWidth[4]\n            else:\n                customWidth[3] = left_space\n                customWidth[4] = left_space\n\n        \n        for i, entry in enumerate(table.table_data):\n            for j, string in enumerate(entry):\n                max_width = customWidth[j]\n                wrapped_string = '\\n'.join(wrap(string, max_width))\n                if j == 1:\n                    if wrapped_string == 'done' or wrapped_string == 'running' or wrapped_string == 'paused':\n                        wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')\n                    elif wrapped_string in ['queued', 'stashed']:\n                        wrapped_string = Color('{autoyellow}' + '{}'.format(wrapped_string) + '{/autoyellow}')\n                    elif wrapped_string in ['failed', 'stopping', 'killing']:\n                        wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')\n                elif j == 2:\n                    if wrapped_string == '0' and wrapped_string != 'Code':\n                        wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')\n                    elif wrapped_string != '0' and wrapped_string != 'Code':\n                        wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')\n\n                table.table_data[i][j] = wrapped_string\n\n        print(table.table)\n    print('')", "docstring": "Print the status of the daemon.\n\nThis function displays the current status of the daemon as well\nas the whole queue and all available information about every entry\nin the 
queue.\n`terminaltables` is used to format and display the queue contents.\n`colorclass` is used to color format the various items in the queue.\n\nArgs:\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "juraj-google-style"}
{"code": "class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(MaxPooling3D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Max pooling layer for 3D inputs (e.g. volumes).\n\nArgs:\npool_size: An integer or tuple/list of 3 integers:\n(pool_depth, pool_height, pool_width)\nspecifying the size of the pooling window.\nCan be a single integer to specify the same value for\nall spatial dimensions.\nstrides: An integer or tuple/list of 3 integers,\nspecifying the strides of the pooling operation.\nCan be a single integer to specify the same value for\nall spatial dimensions.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string. The ordering of the dimensions in the inputs.\n`channels_last` (default) and `channels_first` are supported.\n`channels_last` corresponds to inputs with shape\n`(batch, depth, height, width, channels)` while `channels_first`\ncorresponds to inputs with shape\n`(batch, channels, depth, height, width)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def load_with_vocab(fin, vocab, dtype=np.float32):\n    \n    arr = None\n    for line in fin:\n        try:\n            token, v = _parse_line(line, dtype)\n        except (ValueError, IndexError):\n            raise ParseError(b'Parsing error in line: ' + line)\n        if token in vocab:\n            if arr is None:\n                arr = np.empty((len(vocab), len(v)), dtype=dtype)\n                arr.fill(np.NaN)\n            elif arr.shape[1] != len(v):\n                raise ParseError(b'Vector size did not match in line: ' + line)\n            arr[vocab[token], :] = np.array(v, dtype=dtype).reshape(1, -1)\n    return arr", "docstring": "Load word embedding file with predefined vocabulary\n\nArgs:\nfin (File): File object to read. File should be open for reading ascii.\nvocab (dict): Mapping from words (``bytes``) to vector indices\n(``int``).\ndtype (numpy.dtype): Element data type to use for the array.\n\nReturns:\nnumpy.ndarray: Word embedding representation vectors", "source": "juraj-google-style"}
{"code": "def of(seconds: DurationTypes) -> 'Duration':\n    if isinstance(seconds, Timestamp):\n        raise TypeError('Cannot interpret %s as Duration.' % seconds)\n    if isinstance(seconds, Duration):\n        return seconds\n    return Duration(seconds)", "docstring": "Return the Duration for the given number of seconds since Unix epoch.\n\nIf the input is already a Duration, the input itself will be returned.\n\nArgs:\nseconds: Number of seconds as int, float or Duration.\n\nReturns:\nCorresponding Duration object.", "source": "github-repos"}
{"code": "def _validate_min_version(min_version):\n    if (min_version is not None):\n        try:\n            parsed_min_version = version.StrictVersion(min_version)\n        except ValueError:\n            return ExtensionVersionResult(error_reason=ExtensionValidationError.UNPARSEABLE_REQUESTED_VERSION, requested_extension_version=min_version)\n        if (parsed_min_version > HANDLER_VERSION):\n            return ExtensionVersionResult(error_reason=ExtensionValidationError.OUTDATED_VERSION, requested_extension_version=str(parsed_min_version))\n    return ExtensionVersionResult(error_reason=None, requested_extension_version=min_version)", "docstring": "Validates the extension version matches the requested version.\n\nArgs:\nmin_version: Minimum version passed as a query param when establishing the\nconnection.\n\nReturns:\nAn ExtensionVersionResult indicating validation status. If there is a\nproblem, the error_reason field will be non-empty.", "source": "codesearchnet"}
{"code": "def optimized_trace_matmul(rho, sigma):\n    return tf.reduce_sum(tf.multiply(tf.cast(rho, tf.complex128), tf.transpose(tf.cast(sigma, tf.complex128))))", "docstring": "Returns optimized version of tf.linalg.trace(tf.matmul(rho, sigma)).\nAssuming the both have the same shape.\nArgs:\nrho: 2-D `tf.Tensor` of dtype `complex64` representing the left density\nmatrix in the trace-matmul calculation.\nsigma: 2-D `tf.Tensor` of dtype `complex64` representing the right density\nmatrix in the trace-matmul calculation.\nReturns:\nA tf.Tensor float64 trace value between the two given density matrices.", "source": "github-repos"}
{"code": "def cancel(batch_fn, cancel_fn, ops):\n    canceled_ops = []\n    error_messages = []\n    max_batch = 256\n    total_ops = len(ops)\n    for first_op in range(0, total_ops, max_batch):\n        (batch_canceled, batch_messages) = _cancel_batch(batch_fn, cancel_fn, ops[first_op:(first_op + max_batch)])\n        canceled_ops.extend(batch_canceled)\n        error_messages.extend(batch_messages)\n    return (canceled_ops, error_messages)", "docstring": "Cancel operations.\n\nArgs:\nbatch_fn: API-specific batch function.\ncancel_fn: API-specific cancel function.\nops: A list of operations to cancel.\n\nReturns:\nA list of operations canceled and a list of error messages.", "source": "codesearchnet"}
{"code": "def hard_shrink(x, threshold=0.5):\n    return ops.hard_shrink(x, threshold=threshold)", "docstring": "Hard Shrink activation function.\n\nIt is defined as:\n\n`hard_shrink(x) = x` if `|x| > threshold`,\n`hard_shrink(x) = 0` otherwise.\n\nArgs:\nx: Input tensor.\nthreshold: Threshold value. Defaults to 0.5.", "source": "github-repos"}
{"code": "def refactor_string(self, data, name):\n        \n        features = _detect_future_features(data)\n        if \"print_function\" in features:\n            self.driver.grammar = pygram.python_grammar_no_print_statement\n        try:\n            tree = self.driver.parse_string(data)\n        except Exception as err:\n            self.log_error(\"Can't parse %s: %s: %s\",\n                           name, err.__class__.__name__, err)\n            return\n        finally:\n            self.driver.grammar = self.grammar\n        tree.future_features = features\n        self.log_debug(\"Refactoring %s\", name)\n        self.refactor_tree(tree, name)\n        return tree", "docstring": "Refactor a given input string.\n\nArgs:\ndata: a string holding the code to be refactored.\nname: a human-readable name for use in error/log messages.\n\nReturns:\nAn AST corresponding to the refactored input stream; None if\nthere were errors during the parse.", "source": "juraj-google-style"}
{"code": "def do_not_convert(func=None):\n    if func is None:\n        return do_not_convert\n\n    def wrapper(*args, **kwargs):\n        with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):\n            return func(*args, **kwargs)\n    if inspect.isfunction(func) or inspect.ismethod(func):\n        wrapper = functools.update_wrapper(wrapper, func)\n    return autograph_artifact(wrapper)", "docstring": "Decorator that suppresses the conversion of a function.\n\nArgs:\nfunc: function to decorate.\n\nReturns:\nIf `func` is not None, returns a `Callable` which is equivalent to\n`func`, but is not converted by AutoGraph.\nIf `func` is None, returns a decorator that, when invoked with a\nsingle `func` argument, returns a `Callable` equivalent to the\nabove case.", "source": "github-repos"}
{"code": "def RebuildHttpConnections(http):\n    \n    if getattr(http, 'connections', None):\n        for conn_key in list(http.connections.keys()):\n            if ':' in conn_key:\n                del http.connections[conn_key]", "docstring": "Rebuilds all http connections in the httplib2.Http instance.\n\nhttplib2 overloads the map in http.connections to contain two different\ntypes of values:\n{ scheme string:  connection class } and\n{ scheme + authority string : actual http connection }\nHere we remove all of the entries for actual connections so that on the\nnext request httplib2 will rebuild them from the connection types.\n\nArgs:\nhttp: An httplib2.Http instance.", "source": "juraj-google-style"}
{"code": "def _to_dict(self, include=None, exclude=None):\n    if ((include is not None) and (not isinstance(include, (list, tuple, set, frozenset)))):\n        raise TypeError('include should be a list, tuple or set')\n    if ((exclude is not None) and (not isinstance(exclude, (list, tuple, set, frozenset)))):\n        raise TypeError('exclude should be a list, tuple or set')\n    values = {}\n    for prop in self._properties.itervalues():\n        name = prop._code_name\n        if ((include is not None) and (name not in include)):\n            continue\n        if ((exclude is not None) and (name in exclude)):\n            continue\n        try:\n            values[name] = prop._get_for_dict(self)\n        except UnprojectedPropertyError:\n            pass\n    return values", "docstring": "Return a dict containing the entity's property values.\n\nArgs:\ninclude: Optional set of property names to include, default all.\nexclude: Optional set of property names to skip, default none.\nA name contained in both include and exclude is excluded.", "source": "codesearchnet"}
{"code": "def get_renderer(option: Optional[str]=None) -> Type[PipelineGraphRenderer]:\n    if option is None:\n        if os.name == 'nt':\n            exists = subprocess.call(['where', 'dot.exe']) == 0\n        else:\n            exists = subprocess.call(['which', 'dot']) == 0\n        if exists:\n            option = 'graph'\n        else:\n            option = 'text'\n    renderer = [r for r in PipelineGraphRenderer.get_all_subclasses() if option == r.option()]\n    if len(renderer) == 0:\n        raise ValueError()\n    elif len(renderer) == 1:\n        return renderer[0]()\n    else:\n        raise ValueError('Found more than one renderer for option: %s', option)", "docstring": "Get an instance of PipelineGraphRenderer given rendering option.\n\nArgs:\noption: (str) the rendering option.\n\nReturns:\n(PipelineGraphRenderer)", "source": "github-repos"}
{"code": "def plot_predictions_histogram(Y_ph, Y, title=None):\n    \n    labels = list(set(Y).union(set(Y_ph)))\n    edges = [x - 0.5 for x in range(min(labels), max(labels) + 2)]\n\n    plt.hist([Y_ph, Y], bins=edges, label=[\"Predicted\", \"Gold\"])\n    ax = plt.gca()\n    ax.set_xticks(labels)\n    plt.xlabel(\"Label\")\n    plt.ylabel(\"\n    plt.legend(loc=\"upper right\")\n    if isinstance(title, str):\n        plt.title(title)\n    plt.show()", "docstring": "Plot a histogram comparing int predictions vs true labels by class\n\nArgs:\nY_ph: An [n] or [n, 1] np.ndarray of predicted int labels\nY: An [n] or [n, 1] np.ndarray of gold labels", "source": "juraj-google-style"}
{"code": "def validate(self):\n    missing = self.missing_property_names()\n    if (len(missing) > 0):\n        raise validators.ValidationError(\"'{0}' are required attributes for {1}\".format(missing, self.__class__.__name__))\n    for (prop, val) in six.iteritems(self._properties):\n        if (val is None):\n            continue\n        if isinstance(val, ProtocolBase):\n            val.validate()\n        elif (getattr(val, 'isLiteralClass', None) is True):\n            val.validate()\n        elif isinstance(val, list):\n            for subval in val:\n                subval.validate()\n        else:\n            setattr(self, prop, val)\n    return True", "docstring": "Applies all defined validation to the current\nstate of the object, and raises an error if\nthey are not all met.\n\nRaises:\nValidationError: if validations do not pass", "source": "codesearchnet"}
{"code": "def get_concept(self, conceptId, lang='en'):\n        \n        url = urljoin(self.concept_service + '/', conceptId)\n\n        res, status_code = self.get(url, params={'lang': lang})\n\n        if status_code != 200:\n            logger.debug('Fetch concept failed.')\n\n        return self.decode(res), status_code", "docstring": "Fetch the concept from the Knowledge base\n\nArgs:\nid (str): The concept id to be fetched, it can be Wikipedia\npage id or Wikiedata id.\n\nReturns:\ndict, int: A dict containing the concept information; an integer\nrepresenting the response code.", "source": "juraj-google-style"}
{"code": "def register_hooked(self,\n                        hooks,  \n                        func,  \n                        args_gen=None  \n                        ):\n        \n        \n        if self.hooked is None:\n            self.hooked = {}\n        if args_gen is None:\n            args_gen = getattr(func, \"call_types\", {}).keys\n        if not isinstance(hooks, Sequence):\n            hooks = [hooks]\n        for hook_cls in hooks:\n            self.hooked[hook_cls] = (func, args_gen)", "docstring": "Register func to be run when any of the hooks are run by parent\n\nArgs:\nhooks: A Hook class or list of Hook classes of interest\nfunc: The callable that should be run on that Hook\nargs_gen: Optionally specify the argument names that should be\npassed to func. If not given then use func.call_types.keys", "source": "juraj-google-style"}
{"code": "def register_subcommand(parser: ArgumentParser):\n    train_parser = parser.add_parser('convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.')\n    train_parser.add_argument('--model_type', type=str, required=True, help=\"Model's type.\")\n    train_parser.add_argument('--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')\n    train_parser.add_argument('--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')\n    train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')\n    train_parser.add_argument('--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.')\n    train_parser.set_defaults(func=convert_command_factory)", "docstring": "Register this command to argparse so it's available for the transformer-cli\n\nArgs:\nparser: Root parser to register command-specific arguments", "source": "github-repos"}
{"code": "def _create_array(self, arr: np.ndarray) -> int:\n        \n        if not isinstance(arr, np.ndarray):\n            raise ValueError('Array is not a numpy ndarray.')\n        try:\n            c_arr = np.ctypeslib.as_ctypes(arr)\n        except (KeyError, NotImplementedError):\n            raise ValueError(\n                'Array has unsupported dtype {}.'.format(arr.dtype))\n\n        \n        raw_arr = RawArray(c_arr._type_, c_arr)\n\n        with self._lock:\n            if self._count >= len(self._arrays):\n                self._arrays += len(self._arrays) * [None]\n\n            self._get_next_free()\n\n            \n            \n            \n            self._arrays[self._current] = (raw_arr, arr.shape)\n\n            self._count += 1\n\n        return self._current", "docstring": "Returns the handle of a RawArray created from the given numpy array.\n\nArgs:\narr: A numpy ndarray.\n\nReturns:\nThe handle (int) of the array.\n\nRaises:\nValueError: if arr is not a ndarray or of an unsupported dtype. If\nthe array is of an unsupported type, using a view of the array to\nanother dtype and then converting on get is often a work around.", "source": "juraj-google-style"}
{"code": "def evaluate_bound(distribution, x_data, parameters=None, cache=None):\n    assert (len(x_data) == len(distribution))\n    assert (len(x_data.shape) == 2)\n    cache = (cache if (cache is not None) else {})\n    parameters = load_parameters(distribution, '_bnd', parameters=parameters, cache=cache)\n    out = numpy.zeros(((2,) + x_data.shape))\n    (lower, upper) = distribution._bnd(x_data.copy(), **parameters)\n    out.T[(:, :, 0)] = numpy.asfarray(lower).T\n    out.T[(:, :, 1)] = numpy.asfarray(upper).T\n    cache[distribution] = out\n    return out", "docstring": "Evaluate lower and upper bounds.\n\nArgs:\ndistribution (Dist):\nDistribution to evaluate.\nx_data (numpy.ndarray):\nLocations for where evaluate bounds at. Relevant in the case of\nmultivariate distributions where the bounds are affected by the\noutput of other distributions.\nparameters (:py:data:typing.Any):\nCollection of parameters to override the default ones in the\ndistribution.\ncache (:py:data:typing.Any):\nA collection of previous calculations in case the same distribution\nturns up on more than one occasion.\n\nReturns:\nThe lower and upper bounds of ``distribution`` at location\n``x_data`` using parameters ``parameters``.", "source": "codesearchnet"}
{"code": "def _GetTimeValues(self, number_of_seconds):\n    number_of_seconds = int(number_of_seconds)\n    (number_of_minutes, seconds) = divmod(number_of_seconds, 60)\n    (number_of_hours, minutes) = divmod(number_of_minutes, 60)\n    (number_of_days, hours) = divmod(number_of_hours, 24)\n    return (number_of_days, hours, minutes, seconds)", "docstring": "Determines time values.\n\nArgs:\nnumber_of_seconds (int|decimal.Decimal): number of seconds.\n\nReturns:\ntuple[int, int, int, int]: days, hours, minutes, seconds.", "source": "codesearchnet"}
{"code": "def ReadSerialized(cls, json_string):\n    if json_string:\n        json_dict = json.loads(json_string)\n        return cls.ReadSerializedDict(json_dict)\n    return None", "docstring": "Reads an attribute container from serialized form.\n\nArgs:\njson_string (str): JSON serialized attribute container.\n\nReturns:\nAttributeContainer: attribute container or None.", "source": "codesearchnet"}
{"code": "def from_file(filename, use_cores=True, thresh=0.0001):\n    with zopen(filename, 'rt') as f:\n        return Xr.from_string(f.read(), use_cores=use_cores, thresh=thresh)", "docstring": "Reads an xr-formatted file to create an Xr object.\n\nArgs:\nfilename (str): name of file to read from.\nuse_cores (bool): use core positions and discard shell\npositions if set to True (default).  Otherwise,\nuse shell positions and discard core positions.\nthresh (float): relative threshold for consistency check\nbetween cell parameters (lengths and angles) from\nheader information and cell vectors, respectively.\n\nReturns:\nxr (Xr): Xr object corresponding to the input\nfile.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        self.__hash = None\n        self.DeserializeUnsigned(reader)\n        byt = reader.ReadByte()\n        if int(byt) != 1:\n            raise Exception('Incorrect format')\n\n        witness = Witness()\n        witness.Deserialize(reader)\n        self.Script = witness", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n    assert ('name' in data), 'Required keyword \"name\" is missing!'\n    assert ('data_type' in data), 'Required keyword \"data_type\" is missing!'\n    if (cls._type_enumeration is None):\n        cls._type_enumeration = _DataTypeEnumeration(import_modules=False)\n    if (data['data_type'] == 'GenericType'):\n        assert ('base_unit' in data), 'Keyword \"base_unit\" is missing and is required for GenericType.'\n        return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])\n    elif (data['data_type'] in cls._type_enumeration._TYPES):\n        clss = cls._type_enumeration._TYPES[data['data_type']]\n        if (data['data_type'] == data['name'].title().replace(' ', '')):\n            return clss()\n        else:\n            instance = clss()\n            instance._name = data['name']\n            return instance\n    else:\n        raise ValueError('Data Type {} could not be recognized'.format(data['data_type']))", "docstring": "Create a data type from a dictionary.\n\nArgs:\ndata: Data as a dictionary.\n{\n\"name\": data type name of the data type as a string\n\"data_type\": the class name of the data type as a string\n\"base_unit\": the base unit of the data type\n}", "source": "codesearchnet"}
{"code": "def json_dict(json_data):\n    \n    if isinstance(json_data, dict):\n        return json_data\n    elif isinstance(json_data, basestring):\n        return json.loads(json_data, object_hook=OrderedDict)\n    else:\n        raise TypeError(\n            \"'json_data' must be a dictionary or valid JSON string; \"\n            \"received: {!r}\".format(json_data)\n        )", "docstring": "Given a dictionary or JSON string; return a dictionary.\n\nArgs:\njson_data(dict, str): Input JSON object.\n\nReturns:\nA Python dictionary with the contents of the JSON object.\n\nRaises:\nTypeError: If the input object is not a dictionary or string.", "source": "juraj-google-style"}
{"code": "def _get_individual_image(self, run, tag, index, sample):\n    \n    if self._db_connection_provider:\n      db = self._db_connection_provider()\n      cursor = db.execute(\n          ,\n          {'run': run,\n           'tag': tag,\n           'sample': sample,\n           'index': index,\n           'dtype': tf.string.as_datatype_enum})\n      (data,) = cursor.fetchone()\n      return six.binary_type(data)\n\n    events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)\n    images = events[index].tensor_proto.string_val[2:]  \n    return images[sample]", "docstring": "Returns the actual image bytes for a given image.\n\nArgs:\nrun: The name of the run the image belongs to.\ntag: The name of the tag the images belongs to.\nindex: The index of the image in the current reservoir.\nsample: The zero-indexed sample of the image to retrieve (for example,\nsetting `sample` to `2` will fetch the third image sample at `step`).\n\nReturns:\nA bytestring of the raw image bytes.", "source": "juraj-google-style"}
{"code": "def submit(cls, job_config, in_xg_transaction=False):\n    cls.__validate_job_config(job_config)\n    mapper_spec = job_config._get_mapper_spec()\n    mapreduce_params = job_config._get_mr_params()\n    mapreduce_spec = model.MapreduceSpec(job_config.job_name, job_config.job_id, mapper_spec.to_json(), mapreduce_params, util._obj_to_path(job_config._hooks_cls))\n    if in_xg_transaction:\n        propagation = db.MANDATORY\n    else:\n        propagation = db.INDEPENDENT\n    state = None\n\n    @db.transactional(propagation=propagation)\n    def _txn():\n        state = cls.__create_and_save_state(job_config, mapreduce_spec)\n        cls.__add_kickoff_task(job_config, mapreduce_spec)\n        return state\n    state = _txn()\n    return cls(state)", "docstring": "Submit the job to run.\n\nArgs:\njob_config: an instance of map_job.MapJobConfig.\nin_xg_transaction: controls what transaction scope to use to start this MR\njob. If True, there has to be an already opened cross-group transaction\nscope. MR will use one entity group from it.\nIf False, MR will create an independent transaction to start the job\nregardless of any existing transaction scopes.\n\nReturns:\na Job instance representing the submitted job.", "source": "codesearchnet"}
{"code": "def filter_by_analysis_period(self, analysis_period):\n        \n        self._check_analysis_period(analysis_period)\n        analysis_period = self._get_analysis_period_subset(analysis_period)\n\n        if analysis_period.st_hour == 0 and analysis_period.end_hour == 23:\n            \n            t_s = 60 / analysis_period.timestep\n            st_ind = int((analysis_period.st_time.moy / t_s) -\n                         (self.header.analysis_period.st_time.moy / t_s))\n            end_ind = int((analysis_period.end_time.moy / t_s) -\n                          (analysis_period.st_time.moy / t_s) + st_ind + 1)\n            if end_ind > st_ind:\n                _filt_values = self._values[st_ind:end_ind]\n            else:\n                _filt_values = self._values[st_ind:] + self._values[:end_ind]\n            _filt_header = self.header.duplicate()\n            _filt_header._analysis_period = analysis_period\n            return HourlyContinuousCollection(_filt_header, _filt_values)\n        else:\n            \n            _filtered_data = self.filter_by_moys(analysis_period.moys)\n            _filtered_data.header._analysis_period = analysis_period\n            return _filtered_data", "docstring": "Filter the Data Collection based on an analysis period.\n\nArgs:\nanalysis period: A Ladybug analysis period\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def _make_concatenated_type(self, type1: _base.BaseValue, type2: _base.BaseValue | None) -> '_typing.Concatenate | None':\n    if isinstance(type2, _abstract.ParamSpec):\n        new_args = [type1, type2]\n    elif isinstance(type2, _abstract.Concatenate):\n        type2 = cast(Any, type2)\n        new_args = [type1] + type2.args + [type2.paramspec]\n    else:\n        return None\n    return _abstract.Concatenate(new_args, type1.ctx)", "docstring": "Concatenates type1 and type2 if possible.\n\nIf type2 is a ParamSpec or Concatenate object, creates a new Concatenate\nobject by adding type1 to the front.\n\nArgs:\ntype1: An abstract value.\ntype2: An abstract value or None.\n\nReturns:\nA new Concatenate object, or None if type2 cannot be concatenated to.", "source": "github-repos"}
{"code": "def list_vmss_skus(access_token, subscription_id, resource_group, vmss_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '/skus',\n                        '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List the VM skus available for a VM Scale Set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response. JSON body of VM skus.", "source": "juraj-google-style"}
{"code": "def update(self, value: Union[RawValue, Value],\n               raw: bool = False) -> \"InstanceNode\":\n        \n        newval = self.schema_node.from_raw(\n            value, self.json_pointer()) if raw else value\n        return self._copy(newval)", "docstring": "Update the receiver's value.\n\nArgs:\nvalue: New value.\nraw: Flag to be set if `value` is raw.\n\nReturns:\nCopy of the receiver with the updated value.", "source": "juraj-google-style"}
{"code": "def from_json_file(cls, json_file: Union[str, os.PathLike]):\n    with open(json_file, 'r', encoding='utf-8') as reader:\n        text = reader.read()\n    video_processor_dict = json.loads(text)\n    return cls(**video_processor_dict)", "docstring": "Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON\nfile of parameters.\n\nArgs:\njson_file (`str` or `os.PathLike`):\nPath to the JSON file containing the parameters.\n\nReturns:\nA video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object\ninstantiated from that JSON file.", "source": "github-repos"}
{"code": "def indicators_from_tag(self, indicator, tag_name, filters=None, params=None):\n        \n        params = params or {}\n\n        for t in self.pivot_from_tag(indicator, tag_name, filters=filters, params=params):\n            yield t", "docstring": "Args:\nindicator:\ntag_name:\nfilters:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def fts_match(self, fts_mask, segment):\n        \n        fts_mask = set(fts_mask)\n        fts_seg = self.fts(segment)\n        if fts_seg:\n            return fts_seg <= fts_mask\n        else:\n            return None", "docstring": "Evaluates whether a set of features 'match' a segment (are a subset\nof that segment's features)\n\nArgs:\nfts_mask (list): list of (value, feature) tuples\nsegment (unicode): IPA string corresponding to segment (consonant or\nvowel)\nReturns:\nbool: None if `segment` cannot be parsed; True if the feature values\nof `fts_mask` are a subset of those for `segment`", "source": "juraj-google-style"}
{"code": "def pxbounds(self, geom, clip=False):\n        \n\n        try:\n            if isinstance(geom, dict):\n                if 'geometry' in geom:\n                    geom = shape(geom['geometry'])\n                else:\n                    geom = shape(geom)\n            elif isinstance(geom, BaseGeometry):\n                geom = shape(geom)\n            else:\n                geom = wkt.loads(geom)\n        except:\n            raise TypeError (\"Invalid geometry object\")\n\n        \n        if geom.disjoint(shape(self)):\n            raise ValueError(\"Geometry outside of image bounds\")\n        \n        (xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds\n        _nbands, ysize, xsize = self.shape\n        if clip:\n            xmin = max(xmin, 0)\n            ymin = max(ymin, 0)\n            xmax = min(xmax, xsize)\n            ymax = min(ymax, ysize)\n\n        return (xmin, ymin, xmax, ymax)", "docstring": "Returns the bounds of a geometry object in pixel coordinates\n\nArgs:\ngeom: Shapely geometry object or GeoJSON as Python dictionary or WKT string\nclip (bool): Clip the bounds to the min/max extent of the image\n\nReturns:\nlist: bounds in pixels [min x, min y, max x, max y] clipped to image bounds", "source": "juraj-google-style"}
{"code": "def erfinv(x, name=\"erfinv\"):\n  \n\n  with tf.name_scope(name):\n    x = tf.convert_to_tensor(value=x, name=\"x\")\n    if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:\n      raise TypeError(\"x.dtype={} is not handled, see docstring for supported \"\n                      \"types.\".format(dtype_util.name(x.dtype)))\n    return ndtri((x + 1.) / 2.) / np.sqrt(2.)", "docstring": "The inverse function for erf, the error function.\n\nArgs:\nx: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"erfinv\").\n\nReturns:\nx: `Tensor` with `dtype=x.dtype`.\n\nRaises:\nTypeError: if `x` is not floating-type.", "source": "juraj-google-style"}
{"code": "def depth_march_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_march_average_ground_temperature`'.format(value))\n\n        self._depth_march_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_march_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_march_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def warning_max_changed(channel, max_warnings):\n    \n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"Maximum Warnings Changed\",\n        \"Users must now have {} warnings to be banned \"\n        \"(this won't ban existing users with warnings)\".format(max_warnings),\n        modulename=modulename\n    )\n\n    return gui", "docstring": "Creates an embed UI containing an error message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nmax_warnings (int): The new maximum warnings\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def _construct_full_hostname(self, hostname):\n        \n        if hostname.startswith(('http:\n            return hostname\n        if ':\n            protocol, host = hostname.split(':\n            raise ValueError('Protocol %s is not supported.' % protocol)\n        return ':", "docstring": "Create a full (scheme included) hostname from the argument given.\n\nOnly HTTP and HTTP+SSL protocols are allowed.\n\nArgs:\nhostname: The hostname to use.\nReturns:\nThe full hostname.\nRaises:\nValueError: A not supported protocol is used.", "source": "juraj-google-style"}
{"code": "def benchmarks_main(true_main, argv=None):\n    if argv is None:\n        argv = sys.argv\n    found_arg = [arg for arg in argv if arg.startswith('--benchmark_filter=') or arg.startswith('-benchmark_filter=')]\n    if found_arg:\n        argv.remove(found_arg[0])\n        regex = found_arg[0].split('=')[1]\n        app.run(lambda _: _run_benchmarks(regex), argv=argv)\n    else:\n        true_main()", "docstring": "Run benchmarks as declared in argv.\n\nArgs:\ntrue_main: True main function to run if benchmarks are not requested.\nargv: the command line arguments (if None, uses sys.argv).", "source": "github-repos"}
{"code": "def getAsWkt(self, session):\n        \n        statement = .format(self.geometryColumnName,\n                               self.tableName,\n                               self.id)\n\n        result = session.execute(statement)\n\n        for row in result:\n            return row.wkt", "docstring": "Retrieve the geometry in Well Known Text format.\n\nThis method is a veneer for an SQL query that calls the ``ST_AsText()`` function on the geometry column.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\n\nReturns:\nstr: Well Known Text string representation of geometry.", "source": "juraj-google-style"}
{"code": "def write(name, value):\n    \n    if value is not None:\n        environ[name] = builtins.str(value)\n    elif environ.get(name):\n        del environ[name]", "docstring": "Write a raw env value.\n\nA ``None`` value clears the environment variable.\n\nArgs:\nname: The environment variable name\nvalue: The value to write", "source": "juraj-google-style"}
{"code": "def as_date(dat):\n    LOGGER.debug('as_date(%s)', dat)\n    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(calendar.timegm(dat.timetuple()))", "docstring": "Return the RFC3339 UTC string representation of the given date and time.\n\nArgs:\ndat (:py:class:`datetime.date`): the object/type to be serialized.\n\nRaises:\nTypeError:\nwhen ``o`` is not an instance of ``datetime.date``.\n\nReturns:\n(str) JSON serializable type for the given object.", "source": "codesearchnet"}
{"code": "def colless(self, normalize='leaves'):\n    t_res = copy(self)\n    t_res.resolve_polytomies()\n    leaves_below = dict()\n    n = 0\n    I = 0\n    for node in t_res.traverse_postorder():\n        if node.is_leaf():\n            leaves_below[node] = 1\n            n += 1\n        else:\n            (cl, cr) = node.children\n            nl = leaves_below[cl]\n            nr = leaves_below[cr]\n            leaves_below[node] = (nl + nr)\n            I += abs((nl - nr))\n    if ((normalize is None) or (normalize is False)):\n        return I\n    elif (not isinstance(normalize, str)):\n        raise TypeError('normalize must be None or a string')\n    normalize = normalize.lower()\n    if (normalize == 'leaves'):\n        return ((2.0 * I) / ((n - 1) * (n - 2)))\n    elif (normalize == 'yule'):\n        return (((I - (n * log(n))) - (n * ((EULER_GAMMA - 1) - log(2)))) / n)\n    elif (normalize == 'pda'):\n        return (I / (n ** 1.5))\n    else:\n        raise RuntimeError(\"normalize must be None, 'leaves', 'yule', or 'pda'\")", "docstring": "Compute the Colless balance index of this ``Tree``. If the tree has polytomies, they will be randomly resolved\n\nArgs:\n``normalize`` (``str``): How to normalize the Colless index (if at all)\n\n* ``None`` to not normalize\n\n* ``\"leaves\"`` to normalize by the number of leaves\n\n* ``\"yule\"`` to normalize to the Yule model\n\n* ``\"pda\"`` to normalize to the Proportional to Distinguishable Arrangements model\n\nReturns:\n``float``: Colless index (either normalized or not)", "source": "codesearchnet"}
{"code": "def _tzinfome(tzinfo):\n    if (not isinstance(tzinfo, datetime.tzinfo)):\n        try:\n            tzinfo = pytz.timezone(tzinfo)\n            assert (tzinfo.zone in pytz.all_timezones)\n        except AttributeError:\n            raise pytz.UnknownTimeZoneError(('Unknown timezone! %s' % tzinfo))\n    return tzinfo", "docstring": "Gets a tzinfo object from a string.\n\nArgs:\ntzinfo: A string (or string like) object, or a datetime.tzinfo object.\n\nReturns:\nAn datetime.tzinfo object.\n\nRaises:\nUnknownTimeZoneError: If the timezone given can't be decoded.", "source": "codesearchnet"}
{"code": "def get_cot_artifacts(context):\n    \n    artifacts = {}\n    filepaths = filepaths_in_dir(context.config['artifact_dir'])\n    hash_alg = context.config['chain_of_trust_hash_algorithm']\n    for filepath in sorted(filepaths):\n        path = os.path.join(context.config['artifact_dir'], filepath)\n        sha = get_hash(path, hash_alg=hash_alg)\n        artifacts[filepath] = {hash_alg: sha}\n    return artifacts", "docstring": "Generate the artifact relative paths and shas for the chain of trust.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict: a dictionary of {\"path/to/artifact\": {\"hash_alg\": \"...\"}, ...}", "source": "juraj-google-style"}
{"code": "def _message_to_entity(msg, modelclass):\n  \n  ent = modelclass()\n  for prop_name, prop in modelclass._properties.iteritems():\n    if prop._code_name == 'blob_':  \n      continue  \n    value = getattr(msg, prop_name)\n    if value is not None and isinstance(prop, model.StructuredProperty):\n      if prop._repeated:\n        value = [_message_to_entity(v, prop._modelclass) for v in value]\n      else:\n        value = _message_to_entity(value, prop._modelclass)\n    setattr(ent, prop_name, value)\n  return ent", "docstring": "Recursive helper for _to_base_type() to convert a message to an entity.\n\nArgs:\nmsg: A Message instance.\nmodelclass: A Model subclass.\n\nReturns:\nAn instance of modelclass.", "source": "juraj-google-style"}
{"code": "def app(self):\n    app = (self._app or current_app)\n    if (not in_app_context(app)):\n        raise RuntimeError(\"This component hasn't been initialized yet and an app context doesn't exist.\")\n    if hasattr(app, '_get_current_object'):\n        app = app._get_current_object()\n    return app", "docstring": "Internal method that will supply the app to use internally.\n\nReturns:\nflask.Flask: The app to use within the component.\n\nRaises:\nRuntimeError: This is raised if no app was provided to the\ncomponent and the method is being called outside of an\napplication context.", "source": "codesearchnet"}
{"code": "def ExpandWindowsUserEnvironmentVariables(data_string,\n                                          knowledge_base,\n                                          sid=None,\n                                          username=None):\n  r\n  win_environ_regex = re.compile(r\"%([^%]+?)%\")\n  components = []\n  offset = 0\n  for match in win_environ_regex.finditer(data_string):\n    components.append(data_string[offset:match.start()])\n    kb_user = knowledge_base.GetUser(sid=sid, username=username)\n    kb_value = None\n    if kb_user:\n      kb_value = getattr(kb_user, match.group(1).lower(), None)\n    if isinstance(kb_value, string_types) and kb_value:\n      components.append(kb_value)\n    else:\n      components.append(\"%%%s%%\" % match.group(1))\n    offset = match.end()\n\n  components.append(data_string[offset:])  \n  return \"\".join(components)", "docstring": "r\"\"\"Take a string and expand windows user environment variables based.\n\nArgs:\ndata_string: A string, e.g. \"%TEMP%\\\\LogFiles\"\nknowledge_base: A knowledgebase object.\nsid: A Windows SID for a user to expand for.\nusername: A Windows user name to expand for.\n\nReturns:\nA string with available environment variables expanded.", "source": "juraj-google-style"}
{"code": "def create(self, project_id=None):\n    if (not self.exists()):\n        if (project_id is None):\n            project_id = self._api.project_id\n        try:\n            self._info = self._api.buckets_insert(self._name, project_id=project_id)\n        except Exception as e:\n            raise e\n    return self", "docstring": "Creates the bucket.\n\nArgs:\nproject_id: the project in which to create the bucket.\nReturns:\nThe bucket.\nRaises:\nException if there was an error creating the bucket.", "source": "codesearchnet"}
{"code": "def resize_bytes(fobj, old_size, new_size, offset):\n    if (new_size < old_size):\n        delete_size = (old_size - new_size)\n        delete_at = (offset + new_size)\n        delete_bytes(fobj, delete_size, delete_at)\n    elif (new_size > old_size):\n        insert_size = (new_size - old_size)\n        insert_at = (offset + old_size)\n        insert_bytes(fobj, insert_size, insert_at)", "docstring": "Resize an area in a file adding and deleting at the end of it.\nDoes nothing if no resizing is needed.\n\nArgs:\nfobj (fileobj)\nold_size (int): The area starting at offset\nnew_size (int): The new size of the area\noffset (int): The start of the area\nRaises:\nIOError", "source": "codesearchnet"}
{"code": "def digest_content(self, rule):\n    data = OrderedDict()\n    current_key = None\n    for token in rule.content:\n        if (token.type == 'ident'):\n            name = token.value\n            if name.startswith('-'):\n                name = name[1:]\n            current_key = name\n            data[current_key] = None\n        if (token.type == 'string'):\n            data[current_key] = token.value\n    return data", "docstring": "Walk on rule content tokens to return a dict of properties.\n\nThis is pretty naive and will choke/fail on everything that is more\nevolved than simple ``ident(string):value(string)``\n\nArguments:\nrule (tinycss2.ast.QualifiedRule): Qualified rule object as\nreturned by  tinycss2.\n\nReturns:\ndict: Dictionnary of retrieved variables and properties.", "source": "codesearchnet"}
{"code": "def __init__(self, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog', event_store: str='default_event_store', placement_id: str=None):\n    self.project = project\n    self.retry = retry\n    self.timeout = timeout\n    self.metadata = metadata\n    self.placement_id = placement_id\n    self.catalog_name = catalog_name\n    self.event_store = event_store\n    if placement_id is None:\n        raise ValueError('placement_id must be specified')\n    else:\n        self.placement_id = placement_id", "docstring": "Initializes a :class:`PredictUserEvent` transform.\n\nArgs:\nproject (str): Optional. GCP project name in which the catalog\ndata will be imported.\nretry: Optional. Designation of what\nerrors, if any, should be retried.\ntimeout (float): Optional. The amount of time, in seconds, to wait\nfor the request to complete.\nmetadata: Optional. Strings which\nshould be sent along with the request as metadata.\ncatalog_name (str): Optional. Name of the catalog.\nDefault: 'default_catalog'\nevent_store (str): Optional. Name of the event store.\nDefault: 'default_event_store'\nplacement_id (str): Required. ID of the recommendation engine\nplacement. This id is used to identify the set of models that\nwill be used to make the prediction.", "source": "github-repos"}
{"code": "def get_nodes(cluster):\n    \n    gk = get_api_client()\n    site = get_cluster_site(cluster)\n    return gk.sites[site].clusters[cluster].nodes.list()", "docstring": "Get all the nodes of a given cluster.\n\nArgs:\ncluster(string): uid of the cluster (e.g 'rennes')", "source": "juraj-google-style"}
{"code": "def _reduce_output(self, outputs, seq_lengths):\n        \n        batch_size = outputs.shape[0]\n        reduced = []\n        \n        for i in range(batch_size):\n            if self.lstm_reduction == \"mean\":\n                \n                \n                reduced.append(outputs[i, : seq_lengths[i], :].mean(dim=0))\n            elif self.lstm_reduction == \"max\":\n                \n                \n                reduced.append(outputs[i, : seq_lengths[i], :].max(dim=0)[0])\n            elif self.lstm_reduction == \"last\":\n                \n                \n                reduced.append(outputs[i, seq_lengths[i] - 1, :])\n            elif self.lstm_reduction == \"attention\":\n                reduced.append(self._attention(outputs[i, : seq_lengths[i], :]))\n            else:\n                msg = (\n                    f\"Did not recognize lstm kwarg 'lstm_reduction' == \"\n                    f\"{self.lstm_reduction}\"\n                )\n                raise ValueError(msg)\n        return torch.stack(reduced, dim=0)", "docstring": "Reduces the output of an LSTM step\n\nArgs:\noutputs: (torch.FloatTensor) the hidden state outputs from the\nlstm, with shape [batch_size, max_seq_length, hidden_size]", "source": "juraj-google-style"}
{"code": "def constants_from_enum(cls, module=None):\n    if (not issubclass(cls, enum.Enum)):\n        raise TypeError(\"Class '{}' is not subclass of enum.\".format(cls.__name__))\n    if (module is None):\n        module = cls.__module__\n    for value in cls:\n        constant('{}.{}'.format(module, str(value)), value)\n    return cls", "docstring": "Decorator for an enum class that generates Gin constants from values.\n\nGenerated constants have format `module.ClassName.ENUM_VALUE`. The module\nname is optional when using the constant.\n\nArgs:\ncls: Class type.\nmodule: The module to associate with the constants, to help handle naming\ncollisions. If `None`, `cls.__module__` will be used.\n\nReturns:\nClass type (identity function).\n\nRaises:\nTypeError: When applied to a non-enum class.", "source": "codesearchnet"}
{"code": "def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:\n    return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)", "docstring": "Returns ONNX encoder config for `VisionEncoderDecoder` model.\n\nArgs:\nencoder_config (`PretrainedConfig`):\nThe encoder model's configuration to use when exporting to ONNX.\n\nReturns:\n[`VisionEncoderDecoderEncoderOnnxConfig`]: An instance of the ONNX configuration object", "source": "github-repos"}
{"code": "def recipe_kv_uploader(config, recipe_name):\n    drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:", "docstring": "A tool for bulk editing key value pairs for CM placements.\n\nArgs:\nrecipe_name (string) - Name of document to deploy to.", "source": "github-repos"}
{"code": "def DisplayWidth(self, buf):\n    if not isinstance(buf, str):\n        return len(buf)\n    cached = self._display_width_cache.get(buf, None)\n    if cached is not None:\n        return cached\n    width = 0\n    max_width = 0\n    i = 0\n    while i < len(buf):\n        if self._csi and buf[i:].startswith(self._csi):\n            i += self.GetControlSequenceLen(buf[i:])\n        elif buf[i] == '\\n':\n            max_width = max(width, max_width)\n            width = 0\n            i += 1\n        else:\n            width += GetCharacterDisplayWidth(buf[i])\n            i += 1\n    max_width = max(width, max_width)\n    self._display_width_cache[buf] = max_width\n    return max_width", "docstring": "Returns the display width of buf, handling unicode and ANSI controls.\n\nArgs:\nbuf: The string to count from.\n\nReturns:\nThe display width of buf, handling unicode and ANSI controls.", "source": "github-repos"}
{"code": "def _GenerateSshKey(self, key_type, key_dest):\n    \n    \n    with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp:\n      temp_key = temp.name\n\n    command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q']\n    try:\n      self.logger.info('Generating SSH key %s.', key_dest)\n      subprocess.check_call(command)\n    except subprocess.CalledProcessError:\n      self.logger.warning('Could not create SSH key %s.', key_dest)\n      return\n\n    shutil.move(temp_key, key_dest)\n    shutil.move('%s.pub' % temp_key, '%s.pub' % key_dest)\n\n    file_utils.SetPermissions(key_dest, mode=0o600)\n    file_utils.SetPermissions('%s.pub' % key_dest, mode=0o644)", "docstring": "Generate a new SSH key.\n\nArgs:\nkey_type: string, the type of the SSH key.\nkey_dest: string, a file location to store the SSH key.", "source": "juraj-google-style"}
{"code": "def parse(self, argument):\n    \n    if not isinstance(argument, six.string_types):\n      raise TypeError('flag value must be a string, found \"{}\"'.format(\n          type(argument)))\n    return argument", "docstring": "Parses the string argument and returns the native value.\n\nBy default it returns its argument unmodified.\n\nArgs:\nargument: string argument passed in the commandline.\n\nRaises:\nValueError: Raised when it fails to parse the argument.\nTypeError: Raised when the argument has the wrong type.\n\nReturns:\nThe parsed value in native type.", "source": "juraj-google-style"}
{"code": "class MambaOutput(ModelOutput):\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    cache_params: Optional[MambaCache] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Class for the MAMBA model outputs.\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nSequence of hidden-states at the output of the last layer of the model.\ncache_params (`MambaCache`):\nThe state of the model at the last time step. Can be used in a forward method with the next `input_ids` to\navoid providing the old `input_ids`.\n\nIncludes both the State space model state matrices after the selective scan, and the Convolutional states\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\none for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def _default_global_step_tensor(self):\n    try:\n        gs = ops.get_default_graph().get_tensor_by_name('global_step:0')\n        if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:\n            return gs\n        else:\n            logging.warning(\"Found 'global_step' is not an int type: %s\", gs.dtype)\n            return None\n    except KeyError:\n        return None", "docstring": "Returns the global_step from the default graph.\n\nReturns:\nThe global step `Tensor` or `None`.", "source": "github-repos"}
{"code": "def next_weekday(date):\n    n_days = (7 - date.weekday())\n    if (n_days > 3):\n        n_days = 1\n    return (date + datetime.timedelta(days=n_days))", "docstring": "Return the first weekday after date\n\nArgs:\ndate (datetime or datetime.date)\nReturns:\n(datetime or datetime.date)\nRaises:\n-", "source": "codesearchnet"}
{"code": "def unpack_archive(*components, **kwargs) -> str:\n    \n    path = fs.abspath(*components)\n    compression = kwargs.get(\"compression\", \"bz2\")\n    dir = kwargs.get(\"dir\", fs.dirname(path))\n\n    fs.cd(dir)\n    tar = tarfile.open(path, \"r:\" + compression)\n    tar.extractall()\n    tar.close()\n    fs.cdpop()\n\n    return dir", "docstring": "Unpack a compressed archive.\n\nArguments:\n*components (str[]): Absolute path.\n**kwargs (dict, optional): Set \"compression\" to compression type.\nDefault: bz2. Set \"dir\" to destination directory. Defaults to the\ndirectory of the archive.\n\nReturns:\nstr: Path to directory.", "source": "juraj-google-style"}
{"code": "def destroy_s3(app='', env='dev', **_):\n    session = boto3.Session(profile_name=env)\n    client = session.resource('s3')\n    generated = get_details(app=app, env=env)\n    archaius = generated.archaius()\n    bucket = client.Bucket(archaius['bucket'])\n    for item in bucket.objects.filter(Prefix=archaius['path']):\n        item.Object().delete()\n        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)\n    return True", "docstring": "Destroy S3 Resources for _app_ in _env_.\n\nArgs:\napp (str): Application name\nenv (str): Deployment environment/account name\n\nReturns:\nboolean: True if destroyed sucessfully", "source": "codesearchnet"}
{"code": "def from_json_and_lambdas(cls, file: str, lambdas):\n        \n        with open(file, \"r\") as f:\n            data = json.load(f)\n\n        return cls.from_dict(data, lambdas)", "docstring": "Builds a GrFN from a JSON object.\n\nArgs:\ncls: The class variable for object creation.\nfile: Filename of a GrFN JSON file.\n\nReturns:\ntype: A GroundedFunctionNetwork object.", "source": "juraj-google-style"}
{"code": "def delete(self, filename):\n        \n        folder = \"Packages\" if is_package(filename) else \"Scripts\"\n        path = os.path.join(self.connection[\"mount_point\"], folder, filename)\n        if os.path.isdir(path):\n            shutil.rmtree(path)\n        elif os.path.isfile(path):\n            os.remove(path)", "docstring": "Delete a file from the repository.\n\nThis method will not delete a script from a migrated JSS.\nPlease remove migrated scripts with jss.Script.delete.\n\nArgs:\nfilename: String filename only (i.e. no path) of file to\ndelete. Will handle deleting scripts vs. packages\nautomatically.", "source": "juraj-google-style"}
{"code": "def write(self, file_prefix, session=None, options=None):\n    return self._write(file_prefix, session, options=options)", "docstring": "Writes a training checkpoint.\n\nThe checkpoint includes variables created by this object and any\ntrackable objects it depends on at the time `Checkpoint.write()` is\ncalled.\n\n`write` does not number checkpoints, increment `save_counter`, or update the\nmetadata used by `tf.train.latest_checkpoint`. It is primarily intended for\nuse by higher level checkpoint management utilities. `save` provides a very\nbasic implementation of these features.\n\nArgs:\nfile_prefix: A prefix to use for the checkpoint filenames\n(/path/to/directory/and_a_prefix).\nsession: The session to evaluate variables in. Ignored when executing\neagerly. If not provided when graph building, the default session is\nused.\noptions: Optional `tf.train.CheckpointOptions` object.\n\nReturns:\nThe full path to the checkpoint (i.e. `file_prefix`).", "source": "github-repos"}
{"code": "def _map_free_gates(layout, gates, coupling_map):\n    blocked_qubits = set()\n    mapped_gates = []\n    remaining_gates = []\n    for gate in gates:\n        if (not gate['partition']):\n            qubits = [n for n in gate['graph'].nodes() if (n.type == 'op')][0].qargs\n            if (not qubits):\n                continue\n            if blocked_qubits.intersection(qubits):\n                blocked_qubits.update(qubits)\n                remaining_gates.append(gate)\n            else:\n                mapped_gate = _transform_gate_for_layout(gate, layout)\n                mapped_gates.append(mapped_gate)\n            continue\n        qubits = gate['partition'][0]\n        if blocked_qubits.intersection(qubits):\n            blocked_qubits.update(qubits)\n            remaining_gates.append(gate)\n        elif (len(qubits) == 1):\n            mapped_gate = _transform_gate_for_layout(gate, layout)\n            mapped_gates.append(mapped_gate)\n        elif (coupling_map.distance(*[layout[q] for q in qubits]) == 1):\n            mapped_gate = _transform_gate_for_layout(gate, layout)\n            mapped_gates.append(mapped_gate)\n        else:\n            blocked_qubits.update(qubits)\n            remaining_gates.append(gate)\n    return (mapped_gates, remaining_gates)", "docstring": "Map all gates that can be executed with the current layout.\n\nArgs:\nlayout (Layout): Map from virtual qubit index to physical qubit index.\ngates (list): Gates to be mapped.\ncoupling_map (CouplingMap): CouplingMap for target device topology.\n\nReturns:\ntuple:\nmapped_gates (list): ops for gates that can be executed, mapped onto layout.\nremaining_gates (list): gates that cannot be executed on the layout.", "source": "codesearchnet"}
{"code": "def symbolic_master_equation(self, rho=None):\n        \n        L, H = self.L, self.H\n        if rho is None:\n            rho = OperatorSymbol('rho', hs=self.space)\n        return (-I * (H * rho - rho * H) +\n                sum(Lk * rho * adjoint(Lk) -\n                    (adjoint(Lk) * Lk * rho + rho * adjoint(Lk) * Lk) / 2\n                    for Lk in L.matrix.ravel()))", "docstring": "Compute the symbolic Liouvillian acting on a state rho\n\nIf no rho is given, an OperatorSymbol is created in its place.\nThis correspnds to the RHS of the master equation\nin which an average is taken over the external noise degrees of\nfreedom.\n\nArgs:\nrho (Operator): A symbolic density matrix operator\n\nReturns:\nOperator: The RHS of the master equation.", "source": "juraj-google-style"}
{"code": "def ParseApplicationResourceUsage(self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs):\n    self._ParseGUIDTable(parser_mediator, cache, database, table, self._APPLICATION_RESOURCE_USAGE_VALUES_MAP, SRUMApplicationResourceUsageEventData)", "docstring": "Parses the application resource usage table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache, which contains information about\nthe identifiers stored in the SruDbIdMapTable table.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.", "source": "codesearchnet"}
{"code": "def get_library_progress(self):\n    kbp_dict = self._get_api_call('get_library_progress')\n    return {asin: KindleCloudReaderAPI._kbp_to_progress(kbp) for (asin, kbp) in kbp_dict.iteritems()}", "docstring": "Returns the reading progress for all books in the kindle library.\n\nReturns:\nA mapping of ASINs to `ReadingProgress` instances corresponding to the\nbooks in the current user's library.", "source": "codesearchnet"}
{"code": "def _scale_gradient_op(dtype):\n\n    def scale_gradient_backward(op, grad):\n        scale = op.inputs[1]\n        scaled_grad = (grad * scale)\n        return (scaled_grad, None)\n\n    def scale_gradient_forward(x, scale):\n        del scale\n        return x\n    func_name = 'ScaleGradient_{}'.format(dtype.name)\n    return function.Defun(dtype, dtype, python_grad_func=scale_gradient_backward, func_name=func_name)(scale_gradient_forward)", "docstring": "Create an op that scales gradients using a Defun.\n\nThe tensorflow Defun decorator creates an op and tensorflow caches these ops\nautomatically according to `func_name`. Using a Defun decorator twice with the\nsame `func_name` does not create a new op, instead the cached op is used.\n\nThis method produces a new op the first time it is called with a given `dtype`\nargument, and then uses the cached op each time it is called after that with\nthe same `dtype`. The scale value is given as an argument for the forward pass\nmethod so that it can be used in the backwards pass.\n\nArgs:\ndtype: the dtype of the net whose gradient is being scaled.\n\nReturns:\nThe op that scales gradients.", "source": "codesearchnet"}
{"code": "def append(self, event, help=''):\n    if isinstance(event, str):\n        self._events[event] = HookList(is_waterfall=self.is_waterfall)\n        self._help[event] = (help, getframeinfo(stack()[1][0]))\n        if (not help):\n            logger.warning(\"Great, don't say anything about your hooks and                 wait for plugin creators to figure it out.\")\n    elif isinstance(event, Iterable):\n        for name in event:\n            self.append(name)\n    else:\n        raise TypeError('Invalid event name!')", "docstring": "Creates a new event. `event` may be iterable or string\n\nArgs:\nevent (str): Name of event to declare\n\nKwrgs:\nhelp (str): Help string for the event\n\nRaises:\nTypeError\n\n**Please** describe the event and its calling arguments in the help\nstring.", "source": "codesearchnet"}
{"code": "def get_all_anonymous_mappings(self, struct1, struct2, niggli=True, include_dist=False):\n    (struct1, struct2) = self._process_species([struct1, struct2])\n    (struct1, struct2, fu, s1_supercell) = self._preprocess(struct1, struct2, niggli)\n    matches = self._anonymous_match(struct1, struct2, fu, s1_supercell, break_on_match=(not include_dist))\n    if matches:\n        if include_dist:\n            return [(m[0], m[1][0]) for m in matches]\n        else:\n            return [m[0] for m in matches]", "docstring": "Performs an anonymous fitting, which allows distinct species in one\nstructure to map to another. Returns a dictionary of species\nsubstitutions that are within tolerance\n\nArgs:\nstruct1 (Structure): 1st structure\nstruct2 (Structure): 2nd structure\nniggli (bool): Find niggli cell in preprocessing\ninclude_dist (bool): Return the maximin distance with each mapping\n\nReturns:\nlist of species mappings that map struct1 to struct2.", "source": "codesearchnet"}
{"code": "class MajorityVote(LabelAggregation):\n\n    def __init__(self, tie_breaker=DEFAULT_NORMAL_LABEL, **kwargs):\n        self._tie_breaker = tie_breaker\n\n        def inner(predictions: Iterable[int]) -> int:\n            counters = collections.Counter(predictions)\n            if counters[self._normal_label] < counters[self._outlier_label]:\n                vote = self._outlier_label\n            elif counters[self._normal_label] > counters[self._outlier_label]:\n                vote = self._normal_label\n            else:\n                vote = self._tie_breaker\n            return vote\n        super().__init__(agg_func=inner, **kwargs)", "docstring": "Aggregates anomaly labels using majority voting.\n\nThis `AggregationFn` implements a majority voting strategy to combine\nanomaly labels from multiple `AnomalyPrediction` objects. It counts the\noccurrences of normal and outlier labels and selects the label with the\nhigher count as the aggregated label. In case of a tie, a tie-breaker\nlabel is used.\n\nExample:\nIf input labels are [normal, outlier, outlier, normal, outlier], and\nnormal_label=0, outlier_label=1, then the aggregated label will be\noutlier (1) because outliers have a majority (3 vs 2).\n\nArgs:\nnormal_label (int): The integer label for normal predictions. Defaults to 0.\noutlier_label (int): The integer label for outlier predictions. Defaults to\n1.\ntie_breaker (int): The label to return if there is a tie in votes.\nDefaults to 0 (normal_label).\n**kwargs: Additional keyword arguments to pass to the base\n`LabelAggregation` class.", "source": "github-repos"}
{"code": "def _validate_input_state(quantum_state):\n    rho = np.asarray(quantum_state)\n    if (rho.ndim == 1):\n        rho = np.outer(rho, np.conj(rho))\n    shape = np.shape(rho)\n    if ((len(shape) != 2) or (shape[0] != shape[1])):\n        raise VisualizationError('Input is not a valid quantum state.')\n    num = int(np.log2(rho.shape[0]))\n    if ((2 ** num) != rho.shape[0]):\n        raise VisualizationError('Input is not a multi-qubit quantum state.')\n    return rho", "docstring": "Validates the input to state visualization functions.\n\nArgs:\nquantum_state (ndarray): Input state / density matrix.\nReturns:\nrho: A 2d numpy array for the density matrix.\nRaises:\nVisualizationError: Invalid input.", "source": "codesearchnet"}
{"code": "def get(name):\n    \n    for matcher in matchers:\n        if matcher.__name__ == name or getattr(matcher, 'name', None) == name:\n            return matcher", "docstring": "Returns a matcher instance by class or alias name.\n\nArguments:\nname (str): matcher class name or alias.\n\nReturns:\nmatcher: found matcher instance, otherwise ``None``.", "source": "juraj-google-style"}
{"code": "def gumbel_softmax(x,\n                   z_size,\n                   mode,\n                   softmax_k=0,\n                   temperature_warmup_steps=150000,\n                   summary=True,\n                   name=None):\n  \n  with tf.variable_scope(name, default_name=\"gumbel_softmax\"):\n    m = tf.layers.dense(x, 2**z_size, name=\"mask\")\n    if softmax_k > 0:\n      m, kl = top_k_softmax(m, softmax_k)\n      return m, m, 1.0 - tf.reduce_mean(kl)\n    logsm = tf.nn.log_softmax(m)\n\n    \n    gumbel_samples = gumbel_sample(common_layers.shape_list(m))\n    steps = temperature_warmup_steps\n    gumbel_samples *= common_layers.inverse_exp_decay(steps \n    temperature = 1.2 - common_layers.inverse_lin_decay(steps)\n\n    \n    temperature = tf.cond(\n        tf.less(tf.random_uniform([]), 0.9), lambda: temperature,\n        lambda: tf.random_uniform([], minval=0.5, maxval=1.0))\n    s = tf.nn.softmax((logsm + gumbel_samples) / temperature)\n    m = tf.nn.softmax(m)\n    kl = -tf.reduce_max(logsm, axis=-1)\n\n    if summary:\n      tf.summary.histogram(\"max-log\", tf.reshape(kl, [-1]))\n\n    \n    maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])\n    maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))\n\n    \n    distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot\n    d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)\n    d_variance = tf.reduce_mean(\n        tf.squared_difference(distrib, d_mean), axis=[0])\n    d_dev = -tf.reduce_mean(d_variance)\n    ret = s\n\n    if mode != tf.estimator.ModeKeys.TRAIN:\n      ret = tf.reshape(maxvhot, common_layers.shape_list(s))  \n    return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002", "docstring": "Gumbel softmax discretization bottleneck.\n\nArgs:\nx: Input to the discretization bottleneck.\nz_size: Number of bits, where discrete codes range from 1 to 2**z_size.\nmode: tf.estimator.ModeKeys.\nsoftmax_k: If > 0 then do top-k softmax.\ntemperature_warmup_steps: Number of steps it takes to decay temperature to\n0.\nsummary: Whether to write summaries.\nname: Name for the bottleneck scope.\n\nReturns:\nEmbedding function, discrete code, and loss.", "source": "juraj-google-style"}
{"code": "def register_entity(self, entity_value, entity_type, alias_of=None):\n    if alias_of:\n        self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))\n    else:\n        self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))\n        self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))", "docstring": "Register an entity to be tagged in potential parse results\n\nArgs:\nentity_value(str): the value/proper name of an entity instance (Ex: \"The Big Bang Theory\")\nentity_type(str): the type/tag of an entity instance (Ex: \"Television Show\")", "source": "codesearchnet"}
{"code": "def zero_d_graph_to_molecule_graph(bonded_structure, graph):\n    import networkx as nx\n    seen_indices = []\n    sites = []\n    start_index = list(graph.nodes())[0]\n    queue = [(start_index, (0, 0, 0), bonded_structure.structure[start_index])]\n    while (len(queue) > 0):\n        (comp_i, image_i, site_i) = queue.pop(0)\n        if (comp_i in [x[0] for x in seen_indices]):\n            raise ValueError('Graph component is not 0D')\n        seen_indices.append((comp_i, image_i))\n        sites.append(site_i)\n        for site_j in bonded_structure.get_connected_sites(comp_i, jimage=image_i):\n            if (((site_j.index, site_j.jimage) not in seen_indices) and ((site_j.index, site_j.jimage, site_j.site) not in queue)):\n                queue.append((site_j.index, site_j.jimage, site_j.site))\n    indices_ordering = np.argsort([x[0] for x in seen_indices])\n    sorted_sites = np.array(sites, dtype=object)[indices_ordering]\n    sorted_graph = nx.convert_node_labels_to_integers(graph, ordering='sorted')\n    mol = Molecule([s.specie for s in sorted_sites], [s.coords for s in sorted_sites])\n    mol_graph = MoleculeGraph.with_edges(mol, nx.Graph(sorted_graph).edges())\n    return mol_graph", "docstring": "Converts a zero-dimensional networkx Graph object into a MoleculeGraph.\n\nImplements a similar breadth-first search to that in\ncalculate_dimensionality_of_site().\n\nArgs:\nbonded_structure (StructureGraph): A structure with bonds, represented\nas a pymatgen structure graph. For example, generated using the\nCrystalNN.get_bonded_structure() method.\ngraph (nx.Graph): A networkx `Graph` object for the component of\ninterest.\n\nReturns:\n(MoleculeGraph): A MoleculeGraph object of the component.", "source": "codesearchnet"}
{"code": "def __best_intent(self, parse_result, context=[]):\n    best_intent = None\n    best_tags = None\n    context_as_entities = [{'entities': [c]} for c in context]\n    for intent in self.intent_parsers:\n        (i, tags) = intent.validate_with_tags((parse_result.get('tags') + context_as_entities), parse_result.get('confidence'))\n        if ((not best_intent) or (i and (i.get('confidence') > best_intent.get('confidence')))):\n            best_intent = i\n            best_tags = tags\n    return (best_intent, best_tags)", "docstring": "Decide the best intent\n\nArgs:\nparse_result(list): results used to match the best intent.\ncontext(list): ?\n\nReturns:\nbest_intent, best_tags:\nbest_intent : The best intent for given results\nbest_tags : The Tags for result", "source": "codesearchnet"}
{"code": "class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):\n\n    def __init__(self, max_length: int, eos_token_id: int):\n        self.max_length = max_length\n        if eos_token_id < 0:\n            raise ValueError(f'The forced eos token id must be a non-negative integer, got {eos_token_id}')\n        self.eos_token_id = eos_token_id\n\n    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:\n        if cur_len == self.max_length - 1:\n            batch_size, num_tokens = scores.shape\n            scores = tf.zeros((batch_size, 1))\n            if self.eos_token_id > 0:\n                scores = tf.concat((tf.broadcast_to(-float('inf'), (batch_size, self.eos_token_id)), scores), axis=-1)\n            if self.eos_token_id < num_tokens - 1:\n                scores = tf.concat((scores, tf.broadcast_to(-float('inf'), (batch_size, num_tokens - 1 - self.eos_token_id))), axis=-1)\n        return scores", "docstring": "[`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.\n\nArgs:\nmax_length (`int`):\nThe maximum length of the sequence to be generated.\neos_token_id (`int`):\nThe id of the token to force as the last generated token when `max_length` is reached.", "source": "github-repos"}
{"code": "def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[str, List[str]]:\n    if frameworks is None:\n        frameworks = get_default_frameworks()\n    modules = {'pt': auto_module.modeling_auto if is_torch_available() else None, 'tf': auto_module.modeling_tf_auto if is_tf_available() else None, 'flax': auto_module.modeling_flax_auto if is_flax_available() else None}\n    model_classes = {}\n    for framework in frameworks:\n        new_model_classes = []\n        if modules[framework] is None:\n            raise ValueError(f'You selected {framework} in the frameworks, but it is not installed.')\n        model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None]\n        for model_mapping_name in model_mappings:\n            model_mapping = getattr(modules[framework], model_mapping_name)\n            if model_type in model_mapping:\n                new_model_classes.append(model_mapping[model_type])\n        if len(new_model_classes) > 0:\n            model_classes[framework] = list(set(new_model_classes))\n    return model_classes", "docstring": "Retrieve the model classes associated to a given model.\n\nArgs:\nmodel_type (`str`): A valid model type (like \"bert\" or \"gpt2\")\nframeworks (`List[str]`, *optional*):\nThe frameworks to look for. Will default to `[\"pt\", \"tf\", \"flax\"]`, passing a smaller list will restrict\nthe classes returned.\n\nReturns:\n`Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated to\nthat framework as values.", "source": "github-repos"}
{"code": "def _ConvertAttributeValueToDict(cls, attribute_value):\n    \n    if isinstance(attribute_value, py2to3.BYTES_TYPE):\n      encoded_value = binascii.b2a_qp(attribute_value)\n      encoded_value = codecs.decode(encoded_value, 'ascii')\n      attribute_value = {\n          '__type__': 'bytes',\n          'stream': '{0:s}'.format(encoded_value)\n      }\n\n    elif isinstance(attribute_value, (list, tuple)):\n      json_list = []\n      for list_element in attribute_value:\n        json_dict = cls._ConvertAttributeValueToDict(list_element)\n        json_list.append(json_dict)\n\n      if isinstance(attribute_value, list):\n        attribute_value = json_list\n      else:\n        attribute_value = {\n            '__type__': 'tuple',\n            'values': json_list\n        }\n\n    elif isinstance(attribute_value, collections.Counter):\n      attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)\n\n    elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):\n      attribute_value = cls._ConvertPathSpecToDict(attribute_value)\n\n    elif isinstance(attribute_value, containers_interface.AttributeContainer):\n      attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)\n\n    return attribute_value", "docstring": "Converts an attribute value into a JSON dictionary.\n\nArgs:\nattribute_value (object): an attribute value.\n\nReturns:\ndict|list: The JSON serialized object which can be a dictionary or a list.", "source": "juraj-google-style"}
{"code": "def assert_integer_v2(x, message=None, name=None):\n    assert_integer(x=x, message=message, name=name)", "docstring": "Assert that `x` is of integer dtype.\n\nIf `x` has a non-integer type, `message`, as well as the dtype of `x` are\nprinted, and `InvalidArgumentError` is raised.\n\nThis can always be checked statically, so this method returns nothing.\n\nArgs:\nx: A `Tensor`.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional). Defaults to \"assert_integer\".\n\nRaises:\nTypeError:  If `x.dtype` is not a non-quantized integer type.", "source": "github-repos"}
{"code": "def depth_february_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_february_average_ground_temperature`'.format(value))\n\n        self._depth_february_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_february_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_february_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _eval(self, tensor):\n    name = tensor if isinstance(tensor, str) else tensor.name\n    index = '0'\n    if ':' in name:\n        name, index = name.split(':')\n    if resource_variables_toggle.resource_variables_enabled():\n        name = name + '/Read/ReadVariableOp'\n    return self.evaluate(name + ':' + index)", "docstring": "Evaluate a tensor.\n\nTakes care of the variations between graphs produced with and without\nresource variables when determining the name of the operation to run.\n\nArgs:\ntensor: The tensor to evaluate, or a string with the tensor name.\n\nReturns:\nThe evaluated tensor as a numpy array.", "source": "github-repos"}
{"code": "def slice_naive(self, key):\n        \n        cls = self.__class__\n        key = check_key(self, key)\n        enum = pd.Series(range(len(self)))\n        enum.index = self.index\n        values = self.field_values[enum[key].values]\n        data = self.loc[key]\n        return cls(data, field_values=values)", "docstring": "Naively (on index) slice the field data and values.\n\nArgs:\nkey: Int, slice, or iterable to select data and values\n\nReturns:\nfield: Sliced field object", "source": "juraj-google-style"}
{"code": "def execute(self, triple_map, output, **kwargs):\n        \n        sparql = PREFIX + triple_map.logicalSource.query.format(\n            **kwargs)\n        bindings = self.__get_bindings__(sparql)\n        iterator = str(triple_map.logicalSource.iterator)\n        for binding in bindings:\n            entity_dict = binding.get(iterator)\n            if isinstance(entity_dict, rdflib.term.Node):\n                entity = entity_dict\n            elif isinstance(entity_dict, dict):\n                raw_value = entity_dict.get('value')\n                if entity_dict.get('type').startswith('bnode'):\n                    entity = rdflib.BNode(raw_value)\n                else:\n                    entity = rdflib.URIRef(raw_value)\n            if triple_map.subjectMap.class_ is not None:\n                output.add(\n                    (entity,\n                     rdflib.RDF.type,\n                     triple_map.subjectMap.class_))\n\n            sparql_query = self.__construct_compound_query__(\n                triple_map).format(**kwargs)\n            properties = self.__get_bindings__(sparql_query)\n            for pred_obj_map in triple_map.predicateObjectMap:\n                predicate = pred_obj_map.predicate\n                if pred_obj_map.constant is not None:\n                    output.add(\n                        (entity, predicate, pred_obj_map.constant))\n                    continue\n                if \"\n                    key = str(predicate).split(\"\n                else:\n                    key = str(predicate).split(\"/\")[-1]\n\n                for property_ in properties:\n                    if key in property_.keys():\n                        info = {\"about\": property_.get(key)}\n                        object_ = __get_object__(info)\n                        output.add((entity, predicate, object_))", "docstring": "Method iterates through triple map's predicate object maps\nand processes query.\n\nArgs:\ntriple_map(SimpleNamespace): Triple Map", "source": "juraj-google-style"}
{"code": "def stage_tc_create_tag(self, tag, resource):\n        \n        tag_resource = resource.tags(self.tcex.safetag(tag))\n        tag_resource.http_method = 'POST'\n        t_response = tag_resource.request()\n        if t_response.get('status') != 'Success':\n            self.log.warning(\n                '[tcex] Failed adding tag \"{}\" ({}).'.format(tag, t_response.get('response').text)\n            )", "docstring": "Add a tag to a resource.\n\nArgs:\ntag (str): The tag to be added to the resource.\nresource (obj): An instance of tcex resource class.", "source": "juraj-google-style"}
{"code": "def parse_xhtml_reaction_notes(entry):\n    properties = {}\n    if (entry.xml_notes is not None):\n        cobra_notes = dict(parse_xhtml_notes(entry))\n        if ('subsystem' in cobra_notes):\n            properties['subsystem'] = cobra_notes['subsystem']\n        if ('gene_association' in cobra_notes):\n            properties['genes'] = cobra_notes['gene_association']\n        if ('ec_number' in cobra_notes):\n            properties['ec'] = cobra_notes['ec_number']\n        if ('authors' in cobra_notes):\n            properties['authors'] = [a.strip() for a in cobra_notes['authors'].split(';')]\n        if ('confidence' in cobra_notes):\n            try:\n                value = int(cobra_notes['confidence'])\n            except ValueError:\n                logger.warning('Unable to parse confidence level for {} as an integer: {}'.format(entry.id, cobra_notes['confidence']))\n                value = cobra_notes['confidence']\n            properties['confidence'] = value\n    return properties", "docstring": "Return reaction properties defined in the XHTML notes.\n\nOlder SBML models often define additional properties in the XHTML notes\nsection because structured methods for defining properties had not been\ndeveloped. This will try to parse the following properties: ``SUBSYSTEM``,\n``GENE ASSOCIATION``, ``EC NUMBER``, ``AUTHORS``, ``CONFIDENCE``.\n\nArgs:\nentry: :class:`SBMLReactionEntry`.", "source": "codesearchnet"}
{"code": "def _FormatExpression(self, frame, expression):\n    \n    rc, value = _EvaluateExpression(frame, expression)\n    if not rc:\n      message = _FormatMessage(value['description']['format'],\n                               value['description'].get('parameters'))\n      return '<' + message + '>'\n\n    return self._FormatValue(value)", "docstring": "Evaluates a single watched expression and formats it into a string form.\n\nIf expression evaluation fails, returns error message string.\n\nArgs:\nframe: Python stack frame in which the expression is evaluated.\nexpression: string expression to evaluate.\n\nReturns:\nFormatted expression value that can be used in the log message.", "source": "juraj-google-style"}
{"code": "def _DiscoverElementTypeFromLocalname(self, type_localname):\n    \n    elem_type = None\n    last_exception = None\n    for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values():\n      try:\n        elem_type = self.zeep_client.get_type(\n            '{%s}%s' % (ns_prefix, type_localname))\n      except zeep.exceptions.LookupError as e:\n        last_exception = e\n        continue\n      break\n    if not elem_type:\n      raise last_exception\n    return elem_type", "docstring": "Searches all namespaces for a type by name.\n\nArgs:\ntype_localname: The name of the type.\n\nReturns:\nA fully qualified SOAP type with the specified name.\n\nRaises:\nA zeep.exceptions.LookupError if the type cannot be found in any\nnamespace.", "source": "juraj-google-style"}
{"code": "def app_trim_memory(self, pid: int or str, level: str = 'RUNNING_LOW') -> None:\n        \n        _, error = self._execute('-s', self.device_sn, 'shell',\n                                 'am', 'send-trim-memory', str(pid), level)\n        if error and error.startswith('Error'):\n            raise ApplicationsException(error.split(':', 1)[-1].strip())", "docstring": "Trim memory.\n\nArgs:\nlevel: HIDDEN | RUNNING_MODERATE | BACKGROUNDRUNNING_LOW | \\\nMODERATE | RUNNING_CRITICAL | COMPLETE", "source": "juraj-google-style"}
{"code": "def IsComposite(self):\n    return (bool(self.condition) or (self.member_data_type_definition and self.member_data_type_definition.IsComposite()))", "docstring": "Determines if the data type is composite.\n\nA composite data type consists of other data types.\n\nReturns:\nbool: True if the data type is composite, False otherwise.", "source": "codesearchnet"}
{"code": "def _finish(self, update_ops, name_scope):\n    return control_flow_ops.group(*update_ops, name=name_scope)", "docstring": "Do what is needed to finish the update.\n\nThis is called with the `name_scope` using the \"name\" that\nusers have chosen for the application of gradients.\n\nArgs:\nupdate_ops: List of `Operation` objects to update variables.  This list\ncontains the values returned by the `_apply_dense()` and\n`_apply_sparse()` calls.\nname_scope: String.  Name to use for the returned operation.\n\nReturns:\nThe operation to apply updates.", "source": "github-repos"}
{"code": "def _make_rebatch_fn(self, dataset, num_workers, num_replicas_in_sync):\n    if num_replicas_in_sync % num_workers:\n        raise ValueError('tf.distribute expects every worker to have the same number of replicas. However, encountered `num_replicas_in_sync` ({}) that cannot be divided by `num_workers` ({})'.format(num_replicas_in_sync, num_workers))\n    num_replicas_per_worker = num_replicas_in_sync \n    with ops.colocate_with(dataset._variant_tensor):\n        batch_size = distribute.compute_batch_size(dataset)\n\n    def rebatch_fn(dataset, worker_index):\n        try:\n\n            def apply_rebatch():\n                batch_sizes = distribute.batch_sizes_for_worker(batch_size, num_workers, num_replicas_per_worker, worker_index)\n                return dataset.rebatch(batch_sizes).prefetch(num_replicas_per_worker)\n\n            def apply_legacy_rebatch():\n                return distribute._LegacyRebatchDataset(dataset, num_replicas_in_sync).prefetch(num_replicas_per_worker)\n            with ops.colocate_with(dataset._variant_tensor):\n                return tf_cond.cond(math_ops.not_equal(batch_size, -1), true_fn=apply_rebatch, false_fn=apply_legacy_rebatch)\n        except errors.InvalidArgumentError as e:\n            if 'without encountering a batch' in str(e):\n                six.reraise(ValueError, ValueError('Call the `batch` method on the input Dataset in order to be able to split your input across {} replicas.\\n Please see the tf.distribute.Strategy guide. {}'.format(num_replicas_in_sync, e)), sys.exc_info()[2])\n            else:\n                raise\n    return rebatch_fn", "docstring": "Returns a callable that rebatches the input dataset.\n\nArgs:\ndataset: A `tf.data.Dataset` representing the dataset to be distributed.\nnum_workers: An integer representing the number of workers to distribute\n`dataset` among.\nnum_replicas_in_sync: An integer representing the number of replicas in\nsync across all workers.", "source": "github-repos"}
{"code": "def add_event_handler(self, callback, event=None):\n    builders = events._get_handlers(callback)\n    if (builders is not None):\n        for event in builders:\n            self._event_builders.append((event, callback))\n        return\n    if isinstance(event, type):\n        event = event()\n    elif (not event):\n        event = events.Raw()\n    self._event_builders.append((event, callback))", "docstring": "Registers the given callback to be called on the specified event.\n\nArgs:\ncallback (`callable`):\nThe callable function accepting one parameter to be used.\n\nNote that if you have used `telethon.events.register` in\nthe callback, ``event`` will be ignored, and instead the\nevents you previously registered will be used.\n\nevent (`_EventBuilder` | `type`, optional):\nThe event builder class or instance to be used,\nfor instance ``events.NewMessage``.\n\nIf left unspecified, `telethon.events.raw.Raw` (the\n:tl:`Update` objects with no further processing) will\nbe passed instead.", "source": "codesearchnet"}
{"code": "def _delete_batch(self, container, blobs):\n    container_client = self.client.get_container_client(container)\n    results = {}\n    for blob in blobs:\n        try:\n            response = container_client.delete_blob(blob)\n            results[container, blob] = response\n        except ResourceNotFoundError as e:\n            results[container, blob] = e.status_code\n    return results", "docstring": "A helper method. Azure Blob Storage Python Client allows batch\ndeletions for blobs within the same container.\n\nArgs:\ncontainer: container name.\nblobs: list of blobs to be deleted.\n\nReturns:\nDictionary of the form {(container, blob): error}, where error is\nNone if the operation succeeded.", "source": "github-repos"}
{"code": "def finish(queue_name, task_id, owner, error=False):\n    task = _get_task_with_policy(queue_name, task_id, owner)\n    if (not (task.status == WorkQueue.LIVE)):\n        logging.warning('Finishing already dead task. queue=%r, task_id=%r, owner=%r, status=%r', task.queue_name, task_id, owner, task.status)\n        return False\n    if (not error):\n        task.status = WorkQueue.DONE\n    else:\n        task.status = WorkQueue.ERROR\n    task.finished = datetime.datetime.utcnow()\n    db.session.add(task)\n    signals.task_updated.send(app, task=task)\n    return True", "docstring": "Marks a work item on a queue as finished.\n\nArgs:\nqueue_name: Name of the queue the work item is on.\ntask_id: ID of the task that is finished.\nowner: Who or what has the current lease on the task.\nerror: Defaults to false. True if this task's final state is an error.\n\nReturns:\nTrue if the task has been finished for the first time; False if the\ntask was already finished.\n\nRaises:\nTaskDoesNotExistError if the task does not exist.\nLeaseExpiredError if the lease is no longer active.\nNotOwnerError if the specified owner no longer owns the task.", "source": "codesearchnet"}
{"code": "def create_reverse_dependency_map() -> Dict[str, List[str]]:\n    cache = {}\n    example_deps, examples = init_test_examples_dependencies()\n    all_modules = list(PATH_TO_TRANFORMERS.glob('***.py')) + examples\n    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]\n    direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}\n    direct_deps.update(example_deps)\n    something_changed = True\n    while something_changed:\n        something_changed = False\n        for m in all_modules:\n            for d in direct_deps[m]:\n                if d.endswith('__init__.py'):\n                    continue\n                if d not in direct_deps:\n                    raise ValueError(f'KeyError:{d}. From {m}')\n                new_deps = set(direct_deps[d]) - set(direct_deps[m])\n                if len(new_deps) > 0:\n                    direct_deps[m].extend(list(new_deps))\n                    something_changed = True\n    reverse_map = collections.defaultdict(list)\n    for m in all_modules:\n        for d in direct_deps[m]:\n            reverse_map[d].append(m)\n    for m in [f for f in all_modules if f.endswith('__init__.py')]:\n        direct_deps = get_module_dependencies(m, cache=cache)\n        deps = sum([reverse_map[d] for d in direct_deps if not d.endswith('__init__.py')], direct_deps)\n        reverse_map[m] = list(set(deps) - {m})\n    return reverse_map", "docstring": "Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively.\n\nReturns:\n`Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames\ndepending on it recursively. This way the tests impacted by a change in file A are the test files in the list\ncorresponding to key A in this result.", "source": "github-repos"}
{"code": "def verify_fileobj(fileobj, writable=False):\n    \n\n    try:\n        data = fileobj.read(0)\n    except Exception:\n        if not hasattr(fileobj, \"read\"):\n            raise ValueError(\"%r not a valid file object\" % fileobj)\n        raise ValueError(\"Can't read from file object %r\" % fileobj)\n\n    if not isinstance(data, bytes):\n        raise ValueError(\n            \"file object %r not opened in binary mode\" % fileobj)\n\n    if writable:\n        try:\n            fileobj.write(b\"\")\n        except Exception:\n            if not hasattr(fileobj, \"write\"):\n                raise ValueError(\"%r not a valid file object\" % fileobj)\n            raise ValueError(\"Can't write to file object %r\" % fileobj)", "docstring": "Verifies that the passed fileobj is a file like object which\nwe can use.\n\nArgs:\nwritable (bool): verify that the file object is writable as well\n\nRaises:\nValueError: In case the object is not a file object that is readable\n(or writable if required) or is not opened in bytes mode.", "source": "juraj-google-style"}
{"code": "def log(x):\n    if any_symbolic_tensors((x,)):\n        return Log().symbolic_call(x)\n    return backend.numpy.log(x)", "docstring": "Natural logarithm, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise natural logarithm of `x`.", "source": "github-repos"}
{"code": "def ParseZeitgeistEventRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = ZeitgeistActivityEventData()\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')\n\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_UNKNOWN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a zeitgeist event row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _integrate_parameter(self, x, x_is_constant, t0, t1, name=None):\n    return x * (t1 - t0) if x_is_constant else x.integrate(t0, t1, name)", "docstring": "Returns the integral of x(t).dt over the interval [t0, t1].\n\nArgs:\nx: Scalar real `Tensor` of shape [`batch_shape`] or an instance of a\nleft-continuous `PiecewiseConstantFunc`. The function to be integrated.\nx_is_constant: 'bool' which is True if x is a Scalar real `Tensor`.\nt0: A `Tensor` which is broadcastable to [`batch_shape`, `k`], where `k`\nis the number of intervals to evaluate the integral over. The start\ntimes of the `k` intervals.\nt1: A `Tensor` which is broadcastable to [`batch_shape`, `k`], where `k`\nis the number of intervals to evaluate the integral over. The end\ntimes of the `k` intervals.\nname: Str. The name to give this op.\n\nReturns:\nA `Tensor` of shape [`batch_shape`, `k`] with the integrals of x over the\nintervals [`t0`, `t1`].", "source": "github-repos"}
{"code": "def generate_timing_breakdown_plot(timing_stats, scaling_var, title, description, plot_file):\n    \n    \n    cmap_data = colormaps._viridis_data\n    n_subplots = len(six.viewkeys(timing_stats))\n    fig, ax = plt.subplots(1, n_subplots+1, figsize=(3*(n_subplots+2), 5))\n    for plot_num, p_count in enumerate(\n            sorted(six.iterkeys(timing_stats), key=functions.sort_processor_counts)):\n\n        case_data = timing_stats[p_count]\n        all_timers = set(six.iterkeys(case_data['model'])) | set(six.iterkeys(case_data['bench']))\n        all_timers = sorted(list(all_timers), reverse=True)\n        cmap_stride = int(len(cmap_data)/(len(all_timers)+1))\n        colors = {all_timers[i]: cmap_data[i*cmap_stride] for i in range(len(all_timers))}\n\n        sub_ax = plt.subplot(1, n_subplots+1, plot_num+1)\n        sub_ax.set_title(p_count)\n        sub_ax.set_ylabel('Runtime (s)')\n        for case, var_data in case_data.items():\n            if case == 'bench':\n                bar_num = 2\n            else:\n                bar_num = 1\n\n            offset = 0\n            if var_data != {}:\n                for var in sorted(six.iterkeys(var_data), reverse=True):\n                    if var != scaling_var:\n                        plt.bar(bar_num, var_data[var]['mean'], 0.8, bottom=offset,\n                                color=colors[var], label=(var if bar_num == 1 else '_none'))\n                        offset += var_data[var]['mean']\n\n                plt.bar(bar_num, var_data[scaling_var]['mean']-offset, 0.8, bottom=offset,\n                        color=colors[scaling_var], label=(scaling_var if bar_num == 1 else '_none'))\n\n                sub_ax.set_xticks([1.4, 2.4])\n                sub_ax.set_xticklabels(('test', 'bench'))\n\n    plt.legend(loc=6, bbox_to_anchor=(1.05, 0.5))\n    plt.tight_layout()\n\n    sub_ax = plt.subplot(1, n_subplots+1, n_subplots+1)\n    hid_bar = plt.bar(1, 100)\n    for group in hid_bar:\n            group.set_visible(False)\n    sub_ax.set_visible(False)\n\n    if livvkit.publish:\n        plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)\n    plt.savefig(plot_file)\n    plt.close()\n    return elements.image(title, description, os.path.basename(plot_file))", "docstring": "Description\n\nArgs:\ntiming_stats: a dictionary of the form\n{proc_count : {model||bench : { var : { stat : val }}}}\nscaling_var: the variable that accounts for the total runtime\ntitle: the title of the plot\ndescription: the description of the plot\nplot_file: the file to write the plot out to\nReturns:\nan image element containing the plot file and metadata", "source": "juraj-google-style"}
{"code": "def save_forensic_reports_to_splunk(self, forensic_reports):\n        \n        logger.debug(\"Saving forensic reports to Splunk\")\n        if type(forensic_reports) == dict:\n            forensic_reports = [forensic_reports]\n\n        if len(forensic_reports) < 1:\n            return\n\n        json_str = \"\"\n        for report in forensic_reports:\n            data = self._common_data.copy()\n            data[\"sourcetype\"] = \"dmarc:forensic\"\n            timestamp = human_timestamp_to_timestamp(\n                report[\"arrival_date_utc\"])\n            data[\"time\"] = timestamp\n            data[\"event\"] = report.copy()\n            json_str += \"{0}\\n\".format(json.dumps(data))\n\n        if not self.session.verify:\n            logger.debug(\"Skipping certificate verification for Splunk HEC\")\n        try:\n            response = self.session.post(self.url, data=json_str,\n                                         timeout=self.timeout)\n            response = response.json()\n        except Exception as e:\n            raise SplunkError(e.__str__())\n        if response[\"code\"] != 0:\n            raise SplunkError(response[\"text\"])", "docstring": "Saves forensic DMARC reports to Splunk\n\nArgs:\nforensic_reports (list):  A list of forensic report dictionaries\nto save in Splunk", "source": "juraj-google-style"}
{"code": "def union(df, other, index=False, keep='first'):\n    \n    validate_set_ops(df, other)\n    stacked = df.append(other)\n    if index:\n        stacked_reset_indexes = stacked.reset_index()\n        index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]\n        index_name = df.index.names\n        return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)\n        return_df.index.names = index_name\n        return return_df\n    else:\n        return stacked.drop_duplicates(keep=keep)", "docstring": "Returns rows that appear in either DataFrame.\n\nArgs:\ndf (pandas.DataFrame): data passed in through the pipe.\nother (pandas.DataFrame): other DataFrame to use for set operation with\nthe first.\n\nKwargs:\nindex (bool): Boolean indicating whether to consider the pandas index\nas part of the set operation (default `False`).\nkeep (str): Indicates which duplicate should be kept. Options are `'first'`\nand `'last'`.", "source": "juraj-google-style"}
{"code": "def copy(self):\n    fs = self.__class__.__new__(self.__class__)\n    fs.__dict__ = self.__dict__.copy()\n    fs._frameSet = None\n    if (self._frameSet is not None):\n        fs._frameSet = self._frameSet.copy()\n    return fs", "docstring": "Create a deep copy of this sequence\n\nReturns:\n:obj:`.FileSequence`:", "source": "codesearchnet"}
{"code": "def Analyze(self, hashes):\n    logger.debug('Opening connection to {0:s}:{1:d}'.format(self._host, self._port))\n    nsrl_socket = self._GetSocket()\n    if (not nsrl_socket):\n        self.SignalAbort()\n        return []\n    hash_analyses = []\n    for digest in hashes:\n        response = self._QueryHash(nsrl_socket, digest)\n        if (response is None):\n            continue\n        hash_analysis = interface.HashAnalysis(digest, response)\n        hash_analyses.append(hash_analysis)\n    nsrl_socket.close()\n    logger.debug('Closed connection to {0:s}:{1:d}'.format(self._host, self._port))\n    return hash_analyses", "docstring": "Looks up hashes in nsrlsvr.\n\nArgs:\nhashes (list[str]): hash values to look up.\n\nReturns:\nlist[HashAnalysis]: analysis results, or an empty list on error.", "source": "codesearchnet"}
{"code": "def avg_dicts(dictin1, dictin2, dropmissing=True):\n    dictout = dict()\n    for key in dictin1:\n        if (key in dictin2):\n            dictout[key] = ((dictin1[key] + dictin2[key]) / 2)\n        elif (not dropmissing):\n            dictout[key] = dictin1[key]\n    if (not dropmissing):\n        for key in dictin2:\n            if (key not in dictin1):\n                dictout[key] = dictin2[key]\n    return dictout", "docstring": "Create a new dictionary from two dictionaries by averaging values\n\nArgs:\ndictin1 (DictUpperBound): First input dictionary\ndictin2 (DictUpperBound): Second input dictionary\ndropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True.\n\nReturns:\nDict: Dictionary with values being average of 2 input dictionaries", "source": "codesearchnet"}
{"code": "def Analyze(self, data):\n    if (not self._rules):\n        return\n    try:\n        self._matches = self._rules.match(data=data, timeout=self._MATCH_TIMEOUT)\n    except yara.YaraTimeoutError:\n        logger.error('Could not process file within timeout: {0:d}'.format(self._MATCH_TIMEOUT))\n    except yara.YaraError as exception:\n        logger.error('Error processing file with Yara: {0!s}.'.format(exception))", "docstring": "Analyzes a block of data, attempting to match Yara rules to it.\n\nArgs:\ndata(bytes): a block of data.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(APFSFile, self).__init__(resolver_context)\n    self._file_system = None\n    self._fsapfs_file_entry = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def create_contentkey_authorization_policy(access_token, content):\n    path = '/ContentKeyAuthorizationPolicies'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = content\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Content Key Authorization Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\ncontent (str): Content Payload.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def summarize(self, document, Abstractor, similarity_filter=None):\n        \n        if isinstance(document, str) is False:\n            raise TypeError(\"The type of document must be str.\")\n\n        if isinstance(Abstractor, AbstractableDoc) is False:\n            raise TypeError(\"The type of Abstractor must be AbstractableDoc.\")\n\n        if isinstance(similarity_filter, SimilarityFilter) is False and similarity_filter is not None:\n            raise TypeError(\"The type of similarity_filter must be SimilarityFilter.\")\n\n        normalized_sentences = self.listup_sentence(document)\n\n        \n        if similarity_filter is not None:\n            normalized_sentences = similarity_filter.similar_filter_r(normalized_sentences)\n\n        self.tokenize(document)\n        words = self.token\n\n        fdist = nltk.FreqDist(words)\n        top_n_words = [w[0] for w in fdist.items()][:self.target_n]\n        scored_list = self.__closely_associated_score(normalized_sentences, top_n_words)\n        filtered_list = Abstractor.filter(scored_list)\n        result_list = [normalized_sentences[idx] for (idx, score) in filtered_list]\n        result_dict = {\n            \"summarize_result\": result_list,\n            \"scoring_data\": filtered_list\n        }\n        return result_dict", "docstring": "Execute summarization.\n\nArgs:\ndocument:           The target document.\nAbstractor:         The object of AbstractableDoc.\nsimilarity_filter   The object of SimilarityFilter.\n\nReturns:\ndict data.\n- \"summarize_result\": The list of summarized sentences.,\n- \"scoring_data\":     The list of scores.", "source": "juraj-google-style"}
{"code": "def remove_network(self, net_id):\n        \n        url = self._url(\"/networks/{0}\", net_id)\n        res = self._delete(url)\n        self._raise_for_status(res)", "docstring": "Remove a network. Similar to the ``docker network rm`` command.\n\nArgs:\nnet_id (str): The network's id", "source": "juraj-google-style"}
{"code": "def get_info_dict(info_line):\n    \n    \n    variant_info = {}\n    for raw_info in info_line.split(';'):\n        splitted_info = raw_info.split('=')\n        if len(splitted_info) == 2:\n            variant_info[splitted_info[0]] = splitted_info[1]\n        else:\n            variant_info[splitted_info[0]] = True\n    \n    return variant_info", "docstring": "Parse a info field of a variant\n\nMake a dictionary from the info field of a vcf variant.\nKeys are the info keys and values are the raw strings from the vcf\nIf the field only have a key (no value), value of infodict is True.\n\nArgs:\ninfo_line (str): The info field of a vcf variant\nReturns:\ninfo_dict (dict): A INFO dictionary", "source": "juraj-google-style"}
{"code": "def CopyFromStringISO8601(self, time_string):\n    date_time_values = self._CopyDateTimeFromStringISO8601(time_string)\n    self._CopyFromDateTimeValues(date_time_values)", "docstring": "Copies time elements from an ISO 8601 date and time string.\n\nCurrently not supported:\n* Duration notation: \"P...\"\n* Week notation \"2016-W33\"\n* Date with week number notation \"2016-W33-3\"\n* Date without year notation \"--08-17\"\n* Ordinal date notation \"2016-230\"\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DDThh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the time string is invalid or not supported.", "source": "codesearchnet"}
{"code": "def download_archive(self, name, file_path):\n    uri = ((self.URI + '/archive/') + name)\n    return self._client.download(uri, file_path)", "docstring": "Download archived logs of the OS Volume.\n\nArgs:\nname: Name of the OS Volume.\nfile_path (str): Destination file path.\n\nReturns:\nbool: Indicates if the resource was successfully downloaded.", "source": "codesearchnet"}
{"code": "def _get_flag_int_value(self, wanted_flag_name, default_value):\n    flag_int_value = default_value\n    found, flag_value = self.get_flag_value(wanted_flag_name)\n    if found:\n        try:\n            flag_int_value = int(flag_value)\n        except ValueError:\n            logging.warning('Cannot convert %s to int for flag %s' % (flag_int_value, wanted_flag_name))\n    return flag_int_value", "docstring": "Returns the int value of a TensorTracer flag.\n\nArgs:\nwanted_flag_name: the name of the flag we are looking for.\ndefault_value: the default value for the flag, if not provided.\nReturns:\nthe value of the flag.\nRaises:\nRuntimeError: If supposedly deadcode is reached.", "source": "github-repos"}
{"code": "def remove_showcase(self, showcase):\n    dataset_showcase = self._get_dataset_showcase_dict(showcase)\n    showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)\n    showcase._write_to_hdx('disassociate', dataset_showcase, 'package_id')", "docstring": "Remove dataset from showcase\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _initialize_global_state(self,\n                                 redis_address,\n                                 redis_password=None,\n                                 timeout=20):\n        \n        self.redis_client = services.create_redis_client(\n            redis_address, redis_password)\n        start_time = time.time()\n\n        num_redis_shards = None\n        redis_shard_addresses = []\n\n        while time.time() - start_time < timeout:\n            \n            num_redis_shards = self.redis_client.get(\"NumRedisShards\")\n            if num_redis_shards is None:\n                print(\"Waiting longer for NumRedisShards to be populated.\")\n                time.sleep(1)\n                continue\n            num_redis_shards = int(num_redis_shards)\n            if num_redis_shards < 1:\n                raise Exception(\"Expected at least one Redis shard, found \"\n                                \"{}.\".format(num_redis_shards))\n\n            \n            redis_shard_addresses = self.redis_client.lrange(\n                \"RedisShards\", start=0, end=-1)\n            if len(redis_shard_addresses) != num_redis_shards:\n                print(\"Waiting longer for RedisShards to be populated.\")\n                time.sleep(1)\n                continue\n\n            \n            break\n\n        \n        if time.time() - start_time >= timeout:\n            raise Exception(\"Timed out while attempting to initialize the \"\n                            \"global state. num_redis_shards = {}, \"\n                            \"redis_shard_addresses = {}\".format(\n                                num_redis_shards, redis_shard_addresses))\n\n        \n        self.redis_clients = []\n        for shard_address in redis_shard_addresses:\n            self.redis_clients.append(\n                services.create_redis_client(shard_address.decode(),\n                                             redis_password))", "docstring": "Initialize the GlobalState object by connecting to Redis.\n\nIt's possible that certain keys in Redis may not have been fully\npopulated yet. In this case, we will retry this method until they have\nbeen populated or we exceed a timeout.\n\nArgs:\nredis_address: The Redis address to connect.\nredis_password: The password of the redis server.", "source": "juraj-google-style"}
{"code": "def request_token(self) -> None:\n    response: requests.Response = requests.post(self._TOKEN_URL, auth=HTTPBasicAuth(self._client_id, self._client_key), data={'grant_type': self._GRANT_TYPE}, verify=True)\n    response.raise_for_status()\n    self._token = response.json()\n    self._token_expires_at = (time.time() + self._token['expires_in'])", "docstring": "Requests a new Client Credentials Flow authentication token from the Spotify API\nand stores it in the `token` property of the object.\n\nRaises:\nrequests.HTTPError: If an HTTP error occurred during the request.", "source": "codesearchnet"}
{"code": "def update_course(self, course, enterprise_customer, enterprise_context):\n        \n        course['course_runs'] = self.update_course_runs(\n            course_runs=course.get('course_runs') or [],\n            enterprise_customer=enterprise_customer,\n            enterprise_context=enterprise_context,\n        )\n\n        \n        marketing_url = course.get('marketing_url')\n        if marketing_url:\n            query_parameters = dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer))\n            course.update({'marketing_url': utils.update_query_parameters(marketing_url, query_parameters)})\n\n        \n        course.update(enterprise_context)\n        return course", "docstring": "Update course metadata of the given course and return updated course.\n\nArguments:\ncourse (dict): Course Metadata returned by course catalog API\nenterprise_customer (EnterpriseCustomer): enterprise customer instance.\nenterprise_context (dict): Enterprise context to be added to course runs and URLs..\n\nReturns:\n(dict): Updated course metadata", "source": "juraj-google-style"}
{"code": "def put(self, item):\n        \n        QueueBase.put(self, item)\n        if self.sender:\n            self.sender.start()", "docstring": "Adds the passed in item object to the queue and notifies the :func:`sender` to start an asynchronous\nsend operation by calling :func:`start`.\n\nArgs:\nitem (:class:`contracts.Envelope`) the telemetry envelope object to send to the service.", "source": "juraj-google-style"}
{"code": "def Decrypt(self, encrypted_data):\n    \n    index_split = -(len(encrypted_data) % Blowfish.block_size)\n    if index_split:\n      remaining_encrypted_data = encrypted_data[index_split:]\n      encrypted_data = encrypted_data[:index_split]\n    else:\n      remaining_encrypted_data = b''\n\n    decrypted_data = self._blowfish_cipher.decrypt(encrypted_data)\n\n    return decrypted_data, remaining_encrypted_data", "docstring": "Decrypts the encrypted data.\n\nArgs:\nencrypted_data (bytes): encrypted data.\n\nReturns:\ntuple[bytes,bytes]: decrypted data and remaining encrypted data.", "source": "juraj-google-style"}
{"code": "def size(input, name=None, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    return size_internal(input, name, optimize=True, out_type=out_type)", "docstring": "Returns the size of a tensor.\n\nReturns a 0-D `Tensor` representing the number of elements in `input`\nof type `out_type`. Defaults to tf.int32.\n\nFor example:\n\n```python\nt = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\ntf.size(t)  # 12\n```\n\nArgs:\ninput: A `Tensor` or `SparseTensor`.\nname: A name for the operation (optional).\nout_type: (Optional) The specified non-quantized numeric output type of the\noperation. Defaults to `tf.int32`. (Note: there is an experimental\nflag, `tf_shape_default_int64` that changes the default to `tf.int64`.\nThis is an unsupported, experimental setting that causes known breakages.)\n\nReturns:\nA `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n@compatibility(numpy)\nEquivalent to np.size()\n@end_compatibility", "source": "github-repos"}
{"code": "def ngettext_lazy(singular, plural, n, domain=DEFAULT_DOMAIN):\n    return LazyProxy(ngettext, singular, plural, n, domain=domain, enable_cache=False)", "docstring": "Mark a message with plural forms translateable, and delay the translation\nuntil the message is used.\n\nWorks the same was a `ngettext`, with a delaying functionality similiar to `gettext_lazy`.\n\nArgs:\nsingular (unicode): The singular form of the message.\nplural (unicode): The plural form of the message.\nn (int): The number that is used to decide which form should be used.\ndomain (basestring): The domain of the message. Defaults to 'messages', which\nis the domain where all application messages should be located.\nReturns:\nunicode: The correct pluralization, with the translation being\ndelayed until the message is used.", "source": "codesearchnet"}
{"code": "def next(self):\n    smallest = None\n    for key in self.queue.keys():\n        if (self.queue[key]['status'] == 'queued'):\n            if ((smallest is None) or (key < smallest)):\n                smallest = key\n    return smallest", "docstring": "Get the next processable item of the queue.\n\nA processable item is supposed to have the status `queued`.\n\nReturns:\nNone : If no key is found.\nInt: If a valid entry is found.", "source": "codesearchnet"}
{"code": "def create_update_event(self):\n    events = []\n    for (fields, rules) in iteritems(self._meta.update_messages):\n        if (not isinstance(fields, (list, tuple, set))):\n            fields = (fields,)\n        changed = any([(getattr(self, field) != getattr(self.get_original(), field)) for field in fields])\n        if changed:\n            event = self.create_audit_event(code=rules['code'])\n            event.body = rules['message']\n            event.meta = self.parse_meta(rules['meta'])\n            events.append(event)\n    self.update_event_callback(events)\n    with db.database.atomic():\n        for event in events:\n            event.save()\n    return events", "docstring": "Parse the update messages DSL to insert the data into the Event.\n\nReturns:\nlist[fleaker.peewee.EventStorageMixin]:\nAll the events that were created for the update.", "source": "codesearchnet"}
{"code": "def remove(self, x):\n    with tf.name_scope('pad_reduce/remove'):\n        x_shape = x.get_shape().as_list()\n        x = tf.gather_nd(x, indices=self.nonpad_ids)\n        if (not tf.executing_eagerly()):\n            x.set_shape(([None] + x_shape[1:]))\n    return x", "docstring": "Remove padding from the given tensor.\n\nArgs:\nx (tf.Tensor): of shape [dim_origin,...]\n\nReturns:\na tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin", "source": "codesearchnet"}
{"code": "def center_crop(self, image, size):\n    self._ensure_format_supported(image)\n    if not isinstance(size, tuple):\n        size = (size, size)\n    if is_torch_tensor(image) or isinstance(image, np.ndarray):\n        if image.ndim == 2:\n            image = self.expand_dims(image)\n        image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]\n    else:\n        image_shape = (image.size[1], image.size[0])\n    top = (image_shape[0] - size[0]) \n    bottom = top + size[0]\n    left = (image_shape[1] - size[1]) \n    right = left + size[1]\n    if isinstance(image, PIL.Image.Image):\n        return image.crop((left, top, right, bottom))\n    channel_first = True if image.shape[0] in [1, 3] else False\n    if not channel_first:\n        if isinstance(image, np.ndarray):\n            image = image.transpose(2, 0, 1)\n        if is_torch_tensor(image):\n            image = image.permute(2, 0, 1)\n    if top >= 0 and bottom <= image_shape[0] and (left >= 0) and (right <= image_shape[1]):\n        return image[..., top:bottom, left:right]\n    new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))\n    if isinstance(image, np.ndarray):\n        new_image = np.zeros_like(image, shape=new_shape)\n    elif is_torch_tensor(image):\n        new_image = image.new_zeros(new_shape)\n    top_pad = (new_shape[-2] - image_shape[0]) \n    bottom_pad = top_pad + image_shape[0]\n    left_pad = (new_shape[-1] - image_shape[1]) \n    right_pad = left_pad + image_shape[1]\n    new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image\n    top += top_pad\n    bottom += top_pad\n    left += left_pad\n    right += left_pad\n    new_image = new_image[..., max(0, top):min(new_image.shape[-2], bottom), max(0, left):min(new_image.shape[-1], right)]\n    return new_image", "docstring": "Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the\nsize given, it will be padded (so the returned result has the size asked).\n\nArgs:\nimage (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):\nThe image to resize.\nsize (`int` or `Tuple[int, int]`):\nThe size to which crop the image.\n\nReturns:\nnew_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,\nheight, width).", "source": "github-repos"}
{"code": "def tasks(self, filters=None):\n    if (filters is None):\n        filters = {}\n    filters['service'] = self.id\n    return self.client.api.tasks(filters=filters)", "docstring": "List the tasks in this service.\n\nArgs:\nfilters (dict): A map of filters to process on the tasks list.\nValid filters: ``id``, ``name``, ``node``,\n``label``, and ``desired-state``.\n\nReturns:\n:py:class:`list`: List of task dictionaries.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def range(self, start_row=0, max_rows=None):\n    \n    fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)\n    return iter(datalab.utils.Iterator(fetcher))", "docstring": "Get an iterator to iterate through a set of table rows.\n\nArgs:\nstart_row: the row of the table at which to start the iteration (default 0)\nmax_rows: an upper limit on the number of rows to iterate through (default None)\n\nReturns:\nA row iterator.", "source": "juraj-google-style"}
{"code": "def diff_xIndex(self, diffs, loc):\n    \n    chars1 = 0\n    chars2 = 0\n    last_chars1 = 0\n    last_chars2 = 0\n    for x in range(len(diffs)):\n      (op, text) = diffs[x]\n      if op != self.DIFF_INSERT:  \n        chars1 += len(text)\n      if op != self.DIFF_DELETE:  \n        chars2 += len(text)\n      if chars1 > loc:  \n        break\n      last_chars1 = chars1\n      last_chars2 = chars2\n\n    if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:\n      \n      return last_chars2\n    \n    return last_chars2 + (loc - last_chars1)", "docstring": "loc is a location in text1, compute and return the equivalent location\nin text2.  e.g. \"The cat\" vs \"The big cat\", 1->1, 5->8\n\nArgs:\ndiffs: Array of diff tuples.\nloc: Location within text1.\n\nReturns:\nLocation within text2.", "source": "juraj-google-style"}
{"code": "def arccos(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unary import arccos\n    return arccos(self)", "docstring": "Calculates the inverse cosine of an [`EventSet`][temporian.EventSet]'s features.\n\nCan only be used on floating point features.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 3],\n...     features={\"M\": [1.0, 0, -1.0]},\n... )\n>>> a.arccos()\nindexes: ...\ntimestamps: [1. 2. 3.]\n'M': [0.     1.5708 3.1416]\n...\n\n```\n\nReturns:\nEventSetOrNode with inverse cosine of input features.", "source": "github-repos"}
{"code": "def __init__(self, pos=(0, 0, 0, -100), branches=None, sigma=(0, 0)):\n        \n        self.pos = pos\n        self.length = sqrt((pos[2]-pos[0])**2+(pos[3]-pos[1])**2)\n        self.branches = branches\n        self.sigma = sigma\n\n        self.comp = len(self.branches)\n        self.age = 0\n\n        self.nodes = [\n            [Node(pos[2:])]\n        ]", "docstring": "The contructor.\n\nArgs:\npos (tupel): A tupel, holding the start and end point of the tree. (x1, y1, x2, y2)\nbranches (tupel/array): Holding array/s with scale and angle for every branch.\nsigma (tuple): Holding the branch and angle sigma. e.g.(0.1, 0.2)", "source": "juraj-google-style"}
{"code": "def __init__(self, hps, images, labels, mode):\n        \n        self.hps = hps\n        self._images = images\n        self.labels = labels\n        self.mode = mode\n\n        self._extra_train_ops = []", "docstring": "ResNet constructor.\n\nArgs:\nhps: Hyperparameters.\nimages: Batches of images of size [batch_size, image_size,\nimage_size, 3].\nlabels: Batches of labels of size [batch_size, num_classes].\nmode: One of 'train' and 'eval'.", "source": "juraj-google-style"}
{"code": "def _ConvertMessage(value, message):\n    message_descriptor = message.DESCRIPTOR\n    full_name = message_descriptor.full_name\n    if _IsWrapperMessage(message_descriptor):\n        _ConvertWrapperMessage(value, message)\n    elif (full_name in _WKTJSONMETHODS):\n        _WKTJSONMETHODS[full_name][1](value, message)\n    else:\n        _ConvertFieldValuePair(value, message)", "docstring": "Convert a JSON object into a message.\n\nArgs:\nvalue: A JSON object.\nmessage: A WKT or regular protocol message to record the data.\n\nRaises:\nParseError: In case of convert problems.", "source": "codesearchnet"}
{"code": "def delete(self, key):\n        \n        with self._lmdb.begin(write=True, buffers=True) as txn:\n            txn.delete(key.encode())", "docstring": "Removes a key:value from the database\n\nArgs:\nkey (str): The key to remove.", "source": "juraj-google-style"}
{"code": "def DeregisterOutput(cls, output_class):\n    \n    output_class_name = output_class.NAME.lower()\n\n    if output_class_name in cls._disabled_output_classes:\n      class_dict = cls._disabled_output_classes\n    else:\n      class_dict = cls._output_classes\n\n    if output_class_name not in class_dict:\n      raise KeyError(\n          'Output class not set for name: {0:s}.'.format(\n              output_class.NAME))\n\n    del class_dict[output_class_name]", "docstring": "Deregisters an output class.\n\nThe output classes are identified based on their NAME attribute.\n\nArgs:\noutput_class (type): output module class.\n\nRaises:\nKeyError: if output class is not set for the corresponding data type.", "source": "juraj-google-style"}
{"code": "def _ReverseHostname(self, hostname):\n    if (not hostname):\n        return ''\n    if (len(hostname) <= 1):\n        return hostname\n    if (hostname[(- 1)] == '.'):\n        return hostname[::(- 1)][1:]\n    return hostname[::(- 1)][0:]", "docstring": "Reverses the hostname and strips the leading dot.\n\nThe hostname entry is reversed:\nmoc.elgoog.www.\nShould be:\nwww.google.com\n\nArgs:\nhostname (str): reversed hostname.\n\nReturns:\nstr: hostname without a leading dot.", "source": "codesearchnet"}
{"code": "def parse(self, template):\n    self._compile_delimiters()\n    start_index = 0\n    (content_end_index, parsed_section, section_key) = (None, None, None)\n    parsed_template = ParsedTemplate()\n    states = []\n    while True:\n        match = self._template_re.search(template, start_index)\n        if (match is None):\n            break\n        match_index = match.start()\n        end_index = match.end()\n        matches = match.groupdict()\n        if (matches['change'] is not None):\n            matches.update(tag='=', tag_key=matches['delims'])\n        elif (matches['raw'] is not None):\n            matches.update(tag='&', tag_key=matches['raw_name'])\n        tag_type = matches['tag']\n        tag_key = matches['tag_key']\n        leading_whitespace = matches['whitespace']\n        did_tag_begin_line = ((match_index == 0) or (template[(match_index - 1)] in END_OF_LINE_CHARACTERS))\n        did_tag_end_line = ((end_index == len(template)) or (template[end_index] in END_OF_LINE_CHARACTERS))\n        is_tag_interpolating = (tag_type in ['', '&'])\n        if (did_tag_begin_line and did_tag_end_line and (not is_tag_interpolating)):\n            if (end_index < len(template)):\n                end_index += (((template[end_index] == '\\r') and 1) or 0)\n            if (end_index < len(template)):\n                end_index += (((template[end_index] == '\\n') and 1) or 0)\n        elif leading_whitespace:\n            match_index += len(leading_whitespace)\n            leading_whitespace = ''\n        if (start_index != match_index):\n            parsed_template.add(template[start_index:match_index])\n        start_index = end_index\n        if (tag_type in ('\n            state = (tag_type, end_index, section_key, parsed_template)\n            states.append(state)\n            (section_key, parsed_template) = (tag_key, ParsedTemplate())\n            continue\n        if (tag_type == '/'):\n            if (tag_key != section_key):\n                raise ParsingError(('Section end tag mismatch: %s != %s' % (tag_key, section_key)))\n            parsed_section = parsed_template\n            (tag_type, section_start_index, section_key, parsed_template) = states.pop()\n            node = self._make_section_node(template, tag_type, tag_key, parsed_section, section_start_index, match_index)\n        else:\n            node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace)\n        parsed_template.add(node)\n    if (start_index != len(template)):\n        parsed_template.add(template[start_index:])\n    return parsed_template", "docstring": "Parse a template string starting at some index.\n\nThis method uses the current tag delimiter.\n\nArguments:\n\ntemplate: a unicode string that is the template to parse.\n\nindex: the index at which to start parsing.\n\nReturns:\n\na ParsedTemplate instance.", "source": "codesearchnet"}
{"code": "def date_to_delorean(year, month, day):\n    \n    return Delorean(datetime=dt(year, month, day), timezone='UTC')", "docstring": "Converts date arguments to a Delorean instance in UTC\n\nArgs:\nyear: int between 1 and 9999.\nmonth: int between 1 and 12.\nday: int between 1 and 31.\n\nReturns:\nDelorean instance in UTC of date.", "source": "juraj-google-style"}
{"code": "def update(self, data, offset, is_last, buffer_index=0):\n        \n        if buffer_index >= self.num_buffers:\n            raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index))\n\n        if self.buffers[buffer_index] is not None and self.buffers[buffer_index].shape[0] > 0:\n            expected_next_frame = self.current_frame + self.buffers[buffer_index].shape[0]\n            if expected_next_frame != offset:\n                raise ValueError(\n                    'There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format(\n                        expected_next_frame, offset))\n\n            self.buffers[buffer_index] = np.vstack([self.buffers[buffer_index], data])\n        else:\n            self.buffers[buffer_index] = data\n\n        self.buffers_full[buffer_index] = is_last", "docstring": "Update the buffer at the given index.\n\nArgs:\ndata (np.ndarray): The frames.\noffset (int): The index of the first frame in `data` within the sequence.\nis_last (bool): Whether this is the last block of frames in the sequence.\nbuffer_index (int): The index of the buffer to update (< self.num_buffers).", "source": "juraj-google-style"}
{"code": "def determine_inst(i_info, param_str, command):\n    qty_instances = len(i_info)\n    if (not qty_instances):\n        print('No instances found with parameters: {}'.format(param_str))\n        sys.exit(1)\n    if (qty_instances > 1):\n        print('{} instances match these parameters:'.format(qty_instances))\n        tar_idx = user_picklist(i_info, command)\n    else:\n        tar_idx = 0\n    tar_inst = i_info[tar_idx]['id']\n    print('{0}{3}ing{1} instance id {2}{4}{1}'.format(C_STAT[command], C_NORM, C_TI, command, tar_inst))\n    return (tar_inst, tar_idx)", "docstring": "Determine the instance-id of the target instance.\n\nInspect the number of instance-ids collected and take the\nappropriate action: exit if no ids, return if single id,\nand call user_picklist function if multiple ids exist.\n\nArgs:\ni_info (dict): information and details for instances.\nparam_str (str): the title to display in the listing.\ncommand (str): command specified on the command line.\nReturns:\ntar_inst (str): the AWS instance-id of the target.\nRaises:\nSystemExit: if no instances are match parameters specified.", "source": "codesearchnet"}
{"code": "def table_update(self, table_name, table_info):\n    \n    url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)\n    return datalab.utils.Http.request(url, method='PUT', data=table_info,\n                                      credentials=self._credentials)", "docstring": "Updates the Table info.\n\nArgs:\ntable_name: the name of the table to update as a tuple of components.\ntable_info: the Table resource with updated fields.", "source": "juraj-google-style"}
{"code": "def _add_genotype_calls(self, variant_obj, variant_line, case_obj):\n        \n        variant_line = variant_line.split('\\t')\n        \n        if len(variant_line) > 8:\n            gt_format = variant_line[8].split(':')\n            for individual in case_obj.individuals:\n                sample_id = individual.ind_id\n                index = individual.ind_index\n\n                gt_call = variant_line[9+index].split(':')\n\n                raw_call = dict(zip(gt_format, gt_call))\n\n                genotype = Genotype(**raw_call)\n\n                variant_obj.add_individual(puzzle_genotype(\n                    sample_id = sample_id,\n                    genotype = genotype.genotype,\n                    case_id = case_obj.name,\n                    phenotype = individual.phenotype,\n                    ref_depth = genotype.ref_depth,\n                    alt_depth = genotype.alt_depth,\n                    genotype_quality = genotype.genotype_quality,\n                    depth = genotype.depth_of_coverage,\n                    supporting_evidence = genotype.supporting_evidence,\n                    pe_support = genotype.pe_support,\n                    sr_support = genotype.sr_support,\n                ))", "docstring": "Add the genotype calls for the variant\n\nArgs:\nvariant_obj (puzzle.models.Variant)\nvariant_dict (dict): A variant dictionary\ncase_obj (puzzle.models.Case)", "source": "juraj-google-style"}
{"code": "def extract_features(points, n_tops):\n    \n    max_bin = -1\n    for point in points:\n        max_bin = max(max_bin, point.vel)\n    max_bin = int(round(max_bin)) + 1\n\n    \n    histogram = [0] * max_bin\n    time = 0\n\n    \n    for point in points:\n        bin_index = int(round(point.vel))\n        histogram[bin_index] += point.dt\n        time += point.dt\n\n    result = []\n    if time == 0:\n        return result\n\n    for _ in range(n_tops):\n        max_index = np.argmax(histogram)\n        value = histogram[max_index] / time\n        result.extend([max_index, value])\n        histogram[max_index] = -1\n\n    return result", "docstring": "Feature extractor\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nn_tops (int): Number of top speeds to extract\nReturns:\n:obj:`list` of float: with length (n_tops*2). Where the ith even element\nis the ith top speed and the i+1 element is the percentage of time\nspent on that speed", "source": "juraj-google-style"}
{"code": "def do(self, resource, method, params=None, data=None, json=None, headers=None):\n    uri = '{0}/{1}'.format(self._api_base, resource)\n    if (not params):\n        params = {}\n    params.update({'token': self._token})\n    req = Request(method=method, url=uri, params=params, headers=headers, data=data, json=json)\n    s = Session()\n    prepped = s.prepare_request(req)\n    resp = s.send(prepped)\n    return RTMResponse(resp)", "docstring": "Does the request job\n\nArgs:\nresource(str): resource uri(relative path)\nmethod(str): HTTP method\nparams(dict): uri queries\ndata(dict): HTTP body(form)\njson(dict): HTTP body(json)\nheaders(dict): HTTP headers\n\nReturns:\nRTMResponse", "source": "codesearchnet"}
{"code": "def __init__(self, ea=UseCurrentAddress, name=None, index=None, segment_t=None):\n        \n        if sum((ea not in (self.UseCurrentAddress, None), name is not None, index is not None,\n                segment_t is not None,)) > 1:\n            raise ValueError((\n                                 \"Expected only one (ea, name, index or segment_t).\"\n                                 \" Got (ea={!r}, name={!r}, index={!r}, segment_t={!r})\"\n                             ).format(ea,\n                                      name,\n                                      index,\n                                      segment_t))\n\n\n        elif segment_t is not None:\n            seg = segment_t\n\n        elif name is not None:\n            seg = idaapi.get_segm_by_name(name)\n\n        elif index is not None:\n            seg = idaapi.getnseg(index)\n\n        elif ea == self.UseCurrentAddress:\n            seg = idaapi.getseg(idc.here())\n\n        elif ea is None:\n            raise ValueError(\"`None` is not a valid address. To use the current screen ea, \"\n                             \"use `Function(ea=Function.UseCurrentAddress)` or supply no `ea`.\")\n\n        else:\n            seg = idaapi.getseg(ea)\n\n        self._segment = seg", "docstring": "Wrapper around IDA segments.\n\nThere are 3 ways to get a segment - by name, ea or index. Only use one.\n\nArgs:\nea - address in the segment\nname - name of the segment\nindex - index of the segment", "source": "juraj-google-style"}
{"code": "def storage_pools(self):\n    if (not self.__storage_pools):\n        self.__storage_pools = StoragePools(self.__connection)\n    return self.__storage_pools", "docstring": "Gets the StoragePools API client.\n\nReturns:\nStoragePools:", "source": "codesearchnet"}
{"code": "def save_project_id(project_id):\n  \n  \n  \n  try:\n    subprocess.call(['gcloud', 'config', 'set', 'project', project_id])\n  except:\n    config_file = os.path.join(get_config_dir(), 'config.json')\n    config = {}\n    if os.path.exists(config_file):\n      with open(config_file) as f:\n        config = json.loads(f.read())\n    config['project_id'] = project_id\n    with open(config_file, 'w') as f:\n      f.write(json.dumps(config))", "docstring": "Save project id to config file.\n\nArgs:\nproject_id: the project_id to save.", "source": "juraj-google-style"}
{"code": "def compare_name(given_name, family_name, question_name):\n    given_name = given_name.lower()\n    family_name = family_name.lower()\n    question_name = question_name.lower()\n    if (',' in question_name):\n        name_split = question_name.split(',')\n        name_split.reverse()\n        question_name = ' '.join(name_split).strip()\n    question_name = question_name.replace('.', '')\n    given_name = given_name.replace('.', '')\n    family_name = family_name.replace('.', '')\n    given_name = list(filter(None, re.split('[, \\\\-.]+', given_name)))\n    num_family_names = len(list(filter(None, re.split('[, .]+', family_name))))\n    name_split = list(filter(None, re.split('[, \\\\-.]+', question_name)))\n    first_name = [name_split[0]]\n    if (len(name_split) > 2):\n        first_name += [n for n in name_split[1:(- num_family_names)]]\n    if ((len(first_name) > 1) and (len(given_name) == len(first_name))):\n        for i in range(1, len(first_name)):\n            first_name[i] = first_name[i][0]\n            given_name[i] = given_name[i][0]\n    elif (len(given_name) != len(first_name)):\n        min_names = min(len(given_name), len(first_name))\n        first_name = first_name[:min_names]\n        given_name = given_name[:min_names]\n    if ((len(first_name[0]) == 1) or (len(given_name[0]) == 1)):\n        given_name[0] = given_name[0][0]\n        first_name[0] = first_name[0][0]\n    if ((len(first_name[0]) > 1) or (len(given_name[0]) > 1)):\n        given_name[0] = given_name[0][0]\n        first_name[0] = name_split[0][0]\n    if ((num_family_names == 1) and ('-' in family_name)):\n        num_hyphen = family_name.count('-')\n        family_name_compare = '-'.join(name_split[(- (num_hyphen + 1)):])\n    else:\n        family_name_compare = ' '.join(name_split[(- num_family_names):])\n    return ((given_name == first_name) and (family_name == family_name_compare))", "docstring": "Compares a name in question to a specified name separated into given and family.\n\nThe name in question ``question_name`` can be of varying format, including\n\"Kyle E. Niemeyer\", \"Kyle Niemeyer\", \"K. E. Niemeyer\", \"KE Niemeyer\", and\n\"K Niemeyer\". Other possibilities include names with hyphens such as\n\"Chih-Jen Sung\", \"C. J. Sung\", \"C-J Sung\".\n\nExamples:\n>>> compare_name('Kyle', 'Niemeyer', 'Kyle E Niemeyer')\nTrue\n>>> compare_name('Chih-Jen', 'Sung', 'C-J Sung')\nTrue\n\nArgs:\ngiven_name (`str`): Given (or first) name to be checked against.\nfamily_name (`str`): Family (or last) name to be checked against.\nquestion_name (`str`): The whole name in question.\n\nReturns:\n`bool`: The return value. True for successful comparison, False otherwise.", "source": "codesearchnet"}
{"code": "def _process_from_queue(self, queue):\n        \n        now = time.time()\n\n        log = self.log.bind(queue=queue)\n\n        batch_size = self._get_queue_batch_size(queue)\n\n        queue_lock, failed_to_acquire = self._get_queue_lock(queue, log)\n        if failed_to_acquire:\n            return [], -1\n\n        \n        \n        \n        \n        \n        \n        \n        \n        later = time.time() + self.config['LOCK_RETRY']\n\n        task_ids = self.scripts.zpoppush(\n            self._key(QUEUED, queue),\n            self._key(ACTIVE, queue),\n            batch_size,\n            None,\n            now,\n            if_exists=('add', self._key(SCHEDULED, queue), later, 'min'),\n            on_success=('update_sets', queue, self._key(QUEUED),\n                        self._key(ACTIVE), self._key(SCHEDULED))\n        )\n        log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE,\n                  qty=len(task_ids))\n\n        processed_count = 0\n        if task_ids:\n            processed_count = self._process_queue_tasks(queue, queue_lock,\n                                                        task_ids, now, log)\n\n        if queue_lock:\n            queue_lock.release()\n            log.debug('released swq lock')\n\n        return task_ids, processed_count", "docstring": "Internal method to process a task batch from the given queue.\n\nArgs:\nqueue: Queue name to be processed\n\nReturns:\nTask IDs:   List of tasks that were processed (even if there was an\nerror so that client code can assume the queue is empty\nif nothing was returned)\nCount:      The number of tasks that were attempted to be executed or\n-1 if the queue lock couldn't be acquired.", "source": "juraj-google-style"}
{"code": "def create_border(video, color=\"blue\", border_percent=2):\n  \n  \n  if video.shape[-1] != 3:\n    return video\n  color_to_axis = {\"blue\": 2, \"red\": 0, \"green\": 1}\n  axis = color_to_axis[color]\n  _, _, height, width, _ = video.shape\n  border_height = np.ceil(border_percent * height / 100.0).astype(np.int)\n  border_width = np.ceil(border_percent * width / 100.0).astype(np.int)\n  video[:, :, :border_height, :, axis] = 255\n  video[:, :, -border_height:, :, axis] = 255\n  video[:, :, :, :border_width, axis] = 255\n  video[:, :, :, -border_width:, axis] = 255\n  return video", "docstring": "Creates a border around each frame to differentiate input and target.\n\nArgs:\nvideo: 5-D NumPy array.\ncolor: string, \"blue\", \"red\" or \"green\".\nborder_percent: Percentarge of the frame covered by the border.\nReturns:\nvideo: 5-D NumPy array.", "source": "juraj-google-style"}
{"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n        \n        init_params = super(Estimator, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)\n\n        init_params['image_name'] = init_params.pop('image')\n        return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be downloaded\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"}
{"code": "def declarations(cls, extra_defs=None):\n    warnings.warn('Factory.declarations is deprecated; use Factory._meta.pre_declarations instead.', DeprecationWarning, stacklevel=2)\n    decls = cls._meta.pre_declarations.as_dict()\n    decls.update((extra_defs or {}))\n    return decls", "docstring": "Retrieve a copy of the declared attributes.\n\nArgs:\nextra_defs (dict): additional definitions to insert into the\nretrieved DeclarationDict.", "source": "codesearchnet"}
{"code": "def pbs_for_create(document_path, document_data):\n    \n    extractor = DocumentExtractor(document_data)\n\n    if extractor.deleted_fields:\n        raise ValueError(\"Cannot apply DELETE_FIELD in a create request.\")\n\n    write_pbs = []\n\n    \n    \n    if extractor.empty_document or extractor.set_fields:\n        write_pbs.append(extractor.get_update_pb(document_path, exists=False))\n\n    if extractor.has_transforms:\n        exists = None if write_pbs else False\n        transform_pb = extractor.get_transform_pb(document_path, exists)\n        write_pbs.append(transform_pb)\n\n    return write_pbs", "docstring": "Make ``Write`` protobufs for ``create()`` methods.\n\nArgs:\ndocument_path (str): A fully-qualified document path.\ndocument_data (dict): Property names and values to use for\ncreating a document.\n\nReturns:\nList[google.cloud.firestore_v1beta1.types.Write]: One or two\n``Write`` protobuf instances for ``create()``.", "source": "juraj-google-style"}
{"code": "def _validate_iss(claims, issuer=None):\n    if (issuer is not None):\n        if isinstance(issuer, string_types):\n            issuer = (issuer,)\n        if (claims.get('iss') not in issuer):\n            raise JWTClaimsError('Invalid issuer')", "docstring": "Validates that the 'iss' claim is valid.\n\nThe \"iss\" (issuer) claim identifies the principal that issued the\nJWT.  The processing of this claim is generally application specific.\nThe \"iss\" value is a case-sensitive string containing a StringOrURI\nvalue.  Use of this claim is OPTIONAL.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\nissuer (str or iterable): Acceptable value(s) for the issuer that\nsigned the token.", "source": "codesearchnet"}
{"code": "def flatten_rules(self, declarations):\n        \n        rules = []\n\n        for protocole, paths in declarations:\n            \n            if protocole:\n                continue\n            \n            rules.extend([self.strip_quotes(v.strip())\n                          for v in paths.split(',')])\n\n        return list(filter(self.filter_rules, rules))", "docstring": "Flatten returned import rules from regex.\n\nBecause import rules can contains multiple items in the same rule\n(called multiline import rule), the regex ``REGEX_IMPORT_RULE``\nreturn a list of unquoted items for each rule.\n\nArgs:\ndeclarations (list): A SCSS source.\n\nReturns:\nlist: Given SCSS source with all comments removed.", "source": "juraj-google-style"}
{"code": "def get_date(date, date_format = None):\n    \n    date_obj = datetime.datetime.now()\n    if date:\n        if date_format:\n            date_obj = datetime.datetime.strptime(date, date_format)\n        else:\n            if match_date(date):\n                if len(date.split('-')) == 3:\n                    date = date.split('-')\n                elif len(date.split(' ')) == 3:\n                    date = date.split(' ')\n                elif len(date.split('.')) == 3:\n                    date = date.split('.')\n                else:\n                    date = date.split('/')\n                date_obj = datetime.datetime(*(int(number) for number in date))\n            else:\n                raise ValueError(\"Date %s is invalid\" % date)\n\n    return date_obj", "docstring": "Return a datetime object if there is a valid date\n\nRaise exception if date is not valid\nReturn todays date if no date where added\n\nArgs:\ndate(str)\ndate_format(str)\n\nReturns:\ndate_obj(datetime.datetime)", "source": "juraj-google-style"}
{"code": "def _process_rules(self, rules):\n        \n        cidr = []\n        non_cidr = []\n\n        for rule in rules:\n            if '.' in rule['app']:\n                self.log.debug('Custom CIDR rule: %s', rule)\n                self._validate_cidr(rule)\n                cidr.append(rule)\n            else:\n                self.log.debug('SG reference rule: %s', rule)\n                non_cidr.append(rule)\n\n        self.log.debug('Custom CIDR rules: %s', cidr)\n        self.log.debug('SG reference rules: %s', non_cidr)\n        return non_cidr, cidr", "docstring": "Process rules into cidr and non-cidr lists.\n\nArgs:\nrules (list): Allowed Security Group ports and protocols.\n\nReturns:\n(list, list): Security Group reference rules and custom CIDR rules.", "source": "juraj-google-style"}
{"code": "def _ParseDLSPageHeader(self, file_object, page_offset):\n    page_header_map = self._GetDataTypeMap('dls_page_header')\n    try:\n        (page_header, page_size) = self._ReadStructureFromFileObject(file_object, page_offset, page_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse page header at offset: 0x{0:08x} with error: {1!s}'.format(page_offset, exception))\n    if (page_header.signature not in self._DLS_SIGNATURES):\n        raise errors.UnableToParseFile('Unsupported page header signature at offset: 0x{0:08x}'.format(page_offset))\n    return (page_header, page_size)", "docstring": "Parses a DLS page header from a file-like object.\n\nArgs:\nfile_object (file): file-like object to read the header from.\npage_offset (int): offset of the start of the page header, relative\nto the start of the file.\n\nReturns:\ntuple: containing:\n\ndls_page_header: parsed record structure.\nint: header size.\n\nRaises:\nParseError: when the header cannot be parsed.", "source": "codesearchnet"}
{"code": "def get_coder_from_spec(coder_spec):\n    assert coder_spec is not None\n    ignored_wrappers = 'com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder'\n    if coder_spec['@type'] in ignored_wrappers:\n        assert len(coder_spec['component_encodings']) == 1\n        coder_spec = coder_spec['component_encodings'][0]\n        return get_coder_from_spec(coder_spec)\n    if coder_spec['@type'] == 'kind:pair':\n        assert len(coder_spec['component_encodings']) == 2\n        component_coders = [get_coder_from_spec(c) for c in coder_spec['component_encodings']]\n        return coders.TupleCoder(component_coders)\n    elif coder_spec['@type'] == 'kind:stream':\n        assert len(coder_spec['component_encodings']) == 1\n        return coders.IterableCoder(get_coder_from_spec(coder_spec['component_encodings'][0]))\n    elif coder_spec['@type'] == 'kind:windowed_value':\n        assert len(coder_spec['component_encodings']) == 2\n        value_coder, window_coder = [get_coder_from_spec(c) for c in coder_spec['component_encodings']]\n        return coders.coders.WindowedValueCoder(value_coder, window_coder=window_coder)\n    elif coder_spec['@type'] == 'kind:interval_window':\n        assert 'component_encodings' not in coder_spec or not coder_spec['component_encodings']\n        return coders.coders.IntervalWindowCoder()\n    elif coder_spec['@type'] == 'kind:global_window':\n        assert 'component_encodings' not in coder_spec or not coder_spec['component_encodings']\n        return coders.coders.GlobalWindowCoder()\n    elif coder_spec['@type'] == 'kind:varint':\n        assert 'component_encodings' not in coder_spec or len(coder_spec['component_encodings'] == 0)\n        return coders.coders.VarIntCoder()\n    elif coder_spec['@type'] == 'kind:length_prefix':\n        assert len(coder_spec['component_encodings']) == 1\n        return coders.coders.LengthPrefixCoder(get_coder_from_spec(coder_spec['component_encodings'][0]))\n    elif coder_spec['@type'] == 'kind:bytes':\n        assert 'component_encodings' not in coder_spec or len(coder_spec['component_encodings'] == 0)\n        return coders.BytesCoder()\n    return coders.coders.deserialize_coder(coder_spec['@type'].encode('ascii'))", "docstring": "Return a coder instance from a coder spec.\n\nArgs:\ncoder_spec: A dict where the value of the '@type' key is a pickled instance\nof a Coder instance.\n\nReturns:\nA coder instance (has encode/decode methods).", "source": "github-repos"}
{"code": "def __init__(self, length=None, vendor=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_VENDOR, length=length)\n        self.vendor = vendor", "docstring": "Create an ActionVendorHeader with the optional parameters below.\n\nArgs:\nlength (int): Length is a multiple of 8.\nvender (int): Vendor ID with the same form as in VendorHeader.\nDefaults to None.", "source": "juraj-google-style"}
{"code": "def __init__(self, project_id=None, context=None):\n    \n    self._context = context or datalab.context.Context.default()\n    self._project_id = project_id or self._context.project_id\n    self._client = _utils.make_client(project_id, context)\n    self._group_dict = None", "docstring": "Initializes the Groups for a Stackdriver project.\n\nArgs:\nproject_id: An optional project ID or number to override the one provided\nby the context.\ncontext: An optional Context object to use instead of the global default.", "source": "juraj-google-style"}
{"code": "def size(self, path):\n    try:\n        return self._blobstorageIO().size(path)\n    except Exception as e:\n        raise BeamIOError('Size operation failed', {path: e})", "docstring": "Get size in bytes of a file on the FileSystem.\n\nArgs:\npath: string filepath of file.\n\nReturns: int size of file according to the FileSystem.\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def FromJsonString(self, value):\n    timezone_offset = value.find('Z')\n    if (timezone_offset == (- 1)):\n        timezone_offset = value.find('+')\n    if (timezone_offset == (- 1)):\n        timezone_offset = value.rfind('-')\n    if (timezone_offset == (- 1)):\n        raise ParseError('Failed to parse timestamp: missing valid timezone offset.')\n    time_value = value[0:timezone_offset]\n    point_position = time_value.find('.')\n    if (point_position == (- 1)):\n        second_value = time_value\n        nano_value = ''\n    else:\n        second_value = time_value[:point_position]\n        nano_value = time_value[(point_position + 1):]\n    date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)\n    td = (date_object - datetime(1970, 1, 1))\n    seconds = (td.seconds + (td.days * _SECONDS_PER_DAY))\n    if (len(nano_value) > 9):\n        raise ParseError('Failed to parse Timestamp: nanos {0} more than 9 fractional digits.'.format(nano_value))\n    if nano_value:\n        nanos = round((float(('0.' + nano_value)) * 1000000000.0))\n    else:\n        nanos = 0\n    if (value[timezone_offset] == 'Z'):\n        if (len(value) != (timezone_offset + 1)):\n            raise ParseError('Failed to parse timestamp: invalid trailing data {0}.'.format(value))\n    else:\n        timezone = value[timezone_offset:]\n        pos = timezone.find(':')\n        if (pos == (- 1)):\n            raise ParseError('Invalid timezone offset value: {0}.'.format(timezone))\n        if (timezone[0] == '+'):\n            seconds -= (((int(timezone[1:pos]) * 60) + int(timezone[(pos + 1):])) * 60)\n        else:\n            seconds += (((int(timezone[1:pos]) * 60) + int(timezone[(pos + 1):])) * 60)\n    self.seconds = int(seconds)\n    self.nanos = int(nanos)", "docstring": "Parse a RFC 3339 date string format to Timestamp.\n\nArgs:\nvalue: A date string. Any fractional digits (or none) and any offset are\naccepted as long as they fit into nano-seconds precision.\nExample of accepted format: '1972-01-01T10:00:20.021-05:00'\n\nRaises:\nParseError: On parsing problems.", "source": "codesearchnet"}
{"code": "def ip(ip_addr, return_tuple=True):\n    \n    regex_ip = __re.compile(\"^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$\")\n    if return_tuple:\n        while not regex_ip.match(ip_addr):\n            print(\"Not a good IP.\")\n            print(\"Please try again.\")\n            ip_addr = input(\"Please enter a IP address in the following format x.x.x.x: \")\n        return ip_addr\n    elif not return_tuple:\n        if not regex_ip.match(ip_addr):\n            return False\n        else:\n            return True", "docstring": "Function to check if a address is good\nArgs:\nip_addr: IP address in the following format 192.168.1.1\nreturn_tuple: Set to True it returns a IP, set to False returns True or False\n\nReturns: see return_tuple for return options", "source": "juraj-google-style"}
{"code": "def _AddAttributeNodes(self, attribute_names):\n    for attribute_name in attribute_names:\n        self.graph[attribute_name] = self.Node(is_artifact=False)", "docstring": "Add the attribute nodes to the graph.\n\nFor every attribute that is required for the collection of requested\nartifacts, add a node to the dependency graph. An attribute node will have\nincoming edges from the artifacts that provide this attribute and outgoing\nedges to the artifacts that depend on it.\n\nAn attribute is reachable as soon as one artifact that provides it is\nreachable. Initially, no attribute node is reachable.\n\nArgs:\nattribute_names: List of required attribute names.", "source": "codesearchnet"}
{"code": "def get_linear_interpolation(self, percentile):\n    with self._lock:\n        return self._get_linear_interpolation(percentile)", "docstring": "Calculate percentile estimation based on linear interpolation.\n\nIt first finds the bucket which includes the target percentile and\nprojects the estimated point in the bucket by assuming all the elements\nin the bucket are uniformly distributed.\n\nArgs:\npercentile: The target percentile of the value returning from this\nmethod. Should be a floating point number greater than 0 and less\nthan 1.", "source": "github-repos"}
{"code": "async def debug(self, client_id, conn_string, command, args):\n    conn_id = self._client_info(client_id, 'connections')[conn_string]\n    return (await self.adapter.debug(conn_id, command, args))", "docstring": "Send a debug command to a device on behalf of a client.\n\nSee :meth:`AbstractDeviceAdapter.send_script`.\n\nArgs:\nclient_id (str): The client we are working for.\nconn_string (str): A connection string that will be\npassed to the underlying device adapter.\ncommand (str): The name of the debug command to run.\nargs (dict): Any command arguments.\n\nReturns:\nobject: The response to the debug command.\n\nRaises:\nDeviceServerError: There is an issue with your client_id such\nas not being connected to the device.\nDeviceAdapterError: The adapter had a protocol issue sending the debug\ncommand.", "source": "codesearchnet"}
{"code": "def _validate_field(msg: message.Message, field: descriptor.FieldDescriptor, field_name: str, primitive_handler_: primitive_handler.PrimitiveHandler) -> None:\n    if annotation_utils.field_is_required(field) and (not proto_utils.field_is_set(msg, field)):\n        raise fhir_errors.InvalidFhirError(f'Required field `{field.full_name}` is missing.')\n    if annotation_utils.is_reference(field.message_type):\n        _validate_reference_field(msg, field)\n        return\n    if field.type == descriptor.FieldDescriptor.TYPE_MESSAGE:\n        for i in range(proto_utils.field_content_length(msg, field)):\n            submessage = proto_utils.get_value_at_field_index(msg, field, i)\n            _validate_fhir_constraints(submessage, field_name, primitive_handler_)\n            if fhir_types.is_period(submessage):\n                _validate_period(submessage, field_name)", "docstring": "Validates that required fields are set, and performs basic temporal checks.\n\nArgs:\nmsg: The Message that the field belongs to.\nfield: The FieldDescriptor of the field to examine.\nfield_name: The name of the field.\nprimitive_handler_: Responsible for returning PrimitiveWrappers.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that a required field is not set\nor if temporal requirements are not met.", "source": "github-repos"}
{"code": "def __iadd__(self, other):\n        \n        \n        assert isinstance(other, LocationDescriptor), \"You can only add LocationDescriptor together.\"\n        assert self._separation_char == other._separation_char, \\\n            \"You can only add LocationDescriptor together if they share the same separator character.\"\n        self._locations_list.extend(other._locations_list)\n\n        return self", "docstring": "**Extend** an existing :class:`LocationDescriptor` object by another.\n\nArgs:\nself: This :class:`LocationDescriptor` object.\nother: Another :class:`LocationDescriptor` object.\n\nReturns:\nThe updated :class:`LocationDescriptor` object itself.", "source": "juraj-google-style"}
{"code": "def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):\n    new = cls(manager=manager)\n    new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)\n    return new", "docstring": "Build tasks for the computation of Born effective charges from a ground-state task.\n\nArgs:\nscf_task: ScfTask object.\nddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.\nph_tolerance: dict {\"varname\": value} with the tolerance used in the phonon run.\nNone to use AbiPy default.\nmanager: :class:`TaskManager` object.", "source": "codesearchnet"}
{"code": "def recipe_dv360_editor(config, auth_dv, auth_sheet, auth_bigquery, recipe_name, recipe_slug, command):\n    dataset(config, {'__comment__': 'Ensure dataset exists.', 'auth': auth_bigquery, 'dataset': recipe_slug})\n    drive(config, {'__comment__': 'Copy the default template to sheet with the recipe name', 'auth': auth_sheet, 'copy': {'source': 'https:\n    dv_editor(config, {'__comment': 'Depending on users choice, execute a different part of the solution.', 'auth_dv': auth_dv, 'auth_sheets': auth_sheet, 'auth_bigquery': auth_bigquery, 'sheet': recipe_name, 'dataset': recipe_slug, 'command': command})", "docstring": "Allows bulk editing DV360 through Sheets and BigQuery.\n\nArgs:\nauth_dv (authentication) - Credentials used for dv.\nauth_sheet (authentication) - Credentials used for sheet.\nauth_bigquery (authentication) - Credentials used for bigquery.\nrecipe_name (string) - Name of Google Sheet to create.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.\ncommand (choice) - Action to take.", "source": "github-repos"}
{"code": "def _deduplicate_indexed_slices(values, indices):\n    unique_indices, new_index_positions = array_ops.unique(indices)\n    summed_values = math_ops.unsorted_segment_sum(values, new_index_positions, array_ops.shape(unique_indices)[0])\n    return (summed_values, unique_indices)", "docstring": "Sums `values` associated with any non-unique `indices`.\n\nArgs:\nvalues: A `Tensor` with rank >= 1.\nindices: A one-dimensional integer `Tensor`, indexing into the first\ndimension of `values` (as in an IndexedSlices object).\n\nReturns:\nA tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a\nde-duplicated version of `indices` and `summed_values` contains the sum of\n`values` slices associated with each unique index.", "source": "github-repos"}
{"code": "def register_keras_serializable(package='Custom', name=None):\n\n    def decorator(arg):\n        \n        class_name = name if name is not None else arg.__name__\n        registered_name = package + '>' + class_name\n        if tf_inspect.isclass(arg) and (not hasattr(arg, 'get_config')):\n            raise ValueError('Cannot register a class that does not have a get_config() method.')\n        if registered_name in _GLOBAL_CUSTOM_OBJECTS:\n            raise ValueError('%s has already been registered to %s' % (registered_name, _GLOBAL_CUSTOM_OBJECTS[registered_name]))\n        if arg in _GLOBAL_CUSTOM_NAMES:\n            raise ValueError('%s has already been registered to %s' % (arg, _GLOBAL_CUSTOM_NAMES[arg]))\n        _GLOBAL_CUSTOM_OBJECTS[registered_name] = arg\n        _GLOBAL_CUSTOM_NAMES[arg] = registered_name\n        return arg\n    return decorator", "docstring": "Registers an object with the Keras serialization framework.\n\nThis decorator injects the decorated class or function into the Keras custom\nobject dictionary, so that it can be serialized and deserialized without\nneeding an entry in the user-provided custom object dict. It also injects a\nfunction that Keras will call to get the object's serializable string key.\n\nNote that to be serialized and deserialized, classes must implement the\n`get_config()` method. Functions do not have this requirement.\n\nThe object will be registered under the key 'package>name' where `name`,\ndefaults to the object name if not passed.\n\nArgs:\npackage: The package that this class belongs to.\nname: The name to serialize this class under in this package. If None, the\nclass' name will be used.\n\nReturns:\nA decorator that registers the decorated class with the passed names.", "source": "github-repos"}
{"code": "def set_config_variables(repo, variables):\n    with repo.config_writer() as writer:\n        for (k, value) in variables.items():\n            (section, option) = k.split('.')\n            writer.set_value(section, option, value)\n        writer.release()", "docstring": "Set config variables\n\nArgs:\nrepo (git.Repo): repo\nvariables (dict): entries of the form 'user.email': 'you@example.com'", "source": "codesearchnet"}
{"code": "def unreduce_array(array, shape, axis, keepdims):\n    if ((axis is not None) and ((not keepdims) or (keepdims is numpy._NoValue))):\n        if isinstance(axis, int):\n            axis = (axis,)\n        for ax in sorted(axis):\n            array = numpy.expand_dims(array, ax)\n    return numpy.broadcast_to(array, shape)", "docstring": "Reverse summing over a dimension, NumPy implementation.\n\nArgs:\narray: The array that was reduced.\nshape: The original shape of the array before reduction.\naxis: The axis or axes that were summed.\nkeepdims: Whether these axes were kept as singleton axes.\n\nReturns:\nAn array with axes broadcast to match the shape of the original array.", "source": "codesearchnet"}
{"code": "def parse_debug_node_name(node_name):\n    prefix = '__dbg_'\n    name = node_name\n    if not name.startswith(prefix):\n        raise ValueError(\"Invalid prefix in debug node name: '%s'\" % node_name)\n    name = name[len(prefix):]\n    if name.count('_') < 2:\n        raise ValueError(\"Invalid debug node name: '%s'\" % node_name)\n    debug_op = name[name.rindex('_') + 1:]\n    name = name[:name.rindex('_')]\n    debug_op_index = int(name[name.rindex('_') + 1:])\n    name = name[:name.rindex('_')]\n    if name.count(':') != 1:\n        raise ValueError(\"Invalid tensor name in debug node name: '%s'\" % node_name)\n    watched_node_name = name[:name.index(':')]\n    watched_output_slot = int(name[name.index(':') + 1:])\n    return (watched_node_name, watched_output_slot, debug_op_index, debug_op)", "docstring": "Parse the name of a debug node.\n\nArgs:\nnode_name: Name of the debug node.\n\nReturns:\n1. Name of the watched node, as a str.\n2. Output slot index of the watched tensor, as an int.\n3. Index of the debug node, as an int.\n4. Name of the debug op, as a str, e.g, \"DebugIdentity\".\n\nRaises:\nValueError: If the input node name is not a valid debug node name.", "source": "github-repos"}
{"code": "def _check_warnings(self, json_response):\n        \n\n        self.warnings = None\n        if json_response:\n            self.warnings = json_response.get('warnings')\n\n        if self.debug and self.warnings:\n            for w in self.warnings:\n                print(\"WARNING: %s - %s\" % (w['warning_name'], w['warning_msg']))", "docstring": "Extract warnings from the response to make them accessible\n\nArgs:\njson_response (dict): JSON response", "source": "juraj-google-style"}
{"code": "def __init__(self, input_energy: energy.BitstringEnergy, num_expectation_samples: int, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None):\n    super().__init__(input_energy, initial_seed, name)\n    self.num_expectation_samples = num_expectation_samples", "docstring": "Initializes an EnergyInference.\n\nArgs:\ninput_energy: The parameterized energy function which defines this\ndistribution via the equations of an energy based model.  This class\nassumes that all parameters of `energy` are `tf.Variable`s and that\nthey are all returned by `energy.variables`.\nnum_expectation_samples: Number of samples to draw and use for estimating\nthe expectation value.\ninitial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This\nseed will be used in the `sample` method.  If None, the seed is updated\nafter every inference call.  Otherwise, the seed is fixed.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def get_sequence_sliding_window_properties(self, scale, window, representatives_only=True):\n        \n        for g in tqdm(self.genes):\n            g.protein.get_sequence_sliding_window_properties(scale=scale, window=window,\n                                                             representative_only=representatives_only)", "docstring": "Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.\nResults are stored in the protein's respective SeqProp objects at ``.annotations``\n\nArgs:\nrepresentative_only (bool): If analysis should only be run on the representative sequences", "source": "juraj-google-style"}
{"code": "def _relocate_if_symbolic(self, key: Union[str, int], value: Any) -> Any:\n    if isinstance(value, Symbolic):\n        root_path = utils.KeyPath(key, self.sym_path)\n        if value.sym_parent is not None and (value.sym_parent is not self or root_path != value.sym_path):\n            value = value.clone()\n    if isinstance(value, TopologyAware):\n        value.sym_setpath(utils.KeyPath(key, self.sym_path))\n        value.sym_setparent(self._sym_parent_for_children())\n    return value", "docstring": "Relocate if a symbolic value is to be inserted as member.\n\nNOTE(daiyip): when a symbolic value is inserted into the object tree,\nif it already has a parent, we need to make a shallow copy of this object\nto avoid multiple parents. Otherwise we need to set its parent and root_path\naccording to current object.\n\nArgs:\nkey: Key used to insert the value.\nvalue: formal value to be inserted.\n\nReturns:\nFormalized value that is ready for insertion as members.", "source": "github-repos"}
{"code": "def key_for_namespace(cls, namespace):\n    \n    if namespace:\n      return model.Key(cls.KIND_NAME, namespace)\n    else:\n      return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)", "docstring": "Return the Key for a namespace.\n\nArgs:\nnamespace: A string giving the namespace whose key is requested.\n\nReturns:\nThe Key for the namespace.", "source": "juraj-google-style"}
{"code": "class Speech2TextProcessor(ProcessorMixin):\n    feature_extractor_class = 'Speech2TextFeatureExtractor'\n    tokenizer_class = 'Speech2TextTokenizer'\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False\n\n    def __call__(self, *args, **kwargs):\n        \n        if self._in_target_context_manager:\n            return self.current_processor(*args, **kwargs)\n        if 'raw_speech' in kwargs:\n            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')\n            audio = kwargs.pop('raw_speech')\n        else:\n            audio = kwargs.pop('audio', None)\n        sampling_rate = kwargs.pop('sampling_rate', None)\n        text = kwargs.pop('text', None)\n        if len(args) > 0:\n            audio = args[0]\n            args = args[1:]\n        if audio is None and text is None:\n            raise ValueError('You need to specify either an `audio` or `text` input to process.')\n        if audio is not None:\n            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)\n        if text is not None:\n            encodings = self.tokenizer(text, **kwargs)\n        if text is None:\n            return inputs\n        elif audio is None:\n            return encodings\n        else:\n            inputs['labels'] = encodings['input_ids']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @contextmanager\n    def as_target_processor(self):\n        \n        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')\n        self._in_target_context_manager = True\n        self.current_processor = self.tokenizer\n        yield\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False", "docstring": "Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a\nsingle processor.\n\n[`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and\n[`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more\ninformation.\n\nArgs:\nfeature_extractor (`Speech2TextFeatureExtractor`):\nAn instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.\ntokenizer (`Speech2TextTokenizer`):\nAn instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def _ProcessDirectory(self, mediator, file_entry):\n    \n    self.processing_status = definitions.STATUS_INDICATOR_COLLECTING\n\n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('collecting')\n\n    for sub_file_entry in file_entry.sub_file_entries:\n      if self._abort:\n        break\n\n      try:\n        if not sub_file_entry.IsAllocated():\n          continue\n\n      except dfvfs_errors.BackEndError as exception:\n        warning_message = (\n            'unable to process directory entry: {0:s} with error: '\n            '{1!s}').format(sub_file_entry.name, exception)\n        mediator.ProduceExtractionWarning(\n            warning_message, path_spec=file_entry.path_spec)\n        continue\n\n      \n      \n      if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:\n        if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':\n          continue\n\n      event_source = event_sources.FileEntryEventSource(\n          path_spec=sub_file_entry.path_spec)\n\n      \n      stat_object = sub_file_entry.GetStat()\n      if stat_object:\n        event_source.file_entry_type = stat_object.type\n\n      mediator.ProduceEventSource(event_source)\n\n      self.last_activity_timestamp = time.time()\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('collecting')\n\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING", "docstring": "Processes a directory file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry of the directory.", "source": "juraj-google-style"}
{"code": "def _base_expansion_size(num, bases):\n    return (tf.floor((tf.math.log(num) / tf.math.log(bases))) + 1)", "docstring": "Computes the number of terms in the place value expansion.\n\nLet num = a0 + a1 b + a2 b^2 + ... ak b^k be the place value expansion of\n`num` in base b (ak <> 0). This function computes and returns `k+1` for each\nbase `b` specified in `bases`.\n\nThis can be inferred from the base `b` logarithm of `num` as follows:\n$$k = Floor(log_b (num)) + 1  = Floor( log(num) / log(b)) + 1$$\n\nArgs:\nnum: Scalar `Tensor` of dtype either `float32` or `float64`. The number to\ncompute the base expansion size of.\nbases: `Tensor` of the same dtype as num. The bases to compute the size\nagainst.\n\nReturns:\nTensor of same dtype and shape as `bases` containing the size of num when\nwritten in that base.", "source": "codesearchnet"}
{"code": "def __init__(self, replacements):\n    self.replacements = replacements\n    self.in_replacements = False\n    self.preserved_annos = {anno.Basic.DIRECTIVES, anno.Basic.EXTRA_LOOP_TEST, anno.Basic.ORIGIN, anno.Basic.SKIP_PROCESSING, anno.Static.ORIG_DEFINITIONS, 'function_context_name'}", "docstring": "Create a new ReplaceTransformer.\n\nArgs:\nreplacements: A mapping from placeholder names to (lists of) AST nodes\nthat these placeholders will be replaced by.", "source": "github-repos"}
{"code": "def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links):\n    dirname = os.path.basename(directory)\n    if split:\n        all_files = list(_iter_valid_files(directory, white_list_formats, follow_links))\n        num_files = len(all_files)\n        start, stop = (int(split[0] * num_files), int(split[1] * num_files))\n        valid_files = all_files[start:stop]\n    else:\n        valid_files = _iter_valid_files(directory, white_list_formats, follow_links)\n    classes = []\n    filenames = []\n    for root, fname in valid_files:\n        classes.append(class_indices[dirname])\n        absolute_path = os.path.join(root, fname)\n        relative_path = os.path.join(dirname, os.path.relpath(absolute_path, directory))\n        filenames.append(relative_path)\n    return (classes, filenames)", "docstring": "Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\nArgs:\ndirectory: absolute path to a directory containing the files to list.\nThe directory name is used as class label\nand must be a key of `class_indices`.\nwhite_list_formats: set of strings containing allowed extensions for\nthe files to be counted.\nsplit: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\naccount a certain fraction of files in each directory.\nE.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\nof images in each directory.\nclass_indices: dictionary mapping a class name to its index.\nfollow_links: boolean, follow symbolic links to subdirectories.\n\nReturns:\nclasses: a list of class indices\nfilenames: the path of valid files in `directory`, relative from\n`directory`'s parent (e.g., if `directory` is \"dataset/class1\",\nthe filenames will be\n`[\"class1/file1.jpg\", \"class1/file2.jpg\", ...]`).", "source": "github-repos"}
{"code": "def lengths(self):\n    return np.array([math.sqrt(sum((row ** 2))) for row in self.matrix])", "docstring": "The cell lengths.\n\nArgs:\nNone\n\nReturns:\n(np.array(a,b,c)): The cell lengths.", "source": "codesearchnet"}
{"code": "def schema(self):\n    if (not self._schema):\n        try:\n            self._load_info()\n            self._schema = _schema.Schema(self._info['schema']['fields'])\n        except KeyError:\n            raise Exception('Unexpected table response: missing schema')\n    return self._schema", "docstring": "Retrieves the schema of the table.\n\nReturns:\nA Schema object containing a list of schema fields and associated metadata.\nRaises\nException if the request could not be executed or the response was malformed.", "source": "codesearchnet"}
{"code": "def _generate_enqueue_op(self, flat_inputs: List[internal_types.NativeObject], flat_weights: List[Optional[internal_types.NativeObject]], flat_features: List[tpu_embedding_v2_utils.FeatureConfig], device_ordinal: int, mode_override: Text) -> ops.Operation:\n    combiners = [table.combiner for table in self._table_config]\n    indices_or_row_splits = []\n    values = []\n    weights = []\n    int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)\n    float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)\n    for inp, weight, (path, feature) in zip(flat_inputs, flat_weights, flat_features):\n        if isinstance(inp, tensor_lib.Tensor):\n            self._add_data_for_tensor(inp, weight, indices_or_row_splits, values, weights, int_zeros, float_zeros, path)\n        elif isinstance(inp, sparse_tensor.SparseTensor):\n            self._add_data_for_sparse_tensor(inp, weight, indices_or_row_splits, values, weights, int_zeros, float_zeros, path, feature)\n        elif isinstance(inp, ragged_tensor.RaggedTensor):\n            self._add_data_for_ragged_tensor(inp, weight, indices_or_row_splits, values, weights, int_zeros, float_zeros, path, feature)\n        else:\n            raise ValueError('Input {} is of unknown type {}. Please only pass Tensor, SparseTensor or RaggedTensor as input to enqueue.'.format(path, type(inp)))\n    return tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch(sample_indices_or_row_splits=indices_or_row_splits, embedding_indices=values, aggregation_weights=weights, mode_override=mode_override, device_ordinal=device_ordinal, combiners=combiners)", "docstring": "Outputs a the enqueue op given the inputs and weights.\n\nArgs:\nflat_inputs: A list of input tensors.\nflat_weights: A list of input weights (or None) of the same length as\nflat_inputs.\nflat_features: A list of FeatureConfigs of the same length as flat_inputs.\ndevice_ordinal: The device to create the enqueue op for.\nmode_override: A tensor containing the string \"train\" or \"inference\".\n\nReturns:\nThe enqueue op.", "source": "github-repos"}
{"code": "def import_object_from_string_code(code, object):\n    \n    sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()\n    module = imp.new_module(sha256)\n    try:\n        exec_(code, module.__dict__)\n    except Exception as e:\n        raise exceptions.UserError('User code exception', exception_message=str(e))\n    sys.modules[sha256] = module\n    try:\n        return getattr(module, object)\n    except AttributeError:\n        raise exceptions.UserError(\"{} not found in code\".format(object))", "docstring": "Used to import an object from arbitrary passed code.\n\nPassed in code is treated as a module and is imported and added\nto `sys.modules` with its SHA256 hash as key.\n\nArgs:\ncode (string): Python code to import as module\n\nobject (string): Name of object to extract from imported module", "source": "juraj-google-style"}
{"code": "def _setup_parser(self, filename=None):\n        \n        assert isinstance(filename, str) or filename is None\n\n        \n        if not filename:\n            filename = MACKUP_CONFIG_FILE\n\n        parser = configparser.SafeConfigParser(allow_no_value=True)\n        parser.read(os.path.join(os.path.join(os.environ['HOME'], filename)))\n\n        return parser", "docstring": "Configure the ConfigParser instance the way we want it.\n\nArgs:\nfilename (str) or None\n\nReturns:\nSafeConfigParser", "source": "juraj-google-style"}
{"code": "def save(self, **fields):\n    for field in fields:\n        if (field in self.writable_fields):\n            setattr(self, field, fields[field])\n        else:\n            self._handle_wrong_field(field, ATTR_TYPE_WRITE)\n    if self._populated_fields:\n        self._update(**self._modified_fields)\n    else:\n        self._create(**self._modified_fields)", "docstring": "Save the instance to the remote Transifex server.\n\nIf it was pre-populated, it updates the instance on the server,\notherwise it creates a new object.\n\nAny values given in `fields` will be attempted to be saved\non the object. The same goes for any other values already set\nto the object by `model_instance.attr = value`.\n\nRaises:\nAttributeError: if a given field is not included in\n`self.writable_fields`,", "source": "codesearchnet"}
{"code": "def new_from_list(cls, content, fill_title=True, **kwargs):\n        \n        obj = cls(**kwargs)\n        obj.append_from_list(content, fill_title)\n        return obj", "docstring": "Populates the Table with a list of tuples of strings.\n\nArgs:\ncontent (list): list of tuples of strings. Each tuple is a row.\nfill_title (bool): if true, the first tuple in the list will\nbe set as title", "source": "juraj-google-style"}
{"code": "def get_element_spd_dos(self, el):\n        \n        el = get_el_sp(el)\n        el_dos = {}\n        for site, atom_dos in self.pdos.items():\n            if site.specie == el:\n                for orb, pdos in atom_dos.items():\n                    orbital_type = _get_orb_type_lobster(orb)\n                    if orbital_type not in el_dos:\n                        el_dos[orbital_type] = pdos\n                    else:\n                        el_dos[orbital_type] = \\\n                            add_densities(el_dos[orbital_type], pdos)\n\n        return {orb: Dos(self.efermi, self.energies, densities)\n                for orb, densities in el_dos.items()}", "docstring": "Get element and spd projected Dos\n\n\nArgs:\nel: Element in Structure.composition associated with LobsterCompleteDos\n\nReturns:\ndict of {Element: {\"S\": densities, \"P\": densities, \"D\": densities}}", "source": "juraj-google-style"}
{"code": "def dispatch_callback(self, items):\n        \n        if not self._manager.is_active:\n            return\n\n        batched_commands = collections.defaultdict(list)\n\n        for item in items:\n            batched_commands[item.__class__].append(item)\n\n        _LOGGER.debug(\"Handling %d batched requests\", len(items))\n\n        if batched_commands[requests.LeaseRequest]:\n            self.lease(batched_commands.pop(requests.LeaseRequest))\n        if batched_commands[requests.ModAckRequest]:\n            self.modify_ack_deadline(batched_commands.pop(requests.ModAckRequest))\n        \n        \n        if batched_commands[requests.AckRequest]:\n            self.ack(batched_commands.pop(requests.AckRequest))\n        if batched_commands[requests.NackRequest]:\n            self.nack(batched_commands.pop(requests.NackRequest))\n        if batched_commands[requests.DropRequest]:\n            self.drop(batched_commands.pop(requests.DropRequest))", "docstring": "Map the callback request to the appropriate gRPC request.\n\nArgs:\naction (str): The method to be invoked.\nkwargs (Dict[str, Any]): The keyword arguments for the method\nspecified by ``action``.\n\nRaises:\nValueError: If ``action`` isn't one of the expected actions\n\"ack\", \"drop\", \"lease\", \"modify_ack_deadline\" or \"nack\".", "source": "juraj-google-style"}
{"code": "def _compute_nfps_uniform(cum_counts, sizes):\n    \n    nfps = np.zeros((len(sizes), len(sizes)))\n    \n    \n    for l in range(len(sizes)):\n        for u in range(l, len(sizes)):\n            nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)\n    return nfps", "docstring": "Computes the matrix of expected false positives for all possible\nsub-intervals of the complete domain of set sizes, assuming uniform\ndistribution of set_sizes within each sub-intervals.\n\nArgs:\ncum_counts: the complete cummulative distribution of set sizes.\nsizes: the complete domain of set sizes.\n\nReturn (np.array): the 2-D array of expected number of false positives\nfor every pair of [l, u] interval, where l is axis-0 and u is\naxis-1.", "source": "juraj-google-style"}
{"code": "def loadFunction(self, root):\n    for element in root.iter():\n        if (element.tag == 'function'):\n            self.functionList.append(element.attrib['name'])", "docstring": "Loads a list with all the functions in the Fortran File\n\nArgs:\nroot: The root of the XML ast tree.\n\nReturns:\nNone\n\nDoes not return anything but populates a list (self.functionList) that\ncontains all the functions in the Fortran File.", "source": "codesearchnet"}
{"code": "def _GetApprovals(self, approval_type, offset, count, filter_func=None, token=None):\n    approvals_base_urn = aff4.ROOT_URN.Add('users').Add(token.username).Add('approvals').Add(approval_type)\n    all_children = aff4.FACTORY.RecursiveMultiListChildren([approvals_base_urn])\n    approvals_urns = []\n    for (subject, children) in all_children:\n        if children:\n            continue\n        approvals_urns.append(subject)\n    approvals_urns.sort(key=(lambda x: x.age), reverse=True)\n    approvals = list(aff4.FACTORY.MultiOpen(approvals_urns, mode='r', aff4_type=aff4_security.Approval, age=aff4.ALL_TIMES, token=token))\n    approvals_by_urn = {}\n    for approval in approvals:\n        approvals_by_urn[(approval.symlink_urn or approval.urn)] = approval\n    cur_offset = 0\n    sorted_approvals = []\n    for approval_urn in approvals_urns:\n        try:\n            approval = approvals_by_urn[approval_urn]\n        except KeyError:\n            continue\n        if ((filter_func is not None) and (not filter_func(approval))):\n            continue\n        cur_offset += 1\n        if (cur_offset <= offset):\n            continue\n        if (count and (len(sorted_approvals) >= count)):\n            break\n        sorted_approvals.append(approval)\n    subjects_urns = [a.Get(a.Schema.SUBJECT) for a in approvals]\n    subjects_by_urn = {}\n    for subject in aff4.FACTORY.MultiOpen(subjects_urns, mode='r', token=token):\n        subjects_by_urn[subject.urn] = subject\n    return (sorted_approvals, subjects_by_urn)", "docstring": "Gets all approvals for a given user and approval type.\n\nArgs:\napproval_type: The type of approvals to get.\noffset: The starting index within the collection.\ncount: The number of items to return.\nfilter_func: A predicate function, returning True if a specific approval\nshould be included in the result and False otherwise.\ntoken: The token identifying the user.\n\nReturns:\nA list of approvals of the given approval type.", "source": "codesearchnet"}
{"code": "def create(self, name, targetUrl, resource, event, filter=None, secret=None, **request_parameters):\n    check_type(name, basestring, may_be_none=False)\n    check_type(targetUrl, basestring, may_be_none=False)\n    check_type(resource, basestring, may_be_none=False)\n    check_type(event, basestring, may_be_none=False)\n    check_type(filter, basestring)\n    check_type(secret, basestring)\n    post_data = dict_from_items_with_values(request_parameters, name=name, targetUrl=targetUrl, resource=resource, event=event, filter=filter, secret=secret)\n    json_data = self._session.post(API_ENDPOINT, json=post_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Create a webhook.\n\nArgs:\nname(basestring): A user-friendly name for this webhook.\ntargetUrl(basestring): The URL that receives POST requests for\neach event.\nresource(basestring): The resource type for the webhook.\nevent(basestring): The event type for the webhook.\nfilter(basestring): The filter that defines the webhook scope.\nsecret(basestring): The secret used to generate payload signature.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nWebhook: A Webhook object with the details of the created webhook.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def _split_cell(cell, module):\n  \n  lines = cell.split('\\n')\n  code = None\n  last_def = -1\n  name = None\n  define_wild_re = re.compile('^DEFINE\\s+.*$', re.IGNORECASE)\n  define_re = re.compile('^DEFINE\\s+QUERY\\s+([A-Z]\\w*)\\s*?(.*)$', re.IGNORECASE)\n  select_re = re.compile('^SELECT\\s*.*$', re.IGNORECASE)\n  standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\\s*.*$', re.IGNORECASE)\n  \n  \n  \n  for i, line in enumerate(lines):\n    define_match = define_re.match(line)\n    select_match = select_re.match(line)\n    standard_sql_match = standard_sql_re.match(line)\n\n    if i:\n      prior_content = ''.join(lines[:i]).strip()\n      if select_match:\n        \n        \n        select_match = len(prior_content) == 0 or \\\n            (prior_content[-1] != '(' and not standard_sql_re.match(prior_content))\n      if standard_sql_match:\n        standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content)\n\n    if define_match or select_match or standard_sql_match:\n      \n      if code is None:\n        code = ('\\n'.join(lines[:i])).strip()\n        if len(code):\n          code += '\\n'\n      elif last_def >= 0:\n\n        \n        query = '\\n'.join([line for line in lines[last_def:i] if len(line)]).strip()\n        if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0:\n          \n          continue\n\n        \n        statement = datalab.data.SqlStatement(query, module)\n        module.__dict__[name] = statement\n        \n        module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement\n\n      \n      if define_match:\n        name = define_match.group(1)\n        lines[i] = define_match.group(2)\n      else:\n        name = datalab.data._utils._SQL_MODULE_MAIN\n\n      \n      last_def = i\n    else:\n      define_wild_match = define_wild_re.match(line)\n      if define_wild_match:\n        raise Exception('Expected \"DEFINE QUERY <name>\"')\n\n  if last_def >= 0:\n    \n    query = '\\n'.join([line for line in lines[last_def:] if len(line)]).strip()\n    statement = datalab.data.SqlStatement(query, module)\n    module.__dict__[name] = statement\n    module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement\n\n  if code is None:\n    code = ''\n  module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)\n  return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None)", "docstring": "Split a hybrid %%sql cell into the Python code and the queries.\n\nPopulates a module with the queries.\n\nArgs:\ncell: the contents of the %%sql cell.\nmodule: the module that the contents will populate.\n\nReturns:\nThe default (last) query for the module.", "source": "juraj-google-style"}
{"code": "def __init__(self, directory, container, entry_point, use_gpu):\n    \n    self.name = os.path.basename(directory)\n    self.directory = directory\n    self.container = container\n    self.entry_point = entry_point\n    self.use_gpu = use_gpu", "docstring": "Initializes instance of Submission class.\n\nArgs:\ndirectory: location of the submission.\ncontainer: URL of Docker container which should be used to run submission.\nentry_point: entry point script, which invokes submission.\nuse_gpu: whether to use Docker with GPU or not.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, transition_min=258.15, transition_max=298.15,\n                 transition_gamma=3.0, **kwargs):\n        \n        self.transition_min = transition_min\n        self.transition_max = transition_max\n        self.transition_gamma = transition_gamma\n        super(CloudCompositor, self).__init__(name, **kwargs)", "docstring": "Collect custom configuration values.\n\nArgs:\ntransition_min (float): Values below or equal to this are\nclouds -> opaque white\ntransition_max (float): Values above this are\ncloud free -> transparent\ntransition_gamma (float): Gamma correction to apply at the end", "source": "juraj-google-style"}
{"code": "def consolidate(self, args):\n        \n        result = dict(args)\n\n        for opt in self:\n            if opt.name in result:\n                result[opt.name] = opt.convert(result[opt.name])\n            else:\n                if opt.default is not None:\n                    result[opt.name] = opt.convert(opt.default)\n\n        return result", "docstring": "Consolidate the provided arguments.\n\nIf the provided arguments have matching options, this performs a type conversion.\nFor any option that has a default value and is not present in the provided\narguments, the default value is added.\n\nArgs:\nargs (dict): A dictionary of the provided arguments.\n\nReturns:\ndict: A dictionary with the type converted and with default options enriched\narguments.", "source": "juraj-google-style"}
{"code": "def process_api_config_response(self, config_json):\n    with self._config_lock:\n        self._add_discovery_config()\n        for config in config_json.get('items', []):\n            lookup_key = (config.get('name', ''), config.get('version', ''))\n            self._configs[lookup_key] = config\n        for config in self._configs.itervalues():\n            name = config.get('name', '')\n            api_version = config.get('api_version', '')\n            path_version = config.get('path_version', '')\n            sorted_methods = self._get_sorted_methods(config.get('methods', {}))\n            for (method_name, method) in sorted_methods:\n                self._save_rest_method(method_name, name, path_version, method)", "docstring": "Parses a JSON API config and registers methods for dispatch.\n\nSide effects:\nParses method name, etc. for all methods and updates the indexing\ndata structures with the information.\n\nArgs:\nconfig_json: A dict, the JSON body of the getApiConfigs response.", "source": "codesearchnet"}
{"code": "def create_datastore(self, schema=None, primary_key=None, delete_first=0, path=None):\n    if (delete_first == 0):\n        pass\n    elif (delete_first == 1):\n        self.delete_datastore()\n    elif (delete_first == 2):\n        if (primary_key is None):\n            self.delete_datastore()\n    else:\n        raise HDXError('delete_first must be 0, 1 or 2! (0 = No, 1 = Yes, 2 = Delete if no primary key)')\n    if (path is None):\n        (url, path) = self.download()\n        delete_after_download = True\n    else:\n        url = path\n        delete_after_download = False\n\n    def convert_to_text(extended_rows):\n        for (number, headers, row) in extended_rows:\n            for (i, val) in enumerate(row):\n                row[i] = str(val)\n            (yield (number, headers, row))\n    with Download(full_agent=self.configuration.get_user_agent()) as downloader:\n        try:\n            stream = downloader.get_tabular_stream(path, headers=1, post_parse=[convert_to_text], bytes_sample_size=1000000)\n            nonefieldname = False\n            if (schema is None):\n                schema = list()\n                for fieldname in stream.headers:\n                    if (fieldname is not None):\n                        schema.append({'id': fieldname, 'type': 'text'})\n                    else:\n                        nonefieldname = True\n            data = {'resource_id': self.data['id'], 'force': True, 'fields': schema, 'primary_key': primary_key}\n            self._write_to_hdx('datastore_create', data, 'resource_id')\n            if (primary_key is None):\n                method = 'insert'\n            else:\n                method = 'upsert'\n            logger.debug(('Uploading data from %s to datastore' % url))\n            offset = 0\n            chunksize = 100\n            rowset = stream.read(keyed=True, limit=chunksize)\n            while (len(rowset) != 0):\n                if nonefieldname:\n                    for row in rowset:\n                        del row[None]\n                data = {'resource_id': self.data['id'], 'force': True, 'method': method, 'records': rowset}\n                self._write_to_hdx('datastore_upsert', data, 'resource_id')\n                rowset = stream.read(keyed=True, limit=chunksize)\n                logger.debug(('Uploading: %s' % offset))\n                offset += chunksize\n        except Exception as e:\n            raisefrom(HDXError, ('Upload to datastore of %s failed!' % url), e)\n        finally:\n            if delete_after_download:\n                remove(path)", "docstring": "For tabular data, create a resource in the HDX datastore which enables data preview in HDX. If no schema is provided\nall fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.\n\nArgs:\nschema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.\nprimary_key (Optional[str]): Primary key of schema. Defaults to None.\ndelete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.\npath (Optional[str]): Local path to file that was uploaded. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def jump_if(state, op, ctx, *, jump_if_val, pop=PopBehavior.NONE):\n    if pop is PopBehavior.ALWAYS:\n        state, value = state.pop()\n    else:\n        value = state.top()\n    if jump_if_val is None:\n        normal_val = frame_state.NOT_NONE\n    elif jump_if_val is frame_state.NOT_NONE:\n        normal_val = None\n    elif isinstance(jump_if_val, bool):\n        normal_val = not jump_if_val\n    else:\n        raise NotImplementedError(f'Unsupported jump value: {jump_if_val!r}')\n    jump = frame_state.restrict_condition(state.node, value, jump_if_val)\n    normal = frame_state.restrict_condition(state.node, value, normal_val)\n    if jump is not frame_state.UNSATISFIABLE:\n        if jump:\n            assert jump.binding\n            else_state = state.forward_cfg_node('Jump', jump.binding).forward_cfg_node('Jump')\n        else:\n            else_state = state.forward_cfg_node('Jump')\n        ctx.vm.store_jump(op.target, else_state)\n    else:\n        else_state = None\n    if pop is PopBehavior.OR:\n        state = state.pop_and_discard()\n    if normal is frame_state.UNSATISFIABLE:\n        return state.set_why('unsatisfiable')\n    elif not else_state and (not normal):\n        return state\n    else:\n        return state.forward_cfg_node('NoJump', normal.binding if normal else None)", "docstring": "Implementation of various _JUMP_IF bytecodes.\n\nArgs:\nstate: Initial FrameState.\nop: An opcode.\nctx: The current context.\njump_if_val: Indicates what value leads to a jump. The non-jump state is\nreached by the value's negation. Use frame_state.NOT_NONE for `not None`.\npop: Whether and how the opcode pops a value off the stack.\n\nReturns:\nThe new FrameState.", "source": "github-repos"}
{"code": "def WriteEventBody(self, event):\n    \n    output_string = NativePythonFormatterHelper.GetFormattedEventObject(event)\n    self._output_writer.Write(output_string)", "docstring": "Writes the body of an event to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def _initialize_operations(self):\n    if isinstance(self._graph, tf.Graph):\n        return self._graph.get_operations()\n    elif isinstance(self._graph, mtf.Graph):\n        return self._graph.operations\n    else:\n        raise TypeError('Graph is not tf.Graph or mtf.Graph: {}'.format(type(self._graph)))", "docstring": "Initializer for _operations.\n\nRaises:\nTypeError: _graph is not a tf.Graph or mtf.Graph.\n\nReturns:\na list of (tf.Operation or mtf.Operation)", "source": "codesearchnet"}
{"code": "def UpdateTaskAsProcessingByIdentifier(self, task_identifier):\n    \n    with self._lock:\n      task_processing = self._tasks_processing.get(task_identifier, None)\n      if task_processing:\n        task_processing.UpdateProcessingTime()\n        self._UpdateLatestProcessingTime(task_processing)\n        return\n\n      task_queued = self._tasks_queued.get(task_identifier, None)\n      if task_queued:\n        logger.debug('Task {0:s} was queued, now processing.'.format(\n            task_identifier))\n        self._tasks_processing[task_identifier] = task_queued\n        del self._tasks_queued[task_identifier]\n\n        task_queued.UpdateProcessingTime()\n        self._UpdateLatestProcessingTime(task_queued)\n        return\n\n      task_abandoned = self._tasks_abandoned.get(task_identifier, None)\n      if task_abandoned:\n        del self._tasks_abandoned[task_identifier]\n        self._tasks_processing[task_identifier] = task_abandoned\n        logger.debug('Task {0:s} was abandoned, but now processing.'.format(\n            task_identifier))\n\n        task_abandoned.UpdateProcessingTime()\n        self._UpdateLatestProcessingTime(task_abandoned)\n        return\n\n      if task_identifier in self._tasks_pending_merge:\n        \n        \n        return\n\n    \n    raise KeyError('Status of task {0:s} is unknown.'.format(task_identifier))", "docstring": "Updates the task manager to reflect the task is processing.\n\nArgs:\ntask_identifier (str): unique identifier of the task.\n\nRaises:\nKeyError: if the task is not known to the task manager.", "source": "juraj-google-style"}
{"code": "def get_run_key(feed_dict, fetches):\n    return json.dumps(RunKey(get_flattened_names(feed_dict), get_flattened_names(fetches)))", "docstring": "Summarize the names of feeds and fetches as a RunKey JSON string.\n\nArgs:\nfeed_dict: The feed_dict given to the `Session.run()` call.\nfetches: The fetches from the `Session.run()` call.\n\nReturns:\nA JSON Array consisting of two items. They first items is a flattened\nArray of the names of the feeds. The second item is a flattened Array of\nthe names of the fetches.", "source": "github-repos"}
{"code": "def operations_happening_at_same_time_as(self, scheduled_operation: ScheduledOperation) -> List[ScheduledOperation]:\n    overlaps = self.query(time=scheduled_operation.time, duration=scheduled_operation.duration)\n    return [e for e in overlaps if (e != scheduled_operation)]", "docstring": "Finds operations happening at the same time as the given operation.\n\nArgs:\nscheduled_operation: The operation specifying the time to query.\n\nReturns:\nScheduled operations that overlap with the given operation.", "source": "codesearchnet"}
{"code": "def has_axon(neuron, treefun=_read_neurite_type):\n    \n    return CheckResult(NeuriteType.axon in (treefun(n) for n in neuron.neurites))", "docstring": "Check if a neuron has an axon\n\nArguments:\nneuron(Neuron): The neuron object to test\ntreefun: Optional function to calculate the tree type of\nneuron's neurites\n\nReturns:\nCheckResult with result", "source": "juraj-google-style"}
{"code": "def get_block(self, block_id):\n    block = backend.query.get_block(self.connection, block_id)\n    latest_block = self.get_latest_block()\n    latest_block_height = (latest_block['height'] if latest_block else 0)\n    if ((not block) and (block_id > latest_block_height)):\n        return\n    result = {'height': block_id, 'transactions': []}\n    if block:\n        transactions = backend.query.get_transactions(self.connection, block['transactions'])\n        result['transactions'] = [t.to_dict() for t in Transaction.from_db(self, transactions)]\n    return result", "docstring": "Get the block with the specified `block_id`.\n\nReturns the block corresponding to `block_id` or None if no match is\nfound.\n\nArgs:\nblock_id (int): block id of the block to get.", "source": "codesearchnet"}
{"code": "def pad(self, image: np.array, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n    height, width = get_image_size(image)\n    size = max(height, width)\n    image = pad(image=image, padding=((0, size - height), (0, size - width)), constant_values=0.5, data_format=data_format, input_data_format=input_data_format)\n    return image", "docstring": "Pad an image to a square with gray pixels on the bottom and the right, as per the original OWLv2\nimplementation.\n\nArgs:\nimage (`np.ndarray`):\nImage to pad.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input\nimage.", "source": "github-repos"}
{"code": "def get_charge_transfer(self, atom_index):\n    if (self.potcar is None):\n        raise ValueError('POTCAR must be supplied in order to calculate charge transfer!')\n    potcar_indices = []\n    for (i, v) in enumerate(self.natoms):\n        potcar_indices += ([i] * v)\n    nelect = self.potcar[potcar_indices[atom_index]].nelectrons\n    return (self.data[atom_index]['charge'] - nelect)", "docstring": "Returns the charge transferred for a particular atom. Requires POTCAR\nto be supplied.\n\nArgs:\natom_index:\nIndex of atom.\n\nReturns:\nCharge transfer associated with atom from the Bader analysis.\nGiven by final charge on atom - nelectrons in POTCAR for\nassociated atom.", "source": "codesearchnet"}
{"code": "def visualize_reconstruction(inputs, reconstruct, num=3, name=\"reconstruction\"):\n  \n  reconstruct = tf.clip_by_value(reconstruct, 0., 1.)\n  inputs_and_reconstruct = tf.concat((inputs[:num], reconstruct[:num]), axis=0)\n  image_summary(inputs_and_reconstruct, name)", "docstring": "Visualizes the reconstruction of inputs in TensorBoard.\n\nArgs:\ninputs: A tensor of the original inputs, of shape [batch, timesteps,\nh, w, c].\nreconstruct: A tensor of a reconstruction of inputs, of shape\n[batch, timesteps, h, w, c].\nnum: Integer for the number of examples to visualize.\nname: String name of this summary.", "source": "juraj-google-style"}
{"code": "def convert_per_replica_to_dtensor(per_replica_value, mesh):\n    values = per_replica_value.values\n    if isinstance(values[0], (float, int)):\n        rank = 0\n    else:\n        rank = len(values[0].shape)\n    if rank == 0:\n        result = []\n        for v in values:\n            result.append(array_ops.expand_dims_v2(v, axis=0))\n        rank += 1\n    else:\n        result = list(values)\n    batch_layout = layout.Layout.batch_sharded(mesh, batch_dim=DEFAULT_BATCH_MESH_DIM_NAME, rank=rank)\n    return d_api.pack(result, batch_layout)", "docstring": "Convert a PerReplica result to a DTensor instance.\n\nArgs:\nper_replica_value: A PerReplica instance whose value will be converted\nto DTensor.\nmesh: The mesh used for layout creation.\n\nReturns:\nA DTensor instance that packed from per_replica_value with batch sharded\nlayout.", "source": "github-repos"}
{"code": "def switch_types(self):\n    if (not self.__switch_types):\n        self.__switch_types = SwitchTypes(self.__connection)\n    return self.__switch_types", "docstring": "Gets the SwitchTypes API client.\n\nReturns:\nSwitchTypes:", "source": "codesearchnet"}
{"code": "def __init__(self, storage_writer, path):\n    \n    super(SQLiteStorageMergeReader, self).__init__(storage_writer)\n    self._active_container_type = None\n    self._active_cursor = None\n    self._add_active_container_method = None\n    self._add_container_type_methods = {}\n    self._compression_format = definitions.COMPRESSION_FORMAT_NONE\n    self._connection = None\n    self._container_types = None\n    self._cursor = None\n    self._event_data_identifier_mappings = {}\n    self._path = path\n\n    \n    \n    \n    \n    for container_type, method_name in self._ADD_CONTAINER_TYPE_METHODS.items():\n      method = getattr(self, method_name, None)\n      if not method:\n        raise RuntimeError(\n            'Add method missing for container type: {0:s}'.format(\n                container_type))\n\n      self._add_container_type_methods[container_type] = method", "docstring": "Initializes a storage merge reader.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\npath (str): path to the input file.\n\nRaises:\nIOError: if the input file cannot be opened.\nRuntimeError: if an add container method is missing.", "source": "juraj-google-style"}
{"code": "def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget,\n                   parser_type='html'):\n        \n        if encoding:\n            lxml_encoding = to_lxml_encoding(encoding) or 'latin1'\n        else:\n            lxml_encoding = encoding\n\n        elements = []\n\n        callback_func = elements.append\n\n        target = target_class(callback_func)\n\n        if parser_type == 'html':\n            parser = lxml.html.HTMLParser(\n                encoding=lxml_encoding, target=target\n            )\n        elif parser_type == 'xhtml':\n            parser = lxml.html.XHTMLParser(\n                encoding=lxml_encoding, target=target, recover=True\n            )\n        else:\n            parser = lxml.etree.XMLParser(\n                encoding=lxml_encoding, target=target, recover=True\n            )\n\n        if parser_type == 'html':\n            \n            \n            \n            for dummy in range(3):\n                parser.feed('<html>'.encode(encoding))\n\n        while True:\n            data = file.read(self.BUFFER_SIZE)\n\n            if not data:\n                break\n\n            parser.feed(data)\n\n            for element in elements:\n                yield element\n\n            del elements[:]\n\n        parser.close()\n\n        for element in elements:\n            yield element", "docstring": "Return an iterator of elements found in the document.\n\nArgs:\nfile: A file object containing the document.\nencoding (str): The encoding of the document.\ntarget_class: A class to be used for target parsing.\nparser_type (str): The type of parser to use. Accepted values:\n``html``, ``xhtml``, ``xml``.\n\nReturns:\niterator: Each item is an element from\n:mod:`.document.htmlparse.element`", "source": "juraj-google-style"}
{"code": "def Run(self, conf, args):\n    raise NotImplementedError('command %r not implemented' % self.__class__.__name__)", "docstring": "Run this command.\n\nCommands are invoked with a global configuration object and a list\nof arguments.\n\nArgs:\nconf: A Config object defining global configuration of\nnss_cache.\nargs: A list of strings of commandline arguments.\nReturns:\n0 if the command was successful\nnon-zero shell error code if not.", "source": "github-repos"}
{"code": "def _LinearMapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    elements_data_size = self._data_type_definition.GetByteSize()\n    self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size)\n\n    try:\n      struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])\n      mapped_values = map(self._element_data_type_map.MapValue, struct_tuple)\n\n    except Exception as exception:\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: {2!s}').format(\n              self._data_type_definition.name, byte_offset, exception)\n      raise errors.MappingError(error_string)\n\n    if context:\n      context.byte_size = elements_data_size\n\n    return tuple(mapped_values)", "docstring": "Maps a data type sequence on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\ntuple[object, ...]: mapped values.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def part_studio_stl(self, did, wid, eid):\n        \n\n        req_headers = {\n            'Accept': 'application/vnd.onshape.v1+octet-stream'\n        }\n        return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl', headers=req_headers)", "docstring": "Exports STL export from a part studio\n\nArgs:\n- did (str): Document ID\n- wid (str): Workspace ID\n- eid (str): Element ID\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def MultiDestroyFlowStates(self, session_ids, request_limit=None):\n    subjects = [session_id.Add('state') for session_id in session_ids]\n    to_delete = []\n    deleted_requests = []\n    for (subject, values) in self.MultiResolvePrefix(subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit):\n        for (_, serialized, _) in values:\n            request = rdf_flow_runner.RequestState.FromSerializedString(serialized)\n            deleted_requests.append(request)\n            response_subject = self.GetFlowResponseSubject(request.session_id, request.id)\n            to_delete.append(response_subject)\n        to_delete.append(subject)\n    self.DeleteSubjects(to_delete, sync=True)\n    return deleted_requests", "docstring": "Deletes all requests and responses for the given flows.\n\nArgs:\nsession_ids: A lists of flows to destroy.\nrequest_limit: A limit on the number of requests to delete.\n\nReturns:\nA list of requests that were deleted.", "source": "codesearchnet"}
{"code": "def copy_graph(subject, existing_graph):\n    \n    new_graph = rdflib.Graph()\n    for predicate, object_ in existing_graph.predicate_objects():\n        new_graph.add((subject, predicate, object_))\n    return new_graph", "docstring": "Function takes a subject and an existing graph, returns a new graph with\nall predicate and objects of the existing graph copied to the new_graph with\nsubject as the new subject\n\nArgs:\nsubject(rdflib.URIRef): A URIRef subject\nexisting_graph(rdflib.Graph): A rdflib.Graph\n\nReturns:\nrdflib.Graph", "source": "juraj-google-style"}
{"code": "def __init__(self, tensors):\n    if not isinstance(tensors, (list, tuple)) or not tensors:\n        raise ValueError('Unable to create a ShardedNdArray without a list of tensors.')\n    self.tensors = tensors\n    self.n_devices = len(tensors)", "docstring": "Initializes the ShardedNdArray.\n\nNote that the tensors should be ordered in the way the pmap producing these\ntensors is run.\n\nArgs:\ntensors: list or tuple of eager tensors, one for each device.", "source": "github-repos"}
{"code": "def _histogram_equalization_helper(valid_data, number_of_bins, clip_limit=None, slope_limit=None):\n    (temp_histogram, temp_bins) = np.histogram(valid_data, number_of_bins)\n    if (clip_limit is not None):\n        pixels_to_clip_at = int((clip_limit * (valid_data.size / float(number_of_bins))))\n        mask_to_clip = (temp_histogram > clip_limit)\n        temp_histogram[mask_to_clip] = pixels_to_clip_at\n    cumulative_dist_function = temp_histogram.cumsum()\n    if (slope_limit is not None):\n        pixel_height_limit = int((slope_limit * (valid_data.size / float(number_of_bins))))\n        cumulative_excess_height = 0\n        num_clipped_pixels = 0\n        weight_metric = np.zeros(cumulative_dist_function.shape, dtype=float)\n        for pixel_index in range(1, cumulative_dist_function.size):\n            current_pixel_count = cumulative_dist_function[pixel_index]\n            diff_from_acceptable = (((current_pixel_count - cumulative_dist_function[(pixel_index - 1)]) - pixel_height_limit) - cumulative_excess_height)\n            if (diff_from_acceptable < 0):\n                weight_metric[pixel_index] = abs(diff_from_acceptable)\n            cumulative_excess_height += max(diff_from_acceptable, 0)\n            cumulative_dist_function[pixel_index] = (current_pixel_count - cumulative_excess_height)\n            num_clipped_pixels = (num_clipped_pixels + cumulative_excess_height)\n    cumulative_dist_function = (((number_of_bins - 1) * cumulative_dist_function) / cumulative_dist_function[(- 1)])\n    return (cumulative_dist_function, temp_bins)", "docstring": "Calculate the simplest possible histogram equalization, using only valid data.\n\nReturns:\ncumulative distribution function and bin information", "source": "codesearchnet"}
{"code": "def get_resource(self, uri: str) -> Optional[message.Message]:\n    for collection in (self.structure_definitions, self.search_parameters, self.code_systems, self.value_sets):\n        resource = collection.get(uri)\n        if resource is not None:\n            return resource\n    return None", "docstring": "Retrieves a protocol buffer representation of the given resource.\n\nArgs:\nuri: The URI of the resource to retrieve.\n\nReturns:\nProtocol buffer for the resource or `None` if the `uri` can not be found.", "source": "github-repos"}
{"code": "def from_structure(cls, structure, ff_elements=None, atom_style='charge'):\n    s = structure.get_sorted_structure()\n    (box, symmop) = lattice_2_lmpbox(s.lattice)\n    coords = symmop.operate_multi(s.cart_coords)\n    site_properties = s.site_properties\n    if ('velocities' in site_properties):\n        velos = np.array(s.site_properties['velocities'])\n        rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix)\n        rot_velos = rot.operate_multi(velos)\n        site_properties.update({'velocities': rot_velos})\n    boxed_s = Structure(box.to_lattice(), s.species, coords, site_properties=site_properties, coords_are_cartesian=True)\n    symbols = list(s.symbol_set)\n    if ff_elements:\n        symbols.extend(ff_elements)\n    elements = sorted((Element(el) for el in set(symbols)))\n    mass_info = [tuple(([i.symbol] * 2)) for i in elements]\n    ff = ForceField(mass_info)\n    topo = Topology(boxed_s)\n    return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)", "docstring": "Simple constructor building LammpsData from a structure without\nforce field parameters and topologies.\n\nArgs:\nstructure (Structure): Input structure.\nff_elements ([str]): List of strings of elements that must\nbe present due to force field settings but not\nnecessarily in the structure. Default to None.\natom_style (str): Choose between \"atomic\" (neutral) and\n\"charge\" (charged). Default to \"charge\".", "source": "codesearchnet"}
{"code": "def deep_del(data, fn):\n    result = {}\n    for (k, v) in data.iteritems():\n        if (not fn(v)):\n            if isinstance(v, dict):\n                result[k] = deep_del(v, fn)\n            else:\n                result[k] = v\n    return result", "docstring": "Create dict copy with removed items.\n\nRecursively remove items where fn(value) is True.\n\nReturns:\ndict: New dict with matching items removed.", "source": "codesearchnet"}
{"code": "def get_lang(tweet):\n    if is_original_format(tweet):\n        lang_field = 'lang'\n    else:\n        lang_field = 'twitter_lang'\n    if ((tweet[lang_field] is not None) and (tweet[lang_field] != 'und')):\n        return tweet[lang_field]\n    else:\n        return None", "docstring": "Get the language that the Tweet is written in.\n\nArgs:\ntweet (Tweet or dict): A Tweet object or dictionary\n\nReturns:\nstr: 2-letter BCP 47 language code (or None if undefined)\n\nExample:\n>>> from tweet_parser.getter_methods.tweet_text import get_lang\n>>> original = {\"created_at\": \"Wed May 24 20:17:19 +0000 2017\",\n...             \"lang\": \"en\"}\n>>> get_lang(original)\n'en'\n\n>>> activity = {\"postedTime\": \"2017-05-24T20:17:19.000Z\",\n...             \"twitter_lang\": \"en\"}\n>>> get_lang(activity)\n'en'", "source": "codesearchnet"}
{"code": "def add(self, distinguished_name, object_class, attributes):\n    self.conn.add(distinguished_name, object_class, attributes)", "docstring": "Add object to LDAP.\n\nArgs:\ndistinguished_name: the DN of the LDAP record to be added\nobject_class: The objectClass of the record to be added.\nThis is a list of length >= 1.\nattributes: a dictionary of LDAP attributes to add\nSee ldap_tools.api.group.API#__ldap_attr", "source": "codesearchnet"}
{"code": "def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1, training=True, bottleneck=True, padding='SAME'):\n    conv = CONFIG[dim]['conv']\n    with tf.variable_scope('f', reuse=tf.AUTO_REUSE):\n        if first_batch_norm:\n            net = tf.layers.batch_normalization(x, training=training)\n            net = tf.nn.relu(net)\n        else:\n            net = x\n        if bottleneck:\n            net = conv(net, depth1, 1, strides=stride, padding=padding, activation=None)\n            net = tf.layers.batch_normalization(net, training=training)\n            net = tf.nn.relu(net)\n            net = conv(net, depth1, 3, strides=1, padding=padding, activation=None)\n            net = tf.layers.batch_normalization(net, training=training)\n            net = tf.nn.relu(net)\n            net = conv(net, depth2, 1, strides=1, padding=padding, activation=None)\n        else:\n            net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None)\n            net = tf.layers.batch_normalization(x, training=training)\n            net = tf.nn.relu(net)\n            net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None)\n        return net", "docstring": "Applies residual function for RevNet.\n\nArgs:\nx: input tensor\ndepth1: Number of output channels for the first and second conv layers.\ndepth2: Number of output channels for the third conv layer.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\nfirst_batch_norm: Whether to keep the first batch norm layer or not.\nTypically used in the first RevNet block.\nstride: Stride for the first conv filter. Note that this particular\nRevNet architecture only varies the stride for the first conv\nfilter. The stride for the second conv filter is always set to 1.\ntraining: True for train phase, False for eval phase.\nbottleneck: If true, apply bottleneck 1x1 down/up sampling.\npadding: Padding for each conv layer.\n\nReturns:\nOutput tensor after applying residual function for RevNet.", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        self.model = kwargs.pop('model', self.model)\n        self.queryset = kwargs.pop('queryset', self.queryset)\n        self.search_fields = kwargs.pop('search_fields', self.search_fields)\n        self.max_results = kwargs.pop('max_results', self.max_results)\n        defaults = {'data_view': 'django_select2:auto-json'}\n        defaults.update(kwargs)\n        super(ModelSelect2Mixin, self).__init__(*args, **defaults)", "docstring": "Overwrite class parameters if passed as keyword arguments.\n\nArgs:\nmodel (django.db.models.Model): Model to select choices from.\nqueryset (django.db.models.query.QuerySet): QuerySet to select choices from.\nsearch_fields (list): List of model lookup strings.\nmax_results (int): Max. JsonResponse view page size.", "source": "juraj-google-style"}
{"code": "def get_nonconflicting_string(base_fmtstr, conflict_set, offset=0):\n    conflict_set_ = set(conflict_set)\n    for count in it.count(offset):\n        base_str = (base_fmtstr % count)\n        if (base_str not in conflict_set_):\n            return base_str", "docstring": "gets a new string that wont conflict with something that already exists\n\nArgs:\nbase_fmtstr (str):\nconflict_set (set):\n\nCommandLine:\npython -m utool.util_dev --test-get_nonconflicting_string\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_dev import *  # NOQA\n>>> # build test data\n>>> base_fmtstr = 'somestring%d'\n>>> conflict_set = ['somestring0']\n>>> # execute function\n>>> result = get_nonconflicting_string(base_fmtstr, conflict_set)\n>>> # verify results\n>>> print(result)\nsomestring1", "source": "codesearchnet"}
{"code": "def _set_details(self, content):\n    try:\n        self.details = str(content)\n    except UnicodeEncodeError:\n        logging.error('Unable to decode \"%s\" in Py3, encoding in utf-8.', content)\n        self.details = content.encode('utf-8')", "docstring": "Sets the `details` field.\n\nArgs:\ncontent: the content to extract details from.", "source": "github-repos"}
{"code": "def _ParseKey(self, knowledge_base, registry_key, value_name):\n    \n    user_account = artifacts.UserAccountArtifact(\n        identifier=registry_key.name, path_separator='\\\\')\n\n    registry_value = registry_key.GetValueByName('ProfileImagePath')\n    if registry_value:\n      profile_path = registry_value.GetDataAsObject()\n      username = self._GetUsernameFromProfilePath(profile_path)\n\n      user_account.user_directory = profile_path or None\n      user_account.username = username or None\n\n    try:\n      knowledge_base.AddUserAccount(user_account)\n    except KeyError:\n      \n      pass", "docstring": "Parses a Windows Registry key for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\nvalue_name (str): name of the Windows Registry value.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def add_to_dumper(dumper: Type, classes: List[Type]) -> None:\n    if (not isinstance(classes, list)):\n        classes = [classes]\n    for class_ in classes:\n        if issubclass(class_, enum.Enum):\n            dumper.add_representer(class_, EnumRepresenter(class_))\n        elif (issubclass(class_, str) or issubclass(class_, UserString)):\n            dumper.add_representer(class_, UserStringRepresenter(class_))\n        else:\n            dumper.add_representer(class_, Representer(class_))", "docstring": "Register user-defined classes with the Dumper.\n\nThis enables the Dumper to write objects of your classes to a \\\nYAML file. Note that all the arguments are types, not instances!\n\nArgs:\ndumper: Your dumper class(!), derived from yatiml.Dumper\nclasses: One or more classes to add.", "source": "codesearchnet"}
{"code": "def update_aliases(self):\n    try:\n        response = self.client.api.get_room_state(self.room_id)\n        for chunk in response:\n            if (('content' in chunk) and ('aliases' in chunk['content'])):\n                if (chunk['content']['aliases'] != self.aliases):\n                    self.aliases = chunk['content']['aliases']\n                    return True\n                else:\n                    return False\n    except MatrixRequestError:\n        return False", "docstring": "Get aliases information from room state.\n\nReturns:\nboolean: True if the aliases changed, False if not", "source": "codesearchnet"}
{"code": "def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu,\n                          name):\n    \n    layer_components = [\n        conv.Conv2D(\n            output_channels, [3, 3],\n            initializers=self._initializers,\n            regularizers=self._regularizers,\n            rate=dilation_rate,\n            name=\"dilated_conv_\" + name),\n    ]\n    if apply_relu:\n      layer_components.append(lambda net: tf.nn.relu(net, name=\"relu_\" + name))\n    return sequential.Sequential(layer_components, name=name)", "docstring": "Create a dilated convolution layer.\n\nArgs:\noutput_channels: int. Number of output channels for each pixel.\ndilation_rate: int. Represents how many pixels each stride offset will\nmove. A value of 1 indicates a standard convolution.\napply_relu: bool. If True, a ReLU non-linearlity is added.\nname: string. Name for layer.\n\nReturns:\na sonnet Module for a dilated convolution.", "source": "juraj-google-style"}
{"code": "def __init__(self, core, keep_probs):\n    \n\n    super(RecurrentDropoutWrapper, self).__init__(\n        custom_getter=None, name=core.module_name + \"_recdropout\")\n    self._core = core\n    self._keep_probs = keep_probs\n\n    \n    \n    \n    \n    \n    self._dropout_state_size = []\n\n    def set_dropout_state_size(keep_prob, state_size):\n      if keep_prob is not None:\n        self._dropout_state_size.append(state_size)\n        return len(self._dropout_state_size) - 1\n      return None\n\n    self._dropout_indexes = tf.contrib.framework.nest.map_structure(\n        set_dropout_state_size, keep_probs, core.state_size)", "docstring": "Builds a new wrapper around a given core.\n\nArgs:\ncore: the RNN core to be wrapped.\nkeep_probs: the recurrent dropout keep probabilities to apply.\nThis should have the same structure has core.init_state. No dropout is\napplied for leafs set to None.", "source": "juraj-google-style"}
{"code": "def toInteger(self) -> 'Builder':\n    return self._to_builder(_evaluation.ToIntegerFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath toInteger() function.\n\nCasts its operand to an integer.\nReturns an empty collection if the operand can not be coerced to an integer.\nRaises a ValueError if the operand collection contains more than one\nelement.\n\nReturns:\nAn integer representation of its operand.", "source": "github-repos"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    regvalue = event_values.get('regvalue', {})\n    string_parts = []\n    for key, value in sorted(regvalue.items()):\n      string_parts.append('{0:s}: {1!s}'.format(key, value))\n    event_values['text'] = ' '.join(string_parts)\n\n    urls = event_values.get('urls', [])\n    if urls:\n      event_values['urls'] = ' - '.join(urls)\n\n    if 'key_path' in event_values:\n      format_string = self.FORMAT_STRING\n    else:\n      format_string = self.FORMAT_STRING_ALTERNATIVE\n\n    return self._FormatMessages(\n        format_string, self.FORMAT_STRING_SHORT, event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def _refresh(self, _):\n        \n        \n        from google.appengine.api import app_identity\n        try:\n            token, _ = app_identity.get_access_token(self._scopes)\n        except app_identity.Error as e:\n            raise exceptions.CredentialsError(str(e))\n        self.access_token = token", "docstring": "Refresh self.access_token.\n\nArgs:\n_: (ignored) A function matching httplib2.Http.request's signature.", "source": "juraj-google-style"}
{"code": "def OpenSourcePath(self, source_path):\n    \n    source_path_spec = path_spec_factory.Factory.NewPathSpec(\n        definitions.TYPE_INDICATOR_OS, location=source_path)\n\n    self.AddScanNode(source_path_spec, None)", "docstring": "Opens the source path.\n\nArgs:\nsource_path (str): source path.", "source": "juraj-google-style"}
{"code": "def check_beam_implementation(test: absltest.TestCase, input_data: Union[EventSet, List[EventSet]], output_node: EventSetNode, cast: Optional[DType]=None):\n    if isinstance(input_data, EventSet):\n        input_data = [input_data]\n    tmp_dir = tempfile.mkdtemp()\n    output_path = os.path.join(tmp_dir, 'output.csv')\n    input_paths = []\n    for input_idx, input_evset in enumerate(input_data):\n        input_path = os.path.join(tmp_dir, f'input_{input_idx}.csv')\n        input_paths.append(input_path)\n        to_tensorflow_record(input_evset, path=input_path)\n    with TestPipeline() as p:\n        input_pcollection = {}\n        for input_path, input_evset in zip(input_paths, input_data):\n            input_pcollection[input_evset.node()] = p | beam_from_tensorflow_record(input_path, input_evset.node().schema)\n        output_pcollection = run_multi_io(inputs=input_pcollection, outputs=[output_node])\n        assert len(output_pcollection) == 1\n        output = output_pcollection[output_node] | beam_to_tensorflow_record(output_path, output_node.schema, shard_name_template='')\n        assert_that(output, equal_to([output_path]))\n    beam_output = from_tensorflow_record(output_path, output_node.schema)\n    if cast:\n        beam_output = beam_output.cast(cast)\n    expected_output = output_node.run(input_data)\n    assertEqualEventSet(test, beam_output, expected_output)", "docstring": "Checks the result of the Numpy backend against the Beam backend.\n\nArgs:\ntest: The absl's test.\ninput_data: An event set to feed to a graph.\noutput_node: Output of the graph.\ninput_node: Input of the graph. If not set, uses input_data.node()\ninstead.\ncast: DType to cast beam's output to after loading it from csv. Useful\nfor comparing outputs that are expected to be int32 for example,\nsince when written to CSV those will be loaded back up as int64.", "source": "github-repos"}
{"code": "def update_particle(position_update, velocity_update, state, nbest_topology, idx_particle):\n    (idx, particle) = idx_particle\n    nbest = state.swarm[nbest_topology[idx]].best_position\n    velocity = velocity_update(particle, nbest, state)\n    position = position_update(particle.position, velocity)\n    return particle._replace(position=position, velocity=velocity)", "docstring": "Update function for a particle.\n\nCalculates and updates the velocity and position of a particle for a\nsingle iteration of the PSO algorithm. Social best particle is determined\nby the state.params['topology'] function.\n\nArgs:\nstate: cipy.algorithms.pso.State: The state of the PSO algorithm.\nnbest_topology: dict: Containing neighbourhood best index for each\nparticle index.\nidx_particle: tuple: Tuple of the index of the particle and the\nparticle itself.\n\nReturns:\ncipy.algorithms.pso.Particle: A new particle with the updated position\nand velocity.", "source": "codesearchnet"}
{"code": "def kick_user(self, user_id, reason=''):\n    try:\n        self.client.api.kick_user(self.room_id, user_id)\n        return True\n    except MatrixRequestError:\n        return False", "docstring": "Kick a user from this room.\n\n\nArgs:\nuser_id (str): The matrix user id of a user.\nreason  (str): A reason for kicking the user.\n\nReturns:\nboolean: Whether user was kicked.", "source": "codesearchnet"}
{"code": "def HashBuffer(self, buf):\n    \n    for hasher in itervalues(self._hashers):\n      hasher.update(buf)\n      if self._progress:\n        self._progress()\n\n    self._bytes_read += len(buf)", "docstring": "Updates underlying hashers with a given buffer.\n\nArgs:\nbuf: A byte buffer (string object) that is going to be fed to the hashers.", "source": "juraj-google-style"}
{"code": "def _get_label_encoder_and_max(self, x):\n    label_count = x.fillna(NAN_INT).value_counts()\n    n_uniq = label_count.shape[0]\n    label_count = label_count[(label_count >= self.min_obs)]\n    n_uniq_new = label_count.shape[0]\n    offset = (0 if (n_uniq == n_uniq_new) else 1)\n    label_encoder = pd.Series((np.arange(n_uniq_new) + offset), index=label_count.index)\n    max_label = label_encoder.max()\n    label_encoder = label_encoder.to_dict()\n    return (label_encoder, max_label)", "docstring": "Return a mapping from values and its maximum of a column to integer labels.\n\nArgs:\nx (pandas.Series): a categorical column to encode.\n\nReturns:\nlabel_encoder (dict): mapping from values of features to integers\nmax_label (int): maximum label", "source": "codesearchnet"}
{"code": "def exec_start(self, exec_id, detach=False, tty=False, stream=False, socket=False, demux=False):\n    data = {'Tty': tty, 'Detach': detach}\n    headers = ({} if detach else {'Connection': 'Upgrade', 'Upgrade': 'tcp'})\n    res = self._post_json(self._url('/exec/{0}/start', exec_id), headers=headers, data=data, stream=True)\n    if detach:\n        return self._result(res)\n    if socket:\n        return self._get_raw_response_socket(res)\n    return self._read_from_socket(res, stream, tty=tty, demux=demux)", "docstring": "Start a previously set up exec instance.\n\nArgs:\nexec_id (str): ID of the exec instance\ndetach (bool): If true, detach from the exec command.\nDefault: False\ntty (bool): Allocate a pseudo-TTY. Default: False\nstream (bool): Stream response data. Default: False\nsocket (bool): Return the connection socket to allow custom\nread/write operations.\ndemux (bool): Return stdout and stderr separately\n\nReturns:\n\n(generator or str or tuple): If ``stream=True``, a generator\nyielding response chunks. If ``socket=True``, a socket object for\nthe connection. A string containing response data otherwise. If\n``demux=True``, a tuple with two elements of type byte: stdout and\nstderr.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def __init__(self, name=None, options=None):\n    compression_type = python_io.TFRecordOptions.get_compression_type_string(options)\n    rr = gen_io_ops.tf_record_reader_v2(name=name, compression_type=compression_type)\n    super(TFRecordReader, self).__init__(rr)", "docstring": "Create a TFRecordReader.\n\nArgs:\nname: A name for the operation (optional).\noptions: A TFRecordOptions object (optional).", "source": "github-repos"}
{"code": "def check_done(self):\n    raise NotImplementedError", "docstring": "Checks whether the restriction has been fully processed.\n\nCalled by the SDK harness after iterator returned by ``DoFn.process()``\nhas been fully read.\n\nThis method must raise a `ValueError` if there is still any unclaimed work\nremaining in the restriction when this method is invoked. Exception raised\nmust have an informative error message.\n\nThis API is required to be implemented in order to make sure no data loss\nduring SDK processing.\n\nReturns: ``True`` if current restriction has been fully processed.\nRaises:\nValueError: if there is still any unclaimed work remaining.", "source": "github-repos"}
{"code": "def _get_create_query(partition, tablename, include=None):\n    TYPE_MAP = {'int': 'INTEGER', 'float': 'REAL', six.binary_type.__name__: 'TEXT', six.text_type.__name__: 'TEXT', 'date': 'DATE', 'datetime': 'TIMESTAMP WITHOUT TIME ZONE'}\n    columns_types = []\n    if (not include):\n        include = []\n    for column in sorted(partition.datafile.reader.columns, key=(lambda x: x['pos'])):\n        if (include and (column['name'] not in include)):\n            continue\n        sqlite_type = TYPE_MAP.get(column['type'])\n        if (not sqlite_type):\n            raise Exception('Do not know how to convert {} to sql column.'.format(column['type']))\n        columns_types.append('    \"{}\" {}'.format(column['name'], sqlite_type))\n    columns_types_str = ',\\n'.join(columns_types)\n    query = 'CREATE TABLE IF NOT EXISTS {}(\\n{})'.format(tablename, columns_types_str)\n    return query", "docstring": "Creates and returns `CREATE TABLE ...` sql statement for given mprows.\n\nArgs:\npartition (orm.Partition):\ntablename (str): name of the table in the return create query.\ninclude (list of str, optional): list of columns to include to query.\n\nReturns:\nstr: create table query.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A RoFormer sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def _on_disconnect(self, result):\n    (success, _, context) = self._parse_return(result)\n    callback = context['callback']\n    connection_id = context['connection_id']\n    handle = context['handle']\n    callback(connection_id, self.id, success, 'No reason given')\n    self._remove_connection(handle)", "docstring": "Callback called when disconnection command finishes\n\nArgs:\nresult (dict): result returned from diconnection command", "source": "codesearchnet"}
{"code": "def AliasMethod(func, from_constant):\n    new_func = func.Replace(kind=MethodKind.METHOD)\n    if func.kind == MethodKind.STATICMETHOD or (func.kind == MethodKind.METHOD and (not from_constant)):\n        return new_func\n    return new_func.Replace(signatures=tuple((s.Replace(params=s.params[1:]) for s in new_func.signatures)))", "docstring": "Returns method func with its signature modified as if it has been aliased.\n\nArgs:\nfunc: A pytd.Function.\nfrom_constant: If True, func will be modified as if it has been aliased from\nan instance of its defining class, e.g.,\nclass Foo:\ndef func(self): ...\nconst = ...  # type: Foo\nfunc = const.func\nOtherwise, it will be modified as if aliased from the class itself:\nclass Foo:\ndef func(self): ...\nfunc = Foo.func\n\nReturns:\nA pytd.Function, the aliased method.", "source": "github-repos"}
{"code": "def transform_and_print_file(self, file_path: str, transformation: Optional[Callable[[Iterator[str]], Iterator[str]]]=None, output_stream: io.TextIOBase=cast(io.TextIOBase, sys.stdout)) -> None:\n    if transformation is None:\n        transformation = self.annotate_test_file\n    if file_path == _STANDARD_IO_STREAMS:\n        output_stream.writelines(transformation(sys.stdin))\n    else:\n        with open(file_path, 'r') as file_contents:\n            output_stream.writelines(transformation(file_contents))", "docstring": "Reads from `file_path`, applies a transformation, and prints to `stdout`.\n\nArgs:\nfile_path: The path to the input file. If this is equal to the constant\n`_STANDARD_IO_STREAMS` (i.e. the string \"-\"), the input will come from\n`stdin`.\ntransformation: A function that takes an iterator over the lines of an HLO\nfile and returns an iterator over the lines of the transformed file. If\nthis is left as `None`, `self.annotate_test_file` will be used.\noutput_stream: The stream to which the transformed file should be written.\nDefaults to `stdout`.", "source": "github-repos"}
{"code": "def AddBitbucketServerConnectedRepository(self, request, global_params=None):\n    config = self.GetMethodConfig('AddBitbucketServerConnectedRepository')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Add a Bitbucket Server repository to a given BitbucketServerConfig's connected repositories. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBitbucketServerConfigsAddBitbucketServerConnectedRepositoryRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(AddBitbucketServerConnectedRepositoryResponse) The response message.", "source": "github-repos"}
{"code": "def decompress(ctype, unc_len, data):\n    if (ctype == UBIFS_COMPR_LZO):\n        try:\n            return lzo.decompress(b''.join((b'\\xf0', struct.pack('>I', unc_len), data)))\n        except Exception as e:\n            error(decompress, 'Warn', ('LZO Error: %s' % e))\n    elif (ctype == UBIFS_COMPR_ZLIB):\n        try:\n            return zlib.decompress(data, (- 11))\n        except Exception as e:\n            error(decompress, 'Warn', ('ZLib Error: %s' % e))\n    else:\n        return data", "docstring": "Decompress data.\n\nArguments:\nInt:ctype    -- Compression type LZO, ZLIB (*currently unused*).\nInt:unc_len  -- Uncompressed data lenth.\nStr:data     -- Data to be uncompessed.\n\nReturns:\nUncompressed Data.", "source": "codesearchnet"}
{"code": "def do_searchfy(self, query, **kwargs):\n        \n        \n        try:\n            results = self.wrapperAPI.search_users(query)\n            \n            for r in results:\n                aux = {}\n                aux[\"type\"]=\"i3visio.uri\"\n                alias=r[\"value\"].split(' - ')[1]\n                qURL = self.createURL(word=alias, mode=\"usufy\")\n                aux[\"value\"]= qURL\n                aux[\"attributes\"]= []\n                r[\"attributes\"].append(aux)\n\n        \n        except Exception, e:\n            return super(Twitter, self).do_searchfy(query, **kwargs)", "docstring": "Verifying a usufy query in this platform.\n\nThis might be redefined in any class inheriting from Platform.\n\nArgs:\n-----\nquery: The element to be searched.\n\nReturn:\n-------\nA list of elements to be appended.", "source": "juraj-google-style"}
{"code": "def get_extra_inputs():\n    g = ops.get_default_graph()\n    if isinstance(g, _FuncGraph):\n        return g.extra_inputs\n    else:\n        return []", "docstring": "Returns the captured input tensors by the function.\n\nReturns:\nIf the default graph is being used to define a function, the\nreturned list of tensors are those accessed inside the function body\nbut defined outside the function body so far. Otherwise, returns an\nempty list.", "source": "github-repos"}
{"code": "def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'):\n    assert target_data_format in {'channels_last', 'channels_first'}\n    kernel, bias = dense.get_weights()\n    for i in range(kernel.shape[1]):\n        if target_data_format == 'channels_first':\n            c, h, w = previous_feature_map_shape\n            original_fm_shape = (h, w, c)\n            ki = kernel[:, i].reshape(original_fm_shape)\n            ki = np.transpose(ki, (2, 0, 1))\n        else:\n            h, w, c = previous_feature_map_shape\n            original_fm_shape = (c, h, w)\n            ki = kernel[:, i].reshape(original_fm_shape)\n            ki = np.transpose(ki, (1, 2, 0))\n        kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))\n    dense.set_weights([kernel, bias])", "docstring": "Utility useful when changing a convnet's `data_format`.\n\nWhen porting the weights of a convnet from one data format to the other,\nif the convnet includes a `Flatten` layer\n(applied to the last convolutional feature map)\nfollowed by a `Dense` layer, the weights of that `Dense` layer\nshould be updated to reflect the new dimension ordering.\n\nArgs:\ndense: The target `Dense` layer.\nprevious_feature_map_shape: A shape tuple of 3 integers,\ne.g. `(512, 7, 7)`. The shape of the convolutional\nfeature map right before the `Flatten` layer that\ncame before the target `Dense` layer.\ntarget_data_format: One of \"channels_last\", \"channels_first\".\nSet it \"channels_last\"\nif converting a \"channels_first\" model to \"channels_last\",\nor reciprocally.", "source": "github-repos"}
{"code": "def remove_slice_from_lines(lines, clean_text, slice) -> str:\n    base = clean_text[slice[0]]\n    section = list(slice)\n    check_start_flag = False\n    for line_idx in range(max(0, slice[0] - 1), max(0, slice[0] - 5), -1):\n        if not lines[line_idx]:\n            continue\n        if lines[line_idx] == '\n            section[0] = line_idx\n            break\n        elif ratio(base, remove_numbers(lines[line_idx])) < 0.9:\n            section[0] = line_idx + 1\n            potential_ref = remove_numbers(lines[max(0, line_idx - 1)].partition('* [')[-1])\n            if len(potential_ref) >= 0.75 * len(base) and ratio(base, potential_ref) < 0.9:\n                section[0] = line_idx\n            check_start_flag = True\n            break\n    for line_idx in range(min(len(lines), slice[1]), min(len(lines), slice[1] + 5)):\n        if ratio(base, remove_numbers(lines[line_idx])) < 0.9:\n            section[1] = line_idx\n            break\n    if len(lines) <= section[1]:\n        section[1] = len(lines) - 1\n    to_delete = '\\n'.join(lines[section[0]:section[1] + 1])\n    itera, iterb = (enumerate(lines[section[1] - 1]), enumerate(lines[section[1]]))\n    while True:\n        try:\n            ia, a = next(itera)\n            while a.isnumeric():\n                ia, a = next(itera)\n            ib, b = next(iterb)\n            while b.isnumeric():\n                ib, b = next(iterb)\n            if a != b:\n                break\n        except StopIteration:\n            break\n    if check_start_flag and '* [' in to_delete:\n        to_delete = '* [' + to_delete.partition('* [')[-1]\n    try:\n        delta = len(lines[section[1]]) - ib - 1\n        if delta > 0:\n            to_delete = to_delete[:-delta]\n    except UnboundLocalError:\n        pass\n    return to_delete.strip()", "docstring": "Remove a slice of text from the lines based on specific criteria.\n\nThis function identifies a slice of text within the lines and removes it based on certain conditions.\n\nArgs:\nlines (list of str): The list of lines containing the text.\nclean_text (list of str): A cleaned version of the text (without numbers).\nslice (tuple): A tuple representing the start and end indices of the slice to be removed.\n\nReturns:\nstr: The removed slice of text as a single string.", "source": "github-repos"}
{"code": "def read_struct(fstream):\n    line = fstream.readline().strip()\n    fragments = line.split(',')\n    fragments = [x for x in fragments if (x is not None)]\n    partition = dict()\n    if (not (len(fragments) >= 3)):\n        return None\n    partition['struct'] = fragments[0]\n    partition['info'] = fragments[1]\n    partition['num_lines'] = fragments[2]\n    struct = None\n    if ((partition is not None) and (partition['struct'] == 'STRUCT')):\n        num_lines = int(partition['num_lines'].strip())\n        struct = {}\n        for _ in range(num_lines):\n            cols = fetch_cols(fstream)\n            struct.update({cols[0]: cols[1:]})\n    return struct", "docstring": "Read a likwid struct from the text stream.\n\nArgs:\nfstream: Likwid's filestream.\n\nReturns (dict(str: str)):\nA dict containing all likwid's struct info as key/value pairs.", "source": "codesearchnet"}
{"code": "def extract_variable_info(kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]:\n    if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):\n        if 'shape' in kwargs['initial_value'].keywords:\n            shape = kwargs['initial_value'].keywords['shape']\n        else:\n            shape = kwargs['initial_value'].args[0]\n        return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func)\n    elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):\n        raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))\n    else:\n        return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'])", "docstring": "Extracts the variable creation attributes from the kwargs.\n\nArgs:\nkwargs: a dict of keyword arguments that were passed to a variable creator\nscope.\n\nReturns:\nA tuple of variable name, shape, dtype, initialization function.", "source": "github-repos"}
{"code": "def _extract_mnist_images(filename, num_images):\n  \n  with gzip.open(filename) as bytestream:\n    bytestream.read(16)\n    buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)\n    data = np.frombuffer(buf, dtype=np.uint8)\n    data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1)\n  return data", "docstring": "Extract images from an MNIST file into a numpy array.\n\nArgs:\nfilename: The path to an MNIST images file.\nnum_images: The number of images in the file.\n\nReturns:\nA numpy array of shape [number_of_images, height, width, channels].", "source": "juraj-google-style"}
{"code": "def build(self, input_shape):\n    if not hasattr(self.build, '_is_default'):\n        self._build_input_shape = input_shape\n    self.built = True", "docstring": "Creates the variables of the layer (optional, for subclass implementers).\n\nThis is a method that implementers of subclasses of `Layer` or `Model`\ncan override if they need a state-creation step in-between\nlayer instantiation and layer call.\n\nThis is typically used to create the weights of `Layer` subclasses.\n\nArgs:\ninput_shape: Instance of `TensorShape`, or list of instances of\n`TensorShape` if the layer expects a list of inputs\n(one instance per input).", "source": "github-repos"}
{"code": "def get_user_data_configuration():\n    from cloud_inquisitor import get_local_aws_session, app_config\n    kms_region = app_config.kms_region\n    session = get_local_aws_session()\n    if (session.get_credentials().method == 'iam-role'):\n        kms = session.client('kms', region_name=kms_region)\n    else:\n        sts = session.client('sts')\n        audit_role = sts.assume_role(RoleArn=app_config.aws_api.instance_role_arn, RoleSessionName='cloud_inquisitor')\n        kms = boto3.session.Session(audit_role['Credentials']['AccessKeyId'], audit_role['Credentials']['SecretAccessKey'], audit_role['Credentials']['SessionToken']).client('kms', region_name=kms_region)\n    user_data_url = app_config.user_data_url\n    res = requests.get(user_data_url)\n    if (res.status_code == 200):\n        data = kms.decrypt(CiphertextBlob=b64decode(res.content))\n        kms_config = json.loads(zlib.decompress(data['Plaintext']).decode('utf-8'))\n        app_config.database_uri = kms_config['db_uri']\n    else:\n        raise RuntimeError('Failed loading user-data, cannot continue: {}: {}'.format(res.status_code, res.content))", "docstring": "Retrieve and update the application configuration with information from the user-data\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def dbmax50years(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmax50years`'.format(value))\n    self._dbmax50years = value", "docstring": "Corresponds to IDD Field `dbmax50years`\n50-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax50years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def enable(self, timeout=0):\n        \n        self.client.api.enable_plugin(self.name, timeout)\n        self.reload()", "docstring": "Enable the plugin.\n\nArgs:\ntimeout (int): Timeout in seconds. Default: 0\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def get_operator_output_port(self):\n    return OperatorOutputPort(self.rest_client.make_request(self.operatorOutputPort), self.rest_client)", "docstring": "Get the output port of this exported stream.\n\nReturns:\nOperatorOutputPort: Output port of this exported stream.", "source": "codesearchnet"}
{"code": "def potcar_spec( filename ):\n    \n    p_spec = {}\n    with open( filename, 'r' ) as f:\n        potcars = re.split('(End of Dataset\\n)', f.read() )\n    potcar_md5sums = [ md5sum( ''.join( pair ) ) for pair in zip( potcars[::2], potcars[1:-1:2] ) ]\n    for this_md5sum in potcar_md5sums:\n        for ps in potcar_sets:\n            for p, p_md5sum in potcar_md5sum_data[ ps ].items():\n                if this_md5sum == p_md5sum:\n                    p_spec[ p ] = ps\n    if len( p_spec ) != len( potcar_md5sums ):\n        raise ValueError( 'One or more POTCARs did not have matching md5 hashes' )\n    return p_spec", "docstring": "Returns a dictionary specifying the pseudopotentials contained in a POTCAR file.\n\nArgs:\nfilename (Str): The name of the POTCAR file to process.\n\nReturns:\n(Dict): A dictionary of pseudopotential filename: dataset pairs, e.g.\n{ 'Fe_pv': 'PBE_54', 'O', 'PBE_54' }", "source": "juraj-google-style"}
{"code": "def __init__(self, value: Union[int, float], period: Union[int, float]):\n        \n        self.value = value % period\n        self.period = period", "docstring": "Initializes the equivalence class.\n\nArgs:\nvalue: numerical value to wrap.\nperiod: periodicity of the numerical value.", "source": "juraj-google-style"}
{"code": "def Clear(self):\n    headers = {'Content-length': '0'}\n    (response, _) = self._http.request(('%s/reset' % self._host), method='POST', headers=headers)\n    if (response.status == 200):\n        return True\n    else:\n        logging.warning('failed to clear emulator; response was: %s', response)", "docstring": "Clears all data from the emulator instance.\n\nReturns:\nTrue if the data was successfully cleared, False otherwise.", "source": "codesearchnet"}
{"code": "def bulk_create(self, *records):\n    if (not records):\n        raise TypeError('Must provide at least one record')\n    if any(((not isinstance(r, dict)) for r in records)):\n        raise TypeError('New records must be provided as dicts')\n    new_records = []\n    for record_data in records:\n        record = record_factory(self._app, record_data)\n        record.validate()\n        new_records.append(record)\n    self._swimlane.request('post', 'app/{}/record/batch'.format(self._app.id), json=[r._raw for r in new_records])", "docstring": "Create and validate multiple records in associated app\n\nArgs:\n*records (dict): One or more dicts of new record field names and values\n\nNotes:\nRequires Swimlane 2.15+\n\nValidates like :meth:`create`, but only sends a single request to create all provided fields, and does not\nreturn the newly created records\n\nAny validation failures on any of the records will abort the batch creation, not creating any new records\n\nDoes not return the newly created records\n\nExamples:\nCreate 3 new records with single request\n\n::\n\napp.records.bulk_create(\n{'Field 1': 'value 1', ...},\n{'Field 1': 'value 2', ...},\n{'Field 1': 'value 3', ...}\n)\n\nRaises:\nswimlane.exceptions.UnknownField: If any field in any new record cannot be found\nswimlane.exceptions.ValidationError: If any field in any new record fails validation\nTypeError: If no dict of fields was provided, or any provided argument is not a dict", "source": "codesearchnet"}
{"code": "def config_pp(subs):\n    print('(c|f): available only as CLI argument/in the config file', end='\\n\\n')\n    for sub in subs:\n        hlp_lst = []\n        for (opt, meta) in conf[sub].defaults_():\n            if (meta.cmd_arg ^ meta.conf_arg):\n                opt += (' (c)' if meta.cmd_arg else ' (f)')\n            hlp_lst.append((opt, meta.help))\n        if hlp_lst:\n            print('{}:'.format(sub))\n            _pretty_print(hlp_lst, sep=' -- ', text_width=min(get_terminal_size().columns, 100))\n            print()", "docstring": "Pretty print of configuration options.\n\nArgs:\nsubs (iterable of str): iterable with the list of conf sections to\nprint.", "source": "codesearchnet"}
{"code": "def fromkeys(cls, iterable, value=None):\n    if (not callable(value)):\n        return cls(dict.fromkeys(iterable, value))\n    return cls(((key, value(key)) for key in iterable))", "docstring": "Create a new d from\n\nArgs:\niterable: Iterable containing keys\nvalue: value to associate with each key.\nIf callable, will be value[key]\n\nReturns: new DictWrapper\n\nExample:\n\n>>> from ww import d\n>>> sorted(d.fromkeys('123', value=4).items())\n[('1', 4), ('2', 4), ('3', 4)]\n>>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())\n[(0, 0), (1, 1), (2, 4)]", "source": "codesearchnet"}
{"code": "def _compile_fragment_ast(schema, current_schema_type, ast, location, context):\n    query_metadata_table = context['metadata']\n    coerces_to_type_name = ast.type_condition.name.value\n    coerces_to_type_obj = schema.get_type(coerces_to_type_name)\n    basic_blocks = []\n    is_same_type_as_scope = current_schema_type.is_same_type(coerces_to_type_obj)\n    equivalent_union_type = context['type_equivalence_hints'].get(coerces_to_type_obj, None)\n    is_base_type_of_union = (isinstance(current_schema_type, GraphQLUnionType) and current_schema_type.is_same_type(equivalent_union_type))\n    if (not (is_same_type_as_scope or is_base_type_of_union)):\n        query_metadata_table.record_coercion_at_location(location, coerces_to_type_obj)\n        basic_blocks.append(blocks.CoerceType({coerces_to_type_name}))\n    inner_basic_blocks = _compile_ast_node_to_ir(schema, coerces_to_type_obj, ast, location, context)\n    basic_blocks.extend(inner_basic_blocks)\n    return basic_blocks", "docstring": "Return a list of basic blocks corresponding to the inline fragment at this AST node.\n\nArgs:\nschema: GraphQL schema object, obtained from the graphql library\ncurrent_schema_type: GraphQLType, the schema type at the current location\nast: GraphQL AST node, obtained from the graphql library.\nlocation: Location object representing the current location in the query\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\n\nReturns:\nlist of basic blocks, the compiled output of the vertex AST node", "source": "codesearchnet"}
{"code": "def videos(self, **kwargs):\n        \n        path = self._get_series_id_season_number_episode_number_path('videos')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the videos that have been added to a TV episode (teasers, clips,\netc...).\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _substitute_globals(config_dict):\n    \n    constants = _get_all_constants()\n\n    if type(config_dict) != dict:\n        return\n\n    for key, val in config_dict.iteritems():\n        if key in constants and type(val) in _ALLOWED:\n            globals()[key] = val", "docstring": "Set global variables to values defined in `config_dict`.\n\nArgs:\nconfig_dict (dict): dict with data, which are used to set `globals`.\n\nNote:\n`config_dict` have to be dictionary, or it is ignored. Also all\nvariables, that are not already in globals, or are not types defined in\n:attr:`_ALLOWED` (str, int, ..) or starts with ``_`` are silently\nignored.", "source": "juraj-google-style"}
{"code": "def mark_as_unsaveable(self, error_message):\n    self._saveable = False\n    if isinstance(error_message, str):\n        error_message = [error_message]\n    self._saving_errors.update(error_message)", "docstring": "Marks this FuncGraph as unsaveable.\n\nAny attempts to export this FuncGraph will raise an error with the specified\nmessage.\n\nArgs:\nerror_message: List or string containing the error message to be raised\nwhen saving this FuncGraph to SavedModel.", "source": "github-repos"}
{"code": "def parse(cls, representation, corpus=None):\n        \n\n        criteria_definitions = representation.split('\\n')\n        criteria = []\n\n        for i in range(0, len(criteria_definitions), 2):\n            filter_name = criteria_definitions[i]\n            filter_repr = criteria_definitions[i + 1]\n\n            if filter_name not in available_filter_criteria():\n                raise UnknownFilterCriteriaException('Unknown filter-criterion {}'.format(filter_name))\n\n            criterion = available_filter_criteria()[filter_name].parse(filter_repr)\n            criteria.append(criterion)\n\n        return cls(corpus, criteria)", "docstring": "Creates a subview from a string representation (created with ``self.serialize``).\n\nArgs:\nrepresentation (str): The representation.\n\nReturns:\nSubview: The created subview.", "source": "juraj-google-style"}
{"code": "def _unpack(formatstring, packed):\n    \n    _checkString(formatstring, description='formatstring', minlength=1)\n    _checkString(packed, description='packed string', minlength=1)\n\n    if sys.version_info[0] > 2:\n        packed = bytes(packed, encoding='latin1')  \n\n    try:\n        value = struct.unpack(formatstring, packed)[0]\n    except:\n        errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.'\n        errortext += ' Bytestring: {0!r} Struct format code is: {1}'\n        raise ValueError(errortext.format(packed, formatstring))\n\n    return value", "docstring": "Unpack a bytestring into a value.\n\nUses the built-in :mod:`struct` Python module.\n\nArgs:\n* formatstring (str): String for the packing. See the :mod:`struct` module for details.\n* packed (str): The bytestring to be unpacked.\n\nReturns:\nA value. The type depends on the formatstring.\n\nRaises:\nValueError\n\nNote that the :mod:`struct` module wants byte buffers for Python3,\nbut bytestrings for Python2. This is compensated for automatically.", "source": "juraj-google-style"}
{"code": "def get_generic_distributions(generic_dists, metric_id):\n    return sum((get_all_distributions_by_type(dist, metric_id) for dist in generic_dists), [])", "docstring": "Creates flatten list of distributions per its value type.\nA generic distribution is the one which is not processed but saved in\nthe most raw version.\n\nArgs:\ngeneric_dists: list of distributions to be saved\nmetric_id(uuid): id of the current test run\n\nReturns:\nlist of dictionaries made from :class:`DistributionMetric`", "source": "github-repos"}
{"code": "def __init__(self, datastore_client, storage_client, dataset_name):\n    \n    super(DatasetBatches, self).__init__(\n        datastore_client=datastore_client,\n        entity_kind_batches=KIND_DATASET_BATCH,\n        entity_kind_images=KIND_DATASET_IMAGE)\n    self._storage_client = storage_client\n    self._dataset_name = dataset_name", "docstring": "Initializes DatasetBatches.\n\nArgs:\ndatastore_client: instance of CompetitionDatastoreClient\nstorage_client: instance of CompetitionStorageClient\ndataset_name: name of the dataset ('dev' or 'final')", "source": "juraj-google-style"}
{"code": "def update_value(self, offset, value):\n    if ((offset + len(value)) > self.total_size):\n        return Error.INPUT_BUFFER_TOO_LONG\n    if (len(self.current_value) < offset):\n        self.current_value += bytearray((offset - len(self.current_value)))\n    if (len(self.current_value) > offset):\n        self.current_value = self.current_value[:offset]\n    self.current_value += bytearray(value)\n    return 0", "docstring": "Update the binary value currently stored for this config value.\n\nReturns:\nint: An opaque error code that can be returned from a set_config rpc", "source": "codesearchnet"}
{"code": "def __init__(self, name=None, description=None, arguments=None):\n        \n        if name:\n            self.name = name\n        if description:\n            self.description = description\n\n        self.arguments = arguments or {}\n        self.data = None", "docstring": "Initialization method.\n\nArgs:\narguments (dict): arguments that will be used for get_data method.", "source": "juraj-google-style"}
{"code": "def dict_to_schema(schema_dict, required, allow_custom_keys=True, modifier=None):\n    if modifier:\n        modifier = Use(modifier)\n\n    def _to(value):\n        if isinstance(value, dict):\n            d = {}\n            for (k, v) in value.iteritems():\n                if isinstance(k, basestring):\n                    k = (Required(k) if required else Optional(k))\n                d[k] = _to(v)\n            if allow_custom_keys:\n                d[Optional(basestring)] = (modifier or object)\n            schema = Schema(d)\n        elif modifier:\n            schema = And(value, modifier)\n        else:\n            schema = value\n        return schema\n    return _to(schema_dict)", "docstring": "Convert a dict of Schemas into a Schema.\n\nArgs:\nrequired (bool): Whether to make schema keys optional or required.\nallow_custom_keys (bool, optional): If True, creates a schema that\nallows custom items in dicts.\nmodifier (callable): Functor to apply to dict values - it is applied\nvia `Schema.Use`.\n\nReturns:\nA `Schema` object.", "source": "codesearchnet"}
{"code": "def FetchSizeOfSignedBinary(binary_urn,\n                            token = None\n                           ):\n  \n  if _ShouldUseLegacyDatastore():\n    try:\n      aff4_stream = aff4.FACTORY.Open(\n          binary_urn, aff4_type=collects.GRRSignedBlob, mode=\"r\", token=token)\n      return aff4_stream.size\n    except aff4.InstantiationError:\n      raise SignedBinaryNotFoundError(binary_urn)\n  else:\n    try:\n      references, _ = data_store.REL_DB.ReadSignedBinaryReferences(\n          _SignedBinaryIDFromURN(binary_urn))\n    except db.UnknownSignedBinaryError:\n      raise SignedBinaryNotFoundError(binary_urn)\n    last_reference = references.items[-1]\n    return last_reference.offset + last_reference.size", "docstring": "Returns the size of the given binary (in bytes).\n\nArgs:\nbinary_urn: RDFURN that uniquely identifies the binary.\ntoken: ACL token to use with the legacy (non-relational) datastore.\n\nRaises:\nSignedBinaryNotFoundError: If no signed binary with the given URN exists.", "source": "juraj-google-style"}
{"code": "def convert(self, inp):\n        \n        inp = self._preprocess(inp)\n\n        n = NumberService().longestNumber(inp)\n        units = self.extractUnits(inp)\n\n        \n        quantity = pq.Quantity(float(n), units[0])\n        quantity.units = units[1]\n\n        return quantity", "docstring": "Converts a string representation of some quantity of units into a\nquantities object.\n\nArgs:\ninp (str): A textual representation of some quantity of units,\ne.g., \"fifty kilograms\".\n\nReturns:\nA quantities object representing the described quantity and its\nunits.", "source": "juraj-google-style"}
{"code": "def Register(self, name, constructor):\n    \n    precondition.AssertType(name, Text)\n\n    if name in self._constructors:\n      message = \"Duplicated constructors %r and %r for name '%s'\"\n      message %= (constructor, self._constructors[name], name)\n      raise ValueError(message)\n\n    self._constructors[name] = constructor", "docstring": "Registers a new constructor in the factory.\n\nArgs:\nname: A name associated with given constructor.\nconstructor: A constructor function that creates instances.\n\nRaises:\nValueError: If there already is a constructor associated with given name.", "source": "juraj-google-style"}
{"code": "def draw_mask(im, mask, alpha=0.5, color=None):\n    if (color is None):\n        color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::(- 1)]\n    im = np.where(np.repeat((mask > 0)[(:, :, None)], 3, axis=2), ((im * (1 - alpha)) + (color * alpha)), im)\n    im = im.astype('uint8')\n    return im", "docstring": "Overlay a mask on top of the image.\n\nArgs:\nim: a 3-channel uint8 image in BGR\nmask: a binary 1-channel image of the same size\ncolor: if None, will choose automatically", "source": "codesearchnet"}
{"code": "def make_descriptors(self, base_name):\n    units_name = (base_name + '_units')\n    units_props = self._units_type.make_descriptors(units_name)\n    return (units_props + [UnitsSpecPropertyDescriptor(base_name, self, units_props[0])])", "docstring": "Return a list of ``PropertyDescriptor`` instances to install on a\nclass, in order to delegate attribute access to this property.\n\nUnlike simpler property types, ``UnitsSpec`` returns multiple\ndescriptors to install. In particular, descriptors for the base\nproperty as well as the associated units property are returned.\n\nArgs:\nname (str) : the name of the property these descriptors are for\n\nReturns:\nlist[PropertyDescriptor]\n\nThe descriptors returned are collected by the ``MetaHasProps``\nmetaclass and added to ``HasProps`` subclasses during class creation.", "source": "codesearchnet"}
{"code": "def _start_reader_thread(self, stream, chunks):\n    \n    import io  \n    import threading  \n    def target():\n      while True:\n        chunk = stream.read(io.DEFAULT_BUFFER_SIZE)\n        if not chunk:\n          break\n        chunks.append(chunk)\n    thread = threading.Thread(target=target)\n    thread.start()\n    return thread", "docstring": "Starts a thread for reading output from FFMPEG.\n\nThe thread reads consecutive chunks from the stream and saves them in\nthe given list.\n\nArgs:\nstream: output stream of the FFMPEG process.\nchunks: list to save output chunks to.\n\nReturns:\nThread", "source": "juraj-google-style"}
{"code": "def find(self, title):\n    files = backend.iterfiles(self._drive, name=title)\n    try:\n        return next((self[id] for (id, _) in files))\n    except StopIteration:\n        raise KeyError(title)", "docstring": "Fetch and return the first spreadsheet with the given title.\n\nArgs:\ntitle(str): title/name of the spreadsheet to return\nReturns:\nSpreadSheet: new SpreadSheet instance\nRaises:\nKeyError: if no spreadsheet with the given ``title`` is found", "source": "codesearchnet"}
{"code": "def register_dispatchable_type(cls):\n    _api_dispatcher.register_dispatchable_type(cls)\n    return cls", "docstring": "Class decorator that registers a type for use with type-based dispatch.\n\nShould *not* be used with subclasses of `CompositeTensor` or `ExtensionType`\n(which are automatically registered).\n\nNote: this function is intended to support internal legacy use cases (such\nas RaggedTensorValue), and will probably not be exposed as a public API.\n\nArgs:\ncls: The class to register.\n\nReturns:\n`cls`.", "source": "github-repos"}
{"code": "def prune(A, threshold):\n    if isinstance(A, Poly):\n        B = A.A.copy()\n        for key in A.keys:\n            values = B[key].copy()\n            values[(numpy.abs(values) < threshold)] = 0.0\n            B[key] = values\n        return Poly(B, A.dim, A.shape, A.dtype)\n    A = A.copy()\n    A[(numpy.abs(A) < threshold)] = 0.0\n    return A", "docstring": "Remove coefficients that is not larger than a given threshold.\n\nArgs:\nA (Poly):\nInput data.\nthreshold (float):\nThreshold for which values to cut.\n\nReturns:\n(Poly):\nSame type as A.\n\nExamples:\n>>> P = chaospy.sum(chaospy.prange(3)*2**-numpy.arange(0, 6, 2, float))\n>>> print(P)\n0.0625q0^2+0.25q0+1.0\n>>> print(chaospy.prune(P, 0.1))\n0.25q0+1.0\n>>> print(chaospy.prune(P, 0.5))\n1.0\n>>> print(chaospy.prune(P, 1.5))\n0.0", "source": "codesearchnet"}
{"code": "def get_callback_url(self, **kwargs):\n    \n    \n    if not self.async:\n      raise UnexpectedPipelineError(\n          'May only call get_callback_url() method for asynchronous pipelines.')\n    kwargs['pipeline_id'] = self._pipeline_key.name()\n    params = urllib.urlencode(sorted(kwargs.items()))\n    return '%s/callback?%s' % (self.base_path, params)", "docstring": "Returns a relative URL for invoking this Pipeline's callback method.\n\nArgs:\nkwargs: Dictionary mapping keyword argument names to single values that\nshould be passed to the callback when it is invoked.\n\nRaises:\nUnexpectedPipelineError if this is invoked on pipeline that is not async.", "source": "juraj-google-style"}
{"code": "def get_choices_for(self, field):\n        \n        choices = self._fields[field].choices\n        if isinstance(choices, six.string_types):\n            return [(d['value'], d['name']) for d in self._choices_manager.get_all(choices)]\n        else:\n            return choices", "docstring": "Get the choices for the given fields.\n\nArgs:\nfield (str): Name of field.\n\nReturns:\nList of tuples. [(name, value),...]", "source": "juraj-google-style"}
{"code": "def __init__(self, queue_id=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_SET_QUEUE, length=8)\n        self.queue_id = queue_id", "docstring": "Create an ActionSetQueue with the optional parameters below.\n\nArgs:\nqueue_id (int): The queue_id send packets to given queue on port.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_format=None, shape=(None,)):\n    \n    self._file_format = file_format\n    if len(shape) != 1:\n      raise TypeError(\n          \"Audio feature currently only supports 1-D values, got %s.\" % shape)\n    self._shape = shape\n    super(Audio, self).__init__(shape=shape, dtype=tf.int64)", "docstring": "Constructs the connector.\n\nArgs:\nfile_format: `str`, the audio file format. Can be any format ffmpeg\nunderstands. If `None`, will attempt to infer from the file extension.\nshape: `tuple`, shape of the data.", "source": "juraj-google-style"}
{"code": "def add_logger(name, level=None, format=None):\n    format = (format or '%(filename)-11s %(lineno)-3d: %(message)s')\n    log = logging.getLogger(name)\n    log.setLevel((level or logging.INFO))\n    ch = logging.StreamHandler(sys.stdout)\n    ch.setFormatter(logging.Formatter(format))\n    log.addHandler(ch)\n    return log", "docstring": "Set up a stdout logger.\n\nArgs:\nname (str): name of the logger\nlevel: defaults to logging.INFO\nformat (str): format string for logging output.\ndefaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.\n\nReturns:\nThe logger object.", "source": "codesearchnet"}
{"code": "def orient_averaged_adaptive(tm):\n    S = np.zeros((2, 2), dtype=complex)\n    Z = np.zeros((4, 4))\n\n    def Sfunc(beta, alpha, i, j, real):\n        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)\n        s = (S_ang[(i, j)].real if real else S_ang[(i, j)].imag)\n        return (s * tm.or_pdf(beta))\n    ind = range(2)\n    for i in ind:\n        for j in ind:\n            S.real[(i, j)] = (dblquad(Sfunc, 0.0, 360.0, (lambda x: 0.0), (lambda x: 180.0), (i, j, True))[0] / 360.0)\n            S.imag[(i, j)] = (dblquad(Sfunc, 0.0, 360.0, (lambda x: 0.0), (lambda x: 180.0), (i, j, False))[0] / 360.0)\n\n    def Zfunc(beta, alpha, i, j):\n        (S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)\n        return (Z_ang[(i, j)] * tm.or_pdf(beta))\n    ind = range(4)\n    for i in ind:\n        for j in ind:\n            Z[(i, j)] = (dblquad(Zfunc, 0.0, 360.0, (lambda x: 0.0), (lambda x: 180.0), (i, j))[0] / 360.0)\n    return (S, Z)", "docstring": "Compute the T-matrix using variable orientation scatterers.\n\nThis method uses a very slow adaptive routine and should mainly be used\nfor reference purposes. Uses the set particle orientation PDF, ignoring\nthe alpha and beta attributes.\n\nArgs:\ntm: TMatrix (or descendant) instance\n\nReturns:\nThe amplitude (S) and phase (Z) matrices.", "source": "codesearchnet"}
{"code": "def usufyToTextExport(d, fPath=None):\n    \n    \n    if d == []:\n        return \"+------------------+\\n| No data found... |\\n+------------------+\"\n\n    import pyexcel as pe\n    import pyexcel.ext.text as text\n\n    if fPath == None:\n        isTerminal = True\n    else:\n        isTerminal = False\n\n    try:\n        oldData = get_data(fPath)\n    except:\n        \n        oldData = {\"OSRFramework\":[]}\n\n    \n    tabularData = _generateTabularData(d, {\"OSRFramework\":[[]]}, True, canUnicode=False)\n\n    \n    sheet = pe.Sheet(tabularData[\"OSRFramework\"])\n    sheet.name = \"Profiles recovered (\" + getCurrentStrDatetime() +\").\"\n    \n    sheet.name_columns_by_row(0)\n    text.TABLEFMT = \"grid\"\n\n    try:\n        with open(fPath, \"w\") as oF:\n            oF.write(str(sheet))\n    except Exception as e:\n        \n        return unicode(sheet)", "docstring": "Workaround to export to a .txt file or to show the information.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file. If None was provided, it will\nassume that it has to print it.\n\nReturns:\n--------\nunicode: It sometimes returns a unicode representation of the Sheet\nreceived.", "source": "juraj-google-style"}
{"code": "def parse(self) -> Statement:\n    self.opt_separator()\n    start = self.offset\n    res = self.statement()\n    if (res.keyword not in ['module', 'submodule']):\n        self.offset = start\n        raise UnexpectedInput(self, \"'module' or 'submodule'\")\n    if ((self.name is not None) and (res.argument != self.name)):\n        raise ModuleNameMismatch(res.argument, self.name)\n    if self.rev:\n        revst = res.find1('revision')\n        if ((revst is None) or (revst.argument != self.rev)):\n            raise ModuleRevisionMismatch(revst.argument, self.rev)\n    try:\n        self.opt_separator()\n    except EndOfInput:\n        return res\n    raise UnexpectedInput(self, 'end of input')", "docstring": "Parse a complete YANG module or submodule.\n\nArgs:\nmtext: YANG module text.\n\nRaises:\nEndOfInput: If past the end of input.\nModuleNameMismatch: If parsed module name doesn't match `self.name`.\nModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`.\nUnexpectedInput: If top-level statement isn't ``(sub)module``.", "source": "codesearchnet"}
{"code": "def GetParserPluginsInformation(cls, parser_filter_expression=None):\n    parser_plugins_information = []\n    for (_, parser_class) in cls.GetParsers(parser_filter_expression=parser_filter_expression):\n        if parser_class.SupportsPlugins():\n            for (plugin_name, plugin_class) in parser_class.GetPlugins():\n                description = getattr(plugin_class, 'DESCRIPTION', '')\n                parser_plugins_information.append((plugin_name, description))\n    return parser_plugins_information", "docstring": "Retrieves the parser plugins information.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\nlist[tuple[str, str]]: pairs of parser plugin names and descriptions.", "source": "codesearchnet"}
{"code": "def input_shape(self):\n    if not self._inbound_nodes:\n        raise AttributeError('The layer has never been called and thus has no defined input shape.')\n    all_input_shapes = set([str(node.input_shapes) for node in self._inbound_nodes])\n    if len(all_input_shapes) == 1:\n        return self._inbound_nodes[0].input_shapes\n    else:\n        raise AttributeError('The layer \"' + str(self.name) + ' has multiple inbound nodes, with different input shapes. Hence the notion of \"input shape\" is ill-defined for the layer. Use `get_input_shape_at(node_index)` instead.')", "docstring": "Retrieves the input shape(s) of a layer.\n\nOnly applicable if the layer has exactly one input,\ni.e. if it is connected to one incoming layer, or if all inputs\nhave the same shape.\n\nReturns:\nInput shape, as an integer shape tuple\n(or list of shape tuples, one tuple per input tensor).\n\nRaises:\nAttributeError: if the layer has no defined input_shape.\nRuntimeError: if called in Eager mode.", "source": "github-repos"}
{"code": "def extract_compile_commands(parsed_aquery_output: _JSONDict) -> list[CompileCommand]:\n    actions = parsed_aquery_output['actions']\n    commands = []\n    for action in actions:\n        command = CompileCommand.from_args_list(action['arguments'])\n        commands.append(command)\n    return commands", "docstring": "Gathers compile commands to run from `bazel aquery` JSON output.\n\nArguments:\nparsed_aquery_output: Parsed JSON representing the output of `bazel aquery\n--output=jsonproto`.\n\nReturns:\nThe list of CompileCommands that should be executed.", "source": "github-repos"}
{"code": "def sequence_ids(self, batch_index: int=0) -> List[Optional[int]]:\n    if not self._encodings:\n        raise ValueError('sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast` class).')\n    return self._encodings[batch_index].sequence_ids", "docstring": "Return a list mapping the tokens to the id of their original sentences:\n\n- `None` for special tokens added around or between sequences,\n- `0` for tokens corresponding to words in the first sequence,\n- `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly\nencoded.\n\nArgs:\nbatch_index (`int`, *optional*, defaults to 0): The index to access in the batch.\n\nReturns:\n`List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added\nby the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding\nsequence.", "source": "github-repos"}
{"code": "def remove(self, key, name=None):\n    with tf.name_scope(name or '%s_lookup_table_remove' % self._name):\n        key = tf.convert_to_tensor(key, self._key_dtype, name='key')\n        op = gen_simple_hash_table_op.examples_simple_hash_table_remove(self.resource_handle, key, value_dtype=self._value_dtype)\n        return op", "docstring": "Remove `key`.\n\nArgs:\nkey: Scalar key to remove.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `key` doesn't match the table data type.", "source": "github-repos"}
{"code": "def sign_adaptation(control: FloatNest,\n                    output: FloatTensor,\n                    set_point: FloatTensor,\n                    adaptation_rate: FloatTensor = 0.01) -> FloatNest:\n  \n\n  def _get_new_control(control, output, set_point):\n    new_control = mcmc_util.choose(output > set_point,\n                                   control * (1. + adaptation_rate),\n                                   control / (1. + adaptation_rate))\n    return new_control\n\n  output = maybe_broadcast_structure(output, control)\n  set_point = maybe_broadcast_structure(set_point, control)\n\n  return tf.nest.map_structure(_get_new_control, control, output, set_point)", "docstring": "A function to do simple sign-based control of a variable.\n\n```\ncontrol = control * (1. + adaptation_rate) ** sign(output - set_point)\n```\n\nArgs:\ncontrol: The control variable.\noutput: The output variable.\nset_point: The set point for `output`. This function will adjust `control`\nso that `output` matches `set_point`.\nadaptation_rate: Adaptation rate.\n\nReturns:\ncontrol: New control.", "source": "juraj-google-style"}
{"code": "def read_uint8(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sB' % endian))", "docstring": "Read 1 byte as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def GetValues(self):\n    if ((not self._registry_key) and self._registry):\n        self._GetKeyFromRegistry()\n    if self._registry_key:\n        return self._registry_key.GetValues()\n    return iter([])", "docstring": "Retrieves all values within the key.\n\nReturns:\ngenerator[WinRegistryValue]: Windows Registry value generator.", "source": "codesearchnet"}
{"code": "def train(self, docs, retrain=False):\n        \n\n        if type(docs) == dict:\n            docs = docs.items()\n\n        train_sentences = [self._gen_sentence(item) for item in docs]\n        if (self.is_trained) and (retrain == False): \n            \n            self.update_model(train_sentences, update_labels_bool=True)\n\n        else: \n            \n            self.model = Doc2Vec(train_sentences, size=self.size, window=self.window, min_count=self.min_count, workers=self.workers)\n            self.is_trained = True\n\n        return 0", "docstring": "Train Doc2Vec on a series of docs. Train from scratch or update.\n\nArgs:\ndocs: list of tuples (assetid, body_text) or dictionary {assetid : body_text}\nretrain: boolean, retrain from scratch or update model\n\nsaves model in class to self.model\n\nReturns: 0 if successful", "source": "juraj-google-style"}
{"code": "def _create_flow(self, request_handler):\n    if (self.flow is None):\n        redirect_uri = request_handler.request.relative_url(self._callback_path)\n        self.flow = client.OAuth2WebServerFlow(self._client_id, self._client_secret, self._scope, redirect_uri=redirect_uri, user_agent=self._user_agent, auth_uri=self._auth_uri, token_uri=self._token_uri, revoke_uri=self._revoke_uri, **self._kwargs)", "docstring": "Create the Flow object.\n\nThe Flow is calculated lazily since we don't know where this app is\nrunning until it receives a request, at which point redirect_uri can be\ncalculated and then the Flow object can be constructed.\n\nArgs:\nrequest_handler: webapp.RequestHandler, the request handler.", "source": "codesearchnet"}
{"code": "def get_title(page):\n  \n  start_pos = page.find(\"<title>\")\n  end_pos = page.find(\"</title>\")\n  assert start_pos != -1\n  assert end_pos != -1\n  start_pos += len(\"<title>\")\n  return text_encoder.to_unicode_utf8(page[start_pos:end_pos])", "docstring": "Extract the title from a page.\n\nArgs:\npage: a string\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def tokenize_sentence(input_dict):\n    text, uid = (input_dict['text'], input_dict['id'])\n    tokens = Tokenizer([text], padding=True, truncation=True, return_tensors='pt')\n    tokens = {key: torch.squeeze(val) for key, val in tokens.items()}\n    return ((text, uid), tokens)", "docstring": "Takes a dictionary with a text and an id, tokenizes the text, and\nreturns a tuple of the text and id and the tokenized text\n\nArgs:\ninput_dict: a dictionary with the text and id of the sentence\n\nReturns:\nA tuple of the text and id, and a dictionary of the tokens.", "source": "github-repos"}
{"code": "def __init__(self, tag_name, **kwargs):\n        \n        \n        \n        class_name = type(self).__name__\n        end_idx = class_name.rfind('TagProcessor')\n\n        tag_kind = str(class_name[:end_idx])\n        entry_type = tag_kind.capitalize()\n\n        \n        super(TagProcessorWithAutoEntryTypeAndFindByNamePlusAutoKind,\n              self).__init__(entry_type, tag_name, tag_kind, **kwargs)", "docstring": "Initializer.\n\nArgs:\ntag_name: unicode string name of tag to match. Usually u'compound'\nor u'member'.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, anchor_id):\n        \n        file_path = self._anchor_path(anchor_id)\n\n        try:\n            with file_path.open(mode='rt') as handle:\n                return load_anchor(handle, self.root)\n        except OSError:\n            raise KeyError('No anchor with id {}'.format(anchor_id))", "docstring": "Get an Anchor by ID.\n\nArgs:\nanchor_id: The ID of the anchor to retrieve.\n\nReturns: An anchor instance.\n\nRaises:\nKeyError: The anchor can not be found.", "source": "juraj-google-style"}
{"code": "def emit_flow_start(self, name: str, timestamp: int, pid: int, tid: int, flow_id: int) -> None:\n    event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)\n    event['id'] = flow_id\n    self._events.append(event)", "docstring": "Adds a flow start event to the trace.\n\nWhen matched with a flow end event (with the same 'flow_id') this will\ncause the trace viewer to draw an arrow between the start and end events.\n\nArgs:\nname:  The event name as a string.\ntimestamp:  The timestamp of this event as a long integer.\npid:  Identifier of the process generating this event as an integer.\ntid:  Identifier of the thread generating this event as an integer.\nflow_id: Identifier of the flow as an integer.", "source": "github-repos"}
{"code": "def find_dependency_wheels(tile):\n    \n\n    return [os.path.join(x.folder, 'python', x.support_wheel) for x in _iter_dependencies(tile) if x.has_wheel]", "docstring": "Return a list of all python wheel objects created by dependencies of this tile\n\nArgs:\ntile (IOTile): Tile that we should scan for dependencies\n\nReturns:\nlist: A list of paths to dependency wheels", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_buffer = utils.BytearrayStream()\n\n        if self._unique_identifier:\n            self._unique_identifier.write(\n                local_buffer,\n                kmip_version=kmip_version\n            )\n        else:\n            raise exceptions.InvalidField(\n                \"The GetAttributes response payload is missing the unique \"\n                \"identifier field.\"\n            )\n\n        if kmip_version < enums.KMIPVersion.KMIP_2_0:\n            for attribute in self._attributes:\n                attribute.write(local_buffer, kmip_version=kmip_version)\n        else:\n            if self._attributes:\n                \n                template_attribute = objects.TemplateAttribute(\n                    attributes=self.attributes\n                )\n                attributes = objects.convert_template_attribute_to_attributes(\n                    template_attribute\n                )\n                attributes.write(local_buffer, kmip_version=kmip_version)\n            else:\n                raise exceptions.InvalidField(\n                    \"The GetAttributes response payload is missing the \"\n                    \"attributes list.\"\n                )\n\n        self.length = local_buffer.length()\n        super(GetAttributesResponsePayload, self).write(\n            output_buffer,\n            kmip_version=kmip_version\n        )\n        output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the GetAttributes response payload to a\nstream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def _check_format_string(self, node, format_arg):\n        \n        num_args = _count_supplied_tokens(node.args[format_arg + 1 :])\n        if not num_args:\n            \n            \n            return\n        format_string = node.args[format_arg].value\n        if not isinstance(format_string, str):\n            \n            \n            required_num_args = 0\n        else:\n            try:\n                if self._format_style == \"old\":\n                    keyword_args, required_num_args, _, _ = utils.parse_format_string(\n                        format_string\n                    )\n                    if keyword_args:\n                        \n                        \n                        return\n                elif self._format_style == \"new\":\n                    keyword_arguments, implicit_pos_args, explicit_pos_args = utils.parse_format_method_string(\n                        format_string\n                    )\n\n                    keyword_args_cnt = len(\n                        set(k for k, l in keyword_arguments if not isinstance(k, int))\n                    )\n                    required_num_args = (\n                        keyword_args_cnt + implicit_pos_args + explicit_pos_args\n                    )\n            except utils.UnsupportedFormatCharacter as ex:\n                char = format_string[ex.index]\n                self.add_message(\n                    \"logging-unsupported-format\",\n                    node=node,\n                    args=(char, ord(char), ex.index),\n                )\n                return\n            except utils.IncompleteFormatString:\n                self.add_message(\"logging-format-truncated\", node=node)\n                return\n        if num_args > required_num_args:\n            self.add_message(\"logging-too-many-args\", node=node)\n        elif num_args < required_num_args:\n            self.add_message(\"logging-too-few-args\", node=node)", "docstring": "Checks that format string tokens match the supplied arguments.\n\nArgs:\nnode (astroid.node_classes.NodeNG): AST node to be checked.\nformat_arg (int): Index of the format string in the node arguments.", "source": "juraj-google-style"}
{"code": "def CallHwclock(logger):\n  \n  command = ['/sbin/hwclock', '--hctosys']\n  try:\n    subprocess.check_call(command)\n  except subprocess.CalledProcessError:\n    logger.warning('Failed to sync system time with hardware clock.')\n  else:\n    logger.info('Synced system time with hardware clock.')", "docstring": "Sync clock using hwclock.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"}
{"code": "def decode_predictions(preds, top=5):\n    global CLASS_INDEX\n    if len(preds.shape) != 2 or preds.shape[1] != 1000:\n        raise ValueError(f'`decode_predictions` expects a batch of predictions (i.e. a 2D array of shape (samples, 1000)). Received array with shape: {preds.shape}')\n    if CLASS_INDEX is None:\n        fpath = file_utils.get_file('imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models', file_hash='c2c37ea517e94d9795004a39431a14cb')\n        with open(fpath) as f:\n            CLASS_INDEX = json.load(f)\n    results = []\n    preds = ops.convert_to_numpy(preds)\n    for pred in preds:\n        top_indices = pred.argsort()[-top:][::-1]\n        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n        result.sort(key=lambda x: x[2], reverse=True)\n        results.append(result)\n    return results", "docstring": "Decodes the prediction of an ImageNet model.\n\nArgs:\npreds: NumPy array encoding a batch of predictions.\ntop: Integer, how many top-guesses to return. Defaults to `5`.\n\nReturns:\nA list of lists of top class prediction tuples\n`(class_name, class_description, score)`.\nOne list of tuples per sample in batch input.\n\nRaises:\nValueError: In case of invalid shape of the `pred` array\n(must be 2D).", "source": "github-repos"}
{"code": "def __init__(self, option_strings, dest, help, metavar, flag_instance):  \n    \n    del dest\n    self._flag_instance = flag_instance\n    super(_FlagAction, self).__init__(\n        option_strings=option_strings,\n        dest=argparse.SUPPRESS,\n        help=help,\n        metavar=metavar)", "docstring": "Initializes _FlagAction.\n\nArgs:\noption_strings: See argparse.Action.\ndest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.\nhelp: See argparse.Action.\nmetavar: See argparse.Action.\nflag_instance: absl.flags.Flag, the absl flag instance.", "source": "juraj-google-style"}
{"code": "def compute_mask_offsets(shard_id2num_examples):\n    mask_offsets = []\n    total_num_examples = 0\n    for num_examples_in_shard in shard_id2num_examples:\n        mask_offsets.append(total_num_examples % 100)\n        total_num_examples += num_examples_in_shard\n    return mask_offsets", "docstring": "Return the list of offsets associated with each shard.\n\nArgs:\nshard_id2num_examples: `list[int]`, mapping shard_id=>num_examples\n\nReturns:\nmask_offsets: `list[int]`, offset to skip for each of the shards", "source": "codesearchnet"}
{"code": "def dns_rr(self, ips):\n        \n        api_name = 'opendns-dns_rr'\n        fmt_url_path = u'dnsdb/name/a/{0}.json'\n        return self._multi_get(api_name, fmt_url_path, ips)", "docstring": "Get the DNS resource records related to the input domains.\n\nArgs:\nips: an enumerable of strings as domain names\nReturns:\nAn enumerable of resource records and features", "source": "juraj-google-style"}
{"code": "def disable_lower_using_switch_merge(graph_def):\n    output_graph_def = graph_pb2.GraphDef()\n    output_graph_def.CopyFrom(graph_def)\n\n    def disable_control_flow_lowering(node):\n        if node.op in _CONTROL_FLOW_OPS:\n            node.attr['_lower_using_switch_merge'].b = False\n    for node in output_graph_def.node:\n        disable_control_flow_lowering(node)\n    if output_graph_def.library:\n        for func in output_graph_def.library.function:\n            for node in func.node_def:\n                disable_control_flow_lowering(node)\n    return output_graph_def", "docstring": "Set '_lower_using_switch_merge' attributes to False.\n\nSets the attribute to False in the NodeDefs in the main graph and the NodeDefs\nin each function's graph.\n\nArgs:\ngraph_def: GraphDef proto.\n\nReturns:\nGraphDef", "source": "github-repos"}
{"code": "def merge_pot1_files(self, delete_source=True):\n    natom = len(self[0].input.structure)\n    max_pertcase = (3 * natom)\n    pot1_files = []\n    for task in self:\n        if (not isinstance(task, DfptTask)):\n            continue\n        paths = task.outdir.list_filepaths(wildcard='*_POT*')\n        for path in paths:\n            i = path.rindex('_POT')\n            pertcase = int(path[(i + 4):].replace('.nc', ''))\n            if (pertcase <= max_pertcase):\n                pot1_files.append(path)\n    if (not pot1_files):\n        return None\n    self.history.info(('Will call mrgdvdb to merge %s files:' % len(pot1_files)))\n    out_dvdb = self.outdir.path_in('out_DVDB')\n    if (len(pot1_files) == 1):\n        shutil.copy(pot1_files[0], out_dvdb)\n    else:\n        mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)\n        mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)\n    return out_dvdb", "docstring": "This method is called when all the q-points have been computed.\nIt runs `mrgdvdb` sequentially on the local machine to produce\nthe final DVDB file in the outdir of the `Work`.\n\nArgs:\ndelete_source: True if POT1 files should be removed after (successful) merge.\n\nReturns:\npath to the output DVDB file. None if no DFPT POT file is found.", "source": "codesearchnet"}
{"code": "def wavfile_to_examples(wav_file):\n  \n  from scipy.io import wavfile\n  sr, wav_data = wavfile.read(wav_file)\n  assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype\n  samples = wav_data / 32768.0  \n  return waveform_to_examples(samples, sr)", "docstring": "Convenience wrapper around waveform_to_examples() for a common WAV format.\n\nArgs:\nwav_file: String path to a file, or a file-like object. The file\nis assumed to contain WAV audio data with signed 16-bit PCM samples.\n\nReturns:\nSee waveform_to_examples.", "source": "juraj-google-style"}
{"code": "def execute_no_wait(self, cmd, walltime, envs={}):\n    current_env = copy.deepcopy(self._envs)\n    current_env.update(envs)\n    try:\n        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.userhome, env=current_env, shell=True, preexec_fn=os.setpgrp)\n        pid = proc.pid\n    except Exception as e:\n        print('Caught exception : {0}'.format(e))\n        logger.warn('Execution of command [%s] failed due to \\n %s ', cmd, e)\n        return (None, None)\n    return (pid, proc)", "docstring": "Execute a commandline string on the shell without waiting for completion.\n\nArgs:\n- cmd (string) : Commandline string to execute\n- walltime (int) : walltime in seconds, this is not really used now.\n\nReturns:\n\n- pid : Process id of the spawned process, None on failure\n- proc : The subprocess.Popen object for the spawned process, None on failure\n\nRaises:\nNone.", "source": "codesearchnet"}
{"code": "def get_name(self, tag):\n    name = tag.findChild('name').contents[0]\n    if self.include_parent_scopes:\n        parent_tag = tag.findParent()\n        if (parent_tag.get('kind') in ['class', 'struct', 'namespace']):\n            name = ((parent_tag.findChild('name').contents[0] + '::') + name)\n    return name", "docstring": "Extract and return a representative \"name\" from a tag.\n\nOverride as necessary. get_name's output can be controlled through\nkeyword arguments that are provided when initializing a\nTagProcessor. For instance, a member of a class or namespace can have\nits parent scope included in the name by passing\ninclude_parent_scopes=True to __init__().\n\nArgs:\ntag: A BeautifulSoup Tag that satisfies match_criterion.\n\nReturns:\nA string that would be appropriate to use as an entry name in a\nZeal database.", "source": "codesearchnet"}
{"code": "def __call__(self, input: EventSet) -> Dict[str, EventSet]:\n    assert isinstance(self.operator, BaseScalarOperator)\n    output_schema = self.output_schema('output')\n    dst_evset = EventSet(data={}, schema=output_schema)\n    for index_key, index_data in input.data.items():\n        dst_evset.set_index_value(index_key, IndexData([self._do_operation(feature, self.operator.value, input.schema.features[feature_idx].dtype) for feature_idx, feature in enumerate(index_data.features)], index_data.timestamps, schema=output_schema), normalize=False)\n    return {'output': dst_evset}", "docstring": "Applies the corresponding arithmetic operation between an EventSet\nand a scalar.\n\nArgs:\ninput: Event set to perform the operation to.\n\nReturns:\nResult of the operation.", "source": "github-repos"}
{"code": "async def validate(state, holdout_glob):\n    if (not glob.glob(holdout_glob)):\n        print('Glob \"{}\" didn\\'t match any files, skipping validation'.format(holdout_glob))\n    else:\n        (await run('python3', 'validate.py', holdout_glob, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')), '--work_dir={}'.format(fsdb.working_dir())))", "docstring": "Validate the trained model against holdout games.\n\nArgs:\nstate: the RL loop State instance.\nholdout_glob: a glob that matches holdout games.", "source": "codesearchnet"}
{"code": "def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):\n    tokens = token_fn(s)\n    return __ngrams(tokens, n=min(len(tokens), n))", "docstring": "Word-level n-grams in a string\n\nBy default, whitespace is assumed to be a word boundary.\n\n>>> ng.word_ngrams('This is not a test!')\n[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]\n\nIf the sequence's length is less than or equal to n, the n-grams are\nsimply the sequence itself.\n\n>>> ng.word_ngrams('Test!')\n[('Test!')]\n\nArgs:\ns: a string\nn: the n-gram length (defaults to 3)\ntoken_fn: the tokenizer used to split the string into words\n\nReturns:\nlist: tuples of word-level n-grams", "source": "codesearchnet"}
{"code": "def supported_tasks(self, lang=None):\n    \n    if lang:\n      collection = self.get_collection(lang=lang)\n      return [x.id.split('.')[0] for x in collection.packages]\n    else:\n      return [x.name.split()[0] for x in self.collections() if Downloader.TASK_PREFIX in x.id]", "docstring": "Tasks that are supported, optionally restricted to a specific language.\n\nArgs:\nlang (string): Language code name.", "source": "juraj-google-style"}
{"code": "def compute_gradients(self, *args, **kwargs):\n    return self._opt.compute_gradients(*args, **kwargs)", "docstring": "Compute gradients of \"loss\" for the variables in \"var_list\".\n\nThis simply wraps the compute_gradients() from the real optimizer. The\ngradients will be aggregated in the apply_gradients() so that user can\nmodify the gradients like clipping with per replica global norm if needed.\nThe global norm with aggregated gradients can be bad as one replica's huge\ngradients can hurt the gradients from other replicas.\n\nArgs:\n*args: Arguments for compute_gradients().\n**kwargs: Keyword arguments for compute_gradients().\n\nReturns:\nA list of (gradient, variable) pairs.", "source": "github-repos"}
{"code": "def register_extension(self, group, name, extension):\n    if isinstance(extension, str):\n        (name, extension) = self.load_extension(extension)[0]\n    if (group not in self._registered_extensions):\n        self._registered_extensions[group] = []\n    self._registered_extensions[group].append((name, extension))", "docstring": "Register an extension.\n\nArgs:\ngroup (str): The type of the extension\nname (str): A name for the extension\nextension (str or class): If this is a string, then it will be\ninterpreted as a path to import and load.  Otherwise it\nwill be treated as the extension object itself.", "source": "codesearchnet"}
{"code": "def get_repo_config(self, repo='default'):\n    for repo_config in self.repositories:\n        if ((repo_config.name == repo) or (repo_config.url in RepositoryURL(repo))):\n            return repo_config\n    return None", "docstring": "Retrieve configuration for a given repository.\n\nArgs:\nrepo (str): a repository \"realm\" (alias) or its URL\n\nReturns:\nRepositoryConfig: if there is configuration for that repository\nNone: otherwise", "source": "codesearchnet"}
{"code": "def _resolve_task_logging(job_metadata, job_resources, task_descriptors):\n    if (not job_resources.logging):\n        return\n    for task_descriptor in task_descriptors:\n        logging_uri = provider_base.format_logging_uri(job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)\n        logging_path = job_model.LoggingParam(logging_uri, job_resources.logging.file_provider)\n        if task_descriptor.task_resources:\n            task_descriptor.task_resources = task_descriptor.task_resources._replace(logging_path=logging_path)\n        else:\n            task_descriptor.task_resources = job_model.Resources(logging_path=logging_path)", "docstring": "Resolve the logging path from job and task properties.\n\nArgs:\njob_metadata: Job metadata, such as job-id, job-name, and user-id.\njob_resources: Resources specified such as ram, cpu, and logging path.\ntask_descriptors: Task metadata, parameters, and resources.\n\nResolve the logging path, which may have substitution parameters such as\njob-id, task-id, user-id, and job-name.", "source": "codesearchnet"}
{"code": "def poisson(data):\n    data = np.hstack(([0.0], np.array(data)))\n    cumm = np.cumsum(data)\n\n    def cost(s, t):\n        ' Cost function for poisson distribution with changing mean\\n\\n        Args:\\n            start (int): start index\\n            end (int): end index\\n        Returns:\\n            float: Cost, from start to end\\n        '\n        diff = (cumm[t] - cumm[s])\n        if (diff == 0):\n            return (((- 2) * diff) * ((- np.log((t - s))) - 1))\n        else:\n            return (((- 2) * diff) * ((np.log(diff) - np.log((t - s))) - 1))\n    return cost", "docstring": "Creates a segment cost function for a time series with a\npoisson distribution with changing mean\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "codesearchnet"}
{"code": "def running_instances(self, context, process_name):\n    handle = (id(context), process_name)\n    it = self.processes.get(handle, {}).itervalues()\n    entries = [x for x in it if (x[0].poll() is None)]\n    return entries", "docstring": "Get a list of running instances.\n\nArgs:\ncontext (`ResolvedContext`): Context the process is running in.\nprocess_name (str): Name of the process.\n\nReturns:\nList of (`subprocess.Popen`, start-time) 2-tuples, where start_time\nis the epoch time the process was added.", "source": "codesearchnet"}
{"code": "def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):\n    (ncdata, closeit) = as_ncreader(ncdata)\n    lattice = ArrayWithUnit(ncdata.read_value('primitive_vectors'), 'bohr').to('ang')\n    red_coords = ncdata.read_value('reduced_atom_positions')\n    natom = len(red_coords)\n    znucl_type = ncdata.read_value('atomic_numbers')\n    type_atom = ncdata.read_value('atom_species')\n    species = (natom * [None])\n    for atom in range(natom):\n        type_idx = (type_atom[atom] - 1)\n        species[atom] = int(znucl_type[type_idx])\n    d = {}\n    if (site_properties is not None):\n        for prop in site_properties:\n            d[prop] = ncdata.read_value(prop)\n    structure = cls(lattice, species, red_coords, site_properties=d)\n    try:\n        from abipy.core.structure import Structure as AbipyStructure\n        structure.__class__ = AbipyStructure\n    except ImportError:\n        pass\n    if closeit:\n        ncdata.close()\n    return structure", "docstring": "Reads and returns a pymatgen structure from a NetCDF file\ncontaining crystallographic data in the ETSF-IO format.\n\nArgs:\nncdata: filename or NetcdfReader instance.\nsite_properties: Dictionary with site properties.\ncls: The Structure class to instantiate.", "source": "codesearchnet"}
{"code": "def _TransposeTridiagonalMatrix(diags):\n    diag = diags[..., 1, :]\n    if diags.shape.is_fully_defined():\n        zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)\n        superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)\n        subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)\n    else:\n        rank = array_ops.rank(diags)\n        zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n        superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])), axis=0)\n        superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)\n        subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])), axis=0)\n        subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)\n    return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)", "docstring": "Transposes a tridiagonal matrix.\n\nArgs:\ndiags: the diagonals of the input matrix in the compact form (see\nlinalg_ops.tridiagonal_solve).\n\nReturns:\nDiagonals of the transposed matrix in the compact form.", "source": "github-repos"}
{"code": "def post_url(self, url, token='', json=None, data=None, headers=None):\n    if (token == ''):\n        token = self._user_token\n    if headers:\n        headers.update({'Authorization': 'Token {}'.format(token)})\n    else:\n        headers = {'Authorization': 'Token {}'.format(token)}\n    if json:\n        return requests.post(url, headers=headers, json=json, verify=False)\n    if data:\n        return requests.post(url, headers=headers, data=data, verify=False)\n    return requests.post(url, headers=headers, verify=False)", "docstring": "Returns a post request object taking in a url, user token, and\npossible json information.\n\nArguments:\nurl (str): The url to make post to\ntoken (str): The authentication token\njson (dict): json info to send\ndata (dict): form data to send\nheaders (dict): additional headers to include\n\nReturns:\nobj: Post request object", "source": "codesearchnet"}
{"code": "def write_tree_newick(self, filename, hide_rooted_prefix=False):\n    if (not isinstance(filename, str)):\n        raise TypeError('filename must be a str')\n    treestr = self.newick()\n    if hide_rooted_prefix:\n        if treestr.startswith('[&R]'):\n            treestr = treestr[4:].strip()\n        else:\n            warn('Specified hide_rooted_prefix, but tree was not rooted')\n    if filename.lower().endswith('.gz'):\n        f = gopen(expanduser(filename), 'wb', 9)\n        f.write(treestr.encode())\n        f.close()\n    else:\n        f = open(expanduser(filename), 'w')\n        f.write(treestr)\n        f.close()", "docstring": "Write this ``Tree`` to a Newick file\n\nArgs:\n``filename`` (``str``): Path to desired output file (plain-text or gzipped)", "source": "codesearchnet"}
{"code": "def copy_workspace(self, uri, new_name):\n        \n\n        payload = {\n            'isPublic': True,\n            'newName': new_name\n        }\n\n        return self._api.request('post', '/api/documents/' + uri['did'] + '/workspaces/' + uri['wvm'] + '/copy', body=payload)", "docstring": "Copy the current workspace.\n\nArgs:\n- uri (dict): the uri of the workspace being copied. Needs to have a did and wid key.\n- new_name (str): the new name of the copied workspace.\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def sub(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"sub\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Subtract a DataFrame/Series/scalar from this DataFrame.\n\nArgs:\nother: The object to use to apply the subtraction to this.\naxis: The axis to apply the subtraction over.\nlevel: Multilevel index level to subtract over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the subtraction applied.", "source": "juraj-google-style"}
{"code": "def create_primes(threshold):\n    \n    if threshold == 2:\n        return [2]\n\n    elif threshold < 2:\n        return []\n\n    numbers = list(range(3, threshold+1, 2))\n    root_of_threshold = threshold ** 0.5\n    half = int((threshold+1)/2-1)\n    idx = 0\n    counter = 3\n    while counter <= root_of_threshold:\n        if numbers[idx]:\n            idy = int((counter*counter-3)/2)\n            numbers[idy] = 0\n            while idy < half:\n                numbers[idy] = 0\n                idy += counter\n        idx += 1\n        counter = 2*idx+3\n    return [2] + [number for number in numbers if number]", "docstring": "Generate prime values using sieve of Eratosthenes method.\n\nArgs:\nthreshold (int):\nThe upper bound for the size of the prime values.\n\nReturns (List[int]):\nAll primes from 2 and up to ``threshold``.", "source": "juraj-google-style"}
{"code": "def static_nrows(self):\n    if self._row_splits is not None:\n        nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])\n        if nrows_plus_one is not None:\n            return nrows_plus_one - 1\n    if self._row_lengths is not None:\n        nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])\n        if nrows is not None:\n            return nrows\n    if self._nrows is not None:\n        return tensor_util.constant_value(self._nrows)\n    return None", "docstring": "The number of rows in this partition, if statically known.\n\n```python\nself.row_lengths().shape == [self.static_nrows]\nself.row_starts().shape == [self.static_nrows]\nself.row_limits().shape == [self.static_nrows]\nself.row_splits().shape == [self.static_nrows + 1]\n```\n\nReturns:\nThe number of rows in this partition as an `int` (if statically known);\nor `None` (otherwise).", "source": "github-repos"}
{"code": "def guess_task_type(name, task_defn):\n    \n    parts = name.split(':')\n    task_type = parts[-1]\n    if task_type == 'parent':\n        if is_action(task_defn):\n            task_type = 'action'\n        else:\n            task_type = 'decision'\n    if task_type not in get_valid_task_types():\n        raise CoTError(\n            \"Invalid task type for {}!\".format(name)\n        )\n    return task_type", "docstring": "Guess the task type of the task.\n\nArgs:\nname (str): the name of the task.\ntask_defn (dict): the task definition.\n\nReturns:\nstr: the task_type.\n\nRaises:\nCoTError: on invalid task_type.", "source": "juraj-google-style"}
{"code": "def predict_step(self, data):\n    data = data_adapter.expand_1d(data)\n    x, _, _ = data_adapter.unpack_x_y_sample_weight(data)\n    return self(x, training=False)", "docstring": "The logic for one inference step.\n\nThis method can be overridden to support custom inference logic.\nThis method is called by `Model.make_predict_function`.\n\nThis method should contain the mathematical logic for one step of inference.\nThis typically includes the forward pass.\n\nConfiguration details for *how* this logic is run (e.g. `tf.function` and\n`tf.distribute.Strategy` settings), should be left to\n`Model.make_predict_function`, which can also be overridden.\n\nArgs:\ndata: A nested structure of `Tensor`s.\n\nReturns:\nThe result of one inference step, typically the output of calling the\n`Model` on data.", "source": "github-repos"}
{"code": "def patch_toText(self, patches):\n    \n    text = []\n    for patch in patches:\n      text.append(str(patch))\n    return \"\".join(text)", "docstring": "Take a list of patches and return a textual representation.\n\nArgs:\npatches: Array of Patch objects.\n\nReturns:\nText representation of patches.", "source": "juraj-google-style"}
{"code": "def _operation_status_message(self):\n    msg = None\n    action = None\n    if (not google_v2_operations.is_done(self._op)):\n        last_event = google_v2_operations.get_last_event(self._op)\n        if last_event:\n            msg = last_event['description']\n            action_id = last_event.get('details', {}).get('actionId')\n            if action_id:\n                action = google_v2_operations.get_action_by_id(self._op, action_id)\n        else:\n            msg = 'Pending'\n    else:\n        failed_events = google_v2_operations.get_failed_events(self._op)\n        if failed_events:\n            failed_event = failed_events[(- 1)]\n            msg = failed_event.get('details', {}).get('stderr')\n            action_id = failed_event.get('details', {}).get('actionId')\n            if action_id:\n                action = google_v2_operations.get_action_by_id(self._op, action_id)\n        if (not msg):\n            error = google_v2_operations.get_error(self._op)\n            if error:\n                msg = error['message']\n            else:\n                msg = 'Success'\n    return (msg, action)", "docstring": "Returns the most relevant status string and failed action.\n\nThis string is meant for display only.\n\nReturns:\nA printable status string and name of failed action (if any).", "source": "codesearchnet"}
{"code": "def on_success(self, inv_plugin, emit_set_slot):\n        \n        self.dirty = set()\n        self.apply(inv_plugin)\n        for changed_slot in self.dirty:\n            emit_set_slot(changed_slot)", "docstring": "Called when the click was successful\nand should be applied to the inventory.\n\nArgs:\ninv_plugin (InventoryPlugin): inventory plugin instance\nemit_set_slot (func): function to signal a slot change,\nshould be InventoryPlugin().emit_set_slot", "source": "juraj-google-style"}
{"code": "def compute(self, t, yerr=1.123e-12, check_sorted=True, A=None, U=None, V=None):\n    t = np.atleast_1d(t)\n    if (check_sorted and np.any((np.diff(t) < 0.0))):\n        raise ValueError('the input coordinates must be sorted')\n    if (check_sorted and (len(t.shape) > 1)):\n        raise ValueError('dimension mismatch')\n    self._t = t\n    self._yerr = np.empty_like(self._t)\n    self._yerr[:] = yerr\n    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.kernel.coefficients\n    self._A = (np.empty(0) if (A is None) else A)\n    self._U = (np.empty((0, 0)) if (U is None) else U)\n    self._V = (np.empty((0, 0)) if (V is None) else V)\n    self.solver.compute(self.kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, self._A, self._U, self._V, t, (self._yerr ** 2))\n    self.dirty = False", "docstring": "Compute the extended form of the covariance matrix and factorize\n\nArgs:\nt (array[n]): The independent coordinates of the data points.\nThis array must be _sorted_ in ascending order.\nyerr (Optional[float or array[n]]): The measurement uncertainties\nfor the data points at coordinates ``t``. These values will be\nadded in quadrature to the diagonal of the covariance matrix.\n(default: ``1.123e-12``)\ncheck_sorted (bool): If ``True``, ``t`` will be checked to make\nsure that it is properly sorted. If ``False``, the coordinates\nwill be assumed to be in the correct order.\n\nRaises:\nValueError: For un-sorted data or mismatched dimensions.\nsolver.LinAlgError: For non-positive definite matrices.", "source": "codesearchnet"}
{"code": "def _process_image(filename, coder):\n  \n  \n  with tf.gfile.FastGFile(filename, 'r') as f:\n    image_data = f.read()\n\n  \n  if _is_png(filename):\n    \n    print('Converting PNG to JPEG for %s' % filename)\n    image_data = coder.png_to_jpeg(image_data)\n  elif _is_cmyk(filename):\n    \n    print('Converting CMYK to RGB for %s' % filename)\n    image_data = coder.cmyk_to_rgb(image_data)\n\n  \n  image = coder.decode_jpeg(image_data)\n\n  \n  assert len(image.shape) == 3\n  height = image.shape[0]\n  width = image.shape[1]\n  assert image.shape[2] == 3\n\n  return image_data, height, width", "docstring": "Process a single image file.\n\nArgs:\nfilename: string, path to an image file e.g., '/path/to/example.JPG'.\ncoder: instance of ImageCoder to provide TensorFlow image coding utils.\nReturns:\nimage_buffer: string, JPEG encoding of RGB image.\nheight: integer, image height in pixels.\nwidth: integer, image width in pixels.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An FNet sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def ReportStatus(self, request, global_params=None):\n    config = self.GetMethodConfig('ReportStatus')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Reports the status of dataflow WorkItems leased by a worker.\n\nArgs:\nrequest: (DataflowProjectsJobsWorkItemsReportStatusRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ReportWorkItemStatusResponse) The response message.", "source": "github-repos"}
{"code": "def get_data_for_name(cls, service_name):\n        \n        for service in cls._get_music_services_data().values():\n            if service_name == service[\"Name\"]:\n                return service\n        raise MusicServiceException(\n            \"Unknown music service: '%s'\" % service_name)", "docstring": "Get the data relating to a named music service.\n\nArgs:\nservice_name (str): The name of the music service for which data\nis required.\n\nReturns:\ndict: Data relating to the music service.\n\nRaises:\n`MusicServiceException`: if the music service cannot be found.", "source": "juraj-google-style"}
{"code": "def _get_scripts(self, host_metadata):\n        \n        deploy_scripts = host_metadata.get('deploy-scripts', [])\n        if deploy_scripts:\n            return deploy_scripts\n\n        ovirt_scripts = host_metadata.get('ovirt-scripts', [])\n        if ovirt_scripts:\n            warnings.warn(\n                'Deprecated entry \"ovirt-scripts\" will not be supported in '\n                'the future, replace with \"deploy-scripts\"'\n            )\n\n        return ovirt_scripts", "docstring": "Temporary method to retrieve the host scripts\n\nTODO:\nremove once the \"ovirt-scripts\" option gets deprecated\n\nArgs:\nhost_metadata(dict): host metadata to retrieve the scripts for\n\nReturns:\nlist: deploy scripts for the host, empty if none found", "source": "juraj-google-style"}
{"code": "def _preprocess_sqlite_index(asql_query, library, backend, connection):\n    new_query = None\n    if asql_query.strip().lower().startswith('index'):\n        logger.debug('_preprocess_index: create index query found.\\n    asql query: {}'.format(asql_query))\n        index = parse_index(asql_query)\n        partition = library.partition(index.source)\n        table = backend.install(connection, partition, materialize=True)\n        index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))\n        new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(index=index_name, table=table, columns=','.join(index.columns))\n    logger.debug('_preprocess_index: preprocess finished.\\n    asql query: {}\\n    new query: {}'.format(asql_query, new_query))\n    return (new_query or asql_query)", "docstring": "Creates materialized view for each indexed partition found in the query.\n\nArgs:\nasql_query (str): asql query\nlibrary (ambry.Library):\nbackend (SQLiteBackend):\nconnection (apsw.Connection):\n\nReturns:\nstr: converted asql if it contains index query. If not, returns asql_query as is.", "source": "codesearchnet"}
{"code": "def __init__(self, clslist):\n        \n\n        if not hasattr(clslist, '__contains__'):\n            clslist = [clslist]\n\n        self.required = reduce(set.union, (cls.required for cls in clslist if issubclass(cls, AttributeMapper)))\n        self.optional = reduce(set.union, (cls.optional for cls in clslist if issubclass(cls, AttributeMapper)))\n        self.optional.symmetric_difference_update(self.required)", "docstring": "SCFilter(clslist)\n\nArgs:\nclslist (list): List of classes from which to build the filter\n\nReturns:\nnew SCFilter instance", "source": "juraj-google-style"}
{"code": "def API_Retry(job, key=None, retries=3, wait=31):\n    try:\n        data = job.execute()\n        return data if not key else data.get(key, [])\n    except HttpError as e:\n        if e.resp.status in [403, 409, 429, 500, 503]:\n            content = json.loads(e.content.decode())\n            if content['error']['code'] == 409:\n                return None\n            elif content.get('error', {}).get('status') == 'PERMISSION_DENIED' or content.get('error', {}).get('errors', [{}])[0].get('reason') == 'forbidden':\n                print('ERROR DETAILS:', e.content.decode())\n                raise\n            elif retries > 0:\n                print('API ERROR:', str(e))\n                print('API RETRY / WAIT:', retries, wait)\n                sleep(wait)\n                return API_Retry(job, key, retries - 1, wait * 2)\n            else:\n                print('ERROR DETAILS:', e.content.decode())\n                raise\n        else:\n            raise\n    except RETRIABLE_EXCEPTIONS as e:\n        if retries > 0:\n            print('HTTP ERROR:', str(e))\n            print('HTTP RETRY / WAIT:', retries, wait)\n            sleep(wait)\n            return API_Retry(job, key, retries - 1, wait * 2)\n        else:\n            raise\n    except SSLError as e:\n        if retries > 0 and 'timed out' in e.message:\n            print('SSL ERROR:', str(e))\n            print('SSL RETRY / WAIT:', retries, wait)\n            sleep(wait)\n            return API_Retry(job, key, retries - 1, wait * 2)\n        else:\n            raise", "docstring": "API retry that includes back off and some common error handling.\n\nCAUTION:  Total timeout cannot exceed 5 minutes or the SSL token expires for\nall future calls.\n\nFor critical but recoverable errors, the back off executes [retry] times.\nEach time the [wait] is doubled.\nBy default retries are: 0:31 + 1:02 + 2:04 = 3:37 ( minutes )\nThe recommended minimum wait is 60 seconds for most APIs.\n\n* Errors retried: 429, 500, 503\n* Errors ignored: 409 - already exists ( triggered by create only and also\nreturns None )\n* Errors raised: ALL OTHERS\n\nArgs:\n* job: (object) API call path, everything before the execute() statement to retry.\n* key: (string) Optional key from json reponse to return.\n* retries: (int) Number of times to try the job.\n* wait: (seconds) Time to wait in seconds between retries.\n\nReturns:\n* JSON result of job or key value from JSON result if job succeed.\n* None if object already exists.\n\nRaises:\n* Any exceptions not listed in comments above.", "source": "github-repos"}
{"code": "def reload_class_methods(self, class_, verbose=True):\n    \n    if verbose:\n        print('[util_class] Reloading self=%r as class_=%r' % (self, class_))\n    self.__class__ = class_\n    for key in dir(class_):\n        \n        func = getattr(class_, key)\n        if isinstance(func, types.MethodType):\n            \n            inject_func_as_method(self, func, class_=class_,\n                                  allow_override=True,\n                                  verbose=verbose)", "docstring": "rebinds all class methods\n\nArgs:\nself (object): class instance to reload\nclass_ (type): type to reload as\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_class import *  # NOQA\n>>> self = '?'\n>>> class_ = '?'\n>>> result = reload_class_methods(self, class_)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = BytearrayStream()\n\n        if self._wrapping_method:\n            self._wrapping_method.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        else:\n            raise ValueError(\n                \"Invalid struct missing the wrapping method attribute.\"\n            )\n\n        if self._encryption_key_information:\n            self._encryption_key_information.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._mac_signature_key_information:\n            self._mac_signature_key_information.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._attribute_names:\n            for unique_identifier in self._attribute_names:\n                unique_identifier.write(\n                    local_stream,\n                    kmip_version=kmip_version\n                )\n        if self._encoding_option:\n            self._encoding_option.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(KeyWrappingSpecification, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the KeyWrappingSpecification struct to a\nstream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def to_json_string(self, include_defaults):\n    json_like = self._to_json_like(include_defaults=include_defaults)\n    json_like['id'] = self.id\n    return serialize_json(json_like)", "docstring": "Returns a JSON string encoding the attributes of this object.\n\nReferences to other objects are serialized as references\n(just the object ID and type info), so the deserializer\nwill need to separately have the full attributes of those\nother objects.\n\nThere's no corresponding ``from_json_string()`` because to\ndeserialize an object is normally done in the context of a\nDocument (since the Document can resolve references).\n\nFor most purposes it's best to serialize and deserialize\nentire documents.\n\nArgs:\ninclude_defaults (bool) : whether to include attributes\nthat haven't been changed from the default", "source": "codesearchnet"}
{"code": "def locator_to_latlong(locator):\n    locator = locator.upper()\n    if ((len(locator) == 5) or (len(locator) < 4)):\n        raise ValueError\n    if ((ord(locator[0]) > ord('R')) or (ord(locator[0]) < ord('A'))):\n        raise ValueError\n    if ((ord(locator[1]) > ord('R')) or (ord(locator[1]) < ord('A'))):\n        raise ValueError\n    if ((ord(locator[2]) > ord('9')) or (ord(locator[2]) < ord('0'))):\n        raise ValueError\n    if ((ord(locator[3]) > ord('9')) or (ord(locator[3]) < ord('0'))):\n        raise ValueError\n    if (len(locator) == 6):\n        if ((ord(locator[4]) > ord('X')) or (ord(locator[4]) < ord('A'))):\n            raise ValueError\n        if ((ord(locator[5]) > ord('X')) or (ord(locator[5]) < ord('A'))):\n            raise ValueError\n    longitude = (((ord(locator[0]) - ord('A')) * 20) - 180)\n    latitude = (((ord(locator[1]) - ord('A')) * 10) - 90)\n    longitude += ((ord(locator[2]) - ord('0')) * 2)\n    latitude += (ord(locator[3]) - ord('0'))\n    if (len(locator) == 6):\n        longitude += ((ord(locator[4]) - ord('A')) * (2 / 24))\n        latitude += ((ord(locator[5]) - ord('A')) * (1 / 24))\n        longitude += (1 / 24)\n        latitude += (0.5 / 24)\n    else:\n        longitude += 1\n        latitude += 0.5\n    return (latitude, longitude)", "docstring": "converts a Maidenhead locator into the corresponding WGS84 coordinates\n\nArgs:\nlocator (string): Locator, either 4 or 6 characters\n\nReturns:\ntuple (float, float): Latitude, Longitude\n\nRaises:\nValueError: When called with wrong or invalid input arg\nTypeError: When arg is not a string\n\nExample:\nThe following example converts a Maidenhead locator into Latitude and Longitude\n\n>>> from pyhamtools.locator import locator_to_latlong\n>>> latitude, longitude = locator_to_latlong(\"JN48QM\")\n>>> print latitude, longitude\n48.5208333333 9.375\n\nNote:\nLatitude (negative = South, positive = North)\nLongitude (negative = West, positive = East)", "source": "codesearchnet"}
{"code": "def create_game(\n      self,\n      map_name,\n      bot_difficulty=sc_pb.VeryEasy,\n      bot_race=sc_common.Random,\n      bot_first=False):\n    \n    self._controller.ping()\n\n    \n    map_inst = maps.get(map_name)\n    map_data = map_inst.data(self._run_config)\n    if map_name not in self._saved_maps:\n      self._controller.save_map(map_inst.path, map_data)\n      self._saved_maps.add(map_name)\n\n    create = sc_pb.RequestCreateGame(\n        local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),\n        disable_fog=False)\n\n    \n    if not bot_first:\n      create.player_setup.add(type=sc_pb.Participant)\n\n    create.player_setup.add(\n        type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)\n\n    if bot_first:\n      create.player_setup.add(type=sc_pb.Participant)\n\n    \n    self._controller.create_game(create)", "docstring": "Create a game, one remote agent vs the specified bot.\n\nArgs:\nmap_name: The map to use.\nbot_difficulty: The difficulty of the bot to play against.\nbot_race: The race for the bot.\nbot_first: Whether the bot should be player 1 (else is player 2).", "source": "juraj-google-style"}
{"code": "def __init__(self, item_id, desc,  \n                 resources, uri, metadata_dict, music_service=None):\n        \n        _LOG.debug('%s.__init__ with item_id=%s, desc=%s, resources=%s, '\n                   'uri=%s, metadata_dict=..., music_service=%s',\n                   self.__class__.__name__, item_id, desc, resources, uri,\n                   music_service)\n        super(MusicServiceItem, self).__init__(metadata_dict)\n        self.item_id = item_id\n        self.desc = desc\n        self.resources = resources\n        self.uri = uri\n        self.music_service = music_service", "docstring": "Init music service item\n\nArgs:\nitem_id (str): This is the Didl compatible id NOT the music item id\ndesc (str): A DIDL descriptor, default ``'RINCON_AssociatedZPUDN'``\nresources (list): List of DidlResource\nuri (str): The uri for the location of the item\nmetadata_dict (dict): Mapping of metadata\nmusic_service (MusicService): The MusicService instance the item\noriginates from", "source": "juraj-google-style"}
{"code": "def get_model(servoid):\n    \n    data = []\n    data.append(0x09)\n    data.append(servoid)\n    data.append(EEP_READ_REQ)\n    data.append(MODEL_NO1_EEP)\n    data.append(BYTE1)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(12)\n        return ord(rxdata[9])&0xFF\n    except:\n        raise HerkulexError(\"could not communicate with motors\")", "docstring": "Get the servo model\n\nThis function gets the model of the herkules servo, provided its id\n\nArgs:\nservoid(int): the id of the servo\n\nReturns:\nint:  an integer corresponding to the model number\n0x06 for DRS-602\n0x04 for DRS-402\n0x02 for DRS-202", "source": "juraj-google-style"}
{"code": "def astype(self, col_dtypes, **kwargs):\n    dtype_indices = {}\n    columns = col_dtypes.keys()\n    numeric_indices = list(self.columns.get_indexer_for(columns))\n    new_dtypes = self.dtypes.copy()\n    for (i, column) in enumerate(columns):\n        dtype = col_dtypes[column]\n        if ((not isinstance(dtype, type(self.dtypes[column]))) or (dtype != self.dtypes[column])):\n            if (dtype in dtype_indices.keys()):\n                dtype_indices[dtype].append(numeric_indices[i])\n            else:\n                dtype_indices[dtype] = [numeric_indices[i]]\n            try:\n                new_dtype = np.dtype(dtype)\n            except TypeError:\n                new_dtype = dtype\n            if ((dtype != np.int32) and (new_dtype == np.int32)):\n                new_dtype = np.dtype('int64')\n            elif ((dtype != np.float32) and (new_dtype == np.float32)):\n                new_dtype = np.dtype('float64')\n            new_dtypes[column] = new_dtype\n    new_data = self.data\n    for dtype in dtype_indices.keys():\n\n        def astype(df, internal_indices=[]):\n            block_dtypes = {}\n            for ind in internal_indices:\n                block_dtypes[df.columns[ind]] = dtype\n            return df.astype(block_dtypes)\n        new_data = new_data.apply_func_to_select_indices(0, astype, dtype_indices[dtype], keep_remaining=True)\n    return self.__constructor__(new_data, self.index, self.columns, new_dtypes)", "docstring": "Converts columns dtypes to given dtypes.\n\nArgs:\ncol_dtypes: Dictionary of {col: dtype,...} where col is the column\nname and dtype is a numpy dtype.\n\nReturns:\nDataFrame with updated dtypes.", "source": "codesearchnet"}
{"code": "def __init__(self, mount_path=None, path_specification=None):\n    \n    super(MountPoint, self).__init__()\n    self.mount_path = mount_path\n    self.path_specification = path_specification", "docstring": "Initializes a mount point.\n\nArgs:\nmount_path (Optional[str]): path where the path specification is mounted,\nsuch as \"/mnt/image\" or \"C:\\\\\".\npath_specification (Optional[dfvfs.PathSpec]): path specification.", "source": "juraj-google-style"}
{"code": "def ExpandWindowsEnvironmentVariables(data_string, knowledge_base):\n    win_environ_regex = re.compile('%([^%]+?)%')\n    components = []\n    offset = 0\n    for match in win_environ_regex.finditer(data_string):\n        components.append(data_string[offset:match.start()])\n        kb_value = getattr(knowledge_base, ('environ_%s' % match.group(1).lower()), None)\n        if (isinstance(kb_value, string_types) and kb_value):\n            components.append(kb_value)\n        else:\n            components.append(('%%%s%%' % match.group(1)))\n        offset = match.end()\n    components.append(data_string[offset:])\n    return ''.join(components)", "docstring": "Take a string and expand any Windows environment variables.\n\nArgs:\ndata_string: A string, e.g. \"%SystemRoot%\\\\LogFiles\"\nknowledge_base: A knowledgebase object.\n\nReturns:\nA string with available environment variables expanded. If we can't expand\nwe just return the string with the original variables.", "source": "codesearchnet"}
{"code": "def char_style(self, style):\n        \n        styleset = {'normal': 0,\n                    'outline': 1,\n                    'shadow': 2,\n                    'outlineshadow': 3\n                    }\n        if style in styleset:\n            self.send(chr(27) + 'q' + chr(styleset[style]))\n        else:\n            raise RuntimeError('Invalid character style in function charStyle')", "docstring": "Sets the character style.\n\nArgs:\nstyle: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid character style", "source": "juraj-google-style"}
{"code": "def as_numpy(dataset, graph=None):\n    nested_ds = dataset\n    del dataset\n    flat_ds = tf.nest.flatten(nested_ds)\n    flat_np = []\n    for ds_el in flat_ds:\n        types = [type(el) for el in flat_ds]\n        types = tf.nest.pack_sequence_as(nested_ds, types)\n        if (not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el))):\n            raise ValueError(('Arguments to as_numpy must be tf.Tensors or tf.data.Datasets. Got: %s' % types))\n    if tf.executing_eagerly():\n        for ds_el in flat_ds:\n            if isinstance(ds_el, tf.Tensor):\n                np_el = ds_el.numpy()\n            elif tf_compat.is_dataset(ds_el):\n                np_el = _eager_dataset_iterator(ds_el)\n            else:\n                assert False\n            flat_np.append(np_el)\n    else:\n        with utils.maybe_with_graph(graph, create_if_none=False):\n            ds_iters = [tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next() for ds_el in flat_ds if tf_compat.is_dataset(ds_el)]\n        ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]\n        with utils.nogpu_session(graph) as sess:\n            np_arrays = sess.run([tensor for tensor in flat_ds if (not tf_compat.is_dataset(tensor))])\n        iter_ds = iter(ds_iters)\n        iter_array = iter(np_arrays)\n        flat_np = [(next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)) for ds_el in flat_ds]\n    return tf.nest.pack_sequence_as(nested_ds, flat_np)", "docstring": "Converts a `tf.data.Dataset` to an iterable of NumPy arrays.\n\n`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s\nand `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.\n\nArgs:\ndataset: a possibly nested structure of `tf.data.Dataset`s and/or\n`tf.Tensor`s.\ngraph: `tf.Graph`, optional, explicitly set the graph to use.\n\nReturns:\nA structure matching `dataset` where `tf.data.Dataset`s are converted to\ngenerators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.", "source": "codesearchnet"}
{"code": "def _delete_from_hdx(self, object_type, id_field_name):\n    if (id_field_name not in self.data):\n        raise HDXError(('No %s field (mandatory) in %s!' % (id_field_name, object_type)))\n    self._save_to_hdx('delete', id_field_name)", "docstring": "Helper method to delete a resource from HDX\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def wait_for(self, timeout=10000, interval=1000, asserter=(lambda x: x)):\n    if (not callable(asserter)):\n        raise TypeError('Asserter must be callable.')\n\n    @retry(retry_on_exception=(lambda ex: isinstance(ex, WebDriverException)), stop_max_delay=timeout, wait_fixed=interval)\n    def _wait_for(driver):\n        asserter(driver)\n        return driver\n    return _wait_for(self)", "docstring": "Wait for the driver until it satisfies the given condition\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\ntimeout(int): How long we should be retrying stuff.\ninterval(int): How long between retries.\nasserter(callable): The asserter func to determine the result.\n\nReturns:\nReturn the driver.\n\nRaises:\nWebDriverException.", "source": "codesearchnet"}
{"code": "def setattr(self, name, val):\n    nodes = self._do_query(multiple=False)\n    try:\n        return self.poco.agent.hierarchy.setAttr(nodes, name, val)\n    except UnableToSetAttributeException as e:\n        raise InvalidOperationException('\"{}\" of \"{}\"'.format(str(e), self))", "docstring": "Change the attribute value of the UI element. Not all attributes can be casted to text. If changing the\nimmutable attributes or attributes which do not exist, the InvalidOperationException exception is raised.\n\nArgs:\nname: attribute name\nval: new attribute value to cast\n\nRaises:\nInvalidOperationException: when it fails to set the attribute on UI element", "source": "codesearchnet"}
{"code": "def get_accounts(cls, soco=None):\n    root = XML.fromstring(cls._get_account_xml(soco))\n    xml_accounts = root.findall('.//Account')\n    result = {}\n    for xml_account in xml_accounts:\n        serial_number = xml_account.get('SerialNum')\n        is_deleted = (True if (xml_account.get('Deleted') == '1') else False)\n        if cls._all_accounts.get(serial_number):\n            if is_deleted:\n                del cls._all_accounts[serial_number]\n                continue\n            else:\n                account = cls._all_accounts.get(serial_number)\n        else:\n            if is_deleted:\n                continue\n            account = Account()\n            account.serial_number = serial_number\n            cls._all_accounts[serial_number] = account\n        account.service_type = xml_account.get('Type')\n        account.deleted = is_deleted\n        account.username = xml_account.findtext('UN')\n        account.metadata = xml_account.findtext('MD')\n        account.nickname = xml_account.findtext('NN')\n        account.oa_device_id = xml_account.findtext('OADevID')\n        account.key = xml_account.findtext('Key')\n        result[serial_number] = account\n    tunein = Account()\n    tunein.service_type = '65031'\n    tunein.deleted = False\n    tunein.username = ''\n    tunein.metadata = ''\n    tunein.nickname = ''\n    tunein.oa_device_id = ''\n    tunein.key = ''\n    tunein.serial_number = '0'\n    result['0'] = tunein\n    return result", "docstring": "Get all accounts known to the Sonos system.\n\nArgs:\nsoco (`SoCo`, optional): a `SoCo` instance to query. If `None`, a\nrandom instance is used. Defaults to `None`.\n\nReturns:\ndict: A dict containing account instances. Each key is the\naccount's serial number, and each value is the related Account\ninstance. Accounts which have been marked as deleted are excluded.\n\nNote:\nAny existing Account instance will have its attributes updated\nto those currently stored on the Sonos system.", "source": "codesearchnet"}
{"code": "def number_occurences(self, proc):\n    return len([True for row in self.data if (proc in row[self.command_name])])", "docstring": "Returns the number of occurrences of commands that contain the given text\n\nReturns:\nint: The number of occurrences of commands with the given text\n\n.. note::\n'proc' can match anywhere in the command path, name or arguments.", "source": "codesearchnet"}
{"code": "def download_decompress(url: str, download_path: [Path, str], extract_paths=None):\n    \n    file_name = Path(urlparse(url).path).name\n    download_path = Path(download_path)\n\n    if extract_paths is None:\n        extract_paths = [download_path]\n    elif isinstance(extract_paths, list):\n        extract_paths = [Path(path) for path in extract_paths]\n    else:\n        extract_paths = [Path(extract_paths)]\n\n    cache_dir = os.getenv('DP_CACHE_DIR')\n    extracted = False\n    if cache_dir:\n        cache_dir = Path(cache_dir)\n        url_hash = md5(url.encode('utf8')).hexdigest()[:15]\n        arch_file_path = cache_dir / url_hash\n        extracted_path = cache_dir / (url_hash + '_extracted')\n        extracted = extracted_path.exists()\n        if not extracted and not arch_file_path.exists():\n            simple_download(url, arch_file_path)\n    else:\n        arch_file_path = download_path / file_name\n        simple_download(url, arch_file_path)\n        extracted_path = extract_paths.pop()\n\n    if not extracted:\n        log.info('Extracting {} archive into {}'.format(arch_file_path, extracted_path))\n        extracted_path.mkdir(parents=True, exist_ok=True)\n\n        if file_name.endswith('.tar.gz'):\n            untar(arch_file_path, extracted_path)\n        elif file_name.endswith('.gz'):\n            ungzip(arch_file_path, extracted_path / Path(file_name).with_suffix('').name)\n        elif file_name.endswith('.zip'):\n            with zipfile.ZipFile(arch_file_path, 'r') as zip_ref:\n                zip_ref.extractall(extracted_path)\n        else:\n            raise RuntimeError(f'Trying to extract an unknown type of archive {file_name}')\n\n        if not cache_dir:\n            arch_file_path.unlink()\n\n    for extract_path in extract_paths:\n        for src in extracted_path.iterdir():\n            dest = extract_path / src.name\n            if src.is_dir():\n                copytree(src, dest)\n            else:\n                extract_path.mkdir(parents=True, exist_ok=True)\n                shutil.copy(str(src), str(dest))", "docstring": "Download and extract .tar.gz or .gz file to one or several target locations.\nThe archive is deleted if extraction was successful.\n\nArgs:\nurl: URL for file downloading\ndownload_path: path to the directory where downloaded file will be stored\nuntil the end of extraction\nextract_paths: path or list of paths where contents of archive will be extracted", "source": "juraj-google-style"}
{"code": "def __init__(self, latitude, longitude, altitude=None, name=None,\n                 description=None):\n        \n        super(Placemark, self).__init__(latitude, longitude, altitude, name)\n\n        if altitude:\n            self.altitude = float(altitude)\n        self.description = description", "docstring": "Initialise a new ``Placemark`` object.\n\nArgs:\nlatitude (float): Placemark's latitude\nlongitude (float): Placemark's longitude\naltitude (float): Placemark's altitude\nname (str): Name for placemark\ndescription (str): Placemark's description", "source": "juraj-google-style"}
{"code": "def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):\n        \n        \n        new_resource.check_required_fields(ignore_fields=ignore_fields)\n        self.resources.append(new_resource)\n        if new_resource.get_file_to_upload():\n            filestore_resources.append(new_resource)\n            new_resource['url'] = Dataset.temporary_url", "docstring": "Helper method to add new resource from dataset including filestore.\n\nArgs:\nnew_resource (hdx.data.Resource): New resource from dataset\nignore_fields (List[str]): List of fields to ignore when checking resource\nfilestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def RemoveClass(self, class_name):\n    \n    if class_name not in self._class_mapping:\n      raise problems.NonexistentMapping(class_name)\n    del self._class_mapping[class_name]", "docstring": "Removes an entry from the list of known classes.\n\nArgs:\nclass_name: A string with the class name that is to be removed.\nRaises:\nNonexistentMapping if there is no class with the specified class_name.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=enums.CertificateType.X_509):\n        \n        super(CertificateType, self).__init__(\n            enums.CertificateType, value, Tags.CERTIFICATE_TYPE)", "docstring": "Construct a CertificateType object.\n\nArgs:\nvalue (CertificateType): A CertificateType enumeration\nvalue, (e.g., CertificateType.PGP). Optional, defaults to\nCertificateType.X_509.", "source": "juraj-google-style"}
{"code": "def RunValidation(feed, options, problems):\n    util.CheckVersion(problems, options.latest_version)\n    if options.extension:\n        try:\n            __import__(options.extension)\n            extension_module = sys.modules[options.extension]\n        except ImportError:\n            print(('Could not import extension %s! Please ensure it is a proper Python module.' % options.extension))\n            exit(2)\n    else:\n        extension_module = transitfeed\n    gtfs_factory = extension_module.GetGtfsFactory()\n    print(('validating %s' % feed))\n    print(('FeedValidator extension used: %s' % options.extension))\n    loader = gtfs_factory.Loader(feed, problems=problems, extra_validation=False, memory_db=options.memory_db, check_duplicate_trips=options.check_duplicate_trips, gtfs_factory=gtfs_factory)\n    schedule = loader.Load()\n    schedule.Validate(service_gap_interval=options.service_gap_interval, validate_children=False)\n    if (feed == 'IWantMyvalidation-crash.txt'):\n        raise Exception('For testing the feed validator crash handler.')\n    accumulator = problems.GetAccumulator()\n    if accumulator.HasIssues():\n        print(('ERROR: %s found' % accumulator.FormatCount()))\n        return (schedule, 1)\n    else:\n        print('feed validated successfully')\n        return (schedule, 0)", "docstring": "Validate feed, returning the loaded Schedule and exit code.\n\nArgs:\nfeed: GTFS file, either path of the file as a string or a file object\noptions: options object returned by optparse\nproblems: transitfeed.ProblemReporter instance\n\nReturns:\na transitfeed.Schedule object, exit code and plain text string of other\nproblems\nExit code is 2 if an extension is provided but can't be loaded, 1 if\nproblems are found and 0 if the Schedule is problem free.\nplain text string is '' if no other problems are found.", "source": "codesearchnet"}
{"code": "def __init__(self, x, name):\n    self.x = x\n    self.name = name", "docstring": "Construct DivideDelegateWithName.\n\nArgs:\nx: Tensor to use as left operand in operator overloads\nname: The name that is preferred for the op created.", "source": "github-repos"}
{"code": "def splitpath(self, path):\n    path = self.normcase(path)\n    sep = self._path_separator(path)\n    path_components = path.split(sep)\n    if (not path_components):\n        return ('', '')\n    starts_with_drive = self._starts_with_drive_letter(path)\n    basename = path_components.pop()\n    colon = self._matching_string(path, ':')\n    if (not path_components):\n        if starts_with_drive:\n            components = basename.split(colon)\n            return ((components[0] + colon), components[1])\n        return ('', basename)\n    for component in path_components:\n        if component:\n            while (not path_components[(- 1)]):\n                path_components.pop()\n            if starts_with_drive:\n                if (not path_components):\n                    components = basename.split(colon)\n                    return ((components[0] + colon), components[1])\n                if ((len(path_components) == 1) and path_components[0].endswith(colon)):\n                    return ((path_components[0] + sep), basename)\n            return (sep.join(path_components), basename)\n    return (sep, basename)", "docstring": "Mimic os.path.splitpath using the specified path_separator.\n\nMimics os.path.splitpath using the path_separator that was specified\nfor this FakeFilesystem.\n\nArgs:\npath:  (str) The path to split.\n\nReturns:\n(str) A duple (pathname, basename) for which pathname does not\nend with a slash, and basename does not contain a slash.", "source": "codesearchnet"}
{"code": "def today(boo):\n  \n  tod = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')\n  if boo:\n    return int(str(tod).replace('-', '')[:8])\n  else:\n    return str(tod)[:10]", "docstring": "Return today's date as either a String or a Number, as specified by the User.\n\nArgs:\nboo: if true, function returns Number (20151230); if false, returns String (\"2015-12-30\")\nReturns:\neither a Number or a string, dependent upon the user's input", "source": "juraj-google-style"}
{"code": "def find_all(self, kw: YangIdentifier, pref: YangIdentifier=None) -> List['Statement']:\n    return [c for c in self.substatements if ((c.keyword == kw) and (c.prefix == pref))]", "docstring": "Return the list all substatements with the given keyword and prefix.\n\nArgs:\nkw: Statement keyword (local part for extensions).\npref: Keyword prefix (``None`` for built-in statements).", "source": "codesearchnet"}
{"code": "def init(module_paths, work_db, config):\n    operator_names = cosmic_ray.plugins.operator_names()\n    work_db.set_config(config=config)\n    work_db.clear()\n    for module_path in module_paths:\n        module_ast = get_ast(module_path, python_version=config.python_version)\n        for op_name in operator_names:\n            operator = get_operator(op_name)(config.python_version)\n            visitor = WorkDBInitVisitor(module_path, op_name, work_db, operator)\n            visitor.walk(module_ast)\n    apply_interceptors(work_db, config.sub('interceptors').get('enabled', ()))", "docstring": "Clear and initialize a work-db with work items.\n\nAny existing data in the work-db will be cleared and replaced with entirely\nnew work orders. In particular, this means that any results in the db are\nremoved.\n\nArgs:\nmodule_paths: iterable of pathlib.Paths of modules to mutate.\nwork_db: A `WorkDB` instance into which the work orders will be saved.\nconfig: The configuration for the new session.", "source": "codesearchnet"}
{"code": "def __init__(self, usaf):\n        \n        filename = env.WEATHER_DATA_PATH + '/' + usaf + 'TYA.csv'\n        self.csvfile = None\n        try:\n            self.csvfile = open(filename)\n        except IOError:\n            logger.info(\"%s not found\", filename)\n            download(_tmy_url(usaf), filename)\n            self.csvfile = open(filename)\n        logging.debug('opened %s', self.csvfile.name)\n        header = self.csvfile.readline().split(',')\n        self.tmy_data = csv.DictReader(self.csvfile)\n        self.latitude = float(header[4])\n        self.longitude = float(header[5])\n        self.tz = float(header[3])", "docstring": "initialize.\n\nArgs:\nusaf (str)\n\nReturns:\n(object)", "source": "juraj-google-style"}
{"code": "def sample_measurements(\n            self,\n            indices: List[int],\n            repetitions: int=1) -> List[List[bool]]:\n        \n        \n        reversed_indices = [self._num_qubits - 1 - index for index in indices]\n        return sim.sample_state_vector(self._current_state(), reversed_indices,\n                                       repetitions)", "docstring": "Samples from measurements in the computational basis.\n\nNote that this does not collapse the wave function.\n\nArgs:\nindices: Which qubits are measured.\n\nReturns:\nMeasurement results with True corresponding to the |1> state.\nThe outer list is for repetitions, and the inner corresponds to\nmeasurements ordered by the input indices.\n\nRaises:\nValueError if repetitions is less than one.", "source": "juraj-google-style"}
{"code": "def update_from_yaml(self, path=join('config', 'hdx_dataset_static.yml')):\n    super(Dataset, self).update_from_yaml(path)\n    self.separate_resources()", "docstring": "Update dataset metadata with static metadata from YAML file\n\nArgs:\npath (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _get_logged_ops(graph, run_meta=None, add_trace=True, add_trainable_var=True):\n    if run_meta:\n        graph = _fill_missing_graph_shape(graph, run_meta)\n    op_missing_shape = 0\n    logged_ops = {}\n    string_to_id = {}\n    string_to_id['none'] = len(string_to_id)\n    for op in graph.get_operations():\n        try:\n            stats = ops.get_stats_for_node_def(graph, op.node_def, REGISTERED_FLOP_STATS)\n        except ValueError:\n            op_missing_shape += 1\n            stats = None\n        entry = tfprof_log_pb2.OpLogEntry()\n        entry.name = op.name\n        add_entry = False\n        if stats and stats.value:\n            entry.float_ops = int(stats.value)\n            add_entry = True\n        if add_trace:\n            if op.traceback:\n                for filename, lineno, funcname, line in op.traceback:\n                    trace = entry.code_def.traces.add()\n                    trace.file_id = _str_id(filename, string_to_id) if filename else 0\n                    trace.lineno = lineno if lineno else -1\n                    trace.function_id = _str_id(funcname, string_to_id) if funcname else 0\n                    trace.line_id = _str_id(line, string_to_id) if line else 0\n                    trace.func_start_line = -1\n            add_entry = True\n        if add_entry:\n            logged_ops[entry.name] = entry\n    if add_trainable_var:\n        for v in graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES):\n            if v.op.name not in logged_ops:\n                entry = tfprof_log_pb2.OpLogEntry()\n                entry.name = v.op.name\n                entry.types.append(TRAINABLE_VARIABLES)\n                logged_ops[entry.name] = entry\n            else:\n                logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES)\n    if op_missing_shape > 0 and (not run_meta):\n        sys.stderr.write('%d ops no flops stats due to incomplete shapes.\\n' % op_missing_shape)\n    return (logged_ops, string_to_id)", "docstring": "Extract trainable model parameters and FLOPs for ops from a Graph.\n\nArgs:\ngraph: tf.Graph.\nrun_meta: RunMetadata proto used to complete shape information.\nadd_trace: Whether to add op trace information.\nadd_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op\ntype '_trainable_variables'.\nReturns:\nlogged_ops: dict mapping from op_name to OpLogEntry.\nstring_to_id: dict mapping from string to id.", "source": "github-repos"}
{"code": "def resolve_topic(topic):\n    \n    try:\n        module_name, _, class_name = topic.partition('\n        module = importlib.import_module(module_name)\n    except ImportError as e:\n        raise TopicResolutionError(\"{}: {}\".format(topic, e))\n    try:\n        cls = resolve_attr(module, class_name)\n    except AttributeError as e:\n        raise TopicResolutionError(\"{}: {}\".format(topic, e))\n    return cls", "docstring": "Return class described by given topic.\n\nArgs:\ntopic: A string describing a class.\n\nReturns:\nA class.\n\nRaises:\nTopicResolutionError: If there is no such class.", "source": "juraj-google-style"}
{"code": "def read_tracers_h5(xdmf_file, infoname, snapshot, position):\n    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n    tra = {}\n    tra[infoname] = [{}, {}]\n    if position:\n        for axis in 'xyz':\n            tra[axis] = [{}, {}]\n    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):\n        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))\n        if position:\n            for data_attr in elt_subdomain.findall('Geometry'):\n                for (data_item, axis) in zip(data_attr.findall('DataItem'), 'xyz'):\n                    (icore, data) = _get_field(xdmf_file, data_item)\n                    tra[axis][ibk][icore] = data\n        for data_attr in elt_subdomain.findall('Attribute'):\n            if (data_attr.get('Name') != infoname):\n                continue\n            (icore, data) = _get_field(xdmf_file, data_attr.find('DataItem'))\n            tra[infoname][ibk][icore] = data\n    for info in tra:\n        tra[info] = [trab for trab in tra[info] if trab]\n        for (iblk, trab) in enumerate(tra[info]):\n            tra[info][iblk] = np.concatenate([trab[icore] for icore in range(len(trab))])\n    return tra", "docstring": "Extract tracers data from hdf5 files.\n\nArgs:\nxdmf_file (:class:`pathlib.Path`): path of the xdmf file.\ninfoname (str): name of information to extract.\nsnapshot (int): snapshot number.\nposition (bool): whether to extract position of tracers.\nReturns:\ndict of list of numpy.array:\nTracers data organized by attribute and block.", "source": "codesearchnet"}
{"code": "def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):\n    try:\n        flow = self.session.client('ec2', region)\n        flow.create_flow_logs(ResourceIds=[vpc_id], ResourceType='VPC', TrafficType='ALL', LogGroupName=vpc_id, DeliverLogsPermissionArn=iam_role_arn)\n        fvpc = VPC.get(vpc_id)\n        fvpc.set_property('vpc_flow_logs_status', 'ACTIVE')\n        self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))\n        auditlog(event='vpc_flow_logs.create_vpc_flow', actor=self.ns, data={'account': account.account_name, 'region': region, 'vpcId': vpc_id, 'arn': iam_role_arn})\n    except Exception:\n        self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(account, region, vpc_id))", "docstring": "Create a new VPC Flow log\n\nArgs:\naccount (:obj:`Account`): Account to create the flow in\nregion (`str`): Region to create the flow in\nvpc_id (`str`): ID of the VPC to create the flow for\niam_role_arn (`str`): ARN of the IAM role used to post logs to the log group\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def override_from_dict(self, values_dict):\n    \n    for name, value in values_dict.items():\n      self.set_hparam(name, value)\n    return self", "docstring": "Override existing hyperparameter values, parsing new values from a dictionary.\n\nArgs:\nvalues_dict: Dictionary of name:value pairs.\n\nReturns:\nThe `HParams` instance.\n\nRaises:\nKeyError: If a hyperparameter in `values_dict` doesn't exist.\nValueError: If `values_dict` cannot be parsed.", "source": "juraj-google-style"}
{"code": "def docx_text_from_xml_node(node: ElementTree.Element,\n                            level: int,\n                            config: TextProcessingConfig) -> str:\n    \n    text = ''\n    \n    if node.tag == DOCX_TEXT:\n        text += node.text or ''\n    elif node.tag == DOCX_TAB:\n        text += '\\t'\n    elif node.tag in DOCX_NEWLINES:\n        text += '\\n'\n    elif node.tag == DOCX_NEWPARA:\n        text += '\\n\\n'\n\n    if node.tag == DOCX_TABLE:\n        text += '\\n\\n' + docx_table_from_xml_node(node, level, config)\n    else:\n        for child in node:\n            text += docx_text_from_xml_node(child, level + 1, config)\n    return text", "docstring": "Returns text from an XML node within a DOCX file.\n\nArgs:\nnode: an XML node\nlevel: current level in XML hierarchy (used for recursion; start level\nis 0)\nconfig: :class:`TextProcessingConfig` control object\n\nReturns:\ncontents as a string", "source": "juraj-google-style"}
{"code": "def convert(self):\n    saved_model_convert_result = self._convert_as_saved_model()\n    if saved_model_convert_result:\n        return saved_model_convert_result\n    graph_def, input_tensors, output_tensors, frozen_func = self._freeze_keras_model()\n    graph_def = self._optimize_tf_model(graph_def, input_tensors, output_tensors, frozen_func)\n    return super(TFLiteKerasModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)", "docstring": "Converts a keras model based on instance variables.\n\nReturns:\nThe converted data in serialized format.\n\nRaises:\nValueError:\nMultiple concrete functions are specified.\nInput shape is not specified.\nInvalid quantization parameters.", "source": "github-repos"}
{"code": "def __init__(self, node: cfg.CFGNode, ctx: _ContextType, f_code: blocks.OrderedCode, f_globals: abstract.LazyConcreteDict, f_locals: abstract.LazyConcreteDict, f_back: FrameType, callargs: dict[str, cfg.Variable], closure: tuple[cfg.Variable, ...] | None, func: cfg.Binding | None, first_arg: cfg.Variable | None, substs: Collection[dict[str, cfg.Variable]]):\n    super().__init__(ctx)\n    self.node = node\n    self.current_opcode = None\n    self.f_code = f_code\n    self.states = {}\n    self.f_globals = f_globals\n    self.f_locals = f_locals\n    self.f_back = f_back\n    if f_back and f_back.f_builtins:\n        self.f_builtins = f_back.f_builtins\n    else:\n        _, bltin = self.ctx.attribute_handler.get_attribute(self.ctx.root_node, f_globals, '__builtins__')\n        builtins_pu, = bltin.bindings\n        self.f_builtins = builtins_pu.data\n    self.f_lineno = f_code.firstlineno\n    self.first_arg = first_arg\n    self.allowed_returns = None\n    self.check_return = False\n    self.return_variable = self.ctx.program.NewVariable()\n    self.yield_variable = self.ctx.program.NewVariable()\n    self.current_block = None\n    self.targets = collections.defaultdict(list)\n    self.overloads = collections.defaultdict(list)\n    self.closure = closure\n    freevars = closure or []\n    assert len(f_code.freevars) == len(freevars)\n    if self.ctx.python_version < (3, 11):\n        cell_names = f_code.cellvars\n    elif freevars:\n        cell_names = f_code.localsplus[:-len(freevars)]\n    else:\n        cell_names = f_code.localsplus\n    self.cells = [self.ctx.program.NewVariable() for _ in cell_names]\n    self.cells.extend(freevars)\n    if callargs:\n        for name, value in sorted(callargs.items()):\n            if name in f_code.cellvars:\n                i = cell_names.index(name)\n                self.cells[i].PasteVariable(value, node)\n            else:\n                self.ctx.attribute_handler.set_attribute(node, f_locals, name, value)\n    self.class_closure_var = None\n    if func and isinstance(func.data, abstract.InterpreterFunction):\n        closure_name = abstract.BuildClass.CLOSURE_NAME\n        if func.data.is_class_builder and closure_name in f_code.cellvars:\n            self.class_closure_var = self.get_cell_by_name(closure_name)\n    self.func = func\n    self.substs = substs\n    self.skip_in_tracebacks = False\n    if f_code.filename:\n        self.module_name = module_utils.path_to_module_name(f_code.filename)\n    else:\n        self.module_name = ''\n    self.functions_created_in_frame: dict[str, list[abstract.InterpreterFunction]] = collections.defaultdict(list)", "docstring": "Initialize a special frame as needed by TypegraphVirtualMachine.\n\nArgs:\nnode: The current CFG graph node.\nctx: The owning abstract context.\nf_code: The code object to execute in this frame.\nf_globals: The global context to execute in as a SimpleValue as used by\nTypegraphVirtualMachine.\nf_locals: Local variables. Will be modified if callargs is passed.\nf_back: The frame above this one on the stack.\ncallargs: Additional function arguments to store in f_locals.\nclosure: A tuple containing the new co_freevars.\nfunc: Optionally, a binding to the function this frame corresponds to.\nfirst_arg: First argument to the function.\nsubsts: Maps from type parameter names in scope for this frame to their\npossible values.\n\nRaises:\nNameError: If we can't resolve any references into the outer frame.", "source": "github-repos"}
{"code": "def _xys(date):\n    \n\n    X, Y, s_xy2 = _xysxy2(date)\n\n    \n    dX, dY = date.eop.dx / 1000., date.eop.dy / 1000.\n\n    \n    X = np.radians((X + dX) / 3600.)\n    Y = np.radians((Y + dY) / 3600.)\n    s = np.radians(s_xy2 / 3600.) - (X * Y / 2)\n\n    return X, Y, s", "docstring": "Get The X, Y and s coordinates\n\nArgs:\ndate (Date):\nReturn:\n3-tuple of float: Values of X, Y and s, in radians", "source": "juraj-google-style"}
{"code": "def determine_plasma_store_config(object_store_memory=None, plasma_directory=None, huge_pages=False):\n    system_memory = ray.utils.get_system_memory()\n    if (object_store_memory is None):\n        object_store_memory = int((system_memory * 0.3))\n        if (object_store_memory > ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES):\n            logger.warning(('Warning: Capping object memory store to {}GB. '.format((ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES \n            object_store_memory = ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES\n    if (plasma_directory is None):\n        if ((sys.platform == 'linux') or (sys.platform == 'linux2')):\n            shm_avail = ray.utils.get_shared_memory_bytes()\n            if (shm_avail > object_store_memory):\n                plasma_directory = '/dev/shm'\n            else:\n                plasma_directory = '/tmp'\n                logger.warning(\"WARNING: The object store is using /tmp instead of /dev/shm because /dev/shm has only {} bytes available. This may slow down performance! You may be able to free up space by deleting files in /dev/shm or terminating any running plasma_store_server processes. If you are inside a Docker container, you may need to pass an argument with the flag '--shm-size' to 'docker run'.\".format(shm_avail))\n        else:\n            plasma_directory = '/tmp'\n        if (object_store_memory > system_memory):\n            raise Exception('The requested object store memory size is greater than the total available memory.')\n    else:\n        plasma_directory = os.path.abspath(plasma_directory)\n        logger.warning('WARNING: object_store_memory is not verified when plasma_directory is set.')\n    if (not os.path.isdir(plasma_directory)):\n        raise Exception('The file {} does not exist or is not a directory.'.format(plasma_directory))\n    return (object_store_memory, plasma_directory)", "docstring": "Figure out how to configure the plasma object store.\n\nThis will determine which directory to use for the plasma store (e.g.,\n/tmp or /dev/shm) and how much memory to start the store with. On Linux,\nwe will try to use /dev/shm unless the shared memory file system is too\nsmall, in which case we will fall back to /tmp. If any of the object store\nmemory or plasma directory parameters are specified by the user, then those\nvalues will be preserved.\n\nArgs:\nobject_store_memory (int): The user-specified object store memory\nparameter.\nplasma_directory (str): The user-specified plasma directory parameter.\nhuge_pages (bool): The user-specified huge pages parameter.\n\nReturns:\nA tuple of the object store memory to use and the plasma directory to\nuse. If either of these values is specified by the user, then that\nvalue will be preserved.", "source": "codesearchnet"}
{"code": "def parse_isoformat(timestamp):\n    if (len(timestamp) == 20):\n        zone = TzOffset('+00:00')\n        timestamp = timestamp[:(- 1)]\n    elif (len(timestamp) == 24):\n        zone = TzOffset(('%s:%s' % (timestamp[(- 5):(- 2)], timestamp[(- 2):])))\n        timestamp = timestamp[:(- 5)]\n    elif (len(timestamp) == 25):\n        zone = TzOffset(timestamp[(- 6):])\n        timestamp = timestamp[:(- 6)]\n    timestamp = Timestamp.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n    timestamp = timestamp.replace(tzinfo=zone)\n    return timestamp", "docstring": "Parse an ISO 8601 formatted time stamp.\n\nArgs:\ntimestamp (str): Timestamp to parse\n\nReturns:\nTimestamp: Parsed timestamp", "source": "codesearchnet"}
{"code": "def get_op_or_tensor_by_name(name):\n    G = tfv1.get_default_graph()\n\n    def f(n):\n        if ((len(n) >= 3) and (n[(- 2)] == ':')):\n            return G.get_tensor_by_name(n)\n        else:\n            return G.get_operation_by_name(n)\n    if (not isinstance(name, list)):\n        return f(name)\n    else:\n        return list(map(f, name))", "docstring": "Get either tf.Operation of tf.Tensor from names.\n\nArgs:\nname (list[str] or str): names of operations or tensors.\n\nRaises:\nKeyError, if the name doesn't exist", "source": "codesearchnet"}
{"code": "def reset(self):\n    with tf.name_scope((self._name + '/reset')):\n        return tf.group(self._count.assign(0), self._mean.assign(tf.zeros_like(self._mean)), self._var_sum.assign(tf.zeros_like(self._var_sum)))", "docstring": "Reset the estimates of mean and variance.\n\nResets the full state of this class.\n\nReturns:\nOperation.", "source": "codesearchnet"}
{"code": "def _make_ctx_options(ctx_options, config_cls=ContextOptions):\n    if (not ctx_options):\n        return None\n    for key in list(ctx_options):\n        translation = _OPTION_TRANSLATIONS.get(key)\n        if translation:\n            if (translation in ctx_options):\n                raise ValueError(('Cannot specify %s and %s at the same time' % (key, translation)))\n            ctx_options[translation] = ctx_options.pop(key)\n    return config_cls(**ctx_options)", "docstring": "Helper to construct a ContextOptions object from keyword arguments.\n\nArgs:\nctx_options: A dict of keyword arguments.\nconfig_cls: Optional Configuration class to use, default ContextOptions.\n\nNote that either 'options' or 'config' can be used to pass another\nConfiguration object, but not both.  If another Configuration\nobject is given it provides default values.\n\nReturns:\nA Configuration object, or None if ctx_options is empty.", "source": "codesearchnet"}
{"code": "def waitAndGet(self, event_name, timeout=DEFAULT_TIMEOUT):\n    if timeout:\n        if (timeout > MAX_TIMEOUT):\n            raise Error(self._ad, ('Specified timeout %s is longer than max timeout %s.' % (timeout, MAX_TIMEOUT)))\n    timeout_ms = int((timeout * 1000))\n    try:\n        raw_event = self._event_client.eventWaitAndGet(self._id, event_name, timeout_ms)\n    except Exception as e:\n        if ('EventSnippetException: timeout.' in str(e)):\n            raise TimeoutError(self._ad, ('Timed out after waiting %ss for event \"%s\" triggered by %s (%s).' % (timeout, event_name, self._method_name, self._id)))\n        raise\n    return snippet_event.from_dict(raw_event)", "docstring": "Blocks until an event of the specified name has been received and\nreturn the event, or timeout.\n\nArgs:\nevent_name: string, name of the event to get.\ntimeout: float, the number of seconds to wait before giving up.\n\nReturns:\nSnippetEvent, the oldest entry of the specified event.\n\nRaises:\nError: If the specified timeout is longer than the max timeout\nsupported.\nTimeoutError: The expected event does not occur within time limit.", "source": "codesearchnet"}
{"code": "def _get_common_params(self, user_id, attributes):\n    commonParams = {}\n    commonParams[self.EventParams.PROJECT_ID] = self._get_project_id()\n    commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id()\n    visitor = {}\n    visitor[self.EventParams.END_USER_ID] = user_id\n    visitor[self.EventParams.SNAPSHOTS] = []\n    commonParams[self.EventParams.USERS] = []\n    commonParams[self.EventParams.USERS].append(visitor)\n    commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes)\n    commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk'\n    commonParams[self.EventParams.ENRICH_DECISIONS] = True\n    commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__\n    commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip()\n    commonParams[self.EventParams.REVISION] = self._get_revision()\n    return commonParams", "docstring": "Get params which are used same in both conversion and impression events.\n\nArgs:\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nDict consisting of parameters common to both impression and conversion events.", "source": "codesearchnet"}
{"code": "def load_model(self, the_metamodel, filename, is_main_model, encoding='utf-8', add_to_local_models=True):\n    if (not self.local_models.has_model(filename)):\n        if self.all_models.has_model(filename):\n            new_model = self.all_models.filename_to_model[filename]\n        else:\n            new_model = the_metamodel.internal_model_from_file(filename, pre_ref_resolution_callback=(lambda other_model: self.pre_ref_resolution_callback(other_model)), is_main_model=is_main_model, encoding=encoding)\n            self.all_models.filename_to_model[filename] = new_model\n        if add_to_local_models:\n            self.local_models.filename_to_model[filename] = new_model\n    assert self.all_models.has_model(filename)\n    return self.all_models.filename_to_model[filename]", "docstring": "load a single model\n\nArgs:\nthe_metamodel: the metamodel used to load the model\nfilename: the model to be loaded (if not cached)\n\nReturns:\nthe loaded/cached model", "source": "codesearchnet"}
{"code": "def _add_example(self, example):\n        \n        if len(example.fields) != 1:\n            raise InvalidSpec(\n                'Example for union must specify exactly one tag.',\n                example.lineno, example.path)\n\n        \n        example_field = list(example.fields.values())[0]\n        tag = example_field.name\n\n        \n        for field in self.all_fields:\n            if tag == field.name:\n                break\n        else:\n            \n            raise InvalidSpec(\n                \"Unknown tag '%s' in example.\" % tag,\n                example.lineno, example.path\n            )\n\n        \n        \n        try:\n            field.data_type.check_example(example_field)\n        except InvalidSpec as e:\n            e.msg = \"Bad example for field '{}': {}\".format(\n                field.name, e.msg)\n            raise\n\n        self._raw_examples[example.label] = example", "docstring": "Adds a \"raw example\" for this type.\n\nThis does basic sanity checking to ensure that the example is valid\n(required fields specified, no unknown fields, correct types, ...).\n\nThe example is not available via :meth:`get_examples` until\n:meth:`_compute_examples` is called.\n\nArgs:\nexample (stone.frontend.ast.AstExample): An example of this\ntype.", "source": "juraj-google-style"}
{"code": "def qx(mt, x):\n    \n    if x < len(mt.qx):\n        return mt.qx[x]\n    else:\n        return 0", "docstring": "qx: Returns the probability that a life aged x dies before 1 year\nWith the convention: the true probability is qx/1000\nArgs:\nmt: the mortality table\nx: the age as integer number.", "source": "juraj-google-style"}
{"code": "def triggered(self, walker):\n        \n\n        if self.use_count:\n            comp_value = walker.count()\n        else:\n            if walker.count() == 0:\n                return False\n\n            comp_value = walker.peek().value\n\n        return self.comp_function(comp_value, self.reference)", "docstring": "Check if this input is triggered on the given stream walker.\n\nArgs:\nwalker (StreamWalker): The walker to check\n\nReturns:\nbool: Whether this trigger is triggered or not", "source": "juraj-google-style"}
{"code": "def _ParseCachedEntry8(self, value_data, cached_entry_offset):\n    try:\n        cached_entry = self._ReadStructureFromByteStream(value_data[cached_entry_offset:], cached_entry_offset, self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse cached entry value with error: {0!s}'.format(exception))\n    if (cached_entry.signature not in (self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1)):\n        raise errors.ParseError('Unsupported cache entry signature')\n    cached_entry_data = value_data[cached_entry_offset:]\n    if (cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_0):\n        data_type_map_name = 'appcompatcache_cached_entry_body_8_0'\n    elif (cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_1):\n        data_type_map_name = 'appcompatcache_cached_entry_body_8_1'\n    data_type_map = self._GetDataTypeMap(data_type_map_name)\n    context = dtfabric_data_maps.DataTypeMapContext()\n    try:\n        cached_entry_body = self._ReadStructureFromByteStream(cached_entry_data[12:], (cached_entry_offset + 12), data_type_map, context=context)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse cached entry body with error: {0!s}'.format(exception))\n    data_offset = context.byte_size\n    data_size = cached_entry_body.data_size\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = (12 + cached_entry.cached_entry_data_size)\n    cached_entry_object.insertion_flags = cached_entry_body.insertion_flags\n    cached_entry_object.last_modification_time = cached_entry_body.last_modification_time\n    cached_entry_object.path = cached_entry_body.path\n    cached_entry_object.shim_flags = cached_entry_body.shim_flags\n    if (data_size > 0):\n        cached_entry_object.data = cached_entry_data[data_offset:(data_offset + data_size)]\n    return cached_entry_object", "docstring": "Parses a Windows 8.0 or 8.1 cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "codesearchnet"}
{"code": "def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):\n        \n        try:\n            notify_info2('Updating: ', oldobj)\n            if oldobj is newobj:\n                \n                return\n\n            if type(oldobj) is not type(newobj):\n                \n                notify_error('Type of: %s changed... Skipping.' % (oldobj,))\n                return\n\n            if isinstance(newobj, types.FunctionType):\n                self._update_function(oldobj, newobj)\n                return\n\n            if isinstance(newobj, types.MethodType):\n                self._update_method(oldobj, newobj)\n                return\n\n            if isinstance(newobj, classmethod):\n                self._update_classmethod(oldobj, newobj)\n                return\n\n            if isinstance(newobj, staticmethod):\n                self._update_staticmethod(oldobj, newobj)\n                return\n\n            if hasattr(types, 'ClassType'):\n                classtype = (types.ClassType, type)  \n            else:\n                classtype = type\n\n            if isinstance(newobj, classtype):\n                self._update_class(oldobj, newobj)\n                return\n\n            \n            if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__:\n                self._update_class(oldobj, newobj)\n                return\n\n            if namespace is not None:\n\n                if oldobj != newobj and str(oldobj) != str(newobj) and repr(oldobj) != repr(newobj):\n                    xreload_old_new = None\n                    if is_class_namespace:\n                        xreload_old_new = getattr(namespace, '__xreload_old_new__', None)\n                        if xreload_old_new is not None:\n                            self.found_change = True\n                            xreload_old_new(name, oldobj, newobj)\n\n                    elif '__xreload_old_new__' in namespace:\n                        xreload_old_new = namespace['__xreload_old_new__']\n                        xreload_old_new(namespace, name, oldobj, newobj)\n                        self.found_change = True\n\n                    \n                    \n                    \n\n        except:\n            notify_error('Exception found when updating %s. Proceeding for other items.' % (name,))\n            pydev_log.exception()", "docstring": "Update oldobj, if possible in place, with newobj.\n\nIf oldobj is immutable, this simply returns newobj.\n\nArgs:\noldobj: the object to be updated\nnewobj: the object used as the source for the update", "source": "juraj-google-style"}
{"code": "def load(overlay, path=''):\n    global DEBUG\n    global _LOADED\n    if DEBUG:\n        print('LOAD OVERLAY: {0} @ {1}'.format(overlay, path))\n    if (overlay.upper() in _OVERLAYS.keys()):\n        cpath = ((OVERLAYCONFIGPATH + '/') + _FOLDERS[overlay.upper()])\n        if DEBUG:\n            print('VALID OVERLAY')\n            print('CONFIG PATH:  {0}'.format(cpath))\n        if ((overlay.upper() == 'CUST') and (path == '')):\n            raise ValueError('Path must be specified for Custom Overlay Choice')\n        elif ((overlay.upper() == 'CUST') and _LOADED[overlay.upper()]):\n            print('Custom Overlay already loaded')\n            return 2\n        elif ((overlay.upper() == 'CUST') and (not os.path.exists(path))):\n            print('Custom Overlay path does not exist')\n            return 1\n        if (is_chip_pro() and (overlay.upper() == 'PWM0')):\n            print('CHIP Pro supports PWM0 in base DTB, exiting')\n            return 1\n        if (overlay.upper() != 'CUST'):\n            opath = OVERLAYINSTALLPATH\n            opath += ('/' + _OVERLAYS[overlay.upper()])\n        else:\n            opath = path\n        if DEBUG:\n            print('OVERLAY PATH: {0}'.format(opath))\n        if ((overlay.upper() == 'PWM0') and _LOADED[overlay.upper()]):\n            print('PWM0 Overlay already loaded')\n            return 2\n        if ((overlay.upper() == 'SPI2') and _LOADED[overlay.upper()]):\n            print('SPI2 Overlay already loaded')\n            return 2\n        errc = _set_overlay_verify(overlay.upper(), opath, cpath)\n        if DEBUG:\n            print('_SET_OVERLAY_VERIFY ERRC: {0}'.format(errc))\n        if (errc == 0):\n            _LOADED[overlay.upper()] = True\n    else:\n        raise ValueError('Invalid Overlay name specified! Choose between: SPI2, PWM0, CUST')", "docstring": "load - Load a DTB Overlay\n\nInputs:\noverlay - Overlay Key: SPI2, PWM0, CUST\npath    - Full Path to where the custom overlay is stored\n\nReturns:\n0 - Successful Load\n1 - Unsuccessful Load\n2 - Overlay was previously set", "source": "codesearchnet"}
{"code": "def field_is_set(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> bool:\n    return field_content_length(msg, field) > 0", "docstring": "Returns True if the field is set.\n\nArgs:\nmsg: The Message whose fields to examine.\nfield: The FieldDescriptor or name of the field to examine.\n\nReturns:\nTrue if field has been set.", "source": "github-repos"}
{"code": "def feature_hash(feature, dim, seed=123):\n    vec = np.zeros(dim)\n    i = (mmh3.hash(feature, seed) % dim)\n    vec[i] = 1\n    return vec", "docstring": "Feature hashing.\n\nArgs:\nfeature (str): Target feature represented as string.\ndim (int): Number of dimensions for a hash value.\nseed (float): Seed of a MurmurHash3 hash function.\n\nReturns:\nnumpy 1d array: one-hot-encoded feature vector for `s`.", "source": "codesearchnet"}
{"code": "def _GetTSKPartitionIdentifiers(self, scan_node):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.ScannerError('Invalid scan node.')\n\n    volume_system = tsk_volume_system.TSKVolumeSystem()\n    volume_system.Open(scan_node.path_spec)\n\n    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(\n        volume_system)\n    if not volume_identifiers:\n      return []\n\n    if len(volume_identifiers) == 1:\n      return volume_identifiers\n\n    if not self._mediator:\n      raise errors.ScannerError(\n          'Unable to proceed. Partitions found but no mediator to determine '\n          'how they should be used.')\n\n    try:\n      volume_identifiers = self._mediator.GetPartitionIdentifiers(\n          volume_system, volume_identifiers)\n\n    except KeyboardInterrupt:\n      raise errors.UserAbort('File system scan aborted.')\n\n    return self._NormalizedVolumeIdentifiers(\n        volume_system, volume_identifiers, prefix='p')", "docstring": "Determines the TSK partition identifiers.\n\nArgs:\nscan_node (SourceScanNode): scan node.\n\nReturns:\nlist[str]: TSK partition identifiers.\n\nRaises:\nScannerError: if the format of or within the source is not supported or\nthe scan node is invalid or if the volume for a specific identifier\ncannot be retrieved.\nUserAbort: if the user requested to abort.", "source": "juraj-google-style"}
{"code": "def get_canonical_serializer(\n        resource_key,\n        model=None,\n        instance=None,\n        resource_name=None\n    ):\n        \n\n        if model:\n            resource_key = get_model_table(model)\n        elif instance:\n            resource_key = instance._meta.db_table\n        elif resource_name:\n            resource_key = resource_name_map[resource_name]\n\n        if resource_key not in resource_map:\n            return None\n\n        return resource_map[resource_key]['viewset'].serializer_class", "docstring": "Return canonical serializer for a given resource name.\n\nArguments:\nresource_key - Resource key, usually DB table for model-based\nresources, otherwise the plural name.\nmodel - (Optional) Model class to look up by.\ninstance - (Optional) Model object instance.\nReturns: serializer class", "source": "juraj-google-style"}
{"code": "def Sign(verifiable, keypair):\n        \n        prikey = bytes(keypair.PrivateKey)\n        hashdata = verifiable.GetHashData()\n        res = Crypto.Default().Sign(hashdata, prikey)\n        return res", "docstring": "Sign the `verifiable` object with the private key from `keypair`.\n\nArgs:\nverifiable:\nkeypair (neocore.KeyPair):\n\nReturns:\nbool: True if successfully signed. False otherwise.", "source": "juraj-google-style"}
{"code": "def _open_debug_interface(self, conn_id, callback, connection_string=None):\n        \n        self._try_connect(connection_string)\n        callback(conn_id, self.id, True, None)", "docstring": "Enable debug interface for this IOTile device\n\nArgs:\nconn_id (int): the unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def call(self, method, args=None):\n    message = SoapMessage(endpoint=self.endpoint, method=method, parameters=([] if (args is None) else args), http_headers=self.http_headers, soap_action='http:\n    try:\n        result_elt = message.call()\n    except SoapFault as exc:\n        if ('Client.TokenRefreshRequired' in exc.faultcode):\n            log.debug('Token refresh required. Trying again')\n            self._cached_soap_header = None\n            auth_token = exc.detail.findtext('.\n            private_key = exc.detail.findtext('.\n            self.music_service.account.oa_device_id = auth_token\n            self.music_service.account.key = private_key\n            message = SoapMessage(endpoint=self.endpoint, method=method, parameters=args, http_headers=self.http_headers, soap_action='http:\n            result_elt = message.call()\n        else:\n            raise MusicServiceException(exc.faultstring, exc.faultcode)\n    result = list(parse(XML.tostring(result_elt), process_namespaces=True, namespaces={'http:\n    return (result if (result is not None) else {})", "docstring": "Call a method on the server.\n\nArgs:\nmethod (str): The name of the method to call.\nargs (List[Tuple[str, str]] or None): A list of (parameter,\nvalue) pairs representing the parameters of the method.\nDefaults to `None`.\n\nReturns:\n~collections.OrderedDict: An OrderedDict representing the response.\n\nRaises:\n`MusicServiceException`: containing details of the error\nreturned by the music service.", "source": "codesearchnet"}
{"code": "def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n\n    def row_mul(i: int) -> torch.Tensor:\n        return torch.stack([a[..., i, 0] * b[..., 0, 0] + a[..., i, 1] * b[..., 1, 0] + a[..., i, 2] * b[..., 2, 0], a[..., i, 0] * b[..., 0, 1] + a[..., i, 1] * b[..., 1, 1] + a[..., i, 2] * b[..., 2, 1], a[..., i, 0] * b[..., 0, 2] + a[..., i, 1] * b[..., 1, 2] + a[..., i, 2] * b[..., 2, 2]], dim=-1)\n    return torch.stack([row_mul(0), row_mul(1), row_mul(2)], dim=-2)", "docstring": "Performs matrix multiplication of two rotation matrix tensors. Written out by hand to avoid AMP downcasting.\n\nArgs:\na: [*, 3, 3] left multiplicand\nb: [*, 3, 3] right multiplicand\nReturns:\nThe product ab", "source": "github-repos"}
{"code": "def _AddUser(self, user):\n    self.logger.info('Creating a new user account for %s.', user)\n    command = self.useradd_cmd.format(user=user)\n    try:\n        subprocess.check_call(command.split(' '))\n    except subprocess.CalledProcessError as e:\n        self.logger.warning('Could not create user %s. %s.', user, str(e))\n        return False\n    else:\n        self.logger.info('Created user account %s.', user)\n        return True", "docstring": "Configure a Linux user account.\n\nArgs:\nuser: string, the name of the Linux user account to create.\n\nReturns:\nbool, True if user creation succeeded.", "source": "codesearchnet"}
{"code": "def matchmaker_request(url, token, method, content_type=None, accept=None, data=None):\n    headers = Headers()\n    headers = {'X-Auth-Token': token}\n    if content_type:\n        headers['Content-Type'] = content_type\n    if accept:\n        headers['Accept'] = accept\n    req_data = (data or {'timestamp': datetime.datetime.now().timestamp()})\n    json_response = None\n    try:\n        LOG.info('Sending {} request to MME url {}. Data sent: {}'.format(method, url, req_data))\n        resp = requests.request(method=method, url=url, headers=headers, data=json.dumps(req_data))\n        json_response = resp.json()\n        LOG.info('MME server response was:{}'.format(json_response))\n        if isinstance(json_response, str):\n            json_response = {'message': json_response}\n        elif isinstance(json_response, list):\n            return json_response\n        json_response['status_code'] = resp.status_code\n    except Exception as err:\n        LOG.info('An error occurred while sending HTTP request to server ({})'.format(err))\n        json_response = {'message': str(err)}\n    return json_response", "docstring": "Send a request to MatchMaker and return its response\n\nArgs:\nurl(str): url to send request to\ntoken(str): MME server authorization token\nmethod(str): 'GET', 'POST' or 'DELETE'\ncontent_type(str): MME request Content-Type\naccept(str): accepted response\ndata(dict): eventual data to send in request\n\nReturns:\njson_response(dict): server response", "source": "codesearchnet"}
{"code": "def GetMessages(self, files):\n    \n    result = {}\n    for file_name in files:\n      file_desc = self.pool.FindFileByName(file_name)\n      for name, msg in file_desc.message_types_by_name.items():\n        if file_desc.package:\n          full_name = '.'.join([file_desc.package, name])\n        else:\n          full_name = msg.name\n        result[full_name] = self.GetPrototype(\n            self.pool.FindMessageTypeByName(full_name))\n\n      \n      \n      \n      \n      \n      \n      \n      \n\n      for name, extension in file_desc.extensions_by_name.items():\n        if extension.containing_type.full_name not in self._classes:\n          self.GetPrototype(extension.containing_type)\n        extended_class = self._classes[extension.containing_type.full_name]\n        extended_class.RegisterExtension(extension)\n    return result", "docstring": "Gets all the messages from a specified file.\n\nThis will find and resolve dependencies, failing if the descriptor\npool cannot satisfy them.\n\nArgs:\nfiles: The file names to extract messages from.\n\nReturns:\nA dictionary mapping proto names to the message classes. This will include\nany dependent messages as well as any messages defined in the same file as\na specified message.", "source": "juraj-google-style"}
{"code": "def DeregisterSourceType(cls, source_type_class):\n    if (source_type_class.TYPE_INDICATOR not in cls._source_type_classes):\n        raise KeyError('Source type not set for type: {0:s}.'.format(source_type_class.TYPE_INDICATOR))\n    del cls._source_type_classes[source_type_class.TYPE_INDICATOR]", "docstring": "Deregisters a source type.\n\nSource types are identified based on their type indicator.\n\nArgs:\nsource_type_class (type): source type.\n\nRaises:\nKeyError: if a source type is not set for the corresponding type\nindicator.", "source": "codesearchnet"}
{"code": "def broadcast_shape(shape_x, shape_y):\n    if shape_x.ndims is None or shape_y.ndims is None:\n        return tensor_shape.unknown_shape()\n    return_dims = _broadcast_shape_helper(shape_x, shape_y)\n    if return_dims is None:\n        raise ValueError(f'Incompatible shapes for broadcasting. Two shapes are compatible if for each dimension pair they are either equal or one of them is 1. Received: {shape_x} and {shape_y}.')\n    return tensor_shape.TensorShape(return_dims)", "docstring": "Returns the broadcasted shape between `shape_x` and `shape_y`.\n\nArgs:\nshape_x: A `TensorShape`\nshape_y: A `TensorShape`\n\nReturns:\nA `TensorShape` representing the broadcasted shape.\n\nRaises:\nValueError: If the two shapes can not be broadcasted.", "source": "github-repos"}
{"code": "def total_seconds(td):\n    secs = (td.seconds + ((td.days * 24) * 3600))\n    if td.microseconds:\n        secs += 1\n    return secs", "docstring": "convert a timedelta to seconds.\n\nThis is patterned after timedelta.total_seconds, which is only\navailable in python 27.\n\nArgs:\ntd: a timedelta object.\n\nReturns:\ntotal seconds within a timedelta. Rounded up to seconds.", "source": "codesearchnet"}
{"code": "def read_double(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sd' % endian), 8)", "docstring": "Read 8 bytes as a double value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nfloat:", "source": "codesearchnet"}
{"code": "def read_sql(cls, sql, con, index_col=None, **kwargs):\n        \n        if cls.read_sql_remote_task is None:\n            return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)\n\n        row_cnt_query = \"SELECT COUNT(*) FROM ({})\".format(sql)\n        row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()\n        cols_names_df = pandas.read_sql(\n            \"SELECT * FROM ({}) LIMIT 0\".format(sql), con, index_col=index_col\n        )\n        cols_names = cols_names_df.columns\n        num_parts = cls.frame_mgr_cls._compute_num_partitions()\n        partition_ids = []\n        index_ids = []\n        limit = math.ceil(row_cnt / num_parts)\n        for part in range(num_parts):\n            offset = part * limit\n            query = \"SELECT * FROM ({}) LIMIT {} OFFSET {}\".format(sql, limit, offset)\n            partition_id = cls.read_sql_remote_task._remote(\n                args=(num_parts, query, con, index_col, kwargs),\n                num_return_vals=num_parts + 1,\n            )\n            partition_ids.append(\n                [cls.frame_partition_cls(obj) for obj in partition_id[:-1]]\n            )\n            index_ids.append(partition_id[-1])\n\n        if index_col is None:  \n            index_lens = ray.get(index_ids)\n            new_index = pandas.RangeIndex(sum(index_lens))\n        else:  \n            index_lst = [x for part_index in ray.get(index_ids) for x in part_index]\n            new_index = pandas.Index(index_lst).set_names(index_col)\n\n        new_query_compiler = cls.query_compiler_cls(\n            cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names\n        )\n        return new_query_compiler", "docstring": "Reads a SQL query or database table into a DataFrame.\nArgs:\nsql: string or SQLAlchemy Selectable (select or text object) SQL query to be\nexecuted or a table name.\ncon: SQLAlchemy connectable (engine/connection) or database string URI or\nDBAPI2 connection (fallback mode)\nindex_col: Column(s) to set as index(MultiIndex).\nkwargs: Pass into pandas.read_sql function.", "source": "juraj-google-style"}
{"code": "def _html_tree_view(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:\n    return view.render(self, name=name, parent=parent, root_path=root_path, **kwargs)", "docstring": "Returns the topmost HTML representation of this extension.\n\nArgs:\nview: The view to render the object.\nname: The name of the object.\nparent: The parent of the object.\nroot_path: The key path of the object relative to the root.\n**kwargs: kwargs to pass to the view. See `_html_tree_view_config` for\nthe builtin arguments.\n\nReturns:\nThe rendered HTML.", "source": "github-repos"}
{"code": "def get_histograms_in_list(filename: str, list_name: str=None) -> Dict[(str, Any)]:\n    hists: dict = {}\n    with RootOpen(filename=filename, mode='READ') as fIn:\n        if (list_name is not None):\n            hist_list = fIn.Get(list_name)\n        else:\n            hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()]\n        if (not hist_list):\n            fIn.ls()\n            fIn.Close()\n            raise ValueError(f'Could not find list with name \"{list_name}\". Possible names are listed above.')\n        for obj in hist_list:\n            _retrieve_object(hists, obj)\n    return hists", "docstring": "Get histograms from the file and make them available in a dict.\n\nLists are recursively explored, with all lists converted to dictionaries, such that the return\ndictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection``\nderived objects).\n\nArgs:\nfilename: Filename of the ROOT file containing the list.\nlist_name: Name of the list to retrieve.\nReturns:\nContains hists with keys as their names. Lists are recursively added, mirroring\nthe structure under which the hists were stored.\nRaises:\nValueError: If the list could not be found in the given file.", "source": "codesearchnet"}
{"code": "def _getargspec(target):\n    fullargspecs = getfullargspec(target)\n    if hasattr(_inspect, 'ArgSpec'):\n        argspecs = ArgSpec(args=fullargspecs.args, varargs=fullargspecs.varargs, keywords=fullargspecs.varkw, defaults=fullargspecs.defaults)\n    else:\n        argspecs = FullArgSpec(args=fullargspecs.args, varargs=fullargspecs.varargs, varkw=fullargspecs.varkw, defaults=fullargspecs.defaults, kwonlyargs=[], kwonlydefaults=None, annotations={})\n    return argspecs", "docstring": "A python3 version of getargspec.\n\nCalls `getfullargspec` and assigns args, varargs,\nvarkw, and defaults to a python 2/3 compatible `ArgSpec`.\n\nThe parameter name 'varkw' is changed to 'keywords' to fit the\n`ArgSpec` struct.\n\nArgs:\ntarget: the target object to inspect.\n\nReturns:\nAn ArgSpec with args, varargs, keywords, and defaults parameters\nfrom FullArgSpec.", "source": "github-repos"}
{"code": "def _add_trackable(self, trackable_object, trainable):\n    if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):\n        handler = trackable_object\n    else:\n        handler = base_layer_utils.TrackableWeightHandler(trackable_object)\n    if trainable:\n        self._trainable_weights.append(handler)\n    else:\n        self._non_trainable_weights.append(handler)\n    return handler", "docstring": "Adds a Trackable object to this layer's state.\n\nArgs:\ntrackable_object: The tf.tracking.Trackable object to add.\ntrainable: Boolean, whether the variable should be part of the layer's\n\"trainable_variables\" (e.g. variables, biases) or\n\"non_trainable_variables\" (e.g. BatchNorm mean and variance).\n\nReturns:\nThe TrackableWeightHandler used to track this object.", "source": "github-repos"}
{"code": "def deactivate(self, node_id):\n        \n        node = self.node_list[node_id]\n        self.node_list[node_id] = node._replace(active=False)", "docstring": "Deactivate the node identified by node_id.\n\nDeactivates the node corresponding to node_id, which means that\nit can never be the output of a nearest_point query.\n\nNote:\nThe node is not removed from the tree, its data is steel available.\n\nArgs:\nnode_id (int): The node identifier (given to the user after\nits insertion).", "source": "juraj-google-style"}
{"code": "def _determine_outliers_for_moving_average(moving_average: np.ndarray, moving_average_threshold: float, number_of_values_to_search_ahead: int, limit_of_number_of_values_below_threshold: int) -> int:\n    below_threshold = (moving_average < moving_average_threshold)\n    values_to_check = []\n    for i in range(limit_of_number_of_values_below_threshold):\n        values_to_check.append(below_threshold[i:((- ((limit_of_number_of_values_below_threshold - 1) - i)) or None)])\n    found_at_least_one_bin_above_threshold = False\n    cut_index = (- 1)\n    for (i, values) in enumerate(zip(*values_to_check)):\n        if (i == 0):\n            continue\n        above_threshold = [(not value) for value in values]\n        if any(above_threshold):\n            found_at_least_one_bin_above_threshold = True\n        if (found_at_least_one_bin_above_threshold and all(np.invert(above_threshold))):\n            logger.debug(f'i at found cut_index: {i} with moving_average: {moving_average[i]}')\n            cut_index = (i + (limit_of_number_of_values_below_threshold \n            break\n    return cut_index", "docstring": "Determine outliers to remove from a given moving average.\n\nNote:\nThe index returned is when the moving average first drops below the threshold for a moving average\ncalculated with that bin at the center. This is somewhat different from a standard moving average\ncalculation which would only look forward in the array.\n\nArgs:\nmoving_average: Moving average.\nmoving_average_threshold: Value of moving average under which we consider the moving average\nto be 0. Default: 2.\nnumber_of_values_to_search_ahead: Number of values to search ahead in the array when calculating\nthe moving average. Default: 5.\nlimit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered\nthe beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.\nReturns:\n0-indexed index of the histogram axes where the outliers begin.", "source": "codesearchnet"}
{"code": "def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):\n    self.set_mask(layers)\n    image = self.get_image(image, output='vector')\n    if in_global_mask:\n        masked_data = image[self.global_mask]\n        masked_data[(~ self.get_mask(in_global_mask=True))] = 0\n    else:\n        masked_data = image[self.current_mask]\n    if nan_to_num:\n        masked_data = np.nan_to_num(masked_data)\n    return masked_data", "docstring": "Vectorize an image and mask out all invalid voxels.\n\nArgs:\nimages: The image to vectorize and mask. Input can be any object\nhandled by get_image().\nlayers: Which mask layers to use (specified as int, string, or\nlist of ints and strings). When None, applies the conjunction\nof all layers.\nnan_to_num: boolean indicating whether to convert NaNs to 0.\nin_global_mask: Whether to return the resulting masked vector in\nthe globally masked space (i.e., n_voxels =\nlen(self.global_mask)). If False (default), returns in the full\nimage space (i.e., n_voxels = len(self.volume)).\nReturns:\nA 1D NumPy array of in-mask voxels.", "source": "codesearchnet"}
{"code": "def get_energy_tersoff(structure, gulp_cmd='gulp'):\n    gio = GulpIO()\n    gc = GulpCaller(gulp_cmd)\n    gin = gio.tersoff_input(structure)\n    gout = gc.run(gin)\n    return gio.get_energy(gout)", "docstring": "Compute the energy of a structure using Tersoff potential.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\ngulp_cmd: GULP command if not in standard place", "source": "codesearchnet"}
{"code": "def ParseHeader(table):\n  \n  precondition.AssertIterableType(table, dict)\n\n  prototype = None  \n\n  for row in table:\n    columns = list(iterkeys(row))\n    if prototype is None:\n      prototype = columns\n    elif prototype != columns:\n      message = \"Expected columns '{expected}', got '{actual}' for table {json}\"\n      message = message.format(expected=prototype, actual=columns, json=table)\n      raise ValueError(message)\n\n  result = rdf_osquery.OsqueryHeader()\n  for name in prototype or []:\n    result.columns.append(rdf_osquery.OsqueryColumn(name=name))\n  return result", "docstring": "Parses header of osquery output.\n\nArgs:\ntable: A table in a \"parsed JSON\" representation.\n\nReturns:\nA parsed `rdf_osquery.OsqueryHeader` instance.", "source": "juraj-google-style"}
{"code": "def url(self, text, **kwargs):\n    indicator_obj = URL(text, **kwargs)\n    return self._indicator(indicator_obj)", "docstring": "Add URL Address data to Batch object.\n\nArgs:\ntext (str): The value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of URL.", "source": "codesearchnet"}
{"code": "def add_deps(self, deps):\n        \n        if isinstance(deps, collections.Mapping):\n            \n            deps = [Dependency(node, exts) for node, exts in deps.items()]\n\n        \n        if not isinstance(deps, (list, tuple)):\n            deps = [deps]\n\n        assert all(isinstance(d, Dependency) for d in deps)\n\n        \n        self._deps.extend(deps)\n\n        if self.is_work:\n            \n            for task in self:\n                task.add_deps(deps)\n\n        \n        \n        for dep in (d for d in deps if d.node.is_file):\n            dep.node.add_filechild(self)", "docstring": "Add a list of dependencies to the :class:`Node`.\n\nArgs:\ndeps: List of :class:`Dependency` objects specifying the dependencies of the node.\nor dictionary mapping nodes to file extensions e.g. {task: \"DEN\"}", "source": "juraj-google-style"}
{"code": "def from_event(cls, event):\n    return cls(uuid=event['uuid'], job_type=event['job_type'], event_type=event['type'], queue=event['queue'], hostname=event['hostname'], pid=event['pid'], name=event['name'], workflow_id=event['workflow_id'], event_time=event['time'], duration=event['duration'])", "docstring": "Create a JobEvent object from the event dictionary returned by celery.\n\nArgs:\nevent (dict): The dictionary as returned by celery.\n\nReturns:\nJobEvent: A fully initialized JobEvent object.", "source": "codesearchnet"}
{"code": "def scale_regularization_loss(regularization_loss):\n    if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context():\n        raise RuntimeError('You are calling `scale_regularization_loss` in cross replica context, while it was expected to be called in replica context.')\n    num_replicas = distribute_lib.get_strategy().num_replicas_in_sync\n    return math_ops.reduce_sum(regularization_loss) / num_replicas", "docstring": "Scales the sum of the given regularization losses by number of replicas.\n\nUsage with distribution strategy and custom training loop:\n\n```python\nwith strategy.scope():\ndef compute_loss(self, label, predictions):\nper_example_loss = tf.keras.losses.sparse_categorical_crossentropy(\nlabels, predictions)\n\n# Compute loss that is scaled by sample_weight and by global batch size.\nloss = tf.nn.compute_average_loss(\nper_example_loss,\nsample_weight=sample_weight,\nglobal_batch_size=GLOBAL_BATCH_SIZE)\n\n# Add scaled regularization losses.\nloss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights))\nreturn loss\n```\n\nArgs:\nregularization_loss: Regularization loss.\n\nReturns:\nScalar loss value.", "source": "github-repos"}
{"code": "def as_base_units(self):\n    b = collections.defaultdict(int)\n    factor = 1\n    for (k, v) in self.items():\n        derived = False\n        for d in DERIVED_UNITS.values():\n            if (k in d):\n                for (k2, v2) in d[k].items():\n                    if isinstance(k2, Number):\n                        factor *= (k2 ** (v2 * v))\n                    else:\n                        b[k2] += (v2 * v)\n                derived = True\n                break\n        if (not derived):\n            (si, f) = _get_si_unit(k)\n            b[si] += v\n            factor *= (f ** v)\n    return ({k: v for (k, v) in b.items() if (v != 0)}, factor)", "docstring": "Converts all units to base SI units, including derived units.\n\nReturns:\n(base_units_dict, scaling factor). base_units_dict will not\ncontain any constants, which are gathered in the scaling factor.", "source": "codesearchnet"}
{"code": "def __ne__(self, rhs):\n        \n        return self.key != rhs.key or not self.sequence_equal(rhs)", "docstring": "Determine value inequality with another grouping.\n\nArgs:\nrhs: The object on the right-hand-side of the comparison must\nsupport a property called 'key' and be iterable.\n\nReturns:\nTrue if the keys or sequences are not equal, otherwise False.", "source": "juraj-google-style"}
{"code": "def add_columns(tree_view, df_py_dtypes, list_store):\n    \n    tree_view.set_model(list_store)\n\n    for column_i, (i, dtype_i) in df_py_dtypes[['i', 'dtype']].iterrows():\n        tree_column_i = gtk.TreeViewColumn(column_i)\n        tree_column_i.set_name(column_i)\n        if dtype_i in (int, long):\n            property_name = 'text'\n            cell_renderer_i = gtk.CellRendererSpin()\n        elif dtype_i == float:\n            property_name = 'text'\n            cell_renderer_i = gtk.CellRendererSpin()\n        elif dtype_i in (bool, ):\n            property_name = 'active'\n            cell_renderer_i = gtk.CellRendererToggle()\n        elif dtype_i in (str, ):\n            property_name = 'text'\n            cell_renderer_i = gtk.CellRendererText()\n        else:\n            raise ValueError('No cell renderer for dtype: %s' % dtype_i)\n        cell_renderer_i.set_data('column_i', i)\n        cell_renderer_i.set_data('column', tree_column_i)\n        tree_column_i.pack_start(cell_renderer_i, True)\n        tree_column_i.add_attribute(cell_renderer_i, property_name, i)\n        tree_view.append_column(tree_column_i)", "docstring": "Add columns to a `gtk.TreeView` for the types listed in `df_py_dtypes`.\n\nArgs:\n\ntree_view (gtk.TreeView) : Tree view to append columns to.\ndf_py_dtypes (pandas.DataFrame) : Data frame containing type\ninformation for one or more columns in `list_store`.\nlist_store (gtk.ListStore) : Model data.\n\nReturns:\n\nNone", "source": "juraj-google-style"}
{"code": "def _scalar_operations(self, axis, scalar, func):\n        \n        if isinstance(scalar, (list, np.ndarray, pandas.Series)):\n            new_index = self.index if axis == 0 else self.columns\n\n            def list_like_op(df):\n                if axis == 0:\n                    df.index = new_index\n                else:\n                    df.columns = new_index\n                return func(df)\n\n            new_data = self._map_across_full_axis(\n                axis, self._prepare_method(list_like_op)\n            )\n            return self.__constructor__(new_data, self.index, self.columns)\n        else:\n            return self._map_partitions(self._prepare_method(func))", "docstring": "Handler for mapping scalar operations across a Manager.\n\nArgs:\naxis: The axis index object to execute the function on.\nscalar: The scalar value to map.\nfunc: The function to use on the Manager with the scalar.\n\nReturns:\nA new QueryCompiler with updated data and new index.", "source": "juraj-google-style"}
{"code": "def clean_lines(string_list, remove_empty_lines=True):\n    for s in string_list:\n        clean_s = s\n        if ('\n            ind = s.index('\n            clean_s = s[:ind]\n        clean_s = clean_s.strip()\n        if ((not remove_empty_lines) or (clean_s != '')):\n            (yield clean_s)", "docstring": "Strips whitespace, carriage returns and empty lines from a list of strings.\n\nArgs:\nstring_list: List of strings\nremove_empty_lines: Set to True to skip lines which are empty after\nstripping.\n\nReturns:\nList of clean strings with no whitespaces.", "source": "codesearchnet"}
{"code": "def apply_schema(cls, schema: Optional[pg_typing.Schema]=None) -> None:\n    if schema is not None:\n        schema = cls._normalize_schema(schema)\n        setattr(cls, '__schema__', schema)\n        setattr(cls, '__sym_fields', pg_typing.Dict(schema))\n    cls._on_schema_update()", "docstring": "Applies a schema to a symbolic class.\n\nArgs:\nschema: The schema that will be applied to class. If `cls` was attached\nwith an existing schema. The old schema will be dropped. If None, the\ncls will update its signature and getters according to the (maybe\nupdated) old schema.", "source": "github-repos"}
{"code": "def destringize(self, string):\n        \n\n        \n        \n        \n        m = read_tuple_destr_pattern.match(string)\n        if not m:\n            smbl.messages.error(\n                \"'{}' is not a valid read name with respect to the RNF specification\".format(string),\n                program=\"RNFtools\", subprogram=\"RNF format\", exception=ValueError\n            )\n        groups = m.groups()\n        \n        self.prefix = groups[0]\n        read_tuple_id = groups[1]\n        self.read_tuple_id = int(read_tuple_id, 16)\n        self.segments = []\n        segments_str = groups[2:-1]\n        for b_str in segments_str:\n            if b_str is not None:\n                if b_str[0] == \",\":\n                    b_str = b_str[1:]\n                b = rnftools.rnfformat.Segment()\n                b.destringize(b_str)\n                self.segments.append(b)\n        self.suffix = groups[-1]", "docstring": "Get RNF values for this read from its textual representation and save them\ninto this object.\n\nArgs:\nstring(str): Textual representation of a read.\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def murmur2(data):\n    \n    \n    \n    if six.PY2:\n        data = bytearray(bytes(data))\n\n    length = len(data)\n    seed = 0x9747b28c\n    \n    \n    m = 0x5bd1e995\n    r = 24\n\n    \n    h = seed ^ length\n    length4 = length \n\n    for i in range(length4):\n        i4 = i * 4\n        k = ((data[i4 + 0] & 0xff) +\n            ((data[i4 + 1] & 0xff) << 8) +\n            ((data[i4 + 2] & 0xff) << 16) +\n            ((data[i4 + 3] & 0xff) << 24))\n        k &= 0xffffffff\n        k *= m\n        k &= 0xffffffff\n        k ^= (k % 0x100000000) >> r \n        k &= 0xffffffff\n        k *= m\n        k &= 0xffffffff\n\n        h *= m\n        h &= 0xffffffff\n        h ^= k\n        h &= 0xffffffff\n\n    \n    extra_bytes = length % 4\n    if extra_bytes >= 3:\n        h ^= (data[(length & ~3) + 2] & 0xff) << 16\n        h &= 0xffffffff\n    if extra_bytes >= 2:\n        h ^= (data[(length & ~3) + 1] & 0xff) << 8\n        h &= 0xffffffff\n    if extra_bytes >= 1:\n        h ^= (data[length & ~3] & 0xff)\n        h &= 0xffffffff\n        h *= m\n        h &= 0xffffffff\n\n    h ^= (h % 0x100000000) >> 13 \n    h &= 0xffffffff\n    h *= m\n    h &= 0xffffffff\n    h ^= (h % 0x100000000) >> 15 \n    h &= 0xffffffff\n\n    return h", "docstring": "Pure-python Murmur2 implementation.\n\nBased on java client, see org.apache.kafka.common.utils.Utils.murmur2\n\nArgs:\ndata (bytes): opaque bytes\n\nReturns: MurmurHash2 of data", "source": "juraj-google-style"}
{"code": "def _store_request_line(self, req_line):\n    if (not isinstance(req_line, str)):\n        try:\n            req_line = self.raw_request_line = req_line.decode()\n        except UnicodeDecodeError:\n            raise HTTPErrorBadRequest\n    try:\n        (self.method_str, self.original_url, self.version) = req_line.split()\n    except ValueError:\n        raise HTTPErrorBadRequest()\n    if (self.version not in ('HTTP/1.1', 'HTTP/1.0')):\n        raise HTTPErrorVersionNotSupported(self.version)\n    try:\n        self.method = HTTPMethod[self.method_str]\n    except KeyError:\n        err = \"Unknown HTTP Method '{}'\".format(self.method_str)\n        raise HTTPErrorNotImplemented(err)\n    self._process_headers = {HTTPMethod.GET: self.process_get_headers, HTTPMethod.POST: self.process_post_headers}.get(self.method, (lambda data: True))\n    (_, num_str) = self.version.split('/', 1)\n    self.HTTP_VERSION = tuple(num_str.split('.'))\n    self.version_number = float(num_str)\n    self.parsed_url = urlparse(self.original_url)\n    self.path = unquote(self.parsed_url.path)\n    self.query = parse_qs(self.parsed_url.query)\n    return (self.method, self.parsed_url, self.version)", "docstring": "Splits the request line given into three components.\nEnsures that the version and method are valid for this server,\nand uses the urllib.parse function to parse the request URI.\n\nNote:\nThis method has the additional side effect of updating all\nrequest line related attributes of the parser.\n\nReturns:\ntuple: Tuple containing the parsed (method, parsed_url,\nversion)\n\nRaises:\nHTTPErrorBadRequest: If request line is invalid\nHTTPErrorNotImplemented: If HTTP method is not recognized\nHTTPErrorVersionNotSupported: If HTTP version is not\nrecognized.", "source": "codesearchnet"}
{"code": "def _on_change(self, field_updates: Dict[utils.KeyPath, base.FieldUpdate]):\n    del field_updates\n    return self._on_bound()", "docstring": "Event that is triggered when field values in the subtree are updated.\n\nThis event will be called\n* On per-field basis when object is modified via attribute.\n* In batch when multiple fields are modified via `rebind` method.\n\nWhen a field in an object tree is updated, all ancestors' `_on_change` event\nwill be triggered in order, from the nearest one to furthest one.\n\nArgs:\nfield_updates: Updates made to the subtree. Key path is relative to\ncurrent object.\n\nReturns:\nit will call `_on_bound` and return the return value of `_on_bound`.", "source": "github-repos"}
{"code": "def push(self, stream, reading):\n    reading = copy.copy(reading)\n    reading.stream = stream.encode()\n    if stream.buffered:\n        output_buffer = stream.output\n        if (self.id_assigner is not None):\n            reading.reading_id = self.id_assigner(stream, reading)\n        try:\n            self._engine.push(reading)\n        except StorageFullError:\n            if ((stream.output and (not self._rollover_streaming)) or ((not stream.output) and (not self._rollover_storage))):\n                raise\n            self._erase_buffer(stream.output)\n            self._engine.push(reading)\n        for walker in self._queue_walkers:\n            if (walker.selector.output == output_buffer):\n                walker.notify_added(stream)\n    for selector in self._monitors:\n        if ((selector is None) or selector.matches(stream)):\n            for callback in self._monitors[selector]:\n                callback(stream, reading)\n    for walker in self._virtual_walkers:\n        if walker.matches(stream):\n            walker.push(stream, reading)\n    self._last_values[stream] = reading", "docstring": "Push a reading into a stream, updating any associated stream walkers.\n\nArgs:\nstream (DataStream): the stream to push the reading into\nreading (IOTileReading): the reading to push", "source": "codesearchnet"}
{"code": "def get(object_ids):\n    worker = global_worker\n    worker.check_connected()\n    with profiling.profile('ray.get'):\n        if (worker.mode == LOCAL_MODE):\n            return object_ids\n        global last_task_error_raise_time\n        if isinstance(object_ids, list):\n            values = worker.get_object(object_ids)\n            for (i, value) in enumerate(values):\n                if isinstance(value, RayError):\n                    last_task_error_raise_time = time.time()\n                    raise value\n            return values\n        else:\n            value = worker.get_object([object_ids])[0]\n            if isinstance(value, RayError):\n                last_task_error_raise_time = time.time()\n                raise value\n            return value", "docstring": "Get a remote object or a list of remote objects from the object store.\n\nThis method blocks until the object corresponding to the object ID is\navailable in the local object store. If this object is not in the local\nobject store, it will be shipped from an object store that has it (once the\nobject has been created). If object_ids is a list, then the objects\ncorresponding to each object in the list will be returned.\n\nArgs:\nobject_ids: Object ID of the object to get or a list of object IDs to\nget.\n\nReturns:\nA Python object or a list of Python objects.\n\nRaises:\nException: An exception is raised if the task that created the object\nor that created one of the objects raised an exception.", "source": "codesearchnet"}
{"code": "def put(self, destination):\n        \n        if not self._fetched:\n            self._fetch()\n        DirectoryArchive.put(self, destination)", "docstring": "Copy the referenced directory to this path\n\nArgs:\ndestination (str): path to put this directory (which must NOT already exist)", "source": "juraj-google-style"}
{"code": "def create(cls, session, web_hook):\n    cls('/hooks.json', data=web_hook.to_api(), request_type=RequestPaginator.POST, session=session)\n    return True", "docstring": "Create a web hook.\n\nNote that creating a new web hook will overwrite the web hook that is\nalready configured for this company. There is also no way to\nprogrammatically determine if a web hook already exists for the\ncompany. This is a limitation of the HelpScout API and cannot be\ncircumvented.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nweb_hook (helpscout.models.WebHook): The web hook to be created.\n\nReturns:\nbool: ``True`` if the creation was a success. Errors otherwise.", "source": "codesearchnet"}
{"code": "def apply(self, transform, pvalueish=None, label=None):\n    if isinstance(transform, ptransform._NamedPTransform):\n        return self.apply(transform.transform, pvalueish, label or transform.label)\n    if not isinstance(transform, ptransform.PTransform):\n        raise TypeError('Expected a PTransform object, got %s' % transform)\n    if label:\n        old_label, transform.label = (transform.label, label)\n        try:\n            return self.apply(transform, pvalueish)\n        finally:\n            transform.label = old_label\n    if self._current_transform() is self._root_transform():\n        alter_label_if_ipython(transform, pvalueish)\n    full_label = '/'.join([self._current_transform().full_label, transform.label]).lstrip('/')\n    if full_label in self.applied_labels:\n        auto_unique_labels = self._options.view_as(StandardOptions).auto_unique_labels\n        if auto_unique_labels:\n            logging.warning('Using --auto_unique_labels could cause data loss when updating a pipeline or reloading the job state. This is not recommended for streaming jobs.')\n            unique_label = self._generate_unique_label(transform)\n            return self.apply(transform, pvalueish, unique_label)\n        else:\n            raise RuntimeError('A transform with label \"%s\" already exists in the pipeline. To apply a transform with a specified label, write pvalue | \"label\" >> transform or use the option \"auto_unique_labels\" to automatically generate unique transform labels. Note \"auto_unique_labels\" could cause data loss when updating a pipeline or reloading the job state. This is not recommended for streaming jobs.' % full_label)\n    self.applied_labels.add(full_label)\n    if pvalueish is None:\n        full_label = self._current_transform().full_label\n        raise TypeCheckError(f'Transform \"{full_label}\" was applied to the output of an object of type None.')\n    pvalueish, inputs = transform._extract_input_pvalues(pvalueish)\n    try:\n        if not isinstance(inputs, dict):\n            inputs = {str(ix): input for ix, input in enumerate(inputs)}\n    except TypeError:\n        raise NotImplementedError('Unable to extract PValue inputs from %s; either %s does not accept inputs of this format, or it does not properly override _extract_input_pvalues' % (pvalueish, transform))\n    for t, leaf_input in inputs.items():\n        if not isinstance(leaf_input, pvalue.PValue) or not isinstance(t, str):\n            raise NotImplementedError('%s does not properly override _extract_input_pvalues, returned %s from %s' % (transform, inputs, pvalueish))\n    current = AppliedPTransform(self._current_transform(), transform, full_label, inputs, None, annotations=self._current_annotations())\n    self._current_transform().add_part(current)\n    try:\n        self.transforms_stack.append(current)\n        type_options = self._options.view_as(TypeOptions)\n        if type_options.pipeline_type_check:\n            transform.type_check_inputs(pvalueish)\n        if isinstance(pvalueish, pvalue.PBegin) and isinstance(transform, ParDo):\n            full_label = self._current_transform().full_label\n            raise TypeCheckError(f\"Transform '{full_label}' expects a PCollection as input. 
Got a PBegin/Pipeline instead.\")\n        self._assert_not_applying_PDone(pvalueish, transform)\n        pvalueish_result = self.runner.apply(transform, pvalueish, self._options)\n        if type_options is not None and type_options.pipeline_type_check:\n            transform.type_check_outputs(pvalueish_result)\n        for tag, result in ptransform.get_named_nested_pvalues(pvalueish_result):\n            assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))\n            if result.producer is None:\n                result.producer = current\n            self._infer_result_type(transform, tuple(inputs.values()), result)\n            assert isinstance(result.producer.inputs, tuple)\n            if isinstance(result, pvalue.DoOutputsTuple):\n                current.add_output(result, result._main_tag)\n                continue\n            base = tag\n            counter = 0\n            while tag in current.outputs:\n                counter += 1\n                tag = '%s_%d' % (base, counter)\n            current.add_output(result, tag)\n        if type_options is not None and type_options.type_check_strictness == 'ALL_REQUIRED' and (transform.get_type_hints().output_types is None):\n            ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label)\n            raise TypeCheckError('Pipeline type checking is enabled, however no output type-hint was found for the PTransform %s' % ptransform_name)\n    finally:\n        self.transforms_stack.pop()\n    return pvalueish_result", "docstring": "Applies a custom transform using the pvalueish specified.\n\nArgs:\ntransform (~apache_beam.transforms.ptransform.PTransform): the\n:class:`~apache_beam.transforms.ptransform.PTransform` to apply.\npvalueish (~apache_beam.pvalue.PCollection): the input for the\n:class:`~apache_beam.transforms.ptransform.PTransform` (typically a\n:class:`~apache_beam.pvalue.PCollection`).\nlabel (str): label of the\n:class:`~apache_beam.transforms.ptransform.PTransform`.\n\nRaises:\nTypeError: if the transform object extracted from the\nargument list is not a\n:class:`~apache_beam.transforms.ptransform.PTransform`.\nRuntimeError: if the transform object was already applied to\nthis pipeline and needs to be cloned in order to apply again.", "source": "github-repos"}
{"code": "def set_atten(self, idx, value):\n    if (not self.is_open):\n        raise attenuator.Error(('Connection to attenuator at %s is not open!' % self._telnet_client.host))\n    if ((idx + 1) > self.path_count):\n        raise IndexError('Attenuator index out of range!', self.path_count, idx)\n    if (value > self.max_atten):\n        raise ValueError('Attenuator value out of range!', self.max_atten, value)\n    self._telnet_client.cmd(('CHAN:%s:SETATT:%s' % ((idx + 1), value)))", "docstring": "Sets the attenuation value for a particular signal path.\n\nArgs:\nidx: Zero-based index int which is the identifier for a particular\nsignal path in an instrument. For instruments that only has one\nchannel, this is ignored by the device.\nvalue: A float that is the attenuation value to set.\n\nRaises:\nError: The underlying telnet connection to the instrument is not\nopen.\nIndexError: The index of the attenuator is greater than the maximum\nindex of the underlying instrument.\nValueError: The requested set value is greater than the maximum\nattenuation value.", "source": "codesearchnet"}
{"code": "def _get_parser(use_v2_converter):\n    parser = argparse.ArgumentParser(description='Command line tool to run TensorFlow Lite Converter.')\n    parser.add_argument('--output_file', type=str, help='Full filepath of the output file.', required=True)\n    if use_v2_converter:\n        _get_tf2_flags(parser)\n    else:\n        _get_tf1_flags(parser)\n    parser.add_argument('--experimental_new_converter', action=_ParseBooleanFlag, nargs='?', default=True, help='Experimental flag, subject to change. Enables MLIR-based conversion instead of TOCO conversion. (default True)')\n    parser.add_argument('--experimental_new_quantizer', action=_ParseBooleanFlag, nargs='?', help='Experimental flag, subject to change. Enables MLIR-based quantizer instead of flatbuffer conversion. (default True)')\n    return parser", "docstring": "Returns an ArgumentParser for tflite_convert.\n\nArgs:\nuse_v2_converter: Indicates which converter to return.\nReturn: ArgumentParser.", "source": "github-repos"}
{"code": "def from_rtm(cls, raw_event: MutableMapping) -> \"Event\":\n        \n        if raw_event[\"type\"].startswith(\"message\"):\n            return Message(raw_event)\n        else:\n            return Event(raw_event)", "docstring": "Create an event with data coming from the RTM API.\n\nIf the event type is a message a :class:`slack.events.Message` is returned.\n\nArgs:\nraw_event: JSON decoded data from the RTM API\n\nReturns:\n:class:`slack.events.Event` or :class:`slack.events.Message`", "source": "juraj-google-style"}
{"code": "def is_field_remote(model, field_name):\n    \n    if not hasattr(model, '_meta'):\n        \n        return False\n\n    model_field = get_model_field(model, field_name)\n    return isinstance(model_field, (ManyToManyField, RelatedObject))", "docstring": "Check whether a given model field is a remote field.\n\nA remote field is the inverse of a one-to-many or a\nmany-to-many relationship.\n\nArguments:\nmodel: a Django model\nfield_name: the name of a field\n\nReturns:\nTrue if `field_name` is a remote field, False otherwise.", "source": "juraj-google-style"}
{"code": "def flatten(self):\n    if (self._flat is None):\n        flat = {}\n        for arg in self.args:\n            if isinstance(arg, Option):\n                flat[arg.name] = arg\n            elif isinstance(arg, ListOption):\n                flat[arg.name] = arg\n            elif isinstance(arg, DictOption):\n                flat[arg.name] = arg\n                if arg.scheme:\n                    for (k, v) in arg.scheme.flatten().items():\n                        flat[((arg.name + '.') + k)] = v\n        self._flat = flat\n    return self._flat", "docstring": "Flatten the scheme into a dictionary where the keys are\ncompound 'dot' notation keys, and the values are the corresponding\noptions.\n\nReturns:\ndict: The flattened `Scheme`.", "source": "codesearchnet"}
{"code": "def precheck_dist_hash(context):\n  \n  \n  key = \"{}/{}/dist-hash\".format(context.service_name, context.env)\n  print_if_verbose(\"precheck_dist_hash with key: {}\".format(key))\n  try:\n    current_dist_hash = Version(context.aws_client(\"s3\").get_object(\n        Bucket=EFConfig.S3_VERSION_BUCKET,\n        Key=key\n    ))\n    print_if_verbose(\"dist-hash found: {}\".format(current_dist_hash.value))\n  except ClientError as error:\n    if error.response[\"Error\"][\"Code\"] == \"NoSuchKey\":\n      \n      \n      print_if_verbose(\"precheck passed without check because current dist-hash is None\")\n      return True\n    else:\n      fail(\"Exception while prechecking dist_hash for {} {}: {}\".format(context.service_name, context.env, error))\n\n  \n  \n  try:\n    response = urllib2.urlopen(current_dist_hash.location, None, 5)\n    if response.getcode() != 200:\n      raise IOError(\"Non-200 response \" + str(response.getcode()) + \" reading \" + current_dist_hash.location)\n    dist_hash_in_service = response.read().strip()\n  except urllib2.URLError as error:\n    raise IOError(\"URLError in http_get_dist_version: \" + repr(error))\n\n  \n  if dist_hash_in_service != current_dist_hash.value:\n    raise RuntimeError(\"{} dist-hash in service: {} but expected dist-hash: {}\"\n                       .format(key, dist_hash_in_service, current_dist_hash.value))\n\n  \n  return True", "docstring": "Is the dist in service the same as the dist marked current in the version records?\nThis tool won't update records unless the world state is coherent.\nArgs:\ncontext: a populated EFVersionContext object\nReturns:\nTrue if ok to proceed\nRaises:\nRuntimeError if not ok to proceed", "source": "juraj-google-style"}
{"code": "def assert_no_entries_with_modulus_zero(x, message=None, name='assert_no_entries_with_modulus_zero'):\n    with ops.name_scope(name, values=[x]):\n        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n        dtype = x.dtype.base_dtype\n        should_be_nonzero = math_ops.abs(x)\n        zero = tensor_conversion.convert_to_tensor_v2_with_dispatch(0, dtype=dtype.real_dtype)\n        return check_ops.assert_less(zero, should_be_nonzero, message=message)", "docstring": "Returns `Op` that asserts Tensor `x` has no entries with modulus zero.\n\nArgs:\nx:  Numeric `Tensor`, real, integer, or complex.\nmessage:  A string message to prepend to failure message.\nname:  A name to give this `Op`.\n\nReturns:\nAn `Op` that asserts `x` has no entries with modulus zero.", "source": "github-repos"}
{"code": "def isFrameRange(frange):\n        \n        \n        \n        frange = str(frange).translate(None, ''.join(PAD_MAP.keys()))\n        if not frange:\n            return True\n        for part in frange.split(','):\n            if not part:\n                continue\n            try:\n                FrameSet._parse_frange_part(part)\n            except ParseException:\n                return False\n        return True", "docstring": "Return True if the given string is a frame range. Any padding\ncharacters, such as '#' and '@' are ignored.\n\nArgs:\nfrange (str): a frame range to test\n\nReturns:\nbool:", "source": "juraj-google-style"}
{"code": "def description(self, value):\n    try:\n        value = np.dtype(value)\n    except TypeError as e:\n        return None\n    for (dtype, string) in self._all:\n        if (dtype == value):\n            return string\n    return None", "docstring": "Fetches the translated description for the given datatype.\n\nThe given value will be converted to a `numpy.dtype` object, matched\nagainst the supported datatypes and the description will be translated\ninto the preferred language. (Usually a settings dialog should be\navailable to change the language).\n\nIf the conversion fails or no match can be found, `None` will be returned.\n\nArgs:\nvalue (type|numpy.dtype): Any object or type.\n\nReturns:\nstr: The translated description of the datatype\nNone: If no match could be found or an error occured during convertion.", "source": "codesearchnet"}
{"code": "def get_single_value(value):\n    \n    if not all_elements_equal(value):\n        raise ValueError('Not all values are equal to each other.')\n\n    if is_scalar(value):\n        return value\n    return value.item(0)", "docstring": "Get a single value out of the given value.\n\nThis is meant to be used after a call to :func:`all_elements_equal` that returned True. With this\nfunction we return a single number from the input value.\n\nArgs:\nvalue (ndarray or number): a numpy array or a single number.\n\nReturns:\nnumber: a single number from the input\n\nRaises:\nValueError: if not all elements are equal", "source": "juraj-google-style"}
{"code": "def from_csv(cls, filename: str):\n    with open(filename, 'r', encoding='utf-8') as f:\n        reader = csv.reader(f, delimiter=unicode2str(','), quotechar=unicode2str('\"'), quoting=csv.QUOTE_MINIMAL)\n        entries = list()\n        header_read = False\n        elements = None\n        for row in reader:\n            if (not header_read):\n                elements = row[1:(len(row) - 1)]\n                header_read = True\n            else:\n                name = row[0]\n                energy = float(row[(- 1)])\n                comp = dict()\n                for ind in range(1, (len(row) - 1)):\n                    if (float(row[ind]) > 0):\n                        comp[Element(elements[(ind - 1)])] = float(row[ind])\n                entries.append(PDEntry(Composition(comp), energy, name))\n    return cls(entries)", "docstring": "Imports PDEntries from a csv.\n\nArgs:\nfilename: Filename to import from.\n\nReturns:\nList of Elements, List of PDEntries", "source": "codesearchnet"}
{"code": "def open(self):\n    log.info('WebSocket connection opened')\n    proto_version = self.get_argument('bokeh-protocol-version', default=None)\n    if (proto_version is None):\n        self.close()\n        raise ProtocolError('No bokeh-protocol-version specified')\n    session_id = self.get_argument('bokeh-session-id', default=None)\n    if (session_id is None):\n        self.close()\n        raise ProtocolError('No bokeh-session-id specified')\n    if (not check_session_id_signature(session_id, signed=self.application.sign_sessions, secret_key=self.application.secret_key)):\n        log.error('Session id had invalid signature: %r', session_id)\n        raise ProtocolError('Invalid session ID')\n\n    def on_fully_opened(future):\n        e = future.exception()\n        if (e is not None):\n            log.debug('Failed to fully open connection %r', e)\n    future = self._async_open(session_id, proto_version)\n    self.application.io_loop.add_future(future, on_fully_opened)", "docstring": "Initialize a connection to a client.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _parse_hextet(cls, hextet_str):\n        \n        \n        if not cls._HEX_DIGITS.issuperset(hextet_str):\n            raise ValueError(\"Only hex digits permitted in %r\" % hextet_str)\n        \n        \n        if len(hextet_str) > 4:\n            msg = \"At most 4 characters permitted in %r\"\n            raise ValueError(msg % hextet_str)\n        \n        return int(hextet_str, 16)", "docstring": "Convert an IPv6 hextet string into an integer.\n\nArgs:\nhextet_str: A string, the number to parse.\n\nReturns:\nThe hextet as an integer.\n\nRaises:\nValueError: if the input isn't strictly a hex number from\n[0..FFFF].", "source": "juraj-google-style"}
{"code": "def start_reloading_multiplexer(multiplexer, path_to_run, load_interval, reload_task):\n    if (load_interval < 0):\n        raise ValueError(('load_interval is negative: %d' % load_interval))\n\n    def _reload():\n        while True:\n            start = time.time()\n            logger.info('TensorBoard reload process beginning')\n            for (path, name) in six.iteritems(path_to_run):\n                multiplexer.AddRunsFromDirectory(path, name)\n            logger.info('TensorBoard reload process: Reload the whole Multiplexer')\n            multiplexer.Reload()\n            duration = (time.time() - start)\n            logger.info('TensorBoard done reloading. Load took %0.3f secs', duration)\n            if (load_interval == 0):\n                break\n            time.sleep(load_interval)\n    if (reload_task == 'process'):\n        logger.info('Launching reload in a child process')\n        import multiprocessing\n        process = multiprocessing.Process(target=_reload, name='Reloader')\n        process.daemon = True\n        process.start()\n    elif (reload_task in ('thread', 'auto')):\n        logger.info('Launching reload in a daemon thread')\n        thread = threading.Thread(target=_reload, name='Reloader')\n        thread.daemon = True\n        thread.start()\n    elif (reload_task == 'blocking'):\n        if (load_interval != 0):\n            raise ValueError('blocking reload only allowed with load_interval=0')\n        _reload()\n    else:\n        raise ValueError(('unrecognized reload_task: %s' % reload_task))", "docstring": "Starts automatically reloading the given multiplexer.\n\nIf `load_interval` is positive, the thread will reload the multiplexer\nby calling `ReloadMultiplexer` every `load_interval` seconds, starting\nimmediately. Otherwise, reloads the multiplexer once and never again.\n\nArgs:\nmultiplexer: The `EventMultiplexer` to add runs to and reload.\npath_to_run: A dict mapping from paths to run names, where `None` as the run\nname is interpreted as a run name equal to the path.\nload_interval: An integer greater than or equal to 0. If positive, how many\nseconds to wait after one load before starting the next load. Otherwise,\nreloads the multiplexer once and never again (no continuous reloading).\nreload_task: Indicates the type of background task to reload with.\n\nRaises:\nValueError: If `load_interval` is negative.", "source": "codesearchnet"}
{"code": "def pprint_value(self, value):\n    own_type = (type(value) if (self.type is None) else self.type)\n    formatter = (self.value_format if self.value_format else self.type_formatters.get(own_type))\n    if formatter:\n        if callable(formatter):\n            return formatter(value)\n        elif isinstance(formatter, basestring):\n            if isinstance(value, (dt.datetime, dt.date)):\n                return value.strftime(formatter)\n            elif isinstance(value, np.datetime64):\n                return util.dt64_to_dt(value).strftime(formatter)\n            elif re.findall('\\\\{(\\\\w+)\\\\}', formatter):\n                return formatter.format(value)\n            else:\n                return (formatter % value)\n    return unicode(bytes_to_unicode(value))", "docstring": "Applies the applicable formatter to the value.\n\nArgs:\nvalue: Dimension value to format\n\nReturns:\nFormatted dimension value", "source": "codesearchnet"}
{"code": "def _masked_crc32c(cls, value, crc32c_fn=_default_crc32c_fn):\n    crc = crc32c_fn(value)\n    return (crc >> 15 | crc << 17) + 2726488792 & 4294967295", "docstring": "Compute a masked crc32c checksum for a value.\n\nArgs:\nvalue: A bytes object for which we compute the crc.\ncrc32c_fn: A function that can compute a crc32c.\nThis is a performance hook that also helps with testing. Callers are\nnot expected to make use of it directly.\nReturns:\nMasked crc32c checksum.", "source": "github-repos"}
{"code": "def _as_document(self, partition):\n        \n        doc = super(self.__class__, self)._as_document(partition)\n\n        \n        \n        doc['keywords'] = doc['keywords'].replace('-', '_')\n        doc['doc'] = doc['doc'].replace('-', '_')\n        doc['title'] = doc['title'].replace('-', '_')\n\n        \n        doc['time_coverage'] = partition.time_coverage\n        return doc", "docstring": "Converts partition to document indexed by to FTS index.\n\nArgs:\npartition (orm.Partition): partition to convert.\n\nReturns:\ndict with structure matches to BasePartitionIndex._schema.", "source": "juraj-google-style"}
{"code": "def close(self):\n    raise NotImplementedError", "docstring": "Closes the current writer.\n\nPlease see documentation in ``iobase.Sink`` for an example.\n\nReturns:\nAn object representing the writes that were performed by the current\nwriter.", "source": "github-repos"}
{"code": "def _ParseUSNChangeJournal(self, parser_mediator, usn_change_journal):\n    if (not usn_change_journal):\n        return\n    usn_record_map = self._GetDataTypeMap('usn_record_v2')\n    usn_record_data = usn_change_journal.read_usn_record()\n    while usn_record_data:\n        current_offset = usn_change_journal.get_offset()\n        try:\n            usn_record = self._ReadStructureFromByteStream(usn_record_data, current_offset, usn_record_map)\n        except (ValueError, errors.ParseError) as exception:\n            raise errors.ParseError('Unable to parse USN record at offset: 0x{0:08x} with error: {1!s}'.format(current_offset, exception))\n        name_offset = (usn_record.name_offset - 60)\n        utf16_stream = usn_record.name[name_offset:usn_record.name_size]\n        try:\n            name_string = utf16_stream.decode('utf-16-le')\n        except (UnicodeDecodeError, UnicodeEncodeError) as exception:\n            name_string = utf16_stream.decode('utf-16-le', errors='replace')\n            parser_mediator.ProduceExtractionWarning('unable to decode USN record name string with error: {0:s}. Characters that cannot be decoded will be replaced with \"?\" or \"\\\\ufffd\".'.format(exception))\n        event_data = NTFSUSNChangeEventData()\n        event_data.file_attribute_flags = usn_record.file_attribute_flags\n        event_data.file_reference = usn_record.file_reference\n        event_data.filename = name_string\n        event_data.offset = current_offset\n        event_data.parent_file_reference = usn_record.parent_file_reference\n        event_data.update_reason_flags = usn_record.update_reason_flags\n        event_data.update_sequence_number = usn_record.update_sequence_number\n        event_data.update_source_flags = usn_record.update_source_flags\n        if (not usn_record.update_date_time):\n            date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n        else:\n            date_time = dfdatetime_filetime.Filetime(timestamp=usn_record.update_date_time)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        usn_record_data = usn_change_journal.read_usn_record()", "docstring": "Parses an USN change journal.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nusn_change_journal (pyfsntsfs.usn_change_journal): USN change journal.\n\nRaises:\nParseError: if an USN change journal record cannot be parsed.", "source": "codesearchnet"}
{"code": "def _empty_resource_attributes(self):\n    self.status_code = 404\n    self.headers = {}\n    self.exists = False\n    self.rdf = self._build_rdf()\n    if (type(self) == NonRDFSource):\n        self.binary.empty()", "docstring": "small method to empty values if resource is removed or absent\n\nArgs:\nNone\n\nReturn:\nNone: empties selected resource attributes", "source": "codesearchnet"}
{"code": "def traverse_levelorder(self, leaves=True, internal=True):\n        \n        q = deque(); q.append(self)\n        while len(q) != 0:\n            n = q.popleft()\n            if (leaves and n.is_leaf()) or (internal and not n.is_leaf()):\n                yield n\n            q.extend(n.children)", "docstring": "Perform a levelorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def __init__(self, temp_dir, use_gpu):\n    \n    self._temp_dir = temp_dir\n    self._use_gpu = use_gpu\n    self._tmp_extracted_dir = os.path.join(self._temp_dir, 'tmp_extracted')\n    self._extracted_submission_dir = os.path.join(self._temp_dir, 'extracted')\n    self._sample_input_dir = os.path.join(self._temp_dir, 'input')\n    self._sample_output_dir = os.path.join(self._temp_dir, 'output')", "docstring": "Initializes instance of SubmissionValidator.\n\nArgs:\ntemp_dir: temporary working directory\nuse_gpu: whether to use GPU", "source": "juraj-google-style"}
{"code": "def _validate_inputs(self, input_tensors, quantized_input_stats):\n    if not self._is_unknown_shapes_allowed() and self._has_valid_tensors():\n        for tensor in input_tensors:\n            shape = tensor.shape\n            if not shape:\n                raise ValueError(\"Provide an input shape for input array '{0}'.\".format(_get_tensor_name(tensor)))\n            shape_list = shape.as_list()\n            if None in shape_list[1:]:\n                raise ValueError(\"None is only supported in the 1st dimension. Tensor '{0}' has invalid shape '{1}'.\".format(_get_tensor_name(tensor), shape_list))\n            elif shape_list and shape_list[0] is None:\n                self._set_batch_size(batch_size=1)\n    if quantized_input_stats:\n        self._quantized_stats = []\n        invalid_stats = []\n        for name in self.get_input_arrays():\n            if name in quantized_input_stats:\n                self._quantized_stats.append(quantized_input_stats[name])\n            else:\n                invalid_stats.append(name)\n        if invalid_stats:\n            raise ValueError(\"Quantization input stats are not available for input tensors '{0}'.\".format(','.join(invalid_stats)))\n    else:\n        self._quantized_stats = None", "docstring": "Validate input parameters.\n\nArgs:\ninput_tensors: List of input tensors.\nquantized_input_stats: Map of input tensor names to a tuple of floats\nrepresenting the mean and standard deviation of the training data.\n\nRaises:\nValueError:\nInput shape is not specified.\nQuantization input stats is required but not provided.", "source": "github-repos"}
{"code": "def inspect_secret(self, id):\n        \n        url = self._url('/secrets/{0}', id)\n        return self._result(self._get(url), True)", "docstring": "Retrieve secret metadata\n\nArgs:\nid (string): Full ID of the secret to remove\n\nReturns (dict): A dictionary of metadata\n\nRaises:\n:py:class:`docker.errors.NotFound`\nif no secret with that ID exists", "source": "juraj-google-style"}
{"code": "def _Recv(self, timeout):\n    buf = ''\n    wait_for_line = (timeout is TIMEOUT_FOREVER)\n    deadline = (time.time() + (timeout if (not wait_for_line) else 0))\n\n    def TimeLeft():\n        return max((1000 * (deadline - time.time())), 0)\n    continue_reading = True\n    while continue_reading:\n        poll_timeout = (None if wait_for_line else TimeLeft())\n        fd_list = [event[0] for event in self._poller.poll(poll_timeout) if (event[1] & (select.POLLIN | select.POLLPRI))]\n        if ((not wait_for_line) and (TimeLeft() == 0)):\n            continue_reading = False\n        if (self._outfile_r.fileno() in fd_list):\n            buf += self._outfile_r.readline()\n            if buf.endswith('\\n'):\n                return buf\n        if (self._errfile_r.fileno() in fd_list):\n            exc = self._errfile_r.readline()\n            if exc:\n                exc_text = '\\n-----------------------------------\\n'\n                exc_text += 'Error occurred within GdbService:\\n'\n                try:\n                    exc_text += json.loads(exc)\n                except ValueError:\n                    deadline = (time.time() + 0.5)\n                    while (self.is_running and (TimeLeft() > 0)):\n                        exc += self._errfile_r.read()\n                    try:\n                        exc_text += json.loads(exc)\n                    except ValueError:\n                        exc_text = exc\n                raise ProxyError(exc_text)\n    raise TimeoutError()", "docstring": "Receive output from gdb.\n\nThis reads gdb's stdout and stderr streams, returns a single line of gdb's\nstdout or rethrows any exceptions thrown from within gdb as well as it can.\n\nArgs:\ntimeout: floating point number of seconds after which to abort.\nA value of None or TIMEOUT_FOREVER means \"there is no timeout\", i.e.\nthis might block forever.\nRaises:\nProxyError: All exceptions received from the gdb service are generically\nreraised as this.\nTimeoutError: Raised if no answer is received from gdb in after the\nspecified time.\nReturns:\nThe current contents of gdb's stdout buffer, read until the next newline,\nor `None`, should the read fail or timeout.", "source": "codesearchnet"}
{"code": "def get_feature_from_key(self, feature_key):\n    \n    feature = self.feature_key_map.get(feature_key)\n\n    if feature:\n      return feature\n\n    self.logger.error('Feature \"%s\" is not in datafile.' % feature_key)\n    return None", "docstring": "Get feature for the provided feature key.\n\nArgs:\nfeature_key: Feature key for which feature is to be fetched.\n\nReturns:\nFeature corresponding to the provided feature key.", "source": "juraj-google-style"}
{"code": "def delete_nic(access_token, subscription_id, resource_group, nic_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/networkInterfaces/', nic_name,\n                        '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a network interface.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnic_name (str): Name of the NIC.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def disconnect(signal, receiver):\n    \n    inputkey = __make_id(receiver)\n\n    with __lock:\n        __purge()\n        receivers = __receivers.get(signal)\n\n        for idx in six.moves.range(len(receivers)):\n            connected = receivers[idx]()\n\n            if inputkey != __make_id(connected):\n                continue\n\n            del receivers[idx]\n            return True  \n\n    return False", "docstring": "Disconnect the receiver `func` from the signal, identified by\n`signal_id`.\n\nArgs:\nsignal: The signal identifier.\nreceiver: The callable receiver to disconnect.\n\nReturns:\nTrue if the receiver was successfully disconnected. False otherwise.", "source": "juraj-google-style"}
{"code": "def run_pipeline(pipeline, context, pipeline_context_input=None, parse_input=True):\n    logger.debug('starting')\n    try:\n        if parse_input:\n            logger.debug('executing context_parser')\n            prepare_context(pipeline=pipeline, context_in_string=pipeline_context_input, context=context)\n        else:\n            logger.debug('skipping context_parser')\n        pypyr.stepsrunner.run_step_group(pipeline_definition=pipeline, step_group_name='steps', context=context)\n        logger.debug('pipeline steps complete. Running on_success steps now.')\n        pypyr.stepsrunner.run_step_group(pipeline_definition=pipeline, step_group_name='on_success', context=context)\n    except Exception:\n        logger.error('Something went wrong. Will now try to run on_failure.')\n        pypyr.stepsrunner.run_failure_step_group(pipeline=pipeline, context=context)\n        logger.debug('Raising original exception to caller.')\n        raise\n    logger.debug('done')", "docstring": "Run the specified pypyr pipeline.\n\nThis function runs the actual pipeline. If you are running another\npipeline from within a pipeline, call this, not main(). Do call main()\ninstead for your 1st pipeline if there are pipelines calling pipelines.\n\nPipeline and context should be already loaded.\n\nArgs:\npipeline (dict): Dictionary representing the pipeline.\ncontext (pypyr.context.Context): Reusable context object.\npipeline_context_input (str): Initialize the pypyr context with this\nstring.\nparse_input (bool): run context_parser in pipeline.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_status(self, batch_id):\n        \n        with self._lock:\n            if self._batch_committed(batch_id):\n                return ClientBatchStatus.COMMITTED\n            if batch_id in self._invalid:\n                return ClientBatchStatus.INVALID\n            if batch_id in self._pending:\n                return ClientBatchStatus.PENDING\n            return ClientBatchStatus.UNKNOWN", "docstring": "Returns the status enum for a batch.\n\nArgs:\nbatch_id (str): The id of the batch to get the status for\n\nReturns:\nint: The status enum", "source": "juraj-google-style"}
{"code": "def _fuse_awq_attention_layers(model, module, modules_to_fuse, current_module_name, target_cls):\n    from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV\n    module_has_been_fused = False\n    if len(modules_to_fuse['attention']) == 0:\n        return module_has_been_fused\n    if hasattr(module, modules_to_fuse['attention'][0]):\n        q_proj = getattr(module, modules_to_fuse['attention'][0])\n        if isinstance(q_proj, WQLinear_GEMV):\n            linear_target_cls = WQLinear_GEMV\n            cat_dim = 0\n        elif isinstance(q_proj, WQLinear_GEMM):\n            linear_target_cls = WQLinear_GEMM\n            cat_dim = 1\n        elif is_ipex_available() and version.parse(importlib.metadata.version('autoawq')) > version.parse('0.2.6'):\n            from awq.modules.linear import WQLinear_IPEX\n            if isinstance(q_proj, WQLinear_IPEX):\n                linear_target_cls = WQLinear_IPEX\n                cat_dim = 1\n        else:\n            raise ValueError('Unsupported q_proj type: {type(q_proj)}')\n        previous_device = q_proj.qweight.device\n        k_proj = getattr(module, modules_to_fuse['attention'][1])\n        v_proj = getattr(module, modules_to_fuse['attention'][2])\n        o_proj = getattr(module, modules_to_fuse['attention'][3])\n        bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0) if q_proj.bias is not None else None\n        qkv_layer = linear_target_cls(q_proj.w_bit, q_proj.group_size, q_proj.in_features, q_proj.out_features + k_proj.out_features + v_proj.out_features, q_proj.bias is not None, next(iter(module.state_dict().values())).device)\n        qkv_layer.qweight = torch.cat([q_proj.qweight, k_proj.qweight, v_proj.qweight], dim=cat_dim)\n        qkv_layer.qzeros = torch.cat([q_proj.qzeros, k_proj.qzeros, v_proj.qzeros], dim=cat_dim)\n        qkv_layer.scales = torch.cat([q_proj.scales, k_proj.scales, v_proj.scales], dim=cat_dim)\n        if isinstance(qkv_layer, WQLinear_GEMV):\n            qkv_layer.split_k_iters = q_proj.split_k_iters\n        qkv_layer.bias = bias\n        fused_attention_layer = target_cls(modules_to_fuse['hidden_size'], modules_to_fuse['num_attention_heads'], modules_to_fuse['num_key_value_heads'], qkv_layer, o_proj, previous_device, modules_to_fuse['max_seq_len'], use_alibi=modules_to_fuse['use_alibi'], rope_theta=modules_to_fuse.get('rope_theta', 10000.0))\n        fused_attention_layer.is_hf_transformers = True\n        parent_name, child_name = current_module_name.rsplit('.', 1)\n        parent = model.get_submodule(parent_name)\n        setattr(parent, child_name, fused_attention_layer.to(previous_device))\n        del q_proj, k_proj, v_proj, o_proj\n        module_has_been_fused = True\n    return module_has_been_fused", "docstring": "Fuse the Attention layers into a target class using autoawq\n\nArgs:\nmodel (`~PreTrainedModel`):\nThe input pretrained model\nmodule (`nn.Module`):\nThe pytorch parent module that has layernorm modules to fuse\nmodules_to_fuse (`List[str]`):\nThe module fusing mapping. The dictionary has to contain a field `attention` with attention module names\nin the correct order: q, k, v, o layer\ncurrent_module_name (`str`):\nThe current submodule name\ntarget_cls (`~autoawq.QuantAttentionFused`):\nThe `QuantAttentionFused` class as it only supports that class\nfor now.", "source": "github-repos"}
{"code": "def get_cookie_header(queue_item):\n        \n\n        header = []\n        path = URLHelper.get_path(queue_item.request.url)\n\n        for cookie in queue_item.request.cookies:\n            root_path = cookie.path == \"\" or cookie.path == \"/\"\n            if path.startswith(cookie.path) or root_path:\n                header.append(cookie.name + \"=\" + cookie.value)\n\n        return \"&\".join(header)", "docstring": "Convert a requests cookie jar to a HTTP request cookie header value.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.\n\nReturns:\nstr: The HTTP cookie header value.", "source": "juraj-google-style"}
{"code": "def and_evaluator(conditions, leaf_evaluator):\n    saw_null_result = False\n    for condition in conditions:\n        result = evaluate(condition, leaf_evaluator)\n        if (result is False):\n            return False\n        if (result is None):\n            saw_null_result = True\n    return (None if saw_null_result else True)", "docstring": "Evaluates a list of conditions as if the evaluator had been applied\nto each entry and the results AND-ed together.\n\nArgs:\nconditions: List of conditions ex: [operand_1, operand_2].\nleaf_evaluator: Function which will be called to evaluate leaf condition values.\n\nReturns:\nBoolean:\n- True if all operands evaluate to True.\n- False if a single operand evaluates to False.\nNone: if conditions couldn't be evaluated.", "source": "codesearchnet"}
{"code": "class InstructBlipVideoEncoder(nn.Module):\n\n    def __init__(self, config: InstructBlipVideoConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`InstructBlipVideoEncoderLayer`].\n\nArgs:\nconfig (`InstructBlipVideoConfig`):\nThe corresponding vision configuration for the `InstructBlipVideoEncoder`.", "source": "github-repos"}
{"code": "def has_enough_gas_reserve(\n        raiden,\n        channels_to_open: int = 0,\n) -> Tuple[bool, int]:\n    \n    secure_reserve_estimate = get_reserve_estimate(raiden, channels_to_open)\n    current_account_balance = raiden.chain.client.balance(raiden.chain.client.address)\n\n    return secure_reserve_estimate <= current_account_balance, secure_reserve_estimate", "docstring": "Checks if the account has enough balance to handle the lifecycles of all\nopen channels as well as the to be created channels.\n\nNote: This is just an estimation.\n\nArgs:\nraiden: A raiden service instance\nchannels_to_open: The number of new channels that should be opened\n\nReturns:\nTuple of a boolean denoting if the account has enough balance for\nthe remaining lifecycle events and the estimate for the remaining\nlifecycle cost", "source": "juraj-google-style"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns information about a previously requested build. The `Build` that is returned includes its status (such as `SUCCESS`, `FAILURE`, or `WORKING`), and timing information.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBuildsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Build) The response message.", "source": "github-repos"}
{"code": "def setPresence(self, status=SkypeUtils.Status.Online):\n        \n        self.conn(\"PUT\", \"{0}/users/ME/presenceDocs/messagingService\".format(self.conn.msgsHost),\n                  auth=SkypeConnection.Auth.RegToken, json={\"status\": status.label})", "docstring": "Set the current user's presence on the network.  Supports :attr:`.Status.Online`, :attr:`.Status.Busy` or\n:attr:`.Status.Hidden` (shown as :attr:`.Status.Offline` to others).\n\nArgs:\nstatus (.Status): new availability to display to contacts", "source": "juraj-google-style"}
{"code": "def precompute_edge_matrices(adjacency, hparams):\n  \n  batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)\n\n  \n  with tf.variable_scope(\"edge_network\"):\n    x = tf.reshape(\n        adjacency, [batch_size * num_nodes * num_nodes, edge_dim],\n        name=\"adj_reshape_in\")\n\n    for ip_layer in range(hparams.edge_network_layers):\n      name = \"edge_network_layer_%d\"%ip_layer\n      x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),\n                          hparams.edge_network_hidden_size,\n                          activation=tf.nn.relu,\n                          name=name)\n    x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),\n                        hparams.hidden_size**2,\n                        activation=None,\n                        name=\"edge_network_output\")\n\n  \n  edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes,\n                                      num_nodes, hparams.hidden_size,\n                                      hparams.hidden_size])\n\n  \n  edge_matrices = tf.reshape(\n      tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [\n          -1, num_nodes * hparams.hidden_size,\n          num_nodes * hparams.hidden_size\n      ],\n      name=\"edge_matrices\")\n\n  return edge_matrices", "docstring": "Precompute the a_in and a_out tensors.\n\n(we don't want to add to the graph everytime _fprop is called)\nArgs:\nadjacency: placeholder of real valued vectors of shape [B, L, L, E]\nhparams: HParams object\nReturns:\nedge_matrices: [batch, L * D, L * D] the dense matrix for message passing\nviewed as a block matrix (L,L) blocks of size (D,D). Each plot is a function\nof the edge vector of the adjacency matrix at that spot.", "source": "juraj-google-style"}
{"code": "def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if isinstance(output_shape, (tuple, list)):\n        output_shape = array_ops_stack.stack(output_shape)\n    x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n        output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[4], output_shape[1])\n    if output_shape[0] is None:\n        output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])\n        output_shape = array_ops_stack.stack(list(output_shape))\n    padding = _preprocess_padding(padding)\n    if tf_data_format == 'NDHWC':\n        strides = (1,) + strides + (1,)\n    else:\n        strides = (1, 1) + strides\n    x = nn.conv3d_transpose(x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n        x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n    return x", "docstring": "3D deconvolution (i.e.\n\ntransposed convolution).\n\nArgs:\nx: input tensor.\nkernel: kernel tensor.\noutput_shape: 1D int tensor for the output shape.\nstrides: strides tuple.\npadding: string, \"same\" or \"valid\".\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\nReturns:\nA tensor, result of transposed 3D convolution.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.", "source": "github-repos"}
{"code": "def calculate_character_to_length_mapping(measurer: text_measurer.TextMeasurer, characters: Iterable[str]) -> Mapping[(str, float)]:\n    char_to_length = {}\n    for c in characters:\n        char_to_length[c] = measurer.text_width(c)\n    return char_to_length", "docstring": "Return a mapping between each given character and its length.\n\nArgs:\nmeasurer: The TextMeasurer used to measure the width of the text in\npixels.\ncharacters: The characters to measure e.g. \"ml\".\n\nReturns:\nA mapping from the given characters to their length in pixels, as\ndetermined by 'measurer' e.g. {'m': 5.2, 'l', 1.2}.", "source": "codesearchnet"}
{"code": "def final_block(x1, x2, dim='2d', training=True, scope='final_block'):\n  \n\n  \n  with tf.variable_scope(scope):\n    y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])\n    y = tf.layers.batch_normalization(y, training=training)\n    y = tf.nn.relu(y)\n\n    \n    net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],\n                         name='final_pool', keep_dims=True)\n\n    return net", "docstring": "Converts activations from last RevNet block to pre-logits.\n\nArgs:\nx1: [NxHxWxC] tensor of network activations.\nx2: [NxHxWxC] tensor of network activations.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\ntraining: True for train phase, False for eval phase.\nscope: Optional variable scope for the final block.\n\nReturns:\n[N, hidden_dim] pre-logits tensor from activations x1 and x2.", "source": "juraj-google-style"}
{"code": "def full(shape, fill_value, dtype=None, **kwargs):\n    return (dc.zeros(shape, **kwargs) + fill_value).astype(dtype)", "docstring": "Create an array of given shape and type, filled with `fill_value`.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\nfill_value (scalar or numpy.ndarray): Fill value or array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array filled with `fill_value`.", "source": "codesearchnet"}
{"code": "def __init__(self, nfiles=1, tmp_prefix=None):\n        \n        self._fnames = ['inchoate{}'.format(i) for i in range(nfiles)]\n        self._tmpprefix = tmp_prefix\n        self._fids = []", "docstring": "Initialization of instances:\n\nArgs:\nnfiles (int): number of files. Defaults to 1.\ntmp_prefix (str): prefix name of temporary files. Use this\nparameter if you want to easily track down the temporary files\ncreated by the manager.", "source": "juraj-google-style"}
{"code": "def __init__(self, modules: Sequence[RelativePositionBiasBase]):\n    super().__init__()\n    self.biases = nn.ModuleList(modules)", "docstring": "Class which sums up various computed biases.\n\nArgs:\nmodules (Sequence[RelativePositionBiasBase]):\nList of relative bias modules.", "source": "github-repos"}
{"code": "def to_string(self, ast_obj=None, fmt: str = \"medium\") -> str:\n        \n\n        if not ast_obj:\n            ast_obj = self\n\n        bel_relation = None\n        if self.bel_relation and fmt == \"short\":\n            bel_relation = self.spec[\"relations\"][\"to_short\"].get(\n                self.bel_relation, self.bel_relation\n            )\n        elif self.bel_relation:\n            bel_relation = self.spec[\"relations\"][\"to_long\"].get(\n                self.bel_relation, self.bel_relation\n            )\n\n        if self.bel_subject and bel_relation and self.bel_object:\n            if isinstance(self.bel_object, BELAst):\n                return \"{} {} ({})\".format(\n                    self.bel_subject.to_string(fmt=fmt),\n                    bel_relation,\n                    self.bel_object.to_string(fmt=fmt),\n                )\n            else:\n                return \"{} {} {}\".format(\n                    self.bel_subject.to_string(fmt=fmt),\n                    bel_relation,\n                    self.bel_object.to_string(fmt=fmt),\n                )\n\n        elif self.bel_subject:\n            return \"{}\".format(self.bel_subject.to_string(fmt=fmt))\n\n        else:\n            return \"\"", "docstring": "Convert AST object to string\n\nArgs:\nfmt (str): short, medium, long formatted BEL statements\nshort = short function and short relation format\nmedium = short function and long relation format\nlong = long function and long relation format\ncanonicalize\n\nReturns:\nstr: string version of BEL AST", "source": "juraj-google-style"}
{"code": "def copy_framebuffer(self, dst, src) -> None:\n    self.mglo.copy_framebuffer(dst.mglo, src.mglo)", "docstring": "Copy framebuffer content.\n\nUse this method to:\n\n- blit framebuffers.\n- copy framebuffer content into a texture.\n- downsample framebuffers. (it will allow to read the framebuffer's content)\n- downsample a framebuffer directly to a texture.\n\nArgs:\ndst (Framebuffer or Texture): Destination framebuffer or texture.\nsrc (Framebuffer): Source framebuffer.", "source": "codesearchnet"}
{"code": "def _update_graph_variables(self, learning_rate: float=None, momentum: float=None):\n    if (learning_rate is not None):\n        K.set_value(self.get_learning_rate_variable(), learning_rate)\n    if (momentum is not None):\n        K.set_value(self.get_momentum_variable(), momentum)", "docstring": "Update graph variables setting giving `learning_rate` and `momentum`\n\nArgs:\nlearning_rate: learning rate value to be set in graph (set if not None)\nmomentum: momentum value to be set in graph (set if not None)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def frame(self, action):\n        \n        choices = {'on': '1',\n                   'off': '0'}\n        if action in choices:\n            self.send(chr(27)+'if'+choices[action])\n        else:\n            raise RuntimeError('Invalid action for function frame, choices are on and off')", "docstring": "Places/removes frame around text\n\nArgs:\naction -- Enable or disable frame. Options are 'on' and 'off'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"}
{"code": "def tile(tensor, tile_assignment, assign_tuple_sharding=False, use_sharding_op=False, unspecified_dims=None):\n    return Sharding.tile(tile_assignment).apply_to_tensor(tensor, assign_tuple_sharding=assign_tuple_sharding, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or [])", "docstring": "Returns a tensor that has tiled sharding.\n\nArgs:\ntensor: A tf.Tensor to shard.\ntile_assignment: An np.ndarray describing the topology of the tiling and\nwhich device will compute which part of the topology.\nassign_tuple_sharding: If the sharding type should be a tuple.\nuse_sharding_op: If true, adds a sharding op to set the sharding.\nunspecified_dims: An optional list of dimensions unspecified.", "source": "github-repos"}
{"code": "def pr_curves_route(self, request):\n    runs = request.args.getlist('run')\n    if (not runs):\n        return http_util.Respond(request, 'No runs provided when fetching PR curve data', 400)\n    tag = request.args.get('tag')\n    if (not tag):\n        return http_util.Respond(request, 'No tag provided when fetching PR curve data', 400)\n    try:\n        response = http_util.Respond(request, self.pr_curves_impl(runs, tag), 'application/json')\n    except ValueError as e:\n        return http_util.Respond(request, str(e), 'text/plain', 400)\n    return response", "docstring": "A route that returns a JSON mapping between runs and PR curve data.\n\nReturns:\nGiven a tag and a comma-separated list of runs (both stored within GET\nparameters), fetches a JSON object that maps between run name and objects\ncontaining data required for PR curves for that run. Runs that either\ncannot be found or that lack tags will be excluded from the response.", "source": "codesearchnet"}
{"code": "def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'\n                       ) -> 'QubitOrder':\n        \n        if isinstance(val, collections.Iterable):\n            return QubitOrder.explicit(val)\n        if isinstance(val, QubitOrder):\n            return val\n        raise ValueError(\n            \"Don't know how to interpret <{}> as a Basis.\".format(val))", "docstring": "Converts a value into a basis.\n\nArgs:\nval: An iterable or a basis.\n\nReturns:\nThe basis implied by the value.", "source": "juraj-google-style"}
{"code": "def get_all_prefixes(module_name):\n    parts = module_name.split('.')\n    name = parts[0]\n    out = [name]\n    for part in parts[1:]:\n        name = '.'.join([name, part])\n        out.append(name)\n    return out", "docstring": "Return all the prefixes of a module name.\n\ne.g. x.y.z => x, x.y, x.y.z\n\nArgs:\nmodule_name: module name\n\nReturns:\nList of prefixes", "source": "github-repos"}
{"code": "def run_multiple_processes(args_list: List[List[str]],\n                           die_on_failure: bool = True) -> None:\n    \n    for procargs in args_list:\n        start_process(procargs)\n    \n    wait_for_processes(die_on_failure=die_on_failure)", "docstring": "Fire up multiple processes, and wait for them to finihs.\n\nArgs:\nargs_list: command arguments for each process\ndie_on_failure: see :func:`wait_for_processes`", "source": "juraj-google-style"}
{"code": "def _log_band_edge_information(bs, edge_data):\n    if bs.is_spin_polarized:\n        spins = edge_data['band_index'].keys()\n        b_indices = [(', '.join([str((i + 1)) for i in edge_data['band_index'][spin]]) + '({})'.format(spin.name.capitalize())) for spin in spins]\n        b_indices = ', '.join(b_indices)\n    else:\n        b_indices = ', '.join([str((i + 1)) for i in edge_data['band_index'][Spin.up]])\n    kpoint = edge_data['kpoint']\n    kpoint_str = kpt_str.format(k=kpoint.frac_coords)\n    k_indices = ', '.join(map(str, edge_data['kpoint_index']))\n    if kpoint.label:\n        k_loc = kpoint.label\n    else:\n        branch = bs.get_branch(edge_data['kpoint_index'][0])[0]\n        k_loc = 'between {}'.format(branch['name'])\n    logging.info('  Energy: {:.3f} eV'.format(edge_data['energy']))\n    logging.info('  k-point: {}'.format(kpoint_str))\n    logging.info('  k-point location: {}'.format(k_loc))\n    logging.info('  k-point indices: {}'.format(k_indices))\n    logging.info('  Band indices: {}'.format(b_indices))", "docstring": "Log data about the valence band maximum or conduction band minimum.\n\nArgs:\nbs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):\nThe band structure.\nedge_data (dict): The :obj:`dict` from ``bs.get_vbm()`` or\n``bs.get_cbm()``", "source": "codesearchnet"}
{"code": "def db_ws010c(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_ws010c`'.format(value))\n    self._db_ws010c = value", "docstring": "Corresponds to IDD Field `db_ws010c`\nMean coincident dry-bulb temperature to wind speed corresponding to 1.0% cumulative frequency for coldest month\n\nArgs:\nvalue (float): value for IDD Field `db_ws010c`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets, expected_rows, expected_cols):\n    rows = tensor_in_sizes[0]\n    cols = tensor_in_sizes[1]\n    t_rows = array_ops.tile([[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')\n    t_rows_4d = array_ops.transpose(array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3), [0, 2, 1, 3])\n    t_cols = array_ops.tile([[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')\n    t_cols_4d = array_ops.transpose(array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3), [0, 2, 1, 3])\n    t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])\n    t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])\n    glimpse_rows = array_ops.transpose(image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3])\n    glimpse_cols = array_ops.transpose(image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3])\n    with self.cached_session() as sess:\n        value_rows, value_cols = self.evaluate([glimpse_rows, glimpse_cols])\n    self.assertEqual(value_rows.shape[1], glimpse_sizes[0])\n    self.assertEqual(value_rows.shape[2], glimpse_sizes[1])\n    self.assertEqual(value_cols.shape[1], glimpse_sizes[0])\n    self.assertEqual(value_cols.shape[2], glimpse_sizes[1])\n    min_random_val = 0\n    max_random_val = max(rows, cols)\n    for i in range(glimpse_sizes[0]):\n        for j in range(glimpse_sizes[1]):\n            if expected_rows[i] is None or expected_cols[j] is None:\n                self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)\n                self.assertLessEqual(value_rows[0][i][j][0], max_random_val)\n                self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)\n                self.assertLessEqual(value_cols[0][i][j][0], max_random_val)\n            else:\n                self.assertEqual(value_rows[0][i][j][0], expected_rows[i])\n                self.assertEqual(value_cols[0][i][j][0], expected_cols[j])", "docstring": "Verifies the output values of the glimpse extraction kernel.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].\nglimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].\noffsets: Relative location of the center of the glimpse in the input\nimage expressed as [row_offset, col_offset].\nexpected_rows: A list containing the expected row numbers (None for\nout of bound entries that are expected to be replaced by uniform\nrandom entries in [0,1) ).\nexpected_cols: Same as expected_rows, but for column numbers.", "source": "github-repos"}
{"code": "def ParseAutofillRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = ChromeAutofillEventData()\n    event_data.field_name = self._GetRowValue(query_hash, row, 'name')\n    event_data.value = self._GetRowValue(query_hash, row, 'value')\n    event_data.usage_count = self._GetRowValue(query_hash, row, 'count')\n    event_data.query = query\n    timestamp = self._GetRowValue(query_hash, row, 'date_created')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    if (event_data.usage_count > 1):\n        timestamp = self._GetRowValue(query_hash, row, 'date_last_used')\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_USED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an autofill entry row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def load_subset_weights_from_hdf5_group(f):\n    weight_names = load_attributes_from_hdf5_group(f, 'weight_names')\n    return [np.asarray(f[weight_name]) for weight_name in weight_names]", "docstring": "Load layer weights of a model from hdf5.\n\nArgs:\nf: A pointer to a HDF5 group.\n\nReturns:\nList of NumPy arrays of the weight values.\n\nRaises:\nValueError: in case of mismatch between provided model\nand weights file.", "source": "github-repos"}
{"code": "def get_storage_pools(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/storage-pools\"\n        return self._client.get(uri)", "docstring": "Gets a list of Storage pools. Returns a list of storage pools belonging to the storage system referred by the\nPath property {ID} parameter or URI.\n\nArgs:\nid_or_uri: Can be either the storage system ID (serial number) or the storage system URI.\nReturns:\ndict: Host types.", "source": "juraj-google-style"}
{"code": "def update(self, token_id: int):\n    raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')", "docstring": "Reads in a token and returns booleans that indicate the progress made by it. This function will update the\nstate of this object unlikes `does_advance(self, token_id: int)`.\n\nThis isn't to test whether a certain token will advance the progress; it's to update its state as if it has\nbeen generated. This becomes important if token_id != desired token (refer to else statement in\nPhrasalConstraint)\n\nArgs:\ntoken_id(`int`):\nThe id of a newly generated token in the beam search.\nReturn:\nstepped(`bool`):\nWhether this constraint has become one step closer to being fulfuilled.\ncompleted(`bool`):\nWhether this constraint has been completely fulfilled by this token being generated.\nreset (`bool`):\nWhether this constraint has reset its progress by this token being generated.", "source": "github-repos"}
{"code": "def update_shapes_dict_for_target_fn(target_fn, shapes_dict, call_spec, class_name):\n    if utils.is_default(target_fn):\n        return None\n    sig = inspect.signature(target_fn)\n    expected_names = []\n    for name, param in sig.parameters.items():\n        if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY, param.KEYWORD_ONLY):\n            expected_names.append(name)\n    if len(expected_names) == 1:\n        key = expected_names[0]\n        values = tuple(shapes_dict.values())\n        if values:\n            input_shape = values[0]\n        else:\n            input_shape = None\n        return {key: input_shape}\n    kwargs = {}\n    for name in expected_names:\n        method_name = target_fn.__name__\n        error_preamble = f'For a `{method_name}()` method with more than one argument, all arguments should have a `_shape` suffix and match an argument from `call()`. E.g. `{method_name}(self, foo_shape, bar_shape)` '\n        if not name.endswith('_shape'):\n            raise ValueError(f\"{error_preamble} For layer '{class_name}', Received `{method_name}()` argument `{name}`, which does not end in `_shape`.\")\n        expected_call_arg = utils.removesuffix(name, '_shape')\n        if expected_call_arg not in call_spec.arguments_dict:\n            raise ValueError(f\"{error_preamble} For layer '{class_name}', received `{method_name}()` argument `{name}`, but `call()` does not have argument `{expected_call_arg}`.\")\n        if name in shapes_dict:\n            kwargs[name] = shapes_dict[name]\n    return kwargs", "docstring": "Updates a `shapes_dict` for `build()` or `compute_output_shape()`.\n\nThis function will align a dictionary of the shapes of all tensor\npassed to `call`, with the signatures of `build()` or\n`compute_output_shape()`.\n\nThe alignment is a follows:\n\n- If `build()` or `compute_output_shape()` accept only one argument,\nforward the shape of the first positional argument from call without\nchecking any argument names.\n- If `build()` or `compute_output_shape()` accept multiple arguments,\nenforce that all argument names match a call argument name, e.g.\n`foo_shape` would match call argument `foo`.\n\nReturns:\nAn updated `shapes_dict` that can be used to invoke\n`target_fn(**shapes_dict)`.", "source": "github-repos"}
{"code": "def is_cpu(self):\n    return (self._device.get_info(cl.device_info.TYPE) == cl.device_type.CPU)", "docstring": "Check if the device associated with this environment is a CPU.\n\nReturns:\nboolean: True if the device is an CPU, false otherwise.", "source": "codesearchnet"}
{"code": "def save_issue_data_task(self, issue, task_id, namespace='open'):\n        \n\n        issue_data = self.get_saved_issue_data(issue, namespace)\n\n        if not issue_data.has_key('tasks'):\n            issue_data['tasks'] = [task_id]\n        elif task_id not in issue_data['tasks']:\n            issue_data['tasks'].append(task_id)", "docstring": "Saves a issue data (tasks, etc.) to local data.\n\nArgs:\nissue:\n`int`. Github issue number.\ntask:\n`int`. Asana task ID.\nnamespace:\n`str`. Namespace for storing this issue.", "source": "juraj-google-style"}
{"code": "def _decode(obj):  \n    \n    if obj is None:\n        return u''\n    if six.PY3 and isinstance(obj, six.binary_type):\n        \n        return obj.decode('latin1')\n    elif six.PY3:\n        \n        return str(obj)\n    elif isinstance(obj, six.text_type):\n        \n        return obj\n    else:\n        \n        return str(obj).decode('utf-8')", "docstring": "Decode an object to unicode.\nArgs:\nobj (bytes or str or unicode or anything serializable): object to be decoded\nReturns:\nobject decoded in unicode.", "source": "juraj-google-style"}
{"code": "def __init__(self, **connection_args):\n        \n        super(DistributionServer, self).__init__(**connection_args)\n        self.connection[\"url\"] = self.connection[\"jss\"].base_url", "docstring": "Set up a connection to a distribution server.\n\nArgs:\nconnection_args: Dict, with required key:\njss: A JSS Object.", "source": "juraj-google-style"}
{"code": "def _read_coord_h5(files, shapes, header, twod):\n    \n    meshes = []\n    for h5file, shape in zip(files, shapes):\n        meshes.append({})\n        with h5py.File(h5file, 'r') as h5f:\n            for coord, mesh in h5f.items():\n                \n                meshes[-1][coord] = mesh[()].reshape(shape).T\n                meshes[-1][coord] = _make_3d(meshes[-1][coord], twod)\n\n    header['ncs'] = _ncores(meshes, twod)\n    header['nts'] = list((meshes[0]['X'].shape[i] - 1) * header['ncs'][i]\n                         for i in range(3))\n    header['nts'] = np.array([max(1, val) for val in header['nts']])\n    \n    \n    meshes = _conglomerate_meshes(meshes, header)\n    if np.any(meshes['Z'][:, :, 0] != 0):\n        \n        header['x_mesh'] = np.copy(meshes['Y'])  \n        header['y_mesh'] = np.copy(meshes['Z'])\n        header['z_mesh'] = np.copy(meshes['X'])\n        header['r_mesh'] = np.sqrt(header['x_mesh']**2 + header['y_mesh']**2 +\n                                   header['z_mesh']**2)\n        header['t_mesh'] = np.arccos(header['z_mesh'] / header['r_mesh'])\n        header['p_mesh'] = np.roll(\n            np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)\n        header['e1_coord'] = header['t_mesh'][:, 0, 0]\n        header['e2_coord'] = header['p_mesh'][0, :, 0]\n        header['e3_coord'] = header['r_mesh'][0, 0, :]\n    else:\n        header['e1_coord'] = meshes['X'][:, 0, 0]\n        header['e2_coord'] = meshes['Y'][0, :, 0]\n        header['e3_coord'] = meshes['Z'][0, 0, :]\n    header['aspect'] = (header['e1_coord'][-1] - header['e2_coord'][0],\n                        header['e1_coord'][-1] - header['e2_coord'][0])\n    header['rcmb'] = header['e3_coord'][0]\n    if header['rcmb'] == 0:\n        header['rcmb'] = -1\n    else:\n        \n        header['e3_coord'] = header['e3_coord'] - header['rcmb']\n    if twod is None or 'X' in twod:\n        header['e1_coord'] = header['e1_coord'][:-1]\n    if twod is None or 'Y' in twod:\n        header['e2_coord'] = header['e2_coord'][:-1]\n    header['e3_coord'] = header['e3_coord'][:-1]", "docstring": "Read all coord hdf5 files of a snapshot.\n\nArgs:\nfiles (list of pathlib.Path): list of NodeCoordinates files of\na snapshot.\nshapes (list of (int,int)): shape of mesh grids.\nheader (dict): geometry info.\ntwod (str): 'XZ', 'YZ' or None depending on what is relevant.", "source": "juraj-google-style"}
{"code": "def hpo_terms(store, query=None, limit=None):\n    hpo_phenotypes = {}\n    if limit:\n        limit = int(limit)\n    hpo_phenotypes['phenotypes'] = list(store.hpo_terms(text=query, limit=limit))\n    return hpo_phenotypes", "docstring": "Retrieves a list of HPO terms from scout database\n\nArgs:\nstore (obj): an adapter to the scout database\nquery (str): the term to search in the database\nlimit (str): the number of desired results\n\nReturns:\nhpo_phenotypes (dict): the complete list of HPO objects stored in scout", "source": "codesearchnet"}
{"code": "def pretty_print(input_word, anagrams, by_length=False):\n    \n\n    scores = {}\n    if by_length:\n        noun = \"tiles\"\n        for word, score in anagrams:\n            try:\n                scores[len(word)].append(\"{0} ({1:d})\".format(word, score))\n            except KeyError:\n                scores[len(word)] = [\"{0} ({1:d})\".format(word, score)]\n    else:\n        noun = \"points\"\n        for word, score in anagrams:\n            try:\n                scores[score].append(word)\n            except KeyError:\n                scores[score] = [word]\n\n    print(\"Anagrams for {0}{1}:\".format(input_word, \" (score)\" * by_length))\n\n    if not valid_scrabble_word(input_word):\n        print(\"{0} is not possible in Scrabble.\".format(input_word))\n\n    for key, value in sorted(scores.items(), reverse=True):\n        print(\"{0:d} {1}: {2}\".format(key, noun, \", \".join(value)))", "docstring": "Prints the anagram results sorted by score to stdout.\n\nArgs:\ninput_word: the base word we searched on\nanagrams: generator of (word, score) from anagrams_in_word\nby_length: a boolean to declare printing by length instead of score", "source": "juraj-google-style"}
{"code": "def default(self, obj):\n        \n\n        from ..model import Model\n        from ..colors import Color\n        from .has_props import HasProps\n\n        \n        \n        if pd and isinstance(obj, (pd.Series, pd.Index)):\n            return transform_series(obj, force_list=True)\n        elif isinstance(obj, np.ndarray):\n            return transform_array(obj, force_list=True)\n        elif isinstance(obj, collections.deque):\n            return list(map(self.default, obj))\n        elif isinstance(obj, Model):\n            return obj.ref\n        elif isinstance(obj, HasProps):\n            return obj.properties_with_values(include_defaults=False)\n        elif isinstance(obj, Color):\n            return obj.to_css()\n\n        else:\n            return self.transform_python_types(obj)", "docstring": "The required ``default`` method for ``JSONEncoder`` subclasses.\n\nArgs:\nobj (obj) :\n\nThe object to encode. Anything not specifically handled in\nthis method is passed on to the default system JSON encoder.", "source": "juraj-google-style"}
{"code": "def dump_orm_object_as_insert_sql(engine: Engine,\n                                  obj: object,\n                                  fileobj: TextIO) -> None:\n    \n    \n    insp = inspect(obj)\n    \n    \n    \n    \n\n    \n    \n    \n    \n    \n    meta = MetaData(bind=engine)\n    table_name = insp.mapper.mapped_table.name\n    \n    table = Table(table_name, meta, autoload=True)\n    \n\n    \n    \n    query = select(table.columns)\n    \n    for orm_pkcol in insp.mapper.primary_key:\n        core_pkcol = table.columns.get(orm_pkcol.name)\n        pkval = getattr(obj, orm_pkcol.name)\n        query = query.where(core_pkcol == pkval)\n    \n    cursor = engine.execute(query)\n    row = cursor.fetchone()  \n    row_dict = dict(row)\n    \n    \n    statement = table.insert(values=row_dict)\n    \n    insert_str = get_literal_query(statement, bind=engine)\n    writeline_nl(fileobj, insert_str)", "docstring": "Takes a SQLAlchemy ORM object, and writes ``INSERT`` SQL to replicate it\nto the output file-like object.\n\nArgs:\nengine: SQLAlchemy :class:`Engine`\nobj: SQLAlchemy ORM object to write\nfileobj: file-like object to write to", "source": "juraj-google-style"}
{"code": "def get_iterator_type(script_settings, subscripts={}):\n        \n\n        if 'iterator_type' in script_settings:\n            \n            if script_settings['iterator_type'] == 'Loop':\n                iterator_type = 'loop'\n            elif script_settings['iterator_type'] == 'Parameter Sweep':\n                iterator_type = 'sweep'\n            else:\n                raise TypeError('unknown iterator type')\n        else:\n            \n            if 'sweep_param' in script_settings:\n                iterator_type = 'sweep'\n            elif 'num_loops' in script_settings:\n                iterator_type = 'loop'\n            else:\n                raise TypeError('unknown iterator type')\n\n        return iterator_type", "docstring": "figures out the iterator type based on the script settings and (optionally) subscripts\nArgs:\nscript_settings: iterator_type\nsubscripts: subscripts\nReturns:", "source": "juraj-google-style"}
{"code": "def tf_retrieve_indices(self, indices):\n        \n        states = dict()\n        for name in sorted(self.states_memory):\n            states[name] = tf.gather(params=self.states_memory[name], indices=indices)\n\n        internals = dict()\n        for name in sorted(self.internals_memory):\n            internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)\n\n        actions = dict()\n        for name in sorted(self.actions_memory):\n            actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)\n\n        terminal = tf.gather(params=self.terminal_memory, indices=indices)\n        reward = tf.gather(params=self.reward_memory, indices=indices)\n\n        if self.include_next_states:\n            assert util.rank(indices) == 1\n            next_indices = (indices + 1) % self.capacity\n\n            next_states = dict()\n            for name in sorted(self.states_memory):\n                next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)\n\n            next_internals = dict()\n            for name in sorted(self.internals_memory):\n                next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)\n\n            return dict(\n                states=states,\n                internals=internals,\n                actions=actions,\n                terminal=terminal,\n                reward=reward,\n                next_states=next_states,\n                next_internals=next_internals\n            )\n        else:\n            return dict(\n                states=states,\n                internals=internals,\n                actions=actions,\n                terminal=terminal,\n                reward=reward\n            )", "docstring": "Fetches experiences for given indices.\n\nArgs:\nindices: Index tensor\n\nReturns: Batch of experiences", "source": "juraj-google-style"}
{"code": "def _format_field_name(self, field_name) -> str:\n    field = self._get_model_field(field_name)\n    return self.qn(field.column)", "docstring": "Formats a field's name for usage in SQL.\n\nArguments:\nfield_name:\nThe field name to format.\n\nReturns:\nThe specified field name formatted for\nusage in SQL.", "source": "codesearchnet"}
{"code": "def list_bucket(self, bucket):\n    self.response.write('Listbucket result:\\n')\n    page_size = 1\n    stats = gcs.listbucket((bucket + '/foo'), max_keys=page_size)\n    while True:\n        count = 0\n        for stat in stats:\n            count += 1\n            self.response.write(repr(stat))\n            self.response.write('\\n')\n        if ((count != page_size) or (count == 0)):\n            break\n        stats = gcs.listbucket((bucket + '/foo'), max_keys=page_size, marker=stat.filename)", "docstring": "Create several files and paginate through them.\n\nProduction apps should set page_size to a practical value.\n\nArgs:\nbucket: bucket.", "source": "codesearchnet"}
{"code": "def readCmd(cls, cmd):\n        \n        args = shlex.split(cmd)\n        proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n        (proc_stdout, proc_stderr) = proc.communicate(input=None)  \n        return proc_stdout.decode()", "docstring": "run command and return the str format stdout\n\nArgs:\ncmd: string\nReturns:\nstr: what the command's echo", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, knowledge_base, formatter_mediator, fields_filter=None,\n      preferred_encoding='utf-8'):\n    \n    super(OutputMediator, self).__init__()\n    self._formatter_mediator = formatter_mediator\n    self._knowledge_base = knowledge_base\n    self._preferred_encoding = preferred_encoding\n    self._timezone = pytz.UTC\n\n    self.fields_filter = fields_filter", "docstring": "Initializes an output mediator.\n\nArgs:\nknowledge_base (KnowledgeBase): knowledge base.\nformatter_mediator (FormatterMediator): formatter mediator.\nfields_filter (Optional[FilterObject]): filter object that indicates\nwhich fields to output.\npreferred_encoding (Optional[str]): preferred encoding to output.", "source": "juraj-google-style"}
{"code": "def toregex(text, exact=False):\n    if isregex(text):\n        return text\n    escaped = re.escape(normalize_text(text))\n    if exact:\n        escaped = '\\\\A{}\\\\Z'.format(escaped)\n    return re.compile(escaped)", "docstring": "Returns a compiled regular expression for the given text.\n\nArgs:\ntext (str | RegexObject): The text to match.\nexact (bool, optional): Whether the generated regular expression should match exact\nstrings. Defaults to False.\n\nReturns:\nRegexObject: A compiled regular expression that will match the text.", "source": "codesearchnet"}
{"code": "def concat_excel_reports(addresses, output_file_name, endpoint, report_type, retry, api_key, api_secret, files_path):\n    master_workbook = openpyxl.Workbook()\n    if ((api_key is not None) and (api_secret is not None)):\n        client = ApiClient(api_key, api_secret)\n    else:\n        client = ApiClient()\n    errors = []\n    for (index, addr) in enumerate(addresses):\n        print('Processing {}'.format(addr[0]))\n        result = _get_excel_report(client, endpoint, addr[0], addr[1], report_type, retry)\n        if (not result['success']):\n            print('Error retrieving report for {}'.format(addr[0]))\n            print(result['content'])\n            errors.append({'address': addr[0], 'message': result['content']})\n            continue\n        orig_wb = openpyxl.load_workbook(filename=io.BytesIO(result['content']))\n        _save_individual_file(orig_wb, files_path, addr[0])\n        for sheet_name in orig_wb.get_sheet_names():\n            if (sheet_name in master_workbook.get_sheet_names()):\n                master_ws = master_workbook.get_sheet_by_name(sheet_name)\n            else:\n                master_ws = master_workbook.create_sheet(sheet_name)\n            orig_rows = orig_wb.get_sheet_by_name(sheet_name).rows\n            if ((sheet_name == 'Summary') or (sheet_name == 'Chart Data')):\n                _process_non_standard_sheet(master_ws, orig_rows, addr, index)\n                continue\n            _process_standard_sheet(master_ws, orig_rows, addr, index)\n    master_workbook.remove(master_workbook.worksheets[0])\n    if (len(errors) > 0):\n        errors_sheet = master_workbook.create_sheet('Errors')\n        for (error_idx, error) in enumerate(errors):\n            errors_sheet.cell(row=(error_idx + 1), column=1, value=error['address'])\n            errors_sheet.cell(row=(error_idx + 1), column=2, value=error['message'])\n    adjust_column_width_workbook(master_workbook)\n    output_file_path = os.path.join(files_path, output_file_name)\n    master_workbook.save(output_file_path)\n    print('Saved output to {}'.format(output_file_path))", "docstring": "Creates an Excel file made up of combining the Value Report or Rental Report Excel\noutput for the provided addresses.\n\nArgs:\naddresses: A list of (address, zipcode) tuples\noutput_file_name: A file name for the Excel output\nendpoint: One of 'value_report' or 'rental_report'\nreport_type: One of 'full' or 'summary'\nretry: optional boolean to retry if rate limit is reached\napi_key: optional API Key\napi_secret: optional API Secret\nfiles_path: Path to save individual files. If None, don't save files", "source": "codesearchnet"}
{"code": "def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):\n    raw = clean_lines.lines_without_raw_strings\n    line = raw[linenum]\n    if (IsBlankLine(line) and (not nesting_state.InNamespaceBody()) and (not nesting_state.InExternC())):\n        elided = clean_lines.elided\n        prev_line = elided[(linenum - 1)]\n        prevbrace = prev_line.rfind('{')\n        if ((prevbrace != (- 1)) and (prev_line[prevbrace:].find('}') == (- 1))):\n            exception = False\n            if Match(' {6}\\\\w', prev_line):\n                search_position = (linenum - 2)\n                while ((search_position >= 0) and Match(' {6}\\\\w', elided[search_position])):\n                    search_position -= 1\n                exception = ((search_position >= 0) and (elided[search_position][:5] == '    :'))\n            else:\n                exception = (Match(' {4}\\\\w[^\\\\(]*\\\\)\\\\s*(const\\\\s*)?(\\\\{\\\\s*$|:)', prev_line) or Match(' {4}:', prev_line))\n            if (not exception):\n                error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block should be deleted.')\n        if ((linenum + 1) < clean_lines.NumLines()):\n            next_line = raw[(linenum + 1)]\n            if (next_line and Match('\\\\s*}', next_line) and (next_line.find('} else ') == (- 1))):\n                error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block should be deleted.')\n        matched = Match('\\\\s*(public|protected|private):', prev_line)\n        if matched:\n            error(filename, linenum, 'whitespace/blank_line', 3, ('Do not leave a blank line after \"%s:\"' % matched.group(1)))\n    next_line_start = 0\n    if ((linenum + 1) < clean_lines.NumLines()):\n        next_line = raw[(linenum + 1)]\n        next_line_start = (len(next_line) - len(next_line.lstrip()))\n    CheckComment(line, filename, linenum, next_line_start, error)\n    line = clean_lines.elided[linenum]\n    if (Search('\\\\w\\\\s+\\\\[', line) and (not Search('(?:delete|return)\\\\s+\\\\[', line))):\n        error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [')\n    if (Search('for *\\\\(.*[^:]:[^: ]', line) or Search('for *\\\\(.*[^: ]:[^:]', line)):\n        error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop')", "docstring": "Checks for the correctness of various spacing issues in the code.\n\nThings we check for: spaces around operators, spaces after\nif/for/while/switch, no spaces around parens in function calls, two\nspaces between code and comment, don't start a block with a blank\nline, don't end a function with a blank line, don't add a blank line\nafter public/protected/private, don't have too many blank lines in a row.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def convert_optional_traversals_to_compound_match_query(match_query, complex_optional_roots, location_to_optional_roots):\n    tree = construct_optional_traversal_tree(complex_optional_roots, location_to_optional_roots)\n    rooted_optional_root_location_subsets = tree.get_all_rooted_subtrees_as_lists()\n    omitted_location_subsets = [(set(complex_optional_roots) - set(subset)) for subset in rooted_optional_root_location_subsets]\n    sorted_omitted_location_subsets = sorted(omitted_location_subsets)\n    compound_match_traversals = []\n    for omitted_locations in reversed(sorted_omitted_location_subsets):\n        new_match_traversals = []\n        for match_traversal in match_query.match_traversals:\n            location = match_traversal[0].as_block.location\n            optional_root_locations_stack = location_to_optional_roots.get(location, None)\n            if (optional_root_locations_stack is not None):\n                optional_root_location = optional_root_locations_stack[(- 1)]\n            else:\n                optional_root_location = None\n            if ((optional_root_location is None) or (optional_root_location not in omitted_locations)):\n                new_match_traversal = _prune_traverse_using_omitted_locations(match_traversal, set(omitted_locations), complex_optional_roots, location_to_optional_roots)\n                new_match_traversals.append(new_match_traversal)\n            else:\n                pass\n        compound_match_traversals.append(new_match_traversals)\n    match_queries = [MatchQuery(match_traversals=match_traversals, folds=match_query.folds, output_block=match_query.output_block, where_block=match_query.where_block) for match_traversals in compound_match_traversals]\n    return CompoundMatchQuery(match_queries=match_queries)", "docstring": "Return 2^n distinct MatchQuery objects in a CompoundMatchQuery.\n\nGiven a MatchQuery containing `n` optional traverses that expand vertex fields,\nconstruct `2^n` different MatchQuery objects:\none for each possible subset of optional edges that can be followed.\nFor each edge `e` in a subset of optional edges chosen to be omitted,\ndiscard all traversals following `e`, and add filters specifying that `e` *does not exist*.\n\nArgs:\nmatch_query: MatchQuery object containing n `@optional` scopes which expand vertex fields\ncomplex_optional_roots: list of @optional locations (location preceding an @optional\ntraverse) that expand vertex fields within\nlocation_to_optional_roots: dict mapping from location -> optional_roots where location is\nwithin some number of @optionals and optional_roots is a list\nof optional root locations preceding the successive @optional\nscopes within which the location resides\n\nReturns:\nCompoundMatchQuery object containing 2^n MatchQuery objects,\none for each possible subset of the n optional edges being followed", "source": "codesearchnet"}
{"code": "def match(self, expected, actual, assert_items_equal=False):\n    if isinstance(expected, np.ndarray):\n        expected = expected.tolist()\n    if isinstance(actual, np.ndarray):\n        actual = actual.tolist()\n    self.assertEqual(type(expected), type(actual))\n    if nest.is_nested(expected):\n        self.assertEqual(len(expected), len(actual))\n        if isinstance(expected, dict):\n            for key1, key2 in zip(sorted(expected), sorted(actual)):\n                self.assertEqual(key1, key2)\n                self.match(expected[key1], actual[key2])\n        elif assert_items_equal:\n            for item1, item2 in zip(sorted(expected), sorted(actual)):\n                self.match(item1, item2)\n        else:\n            for item1, item2 in zip(expected, actual):\n                self.match(item1, item2)\n    elif isinstance(expected, sparse_tensor.SparseTensorValue):\n        self.match((expected.indices, expected.values, expected.dense_shape), (actual.indices, actual.values, actual.dense_shape))\n    elif isinstance(expected, ragged_tensor_value.RaggedTensorValue):\n        self.match((expected.values, expected.row_splits), (actual.values, actual.row_splits))\n    else:\n        self.assertEqual(expected, actual)", "docstring": "Matches nested structures.\n\nRecursively matches shape and values of `expected` and `actual`.\nHandles scalars, numpy arrays and other python sequence containers\ne.g. list, dict, as well as SparseTensorValue and RaggedTensorValue.\n\nArgs:\nexpected: Nested structure 1.\nactual: Nested structure 2.\nassert_items_equal: Tests the output has the expected elements regardless\nof order.\n\nRaises:\nAssertionError if matching fails.", "source": "github-repos"}
{"code": "def add_child(self, key, value):\n        \n        if type(value) in (list, tuple, dict):\n            if type(value)==dict:\n                for k in value.keys():\n                    self.add_child(k, value[k])\n                return\n            i = 0\n            for child in value:\n                self.add_child(key[i], child)\n                i = i + 1\n            return\n\n        if hasattr(value, 'attributes'):\n            value.attributes['data-parent-widget'] = self.identifier\n            value._parent = self\n\n        if key in self.children:\n            self._render_children_list.remove(key)\n        self._render_children_list.append(key)\n\n        self.children[key] = value", "docstring": "Adds a child to the Tag\n\nTo retrieve the child call get_child or access to the Tag.children[key] dictionary.\n\nArgs:\nkey (str):  Unique child's identifier, or iterable of keys\nvalue (Tag, str): can be a Tag, an iterable of Tag or a str. In case of iterable\nof Tag is a dict, each item's key is set as 'key' param", "source": "juraj-google-style"}
{"code": "def moves_from_last_n_games(self, n, moves, shuffle,\n                                column_family, column):\n        \n        self.wait_for_fresh_games()\n        latest_game = self.latest_game_number\n        utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game))\n        if latest_game == 0:\n            raise ValueError('Cannot find a latest game in the table')\n\n        start = int(max(0, latest_game - n))\n        ds = self.moves_from_games(start, latest_game, moves, shuffle,\n                                   column_family, column)\n        return ds", "docstring": "Randomly choose a given number of moves from the last n games.\n\nArgs:\nn:  number of games at the end of this GameQueue to source.\nmoves:  number of moves to be sampled from `n` games.\nshuffle:  if True, shuffle the selected moves.\ncolumn_family:  name of the column family containing move examples.\ncolumn:  name of the column containing move examples.\n\nReturns:\na dataset containing the selected moves.", "source": "juraj-google-style"}
{"code": "def interpolate_graph(message, graph):\n    parsed_messaged, _, node_tags = parse_message(message)\n    error_message = ['Graph execution error:', '']\n    for tag in node_tags:\n        try:\n            op = graph.get_operation_by_name(tag.name)\n        except KeyError:\n            continue\n        else:\n            error_message.append(_build_node_error_message(op))\n    error_message.append(parsed_messaged.strip())\n    return '\\n'.join(error_message)", "docstring": "Interpolates an error message.\n\nThe error message can contain tags of form `{{node_type node_name}}`\nwhich will be parsed to identify the tf.Graph and op. If the op contains\ntraceback, the traceback will be attached to the error message.\n\nArgs:\nmessage: A string to interpolate.\ngraph: ops.Graph object containing all nodes referenced in the error\nmessage.\n\nReturns:\nThe error message string with node definition traceback.", "source": "github-repos"}
{"code": "def prefixlen_to_mask(prefixlen):\n    prefixlen = (prefixlen or '32')\n    addr = ('0.0.0.0/%s' % prefixlen)\n    return str(netaddr.IPNetwork(addr).netmask)", "docstring": "Converts a prefix length to a dotted decimal subnet mask\n\nArgs:\nprefixlen (str): The prefix length value to convert\n\nReturns:\nstr: The subt mask as a dotted decimal string", "source": "codesearchnet"}
{"code": "def fit(dataset_train: Dataset, dataset_val: typing.Optional[Dataset], features: typing.List[str], iters: int, weights_filename: str, log_filename: str, out_span: int) -> jax.Array:\n    with open(weights_filename, 'w') as f:\n        f.write('')\n    with open(log_filename, 'w') as f:\n        f.write('iter\\ttrain_accuracy\\ttrain_precision\\ttrain_recall\\ttrain_fscore')\n        if dataset_val:\n            f.write('\\ttest_accuracy\\ttest_precision\\ttest_recall\\ttest_fscore')\n        f.write('\\n')\n    print('Outputting learned weights to %s ...' % weights_filename)\n    M = len(features)\n    scores = jnp.zeros(M)\n    feature_score_buffer: typing.List[typing.Tuple[str, float]] = []\n    N_train = dataset_train.Y.shape[0]\n    N_test = dataset_val.Y.shape[0] if dataset_val else 0\n    Y_train = dataset_train.Y > 0\n    Y_test = dataset_val.Y > 0 if dataset_val else None\n    w = jnp.abs(dataset_train.Y) / jnp.sum(jnp.abs(dataset_train.Y))\n\n    def output_progress(t: int) -> None:\n        with open(weights_filename, 'a') as f:\n            f.write('\\n'.join(('%s\\t%.6f' % p for p in feature_score_buffer)) + '\\n')\n        feature_score_buffer.clear()\n        print('=== %s ===' % t)\n        print()\n        with open(log_filename, 'a') as f:\n            pred_train = pred(scores, dataset_train.X_rows, dataset_train.X_cols, N_train)\n            metrics_train = get_metrics(pred_train, Y_train)\n            print('train accuracy:\\t%.5f' % metrics_train.accuracy)\n            print('train prec.:\\t%.5f' % metrics_train.precision)\n            print('train recall:\\t%.5f' % metrics_train.recall)\n            print('train fscore:\\t%.5f' % metrics_train.fscore)\n            print()\n            f.write('%d\\t%.5f\\t%.5f\\t%.5f\\t%.5f' % (t, metrics_train.accuracy, metrics_train.precision, metrics_train.recall, metrics_train.fscore))\n            if dataset_val:\n                pred_test = pred(scores, dataset_val.X_rows, dataset_val.X_cols, N_test)\n                metrics_test = get_metrics(pred_test, Y_test)\n                print('test accuracy:\\t%.5f' % metrics_test.accuracy)\n                print('test prec.:\\t%.5f' % metrics_test.precision)\n                print('test recall:\\t%.5f' % metrics_test.recall)\n                print('test fscore:\\t%.5f' % metrics_test.fscore)\n                print()\n                f.write('\\t%.5f\\t%.5f\\t%.5f\\t%.5f' % (metrics_test.accuracy, metrics_test.precision, metrics_test.recall, metrics_test.fscore))\n            f.write('\\n')\n    for t in range(iters):\n        w, scores, best_feature_index, score = update(w, scores, dataset_train.X_rows, dataset_train.X_cols, Y_train)\n        w.block_until_ready()\n        feature = features[best_feature_index]\n        feature_score_buffer.append((feature, score))\n        if (t + 1) % out_span == 0:\n            output_progress(t + 1)\n    if len(feature_score_buffer) > 0:\n        output_progress(t + 1)\n    return scores", "docstring": "Trains an AdaBoost binary classifier.\n\nArgs:\ndataset_train (Dataset): A training dataset.\ndataset_val (Optional[Dataset]): A validation dataset.\nfeatures (List[str]): Features, which correspond to the columns of entries.\niters (int): A number of training iterations.\nweights_filename (str): A file path to write the learned weights.\nlog_filename (str): A file path to log the accuracy along with training.\nout_span (int): Iteration span to output metics and weights.\n\nReturns:\nscores (jax.Array): The contribution scores.", "source": 
"github-repos"}
{"code": "def __init__(self, fold_scope_location):\n        \n        super(FoldCountContextField, self).__init__(fold_scope_location)\n        self.fold_scope_location = fold_scope_location\n        self.validate()", "docstring": "Construct a new FoldCountContextField object for this fold.\n\nArgs:\nfold_scope_location: FoldScopeLocation specifying the fold whose size is being output.\n\nReturns:\nnew FoldCountContextField object", "source": "juraj-google-style"}
{"code": "def _get_hash(self):\n    if (self.optionals and self.optionals.ISBN):\n        isbn = self.optionals.ISBN.replace('-', '')\n        if (len(isbn) <= 10):\n            return ('97880' + isbn)\n        return isbn\n    if (self.optionals and self.optionals.EAN):\n        return self.optionals.EAN\n    return (self.title + ','.join(map((lambda x: x.name), self.authors)))", "docstring": "Create hash of the class.\n\nHash should be unique for given ebook, so ISBN is main component of the\nhash if provided.\n\nReturns:\nstr: Hash.", "source": "codesearchnet"}
{"code": "def type_check(self, instance):\n    raise NotImplementedError", "docstring": "Determines if the type of 'instance' satisfies this type constraint.\n\nArgs:\ninstance: An instance of a Python object.\n\nRaises:\n:class:`TypeError`: The passed **instance** doesn't satisfy\nthis :class:`TypeConstraint`. Subclasses of\n:class:`TypeConstraint` are free to raise any of the subclasses of\n:class:`TypeError` defined above, depending on\nthe manner of the type hint error.\n\nAll :class:`TypeConstraint` sub-classes must define this method in other\nfor the class object to be created.", "source": "github-repos"}
{"code": "def _step(time, output_ta_t, prev_output, *states):\n    current_input = tuple((ta[time] for ta in input_ta))\n    current_input = tree.pack_sequence_as(inputs, current_input)\n    mask_t = masking_fn(time)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_output = tree.flatten(output)\n    flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output)\n    flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n    flat_state = tree.flatten(states)\n    flat_new_state = tree.flatten(new_states)\n    flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n    new_states = tree.pack_sequence_as(new_states, flat_final_state)\n    ta_index_to_write = time if return_all_outputs else 0\n    for ta, out in zip(output_ta_t, flat_new_output):\n        ta[ta_index_to_write] = out\n    return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\nprev_output: tuple of outputs from time - 1.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1, output_ta_t, output) + tuple(new_states)`", "source": "github-repos"}
{"code": "def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):\n    \n    identifier_mappings = {}\n\n    for esedb_record in esedb_table.records:\n      if parser_mediator.abort:\n        break\n\n      identifier, mapped_value = self._ParseIdentifierMappingRecord(\n          parser_mediator, esedb_table.name, esedb_record)\n      if identifier is None or mapped_value is None:\n        continue\n\n      if identifier in identifier_mappings:\n        parser_mediator.ProduceExtractionWarning(\n            'identifier: {0:d} already exists in mappings.'.format(identifier))\n        continue\n\n      identifier_mappings[identifier] = mapped_value\n\n    return identifier_mappings", "docstring": "Extracts identifier mappings from the SruDbIdMapTable table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nesedb_table (pyesedb.table): table.\n\nReturns:\ndict[int, str]: mapping of numeric identifiers to their string\nrepresentation.", "source": "juraj-google-style"}
{"code": "def generator_container(generator_function):\n    \n\n    @functools.wraps(generator_function)\n    def generator_container_wrapper(*args, **kwargs):\n        \n        return GeneratorContainer(generator_function, *args, **kwargs)\n\n    return generator_container_wrapper", "docstring": "Function Decorator: Containerize calls to a generator function.\n\nArgs:\ngenerator_function(func): The generator function being containerized.\n\nReturns:\nfunc: A wrapper function that containerizes the calls to the generator\nfunction.", "source": "juraj-google-style"}
{"code": "def error_print(msg, color=colorama.Fore.RED, file=sys.stderr):\n  \n  if CLI_QUIET:\n    return\n  file.write('{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format(\n      sep=_linesep_for_file(file), bright=colorama.Style.BRIGHT, color=color,\n      normal=colorama.Style.NORMAL, msg=msg, reset=colorama.Style.RESET_ALL))\n  file.flush()", "docstring": "Print the error message to the file in the specified color.\n\nArgs:\nmsg: The error message to be printed.\ncolor: Optional colorama color string to be applied to the message. You can\nconcatenate colorama color strings together here, but note that style\nstrings will not be applied.\nfile: A file object to which the baracketed text will be written. Intended\nfor use with CLI output file objects, specifically sys.stderr.", "source": "juraj-google-style"}
{"code": "def initial_value_of_masked_time_series(time_series_tensor, broadcast_mask):\n    num_timesteps = tf.shape(input=time_series_tensor)[(- 1)]\n    unmasked_negindices = (tf.cast((~ broadcast_mask), tf.int32) * tf.range(num_timesteps, 0, (- 1)))\n    first_unmasked_indices = (num_timesteps - tf.reduce_max(input_tensor=unmasked_negindices, axis=(- 1)))\n    if (first_unmasked_indices.shape.ndims is None):\n        raise NotImplementedError('Cannot compute initial values of a masked time series withdynamic rank.')\n    return tf.squeeze(tf.compat.v1.batch_gather(params=time_series_tensor, indices=first_unmasked_indices[(..., tf.newaxis)]), axis=(- 1))", "docstring": "Get the first unmasked entry of each time series in the batch.\n\nArgs:\ntime_series_tensor: float `Tensor` of shape [..., num_timesteps].\nbroadcast_mask: bool `Tensor` of same shape as `time_series`.", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n        hidden_states = outputs[0]\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.model.variables['params']['shared']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n        else:\n            lm_logits = module.lm_head(hidden_states)\n        lm_logits += module.final_logits_bias\n        return (lm_logits, outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        
return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration\n\n>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\"facebook/blenderbot-400M-distill\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot-400M-distill\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def update_instance(self, data):\n    for (key, val) in iteritems(data):\n        if (not hasattr(self, key)):\n            raise AttributeError('No field named {key} for model {model}'.format(key=key, model=self.__class__.__name__))\n        setattr(self, key, val)\n    self.save()\n    return self", "docstring": "Update a single record by id with the provided data.\n\nArgs:\ndata (dict): The new data to update the record with.\n\nReturns:\nself: This is an instance of itself with the updated data.\n\nRaises:\nAttributeError: This is raised if a key in the ``data`` isn't\na field on the model.", "source": "codesearchnet"}
{"code": "def _list(self, dir_or_prefix):\n    if not self.exists(dir_or_prefix):\n        return\n\n    def list_files(root):\n        for dirpath, _, files in os.walk(root):\n            for filename in files:\n                yield self.join(dirpath, filename)\n    try:\n        for f in list_files(dir_or_prefix):\n            try:\n                yield FileMetadata(f, os.path.getsize(f), os.path.getmtime(f))\n            except OSError:\n                pass\n    except Exception as e:\n        raise BeamIOError('List operation failed', {dir_or_prefix: e})", "docstring": "List files in a location.\n\nListing is non-recursive, for filesystems that support directories.\n\nArgs:\ndir_or_prefix: (string) A directory or location prefix (for filesystems\nthat don't have directories).\n\nReturns:\nGenerator of ``FileMetadata`` objects.\n\nRaises:\n``BeamIOError``: if listing fails, but not if no files were found.", "source": "github-repos"}
{"code": "def unravel_sections(section_data):\n        \n        sections = []\n        for type, subsection_list in section_data.items():\n            for section in subsection_list:\n                section['sectionType'] = type\n                sections.append(section)\n        return sections", "docstring": "Unravels section type dictionary into flat list of sections with\nsection type set as an attribute.\n\nArgs:\nsection_data(dict): Data return from py:method::get_sections\n\nReturns:\nlist: Flat list of sections with ``sectionType`` set to\ntype (i.e. recitation, lecture, etc)", "source": "juraj-google-style"}
{"code": "def _UnserializableObjectFallback(self, obj):\n    if isinstance(obj, libpython.PyInstanceObjectPtr):\n        in_class = obj.pyop_field('in_class')\n        result_dict = in_class.pyop_field('cl_dict').proxyval(set())\n        instanceproxy = obj.proxyval(set())\n        result_dict.update(instanceproxy.attrdict)\n        result_dict['__pyringe_type_name__'] = instanceproxy.cl_name\n        result_dict['__pyringe_address__'] = instanceproxy.address\n        return result_dict\n    if isinstance(obj, libpython.HeapTypeObjectPtr):\n        try:\n            type_ptr = obj.field('ob_type')\n            tp_dict = type_ptr.cast(GdbCache.TYPE)['tp_dict'].cast(GdbCache.DICT)\n            result_dict = libpython.PyDictObjectPtr(tp_dict).proxyval(set())\n        except gdb.error:\n            result_dict = {}\n        try:\n            result_dict.update(obj.get_attr_dict().proxyval(set()))\n            result_dict['__pyringe_type_name__'] = obj.safe_tp_name()\n            result_dict['__pyringe_address__'] = long(obj._gdbval)\n            return result_dict\n        except TypeError:\n            pass\n    try:\n        proxy = obj.proxyval(set())\n        if isinstance(proxy, dict):\n            return {str(key): val for (key, val) in proxy.iteritems()}\n        return proxy\n    except AttributeError:\n        return str(obj)", "docstring": "Handles sanitizing of unserializable objects for Json.\n\nFor instances of heap types, we take the class dict, augment it with the\ninstance's __dict__, tag it and transmit it over to the RPC client to be\nreconstructed there. (Works with both old and new style classes)\nArgs:\nobj: The object to Json-serialize\nReturns:\nA Json-serializable version of the parameter", "source": "codesearchnet"}
{"code": "def AddDescriptor(self, desc):\n    \n    if not isinstance(desc, descriptor.Descriptor):\n      raise TypeError('Expected instance of descriptor.Descriptor.')\n\n    self._descriptors[desc.full_name] = desc\n    self._AddFileDescriptor(desc.file)", "docstring": "Adds a Descriptor to the pool, non-recursively.\n\nIf the Descriptor contains nested messages or enums, the caller must\nexplicitly register them. This method also registers the FileDescriptor\nassociated with the message.\n\nArgs:\ndesc: A Descriptor.", "source": "juraj-google-style"}
{"code": "def __init__(self, **options):\n        \n        self._def = {}\n        for opt_name, opt_meta in options.items():\n            if _is_valid(opt_name):\n                self._def[opt_name] = opt_meta\n                self[opt_name] = opt_meta.default\n            else:\n                raise error.OptionError(opt_name)", "docstring": "Initialization of instances.\n\nArgs:\noptions (:class:`ConfOpt`): option metadata. The name of each\n*option* is the name of the keyword argument passed on to this\nfunction. Option names should be valid identifiers, otherwise\nan :class:`~loam.error.OptionError` is raised.", "source": "juraj-google-style"}
{"code": "def find_synonymous_field(field, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, root_preference=1.02):\n    fields = (util.listify(field) + list(synonyms(field)))\n    model = get_model(model, app)\n    available_field_names = model._meta.get_all_field_names()\n    (best_match, best_ratio) = (None, None)\n    for (i, field_name) in enumerate(fields):\n        match = fuzzy.extractOne(str(field_name), available_field_names)\n        if (match and (match[1] >= score_cutoff)):\n            if ((not best_match) or (match[1] > (root_preference * best_ratio))):\n                (best_match, best_ratio) = match\n    return best_match", "docstring": "Use a dictionary of synonyms and fuzzy string matching to find a similarly named field\n\nReturns:\nA single model field name (string)\n\nExamples:\n\n>>> find_synonymous_field('date', model='WikiItem')\n'end_date_time'\n>>> find_synonymous_field('date', model='WikiItem')\n'date_time'\n>>> find_synonymous_field('time', model='WikiItem')\n'date_time'", "source": "codesearchnet"}
{"code": "def kill_reporter(self, check_alive=True):\n    if PY3:\n        self._kill_process_type(ray_constants.PROCESS_TYPE_REPORTER, check_alive=check_alive)", "docstring": "Kill the reporter.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "codesearchnet"}
{"code": "def set_expected_update_frequency(self, update_frequency):\n    try:\n        int(update_frequency)\n    except ValueError:\n        update_frequency = Dataset.transform_update_frequency(update_frequency)\n    if (not update_frequency):\n        raise HDXError('Invalid update frequency supplied!')\n    self.data['data_update_frequency'] = update_frequency", "docstring": "Set expected update frequency\n\nArgs:\nupdate_frequency (str): Update frequency\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def to_service(self, service, version):\n        \n        service_url = self._service_locator.get_service_url(service, version)\n        return self.__copy_and_set('service_url', self.__strip_trailing_slashes(service_url))", "docstring": "Sets the service name and version the request should target\n\nArgs:\nservice (str): The name of the service as displayed in the services.json file\nversion (str): The version of the service as displayed in the services.json file\n\nReturns:\nThe request builder instance in order to chain calls", "source": "juraj-google-style"}
{"code": "def fetch(self):\n    raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Wait for the result of `RemoteValue` and return the numpy result.\n\nThis makes the value concrete by copying the remote value to local.\n\nReturns:\nThe numpy array structure of the actual output of the `tf.function`\nassociated with this `RemoteValue`, previously returned by a\n`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` call.\nThis can be a single value, or a structure of values, depending on the\noutput of the `tf.function`.\n\nRaises:\ntf.errors.CancelledError: If the function that produces this `RemoteValue`\nis aborted or cancelled due to failure.", "source": "github-repos"}
{"code": "def _ReadRecordSchemaAttributes(self, tables, file_object, record_offset):\n    \n    record_header = self._ReadRecordHeader(file_object, record_offset)\n\n    attribute_value_offsets = self._ReadRecordAttributeValueOffset(\n        file_object, record_offset + 24, 6)\n\n    file_offset = file_object.tell()\n    attribute_values_data_offset = file_offset - record_offset\n    attribute_values_data_size = record_header.data_size - (\n        file_offset - record_offset)\n    attribute_values_data = file_object.read(attribute_values_data_size)\n\n    relation_identifier = self._ReadAttributeValueInteger(\n        attribute_values_data, record_offset, attribute_values_data_offset,\n        attribute_value_offsets[0])\n\n    attribute_identifier = self._ReadAttributeValueInteger(\n        attribute_values_data, record_offset, attribute_values_data_offset,\n        attribute_value_offsets[1])\n\n    attribute_name_data_type = self._ReadAttributeValueInteger(\n        attribute_values_data, record_offset, attribute_values_data_offset,\n        attribute_value_offsets[2])\n\n    attribute_name = self._ReadAttributeValueString(\n        attribute_values_data, record_offset, attribute_values_data_offset,\n        attribute_value_offsets[3])\n\n    \n\n    attribute_data_type = self._ReadAttributeValueInteger(\n        attribute_values_data, record_offset, attribute_values_data_offset,\n        attribute_value_offsets[5])\n\n    table = tables.get(relation_identifier, None)\n    if not table:\n      raise errors.ParseError(\n          'Missing table for relation identifier: 0x{0:08}'.format(\n              relation_identifier))\n\n    if attribute_name is None and attribute_value_offsets[1] != 0:\n      attribute_value_offset = attribute_value_offsets[1]\n      attribute_value_offset -= attribute_values_data_offset + 1\n      attribute_name = attribute_values_data[\n          attribute_value_offset:attribute_value_offset + 4]\n      attribute_name = attribute_name.decode('ascii')\n\n    column = KeychainDatabaseColumn()\n    column.attribute_data_type = attribute_data_type\n    column.attribute_identifier = attribute_identifier\n    column.attribute_name = attribute_name\n\n    table.columns.append(column)\n\n    table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES, None)\n    if not table:\n      raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_ATTRIBUTES table.')\n\n    record = collections.OrderedDict({\n        'RelationID': relation_identifier,\n        'AttributeID': attribute_identifier,\n        'AttributeNameFormat': attribute_name_data_type,\n        'AttributeName': attribute_name,\n        'AttributeFormat': attribute_data_type})\n\n    table.records.append(record)", "docstring": "Reads a schema attributes (CSSM_DL_DB_SCHEMA_ATTRIBUTES) record.\n\nArgs:\ntables (dict[int, KeychainDatabaseTable]): tables per identifier.\nfile_object (file): file-like object.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\n\nRaises:\nParseError: if the record cannot be read.", "source": "juraj-google-style"}
{"code": "def iou(boxes1, boxes2):\n  \n  intersect = intersection(boxes1, boxes2)\n  area1 = area(boxes1)\n  area2 = area(boxes2)\n  union = np.expand_dims(area1, axis=1) + np.expand_dims(\n      area2, axis=0) - intersect\n  return intersect / union", "docstring": "Computes pairwise intersection-over-union between box collections.\n\nArgs:\nboxes1: a numpy array with shape [N, 4] holding N boxes.\nboxes2: a numpy array with shape [M, 4] holding M boxes.\n\nReturns:\na numpy array with shape [N, M] representing pairwise iou scores.", "source": "juraj-google-style"}
{"code": "def rename_custom_ops(model, map_custom_op_renames):\n    for op_code in model.operatorCodes:\n        if op_code.customCode:\n            op_code_str = op_code.customCode.decode('ascii')\n            if op_code_str in map_custom_op_renames:\n                op_code.customCode = map_custom_op_renames[op_code_str].encode('ascii')", "docstring": "Rename custom ops so they use the same naming style as builtin ops.\n\nArgs:\nmodel: The input tflite model.\nmap_custom_op_renames: A mapping from old to new custom op names.", "source": "github-repos"}
{"code": "def create_van_der_corput_samples(idx, number_base=2):\n    assert (number_base > 1)\n    idx = (numpy.asarray(idx).flatten() + 1)\n    out = numpy.zeros(len(idx), dtype=float)\n    base = float(number_base)\n    active = numpy.ones(len(idx), dtype=bool)\n    while numpy.any(active):\n        out[active] += ((idx[active] % number_base) / base)\n        idx \n        base *= number_base\n        active = (idx > 0)\n    return out", "docstring": "Van der Corput samples.\n\nArgs:\nidx (int, numpy.ndarray):\nThe index of the sequence. If array is provided, all values in\narray is returned.\nnumber_base (int):\nThe numerical base from where to create the samples from.\n\nReturns (float, numpy.ndarray):\nVan der Corput samples.", "source": "codesearchnet"}
{"code": "def get_file_size(file_object):\n    \n    position = file_object.tell()\n\n    file_object.seek(0, 2)\n    file_size = file_object.tell()\n    file_object.seek(position, 0)\n\n    return file_size", "docstring": "Returns the size, in bytes, of a file. Expects an object that supports\nseek and tell methods.\n\nArgs:\nfile_object (file_object) - The object that represents the file\n\nReturns:\n(int): size of the file, in bytes", "source": "juraj-google-style"}
{"code": "def from_json_and_lambdas(cls, file: str, lambdas):\n    with open(file, 'r') as f:\n        data = json.load(f)\n    return cls.from_dict(data, lambdas)", "docstring": "Builds a GrFN from a JSON object.\n\nArgs:\ncls: The class variable for object creation.\nfile: Filename of a GrFN JSON file.\n\nReturns:\ntype: A GroundedFunctionNetwork object.", "source": "codesearchnet"}
{"code": "def __init__(self, num_tasks):\n    self._num_tasks = num_tasks\n    self._next_task = 0", "docstring": "Create a new `_RoundRobinStrategy`.\n\nArgs:\nnum_tasks: Number of ps tasks to cycle among.", "source": "github-repos"}
{"code": "def is_ready(self, node_id, metadata_priority=True):\n        \n        if not self._can_send_request(node_id):\n            return False\n\n        \n        \n        if metadata_priority:\n            if self._metadata_refresh_in_progress:\n                return False\n            if self.cluster.ttl() == 0:\n                return False\n        return True", "docstring": "Check whether a node is ready to send more requests.\n\nIn addition to connection-level checks, this method also is used to\nblock additional requests from being sent during a metadata refresh.\n\nArguments:\nnode_id (int): id of the node to check\nmetadata_priority (bool): Mark node as not-ready if a metadata\nrefresh is required. Default: True\n\nReturns:\nbool: True if the node is ready and metadata is not refreshing", "source": "juraj-google-style"}
{"code": "def _entry_allocated_bitmap(self, entry_number):\n    (index, offset) = divmod(entry_number, 8)\n    return bool((self._bitmap[index] & (1 << offset)))", "docstring": "Checks if a particular index is allocated.\n\nArgs:\nentry_number (int): Index to verify\n\nReturns:\nbool: True if it is allocated, False otherwise.", "source": "codesearchnet"}
{"code": "def unzip(archive, destination, filenames=None):\n    close = False\n    try:\n        if (not isinstance(archive, zipfile.ZipFile)):\n            archive = zipfile.ZipFile(archive, 'r', allowZip64=True)\n            close = True\n        logger.info(('Extracting: %s -> %s' % (archive.filename, destination)))\n        if isinstance(filenames, str):\n            filenames = [filenames]\n        if (filenames is None):\n            filenames = archive.namelist()\n        for filename in filenames:\n            if filename.endswith('/'):\n                shell.mkdir(os.path.join(destination, filename))\n            elif (not _extract_file(archive, destination, filename)):\n                raise Exception()\n        logger.info(('Extracting zip archive \"%s\" succeeded' % archive.filename))\n        return True\n    except Exception:\n        logger.exception(('Error while unzipping archive %s' % archive.filename))\n        return False\n    finally:\n        if close:\n            archive.close()", "docstring": "Unzip a zip archive into destination directory.\n\nIt unzips either the whole archive or specific file(s) from the archive.\n\nUsage:\n>>> output = os.path.join(os.getcwd(), 'output')\n>>> # Archive can be an instance of a ZipFile class\n>>> archive = zipfile.ZipFile('test.zip', 'r')\n>>> # Or just a filename\n>>> archive = 'test.zip'\n>>> # Extracts all files\n>>> unzip(archive, output)\n>>> # Extract only one file\n>>> unzip(archive, output, 'my_file.txt')\n>>> # Extract a list of files\n>>> unzip(archive, output, ['my_file1.txt', 'my_file2.txt'])\n>>> unzip_file('test.zip', 'my_file.txt', output)\n\nArgs:\narchive (zipfile.ZipFile or str): Zipfile object to extract from or\npath to the zip archive.\ndestination (str): Path to the output directory.\nfilenames (str or list of str or None): Path(s) to the filename(s)\ninside the zip archive that you want to extract.", "source": "codesearchnet"}
{"code": "def __init__(self, name=IGNORED, origin=IGNORED, context=IGNORED):\n    if context != IGNORED and (not isinstance(context, dict)):\n        raise ValueError('context must be a Python dictionary.')\n    self.name = name\n    self.origin = origin\n    self.context = context", "docstring": "Creates a MetricsStructuredNameMatcher.\n\nAny property not passed in to the constructor will be ignored when matching.\n\nArgs:\nname: A string with the metric name.\norigin: A string with the metric namespace.\ncontext: A key:value dictionary that will be matched to the\nstructured name.", "source": "github-repos"}
{"code": "def __init__(self, promise):\n        \n        super(BrokenPromise, self).__init__()\n        self._promise = promise", "docstring": "Configure the broken promise error.\n\nArgs:\npromise (Promise): The promise that was not satisfied.", "source": "juraj-google-style"}
{"code": "def dqdv_cycle(cycle, splitter=True, **kwargs):\n    c_first = cycle.loc[(cycle['direction'] == (- 1))]\n    c_last = cycle.loc[(cycle['direction'] == 1)]\n    converter = Converter(**kwargs)\n    converter.set_data(c_first['capacity'], c_first['voltage'])\n    converter.inspect_data()\n    converter.pre_process_data()\n    converter.increment_data()\n    converter.post_process_data()\n    voltage_first = converter.voltage_processed\n    incremental_capacity_first = converter.incremental_capacity\n    if splitter:\n        voltage_first = np.append(voltage_first, np.NaN)\n        incremental_capacity_first = np.append(incremental_capacity_first, np.NaN)\n    converter = Converter(**kwargs)\n    converter.set_data(c_last['capacity'], c_last['voltage'])\n    converter.inspect_data()\n    converter.pre_process_data()\n    converter.increment_data()\n    converter.post_process_data()\n    voltage_last = converter.voltage_processed[::(- 1)]\n    incremental_capacity_last = converter.incremental_capacity[::(- 1)]\n    voltage = np.concatenate((voltage_first, voltage_last))\n    incremental_capacity = np.concatenate((incremental_capacity_first, incremental_capacity_last))\n    return (voltage, incremental_capacity)", "docstring": "Convenience functions for creating dq-dv data from given capacity and\nvoltage cycle.\n\nReturns the a DataFrame with a 'voltage' and a 'incremental_capacity'\ncolumn.\n\nArgs:\ncycle (pandas.DataFrame): the cycle data ('voltage', 'capacity',\n'direction' (1 or -1)).\nsplitter (bool): insert a np.NaN row between charge and discharge.\n\nReturns:\nList of step numbers corresponding to the selected steptype.\nReturns a pandas.DataFrame\ninstead of a list if pdtype is set to True.\n\nExample:\n>>> cycle_df = my_data.get_cap(\n>>> ...   1,\n>>> ...   categorical_column=True,\n>>> ...   method = \"forth-and-forth\"\n>>> ... )\n>>> voltage, incremental = ica.dqdv_cycle(cycle_df)", "source": "codesearchnet"}
{"code": "def plot_summaries(self, show=False, save=True, figure_type=None):\n    if (not figure_type):\n        figure_type = self.default_figure_type\n    if (not (figure_type in self.default_figure_types)):\n        logger.debug('unknown figure type selected')\n        figure_type = self.default_figure_type\n    (color_list, symbol_list) = self._create_colors_markers_list()\n    summary_df = self.summary_df\n    selected_summaries = self.selected_summaries\n    batch_dir = self.batch_dir\n    batch_name = self.name\n    (fig, ax) = plot_summary_figure(self.info_df, summary_df, color_list, symbol_list, selected_summaries, batch_dir, batch_name, show=show, save=save, figure_type=figure_type)\n    self.figure[figure_type] = fig\n    self.axes[figure_type] = ax", "docstring": "Plot summary graphs.\n\nArgs:\nshow: shows the figure if True.\nsave: saves the figure if True.\nfigure_type: optional, figure type to create.", "source": "codesearchnet"}
{"code": "def display_required_items(msg_type):\n    \n    print(\"Configure a profile for: \" + msg_type)\n    print(\"You will need the following information:\")\n    for k, v in CONFIG[msg_type][\"settings\"].items():\n        print(\"   * \" + v)\n    print(\"Authorization/credentials required:\")\n    for k, v in CONFIG[msg_type][\"auth\"].items():\n        print(\"   * \" + v)", "docstring": "Display the required items needed to configure a profile for the given\nmessage type.\n\nArgs:\n:msg_type: (str) message type to create config entry.", "source": "juraj-google-style"}
{"code": "def parse_attributes(attributes=None, classname=None):\n  \n  if not attributes:\n    attributes = {}\n  attributes.setdefault('class', DEFAULT_CLASS_NAME)\n  \n  if classname:\n    attributes['class'] = classname\n  return attributes", "docstring": "Parses attributes,\n\nArgs:\nattributes (dict): Input attributes.\nclassname (:obj:`str`, optional): Class name of output SPAN tags.\n\nReturns:\nParsed attributes. (dict)", "source": "juraj-google-style"}
{"code": "def write_files(abs_data, basename='absorption', prefix=None, directory=None):\n    for (i, absorption) in enumerate(abs_data):\n        num_txt = ('_{}'.format((i + 1)) if (len(abs_data) > 1) else '')\n        prefix_txt = ('{}_'.format(prefix) if prefix else '')\n        filename = (((prefix_txt + basename) + num_txt) + '.dat')\n        if directory:\n            filename = os.path.join(directory, filename)\n        header = 'energy(eV)'\n        if (len(absorption[1].shape) == 2):\n            header += ' alpha_xx alpha_yy alpha_zz'\n            data = np.concatenate((absorption[0][(:, None)], absorption[1]), axis=1)\n        else:\n            header += ' alpha'\n            data = np.stack((absorption[0], absorption[1]), axis=1)\n        np.savetxt(filename, data, header=header)", "docstring": "Write the absorption or loss spectra to a file.\n\nNote that this function expects to receive an iterable series of spectra.\n\nArgs:\nabs_data (tuple): Series (either :obj:`list` or :obj:`tuple`) of\noptical absorption or loss spectra. Each spectrum should be\nformatted as a :obj:`tuple` of :obj:`list` of :obj:`float`. If the\ndata has been averaged, each spectrum should be::\n\n([energies], [alpha])\n\nElse, if the data has not been averaged, each spectrum should be::\n\n([energies], [alpha_xx, alpha_yy, alpha_zz]).\n\nprefix (:obj:`str`, optional): Prefix for file names.\ndirectory (:obj:`str`, optional): The directory in which to save files.", "source": "codesearchnet"}
{"code": "def merge_requests(self, **kwargs):\n    path = ('%s/%s/merge_requests' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge requests related to the commit.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the references could not be retrieved\n\nReturns:\nlist: The merge requests related to the commit.", "source": "codesearchnet"}
{"code": "def __init__(self, name, exchange, topics=[], enable_ping=True,\n                 listen_all=False):\n        \n        self.name = name\n        self.exchange = exchange\n        self.topics = topics\n        self.listeners = []\n        self.listen_all = listen_all\n\n        if enable_ping:\n            self.listeners.append(self._handle_ping)\n            if 'ping' not in self.topics:\n                self.topics.append('ping')\n\n        self._channel = None\n        self._conn = None\n        self._queue_name = None", "docstring": "Initialize the client with connection settings.\n\nArgs:\nname; name of the client\nexchange: name of the exchange to connect to\ntopics: list of routing keys to listen to\nenable_ping: enable answering to ping requests\n\nBy default, the 'ping' routing key will be added in order to enable\nresponse to ping requests expect specified otherwise.", "source": "juraj-google-style"}
{"code": "def _get_filename_from_url(url):\n    parse = urlparse(url)\n    return os.path.basename(parse.path)", "docstring": "Return a filename from a URL\n\nArgs:\nurl (str): URL to extract filename from\n\nReturns:\n(str): Filename in URL", "source": "codesearchnet"}
{"code": "def gumbel_sample(shape):\n    uniform_samples = tf.random_uniform(shape, minval=1e-05, maxval=0.99998)\n    return (- tf.log((- tf.log(uniform_samples))))", "docstring": "Sample from the Gumbel distribution, protect from overflows.\n\nArgs:\nshape: Shape of Gumbel samples.\n\nReturns:\nNoise drawn from Gumbel distribution.", "source": "codesearchnet"}
{"code": "def compute_inv_covariance(L_aug, Y, k, p):\n    \n    return np.linalg.inv(compute_covariance(L_aug, Y, k, p))", "docstring": "Given label matrix L and labels Y, compute the covariance.\n\nArgs:\nL: (np.array) [n, d] The augmented (indicator) label matrix\nY: (np.array int) [n] The true labels in {1,...,k}", "source": "juraj-google-style"}
{"code": "def group_items(items, groupids):\n    if callable(groupids):\n        keyfunc = groupids\n        pair_list = ((keyfunc(item), item) for item in items)\n    else:\n        pair_list = zip(groupids, items)\n    groupid_to_items = defaultdict(list)\n    for (key, item) in pair_list:\n        groupid_to_items[key].append(item)\n    return groupid_to_items", "docstring": "r\"\"\"\nGroups a list of items by group id.\n\nArgs:\nitems (Iterable): a list of items to group\ngroupids (Iterable or Callable): a corresponding list of item groupids\nor a function mapping an item to a groupid.\n\nReturns:\ndict: groupid_to_items: maps a groupid to a list of items\n\nCommandLine:\npython -m ubelt.util_dict group_items\n\nExample:\n>>> import ubelt as ub\n>>> items    = ['ham',     'jam',   'spam',     'eggs',    'cheese', 'banana']\n>>> groupids = ['protein', 'fruit', 'protein',  'protein', 'dairy',  'fruit']\n>>> groupid_to_items = ub.group_items(items, groupids)\n>>> print(ub.repr2(groupid_to_items, nl=0))\n{'dairy': ['cheese'], 'fruit': ['jam', 'banana'], 'protein': ['ham', 'spam', 'eggs']}", "source": "codesearchnet"}
{"code": "def remove_temp_dir_with_filepath(filepath, strategy):\n    remove_temp_dirpath(os.path.dirname(filepath), strategy)", "docstring": "Removes the temp path for file after writing is finished.\n\nArgs:\nfilepath: Original filepath that would be used without distribution.\nstrategy: The tf.distribute strategy object currently used.", "source": "github-repos"}
{"code": "def do_conneg(accept, supported):\n    for result in parse_accept_header(accept):\n        mime_type = result[0]\n        if (mime_type in supported):\n            return mime_type\n    return None", "docstring": "Parse accept header and look for preferred type in supported list.\n\nArguments:\naccept - HTTP Accept header\nsupported - list of MIME type supported by the server\n\nReturns:\nsupported MIME type with highest q value in request, else None.\n\nFIXME - Should replace this with negotiator2", "source": "codesearchnet"}
{"code": "def request(self, request_method, api_method, *args, **kwargs):\n        \n        url = self._build_url(api_method)\n        resp = requests.request(request_method, url, *args, **kwargs)\n\n        try:\n            rv = resp.json()\n        except ValueError:\n            raise RequestFailedError(resp, 'not a json body')\n\n        if not resp.ok:\n            raise RequestFailedError(resp, rv.get('error'))\n\n        return rv", "docstring": "Perform a request.\n\nArgs:\nrequest_method: HTTP method for this request.\napi_method: API method name for this request.\n*args: Extra arguments to pass to the request.\n**kwargs: Extra keyword arguments to pass to the request.\n\nReturns:\nA dict contains the request response data.\n\nRaises:\nRequestFailedError: Raises when BearyChat's OpenAPI responses\nwith status code != 2xx", "source": "juraj-google-style"}
{"code": "async def get_jsone_context_and_template(chain, parent_link, decision_link, tasks_for):\n    \n    if tasks_for == 'action':\n        jsone_context, tmpl = await get_action_context_and_template(\n            chain, parent_link, decision_link\n        )\n    else:\n        tmpl = await get_in_tree_template(decision_link)\n        jsone_context = await populate_jsone_context(\n            chain, parent_link, decision_link, tasks_for\n        )\n    return jsone_context, tmpl", "docstring": "Get the appropriate json-e context and template for any parent task.\n\nArgs:\nchain (ChainOfTrust): the chain of trust.\nparent_link (LinkOfTrust): the parent link to test.\ndecision_link (LinkOfTrust): the parent link's decision task link.\ntasks_for (str): the reason the parent link was created (cron,\nhg-push, action)\n\nReturns:\n(dict, dict): the json-e context and template.", "source": "juraj-google-style"}
{"code": "def add(self, timestamp, information):\n        \n        try:\n            item = Schema(CollectorStage.schema_event_items()).validate({\n                'timestamp': timestamp, 'information': information\n            })\n            self.events.append(item)\n        except SchemaError as exception:\n            Logger.get_logger(__name__).error(exception)\n            raise RuntimeError(str(exception))", "docstring": "Add event information.\n\nArgs:\ntimestamp (int): event timestamp.\ninformation (dict): event information.\n\nRaises:\nRuntimeError: when validation of parameters has failed.", "source": "juraj-google-style"}
{"code": "def delete(self, key, noreply=None):\n        \n        if noreply is None:\n            noreply = self.default_noreply\n        cmd = b'delete ' + self.check_key(key)\n        if noreply:\n            cmd += b' noreply'\n        cmd += b'\\r\\n'\n        results = self._misc_cmd([cmd], b'delete', noreply)\n        if noreply:\n            return True\n        return results[0] == b'DELETED'", "docstring": "The memcached \"delete\" command.\n\nArgs:\nkey: str, see class docs for details.\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nIf noreply is True, always returns True. Otherwise returns True if\nthe key was deleted, and False if it wasn't found.", "source": "juraj-google-style"}
{"code": "def recipe_anonymize(config, auth, from_project, from_dataset, to_project, to_dataset):\n    anonymize(config, {'auth': auth, 'bigquery': {'from': {'project': from_project, 'dataset': from_dataset}, 'to': {'project': to_project, 'dataset': to_dataset}}})", "docstring": "Copies tables and view from one dataset to another and anynonamizes all rows.\nUsed to create sample datasets for dashboards.\n\nArgs:\nauth (authentication) - Credentials used.\nfrom_project (string) - Original project to read from.\nfrom_dataset (string) - Original dataset to read from.\nto_project (string) - Anonymous data will be writen to.\nto_dataset (string) - Anonymous data will be writen to.", "source": "github-repos"}
{"code": "def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):\n        \n        return Element2D.clone(self, data, shared_data, new_type,\n                               *args, **overrides)", "docstring": "Clones the object, overriding data and parameters.\n\nArgs:\ndata: New data replacing the existing data\nshared_data (bool, optional): Whether to use existing data\nnew_type (optional): Type to cast object to\n*args: Additional arguments to pass to constructor\n**overrides: New keyword arguments to pass to constructor\n\nReturns:\nCloned Spline", "source": "juraj-google-style"}
{"code": "def FromBinary(cls, script_data, allow_unknown=True, show_rpcs=False):\n    curr = 0\n    records = []\n    header = cls.ParseHeader(script_data)\n    curr = header.header_length\n    cls.logger.debug('Parsed script header: %s, skipping %d bytes', header, curr)\n    record_count = 0\n    record_data = bytearray()\n    partial_match = None\n    match_offset = 0\n    while (curr < len(script_data)):\n        if ((len(script_data) - curr) < UpdateRecord.HEADER_LENGTH):\n            raise ArgumentError('Script ended with a partial record', remaining_length=(len(script_data) - curr))\n        (total_length, record_type) = struct.unpack_from('<LB', script_data[curr:])\n        cls.logger.debug('Found record of type %d, length %d', record_type, total_length)\n        record_data += script_data[curr:(curr + total_length)]\n        record_count += 1\n        curr += total_length\n        try:\n            if (show_rpcs and (record_type == SendRPCRecord.MatchType())):\n                cls.logger.debug('   {0}'.format(hexlify(record_data)))\n                record = SendRPCRecord.FromBinary(record_data[UpdateRecord.HEADER_LENGTH:], record_count)\n            elif (show_rpcs and (record_type == SendErrorCheckingRPCRecord.MatchType())):\n                cls.logger.debug('   {0}'.format(hexlify(record_data)))\n                record = SendErrorCheckingRPCRecord.FromBinary(record_data[UpdateRecord.HEADER_LENGTH:], record_count)\n            else:\n                record = UpdateRecord.FromBinary(record_data, record_count)\n        except DeferMatching as defer:\n            if (defer.partial_match is not None):\n                partial_match = defer.partial_match\n                match_offset = curr\n            continue\n        except DataError:\n            if ((record_count > 1) and partial_match):\n                record = partial_match\n                curr = match_offset\n            elif (not allow_unknown):\n                raise\n            elif (allow_unknown and (record_count > 1)):\n                raise ArgumentError('A record matched an initial record subset but failed matching a subsequent addition without leaving a partial_match')\n            else:\n                record = UnknownRecord(record_type, record_data[UpdateRecord.HEADER_LENGTH:])\n        record_count = 0\n        record_data = bytearray()\n        partial_match = None\n        match_offset = 0\n        records.append(record)\n    return UpdateScript(records)", "docstring": "Parse a binary update script.\n\nArgs:\nscript_data (bytearray): The binary data containing the script.\nallow_unknown (bool): Allow the script to contain unknown records\nso long as they have correct headers to allow us to skip them.\nshow_rpcs (bool): Show SendRPCRecord matches for each record rather than\nthe more specific operation\nRaises:\nArgumentError: If the script contains malformed data that cannot\nbe parsed.\nDataError: If the script contains unknown records and allow_unknown=False\n\nReturns:\nUpdateScript: The parsed update script.", "source": "codesearchnet"}
{"code": "def create_queue(self, register=False):\n    queue = asyncio.Queue(loop=self._loop)\n    if register:\n        self._work_queues.add(queue)\n    return queue", "docstring": "Create a new work queue and optionally register it.\n\nThis will make sure the queue is attached to the correct event loop.\nYou can optionally choose to automatically register it so that\nwait_idle() will block until the queue is empty.\n\nArgs:\nregister (bool): Whether to call register_workqueue() automatically.\n\nReturns:\nasyncio.Queue: The newly created queue.", "source": "codesearchnet"}
{"code": "def read_uint16(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack('%sH' % endian, 2)", "docstring": "Read 2 byte as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def print_info(self, obj=None, buf=sys.stdout):\n    if (not obj):\n        self._print_info(buf)\n        return True\n    b = False\n    for fn in (self._print_tool_info, self._print_package_info, self._print_suite_info, self._print_context_info):\n        b_ = fn(obj, buf, b)\n        b |= b_\n        if b_:\n            ((print >> buf), '')\n    if (not b):\n        ((print >> buf), (\"Rez does not know what '%s' is\" % obj))\n    return b", "docstring": "Print a status message about the given object.\n\nIf an object is not provided, status info is shown about the current\nenvironment - what the active context is if any, and what suites are\nvisible.\n\nArgs:\nobj (str): String which may be one of the following:\n- A tool name;\n- A package name, possibly versioned;\n- A context filepath;\n- A suite filepath;\n- The name of a context in a visible suite.", "source": "codesearchnet"}
{"code": "def FormatAST(ast, style_config=None, lines=None):\n    style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))\n    llines = pyparser.ParseCode(ast)\n    for lline in llines:\n        lline.CalculateFormattingInformation()\n    lines = _LineRangesToSet(lines)\n    _MarkLinesToFormat(llines, lines)\n    return reformatter.Reformat(_SplitSemicolons(llines), lines)", "docstring": "Format a parsed lib2to3 pytree.\n\nThis provides an alternative entry point to YAPF.\n\nArguments:\nunformatted_source: (unicode) The code to format.\nstyle_config: (string) Either a style name or a path to a file that contains\nformatting style settings. If None is specified, use the default style\nas set in style.DEFAULT_STYLE_FACTORY\nlines: (list of tuples of integers) A list of tuples of lines, [start, end],\nthat we want to format. The lines are 1-based indexed. It can be used by\nthird-party code (e.g., IDEs) when reformatting a snippet of code rather\nthan a whole file.\n\nReturns:\nThe source formatted according to the given formatting style.", "source": "github-repos"}
{"code": "def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:\n    if (size is None):\n        (width, height) = (1280, 720)\n    else:\n        (width, height) = size\n    if ((samples < 0) or ((samples & (samples - 1)) != 0)):\n        raise Exception(('Invalid number of samples: %d' % samples))\n    window = Window.__new__(Window)\n    window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)\n    return window", "docstring": "Create the main window.\n\nArgs:\nsize (tuple): The width and height of the window.\nsamples (int): The number of samples.\n\nKeyword Args:\nfullscreen (bool): Fullscreen?\ntitle (bool): The title of the window.\nthreaded (bool): Threaded?\n\nReturns:\nWindow: The main window.", "source": "codesearchnet"}
{"code": "def get_role(self, item, state_root, from_state=False):\n        \n        if from_state:\n            \n            if self._identity_view is None:\n                self.update_view(state_root)\n            value = self._identity_view.get_role(item)\n            return value\n\n        value = self._cache.get(item)\n        if value is None:\n            if self._identity_view is None:\n                self.update_view(state_root)\n            value = self._identity_view.get_role(item)\n            self._cache[item] = value\n        return value", "docstring": "Used to retrieve an identity role.\nArgs:\nitem (string): the name of the role to be fetched\nstate_root(string): The state root of the previous block.\nfrom_state (bool): Whether the identity value should be read\ndirectly from state, instead of using the cached values.\nThis should be used when the state_root passed is not from\nthe current chain head.", "source": "juraj-google-style"}
{"code": "def success(channel, post):\n    \n\n    \n    datapacks = [(\"Game\", post[0], True), (\"Upvotes\", post[2], True)]\n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"Link\",\n        post[1],\n        modulename=modulename,\n        colour=0xFF8800,\n        thumbnail=post[1],\n        datapacks=datapacks\n    )\n\n    return gui", "docstring": "Creates an embed UI containing the Reddit posts\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\npost (tuple): Tuples of (field, value, percentile)\n\nReturns:", "source": "juraj-google-style"}
{"code": "def append(self, annotation):\n    self._annotations[annotation.id] = annotation\n    self._dirty = True\n    return annotation", "docstring": "Add an annotation.\n\nArgs:\nannotation (gkeepapi.node.Annotation): An Annotation object.\n\nReturns:\ngkeepapi.node.Annotation: The Annotation.", "source": "codesearchnet"}
{"code": "def _Aff4Size(aff4_obj):\n    if (not isinstance(aff4_obj, aff4.AFF4Stream)):\n        message = 'Expected an instance of `%s` but received `%s`'\n        raise TypeError((message % (aff4.AFF4Stream, type(aff4_obj))))\n    return int(aff4_obj.Get(aff4_obj.Schema.SIZE))", "docstring": "Retrieves the total size in bytes of an AFF4 object.\n\nArgs:\naff4_obj: An AFF4 stream instance to retrieve size for.\n\nReturns:\nAn integer representing number of bytes.\n\nRaises:\nTypeError: If `aff4_obj` is not an instance of AFF4 stream.", "source": "codesearchnet"}
{"code": "def publishFeatureCollections(self, configs):\n        \n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        config = None\n        res = None\n        resItm = None\n        try:\n            res = []\n            if isinstance(configs, list):\n                for config in configs:\n                    if 'ReplaceTag' in config:\n\n                        resItm = {\"ReplaceTag\":config['ReplaceTag'] }\n                    else:\n                        resItm = {\"ReplaceTag\":\"{FeatureService}\" }\n\n                    if 'Zip' in config:\n                        resItm['FCInfo'] = self._publishFeatureCollection(config=config)\n\n\n                    if not resItm['FCInfo'] is None and 'id' in resItm['FCInfo']:\n                        print (\"%s feature collection created\" % resItm['FCInfo']['id'])\n                        res.append(resItm)\n                    else:\n                        print (str(resItm['FCInfo']))\n\n\n            return res\n\n        except common.ArcRestHelperError as e:\n            raise e\n        except Exception as e:\n\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                \"function\": \"publishFeatureCollections\",\n                \"line\": line,\n                \"filename\":  filename,\n                \"synerror\": synerror,\n            })\n\n        finally:\n            resItm = None\n            config = None\n\n            del resItm\n            del config\n\n            gc.collect()", "docstring": "Publishes feature collections to a feature service.\n\nArgs:\nconfigs (list): A list of JSON configuration feature service details to publish.\nReturns:\ndict: A dictionary of results objects.", "source": "juraj-google-style"}
{"code": "def memory_write16(self, addr, data, zone=None):\n        \n        return self.memory_write(addr, data, zone, 16)", "docstring": "Writes half-words to memory of a target system.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of half-words to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of half-words written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "juraj-google-style"}
{"code": "def laid_out_pcoord(self, mesh_axis):\n    divisor = list_product(self.shape.to_integer_list[(mesh_axis + 1):])\n    modulus = self.shape[mesh_axis].size\n\n    def my_fn(pnum):\n        return ((pnum \n    return self.slicewise(my_fn, self.laid_out_pnum())", "docstring": "Returns a LaidOutTensor containing the processor coordinate.\n\nArgs:\nmesh_axis: int.\n\nReturns:\nLaidOutTensor where each slice is an integer scalar.", "source": "codesearchnet"}
{"code": "def add_op_callback(self, callback):\n    if callback not in self._thread_local_data.op_callbacks:\n        self._thread_local_data.op_callbacks.append(callback)", "docstring": "Add a post-op callback to the context.\n\nA post-op callback is invoked immediately after an eager operation or\nfunction has finished execution or after a op has been added to a graph,\nproviding access to the op's type, name input and output tensors. Multiple\nop callbacks can be added, in which case the callbacks will be invoked in\nthe order in which they are added.\n\nArgs:\ncallback: a callable of the signature `f(op_type, inputs, attrs, outputs,\nop_name=None, graph=None)`. See doc strings in `op_callbacks.py` for\ndetails on the function signature and its semantics.", "source": "github-repos"}
{"code": "def append_to_list(self, key, *value, pipeline=False):\n    if pipeline:\n        self._pipeline.rpush(key, *value)\n    else:\n        self._db.rpush(key, *value)", "docstring": "Add new element to the end of the list stored at key.\n\nArgs:\nkey (str): Key where the list is stored\nvalue: Value to add to the list\npipeline (bool): True, start a transaction block. Default false.", "source": "codesearchnet"}
{"code": "def set_compare_estimator_and_feature_spec(self, estimator, feature_spec):\n    self.delete('compare_custom_predict_fn')\n    self.store('compare_estimator_and_spec', {'estimator': estimator, 'feature_spec': feature_spec})\n    self.set_compare_inference_address('estimator')\n    if (not self.has_compare_model_name()):\n        self.set_compare_model_name('2')\n    return self", "docstring": "Sets a second model for inference as a TF Estimator.\n\nIf you wish to compare the results of two models in WIT, use this method\nto setup the details of the second model.\n\nInstead of using TF Serving to host a model for WIT to query, WIT can\ndirectly use a TF Estimator object as the model to query. In order to\naccomplish this, a feature_spec must also be provided to parse the\nexample protos for input into the estimator.\n\nArgs:\nestimator: The TF Estimator which will be used for model inference.\nfeature_spec: The feature_spec object which will be used for example\nparsing.\n\nReturns:\nself, in order to enabled method chaining.", "source": "codesearchnet"}
{"code": "def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs, grouped_updates=None, grouped_session_args=None, with_loss_tensor=False):\n    all_inputs = flatten_per_replica_values(distribution_strategy, grouped_inputs)\n    all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor)\n    if grouped_updates:\n        all_updates = flatten_per_replica_values(distribution_strategy, grouped_updates)\n    else:\n        all_updates = None\n    all_session_args = {}\n    if grouped_session_args:\n        grouped_feed_dict = grouped_session_args.get('feed_dict')\n        if grouped_feed_dict:\n            all_session_args['feed_dict'] = flatten_per_replica_values(distribution_strategy, grouped_feed_dict)\n        grouped_fetches = grouped_session_args.get('fetches')\n        if grouped_fetches:\n            all_session_args['fetches'] = flatten_per_replica_values(distribution_strategy, grouped_fetches)\n    return (all_inputs, all_outputs, all_updates, all_session_args)", "docstring": "Unwrap the list of values contained in the PerReplica parameters.\n\nThis function calls `flatten_per_replica_values` to parse each of the input\nparameters into a list of values on the different devices. If we set\n`with_loss_tensor` to be True, we also call `reduce` on the list of losses on\nthe different devices to give us one loss tensor.\n\nArgs:\ndistribution_strategy: DistributionStrategy used to distribute training and\nvalidation.\ngrouped_inputs: PerReplica inputs returned from the train or test function\nthat we ran on each device.\ngrouped_outputs: PerReplica outputs returned from the train or test function\nthat we ran on each device.\ngrouped_updates: PerReplica updates returned from the train or test function\nthat we ran on each device.\ngrouped_session_args: PerReplica session args returned from the train or\ntest function that we ran on each device.\nwith_loss_tensor: Boolean that indicates if we need to add the reduced loss\ntensor as one of the outputs.\n\nReturns:\nValues of each of the PerReplica parameters.", "source": "github-repos"}
{"code": "def to_pytd_type_of_instance(self, val: abstract.BaseValue) -> pytd.Type:\n    if val is self._ctx.consts.Any:\n        return pytd.AnythingType()\n    elif val is self._ctx.consts[None]:\n        return pytd.NamedType('builtins.NoneType')\n    elif isinstance(val, abstract.Union):\n        return pytd_utils.JoinTypes((self.to_pytd_type_of_instance(v) for v in val.options))\n    elif isinstance(val, abstract.SimpleClass):\n        return pytd.NamedType(val.name)\n    else:\n        raise NotImplementedError(f'to_pytd_type_of_instance() not implemented for {val.__class__.__name__}: {val}')", "docstring": "Returns the type of an instance of the abstract value, as a pytd node.\n\nFor example, if the abstract value is:\nInterpreterClass(C)\nthen to_pytd_type_of_instance() produces:\npytd.NamedType(C)\n\nArgs:\nval: The abstract value.", "source": "github-repos"}
{"code": "def add_inputs(self, *args, **kwargs):\n    if 'names' in kwargs:\n        return [self._inputs.add(arg, name=name) for arg, name in zip(args, kwargs['names'])]\n    else:\n        return [self._inputs.add(arg) for arg in args]", "docstring": "Add a sequence of inputs to the function invocation.\n\nArgs:\n*args: List of inputs to be converted (should be Tf.Tensor).\n**kwargs: This allows 'names' which should be a list of names.\n\nReturns:\nWrapped inputs (identity standins that have additional metadata). These\nare also are also tf.Tensor's.", "source": "github-repos"}
{"code": "def _GetIntegerValue(self, row, value_name):\n    \n    value = row.get(value_name, None)\n    try:\n      return int(value, 10)\n    except (TypeError, ValueError):\n      return None", "docstring": "Converts a specific value of the row to an integer.\n\nArgs:\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\nvalue_name (str): name of the value within the row.\n\nReturns:\nint: value or None if the value cannot be converted.", "source": "juraj-google-style"}
{"code": "def on_epoch_begin(self, epoch, logs=None):", "docstring": "Called at the start of an epoch.\n\nSubclasses should override for any actions to run. This function should only\nbe called during TRAIN mode.\n\nArgs:\nepoch: Integer, index of epoch.\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def detect_functions_called(contract):\n    result = []\n    for func in contract.all_functions_called:\n        for node in func.nodes:\n            for ir in node.irs:\n                if isinstance(ir, (InternalCall, SolidityCall)):\n                    result.append(ir.function)\n    return result", "docstring": "Returns a list of InternallCall, SolidityCall\ncalls made in a function\n\nReturns:\n(list): List of all InternallCall, SolidityCall", "source": "codesearchnet"}
{"code": "def make(cls, name: str, *, def_opcode: 'opcodes.Opcode', code: 'blocks.OrderedCode', f_locals: _instances.LazyConcreteDict, f_globals: _instances.LazyConcreteDict, defaults, kw_defaults, closure, annotations: 'dict[str, _base.BaseValue]', ctx: 'context.Context'):\n    annotations = annotations or {}\n    overloads = ctx.vm.frame.overloads[name]\n    if f_locals == ctx.convert.unsolvable:\n        local_members = {}\n    else:\n        local_members = f_locals.members\n    key = (name, code, _hash_all_dicts((f_globals.members, set(code.names)), (local_members, set(local_members) - set(code.varnames)), ({key: ctx.program.NewVariable([value], [], ctx.root_node) for key, value in annotations.items()}, None), (dict(enumerate((ctx.program.NewVariable([f], [], ctx.root_node) for f in overloads))), None), (dict(enumerate(defaults)), None), (dict(enumerate(closure or ())), None)))\n    if key not in ctx.function_cache:\n        ctx.function_cache[key] = cls(name, def_opcode, code, f_locals, f_globals, defaults, kw_defaults, closure, annotations, overloads, ctx)\n    elif closure:\n        ctx.function_cache[key].closure = closure\n    f = ctx.function_cache[key]\n    ctx.vm.frame.functions_created_in_frame[f.name.rsplit('.')[-1]].append(f)\n    return f", "docstring": "Get an InterpreterFunction.\n\nThings like anonymous functions and generator expressions are created\nevery time the corresponding code executes. Caching them makes it easier\nto detect when the environment hasn't changed and a function call can be\noptimized away.\n\nArguments:\nname: Function name.\ndef_opcode: The opcode for the def statement\ncode: A code object.\nf_locals: The locals used for name resolution.\nf_globals: The globals used for name resolution.\ndefaults: Default arguments.\nkw_defaults: Default arguments for kwonly parameters.\nclosure: The free variables this closure binds to.\nannotations: Function annotations. Dict of name -> BaseValue.\nctx: context.Context instance.\n\nReturns:\nAn InterpreterFunction.", "source": "github-repos"}
{"code": "def hex_is_dark(hexx, percent=50):\n    \n    r, g, b = hex_to_rgb(hexx)\n    luma = (0.2126 * r + 0.7152 * g + 0.0722 * b) / 2.55  \n\n    return (luma < percent)", "docstring": "Function to decide if a hex colour is dark.\n\nArgs:\nhexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\nbool: The colour's brightness is less than the given percent.", "source": "juraj-google-style"}
{"code": "def debye_integral(y):\n        \n        \n        \n        \n        factor = 3. / y ** 3\n        if y < 155:\n            integral = quadrature(lambda x: x ** 3 / (np.exp(x) - 1.), 0, y)\n            return list(integral)[0] * factor\n        else:\n            return 6.493939 * factor", "docstring": "Debye integral. Eq(5) in  doi.org/10.1016/j.comphy.2003.12.001\n\nArgs:\ny (float): debye temperature/T, upper limit\n\nReturns:\nfloat: unitless", "source": "juraj-google-style"}
{"code": "def check_type(value, type_def):\n    if (type_def == 'integer'):\n        try:\n            int(value)\n            return True\n        except ValueError:\n            return (isinstance(value, six.integer_types) and (not isinstance(value, bool)))\n    elif (type_def == 'number'):\n        return (isinstance(value, (six.integer_types, float)) and (not isinstance(value, bool)))\n    elif (type_def == 'string'):\n        return isinstance(value, (six.text_type, six.string_types, datetime.datetime))\n    elif (type_def == 'boolean'):\n        return (isinstance(value, bool) or (isinstance(value, (six.text_type, six.string_types)) and (value.lower() in ['true', 'false'])))\n    else:\n        return False", "docstring": "Check if the value is in the type given in type_def.\n\nArgs:\nvalue: the var to test.\ntype_def: string representing the type in swagger.\n\nReturns:\nTrue if the type is correct, False otherwise.", "source": "codesearchnet"}
{"code": "def _arguments(code, module):\n  \n  arg_parser = CommandParser.create('')\n  try:\n    \n    builtins = {'source': _table, 'datestring': _datestring}\n    env = {}\n    env.update(builtins)\n\n    \n    exec(code, env)\n\n    \n    \n    for key in env:\n\n      \n      if key in builtins or key[0] == '_':\n        continue\n      \n      \n      \n      \n      \n\n      val = env[key]\n      key = '--%s' % key\n\n      if isinstance(val, bool):\n        if val:\n          arg_parser.add_argument(key, default=val, action='store_true')\n        else:\n          arg_parser.add_argument(key, default=val, action='store_false')\n      elif isinstance(val, basestring) or isinstance(val, int) or isinstance(val, float) \\\n              or isinstance(val, int):\n        arg_parser.add_argument(key, default=val)\n      elif isinstance(val, list):\n        arg_parser.add_argument(key, default=val, nargs='+')\n      elif isinstance(val, tuple):\n        arg_parser.add_argument(key, default=list(val), nargs='+')\n\n      \n      elif isinstance(val, dict) and 'type' in val:\n        if val['type'] == 'datestring':\n          arg_parser.add_argument(key, default='',\n                                  type=_make_string_formatter(val['format'],\n                                                              offset=val['offset']))\n        elif val['type'] == 'table':\n          if val['format'] is not None:\n            arg_parser.add_argument(key, default='',\n                                    type=_make_table_formatter(val['format'],\n                                                               offset=val['offset']))\n          else:\n            arg_parser.add_argument(key, default=val['name'], type=_make_table)\n        else:\n          raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))\n      else:\n        raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))\n\n  except Exception as e:\n    print(\"%%sql arguments: %s from code '%s'\" % (str(e), str(code)))\n  return arg_parser", "docstring": "Define pipeline arguments.\n\nArgs:\ncode: the Python code to execute that defines the arguments.", "source": "juraj-google-style"}
{"code": "def __init__(self, wrapped: message.Message, unused_context: Context) -> None:\n    self.wrapped = wrapped", "docstring": "Initializes a new PrimitiveWrapper with wrapped.\n\nArgs:\nwrapped: The primitive message to wrap.", "source": "github-repos"}
{"code": "def TempDirPath(suffix='', prefix='tmp'):\n    precondition.AssertType(suffix, Text)\n    precondition.AssertType(prefix, Text)\n    return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())", "docstring": "Creates a temporary directory based on the environment configuration.\n\nThe directory will be placed in folder as specified by the `TEST_TMPDIR`\nenvironment variable if available or fallback to `Test.tmpdir` of the current\nconfiguration if not.\n\nArgs:\nsuffix: A suffix to end the directory name with.\nprefix: A prefix to begin the directory name with.\n\nReturns:\nAn absolute path to the created directory.", "source": "codesearchnet"}
{"code": "def read(name, default=None, allow_none=False, fallback=None):\n    \n    raw_value = environ.get(name)\n    if raw_value is None and fallback is not None:\n        if not isinstance(fallback, builtins.list) and not isinstance(fallback, builtins.tuple):\n            fallback = [fallback]\n\n        for fall in fallback:\n            raw_value = environ.get(fall)\n            if raw_value is not None:\n                break\n\n    if raw_value or raw_value == '':\n        return raw_value\n    elif default is not None or allow_none:\n        return default\n    else:\n        raise KeyError('Set the \"{0}\" environment variable'.format(name))", "docstring": "Read the raw env value.\n\nRead the raw environment variable or use the default. If the value is not\nfound and no default is set throw an exception.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)\nfallback: A list of fallback env variables to try and read if the primary environment\nvariable is unavailable.", "source": "juraj-google-style"}
{"code": "def check_semidefinite_positiveness(A):\n    B = empty_like(A)\n    B[:] = A\n    B[diag_indices_from(B)] += sqrt(finfo(float).eps)\n    try:\n        cholesky(B)\n    except LinAlgError:\n        return False\n    return True", "docstring": "Check if ``A`` is a semi-definite positive matrix.\n\nArgs:\nA (array_like): Matrix.\n\nReturns:\nbool: ``True`` if ``A`` is definite positive; ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def WriteEventBody(self, event):\n    \n    for field_name in self._fields:\n      if field_name == 'datetime':\n        output_value = self._FormatDateTime(event)\n      else:\n        output_value = self._dynamic_fields_helper.GetFormattedField(\n            event, field_name)\n\n      output_value = self._RemoveIllegalXMLCharacters(output_value)\n\n      \n      column_index = self._fields.index(field_name)\n      self._column_widths.setdefault(column_index, 0)\n\n      if field_name == 'datetime':\n        column_width = min(\n            self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2)\n      else:\n        column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2)\n\n      self._column_widths[column_index] = max(\n          self._MIN_COLUMN_WIDTH, self._column_widths[column_index],\n          column_width)\n      self._sheet.set_column(\n          column_index, column_index, self._column_widths[column_index])\n\n      if (field_name == 'datetime'\n          and isinstance(output_value, datetime.datetime)):\n        self._sheet.write_datetime(\n            self._current_row, column_index, output_value)\n      else:\n        self._sheet.write(self._current_row, column_index, output_value)\n\n    self._current_row += 1", "docstring": "Writes the body of an event object to the spreadsheet.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def CredibleInterval(pmf, percentage=90):\n    \n    cdf = pmf.MakeCdf()\n    prob = (1 - percentage / 100.0) / 2\n    interval = cdf.Value(prob), cdf.Value(1 - prob)\n    return interval", "docstring": "Computes a credible interval for a given distribution.\n\nIf percentage=90, computes the 90% CI.\n\nArgs:\npmf: Pmf object representing a posterior distribution\npercentage: float between 0 and 100\n\nReturns:\nsequence of two floats, low and high", "source": "juraj-google-style"}
{"code": "def __init__(self, enum_values, case_sensitive=True):\n    \n    if not enum_values:\n      raise ValueError(\n          'enum_values cannot be empty, found \"{}\"'.format(enum_values))\n    super(EnumParser, self).__init__()\n    self.enum_values = enum_values\n    self.case_sensitive = case_sensitive", "docstring": "Initializes EnumParser.\n\nArgs:\nenum_values: [str], a non-empty list of string values in the enum.\ncase_sensitive: bool, whether or not the enum is to be case-sensitive.\n\nRaises:\nValueError: When enum_values is empty.", "source": "juraj-google-style"}
{"code": "def parse_date(value):\n    \n    if not value:\n        return None\n\n    if isinstance(value, datetime.date):\n        return value\n\n    return parse_datetime(value).date()", "docstring": "Attempts to parse `value` into an instance of ``datetime.date``. If\n`value` is ``None``, this function will return ``None``.\n\nArgs:\nvalue: A timestamp. This can be a string, datetime.date, or\ndatetime.datetime value.", "source": "juraj-google-style"}
{"code": "def can_match(cls, pattern: Pattern) -> bool:\n        \n        if not isinstance(pattern.expression, Operation) or isinstance(pattern.expression, CommutativeOperation):\n            return False\n\n        if op_len(pattern.expression) < 3:\n            return False\n\n        first, *_, last = op_iter(pattern.expression)\n\n        try:\n            cls._check_wildcard_and_get_name(first)\n            cls._check_wildcard_and_get_name(last)\n        except ValueError:\n            return False\n\n        return True", "docstring": "Check if a pattern can be matched with a sequence matcher.\n\nArgs:\npattern:\nThe pattern to check.\n\nReturns:\nTrue, iff the pattern can be matched with a sequence matcher.", "source": "juraj-google-style"}
{"code": "def write(self, output='jsonstat'):\n        \n\n        if output == 'jsonstat':\n            return json.dumps(self)\n        elif output == 'dataframe_list':\n            df_list = []\n            unnest_collection(self, df_list)\n            return df_list\n        else:\n            raise ValueError(\n                \"Allowed arguments are 'jsonstat' or 'dataframe_list'\")", "docstring": "Writes data from a Collection object to JSONstat or list of \\\nPandas Dataframes.\nArgs:\noutput(string): can accept 'jsonstat' or 'dataframe_list'\n\nReturns:\nSerialized JSONstat or a list of Pandas Dataframes,depending on \\\nthe 'output' parameter.", "source": "juraj-google-style"}
{"code": "def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:\n    new_rots = self._rots.map_tensor_fn(fn)\n    new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1)\n    return Rigid(new_rots, new_trans)", "docstring": "Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the\ntranslation/rotation dimensions respectively.\n\nArgs:\nfn:\nA Tensor -> Tensor function to be mapped over the Rigid\nReturns:\nThe transformed Rigid object", "source": "github-repos"}
{"code": "def __init__(self, underlying_runner=None, render_option=None, skip_display=True, force_compute=True, blocking=True):\n    self._underlying_runner = underlying_runner or direct_runner.DirectRunner()\n    self._render_option = render_option\n    self._in_session = False\n    self._skip_display = skip_display\n    self._force_compute = force_compute\n    self._blocking = blocking", "docstring": "Constructor of InteractiveRunner.\n\nArgs:\nunderlying_runner: (runner.PipelineRunner)\nrender_option: (str) this parameter decides how the pipeline graph is\nrendered. See display.pipeline_graph_renderer for available options.\nskip_display: (bool) whether to skip display operations when running the\npipeline. Useful if running large pipelines when display is not\nneeded.\nforce_compute: (bool) whether sequential pipeline runs can use cached data\nof PCollections computed from the previous runs including show API\ninvocation from interactive_beam module. If True, always run the whole\npipeline and compute data for PCollections forcefully. If False, use\navailable data and run minimum pipeline fragment to only compute data\nnot available.\nblocking: (bool) whether the pipeline run should be blocking or not.", "source": "github-repos"}
{"code": "def item_status(self, **kwargs):\n    path = self._get_id_path('item_status')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Check to see if a movie id is already added to a list.\n\nArgs:\nmovie_id: The id of the movie.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def _FormatArgToken(self, token_data):\n    \n    return {\n        'string': token_data.argument_value.rstrip('\\x00'),\n        'num_arg': token_data.argument_index,\n        'is': token_data.argument_name}", "docstring": "Formats an argument token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or\nAUT_ARG64 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def create(self, key, value, lease='1h'):\n        \n        return self._client.write(key, value, lease=lease)", "docstring": "Create key/value pair in Vault.\n\nArgs:\nkey (string): The data key.\nvalue (string): The data value.\nlease (string): The least time.", "source": "juraj-google-style"}
{"code": "def single_qubit_matrix_to_gates(mat: np.ndarray, tolerance: float=0) -> List[ops.SingleQubitGate]:\n    rotations = single_qubit_matrix_to_pauli_rotations(mat, tolerance)\n    return [(cast(ops.SingleQubitGate, pauli) ** ht) for (pauli, ht) in rotations]", "docstring": "Implements a single-qubit operation with few gates.\n\nArgs:\nmat: The 2x2 unitary matrix of the operation to implement.\ntolerance: A limit on the amount of error introduced by the\nconstruction.\n\nReturns:\nA list of gates that, when applied in order, perform the desired\noperation.", "source": "codesearchnet"}
{"code": "def _send_rpc(self, client, uuid, address, rpc, payload, timeout, key):\n    conn_id = self._validate_connection('send_rpc', uuid, key)\n    if (conn_id is None):\n        return\n    conn_data = self._connections[uuid]\n    conn_data['last_touch'] = monotonic()\n    slug = self._build_device_slug(uuid)\n    try:\n        resp = (yield self._manager.send_rpc(conn_id, address, (rpc >> 8), (rpc & 255), bytes(payload), timeout))\n    except Exception as exc:\n        self._logger.error(('Error in manager send rpc: %s' % str(exc)))\n        resp = {'success': False, 'reason': ('Internal error: %s' % str(exc))}\n    payload = {'client': client, 'type': 'response', 'operation': 'rpc'}\n    payload['success'] = resp['success']\n    if (resp['success'] is False):\n        payload['failure_reason'] = resp['reason']\n    else:\n        payload['status'] = resp['status']\n        payload['payload'] = binascii.hexlify(resp['payload'])\n    self._publish_response(slug, payload)", "docstring": "Send an RPC to a connected device\n\nArgs:\nclient (string): The client that sent the rpc request\nuuid (int): The id of the device we're opening the interface on\naddress (int): The address of the tile that we want to send the RPC to\nrpc (int): The id of the rpc that we want to send.\npayload (bytearray): The payload of arguments that we want to send\ntimeout (float): The number of seconds to wait for the response\nkey (string): The key to authenticate the caller", "source": "codesearchnet"}
{"code": "def build_url(self, data):\n        \n        query_part_one = []\n        query_part_two = []\n        keys_to_be_removed = []\n        for key, value in data.items():\n            if key not in ['version', 'restApi', 'resourcePath']:\n                if key == 'mapArea':\n                    query_part_one.append(','.join(str(val) for val in value))\n                    keys_to_be_removed.append(key)\n                elif key == 'includeLocationCodes':\n                    query_part_one.append(value)\n                    keys_to_be_removed.append(key)\n                else:\n                    if isinstance(value, list):\n                        value = ','.join(str(val) for val in value)\n                    query_part_two.append('{0}={1}'.format(key, value))\n                    keys_to_be_removed.append(key)\n        for k in keys_to_be_removed:\n            del data[k]\n        data['query'] = '{0}?{1}'.format('/'.join(query_part_one),\n                                         '&'.join(query_part_two))\n        return data", "docstring": "This method occurs after dumping the data into the class.\n\nArgs:\ndata (dict): dictionary of all the query values\n\nReturns:\ndata (dict): ordered dict of all the values", "source": "juraj-google-style"}
{"code": "def history(self, hash):\n        \n        txs = self._t.get(hash, max_transactions=10000)['transactions']\n        tree = defaultdict(list)\n        number_editions = 0\n\n        for tx in txs:\n            _tx = self._t.get(tx['txid'])\n            txid = _tx['txid']\n            verb_str = BlockchainSpider.check_script(_tx['vouts'])\n            verb = Spoolverb.from_verb(verb_str)\n            from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)\n            timestamp_utc = _tx['time']\n            action = verb.action\n\n            edition_number = 0\n            if action != 'EDITIONS':\n                edition_number = verb.edition_number\n            else:\n                number_editions = verb.num_editions\n\n            tree[edition_number].append({'txid': txid,\n                                         'verb': verb_str,\n                                         'from_address': from_address,\n                                         'to_address': to_address,\n                                         'piece_address': piece_address,\n                                         'timestamp_utc': timestamp_utc,\n                                         'action': action,\n                                         'number_editions': number_editions,\n                                         'edition_number': edition_number})\n\n        \n        \n        for edition, chain in tree.items():\n            [d.update({'number_editions': number_editions}) for d in chain]\n        return dict(tree)", "docstring": "Retrieve the ownership tree of all editions of a piece given the hash.\n\nArgs:\nhash (str): Hash of the file to check. Can be created with the\n:class:`File` class\n\nReturns:\ndict: Ownsership tree of all editions of a piece.\n\n.. note:: For now we only support searching the blockchain by\nthe piece hash.", "source": "juraj-google-style"}
{"code": "def track(self, event_key, user_id, attributes=None, event_tags=None):\n    \n\n    if not self.is_valid:\n      self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))\n      return\n\n    if not validator.is_non_empty_string(event_key):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))\n      return\n\n    if not isinstance(user_id, string_types):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n      return\n\n    if not self._validate_user_inputs(attributes, event_tags):\n      return\n\n    event = self.config.get_event(event_key)\n    if not event:\n      self.logger.info('Not tracking user \"%s\" for event \"%s\".' % (user_id, event_key))\n      return\n\n    conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)\n    self.logger.info('Tracking event \"%s\" for user \"%s\".' % (event_key, user_id))\n    self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (\n      conversion_event.url,\n      conversion_event.params\n    ))\n    try:\n      self.event_dispatcher.dispatch_event(conversion_event)\n    except:\n      self.logger.exception('Unable to dispatch conversion event!')\n    self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id,\n                                                attributes, event_tags, conversion_event)", "docstring": "Send conversion event to Optimizely.\n\nArgs:\nevent_key: Event key representing the event which needs to be recorded.\nuser_id: ID for user.\nattributes: Dict representing visitor attributes and values which need to be recorded.\nevent_tags: Dict representing metadata associated with the event.", "source": "juraj-google-style"}
{"code": "def _sync_content_metadata(self, serialized_data, http_method):\n    try:\n        (status_code, response_body) = getattr(self, ('_' + http_method))(urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path), serialized_data, self.CONTENT_PROVIDER_SCOPE)\n    except requests.exceptions.RequestException as exc:\n        raise ClientError('DegreedAPIClient request failed: {error} {message}'.format(error=exc.__class__.__name__, message=str(exc)))\n    if (status_code >= 400):\n        raise ClientError('DegreedAPIClient request failed with status {status_code}: {message}'.format(status_code=status_code, message=response_body))", "docstring": "Synchronize content metadata using the Degreed course content API.\n\nArgs:\nserialized_data: JSON-encoded object containing content metadata.\nhttp_method: The HTTP method to use for the API request.\n\nRaises:\nClientError: If Degreed API request fails.", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    storage_format = cls._ParseStringOption(options, 'storage_format')\n    if not storage_format:\n      raise errors.BadConfigOption('Unable to determine storage format.')\n\n    if storage_format not in definitions.STORAGE_FORMATS:\n      raise errors.BadConfigOption(\n          'Unsupported storage format: {0:s}'.format(storage_format))\n\n    setattr(configuration_object, '_storage_format', storage_format)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: if the storage format is not defined or supported.", "source": "juraj-google-style"}
{"code": "def occupations( self, site_label ):\n        \n        return sum( atom.site.label == site_label for atom in self.atoms )", "docstring": "Number of these atoms occupying a specific site type.\n\nArgs:\nsite_label (Str): Label for the site type being considered.\n\nReturns:\n(Int): Number of atoms occupying sites of type `site_label`.", "source": "juraj-google-style"}
{"code": "def InventoryReceived(self, inventory):\n        \n        if inventory.Hash.ToBytes() in self._MissedBlocks:\n            self._MissedBlocks.remove(inventory.Hash.ToBytes())\n\n        if inventory is MinerTransaction:\n            return False\n\n        if type(inventory) is Block:\n            if BC.Default() is None:\n                return False\n\n            if BC.Default().ContainsBlock(inventory.Index):\n                return False\n\n            if not BC.Default().AddBlock(inventory):\n                return False\n\n        else:\n            if not inventory.Verify(self.MemPool.values()):\n                return False", "docstring": "Process a received inventory.\n\nArgs:\ninventory (neo.Network.Inventory): expect a Block type.\n\nReturns:\nbool: True if processed and verified. False otherwise.", "source": "juraj-google-style"}
{"code": "def extractDays(self, inp):\n        \n        inp = self._preprocess(inp)\n\n        def extractDayOfWeek(dayMatch):\n            if dayMatch.group(5) in self.__daysOfWeek__:\n                return self.__daysOfWeek__.index(dayMatch.group(5))\n            elif dayMatch.group(6) in self.__daysOfWeek__:\n                return self.__daysOfWeek__.index(dayMatch.group(6))\n\n        def extractMonth(dayMatch):\n            if dayMatch.group(7) in self.__months__:\n                return self.__months__.index(dayMatch.group(7)) + 1\n            elif dayMatch.group(7) in self.__shortMonths__:\n                return self.__shortMonths__.index(dayMatch.group(7)) + 1\n\n        def extractDay(dayMatch):\n            combined = dayMatch.group(8) + dayMatch.group(9)\n            if combined in self.__dateDescriptors__:\n                return self.__dateDescriptors__[combined]\n            elif dayMatch.group(8) in self.__dateDescriptors__:\n                return self.__dateDescriptors__[dayMatch.group(8)]\n            elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():\n                return int(dayMatch.group(8))\n\n        def extractDaysFrom(dayMatch):\n            if not dayMatch.group(1):\n                return 0\n\n            def numericalPrefix(dayMatch):\n                \n                prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')\n                prefix.reverse()\n                prefix = list(filter(lambda s: s != 'and', prefix))\n\n                \n                service = NumberService()\n                num = prefix[0]\n                if service.isValid(num):\n                    for n in prefix[1:]:\n                        inc = n + \" \" + num\n                        if service.isValid(inc):\n                            num = inc\n                        else:\n                            break\n                    return service.parse(num)\n                return 1\n\n            factor = numericalPrefix(dayMatch)\n\n            if dayMatch.group(2) == 'week':\n                return factor * 7\n            elif dayMatch.group(2) == 'day':\n                return factor * 1\n\n        def handleMatch(dayMatch):\n            def safe(exp):\n                \n                try:\n                    return exp()\n                except:\n                    return False\n\n            days_from = safe(lambda: extractDaysFrom(dayMatch))\n            today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)\n            tomorrow = safe(lambda: dayMatch.group(3)\n                            in self.__tomorrowMatches__)\n            next_week = safe(lambda: dayMatch.group(4) == 'next')\n            day_of_week = safe(lambda: extractDayOfWeek(dayMatch))\n            month = safe(lambda: extractMonth(dayMatch))\n            day = safe(lambda: extractDay(dayMatch))\n\n            \n            if not dayMatch:\n                return None\n            elif today:\n                d = self.now\n            elif tomorrow:\n                d = self.now + datetime.timedelta(days=1)\n            elif type(day_of_week) == int:\n                current_day_of_week = self.now.weekday()\n                num_days_away = (day_of_week - current_day_of_week) % 7\n\n                if next_week:\n                    num_days_away += 7\n\n                d = self.now + \\\n                    datetime.timedelta(days=num_days_away)\n            elif month and day:\n                d = datetime.datetime(\n                    self.now.year, month, 
day,\n                    self.now.hour, self.now.minute)\n\n            if days_from:\n                d += datetime.timedelta(days=days_from)\n\n            return d\n\n        matches = self._dayRegex.finditer(inp)\n        return [handleMatch(dayMatch) for dayMatch in matches]", "docstring": "Extracts all day-related information from an input string.\nIgnores any information related to the specific time-of-day.\n\nArgs:\ninp (str): Input string to be parsed.\n\nReturns:\nA list of datetime objects containing the extracted date from the\ninput snippet, or an empty list if none found.", "source": "juraj-google-style"}
{"code": "def download_listing(self, file: Optional[IO],\n                         duration_timeout: Optional[float]=None) -> \\\n            ListingResponse:\n        \n        if self._session_state != SessionState.directory_request_sent:\n            raise RuntimeError('File request not sent')\n\n        self._session_state = SessionState.file_request_sent\n\n        yield from self.download(file=file, rewind=False,\n                                 duration_timeout=duration_timeout)\n\n        try:\n            if self._response.body.tell() == 0:\n                listings = ()\n            elif self._listing_type == 'mlsd':\n                self._response.body.seek(0)\n\n                machine_listings = wpull.protocol.ftp.util.parse_machine_listing(\n                    self._response.body.read().decode('utf-8',\n                                                      errors='surrogateescape'),\n                    convert=True, strict=False\n                )\n                listings = list(\n                    wpull.protocol.ftp.util.machine_listings_to_file_entries(\n                        machine_listings\n                    ))\n            else:\n                self._response.body.seek(0)\n\n                file = io.TextIOWrapper(self._response.body, encoding='utf-8',\n                                        errors='surrogateescape')\n\n                listing_parser = ListingParser(file=file)\n\n                listings = list(listing_parser.parse_input())\n\n                _logger.debug('Listing detected as %s', listing_parser.type)\n\n                \n                file.detach()\n\n        except (ListingError, ValueError) as error:\n            raise ProtocolError(*error.args) from error\n\n        self._response.files = listings\n\n        self._response.body.seek(0)\n\n        self._session_state = SessionState.response_received\n\n        return self._response", "docstring": "Read file listings.\n\nArgs:\nfile: A file object or asyncio stream.\nduration_timeout: Maximum time in seconds of which the\nentire file must be read.\n\nReturns:\nA Response populated the file listings\n\nBe sure to call :meth:`start_file_listing` first.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def _print(self, *args):\n\n    def _format(name, arr):\n        \n        title = '\n        tlen = len(title)\n        print('-' * tlen)\n        print(title)\n        print('-' * tlen)\n        print(' Total \n        if arr:\n            for item in arr:\n                detail = ''\n                if isinstance(item[1], list):\n                    for itm in item[1]:\n                        detail += str(itm) + ', '\n                    detail = detail[:-2]\n                else:\n                    detail = str(item[1])\n                print(\"  %s ('%s')\\n\" % (str(item[0]), detail))\n        else:\n            print('  No %s' % name)\n        print('\\n')\n    for p_item in args:\n        if p_item == 'failures':\n            _format('Failures', self.failures)\n        elif p_item == 'successes':\n            _format('Successes', self.successes)\n        elif p_item == 'failure_msgs':\n            _format('Failure Messages', self.error_msg)\n        elif p_item == 'warning_msgs':\n            _format('Warning Messages', self.warning_msg)\n        else:\n            raise Exception('[Error] Wrong input provided for %s.' % _get_func_name())", "docstring": "Prints compatibility check status and failure or warning messages.\n\nPrints to console without using `logging`.\n\nArgs:\n*args: String(s) that is one of:\n[`failures`,       # all failures\n`successes`,      # all successes\n`failure_msgs`,   # failure message(s) recorded upon failure(s)\n`warning_msgs`]   # warning message(s) recorded upon warning(s)\nRaises:\nException: If *args not in:\n[`failures`, `successes`, `failure_msgs`, `warning_msg`]", "source": "github-repos"}
{"code": "def make_reply(self):\n    return Message(to=str(self.sender), sender=str(self.to), body=self.body, thread=self.thread, metadata=self.metadata)", "docstring": "Creates a copy of the message, exchanging sender and receiver\n\nReturns:\nspade.message.Message: a new message with exchanged sender and receiver", "source": "codesearchnet"}
{"code": "def has_shell_command(self, command) -> bool:\n    try:\n        output = self.shell(['command', '-v', command]).decode('utf-8').strip()\n        return command in output\n    except AdbError:\n        return False", "docstring": "Checks to see if a given check command exists on the device.\n\nArgs:\ncommand: A string that is the name of the command to check.\n\nReturns:\nA boolean that is True if the command exists and False otherwise.", "source": "github-repos"}
{"code": "def update_fetch_positions(self, partitions):\n        \n        \n        for tp in partitions:\n            if not self._subscriptions.is_assigned(tp):\n                log.warning(\"partition %s is not assigned - skipping offset\"\n                            \" update\", tp)\n                continue\n            elif self._subscriptions.is_fetchable(tp):\n                log.warning(\"partition %s is still fetchable -- skipping offset\"\n                            \" update\", tp)\n                continue\n\n            if self._subscriptions.is_offset_reset_needed(tp):\n                self._reset_offset(tp)\n            elif self._subscriptions.assignment[tp].committed is None:\n                \n                \n                self._subscriptions.need_offset_reset(tp)\n                self._reset_offset(tp)\n            else:\n                committed = self._subscriptions.assignment[tp].committed\n                log.debug(\"Resetting offset for partition %s to the committed\"\n                          \" offset %s\", tp, committed)\n                self._subscriptions.seek(tp, committed)", "docstring": "Update the fetch positions for the provided partitions.\n\nArguments:\npartitions (list of TopicPartitions): partitions to update\n\nRaises:\nNoOffsetForPartitionError: if no offset is stored for a given\npartition and no reset policy is available", "source": "juraj-google-style"}
{"code": "def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern):\n    \n    submissions = self._storage_client.list_blobs(\n        prefix=os.path.join(self._round_name, dir_suffix))\n    return {\n        id_pattern.format(idx): SubmissionDescriptor(\n            path=s, participant_id=participant_from_submission_path(s))\n        for idx, s in enumerate(submissions)\n    }", "docstring": "Loads list of submissions from the directory.\n\nArgs:\ndir_suffix: suffix of the directory where submissions are stored,\none of the folowing constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR\nor DEFENSE_SUBDIR.\nid_pattern: pattern which is used to generate (internal) IDs\nfor submissins. One of the following constants: ATTACK_ID_PATTERN,\nTARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN.\n\nReturns:\ndictionary with all found submissions", "source": "juraj-google-style"}
{"code": "def create(self, validated_data):\n    email_query = models.EmailAddress.objects.filter(email=self.validated_data['email'])\n    if email_query.exists():\n        email = email_query.get()\n        email.send_duplicate_notification()\n    else:\n        email = super(EmailSerializer, self).create(validated_data)\n        email.send_confirmation()\n        user = validated_data.get('user')\n        query = models.EmailAddress.objects.filter(is_primary=True, user=user)\n        if (not query.exists()):\n            email.set_primary()\n    return email", "docstring": "Create a new email and send a confirmation to it.\n\nReturns:\nThe newly creating ``EmailAddress`` instance.", "source": "codesearchnet"}
{"code": "def execute_by_options(args):\n    if (args['subcommand'] == 'sphinx'):\n        s = Sphinx(proj_info)\n        if args['quickstart']:\n            s.quickstart()\n        elif args['gen_code_api']:\n            s.gen_code_api()\n        elif args['rst2html']:\n            s.rst2html()\n        pass\n    elif (args['subcommand'] == 'offline_dist'):\n        pod = PyOfflineDist()\n        if args['freeze_deps']:\n            pod.freeze_deps()\n        elif args['download_deps']:\n            pod.download_deps()\n        elif args['install_deps']:\n            pod.install_deps()\n        elif args['clean_deps']:\n            pod.clean_deps()\n        elif args['mkbinary']:\n            pod.pyinstaller_mkbinary(args['mkbinary'])\n        elif args['clean_binary']:\n            pod.clean_binary()\n    pass", "docstring": "execute by argument dictionary\n\nArgs:\nargs (dict): command line argument dictionary", "source": "codesearchnet"}
{"code": "def valueReadPreprocessor(valueString, replaceParamsFile=None):\n    \n    if type(valueString) is bool:\n        log.warning(\"Only numerical variable types can be handled by the valueReadPreprocessor function.\")\n        return valueString\n\n    \n    processedValue = valueString\n\n    \n    if replaceParamsFile is not None and valueString is not None:\n        if '[' in valueString or ']' in valueString:\n            \n            processedValue = '{0}'.format(REPLACE_NO_VALUE)\n\n            \n            for targetParam in replaceParamsFile.targetParameters:\n                if targetParam.targetVariable == valueString:\n                    processedValue = '{0}'.format(-1 * targetParam.id)\n                    break\n\n    return processedValue", "docstring": "Apply global pre-processing to values during reading throughout the project.\n\nArgs:\nvalueString (str): String representing the value to be preprocessed.\nreplaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if\nreplacement variables are included in the project.\n\nReturns:\nstr: Processed value as a string", "source": "juraj-google-style"}
{"code": "def plot_series(filename, plot_kwargs=None):\n    \n    import matplotlib.pyplot as plt\n\n    if plot_kwargs is None:\n        plot_kwargs = {}\n\n    data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])\n    index = data['k']\n    values = data['v']\n    plt.plot(index, values, **plot_kwargs)", "docstring": "Plot series data from MonitorSeries output text file.\n\nArgs:\nfilename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.\nplot_kwags (dict, optional):\nKeyward arguments passed to :function:`matplotlib.pyplot.plot`.\n\nNote:\nmatplotlib package is required.", "source": "juraj-google-style"}
{"code": "def console_wait_for_keypress(flush: bool) -> Key:\n    key = Key()\n    lib.TCOD_console_wait_for_keypress_wrapper(key.key_p, flush)\n    return key", "docstring": "Block until the user presses a key, then returns a new Key.\n\nArgs:\nflush bool: If True then the event queue is cleared before waiting\nfor the next event.\n\nReturns:\nKey: A new Key instance.\n\n.. deprecated:: 9.3\nUse the :any:`tcod.event.wait` function to wait for events.", "source": "codesearchnet"}
{"code": "def resname_in_proximity(resname, model, chains, resnums, threshold=5):\n    residues = [r for r in model.get_residues() if (r.get_resname() == resname)]\n    chains = ssbio.utils.force_list(chains)\n    resnums = ssbio.utils.force_list(resnums)\n    for chain in chains:\n        for resnum in resnums:\n            my_residue_last_atom = model[chain][resnum].child_list[(- 1)]\n            for rz in residues:\n                distance = (rz.child_list[(- 1)] - my_residue_last_atom)\n                if (distance < threshold):\n                    return True\n    return False", "docstring": "Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.\n\nArgs:\nresname (str): Residue name to search for in proximity of specified chains + resnums\nmodel: Biopython Model object\nchains (str, list): Chain ID or IDs to check\nresnums (int, list): Residue numbers within the chain to check\nthreshold (float): Cutoff in Angstroms for returning True if a RESNAME is near\n\nReturns:\nbool: True if a RESNAME is within the threshold cutoff", "source": "codesearchnet"}
{"code": "def front(self, n):\n        \n        new_dtypes = (\n            self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]\n        )\n        \n        if self._is_transposed:\n            result = self.__constructor__(\n                self.data.transpose().take(0, n).transpose(),\n                self.index,\n                self.columns[:n],\n                new_dtypes,\n            )\n            result._is_transposed = True\n        else:\n            result = self.__constructor__(\n                self.data.take(1, n), self.index, self.columns[:n], new_dtypes\n            )\n        return result", "docstring": "Returns the first n columns.\n\nArgs:\nn: Integer containing the number of columns to return.\n\nReturns:\nDataManager containing the first n columns of the original DataManager.", "source": "juraj-google-style"}
{"code": "def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray:\n    return labels2onehot(proba2labels(proba, confident_threshold, classes), classes)", "docstring": "Convert vectors of probabilities to one-hot representations using confident threshold\n\nArgs:\nproba: samples where each sample is a vector of probabilities to belong with given classes\nconfident_threshold: boundary of probability to belong with a class\nclasses: array of classes' names\n\nReturns:\n2d array with one-hot representation of given samples", "source": "codesearchnet"}
{"code": "def send(self, message):\n        \n        body = {\n            'notificationType': self._notification_type,\n            'priority': self._priority,\n            'isOrganization': self._is_organization,\n            'message': message,\n        }\n\n        if self._recipients:\n            body['recipients'] = self._recipients\n\n        self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))\n\n        \n        resource = resource = self._tcex.resource('Notification')\n        resource.http_method = 'POST'\n        resource.body = json.dumps(body)\n\n        results = resource.request()  \n        if results.get('response').status_code == 200:\n            \n            response = results.get('response').json()\n        elif results.get('response').status_code == 400:\n            \n            \n            err = 'Failed to send notification ({})'.format(results.get('response').text)\n            self._tcex.log.error(err)\n            response = results.get('response').json()\n        else:\n            \n            err = 'Failed to send notification ({})'.format(results.get('response').text)\n            self._tcex.log.error(err)\n            raise RuntimeError(err)\n        return response", "docstring": "Send our message\n\nArgs:\nmessage (str): The message to be sent.\n\nReturns:\nrequests.models.Response: The response from the request.", "source": "juraj-google-style"}
{"code": "def tabledata_insert_all(self, table_name, rows):\n    url = ((Api._ENDPOINT + (Api._TABLES_PATH % table_name)) + '/insertAll')\n    data = {'kind': 'bigquery\n    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)", "docstring": "Issues a request to insert data into a table.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nrows: the data to populate the table, as a list of dictionaries.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def format_arguments(*args):\n    \n    positional_args = []\n    kwargs = {}\n    split_key = None\n\n    for arg in args:\n        if arg.startswith('--'):\n            arg = arg[2:]\n\n            if '=' in arg:\n                key, value = arg.split('=', 1)\n                kwargs[key.replace('-', '_')] = value\n            else:\n                split_key = arg.replace('-', '_')\n        elif split_key:\n            kwargs[split_key] = arg\n            split_key = None\n        else:\n            positional_args.append(arg)\n\n    return positional_args, kwargs", "docstring": "Converts a list of arguments from the command line into a list of\npositional arguments and a dictionary of keyword arguments.\n\nHandled formats for keyword arguments are:\n* --argument=value\n* --argument value\n\nArgs:\n*args (list): a list of arguments\n\nReturns:\n([positional_args], {kwargs})", "source": "juraj-google-style"}
{"code": "def retry_api_check(exception):\n  \n  if isinstance(exception, apiclient.errors.HttpError):\n    if exception.resp.status in TRANSIENT_HTTP_ERROR_CODES:\n      _print_error('Retrying...')\n      return True\n\n  if isinstance(exception, socket.error):\n    if exception.errno in TRANSIENT_SOCKET_ERROR_CODES:\n      _print_error('Retrying...')\n      return True\n\n  if isinstance(exception, oauth2client.client.AccessTokenRefreshError):\n    _print_error('Retrying...')\n    return True\n\n  \n  \n  if isinstance(exception, SSLError):\n    _print_error('Retrying...')\n    return True\n\n  \n  \n  if isinstance(exception, ServerNotFoundError):\n    _print_error('Retrying...')\n    return True\n\n  return False", "docstring": "Return True if we should retry. False otherwise.\n\nArgs:\nexception: An exception to test for transience.\n\nReturns:\nTrue if we should retry. False otherwise.", "source": "juraj-google-style"}
{"code": "def process_gatt_service(services, event):\n    length = (len(event.payload) - 5)\n    (handle, start, end, uuid) = unpack(('<BHH%ds' % length), event.payload)\n    uuid = process_uuid(uuid)\n    services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end}", "docstring": "Process a BGAPI event containing a GATT service description and add it to a dictionary\n\nArgs:\nservices (dict): A dictionary of discovered services that is updated with this event\nevent (BGAPIPacket): An event containing a GATT service", "source": "codesearchnet"}
{"code": "def set_ignores(self, folder, *patterns):\n        \n        if not patterns:\n            return {}\n        data = {'ignore': list(patterns)}\n        return self.post('ignores', params={'folder': folder}, data=data)", "docstring": "Applies ``patterns`` to ``folder``'s ``.stignore`` file.\n\nArgs:\nfolder (str):\npatterns (str):\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def ParseTextToDicts(self, *args, **kwargs):\n    result_lists = self.ParseText(*args, **kwargs)\n    result_dicts = []\n    for row in result_lists:\n        result_dicts.append(dict(zip(self.header, row)))\n    return result_dicts", "docstring": "Calls ParseText and turns the result into list of dicts.\n\nList items are dicts of rows, dict key is column header and value is column\nvalue.\n\nArgs:\ntext: (str), Text to parse with embedded newlines.\neof: (boolean), Set to False if we are parsing only part of the file.\nSuppresses triggering EOF state.\n\nRaises:\nTextFSMError: An error occurred within the FSM.\n\nReturns:\nList of dicts.", "source": "codesearchnet"}
{"code": "def set_shape(self, shape):\n    raise NotImplementedError", "docstring": "Overrides the shape for this variable.\n\nArgs:\nshape: the `TensorShape` representing the overridden shape.", "source": "github-repos"}
{"code": "def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):\n    sender = (('\"%s\" <%s>' % (sender_name, sender_email)) if sender_name else sender_email)\n    if (not current_app.testing):\n        try:\n            from flask_mail import Message\n            message = Message(subject, sender=sender, recipients=[recipient], html=html_message, body=text_message)\n            self.mail.send(message)\n        except (socket.gaierror, socket.error) as e:\n            raise EmailError('SMTP Connection error: Check your MAIL_SERVER and MAIL_PORT settings.')\n        except smtplib.SMTPAuthenticationError:\n            raise EmailError('SMTP Authentication error: Check your MAIL_USERNAME and MAIL_PASSWORD settings.')", "docstring": "Send email message via Flask-Mail.\n\nArgs:\nrecipient: Email address or tuple of (Name, Email-address).\nsubject: Subject line.\nhtml_message: The message body in HTML.\ntext_message: The message body in plain text.", "source": "codesearchnet"}
{"code": "def _generate_state(self, trans):\n        \n        state = PDAState()\n        state.id = self.nextstate()\n        state.type = 2\n        state.sym = state.id\n        state.trans = trans.copy()\n        self.toadd.append(state)\n        return state.id", "docstring": "Creates a new POP state (type - 2) with the same transitions.\nThe POPed symbol is the unique number of the state.\nArgs:\ntrans (dict): Transition dictionary\nReturns:\nInt: The state identifier", "source": "juraj-google-style"}
{"code": "def __init__(self, scope, parent, paren=False):\n        \n        CodeLiteral.__init__(self, scope, parent, None, 'null', paren)", "docstring": "Constructor for null literals.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\n\nKwargs:\nparen (bool): Whether the null literal is enclosed in parentheses.", "source": "juraj-google-style"}
{"code": "def affine_coupling(name, x, mid_channels=512, activation=\"relu\",\n                    reverse=False, dropout=0.0):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    x_shape = common_layers.shape_list(x)\n    x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)\n\n    \n    \n    \n    \n    \n    z1 = x1\n    log_scale_and_shift = conv_stack(\n        \"nn\", x1, mid_channels, x_shape[-1], activation=activation,\n        dropout=dropout)\n    shift = log_scale_and_shift[:, :, :, 0::2]\n    scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0)\n    if not reverse:\n      z2 = (x2 + shift) * scale\n    else:\n      z2 = x2 / scale - shift\n\n    objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])\n    if reverse:\n      objective *= -1\n    return tf.concat([z1, z2], axis=3), objective", "docstring": "Reversible affine coupling layer.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor.\nmid_channels: number of channels in the coupling layer.\nactivation: Can be either \"relu\" or \"gatu\".\nreverse: Forward or reverse operation.\ndropout: default, 0.0\nReturns:\noutput: x shifted and scaled by an affine transformation.\nobjective: log-determinant of the jacobian", "source": "juraj-google-style"}
{"code": "def remove_attribute(self, attr):\n        \n        update = [fapi._attr_rem(attr)]\n        r = fapi.update_workspace_attributes(self.namespace, self.name,\n                                                update, self.api_url)\n        self.data[\"workspace\"][\"attributes\"].pop(attr, None)\n        fapi._check_response_code(r, 200)", "docstring": "Remove attribute from a workspace.\n\nArgs:\nattr (str): attribute name", "source": "juraj-google-style"}
{"code": "def ReadPathInfoHistory(self, client_id, path_type, components):\n    \n    histories = self.ReadPathInfosHistories(client_id, path_type, [components])\n    return histories[components]", "docstring": "Reads a collection of hash and stat entry for given path.\n\nArgs:\nclient_id: An identifier string for a client.\npath_type: A type of a path to retrieve path history for.\ncomponents: A tuple of path components corresponding to path to retrieve\ninformation for.\n\nReturns:\nA list of `rdf_objects.PathInfo` ordered by timestamp in ascending order.", "source": "juraj-google-style"}
{"code": "def _format_batch_statuses(statuses, batch_ids, tracker):\n    \n    proto_statuses = []\n\n    for batch_id in batch_ids:\n        if statuses[batch_id] == \\\n           client_batch_submit_pb2.ClientBatchStatus.INVALID:\n            invalid_txns = tracker.get_invalid_txn_info(batch_id)\n            for txn_info in invalid_txns:\n                try:\n                    txn_info['transaction_id'] = txn_info.pop('id')\n                except KeyError as e:\n                    LOGGER.debug(e)\n        else:\n            invalid_txns = None\n\n        proto_statuses.append(\n            client_batch_submit_pb2.ClientBatchStatus(\n                batch_id=batch_id,\n                status=statuses[batch_id],\n                invalid_transactions=invalid_txns))\n\n    return proto_statuses", "docstring": "Takes a statuses dict and formats it for transmission with Protobuf and\nZMQ.\n\nArgs:\nstatuses (dict of int): Dict with batch ids as the key, status as value\nbatch_ids (list of str): The batch ids in their original order\ntracker (BatchTracker): A batch tracker with access to invalid info", "source": "juraj-google-style"}
{"code": "def easeInOutQuart(n):\n    _checkRange(n)\n    n = (2 * n)\n    if (n < 1):\n        return (0.5 * (n ** 4))\n    else:\n        n = (n - 2)\n        return ((- 0.5) * ((n ** 4) - 2))", "docstring": "A quartic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def cmd_ssh(options):\n    import os\n    import subprocess\n    from os.path import expanduser\n    options.inst_state = 'running'\n    (i_info, param_str) = gather_data(options)\n    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)\n    home_dir = expanduser('~')\n    if (options.user is None):\n        tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami'])\n        options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name'])\n    else:\n        debg.dprint('LoginUser set by user: ', options.user)\n    os_spec = {'nt': ['powershell plink', '\\\\', 'ppk']}\n    c_itm = os_spec.get(os.name, ['ssh', '/', 'pem'])\n    cmd_ssh_run = c_itm[0]\n    if (not options.nopem):\n        cmd_ssh_run += ' -i {0}{1}.aws{1}{2}.{3}'.format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])\n    else:\n        debg.dprint('Connect string: ', 'ssh {}@{}'.format(options.user, i_info[tar_idx]['pub_dns_name']))\n    cmd_ssh_run += ' {0}@{1}'.format(options.user, i_info[tar_idx]['pub_dns_name'])\n    print(cmd_ssh_run)\n    subprocess.call(cmd_ssh_run, shell=True)", "docstring": "Connect to the specified instance via ssh.\n\nFinds instances that match the user specified args that are also\nin the 'running' state.  The target instance is determined, the\nrequired connection information is retreived (IP, key and ssh\nuser-name), then an 'ssh' connection is made to the instance.\n\nArgs:\noptions (object): contains args and data from parser", "source": "codesearchnet"}
{"code": "def _render_node_traceback(self, node_name):\n    lines = [RL(''), RL(''), RL('Traceback of node construction:', 'bold')]\n    try:\n        node_stack = self._debug_dump.node_traceback(node_name)\n        for depth, (file_path, line, function_name, text) in enumerate(node_stack):\n            lines.append('%d: %s' % (depth, file_path))\n            attribute = debugger_cli_common.MenuItem('', 'ps %s -b %d' % (file_path, line)) if text else None\n            line_number_line = RL('  ')\n            line_number_line += RL('Line:     %d' % line, attribute)\n            lines.append(line_number_line)\n            lines.append('  Function: %s' % function_name)\n            lines.append('  Text:     ' + ('\"%s\"' % text if text else 'None'))\n            lines.append('')\n    except KeyError:\n        lines.append('(Node unavailable in the loaded Python graph)')\n    except LookupError:\n        lines.append('(Unavailable because no Python graph has been loaded)')\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)", "docstring": "Render traceback of a node's creation in Python, if available.\n\nArgs:\nnode_name: (str) name of the node.\n\nReturns:\nA RichTextLines object containing the stack trace of the node's\nconstruction.", "source": "github-repos"}
{"code": "def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool=False):\n    if direction not in ['inputs', 'outputs']:\n        raise ValueError(f'direction must either be \"inputs\" or \"outputs\", but {direction} was given')\n    name = 'past_key_values' if direction == 'inputs' else 'present'\n    for i in range(self.num_layers):\n        inputs_or_outputs[f'{name}.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}\n        if inverted_values_shape:\n            inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 1: 'past_sequence + sequence'}\n        else:\n            inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}", "docstring": "Fill the input_or_outputs mapping with past_key_values dynamic axes considering.\n\nArgs:\ninputs_or_outputs: The mapping to fill.\ndirection: either \"inputs\" or \"outputs\", it specifies whether input_or_outputs is the input mapping or the\noutput mapping, this is important for axes naming.\ninverted_values_shape:\nIf `True`, store values on dynamic axis 1, else on axis 2.", "source": "github-repos"}
{"code": "def all_subnets_longer_prefix(ip_net, cidr):\n    \n    subnets_list = list()\n    while int(cidr) <= 32:\n        try:\n            subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))\n        except Exception as e:\n            LOGGER.critical('Function all_subnets_longer_prefix {item}'.format(item=e))\n            pass\n        cidr = str(int(cidr) + 1)\n    return subnets_list", "docstring": "Function to return every subnet a ip can belong to with a longer prefix\nArgs:\nip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1\ncidr: CIDR value of 0 to 32\n\nReturns: returns a list of subnets", "source": "juraj-google-style"}
{"code": "def _parse_trunk_allowed_vlans(self, config):\n        \n        match = re.search(r'switchport trunk allowed vlan (.+)$', config, re.M)\n        return dict(trunk_allowed_vlans=match.group(1))", "docstring": "Scans the specified config and parse the trunk allowed vlans value\n\nArgs:\nconfig (str): The interface configuration block to scan\n\nReturns:\ndict: A Python dict object with the value of switchport trunk\nallowed vlans value.  The dict returned is intended to be\nmerged into the resource dict", "source": "juraj-google-style"}
{"code": "def layout(self, dimensions=None, **kwargs):\n        \n        dimensions = self._valid_dimensions(dimensions)\n        if len(dimensions) == self.ndims:\n            with item_check(False):\n                return NdLayout(self, **kwargs).reindex(dimensions)\n        return self.groupby(dimensions, container_type=NdLayout, **kwargs)", "docstring": "Group by supplied dimension(s) and lay out groups\n\nGroups data by supplied dimension(s) laying the groups along\nthe dimension(s) out in a NdLayout.\n\nArgs:\ndimensions: Dimension(s) to group by\n\nReturns:\nNdLayout with supplied dimensions", "source": "juraj-google-style"}
{"code": "def clipping_params(ts, capacity=100):\n    \n    ts_sorted = ts.order(ascending=False)\n    i, t0, t1, integral = 1, None, None, 0\n    while integral <= capacity and i+1 < len(ts):\n        i += 1\n        t0_within_capacity = t0\n        t1_within_capacity = t1\n        t0 = min(ts_sorted.index[:i])\n        t1 = max(ts_sorted.index[:i])\n        integral = integrated_change(ts[t0:t1])\n        print i, t0, ts[t0], t1, ts[t1], integral\n    if t0_within_capacity and t1_within_capacity:\n        return t0_within_capacity, t1_within_capacity", "docstring": "Start and end index that clips the price/value of a time series the most\n\nAssumes that the integrated maximum includes the peak (instantaneous maximum).\n\nArguments:\nts (TimeSeries): Time series to attempt to clip to as low a max value as possible\ncapacity (float): Total \"funds\" or \"energy\" available for clipping (integrated area under time series)\n\nReturns:\n2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase", "source": "juraj-google-style"}
{"code": "def _GetVisitSource(self, visit_identifier, cache, database):\n    \n    sync_cache_results = cache.GetResults('sync')\n    if not sync_cache_results:\n      result_set = database.Query(self._SYNC_CACHE_QUERY)\n\n      cache.CacheQueryResults(result_set, 'sync', 'id', ('source',))\n      sync_cache_results = cache.GetResults('sync')\n\n    if sync_cache_results and visit_identifier:\n      results = sync_cache_results.get(visit_identifier, None)\n      if results:\n        return results[0]\n\n    return None", "docstring": "Retrieves a visit source type based on the identifier.\n\nArgs:\nvisit_identifier (str): identifier from the visits table for the\nparticular record.\ncache (SQLiteCache): cache which contains cached results from querying\nthe visit_source table.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nint: visit source type or None if no visit source type was found for\nthe identifier.", "source": "juraj-google-style"}
{"code": "def export_template(access_token, subscription_id, rgname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/exportTemplate',\n                        '?api-version=', RESOURCE_API])\n    rg_body = {'options':'IncludeParameterDefaultValue', 'resources':['*']}\n    body = json.dumps(rg_body)\n    return do_post(endpoint, body, access_token)", "docstring": "Capture the specified resource group as a template\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def determine_encoding(path, default=None):\n    byte_order_marks = (('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)))\n    try:\n        with open(path, 'rb') as infile:\n            raw = infile.read(4)\n    except IOError:\n        return default\n    for (encoding, boms) in byte_order_marks:\n        if any((raw.startswith(bom) for bom in boms)):\n            return encoding\n    return default", "docstring": "Determines the encoding of a file based on byte order marks.\n\nArguments:\npath (str): The path to the file.\ndefault (str, optional): The encoding to return if the byte-order-mark\nlookup does not return an answer.\n\nReturns:\nstr: The encoding of the file.", "source": "codesearchnet"}
{"code": "def set_presence(self, state=None, status=None, priority=None):\n    state = (state if (state is not None) else self.state)\n    status = (status if (status is not None) else self.status)\n    priority = (priority if (priority is not None) else self.priority)\n    self.presenceserver.set_presence(state, status, priority)", "docstring": "Change the presence broadcast by the client.\nIf the client is currently connected, the new presence is broadcast immediately.\n\nArgs:\nstate(aioxmpp.PresenceState, optional): New presence state to broadcast (Default value = None)\nstatus(dict or str, optional): New status information to broadcast (Default value = None)\npriority (int, optional): New priority for the resource (Default value = None)", "source": "codesearchnet"}
{"code": "def __pad_value(value, pad_len_multiple, pad_char):\n    assert (pad_len_multiple > 0)\n    assert (len(pad_char) == 1)\n    padding_length = ((pad_len_multiple - (len(value) % pad_len_multiple)) % pad_len_multiple)\n    return (value + (pad_char * padding_length))", "docstring": "Add padding characters to the value if needed.\n\nArgs:\nvalue: The string value to be padded.\npad_len_multiple: Pad the result so its length is a multiple\nof pad_len_multiple.\npad_char: The character to use for padding.\n\nReturns:\nThe string value with padding characters added.", "source": "codesearchnet"}
{"code": "def ready(self, node_id, metadata_priority=True):\n        \n        self.maybe_connect(node_id)\n        return self.is_ready(node_id, metadata_priority=metadata_priority)", "docstring": "Check whether a node is connected and ok to send more requests.\n\nArguments:\nnode_id (int): the id of the node to check\nmetadata_priority (bool): Mark node as not-ready if a metadata\nrefresh is required. Default: True\n\nReturns:\nbool: True if we are ready to send to the given node", "source": "juraj-google-style"}
{"code": "def get_compatible_func(op, func):\n    op_signature = _remove_annotation(tf_inspect.signature(op))\n    func_signature = _remove_annotation(tf_inspect.signature(func))\n    if op_signature == func_signature:\n        return func\n    op_pos_names = _get_required_param_names(op_signature)\n    func_pos_names = _get_required_param_names(func_signature)\n    if op_pos_names != func_pos_names:\n        raise AssertionError(f\"The decorated function's non-default arguments must be identical to that of the overridden op. func has {func_pos_names}. op has {op_pos_names}.\")\n    func_missing_params = {}\n    for name in set(op_signature.parameters.keys()) - set(func_signature.parameters.keys()):\n        p = op_signature.parameters[name]\n        if p.default is p.empty:\n            raise AssertionError(f\"The decorated function's signature must implement all of the non-default arguments of the overridden op. Argument `{name}` is unimplemented.\")\n        func_missing_params[name] = p\n\n    def compatible_func(*args, **kwargs):\n        bound = op_signature.bind(*args, **kwargs)\n        for name, param in func_missing_params.items():\n            if name not in bound.arguments:\n                continue\n            value = bound.arguments.pop(name)\n            if value is not param.default:\n                raise AssertionError(f'Dispatched op is called with argument `{name}` set to a non-default value, which is not supported by the decorated function')\n        return func(*bound.args, **bound.kwargs)\n    return compatible_func", "docstring": "Returns a compatible function.\n\nArgs:\nop: a callable with whose signature the returned function is compatible.\nfunc: a callable which is called by the returned function.\n\nReturns:\na compatible function, which conducts the actions of `func` but can\nbe called like `op`, given that:\n- the list of required arguments in `func` and `op` are the same.\n- there is no override of the default arguments of `op` that are not\nsupported by `func`.", "source": "github-repos"}
{"code": "def generate_chrome_trace_format(self, show_dataflow: bool=True, show_memory: bool=False, op_time: str='schedule') -> str:\n    step_stats_analysis = self.analyze_step_stats(show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time)\n    return step_stats_analysis.chrome_trace.format_to_string(pretty=True)", "docstring": "Produces a trace in Chrome Trace Format.\n\nArgs:\nshow_dataflow: (Optional.) If True, add flow events to the trace\nconnecting producers and consumers of tensors.\nshow_memory: (Optional.) If True, add object snapshot events to the trace\nshowing the sizes and lifetimes of tensors.\nop_time: (Optional.) How the execution time of op is shown in timeline.\nPossible values are \"schedule\", \"gpu\" and \"all\".\n\"schedule\" will show op from the time it is scheduled to the end of\nthe scheduling.\nNotice by the end of its scheduling its async kernels may not start\nyet. It is shown using the default value from step_stats.\n\"gpu\" will show op with the execution time of its kernels on GPU.\n\"all\" will show op from the start of its scheduling to the end of\nits last kernel.\nReturns:\nA JSON formatted string in Chrome Trace format.", "source": "github-repos"}
{"code": "def get_lock_state_transaction(self, transaction_id):\n    response = None\n    try:\n        response = requests.get(urls.get_lockstate_transaction(self._giid, transaction_id), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)})\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)\n    return json.loads(response.text)", "docstring": "Get lock state transaction status\n\nArgs:\ntransaction_id: Transaction ID received from set_lock_state", "source": "codesearchnet"}
{"code": "def __init__(self, validate_args=False, name=\"cholesky_outer_product\"):\n    \n    self._graph_parents = []\n    self._name = name\n    super(CholeskyOuterProduct, self).__init__(\n        forward_min_event_ndims=2,\n        validate_args=validate_args,\n        name=name)", "docstring": "Instantiates the `CholeskyOuterProduct` bijector.\n\nArgs:\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"}
{"code": "def _get_internal_slot(slot_key=None, filler_pipeline_key=None, slot_dict=None):\n    if (slot_dict is None):\n        slot_dict = {}\n    slot_record = slot_dict.get(slot_key)\n    if (slot_record is None):\n        raise PipelineStatusError(('Could not find data for output slot key \"%s\".' % slot_key))\n    output = {}\n    if (slot_record.status == _SlotRecord.FILLED):\n        output['status'] = 'filled'\n        output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time)\n        output['value'] = slot_record.value\n        filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(slot_record)\n    else:\n        output['status'] = 'waiting'\n    if filler_pipeline_key:\n        output['fillerPipelineId'] = filler_pipeline_key.name()\n    return output", "docstring": "Gets information about a _SlotRecord for display in UI.\n\nArgs:\nslot_key: The db.Key of the slot to fetch.\nfiller_pipeline_key: In the case the slot has not yet been filled, assume\nthat the given db.Key (for a _PipelineRecord) will be the filler of\nthe slot in the future.\nslot_dict: The slot JSON dictionary.\n\nReturns:\nDictionary with the keys:\nstatus: Slot status: 'filled' or 'waiting'\nfillTimeMs: Time in milliseconds since the epoch of when it was filled.\nvalue: The current value of the slot, which is a slot's JSON dictionary.\nfillerPipelineId: The pipeline ID of what stage has or should fill\nthis slot.\n\nRaises:\nPipelineStatusError if any input is bad.", "source": "codesearchnet"}
{"code": "def __init__(self, name: str, path: str):\n    self._test_suite = self.create_test_suite(name, path)", "docstring": "Initializes the YamlExamplesTestSuite.\n\nArgs:\nname: The name of the test suite. This will be used as the class name\nfor the dynamically generated test suite.\npath: A string representing the path or glob pattern to search for\nYAML example files.", "source": "github-repos"}
{"code": "def __call__(self, **kwargs):\n        \n\n        assert self._last_report_time is not None, (\n            \"StatusReporter._start() must be called before the first \"\n            \"report __call__ is made to ensure correct runtime metrics.\")\n\n        \n        \n        report_time = time.time()\n        if TIME_THIS_ITER_S not in kwargs:\n            kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time\n        self._last_report_time = report_time\n\n        \n        self._queue.put(kwargs.copy(), block=True)\n\n        \n        \n        \n        self._continue_semaphore.acquire()", "docstring": "Report updated training status.\n\nPass in `done=True` when the training job is completed.\n\nArgs:\nkwargs: Latest training result status.\n\nExample:\n>>> reporter(mean_accuracy=1, training_iteration=4)\n>>> reporter(mean_accuracy=1, training_iteration=4, done=True)\n\nRaises:\nStopIteration: A StopIteration exception is raised if the trial has\nbeen signaled to stop.", "source": "juraj-google-style"}
{"code": "def ProduceAnalysisReport(self, plugin):\n    analysis_report = plugin.CompileReport(self)\n    if (not analysis_report):\n        return\n    analysis_report.time_compiled = timelib.Timestamp.GetNow()\n    plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name)\n    if plugin_name:\n        analysis_report.plugin_name = plugin_name\n    if self._event_filter_expression:\n        analysis_report.filter_string = self._event_filter_expression\n    self._storage_writer.AddAnalysisReport(analysis_report)\n    self.number_of_produced_analysis_reports += 1\n    self.number_of_produced_event_tags = self._storage_writer.number_of_event_tags\n    self.last_activity_timestamp = time.time()", "docstring": "Produces an analysis report.\n\nArgs:\nplugin (AnalysisPlugin): plugin.", "source": "codesearchnet"}
{"code": "def _CreateCampaign(client, budget):\n  \n  campaign_service = client.GetService('CampaignService')\n\n  operations = [{\n      'operator': 'ADD',\n      'operand': {\n          'name': 'Interplanetary Cruise \n          \n          \n          \n          'status': 'PAUSED',\n          'advertisingChannelType': 'SEARCH',\n          'biddingStrategyConfiguration': {\n              'biddingStrategyType': 'MANUAL_CPC',\n          },\n          'budget': budget,\n          \n          'settings': [{\n              'xsi_type': 'DynamicSearchAdsSetting',\n              \n              'domainName': 'example.com',\n              'languageCode': 'en'\n          }],\n          \n          'startDate': (datetime.datetime.now() +\n                        datetime.timedelta(1)).strftime('%Y%m%d'),\n          \n          'endDate': (datetime.datetime.now() +\n                      datetime.timedelta(365)).strftime('%Y%m%d'),\n      }\n  }]\n\n  campaign = campaign_service.mutate(operations)['value'][0]\n  campaign_id = campaign['id']\n\n  print 'Campaign with ID \"%d\" and name \"%s\" was added.' % (\n      campaign_id, campaign['name'])\n\n  return campaign_id", "docstring": "Creates the campaign.\n\nArgs:\nclient: an AdWordsClient instance.\nbudget: a suds.sudsobject.Object representation of a created budget.\n\nReturns:\nAn integer campaign ID.", "source": "juraj-google-style"}
{"code": "def _ReadN(self, n):\n    \n    ret = \"\"\n    while True:\n      chunk = self._read_file.read(n - len(ret))\n      ret += chunk\n\n      if len(ret) == n or not chunk:\n        return ret", "docstring": "Reads n characters from the input stream, or until EOF.\n\nThis is equivalent to the current CPython implementation of read(n), but\nnot guaranteed by the docs.\n\nArgs:\nn: int\n\nReturns:\nstring", "source": "juraj-google-style"}
{"code": "def _binding_to_coroutine(state, b, bad_bindings, ret, top, ctx):\n    if b not in bad_bindings:\n        ret.PasteBinding(b)\n        return state\n    if ctx.matcher(state.node).match_var_against_type(b.variable, ctx.convert.generator_type, {}, {b.variable: b}) is not None:\n        ret_param = b.data.get_instance_type_parameter(abstract_utils.V)\n        coroutine = abstract.Coroutine(ctx, ret_param, state.node)\n        ret.AddBinding(coroutine, [b], state.node)\n        return state\n    if not top:\n        ret.PasteBinding(b)\n        return state\n    _, await_method = ctx.attribute_handler.get_attribute(state.node, b.data, '__await__', b)\n    if await_method is None or not await_method.bindings:\n        ret.PasteBinding(b)\n        return state\n    state, await_obj = ctx.vm.call_function_with_state(state, await_method, ())\n    state, subret = to_coroutine(state, await_obj, False, ctx)\n    ret.PasteVariable(subret)\n    return state", "docstring": "Helper for _to_coroutine.\n\nArgs:\nstate: The current state.\nb: A cfg.Binding.\nbad_bindings: Bindings that are not coroutines.\nret: A return variable that this helper will add to.\ntop: Whether this is the top-level recursive call.\nctx: The current context.\n\nReturns:\nThe state.", "source": "github-repos"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-small\")\n>>> model = FlaxT5ForConditionalGeneration.from_pretrained(\"google-t5/t5-small\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"}
{"code": "def save_to_mat_file(self, parameter_space, result_parsing_function, filename, runs):\n    for key in parameter_space:\n        if (not isinstance(parameter_space[key], list)):\n            parameter_space[key] = [parameter_space[key]]\n    dimension_labels = ([{key: str(parameter_space[key])} for key in parameter_space.keys() if (len(parameter_space[key]) > 1)] + [{'runs': range(runs)}])\n    return savemat(filename, {'results': self.get_results_as_numpy_array(parameter_space, result_parsing_function, runs=runs), 'dimension_labels': dimension_labels})", "docstring": "Return the results relative to the desired parameter space in the form\nof a .mat file.\n\nArgs:\nparameter_space (dict): dictionary containing\nparameter/list-of-values pairs.\nresult_parsing_function (function): user-defined function, taking a\nresult dictionary as argument, that can be used to parse the\nresult files and return a list of values.\nfilename (path): name of output .mat file.\nruns (int): number of runs to gather for each parameter\ncombination.", "source": "codesearchnet"}
{"code": "def find_pad_index(self, array):\n    try:\n        return list(array).index(self.pad_value)\n    except ValueError:\n        return len(array)", "docstring": "Find padding index.\n\nArgs:\narray (list): integer list.\n\nReturns:\nidx: padding index.\n\nExamples:\n>>> array = [1, 2, 0]\n>>> self.find_pad_index(array)\n2", "source": "codesearchnet"}
{"code": "def display(port=None, height=None):\n  \n  _display(port=port, height=height, print_message=True, display_handle=None)", "docstring": "Display a TensorBoard instance already running on this machine.\n\nArgs:\nport: The port on which the TensorBoard server is listening, as an\n`int`, or `None` to automatically select the most recently\nlaunched TensorBoard.\nheight: The height of the frame into which to render the TensorBoard\nUI, as an `int` number of pixels, or `None` to use a default value\n(currently 800).", "source": "juraj-google-style"}
{"code": "async def _notify_event_internal(self, conn_string, name, event):\n    try:\n        self._currently_notifying = True\n        conn_id = self._get_conn_id(conn_string)\n        event_maps = self._monitors.get(conn_string, {})\n        wildcard_maps = self._monitors.get(None, {})\n        wildcard_handlers = wildcard_maps.get(name, {})\n        event_handlers = event_maps.get(name, {})\n        for (handler, func) in itertools.chain(event_handlers.items(), wildcard_handlers.items()):\n            try:\n                result = func(conn_string, conn_id, name, event)\n                if inspect.isawaitable(result):\n                    (await result)\n            except:\n                self._logger.warning('Error calling notification callback id=%s, func=%s', handler, func, exc_info=True)\n    finally:\n        for action in self._deferred_adjustments:\n            self._adjust_monitor_internal(*action)\n        self._deferred_adjustments = []\n        self._currently_notifying = False", "docstring": "Notify that an event has occured.\n\nThis method will send a notification and ensure that all callbacks\nregistered for it have completed by the time it returns.  In\nparticular, if the callbacks are awaitable, this method will await\nthem before returning.  The order in which the callbacks are called\nis undefined.\n\nThis is a low level method that is not intended to be called directly.\nYou should use the high level public notify_* methods for each of the\ntypes of events to ensure consistency in how the event objects are\ncreated.\n\nArgs:\nconn_string (str): The connection string for the device that the\nevent is associated with.\nname (str): The name of the event. Must be in SUPPORTED_EVENTS.\nevent (object): The event object.  The type of this object will\ndepend on what is being notified.", "source": "codesearchnet"}
{"code": "def to_csv(self, filename=None, as_text=True, use_descriptions=False, dlm=',', header=True):\n    if (filename is None):\n        if (not as_text):\n            raise StriplogError('You must provide a filename or set as_text to True.')\n    else:\n        as_text = False\n    if as_text:\n        output = StringIO()\n    else:\n        output = open(filename, 'w')\n    fieldnames = ['Top', 'Base', 'Component']\n    writer = csv.DictWriter(output, delimiter=dlm, fieldnames=fieldnames, quoting=csv.QUOTE_MINIMAL)\n    if header:\n        writer.writeheader()\n    for i in self.__list:\n        if (use_descriptions and i.description):\n            text = i.description\n        elif i.primary:\n            text = i.primary.summary()\n        else:\n            text = ''\n        data = {j: k for (j, k) in zip(fieldnames, [i.top.z, i.base.z, text])}\n        writer.writerow(data)\n    if as_text:\n        return output.getvalue()\n    else:\n        output.close\n        return None", "docstring": "Returns a CSV string built from the summaries of the Intervals.\n\nArgs:\nuse_descriptions (bool): Whether to use descriptions instead\nof summaries, if available.\ndlm (str): The delimiter.\nheader (bool): Whether to form a header row.\n\nReturns:\nstr: A string of comma-separated values.", "source": "codesearchnet"}
{"code": "def get_vnet(access_token, subscription_id, resource_group, vnet_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/virtualNetworks/', vnet_name,\n                        '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about the named virtual network.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvnet_name (str): Name of the VNet.\n\nReturns:\nHTTP response. VNet JSON body.", "source": "juraj-google-style"}
{"code": "def __init__(self, name: YangIdentifier, ns: Optional[YangIdentifier]):\n        \n        self.name = name\n        self.namespace = ns", "docstring": "Initialize the class instance.\n\nArgs:\nname: Member's local name.\nns: Member's namespace.", "source": "juraj-google-style"}
{"code": "def execute_command(self, command):\n        \n        logger.debug('Executing commands:\\n %s' % command)\n\n        err_msg = 'Something happened when executing some commands on device'\n\n        chan = self.ssh.get_transport().open_session()\n        chan.settimeout(5)\n\n        chan.exec_command(command)\n\n        error_chan = chan.makefile_stderr()\n        output_chan = chan.makefile()\n\n        error = ''\n        output = ''\n        for e in error_chan.read():\n            error = error + self._read_wrapper(e)\n        for o in output_chan.read():\n            output = output + self._read_wrapper(o)\n\n        if len(error) > 0:\n            msg = '%s %s:\\n%s\\n%s' % (err_msg, self.ssh.get_host_keys().keys()[0], command, error)\n            logger.error(msg)\n            raise exceptions.CommandExecutionException(msg)\n\n        regex = re.compile('Command fail')\n        if len(regex.findall(output)) > 0:\n            msg = '%s %s:\\n%s\\n%s' % (err_msg, self.ssh.get_host_keys().keys()[0], command, output)\n            logger.error(msg)\n            raise exceptions.CommandExecutionException(msg)\n\n        output = output.splitlines()\n\n        \n        i = 0\n        for line in output:\n            current_line = line.split('\n\n            if len(current_line) > 1:\n                output[i] = current_line[1]\n            else:\n                output[i] = current_line[0]\n            i += 1\n\n        return output[:-1]", "docstring": "This method will execute the commands on the device without as if you were just connected to it (it will not\nenter into any vdom). This method is not recommended unless you are 100% sure of what you are doing.\n\n\nArgs:\n* **command** (str) -- Command to execute.\n\nReturns:\nA list of strings containing the output.\n\nRaises:\nexceptions.CommandExecutionException -- If it detects any problem with the command.", "source": "juraj-google-style"}
{"code": "def scan_manifest(self, manifest):\n    top_roots = set()\n    for stored_path in manifest.keys():\n        if '/' in stored_path:\n            top_dir = stored_path.split('/', 1)[0]\n            if top_dir not in top_roots:\n                top_roots.add(top_dir)\n    import_roots = list(self.import_roots) + sorted(top_roots)\n    stored_resources = {}\n    for support_file in _runtime_support_files:\n        resource = fetch_support_file(support_file, self.timestamp_tuple)\n        stored_filename = resource.zipinfo.filename\n        stored_resources[stored_filename] = resource\n    for stored_path, local_path in manifest.items():\n        if local_path is None:\n            stored_resources[stored_path] = stored_resource.EmptyFile(stored_path, self.timestamp_tuple)\n        else:\n            stored_resources[stored_path] = stored_resource.StoredFile(stored_path, self.timestamp_tuple, local_path)\n    if '__main__.py' in stored_resources:\n        raise error.Error('Configuration error for [%s]: Manifest file included a file named __main__.py, which is not allowed' % self.manifest_filename)\n    stored_resources['__main__.py'] = self.generate_main(self.main_filename, self.generate_boilerplate(import_roots))\n    for stored_filename in _runtime_init_files:\n        if stored_filename in stored_resources:\n            logging.debug('Skipping __init__.py already present [%s]', stored_filename)\n            continue\n        stored_resources[stored_filename] = stored_resource.EmptyFile(stored_filename, self.timestamp_tuple)\n    return stored_resources", "docstring": "Return a dict of StoredResources based on an input manifest.\n\nReturns:\nA dict of store_filename to StoredResource", "source": "github-repos"}
{"code": "def predict(self, a, b):\n        \n        a = np.array(a).reshape((-1, 1))\n        b = np.array(b).reshape((-1, 1))\n        return (mutual_info_regression(a, b.reshape((-1,))) + mutual_info_regression(b, a.reshape((-1,))))/2", "docstring": "Compute the test statistic\n\nArgs:\na (array-like): Variable 1\nb (array-like): Variable 2\n\nReturns:\nfloat: test statistic", "source": "juraj-google-style"}
{"code": "def disease_terms(self, hgnc_id=None):\n    query = {}\n    if hgnc_id:\n        LOG.debug('Fetching all diseases for gene %s', hgnc_id)\n        query['genes'] = hgnc_id\n    else:\n        LOG.info('Fetching all disease terms')\n    return list(self.disease_term_collection.find(query))", "docstring": "Return all disease terms that overlaps a gene\n\nIf no gene, return all disease terms\n\nArgs:\nhgnc_id(int)\n\nReturns:\niterable(dict): A list with all disease terms that match", "source": "codesearchnet"}
{"code": "def readUserSession(datafile):\n  \n  for line in datafile:\n    pages = line.split()\n    total = len(pages)\n    \n    if total < 2:\n      continue\n\n    \n    if total > 500:\n      continue\n\n    return [PAGE_CATEGORIES[int(i) - 1] for i in pages]\n  return []", "docstring": "Reads the user session record from the file's cursor position\nArgs:\ndatafile: Data file whose cursor points at the beginning of the record\n\nReturns:\nlist of pages in the order clicked by the user", "source": "juraj-google-style"}
{"code": "def delete_vmss(access_token, subscription_id, resource_group, vmss_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '?api-version=', COMP_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a virtual machine scale set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def pprint(sequence, keys=None):\n    \n    if len(sequence) > 0:\n        columns = calculate_columns(sequence)\n        row_format = calculate_row_format(columns, keys)\n        header = row_format % dict([(key, key.title()) for key in columns])\n        separator = row_format % dict([(key, '-' * columns[key]) for key in columns])\n\n        print(separator)\n        print(header)\n        print(separator)\n\n        for row in sequence:\n            print(row_format % row)\n\n        print(separator)", "docstring": "Print sequence as ascii table to stdout.\n\nArgs:\nsequence (list or tuple): a sequence with a dictionary each entry.\nkeys (list): optional list of keys to order columns as well as to filter for them.", "source": "juraj-google-style"}
{"code": "async def append_entries(self, destination=None):\n        \n\n        \n        destination_list = [destination] if destination else self.state.cluster\n        for destination in destination_list:\n            data = {\n                'type': 'append_entries',\n\n                'term': self.storage.term,\n                'leader_id': self.id,\n                'commit_index': self.log.commit_index,\n\n                'request_id': self.request_id\n            }\n\n            next_index = self.log.next_index[destination]\n            prev_index = next_index - 1\n\n            if self.log.last_log_index >= next_index:\n                data['entries'] = [self.log[next_index]]\n\n            else:\n                data['entries'] = []\n\n            data.update({\n                'prev_log_index': prev_index,\n                'prev_log_term': self.log[prev_index]['term'] if self.log and prev_index else 0\n            })\n\n            asyncio.ensure_future(self.state.send(data, destination), loop=self.loop)", "docstring": "AppendEntries RPC — replicate log entries / heartbeat\nArgs:\ndestination — destination id\n\nRequest params:\nterm — leader’s term\nleader_id — so follower can redirect clients\nprev_log_index — index of log entry immediately preceding new ones\nprev_log_term — term of prev_log_index entry\ncommit_index — leader’s commit_index\n\nentries[] — log entries to store (empty for heartbeat)", "source": "juraj-google-style"}
{"code": "def _get_pdf_filenames_at(source_directory):\n    if (not os.path.isdir(source_directory)):\n        raise ValueError(('%s is not a directory!' % source_directory))\n    return [os.path.join(source_directory, filename) for filename in os.listdir(source_directory) if filename.endswith(PDF_EXTENSION)]", "docstring": "Find all PDF files in the specified directory.\n\nArgs:\nsource_directory (str): The source directory.\n\nReturns:\nlist(str): Filepaths to all PDF files in the specified directory.\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def unify_basis(self, keys=None, basis=None):\n        \n        if keys is None:\n            keys = [k for k, v in self.data.items() if isinstance(v, Curve)]\n        else:\n            keys = utils.flatten_list(keys)\n\n        if basis is None:\n            basis = self.survey_basis(keys=keys)\n        if basis is None:\n            m = \"No basis was provided and welly could not retrieve common basis.\"\n            raise WellError(m)\n\n        for k in keys:\n            if keys and (k not in keys):\n                continue\n            try:  \n                self.data[k] = self.data[k].to_basis(basis)\n            except:  \n                continue\n\n        return", "docstring": "Give everything, or everything in the list of keys, the same basis.\nIf you don't provide a basis, welly will try to get one using\n``survey_basis()``.\n\nArgs:\nbasis (ndarray): A basis: the regularly sampled depths at which\nyou want the samples.\nkeys (list): List of strings: the keys of the data items to\nunify, if not all of them.\n\nReturns:\nNone. Works in place.", "source": "juraj-google-style"}
{"code": "def django_cache_function(timeout: int = 5 * 60,\n                          cache_key: str = '',\n                          debug_cache: bool = False):\n    \n    cache_key = cache_key or None\n\n    def decorator(fn):\n        def wrapper(*args, **kwargs):\n            \n            \n            \n            \n            \n            \n            \n            \n            if cache_key:\n                \n                call_sig = ''\n                _cache_key = cache_key\n                check_stored_call_sig = False\n            else:\n                \n                \n                \n                call_sig = get_call_signature(fn, args, kwargs)\n                _cache_key = make_cache_key(call_sig)\n                check_stored_call_sig = True\n            if debug_cache:\n                log.critical(\"Checking cache for key: \" + _cache_key)\n            cache_result_tuple = cache.get(_cache_key)  \n            if cache_result_tuple is None:\n                if debug_cache:\n                    log.debug(\"Cache miss\")\n            else:\n                if debug_cache:\n                    log.debug(\"Cache hit\")\n                cached_call_sig, func_result = cache_result_tuple\n                if (not check_stored_call_sig) or cached_call_sig == call_sig:\n                    return func_result\n                log.warning(\n                    \"... Cache hit was due to hash collision; cached_call_sig \"\n                    \"{} != call_sig {}\".format(\n                        repr(cached_call_sig), repr(call_sig)))\n                \n                \n                \n            func_result = fn(*args, **kwargs)\n            cache_result_tuple = (call_sig, func_result)\n            cache.set(key=_cache_key, value=cache_result_tuple,\n                      timeout=timeout)  \n            return func_result\n\n        return wrapper\n\n    return decorator", "docstring": "Decorator to add caching to a function in Django.\nUses the Django default cache.\n\nArgs:\n\ntimeout: timeout in seconds; use None for \"never expire\", as 0 means\n\"do not cache\".\n\ncache_key: optional cache key to use (if falsy, we'll invent one)\ndebug_cache: show hits/misses?", "source": "juraj-google-style"}
{"code": "def list_bucket(self, bucket):\n    \n    self.response.write('Listbucket result:\\n')\n\n    page_size = 1\n    stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)\n    while True:\n      count = 0\n      for stat in stats:\n        count += 1\n        self.response.write(repr(stat))\n        self.response.write('\\n')\n\n      if count != page_size or count == 0:\n        break\n      stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,\n                             marker=stat.filename)", "docstring": "Create several files and paginate through them.\n\nProduction apps should set page_size to a practical value.\n\nArgs:\nbucket: bucket.", "source": "juraj-google-style"}
{"code": "class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):\n\n    def __init__(self, temperature: float):\n        if not isinstance(temperature, float) or not temperature > 0:\n            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')\n        self.temperature = temperature\n\n    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n        scores = scores / self.temperature\n        return scores", "docstring": "[`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).\n\nArgs:\ntemperature (`float`):\nThe value used to module the logits distribution.", "source": "github-repos"}
{"code": "def signature(array):\n    \n    length = len(array)\n    index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length\n\n    return array[:index]", "docstring": "Returns the first 262 bytes of the given bytearray\nas part of the file header signature.\n\nArgs:\narray: bytearray to extract the header signature.\n\nReturns:\nFirst 262 bytes of the file content as bytearray type.", "source": "juraj-google-style"}
{"code": "def link_contentkey_authorization_policy(access_token, ckap_id, options_id, \\\nams_redirected_rest_endpoint):\n    \n    path = '/ContentKeyAuthorizationPolicies'\n    full_path = ''.join([path, \"('\", ckap_id, \"')\", \"/$links/Options\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    uri = ''.join([ams_redirected_rest_endpoint, 'ContentKeyAuthorizationPolicyOptions', \\\n    \"('\", options_id, \"')\"])\n    body = '{\"uri\": \"' + uri + '\"}'\n    return do_ams_post(endpoint, full_path_encoded, body, access_token, \"json_only\", \"1.0;NetFx\")", "docstring": "Link Media Service Content Key Authorization Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nckap_id (str): A Media Service Asset Content Key Authorization Policy ID.\noptions_id (str): A Media Service Content Key Authorization Policy Options .\nams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def request(path):\n    \n    headers = {'Accept': 'application/json'}\n    try:\n        requested_object = requests.get(path, headers=headers)\n        requested_object.raise_for_status()\n    except requests.exceptions.HTTPError as exception:\n        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +\n                     str(exception.response.status_code) + ' ' +\n                     str(exception.response.reason) + ' ' + str(path))\n        raise\n    except requests.exceptions.InvalidURL as exception:\n        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))\n        raise\n    except Exception:\n        import traceback\n        LOGGER.error('Generic exception: ' + traceback.format_exc())\n        raise\n    else:\n        response = requested_object.json()\n        return response", "docstring": "Send a request to a given URL accepting JSON format and return a \\\ndeserialized Python object.\n\nArgs:\npath (str): The URI to be requested.\n\nReturns:\nresponse: Deserialized JSON Python object.\n\nRaises:\nHTTPError: the HTTP error returned by the requested server.\nInvalidURL: an invalid URL has been requested.\nException: generic exception.", "source": "juraj-google-style"}
{"code": "def get_cmd_handler(self, cmd):\n    cmd = cmd.replace('-', '_')\n    handler = getattr(self, cmd, None)\n    if (not handler):\n        raise BuildException('Command {} is not supported as a build command'.format(cmd))\n    return handler", "docstring": "Return an handler for cmd.\nThe handler and the command should have the same name.\nSee class description for more info about handlers.\n\nArgs:\ncmd (str): The name of the command\n\nReturns:\ncallable: which handles cmd\n\nRaises:\nlago.build.BuildException: If an handler for cmd doesn't exist", "source": "codesearchnet"}
{"code": "def send(self, **kwargs):\n    assert (len(kwargs) == 1), 'Must make a single request.'\n    res = self.send_req(sc_pb.Request(**kwargs))\n    return getattr(res, list(kwargs.keys())[0])", "docstring": "Create and send a specific request, and return the response.\n\nFor example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing\n\nArgs:\n**kwargs: A single kwarg with the name and value to fill in to Request.\n\nReturns:\nThe Response corresponding to your request.", "source": "codesearchnet"}
{"code": "def cashflows(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> Tuple[types.DateTensor, types.FloatTensor]:\n    name = name or self._name + '_cashflows'\n    with tf.name_scope(name):\n        valuation_date = dateslib.convert_to_date_tensor(market.date)\n        future_cashflows = tf.cast(self._payment_dates >= valuation_date, dtype=self._dtype)\n        notional = tf.expand_dims(self._notional, axis=-1)\n        cashflows = notional * (future_cashflows * self._daycount_fractions * self._coupon_rate)\n        return (self._payment_dates, cashflows)", "docstring": "Returns cashflows for the fixed leg.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'cashflows'.\n\nReturns:\nA tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and\ncontaining the dates and the corresponding cashflows price for each\nstream based on the input market data.", "source": "github-repos"}
{"code": "def validate_request_success(response_text, request_url, status_code, expected_status_code):\n    try:\n        assert (status_code == expected_status_code)\n    except AssertionError:\n        msg = 'Request to {url} failed with status {status_code}:\\nThe reponse from the request was as follows:\\n\\n{content}'.format(url=request_url, status_code=status_code, content=response_text)\n        raise BadHttpRequestError(msg)", "docstring": "Validates that a request was successful.\n\nArgs:\nresponse_text (str): The response body of the request.\nrequest_url (str): The URL the request was made at.\nstatus_code (int): The status code of the response.\nexpected_status_code (int): The expected status code of the\nresponse.\n\nRaises:\n:class:`saltant.exceptions.BadHttpRequestError`: The HTTP\nrequest failed.", "source": "codesearchnet"}
{"code": "def get_naive(dt):\n    if (not dt.tzinfo):\n        return dt\n    if hasattr(dt, 'asdatetime'):\n        return dt.asdatetime()\n    return dt.replace(tzinfo=None)", "docstring": "Gets a naive datetime from a datetime.\n\ndatetime_tz objects can't just have tzinfo replaced with None, you need to\ncall asdatetime.\n\nArgs:\ndt: datetime object.\n\nReturns:\ndatetime object without any timezone information.", "source": "codesearchnet"}
{"code": "def run_interactive_command(command, env=None, **kwargs):\n    command_result = _run_command(command=command, out_pipe=sys.stdout, err_pipe=sys.stderr, stdin=sys.stdin, env=env, **kwargs)\n    return command_result", "docstring": "Runs a command interactively, reusing the current stdin, stdout and stderr\n\nArgs:\ncommand(list of str): args of the command to execute, including the\ncommand itself as command[0] as `['ls', '-l']`\nenv(dict of str:str): If set, will use the given dict as env for the\nsubprocess\n**kwargs: Any other keyword args passed will be passed to the\n:ref:subprocess.Popen call\n\nReturns:\nlago.utils.CommandStatus: result of the interactive execution", "source": "codesearchnet"}
{"code": "def load_template(self, name):\n        \n        \n        if name in self.cached_templates:\n            logger.debug(\"Using cached template: %s\", name)\n            return self.cached_templates[name]\n\n        logger.debug(\"Attempting to find template by name: %s\", name)\n        name_with_ext, provider_name, base_path = self.find_template_details(name)\n\n        full_path = None\n        if base_path is not None:\n            full_path = os.path.join(base_path, name_with_ext)\n\n        \n        template = template_exception_handler(\n            lambda: self.get_provider(provider_name).load_template(\n                name_with_ext,\n                full_path=full_path\n            ),\n            self.error_context,\n            filename=full_path\n        )\n\n        \n        self.cached_templates[name] = template\n        return template", "docstring": "Attempts to load the relevant template from our templating system/environment.\n\nArgs:\nname: The name of the template to load.\n\nReturn:\nOn success, a StatikTemplate object that can be used to render content.", "source": "juraj-google-style"}
{"code": "def _slice_single_param(param, param_event_ndims, slices, dist_batch_shape):\n    param_shape = tf.shape(input=param)\n    insert_ones = tf.ones([((tf.size(input=dist_batch_shape) + param_event_ndims) - tf.rank(param))], dtype=param_shape.dtype)\n    new_param_shape = tf.concat([insert_ones, param_shape], axis=0)\n    full_batch_param = tf.reshape(param, new_param_shape)\n    param_slices = []\n    param_dim_idx = 0\n    batch_dim_idx = 0\n    for slc in slices:\n        if (slc is tf.newaxis):\n            param_slices.append(slc)\n            continue\n        if (slc is Ellipsis):\n            if (batch_dim_idx < 0):\n                raise ValueError('Found multiple `...` in slices {}'.format(slices))\n            param_slices.append(slc)\n            num_remaining_non_newaxis_slices = sum([(s is not tf.newaxis) for s in slices[(slices.index(Ellipsis) + 1):]])\n            batch_dim_idx = (- num_remaining_non_newaxis_slices)\n            param_dim_idx = (batch_dim_idx - param_event_ndims)\n            continue\n        param_dim_size = new_param_shape[param_dim_idx]\n        batch_dim_size = dist_batch_shape[batch_dim_idx]\n        is_broadcast = (batch_dim_size > param_dim_size)\n        if isinstance(slc, slice):\n            (start, stop, step) = (slc.start, slc.stop, slc.step)\n            if (start is not None):\n                start = tf.where(is_broadcast, 0, start)\n            if (stop is not None):\n                stop = tf.where(is_broadcast, 1, stop)\n            if (step is not None):\n                step = tf.where(is_broadcast, 1, step)\n            param_slices.append(slice(start, stop, step))\n        else:\n            param_slices.append(tf.where(is_broadcast, 0, slc))\n        param_dim_idx += 1\n        batch_dim_idx += 1\n    param_slices.extend(([ALL_SLICE] * param_event_ndims))\n    return full_batch_param.__getitem__(param_slices)", "docstring": "Slices a single parameter of a distribution.\n\nArgs:\nparam: A `Tensor`, the original parameter to slice.\nparam_event_ndims: `int` event parameterization rank for this parameter.\nslices: A `tuple` of normalized slices.\ndist_batch_shape: The distribution's batch shape `Tensor`.\n\nReturns:\nnew_param: A `Tensor`, batch-sliced according to slices.", "source": "codesearchnet"}
{"code": "def verify_password(self, password, password_hash):\n        \n\n        \n        if isinstance(password_hash, self.user_manager.db_manager.UserClass):\n            print(\n                'Deprecation warning: verify_password(password, user) has been changed'\\\n                ' to: verify_password(password, password_hash). The user param will be deprecated.'\\\n                ' Please change your call with verify_password(password, user) into'\\\n                ' a call with verify_password(password, user.password)'\n                ' as soon as possible.')\n            password_hash = password_hash.password   \n\n        \n        return self.password_crypt_context.verify(password, password_hash)", "docstring": "Verify plaintext ``password`` against ``hashed password``.\n\nArgs:\npassword(str): Plaintext password that the user types in.\npassword_hash(str): Password hash generated by a previous call to ``hash_password()``.\nReturns:\n| True when ``password`` matches ``password_hash``.\n| False otherwise.\nExample:\n\n::\n\nif verify_password('mypassword', user.password):\nlogin_user(user)", "source": "juraj-google-style"}
{"code": "def get_cluster(self, label):\n        \n        for cluster in self._clusters:\n            if label == cluster['label']:\n                return self._get_connection(cluster)\n        raise AttributeError('No such cluster %s.' % label)", "docstring": "Returns a connection to a mongo-clusters.\n\nArgs:\nlabel (string): the label of a cluster.\n\nReturns:\nA connection to the cluster labeld with label.\n\nRaises:\nAttributeError: there is no cluster with the given label in the\nconfig", "source": "juraj-google-style"}
{"code": "def send_event(self, event_type, category=None, dimensions=None,\n                   properties=None, timestamp=None):\n        \n        if category and category not in SUPPORTED_EVENT_CATEGORIES:\n            raise ValueError('Event category is not one of the supported' +\n                             'types: {' +\n                             ', '.join(SUPPORTED_EVENT_CATEGORIES) + '}')\n\n        data = {\n            'eventType': event_type,\n            'category': category,\n            'dimensions': dimensions or {},\n            'properties': properties or {},\n            'timestamp': int(timestamp) if timestamp else None,\n        }\n\n        _logger.debug('Sending event to SignalFx: %s', data)\n        self._add_extra_dimensions(data)\n        return self._send_event(event_data=data, url='{0}/{1}'.format(\n            self._endpoint, self._INGEST_ENDPOINT_EVENT_SUFFIX),\n            session=self._session)", "docstring": "Send an event to SignalFx.\n\nArgs:\nevent_type (string): the event type (name of the event time\nseries).\ncategory (string): the category of the event.\ndimensions (dict): a map of event dimensions.\nproperties (dict): a map of extra properties on that event.\ntimestamp (float): timestamp when the event has occured", "source": "juraj-google-style"}
{"code": "def get(self, rid, data_callback=None, raise_on_error=True):\n        \n        cached_data = None\n        ds_data = self.ds.get(rid, raise_on_error=False)\n        if ds_data is not None:\n            expired = True\n            if ds_data.get('found') is True:\n                if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):\n                    cached_data = ds_data.get('_source', {}).get('cache-data')\n                    expired = False\n                    self.tcex.log.debug('Using cached data for ({}).'.format(rid))\n                else:\n                    self.tcex.log.debug('Cached data is expired for ({}).'.format(rid))\n\n            if expired or ds_data.get('found') is False:\n                \n                if callable(data_callback):\n                    cached_data = data_callback(rid)\n                    self.tcex.log.debug('Using callback data for ({}).'.format(rid))\n                    if cached_data:\n                        self.update(rid, cached_data, raise_on_error)  \n        return cached_data", "docstring": "Get cached data from the data store.\n\nArgs:\nrid (str): The record identifier.\ndata_callback (callable): A method that will return the data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def _name_to_tensor(self, tensor_name):\n    \n    id1, id2 = self._tensor_name_to_ids[tensor_name]\n    return self._operations[id1].outputs[id2]", "docstring": "The tensor with the given name.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na tf.Tensor or mtf.Tensor", "source": "juraj-google-style"}
{"code": "def _ListDir(dirpath, pathtype):\n    pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype)\n    childpaths = []\n    try:\n        file_obj = vfs.VFSOpen(pathspec)\n        for path in file_obj.ListNames():\n            if ((pathtype != rdf_paths.PathSpec.PathType.REGISTRY) or path):\n                childpaths.append(path)\n    except IOError:\n        pass\n    return childpaths", "docstring": "Returns children of a given directory.\n\nThis function is intended to be used by the `PathComponent` subclasses to get\ninitial list of potential children that then need to be filtered according to\nthe rules of a specific component.\n\nArgs:\ndirpath: A path to the directory.\npathtype: The pathtype to use.\n\nRaises:\nValueError: in case of unsupported path types.", "source": "codesearchnet"}
{"code": "def click_slot(self, slot, right=False):\n        \n        if isinstance(slot, int):\n            slot = self.window.slots[slot]\n        button = constants.INV_BUTTON_RIGHT \\\n            if right else constants.INV_BUTTON_LEFT\n        return self.send_click(windows.SingleClick(slot, button))", "docstring": "Left-click or right-click the slot.\n\nArgs:\nslot (Slot): The clicked slot. Can be ``Slot`` instance or integer.\nSet to ``inventory.cursor_slot``\nfor clicking outside the window.", "source": "juraj-google-style"}
{"code": "def __cloudflare_request(self, *, account, path, args=None):\n        \n        if not args:\n            args = {}\n\n        if not self.cloudflare_initialized[account.account_id]:\n            self.cloudflare_session[account.account_id] = requests.Session()\n            self.cloudflare_session[account.account_id].headers.update({\n                'X-Auth-Email': account.email,\n                'X-Auth-Key': account.api_key,\n                'Content-Type': 'application/json'\n            })\n            self.cloudflare_initialized[account.account_id] = True\n\n        if 'per_page' not in args:\n            args['per_page'] = 100\n\n        response = self.cloudflare_session[account.account_id].get(account.endpoint + path, params=args)\n        if response.status_code != 200:\n            raise CloudFlareError('Request failed: {}'.format(response.text))\n\n        return response.json()", "docstring": "Helper function to interact with the CloudFlare API.\n\nArgs:\naccount (:obj:`CloudFlareAccount`): CloudFlare Account object\npath (`str`): URL endpoint to communicate with\nargs (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume\n\nReturns:\n`dict`", "source": "juraj-google-style"}
{"code": "def orbit(self, orbit):\n    self._orbit = orbit\n    tle = Tle.from_orbit(orbit)\n    lines = tle.text.splitlines()\n    if (len(lines) == 3):\n        (_, line1, line2) = lines\n    else:\n        (line1, line2) = lines\n    self.tle = twoline2rv(line1, line2, wgs72)", "docstring": "Initialize the propagator\n\nArgs:\norbit (Orbit)", "source": "codesearchnet"}
{"code": "def _import_module(self, name, level):\n    key = (name, level)\n    if key not in self._imported_modules_cache:\n        self._imported_modules_cache[key] = self._do_import_module(name, level)\n    return self._imported_modules_cache[key]", "docstring": "Import the module and return the module object.\n\nArgs:\nname: Name of the module. E.g. \"sys\".\nlevel: Specifies whether to use absolute or relative imports. -1: (Python\n<= 3.1) \"Normal\" import. Try both relative and absolute.\n0: Absolute import.\n1: \"from . import abc\"\n2: \"from .. import abc\" etc.\n\nReturns:\nAn instance of abstract.Module or None if we couldn't find the module.", "source": "github-repos"}
{"code": "def __init__(self, capacity=100, initialization_list=None):\n    \n    self._capacity = capacity\n\n    \n    \n    \n    self._data = dict()\n\n    if initialization_list:\n      \n      \n      for entry in initialization_list:\n        triplet = HistoryTriplet._make(entry)\n        self._data[(triplet.device, triplet.tensor)] = NumericsAlertHistory(\n            initialization_list=triplet.jsonable_history)", "docstring": "Constructor.\n\nArgs:\ncapacity: (`int`) maximum number of device-tensor keys to store.\ninitialization_list: (`list`) An optional list (parsed from JSON) that\nis used to initialize the data within this registry. Use the\ncreate_jsonable_registry method of NumericsAlertRegistry to create such\na list.", "source": "juraj-google-style"}
{"code": "def insert(self, keys, values, name=None):\n    return self.insert_or_assign(keys, values, name)", "docstring": "Associates `keys` with `values`.\n\nArgs:\nkeys: Keys to insert. Can be a tensor of any shape. Must match the table's\nkey type.\nvalues: Values to be associated with keys. Must be a tensor of the same\nshape as `keys` and match the table's value type.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `keys` or `values` doesn't match the table data\ntypes.", "source": "github-repos"}
{"code": "def __init__(self,\n                 certificate_type=None,\n                 certificate_value=None):\n        \n        super(Certificate, self).__init__(Tags.CERTIFICATE)\n\n        if certificate_type is None:\n            self.certificate_type = CertificateType()\n        else:\n            self.certificate_type = CertificateType(certificate_type)\n\n        if certificate_value is None:\n            self.certificate_value = CertificateValue()\n        else:\n            self.certificate_value = CertificateValue(certificate_value)", "docstring": "Construct a Certificate object.\n\nArgs:\ncertificate_type (CertificateType): The type of the\ncertificate. Optional, defaults to None.\ncertificate_value (bytes): The bytes of the certificate. Optional,\ndefaults to None.", "source": "juraj-google-style"}
{"code": "def _ragged_stack_concat_axis_1(rt_inputs, stack_values):\n    num_inputs = len(rt_inputs)\n    nrows_checks = []\n    rt_nrows = rt_inputs[0].nrows()\n    for index, rt in enumerate(rt_inputs[1:]):\n        nrows_checks.append(check_ops.assert_equal(rt_nrows, rt.nrows(), message=f'Input tensors at index 0 (=x) and {index + 1} (=y) have incompatible shapes.'))\n    with ops.control_dependencies(nrows_checks):\n        concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False)\n        row_indices = math_ops.range(rt_nrows * num_inputs)\n        row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1])\n        transposed_row_index_matrix = array_ops.transpose(row_index_matrix)\n        row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1])\n        permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation)\n        if stack_values:\n            stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs)\n            _copy_row_shape(rt_inputs, stack_splits)\n            return ragged_tensor.RaggedTensor.from_row_splits(permuted_rt, stack_splits, validate=False)\n        else:\n            concat_splits = permuted_rt.row_splits[::num_inputs]\n            _copy_row_shape(rt_inputs, concat_splits)\n            return ragged_tensor.RaggedTensor.from_row_splits(permuted_rt.values, concat_splits, validate=False)", "docstring": "Helper function to concatenate or stack ragged tensors along axis 1.\n\nArgs:\nrt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank.\nstack_values: Boolean.  If true, then stack values; otherwise, concatenate\nthem.\n\nReturns:\nA RaggedTensor.", "source": "github-repos"}
{"code": "def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:\n    if labels is not None:\n        labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n        use_cache = False\n        if decoder_input_ids is None and decoder_inputs_embeds is None:\n            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n    lm_logits = self.bias_layer(lm_logits)\n    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n    if not return_dict:\n        output = (lm_logits,) + outputs[1:]\n        return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the masked language modeling loss. Indices should either be in `[0, ...,\nconfig.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:", "source": "github-repos"}
{"code": "def __init__(\n        self,\n        tcex,\n        owner,\n        action=None,\n        attribute_write_type=None,\n        halt_on_error=True,\n        playbook_triggers_enabled=None,\n    ):\n        \n        self.tcex = tcex\n        self._action = action or 'Create'\n        self._attribute_write_type = attribute_write_type or 'Replace'\n        self._batch_max_chunk = 5000\n        self._halt_on_error = halt_on_error\n        self._hash_collision_mode = None\n        self._file_merge_mode = None\n        self._owner = owner\n        self._playbook_triggers_enabled = playbook_triggers_enabled\n\n        \n        self._group_shelf_fqfn = None\n        self._indicator_shelf_fqfn = None\n\n        \n        self._halt_on_batch_error = None\n        self._halt_on_file_error = None\n        self._halt_on_poll_error = None\n\n        \n        self._saved_xids = None\n        self._saved_groups = None  \n        self._saved_indicators = None  \n        self.enable_saved_file = False\n\n        \n        self._batch_data_count = None\n        self._poll_interval = None\n        self._poll_interval_times = []\n        self._poll_timeout = 3600\n\n        \n        self._files = {}\n        self._groups = None\n        self._groups_shelf = None\n        self._indicators = None\n        self._indicators_shelf = None\n\n        \n        self._gen_indicator_class()", "docstring": "Initialize Class Properties.\n\nArgs:\ntcex (obj): An instance of TcEx object.\nowner (str): The ThreatConnect owner for Batch action.\naction (str, default:Create): Action for the batch job ['Create', 'Delete'].\nattribute_write_type (str, default:Replace): Write type for Indicator attributes\n['Append', 'Replace'].\nhalt_on_error (bool, default:True): If True any batch error will halt the batch job.", "source": "juraj-google-style"}
{"code": "def centroid_distance(item_a, time_a, item_b, time_b, max_value):\n    (ax, ay) = item_a.center_of_mass(time_a)\n    (bx, by) = item_b.center_of_mass(time_b)\n    return (np.minimum(np.sqrt((((ax - bx) ** 2) + ((ay - by) ** 2))), max_value) / float(max_value))", "docstring": "Euclidean distance between the centroids of item_a and item_b.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def view_as_complex(x):\n    if any_symbolic_tensors((x,)):\n        return ViewAsComplex().symbolic_call(x)\n    x = backend.convert_to_tensor(x)\n    if len(x.shape) < 1 or x.shape[-1] != 2:\n        raise ValueError(f'Last dimension of input must be size 2 (real and imaginary). Received shape: {x.shape}')\n    real_part = x[..., 0]\n    imag_part = x[..., 1]\n    return backend.cast(real_part, dtype='complex64') + 1j * backend.cast(imag_part, dtype='complex64')", "docstring": "Converts a real tensor with shape `(..., 2)` to a complex tensor,\nwhere the last dimension represents the real and imaginary components\nof a complex tensor.\n\nArgs:\nx: A real tensor with last dimension of size 2.\n\nReturns:\nA complex tensor with shape `x.shape[:-1]`.\n\nExample:\n\n```\n>>> import numpy as np\n>>> from keras import ops\n\n>>> real_imag = np.array([[1.0, 2.0], [3.0, 4.0]])\n>>> complex_tensor = ops.view_as_complex(real_imag)\n>>> complex_tensor\narray([1.+2.j, 3.+4.j])\n```", "source": "github-repos"}
{"code": "def from_json(raw):\n    ncls = None\n    _type = raw.get('type')\n    try:\n        ncls = _type_map[NodeType(_type)]\n    except (KeyError, ValueError) as e:\n        logger.warning('Unknown node type: %s', _type)\n        if DEBUG:\n            raise_from(exception.ParseException(('Parse error for %s' % _type), raw), e)\n        return None\n    node = ncls()\n    node.load(raw)\n    return node", "docstring": "Helper to construct a node from a dict.\n\nArgs:\nraw (dict): Raw node representation.\n\nReturns:\nNode: A Node object or None.", "source": "codesearchnet"}
{"code": "def rpc_name(rpc_id):\n    \n\n    name = _RPC_NAME_MAP.get(rpc_id)\n    if name is None:\n        name = 'RPC 0x%04X' % rpc_id\n\n    return name", "docstring": "Map an RPC id to a string name.\n\nThis function looks the RPC up in a map of all globally declared RPCs,\nand returns a nice name string.  if the RPC is not found in the global\nname map, returns a generic name string such as 'rpc 0x%04X'.\n\nArgs:\nrpc_id (int): The id of the RPC that we wish to look up.\n\nReturns:\nstr: The nice name of the RPC.", "source": "juraj-google-style"}
{"code": "async def process_graph_input(graph, stream, value, rpc_executor):\n    \n\n    graph.sensor_log.push(stream, value)\n\n    \n    if stream.important:\n        associated_output = stream.associated_stream()\n        graph.sensor_log.push(associated_output, value)\n\n    to_check = deque([x for x in graph.roots])\n\n    while len(to_check) > 0:\n        node = to_check.popleft()\n        if node.triggered():\n            try:\n                results = node.process(rpc_executor, graph.mark_streamer)\n                for result in results:\n                    if inspect.iscoroutine(result.value):\n                        result.value = await asyncio.ensure_future(result.value)\n\n                    result.raw_time = value.raw_time\n                    graph.sensor_log.push(node.stream, result)\n            except:\n                logging.getLogger(__name__).exception(\"Unhandled exception in graph node processing function for node %s\", str(node))\n\n            \n            \n            if len(results) > 0:\n                to_check.extend(node.outputs)", "docstring": "Process an input through this sensor graph.\n\nThe tick information in value should be correct and is transfered\nto all results produced by nodes acting on this tick.  This coroutine\nis an asyncio compatible version of SensorGraph.process_input()\n\nArgs:\nstream (DataStream): The stream the input is part of\nvalue (IOTileReading): The value to process\nrpc_executor (RPCExecutor): An object capable of executing RPCs\nin case we need to do that.", "source": "juraj-google-style"}
{"code": "def getFileObjects(self):\n    files = {'project-file': self, 'mapping-table-file': self.mapTableFile, 'channel-input-file': self.channelInputFile, 'precipitation-file': self.precipFile, 'storm-pipe-network-file': self.stormPipeNetworkFile, 'hmet-file': self.hmetFile, 'nwsrfs-file': self.nwsrfsFile, 'orographic-gage-file': self.orographicGageFile, 'grid-pipe-file': self.gridPipeFile, 'grid-stream-file': self.gridStreamFile, 'time-series-file': self.timeSeriesFiles, 'projection-file': self.projectionFile, 'replace-parameters-file': self.replaceParamFile, 'replace-value-file': self.replaceValFile, 'output-location-file': self.outputLocationFiles, 'maps': self.maps, 'link-node-datasets-file': self.linkNodeDatasets}\n    return files", "docstring": "Retrieve a dictionary of file objects.\n\nThis is a utility method that can be used to programmatically access the GsshaPy file objects. Use this method\nin conjunction with the getFileKeys method to access only files that have been read into the database.\n\nReturns:\ndict: Dictionary with human readable keys and values of GsshaPy file object instances. Files that have not\nbeen read into the database will have a value of None.", "source": "codesearchnet"}
{"code": "def parse_machine_listing(text: str, convert: bool=True, strict: bool=True) -> \\\n        List[dict]:\n    \n    \n    listing = []\n\n    for line in text.splitlines(False):\n        facts = line.split(';')\n        row = {}\n        filename = None\n\n        for fact in facts:\n            name, sep, value = fact.partition('=')\n\n            if sep:\n                name = name.strip().lower()\n                value = value.strip().lower()\n\n                if convert:\n                    try:\n                        value = convert_machine_list_value(name, value)\n                    except ValueError:\n                        if strict:\n                            raise\n\n                row[name] = value\n            else:\n                if name[0:1] == ' ':\n                    \n                    filename = name[1:]\n                else:\n                    name = name.strip().lower()\n                    row[name] = ''\n\n        if filename:\n            row['name'] = filename\n            listing.append(row)\n        elif strict:\n            raise ValueError('Missing filename.')\n\n    return listing", "docstring": "Parse machine listing.\n\nArgs:\ntext: The listing.\nconvert: Convert sizes and dates.\nstrict: Method of handling errors. ``True`` will raise\n``ValueError``. ``False`` will ignore rows with errors.\n\nReturns:\nlist: A list of dict of the facts defined in RFC 3659.\nThe key names must be lowercase. The filename uses the key\n``name``.", "source": "juraj-google-style"}
{"code": "def concatenate(cls, list_of_stats):\n        \n\n        all_stats = np.stack([stats.values for stats in list_of_stats])\n        all_counts = all_stats[:, 4]\n        all_counts_relative = all_counts / np.sum(all_counts)\n\n        min_value = float(np.min(all_stats[:, 2]))\n        max_value = float(np.max(all_stats[:, 3]))\n        mean_value = float(np.sum(all_counts_relative * all_stats[:, 0]))\n        var_value = float(np.sum(all_counts_relative * (all_stats[:, 1] + np.power(all_stats[:, 0] - mean_value, 2))))\n        num_value = int(np.sum(all_counts))\n\n        return cls(mean_value, var_value, min_value, max_value, num_value)", "docstring": "Take a list of stats from different sets of data points and\nmerge the stats for getting stats overall data points.\n\nArgs:\nlist_of_stats (iterable): A list containing stats for different sets of data points.\n\nReturns:\nDataStats: Stats calculated overall sets of data points.", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n        \n        required_keys = ('location', 'design_days')\n        for key in required_keys:\n            assert key in data, 'Required key \"{}\" is missing!'.format(key)\n\n        return cls(Location.from_json(data['location']),\n                   [DesignDay.from_json(des_day) for des_day in data['design_days']])", "docstring": "Create a DDY from a dictionary.\n\nArgs:\ndata = {\n\"location\": ladybug Location schema,\n\"design_days\": [] // list of ladybug DesignDay schemas}", "source": "juraj-google-style"}
{"code": "def postprocess_model(self, model: 'PreTrainedModel', **kwargs):\n    return self._process_model_after_weight_loading(model, **kwargs)", "docstring": "Post-process the model post weights loading.\nMake sure to override the abstract method `_process_model_after_weight_loading`.\n\nArgs:\nmodel (`~transformers.PreTrainedModel`):\nThe model to quantize\nkwargs (`dict`, *optional*):\nThe keyword arguments that are passed along `_process_model_after_weight_loading`.", "source": "github-repos"}
{"code": "def constant(times: np.ndarray, amp: complex) -> np.ndarray:\n    \n    return np.full(len(times), amp, dtype=np.complex_)", "docstring": "Continuous constant pulse.\n\nArgs:\ntimes: Times to output pulse for.\namp: Complex pulse amplitude.", "source": "juraj-google-style"}
{"code": "def modutf7_encode(data: str) -> bytes:\n    \n    ret = bytearray()\n    is_usascii = True\n    encode_start = None\n    for i, symbol in enumerate(data):\n        charpoint = ord(symbol)\n        if is_usascii:\n            if charpoint == 0x26:\n                ret.extend(b'&-')\n            elif 0x20 <= charpoint <= 0x7e:\n                ret.append(charpoint)\n            else:\n                encode_start = i\n                is_usascii = False\n        else:\n            if 0x20 <= charpoint <= 0x7e:\n                to_encode = data[encode_start:i]\n                encoded = _modified_b64encode(to_encode)\n                ret.append(0x26)\n                ret.extend(encoded)\n                ret.extend((0x2d, charpoint))\n                is_usascii = True\n    if not is_usascii:\n        to_encode = data[encode_start:]\n        encoded = _modified_b64encode(to_encode)\n        ret.append(0x26)\n        ret.extend(encoded)\n        ret.append(0x2d)\n    return bytes(ret)", "docstring": "Encode the string using modified UTF-7.\n\nArgs:\ndata: The input string to encode.", "source": "juraj-google-style"}
{"code": "def EnumerateConfig(self, service, path, cache, filter_type=None):\n    result = []\n    external = []\n    path = self._FixPath(path)\n    if (path not in cache):\n        external.append('%s -> %s', self.OLD_PAMCONF_FILENAME, path)\n        return (result, external)\n    for tokens in self.ParseEntries(cache[path]):\n        if (path == self.OLD_PAMCONF_FILENAME):\n            try:\n                service = tokens[0]\n                tokens = tokens[1:]\n            except IndexError:\n                continue\n        new_path = None\n        filter_request = None\n        try:\n            if (tokens[0] == '@include'):\n                new_path = tokens[1]\n            elif (tokens[1] in ['include', 'substack']):\n                new_path = tokens[2]\n                filter_request = tokens[0]\n        except IndexError:\n            pass\n        if new_path:\n            new_path = self._FixPath(new_path)\n            if (new_path not in cache):\n                external.append(('%s -> %s' % (path, new_path)))\n                continue\n            (r, e) = self.EnumerateConfig(service, new_path, cache, filter_request)\n            result.extend(r)\n            external.extend(e)\n        else:\n            if (filter_type and (tokens[0] != filter_type)):\n                continue\n            match = self.PAMCONF_RE.match(' '.join(tokens))\n            if match:\n                (p_type, control, module_path, module_args) = match.group(1, 2, 3, 4)\n                if p_type.startswith('-'):\n                    p_type = p_type[1:]\n                result.append(rdf_config_file.PamConfigEntry(service=service, type=p_type, control=control, module_path=module_path, module_args=module_args))\n    return (result, external)", "docstring": "Return PamConfigEntries it finds as it recursively follows PAM configs.\n\nArgs:\nservice: A string containing the service name we are processing.\npath: A string containing the file path name we want.\ncache: A dictionary keyed on path, with the file contents (list of str).\nfilter_type: A string containing type name of the results we want.\n\nReturns:\nA tuple of a list of RDFValue PamConfigEntries found & a list of strings\nwhich are the external config references found.", "source": "codesearchnet"}
{"code": "def visit_boolean_op(self, boolean_logic: _evaluation.BooleanOperatorNode) -> _sql_data_types.Select:\n    lhs_result = self.visit(boolean_logic.left)\n    rhs_result = self.visit(boolean_logic.right)\n    if lhs_result.sql_data_type != _sql_data_types.Boolean:\n        lhs_result = lhs_result.is_not_null()\n    if rhs_result.sql_data_type != _sql_data_types.Boolean:\n        rhs_result = rhs_result.is_not_null()\n    lhs_subquery = lhs_result.as_operand()\n    rhs_subquery = rhs_result.as_operand()\n    if boolean_logic.op == _ast.BooleanLogic.Op.IMPLIES:\n        sql_value = f'(NOT {lhs_subquery} OR {rhs_subquery})'\n    elif boolean_logic.op == _ast.BooleanLogic.Op.XOR:\n        sql_value = f'({lhs_subquery} <> {rhs_subquery})'\n    else:\n        sql_value = f'({lhs_subquery} {boolean_logic.op.upper()} {rhs_subquery})'\n    sql_alias = 'logic_'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias=sql_alias), from_part=None)", "docstring": "Translates a FHIRPath Boolean logic operation to Standard SQL.\n\nNote that evaluation for Boolean logic is only supported for Boolean\noperands of scalar cardinality.\n\nArgs:\nboolean_logic: The FHIRPath AST `BooleanLogic` node.\n\nReturns:\nA compiled Standard SQL expression.", "source": "github-repos"}
{"code": "def generate_encodeable_characters(characters: Iterable[str], encodings: Iterable[str]) -> Iterable[str]:\n    for c in characters:\n        for encoding in encodings:\n            try:\n                c.encode(encoding)\n                (yield c)\n            except UnicodeEncodeError:\n                pass", "docstring": "Generates the subset of 'characters' that can be encoded by 'encodings'.\n\nArgs:\ncharacters: The characters to check for encodeability e.g. 'abcd'.\nencodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].\n\nReturns:\nThe subset of 'characters' that can be encoded using one of the provided\nencodings.", "source": "codesearchnet"}
{"code": "def from_json(cls, json):\n    \n    result = super(_ReducerReader, cls).from_json(json)\n    result.current_key = _ReducerReader.decode_data(json[\"current_key\"])\n    result.current_values = _ReducerReader.decode_data(json[\"current_values\"])\n    return result", "docstring": "Creates an instance of the InputReader for the given input shard state.\n\nArgs:\njson: The InputReader state as a dict-like object.\n\nReturns:\nAn instance of the InputReader configured using the values of json.", "source": "juraj-google-style"}
{"code": "def _from_keras_log_format(data, **kwargs):\n    \n    data_val = pd.DataFrame(data[['epoch']])\n\n    data_val['acc'] = data['val_acc']\n    data_val['loss'] = data['val_loss']\n    data_val['data'] = 'validation'\n\n    data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']])\n    data_training['data'] = 'training'\n\n    result = pd.concat([data_training, data_val], sort=False)\n    plot(result, **kwargs)", "docstring": "Plot accuracy and loss from a panda's dataframe.\n\nArgs:\ndata: Panda dataframe in the format of the Keras CSV log.\noutput_dir_path: The path to the directory where the resultings plots\nshould end up.", "source": "juraj-google-style"}
{"code": "def get_processid(config):\n    pidfile = config.get('daemon', 'pidfile', fallback=None)\n    if (pidfile is None):\n        raise ValueError(\"Configuration doesn't have pidfile option!\")\n    try:\n        with open(pidfile, 'r') as _file:\n            pid = _file.read().rstrip()\n            try:\n                pid = int(pid)\n            except ValueError:\n                raise ValueError('stale pid file with invalid data:{}'.format(pid))\n            else:\n                if (pid in [(- 1), 1]):\n                    raise ValueError('invalid PID ({})'.format(pid))\n                else:\n                    return pid\n    except OSError as exc:\n        if (exc.errno == 2):\n            print(\"CRITICAL: anycast-healthchecker could be down as pid file {} doesn't exist\".format(pidfile))\n            sys.exit(2)\n        else:\n            raise ValueError('error while reading pid file:{}'.format(exc))", "docstring": "Return process id of anycast-healthchecker.\n\nArguments:\nconfig (obj): A configparser object with the configuration of\nanycast-healthchecker.\n\nReturns:\nThe process id found in the pid file\n\nRaises:\nValueError in the following cases\n- pidfile option is missing from the configuration\n- pid is either -1 or 1\n- stale pidfile, either with no data or invalid data\n- failure to read pidfile", "source": "codesearchnet"}
{"code": "def gpio_properties(self):\n        \n        res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        num_props = res\n        buf = (structs.JLinkGPIODescriptor * num_props)()\n        res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return list(buf)", "docstring": "Returns the properties of the user-controllable GPIOs.\n\nProvided the device supports user-controllable GPIOs, they will be\nreturned by this method.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of ``JLinkGPIODescriptor`` instances totalling the number of\nrequested properties.\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"}
{"code": "def AddLabel(self, label):\n    \n    if not isinstance(label, py2to3.STRING_TYPES):\n      raise TypeError('label is not a string type. Is {0:s}'.format(\n          type(label)))\n    if not self._VALID_LABEL_REGEX.match(label):\n      raise ValueError((\n          'Unsupported label: \"{0:s}\". A label must only consist of '\n          'alphanumeric characters or underscores.').format(label))\n\n    if label not in self.labels:\n      self.labels.append(label)", "docstring": "Adds a label to the event tag.\n\nArgs:\nlabel (str): label.\n\nRaises:\nTypeError: if the label provided is not a string.\nValueError: if a label is malformed.", "source": "juraj-google-style"}
{"code": "def __init__(self, app, *, options=None):\n        \n        self.options = options or {}\n        self.application = app\n        super().__init__()", "docstring": "Initialize a new standalone application.\n\nArgs:\napp: A wsgi Python application.\noptions (dict): the configuration.", "source": "juraj-google-style"}
{"code": "class MetricContainer:\n    values: List[Union[int, float]]\n    timestamps: List[pd.Timestamp]\n\n    def sort_by_timestamp(self, in_place=True):\n        \n        timestamps, values = zip(*sorted(zip(self.timestamps, self.values)))\n        if not in_place:\n            return MetricContainer(values=values, timestamps=timestamps)\n        self.timestamps, self.values = zip(*sorted(zip(self.timestamps, self.values)))", "docstring": "This class holds the metric values and timestamps for a given metric.\nArgs:\nmetric_values: List of metric values.\ntimestamps: List of pandas timestamps corresponding to the metric values.", "source": "github-repos"}
{"code": "def __eq__(self, other) -> bool:\n        \n        if self.interval == other.interval and self.channel == other.channel:\n            return True\n        return False", "docstring": "Two time-slots are the same if they have the same interval and channel.\n\nArgs:\nother (Timeslot): other Timeslot", "source": "juraj-google-style"}
{"code": "async def get_in_tree_template(link):\n    \n    context = link.context\n    source_url = get_source_url(link)\n    if not source_url.endswith(('.yml', '.yaml')):\n        raise CoTError(\"{} source url {} doesn't end in .yml or .yaml!\".format(\n            link.name, source_url\n        ))\n    tmpl = await load_json_or_yaml_from_url(\n        context, source_url, os.path.join(\n            context.config[\"work_dir\"], \"{}_taskcluster.yml\".format(link.name)\n        )\n    )\n    return tmpl", "docstring": "Get the in-tree json-e template for a given link.\n\nBy convention, this template is SOURCE_REPO/.taskcluster.yml.\n\nArgs:\nlink (LinkOfTrust): the parent link to get the source url from.\n\nRaises:\nCoTError: on non-yaml `source_url`\nKeyError: on non-well-formed source template\n\nReturns:\ndict: the first task in the template.", "source": "juraj-google-style"}
{"code": "def __driver_stub(self, text, state):\n    origline = readline.get_line_buffer()\n    line = origline.lstrip()\n    if (line and (line[(- 1)] == '?')):\n        self.__driver_helper(line)\n    else:\n        toks = shlex.split(line)\n        return self.__driver_completer(toks, text, state)", "docstring": "Display help messages or invoke the proper completer.\n\nThe interface of helper methods and completer methods are documented in\nthe helper() decorator method and the completer() decorator method,\nrespectively.\n\nArguments:\ntext: A string, that is the current completion scope.\nstate: An integer.\n\nReturns:\nA string used to replace the given text, if any.\nNone if no completion candidates are found.\n\nRaises:\nThis method is called via the readline callback. If this method\nraises an error, it is silently ignored by the readline library.\nThis behavior makes debugging very difficult. For this reason,\nnon-driver methods are run within try-except blocks. When an error\noccurs, the stack trace is printed to self.stderr.", "source": "codesearchnet"}
{"code": "def __getitem__(self, index):\n    raise NotImplementedError", "docstring": "Gets batch at position `index`.\n\nArgs:\nindex: position of the batch in the Sequence.\n\nReturns:\nA batch", "source": "github-repos"}
{"code": "def emit(self, signal, message, analysis_id):\n        \n\n        log.debug('kernel {} zmq send ({}): {}'\n                  ''.format(analysis_id, signal, message))\n        self.zmq_publish.send(json.dumps({\n            'analysis_id': analysis_id,\n            'frame': {'signal': signal, 'load': message},\n        }, default=json_encoder_default).encode('utf-8'))", "docstring": "Emit signal to main.\n\nArgs:\nsignal: Name of the signal to be emitted.\nmessage: Message to be sent.\nanalysis_id: Identifies the instance of this analysis.", "source": "juraj-google-style"}
{"code": "def run_task_external(self, coroutine):\n        \n\n        self.verify_calling_thread(False, 'run_task_external must not be called from the emulation thread')\n\n        future = asyncio.run_coroutine_threadsafe(coroutine, self._loop)\n        return future.result()", "docstring": "Inject a task into the emulation loop and wait for it to finish.\n\nThe coroutine parameter is run as a Task inside the EmulationLoop\nuntil it completes and the return value (or any raised Exception) is\npased back into the caller's thread.\n\nArgs:\ncoroutine (coroutine): The task to inject into the event loop.\n\nReturns:\nobject: Whatever the coroutine returned.", "source": "juraj-google-style"}
{"code": "def create_tree(profile, tree):\n    \n    resource = \"/trees\"\n    payload = {\"tree\": tree}\n    data = api.post_request(profile, resource, payload)\n    return prepare(data)", "docstring": "Create a new tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\ntree\nA list of blob objects (each with a path, mode, type, and\ncontent or sha) to put in the tree.\n\nReturns:\nA dict with data about the tree.", "source": "juraj-google-style"}
{"code": "def _update_exponential_bucket_count(a_float, dist):\n    buckets = dist.exponentialBuckets\n    if (buckets is None):\n        raise ValueError((_BAD_UNSET_BUCKETS % u'exponential buckets'))\n    bucket_counts = dist.bucketCounts\n    num_finite_buckets = buckets.numFiniteBuckets\n    if (len(bucket_counts) < (num_finite_buckets + 2)):\n        raise ValueError(_BAD_LOW_BUCKET_COUNT)\n    scale = buckets.scale\n    factor = buckets.growthFactor\n    if (a_float <= scale):\n        index = 0\n    else:\n        index = (1 + int((math.log((a_float / scale)) / math.log(factor))))\n        index = min(index, (num_finite_buckets + 1))\n    bucket_counts[index] += 1\n    _logger.debug(u'scale:%f, factor:%f, sample:%f, index:%d', scale, factor, a_float, index)", "docstring": "Adds `a_float` to `dist`, updating its exponential buckets.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated\n\nRaises:\nValueError: if `dist` does not already have exponential buckets defined\nValueError: if there are not enough bucket count fields in `dist`", "source": "codesearchnet"}
{"code": "def DoesNotContain(self, value):\n    self._awql = self._CreateSingleValueCondition(value, 'DOES_NOT_CONTAIN')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"does not contain\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def get(self):\n    if (self.ttl[self.idx] <= 0):\n        self.buffers[self.idx] = self.inqueue.get(timeout=300.0)\n        self.ttl[self.idx] = self.cur_max_ttl\n        if (self.cur_max_ttl < self.max_ttl):\n            self.cur_max_ttl += 1\n    buf = self.buffers[self.idx]\n    self.ttl[self.idx] -= 1\n    released = (self.ttl[self.idx] <= 0)\n    if released:\n        self.buffers[self.idx] = None\n    self.idx = ((self.idx + 1) % len(self.buffers))\n    return (buf, released)", "docstring": "Get a new batch from the internal ring buffer.\n\nReturns:\nbuf: Data item saved from inqueue.\nreleased: True if the item is now removed from the ring buffer.", "source": "codesearchnet"}
{"code": "def get(self, key, mem_map=True):\n        \n        self.raise_error_if_not_open()\n\n        if key in self._file:\n            data = self._file[key]\n            sampling_rate = data.attrs[SAMPLING_RATE_ATTR]\n\n            if not mem_map:\n                data = data[()]\n\n            data = np.float32(data) / MAX_INT16_VALUE\n\n            return data, sampling_rate", "docstring": "Return the samples for the given key and the sampling-rate.\n\nArgs:\nkey (str): The key to read the data from.\nmem_map (bool): If ``True`` returns the data as\nmemory-mapped array, otherwise a copy is returned.\n\nNote:\nThe container has to be opened in advance.\n\nReturns:\ntuple: A tuple containing the samples as numpy array\nwith ``np.float32`` [-1.0,1.0] and the sampling-rate.", "source": "juraj-google-style"}
{"code": "def serialize_to_string(self):\n    return print_mdl.SerializeToString()", "docstring": "Serialize the ProfileProto to a binary string.\n\nUsers can write it to file for offline analysis by tfprof commandline\nor graphical interface.\n\nReturns:\nProfileProto binary string.", "source": "github-repos"}
{"code": "def _get_init_rng(self):\n    return self.seed_generator.next()", "docstring": "Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `init_fn`.\n\nBy default, this returns a single `PRNGKey` retrieved by calling\n`self.seed_generator.next()`. Override this to return a different\nstructure.\n\nReturns:\na JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as\nthe `rng` argument of `init_fn`.", "source": "github-repos"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):\n        decoder_module = module._get_decoder_module()\n        decoder_outputs = decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)\n        sequence_output = decoder_outputs[0]\n        if self.config.tie_word_embeddings:\n            sequence_output = sequence_output * self.config.d_model ** (-0.5)\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.shared.variables['params']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, sequence_output)\n        else:\n            lm_logits = module.lm_head(sequence_output)\n        return (lm_logits, decoder_outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration\n>>> import jax.numpy as jnp\n\n>>> tokenizer = 
AutoTokenizer.from_pretrained(\"google-t5/t5-base\")\n>>> model = FlaxLongT5ForConditionalGeneration.from_pretrained(\"google/long-t5-local-base\")\n\n>>> text = \"summarize: My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def set_time(self, value: float):\n        \n        if value < 0:\n            value = 0\n\n        self.offset += self.get_time() - value", "docstring": "Set the current time. This can be used to jump in the timeline.\n\nArgs:\nvalue (float): The new time", "source": "juraj-google-style"}
{"code": "def _extract_blocks(x, block_h, block_w):\n  \n  (_, height, width, depth) = common_layers.shape_list(x)\n  assert height % block_h == 0\n  assert width % block_w == 0\n  x = tf.reshape(x, [-1, height\n                     width\n  return tf.transpose(x, [0, 1, 3, 2, 4, 5])", "docstring": "Helper function for local 2d attention.\n\nArgs:\nx: a [batch, height, width, depth] tensor\nblock_h: An integer. block height\nblock_w: An inteter. block width\n\nreturns:\na [batch, num_heads, height/block_h, width/block_w, depth] tensor", "source": "juraj-google-style"}
{"code": "def read(self, offset, length):\n        \n        if not isinstance(offset, (int, long)):\n            raise TypeError(\"Invalid offset type, should be integer.\")\n\n        offset = self._adjust_offset(offset)\n        self._validate_offset(offset, length)\n        return bytes(self.mapping[offset:offset + length])", "docstring": "Read a string of bytes from the specified `offset` in bytes,\nrelative to the base physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\nlength (int): number of bytes to read.\n\nReturns:\nbytes: bytes read.\n\nRaises:\nTypeError: if `offset` type is invalid.\nValueError: if `offset` is out of bounds.", "source": "juraj-google-style"}
{"code": "def __init__(self, tcex):\n        \n\n        self.tcex = tcex\n        self._config_data = {}\n        self._default_args = None\n        self._default_args_resolved = None\n        self._parsed = False\n        self._parsed_resolved = False\n        self.parser = TcExArgParser()", "docstring": "Initialize Class Properties.\n\nArgs:\ntcex (tcex.TcEx): Instance of TcEx class.", "source": "juraj-google-style"}
{"code": "def config(self):\n    if (self._full_config is None):\n        self._full_config = DotDict()\n        self._full_config.merge(self._default)\n        self._full_config.merge(self._config)\n        self._full_config.merge(self._environment)\n        self._full_config.merge(self._override)\n    return self._full_config", "docstring": "Get the complete configuration where the default, config,\nenvironment, and override values are merged together.\n\nReturns:\n(DotDict): A dictionary of configuration values that\nallows lookups using dot notation.", "source": "codesearchnet"}
{"code": "def _make_sql_compatible(ll):\n    \n\n    new_ll = []\n    for l in ll:\n        new_l = ()\n        for i in l:\n            if not i:\n                new_l = new_l + (None,)\n            else:\n\n                if isinstance(i, str):\n                    if sys.version_info < (3, 0):\n\n                        val = i.decode('utf8').encode('ascii', errors='ignore')\n                    else:\n                        \n                        val = i\n                else:\n                    val = i\n                new_l = new_l + (val,)\n        new_ll.append(new_l)\n\n    return new_ll", "docstring": "Convert any python list of lists (or tuples) so that the strings are formatted correctly for insertion into\n\nArgs:\nll (list): List of lists (or tuples)", "source": "juraj-google-style"}
{"code": "def _gql(query_string, query_class=Query):\n  \n  from .google_imports import gql  \n  gql_qry = gql.GQL(query_string)\n  kind = gql_qry.kind()\n  if kind is None:\n    \n    \n    \n    modelclass = model.Expando\n  else:\n    modelclass = model.Model._lookup_model(\n        kind,\n        tasklets.get_context()._conn.adapter.default_model)\n    \n    kind = modelclass._get_kind()\n  ancestor = None\n  flt = gql_qry.filters()\n  filters = list(modelclass._default_filters())\n  for name_op in sorted(flt):\n    name, op = name_op\n    values = flt[name_op]\n    op = op.lower()\n    if op == 'is' and name == gql.GQL._GQL__ANCESTOR:\n      if len(values) != 1:\n        raise ValueError('\"is\" requires exactly one value')\n      [(func, args)] = values\n      ancestor = _args_to_val(func, args)\n      continue\n    if op not in _OPS:\n      raise NotImplementedError('Operation %r is not supported.' % op)\n    for (func, args) in values:\n      val = _args_to_val(func, args)\n      prop = _get_prop_from_modelclass(modelclass, name)\n      if prop._name != name:\n        raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) '\n                           'returned a property whose name is %r?!' %\n                           (modelclass.__name__, name, prop._name))\n      if isinstance(val, ParameterizedThing):\n        node = ParameterNode(prop, op, val)\n      elif op == 'in':\n        node = prop._IN(val)\n      else:\n        node = prop._comparison(op, val)\n      filters.append(node)\n  if filters:\n    filters = ConjunctionNode(*filters)\n  else:\n    filters = None\n  orders = _orderings_to_orders(gql_qry.orderings(), modelclass)\n  offset = gql_qry.offset()\n  limit = gql_qry.limit()\n  if limit < 0:\n    limit = None\n  keys_only = gql_qry._keys_only\n  if not keys_only:\n    keys_only = None\n  options = QueryOptions(offset=offset, limit=limit, keys_only=keys_only)\n  projection = gql_qry.projection()\n  if gql_qry.is_distinct():\n    group_by = projection\n  else:\n    group_by = None\n  qry = query_class(kind=kind,\n                    ancestor=ancestor,\n                    filters=filters,\n                    orders=orders,\n                    default_options=options,\n                    projection=projection,\n                    group_by=group_by)\n  return qry", "docstring": "Parse a GQL query string (internal version).\n\nArgs:\nquery_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.\nquery_class: Optional class to use, default Query.\n\nReturns:\nAn instance of query_class.", "source": "juraj-google-style"}
{"code": "def exp(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.exp, tf.float32)", "docstring": "Returns a TensorFluent for the exp function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the exp function.", "source": "juraj-google-style"}
{"code": "def read(self, uri):\n        \n        read_response = self.connect(uri)\n        fedora_graph = rdflib.Graph().parse(\n            data=read_response.read(),\n            format='turtle')\n        return fedora_graph", "docstring": "Method takes uri and creates a RDF graph from Fedora Repository\n\nArgs:\nuri(str): URI of Fedora URI\n\nReturns:\nrdflib.Graph", "source": "juraj-google-style"}
{"code": "def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n    pass", "docstring": "Returns a `Tensor`.\n\nThe output of this function will be used by model-builder-functions. For\nexample the pseudo code of `input_layer` will be like:\n\n```python\ndef input_layer(features, feature_columns, ...):\noutputs = [fc._get_dense_tensor(...) for fc in feature_columns]\nreturn tf.concat(outputs)\n```\n\nArgs:\ninputs: A `_LazyBuilder` object to access inputs.\nweight_collections: List of graph collections to which Variables (if any\nwill be created) are added.\ntrainable: If `True` also add variables to the graph collection\n`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n\nReturns:\n`Tensor` of shape [batch_size] + `_variable_shape`.", "source": "github-repos"}
{"code": "def EstimateTimeRemaining(self):\n    number_of_hashes = self.hash_queue.qsize()\n    hashes_per_batch = self._analyzer.hashes_per_batch\n    wait_time_per_batch = self._analyzer.wait_after_analysis\n    analyses_performed = self._analyzer.analyses_performed\n    if (analyses_performed == 0):\n        average_analysis_time = self._analyzer.seconds_spent_analyzing\n    else:\n        (average_analysis_time, _) = divmod(self._analyzer.seconds_spent_analyzing, analyses_performed)\n    (batches_remaining, _) = divmod(number_of_hashes, hashes_per_batch)\n    estimated_seconds_per_batch = (average_analysis_time + wait_time_per_batch)\n    return (batches_remaining * estimated_seconds_per_batch)", "docstring": "Estimates how long until all hashes have been analyzed.\n\nReturns:\nint: estimated number of seconds until all hashes have been analyzed.", "source": "codesearchnet"}
{"code": "def is_collection_aligned(self, data_collection):\n        \n        if self._collection_type != data_collection._collection_type:\n            return False\n        elif len(self.values) != len(data_collection.values):\n            return False\n        elif self.header.analysis_period != data_collection.header.analysis_period:\n                return False\n        return True", "docstring": "Check if this Data Collection is aligned with another.\n\nAligned Data Collections are of the same Data Collection class,\nhave the same number of values and have matching datetimes.\n\nArgs:\ndata_collection: The Data Collection which you want to test if this\ncollection is aligned with.\n\nReturn:\nTrue if collections are aligned, Fale if not aligned", "source": "juraj-google-style"}
{"code": "def cache_path(self):\n    cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n    if (not os.path.exists(cache_path)):\n        os.mkdir(cache_path)\n    return cache_path", "docstring": "make a directory to store all caches\n\nReturns:\n---------\ncache path", "source": "codesearchnet"}
{"code": "def WriteUInt256(self, value):\n        \n        if type(value) is UInt256:\n            value.Serialize(self)\n        else:\n            raise Exception(\"Cannot write value that is not UInt256\")", "docstring": "Write a UInt256 type to the stream.\n\nArgs:\nvalue (UInt256):\n\nRaises:\nException: when `value` is not of neocore.UInt256 type.", "source": "juraj-google-style"}
{"code": "def trim_wav_ms(in_path: Path, out_path: Path,\n                start_time: int, end_time: int) -> None:\n    \n\n    try:\n        trim_wav_sox(in_path, out_path, start_time, end_time)\n    except FileNotFoundError:\n        \n        trim_wav_pydub(in_path, out_path, start_time, end_time)\n    except subprocess.CalledProcessError:\n        \n        \n        \n        trim_wav_pydub(in_path, out_path, start_time, end_time)", "docstring": "Extracts part of a WAV File.\n\nFirst attempts to call sox. If sox is unavailable, it backs off to\npydub+ffmpeg.\n\nArgs:\nin_path: A path to the source file to extract a portion of\nout_path: A path describing the to-be-created WAV file.\nstart_time: The point in the source WAV file at which to begin\nextraction.\nend_time: The point in the source WAV file at which to end extraction.", "source": "juraj-google-style"}
{"code": "def read_uint8(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack('%sB' % endian)", "docstring": "Read 1 byte as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def call(self, method_name: str, args: Optional[Sequence[core_tf_types.Tensor]]=None, output_specs=None, timeout_in_ms=0):\n    if args is None:\n        args = []\n    status_or, deleter = gen_rpc_ops.rpc_call(self._client_handle, args=nest.flatten(args), method_name=method_name, timeout_in_ms=timeout_in_ms)\n    return StatusOrResult(status_or, deleter, output_specs)", "docstring": "Method to invoke remote registered functions on the connected server.\n\nServer should be started before making an RPC Call.\n\nArgs:\nmethod_name: Registered method to invoke on Server.\nargs: Input arguments for the method.\noutput_specs: Output specs for the output from method.\ntimeout_in_ms: Timeout for this call. If 0, default client timeout will be\nused.\n\nReturns:\nStatusOrResult object. This function issues the RPC call to server, it\ndoes not block for the duration of RPC. Please call is_ok, get_error or\nget_value methods on the returned object to blocked till RPC finishes.", "source": "github-repos"}
{"code": "def scalar(self, tag, value, step=None):\n    value = float(onp.array(value))\n    if (step is None):\n        step = self._step\n    else:\n        self._step = step\n    summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])\n    self.add_summary(summary, step)", "docstring": "Saves scalar value.\n\nArgs:\ntag: str: label for this data\nvalue: int/float: number to log\nstep: int: training step", "source": "codesearchnet"}
{"code": "def Run(self, conf, args):\n    if not args:\n        help_text = self.Help()\n    else:\n        help_command = args.pop()\n        print('Usage: nsscache [global options] %s [options]' % help_command)\n        print()\n        try:\n            callable_action = getattr(inspect.getmodule(self), help_command.capitalize())\n            help_text = callable_action().Help()\n        except AttributeError:\n            print('command %r is not implemented' % help_command)\n            return 1\n    print(help_text)\n    return 0", "docstring": "Run the Help command.\n\nSee Command.Run() for full documentation on the Run() method.\n\nArgs:\nconf: nss_cache.config.Config object\nargs: list of arguments to be parsed by this command.\n\nReturns:\nzero, and prints the help text as a side effectg", "source": "github-repos"}
{"code": "def shift(self, time: int) -> 'TimeslotCollection':\n        \n        slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots]\n        return TimeslotCollection(*slots)", "docstring": "Return a new TimeslotCollection shifted by `time`.\n\nArgs:\ntime: time to be shifted by", "source": "juraj-google-style"}
{"code": "def count(self, files=False):\n        \n        return len(self.files) if files else len(self.unique())", "docstring": "Returns a count of unique values or files.\n\nArgs:\nfiles (bool): When True, counts all files mapped to the Entity.\nWhen False, counts all unique values.\nReturns: an int.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    evtx_file = pyevtx.file()\n    evtx_file.set_ascii_codepage(parser_mediator.codepage)\n\n    try:\n      evtx_file.open_file_object(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open file with error: {0!s}'.format(exception))\n      return\n\n    try:\n      self._ParseRecords(parser_mediator, evtx_file)\n    finally:\n      evtx_file.close()", "docstring": "Parses a Windows XML EventLog (EVTX) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.", "source": "juraj-google-style"}
{"code": "def set_user(self, user):\n    self.session['user_id'] = user.key\n    self.session['user_data'] = user.clean_value()\n    role = self.get_role()\n    self.session['role_id'] = role.key\n    self.current.role_id = role.key\n    self.current.user_id = user.key\n    self.session['permissions'] = role.get_permissions()", "docstring": "Writes user data to session.\n\nArgs:\nuser: User object", "source": "codesearchnet"}
{"code": "def from_path(cls, path, suffix=\"\"):\n        \n\n        def _get_filepath(filename):\n            name_pattern = filename + suffix + '*' if filename != 'POTCAR' \\\n                else filename + '*'\n            paths = glob.glob(os.path.join(path, name_pattern))\n            fpath = None\n            if len(paths) >= 1:\n                \n                \n                \n                \n                paths.sort(reverse=True)\n                warning_msg = \"Multiple files detected, using %s\" \\\n                              % os.path.basename(paths[0]) if len(paths) > 1 \\\n                    else None\n                fpath = paths[0]\n            else:\n                warning_msg = \"Could not find %s\" % filename\n                if filename in ['AECCAR0', 'AECCAR2']:\n                    warning_msg += \", cannot calculate charge transfer.\"\n                elif filename == \"POTCAR\":\n                    warning_msg += \", interpret Bader results with caution.\"\n            if warning_msg:\n                warnings.warn(warning_msg)\n            return fpath\n\n        chgcar_filename = _get_filepath(\"CHGCAR\")\n        if chgcar_filename is None:\n            raise IOError(\"Could not find CHGCAR!\")\n        potcar_filename = _get_filepath(\"POTCAR\")\n        aeccar0 = _get_filepath(\"AECCAR0\")\n        aeccar2 = _get_filepath(\"AECCAR2\")\n        if (aeccar0 and aeccar2):\n            \n            chgref = Chgcar.from_file(aeccar0) + Chgcar.from_file(aeccar2)\n            chgref_filename = \"CHGREF\"\n            chgref.write_file(chgref_filename)\n        else:\n            chgref_filename = None\n        return cls(chgcar_filename, potcar_filename=potcar_filename,\n                   chgref_filename=chgref_filename)", "docstring": "Convenient constructor that takes in the path name of VASP run\nto perform Bader analysis.\n\nArgs:\npath (str): Name of directory where VASP output files are\nstored.\nsuffix (str): specific suffix to look for (e.g. '.relax1'\nfor 'CHGCAR.relax1.gz').", "source": "juraj-google-style"}
{"code": "def buffer(self, data=None, *, reserve=0, dynamic=False) -> Buffer:\n        \n\n        if type(reserve) is str:\n            reserve = mgl.strsize(reserve)\n\n        res = Buffer.__new__(Buffer)\n        res.mglo, res._size, res._glo = self.mglo.buffer(data, reserve, dynamic)\n        res._dynamic = dynamic\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Create a :py:class:`Buffer` object.\n\nArgs:\ndata (bytes): Content of the new buffer.\n\nKeyword Args:\nreserve (int): The number of bytes to reserve.\ndynamic (bool): Treat buffer as dynamic.\n\nReturns:\n:py:class:`Buffer` object", "source": "juraj-google-style"}
{"code": "def format_page(self, page, link_resolver, output):\n    debug(('Formatting page %s' % page.link.ref), 'formatting')\n    if output:\n        actual_output = os.path.join(output, 'html')\n        if (not os.path.exists(actual_output)):\n            os.makedirs(actual_output)\n    else:\n        actual_output = None\n    page.format(self.formatter, link_resolver, actual_output)", "docstring": "Called by `project.Project.format_page`, to leave full control\nto extensions over the formatting of the pages they are\nresponsible of.\n\nArgs:\npage: tree.Page, the page to format.\nlink_resolver: links.LinkResolver, object responsible\nfor resolving links potentially mentioned in `page`\noutput: str, path to the output directory.", "source": "codesearchnet"}
{"code": "def _check_enum(parameter_name, value, parameter_config):\n    enum_values = [enum['backendValue'] for enum in parameter_config['enum'].values() if ('backendValue' in enum)]\n    if (value not in enum_values):\n        raise errors.EnumRejectionError(parameter_name, value, enum_values)", "docstring": "Checks if an enum value is valid.\n\nThis is called by the transform_parameter_value function and shouldn't be\ncalled directly.\n\nThis verifies that the value of an enum parameter is valid.\n\nArgs:\nparameter_name: A string containing the name of the parameter, which is\neither just a variable name or the name with the index appended. For\nexample 'var' or 'var[2]'.\nvalue: A string containing the value passed in for the parameter.\nparameter_config: The dictionary containing information specific to the\nparameter in question. This is retrieved from request.parameters in\nthe method config.\n\nRaises:\nEnumRejectionError: If the given value is not among the accepted\nenum values in the field parameter.", "source": "codesearchnet"}
{"code": "def get_pourbaix_plot(self, limits=None, title='', label_domains=True, plt=None):\n    if (limits is None):\n        limits = [[(- 2), 16], [(- 3), 3]]\n    plt = (plt or pretty_plot(16))\n    xlim = limits[0]\n    ylim = limits[1]\n    h_line = np.transpose([[xlim[0], ((- xlim[0]) * PREFAC)], [xlim[1], ((- xlim[1]) * PREFAC)]])\n    o_line = np.transpose([[xlim[0], (((- xlim[0]) * PREFAC) + 1.23)], [xlim[1], (((- xlim[1]) * PREFAC) + 1.23)]])\n    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])\n    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])\n    ax = plt.gca()\n    ax.set_xlim(xlim)\n    ax.set_ylim(ylim)\n    lw = 3\n    plt.plot(h_line[0], h_line[1], 'r--', linewidth=lw)\n    plt.plot(o_line[0], o_line[1], 'r--', linewidth=lw)\n    plt.plot(neutral_line[0], neutral_line[1], 'k-.', linewidth=lw)\n    plt.plot(V0_line[0], V0_line[1], 'k-.', linewidth=lw)\n    for (entry, vertices) in self._pd._stable_domain_vertices.items():\n        center = np.average(vertices, axis=0)\n        (x, y) = np.transpose(np.vstack([vertices, vertices[0]]))\n        plt.plot(x, y, 'k-', linewidth=lw)\n        if label_domains:\n            plt.annotate(generate_entry_label(entry), center, ha='center', va='center', fontsize=20, color='b')\n    plt.xlabel('pH')\n    plt.ylabel('E (V)')\n    plt.title(title, fontsize=20, fontweight='bold')\n    return plt", "docstring": "Plot Pourbaix diagram.\n\nArgs:\nlimits: 2D list containing limits of the Pourbaix diagram\nof the form [[xlo, xhi], [ylo, yhi]]\ntitle (str): Title to display on plot\nlabel_domains (bool): whether to label pourbaix domains\nplt (pyplot): Pyplot instance for plotting\n\nReturns:\nplt (pyplot) - matplotlib plot object with pourbaix diagram", "source": "codesearchnet"}
{"code": "def _on_qosok(self, qosok_frame):\n        \n        for name, args in self._exchanges.items():\n            self._channel.exchange_declare(\n                exchange=name,\n                exchange_type=args[\"type\"],\n                durable=args[\"durable\"],\n                auto_delete=args[\"auto_delete\"],\n                arguments=args[\"arguments\"],\n                passive=config.conf[\"passive_declares\"],\n                callback=self._on_exchange_declareok,\n            )\n        for name, args in self._queues.items():\n            self._channel.queue_declare(\n                queue=name,\n                durable=args[\"durable\"],\n                auto_delete=args[\"auto_delete\"],\n                exclusive=args[\"exclusive\"],\n                arguments=args[\"arguments\"],\n                passive=config.conf[\"passive_declares\"],\n                callback=self._on_queue_declareok,\n            )", "docstring": "Callback invoked when the server acknowledges the QoS settings.\n\nAsserts or creates the exchanges and queues exist.\n\nArgs:\nqosok_frame (pika.spec.Basic.Qos): The frame send from the server.", "source": "juraj-google-style"}
{"code": "def fit_transform(self, input_df, normalize=True):\n        \n        \n        _df = input_df.copy(deep=False)\n\n        \n        self.normalize = normalize\n\n        \n        self.convert_to_categorical(_df)\n\n        \n        self.cat_columns = _df.select_dtypes(include=['category']).columns.tolist()\n\n        \n        _df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])\n\n        \n        if self.normalize:\n            for column in list(_df.select_dtypes(include=[np.number]).columns.values):\n                print('Normalizing column {:s}...'.format(column))\n                _df[column], _min, _max = self._normalize_series(_df[column])\n                self.norm_map[column] = (_min, _max)\n\n        \n        return self.dummy_encoder.fit_transform(_df)", "docstring": "Convert the dataframe to a matrix (numpy ndarray)\nArgs:\ninput_df (dataframe): The dataframe to convert\nnormalize (bool): Boolean flag to normalize numeric columns (default=True)", "source": "juraj-google-style"}
{"code": "def find_contexts(self, in_request=None, in_resolve=None):\n    names = self.context_names\n    if in_request:\n\n        def _in_request(name):\n            context = self.context(name)\n            packages = set((x.name for x in context.requested_packages(True)))\n            return (in_request in packages)\n        names = [x for x in names if _in_request(x)]\n    if in_resolve:\n        if isinstance(in_resolve, basestring):\n            in_resolve = PackageRequest(in_resolve)\n\n        def _in_resolve(name):\n            context = self.context(name)\n            variant = context.get_resolved_package(in_resolve.name)\n            if variant:\n                overlap = (variant.version in in_resolve.range)\n                return ((in_resolve.conflict and (not overlap)) or (overlap and (not in_resolve.conflict)))\n            else:\n                return in_resolve.conflict\n        names = [x for x in names if _in_resolve(x)]\n    return names", "docstring": "Find contexts in the suite based on search criteria.\n\nArgs:\nin_request (str): Match contexts that contain the given package in\ntheir request.\nin_resolve (str or `Requirement`): Match contexts that contain the\ngiven package in their resolve. You can also supply a conflict\nrequirement - '!foo' will match any contexts whos resolve does\nnot contain any version of package 'foo'.\n\nReturns:\nList of context names that match the search criteria.", "source": "codesearchnet"}
{"code": "def _probe_characteristics_finished(self, result):\n        \n\n        handle = result['context']['handle']\n        conn_id = result['context']['connection_id']\n\n        conndata = self._get_connection(handle, 'preparing')\n\n        if conndata is None:\n            self._logger.info('Connection disconnected before probe_char... finished, conn_id=%d',\n                              conn_id)\n            return\n\n        callback = conndata['callback']\n\n        if result['result'] is False:\n            conndata['failed'] = True\n            conndata['failure_reason'] = 'Could not probe GATT characteristics'\n            self.disconnect_async(conn_id, self._on_connection_failed)\n            return\n\n        \n        services = result['return_value']['services']\n        if TileBusService not in services:\n            conndata['failed'] = True\n            conndata['failure_reason'] = 'TileBus service not present in GATT services'\n            self.disconnect_async(conn_id, self._on_connection_failed)\n            return\n\n        conndata['chars_done_time'] = time.time()\n        service_time = conndata['services_done_time'] - conndata['connect_time']\n        char_time = conndata['chars_done_time'] - conndata['services_done_time']\n        total_time = service_time + char_time\n        conndata['state'] = 'connected'\n        conndata['services'] = services\n\n        \n        conndata['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)\n        conndata['parser'].context = conn_id\n\n        del conndata['disconnect_handler']\n\n        with self.count_lock:\n            self.connecting_count -= 1\n\n        self._logger.info(\"Total time to connect to device: %.3f (%.3f enumerating services, %.3f enumerating chars)\", total_time, service_time, char_time)\n        callback(conndata['connection_id'], self.id, True, None)", "docstring": "Callback when BLE adapter has finished probing services and characteristics for a device\n\nArgs:\nresult (dict): Result from the probe_characteristics command", "source": "juraj-google-style"}
{"code": "def from_str(self, in_str):\n        \n        parts = in_str.split(\";\")\n        for part in parts:\n            var_name, value = part.split(\":\")\n            if var_name == \"Obs_Threshold\":\n                self.obs_threshold = float(value)\n            elif var_name == \"Thresholds\":\n                self.thresholds = np.array(value.split(), dtype=float)\n                self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,\n                                                       data=np.zeros((self.thresholds.size,\n                                                                     self.contingency_tables.columns.size)))\n            elif var_name in self.contingency_tables.columns:\n                self.contingency_tables[var_name] = np.array(value.split(), dtype=int)", "docstring": "Read the DistributedROC string and parse the contingency table values from it.\n\nArgs:\nin_str (str): The string output from the __str__ method", "source": "juraj-google-style"}
{"code": "def check_steps_argument(input_data, steps, steps_name):\n    is_x_iterator = isinstance(input_data, (iterator_ops.Iterator, iterator_ops.IteratorBase))\n    if input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and (not input_data)):\n        if steps is None:\n            input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'\n            raise ValueError('When using {input_type} as input to a model, you should specify the `{steps_name}` argument.'.format(input_type=input_type_str, steps_name=steps_name))\n        return True\n    if isinstance(input_data, (data_types.DatasetV1, data_types.DatasetV2)):\n        return True\n    if steps is not None:\n        list_types = (np.ndarray, list, tuple)\n        if isinstance(input_data, list_types) or (isinstance(input_data, dict) and any((isinstance(v, list_types) for v in input_data.values()))):\n            logging.warning('When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.')\n    return False", "docstring": "Validates `steps` argument based on input data's type.\n\nThe cases when `steps` value must be provided are when\n1. input data passed is an iterator.\n2. model was built on top of symbolic tensors, input data is not\nrequired and is `None`.\n3. input data passed is a symbolic tensor.\n\nArgs:\ninput_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or\ntf.data.Dataset iterator or `None`.\nsteps: Integer or `None`. Total number of steps (batches of samples) to\nexecute.\nsteps_name: The public API's parameter name for `steps`.\n\nReturns:\nboolean, True if `steps` argument is required, else False.\n\nRaises:\nValueError: if `steps` argument is required for given input data type\nbut not provided.", "source": "github-repos"}
{"code": "def _get_upload_session_status(res):\n    response = json.loads(res.body.decode())\n    if ('sessionStatus' not in response):\n        try:\n            info = response['errorMessage']['additionalInfo']['uploader_service.GoogleRupioAdditionalInfo']['completionInfo']['customerSpecificInfo']\n            reason = '{} : {}'.format(info['status'], info['message'])\n        except KeyError:\n            reason = 'unknown reason'\n        raise exceptions.NetworkError('image upload failed: {}'.format(reason))\n    return response['sessionStatus']", "docstring": "Parse the image upload response to obtain status.\n\nArgs:\nres: http_utils.FetchResponse instance, the upload response\n\nReturns:\ndict, sessionStatus of the response\n\nRaises:\nhangups.NetworkError: If the upload request failed.", "source": "codesearchnet"}
{"code": "def from_pure(cls, z):\n    return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))", "docstring": "Creates a pure composition.\n\nArgs:\nz (int): atomic number", "source": "codesearchnet"}
{"code": "async def loadCoreModule(self, ctor, conf=None):\n        \n        if conf is None:\n            conf = {}\n\n        modu = self._loadCoreModule(ctor, conf=conf)\n\n        try:\n            await s_coro.ornot(modu.preCoreModule)\n        except asyncio.CancelledError:  \n            raise\n        except Exception:\n            logger.exception(f'module preCoreModule failed: {ctor}')\n            self.modules.pop(ctor, None)\n            return\n\n        mdefs = modu.getModelDefs()\n        self.model.addDataModels(mdefs)\n\n        cmds = modu.getStormCmds()\n        [self.addStormCmd(c) for c in cmds]\n\n        try:\n            await s_coro.ornot(modu.initCoreModule)\n        except asyncio.CancelledError:  \n            raise\n        except Exception:\n            logger.exception(f'module initCoreModule failed: {ctor}')\n            self.modules.pop(ctor, None)\n            return\n\n        await self.fire('core:module:load', module=ctor)\n\n        return modu", "docstring": "Load a single cortex module with the given ctor and conf.\n\nArgs:\nctor (str): The python module class path\nconf (dict):Config dictionary for the module", "source": "juraj-google-style"}
{"code": "def listEverything(matching=False):\n    \n    pages=pageNames()\n    if matching:\n        pages=[x for x in pages if matching in x]\n    for i,page in enumerate(pages):\n        pages[i]=\"%s%s (%s)\"%(pageFolder(page),page,getPageType(page))\n    print(\"\\n\".join(sorted(pages)))", "docstring": "Prints every page in the project to the console.\n\nArgs:\nmatching (str, optional): if given, only return names with this string in it", "source": "juraj-google-style"}
{"code": "def addFeature(self, f, conflict='error', missing='other'):\n    OPTIONS = ['error', 'ignore', 'me', 'other']\n    assert (missing in OPTIONS), 'Invalid value in `missing`.'\n    assert (conflict in OPTIONS), 'Invalid value in `missing`.'\n    if ((f.prop not in self.props) and (missing == 'error')):\n        raise Exception('Property has not set.')\n    elif ((f.prop not in self.props) and (missing in ['ignore', 'first'])):\n        return\n    if (isinstance(f.value, int) or isinstance(f.value, float)):\n        if (f.operator == '='):\n            inter1 = (f, f)\n        elif (f.operator[0] == '<'):\n            inter1 = (None, f)\n        elif (f.operator[0] == '>'):\n            inter1 = (f, None)\n        inter0 = self.props.get(f.prop, (None, None))\n        try:\n            self.props[f.prop] = Features._applyInter(inter0, inter1, conflict)\n        except Exception as e:\n            raise RADLParseException(('%s. Involved features: %s' % (e, [str(f0) for f0 in inter0])), line=f.line)\n    elif isinstance(f, SoftFeatures):\n        self.props.setdefault(f.prop, []).append(f)\n    elif (f.operator == 'contains'):\n        if ((f.prop in self.props) and (f.value.getValue('name') in self.props[f.prop])):\n            feature = self.props[f.prop][f.value.getValue('name')].clone()\n            for f0 in f.value.features:\n                feature.value.addFeature(f0, conflict, missing)\n            self.props[f.prop][f.value.getValue('name')] = feature\n        else:\n            self.props.setdefault(f.prop, {})[f.value.getValue('name')] = f\n    else:\n        value0 = self.props.get(f.prop, None)\n        if ((not value0) or (conflict == 'other')):\n            self.props[f.prop] = f\n        elif ((value0.value != f.value) and (conflict == 'error')):\n            raise RADLParseException(('Conflict adding `%s` because `%s` is already set and conflict is %s' % (f, value0, conflict)), line=f.line)", "docstring": "Add a feature.\n\nArgs:\n\n- f(Feature): feature to add.\n- conflict(str): if a property hasn't compatible values/constrains, do:\n- ``\"error\"``: raise exception.\n- ``\"ignore\"``: go on.\n- ``\"me\"``: keep the old value.\n- ``\"other\"``: set the passed value.\n- missing(str): if a property has not been set yet, do:\n- ``\"error\"``: raise exception.\n- ``\"ignore\"``: do nothning.\n- ``\"me\"``: do nothing.\n- ``\"other\"``: set the passed value.", "source": "codesearchnet"}
{"code": "def __init__(self, timestamp=None):\n    \n    if timestamp and (timestamp < 0 or timestamp > self._UINT60_MAX):\n      raise ValueError('Invalid UUID version 1 timestamp.')\n\n    super(UUIDTime, self).__init__()\n    self._precision = definitions.PRECISION_100_NANOSECONDS\n    self._timestamp = timestamp", "docstring": "Initializes an UUID version 1 timestamp.\n\nArgs:\ntimestamp (Optional[int]): UUID version 1 timestamp.\n\nRaises:\nValueError: if the UUID version 1 timestamp is invalid.", "source": "juraj-google-style"}
{"code": "def get_and_update(cls, id, **kwargs):\n        \n        model = cls.get(id)\n        for k, v in cls._preprocess_params(kwargs).items():\n            setattr(model, k, v)\n        cls.session.commit()\n        return model", "docstring": "Returns an updated instance of the service's model class.\n\nArgs:\nmodel: the model to update\n**kwargs: update parameters", "source": "juraj-google-style"}
{"code": "def _ExtractFileEntry(\n      self, path_spec, destination_path, output_writer, skip_duplicates=True):\n    \n    file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)\n\n    if not file_entry:\n      logger.warning('Unable to open file entry for path spec: {0:s}'.format(\n          path_spec.comparable))\n      return\n\n    if not self._filter_collection.Matches(file_entry):\n      return\n\n    file_entry_processed = False\n    for data_stream in file_entry.data_streams:\n      if self._abort:\n        break\n\n      self._ExtractDataStream(\n          file_entry, data_stream.name, destination_path, output_writer,\n          skip_duplicates=skip_duplicates)\n\n      file_entry_processed = True\n\n    if not file_entry_processed:\n      self._ExtractDataStream(\n          file_entry, '', destination_path, output_writer,\n          skip_duplicates=skip_duplicates)", "docstring": "Extracts a file entry.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification of the source file.\ndestination_path (str): path where the extracted files should be stored.\noutput_writer (CLIOutputWriter): output writer.\nskip_duplicates (Optional[bool]): True if files with duplicate content\nshould be skipped.", "source": "juraj-google-style"}
{"code": "def default_sample_indices_fn(metadata: VideoMetadata, num_frames=None, fps=None, **kwargs):\n    total_num_frames = metadata.total_num_frames\n    video_fps = metadata.fps\n    if num_frames is None and fps is not None:\n        num_frames = int(total_num_frames / video_fps * fps)\n        if num_frames > total_num_frames:\n            raise ValueError(f'When loading the video with fps={fps}, we computed num_frames={num_frames} which exceeds total_num_frames={total_num_frames}. Check fps or video metadata.')\n    if num_frames is not None:\n        indices = np.arange(0, total_num_frames, total_num_frames / num_frames, dtype=int)\n    else:\n        indices = np.arange(0, total_num_frames, dtype=int)\n    return indices", "docstring": "A default sampling function that replicates the logic used in get_uniform_frame_indices,\nwhile optionally handling `fps` if `num_frames` is not provided.\n\nArgs:\nmetadata (`VideoMetadata`):\n`VideoMetadata` object containing metadata about the video, such as \"total_num_frames\" or \"fps\".\nnum_frames (`int`, *optional*):\nNumber of frames to sample uniformly.\nfps (`int`, *optional*):\nDesired frames per second. Takes priority over num_frames if both are provided.\n\nReturns:\n`np.ndarray`: Array of frame indices to sample.", "source": "github-repos"}
{"code": "def limit_weights(weights, limit=0.1):\n    \n    if 1.0 / limit > len(weights):\n        raise ValueError('invalid limit -> 1 / limit must be <= len(weights)')\n\n    if isinstance(weights, dict):\n        weights = pd.Series(weights)\n\n    if np.round(weights.sum(), 1) != 1.0:\n        raise ValueError('Expecting weights (that sum to 1) - sum is %s'\n                         % weights.sum())\n\n    res = np.round(weights.copy(), 4)\n    to_rebalance = (res[res > limit] - limit).sum()\n\n    ok = res[res < limit]\n    ok += (ok / ok.sum()) * to_rebalance\n\n    res[res > limit] = limit\n    res[res < limit] = ok\n\n    if any(x > limit for x in res):\n        return limit_weights(res, limit=limit)\n\n    return res", "docstring": "Limits weights and redistributes excedent amount\nproportionally.\n\nex:\n- weights are {a: 0.7, b: 0.2, c: 0.1}\n- call with limit=0.5\n- excess 0.2 in a is ditributed to b and c\nproportionally.\n- result is {a: 0.5, b: 0.33, c: 0.167}\n\nArgs:\n* weights (Series): A series describing the weights\n* limit (float): Maximum weight allowed", "source": "juraj-google-style"}
{"code": "def _read_mode_pocsp(self, size, kind):\n    temp = self._read_binary(size)\n    data = dict(kind=kind, length=size, start=(True if int(temp[0]) else False), end=(True if int(temp[1]) else False), filler=bytes(chr(int(temp[2:], base=2)), encoding='utf-8'))\n    return data", "docstring": "Read Partial Order Connection Service Profile option.\n\nPositional arguments:\n* size - int, length of option\n* kind - int, 10 (POC-Serv Profile)\n\nReturns:\n* dict -- extracted Partial Order Connection Service Profile (POC-SP) option\n\nStructure of TCP POC-SP Option [RFC 1693][RFC 6247]:\n1 bit        1 bit    6 bits\n+----------+----------+------------+----------+--------+\n|  Kind=10 | Length=3 | Start_flag | End_flag | Filler |\n+----------+----------+------------+----------+--------+\n\nOctets      Bits        Name                    Description\n0           0     tcp.pocsp.kind          Kind (10)\n1           8     tcp.pocsp.length        Length (3)\n2          16     tcp.pocsp.start         Start Flag\n2          17     tcp.pocsp.end           End Flag\n2          18     tcp.pocsp.filler        Filler", "source": "codesearchnet"}
{"code": "def save_replay(self, replay_data, replay_dir, prefix=None):\n    \n    if not prefix:\n      replay_filename = \"\"\n    elif os.path.sep in prefix:\n      raise ValueError(\"Prefix '%s' contains '%s', use replay_dir instead.\" % (\n          prefix, os.path.sep))\n    else:\n      replay_filename = prefix + \"_\"\n    now = datetime.datetime.utcnow().replace(microsecond=0)\n    replay_filename += \"%s.SC2Replay\" % now.isoformat(\"-\").replace(\":\", \"-\")\n    replay_dir = self.abs_replay_path(replay_dir)\n    if not gfile.Exists(replay_dir):\n      gfile.MakeDirs(replay_dir)\n    replay_path = os.path.join(replay_dir, replay_filename)\n    with gfile.Open(replay_path, \"wb\") as f:\n      f.write(replay_data)\n    return replay_path", "docstring": "Save a replay to a directory, returning the path to the replay.\n\nArgs:\nreplay_data: The result of controller.save_replay(), ie the binary data.\nreplay_dir: Where to save the replay. This can be absolute or relative.\nprefix: Optional prefix for the replay filename.\n\nReturns:\nThe full path where the replay is saved.\n\nRaises:\nValueError: If the prefix contains the path seperator.", "source": "juraj-google-style"}
{"code": "def set_x_grid_info(self, x_low, x_high, num_x, xscale, xval_name):\n    self._set_grid_info('x', x_low, x_high, num_x, xscale, xval_name)\n    return", "docstring": "Set the grid values for x.\n\nCreate information for the grid of x values.\n\nArgs:\nnum_x (int): Number of points on axis.\nx_low/x_high (float): Lowest/highest value for the axis.\nxscale (str): Scale of the axis. Choices are 'log' or 'lin'.\nxval_name (str): Name representing the axis. See GenerateContainer documentation\nfor options for the name.", "source": "codesearchnet"}
{"code": "def get_parent(self, path):\n        \n\n        self.__validate_storage_path(path, projects_allowed=False)\n        path_steps = [step for step in path.split('/') if step]\n        del path_steps[-1]\n        parent_path = '/{0}'.format('/'.join(path_steps))\n        return self.api_client.get_entity_by_query(path=parent_path)", "docstring": "Get the parent entity of the entity pointed by the given path.\n\nArgs:\npath (str): The path of the entity whose parent is needed\n\nReturns:\nA JSON object of the parent entity if found.\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "juraj-google-style"}
{"code": "def init_from_acceptor(self, acceptor):\n        \n        states = sorted(\n            acceptor.states,\n            key=attrgetter('initial'),\n            reverse=True)\n        for state in states:\n            for arc in state.arcs:\n                itext = acceptor.isyms.find(arc.ilabel)\n                if itext in self.alphabet:\n                    self.add_arc(state.stateid, arc.nextstate, itext)\n            if state.final:\n                self[state.stateid].final = True\n            if state.initial:\n                self[state.stateid].initial = True", "docstring": "Adds a sink state\nArgs:\nalphabet (list): The input alphabet\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_lanczos_eig(self, compute_m=True, feed_dict=None):\n    \n    if compute_m:\n      min_eig, min_vec = self.sess.run([self.m_min_eig, self.m_min_vec], feed_dict=feed_dict)\n\n    else:\n      min_eig, min_vec = self.sess.run([self.h_min_eig, self.h_min_vec], feed_dict=feed_dict)\n\n    return min_vec, min_eig", "docstring": "Computes the min eigen value and corresponding vector of matrix M or H\nusing the Lanczos algorithm.\nArgs:\ncompute_m: boolean to determine whether we should compute eig val/vec\nfor M or for H. True for M; False for H.\nfeed_dict: dictionary mapping from TF placeholders to values (optional)\nReturns:\nmin_eig_vec: Corresponding eigen vector to min eig val\neig_val: Minimum eigen value", "source": "juraj-google-style"}
{"code": "def write_worksheets(workbook, data_list, result_info_key, identifier_keys):\n    \n\n    \n    worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)\n\n    for key in worksheet_keys:\n\n        title = key.split('/')[1]\n\n        title = utilities.convert_snake_to_title_case(title)\n\n        title = KEY_TO_WORKSHEET_MAP.get(title, title)\n\n        if key == 'property/nod':\n            \n            create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)\n        else:\n            \n\n            \n            worksheet = workbook.create_sheet(title=title[:31])\n\n            processed_data = process_data(key, data_list, result_info_key, identifier_keys)\n\n            write_data(worksheet, processed_data)\n\n    \n    workbook.remove_sheet(workbook.active)", "docstring": "Writes rest of the worksheets to workbook.\n\nArgs:\nworkbook: workbook to write into\ndata_list: Analytics API data as a list of dicts\nresult_info_key: the key in api_data dicts that contains the data results\nidentifier_keys: the list of keys used as requested identifiers\n(address, zipcode, block_id, etc)", "source": "juraj-google-style"}
{"code": "def enable_eager_op_as_function(fn: _F) -> _F:\n\n    def wrapper(*args, **kwargs):\n        return fn(*args, **kwargs)\n    return wrapper", "docstring": "Returns the same fn. This will be removed once all usages are removed.\n\nArgs:\nfn: the function to be wrapped.\n\nReturns:\nThe wrapped function.", "source": "github-repos"}
{"code": "def yaml(modules_to_register: Iterable[Any]=None, classes_to_register: Iterable[Any]=None) -> ruamel.yaml.YAML:\n    yaml = ruamel.yaml.YAML(typ='rt')\n    yaml.representer.add_representer(np.ndarray, numpy_to_yaml)\n    yaml.constructor.add_constructor('!numpy_array', numpy_from_yaml)\n    yaml = register_module_classes(yaml=yaml, modules=modules_to_register)\n    yaml = register_classes(yaml=yaml, classes=classes_to_register)\n    return yaml", "docstring": "Create a YAML object for loading a YAML configuration.\n\nArgs:\nmodules_to_register: Modules containing classes to be registered with the YAML object. Default: None.\nclasses_to_register: Classes to be registered with the YAML object. Default: None.\nReturns:\nA newly creating YAML object, configured as apporpirate.", "source": "codesearchnet"}
{"code": "def dedupe_all_lists(obj, exclude_keys=()):\n    \n    squared_dedupe_len = 10\n    if isinstance(obj, dict):\n        new_obj = {}\n        for key, value in obj.items():\n            if key in exclude_keys:\n                new_obj[key] = value\n            else:\n                new_obj[key] = dedupe_all_lists(value)\n        return new_obj\n    elif isinstance(obj, (list, tuple, set)):\n        new_elements = [dedupe_all_lists(v) for v in obj]\n        if len(new_elements) < squared_dedupe_len:\n            new_obj = dedupe_list(new_elements)\n        else:\n            new_obj = dedupe_list_of_dicts(new_elements)\n        return type(obj)(new_obj)\n    else:\n        return obj", "docstring": "Recursively remove duplucates from all lists.\n\nArgs:\nobj: collection to deduplicate\nexclude_keys (Container[str]): key names to ignore for deduplication", "source": "juraj-google-style"}
{"code": "def output_of(*cmd: Optional[str], **kwargs) -> str:\n    result = cast(str, run_cmd(*cmd, log_run_to_stderr=False, out=TeeCapture(), **kwargs).out)\n    if result.endswith('\\n'):\n        result = result[:(- 1)]\n    return result", "docstring": "Invokes a subprocess and returns its output as a string.\n\nArgs:\ncmd: Components of the command to execute, e.g. [\"echo\", \"dog\"].\n**kwargs: Extra arguments for asyncio.create_subprocess_shell, such as\na cwd (current working directory) argument.\n\nReturns:\nA (captured output, captured error output, return code) triplet. The\ncaptured outputs will be None if the out or err parameters were not set\nto an instance of TeeCapture.\n\nRaises:\nsubprocess.CalledProcessError: The process returned a non-zero error\ncode and raise_on_fail was set.", "source": "codesearchnet"}
{"code": "def create_virtual_env(venv_path: str, requirements_paths: Iterable[str], python_path: str, verbose: bool) -> None:\n    shell_tools.run_cmd('virtualenv', (None if verbose else '--quiet'), '-p', python_path, venv_path, out=sys.stderr)\n    pip_path = os.path.join(venv_path, 'bin', 'pip')\n    for req_path in requirements_paths:\n        shell_tools.run_cmd(pip_path, 'install', (None if verbose else '--quiet'), '-r', req_path, out=sys.stderr)", "docstring": "Creates a new virtual environment and then installs dependencies.\n\nArgs:\nvenv_path: Where to put the virtual environment's state.\nrequirements_paths: Location of requirements files to -r install.\npython_path: The python binary to use.\nverbose: When set, more progress output is produced.", "source": "codesearchnet"}
{"code": "def _create_simple_tf1_conv_model(self, input_shape: Sequence[int]=(1, 3, 4, 3), filter_shape: Sequence[int]=(2, 3, 3, 2), use_variable_for_filter=False) -> Tuple[core.Tensor, core.Tensor]:\n    in_placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)\n    filters = random_ops.random_uniform(shape=filter_shape, minval=-1.0, maxval=1.0)\n    if use_variable_for_filter:\n        filters = variables.Variable(filters)\n    output_tensor = nn_ops.conv2d(in_placeholder, filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')\n    return (in_placeholder, output_tensor)", "docstring": "Creates a basic convolution model.\n\nThis is intended to be used for TF1 (graph mode) tests.\n\nArgs:\ninput_shape: Shape of the input tensor.\nfilter_shape: Shape of the filter.\nuse_variable_for_filter: Setting this to `True` makes the filter for the\nconv operation a `tf.Variable`.\n\nReturns:\nin_placeholder: Input tensor placeholder.\noutput_tensor: The resulting tensor of the convolution operation.", "source": "github-repos"}
{"code": "def get_jobs(self, name=None):\n        \n        if self.applicationResource:\n            return self._get_elements(self.jobs, 'jobs', Job, None, name)\n        else:\n            return []", "docstring": "Retrieves jobs running on this resource in its instance.\n\nArgs:\nname (str, optional): Only return jobs containing property **name** that matches `name`. `name` can be a\nregular expression. If `name` is not supplied, then all jobs are returned.\n\nReturns:\nlist(Job): A list of jobs matching the given `name`.\n\n.. note:: If ``applicationResource`` is `False` an empty list is returned.\n.. versionadded:: 1.9", "source": "juraj-google-style"}
{"code": "def relative_to_contrib(diff, project):\n    path = pathlib.Path(diff.b_path)\n    contrib_path = project.contrib_module_path\n    return path.relative_to(contrib_path)", "docstring": "Compute relative path of changed file to contrib dir\n\nArgs:\ndiff (git.diff.Diff): file diff\nproject (Project): project\n\nReturns:\nPath", "source": "codesearchnet"}
{"code": "def es_get_class_defs(cls_def, cls_name):\n    \n    rtn_dict = {key: value for key, value in cls_def.items() \\\n                if key.startswith(\"kds_es\")}\n    for key in rtn_dict:\n        del cls_def[key]\n    return rtn_dict", "docstring": "Reads through the class defs and gets the related es class\ndefintions\n\nArgs:\n-----\nclass_defs: RdfDataset of class definitions", "source": "juraj-google-style"}
{"code": "def optionally(self, entity_type, attribute_name=None):\n    if (not attribute_name):\n        attribute_name = entity_type\n    self.optional += [(entity_type, attribute_name)]\n    return self", "docstring": "Parsed intents from this parser can optionally include an entity of the provided type.\n\nArgs:\nentity_type(str): an entity type\nattribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.\n\nReturns:\nself: to continue modifications.", "source": "codesearchnet"}
{"code": "def publishFsFromMXD(self, fs_config):\n        \n        fs = None\n        res = None\n        resItm = None\n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        if self.securityhandler.is_portal:\n            url = self.securityhandler.org_url\n        else:\n            url = 'http:\n        try:\n            res = []\n            if isinstance(fs_config, list):\n                for fs in fs_config:\n                    if 'ReplaceTag' in fs:\n\n                        resItm = {\"ReplaceTag\":fs['ReplaceTag'] }\n                    else:\n                        resItm = {\"ReplaceTag\":\"{FeatureService}\" }\n\n                    resItm['FSInfo'] = self._publishFSFromMXD(config=fs, url=url)\n\n                    if not resItm['FSInfo'] is None and 'url' in resItm['FSInfo']:\n                        print (\"%s created\" % resItm['FSInfo']['url'])\n                        res.append(resItm)\n                    else:\n                        print (str(resItm['FSInfo']))\n\n            else:\n                if 'ReplaceTag' in fs_config:\n\n                    resItm = {\"ReplaceTag\":fs_config['ReplaceTag'] }\n                else:\n                    resItm = {\"ReplaceTag\":\"{FeatureService}\" }\n\n                resItm['FSInfo'] = self._publishFSFromMXD(config=fs_config, url=url)\n\n                if 'url' in resItm['FSInfo']:\n                    print (\"%s created\" % resItm['FSInfo']['url'])\n                    res.append(resItm)\n                else:\n                    print (str(resItm['FSInfo']))\n\n            return res\n        except common.ArcRestHelperError as e:\n            raise e\n        except Exception as e:\n\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                        \"function\": \"publishFsFromMXD\",\n                        \"line\": line,\n                        \"filename\":  filename,\n                        \"synerror\": synerror,\n                                        }\n                                        )\n\n        finally:\n            resItm = None\n            fs = None\n\n            del resItm\n            del fs\n\n            gc.collect()", "docstring": "Publishes the layers in a MXD to a feauture service.\n\nArgs:\nfs_config (list): A list of JSON configuration feature service details to publish.\nReturns:\ndict: A dictionary of results objects.", "source": "juraj-google-style"}
{"code": "def validate_config_must_have(config, required_keys):\n    missing_keys = (set(required_keys) - set(config))\n    if (len(missing_keys) > 0):\n        raise Exception(('Invalid config with missing keys \"%s\"' % ', '.join(missing_keys)))", "docstring": "Validate a config dictionary to make sure it has all of the specified keys\n\nArgs:\nconfig: the config to validate.\nrequired_keys: the list of possible keys that config must include.\n\nRaises:\nException if the config does not have any of them.", "source": "codesearchnet"}
{"code": "def valid_ip_prefix(ip_prefix):\n    \n    try:\n        ip_prefix = ipaddress.ip_network(ip_prefix)\n    except ValueError:\n        return False\n    else:\n        if ip_prefix.version == 4 and ip_prefix.max_prefixlen != 32:\n            return False\n        if ip_prefix.version == 6 and ip_prefix.max_prefixlen != 128:\n            return False\n        return True", "docstring": "Perform a sanity check on ip_prefix.\n\nArguments:\nip_prefix (str): The IP-Prefix to validate\n\nReturns:\nTrue if ip_prefix is a valid IPv4 address with prefix length 32 or a\nvalid IPv6 address with prefix length 128, otherwise False", "source": "juraj-google-style"}
{"code": "def monkhorst_automatic(cls, structure, ngkpt,\n                            use_symmetries=True, use_time_reversal=True, chksymbreak=None, comment=None):\n        \n        sg = SpacegroupAnalyzer(structure)\n        \n        \n        \n        nshiftk = 1\n        \n        shiftk = 3*(0.5,)\n\n        \n        \n        \n\n        return cls.monkhorst(\n            ngkpt, shiftk=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,\n            chksymbreak=chksymbreak, comment=comment if comment else \"Automatic Monkhorst-Pack scheme\")", "docstring": "Convenient static constructor for an automatic Monkhorst-Pack mesh.\n\nArgs:\nstructure: :class:`Structure` object.\nngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.\nuse_symmetries: Use spatial symmetries to reduce the number of k-points.\nuse_time_reversal: Use time-reversal symmetry to reduce the number of k-points.\n\nReturns:\n:class:`KSampling` object.", "source": "juraj-google-style"}
{"code": "def show(self, objtype, objid):\n    url = self._object_url(objtype, int(objid))\n    return self._make_request(url, method='get')", "docstring": "Query for a specific resource by ID\n\nArgs:\nobjtype (str): object type, e.g. 'device', 'interface'\nobjid (int): object ID (DeviceID, etc.)\nReturns:\nA dict with that object\nRaises:\nrequests.exceptions.HTTPError", "source": "codesearchnet"}
{"code": "def restore_saved_local_scope(\n        self,\n        saved_variables,\n        args_mapping,\n        line_number\n    ):\n        \n        restore_nodes = list()\n        for var in saved_variables:\n            \n            if var.RHS in args_mapping:\n                \n                restore_nodes.append(RestoreNode(\n                    var.RHS + ' = ' + args_mapping[var.RHS],\n                    var.RHS,\n                    [var.LHS],\n                    line_number=line_number,\n                    path=self.filenames[-1]\n                ))\n            else:\n                \n                restore_nodes.append(RestoreNode(\n                    var.RHS + ' = ' + var.LHS,\n                    var.RHS,\n                    [var.LHS],\n                    line_number=line_number,\n                    path=self.filenames[-1]\n                ))\n\n        \n        for node, successor in zip(restore_nodes, restore_nodes[1:]):\n            node.connect(successor)\n\n        if restore_nodes:\n            \n            self.nodes[-1].connect(restore_nodes[0])\n            self.nodes.extend(restore_nodes)\n\n        return restore_nodes", "docstring": "Restore the previously saved variables to their original values.\n\nArgs:\nsaved_variables(list[SavedVariable])\nargs_mapping(dict): A mapping of call argument to definition argument.\nline_number(int): Of the def of the function call about to be entered into.\n\nNote: We do not need connect_if_allowed because of the\npreceding call to save_local_scope.", "source": "juraj-google-style"}
{"code": "def _has_connection(hostname, port):\n    try:\n        host = socket.gethostbyname(hostname)\n        socket.create_connection((host, port), 2)\n        return True\n    except Exception:\n        return False", "docstring": "Checks if internet connection exists to host via specified port.\n\nIf any exception is raised while trying to open a socket this will return\nfalse.\n\nArgs:\nhostname (str): Hostname to connect to.\nport (int): Port to connect to\n\nReturns:\nbool: Has connection or not", "source": "codesearchnet"}
{"code": "def calculate_bias_shape(input_shape, bias_dims):\n    input_rank = len(input_shape)\n    if (bias_dims is None):\n        return input_shape[1:]\n    elif (not bias_dims):\n        return ()\n    else:\n        bias_shape = ([1] * input_rank)\n        for dim in bias_dims:\n            dim %= input_rank\n            if (dim == 0):\n                raise ValueError('Cannot apply bias across the minibatch dimension.')\n            bias_shape[dim] = input_shape[dim]\n        start = input_rank\n        for dim in xrange(1, input_rank):\n            if (bias_shape[dim] != 1):\n                start = dim\n                break\n        return tuple(bias_shape[start:])", "docstring": "Calculate `bias_shape` based on the `input_shape` and `bias_dims`.\n\nArgs:\ninput_shape: Shape of the input being passed into the module. The leading\ndimension is the minibatch size.\nbias_dims: The dimensions that bias should be applied over. The remaining\ndimensions will get broadcasted over.\n\nReturns:\nbias_shape: Tuple corresponding to the shape of bias Variable to create.\n\nRaises:\nValueError: If the user attempts to add bias over the minibatch dimension,\ne.g. `bias_dims=[0]`.", "source": "codesearchnet"}
{"code": "def _ScanFileSystem(self, scan_node, base_path_specs):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.SourceScannerError(\n          'Invalid or missing file system scan node.')\n\n    base_path_specs.append(scan_node.path_spec)", "docstring": "Scans a file system scan node for file systems.\n\nArgs:\nscan_node (SourceScanNode): file system scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nSourceScannerError: if the scan node is invalid.", "source": "juraj-google-style"}
{"code": "def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):\n    \n    out = tarfile.open(filepath, mode='w:bz2')\n    logger.info(\"Writing packets to tarball at \" + filepath)\n    packet_count = 0\n    try:\n        for (ivorn, xml) in ivorn_xml_tuples:\n            out.addfile(*bytestring_to_tar_tuple(\n                filename_from_ivorn(ivorn),\n                xml\n            ))\n            packet_count += 1\n    finally:\n        out.close()\n    return packet_count", "docstring": "Iterate over a series of ivorn / xml bstring tuples and write to bz'd tarball.\n\nArgs:\nivorn_xml_tuples (iterable): [(ivorn,xml)]\nAn iterable (e.g. list) of tuples containing two entries -\nan ivorn string and an xml bytestring.\nfilepath (string): Path to the new tarball to create. Typically of form\n'/path/to/foo.tar.bz2'\nReturns\npacket_count (int): Number of packets written to tarball", "source": "juraj-google-style"}
{"code": "def _check_tf2_flags(flags):\n    if not flags.keras_model_file and (not flags.saved_model_dir):\n        raise ValueError('one of the arguments --saved_model_dir --keras_model_file is required')", "docstring": "Checks the parsed and unparsed flags to ensure they are valid in 2.X.\n\nArgs:\nflags: argparse.Namespace object containing TFLite flags.\n\nRaises:\nValueError: Invalid flags.", "source": "github-repos"}
{"code": "def make_worksheet(self, sheet_name=None):\n        \n\n        if sheet_name is None:\n            sheet_name = self.table_name\n        if not sheet_name:\n            sheet_name = \"\"\n\n        self._stream = self.workbook.add_worksheet(sheet_name)\n        self._current_data_row = self._first_data_row", "docstring": "Make a worksheet to the current workbook.\n\nArgs:\nsheet_name (str):\nName of the worksheet to create. The name will be automatically generated\n(like ``\"Sheet1\"``) if the ``sheet_name`` is empty.", "source": "juraj-google-style"}
{"code": "def __init__(self, base_url, object_factory, single_request_timeout=None):\n        \n        check_type(base_url, basestring, may_be_none=False)\n        check_type(single_request_timeout, int)\n\n        super(AccessTokensAPI, self).__init__()\n\n        self._base_url = str(validate_base_url(base_url))\n        self._single_request_timeout = single_request_timeout\n        self._endpoint_url = urllib.parse.urljoin(self.base_url, API_ENDPOINT)\n        self._request_kwargs = {\"timeout\": single_request_timeout}\n\n        self._object_factory = object_factory", "docstring": "Initialize an AccessTokensAPI object with the provided RestSession.\n\nArgs:\nbase_url(basestring): The base URL the API endpoints.\nsingle_request_timeout(int): Timeout in seconds for the API\nrequests.\n\nRaises:\nTypeError: If the parameter types are incorrect.", "source": "juraj-google-style"}
{"code": "def spt(points, max_dist_error, max_speed_error):\n    if (len(points) <= 2):\n        return points\n    else:\n        is_error = False\n        e = 1\n        while ((e < len(points)) and (not is_error)):\n            i = 1\n            while ((i < e) and (not is_error)):\n                delta_e = (time_dist(points[e], points[0]) * I_3600)\n                delta_i = (time_dist(points[i], points[0]) * I_3600)\n                di_de = 0\n                if (delta_e != 0):\n                    di_de = (delta_i / delta_e)\n                d_lat = (points[e].lat - points[0].lat)\n                d_lon = (points[e].lon - points[0].lon)\n                point = Point((points[0].lat + (d_lat * di_de)), (points[0].lon + (d_lon * di_de)), None)\n                dt1 = time_dist(points[i], points[(i - 1)])\n                if (dt1 == 0):\n                    dt1 = 1e-09\n                dt2 = time_dist(points[(i + 1)], points[i])\n                if (dt2 == 0):\n                    dt2 = 1e-09\n                v_i_1 = (loc_dist(points[i], points[(i - 1)]) / dt1)\n                v_i = (loc_dist(points[(i + 1)], points[i]) / dt2)\n                if ((loc_dist(points[i], point) > max_dist_error) or (abs((v_i - v_i_1)) > max_speed_error)):\n                    is_error = True\n                else:\n                    i = (i + 1)\n            if is_error:\n                return ([points[0]] + spt(points[i:len(points)], max_dist_error, max_speed_error))\n            e = (e + 1)\n        if (not is_error):\n            return [points[0], points[(len(points) - 1)]]", "docstring": "A combination of both `td_sp` and `td_tr`\n\nDetailed in,\nSpatiotemporal Compression Techniques for Moving Point Objects,\nNirvana Meratnia and Rolf A. de By, 2004,\nin Advances in Database Technology - EDBT 2004: 9th\nInternational Conference on Extending Database Technology,\nHeraklion, Crete, Greece, March 14-18, 2004\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nmax_dist_error (float): max distance error, in meters\nmax_speed_error (float): max speed error, in km/h\nReturns:\n:obj:`list` of :obj:`Point`", "source": "codesearchnet"}
{"code": "def _check_callback(callback):\n    \n    \n    if inspect.isclass(callback):\n        callback_object = callback()\n        if not callable(callback_object):\n            raise ValueError(\n                \"Callback must be a class that implements __call__ or a function.\"\n            )\n    elif callable(callback):\n        callback_object = callback\n    else:\n        raise ValueError(\n            \"Callback must be a class that implements __call__ or a function.\"\n        )\n\n    return callback_object", "docstring": "Turns a callback that is potentially a class into a callable object.\n\nArgs:\ncallback (object): An object that might be a class, method, or function.\nif the object is a class, this creates an instance of it.\n\nRaises:\nValueError: If an instance can't be created or it isn't a callable object.\nTypeError: If the class requires arguments to be instantiated.\n\nReturns:\ncallable: A callable object suitable for use as the consumer callback.", "source": "juraj-google-style"}
{"code": "def main(self, ignored_argv=('',)):\n    self._install_signal_handler(signal.SIGTERM, 'SIGTERM')\n    if self.flags.inspect:\n        logger.info('Not bringing up TensorBoard, but inspecting event files.')\n        event_file = os.path.expanduser(self.flags.event_file)\n        efi.inspect(self.flags.logdir, event_file, self.flags.tag)\n        return 0\n    if self.flags.version_tb:\n        print(version.VERSION)\n        return 0\n    try:\n        server = self._make_server()\n        sys.stderr.write(('TensorBoard %s at %s (Press CTRL+C to quit)\\n' % (version.VERSION, server.get_url())))\n        sys.stderr.flush()\n        self._register_info(server)\n        server.serve_forever()\n        return 0\n    except TensorBoardServerException as e:\n        logger.error(e.msg)\n        sys.stderr.write(('ERROR: %s\\n' % e.msg))\n        sys.stderr.flush()\n        return (- 1)", "docstring": "Blocking main function for TensorBoard.\n\nThis method is called by `tensorboard.main.run_main`, which is the\nstandard entrypoint for the tensorboard command line program. The\nconfigure() method must be called first.\n\nArgs:\nignored_argv: Do not pass. Required for Abseil compatibility.\n\nReturns:\nProcess exit code, i.e. 0 if successful or non-zero on failure. In\npractice, an exception will most likely be raised instead of\nreturning non-zero.\n\n:rtype: int", "source": "codesearchnet"}
{"code": "def _check_remote_command(self, destination, timeout_ms, success_msgs=None):\n    timeout = timeouts.PolledTimeout.from_millis(timeout_ms)\n    stream = self._adb_connection.open_stream(destination, timeout)\n    if (not stream):\n        raise usb_exceptions.AdbStreamUnavailableError('Service %s not supported', destination)\n    try:\n        message = stream.read(timeout_ms=timeout)\n        if any([(m in message) for m in success_msgs]):\n            return\n    except usb_exceptions.CommonUsbError:\n        if destination.startswith('reboot:'):\n            return\n        raise\n    raise usb_exceptions.AdbRemoteError('Device message: %s', message)", "docstring": "Open a stream to destination, check for remote errors.\n\nUsed for reboot, remount, and root services.  If this method returns, the\ncommand was successful, otherwise an appropriate error will have been\nraised.\n\nArgs:\ndestination: Stream destination to open.\ntimeout_ms: Timeout in milliseconds for the operation.\nsuccess_msgs: If provided, a list of messages that, if returned from the\ndevice, indicate success, so don't treat them as errors.\n\nRaises:\nAdbRemoteError: If the remote command fails, will contain any message we\ngot back from the device.\nAdbStreamUnavailableError: The service requested isn't supported.", "source": "codesearchnet"}
{"code": "def add_newlines(f, output, char):\n    \n    line_count = get_line_count(f)\n    f = open(f, 'r+')\n    output = open(output, 'r+')\n    for line in range(line_count):\n        string = f.readline()\n        string = re.sub(char, char + '\\n', string)\n        output.write(string)", "docstring": "Adds line breaks after every occurance of a given character in a file.\n\nArgs:\nf: string, path to input file.\n\noutput: string, path to output file.\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def decode_conjure_bean_type(cls, obj, conjure_type):\n        \n        deserialized = {}  \n        for (python_arg_name, field_definition) \\\n                in conjure_type._fields().items():\n            field_identifier = field_definition.identifier\n\n            if field_identifier not in obj or obj[field_identifier] is None:\n                cls.check_null_field(\n                    obj, deserialized, python_arg_name, field_definition)\n            else:\n                value = obj[field_identifier]\n                field_type = field_definition.field_type\n                deserialized[python_arg_name] = \\\n                    cls.do_decode(value, field_type)\n        return conjure_type(**deserialized)", "docstring": "Decodes json into a conjure bean type (a plain bean, not enum\nor union).\n\nArgs:\nobj: the json object to decode\nconjure_type: a class object which is the bean type\nwe're decoding into\nReturns:\nA instance of a bean of type conjure_type.", "source": "juraj-google-style"}
{"code": "def _extract_from(raw_json, pandas_options=None):\n    data_frames = []\n    if (pandas_options is None):\n        pandas_options = {}\n    columns = pandas_options.pop('columns', None)\n    (columns, header_line_number) = _convert_pandas_csv_options(pandas_options, columns)\n    for table in raw_json:\n        list_data = [[(np.nan if (not e['text']) else e['text']) for e in row] for row in table['data']]\n        _columns = columns\n        if (isinstance(header_line_number, int) and (not columns)):\n            _columns = list_data.pop(header_line_number)\n            _columns = [('' if (e is np.nan) else e) for e in _columns]\n        data_frames.append(pd.DataFrame(data=list_data, columns=_columns, **pandas_options))\n    return data_frames", "docstring": "Extract tables from json.\n\nArgs:\nraw_json (list):\nDecoded list from tabula-java JSON.\npandas_options (dict optional):\npandas options for `pd.DataFrame()`", "source": "codesearchnet"}
{"code": "def create_atomic_observe_operations(self, states, actions, internals, terminal, reward, index):\n    num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int'))\n    increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))\n    increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))\n    with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):\n        states = util.map_tensors(fn=tf.stop_gradient, tensors=states)\n        internals = util.map_tensors(fn=tf.stop_gradient, tensors=internals)\n        actions = util.map_tensors(fn=tf.stop_gradient, tensors=actions)\n        terminal = tf.stop_gradient(input=terminal)\n        reward = tf.stop_gradient(input=reward)\n        observation = self.fn_observe_timestep(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward)\n    with tf.control_dependencies(control_inputs=(observation,)):\n        self.unbuffered_episode_output = (self.global_episode + 0)", "docstring": "Returns the tf op to fetch when unbuffered observations are passed in.\n\nArgs:\nstates (any): One state (usually a value tuple) or dict of states if multiple states are expected.\nactions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\ninternals (any): Internal list.\nterminal (bool): boolean indicating if the episode terminated after the observation.\nreward (float): scalar reward that resulted from executing the action.\n\nReturns: Tf op to fetch when `observe()` is called.", "source": "codesearchnet"}
{"code": "def set_of_vars(arg_plot):\n    \n    return set(var for var in arg_plot.split(',') if var in phyvars.PLATES)", "docstring": "Build set of needed variables.\n\nArgs:\narg_plot (str): string with variable names separated with ``,``.\nReturns:\nset of str: set of variables.", "source": "juraj-google-style"}
{"code": "def set_value_at_field(msg: message.Message, field: Union[descriptor.FieldDescriptor, str], value: Any):\n    if isinstance(field, str):\n        field = _field_descriptor_for_name(msg, field)\n    if field_is_repeated(field):\n        if field_is_primitive(field):\n            getattr(msg, field.name)[:] = value\n        else:\n            del getattr(msg, field.name)[:]\n            getattr(msg, field.name).extend(value)\n    elif field_is_primitive(field):\n        setattr(msg, field.name, value)\n    else:\n        getattr(msg, field.name).CopyFrom(value)", "docstring": "Sets value at the field.\n\nArgs:\nmsg: The message whose field to mutate.\nfield: The FieldDescriptor or name of the field to mutate.\nvalue: The value to set.", "source": "github-repos"}
{"code": "def __generate_object_term__(self, datatype, value):\n        \n        if datatype == NS_MGR.xsd.anyURI.rdflib:\n            term = rdflib.URIRef(value)\n        elif datatype:\n            term = rdflib.Literal(value, datatype=datatype)\n        else:\n            term = rdflib.Literal(value)\n        return term", "docstring": "Internal method takes a datatype (can be None) and returns\nthe RDF Object Term\n\nArgs:\n\n-----\ndatatype: None, or rdflib.URIRef\nvalue: Varys depending on ingester", "source": "juraj-google-style"}
{"code": "def ResolveFlats(\n  dem,\n  in_place = False\n):\n  \n  if type(dem) is not rdarray:\n    raise Exception(\"A richdem.rdarray or numpy.ndarray is required!\")\n\n  if not in_place:\n    dem = dem.copy()\n\n  _AddAnalysis(dem, \"ResolveFlats(dem, in_place={in_place})\".format(in_place=in_place))\n\n  demw = dem.wrap()\n\n  _richdem.rdResolveFlatsEpsilon(demw)\n\n  dem.copyFromWrapped(demw)\n\n  if not in_place:\n    return dem", "docstring": "Attempts to resolve flats by imposing a local gradient\n\nArgs:\ndem          (rdarray):   An elevation model\nin_place (bool):   If True, the DEM is modified in place and there is\nno return; otherwise, a new, altered DEM is returned.\n\nReturns:\nDEM modified such that all flats drain.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n    original_shape = hidden_states.shape\n    hidden_states = hidden_states.view(-1, hidden_states.size(-1))\n    logits = self.router(hidden_states)\n    top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1)\n    scores = nn.functional.softmax(top_logits, dim=-1)\n    original_dtype = top_indices.dtype\n    tokens_per_expert = torch.histc(top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1).to(original_dtype)\n    indices = top_indices\n    flatten_indices = indices.view(-1)\n    sorted_indices = torch.argsort(flatten_indices)\n    permuted_tokens = hidden_states.index_select(0, sorted_indices \n    expert_output = self.experts(permuted_tokens, tokens_per_expert)\n    unpermuted_tokens = torch.zeros((scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device)\n    unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)\n    unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))\n    output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape)\n    shared_expert_output = self.shared_experts(hidden_states.view(original_shape))\n    return output + shared_expert_output", "docstring": "Forward pass of the MoE Layer.\n\nArgs:\nhidden_states (`torch.Tensor`):\nInput tensor of shape (batch_size, sequence_length, hidden_size).\n\nReturns:\ntorch.Tensor: Output tensor after passing through the MoE layer.\n\nProcess:\n1. Route tokens to experts using the router.\n2. Permute tokens based on routing decisions.\n3. Process tokens through experts.\n4. Unpermute and combine expert outputs.\n5. Add shared expert output to the final result.", "source": "github-repos"}
{"code": "def getHostCaPath(self, name):\n        \n        cert = self.getHostCert(name)\n        if cert is None:\n            return None\n\n        return self._getCaPath(cert)", "docstring": "Gets the path to the CA certificate that issued a given host keypair.\n\nArgs:\nname (str): The name of the host keypair.\n\nExamples:\nGet the path to the CA cert which issue the cert for \"myhost\":\n\nmypath = cdir.getHostCaPath('myhost')\n\nReturns:\nstr: The path if exists.", "source": "juraj-google-style"}
{"code": "def _convert_update_row(row):\n    \n    after_values = row['after_values']  \n    before_values = row['before_values']  \n    values = after_values\n    return {\n        'values': values,\n        'updated_values': _get_updated_values(before_values, after_values)\n    }", "docstring": "Convert a row for update event\n\nArgs:\nrow (dict): event row data", "source": "juraj-google-style"}
{"code": "def blit(self, dest: tcod.console.Console, fill_fore: bool=True, fill_back: bool=True) -> None:\n    if (not dest):\n        dest = tcod.console.Console._from_cdata(ffi.NULL)\n    if ((dest.width != self.width) or (dest.height != self.height)):\n        raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')\n    if fill_back:\n        bg = dest.bg.ravel()\n        bg[0::3] = self.back_r\n        bg[1::3] = self.back_g\n        bg[2::3] = self.back_b\n    if fill_fore:\n        fg = dest.fg.ravel()\n        fg[0::3] = self.fore_r\n        fg[1::3] = self.fore_g\n        fg[2::3] = self.fore_b\n        dest.ch.ravel()[:] = self.char", "docstring": "Use libtcod's \"fill\" functions to write the buffer to a console.\n\nArgs:\ndest (Console): Console object to modify.\nfill_fore (bool):\nIf True, fill the foreground color and characters.\nfill_back (bool):\nIf True, fill the background color.", "source": "codesearchnet"}
{"code": "def write(self, destination, filename, template_name, **kwargs):\n    template = self.env.get_template(template_name)\n    content = template.render(kwargs)\n    super(TemplateFileWriter, self).write(destination=destination, filename=filename, content=content)", "docstring": "Write a file according to the template name\n\nArgs:\ndestination (string): the destination location\nfilename (string): the filename that will be written\ntemplate_name (string): the name of the template\nkwargs (dict): all attribute that will be passed to the template", "source": "codesearchnet"}
{"code": "def load_videos(template, video_length, frame_shape):\n    filenames = tf.gfile.Glob(template)\n    if (not filenames):\n        raise ValueError('no files found.')\n    filenames = sorted(filenames)\n    dataset_len = len(filenames)\n    filenames = tf.constant(filenames)\n    dataset = tf.data.Dataset.from_tensor_slices(filenames)\n    dataset = dataset.apply(tf.data.experimental.map_and_batch((lambda filename: load_image_map_function(filename, frame_shape)), video_length, drop_remainder=True))\n    return (dataset, dataset_len)", "docstring": "Loads videos from files.\n\nArgs:\ntemplate: template string for listing the image files.\nvideo_length: length of the video.\nframe_shape: shape of each frame.\n\nReturns:\ndataset: the tf dataset frame by frame.\ndataset_len: number of the items which is the number of image files.\n\nRaises:\nValueError: if no files found.", "source": "codesearchnet"}
{"code": "def distances(self, word, words):\n    point = self[word]\n    vectors = np.asarray([self[w] for w in words])\n    diff = (vectors - point)\n    distances = np.linalg.norm(diff, axis=1)\n    return distances", "docstring": "Calculate eucledean pairwise distances between `word` and `words`.\n\nArgs:\nword (string): single word.\nwords (list): list of strings.\n\nReturns:\nnumpy array of the distances.\n\nNote:\nL2 metric is used to calculate distances.", "source": "codesearchnet"}
{"code": "def create_from_json(cls, json_data):\n        \n        msa = Msa()\n        msa.msa = json_data[\"msa_info\"][\"msa\"]\n        msa.meta = json_data[\"meta\"] if \"meta\" in json_data else None\n\n        msa.component_results = _create_component_results(json_data, \"msa_info\")\n\n        return msa", "docstring": "Deserialize msa json data into a Msa object\n\nArgs:\njson_data (dict): The json data for this msa\n\nReturns:\nMsa object", "source": "juraj-google-style"}
{"code": "def login(self, client_id, access_token, connection, scope='openid'):\n    return self.post('https:", "docstring": "Login using a social provider's access token\n\nGiven the social provider's access_token and the connection specified,\nit will do the authentication on the provider and return a dict with\nthe access_token and id_token. Currently, this endpoint only works for\nFacebook, Google, Twitter and Weibo.\n\nArgs:\nclient_id (str): application's client id.\n\naccess_token (str): social provider's access_token.\n\nconnection (str): connection type (e.g: 'facebook')\n\nReturns:\nA dict with 'access_token' and 'id_token' keys.", "source": "codesearchnet"}
{"code": "def delete_recursively_v2(path):\n    _pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))", "docstring": "Deletes everything under path recursively.\n\nArgs:\npath: string, a path\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def ConvertMessage(self, value, message):\n    \n    message_descriptor = message.DESCRIPTOR\n    full_name = message_descriptor.full_name\n    if _IsWrapperMessage(message_descriptor):\n      self._ConvertWrapperMessage(value, message)\n    elif full_name in _WKTJSONMETHODS:\n      methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)\n    else:\n      self._ConvertFieldValuePair(value, message)", "docstring": "Convert a JSON object into a message.\n\nArgs:\nvalue: A JSON object.\nmessage: A WKT or regular protocol message to record the data.\n\nRaises:\nParseError: In case of convert problems.", "source": "juraj-google-style"}
{"code": "def write_input(self, output_dir='.', make_dir_if_not_present=True):\n    if (make_dir_if_not_present and (not os.path.exists(output_dir))):\n        os.makedirs(output_dir)\n    feff = self.all_input()\n    feff_input = '\\n\\n'.join((str(feff[k]) for k in ['HEADER', 'PARAMETERS', 'POTENTIALS', 'ATOMS'] if (k in feff)))\n    for (k, v) in feff.items():\n        with open(os.path.join(output_dir, k), 'w') as f:\n            f.write(str(v))\n    with open(os.path.join(output_dir, 'feff.inp'), 'w') as f:\n        f.write(feff_input)\n    if ('ATOMS' not in feff):\n        self.atoms.struct.to(fmt='cif', filename=os.path.join(output_dir, feff['PARAMETERS']['CIF']))", "docstring": "Writes a set of FEFF input to a directory.\n\nArgs:\noutput_dir: Directory to output the FEFF input files\nmake_dir_if_not_present: Set to True if you want the directory (\nand the whole path) to be created if it is not present.", "source": "codesearchnet"}
{"code": "def _parse_format_pages_isbn(html_chunk):\n    ppi = get_first_content(html_chunk.find('div', {'class': 'price-overflow'}))\n    if (not ppi):\n        return (None, None, None)\n    ppi = filter((lambda x: x.strip()), ppi.split('<br />'))[0]\n    isbn = dhtmlparser.parseString(ppi)\n    isbn = isbn.find('b')\n    isbn = (isbn[0].getContent() if isbn else None)\n    pages = None\n    book_format = None\n    details = ppi.split('|')\n    if (len(details) >= 2):\n        book_format = details[0].strip()\n        pages = details[1].strip()\n    return (book_format, pages, isbn)", "docstring": "Parse format, number of pages and ISBN.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\ntuple: (format, pages, isbn), all as string.", "source": "codesearchnet"}
{"code": "def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def get_elements_between_bands(self, band_i, band_j):\n        \n        if band_i < 1 or band_i > self.nb_bands or band_j < 1 or band_j > self.nb_bands:\n            raise ValueError(\"Band index out of bounds\")\n\n        return self.data[:, band_i - 1, band_j - 1, :]", "docstring": "Method returning a numpy array with elements\n\n[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]\n\nbetween bands band_i and band_j (vasp 1-based indexing) for all kpoints.\n\nArgs:\nband_i (Integer): Index of band i\nband_j (Integer): Index of band j\n\nReturns:\na numpy list of elements for each kpoint", "source": "juraj-google-style"}
{"code": "def is_prefix(cls, path):\n        \n        lagofile = paths.Paths(path).prefix_lagofile()\n        return os.path.isfile(lagofile)", "docstring": "Check if a path is a valid prefix\n\nArgs:\npath(str): path to be checked\n\nReturns:\nbool: True if the given path is a prefix", "source": "juraj-google-style"}
{"code": "def _ConvertValueBinaryDataToFloatingPointValue(self, value):\n    if (not value):\n        return None\n    value_length = len(value)\n    if (value_length not in (4, 8)):\n        raise errors.ParseError('Unsupported value data size: {0:d}'.format(value_length))\n    if (value_length == 4):\n        floating_point_map = self._GetDataTypeMap('float32le')\n    elif (value_length == 8):\n        floating_point_map = self._GetDataTypeMap('float64le')\n    try:\n        return self._ReadStructureFromByteStream(value, 0, floating_point_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse floating-point value with error: {0!s}'.format(exception))", "docstring": "Converts a binary data value into a floating-point value.\n\nArgs:\nvalue (bytes): binary data value containing an ASCII string or None.\n\nReturns:\nfloat: floating-point representation of binary data value or None if\nvalue is not set.\n\nRaises:\nParseError: if the floating-point value data size is not supported or\nif the value cannot be parsed.", "source": "codesearchnet"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n    residual = hidden_states\n    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    cross_attn_present_key_value = None\n    cross_attn_weights = None\n    if encoder_hidden_states is not None:\n        residual = hidden_states\n        cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n        hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value)\n        hidden_states = self.dropout(hidden_states, training=training)\n        hidden_states = residual + hidden_states\n        hidden_states = self.encoder_attn_layer_norm(hidden_states)\n        present_key_value = present_key_value + cross_attn_present_key_value\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nencoder_hidden_states (`tf.Tensor`):\ncross attention input to the layer of shape `(batch, seq_len, embed_dim)`\nencoder_attention_mask (`tf.Tensor`): encoder attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n`(decoder_attention_heads,)`\ncross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.\n`(decoder_attention_heads,)`\npast_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states", "source": "github-repos"}
{"code": "def UpdateMaps(self, conf, incremental, force_write=False, force_lock=False):\n    if not self._Lock(path=conf.lockfile, force=force_lock):\n        self.log.error('Failed to acquire lock, aborting!')\n        return self.ERR_LOCK\n    retval = 0\n    for map_name in conf.maps:\n        if map_name not in conf.options:\n            self.log.error('No such map name defined in config: %s', map_name)\n            return 1\n        if incremental:\n            self.log.info('Updating and verifying %s cache.', map_name)\n        else:\n            self.log.info('Rebuilding and verifying %s cache.', map_name)\n        cache_options = conf.options[map_name].cache\n        source_options = conf.options[map_name].source\n        old_cwd = os.getcwd()\n        tempdir = tempfile.mkdtemp(dir=cache_options['dir'], prefix='nsscache-%s-' % map_name)\n        if not os.path.isabs(cache_options['dir']):\n            cache_options['dir'] = os.path.abspath(cache_options['dir'])\n        if not os.path.isabs(conf.timestamp_dir):\n            conf.timestamp_dir = os.path.abspath(conf.timestamp_dir)\n        if not os.path.isabs(tempdir):\n            tempdir = os.path.abspath(tempdir)\n        os.chdir(tempdir)\n        try:\n            try:\n                source = source_factory.Create(source_options)\n                updater = self._Updater(map_name, source, cache_options, conf)\n                if incremental:\n                    self.log.info('Updating and verifying %s cache.', map_name)\n                else:\n                    self.log.info('Rebuilding and verifying %s cache.', map_name)\n                retval = updater.UpdateFromSource(source, incremental=incremental, force_write=force_write)\n            except error.PermissionDenied:\n                self.log.error('Permission denied: could not update map %r.  Aborting', map_name)\n                retval += 1\n            except (error.EmptyMap, error.InvalidMap) as e:\n                self.log.error(e)\n                retval += 1\n            except error.InvalidMerge as e:\n                self.log.warning('Could not merge map %r: %s.  Skipping.', map_name, e)\n        finally:\n            os.chdir(old_cwd)\n            shutil.rmtree(tempdir)\n    return retval", "docstring": "Update each configured map.\n\nFor each configured map, create a source and cache object and\nupdate the cache from the source.\n\nArgs:\nconf: configuration object\nincremental: flag indicating incremental update should occur\nforce_write: optional flag indicating safety checks should be ignored\nforce_lock: optional flag indicating we override existing locks\n\nReturns:\ninteger, zero indicating success, non-zero failure", "source": "github-repos"}
{"code": "def impersonate(self, user, enterprise):\n    if ((not user) or (not enterprise)):\n        raise ValueError('You must set a user name and an enterprise name to begin impersonification')\n    self._is_impersonating = True\n    self._impersonation = ('%s@%s' % (user, enterprise))", "docstring": "Impersonate a user in a enterprise\n\nArgs:\nuser: the name of the user to impersonate\nenterprise: the name of the enterprise where to use impersonation", "source": "codesearchnet"}
{"code": "def _iterate(self, url, params, api_entity):\n        \n        params['resultLimit'] = self.result_limit\n        should_iterate = True\n        result_start = 0\n        while should_iterate:\n            \n            params['resultStart'] = result_start\n            r = self.tcex.session.get(url, params=params)\n            if not self.success(r):\n                err = r.text or r.reason\n                self.tcex.handle_error(950, [r.status_code, err, r.url])\n\n            data = r.json().get('data').get(api_entity)\n\n            if len(data) < self.result_limit:\n                should_iterate = False\n            result_start += self.result_limit\n\n            for result in data:\n                yield result", "docstring": "Args:\nurl:\nparams:\napi_entity:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def inference(self, state_arr, limit=1000):\n        \n        agent_x, agent_y = np.where(state_arr[0] == 1)\n        agent_x, agent_y = agent_x[0], agent_y[0]\n        result_list = [(agent_x, agent_y, 0.0)]\n        self.t = 1\n        while self.t <= limit:\n            next_action_arr = self.extract_possible_actions(state_arr)\n            next_q_arr = self.function_approximator.inference_q(next_action_arr)\n            action_arr, q = self.select_action(next_action_arr, next_q_arr)\n\n            agent_x, agent_y = np.where(action_arr[0] == 1)\n            agent_x, agent_y = agent_x[0], agent_y[0]\n            result_list.append((agent_x, agent_y, q[0]))\n\n            \n            state_arr = self.update_state(state_arr, action_arr)\n\n            \n            self.t += 1\n            \n            end_flag = self.check_the_end_flag(state_arr)\n            if end_flag is True:\n                break\n\n        return result_list", "docstring": "Infernce.\n\nArgs:\nstate_arr:    `np.ndarray` of state.\nlimit:        The number of inferencing.\n\nReturns:\n`list of `np.ndarray` of an optimal route.", "source": "juraj-google-style"}
{"code": "def ToScriptHash(data, unhex=True):\n        \n        if len(data) > 1 and unhex:\n            data = binascii.unhexlify(data)\n        return UInt160(data=binascii.unhexlify(bytes(Crypto.Hash160(data), encoding='utf-8')))", "docstring": "Get a script hash of the data.\n\nArgs:\ndata (bytes): data to hash.\nunhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'\n\nReturns:\nUInt160: script hash.", "source": "juraj-google-style"}
{"code": "def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:\n        \n        bucket_length = bucket_length or len(tags)\n        answer = np.zeros(shape=(bucket_length,), dtype=np.int32)\n        for i, tag in enumerate(tags):\n            answer[i] = self.tags.tok2idx(tag)\n        return answer", "docstring": "Transforms a sentence of tags to Numpy array, which will be the network target.\n\nArgs:\ntags: input sentence of tags\nbucket_length: the width of the bucket\n\nReturns:\nA 2d array, answer[i][j] contains the index of j-th tag in i-th input sentence.", "source": "juraj-google-style"}
{"code": "def IsHFS(self):\n    tsk_fs_type = self.GetFsType()\n    return (tsk_fs_type in [pytsk3.TSK_FS_TYPE_HFS, pytsk3.TSK_FS_TYPE_HFS_DETECT])", "docstring": "Determines if the file system is HFS, HFS+ or HFSX.\n\nReturns:\nbool: True if the file system is HFS.", "source": "codesearchnet"}
{"code": "def cancel_merge_when_pipeline_succeeds(self, **kwargs):\n    path = ('%s/%s/cancel_merge_when_pipeline_succeeds' % (self.manager.path, self.get_id()))\n    server_data = self.manager.gitlab.http_put(path, **kwargs)\n    self._update_attrs(server_data)", "docstring": "Cancel merge when the pipeline succeeds.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMROnBuildSuccessError: If the server could not handle the\nrequest", "source": "codesearchnet"}
{"code": "def DecryptPrivateKey(self, encrypted_private_key):\n        \n        aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)\n        return aes.decrypt(encrypted_private_key)", "docstring": "Decrypt the provided ciphertext with the initialized private key.\n\nArgs:\nencrypted_private_key (byte string): the ciphertext to be decrypted.\n\nReturns:\nbytes: the ciphertext.", "source": "juraj-google-style"}
{"code": "def _list_inputs_or_outputs(self, recursive, node_name, depth, control, op_type, do_outputs=False):\n    if do_outputs:\n        tracker = self._debug_dump.node_recipients\n        type_str = 'Recipients of'\n        short_type_str = 'recipients'\n    else:\n        tracker = self._debug_dump.node_inputs\n        type_str = 'Inputs to'\n        short_type_str = 'inputs'\n    lines = []\n    font_attr_segs = {}\n    node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)\n    if not self._debug_dump.node_exists(node_name):\n        return cli_shared.error('There is no node named \"%s\" in the partition graphs' % node_name)\n    if recursive:\n        max_depth = depth\n    else:\n        max_depth = 1\n    if control:\n        include_ctrls_str = ', control %s included' % short_type_str\n    else:\n        include_ctrls_str = ''\n    line = '%s node \"%s\"' % (type_str, node_name)\n    font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, 'bold')]\n    lines.append(line + ' (Depth limit = %d%s):' % (max_depth, include_ctrls_str))\n    command_template = 'lo -c -r %s' if do_outputs else 'li -c -r %s'\n    self._dfs_from_node(lines, font_attr_segs, node_name, tracker, max_depth, 1, [], control, op_type, command_template=command_template)\n    lines.append('')\n    lines.append('Legend:')\n    lines.append('  (d): recursion depth = d.')\n    if control:\n        lines.append('  (Ctrl): Control input.')\n    if op_type:\n        lines.append('  [Op]: Input node has op type Op.')\n    return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)", "docstring": "Helper function used by list_inputs and list_outputs.\n\nFormat a list of lines to display the inputs or output recipients of a\ngiven node.\n\nArgs:\nrecursive: Whether the listing is to be done recursively, as a boolean.\nnode_name: The name of the node in question, as a str.\ndepth: Maximum recursion depth, applies only if recursive == True, as an\nint.\ncontrol: Whether control inputs or control recipients are included, as a\nboolean.\nop_type: Whether the op types of the nodes are to be included, as a\nboolean.\ndo_outputs: Whether recipients, instead of input nodes are to be\nlisted, as a boolean.\n\nReturns:\nInput or recipient tree formatted as a RichTextLines object.", "source": "github-repos"}
{"code": "def rename_attribute(self, attribute: str, new_name: str) -> None:\n    for (key_node, _) in self.yaml_node.value:\n        if (key_node.value == attribute):\n            key_node.value = new_name\n            break", "docstring": "Renames an attribute.\n\nUse only if is_mapping() returns true.\n\nIf the attribute does not exist, this will do nothing.\n\nArgs:\nattribute: The (old) name of the attribute to rename.\nnew_name: The new name to rename it to.", "source": "codesearchnet"}
{"code": "def bounce(sequence):\n    N = len(sequence)\n\n    def f(i):\n        (div, mod) = divmod(i, N)\n        if ((div % 2) == 0):\n            return sequence[mod]\n        else:\n            return sequence[((N - mod) - 1)]\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a \"bounced\" sequence\nof values.\n\n.. code-block:: none\n\nseq = [0, 1, 2, 3]\n\n# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]\n\nArgs:\nsequence (seq) : a sequence of values for the driver to bounce", "source": "codesearchnet"}
{"code": "def add_event(self, event):\n        \n        if not isinstance(event, event_pb2.Event):\n            raise TypeError(\"Expected an event_pb2.Event proto, \"\n                            \" but got %s\" % type(event))\n        self._async_writer.write(event.SerializeToString())", "docstring": "Adds an event to the event file.\n\nArgs:\nevent: An `Event` protocol buffer.", "source": "juraj-google-style"}
{"code": "def Match(self, artifact=None, os_name=None, cpe=None, label=None):\n    return [c for c in self.conditions if c.Match(artifact, os_name, cpe, label)]", "docstring": "Test if host data should trigger a check.\n\nArgs:\nartifact: An artifact name.\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nA list of conditions that match.", "source": "codesearchnet"}
{"code": "def encode_value(value):\n    \n    if value is None:\n        return document_pb2.Value(null_value=struct_pb2.NULL_VALUE)\n\n    \n    if isinstance(value, bool):\n        return document_pb2.Value(boolean_value=value)\n\n    if isinstance(value, six.integer_types):\n        return document_pb2.Value(integer_value=value)\n\n    if isinstance(value, float):\n        return document_pb2.Value(double_value=value)\n\n    if isinstance(value, DatetimeWithNanoseconds):\n        return document_pb2.Value(timestamp_value=value.timestamp_pb())\n\n    if isinstance(value, datetime.datetime):\n        return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value))\n\n    if isinstance(value, six.text_type):\n        return document_pb2.Value(string_value=value)\n\n    if isinstance(value, six.binary_type):\n        return document_pb2.Value(bytes_value=value)\n\n    \n    \n    document_path = getattr(value, \"_document_path\", None)\n    if document_path is not None:\n        return document_pb2.Value(reference_value=document_path)\n\n    if isinstance(value, GeoPoint):\n        return document_pb2.Value(geo_point_value=value.to_protobuf())\n\n    if isinstance(value, list):\n        value_list = [encode_value(element) for element in value]\n        value_pb = document_pb2.ArrayValue(values=value_list)\n        return document_pb2.Value(array_value=value_pb)\n\n    if isinstance(value, dict):\n        value_dict = encode_dict(value)\n        value_pb = document_pb2.MapValue(fields=value_dict)\n        return document_pb2.Value(map_value=value_pb)\n\n    raise TypeError(\n        \"Cannot convert to a Firestore Value\", value, \"Invalid type\", type(value)\n    )", "docstring": "Converts a native Python value into a Firestore protobuf ``Value``.\n\nArgs:\nvalue (Union[NoneType, bool, int, float, datetime.datetime, \\\nstr, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native\nPython value to convert to a protobuf field.\n\nReturns:\n~google.cloud.firestore_v1beta1.types.Value: A\nvalue encoded as a Firestore protobuf.\n\nRaises:\nTypeError: If the ``value`` is not one of the accepted types.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls_segment_id = [2]\n    if token_ids_1 is None:\n        return len(token_ids_0 + sep) * [0] + cls_segment_id\n    return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet\nsequence pair mask has the following format:\n\n```\n0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n| first sequence    | second sequence |\n```\n\nIf `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(ProtocolVersion, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MAJOR, local_stream):\n        self._major = primitives.Integer(tag=enums.Tags.PROTOCOL_VERSION_MAJOR)\n        self._major.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid encoding missing the major protocol version number.')\n    if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MINOR, local_stream):\n        self._minor = primitives.Integer(tag=enums.Tags.PROTOCOL_VERSION_MINOR)\n        self._minor.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid encoding missing the minor protocol version number.')\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the ProtocolVersion struct and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if either the major or minor protocol versions\nare missing from the encoding.", "source": "codesearchnet"}
{"code": "def process(self, batch, device=None):\n    padded = self.pad(batch)\n    tensor = self.numericalize(padded, device=device)\n    return tensor", "docstring": "Process a list of examples to create a torch.Tensor.\n\nPad, numericalize, and postprocess a batch and create a tensor.\n\nArgs:\nbatch (list(object)): A list of object from a batch of examples.\nReturns:\ntorch.autograd.Variable: Processed object given the input\nand custom postprocessing Pipeline.", "source": "codesearchnet"}
{"code": "def split_window(self, fpath, vertical=False, size=None, bufopts=None):\n    command = ('split {}'.format(fpath) if fpath else 'new')\n    if vertical:\n        command = ('v' + command)\n    if size:\n        command = (str(size) + command)\n    self._vim.command(command)\n    if bufopts:\n        self.set_buffer_options(bufopts)", "docstring": "Open file in a new split window.\n\nArgs:\nfpath (str): Path of the file to open. If ``None``, a new empty\nsplit is created.\nvertical (bool): Whether to open a vertical split.\nsize (Optional[int]): The height (or width) to set for the new window.\nbufopts (Optional[dict]): Buffer-local options to set in the split window.\nSee :func:`.set_buffer_options`.", "source": "codesearchnet"}
{"code": "def simplify(self, assignments):\n    if self.right in assignments:\n        return self\n    else:\n        return self if self.right in assignments[self.left] else FALSE", "docstring": "Simplify this equality.\n\nThis will try to look up the values, and return FALSE if they're no longer\npossible. Also, when comparing two variables, it will compute the\nintersection, and return a disjunction of variable=value equalities instead.\n\nArgs:\nassignments: Variable assignments (dict mapping strings to sets of\nstrings). Used to determine whether this equality is still possible, and\nto compute intersections between two variables.\n\nReturns:\nA new BooleanTerm.", "source": "github-repos"}
{"code": "def input_elements(self, instruction_id, expected_inputs, abort_callback=None):\n    received = self._receiving_queue(instruction_id)\n    if received is None:\n        raise RuntimeError('Instruction cleaned up already %s' % instruction_id)\n    done_inputs = set()\n    abort_callback = abort_callback or (lambda: False)\n    log_interval_sec = 5 * 60\n    try:\n        start_time = time.time()\n        next_waiting_log_time = start_time + log_interval_sec\n        while len(done_inputs) < len(expected_inputs):\n            try:\n                element = received.get(timeout=1)\n            except queue.Empty:\n                if self._closed:\n                    raise RuntimeError('Channel closed prematurely.')\n                if abort_callback():\n                    return\n                if self._exception:\n                    raise self._exception from None\n                current_time = time.time()\n                if next_waiting_log_time <= current_time:\n                    _LOGGER.info('Detected input queue delay longer than %s seconds. Waiting to receive elements in input queue for instruction: %s for %.2f seconds.', log_interval_sec, instruction_id, current_time - start_time)\n                    next_waiting_log_time = current_time + log_interval_sec\n            else:\n                start_time = time.time()\n                next_waiting_log_time = start_time + log_interval_sec\n                if isinstance(element, beam_fn_api_pb2.Elements.Timers):\n                    if element.is_last:\n                        done_inputs.add((element.transform_id, element.timer_family_id))\n                    else:\n                        yield element\n                elif isinstance(element, beam_fn_api_pb2.Elements.Data):\n                    if element.is_last:\n                        done_inputs.add(element.transform_id)\n                    else:\n                        assert element.transform_id not in done_inputs\n                        yield element\n                else:\n                    raise ValueError('Unexpected input element type %s' % type(element))\n    finally:\n        self._clean_receiving_queue(instruction_id)", "docstring": "Generator to retrieve elements for an instruction_id\ninput_elements should be called only once for an instruction_id\n\nArgs:\ninstruction_id(str): instruction_id for which data is read\nexpected_inputs(collection): expected inputs, include both data and timer.", "source": "github-repos"}
{"code": "def get_all_organization_names(configuration=None, **kwargs):\n    organization = Organization(configuration=configuration)\n    organization['id'] = 'all organizations'\n    return organization._write_to_hdx('list', kwargs, 'id')", "docstring": "Get all organization names in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n**kwargs: See below\nsort (str): Sort the search results according to field name and sort-order. Allowed fields are ‘name’, ‘package_count’ and ‘title’. Defaults to 'name asc'.\norganizations (List[str]): List of names of the groups to return.\nall_fields (bool): Return group dictionaries instead of just names. Only core fields are returned - get some more using the include_* options. Defaults to False.\ninclude_extras (bool): If all_fields, include the group extra fields. Defaults to False.\ninclude_tags (bool): If all_fields, include the group tags. Defaults to False.\ninclude_groups: If all_fields, include the groups the groups are in. Defaults to False.\n\nReturns:\nList[str]: List of all organization names in HDX", "source": "codesearchnet"}
{"code": "def device_coordinates(self):\n    return self._device_coordinates", "docstring": "Describes the mapping from TPU devices to topology coordinates.\n\nReturns:\nA rank 3 int32 array with shape `[tasks, devices, axis]`.\n`tasks` is the number of tasks in the TPU cluster, `devices` is the number\nof TPU devices per task, and `axis` is the number of axes in the TPU\ncluster topology. Each entry gives the `axis`-th coordinate in the\ntopology of a task/device pair. TPU topologies are 4-dimensional, with\ndimensions `(x, y, z, core number)`.", "source": "github-repos"}
{"code": "def _psd_mask(x):\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  eigenvalues, _ = tf.linalg.eigh(x)\n  return tf.cast(\n      tf.reduce_min(input_tensor=eigenvalues, axis=-1) >= 0, dtype=x.dtype)", "docstring": "Computes whether each square matrix in the input is positive semi-definite.\n\nArgs:\nx: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.\n\nReturns:\nmask: A floating-point `Tensor` of shape `[B1, ... Bn]`.  Each\nscalar is 1 if the corresponding matrix was PSD, otherwise 0.", "source": "juraj-google-style"}
{"code": "def wrap_layer_objects(layer, serialization_cache):\n    all_losses = layer._callable_losses[:]\n    for child_layer in utils.list_all_layers(layer):\n        all_losses.extend(child_layer._callable_losses)\n    keras_loss_cache = serialization_cache.setdefault('keras_losses', {})\n    wrapped_loss_functions = []\n    for loss_fn in all_losses:\n        if loss_fn in keras_loss_cache:\n            wrapped_loss_functions.append(keras_loss_cache[loss_fn])\n        else:\n            wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))\n            keras_loss_cache[loss_fn] = wrapped_loss\n            wrapped_loss_functions.append(wrapped_loss)\n    wrapped_layer_losses = [keras_loss_cache[fn] for fn in layer._callable_losses[:]]\n    layer_metrics = data_structures.wrap_or_unwrap({m.name: m for m in layer._metrics})\n    return dict(variables=data_structures.wrap_or_unwrap(layer.variables), trainable_variables=data_structures.wrap_or_unwrap(layer.trainable_variables), non_trainable_variables=data_structures.wrap_or_unwrap(layer.non_trainable_variables), layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)), metrics=data_structures.wrap_or_unwrap(layer.metrics), regularization_losses=data_structures.wrap_or_unwrap(wrapped_loss_functions), layer_regularization_losses=data_structures.wrap_or_unwrap(wrapped_layer_losses), layer_metrics=layer_metrics)", "docstring": "Returns extra trackable objects to attach to the serialized layer.\n\nArgs:\nlayer: Keras Layer object.\nserialization_cache: Dictionary shared between all objects during\nserialization.\n\nReturns:\nA dictionary containing all checkpointable objects from a\nSerializedAttributes object. See LayerAttributes and ModelAttributes for\nentire list of objects", "source": "github-repos"}
{"code": "def invite_by_email(self, email, user, organization, **kwargs):\n    try:\n        invitee = self.user_model.objects.get(email__iexact=email)\n    except self.user_model.DoesNotExist:\n        invitee = None\n    user_invitation = self.invitation_model.objects.create(invitee=invitee, invitee_identifier=email.lower(), invited_by=user, organization=organization)\n    self.send_invitation(user_invitation)\n    return user_invitation", "docstring": "Primary interface method by which one user invites another to join\n\nArgs:\nemail:\nrequest:\n**kwargs:\n\nReturns:\nan invitation instance\n\nRaises:\nMultipleObjectsReturned if multiple matching users are found", "source": "codesearchnet"}
{"code": "def parse_args(args=None):\n    \n    parser = argparse.ArgumentParser(description=\"Main script to run LIVVkit.\",\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n                                     fromfile_prefix_chars='@')\n\n    parser.add_argument('-o', '--out-dir',\n                        default=os.path.join(os.getcwd(), \"vv_\" + time.strftime(\"%Y-%m-%d\")),\n                        help='Location to output the LIVVkit webpages.'\n                        )\n\n    parser.add_argument('-v', '--verify',\n                        nargs=2,\n                        default=None,\n                        help=' '.join(['Specify the locations of the test and bench bundle to',\n                                       'compare (respectively).'\n                                       ])\n                        )\n\n    parser.add_argument('-V', '--validate',\n                        action='store',\n                        nargs='+',\n                        default=None,\n                        help=' '.join(['Specify the location of the configuration files for',\n                                       'validation tests.'\n                                       ])\n                        )\n\n    \n    parser.add_argument('-e', '--extension',\n                        action='store',\n                        nargs='+',\n                        default=None,\n                        dest='validate',\n                        metavar='EXTENSION',\n                        help=' '.join(['Specify the location of the configuration files for',\n                                       'LIVVkit extensions.'\n                                       ])\n                        )\n\n    parser.add_argument('-p', '--publish',\n                        action='store_true',\n                        help=' '.join(['Also produce a publication quality copy of the figure in',\n                                       'the output directory (eps, 600d pi).'\n                                       ])\n                        )\n\n    parser.add_argument('-s', '--serve',\n                        nargs='?', type=int, const=8000,\n                        help=' '.join(['Start a simple HTTP server for the output website specified',\n                                       'by OUT_DIR on port SERVE.'\n                                       ])\n                        )\n\n    parser.add_argument('--version',\n                        action='version',\n                        version='LIVVkit {}'.format(livvkit.__version__),\n                        help=\"Show LIVVkit's version number and exit\"\n                        )\n\n    return init(parser.parse_args(args))", "docstring": "Handles the parsing of options for LIVVkit's command line interface\n\nArgs:\nargs: The list of arguments, typically sys.argv[1:]", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:\n    if labels is not None:\n        labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n        use_cache = False\n        if decoder_input_ids is None and decoder_inputs_embeds is None:\n            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n    lm_logits = self.bias_layer(lm_logits)\n    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n    if not return_dict:\n        output = (lm_logits,) + outputs[1:]\n        return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the masked language modeling loss. Indices should either be in `[0, ...,\nconfig.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:", "source": "github-repos"}
{"code": "def _build_processor(cls, session: AppSession):\n    web_processor = cls._build_web_processor(session)\n    ftp_processor = cls._build_ftp_processor(session)\n    delegate_processor = session.factory.new('Processor')\n    delegate_processor.register('http', web_processor)\n    delegate_processor.register('https', web_processor)\n    delegate_processor.register('ftp', ftp_processor)", "docstring": "Create the Processor\n\nReturns:\nProcessor: An instance of :class:`.processor.BaseProcessor`.", "source": "codesearchnet"}
{"code": "def delete_existing_cname(env, zone_id, dns_name):\n    client = boto3.Session(profile_name=env).client('route53')\n    startrecord = None\n    newrecord_name = dns_name\n    startrecord = find_existing_record(env, zone_id, newrecord_name, check_key='Type', check_value='CNAME')\n    if startrecord:\n        LOG.info('Deleting old record: %s', newrecord_name)\n        _response = client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch={'Changes': [{'Action': 'DELETE', 'ResourceRecordSet': startrecord}]})\n        LOG.debug('Response from deleting %s: %s', dns_name, _response)", "docstring": "Delete an existing CNAME record.\n\nThis is used when updating to multi-region for deleting old records. The\nrecord can not just be upserted since it changes types.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\ndns_name (str): FQDN of application's dns entry to add/update.", "source": "codesearchnet"}
{"code": "def _experiments_to_circuits(qobj):\n    \n    if qobj.experiments:\n        circuits = []\n        for x in qobj.experiments:\n            quantum_registers = [QuantumRegister(i[1], name=i[0])\n                                 for i in x.header.qreg_sizes]\n            classical_registers = [ClassicalRegister(i[1], name=i[0])\n                                   for i in x.header.creg_sizes]\n            circuit = QuantumCircuit(*quantum_registers,\n                                     *classical_registers,\n                                     name=x.header.name)\n            qreg_dict = {}\n            creg_dict = {}\n            for reg in quantum_registers:\n                qreg_dict[reg.name] = reg\n            for reg in classical_registers:\n                creg_dict[reg.name] = reg\n            for i in x.instructions:\n                instr_method = getattr(circuit, i.name)\n                qubits = []\n                try:\n                    for qubit in i.qubits:\n                        qubit_label = x.header.qubit_labels[qubit]\n                        qubits.append(\n                            qreg_dict[qubit_label[0]][qubit_label[1]])\n                except Exception:  \n                    pass\n                clbits = []\n                try:\n                    for clbit in i.memory:\n                        clbit_label = x.header.clbit_labels[clbit]\n                        clbits.append(\n                            creg_dict[clbit_label[0]][clbit_label[1]])\n                except Exception:  \n                    pass\n                params = []\n                try:\n                    params = i.params\n                except Exception:  \n                    pass\n                if i.name in ['snapshot']:\n                    instr_method(\n                        i.label,\n                        snapshot_type=i.snapshot_type,\n                        qubits=qubits,\n                        params=params)\n                elif i.name == 'initialize':\n                    instr_method(params, qubits)\n                else:\n                    instr_method(*params, *qubits, *clbits)\n            circuits.append(circuit)\n        return circuits\n    return None", "docstring": "Return a list of QuantumCircuit object(s) from a qobj\n\nArgs:\nqobj (Qobj): The Qobj object to convert to QuantumCircuits\nReturns:\nlist: A list of QuantumCircuit objects from the qobj", "source": "juraj-google-style"}
{"code": "def print_schema_results(results, level=0):\n    for error in results.errors:\n        print_level(logger.error, (_RED + '[X] %s'), level, error)", "docstring": "Print JSON Schema validation errors to stdout.\n\nArgs:\nresults: An instance of ObjectValidationResults.\nlevel: The level at which to print the results.", "source": "codesearchnet"}
{"code": "def _get_pmap_impl(f, devices, has_tpu):\n    if has_tpu:\n        output_is_list = [False]\n\n        def recorder(args, kwargs, res):\n            del args, kwargs\n            output_is_list[0] = isinstance(res, list)\n            return res\n        f = _record_result_type(recorder, f)\n\n    def tf_f(*tf_args):\n        \n        np_args = _tf_to_np(tf_args)\n        np_out = f(*np_args)\n        return np_out\n    if has_tpu:\n\n        @polymorphic_function.function(autograph=False)\n        def fn(inputs):\n            res = tpu.replicate(tf_f, inputs)\n            if res and isinstance(res[0], list) and (len(res[0]) == 1) and (not output_is_list[0]):\n                res = [x[0] for x in res]\n            return res\n        return fn\n    else:\n        jit_tf_f = polymorphic_function.function(tf_f, autograph=False)\n\n        @polymorphic_function.function(autograph=False)\n        def fn(all_per_device_args):\n            \n            results = []\n            for per_device_args, device in zip(all_per_device_args, devices):\n                with ops.device(device):\n                    results.append(jit_tf_f(*per_device_args))\n            return results\n        return fn", "docstring": "This is a helper function to return the pmap impl.\n\nArgs:\nf: a function that takes ndarrays and returns ndarrays.\ndevices: a list of strings; the device list.\nhas_tpu: boolean; whether `devices` contains TPU devices.\n\nReturns:\nA function that takes tensors and returns tensors.", "source": "github-repos"}
{"code": "def length_of_overlap(first_start, first_end, second_start, second_end):\n    if ((first_end <= second_start) or (first_start >= second_end)):\n        return 0.0\n    if (first_start < second_start):\n        if (first_end < second_end):\n            return abs((first_end - second_start))\n        else:\n            return abs((second_end - second_start))\n    if (first_start > second_start):\n        if (first_end > second_end):\n            return abs((second_end - first_start))\n        else:\n            return abs((first_end - first_start))", "docstring": "Find the length of the overlapping part of two segments.\n\nArgs:\nfirst_start (float): Start of the first segment.\nfirst_end (float): End of the first segment.\nsecond_start (float): Start of the second segment.\nsecond_end (float): End of the second segment.\n\nReturn:\nfloat: The amount of overlap or 0 if they don't overlap at all.", "source": "codesearchnet"}
{"code": "def from_string(cls, public_key):\n    public_key_data = _helpers.to_bytes(public_key)\n    if (_CERTIFICATE_MARKER in public_key_data):\n        cert = cryptography.x509.load_pem_x509_certificate(public_key_data, _BACKEND)\n        pubkey = cert.public_key()\n    else:\n        pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)\n    return cls(pubkey)", "docstring": "Construct an Verifier instance from a public key or public\ncertificate string.\n\nArgs:\npublic_key (Union[str, bytes]): The public key in PEM format or the\nx509 public key certificate.\n\nReturns:\nVerifier: The constructed verifier.\n\nRaises:\nValueError: If the public key can't be parsed.", "source": "codesearchnet"}
{"code": "def prepare_adiabatic_limit(slh, k=None):\n    if (k is None):\n        k = symbols('k', positive=True)\n    Ld = slh.L.dag()\n    LdL = (Ld * slh.L)[(0, 0)]\n    K = (((- LdL) / 2) + (I * slh.H)).expand().simplify_scalar()\n    N = slh.S.dag()\n    (B, A, Y) = K.series_expand(k, 0, 2)\n    (G, F) = Ld.series_expand(k, 0, 1)\n    return (Y, A, B, F, G, N)", "docstring": "Prepare the adiabatic elimination on an SLH object\n\nArgs:\nslh: The SLH object to take the limit for\nk: The scaling parameter $k \\rightarrow \\infty$. The default is a\npositive symbol 'k'\n\nReturns:\ntuple: The objects ``Y, A, B, F, G, N``\nnecessary to compute the limiting system.", "source": "codesearchnet"}
{"code": "def BuildDefaultValue(self, value_cls):\n    \n    try:\n      return value_cls()\n    except Exception as e:  \n      logging.exception(e)\n      raise DefaultValueError(\n          \"Can't create default for value %s: %s\" % (value_cls.__name__, e))", "docstring": "Renders default value of a given class.\n\nArgs:\nvalue_cls: Default value of this class will be rendered. This class has to\nbe (or to be a subclass of) a self.value_class (i.e. a class that this\nrenderer is capable of rendering).\n\nReturns:\nAn initialized default value.\n\nRaises:\nDefaultValueError: if something goes wrong.", "source": "juraj-google-style"}
{"code": "def GetMap(self, cache_info, data):\n    entries = collections.defaultdict(dict)\n    for line in json.loads(cache_info.read()):\n        key = line.get('Key', '').split('/')\n        value = line.get('Value', '')\n        if not value or not key:\n            continue\n        value = base64.b64decode(value)\n        name = str(key[-2])\n        entry_piece = key[-1]\n        entries[name][entry_piece] = value\n    for name, entry in list(entries.items()):\n        map_entry = self._ReadEntry(name, entry)\n        if map_entry is None:\n            self.log.warning('Could not create entry from line %r in cache, skipping', entry)\n            continue\n        if not data.Add(map_entry):\n            self.log.warning('Could not add entry %r read from line %r in cache', map_entry, entry)\n    return data", "docstring": "Returns a map from a cache.\n\nArgs:\ncache_info: file like object containing the cache.\ndata: a Map to populate.\nReturns:\nA child of Map containing the cache data.", "source": "github-repos"}
{"code": "def _anonymous_match(self, struct1, struct2, fu, s1_supercell=True,\n                         use_rms=False, break_on_match=False, single_match=False):\n        \n        if not isinstance(self._comparator, SpeciesComparator):\n            raise ValueError('Anonymous fitting currently requires SpeciesComparator')\n\n        \n        sp1 = struct1.composition.elements\n        sp2 = struct2.composition.elements\n        if len(sp1) != len(sp2):\n            return None\n\n        ratio = fu if s1_supercell else 1/fu\n        swapped = len(struct1) * ratio < len(struct2)\n\n        s1_comp = struct1.composition\n        s2_comp = struct2.composition\n        matches = []\n        for perm in itertools.permutations(sp2):\n            sp_mapping = dict(zip(sp1, perm))\n\n            \n            mapped_comp = Composition({sp_mapping[k]: v\n                                       for k, v in s1_comp.items()})\n            if (not self._subset) and (\n                    self._comparator.get_hash(mapped_comp) !=\n                    self._comparator.get_hash(s2_comp)):\n                continue\n\n            mapped_struct = struct1.copy()\n            mapped_struct.replace_species(sp_mapping)\n            if swapped:\n                m = self._strict_match(struct2, mapped_struct, fu,\n                                       (not s1_supercell), use_rms,\n                                       break_on_match)\n            else:\n                m = self._strict_match(mapped_struct, struct2, fu, s1_supercell,\n                                       use_rms, break_on_match)\n            if m:\n                matches.append((sp_mapping, m))\n                if single_match:\n                    break\n        return matches", "docstring": "Tries all permutations of matching struct1 to struct2.\nArgs:\nstruct1, struct2 (Structure): Preprocessed input structures\nReturns:\nList of (mapping, match)", "source": "juraj-google-style"}
{"code": "def release(self, connection: Connection, reuse: bool=True):\n    (yield from self._condition.acquire())\n    self.busy.remove(connection)\n    if reuse:\n        self.ready.add(connection)\n    self._condition.notify()\n    self._condition.release()", "docstring": "Unregister a connection.\n\nArgs:\nconnection: Connection instance returned from :meth:`acquire`.\nreuse: If True, the connection is made available for reuse.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def __init__(self, descriptor_db=None):\n    \n\n    self._internal_db = descriptor_database.DescriptorDatabase()\n    self._descriptor_db = descriptor_db\n    self._descriptors = {}\n    self._enum_descriptors = {}\n    self._service_descriptors = {}\n    self._file_descriptors = {}\n    self._toplevel_extensions = {}\n    \n    \n    self._file_desc_by_toplevel_extension = {}\n    \n    \n    \n    self._extensions_by_name = collections.defaultdict(dict)\n    self._extensions_by_number = collections.defaultdict(dict)", "docstring": "Initializes a Pool of proto buffs.\n\nThe descriptor_db argument to the constructor is provided to allow\nspecialized file descriptor proto lookup code to be triggered on demand. An\nexample would be an implementation which will read and compile a file\nspecified in a call to FindFileByName() and not require the call to Add()\nat all. Results from this database will be cached internally here as well.\n\nArgs:\ndescriptor_db: A secondary source of file descriptors.", "source": "juraj-google-style"}
{"code": "def resolve(self, method, path):\n    if ((method in self._literal) and (path in self._literal[method])):\n        return (self._literal[method][path], [], {})\n    else:\n        return self._resolve_non_literal_route(method, path)", "docstring": "Resolve a request to a route handler.\n\nArguments:\nmethod (str): HTTP method, e.g. GET, POST, etc. (type: str)\npath (str): Request path\n\nReturns:\ntuple or None: A tuple of three items:\n\n1. Route handler (callable)\n2. Positional arguments (list)\n3. Keyword arguments (dict)\n\n``None`` if no route matches the request.", "source": "codesearchnet"}
{"code": "def impad(img, shape, pad_val=0):\n    \n    if not isinstance(pad_val, (int, float)):\n        assert len(pad_val) == img.shape[-1]\n    if len(shape) < len(img.shape):\n        shape = shape + (img.shape[-1], )\n    assert len(shape) == len(img.shape)\n    for i in range(len(shape) - 1):\n        assert shape[i] >= img.shape[i]\n    pad = np.empty(shape, dtype=img.dtype)\n    pad[...] = pad_val\n    pad[:img.shape[0], :img.shape[1], ...] = img\n    return pad", "docstring": "Pad an image to a certain shape.\n\nArgs:\nimg (ndarray): Image to be padded.\nshape (tuple): Expected padding shape.\npad_val (number or sequence): Values to be filled in padding areas.\n\nReturns:\nndarray: The padded image.", "source": "juraj-google-style"}
{"code": "def allsame(iterable, eq=operator.eq):\n    iter_ = iter(iterable)\n    try:\n        first = next(iter_)\n    except StopIteration:\n        return True\n    return all((eq(first, item) for item in iter_))", "docstring": "Determine if all items in a sequence are the same\n\nArgs:\niterable (Iterable): items to determine if they are all the same\n\neq (Callable, optional): function to determine equality\n(default: operator.eq)\n\nExample:\n>>> allsame([1, 1, 1, 1])\nTrue\n>>> allsame([])\nTrue\n>>> allsame([0, 1])\nFalse\n>>> iterable = iter([0, 1, 1, 1])\n>>> next(iterable)\n>>> allsame(iterable)\nTrue\n>>> allsame(range(10))\nFalse\n>>> allsame(range(10), lambda a, b: True)\nTrue", "source": "codesearchnet"}
{"code": "def _get_target_dtype(self, from_dtype: Optional[_NpDType]) -> Optional[_NpDType]:", "docstring": "Validate and normalize the numpy dtype.\n\nArgs:\nfrom_dtype: DType of the array to cast\n\nReturns:\nto_dtype: DType of the array after casting", "source": "github-repos"}
{"code": "def wait_all(jobs, timeout=None):\n    \n    return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)", "docstring": "Return when at all of the specified jobs have completed or timeout expires.\n\nArgs:\njobs: a Job or list of Jobs to wait on.\ntimeout: a timeout in seconds to wait for. None (the default) means no timeout.\nReturns:\nA list of the jobs that have now completed or None if there were no jobs.", "source": "juraj-google-style"}
{"code": "def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n\n    def _is_valid_text_input(t):\n        if isinstance(t, str):\n            return True\n        elif isinstance(t, (list, tuple)):\n            if len(t) == 0:\n                return True\n            elif isinstance(t[0], str):\n                return True\n            elif isinstance(t[0], (list, tuple)):\n                return len(t[0]) == 0 or isinstance(t[0][0], str)\n            else:\n                return False\n        else:\n            return False\n    if text_pair is not None:\n        if not _is_valid_text_input(text):\n            raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')\n        if not isinstance(text_pair, (list, tuple)):\n            raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    elif not isinstance(text, (list, tuple)):\n        raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    if text_pair is not None:\n        is_batched = isinstance(text, (list, tuple))\n    else:\n        is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))\n    words = text if text_pair is None else text_pair\n    if boxes is None:\n        raise ValueError('You must provide corresponding bounding boxes')\n    if is_batched:\n        if len(words) != len(boxes):\n            raise ValueError('You must provide words and boxes for an equal amount of examples')\n        for words_example, boxes_example in zip(words, boxes):\n            if len(words_example) != len(boxes_example):\n                raise ValueError('You must provide as many words as there are bounding boxes')\n    elif len(words) != len(boxes):\n        raise ValueError('You must provide as many words as there are bounding boxes')\n    if is_batched:\n        if text_pair is not None and len(text) != len(text_pair):\n            raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')\n        batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n        is_pair = bool(text_pair is not None)\n        return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n    else:\n        return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\nsequences with word-level normalized bounding boxes and optional labels.\n\nArgs:\ntext (`str`, `List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings\n(words of a single example or questions of a batch of examples) or a list of list of strings (batch of\nwords).\ntext_pair (`List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence should be a list of strings\n(pretokenized string).\nboxes (`List[List[int]]`, `List[List[List[int]]]`):\nWord-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.\nword_labels (`List[int]`, `List[List[int]]`, *optional*):\nWord-level integer labels (for token classification tasks such as FUNSD, CORD).", "source": "github-repos"}
{"code": "def explore_package(module_name):\n    packages = []\n    loader = pkgutil.get_loader(module_name)\n    for sub_module in pkgutil.walk_packages([os.path.dirname(loader.get_filename())], prefix=(module_name + '.')):\n        (_, sub_module_name, _) = sub_module\n        packages.append(sub_module_name)\n    return packages", "docstring": "returns all the packages in the module\n\nArgs:\nmodule_name: name of module\n\nReturns:", "source": "codesearchnet"}
{"code": "def run(self, timeout=(- 1)):\n\n    def target():\n        self.process = subprocess.Popen(self.cmd, stdout=self.stdout_dest, stderr=self.stderr_dest, shell=self.shell)\n        (stdout, stderr) = self.process.communicate()\n        if self.decode_out:\n            if stdout:\n                self.stdout = stdout.decode('utf-8')\n            if stderr:\n                self.stderr = stderr.decode('utf-8')\n    thread = threading.Thread(target=target)\n    thread.start()\n    if (timeout > 0):\n        thread.join(timeout)\n        if thread.is_alive():\n            self.process.terminate()\n            thread.join()\n            raise SubprocessError('Reached timeout after {t} seconds'.format(t=timeout))\n    else:\n        thread.join()\n    return (self.process.returncode, self.stdout, self.stderr)", "docstring": "Run the subprocess.\n\nArguments:\ntimeout (optional) If a positive real value, then timout after\nthe given number of seconds.\n\nRaises:\nSubprocessError If subprocess has not completed after \"timeout\"\nseconds.", "source": "codesearchnet"}
{"code": "def search(self, tags=None):\n        \n        if isinstance(tags, str):\n            tags = [tags]\n        return self.workbench.generate_sample_set(tags)", "docstring": "Wrapper for the Workbench search method\nArgs:\ntags: a single tag 'pcap' or a list of tags to search for ['bad','aptz13']\nReturns:\nA sample_set that contains the md5s for all matching samples", "source": "juraj-google-style"}
{"code": "def AsRegEx(self):\n    parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)\n    result = u''.join((self._ReplaceRegExPart(p) for p in parts))\n    return rdf_standard.RegularExpression((u'(?i)\\\\A%s\\\\Z' % result))", "docstring": "Return the current glob as a simple regex.\n\nNote: No interpolation is performed.\n\nReturns:\nA RegularExpression() object.", "source": "codesearchnet"}
{"code": "def handle_or_else(self, orelse, test):\n    if isinstance(orelse[0], ast.If):\n        control_flow_node = self.visit(orelse[0])\n        control_flow_node.test.label = ('el' + control_flow_node.test.label)\n        test.connect(control_flow_node.test)\n        return control_flow_node.last_nodes\n    else:\n        else_connect_statements = self.stmt_star_handler(orelse, prev_node_to_avoid=self.nodes[(- 1)])\n        test.connect(else_connect_statements.first_statement)\n        return else_connect_statements.last_statements", "docstring": "Handle the orelse part of an if or try node.\n\nArgs:\norelse(list[Node])\ntest(Node)\n\nReturns:\nThe last nodes of the orelse branch.", "source": "codesearchnet"}
{"code": "def egress(self, envelope, http_headers, operation, binding_options):\n    \n    custom_headers = self._header_handler.GetHTTPHeaders()\n    http_headers.update(custom_headers)\n    return envelope, http_headers", "docstring": "Overriding the egress function to set our headers.\n\nArgs:\nenvelope: An Element with the SOAP request data.\nhttp_headers: A dict of the current http headers.\noperation: The SoapOperation instance.\nbinding_options: An options dict for the SOAP binding.\n\nReturns:\nA tuple of the envelope and headers.", "source": "juraj-google-style"}
{"code": "def update_asset(self, asset, asset_id, asset_name, asset_type):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    if (asset == 'PHONE'):\n        return self.tc_requests.update_victim_phone_asset(self.unique_id, asset_id, asset_name)\n    if (asset == 'EMAIL'):\n        return self.tc_requests.update_victim_email_asset(self.unique_id, asset_id, asset_name, asset_type)\n    if (asset == 'NETWORK'):\n        return self.tc_requests.update_victim_network_asset(self.unique_id, asset_id, asset_name, asset_type)\n    if (asset == 'SOCIAL'):\n        return self.tc_requests.update_victim_social_asset(self.unique_id, asset_id, asset_name, asset_type)\n    if (asset == 'WEB'):\n        return self.tc_requests.update_victim_web_asset(self.unique_id, asset_id, asset_name)\n    self._tcex.handle_error(925, ['asset_type', 'update_asset', 'asset_type', 'asset_type', asset_type])\n    return None", "docstring": "Update a asset of a Victim\n\nValid asset_type:\n+ PHONE\n+ EMAIL\n+ NETWORK\n+ SOCIAL\n+ WEB\n\nArgs:\nasset:\nasset_name:\nasset_id:\nasset_type: PHONE, EMAIL, NETWORK, SOCIAL, or WEB\n\nReturns:", "source": "codesearchnet"}
{"code": "def _calculate_minimum_silent_period(baudrate):\n    _checkNumerical(baudrate, minvalue=1, description='baudrate')\n    BITTIMES_PER_CHARACTERTIME = 11\n    MINIMUM_SILENT_CHARACTERTIMES = 3.5\n    bittime = (1 / float(baudrate))\n    return ((bittime * BITTIMES_PER_CHARACTERTIME) * MINIMUM_SILENT_CHARACTERTIMES)", "docstring": "Calculate the silent period length to comply with the 3.5 character silence between messages.\n\nArgs:\nbaudrate (numerical): The baudrate for the serial port\n\nReturns:\nThe number of seconds (float) that should pass between each message on the bus.\n\nRaises:\nValueError, TypeError.", "source": "codesearchnet"}
{"code": "def wait_for(self, pattern, timeout=None):\n        \n        should_continue = True\n\n        if self.block:\n            raise TypeError(NON_BLOCKING_ERROR_MESSAGE)\n\n        def stop(signum, frame):  \n            nonlocal should_continue\n            if should_continue:\n                raise TimeoutError()\n\n        if timeout:\n            signal.signal(signal.SIGALRM, stop)\n            signal.alarm(timeout)\n\n        while should_continue:\n            output = self.poll_output() + self.poll_error()\n            filtered = [line for line in output if re.match(pattern, line)]\n            if filtered:\n                should_continue = False", "docstring": "Block until a pattern have been found in stdout and stderr\n\nArgs:\npattern(:class:`~re.Pattern`): The pattern to search\ntimeout(int): Maximum number of second to wait. If None, wait infinitely\n\nRaises:\nTimeoutError: When timeout is reach", "source": "juraj-google-style"}
{"code": "def try_to_create_directory(directory_path):\n    \n    logger = logging.getLogger(\"ray\")\n    directory_path = os.path.expanduser(directory_path)\n    if not os.path.exists(directory_path):\n        try:\n            os.makedirs(directory_path)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise e\n            logger.warning(\n                \"Attempted to create '{}', but the directory already \"\n                \"exists.\".format(directory_path))\n        \n        \n    try:\n        os.chmod(directory_path, 0o0777)\n    except OSError as e:\n        \n        \n        \n        \n        \n        if e.errno in [errno.EACCES, errno.EPERM]:\n            pass\n        else:\n            raise", "docstring": "Attempt to create a directory that is globally readable/writable.\n\nArgs:\ndirectory_path: The path of the directory to create.", "source": "juraj-google-style"}
{"code": "def get_coordination_service_leader(self):\n    return '/job:' + self.get_job_name() + '/task:0'", "docstring": "Returns the location for coordination service.\n\nThe coordination service should be located on TPU worker0.\n\nReturns:\nA string indicate the location path.", "source": "github-repos"}
{"code": "def set_global(cls, user_agent=None, user_agent_config_yaml=None,\n                   user_agent_lookup=None, **kwargs):\n        \n        \n        cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)", "docstring": "Set global user agent string\n\nArgs:\nuser_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.\nuser_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.\nuser_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def GetVShadowStoreByPathSpec(self, path_spec):\n    \n    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)\n    if store_index is None:\n      return None\n\n    return self._vshadow_volume.get_store(store_index)", "docstring": "Retrieves a VSS store for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyvshadow.store: a VSS store or None if not available.", "source": "juraj-google-style"}
{"code": "def connect_from(self, vertex, weight=1):\n        \n        for edge in self.edges_in:\n            if vertex == edge.vertex_out:\n                return edge\n        return Edge(vertex, self, weight)", "docstring": "Connect another vertex to this one.\n\nArgs:\nvertex (Vertex): vertex to connect from.\nweight (int): weight of the edge.\n\nReturns:\nEdge: the newly created edge.", "source": "juraj-google-style"}
{"code": "def find_furious_yaml(config_file=__file__):\n    \n    checked = set()\n    result = _find_furious_yaml(os.path.dirname(config_file), checked)\n    if not result:\n        result = _find_furious_yaml(os.getcwd(), checked)\n    return result", "docstring": "Traverse directory trees to find a furious.yaml file\n\nBegins with the location of this file then checks the\nworking directory if not found\n\nArgs:\nconfig_file: location of this file, override for\ntesting\nReturns:\nthe path of furious.yaml or None if not found", "source": "juraj-google-style"}
{"code": "def create_feature_map(features, feature_indices, output_dir):\n    feature_map = []\n    for (name, info) in feature_indices:\n        transform_name = features[name]['transform']\n        source_column = features[name]['source_column']\n        if (transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]):\n            feature_map.append((info['index_start'], name))\n        elif (transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]):\n            (vocab, _) = read_vocab_file(os.path.join(output_dir, (VOCAB_ANALYSIS_FILE % source_column)))\n            for (i, word) in enumerate(vocab):\n                if (transform_name == ONE_HOT_TRANSFORM):\n                    feature_map.append(((info['index_start'] + i), ('%s=%s' % (source_column, word))))\n                elif (transform_name == MULTI_HOT_TRANSFORM):\n                    feature_map.append(((info['index_start'] + i), ('%s has \"%s\"' % (source_column, word))))\n        elif (transform_name == IMAGE_TRANSFORM):\n            for i in range(info['size']):\n                feature_map.append(((info['index_start'] + i), ('%s image feature %d' % (source_column, i))))\n    return feature_map", "docstring": "Returns feature_map about the transformed features.\n\nfeature_map includes information such as:\n1, cat1=0\n2, cat1=1\n3, numeric1\n...\nReturns:\nList in the from\n[(index, feature_description)]", "source": "codesearchnet"}
{"code": "def correlate(x1, x2, mode='valid'):\n    if any_symbolic_tensors((x1, x2)):\n        return Correlate(mode=mode).symbolic_call(x1, x2)\n    return backend.numpy.correlate(x1, x2, mode=mode)", "docstring": "Compute the cross-correlation of two 1-dimensional tensors.\n\nArgs:\nx1: First 1-dimensional input tensor of length M.\nx2: Second 1-dimensional input tensor of length N.\nmode: Either `valid`, `same` or `full`.\nBy default the mode is set to `valid`, which returns\nan output of length max(M, N) - min(M, N) + 1.\n`same` returns an output of length max(M, N).\n`full` mode returns the convolution at each point of\noverlap, with an output length of N+M-1\n\nReturns:\nOutput tensor, cross-correlation of `x1` and `x2`.", "source": "github-repos"}
{"code": "def is_value_type_valid_for_exact_conditions(self, value):\n    \n    \n    if isinstance(value, string_types) or isinstance(value, (numbers.Integral, float)):\n      return True\n\n    return False", "docstring": "Method to validate if the value is valid for exact match type evaluation.\n\nArgs:\nvalue: Value to validate.\n\nReturns:\nBoolean: True if value is a string, boolean, or number. Otherwise False.", "source": "juraj-google-style"}
{"code": "def merge(inputs, name=None):\n    if any((inp is None for inp in inputs)):\n        raise ValueError('At least one of the merge inputs is None: %s' % inputs)\n    with ops.name_scope(name, 'Merge', inputs) as name:\n        inputs = [ops.internal_convert_to_tensor_or_composite(inp, as_ref=True) for inp in inputs]\n        if all((isinstance(v, tensor_lib.Tensor) for v in inputs)):\n            if all((v.dtype._is_ref_dtype for v in inputs)):\n                return gen_control_flow_ops.ref_merge(inputs, name)\n            else:\n                return gen_control_flow_ops.merge(inputs, name)\n        else:\n            if all((isinstance(v, (indexed_slices.IndexedSlices, tensor_lib.Tensor)) for v in inputs)):\n                inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)\n            for v in inputs:\n                if not isinstance(v, composite_tensor.CompositeTensor):\n                    raise TypeError('Type %s not supported' % type(v))\n            for v in inputs[1:]:\n                nest.assert_same_structure(inputs[0], v, expand_composites=True)\n            flat_inputs = [nest.flatten(v, expand_composites=True) for v in inputs]\n            merged_results = [gen_control_flow_ops.merge(component) for component in zip(*flat_inputs)]\n            flat_merged = [tensor for tensor, _ in merged_results]\n            chosen_index = merged_results[0][1]\n            merged_inputs = nest.pack_sequence_as(inputs[0], flat_merged, expand_composites=True)\n            return (merged_inputs, chosen_index)", "docstring": "Returns the value of an available element of `inputs`.\n\nThis op tests each of the tensors in `inputs` in turn to determine if any of\nthem is available. If it finds an available tensor, it returns it and its\nindex in `inputs`.\n\nIt is an error if more than one tensor in `inputs` is available. If no tensor\nin `inputs` is available, the returned tensor and index are not set.\n\nThis op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of\n`Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices\nbefore merging.\n\nArgs:\ninputs: The input tensors, at most one of which is available.\nname: A name for this operation (optional).\n\nReturns:\nA tuple containing the chosen input tensor and its index in `inputs`.\n\nRaises:\nValueError: If any of the inputs is None, or inputs are IndexedSlices and\nsome but not all have a dense_shape property.", "source": "github-repos"}
{"code": "def hkl_transformation(transf, miller_index):\n    \n    \n    lcm = lambda a, b: a * b \n    reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf) if i != 0]) * transf\n    reduced_transf = reduced_transf.astype(int)\n\n    \n    t_hkl = np.dot(reduced_transf, miller_index)\n    d = abs(reduce(gcd, t_hkl))\n    t_hkl = np.array([int(i / d) for i in t_hkl])\n\n    \n    if len([i for i in t_hkl if i < 0]) > 1:\n        t_hkl *= -1\n\n    return tuple(t_hkl)", "docstring": "Returns the Miller index from setting\nA to B using a transformation matrix\nArgs:\ntransf (3x3 array): The transformation matrix\nthat transforms a lattice of A to B\nmiller_index ([h, k, l]): Miller index to transform to setting B", "source": "juraj-google-style"}
{"code": "def MeshViewers(shape=(1, 1), titlebar='Mesh Viewers', keepalive=False, window_width=1280, window_height=960):\n    if (not test_for_opengl()):\n        return Dummy()\n    mv = MeshViewerLocal(shape=shape, titlebar=titlebar, uid=None, keepalive=keepalive, window_width=window_width, window_height=window_height)\n    return mv.get_subwindows()", "docstring": "Allows subplot-style inspection of primitives in multiple subwindows.\n\nArgs:\nshape: a tuple indicating the number of vertical and horizontal windows requested\n\nReturns: a list of lists of MeshViewer objects: one per window requested.", "source": "codesearchnet"}
{"code": "def get_propagator(name):\n    from .sgp4 import Sgp4\n    from .sgp4beta import Sgp4Beta\n    scope = locals().copy()\n    scope.update(globals())\n    if (name not in scope):\n        raise UnknownPropagatorError(name)\n    return scope[name]", "docstring": "Retrieve a named propagator\n\nArgs:\nname (str): Name of the desired propagator\nReturn:\nPropagator class", "source": "codesearchnet"}
{"code": "def set_examples(self, examples):\n    self.store('examples', examples)\n    if (len(examples) > 0):\n        self.store('are_sequence_examples', isinstance(examples[0], tf.train.SequenceExample))\n    return self", "docstring": "Sets the examples to be displayed in WIT.\n\nArgs:\nexamples: List of example protos.\n\nReturns:\nself, in order to enabled method chaining.", "source": "codesearchnet"}
{"code": "def load_words(self, words):\n    self._dictionary.update([word.lower() for word in words])\n    self._update_dictionary()", "docstring": "Load a list of words from which to generate a word frequency list\n\nArgs:\nwords (list): The list of words to be loaded", "source": "codesearchnet"}
{"code": "def init_pool_generator(gens, random_seed=None, id_queue=None):\n    global _SHARED_SEQUENCES\n    _SHARED_SEQUENCES = gens\n    worker_proc = multiprocessing.current_process()\n    worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)\n    if random_seed is not None:\n        np.random.seed(random_seed + worker_proc.ident)\n    if id_queue is not None:\n        id_queue.put(worker_proc.ident, block=True, timeout=0.1)", "docstring": "Initializer function for pool workers.\n\nArgs:\ngens: State which should be made available to worker processes.\nrandom_seed: An optional value with which to seed child processes.\nid_queue: A multiprocessing Queue of worker ids. This is used to indicate\nthat a worker process was created by Keras and can be terminated using\nthe cleanup_all_keras_forkpools utility.", "source": "github-repos"}
{"code": "def halted(self):\n    result = int(self._dll.JLINKARM_IsHalted())\n    if (result < 0):\n        raise errors.JLinkException(result)\n    return (result > 0)", "docstring": "Returns whether the CPU core was halted.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``True`` if the CPU core is halted, otherwise ``False``.\n\nRaises:\nJLinkException: on device errors.", "source": "codesearchnet"}
{"code": "def cli_cmd_to_string(args):\n    if isinstance(args, basestring):\n        return args\n    return ' '.join([pipes.quote(arg) for arg in args])", "docstring": "Converts a cmd arg list to string.\n\nArgs:\nargs: list of strings, the arguments of a command.\n\nReturns:\nString representation of the command.", "source": "codesearchnet"}
{"code": "def list_offers(access_token, subscription_id, location, publisher):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Compute/',\n                        'locations/', location,\n                        '/publishers/', publisher,\n                        '/artifacttypes/vmimage/offers?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List available VM image offers from a publisher.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location. E.g. westus.\npublisher (str): Publisher name, e.g. Canonical.\n\nReturns:\nHTTP response with JSON list of image offers.", "source": "juraj-google-style"}
{"code": "def write_index(self, overwrite: bool = False, mock: bool = False) -> None:\n        \n        write_if_allowed(self.index_filename, self.index_content(),\n                         overwrite=overwrite, mock=mock)", "docstring": "Writes the index file, if permitted.\n\nArgs:\noverwrite: allow existing files to be overwritten?\nmock: pretend to write, but don't", "source": "juraj-google-style"}
{"code": "def parse_radl(data):\n    if (data is None):\n        return None\n    elif os.path.isfile(data):\n        f = open(data)\n        data = ''.join(f.readlines())\n        f.close()\n    elif (data.strip() == ''):\n        return RADL()\n    data = (data + '\\n')\n    parser = RADLParser(lextab='radl')\n    return parser.parse(data)", "docstring": "Parse a RADL document.\n\nArgs:\n- data(str): filepath to a RADL content or a string with content.\n\nReturn: RADL object.", "source": "codesearchnet"}
{"code": "def _interactive_input_tensor_to_features_dict(feature_map, hparams):\n  \n  inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n  input_is_image = False if len(inputs.get_shape()) < 3 else True\n\n  x = inputs\n  if input_is_image:\n    x = tf.image.resize_images(x, [299, 299])\n    x = tf.reshape(x, [1, 299, 299, -1])\n    x = tf.to_int32(x)\n  else:\n    \n    num_samples = x[0]\n    length = x[2]\n    x = tf.slice(x, [3], tf.to_int32([length]))\n    x = tf.reshape(x, [1, -1, 1, 1])\n    \n    \n    x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))\n\n  p_hparams = hparams.problem_hparams\n  input_space_id = tf.constant(p_hparams.input_space_id)\n  target_space_id = tf.constant(p_hparams.target_space_id)\n\n  features = {}\n  features[\"input_space_id\"] = input_space_id\n  features[\"target_space_id\"] = target_space_id\n  features[\"decode_length\"] = (\n      IMAGE_DECODE_LENGTH if input_is_image else inputs[1])\n  features[\"inputs\"] = x\n  return features", "docstring": "Convert the interactive input format (see above) to a dictionary.\n\nArgs:\nfeature_map: dict with inputs.\nhparams: model hyperparameters\n\nReturns:\na features dictionary, as expected by the decoder.", "source": "juraj-google-style"}
{"code": "def __init__(self, name: str, func: Callable[..., T], args: Iterable[Expression], proxy: Optional[T]=None, _id: Optional[str]=None, requires_partition_by: partitionings.Partitioning=partitionings.Index(), preserves_partition_by: partitionings.Partitioning=partitionings.Singleton()):\n    if not _get_allow_non_parallel() and isinstance(requires_partition_by, partitionings.Singleton):\n        reason = requires_partition_by.reason or f'Encountered non-parallelizable form of {name!r}.'\n        raise NonParallelOperation(f\"{reason}\\nConsider using an allow_non_parallel_operations block if you're sure you want to do this. See https:\n    args = tuple(args)\n    if proxy is None:\n        proxy = func(*(arg.proxy() for arg in args))\n    super().__init__(name, proxy, _id)\n    self._func = func\n    self._args = args\n    self._requires_partition_by = requires_partition_by\n    self._preserves_partition_by = preserves_partition_by", "docstring": "Initialize a computed expression.\n\nArgs:\nname: The name of this expression.\nfunc: The function that will be used to compute the value of this\nexpression. Should accept arguments of the types returned when\nevaluating the `args` expressions.\nargs: The list of expressions that will be used to produce inputs to\n`func`.\nproxy: (Optional) a proxy object with same type as the objects that this\nComputedExpression will produce at execution time. If not provided, a\nproxy will be generated using `func` and the proxies of `args`.\n_id: (Optional) a string to uniquely identify this expression.\nrequires_partition_by: The required (common) partitioning of the args.\npreserves_partition_by: The level of partitioning preserved.", "source": "github-repos"}
{"code": "def __init__(self, matrix: np.ndarray) -> None:\n        \n        if matrix.shape != (2, 2) or not linalg.is_unitary(matrix):\n            raise ValueError('Not a 2x2 unitary matrix: {}'.format(matrix))\n        self._matrix = matrix", "docstring": "Initializes the 2-qubit matrix gate.\n\nArgs:\nmatrix: The matrix that defines the gate.", "source": "juraj-google-style"}
{"code": "def update( self, jump ):\n        \n        atom = jump.initial_site.atom\n        dr = jump.dr( self.cell_lengths )\n        \n        jump.final_site.occupation = atom.number\n        jump.final_site.atom = atom\n        jump.final_site.is_occupied = True\n        jump.initial_site.occupation = 0\n        jump.initial_site.atom = None\n        jump.initial_site.is_occupied = False\n        \n        atom.site = jump.final_site\n        atom.number_of_hops += 1\n        atom.dr += dr\n        atom.summed_dr2 += np.dot( dr, dr )", "docstring": "Update the lattice state by accepting a specific jump\n\nArgs:\njump (Jump): The jump that has been accepted.\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def replace_output(self, output, tag=None):\n    if isinstance(output, pvalue.DoOutputsTuple):\n        self.replace_output(output[output._main_tag])\n    elif isinstance(output, pvalue.PValue):\n        self.outputs[tag] = output\n    elif isinstance(output, dict):\n        for output_tag, out in output.items():\n            self.outputs[output_tag] = out\n    else:\n        raise TypeError('Unexpected output type: %s' % output)\n    from apache_beam.transforms import external\n    if isinstance(self.transform, external.ExternalTransform):\n        self.transform.replace_named_outputs(self.named_outputs())", "docstring": "Replaces the output defined by the given tag with the given output.\n\nArgs:\noutput: replacement output\ntag: tag of the output to be replaced.", "source": "github-repos"}
{"code": "def _parse_trunk_groups(self, config):\n    values = TRUNK_GROUP_RE.findall(config)\n    return dict(trunk_groups=values)", "docstring": "_parse_trunk_groups scans the provided configuration block and\nextracts all the vlan trunk groups.  If no trunk groups are configured\nan empty List is returned as the vlaue.  The return dict is intended\nto be merged into the response dict.\n\nArgs:\nconfig (str): The vlan configuration block form the node's\nrunning configuration\n\nReturns:\ndict: resource dict attribute", "source": "codesearchnet"}
{"code": "def on_merge(self, to_be_merged, merge_result, context):\n    pass", "docstring": "Called when multiple windows are merged.\n\nArgs:\nto_be_merged: the set of windows to be merged\nmerge_result: the window into which the windows are being merged\ncontext: a context (e.g. a TriggerContext instance) for managing state\nand setting timers", "source": "github-repos"}
{"code": "def get_snapshot_by(self, volume_id_or_uri, field, value):\n        \n        uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n        return self._client.get_by(field, value, uri=uri)", "docstring": "Gets all snapshots that match the filter.\n\nThe search is case-insensitive.\n\nArgs:\nvolume_id_or_uri: Can be either the volume id or the volume uri.\nfield: Field name to filter.\nvalue: Value to filter.\n\nReturns:\nlist: Snapshots", "source": "juraj-google-style"}
{"code": "def cumprod(vari, axis=None):\n    if isinstance(vari, Poly):\n        if (np.prod(vari.shape) == 1):\n            return vari.copy()\n        if (axis is None):\n            vari = chaospy.poly.shaping.flatten(vari)\n            axis = 0\n        vari = chaospy.poly.shaping.rollaxis(vari, axis)\n        out = [vari[0]]\n        for poly in vari[1:]:\n            out.append((out[(- 1)] * poly))\n        return Poly(out, vari.dim, vari.shape, vari.dtype)\n    return np.cumprod(vari, axis)", "docstring": "Perform the cumulative product of a shapeable quantity over a given axis.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nInput data.\naxis (int):\nAxis over which the sum is taken. By default ``axis`` is None, and\nall elements are summed.\n\nReturns:\n(chaospy.poly.base.Poly):\nAn array shaped as ``vari`` but with the specified axis removed.\n\nExamples:\n>>> vari = cp.prange(4)\n>>> print(vari)\n[1, q0, q0^2, q0^3]\n>>> print(cp.cumprod(vari))\n[1, q0, q0^3, q0^6]", "source": "codesearchnet"}
{"code": "def log(x):\n    return math_ops.log(x)", "docstring": "Element-wise log.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def _layer_stack(mp,\n                 inputs,\n                 self_attention_bias,\n                 layers,\n                 hparams,\n                 encoder_output=None,\n                 encoder_decoder_attention_bias=None):\n  \n  layers = layers.strip(\",\").split(\",\")\n\n  \n  \n  self_attention_bias_3d = mp(tf.squeeze, self_attention_bias, 1)\n  if encoder_decoder_attention_bias is not None:\n    encoder_decoder_attention_bias_3d = mp(\n        tf.squeeze, encoder_decoder_attention_bias, 1)\n  relu_dropout_broadcast_dims = (\n      common_layers.comma_separated_string_to_integer_list(\n          getattr(hparams, \"relu_dropout_broadcast_dims\", \"\")))\n  mix_size = int(hparams.mix_fraction * hparams.hidden_size)\n  accumulator = inputs\n  x = inputs\n  for layer_num, layer_type in enumerate(layers):\n    with tf.variable_scope(\"%s_%d\" % (layer_type, layer_num)):\n      tf.logging.info(\"%s_%d\" % (layer_type, layer_num))\n      if layer_type == \"a\":\n        \n        accumulator = mp(tf.add, x, accumulator)\n        x = accumulator\n      elif layer_type == \"n\":\n        \n        x = mp(common_layers.apply_norm,\n               x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)\n      elif layer_type == \"d\":\n        \n        x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)\n      elif layer_type == \"m\":\n        if mix_size > 0:\n          \n          def _split(t):\n            return tuple(tf.split(\n                t, [mix_size, hparams.hidden_size - mix_size], 2))\n          to_mix, to_keep = mp(_split, x)\n          mixed = expert_utils.all_reduce_ring(to_mix, mp)\n          mixed = mp(tf.multiply, mixed, mp.n ** -0.5)\n          x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep)\n      elif layer_type == \"att\":\n        \n        q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,\n               name=\"q_transform\")\n        x = mp(\n            common_attention.scaled_dot_product_attention_simple,\n            q, x, x, self_attention_bias_3d)\n        x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,\n               name=\"o_transform\")\n      elif layer_type == \"enc-att\":\n        \n        q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,\n               name=\"q_transform\")\n        assert encoder_output is not None\n        x = mp(\n            common_attention.scaled_dot_product_attention_simple,\n            q, encoder_output, encoder_output,\n            encoder_decoder_attention_bias_3d)\n        x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,\n               name=\"o_transform\")\n      elif layer_type == \"multihead-att\":\n        \n        x = mp(\n            common_attention.multihead_attention,\n            x,\n            None,\n            self_attention_bias,  \n            hparams.multihead_attention_key_channels or hparams.hidden_size,\n            hparams.multihead_attention_value_channels or hparams.hidden_size,\n            hparams.hidden_size,\n            hparams.multihead_attention_num_heads,\n            hparams.attention_dropout)\n      elif layer_type == \"enc-multihead-att\":\n        \n        x = mp(\n            common_attention.multihead_attention,\n            x,\n            encoder_output,\n            encoder_decoder_attention_bias,  \n            hparams.multihead_attention_key_channels or hparams.hidden_size,\n            hparams.multihead_attention_value_channels or hparams.hidden_size,\n            
hparams.hidden_size,\n            hparams.multihead_attention_num_heads,\n            hparams.attention_dropout)\n      elif layer_type == \"ffn\":\n        x = mp(\n            common_layers.dense_relu_dense, x,\n            hparams.filter_size, hparams.hidden_size,\n            dropout=hparams.relu_dropout,\n            dropout_broadcast_dims=[relu_dropout_broadcast_dims] * mp.n)\n      else:\n        assert False, \"unknown sublayer %s\" % layer_type\n  return x", "docstring": "A stack of layers.\n\nArgs:\nmp: a Parallelism object\ninputs: a list of Tensors\nself_attention_bias: list of bias Tensor for self-attention\n(see common_attention.attention_bias())\nlayers: a string\nhparams: hyperparameters for model\nencoder_output: optional list of tensors\nencoder_decoder_attention_bias: optional list of tensors\n\nReturns:\ny: a list of Tensors", "source": "juraj-google-style"}
{"code": "def get_ams_access_token(accountname, accountkey):\n    \n    accountkey_encoded = urllib.parse.quote(accountkey, safe='')\n    body = \"grant_type=client_credentials&client_id=\" + accountname + \\\n\t\"&client_secret=\" + accountkey_encoded + \" &scope=urn%3aWindowsAzureMediaServices\"\n    return do_ams_auth(ams_auth_endpoint, body)", "docstring": "Get Media Services Authentication Token.\n\nArgs:\naccountname (str): Azure Media Services account name.\naccountkey (str): Azure Media Services Key.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _ParseOriginalFilename(self, file_object, format_version):\n    \n    file_offset = file_object.tell()\n\n    if format_version == 1:\n      data_type_map = self._GetDataTypeMap(\n          'recycle_bin_metadata_utf16le_string')\n    else:\n      data_type_map = self._GetDataTypeMap(\n          'recycle_bin_metadata_utf16le_string_with_size')\n\n    try:\n      original_filename, _ = self._ReadStructureFromFileObject(\n          file_object, file_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse original filename with error: {0!s}'.format(\n              exception))\n\n    if format_version == 1:\n      return original_filename.rstrip('\\x00')\n\n    return original_filename.string.rstrip('\\x00')", "docstring": "Parses the original filename.\n\nArgs:\nfile_object (FileIO): file-like object.\nformat_version (int): format version.\n\nReturns:\nstr: filename or None on error.\n\nRaises:\nParseError: if the original filename cannot be read.", "source": "juraj-google-style"}
{"code": "def discard_event(event: events.Event, bot_id: str = None) -> bool:\n    \n    if event[\"type\"] in SKIP_EVENTS:\n        return True\n    elif bot_id and isinstance(event, events.Message):\n        if event.get(\"bot_id\") == bot_id:\n            LOG.debug(\"Ignoring event: %s\", event)\n            return True\n        elif \"message\" in event and event[\"message\"].get(\"bot_id\") == bot_id:\n            LOG.debug(\"Ignoring event: %s\", event)\n            return True\n    return False", "docstring": "Check if the incoming event needs to be discarded\n\nArgs:\nevent: Incoming :class:`slack.events.Event`\nbot_id: Id of connected bot\n\nReturns:\nboolean", "source": "juraj-google-style"}
{"code": "def joint_distribution(dataframe, rownames, colnames):\n    \n    cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)\n    total_observations = cont_table['All']['All']\n    return cont_table/total_observations", "docstring": "Joint Distribution Table\n- The Continguency Table normalized by the total number of observations\nArgs:\nrownames: the column name or list of columns names that make the keys of the rows\ncolnames: the column name or list of columns names that make the keys of the columns", "source": "juraj-google-style"}
{"code": "def from_service_account_file(cls, filename, *args, **kwargs):\n    credentials = service_account.Credentials.from_service_account_file(filename)\n    kwargs['credentials'] = credentials\n    return cls(*args, **kwargs)", "docstring": "Creates an instance of this client using the provided credentials\nfile.\n\nArgs:\nfilename (str): The path to the service account private key json\nfile.\nargs: Additional arguments to pass to the constructor.\nkwargs: Additional arguments to pass to the constructor.\n\nReturns:\ndialogflow_v2.SessionEntityTypesClient: The constructed client.", "source": "codesearchnet"}
{"code": "def SelectArtifacts(cls,\n                      os_name=None,\n                      cpe=None,\n                      labels=None,\n                      restrict_checks=None):\n    \n    results = set()\n    for condition in cls.Conditions(None, os_name, cpe, labels):\n      trigger = condition[1:]\n      for chk in itervalues(cls.checks):\n        if restrict_checks and chk.check_id not in restrict_checks:\n          continue\n        results.update(chk.triggers.Artifacts(*trigger))\n    return results", "docstring": "Takes targeting info, identifies artifacts to fetch.\n\nArgs:\nos_name: 0+ OS names.\ncpe: 0+ CPE identifiers.\nlabels: 0+ GRR labels.\nrestrict_checks: A list of check ids whose artifacts should be fetched.\n\nReturns:\nthe artifacts that should be collected.", "source": "juraj-google-style"}
{"code": "def _plot(self, axes_list, data = None):\n        \n\n        plot_type = self.settings['plot_style']\n        if data is None:\n            data = self.data\n\n        if data is not None and data is not {}:\n            if plot_type in ('main', 'two'):\n                if not data['random data'] is None:\n                    axes_list[0].plot(data['random data'])\n                    axes_list[0].hold(False)\n            if plot_type in ('aux', 'two', '2D'):\n                if not data['random data'] is None:\n                    axes_list[1].plot(data['random data'])\n                    axes_list[1].hold(False)\n            if plot_type == '2D':\n                if 'image data' in data and not data['image data'] is None:\n                    fig = axes_list[0].get_figure()\n                    implot = axes_list[0].imshow(data['image data'], cmap='pink', interpolation=\"nearest\", extent=[-1,1,1,-1])\n                    fig.colorbar(implot, label='kcounts/sec')", "docstring": "plots the data only the axes objects that are provided in axes_list\nArgs:\naxes_list: a list of axes objects, this should be implemented in each subscript\ndata: data to be plotted if empty take self.data\nReturns: None", "source": "juraj-google-style"}
{"code": "def save(self, fname, mode=None, validate=True, encoding='utf-8', wd=False, inline=False, relative=False, pack=False):\n    self._closed()\n    if (mode is None):\n        mode = 'abs'\n        if pack:\n            mode = 'pack'\n        elif wd:\n            mode = 'wd'\n        elif relative:\n            mode = 'rel'\n        msg = \"Using deprecated save method. Please save the workflow with: wf.save('{}', mode='{}'). Redirecting to new save method.\".format(fname, mode)\n        warnings.warn(msg, DeprecationWarning)\n    modes = ('rel', 'abs', 'wd', 'inline', 'pack')\n    if (mode not in modes):\n        msg = 'Illegal mode \"{}\". Choose one of ({}).'.format(mode, ','.join(modes))\n        raise ValueError(msg)\n    if validate:\n        self.validate()\n    dirname = os.path.dirname(os.path.abspath(fname))\n    if (not os.path.exists(dirname)):\n        os.makedirs(dirname)\n    if (mode == 'inline'):\n        msg = \"Inline saving is deprecated. Please save the workflow using mode='pack'. Setting mode to pack.\"\n        warnings.warn(msg, DeprecationWarning)\n        mode = 'pack'\n    if (mode == 'rel'):\n        relpath = dirname\n        save_yaml(fname=fname, wf=self, pack=False, relpath=relpath, wd=False)\n    if (mode == 'abs'):\n        save_yaml(fname=fname, wf=self, pack=False, relpath=None, wd=False)\n    if (mode == 'pack'):\n        self._pack(fname, encoding)\n    if (mode == 'wd'):\n        if (self.get_working_dir() is None):\n            raise ValueError('Working directory not set.')\n        else:\n            bn = os.path.basename(fname)\n            wd_file = os.path.join(self.working_dir, bn)\n            save_yaml(fname=wd_file, wf=self, pack=False, relpath=None, wd=True)\n            try:\n                shutil.copy2(wd_file, fname)\n            except shutil.Error:\n                pass", "docstring": "Save the workflow to file.\n\nSave the workflow to a CWL file that can be run with a CWL runner.\n\nArgs:\nfname (str): file to save the workflow to.\nmode (str): one of  (rel, abs, wd, inline, pack)\nencoding (str): file encoding to use (default: ``utf-8``).", "source": "codesearchnet"}
{"code": "def correct_tables(self, generation: str) -> str:\n    for l in generation.split('\\n'):\n        if l.count('\\\\begin{tabular}') > 15 or l.count('\\\\multicolumn') > 60 or l.count('&') > 400:\n            generation = generation.replace(l, '')\n    generation = generation.replace('\\\\begin{table} \\\\begin{tabular}', '\\\\begin{table}\\n\\\\begin{tabular}')\n    generation = generation.replace('\\\\end{tabular} \\\\end{table}', '\\\\end{tabular}\\n\\\\end{table}')\n    generation = generation.replace('\\\\end{table} Tab', '\\\\end{table}\\nTab')\n    generation = re.sub('(^.+)\\\\\\\\begin{tab', '\\\\1\\\\n\\\\\\\\begin{tab', generation, flags=re.M)\n    generation = generation.replace('\\\\begin{tabular}{l l}  & \\\\\\\\ \\\\end{tabular}', '')\n    generation = generation.replace('\\\\begin{tabular}{}\\n\\n\\\\end{tabular}', '')\n    return generation", "docstring": "Takes a generated string and fixes tables/tabulars to make them match the markdown format needed.\n\nArgs:\ngeneration (str): The generated text to be postprocessed.\n\nReturns:\nstr: The postprocessed text.\n\nExample:\n\n```python\ncorrect_tables(\"\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}\")\n\"\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}\"\n```", "source": "github-repos"}
{"code": "def safejoin(base, *elements):\n    \n    \n    base = os.path.abspath(base)\n    path = os.path.join(base, *elements)\n    path = os.path.normpath(path)\n    if not path_is_inside(path, base):\n        raise ValueError('target path is outside of the base path')\n    return path", "docstring": "Safely joins paths together.\n\nThe result will always be a subdirectory under `base`, otherwise ValueError\nis raised.\n\nArgs:\nbase (str): base path\nelements (list of strings): path elements to join to base\n\nReturns:\nelements joined to base", "source": "juraj-google-style"}
{"code": "def get_channel(self, **kwargs):\n        \n        if self.compatibility_mode:\n            \n            \n            if hasattr(self.chef_module, 'get_channel'):\n                config.LOGGER.info(\"Calling get_channel... \")\n                \n                channel = self.chef_module.get_channel(**kwargs)\n            \n            if hasattr(self.chef_module, 'create_channel'):\n                config.LOGGER.info(\"Calling create_channel... \")\n                \n                channel = self.chef_module.create_channel(**kwargs)\n            else:\n                channel = None  \n            return channel\n\n        elif hasattr(self, 'channel_info'):\n            \n            \n            channel = ChannelNode(\n                source_domain=self.channel_info['CHANNEL_SOURCE_DOMAIN'],\n                source_id=self.channel_info['CHANNEL_SOURCE_ID'],\n                title=self.channel_info['CHANNEL_TITLE'],\n                thumbnail=self.channel_info.get('CHANNEL_THUMBNAIL'),\n                language=self.channel_info.get('CHANNEL_LANGUAGE'),\n                description=self.channel_info.get('CHANNEL_DESCRIPTION'),\n            )\n            return channel\n\n        else:\n            raise NotImplementedError('BaseChef must overrride the get_channel method')", "docstring": "Call chef script's get_channel method in compatibility mode\n...or...\nCreate a `ChannelNode` from the Chef's `channel_info` class attribute.\n\nArgs:\nkwargs (dict): additional keyword arguments that `uploadchannel` received\nReturns: channel created from get_channel method or None", "source": "juraj-google-style"}
{"code": "def assert_no_current_path(self, path, **kwargs):\n        \n\n        query = CurrentPathQuery(path, **kwargs)\n\n        @self.document.synchronize\n        def assert_no_current_path():\n            if query.resolves_for(self):\n                raise ExpectationNotMet(query.negative_failure_message)\n\n        assert_no_current_path()\n\n        return True", "docstring": "Asserts that the page doesn't have the given path.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj-google-style"}
{"code": "def setup_client(client_id: int, test_name: str, env: Mapping[str, str], num_local_devices: int):\n    redirect_output(f'test-{test_name}-process-{client_id}.log')\n    for var, val in env.items():\n        os.environ[var] = val\n    setup_local_devices(num_local_devices)\n    accelerator_util.initialize_accelerator_system()", "docstring": "Set up a DTensor client for use in multi-client tests.\n\nArgs:\nclient_id: the index of the client.\ntest_name: the name of the test under which this client is running, used To\nidentify the log file artifact containing the test output.\nenv: a dictionary of environment variables to update.\nnum_local_devices: number of local devices to set up.", "source": "github-repos"}
{"code": "def make_pose(translation, rotation):\n    pose = np.zeros((4, 4))\n    pose[(:3, :3)] = rotation\n    pose[(:3, 3)] = translation\n    pose[(3, 3)] = 1.0\n    return pose", "docstring": "Makes a homogenous pose matrix from a translation vector and a rotation matrix.\n\nArgs:\ntranslation: a 3-dim iterable\nrotation: a 3x3 matrix\n\nReturns:\npose: a 4x4 homogenous matrix", "source": "codesearchnet"}
{"code": "def set_nodes_vlan(site, nodes, interface, vlan_id):\n    \n    def _to_network_address(host):\n        \n        splitted = host.split('.')\n        splitted[0] = splitted[0] + \"-\" + interface\n        return \".\".join(splitted)\n\n    gk = get_api_client()\n    network_addresses = [_to_network_address(n) for n in nodes]\n    gk.sites[site].vlans[str(vlan_id)].submit({\"nodes\": network_addresses})", "docstring": "Set the interface of the nodes in a specific vlan.\n\nIt is assumed that the same interface name is available on the node.\n\nArgs:\nsite(str): site to consider\nnodes(list): nodes to consider\ninterface(str): the network interface to put in the vlan\nvlan_id(str): the id of the vlan", "source": "juraj-google-style"}
{"code": "def _ReadParserPresetsFromFile(self):\n    self._presets_file = os.path.join(self._data_location, self._PRESETS_FILE_NAME)\n    if (not os.path.isfile(self._presets_file)):\n        raise errors.BadConfigOption('No such parser presets file: {0:s}.'.format(self._presets_file))\n    try:\n        parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file)\n    except errors.MalformedPresetError as exception:\n        raise errors.BadConfigOption('Unable to read presets from file with error: {0!s}'.format(exception))", "docstring": "Reads the parser presets from the presets.yaml file.\n\nRaises:\nBadConfigOption: if the parser presets file cannot be read.", "source": "codesearchnet"}
{"code": "def add_untagged(self, *responses: 'Response') -> None:\n    for resp in responses:\n        try:\n            merge_key = resp.merge_key\n        except TypeError:\n            self._untagged.append(resp)\n        else:\n            key = (type(resp), merge_key)\n            try:\n                untagged_idx = self._mergeable[key]\n            except KeyError:\n                untagged_idx = len(self._untagged)\n                self._mergeable[key] = untagged_idx\n                self._untagged.append(resp)\n            else:\n                merged = self._untagged[untagged_idx].merge(resp)\n                self._untagged[untagged_idx] = merged\n    self._raw = None", "docstring": "Add an untagged response. These responses are shown before the\nparent response.\n\nArgs:\nresponses: The untagged responses to add.", "source": "codesearchnet"}
{"code": "def _TestGetItem(self, struct, slice_spec, expected):\n    tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)\n    tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)\n    value1 = struct.__getitem__(slice_spec)\n    value2 = struct.__getitem__(tensor_slice_spec1)\n    value3 = struct.__getitem__(tensor_slice_spec2)\n    self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))\n    self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))\n    self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))", "docstring": "Helper function for testing StructuredTensor.__getitem__.\n\nChecks that calling `struct.__getitem__(slice_spec) returns the expected\nvalue.  Checks three different configurations for each slice spec:\n\n* Call __getitem__ with the slice spec as-is (with int values)\n* Call __getitem__ with int values in the slice spec wrapped in\n`tf.constant()`.\n* Call __getitem__ with int values in the slice spec wrapped in\n`tf.compat.v1.placeholder()` (so value is not known at graph\nconstruction time).\n\nArgs:\nstruct: The StructuredTensor to test.\nslice_spec: The slice spec.\nexpected: The expected value of struct.__getitem__(slice_spec), as a\npython list.", "source": "github-repos"}
{"code": "def convert_sqla_type_for_dialect(coltype: TypeEngine, dialect: Dialect, strip_collation: bool=True, convert_mssql_timestamp: bool=True, expand_for_scrubbing: bool=False) -> TypeEngine:\n    assert (coltype is not None)\n    to_mysql = (dialect.name == SqlaDialectName.MYSQL)\n    to_mssql = (dialect.name == SqlaDialectName.MSSQL)\n    typeclass = type(coltype)\n    if isinstance(coltype, sqltypes.Enum):\n        return sqltypes.String(length=coltype.length)\n    if isinstance(coltype, sqltypes.UnicodeText):\n        return sqltypes.UnicodeText()\n    if isinstance(coltype, sqltypes.Text):\n        return sqltypes.Text()\n    if isinstance(coltype, sqltypes.Unicode):\n        if (((coltype.length is None) and to_mysql) or expand_for_scrubbing):\n            return sqltypes.UnicodeText()\n    if isinstance(coltype, sqltypes.String):\n        if (((coltype.length is None) and to_mysql) or expand_for_scrubbing):\n            return sqltypes.Text()\n        if strip_collation:\n            return remove_collation(coltype)\n        return coltype\n    if ((typeclass == mssql.base.BIT) and to_mysql):\n        return mysql.base.BIT()\n    is_mssql_timestamp = isinstance(coltype, MSSQL_TIMESTAMP)\n    if (is_mssql_timestamp and to_mssql and convert_mssql_timestamp):\n        return mssql.base.BINARY(8)\n    return coltype", "docstring": "Converts an SQLAlchemy column type from one SQL dialect to another.\n\nArgs:\ncoltype: SQLAlchemy column type in the source dialect\n\ndialect: destination :class:`Dialect`\n\nstrip_collation: remove any ``COLLATION`` information?\n\nconvert_mssql_timestamp:\nsince you cannot write to a SQL Server ``TIMESTAMP`` field, setting\nthis option to ``True`` (the default) converts such types to\nsomething equivalent but writable.\n\nexpand_for_scrubbing:\nThe purpose of expand_for_scrubbing is that, for example, a\n``VARCHAR(200)`` field containing one or more instances of\n``Jones``, where ``Jones`` is to be replaced with ``[XXXXXX]``,\nwill get longer (by an unpredictable amount). So, better to expand\nto unlimited length.\n\nReturns:\nan SQLAlchemy column type instance, in the destination dialect", "source": "codesearchnet"}
{"code": "def EscapeWildcards(string):\n  \n  precondition.AssertType(string, Text)\n  return string.replace(\"%\", r\"\\%\").replace(\"_\", r\"\\_\")", "docstring": "Escapes wildcard characters for strings intended to be used with `LIKE`.\n\nDatabases don't automatically escape wildcard characters ('%', '_'), so any\nnon-literal string that is passed to `LIKE` and is expected to match literally\nhas to be manually escaped.\n\nArgs:\nstring: A string to escape.\n\nReturns:\nAn escaped string.", "source": "juraj-google-style"}
{"code": "def to_json(value: Any, **kwargs) -> Any:\n    if isinstance(value, Symbolic):\n        return value.sym_jsonify(**kwargs)\n    return utils.to_json(value, **kwargs)", "docstring": "Serializes a (maybe) symbolic value into a plain Python object.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Any())\n])\nclass A(pg.Object):\npass\n\na1 = A(1)\njson = a1.to_json()\na2 = pg.from_json(json)\nassert pg.eq(a1, a2)\n\nArgs:\nvalue: value to serialize. Applicable value types are:\n\n* Builtin python types: None, bool, int, float, string;\n* JSONConvertible types;\n* List types;\n* Tuple types;\n* Dict types.\n\n**kwargs: Keyword arguments to pass to value.to_json if value is\nJSONConvertible.\n\nReturns:\nJSON value.", "source": "github-repos"}
{"code": "def build_masters(filename, master_dir, designspace_instance_dir=None, designspace_path=None, family_name=None, propagate_anchors=True, minimize_glyphs_diffs=False, normalize_ufos=False, create_background_layers=False, generate_GDEF=True, store_editor_state=True):\n    font = GSFont(filename)\n    if (not os.path.isdir(master_dir)):\n        os.mkdir(master_dir)\n    if (designspace_instance_dir is None):\n        instance_dir = None\n    else:\n        instance_dir = os.path.relpath(designspace_instance_dir, master_dir)\n    designspace = to_designspace(font, family_name=family_name, propagate_anchors=propagate_anchors, instance_dir=instance_dir, minimize_glyphs_diffs=minimize_glyphs_diffs, generate_GDEF=generate_GDEF, store_editor_state=store_editor_state)\n    ufos = {}\n    for source in designspace.sources:\n        if (source.filename in ufos):\n            assert (source.font is ufos[source.filename])\n            continue\n        if create_background_layers:\n            ufo_create_background_layer_for_all_glyphs(source.font)\n        ufo_path = os.path.join(master_dir, source.filename)\n        clean_ufo(ufo_path)\n        source.font.save(ufo_path)\n        if normalize_ufos:\n            import ufonormalizer\n            ufonormalizer.normalizeUFO(ufo_path, writeModTimes=False)\n        ufos[source.filename] = source.font\n    if (not designspace_path):\n        designspace_path = os.path.join(master_dir, designspace.filename)\n    designspace.write(designspace_path)\n    return Masters(ufos, designspace_path)", "docstring": "Write and return UFOs from the masters and the designspace defined in a\n.glyphs file.\n\nArgs:\nmaster_dir: Directory where masters are written.\ndesignspace_instance_dir: If provided, a designspace document will be\nwritten alongside the master UFOs though no instances will be built.\nfamily_name: If provided, the master UFOs will be given this name and\nonly instances with this name will be included in the designspace.\n\nReturns:\nA named tuple of master UFOs (`ufos`) and the path to the designspace\nfile (`designspace_path`).", "source": "codesearchnet"}
{"code": "def append(self, item):\n    self._items.append(item)", "docstring": "Append an item to the Menu.\n\nArgs:\nitem: (MenuItem) the item to be appended.", "source": "github-repos"}
{"code": "def uni_to_beta(text):\n    \n    u = _UNICODE_MAP\n\n    transform = []\n\n    for ch in text:\n        try:\n            conv = u[ch]\n        except KeyError:\n            conv = ch\n\n        transform.append(conv)\n\n    converted = ''.join(transform)\n    return converted", "docstring": "Convert unicode text to a betacode equivalent.\n\nThis method can handle tónos or oxeîa characters in the input.\n\nArgs:\ntext: The text to convert to betacode. This text does not have to all be\nGreek polytonic text, and only Greek characters will be converted. Note\nthat in this case, you cannot convert to beta and then back to unicode.\n\nReturns:\nThe betacode equivalent of the inputted text where applicable.", "source": "juraj-google-style"}
{"code": "def __init__(self, cell):\n    self._cell = cell", "docstring": "Creates a new BoolGaugeCell.\n\nArgs:\ncell: A c pointer of TFE_MonitoringBoolGaugeCell.", "source": "github-repos"}
{"code": "def _open_script_interface(self, conn_id, callback):\n        \n\n        try:\n            handle = self._find_handle(conn_id)\n            services = self._connections[handle]['services']\n        except (ValueError, KeyError):\n            callback(conn_id, self.id, False, 'Connection closed unexpectedly before we could open the script interface')\n            return\n\n        success = TileBusHighSpeedCharacteristic in services[TileBusService]['characteristics']\n        reason = None\n        if not success:\n            reason = 'Could not find high speed streaming characteristic'\n\n        callback(conn_id, self.id, success, reason)", "docstring": "Enable script streaming interface for this IOTile device\n\nArgs:\nconn_id (int): the unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def conv2d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    scale = [1.0] * self.out_channel_size\n    offset = [0.5] * self.out_channel_size\n    mean, variance = (scale, offset)\n    out = nn_ops.conv2d(input_tensor, self.filters, strides=strides, dilations=dilations, padding=padding, data_format='NHWC', name='sample/conv')\n    if bias_fn is not None:\n        out = nn_ops.bias_add(out, self.bias)\n    if has_batch_norm:\n        out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False)\n    if activation_fn is not None:\n        out = activation_fn(out)\n    return {'output': out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\ninput_tensor: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def find_indices(lst, element):\n    \n    result = []\n    offset = -1\n    while True:\n        try:\n            offset = lst.index(element, offset+1)\n        except ValueError:\n            return result\n        result.append(offset)", "docstring": "Returns the indices for all occurrences of 'element' in 'lst'.\n\nArgs:\nlst (list): List to search.\nelement:  Element to find.\n\nReturns:\nlist: List of indices or values", "source": "juraj-google-style"}
{"code": "def CaseGroups(unicode_dir=_UNICODE_DIR):\n    togroup = {}\n\n    def DoLine(codes, fields):\n        'Process single CaseFolding.txt line, updating togroup.'\n        (_, foldtype, lower, _) = fields\n        if (foldtype not in ('C', 'S')):\n            return\n        lower = _UInt(lower)\n        togroup.setdefault(lower, [lower]).extend(codes)\n    ReadUnicodeTable((unicode_dir + '/CaseFolding.txt'), 4, DoLine)\n    groups = togroup.values()\n    for g in groups:\n        g.sort()\n    groups.sort()\n    return (togroup, groups)", "docstring": "Returns list of Unicode code groups equivalent under case folding.\n\nEach group is a sorted list of code points,\nand the list of groups is sorted by first code point\nin the group.\n\nArgs:\nunicode_dir: Unicode data directory\n\nReturns:\nlist of Unicode code groups", "source": "codesearchnet"}
{"code": "def slope(self, other):\n        \n        X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y\n        Y3 = Y1 - Y2\n        X3 = X1 - X2\n        return (Y3 * self.inverse(X3)) % self.P", "docstring": "Determines the slope between this point and another point.\n\nArgs:\nother (AffinePoint): The second point.\n\nReturns:\nint: Slope between self and other.", "source": "juraj-google-style"}
{"code": "def _sia(cache_key, subsystem):\n    \n    \n\n    log.info('Calculating big-phi data for %s...', subsystem)\n\n    \n    \n    \n    \n    \n    \n    \n    if not subsystem:\n        log.info('Subsystem %s is empty; returning null SIA '\n                 'immediately.', subsystem)\n        return _null_sia(subsystem)\n\n    if not connectivity.is_strong(subsystem.cm, subsystem.node_indices):\n        log.info('%s is not strongly connected; returning null SIA '\n                 'immediately.', subsystem)\n        return _null_sia(subsystem)\n\n    \n    \n    \n    if len(subsystem.cut_indices) == 1:\n        \n        if not subsystem.cm[subsystem.node_indices][subsystem.node_indices]:\n            log.info('Single micro nodes %s without selfloops cannot have '\n                     'phi; returning null SIA immediately.', subsystem)\n            return _null_sia(subsystem)\n        \n        elif not config.SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI:\n            log.info('Single micro nodes %s with selfloops cannot have '\n                     'phi; returning null SIA immediately.', subsystem)\n            return _null_sia(subsystem)\n    \n\n    log.debug('Finding unpartitioned CauseEffectStructure...')\n    unpartitioned_ces = _ces(subsystem)\n\n    if not unpartitioned_ces:\n        log.info('Empty unpartitioned CauseEffectStructure; returning null '\n                 'SIA immediately.')\n        \n        return _null_sia(subsystem)\n\n    log.debug('Found unpartitioned CauseEffectStructure.')\n\n    \n    \n    if len(subsystem.cut_indices) == 1:\n        cuts = [Cut(subsystem.cut_indices, subsystem.cut_indices,\n                    subsystem.cut_node_labels)]\n    else:\n        cuts = sia_bipartitions(subsystem.cut_indices,\n                                subsystem.cut_node_labels)\n\n    engine = ComputeSystemIrreducibility(\n        cuts, subsystem, unpartitioned_ces)\n    result = engine.run(config.PARALLEL_CUT_EVALUATION)\n\n    if config.CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA:\n        log.debug('Clearing subsystem caches.')\n        subsystem.clear_caches()\n\n    log.info('Finished calculating big-phi data for %s.', subsystem)\n\n    return result", "docstring": "Return the minimal information partition of a subsystem.\n\nArgs:\nsubsystem (Subsystem): The candidate set of nodes.\n\nReturns:\nSystemIrreducibilityAnalysis: A nested structure containing all the\ndata from the intermediate calculations. The top level contains the\nbasic irreducibility information for the given subsystem.", "source": "juraj-google-style"}
{"code": "def reverse(self, transfer_id, data={}, **kwargs):\n        \n        url = \"{}/{}/reversals\".format(self.base_url, transfer_id)\n        return self.post_url(url, data, **kwargs)", "docstring": "Reverse Transfer from given id\n\nArgs:\ntransfer_id : Id for which transfer object has to be reversed\n\nReturns:\nTransfer Dict which was reversed", "source": "juraj-google-style"}
{"code": "def coverage_score(gold, pred, ignore_in_gold=[], ignore_in_pred=[]):\n    \n    gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)\n\n    return np.sum(pred != 0) / len(pred)", "docstring": "Calculate (global) coverage.\nArgs:\ngold: A 1d array-like of gold labels\npred: A 1d array-like of predicted labels (assuming abstain = 0)\nignore_in_gold: A list of labels for which elements having that gold\nlabel will be ignored.\nignore_in_pred: A list of labels for which elements having that pred\nlabel will be ignored.\n\nReturns:\nA float, the (global) coverage score", "source": "juraj-google-style"}
{"code": "def _environment_variables(**kwargs):\n        \n        \n\n        hdx_key = os.getenv('HDX_KEY')\n        if hdx_key is not None:\n            kwargs['hdx_key'] = hdx_key\n        hdx_url = os.getenv('HDX_URL')\n        if hdx_url is not None:\n            kwargs['hdx_url'] = hdx_url\n        else:\n            hdx_site = os.getenv('HDX_SITE')\n            if hdx_site is not None:\n                kwargs['hdx_site'] = hdx_site\n        return kwargs", "docstring": "Overwrite keyword arguments with environment variables\n\nArgs:\n**kwargs: See below\nhdx_url (str): HDX url to use. Overrides hdx_site.\nhdx_site (str): HDX site to use eg. prod, test. Defaults to test.\nhdx_key (str): Your HDX key. Ignored if hdx_read_only = True.\n\nReturns:\nkwargs: Changed keyword arguments", "source": "juraj-google-style"}
{"code": "def concat(self,array_like):\n        \n        arr = list(array_like)\n        if len(set([x.microns_per_pixel for x in arr])) != 1:\n            raise ValueError(\"Multiple microns per pixel set\")\n        cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr]))\n        cdf.microns_per_pixel = arr[0].microns_per_pixel\n        return cdf", "docstring": "Concatonate multiple CellDataFrames\n\nthrows an error if the microns_per_pixel is not uniform across the frames\n\nArgs:\narray_like (list): a list of CellDataFrames with 1 or more CellDataFrames\n\nReturns:\nCellDataFrame", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    filename = parser_mediator.GetFilename()\n    if (not filename.startswith('INFO2')):\n        return\n    file_header_map = self._GetDataTypeMap('recycler_info2_file_header')\n    try:\n        (file_header, _) = self._ReadStructureFromFileObject(file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse Windows Recycler INFO2 file header with error: {0!s}'.format(exception))\n    if (file_header.unknown1 != 5):\n        parser_mediator.ProduceExtractionWarning('unsupported format signature.')\n        return\n    file_entry_size = file_header.file_entry_size\n    if (file_entry_size not in (280, 800)):\n        parser_mediator.ProduceExtractionWarning('unsupported file entry size: {0:d}'.format(file_entry_size))\n        return\n    file_offset = file_object.get_offset()\n    file_size = file_object.get_size()\n    while (file_offset < file_size):\n        self._ParseInfo2Record(parser_mediator, file_object, file_offset, file_entry_size)\n        file_offset += file_entry_size", "docstring": "Parses a Windows Recycler INFO2 file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def detect_changepoints(points, min_time, data_processor=acc_difference):\n    data = data_processor(points)\n    changepoints = pelt(normal_mean(data, np.std(data)), len(data))\n    changepoints.append((len(points) - 1))\n    result = []\n    for (start, end) in pairwise(changepoints):\n        time_diff = points[end].time_difference(points[start])\n        if (time_diff > min_time):\n            result.append(start)\n    result.append(0)\n    result.append((len(points) - 1))\n    return sorted(list(set(result)))", "docstring": "Detects changepoints on points that have at least a specific duration\n\nArgs:\npoints (:obj:`Point`)\nmin_time (float): Min time that a sub-segmented, bounded by two changepoints, must have\ndata_processor (function): Function to extract data to feed to the changepoint algorithm.\nDefaults to `speed_difference`\nReturns:\n:obj:`list` of int: Indexes of changepoints", "source": "codesearchnet"}
{"code": "def sample(self, bqm, beta_range=None, num_reads=10, num_sweeps=1000):\n    if (not isinstance(num_reads, int)):\n        raise TypeError(\"'samples' should be a positive integer\")\n    if (num_reads < 1):\n        raise ValueError(\"'samples' should be a positive integer\")\n    (h, J, offset) = bqm.to_ising()\n    samples = []\n    energies = []\n    for __ in range(num_reads):\n        (sample, energy) = ising_simulated_annealing(h, J, beta_range, num_sweeps)\n        samples.append(sample)\n        energies.append(energy)\n    response = SampleSet.from_samples(samples, Vartype.SPIN, energies)\n    response.change_vartype(bqm.vartype, offset, inplace=True)\n    return response", "docstring": "Sample from low-energy spin states using simulated annealing.\n\nArgs:\nbqm (:obj:`.BinaryQuadraticModel`):\nBinary quadratic model to be sampled from.\n\nbeta_range (tuple, optional): Beginning and end of the beta schedule\n(beta is the inverse temperature) as a 2-tuple. The schedule is applied\nlinearly in beta. Default is chosen based on the total bias associated\nwith each node.\n\nnum_reads (int, optional, default=10):\nNumber of reads. Each sample is the result of a single run of\nthe simulated annealing algorithm.\n\nnum_sweeps (int, optional, default=1000):\nNumber of sweeps or steps.\n\nReturns:\n:obj:`.SampleSet`\n\nNote:\nThis is a reference implementation, not optimized for speed\nand therefore not an appropriate sampler for benchmarking.", "source": "codesearchnet"}
{"code": "def maybe_append_oov_vectors(embeddings, num_oov_buckets):\n    num_embeddings = np.shape(embeddings)[0]\n    embedding_dim = np.shape(embeddings)[1]\n    embeddings.resize([(num_embeddings + num_oov_buckets), embedding_dim], refcheck=False)", "docstring": "Adds zero vectors for oov buckets if num_oov_buckets > 0.\n\nSince we are assigning zero vectors, adding more that one oov bucket is only\nmeaningful if we perform fine-tuning.\n\nArgs:\nembeddings: Embeddings to extend.\nnum_oov_buckets: Number of OOV buckets in the extended embedding.", "source": "codesearchnet"}
{"code": "def cmAccuracy(cm):\n    \n    \n    cm = cm.type(torch.float64)\n    return cm.diag().sum() / (cm.sum() + 1e-15)", "docstring": "Calculates accuracy using :class:`~ignite.metrics.ConfusionMatrix` metric.\nArgs:\ncm (ConfusionMatrix): instance of confusion matrix metric\n\nReturns:\nMetricsLambda", "source": "juraj-google-style"}
{"code": "def trace_sync(self, data, timeout=5.0):\n    done = AwaitableResponse()\n    self.trace(data, callback=done.set_result)\n    return done.wait(timeout)", "docstring": "Send tracing data and wait for it to finish.\n\nThis awaitable coroutine wraps VirtualIOTileDevice.trace() and turns\nthe callback into an awaitable object.  The appropriate usage of this\nmethod is by calling it inside the event loop as:\n\nawait device.trace_sync(data)\n\nArgs:\ndata (bytes): The raw data that should be traced.\ntimeout (float): The maximum number of seconds to wait before\ntiming out.\n\nReturns:\nawaitable: An awaitable object with the result.\n\nThe result will be True if the data was sent successfully\nor False if the data could not be sent in its entirety.\n\nWhen False is returned, there is no guarantee about how much of\nthe data was sent, if any, just that it was not known to be\nsuccessfully sent.", "source": "codesearchnet"}
{"code": "def pandas_dataframe(self, start, stop, ncol, **kwargs):\n        \n        try:\n            int(start)\n            int(stop)\n        except TypeError:\n            print('start and stop must be ints')\n        try:\n            ncol = int(ncol)\n            return pd.read_csv(six.StringIO('\\n'.join(self[start:stop])), delim_whitespace=True, names=range(ncol), **kwargs)\n        except TypeError:\n            try:\n                ncol = list(ncol)\n                return pd.read_csv(six.StringIO('\\n'.join(self[start:stop])), delim_whitespace=True, names=ncol, **kwargs)\n            except TypeError:\n                print('Cannot pandas_dataframe if ncol is {}, must be int or list'.format(type(ncol)))", "docstring": "Returns the result of tab-separated pandas.read_csv on\na subset of the file.\n\nArgs:\nstart (int): line number where structured data starts\nstop (int): line number where structured data stops\nncol (int or list): the number of columns in the structured\ndata or a list of that length with column names\n\nReturns:\npd.DataFrame: structured data", "source": "juraj-google-style"}
{"code": "def write(self, fb):\n        \n        print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)\n        print('class = {}'.format(fb.func_ins.name), file=self.file)\n        print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)\n        print('func_args = {}'.format(repr(fb.func_args)), file=self.file)\n        print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)\n        print('ext = ({}, {})'.format(\n            repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)\n        if self.setup_stat is not None:\n            self._write_a_stat('setup', self.setup_stat)\n        if self.foward_stat is not None:\n            self._write_a_stat('forward', self.forward_stat)\n        if self.backward_stat is not None:\n            self._write_a_stat('backward', self.backward_stat)", "docstring": "Write a single function benchmark.\n\nArgs:\nfb (FunctionBenchmark): FunctionBenchmark class instance.\nBefore passing to this, you should call ``fb.benchmark()``.", "source": "juraj-google-style"}
{"code": "def ReportStatus(self, request, global_params=None):\n    config = self.GetMethodConfig('ReportStatus')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Reports the status of dataflow WorkItems leased by a worker.\n\nArgs:\nrequest: (DataflowProjectsLocationsJobsWorkItemsReportStatusRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ReportWorkItemStatusResponse) The response message.", "source": "github-repos"}
{"code": "def render_parse_load(raw_config, environment=None, validate=True):\n    \n\n    pre_rendered = render(raw_config, environment)\n\n    rendered = process_remote_sources(pre_rendered, environment)\n\n    config = parse(rendered)\n\n    \n    \n    if config.namespace is None:\n        namespace = environment.get(\"namespace\")\n        if namespace:\n            logger.warn(\"DEPRECATION WARNING: specifying namespace in the \"\n                        \"environment is deprecated. See \"\n                        \"https:\n                        \"\n                        \"for more info.\")\n            config.namespace = namespace\n\n    if validate:\n        config.validate()\n\n    return load(config)", "docstring": "Encapsulates the render -> parse -> validate -> load process.\n\nArgs:\nraw_config (str): the raw stacker configuration string.\nenvironment (dict, optional): any environment values that should be\npassed to the config\nvalidate (bool): if provided, the config is validated before being\nloaded.\n\nReturns:\n:class:`Config`: the parsed stacker config.", "source": "juraj-google-style"}
{"code": "def interpret_obj(\n        self,\n        obj,\n        v_level_indexes,\n        h_level_indexes,\n        v_level_visibility,\n        h_level_visibility,\n        v_level_sort_keys,\n        h_level_sort_keys,\n        v_level_titles,\n        h_level_titles,\n    ):\n        \n        if not isinstance(obj, NonStringIterable):\n            raise self.error(\"Cannot make a table from object {!r}\".format(obj))\n\n        rectangular_rows = tabulate(\n            obj,\n            v_level_indexes=v_level_indexes,\n            h_level_indexes=h_level_indexes,\n            v_level_visibility=v_level_visibility,\n            h_level_visibility=h_level_visibility,\n            v_level_sort_keys=v_level_sort_keys,\n            h_level_sort_keys=h_level_sort_keys,\n            v_level_titles=v_level_titles,\n            h_level_titles=h_level_titles,\n        )\n        assert is_rectangular(rectangular_rows)\n        num_rows, num_cols = size(rectangular_rows)\n        return rectangular_rows, num_cols", "docstring": "Interpret the given Python object as a table.\n\nArgs:\nobj: A sequence (later a mapping, too)\n\nReturns:\nA list of lists represents rows of cells.\n\nRaises:\nTypeError: If the type couldn't be interpreted as a table.", "source": "juraj-google-style"}
{"code": "def _ParseEntryObject(self, file_object, file_offset):\n    entry_object_map = self._GetDataTypeMap('systemd_journal_entry_object')\n    try:\n        (entry_object, _) = self._ReadStructureFromFileObject(file_object, file_offset, entry_object_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse entry object at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    if (entry_object.object_type != self._OBJECT_TYPE_ENTRY):\n        raise errors.ParseError('Unsupported object type: {0:d}.'.format(entry_object.object_type))\n    if (entry_object.object_flags != 0):\n        raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(entry_object.object_flags))\n    return entry_object", "docstring": "Parses an entry object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the entry object relative to the start\nof the file-like object.\n\nReturns:\nsystemd_journal_entry_object: entry object.\n\nRaises:\nParseError: if the entry object cannot be parsed.", "source": "codesearchnet"}
{"code": "def __init__(self, input_energy: energy.BernoulliEnergy, num_expectation_samples: int, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None):\n    super().__init__(input_energy, num_expectation_samples, initial_seed, name)\n    self._logits_variable = tf.Variable(input_energy.logits, trainable=False)\n    self._distribution = tfd.Bernoulli(logits=self._logits_variable, dtype=tf.int8)", "docstring": "Initializes a BernoulliEnergyInference.\n\nArgs:\ninput_energy: The parameterized energy function which defines this\ndistribution via the equations of an energy based model.  This class\nassumes that all parameters of `energy` are `tf.Variable`s and that\nthey are all returned by `energy.variables`.\nnum_expectation_samples: Number of samples to draw and use for estimating\nthe expectation value.\ninitial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This\nseed will be used in the `sample` method.  If None, the seed is updated\nafter every inference call.  Otherwise, the seed is fixed.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def __init__(self, msg, url, headers, status_code, writer=None):\n        \n        self.msg = msg\n        self.url = url\n        self.http_headers = headers\n        self.status_code = status_code\n        self._init_writer(writer)", "docstring": "Set the main attributes and instantiate the writer if given.\n\nArgs:\nmsg(pandasdmx.model.Message): the SDMX message\nurl(str): the URL, if any, that had been sent to the SDMX server\nheaders(dict): http headers\nstatus_code(int): the status code returned by the server\nwriter(str): the module path for the writer class", "source": "juraj-google-style"}
{"code": "def ffn_self_attention_layer(x, filter_depth, output_depth, num_parts, dropout_rate, share_kv=False, name=None):\n    with tf.variable_scope(name, default_name='feedforward_self_attention', values=[x]):\n        x_shape = common_layers.shape_list(x)\n        part_depth = (filter_depth \n        if (not share_kv):\n            combined = common_layers.dense(x, (filter_depth * 3), use_bias=False, name='qkv_transform')\n            combined = tf.expand_dims(combined, axis=2)\n            (q, k, v) = tf.split(combined, 3, axis=3)\n        else:\n            q = tf.expand_dims(common_layers.dense(x, filter_depth, use_bias=False, name='q_transform'), axis=2)\n            kv_combined = tf.expand_dims(common_layers.dense(tf.concat([x, x], axis=1), filter_depth, use_bias=False, name='kv_transform'), axis=2)\n            (k, v) = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1)\n        batch_q = tf.reshape(q, [(- 1), 1, num_parts, part_depth])\n        batch_k = tf.reshape(k, [(- 1), 1, num_parts, part_depth])\n        batch_v = tf.reshape(v, [(- 1), 1, num_parts, part_depth])\n        batch_q *= (part_depth ** (- 0.5))\n        bias = None\n        x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate)\n        x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth])\n        x = common_layers.dense(x, output_depth, use_bias=False, name='output_transform')\n        return x", "docstring": "Self-attention feedforward layer.\n\nWe use self-attention to do feedforward computations. We apply this function\npositionwise where for each position, we linearly transform the output to have\ndepth filter_depth, and break up the result depth-wise into num_parts\ncontiguous parts. The parts self-attend, we concatenate the results\ndepth-wise, and we linearly transform to a depth of output_depth. The goal is\nto get multiplicative interactions between components of a representation.\n\nArgs:\nx: a Tensor with shape [batch, length, channels]\nfilter_depth: an integer\noutput_depth: an integer\nnum_parts: an integer dividing filter depth\ndropout_rate: a floating point number\nshare_kv: Share the key value transform\nname: an optional string\n\nReturns:\nA Tensor with shape [batch, length, output_depth].", "source": "codesearchnet"}
{"code": "def calibrate(self, dataset_gen):\n    self._feed_tensors(dataset_gen, resize_input=True)\n    return self._calibrator.Calibrate()", "docstring": "Calibrates the model with specified generator.\n\nReturns:\nA model with min and max calibration stats.\n\nArgs:\ndataset_gen: A generator that generates calibration samples.", "source": "github-repos"}
{"code": "def quoted_tweet(self):\n    quote_tweet = tweet_embeds.get_quoted_tweet(self)\n    if (quote_tweet is not None):\n        try:\n            return Tweet(quote_tweet)\n        except NotATweetError as nate:\n            raise NotATweetError(('The quote-tweet payload appears malformed.' + \" Failed with '{}'\".format(nate)))\n    else:\n        return None", "docstring": "The quoted Tweet as a Tweet object\nIf the Tweet is not a quote Tweet, return None\nIf the quoted Tweet payload cannot be loaded as a Tweet, this will\nraise a \"NotATweetError\"\n\nReturns:\nTweet: A Tweet representing the quoted status (or None)\n(see tweet_embeds.get_quote_tweet, this is that value as a Tweet)\n\nRaises:\nNotATweetError: if quoted tweet is malformed", "source": "codesearchnet"}
{"code": "def get_privkey(self, address: AddressHex, password: str) -> PrivateKey:\n        \n        address = add_0x_prefix(address).lower()\n\n        if not self.address_in_keystore(address):\n            raise ValueError('Keystore file not found for %s' % address)\n\n        with open(self.accounts[address]) as data_file:\n            data = json.load(data_file)\n\n        acc = Account(data, password, self.accounts[address])\n        return acc.privkey", "docstring": "Find the keystore file for an account, unlock it and get the private key\n\nArgs:\naddress: The Ethereum address for which to find the keyfile in the system\npassword: Mostly for testing purposes. A password can be provided\nas the function argument here. If it's not then the\nuser is interactively queried for one.\nReturns\nThe private key associated with the address", "source": "juraj-google-style"}
{"code": "def atoms_string_from_file(filename):\n        \n        with zopen(filename, \"rt\") as fobject:\n            f = fobject.readlines()\n            coords = 0\n            atoms_str = []\n\n            for line in f:\n                if coords == 0:\n                    find_atoms = line.find(\"ATOMS\")\n                    if find_atoms >= 0:\n                        coords = 1\n                if coords == 1 and not (\"END\" in line):\n                    atoms_str.append(line.replace(\"\\r\", \"\"))\n\n        return ''.join(atoms_str)", "docstring": "Reads atomic shells from file such as feff.inp or ATOMS file\nThe lines are arranged as follows:\n\nx y z   ipot    Atom Symbol   Distance   Number\n\nwith distance being the shell radius and ipot an integer identifying\nthe potential used.\n\nArgs:\nfilename: File name containing atomic coord data.\n\nReturns:\nAtoms string.", "source": "juraj-google-style"}
{"code": "def download_archive(self, name, file_path):\n        \n        uri = self.URI + \"/archive/\" + name\n        return self._client.download(uri, file_path)", "docstring": "Download archived logs of the OS Volume.\n\nArgs:\nname: Name of the OS Volume.\nfile_path (str): Destination file path.\n\nReturns:\nbool: Indicates if the resource was successfully downloaded.", "source": "juraj-google-style"}
{"code": "def _log_band_edge_information(bs, edge_data):\n    \n    if bs.is_spin_polarized:\n        spins = edge_data['band_index'].keys()\n        b_indices = [', '.join([str(i+1) for i in\n                                edge_data['band_index'][spin]])\n                     + '({})'.format(spin.name.capitalize()) for spin in spins]\n        b_indices = ', '.join(b_indices)\n    else:\n        b_indices = ', '.join([str(i+1) for i in\n                               edge_data['band_index'][Spin.up]])\n\n    kpoint = edge_data['kpoint']\n    kpoint_str = kpt_str.format(k=kpoint.frac_coords)\n    k_indices = ', '.join(map(str, edge_data['kpoint_index']))\n\n    if kpoint.label:\n        k_loc = kpoint.label\n    else:\n        branch = bs.get_branch(edge_data['kpoint_index'][0])[0]\n        k_loc = 'between {}'.format(branch['name'])\n\n    logging.info('  Energy: {:.3f} eV'.format(edge_data['energy']))\n    logging.info('  k-point: {}'.format(kpoint_str))\n    logging.info('  k-point location: {}'.format(k_loc))\n    logging.info('  k-point indices: {}'.format(k_indices))\n    logging.info('  Band indices: {}'.format(b_indices))", "docstring": "Log data about the valence band maximum or conduction band minimum.\n\nArgs:\nbs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):\nThe band structure.\nedge_data (dict): The :obj:`dict` from ``bs.get_vbm()`` or\n``bs.get_cbm()``", "source": "juraj-google-style"}
{"code": "def get_panel_info(panel_lines=None, panel_id=None, institute=None, version=None, date=None,\n                   display_name=None):\n    \n    panel_info = {\n        'panel_id': panel_id,\n        'institute': institute,\n        'version': version,\n        'date': date,\n        'display_name': display_name,\n    }\n\n    if panel_lines:\n        for line in panel_lines:\n            line = line.rstrip()\n            if not line.startswith('\n                break\n        \n            info = line[2:].split('=')\n            field = info[0]\n            value = info[1]\n        \n        \n            if not panel_info.get(field):\n                panel_info[field] = value\n\n    panel_info['date'] = get_date(panel_info['date'])\n\n    return panel_info", "docstring": "Parse metadata for a gene panel\n\nFor historical reasons it is possible to include all information about a gene panel in the\nheader of a panel file. This function parses the header.\n\nArgs:\npanel_lines(iterable(str))\n\nReturns:\npanel_info(dict): Dictionary with panel information", "source": "juraj-google-style"}
{"code": "def download(self, chunk_size=1024):\n        \n        stream = BytesIO()\n\n        response = self._swimlane.request(\n            'get',\n            'attachment/download/{}'.format(self.file_id),\n            stream=True\n        )\n\n        for chunk in response.iter_content(chunk_size):\n            stream.write(chunk)\n\n        stream.seek(0)\n\n        return stream", "docstring": "Download attachment\n\nArgs:\nchunk_size (int): Byte-size of chunked download request stream\n\nReturns:\nBytesIO: Stream ready for reading containing the attachment file contents", "source": "juraj-google-style"}
{"code": "def nr_genes(self, build=None):\n    if build:\n        LOG.info('Fetching all genes from build %s', build)\n    else:\n        LOG.info('Fetching all genes')\n    return self.hgnc_collection.find({'build': build}).count()", "docstring": "Return the number of hgnc genes in collection\n\nIf build is used, return the number of genes of a certain build\n\nReturns:\nresult()", "source": "codesearchnet"}
{"code": "def find_signature(self, signature_id=None, signer_email_address=None):\n        \n        if self.signatures:\n            for signature in self.signatures:\n                if signature.signature_id == signature_id or signature.signer_email_address == signer_email_address: \n                    return signature", "docstring": "Return a signature for the given parameters\n\nArgs:\n\nsignature_id (str):             Id of the signature to retrieve.\nsigner_email_address (str):     Email address of the associated signer for the signature to retrieve.\n\nReturns:\nA Signature object or None", "source": "juraj-google-style"}
{"code": "def verify_repo_matches_url(repo, url):\n    \n    repo_parts = urlparse(repo)\n    url_parts = urlparse(url)\n    errors = []\n    repo_path_parts = repo_parts.path.split('/')\n    url_path_parts = url_parts.path.split('/')\n    if repo_parts.hostname != url_parts.hostname:\n        errors.append(\"verify_repo_matches_url: Hostnames don't match! {} {}\".format(\n            repo_parts.hostname, url_parts.hostname\n        ))\n    if not url_parts.path.startswith(repo_parts.path) or \\\n            url_path_parts[:len(repo_path_parts)] != repo_path_parts:\n        errors.append(\"verify_repo_matches_url: Paths don't match! {} {}\".format(\n            repo_parts.path, url_parts.path\n        ))\n    if errors:\n        log.warning(\"\\n\".join(errors))\n        return False\n    return True", "docstring": "Verify ``url`` is a part of ``repo``.\n\nWe were using ``startswith()`` for a while, which isn't a good comparison.\nThis function allows us to ``urlparse`` and compare host and path.\n\nArgs:\nrepo (str): the repo url\nurl (str): the url to verify is part of the repo\n\nReturns:\nbool: ``True`` if the repo matches the url.", "source": "juraj-google-style"}
{"code": "def _check_currency_format(self, format=None):\n        \n        defaults = self.settings['currency']['format']\n        if hasattr(format, '__call__'):\n            format = format()\n        if is_str(format) and re.match('%v', format):\n\n            \n            return {\n                'pos': format,\n                'neg': format.replace(\"-\", \"\").replace(\"%v\", \"-%v\"),\n                'zero': format\n            }\n        elif not format or not format['por'] or not re.match('%v',\n                                                             format['pos']):\n            self.settings['currency']['format'] = {\n                'pos': defaults,\n                'neg': defaults.replace(\"%v\", \"-%v\"),\n                'zero': defaults\n            }\n            return self.settings\n\n        return format", "docstring": "Summary.\n\nArgs:\nformat (TYPE, optional): Description\n\nReturns:\nname (TYPE): Description", "source": "juraj-google-style"}
{"code": "def _get_source_chunks(self, input_text, language=None):\n    \n    chunks = ChunkList()\n    seek = 0\n    result = self._get_annotations(input_text, language=language)\n    tokens = result['tokens']\n    language = result['language']\n    for i, token in enumerate(tokens):\n      word = token['text']['content']\n      begin_offset = token['text']['beginOffset']\n      label = token['dependencyEdge']['label']\n      pos = token['partOfSpeech']['tag']\n      if begin_offset > seek:\n        chunks.append(Chunk.space())\n        seek = begin_offset\n      chunk = Chunk(word, pos, label)\n      if chunk.label in _DEPENDENT_LABEL:\n        \n        chunk.dependency = i < token['dependencyEdge']['headTokenIndex']\n      if chunk.is_punct():\n        chunk.dependency = chunk.is_open_punct()\n      chunks.append(chunk)\n      seek += len(word)\n    return chunks, language", "docstring": "Returns a chunk list retrieved from Syntax Analysis results.\n\nArgs:\ninput_text (str): Text to annotate.\nlanguage (:obj:`str`, optional): Language of the text.\n\nReturns:\nA chunk list. (:obj:`budou.chunk.ChunkList`)", "source": "juraj-google-style"}
{"code": "def validate(self):\n    errors = []\n    for cls in self.OPTIONS:\n        if 'validate' in cls.__dict__ and callable(cls.__dict__['validate']):\n            errors.extend(self.options.view_as(cls).validate(self))\n    return errors", "docstring": "Calls validate on subclassess and returns a list of errors.\n\nvalidate will call validate method on subclasses, accumulate the returned\nlist of errors, and returns the aggregate list.\n\nReturns:\nAggregate list of errors after all calling all possible validate methods.", "source": "github-repos"}
{"code": "def set_memory_growth(device, enable):\n    context.context().set_memory_growth(device, enable)", "docstring": "Set if memory growth should be enabled for a `PhysicalDevice`.\n\nIf memory growth is enabled for a `PhysicalDevice`, the runtime initialization\nwill not allocate all memory on the device. Memory growth cannot be configured\non a `PhysicalDevice` with virtual devices configured.\n\nFor example:\n\n>>> physical_devices = tf.config.list_physical_devices('GPU')\n>>> try:\n...   tf.config.experimental.set_memory_growth(physical_devices[0], True)\n... except:\n...   # Invalid device or cannot modify virtual devices once initialized.\n...   pass\n\nArgs:\ndevice: `PhysicalDevice` to configure\nenable: (Boolean) Whether to enable or disable memory growth\n\nRaises:\nValueError: Invalid `PhysicalDevice` specified.\nRuntimeError: Runtime is already initialized.", "source": "github-repos"}
{"code": "def ProcessAst(serializable_ast, module_map):\n    serializable_ast = _LookupClassReferences(serializable_ast, module_map, serializable_ast.ast.name)\n    serializable_ast = serializable_ast.Replace(class_type_nodes=None)\n    serializable_ast = FillLocalReferences(serializable_ast, {'': serializable_ast.ast, serializable_ast.ast.name: serializable_ast.ast})\n    return serializable_ast.ast", "docstring": "Postprocess a pickled ast.\n\nPostprocessing will either just fill the ClassType references from module_map\nor if module_name changed between pickling and loading rename the module\ninternal references to the new module_name.\nRenaming is more expensive than filling references, as the whole AST needs to\nbe rebuild.\n\nArgs:\nserializable_ast: A SerializableAst instance.\nmodule_map: Used to resolve ClassType.cls links to already loaded modules.\nThe loaded module will be added to the dict.\n\nReturns:\nA pytd.TypeDeclUnit, this is either the input raw_ast with the references\nset or a newly created AST with the new module_name and the references set.\n\nRaises:\nAssertionError: If module_name is already in module_map, which means that\nmodule_name is already loaded.\nUnrestorableDependencyError: If no concrete module exists in module_map for\none of the references from the pickled ast.", "source": "github-repos"}
{"code": "def merge_input_csv_forecast_json(input_csv_file, forecast_json_path, condition_models, dist_models):\n    try:\n        run_date = input_csv_file[:(- 4)].split('_')[(- 1)]\n        print(run_date)\n        ens_member = '_'.join(input_csv_file.split('/')[(- 1)][:(- 4)].split('_')[3:(- 1)])\n        ens_name = input_csv_file.split('/')[(- 1)].split('_')[2]\n        input_data = pd.read_csv(input_csv_file, index_col='Step_ID')\n        full_json_path = (forecast_json_path + '{0}/{1}/'.format(run_date, ens_member))\n        track_ids = sorted(input_data['Track_ID'].unique())\n        model_pred_cols = []\n        condition_models_ns = []\n        dist_models_ns = []\n        gamma_params = ['Shape', 'Location', 'Scale']\n        for condition_model in condition_models:\n            model_pred_cols.append((condition_model.replace(' ', '-') + '_Condition'))\n            condition_models_ns.append(condition_model.replace(' ', '-'))\n        for dist_model in dist_models:\n            dist_models_ns.append(dist_model.replace(' ', '-'))\n            for param in gamma_params:\n                model_pred_cols.append(((dist_model.replace(' ', '-') + '_') + param))\n        pred_data = pd.DataFrame(index=input_data.index, columns=model_pred_cols, dtype=float)\n        for track_id in track_ids:\n            track_id_num = track_id.split('_')[(- 1)]\n            json_filename = (full_json_path + '{0}_{1}_{2}_model_track_{3}.json'.format(ens_name, run_date, ens_member, track_id_num))\n            json_file = open(json_filename)\n            json_data = json.load(json_file)\n            json_file.close()\n            for (s, step) in enumerate(json_data['features']):\n                step_id = (track_id + '_{0:02d}'.format(s))\n                for cond_model in condition_models_ns:\n                    pred_data.loc[(step_id, (cond_model + '_Condition'))] = step['properties'][('condition_' + cond_model)]\n                for dist_model in dist_models_ns:\n                    pred_data.loc[(step_id, [((dist_model + '_') + p) for p in gamma_params])] = step['properties'][('dist_' + dist_model)]\n        out_data = input_data.merge(pred_data, left_index=True, right_index=True)\n        return (out_data, ens_name, ens_member)\n    except Exception as e:\n        print(traceback.format_exc())\n        raise e", "docstring": "Reads forecasts from json files and merges them with the input data from the step csv files.\n\nArgs:\ninput_csv_file: Name of the input data csv file being processed\nforecast_json_path: Path to the forecast json files toplevel directory\ncondition_models: List of models used to forecast hail or no hail\ndist_models: List of models used to forecast the hail size distribution\n\nReturns:", "source": "codesearchnet"}
{"code": "def add_slab(self, height, n_background=1., position='top'):\n        \n        assert position in ('top', 'bottom')\n\n        name = str(self.slab_count)\n\n        if not callable(n_background):\n            n_back = lambda wl: n_background\n        else:\n            n_back = n_background\n\n        height_discretised = self.y_step*((height \n\n        y_min = self._next_start\n        y_max = y_min + height_discretised\n        self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max,\n                                 y_max, self.x_min, y_min, n_back, self._wl)\n\n        self.y_max = y_max\n        self._next_start = y_min + height_discretised\n        self.slab_count += 1\n\n        if position == 'bottom':\n            slabs = {}\n            for k in self.slabs.keys():\n                slabs[str(int(k)+1)] = self.slabs[k]\n            slabs['0'] = slabs.pop(str(self.slab_count))\n            self.slabs = slabs\n\n        return name", "docstring": "Creates and adds a :class:`Slab` object.\n\nArgs:\nheight (float): Height of the slab.\nn_background (float): The nominal refractive\nindex of the slab.  Default is 1 (air).\n\nReturns:\nstr: The name of the slab.", "source": "juraj-google-style"}
{"code": "def __call__(self, shardable_tensors: Sequence[sharding_util.ShardableTensor]) -> Sequence[sharding_util.Shard]:\n    tensors_by_task = {}\n    for shardable_tensor in shardable_tensors:\n        tensor = shardable_tensor.tensor\n        checkpoint_key = shardable_tensor.checkpoint_key\n        slice_spec = shardable_tensor.slice_spec\n        tensors_by_task.setdefault(checkpoint_key, {})[slice_spec] = tensor\n    return [tensors_by_task]", "docstring": "Callback to split tensors into shards based on their device spec task.\n\nArgs:\nshardable_tensors: A list of ShardableTensors.\n\nReturns:\nList of shard dicts containing tensors.\n[ {checkpoint key: {slice_spec: tensor} } ]", "source": "github-repos"}
{"code": "def __init__(self, logger=logging, instance_config_metadata=None):\n    \n    self.logger = logger\n    self.instance_config_metadata = instance_config_metadata\n    self.instance_config_header %= (\n        self.instance_config_script, self.instance_config_template)\n    \n    super(InstanceConfig, self).__init__(\n        config_file=self.instance_config_template,\n        config_header=self.instance_config_header)\n\n    \n    \n    \n    \n    config_files = [self.instance_config, self.instance_config_distro]\n    config_defaults = []\n    if self.instance_config_metadata:\n      config = parser.Parser()\n      try:\n        config.read_file(stringio.StringIO(self.instance_config_metadata))\n      except parser.Error as e:\n        self.logger.error('Error parsing metadata configs: %s', str(e))\n      else:\n        config_defaults.append(\n            dict((s, dict(config.items(s))) for s in config.sections()))\n    for config_file in config_files:\n      if os.path.exists(config_file):\n        config = parser.Parser()\n        try:\n          config.read(config_file)\n        except parser.Error as e:\n          self.logger.error('Error parsing config file: %s', str(e))\n        else:\n          config_defaults.append(\n              dict((s, dict(config.items(s))) for s in config.sections()))\n    config_defaults.append(self.instance_config_options)\n\n    for defaults in config_defaults:\n      for section, options in sorted(defaults.items()):\n        for option, value in sorted(options.items()):\n          super(InstanceConfig, self).SetOption(\n              section, option, value, overwrite=False)", "docstring": "Constructor.\n\nInherit from the ConfigManager class. Read the template for instance\ndefaults and write new sections and options. This prevents package\nupdates from overriding user set defaults.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.\ninstance_config_metadata: string, a config file specified in metadata.", "source": "juraj-google-style"}
{"code": "def _StubMethod(self, stub, method_descriptor,\n                  rpc_controller, request, callback):\n    \n    return stub.rpc_channel.CallMethod(\n        method_descriptor, rpc_controller, request,\n        method_descriptor.output_type._concrete_class, callback)", "docstring": "The body of all service methods in the generated stub class.\n\nArgs:\nstub: Stub instance.\nmethod_descriptor: Descriptor of the invoked method.\nrpc_controller: Rpc controller to execute the method.\nrequest: Request protocol message.\ncallback: A callback to execute when the method finishes.\nReturns:\nResponse message (in case of blocking call).", "source": "juraj-google-style"}
{"code": "def calculate(self, token_list_x, token_list_y):\n        \n        match_list = [tanimoto_value for tanimoto_value in token_list_x if tanimoto_value in token_list_y]\n        return float(len(match_list) / (len(token_list_x) + len(token_list_y) - len(match_list)))", "docstring": "Calculate similarity with the Tanimoto coefficient.\n\nConcrete method.\n\nArgs:\ntoken_list_x:    [token, token, token, ...]\ntoken_list_y:    [token, token, token, ...]\n\nReturns:\nSimilarity.", "source": "juraj-google-style"}
{"code": "def get(self, value):\n    config = self.get_block(('vrf definition %s' % value))\n    if (not config):\n        return None\n    response = dict(vrf_name=value)\n    response.update(self._parse_rd(config))\n    response.update(self._parse_description(config))\n    config = self.get_block(('no ip routing vrf %s' % value))\n    if config:\n        response['ipv4_routing'] = False\n    else:\n        response['ipv4_routing'] = True\n    config = self.get_block(('no ipv6 unicast-routing vrf %s' % value))\n    if config:\n        response['ipv6_routing'] = False\n    else:\n        response['ipv6_routing'] = True\n    return response", "docstring": "Returns the VRF configuration as a resource dict.\n\nArgs:\nvalue (string): The vrf name to retrieve from the\nrunning configuration.\n\nReturns:\nA Python dict object containing the VRF attributes as\nkey/value pairs.", "source": "codesearchnet"}
{"code": "def __xor__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.logical_xor, tf.bool)", "docstring": "Returns a TensorFluent for the xor logical operator.\n\nArgs:\nself: The first operand.\nother: The second operand.\n\nReturns:\nA TensorFluent wrapping the operator's output.", "source": "juraj-google-style"}
{"code": "def op_and(self, *elements):\n    expression = self.add_operator(Operator(';'))\n    for element in elements:\n        expression.add_element(element)\n    return expression", "docstring": "Update the ``Expression`` by joining the specified additional\n``elements`` using an \"AND\" ``Operator``\n\nArgs:\n*elements (BaseExpression): The ``Expression`` and/or\n``Constraint`` elements which the \"AND\" ``Operator`` applies\nto.\n\nReturns:\nExpression: ``self`` or related ``Expression``.", "source": "codesearchnet"}
{"code": "def send_file(self, file_name, remote_destination=None, **kwargs):\n        \n        if not remote_destination:\n            remote_destination = file_name\n\n        return SubprocessTask(\n            self._rsync_cmd() +\n            ['-ut', file_name, '%s:%s' % (self.hostname, remote_destination)],\n            **kwargs)", "docstring": "Send a file to a remote host with rsync.\n\nArgs:\nfile_name (str): The relative location of the file on the local\nhost.\n\nremote_destination (str): The destination for the file on the remote\nhost. If `None`, will be assumed to be the same as\n**file_name**. Default `None`.\n\n**kwargs: Passed to ``SubprocessTask``'s init method.\n\nReturn:\n``pyrem.task.SubprocessTask``: The resulting task.", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    try:\n        structure = self._SDF_HEADER.parseString(lines)\n    except pyparsing.ParseException:\n        logger.debug('Not a SkyDrive log file')\n        return False\n    try:\n        dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=structure.header_date_time)\n    except ValueError:\n        logger.debug('Not a SkyDrive log file, invalid date and time: {0!s}'.format(structure.header_date_time))\n        return False\n    return True", "docstring": "Verify that this file is a SkyDrive log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "codesearchnet"}
{"code": "def key_exists(self, namespace, key):\n        \n\n        return namespace in self.__data and key in self.__data[namespace]", "docstring": "Checks a namespace for the existence of a specific key\n\nArgs:\nnamespace (str): Namespace to check in\nkey (str): Name of the key to check for\n\nReturns:\n`True` if key exists in the namespace, else `False`", "source": "juraj-google-style"}
{"code": "def add(self, path, compress=None):\n    if os.path.isdir(path):\n        self.add_dir(path, compress)\n    else:\n        self.add_file(path, compress)", "docstring": "Add `path` to the MAR file.\n\nIf `path` is a file, it will be added directly.\nIf `path` is a directory, it will be traversed recursively and all\nfiles inside will be added.\n\nArgs:\npath (str): path to file or directory on disk to add to this MAR\nfile\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.", "source": "codesearchnet"}
{"code": "def ensure(self, func, *args, **kwargs):\n    data = self.tryload()\n    if (data is None):\n        data = func(*args, **kwargs)\n        self.save(data)\n    return data", "docstring": "r\"\"\"\nWraps around a function. A cfgstr must be stored in the base cacher.\n\nArgs:\nfunc (callable): function that will compute data on cache miss\n*args: passed to func\n**kwargs: passed to func\n\nExample:\n>>> from ubelt.util_cache import *  # NOQA\n>>> def func():\n>>>     return 'expensive result'\n>>> fname = 'test_cacher_ensure'\n>>> cfgstr = 'func params'\n>>> cacher = Cacher(fname, cfgstr)\n>>> cacher.clear()\n>>> data1 = cacher.ensure(func)\n>>> data2 = cacher.ensure(func)\n>>> assert data1 == 'expensive result'\n>>> assert data1 == data2\n>>> cacher.clear()", "source": "codesearchnet"}
{"code": "def delete_tag(self, key, update_session=True):\n    existing_tags = {x.key: x for x in self.tags}\n    if (key in existing_tags):\n        if update_session:\n            db.session.delete(existing_tags[key])\n        self.tags.remove(existing_tags[key])\n        return True\n    return False", "docstring": "Removes a tag from a resource based on the tag key. Returns `True` if the tag was removed or `False` if the\ntag didn't exist\n\nArgs:\nkey (str): Key of the tag to delete\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. Default: True\n\nReturns:", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context, file_object=None):\n    \n    super(VHDIFile, self).__init__(resolver_context, file_object=file_object)\n    self._parent_vhdi_files = []\n    self._sub_file_objects = []", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_object (Optional[FileIO]): file-like object.", "source": "juraj-google-style"}
{"code": "def cloud_train(train_dataset, eval_dataset, analysis_dir, output_dir, features, model_type, max_steps, num_epochs, train_batch_size, eval_batch_size, min_eval_frequency, top_n, layer_sizes, learning_rate, epsilon, job_name, job_name_prefix, config):\n    import google.datalab.ml as ml\n    if ((len(train_dataset.input_files) != 1) or (len(eval_dataset.input_files) != 1)):\n        raise ValueError('CsvDataSets must be built with a file pattern, not list of files.')\n    if file_io.file_exists(output_dir):\n        raise ValueError('output_dir already exist. Use a new output path.')\n    if isinstance(features, dict):\n        if (not file_io.file_exists(output_dir)):\n            file_io.recursive_create_dir(output_dir)\n        features_file = os.path.join(output_dir, 'features_file.json')\n        file_io.write_string_to_file(features_file, json.dumps(features))\n    else:\n        features_file = features\n    if (not isinstance(config, ml.CloudTrainingConfig)):\n        raise ValueError('cloud should be an instance of google.datalab.ml.CloudTrainingConfig for cloud training.')\n    _assert_gcs_files([output_dir, train_dataset.input_files[0], eval_dataset.input_files[0], features_file, analysis_dir])\n    args = [('--train-data-paths=%s' % train_dataset.input_files[0]), ('--eval-data-paths=%s' % eval_dataset.input_files[0]), ('--preprocess-output-dir=%s' % analysis_dir), ('--transforms-file=%s' % features_file), ('--model-type=%s' % model_type), ('--max-steps=%s' % str(max_steps)), ('--train-batch-size=%s' % str(train_batch_size)), ('--eval-batch-size=%s' % str(eval_batch_size)), ('--min-eval-frequency=%s' % str(min_eval_frequency)), ('--learning-rate=%s' % str(learning_rate)), ('--epsilon=%s' % str(epsilon))]\n    if num_epochs:\n        args.append(('--num-epochs=%s' % str(num_epochs)))\n    if top_n:\n        args.append(('--top-n=%s' % str(top_n)))\n    if layer_sizes:\n        for i in range(len(layer_sizes)):\n            args.append(('--layer-size%s=%s' % ((i + 1), str(layer_sizes[i]))))\n    job_request = {'package_uris': [_package_to_staging(output_dir), _TF_GS_URL, _PROTOBUF_GS_URL], 'python_module': 'mltoolbox._structured_data.trainer.task', 'job_dir': output_dir, 'args': args}\n    job_request.update(dict(config._asdict()))\n    if (not job_name):\n        job_name = (job_name_prefix or 'structured_data_train')\n        job_name += ('_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S'))\n    job = ml.Job.submit_training(job_request, job_name)\n    print('Job request send. View status of job at')\n    print(('https:\n    return job", "docstring": "Train model using CloudML.\n\nSee local_train() for a description of the args.\nArgs:\nconfig: A CloudTrainingConfig object.\njob_name: Training job name. A default will be picked if None.", "source": "codesearchnet"}
{"code": "def metric_streaming(self):\n    if (not self.__metric_streaming):\n        self.__metric_streaming = MetricStreaming(self.__connection)\n    return self.__metric_streaming", "docstring": "Gets the MetricStreaming API client.\n\nReturns:\nMetricStreaming:", "source": "codesearchnet"}
{"code": "def load_extra(cls, filename):\n    try:\n        with open(filename, 'rb') as configuration_file:\n            cls.load_extra_data(configuration_file.read())\n            sys.stderr.write('Config successfully loaded from {0:s}\\n'.format(filename))\n            return True\n    except IOError:\n        return False", "docstring": "Loads extra JSON configuration parameters from a file on the filesystem.\n\nArgs:\nfilename: str, the filename to open.\n\nReturns:\nbool: True if the extra configuration parameters were read.", "source": "codesearchnet"}
{"code": "def list_outputs(self, args, screen_info=None):\n    _ = screen_info\n    parsed = self._arg_parsers['list_outputs'].parse_args(args)\n    output = self._list_inputs_or_outputs(parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=True)\n    node_name = debug_graphs.get_node_name(parsed.node_name)\n    _add_main_menu(output, node_name=node_name, enable_list_outputs=False)\n    return output", "docstring": "Command handler for inputs.\n\nShow inputs to a given node.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines object.", "source": "github-repos"}
{"code": "def log_cert_info(logger, msg_str, cert_obj):\n    \n    list(\n        map(\n            logger,\n            [\"{}:\".format(msg_str)]\n            + [\n                \"  {}\".format(v)\n                for v in [\n                    \"Subject: {}\".format(\n                        _get_val_str(cert_obj, [\"subject\", \"value\"], reverse=True)\n                    ),\n                    \"Issuer: {}\".format(\n                        _get_val_str(cert_obj, [\"issuer\", \"value\"], reverse=True)\n                    ),\n                    \"Not Valid Before: {}\".format(\n                        cert_obj.not_valid_before.isoformat()\n                    ),\n                    \"Not Valid After: {}\".format(cert_obj.not_valid_after.isoformat()),\n                    \"Subject Alt Names: {}\".format(\n                        _get_ext_val_str(\n                            cert_obj, \"SUBJECT_ALTERNATIVE_NAME\", [\"value\", \"value\"]\n                        )\n                    ),\n                    \"CRL Distribution Points: {}\".format(\n                        _get_ext_val_str(\n                            cert_obj,\n                            \"CRL_DISTRIBUTION_POINTS\",\n                            [\"value\", \"full_name\", \"value\", \"value\"],\n                        )\n                    ),\n                    \"Authority Access Location: {}\".format(\n                        extract_issuer_ca_cert_url(cert_obj) or \"<not found>\"\n                    ),\n                ]\n            ],\n        )\n    )", "docstring": "Dump basic certificate values to the log.\n\nArgs:\nlogger: Logger\nLogger to which to write the certificate values.\n\nmsg_str: str\nA message to write to the log before the certificate values.\n\ncert_obj: cryptography.Certificate\nCertificate containing values to log.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _replace_event_shape_in_shape_tensor(input_shape, event_shape_in, event_shape_out, validate_args):\n    (output_tensorshape, is_validated) = _replace_event_shape_in_tensorshape(tensorshape_util.constant_value_as_shape(input_shape), event_shape_in, event_shape_out)\n    validation_dependencies = (map(tf.identity, (event_shape_in, event_shape_out)) if validate_args else ())\n    if (tensorshape_util.is_fully_defined(output_tensorshape) and (is_validated or (not validate_args))):\n        with tf.control_dependencies(validation_dependencies):\n            output_shape = tf.convert_to_tensor(value=output_tensorshape, name='output_shape', dtype_hint=tf.int32)\n        return (output_shape, output_tensorshape)\n    with tf.control_dependencies(validation_dependencies):\n        event_shape_in_ndims = (tf.size(input=event_shape_in) if (tensorshape_util.num_elements(event_shape_in.shape) is None) else tensorshape_util.num_elements(event_shape_in.shape))\n        (input_non_event_shape, input_event_shape) = tf.split(input_shape, num_or_size_splits=[(- 1), event_shape_in_ndims])\n    additional_assertions = []\n    if is_validated:\n        pass\n    elif validate_args:\n        mask = (event_shape_in >= 0)\n        explicit_input_event_shape = tf.boolean_mask(tensor=input_event_shape, mask=mask)\n        explicit_event_shape_in = tf.boolean_mask(tensor=event_shape_in, mask=mask)\n        additional_assertions.append(assert_util.assert_equal(explicit_input_event_shape, explicit_event_shape_in, message='Input `event_shape` does not match `event_shape_in`.'))\n    with tf.control_dependencies(additional_assertions):\n        output_shape = tf.concat([input_non_event_shape, event_shape_out], axis=0, name='output_shape')\n    return (output_shape, output_tensorshape)", "docstring": "Replaces the rightmost dims in a `Tensor` representing a shape.\n\nArgs:\ninput_shape: a rank-1 `Tensor` of integers\nevent_shape_in: the event shape expected to be present in rightmost dims\nof `shape_in`.\nevent_shape_out: the event shape with which to replace `event_shape_in` in\nthe rightmost dims of `input_shape`.\nvalidate_args: Python `bool` indicating whether arguments should\nbe checked for correctness.\n\nReturns:\noutput_shape: A rank-1 integer `Tensor` with the same contents as\n`input_shape` except for the event dims, which are replaced with\n`event_shape_out`.", "source": "codesearchnet"}
{"code": "def __init__(self, config=None, all_linters=None):\n        \n        self._classes = all_linters or LINTERS\n        self._config = config or Config(self._classes)\n        LinterRunner.config = self._config", "docstring": "Initialize the only Config object and assign it to other classes.\n\nArgs:\nconfig (Config): Config object.\nall_linters (dict): Names and classes of all available linters.", "source": "juraj-google-style"}
{"code": "def getFilesFromAFolder(path):\n    from os import listdir\n    from os.path import isfile, join\n    onlyFiles = []\n    for f in listdir(path):\n        if isfile(join(path, f)):\n            onlyFiles.append(f)\n    return onlyFiles", "docstring": "Getting all the files in a folder.\n\nArgs:\n-----\npath: The path in which looking for the files\n\nReturns:\n--------\nlist: The list of filenames found.", "source": "codesearchnet"}
{"code": "def get_urls(self):\n    urls = self.get_subfields('856', 'u', i1='4', i2='2')\n    return map((lambda x: x.replace('&amp;', '&')), urls)", "docstring": "Content of field ``856u42``. Typically URL pointing to producers\nhomepage.\n\nReturns:\nlist: List of URLs defined by producer.", "source": "codesearchnet"}
{"code": "def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):\n    if offset_mapping_1 is None:\n        return [(0, 0)] + offset_mapping_0 + [(0, 0)]\n    return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]", "docstring": "Build offset map from a pair of offset map by concatenating and adding offsets of special tokens. An Ernie-M\noffset_mapping has the following format:\n\n- single sequence: `(0,0) X (0,0)`\n- pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`\n\nArgs:\noffset_mapping_ids_0 (`List[tuple]`):\nList of char offsets to which the special tokens will be added.\noffset_mapping_ids_1 (`List[tuple]`, *optional*):\nOptional second list of wordpiece offsets for offset mapping pairs.\nReturns:\n`List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.", "source": "github-repos"}
{"code": "def lchmod(self, path, mode):\n        \n        if self.filesystem.is_windows_fs:\n            raise (NameError, \"name 'lchmod' is not defined\")\n        self.filesystem.chmod(path, mode, follow_symlinks=False)", "docstring": "Change the permissions of a file as encoded in integer mode.\nIf the file is a link, the permissions of the link are changed.\n\nArgs:\npath: (str) Path to the file.\nmode: (int) Permissions.", "source": "juraj-google-style"}
{"code": "def mark_locations(h,section,locs,markspec='or',**kwargs):\n    \n\n    \n    xyz = get_section_path(h,section)\n    (r,theta,phi) = sequential_spherical(xyz)\n    rcum = np.append(0,np.cumsum(r))\n\n    \n    if type(locs) is float or type(locs) is np.float64:\n        locs = np.array([locs])\n    if type(locs) is list:\n        locs = np.array(locs)\n    lengths = locs*rcum[-1]\n\n    \n    xyz_marks = []\n    for targ_length in lengths:\n        xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))\n    xyz_marks = np.array(xyz_marks)\n\n    \n    line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \\\n                     xyz_marks[:,2], markspec, **kwargs)\n    return line", "docstring": "Marks one or more locations on along a section. Could be used to\nmark the location of a recording or electrical stimulation.\n\nArgs:\nh = hocObject to interface with neuron\nsection = reference to section\nlocs = float between 0 and 1, or array of floats\noptional arguments specify details of marker\n\nReturns:\nline = reference to plotted markers", "source": "juraj-google-style"}
{"code": "def populate_native_libraries(version):\n    with open(BINARY_EXT_TEMPLATE, 'r') as file_obj:\n        template = file_obj.read()\n    contents = template.format(revision=version)\n    with open(BINARY_EXT_FILE, 'w') as file_obj:\n        file_obj.write(contents)", "docstring": "Populates ``binary-extension.rst`` with release-specific data.\n\nArgs:\nversion (str): The current version.", "source": "codesearchnet"}
{"code": "def custom(colors, bins=None, bin_method=BinMethod.quantiles):\n    \n    return {\n        'colors': colors,\n        'bins': bins if bins is not None else len(colors),\n        'bin_method': bin_method,\n    }", "docstring": "Create a custom scheme.\n\nArgs:\ncolors (list of str): List of hex values for styling data\nbins (int, optional): Number of bins to style by. If not given, the\nnumber of colors will be used.\nbin_method (str, optional): Classification method. One of the values\nin :obj:`BinMethod`. Defaults to `quantiles`, which only works with\nquantitative data.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A DeBERTa sequence has the following format:\n\n- single sequence: [CLS] X [SEP]\n- pair of sequences: [CLS] A [SEP] B [SEP]\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "class MusicgenProcessor(ProcessorMixin):\n    feature_extractor_class = 'EncodecFeatureExtractor'\n    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False\n\n    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):\n        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)\n\n    def __call__(self, *args, **kwargs):\n        \n        if self._in_target_context_manager:\n            return self.current_processor(*args, **kwargs)\n        audio = kwargs.pop('audio', None)\n        sampling_rate = kwargs.pop('sampling_rate', None)\n        text = kwargs.pop('text', None)\n        if len(args) > 0:\n            audio = args[0]\n            args = args[1:]\n        if audio is None and text is None:\n            raise ValueError('You need to specify either an `audio` or `text` input to process.')\n        if text is not None:\n            inputs = self.tokenizer(text, **kwargs)\n        if audio is not None:\n            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)\n        if audio is None:\n            return inputs\n        elif text is None:\n            return audio_inputs\n        else:\n            inputs['input_values'] = audio_inputs['input_values']\n            if 'padding_mask' in audio_inputs:\n                inputs['padding_mask'] = audio_inputs['padding_mask']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        audio_values = kwargs.pop('audio', None)\n        padding_mask = kwargs.pop('padding_mask', None)\n        if len(args) > 0:\n            audio_values = args[0]\n            args = args[1:]\n        if audio_values is not None:\n            return self._decode_audio(audio_values, padding_mask=padding_mask)\n        else:\n            return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    def _decode_audio(self, audio_values, padding_mask: Optional=None) -> List[np.ndarray]:\n        \n        audio_values = to_numpy(audio_values)\n        bsz, channels, seq_len = audio_values.shape\n        if padding_mask is None:\n            return list(audio_values)\n        padding_mask = to_numpy(padding_mask)\n        difference = seq_len - padding_mask.shape[-1]\n        padding_value = 1 - self.feature_extractor.padding_value\n        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)\n        audio_values = audio_values.tolist()\n        for i in range(bsz):\n            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]\n            audio_values[i] = sliced_audio.reshape(channels, -1)\n        return audio_values", "docstring": "Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor\nclass.\n\n[`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`TTokenizer`]. See\n[`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.\n\nArgs:\nfeature_extractor (`EncodecFeatureExtractor`):\nAn instance of [`EncodecFeatureExtractor`]. 
The feature extractor is a required input.\ntokenizer (`T5Tokenizer`):\nAn instance of [`T5Tokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def run_generate(verbose=True):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,google-t5/t5-base, etc.')\n    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')\n    parser.add_argument('save_path', type=str, help='where to save summaries')\n    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')\n    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')\n    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')\n    parser.add_argument('--prefix', type=str, required=False, default=None, help='will be added to the beginning of src examples')\n    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')\n    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')\n    parser.add_argument('--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')\n    parser.add_argument('--fp16', action='store_true')\n    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')\n    parser.add_argument('--info', nargs='?', type=str, const=datetime_now(), help=\"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g. lang=en-ru. If no value is passed, the current datetime string will be used.\")\n    args, rest = parser.parse_known_args()\n    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)\n    if parsed_args and verbose:\n        print(f'parsed the following generate kwargs: {parsed_args}')\n    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]\n    if args.n_obs > 0:\n        examples = examples[:args.n_obs]\n    Path(args.save_path).parent.mkdir(exist_ok=True)\n    if args.reference_path is None and Path(args.score_path).exists():\n        warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.')\n    if args.device == 'cpu' and args.fp16:\n        raise ValueError(\"Can't mix --fp16 and --device cpu\")\n    runtime_metrics = generate_summaries_or_translations(examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args)\n    if args.reference_path is None:\n        return {}\n    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge\n    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]\n    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][:len(output_lns)]\n    scores: dict = score_fn(output_lns, reference_lns)\n    scores.update(runtime_metrics)\n    if args.dump_args:\n        scores.update(parsed_args)\n    if args.info:\n        scores['info'] = args.info\n    if verbose:\n        print(scores)\n    if args.score_path is not None:\n        json.dump(scores, open(args.score_path, 'w'))\n    return scores", "docstring": "Takes input text, generates output, and then using reference calculates the BLEU scores.\n\nThe results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.\n\nArgs:\nverbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout\n\nReturns:\na tuple: ``(scores, params}``\n- 
``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``\n- ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``", "source": "github-repos"}
{"code": "def add_slot(self, var, slot_name, initializer='zeros', shape=None):\n    if slot_name not in self._slot_names:\n        self._slot_names.append(slot_name)\n    var_key = _var_key(var)\n    slot_dict = self._slots.setdefault(var_key, {})\n    weight = slot_dict.get(slot_name, None)\n    if weight is None:\n        if isinstance(initializer, str) or callable(initializer):\n            initializer = initializers.get(initializer)\n            if isinstance(initializer, trackable.CheckpointInitialValueCallable) or shape is not None:\n                slot_shape = shape\n            else:\n                slot_shape = var.shape\n            initial_value = functools.partial(initializer, shape=slot_shape, dtype=var.dtype)\n        else:\n            initial_value = initializer\n        with self._distribution_strategy_scope():\n            strategy = distribute_lib.get_strategy()\n            if not strategy.extended.variable_created_in_scope(var):\n                raise ValueError(\"Trying to create optimizer slot variable under the scope for tf.distribute.Strategy ({}), which is different from the scope used for the original variable ({}). Make sure the slot variables are created under the same strategy scope. This may happen if you're restoring from a checkpoint outside the scope\".format(strategy, var))\n            with strategy.extended.colocate_vars_with(var):\n                weight = tf_variables.Variable(name='%s/%s' % (var._shared_name, slot_name), dtype=var.dtype, trainable=False, initial_value=initial_value)\n        backend.track_variable(weight)\n        slot_dict[slot_name] = weight\n        self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=weight)\n        self._weights.append(weight)\n    return weight", "docstring": "Add a new slot variable for `var`.\n\nA slot variable is an additional variable associated with `var` to train.\nIt is allocated and managed by optimizers, e.g. `Adam`.\n\nArgs:\nvar: a `Variable` object.\nslot_name: name of the slot variable.\ninitializer: initializer of the slot variable\nshape: (Optional) shape of the slot variable. If not set, it will default\nto the shape of `var`.\n\nReturns:\nA slot variable.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n        \n        self.ParseResume = channel.unary_unary(\n            \"/google.cloud.talent.v4beta1.ResumeService/ParseResume\",\n            request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_resume__service__pb2.ParseResumeRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_resume__service__pb2.ParseResumeResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def putfile(self, filepath, buildroot, metahash):\n\n    def gen_obj_path(filename):\n        filehash = util.hash_file(filepath).hexdigest()\n        return (filehash, os.path.join(self.obj_cachedir, filehash[0:2], filehash[2:4], filehash))\n    filepath_relative = filepath.split(buildroot)[1][1:]\n    incachepath = self._genpath(filepath_relative, metahash)\n    (filehash, obj_path) = gen_obj_path(filepath)\n    if (not os.path.exists(obj_path)):\n        obj_dir = os.path.dirname(obj_path)\n        if (not os.path.exists(obj_dir)):\n            os.makedirs(obj_dir)\n        log.debug('Adding to obj cache: %s -> %s', filepath, obj_path)\n        os.link(filepath, obj_path)\n    if os.path.exists(incachepath):\n        existingfile_hash = util.hash_file(incachepath).hexdigest()\n        if (filehash != existingfile_hash):\n            log.warn('File found in mh cache, but checksum differs. Replacing with this new version. (File: %s)', filepath)\n            log.warn('Possible reasons for this:')\n            log.warn(' 1. This build is not hermetic, and something differs about the build environment compared to the previous build.')\n            log.warn(' 2. This file has a timestamp or other build-time related data encoded into it, which will always cause the checksum to differ when built.')\n            log.warn(' 3. Everything is terrible and nothing works.')\n            os.unlink(incachepath)\n    if (not os.path.exists(incachepath)):\n        log.debug('Adding to mh cache: %s -> %s', filepath, incachepath)\n        if (not os.path.exists(os.path.dirname(incachepath))):\n            os.makedirs(os.path.dirname(incachepath))\n        os.link(obj_path, incachepath)", "docstring": "Put a file in the cache.\n\nArgs:\nfilepath: Path to file on disk.\nbuildroot: Path to buildroot\nbuildrule: The rule that generated this file.\nmetahash: hash object", "source": "codesearchnet"}
{"code": "def device(self, name):\n    if isinstance(name, LogicalDevice):\n        name = name.name\n    elif pydev.is_device_spec(name):\n        name = name.to_string()\n    return _EagerDeviceContext(self, name)", "docstring": "Context-manager to force placement of operations and Tensors on a device.\n\nArgs:\nname: Name of the device or None to get default placement.\n\nReturns:\nContext manager that forces device placement.\n\nRaises:\nValueError: If name is not a string or is an invalid device name.\nRuntimeError: If device scopes are not properly nested.", "source": "github-repos"}
{"code": "def _value_and_batch_jacobian(f, x):\n    if tf.executing_eagerly():\n        with tf.GradientTape() as tape:\n            tape.watch(x)\n            value = f(x)\n        batch_jacobian = tape.batch_jacobian(value, x)\n    else:\n        value = f(x)\n        batch_jacobian = gradients.batch_jacobian(value, x)\n    return (value, batch_jacobian)", "docstring": "Enables uniform interface to value and batch jacobian calculation.\n\nWorks in both eager and graph modes.\n\nArguments:\nf: The scalar function to evaluate.\nx: The value at which to compute the value and the batch jacobian.\n\nReturns:\nA tuple (f(x), J(x)), where J(x) is the batch jacobian.", "source": "codesearchnet"}
{"code": "def read_first_header(self):\n    self.file_obj.seek(0)\n    (header_dict, pos) = self.read_header()\n    self.file_obj.seek(0)\n    return header_dict", "docstring": "Read first header in file\n\nReturns:\nheader (dict): keyword:value pairs of header metadata", "source": "codesearchnet"}
{"code": "def update(self, sparql_query_only=False, auto_refresh=None, update_binary=True):\n    self._diff_graph()\n    sq = SparqlUpdate(self.rdf.prefixes, self.rdf.diffs)\n    if sparql_query_only:\n        return sq.build_query()\n    response = self.repo.api.http_request('PATCH', ('%s/fcr:metadata' % self.uri), data=sq.build_query(), headers={'Content-Type': 'application/sparql-update'})\n    if (response.status_code != 204):\n        logger.debug(response.content)\n        raise Exception(('HTTP %s, expecting 204' % response.status_code))\n    if ((type(self) == NonRDFSource) and update_binary and (type(self.binary.data) != requests.models.Response)):\n        self.binary._prep_binary()\n        binary_data = self.binary.data\n        binary_response = self.repo.api.http_request('PUT', self.uri, data=binary_data, headers={'Content-Type': self.binary.mimetype})\n        if ((not auto_refresh) and (not self.repo.default_auto_refresh)):\n            logger.debug('not refreshing resource RDF, but updated binary, so must refresh binary data')\n            updated_self = self.repo.get_resource(self.uri)\n            self.binary.refresh(updated_self)\n    if hasattr(self, '_post_update'):\n        self._post_update()\n    '\\n\\t\\tIf not updating binary, pass that bool to refresh as refresh_binary flag to avoid touching binary data\\n\\t\\t'\n    if auto_refresh:\n        self.refresh(refresh_binary=update_binary)\n    elif (auto_refresh == None):\n        if self.repo.default_auto_refresh:\n            self.refresh(refresh_binary=update_binary)\n    return True", "docstring": "Method to update resources in repository.  Firing this method computes the difference in the local modified graph and the original one,\ncreates an instance of SparqlUpdate and builds a sparql query that represents these differences, and sends this as a PATCH request.\n\nNote: send PATCH request, regardless of RDF or NonRDF, to [uri]/fcr:metadata\n\nIf the resource is NonRDF (Binary), this also method also updates the binary data.\n\nArgs:\nsparql_query_only (bool): If True, returns only the sparql query string and does not perform any actual updates\nauto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh\nupdate_binary (bool): If True, and resource is NonRDF, updates binary data as well\n\nReturns:\n(bool)", "source": "codesearchnet"}
{"code": "def liquid_precipitation_depth(self, value=999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `liquid_precipitation_depth`'.format(value))\n    self._liquid_precipitation_depth = value", "docstring": "Corresponds to IDD Field `liquid_precipitation_depth`\n\nArgs:\nvalue (float): value for IDD Field `liquid_precipitation_depth`\nUnit: mm\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def promote_artifacts(self, promote_stage='latest'):\n        \n        if promote_stage.lower() == 'alpha':\n            self._sync_to_uri(self.s3_canary_uri)\n        elif promote_stage.lower() == 'canary':\n            self._sync_to_uri(self.s3_latest_uri)\n        else:\n            self._sync_to_uri(self.s3_latest_uri)", "docstring": "Promote artifact version to dest.\n\nArgs:\npromote_stage (string): Stage that is being promoted", "source": "juraj-google-style"}
{"code": "def kde_partition_data(data, estimate_tails=True):\n    kde = stats.kde.gaussian_kde(data)\n    evaluation_bins = np.linspace(start=(np.min(data) - (kde.covariance_factor() / 2)), stop=(np.max(data) + (kde.covariance_factor() / 2)), num=np.floor((((np.max(data) - np.min(data)) / kde.covariance_factor()) + 1)).astype(int))\n    cdf_vals = [kde.integrate_box_1d((- np.inf), x) for x in evaluation_bins]\n    evaluation_weights = np.diff(cdf_vals)\n    if estimate_tails:\n        bins = np.concatenate(([(np.min(data) - (1.5 * kde.covariance_factor()))], evaluation_bins, [(np.max(data) + (1.5 * kde.covariance_factor()))]))\n    else:\n        bins = np.concatenate(([(- np.inf)], evaluation_bins, [np.inf]))\n    weights = np.concatenate(([cdf_vals[0]], evaluation_weights, [(1 - cdf_vals[(- 1)])]))\n    return {'bins': bins, 'weights': weights}", "docstring": "Convenience method for building a partition and weights using a gaussian Kernel Density Estimate and default bandwidth.\n\nArgs:\ndata (list-like): The data from which to construct the estimate\nestimate_tails (bool): Whether to estimate the tails of the distribution to keep the partition object finite\n\nReturns:\nA new partition_object::\n\n{\n\"partition\": (list) The endpoints of the partial partition of reals,\n\"weights\": (list) The densities of the bins implied by the partition.\n}", "source": "codesearchnet"}
{"code": "def get_soup_response(self):\n    if (self.response is not None):\n        if (self.__response_soup is None):\n            result = BeautifulSoup(self.response.text, 'lxml')\n            if self.decomposed:\n                return result\n            else:\n                self.__response_soup = BeautifulSoup(self.response.text, 'lxml')\n    return self.__response_soup", "docstring": "Get the response as a cached BeautifulSoup container.\n\nReturns:\nobj: The BeautifulSoup container.", "source": "codesearchnet"}
{"code": "def MakeNewConfig(self):\n    result = self.__class__()\n    result.type_infos = self.type_infos\n    result.defaults = self.defaults\n    result.context = self.context\n    result.valid_contexts = self.valid_contexts\n    return result", "docstring": "Creates a new configuration option based on this one.\n\nNote that it is not normally possible to just instantiate the\nconfig object because it will have an empty set of type\ndescriptors (i.e. no config options will be defined). Config\noptions are normally defined at import time, and then they get\nadded to the _CONFIG global in this module.\n\nTo obtain a new configuration object, inheriting the regular\nconfig options, this method must be called from the global _CONFIG\nobject, to make a copy.\n\nReturns:\nA new empty config object. which has the same parameter definitions as\nthis one.", "source": "codesearchnet"}
{"code": "def register(self, managed_object):\n    if (not isinstance(managed_object, pobjects.ManagedObject)):\n        raise TypeError('managed object must be a Pie ManagedObject')\n    object_attributes = list()\n    if hasattr(managed_object, 'cryptographic_usage_masks'):\n        if (managed_object.cryptographic_usage_masks is not None):\n            mask_attribute = self.attribute_factory.create_attribute(enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK, managed_object.cryptographic_usage_masks)\n            object_attributes.append(mask_attribute)\n    if hasattr(managed_object, 'operation_policy_name'):\n        if (managed_object.operation_policy_name is not None):\n            opn_attribute = self.attribute_factory.create_attribute(enums.AttributeType.OPERATION_POLICY_NAME, managed_object.operation_policy_name)\n            object_attributes.append(opn_attribute)\n    if hasattr(managed_object, 'names'):\n        if managed_object.names:\n            for name in managed_object.names:\n                name_attribute = self.attribute_factory.create_attribute(enums.AttributeType.NAME, name)\n                object_attributes.append(name_attribute)\n    template = cobjects.TemplateAttribute(attributes=object_attributes)\n    object_type = managed_object.object_type\n    secret = self.object_factory.convert(managed_object)\n    result = self.proxy.register(object_type, template, secret)\n    status = result.result_status.value\n    if (status == enums.ResultStatus.SUCCESS):\n        return result.uuid\n    else:\n        reason = result.result_reason.value\n        message = result.result_message.value\n        raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Register a managed object with a KMIP appliance.\n\nArgs:\nmanaged_object (ManagedObject): A managed object to register. An\ninstantiatable subclass of ManagedObject from the Pie API.\n\nReturns:\nstring: The uid of the newly registered managed object.\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input argument is invalid", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n        \n        try:\n            unpacked_data = struct.unpack('!4B', buff[offset:offset+4])\n            self._value = '.'.join([str(x) for x in unpacked_data])\n        except struct.error as exception:\n            raise exceptions.UnpackException('%s; %s: %s' % (exception,\n                                                             offset, buff))", "docstring": "Unpack a binary message into this object's attributes.\n\nUnpack the binary value *buff* and update this object attributes based\non the results.\n\nArgs:\nbuff (bytes): Binary data package to be unpacked.\noffset (int): Where to begin unpacking.\n\nRaises:\nException: If there is a struct unpacking error.", "source": "juraj-google-style"}
{"code": "def dot(matrix, vector, matrix_ty, vector_ty):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n\n    matrix_var = weld_obj.update(matrix)\n    if isinstance(matrix, WeldObject):\n        matrix_var = matrix.obj_id\n        weld_obj.dependencies[matrix_var] = matrix\n\n    vector_var = weld_obj.update(vector)\n    loopsize_annotation = \"\"\n    if isinstance(vector, WeldObject):\n        vector_var = vector.obj_id\n        weld_obj.dependencies[vector_var] = vector\n    if isinstance(vector, np.ndarray):\n        loopsize_annotation = \"@(loopsize: %dL)\" % len(vector)\n\n    weld_template = \n    weld_obj.weld_code = weld_template % {\"matrix\": matrix_var,\n                                          \"vector\": vector_var,\n                                          \"matrix_ty\": matrix_ty,\n                                          \"vector_ty\": vector_ty,\n                                          \"loopsize_annotation\": loopsize_annotation}\n    return weld_obj", "docstring": "Computes the dot product between a matrix and a vector.\n\nArgs:\nmatrix (WeldObject / Numpy.ndarray): 2-d input matrix\nvector (WeldObject / Numpy.ndarray): 1-d input vector\nty (WeldType): Type of each element in the input matrix and vector\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def time_and_memory(min_micros=1, min_bytes=1, min_accelerator_micros=0, min_cpu_micros=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0):\n    return {'max_depth': 10000, 'min_bytes': min_bytes, 'min_peak_bytes': min_peak_bytes, 'min_residual_bytes': min_residual_bytes, 'min_output_bytes': min_output_bytes, 'min_micros': min_micros, 'min_accelerator_micros': min_accelerator_micros, 'min_cpu_micros': min_cpu_micros, 'min_params': 0, 'min_float_ops': 0, 'min_occurrence': 0, 'order_by': 'micros', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['micros', 'bytes'], 'step': -1, 'output': 'stdout'}", "docstring": "Show operation time and memory consumptions.\n\nArgs:\nmin_micros: Only show profiler nodes with execution time\nno less than this. It sums accelerator and cpu times.\nmin_bytes: Only show profiler nodes requested to allocate no less bytes\nthan this.\nmin_accelerator_micros: Only show profiler nodes spend no less than\nthis time on accelerator (e.g. GPU).\nmin_cpu_micros: Only show profiler nodes spend no less than\nthis time on cpu.\nmin_peak_bytes: Only show profiler nodes using no less than this bytes\nat peak (high watermark). For profiler nodes consist of multiple\ngraph nodes, it sums the graph nodes' peak_bytes.\nmin_residual_bytes: Only show profiler nodes have no less than\nthis bytes not being de-allocated after Compute() ends. For\nprofiler nodes consist of multiple graph nodes, it sums the\ngraph nodes' residual_bytes.\nmin_output_bytes: Only show profiler nodes have no less than this bytes\noutput. The output are not necessarily allocated by this profiler\nnodes.\nReturns:\nA dict of profiling options.", "source": "github-repos"}
{"code": "def __init__(self, req):\n        \n        super(Gateway, self).__init__(req)\n        self.started_response = False\n        self.env = self.get_environ()\n        self.remaining_bytes_out = None", "docstring": "Initialize WSGI Gateway instance with request.\n\nArgs:\nreq (HTTPRequest): current HTTP request", "source": "juraj-google-style"}
{"code": "def _check_preconditions(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], bound_constraints: Dict[(str, Constraints)], default: Sequence[tf.Tensor]) -> Tuple[(tf.Tensor, Sequence[tf.Tensor], tf.Tensor)]:\n\n    def condition(i, a, checking):\n        not_checking = tf.reduce_any(tf.logical_not(checking))\n        return not_checking\n\n    def body(i, a, checking):\n        new_action = []\n        new_sampled_action = self._sample_action(bound_constraints, default)\n        new_preconds_checking = self.compiler.compile_action_preconditions_checking(state, new_sampled_action)\n        for (action_fluent, new_sampled_action_fluent) in zip(a, new_sampled_action):\n            new_action_fluent = tf.where(checking, action_fluent, new_sampled_action_fluent)\n            new_action.append(new_action_fluent)\n        new_action = tuple(new_action)\n        new_checking = tf.logical_or(checking, new_preconds_checking)\n        return ((i + 1), new_action, new_checking)\n    i0 = tf.constant(0)\n    preconds_checking = self.compiler.compile_action_preconditions_checking(state, action)\n    return tf.while_loop(condition, body, loop_vars=[i0, action, preconds_checking])", "docstring": "Samples action fluents until all preconditions are satisfied.\n\nChecks action preconditions for the sampled `action` and current `state`,\nand iff all preconditions are satisfied it returns the sampled action fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): A list of state fluents.\naction (Sequence[tf.Tensor]): A list of action fluents.\nbound_constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.\ndefault (Sequence[tf.Tensor]): The default action fluents.\n\nReturns:\nTuple[tf.Tensor, Sequence[tf.Tensor], tf.Tensor]: A tuple with\nan integer tensor corresponding to the number of samples,\naction fluents and a boolean tensor for checking all action preconditions.", "source": "codesearchnet"}
{"code": "def add_multiple(self, flags):\n    if (not isinstance(flags, list)):\n        raise TypeError('Expected list of flags, got object of type{}'.format(type(flags)))\n    for flag in flags:\n        if isinstance(flag, Flag):\n            self.add_item(flag)\n        elif isinstance(flag, tuple):\n            try:\n                item = Flag(*flag)\n                self.add_item(item)\n            except TypeError as e:\n                raise TypeError('Invalid arguments to initialize a flag definition, expect ({0} [, {1}]) but got {3}'.format(', '.join(Flag.REQUIRED_FIELDS), ', '.join(Flag.OPTIONAL_FIELDS), flag))", "docstring": "Add multiple command line flags\n\nArguments:\nflags (:obj:`list` of :obj:`tuple`): List of flags\nin tuples (name, flag_type, description, (optional) default)\n\nRaises:\nTypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError", "source": "codesearchnet"}
{"code": "def _reduced_stack(istart=3, iend=5, ipython=True):\n    import inspect\n    return [i[istart:iend] for i in inspect.stack() if _decorated_path(i[1])]", "docstring": "Returns the reduced function call stack that includes only relevant\nfunction calls (i.e., ignores any that are not part of the specified package\nor acorn.\n\nArgs:\npackage (str): name of the package that the logged method belongs to.", "source": "codesearchnet"}
{"code": "def DEFINE_multi_enum_class(name, default, enum_class, help, flag_values=_flagvalues.FLAGS, module_name=None, **args):\n    DEFINE_flag(_flag.MultiEnumClassFlag(name, default, help, enum_class), flag_values, module_name, **args)", "docstring": "Registers a flag whose value can be a list of enum members.\n\nUse the flag on the command line multiple times to place multiple\nenum values into the list.\n\nArgs:\nname: str, the flag name.\ndefault: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the\ndefault value of the flag; see\n`DEFINE_multi`; only differences are documented here. If the value is\na single Enum, it is treated as a single-item list of that Enum value.\nIf it is an iterable, text values within the iterable will be converted\nto the equivalent Enum objects.\nenum_class: class, the Enum class with all the possible values for the flag.\nhelp: str, the help message.\nflag_values: FlagValues, the FlagValues instance with which the flag will be\nregistered. This should almost never need to be overridden.\nmodule_name: A string, the name of the Python module declaring this flag. If\nnot provided, it will be computed using the stack trace of this call.\n**args: Dictionary with extra keyword args that are passed to the Flag\n__init__.", "source": "codesearchnet"}
{"code": "def account(transition, direction=Direction.BIDIRECTIONAL):\n    if (direction != Direction.BIDIRECTIONAL):\n        return directed_account(transition, direction)\n    return Account((directed_account(transition, Direction.CAUSE) + directed_account(transition, Direction.EFFECT)))", "docstring": "Return the set of all causal links for a |Transition|.\n\nArgs:\ntransition (Transition): The transition of interest.\n\nKeyword Args:\ndirection (Direction): By default the account contains actual causes\nand actual effects.", "source": "codesearchnet"}
{"code": "def marquee(text=\"\", width=78, mark='*'):\n    \n    if not text:\n        return (mark*width)[:width]\n\n    nmark = (width-len(text)-2)\n    if nmark < 0: \n        nmark = 0\n\n    marks = mark * nmark\n    return '%s %s %s' % (marks, text, marks)", "docstring": "Return the input string centered in a 'marquee'.\n\nArgs:\ntext (str): Input string\nwidth (int): Width of final output string.\nmark (str): Character used to fill string.\n\n:Examples:\n\n>>> marquee('A test', width=40)\n'**************** A test ****************'\n\n>>> marquee('A test', width=40, mark='-')\n'---------------- A test ----------------'\n\nmarquee('A test',40, ' ')\n'                 A test                 '", "source": "juraj-google-style"}
{"code": "def _StartWorkerProcess(self, process_name, storage_writer):\n    \n    process_name = 'Worker_{0:02d}'.format(self._last_worker_number)\n    logger.debug('Starting worker process {0:s}'.format(process_name))\n\n    if self._use_zeromq:\n      queue_name = '{0:s} task queue'.format(process_name)\n      task_queue = zeromq_queue.ZeroMQRequestConnectQueue(\n          delay_open=True, linger_seconds=0, name=queue_name,\n          port=self._task_queue_port,\n          timeout_seconds=self._TASK_QUEUE_TIMEOUT_SECONDS)\n    else:\n      task_queue = self._task_queue\n\n    process = worker_process.WorkerProcess(\n        task_queue, storage_writer, self._artifacts_filter_helper,\n        self.knowledge_base, self._session_identifier,\n        self._processing_configuration,\n        enable_sigsegv_handler=self._enable_sigsegv_handler, name=process_name)\n\n    \n    \n    \n    for handler in logging.root.handlers:\n      logging.root.removeHandler(handler)\n      handler.close()\n\n    process.start()\n\n    loggers.ConfigureLogging(\n        debug_output=self._debug_output, filename=self._log_filename,\n        mode='a', quiet_mode=self._quiet_mode)\n\n    try:\n      self._StartMonitoringProcess(process)\n\n    except (IOError, KeyError) as exception:\n      pid = process.pid\n      logger.error((\n          'Unable to monitor replacement worker process: {0:s} '\n          '(PID: {1:d}) with error: {2!s}').format(\n              process_name, pid, exception))\n\n      self._TerminateProcess(process)\n      return None\n\n    self._RegisterProcess(process)\n\n    self._last_worker_number += 1\n\n    return process", "docstring": "Creates, starts, monitors and registers a worker process.\n\nArgs:\nprocess_name (str): process name.\nstorage_writer (StorageWriter): storage writer for a session storage used\nto create task storage.\n\nReturns:\nMultiProcessWorkerProcess: extraction worker process or None if the\nprocess could not be started.", "source": "juraj-google-style"}
{"code": "def expand_tile(units, axis):\n    \n    assert axis in (1, 2)\n    n_time_steps = K.int_shape(units)[1]\n    repetitions = [1, 1, 1, 1]\n    repetitions[axis] = n_time_steps\n    if axis == 1:\n        expanded = Reshape(target_shape=( (1,) + K.int_shape(units)[1:] ))(units)\n    else:\n        expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)\n    return K.tile(expanded, repetitions)", "docstring": "Expand and tile tensor along given axis\n\nArgs:\nunits: tf tensor with dimensions [batch_size, time_steps, n_input_features]\naxis: axis along which expand and tile. Must be 1 or 2", "source": "juraj-google-style"}
{"code": "def segmentation_images(self,*args,**kwargs):\n        \n        if not self.db: raise ValueError(\"Need to set db\")\n        segs = SegmentationImages.read_cellframe(self,*args,**kwargs)\n        segs.microns_per_pixel = segs.microns_per_pixel\n        return segs", "docstring": "Use the segmented images to create per-image graphics\n\nArgs:\nverbose (bool): output more details if true\n\nReturns:\nSegmentationImages: returns a class used to construct the image graphics", "source": "juraj-google-style"}
{"code": "def run_analysis(args):\n  \n  import google.datalab.bigquery as bq\n  if args.bigquery_table:\n    table = bq.Table(args.bigquery_table)\n    schema_list = table.schema._bq_schema\n  else:\n    schema_list = json.loads(\n        file_io.read_file_to_string(args.schema_file).decode())\n    table = bq.ExternalDataSource(\n        source=args.input_file_pattern,\n        schema=bq.Schema(schema_list))\n\n  \n  for col_schema in schema_list:\n    col_type = col_schema['type'].lower()\n    if col_type != 'string' and col_type != 'integer' and col_type != 'float':\n      raise ValueError('Schema contains an unsupported type %s.' % col_type)\n\n  run_numerical_analysis(table, schema_list, args)\n  run_categorical_analysis(table, schema_list, args)\n\n  \n  file_io.write_string_to_file(\n      os.path.join(args.output_dir, SCHEMA_FILE),\n      json.dumps(schema_list, indent=2, separators=(',', ': ')))", "docstring": "Builds an analysis file for training.\n\nUses BiqQuery tables to do the analysis.\n\nArgs:\nargs: command line args\n\nRaises:\nValueError if schema contains unknown types.", "source": "juraj-google-style"}
{"code": "def post(self, resource):\n        \n        response = self.api.execute(\n            \"POST\", self.endpoint, json=(resource.as_dict()))\n\n        if not response.ok:\n            raise Error.parse(response.json())\n\n        return self._cls.parse(response.json())", "docstring": "Creates a new instance of the resource.\n\nArgs:\nresource - gophish.models.Model - The resource instance", "source": "juraj-google-style"}
{"code": "def diff_bisectSplit(self, text1, text2, x, y, deadline):\n    \n    text1a = text1[:x]\n    text2a = text2[:y]\n    text1b = text1[x:]\n    text2b = text2[y:]\n\n    \n    diffs = self.diff_main(text1a, text2a, False, deadline)\n    diffsb = self.diff_main(text1b, text2b, False, deadline)\n\n    return diffs + diffsb", "docstring": "Given the location of the 'middle snake', split the diff in two parts\nand recurse.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\nx: Index of split point in text1.\ny: Index of split point in text2.\ndeadline: Time at which to bail if not yet complete.\n\nReturns:\nArray of diff tuples.", "source": "juraj-google-style"}
{"code": "def set_join_rule(self, room_id, join_rule):\n        \n        content = {\n            \"join_rule\": join_rule\n        }\n        return self.send_state_event(room_id, \"m.room.join_rules\", content)", "docstring": "Set the rule for users wishing to join the room.\n\nArgs:\nroom_id(str): The room to set the rules for.\njoin_rule(str): The chosen rule. One of: [\"public\", \"knock\",\n\"invite\", \"private\"]", "source": "juraj-google-style"}
{"code": "def compressuser(path, home='~'):\n    path = normpath(path)\n    userhome_dpath = userhome()\n    if path.startswith(userhome_dpath):\n        if (len(path) == len(userhome_dpath)):\n            path = home\n        elif (path[len(userhome_dpath)] == os.path.sep):\n            path = (home + path[len(userhome_dpath):])\n    return path", "docstring": "Inverse of `os.path.expanduser`\n\nArgs:\npath (PathLike): path in system file structure\nhome (str): symbol used to replace the home path. Defaults to '~', but\nyou might want to use '$HOME' or '%USERPROFILE%' instead.\n\nReturns:\nPathLike: path: shortened path replacing the home directory with a tilde\n\nCommandLine:\nxdoctest -m ubelt.util_path compressuser\n\nExample:\n>>> path = expanduser('~')\n>>> assert path != '~'\n>>> assert compressuser(path) == '~'\n>>> assert compressuser(path + '1') == path + '1'\n>>> assert compressuser(path + '/1') == join('~', '1')\n>>> assert compressuser(path + '/1', '$HOME') == join('$HOME', '1')", "source": "codesearchnet"}
{"code": "def remove_vtep(self, name, vtep, vlan=None):\n    if (not vlan):\n        cmd = 'vxlan flood vtep remove {}'.format(vtep)\n    else:\n        cmd = 'vxlan vlan {} flood vtep remove {}'.format(vlan, vtep)\n    return self.configure_interface(name, cmd)", "docstring": "Removes a VTEP endpoint from the global or local flood list\n\nEosVersion:\n4.13.7M\n\nArgs:\nname (str): The name of the interface to configure\nvtep (str): The IP address of the remote VTEP endpoint to add\nvlan (str): The VLAN ID associated with this VTEP.  If the VLAN\nkeyword is used, then the VTEP is configured as a local flood\nendpoing\n\nReturns:\nTrue if the command completes successfully", "source": "codesearchnet"}
{"code": "def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):\n        \n        raster_r = RasterUtilClass.read_raster(rasterfile)\n        xsize = raster_r.nCols\n        ysize = raster_r.nRows\n        nodata_value = raster_r.noDataValue\n        srs = raster_r.srs\n        x_min = raster_r.xMin\n        y_max = raster_r.yMax\n        dx = raster_r.dx\n        data = raster_r.data\n\n        if not keep_nodata:\n            i_min = ysize - 1\n            i_max = 0\n            j_min = xsize - 1\n            j_max = 0\n            for i in range(ysize):\n                for j in range(xsize):\n                    if abs(data[i][j] - nodata_value) > DELTA:\n                        i_min = min(i, i_min)\n                        i_max = max(i, i_max)\n                        j_min = min(j, j_min)\n                        j_max = max(j, j_max)\n\n            \n            y_size_mask = i_max - i_min + 1\n            x_size_mask = j_max - j_min + 1\n            x_min_mask = x_min + j_min * dx\n            y_max_mask = y_max - i_min * dx\n        else:\n            y_size_mask = ysize\n            x_size_mask = xsize\n            x_min_mask = x_min\n            y_max_mask = y_max\n            i_min = 0\n            j_min = 0\n        print('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask))\n\n        mask = numpy.zeros((y_size_mask, x_size_mask))\n\n        for i in range(y_size_mask):\n            for j in range(x_size_mask):\n                if abs(data[i + i_min][j + j_min] - nodata_value) > DELTA:\n                    mask[i][j] = 1\n                else:\n                    mask[i][j] = DEFAULT_NODATA\n\n        mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, -dx]\n        RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask,\n                                         mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)\n        return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs)", "docstring": "Generate mask data from a given raster data.\n\nArgs:\nrasterfile: raster file path.\noutmaskfile: output mask file path.\n\nReturns:\nRaster object of mask data.", "source": "juraj-google-style"}
{"code": "def __setitem__(self, anchor_id, anchor):\n        \n        with self._anchor_path(anchor_id).open(mode='wt') as f:\n            save_anchor(f, anchor, self.root)", "docstring": "Update an anchor.\n\nThis will update an existing anchor if it exists, or it will create new\nstorage if not.\n\nArgs:\nanchor_id: The ID of the anchor to update.\nanchor: The anchor to store.", "source": "juraj-google-style"}
{"code": "def add_delegate(self, callback):\n    if (callback in self._delegate_methods):\n        return\n    self._delegate_methods.append(callback)", "docstring": "Registers a new delegate callback\n\nThe prototype should be function(data), where data will be the decoded json push\n\nArgs:\ncallback (function): method to trigger when push center receives events", "source": "codesearchnet"}
{"code": "def distance_to_angle(distance, units='metric'):\n    \n    if units in ('km', 'metric'):\n        pass\n    elif units in ('sm', 'imperial', 'US customary'):\n        distance *= STATUTE_MILE\n    elif units in ('nm', 'nautical'):\n        distance *= NAUTICAL_MILE\n    else:\n        raise ValueError('Unknown units type %r' % units)\n\n    return math.degrees(distance / BODY_RADIUS)", "docstring": "Convert a distance in to an angle along a great circle.\n\nArgs:\ndistance (float): Distance to convert to degrees\nunits (str): Unit type to be used for distances\n\nReturns:\nfloat: Angle in degrees\n\nRaises:\nValueError: Unknown value for ``units``", "source": "juraj-google-style"}
{"code": "def raw_value(self):\n    if (self.parent_setting is not None):\n        return self.parent_setting.raw_value[self.full_name]\n    else:\n        return getattr(settings, self.full_name)", "docstring": "Property to return the variable defined in ``django.conf.settings``.\n\nReturns:\nobject: the variable defined in ``django.conf.settings``.\n\nRaises:\nAttributeError: if the variable is missing.\nKeyError: if the item is missing from nested setting.", "source": "codesearchnet"}
{"code": "def variance_inflation_factors(df):\n    \n    corr = np.corrcoef(df, rowvar=0)\n    corr_inv = np.linalg.inv(corr)\n    vifs = np.diagonal(corr_inv)\n    return pd.Series(vifs, df.columns, name='VIF')", "docstring": "Computes the variance inflation factor (VIF) for each column in the df.\nReturns a pandas Series of VIFs\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "juraj-google-style"}
{"code": "def _getsize_from_header(self, header):\n    for key in self._SIZE_KEYS:\n        try:\n            return int(header.pop(key))\n        except KeyError:\n            continue\n    else:\n        raise UnsupportedOperation('getsize')", "docstring": "Return the size from header\n\nArgs:\nheader (dict): Object header.\n\nReturns:\nint: Size in bytes.", "source": "codesearchnet"}
{"code": "def _ValidateCacheEntryHeader(self, cache_entry_header):\n    return ((cache_entry_header.request_size > 0) and (cache_entry_header.request_size < self._MAXIMUM_URL_LENGTH) and (cache_entry_header.major_format_version == 1) and (cache_entry_header.last_fetched_time > 0) and (cache_entry_header.fetch_count > 0))", "docstring": "Determines whether the values in the cache entry header are valid.\n\nArgs:\ncache_entry_header (firefox_cache1_entry_header): cache entry header.\n\nReturns:\nbool: True if the cache entry header is valid.", "source": "codesearchnet"}
{"code": "def set_enable(self, name, vrid, value=False, run=True):\n    if (value is False):\n        cmd = ('vrrp %d shutdown' % vrid)\n    elif (value is True):\n        cmd = ('no vrrp %d shutdown' % vrid)\n    else:\n        raise ValueError(\"vrrp property 'enable' must be True or False\")\n    if run:\n        result = self.configure_interface(name, cmd)\n        if (result is False):\n            return self.error\n        return result\n    return cmd", "docstring": "Set the enable property of the vrrp\n\nArgs:\nname (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be managed.\nvalue (boolean): True to enable the vrrp, False to disable.\nrun (boolean): True to execute the command, False to\nreturn a string with the formatted command.\n\nReturns:\nIf run is True, returns True if the command executed successfully,\nerror if failure\n\nIf run is False, returns the formatted command string which can\nbe passed to the node", "source": "codesearchnet"}
{"code": "def _fqdn(o, oset=True, recheck=False, pmodule=None):\n    if ((id(o) in _set_failures) or (o is None)):\n        return None\n    if (recheck or (not _safe_hasattr(o, '__fqdn__'))):\n        import inspect\n        if (not hasattr(o, '__name__')):\n            msg.warn('Skipped object {}: no __name__ attribute.'.format(o), 3)\n            return\n        result = None\n        if (hasattr(o, '__acornext__') and (o.__acornext__ is not None)):\n            otarget = o.__acornext__\n        else:\n            otarget = o\n        omod = (_safe_getmodule(otarget) or pmodule)\n        if ((omod is None) and hasattr(otarget, '__objclass__') and (otarget.__objclass__ is not None)):\n            omod = _safe_getmodule(otarget.__objclass__)\n            parts = (('<unknown>' if (omod is None) else omod.__name__), otarget.__objclass__.__name__, otarget.__name__)\n            result = '{}.{}.{}'.format(*parts)\n        elif ((omod is None) and hasattr(otarget, '__class__') and (otarget.__class__ is not None)):\n            omod = _safe_getmodule(otarget.__class__)\n            parts = (('<unknown>' if (omod is None) else omod.__name__), otarget.__class__.__name__, otarget.__name__)\n            result = '{}.{}.{}'.format(*parts)\n        elif (omod is not otarget):\n            parts = (_fqdn(omod, False), otarget.__name__)\n            result = '{}.{}'.format(*parts)\n        else:\n            result = otarget.__name__\n        if oset:\n            _safe_setattr(o, '__fqdn__', result)\n        return result\n    if _safe_hasattr(o, '__fqdn__'):\n        return o.__fqdn__", "docstring": "Returns the fully qualified name of the object.\n\nArgs:\no (type): instance of the object's type.\noset (bool): when True, the fqdn will also be set on the object as attribute\n`__fqdn__`.\nrecheck (bool): for sub-classes, sometimes the super class has already had\nits __fqdn__ attribute set; in that case, we want to recheck the\nobject's name. This usually only gets used during object extension.", "source": "codesearchnet"}
{"code": "def _join_modules(module1, module2):\n    if not module1:\n        return module2\n    if not module2:\n        return module1\n    return '%s.%s' % (module1, module2)", "docstring": "Concatenate 2 module components.\n\nArgs:\nmodule1: First module to join.\nmodule2: Second module to join.\n\nReturns:\nGiven two modules aaa.bbb and ccc.ddd, returns a joined\nmodule aaa.bbb.ccc.ddd.", "source": "github-repos"}
{"code": "def write(self, data):\n        \n\n        while data:\n            try:\n                n = self._socket.send(data)\n            except socket.error:\n                n = None\n            if not n:\n                raise EOFError('Socket closed')\n            data = data[n:]", "docstring": "Send *n* bytes to socket.\n\nArgs:\ndata(bytes): The data to send.\n\nRaises:\nEOFError: If the socket was closed.", "source": "juraj-google-style"}
{"code": "def sub_map(self, counters_map):\n    for counter_name in counters_map.counters:\n        self.increment(counter_name, (- counters_map.counters[counter_name]))", "docstring": "Subtracts all counters from the map.\n\nFor each counter in the passed map, subtracts its value to the counter in\nthis map.\n\nArgs:\ncounters_map: CounterMap instance to subtract.", "source": "codesearchnet"}
{"code": "def Add(self, request, callback=None):\n    handler = RequestResponseAndHandler(request, None, callback)\n    self.__request_response_handlers[self._NewId()] = handler", "docstring": "Add a new request.\n\nArgs:\nrequest: A http_wrapper.Request to add to the batch.\ncallback: A callback to be called for this response, of the\nform callback(response, exception). The first parameter is the\ndeserialized response object. The second is an\napiclient.errors.HttpError exception object if an HTTP error\noccurred while processing the request, or None if no errors\noccurred.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def run_model(self, op_list, num_steps, feed_vars=(), feed_data=None, print_every=100, allow_initialize=True):\n    feed_data = (feed_data or itertools.repeat(()))\n    ops = [bookkeeper.global_step()]\n    ops.extend(op_list)\n    sess = tf.get_default_session()\n    self.prepare_model(sess, allow_initialize=allow_initialize)\n    results = []\n    try:\n        if (num_steps is None):\n            counter = itertools.count(0)\n        elif (num_steps >= 0):\n            counter = xrange(num_steps)\n        else:\n            raise ValueError(('num_steps cannot be negative: %s' % num_steps))\n        for (i, data) in zip(counter, feed_data):\n            log_this_time = (print_every and ((i % print_every) == 0))\n            if (len(data) != len(feed_vars)):\n                raise ValueError(('feed_data and feed_vars must be the same length: %d vs %d' % (len(data), len(feed_vars))))\n            if self._coord.should_stop():\n                print('Coordinator stopped')\n                sys.stdout.flush()\n                self.stop_queues()\n                break\n            if (len(feed_vars) != len(data)):\n                raise ValueError('Feed vars must be the same length as data.')\n            if (log_this_time and self._summary_writer):\n                results = sess.run((ops + [self._summaries]), dict(zip(feed_vars, data)))\n                self._summary_writer.add_summary(results[(- 1)], results[0])\n                results = results[:(- 1)]\n            else:\n                results = sess.run(ops, dict(zip(feed_vars, data)))\n            if log_this_time:\n                self._log_and_save(sess, results)\n        if (print_every and (not log_this_time)):\n            self._log_and_save(sess, results)\n    except tf.errors.OutOfRangeError as ex:\n        print(('Done training -- epoch limit reached %s' % ex.message))\n        sys.stdout.flush()\n        self.stop_queues()\n    except BaseException as ex:\n        print(('Exception -- stopping threads: %s' % ex), file=sys.stderr)\n        sys.stdout.flush()\n        self.stop_queues()\n        raise\n    return results", "docstring": "Runs `op_list` for `num_steps`.\n\nArgs:\nop_list: A list of ops to run.\nnum_steps: Number of steps to run this for.  If feeds are used, this is a\nmaximum. `None` can be used to signal \"forever\".\nfeed_vars: The variables to feed.\nfeed_data: An iterator that feeds data tuples.\nprint_every: Print a log line and checkpoing every so many steps.\nallow_initialize: If True, the model will be initialized if any variable\nis uninitialized, if False the model will not be initialized.\nReturns:\nThe final run result as a list.\nRaises:\nValueError: If feed_data doesn't match feed_vars.", "source": "codesearchnet"}
{"code": "def association(self, group_xid):\n        \n        association = {'groupXid': group_xid}\n        self._indicator_data.setdefault('associatedGroups', []).append(association)", "docstring": "Add association using xid value.\n\nArgs:\ngroup_xid (str): The external id of the Group to associate.", "source": "juraj-google-style"}
{"code": "def read_cdx(file, encoding='utf8'):\n    with codecs.getreader(encoding)(file) as stream:\n        header_line = stream.readline()\n        separator = header_line[0]\n        field_keys = header_line.strip().split(separator)\n        if (field_keys.pop(0) != 'CDX'):\n            raise ValueError('CDX header not found.')\n        for line in stream:\n            (yield dict(zip(field_keys, line.strip().split(separator))))", "docstring": "Iterate CDX file.\n\nArgs:\nfile (str): A file object.\nencoding (str): The encoding of the file.\n\nReturns:\niterator: Each item is a dict that maps from field key to value.", "source": "codesearchnet"}
{"code": "def _kl_normal_normal(n_a, n_b, name=None):\n  \n  with tf.name_scope(name or \"kl_normal_normal\"):\n    one = tf.constant(1, dtype=n_a.dtype)\n    two = tf.constant(2, dtype=n_a.dtype)\n    half = tf.constant(0.5, dtype=n_a.dtype)\n    s_a_squared = tf.square(n_a.scale)\n    s_b_squared = tf.square(n_b.scale)\n    ratio = s_a_squared / s_b_squared\n    return (tf.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half *\n            (ratio - one - tf.math.log(ratio)))", "docstring": "Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.\n\nArgs:\nn_a: instance of a Normal distribution object.\nn_b: instance of a Normal distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_normal_normal\".\n\nReturns:\nBatchwise KL(n_a || n_b)", "source": "juraj-google-style"}
{"code": "def chi_squared(source_frequency, target_frequency):\n    \n    \n    \n    target_prob = frequency_to_probability(target_frequency)\n    source_len = sum(v for k, v in source_frequency.items() if k in target_frequency)\n\n    result = 0\n    for symbol, prob in target_prob.items():\n        symbol_frequency = source_frequency.get(symbol, 0)  \n        result += _calculate_chi_squared(symbol_frequency, prob, source_len)\n\n    return result", "docstring": "Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``.\n\nExample:\n>>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2})\n0.1\n\nArgs:\nsource_frequency (dict): Frequency map of the text you are analyzing\ntarget_frequency (dict): Frequency map of the target language to compare with\n\nReturns:\nDecimal value of the chi-squared statistic", "source": "juraj-google-style"}
{"code": "def get_selection(cls, strings, title='Select an option', subtitle=None, exit_option=True, _menu=None):\n    menu = cls(strings, title, subtitle, exit_option)\n    if (_menu is not None):\n        _menu.append(menu)\n    menu.show()\n    menu.join()\n    return menu.selected_option", "docstring": "Single-method way of getting a selection out of a list of strings.\n\nArgs:\nstrings (:obj:`list` of :obj:`str`):  The list of strings this menu should be built from.\ntitle (str): The title of the menu.\nsubtitle (str): The subtitle of the menu.\nexit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True.\n_menu: Should probably only be used for testing, pass in a list and the created menu used internally by\nthe method will be appended to it\n\nReturns:\nint: The index of the selected option.", "source": "codesearchnet"}
{"code": "def _init_ready_op(self, ready_op=USE_DEFAULT, ready_for_local_init_op=USE_DEFAULT):\n    if ready_op is Supervisor.USE_DEFAULT:\n        ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)\n        if ready_op is None:\n            ready_op = variables.report_uninitialized_variables()\n            ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)\n    self._ready_op = ready_op\n    if ready_for_local_init_op is Supervisor.USE_DEFAULT:\n        ready_for_local_init_op = self._get_first_op_from_collection(ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)\n    self._ready_for_local_init_op = ready_for_local_init_op", "docstring": "Initializes ready_op.\n\nArgs:\nready_op: `Tensor` to check if the model is initialized. If it's set to\nUSE_DEFAULT, creates an op that checks all the variables are\ninitialized.\nready_for_local_init_op: `Tensor` to check if the model is ready to run\nlocal_init_op. If it's set to USE_DEFAULT, creates an op that checks all\nthe global variables are initialized.", "source": "github-repos"}
{"code": "def download(self, updates):\n    if (updates.count() == 0):\n        ret = {'Success': False, 'Updates': 'Nothing to download'}\n        return ret\n    downloader = self._session.CreateUpdateDownloader()\n    self._session.ClientApplicationID = 'Salt: Download Update'\n    with salt.utils.winapi.Com():\n        download_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')\n    ret = {'Updates': {}}\n    for update in updates.updates:\n        uid = update.Identity.UpdateID\n        ret['Updates'][uid] = {}\n        ret['Updates'][uid]['Title'] = update.Title\n        ret['Updates'][uid]['AlreadyDownloaded'] = bool(update.IsDownloaded)\n        if (not salt.utils.data.is_true(update.EulaAccepted)):\n            log.debug('Accepting EULA: %s', update.Title)\n            update.AcceptEula()\n        if (not salt.utils.data.is_true(update.IsDownloaded)):\n            log.debug('To Be Downloaded: %s', uid)\n            log.debug('\\tTitle: %s', update.Title)\n            download_list.Add(update)\n    if (download_list.Count == 0):\n        ret = {'Success': True, 'Updates': 'Nothing to download'}\n        return ret\n    downloader.Updates = download_list\n    try:\n        log.debug('Downloading Updates')\n        result = downloader.Download()\n    except pywintypes.com_error as error:\n        (hr, msg, exc, arg) = error.args\n        try:\n            failure_code = self.fail_codes[exc[5]]\n        except KeyError:\n            failure_code = 'Unknown Failure: {0}'.format(error)\n        log.error('Download Failed: %s', failure_code)\n        raise CommandExecutionError(failure_code)\n    result_code = {0: 'Download Not Started', 1: 'Download In Progress', 2: 'Download Succeeded', 3: 'Download Succeeded With Errors', 4: 'Download Failed', 5: 'Download Aborted'}\n    log.debug('Download Complete')\n    log.debug(result_code[result.ResultCode])\n    ret['Message'] = result_code[result.ResultCode]\n    if (result.ResultCode in [2, 3]):\n        log.debug('Downloaded Successfully')\n        ret['Success'] = True\n    else:\n        log.debug('Download Failed')\n        ret['Success'] = False\n    for i in range(download_list.Count):\n        uid = download_list.Item(i).Identity.UpdateID\n        ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]\n    return ret", "docstring": "Download the updates passed in the updates collection. Load the updates\ncollection using ``search`` or ``available``\n\nArgs:\n\nupdates (Updates): An instance of the Updates class containing a\nthe updates to be downloaded.\n\nReturns:\ndict: A dictionary containing the results of the download\n\nCode Example:\n\n.. code-block:: python\n\nimport salt.utils.win_update\nwua = salt.utils.win_update.WindowsUpdateAgent()\n\n# Download KB3195454\nupdates = wua.search('KB3195454')\nresults = wua.download(updates)", "source": "codesearchnet"}
{"code": "def randwindow(self, window_shape):\n    row = random.randrange(window_shape[0], self.shape[1])\n    col = random.randrange(window_shape[1], self.shape[2])\n    return self[(:, (row - window_shape[0]):row, (col - window_shape[1]):col)]", "docstring": "Get a random window of a given shape from within an image\n\nArgs:\nwindow_shape (tuple): The desired shape of the returned image as (height, width) in pixels.\n\nReturns:\nimage: a new image object of the specified shape and same type", "source": "codesearchnet"}
{"code": "def _safe_scalar_div(numerator, denominator, name):\n    numerator.get_shape().with_rank_at_most(1)\n    denominator.get_shape().with_rank_at_most(1)\n    return math_ops.div_no_nan(numerator, denominator, name=name)", "docstring": "Divides two values, returning 0 if the denominator is 0.\n\nArgs:\nnumerator: A scalar `float64` `Tensor`.\ndenominator: A scalar `float64` `Tensor`.\nname: Name for the returned op.\n\nReturns:\n0 if `denominator` == 0, else `numerator` / `denominator`", "source": "github-repos"}
{"code": "def set_step(step):\n    _summary_state.step = step", "docstring": "Sets the default summary step for the current thread.\n\nFor convenience, this function sets a default value for the `step` parameter\nused in summary-writing functions elsewhere in the API so that it need not\nbe explicitly passed in every such invocation. The value can be a constant\nor a variable, and can be retrieved via `tf.summary.experimental.get_step()`.\n\nNote: when using this with @tf.functions, the step value will be captured at\nthe time the function is traced, so changes to the step outside the function\nwill not be reflected inside the function unless using a `tf.Variable` step.\n\nArgs:\nstep: An `int64`-castable default step value, or None to unset.", "source": "github-repos"}
{"code": "def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options):\n    serialized_tensors, feed_additions, registered_savers, graph_proto = self._gather_serialized_tensors(object_graph_tensor)\n    if self._last_save_object_graph != graph_proto or context.executing_eagerly() or ops.inside_function():\n        saver = functional_saver.MultiDeviceSaver(serialized_tensors, registered_savers)\n        save_op = saver.save(file_prefix, options=options)\n        with ops.device('/cpu:0'):\n            with ops.control_dependencies([save_op]):\n                self._cached_save_operation = array_ops.identity(file_prefix)\n        self._last_save_object_graph = graph_proto\n    return (self._cached_save_operation, feed_additions)", "docstring": "Create or retrieve save ops.\n\nArgs:\nfile_prefix: The prefix for saved checkpoint files.\nobject_graph_tensor: A `Tensor` to which the current object graph will be\nfed.\noptions: `CheckpointOptions` object.\n\nReturns:\nA two-element tuple with a filename tensor and a feed_dict of tensors to\nfeed when running it (if graph building). The feed dict contains the\ncurrent object graph and any Python state to be saved in the\ncheckpoint. When executing eagerly only the first argument is meaningful.", "source": "github-repos"}
{"code": "def bootstrap_results(self, init_state):\n    del init_state\n    return []", "docstring": "Returns an object with the same type as returned by `one_step(...)[1]`.\n\nArgs:\ninit_state: 1D `tf.Tensor` which is the initial chain state.\n\nReturns:\nkernel_results: Empty list.", "source": "github-repos"}
{"code": "def chip_as_adjacency_list(device: 'cirq.google.XmonDevice',\n                           ) -> Dict[GridQubit, List[GridQubit]]:\n    \n    c_set = set(device.qubits)\n    c_adj = {} \n    for n in device.qubits:\n        c_adj[n] = []\n        for m in [above(n), left_of(n), below(n), right_of(n)]:\n            if m in c_set:\n                c_adj[n].append(m)\n    return c_adj", "docstring": "Gives adjacency list representation of a chip.\n\nThe adjacency list is constructed in order of above, left_of, below and\nright_of consecutively.\n\nArgs:\ndevice: Chip to be converted.\n\nReturns:\nMap from nodes to list of qubits which represent all the neighbours of\ngiven qubit.", "source": "juraj-google-style"}
{"code": "def __call__(self, name):\n        \n        priv = \"_\" + name    \n\n        \n        \n        \n        \n        def getter(this):\n            \n            \n            \n            if ((not hasattr(this, priv) or getattr(this, priv) is None) and\n                hasattr(this, \"_setters\") and isinstance(this._setters, (list, tuple))):\n                for prefix in this._setters:\n                    cmd = \"{}{}\".format(prefix, priv)\n                    if hasattr(this, cmd):\n                        getattr(this, cmd)()    \n                        if hasattr(this, priv):\n                            break\n            \n            if isinstance(self.pre_get, str):\n                getattr(this, self.pre_get)()\n            elif callable(self.pre_get):\n                self.pre_get(this)\n            return getattr(this, priv, None)    \n\n        def setter(this, value):\n            \n            \n            if self.autoconv and not isinstance(value, self.types) and value is not None:\n                for t in self.types:\n                    try:\n                        value = t(value)\n                        break\n                    except Exception as e:    \n                        if self.verbose:\n                            warnings.warn(\"Conversion of {} (with type {}) failed to type {}\\n{}\".format(name, type(value), t, str(e)))\n                else:          \n                    raise TypeError(\"Cannot convert object of type {} to any of {}.\".format(type(value), self.types))\n            \n            \n            \n            elif ((value is None and self.allow_none == False) or\n                  (not isinstance(value, self.types) and value is not None)):\n                raise TypeError(\"Object '{}' cannot have type {}, must be of type(s) {}.\".format(name, type(value), self.types))\n            \n            if isinstance(self.pre_set, str):\n                getattr(this, self.pre_set)()\n            elif callable(self.pre_set):\n                self.pre_set(this)\n            if isinstance(this, (pd.DataFrame, pd.SparseDataFrame)):\n                this[priv] = value\n            else:\n                setattr(this, priv, value)    \n            \n            if isinstance(self.post_set, str):\n                getattr(this, self.post_set)()\n            elif callable(self.post_set):\n                self.post_set(this)\n\n        def deleter(this):\n            \n            if isinstance(self.pre_del, str):\n                getattr(this, self.pre_del)()\n            elif callable(self.pre_del):\n                self.pre_del(this)\n            delattr(this, priv)    \n            \n            if isinstance(self.post_del, str):\n                getattr(this, self.post_del)()\n            elif callable(self.post_del):\n                self.post_del(this)\n\n        return property(getter, setter, deleter, doc=self.doc)", "docstring": "Construct the property.\n\nArgs:\nname (str): Attribute (property) name\n\nReturns:\nprop (property): Custom property definition with support for typing", "source": "juraj-google-style"}
{"code": "def file(cls, path, encoding=None, parser=None):\n    cls.__hierarchy.append(file.File(path, encoding, parser))", "docstring": "Set a file as a source.\n\nFile are parsed as literal python dicts by default, this behaviour\ncan be configured.\n\nArgs:\npath: The path to the file to be parsed\nencoding: The encoding of the file.\nDefaults to 'raw'. Available built-in values: 'ini', 'json', 'yaml'.\nCustom value can be used in conjunction with parser.\nparser: A parser function for a custom encoder.\nIt is expected to return a dict containing the parsed values\nwhen called with the contents of the file as an argument.", "source": "codesearchnet"}
{"code": "def get_string(self, sort_keys=False, pretty=False):\n        \n        keys = self.keys()\n        if sort_keys:\n            keys = sorted(keys)\n        lines = []\n        for k in keys:\n            if isinstance(self[k], dict):\n                if k in [\"ELNES\", \"EXELFS\"]:\n                    lines.append([k, self._stringify_val(self[k][\"ENERGY\"])])\n                    beam_energy = self._stringify_val(self[k][\"BEAM_ENERGY\"])\n                    beam_energy_list = beam_energy.split()\n                    if int(beam_energy_list[1]) == 0:  \n                        lines.append([beam_energy])\n                        lines.append([self._stringify_val(self[k][\"BEAM_DIRECTION\"])])\n                    else:\n                        \n                        beam_energy_list[2] = str(0)\n                        lines.append([self._stringify_val(beam_energy_list)])\n                    lines.append([self._stringify_val(self[k][\"ANGLES\"])])\n                    lines.append([self._stringify_val(self[k][\"MESH\"])])\n                    lines.append([self._stringify_val(self[k][\"POSITION\"])])\n            else:\n                lines.append([k, self._stringify_val(self[k])])\n        if pretty:\n            return tabulate(lines)\n        else:\n            return str_delimited(lines, None, \" \")", "docstring": "Returns a string representation of the Tags.  The reason why this\nmethod is different from the __str__ method is to provide options\nfor pretty printing.\n\nArgs:\nsort_keys: Set to True to sort the Feff parameters alphabetically.\nDefaults to False.\npretty: Set to True for pretty aligned output. Defaults to False.\n\nReturns:\nString representation of Tags.", "source": "juraj-google-style"}
{"code": "def cumprod(self, axis=None, skipna=True, *args, **kwargs):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        self._validate_dtypes(numeric_only=True)\n        return self.__constructor__(\n            query_compiler=self._query_compiler.cumprod(\n                axis=axis, skipna=skipna, **kwargs\n            )\n        )", "docstring": "Perform a cumulative product across the DataFrame.\n\nArgs:\naxis (int): The axis to take product on.\nskipna (bool): True to skip NA values, false otherwise.\n\nReturns:\nThe cumulative product of the DataFrame.", "source": "juraj-google-style"}
{"code": "def read_execution_stack_trace(self, execution):\n    host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]\n    return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids])", "docstring": "Read the stack trace of a given Execution object.\n\nArgs:\nexecution: The Execution object of interest.\n\nReturns:\n1. The host name.\n2. The stack trace, as a list of (file_path, lineno, func) tuples.", "source": "github-repos"}
{"code": "def __init__(self, cluster_spec, initializer=None, share_gpu=True):\n    _active_pool_runners.add(self)\n    self._cluster_spec = cluster_spec\n    self._initializer = initializer\n    self._share_gpu = share_gpu\n    self._conn = {}\n    self._runner = None", "docstring": "Creates a multi-process pool runner.\n\nArgs:\ncluster_spec: Dict for cluster spec. The following is an example of\ncluster with three workers.\n{\"worker\": [\"worker0.example.com:2222\",\n\"worker1.example.com:2222\",\n\"worker2.example.com:2222\"]}\ninitializer: a callable to called at the startup of worker processes.\nshare_gpu: Whether to share GPUs among workers. If False, each worker is\nassigned different GPUs in a roundrobin fashion.\n\nRaises:\nRuntimeError: if `multi_process_runner.test_main()` is not called.\nValueError: if there are more than one chief in the `cluster_spec`.", "source": "github-repos"}
{"code": "def cctop_save_xml(jobid, outpath):\n    \n    status = cctop_check_status(jobid=jobid)\n    if status == 'Finished':\n        result = 'http:\n        result_text = requests.post(result)\n        with open(outpath, 'w') as f:\n            f.write(result_text.text)\n        return outpath\n    else:\n        raise ConnectionRefusedError('CCTOP job incomplete, status is \"{}\"'.format(status))", "docstring": "Save the CCTOP results file in XML format.\n\nArgs:\njobid (str): Job ID obtained when job was submitted\noutpath (str): Path to output filename\n\nReturns:\nstr: Path to output filename", "source": "juraj-google-style"}
{"code": "def get_key_by_job_id(cls, mapreduce_id):\n    \n    return db.Key.from_path(cls.kind(), str(mapreduce_id))", "docstring": "Retrieves the Key for a Job.\n\nArgs:\nmapreduce_id: The job to retrieve.\n\nReturns:\nDatastore Key that can be used to fetch the MapreduceState.", "source": "juraj-google-style"}
{"code": "def getEstTraitCorrCoef(self, term_i=None):\n    cov = self.getEstTraitCovar(term_i)\n    stds = SP.sqrt(cov.diagonal())[(:, SP.newaxis)]\n    RV = ((cov / stds) / stds.T)\n    return RV", "docstring": "Returns the estimated trait correlation matrix\n\nArgs:\nterm_i:     index of the term we are interested in", "source": "codesearchnet"}
{"code": "def upload(self, title, description=\"\", keywords=\"\", developer_tags=None, access_control=AccessControl.Public):\n        \n        \n        if not self.authenticated:\n            raise ApiError(_(\"Authentication is required\"))\n\n        \n        my_media_group = gdata.media.Group(\n            title=gdata.media.Title(text=title),\n            description=gdata.media.Description(description_type='plain',\n                                                text=description),\n            keywords=gdata.media.Keywords(text=keywords),\n            category=[gdata.media.Category(\n                text='Autos',\n                scheme='http:\n                label='Autos')],\n            \n        )\n\n        \n        extension = self._access_control(access_control, my_media_group)\n\n        \n        video_entry = gdata.youtube.YouTubeVideoEntry(\n            media=my_media_group, extension_elements=extension)\n\n        \n        if developer_tags:\n            video_entry.AddDeveloperTags(developer_tags)\n\n        \n        response = Api.yt_service.GetFormUploadToken(video_entry)\n\n        \n        post_url = response[0]\n        youtube_token = response[1]\n\n        return {'post_url': post_url, 'youtube_token': youtube_token}", "docstring": "Browser based upload\nCreates the video entry and meta data to initiate a browser upload\n\nAuthentication is needed\n\nParams:\ntitle: string\ndescription: string\nkeywords: comma seperated string\ndeveloper_tags: tuple\n\nReturn:\ndict contains post_url and youtube_token. i.e { 'post_url': post_url, 'youtube_token': youtube_token }\n\nRaises:\nApiError: on no authentication", "source": "juraj-google-style"}
{"code": "def Cancel(self, request, global_params=None):\n    config = self.GetMethodConfig('Cancel')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsOperationsCancelRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Empty) The response message.", "source": "github-repos"}
{"code": "def match_urls_to_resources(self, url_values):\n    valid_values = {}\n    for resource in self.Meta.related_resources:\n        for (k, v) in url_values.items():\n            resource_url = resource.get_resource_url(resource, resource.Meta.base_url)\n            if isinstance(v, list):\n                if all([(resource_url in i) for i in v]):\n                    self.set_related_method(resource, v)\n                    valid_values[k] = v\n            elif (resource_url in v):\n                self.set_related_method(resource, v)\n                valid_values[k] = v\n    return valid_values", "docstring": "For the list of valid URLs, try and match them up\nto resources in the related_resources attribute.\n\nArgs:\nurl_values: A dictionary of keys and URL strings that\ncould be related resources.\nReturns:\nvalid_values: The values that are valid", "source": "codesearchnet"}
{"code": "def get_rdf_es_idx_map(cls, idx_obj):\n        \n        idx_name = list(idx_obj)[0]\n\n        es_map = {\n            \"index\": idx_name,\n            \"body\" : {\n                \"mappings\": {},\n                \"settings\": {\n                    \n                    \"index\": {\n                        \n                        \n                        \n                        \"analysis\": {\n                            \"analyzer\": {\n                                \"keylower\": {\n                                    \"tokenizer\": \"keyword\",\n                                    \"type\": \"custom\",\n                                    \"filter\": \"lowercase\",\n                                    \"ignore_above\" : 256\n                                }\n                            }\n                        }\n                    }\n                }\n            }\n        }\n\n        for idx_cls in idx_obj[idx_name]:\n            \n            es_map['body']['mappings'][idx_cls.es_defs['kds_esDocType'][0]] = \\\n                    {'properties': idx_cls.es_mapping(idx_cls)}\n\n        return es_map", "docstring": "Returns an elasticsearch mapping for the specified index based off\nof the mapping defined by rdf class definitions\n\nargs:\nidx_obj: Dictionary of the index and a list of rdfclasses\nincluded in the mapping", "source": "juraj-google-style"}
{"code": "def opt_separator(self) -> bool:\n    start = self.offset\n    self.dfa([{'': (lambda : (- 1)), ' ': (lambda : 0), '\\t': (lambda : 0), '\\n': (lambda : 0), '\\r': (lambda : 1), '/': (lambda : 2)}, {'': self._back_break, '\\n': (lambda : 0)}, {'': self._back_break, '/': (lambda : 3), '*': (lambda : 4)}, {'': (lambda : 3), '\\n': (lambda : 0)}, {'': (lambda : 4), '*': (lambda : 5)}, {'': (lambda : 4), '/': (lambda : 0), '*': (lambda : 5)}])\n    return (start < self.offset)", "docstring": "Parse an optional separator and return ``True`` if found.\n\nRaises:\nEndOfInput: If past the end of input.", "source": "codesearchnet"}
{"code": "def verify(self, verify_key):\n        \n        if not self.mardata.signatures or not self.mardata.signatures.sigs:\n            \n            return False\n\n        hashers = []\n        for sig in self.mardata.signatures.sigs:\n            hashers.append((sig.algorithm_id, sig.signature, make_hasher(sig.algorithm_id)))\n\n        assert len(hashers) == len(self.mardata.signatures.sigs)\n\n        for block in get_signature_data(self.fileobj,\n                                        self.mardata.signatures.filesize):\n            [h.update(block) for (_, _, h) in hashers]\n\n        for algo_id, sig, h in hashers:\n            if not verify_signature(verify_key, sig, h.finalize(), h.algorithm.name):\n                return False\n        else:\n            return True", "docstring": "Verify that this MAR file has a valid signature.\n\nArgs:\nverify_key (str): PEM formatted public key\n\nReturns:\nTrue if the MAR file's signature matches its contents\nFalse otherwise; this includes cases where there is no signature.", "source": "juraj-google-style"}
{"code": "def heightmap_multiply_hm(\n    hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray\n) -> None:\n    \n    hm3[:] = hm1[:] * hm2[:]", "docstring": "Multiplies two heightmap's together and stores the result in ``hm3``.\n\nArgs:\nhm1 (numpy.ndarray): The first heightmap.\nhm2 (numpy.ndarray): The second heightmap to multiply with the first.\nhm3 (numpy.ndarray): A destination heightmap to store the result.\n\n.. deprecated:: 2.0\nDo ``hm3[:] = hm1[:] * hm2[:]`` instead.\nAlternatively you can do ``HeightMap(hm1.array[:] * hm2.array[:])``.", "source": "juraj-google-style"}
{"code": "def move(self, delta):\n        \n        self.pos = (self.pos[0]+delta[0], self.pos[1]+delta[1])", "docstring": "Move the node.\n\nArgs:\ndelta (tupel): A tupel, holding the adjustment of the position.", "source": "juraj-google-style"}
{"code": "def GetName(obj):\n    precondition.AssertType(obj, (type, types.FunctionType))\n    if PY2:\n        return obj.__name__.decode('ascii')\n    else:\n        return obj.__name__", "docstring": "A compatibility wrapper for getting object's name.\n\nIn Python 2 class names are returned as `bytes` (since class names can contain\nonly ASCII characters) whereas in Python 3 they are `unicode` (since class\nnames can contain arbitrary unicode characters).\n\nThis function makes this behaviour consistent and always returns class name as\nan unicode string.\n\nOnce support for Python 2 is dropped all invocations of this call can be\nreplaced with ordinary `__name__` access.\n\nArgs:\nobj: A type or function object to get the name for.\n\nReturns:\nName of the specified class as unicode string.", "source": "codesearchnet"}
{"code": "def _right_pad(x, final_rank):\n  \n  padded_shape = tf.concat(\n      [tf.shape(input=x),\n       tf.ones(final_rank - tf.rank(x), dtype=tf.int32)],\n      axis=0)\n  static_padded_shape = None\n  if x.shape.is_fully_defined() and isinstance(final_rank, int):\n    static_padded_shape = x.shape.as_list()\n    extra_dims = final_rank - len(static_padded_shape)\n    static_padded_shape.extend([1] * extra_dims)\n\n  padded_x = tf.reshape(x, static_padded_shape or padded_shape)\n  return padded_x", "docstring": "Pads the shape of x to the right to be of rank final_rank.\n\nExpands the dims of `x` to the right such that its rank is equal to\nfinal_rank. For example, if `x` is of shape [1, 5, 7, 2] and `final_rank` is\n7, we return padded_x, which is of shape [1, 5, 7, 2, 1, 1, 1].\n\nArgs:\nx: The tensor whose shape is to be padded.\nfinal_rank: Scalar int32 `Tensor` or Python `int`. The desired rank of x.\n\nReturns:\npadded_x: A tensor of rank final_rank.", "source": "juraj-google-style"}
{"code": "def get_attributes(path):\n    \n    if not os.path.exists(path):\n        raise CommandExecutionError('Path not found: {0}'.format(path))\n\n    \n    attributes = {}\n\n    \n    intAttributes = win32file.GetFileAttributes(path)\n\n    \n    attributes['archive'] = (intAttributes & 32) == 32\n    attributes['reparsePoint'] = (intAttributes & 1024) == 1024\n    attributes['compressed'] = (intAttributes & 2048) == 2048\n    attributes['directory'] = (intAttributes & 16) == 16\n    attributes['encrypted'] = (intAttributes & 16384) == 16384\n    attributes['hidden'] = (intAttributes & 2) == 2\n    attributes['normal'] = (intAttributes & 128) == 128\n    attributes['notIndexed'] = (intAttributes & 8192) == 8192\n    attributes['offline'] = (intAttributes & 4096) == 4096\n    attributes['readonly'] = (intAttributes & 1) == 1\n    attributes['system'] = (intAttributes & 4) == 4\n    attributes['temporary'] = (intAttributes & 256) == 256\n\n    \n    attributes['mountedVolume'] = False\n    if attributes['reparsePoint'] is True and attributes['directory'] is True:\n        fileIterator = win32file.FindFilesIterator(path)\n        findDataTuple = next(fileIterator)\n        if findDataTuple[6] == 0xA0000003:\n            attributes['mountedVolume'] = True\n    \n\n    \n    \n    \n\n    attributes['symbolicLink'] = False\n    if attributes['reparsePoint'] is True:\n        fileIterator = win32file.FindFilesIterator(path)\n        findDataTuple = next(fileIterator)\n        if findDataTuple[6] == 0xA000000C:\n            attributes['symbolicLink'] = True\n\n    return attributes", "docstring": "Return a dictionary object with the Windows\nfile attributes for a file.\n\nArgs:\npath (str): The path to the file or directory\n\nReturns:\ndict: A dictionary of file attributes\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.get_attributes c:\\\\temp\\\\a.txt", "source": "juraj-google-style"}
{"code": "def _ProcessAMCacheProgramKey(self, am_entry, parser_mediator):\n    \n    amcache_datetime = am_entry.get_value_by_name(\n        self._AMCACHE_P_INSTALLDATE).get_data_as_integer()\n    event_data = AmcacheProgramEventData()\n\n    name = am_entry.get_value_by_name(self._AMCACHE_P_NAME)\n    if name:\n      event_data.name = name.get_data_as_string()\n\n    version = am_entry.get_value_by_name(self._AMCACHE_P_VERSION)\n    if version:\n      event_data.version = version.get_data_as_string()\n\n    publisher = am_entry.get_value_by_name(self._AMCACHE_P_PUBLISHER)\n    if publisher:\n      event_data.publisher = publisher.get_data_as_string()\n\n    languagecode = am_entry.get_value_by_name(self._AMCACHE_P_LANGUAGECODE)\n    if languagecode:\n      event_data.languagecode = languagecode.get_data_as_string()\n\n    entrytype = am_entry.get_value_by_name(self._AMCACHE_P_ENTRYTYPE)\n    if entrytype:\n      event_data.entrytype = entrytype.get_data_as_string()\n\n    uninstallkey = am_entry.get_value_by_name(self._AMCACHE_P_UNINSTALLKEY)\n    if uninstallkey:\n      uninstallkey = uninstallkey.get_data()\n      uninstallkey = uninstallkey.decode('utf-16-LE')\n      event_data.uninstallkey = uninstallkey\n\n    filepaths = am_entry.get_value_by_name(self._AMCACHE_P_FILEPATHS)\n    if filepaths:\n      filepaths = filepaths.get_data()\n      filepaths = filepaths.decode('utf-16-LE')\n      event_data.filepaths = filepaths\n\n    productcode = am_entry.get_value_by_name(self._AMCACHE_P_PRODUCTCODE)\n    if productcode:\n      event_data.productcode = productcode.get_data_as_string()\n\n    packagecode = am_entry.get_value_by_name(self._AMCACHE_P_PACKAGECODE)\n    if packagecode:\n      event_data.packagecode = packagecode.get_data_as_string()\n\n    msiproductcode = am_entry.get_value_by_name(self._AMCACHE_P_MSIPRODUCTCODE)\n    if msiproductcode:\n      msiproductcode = msiproductcode.get_data()\n      msiproductcode = msiproductcode.decode('utf-16-LE')\n      event_data.msiproductcode = msiproductcode\n\n    msipackagecode = am_entry.get_value_by_name(self._AMCACHE_P_MSIPACKAGECODE)\n    if msipackagecode:\n      msipackagecode = msipackagecode.get_data()\n      msipackagecode = msipackagecode.decode('utf-16-LE')\n      event_data.msipackagecode = msipackagecode\n\n    files = am_entry.get_value_by_name(self._AMCACHE_P_FILES)\n    if files:\n      files = files.get_data()\n      files = files.decode('utf-16-LE')\n      event_data.files = files\n\n    event = time_events.DateTimeValuesEvent(\n        posix_time.PosixTime(amcache_datetime),\n        definitions.TIME_DESCRIPTION_INSTALLATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Amcache Root/Programs key for events.\n\nArgs:\nam_entry (pyregf.key): amcache Programs key.\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def _check_warnings(self, json_response):\n    self.warnings = None\n    if json_response:\n        self.warnings = json_response.get('warnings')\n    if (self.debug and self.warnings):\n        for w in self.warnings:\n            print(('WARNING: %s - %s' % (w['warning_name'], w['warning_msg'])))", "docstring": "Extract warnings from the response to make them accessible\n\nArgs:\njson_response (dict): JSON response", "source": "codesearchnet"}
{"code": "def new_module(self):\n    self.reset_run_errors()\n    if (self._code is None):\n        return None\n    module_name = ('bk_script_' + make_id().replace('-', ''))\n    module = ModuleType(str(module_name))\n    module.__dict__['__file__'] = os.path.abspath(self._path)\n    return module", "docstring": "Make a fresh module to run in.\n\nReturns:\nModule", "source": "codesearchnet"}
{"code": "def eval_features(json):\n    \n    return {'close'    : json[-1]['close'],\n            'sma'      : SMA.eval_from_json(json),\n            'rsi'      : RSI.eval_from_json(json),\n            'so'       : SO.eval_from_json(json),\n            'obv'      : OBV.eval_from_json(json)}", "docstring": "Gets technical analysis features from market data JSONs\n\nArgs:\njson: JSON data as a list of dict dates, where the keys are\nthe raw market statistics.\n\nReturns:\nDict of market features and their values", "source": "juraj-google-style"}
{"code": "def get_linear_interpolated_value(x_values, y_values, x):\n    \n    a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))\n\n    ind = np.where(a[:, 0] >= x)[0]\n\n    if len(ind) == 0 or ind[0] == 0:\n        raise ValueError(\"x is out of range of provided x_values\")\n\n    i = ind[0]\n    x1, x2 = a[i - 1][0], a[i][0]\n    y1, y2 = a[i - 1][1], a[i][1]\n\n    return y1 + (y2 - y1) / (x2 - x1) * (x - x1)", "docstring": "Returns an interpolated value by linear interpolation between two values.\nThis method is written to avoid dependency on scipy, which causes issues on\nthreading servers.\n\nArgs:\nx_values: Sequence of x values.\ny_values: Corresponding sequence of y values\nx: Get value at particular x\n\nReturns:\nValue at x.", "source": "juraj-google-style"}
{"code": "class FixedPointMul(Function):\n\n    @staticmethod\n    def forward(ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None):\n        if len(pre_act_scaling_factor.shape) == 3:\n            reshape = lambda x: x\n        else:\n            reshape = lambda x: x.view(1, 1, -1)\n        ctx.identity = identity\n        n = 2 ** (bit_num - 1) - 1\n        with torch.no_grad():\n            pre_act_scaling_factor = reshape(pre_act_scaling_factor)\n            if identity is not None:\n                identity_scaling_factor = reshape(identity_scaling_factor)\n            ctx.z_scaling_factor = z_scaling_factor\n            z_int = torch.round(pre_act / pre_act_scaling_factor)\n            _A = pre_act_scaling_factor.type(torch.double)\n            _B = z_scaling_factor.type(torch.float).type(torch.double)\n            new_scale = _A / _B\n            new_scale = reshape(new_scale)\n            m, e = batch_frexp(new_scale)\n            output = z_int.type(torch.double) * m.type(torch.double)\n            output = torch.round(output / 2.0 ** e)\n            if identity is not None:\n                wx_int = torch.round(identity / identity_scaling_factor)\n                _A = identity_scaling_factor.type(torch.double)\n                _B = z_scaling_factor.type(torch.float).type(torch.double)\n                new_scale = _A / _B\n                new_scale = reshape(new_scale)\n                m1, e1 = batch_frexp(new_scale)\n                output1 = wx_int.type(torch.double) * m1.type(torch.double)\n                output1 = torch.round(output1 / 2.0 ** e1)\n                output = output1 + output\n            return torch.clamp(output.type(torch.float), -n - 1, n)\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        identity_grad = None\n        if ctx.identity is not None:\n            identity_grad = grad_output.clone() / ctx.z_scaling_factor\n        return (grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None)", "docstring": "Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.\n\nArgs:\npre_act (`torch.Tensor`):\nInput tensor.\npre_act_scaling_factor (`torch.Tensor`):\nScaling factor of the input tensor *pre_act*.\nbit_num (`int`):\nQuantization bitwidth.\nz_scaling_factor (`torch.Tensor`):\nScaling factor of the output tensor.\nidentity (`torch.Tensor`, *optional*):\nIdentity tensor, if exists.\nidentity_scaling_factor (`torch.Tensor`, *optional*):\nScaling factor of the identity tensor *identity*, if exists.\n\nReturns:\n`torch.Tensor`: Output tensor(*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and\n*identity*), whose scale is rescaled to *z_scaling_factor*.", "source": "github-repos"}
{"code": "def FromString(cls, desc):\n        \n\n        parse_exp = Literal(u'run_time').suppress() + time_interval(u'interval')\n\n        try:\n            data = parse_exp.parseString(desc)\n            return TimeBasedStopCondition(data[u'interval'][0])\n        except ParseException:\n            raise ArgumentError(u\"Could not parse time based stop condition\")", "docstring": "Parse this stop condition from a string representation.\n\nThe string needs to match:\nrun_time number [seconds|minutes|hours|days|months|years]\n\nArgs:\ndesc (str): The description\n\nReturns:\nTimeBasedStopCondition", "source": "juraj-google-style"}
{"code": "def load_from_checkpoint(self, sess, latest_filename=None):\n    \n    \n    self._create_initializers()\n    if self._save_path:\n      ckpt = tf.train.get_checkpoint_state(\n          os.path.dirname(self._save_path), latest_filename)\n      if ckpt and ckpt.all_model_checkpoint_paths:\n        \n        \n        self._saver = tf.train.Saver(saver_def=self._saver.as_saver_def())\n        self._saver.set_last_checkpoints(list(ckpt.all_model_checkpoint_paths))\n    if self._saver.last_checkpoints:\n      self._saver.restore(sess, self._saver.last_checkpoints[-1])\n      return self._saver.last_checkpoints[-1]\n    else:\n      return None", "docstring": "Loads the model from the most recent checkpoint.\n\nThis gets the most current list of checkpoints each time it is called.\n\nArgs:\nsess: The current session.\nlatest_filename: The filename for the latest set of checkpoints, defaults\nto 'checkpoints'.\nReturns:\nThe loaded checkpoint or None if it failed to load.", "source": "juraj-google-style"}
{"code": "def console_fill_foreground(\n    con: tcod.console.Console,\n    r: Sequence[int],\n    g: Sequence[int],\n    b: Sequence[int],\n) -> None:\n    \n    if len(r) != len(g) or len(r) != len(b):\n        raise TypeError(\"R, G and B must all have the same size.\")\n    if (\n        isinstance(r, np.ndarray)\n        and isinstance(g, np.ndarray)\n        and isinstance(b, np.ndarray)\n    ):\n        \n        r_ = np.ascontiguousarray(r, dtype=np.intc)\n        g_ = np.ascontiguousarray(g, dtype=np.intc)\n        b_ = np.ascontiguousarray(b, dtype=np.intc)\n        cr = ffi.cast(\"int *\", r_.ctypes.data)\n        cg = ffi.cast(\"int *\", g_.ctypes.data)\n        cb = ffi.cast(\"int *\", b_.ctypes.data)\n    else:\n        \n        cr = ffi.new(\"int[]\", r)\n        cg = ffi.new(\"int[]\", g)\n        cb = ffi.new(\"int[]\", b)\n\n    lib.TCOD_console_fill_foreground(_console(con), cr, cg, cb)", "docstring": "Fill the foregound of a console with r,g,b.\n\nArgs:\ncon (Console): Any Console instance.\nr (Sequence[int]): An array of integers with a length of width*height.\ng (Sequence[int]): An array of integers with a length of width*height.\nb (Sequence[int]): An array of integers with a length of width*height.\n\n.. deprecated:: 8.4\nYou should assign to :any:`tcod.console.Console.fg` instead.", "source": "juraj-google-style"}
{"code": "def make_supercells_with_defects(self, scaling_matrix):\n    scs = []\n    sc = self._structure.copy()\n    sc.make_supercell(scaling_matrix)\n    scs.append(sc)\n    for (ids, defect_site) in enumerate(self._defect_sites):\n        sc_with_inter = sc.copy()\n        sc_with_inter.append(defect_site.species_string, defect_site.frac_coords, coords_are_cartesian=False, validate_proximity=False, properties=None)\n        if (not sc_with_inter):\n            raise RuntimeError('could not generate supercell with interstitial {}'.format((ids + 1)))\n        scs.append(sc_with_inter.copy())\n    return scs", "docstring": "Generate a sequence of supercells\nin which each supercell contains a single interstitial,\nexcept for the first supercell in the sequence\nwhich is a copy of the defect-free input structure.\n\nArgs:\nscaling_matrix (3x3 integer array): scaling matrix\nto transform the lattice vectors.\nReturns:\nscs ([Structure]): sequence of supercells.", "source": "codesearchnet"}
{"code": "async def rename(self, name):\n    (await self._client.rename_conversation(hangouts_pb2.RenameConversationRequest(request_header=self._client.get_request_header(), new_name=name, event_request_header=self._get_event_request_header())))", "docstring": "Rename this conversation.\n\nHangouts only officially supports renaming group conversations, so\ncustom names for one-to-one conversations may or may not appear in all\nfirst party clients.\n\nArgs:\nname (str): New name.\n\nRaises:\n.NetworkError: If conversation cannot be renamed.", "source": "codesearchnet"}
{"code": "def StatResultFromStatEntry(\n    stat_entry):\n  \n  values = []\n  for attr in _STAT_ATTRS[:10]:\n    values.append(stat_entry.Get(attr))\n  return os.stat_result(values)", "docstring": "Returns a `os.stat_result` with most information from `StatEntry`.\n\nThis is a lossy conversion, only the 10 first stat_result fields are\npopulated, because the os.stat_result constructor is inflexible.\n\nArgs:\nstat_entry: An instance of rdf_client_fs.StatEntry.\n\nReturns:\nAn instance of `os.stat_result` with basic fields populated.", "source": "juraj-google-style"}
{"code": "def instantiate(self, substitutions):\n    param_dict = self.substitute_params(substitutions)\n    (pkg, ident) = self.name.rsplit('.', 1)\n    pkg = ('malcolm.modules.%s' % pkg)\n    try:\n        ob = importlib.import_module(pkg)\n    except ImportError as e:\n        raise_with_traceback(ImportError(('\\n%s:%d:\\n%s' % (self.filename, self.lineno, e))))\n    try:\n        ob = getattr(ob, ident)\n    except AttributeError:\n        raise_with_traceback(ImportError(('\\n%s:%d:\\nPackage %r has no ident %r' % (self.filename, self.lineno, pkg, ident))))\n    try:\n        model = MethodModel.from_callable(ob, returns=False)\n        args = model.validate(param_dict)\n        ret = ob(**args)\n    except Exception as e:\n        sourcefile = inspect.getsourcefile(ob)\n        lineno = inspect.getsourcelines(ob)[1]\n        raise_with_traceback(YamlError(('\\n%s:%d:\\n%s:%d:\\n%s' % (self.filename, self.lineno, sourcefile, lineno, e))))\n    else:\n        return ret", "docstring": "Keep recursing down from base using dotted name, then call it with\nself.params and args\n\nArgs:\nsubstitutions (dict): Substitutions to make to self.param_dict\n\nReturns:\nThe found object called with (*args, map_from_d)\n\nE.g. if ob is malcolm.parts, and name is \"ca.CADoublePart\", then the\nobject will be malcolm.parts.ca.CADoublePart", "source": "codesearchnet"}
{"code": "def _ExtractRequestSummaryFields(document):\n  \n  headers = document.childAtPath('Header/RequestHeader')\n  body = document.childAtPath('Body')\n\n  summary_fields = {\n      'methodName': body.getChildren()[0].name\n  }\n\n  \n  \n  client_customer_id = headers.getChild('clientCustomerId')\n  if client_customer_id is not None:\n    summary_fields['clientCustomerId'] = client_customer_id.text\n\n  \n  \n  network_code = headers.getChild('networkCode')\n  if network_code is not None:\n    summary_fields['networkCode'] = network_code.text\n\n  return summary_fields", "docstring": "Extract logging fields from the request's suds.sax.element.Element.\n\nArgs:\ndocument: A suds.sax.element.Element instance containing the API request.\n\nReturns:\nA dict mapping logging field names to their corresponding value.", "source": "juraj-google-style"}
{"code": "def result(self):\n    if self._read_only:\n        return self._result\n    with self._condition:\n        if (self._wait_for_tree and (not self._result_set_in_context)):\n            self._condition.wait_for((lambda : (self._tree_has_set or self._result_set_in_context)))\n        return self._result", "docstring": "Return the value at an address, optionally waiting until it is\nset from the context_manager, or set based on the pre-fetch mechanism.\n\nReturns:\n(bytes): The opaque value for an address.", "source": "codesearchnet"}
{"code": "def DecryptPrivateKey(self, encrypted_private_key):\n    aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)\n    return aes.decrypt(encrypted_private_key)", "docstring": "Decrypt the provided ciphertext with the initialized private key.\n\nArgs:\nencrypted_private_key (byte string): the ciphertext to be decrypted.\n\nReturns:\nbytes: the ciphertext.", "source": "codesearchnet"}
{"code": "def validate(data):\n        \n        try:\n            return Schema(Validator.SCHEMA).validate(data)\n        except SchemaError as exception:\n            logging.getLogger(__name__).error(exception)\n            return None", "docstring": "Validate data against the schema.\n\nArgs:\ndata(dict): data structure to validate.\n\nReturns:\ndict: data as provided and defaults where defined in schema.", "source": "juraj-google-style"}
{"code": "def GetRange(self, start, end=None, additional_headers=None, use_chunks=True):\n    self.EnsureInitialized()\n    progress_end_normalized = False\n    if (self.total_size is not None):\n        (progress, end_byte) = self.__NormalizeStartEnd(start, end)\n        progress_end_normalized = True\n    else:\n        progress = start\n        end_byte = end\n    while ((not progress_end_normalized) or (end_byte is None) or (progress <= end_byte)):\n        end_byte = self.__ComputeEndByte(progress, end=end_byte, use_chunks=use_chunks)\n        response = self.__GetChunk(progress, end_byte, additional_headers=additional_headers)\n        if (not progress_end_normalized):\n            self.__SetTotal(response.info)\n            (progress, end_byte) = self.__NormalizeStartEnd(start, end)\n            progress_end_normalized = True\n        response = self.__ProcessResponse(response)\n        progress += response.length\n        if (response.length == 0):\n            if (response.status_code == http_client.OK):\n                return\n            raise exceptions.TransferRetryError('Zero bytes unexpectedly returned in download response')", "docstring": "Retrieve a given byte range from this download, inclusive.\n\nRange must be of one of these three forms:\n* 0 <= start, end = None: Fetch from start to the end of the file.\n* 0 <= start <= end: Fetch the bytes from start to end.\n* start < 0, end = None: Fetch the last -start bytes of the file.\n\n(These variations correspond to those described in the HTTP 1.1\nprotocol for range headers in RFC 2616, sec. 14.35.1.)\n\nArgs:\nstart: (int) Where to start fetching bytes. (See above.)\nend: (int, optional) Where to stop fetching bytes. (See above.)\nadditional_headers: (bool, optional) Any additional headers to\npass with the request.\nuse_chunks: (bool, default: True) If False, ignore self.chunksize\nand fetch this range in a single request.\n\nReturns:\nNone. Streams bytes into self.stream.", "source": "codesearchnet"}
{"code": "def process(self):\n    threads = []\n    for client in self.find_clients(self.hostnames):\n        print(client)\n        thread = threading.Thread(target=self._process_thread, args=(client,))\n        threads.append(thread)\n        thread.start()\n    for thread in threads:\n        thread.join()", "docstring": "Collect the artifacts.\n\nRaises:\nDFTimewolfError: if no artifacts specified nor resolved by platform.", "source": "codesearchnet"}
{"code": "def parse_plugin_metadata(content):\n  \n  if not isinstance(content, bytes):\n    raise TypeError('Content type must be bytes')\n  result = plugin_data_pb2.PrCurvePluginData.FromString(content)\n  if result.version == 0:\n    return result\n  else:\n    logger.warn(\n        'Unknown metadata version: %s. The latest version known to '\n        'this build of TensorBoard is %s; perhaps a newer build is '\n        'available?', result.version, PROTO_VERSION)\n    return result", "docstring": "Parse summary metadata to a Python object.\n\nArguments:\ncontent: The `content` field of a `SummaryMetadata` proto\ncorresponding to the pr_curves plugin.\n\nReturns:\nA `PrCurvesPlugin` protobuf object.", "source": "juraj-google-style"}
{"code": "def IsPayable(self):\n    from neo.Core.State.ContractState import ContractPropertyState\n    return ((self.ContractProperties & ContractPropertyState.Payable) > 0)", "docstring": "Flag indicating if the contract accepts payments.\n\nReturns:\nbool: True if supported. False otherwise.", "source": "codesearchnet"}
{"code": "def destroy_cloudwatch_log_event(app='', env='dev', region=''):\n    \n\n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('logs')\n\n    \n    \n    cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app)\n\n    return True", "docstring": "Destroy Cloudwatch log event.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\nReturns:\nbool: True upon successful completion.", "source": "juraj-google-style"}
{"code": "def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):\n        \n        request = {\n            'ModelPackageName': name,\n            'ModelPackageDescription': description,\n            'SourceAlgorithmSpecification': {\n                'SourceAlgorithms': [\n                    {\n                        'AlgorithmName': algorithm_arn,\n                        'ModelDataUrl': model_data\n                    }\n                ]\n            }\n        }\n        try:\n            LOGGER.info('Creating model package with name: {}'.format(name))\n            self.sagemaker_client.create_model_package(**request)\n        except ClientError as e:\n            error_code = e.response['Error']['Code']\n            message = e.response['Error']['Message']\n\n            if (\n                    error_code == 'ValidationException'\n                    and 'ModelPackage already exists' in message\n            ):\n                LOGGER.warning('Using already existing model package: {}'.format(name))\n            else:\n                raise", "docstring": "Create a SageMaker Model Package from the results of training with an Algorithm Package\n\nArgs:\nname (str): ModelPackage name\ndescription (str): Model Package description\nalgorithm_arn (str): arn or name of the algorithm used for training.\nmodel_data (str): s3 URI to the model artifacts produced by training", "source": "juraj-google-style"}
{"code": "def do_ams_get_url(endpoint, access_token, flag=True):\n    \n    headers = {\"Content-Type\": json_acceptformat,\n               \"DataServiceVersion\": dsversion_min,\n               \"MaxDataServiceVersion\": dsversion_max,\n               \"Accept\": json_acceptformat,\n               \"Accept-Charset\" : charset,\n               \"Authorization\": \"Bearer \" + access_token,\n               \"x-ms-version\" : xmsversion}\n    body = ''\n    response = requests.get(endpoint, headers=headers, allow_redirects=flag)\n    if flag:\n        if response.status_code == 301:\n            response = requests.get(response.headers['location'], data=body, headers=headers)\n    return response", "docstring": "Do an AMS GET request to retrieve the Final AMS Endpoint and return JSON.\nArgs:\nendpoint (str): Azure Media Services Initial Endpoint.\naccess_token (str): A valid Azure authentication token.\nflag  (str): A Flag to follow the redirect or not.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def __init__(self, coords):\n        \n        self._coords = np.array(coords)\n        self.space_dim, self.simplex_dim = self._coords.shape\n        self.origin = self._coords[-1]\n        if self.space_dim == self.simplex_dim + 1:\n            \n            self._aug = np.concatenate([coords, np.ones((self.space_dim, 1))],\n                                       axis=-1)\n            self._aug_inv = np.linalg.inv(self._aug)", "docstring": "Initializes a Simplex from vertex coordinates.\n\nArgs:\ncoords ([[float]]): Coords of the vertices of the simplex. E.g.,\n[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].", "source": "juraj-google-style"}
{"code": "def select_tasks(self, nids=None, wslice=None, task_class=None):\n        \n        if nids is not None:\n            assert wslice is None\n            tasks = self.tasks_from_nids(nids)\n\n        elif wslice is not None:\n            tasks = []\n            for work in self[wslice]:\n                tasks.extend([t for t in work])\n        else:\n            \n            tasks = list(self.iflat_tasks())\n\n        \n        if task_class is not None:\n            tasks = [t for t in tasks if t.isinstance(task_class)]\n\n        return tasks", "docstring": "Return a list with a subset of tasks.\n\nArgs:\nnids: List of node identifiers.\nwslice: Slice object used to select works.\ntask_class: String or class used to select tasks. Ignored if None.\n\n.. note::\n\nnids and wslice are mutually exclusive.\nIf no argument is provided, the full list of tasks is returned.", "source": "juraj-google-style"}
{"code": "def unpack(self, parallel_tensor):\n    self._assert_eager()\n    unpacked_components = [[] for _ in range(len(self.components))]\n    with ops.device(self._name):\n        parallel_tensor = variable_utils.convert_variables_to_tensors(parallel_tensor)\n    for tensor in nest.flatten(parallel_tensor, expand_composites=True):\n        for accumulator, unpacked_tensor in zip(unpacked_components, self._unpack_tensor(tensor)):\n            accumulator.append(unpacked_tensor)\n    return [nest.pack_sequence_as(parallel_tensor, unpacked, expand_composites=True) for unpacked in unpacked_components]", "docstring": "Unpack a parallel tensor into its components.\n\nArgs:\nparallel_tensor: A tensor, composite tensor, or `tf.nest` of such placed\non the ParallelDevice. Passing `tf.Variable` objects reads their value,\nit does not share a mutable reference between the packed and unpacked\nforms.\n\nReturns:\nA list with the same length as `self.components` each with the same\nstructure as `parallel_tensor`, containing component tensors.", "source": "github-repos"}
{"code": "def parse_args(arglist=None):\n    climan = CLIManager(conf, **SUB_CMDS)\n    create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git', zsh_sourceable=True)\n    (cmd_args, all_subs) = climan.parse_args(arglist)\n    sub_cmd = cmd_args.loam_sub_name\n    if (sub_cmd is None):\n        return cmd_args.func\n    if (sub_cmd != 'config'):\n        commands.report_parsing_problems(PARSING_OUT)\n    if conf.common.set:\n        set_conf_str(conf, conf.common.set)\n    if conf.common.config:\n        commands.config_pp(all_subs)\n    load_mplstyle()\n    try:\n        _steps_to_slices()\n    except AttributeError:\n        pass\n    return cmd_args.func", "docstring": "Parse cmd line arguments.\n\nUpdate :attr:`stagpy.conf` accordingly.\n\nArgs:\narglist (list of str): the list of cmd line arguments. If set to\nNone, the arguments are taken from :attr:`sys.argv`.\n\nReturns:\nfunction: the function implementing the sub command to be executed.", "source": "codesearchnet"}
{"code": "def qc_data(self, tests, alias=None):\n        \n        \n        r = {m: c.quality(tests, alias) for m, c in self.data.items()}\n\n        s = self.qc_curve_group(tests, alias=alias)\n\n        for m, results in r.items():\n            if m in s:\n                results.update(s[m])\n\n        return r", "docstring": "Run a series of tests against the data and return the corresponding\nresults.\n\nArgs:\ntests (list): a list of functions.\n\nReturns:\nlist. The results. Stick to booleans (True = pass) or ints.", "source": "juraj-google-style"}
{"code": "def phenotypes_to_scored(self,phenotypes=None,overwrite=False):\n        \n        if not self.is_uniform(): raise ValueError(\"inconsistent phenotypes\")\n        if phenotypes is None: \n            phenotypes = self.phenotypes\n        elif isinstance(phenotypes,str):\n            phenotypes = [phenotypes]\n        def _post(binary,phenotype_label,phenotypes,overwrite):\n            d = binary.copy()\n            if len(set(phenotypes)&set(list(binary.keys()))) > 0 and overwrite==False:\n                raise ValueError(\"Error, phenotype already exists as a scored type\")\n            for label in phenotypes: d[label] = 0\n            if phenotype_label == phenotype_label and phenotype_label in phenotypes:\n                d[phenotype_label] = 1\n            return d\n        output = self.copy()\n        output['scored_calls'] = output.apply(lambda x: \n                _post(x['scored_calls'],x['phenotype_label'],phenotypes,overwrite)\n            ,1)\n        return output", "docstring": "Add mutually exclusive phenotypes to the scored calls\n\nArgs:\nphenotypes (list): a list of phenotypes to add to scored calls.  if none or not set, add them all\noverwrite (bool): if True allow the overwrite of a phenotype, if False, the phenotype must not exist in the scored calls\nReturns:\nCellDataFrame", "source": "juraj-google-style"}
{"code": "def __init__(self, timeout, proxy_config, cache):\n    \n    if not cache:\n      cache = zeep.cache.SqliteCache()\n    elif cache == ZeepServiceProxy.NO_CACHE:\n      cache = None\n\n    super(_ZeepProxyTransport, self).__init__(\n        timeout=timeout, operation_timeout=timeout, cache=cache)\n\n    self.session.proxies = proxy_config.proxies", "docstring": "Initializes _ZeepProxyTransport.\n\nArgs:\ntimeout: An integer timeout in MS for connections.\nproxy_config: A ProxyConfig instance representing proxy settings.\ncache: A zeep.cache.Base instance representing a cache strategy to employ.", "source": "juraj-google-style"}
{"code": "def broadcast_dynamic_shape_extended(a: DynamicRaggedShape, b: DynamicRaggedShape):\n    if a.row_partitions and b.row_partitions:\n        if a.dtype != b.dtype:\n            raise ValueError(\"Dtypes don't match\")\n    elif a.dtype != b.dtype:\n        if a.row_partitions:\n            b = b.with_dtype(a.dtype)\n        elif b.row_partitions:\n            a = a.with_dtype(b.dtype)\n        else:\n            a = a.with_dtype(dtypes.int64)\n            b = b.with_dtype(dtypes.int64)\n    if a.rank is None or b.rank is None:\n        raise ValueError('Unable to broadcast: unknown rank')\n    elif a.rank == 0:\n        return (b, _Broadcaster(a, b, []), _get_identity_broadcaster(b))\n    elif b.rank == 0:\n        return (a, _get_identity_broadcaster(a), _Broadcaster(b, a, []))\n    elif a.rank == 1 and b.rank == 1:\n        [a_layer, b_layer, target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape)\n        target_shape = DynamicRaggedShape._from_inner_shape(target)\n        return (target_shape, _Broadcaster(a, target_shape, [a_layer]), _Broadcaster(b, target_shape, [b_layer]))\n    if a.rank > b.rank:\n        c, bc, ac = _broadcast_dynamic_shape_extended_helper(b, a)\n        return (c, ac, bc)\n    return _broadcast_dynamic_shape_extended_helper(a, b)", "docstring": "Gets the smallest shape to which a and b can broadcast.\n\nIn order to create the smallest shape, one must also do most of the\nwork to figure out how to transform from the shapes given. Thus, in addition\nto returning the shape, it also creates transformations from the\noriginal shapes to the result.\n\nThis is the equivalent of:\n\nc = broadcast_dynamic_shape(a, b)\nac = get_broadcaster(a, c)\nbc = get_broadcaster(b, c)\nreturn (c, ac, bc)\n\nArgs:\na: a DynamicRaggedShape\nb: a DynamicRaggedShape\n\nReturns:\nA triple of a shape and two broadcasters.", "source": "github-repos"}
{"code": "def _add_common_constrain(token_lst: List[Dict], d: Dict) -> List[Dict]:\n        \n\n        result = []\n        for a_token in token_lst:\n            if not tf_transfer(d[\"is_required\"]):\n                a_token[\"OP\"] = \"?\"\n            result.append(a_token)\n        return result", "docstring": "Add common constrain for every token type, like \"is_required\"\nArgs:\ntoken_lst: List[Dict]\nd: Dict\n\nReturns: List[Dict]", "source": "juraj-google-style"}
{"code": "def parse_doctype(cls, file, encoding=None):\n    if encoding:\n        lxml_encoding = (to_lxml_encoding(encoding) or 'latin1')\n    else:\n        lxml_encoding = encoding\n    try:\n        parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True)\n        tree = lxml.etree.parse(io.BytesIO(wpull.util.peek_file(file)), parser=parser)\n        if (tree.getroot() is not None):\n            return tree.docinfo.doctype\n    except lxml.etree.LxmlError:\n        pass", "docstring": "Get the doctype from the document.\n\nReturns:\nstr, None", "source": "codesearchnet"}
{"code": "def power(self, n):\n        \n        if not isinstance(n, int):\n            raise QiskitError(\"Can only take integer powers of Operator.\")\n        if self.input_dims() != self.output_dims():\n            raise QiskitError(\"Can only power with input_dims = output_dims.\")\n        \n        \n        return Operator(\n            np.linalg.matrix_power(self.data, n), self.input_dims(),\n            self.output_dims())", "docstring": "Return the matrix power of the operator.\n\nArgs:\nn (int): the power to raise the matrix to.\n\nReturns:\nBaseOperator: the n-times composed operator.\n\nRaises:\nQiskitError: if the input and output dimensions of the operator\nare not equal, or the power is not a positive integer.", "source": "juraj-google-style"}
{"code": "def plot_soma(ax, soma, plane='xy', soma_outline=True, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):\n    (plane0, plane1) = _plane2col(plane)\n    color = _get_color(color, tree_type=NeuriteType.soma)\n    if isinstance(soma, SomaCylinders):\n        (plane0, plane1) = _plane2col(plane)\n        for (start, end) in zip(soma.points, soma.points[1:]):\n            common.project_cylinder_onto_2d(ax, (plane0, plane1), start=start[COLS.XYZ], end=end[COLS.XYZ], start_radius=start[COLS.R], end_radius=end[COLS.R], color=color, alpha=alpha)\n    elif soma_outline:\n        ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius, color=color, alpha=alpha))\n    else:\n        (plane0, plane1) = _plane2col(plane)\n        points = [(p[plane0], p[plane1]) for p in soma.iter()]\n        if points:\n            points.append(points[0])\n            ax.plot(points, color=color, alpha=alpha, linewidth=linewidth)\n    ax.set_xlabel(plane[0])\n    ax.set_ylabel(plane[1])\n    bounding_box = geom.bounding_box(soma)\n    ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]], [bounding_box[1][plane0], bounding_box[1][plane1]])), ignore=False)", "docstring": "Generates a 2d figure of the soma.\n\nArgs:\nax(matplotlib axes): on what to plot\nsoma(neurom.core.Soma): plotted soma\nplane(str): Any pair of 'xyz'\ndiameter_scale(float): Scale factor multiplied with segment diameters before plotting\nlinewidth(float): all segments are plotted with this width, but only if diameter_scale=None\ncolor(str or None): Color of plotted values, None corresponds to default choice\nalpha(float): Transparency of plotted values", "source": "codesearchnet"}
{"code": "def parse(self, s, term_join=None):\n        \n\n        if not term_join:\n            term_join = lambda x: '(' + ' OR '.join(x) + ')'\n\n        toks = self.scan(s)\n\n        \n        \n\n        \n        if toks and toks[0] and (toks[0][0] == self.TERM or toks[0][0] == self.QUOTEDTERM):\n            toks = [(self.MARKER, 'about')] + toks\n\n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n\n        bymarker = []\n        for t in toks:\n            if t[0] == self.MARKER:\n                bymarker.append((t[1], []))\n            else:\n                bymarker[-1][1].append(t)\n\n\n        \n        \n        \n        \n        \n        \n\n        \n        \n        comps = []\n        for t in bymarker:\n\n            t = list(t)\n\n            if t[0] == 'in' and len(t[1]) == 1 and isinstance(t[1][0][1], string_types) and self.stem(\n                    t[1][0][1]) in self.geograins.keys():\n                t[0] = 'by'\n\n            \n            if t[0] == 'from' and len(t[1]) == 1 and t[1][0][0] != self.YEAR:\n                t[0] = 'source'\n\n            comps.append(t)\n\n        \n        \n        \n        \n        \n\n        \n        groups = {marker: [] for marker, _ in comps}\n\n        for marker, terms in comps:\n            groups[marker] += [term for marker, term in terms]\n\n        \n        \n\n        \n        \n\n        for marker, group in groups.items():\n\n            if marker == 'about':\n                continue\n\n            if len(group) > 1 and marker not in self.multiterms:\n                groups[marker], extras = [group[0]], group[1:]\n\n                if not 'about' in groups:\n                    groups['about'] = extras\n                else:\n                    groups['about'] += extras\n\n            if marker == 'by':\n                groups['by'] = [ self.geograins.get(self.stem(e)) for e in group]\n\n        for marker, terms in iteritems(groups):\n\n            if len(terms) > 1:\n                if marker in 'in':\n                    groups[marker] = ' '.join(terms)\n                else:\n                    groups[marker] = term_join(terms)\n            elif len(terms) == 1:\n                groups[marker] = terms[0]\n            else:\n                pass\n\n        \n        \n        \n        \n        \n\n        \n\n\n        return groups", "docstring": "Parses search term to\n\nArgs:\ns (str): string with search term.\nor_join (callable): function to join 'OR' terms.\n\nReturns:\ndict: all of the terms grouped by marker. Key is a marker, value is a term.\n\nExample:\n>>> SearchTermParser().parse('table2 from 1978 to 1979 in california')\n{'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}", "source": "juraj-google-style"}
{"code": "def transform_rest_response(self, response_body):\n    \n    body_json = json.loads(response_body)\n    return json.dumps(body_json, indent=1, sort_keys=True)", "docstring": "Translates an apiserving REST response so it's ready to return.\n\nCurrently, the only thing that needs to be fixed here is indentation,\nso it's consistent with what the live app will return.\n\nArgs:\nresponse_body: A string containing the backend response.\n\nReturns:\nA reformatted version of the response JSON.", "source": "juraj-google-style"}
{"code": "def listen_forever(self, timeout_ms=30000, exception_handler=None, bad_sync_timeout=5):\n    _bad_sync_timeout = bad_sync_timeout\n    self.should_listen = True\n    while self.should_listen:\n        try:\n            self._sync(timeout_ms)\n            _bad_sync_timeout = bad_sync_timeout\n        except MatrixRequestError as e:\n            logger.warning('A MatrixRequestError occured during sync.')\n            if (e.code >= 500):\n                logger.warning('Problem occured serverside. Waiting %i seconds', bad_sync_timeout)\n                sleep(bad_sync_timeout)\n                _bad_sync_timeout = min((_bad_sync_timeout * 2), self.bad_sync_timeout_limit)\n            elif (exception_handler is not None):\n                exception_handler(e)\n            else:\n                raise\n        except Exception as e:\n            logger.exception('Exception thrown during sync')\n            if (exception_handler is not None):\n                exception_handler(e)\n            else:\n                raise", "docstring": "Keep listening for events forever.\n\nArgs:\ntimeout_ms (int): How long to poll the Home Server for before\nretrying.\nexception_handler (func(exception)): Optional exception handler\nfunction which can be used to handle exceptions in the caller\nthread.\nbad_sync_timeout (int): Base time to wait after an error before\nretrying. Will be increased according to exponential backoff.", "source": "codesearchnet"}
{"code": "def get_snapshots(self, volume_id_or_uri, start=0, count=(- 1), filter='', sort=''):\n    uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n    return self._client.get_all(start, count, filter=filter, sort=sort, uri=uri)", "docstring": "Gets all snapshots of a volume. Returns a list of snapshots based on optional sorting and filtering, and\nconstrained by start and count parameters.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume id or the volume uri.\nstart:\nThe first item to return, using 0-based indexing.\nIf not specified, the default is 0 - start with the first available item.\ncount:\nThe number of resources to return. A count of -1 requests all items.\nThe actual number of items in the response might differ from the requested\ncount if the sum of start and count exceeds the total number of items.\nfilter (list or str):\nA general filter/query string to narrow the list of items returned. The\ndefault is no filter; all resources are returned.\nsort:\nThe sort order of the returned data set. By default, the sort order is based\non create time with the oldest entry first.\n\nReturns:\nlist: A list of snapshots.", "source": "codesearchnet"}
{"code": "def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):\n        \n        if self.feature_file:\n            raise ValueError('Feature file associated with sequence, please remove file association to append '\n                             'additional features.')\n\n        if not feat_type:\n            feat_type = 'Manually added protein sequence single residue feature'\n        newfeat = SeqFeature(location=FeatureLocation(ExactPosition(resnum-1), ExactPosition(resnum)),\n                             type=feat_type,\n                             id=feat_id,\n                             qualifiers=qualifiers)\n\n        self.features.append(newfeat)", "docstring": "Add a feature to the features list describing a single residue.\n\nArgs:\nresnum (int): Protein sequence residue number\nfeat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')\nfeat_id (str, optional): Optional ID of the feature type (ie. 'TM1')", "source": "juraj-google-style"}
{"code": "def run(self, resources):\n        \n        if not resources['connection']._port.startswith('jlink'):\n            raise ArgumentError(\"FlashBoardStep is currently only possible through jlink\", invalid_port=args['port'])\n\n        hwman = resources['connection']\n        debug = hwman.hwman.debug(self._debug_string)\n        debug.flash(self._file)", "docstring": "Runs the flash step\n\nArgs:\nresources (dict): A dictionary containing the required resources that\nwe needed access to in order to perform this step.", "source": "juraj-google-style"}
{"code": "class EvalLoopContainer:\n\n    def __init__(self, do_nested_concat: bool=True, padding_index: int=-100):\n        self.do_nested_concat = do_nested_concat\n        self.padding_index = padding_index\n        self.tensors = None\n        self.arrays = None\n\n    def add(self, tensors) -> None:\n        \n        if self.tensors is None:\n            self.tensors = tensors if self.do_nested_concat else [tensors]\n        elif self.do_nested_concat:\n            self.tensors = nested_concat(self.tensors, tensors, padding_index=self.padding_index)\n        else:\n            self.tensors.append(tensors)\n\n    def to_cpu_and_numpy(self) -> None:\n        \n        if self.tensors is None:\n            return\n        new_arrays = nested_numpify(self.tensors)\n        if self.arrays is None:\n            self.arrays = new_arrays\n        elif self.do_nested_concat:\n            self.arrays = nested_concat(self.arrays, new_arrays, padding_index=self.padding_index)\n        else:\n            self.arrays.extend(new_arrays)\n        self.tensors = None\n\n    def get_arrays(self):\n        \n        self.to_cpu_and_numpy()\n        return self.arrays", "docstring": "Container to store intermediate results of evaluation loop.\n\nArgs:\ndo_nested_concat (`bool`, *optional*, defaults to `True`):\nIf set to `True`, each iteration will recursively concatenate a new object containing tensors to\nthe existing stored tensors, provided that the structure of the existing object and the new one\nare identical. If set to `False`, all newly added tensors will be stored in a list.\npadding_index (`int`, *optional*, defaults to -100):\nValue used to pad tensors of different shapes when `do_nested_concat=True`.", "source": "github-repos"}
{"code": "def AddPerformanceOptions(self, argument_group):\n    argument_group.add_argument('--buffer_size', '--buffer-size', '--bs', dest='buffer_size', action='store', default=0, help='The buffer size for the output (defaults to 196MiB).')\n    argument_group.add_argument('--queue_size', '--queue-size', dest='queue_size', action='store', default=0, help='The maximum number of queued items per worker (defaults to {0:d})'.format(self._DEFAULT_QUEUE_SIZE))", "docstring": "Adds the performance options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def with_port_scanning(cls):\n\n    def init(wsgi_app, flags):\n        should_scan = (flags.port is None)\n        base_port = (core_plugin.DEFAULT_PORT if (flags.port is None) else flags.port)\n        max_attempts = (10 if should_scan else 1)\n        if (base_port > 65535):\n            raise TensorBoardServerException(('TensorBoard cannot bind to port %d > %d' % (base_port, 65535)))\n        max_attempts = (10 if should_scan else 1)\n        base_port = (min((base_port + max_attempts), 65536) - max_attempts)\n        for port in xrange(base_port, (base_port + max_attempts)):\n            subflags = argparse.Namespace(**vars(flags))\n            subflags.port = port\n            try:\n                return cls(wsgi_app=wsgi_app, flags=subflags)\n            except TensorBoardPortInUseError:\n                if (not should_scan):\n                    raise\n        raise TensorBoardServerException(('TensorBoard could not bind to any port around %s (tried %d times)' % (base_port, max_attempts)))\n    return init", "docstring": "Create a server factory that performs port scanning.\n\nThis function returns a callable whose signature matches the\nspecification of `TensorBoardServer.__init__`, using `cls` as an\nunderlying implementation. It passes through `flags` unchanged except\nin the case that `flags.port is None`, in which case it repeatedly\ninstantiates the underlying server with new port suggestions.\n\nArgs:\ncls: A valid implementation of `TensorBoardServer`. This class's\ninitializer should raise a `TensorBoardPortInUseError` upon\nfailing to bind to a port when it is expected that binding to\nanother nearby port might succeed.\n\nThe initializer for `cls` will only ever be invoked with `flags`\nsuch that `flags.port is not None`.\n\nReturns:\nA function that implements the `__init__` contract of\n`TensorBoardServer`.", "source": "codesearchnet"}
{"code": "def transpose(vari):\n    if isinstance(vari, Poly):\n        core = vari.A.copy()\n        for key in vari.keys:\n            core[key] = transpose(core[key])\n        return Poly(core, vari.dim, vari.shape[::(- 1)], vari.dtype)\n    return numpy.transpose(vari)", "docstring": "Transpose a shapeable quantety.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nQuantety of interest.\n\nReturns:\n(chaospy.poly.base.Poly, numpy.ndarray):\nSame type as ``vari``.\n\nExamples:\n>>> P = chaospy.reshape(chaospy.prange(4), (2,2))\n>>> print(P)\n[[1, q0], [q0^2, q0^3]]\n>>> print(chaospy.transpose(P))\n[[1, q0^2], [q0, q0^3]]", "source": "codesearchnet"}
{"code": "def _activation_summary(x):\n  \n  \n  \n  tf.histogram_summary(x.name + '/activations', x)\n  tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))", "docstring": "Helper to create summaries for activations.\nCreates a summary that provides a histogram of activations.\nCreates a summary that measure the sparsity of activations.\nArgs:\nx: Tensor\nReturns:\nnothing", "source": "juraj-google-style"}
{"code": "def parse_table_schema_from_json(schema_string):\n    try:\n        json_schema = json.loads(schema_string)\n    except JSONDecodeError as e:\n        raise ValueError('Unable to parse JSON schema: %s - %r' % (schema_string, e))\n\n    def _parse_schema_field(field):\n        \n        schema = bigquery.TableFieldSchema()\n        schema.name = field['name']\n        schema.type = field['type']\n        if 'mode' in field:\n            schema.mode = field['mode']\n        else:\n            schema.mode = 'NULLABLE'\n        if 'description' in field:\n            schema.description = field['description']\n        if 'fields' in field:\n            schema.fields = [_parse_schema_field(x) for x in field['fields']]\n        return schema\n    fields = [_parse_schema_field(f) for f in json_schema['fields']]\n    return bigquery.TableSchema(fields=fields)", "docstring": "Parse the Table Schema provided as string.\n\nArgs:\nschema_string: String serialized table schema, should be a valid JSON.\n\nReturns:\nA TableSchema of the BigQuery export from either the Query or the Table.", "source": "github-repos"}
{"code": "def _remove_double_brackets(text):\n\n    def replacement_fn(s):\n        if (':' in s):\n            return ''\n        bar_pos = s.find('|')\n        if (bar_pos == (- 1)):\n            return s\n        return s[(bar_pos + 1):]\n    return _find_and_replace(text, '[[', ']]', replacement_fn)", "docstring": "Remove double brackets, but leave the viewable text.\n\nArgs:\ntext: a string\nReturns:\na string", "source": "codesearchnet"}
{"code": "def trace_max_buffer_capacity(self):\n        \n        cmd = enums.JLinkTraceCommand.GET_MAX_CAPACITY\n        data = ctypes.c_uint32(0)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n        if (res == 1):\n            raise errors.JLinkException('Failed to get max trace buffer size.')\n        return data.value", "docstring": "Retrieves the maximum size the trace buffer can be configured with.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe maximum configurable capacity for the trace buffer.", "source": "juraj-google-style"}
{"code": "def __init__(self, config_builder, height=1000):\n    \n    tf.logging.set_verbosity(tf.logging.WARN)\n    config = config_builder.build()\n    copied_config = dict(config)\n    self.estimator_and_spec = (\n      dict(config.get('estimator_and_spec'))\n      if 'estimator_and_spec' in config else {})\n    self.compare_estimator_and_spec = (\n      dict(config.get('compare_estimator_and_spec'))\n      if 'compare_estimator_and_spec' in config else {})\n    if 'estimator_and_spec' in copied_config:\n      del copied_config['estimator_and_spec']\n    if 'compare_estimator_and_spec' in copied_config:\n      del copied_config['compare_estimator_and_spec']\n\n    self.custom_predict_fn = (\n      config.get('custom_predict_fn')\n      if 'custom_predict_fn' in config else None)\n    self.compare_custom_predict_fn = (\n      config.get('compare_custom_predict_fn')\n      if 'compare_custom_predict_fn' in config else None)\n    if 'custom_predict_fn' in copied_config:\n      del copied_config['custom_predict_fn']\n    if 'compare_custom_predict_fn' in copied_config:\n      del copied_config['compare_custom_predict_fn']\n\n\n    self._set_examples(config['examples'])\n    del copied_config['examples']\n\n    self.config = copied_config\n\n    \n    WitWidget.widgets.append(self)\n\n    \n    display.display(display.HTML(self._get_element_html()))\n    display.display(display.HTML(\n      WIT_HTML.format(\n        examples=json.dumps(self.examples), height=height, id=WitWidget.index)))\n\n    \n    WitWidget.index += 1\n\n    \n    output.eval_js(.format(\n      config=json.dumps(self.config)))\n    output.eval_js('updateExamplesCallback()')\n    self._generate_sprite()", "docstring": "Constructor for colab notebook WitWidget.\n\nArgs:\nconfig_builder: WitConfigBuilder object containing settings for WIT.\nheight: Optional height in pixels for WIT to occupy. Defaults to 1000.", "source": "juraj-google-style"}
{"code": "def is_auth(self):\n    if (self.user_id is None):\n        self.user_id = self.session.get('user_id')\n    return bool(self.user_id)", "docstring": "A property that indicates if current user is logged in or not.\n\nReturns:\nBoolean.", "source": "codesearchnet"}
{"code": "def __init__(self, func, argnames, func_name=None, grad_func=None, python_grad_func=None, out_names=None, **kwargs):\n    self._func = func\n    self._argnames = argnames\n    self._func_name = func_name\n    assert grad_func is None or isinstance(grad_func, _OverloadedFunction)\n    self._grad_func = grad_func\n    self._python_grad_func = python_grad_func\n    self._out_names = out_names\n    self._extra_kwargs = kwargs\n    self._overload = {}", "docstring": "Creates _DefinedFunction.\n\nArgs:\nfunc:  A python callable which constructs a tf function body.\nargnames: A list of strings for function argument names.\nfunc_name: The function name. Defaults to None, in which derives from\n'func'.\ngrad_func: This function's gradient function, if not None. Defaults\nto None.\npython_grad_func: A python callable implementing the gradient of\nthe function python-side.\nout_names: A list of strings for the function return value names.\n**kwargs: The keyword arguments. **kwargs is passed to every call\nsite of this function.\n\nRaises:\nValueError: The function definition is invalid.", "source": "github-repos"}
{"code": "def dstack(tup):\n    \n    \n    arrays = list(tup)\n    for i in range(len(arrays)):\n        if arrays[i].ndim is 1:\n            arrays[i] = arrays[i][np.newaxis, :]\n        if arrays[i].ndim is 2:\n            arrays[i] = arrays[i][:, :, np.newaxis]\n    return concatenate(arrays, axis=2)", "docstring": "Stack arrays in sequence depth wise (along third dimension),\nhandling ``RemoteArray`` and ``DistArray`` without moving data.\n\nArgs:\ntup (sequence of array_like)\n\nReturns:\nres: `ndarray`, if inputs were all local\n`RemoteArray`, if inputs were all on the same remote engine\n`DistArray`, if inputs were already scattered on different engines", "source": "juraj-google-style"}
{"code": "def close(self, virtual_account_id, data={}, **kwargs):\n        \n        url = \"{}/{}\".format(self.base_url, virtual_account_id)\n        data['status'] = 'closed'\n        return self.patch_url(url, data, **kwargs)", "docstring": "Close Virtual Account from given Id\n\nArgs:\nvirtual_account_id :\nId for which Virtual Account objects has to be Closed", "source": "juraj-google-style"}
{"code": "def query(self, query, additional_locals=None, safe_mode=False):\n    logger.debug('Attempting to execute database query: %s', query)\n    if (safe_mode and (not isinstance(query, dict))):\n        raise SafetyViolationError(context=self.error_context)\n    if isinstance(query, dict):\n        logger.debug('Executing query in safe mode (MLAlchemy)')\n        return mlalchemy.parse_query(query).to_sqlalchemy(self.session, self.tables).all()\n    else:\n        logger.debug('Executing unsafe query (Python exec())')\n        if (additional_locals is not None):\n            for (k, v) in iteritems(additional_locals):\n                locals()[k] = v\n        exec(compile(('result = %s' % query.strip()), '<string>', 'exec'), globals(), locals())\n        return locals()['result']", "docstring": "Executes the given SQLAlchemy query string.\n\nArgs:\nquery: The SQLAlchemy ORM query (or Python code) to be executed.\nadditional_locals: Any additional local variables to inject into the execution context\nwhen executing the query.\nsafe_mode: Boolean value indicating whether or not to execute queries in safe mode\nonly. If True, this only allows MLAlchemy-style queries. If False, this allows\nboth exec() and MLAlchemy-style queries. Default: False.\n\nReturns:\nThe result of executing the query.", "source": "codesearchnet"}
{"code": "def order_by(self, key_selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call order_by() on a closed Queryable.')\n    if (not is_callable(key_selector)):\n        raise TypeError('order_by() parameter key_selector={key_selector} is not callable'.format(key_selector=repr(key_selector)))\n    return self._create_ordered(iter(self), (- 1), key_selector)", "docstring": "Sorts by a key in ascending order.\n\nIntroduces a primary sorting order to the sequence. Additional sort\ncriteria should be specified by subsequent calls to then_by() and\nthen_by_descending().  Calling order_by() or order_by_descending() on\nthe results of a call to order_by() will introduce a new primary\nordering which will override any already established ordering.\n\nThis method performs a stable sort. The order of two elements with the\nsame key will be preserved.\n\nNote: This method uses deferred execution.\n\nArgs:\nkey_selector: A unary function which extracts a key from each\nelement using which the result will be ordered.\n\nReturns:\nAn OrderedQueryable over the sorted elements.\n\nRaises:\nValueError: If the Queryable is closed.\nTypeError: If the key_selector is not callable.", "source": "codesearchnet"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_TF_Numpy'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def defaultStorable(self, python_type=None, storable_type=None, version=None, **kwargs):\n    if (python_type is None):\n        python_type = lookup_type(storable_type)\n    if self.verbose:\n        print('generating storable instance for type: {}'.format(python_type))\n    self.storables.registerStorable(default_storable(python_type, version=version, storable_type=storable_type), **kwargs)\n    return self.byPythonType(python_type, True).asVersion(version)", "docstring": "Generate a default storable instance.\n\nArguments:\n\npython_type (type): Python type of the object.\n\nstorable_type (str): storable type name.\n\nversion (tuple): version number of the storable handler.\n\nReturns:\n\nStorableHandler: storable instance.\n\nExtra keyword arguments are passed to :meth:`registerStorable`.", "source": "codesearchnet"}
{"code": "def copy_pkg(self, filename, id_=-1):\n        \n        for repo in self._children:\n            repo.copy_pkg(filename, id_)", "docstring": "Copy a pkg, dmg, or zip to all repositories.\n\nArgs:\nfilename: String path to the local file to copy.\nid_: Integer ID you wish to associate package with for a JDS\nor CDP only. Default is -1, which is used for creating\na new package object in the database.", "source": "juraj-google-style"}
{"code": "def en004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `en004`'.format(value))\n\n        self._en004 = value", "docstring": "Corresponds to IDD Field `en004`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `en004`\nUnit: kJ/kg\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def sendMessage(self, exchange, routing_key, message, properties=None, UUID=None):\n    if (properties is None):\n        properties = pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers={})\n    if (UUID is not None):\n        if (properties.headers is None):\n            properties.headers = {}\n        properties.headers['UUID'] = UUID\n    self.channel.basic_publish(exchange=exchange, routing_key=routing_key, properties=properties, body=message)", "docstring": "With this function, you can send message to `exchange`.\n\nArgs:\nexchange (str): name of exchange you want to message to be\ndelivered\nrouting_key (str): which routing key to use in headers of message\nmessage (str): body of message\nproperties (dict ,optional): properties of message - if not used,\nor set to ``None``, ``self.content_type``\nand ``delivery_mode=2`` (persistent) is\nused\nUUID (str, optional): UUID of the message. If set, it is included\ninto ``properties`` of the message.", "source": "codesearchnet"}
{"code": "def where_function(function: _evaluation.WhereFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.Select]) -> _sql_data_types.Select:\n    del function\n    if not operand_result:\n        return _sql_data_types.Select(select_part=_sql_data_types.RawExpression('NULL', _sql_alias='where_clause_', _sql_data_type=_sql_data_types.Undefined), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)\n    criteria = list(params_result)[0]\n    where_part = f'{operand_result.where_part} AND {criteria.as_operand()}' if operand_result.where_part else criteria.as_operand()\n    if operand_result.from_part:\n        from_part = operand_result.from_part\n    elif isinstance(operand_result.sql_data_type, _sql_data_types.Struct):\n        from_part = f'(SELECT {operand_result.select_part}.*) AS {operand_result.sql_alias}'\n    return _sql_data_types.Select(select_part=operand_result.select_part, from_part=from_part, where_part=where_part, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Returns a collection of all the items that match the criteria expression.\n\nThis function takes one param (`criteria`) in addition to the operand.\n\nIf the operand is not provided the matches function returns the empty set\nwhich in this function translates to NULL.\n\nReturns an error in the event that the `criteria` param is not provided or its\ndata type is not bool.\n\n\nArgs:\nfunction: The FHIRPath AST `WhereFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def install_napp(cls, mgr):\n    try:\n        LOG.info('    Searching local NApp...')\n        mgr.install_local()\n        LOG.info('    Found and installed.')\n    except FileNotFoundError:\n        LOG.info('    Not found. Downloading from NApps Server...')\n        try:\n            mgr.install_remote()\n            LOG.info('    Downloaded and installed.')\n            return\n        except HTTPError as exception:\n            if (exception.code == 404):\n                LOG.error('    NApp not found.')\n            else:\n                LOG.error('    NApps Server error: %s', exception)\n        except URLError as exception:\n            LOG.error('    NApps Server error: %s', str(exception.reason))\n        raise KytosException('NApp not found.')", "docstring": "Install a NApp.\n\nRaises:\nKytosException: If a NApp hasn't been found.", "source": "codesearchnet"}
{"code": "def read_meta_graph_file(filename):\n    meta_graph_def = meta_graph_pb2.MetaGraphDef()\n    if not file_io.file_exists(filename):\n        raise IOError(f'File does not exist. Received: {filename}.')\n    with file_io.FileIO(filename, 'rb') as f:\n        file_content = f.read()\n    try:\n        meta_graph_def.ParseFromString(file_content)\n        if sys.byteorder == 'big':\n            bst.swap_tensor_content_in_graph_function(meta_graph_def, 'little', 'big')\n        return meta_graph_def\n    except Exception:\n        pass\n    try:\n        text_format.Merge(file_content.decode('utf-8'), meta_graph_def)\n        if sys.byteorder == 'big':\n            bst.swap_tensor_content_in_graph_function(meta_graph_def, 'little', 'big')\n    except text_format.ParseError as e:\n        raise IOError(f'Cannot parse file {filename}: {str(e)}.')\n    return meta_graph_def", "docstring": "Reads a file containing `MetaGraphDef` and returns the protocol buffer.\n\nArgs:\nfilename: `meta_graph_def` filename including the path.\n\nReturns:\nA `MetaGraphDef` protocol buffer.\n\nRaises:\nIOError: If the file doesn't exist, or cannot be successfully parsed.", "source": "github-repos"}
{"code": "def __init__(self, form, features):\n        \n        self.form = form\n        self.features = features", "docstring": "Construct Segment objectself.\n\nArgs:\nform (string): the segment as ipa\nfeatures (list): the segment as feature_names", "source": "juraj-google-style"}
{"code": "def __getitem__(self, id):\n        \n        if id == slice(None, None):\n            return list(self)\n        response = backend.spreadsheet(self._sheets, id)\n        result = models.SpreadSheet._from_response(response, self._sheets)\n        result._api = self\n        return result", "docstring": "Fetch and return the spreadsheet with the given id.\n\nArgs:\nid (str): unique alphanumeric id of the spreadsheet\nReturns:\nSpreadSheet: new SpreadSheet instance\nRaises:\nKeyError: if no spreadsheet with the given ``id`` is found", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def write_model(model_object, output_tflite_file):\n    if sys.byteorder == 'big':\n        model_object = copy.deepcopy(model_object)\n        byte_swap_tflite_model_obj(model_object, 'big', 'little')\n    model_bytearray = convert_object_to_bytearray(model_object)\n    with gfile.GFile(output_tflite_file, 'wb') as output_file_handle:\n        output_file_handle.write(model_bytearray)", "docstring": "Writes the tflite model, a python object, into the output file.\n\nNOTE: This API only works for TFLite generated with\n_experimental_use_buffer_offset=false\n\nArgs:\nmodel_object: A tflite model as a python object\noutput_tflite_file: Full path name to the output tflite file.\n\nRaises:\nIOError: If output_tflite_file path is invalid or cannot be opened.", "source": "github-repos"}
{"code": "def _pre_run(self):\n    stage_name = STAGE_NAME_PRE_RUN\n    record = records.TestResultRecord(stage_name, self.TAG)\n    record.test_begin()\n    self.current_test_info = runtime_test_info.RuntimeTestInfo(stage_name, self.log_path, record)\n    try:\n        with self._log_test_stage(stage_name):\n            self.pre_run()\n        return True\n    except Exception as e:\n        logging.exception('%s failed for %s.', stage_name, self.TAG)\n        record.test_error(e)\n        self.results.add_class_error(record)\n        self.summary_writer.dump(record.to_dict(), records.TestSummaryEntryType.RECORD)\n        return False", "docstring": "Proxy function to guarantee the base implementation of `pre_run` is\ncalled.\n\nReturns:\nTrue if setup is successful, False otherwise.", "source": "github-repos"}
{"code": "def approve(self, sha=None, **kwargs):\n        \n        path = '%s/%s/approve' % (self.manager.path, self.get_id())\n        data = {}\n        if sha:\n            data['sha'] = sha\n\n        server_data = self.manager.gitlab.http_post(path, post_data=data,\n                                                    **kwargs)\n        self._update_attrs(server_data)", "docstring": "Approve the merge request.\n\nArgs:\nsha (str): Head SHA of MR\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMRApprovalError: If the approval failed", "source": "juraj-google-style"}
{"code": "def get_text_features(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, return_dict: Optional[bool]=None) -> tf.Tensor:\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.blip.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=return_dict)\n    pooled_output = text_outputs[1]\n    text_features = self.blip.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\nthe projection layer to the pooled output of [`TFBlipTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoProcessor, TFBlipModel\n\n>>> model = TFBlipModel.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n>>> processor = AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n\n>>> inputs = processor(text=[\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"tf\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def _Read(self, input_file, schema, raw_binary=False):\n    raw_binary = ['--raw-binary'] if raw_binary else []\n    with TemporaryDirectoryResource() as tempdir:\n        basename = os.path.basename(input_file)\n        basename_no_extension, extension = os.path.splitext(basename)\n        if extension in ['.bin', '.tflite']:\n            returncode = subprocess.call([self._flatc_path, '-t', '--strict-json', '--defaults-json'] + raw_binary + ['-o', tempdir, schema, '--', input_file])\n            if returncode != 0:\n                raise RuntimeError('flatc failed to convert from binary to json.')\n            json_file = os.path.join(tempdir, basename_no_extension + '.json')\n            if not os.path.exists(json_file):\n                raise RuntimeError('Could not find %r' % json_file)\n        elif extension == '.json':\n            json_file = input_file\n        else:\n            raise ValueError('Invalid extension on input file %r' % input_file)\n        return json.load(open(json_file))", "docstring": "Read a tflite model assuming the given flatbuffer schema.\n\nIf `input_file` is in bin, then we must use flatc to convert the schema\nfrom binary to json.\n\nArgs:\ninput_file: a binary (flatbuffer) or json file to read from. Extension\nmust  be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or\nFlatBuffer JSON.\nschema: which schema to use for reading\nraw_binary: whether to assume raw_binary (versions previous to v3)\nthat lacked file_identifier require this.\n\nRaises:\nRuntimeError: 1. When flatc cannot be invoked.\n2. When json file does not exists.\nValueError: When the extension is not json or bin.\n\nReturns:\nA dictionary representing the read tflite model.", "source": "github-repos"}
{"code": "def register_converter(src_type: Union[Type[Any], Tuple[Type[Any], ...]], dest_type: Union[Type[Any], Tuple[Type[Any], ...]], convert_fn: Callable[[Any], Any]) -> None:\n    _TYPE_CONVERTER_REGISTRY.register(src_type, dest_type, convert_fn)", "docstring": "Register converter from source type to destination type.\n\nExamples::\n\n# Add converter from int to float.\npg.typing.register_converter(int, float, float)\n\nassert pg.typing.Float().apply(1) is 1.0\n\n# Add converter from a dict to class A.\ndef from_dict(d):\nreturn A(**d)\n\nassert isinstance(pg.typing.Object(A).apply({'x': 1, 'y': 2}), A)\n\nArgs:\nsrc_type: Source value type.\ndest_type: Target value type.\nconvert_fn: Function that performs the conversion, in signature\n(src_type) -> dest_type.", "source": "github-repos"}
{"code": "def _RemoveForwardedIps(self, forwarded_ips, interface):\n    \n    for address in forwarded_ips:\n      self.ip_forwarding_utils.RemoveForwardedIp(address, interface)", "docstring": "Remove the forwarded IP addresses from the network interface.\n\nArgs:\nforwarded_ips: list, the forwarded IP address strings to delete.\ninterface: string, the output device to use.", "source": "juraj-google-style"}
{"code": "def assets(self, asset_type=None):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    if (not asset_type):\n        return self.tc_requests.adversary_assets(self.api_type, self.api_sub_type, self.unique_id)\n    if (asset_type == 'PHONE'):\n        return self.tc_requests.adversary_phone_assets(self.api_type, self.api_sub_type, self.unique_id)\n    if (asset_type == 'HANDLER'):\n        return self.tc_requests.adversary_handle_assets(self.api_type, self.api_sub_type, self.unique_id)\n    if (asset_type == 'URL'):\n        return self.tc_requests.adversary_url_assets(self.api_type, self.api_sub_type, self.unique_id)\n    self._tcex.handle_error(925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type])\n    return None", "docstring": "Retrieves all of the assets of a given asset_type\n\nArgs:\nasset_type: (str) Either None, PHONE, HANDLER, or URL\n\nReturns:", "source": "codesearchnet"}
{"code": "def inference(images):\n  \n  \n  \n  \n  \n  \n  \n  with tf.variable_scope('conv1') as scope:\n    kernel = _variable_with_weight_decay('weights',\n                                         shape=[5, 5, 3, 64],\n                                         stddev=5e-2,\n                                         wd=0.0)\n    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))\n    pre_activation = tf.nn.bias_add(conv, biases)\n    conv1 = tf.nn.relu(pre_activation, name=scope.name)\n    _activation_summary(conv1)\n\n  \n  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n                         padding='SAME', name='pool1')\n  \n  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n                    name='norm1')\n\n  \n  with tf.variable_scope('conv2') as scope:\n    kernel = _variable_with_weight_decay('weights',\n                                         shape=[5, 5, 64, 64],\n                                         stddev=5e-2,\n                                         wd=0.0)\n    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))\n    pre_activation = tf.nn.bias_add(conv, biases)\n    conv2 = tf.nn.relu(pre_activation, name=scope.name)\n    _activation_summary(conv2)\n\n  \n  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n                    name='norm2')\n  \n  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n  \n  with tf.variable_scope('local3') as scope:\n    \n    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])\n    dim = reshape.get_shape()[1].value\n    weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n                                          stddev=0.04, wd=0.004)\n    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\n    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n    _activation_summary(local3)\n\n  \n  with tf.variable_scope('local4') as scope:\n    weights = _variable_with_weight_decay('weights', shape=[384, 192],\n                                          stddev=0.04, wd=0.004)\n    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))\n    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n    _activation_summary(local4)\n\n  \n  \n  \n  \n  with tf.variable_scope('softmax_linear') as scope:\n    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n                                          stddev=1/192.0, wd=0.0)\n    biases = _variable_on_cpu('biases', [NUM_CLASSES],\n                              tf.constant_initializer(0.0))\n    softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)\n    _activation_summary(softmax_linear)\n\n  return softmax_linear", "docstring": "Build the CIFAR-10 model.\n\nArgs:\nimages: Images returned from distorted_inputs() or inputs().\n\nReturns:\nLogits.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent, module_name, module_ui):\n        \n\n        super(ModuleUIBaseFrame, self).__init__(parent, padding=8)\n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(1, weight=1)\n\n        if module_ui is not None:\n            \n            module_ui.ModuleUIFrame(self).grid(row=0, column=0, sticky=\"W E N S\")\n        else:\n            logger.debug(\"No _ui.py found for '{}'\".format(module_name))\n\n        \n        help_frame = ttk.LabelFrame(self, padding=8, text=\"Help\")\n        help_frame.grid(row=1, column=0, sticky=\"W E N S\")\n        help_frame.columnconfigure(0, weight=1)\n        help_frame.rowconfigure(0, weight=1)\n        \n        _dir = os.path.realpath(\n            os.path.join(os.getcwd(), os.path.dirname(__file__)))\n        help_path = \"{}/modules/{}/{}\".format(_dir, module_name, \"_help.json\")\n        if os.path.isfile(help_path):\n            \n            helptools.add_help_text(help_frame, help_path)\n        else:\n            \n            tk.Label(help_frame, text=\"No _help.json file found for '{}'\".format(module_name)).grid(row=0, column=0,\n                                                                                                    sticky=\"W E N S\")", "docstring": "Create a new base for a module UI\n\nArgs:\nparent: A tk or ttk object\nmodule_name (str): The name of the module\nmodule_ui: The _ui.py file to add for the module", "source": "juraj-google-style"}
{"code": "def parse_latitude(latitude, hemisphere):\n    \n    latitude = int(latitude[:2]) + float(latitude[2:]) / 60\n    if hemisphere == 'S':\n        latitude = -latitude\n    elif not hemisphere == 'N':\n        raise ValueError('Incorrect North/South value %r' % hemisphere)\n    return latitude", "docstring": "Parse a NMEA-formatted latitude pair.\n\nArgs:\nlatitude (str): Latitude in DDMM.MMMM\nhemisphere (str): North or South\n\nReturns:\nfloat: Decimal representation of latitude", "source": "juraj-google-style"}
{"code": "def container(self, container_name):\n    original_container = self._container\n    with ops.init_scope():\n        original_init_container = ops.get_default_graph()._container\n    try:\n        self._container = container_name\n        with ops.init_scope():\n            ops.get_default_graph()._container = container_name\n        yield self._container\n    finally:\n        self._container = original_container\n        with ops.init_scope():\n            ops.get_default_graph()._container = original_init_container", "docstring": "Returns a context manager that specifies the resource container to use.\n\nOverridden from `tf.Graph` to update both the init_scope container\nand the present inner container. This is necessary to make sure setting\ncontainers applies correctly both to created variables and to stateful\nops.\n\nArgs:\ncontainer_name: container name string.\n\nReturns:\nA context manager for defining resource containers for stateful ops,\nyields the container name.", "source": "github-repos"}
{"code": "def get_atten(self):\n    return self.attenuation_device.get_atten(self.idx)", "docstring": "Gets the current attenuation setting of Attenuator.\n\nReturns:\nA float that is the current attenuation value. Unit is db.", "source": "github-repos"}
{"code": "def setup_keyword(dist, _, value):\n    \n    \n    if value is not True:\n        return\n    dist.entry_points = _ensure_entry_points_is_dict(dist.entry_points)\n\n    for command, subcommands in six.iteritems(_get_commands(dist)):\n        entry_point = '{command} = rcli.dispatcher:main'.format(\n            command=command)\n        entry_points = dist.entry_points.setdefault('console_scripts', [])\n        if entry_point not in entry_points:\n            entry_points.append(entry_point)\n        dist.entry_points.setdefault('rcli', []).extend(subcommands)", "docstring": "Add autodetected commands as entry points.\n\nArgs:\ndist: The distutils Distribution object for the project being\ninstalled.\n_: The keyword used in the setup function. Unused.\nvalue: The value set to the keyword in the setup function. If the value\nis not True, this function will do nothing.", "source": "juraj-google-style"}
{"code": "def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):\n    return internal_convert_to_tensor_or_indexed_slices(value=value, dtype=dtype, name=name, as_ref=False)", "docstring": "Converts the given object to a `Tensor` or an `IndexedSlices`.\n\nIf `value` is an `IndexedSlices` or `SparseTensor` it is returned\nunmodified. Otherwise, it is converted to a `Tensor` using\n`convert_to_tensor()`.\n\nArgs:\nvalue: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed\nby `convert_to_tensor()`.\ndtype: (Optional.) The required `DType` of the returned `Tensor` or\n`IndexedSlices`.\nname: (Optional.) A name to use if a new `Tensor` is created.\n\nReturns:\nA `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.\n\nRaises:\nValueError: If `dtype` does not match the element type of `value`.", "source": "github-repos"}
{"code": "def modify_module(channel, module_name, module_state):\n    \n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"{} updated\".format(module_name),\n        \"{} is now {}\".format(module_name, \"activated\" if module_state else \"deactivated\"),\n        modulename=modulename\n    )\n\n    return gui", "docstring": "Creates an embed UI containing the module modified message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nmodule_name (str): The name of the module that was updated\nmodule_state (bool): The current state of the module\n\nReturns:\nembed: The created embed", "source": "juraj-google-style"}
{"code": "def _SetBlankLinesBetweenCommentAndClassFunc(self, node):\n    index = 0\n    while pytree_utils.IsCommentStatement(node.children[index]):\n        self.Visit(node.children[index].children[0])\n        if not self.last_was_decorator:\n            _SetNumNewlines(node.children[index].children[0], _ONE_BLANK_LINE)\n        index += 1\n    if index and node.children[index].lineno - 1 == node.children[index - 1].children[0].lineno:\n        _SetNumNewlines(node.children[index], _NO_BLANK_LINES)\n    else:\n        if self.last_comment_lineno + 1 == node.children[index].lineno:\n            num_newlines = _NO_BLANK_LINES\n        else:\n            num_newlines = self._GetNumNewlines(node)\n        _SetNumNewlines(node.children[index], num_newlines)\n    return index", "docstring": "Set the number of blanks between a comment and class or func definition.\n\nClass and function definitions have leading comments as children of the\nclassdef and functdef nodes.\n\nArguments:\nnode: (pytree.Node) The classdef or funcdef node.\n\nReturns:\nThe index of the first child past the comment nodes.", "source": "github-repos"}
{"code": "def add_help_text(parent, filepath, prefix=\"!\"):\n    \n\n    import tkinter as tk\n    import tkinter.ttk as ttk\n\n    help_contents = get_help_data(filepath)\n\n    text = tk.Text(parent, wrap='word', font=(\"Helvetica\", 10))\n    text.grid(row=0, column=0, sticky=\"W E N S\")\n    text.tag_config(\"heading\", font=(\"Helvetica\", 14))\n    text.tag_config(\"command\", font=(\"Courier\", 10))\n    text.tag_config(\"param\", font=(\"Courier\", 10))\n    text.tag_config(\"description\")\n\n    \n    scrollbar = ttk.Scrollbar(parent, orient=\"vertical\", command=text.yview)\n    scrollbar.grid(column=1, row=0, sticky=\"N S\")\n    text['yscrollcommand'] = scrollbar.set\n\n    \n    for d in help_contents:\n        text.insert('end', d, \"heading\")\n        text.insert('end', '\\n')\n\n        if \"commands\" in d.lower():\n            for c in help_contents[d]:\n                if \"name\" not in c:\n                    continue\n\n                command = prefix + c[\"name\"]\n                text.insert('end', command, (\"command\", \"description\"))\n                if \"params\" in c:\n                    for param in c[\"params\"]:\n                        text.insert('end', \" [{}]\".format(param), (\"param\", \"description\"))\n                text.insert('end', \": \")\n                if \"description\" in c:\n                    text.insert('end', c[\"description\"], \"description\")\n\n                text.insert('end', '\\n')\n\n            text.insert('end', '\\n')\n        else:\n            text.insert('end', help_contents[d], \"description\")\n            text.insert('end', '\\n\\n')\n\n    text.config(state=tk.DISABLED)", "docstring": "Load help text from a file and adds it to the parent\n\nArgs:\nparent: A tk or ttk object\nfilepath (str): The file to load help text from\nprefix (str): The prefix to use for commands", "source": "juraj-google-style"}
{"code": "def get_structure_from_mp(formula):\n    m = MPRester()\n    entries = m.get_entries(formula, inc_structure='final')\n    if (len(entries) == 0):\n        raise ValueError(('No structure with formula %s in Materials Project!' % formula))\n    elif (len(entries) > 1):\n        warnings.warn(('%d structures with formula %s found in Materials Project. The lowest energy structure will be returned.' % (len(entries), formula)))\n    return min(entries, key=(lambda e: e.energy_per_atom)).structure", "docstring": "Convenience method to get a crystal from the Materials Project database via\nthe API. Requires PMG_MAPI_KEY to be set.\n\nArgs:\nformula (str): A formula\n\nReturns:\n(Structure) The lowest energy structure in Materials Project with that\nformula.", "source": "codesearchnet"}
{"code": "def save_binary(self, data: Union[dict, List[dict]]) -> str:\n    path, _ = os.path.splitext(self.output_path)\n    binary_path = os.path.extsep.join((path, 'pickle'))\n    with open(binary_path, 'wb+') as f_output:\n        pickle.dump(data, f_output)\n    return binary_path", "docstring": "Save the provided data object as a pickle-formatted binary data on the disk.\n\nArgs:\ndata (`dict` or list of `dict`): The data to store.\n\nReturns:\n`str`: Path where the data has been saved.", "source": "github-repos"}
{"code": "def write_wav(path, samples, sr=16000):\n    \n    max_value = np.abs(np.iinfo(np.int16).min)\n    data = (samples * max_value).astype(np.int16)\n    scipy.io.wavfile.write(path, sr, data)", "docstring": "Write to given samples to a wav file.\nThe samples are expected to be floating point numbers\nin the range of -1.0 to 1.0.\n\nArgs:\npath (str): The path to write the wav to.\nsamples (np.array): A float array .\nsr (int): The sampling rate.", "source": "juraj-google-style"}
{"code": "def SelectArtifacts(cls, os_name=None, cpe=None, labels=None, restrict_checks=None):\n    results = set()\n    for condition in cls.Conditions(None, os_name, cpe, labels):\n        trigger = condition[1:]\n        for chk in itervalues(cls.checks):\n            if (restrict_checks and (chk.check_id not in restrict_checks)):\n                continue\n            results.update(chk.triggers.Artifacts(*trigger))\n    return results", "docstring": "Takes targeting info, identifies artifacts to fetch.\n\nArgs:\nos_name: 0+ OS names.\ncpe: 0+ CPE identifiers.\nlabels: 0+ GRR labels.\nrestrict_checks: A list of check ids whose artifacts should be fetched.\n\nReturns:\nthe artifacts that should be collected.", "source": "codesearchnet"}
{"code": "def __getitem__(self, thing: Any) -> np.ndarray:\n\t\t\n\t\tif type(thing) is str:\n\t\t\treturn self.__getattr__(thing)\n\t\telse:\n\t\t\t\n\t\t\tlm = LayerManager(None)\n\t\t\tfor key, layer in self.items():\n\t\t\t\tlm[key] = loompy.MemoryLoomLayer(key, layer[thing])\n\t\t\treturn lm", "docstring": "Access a layer by name, or slice through all the layers\n\nArgs:\nthing:\t\tif string, return the specified layer (\"\" is the default layer)\nif slice 2-tuple, return a new LayerManager with all layers sliced", "source": "juraj-google-style"}
{"code": "def _create_variables_and_slots(self) -> Dict[str, Dict[str, tf_variables.Variable]]:\n    self._track_restore_info_for_cpu()\n    variables = {}\n    stacked_variables = self._create_variables_from_stacked_tables()\n    for table in self._table_config:\n        if table.name in stacked_variables:\n            variables[table.name] = {'parameters': stacked_variables[table.name]}\n        else:\n            variables[table.name] = self._create_variables(table, trainable=True)\n    return variables", "docstring": "Create variables for TPU embeddings.\n\nReturns:\nA dict of dicts. The outer dict is keyed by the table names and the inner\ndicts are keyed by 'parameters' and the slot variable names.", "source": "github-repos"}
{"code": "def DeregisterHelper(cls, helper_class):\n    \n    helper_name = helper_class.NAME.lower()\n    if helper_name not in cls._helper_classes:\n      raise KeyError('Helper class not set for name: {0:s}.'.format(\n          helper_class.NAME))\n\n    del cls._helper_classes[helper_name]", "docstring": "Deregisters a helper class.\n\nThe helper classes are identified based on their lower case name.\n\nArgs:\nhelper_class (type): class object of the argument helper.\n\nRaises:\nKeyError: if helper class is not set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def _GetFileSystemCacheIdentifier(self, path_spec):\n    \n    string_parts = []\n\n    string_parts.append(getattr(path_spec.parent, 'comparable', ''))\n    string_parts.append('type: {0:s}'.format(path_spec.type_indicator))\n\n    return ''.join(string_parts)", "docstring": "Determines the file system cache identifier for the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nstr: identifier of the VFS object.", "source": "juraj-google-style"}
{"code": "def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):\n  \n  def inner(T):\n    arr = T(\"input\")\n\n    \n    if mask is None:\n      mask_ = np.ones(shp)\n      mask_[:, w:-w, w:-w] = 0\n    else:\n      mask_ = mask\n\n    blur = _tf_blur(arr, w=5)\n    diffs = (blur-arr)**2\n    diffs += 0.8*(arr-C)**2\n\n    return -tf.reduce_sum(diffs*mask_)\n  return inner", "docstring": "Encourage the boundaries of an image to have less variation and of color C.\n\nArgs:\nshp: shape of T(\"input\") because this may not be known.\nw: width of boundary to penalize. Ignored if mask is set.\nmask: mask describing what area should be penalized.\n\nReturns:\nObjective.", "source": "juraj-google-style"}
{"code": "def modify_prefix(channel, new_prefix):\n    gui = ui_embed.UI(channel, 'Prefix updated', 'Modis prefix is now `{}`'.format(new_prefix), modulename=modulename)\n    return gui", "docstring": "Creates an embed UI containing the prefix modified message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nnew_prefix (str): The value of the new prefix\n\nReturns:\nembed: The created embed", "source": "codesearchnet"}
{"code": "def load_json(task: Task, file: str) -> Result:\n    kwargs: Dict[(str, Type[MutableMapping[(str, Any)]])] = {}\n    with open(file, 'r') as f:\n        data = json.loads(f.read(), **kwargs)\n    return Result(host=task.host, result=data)", "docstring": "Loads a json file.\n\nArguments:\nfile: path to the file containing the json file to load\n\nExamples:\n\nSimple example with ``ordered_dict``::\n\n> nr.run(task=load_json,\nfile=\"mydata.json\")\n\nfile: path to the file containing the json file to load\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): dictionary with the contents of the file", "source": "codesearchnet"}
{"code": "def FindStartOfExpressionInLine(line, endpos, stack):\n  \n  i = endpos\n  while i >= 0:\n    char = line[i]\n    if char in ')]}':\n      \n      stack.append(char)\n    elif char == '>':\n      \n      \n      \n      if (i > 0 and\n          (line[i - 1] == '-' or\n           Match(r'\\s>=\\s', line[i - 1:]) or\n           Search(r'\\boperator\\s*$', line[0:i]))):\n        i -= 1\n      else:\n        stack.append('>')\n    elif char == '<':\n      \n      if i > 0 and line[i - 1] == '<':\n        \n        i -= 1\n      else:\n        \n        \n        if stack and stack[-1] == '>':\n          stack.pop()\n          if not stack:\n            return (i, None)\n    elif char in '([{':\n      \n      \n      \n      \n      while stack and stack[-1] == '>':\n        stack.pop()\n      if not stack:\n        return (-1, None)\n      if ((char == '(' and stack[-1] == ')') or\n          (char == '[' and stack[-1] == ']') or\n          (char == '{' and stack[-1] == '}')):\n        stack.pop()\n        if not stack:\n          return (i, None)\n      else:\n        \n        return (-1, None)\n    elif char == ';':\n      \n      \n      \n      while stack and stack[-1] == '>':\n        stack.pop()\n      if not stack:\n        return (-1, None)\n\n    i -= 1\n\n  return (-1, stack)", "docstring": "Find position at the matching start of current expression.\n\nThis is almost the reverse of FindEndOfExpressionInLine, but note\nthat the input position and returned position differs by 1.\n\nArgs:\nline: a CleansedLines line.\nendpos: start searching at this position.\nstack: nesting stack at endpos.\n\nReturns:\nOn finding matching start: (index at matching start, None)\nOn finding an unclosed expression: (-1, None)\nOtherwise: (-1, new stack at beginning of this line)", "source": "juraj-google-style"}
{"code": "def __init__(self, app=None, env=None, region='us-east-1', prop_path=None):\n        \n        self.app_name = app\n        self.env = env\n        self.region = region\n        self.properties = get_properties(prop_path, env=self.env, region=self.region)\n        self.datapipeline_data = self.properties['datapipeline']\n        generated = get_details(app=self.app_name)\n        self.group = generated.data['project']\n\n        session = boto3.Session(profile_name=self.env, region_name=self.region)\n        self.client = session.client('datapipeline')\n        self.pipeline_id = None", "docstring": "AWS Data Pipeline object.\n\nArgs:\napp (str): Application name\nenv (str): Environment/Account\nregion (str): AWS Region\nprop_path (str): Path of environment property file", "source": "juraj-google-style"}
{"code": "def get_image_tokens(self, pixel_values: torch.FloatTensor):\n    batch_size = pixel_values.shape[0]\n    _, _, image_toks = self.vqmodel.encode(pixel_values)\n    bpe_toks = self.vocabulary_mapping.convert_img2bpe(image_toks)\n    bpe_toks = bpe_toks.view(batch_size, -1)\n    return bpe_toks", "docstring": "Tokenizes images into discrete tokens with VQGAN module. Converts\nobtained image tokens into BPE tokens and wraps with \"boi\" and \"eoi\"\nspecial tokens.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def FindFirst(cls, setting_matcher, device_matcher=None, **kwargs):\n        \n        try:\n            return next(cls.FindDevices(\n                setting_matcher, device_matcher=device_matcher, **kwargs))\n        except StopIteration:\n            raise usb_exceptions.DeviceNotFoundError(\n                'No device available, or it is in the wrong configuration.')", "docstring": "Find and return the first matching device.\n\nArgs:\nsetting_matcher: See cls.FindDevices.\ndevice_matcher: See cls.FindDevices.\n**kwargs: See cls.FindDevices.\n\nReturns:\nAn instance of UsbHandle.\n\nRaises:\nDeviceNotFoundError: Raised if the device is not available.", "source": "juraj-google-style"}
{"code": "def set_snippet_client_verbose_logging(self, verbose):\n    self._ad.log.info('Set verbose logging to %s.', verbose)\n    self.verbose_logging = verbose", "docstring": "Switches verbose logging. True for logging full RPC response.\n\nBy default it will only write max_rpc_return_value_length for Rpc return\nstrings. If you need to see full message returned from Rpc, please turn\non verbose logging.\n\nmax_rpc_return_value_length will set to 1024 by default, the length\ncontains full Rpc response in Json format, included 1st element \"id\".\n\nArgs:\nverbose: bool. If True, turns on verbose logging, if False turns off", "source": "github-repos"}
{"code": "def interface_required(interface):\n        \n        def _interface_required(func):\n            \n            @functools.wraps(func)\n            def wrapper(self, *args, **kwargs):\n                \n                if self.tif != interface:\n                    raise errors.JLinkException('Unsupported for current interface.')\n                return func(self, *args, **kwargs)\n            return wrapper\n        return _interface_required", "docstring": "Decorator to specify that a particular interface type is required\nfor the given method to be used.\n\nArgs:\ninterface (int): attribute of ``JLinkInterfaces``\n\nReturns:\nA decorator function.", "source": "juraj-google-style"}
{"code": "def set_attr_text(self, attr_key, attr_val, el_idx=0):\n        \n        self.get_element_by_attr_key(attr_key, el_idx).attrib[attr_key] = attr_val", "docstring": "Set the value of the selected attribute of the selected element.\n\nArgs:\nattr_key : str\nName of attribute for which to search\n\nattr_val : str\nText to set for the attribute.\n\nel_idx : int\nIndex of element to use in the event that there are multiple sibling\nelements with the same name.", "source": "juraj-google-style"}
{"code": "def data_groups(self, groups, entity_count):\n    data = []\n    for xid in groups.keys():\n        assoc_group_data = self.data_group_association(xid)\n        data += assoc_group_data\n        entity_count += len(assoc_group_data)\n        if (entity_count >= self._batch_max_chunk):\n            break\n    return (data, entity_count)", "docstring": "Process Group data.\n\nArgs:\ngroups (list): The list of groups to process.\n\nReturns:\nlist: A list of groups including associations", "source": "codesearchnet"}
{"code": "def _publish_to_subscribers(event: Event):\n    \n    subscribers = get_subscribers(event.object_type)\n\n    \n    for sub in subscribers:\n        DB.prepend_to_list(_keys.published(event.object_type, sub),\n                           event.id, pipeline=True)\n        event_dict = deepcopy(event.config)\n        event_dict.pop('id')\n        DB.set_hash_value(_keys.data(event.object_type, sub), event.id,\n                          str(event_dict), pipeline=True)\n    DB.publish(event.object_type, event.id, pipeline=True)", "docstring": "Publish and event to all subscribers.\n\n- Adds the event id to the published event list for all subscribers.\n- Adds the event data to the published event data for all subscribers.\n- Publishes the event id notification to all subscribers.\n\nArgs:\nevent (Event): Event object to publish.", "source": "juraj-google-style"}
{"code": "def _FormatDateTime(self, event):\n    try:\n        datetime_object = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)\n        datetime_object += datetime.timedelta(microseconds=event.timestamp)\n        datetime_object.astimezone(self._output_mediator.timezone)\n        return datetime_object.replace(tzinfo=None)\n    except (OverflowError, ValueError) as exception:\n        self._ReportEventError(event, 'unable to copy timestamp: {0!s} to a human readable date and time with error: {1!s}. Defaulting to: \"ERROR\"'.format(event.timestamp, exception))\n        return 'ERROR'", "docstring": "Formats the date to a datetime object without timezone information.\n\nNote: timezone information must be removed due to lack of support\nby xlsxwriter and Excel.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ndatetime.datetime|str: date and time value or a string containing\n\"ERROR\" on OverflowError.", "source": "codesearchnet"}
{"code": "def issuperset(self, other):\n    other = self._cast_to_frameset(other)\n    if (other is NotImplemented):\n        return NotImplemented\n    return (self.items >= other.items)", "docstring": "Check if the contents of `self` is a superset of the contents of\n`other.`\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "codesearchnet"}
{"code": "def select(self, field_paths):\n    query = query_mod.Query(self)\n    return query.select(field_paths)", "docstring": "Create a \"select\" query with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.select` for\nmore information on this method.\n\nArgs:\nfield_paths (Iterable[str, ...]): An iterable of field paths\n(``.``-delimited list of field names) to use as a projection\nof document fields in the query results.\n\nReturns:\n~.firestore_v1beta1.query.Query: A \"projected\" query.", "source": "codesearchnet"}
{"code": "def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:\n    input_ids = input_ids.to(self.assistant_model.device)\n    min_new_tokens, max_new_tokens = self._calculate_new_tokens(input_ids)\n    if max_new_tokens == 0:\n        return (input_ids, None)\n    self._update_past_and_masks(input_ids)\n    generation_args = self._prepare_generation_args(input_ids, min_new_tokens, max_new_tokens)\n    candidate_ids, candidate_logits = self._generate_candidates(generation_args)\n    return (candidate_ids, candidate_logits)", "docstring": "Fetches the candidates to be tried for the current input.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\n\nReturn:\n`torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be\nassessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,\nvocabulary_size)` containing the logits associated to each candidate.", "source": "github-repos"}
{"code": "def _conv_general_param_type_converter(window_strides, lhs_dilation, rhs_dilation, dim):\n\n    def _as_list_of_size(item, size):\n        if item is None:\n            return None\n        return [item] * size if isinstance(item, int) else list(item)\n    return (_as_list_of_size(window_strides, dim), _as_list_of_size(lhs_dilation, dim), _as_list_of_size(rhs_dilation, dim))", "docstring": "Convert strides, lhs_dilation, rhs_dilation to match TF convention.\n\nFor example,\nin the 3D case, if lhs_dilation = 2, then convert it to [2, 2, 2]\nif lhs_dilation = (2, 2, 2), convert it also to [2, 2, 2]\n\nArgs:\nwindow_strides: window_strides to be converted\nlhs_dilation: lhs_dilation to be converted\nrhs_dilation: rhs_dilation to be converted\ndim: dim to be converted\n\nReturns:\nThe updated window_strides, lhs_dilation and rhs_dilation", "source": "github-repos"}
{"code": "def call_later(self, delay, callback):\n    if hasattr(self._connection.ioloop, 'call_later'):\n        self._connection.ioloop.call_later(delay, callback)\n    else:\n        self._connection.ioloop.add_timeout(delay, callback)", "docstring": "Schedule a one-shot timeout given delay seconds.\n\nThis method is only useful for compatibility with older versions of pika.\n\nArgs:\ndelay (float): Non-negative number of seconds from now until\nexpiration\ncallback (method): The callback method, having the signature\n`callback()`", "source": "codesearchnet"}
{"code": "def add_one(self, url: str, url_properties: Optional[URLProperties]=None, url_data: Optional[URLData]=None):\n    self.add_many([AddURLInfo(url, url_properties, url_data)])", "docstring": "Add a single URL to the table.\n\nArgs:\nurl: The URL to be added\nurl_properties: Additional values to be saved\nurl_data: Additional data to be saved", "source": "codesearchnet"}
{"code": "def __init__(self, session, max_retries, retry_backoff_base):\n        \n\n        \n        self.on_connect = event.Event('Channel.on_connect')\n        \n        self.on_reconnect = event.Event('Channel.on_reconnect')\n        \n        self.on_disconnect = event.Event('Channel.on_disconnect')\n        \n        self.on_receive_array = event.Event('Channel.on_receive_array')\n\n        self._max_retries = max_retries\n        self._retry_backoff_base = retry_backoff_base\n\n        \n        self._is_connected = False\n        \n        self._on_connect_called = False\n        \n        self._chunk_parser = None\n        \n        self._session = session\n\n        \n        self._sid_param = None\n        self._gsessionid_param = None", "docstring": "Create a new channel.\n\nArgs:\nsession (http_utils.Session): Request session.\nmax_retries (int): Number of retries for long-polling request.\nretry_backoff_base (int): The base term for the long-polling\nexponential backoff.", "source": "juraj-google-style"}
{"code": "def save_counter(self):\n    self._maybe_create_save_counter()\n    return self._save_counter", "docstring": "An integer variable which starts at zero and is incremented on save.\n\nUsed to number checkpoints.\n\nReturns:\nThe save counter variable.", "source": "github-repos"}
{"code": "def deterministic_shuffle(list_, seed=0, rng=None):\n    r\n    rng = ensure_rng(seed if rng is None else rng)\n    rng.shuffle(list_)\n    return list_", "docstring": "r\"\"\"\nArgs:\nlist_ (list):\nseed (int):\n\nReturns:\nlist: list_\n\nCommandLine:\npython -m utool.util_numpy --test-deterministic_shuffle\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_numpy import *  # NOQA\n>>> list_ = [1, 2, 3, 4, 5, 6]\n>>> seed = 1\n>>> list_ = deterministic_shuffle(list_, seed)\n>>> result = str(list_)\n>>> print(result)\n[3, 2, 5, 1, 4, 6]", "source": "juraj-google-style"}
{"code": "def truepath_relative(path, otherpath=None):\n    if (otherpath is None):\n        otherpath = os.getcwd()\n    otherpath = truepath(otherpath)\n    path_ = normpath(relpath(path, otherpath))\n    return path_", "docstring": "Normalizes and returns absolute path with so specs\n\nArgs:\npath (str):  path to file or directory\notherpath (None): (default = None)\n\nReturns:\nstr: path_\n\nCommandLine:\npython -m utool.util_path --exec-truepath_relative --show\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> import utool as ut\n>>> path = 'C:/foobar/foobiz'\n>>> otherpath = 'C:/foobar'\n>>> path_ = truepath_relative(path, otherpath)\n>>> result = ('path_ = %s' % (ut.repr2(path_),))\n>>> print(result)\npath_ = 'foobiz'", "source": "codesearchnet"}
{"code": "def remove(self, node, dirty=True):\n    if (node.id in self._children):\n        self._children[node.id].parent = None\n        del self._children[node.id]\n    if dirty:\n        self.touch()", "docstring": "Remove the given child node.\n\nArgs:\nnode (gkeepapi.Node): Node to remove.\ndirty (bool): Whether this node should be marked dirty.", "source": "codesearchnet"}
{"code": "def _get_child_class(self, path):\n        \n        if self._child_entity is None:\n            return BIDSNode\n\n        for i, child_ent in enumerate(listify(self._child_entity)):\n            template = self.available_entities[child_ent].directory\n            if template is None:\n                return BIDSNode\n            template = self.root_path + template\n            \n            to_rep = re.findall(r'\\{(.*?)\\}', template)\n            for ent in to_rep:\n                patt = self.available_entities[ent].pattern\n                template = template.replace('{%s}' % ent, patt)\n            template += r'[^\\%s]*$' % os.path.sep\n            if re.match(template, path):\n                return listify(self._child_class)[i]\n\n        return BIDSNode", "docstring": "Return the appropriate child class given a subdirectory path.\n\nArgs:\npath (str): The path to the subdirectory.\n\nReturns: An uninstantiated BIDSNode or one of its subclasses.", "source": "juraj-google-style"}
{"code": "def line_starts_subpgm(line: str) -> Tuple[bool, Optional[str]]:\n    \n\n    match = RE_SUB_START.match(line)\n    if match != None:\n        f_name = match.group(1)\n        return (True, f_name)\n\n    match = RE_FN_START.match(line)\n    if match != None:\n        f_name = match.group(1)\n        return (True, f_name)\n\n    return (False, None)", "docstring": "Indicates whether a line in the program is the first line of a subprogram\ndefinition.\n\nArgs:\nline\nReturns:\n(True, f_name) if line begins a definition for subprogram f_name;\n(False, None) if line does not begin a subprogram definition.", "source": "juraj-google-style"}
{"code": "def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None):\n        \n        query_dict = {}\n        search_term = None\n        if query:\n            query_dict = {'$or':\n                [\n                    {'hpo_id': {'$regex': query, '$options':'i'}},\n                    {'description': {'$regex': query, '$options':'i'}},\n                ]   \n            }\n            search_term = query\n        elif text:\n            new_string = ''\n            for i,word in enumerate(text.split(' ')):\n                if i == 0:\n                    new_string += word\n                else:\n                    new_string += ' \\\"{0}\\\"'.format(word)\n            LOG.info(\"Search HPO terms with %s\", new_string)\n            query_dict['$text'] = {'$search': new_string}\n            search_term = text\n        elif hpo_term:\n            query_dict['hpo_id'] = hpo_term\n            search_term = hpo_term\n\n        limit = limit or int(10e10)\n        res = self.hpo_term_collection.find(query_dict).limit(limit).sort('hpo_number',ASCENDING)\n        \n\n        LOG.info(\"Found {0} terms with search word {1}\".format(res.count(), search_term))\n        return res", "docstring": "Return all HPO terms\n\nIf a query is sent hpo_terms will try to match with regex on term or\ndescription.\n\nArgs:\nquery(str): Part of a hpoterm or description\nhpo_term(str): Search for a specific hpo term\nlimit(int): the number of desired results\n\nReturns:\nresult(pymongo.Cursor): A cursor with hpo terms", "source": "juraj-google-style"}
{"code": "def release_client(self, client):\n        \n        if isinstance(client, Client):\n            if not self._is_expired_client(client):\n                LOG.debug('Client is not expired. Adding back to pool')\n                self.__pool.append(client)\n            elif client.is_connected():\n                LOG.debug('Client is expired and connected. Disconnecting')\n                client.disconnect()\n        if self.__sem is not None:\n            self.__sem.release()", "docstring": "Releases a client object to the pool.\n\nArgs:\nclient: Client object.", "source": "juraj-google-style"}
{"code": "def dismiss_confirm(self, text=None, wait=None):\n    with self.driver.dismiss_modal('confirm', text=text, wait=wait):\n        (yield)", "docstring": "Execute the wrapped code, dismissing a confirm.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "codesearchnet"}
{"code": "def xslt_transformation(xml, template):\n    \n    transformer = ET.XSLT(\n        _read_template(template)\n    )\n    newdom = transformer(\n        _read_marcxml(xml)\n    )\n\n    return ET.tostring(newdom, pretty_print=True, encoding=\"utf-8\")", "docstring": "Transform `xml` using XSLT `template`.\n\nArgs:\nxml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\ntemplate (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\n\nReturns:\nstr: Transformed `xml` as string.", "source": "juraj-google-style"}
{"code": "def extract_response(self, extractors):\n        \n        if not extractors:\n            return {}\n\n        logger.log_debug(\"start to extract from response object.\")\n        extracted_variables_mapping = OrderedDict()\n        extract_binds_order_dict = utils.ensure_mapping_format(extractors)\n\n        for key, field in extract_binds_order_dict.items():\n            extracted_variables_mapping[key] = self.extract_field(field)\n\n        return extracted_variables_mapping", "docstring": "extract value from requests.Response and store in OrderedDict.\n\nArgs:\nextractors (list):\n\n[\n{\"resp_status_code\": \"status_code\"},\n{\"resp_headers_content_type\": \"headers.content-type\"},\n{\"resp_content\": \"content\"},\n{\"resp_content_person_first_name\": \"content.person.name.first_name\"}\n]\n\nReturns:\nOrderDict: variable binds ordered dict", "source": "juraj-google-style"}
{"code": "def merge_all_models_into_first_model(biop_structure):\n    \n    from string import ascii_uppercase\n    idx = 1\n    first_model = biop_structure[0]\n\n    for m in biop_structure.get_models():\n        \n        if first_model.id == m.id:\n            continue\n        for c in m.get_chains():\n            c.id = ascii_uppercase[idx]\n            first_model.add(c)\n        idx += 1", "docstring": "Merge all existing models into a Structure's first_model attribute.\n\nThis directly modifies the Biopython Structure object. Chains IDs will start from A and increment for each new\nchain (model that is converted).\n\nArgs:\nbiop_structure (Structure): Structure with multiple models that should be merged", "source": "juraj-google-style"}
{"code": "def __init__(self, **kwargs):\n        \n        super(functionTagProcessor, self).__init__(**kwargs)\n\n        self.include_function_signatures = kwargs.get(\n            'include_function_signatures', False)", "docstring": "Initializer.\n\nArgs:\n**include_function_signatures: bool. See get_name() for more info.", "source": "juraj-google-style"}
{"code": "def get_many(self, query: Mapping[(str, Any)], context: PipelineContext=None, streaming: bool=False) -> Iterable[T]:\n    result = self._source.get_many(self._source_type, deepcopy(query), context)\n    LOGGER.info('Got results \"{result}\" from query \"{query}\" of source \"{source}\"'.format(result=result, query=query, source=self._source))\n    if (not streaming):\n        LOGGER.info('Non-streaming get_many request. Ensuring results \"{result}\" are a Iterable'.format(result=result))\n        result = list(result)\n        LOGGER.info('Sending results \"{result}\" to sinks before converting'.format(result=result))\n        for sink in self._before_transform:\n            sink.put_many(result, context)\n        LOGGER.info('Converting results \"{result}\" to request type'.format(result=result))\n        result = [self._transform(data=item, context=context) for item in result]\n        LOGGER.info('Sending results \"{result}\" to sinks after converting'.format(result=result))\n        for sink in self._after_transform:\n            sink.put_many(result, context)\n        return result\n    else:\n        LOGGER.info('Streaming get_many request. Returning result generator for results \"{result}\"'.format(result=result))\n        return self._get_many_generator(result)", "docstring": "Gets a query from the data source, where the query contains multiple elements to be extracted.\n\n1) Extracts the query from the data source.\n2) Inserts the result into any data sinks.\n3) Transforms the results into the requested type if it wasn't already.\n4) Inserts the transformed result into any data sinks.\n\nArgs:\nquery: The query being requested.\ncontext: The context for the extraction (mutable).\nstreaming: Specifies whether the results should be returned as a generator (default False).\n\nReturns:\nThe requested objects or a generator of the objects if streaming is True.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.ListInstanceConfigs = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.FromString,\n        )\n        self.GetInstanceConfig = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.InstanceConfig.FromString,\n        )\n        self.ListInstances = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesResponse.FromString,\n        )\n        self.GetInstance = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.Instance.FromString,\n        )\n        self.CreateInstance = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.CreateInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.UpdateInstance = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.DeleteInstance = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance\",\n            request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.SetIamPolicy = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy\",\n            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,\n            
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,\n        )\n        self.GetIamPolicy = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy\",\n            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,\n        )\n        self.TestIamPermissions = channel.unary_unary(\n            \"/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions\",\n            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def remove_from_list(self, key: str, value, count: int = 0,\n                         pipeline: bool = False):\n        \n        if pipeline:\n            if redis.__version__ == '2.10.6':\n                self._pipeline.lrem(name=key, value=value, num=count)\n            else:\n                self._pipeline.lrem(key, count, value)\n        else:\n            if self._db.exists(key):\n                if redis.__version__ == '2.10.6':\n                    self._db.lrem(name=key, value=value, num=count)\n                else:\n                    self._db.lrem(key, count, value)", "docstring": "Remove specified value(s) from the list stored at key.\n\nArgs:\nkey (str): Key where the list is stored.\nvalue: value to remove\ncount (int): Number of entries to remove, default 0 == all\npipeline(bool): If True, start a transaction block. Default False.", "source": "juraj-google-style"}
{"code": "def delete_project(self, resource):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.delete(resource)", "docstring": "Deletes the entity described by the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\n\nRaises:\nrequests.HTTPError on a failure.", "source": "juraj-google-style"}
{"code": "def mock(self, slot, rpc_id, value):\n    address = slot.address\n    if (address not in self.mock_rpcs):\n        self.mock_rpcs[address] = {}\n    self.mock_rpcs[address][rpc_id] = value", "docstring": "Store a mock return value for an RPC\n\nArgs:\nslot (SlotIdentifier): The slot we are mocking\nrpc_id (int): The rpc we are mocking\nvalue (int): The value that should be returned\nwhen the RPC is called.", "source": "codesearchnet"}
{"code": "def extract_signature(func, ignore_first=False):\n    sig_params = get_signature_params(func)\n    if ignore_first:\n        if (len(sig_params) == 0):\n            raise Exception(\"Methods must take a 'self' argument, but the method '{}' does not have one.\".format(func.__name__))\n        sig_params = sig_params[1:]\n    arg_names = []\n    arg_defaults = []\n    arg_is_positionals = []\n    keyword_names = set()\n    for (arg_name, parameter) in sig_params:\n        arg_names.append(arg_name)\n        arg_defaults.append(parameter.default)\n        arg_is_positionals.append((parameter.kind == parameter.VAR_POSITIONAL))\n        if (parameter.kind == Parameter.POSITIONAL_OR_KEYWORD):\n            keyword_names.add(arg_name)\n    return FunctionSignature(arg_names, arg_defaults, arg_is_positionals, keyword_names, func.__name__)", "docstring": "Extract the function signature from the function.\n\nArgs:\nfunc: The function whose signature should be extracted.\nignore_first: True if the first argument should be ignored. This should\nbe used when func is a method of a class.\n\nReturns:\nA function signature object, which includes the names of the keyword\narguments as well as their default values.", "source": "codesearchnet"}
{"code": "def _supervised_signature_def(method_name, inputs, loss=None, predictions=None, metrics=None):\n    if inputs is None or not inputs:\n        raise ValueError(f'{method_name} `inputs` cannot be None or empty.')\n    signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n    signature_outputs = {}\n    for output_set in (loss, predictions, metrics):\n        if output_set is not None:\n            sig_out = {key: utils.build_tensor_info(tensor) for key, tensor in output_set.items()}\n            signature_outputs.update(sig_out)\n    signature_def = build_signature_def(signature_inputs, signature_outputs, method_name)\n    return signature_def", "docstring": "Creates a signature for training and eval data.\n\nThis function produces signatures that describe the inputs and outputs\nof a supervised process, such as training or evaluation, that\nresults in loss, metrics, and the like. Note that this function only requires\ninputs to be not None.\n\nArgs:\nmethod_name: Method name of the SignatureDef as a string.\ninputs: dict of string to `Tensor`.\nloss: dict of string to `Tensor` representing computed loss.\npredictions: dict of string to `Tensor` representing the output predictions.\nmetrics: dict of string to `Tensor` representing metric ops.\n\nReturns:\nA train- or eval-flavored signature_def.\n\nRaises:\nValueError: If inputs or outputs is `None`.", "source": "github-repos"}
{"code": "def __init__(self, endpoint, project, token, api_base=\"api/v1\",\n                 is_skipped_an_issue=True, verify_ssl=True):\n        \n        super(ReportPortalService, self).__init__()\n        self.endpoint = endpoint\n        self.api_base = api_base\n        self.project = project\n        self.token = token\n        self.is_skipped_an_issue = is_skipped_an_issue\n        self.base_url = uri_join(self.endpoint,\n                                 self.api_base,\n                                 self.project)\n\n        self.session = requests.Session()\n        self.session.headers[\"Authorization\"] = \"bearer {0}\".format(self.token)\n        self.stack = [None]\n        self.launch_id = None\n        self.verify_ssl = verify_ssl", "docstring": "Init the service class.\n\nArgs:\nendpoint: endpoint of report portal service.\nproject: project name to use for launch names.\ntoken: authorization token.\napi_base: defaults to api/v1, can be changed to other version.\nis_skipped_an_issue: option to mark skipped tests as not\n'To Investigate' items on Server side.\nverify_ssl: option to not verify ssl certificates", "source": "juraj-google-style"}
{"code": "def record_markdown(text, cellid):\n    from acorn.logging.database import record\n    from time import time\n    ekey = 'nb-{}'.format(cellid)\n    global _cellid_map\n    if (cellid not in _cellid_map):\n        from acorn.logging.database import active_db\n        from difflib import SequenceMatcher\n        from acorn.logging.diff import cascade\n        taskdb = active_db()\n        if (ekey not in taskdb.entities):\n            possible = [k for k in taskdb.entities if (k[0:3] == 'nb-')]\n            (maxkey, maxvalue) = (None, 0.0)\n            for pkey in possible:\n                sequence = [e['c'] for e in taskdb.entities[pkey]]\n                state = ''.join(cascade(sequence))\n                matcher = SequenceMatcher(a=state, b=text)\n                ratio = matcher.quick_ratio()\n                if ((ratio > maxvalue) and (ratio > 0.5)):\n                    (maxkey, maxvalue) = (pkey, ratio)\n            if (maxkey is not None):\n                ekey = pkey\n        _cellid_map[cellid] = ekey\n    ekey = _cellid_map[cellid]\n    entry = {'m': 'md', 'a': None, 's': time(), 'r': None, 'c': text}\n    record(ekey, entry, diff=True)", "docstring": "Records the specified markdown text to the acorn database.\n\nArgs:\ntext (str): the *raw* markdown text entered into the cell in the ipython\nnotebook.", "source": "codesearchnet"}
{"code": "def validate_env(app):\n    \n\n    if not hasattr(app.env, 'javalink_config_cache'):\n        app.env.javalink_config_cache = {}\n\n    for conf_attr, (_, _, env_attr) in ref.CONFIG_VALUES.iteritems():\n        if not env_attr:\n            continue\n\n        value = getattr(app.config, conf_attr)\n        cached = app.env.javalink_config_cache.get(conf_attr, value)\n\n        app.env.javalink_config_cache[conf_attr] = value\n        if value != cached:\n            app.verbose('[javalink] config.%s has changed, clearing related env', conf_attr)\n            delattr(app.env, env_attr)", "docstring": "Purge expired values from the environment.\n\nWhen certain configuration values change, related values in the\nenvironment must be cleared. While Sphinx can rebuild documents on\nconfiguration changes, it does not notify extensions when this\nhappens. Instead, cache relevant values in the environment in order\nto detect when they change.\n\nArgs:\napp: The Sphinx application.", "source": "juraj-google-style"}
{"code": "def convert(self):\n    return super(TFLiteConverter, self).convert()", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nReturns:\nThe converted data in serialized format, either a TFLite Flatbuffer or\na Graphviz graph depending on value in `output_format`.\n\nRaises:\nValueError:\nInput shape is not specified.\nNone value for dimension in input_tensor.", "source": "github-repos"}
{"code": "def touch(self, key, expire=0, noreply=None):\n        \n        if noreply is None:\n            noreply = self.default_noreply\n        key = self.check_key(key)\n        cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii')\n        if noreply:\n            cmd += b' noreply'\n        cmd += b'\\r\\n'\n        results = self._misc_cmd([cmd], b'touch', noreply)\n        if noreply:\n            return True\n        return results[0] == b'TOUCHED'", "docstring": "The memcached \"touch\" command.\n\nArgs:\nkey: str, see class docs for details.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue if the expiration time was updated, False if the key wasn't\nfound.", "source": "juraj-google-style"}
{"code": "def _is_quantized_function(self, func: function_pb2.FunctionDef) -> bool:\n    return func.signature.name.startswith('quantized_')", "docstring": "Determine whether a FunctionDef is quantized.\n\nArgs:\nfunc: A FunctionDef object.\n\nReturns:\nTrue iff `func` is quantized.", "source": "github-repos"}
{"code": "def FoldByteStream(self, mapped_value, context=None, **unused_kwargs):\n    \n    elements_data_size = self._CalculateElementsDataSize(context)\n    if elements_data_size is not None:\n      if elements_data_size != len(mapped_value):\n        raise errors.FoldingError(\n            'Mismatch between elements data size and mapped value size')\n\n    elif not self._HasElementsTerminator():\n      raise errors.FoldingError('Unable to determine elements data size')\n\n    else:\n      elements_terminator = self._data_type_definition.elements_terminator\n      elements_terminator_size = len(elements_terminator)\n      if mapped_value[-elements_terminator_size:] != elements_terminator:\n        mapped_value = b''.join([mapped_value, elements_terminator])\n\n    return mapped_value", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def _GetStringValue(self, data_dict, name, default_value=None):\n    values = data_dict.get(name, None)\n    if (not values):\n        return default_value\n    for (index, value) in enumerate(values):\n        if (',' in value):\n            values[index] = '\"{0:s}\"'.format(value)\n    return ', '.join(values)", "docstring": "Retrieves a specific string value from the data dict.\n\nArgs:\ndata_dict (dict[str, list[str]): values per name.\nname (str): name of the value to retrieve.\ndefault_value (Optional[object]): value to return if the name has no value\nset in data_dict.\n\nReturns:\nstr: value represented as a string.", "source": "codesearchnet"}
{"code": "def _insert_back_keep_dims(x, axis):\n  \n  for i in sorted(axis):\n    x = tf.expand_dims(x, axis=i)\n  return x", "docstring": "Insert the dims in `axis` back as singletons after being removed.\n\nArgs:\nx:  `Tensor`.\naxis:  Python list of integers.\n\nReturns:\n`Tensor` with same values as `x`, but additional singleton dimensions.", "source": "juraj-google-style"}
{"code": "def read_uint16(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sH' % endian), 2)", "docstring": "Read 2 byte as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def start_time_distance(item_a, item_b, max_value):\n    start_time_diff = np.abs((item_a.times[0] - item_b.times[0]))\n    return (np.minimum(start_time_diff, max_value) / float(max_value))", "docstring": "Absolute difference between the starting times of each item.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def __parse(self, function_meta):\n    self._func = get_mapping_function(function_meta['func_name'], self.functions_mapping)\n    self.func_name = self._func.__name__\n    self._args = prepare_lazy_data(function_meta.get('args', []), self.functions_mapping, self.check_variables_set)\n    self._kwargs = prepare_lazy_data(function_meta.get('kwargs', {}), self.functions_mapping, self.check_variables_set)\n    if (self.func_name == 'load_csv_file'):\n        if ((len(self._args) != 1) or self._kwargs):\n            raise exceptions.ParamsError('P() should only pass in one argument!')\n        self._args = [self._args[0]]\n    elif (self.func_name == 'get_os_environ'):\n        if ((len(self._args) != 1) or self._kwargs):\n            raise exceptions.ParamsError('ENV() should only pass in one argument!')\n        self._args = [self._args[0]]", "docstring": "init func as lazy functon instance\n\nArgs:\nfunction_meta (dict): function meta including name, args and kwargs", "source": "codesearchnet"}
{"code": "def ParseCall(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    guid = self._GetRowValue(query_hash, row, 'guid')\n    is_incoming = self._GetRowValue(query_hash, row, 'is_incoming')\n    videostatus = self._GetRowValue(query_hash, row, 'videostatus')\n\n    try:\n      aux = guid\n      if aux:\n        aux_list = aux.split('-')\n        src_aux = aux_list[0]\n        dst_aux = aux_list[1]\n      else:\n        src_aux = 'Unknown [no GUID]'\n        dst_aux = 'Unknown [no GUID]'\n    except IndexError:\n      src_aux = 'Unknown [{0:s}]'.format(guid)\n      dst_aux = 'Unknown [{0:s}]'.format(guid)\n\n    if is_incoming == '0':\n      user_start_call = True\n      source = src_aux\n\n      ip_address = self._GetRowValue(query_hash, row, 'ip_address')\n      if ip_address:\n        destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address)\n      else:\n        destination = dst_aux\n    else:\n      user_start_call = False\n      source = src_aux\n      destination = dst_aux\n\n    call_identifier = self._GetRowValue(query_hash, row, 'id')\n\n    event_data = SkypeCallEventData()\n    event_data.dst_call = destination\n    event_data.offset = call_identifier\n    event_data.query = query\n    event_data.src_call = source\n    event_data.user_start_call = user_start_call\n    event_data.video_conference = videostatus == '3'\n\n    timestamp = self._GetRowValue(query_hash, row, 'try_call')\n    event_data.call_type = 'WAITING'\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    try:\n      timestamp = self._GetRowValue(query_hash, row, 'accept_call')\n      timestamp = int(timestamp)\n    except (ValueError, TypeError):\n      timestamp = None\n\n    if timestamp:\n      event_data.call_type = 'ACCEPTED'\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      try:\n        call_duration = self._GetRowValue(query_hash, row, 'call_duration')\n        call_duration = int(call_duration)\n      except (ValueError, TypeError):\n        parser_mediator.ProduceExtractionWarning(\n            'unable to determine when call: {0:s} was finished.'.format(\n                call_identifier))\n        call_duration = None\n\n      if call_duration:\n        timestamp += call_duration\n        event_data.call_type = 'FINISHED'\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a call.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.\nquery (Optional[str]): query.", "source": "juraj-google-style"}
{"code": "def order_by(self, *args):\n        \n        if self._solr_locked:\n            raise Exception(\"Query already executed, no changes can be made.\"\n                            \"%s %s\" % (self._solr_query, self._solr_params)\n                            )\n\n        for arg in args:\n            if arg.startswith('-'):\n                self._solr_params['sort'][arg[1:]] = 'desc'\n            else:\n                self._solr_params['sort'][arg] = 'asc'", "docstring": "Applies query ordering.\n\nNew parameters are appended to current ones, overwriting existing ones.\n\nArgs:\n**args: Order by fields names.\nDefaults to ascending, prepend with hypen (-) for desecending ordering.", "source": "juraj-google-style"}
{"code": "def write_label_list(path, label_list):\n    \n    entries = []\n    for label in label_list:\n        entries.append([label.start, label.end, label.value])\n\n    textfile.write_separated_lines(path, entries, separator='\\t')", "docstring": "Writes the given `label_list` to an audacity label file.\n\nArgs:\npath (str): Path to write the file to.\nlabel_list (audiomate.annotations.LabelList): Label list", "source": "juraj-google-style"}
{"code": "def get_info(\n        self,\n        userSpecifier,\n        **kwargs\n    ):\n        \n\n        request = Request(\n            'GET',\n            '/v3/users/{userSpecifier}'\n        )\n\n        request.set_path_param(\n            'userSpecifier',\n            userSpecifier\n        )\n\n        response = self.ctx.request(request)\n\n\n        if response.content_type is None:\n            return response\n\n        if not response.content_type.startswith(\"application/json\"):\n            return response\n\n        jbody = json.loads(response.raw_body)\n\n        parsed_body = {}\n\n        \n        \n        \n        if str(response.status) == \"200\":\n            if jbody.get('userInfo') is not None:\n                parsed_body['userInfo'] = \\\n                    self.ctx.user.UserInfo.from_dict(\n                        jbody['userInfo'],\n                        self.ctx\n                    )\n\n        elif str(response.status) == \"401\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"403\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"405\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        \n        \n        \n        else:\n            parsed_body = jbody\n\n        response.body = parsed_body\n\n        return response", "docstring": "Fetch the user information for the specified user. This endpoint is\nintended to be used by the user themself to obtain their own\ninformation.\n\nArgs:\nuserSpecifier:\nThe User Specifier\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "juraj-google-style"}
{"code": "def _GetPathSegmentIndexForOccurrenceWeights(\n      self, occurrence_weights, value_weights):\n    \n    largest_weight = occurrence_weights.GetLargestWeight()\n\n    if largest_weight > 0:\n      occurrence_weight_indexes = occurrence_weights.GetIndexesForWeight(\n          largest_weight)\n      number_of_occurrence_indexes = len(occurrence_weight_indexes)\n    else:\n      number_of_occurrence_indexes = 0\n\n    path_segment_index = None\n    if number_of_occurrence_indexes == 0:\n      path_segment_index = self._GetPathSegmentIndexForValueWeights(\n          value_weights)\n\n    elif number_of_occurrence_indexes == 1:\n      path_segment_index = occurrence_weight_indexes[0]\n\n    else:\n      largest_weight = 0\n\n      for occurrence_index in occurrence_weight_indexes:\n        value_weight = value_weights.GetWeightForIndex(occurrence_index)\n\n        if not path_segment_index or largest_weight < value_weight:\n          largest_weight = value_weight\n          path_segment_index = occurrence_index\n\n    return path_segment_index", "docstring": "Retrieves the index of the path segment based on occurrence weights.\n\nArgs:\noccurrence_weights: the occurrence weights object (instance of\n_PathSegmentWeights).\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.", "source": "juraj-google-style"}
{"code": "def get_nested_streams(dmap):\n    \n    return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})", "docstring": "Recurses supplied DynamicMap to find all streams\n\nArgs:\ndmap: DynamicMap to recurse to look for streams\n\nReturns:\nList of streams that were found", "source": "juraj-google-style"}
{"code": "def __init__(self, cache_folder, genome_build):\n        \n        self.api_version = ('1')\n        self.genome_build = genome_build\n        self.today = datetime.today()\n        \n        if not os.path.exists(cache_folder):\n            os.mkdir(cache_folder)\n        \n        \n        path = os.path.join(cache_folder, \"ensembl_cache.db\")\n        if not os.path.exists(path):\n            try:\n                with sqlite3.connect(path) as conn:\n                    with conn as cursor:\n                        cursor.execute(\"CREATE TABLE ensembl \" \\\n                            \"(key text PRIMARY KEY, genome_build text, \" \\\n                            \"cache_date text, api_version text, data blob)\")\n            except sqlite3.OperationalError:\n                time.sleep(random.uniform(1, 5))\n        \n        self.conn = sqlite3.connect(path)\n        self.conn.row_factory = sqlite3.Row", "docstring": "initialise the class with the local cache folder\n\nArgs:\ncache_folder: path to the cache", "source": "juraj-google-style"}
{"code": "def get_historical_data(nmr_problems):\n    observations = np.tile(np.array([[10, 256, 202, 97]]), (nmr_problems, 1))\n    nmr_tanks_ground_truth = (np.ones((nmr_problems,)) * 276)\n    return (observations, nmr_tanks_ground_truth)", "docstring": "Get the historical tank data.\n\nArgs:\nnmr_problems (int): the number of problems\n\nReturns:\ntuple: (observations, nmr_tanks_ground_truth)", "source": "codesearchnet"}
{"code": "def force_rerun(flag, outfile):\n    if flag:\n        return True\n    elif ((not flag) and (not op.exists(outfile))):\n        return True\n    elif ((not flag) and (not is_non_zero_file(outfile))):\n        return True\n    else:\n        return False", "docstring": "Check if we should force rerunning of a command if an output file exists.\n\nArgs:\nflag (bool): Flag to force rerun.\noutfile (str): Path to output file which may already exist.\n\nReturns:\nbool: If we should force rerunning of a command\n\nExamples:\n>>> force_rerun(flag=True, outfile='/not/existing/file.txt')\nTrue\n\n>>> force_rerun(flag=False, outfile='/not/existing/file.txt')\nTrue\n\n>>> force_rerun(flag=True, outfile='./utils.py')\nTrue\n\n>>> force_rerun(flag=False, outfile='./utils.py')\nFalse", "source": "codesearchnet"}
{"code": "def quantile_for_single_value(self, **kwargs):\n    if self._is_transposed:\n        kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)\n        return self.transpose().quantile_for_single_value(**kwargs)\n    axis = kwargs.get('axis', 0)\n    q = kwargs.get('q', 0.5)\n    assert (type(q) is float)\n\n    def quantile_builder(df, **kwargs):\n        try:\n            return pandas.DataFrame.quantile(df, **kwargs)\n        except ValueError:\n            return pandas.Series()\n    func = self._build_mapreduce_func(quantile_builder, **kwargs)\n    result = self._full_axis_reduce(axis, func)\n    if (axis == 0):\n        result.index = [q]\n    else:\n        result.columns = [q]\n    return result", "docstring": "Returns quantile of each column or row.\n\nReturns:\nA new QueryCompiler object containing the quantile of each column or row.", "source": "codesearchnet"}
{"code": "def guaranteed_no_diff(modular_file_path, dependencies, models_in_diff):\n    model_name = modular_file_path.rsplit('modular_', 1)[1].replace('.py', '')\n    if model_name in models_in_diff:\n        return False\n    for dep in dependencies[modular_file_path]:\n        dependency_model_name = dep.split('.')[-2]\n        if dependency_model_name in models_in_diff:\n            return False\n    return True", "docstring": "Returns whether it is guaranteed to have no differences between the modular file and the modeling file.\n\nModel is in the diff -> not guaranteed to have no differences\nDependency is in the diff -> not guaranteed to have no differences\nOtherwise -> guaranteed to have no differences\n\nArgs:\nmodular_file_path: The path to the modular file.\ndependencies: A dictionary containing the dependencies of each modular file.\nmodels_in_diff: A set containing the names of the models that have been modified.\n\nReturns:\nA boolean indicating whether the model (code and tests) is guaranteed to have no differences.", "source": "github-repos"}
{"code": "def set_disk_usage(self, total_size, path=None):\n        \n        if path is None:\n            path = self.root.name\n        mount_point = self._mount_point_for_path(path)\n        if (mount_point['total_size'] is not None and\n                mount_point['used_size'] > total_size):\n            self.raise_io_error(errno.ENOSPC, path)\n        mount_point['total_size'] = total_size", "docstring": "Changes the total size of the file system, preserving the used space.\nExample usage: set the size of an auto-mounted Windows drive.\n\nArgs:\ntotal_size: The new total size of the filesystem in bytes.\n\npath: The disk space is changed for the file system device where\n`path` resides.\nDefaults to the root path (e.g. '/' on Unix systems).\n\nRaises:\nIOError: if the new space is smaller than the used size.", "source": "juraj-google-style"}
{"code": "def movies_box_office(self, **kwargs):\n    path = self._get_path('movies_box_office')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Gets the top box office earning movies from the API.\nSorted by most recent weekend gross ticket sales.\n\nArgs:\nlimit (optional): limits the number of movies returned, default=10\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def get_data(self, url, *args, **kwargs):\n        \n        res = self._conn.get(url, headers=self._prepare_headers(**kwargs))\n        if res.status_code == 200:\n            return res.text\n        else:\n            return None", "docstring": "Gets data from url as text\n\nReturns content under the provided url as text\n\nArgs:\n**url**: address of the wanted data\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nstring", "source": "juraj-google-style"}
{"code": "def base_multinode_parser():\n    base_parser = ArgumentParser(add_help=False)\n    base_parser.add_argument('urls', type=str, nargs='+', help=\"The URLs of the validator's REST APIs of interest, separated by commas or spaces. (no default)\")\n    base_parser.add_argument('--users', type=str, action='append', metavar='USERNAME[:PASSWORD]', help='Specify the users to authorize requests, in the same order as the URLs, separate by commas. Passing empty strings between commas is supported.')\n    return base_parser", "docstring": "Creates a parser with arguments specific to sending HTTP requests\nto multiple REST APIs.\n\nReturns:\n{ArgumentParser}: Base parser with default HTTP args", "source": "codesearchnet"}
{"code": "def _get_flags(osm_obj):\n    \n    flags = []\n    if osm_obj.visible:\n        flags.append('visible')\n    if osm_obj.user:\n        flags.append('user: %s' % osm_obj.user)\n    if osm_obj.timestamp:\n        flags.append('timestamp: %s' % osm_obj.timestamp.isoformat())\n    if osm_obj.tags:\n        flags.append(', '.join('%s: %s' % (k, v)\n                               for k, v in sorted(osm_obj.tags.items())))\n    return flags", "docstring": "Create element independent flags output.\n\nArgs:\nosm_obj (Node): Object with OSM-style metadata\n\nReturns:\nlist: Human readable flags output", "source": "juraj-google-style"}
{"code": "def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, data_format, use_gpu, x_init_value=None):\n    assert input_sizes[0] == output_sizes[0]\n    assert input_sizes[3] == output_sizes[3]\n    total_size = 1\n    for s in input_sizes:\n        total_size *= s\n    x = [f * 1.0 for f in range(1, total_size + 1)]\n    with self.cached_session(use_gpu=use_gpu):\n        input_tensor = constant_op.constant(x, shape=input_sizes, name='input')\n        if pool_func == nn_ops.avg_pool:\n            func_name = 'avg_pool'\n            err_tolerance = 0.0001\n        else:\n            if x_init_value is None:\n                x_init_value = np.asarray(np.arange(1, total_size + 1), dtype=np.float32).reshape(input_sizes)\n            func_name = 'max_pool'\n            err_tolerance = 0.001\n        if data_format == 'NCHW':\n            ksize = [1, 1, window_rows, window_cols]\n            strides = [1, 1, row_stride, col_stride]\n            if isinstance(padding, list):\n                padding = test_util.NHWCToNCHW(padding)\n            t = test_util.NHWCToNCHW(input_tensor)\n        else:\n            ksize = [1, window_rows, window_cols, 1]\n            strides = [1, row_stride, col_stride, 1]\n            t = input_tensor\n        t = pool_func(t, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=func_name)\n        if data_format == 'NCHW':\n            t = test_util.NCHWToNHWC(t)\n        err = gradient_checker.compute_gradient_error(input_tensor, input_sizes, t, output_sizes, x_init_value=x_init_value, delta=0.01)\n    tf_logging.info('%s gradient error = %.4f' % (func_name, err))\n    self.assertLess(err, err_tolerance)", "docstring": "Verifies the gradients of the max or avg pooling function.\n\nArgs:\npool_func: Function to be called, co.MaxPool, co.AvgPool,\nor the Lua version.\ninput_sizes: Input tensor dimensions.\noutput_sizes: Output tensor dimensions.\nwindow_rows: kernel size in row dim\nwindow_cols: kernel size in col dim\nrow_stride: Row Stride.\ncol_stride: Col Stride.\npadding: Padding type.\ndata_format: Data format.\nuse_gpu: whether we are running on GPU\nx_init_value: Values to be passed to the gradient checker.", "source": "github-repos"}
{"code": "def Reference(uri, meaning=None):\n    \n    attrib = {'uri': uri}\n    if meaning is not None:\n        attrib['meaning'] = meaning\n    return objectify.Element('Reference', attrib)", "docstring": "Represents external information, typically original obs data and metadata.\n\nArgs:\nuri(str): Uniform resource identifier for external data, e.g. FITS file.\nmeaning(str): The nature of the document referenced, e.g. what\ninstrument and filter was used to create the data?", "source": "juraj-google-style"}
{"code": "def _parse_schema_resource(info):\n    if ('fields' not in info):\n        return ()\n    schema = []\n    for r_field in info['fields']:\n        name = r_field['name']\n        field_type = r_field['type']\n        mode = r_field.get('mode', 'NULLABLE')\n        description = r_field.get('description')\n        sub_fields = _parse_schema_resource(r_field)\n        schema.append(SchemaField(name, field_type, mode, description, sub_fields))\n    return schema", "docstring": "Parse a resource fragment into a schema field.\n\nArgs:\ninfo: (Mapping[str->dict]): should contain a \"fields\" key to be parsed\n\nReturns:\n(Union[Sequence[:class:`google.cloud.bigquery.schema.SchemaField`],None])\na list of parsed fields, or ``None`` if no \"fields\" key found.", "source": "codesearchnet"}
{"code": "def SetValue(self, value, raise_on_error=True):\n    \n    type_mappings = [(Text, \"string\"), (bytes, \"data\"), (bool, \"boolean\"),\n                     (int, \"integer\"), (long, \"integer\"), (dict, \"dict\"),\n                     (float, \"float\")]\n\n    if value is None:\n      self.none = \"None\"\n\n    elif isinstance(value, rdfvalue.RDFValue):\n      self.rdf_value.data = value.SerializeToString()\n      self.rdf_value.age = int(value.age)\n      self.rdf_value.name = value.__class__.__name__\n\n    elif isinstance(value, (list, tuple)):\n      self.list.content.Extend([\n          DataBlob().SetValue(v, raise_on_error=raise_on_error) for v in value\n      ])\n\n    elif isinstance(value, set):\n      self.set.content.Extend([\n          DataBlob().SetValue(v, raise_on_error=raise_on_error) for v in value\n      ])\n\n    elif isinstance(value, dict):\n      self.dict.FromDict(value, raise_on_error=raise_on_error)\n\n    else:\n      for type_mapping, member in type_mappings:\n        if isinstance(value, type_mapping):\n          setattr(self, member, value)\n\n          return self\n\n      message = \"Unsupported type for ProtoDict: %s\" % type(value)\n      if raise_on_error:\n        raise TypeError(message)\n\n      setattr(self, \"string\", message)\n\n    return self", "docstring": "Receives a value and fills it into a DataBlob.\n\nArgs:\nvalue: value to set\nraise_on_error: if True, raise if we can't serialize.  If False, set the\nkey to an error string.\n\nReturns:\nself\nRaises:\nTypeError: if the value can't be serialized and raise_on_error is True", "source": "juraj-google-style"}
{"code": "def remove_profile(self, profile=None):\n        \n        with self.db:\n            return self.db.remove(self.query.profile == profile)", "docstring": "Remove profile from credentials file.\n\nArgs:\nprofile (str): Credentials profile to remove.\n\nReturns:\nlist: List of affected document IDs.", "source": "juraj-google-style"}
{"code": "def protect(self, developers_can_push=False, developers_can_merge=False, **kwargs):\n    id = self.get_id().replace('/', '%2F')\n    path = ('%s/%s/protect' % (self.manager.path, id))\n    post_data = {'developers_can_push': developers_can_push, 'developers_can_merge': developers_can_merge}\n    self.manager.gitlab.http_put(path, post_data=post_data, **kwargs)\n    self._attrs['protected'] = True", "docstring": "Protect the branch.\n\nArgs:\ndevelopers_can_push (bool): Set to True if developers are allowed\nto push to the branch\ndevelopers_can_merge (bool): Set to True if developers are allowed\nto merge to the branch\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabProtectError: If the branch could not be protected", "source": "codesearchnet"}
{"code": "def get_paths(self):\n    paths = []\n    for (key, child) in six.iteritems(self):\n        if (isinstance(child, TreeMap) and child):\n            for path in child.get_paths():\n                path.insert(0, key)\n                paths.append(path)\n        else:\n            paths.append([key])\n    return paths", "docstring": "Get all paths from the root to the leaves.\n\nFor example, given a chain like `{'a':{'b':{'c':None}}}`,\nthis method would return `[['a', 'b', 'c']]`.\n\nReturns:\nA list of lists of paths.", "source": "codesearchnet"}
{"code": "def load_dot_env_file(dot_env_path):\n    if (not os.path.isfile(dot_env_path)):\n        return {}\n    logger.log_info('Loading environment variables from {}'.format(dot_env_path))\n    env_variables_mapping = {}\n    with io.open(dot_env_path, 'r', encoding='utf-8') as fp:\n        for line in fp:\n            if ('=' in line):\n                (variable, value) = line.split('=', 1)\n            elif (':' in line):\n                (variable, value) = line.split(':', 1)\n            else:\n                raise exceptions.FileFormatError('.env format error')\n            env_variables_mapping[variable.strip()] = value.strip()\n    utils.set_os_environ(env_variables_mapping)\n    return env_variables_mapping", "docstring": "load .env file.\n\nArgs:\ndot_env_path (str): .env file path\n\nReturns:\ndict: environment variables mapping\n\n{\n\"UserName\": \"debugtalk\",\n\"Password\": \"123456\",\n\"PROJECT_KEY\": \"ABCDEFGH\"\n}\n\nRaises:\nexceptions.FileFormatError: If .env file format is invalid.", "source": "codesearchnet"}
{"code": "def get_meshes_vec(step, var):\n    \n    if step.geom.twod_xz:\n        xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]\n        vec1 = step.fields[var + '1'][:, 0, :, 0]\n        vec2 = step.fields[var + '3'][:, 0, :, 0]\n    elif step.geom.cartesian and step.geom.twod_yz:\n        xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]\n        vec1 = step.fields[var + '2'][0, :, :, 0]\n        vec2 = step.fields[var + '3'][0, :, :, 0]\n    else:  \n        xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]\n        pmesh = step.geom.p_mesh[0, :, :]\n        vec_phi = step.fields[var + '2'][0, :, :, 0]\n        vec_r = step.fields[var + '3'][0, :, :, 0]\n        vec1 = vec_r * np.cos(pmesh) - vec_phi * np.sin(pmesh)\n        vec2 = vec_phi * np.cos(pmesh) + vec_r * np.sin(pmesh)\n    return xmesh, ymesh, vec1, vec2", "docstring": "Return vector field components along with coordinates meshes.\n\nOnly works properly in 2D geometry.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): vector field name.\nReturns:\ntuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy\n2D arrays containing respectively the x position, y position, x\ncomponent and y component of the requested vector field.", "source": "juraj-google-style"}
{"code": "def option(self, key, value=None, **kwargs):\n        \n        if not isinstance(self._container, Section):\n            raise ValueError(\"Options can only be added inside a section!\")\n        option = Option(key, value, container=self._container, **kwargs)\n        option.value = value\n        self._container.structure.insert(self._idx, option)\n        self._idx += 1\n        return self", "docstring": "Creates a new option inside a section\n\nArgs:\nkey (str): key of the option\nvalue (str or None): value of the option\n**kwargs: are passed to the constructor of :class:`Option`\n\nReturns:\nself for chaining", "source": "juraj-google-style"}
{"code": "def truncated_normal_log_likelihood(params, low, high, data):\n    mu = params[0]\n    sigma = params[1]\n    if (sigma == 0):\n        return np.inf\n    ll = np.sum(norm.logpdf(data, mu, sigma))\n    ll -= (len(data) * np.log((norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))))\n    return (- ll)", "docstring": "Calculate the log likelihood of the truncated normal distribution.\n\nArgs:\nparams: tuple with (mean, std), the parameters under which we evaluate the model\nlow (float): the lower truncation bound\nhigh (float): the upper truncation bound\ndata (ndarray): the one dimension list of data points for which we want to calculate the likelihood\n\nReturns:\nfloat: the negative log likelihood of observing the given data under the given parameters.\nThis is meant to be used in minimization routines.", "source": "codesearchnet"}
{"code": "def __init__(self, maxsize, out_deque=None, **kw):\n        \n        super(DequeOutLRUCache, self).__init__(maxsize, **kw)\n        if out_deque is None:\n            out_deque = collections.deque()\n        elif not isinstance(out_deque, collections.deque):\n            raise ValueError(u'out_deque should be collections.deque')\n        self._out_deque = out_deque\n        self._tracking = {}", "docstring": "Constructor.\n\nArgs:\nmaxsize (int): the maximum number of entries in the queue\nout_deque :class:`collections.deque`: a `deque` in which to add items\nthat expire from the cache\n**kw: the other keyword args supported by constructor to\n:class:`cachetools.LRUCache`\n\nRaises:\nValueError: if out_deque is not a collections.deque", "source": "juraj-google-style"}
{"code": "def alternative_titles(self, **kwargs):\n        \n        path = self._get_id_path('alternative_titles')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the alternative titles for a specific movie id.\n\nArgs:\ncountry: (optional) ISO 3166-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def get_gene_info(ensembl_ids=None, hgnc_symbols=None):\n    \n    uniq_ensembl_ids = set(ensembl_id for ensembl_id in (ensembl_ids or []))\n    uniq_hgnc_symbols = set(hgnc_symbol for hgnc_symbol in (hgnc_symbols or []))\n    genes = []\n    gene_data = []\n    \n    if uniq_ensembl_ids:\n        for ensembl_id in uniq_ensembl_ids:\n            for res in query_gene(ensembl_id=ensembl_id):\n                gene_data.append(res) \n\n    elif uniq_hgnc_symbols:\n        for hgnc_symbol in uniq_hgnc_symbols:\n            query_res = query_gene(hgnc_symbol=hgnc_symbol)\n            if query_res:\n                for res in query_res:\n                    gene_data.append(res)\n            else:\n                \n                gene_data.append({\n                    'hgnc_symbol': hgnc_symbol,\n                    'hgnc_id': None,\n                    'ensembl_id': None,\n                    'description': None,\n                    'chrom': 'unknown',\n                    'start': 0,\n                    'stop': 0,\n                    'hi_score': None,\n                    'constraint_score': None,\n                })\n    for gene in gene_data:\n        genes.append(Gene(\n            symbol=gene ['hgnc_symbol'],\n            hgnc_id=gene['hgnc_id'],\n            ensembl_id=gene['ensembl_id'],\n            description=gene['description'],\n            chrom=gene['chrom'],\n            start=gene['start'],\n            stop=gene['stop'],\n            location=get_cytoband_coord(gene['chrom'], gene['start']),\n            hi_score=gene['hi_score'],\n            constraint_score=gene['constraint_score'],\n            omim_number=get_omim_number(gene['hgnc_symbol'])\n            ))\n\n    return genes", "docstring": "Return the genes info based on the transcripts found\n\nArgs:\nensembl_ids (Optional[list]): list of Ensembl gene ids\nhgnc_symbols (Optional[list]): list of HGNC gene symbols\n\nReturns:\niterable: an iterable with `Gene` objects", "source": "juraj-google-style"}
{"code": "def _WriteRow(self, output_writer, values):\n    maximum_row_width = ((self._MAXIMUM_WIDTH - self._column_width) - 3)\n    primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\\n'.format(self._column_width)\n    secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\\n'.format((self._column_width + 3))\n    if isinstance(values[1], py2to3.STRING_TYPES):\n        value_string = values[1]\n    else:\n        value_string = '{0!s}'.format(values[1])\n    if (len(value_string) < maximum_row_width):\n        output_writer.Write(primary_format_string.format(values[0], value_string))\n        return\n    words = value_string.split()\n    current = 0\n    lines = []\n    word_buffer = []\n    for word in words:\n        current += (len(word) + 1)\n        if (current >= maximum_row_width):\n            current = len(word)\n            lines.append(' '.join(word_buffer))\n            word_buffer = [word]\n        else:\n            word_buffer.append(word)\n    lines.append(' '.join(word_buffer))\n    output_writer.Write(primary_format_string.format(values[0], lines[0]))\n    for line in lines[1:]:\n        output_writer.Write(secondary_format_string.format('', line))", "docstring": "Writes a row of values aligned to the column width.\n\nArgs:\noutput_writer (OutputWriter): output writer.\nvalues (list[object]): values.", "source": "codesearchnet"}
{"code": "def DeregisterMountPoint(cls, mount_point):\n    if (mount_point not in cls._mount_points):\n        raise KeyError('Mount point: {0:s} not set.'.format(mount_point))\n    del cls._mount_points[mount_point]", "docstring": "Deregisters a path specification mount point.\n\nArgs:\nmount_point (str): mount point identifier.\n\nRaises:\nKeyError: if the corresponding mount point is not set.", "source": "codesearchnet"}
{"code": "def run_multiple_processes(args_list: List[List[str]], die_on_failure: bool=True) -> None:\n    for procargs in args_list:\n        start_process(procargs)\n    wait_for_processes(die_on_failure=die_on_failure)", "docstring": "Fire up multiple processes, and wait for them to finihs.\n\nArgs:\nargs_list: command arguments for each process\ndie_on_failure: see :func:`wait_for_processes`", "source": "codesearchnet"}
{"code": "def ask_when_work_is_populated(self, work):\n    work.read_all_from_datastore()\n    if work.work:\n        print('Work is already written to datastore.\\nIf you continue these data will be overwritten and possible corrupted.')\n        inp = input_str('Do you want to continue? (type \"yes\" without quotes to confirm): ')\n        return (inp == 'yes')\n    else:\n        return True", "docstring": "When work is already populated asks whether we should continue.\n\nThis method prints warning message that work is populated and asks\nwhether user wants to continue or not.\n\nArgs:\nwork: instance of WorkPiecesBase\n\nReturns:\nTrue if we should continue and populate datastore, False if we should stop", "source": "codesearchnet"}
{"code": "def stats(path, hash_type='sha256', follow_symlinks=True):\n    if (not os.path.exists(path)):\n        raise CommandExecutionError('Path not found: {0}'.format(path))\n    if (follow_symlinks and (sys.getwindowsversion().major >= 6)):\n        path = _resolve_symlink(path)\n    pstat = os.stat(path)\n    ret = {}\n    ret['inode'] = pstat.st_ino\n    ret['uid'] = get_uid(path, follow_symlinks=False)\n    ret['gid'] = ret['uid']\n    ret['user'] = uid_to_user(ret['uid'])\n    ret['group'] = ret['user']\n    ret['pgid'] = get_pgid(path, follow_symlinks)\n    ret['pgroup'] = gid_to_group(ret['pgid'])\n    ret['atime'] = pstat.st_atime\n    ret['mtime'] = pstat.st_mtime\n    ret['ctime'] = pstat.st_ctime\n    ret['size'] = pstat.st_size\n    ret['mode'] = six.text_type(oct(stat.S_IMODE(pstat.st_mode)))\n    if hash_type:\n        ret['sum'] = get_sum(path, hash_type)\n    ret['type'] = 'file'\n    if stat.S_ISDIR(pstat.st_mode):\n        ret['type'] = 'dir'\n    if stat.S_ISCHR(pstat.st_mode):\n        ret['type'] = 'char'\n    if stat.S_ISBLK(pstat.st_mode):\n        ret['type'] = 'block'\n    if stat.S_ISREG(pstat.st_mode):\n        ret['type'] = 'file'\n    if stat.S_ISLNK(pstat.st_mode):\n        ret['type'] = 'link'\n    if stat.S_ISFIFO(pstat.st_mode):\n        ret['type'] = 'pipe'\n    if stat.S_ISSOCK(pstat.st_mode):\n        ret['type'] = 'socket'\n    ret['target'] = os.path.realpath(path)\n    return ret", "docstring": "Return a dict containing the stats about a given file\n\nUnder Windows, `gid` will equal `uid` and `group` will equal `user`.\n\nWhile a file in Windows does have a 'primary group', this rarely used\nattribute generally has no bearing on permissions unless intentionally\nconfigured and is only used to support Unix compatibility features (e.g.\nServices For Unix, NFS services).\n\nSalt, therefore, remaps these properties to keep some kind of\ncompatibility with Unix behavior. If the 'primary group' is required, it\ncan be accessed in the `pgroup` and `pgid` properties.\n\nArgs:\npath (str): The path to the file or directory\nhash_type (str): The type of hash to return\nfollow_symlinks (bool):\nIf the object specified by ``path`` is a symlink, get attributes of\nthe linked file instead of the symlink itself. Default is True\n\nReturns:\ndict: A dictionary of file/directory stats\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.stats /etc/passwd", "source": "codesearchnet"}
{"code": "def json_compat_obj_encode(data_type, obj, caller_permissions=None, alias_validators=None, old_style=False, for_msgpack=False, should_redact=False):\n    serializer = StoneToPythonPrimitiveSerializer(caller_permissions, alias_validators, for_msgpack, old_style, should_redact)\n    return serializer.encode(data_type, obj)", "docstring": "Encodes an object into a JSON-compatible dict based on its type.\n\nArgs:\ndata_type (Validator): Validator for obj.\nobj (object): Object to be serialized.\ncaller_permissions (list): The list of raw-string caller permissions\nwith which to serialize.\n\nReturns:\nAn object that when passed to json.dumps() will produce a string\ngiving the JSON-encoded object.\n\nSee json_encode() for additional information about validation.", "source": "codesearchnet"}
{"code": "def like_shared_file(self, sharekey=None):\n        \n\n        if not sharekey:\n            raise Exception(\n                \"You must specify a sharekey of the file you\"\n                \"want to 'like'.\")\n\n        endpoint = '/api/sharedfile/{sharekey}/like'.format(sharekey=sharekey)\n        data = self._make_request(\"POST\", endpoint=endpoint, data=None)\n\n        try:\n            sf = SharedFile.NewFromJSON(data)\n            sf.liked = True\n            return sf\n        except:\n            raise Exception(\"{0}\".format(data['error']))", "docstring": "'Like' a SharedFile. mlkshk doesn't allow you to unlike a\nsharedfile, so this is ~~permanent~~.\n\nArgs:\nsharekey (str): Sharekey for the file you want to 'like'.\n\nReturns:\nEither a SharedFile on success, or an exception on error.", "source": "juraj-google-style"}
{"code": "def delete_resource_group(access_token, subscription_id, rgname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '?api-version=', RESOURCE_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def generate_private_key(self):\n    random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')\n    binary_data = bytes(random_string, 'utf-8')\n    hash_object = hashlib.sha256(binary_data)\n    message_digest_bin = hash_object.digest()\n    message_digest_hex = binascii.hexlify(message_digest_bin)\n    return message_digest_hex", "docstring": "Generates a private key based on the password.\n\nSHA-256 is a member of the SHA-2 cryptographic hash functions designed by\nthe NSA. SHA stands for Secure Hash Algorithm. The password is converted\nto bytes and hashed with SHA-256. The binary output is converted to a hex\nrepresentation.\n\nArgs:\ndata (str): The data to be hashed with SHA-256.\n\nReturns:\nbytes: The hexadecimal representation of the hashed binary data.", "source": "codesearchnet"}
{"code": "def __init__(self, unexpected_method, expected):\n    \n\n    Error.__init__(self)\n    self._unexpected_method = unexpected_method\n    self._expected = expected", "docstring": "Init exception.\n\nArgs:\n# unexpected_method: MockMethod that was called but was not at the head of\n#   the expected_method queue.\n# expected: MockMethod or UnorderedGroup the method should have\n#   been in.\nunexpected_method: MockMethod\nexpected: MockMethod or UnorderedGroup", "source": "juraj-google-style"}
{"code": "def RetrievePluginAsset(self, plugin_name, asset_name):\n    \n    return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)", "docstring": "Return the contents of a given plugin asset.\n\nArgs:\nplugin_name: The string name of a plugin.\nasset_name: The string name of an asset.\n\nReturns:\nThe string contents of the plugin asset.\n\nRaises:\nKeyError: If the asset is not available.", "source": "juraj-google-style"}
{"code": "def watch(self, key, pipeline=False):\n    if pipeline:\n        self._pipeline.watch(key)\n    else:\n        self._db.watch(key)", "docstring": "Watch the given key.\n\nMarks the given key to be watch for conditional execution\nof a transaction.\n\nArgs:\nkey (str): Key that needs to be watched\npipeline (bool): True, start a transaction block. Default false.", "source": "codesearchnet"}
{"code": "def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):\n  \n\n  if not tf.gfile.Exists(data_dir):\n    tf.gfile.MakeDirs(data_dir)\n\n  file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)\n  tar_all = tarfile.open(file_path)\n  tar_all.extractall(tmp_dir)\n  tar_all.close()\n  tar_train = tarfile.open(os.path.join(tmp_dir, \"train-novels.tar\"))\n  tar_train.extractall(tmp_dir)\n  tar_train.close()\n\n  vocab_path = os.path.join(data_dir, vocab_filename)\n  if not tf.gfile.Exists(vocab_path):\n    with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), \"r\") as infile:\n      reader = csv.reader(infile, delimiter=\"\\t\")\n      words = [row[0] for row in reader]\n      words = [_UNK] + words[:vocab_size]\n    with tf.gfile.GFile(vocab_path, \"w\") as outfile:\n      outfile.write(\"\\n\".join(words))", "docstring": "Downloading and preparing the dataset.\n\nArgs:\ntmp_dir: tem directory\ndata_dir: data directory\nvocab_size: size of vocabulary\nvocab_filename: name of vocab file", "source": "juraj-google-style"}
{"code": "def _replace_tensors_for_gradient(x, grad):\n    if not isinstance(x, composite_tensor.CompositeTensor):\n        return grad\n    if not isinstance(x, CompositeTensorGradientProtocol):\n        raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source.')\n    composite_gradient = x.__composite_gradient__\n    x_components = composite_gradient.get_gradient_components(x)\n    if x_components is x:\n        grad_components = grad\n    else:\n        grad_components = nest.map_structure_up_to(x_components, _replace_tensors_for_gradient, x_components, grad)\n    if grad_components is None:\n        return None\n    return composite_gradient.replace_gradient_components(x, grad_components)", "docstring": "Replaces the tensors in `x` that should be differentiated with `grad`.\n\nArgs:\nx: A `Tensor` or `CompositeTensor`.\ngrad: A nested structure of `Tensor`, with the same structure as the value\nreturned by `_get_tensors_for_gradient(x)`.\n\nReturns:\nA `Tensor` or `CompositeTensor`.", "source": "github-repos"}
{"code": "def get_organization(self):\n    return hdx.data.organization.Organization.read_from_hdx(self.data['owner_org'], configuration=self.configuration)", "docstring": "Get the dataset's organization.\n\nReturns:\nOrganization: Dataset's organization", "source": "codesearchnet"}
{"code": "def _run_static_range_ptq(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, representative_dataset: Mapping[str, _RepresentativeDatasetFile], signature_def_map: _SignatureDefMap) -> None:\n    logging.info('Running static-range post-training quantization.')\n    signature_def_map_serialized = _serialize_signature_def_map(signature_def_map)\n    dataset_file_map_serialized = {signature_key: dataset_file.SerializeToString() for signature_key, dataset_file in representative_dataset.items()}\n    pywrap_quantize_model.quantize_ptq_static_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary(), representative_dataset_file_map_serialized=dataset_file_map_serialized)", "docstring": "Runs static-range Post-Training Quantization.\n\nRuns static-range PTQ for the model. Runs the calibration step with\n`representative_dataset` to collect statistics required for quantization. This\nproduces the quantized GraphDef along with the SignatureDefs which might have\nbeen modified according to the changes in the graph.\n\nArgs:\nsrc_saved_model_path: Path to the source SavedModel directory.\ndst_saved_model_path: Path to the destination SavedModel directory.\nquant_opts: Quantization options.\nrepresentative_dataset: A map from signature key to the saved representative\ndataset file.\nsignature_def_map: Signature def key -> SignatureDef mapping.\n\nRaises:\nValueError if the graph doesn't contain a valid signature.", "source": "github-repos"}
{"code": "def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):\n    logger = getLogger('decode.utils.ndarray.psd')\n    if overlap_half:\n        step = int((len(data) / (ndivide + 1)))\n        size = (step * 2)\n    else:\n        step = int((len(data) / ndivide))\n        size = step\n    if (bin(len(data)).count('1') != 1):\n        logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))\n    size = int((len(data) / ndivide))\n    if (bin(size).count('1') != 1.0):\n        if overlap_half:\n            logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))\n        else:\n            logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))\n    psd = np.zeros(size)\n    T = ((size - 1) * dt)\n    vs = (1 / dt)\n    vk_ = fftfreq(size, dt)\n    vk = vk_[np.where((vk_ >= 0))]\n    for i in range(ndivide):\n        d = data[(i * step):((i * step) + size)]\n        if (window is None):\n            w = np.ones(size)\n            corr = 1.0\n        else:\n            w = window(size)\n            corr = np.mean((w ** 2))\n        psd = (psd + ((((2 * (np.abs(fft((d * w))) ** 2)) / size) * dt) / corr))\n    return (vk, (psd[:len(vk)] / ndivide))", "docstring": "Calculate power spectrum density of data.\n\nArgs:\ndata (np.ndarray): Input data.\ndt (float): Time between each data.\nndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).\nax (matplotlib.axes): Axis you want to plot on.\ndoplot (bool): Plot how averaging works.\noverlap_half (bool): Split data to half-overlapped regions.\n\nReturns:\nvk (np.ndarray): Frequency.\npsd (np.ndarray): PSD", "source": "codesearchnet"}
{"code": "def observe_reward_value(self, state_key, action_key):\n        \n        x, y = state_key\n\n        if self.__map_arr[y][x] == self.__end_point_label:\n            return 100.0\n        elif self.__map_arr[y][x] == self.__start_point_label:\n            return 0.0\n        elif self.__map_arr[y][x] == self.__wall_label:\n            raise ValueError(\"It is the wall. (x, y)=(%d, %d)\" % (x, y))\n        else:\n            reward_value = float(self.__map_arr[y][x])\n            self.save_r_df(state_key, reward_value)\n            return reward_value", "docstring": "Compute the reward value.\n\nArgs:\nstate_key:              The key of state.\naction_key:             The key of action.\n\nReturns:\nReward value.", "source": "juraj-google-style"}
{"code": "def edgelist_to_adjacency(edgelist):\n    \n    adjacency = dict()\n    for u, v in edgelist:\n        if u in adjacency:\n            adjacency[u].add(v)\n        else:\n            adjacency[u] = {v}\n        if v in adjacency:\n            adjacency[v].add(u)\n        else:\n            adjacency[v] = {u}\n    return adjacency", "docstring": "Converts an iterator of edges to an adjacency dict.\n\nArgs:\nedgelist (iterable):\nAn iterator over 2-tuples where each 2-tuple is an edge.\n\nReturns:\ndict: The adjacency dict. A dict of the form {v: Nv, ...} where v is a node in a graph and\nNv is the neighbors of v as an set.", "source": "juraj-google-style"}
{"code": "def get_available_transcript_languages(video_id):\n    available_languages = VideoTranscript.objects.filter(video__edx_video_id=video_id).values_list('language_code', flat=True)\n    return list(available_languages)", "docstring": "Get available transcript languages\n\nArguments:\nvideo_id(unicode): An id identifying the Video.\n\nReturns:\nA list containing transcript language codes for the Video.", "source": "codesearchnet"}
{"code": "def __discovery_doc_descriptor(self, services, hostname=None):\n    merged_api_info = self.__get_merged_api_info(services)\n    descriptor = self.get_descriptor_defaults(merged_api_info, hostname=hostname)\n    description = merged_api_info.description\n    if ((not description) and (len(services) == 1)):\n        description = services[0].__doc__\n    if description:\n        descriptor['description'] = description\n    descriptor['parameters'] = self.__standard_parameters_descriptor()\n    descriptor['auth'] = self.__standard_auth_descriptor(services)\n    if merged_api_info.namespace:\n        descriptor['ownerDomain'] = merged_api_info.namespace.owner_domain\n        descriptor['ownerName'] = merged_api_info.namespace.owner_name\n        descriptor['packagePath'] = (merged_api_info.namespace.package_path or '')\n    else:\n        if (merged_api_info.owner_domain is not None):\n            descriptor['ownerDomain'] = merged_api_info.owner_domain\n        if (merged_api_info.owner_name is not None):\n            descriptor['ownerName'] = merged_api_info.owner_name\n        if (merged_api_info.package_path is not None):\n            descriptor['packagePath'] = merged_api_info.package_path\n    method_map = {}\n    method_collision_tracker = {}\n    rest_collision_tracker = {}\n    resource_index = collections.defaultdict(list)\n    resource_map = {}\n    for service in services:\n        remote_methods = service.all_remote_methods()\n        for (protorpc_meth_name, protorpc_meth_info) in remote_methods.iteritems():\n            method_info = getattr(protorpc_meth_info, 'method_info', None)\n            if (method_info is None):\n                continue\n            path = method_info.get_path(service.api_info)\n            method_id = method_info.method_id(service.api_info)\n            canonical_method_id = self._get_canonical_method_id(method_id)\n            resource_path = self._get_resource_path(method_id)\n            if (method_id in method_collision_tracker):\n                raise api_exceptions.ApiConfigurationError(('Method %s used multiple times, in classes %s and %s' % (method_id, method_collision_tracker[method_id], service.__name__)))\n            else:\n                method_collision_tracker[method_id] = service.__name__\n            rest_identifier = (method_info.http_method, path)\n            if (rest_identifier in rest_collision_tracker):\n                raise api_exceptions.ApiConfigurationError(('%s path \"%s\" used multiple times, in classes %s and %s' % (method_info.http_method, path, rest_collision_tracker[rest_identifier], service.__name__)))\n            else:\n                rest_collision_tracker[rest_identifier] = service.__name__\n            if resource_path:\n                resource_index[resource_path[0]].append((service, protorpc_meth_info))\n            else:\n                method_map[canonical_method_id] = self.__method_descriptor(service, method_info, protorpc_meth_info)\n    for (resource, resource_methods) in resource_index.items():\n        resource_map[resource] = self.__resource_descriptor(resource, resource_methods)\n    if method_map:\n        descriptor['methods'] = method_map\n    if resource_map:\n        descriptor['resources'] = resource_map\n    schemas = self.__schemas_descriptor()\n    if schemas:\n        descriptor['schemas'] = schemas\n    return descriptor", "docstring": "Builds a discovery doc for an API.\n\nArgs:\nservices: List of protorpc.remote.Service instances implementing an\napi/version.\nhostname: 
string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\nA dictionary that can be deserialized into JSON in discovery doc format.\n\nRaises:\nApiConfigurationError: If there's something wrong with the API\nconfiguration, such as a multiclass API decorated with different API\ndescriptors (see the docstring for api()), or a repeated method\nsignature.", "source": "codesearchnet"}
{"code": "def include_revision(revision_num, skip_factor=1.1):\n  \n  if skip_factor <= 1.0:\n    return True\n  return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(\n      math.log(revision_num + 2.0) / math.log(skip_factor)))", "docstring": "Decide whether to include a revision.\n\nIf the number of revisions is large, we exclude some revisions to avoid\na quadratic blowup in runtime, since the article is likely also large.\n\nWe make the ratio between consecutive included revision numbers\nappproximately equal to \"factor\".\n\nArgs:\nrevision_num: an integer\nskip_factor: a floating point number >= 1.0\n\nReturns:\na boolean", "source": "juraj-google-style"}
{"code": "def _get_tables(self, base_dir):\n        \n        table_dict = {}\n\n        for table in self.metadata['tables']:\n            if table['use']:\n                relative_path = os.path.join(base_dir, self.metadata['path'], table['path'])\n                data_table = pd.read_csv(relative_path)\n                pii_fields = self._get_pii_fields(table)\n                data_table = self._anonymize_table(data_table, pii_fields)\n\n                table_dict[table['name']] = (data_table, table)\n\n        return table_dict", "docstring": "Load the contents of meta_file and the corresponding data.\n\nIf fields containing Personally Identifiable Information are detected in the metadata\nthey are anonymized before asign them into `table_dict`.\n\nArgs:\nbase_dir(str): Root folder of the dataset files.\n\nReturns:\ndict: Mapping str -> tuple(pandas.DataFrame, dict)", "source": "juraj-google-style"}
{"code": "def fn(x: tuple[int]):\n    return x", "docstring": "Test function\n\nArgs:\nx: The input\n\n\nReturns:\nThe output", "source": "github-repos"}
{"code": "def find(self, _id, instance = None):\n        \n        \n        if instance is None:\n            \n            return self.service_instance.find(_id)\n        else:\n            \n            return self.service_binding.find(_id, instance)", "docstring": "Find\n\nArgs:\n_id (str): instance id or binding Id\n\nKeyword Arguments:\ninstance (AtlasServiceInstance.Instance): Existing instance\n\nReturns:\nAtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.", "source": "juraj-google-style"}
{"code": "def hide_tool(self, context_name, tool_name):\n    data = self._context(context_name)\n    hidden_tools = data['hidden_tools']\n    if (tool_name not in hidden_tools):\n        self._validate_tool(context_name, tool_name)\n        hidden_tools.add(tool_name)\n        self._flush_tools()", "docstring": "Hide a tool so that it is not exposed in the suite.\n\nArgs:\ncontext_name (str): Context containing the tool.\ntool_name (str): Name of tool to hide.", "source": "codesearchnet"}
{"code": "def __init__(self, context_type=ContextType.PATH, debug=False):\n\t\t\n\n\t\tself._selector = DefaultSelector()\n\t\tself._interface = Interface()\n\t\tif context_type == ContextType.UDEV:\n\t\t\tself._udev = self._libudev.udev_new()\n\t\t\tself._li = self._libinput.libinput_udev_create_context(\n\t\t\t\tbyref(self._interface), None, self._udev)\n\t\telif context_type == ContextType.PATH:\n\t\t\tself._li = self._libinput.libinput_path_create_context(\n\t\t\t\tbyref(self._interface), None)\n\t\tself._log_handler = lambda pr, strn: print(pr.name, ': ', strn)\n\t\tself._set_default_log_handler()\n\t\tif debug:\n\t\t\tself._libinput.libinput_log_set_priority(\n\t\t\t\tself._li, LogPriority.DEBUG)\n\t\tself._selector.register(\n\t\t\tself._libinput.libinput_get_fd(self._li), EVENT_READ)", "docstring": "Initialize context.\n\nArgs:\ncontext_type (~libinput.constant.ContextType): If\n:attr:`~libinput.constant.ContextType.UDEV` devices are\nadded/removed from udev seat. If\n:attr:`~libinput.constant.ContextType.PATH` devices have to be\nadded/removed manually.\ndebug (bool): If false, only errors are printed.", "source": "juraj-google-style"}
{"code": "def _get_predictions(self, data, break_ties='random', return_probs=False, **kwargs):\n    data_loader = self._create_data_loader(data)\n    Y_p = []\n    Y = []\n    Y_s = []\n    for (batch_num, data) in enumerate(data_loader):\n        (Xb, Yb) = data\n        Y.append(self._to_numpy(Yb))\n        if (self.config['device'] != 'cpu'):\n            Xb = place_on_gpu(Xb)\n        (Y_pb, Y_sb) = self.predict(Xb, break_ties=break_ties, return_probs=True, **kwargs)\n        Y_p.append(self._to_numpy(Y_pb))\n        Y_s.append(self._to_numpy(Y_sb))\n    (Y_p, Y, Y_s) = map(self._stack_batches, [Y_p, Y, Y_s])\n    if return_probs:\n        return (Y_p, Y, Y_s)\n    else:\n        return (Y_p, Y)", "docstring": "Computes predictions in batch, given a labeled dataset\n\nArgs:\ndata: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y):\nX: The input for the predict method\nY: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels\nin {1,...,k}\nbreak_ties: How to break ties when making predictions\nreturn_probs: Return the predicted probabilities as well\n\nReturns:\nY_p: A Tensor of predictions\nY: A Tensor of labels\n[Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]", "source": "codesearchnet"}
{"code": "def write_to_file(src, dst):\n    n = 0\n    for block in src:\n        dst.write(block)\n        n += len(block)\n    return n", "docstring": "Write data from `src` into `dst`.\n\nArgs:\nsrc (iterable): iterable that yields blocks of data to write\ndst (file-like object): file-like object that must support\n.write(block)\n\nReturns:\nnumber of bytes written to `dst`", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    compression_method = getattr(path_spec, 'compression_method', None)\n    if not compression_method:\n      raise errors.PathSpecError(\n          'Unsupported path specification without compression method.')\n\n    self._compression_method = compression_method", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def make_block_creator(yaml_path, filename=None):\n    (sections, yamlname, docstring) = Section.from_yaml(yaml_path, filename)\n    yamldir = os.path.dirname(yaml_path)\n    controller_sections = [s for s in sections if (s.section == 'controllers')]\n    assert (len(controller_sections) == 1), ('Expected exactly 1 controller, got %s' % (controller_sections,))\n    controller_section = controller_sections[0]\n\n    def block_creator(kwargs):\n        defines = _create_defines(sections, yamlname, yamldir, kwargs)\n        (controllers, parts) = _create_blocks_and_parts(sections, defines)\n        controller = controller_section.instantiate(defines)\n        for part in parts:\n            controller.add_part(part)\n        controllers.append(controller)\n        return controllers\n    creator = creator_with_nice_signature(block_creator, sections, yamlname, yaml_path, docstring)\n    return creator", "docstring": "Make a collection function that will create a list of blocks\n\nArgs:\nyaml_path (str): File path to YAML file, or a file in the same dir\nfilename (str): If give, use this filename as the last element in\nthe yaml_path (so yaml_path can be __file__)\n\nReturns:\nfunction: A collection function decorated with @takes. This can be\nused in other blocks or instantiated by the process. If the\nYAML text specified controllers or parts then a block instance\nwith the given name will be instantiated. If there are any\nblocks listed then they will be called. All created blocks\nby this or any sub collection will be returned", "source": "codesearchnet"}
{"code": "def __init__(self, name, collections=None, capture_by_value=None, structured_input_signature=None, structured_outputs=None):\n    super().__init__()\n    self.name = name\n    self.inputs = []\n    self.outputs = []\n    self.control_outputs = []\n    self.structured_input_signature = structured_input_signature\n    self.structured_outputs = structured_outputs\n    self._resource_tensor_inputs = object_identity.ObjectIdentitySet()\n    self._weak_variables = []\n    self._watched_variables = object_identity.ObjectIdentityWeakSet()\n    self.is_control_flow_graph = False\n    self._function_captures = capture_container.FunctionCaptures()\n    outer_graph = ops.get_default_graph()\n    self._weak_outer_graph = weakref.ref(outer_graph)\n    while outer_graph.building_function:\n        outer_graph = outer_graph.outer_graph\n    self._fallback_outer_graph = outer_graph\n    self._output_names = None\n    if capture_by_value is not None:\n        self.capture_by_value = capture_by_value\n    elif self.outer_graph is not None and isinstance(self.outer_graph, FuncGraph):\n        self.capture_by_value = self.outer_graph.capture_by_value\n    else:\n        self.capture_by_value = False\n    self._building_function = True\n    graph = self.outer_graph\n    if context.executing_eagerly():\n        self.seed = context.global_seed()\n        self._seed_used = False\n    else:\n        self.seed = graph.seed\n        self._seed_used = False\n        self._colocation_stack = graph._colocation_stack.copy()\n    if collections is None:\n        for collection_name in graph.get_all_collection_keys():\n            if collection_name not in ALLOWLIST_COLLECTIONS:\n                self._collections[collection_name] = graph.get_collection(collection_name)\n        for collection_name in ALLOWLIST_COLLECTIONS:\n            self._collections[collection_name] = graph.get_collection_ref(collection_name)\n    else:\n        self._collections = collections\n    self._saveable = True\n    self._saving_errors = set()\n    self._scope_exit_callbacks = None", "docstring": "Construct a new FuncGraph.\n\nThe graph will inherit its graph key, collections, seed, and distribution\nstrategy stack from the current context or graph.\n\nArgs:\nname: the name of the function.\ncollections: a dictionary of collections this FuncGraph should start with.\nIf not specified (None), the FuncGraph will read (but not write to) the\nouter graph's collections that are not allowlisted, and both read and\nwrite to the outer graph's collections that are allowlisted. The current\nallowlisted collections are the global variables, the local variables,\nand the trainable variables. Defaults to None.\ncapture_by_value: An optional boolean. If True, the func graph will\ncapture Variables by value instead of reference. By default inherit from\nouter graphs, and failing that will default to False.\nstructured_input_signature: Optional. The structured input signature to\nuse for initializing the FuncGraph. See the docstring for FuncGraph for\nmore information.\nstructured_outputs: Optional. The structured outputs to use for\ninitializing the FuncGraph. See the docstring for FuncGraph for more\ninformation.", "source": "github-repos"}
{"code": "def join_pretty_tensors(tensors, output, join_function=None, name='join'):\n  \n  if not tensors:\n    raise ValueError('pretty_tensors must be a non-empty sequence.')\n  with output.g.name_scope(name):\n    if join_function is None:\n      \n      last_dim = len(tensors[0].shape) - 1\n      return output.with_tensor(tf.concat(tensors, last_dim))\n    else:\n      return output.with_tensor(join_function(tensors))", "docstring": "Joins the list of pretty_tensors and sets head of output_pretty_tensor.\n\nArgs:\ntensors: A sequence of Layers or SequentialLayerBuilders to join.\noutput: A pretty_tensor to set the head with the result.\njoin_function: A function to join the tensors, defaults to concat on the\nlast dimension.\nname: A name that is used for the name_scope\nReturns:\nThe result of calling with_tensor on output\nRaises:\nValueError: if pretty_tensors is None or empty.", "source": "juraj-google-style"}
{"code": "def decode_terminated(data, encoding, strict=True):\n    codec_info = codecs.lookup(encoding)\n    encoding = codec_info.name\n    if (encoding in ('utf-8', 'iso8859-1')):\n        index = data.find(b'\\x00')\n        if (index == (- 1)):\n            res = (data.decode(encoding), b'')\n            if strict:\n                raise ValueError('not null terminated')\n            else:\n                return res\n        return (data[:index].decode(encoding), data[(index + 1):])\n    decoder = codec_info.incrementaldecoder()\n    r = []\n    for (i, b) in enumerate(iterbytes(data)):\n        c = decoder.decode(b)\n        if (c == u'\\x00'):\n            return (u''.join(r), data[(i + 1):])\n        r.append(c)\n    else:\n        r.append(decoder.decode(b'', True))\n        if strict:\n            raise ValueError('not null terminated')\n        return (u''.join(r), b'')", "docstring": "Returns the decoded data until the first NULL terminator\nand all data after it.\n\nArgs:\ndata (bytes): data to decode\nencoding (str): The codec to use\nstrict (bool): If True will raise ValueError in case no NULL is found\nbut the available data decoded successfully.\nReturns:\nTuple[`text`, `bytes`]: A tuple containing the decoded text and the\nremaining data after the found NULL termination.\n\nRaises:\nUnicodeError: In case the data can't be decoded.\nLookupError:In case the encoding is not found.\nValueError: In case the data isn't null terminated (even if it is\nencoded correctly) except if strict is False, then the decoded\nstring will be returned anyway.", "source": "codesearchnet"}
{"code": "def convert(self, graph_def, input_tensors, output_tensors):\n    self._validate_inputs(graph_def, input_tensors)\n    converter_kwargs = self._get_base_converter_args()\n    converter_kwargs.update(self._quant_mode.converter_flags())\n    if not self.experimental_new_converter:\n        logging.warning('Please consider switching to the new converter by setting experimental_new_converter=True. The old converter is deprecated.')\n    else:\n        logging.info('Using new converter: If you encounter a problem please file a bug. You can opt-out by setting experimental_new_converter=False')\n    result = _convert_graphdef(input_data=graph_def, input_tensors=input_tensors, output_tensors=output_tensors, **converter_kwargs)\n    return self._optimize_tflite_model(result, self._quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nArgs:\ngraph_def: Frozen TensorFlow GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.\n\nReturns:\nThe converted data in serialized format.\n\nRaises:\nValueError:\nNo concrete function is specified.\nMultiple concrete functions are specified.\nInput shape is not specified.\nInvalid quantization parameters.", "source": "github-repos"}
{"code": "def get_space_group_info(self, symprec=0.01, angle_tolerance=5.0):\n    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n    a = SpacegroupAnalyzer(self, symprec=symprec, angle_tolerance=angle_tolerance)\n    return (a.get_space_group_symbol(), a.get_space_group_number())", "docstring": "Convenience method to quickly get the spacegroup of a structure.\n\nArgs:\nsymprec (float): Same definition as in SpacegroupAnalyzer.\nDefaults to 1e-2.\nangle_tolerance (float): Same definition as in SpacegroupAnalyzer.\nDefaults to 5 degrees.\n\nReturns:\nspacegroup_symbol, international_number", "source": "codesearchnet"}
{"code": "def random_density_matrix(length, rank=None, method='Hilbert-Schmidt', seed=None):\n    if (method == 'Hilbert-Schmidt'):\n        return __random_density_hs(length, rank, seed)\n    elif (method == 'Bures'):\n        return __random_density_bures(length, rank, seed)\n    else:\n        raise QiskitError('Error: unrecognized method {}'.format(method))", "docstring": "Generate a random density matrix rho.\n\nArgs:\nlength (int): the length of the density matrix.\nrank (int or None): the rank of the density matrix. The default\nvalue is full-rank.\nmethod (string): the method to use.\n'Hilbert-Schmidt': sample rho from the Hilbert-Schmidt metric.\n'Bures': sample rho from the Bures metric.\nseed (int): Optional. To set a random seed.\nReturns:\nndarray: rho (length, length) a density matrix.\nRaises:\nQiskitError: if the method is not valid.", "source": "codesearchnet"}
{"code": "def get_coding_intervals(self, build='37', genes=None):\n        \n        intervals = {}\n        if not genes:\n            genes = self.all_genes(build=build)\n        LOG.info(\"Building interval trees...\")\n        for i,hgnc_obj in enumerate(genes):\n            chrom = hgnc_obj['chromosome']\n            start = max((hgnc_obj['start'] - 5000), 1)\n            end = hgnc_obj['end'] + 5000\n\n            \n            \n            if chrom not in intervals:\n                intervals[chrom] = intervaltree.IntervalTree()\n                intervals[chrom].addi(start, end, i)\n                continue\n\n            res = intervals[chrom].search(start, end)\n\n            \n            if not res:\n                intervals[chrom].addi(start, end, i)\n                continue\n\n            \n            for interval in res:\n                \n                if interval.begin < start:\n                    start = interval.begin\n\n                if interval.end > end:\n                    end = interval.end\n\n                \n                intervals[chrom].remove(interval)\n\n            \n            intervals[chrom].addi(start, end, i)\n\n        return intervals", "docstring": "Return a dictionary with chromosomes as keys and interval trees as values\n\nEach interval represents a coding region of overlapping genes.\n\nArgs:\nbuild(str): The genome build\ngenes(iterable(scout.models.HgncGene)):\n\nReturns:\nintervals(dict): A dictionary with chromosomes as keys and overlapping genomic intervals as values", "source": "juraj-google-style"}
{"code": "def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):\n    \n    domain = str(domain).lower()\n    record_type = record_type.upper()\n    cache_key = \"{0}_{1}\".format(domain, record_type)\n    if cache:\n        records = cache.get(cache_key, None)\n        if records:\n            return records\n\n    resolver = dns.resolver.Resolver()\n    timeout = float(timeout)\n    if nameservers is None:\n        nameservers = [\"1.1.1.1\", \"1.0.0.1\",\n                       \"2606:4700:4700::1111\", \"2606:4700:4700::1001\",\n                       ]\n    resolver.nameservers = nameservers\n    resolver.timeout = timeout\n    resolver.lifetime = timeout\n    if record_type == \"TXT\":\n        resource_records = list(map(\n            lambda r: r.strings,\n            resolver.query(domain, record_type, tcp=True)))\n        _resource_record = [\n            resource_record[0][:0].join(resource_record)\n            for resource_record in resource_records if resource_record]\n        records = [r.decode() for r in _resource_record]\n    else:\n        records = list(map(\n            lambda r: r.to_text().replace('\"', '').rstrip(\".\"),\n            resolver.query(domain, record_type, tcp=True)))\n    if cache:\n        cache[cache_key] = records\n\n    return records", "docstring": "Queries DNS\n\nArgs:\ndomain (str): The domain or subdomain to query about\nrecord_type (str): The record type to query for\ncache (ExpiringDict): Cache storage\nnameservers (list): A list of one or more nameservers to use\n(Cloudflare's public DNS resolvers by default)\ntimeout (float): Sets the DNS timeout in seconds\n\nReturns:\nlist: A list of answers", "source": "juraj-google-style"}
{"code": "def hwvtep_add_rbridgeid(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        id = kwargs.pop('rb_range')\n        ip_args = dict(name=name, rb_add=id)\n        method_name = 'overlay_gateway_attach_rbridge_id_rb_add'\n        method_class = self._brocade_tunnels\n        gw_attr = getattr(method_class, method_name)\n        config = gw_attr(**ip_args)\n        output = self._callback(config)\n        return output", "docstring": "Add a range of rbridge-ids\n\nArgs:\nname  (str): gateway-name\nvlan (str): rbridge-ids range\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def get_connection_id(self, conn_or_int_id):\n    key = conn_or_int_id\n    if isinstance(key, str):\n        table = self._int_connections\n    elif isinstance(key, int):\n        table = self._connections\n    else:\n        raise ArgumentError('You must supply either an int connection id or a string internal id to _get_connection_state', id=key)\n    try:\n        data = table[key]\n    except KeyError:\n        raise ArgumentError('Could not find connection by id', id=key)\n    return data['conn_id']", "docstring": "Get the connection id.\n\nArgs:\nconn_or_int_id (int, string): The external integer connection id or\nand internal string connection id\n\nReturns:\ndict: The context data associated with that connection or None if it cannot\nbe found.\n\nRaises:\nArgumentError: When the key is not found in the list of active connections\nor is invalid.", "source": "codesearchnet"}
{"code": "def allsame(list_, strict=True):\n    if (len(list_) == 0):\n        return True\n    first_item = list_[0]\n    return list_all_eq_to(list_, first_item, strict)", "docstring": "checks to see if list is equal everywhere\n\nArgs:\nlist_ (list):\n\nReturns:\nTrue if all items in the list are equal", "source": "codesearchnet"}
{"code": "def _secant_step(x1, x2, y1, y2):\n    x_difference = x1 - x2\n    y_difference = y1 - y2\n    return -y1 * x_difference / y_difference", "docstring": "Returns the step size at the current position if using the secant method.\n\nThis function is meant for exclusive use by the `_brent_loop_body` function:\n- It does not guard against divisions by zero, and instead assumes that `y1`\nis distinct from `y2`. The `_brent_loop_body` function guarantees this\nproperty.\n- It does not guard against overflows which may occur if the difference\nbetween `y1` and `y2` is small while that between `x1` and `x2` is not.\nIn this case, the resulting step size will be larger than `bisection_step`\nand thus ignored by the `_brent_loop_body` function.\n\nArgs:\nx1: `Tensor` containing the current position.\nx2: `Tensor` containing the previous position.\ny1: `Tensor` containing the value of `objective_fn` at `x1`.\ny2: `Tensor` containing the value of `objective_fn` at `x2`.\n\nReturns:\nA `Tensor` with the same shape and dtype as `current`.", "source": "github-repos"}
{"code": "def __format__(self, format_spec='dms'):\n        \n        location = [super(Trigpoint, self).__format__(format_spec), ]\n        if self.altitude:\n            location.append('alt %im' % self.altitude)\n\n        if self.name:\n            return '%s (%s)' % (self.name, ' '.join(location))\n        else:\n            return ' '.join(location)", "docstring": "Extended pretty printing for location strings.\n\nArgs:\nformat_spec (str): Coordinate formatting system to use\n\nReturns:\nstr: Human readable string representation of ``Trigpoint`` object\n\nRaises:\nValueError: Unknown value for ``format_spec``", "source": "juraj-google-style"}
{"code": "def find_interface_by_mac(self, **kwargs):\n    mac = kwargs.pop('mac_address')\n    results = [x for x in self.mac_table if (x['mac_address'] == mac)]\n    return results", "docstring": "Find the interface through which a MAC can be reached.\n\nArgs:\nmac_address (str): A MAC address in 'xx:xx:xx:xx:xx:xx' format.\n\nReturns:\nlist[dict]: a list of mac table data.\n\nRaises:\nKeyError: if `mac_address` is not specified.\n\nExamples:\n>>> from pprint import pprint\n>>> import pynos.device\n>>> conn = ('10.24.39.211', '22')\n>>> auth = ('admin', 'password')\n>>> with pynos.device.Device(conn=conn, auth=auth) as dev:\n...     x = dev.find_interface_by_mac(\n...     mac_address='10:23:45:67:89:ab')\n...     pprint(x) # doctest: +ELLIPSIS\n[{'interface'...'mac_address'...'state'...'type'...'vlan'...}]", "source": "codesearchnet"}
{"code": "def sum(self, selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call sum() on a closed Queryable.')\n    if (not is_callable(selector)):\n        raise TypeError('sum() parameter selector={0} is not callable'.format(repr(selector)))\n    return sum(self.select(selector))", "docstring": "Return the arithmetic sum of the values in the sequence..\n\nAll of the source sequence will be consumed.\n\nNote: This method uses immediate execution.\n\nArgs:\nselector: An optional single argument function which will be used\nto project the elements of the sequence. If omitted, the\nidentity function is used.\n\nReturns:\nThe total value of the projected sequence, or zero for an empty\nsequence.\n\nRaises:\nValueError: If the Queryable has been closed.", "source": "codesearchnet"}
{"code": "def get_config_dict(self, services, hostname=None):\n    \n    if not isinstance(services, (tuple, list)):\n      services = [services]\n    \n    \n    \n    endpoints_util.check_list_type(services, remote._ServiceClass, 'services',\n                                   allow_none=False)\n\n    return self.__api_descriptor(services, hostname=hostname)", "docstring": "JSON dict description of a protorpc.remote.Service in API format.\n\nArgs:\nservices: Either a single protorpc.remote.Service or a list of them\nthat implements an api/version.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\ndict, The API descriptor document as a JSON dict.", "source": "juraj-google-style"}
{"code": "def list(self, **kwargs):\n        \n        return [\n            self.prepare_model(s)\n            for s in self.client.api.services(**kwargs)\n        ]", "docstring": "List services.\n\nArgs:\nfilters (dict): Filters to process on the nodes list. Valid\nfilters: ``id``, ``name`` , ``label`` and ``mode``.\nDefault: ``None``.\n\nReturns:\nlist of :py:class:`Service`: The services.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def make_repr(inst, attrs):\n    arg_str = ', '.join((('%s=%r' % (a, getattr(inst, a))) for a in attrs if hasattr(inst, a)))\n    repr_str = ('%s(%s)' % (inst.__class__.__name__, arg_str))\n    return repr_str", "docstring": "Create a repr from an instance of a class\n\nArgs:\ninst: The class instance we are generating a repr of\nattrs: The attributes that should appear in the repr", "source": "codesearchnet"}
{"code": "def assign_methods(self, resource_class):\n    assert all([(x.upper() in VALID_METHODS) for x in resource_class.Meta.methods])\n    for method in resource_class.Meta.methods:\n        self._assign_method(resource_class, method.upper())", "docstring": "Given a resource_class and it's Meta.methods tuple,\nassign methods for communicating with that resource.\n\nArgs:\nresource_class: A single resource class", "source": "codesearchnet"}
{"code": "def is_allowed(self, filepath, excludes=[]):\n    if os.path.isabs(filepath):\n        raise FinderException(\"'Finder.is_allowed()' only accept relative filepath\")\n    if excludes:\n        for pattern in excludes:\n            if fnmatch.fnmatch(filepath, pattern):\n                return False\n    return True", "docstring": "Check from exclude patterns if a relative filepath is allowed\n\nArgs:\nfilepath (str): A relative file path. (exclude patterns are\nallways based from the source directory).\n\nKeyword Arguments:\nexcludes (list): A list of excluding (glob) patterns. If filepath\nmatchs one of patterns, filepath is not allowed.\n\nRaises:\nboussole.exception.FinderException: If given filepath is absolute.\n\nReturns:\nstr: Filepath with new extension.", "source": "codesearchnet"}
{"code": "def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset, clip_duration, sample_volume, ramp_in, ramp_out):\n    ramp_out_index = clip_duration - ramp_out\n    track_end = min(track_offset + clip_duration, track_data.shape[0])\n    track_end = min(track_end, track_offset + (sample_data.shape[0] - sample_offset))\n    sample_range = track_end - track_offset\n    for i in range(sample_range):\n        if i < ramp_in:\n            envelope_scale = i / ramp_in\n        elif i > ramp_out_index:\n            envelope_scale = (clip_duration - i) / ramp_out\n        else:\n            envelope_scale = 1\n        sample_input = sample_data[sample_offset + i]\n        track_data[track_offset + i] += sample_input * envelope_scale * sample_volume", "docstring": "Mixes the sample data into the main track at the specified offset.\n\nArgs:\ntrack_data: Numpy array holding main audio data. Modified in-place.\ntrack_offset: Where to mix the sample into the main track.\nsample_data: Numpy array of audio data to mix into the main track.\nsample_offset: Where to start in the audio sample.\nclip_duration: How long the sample segment is.\nsample_volume: Loudness to mix the sample in at.\nramp_in: Length in samples of volume increase stage.\nramp_out: Length in samples of volume decrease stage.", "source": "github-repos"}
{"code": "def qhalf(options, halfspaces, interior_point):\n    points = [(list(h.normal) + [h.offset]) for h in halfspaces]\n    data = [[len(interior_point), 1]]\n    data.append(map(repr, interior_point))\n    data.append([len(points[0])])\n    data.append([len(points)])\n    data.extend([map(repr, row) for row in points])\n    prep_str = [' '.join(map(str, line)) for line in data]\n    output = getattr(hull, 'qhalf')(options, '\\n'.join(prep_str))\n    return list(map(str.strip, output.strip().split('\\n')))", "docstring": "Similar to qvoronoi command in command-line qhull.\n\nArgs:\noption:\nAn options string. Up to two options separated by spaces\nare supported. See Qhull's qhalf help for info. Typically\nused options are:\nFp\nhalfspaces:\nList of Halfspaces as input.\ninterior_point:\nAn interior point (see qhalf documentation)\n\nReturns:\nOutput as a list of strings.\nE.g., ['3', '4', '     1      1         0 ', '     1     -1      2 ',\n'    -1      1      2 ', '     1      1      2 ']", "source": "codesearchnet"}
{"code": "def call_rpc(self, address, rpc_id, payload=b\"\"):\n        \n\n        if rpc_id < 0 or rpc_id > 0xFFFF:\n            raise RPCInvalidIDError(\"Invalid RPC ID: {}\".format(rpc_id))\n\n        if address not in self._rpc_overlays and address not in self._tiles:\n            raise TileNotFoundError(\"Unknown tile address, no registered handler\", address=address)\n\n        overlay = self._rpc_overlays.get(address, None)\n        tile = self._tiles.get(address, None)\n        if overlay is not None and overlay.has_rpc(rpc_id):\n            return overlay.call_rpc(rpc_id, payload)\n        elif tile is not None and tile.has_rpc(rpc_id):\n            return tile.call_rpc(rpc_id, payload)\n\n        raise RPCNotFoundError(\"Could not find RPC 0x%X at address %d\" % (rpc_id, address))", "docstring": "Call an RPC by its address and ID.\n\nArgs:\naddress (int): The address of the mock tile this RPC is for\nrpc_id (int): The number of the RPC\npayload (bytes): A byte string of payload parameters up to 20 bytes\n\nReturns:\nbytes: The response payload from the RPC", "source": "juraj-google-style"}
{"code": "def __init__(self, filename=None):\n        \n        assert isinstance(filename, str) or filename is None\n\n        \n        self._parser = self._setup_parser(filename)\n\n        \n        self._warn_on_old_config()\n\n        \n        self._engine = self._parse_engine()\n\n        \n        self._path = self._parse_path()\n\n        \n        self._directory = self._parse_directory()\n\n        \n        self._apps_to_ignore = self._parse_apps_to_ignore()\n\n        \n        self._apps_to_sync = self._parse_apps_to_sync()", "docstring": "Create a Config instance.\n\nArgs:\nfilename (str): Optional filename of the config file. If empty,\ndefaults to MACKUP_CONFIG_FILE", "source": "juraj-google-style"}
{"code": "def create_event_model(event):\n    if event['type'].startswith('task'):\n        factory = {JobEventName.Started: JobStartedEvent, JobEventName.Succeeded: JobSucceededEvent, JobEventName.Stopped: JobStoppedEvent, JobEventName.Aborted: JobAbortedEvent}\n        if (event['type'] in factory):\n            return factory[event['type']].from_event(event)\n        else:\n            raise JobEventTypeUnsupported('Unsupported event type {}'.format(event['type']))\n    elif event['type'].startswith('worker'):\n        raise WorkerEventTypeUnsupported('Unsupported event type {}'.format(event['type']))\n    else:\n        raise EventTypeUnknown('Unknown event type {}'.format(event['type']))", "docstring": "Factory function that turns a celery event into an event object.\n\nArgs:\nevent (dict): A dictionary that represents a celery event.\n\nReturns:\nobject: An event object representing the received event.\n\nRaises:\nJobEventTypeUnsupported: If an unsupported celery job event was received.\nWorkerEventTypeUnsupported: If an unsupported celery worker event was received.\nEventTypeUnknown: If an unknown event type (neither job nor worker) was received.", "source": "codesearchnet"}
{"code": "def load(self, filename, bs=512):\n        \n        with open(filename, 'rb') as f:\n            f.seek(GPT_HEADER_OFFSET + 0x0C)\n            header_size = struct.unpack(\"<I\", f.read(4))[0]\n            f.seek(GPT_HEADER_OFFSET)\n\n            header_data = f.read(header_size)\n            self.header = GPT_HEADER(header_data)\n\n            if (self.header.signature != GPT_SIGNATURE):\n                raise Exception(\"Invalid GPT signature\")\n\n            self.__load_partition_entries(f, bs)", "docstring": "Loads GPT partition table.\n\nArgs:\nfilename (str): path to file or device to open for reading\nbs (uint): Block size of the volume, default: 512\n\nRaises:\nIOError: If file does not exist or not readable", "source": "juraj-google-style"}
{"code": "def _GetTypeFromScope(self, package, type_name, scope):\n    \n    if type_name not in scope:\n      components = _PrefixWithDot(package).split('.')\n      while components:\n        possible_match = '.'.join(components + [type_name])\n        if possible_match in scope:\n          type_name = possible_match\n          break\n        else:\n          components.pop(-1)\n    return scope[type_name]", "docstring": "Finds a given type name in the current scope.\n\nArgs:\npackage: The package the proto should be located in.\ntype_name: The name of the type to be found in the scope.\nscope: Dict mapping short and full symbols to message and enum types.\n\nReturns:\nThe descriptor for the requested type.", "source": "juraj-google-style"}
{"code": "def wulff_gform_and_r(self, wulffshape, bulk_entry, r, from_sphere_area=False, r_units='nanometers', e_units='keV', normalize=False, scale_per_atom=False):\n    miller_se_dict = wulffshape.miller_energy_dict\n    new_wulff = self.scaled_wulff(wulffshape, r)\n    new_wulff_area = new_wulff.miller_area_dict\n    if (not from_sphere_area):\n        w_vol = new_wulff.volume\n        tot_wulff_se = 0\n        for hkl in new_wulff_area.keys():\n            tot_wulff_se += (miller_se_dict[hkl] * new_wulff_area[hkl])\n        Ebulk = (self.bulk_gform(bulk_entry) * w_vol)\n        new_r = new_wulff.effective_radius\n    else:\n        w_vol = (((4 / 3) * np.pi) * (r ** 3))\n        sphere_sa = ((4 * np.pi) * (r ** 2))\n        tot_wulff_se = (wulffshape.weighted_surface_energy * sphere_sa)\n        Ebulk = (self.bulk_gform(bulk_entry) * w_vol)\n        new_r = r\n    new_r = ((new_r / 10) if (r_units == 'nanometers') else new_r)\n    e = (Ebulk + tot_wulff_se)\n    e = ((e / 1000) if (e_units == 'keV') else e)\n    e = ((e / (((4 / 3) * np.pi) * (new_r ** 3))) if normalize else e)\n    bulk_struct = bulk_entry.structure\n    density = (len(bulk_struct) / bulk_struct.lattice.volume)\n    e = ((e / (density * w_vol)) if scale_per_atom else e)\n    return (e, new_r)", "docstring": "Calculates the formation energy of the particle with arbitrary radius r.\n\nArgs:\nwulffshape (WulffShape): Initial, unscaled WulffShape\nbulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.\nr (float (Ang)): Arbitrary effective radius of the WulffShape\nfrom_sphere_area (bool): There are two ways to calculate the bulk\nformation energy. Either by treating the volume and thus surface\narea of the particle as a perfect sphere, or as a Wulff shape.\nr_units (str): Can be nanometers or Angstrom\ne_units (str): Can be keV or eV\nnormalize (bool): Whether or not to normalize energy by volume\nscale_per_atom (True): Whether or not to normalize by number of\natoms in the particle\n\nReturns:\nparticle formation energy (float in keV), effective radius", "source": "codesearchnet"}
{"code": "def lookup(self, keys, name=None):\n    if keys.dtype.base_dtype != self._key_dtype:\n        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n    values = keys\n    if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n        values = keys.values\n    if self._table and self._table.key_dtype.base_dtype == dtypes.int64:\n        values = math_ops.cast(values, dtypes.int64)\n    with ops.name_scope(name, '%s_Lookup' % self.name):\n        buckets = string_ops.string_to_hash_bucket_fast(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')\n        if self._table:\n            ids = self._table.lookup(values)\n            buckets = math_ops.add(buckets, self._table.size())\n            is_id_non_default = math_ops.not_equal(ids, self._table.default_value)\n            ids = array_ops.where_v2(is_id_non_default, ids, buckets)\n        else:\n            ids = buckets\n    if isinstance(keys, sparse_tensor.SparseTensor):\n        return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)\n    elif isinstance(keys, internal.RaggedTensor):\n        return keys.with_values(ids)\n    return ids", "docstring": "Looks up `keys` in the table, outputs the corresponding values.\n\nIt assigns out-of-vocabulary keys to buckets based in their hashes.\n\nArgs:\nkeys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.\nname: Optional name for the op.\n\nReturns:\nA `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,\notherwise a dense `Tensor`.\n\nRaises:\nTypeError: when `keys` doesn't match the table key data type.", "source": "github-repos"}
{"code": "def resolve(self, strict=None):\n        \n        if sys.version_info >= (3, 6) or pathlib2:\n            if strict is None:\n                strict = False\n        else:\n            if strict is not None:\n                raise TypeError(\n                    \"resolve() got an unexpected keyword argument 'strict'\")\n            strict = True\n        if self._closed:\n            self._raise_closed()\n        path = self._flavour.resolve(self, strict=strict)\n        if path is None:\n            self.stat()\n            path = str(self.absolute())\n        path = self.filesystem.absnormpath(path)\n        return FakePath(path)", "docstring": "Make the path absolute, resolving all symlinks on the way and also\nnormalizing it (for example turning slashes into backslashes\nunder Windows).\n\nArgs:\nstrict: If False (default) no exception is raised if the path\ndoes not exist.\nNew in Python 3.6.\n\nRaises:\nIOError: if the path doesn't exist (strict=True or Python < 3.6)", "source": "juraj-google-style"}
{"code": "def get_filename(self, **kwargs):\n    if (self.filename_parser is None):\n        raise RuntimeError('No filename pattern or specific filename provided')\n    output_filename = self.filename_parser.compose(kwargs)\n    dirname = os.path.dirname(output_filename)\n    if (dirname and (not os.path.isdir(dirname))):\n        LOG.info('Creating output directory: {}'.format(dirname))\n        os.makedirs(dirname)\n    return output_filename", "docstring": "Create a filename where output data will be saved.\n\nArgs:\nkwargs (dict): Attributes and other metadata to use for formatting\nthe previously provided `filename`.", "source": "codesearchnet"}
{"code": "def create_summary_metadata(display_name, description, num_thresholds):\n  \n  pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData(\n      version=PROTO_VERSION, num_thresholds=num_thresholds)\n  content = pr_curve_plugin_data.SerializeToString()\n  return summary_pb2.SummaryMetadata(\n      display_name=display_name,\n      summary_description=description,\n      plugin_data=summary_pb2.SummaryMetadata.PluginData(\n          plugin_name=PLUGIN_NAME,\n          content=content))", "docstring": "Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.\n\nArguments:\ndisplay_name: The display name used in TensorBoard.\ndescription: The description to show in TensorBoard.\nnum_thresholds: The number of thresholds to use for PR curves.\n\nReturns:\nA `summary_pb2.SummaryMetadata` protobuf object.", "source": "juraj-google-style"}
{"code": "def __strict(self):\n\n    def conc(a, b):\n        return (a + b)\n    b = np.array(reduce(conc, [[i.top.z, i.base.z] for i in self]))\n    return all((np.diff(b) >= 0))", "docstring": "Private method. Checks if striplog is monotonically increasing in\ndepth.\n\nReturns:\nBool.", "source": "codesearchnet"}
{"code": "def dict_values(src):\n    for v in src.values():\n        if isinstance(v, dict):\n            for v in dict_values(v):\n                (yield v)\n        else:\n            (yield v)", "docstring": "Recursively get values in dict.\n\nUnlike the builtin dict.values() function, this method will descend into\nnested dicts, returning all nested values.\n\nArguments:\nsrc (dict): Source dict.\n\nReturns:\nlist: List of values.", "source": "codesearchnet"}
{"code": "def __init__(self, message='Hello!'):\n    self.message = message", "docstring": "Constructor of the test class.\n\nConstructs a new ClassWithDocstring object.\n\nArgs:\nmessage: The default message to print.", "source": "github-repos"}
{"code": "def get_channel_id(turn_context: TurnContext) -> str:\n        \n\n        if turn_context.activity.channel_id is None:\n            return \"\"\n        else:\n            return turn_context.activity.channel_id", "docstring": "Get the Channel Id from the current Activity on the Turn Context.\n\nArgs:\nturn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from.\n\nReturns:\nstr: The Channel Id from the Turn Context's Activity.", "source": "juraj-google-style"}
{"code": "def get_function_descriptor_list(self):\n    descriptor_list = []\n    if self.is_for_driver_task:\n        return descriptor_list\n    else:\n        descriptor_list.append(self.module_name.encode('ascii'))\n        descriptor_list.append(self.class_name.encode('ascii'))\n        descriptor_list.append(self.function_name.encode('ascii'))\n        if (len(self._function_source_hash) != 0):\n            descriptor_list.append(self._function_source_hash)\n        return descriptor_list", "docstring": "Return a list of bytes representing the function descriptor.\n\nThis function is used to pass this function descriptor to backend.\n\nReturns:\nA list of bytes.", "source": "codesearchnet"}
{"code": "def __init__(self, root=None, **kwargs):\n    super().__init__()\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        if _END_TIME_OF_LAST_WRITE is None:\n            _END_TIME_OF_LAST_WRITE = time.time()\n    self._root = root\n    self._kwargs = kwargs\n    self._delete_tracking('_kwargs')\n    self._async_checkpointer_impl = None\n    self._checkpoint_options = None\n    attached_dependencies = None\n    self._save_counter = None\n    self._save_assign_op = None\n    if root:\n        trackable_root = root() if isinstance(root, weakref.ref) else root\n        _assert_trackable(trackable_root, 'root')\n        attached_dependencies = []\n        kwargs['root'] = root\n        trackable_root._maybe_initialize_trackable()\n        self._save_counter = data_structures.NoDependency(trackable_root._lookup_dependency('save_counter'))\n    for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n        setattr(self, k, v)\n        converted_v = getattr(self, k)\n        if isinstance(converted_v, weakref.ref):\n            converted_v = converted_v()\n        _assert_trackable(converted_v, k)\n        if root:\n            child = trackable_root._lookup_dependency(k)\n            if child is None:\n                attached_dependencies.append(base.WeakTrackableReference(k, converted_v))\n            elif child != converted_v:\n                raise ValueError(f'Cannot create a Checkpoint with keyword argument {k} if root.{k} already exists.')\n    self._saver = TrackableSaver(graph_view_lib.ObjectGraphView(root if root else self, attached_dependencies=attached_dependencies))\n    self._attached_dependencies = data_structures.NoDependency(attached_dependencies)", "docstring": "Creates a training checkpoint for a single or group of objects.\n\nArgs:\nroot: The root object to checkpoint. `root` may be a trackable object or\n`WeakRef` of a trackable object.\n**kwargs: Keyword arguments are set as attributes of this object, and are\nsaved with the checkpoint. All `kwargs` must be trackable objects, or a\nnested structure of trackable objects (`list`, `dict`, or `tuple`).\n\nRaises:\nValueError: If `root` or the objects in `kwargs` are not trackable. A\n`ValueError` is also raised if the `root` object tracks different\nobjects from the ones listed in attributes in kwargs (e.g.\n`root.child = A` and `tf.train.Checkpoint(root, child=B)` are\nincompatible).", "source": "github-repos"}
{"code": "def swf2png(swf_path, png_path, swfrender_path='swfrender'):\n    try:\n        cmd = [swfrender_path, swf_path, '-o', png_path]\n        subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n    except subprocess.CalledProcessError as e:\n        raise ConversionError(('Failed to convert SWF file %s.\\n\\tCommand: %s\\n\\tExit status: %s.\\n\\tOutput:\\n%s' % (swf_path, ' '.join(cmd), e.returncode, e.output)))", "docstring": "Convert SWF slides into a PNG image\n\nRaises:\nOSError is raised if swfrender is not available.\nConversionError is raised if image cannot be created.", "source": "codesearchnet"}
{"code": "def __init__(self, _args):\n        \n        super(TcExValidate, self).__init__(_args)\n\n        \n        self._app_packages = []\n        self._install_json_schema = None\n        self._layout_json_schema = None\n        self.config = {}\n\n        if 'pkg_resources' in sys.modules:\n            \n            self.install_json_schema_file = pkg_resources.resource_filename(\n                __name__, '/'.join(['schema', 'install-json-schema.json'])\n            )\n            self.layout_json_schema_file = pkg_resources.resource_filename(\n                __name__, '/'.join(['schema', 'layout-json-schema.json'])\n            )\n        else:\n            self.install_json_schema_file = None\n            self.layout_json_schema_file = None\n        self.validation_data = self._validation_data", "docstring": "Init Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def parse_raw_fact(raw_fact):\n    \n    def at_split(string):\n        \n        result = string.split('@', 1)\n        length = len(result)\n        if length == 1:\n            front, back = result[0].strip(), None\n        else:\n            front, back = result\n            front, back = front.strip(), back.strip()\n        return (front, back)\n\n    def comma_split(string):\n        \n\n        result = string.split(',', 1)\n        length = len(result)\n        if length == 1:\n            category, description = result[0].strip(), None\n        else:\n            category, description = tuple(result)\n            category, description = category.strip(), description.strip()\n        return (category.strip(), description)\n\n    time_info, rest = time_helpers.extract_time_info(raw_fact)\n    activity_name, back = at_split(rest)\n\n    if back:\n        category_name, description = comma_split(back)\n    else:\n        category_name, description = None, None\n\n    return {\n        'timeinfo': time_info,\n        'category': category_name,\n        'activity': activity_name,\n        'description': description,\n    }", "docstring": "Extract semantically meaningful sub-components from a ``raw fact`` text.\n\nArgs:\nraw_fact (text_type): ``raw fact`` text to be parsed.\n\nReturns:\ndict: dict with sub-components as values.", "source": "juraj-google-style"}
{"code": "def _parse_ports(port_values: dict) -> dict:\n        \n        \n        endpoints = {}\n\n        for port_element in port_values:\n            target_port = port_element.split(':')\n            for port in target_port:\n                endpoints[int(port)] = int(port)\n\n        \n        endpoint_spec = docker.types.EndpointSpec(ports=endpoints)\n        return endpoint_spec", "docstring": "Parse ports key.\n\nArgs:\nport_values (dict): ports configuration values\n\nReturns:\ndict, Ports specification which contains exposed ports", "source": "juraj-google-style"}
{"code": "def load_default(self):\n        \n        path = ctypes_util.find_library(self._sdk)\n        if path is None:\n            \n            \n            \n            if self._windows or self._cygwin:\n                path = next(self.find_library_windows(), None)\n            elif sys.platform.startswith('linux'):\n                path = next(self.find_library_linux(), None)\n            elif sys.platform.startswith('darwin'):\n                path = next(self.find_library_darwin(), None)\n\n        if path is not None:\n            return self.load(path)\n\n        return False", "docstring": "Loads the default J-Link SDK DLL.\n\nThe default J-Link SDK is determined by first checking if ``ctypes``\ncan find the DLL, then by searching the platform-specific paths.\n\nArgs:\nself (Library): the ``Library`` instance\n\nReturns:\n``True`` if the DLL was loaded, otherwise ``False``.", "source": "juraj-google-style"}
{"code": "def onkeydown(self, key, keycode, ctrl, shift, alt):\n        \n        return (key, keycode, ctrl, shift, alt)", "docstring": "Called when user types and releases a key.\nThe widget should be able to receive the focus in order to emit the event.\nAssign a 'tabindex' attribute to make it focusable.\n\nArgs:\nkey (str): the character value\nkeycode (str): the numeric char code", "source": "juraj-google-style"}
{"code": "def detect_deprecated_references_in_node(self, node):\n    results = []\n    if node.expression:\n        results += self.detect_deprecation_in_expression(node.expression)\n    for dep_node in self.DEPRECATED_NODE_TYPES:\n        if (node.type == dep_node[0]):\n            results.append(dep_node)\n    return results", "docstring": "Detects if a node makes use of any deprecated standards.\n\nReturns:\nlist of tuple: (detecting_signature, original_text, recommended_text)", "source": "codesearchnet"}
{"code": "def load(self, filething):\n        \n\n        fileobj = filething.fileobj\n\n        self.info = ASFInfo()\n        self.tags = ASFTags()\n\n        self._tags = {}\n        self._header = HeaderObject.parse_full(self, fileobj)\n\n        for guid in [ContentDescriptionObject.GUID,\n                     ExtendedContentDescriptionObject.GUID,\n                     MetadataObject.GUID,\n                     MetadataLibraryObject.GUID]:\n            self.tags.extend(self._tags.pop(guid, []))\n\n        assert not self._tags", "docstring": "load(filething)\n\nArgs:\nfilething (filething)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def collect_filters_to_first_location_occurrence(compound_match_query):\n    new_match_queries = []\n    for match_query in compound_match_query.match_queries:\n        location_to_filters = _construct_location_to_filter_list(match_query)\n        already_filtered_locations = set()\n        new_match_traversals = []\n        for match_traversal in match_query.match_traversals:\n            result = _apply_filters_to_first_location_occurrence(match_traversal, location_to_filters, already_filtered_locations)\n            (new_match_traversal, newly_filtered_locations) = result\n            new_match_traversals.append(new_match_traversal)\n            already_filtered_locations.update(newly_filtered_locations)\n        new_match_queries.append(MatchQuery(match_traversals=new_match_traversals, folds=match_query.folds, output_block=match_query.output_block, where_block=match_query.where_block))\n    return CompoundMatchQuery(match_queries=new_match_queries)", "docstring": "Collect all filters for a particular location to the first instance of the location.\n\nAdding edge field non-existence filters in `_prune_traverse_using_omitted_locations` may\nresult in filters being applied to locations after their first occurrence.\nOrientDB does not resolve this behavior correctly. Therefore, for each MatchQuery,\nwe collect all the filters for each location in a list. For each location,\nwe make a conjunction of the filter list (`_predicate_list_to_where_block`) and apply\nthe new filter to only the first instance of that location.\nAll other instances will have no filters (None).\n\nArgs:\ncompound_match_query: CompoundMatchQuery object containing 2^n MatchQuery objects\n\nReturns:\nCompoundMatchQuery with all filters for each location applied to the first instance\nof that location.", "source": "codesearchnet"}
{"code": "def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):\n    if (halo_size == 0):\n        return x\n    block_size = block_size_dim.size\n    partial_size = (halo_size % block_size)\n    num_complete_blocks = (halo_size // block_size)\n    parts = [x]\n    for i in xrange(1, (num_complete_blocks + 1)):\n        parts = (([shift(x, i, blocks_dim, wrap)] + parts) + [shift(x, (- i), blocks_dim, wrap)])\n    if (partial_size > 0):\n        left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name)\n        right_margin = mtf_slice(x, (block_size_dim.size - partial_size), partial_size, block_size_dim.name)\n        parts = (([shift(right_margin, (num_complete_blocks + 1), blocks_dim, wrap)] + parts) + [shift(left_margin, (- (num_complete_blocks + 1)), blocks_dim, wrap)])\n    return concat(parts, block_size_dim.name)", "docstring": "Concat each block with the margins of adjacent blocks.\n\nGet left and right blocks_dim and concatenate along block_size_dim.\n\nArgs:\nx: a Tensor.\nblocks_dim: a Dimension in x.shape\nblock_size_dim: a Dimension in x.shape\nhalo_size: an integer\nwrap: a boolean\n\nReturns:\na Tensor with the same shape as x, other than in block_size_dim, whose\nsize is increased by 2*halo_size.", "source": "codesearchnet"}
{"code": "def _extract_all_responses(self, resources, api_endpoint, api_name):\n        \n        all_responses, resources = self._bulk_cache_lookup(api_name, resources)\n        resource_chunks = self._prepare_resource_chunks(resources)\n        response_chunks = self._request_reports(\"resource\", resource_chunks, api_endpoint)\n        self._extract_response_chunks(all_responses, response_chunks, api_name)\n\n        return all_responses", "docstring": "Aux function to extract all the API endpoint responses.\n\nArgs:\nresources: list of string hashes.\napi_endpoint: endpoint path\napi_name: endpoint name\nReturns:\nA dict with the hash as key and the VT report as value.", "source": "juraj-google-style"}
{"code": "def get_source_event_declaration(self, event):\n        \n        return next((x.source_mapping for x in self.events if x.name == event))", "docstring": "Return the source mapping where the event is declared\n\nArgs:\nevent (str): event name\nReturns:\n(dict): sourceMapping", "source": "juraj-google-style"}
{"code": "def __init__(self, cell):\n    self._cell = cell", "docstring": "Creates a new IntGaugeCell.\n\nArgs:\ncell: A c pointer of TFE_MonitoringIntGaugeCell.", "source": "github-repos"}
{"code": "def object(self, key):\n    \n    return _object.Object(self._name, key, context=self._context)", "docstring": "Retrieves a Storage Object for the specified key in this bucket.\n\nThe object need not exist.\n\nArgs:\nkey: the key of the object within the bucket.\nReturns:\nAn Object instance representing the specified key.", "source": "juraj-google-style"}
{"code": "def _indexOfEndTag(istack):\n    if (len(istack) <= 0):\n        return 0\n    if (not istack[0].isOpeningTag()):\n        return 0\n    cnt = 0\n    opener = istack[0]\n    for (index, el) in enumerate(istack[1:]):\n        if (el.isOpeningTag() and (el.getTagName().lower() == opener.getTagName().lower())):\n            cnt += 1\n        elif el.isEndTagTo(opener):\n            if (cnt == 0):\n                return (index + 1)\n            cnt -= 1\n    return 0", "docstring": "Go through `istack` and search endtag. Element at first index is considered\nas opening tag.\n\nArgs:\nistack (list): List of :class:`.HTMLElement` objects.\n\nReturns:\nint: Index of end tag or 0 if not found.", "source": "codesearchnet"}
{"code": "def create_ref(profile, ref, sha):\n    resource = '/refs'\n    payload = {'ref': ('refs/' + ref), 'sha': sha}\n    data = api.post_request(profile, resource, payload)\n    return prepare(data)", "docstring": "Create a ref.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nref\nThe ref to create, e.g., ``heads/my-feature-branch``.\n\nsha\nThe SHA of the commit to point the ref to.\n\nReturns\nA dict with data about the ref.", "source": "codesearchnet"}
{"code": "def usufyToOdsExport(d, fPath):\n    from pyexcel_ods import get_data\n    try:\n        oldData = {'OSRFramework': get_data(fPath)}\n    except:\n        oldData = {'OSRFramework': []}\n    tabularData = _generateTabularData(d, oldData)\n    from pyexcel_ods import save_data\n    save_data(fPath, tabularData)", "docstring": "Workaround to export to a .ods file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"}
{"code": "def coerce(self, value):\n        \n        if isinstance(value, bool):\n\n            return value\n\n        if not hasattr(value, 'lower'):\n\n            raise TypeError('Value is not bool or string.')\n\n        if value.lower() in ('yes', 'true', '1'):\n\n            return True\n\n        if value.lower() in ('no', 'false', '0'):\n\n            return False\n\n        raise ValueError('Could not coerce {0} to a bool.'.format(value))", "docstring": "Convert text values into boolean values.\n\nTrue values are (case insensitive): 'yes', 'true', '1'. False values\nare (case insensitive): 'no', 'false', '0'.\n\nArgs:\nvalue (str or bool): The value to coerce.\n\nRaises:\nTypeError: If the value is not a bool or string.\nValueError: If the value is not bool or an acceptable value.\n\nReturns:\nbool: The True/False value represented.", "source": "juraj-google-style"}
{"code": "def _get_call_rng(self, training):\n    if training:\n        return self.seed_generator.next()\n    else:\n        return None", "docstring": "Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `call_fn`.\n\nBy default, this returns a single `PRNGKey` retrieved by calling\n`self.seed_generator.next()` when `training` is `True`, and `None` when\n`training` is `False`. Override this to return a different structure or\nto pass RNGs in inference mode too.\n\nReturns:\na JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as\nthe `rng` argument of `call_fn`.", "source": "github-repos"}
{"code": "def get_all(cls):\n    issues = db.Issue.find((Issue.issue_type_id == IssueType.get(cls.issue_type).issue_type_id))\n    return {res.issue_id: cls(res) for res in issues}", "docstring": "Returns a list of all issues of a given type\n\nReturns:\nlist of issue objects", "source": "codesearchnet"}
{"code": "def remove_object_from_list(self, obj, list_element):\n    list_element = self._handle_location(list_element)\n    if isinstance(obj, JSSObject):\n        results = [item for item in list_element.getchildren() if (item.findtext('id') == obj.id)]\n    elif isinstance(obj, (int, basestring)):\n        results = [item for item in list_element.getchildren() if ((item.findtext('id') == str(obj)) or (item.findtext('name') == obj))]\n    if (len(results) == 1):\n        list_element.remove(results[0])\n    elif (len(results) > 1):\n        raise ValueError('There is more than one matching object at that path!')", "docstring": "Remove an object from a list element.\n\nArgs:\nobj: Accepts JSSObjects, id's, and names\nlist_element: Accepts an Element or a string path to that\nelement", "source": "codesearchnet"}
{"code": "def laplacian_pyramid_image(shape, n_levels=4, sd=None):\n    batch_dims = shape[:(- 3)]\n    (w, h, ch) = shape[(- 3):]\n    pyramid = 0\n    for n in range(n_levels):\n        k = (2 ** n)\n        pyramid += lowres_tensor(shape, (batch_dims + ((w // k), (h // k), ch)), sd=sd)\n    return pyramid", "docstring": "Simple laplacian pyramid parameterization of an image.\n\nFor more flexibility, use a sum of lowres_tensor()s.\n\nArgs:\nshape: shape of resulting image, [batch, width, height, channels].\nn_levels: number of levels of laplacian pyramid.\nsd: standard deviation of param initialization.\n\nReturns:\ntensor with shape from first argument.", "source": "codesearchnet"}
{"code": "def get_pose_error(target_pose, current_pose):\n    error = np.zeros(6)\n    target_pos = target_pose[:3, 3]\n    current_pos = current_pose[:3, 3]\n    pos_err = (target_pos - current_pos)\n    r1 = current_pose[:3, 0]\n    r2 = current_pose[:3, 1]\n    r3 = current_pose[:3, 2]\n    r1d = target_pose[:3, 0]\n    r2d = target_pose[:3, 1]\n    r3d = target_pose[:3, 2]\n    rot_err = (0.5 * ((np.cross(r1, r1d) + np.cross(r2, r2d)) + np.cross(r3, r3d)))\n    error[:3] = pos_err\n    error[3:] = rot_err\n    return error", "docstring": "Computes the error corresponding to target pose - current pose as a 6-dim vector.\nThe first 3 components correspond to translational error while the last 3 components\ncorrespond to the rotational error.\n\nArgs:\ntarget_pose: a 4x4 homogenous matrix for the target pose\ncurrent_pose: a 4x4 homogenous matrix for the current pose\n\nReturns:\nA 6-dim numpy array for the pose error.", "source": "codesearchnet"}
{"code": "def get_params(width, height, distortion_scale):\n        \n        half_height = int(height / 2)\n        half_width = int(width / 2)\n        topleft = (random.randint(0, int(distortion_scale * half_width)),\n                   random.randint(0, int(distortion_scale * half_height)))\n        topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),\n                    random.randint(0, int(distortion_scale * half_height)))\n        botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),\n                    random.randint(height - int(distortion_scale * half_height) - 1, height - 1))\n        botleft = (random.randint(0, int(distortion_scale * half_width)),\n                   random.randint(height - int(distortion_scale * half_height) - 1, height - 1))\n        startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]\n        endpoints = [topleft, topright, botright, botleft]\n        return startpoints, endpoints", "docstring": "Get parameters for ``perspective`` for a random perspective transform.\n\nArgs:\nwidth : width of the image.\nheight : height of the image.\n\nReturns:\nList containing [top-left, top-right, bottom-right, bottom-left] of the orignal image,\nList containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.", "source": "juraj-google-style"}
{"code": "def create_branch_and_checkout(self, branch_name: str):\n        \n        self.create_branch(branch_name)\n        self.checkout(branch_name)", "docstring": "Creates a new branch if it doesn't exist\n\nArgs:\nbranch_name: branch name", "source": "juraj-google-style"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        if self.Version > 1:\n            raise Exception('Invalid format')\n\n        self.Script = reader.ReadVarBytes()\n\n        if len(self.Script) == 0:\n            raise Exception('Invalid Format')\n\n        if self.Version >= 1:\n            self.Gas = reader.ReadFixed8()\n            if self.Gas < Fixed8.Zero():\n                raise Exception(\"Invalid Format\")\n        else:\n            self.Gas = Fixed8(0)", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):\n\nRaises:\nException: If the version read is incorrect.", "source": "juraj-google-style"}
{"code": "def ensure_value_to_cell(value):\n\n    def dummy_fn():\n        value\n    cell_value = dummy_fn.__closure__[0]\n    if not isinstance(value, type(cell_value)):\n        return cell_value\n    return value", "docstring": "Ensures that a value is converted to a python cell object.\n\nArgs:\nvalue: Any value that needs to be casted to the cell type\n\nReturns:\nA value wrapped as a cell object (see function \"func_load\")", "source": "github-repos"}
{"code": "def recipe_cm360_segmentology(config, account, auth_read, auth_write, recipe_name, date_range, recipe_slug, advertisers):\n    dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n    bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n    google_api(config, {'auth': 'user', 'api': 'dfareporting', 'version': 'v3.4', 'function': 'accounts.get', 'kwargs': {'id': account, 'fields': 'id,name'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'CM360_Account'}}})\n    dcm(config, {'auth': auth_read, 'report': {'filters': {'advertiser': {'values': advertisers}}, 'account': account, 'body': {'name': recipe_name, 'criteria': {'dateRange': {'kind': 'dfareporting\n    dcm(config, {'auth': auth_read, 'report': {'account': account, 'name': recipe_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'CM360_KPI', 'header': True}}})\n    bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\\n          Id AS Partner_Id,\\n          Name AS Partner,\\n          Advertiser_Id,\\n          Advertiser,\\n          Zip_Postal_Code AS Zip,\\n          SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,\\n          SAFE_DIVIDE(Clicks, Impressions) AS Click,\\n          SAFE_DIVIDE(Total_Conversions, Impressions) AS Conversion,\\n          Impressions AS Impressions          FROM `{dataset}.CM360_KPI`          CROSS JOIN `{dataset}.CM360_Account`        ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'CM360_KPI_Normalized'}})\n    census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n    census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['Partner_Id', 'Partner', 'Advertiser_Id', 'Advertiser'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion'], 'dataset': recipe_slug, 'table': 'CM360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "CM360 funnel analysis using Census data.\n\nArgs:\naccount (string) - NA\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Authorization used for writing data.\nrecipe_name (string) - Name of report, not needed if ID used.\ndate_range (choice) - Timeframe to run report for.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.\nadvertisers (integer_list) - Comma delimited list of CM360 advertiser ids.", "source": "github-repos"}
{"code": "def parseEquation(self, inp):\n    inp = MathService._preprocess(inp)\n    split = inp.split(' ')\n    for (i, w) in enumerate(split):\n        if (w in self.__unaryOperators__):\n            op = self.__unaryOperators__[w]\n            eq1 = ' '.join(split[:i])\n            eq2 = ' '.join(split[(i + 1):])\n            result = MathService._applyUnary(self.parseEquation(eq2), op)\n            return self.parseEquation(((eq1 + ' ') + str(result)))\n\n    def extractNumbersAndSymbols(inp):\n        numbers = []\n        symbols = []\n        next_number = ''\n        for w in inp.split(' '):\n            if (w in self.__binaryOperators__):\n                symbols.append(self.__binaryOperators__[w])\n                if next_number:\n                    numbers.append(next_number)\n                    next_number = ''\n            else:\n                if next_number:\n                    next_number += ' '\n                next_number += w\n        if next_number:\n            numbers.append(next_number)\n\n        def convert(n):\n            if (n in self.__constants__):\n                return self.__constants__[n]\n            converter = NumberService()\n            return converter.parse(n)\n        numbers = [convert(n) for n in numbers]\n        return (numbers, symbols)\n    (numbers, symbols) = extractNumbersAndSymbols(inp)\n    return MathService._calculate(numbers, symbols)", "docstring": "Solves the equation specified by the input string.\n\nArgs:\ninp (str): An equation, specified in words, containing some\ncombination of numbers, binary, and unary operations.\n\nReturns:\nThe floating-point result of carrying out the computation.", "source": "codesearchnet"}
{"code": "def _on_trace(self, sequence, topic, message):\n        \n\n        try:\n            conn_key = self._find_connection(topic)\n            conn_id = self.conns.get_connection_id(conn_key)\n        except ArgumentError:\n            self._logger.warn(\"Dropping trace message that does not correspond with a known connection, topic=%s\", topic)\n            return\n\n        try:\n            tracing = messages.TracingNotification.verify(message)\n            self._trigger_callback('on_trace', conn_id, tracing['trace'])\n        except Exception:\n            self._logger.exception(\"Error processing trace conn_id=%d\", conn_id)", "docstring": "Process a trace received from a device.\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "juraj-google-style"}
{"code": "def get_variation(self, experiment_key, user_id, attributes=None):\n    if (not self.is_valid):\n        self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_variation'))\n        return None\n    if (not validator.is_non_empty_string(experiment_key)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n        return None\n    if (not isinstance(user_id, string_types)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n        return None\n    experiment = self.config.get_experiment_from_key(experiment_key)\n    variation_key = None\n    if (not experiment):\n        self.logger.info(('Experiment key \"%s\" is invalid. Not activating user \"%s\".' % (experiment_key, user_id)))\n        return None\n    if (not self._validate_user_inputs(attributes)):\n        return None\n    variation = self.decision_service.get_variation(experiment, user_id, attributes)\n    if variation:\n        variation_key = variation.key\n    if self.config.is_feature_experiment(experiment.id):\n        decision_notification_type = enums.DecisionNotificationTypes.FEATURE_TEST\n    else:\n        decision_notification_type = enums.DecisionNotificationTypes.AB_TEST\n    self.notification_center.send_notifications(enums.NotificationTypes.DECISION, decision_notification_type, user_id, (attributes or {}), {'experiment_key': experiment_key, 'variation_key': variation_key})\n    return variation_key", "docstring": "Gets variation where user will be bucketed.\n\nArgs:\nexperiment_key: Experiment for which user variation needs to be determined.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nVariation key representing the variation the user will be bucketed in.\nNone if user is not in experiment or if experiment is not Running.", "source": "codesearchnet"}
{"code": "def parse_cmd_line():\n    desc = 'Upload benchmark results to datastore.'\n    opts = [('-a', '--archivedir', str, None, True, 'Directory where benchmark files are archived.'), ('-d', '--datadir', str, None, True, 'Directory of benchmark files to upload.')]\n    parser = argparse.ArgumentParser(description=desc)\n    for opt in opts:\n        parser.add_argument(opt[0], opt[1], type=opt[2], default=opt[3], required=opt[4], help=opt[5])\n    return parser.parse_args()", "docstring": "Parse command line options.\n\nReturns:\nThe parsed arguments object.", "source": "github-repos"}
{"code": "def start(self, pipeline, return_task=True, countdown=None, eta=None):\n    for (name, slot) in pipeline.outputs._output_dict.iteritems():\n        slot.key = db.Key.from_path(*slot.key.to_path(), **dict(parent=pipeline._pipeline_key))\n    (_, output_slots, params_text, params_blob) = _generate_args(pipeline, pipeline.outputs, self.queue_name, self.base_path)\n\n    @db.transactional(propagation=db.INDEPENDENT)\n    def txn():\n        pipeline_record = db.get(pipeline._pipeline_key)\n        if (pipeline_record is not None):\n            raise PipelineExistsError(('Pipeline with idempotence key \"%s\" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))))\n        entities_to_put = []\n        for (name, slot) in pipeline.outputs._output_dict.iteritems():\n            entities_to_put.append(_SlotRecord(key=slot.key, root_pipeline=pipeline._pipeline_key))\n        entities_to_put.append(_PipelineRecord(key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts))\n        entities_to_put.extend(_PipelineContext._create_barrier_entities(pipeline._pipeline_key, pipeline._pipeline_key, _BarrierRecord.FINALIZE, output_slots))\n        db.put(entities_to_put)\n        task = taskqueue.Task(url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target, countdown=countdown, eta=eta)\n        if return_task:\n            return task\n        task.add(queue_name=self.queue_name, transactional=True)\n    task = txn()\n    for output_slot in pipeline.outputs._output_dict.itervalues():\n        output_slot._exists = True\n    return task", "docstring": "Starts a pipeline.\n\nArgs:\npipeline: Pipeline instance to run.\nreturn_task: When True, do not submit the task to start the pipeline\nbut instead return it for someone else to enqueue.\ncountdown: Time in seconds into the future that this Task should execute.\nDefaults to zero.\neta: A datetime.datetime specifying the absolute time at which the task\nshould be executed. Must not be specified if 'countdown' is specified.\nThis may be timezone-aware or timezone-naive. If None, defaults to now.\nFor pull tasks, no worker will be able to lease this task before the\ntime indicated by eta.\n\nReturns:\nThe task to start this pipeline if return_task was True.\n\nRaises:\nPipelineExistsError if the pipeline with the given ID already exists.", "source": "codesearchnet"}
{"code": "def list_tags(self, image_name):\n        \n        \n        tags_url = self.registry_url + '/v2/{}/tags/list'\n\n        r = self.get(tags_url.format(image_name), auth=self.auth)\n        data = r.json()\n\n        if 'tags' in data:\n            return reversed(sorted(data['tags']))\n\n        return []", "docstring": "List all tags for the given image stored in the registry.\n\nArgs:\nimage_name (str):\nThe name of the image to query. The image must be present on the\nregistry for this call to return any values.\nReturns:\nlist[str]: List of tags for that image.", "source": "juraj-google-style"}
{"code": "def run(self, configurations):\n    result = CourgetteResult()\n    for configuration in configurations:\n        runner = CourgetteTestsRunner(url=self.url, username=self.username, password=self.password, enterprise=self.enterprise, version=self.apiversion, specification=configuration.specification, sdk_identifier=self.sdk_identifier, monolithe_config=self.monolithe_config, parent_resource=configuration.parent_resource_name, parent_id=configuration.parent_id, default_values=configuration.default_values)\n        result.add_report((configuration.specification.rest_name + '.spec'), runner.run())\n    return result", "docstring": "Run all tests\n\nReturns:\nA dictionary containing test results.", "source": "codesearchnet"}
{"code": "def validate(cls, job_config):\n    \n    reader_params = job_config.input_reader_params\n\n    \n    if cls.BUCKET_NAME_PARAM not in reader_params:\n      raise errors.BadReaderParamsError(\n          \"%s is required for Google Cloud Storage\" %\n          cls.BUCKET_NAME_PARAM)\n    try:\n      cloudstorage.validate_bucket_name(\n          reader_params[cls.BUCKET_NAME_PARAM])\n    except ValueError, error:\n      raise errors.BadReaderParamsError(\"Bad bucket name, %s\" % (error))\n\n    \n    if cls.OBJECT_NAMES_PARAM not in reader_params:\n      raise errors.BadReaderParamsError(\n          \"%s is required for Google Cloud Storage\" %\n          cls.OBJECT_NAMES_PARAM)\n    filenames = reader_params[cls.OBJECT_NAMES_PARAM]\n    if not isinstance(filenames, list):\n      raise errors.BadReaderParamsError(\n          \"Object name list is not a list but a %s\" %\n          filenames.__class__.__name__)\n    for filename in filenames:\n      if not isinstance(filename, basestring):\n        raise errors.BadReaderParamsError(\n            \"Object name is not a string but a %s\" %\n            filename.__class__.__name__)\n\n    \n    if cls.DELIMITER_PARAM in reader_params:\n      delimiter = reader_params[cls.DELIMITER_PARAM]\n      if not isinstance(delimiter, basestring):\n        raise errors.BadReaderParamsError(\n            \"%s is not a string but a %s\" %\n            (cls.DELIMITER_PARAM, type(delimiter)))\n\n    \n    if cls.BUFFER_SIZE_PARAM in reader_params:\n      buffer_size = reader_params[cls.BUFFER_SIZE_PARAM]\n      if not isinstance(buffer_size, int):\n        raise errors.BadReaderParamsError(\n            \"%s is not an int but a %s\" %\n            (cls.BUFFER_SIZE_PARAM, type(buffer_size)))\n\n    \n    if cls.PATH_FILTER_PARAM in reader_params:\n      path_filter = reader_params[cls.PATH_FILTER_PARAM]\n      if not isinstance(path_filter, PathFilter):\n        raise errors.BadReaderParamsError(\n            \"%s is not an instance of PathFilter but %s.\" %\n            (cls.PATH_FILTER_PARAM, type(path_filter)))", "docstring": "Validate mapper specification.\n\nArgs:\njob_config: map_job.JobConfig.\n\nRaises:\nBadReaderParamsError: if the specification is invalid for any reason such\nas missing the bucket name or providing an invalid bucket name.", "source": "juraj-google-style"}
{"code": "def item_from_topics(key, topics):\n    if re.match('{\\\\d+}', key):\n        pos = int(key.strip('{}'))\n        try:\n            binding = topics[pos]\n        except IndexError:\n            raise IndexError((pos + 1))\n    else:\n        echo('be.yaml template key not recognised')\n        sys.exit(PROJECT_ERROR)\n    return binding", "docstring": "Get binding from `topics` via `key`\n\nExample:\n{0} == hello --> be in hello world\n{1} == world --> be in hello world\n\nReturns:\nSingle topic matching the key\n\nRaises:\nIndexError (int): With number of required\narguments for the key", "source": "codesearchnet"}
{"code": "def permute(self, ordering: np.ndarray, axis: int) -> None:\n\t\t\n\t\tif self._file.__contains__(\"tiles\"):\n\t\t\tdel self._file['tiles']\n\n\t\tordering = list(np.array(ordering).flatten())  \n\t\tself.layers._permute(ordering, axis=axis)\n\t\tif axis == 0:\n\t\t\tself.row_attrs._permute(ordering)\n\t\t\tself.row_graphs._permute(ordering)\n\t\tif axis == 1:\n\t\t\tself.col_attrs._permute(ordering)\n\t\t\tself.col_graphs._permute(ordering)", "docstring": "Permute the dataset along the indicated axis.\n\nArgs:\nordering (list of int): \tThe desired order along the axis\n\naxis (int):\t\t\t\t\tThe axis along which to permute\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def DownloadPqlResultToCsv(self, pql_query, file_handle, values=None):\n    \n    pql_writer = csv.writer(file_handle, delimiter=',',\n                            quotechar='\"', quoting=csv.QUOTE_ALL)\n    self._PageThroughPqlSet(pql_query, pql_writer.writerow, values)", "docstring": "Downloads the results of a PQL query to CSV.\n\nArgs:\npql_query: str a statement filter to apply (the query should not include\nthe limit or the offset)\nfile_handle: file the file object to write to.\n[optional]\nvalues: A dict of python objects or a list of raw SOAP values to bind\nto the pql_query.", "source": "juraj-google-style"}
{"code": "def stop(self, timeout=None):\n    assert (self.state == STARTED), 'Process not started'\n    self.state = STOPPING\n    self._run_hook(ProcessStopHook, timeout=timeout)\n    for s in self._spawned:\n        if (not s.ready()):\n            self.log.debug('Waiting for %s *%s **%s', s._function, s._args, s._kwargs)\n        s.wait(timeout=timeout)\n    self._spawned = []\n    self._controllers = OrderedDict()\n    self._unpublished = set()\n    self.state = STOPPED\n    self.log.debug('Done process.stop()')", "docstring": "Stop the process and wait for it to finish\n\nArgs:\ntimeout (float): Maximum amount of time to wait for each spawned\nobject. None means forever", "source": "codesearchnet"}
{"code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_update(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", "docstring": "Assigns `tf.IndexedSlices` to this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be assigned to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe scattered assignment has completed.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def make_table(contents, headers=None):\n    if (not isinstance(contents, np.ndarray)):\n        raise ValueError('make_table contents must be a numpy ndarray')\n    if (contents.ndim not in [1, 2]):\n        raise ValueError(('make_table requires a 1d or 2d numpy array, was %dd' % contents.ndim))\n    if headers:\n        if isinstance(headers, (list, tuple)):\n            headers = np.array(headers)\n        if (not isinstance(headers, np.ndarray)):\n            raise ValueError(('Could not convert headers %s into np.ndarray' % headers))\n        if (headers.ndim != 1):\n            raise ValueError(('Headers must be 1d, is %dd' % headers.ndim))\n        expected_n_columns = (contents.shape[1] if (contents.ndim == 2) else 1)\n        if (headers.shape[0] != expected_n_columns):\n            raise ValueError(('Number of headers %d must match number of columns %d' % (headers.shape[0], expected_n_columns)))\n        header = ('<thead>\\n%s</thead>\\n' % make_table_row(headers, tag='th'))\n    else:\n        header = ''\n    n_rows = contents.shape[0]\n    if (contents.ndim == 1):\n        rows = (make_table_row([contents[i]]) for i in range(n_rows))\n    else:\n        rows = (make_table_row(contents[i, :]) for i in range(n_rows))\n    return ('<table>\\n%s<tbody>\\n%s</tbody>\\n</table>' % (header, ''.join(rows)))", "docstring": "Given a numpy ndarray of strings, concatenate them into a html table.\n\nArgs:\ncontents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the\ntable is laid out vertically (i.e. row-major).\nheaders: A np.ndarray or list of string header names for the table.\n\nReturns:\nA string containing all of the content strings, organized into a table.\n\nRaises:\nValueError: If contents is not a np.ndarray.\nValueError: If contents is not 1d or 2d.\nValueError: If contents is empty.\nValueError: If headers is present and not a list, tuple, or ndarray.\nValueError: If headers is not 1d.\nValueError: If number of elements in headers does not correspond to number\nof columns in contents.", "source": "codesearchnet"}
{"code": "def Write(self, output_writer):\n    \n    \n    for column_index, column_size in enumerate(self._column_sizes):\n      column_size, _ = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB)\n      column_size = (column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB\n      self._column_sizes[column_index] = column_size\n\n    if self._columns:\n      self._WriteRow(output_writer, self._columns, in_bold=True)\n\n    for values in self._rows:\n      self._WriteRow(output_writer, values)", "docstring": "Writes the table to output writer.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def Add(self, artifact=None, target=None, callback=None):\n    if (target is None):\n        target = Target()\n    os_name = (target.Get('os') or [None])\n    cpe = (target.Get('cpe') or [None])\n    label = (target.Get('label') or [None])\n    attributes = itertools.product(os_name, cpe, label)\n    new_conditions = [Condition(artifact, *attr) for attr in attributes]\n    self.conditions.update(new_conditions)\n    self._Register(new_conditions, callback)", "docstring": "Add criteria for a check.\n\nArgs:\nartifact: An artifact name.\ntarget: A tuple of artifact necessary to process the data.\ncallback: Entities that should be called if the condition matches.", "source": "codesearchnet"}
{"code": "class ImageClassifierOutputWithNoAttention(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    logits: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for outputs of image classification models.\n\nArgs:\nloss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\nClassification (or regression if config.num_labels==1) loss.\nlogits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\nClassification (or regression if config.num_labels==1) scores (before SoftMax).\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\none for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also\ncalled feature maps) of the model at the output of each stage.", "source": "github-repos"}
{"code": "def with_random_weights(cls, options):\n    return cls([(value, random.randint(1, len(options))) for value in options])", "docstring": "Initialize from a list of options with random weights.\n\nThe weights assigned to each object are uniformly random\nintegers between ``1`` and ``len(options)``\n\nArgs:\noptions (list): The list of options of any type this object\ncan return with the ``get()`` method.\n\nReturns:\nSoftOptions: A newly constructed instance", "source": "codesearchnet"}
{"code": "def unpack_validation_data(validation_data, raise_if_ambiguous=True):\n    if isinstance(validation_data, (iterator_ops.Iterator, iterator_ops.IteratorBase, data_types.DatasetV2, data_utils.Sequence)) or not hasattr(validation_data, '__len__'):\n        val_x = validation_data\n        val_y = None\n        val_sample_weight = None\n    elif len(validation_data) == 2:\n        try:\n            val_x, val_y = validation_data\n            val_sample_weight = None\n        except ValueError:\n            val_x, val_y, val_sample_weight = (validation_data, None, None)\n    elif len(validation_data) == 3:\n        try:\n            val_x, val_y, val_sample_weight = validation_data\n        except ValueError:\n            val_x, val_y, val_sample_weight = (validation_data, None, None)\n    else:\n        if raise_if_ambiguous:\n            raise ValueError('When passing a `validation_data` argument, it must contain either 2 items (x_val, y_val), or 3 items (x_val, y_val, val_sample_weights), or alternatively it could be a dataset or a dataset or a dataset iterator. However we received `validation_data=%s`' % validation_data)\n        val_x, val_y, val_sample_weight = (validation_data, None, None)\n    return (val_x, val_y, val_sample_weight)", "docstring": "Unpack validation data based input type.\n\nThe validation data is not touched if its dataset or dataset iterator.\nFor other type of input (Numpy or tensor), it will be unpacked into tuple of\n3 which is x, y and sample weights.\n\nArgs:\nvalidation_data: dataset, dataset iterator, or numpy, tensor tuple.\nraise_if_ambiguous: boolean on whether to fail if validation_data cannot be\nparsed. Otherwise simply return validation_data, None, None and defer the\ndecision to the caller.\n\nReturns:\ntuple of 3, (x, y, sample_weights) for numpy and tensor input.", "source": "github-repos"}
{"code": "def __init__(self, entity=None, key=None):\n    self.entity = entity\n    self.key = key\n    self._pb = FakeMessage(entity, key)", "docstring": "Fake mutation request object.\n\nRequires exactly one of entity or key to be set.\n\nArgs:\nentity: (``google.cloud.datastore.entity.Entity``) entity representing\nthis upsert mutation\nkey: (``google.cloud.datastore.key.Key``) key representing\nthis delete mutation", "source": "github-repos"}
{"code": "def __init__(self, project, error_context=None):\n        \n        self.project = project\n        self.error_context = error_context or StatikErrorContext()\n        self.supported_providers = project.config.template_providers\n        if project.safe_mode:\n            self.supported_providers = [provider for provider in self.supported_providers \\\n                                        if provider in SAFER_TEMPLATE_PROVIDERS]\n\n        if len(self.supported_providers) == 0:\n            raise NoSupportedTemplateProvidersError(\n                SAFER_TEMPLATE_PROVIDERS if project.safe_mode else DEFAULT_TEMPLATE_PROVIDERS,\n                project.safe_mode\n            )\n\n        self.provider_classes = dict()\n        self.providers_by_ext = dict()\n        self.exts = []\n        for provider in self.supported_providers:\n            self.provider_classes[provider] = get_template_provider_class(provider)\n            \n            for ext in TEMPLATE_PROVIDER_EXTS[provider]:\n                if ext not in self.providers_by_ext:\n                    self.providers_by_ext[ext] = provider\n                    self.exts.append(ext)\n\n        self.providers = dict()\n        self.cached_templates = dict()\n\n        \n        \n        self.template_paths = [os.path.join(project.path, project.TEMPLATES_DIR)]\n        \n        if project.config.theme is not None:\n            self.template_paths.append(os.path.join(\n                project.path,\n                project.THEMES_DIR,\n                project.config.theme,\n                project.TEMPLATES_DIR\n            ))\n\n        logger.debug(\n            \"Looking in the following path(s) (in the following order) for templates:\\n%s\",\n            \"\\n\".join(self.template_paths)\n        )\n\n        \n        for path in self.template_paths:\n            if not os.path.exists(path) or not os.path.isdir(path):\n                raise MissingProjectFolderError(path)\n\n        logger.debug(\n            \"Configured the following template providers: %s\",\n            \", \".join(self.supported_providers)\n        )", "docstring": "Constructor.\n\nArgs:\nproject: The project to which this template engine relates.", "source": "juraj-google-style"}
{"code": "def get_new_python_files_between_commits(base_commit: str, commits: List[str]) -> List[str]:\n    code_diff = []\n    for commit in commits:\n        for diff_obj in commit.diff(base_commit):\n            if diff_obj.change_type == 'A' and diff_obj.b_path.endswith('.py'):\n                code_diff.append(diff_obj.b_path)\n    return code_diff", "docstring": "Get the list of added python files between a base commit and one or several commits.\n\nArgs:\nrepo (`git.Repo`):\nA git repository (for instance the Transformers repo).\nbase_commit (`str`):\nThe commit reference of where to compare for the diff. This is the current commit, not the branching point!\ncommits (`List[str]`):\nThe list of commits with which to compare the repo at `base_commit` (so the branching point).\n\nReturns:\n`List[str]`: The list of python files added between a base commit and one or several commits.", "source": "github-repos"}
{"code": "def draw(canvas, mol):\n    \n    mol.require(\"ScaleAndCenter\")\n    mlb = mol.size2d[2]\n    if not mol.atom_count():\n        return\n    bond_type_fn = {\n        1: {\n            0: single_bond,\n            1: wedged_single,\n            2: dashed_wedged_single,\n            3: wave_single,\n        }, 2: {\n            0: cw_double,\n            1: counter_cw_double,\n            2: double_bond,\n            3: cross_double\n        }, 3: {\n            0: triple_bond\n        }\n    }\n    \n    for u, v, bond in mol.bonds_iter():\n        if not bond.visible:\n            continue\n        if (u < v) == bond.is_lower_first:\n            f, s = (u, v)\n        else:\n            s, f = (u, v)\n        p1 = mol.atom(f).coords\n        p2 = mol.atom(s).coords\n        if p1 == p2:\n            continue  \n        if mol.atom(f).visible:\n            p1 = gm.t_seg(p1, p2, F_AOVL, 2)[0]\n        if mol.atom(s).visible:\n            p2 = gm.t_seg(p1, p2, F_AOVL, 1)[1]\n        color1 = mol.atom(f).color\n        color2 = mol.atom(s).color\n        bond_type_fn[bond.order][bond.type](\n            canvas, p1, p2, color1, color2, mlb)\n\n    \n    for n, atom in mol.atoms_iter():\n        if not atom.visible:\n            continue\n        p = atom.coords\n        color = atom.color\n        \n        if atom.H_count:\n            cosnbrs = []\n            hrzn = (p[0] + 1, p[1])\n            for nbr in mol.graph.neighbors(n):\n                pnbr = mol.atom(nbr).coords\n                try:\n                    cosnbrs.append(gm.dot_product(hrzn, pnbr, p) /\n                                   gm.distance(p, pnbr))\n                except ZeroDivisionError:\n                    pass\n            if not cosnbrs or min(cosnbrs) > 0:\n                \n                text = atom.formula_html(True)\n                canvas.draw_text(p, text, color, \"right\")\n                continue\n            elif max(cosnbrs) < 0:\n                \n                text = atom.formula_html()\n                canvas.draw_text(p, text, color, \"left\")\n                continue\n        \n        text = atom.formula_html()\n        canvas.draw_text(p, text, color, \"center\")", "docstring": "Draw molecule structure image.\n\nArgs:\ncanvas: draw.drawable.Drawable\nmol: model.graphmol.Compound", "source": "juraj-google-style"}
{"code": "def really_unicode(in_string):\n    if isinstance(in_string, StringType):\n        for args in (('utf-8',), ('latin-1',), ('ascii', 'replace')):\n            try:\n                in_string = in_string.decode(*args)\n                break\n            except UnicodeDecodeError:\n                continue\n    if (not isinstance(in_string, UnicodeType)):\n        raise ValueError(('%s is not a string at all.' % in_string))\n    return in_string", "docstring": "Make a string unicode. Really.\n\nEnsure ``in_string`` is returned as unicode through a series of\nprogressively relaxed decodings.\n\nArgs:\nin_string (str): The string to convert.\n\nReturns:\nstr: Unicode.\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def _ProcessCompressedStreamTypes(self, mediator, path_spec, type_indicators):\n    \n    number_of_type_indicators = len(type_indicators)\n    if number_of_type_indicators == 0:\n      return\n\n    self.processing_status = definitions.STATUS_INDICATOR_COLLECTING\n\n    if number_of_type_indicators > 1:\n      display_name = mediator.GetDisplayName()\n      logger.debug((\n          'Found multiple format type indicators: {0:s} for '\n          'compressed stream file: {1:s}').format(\n              type_indicators, display_name))\n\n    for type_indicator in type_indicators:\n      if type_indicator == dfvfs_definitions.TYPE_INDICATOR_BZIP2:\n        compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(\n            dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM,\n            compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2,\n            parent=path_spec)\n\n      elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_GZIP:\n        compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(\n            dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=path_spec)\n\n      else:\n        compressed_stream_path_spec = None\n\n        warning_message = (\n            'unsupported compressed stream format type indicators: '\n            '{0:s}').format(type_indicator)\n        mediator.ProduceExtractionWarning(\n            warning_message, path_spec=path_spec)\n\n      if compressed_stream_path_spec:\n        event_source = event_sources.FileEntryEventSource(\n            path_spec=compressed_stream_path_spec)\n        event_source.file_entry_type = dfvfs_definitions.FILE_ENTRY_TYPE_FILE\n        mediator.ProduceEventSource(event_source)\n\n        self.last_activity_timestamp = time.time()", "docstring": "Processes a data stream containing compressed stream types such as: bz2.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\npath_spec (dfvfs.PathSpec): path specification.\ntype_indicators(list[str]): dfVFS archive type indicators found in\nthe data stream.", "source": "juraj-google-style"}
{"code": "def get_miller_index_from_site_indexes(self, site_ids, round_dp=4, verbose=True):\n    return self.lattice.get_miller_index_from_coords(self.frac_coords[site_ids], coords_are_cartesian=False, round_dp=round_dp, verbose=verbose)", "docstring": "Get the Miller index of a plane from a set of sites indexes.\n\nA minimum of 3 sites are required. If more than 3 sites are given\nthe best plane that minimises the distance to all points will be\ncalculated.\n\nArgs:\nsite_ids (list of int): A list of site indexes to consider. A\nminimum of three site indexes are required. If more than three\nsites are provided, the best plane that minimises the distance\nto all sites will be calculated.\nround_dp (int, optional): The number of decimal places to round the\nmiller index to.\nverbose (bool, optional): Whether to print warnings.\n\nReturns:\n(tuple): The Miller index.", "source": "codesearchnet"}
{"code": "class FeatureMixerBlock(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        self.norm = PatchTSMixerNormLayer(config)\n        self.gated_attn = config.gated_attn\n        self.mlp = PatchTSMixerMLP(in_features=config.d_model, out_features=config.d_model, config=config)\n        if config.gated_attn:\n            self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model)\n\n    def forward(self, hidden: torch.Tensor):\n        \n        residual = hidden\n        hidden = self.norm(hidden)\n        hidden = self.mlp(hidden)\n        if self.gated_attn:\n            hidden = self.gating_block(hidden)\n        out = hidden + residual\n        return out", "docstring": "This module mixes the hidden feature dimension.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def get_unpartitioned_shape(self, shape):\n    shape = tensor_shape.as_shape(shape)\n    dims = shape.as_list()\n    if self._shard_dimension is None or self._number_of_partitions is None or (not dims):\n        return None\n    if dims[self._shard_dimension] is None:\n        raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known. ')\n    if self._number_of_partitions > 1:\n        dims[self._shard_dimension] *= self._number_of_partitions\n    return tensor_shape.as_shape(dims)", "docstring": "Returns the shape of an unpartitioned Tensor.\n\nWhen given the shape of a 'sharded-size' Tensor, returns the shape\nof the full shape of its unpartitioned Tensor.\n\nArgs:\nshape: The shape of the sharded Tensor.\n\nReturns:\nThe shape of the unpartitioned version of the Tensor.\n\nRaises:\nValueError: if shape has unknown sharded dimension", "source": "github-repos"}
{"code": "def _ParseShellItemPathSegment(self, shell_item):\n    \n    path_segment = None\n\n    if isinstance(shell_item, pyfwsi.root_folder):\n      description = shell_folder_ids.DESCRIPTIONS.get(\n          shell_item.shell_folder_identifier, None)\n\n      if description:\n        path_segment = description\n      else:\n        path_segment = '{{{0:s}}}'.format(shell_item.shell_folder_identifier)\n\n      path_segment = '<{0:s}>'.format(path_segment)\n\n    elif isinstance(shell_item, pyfwsi.volume):\n      if shell_item.name:\n        path_segment = shell_item.name\n      elif shell_item.identifier:\n        path_segment = '{{{0:s}}}'.format(shell_item.identifier)\n\n    elif isinstance(shell_item, pyfwsi.file_entry):\n      long_name = ''\n      for extension_block in shell_item.extension_blocks:\n        if isinstance(extension_block, pyfwsi.file_entry_extension):\n          long_name = extension_block.long_name\n\n      if long_name:\n        path_segment = long_name\n      elif shell_item.name:\n        path_segment = shell_item.name\n\n    elif isinstance(shell_item, pyfwsi.network_location):\n      if shell_item.location:\n        path_segment = shell_item.location\n\n    if path_segment is None and shell_item.class_type == 0x00:\n      \n      pass\n\n    if path_segment is None:\n      path_segment = '<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)\n\n    return path_segment", "docstring": "Parses a shell item path segment.\n\nArgs:\nshell_item (pyfwsi.item): shell item.\n\nReturns:\nstr: shell item path segment.", "source": "juraj-google-style"}
{"code": "def getRow(self, key):\n        \n        return Row(self._impl.getRow(Tuple(key)._impl))", "docstring": "Get a row by value of the indexing columns. If the index is not\nspecified, gets the only row of a dataframe with no indexing columns.\n\nArgs:\nkey: Tuple representing the index of the desired row.\n\nReturns:\nThe row.", "source": "juraj-google-style"}
{"code": "def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:\n    hidden_states = self.conv_pre(input_embeds)\n    for i in range(self.num_upsamples):\n        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)\n        hidden_states = self.upsampler[i](hidden_states)\n        res_state = self.resblocks[i * self.num_kernels](hidden_states)\n        for j in range(1, self.num_kernels):\n            res_state += self.resblocks[i * self.num_kernels + j](hidden_states)\n        hidden_states = res_state / self.num_kernels\n    hidden_states = nn.functional.leaky_relu(hidden_states)\n    hidden_states = self.conv_post(hidden_states)\n    hidden_states = torch.tanh(hidden_states)\n    waveform = hidden_states.squeeze(1)\n    return waveform", "docstring": "Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch\nof speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech\nwaveform.\n\nArgs:\nspectrogram (`torch.FloatTensor`):\nTensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,\nmodel_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim`\nis the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`.\n\nReturns:\n`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of\nshape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.", "source": "github-repos"}
{"code": "def write_uint16(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sH' % endian, value)", "docstring": "Pack the value as an unsigned integer and write 2 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def assert_next(transformations):\n\n    def _apply_fn(dataset):\n        \n        return _AssertNextDataset(dataset, transformations)\n    return _apply_fn", "docstring": "A transformation that asserts which transformations happen next.\n\nTransformations should be referred to by their base name, not including\nversion suffix. For example, use \"Batch\" instead of \"BatchV2\". \"Batch\" will\nmatch any of \"Batch\", \"BatchV1\", \"BatchV2\", etc.\n\nArgs:\ntransformations: A `tf.string` vector `tf.Tensor` identifying the\ntransformations that are expected to happen next.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def get_library_barcode_sequence_hash(self, inverse=False):\n        \n        action = os.path.join(self.record_url, \"get_library_barcode_sequence_hash\")\n        res = requests.get(url=action, headers=HEADERS, verify=False)\n        res.raise_for_status()\n        res_json = res.json()\n        \n        new_res = {}\n        for lib_id in res_json:\n            new_res[int(lib_id)] = res_json[lib_id]\n        res_json = new_res\n\n        if inverse:\n            rev = {}\n            for lib_id in res_json:\n                rev[res_json[lib_id]] = lib_id\n        res_json = rev\n        return res_json", "docstring": "Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to\ncreate a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the\nSequencingRequest.\n\nArgs:\ninverse: `bool`. True means to inverse the key and value pairs such that the barcode\nsequence serves as the key.\n\nReturns: `dict`.", "source": "juraj-google-style"}
{"code": "def to_code(self):\n    if (self.internals is not get_py_internals()):\n        raise ValueError('CodeObject is not compatible with the running python internals.')\n    if six.PY2:\n        return types.CodeType(self.co_argcount, self.co_nlocals, self.co_stacksize, self.co_flags, self.co_code, self.co_consts, self.co_names, self.co_varnames, self.co_filename, self.co_name, self.co_firstlineno, self.co_lnotab, self.co_freevars, self.co_cellvars)\n    else:\n        return types.CodeType(self.co_argcount, self.co_kwonlyargcount, self.co_nlocals, self.co_stacksize, self.co_flags, self.co_code, self.co_consts, self.co_names, self.co_varnames, self.co_filename, self.co_name, self.co_firstlineno, self.co_lnotab, self.co_freevars, self.co_cellvars)", "docstring": "Convert this instance back into a native python code object. This\nonly works if the internals of the code object are compatible with\nthose of the running python version.\n\nReturns:\ntypes.CodeType: The native python code object.", "source": "codesearchnet"}
{"code": "def get_backend(self, name=None, **kwargs):\n        \n        backends = self.backends(name, **kwargs)\n        if len(backends) > 1:\n            raise QiskitBackendNotFoundError('More than one backend matches the criteria')\n        elif not backends:\n            raise QiskitBackendNotFoundError('No backend matches the criteria')\n\n        return backends[0]", "docstring": "Return a single backend matching the specified filtering.\n\nArgs:\nname (str): name of the backend.\n**kwargs (dict): dict used for filtering.\n\nReturns:\nBaseBackend: a backend matching the filtering.\n\nRaises:\nQiskitBackendNotFoundError: if no backend could be found or\nmore than one backend matches.", "source": "juraj-google-style"}
{"code": "def gnuplot_3d_matrix(z_matrix, filename, title='', x_label='', y_label=''):\n    \n    _, ext = os.path.splitext(filename)\n    if ext != '.png':\n        filename += '.png'\n\n    gnuplot_cmds = \\\n    \n    scr = _GnuplotScriptTemp(gnuplot_cmds)\n    data = _GnuplotDataZMatrixTemp(z_matrix)\n\n    args_dict = {\n        'filename': filename,\n        'filename_data': data.name,\n        'title': title,\n        'x_label': x_label,\n        'y_label': y_label\n    }\n    gnuplot(scr.name, args_dict)", "docstring": "Function to produce a general 3D plot from a 2D matrix.\n\nArgs:\nz_matrix (list): 2D matrix.\nfilename (str): Filename of the output image.\ntitle (str): Title of the plot.  Default is '' (no title).\nx_label (str): x-axis label.\ny_label (str): y-axis label.", "source": "juraj-google-style"}
{"code": "def compute_stats(array):\n    q1 = np.percentile(array, 25)\n    q3 = np.percentile(array, 75)\n    low = q1 - 1.5 * (q3 - q1)\n    high = q3 + 1.5 * (q3 - q1)\n    filtered_array = list(filter(lambda x: low <= x and x <= high, array))\n    mean = np.mean(filtered_array)\n    min_val = np.min(filtered_array)\n    max_val = np.max(filtered_array)\n    max_diff = max(max_val - mean, mean - min_val)\n    diff = max_diff / mean * 100.0\n    return (mean, diff)", "docstring": "Reports mean and ± range for the given array.\n\nThe range computation follows benchstat's.\n\nArgs:\narray: The array to compute stats for.\n\nReturns:\nmean and ± %diff range.", "source": "github-repos"}
{"code": "def _validate_path(self, settings, name, value):\n        \n        if not os.path.exists(value):\n            raise SettingsInvalidError(\"Path from setting '{name}' does not \"\n                                       \"exists: {value}\".format(\n                                           name=name,\n                                           value=value\n                                       ))\n\n        return value", "docstring": "Validate path exists\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (str): Path to validate.\n\nRaises:\nboussole.exceptions.SettingsInvalidError: If path does not exists.\n\nReturns:\nstr: Validated path.", "source": "juraj-google-style"}
{"code": "def resample(self, data, input_rate):\n        \n        data16 = np.fromstring(string=data, dtype=np.int16)\n        resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS)\n        resample = signal.resample(data16, resample_size)\n        resample16 = np.array(resample, dtype=np.int16)\n        return resample16.tostring()", "docstring": "Microphone may not support our native processing sampling rate, so\nresample from input_rate to RATE_PROCESS here for webrtcvad and\ndeepspeech\n\nArgs:\ndata (binary): Input audio stream\ninput_rate (int): Input audio rate to resample from", "source": "juraj-google-style"}
{"code": "def plot(self, figure_list):\n        \n\n        \n        if self._current_subscript_stage is not None:\n            if self._current_subscript_stage['current_subscript'] is not None:\n                self._current_subscript_stage['current_subscript'].plot(figure_list)\n\n        if (self.is_running is False) and not (self.data == {} or self.data is None):\n\n            script_names = list(self.settings['script_order'].keys())\n            script_indices = [self.settings['script_order'][name] for name in script_names]\n            _, sorted_script_names = list(zip(*sorted(zip(script_indices, script_names))))\n\n            last_script = self.scripts[sorted_script_names[-1]]\n\n            last_script.force_update()  \n\n            axes_list = last_script.get_axes_layout(figure_list)\n\n            \n            try:\n                last_script._plot(axes_list, self.data)\n            except TypeError as err:\n                print((warnings.warn('can\\'t plot average script data because script.plot function doens\\'t take data as optional argument. Plotting last data set instead')))\n                print((err.message))\n                last_script.plot(figure_list)", "docstring": "When each subscript is called, uses its standard plotting\n\nArgs:\nfigure_list: list of figures passed from the guit", "source": "juraj-google-style"}
{"code": "def convert_padding(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting padding...')\n\n    if params['mode'] == 'constant':\n        \n\n        if params['value'] != 0.0:\n            raise AssertionError('Cannot convert non-zero padding')\n\n        if names:\n            tf_name = 'PADD' + random_string(4)\n        else:\n            tf_name = w_name + str(random.random())\n\n        \n        padding_name = tf_name\n        padding_layer = keras.layers.ZeroPadding2D(\n            padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])),\n            name=padding_name\n        )\n\n        layers[scope_name] = padding_layer(layers[inputs[0]])\n    elif params['mode'] == 'reflect':\n\n        def target_layer(x, pads=params['pads']):\n            \n            layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT')\n            \n            return layer\n\n        lambda_layer = keras.layers.Lambda(target_layer)\n        layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert padding layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def is_native_xmon_gate(gate: ops.Gate) -> bool:\n    return isinstance(gate, (ops.CZPowGate, ops.MeasurementGate, ops.PhasedXPowGate, ops.XPowGate, ops.YPowGate, ops.ZPowGate))", "docstring": "Check if a gate is a native xmon gate.\n\nArgs:\ngate: Input gate.\n\nReturns:\nTrue if the gate is native to the xmon, false otherwise.", "source": "codesearchnet"}
{"code": "def merge_variables(program: cfg.Program, node: cfg.CFGNode, variables: Sequence[cfg.Variable]) -> cfg.Variable:\n    if not variables:\n        return program.NewVariable()\n    elif all((v is variables[0] for v in variables)):\n        return variables[0].AssignToNewVariable(node)\n    else:\n        v = program.NewVariable()\n        for r in variables:\n            v.PasteVariable(r, node)\n        return v", "docstring": "Create a combined Variable for a list of variables.\n\nThe purpose of this function is to create a final result variable for\nfunctions that return a list of \"temporary\" variables. (E.g. function\ncalls).\n\nArgs:\nprogram: A cfg.Program instance.\nnode: The current CFG node.\nvariables: A list of cfg.Variables.\n\nReturns:\nA cfg.Variable.", "source": "github-repos"}
{"code": "def parse(self, request):\n        \n        assert isinstance(request, HttpRequest), \"Invalid request type: %s\" % type(request)\n\n        if settings.INBOUND_MANDRILL_AUTHENTICATION_KEY:\n            _check_mandrill_signature(\n                request=request,\n                key=settings.INBOUND_MANDRILL_AUTHENTICATION_KEY,\n            )\n\n        try:\n            messages = json.loads(request.POST['mandrill_events'])\n        except (ValueError, KeyError) as ex:\n            raise RequestParseError(\"Request is not a valid json: %s\" % ex)\n\n        if not messages:\n            logger.debug(\"No messages found in mandrill request: %s\", request.body)\n            return []\n\n        emails = []\n        for message in messages:\n            if message.get('event') != 'inbound':\n                logger.debug(\"Discarding non-inbound message\")\n                continue\n\n            msg = message.get('msg')\n            try:\n                from_email = msg['from_email']\n                to = list(self._get_recipients(msg['to']))\n                cc = list(self._get_recipients(msg['cc'])) if 'cc' in msg else []\n                bcc = list(self._get_recipients(msg['bcc'])) if 'bcc' in msg else []\n\n                subject = msg.get('subject', \"\")\n\n                attachments = msg.get('attachments', {})\n                attachments.update(msg.get('images', {}))\n\n                text = msg.get('text', \"\")\n                html = msg.get('html', \"\")\n            except (KeyError, ValueError) as ex:\n                raise RequestParseError(\n                    \"Inbound request is missing or got an invalid value.: %s.\" % ex\n                )\n\n            email = EmailMultiAlternatives(\n                subject=subject,\n                body=text,\n                from_email=self._get_sender(\n                    from_email=from_email,\n                    from_name=msg.get('from_name'),\n                ),\n                to=to,\n                cc=cc,\n                bcc=bcc,\n            )\n            if html is not None and len(html) > 0:\n                email.attach_alternative(html, \"text/html\")\n\n            email = self._process_attachments(email, attachments)\n            emails.append(email)\n\n        return emails", "docstring": "Parse incoming request and return an email instance.\n\nArgs:\nrequest: an HttpRequest object, containing a list of forwarded emails, as\nper Mandrill specification for inbound emails.\n\nReturns:\na list of EmailMultiAlternatives instances", "source": "juraj-google-style"}
{"code": "def set_tick(self, index, interval):\n    name = self.tick_name(index)\n    if (name is None):\n        return pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY)\n    self.ticks[name] = interval\n    return Error.NO_ERROR", "docstring": "Update the a tick's interval.\n\nArgs:\nindex (int): The index of the tick that you want to fetch.\ninterval (int): The number of seconds between ticks.\nSetting this to 0 will disable the tick.\n\nReturns:\nint: An error code.", "source": "codesearchnet"}
{"code": "def reward(self,\n               state: Sequence[tf.Tensor],\n               action: Sequence[tf.Tensor],\n               next_state: Sequence[tf.Tensor]) -> tf.Tensor:\n        \n        scope = self.reward_scope(state, action, next_state)\n        r = self.compile_reward(scope).tensor\n        with self.graph.as_default():\n            with tf.name_scope('reward'):\n                return tf.expand_dims(r, -1)", "docstring": "Compiles the reward function given the current `state`, `action` and\n`next_state`.\n\nArgs:\nstate (Sequence[tf.Tensor]): A tuple of current state tensors.\naction (Sequence[tf.Tensor]): A tuple of action tensors.\nnext_state (Sequence[tf.Tensor]): A tuple of next state tensors.\n\nReturns:\n(:obj:`tf.Tensor`): A tensor representing the reward function.", "source": "juraj-google-style"}
{"code": "def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting reduce_sum ...')\n    keepdims = (params['keepdims'] > 0)\n    axis = params['axes']\n\n    def target_layer(x, keepdims=keepdims, axis=axis):\n        import keras.backend as K\n        return K.sum(x, keepdims=keepdims, axis=axis)\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert reduce_sum layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def res_call(self, ns, types_ns, node, f_type, args, keywords):\n    raise NotImplementedError('subclasses must implement')", "docstring": "Resolves the return type an external function or method call.\n\nArgs:\nns: namespace\ntypes_ns: types namespace\nnode: str, the function name\nf_type: types of the actual function being called, if known\nargs: types of each respective argument in node.args\nkeywords: types of each respective argument in node.keywords\n\nReturns:\nTuple (return_type, side_effect_types). The first element is just the\nreturn types of the function. The second element is a map from\nargument names to sets of types, and allow modelling side effects of\nfunctions (for example via global or nonlocal).", "source": "github-repos"}
{"code": "def __init__(self, tag, *value):\n        \n        self.tag = tag\n        self.value = list(flatten(value))", "docstring": "init\n\nArgs:\ntag (str): The tag1\n*value: the elements you want to put into single's value(list), can be one element or several seperate by comma, or put into a list or combination of those. *value will be flattend to a single one deminision list. In subclasses' init, raw data should be converted to single if needed according to specific subclass.", "source": "juraj-google-style"}
{"code": "def case(self, case):\n        \n        LOG.debug(\"Getting case {0} from database\".format(case.get('case_id')))\n        case_id = case['case_id']\n        return self.db.case.find_one({'case_id': case_id})", "docstring": "Get a case from the database\n\nSearch the cases with the case id\n\nArgs:\ncase (dict): A case dictionary\n\nReturns:\nmongo_case (dict): A mongo case dictionary", "source": "juraj-google-style"}
{"code": "def from_dict(cls, data):\n    fulfillment = data['fulfillment']\n    if (not isinstance(fulfillment, (Fulfillment, type(None)))):\n        try:\n            fulfillment = Fulfillment.from_uri(data['fulfillment'])\n        except ASN1DecodeError:\n            raise InvalidSignature(\"Fulfillment URI couldn't been parsed\")\n        except TypeError:\n            fulfillment = _fulfillment_from_details(data['fulfillment'])\n    fulfills = TransactionLink.from_dict(data['fulfills'])\n    return cls(fulfillment, data['owners_before'], fulfills)", "docstring": "Transforms a Python dictionary to an Input object.\n\nNote:\nOptionally, this method can also serialize a Cryptoconditions-\nFulfillment that is not yet signed.\n\nArgs:\ndata (dict): The Input to be transformed.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Input`\n\nRaises:\nInvalidSignature: If an Input's URI couldn't be parsed.", "source": "codesearchnet"}
{"code": "def or_filter(self, **filters):\n    clone = copy.deepcopy(self)\n    clone.adapter.add_query([('OR_QRY', filters)])\n    return clone", "docstring": "Works like \"filter\" but joins given filters with OR operator.\n\nArgs:\n**filters: Query filters as keyword arguments.\n\nReturns:\nSelf. Queryset object.\n\nExample:\n>>> Person.objects.or_filter(age__gte=16, name__startswith='jo')", "source": "codesearchnet"}
{"code": "def get_2d_local_memory_v2(x, query_shape, memory_flange):\n  \n  (_, height, width, depth_x) = common_layers.shape_list(x)\n  \n  \n  paddings = [[0, 0], [memory_flange[0], memory_flange[0]],\n              [memory_flange[1], memory_flange[1]], [0, 0]]\n  padded_x = tf.pad(x, paddings)\n  padded_x.set_shape([None, height+2*memory_flange[0],\n                      width+2*memory_flange[1], depth_x])\n  num_h_memory_blocks = height\n  num_w_memory_blocks = width\n  x_memory_blocks = _extract_blocks(padded_x,\n                                    query_shape[0], query_shape[1])\n  x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks,\n                            2)\n  x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2)\n  x_right_width = tf.concat(x_width_blocks[1:], axis=2)\n  x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4)\n\n  x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1)\n  x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1)\n  x_bottom_height = tf.concat(x_height_blocks[1:], axis=1)\n  x = tf.concat([x_top_height, x_bottom_height], axis=3)\n\n  return x", "docstring": "Gathering memory blocks around query blocks. flange is half of query .\n\nOnly works if memory flanges are half of query sizes.\n\nArgs:\nx: a [batch, height, width, depth tensor]\nquery_shape: 2-d integer list of query shape\nmemory_flange: 2-d integer list of memory flanges\n\nReturns:\nx: A [batch, num_h_blocks, num_w_blocks,\nquery_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]\ntensor.", "source": "juraj-google-style"}
{"code": "def decr(self, key, value, noreply=False):\n        \n        key = self.check_key(key)\n        cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')\n        if noreply:\n            cmd += b' noreply'\n        cmd += b'\\r\\n'\n        results = self._misc_cmd([cmd], b'decr', noreply)\n        if noreply:\n            return None\n        if results[0] == b'NOT_FOUND':\n            return None\n        return int(results[0])", "docstring": "The memcached \"decr\" command.\n\nArgs:\nkey: str, see class docs for details.\nvalue: int, the amount by which to increment the value.\nnoreply: optional bool, False to wait for the reply (the default).\n\nReturns:\nIf noreply is True, always returns None. Otherwise returns the new\nvalue of the key, or None if the key wasn't found.", "source": "juraj-google-style"}
{"code": "def get_lock_requests(self):\n    d = defaultdict(list)\n    if self._context:\n        for variant in self._context.resolved_packages:\n            name = variant.name\n            version = variant.version\n            lock = self.patch_locks.get(name)\n            if (lock is None):\n                lock = self.default_patch_lock\n            request = get_lock_request(name, version, lock)\n            if (request is not None):\n                d[lock].append(request)\n    return d", "docstring": "Take the current context, and the current patch locks, and determine\nthe effective requests that will be added to the main request.\n\nReturns:\nA dict of (PatchLock, [Requirement]) tuples. Each requirement will be\na weak package reference. If there is no current context, an empty\ndict will be returned.", "source": "codesearchnet"}
{"code": "def add_child(self, key, value):\n    if (type(value) in (list, tuple, dict)):\n        if (type(value) == dict):\n            for k in value.keys():\n                self.add_child(k, value[k])\n            return\n        i = 0\n        for child in value:\n            self.add_child(key[i], child)\n            i = (i + 1)\n        return\n    if hasattr(value, 'attributes'):\n        value.attributes['data-parent-widget'] = self.identifier\n        value._parent = self\n    if (key in self.children):\n        self._render_children_list.remove(key)\n    self._render_children_list.append(key)\n    self.children[key] = value", "docstring": "Adds a child to the Tag\n\nTo retrieve the child call get_child or access to the Tag.children[key] dictionary.\n\nArgs:\nkey (str):  Unique child's identifier, or iterable of keys\nvalue (Tag, str): can be a Tag, an iterable of Tag or a str. In case of iterable\nof Tag is a dict, each item's key is set as 'key' param", "source": "codesearchnet"}
{"code": "def update_logging_config(context, log_name=None, file_name='worker.log'):\n    log_name = (log_name or __name__.split('.')[0])\n    top_level_logger = logging.getLogger(log_name)\n    datefmt = context.config['log_datefmt']\n    fmt = context.config['log_fmt']\n    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)\n    if context.config.get('verbose'):\n        top_level_logger.setLevel(logging.DEBUG)\n        if (len(top_level_logger.handlers) == 0):\n            handler = logging.StreamHandler()\n            handler.setFormatter(formatter)\n            top_level_logger.addHandler(handler)\n    else:\n        top_level_logger.setLevel(logging.INFO)\n    makedirs(context.config['log_dir'])\n    path = os.path.join(context.config['log_dir'], file_name)\n    if context.config['watch_log_file']:\n        handler = logging.handlers.WatchedFileHandler(path)\n    else:\n        handler = logging.FileHandler(path)\n    handler.setFormatter(formatter)\n    top_level_logger.addHandler(handler)\n    top_level_logger.addHandler(logging.NullHandler())", "docstring": "Update python logging settings from config.\n\nBy default, this sets the ``scriptworker`` log settings, but this will\nchange if some other package calls this function or specifies the ``log_name``.\n\n* Use formatting from config settings.\n* Log to screen if ``verbose``\n* Add a rotating logfile from config settings.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nlog_name (str, optional): the name of the Logger to modify.\nIf None, use the top level module ('scriptworker').\nDefaults to None.", "source": "codesearchnet"}
{"code": "def publish(self, data):\n    if (self.entity_api_key == ''):\n        return {'status': 'failure', 'response': 'No API key found in request'}\n    publish_url = (self.base_url + 'api/0.1.0/publish')\n    publish_headers = {'apikey': self.entity_api_key}\n    publish_data = {'exchange': 'amq.topic', 'key': str(self.entity_id), 'body': str(data)}\n    with self.no_ssl_verification():\n        r = requests.post(publish_url, json.dumps(publish_data), headers=publish_headers)\n    response = dict()\n    if ('No API key' in str(r.content.decode('utf-8'))):\n        response['status'] = 'failure'\n        r = json.loads(r.content.decode('utf-8'))['message']\n    elif ('publish message ok' in str(r.content.decode('utf-8'))):\n        response['status'] = 'success'\n        r = r.content.decode('utf-8')\n    else:\n        response['status'] = 'failure'\n        r = r.content.decode('utf-8')\n    response['response'] = str(r)\n    return response", "docstring": "This function allows an entity to publish data to the middleware.\n\nArgs:\ndata    (string): contents to be published by this entity.", "source": "codesearchnet"}
{"code": "def Run(self):\n    if (not self.executable):\n        logging.error(('Could not locate \"%s\"' % self.long_name))\n        return 0\n    finfo = os.stat(self.executable)\n    self.date = time.localtime(finfo[stat.ST_MTIME])\n    logging.info(('Running: %s %s </dev/null 2>&1' % (self.executable, FLAGS.help_flag)))\n    (child_stdin, child_stdout_and_stderr) = os.popen4([self.executable, FLAGS.help_flag])\n    child_stdin.close()\n    self.output = child_stdout_and_stderr.readlines()\n    child_stdout_and_stderr.close()\n    if (len(self.output) < _MIN_VALID_USAGE_MSG):\n        logging.error(('Error: \"%s %s\" returned only %d lines: %s' % (self.name, FLAGS.help_flag, len(self.output), self.output)))\n        return 0\n    return 1", "docstring": "Run it and collect output.\n\nReturns:\n1 (true)   If everything went well.\n0 (false)  If there were problems.", "source": "codesearchnet"}
{"code": "def angle( x, y ):\n    \n    dot = np.dot( x, y )\n    x_mod = np.linalg.norm( x )\n    y_mod = np.linalg.norm( y )\n    cos_angle = dot / ( x_mod * y_mod )\n    return np.degrees( np.arccos( cos_angle ) )", "docstring": "Calculate the angle between two vectors, in degrees.\n\nArgs:\nx (np.array): one vector.\ny (np.array): the other vector.\n\nReturns:\n(float):      the angle between x and y in degrees.", "source": "juraj-google-style"}
{"code": "def _prepare_headers(self, request, filter=None, order_by=None, group_by=[], page=None, page_size=None):\n        \n\n        if filter:\n            request.set_header('X-Nuage-Filter', filter)\n\n        if order_by:\n            request.set_header('X-Nuage-OrderBy', order_by)\n\n        if page is not None:\n            request.set_header('X-Nuage-Page', str(page))\n\n        if page_size:\n            request.set_header('X-Nuage-PageSize', str(page_size))\n\n        if len(group_by) > 0:\n            header = \", \".join(group_by)\n            request.set_header('X-Nuage-GroupBy', 'true')\n            request.set_header('X-Nuage-Attributes', header)", "docstring": "Prepare headers for the given request\n\nArgs:\nrequest: the NURESTRequest to send\nfilter: string\norder_by: string\ngroup_by: list of names\npage: int\npage_size: int", "source": "juraj-google-style"}
{"code": "def _get_break_loop_node(break_node):\n    \n    loop_nodes = (astroid.For, astroid.While)\n    parent = break_node.parent\n    while not isinstance(parent, loop_nodes) or break_node in getattr(\n        parent, \"orelse\", []\n    ):\n        break_node = parent\n        parent = parent.parent\n        if parent is None:\n            break\n    return parent", "docstring": "Returns the loop node that holds the break node in arguments.\n\nArgs:\nbreak_node (astroid.Break): the break node of interest.\n\nReturns:\nastroid.For or astroid.While: the loop node holding the break node.", "source": "juraj-google-style"}
{"code": "def pooled_sample_variance(sample1, sample2):\n    deg_freedom = ((len(sample1) + len(sample2)) - 2)\n    mean1 = statistics.mean(sample1)\n    squares1 = (((x - mean1) ** 2) for x in sample1)\n    mean2 = statistics.mean(sample2)\n    squares2 = (((x - mean2) ** 2) for x in sample2)\n    return ((math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom))", "docstring": "Find the pooled sample variance for two samples.\n\nArgs:\nsample1: one sample.\nsample2: the other sample.\n\nReturns:\nPooled sample variance, as a float.", "source": "codesearchnet"}
{"code": "def __init__(self, name, metadata):\n        \n\n        if not isinstance(metadata, dict):\n            raise TypeError(\"Metadata should be a dictionary not a %s\" % str(\n                type(metadata)))\n\n        self.name = name\n        self.metadata = metadata\n        self.relations = {}\n        if 'relation' in self.metadata:\n            for relation in self.metadata['relation']:\n                tmp = re.split(r':\\s+', relation)\n                relname = tmp[0]\n                relval = tmp[1]\n\n                if relname in self.relations:\n                    self.relations[relname].append(relval)\n                else:\n                    self.relations[relname] = [relval]", "docstring": "Initialize base GEO object.\n\nArgs:\nname (:obj:`str`): Name of the object.\nmetadata (:obj:`dict`): Metadata information.\n\nRaises:\nTypeError: Metadata should be a dict.", "source": "juraj-google-style"}
{"code": "def check_media_service_name_availability(access_token, subscription_id, msname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.media/CheckNameAvailability?', 'api-version=', MEDIA_API])\n    ms_body = {'name': msname}\n    ms_body['type'] = 'mediaservices'\n    body = json.dumps(ms_body)\n    return do_post(endpoint, body, access_token)", "docstring": "Check media service name availability.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nmsname (str): media service name.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def nic_v2(msg, NICa, NICbc):\n    \n    if typecode(msg) < 5 or typecode(msg) > 22:\n        raise RuntimeError(\n            \"%s: Not a surface position message (5<TC<8), \\\n            airborne position message (8<TC<19), \\\n            or airborne position with GNSS height (20<TC<22)\" % msg\n        )\n\n    tc = typecode(msg)\n    NIC = uncertainty.TC_NICv2_lookup[tc]\n\n    if 20<=tc<=22:\n        NICs = 0\n    else:\n        NICs = NICa*2 + NICbc\n\n    try:\n        if isinstance(NIC, dict):\n            NIC = NIC[NICs]\n\n        Rc = uncertainty.NICv2[NIC][NICs]['Rc']\n    except KeyError:\n        Rc = uncertainty.NA\n\n    return Rc", "docstring": "Calculate NIC, navigation integrity category, for ADS-B version 2\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\nNICa (int or string): NIC supplement - A\nNICbc (int or srting): NIC supplement - B or C\n\nReturns:\nint or string: Horizontal Radius of Containment", "source": "juraj-google-style"}
{"code": "def _patch_mask(body: dict) -> list:\n    mask = set()\n    if isinstance(body, dict):\n        for parent, value in body.items():\n            children = _patch_mask(value)\n            if children and parent not in ('budgetSegments', 'partnerCosts'):\n                for child in children:\n                    mask.add(parent + '.' + child)\n            else:\n                mask.add(parent)\n    elif isinstance(body, (list, tuple)):\n        for value in body:\n            mask.update(_patch_mask(value))\n    return list(mask)", "docstring": "Loop through dictionary defining API body and create patch mask on keys.\n\nEach patch mask has format parent.child repreated. Each leaf has a full path\ndescribing the patch. Exceptions are budgetSegments, and partnerCosts which\nare lists with an order and changed at the parent level not the leaves.\n\nArgs:\nbody: Any REST API call dictionary, defined by API endpoint.\n\nReturns:\nA list of strings representing full path to each leaf key.", "source": "github-repos"}
{"code": "def add_chapter(self, c):\n    try:\n        assert (type(c) == chapter.Chapter)\n    except AssertionError:\n        raise TypeError('chapter must be of type Chapter')\n    chapter_file_output = os.path.join(self.OEBPS_DIR, self.current_chapter_path)\n    c._replace_images_in_chapter(self.OEBPS_DIR)\n    c.write(chapter_file_output)\n    self._increase_current_chapter_number()\n    self.chapters.append(c)", "docstring": "Add a Chapter to your epub.\n\nArgs:\nc (Chapter): A Chapter object representing your chapter.\n\nRaises:\nTypeError: Raised if a Chapter object isn't supplied to this\nmethod.", "source": "codesearchnet"}
{"code": "def parse_line(line):\n  \n  columns = line.split()\n  token = columns.pop(0)\n  values = [float(column) for column in columns]\n  return token, values", "docstring": "Parses a line of a text embedding file.\n\nArgs:\nline: (str) One line of the text embedding file.\n\nReturns:\nA token string and its embedding vector in floats.", "source": "juraj-google-style"}
{"code": "def add_enumerable_item_to_dict(dict_, key, item):\n    dict_.setdefault(key, [])\n    if isinstance(item, (list, tuple)):\n        dict_[key].extend(item)\n    else:\n        dict_[key].append(item)", "docstring": "Add an item to a list contained in a dict.\n\nFor example: If the dict is ``{'some_key': ['an_item']}``, then calling this function\nwill alter the dict to ``{'some_key': ['an_item', 'another_item']}``.\n\nIf the key doesn't exist yet, the function initializes it with a list containing the\nitem.\n\nList-like items are allowed. In this case, the existing list will be extended.\n\nArgs:\ndict_ (dict): the dict to modify\nkey (str): the key to add the item to\nitem (whatever): The item to add to the list associated to the key", "source": "codesearchnet"}
{"code": "def _ReadSpecificationFile(self, path):\n    \n    specification_store = specification.FormatSpecificationStore()\n\n    with io.open(\n        path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:\n      for line in file_object.readlines():\n        line = line.strip()\n        if not line or line.startswith('\n          continue\n\n        try:\n          identifier, offset, pattern = line.split()\n        except ValueError:\n          logger.error('[skipping] invalid line: {0:s}'.format(line))\n          continue\n\n        try:\n          offset = int(offset, 10)\n        except ValueError:\n          logger.error('[skipping] invalid offset in line: {0:s}'.format(line))\n          continue\n\n        try:\n          \n          \n          pattern = codecs.escape_decode(pattern)[0]\n        \n        except ValueError:\n          logger.error(\n              '[skipping] invalid pattern in line: {0:s}'.format(line))\n          continue\n\n        format_specification = specification.FormatSpecification(identifier)\n        format_specification.AddNewSignature(pattern, offset=offset)\n        specification_store.AddSpecification(format_specification)\n\n    return specification_store", "docstring": "Reads the format specification file.\n\nArgs:\npath (str): path of the format specification file.\n\nReturns:\nFormatSpecificationStore: format specification store.", "source": "juraj-google-style"}
{"code": "def _AnalyzeSolutionSpace(initial_state):\n    count = 0\n    seen = set()\n    p_queue = []\n    node = _StateNode(initial_state, False, None)\n    heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(0, count), node))\n    count += 1\n    while p_queue:\n        item = p_queue[0]\n        penalty = item.ordered_penalty.penalty\n        node = item.state_node\n        if not node.state.next_token:\n            break\n        heapq.heappop(p_queue)\n        if count > 10000:\n            node.state.ignore_stack_for_comparison = True\n        before_seen_count = len(seen)\n        seen.add(node.state)\n        if before_seen_count == len(seen):\n            continue\n        count = _AddNextStateToQueue(penalty, node, False, count, p_queue)\n        count = _AddNextStateToQueue(penalty, node, True, count, p_queue)\n    if not p_queue:\n        return False\n    _ReconstructPath(initial_state, heapq.heappop(p_queue).state_node)\n    return True", "docstring": "Analyze the entire solution space starting from initial_state.\n\nThis implements a variant of Dijkstra's algorithm on the graph that spans\nthe solution space (LineStates are the nodes). The algorithm tries to find\nthe shortest path (the one with the lowest penalty) from 'initial_state' to\nthe state where all tokens are placed.\n\nArguments:\ninitial_state: (format_decision_state.FormatDecisionState) The initial state\nto start the search from.\n\nReturns:\nTrue if a formatting solution was found. False otherwise.", "source": "github-repos"}
{"code": "def get_course_track_selection_url(course_run, query_parameters):\n    \n    try:\n        course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})\n    except KeyError:\n        LOGGER.exception(\n            \"KeyError while parsing course run data.\\nCourse Run: \\n[%s]\", course_run,\n        )\n        raise\n\n    url = '{}{}'.format(\n        settings.LMS_ROOT_URL,\n        course_root\n    )\n    course_run_url = update_query_parameters(url, query_parameters)\n\n    return course_run_url", "docstring": "Return track selection url for the given course.\n\nArguments:\ncourse_run (dict): A dictionary containing course run metadata.\nquery_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\nRaises:\n(KeyError): Raised when course run dict does not have 'key' key.\n\nReturns:\n(str): Course track selection url.", "source": "juraj-google-style"}
{"code": "def set_attributes(path, archive=None, hidden=None, normal=None, notIndexed=None, readonly=None, system=None, temporary=None):\n    if (not os.path.exists(path)):\n        raise CommandExecutionError('Path not found: {0}'.format(path))\n    if normal:\n        if (archive or hidden or notIndexed or readonly or system or temporary):\n            raise CommandExecutionError('Normal attribute may not be used with any other attributes')\n        ret = win32file.SetFileAttributes(path, 128)\n        return (True if (ret is None) else False)\n    intAttributes = win32file.GetFileAttributes(path)\n    if (archive is not None):\n        if archive:\n            intAttributes |= 32\n        else:\n            intAttributes &= 65503\n    if (hidden is not None):\n        if hidden:\n            intAttributes |= 2\n        else:\n            intAttributes &= 65533\n    if (notIndexed is not None):\n        if notIndexed:\n            intAttributes |= 8192\n        else:\n            intAttributes &= 57343\n    if (readonly is not None):\n        if readonly:\n            intAttributes |= 1\n        else:\n            intAttributes &= 65534\n    if (system is not None):\n        if system:\n            intAttributes |= 4\n        else:\n            intAttributes &= 65531\n    if (temporary is not None):\n        if temporary:\n            intAttributes |= 256\n        else:\n            intAttributes &= 65279\n    ret = win32file.SetFileAttributes(path, intAttributes)\n    return (True if (ret is None) else False)", "docstring": "Set file attributes for a file.  Note that the normal attribute\nmeans that all others are false.  So setting it will clear all others.\n\nArgs:\npath (str): The path to the file or directory\narchive (bool): Sets the archive attribute. Default is None\nhidden (bool): Sets the hidden attribute. Default is None\nnormal (bool):\nResets the file attributes. Cannot be used in conjunction with any\nother attribute. Default is None\nnotIndexed (bool): Sets the indexed attribute. Default is None\nreadonly (bool): Sets the readonly attribute. Default is None\nsystem (bool): Sets the system attribute. Default is None\ntemporary (bool): Sets the temporary attribute. Default is None\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.set_attributes c:\\\\temp\\\\a.txt normal=True\nsalt '*' file.set_attributes c:\\\\temp\\\\a.txt readonly=True hidden=True", "source": "codesearchnet"}
{"code": "def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims):\n    if isinstance(tensor_or_shape, Tensor):\n        return reshape(tensor_or_shape, replace_dimensions(tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims))\n    if (not isinstance(tensor_or_shape, Shape)):\n        raise ValueError(('tensor_or_shape must be a Tensor or Shape got %s' % (tensor_or_shape,)))\n    in_dims = tensor_or_shape.dims\n    if isinstance(old_dim_or_dims, Dimension):\n        old_dim_or_dims = [old_dim_or_dims]\n    if isinstance(new_dim_or_dims, Dimension):\n        new_dim_or_dims = [new_dim_or_dims]\n    if ((not isinstance(old_dim_or_dims, list)) or (not old_dim_or_dims)):\n        raise ValueError(('old_dim_or_dims must be a Dimension or a list of Dimension got %s' % (old_dim_or_dims,)))\n    if ((not isinstance(new_dim_or_dims, list)) or (not new_dim_or_dims)):\n        raise ValueError(('new_dim_or_dims must be a Dimension or a list of Dimension got %s' % (new_dim_or_dims,)))\n    try:\n        positions = [in_dims.index(d) for d in old_dim_or_dims]\n        pos = positions[0]\n        if (positions != list(range(pos, (pos + len(positions))))):\n            raise ValueError()\n    except ValueError:\n        raise ValueError((\"old_dim_or_dims must be a subsequence of the input's dimensions old_dim_or_dims=%s input's dimensions=%s\" % (old_dim_or_dims, in_dims)))\n    return Shape(((in_dims[:pos] + new_dim_or_dims) + in_dims[(pos + len(old_dim_or_dims)):]))", "docstring": "Replace dimensions in a Tensor or Shape.\n\nold_dim_or_dims consists of a single dimension or a list of dimensions\nthat must occur consecutively in the input shape.  They are replaced\nby the dimensions in new_dim_or_dims.\n\nArgs:\ntensor_or_shape: a Tensor or a Shape\nold_dim_or_dims: a Dimension or a list of Dimensions\nnew_dim_or_dims: a Dimensions or a list of Dimensions\nReturns:\na new Tensor or a Shape", "source": "codesearchnet"}
{"code": "async def destroy_attachment(self, a: Attachment):\n    (await self.connection('DELETE', 'tournaments/{}/matches/{}/attachments/{}'.format(self._tournament_id, self._id, a._id)))\n    if (a in self.attachments):\n        self.attachments.remove(a)", "docstring": "destroy a match attachment\n\n|methcoro|\n\nArgs:\na: the attachment you want to destroy\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def scope(self, framebuffer, enable_only=None, *, textures=(), uniform_buffers=(), storage_buffers=()) -> 'Scope':\n        \n\n        textures = tuple((tex.mglo, idx) for tex, idx in textures)\n        uniform_buffers = tuple((buf.mglo, idx) for buf, idx in uniform_buffers)\n        storage_buffers = tuple((buf.mglo, idx) for buf, idx in storage_buffers)\n\n        res = Scope.__new__(Scope)\n        res.mglo = self.mglo.scope(framebuffer.mglo, enable_only, textures, uniform_buffers, storage_buffers)\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Create a :py:class:`Scope` object.\n\nArgs:\nframebuffer (Framebuffer): The framebuffer to use when entering.\nenable_only (int): The enable_only flags to set when entering.\n\nKeyword Args:\ntextures (list): List of (texture, binding) tuples.\nuniform_buffers (list): List of (buffer, binding) tuples.\nstorage_buffers (list): List of (buffer, binding) tuples.", "source": "juraj-google-style"}
{"code": "def _calculate_scores(self, query, key):\n    if self.score_mode == 'dot':\n        scores = ops.matmul(query, ops.transpose(key, axes=[0, 2, 1]))\n        if self.scale is not None:\n            scores *= self.scale\n    elif self.score_mode == 'concat':\n        q_reshaped = ops.expand_dims(query, axis=-2)\n        k_reshaped = ops.expand_dims(key, axis=-3)\n        if self.scale is not None:\n            scores = self.concat_score_weight * ops.sum(ops.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1)\n        else:\n            scores = self.concat_score_weight * ops.sum(ops.tanh(q_reshaped + k_reshaped), axis=-1)\n    else:\n        raise ValueError('scores not computed')\n    return scores", "docstring": "Calculates attention scores as a query-key dot product.\n\nArgs:\nquery: Query tensor of shape `(batch_size, Tq, dim)`.\nkey: Key tensor of shape `(batch_size, Tv, dim)`.\n\nReturns:\nTensor of shape `(batch_size, Tq, Tv)`.", "source": "github-repos"}
{"code": "def get_gap(self, tol=0.001, abs_tol=False, spin=None):\n    (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)\n    return max((cbm - vbm), 0.0)", "docstring": "Expects a DOS object and finds the gap.\n\nArgs:\ntol: tolerance in occupations for determining the gap\nabs_tol: An absolute tolerance (True) and a relative one (False)\nspin: Possible values are None - finds the gap in the summed\ndensities, Up - finds the gap in the up spin channel,\nDown - finds the gap in the down spin channel.\n\nReturns:\ngap in eV", "source": "codesearchnet"}
{"code": "def get_uri(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None):\n    if is_list:\n        return self._get_typed_list_value(key=key, target_type=UriSpec, type_convert=self.parse_uri_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)\n    return self._get_typed_value(key=key, target_type=UriSpec, type_convert=self.parse_uri_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)", "docstring": "Get a the value corresponding to the key and converts it to `UriSpec`.\n\nArgs\nkey: the dict key.\nis_list: If this is one element or a list of elements.\nis_optional: To raise an error if key was not found.\nis_secret: If the key is a secret.\nis_local: If the key is a local to this service.\ndefault: default value if is_optional is True.\noptions: list/tuple if provided, the value must be one of these values.\n\nReturns:\n`str`: value corresponding to the key.", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    devices = match.get('Devices', {})\n    for (device_identifier, device_information) in iter(devices.items()):\n        datetime_value = device_information.get('Connected', None)\n        if (not datetime_value):\n            continue\n        event_data = IPodPlistEventData()\n        event_data.device_id = device_identifier\n        for (key, value) in iter(device_information.items()):\n            if (key == 'Connected'):\n                continue\n            attribute_name = key.lower().replace(' ', '_')\n            setattr(event_data, attribute_name, value)\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract device information from the iPod plist.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def batch_frexp(inputs, max_bit=31):\n    shape_of_input = inputs.size()\n    inputs = inputs.view(-1)\n    output_m, output_e = np.frexp(inputs.cpu().numpy())\n    tmp_m = []\n    for m in output_m:\n        int_m_shifted = int(decimal.Decimal(m * 2 ** max_bit).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))\n        tmp_m.append(int_m_shifted)\n    output_m = np.array(tmp_m)\n    output_e = float(max_bit) - output_e\n    return (torch.from_numpy(output_m).to(inputs.device).view(shape_of_input), torch.from_numpy(output_e).to(inputs.device).view(shape_of_input))", "docstring": "Decompose the scaling factor into mantissa and twos exponent.\n\nArgs:\nscaling_factor (`torch.Tensor`):\nTarget scaling factor to decompose.\n\nReturns:\n``Tuple(torch.Tensor, torch.Tensor)`: mantisa and exponent", "source": "github-repos"}
{"code": "def transformer_moe_2k():\n    hparams = transformer_moe_8k()\n    hparams.batch_size = 2048\n    hparams.default_ff = 'sep'\n    encoder_archi = 'a/a/a/a/a'\n    decoder_archi = 'a-sepm/a-sepm/a-moe/a-sepm/a-sepm'\n    hparams.layer_types = '{}\n    return hparams", "docstring": "Base transformers model with moe.\n\nWill have the following architecture:\n* No encoder.\n* Layer 0: a - sep  (self-attention - unmasked separable convolutions)\n* Layer 1: a - sep\n* Layer 2: a - sep\n* Layer 3: a - sep\n* Layer 4: a - sep\n* Decoder architecture:\n* Layer 0: a - a - sepm  (self-attention - enco/deco-attention - masked sep)\n* Layer 1: a - a - sepm\n* Layer 2: a - a - moe  (mixture of expert layers in the middle)\n* Layer 3: a - a - sepm\n* Layer 4: a - a - sepm\n\nReturns:\nhparams", "source": "codesearchnet"}
{"code": "def load_stopwords(self, path):\n    if path:\n        with open(path) as f:\n            self.stopwords = set(f.read().splitlines())\n    else:\n        self.stopwords = set(pkgutil.get_data('textplot', 'data/stopwords.txt').decode('utf8').splitlines())", "docstring": "Load a set of stopwords.\n\nArgs:\npath (str): The stopwords file path.", "source": "codesearchnet"}
{"code": "def For(start, limit, delta, inputs, body, name=None, hostmem=None, rewrite_with_while=None):\n    if rewrite_with_while:\n        return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)\n    if body.captured_inputs:\n        ret = gen_functional_ops._for(start, limit, delta, inputs + body.captured_inputs, _LoopBodyCaptureWrapper(body), name=name)\n        ret = ret[:-len(body.captured_inputs)]\n    else:\n        ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)\n    if hostmem:\n        num_for_params = 3\n        input_attr = attr_value_pb2.AttrValue()\n        input_attr.list.i.extend([num_for_params + i for i in hostmem])\n        ret[0].op._set_attr('_input_hostmem', input_attr)\n        output_attr = attr_value_pb2.AttrValue()\n        output_attr.list.i.extend(hostmem)\n        ret[0].op._set_attr('_output_hostmem', output_attr)\n    return ret", "docstring": "out = input; for i in range(start, limit, delta) out = body(i, out).\n\nArgs:\nstart: A `Tensor` of type `int32`.\nlimit: A `Tensor` of type `int32`.\ndelta: A `Tensor` of type `int32`.\ninputs: A list of `Tensor` objects. A list of input tensors whose types are\nT.\nbody: A function takes a list of tensors and returns another list of\ntensors. Both lists have the same types as (int32, T...).\nname: A name for the operation (optional).\nhostmem: A list of integer. If i is in the list, inputs[i] is a host memory\ntensor. In other words, (i+1)-th argument of the body function is\nexpecting a host memory.\nrewrite_with_while: If True, using While op to implement the For.\n\nReturns:\nA list of `Tensor` objects. Has the same type as `input`.\nA list of output tensors whose types are T.", "source": "github-repos"}
{"code": "def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):\n    y_pred_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims\n    y_true_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims\n    if y_true_rank is not None and y_pred_rank is not None:\n        if y_pred_rank > 2:\n            y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]])\n        if y_true_rank > 1:\n            y_true = array_ops.reshape(y_true, [-1])\n    return math_ops.cast(nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), backend.floatx())", "docstring": "Computes how often integer targets are in the top `K` predictions.\n\nStandalone usage:\n>>> y_true = [2, 1]\n>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]\n>>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(\n...     y_true, y_pred, k=3)\n>>> assert m.shape == (2,)\n>>> m.numpy()\narray([1., 1.], dtype=float32)\n\nArgs:\ny_true: tensor of true targets.\ny_pred: tensor of predicted targets.\nk: (Optional) Number of top elements to look at for computing accuracy.\nDefaults to 5.\n\nReturns:\nSparse top K categorical accuracy value.", "source": "github-repos"}
{"code": "def __init__(\n        self, name, description='', creator='', raw={}):\n        \n        BossResource.__init__(self, name, description, creator, raw)", "docstring": "Constructor.\n\nArgs:\nname (string): Collection name.\ndescription (optional[string]): Collection description.  Defaults to empty.\ncreator (optional[string]): Resource creator.\nraw (optional[dictionary]): Holds JSON data returned by the Boss API on a POST (create) or GET operation.", "source": "juraj-google-style"}
{"code": "def _wrap_method(name):\n  \n  method = getattr(datetime.datetime, name)\n\n  \n  @functools.wraps(method, (\"__name__\", \"__doc__\"), ())\n  def wrapper(self, *args, **kw):\n    r = method(self, *args, **kw)\n\n    if isinstance(r, datetime.datetime) and not isinstance(r, type(self)):\n      r = type(self)(r)\n    return r\n\n  setattr(datetime_tz, name, wrapper)", "docstring": "Wrap a method.\n\nPatch a method which might return a datetime.datetime to return a\ndatetime_tz.datetime_tz instead.\n\nArgs:\nname: The name of the method to patch", "source": "juraj-google-style"}
{"code": "def compute_video_metrics_from_predictions(predictions, decode_hparams):\n    all_results = {}\n    (ssim_all_decodes, psnr_all_decodes) = ([], [])\n    for single_decode in predictions:\n        args = get_zipped_dataset_from_predictions(single_decode)\n        (psnr_single, ssim_single) = compute_one_decoding_video_metrics(*args)\n        psnr_all_decodes.append(psnr_single)\n        ssim_all_decodes.append(ssim_single)\n    psnr_all_decodes = np.array(psnr_all_decodes)\n    ssim_all_decodes = np.array(ssim_all_decodes)\n    all_results.update({'PSNR': psnr_all_decodes, 'SSIM': ssim_all_decodes})\n    return compute_all_metrics_statistics(all_results)", "docstring": "Computes metrics from predictions.\n\nArgs:\npredictions: list of list of dicts.\nouter length: num_decodes, inner_length: num_samples\ndecode_hparams: Decode hparams. instance of HParams.\nReturns:\nstatistics: dict of Tensors, key being the metric with each Tensor\nhaving the shape (num_samples, num_frames).", "source": "codesearchnet"}
{"code": "def bytes(self) -> bytes | None:\n    if self.part.text:\n        return self.text.encode()\n    if isinstance(self.part.inline_data, genai_types.Blob):\n        return self.part.inline_data.data\n    return None", "docstring": "Returns part contents as bytes.\n\nReturns:\nText encoded into bytes or bytes from inline data if the underlying part\nis a Blob.", "source": "github-repos"}
{"code": "def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:\n    result_dict: dict[str, Any] = {}\n    _AggModelIdMixin.add_model_id(self, result_dict)\n    _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions)\n    scores = [prediction.score for prediction in predictions if prediction.score is not None and (not math.isnan(prediction.score))]\n    if len(scores) > 0:\n        result_dict['score'] = self._agg(scores)\n    elif all(map(lambda x: x.score is None, predictions)):\n        result_dict['score'] = None\n    else:\n        result_dict['score'] = float('NaN')\n    return AnomalyPrediction(**result_dict)", "docstring": "Applies the score aggregation function to a list of predictions.\n\nArgs:\npredictions (Iterable[AnomalyPrediction]): A collection of\n`AnomalyPrediction` objects to be aggregated.\n\nReturns:\nAnomalyPrediction: A single `AnomalyPrediction` object with the\naggregated score. The aggregated score is determined as follows:\n\n- If there are any non-missing and non-error scores, the `agg_func` is\napplied to aggregate them.\n- If all scores are error scores (`None`), the aggregated score is also\n`None`.\n- If there are a mix of missing (`NaN`) and error scores (`None`), the\naggregated score is `NaN`.", "source": "github-repos"}
{"code": "def load(self, binary: pyquil.Program) -> 'QuantumFlowQVM':\n        \n\n        assert self.status in ['connected', 'done']\n        prog = quil_to_program(str(binary))\n\n        self._prog = prog\n        self.program = binary\n        self.status = 'loaded'\n\n        return self", "docstring": "Load a pyQuil program, and initialize QVM into a fresh state.\n\nArgs:\nbinary: A pyQuil program", "source": "juraj-google-style"}
{"code": "def new_message_from_header(header):\n    message_type = header.message_type\n    if (not isinstance(message_type, Type)):\n        try:\n            if isinstance(message_type, str):\n                message_type = Type[message_type]\n            elif isinstance(message_type, int):\n                message_type = Type(message_type)\n        except ValueError:\n            raise ValueError\n    message = new_message_from_message_type(message_type)\n    message.header.xid = header.xid\n    message.header.length = header.length\n    return message", "docstring": "Given an OF Header, return an empty message of header's message_type.\n\nArgs:\nheader (~pyof.v0x01.common.header.Header): Unpacked OpenFlow Header.\n\nReturns:\nEmpty OpenFlow message of the same type of message_type attribute from\nthe given header.\nThe header attribute of the message will be populated.\n\nRaises:\nKytosUndefinedMessageType: Unkown Message_Type.", "source": "codesearchnet"}
{"code": "def publish(self, message):\n    if (not isinstance(message, types.PubsubMessage)):\n        message = types.PubsubMessage(**message)\n    future = None\n    with self._state_lock:\n        if (not self.will_accept(message)):\n            return future\n        new_size = (self._size + message.ByteSize())\n        new_count = (len(self._messages) + 1)\n        overflow = ((new_size > self.settings.max_bytes) or (new_count >= self._settings.max_messages))\n        if ((not self._messages) or (not overflow)):\n            self._messages.append(message)\n            self._size = new_size\n            future = futures.Future(completed=threading.Event())\n            self._futures.append(future)\n    if overflow:\n        self.commit()\n    return future", "docstring": "Publish a single message.\n\nAdd the given message to this object; this will cause it to be\npublished once the batch either has enough messages or a sufficient\nperiod of time has elapsed.\n\nThis method is called by :meth:`~.PublisherClient.publish`.\n\nArgs:\nmessage (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.\n\nReturns:\nOptional[~google.api_core.future.Future]: An object conforming to\nthe :class:`~concurrent.futures.Future` interface or :data:`None`.\nIf :data:`None` is returned, that signals that the batch cannot\naccept a message.", "source": "codesearchnet"}
{"code": "def __call__(self, stream, content_type):\n        \n        try:\n            return json.load(codecs.getreader('utf-8')(stream))\n        finally:\n            stream.close()", "docstring": "Decode a JSON object into the corresponding Python object.\n\nArgs:\nstream (stream): The response stream to be deserialized.\ncontent_type (str): The content type of the response.\n\nReturns:\nobject: Body of the response deserialized into a JSON object.", "source": "juraj-google-style"}
{"code": "def _implicit_credentials_from_files():\n    credentials_filename = _get_environment_variable_file()\n    if (not credentials_filename):\n        credentials_filename = _get_well_known_file()\n        if os.path.isfile(credentials_filename):\n            extra_help = ' (produced automatically when running \"gcloud auth login\" command)'\n        else:\n            credentials_filename = None\n    else:\n        extra_help = ((' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS) + ' environment variable)')\n    if (not credentials_filename):\n        return\n    SETTINGS.env_name = DEFAULT_ENV_NAME\n    try:\n        return _get_application_default_credential_from_file(credentials_filename)\n    except (ApplicationDefaultCredentialsError, ValueError) as error:\n        _raise_exception_for_reading_json(credentials_filename, extra_help, error)", "docstring": "Attempts to get implicit credentials from local credential files.\n\nFirst checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS\nis set with a filename and then falls back to a configuration file (the\n\"well known\" file) associated with the 'gcloud' command line tool.\n\nReturns:\nCredentials object associated with the\nGOOGLE_APPLICATION_CREDENTIALS file or the \"well known\" file if\neither exist. If neither file is define, returns None, indicating\nno credentials from a file can detected from the current\nenvironment.", "source": "codesearchnet"}
{"code": "def CancelBatchJob(client, batch_job, max_poll_attempts=MAX_POLL_ATTEMPTS):\n    batch_job_service = client.GetService('BatchJobService', 'v201809')\n    batch_job['status'] = 'CANCELING'\n    operation = {'operator': 'SET', 'operand': batch_job}\n    batch_job_service.mutate([operation])\n    poll_attempt = 0\n    while ((poll_attempt in range(max_poll_attempts)) and (batch_job['status'] != 'CANCELED')):\n        sleep_interval = ((30 * (2 ** poll_attempt)) + (random.randint(0, 10000) / 1000))\n        print(('Batch Job not finished canceling, sleeping for %s seconds.' % sleep_interval))\n        time.sleep(sleep_interval)\n        batch_job = GetBatchJob(client, batch_job['id'])\n        poll_attempt += 1\n    if (batch_job['status'] == 'CANCELED'):\n        print(('Batch Job with ID \"%d\" has been successfully canceled.' % batch_job['id']))\n    else:\n        print(('Batch Job with ID \"%d\" failed to cancel after polling %d times.' % (batch_job['id'], max_poll_attempts)))", "docstring": "Cancels the given BatchJob.\n\nArgs:\nclient: an instantiated AdWordsClient used to cancel the BatchJob.\nbatch_job: a BatchJob to be canceled.\nmax_poll_attempts: an int defining the number of times the BatchJob will be\nchecked to determine whether it has been canceled.", "source": "codesearchnet"}
{"code": "def assert_same_structure(nest1, nest2, check_types=True):\n    nest_util.assert_same_structure(nest_util.Modality.DATA, nest1, nest2, check_types)", "docstring": "Asserts that two structures are nested in the same way.\n\nArgs:\nnest1: an arbitrarily nested structure.\nnest2: an arbitrarily nested structure.\ncheck_types: if `True` (default) types of sequences should be same as\nwell. For dictionary, \"type\" of dictionary is considered to include its\nkeys. In other words, two dictionaries with different keys are considered\nto have a different \"type\". If set to `False`, two iterables are\nconsidered same as long as they yield the elements that have same\nstructures.\n\nRaises:\nValueError: If the two structures do not have the same number of elements or\nif the two structures are not nested in the same way.\nTypeError: If the two structures differ in the type of sequence in any of\ntheir substructures. Only possible if `check_types` is `True`.", "source": "github-repos"}
{"code": "def __init__(self, date_time, date_time_description):\n    \n    super(OLECFSummaryInformationEvent, self).__init__(\n        date_time, date_time_description)\n    self.name = 'Summary Information'", "docstring": "Initializes an event.\n\nArgs:\ndate_time (dfdatetime.DateTimeValues): date and time values.\ndate_time_description (str): description of the meaning of the date\nand time values.", "source": "juraj-google-style"}
{"code": "def preprocess_image(image_buffer, output_height, output_width, num_channels, is_training=False):\n    if is_training:\n        image = _decode_crop_and_flip(image_buffer, num_channels)\n        mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE, value=[output_height, output_width])\n        image = _resize_image(image, output_height, output_width)\n    else:\n        image = tf.image.decode_jpeg(image_buffer, channels=num_channels)\n        image = _aspect_preserving_resize(image, _RESIZE_MIN)\n        mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE, value=[output_height, output_width])\n        image = _central_crop(image, output_height, output_width)\n    image.set_shape([output_height, output_width, num_channels])\n    return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)", "docstring": "Preprocesses the given image.\n\nPreprocessing includes decoding, cropping, and resizing for both training\nand eval images. Training preprocessing, however, introduces some random\ndistortion of the image to improve accuracy.\n\nArgs:\nimage_buffer: scalar string Tensor representing the raw JPEG image buffer.\noutput_height: The height of the image after preprocessing.\noutput_width: The width of the image after preprocessing.\nnum_channels: Integer depth of the image buffer for decoding.\nis_training: `True` if we're preprocessing the image for training and\n`False` otherwise.\n\nReturns:\nA preprocessed image.", "source": "codesearchnet"}
{"code": "def find_importer_frame():\n    byte = (lambda ch: (ord(ch) if PY2 else ch))\n    frame = inspect.currentframe()\n    try:\n        while frame:\n            code = frame.f_code\n            lasti = frame.f_lasti\n            if (byte(code.co_code[lasti]) == dis.opmap['IMPORT_NAME']):\n                arg = (byte(code.co_code[(lasti + 1)]) + (byte(code.co_code[(lasti + 2)]) * 256))\n                name = code.co_names[arg]\n                if (name == 'end'):\n                    break\n                end\n            end\n            frame = frame.f_back\n        end\n        return frame\n    finally:\n        del frame\n    end", "docstring": "Returns the outer frame importing this \"end\" module.\n\nIf this module is being imported by other means than import statement,\nNone is returned.\n\nReturns:\nA frame object or None.", "source": "codesearchnet"}
{"code": "def _create_environment(config):\n  \n  if isinstance(config.env, str):\n    env = gym.make(config.env)\n  else:\n    env = config.env()\n  if config.max_length:\n    env = tools.wrappers.LimitDuration(env, config.max_length)\n  if isinstance(env.action_space, gym.spaces.Box):\n    if config.normalize_ranges:\n      env = tools.wrappers.RangeNormalize(env)\n    env = tools.wrappers.ClipAction(env)\n  elif isinstance(env.action_space, gym.spaces.Discrete):\n    if config.normalize_ranges:\n      env = tools.wrappers.RangeNormalize(env, action=False)\n  else:\n    message = \"Unsupported action space '{}'\".format(type(env.action_space))\n    raise NotImplementedError(message)\n  env = tools.wrappers.ConvertTo32Bit(env)\n  env = tools.wrappers.CacheSpaces(env)\n  return env", "docstring": "Constructor for an instance of the environment.\n\nArgs:\nconfig: Object providing configurations via attributes.\n\nRaises:\nNotImplementedError: For action spaces other than Box and Discrete.\n\nReturns:\nWrapped OpenAI Gym environment.", "source": "juraj-google-style"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = GroundingDinoFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `GroundingDinoFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__(config)\n        self.model = PatchTSMixerModel(config)\n        self.head = PatchTSMixerLinearHead(config=config)\n        self.use_return_dict = config.use_return_dict\n        if config.scaling in ['std', 'mean', True]:\n            self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches)\n        else:\n            self.inject_scale = None\n        if config.post_init:\n            self.post_init()\n\n    @auto_docstring\n    def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForTimeSeriesClassificationOutput:\n        \n        loss = torch.nn.CrossEntropyLoss()\n        return_dict = return_dict if return_dict is not None else self.use_return_dict\n        model_output = self.model(past_values, output_hidden_states=output_hidden_states, return_dict=return_dict)\n        if isinstance(model_output, tuple):\n            model_output = PatchTSMixerModelOutput(*model_output)\n        if self.inject_scale is not None:\n            model_output.last_hidden_state = self.inject_scale(model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale)\n        y_hat = self.head(model_output.last_hidden_state)\n        if target_values is not None and return_loss is True:\n            loss_val = loss(y_hat, target_values)\n        else:\n            loss_val = None\n        if not return_dict:\n            return tuple((v for v in [loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states]))\n        return PatchTSMixerForTimeSeriesClassificationOutput(loss=loss_val, prediction_outputs=y_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)", "docstring": "`PatchTSMixer` for classification application.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.\n\nReturns:\n`None`.", "source": "github-repos"}
{"code": "def connected_emulators(self, host=enums.JLinkHost.USB):\n        \n        res = self._dll.JLINKARM_EMU_GetList(host, 0, 0)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        num_devices = res\n        info = (structs.JLinkConnectInfo * num_devices)()\n        num_found = self._dll.JLINKARM_EMU_GetList(host, info, num_devices)\n        if num_found < 0:\n            raise errors.JLinkException(num_found)\n\n        return list(info)[:num_found]", "docstring": "Returns a list of all the connected emulators.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhost (int): host type to search (default: ``JLinkHost.USB``)\n\nReturns:\nList of ``JLinkConnectInfo`` specifying the connected emulators.\n\nRaises:\nJLinkException: if fails to enumerate devices.", "source": "juraj-google-style"}
{"code": "def ragged_cumsum(x: ragged_tensor.Ragged, axis: int=0, exclusive: bool=False, reverse: bool=False, name: typing.Optional[str]=None):\n    with ops.name_scope(name, 'RaggedCumSum', [x, axis, exclusive, reverse]):\n        axis = array_ops.get_positive_axis(axis, x.shape.rank, ndims_name='rank')\n        if axis == x.ragged_rank:\n            last_rp = x._nested_row_partitions[-1]\n            return x.with_flat_values(_cumsum_flat_values_at_ragged_rank(last_rp, x.flat_values, exclusive=exclusive, reverse=reverse))\n        elif axis > x.ragged_rank:\n            new_axis = axis - x.ragged_rank\n            cumsum_bound = functools.partial(math_ops.cumsum, axis=new_axis, exclusive=exclusive, reverse=reverse)\n            return ragged_functional_ops.map_flat_values(cumsum_bound, x)\n        else:\n            dense_version = x.to_tensor()\n            result = math_ops.cumsum(dense_version, axis, exclusive=exclusive, reverse=reverse, name=name)\n            return ragged_tensor.RaggedTensor.from_tensor(result, lengths=x.nested_row_lengths())", "docstring": "Calculate math_ops.cumsum for a RaggedTensor.\n\nGiven a ragged tensor `x`, the `result` is a ragged tensor with the same\nshape. One can calculate the value of `result[i_1...i_k]` as follows:\n```\ndense_result=tf.math.cumsum(rt.to_tensor(), axis=axis, exclusive=exclusive,\nreverse=reverse)\nresult[i_1...i_k]=dense_result[i_1...i_k]\n```\n\nArgs:\nx: the original ragged tensor to sum.\naxis: the axis along which to sum, can range -rank<=axis<rank.\nexclusive: is the sum exclusive or inclusive? If True, then result[0]=0.\nIf False, then result[0]=x[0].\nreverse: If True, sum from back to front.\nname: the name of the op.\nReturns:\nthe cumulative sum.", "source": "github-repos"}
{"code": "def CreateSession(cls, artifact_filter_names=None, command_line_arguments=None, debug_mode=False, filter_file_path=None, preferred_encoding='utf-8', preferred_time_zone=None, preferred_year=None):\n    session = sessions.Session()\n    session.artifact_filters = artifact_filter_names\n    session.command_line_arguments = command_line_arguments\n    session.debug_mode = debug_mode\n    session.filter_file = filter_file_path\n    session.preferred_encoding = preferred_encoding\n    session.preferred_time_zone = preferred_time_zone\n    session.preferred_year = preferred_year\n    return session", "docstring": "Creates a session attribute container.\n\nArgs:\nartifact_filter_names (Optional[list[str]]): names of artifact definitions\nthat are used for filtering file system and Windows Registry\nkey paths.\ncommand_line_arguments (Optional[str]): the command line arguments.\ndebug_mode (bool): True if debug mode was enabled.\nfilter_file_path (Optional[str]): path to a file with find specifications.\npreferred_encoding (Optional[str]): preferred encoding.\npreferred_time_zone (Optional[str]): preferred time zone.\npreferred_year (Optional[int]): preferred year.\n\nReturns:\nSession: session attribute container.", "source": "codesearchnet"}
{"code": "def set_status(self, on, switch=1):\n        \n        \n        if isinstance(switch, int):\n            switch = str(switch)  \n        payload = self.generate_payload(SET, {switch:on})\n        \n\n        data = self._send_receive(payload)\n        log.debug('set_status received data=%r', data)\n\n        return data", "docstring": "Set status of the device to 'on' or 'off'.\n\nArgs:\non(bool):  True for 'on', False for 'off'.\nswitch(int): The switch to set", "source": "juraj-google-style"}
{"code": "def draw_lines(self, *points):\n        \n        point_array = ffi.new('SDL_Point[]', len(points))\n        for i, p in enumerate(points):\n            point_array[i] = p._ptr[0]\n        check_int_err(lib.SDL_RenderDrawLines(self._ptr, point_array, len(points)))", "docstring": "Draw a series of connected lines on the current rendering target.\n\nArgs:\n*points (Point): The points along the lines.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def handle_event(self, event_handler, event_name, user_args, event_timeout=None, cond=None, cond_timeout=None):\n    worker = self.executor.submit(self._handle, event_handler, event_name, user_args, event_timeout, cond, cond_timeout)\n    return worker", "docstring": "Handle events that don't have registered handlers\n\nIn a new thread, poll one event of specified type from its queue and\nexecute its handler. If no such event exists, the thread waits until\none appears.\n\nArgs:\nevent_handler: Handler for the event, which should take at least\none argument - the event json object.\nevent_name: Name of the event to be handled.\nuser_args: User arguments for the handler; to be passed in after\nthe event json.\nevent_timeout: Number of seconds to wait for the event to come.\ncond: A condition to wait on before executing the handler. Should\nbe a threading.Event object.\ncond_timeout: Number of seconds to wait before the condition times\nout. Never times out if None.\n\nReturns:\nA concurrent.Future object associated with the handler.\nIf blocking call worker.result() is triggered, the handler\nneeds to return something to unblock.", "source": "codesearchnet"}
{"code": "def get_header(message, name):\n    \n    header = message.get(name)\n    log.debug(\"Getting header {!r}: {!r}\".format(name, header))\n    if header:\n        return decode_header_part(header)\n    return six.text_type()", "docstring": "Gets an email.message.Message and a header name and returns\nthe mail header decoded with the correct charset.\n\nArgs:\nmessage (email.message.Message): email message object\nname (string): header to get\n\nReturns:\ndecoded header", "source": "juraj-google-style"}
{"code": "def suggestions(self, word):\n    \n    suggestions = set(self._misspelling_dict.get(word, [])).union(\n        set(self._misspelling_dict.get(word.lower(), [])))\n    return sorted([same_case(source=word, destination=w)\n                   for w in suggestions])", "docstring": "Returns a list of suggestions for a misspelled word.\n\nArgs:\nword: The word to check.\n\nReturns:\nList of zero or more suggested replacements for word.", "source": "juraj-google-style"}
{"code": "def dumps(o, encoder=None):\n    \n\n    retval = \"\"\n    if encoder is None:\n        encoder = TomlEncoder(o.__class__)\n    addtoretval, sections = encoder.dump_sections(o, \"\")\n    retval += addtoretval\n    while sections:\n        newsections = encoder.get_empty_table()\n        for section in sections:\n            addtoretval, addtosections = encoder.dump_sections(\n                sections[section], section)\n\n            if addtoretval or (not addtoretval and not addtosections):\n                if retval and retval[-2:] != \"\\n\\n\":\n                    retval += \"\\n\"\n                retval += \"[\" + section + \"]\\n\"\n                if addtoretval:\n                    retval += addtoretval\n            for s in addtosections:\n                newsections[section + \".\" + s] = addtosections[s]\n        sections = newsections\n    return retval", "docstring": "Stringifies input dict as toml\n\nArgs:\no: Object to dump into toml\n\npreserve: Boolean parameter. If true, preserve inline tables.\n\nReturns:\nString containing the toml corresponding to dict", "source": "juraj-google-style"}
{"code": "def __init__(self, section):\n        \n        self.section = section\n        super().__init__('invalid section name: {}'.format(section))", "docstring": "Initialization of instances:\n\nArgs:\nsection (str): invalid section name.\n\nAttributes:\nsection (str): invalid section name.", "source": "juraj-google-style"}
{"code": "def color_string(self, x):\n        \n        diff_str = \"\"\n        color = \"black\"\n\n        if len(x) == 2 and self.compare_file is not None:\n            difference = x[0] - x[1]\n            if difference:\n                color, sign = ('green', '-') if difference < 0 else ('red', '+')\n                diff_str = '{}{}'.format(sign, self.format_measure(difference))\n        return [self.format_measure(x[0]), [diff_str, color]]", "docstring": "Return a string formatted delta for the values in x.\n\nArgs:\nx: 2-item list of integers (representing number of calls) or\n2-item list of floats (representing seconds of runtime).\n\nReturns:\nA list with [formatted x[0], [color, formatted delta]], where\ncolor reflects whether x[1] is lower, greater, or the same as\nx[0].", "source": "juraj-google-style"}
{"code": "def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):\n    if (((class_info.last_line - class_info.starting_linenum) <= 24) or (linenum <= class_info.starting_linenum)):\n        return\n    matched = Match('\\\\s*(public|protected|private):', clean_lines.lines[linenum])\n    if matched:\n        prev_line = clean_lines.lines[(linenum - 1)]\n        if ((not IsBlankLine(prev_line)) and (not Search('\\\\b(class|struct)\\\\b', prev_line)) and (not Search('\\\\\\\\$', prev_line))):\n            end_class_head = class_info.starting_linenum\n            for i in range(class_info.starting_linenum, linenum):\n                if Search('\\\\{\\\\s*$', clean_lines.lines[i]):\n                    end_class_head = i\n                    break\n            if (end_class_head < (linenum - 1)):\n                error(filename, linenum, 'whitespace/blank_line', 3, ('\"%s:\" should be preceded by a blank line' % matched.group(1)))", "docstring": "Checks for additional blank line issues related to sections.\n\nCurrently the only thing checked here is blank line before protected/private.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nclass_info: A _ClassInfo objects.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def create_module_file(txt, directory):\n    name = nonpresent_module_filename()\n    path = os.path.join(directory, name)\n    with open(path, 'w') as fh:\n        fh.write(txt)\n    return path", "docstring": "Create a file in the given directory with\na valid module name populated with the given txt.\n\nReturns:\nA path to the file", "source": "codesearchnet"}
{"code": "def generate_encodeable_characters(characters: Iterable[str],\n                                   encodings: Iterable[str]) -> Iterable[str]:\n    \n    for c in characters:\n        for encoding in encodings:\n            try:\n                c.encode(encoding)\n                yield c\n            except UnicodeEncodeError:\n                pass", "docstring": "Generates the subset of 'characters' that can be encoded by 'encodings'.\n\nArgs:\ncharacters: The characters to check for encodeability e.g. 'abcd'.\nencodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].\n\nReturns:\nThe subset of 'characters' that can be encoded using one of the provided\nencodings.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               make_distribution_fn,\n               convert_to_tensor_fn=tfd.Distribution.sample,\n               **kwargs):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    if isinstance(make_distribution_fn, six.string_types):\n      \n      make_distribution_fn = _deserialize_function(make_distribution_fn)\n\n    convert_to_tensor_fn = _get_convert_to_tensor_fn(convert_to_tensor_fn)\n\n    \n    \n    \n    kwargs.pop('function', None)\n\n    def _fn(*fargs, **fkwargs):\n      \n      d = make_distribution_fn(*fargs, **fkwargs)\n      value_is_seq = isinstance(d.dtype, collections.Sequence)\n      maybe_composite_convert_to_tensor_fn = (\n          (lambda d: tensor_tuple.TensorTuple(convert_to_tensor_fn(d)))\n          if value_is_seq else convert_to_tensor_fn)\n      distribution = dtc._TensorCoercible(  \n          distribution=d,\n          convert_to_tensor_fn=maybe_composite_convert_to_tensor_fn)\n\n      \n      \n      \n      \n      \n      value = distribution._value()  \n\n      \n      \n      value._tfp_distribution = distribution  \n      \n      \n      \n      if value_is_seq:\n        value.shape = value[-1].shape\n        value.get_shape = value[-1].get_shape\n        value.dtype = value[-1].dtype\n        distribution.shape = value[-1].shape\n        distribution.get_shape = value[-1].get_shape\n      else:\n        distribution.shape = value.shape\n        distribution.get_shape = value.get_shape\n      return distribution, value\n\n    super(DistributionLambda, self).__init__(_fn, **kwargs)\n\n    self._make_distribution_fn = make_distribution_fn\n    self._convert_to_tensor_fn = convert_to_tensor_fn\n\n    \n    \n    \n    self._enter_dunder_call = False", "docstring": "Create a `DistributionLambda` Keras layer.\n\nArgs:\nmake_distribution_fn: Python `callable` that takes previous layer outputs\nand returns a `tfd.Distribution` instance.\nconvert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\ninstance and returns a `tf.Tensor`-like object. For examples, see\n`class` docstring.\nDefault value: `tfd.Distribution.sample`.\n**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.", "source": "juraj-google-style"}
{"code": "def padFrameRange(frange, zfill):\n\n    def _do_pad(match):\n        '\\n            Substitutes padded for unpadded frames.\\n            '\n        result = list(match.groups())\n        result[1] = pad(result[1], zfill)\n        if result[4]:\n            result[4] = pad(result[4], zfill)\n        return ''.join((i for i in result if i))\n    return PAD_RE.sub(_do_pad, frange)", "docstring": "Return the zero-padded version of the frame range string.\n\nArgs:\nfrange (str): a frame range to test\nzfill (int):\n\nReturns:\nstr:", "source": "codesearchnet"}
{"code": "def project(self, **kwargs: Dict[str, Any]) -> Union[Hist, Dict[str, Hist]]:\n        \n        if self.single_observable_projection:\n            return self._project_single_observable(**kwargs)\n        else:\n            return self._project_dict(**kwargs)", "docstring": "Perform the requested projection(s).\n\nNote:\nAll cuts on the original histograms will be reset when this function is completed.\n\nArgs:\nkwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)\nReturns:\nThe projected histogram(s). The projected histograms are also stored in ``output_observable``.", "source": "juraj-google-style"}
{"code": "def UpdateNumberOfEvents(\n      self, number_of_consumed_events, number_of_produced_events):\n    \n    consumed_events_delta = 0\n    if number_of_consumed_events is not None:\n      if number_of_consumed_events < self.number_of_consumed_events:\n        raise ValueError(\n            'Number of consumed events smaller than previous update.')\n\n      consumed_events_delta = (\n          number_of_consumed_events - self.number_of_consumed_events)\n\n      self.number_of_consumed_events = number_of_consumed_events\n      self.number_of_consumed_events_delta = consumed_events_delta\n\n    produced_events_delta = 0\n    if number_of_produced_events is not None:\n      if number_of_produced_events < self.number_of_produced_events:\n        raise ValueError(\n            'Number of produced events smaller than previous update.')\n\n      produced_events_delta = (\n          number_of_produced_events - self.number_of_produced_events)\n\n      self.number_of_produced_events = number_of_produced_events\n      self.number_of_produced_events_delta = produced_events_delta\n\n    return consumed_events_delta > 0 or produced_events_delta > 0", "docstring": "Updates the number of events.\n\nArgs:\nnumber_of_consumed_events (int): total number of events consumed by\nthe process.\nnumber_of_produced_events (int): total number of events produced by\nthe process.\n\nReturns:\nbool: True if either number of events has increased.\n\nRaises:\nValueError: if the consumed or produced number of events is smaller\nthan the value of the previous update.", "source": "juraj-google-style"}
{"code": "def get_list(self, id, name=None):\n    return self.create_list(dict(id=id, name=name))", "docstring": "Get a list\n\nReturns:\nList: The list with the given `id`", "source": "codesearchnet"}
{"code": "def set_pattern_actual_step(self, patternnumber, value):\n    _checkPatternNumber(patternnumber)\n    _checkStepNumber(value)\n    address = _calculateRegisterAddress('actualstep', patternnumber)\n    self.write_register(address, value, 0)", "docstring": "Set the 'actual step' parameter for a given pattern.\n\nArgs:\n* patternnumber (integer): 0-7\n* value (integer): 0-7", "source": "codesearchnet"}
{"code": "def dump(voevent, file, pretty_print=True, xml_declaration=True):\n    file.write(dumps(voevent, pretty_print, xml_declaration))", "docstring": "Writes the voevent to the file object.\n\ne.g.::\n\nwith open('/tmp/myvoevent.xml','wb') as f:\nvoeventparse.dump(v, f)\n\nArgs:\nvoevent(:class:`Voevent`): Root node of the VOevent etree.\nfile (io.IOBase): An open (binary mode) file object for writing.\npretty_print\npretty_print(bool): See :func:`dumps`\nxml_declaration(bool): See :func:`dumps`", "source": "codesearchnet"}
{"code": "def from_ops(*operations: ops.OP_TREE,\n                 strategy: InsertStrategy = InsertStrategy.EARLIEST,\n                 device: devices.Device = devices.UnconstrainedDevice\n                 ) -> 'Circuit':\n        \n        result = Circuit(device=device)\n        result.append(operations, strategy)\n        return result", "docstring": "Creates an empty circuit and appends the given operations.\n\nArgs:\noperations: The operations to append to the new circuit.\nstrategy: How to append the operations.\ndevice: Hardware that the circuit should be able to run on.\n\nReturns:\nThe constructed circuit containing the operations.", "source": "juraj-google-style"}
{"code": "def concat(values, axis, name: str='concat'):\n    if name is None:\n        name = 'concat'\n    _assert_concat_compatible_structured_tensors(values)\n\n    def leaf_op(values):\n        return array_ops.concat(values, axis)\n    axis = array_ops.get_positive_axis(axis, values[0].rank)\n    with ops.name_scope(name, 'StructuredConcat', values):\n        return _extend_op(values, leaf_op)", "docstring": "tf.concat for structured tensors.\n\nDoes not support (yet) checks on illegal axis values, et cetera.\n\nArgs:\nvalues: a sequence of StructuredTensors.\naxis: an axis to concatenate upon.\nname: the name of the op(s).\n\nReturns:\nthe params reorganized according to indices.", "source": "github-repos"}
{"code": "def get_variation_for_experiment(self, experiment_id):\n    \n\n    return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY)", "docstring": "Helper method to retrieve variation ID for given experiment.\n\nArgs:\nexperiment_id: ID for experiment for which variation needs to be looked up for.\n\nReturns:\nVariation ID corresponding to the experiment. None if no decision available.", "source": "juraj-google-style"}
{"code": "def delete_direct(self, addresses):\n        \n\n        with self._lock:\n            for address in addresses:\n                self._validate_write(address)\n                if address in self._state:\n                    self._state[address].set_deleted()\n                else:\n                    fut = _ContextFuture(address=address)\n                    self._state[address] = fut\n                    fut.set_deleted()", "docstring": "Called in the context manager's delete method to either\nmark an entry for deletion , or create a new future and immediately\nset it for deletion in the future.\n\nArgs:\naddress_list (list of str): The unique full addresses.\n\nRaises:\nAuthorizationException", "source": "juraj-google-style"}
{"code": "def process_alias_export_namespace(namespace):\n    \n    namespace.export_path = os.path.abspath(namespace.export_path)\n    if os.path.isfile(namespace.export_path):\n        raise CLIError(FILE_ALREADY_EXISTS_ERROR.format(namespace.export_path))\n\n    export_path_dir = os.path.dirname(namespace.export_path)\n    if not os.path.isdir(export_path_dir):\n        os.makedirs(export_path_dir)\n\n    if os.path.isdir(namespace.export_path):\n        namespace.export_path = os.path.join(namespace.export_path, ALIAS_FILE_NAME)", "docstring": "Validate input arguments when the user invokes 'az alias export'.\n\nArgs:\nnamespace: argparse namespace object.", "source": "juraj-google-style"}
{"code": "def of(cls, key: SearchKey, params: SearchParams) -> 'SearchCriteria':\n        \n        key_name = key.value\n        if key_name in params.disabled:\n            raise SearchNotAllowed(key_name)\n        elif key.inverse:\n            return InverseSearchCriteria(key.not_inverse, params)\n        elif key_name == b'SEQSET':\n            return SequenceSetSearchCriteria(key.filter_sequence_set, params)\n        elif key_name == b'KEYSET':\n            return SearchCriteriaSet(key.filter_key_set, params)\n        elif key_name == b'ALL':\n            return AllSearchCriteria(params)\n        elif key_name == b'OR':\n            left_key, right_key = key.filter_key_or\n            return OrSearchCriteria(left_key, right_key, params)\n        elif key_name == b'ANSWERED':\n            return HasFlagSearchCriteria(Answered, True, params)\n        elif key_name == b'UNANSWERED':\n            return HasFlagSearchCriteria(Answered, False, params)\n        elif key_name == b'DELETED':\n            return HasFlagSearchCriteria(Deleted, True, params)\n        elif key_name == b'UNDELETED':\n            return HasFlagSearchCriteria(Deleted, False, params)\n        elif key_name == b'DRAFT':\n            return HasFlagSearchCriteria(Draft, True, params)\n        elif key_name == b'UNDRAFT':\n            return HasFlagSearchCriteria(Draft, False, params)\n        elif key_name == b'FLAGGED':\n            return HasFlagSearchCriteria(Flagged, True, params)\n        elif key_name == b'UNFLAGGED':\n            return HasFlagSearchCriteria(Flagged, False, params)\n        elif key_name == b'RECENT':\n            return HasFlagSearchCriteria(Recent, True, params)\n        elif key_name == b'OLD':\n            return HasFlagSearchCriteria(Recent, False, params)\n        elif key_name == b'SEEN':\n            return HasFlagSearchCriteria(Seen, True, params)\n        elif key_name == b'UNSEEN':\n            return HasFlagSearchCriteria(Seen, False, params)\n        elif key_name == b'KEYWORD':\n            return HasFlagSearchCriteria(key.filter_flag, True, params)\n        elif key_name == b'UNKEYWORD':\n            return HasFlagSearchCriteria(key.filter_flag, False, params)\n        elif key_name == b'NEW':\n            return NewSearchCriteria(params)\n        elif key_name == b'BEFORE':\n            return DateSearchCriteria(key.filter_datetime, '<', params)\n        elif key_name == b'ON':\n            return DateSearchCriteria(key.filter_datetime, '=', params)\n        elif key_name == b'SINCE':\n            return DateSearchCriteria(key.filter_datetime, '>=', params)\n        elif key_name == b'SENTBEFORE':\n            return HeaderDateSearchCriteria(key.filter_datetime, '<', params)\n        elif key_name == b'SENTON':\n            return HeaderDateSearchCriteria(key.filter_datetime, '=', params)\n        elif key_name == b'SENTSINCE':\n            return HeaderDateSearchCriteria(key.filter_datetime, '>=', params)\n        elif key_name == b'SMALLER':\n            return SizeSearchCriteria(key.filter_int, '<', params)\n        elif key_name == b'LARGER':\n            return SizeSearchCriteria(key.filter_int, '>', params)\n        elif key_name in (b'BCC', b'CC', b'FROM', b'SUBJECT', b'TO'):\n            return EnvelopeSearchCriteria(key_name, key.filter_str, params)\n        elif key_name == b'HEADER':\n            name, value = key.filter_header\n            return HeaderSearchCriteria(name, value, params)\n        elif key_name in (b'BODY', b'TEXT'):\n            return 
BodySearchCriteria(key.filter_str, params)\n        raise SearchNotAllowed(key_name)", "docstring": "Factory method for producing a search criteria sub-class from a\nsearch key.\n\nArgs:\nkey: The search key defining the criteria.\nparams: The parameters that may be used by some searches.", "source": "juraj-google-style"}
{"code": "def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):\n    try:\n        results = query_phenomizer.query(username, password, *hpo_ids)\n        diseases = [result for result in results if (result['p_value'] <= p_value_treshold)]\n        return diseases\n    except SystemExit:\n        return None", "docstring": "Return the list of HGNC symbols that match annotated HPO terms.\n\nArgs:\nusername (str): username to use for phenomizer connection\npassword (str): password to use for phenomizer connection\n\nReturns:\nquery_result: a generator of dictionaries on the form\n{\n'p_value': float,\n'disease_source': str,\n'disease_nr': int,\n'gene_symbols': list(str),\n'description': str,\n'raw_line': str\n}", "source": "codesearchnet"}
{"code": "def ucast_ip(ip_addr, return_tuple=True):\n    \n    regex_ucast_ip = __re.compile(\"^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$\")\n    if return_tuple:\n        while not regex_ucast_ip.match(ip_addr):\n            print(\"Not a good unicast IP.\")\n            print(\"Please try again.\")\n            ip_addr = input(\"Please enter a unicast IP address in the following format x.x.x.x: \")\n        return ip_addr\n    elif not return_tuple:\n        if not regex_ucast_ip.match(ip_addr):\n            return False\n        else:\n            return True", "docstring": "Function to check if a address is unicast\nArgs:\nip_addr: Unicast IP address in the following format 192.168.1.1\nreturn_tuple: Set to True it returns a IP, set to False returns True or False\n\nReturns: see return_tuple for return options", "source": "juraj-google-style"}
{"code": "def _rpc(self, method, *args):\n        \n        with self._lock:\n            apiid = next(self._counter)\n            data = {'id': apiid, 'method': method, 'params': args}\n            request = json.dumps(data)\n            self._client_send(request)\n            response = self._client_receive()\n        if not response:\n            raise ProtocolError(self._ad,\n                                ProtocolError.NO_RESPONSE_FROM_SERVER)\n        result = json.loads(str(response, encoding='utf8'))\n        if result['error']:\n            raise ApiError(self._ad, result['error'])\n        if result['id'] != apiid:\n            raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)\n        if result.get('callback') is not None:\n            if self._event_client is None:\n                self._event_client = self._start_event_client()\n            return callback_handler.CallbackHandler(\n                callback_id=result['callback'],\n                event_client=self._event_client,\n                ret_value=result['result'],\n                method_name=method,\n                ad=self._ad)\n        return result['result']", "docstring": "Sends an rpc to the app.\n\nArgs:\nmethod: str, The name of the method to execute.\nargs: any, The args of the method.\n\nReturns:\nThe result of the rpc.\n\nRaises:\nProtocolError: Something went wrong with the protocol.\nApiError: The rpc went through, however executed with errors.", "source": "juraj-google-style"}
{"code": "def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):\n    done = False\n    records = {}\n    page = 1\n    while (not done):\n        kwargs['page'] = page\n        response = self.__cloudflare_request(account=account, path='/zones/{}/dns_records'.format(zoneID), args=kwargs)\n        info = response['result_info']\n        if (('total_pages' not in info) or (page >= info['total_pages'])):\n            done = True\n        else:\n            page += 1\n        for record in response['result']:\n            if (record['name'] in records):\n                records[record['name']]['value'] = sorted((records[record['name']]['value'] + [record['content']]))\n            else:\n                records[record['name']] = {'name': record['name'], 'value': sorted([record['content']]), 'type': record['type']}\n    return list(records.values())", "docstring": "Helper function to list all records on a CloudFlare DNS Zone. Returns a `dict` containing the records and\ntheir information.\n\nArgs:\naccount (:obj:`CloudFlareAccount`): A CloudFlare Account object\nzoneID (`int`): Internal CloudFlare ID of the DNS zone\n**kwargs (`dict`): Additional arguments to be consumed by the API endpoint\n\nReturns:\n:obj:`dict` of `str`: `dict`", "source": "codesearchnet"}
{"code": "def run(func, options, args=(), kwargs={}, host='localhost', port=8000):\n    run_stats = run_profilers((func, args, kwargs), options)\n    result = None\n    for prof in run_stats:\n        if (not result):\n            result = run_stats[prof]['result']\n        del run_stats[prof]['result']\n    post_data = gzip.compress(json.dumps(run_stats).encode('utf-8'))\n    urllib.request.urlopen(('http:\n    return result", "docstring": "Runs profilers on a function.\n\nArgs:\nfunc: A Python function.\noptions: A string with profilers configuration (i.e. 'cmh').\nargs: func non-keyword arguments.\nkwargs: func keyword arguments.\nhost: Host name to send collected data.\nport: Port number to send collected data.\n\nReturns:\nA result of func execution.", "source": "codesearchnet"}
{"code": "def _pool(inputs, initial_value, reduce_fn, pool_size, strides=None, padding='valid'):\n    if padding not in ('same', 'valid'):\n        raise ValueError(f\"Invalid padding '{padding}', must be 'same' or 'valid'.\")\n    padding = padding.upper()\n    return lax.reduce_window(inputs, initial_value, reduce_fn, pool_size, strides, padding)", "docstring": "Helper function to define pooling functions.\n\nArgs:\ninputs: input data of shape `N+2`.\ninitial_value: the initial value for the reduction.\nreduce_fn: a reduce function of the form `(T, T) -> T`.\npool_size: a sequence of `N` integers, representing the window size to\nreduce over.\nstrides: a sequence of `N` integers, representing the inter-window\nstrides (default: `(1, ..., 1)`).\npadding: either the string `same` or `valid`.\n\nReturns:\nThe output of the reduction for each window slice.", "source": "github-repos"}
{"code": "def offTagAdd(self, name, func):\n    if ('*' in name):\n        self.ontagaddglobs.rem(name, func)\n        return\n    cblist = self.ontagadds.get(name)\n    if (cblist is None):\n        return\n    try:\n        cblist.remove(func)\n    except ValueError:\n        pass", "docstring": "Unregister a callback for tag addition.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "codesearchnet"}
{"code": "def _cast_value(self, value):\n        \n        \n        if (self.convert_datetimes):\n            try:\n                date_time = datetime.datetime.fromtimestamp(float(value))\n                if datetime.datetime(1970, 1, 1) > date_time:\n                    raise ValueError\n                else:\n                    return date_time\n\n            \n            except ValueError:\n                pass\n\n        \n        tests = (int, float, str)\n        for test in tests:\n            try:\n                return test(value)\n            except ValueError:\n                continue\n        return value", "docstring": "Internal method that makes sure every value in dictionary\nis properly cast into the correct types, instead of\njust treating everything like a string from the csv file.\n\nArgs:\nvalue : The value to be casted\n\nReturns:\nA casted Value.", "source": "juraj-google-style"}
{"code": "def distribute_tensor(tensor, layout):\n    if isinstance(tensor, KerasTensor):\n        return tensor\n    return distribution_lib.distribute_tensor(tensor, layout)", "docstring": "Change the layout of a Tensor value in the jit function execution.\n\nArgs:\ntensor: a Tensor to change the layout.\nlayout: `TensorLayout` to be applied on the value.\n\nReturns:\na new value with the specified tensor layout.", "source": "github-repos"}
{"code": "def delete(self, id, **kwargs):\n    if (id is None):\n        path = self.path\n    else:\n        if (not isinstance(id, int)):\n            id = id.replace('/', '%2F')\n        path = ('%s/%s' % (self.path, id))\n    self.gitlab.http_delete(path, **kwargs)", "docstring": "Delete an object on the server.\n\nArgs:\nid: ID of the object to delete\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def quadratic_2d(data):\n    \n    arg_data_max = np.argmax(data)\n    i, j = np.unravel_index(arg_data_max, data.shape)\n    z_ = data[i-1:i+2, j-1:j+2]\n    \n    \n    \n    \n    \n    \n    try:\n        a = (-z_[0,0] + 2*z_[0,1] - z_[0,2] + 2*z_[1,0] + 5*z_[1,1] + 2*z_[1,2] -\n             z_[2,0] + 2*z_[2,1] - z_[2,2]) / 9\n        b = (-z_[0,0] - z_[0,1] - z_[0,2] + z_[2,0] + z_[2,1] + z_[2,2]) / 6\n        c = (-z_[0,0] + z_[0,2] - z_[1,0] + z_[1,2] - z_[2,0] + z_[2,2]) / 6\n        d = (z_[0,0] + z_[0,1] + z_[0,2] - z_[1,0]*2 - z_[1,1]*2 - z_[1,2]*2 +\n             z_[2,0] + z_[2,1] + z_[2,2])/6\n        e = (z_[0,0] - z_[0,2] - z_[2,0] + z_[2,2]) * .25\n        f = (z_[0,0] - 2 * z_[0,1] + z_[0,2] + z_[1,0] - 2 * z_[1,1] + z_[1,2] +\n             z_[2,0] - 2 * z_[2,1] + z_[2,2]) / 6\n    except IndexError:\n        return (i, j)\n\n    \n    det = 4 * d * f - e ** 2\n    xm = - (2 * f * b - c * e) / det\n    ym = - (2 * d * c - b * e) / det\n    return (i+xm, j+ym)", "docstring": "Compute the quadratic estimate of the centroid in a 2d-array.\n\nArgs:\ndata (2darray): two dimensional data array\n\nReturns\ncenter (tuple): centroid estimate on the row and column directions,\nrespectively", "source": "juraj-google-style"}
{"code": "def get(self, ID, index='vector-web-s'):\n        \n\n        url = self.get_url % index\n        r = self.gbdx_connection.get(url + ID)\n        r.raise_for_status()\n        return r.json()", "docstring": "Retrieves a vector.  Not usually necessary because searching is the best way to find & get stuff.\n\nArgs:\nID (str): ID of the vector object\nindex (str): Optional.  Index the object lives in.  defaults to 'vector-web-s'\n\nReturns:\nrecord (dict): A dict object identical to the json representation of the catalog record", "source": "juraj-google-style"}
{"code": "def forward(self, inputs_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    hidden_states = inputs_embeddings\n    encoder_states = () if output_hidden_states else None\n    all_attentions = () if output_attentions else None\n    projected_features = [self.channel_projection_layers[i](feature) for i, feature in enumerate(hidden_states)]\n    for encoder_layer_index, feature_to_project_index in enumerate(self.encoder_projection_indices):\n        if output_hidden_states:\n            encoder_states = encoder_states + (projected_features[feature_to_project_index],)\n        height, width = projected_features[feature_to_project_index].shape[2:]\n        src_flatten = projected_features[feature_to_project_index].flatten(2).permute(0, 2, 1)\n        if self.training or self.eval_size is None:\n            pos_embed = self.build_2d_sincos_position_embedding(width, height, self.encoder_hidden_dim, self.positional_encoding_temperature, device=src_flatten.device, dtype=src_flatten.dtype).to(src_flatten.device, src_flatten.dtype)\n        else:\n            pos_embed = None\n        layer_outputs = self.encoder[encoder_layer_index](src_flatten, pos_embed=pos_embed, output_attentions=output_attentions)\n        projected_features[feature_to_project_index] = layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous()\n        if output_attentions:\n            all_attentions = all_attentions + (layer_outputs[1],)\n    if output_hidden_states:\n        encoder_states = encoder_states + (projected_features[feature_to_project_index],)\n    fpn_feature_maps = [projected_features[-1]]\n    for idx in range(len(self.in_channels) - 1, 0, -1):\n        feat_high = fpn_feature_maps[0]\n        feat_low = projected_features[idx - 1]\n        feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx](feat_high)\n        fpn_feature_maps[0] = feat_high\n        upsample_feat = F.interpolate(feat_high, scale_factor=2.0, mode='nearest')\n        fps_map = self.fpn_blocks[len(self.in_channels) - 1 - idx](torch.concat([upsample_feat, feat_low], dim=1))\n        fpn_feature_maps.insert(0, fps_map)\n    fpn_states = [fpn_feature_maps[0]]\n    for idx in range(len(self.in_channels) - 1):\n        feat_low = fpn_states[-1]\n        feat_high = fpn_feature_maps[idx + 1]\n        downsample_feat = self.downsample_convs[idx](feat_low)\n        hidden_states = self.pan_blocks[idx](torch.concat([downsample_feat, feat_high.to(downsample_feat.device)], dim=1))\n        fpn_states.append(hidden_states)\n    if not return_dict:\n        return (fpn_states[-1], encoder_states, all_attentions, fpn_states)\n    return OmDetTurboEncoderOutput(last_hidden_state=fpn_states[-1], hidden_states=encoder_states, attentions=all_attentions, extracted_states=fpn_states)", "docstring": "Args:\ninputs_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nFlattened feature map (output of the backbone + projection layers) that is passed to the encoder.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. 
See `attentions` under\nreturned tensors for more detail.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\nfor more detail.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def __init__(self, timestep, natoms, box, data):\n        \n        self.timestep = timestep\n        self.natoms = natoms\n        self.box = box\n        self.data = data", "docstring": "Base constructor.\n\nArgs:\ntimestep (int): Current timestep.\nnatoms (int): Total number of atoms in the box.\nbox (LammpsBox): Simulation box.\ndata (pd.DataFrame): Dumped atomic data.", "source": "juraj-google-style"}
{"code": "def extend(self, elts):\n        \n        \n        \n        \n        elts = elts[:]\n        self._in_deque.append(elts)\n        event = self._event_for(elts)\n        self._event_deque.append(event)\n        return event", "docstring": "Adds elts to the tasks.\n\nArgs:\nelts (Sequence): a iterable of elements that can be appended to the\ntask's bundle_field.\n\nReturns:\nEvent: an event that can be used to wait on the response.", "source": "juraj-google-style"}
{"code": "def _get_fullname(obj):\n        \n        \n        if not hasattr(obj, \"__name__\"):\n            obj = obj.__class__\n        if obj.__module__ in (\"builtins\", \"__builtin__\"):\n            return obj.__name__\n        return \"{}.{}\".format(obj.__module__, obj.__name__)", "docstring": "Get the full name of an object including the module.\n\nArgs:\nobj: An object.\n\nReturns:\nThe full class name of the object.", "source": "juraj-google-style"}
{"code": "def set_scf_algorithm_and_iterations(self, algorithm=\"diis\",\n                                         iterations=50):\n        \n        available_algorithms = {\"diis\", \"dm\", \"diis_dm\", \"diis_gdm\", \"gdm\",\n                                \"rca\", \"rca_diis\", \"roothaan\"}\n        if algorithm.lower() not in available_algorithms:\n            raise ValueError(\"Algorithm \" + algorithm +\n                             \" is not available in QChem\")\n        self.params[\"rem\"][\"scf_algorithm\"] = algorithm.lower()\n        self.params[\"rem\"][\"max_scf_cycles\"] = iterations", "docstring": "Set algorithm used for converging SCF and max number of SCF iterations.\n\nArgs:\nalgorithm: The algorithm used for converging SCF. (str)\niterations: The max number of SCF iterations. (Integer)", "source": "juraj-google-style"}
{"code": "def _finalize_outputs(cls, mapreduce_spec, mapreduce_state):\n    \n    \n    if (mapreduce_spec.mapper.output_writer_class() and\n        mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):\n      mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)", "docstring": "Finalize outputs.\n\nArgs:\nmapreduce_spec: an instance of MapreduceSpec.\nmapreduce_state: an instance of MapreduceState.", "source": "juraj-google-style"}
{"code": "def create_game(self, map_name):\n    \n    map_inst = maps.get(map_name)\n    map_data = map_inst.data(self._run_config)\n    if map_name not in self._saved_maps:\n      for controller in self._controllers:\n        controller.save_map(map_inst.path, map_data)\n      self._saved_maps.add(map_name)\n\n    \n    create = sc_pb.RequestCreateGame(\n        local_map=sc_pb.LocalMap(map_path=map_inst.path),\n        disable_fog=False)\n\n    \n    for _ in range(self._num_agents):\n      create.player_setup.add(type=sc_pb.Participant)\n\n    \n    self._controllers[0].create_game(create)", "docstring": "Create a game for the agents to join.\n\nArgs:\nmap_name: The map to use.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n    out = []\n    for i, hidden_state in enumerate(hidden_states):\n        if i not in self.neck_ignore_stages:\n            cls_token, hidden_state = (hidden_state[:, 0], hidden_state[:, 1:])\n            batch_size, sequence_length, num_channels = hidden_state.shape\n            if patch_height is not None and patch_width is not None:\n                hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)\n            else:\n                size = torch_int(sequence_length ** 0.5)\n                hidden_state = hidden_state.reshape(batch_size, size, size, num_channels)\n            hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()\n            feature_shape = hidden_state.shape\n            if self.config.readout_type == 'project':\n                hidden_state = hidden_state.flatten(2).permute((0, 2, 1))\n                readout = cls_token.unsqueeze(1).expand_as(hidden_state)\n                hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1))\n                hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape)\n            elif self.config.readout_type == 'add':\n                hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1)\n                hidden_state = hidden_state.reshape(feature_shape)\n            hidden_state = self.layers[i](hidden_state)\n        out.append(hidden_state)\n    return out", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def connect_engine(self):\n    try:\n        self.connection = self.engine.connect()\n        return True\n    except sa.exc.OperationalError as opex:\n        LOG.fatal(\"Could not connect to the database. The error was: '%s'\", str(opex))\n    return False", "docstring": "Establish a connection to the database.\n\nProvides simple error handling for fatal errors.\n\nReturns:\nTrue, if we could establish a connection, else False.", "source": "codesearchnet"}
{"code": "def _ReadStreamDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE\n    else:\n        supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE\n    return self._ReadElementSequenceDataTypeDefinition(definitions_registry, definition_values, data_types.StreamDefinition, definition_name, supported_definition_values)", "docstring": "Reads a stream data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nStreamDefinition: stream data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def write_to_path(self,path,suffix='',format='png',overwrite=False):\n        \n        if os.path.exists(path) and overwrite is False: raise ValueError(\"Error: use ovewrite=True to overwrite images\")\n        if not os.path.exists(path): os.makedirs(path)\n        for i,r in self.iterrows():\n            spath = os.path.join(path,r['project_name'],r['sample_name'])\n            if not os.path.exists(spath): os.makedirs(spath)\n            if suffix == '':\n                fname = os.path.join(spath,r['frame_name']+'.'+format)\n            else: fname = os.path.join(spath,r['frame_name']+'_'+suffix+'.'+format)\n            imageio.imwrite(fname, r['image'],format=format)", "docstring": "Output the data the dataframe's 'image' column to a directory structured by project->sample and named by frame\n\nArgs:\npath (str): Where to write the directory of images\nsuffix (str): for labeling the imaages you write\nformat (str): default 'png' format to write the file\noverwrite (bool): default False. if true can overwrite files in the path\n\nModifies:\nCreates path folder if necessary and writes images to path", "source": "juraj-google-style"}
{"code": "def get_extrema(self, normalize_rxn_coordinate=True):\n    x = np.arange(0, np.max(self.r), 0.01)\n    y = (self.spline(x) * 1000)\n    scale = (1 if (not normalize_rxn_coordinate) else (1 / self.r[(- 1)]))\n    min_extrema = []\n    max_extrema = []\n    for i in range(1, (len(x) - 1)):\n        if ((y[i] < y[(i - 1)]) and (y[i] < y[(i + 1)])):\n            min_extrema.append(((x[i] * scale), y[i]))\n        elif ((y[i] > y[(i - 1)]) and (y[i] > y[(i + 1)])):\n            max_extrema.append(((x[i] * scale), y[i]))\n    return (min_extrema, max_extrema)", "docstring": "Returns the positions of the extrema along the MEP. Both local\nminimums and maximums are returned.\n\nArgs:\nnormalize_rxn_coordinate (bool): Whether to normalize the\nreaction coordinate to between 0 and 1. Defaults to True.\n\nReturns:\n(min_extrema, max_extrema), where the extrema are given as\n[(x1, y1), (x2, y2), ...].", "source": "codesearchnet"}
{"code": "def install(self, ref, table_name=None, index_columns=None,logger=None):\n        \n\n\n        try:\n            obj_number = ObjectNumber.parse(ref)\n            if isinstance(obj_number, TableNumber):\n                table = self._library.table(ref)\n                connection = self._backend._get_connection()\n                return self._backend.install_table(connection, table, logger=logger)\n            else:\n                \n                raise NotObjectNumberError\n\n        except NotObjectNumberError:\n            \n            partition = self._library.partition(ref)\n            connection = self._backend._get_connection()\n\n            return self._backend.install(\n                connection, partition, table_name=table_name, index_columns=index_columns,\n                logger=logger)", "docstring": "Finds partition by reference and installs it to warehouse db.\n\nArgs:\nref (str): id, vid (versioned id), name or vname (versioned name) of the partition.", "source": "juraj-google-style"}
{"code": "def _fill_from_default(self, default_job_config):\n    if (self._job_type != default_job_config._job_type):\n        raise TypeError(((('attempted to merge two incompatible job types: ' + repr(self._job_type)) + ', ') + repr(default_job_config._job_type)))\n    new_job_config = self.__class__()\n    default_job_properties = copy.deepcopy(default_job_config._properties)\n    for key in self._properties:\n        if (key != self._job_type):\n            default_job_properties[key] = self._properties[key]\n    default_job_properties[self._job_type].update(self._properties[self._job_type])\n    new_job_config._properties = default_job_properties\n    return new_job_config", "docstring": "Merge this job config with a default job config.\n\nThe keys in this object take precedence over the keys in the default\nconfig. The merge is done at the top-level as well as for keys one\nlevel below the job type.\n\nArguments:\ndefault_job_config (google.cloud.bigquery.job._JobConfig):\nThe default job config that will be used to fill in self.\n\nReturns:\ngoogle.cloud.bigquery.job._JobConfig A new (merged) job config.", "source": "codesearchnet"}
{"code": "def set_bool(self, location, value):\n        \n        element = self._handle_location(location)\n        if isinstance(value, basestring):\n            value = True if value.upper() == \"TRUE\" else False\n        elif not isinstance(value, bool):\n            raise ValueError\n        if value is True:\n            element.text = \"true\"\n        else:\n            element.text = \"false\"", "docstring": "Set a boolean value.\n\nCasper booleans in XML are string literals of \"true\" or \"false\".\nThis method sets the text value of \"location\" to the correct\nstring representation of a boolean.\n\nArgs:\nlocation: Element or a string path argument to find()\nvalue: Boolean or string value to set. (Accepts\n\"true\"/\"True\"/\"TRUE\"; all other strings are False).", "source": "juraj-google-style"}
{"code": "def update_detector(self, detector_id, detector):\n    resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id), data=detector)\n    resp.raise_for_status()\n    return resp.json()", "docstring": "Update an existing detector.\n\nArgs:\ndetector_id (string): the ID of the detector.\ndetector (object): the detector model object. Will be serialized as\nJSON.\nReturns:\ndictionary of the response (updated detector model).", "source": "codesearchnet"}
{"code": "def commit(self):\n    commit_response = self._client._firestore_api.commit(self._client._database_string, self._write_pbs, transaction=None, metadata=self._client._rpc_metadata)\n    self._write_pbs = []\n    self.write_results = results = list(commit_response.write_results)\n    self.commit_time = commit_response.commit_time\n    return results", "docstring": "Commit the changes accumulated in this batch.\n\nReturns:\nList[google.cloud.proto.firestore.v1beta1.\\\nwrite_pb2.WriteResult, ...]: The write results corresponding\nto the changes committed, returned in the same order as the\nchanges were applied to this batch. A write result contains an\n``update_time`` field.", "source": "codesearchnet"}
{"code": "def _create_mirrored_tpu_replicated_variables(**kwargs):\n    initial_value = kwargs['initial_value']\n    with maybe_init_scope():\n        initial_value = initial_value() if callable(initial_value) else initial_value\n    mirrored_replicated_var_list = []\n    for replica_id in range(num_replicas):\n        replicated_var_list = []\n        for logic_core_id in range(num_cores_per_replica):\n            with ops.device(self._tpu_devices[replica_id][logic_core_id]):\n                kwargs['initial_value'] = initial_value\n                v = next_creator(**kwargs)\n            replicated_var_list.append(v)\n        replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)\n        tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)\n        mirrored_replicated_var_list.append(tpu_replicated_var)\n    return mirrored_replicated_var_list", "docstring": "Returns a list of `TPUReplicatedVariable`s.\n\nThe list consists of `num_replicas` `TPUReplicatedVariable`s and can be\nused to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable`\ncontains a list of `tf.Variable`s which are replicated to\n`num_cores_per_replica` logical cores to enable XLA SPMD compilation.\n\nArgs:\n**kwargs: the keyword arguments for creating a variable", "source": "github-repos"}
{"code": "def start_entry(self, target, var_id):\n        \n\n        self.in_progress = ConfigEntry(target, var_id, b'')\n\n        if self.data_size - self.data_index < self.in_progress.data_space():\n            return Error.DESTINATION_BUFFER_TOO_SMALL\n\n        self.in_progress.data += struct.pack(\"<H\", var_id)\n        self.data_index += self.in_progress.data_space()\n\n        return Error.NO_ERROR", "docstring": "Begin a new config database entry.\n\nIf there is a current entry in progress, it is aborted but the\ndata was already committed to persistent storage so that space\nis wasted.\n\nArgs:\ntarget (SlotIdentifer): The target slot for this config variable.\nvar_id (int): The config variable ID\n\nReturns:\nint: An error code from the global Errors enum.", "source": "juraj-google-style"}
{"code": "def _construct_location_to_filter_list(match_query):\n    \n    \n    \n    \n    location_to_filters = {}\n    for match_traversal in match_query.match_traversals:\n        for match_step in match_traversal:\n            current_filter = match_step.where_block\n            if current_filter is not None:\n                current_location = match_step.as_block.location\n                location_to_filters.setdefault(current_location, []).append(\n                    current_filter)\n\n    return location_to_filters", "docstring": "Return a dict mapping location -> list of filters applied at that location.\n\nArgs:\nmatch_query: MatchQuery object from which to extract location -> filters dict\n\nReturns:\ndict mapping each location in match_query to a list of\nFilter objects applied at that location", "source": "juraj-google-style"}
{"code": "def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):\n    output_shape = (kernel.shape[0],)\n    return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)", "docstring": "Apply 1D conv with un-shared weights.\n\nArgs:\ninputs: 3D tensor with shape:\n(batch_size, steps, input_dim)\nif data_format is \"channels_last\" or\n(batch_size, input_dim, steps)\nif data_format is \"channels_first\".\nkernel: the unshared weight for convolution,\nwith shape (output_length, feature_dim, filters).\nkernel_size: a tuple of a single integer,\nspecifying the length of the 1D convolution window.\nstrides: a tuple of a single integer,\nspecifying the stride length of the convolution.\ndata_format: the data format, channels_first or channels_last.\n\nReturns:\nA 3d tensor with shape:\n(batch_size, output_length, filters)\nif data_format='channels_first'\nor 3D tensor with shape:\n(batch_size, filters, output_length)\nif data_format='channels_last'.", "source": "github-repos"}
{"code": "def sort_elements_by_child_values(obj_pyxb, child_name_list):\n    obj_pyxb.sort(key=(lambda x: [get_auto(getattr(x, n)) for n in child_name_list]))", "docstring": "In-place sort simple or complex elements in a PyXB object by values they contain\nin child elements.\n\nArgs:\nobj_pyxb: PyXB object\n\nchild_name_list: list of str\nList of element names that are direct children of the PyXB object.", "source": "codesearchnet"}
{"code": "def Lock(fd, path, blocking):\n    operation = (fcntl.LOCK_EX if blocking else (fcntl.LOCK_EX | fcntl.LOCK_NB))\n    try:\n        fcntl.flock(fd, operation)\n    except IOError as e:\n        if (e.errno == errno.EWOULDBLOCK):\n            raise IOError(('Exception locking %s. File already locked.' % path))\n        else:\n            raise IOError(('Exception locking %s. %s.' % (path, str(e))))", "docstring": "Lock the provided file descriptor.\n\nArgs:\nfd: int, the file descriptor of the file to lock.\npath: string, the name of the file to lock.\nblocking: bool, whether the function should return immediately.\n\nRaises:\nIOError, raised from flock while attempting to lock a file.", "source": "codesearchnet"}
{"code": "def get_excel_workbook(api_data, result_info_key, identifier_keys):\n    \n\n    cleaned_data = []\n\n    for item_data in api_data:\n        result_info = item_data.pop(result_info_key, {})\n\n        cleaned_item_data = {}\n\n        if 'meta' in item_data:\n            meta = item_data.pop('meta')\n            cleaned_item_data['meta'] = meta\n\n        for key in item_data:\n            cleaned_item_data[key] = item_data[key]['result']\n\n        cleaned_item_data[result_info_key] = result_info\n\n        cleaned_data.append(cleaned_item_data)\n\n    data_list = copy.deepcopy(cleaned_data)\n\n    workbook = openpyxl.Workbook()\n\n    write_worksheets(workbook, data_list, result_info_key, identifier_keys)\n\n    return workbook", "docstring": "Generates an Excel workbook object given api_data returned by the Analytics API\n\nArgs:\napi_data: Analytics API data as a list of dicts (one per identifier)\nresult_info_key: the key in api_data dicts that contains the data results\nidentifier_keys: the list of keys used as requested identifiers\n(address, zipcode, block_id, etc)\n\nReturns:\nraw excel file data", "source": "juraj-google-style"}
{"code": "def key_periods(ciphertext, max_key_period):\n    if (max_key_period <= 0):\n        raise ValueError('max_key_period must be a positive integer')\n    key_scores = []\n    for period in range(1, (min(max_key_period, len(ciphertext)) + 1)):\n        score = abs((ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period))))\n        key_scores.append((period, score))\n    return [p[0] for p in sorted(key_scores, key=(lambda x: x[1]))]", "docstring": "Rank all key periods for ``ciphertext`` up to and including ``max_key_period``\n\nExample:\n>>> key_periods(ciphertext, 30)\n[2, 4, 8, 3, ...]\n\nArgs:\nciphertext (str): The text to analyze\nmax_key_period (int): The maximum period the key could be\n\nReturns:\nSorted list of keys\n\nRaises:\nValueError: If max_key_period is less than or equal to 0", "source": "codesearchnet"}
{"code": "def pull(self, platform=None):\n    (repository, _) = parse_repository_tag(self.image_name)\n    return self.collection.pull(repository, tag=self.id, platform=platform)", "docstring": "Pull the image digest.\n\nArgs:\nplatform (str): The platform to pull the image for.\nDefault: ``None``\n\nReturns:\n(:py:class:`Image`): A reference to the pulled image.", "source": "codesearchnet"}
{"code": "def read(self, filename, encoding=None):\n    with open(filename, encoding=encoding) as fp:\n        self._read(fp, filename)\n    self._filename = os.path.abspath(filename)", "docstring": "Read and parse a filename.\n\nArgs:\nfilename (str): path to file\nencoding (str): encoding of file, default None", "source": "codesearchnet"}
{"code": "def mean(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Mean(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.mean(x, axis=axis, keepdims=keepdims)", "docstring": "Compute the arithmetic mean along the specified axes.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which the means are computed. The default\nis to compute the mean of the flattened tensor.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one.\n\nReturns:\nOutput tensor containing the mean values.", "source": "github-repos"}
{"code": "def scheduled_sample_count(ground_truth_x, generated_x, batch_size, scheduled_sample_var):\n    num_ground_truth = scheduled_sample_var\n    idx = tf.random_shuffle(tf.range(batch_size))\n    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))\n    generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))\n    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)\n    generated_examps = tf.gather(generated_x, generated_idx)\n    output = tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps])\n    if isinstance(batch_size, int):\n        output.set_shape(([batch_size] + common_layers.shape_list(output)[1:]))\n    return output", "docstring": "Sample batch with specified mix of groundtruth and generated data points.\n\nArgs:\nground_truth_x: tensor of ground-truth data points.\ngenerated_x: tensor of generated data points.\nbatch_size: batch size\nscheduled_sample_var: number of ground-truth examples to include in batch.\nReturns:\nNew batch with num_ground_truth sampled from ground_truth_x and the rest\nfrom generated_x.", "source": "codesearchnet"}
{"code": "def penalty_satisfaction(response, bqm):\n    record = response.record\n    label_dict = response.variables.index\n    if (len(bqm.info['reduction']) == 0):\n        return np.array(([1] * len(record.sample)))\n    penalty_vector = np.prod([((record.sample[(:, label_dict[qi])] * record.sample[(:, label_dict[qj])]) == record.sample[(:, label_dict[valdict['product']])]) for ((qi, qj), valdict) in bqm.info['reduction'].items()], axis=0)\n    return penalty_vector", "docstring": "Creates a penalty satisfaction list\n\nGiven a sampleSet and a bqm object, will create a binary list informing\nwhether the penalties introduced during degree reduction are satisfied for\neach sample in sampleSet\n\nArgs:\nresponse (:obj:`.SampleSet`): Samples corresponding to provided bqm\n\nbqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains\nits reduction info.\n\nReturns:\n:obj:`numpy.ndarray`: a binary array of penalty satisfaction information", "source": "codesearchnet"}
{"code": "def _FormatDescription(self, event):\n    \n    date_time_string = timelib.Timestamp.CopyToIsoFormat(\n        event.timestamp, timezone=self._output_mediator.timezone)\n    timestamp_description = event.timestamp_desc or 'UNKNOWN'\n\n    message, _ = self._output_mediator.GetFormattedMessages(event)\n    if message is None:\n      data_type = getattr(event, 'data_type', 'UNKNOWN')\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    description = '{0:s}; {1:s}; {2:s}'.format(\n        date_time_string, timestamp_description,\n        message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))\n    return self._SanitizeField(description)", "docstring": "Formats the description.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted description field.", "source": "juraj-google-style"}
{"code": "def add_layer(self, label, change_layer=True):\n    self.layer_stack.insert((self.last_layer() + 1), label)\n    if change_layer:\n        self.set_current_layer(self.last_layer())\n    return None", "docstring": "Add new mesh layer to the end of the stack\n\nArgs:\nlabel (str): new label for the mesh layer\nchange_layer (bool): change to the newly created layer", "source": "codesearchnet"}
{"code": "def plugins(self):\n    if (not self.loaded):\n        self.load_modules()\n    return get_plugins()[self.group]._filter(blacklist=self.blacklist, newest_only=True, type_filter=self.type_filter)", "docstring": "Newest version of all plugins in the group filtered by ``blacklist``\n\nReturns:\ndict: Nested dictionary of plugins accessible through dot-notation.\n\nPlugins are returned in a nested dictionary, but can also be accessed through dot-notion.\nJust as when accessing an undefined dictionary key with index-notation,\na :py:exc:`KeyError` will be raised if the plugin type or plugin does not exist.\n\nParent types are always included.\nChild plugins will only be included if a valid, non-blacklisted plugin is available.", "source": "codesearchnet"}
{"code": "def export_vms(self, vms_names=None, standalone=False, export_dir='.', compress=False, init_file_name='LagoInitFile', out_format=YAMLOutFormatPlugin(), collect_only=False, with_threads=True):\n    return self.virt_env.export_vms(vms_names, standalone, export_dir, compress, init_file_name, out_format, collect_only, with_threads)", "docstring": "Export vm images disks and init file.\nThe exported images and init file can be used to recreate\nthe environment.\n\nArgs:\nvms_names(list of str): Names of the vms to export, if None\nexport all the vms in the env (default=None)\nstandalone(bool): If false, export a layered image\n(default=False)\nexport_dir(str): Dir to place the exported images and init file\ncompress(bool): If True compress the images with xz\n(default=False)\ninit_file_name(str): The name of the exported init file\n(default='LagoInitfile')\nout_format(:class:`lago.plugins.output.OutFormatPlugin`):\nThe type of the exported init file (the default is yaml)\ncollect_only(bool): If True, return only a mapping from vm name\nto the disks that will be exported. (default=False)\nwith_threads(bool): If True, run the export in parallel\n(default=True)\n\nReturns\nUnless collect_only == True, a mapping between vms' disks.", "source": "codesearchnet"}
{"code": "def get_version(here_path, default_version=DEFAULT_VERSION):\n    if ('site-packages' in here_path):\n        return _version_from_file(here_path)\n    if os.environ.get('TRAVIS_TAG'):\n        if (not TEST_MODE):\n            return os.environ.get('TRAVIS_TAG').replace('v', '')\n        else:\n            warnings.warn('Travis detected, but TEST_MODE enabled', exceptions.ProsperVersionTestModeWarning)\n    try:\n        current_tag = _read_git_tags(default_version=default_version)\n    except Exception:\n        return _version_from_file(here_path)\n    with open(os.path.join(here_path, 'version.txt'), 'w') as v_fh:\n        v_fh.write(current_tag)\n    return current_tag", "docstring": "tries to resolve version number\n\nArgs:\nhere_path (str): path to project local dir\ndefault_version (str): what version to return if all else fails\n\nReturns:\nstr: semantic_version information for library", "source": "codesearchnet"}
{"code": "def keyDown(key, pause=None, _pause=True):\n    \n    if len(key) > 1:\n        key = key.lower()\n\n    _failSafeCheck()\n    platformModule._keyDown(key)\n\n    _autoPause(pause, _pause)", "docstring": "Performs a keyboard key press without the release. This will put that\nkey in a held down state.\n\nNOTE: For some reason, this does not seem to cause key repeats like would\nhappen if a keyboard key was held down on a text field.\n\nArgs:\nkey (str): The key to be pressed down. The valid names are listed in\nKEYBOARD_KEYS.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def read(self, size):\n        \n        now = time.time()\n        missing_dt = self._sleep_until - now\n        if missing_dt > 0:\n            time.sleep(missing_dt)\n        self._sleep_until = time.time() + self._sleep_time(size)\n        data = (self._wavep.readframes(size)\n                if self._wavep\n                else self._fp.read(size))\n        \n        if not data:\n            return b'\\x00' * size\n        return data", "docstring": "Read bytes from the stream and block until sample rate is achieved.\n\nArgs:\nsize: number of bytes to read from the stream.", "source": "juraj-google-style"}
{"code": "def merge_resources(resource1, resource2):\n    merged = resource1.copy()\n    merged.update(resource2)\n    return merged", "docstring": "Updates a copy of resource1 with resource2 values and returns the merged dictionary.\n\nArgs:\nresource1: original resource\nresource2: resource to update resource1\n\nReturns:\ndict: merged resource", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    patch_attention_mask = self._create_patch_attention_mask(pixel_mask)\n    image_outputs = self.vision_tower(pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True)\n    image_attn_mask = None\n    if patch_attention_mask is not None:\n        flattened_mask = patch_attention_mask.flatten(1)\n        image_attn_mask = torch.logical_not(flattened_mask)\n    selected_image_feature = image_outputs.hidden_states[vision_feature_layer]\n    image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):\nThe tensors corresponding to the input images.\npixel_mask (`torch.FloatTensor]`, *optional*):\nThe tensors corresponding to the input image mask.\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def __init__(self,\n               url,\n               params,\n               name=None,\n               eta=None,\n               countdown=None,\n               parent=None,\n               headers=None):\n    \n    self.url = url\n    self.name = name\n    self.eta = eta\n    self.countdown = countdown\n    self._headers = {\n        \"Content-Type\": \"application/octet-stream\",\n        self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION\n    }\n    if headers:\n      self._headers.update(headers)\n\n    \n    payload_str = urllib.urlencode(params)\n    compressed_payload = \"\"\n    if len(payload_str) > self.MAX_TASK_PAYLOAD:\n      compressed_payload = zlib.compress(payload_str)\n\n    \n    if not compressed_payload:\n      self._payload = payload_str\n    \n    elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:\n      self._payload = self.PAYLOAD_PARAM + compressed_payload\n    elif len(compressed_payload) > self.MAX_DB_PAYLOAD:\n      raise ValueError(\n          \"Payload from %s to big to be stored in database: %s\" %\n          (self.name, len(compressed_payload)))\n    \n    else:\n      if not parent:\n        raise ValueError(\"Huge tasks should specify parent entity.\")\n\n      payload_entity = _HugeTaskPayload(payload=compressed_payload,\n                                        parent=parent)\n      payload_key = payload_entity.put()\n      self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)", "docstring": "Init.\n\nArgs:\nurl: task url in str.\nparams: a dict from str to str.\nname: task name.\neta: task eta.\ncountdown: task countdown.\nparent: parent entity of huge task's payload.\nheaders: a dict of headers for the task.\n\nRaises:\nValueError: when payload is too big even for datastore, or parent is\nnot specified when payload is stored in datastore.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        self.SerializeUnsigned(writer)\n        writer.WriteByte(1)\n        self.Script.Serialize(writer)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def post_async(self, path, params=None):\n    request = Post(self._get_next_id(), path, params)\n    request.set_callback(self._q.put)\n    future = self._dispatch_request(request)\n    return future", "docstring": "Asynchronously calls a function on a child block\n\nArgs:\npath (list): The path to post to\nparams (dict): parameters for the call\n\nReturns:\nFuture: as single Future that will resolve to the result", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.BatchAnnotateImages = channel.unary_unary(\n            \"/google.cloud.vision.v1p4beta1.ImageAnnotator/BatchAnnotateImages\",\n            request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesResponse.FromString,\n        )\n        self.BatchAnnotateFiles = channel.unary_unary(\n            \"/google.cloud.vision.v1p4beta1.ImageAnnotator/BatchAnnotateFiles\",\n            request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateFilesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateFilesResponse.FromString,\n        )\n        self.AsyncBatchAnnotateImages = channel.unary_unary(\n            \"/google.cloud.vision.v1p4beta1.ImageAnnotator/AsyncBatchAnnotateImages\",\n            request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.AsyncBatchAnnotateImagesRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.AsyncBatchAnnotateFiles = channel.unary_unary(\n            \"/google.cloud.vision.v1p4beta1.ImageAnnotator/AsyncBatchAnnotateFiles\",\n            request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_image__annotator__pb2.AsyncBatchAnnotateFilesRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def verify(self, byts, sign):\n        \n        try:\n            chosen_hash = c_hashes.SHA256()\n            hasher = c_hashes.Hash(chosen_hash, default_backend())\n            hasher.update(byts)\n            digest = hasher.finalize()\n            self.publ.verify(sign,\n                             digest,\n                             c_ec.ECDSA(c_utils.Prehashed(chosen_hash))\n                             )\n            return True\n        except InvalidSignature:\n            logger.exception('Error in publ.verify')\n            return False", "docstring": "Verify the signature for the given bytes using the ECC\npublic key.\n\nArgs:\nbyts (bytes): The data bytes.\nsign (bytes): The signature bytes.\n\nReturns:\nbool: True if the data was verified, False otherwise.", "source": "juraj-google-style"}
{"code": "def get_op(self, id: str, **kwargs: str) -> dict:\n    path = self._get_path_for_op_id(id)\n    return self.get_path(path, kwargs)", "docstring": "Queries the ESI by looking up an operation id.\n\nEndpoints are cached, so calls to this method\nfor the same op and args will return the data\nfrom the cache instead of making the API call.\n\nArgs:\nid: operation id\nkwargs: data to populate the endpoint's URL variables\n\nReturns:\nESI data", "source": "codesearchnet"}
{"code": "def _UpdateSudoer(self, user, sudoer=False):\n    \n    if sudoer:\n      self.logger.info('Adding user %s to the Google sudoers group.', user)\n      command = self.gpasswd_add_cmd.format(\n          user=user, group=self.google_sudoers_group)\n    else:\n      self.logger.info('Removing user %s from the Google sudoers group.', user)\n      command = self.gpasswd_remove_cmd.format(\n          user=user, group=self.google_sudoers_group)\n\n    try:\n      subprocess.check_call(command.split(' '))\n    except subprocess.CalledProcessError as e:\n      self.logger.warning('Could not update user %s. %s.', user, str(e))\n      return False\n    else:\n      self.logger.debug('Removed user %s from the Google sudoers group.', user)\n      return True", "docstring": "Update sudoer group membership for a Linux user account.\n\nArgs:\nuser: string, the name of the Linux user account.\nsudoer: bool, True if the user should be a sudoer.\n\nReturns:\nbool, True if user update succeeded.", "source": "juraj-google-style"}
{"code": "def dump_values(self, with_defaults=True, dict_cls=dict, flat=False):\n    values = dict_cls()\n    if flat:\n        for (str_path, item) in self.iter_items(recursive=True, key='str_path'):\n            if item.has_value:\n                if (with_defaults or (not item.is_default)):\n                    values[str_path] = item.value\n    else:\n        for (item_name, item) in self._tree.items():\n            if is_config_section(item):\n                section_values = item.dump_values(with_defaults=with_defaults, dict_cls=dict_cls)\n                if section_values:\n                    values[item_name] = section_values\n            elif item.has_value:\n                if (with_defaults or (not item.is_default)):\n                    values[item.name] = item.value\n    return values", "docstring": "Export values of all items contained in this section to a dictionary.\n\nItems with no values set (and no defaults set if ``with_defaults=True``) will be excluded.\n\nReturns:\ndict: A dictionary of key-value pairs, where for sections values are dictionaries\nof their contents.", "source": "codesearchnet"}
{"code": "def get_boards(self, **query_params):\n    boards = self.get_boards_json(self.base_uri, query_params=query_params)\n    boards_list = []\n    for board_json in boards:\n        boards_list.append(self.create_board(board_json))\n    return boards_list", "docstring": "Get all the boards for this organisation. Returns a list of Board s.\n\nReturns:\nlist(Board): The boards attached to this organisation", "source": "codesearchnet"}
{"code": "def find_file_in_load_dirs(relpath):\n    if relpath.startswith(os.path.sep):\n        relpath = relpath.lstrip(os.path.sep)\n    for ld in settings.DATA_DIRECTORIES:\n        possible_path = os.path.join(ld, relpath)\n        if os.path.exists(possible_path):\n            return possible_path", "docstring": "If given relative path exists in one of DevAssistant load paths,\nreturn its full path.\n\nArgs:\nrelpath: a relative path, e.g. \"assitants/crt/test.yaml\"\n\nReturns:\nabsolute path of the file, e.g. \"/home/x/.devassistant/assistanta/crt/test.yaml\nor None if file is not found", "source": "codesearchnet"}
{"code": "def execute_rex_code(self, code, filename=None, shell=None, parent_environ=None, **Popen_args):\n\n    def _actions_callback(executor):\n        executor.execute_code(code, filename=filename)\n    return self.execute_shell(shell=shell, parent_environ=parent_environ, command='', block=False, actions_callback=_actions_callback, **Popen_args)", "docstring": "Run some rex code in the context.\n\nNote:\nThis is just a convenience form of `execute_shell`.\n\nArgs:\ncode (str): Rex code to execute.\nfilename (str): Filename to report if there are syntax errors.\nshell: Shell type, for eg 'bash'. If None, the current shell type\nis used.\nparent_environ: Environment to run the shell process in, if None\nthen the current environment is used.\nPopen_args: args to pass to the shell process object constructor.\n\nReturns:\n`subprocess.Popen` object for the shell process.", "source": "codesearchnet"}
{"code": "def retry(self, **kwargs):\n    path = ('%s/%s/retry' % (self.manager.path, self.get_id()))\n    self.manager.gitlab.http_post(path)", "docstring": "Retry the job.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabJobRetryError: If the job could not be retried", "source": "codesearchnet"}
{"code": "def build_graph(path, term_depth=1000, skim_depth=10,\n                d_weights=False, **kwargs):\n\n    \n\n    \n    click.echo('\\nTokenizing text...')\n    t = Text.from_file(path)\n    click.echo('Extracted %d tokens' % len(t.tokens))\n\n    m = Matrix()\n\n    \n    click.echo('\\nIndexing terms:')\n    m.index(t, t.most_frequent_terms(term_depth), **kwargs)\n\n    g = Skimmer()\n\n    \n    click.echo('\\nGenerating graph:')\n    g.build(t, m, skim_depth, d_weights)\n\n    return g", "docstring": "Tokenize a text, index a term matrix, and build out a graph.\n\nArgs:\npath (str): The file path.\nterm_depth (int): Consider the N most frequent terms.\nskim_depth (int): Connect each word to the N closest siblings.\nd_weights (bool): If true, give \"close\" nodes low weights.\n\nReturns:\nSkimmer: The indexed graph.", "source": "juraj-google-style"}
{"code": "def MakeSelfExtractingZip(self, payload_data, output_path):\n    context = (self.context + ['Client Context'])\n    src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode='r')\n    zip_data = io.BytesIO()\n    output_zip = zipfile.ZipFile(zip_data, mode='w', compression=zipfile.ZIP_DEFLATED)\n    config_file_name = config.CONFIG.Get('ClientBuilder.config_filename', context=context)\n    for template_file in src_zip.namelist():\n        if (template_file != config_file_name):\n            CopyFileInZip(src_zip, template_file, output_zip)\n    client_config_content = self.GetClientConfig(context)\n    output_zip.writestr(config_file_name, client_config_content.encode('utf-8'), compress_type=zipfile.ZIP_STORED)\n    output_zip.comment = (b'$AUTORUN$>%s' % config.CONFIG.Get('ClientBuilder.autorun_command_line', context=context).encode('utf-8'))\n    output_zip.close()\n    utils.EnsureDirExists(os.path.dirname(output_path))\n    with open(output_path, 'wb') as fd:\n        stub_data = io.BytesIO()\n        unzipsfx_stub = config.CONFIG.Get('ClientBuilder.unzipsfx_stub', context=context)\n        stub_raw = open(unzipsfx_stub, 'rb').read()\n        if (b'level=\"requireAdministrator' not in stub_raw):\n            raise RuntimeError('Bad unzip binary in use. Not compiled with therequireAdministrator manifest option.')\n        stub_data.write(stub_raw)\n        SetPeSubsystem(stub_data, console=config.CONFIG.Get('ClientBuilder.console', context=context))\n        end_of_file = (zip_data.tell() + stub_data.tell())\n        offset_to_rsrc = stub_data.getvalue().find(b'.rsrc')\n        stub_data.seek((offset_to_rsrc + 20))\n        start_of_rsrc_section = struct.unpack('<I', stub_data.read(4))[0]\n        stub_data.seek((offset_to_rsrc + 16))\n        stub_data.write(struct.pack('<I', (end_of_file - start_of_rsrc_section)))\n        out_data = io.BytesIO()\n        out_data.write(stub_data.getvalue())\n        out_data.write(zip_data.getvalue())\n        fd.write(out_data.getvalue())\n    if self.signer:\n        self.signer.SignFile(output_path)\n    logging.info('Deployable binary generated at %s', output_path)\n    return output_path", "docstring": "Repack the installer into the payload.\n\nArgs:\npayload_data: data payload for zip file\noutput_path: filename for the zip output\n\nRaises:\nRuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.\nReturns:\noutput_path: filename string of zip output file", "source": "codesearchnet"}
{"code": "def combine_heads(self, x):\n    with tf.name_scope('combine_heads'):\n        batch_size = tf.shape(x)[0]\n        length = tf.shape(x)[2]\n        x = tf.transpose(x, [0, 2, 1, 3])\n        return tf.reshape(x, [batch_size, length, self.hidden_size])", "docstring": "Combine tensor that has been split.\n\nArgs:\nx: A tensor [batch_size, num_heads, length, hidden_size/num_heads]\n\nReturns:\nA tensor with shape [batch_size, length, hidden_size]", "source": "codesearchnet"}
{"code": "def connect_async(self, connection_id, connection_string, callback):\n        \n\n        if callback is not None:\n            callback(connection_id, self.id, False, \"connect command is not supported in device adapter\")", "docstring": "Asynchronously connect to a device\n\nArgs:\nconnection_id (int): A unique identifier that will refer to this connection\nconnection_string (string): A DeviceAdapter specific string that can be used to connect to\na device using this DeviceAdapter.\ncallback (callable): A function that will be called when the connection attempt finishes as\ncallback(connection_id, adapter_id, success: bool, failure_reason: string or None)", "source": "juraj-google-style"}
{"code": "def pack_eager_tensors(self, tensors):\n    self.ensure_initialized()\n    return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)", "docstring": "Pack multiple `EagerTensor`s of the same dtype and shape.\n\nArgs:\ntensors: a list of EagerTensors to pack.\n\nReturns:\nA packed EagerTensor.", "source": "github-repos"}
{"code": "def objects_delete(self, bucket, key):\n    url = (Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key))))\n    datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials, raw_response=True)", "docstring": "Deletes the specified object.\n\nArgs:\nbucket: the name of the bucket.\nkey: the key of the object within the bucket.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def configure(self, sbi_config: str):\n    config_dict = json.loads(sbi_config)\n    self.debug_stream('SBI configuration:\\n%s', json.dumps(config_dict, indent=2))\n    try:\n        sbi = Subarray(self.get_name()).configure_sbi(config_dict)\n    except jsonschema.exceptions.ValidationError as error:\n        return json.dumps(dict(path=error.absolute_path.__str__(), schema_path=error.schema_path.__str__(), message=error.message), indent=2)\n    except RuntimeError as error:\n        return json.dumps(dict(error=str(error)), indent=2)\n    return 'Accepted SBI: {}'.format(sbi.id)", "docstring": "Configure an SBI for this subarray.\n\nArgs:\nsbi_config (str): SBI configuration JSON\n\nReturns:\nstr,", "source": "codesearchnet"}
{"code": "def prod(x, axis=None, keepdims=False, dtype=None):\n    if any_symbolic_tensors((x,)):\n        return Prod(axis=axis, keepdims=keepdims, dtype=dtype).symbolic_call(x)\n    return backend.numpy.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)", "docstring": "Return the product of tensor elements over a given axis.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which a product is performed. The default,\n`axis=None`, will compute the product of all elements\nin the input tensor.\nkeepdims: If this is set to `True`, the axes which are reduce\nare left in the result as dimensions with size one.\ndtype: Data type of the returned tensor.\n\nReturns:\nProduct of elements of `x` over the given axis or axes.", "source": "github-repos"}
{"code": "def _make_model_class(message_type, indexed_fields, **props):\n    analyzed = _analyze_indexed_fields(indexed_fields)\n    for (field_name, sub_fields) in analyzed.iteritems():\n        if (field_name in props):\n            raise ValueError(('field name %s is reserved' % field_name))\n        try:\n            field = message_type.field_by_name(field_name)\n        except KeyError:\n            raise ValueError(('Message type %s has no field named %s' % (message_type.__name__, field_name)))\n        if isinstance(field, messages.MessageField):\n            if (not sub_fields):\n                raise ValueError(('MessageField %s cannot be indexed, only sub-fields' % field_name))\n            sub_model_class = _make_model_class(field.type, sub_fields)\n            prop = model.StructuredProperty(sub_model_class, field_name, repeated=field.repeated)\n        else:\n            if (sub_fields is not None):\n                raise ValueError(('Unstructured field %s cannot have indexed sub-fields' % field_name))\n            if isinstance(field, messages.EnumField):\n                prop = EnumProperty(field.type, field_name, repeated=field.repeated)\n            elif isinstance(field, messages.BytesField):\n                prop = model.BlobProperty(field_name, repeated=field.repeated, indexed=True)\n            else:\n                prop = model.GenericProperty(field_name, repeated=field.repeated)\n        props[field_name] = prop\n    return model.MetaModel(('_%s__Model' % message_type.__name__), (model.Model,), props)", "docstring": "Construct a Model subclass corresponding to a Message subclass.\n\nArgs:\nmessage_type: A Message subclass.\nindexed_fields: A list of dotted and undotted field names.\n**props: Additional properties with which to seed the class.\n\nReturns:\nA Model subclass whose properties correspond to those fields of\nmessage_type whose field name is listed in indexed_fields, plus\nthe properties specified by the **props arguments.  For dotted\nfield names, a StructuredProperty is generated using a Model\nsubclass created by a recursive call.\n\nRaises:\nWhatever _analyze_indexed_fields() raises.\nValueError if a field name conflicts with a name in **props.\nValueError if a field name is not valid field of message_type.\nValueError if an undotted field name designates a MessageField.", "source": "codesearchnet"}
{"code": "def _update_annotations(discretized_pulse: Callable) -> Callable:\n    \n    undecorated_annotations = list(discretized_pulse.__annotations__.items())\n    decorated_annotations = undecorated_annotations[1:]\n    decorated_annotations.insert(0, ('duration', int))\n    discretized_pulse.__annotations__ = dict(decorated_annotations)\n    return discretized_pulse", "docstring": "Update annotations of discretized continuous pulse function with duration.\n\nArgs:\ndiscretized_pulse: Discretized decorated continuous pulse.", "source": "juraj-google-style"}
{"code": "def setNetworkKey(self, key):\n        \n        masterKey = ''\n        print '%s call setNetworkKey' % self.port\n\n        try:\n            if not isinstance(key, str):\n                masterKey = self.__convertLongToString(key)\n\n                \n                if len(masterKey) < 32:\n                    masterKey = masterKey.zfill(32)\n\n                cmd = WPANCTL_CMD + 'setprop Network:Key %s' % masterKey\n                datasetCmd = WPANCTL_CMD + 'setprop Dataset:MasterKey %s' % masterKey\n            else:\n                masterKey = key\n                cmd = WPANCTL_CMD + 'setprop Network:Key %s' % masterKey\n                datasetCmd = WPANCTL_CMD + 'setprop Dataset:MasterKey %s' % masterKey\n\n            self.networkKey = masterKey\n            self.hasActiveDatasetToCommit = True\n            return self.__sendCommand(cmd)[0] != 'Fail' and self.__sendCommand(datasetCmd)[0] != 'Fail'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('setNetworkkey() Error: ' + str(e))", "docstring": "set Thread Network master key\n\nArgs:\nkey: Thread Network master key used in secure the MLE/802.15.4 packet\n\nReturns:\nTrue: successful to set the Thread Network master key\nFalse: fail to set the Thread Network master key", "source": "juraj-google-style"}
{"code": "def markdown_to_text(body):\n    md = markdown.markdown(body, extensions=['markdown.extensions.extra'])\n    soup = BeautifulSoup(md, 'html.parser')\n    return soup.get_text()", "docstring": "Converts markdown to text.\n\nArgs:\nbody: markdown (or plaintext, or maybe HTML) input\n\nReturns:\nPlaintext with all tags and frills removed", "source": "codesearchnet"}
{"code": "def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:\n    from xbbg.core import intervals\n    cur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('typ', 'TRADE'))\n    if cur_data.empty:\n        return pd.DataFrame()\n    fmt = '%H:%M:%S'\n    ss = intervals.SessNA\n    ref = kwargs.get('ref', None)\n    exch = (pd.Series() if (ref is None) else const.exch_info(ticker=ref))\n    if session:\n        ss = intervals.get_interval(ticker=kwargs.get('ref', ticker), session=session)\n    start_time = kwargs.get('start_time', None)\n    end_time = kwargs.get('end_time', None)\n    if (ss != intervals.SessNA):\n        start_time = pd.Timestamp(ss.start_time).strftime(fmt)\n        end_time = pd.Timestamp(ss.end_time).strftime(fmt)\n    if (start_time and end_time):\n        kw = dict(start_time=start_time, end_time=end_time)\n        if (not exch.empty):\n            cur_tz = cur_data.index.tz\n            res = cur_data.tz_convert(exch.tz).between_time(**kw)\n            if kwargs.get('keep_tz', False):\n                res = res.tz_convert(cur_tz)\n            return pd.DataFrame(res)\n        return pd.DataFrame(cur_data.between_time(**kw))\n    return cur_data", "docstring": "Bloomberg intraday bar data within market session\n\nArgs:\nticker: ticker\ndt: date\nsession: examples include\nday_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000\n**kwargs:\nref: reference ticker or exchange for timezone\nkeep_tz: if keep tz if reference ticker / exchange is given\nstart_time: start time\nend_time: end time\ntyp: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\nReturns:\npd.DataFrame", "source": "codesearchnet"}
{"code": "def pop(self, identifier, default=None):\n    if (identifier in self.children):\n        item = self[identifier]\n        self.__delitem__(identifier)\n        return item\n    else:\n        return default", "docstring": "Pop a node of the AttrTree using its path string.\n\nArgs:\nidentifier: Path string of the node to return\ndefault: Value to return if no node is found\n\nReturns:\nThe node that was removed from the AttrTree", "source": "codesearchnet"}
{"code": "def get_index(self, prefix=''):\n    if prefix:\n        prefixed = ('%s_index' % prefix)\n    else:\n        prefixed = 'index'\n    if ((prefixed in self.__cli) and self.__cli[prefixed]):\n        index = self.__cli.get(prefixed)\n        from_conf = False\n    else:\n        index = self.__config.get(prefixed)\n        from_conf = True\n    return self.__abspath(index, from_conf)", "docstring": "Retrieve the absolute path to an index, according to\n`prefix`.\n\nArgs:\nprefix: str, the desired prefix or `None`.\n\nReturns:\nstr: An absolute path, or `None`", "source": "codesearchnet"}
{"code": "def _get_no_split_modules(self, device_map: str):\n    _no_split_modules = set()\n    modules_to_check = [self]\n    while len(modules_to_check) > 0:\n        module = modules_to_check.pop(-1)\n        if module.__class__.__name__ not in _no_split_modules:\n            if isinstance(module, PreTrainedModel):\n                if module._no_split_modules is None:\n                    raise ValueError(f\"{module.__class__.__name__} does not support `device_map='{device_map}'`. To implement support, the model class needs to implement the `_no_split_modules` attribute.\")\n                else:\n                    _no_split_modules = _no_split_modules | set(module._no_split_modules)\n            modules_to_check += list(module.children())\n    return list(_no_split_modules)", "docstring": "Get the modules of the model that should not be spit when using device_map. We iterate through the modules to\nget the underlying `_no_split_modules`.\n\nArgs:\ndevice_map (`str`):\nThe device map value. Options are [\"auto\", \"balanced\", \"balanced_low_0\", \"sequential\"]\n\nReturns:\n`List[str]`: List of modules that should not be split", "source": "github-repos"}
{"code": "def submodules(self):\n    return tuple(self._flatten(predicate=_is_module))", "docstring": "Sequence of all sub-modules.\n\nSubmodules are modules which are properties of this module, or found as\nproperties of modules which are properties of this module (and so on).\n\n>>> a = tf.Module()\n>>> b = tf.Module()\n>>> c = tf.Module()\n>>> a.b = b\n>>> b.c = c\n>>> list(a.submodules) == [b, c]\nTrue\n>>> list(b.submodules) == [c]\nTrue\n>>> list(c.submodules) == []\nTrue\n\nReturns:\nA sequence of all submodules.", "source": "github-repos"}
{"code": "async def update_state(self, name, state):\n        \n\n        await self.send_command(OPERATIONS.CMD_UPDATE_STATE,\n                                {'name': name, 'new_status': state},\n                                MESSAGES.UpdateStateResponse, timeout=5.0)", "docstring": "Update the state for a service.\n\nArgs:\nname (string): The name of the service\nstate (int): The new state of the service", "source": "juraj-google-style"}
{"code": "def mnist_common_generator(tmp_dir,\n                           training,\n                           how_many,\n                           data_filename,\n                           label_filename,\n                           start_from=0):\n  \n  data_path = os.path.join(tmp_dir, data_filename)\n  labels_path = os.path.join(tmp_dir, label_filename)\n  images = _extract_mnist_images(data_path, 60000 if training else 10000)\n  labels = _extract_mnist_labels(labels_path, 60000 if training else 10000)\n  \n  data = list(zip(images, labels))\n  random.shuffle(data)\n  images, labels = list(zip(*data))\n  return image_utils.image_generator(images[start_from:start_from + how_many],\n                                     labels[start_from:start_from + how_many])", "docstring": "Image generator for MNIST.\n\nArgs:\ntmp_dir: path to temporary storage directory.\ntraining: a Boolean; if true, we use the train set, otherwise the test set.\nhow_many: how many images and labels to generate.\ndata_filename: file that contains features data.\nlabel_filename: file that contains labels.\nstart_from: from which image to start.\n\nReturns:\nAn instance of image_generator that produces MNIST images.", "source": "juraj-google-style"}
{"code": "def __init__(self, latent_size, hidden_size):\n    \n    super(EncoderDynamicFactorized, self).__init__()\n    self.latent_size = latent_size\n    self.hidden_size = hidden_size\n    self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)\n    self.output_layer = tf.keras.layers.Dense(2*latent_size)", "docstring": "Constructs a \"factorized\" encoder for `z_t`.\n\nArgs:\nlatent_size: An integer corresponding to the\ndimensionality of the distribution.\nhidden_size: Dimensionality of the affine function parameters.", "source": "juraj-google-style"}
{"code": "def _create_hash_str(self, input_arg, output_arg, node_def):\n    hasher = hashlib.sha1()\n\n    def update_num(n):\n        hasher.update(compat.as_bytes('%x' % n))\n\n    def update_str(s):\n        update_num(len(s))\n        hasher.update(compat.as_bytes(s))\n\n    def update_strs(slist):\n        update_num(len(slist))\n        for s in slist:\n            update_str(s)\n    for adef in input_arg:\n        update_str(adef.SerializeToString())\n    for adef in output_arg:\n        update_str(adef.SerializeToString())\n    for n in sorted(node_def, key=lambda n: n.name):\n        update_str(n.name)\n        update_str(n.op)\n        update_strs(n.input)\n        update_num(len(n.attr))\n        for k in sorted(n.attr):\n            update_str(k)\n            update_str(n.attr[k].SerializeToString())\n    return hasher.hexdigest()[:8]", "docstring": "Creates an 8-character string unique to this input.\n\nArgs:\ninput_arg: the input_arg field of an OpDef\n(e.g. self._definition.signature.input_arg)\noutput_arg: the output_arg field of an OpDef\n(e.g. self._definition.signature.output_arg)\nnode_def: the node_def field of a FunctionDef\n(e.g. self._definition.node_def)\n\nReturns:\nThe unique string for this input", "source": "github-repos"}
{"code": "def __init__(self, semantic_config: Optional[Dict]=None, coarse_acoustics_config: Optional[Dict]=None, fine_acoustics_config: Optional[Dict]=None, sample_rate=24000, codebook_size=1024, **kwargs):\n    if semantic_config is None:\n        semantic_config = {}\n        logger.info('semantic_config is None. initializing the semantic model with default values.')\n    if coarse_acoustics_config is None:\n        coarse_acoustics_config = {}\n        logger.info('coarse_acoustics_config is None. initializing the coarse model with default values.')\n    if fine_acoustics_config is None:\n        fine_acoustics_config = {}\n        logger.info('fine_acoustics_config is None. initializing the fine model with default values.')\n    self.semantic_config = BarkSemanticGenerationConfig(**semantic_config)\n    self.coarse_acoustics_config = BarkCoarseGenerationConfig(**coarse_acoustics_config)\n    self.fine_acoustics_config = BarkFineGenerationConfig(**fine_acoustics_config)\n    self.sample_rate = sample_rate\n    self.codebook_size = codebook_size", "docstring": "Class that holds a generation configuration for [`BarkModel`].\n\nThe [`BarkModel`] does not have a `generate` method, but uses this class to generate speeches with a nested\n[`BarkGenerationConfig`] which uses [`BarkSemanticGenerationConfig`], [`BarkCoarseGenerationConfig`],\n[`BarkFineGenerationConfig`].\n\nThis configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the\ndocumentation from [`GenerationConfig`] for more information.\n\nArgs:\nsemantic_config (`Dict`, *optional*):\nSemantic generation configuration.\ncoarse_acoustics_config (`Dict`, *optional*):\nCoarse generation configuration.\nfine_acoustics_config (`Dict`, *optional*):\nFine generation configuration.\nsample_rate (`int`, *optional*, defaults to 24_000):\nSample rate.\ncodebook_size (`int`, *optional*, defaults to 1024):\nVector length for each codebook.", "source": "github-repos"}
{"code": "def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):\n    runtime = time.time() - start_time\n    result = {f'{split}_runtime': round(runtime, 4)}\n    if runtime == 0:\n        return result\n    if num_samples is not None:\n        samples_per_second = num_samples / runtime\n        result[f'{split}_samples_per_second'] = round(samples_per_second, 3)\n    if num_steps is not None:\n        steps_per_second = num_steps / runtime\n        result[f'{split}_steps_per_second'] = round(steps_per_second, 3)\n    if num_tokens is not None:\n        tokens_per_second = num_tokens / runtime\n        result[f'{split}_tokens_per_second'] = round(tokens_per_second, 3)\n    return result", "docstring": "Measure and return speed performance metrics.\n\nThis function requires a time snapshot `start_time` before the operation to be measured starts and this function\nshould be run immediately after the operation to be measured has completed.\n\nArgs:\n- split: name to prefix metric (like train, eval, test...)\n- start_time: operation start time\n- num_samples: number of samples processed\n- num_steps: number of steps processed\n- num_tokens: number of tokens processed", "source": "github-repos"}
{"code": "def setModelData(self, editor, model, index):\n        \n        model.setData(index, editor.itemText(editor.currentIndex()))", "docstring": "Updates the model after changing data in the editor.\n\nArgs:\neditor (QtGui.QComboBox): The current editor for the item. Should be\na `QtGui.QComboBox` as defined in `createEditor`.\nmodel (ColumnDtypeModel): The model which holds the displayed data.\nindex (QtCore.QModelIndex): The index of the current item of the model.", "source": "juraj-google-style"}
{"code": "def ValidateServiceGaps(self, problems, validation_start_date, validation_end_date, service_gap_interval):\n    if (service_gap_interval is None):\n        return\n    departures = self.GenerateDateTripsDeparturesList(validation_start_date, validation_end_date)\n    first_day_without_service = validation_start_date\n    last_day_without_service = validation_start_date\n    consecutive_days_without_service = 0\n    for (day_date, day_trips, _) in departures:\n        if (day_trips == 0):\n            if (consecutive_days_without_service == 0):\n                first_day_without_service = day_date\n            consecutive_days_without_service += 1\n            last_day_without_service = day_date\n        else:\n            if (consecutive_days_without_service >= service_gap_interval):\n                problems.TooManyDaysWithoutService(first_day_without_service, last_day_without_service, consecutive_days_without_service)\n            consecutive_days_without_service = 0\n    if (consecutive_days_without_service >= service_gap_interval):\n        problems.TooManyDaysWithoutService(first_day_without_service, last_day_without_service, consecutive_days_without_service)", "docstring": "Validate consecutive dates without service in the feed.\nIssue a warning if it finds service gaps of at least\n\"service_gap_interval\" consecutive days in the date range\n[validation_start_date, last_service_date)\n\nArgs:\nproblems: The problem reporter object\nvalidation_start_date: A date object representing the date from which the\nvalidation should take place\nvalidation_end_date: A date object representing the first day the feed is\nactive\nservice_gap_interval: An integer indicating how many consecutive days the\nservice gaps need to have for a warning to be issued\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_edge_bias(x, filter_size):\n  \n  x_shape = common_layers.shape_list(x)\n  if filter_size[0] == 1 and filter_size[1] == 1:\n    return x\n  a = (filter_size[0] - 1) \n  b = (filter_size[1] - 1) \n  padding = [[0, 0], [a, a], [b, b], [0, 0]]\n  x_bias = tf.zeros(x_shape[:-1] + [1])\n\n  x = tf.pad(x, padding)\n  x_pad = tf.pad(x_bias, padding, constant_values=1)\n  return tf.concat([x, x_pad], axis=3)", "docstring": "Pad x and concatenates an edge bias across the depth of x.\n\nThe edge bias can be thought of as a binary feature which is unity when\nthe filter is being convolved over an edge and zero otherwise.\n\nArgs:\nx: Input tensor, shape (NHWC)\nfilter_size: filter_size to determine padding.\nReturns:\nx_pad: Input tensor, shape (NHW(c+1))", "source": "juraj-google-style"}
{"code": "def generate_states(state_count, process_matrix, process_covariance, initial_state=None):\n    process_matrix = np.atleast_2d(process_matrix)\n    process_covariance = np.atleast_2d(process_covariance)\n    state_dim = process_matrix.shape[0]\n    if (process_matrix.shape != (state_dim, state_dim)):\n        raise ValueError('Process matrix has inconsistent shape: {}'.format(process_matrix.shape))\n    if (process_covariance.shape != (state_dim, state_dim)):\n        raise ValueError('Process covariance has inconsistent shape: {}'.format(process_covariance.shape))\n    if (initial_state is None):\n        initial_state = np.zeros(process_matrix.shape[0])\n    states = [initial_state]\n    while (len(states) < state_count):\n        states.append((process_matrix.dot(states[(- 1)]) + np.random.multivariate_normal(mean=np.zeros(state_dim), cov=process_covariance)))\n    return np.vstack(states)", "docstring": "Generate states by simulating a linear system with constant process matrix\nand process noise covariance.\n\nArgs:\nstate_count (int): Number of states to generate.\nprocess_matrix (array): Square array\nprocess_covariance (array): Square array specifying process noise\ncovariance.\ninitial_state (array or None): If omitted, use zero-filled vector as\ninitial state.", "source": "codesearchnet"}
{"code": "def curie_search(self, curie:str) -> dict:\n        \n        ilx_row = self.curie2row.get(curie)\n        if not ilx_row:\n            return None\n        else:\n            return ilx_row", "docstring": "Returns the row in InterLex associated with the curie\n\nNote:\nPressumed to not have duplicate curies in InterLex\nArgs:\ncurie: The \"prefix:fragment_id\" of the existing_id pertaining to the ontology\nReturns:\nNone or dict", "source": "juraj-google-style"}
{"code": "def generate_code(max_length, max_nest, ops):\n  \n  stack = []\n  def fetch_one():\n    \n    if stack:\n      return stack.pop()\n    else:\n      \n      value = random.randint(10 ** (max_length - 1), 10 ** max_length - 1)\n      code = str(value)\n      return value, code\n\n  def fetch(num_operands):\n    values, codes = zip(*[fetch_one() for _ in six.moves.range(num_operands)])\n    return values, codes\n\n  for _ in six.moves.range(max_nest):\n    op = random.choice(ops)\n    values, codes = fetch(op.num_operands)\n    new_value = op.eval(values)\n    new_code = op.get_code(codes)\n    stack.append((new_value, \"(\" + new_code + \")\"))\n  final_value, final_code = stack.pop()\n  final_code = final_code[1:-1]\n  final_code.strip(\"()\")\n  if not op.is_memory:\n    final_value = int(final_value) % 10 ** (max_length+1)\n  return str(final_value), final_code", "docstring": "Generates code samples.\n\nArgs:\nmax_length: int.  max literal length.\nmax_nest: int. max nesting level.\nops: CodeOp. set of allowable operations.\n\nReturns:\n1. (str) output value.\n2. (str) Code operation.", "source": "juraj-google-style"}
{"code": "def read_committed_file(gitref, filename):\n    repo = Repo()\n    commitobj = repo.commit(gitref)\n    blob = commitobj.tree[(_delta_dir() + filename)]\n    return blob.data_stream.read()", "docstring": "Retrieve the content of a file in an old commit and returns it.\n\nKetword Arguments:\n:gitref: (str) -- full reference of the git commit\n:filename: (str) -- name (full path) of the file\n\nReturns:\nstr -- content of the file", "source": "codesearchnet"}
{"code": "def get_records(self, name):\n    if (name in self._cache):\n        return self._cache[name].values()\n    else:\n        return []", "docstring": "Return all the records for the given name in the cache.\n\nArgs:\nname (string): The name which the required models are stored under.\n\nReturns:\nlist: A list of :class:`cinder_data.model.CinderModel` models.", "source": "codesearchnet"}
{"code": "def _get_storage_model():\n    storage_model_settings = getattr(django.conf.settings, 'GOOGLE_OAUTH2_STORAGE_MODEL', None)\n    if (storage_model_settings is not None):\n        return (storage_model_settings['model'], storage_model_settings['user_property'], storage_model_settings['credentials_property'])\n    else:\n        return (None, None, None)", "docstring": "This configures whether the credentials will be stored in the session\nor the Django ORM based on the settings. By default, the credentials\nwill be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`\nis found in the settings. Usually, the ORM storage is used to integrate\ncredentials into an existing Django user system.\n\nReturns:\nA tuple containing three strings, or None. If\n``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple\nwill contain the fully qualifed path of the `django.db.model`,\nthe name of the ``django.contrib.auth.models.User`` field on the\nmodel, and the name of the\n:class:`oauth2client.contrib.django_util.models.CredentialsField`\nfield on the model. If Django ORM storage is not configured,\nthis function returns None.", "source": "codesearchnet"}
{"code": "def encode_json_body(data):\n    \n    \n    if hasattr(data, \"read\"):\n        return data\n\n    response.content_type = \"application/json; charset=utf-8\"\n\n    return json.dumps(\n        data,\n        indent=4,\n        separators=(',', ': ')\n    )", "docstring": "Return prettified JSON `data`, set ``response.content_type`` to\n``application/json; charset=utf-8``.\n\nArgs:\ndata (any): Any basic python data structure.\n\nReturns:\nstr: Data converted to prettified JSON.", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    if ('RememberedNetworks' not in match):\n        return\n    for wifi in match['RememberedNetworks']:\n        ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')\n        security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')\n        event_data = plist_event.PlistTimeEventData()\n        event_data.desc = '[WiFi] Connected to network: <{0:s}> using security {1:s}'.format(ssid, security_type)\n        event_data.key = 'item'\n        event_data.root = '/RememberedNetworks'\n        datetime_value = wifi.get('LastConnected', None)\n        if datetime_value:\n            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        else:\n            date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Airport entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def list_offers(access_token, subscription_id, location, publisher):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List available VM image offers from a publisher.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location. E.g. westus.\npublisher (str): Publisher name, e.g. Canonical.\n\nReturns:\nHTTP response with JSON list of image offers.", "source": "codesearchnet"}
{"code": "def _get_batch_data(self, batch_size, num_objects, num_features):\n    \n    all_inputs = []\n    all_labels = []\n    for _ in six.moves.range(batch_size):\n      inputs, labels = self._get_single_set(num_objects, num_features)\n      all_inputs += [inputs]\n      all_labels += [labels]\n    input_data = np.concatenate(all_inputs, axis=0)\n    label_data = np.concatenate(all_labels, axis=0)\n    return input_data, label_data", "docstring": "Assembles a batch of input tensors and output labels.\n\nArgs:\nbatch_size: int. number of sequence batches.\nnum_objects: int. number of objects in the sequence.\nnum_features: int. feature size of each object.\n\nReturns:\n1. np.ndarray (`batch_size`, `num_objects`,\n(`num_features` + 3 * `num_objects`)).\n2. np.ndarray (`batch_size`). Output object reference label.", "source": "juraj-google-style"}
{"code": "def write_top_half(f, row_metadata_df, col_metadata_df, metadata_null, filler_null):\n    \n    \n    size_of_top_half_df = (1 + col_metadata_df.shape[1],\n                           1 + row_metadata_df.shape[1] + col_metadata_df.shape[0])\n\n    top_half_df = pd.DataFrame(np.full(size_of_top_half_df, filler_null, dtype=object))\n\n    \n    top_half_df.iloc[0, :] = np.hstack((\"id\", row_metadata_df.columns.values, col_metadata_df.index.values))\n\n    \n    top_half_df.iloc[range(1, top_half_df.shape[0]), 0] = col_metadata_df.columns.values\n\n    \n    col_metadata_indices = (range(1, top_half_df.shape[0]),\n                            range(1 + row_metadata_df.shape[1], top_half_df.shape[1]))\n    \n    top_half_df.at[col_metadata_indices[0], col_metadata_indices[1]] = (\n        col_metadata_df.astype(str).replace(\"nan\", value=metadata_null).T.values)\n\n    \n    top_half_df.to_csv(f, header=False, index=False, sep=\"\\t\")", "docstring": "Write the top half of the gct file: top-left filler values, row metadata\nheaders, and top-right column metadata.\n\nArgs:\nf (file handle): handle for output file\nrow_metadata_df (pandas df)\ncol_metadata_df (pandas df)\nmetadata_null (string): how to represent missing values in the metadata\nfiller_null (string): what value to fill the top-left filler block with\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _fn(arg0, arg1, deprecated=True):\n    return arg0 + arg1 if deprecated else arg1 + arg0", "docstring": "fn doc.\n\nArgs:\narg0: Arg 0.\narg1: Arg 1.\ndeprecated: Deprecated!\n\nReturns:\nSum of args.", "source": "github-repos"}
{"code": "def add_request(self, request):\n    queue_item = QueueItem(request, Response(request.url))\n    self.add(queue_item)\n    return queue_item", "docstring": "Add a request to the queue.\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The request to add.\n\nReturns:\n:class:`nyawc.QueueItem`: The created queue item.", "source": "codesearchnet"}
{"code": "def __init__(self, options, queue_item):\n        \n\n        self.__options = options\n        self.__queue_item = queue_item\n\n        self.__queue_item.response = self.__make_request(\n            self.__queue_item.request.url,\n            self.__queue_item.request.method,\n            self.__queue_item.request.data,\n            self.__queue_item.request.auth,\n            self.__queue_item.request.cookies,\n            self.__queue_item.request.headers,\n            self.__queue_item.request.proxies,\n            self.__queue_item.request.timeout,\n            self.__queue_item.request.verify\n        )\n\n        \n        \n        self.__queue_item.response.url = str(self.__queue_item.response.url)", "docstring": "Construct the HTTP handler.\n\nArgs:\noptions (:class:`nyawc.Options`): The settins/options object.\nqueue_item (:class:`nyawc.QueueItem`): The queue item containing the request.", "source": "juraj-google-style"}
{"code": "def _add_result(self, dict_entry, entry, dt, start_time):\n    \n    time_entry = {}\n    time_entry['dt'] = dt\n    time_entry['start_time'] = start_time\n    dict_entry[entry] = time_entry", "docstring": "Adds a result to the dictionary.\n\nArgs:\ndict_entry: main dict to add entry\nentry: slot for this entry (likely an integer)\ndt: the timing for the entry\nstart_time: when the entry started unix time float", "source": "juraj-google-style"}
{"code": "def __init__(self, name, lim_low=85., lim_high=95., **kwargs):\n        \n        self.lim_low = lim_low\n        self.lim_high = lim_high\n        super(DayNightCompositor, self).__init__(name, **kwargs)", "docstring": "Collect custom configuration values.\n\nArgs:\nlim_low (float): lower limit of Sun zenith angle for the\nblending of the given channels\nlim_high (float): upper limit of Sun zenith angle for the\nblending of the given channels", "source": "juraj-google-style"}
{"code": "def __init__(self, parent=None):\n        \n        super(DelimiterSelectionWidget, self).__init__(parent)\n        self.semicolonRadioButton = None\n        self.commaRadioButton = None\n        self.tabRadioButton = None\n        self.otherRadioButton = None\n        self.otherSeparatorLineEdit = None\n        self._initUI()", "docstring": "Constructs the object with the given parent.\n\nArgs:\nparent (QObject, optional): Causes the objected to be owned\nby `parent` instead of Qt. Defaults to `None`.", "source": "juraj-google-style"}
{"code": "def get_battery_level(self):\n    battery_level = self.get_characteristic_handle_from_uuid(UUID_BATTERY_LEVEL)\n    if (battery_level is None):\n        logger.warn('Failed to find handle for battery level')\n        return None\n    level = self.dongle._read_attribute(self.conn_handle, battery_level)\n    if (level is None):\n        return (- 1)\n    return ord(level)", "docstring": "Reads the battery level descriptor on the device.\n\nReturns:\nint. If successful this will be a positive value representing the current\nbattery level as a percentage. On error, -1 is returned.", "source": "codesearchnet"}
{"code": "def _get_output_tensors(self, interpreter: _interpreter.Interpreter) -> List[np.ndarray]:\n    outputs = []\n    for output_detail in interpreter.get_output_details():\n        tensor = interpreter.get_tensor(output_detail['index'])\n        if output_detail['dtype'] == np.int8:\n            quant_params = _get_quant_params(output_detail)\n            if quant_params:\n                scale, zero_point = quant_params\n                tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype(np.float32)\n        outputs.append(tensor)\n    return outputs", "docstring": "Returns output tensors of given TFLite model Interpreter.\n\nArgs:\ninterpreter: a tf.lite.Interpreter object with allocated tensors.\n\nReturns:\na list of numpy arrays representing output tensor results.", "source": "github-repos"}
{"code": "def getattr_sdk(attr, name):\n    if inspect.isroutine(attr):\n        if hasattr(attr, '_sdkmeta'):\n            return attr\n    raise AttributeError(name)", "docstring": "Filter SDK attributes\n\nArgs:\nattr(attribute): Attribute as returned by :func:`getattr`.\nname(str): Attribute name.\n\nReturns:\n`attr` if passed.", "source": "codesearchnet"}
{"code": "def _UpdateCounters(self, event):\n    \n    self._session.parsers_counter['total'] += 1\n\n    \n    parser_name = getattr(event, 'parser', '')\n    _, _, parser_name = parser_name.rpartition('/')\n    if not parser_name:\n      parser_name = 'N/A'\n    self._session.parsers_counter[parser_name] += 1", "docstring": "Updates the counters.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def __init__(self, initializer=None):\n    \n    if initializer is None:\n      self.data = []\n      return\n    if isinstance(initializer, Timeseries):\n      self.data = copy.deepcopy(initializer.data)\n      return\n    raise RuntimeError(\"Unrecognized initializer.\")", "docstring": "Create a timeseries with an optional initializer.\n\nArgs:\ninitializer: An optional Timeseries to clone.\n\nRaises:\nRuntimeError: If initializer is not understood.", "source": "juraj-google-style"}
{"code": "def put(cls, obj):\n        \n        return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))", "docstring": "Put an object in the Plasma store and wrap it in this object.\n\nArgs:\nobj: The object to be put.\n\nReturns:\nA `RayRemotePartition` object.", "source": "juraj-google-style"}
{"code": "def dot(A, B):\n    \n    try:\n        result = A.__matmul__(B)\n        if result is NotImplemented:\n            result = B.__rmatmul__(A)\n    except AttributeError:\n        result = B.__rmatmul__(A)\n    return result", "docstring": "Matrix multiplication between A and B\n\nThis function is equivalent to ``A @ B``, which is unfortunately\nnot possible under python 2.x.\n\nArgs:\nA (sequence):\nB (sequence):\n\nReturns:\nsequence:", "source": "juraj-google-style"}
{"code": "def dframe(self, dimensions=None, multi_index=False):\n        \n        import pandas as pd\n        if dimensions is None:\n            outer_dimensions = self.kdims\n            inner_dimensions = None\n        else:\n            outer_dimensions = [self.get_dimension(d) for d in dimensions\n                                if d in self.kdims]\n            inner_dimensions = [d for d in dimensions\n                                if d not in outer_dimensions]\n        inds = [(d, self.get_dimension_index(d)) for d in outer_dimensions]\n\n        dframes = []\n        for key, element in self.data.items():\n            df = element.dframe(inner_dimensions, multi_index)\n            names = [d.name for d in outer_dimensions]\n            key_dims = [(d.name, key[i]) for d, i in inds]\n            if multi_index:\n                length = len(df)\n                indexes = [[v]*length for _, v in key_dims]\n                if df.index.names != [None]:\n                    indexes += [df.index]\n                    names += list(df.index.names)\n                df = df.set_index(indexes)\n                df.index.names = names\n            else:\n                for dim, val in key_dims:\n                    dimn = 1\n                    while dim in df:\n                        dim = dim+'_%d' % dimn\n                        if dim in df:\n                            dimn += 1\n                    df.insert(0, dim, val)\n            dframes.append(df)\n        return pd.concat(dframes)", "docstring": "Convert dimension values to DataFrame.\n\nReturns a pandas dataframe of columns along each dimension,\neither completely flat or indexed by key dimensions.\n\nArgs:\ndimensions: Dimensions to return as columns\nmulti_index: Convert key dimensions to (multi-)index\n\nReturns:\nDataFrame of columns corresponding to each dimension", "source": "juraj-google-style"}
{"code": "def tcp_ping(\n    task: Task, ports: List[int], timeout: int = 2, host: Optional[str] = None\n) -> Result:\n    \n\n    if isinstance(ports, int):\n        ports = [ports]\n\n    if isinstance(ports, list):\n        if not all(isinstance(port, int) for port in ports):\n            raise ValueError(\"Invalid value for 'ports'\")\n\n    else:\n        raise ValueError(\"Invalid value for 'ports'\")\n\n    host = host or task.host.hostname\n\n    result = {}\n    for port in ports:\n        s = socket.socket()\n        s.settimeout(timeout)\n        try:\n            status = s.connect_ex((host, port))\n            if status == 0:\n                connection = True\n            else:\n                connection = False\n        except (socket.gaierror, socket.timeout, socket.error):\n            connection = False\n        finally:\n            s.close()\n        result[port] = connection\n\n    return Result(host=task.host, result=result)", "docstring": "Tests connection to a tcp port and tries to establish a three way\nhandshake. To be used for network discovery or testing.\n\nArguments:\nports (list of int): tcp ports to ping\ntimeout (int, optional): defaults to 2\nhost (string, optional): defaults to ``hostname``\n\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): Contains port numbers as keys with True/False as values", "source": "juraj-google-style"}
{"code": "def formula_html(self, reversed_=False):\n    if (self.H_count == 1):\n        text = 'H'\n    elif (self.H_count > 1):\n        text = 'H<sub>{}</sub>'.format(self.H_count)\n    else:\n        text = ''\n    seq = [self.symbol, text, self.charge_sign_html()]\n    if reversed_:\n        seq = reversed(seq)\n    return ''.join(seq)", "docstring": "Chemical formula HTML\n\nArgs:\nreversed (bool): reversed text for leftmost atom groups", "source": "codesearchnet"}
{"code": "def _add_collection_def(meta_graph_def, key, export_scope=None):\n    meta_graph.add_collection_def(meta_graph_def, key, export_scope=export_scope)", "docstring": "Adds a collection to MetaGraphDef protocol buffer.\n\nArgs:\nmeta_graph_def: MetaGraphDef protocol buffer.\nkey: One of the GraphKeys or user-defined string.\nexport_scope: Optional `string`. Name scope to remove.", "source": "github-repos"}
{"code": "def __getattr__(cls, item):\n        \n        if item in cls._meta.settings.keys():\n            return cls._meta.settings[item]\n        raise AttributeError(\"'%s' class has no attribute '%s'\" % (cls.__name__, item))", "docstring": "Return a setting object if it is in the ``_meta.settings`` dictionary.\n\nArgs:\nitem (str):\nthe name of the setting variable (not the setting's name).\n\nReturns:\n``Setting``: the setting object.\n\nRaises:\nAttributeError if the setting does not exist.", "source": "juraj-google-style"}
{"code": "def load_img(path, grayscale=False, target_size=None):\n    \n    img = io.imread(path, grayscale)\n    if target_size:\n        img = transform.resize(img, target_size, preserve_range=True).astype('uint8')\n    return img", "docstring": "Utility function to load an image from disk.\n\nArgs:\npath: The image file path.\ngrayscale: True to convert to grayscale image (Default value = False)\ntarget_size: (w, h) to resize. (Default value = None)\n\nReturns:\nThe loaded numpy image.", "source": "juraj-google-style"}
{"code": "def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False, default=False, run=True):\n    if ((not default) and (not disable)):\n        if ((not int(value)) or (int(value) < 1) or (int(value) > 3600)):\n            raise ValueError(\"vrrp property 'mac_addr_adv_interval' must be in the range 1-3600\")\n    cmd = self.command_builder(('vrrp %d mac-address advertisement-interval' % vrid), value=value, default=default, disable=disable)\n    if run:\n        result = self.configure_interface(name, cmd)\n        if (result is False):\n            return self.error\n        return result\n    return cmd", "docstring": "Set the mac_addr_adv_interval property of the vrrp\n\nArgs:\nname (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be managed.\nvalue (integer): mac-address advertisement-interval value to\nassign to the vrrp.\ndisable (boolean): Unset mac-address advertisement-interval\nif True.\ndefault (boolean): Set mac-address advertisement-interval to\ndefault if True.\nrun (boolean): Set to True to execute the command, False to\nreturn a string with the formatted command.\n\nReturns:\nIf run is True, returns True if the command executed successfully,\nerror if failure.\n\nIf run is False, returns the formatted command string which can\nbe passed to the node", "source": "codesearchnet"}
{"code": "def is_table(engine, sql):\n    if engine.dialect.has_table(engine, sql):\n        return True\n    return False", "docstring": "Check with the given sql arg is query or table\n\nArgs:\nengine: SQLAlchemy connection engine\nsql: SQL query or table name\n\nReturns:\nTrue for table or False if not", "source": "codesearchnet"}
{"code": "def root_cause(binding, node, seen=()):\n    if isinstance(binding, (list, tuple)):\n        bindings = list(binding)\n    else:\n        bindings = [binding]\n    del binding\n    key = frozenset(bindings)\n    if key in seen:\n        return (next(iter(bindings), None), node)\n    for b in bindings:\n        if not node.HasCombination([b]):\n            for o in b.origins:\n                for source_set in o.source_sets:\n                    cause, n = root_cause(list(source_set), o.where)\n                    if cause is not None:\n                        return (cause, n)\n            return (b, node)\n    return (None, None)", "docstring": "Tries to determine why a binding isn't possible at a node.\n\nThis tries to find the innermost source that's still impossible. It only works\nif the failure isn't due to a combination of bindings.\n\nArgs:\nbinding: A binding, or a list of bindings.\nnode: The node at which (one of the) binding(s) is impossible.\nseen: Internal. Bindings already looked at.\n\nReturns:\nA tuple (binding, node), with \"binding\" the innermost binding that's\nnot possible, and \"node\" the CFG node at which it isn't.", "source": "github-repos"}
{"code": "def EnrolFleetspeakClient(self, client_id):\n    \n    client_urn = rdf_client.ClientURN(client_id)\n\n    \n    if data_store.RelationalDBEnabled():\n      try:\n        data_store.REL_DB.ReadClientMetadata(client_id)\n        return False\n      except db.UnknownClientError:\n        pass\n    else:\n      if aff4.FACTORY.ExistsWithType(\n          client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):\n        return False\n\n    logging.info(\"Enrolling a new Fleetspeak client: %r\", client_id)\n\n    if data_store.RelationalDBEnabled():\n      now = rdfvalue.RDFDatetime.Now()\n      data_store.REL_DB.WriteClientMetadata(\n          client_id, first_seen=now, fleetspeak_enabled=True, last_ping=now)\n\n    if data_store.AFF4Enabled():\n      \n      \n      \n      \n      with aff4.FACTORY.Create(\n          client_urn,\n          aff4_type=aff4_grr.VFSGRRClient,\n          mode=\"rw\",\n          token=self.token) as client:\n\n        client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))\n\n        index = client_index.CreateClientIndex(token=self.token)\n        index.AddClient(client)\n        if data_store.RelationalDBEnabled():\n          client_obj = rdf_objects.ClientSnapshot(\n              client_id=client_urn.Basename())\n          index = client_index.ClientIndex()\n          index.AddClient(client_obj)\n\n    \n    events.Events.PublishEvent(\"ClientEnrollment\", client_urn, token=self.token)\n    return True", "docstring": "Enrols a Fleetspeak-enabled client for use with GRR.\n\nArgs:\nclient_id: GRR client-id for the client.\n\nReturns:\nTrue if the client is new, and actually got enrolled. This method\nis a no-op if the client already exists (in which case False is returned).", "source": "juraj-google-style"}
{"code": "def min_sequence_length(self, dataset_split):\n    return {problem.DatasetSplit.TRAIN: 8, problem.DatasetSplit.EVAL: 65, problem.DatasetSplit.TEST: 65}[dataset_split]", "docstring": "Determine the minimum sequence length given a dataset_split.\n\nArgs:\ndataset_split: A problem.DatasetSplit.\n\nReturns:\nThe minimum length that a sequence can be for this dataset_split.", "source": "codesearchnet"}
{"code": "def rApply(d, f):\n  \n  remainingDicts = [(d, ())]\n  while len(remainingDicts) > 0:\n    current, prevKeys = remainingDicts.pop()\n    for k, v in current.iteritems():\n      keys = prevKeys + (k,)\n      if isinstance(v, dict):\n        remainingDicts.insert(0, (v, keys))\n      else:\n        f(v, keys)", "docstring": "Recursively applies f to the values in dict d.\n\nArgs:\nd: The dict to recurse over.\nf: A function to apply to values in d that takes the value and a list of\nkeys from the root of the dict to the value.", "source": "juraj-google-style"}
{"code": "def run(self, feed_dict=None, session=None) -> None:\n    _run_using_default_session(self, feed_dict, self.graph, session)", "docstring": "Runs this operation in a `Session`.\n\nCalling this method will execute all preceding operations that\nproduce the inputs needed for this operation.\n\n*N.B.* Before invoking `Operation.run()`, its graph must have been\nlaunched in a session, and either a default session must be\navailable, or `session` must be specified explicitly.\n\nArgs:\nfeed_dict: A dictionary that maps `Tensor` objects to feed values. See\n`tf.Session.run` for a description of the valid feed values.\nsession: (Optional.) The `Session` to be used to run to this operation. If\nnone, the default session will be used.", "source": "github-repos"}
{"code": "def stop(self):\n    self.log.debug('Stopping snippet package %s.', self.package)\n    self.close_connection()\n    self._stop_server()\n    self._destroy_event_client()\n    self.log.debug('Snippet package %s stopped.', self.package)", "docstring": "Releases all the resources acquired in `initialize`.\n\nThis function releases following resources:\n* Close the socket connection.\n* Stop forwarding the device port to host.\n* Stop the standing server subprocess running on the host side.\n* Stop the snippet server running on the device side.\n* Stop the event client and set `self._event_client` to None.\n\nRaises:\nandroid_device_lib_errors.DeviceError: if the server exited with errors on\nthe device side.", "source": "github-repos"}
{"code": "def __init__(self, code, message=None, command=None):\n        \n        super().__init__(message)\n        self.code = code\n        self.command = command", "docstring": "Initializes a new instance of SMTPCommandFailedError.\n\nArgs:\ncode (int): Error code returned by the SMTP server.\nmessage (str): Exception message, ideally providing help for the\nuser.\ncommand (str): Command sent to the server that originated the\nexception.", "source": "juraj-google-style"}
{"code": "def run(components=None, broker=None):\n    components = (components or COMPONENTS[GROUPS.single])\n    components = _determine_components(components)\n    broker = (broker or Broker())\n    for component in run_order(components):\n        start = time.time()\n        try:\n            if ((component not in broker) and (component in DELEGATES) and is_enabled(component)):\n                log.info(('Trying %s' % get_name(component)))\n                result = DELEGATES[component].process(broker)\n                broker[component] = result\n        except MissingRequirements as mr:\n            if log.isEnabledFor(logging.DEBUG):\n                name = get_name(component)\n                reqs = stringify_requirements(mr.requirements)\n                log.debug(('%s missing requirements %s' % (name, reqs)))\n            broker.add_exception(component, mr)\n        except SkipComponent:\n            pass\n        except Exception as ex:\n            tb = traceback.format_exc()\n            log.warn(tb)\n            broker.add_exception(component, ex, tb)\n        finally:\n            broker.exec_times[component] = (time.time() - start)\n            broker.fire_observers(component)\n    return broker", "docstring": "Executes components in an order that satisfies their dependency\nrelationships.\n\nKeyword Args:\ncomponents: Can be one of a dependency graph, a single component, a\ncomponent group, or a component type. If it's anything other than a\ndependency graph, the appropriate graph is built for you and before\nevaluation.\nbroker (Broker): Optionally pass a broker to use for evaluation. One is\ncreated by default, but it's often useful to seed a broker with an\ninitial dependency.\nReturns:\nBroker: The broker after evaluation.", "source": "codesearchnet"}
{"code": "def create_raw(self, key, value):\n        \n        data = None\n        if key is not None and value is not None:\n            data = self.db.create(key.strip(), value)\n        else:\n            self.tcex.log.warning(u'The key or value field was None.')\n        return data", "docstring": "Create method of CRUD operation for raw data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"}
{"code": "def create(self, algorithm, length, operation_policy_name=None, name=None, cryptographic_usage_mask=None):\n    if (not isinstance(algorithm, enums.CryptographicAlgorithm)):\n        raise TypeError('algorithm must be a CryptographicAlgorithm enumeration')\n    elif ((not isinstance(length, six.integer_types)) or (length <= 0)):\n        raise TypeError('length must be a positive integer')\n    if (cryptographic_usage_mask is not None):\n        if ((not isinstance(cryptographic_usage_mask, list)) or (all((isinstance(item, enums.CryptographicUsageMask) for item in cryptographic_usage_mask)) is False)):\n            raise TypeError('cryptographic_usage_mask must be a list of CryptographicUsageMask enumerations')\n    common_attributes = self._build_common_attributes(operation_policy_name)\n    key_attributes = self._build_key_attributes(algorithm, length, cryptographic_usage_mask)\n    key_attributes.extend(common_attributes)\n    if name:\n        key_attributes.extend(self._build_name_attribute(name))\n    template = cobjects.TemplateAttribute(attributes=key_attributes)\n    result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)\n    status = result.result_status.value\n    if (status == enums.ResultStatus.SUCCESS):\n        return result.uuid\n    else:\n        reason = result.result_reason.value\n        message = result.result_message.value\n        raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Create a symmetric key on a KMIP appliance.\n\nArgs:\nalgorithm (CryptographicAlgorithm): An enumeration defining the\nalgorithm to use to generate the symmetric key.\nlength (int): The length in bits for the symmetric key.\noperation_policy_name (string): The name of the operation policy\nto use for the new symmetric key. Optional, defaults to None\nname (string): The name to give the key. Optional, defaults to None\ncryptographic_usage_mask (list): list of enumerations of crypto\nusage mask passing to the symmetric key. Optional, defaults to\nNone\n\nReturns:\nstring: The uid of the newly created symmetric key.\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input arguments are invalid", "source": "codesearchnet"}
{"code": "def search(self, filepath=None, basedir=None, kind=None):\n    if (filepath is None):\n        filepath = ''\n    if (basedir is None):\n        basedir = '.'\n    if ((not basedir) and (not filepath)):\n        msg = 'Either basedir or filepath is required for discovering'\n        raise SettingsDiscoveryError(msg)\n    if (kind and (kind not in self.engines)):\n        msg = 'Given settings format is unknow: {}'\n        raise SettingsDiscoveryError(msg.format(kind))\n    if (not filepath):\n        (filename, engine) = self.guess_filename(basedir, kind)\n        filepath = os.path.join(basedir, filename)\n    else:\n        if os.path.isabs(filepath):\n            (basedir, filename) = os.path.split(filepath)\n        else:\n            filepath = os.path.join(basedir, filepath)\n        if (not os.path.exists(filepath)):\n            msg = 'Given settings file does not exists: {}'\n            raise SettingsDiscoveryError(msg.format(filepath))\n        engine = self.get_engine(filepath, kind)\n    return (filepath, engine)", "docstring": "Search for a settings file.\n\nKeyword Arguments:\nfilepath (string): Path to a config file, either absolute or\nrelative. If absolute set its directory as basedir (omitting\ngiven basedir argument). If relative join it to basedir.\nbasedir (string): Directory path where to search for.\nkind (string): Backend engine kind name (value of attribute\n``_kind_name``) to help discovering with empty or relative\nfilepath. Also if explicit absolute filepath is given, this\nwill enforce the backend engine (such as yaml kind will be\nforced for a ``foo.json`` file).\n\nReturns:\ntuple: Absolute filepath and backend engine class.", "source": "codesearchnet"}
{"code": "def _parse_trunk_native_vlan(self, config):\n    match = re.search('switchport trunk native vlan (\\\\d+)', config)\n    return dict(trunk_native_vlan=match.group(1))", "docstring": "Scans the specified config and parse the trunk native vlan value\n\nArgs:\nconfig (str): The interface configuration block to scan\n\nReturns:\ndict: A Python dict object with the value of switchport trunk\nnative vlan value.  The dict returned is intended to be\nmerged into the resource dict", "source": "codesearchnet"}
{"code": "def get_video_features(self, pixel_values_videos: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    batch_size_vid, num_frames, channels, height, width = pixel_values_videos.shape\n    pixel_values = pixel_values_videos.reshape(batch_size_vid * num_frames, channels, height, width)\n    video_outputs = self.video_tower(pixel_values, output_hidden_states=True)\n    if isinstance(vision_feature_layer, int):\n        video_features = video_outputs.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [video_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        video_features = torch.cat(hs_pool, dim=-1)\n    video_features = self.multi_modal_projector(video_features)\n    return (video_features, num_frames)", "docstring": "Obtains video last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values_videos (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)\nThe tensors corresponding to the input videos.\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nReturns:\nvideo_features (`torch.Tensor`): Video feature tensor of shape `(num_videos * num_frames, image_length, embed_dim)`).\nframes (`int`): Number of frames the videos have.", "source": "github-repos"}
{"code": "def _schema_line(args):\n  \n  \n  name = args['table'] if args['table'] else args['view']\n  if name is None:\n    raise Exception('No table or view specified; cannot show schema')\n\n  schema = _get_schema(name)\n  if schema:\n    html = _repr_html_table_schema(schema)\n    return IPython.core.display.HTML(html)\n  else:\n    raise Exception('%s is not a schema and does not appear to have a schema member' % name)", "docstring": "Implements the BigQuery schema magic used to display table/view schemas.\n\nArgs:\nargs: the arguments following '%bigquery schema'.\nReturns:\nThe HTML rendering for the schema.", "source": "juraj-google-style"}
{"code": "def get_file(self, url, path_or_file=None, headers=None, filename=None):\n    path_or_file = (path_or_file or filename)\n    if self.debug:\n        print(('GET FILE: %s, headers=%s' % (url, headers)))\n    self.headers = self._get_default_headers()\n    if (headers is not None):\n        self.headers.update(headers)\n    response = requests.get(url, headers=self.headers, auth=self.auth, verify=self.verify_ssl)\n    self.http_status_code = response.status_code\n    try:\n        self._check_error(response)\n        try:\n            path_or_file.write(response.content)\n        except AttributeError:\n            fd = os.open(path_or_file, (os.O_CREAT | os.O_RDWR))\n            with os.fdopen(fd, 'w+b') as f:\n                f.write(response.content)\n    except:\n        return False\n    return True", "docstring": "Get a file from a url and save it as `filename`\n\nArgs:\nurl (str): URL to send the request to\n\npath_or_file (str or file): A writable File-like object or a path to save the file to.\n\nfilename (str): [DEPRECATED] File name to save the file as, this can be either\na full path or a relative path\n\nheaders (str, optional): custom headers\n\nReturns:\nTrue if file is downloaded and written successfully, False\notherwise.", "source": "codesearchnet"}
{"code": "def pipelines(self, **kwargs):\n        \n\n        path = '%s/%s/pipelines' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge request pipelines.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: List of changes", "source": "juraj-google-style"}
{"code": "def mount(self, app=None):\n    for endpoint in self._routes:\n        endpoint.register_app(app)\n    return self", "docstring": "Mounts all registered routes to a bottle.py application instance.\n\nArgs:\napp (instance): A `bottle.Bottle()` application instance.\n\nReturns:\nThe Router instance (for chaining purposes).", "source": "codesearchnet"}
{"code": "def get_mysql_vars(mysql: str, host: str, port: int, user: str) -> Dict[(str, str)]:\n    cmdargs = [mysql, '-h', host, '-P', str(port), '-e', 'SHOW VARIABLES; SHOW STATUS', '-u', user, '-p']\n    log.info('Connecting to MySQL with user: {}', user)\n    log.debug(cmdargs)\n    process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE)\n    (out, err) = process.communicate()\n    lines = out.decode('utf8').splitlines()\n    mysqlvars = {}\n    for line in lines:\n        (var, val) = line.split('\\t')\n        mysqlvars[var] = val\n    return mysqlvars", "docstring": "Asks MySQL for its variables and status.\n\nArgs:\nmysql: ``mysql`` executable filename\nhost: host name\nport: TCP/IP port number\nuser: username\n\nReturns:\ndictionary of MySQL variables/values", "source": "codesearchnet"}
{"code": "def proc_ovrds(**kwargs):\n    \n    return [\n        (k, v) for k, v in kwargs.items()\n        if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS\n    ]", "docstring": "Bloomberg overrides\n\nArgs:\n**kwargs: overrides\n\nReturns:\nlist of tuples\n\nExamples:\n>>> proc_ovrds(DVD_Start_Dt='20180101')\n[('DVD_Start_Dt', '20180101')]\n>>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)\n[('DVD_Start_Dt', '20180101')]", "source": "juraj-google-style"}
{"code": "def unshare(self, group_id, **kwargs):\n        \n        path = '/projects/%s/share/%s' % (self.get_id(), group_id)\n        self.manager.gitlab.http_delete(path, **kwargs)", "docstring": "Delete a shared project link within a group.\n\nArgs:\ngroup_id (int): ID of the group.\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server failed to perform the request", "source": "juraj-google-style"}
{"code": "def ones_like(array, dtype=None, keepmeta=True):\n    if keepmeta:\n        return xr.ones_like(array, dtype)\n    else:\n        return dc.ones(array.shape, dtype)", "docstring": "Create an array of ones with the same shape and type as the input array.\n\nArgs:\narray (xarray.DataArray): The shape and data-type of it define\nthese same attributes of the output array.\ndtype (data-type, optional): If specified, this function overrides\nthe data-type of the output array.\nkeepmeta (bool, optional): Whether *coords, attrs, and name of the input\narray are kept in the output one. Default is True.\n\nReturns:\narray (decode.array): Decode array filled with ones.", "source": "codesearchnet"}
{"code": "def initGP(self,fast=False):\n        \n        if fast:\n            assert self.n_terms==2, 'CVarianceDecomposition: for fast inference number of terms must be == 2'\n            assert self.P>1,        'CVarianceDecomposition: for fast inference number of traits must be > 1'\n            self.vd.initGPkronSum()\n        else:\n            self.vd.initGP()\n        self.gp=self.vd.getGP()\n        self.init=True\n        self.fast=fast", "docstring": "Initialize GP object\n\nArgs:\nfast:   if fast==True initialize gpkronSum gp", "source": "juraj-google-style"}
{"code": "def call_requests(\n    requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool\n) -> Response:\n    \n    if isinstance(requests, collections.Iterable):\n        return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)\n    return safe_call(requests, methods, debug=debug)", "docstring": "Takes a request or list of Requests and calls them.\n\nArgs:\nrequests: Request object, or a collection of them.\nmethods: The list of methods that can be called.\ndebug: Include more information in error responses.", "source": "juraj-google-style"}
{"code": "def _bit_list_to_bytes(bit_list):\n    num_bits = len(bit_list)\n    byte_vals = bytearray()\n    for start in six.moves.xrange(0, num_bits, 8):\n        curr_bits = bit_list[start:(start + 8)]\n        char_val = sum(((val * digit) for (val, digit) in six.moves.zip(_POW2, curr_bits)))\n        byte_vals.append(char_val)\n    return bytes(byte_vals)", "docstring": "Converts an iterable of 1s and 0s to bytes.\n\nCombines the list 8 at a time, treating each group of 8 bits\nas a single byte.\n\nArgs:\nbit_list (Sequence): Sequence of 1s and 0s.\n\nReturns:\nbytes: The decoded bytes.", "source": "codesearchnet"}
{"code": "def _assert_splits_match(nested_splits_lists):\n    error_msg = 'Inputs must have identical ragged splits'\n    for splits_list in nested_splits_lists:\n        if len(splits_list) != len(nested_splits_lists[0]):\n            raise ValueError(error_msg)\n    return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]", "docstring": "Checks that the given splits lists are identical.\n\nPerforms static tests to ensure that the given splits lists are identical,\nand returns a list of control dependency op tensors that check that they are\nfully identical.\n\nArgs:\nnested_splits_lists: A list of nested_splits_lists, where each split_list is\na list of `splits` tensors from a `RaggedTensor`, ordered from outermost\nragged dimension to innermost ragged dimension.\n\nReturns:\nA list of control dependency op tensors.\nRaises:\nValueError: If the splits are not identical.", "source": "github-repos"}
{"code": "def load_fasta_file(filename):\n    with open(filename, 'r') as handle:\n        records = list(SeqIO.parse(handle, 'fasta'))\n    return records", "docstring": "Load a FASTA file and return the sequences as a list of SeqRecords\n\nArgs:\nfilename (str): Path to the FASTA file to load\n\nReturns:\nlist: list of all sequences in the FASTA file as Biopython SeqRecord objects", "source": "codesearchnet"}
{"code": "def _timestamp_query_param_from_json(value, field):\n    \n    if _not_null(value, field):\n        \n        \n        \n        value = value.replace(\" \", \"T\", 1)\n        \n        value = value.replace(\"Z\", \"\")\n        value = value.replace(\"+00:00\", \"\")\n\n        if \".\" in value:\n            \n            return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU).replace(\n                tzinfo=UTC\n            )\n        else:\n            \n            return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION).replace(\n                tzinfo=UTC\n            )\n    else:\n        return None", "docstring": "Coerce 'value' to a datetime, if set or not nullable.\n\nArgs:\nvalue (str): The timestamp.\nfield (.SchemaField): The field corresponding to the value.\n\nReturns:\nOptional[datetime.datetime]: The parsed datetime object from\n``value`` if the ``field`` is not null (otherwise it is\n:data:`None`).", "source": "juraj-google-style"}
{"code": "def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None):\n    variable_type = entities.Variable.Type.INTEGER\n    return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)", "docstring": "Returns value for a certain integer variable attached to a feature flag.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nInteger value of the variable. None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "codesearchnet"}
{"code": "def __init__(self, server, sock, makefile=MakeFile):\n        \n        self.server = server\n        self.socket = sock\n        self.rfile = makefile(sock, 'rb', self.rbufsize)\n        self.wfile = makefile(sock, 'wb', self.wbufsize)\n        self.requests_seen = 0\n\n        self.peercreds_enabled = self.server.peercreds_enabled\n        self.peercreds_resolve_enabled = self.server.peercreds_resolve_enabled\n\n        \n        \n        self.resolve_peer_creds = (\n            lru_cache(maxsize=1)(self.resolve_peer_creds)\n        )\n        self.get_peer_creds = (\n            lru_cache(maxsize=1)(self.get_peer_creds)\n        )", "docstring": "Initialize HTTPConnection instance.\n\nArgs:\nserver (HTTPServer): web server object receiving this request\nsocket (socket._socketobject): the raw socket object (usually\nTCP) for this connection\nmakefile (file): a fileobject class for reading from the socket", "source": "juraj-google-style"}
{"code": "def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:\n    x, y, z = torch.unbind(t, dim=-1)\n    return torch.stack([r[..., 0, 0] * x + r[..., 0, 1] * y + r[..., 0, 2] * z, r[..., 1, 0] * x + r[..., 1, 1] * y + r[..., 1, 2] * z, r[..., 2, 0] * x + r[..., 2, 1] * y + r[..., 2, 2] * z], dim=-1)", "docstring": "Applies a rotation to a vector. Written out by hand to avoid transfer to avoid AMP downcasting.\n\nArgs:\nr: [*, 3, 3] rotation matrices\nt: [*, 3] coordinate tensors\nReturns:\n[*, 3] rotated coordinates", "source": "github-repos"}
{"code": "def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n    lr_lambda = partial(_get_linear_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)\n    return LambdaLR(optimizer, lr_lambda, last_epoch)", "docstring": "Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\na warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.\n\nArgs:\noptimizer ([`~torch.optim.Optimizer`]):\nThe optimizer for which to schedule the learning rate.\nnum_warmup_steps (`int`):\nThe number of steps for the warmup phase.\nnum_training_steps (`int`):\nThe total number of training steps.\nlast_epoch (`int`, *optional*, defaults to -1):\nThe index of the last epoch when resuming training.\n\nReturn:\n`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.", "source": "github-repos"}
{"code": "def render_unregistered(error=None):\n    return template(read_index_template(), registered=False, error=error, seeder_data=None, url_id=None)", "docstring": "Render template file for the unregistered user.\n\nArgs:\nerror (str, default None): Optional error message.\n\nReturns:\nstr: Template filled with data.", "source": "codesearchnet"}
{"code": "def func_dump(func):\n    if os.name == 'nt':\n        raw_code = marshal.dumps(func.__code__).replace(b'\\\\', b'/')\n        code = codecs.encode(raw_code, 'base64').decode('ascii')\n    else:\n        raw_code = marshal.dumps(func.__code__)\n        code = codecs.encode(raw_code, 'base64').decode('ascii')\n    defaults = func.__defaults__\n    if func.__closure__:\n        closure = tuple((c.cell_contents for c in func.__closure__))\n    else:\n        closure = None\n    return (code, defaults, closure)", "docstring": "Serializes a user defined function.\n\nArgs:\nfunc: the function to serialize.\n\nReturns:\nA tuple `(code, defaults, closure)`.", "source": "github-repos"}
{"code": "def BasenamePath(self, path):\n    if path.endswith(self.PATH_SEPARATOR):\n        path = path[:(- 1)]\n    (_, _, basename) = path.rpartition(self.PATH_SEPARATOR)\n    return basename", "docstring": "Determines the basename of the path.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: basename of the path.", "source": "codesearchnet"}
{"code": "def output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)", "docstring": "Returns the shape of each component of an element of this iterator.\n\nReturns:\nA nested structure of `tf.TensorShape` objects corresponding to each\ncomponent of an element of this dataset.", "source": "github-repos"}
{"code": "def setup_data_stream(\n            self,\n            connection_factory: Callable[[tuple], Connection],\n            data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \\\n            DataStream:\n        \n        yield from self._control_stream.write_command(Command('TYPE', 'I'))\n        reply = yield from self._control_stream.read_reply()\n\n        self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)\n\n        address = yield from self.passive_mode()\n\n        connection = yield from connection_factory(address)\n\n        \n        \n        connection.reset()\n\n        yield from connection.connect()\n\n        data_stream = data_stream_factory(connection)\n\n        return data_stream", "docstring": "Create and setup a data stream.\n\nThis function will set up passive and binary mode and handle\nconnecting to the data connection.\n\nArgs:\nconnection_factory: A coroutine callback that returns a connection\ndata_stream_factory: A callback that returns a data stream\n\nCoroutine.\n\nReturns:\nDataStream", "source": "juraj-google-style"}
{"code": "def render(self, program: moderngl.Program, mode=None, vertices=-1, first=0, instances=1):\n        \n        vao = self.instance(program)\n\n        if mode is None:\n            mode = self.mode\n\n        vao.render(mode, vertices=vertices, first=first, instances=instances)", "docstring": "Render the VAO.\n\nArgs:\nprogram: The ``moderngl.Program``\n\nKeyword Args:\nmode: Override the draw mode (``TRIANGLES`` etc)\nvertices (int): The number of vertices to transform\nfirst (int): The index of the first vertex to start with\ninstances (int): The number of instances", "source": "juraj-google-style"}
{"code": "def getText(page, output='text'):\n    CheckParent(page)\n    dl = page.getDisplayList()\n    formats = ('text', 'html', 'json', 'xml', 'xhtml', 'dict', 'rawdict')\n    images = (0, 1, 1, 0, 1, 1, 1)\n    try:\n        f = formats.index(output.lower())\n    except:\n        f = 0\n    flags = (TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE)\n    if images[f]:\n        flags |= TEXT_PRESERVE_IMAGES\n    tp = dl.getTextPage(flags)\n    t = tp._extractText(f)\n    del dl\n    del tp\n    return t", "docstring": "Extract a document page's text.\n\nArgs:\noutput: (str) text, html, dict, json, rawdict, xhtml or xml.\n\nReturns:\nthe output of TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or extractXML respectively. Default and misspelling choice is \"text\".", "source": "codesearchnet"}
{"code": "def price(self, valuation_date, market, model=None, pricing_context=None, name=None):\n    model = model or rc.InterestRateModelType.LOGNORMAL_RATE\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        valuation_date = dates.convert_to_date_tensor(valuation_date)\n        if model == rc.InterestRateModelType.LOGNORMAL_RATE:\n            caplet_prices = self._price_lognormal_rate(valuation_date, market, pricing_context)\n        else:\n            raise ValueError(f'Unsupported model {model}.')\n        return tf.math.segment_sum(caplet_prices, self._contract_index)", "docstring": "Returns the present value of the Cap/Floor on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the Cap/Floor.\nmodel: An optional input of type `InterestRateModelType` to specify which\nmodel to use for pricing.\nDefault value: `None` in which case `LOGNORMAL_RATE` model is used.\npricing_context: An optional input to provide additional parameters (such\nas model parameters) relevant for pricing.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to `\"price\"`.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each cap\n(or floor) based on the input market data.\n\nRaises:\nValueError: If an unsupported model is supplied to the function.", "source": "github-repos"}
{"code": "def _ReadBooleanDataTypeDefinition(\n      self, definitions_registry, definition_values, definition_name,\n      is_member=False):\n    \n    return self._ReadFixedSizeDataTypeDefinition(\n        definitions_registry, definition_values,\n        data_types.BooleanDefinition, definition_name,\n        self._SUPPORTED_ATTRIBUTES_BOOLEAN, is_member=is_member,\n        supported_size_values=(1, 2, 4))", "docstring": "Reads a boolean data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nBooleanDataTypeDefinition: boolean data type definition.", "source": "juraj-google-style"}
{"code": "def list_inputs(self, args, screen_info=None):\n    _ = screen_info\n    parsed = self._arg_parsers['list_inputs'].parse_args(args)\n    output = self._list_inputs_or_outputs(parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=False)\n    node_name = debug_graphs.get_node_name(parsed.node_name)\n    _add_main_menu(output, node_name=node_name, enable_list_inputs=False)\n    return output", "docstring": "Command handler for inputs.\n\nShow inputs to a given node.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines object.", "source": "github-repos"}
{"code": "def get_assigned_value(self, name):\n        \n        message_type = type(self)\n        try:\n            field = message_type.field_by_name(name)\n        except KeyError:\n            raise AttributeError('Message %s has no field %s' % (\n                message_type.__name__, name))\n        return self.__tags.get(field.number)", "docstring": "Get the assigned value of an attribute.\n\nGet the underlying value of an attribute. If value has not\nbeen set, will not return the default for the field.\n\nArgs:\nname: Name of attribute to get.\n\nReturns:\nValue of attribute, None if it has not been set.", "source": "juraj-google-style"}
{"code": "def image_data_format():\n    return _IMAGE_DATA_FORMAT", "docstring": "Returns the default image data format convention.\n\nReturns:\nA string, either `'channels_first'` or `'channels_last'`\n\nExample:\n>>> tf.keras.backend.image_data_format()\n'channels_last'", "source": "github-repos"}
{"code": "def FormatTime(fmt, stime = None):\n  \n  precondition.AssertType(fmt, str)\n  precondition.AssertOptionalType(stime, time.struct_time)\n\n  \n  \n  \n  \n  if stime is None:\n    strftime = time.strftime\n  else:\n    strftime = lambda fmt: time.strftime(fmt, stime)\n\n  if PY2:\n    return strftime(fmt.encode(\"ascii\")).decode(\"ascii\")\n  else:\n    return strftime(fmt)", "docstring": "A compatibility wrapper for the `strftime` function.\n\nIt is guaranteed to always take unicode string as an argument and return an\nunicode string as a result.\n\nArgs:\nfmt: A format string specifying formatting of the output.\nstime: A time representation as returned by `gmtime` or `localtime`.\n\nReturns:\nA human-readable representation of `stime`.", "source": "juraj-google-style"}
{"code": "def insert_bytes(fobj, size, offset, BUFFER_SIZE=(2 ** 16)):\n    if ((size < 0) or (offset < 0)):\n        raise ValueError\n    fobj.seek(0, 2)\n    filesize = fobj.tell()\n    movesize = (filesize - offset)\n    if (movesize < 0):\n        raise ValueError\n    resize_file(fobj, size, BUFFER_SIZE)\n    if (mmap is not None):\n        try:\n            mmap_move(fobj, (offset + size), offset, movesize)\n        except mmap.error:\n            fallback_move(fobj, (offset + size), offset, movesize, BUFFER_SIZE)\n    else:\n        fallback_move(fobj, (offset + size), offset, movesize, BUFFER_SIZE)", "docstring": "Insert size bytes of empty space starting at offset.\n\nfobj must be an open file object, open rb+ or\nequivalent. Mutagen tries to use mmap to resize the file, but\nfalls back to a significantly slower method if mmap fails.\n\nArgs:\nfobj (fileobj)\nsize (int): The amount of space to insert\noffset (int): The offset at which to insert the space\nRaises:\nIOError", "source": "codesearchnet"}
{"code": "def get_jobs(self, name=None):\n    if self.applicationResource:\n        return self._get_elements(self.jobs, 'jobs', Job, None, name)\n    else:\n        return []", "docstring": "Retrieves jobs running on this resource in its instance.\n\nArgs:\nname (str, optional): Only return jobs containing property **name** that matches `name`. `name` can be a\nregular expression. If `name` is not supplied, then all jobs are returned.\n\nReturns:\nlist(Job): A list of jobs matching the given `name`.\n\n.. note:: If ``applicationResource`` is `False` an empty list is returned.\n.. versionadded:: 1.9", "source": "codesearchnet"}
{"code": "def _load_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):\n\n    def select_fn(host_id):\n\n        def select_or_zeros(x):\n            if host_id >= len(x.variables):\n                return array_ops.zeros_like(x.variables[0])\n            return x.variables[host_id]\n        return select_or_zeros\n    for host_id, host in enumerate(hosts):\n        with ops.device(host):\n            host_variables = nest.map_structure(select_fn(host_id), variables)\n            for table in table_config:\n                table.optimizer._load()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config, **host_variables[table.name])\n                config = None", "docstring": "Load embedding tables to onto TPU for each table and host.\n\nArgs:\nconfig: A serialized TPUEmbeddingConfiguration proto.\nhosts: A list of CPU devices, on per host.\nvariables: A dictionary of dictionaries of TPUEmbeddingVariables. First key\nis the table name, second key is 'parameters' or the optimizer slot name.\ntable_config: A list of tf.tpu.experimental.embedding.TableConfig objects.", "source": "github-repos"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    date_time_values = self._CopyDateTimeFromString(time_string)\n\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n    microseconds = date_time_values.get('microseconds', None)\n\n    if year > 9999:\n      raise ValueError('Unsupported year value: {0:d}.'.format(year))\n\n    timestamp = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n\n    timestamp = float(timestamp) / definitions.SECONDS_PER_DAY\n    timestamp += self._DELPHI_TO_POSIX_BASE\n    if microseconds is not None:\n      timestamp += float(microseconds) / definitions.MICROSECONDS_PER_DAY\n\n    self._normalized_timestamp = None\n    self._timestamp = timestamp\n    self.is_local_time = False", "docstring": "Copies a Delphi TDateTime timestamp from a string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the time string is invalid or not supported.", "source": "juraj-google-style"}
{"code": "def _NewMatchSection(self, val):\n    section = {'criterion': val, 'config': {}}\n    self.matches.append(section)\n    self.section = section['config']\n    self.processor = self._ParseMatchGrp", "docstring": "Create a new configuration section for each match clause.\n\nEach match clause is added to the main config, and the criterion that will\ntrigger the match is recorded, as is the configuration.\n\nArgs:\nval: The value following the 'match' keyword.", "source": "codesearchnet"}
{"code": "def _GetTimestamps(self, olecf_item):\n    \n    if not olecf_item:\n      return None, None\n\n    try:\n      creation_time = olecf_item.get_creation_time_as_integer()\n    except OverflowError as exception:\n      logger.warning(\n          'Unable to read the creation time with error: {0!s}'.format(\n              exception))\n      creation_time = 0\n\n    try:\n      modification_time = olecf_item.get_modification_time_as_integer()\n    except OverflowError as exception:\n      logger.warning(\n          'Unable to read the modification time with error: {0!s}'.format(\n              exception))\n      modification_time = 0\n\n    \n    if not creation_time and not modification_time:\n      return None, None\n\n    \n    \n    if creation_time == 0xffffffffffffffff:\n      creation_time = 0\n\n    return creation_time, modification_time", "docstring": "Retrieves the timestamps from an OLECF item.\n\nArgs:\nolecf_item (pyolecf.item): OLECF item.\n\nReturns:\ntuple[int, int]: creation and modification FILETIME timestamp.", "source": "juraj-google-style"}
{"code": "def maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:\n  \n  flat_from = tf.nest.flatten(from_structure)\n  flat_to = tf.nest.flatten(to_structure)\n  if len(flat_from) == 1:\n    flat_from *= len(flat_to)\n  return tf.nest.pack_sequence_as(to_structure, flat_from)", "docstring": "Maybe broadcasts `from_structure` to `to_structure`.\n\nIf `from_structure` is a singleton, it is tiled to match the structure of\n`to_structure`. Note that the elements in `from_structure` are not copied if\nthis tiling occurs.\n\nArgs:\nfrom_structure: A structure.\nto_structure: A structure.\n\nReturns:\nnew_from_structure: Same structure as `to_structure`.", "source": "juraj-google-style"}
{"code": "def _on_disconnect(self, result):\n        \n\n        success, _, context = self._parse_return(result)\n\n        callback = context['callback']\n        connection_id = context['connection_id']\n        handle = context['handle']\n\n        callback(connection_id, self.id, success, \"No reason given\")\n        self._remove_connection(handle)", "docstring": "Callback called when disconnection command finishes\n\nArgs:\nresult (dict): result returned from diconnection command", "source": "juraj-google-style"}
{"code": "def get_vertex(self, key):\n    \n    if key in self.vertex_map:\n      return self.vertex_map[key]\n    vertex = self.new_vertex()\n    self.vertex_map[key] = vertex\n    return vertex", "docstring": "Returns or Creates a Vertex mapped by key.\n\nArgs:\nkey: A string reference for a vertex.  May refer to a new Vertex in which\ncase it will be created.\n\nReturns:\nA the Vertex mapped to by key.", "source": "juraj-google-style"}
{"code": "def tab(tab_name, element_list=None, section_list=None):\n    _tab = {'Type': 'Tab', 'Title': tab_name}\n    if (element_list is not None):\n        if isinstance(element_list, list):\n            _tab['Elements'] = element_list\n        else:\n            _tab['Elements'] = [element_list]\n    if (section_list is not None):\n        if isinstance(section_list, list):\n            _tab['Sections'] = section_list\n        elif ('Elements' not in section_list):\n            _tab['Elements'] = element_list\n        else:\n            _tab['Elements'].append(element_list)\n    return _tab", "docstring": "Returns a dictionary representing a new tab to display elements.\nThis can be thought of as a simple container for displaying multiple\ntypes of information.\n\nArgs:\ntab_name: The title to display\nelement_list: The list of elements to display. If a single element is\ngiven it will be wrapped in a list.\nsection_list: A list of sections to display.\n\nReturns:\nA dictionary with metadata specifying that it is to be rendered\nas a page containing multiple elements and/or tab.", "source": "codesearchnet"}
{"code": "def rotate_capture_handler_log(self, name):\n    for (sc_key, sc) in self._stream_capturers.iteritems():\n        for h in sc[0].capture_handlers:\n            if (h['name'] == name):\n                sc[0]._rotate_log(h)", "docstring": "Force a rotation of a handler's log file\n\nArgs:\nname:\nThe name of the handler whose log file should be rotated.", "source": "codesearchnet"}
{"code": "def _CreateBudget(client):\n  budget_service = client.GetService('BudgetService', version='v201809')\n\n  # NOTE: the unique name suffix below is a reconstruction; the original\n  # expression was truncated in this record. It assumes a module-level\n  # 'import uuid'.\n  operation = {\n      'operand': {\n          'name': 'Interplanetary Cruise Budget #%s' % uuid.uuid4(),\n          'deliveryMethod': 'STANDARD',\n          'amount': {\n              'microAmount': 500000\n          }\n      },\n      'operator': 'ADD'\n  }\n\n  budget = budget_service.mutate([operation])['value'][0]\n\n  print 'Budget with ID \"%d\" and name \"%s\" was created.' % (\n      budget['budgetId'], budget['name'])\n\n  return budget", "docstring": "Creates the budget.\n\nArgs:\nclient: an AdWordsClient instance.\n\nReturns:\na suds.sudsobject.Object representation of the created budget.", "source": "juraj-google-style"}
{"code": "def gene_panel(self, panel_id, version=None):\n    query = {'panel_name': panel_id}\n    if version:\n        LOG.info('Fetch gene panel {0}, version {1} from database'.format(panel_id, version))\n        query['version'] = version\n        return self.panel_collection.find_one(query)\n    else:\n        LOG.info('Fetching gene panels %s from database', panel_id)\n        res = self.panel_collection.find(query).sort('version', (- 1))\n        if (res.count() > 0):\n            return res[0]\n        else:\n            LOG.info('No gene panel found')\n            return None", "docstring": "Fetch a gene panel.\n\nIf no panel is sent return all panels\n\nArgs:\npanel_id (str): unique id for the panel\nversion (str): version of the panel. If 'None' latest version will be returned\n\nReturns:\ngene_panel: gene panel object", "source": "codesearchnet"}
{"code": "def equal_to_current(cls, json, fields_to_ignore=('id', 'change_date', 'changed_by')):\n    info = model_meta.get_field_info(cls)\n    for (field_name, relation_info) in info.relations.items():\n        if (relation_info.to_many and (field_name in json)):\n            json.pop(field_name)\n    new_instance = cls(**json)\n    key_field_args = tuple((getattr(new_instance, key) for key in cls.KEY_FIELDS))\n    current = cls.current(*key_field_args)\n    if (current.id is not None):\n        return current.fields_equal(new_instance, fields_to_ignore)\n    return False", "docstring": "Compares for equality this instance to a model instance constructed from the supplied JSON.\nThis will ignore any fields in `fields_to_ignore`.\n\nNote that this method cannot handle fields with many-to-many associations, as those can only\nbe set on a saved model instance (and saving the model instance will create a new entry).\nAll many-to-many field entries will be removed before the equality comparison is done.\n\nArgs:\njson: json representing an entry to compare\nfields_to_ignore: List of fields that should not be compared for equality. By default\nincludes `id`, `change_date`, and `changed_by`.\n\nReturns: True if the checked fields are all equivalent, else False", "source": "codesearchnet"}
{"code": "class PerceiverMultimodalPreprocessor(AbstractPreprocessor):\n\n    def __init__(self, modalities: Mapping[str, PreprocessorType], mask_probs: Optional[Mapping[str, float]]=None, min_padding_size: int=2):\n        super().__init__()\n        self.modalities = nn.ModuleDict(modalities)\n        self.min_padding_size = min_padding_size\n        self.mask_probs = mask_probs if mask_probs is not None else {}\n        self.padding = nn.ParameterDict({modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels)) for modality, preprocessor in modalities.items()})\n        self.mask = nn.ParameterDict({modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()})\n\n    @property\n    def num_channels(self) -> int:\n        max_channel_size = max((processor.num_channels for _, processor in self.modalities.items()))\n        common_channel_size = max_channel_size + self.min_padding_size\n        return common_channel_size\n\n    def forward(self, inputs: Mapping[str, torch.Tensor], pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False) -> PreprocessorOutputType:\n        padded = {}\n        modality_sizes = {}\n        inputs_without_pos = {}\n        for modality, preprocessor in self.modalities.items():\n            output, _, inputs_without_pos[modality] = preprocessor(inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d)\n            batch_size, num_samples, num_channels = output.shape\n            pos_enc = self.padding[modality].expand(batch_size, -1, -1)\n            padding = torch.broadcast_to(pos_enc, [batch_size, num_samples, self.num_channels - num_channels])\n            output_padded = torch.cat([output, padding], dim=2)\n            if modality in self.mask_probs:\n                mask_token = self.mask[modality].expand(batch_size, -1, -1)\n                mask_prob = self.mask_probs[modality]\n                mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))\n                mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)\n                output_padded = (1 - mask) * output_padded + mask * mask_token\n            padded[modality] = output_padded\n            modality_sizes[modality] = output_padded.shape[1]\n        padded_ls = [padded[k] for k in sorted(padded.keys())]\n        final_inputs = torch.cat(padded_ls, dim=1)\n        return (final_inputs, modality_sizes, inputs_without_pos)", "docstring": "Multimodal preprocessing for Perceiver Encoder.\n\nInputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number\nof channels.\n\nArgs:\nmodalities (`Mapping[str, PreprocessorType]`):\nDict mapping modality name to preprocessor.\nmask_probs (`Dict[str, float]`):\nDict mapping modality name to masking probability of that modality.\nmin_padding_size (`int`, *optional*, defaults to 2):\nThe minimum padding size for all modalities. The final output will have num_channels equal to the maximum\nchannels across all modalities plus min_padding_size.", "source": "github-repos"}
{"code": "def _project_observable(self, input_key: str, input_observable: Any, get_hist_args: Dict[(str, Any)]=None, projection_name_args: Dict[(str, Any)]=None, **kwargs) -> Hist:\n    if (get_hist_args is None):\n        get_hist_args = copy.deepcopy(kwargs)\n    if (projection_name_args is None):\n        projection_name_args = copy.deepcopy(kwargs)\n    get_hist_args.update({'observable': input_observable})\n    hist = self.get_hist(**get_hist_args)\n    projection_name_args.update(self.projection_information)\n    projection_name_args.update(kwargs)\n    projection_name_args.update({'input_key': input_key, 'input_observable': input_observable, 'input_hist': hist})\n    projection_name = self.projection_name(**projection_name_args)\n    logger.debug(f'hist: {hist}')\n    for axis in self.additional_axis_cuts:\n        logger.debug(f'Apply additional axis hist range: {axis.name}')\n        axis.apply_range_set(hist)\n    if (self.projection_dependent_cut_axes == []):\n        self.projection_dependent_cut_axes.append([])\n    duplicated_axes = [PDCA for PA in self.projection_axes for PDCA_group in self.projection_dependent_cut_axes for PDCA in PDCA_group if (PDCA.axis_type == PA.axis_type)]\n    if duplicated_axes:\n        raise ValueError(f'Axis {duplicated_axes} is in the projection axes and the projection dependent cut axes. This configuration is not allowed, as the range in the PDCA will be overwritten by the projection axes! Please revise your configuration.')\n    hists = []\n    for (i, axes) in enumerate(self.projection_dependent_cut_axes):\n        for axis in axes:\n            logger.debug(f'Apply projection dependent hist range: {axis.name}')\n            axis.apply_range_set(hist)\n        projected_hist = self.call_projection_function(hist)\n        projected_hist.SetName(f'{projection_name}_{i}')\n        hists.append(projected_hist)\n        self.cleanup_cuts(hist, cut_axes=axes)\n    self.cleanup_cuts(hist, cut_axes=self.additional_axis_cuts)\n    output_hist = hists[0]\n    for temp_hist in hists[1:]:\n        output_hist.Add(temp_hist)\n    output_hist.SetName(projection_name)\n    output_hist.SetDirectory(0)\n    return (output_hist, projection_name, projection_name_args)", "docstring": "Perform a projection for a single observable.\n\nNote:\nAll cuts on the original histograms will be reset when this function is completed.\n\nArgs:\ninput_key: Key to describe the input observable.\ninput_observable: Observable to project from.\nget_hist_args: Arguments to pass to ``get_hist(...)``. Made available so the args can be cached\nto avoid a ``deepcopy`` when looping. Default: None. In this case, they will be retrieved\nautomatically.\nprojection_name_args: Arguments to pass to ``projection_name(...)``. Made available so the args\ncan be cached to avoid a ``deepcopy`` when looping. Default: None. In this case, they will be\nretrieved automatically.\nkwargs: Additional named args to be passed to projection_name(...) and output_key_name(...).\nReturns:\nThe projected histogram.", "source": "codesearchnet"}
{"code": "def get(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):\n    if (user_agent or user_agent_config_yaml or ('user_agent' in UserAgent._environment_variables(**kwargs))):\n        return UserAgent._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)\n    if cls.user_agent:\n        return cls.user_agent\n    else:\n        raise UserAgentError('You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!')", "docstring": "Get full user agent string from parameters if supplied falling back on global user agent if set.\n\nArgs:\nuser_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.\nuser_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.\nuser_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.\n\nReturns:\nstr: Full user agent string", "source": "codesearchnet"}
{"code": "def bin_to_mac(bin, size=6):\n    \n    if len(bin) != size:\n        raise Exception(\"Invalid MAC address: %s\" % (bin))\n    return ':'.join([binascii.hexlify(o) for o in bin])", "docstring": "Convert 6 bytes into a MAC string.\n\nArgs:\nbin (str): hex string of length 6.\n\nReturns:\nstr: String representation of the MAC address in lower case.\n\nRaises:\nException: if ``len(bin)`` is not 6.", "source": "juraj-google-style"}
{"code": "def _get_prop_from_modelclass(modelclass, name):\n    if (name == '__key__'):\n        return modelclass._key\n    parts = name.split('.')\n    (part, more) = (parts[0], parts[1:])\n    prop = modelclass._properties.get(part)\n    if (prop is None):\n        if issubclass(modelclass, model.Expando):\n            prop = model.GenericProperty(part)\n        else:\n            raise TypeError(('Model %s has no property named %r' % (modelclass._get_kind(), part)))\n    while more:\n        part = more.pop(0)\n        if (not isinstance(prop, model.StructuredProperty)):\n            raise TypeError(('Model %s has no property named %r' % (modelclass._get_kind(), part)))\n        maybe = getattr(prop, part, None)\n        if (isinstance(maybe, model.Property) and (maybe._name == part)):\n            prop = maybe\n        else:\n            maybe = prop._modelclass._properties.get(part)\n            if (maybe is not None):\n                prop = getattr(prop, maybe._code_name)\n            elif (issubclass(prop._modelclass, model.Expando) and (not more)):\n                prop = model.GenericProperty()\n                prop._name = name\n            else:\n                raise KeyError(('Model %s has no property named %r' % (prop._modelclass._get_kind(), part)))\n    return prop", "docstring": "Helper for FQL parsing to turn a property name into a property object.\n\nArgs:\nmodelclass: The model class specified in the query.\nname: The property name.  This may contain dots which indicate\nsub-properties of structured properties.\n\nReturns:\nA Property object.\n\nRaises:\nKeyError if the property doesn't exist and the model class doesn't\nderive from Expando.", "source": "codesearchnet"}
{"code": "def _retrieve_problem(self, id_):\n        \n        future = Future(self, id_, self.return_matrix, None)\n        self.client._poll(future)\n        return future", "docstring": "Resume polling for a problem previously submitted.\n\nArgs:\nid_: Identification of the query.\n\nReturns:\n:obj: `Future`", "source": "juraj-google-style"}
{"code": "def to_CAG_agraph(self):\n    CAG = self.to_CAG()\n    A = nx.nx_agraph.to_agraph(CAG)\n    A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo'})\n    # NOTE: the original hex color values were truncated in this record; the\n    # values below are placeholders that restore valid syntax.\n    A.node_attr.update({'shape': 'rectangle', 'color': '#650021'})\n    A.edge_attr.update({'color': '#650021'})\n    return A", "docstring": "Returns a variable-only view of the GrFN in the form of an AGraph.\n\nReturns:\ntype: A CAG constructed via variable influence in the GrFN object.", "source": "codesearchnet"}
{"code": "def _blocking_poll(self, timeout=None):\n    if self._result_set:\n        return\n    retry_ = self._retry.with_deadline(timeout)\n    try:\n        retry_(self._done_or_raise)()\n    except exceptions.RetryError:\n        raise concurrent.futures.TimeoutError('Operation did not complete within the designated timeout.')", "docstring": "Poll and wait for the Future to be resolved.\n\nArgs:\ntimeout (int):\nHow long (in seconds) to wait for the operation to complete.\nIf None, wait indefinitely.", "source": "codesearchnet"}
{"code": "def insert_data(self, data, include_index=False, index_name=None):\n    max_rows_per_post = 500\n    post_interval = 0.05\n    if (not self.exists()):\n        raise Exception(('Table %s does not exist.' % self._full_name))\n    data_schema = _schema.Schema.from_data(data)\n    if isinstance(data, list):\n        if include_index:\n            if (not index_name):\n                index_name = 'Index'\n            data_schema._add_field(index_name, 'INTEGER')\n    table_schema = self.schema\n    for data_field in data_schema:\n        name = data_field.name\n        table_field = table_schema[name]\n        if (table_field is None):\n            raise Exception(('Table does not contain field %s' % name))\n        data_type = data_field.data_type\n        table_type = table_field.data_type\n        if (table_type != data_type):\n            raise Exception(('Field %s in data has type %s but in table has type %s' % (name, data_type, table_type)))\n    total_rows = len(data)\n    total_pushed = 0\n    job_id = uuid.uuid4().hex\n    rows = []\n    column_name_map = {}\n    is_dataframe = isinstance(data, pandas.DataFrame)\n    if is_dataframe:\n        gen = data.reset_index(drop=(not include_index)).iterrows()\n    else:\n        gen = enumerate(data)\n    for (index, row) in gen:\n        if is_dataframe:\n            row = row.to_dict()\n        elif include_index:\n            row[index_name] = index\n        rows.append({'json': self._encode_dict_as_row(row, column_name_map), 'insertId': (job_id + str(index))})\n        total_pushed += 1\n        if ((total_pushed == total_rows) or (len(rows) == max_rows_per_post)):\n            try:\n                response = self._api.tabledata_insert_all(self._name_parts, rows)\n            except Exception as e:\n                raise e\n            if ('insertErrors' in response):\n                raise Exception(('insertAll failed: %s' % response['insertErrors']))\n            time.sleep(post_interval)\n            rows = []\n    while True:\n        self._info = self._api.tables_get(self._name_parts)\n        if (('streamingBuffer' not in self._info) or ('estimatedRows' not in self._info['streamingBuffer']) or (int(self._info['streamingBuffer']['estimatedRows']) > 0)):\n            break\n        time.sleep(2)\n    return self", "docstring": "Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.\n\nThe insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per\nsecond, as BigQuery has some limits on streaming rates.\n\nArgs:\ndata: the DataFrame or list to insert.\ninclude_index: whether to include the DataFrame or list index as a column in the BQ table.\nindex_name: for a list, if include_index is True, this should be the name for the index.\nIf not specified, 'Index' will be used.\nReturns:\nThe table.\nRaises:\nException if the table doesn't exist, the table's schema differs from the data's schema,\nor the insert failed.", "source": "codesearchnet"}
{"code": "def create_file_writer_v2(logdir, max_queue=None, flush_millis=None, filename_suffix=None, name=None, experimental_trackable=False, experimental_mesh=None):\n    if logdir is None:\n        raise ValueError('Argument `logdir` cannot be None')\n    inside_function = ops.inside_function()\n    with ops.name_scope(name, 'create_file_writer') as scope, ops.device('cpu:0'):\n        with ops.init_scope():\n            if context.executing_eagerly():\n                _check_create_file_writer_args(inside_function, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix)\n            logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)\n            if max_queue is None:\n                max_queue = constant_op.constant(10)\n            if flush_millis is None:\n                flush_millis = constant_op.constant(2 * 60 * 1000)\n            if filename_suffix is None:\n                filename_suffix = constant_op.constant('.v2')\n\n            def create_fn():\n                if context.executing_eagerly():\n                    shared_name = context.anonymous_name()\n                else:\n                    shared_name = ops.name_from_scope_name(scope)\n                return gen_summary_ops.summary_writer(shared_name=shared_name, name=name)\n            init_op_fn = functools.partial(gen_summary_ops.create_summary_file_writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix)\n            if experimental_trackable:\n                return _TrackableResourceSummaryWriter(create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh)\n            else:\n                return _ResourceSummaryWriter(create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh)", "docstring": "Creates a summary file writer for the given log directory.\n\nArgs:\nlogdir: a string specifying the directory in which to write an event file.\nmax_queue: the largest number of summaries to keep in a queue; will flush\nonce the queue gets bigger than this. Defaults to 10.\nflush_millis: the largest interval between flushes. Defaults to 120,000.\nfilename_suffix: optional suffix for the event file name. Defaults to `.v2`.\nname: a name for the op that creates the writer.\nexperimental_trackable: a boolean that controls whether the returned writer\nwill be a `TrackableResource`, which makes it compatible with SavedModel\nwhen used as a `tf.Module` property.\nexperimental_mesh: a `tf.experimental.dtensor.Mesh` instance. When running\nwith DTensor, the mesh (experimental_mesh.host_mesh()) will be used for\nbringing all the DTensor logging from accelerator to CPU mesh.\n\nReturns:\nA SummaryWriter object.", "source": "github-repos"}
{"code": "def _extract_attrs(op, keys):\n    kwargs = {}\n    not_found = object()\n    for k in keys:\n        srcs = [getattr(op, k, not_found), getattr(op, '_' + k, not_found), getattr(op, 'parameters', {}).get(k, not_found)]\n        if any((v is not not_found for v in srcs)):\n            kwargs[k] = [v for v in srcs if v is not not_found][0]\n        else:\n            raise ValueError(f\"Could not determine an appropriate value for field `{k}` in object  `{op}`. Looked for \\n 1. an attr called `{k}`,\\n 2. an attr called `_{k}`,\\n 3. an entry in `op.parameters` with key '{k}'.\")\n        if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None:\n            if tensor_util.is_tensor(kwargs[k]):\n                static_val = tensor_util.constant_value(kwargs[k])\n                if static_val is not None:\n                    kwargs[k] = static_val\n        if isinstance(kwargs[k], (np.ndarray, np.generic)):\n            kwargs[k] = kwargs[k].tolist()\n    return kwargs", "docstring": "Extract constructor kwargs to reconstruct `op`.\n\nArgs:\nop: A `LinearOperator` instance.\nkeys: A Python `tuple` of strings indicating the names of the constructor\nkwargs to extract from `op`.\n\nReturns:\nkwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`.", "source": "github-repos"}
{"code": "def _normalize_edge(self, edge: EDGE) -> EDGE:\n        \n\n        def lower(n: GridQubit, m: GridQubit) -> bool:\n            return n.row < m.row or (n.row == m.row and n.col < m.col)\n\n        n1, n2 = edge\n        return (n1, n2) if lower(n1, n2) else (n2, n1)", "docstring": "Gives unique representative of the edge.\n\nTwo edges are equivalent if they form an edge between the same nodes.\nThis method returns representative of this edge which can be compared\nusing equality operator later.\n\nArgs:\nedge: Edge to normalize.\n\nReturns:\nNormalized edge with lexicographically lower node on the first\nposition.", "source": "juraj-google-style"}
{"code": "def run(\n        self,\n        inputs: Dict[str, Union[float, Iterable]],\n        covers: Dict[str, Union[float, Iterable]],\n        torch_size: Optional[int] = None,\n    ) -> Union[float, Iterable]:\n        \n        \n        if len(covers) != len(self.cover_nodes):\n            raise ValueError(\"Incorrect number of cover values.\")\n\n        \n        for node_name, val in covers.items():\n            self.nodes[node_name][\"value\"] = val\n\n        return super().run(inputs, torch_size)", "docstring": "Executes the FIB over a particular set of inputs and returns the\nresult.\nArgs:\ninputs: Input set where keys are the names of input nodes in the\nGrFN and each key points to a set of input values (or just one).\nReturns:\nA set of outputs from executing the GrFN, one for every set of\ninputs.", "source": "juraj-google-style"}
{"code": "def add_work_item(self, work_item):\n        \n        with self._conn:\n            self._conn.execute(\n                , _work_item_to_row(work_item))", "docstring": "Add a WorkItems.\n\nArgs:\nwork_item: A WorkItem.", "source": "juraj-google-style"}
{"code": "def from_http_status(status_code, message, **kwargs):\n    \n    error_class = exception_class_for_http_status(status_code)\n    error = error_class(message, **kwargs)\n\n    if error.code is None:\n        error.code = status_code\n\n    return error", "docstring": "Create a :class:`GoogleAPICallError` from an HTTP status code.\n\nArgs:\nstatus_code (int): The HTTP status code.\nmessage (str): The exception message.\nkwargs: Additional arguments passed to the :class:`GoogleAPICallError`\nconstructor.\n\nReturns:\nGoogleAPICallError: An instance of the appropriate subclass of\n:class:`GoogleAPICallError`.", "source": "juraj-google-style"}
{"code": "def get_accounts(self, provider='aws'):\n    url = '{gate}/credentials'.format(gate=API_URL)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n    all_accounts = response.json()\n    self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n    filtered_accounts = []\n    for account in all_accounts:\n        if (account['type'] == provider):\n            filtered_accounts.append(account)\n    if (not filtered_accounts):\n        raise ForemastError('No Accounts matching {0}.'.format(provider))\n    return filtered_accounts", "docstring": "Get Accounts added to Spinnaker.\n\nArgs:\nprovider (str): What provider to find accounts for.\n\nReturns:\nlist: list of dicts of Spinnaker credentials matching _provider_.\n\nRaises:\nAssertionError: Failure getting accounts from Spinnaker.", "source": "codesearchnet"}
{"code": "def _send(self, **req_kwargs):\n    auth_token = self._auth.getAuthToken()\n    if (auth_token is None):\n        raise exception.LoginException('Not logged in')\n    req_kwargs.setdefault('headers', {'Authorization': ('OAuth ' + auth_token)})\n    return self._session.request(**req_kwargs)", "docstring": "Send an authenticated request to a Google API.\n\nArgs:\n**req_kwargs: Arbitrary keyword arguments to pass to Requests.\n\nReturn:\nrequests.Response: The raw response.\n\nRaises:\nLoginException: If :py:meth:`login` has not been called.", "source": "codesearchnet"}
{"code": "def predict(self, x_test):\n    if self.model:\n        lengths = map(len, x_test)\n        x_test = self.p.transform(x_test)\n        y_pred = self.model.predict(x_test)\n        y_pred = self.p.inverse_transform(y_pred, lengths)\n        return y_pred\n    else:\n        raise OSError('Could not find a model. Call load(dir_path).')", "docstring": "Returns the prediction of the model on the given test data.\n\nArgs:\nx_test : array-like, shape = (n_samples, sent_length)\nTest samples.\n\nReturns:\ny_pred : array-like, shape = (n_samples, sent_length)\nPrediction labels for x.", "source": "codesearchnet"}
{"code": "def _save_work_results(self, run_stats, scores, num_processed_images, filename):\n    with open(filename, 'w') as f:\n        writer = csv.writer(f)\n        writer.writerow(['SubmissionID', 'ExternalSubmissionId', 'Score', 'CompletedBatches', 'BatchesWithError', 'ProcessedImages', 'MinEvalTime', 'MaxEvalTime', 'MedianEvalTime', 'MeanEvalTime', 'ErrorMsg'])\n        for submission_id in sorted(iterkeys(run_stats)):\n            stat = run_stats.get(submission_id, collections.defaultdict((lambda : float('NaN'))))\n            external_id = self.submissions.get_external_id(submission_id)\n            error_msg = ''\n            while ((not error_msg) and stat['error_messages']):\n                error_msg = stat['error_messages'].pop()\n                if error_msg.startswith('Cant copy adversarial batch locally'):\n                    error_msg = ''\n            writer.writerow([submission_id, external_id, scores.get(submission_id, None), stat['completed'], stat['num_errors'], num_processed_images.get(submission_id, None), stat['min_eval_time'], stat['max_eval_time'], stat['median_eval_time'], stat['mean_eval_time'], error_msg])", "docstring": "Saves statistics about each submission.\n\nSaved statistics include score; number of completed and failed batches;\nmin, max, average and median time needed to run one batch.\n\nArgs:\nrun_stats: dictionary with runtime statistics for submissions,\ncan be generated by WorkPiecesBase.compute_work_statistics\nscores: dictionary mapping submission ids to scores\nnum_processed_images: dictionary with number of successfully processed\nimages by each submission, one of the outputs of\nClassificationBatches.compute_classification_results\nfilename: output filename", "source": "codesearchnet"}
{"code": "def __init__(self, live_api_processor: live_model.LiveProcessor, chattiness: float=1.0, unsafe_string_list: list[str] | None=None):\n    self._processor = live_api_processor\n    self._chattiness = chattiness\n    self._commentator = CommentatorStateMachine()\n    self.ttfts = collections.deque(maxlen=50)\n    self._unsafe_string_list = unsafe_string_list\n    if unsafe_string_list is not None:\n        pattern = '|'.join((re.escape(s) for s in unsafe_string_list))\n        self._processor += text.MatchProcessor(pattern=pattern, substream_input='output_transcription', substream_output='unsafe_regex', remove_from_input_stream=False, flush_fn=lambda x: x.get_metadata('generation_complete') or x.get_metadata('interrupted') or x.get_metadata('interrupt_request') or x.get_metadata('turn_complete') or x.get_metadata('go_away'))", "docstring": "Initializes the processor.\n\nArgs:\nlive_api_processor: The live API processor to use.\nchattiness: Probability of triggering a comment when the model has\nfinished talking or every 3 seconds. Set to 0 to disable commenting.\nunsafe_string_list: The strings to use for unsafe content. If None, the\nprocessor will not block unsafe content. If set, the processor will\ninterrupt itself when it sees the string in the output.", "source": "github-repos"}
{"code": "def Create(path, password, generate_default_key=True):\n        \n        wallet = UserWallet(path=path, passwordKey=password, create=True)\n        if generate_default_key:\n            wallet.CreateKey()\n        return wallet", "docstring": "Create a new user wallet.\n\nArgs:\npath (str): A path indicating where to create or open the wallet e.g. \"/Wallets/mywallet\".\npassword (str): a 10 characters minimum password to secure the wallet with.\n\nReturns:\nUserWallet: a UserWallet instance.", "source": "juraj-google-style"}
{"code": "def CheckForHeaderGuard(filename, clean_lines, error):\n    raw_lines = clean_lines.lines_without_raw_strings\n    for i in raw_lines:\n        if Search('\n            return\n    for i in raw_lines:\n        if Search('^\\\\s*\n            return\n    cppvar = GetHeaderGuardCPPVariable(filename)\n    ifndef = ''\n    ifndef_linenum = 0\n    define = ''\n    endif = ''\n    endif_linenum = 0\n    for (linenum, line) in enumerate(raw_lines):\n        linesplit = line.split()\n        if (len(linesplit) >= 2):\n            if ((not ifndef) and (linesplit[0] == '\n                ifndef = linesplit[1]\n                ifndef_linenum = linenum\n            if ((not define) and (linesplit[0] == '\n                define = linesplit[1]\n        if line.startswith('\n            endif = line\n            endif_linenum = linenum\n    if ((not ifndef) or (not define) or (ifndef != define)):\n        error(filename, 0, 'build/header_guard', 5, ('No \n        return\n    if (ifndef != cppvar):\n        error_level = 0\n        if (ifndef != (cppvar + '_')):\n            error_level = 5\n        ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error)\n        error(filename, ifndef_linenum, 'build/header_guard', error_level, ('\n    ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error)\n    match = Match((('\n    if match:\n        if (match.group(1) == '_'):\n            error(filename, endif_linenum, 'build/header_guard', 0, ('\n        return\n    no_single_line_comments = True\n    for i in xrange(1, (len(raw_lines) - 1)):\n        line = raw_lines[i]\n        if Match('^(?:(?:\\\\\\'(?:\\\\.|[^\\\\\\'])*\\\\\\')|(?:\"(?:\\\\.|[^\"])*\")|[^\\\\\\'\"])*\n            no_single_line_comments = False\n            break\n    if no_single_line_comments:\n        match = Match((('\n        if match:\n            if (match.group(1) == '_'):\n                error(filename, endif_linenum, 'build/header_guard', 0, ('\n            return\n    error(filename, endif_linenum, 'build/header_guard', 5, ('", "docstring": "Checks that the file contains a header guard.\n\nLogs an error if no #ifndef header guard is present.  For other\nheaders, checks that the full pathname is used.\n\nArgs:\nfilename: The name of the C++ header file.\nclean_lines: A CleansedLines instance containing the file.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def indent_css(f, output):\n    line_count = get_line_count(f)\n    f = open(f, 'r+')\n    output = open(output, 'r+')\n    for line in range(line_count):\n        string = f.readline().rstrip()\n        if (len(string) > 0):\n            if (string[(- 1)] == ';'):\n                output.write((('    ' + string) + '\\n'))\n            else:\n                output.write((string + '\\n'))\n    output.close()\n    f.close()", "docstring": "Indentes css that has not been indented and saves it to a new file.\nA new file is created if the output destination does not already exist.\n\nArgs:\nf: string, path to file.\n\noutput: string, path/name of the output file (e.g. /directory/output.css).\nprint type(response.read())\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def compact(self, accumulator, *args, **kwargs):\n    return accumulator", "docstring": "Optionally returns a more compact representation of the accumulator.\n\nThis is called before an accumulator is sent across the wire, and can\nbe useful in cases where values are buffered or otherwise lazily\nkept unprocessed when added to the accumulator.  Should return an\nequivalent, though possibly modified, accumulator.\n\nBy default returns the accumulator unmodified.\n\nArgs:\naccumulator: the current accumulator\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"}
{"code": "def __init__(self, attention_logit_mod, name=\"attention\"):\n    \n    super(AttentiveRead, self).__init__(name=name)\n\n    self._attention_logit_mod = attention_logit_mod", "docstring": "Initialize AttentiveRead module.\n\nArgs:\nattention_logit_mod: Module that produces logit corresponding to a memory\nslot's compatibility. Must map a [batch_size * memory_size,\nmemory_word_size + query_word_size]-shaped Tensor to a\n[batch_size * memory_size, 1] shape Tensor.\nname: string. Name for module.", "source": "juraj-google-style"}
{"code": "def __init__(self, path):\n    \n    super(TaggingFile, self).__init__()\n    self._path = path", "docstring": "Initializes a tagging file.\n\nArgs:\npath (str): path to a file that contains one or more event tagging rules.", "source": "juraj-google-style"}
{"code": "def register_index(self, index):\n    \n\n    self._indexes[index._name] = index\n    self.create_index(index)\n    return index", "docstring": "Registers a given index:\n\n* Creates and opens an index for it (if it doesn't exist yet)\n* Sets some default values on it (unless they're already set)\n\nArgs:\nindex (PonyWhoosh.Index): An instance of PonyWhoosh.Index class", "source": "juraj-google-style"}
{"code": "def from_json(self, js, groups: Iterable[Group]):\n        \n        self.index = js[\"index\"]\n        self.groupIndex = js[\"groupIndex\"]\n        self.label = js[\"label\"]\n        self.functionalChannelType = FunctionalChannelType.from_str(\n            js[\"functionalChannelType\"], js[\"functionalChannelType\"]\n        )\n        self.groups = []\n        for id in js[\"groups\"]:\n            for g in groups:\n                if g.id == id:\n                    self.groups.append(g)\n                    break", "docstring": "this function will load the functional channel object\nfrom a json object and the given groups\n\nArgs:\njs(dict): the json object\ngroups(Iterable[Group]): the groups for referencing", "source": "juraj-google-style"}
{"code": "def iter_geno_marker(self, markers, return_index=False):\n    if (self._mode != 'r'):\n        raise UnsupportedOperation(\"not available in 'w' mode\")\n    if isinstance(markers, str):\n        markers = [markers]\n    if return_index:\n        for marker in markers:\n            (geno, seek) = self.get_geno_marker(marker, return_index=True)\n            (yield (marker, geno, seek))\n    else:\n        for marker in markers:\n            (yield (marker, self.get_geno_marker(marker)))", "docstring": "Iterates over genotypes for a list of markers.\n\nArgs:\nmarkers (list): The list of markers to iterate onto.\nreturn_index (bool): Wether to return the marker's index or not.\n\nReturns:\ntuple: The name of the marker as a string, and its genotypes as a\n:py:class:`numpy.ndarray` (additive format).", "source": "codesearchnet"}
{"code": "def assemble_schedules(schedules, qobj_id=None, qobj_header=None, run_config=None):\n    \n    qobj_config = QasmQobjConfig()\n    if run_config:\n        qobj_config = QasmQobjConfig(**run_config.to_dict())\n\n    \n    instruction_converter = PulseQobjConverter\n    instruction_converter = instruction_converter(PulseQobjInstruction, **run_config.to_dict())\n    lo_converter = LoConfigConverter(PulseQobjExperimentConfig, run_config.qubit_lo_freq,\n                                     run_config.meas_lo_freq, **run_config.to_dict())\n\n    \n    qobj_schedules = []\n    user_pulselib = set()\n    for idx, schedule in enumerate(schedules):\n        \n        qobj_instructions = []\n        \n        for shift, instruction in schedule.instructions:\n            \n            qobj_instructions.append(instruction_converter(shift, instruction))\n            if isinstance(instruction, PulseInstruction):\n                \n                user_pulselib.add(instruction.command)\n        \n        qobj_experiment_header = QobjExperimentHeader(\n            name=schedule.name or 'Experiment-%d' % idx\n        )\n\n        qobj_schedules.append({\n            'header': qobj_experiment_header,\n            'instructions': qobj_instructions\n        })\n\n    \n    run_config.pulse_library = [QobjPulseLibrary(name=pulse.name, samples=pulse.samples)\n                                for pulse in user_pulselib]\n\n    \n    experiments = []\n    if len(run_config.schedule_los) == 1:\n        lo_dict = run_config.schedule_los.pop()\n        \n        q_los = lo_converter.get_qubit_los(lo_dict)\n        if q_los:\n            run_config.qubit_lo_freq = q_los\n        m_los = lo_converter.get_meas_los(lo_dict)\n        if m_los:\n            run_config.meas_lo_freq = m_los\n\n    if run_config.schedule_los:\n        \n        if len(qobj_schedules) == 1:\n            \n            for lo_dict in run_config.schedule_los:\n                experiments.append(PulseQobjExperiment(\n                    instructions=qobj_schedules[0]['instructions'],\n                    experimentheader=qobj_schedules[0]['header'],\n                    experimentconfig=lo_converter(lo_dict)\n                ))\n        elif len(qobj_schedules) == len(run_config.schedule_los):\n            \n            for lo_dict, schedule in zip(run_config.schedule_los, qobj_schedules):\n                experiments.append(PulseQobjExperiment(\n                    instructions=schedule['instructions'],\n                    experimentheader=schedule['header'],\n                    experimentconfig=lo_converter(lo_dict)\n                ))\n        else:\n            raise QiskitError('Invalid LO setting is specified. 
'\n                              'The LO should be configured for each schedule, or '\n                              'single setup for all schedules (unique), or '\n                              'multiple setups for a single schedule (frequency sweep),'\n                              'or no LO configured at all.')\n    else:\n        \n        for schedule in qobj_schedules:\n            experiments.append(PulseQobjExperiment(\n                instructions=schedule['instructions'],\n                experimentheader=schedule['header'],\n            ))\n\n    qobj_config = PulseQobjConfig(**run_config.to_dict())\n\n    return PulseQobj(qobj_id=qobj_id,\n                     config=qobj_config,\n                     experiments=experiments,\n                     header=qobj_header)", "docstring": "Assembles a list of schedules into a qobj which can be run on the backend.\nArgs:\nschedules (list[Schedule]): schedules to assemble\nqobj_id (int): identifier for the generated qobj\nqobj_header (QobjHeader): header to pass to the results\nrun_config (RunConfig): configuration of the runtime environment\nReturns:\nPulseQobj: the Qobj to be run on the backends\nRaises:\nQiskitError: when invalid schedules or configs are provided", "source": "juraj-google-style"}
{"code": "def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):\n    \n    for permission in exceptions_dict:\n      if permission not in self._EXCEPTIONS_KEYS:\n        continue\n\n      exception_dict = exceptions_dict.get(permission, {})\n      for urls, url_dict in exception_dict.items():\n        last_used = url_dict.get('last_used', None)\n        if not last_used:\n          continue\n\n        \n        \n        \n        primary_url, secondary_url = urls.split(',')\n\n        event_data = ChromeContentSettingsExceptionsEventData()\n        event_data.permission = permission\n        event_data.primary_url = primary_url\n        event_data.secondary_url = secondary_url\n\n        timestamp = int(last_used * 1000000)\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n            timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts site specific events.\n\nArgs:\nexceptions_dict (dict): Permission exceptions data from Preferences file.\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def visualize_qualitative_analysis(inputs, model, samples=1, batch_size=3, length=8):\n    average = (lambda dist: tf.reduce_mean(input_tensor=dist.mean(), axis=0))\n    with tf.compat.v1.name_scope('val_reconstruction'):\n        reconstruct = functools.partial(model.reconstruct, inputs=inputs, samples=samples)\n        visualize_reconstruction(inputs, average(reconstruct()))\n        visualize_reconstruction(inputs, average(reconstruct(sample_static=True)), name='static_prior')\n        visualize_reconstruction(inputs, average(reconstruct(sample_dynamic=True)), name='dynamic_prior')\n        visualize_reconstruction(inputs, average(reconstruct(swap_static=True)), name='swap_static')\n        visualize_reconstruction(inputs, average(reconstruct(swap_dynamic=True)), name='swap_dynamic')\n    with tf.compat.v1.name_scope('generation'):\n        generate = functools.partial(model.generate, batch_size=batch_size, length=length, samples=samples)\n        image_summary(average(generate(fix_static=True)), 'fix_static')\n        image_summary(average(generate(fix_dynamic=True)), 'fix_dynamic')", "docstring": "Visualizes a qualitative analysis of a given model.\n\nArgs:\ninputs: A tensor of the original inputs, of shape [batch, timesteps,\nh, w, c].\nmodel: A DisentangledSequentialVAE model.\nsamples: Number of samples to draw from the latent distributions.\nbatch_size: Number of sequences to generate.\nlength: Number of timesteps to generate for each sequence.", "source": "codesearchnet"}
{"code": "def participants(self, **kwargs):\n    path = ('%s/%s/participants' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the participants.\n\nArgs:\nall (bool): If True, return all the items, without pagination\nper_page (int): Number of items to retrieve per request\npage (int): ID of the page to return (starts with page 1)\nas_list (bool): If set to False and no pagination option is\ndefined, return a generator instead of a list\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: The list of participants", "source": "codesearchnet"}
{"code": "def get_stdout(self, workflow_id, task_id):\n        \n        url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % {\n            'wf_url': self.workflows_url, 'wf_id': workflow_id, 'task_id': task_id\n        }\n        r = self.gbdx_connection.get(url)\n        r.raise_for_status()\n\n        return r.text", "docstring": "Get stdout for a particular task.\n\nArgs:\nworkflow_id (str): Workflow id.\ntask_id (str): Task id.\n\nReturns:\nStdout of the task (string).", "source": "juraj-google-style"}
{"code": "def _query_response_to_snapshot(response_pb, collection, expected_prefix):\n    if (not response_pb.HasField('document')):\n        return None\n    document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)\n    reference = collection.document(document_id)\n    data = _helpers.decode_dict(response_pb.document.fields, collection._client)\n    snapshot = document.DocumentSnapshot(reference, data, exists=True, read_time=response_pb.read_time, create_time=response_pb.document.create_time, update_time=response_pb.document.update_time)\n    return snapshot", "docstring": "Parse a query response protobuf to a document snapshot.\n\nArgs:\nresponse_pb (google.cloud.proto.firestore.v1beta1.\\\nfirestore_pb2.RunQueryResponse): A\ncollection (~.firestore_v1beta1.collection.CollectionReference): A\nreference to the collection that initiated the query.\nexpected_prefix (str): The expected prefix for fully-qualified\ndocument names returned in the query results. This can be computed\ndirectly from ``collection`` via :meth:`_parent_info`.\n\nReturns:\nOptional[~.firestore.document.DocumentSnapshot]: A\nsnapshot of the data returned in the query. If ``response_pb.document``\nis not set, the snapshot will be :data:`None`.", "source": "codesearchnet"}
{"code": "def group_device_names(devices, group_size):\n    num_devices = len(devices)\n    if (group_size > num_devices):\n        raise ValueError(('only %d devices, but group_size=%d' % (num_devices, group_size)))\n    num_groups = ((num_devices \n    groups = [[] for i in range(num_groups)]\n    for i in range(0, (num_groups * group_size)):\n        groups[(i % num_groups)].append(devices[(i % num_devices)])\n    return groups", "docstring": "Group device names into groups of group_size.\n\nArgs:\ndevices: list of strings naming devices.\ngroup_size: int >= 1\n\nReturns:\nlist of lists of devices, where each inner list is group_size long,\nand each device appears at least once in an inner list.  If\nlen(devices) % group_size = 0 then each device will appear\nexactly once.\n\nRaises:\nValueError: group_size > len(devices)", "source": "codesearchnet"}
{"code": "def Normal(cls, mean: 'TensorFluent', variance: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:\n    if (mean.scope != variance.scope):\n        raise ValueError('Normal distribution: parameters must have same scope!')\n    loc = mean.tensor\n    scale = tf.sqrt(variance.tensor)\n    dist = tf.distributions.Normal(loc, scale)\n    batch = (mean.batch or variance.batch)\n    if ((not batch) and (batch_size is not None)):\n        t = dist.sample(batch_size)\n        batch = True\n    else:\n        t = dist.sample()\n    scope = mean.scope.as_list()\n    return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Normal sampling op with given mean and variance.\n\nArgs:\nmean: The mean parameter of the Normal distribution.\nvariance: The variance parameter of the Normal distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Normal distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\nValueError: If parameters do not have the same scope.", "source": "codesearchnet"}
{"code": "def __init__(self, format_str=None, color=None, attrs=None):\n    self._format_str = format_str\n    self._color = color\n    self._attrs = attrs or []", "docstring": "Defines a set of attributes for a piece of text.\n\nArgs:\nformat_str: (str), string that will be used to format the text\nwith. For example '[{}]', to enclose text in brackets.\ncolor: (Colors), the color the text should be formatted with.\nattrs: (Attrs), the attributes to apply to text.", "source": "github-repos"}
{"code": "def getAll(self, event_name):\n    raw_events = self._callEventGetAll(self._id, event_name)\n    return [snippet_event.from_dict(msg) for msg in raw_events]", "docstring": "Gets all the events of a certain name that have been received so\nfar. This is a non-blocking call.\n\nArgs:\ncallback_id: The id of the callback.\nevent_name: string, the name of the event to get.\n\nReturns:\nA list of SnippetEvent, each representing an event from the Java\nside.", "source": "github-repos"}
{"code": "def make_list_of_audio(audio: Union[list[AudioInput], AudioInput]) -> AudioInput:\n    if isinstance(audio, (list, tuple)) and is_valid_list_of_audio(audio):\n        return audio\n    if is_valid_audio(audio):\n        return [audio]\n    raise ValueError('Invalid input type. Must be a single audio or a list of audio')", "docstring": "Ensure that the output is a list of audio.\nArgs:\naudio (`Union[List[AudioInput], AudioInput]`):\nThe input audio.\nReturns:\nlist: A list of audio.", "source": "github-repos"}
{"code": "def datasets_delete(self, dataset_name, delete_contents):\n    \n    url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)\n    args = {}\n    if delete_contents:\n      args['deleteContents'] = True\n    return datalab.utils.Http.request(url, method='DELETE', args=args,\n                                      credentials=self._credentials, raw_response=True)", "docstring": "Issues a request to delete a dataset.\n\nArgs:\ndataset_name: the name of the dataset to delete.\ndelete_contents: if True, any tables in the dataset will be deleted. If False and the\ndataset is non-empty an exception will be raised.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def _CheckSignature(self, value_data):\n    signature_map = self._GetDataTypeMap('uint32le')\n    try:\n        signature = self._ReadStructureFromByteStream(value_data, 0, signature_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse signature value with error: {0!s}'.format(exception))\n    format_type = self._HEADER_SIGNATURES.get(signature, None)\n    if (format_type == self._FORMAT_TYPE_2003):\n        return self._FORMAT_TYPE_2003\n    if (format_type == self._FORMAT_TYPE_8):\n        cached_entry_signature = value_data[signature:(signature + 4)]\n        if (cached_entry_signature in (self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1)):\n            return self._FORMAT_TYPE_8\n    elif (format_type == self._FORMAT_TYPE_10):\n        cached_entry_signature = value_data[signature:(signature + 4)]\n        if (cached_entry_signature == self._CACHED_ENTRY_SIGNATURE_8_1):\n            return self._FORMAT_TYPE_10\n    return format_type", "docstring": "Parses and validates the signature.\n\nArgs:\nvalue_data (bytes): value data.\n\nReturns:\nint: format type or None if format could not be determined.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "codesearchnet"}
{"code": "def return_main_dataset(self):\n    if (not self.main_dataset['source']):\n        raise exceptions.UserError('Source is empty')\n    extraction_code = self.main_dataset['source']\n    extraction_function = functions.import_object_from_string_code(extraction_code, 'extract_main_dataset')\n    try:\n        (X, y) = extraction_function()\n    except Exception as e:\n        raise exceptions.UserError('User code exception', exception_message=str(e))\n    (X, y) = (np.array(X), np.array(y))\n    return (X, y)", "docstring": "Returns main data set from self\n\nReturns:\nX (numpy.ndarray): Features\n\ny (numpy.ndarray): Labels", "source": "codesearchnet"}
{"code": "def set_date_range(self, start=None, end=None):\n        \n        start = self._start if start is None else pd.to_datetime(start)\n        end = self._end if end is None else pd.to_datetime(end)\n        self._update(self._prices.loc[start:end])", "docstring": "Update date range of stats, charts, etc. If None then\nthe original date range is used. So to reset to the original\nrange, just call with no args.\n\nArgs:\n* start (date): start date\n* end (end): end date", "source": "juraj-google-style"}
{"code": "def _PrintWarningsDetails(self, storage):\n    if (not storage.HasWarnings()):\n        self._output_writer.Write('No warnings stored.\\n\\n')\n        return\n    for (index, warning) in enumerate(storage.GetWarnings()):\n        title = 'Warning: {0:d}'.format(index)\n        table_view = views.ViewsFactory.GetTableView(self._views_format_type, title=title)\n        table_view.AddRow(['Message', warning.message])\n        table_view.AddRow(['Parser chain', warning.parser_chain])\n        path_specification = warning.path_spec.comparable\n        for (path_index, line) in enumerate(path_specification.split('\\n')):\n            if (not line):\n                continue\n            if (path_index == 0):\n                table_view.AddRow(['Path specification', line])\n            else:\n                table_view.AddRow(['', line])\n        table_view.Write(self._output_writer)", "docstring": "Prints the details of the warnings.\n\nArgs:\nstorage (BaseStore): storage.", "source": "codesearchnet"}
{"code": "def read_index(fn):\n    \n    index = None\n    with open(fn, \"rb\") as i_file:\n        if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:\n            raise ValueError(\"{}: not a valid index file\".format(fn))\n\n        index = pd.read_csv(io.StringIO(\n            zlib.decompress(i_file.read()).decode(encoding=\"utf-8\"),\n        ))\n\n    return index", "docstring": "Reads index from file.\n\nArgs:\nfn (str): the name of the file containing the index.\n\nReturns:\npandas.DataFrame: the index of the file.\n\nBefore reading the index, we check the first couple of bytes to see if it\nis a valid index file.", "source": "juraj-google-style"}
{"code": "def __params_order_descriptor(self, message_type, path, is_params_class=False):\n    \n    path_params = []\n    query_params = []\n    path_parameter_dict = self.__get_path_parameters(path)\n\n    for field in sorted(message_type.all_fields(), key=lambda f: f.number):\n      matched_path_parameters = path_parameter_dict.get(field.name, [])\n      if not isinstance(field, messages.MessageField):\n        name = field.name\n        if name in matched_path_parameters:\n          path_params.append(name)\n        elif is_params_class and field.required:\n          query_params.append(name)\n      else:\n        for subfield_list in self.__field_to_subfields(field):\n          name = '.'.join(subfield.name for subfield in subfield_list)\n          if name in matched_path_parameters:\n            path_params.append(name)\n          elif is_params_class and field.required:\n            query_params.append(name)\n\n    return path_params + sorted(query_params)", "docstring": "Describe the order of path parameters.\n\nArgs:\nmessage_type: messages.Message class, Message with parameters to describe.\npath: string, HTTP path to method.\nis_params_class: boolean, Whether the message represents URL parameters.\n\nReturns:\nDescriptor list for the parameter order.", "source": "juraj-google-style"}
{"code": "def readfrom(fpath, aslines=False, errors='replace', verbose=None):\n    \n    if verbose:\n        print('Reading text file: %r ' % (fpath,))\n    if not exists(fpath):\n        raise IOError('File %r does not exist' % (fpath,))\n    with open(fpath, 'rb') as file:\n        if aslines:\n            text = [line.decode('utf8', errors=errors)\n                    for line in file.readlines()]\n            if sys.platform.startswith('win32'):  \n                \n                text = [\n                    line[:-2] + '\\n' if line.endswith('\\r\\n') else line\n                    for line in text\n                ]\n        else:\n            text = file.read().decode('utf8', errors=errors)\n    return text", "docstring": "Reads (utf8) text from a file.\n\nArgs:\nfpath (PathLike): file path\naslines (bool): if True returns list of lines\nverbose (bool): verbosity flag\n\nReturns:\nstr: text from fpath (this is unicode)", "source": "juraj-google-style"}
{"code": "def Deserialize(self, reader):\n        \n        self.AssetId = reader.ReadUInt256()\n        self.Value = reader.ReadFixed8()\n        self.ScriptHash = reader.ReadUInt160()\n        if self.ScriptHash is None:\n            raise Exception(\"Script hash is required from deserialize!!!!!!!!\")", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def omega(self, structure, n, u):\n        \n        l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)\n        l0 *= 1e-10 \n        weight = float(structure.composition.weight) * 1.66054e-27 \n        vol = structure.volume * 1e-30 \n        vel = (1e9 * self[0].einsum_sequence([n, u, n, u])\n               / (weight / vol)) ** 0.5\n        return vel / l0", "docstring": "Finds directional frequency contribution to the heat\ncapacity from direction and polarization\n\nArgs:\nstructure (Structure): Structure to be used in directional heat\ncapacity determination\nn (3x1 array-like): direction for Cv determination\nu (3x1 array-like): polarization direction, note that\nno attempt for verification of eigenvectors is made", "source": "juraj-google-style"}
{"code": "def get_slot_names(self, *args, **kwargs):\n    return self._opt.get_slot_names(*args, **kwargs)", "docstring": "Return a list of the names of slots created by the `Optimizer`.\n\nThis simply wraps the get_slot_names() from the actual optimizer.\n\nArgs:\n*args: Arguments for get_slot().\n**kwargs: Keyword arguments for get_slot().\n\nReturns:\nA list of strings.", "source": "github-repos"}
{"code": "def array(self, size_chunk, start, bytesize):\n        \n\n        with open(self.img, 'rb') as f1:\n            f1.seek(self.start_byte + start * self.bytesize)\n            data = f1.read(size_chunk * self.bytesize)\n            Z = np.fromstring(data, dtype=self.dtype, count=size_chunk)\n            if self.grid == 'LOLA':\n                return Z * float(self.SCALING_FACTOR)\n            else:\n                return Z", "docstring": "Read part of the binary file\n\nArgs:\nsize_chunk (int) : Size of the chunk to read\nstart (int): Starting byte\nbytesize (int): Ending byte\n\nReturns:\n(np.array): array of the corresponding values", "source": "juraj-google-style"}
{"code": "def gramschmidt(vin, uin):\n    vin_uin = np.inner(vin, uin)\n    uin_uin = np.inner(uin, uin)\n    if (uin_uin <= 0.0):\n        raise ValueError('Zero or negative inner product!')\n    return (vin - ((vin_uin / uin_uin) * uin))", "docstring": "Returns that part of the first input vector\nthat is orthogonal to the second input vector.\nThe output vector is not normalized.\n\nArgs:\nvin (numpy array):\nfirst input vector\nuin (numpy array):\nsecond input vector", "source": "codesearchnet"}
{"code": "def build_markdown_table(headers, rows, row_keys=None):\n    \n    row_maxes = _find_row_maxes(headers, rows)\n    row_keys = row_keys or [key for key, value in headers.items()]\n    table = [\n        _build_row(headers, row_maxes, row_keys),\n        _build_separator(row_maxes, row_keys)\n    ]\n\n    for row in rows:\n        table.append(_build_row(row, row_maxes, row_keys))\n    return '\\n'.join(table) + '\\n'", "docstring": "Build a lined up markdown table.\n\nArgs:\nheaders (dict): A key -> value pairing fo the headers.\nrows (list): List of dictionaries that contain all the keys listed in\nthe headers.\nrow_keys (list): A sorted list of keys to display\n\nReturns:\nA valid Markdown Table as a string.", "source": "juraj-google-style"}
{"code": "def _prune_traverse_using_omitted_locations(match_traversal, omitted_locations, complex_optional_roots, location_to_optional_roots):\n    new_match_traversal = []\n    for step in match_traversal:\n        new_step = step\n        if (isinstance(step.root_block, Traverse) and step.root_block.optional):\n            current_location = step.as_block.location\n            optional_root_locations_stack = location_to_optional_roots.get(current_location, None)\n            optional_root_location = optional_root_locations_stack[(- 1)]\n            if (optional_root_location is None):\n                raise AssertionError(u'Found optional Traverse location {} that was not present in location_to_optional_roots dict: {}'.format(current_location, location_to_optional_roots))\n            elif (optional_root_location in omitted_locations):\n                field_name = step.root_block.get_field_name()\n                new_predicate = filter_edge_field_non_existence(LocalField(field_name))\n                old_filter = new_match_traversal[(- 1)].where_block\n                if (old_filter is not None):\n                    new_predicate = BinaryComposition(u'&&', old_filter.predicate, new_predicate)\n                new_match_step = new_match_traversal[(- 1)]._replace(where_block=Filter(new_predicate))\n                new_match_traversal[(- 1)] = new_match_step\n                new_step = None\n            elif (optional_root_location in complex_optional_roots):\n                new_root_block = Traverse(step.root_block.direction, step.root_block.edge_name)\n                new_step = step._replace(root_block=new_root_block)\n            else:\n                pass\n        if (new_step is None):\n            break\n        else:\n            new_match_traversal.append(new_step)\n    return new_match_traversal", "docstring": "Return a prefix of the given traverse, excluding any blocks after an omitted optional.\n\nGiven a subset (omitted_locations) of complex_optional_roots, return a new match traversal\nremoving all MatchStep objects that are within any omitted location.\n\nArgs:\nmatch_traversal: list of MatchStep objects to be pruned\nomitted_locations: subset of complex_optional_roots to be omitted\ncomplex_optional_roots: list of all @optional locations (location immmediately preceding\nan @optional traverse) that expand vertex fields\nlocation_to_optional_roots: dict mapping from location -> optional_roots where location is\nwithin some number of @optionals and optional_roots is a list\nof optional root locations preceding the successive @optional\nscopes within which the location resides\n\nReturns:\nlist of MatchStep objects as a copy of the given match traversal\nwith all steps within any omitted location removed.", "source": "codesearchnet"}
{"code": "def _time_to_datetime(value):\n  \n  if not isinstance(value, datetime.time):\n    raise TypeError('Cannot convert to datetime expected time value; '\n                    'received %s' % value)\n  return datetime.datetime(1970, 1, 1,\n                           value.hour, value.minute, value.second,\n                           value.microsecond)", "docstring": "Convert a time to a datetime for Cloud Datastore storage.\n\nArgs:\nvalue: A datetime.time object.\n\nReturns:\nA datetime object with date set to 1970-01-01.", "source": "juraj-google-style"}
{"code": "def _StructPackDecoder(wire_type, format):\n  \n\n  value_size = struct.calcsize(format)\n  local_unpack = struct.unpack\n\n  \n  \n\n  \n  \n  \n\n  def InnerDecode(buffer, pos):\n    new_pos = pos + value_size\n    result = local_unpack(format, buffer[pos:new_pos])[0]\n    return (result, new_pos)\n  return _SimpleDecoder(wire_type, InnerDecode)", "docstring": "Return a constructor for a decoder for a fixed-width field.\n\nArgs:\nwire_type:  The field's wire type.\nformat:  The format string to pass to struct.unpack().", "source": "juraj-google-style"}
{"code": "def __init__(self, text_encoder_config=None, target_language=None, **kwargs):\n    \n    \n    if target_language not in _TARGET_LANGUAGES:\n      raise ValueError(\"Invalid target language: %s \" % target_language)\n\n    \n    encoder_name = (\n        text_encoder_config.name if text_encoder_config else \"plain_text\")\n    name = \"en%s_%s\" % (target_language, encoder_name)\n\n    description = (\"Translation dataset from English to %s, uses encoder %s.\"\n                  ) % (target_language, encoder_name)\n    super(ParaCrawlConfig, self).__init__(\n        name=name, description=description, **kwargs)\n\n    \n    self.text_encoder_config = (\n        text_encoder_config or tfds.features.text.TextEncoderConfig())\n    self.target_language = target_language\n    self.data_url = _BASE_DATA_URL_FORMAT_STR.format(\n        target_lang=target_language)", "docstring": "BuilderConfig for ParaCrawl.\n\nArgs:\ntext_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration\nfor the `tfds.features.text.TextEncoder` used for the features feature.\ntarget_language: Target language that will be used to translate to from\nEnglish which is always the source language. It has to contain 2-letter\ncoded strings. For example: \"se\", \"hu\".\n**kwargs: Keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def VisitNamedType(self, t):\n    if t.name in self._module_map:\n        if self._alias_name and '.' in self._alias_name:\n            return pytd.Module(name=self._alias_name, module_name=t.name)\n        else:\n            return t\n    module_name, dot, name = t.name.rpartition('.')\n    if not dot or self._IsLocalName(module_name):\n        return t\n    if module_name in self._module_alias_map:\n        module_name = self._module_alias_map[module_name]\n    try:\n        module, cls_prefix = self._LookupModuleRecursive(module_name)\n    except KeyError:\n        if self._unit and f'{self.name}.{module_name}' in self._unit:\n            return t\n        raise\n    module_name = module.name\n    if module_name == self.name:\n        return t\n    if cls_prefix:\n        try:\n            maybe_alias = pytd.LookupItemRecursive(module, cls_prefix[:-1])\n        except KeyError:\n            pass\n        else:\n            if isinstance(maybe_alias, pytd.Alias) and isinstance(maybe_alias.type, pytd.Module):\n                if maybe_alias.type.module_name not in self._module_map:\n                    raise KeyError(f'{t.name} refers to unknown module {maybe_alias.name}')\n                module = self._module_map[maybe_alias.type.module_name]\n                cls_prefix = ''\n    name = cls_prefix + name\n    try:\n        if name == '*':\n            self._star_imports.add(module_name)\n            item = t\n        else:\n            item = pytd.LookupItemRecursive(module, name)\n    except KeyError as e:\n        item = self._ResolveUsingGetattr(module_name, module)\n        if item is None:\n            item = self._ResolveUsingStarImport(module, name)\n            if item is None:\n                raise KeyError(f'No {name} in module {module_name}') from e\n    if isinstance(item, pytd.Alias):\n        lookup_local = LookupLocalTypes()\n        lookup_local.unit = module\n        new_item = item.Visit(lookup_local)\n        if lookup_local.local_names:\n            item = new_item\n    if not self._in_generic_type and isinstance(item, pytd.Alias):\n        item = MaybeSubstituteParameters(item.type) or item\n    if isinstance(item, pytd.Constant) and item.name == 'typing_extensions.TypedDict':\n        return self.to_type(pytd.NamedType('typing.TypedDict'))\n    try:\n        return self.to_type(item)\n    except NotImplementedError as e:\n        raise SymbolLookupError(f'{item} is not a type') from e", "docstring": "Try to look up a NamedType.\n\nArgs:\nt: An instance of pytd.NamedType\n\nReturns:\nThe same node t.\nRaises:\nKeyError: If we can't find a module, or an identifier in a module, or\nif an identifier in a module isn't a class.", "source": "github-repos"}
{"code": "def on_hello(self, message):\n    logger.info('Got a hello')\n    self.identify(self.token)\n    self.heartbeat_thread = Heartbeat(self.ws, message['d']['heartbeat_interval'])\n    self.heartbeat_thread.start()\n    return", "docstring": "Runs on a hello event from websocket connection\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"", "source": "codesearchnet"}
{"code": "def Log(self, format_str, *args):\n    \n    format_str = utils.SmartUnicode(format_str)\n\n    status = format_str\n    if args:\n      try:\n        \n        status = format_str % args\n      except TypeError:\n        logging.error(\n            \"Tried to log a format string with the wrong number \"\n            \"of arguments: %s\", format_str)\n\n    logging.info(\"%s: %s\", self.session_id, status)\n\n    self.context.status = utils.SmartUnicode(status)\n\n    log_entry = rdf_flows.FlowLog(\n        client_id=None,\n        urn=self.session_id,\n        flow_name=self.hunt_obj.__class__.__name__,\n        log_message=status)\n    logs_collection_urn = self.hunt_obj.logs_collection_urn\n    with data_store.DB.GetMutationPool() as pool:\n      grr_collections.LogCollection.StaticAdd(\n          logs_collection_urn, log_entry, mutation_pool=pool)", "docstring": "Logs the message using the hunt's standard logging.\n\nArgs:\nformat_str: Format string\n*args: arguments to the format string\n\nRaises:\nRuntimeError: on parent missing logs_collection", "source": "juraj-google-style"}
{"code": "def from_celery(cls, name, worker_dict, queues):\n    return WorkerStats(name=name, broker=BrokerStats.from_celery(worker_dict['broker']), pid=worker_dict['pid'], process_pids=worker_dict['pool']['processes'], concurrency=worker_dict['pool']['max-concurrency'], job_count=worker_dict['pool']['writes']['total'], queues=queues)", "docstring": "Create a WorkerStats object from the dictionary returned by celery.\n\nArgs:\nname (str): The name of the worker.\nworker_dict (dict): The dictionary as returned by celery.\nqueues (list): A list of QueueStats objects that represent the queues this\nworker is listening on.\n\nReturns:\nWorkerStats: A fully initialized WorkerStats object.", "source": "codesearchnet"}
{"code": "def assert_reentrant_reads_succeed(source_info):\n    source, start_position, stop_position = source_info\n    assert isinstance(source, iobase.BoundedSource)\n    expected_values = [val for val in source.read(source.get_range_tracker(start_position, stop_position))]\n    if len(expected_values) < 2:\n        raise ValueError('Source is too trivial since it produces only %d values. Please give a source that reads at least 2 values.' % len(expected_values))\n    for i in range(1, len(expected_values) - 1):\n        read_iter = source.read(source.get_range_tracker(start_position, stop_position))\n        original_read = []\n        for _ in range(i):\n            original_read.append(next(read_iter))\n        reentrant_read = [val for val in source.read(source.get_range_tracker(start_position, stop_position))]\n        for val in read_iter:\n            original_read.append(val)\n        if equal_to(original_read)(expected_values):\n            raise ValueError('Source did not produce expected values when performing a reentrant read after reading %d values. Expected %r received %r.' % (i, expected_values, original_read))\n        if equal_to(reentrant_read)(expected_values):\n            raise ValueError('A reentrant read of source after reading %d values did not produce expected values. Expected %r received %r.' % (i, expected_values, reentrant_read))", "docstring": "Tests if a given source can be read in a reentrant manner.\n\nAssume that given source produces the set of values ``{v1, v2, v3, ... vn}``.\nFor ``i`` in range ``[1, n-1]`` this method performs a reentrant read after\nreading ``i`` elements and verifies that both the original and reentrant read\nproduce the expected set of values.\n\nArgs:\nsource_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):\na three-tuple that gives the reference\n:class:`~apache_beam.io.iobase.BoundedSource`, position to start reading\nat, and a position to stop reading at.\n\nRaises:\nValueError: if source is too trivial or reentrant read result\nin an incorrect read.", "source": "github-repos"}
{"code": "def set_marked(self, name: str, marked: bool=False, unmarked: bool=False) -> None:\n    if marked:\n        self._marked[name] = True\n    elif unmarked:\n        self._marked[name] = False\n    else:\n        self._marked.pop(name, None)", "docstring": "Add or remove the ``\\\\Marked`` and ``\\\\Unmarked`` mailbox\nattributes.\n\nArgs:\nname: The name of the mailbox.\nmarked: True if the ``\\\\Marked`` attribute should be added.\nunmarked: True if the ``\\\\Unmarked`` attribute should be added.", "source": "codesearchnet"}
{"code": "def create_reader_of_type(type_name):\n    readers = available_readers()\n    if (type_name not in readers.keys()):\n        raise UnknownReaderException(('Unknown reader: %s' % (type_name,)))\n    return readers[type_name]()", "docstring": "Create an instance of the reader with the given name.\n\nArgs:\ntype_name: The name of a reader.\n\nReturns:\nAn instance of the reader with the given type.", "source": "codesearchnet"}
{"code": "def HumanReadableStartType(self):\n    if isinstance(self.start_type, py2to3.STRING_TYPES):\n        return self.start_type\n    return human_readable_service_enums.SERVICE_ENUMS['Start'].get(self.start_type, '{0:d}'.format(self.start_type))", "docstring": "Return a human readable string describing the start type value.\n\nReturns:\nstr: human readable description of the start type value.", "source": "codesearchnet"}
{"code": "def delete(self, url, **kwargs):\n        \n        check_type(url, basestring, may_be_none=False)\n\n        \n        erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['DELETE'])\n\n        self.request('DELETE', url, erc, **kwargs)", "docstring": "Sends a DELETE request.\n\nArgs:\nurl(basestring): The URL of the API endpoint.\n**kwargs:\nerc(int): The expected (success) response code for the request.\nothers: Passed on to the requests package.\n\nRaises:\nApiError: If anything other than the expected response code is\nreturned by the Webex Teams API endpoint.", "source": "juraj-google-style"}
{"code": "def _compute_hparam_infos(self):\n    run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)\n    hparams = collections.defaultdict(list)\n    for tag_to_content in run_to_tag_to_content.values():\n        if (metadata.SESSION_START_INFO_TAG not in tag_to_content):\n            continue\n        start_info = metadata.parse_session_start_info_plugin_data(tag_to_content[metadata.SESSION_START_INFO_TAG])\n        for (name, value) in six.iteritems(start_info.hparams):\n            hparams[name].append(value)\n    result = []\n    for (name, values) in six.iteritems(hparams):\n        hparam_info = self._compute_hparam_info_from_values(name, values)\n        if (hparam_info is not None):\n            result.append(hparam_info)\n    return result", "docstring": "Computes a list of api_pb2.HParamInfo from the current run, tag info.\n\nFinds all the SessionStartInfo messages and collects the hparams values\nappearing in each one. For each hparam attempts to deduce a type that fits\nall its values. Finally, sets the 'domain' of the resulting HParamInfo\nto be discrete if the type is string and the number of distinct values is\nsmall enough.\n\nReturns:\nA list of api_pb2.HParamInfo messages.", "source": "codesearchnet"}
{"code": "def __ge__(self, other):\n        \n        other = self._cast_to_frameset(other)\n        if other is NotImplemented:\n            return NotImplemented\n        return self.items >= other.items", "docstring": "Check if `self` >= `other` via a comparison of the contents.\nIf `other` is not a :class:`FrameSet`, but is a set, frozenset, or\nis iterable, it will be cast to a :class:`FrameSet`.\n\nArgs:\nother (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet`\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def unsqueeze(self, dim: int) -> Rigid:\n    if dim >= len(self.shape):\n        raise ValueError('Invalid dimension')\n    rots = self._rots.unsqueeze(dim)\n    trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n    return Rigid(rots, trans)", "docstring": "Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation.\n\nArgs:\ndim: A positive or negative dimension index.\nReturns:\nThe unsqueezed transformation.", "source": "github-repos"}
{"code": "def _BuildFindSpecsFromRegistrySourceKey(self, key_path):\n    find_specs = []\n    for key_path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(key_path, '\\\\'):\n        logger.debug('building find spec from key path glob: {0:s}'.format(key_path_glob))\n        key_path_glob_upper = key_path_glob.upper()\n        if key_path_glob_upper.startswith('HKEY_USERS\\\\%%USERS.SID%%'):\n            key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:])\n        find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob)\n        find_specs.append(find_spec)\n    return find_specs", "docstring": "Build find specifications from a Windows Registry source type.\n\nArgs:\nkey_path (str): Windows Registry key path defined by the source.\n\nReturns:\nlist[dfwinreg.FindSpec]: find specifications for the Windows Registry\nsource type.", "source": "codesearchnet"}
{"code": "def write_test_cases(fp, model_name, examples):\n    writer = TextFormatWriter(fp)\n    writer.write_field('load_model', os.path.basename(model_name))\n    for example in examples:\n        inputs = []\n        for name in example['inputs'].keys():\n            if name:\n                inputs.append(name)\n        outputs = []\n        for name in example['outputs'].keys():\n            if name:\n                outputs.append(name)\n        if not (inputs and outputs):\n            raise RuntimeError('Empty input / output names.')\n        with writer.sub_message('reshape') as reshape:\n            for name, value in example['inputs'].items():\n                with reshape.sub_message('input') as input_msg:\n                    input_msg.write_field('key', name)\n                    input_msg.write_field('value', ','.join(map(str, value.shape)))\n        with writer.sub_message('invoke') as invoke:\n            for name, value in example['inputs'].items():\n                with invoke.sub_message('input') as input_msg:\n                    input_msg.write_field('key', name)\n                    input_msg.write_field('value', format_result(value))\n            for name, value in example['outputs'].items():\n                with invoke.sub_message('output') as output_msg:\n                    output_msg.write_field('key', name)\n                    output_msg.write_field('value', format_result(value))\n                with invoke.sub_message('output_shape') as output_shape:\n                    output_shape.write_field('key', name)\n                    output_shape.write_field('value', ','.join([str(dim) for dim in value.shape]))", "docstring": "Given a dictionary of `examples`, write a text format representation.\n\nThe file format is protocol-buffer-like, even though we don't use proto due\nto the needs of the Android team.\n\nArgs:\nfp: File-like object to write to.\nmodel_name: Filename where the model was written to, relative to filename.\nexamples: Example dictionary consisting of keys \"inputs\" and \"outputs\"\n\nRaises:\nRuntimeError: Example dictionary does not have input / output names.", "source": "github-repos"}
{"code": "def GetDefinitionByName(self, name):\n    lookup_name = name.lower()\n    if (lookup_name not in self._definitions):\n        lookup_name = self._aliases.get(name, None)\n    return self._definitions.get(lookup_name, None)", "docstring": "Retrieves a specific data type definition by name.\n\nArgs:\nname (str): name of the data type definition.\n\nReturns:\nDataTypeDefinition: data type definition or None if not available.", "source": "codesearchnet"}
{"code": "class DabDetrEncoder(DabDetrPreTrainedModel):\n\n    def __init__(self, config: DabDetrConfig):\n        super().__init__(config)\n        self.dropout = config.dropout\n        self.query_scale = DabDetrMLP(config.hidden_size, config.hidden_size, config.hidden_size, 2)\n        self.layers = nn.ModuleList([DabDetrEncoderLayer(config) for _ in range(config.encoder_layers)])\n        self.norm = nn.LayerNorm(config.hidden_size) if config.normalize_before else None\n        self.gradient_checkpointing = False\n        self.post_init()\n\n    def forward(self, inputs_embeds, attention_mask, object_queries, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        hidden_states = inputs_embeds\n        if attention_mask is not None:\n            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            pos_scales = self.query_scale(hidden_states)\n            scaled_object_queries = object_queries * pos_scales\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, scaled_object_queries, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, object_queries=scaled_object_queries, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if self.norm:\n            hidden_states = self.norm(hidden_states)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n[`DabDetrEncoderLayer`].\n\nThe encoder updates the flattened feature map through multiple self-attention layers.\n\nSmall tweak for DAB-DETR:\n\n- object_queries are added to the forward pass.\n\nArgs:\nconfig: DabDetrConfig", "source": "github-repos"}
{"code": "def write_dict_to_new_file(file_name, localization_key_to_comment):\n    \n    output_file_descriptor = open_strings_file(file_name, \"w\")\n    for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):\n        write_entry_to_file(output_file_descriptor, entry_comment, entry_key)\n        output_file_descriptor.write(u'\\n')\n    output_file_descriptor.close()", "docstring": "Writes dictionary of localization keys and comments to a file.\n\nArgs:\nlocalization_key_to_comment (dict): A mapping between localization keys and comments.\nfile_name (str): The path of the file to append to.", "source": "juraj-google-style"}
{"code": "def _ParseRegisteredDLLs(self, parser_mediator, registry_key):\n    \n    notify_key = registry_key.GetSubkeyByName('Notify')\n    if not notify_key:\n      return\n\n    for subkey in notify_key.GetSubkeys():\n      for trigger in self._TRIGGERS:\n        handler_value = subkey.GetValueByName(trigger)\n        if not handler_value:\n          continue\n\n        values_dict = {\n            'Application': subkey.name,\n            'Handler': handler_value.GetDataAsObject(),\n            'Trigger': trigger}\n\n        command_value = subkey.GetValueByName('DllName')\n        if command_value:\n          values_dict['Command'] = command_value.GetDataAsObject()\n\n        event_data = windows_events.WindowsRegistryEventData()\n        event_data.key_path = subkey.path\n        event_data.offset = subkey.offset\n        event_data.regvalue = values_dict\n        event_data.source_append = ': Winlogon'\n\n        event = time_events.DateTimeValuesEvent(\n            subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the registered DLLs that receive event notifications.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def add(self, value):\n    value = int(value)\n    if (value < 10):\n        value = 10\n    if (value > 600):\n        value = 600\n    self._data.setdefault(value, 0)\n    self._data[value] += 1\n    self._len += 1", "docstring": "Add the value to this histogram.\n\nArgs:\nvalue (int): The value. Values outside of ``10 <= x <= 600``\nwill be raised to ``10`` or reduced to ``600``.", "source": "codesearchnet"}
{"code": "def add_buffer(self, buf_header, buf_payload):\n        \n        if 'num_buffers' in self._header:\n            self._header['num_buffers'] += 1\n        else:\n            self._header['num_buffers'] = 1\n\n        self._header_json = None\n\n        self._buffers.append((buf_header, buf_payload))", "docstring": "Associate a buffer header and payload with this message.\n\nArgs:\nbuf_header (``JSON``) : a buffer header\nbuf_payload (``JSON`` or bytes) : a buffer payload\n\nReturns:\nNone\n\nRaises:\nMessageError", "source": "juraj-google-style"}
{"code": "def validate_full_name(self, full_name, timeout=(- 1)):\n    uri = ((self.URI + '/validateUserName/') + full_name)\n    return self._client.create_with_zero_body(uri=uri, timeout=timeout)", "docstring": "Verifies if a fullName is already in use.\n\nArgs:\nfull_name:\nThe fullName to be verified.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns: True if full name is in use, False if it is not.", "source": "codesearchnet"}
{"code": "def __init__(self, host: str, port: int, username: Optional[str], password: Optional[str], batch_size: int=100):\n    self.host = host\n    self.port = port\n    self.username = username | os.getenv('OPENSEARCH_USERNAME')\n    self.password = password | os.getenv('OPENSEARCH_PASSWORD')\n    self._batch_size = batch_size\n    if not self.username or not self.password:\n        raise ValueError('Username and password are needed for connecting to Opensearch cluster.')", "docstring": "Args:\nhost (str): The opensearch host\nport (int): The opensearch port\nusername (str): username of OpenSearch DB\npassword (str): password of OpenSearch DB\nbatch_size(int): Number of key, values pairs to write at once\n\nReturns:\n:class:`~apache_beam.transforms.ptransform.PTransform`", "source": "github-repos"}
{"code": "def mul(left, right):\n    from .mv_mul import MvMul\n    length = max(left, right)\n    if (length == 1):\n        return Mul(left, right)\n    return MvMul(left, right)", "docstring": "Distribution multiplication.\n\nArgs:\nleft (Dist, numpy.ndarray) : left hand side.\nright (Dist, numpy.ndarray) : right hand side.", "source": "codesearchnet"}
{"code": "def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None):\n    newhdxobjects = list()\n    for hdxobject in hdxobjects:\n        newhdxobjectdata = copy.deepcopy(hdxobject.data)\n        newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration)\n        if attribute_to_copy:\n            value = getattr(hdxobject, attribute_to_copy)\n            setattr(newhdxobject, attribute_to_copy, value)\n        newhdxobjects.append(newhdxobject)\n    return newhdxobjects", "docstring": "Helper function to make a deep copy of a supplied list of HDX objects\n\nArgs:\nhdxobjects (List[T <= HDXObject]): list of HDX objects to copy\nhdxobjectclass (type): Type of the HDX Objects to be copied\nattribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None.\n\nReturns:\nList[T <= HDXObject]: Deep copy of list of HDX objects", "source": "codesearchnet"}
{"code": "def __init__(self, timeout_s):\n    \n    self.start = time.time()\n    self.timeout_s = timeout_s", "docstring": "Construct a PolledTimeout object.\n\nArgs:\ntimeout_s: This may either be a number or None. If a number, this object\nwill consider to be expired after number seconds after construction. If\nNone, this object never expires.", "source": "juraj-google-style"}
{"code": "def add_comment(self, app_id, record_id, field_id, message):\n        \n\n        self._swimlane.request(\n            'post',\n            'app/{0}/record/{1}/{2}/comment'.format(\n                app_id,\n                record_id,\n                field_id\n            ),\n            json={\n                'message': message,\n                'createdDate': pendulum.now().to_rfc3339_string()\n            }\n        )", "docstring": "Directly add a comment to a record without retrieving the app or record first\n\nWarnings:\nDoes not perform any app, record, or field ID validation\n\nArgs:\napp_id (str): Full App ID string\nrecord_id (str): Full parent Record ID string\nfield_id (str): Full field ID to target reference field on parent Record string\nmessage (str): New comment message body", "source": "juraj-google-style"}
{"code": "def AddLogFileOptions(self, argument_group):\n    argument_group.add_argument('--logfile', '--log_file', '--log-file', action='store', metavar='FILENAME', dest='log_file', type=str, default='', help='Path of the file in which to store log messages, by default this file will be named: \"{0:s}-YYYYMMDDThhmmss.log.gz\". Note that the file will be gzip compressed if the extension is \".gz\".'.format(self.NAME))", "docstring": "Adds the log file option to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def write(self, writer: WriteStream) -> None:\n        \n        for untagged in self._untagged:\n            untagged.write(writer)\n        writer.write(b'%b %b\\r\\n' % (self.tag, self.text))", "docstring": "Write the object to the stream, with one or more calls to\n:meth:`~asyncio.WriteStream.write`.\n\nArgs:\nwriter: The output stream.", "source": "juraj-google-style"}
{"code": "def parse(self, input_str, reference_date=\"\"):\n        \n        if not jpype.isThreadAttachedToJVM():\n            jpype.attachThreadToJVM()\n        if reference_date:\n            return json.loads(self._sutime.annotate(input_str, reference_date))\n        return json.loads(self._sutime.annotate(input_str))", "docstring": "Parses datetime information out of string input.\n\nIt invokes the SUTimeWrapper.annotate() function in Java.\n\nArgs:\ninput_str: The input as string that has to be parsed.\nreference_date: Optional reference data for SUTime.\n\nReturns:\nA list of dicts with the result from the SUTimeWrapper.annotate()\ncall.", "source": "juraj-google-style"}
{"code": "def _broadcast_arg(U, arg, argtype, name):\n    \n\n    \n    if arg is None or isinstance(arg, argtype):\n        return [arg for _ in range(U.ndim)]\n\n    \n    elif np.iterable(arg):\n        if len(arg) != U.ndim:\n            raise ValueError('Parameter {} was specified as a sequence of '\n                             'incorrect length. The length must match the '\n                             'number of tensor dimensions '\n                             '(U.ndim={})'.format(name, U.ndim))\n        elif not all([isinstance(a, argtype) for a in arg]):\n            raise TypeError('Parameter {} specified as a sequence of '\n                            'incorrect type. '\n                            'Expected {}.'.format(name, argtype))\n        else:\n            return arg\n\n    \n    else:\n        raise TypeError('Parameter {} specified as a {}.'\n                        ' Expected {}.'.format(name, type(arg), argtype))", "docstring": "Broadcasts plotting option `arg` to all factors.\n\nArgs:\nU : KTensor\narg : argument provided by the user\nargtype : expected type for arg\nname : name of the variable, used for error handling\n\nReturns:\niterable version of arg of length U.ndim", "source": "juraj-google-style"}
{"code": "def resolve_pname(self, pname: PrefName, mid: ModuleId) -> Tuple[(YangIdentifier, ModuleId)]:\n    (p, s, loc) = pname.partition(':')\n    try:\n        mdata = self.modules[mid]\n    except KeyError:\n        raise ModuleNotRegistered(*mid) from None\n    try:\n        return ((loc, mdata.prefix_map[p]) if s else (p, mdata.main_module))\n    except KeyError:\n        raise UnknownPrefix(p, mid) from None", "docstring": "Return the name and module identifier in which the name is defined.\n\nArgs:\npname: Name with an optional prefix.\nmid: Identifier of the module in which `pname` appears.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If the prefix specified in `pname` is not declared.", "source": "codesearchnet"}
{"code": "def reset(self, entries_to_reset):\n    num_updates = tf.size(entries_to_reset)\n    update_vals = tf.scatter_update(self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims(tf.fill([self.memory_size, self.val_depth], 0.0), 0), [num_updates, 1, 1]))\n    update_logits = tf.scatter_update(self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims(tf.fill([self.memory_size], 0.0), 0), [num_updates, 1]))\n    reset_op = tf.group([update_vals, update_logits])\n    return reset_op", "docstring": "Reset the entries in the memory.\n\nArgs:\nentries_to_reset: a 1D tensor.\nReturns:\nthe reset op.", "source": "codesearchnet"}
{"code": "def __init__(self, cls, required=False, default=Empty):\n    \n    assert isclass(cls)\n    assert issubclass(cls, Object)\n    if default is not Empty and not isinstance(default, cls):\n      self._default = cls(default)\n    else:\n      self._default = default\n    self._cls = cls\n    self._required = required", "docstring": "Create an instance of a type signature.\nArgs:\ncls (Class): the \"type\" of the object this signature represents.\nrequired (bool):\ndefault(object): an instance of the type for a default value. This\nshould be either an instance of cls or something coercable to cls.", "source": "juraj-google-style"}
{"code": "def __init__(self, project, query, data):\n    super().__init__(project, query, 'unused_checksum')\n    self.expected_data = data\n    self.actual_data = None", "docstring": "Initialize BigQueryMatcher object.\nArgs:\nproject: The name (string) of the project.\nquery: The query (string) to perform.\ndata: List of tuples with the expected data.", "source": "github-repos"}
{"code": "def send_handshake_request(self, uid=UNKNOWN_UID, cmd=ConnectionHandshakeCommand.INIT):\n    request = json.dumps({'cmd': cmd.value, 'uid': uid})\n    self.log.debug('Sending handshake request %s.', request)\n    self._client_send(request)\n    response = self._client_receive()\n    if not response:\n        raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n    response = self._decode_socket_response_bytes(response)\n    result = json.loads(response)\n    if result['status']:\n        self.uid = result['uid']\n    else:\n        self.uid = UNKNOWN_UID", "docstring": "Sends a handshake request to the server to prepare for the communication.\n\nThrough the handshake response, this function checks whether the server\nis ready for the communication. If ready, it sets `self.uid` to the\nserver session id. Otherwise, it sets `self.uid` to `UNKNOWN_UID`.\n\nArgs:\nuid: int, the uid of the server session to continue. It will be ignored\nif the `cmd` requires the server to create a new session.\ncmd: ConnectionHandshakeCommand, the handshake command Enum for the\nserver, which requires the server to create a new session or use the\ncurrent session.\n\nRaises:\nerrors.ProtocolError: something went wrong when sending the handshake\nrequest.", "source": "github-repos"}
{"code": "def _skip_remaining_tests(self, exception):\n    for test_name in self.results.requested:\n        if not self.results.is_test_executed(test_name):\n            test_record = records.TestResultRecord(test_name, self.TAG)\n            test_record.test_skip(exception)\n            self.results.add_record(test_record)\n            self.summary_writer.dump(test_record.to_dict(), records.TestSummaryEntryType.RECORD)", "docstring": "Marks any requested test that has not been executed in a class as\nskipped.\n\nThis is useful for handling abort class signal.\n\nArgs:\nexception: The exception object that was thrown to trigger the\nskip.", "source": "github-repos"}
{"code": "def __init__(self, features, targets, **kwargs):\n        \n        \n        super().__init__(**kwargs)\n\n        \n        self.features = features\n        self.targets = targets\n\n        \n        self.fit(features.train, targets.train)", "docstring": "Inits a Random Forest Classifier with a market attribute\n\nArgs:\n**kwargs: Scikit Learn's RandomForestClassifier kwargs", "source": "juraj-google-style"}
{"code": "def get_average_record(self, n):\n        \n        history_deque = collections.deque()\n        averages = []\n        for d in self.data_points:\n            history_deque.appendleft(d)\n            if len(history_deque) > n:\n                history_deque.pop()\n            avg = sum(history_deque) / len(history_deque)\n            averages.append(round(avg, self.lr))\n        return averages", "docstring": "Returns a list of average current numbers, each representing the\naverage over the last n data points.\n\nArgs:\nn: Number of data points to average over.\n\nReturns:\nA list of average current values.", "source": "juraj-google-style"}
{"code": "def json_to_bulk(tc_data, value_fields, resource_type, resource_type_parent):\n    if (not isinstance(tc_data, list)):\n        tc_data = [tc_data]\n    bulk_array = []\n    for d in tc_data:\n        values = []\n        for field in value_fields:\n            if (d.get(field) is not None):\n                values.append(d.get(field))\n                del d[field]\n        if (resource_type_parent in ['Group', 'Task', 'Victim']):\n            d['name'] = ' : '.join(values)\n        elif (resource_type_parent in ['Indicator']):\n            d['summary'] = ' : '.join(values)\n        if ('owner' in d):\n            d['ownerName'] = d['owner']['name']\n            del d['owner']\n        if (d.get('type') is None):\n            d['type'] = resource_type\n        bulk_array.append(d)\n    return bulk_array", "docstring": "Convert ThreatConnect JSON response to a Bulk Format.\n\n.. Attention:: This method is subject to frequent changes\n\nArgs:\ntc_data (dictionary): Array of data returned from TC API call.\nvalue_fields (list): Field names that contain the \"value\" data.\nresource_type (string): The resource type of the tc_data provided.\nresource_type_parent (string): The resource parent type of the tc_data provided.\n\nReturns:\n(list): A dictionary representing a TCEntityArray", "source": "codesearchnet"}
{"code": "def delete(filename, retry_params=None, _account_id=None):\n  \n  api = storage_api._get_storage_api(retry_params=retry_params,\n                                     account_id=_account_id)\n  common.validate_file_path(filename)\n  filename = api_utils._quote_filename(filename)\n  status, resp_headers, content = api.delete_object(filename)\n  errors.check_status(status, [204], filename, resp_headers=resp_headers,\n                      body=content)", "docstring": "Delete a Google Cloud Storage file.\n\nArgs:\nfilename: A Google Cloud Storage filename of form '/bucket/filename'.\nretry_params: An api_utils.RetryParams for this call to GCS. If None,\nthe default one is used.\n_account_id: Internal-use only.\n\nRaises:\nerrors.NotFoundError: if the file doesn't exist prior to deletion.", "source": "juraj-google-style"}
{"code": "def _testDrawBoundingBoxColorCycling(self, img, dtype=dtypes.float32, colors=None):\n    color_table = colors\n    if colors is None:\n        color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1], [0.5, 0, 0, 1], [0, 0, 0.5, 1], [0, 1, 1, 1], [1, 0, 1, 1]])\n    assert len(img.shape) == 3\n    depth = img.shape[2]\n    assert depth <= color_table.shape[1]\n    assert depth == 1 or depth == 3 or depth == 4\n    if depth == 1:\n        color_table[:, 0] = 1\n    num_colors = color_table.shape[0]\n    for num_boxes in range(1, num_colors + 2):\n        image = np.copy(img)\n        color = color_table[(num_boxes - 1) % num_colors, 0:depth]\n        test_drawn_image = self._fillBorder(image, color)\n        bboxes = np.asarray([0, 0, 1, 1])\n        bboxes = np.vstack([bboxes for _ in range(num_boxes)])\n        bboxes = math_ops.cast(bboxes, dtypes.float32)\n        bboxes = array_ops.expand_dims(bboxes, 0)\n        image = ops.convert_to_tensor(image)\n        image = image_ops_impl.convert_image_dtype(image, dtype)\n        image = array_ops.expand_dims(image, 0)\n        image = image_ops.draw_bounding_boxes(image, bboxes, colors=colors)\n        with self.cached_session(use_gpu=False) as sess:\n            op_drawn_image = np.squeeze(sess.run(image), 0)\n            self.assertAllEqual(test_drawn_image, op_drawn_image)", "docstring": "Tests if cycling works appropriately.\n\nArgs:\nimg: 3-D numpy image on which to draw.\ndtype: image dtype (float, half).\ncolors: color table.", "source": "github-repos"}
{"code": "def get_text_features(self, input_ids, attention_mask=None, position_ids=None, token_type_ids=None, params: Optional[dict]=None, dropout_rng: jax.random.PRNGKey=None, train=False):\n    if position_ids is None:\n        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)\n    if token_type_ids is None:\n        token_type_ids = jnp.zeros_like(input_ids)\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _get_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic):\n        text_outputs = module.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, deterministic=deterministic)\n        pooled_output = text_outputs[1]\n        text_features = module.text_projection(pooled_output)\n        return text_features\n    return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), jnp.array(token_type_ids, dtype='i4'), not train, method=_get_features, rngs=rngs)", "docstring": "Args:\ninput_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\nprovide it.\n\nIndices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n[`PreTrainedTokenizer.__call__`] for details.\n\n[What are input IDs?](../glossary#input-ids)\n\nReturns:\ntext_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\nthe projection layer to the pooled output of text model.", "source": "github-repos"}
{"code": "def union(self, other):\n        \n        if not isinstance(other, self.__class__):\n            m = \"You can only union striplogs with each other.\"\n            raise StriplogError(m)\n\n        result = []\n        for iv in deepcopy(self):\n            for jv in other:\n                if iv.any_overlaps(jv):\n                    iv = iv.union(jv)\n            result.append(iv)\n        return Striplog(result)", "docstring": "Makes a striplog of all unions.\n\nArgs:\nStriplog. The striplog instance to union with.\n\nReturns:\nStriplog. The result of the union.", "source": "juraj-google-style"}
{"code": "def write_unitth(suites, out_dir):\n        \n        if not os.path.isdir(out_dir):\n            os.mkdir(out_dir)\n\n        for classname, cases in suites.items():\n            doc_xml = minidom.Document()\n\n            suite_xml = doc_xml.createElement('testsuite')\n            suite_xml.setAttribute('name', classname)\n            suite_xml.setAttribute('tests', str(len(cases)))\n            suite_xml.setAttribute('errors', str(sum('error' in case for case in cases)))\n            suite_xml.setAttribute('failures', str(sum('failure' in case for case in cases)))\n            suite_xml.setAttribute('skipped', str(sum('skipped' in case for case in cases)))\n            suite_xml.setAttribute('time', '{:.3f}'.format(sum(case['time'] for case in cases)))\n            doc_xml.appendChild(suite_xml)\n\n            for case in cases:\n                case_xml = doc_xml.createElement('testcase')\n                case_xml.setAttribute('classname', classname)\n                case_xml.setAttribute('name', case['name'])\n                case_xml.setAttribute('time', '{:.3f}'.format(case['time']))\n                suite_xml.appendChild(case_xml)\n\n                if 'skipped' in case:\n                    skipped_xml = doc_xml.createElement('skipped')\n                    skipped_xml.setAttribute('type', case['skipped']['type'])\n                    skipped_xml.setAttribute('message', case['skipped']['message'])\n                    case_xml.appendChild(skipped_xml)\n\n                    skipped_text_xml = doc_xml.createCDATASection(case['skipped']['text'])\n                    skipped_xml.appendChild(skipped_text_xml)\n\n                if 'failure' in case:\n                    failure_xml = doc_xml.createElement('failure')\n                    failure_xml.setAttribute('type', case['failure']['type'])\n                    failure_xml.setAttribute('message', case['failure']['message'])\n                    case_xml.appendChild(failure_xml)\n\n                    failure_text_xml = doc_xml.createCDATASection(case['failure']['text'])\n                    failure_xml.appendChild(failure_text_xml)\n\n                if 'error' in case:\n                    error_xml = doc_xml.createElement('error')\n                    error_xml.setAttribute('type', case['error']['type'])\n                    error_xml.setAttribute('message', case['error']['message'])\n                    case_xml.appendChild(error_xml)\n\n                    error_text_xml = doc_xml.createCDATASection(case['error']['text'])\n                    error_xml.appendChild(error_text_xml)\n\n            with open(os.path.join(out_dir, '{}.xml'.format(classname)), 'w') as output:\n                doc_xml.writexml(output, encoding='utf-8', addindent='', newl=\"\")\n            doc_xml.unlink()", "docstring": "Write UnitTH-style test reports\n\nArgs:\nsuites (:obj:`dict`): dictionary of test suites\nout_dir (:obj:`str`): path to save UnitTH-style test reports", "source": "juraj-google-style"}
{"code": "def get_pipeline_stage(self, pipeline_key, stage_key = None, sort_by = None):\n\t\t\n\t\tif not pipeline_key:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline_key,\n\t\t\t\t\t\tself.stages_suffix\n\t\t\t\t\t\t])\n\t\tif stage_key:\n\t\t\turi = '/'.join([\n\t\t\t\t\t\t\turi,\n\t\t\t\t\t\t\tstage_key\n\t\t\t\t\t\t\t])\n\t\t\n\t\tif sort_by:\n\t\t\t\tif sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:\n\t\t\t\t\turi += self.sort_by_postfix + sort_by\n\t\t\t\telse:\t\t\n\t\t\t\t\treturn requests.codes.bad_request, {'success' : 'False', \n\t\t\t\t\t\t\t\t\t\t\t\t'error': 'sortBy needs to be \\'creationTimestamp\\', or \\'lastUpdatedTimestamp\\''}\n\n\t\tcode, data = self._req('get', uri)\n\t\t\n\t\t\n\t\tif stage_key:\n\t\t\tdata = list(data.values())\n\t\t\n\t\treturn code, data", "docstring": "Gets a list of one/all stage objects in a pipeline. Performs a single GET.\nArgs:\npipeline_key\tkey for pipeline\nstage_key \t\tkey for stage (default: None i.e. ALL)\nsort_by\t\t\tin desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'\nmay or may not be supported\nreturns \t\t(status code for the GET request, dict of stages)\nIt is not a list hence the .values() before return", "source": "juraj-google-style"}
{"code": "def manual_invoice(cls, user, due_delta, description_price_pairs):\n    line_items = []\n    for (description, price) in description_price_pairs:\n        line_item = commerce.LineItem(description=description, quantity=1, price=Decimal(price), product=None)\n        line_items.append(line_item)\n    min_due_time = (timezone.now() + due_delta)\n    return cls._generate(user, None, min_due_time, line_items)", "docstring": "Generates an invoice for arbitrary items, not held in a user's\ncart.\n\nArguments:\nuser (User): The user the invoice is being generated for.\ndue_delta (datetime.timedelta): The length until the invoice is\ndue.\ndescription_price_pairs ([(str, long or Decimal), ...]): A list of\npairs. Each pair consists of the description for each line item\nand the price for that line item. The price will be cast to\nDecimal.\n\nReturns:\nan Invoice.", "source": "codesearchnet"}
{"code": "def generate_entry_label(entry):\n    \n    if isinstance(entry, MultiEntry):\n        return \" + \".join([latexify_ion(e.name) for e in entry.entry_list])\n    else:\n        return latexify_ion(latexify(entry.name))", "docstring": "Generates a label for the pourbaix plotter\n\nArgs:\nentry (PourbaixEntry or MultiEntry): entry to get a label for", "source": "juraj-google-style"}
{"code": "def easeInBack(n, s=1.70158):\n    \n    _checkRange(n)\n    return n * n * ((s + 1) * n - s)", "docstring": "A tween function that backs up first at the start and then goes to the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def render(raw_config, environment=None):\n    t = Template(raw_config)\n    buff = StringIO()\n    if (not environment):\n        environment = {}\n    try:\n        substituted = t.substitute(environment)\n    except KeyError as e:\n        raise exceptions.MissingEnvironment(e.args[0])\n    except ValueError:\n        substituted = t.safe_substitute(environment)\n    if (not isinstance(substituted, str)):\n        substituted = substituted.decode('utf-8')\n    buff.write(substituted)\n    buff.seek(0)\n    return buff.read()", "docstring": "Renders a config, using it as a template with the environment.\n\nArgs:\nraw_config (str): the raw stacker configuration string.\nenvironment (dict, optional): any environment values that should be\npassed to the config\n\nReturns:\nstr: the stacker configuration populated with any values passed from\nthe environment", "source": "codesearchnet"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(Interval, self).read(istream, kmip_version=kmip_version)\n    if (self.length != Interval.LENGTH):\n        raise exceptions.InvalidPrimitiveLength('interval length must be {0}'.format(Interval.LENGTH))\n    self.value = unpack('!I', istream.read(Interval.LENGTH))[0]\n    pad = unpack('!I', istream.read(Interval.LENGTH))[0]\n    if (pad != 0):\n        raise exceptions.InvalidPaddingBytes('padding bytes must be zero')\n    self.validate()", "docstring": "Read the encoding of the Interval from the input stream.\n\nArgs:\nistream (stream): A buffer containing the encoded bytes of the\nvalue of an Interval. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidPrimitiveLength: if the Interval encoding read in has an\ninvalid encoded length.\nInvalidPaddingBytes: if the Interval encoding read in does not use\nzeroes for its padding bytes.", "source": "codesearchnet"}
{"code": "def series_expand(self, param: Symbol, about, order: int):\n    s = self.shape\n    emats = zip(*[o.series_expand(param, about, order) for o in self.matrix.ravel()])\n    return tuple((Matrix(np_array(em).reshape(s)) for em in emats))", "docstring": "Expand the matrix expression as a truncated power series in a scalar\nparameter.\n\nArgs:\nparam: Expansion parameter.\nabout (.Scalar): Point about which to expand.\norder: Maximum order of expansion >= 0\n\nReturns:\ntuple of length (order+1), where the entries are the expansion\ncoefficients.", "source": "codesearchnet"}
{"code": "def generate_multiline_list(self, items, before='', after='', delim=('(', ')'), compact=True, sep=',', skip_last_sep=False):\n    assert ((len(delim) == 2) and isinstance(delim[0], six.text_type) and isinstance(delim[1], six.text_type)), 'delim must be a tuple of two unicode strings.'\n    if (len(items) == 0):\n        self.emit((((before + delim[0]) + delim[1]) + after))\n        return\n    if (len(items) == 1):\n        self.emit(((((before + delim[0]) + items[0]) + delim[1]) + after))\n        return\n    if compact:\n        self.emit((((before + delim[0]) + items[0]) + sep))\n\n        def emit_list(items):\n            items = items[1:]\n            for (i, item) in enumerate(items):\n                if (i == (len(items) - 1)):\n                    self.emit(((item + delim[1]) + after))\n                else:\n                    self.emit((item + sep))\n        if (before or delim[0]):\n            with self.indent((len(before) + len(delim[0]))):\n                emit_list(items)\n        else:\n            emit_list(items)\n    else:\n        if (before or delim[0]):\n            self.emit((before + delim[0]))\n        with self.indent():\n            for (i, item) in enumerate(items):\n                if ((i == (len(items) - 1)) and skip_last_sep):\n                    self.emit(item)\n                else:\n                    self.emit((item + sep))\n        if (delim[1] or after):\n            self.emit((delim[1] + after))\n        elif delim[1]:\n            self.emit(delim[1])", "docstring": "Given a list of items, emits one item per line.\n\nThis is convenient for function prototypes and invocations, as well as\nfor instantiating arrays, sets, and maps in some languages.\n\nTODO(kelkabany): A backend that uses tabs cannot be used with this\nif compact is false.\n\nArgs:\nitems (list[str]): Should contain the items to generate a list of.\nbefore (str): The string to come before the list of items.\nafter (str): The string to follow the list of items.\ndelim (str, str): The first element is added immediately following\n`before`. The second element is added prior to `after`.\ncompact (bool): In compact mode, the enclosing parentheses are on\nthe same lines as the first and last list item.\nsep (str): The string that follows each list item when compact is\ntrue. If compact is false, the separator is omitted for the\nlast item.\nskip_last_sep (bool): When compact is false, whether the last line\nshould have a trailing separator. Ignored when compact is true.", "source": "codesearchnet"}
{"code": "def fix_variables(self, fixed):\n        \n        for v, val in fixed.items():\n            self.fix_variable(v, val)", "docstring": "Fix the value of the variables and remove it from a binary quadratic model.\n\nArgs:\nfixed (dict):\nA dictionary of variable assignments.\n\nExamples:\n>>> bqm = dimod.BinaryQuadraticModel({'a': -.5, 'b': 0., 'c': 5}, {('a', 'b'): -1}, 0.0, dimod.SPIN)\n>>> bqm.fix_variables({'a': -1, 'b': +1})", "source": "juraj-google-style"}
{"code": "def enable_napps(cls, napps):\n    mgr = NAppsManager()\n    for napp in napps:\n        mgr.set_napp(*napp)\n        LOG.info('NApp %s:', mgr.napp_id)\n        cls.enable_napp(mgr)", "docstring": "Enable a list of NApps.\n\nArgs:\nnapps (list): List of NApps.", "source": "codesearchnet"}
{"code": "def _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining):\n    graph_def = func.graph.as_graph_def()\n    if not lower_control_flow:\n        graph_def = disable_lower_using_switch_merge(graph_def)\n    for function in graph_def.library.function:\n        if 'api_implements' in function.attr:\n            del function.attr['api_implements']\n    meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)\n    for name in ['variables', 'model_variables', 'trainable_variables', 'local_variables']:\n        raw_list = []\n        for raw in meta_graph.collection_def['variables'].bytes_list.value:\n            variable = variable_pb2.VariableDef()\n            variable.ParseFromString(raw)\n            variable.ClearField('initializer_name')\n            raw_list.append(variable.SerializeToString())\n        meta_graph.collection_def[name].bytes_list.value[:] = raw_list\n    fetch_collection = meta_graph_pb2.CollectionDef()\n    for array in func.inputs + func.outputs:\n        fetch_collection.node_list.value.append(array.name)\n    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)\n    config = config_pb2.ConfigProto()\n    rewrite_options = config.graph_options.rewrite_options\n    rewrite_options.min_graph_nodes = -1\n    rewrite_options.optimizers.append('function')\n    if aggressive_inlining:\n        rewrite_options.function_optimization = rewriter_config_pb2.RewriterConfig.AGGRESSIVE\n    return tf_optimizer.OptimizeGraph(config, meta_graph)", "docstring": "Apply function inline optimization to the graph.\n\nReturns the GraphDef after Grappler's function inlining optimization is\napplied. This optimization does not work on models with control flow.\n\nArgs:\nfunc: ConcreteFunction.\nlower_control_flow: Boolean indicating whether or not to lower control flow\nops such as If and While. (default True)\naggressive_inlining: Boolean indicating whether or not to do aggressive\nfunction inlining (might be unsafe if function has stateful ops not\nproperly connected to control outputs).\n\nReturns:\nGraphDef", "source": "github-repos"}
{"code": "def _sysapi_changed_nilrt():\n    nisysapi_path = '/usr/local/natinst/share/nisysapi.ini'\n    if (os.path.exists(nisysapi_path) and _file_changed_nilrt(nisysapi_path)):\n        return True\n    restartcheck_state_dir = '/var/lib/salt/restartcheck_state'\n    nisysapi_conf_d_path = '/usr/lib/{0}/nisysapi/conf.d/experts/'.format(('arm-linux-gnueabi' if ('arm' in __grains__.get('cpuarch')) else 'x86_64-linux-gnu'))\n    if os.path.exists(nisysapi_conf_d_path):\n        rs_count_file = '{0}/sysapi.conf.d.count'.format(restartcheck_state_dir)\n        if (not os.path.exists(rs_count_file)):\n            return True\n        with salt.utils.files.fopen(rs_count_file, 'r') as fcount:\n            current_nb_files = len(os.listdir(nisysapi_conf_d_path))\n            rs_stored_nb_files = int(fcount.read())\n            if (current_nb_files != rs_stored_nb_files):\n                return True\n        for fexpert in os.listdir(nisysapi_conf_d_path):\n            if _file_changed_nilrt('{0}/{1}'.format(nisysapi_conf_d_path, fexpert)):\n                return True\n    return False", "docstring": "Besides the normal Linux kernel driver interfaces, NILinuxRT-supported hardware features an\nextensible, plugin-based device enumeration and configuration interface named \"System API\".\nWhen an installed package is extending the API it is very hard to know all repercurssions and\nactions to be taken, so reboot making sure all drivers are reloaded, hardware reinitialized,\ndaemons restarted, etc.\n\nReturns:\n- True/False depending if nisysapi .ini files got modified/touched\n- False if no nisysapi .ini files exist", "source": "codesearchnet"}
{"code": "def search(self, **kwargs):\n        \n        path = self._get_path('search')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get movies that match the search query string from the API.\n\nArgs:\nq (optional): plain text search query; remember to URI encode\npage_limit (optional): number of search results to show per page,\ndefault=30\npage (optional): results page number, default=1\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs, multiplier=1):\n    input_shape = tuple(inputs.get_shape().as_list())\n    bias_shape = calculate_bias_shape(input_shape, self._bias_dims)\n    if (len(input_shape) < 2):\n        raise base.IncompatibleShapeError('Rank of input shape must be >=2 not: {}.'.format(len(input_shape)))\n    if ((self._input_shape is not None) and (input_shape[1:] != self._input_shape[1:])):\n        raise base.IncompatibleShapeError('Input shape has changed.')\n    if callable(self._output_shape):\n        self._output_shape = self._output_shape()\n        if (self._output_shape is None):\n            raise base.ParentNotBuiltError('Build the original untransposed module before building this one.')\n    if ((self._output_shape is not None) and (self._output_shape[1:] != input_shape[1:])):\n        raise base.IncompatibleShapeError('Input shape must be {} not: {}.'.format(self._output_shape, input_shape[1]))\n    self._input_shape = input_shape\n    dtype = inputs.dtype\n    if ('b' not in self._initializers):\n        self._initializers['b'] = create_bias_initializer(bias_shape, dtype)\n    self._b = tf.get_variable('b', shape=bias_shape, dtype=dtype, initializer=self._initializers['b'], partitioner=self._partitioners.get('b', None), regularizer=self._regularizers.get('b', None))\n    bias = self._b\n    if (multiplier != 1):\n        bias = (bias * multiplier)\n    outputs = (inputs + bias)\n    return outputs", "docstring": "Connects the Add module into the graph, with input Tensor `inputs`.\n\nArgs:\ninputs: A Tensor of size `[batch_size, input_size1, ...]`.\nmultiplier: A scalar or Tensor which the bias term is multiplied by\nbefore adding it to `inputs`. Anything which works in the expression\n`bias * multiplier` is acceptable here. This may be useful if you want\nto add a bias in one place and subtract the same bias in another place\nvia `multiplier=-1`.\n\nReturns:\nA Tensor of size `[batch_size, input_size1, ...]`.\n\nRaises:\nbase.IncompatibleShapeError: If the input is not a >= 2D `Tensor`.\nbase.IncompatibleShapeError: If connecting the module into the graph\nany time after the first time, and the inferred size of the input does\nnot match previous invocations.\nbase.IncompatibleShapeError: If the `output_shape` has been specified\nbut it does not match the input_shape`.\nbase.ParentNotBuiltError: If the module is a transposed and the original\nuntransposed module has not been built.", "source": "codesearchnet"}
{"code": "def save_weights_to_hdf5_group(f, layers):\n    from tensorflow.python.keras import __version__ as keras_version\n    save_attributes_to_hdf5_group(f, 'layer_names', [layer.name.encode('utf8') for layer in layers])\n    f.attrs['backend'] = backend.backend().encode('utf8')\n    f.attrs['keras_version'] = str(keras_version).encode('utf8')\n    for layer in sorted(layers, key=lambda x: x.name):\n        g = f.create_group(layer.name)\n        weights = _legacy_weights(layer)\n        weight_values = backend.batch_get_value(weights)\n        weight_names = [w.name.encode('utf8') for w in weights]\n        save_attributes_to_hdf5_group(g, 'weight_names', weight_names)\n        for name, val in zip(weight_names, weight_values):\n            param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)\n            if not val.shape:\n                param_dset[()] = val\n            else:\n                param_dset[:] = val", "docstring": "Saves the weights of a list of layers to a HDF5 group.\n\nArgs:\nf: HDF5 group.\nlayers: List of layer instances.", "source": "github-repos"}
{"code": "def result(self, timeout=None):\n        \n        self._blocking_poll(timeout=timeout)\n\n        if self._exception is not None:\n            \n            \n            raise self._exception\n\n        return self._result", "docstring": "Get the result of the operation, blocking if necessary.\n\nArgs:\ntimeout (int):\nHow long (in seconds) to wait for the operation to complete.\nIf None, wait indefinitely.\n\nReturns:\ngoogle.protobuf.Message: The Operation's result.\n\nRaises:\ngoogle.api_core.GoogleAPICallError: If the operation errors or if\nthe timeout is reached before the operation completes.", "source": "juraj-google-style"}
{"code": "def set(self, *args):\n        \n        assert len(args) in (1, 2)\n        if len(args) == 1:\n            value = args[0]\n            self._impl.set(value)\n        else:\n            index, value = args\n            if isinstance(value, Real):\n                self._impl.setTplDbl(Tuple(index)._impl, value)\n            elif isinstance(value, basestring):\n                self._impl.setTplStr(Tuple(index)._impl, value)\n            else:\n                raise TypeError", "docstring": "Set the value of a single instance of this parameter.\n\nArgs:\nargs: value if the parameter is scalar, index and value\notherwise.\n\nRaises:\nRuntimeError: If the entity has been deleted in the underlying\nAMPL.\n\nTypeError: If the parameter is not scalar and the index is not\nprovided.", "source": "juraj-google-style"}
{"code": "def load_exons(self, exons, genes=None, build='37'):\n        \n        genes = genes or self.ensembl_genes(build)\n        for exon in exons:\n            exon_obj = build_exon(exon, genes)\n            if not exon_obj:\n                continue\n            \n            res = self.exon_collection.insert_one(exon_obj)", "docstring": "Create exon objects and insert them into the database\n\nArgs:\nexons(iterable(dict))", "source": "juraj-google-style"}
{"code": "def get(self, personId):\n        \n        check_type(personId, basestring, may_be_none=False)\n\n        \n        json_data = self._session.get(API_ENDPOINT + '/' + personId)\n\n        \n        return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Get a person's details, by ID.\n\nArgs:\npersonId(basestring): The ID of the person to be retrieved.\n\nReturns:\nPerson: A Person object with the details of the requested person.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"}
{"code": "def _GenerateNames(name, fromlist, globals):\n\n    def GetCurrentPackage(globals):\n        'Finds the name of the package for the currently executing module.'\n        if (not globals):\n            return None\n        current = globals.get('__name__')\n        if (not current):\n            return None\n        current_file = globals.get('__file__')\n        if (not current_file):\n            return None\n        root = os.path.splitext(os.path.basename(current_file))[0]\n        if (root == '__init__'):\n            return current\n        else:\n            return current.rpartition('.')[0]\n    curpkg = GetCurrentPackage(globals)\n    names = set()\n    for from_entry in (fromlist or []):\n        entry = (((name + '.') + from_entry) if name else from_entry)\n        names.add(entry)\n        if curpkg:\n            names.add(((curpkg + '.') + entry))\n    while name:\n        names.add(name)\n        if curpkg:\n            names.add(((curpkg + '.') + name))\n        name = name.rpartition('.')[0]\n    return names", "docstring": "Generates the names of modules that might be loaded via this import.\n\nArgs:\nname: Argument as passed to the importer.\nfromlist: Argument as passed to the importer.\nglobals: Argument as passed to the importer.\n\nReturns:\nA set that contains the names of all modules that are loaded by the\ncurrently executing import statement, as they would show up in sys.modules.\nThe returned set may contain module names that were already loaded before\nthe execution of this import statement.\nThe returned set may contain names that are not real modules.", "source": "codesearchnet"}
{"code": "def _write_object_proto(self, proto, options):\n    write_object_proto_for_resource_variable(self, proto, options)", "docstring": "Writes additional information of the variable into the SavedObject proto.\n\nSubclasses of ResourceVariables could choose to override this method to\ncustomize extra information to provide when saving a SavedModel.\n\nIdeally, this should contain the logic in\nwrite_object_proto_for_resource_variable but `DistributedValue` is an\noutlier at the momemnt. Once `DistributedValue` becomes a proper\nResourceVariable, we should remove the helper method below.\n\nArgs:\nproto: `SavedObject` proto to update.\noptions: A `SaveOption` instance that configures save behavior.", "source": "github-repos"}
{"code": "def _set_median_session_metrics(session_group, aggregation_metric):\n  \n  measurements = sorted(_measurements(session_group, aggregation_metric),\n                        key=operator.attrgetter('metric_value.value'))\n  median_session = measurements[(len(measurements) - 1) \n  del session_group.metric_values[:]\n  session_group.metric_values.MergeFrom(\n      session_group.sessions[median_session].metric_values)", "docstring": "Sets the metrics for session_group to those of its \"median session\".\n\nThe median session is the session in session_group with the median value\nof the metric given by 'aggregation_metric'. The median is taken over the\nsubset of sessions in the group whose 'aggregation_metric' was measured\nat the largest training step among the sessions in the group.\n\nArgs:\nsession_group: A SessionGroup protobuffer.\naggregation_metric: A MetricName protobuffer.", "source": "juraj-google-style"}
{"code": "def init(self):\n    resp = self._execute(Command.NEW_SESSION, {'desiredCapabilities': self.desired_capabilities}, False)\n    resp.raise_for_status()\n    self.session_id = str(resp.session_id)\n    self.capabilities = resp.value", "docstring": "Create Session by desiredCapabilities\n\nSupport:\nAndroid iOS Web(WebView)\n\nReturns:\nWebDriver Object.", "source": "codesearchnet"}
{"code": "def round_to_nearest(dt, n_round_sec=1.0):\n    \n    ts = ts_from_dt(strip_timezone(dt)) + n_round_sec / 2.0\n    res = dt_from_ts(ts - (ts % n_round_sec))\n    return res.replace(tzinfo=dt.tzinfo)", "docstring": "Round datetime up or down to nearest divisor.\n\nRound datetime up or down to nearest number of seconds that divides evenly by\nthe divisor.\n\nAny timezone is preserved but ignored in the rounding.\n\nArgs:\ndt: datetime\n\nn_round_sec : int or float\nDivisor for rounding\n\nExamples:\n- ``n_round_sec`` = 0.1: nearest 10th of a second.\n- ``n_round_sec`` = 1: nearest second.\n- ``n_round_sec`` = 30: nearest half minute.", "source": "juraj-google-style"}
{"code": "def path_in_cache(self, filename, metahash):\n    cpath = self._genpath(filename, metahash)\n    if os.path.exists(cpath):\n        return cpath\n    else:\n        raise CacheMiss", "docstring": "Generates the path to a file in the mh cache.\n\nThe generated path does not imply the file's existence!\n\nArgs:\nfilename: Filename relative to buildroot\nrule: A targets.SomeBuildRule object\nmetahash: hash object", "source": "codesearchnet"}
{"code": "def get_entry(self, pathname_name):\n    pathname_name = self._normalized_entryname(pathname_name)\n    return self.contents[pathname_name]", "docstring": "Retrieves the specified child file or directory entry.\n\nArgs:\npathname_name: The basename of the child object to retrieve.\n\nReturns:\nThe fake file or directory object.\n\nRaises:\nKeyError: if no child exists by the specified name.", "source": "codesearchnet"}
{"code": "def check_get_splits(self, query, num_splits, num_entities):\n    for id_or_name in [True, False, None]:\n        if id_or_name is None:\n            client_entities = helper.create_client_entities(num_entities, False)\n            client_entities.extend(helper.create_client_entities(num_entities, True))\n            num_entities *= 2\n        else:\n            client_entities = helper.create_client_entities(num_entities, id_or_name)\n        mock_client = mock.MagicMock()\n        mock_client_query = mock.MagicMock()\n        mock_client_query.fetch.return_value = client_entities\n        with mock.patch.object(types.Query, '_to_client_query', return_value=mock_client_query):\n            split_queries = query_splitter.get_splits(mock_client, query, num_splits)\n        mock_client_query.fetch.assert_called_once()\n        expected_num_splits = min(num_splits, num_entities + 1)\n        self.assertEqual(len(split_queries), expected_num_splits)\n        prev_client_key = None\n        last_query_seen = False\n        for split_query in split_queries:\n            self.assertFalse(last_query_seen)\n            lt_key = None\n            gte_key = None\n            for _filter in split_query.filters:\n                self.assertEqual(query_splitter.KEY_PROPERTY_NAME, _filter[0])\n                if _filter[1] == '<':\n                    lt_key = _filter[2]\n                elif _filter[1] == '>=':\n                    gte_key = _filter[2]\n            if lt_key is None and gte_key is None:\n                self.assertEqual(1, len(split_queries))\n                break\n            if prev_client_key is None:\n                self.assertIsNone(gte_key)\n                self.assertIsNotNone(lt_key)\n                prev_client_key = lt_key\n            else:\n                self.assertEqual(prev_client_key, gte_key)\n                prev_client_key = lt_key\n                if lt_key is None:\n                    last_query_seen = True", "docstring": "A helper method to test the query_splitter get_splits method.\n\nArgs:\nquery: the query to be split\nnum_splits: number of splits\nnum_entities: number of scatter entities returned to the splitter.", "source": "github-repos"}
{"code": "def slice(array, start, size, ty):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n\n    weld_template = \n    weld_obj.weld_code = weld_template % {\"array\": array_var, \"start\": start,\n                                          \"ty\": ty, \"size\": size}\n\n    return weld_obj", "docstring": "Returns a new array-of-arrays with each array truncated, starting at\nindex `start` for `length` characters.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nstart (int): starting index\nsize (int): length to truncate at\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def _validate_testbed_configs(testbed_configs):\n    \n    seen_names = set()\n    \n    for config in testbed_configs:\n        \n        \n        name = config[keys.Config.key_testbed_name.value]\n        _validate_testbed_name(name)\n        \n        if name in seen_names:\n            raise MoblyConfigError('Duplicate testbed name %s found.' % name)\n        seen_names.add(name)", "docstring": "Validates the testbed configurations.\n\nArgs:\ntestbed_configs: A list of testbed configuration dicts.\n\nRaises:\nMoblyConfigError: Some parts of the configuration is invalid.", "source": "juraj-google-style"}
{"code": "def cumulative_distribution(self, X):\n        \n        self.check_fit()\n\n        \n        def func(*args):\n            return self.probability_density(list(args))\n\n        \n        lower_bound = self.get_lower_bound()\n\n        ranges = [[lower_bound, val] for val in X]\n        return integrate.nquad(func, ranges)[0]", "docstring": "Computes the cumulative distribution function for the copula\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`\n\nReturns:\nnp.array: cumulative probability", "source": "juraj-google-style"}
{"code": "def build_authorization_endpoint(self, request, disable_sso=None):\n    self.load_config()\n    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)\n    if (not redirect_to):\n        redirect_to = django_settings.LOGIN_REDIRECT_URL\n    redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()\n    query = QueryDict(mutable=True)\n    query.update({'response_type': 'code', 'client_id': settings.CLIENT_ID, 'resource': settings.RELYING_PARTY_ID, 'redirect_uri': self.redirect_uri(request), 'state': redirect_to})\n    if (self._mode == 'openid_connect'):\n        query['scope'] = 'openid'\n        if (((disable_sso is None) and settings.DISABLE_SSO) or (disable_sso is True)):\n            query['prompt'] = 'login'\n    return '{0}?{1}'.format(self.authorization_endpoint, query.urlencode())", "docstring": "This function returns the ADFS authorization URL.\n\nArgs:\nrequest(django.http.request.HttpRequest): A django Request object\ndisable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt.\n\nReturns:\nstr: The redirect URI", "source": "codesearchnet"}
{"code": "def interceptable(func):\n\n    @functools.wraps(func)\n    def func_wrapped(*args, **kwargs):\n        with get_next_interceptor() as interceptor:\n            return interceptor(func, *args, **kwargs)\n    return func_wrapped", "docstring": "Decorator that wraps `func` so that its execution is intercepted.\n\nThe wrapper passes `func` to the interceptor for the current thread.\n\nIf there is no next interceptor, we perform an \"immediate\" call to `func`.\nThat is, `func` terminates without forwarding its execution to another\ninterceptor.\n\nArgs:\nfunc: Function to wrap.\n\nReturns:\nThe decorated function.", "source": "codesearchnet"}
{"code": "def dirac_notation(state: Sequence, decimals: int=2) -> str:\n    perm_list = [''.join(seq) for seq in itertools.product('01', repeat=(int(len(state)).bit_length() - 1))]\n    components = []\n    ket = '|{}⟩'\n    for x in range(len(perm_list)):\n        format_str = (('({:.' + str(decimals)) + 'g})')\n        val = (round(state[x].real, decimals) + (1j * round(state[x].imag, decimals)))\n        if ((round(val.real, decimals) == 0) and (round(val.imag, decimals) != 0)):\n            val = val.imag\n            format_str = (('{:.' + str(decimals)) + 'g}j')\n        elif ((round(val.imag, decimals) == 0) and (round(val.real, decimals) != 0)):\n            val = val.real\n            format_str = (('{:.' + str(decimals)) + 'g}')\n        if (val != 0):\n            if (round(state[x], decimals) == 1):\n                components.append(ket.format(perm_list[x]))\n            else:\n                components.append((format_str + ket).format(val, perm_list[x]))\n    if (not components):\n        return '0'\n    return ' + '.join(components).replace(' + -', ' - ')", "docstring": "Returns the wavefunction as a string in Dirac notation.\n\nFor example:\n\nstate = np.array([1/np.sqrt(2), 1/np.sqrt(2)], dtype=np.complex64)\nprint(dirac_notation(state)) -> 0.71|0⟩ + 0.71|1⟩\n\nArgs:\nstate: A sequence representing a wave function in which the ordering\nmapping to qubits follows the standard Kronecker convention of\nnumpy.kron.\ndecimals: How many decimals to include in the pretty print.\n\nReturns:\nA pretty string consisting of a sum of computational basis kets\nand non-zero floats of the specified accuracy.", "source": "codesearchnet"}
{"code": "def _recreate(self, proto, node_id, nodes):\n    registered_class = registration.get_registered_class(proto.registered_name)\n    if registered_class is None:\n        registered_class = _BUILT_IN_REGISTRATIONS.get(proto.WhichOneof('kind'))\n    dependencies = {}\n    for key, dep_node_id in self._get_node_dependencies(proto).items():\n        dependencies[key] = nodes[dep_node_id]\n    if registered_class:\n        obj = registered_class._deserialize_from_proto(proto=proto.serialized_user_proto, object_proto=proto, dependencies=dependencies, export_dir=self._export_dir, asset_file_def=self._asset_file_def, operation_attributes=self._operation_attributes)\n        if isinstance(obj, base.Trackable):\n            setter = type(obj)._add_trackable_child\n        else:\n            setter = setattr\n        return (obj, setter)\n    else:\n        return self._recreate_default(proto, node_id, dependencies)", "docstring": "Creates a Python object from a SavedObject protocol buffer.\n\nArgs:\nproto: a SavedObject proto\nnode_id: int, the index of this object in the SavedObjectGraph node list.\nnodes: dict mapping int node_ids -> created objects.\n\nReturns:\nThe recreated object, and the set-attribute function for reconnecting\nthe trackable children.", "source": "github-repos"}
{"code": "def import_module(self, module=None, recursive=False, **params):\n        \n        if module is None:\n            if \"module_\" in params:\n                warnings.warn(\n                    \"Parameter 'module_' is deprecated. Use 'module' instead.\")\n                module = params.pop(\"module_\")\n            else:\n                raise ValueError(\"no module specified\")\n\n        if \"bases\" in params:\n            params[\"bases\"] = get_impls(params[\"bases\"])\n\n        space = (\n            self._impl.model.currentspace\n        ) = self._impl.new_space_from_module(\n            module, recursive=recursive, **params\n        )\n        return get_interfaces(space)", "docstring": "Create a child space from an module.\n\nArgs:\nmodule: a module object or name of the module object.\nrecursive: Not yet implemented.\n**params: arguments to pass to ``new_space``\n\nReturns:\nThe new child space created from the module.", "source": "juraj-google-style"}
{"code": "def transform_data(input_handle, outfile_prefix, working_dir, schema_file, transform_dir=None, max_rows=None, pipeline_args=None, publish_to_bq=False, project=None, metrics_table=None, metrics_dataset=None):\n\n    def preprocessing_fn(inputs):\n        \n        outputs = {}\n        for key in taxi.DENSE_FLOAT_FEATURE_KEYS:\n            outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(_fill_in_missing(inputs[key]))\n        for key in taxi.VOCAB_FEATURE_KEYS:\n            outputs[taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(_fill_in_missing(inputs[key]), top_k=taxi.VOCAB_SIZE, num_oov_buckets=taxi.OOV_SIZE)\n        for key in taxi.BUCKET_FEATURE_KEYS:\n            outputs[taxi.transformed_name(key)] = transform.bucketize(_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)\n        for key in taxi.CATEGORICAL_FEATURE_KEYS:\n            outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])\n        taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])\n        tips = _fill_in_missing(inputs[taxi.LABEL_KEY])\n        outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(tf.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), tf.cast(tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))\n        return outputs\n    namespace = metrics_table\n    metrics_monitor = None\n    if publish_to_bq:\n        metrics_monitor = MetricsReader(publish_to_bq=publish_to_bq, project_name=project, bq_table=metrics_table, bq_dataset=metrics_dataset, namespace=namespace, filters=MetricsFilter().with_namespace(namespace))\n    schema = taxi.read_schema(schema_file)\n    raw_feature_spec = taxi.get_raw_feature_spec(schema)\n    raw_schema = schema_utils.schema_from_feature_spec(raw_feature_spec)\n    raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)\n    pipeline = beam.Pipeline(argv=pipeline_args)\n    with tft_beam.Context(temp_dir=working_dir):\n        query = taxi.make_sql(input_handle, max_rows, for_eval=False)\n        raw_data = pipeline | 'ReadBigQuery' >> ReadFromBigQuery(query=query, project=project, use_standard_sql=True) | 'Measure time: start' >> beam.ParDo(MeasureTime(namespace))\n        decode_transform = beam.Map(taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)\n        if transform_dir is None:\n            decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform\n            transform_fn = (decoded_data, raw_data_metadata) | 'Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)\n            _ = transform_fn | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir)\n        else:\n            transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)\n        shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()\n        decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform\n        transformed_data, transformed_metadata = ((decoded_data, raw_data_metadata), transform_fn) | 'Transform' >> tft_beam.TransformDataset()\n        coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)\n        _ = transformed_data | 'SerializeExamples' >> beam.Map(coder.encode) | 'Measure time: end' >> beam.ParDo(MeasureTime(namespace)) | 'WriteExamples' >> beam.io.WriteToTFRecord(os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz')\n    result = pipeline.run()\n    result.wait_until_finish()\n    if metrics_monitor:\n        metrics_monitor.publish_metrics(result)", "docstring": "The main 
tf.transform method which analyzes and transforms data.\n\nArgs:\ninput_handle: BigQuery table name to process specified as DATASET.TABLE or\npath to csv file with input data.\noutfile_prefix: Filename prefix for emitted transformed examples\nworking_dir: Directory in which transformed examples and transform function\nwill be emitted.\nschema_file: An file path that contains a text-serialized TensorFlow\nmetadata schema of the input data.\ntransform_dir: Directory in which the transform output is located. If\nprovided, this will load the transform_fn from disk instead of computing\nit over the data. Hint: this is useful for transforming eval data.\nmax_rows: Number of rows to query from BigQuery\npipeline_args: additional DataflowRunner or DirectRunner args passed to the\nbeam pipeline.", "source": "github-repos"}
{"code": "def main(raw_args=None):\n    multifile_choices = frozenset(['c_files'])\n    if (raw_args is None):\n        raw_args = sys.argv[1:]\n    parser = build_parser()\n    args = parser.parse_args(raw_args)\n    if ((args.output is None) and (args.format in multifile_choices)):\n        print(('You must specify an output file with -o, --output when using a format that produces multiple files (-f %s)' % args.format))\n        return 1\n    desc = TBDescriptor(args.bus_definition)\n    if (args.format == 'json'):\n        print('JSON output is not yet supported')\n        return 1\n    block = desc.get_block()\n    template_map = {'command_map_c': 'command_map_c.c.tpl', 'command_map_h': 'command_map_c.h.tpl', 'config_map_c': 'config_variables_c.c.tpl', 'config_map_h': 'config_variables_c.h.tpl'}\n    template_name = template_map.get(args.format)\n    data = block.render_template(template_name)\n    print(data)\n    return 0", "docstring": "Run the iotile-tbcompile script.\n\nArgs:\nraw_args (list): Optional list of command line arguments.  If not\npassed these are pulled from sys.argv.", "source": "codesearchnet"}
{"code": "def __init__(self, iterable):\n        \n        if not is_iterable(iterable):\n            raise TypeError(\"Cannot construct Queryable from non-iterable {0}\"\n                            .format(str(type(iterable))[7: -2]))\n\n        self._iterable = iterable", "docstring": "Construct a Queryable from any iterable.\n\nArgs:\niterable: Any object supporting the iterator protocol.\n\nRaises:\nTypeError: if iterable does not support the iterator protocol.", "source": "juraj-google-style"}
{"code": "def binary_mask_to_rle(mask):\n    if is_torch_tensor(mask):\n        mask = mask.numpy()\n    pixels = mask.flatten()\n    pixels = np.concatenate([[0], pixels, [0]])\n    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1\n    runs[1::2] -= runs[::2]\n    return list(runs)", "docstring": "Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.\n\nArgs:\nmask (`torch.Tensor` or `numpy.array`):\nA binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target\nsegment_id or class_id.\nReturns:\n`List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE\nformat.", "source": "github-repos"}
{"code": "def parse(self, filename):\n    filehandle = storage.open_vos_or_local(filename, 'rb')\n    assert (filehandle is not None), 'Failed to open file {} '.format(filename)\n    filestr = filehandle.read()\n    filehandle.close()\n    assert (filestr is not None), 'File contents are None'\n    observations = self._parse_observation_list(filestr)\n    self._parse_observation_headers(filestr, observations)\n    sys_header = self._parse_system_header(filestr)\n    sources = self._parse_source_data(filestr, observations)\n    return AstromData(observations, sys_header, sources, discovery_only=self.discovery_only)", "docstring": "Parses a file into an AstromData structure.\n\nArgs:\nfilename: str\nThe name of the file whose contents will be parsed.\n\nReturns:\ndata: AstromData\nThe file contents extracted into a data structure for programmatic\naccess.", "source": "codesearchnet"}
{"code": "def poll_error(self):\n    if self.block:\n        return self.error\n    new_list = self.error[self.old_error_size:]\n    self.old_error_size += len(new_list)\n    return new_list", "docstring": "Append lines from stderr to self.errors.\n\nReturns:\nlist: The lines added since last call", "source": "codesearchnet"}
{"code": "def create_run_config(hp, output_dir=None):\n  \n  save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n  save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n  if save_ckpt_secs:\n    save_ckpt_steps = None\n  assert FLAGS.output_dir or FLAGS.checkpoint_path\n  tpu_config_extra_kwargs = {}\n  if FLAGS.tpu_job_name is not None:\n    tpu_config_extra_kwargs[\"tpu_job_name\"] = FLAGS.tpu_job_name\n\n  if getattr(hp, \"mtf_mode\", False):\n    save_ckpt_steps = None  \n    save_ckpt_secs = None  \n    tpu_config_extra_kwargs = {\n        \"num_cores_per_replica\": 1,\n        \"per_host_input_for_training\": tpu_config.InputPipelineConfig.BROADCAST,\n    }\n\n  \n  \n  daisy_chain_variables = (\n      hp.daisy_chain_variables and\n      hp.activation_dtype == \"float32\" and\n      hp.weight_dtype == \"float32\")\n  return trainer_lib.create_run_config(\n      model_name=FLAGS.model,\n      model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),\n      master=FLAGS.master,\n      iterations_per_loop=FLAGS.iterations_per_loop,\n      num_shards=FLAGS.tpu_num_shards,\n      log_device_placement=FLAGS.log_device_placement,\n      save_checkpoints_steps=save_ckpt_steps,\n      save_checkpoints_secs=save_ckpt_secs,\n      keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n      keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n      num_gpus=FLAGS.worker_gpu,\n      gpu_order=FLAGS.gpu_order,\n      num_async_replicas=FLAGS.worker_replicas,\n      gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n      enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n      use_tpu=FLAGS.use_tpu,\n      use_tpu_estimator=FLAGS.use_tpu_estimator,\n      xla_jit_level=FLAGS.xla_jit_level,\n      schedule=FLAGS.schedule,\n      no_data_parallelism=hp.no_data_parallelism,\n      optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,\n      daisy_chain_variables=daisy_chain_variables,\n      ps_replicas=FLAGS.ps_replicas,\n      ps_job=FLAGS.ps_job,\n      ps_gpu=FLAGS.ps_gpu,\n      sync=FLAGS.sync,\n      worker_id=FLAGS.worker_id,\n      worker_job=FLAGS.worker_job,\n      random_seed=FLAGS.random_seed,\n      tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n      inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n      log_step_count_steps=FLAGS.log_step_count_steps,\n      intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n      tpu_config_extra_kwargs=tpu_config_extra_kwargs,\n      cloud_tpu_name=FLAGS.cloud_tpu_name)", "docstring": "Create a run config.\n\nArgs:\nhp: model hyperparameters\noutput_dir: model's output directory, defaults to output_dir flag.\n\nReturns:\na run config", "source": "juraj-google-style"}
{"code": "def get_operation(self, name, options=None):\n    request = operations_pb2.GetOperationRequest(name=name)\n    return self._get_operation(request, options)", "docstring": "Gets the latest state of a long-running operation.  Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.\n\nExample:\n>>> from google.gapic.longrunning import operations_client\n>>> api = operations_client.OperationsClient()\n>>> name = ''\n>>> response = api.get_operation(name)\n\nArgs:\nname (string): The name of the operation resource.\noptions (:class:`google.gax.CallOptions`): Overrides the default\nsettings for this call, e.g, timeout, retries etc.\n\nReturns:\nA :class:`google.longrunning.operations_pb2.Operation` instance.\n\nRaises:\n:exc:`google.gax.errors.GaxError` if the RPC is aborted.\n:exc:`ValueError` if the parameters are invalid.", "source": "codesearchnet"}
{"code": "def _parse_address(self, config):\n        \n        match = re.search(r'ip address ([^\\s]+)', config)\n        value = match.group(1) if match else None\n        return dict(address=value)", "docstring": "Parses the config block and returns the ip address value\n\nThe provided configuration block is scaned and the configured value\nfor the IP address is returned as a dict object.  If the IP address\nvalue is not configured, then None is returned for the value\n\nArgs:\nconfig (str): The interface configuration block to parse\n\nReturn:\ndict: A dict object intended to be merged into the resource dict", "source": "juraj-google-style"}
{"code": "def getsource(classorfunc):\n    \n    if _isbuiltin(classorfunc):\n        return ''\n\n    try:\n        source = inspect.getsource(classorfunc)\n    except TypeError:  \n        source = getsourcefallback(classorfunc)\n\n    declaration = []\n\n    lines = source.splitlines()\n    if PY2 and not isinstance(source, unicode):\n        encoding = detect_encoding(iter(lines).next)[0]\n        sourcelines = (s.decode(encoding) for s in lines)\n    else:\n        sourcelines = iter(lines)\n\n    \n    found_keyword = False\n    for line in sourcelines:\n        words = line.split()\n        if not words:\n            continue\n        if words[0] in ('def', 'class'):\n            found_keyword = True\n        if found_keyword:\n            cind = line.find(':')\n            if cind > 0:\n                declaration.append(line[:cind + 1])\n                after_decl = line[cind + 1:].strip()\n                break\n            else:\n                declaration.append(line)\n\n    bodylines = list(sourcelines)  \n\n    \n    \n    \n    \n    if type(classorfunc) == type:\n        cls = classorfunc\n        base_imports = {}\n        for base in cls.__bases__:\n            if base.__name__ == 'object' and base.__module__ == 'builtins':  \n                continue\n            if base in base_imports:\n                continue\n            if base.__module__ == '__main__':\n                continue\n            base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)\n        cind = declaration[0].index('class ')\n\n        declstring = declaration[0][:cind] + 'class %s(%s):%s' % (\n            cls.__name__,\n            ','.join([base.__name__ for base in cls.__bases__]),\n            after_decl)\n        declaration = [impstring for c, impstring in base_imports.items()\n                       if c.__module__ != '__builtin__']\n        declaration.append(declstring)\n\n    else:\n        declaration[-1] += after_decl\n\n    return '\\n'.join(declaration + bodylines)", "docstring": "Return the source code for a class or function.\n\nNotes:\nReturned source will not include any decorators for the object.\nThis will only return the explicit declaration of the object, not any dependencies\n\nArgs:\nclassorfunc (type or function): the object to get the source code for\n\nReturns:\nstr: text of source code (without any decorators). Note: in python 2, this returns unicode", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype, axis=0):\n    raise NotImplementedError", "docstring": "Partitions the given `shape` and returns the partition results.\n\nExamples of a partitioner that allocates a fixed number of shards:\n\n```python\npartitioner = FixedShardsPartitioner(num_shards=2)\npartitions = partitioner(tf.TensorShape([10, 3], tf.float32), axis=0)\nprint(partitions) # [2, 0]\n```\n\nArgs:\nshape: a `tf.TensorShape`, the shape to partition.\ndtype: a `tf.dtypes.Dtype` indicating the type of the partition value.\naxis: The axis to partition along.  Default: outermost axis.\n\nReturns:\nA list of integers representing the number of partitions on each axis,\nwhere i-th value corresponds to i-th axis.", "source": "github-repos"}
{"code": "def _unknown_args(self, args):\n    for u in args:\n        self.tcex.log.warning(u'Unsupported arg found ({}).'.format(u))", "docstring": "Log argparser unknown arguments.\n\nArgs:\nargs (list): List of unknown arguments", "source": "codesearchnet"}
{"code": "def extract_table(tabletag):\n    \n    \n    theadtag = tabletag.find_next('thead')\n\n    headertags = theadtag.find_all('th')\n    if len(headertags) == 0:\n        headertags = theadtag.find_all('td')\n    headers = []\n    for tag in headertags:\n        headers.append(get_text(tag))\n\n    tbodytag = tabletag.find_next('tbody')\n    trtags = tbodytag.find_all('tr')\n\n    table = list()\n    for trtag in trtags:\n        row = dict()\n        tdtags = trtag.find_all('td')\n        for i, tag in enumerate(tdtags):\n            row[headers[i]] = get_text(tag)\n        table.append(row)\n    return table", "docstring": "Extract HTML table as list of dictionaries\n\nArgs:\ntabletag (Tag): BeautifulSoup tag\n\nReturns:\nstr: Text of tag stripped of leading and trailing whitespace and newlines and with &nbsp replaced with space", "source": "juraj-google-style"}
{"code": "def get_sharded_shape(self, shape, shard_index=None):\n    if self._shard_dimension is None or self._number_of_shards is None:\n        return None\n    if shard_index is not None:\n        if shard_index < 0 or shard_index >= self.number_of_shards:\n            raise ValueError(f'Requested shard_index {shard_index}, but shard_index must be in [0,{self._number_of_shards}).')\n    shape = tensor_shape.as_shape(shape)\n    if self._number_of_shards == 1:\n        return shape\n    ndims = shape.ndims\n    if ndims is None:\n        raise ValueError(f'Shape {shape} must be a known shape.')\n    if ndims <= self._shard_dimension:\n        raise ValueError(f'Shape {shape.as_list()} does not contain shard_dimension {self._shard_dimension}')\n    dims = shape.as_list()\n    if dims[self._shard_dimension] is None:\n        raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known at construction time.')\n    if dims[self._shard_dimension] % self._number_of_shards != 0:\n        raise ValueError(f'Shape {shape.as_list()} cannot be sharded {self._number_of_shards} ways along dimension {self._shard_dimension}')\n    dims[self._shard_dimension] \n    return tensor_shape.TensorShape(dims)", "docstring": "Returns the shape of a shard of a full Tensor.\n\nWhen given the shape of a 'full-size' Tensor, returns the shape of\nthe sub-Tensor after it has been sharded. Freezes the policy if it\nhas not yet been frozen.\n\nArgs:\nshape: The shape of the full-size Tensor to be sharded.\nshard_index: The index of the shard whose shape should be returned.\nshard_index can be None for sharding policies that use the same shape\nfor every shard.\n\nReturns:\nThe shape of the sharded version of the Tensor.\n\nRaises:\nValueError: If shard_index is None when shards are of different\nshapes; or shard_index is not None and\n!(0<=shard_index<number_of_shards); or shape does not have at\nleast self.shard_dimension+1 dimensions; or the value of\nshape's shard dimension is not a multiple of\nself.number_of_shards", "source": "github-repos"}
{"code": "def add(self, data, conn_type, squash=True):\n    if (data in self.children):\n        return data\n    if (not squash):\n        self.children.append(data)\n        return data\n    if (self.connector == conn_type):\n        if (isinstance(data, QBase) and (not data.negated) and ((data.connector == conn_type) or (len(data) == 1))):\n            self.children.extend(data.children)\n            return self\n        else:\n            self.children.append(data)\n            return data\n    else:\n        obj = self._new_instance(self.children, self.connector, self.negated)\n        self.connector = conn_type\n        self.children = [obj, data]\n        return data", "docstring": "Combine this tree and the data represented by data using the\nconnector conn_type. The combine is done by squashing the node other\naway if possible.\n\nThis tree (self) will never be pushed to a child node of the\ncombined tree, nor will the connector or negated properties change.\n\nReturn a node which can be used in place of data regardless if the\nnode other got squashed or not.\n\nIf `squash` is False the data is prepared and added as a child to\nthis tree without further logic.\n\nArgs:\nconn_type (str, optional [\"AND\", \"OR\"]): connection method", "source": "codesearchnet"}
{"code": "def adversary(self, name, owner=None, **kwargs):\n        \n        return Adversary(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Adversary TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def slh_associate(a_features, b_features, max_sigma=5):\n    proximity = _weighted_proximity(a_features, b_features)\n    association_matrix = _proximity_to_association(proximity)\n    associations = []\n    if (association_matrix.shape[0] == 0):\n        return np.zeros((0, 2))\n    col_max_idxs = np.argmax(association_matrix, axis=0)\n    prox_threshold = np.exp((((- 0.5) * max_sigma) * max_sigma))\n    for (row_idx, row) in enumerate(association_matrix):\n        if (row.shape[0] == 0):\n            continue\n        col_idx = np.argmax(row)\n        if (col_max_idxs[col_idx] == row_idx):\n            prox = proximity[(row_idx, col_idx)]\n            if (prox > prox_threshold):\n                associations.append((row_idx, col_idx))\n    if (len(associations) == 0):\n        return np.zeros((0, 2))\n    return np.vstack(associations)", "docstring": "An implementation of the Scott and Longuet-Higgins algorithm for feature\nassociation.\n\nThis function takes two lists of features. Each feature is a\n:py:class:`MultivariateNormal` instance representing a feature\nlocation and its associated uncertainty.\n\nArgs:\na_features (list of MultivariateNormal)\nb_features (list of MultivariateNormal)\nmax_sigma (float or int): maximum number of standard deviations two\nfeatures can be separated and still considered \"associated\".\n\nReturns:\n(array): A Nx2 array of feature associations. Column 0 is the index into\nthe a_features list, column 1 is the index into the b_features list.", "source": "codesearchnet"}
{"code": "async def warn_user(channel, user):\n    \n\n    data = datatools.get_data()\n    server_id = channel.server.id\n\n    if \"warnings_max\" not in data[\"discord\"][\"servers\"][server_id][_data.modulename]:\n        data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings_max\"] = 3\n    if \"warnings\" not in data[\"discord\"][\"servers\"][server_id][_data.modulename]:\n        data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"] = {}\n\n    if user.id in data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"]:\n        data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"][user.id] += 1\n    else:\n        data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"][user.id] = 1\n\n    datatools.write_data(data)\n\n    warnings = data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"][user.id]\n    max_warnings = data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings_max\"]\n\n    await client.send_typing(channel)\n    embed = ui_embed.user_warning(channel, user, warnings, max_warnings)\n    await embed.send()\n\n    if warnings >= max_warnings:\n        await ban_user(channel, user)", "docstring": "Gives a user a warning, and bans them if they are over the maximum warnings\n\nArgs:\nchannel: The channel to send the warning message in\nuser: The user to give the warning to", "source": "juraj-google-style"}
{"code": "def broadcast_implementation(self, tensor, destinations):\n    return simple_broadcast(tensor, destinations, always_mirrored=True, canonicalize_devices=self._canonicalize_devices)", "docstring": "Implementation of `broadcast`.\n\nArgs:\ntensor: a `tf.Tensor` like object. The value to broadcast.\ndestinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a\n`tf.Tensor` alike object, or a device string. It specifies the devices\nto broadcast to.\n`destinations`. Note that if it's a `tf.Variable`, the value is\nbroadcasted to the devices of that variable, this method doesn't update\nthe variable.\n\nReturns:\nA `tf.Tensor` or `tf.distribute.DistributedValues`.", "source": "github-repos"}
{"code": "def get_version():\n    sys.modules['setup_helpers'] = object()\n    sys.modules['setup_helpers_macos'] = object()\n    sys.modules['setup_helpers_windows'] = object()\n    filename = os.path.join(_ROOT_DIR, 'setup.py')\n    loader = importlib.machinery.SourceFileLoader('setup', filename)\n    setup_mod = loader.load_module()\n    return setup_mod.VERSION", "docstring": "Get the current version from ``setup.py``.\n\nAssumes that importing ``setup.py`` will have no side-effects (i.e.\nassumes the behavior is guarded by ``if __name__ == \"__main__\"``).\n\nReturns:\nstr: The current version in ``setup.py``.", "source": "codesearchnet"}
{"code": "def WriteGraphExecutionTrace(self, graph_execution_trace):\n    debug_event = debug_event_pb2.DebugEvent(graph_execution_trace=graph_execution_trace)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteGraphExecutionTrace(self._dump_root, debug_event)", "docstring": "Write a GraphExecutionTrace proto with the writer.\n\nArgs:\ngraph_execution_trace: A GraphExecutionTrace proto, concerning the value\nof an intermediate tensor or a list of intermediate tensors that are\ncomputed during the graph's execution.", "source": "github-repos"}
{"code": "def _EscapeGlobCharacters(path):\n  \n  drive, path = os.path.splitdrive(path)\n  return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\\1]', path))", "docstring": "Escapes the glob characters in a path.\n\nPython 3 has a glob.escape method, but python 2 lacks it, so we manually\nimplement this method.\n\nArgs:\npath: The absolute path to escape.\n\nReturns:\nThe escaped path string.", "source": "juraj-google-style"}
{"code": "def merge_translations(localization_bundle_path):\n    \n    logging.info(\"Merging translations\")\n    for lang_dir in os.listdir(localization_bundle_path):\n        if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME:\n            continue\n        for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, \"*\" + TRANSLATED_SUFFIX)):\n            strings_path = translated_path[:-1 * len(TRANSLATED_SUFFIX)]\n            localizable_path = os.path.join(localization_bundle_path,\n                                            DEFAULT_LANGUAGE_DIRECTORY_NAME,\n                                            os.path.basename(strings_path))\n\n            localization_merge_back(localizable_path, strings_path, translated_path, strings_path)", "docstring": "Merges the new translation with the old one.\n\nThe translated files are saved as '.translated' file, and are merged with old translated file.\n\nArgs:\nlocalization_bundle_path (str): The path to the localization bundle.", "source": "juraj-google-style"}
{"code": "def prepend(self, node):\n    if (not isinstance(node, grammar.STATEMENTS)):\n        raise ValueError\n    self.to_prepend[(- 1)].appendleft(node)", "docstring": "Prepend a statement to the current statement.\n\nNote that multiple calls to prepend will result in the last statement to be\nprepended to end up at the top.\n\nArgs:\nnode: The statement to prepend.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "codesearchnet"}
{"code": "class Idefics3Encoder(nn.Module):\n\n    def __init__(self, config: Idefics3Config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Idefics3EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Idefics3EncoderLayer`].\n\nArgs:\nconfig: Idefics3Config", "source": "github-repos"}
{"code": "def from_dict(cls, data):\n    try:\n        fulfillment = _fulfillment_from_details(data['condition']['details'])\n    except KeyError:\n        fulfillment = data['condition']['uri']\n    try:\n        amount = int(data['amount'])\n    except ValueError:\n        raise AmountError(('Invalid amount: %s' % data['amount']))\n    return cls(fulfillment, data['public_keys'], amount)", "docstring": "Transforms a Python dictionary to an Output object.\n\nNote:\nTo pass a serialization cycle multiple times, a\nCryptoconditions Fulfillment needs to be present in the\npassed-in dictionary, as Condition URIs are not serializable\nanymore.\n\nArgs:\ndata (dict): The dict to be transformed.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Output`", "source": "codesearchnet"}
{"code": "def _call_post_with_session(self, url, payload):\n    now = datetime.datetime.utcnow()\n    if (now >= self.expires_at):\n        self.session.close()\n        self._create_session()\n    response = self.session.post(url, data=payload)\n    return (response.status_code, response.text)", "docstring": "Make a post request using the session object to a SuccessFactors endpoint.\n\nArgs:\nurl (str): The url to post to.\npayload (str): The json encoded payload to post.", "source": "codesearchnet"}
{"code": "def copy_script(self, filename, id_=(- 1)):\n    if (('jss' in self.connection.keys()) and self.connection['jss'].jss_migrated):\n        self._copy_script_migrated(filename, id_, SCRIPT_FILE_TYPE)\n    else:\n        basename = os.path.basename(filename)\n        self._copy(filename, os.path.join(self.connection['mount_point'], 'Scripts', basename))", "docstring": "Copy a script to the repo's Script subdirectory.\n\nScripts are copied as files to a path, or, on a \"migrated\" JSS,\nare POSTed to the JSS (pass an id if you wish to associate\nthe script with an existing Script object).\n\nArgs:\nfilename: Path for file to copy.\nid_: Int ID, used _only_ for migrated repos. Default is -1,\nwhich creates a new Script.", "source": "codesearchnet"}
{"code": "def is_single_tree(data_wrapper):\n    db = data_wrapper.data_block\n    bad_ids = db[(db[(:, COLS.P)] == (- 1))][(1:, COLS.ID)]\n    return CheckResult((len(bad_ids) == 0), bad_ids.tolist())", "docstring": "Check that data forms a single tree\n\nOnly the first point has ID of -1.\n\nReturns:\nCheckResult with result and list of IDs\n\nNote:\nThis assumes no_missing_parents passed.", "source": "codesearchnet"}
{"code": "def extract_ranges(index_list, range_size_limit=32):\n    if (not index_list):\n        return ([], [])\n    first = index_list[0]\n    last = first\n    ranges = []\n    singles = []\n    for i in index_list[1:]:\n        if ((i == (last + 1)) and ((last - first) <= range_size_limit)):\n            last = i\n        else:\n            if (last > first):\n                ranges.append([first, last])\n            else:\n                singles.append(first)\n            first = i\n            last = i\n    if (last > first):\n        ranges.append([first, last])\n    else:\n        singles.append(first)\n    return (ranges, singles)", "docstring": "Extract consecutive ranges and singles from index_list.\n\nArgs:\nindex_list: List of monotone increasing non-negative integers.\nrange_size_limit: Largest size range to return.  If a larger\nconsecutive range exists it will be returned as multiple\nranges.\n\nReturns:\nranges, singles where ranges is a list of [first, last] pairs of\nconsecutive elements in index_list, and singles is all of the\nother elements, in original order.", "source": "codesearchnet"}
{"code": "def __init__(self, logger=None, timeout=60):\n    \n    self.etag = 0\n    self.logger = logger or logging\n    self.timeout = timeout", "docstring": "Constructor.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.\ntimeout: int, timeout in seconds for metadata requests.", "source": "juraj-google-style"}
{"code": "def sget_voltage(self, cycle, step, set_number=None):\n        \n\n        time_00 = time.time()\n        set_number = self._validate_dataset_number(set_number)\n        if set_number is None:\n            self._report_empty_dataset()\n            return\n        cycle_index_header = self.headers_normal.cycle_index_txt\n        voltage_header = self.headers_normal.voltage_txt\n        step_index_header = self.headers_normal.step_index_txt\n        test = self.datasets[set_number].dfdata\n\n        if isinstance(step, (list, tuple)):\n            warnings.warn(f\"The varialbe step is a list.\"\n                          f\"Should be an integer.\"\n                          f\"{step}\")\n            step = step[0]\n\n        c = test[(test[cycle_index_header] == cycle) &\n                 (test[step_index_header] == step)]\n\n        self.logger.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n        if not self.is_empty(c):\n            v = c[voltage_header]\n            return v\n        else:\n            return None", "docstring": "Returns voltage for cycle, step.\n\nConvinience function; same as issuing\ndfdata[(dfdata[cycle_index_header] == cycle) &\n(dfdata[step_index_header] == step)][voltage_header]\n\nArgs:\ncycle: cycle number\nstep: step number\nset_number: the dataset number (automatic selection if None)\n\nReturns:\npandas.Series or None if empty", "source": "juraj-google-style"}
{"code": "def add_constant(self, stream, value):\n        \n\n        if stream in self.constant_database:\n            raise ArgumentError(\"Attempted to set the same constant twice\", stream=stream, old_value=self.constant_database[stream], new_value=value)\n\n        self.constant_database[stream] = value", "docstring": "Store a constant value for use in this sensor graph.\n\nConstant assignments occur after all sensor graph nodes have been\nallocated since they must be propogated to all appropriate virtual\nstream walkers.\n\nArgs:\nstream (DataStream): The constant stream to assign the value to\nvalue (int): The value to assign.", "source": "juraj-google-style"}
{"code": "def GrabObject(self, identifier):\n    \n    if identifier not in self._values:\n      raise KeyError('Missing cached object for identifier: {0:s}'.format(\n          identifier))\n\n    cache_value = self._values[identifier]\n    if not cache_value:\n      raise RuntimeError('Missing cache value for identifier: {0:s}'.format(\n          identifier))\n\n    cache_value.IncrementReferenceCount()", "docstring": "Grabs a cached object based on the identifier.\n\nThis method increments the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nRaises:\nKeyError: if the VFS object is not found in the cache.\nRuntimeError: if the cache value is missing.", "source": "juraj-google-style"}
{"code": "def get_average_record(self, n):\n    history_deque = collections.deque()\n    averages = []\n    for d in self.data_points:\n        history_deque.appendleft(d)\n        if (len(history_deque) > n):\n            history_deque.pop()\n        avg = (sum(history_deque) / len(history_deque))\n        averages.append(round(avg, self.lr))\n    return averages", "docstring": "Returns a list of average current numbers, each representing the\naverage over the last n data points.\n\nArgs:\nn: Number of data points to average over.\n\nReturns:\nA list of average current values.", "source": "codesearchnet"}
{"code": "class PatchTSMixerPatchify(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        self.sequence_length = config.context_length\n        self.patch_length = config.patch_length\n        self.patch_stride = config.patch_stride\n        if self.sequence_length <= self.patch_length:\n            raise ValueError(f'Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})')\n        self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) \n        new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)\n        self.sequence_start = self.sequence_length - new_sequence_length\n\n    def forward(self, past_values: torch.Tensor):\n        \n        sequence_length = past_values.shape[-2]\n        if sequence_length != self.sequence_length:\n            raise ValueError(f\"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length}).\")\n        output = past_values[:, self.sequence_start:, :]\n        output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)\n        output = output.transpose(-2, -3).contiguous()\n        return output", "docstring": "A class to patchify the time series sequence into different patches\n\nReturns:\n`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`", "source": "github-repos"}
{"code": "def get_data_xls(file_name, file_contents=None, on_demand=False):\n\n    def tuple_to_iso_date(tuple_date):\n        \"\\n        Turns a gregorian (year, month, day, hour, minute, nearest_second) into a\\n        standard YYYY-MM-DDTHH:MM:SS ISO date.  If the date part is all zeros, it's\\n        assumed to be a time; if the time part is all zeros it's assumed to be a date;\\n        if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).\\n\\n        Note that datetimes of midnight will come back as date-only strings.  A date\\n        of month=0 and day=0 is meaningless, so that part of the coercion is safe.\\n        For more on the hairy nature of Excel date/times see\\n        http:\n        (y, m, d, hh, mm, ss) = tuple_date\n        non_zero = (lambda n: (n != 0))\n        date = (('%04d-%02d-%02d' % (y, m, d)) if list(filter(non_zero, (y, m, d))) else '')\n        time = (('T%02d:%02d:%02d' % (hh, mm, ss)) if (list(filter(non_zero, (hh, mm, ss))) or (not date)) else '')\n        return (date + time)\n\n    def format_excel_val(book, val_type, value, want_tuple_date):\n        'Cleans up the incoming excel data'\n        if (val_type == 2):\n            if (value == int(value)):\n                value = int(value)\n        elif (val_type == 3):\n            datetuple = xlrd.xldate_as_tuple(value, book.datemode)\n            value = (datetuple if want_tuple_date else tuple_to_iso_date(datetuple))\n        elif (val_type == 5):\n            value = xlrd.error_text_from_code[value]\n        return value\n\n    def xlrd_xsl_to_array(file_name, file_contents=None):\n        '\\n        Returns:\\n            A list of 2-D tables holding the converted cells of each sheet\\n        '\n        book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)\n        formatter = (lambda t_v: format_excel_val(book, t_v[0], t_v[1], False))\n        row_builder = (lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r)))))\n        data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]\n        if (not on_demand):\n            for sheet in data:\n                sheet.load()\n            book.release_resources()\n        return data\n    return xlrd_xsl_to_array(file_name, file_contents)", "docstring": "Loads the old excel format files. New format files will automatically\nget loaded as well.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy.", "source": "codesearchnet"}
{"code": "def IsDefault(self):\n    if ((not self._tsk_attribute) or (not self._file_system)):\n        return True\n    if self._file_system.IsHFS():\n        attribute_type = getattr(self._tsk_attribute.info, 'type', None)\n        return (attribute_type in (pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT, pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA))\n    if self._file_system.IsNTFS():\n        return (not bool(self.name))\n    return True", "docstring": "Determines if the data stream is the default data stream.\n\nReturns:\nbool: True if the data stream is the default data stream, false if not.", "source": "codesearchnet"}
{"code": "def _GetProcessedStorageFilePath(self, task):\n    filename = '{0:s}.plaso'.format(task.identifier)\n    return os.path.join(self._processed_task_storage_path, filename)", "docstring": "Retrieves the path of a task storage file in the processed directory.\n\nArgs:\ntask (Task): task.\n\nReturns:\nstr: path of a task storage file in the processed directory.", "source": "codesearchnet"}
{"code": "def _process_worker(call_queue, result_queue, initializer, initargs, processes_management_lock, timeout, worker_exit_lock, current_depth):\n    if (initializer is not None):\n        try:\n            initializer(*initargs)\n        except BaseException:\n            _base.LOGGER.critical('Exception in initializer:', exc_info=True)\n            return\n    global _CURRENT_DEPTH\n    _CURRENT_DEPTH = current_depth\n    _process_reference_size = None\n    _last_memory_leak_check = None\n    pid = os.getpid()\n    mp.util.debug(('Worker started with timeout=%s' % timeout))\n    while True:\n        try:\n            call_item = call_queue.get(block=True, timeout=timeout)\n            if (call_item is None):\n                mp.util.info('Shutting down worker on sentinel')\n        except queue.Empty:\n            mp.util.info(('Shutting down worker after timeout %0.3fs' % timeout))\n            if processes_management_lock.acquire(block=False):\n                processes_management_lock.release()\n                call_item = None\n            else:\n                mp.util.info('Could not acquire processes_management_lock')\n                continue\n        except BaseException as e:\n            previous_tb = traceback.format_exc()\n            try:\n                result_queue.put(_RemoteTraceback(previous_tb))\n            except BaseException:\n                print(previous_tb)\n            sys.exit(1)\n        if (call_item is None):\n            result_queue.put(pid)\n            with worker_exit_lock:\n                return\n        try:\n            r = call_item()\n        except BaseException as e:\n            exc = _ExceptionWithTraceback(e)\n            result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n        else:\n            _sendback_result(result_queue, call_item.work_id, result=r)\n            del r\n        del call_item\n        if _USE_PSUTIL:\n            if (_process_reference_size is None):\n                _process_reference_size = _get_memory_usage(pid, force_gc=True)\n                _last_memory_leak_check = time()\n                continue\n            if ((time() - _last_memory_leak_check) > _MEMORY_LEAK_CHECK_DELAY):\n                mem_usage = _get_memory_usage(pid)\n                _last_memory_leak_check = time()\n                if ((mem_usage - _process_reference_size) < _MAX_MEMORY_LEAK_SIZE):\n                    continue\n                mem_usage = _get_memory_usage(pid, force_gc=True)\n                _last_memory_leak_check = time()\n                if ((mem_usage - _process_reference_size) < _MAX_MEMORY_LEAK_SIZE):\n                    continue\n                mp.util.info('Memory leak detected: shutting down worker')\n                result_queue.put(pid)\n                with worker_exit_lock:\n                    return\n        elif ((_last_memory_leak_check is None) or ((time() - _last_memory_leak_check) > _MEMORY_LEAK_CHECK_DELAY)):\n            gc.collect()\n            _last_memory_leak_check = time()", "docstring": "Evaluates calls from call_queue and places the results in result_queue.\n\nThis worker is run in a separate process.\n\nArgs:\ncall_queue: A ctx.Queue of _CallItems that will be read and\nevaluated by the worker.\nresult_queue: A ctx.Queue of _ResultItems that will written\nto by the worker.\ninitializer: A callable initializer, or None\ninitargs: A tuple of args for the initializer\nprocess_management_lock: A ctx.Lock avoiding worker timeout while some\nworkers are being spawned.\ntimeout: 
maximum time to wait for a new item in the call_queue. If that\ntime is expired, the worker will shutdown.\nworker_exit_lock: Lock to avoid flagging the executor as broken on\nworkers timeout.\ncurrent_depth: Nested parallelism level, to avoid infinite spawning.", "source": "codesearchnet"}
{"code": "def compose_path(pub, uuid_url=False):\n    \n    if uuid_url:\n        return join(\n            \"/\",\n            UUID_DOWNLOAD_KEY,\n            str(pub.uuid)\n        )\n\n    return join(\n        \"/\",\n        DOWNLOAD_KEY,\n        basename(pub.file_pointer),\n        basename(pub.filename)\n    )", "docstring": "Compose absolute path for given `pub`.\n\nArgs:\npub (obj): :class:`.DBPublication` instance.\nuuid_url (bool, default False): Compose URL using UUID.\n\nReturns:\nstr: Absolute url-path of the publication, without server's address \\\nand protocol.\n\nRaises:\nPrivatePublicationError: When the `pub` is private publication.", "source": "juraj-google-style"}
{"code": "def ExpandSubClasses(self, t):\n    queue = [t]\n    seen = set()\n    while queue:\n        item = queue.pop()\n        if item not in seen:\n            seen.add(item)\n            queue.extend(self._subclasses[item])\n    return seen", "docstring": "Generate a set of all (known) subclasses for a type.\n\nArguments:\nt: A type. E.g. NamedType(\"int\").\n\nReturns:\nA set of types. This set includes t as well as all its subclasses. For\nexample, this will return \"int\" and \"bool\" for \"int\".", "source": "github-repos"}
{"code": "def victim(self, name, owner=None, **kwargs):\n    return Victim(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Victim TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def rh45(msg):\n    d = hex2bin(data(msg))\n    if (d[38] == '0'):\n        return None\n    rh = (bin2int(d[39:51]) * 16)\n    return rh", "docstring": "Radio height.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: radio height in ft", "source": "codesearchnet"}
{"code": "async def invoke(self, context):\n    try:\n        tasks = (await self._run_cancellable(claim_work(context)))\n        if ((not tasks) or (not tasks.get('tasks', []))):\n            (await self._run_cancellable(asyncio.sleep(context.config['poll_interval'])))\n            return None\n        status = None\n        for task_defn in tasks.get('tasks', []):\n            prepare_to_run_task(context, task_defn)\n            reclaim_fut = context.event_loop.create_task(reclaim_task(context, context.task))\n            try:\n                status = (await do_run_task(context, self._run_cancellable, self._to_cancellable_process))\n                artifacts_paths = filepaths_in_dir(context.config['artifact_dir'])\n            except WorkerShutdownDuringTask:\n                shutdown_artifact_paths = [os.path.join('public', 'logs', log_file) for log_file in ['chain_of_trust.log', 'live_backing.log']]\n                artifacts_paths = [path for path in shutdown_artifact_paths if os.path.isfile(os.path.join(context.config['artifact_dir'], path))]\n                status = STATUSES['worker-shutdown']\n            status = worst_level(status, (await do_upload(context, artifacts_paths)))\n            (await complete_task(context, status))\n            reclaim_fut.cancel()\n            cleanup(context)\n        return status\n    except asyncio.CancelledError:\n        return None", "docstring": "Claims and processes Taskcluster work.\n\nArgs:\ncontext (scriptworker.context.Context): context of worker\n\nReturns: status code of build", "source": "codesearchnet"}
{"code": "def event_stream(self, from_token, timeout=30000):\n        \n        warnings.warn(\"event_stream is deprecated. Use sync instead.\",\n                      DeprecationWarning)\n        path = \"/events\"\n        return self._send(\n            \"GET\", path, query_params={\n                \"timeout\": timeout,\n                \"from\": from_token\n            }\n        )", "docstring": "Deprecated. Use sync instead.\nPerforms /events\n\nArgs:\nfrom_token (str): The 'from' query parameter.\ntimeout (int): Optional. The 'timeout' query parameter.", "source": "juraj-google-style"}
{"code": "def get_nested_dmaps(dmap):\n    \n    if not isinstance(dmap, DynamicMap):\n        return []\n    dmaps = [dmap]\n    for o in dmap.callback.inputs:\n        dmaps.extend(get_nested_dmaps(o))\n    return list(set(dmaps))", "docstring": "Recurses DynamicMap to find DynamicMaps inputs\n\nArgs:\ndmap: DynamicMap to recurse to look for DynamicMap inputs\n\nReturns:\nList of DynamicMap instances that were found", "source": "juraj-google-style"}
{"code": "def __init__(self, weight_shape: Sequence[int], same_scale_op: str) -> None:\n    self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)\n    self.same_scale_op = same_scale_op", "docstring": "Initializes a MatmulModel.\n\nArgs:\nweight_shape: Shape of the weight tensor.\nsame_scale_op: Name of the same-scale op to be tested. Raises error\nwhen an unknown name is given.", "source": "github-repos"}
{"code": "def extract_all_content(\n        self,\n        path=None,\n        payload=None,\n        objectInput=None,\n        pretty_print=False,\n        convert_to_obj=False,\n    ):\n        \n        f = file_path(path, payload, objectInput)\n        switches = [\"-J\", \"-t\", \"-r\", f]\n        if not pretty_print:\n            switches.remove(\"-r\")\n        result = self._command_template(switches)\n\n        if result and convert_to_obj:\n            result = json.loads(result, encoding=\"utf-8\")\n\n        return result, path, f", "docstring": "This function returns a JSON of all contents and\nmetadata of passed file\n\nArgs:\npath (string): Path of file to analyze\npayload (string): Payload base64 to analyze\nobjectInput (object): file object/standard input to analyze\npretty_print (boolean): If True adds newlines and whitespace,\nfor better readability\nconvert_to_obj (boolean): If True convert JSON in object", "source": "juraj-google-style"}
{"code": "def process_layer(layer_data):\n    layer_name = layer_data['name']\n    if 'module' not in layer_data:\n        layer = saving_utils.model_from_config(layer_data, custom_objects=custom_objects)\n    else:\n        layer = serialization_lib.deserialize_keras_object(layer_data, custom_objects=custom_objects)\n    if not isinstance(layer, Operation):\n        raise ValueError(f'Unexpected object from deserialization, expected a layer or operation, got a {type(layer)}')\n    created_layers[layer_name] = layer\n    inbound_nodes_data = layer_data['inbound_nodes']\n    for node_data in inbound_nodes_data:\n        add_unprocessed_node(layer, node_data)", "docstring": "Deserializes a layer and index its inbound nodes.\n\nArgs:\nlayer_data: layer config dict.", "source": "github-repos"}
{"code": "def node_info(self, args, screen_info=None):\n    _ = screen_info\n    parsed = self._arg_parsers['node_info'].parse_args(args)\n    node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(parsed.node_name)\n    if not self._debug_dump.node_exists(node_name):\n        output = cli_shared.error('There is no node named \"%s\" in the partition graphs' % node_name)\n        _add_main_menu(output, node_name=None, enable_list_tensors=True, enable_node_info=False, enable_list_inputs=False, enable_list_outputs=False)\n        return output\n    lines = ['Node %s' % node_name]\n    font_attr_segs = {0: [(len(lines[-1]) - len(node_name), len(lines[-1]), 'bold')]}\n    lines.append('')\n    lines.append('  Op: %s' % self._debug_dump.node_op_type(node_name))\n    lines.append('  Device: %s' % self._debug_dump.node_device(node_name))\n    output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)\n    inputs = self._exclude_denylisted_ops(self._debug_dump.node_inputs(node_name))\n    ctrl_inputs = self._exclude_denylisted_ops(self._debug_dump.node_inputs(node_name, is_control=True))\n    output.extend(self._format_neighbors('input', inputs, ctrl_inputs))\n    recs = self._exclude_denylisted_ops(self._debug_dump.node_recipients(node_name))\n    ctrl_recs = self._exclude_denylisted_ops(self._debug_dump.node_recipients(node_name, is_control=True))\n    output.extend(self._format_neighbors('recipient', recs, ctrl_recs))\n    if parsed.attributes:\n        output.extend(self._list_node_attributes(node_name))\n    if parsed.dumps:\n        output.extend(self._list_node_dumps(node_name))\n    if parsed.traceback:\n        output.extend(self._render_node_traceback(node_name))\n    _add_main_menu(output, node_name=node_name, enable_node_info=False)\n    return output", "docstring": "Command handler for node_info.\n\nQuery information about a given node.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines object.", "source": "github-repos"}
{"code": "def _example_from_complex_def(self, prop_spec):\n        \n        if 'schema' not in prop_spec:\n            return [{}]\n        elif 'type' not in prop_spec['schema']:\n            definition_name = self.get_definition_name_from_ref(prop_spec['schema']['$ref'])\n            if self.build_one_definition_example(definition_name):\n                return self.definitions_example[definition_name]\n        elif prop_spec['schema']['type'] == 'array':  \n            \n            if 'items' in prop_spec.keys():\n                definition_name = self.get_definition_name_from_ref(prop_spec['items']['$ref'])\n            else:\n                if '$ref' in prop_spec['schema']['items']:\n                    definition_name = self.get_definition_name_from_ref(prop_spec['schema']['items']['$ref'])\n                else:\n                    definition_name = self.get_definition_name_from_ref(prop_spec['schema']['items']['type'])\n                    return [definition_name]\n            return [self.definitions_example[definition_name]]\n        else:\n            return self.get_example_from_prop_spec(prop_spec['schema'])", "docstring": "Get an example from a property specification.\n\nIn case there is no \"type\" key in the root of the dictionary.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example.", "source": "juraj-google-style"}
{"code": "def _load_config_section(self, section_name):\n    if self._config.has_section(section_name):\n        section = dict(self._config.items(section_name))\n    elif self._config.has_section('Default'):\n        section = dict(self._config.items('Default'))\n    else:\n        raise KeyError((\"'{}' was not found in the configuration file and no default \" + 'configuration was provided.').format(section_name))\n    if (('protocol' in section) and ('host' in section) and ('token' in section)):\n        return section\n    else:\n        raise KeyError(('Missing values in configuration data. ' + 'Must contain: protocol, host, token'))", "docstring": "Method to load the specific Service section from the config file if it\nexists, or fall back to the default\n\nArgs:\nsection_name (str): The desired service section name\n\nReturns:\n(dict): the section parameters", "source": "codesearchnet"}
{"code": "def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):\n    if remove_all_metadata_fields:\n        for df in concated_meta_dfs:\n            df.drop(df.columns, axis=1, inplace=True)\n    all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0)\n    n_rows = all_concated_meta_df.shape[0]\n    logger.debug('all_concated_meta_df.shape[0]: {}'.format(n_rows))\n    n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs])\n    assert (n_rows == n_rows_cumulative)\n    all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1)\n    return all_concated_meta_df_sorted", "docstring": "Assemble the concatenated metadata dfs together. For example,\nif horizontally concatenating, the concatenated metadata dfs are the\ncolumn metadata dfs. Both indices are sorted.\n\nArgs:\nconcated_meta_dfs (list of pandas dfs)\n\nReturns:\nall_concated_meta_df_sorted (pandas df)", "source": "codesearchnet"}
{"code": "def inspect_distribution(self, image, auth_config=None):\n    (registry, _) = auth.resolve_repository_name(image)\n    headers = {}\n    if (auth_config is None):\n        header = auth.get_config_header(self, registry)\n        if header:\n            headers['X-Registry-Auth'] = header\n    else:\n        log.debug('Sending supplied auth config')\n        headers['X-Registry-Auth'] = auth.encode_header(auth_config)\n    url = self._url('/distribution/{0}/json', image)\n    return self._result(self._get(url, headers=headers), True)", "docstring": "Get image digest and platform information by contacting the registry.\n\nArgs:\nimage (str): The image name to inspect\nauth_config (dict): Override the credentials that are found in the\nconfig for this request.  ``auth_config`` should contain the\n``username`` and ``password`` keys to be valid.\n\nReturns:\n(dict): A dict containing distribution data\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def parameterized_codec(raw, b64):\n    if isinstance(raw, bytes):\n        raw = raw.decode('utf-8')\n    result = _parameterize_string(raw)\n    return (Base64(result.data) if b64 else result)", "docstring": "Parameterize a string, possibly encoding it as Base64 afterwards\n\nArgs:\nraw (`str` | `bytes`): String to be processed. Byte strings will be\ninterpreted as UTF-8.\nb64 (`bool`): Whether to wrap the output in a Base64 CloudFormation\ncall\n\nReturns:\n:class:`troposphere.AWSHelperFn`: output to be included in a\nCloudFormation template.", "source": "codesearchnet"}
{"code": "def backward(ctx, grad_at_output: torch.Tensor):\n    multiplier, selected_experts, masked_gates = ctx.saved_tensors\n    grad_at_output = grad_at_output * multiplier\n    grad_at_scores_expanded = masked_gates * grad_at_output.mul(-1)\n    grad_at_scores_expanded.scatter_add_(dim=-1, index=selected_experts, src=grad_at_output)\n    return (grad_at_scores_expanded, None, None, None, None)", "docstring": "Backward pass for the custom autograd function.\n\nArgs:\nctx: Context object with saved tensors from the forward pass.\ngrad_at_output (torch.Tensor): Gradient at the output.\n\nReturns:\nTuple[torch.Tensor, None, None, None, None]: Gradients for the inputs.", "source": "github-repos"}
{"code": "def get_container_details(self, container_id_or_name: str) -> dict:\n        \n        container = self._client.containers.get(container_id_or_name)\n        return container.attrs", "docstring": "Get details of a container.\n\nArgs:\ncontainer_id_or_name (string): docker container id or name\n\nReturns:\ndict, details of the container", "source": "juraj-google-style"}
{"code": "def validate(obj, schema):\n        \n        if isinstance(obj, str):\n            obj = json.loads(obj)\n        return JsonValidator(schema)._validate(obj)", "docstring": "Validate an object against a schema\n\nArgs:\nobj (dict):\nschema (dict):", "source": "juraj-google-style"}
{"code": "def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error):\n    line = clean_lines.elided[linenum]\n    if ('&' not in line):\n        return\n    if IsDerivedFunction(clean_lines, linenum):\n        return\n    if IsOutOfLineMethodDefinition(clean_lines, linenum):\n        return\n    if (linenum > 1):\n        previous = None\n        if Match('\\\\s*::(?:[\\\\w<>]|::)+\\\\s*&\\\\s*\\\\S', line):\n            previous = Search('\\\\b((?:const\\\\s*)?(?:[\\\\w<>]|::)+[\\\\w<>])\\\\s*$', clean_lines.elided[(linenum - 1)])\n        elif Match('\\\\s*[a-zA-Z_]([\\\\w<>]|::)+\\\\s*&\\\\s*\\\\S', line):\n            previous = Search('\\\\b((?:const\\\\s*)?(?:[\\\\w<>]|::)+::)\\\\s*$', clean_lines.elided[(linenum - 1)])\n        if previous:\n            line = (previous.group(1) + line.lstrip())\n        else:\n            endpos = line.rfind('>')\n            if (endpos > (- 1)):\n                (_, startline, startpos) = ReverseCloseExpression(clean_lines, linenum, endpos)\n                if ((startpos > (- 1)) and (startline < linenum)):\n                    line = ''\n                    for i in xrange(startline, (linenum + 1)):\n                        line += clean_lines.elided[i].strip()\n    if (nesting_state.previous_stack_top and (not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or isinstance(nesting_state.previous_stack_top, _NamespaceInfo)))):\n        return\n    if (linenum > 0):\n        for i in xrange((linenum - 1), max(0, (linenum - 10)), (- 1)):\n            previous_line = clean_lines.elided[i]\n            if (not Search('[),]\\\\s*$', previous_line)):\n                break\n            if Match('^\\\\s*:\\\\s+\\\\S', previous_line):\n                return\n    if Search('\\\\\\\\\\\\s*$', line):\n        return\n    if IsInitializerList(clean_lines, linenum):\n        return\n    whitelisted_functions = '(?:[sS]wap(?:<\\\\w:+>)?|operator\\\\s*[<>][<>]|static_assert|COMPILE_ASSERT)\\\\s*\\\\('\n    if Search(whitelisted_functions, line):\n        return\n    elif (not Search('\\\\S+\\\\([^)]*$', line)):\n        for i in xrange(2):\n            if ((linenum > i) and Search(whitelisted_functions, clean_lines.elided[((linenum - i) - 1)])):\n                return\n    decls = ReplaceAll('{[^}]*}', ' ', line)\n    for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):\n        if ((not Match(_RE_PATTERN_CONST_REF_PARAM, parameter)) and (not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter))):\n            error(filename, linenum, 'runtime/references', 2, ('Is this a non-const reference? If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter)))", "docstring": "Check for non-const references.\n\nSeparate from CheckLanguage since it scans backwards from current\nline, instead of scanning forward.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:\n    outputs = self.tapas(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    return outputs", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, TapasModel\n>>> import pandas as pd\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/tapas-base\")\n>>> model = TapasModel.from_pretrained(\"google/tapas-base\")\n\n>>> data = {\n...     \"Actors\": [\"Brad Pitt\", \"Leonardo Di Caprio\", \"George Clooney\"],\n...     \"Age\": [\"56\", \"45\", \"59\"],\n...     \"Number of movies\": [\"87\", \"53\", \"69\"],\n... }\n>>> table = pd.DataFrame.from_dict(data)\n>>> queries = [\"How many movies has George Clooney played in?\", \"How old is Brad Pitt?\"]\n\n>>> inputs = tokenizer(table=table, queries=queries, padding=\"max_length\", return_tensors=\"tf\")\n>>> outputs = model(**inputs)\n\n>>> last_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def validate(self):\n    if (not isinstance(self.value, bytes)):\n        raise TypeError('secret value must be bytes')\n    elif (not isinstance(self.data_type, enums.SecretDataType)):\n        raise TypeError('secret data type must be a SecretDataType enumeration')\n    mask_count = len(self.cryptographic_usage_masks)\n    for i in range(mask_count):\n        mask = self.cryptographic_usage_masks[i]\n        if (not isinstance(mask, enums.CryptographicUsageMask)):\n            position = '({0} in list)'.format(i)\n            raise TypeError('secret data mask {0} must be a CryptographicUsageMask enumeration'.format(position))\n    name_count = len(self.names)\n    for i in range(name_count):\n        name = self.names[i]\n        if (not isinstance(name, six.string_types)):\n            position = '({0} in list)'.format(i)\n            raise TypeError('secret data name {0} must be a string'.format(position))", "docstring": "Verify that the contents of the SecretData object are valid.\n\nRaises:\nTypeError: if the types of any SecretData attributes are invalid.", "source": "codesearchnet"}
{"code": "def intersection(self, other):\n        \n        if not hasattr(other, \"__iter__\"):\n            other = [other]\n\n        bounds = self.bounds\n        for range in other:\n            bounds = self._intersection(bounds, range.bounds)\n            if not bounds:\n                return None\n\n        range = VersionRange(None)\n        range.bounds = bounds\n        return range", "docstring": "AND together version ranges.\n\nCalculates the intersection of this range with one or more other ranges.\n\nArgs:\nother: VersionRange object (or list of) to AND with.\n\nReturns:\nNew VersionRange object representing the intersection, or None if\nno ranges intersect.", "source": "juraj-google-style"}
{"code": "def get_version(\n        here_path,\n        default_version=DEFAULT_VERSION,\n):\n    \n    if 'site-packages' in here_path:\n        \n        return _version_from_file(here_path)\n\n    if os.environ.get('TRAVIS_TAG'):\n        \n        if not TEST_MODE:  \n            return os.environ.get('TRAVIS_TAG').replace('v', '')\n        else:\n            warnings.warn(\n                'Travis detected, but TEST_MODE enabled',\n                exceptions.ProsperVersionTestModeWarning)\n\n    try:\n        current_tag = _read_git_tags(default_version=default_version)\n    except Exception:  \n        return _version_from_file(here_path)\n\n    \n    \n\n    with open(os.path.join(here_path, 'version.txt'), 'w') as v_fh:\n        \n        v_fh.write(current_tag)\n\n    return current_tag", "docstring": "tries to resolve version number\n\nArgs:\nhere_path (str): path to project local dir\ndefault_version (str): what version to return if all else fails\n\nReturns:\nstr: semantic_version information for library", "source": "juraj-google-style"}
{"code": "def get_oxi_state_decorated_structure(self, structure):\n    s = structure.copy()\n    if s.is_ordered:\n        valences = self.get_valences(s)\n        s.add_oxidation_state_by_site(valences)\n    else:\n        valences = self.get_valences(s)\n        s = add_oxidation_state_by_site_fraction(s, valences)\n    return s", "docstring": "Get an oxidation state decorated structure. This currently works only\nfor ordered structures only.\n\nArgs:\nstructure: Structure to analyze\n\nReturns:\nA modified structure that is oxidation state decorated.\n\nRaises:\nValueError if the valences cannot be determined.", "source": "codesearchnet"}
{"code": "def restore_component(self, component_name, save_path):\n        \n        component = self.get_component(component_name=component_name)\n        self._validate_savable(component=component, component_name=component_name)\n        component.restore(sess=self.session, save_path=save_path)", "docstring": "Restores a component's parameters from a save location.\n\nArgs:\ncomponent_name: The component to restore.\nsave_path: The save location.", "source": "juraj-google-style"}
{"code": "def rest_error(self):\n    error_json = self.__format_error('errors')\n    return json.dumps(error_json, indent=1, sort_keys=True)", "docstring": "Format this error into a response to a REST request.\n\nReturns:\nA string containing the reformatted error response.", "source": "codesearchnet"}
{"code": "def InitPrivateKey(self):\n    if self.private_key:\n        try:\n            self.common_name = rdf_client.ClientURN.FromPrivateKey(self.private_key)\n            logging.info('Starting client %s', self.common_name)\n            return self.private_key\n        except type_info.TypeValueError:\n            pass\n    key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=config.CONFIG['Client.rsa_key_length'])\n    self.common_name = rdf_client.ClientURN.FromPrivateKey(key)\n    logging.info('Client pending enrolment %s', self.common_name)\n    self.SavePrivateKey(key)\n    return key", "docstring": "Makes sure this client has a private key set.\n\nIt first tries to load an RSA key from the certificate.\n\nIf no certificate is found, or it is invalid, we make a new random RSA key,\nand store it as our certificate.\n\nReturns:\nAn RSA key - either from the certificate or a new random key.", "source": "codesearchnet"}
{"code": "def create_leaflet_viewer(self, idaho_image_results, filename):\n    description = self.describe_images(idaho_image_results)\n    if (len(description) > 0):\n        functionstring = ''\n        for (catid, images) in description.items():\n            for (partnum, part) in images['parts'].items():\n                num_images = len(list(part.keys()))\n                partname = None\n                if (num_images == 1):\n                    partname = [p for p in list(part.keys())][0]\n                    pan_image_id = ''\n                elif (num_images == 2):\n                    partname = [p for p in list(part.keys()) if (p is not 'PAN')][0]\n                    pan_image_id = part['PAN']['id']\n                if (not partname):\n                    self.logger.debug('Cannot find part for idaho image.')\n                    continue\n                bandstr = {'RGBN': '0,1,2', 'WORLDVIEW_8_BAND': '4,2,1', 'PAN': '0'}.get(partname, '0,1,2')\n                part_boundstr_wkt = part[partname]['boundstr']\n                part_polygon = from_wkt(part_boundstr_wkt)\n                bucketname = part[partname]['bucket']\n                image_id = part[partname]['id']\n                (W, S, E, N) = part_polygon.bounds\n                functionstring += (\"addLayerToMap('%s','%s',%s,%s,%s,%s,'%s');\\n\" % (bucketname, image_id, W, S, E, N, pan_image_id))\n        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n        try:\n            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:\n                data = htmlfile.read().decode('utf8')\n        except AttributeError:\n            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:\n                data = htmlfile.read()\n        data = data.replace('FUNCTIONSTRING', functionstring)\n        data = data.replace('CENTERLAT', str(S))\n        data = data.replace('CENTERLON', str(W))\n        data = data.replace('BANDS', bandstr)\n        data = data.replace('TOKEN', self.gbdx_connection.access_token)\n        with codecs.open(filename, 'w', 'utf8') as outputfile:\n            self.logger.debug(('Saving %s' % filename))\n            outputfile.write(data)\n    else:\n        print('No items returned.')", "docstring": "Create a leaflet viewer html file for viewing idaho images.\n\nArgs:\nidaho_image_results (dict): IDAHO image result set as returned from\nthe catalog.\nfilename (str): Where to save output html file.", "source": "codesearchnet"}
{"code": "def _infer_graph(self, inputs, clusters):\n    assert isinstance(inputs, list)\n    scores = self._distance_graph(inputs, clusters, self._distance_metric)\n    output = []\n    if self._distance_metric == COSINE_DISTANCE and (not self._clusters_l2_normalized()):\n        with ops.colocate_with(clusters, ignore_existing=True):\n            clusters = nn_impl.l2_normalize(clusters, axis=1)\n    for inp, score in zip(inputs, scores):\n        with ops.colocate_with(inp, ignore_existing=True):\n            indices, distances = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)\n            if self._distance_metric == COSINE_DISTANCE:\n                distances *= 0.5\n            output.append((score, array_ops.squeeze(distances, [-1]), array_ops.squeeze(indices, [-1])))\n    return zip(*output)", "docstring": "Maps input to closest cluster and the score.\n\nArgs:\ninputs: list of input Tensors.\nclusters: Tensor of cluster centers.\n\nReturns:\nList of tuple, where each value in tuple corresponds to a value in inp.\nThe tuple has following three elements:\nall_scores: distance of each input to each cluster center.\nscore: distance of each input to closest cluster center.\ncluster_idx: index of cluster center closest to the corresponding input.", "source": "github-repos"}
{"code": "class SimpleSlidingQuantileTracker(WindowedTracker, QuantileTracker):\n\n    def __init__(self, window_size, q):\n        super().__init__(window_mode=WindowMode.SLIDING, window_size=window_size)\n        QuantileTracker.__init__(self, q)\n\n    def get(self):\n        \n        with warnings.catch_warnings(record=False):\n            warnings.simplefilter('ignore')\n            return np.nanquantile(self._queue, self._q)", "docstring": "Sliding window quantile tracker using NumPy.\n\nThis tracker uses NumPy's `nanquantile` function to calculate the specified\nquantile of the values currently in the sliding window. It's a simple,\nnon-incremental approach.\n\nArgs:\nwindow_size: The size of the sliding window.\nq: The quantile to calculate, a float between 0 and 1 (inclusive).", "source": "github-repos"}
{"code": "def removeTags(dom):\n    \n    \n    try:\n        string_type = basestring\n    except NameError:\n        string_type = str\n\n    \n    element_stack = None\n    if type(dom) in [list, tuple]:\n        element_stack = dom\n    elif isinstance(dom, HTMLElement):\n        element_stack = dom.childs if dom.isTag() else [dom]\n    elif isinstance(dom, string_type):\n        element_stack = parseString(dom).childs\n    else:\n        element_stack = dom\n\n    \n    output = \"\"\n    while element_stack:\n        el = element_stack.pop(0)\n\n        if not (el.isTag() or el.isComment() or not el.getTagName()):\n            output += el.__str__()\n\n        if el.childs:\n            element_stack = el.childs + element_stack\n\n    return output", "docstring": "Remove all tags from `dom` and obtain plaintext representation.\n\nArgs:\ndom (str, obj, array): str, HTMLElement instance or array of elements.\n\nReturns:\nstr: Plain string without tags.", "source": "juraj-google-style"}
{"code": "def radar_xsect(scatterer, h_pol=True):\n    \n    Z = scatterer.get_Z()\n    if h_pol:\n        return 2 * np.pi * \\\n            (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n    else:\n        return 2 * np.pi * \\\n            (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])", "docstring": "Radar cross section for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe radar cross section.", "source": "juraj-google-style"}
{"code": "def _status(self):\n        \n\n        job_id_list = ' '.join(self.resources.keys())\n\n        jobs_missing = list(self.resources.keys())\n\n        retcode, stdout, stderr = self.channel.execute_wait(\"qstat {0}\".format(job_id_list), 3)\n        for line in stdout.split('\\n'):\n            parts = line.split()\n            if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):\n                continue\n            job_id = parts[0]\n            status = translate_table.get(parts[4], 'UNKNOWN')\n            self.resources[job_id]['status'] = status\n            jobs_missing.remove(job_id)\n\n        \n        \n        for missing_job in jobs_missing:\n            if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:\n                self.resources[missing_job]['status'] = translate_table['E']", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n\nArgs:\nself\n\nReturns:\n[status...] : Status list of all jobs", "source": "juraj-google-style"}
{"code": "def state_nums():\n    st_nums = {}\n    fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')\n    with open(fname, 'rU') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        i = 0\n        for row in reader:\n            st_nums[row[0]] = i\n            i = (i + 1)\n    return st_nums", "docstring": "Get a dictionary of state names mapped to their 'legend' value.\n\nReturns:\ndictionary of state names mapped to their numeric value", "source": "codesearchnet"}
{"code": "def _get_model_field(self, name: str):\n    field_name = self._normalize_field_name(name)\n    if ((field_name == 'pk') and self.query.model._meta.pk):\n        return self.query.model._meta.pk\n    for field in self.query.model._meta.local_concrete_fields:\n        if ((field.name == field_name) or (field.column == field_name)):\n            return field\n    return None", "docstring": "Gets the field on a model with the specified name.\n\nArguments:\nname:\nThe name of the field to look for.\n\nThis can be both the actual field name, or\nthe name of the column, both will work :)\n\nReturns:\nThe field with the specified name or None if\nno such field exists.", "source": "codesearchnet"}
{"code": "def select(self, inputs: List[Any], global_state: pg.geno.AttributeDict, step: int) -> List[Any]:", "docstring": "Select a list of outputs from the inputs.\n\nA selector has two use cases:\n\n* Used as parents selector, which selects individuals from the population\nas parents for recombination. It will be called before the recombination\nstep within the :meth:`pyglove.evolution.Evolution.propose` method.\n\n* Used as a population updater, which selects individuals from previous\npopulation as a new population. It will be called everytime the\npopulation is updated, triggered by the\n:meth:`pyglove.evolution.Evolution.feedback` method.\n\nArgs:\ninputs: a list of objects as input.\nglobal_state: An `AttributeDict` object as the global state container,\nwhich is readable/writable during the operation.\nstep: Number of examples historically proposed, which can be used for\ndetermining a cross over schedule.", "source": "github-repos"}
{"code": "def save_pickle(obj, outfile, protocol=2):\n    with open(outfile, 'wb') as f:\n        pickle.dump(obj, f, protocol=protocol)\n    return outfile", "docstring": "Save the object as a pickle file\n\nArgs:\noutfile (str): Filename\nprotocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2\n\nReturns:\nstr: Path to pickle file", "source": "codesearchnet"}
{"code": "def seq_int_arr(seqs):\n    \n    return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])", "docstring": "Convert list of ACGT strings to matix of 1-4 ints\n\nArgs:\nseqs (list of str): nucleotide sequences with only 'ACGT' characters\n\nReturns:\nnumpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T\nstr: nucleotide sequence string", "source": "juraj-google-style"}
{"code": "def load_settings(self, path):\n        \n        if not os.path.exists(path):\n            raise exceptions.ConfigurationError(\n                \"The server configuration file ('{0}') could not be \"\n                \"located.\".format(path)\n            )\n\n        self._logger.info(\n            \"Loading server configuration settings from: {0}\".format(path)\n        )\n\n        parser = configparser.ConfigParser()\n        parser.read(path)\n        self._parse_settings(parser)\n        self.parse_auth_settings(parser)", "docstring": "Load configuration settings from the file pointed to by path.\n\nThis will overwrite all current setting values.\n\nArgs:\npath (string): The path to the configuration file containing\nthe settings to load. Required.\nRaises:\nConfigurationError: Raised if the path does not point to an\nexisting file or if a setting value is invalid.", "source": "juraj-google-style"}
{"code": "def swap_gain(mapping, node_id1, mapping_id1, node_id2, mapping_id2, weight_dict, match_num):\n    \n    new_mapping_list = mapping[:]\n    \n    \n    new_mapping_list[node_id1] = mapping_id2\n    new_mapping_list[node_id2] = mapping_id1\n    if tuple(new_mapping_list) in match_triple_dict:\n        return match_triple_dict[tuple(new_mapping_list)] - match_num\n    gain = 0\n    new_mapping1 = (node_id1, mapping_id2)\n    new_mapping2 = (node_id2, mapping_id1)\n    old_mapping1 = (node_id1, mapping_id1)\n    old_mapping2 = (node_id2, mapping_id2)\n    if node_id1 > node_id2:\n        new_mapping2 = (node_id1, mapping_id2)\n        new_mapping1 = (node_id2, mapping_id1)\n        old_mapping1 = (node_id2, mapping_id2)\n        old_mapping2 = (node_id1, mapping_id1)\n    if new_mapping1 in weight_dict:\n        for key in weight_dict[new_mapping1]:\n            if key == -1:\n                gain += weight_dict[new_mapping1][-1]\n            elif new_mapping_list[key[0]] == key[1]:\n                gain += weight_dict[new_mapping1][key]\n    if new_mapping2 in weight_dict:\n        for key in weight_dict[new_mapping2]:\n            if key == -1:\n                gain += weight_dict[new_mapping2][-1]\n            \n            elif key[0] == node_id1:\n                continue\n            elif new_mapping_list[key[0]] == key[1]:\n                gain += weight_dict[new_mapping2][key]\n    if old_mapping1 in weight_dict:\n        for key in weight_dict[old_mapping1]:\n            if key == -1:\n                gain -= weight_dict[old_mapping1][-1]\n            elif mapping[key[0]] == key[1]:\n                gain -= weight_dict[old_mapping1][key]\n    if old_mapping2 in weight_dict:\n        for key in weight_dict[old_mapping2]:\n            if key == -1:\n                gain -= weight_dict[old_mapping2][-1]\n            \n            elif key[0] == node_id1:\n                continue\n            elif mapping[key[0]] == key[1]:\n                gain -= weight_dict[old_mapping2][key]\n    match_triple_dict[tuple(new_mapping_list)] = match_num + gain\n    return gain", "docstring": "Compute the triple match number gain from the swapping\nArguments:\nmapping: current node mapping list\nnode_id1: node 1 index in AMR 1\nmapping_id1: the node index in AMR 2 node 1 maps to (in the current mapping)\nnode_id2: node 2 index in AMR 1\nmapping_id2: the node index in AMR 2 node 2 maps to (in the current mapping)\nweight_dict: weight dictionary\nmatch_num: the original matching triple number\nReturns:\nthe gain number (might be negative)", "source": "juraj-google-style"}
{"code": "def replace_dimensions(cls, dimensions, overrides):\n    from .dimension import Dimension\n    replaced = []\n    for d in dimensions:\n        if (d.name in overrides):\n            override = overrides[d.name]\n        elif (d.label in overrides):\n            override = overrides[d.label]\n        else:\n            override = None\n        if (override is None):\n            replaced.append(d)\n        elif isinstance(override, (util.basestring, tuple)):\n            replaced.append(d.clone(override))\n        elif isinstance(override, Dimension):\n            replaced.append(override)\n        elif isinstance(override, dict):\n            replaced.append(d.clone(override.get('name', None), **{k: v for (k, v) in override.items() if (k != 'name')}))\n        else:\n            raise ValueError('Dimension can only be overridden with another dimension or a dictionary of attributes')\n    return replaced", "docstring": "Replaces dimensions in list with dictionary of overrides.\n\nArgs:\ndimensions: List of dimensions\noverrides: Dictionary of dimension specs indexed by name\n\nReturns:\nlist: List of dimensions with replacements applied", "source": "codesearchnet"}
{"code": "def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None, execute_from_dir=None, print_exec=False, **kwargs):\n    if (not create_in_dir):\n        if (not self.structure_dir):\n            raise ValueError('Output directory must be specified')\n        self.homology_models_dir = op.join(self.structure_dir, 'homology_models')\n    else:\n        self.homology_models_dir = create_in_dir\n    ssbio.utils.make_dir(self.homology_models_dir)\n    if (not execute_from_dir):\n        execute_from_dir = self.homology_models_dir\n    repseq = self.representative_sequence\n    itasser_kwargs = {'light': True, 'java_home': None, 'binding_site_pred': False, 'ec_pred': False, 'go_pred': False, 'job_scheduler_header': None, 'additional_options': None}\n    if kwargs:\n        itasser_kwargs.update(kwargs)\n    ITASSERPrep(ident=self.id, seq_str=repseq.seq_str, root_dir=self.homology_models_dir, itasser_path=itasser_installation, itlib_path=itlib_folder, runtype=runtype, print_exec=print_exec, execute_dir=execute_from_dir, java_home=itasser_kwargs['java_home'], light=itasser_kwargs['light'], binding_site_pred=itasser_kwargs['binding_site_pred'], ec_pred=itasser_kwargs['ec_pred'], go_pred=itasser_kwargs['go_pred'], job_scheduler_header=itasser_kwargs['job_scheduler_header'], additional_options=itasser_kwargs['additional_options'])\n    log.debug('Prepared I-TASSER modeling folder {}'.format(self.homology_models_dir))", "docstring": "Prepare to run I-TASSER homology modeling for the representative sequence.\n\nArgs:\nitasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``\nitlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``\nruntype: How you will be running I-TASSER - local, slurm, or torque\ncreate_in_dir (str): Local directory where folders will be created\nexecute_from_dir (str): Optional path to execution directory - use this if you are copying the homology\nmodels to another location such as a supercomputer for running\nall_genes (bool): If all genes should be prepped, or only those without any mapped structures\nprint_exec (bool): If the execution statement should be printed to run modelling\n\nTodo:\n* Document kwargs - extra options for I-TASSER, SLURM or Torque execution\n* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?", "source": "codesearchnet"}
{"code": "def convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):\n    model_flags = build_model_flags(**kwargs)\n    conversion_flags = build_conversion_flags(**kwargs)\n    saved_model_dir = kwargs.get('saved_model_dir', None)\n    input_shapes = kwargs.get('input_shapes', None)\n    quantized_input_stats = kwargs.get('quantized_input_stats', None)\n    debug_info = kwargs.get('debug_info', None)\n    for idx, input_tensor in enumerate(input_tensors):\n        input_array = model_flags.input_arrays.add()\n        if saved_model_dir:\n            input_array.name = input_tensor.name\n        else:\n            input_array.name = util.get_tensor_name(input_tensor)\n        input_array.data_type = convert_tensor_tf_type_to_tflite_type(input_tensor.dtype, usage='input type of the TensorFlow model')\n        if _is_quantized_input_stats_required(conversion_flags):\n            if quantized_input_stats:\n                input_array.mean_value, input_array.std_value = quantized_input_stats[idx]\n            else:\n                warnings.warn('Statistics for quantized inputs were expected, but not specified; continuing anyway.')\n        if input_shapes is None:\n            shape = input_tensor.shape\n        else:\n            shape = input_shapes[idx]\n        if shape.rank is not None:\n            dims = []\n            for dim in shape:\n                if dim is None or (isinstance(dim, tensor_shape.Dimension) and dim.value is None):\n                    dims.append(-1)\n                else:\n                    dims.append(int(dim))\n            input_array.shape.dims.extend(dims)\n            input_array.shape.unknown_rank = False\n        else:\n            input_array.shape.unknown_rank = True\n    for output_tensor in output_tensors:\n        if saved_model_dir:\n            model_flags.output_arrays.append(output_tensor.name)\n        else:\n            model_flags.output_arrays.append(util.get_tensor_name(output_tensor))\n    data = convert(model_flags, conversion_flags, input_data.SerializeToString(), debug_info_str=debug_info.SerializeToString() if debug_info else None)\n    return data", "docstring": "Convert a frozen GraphDef model using the TF Lite converter.\n\nConversion can be customized by providing arguments that are forwarded to\n`build_model_flags` and `build_conversion_flags` (see documentation).\n\nArgs:\ninput_data: Input data (i.e. often `sess.graph_def`),\ninput_tensors: List of input tensors. Type and shape are computed using\n`foo.shape` and `foo.dtype`.\noutput_tensors: List of output tensors (only .name is used from this).\n**kwargs: See `build_model_flags` and `build_conversion_flags`.\n\nReturns:\nThe converted data. For example if TFLite was the destination, then\nthis will be a tflite flatbuffer in a bytes array.\n\nRaises:\nDefined in `build_conversion_flags`.", "source": "github-repos"}
{"code": "def table_update(self, table_name, table_info):\n    url = (Api._ENDPOINT + (Api._TABLES_PATH % table_name))\n    return datalab.utils.Http.request(url, method='PUT', data=table_info, credentials=self._credentials)", "docstring": "Updates the Table info.\n\nArgs:\ntable_name: the name of the table to update as a tuple of components.\ntable_info: the Table resource with updated fields.", "source": "codesearchnet"}
{"code": "def inject_positional_args(self, method):\n    inspect = self._modules['inspect']\n    argspec = inspect.getargspec(method)\n    keyword_arg_index = ((- 1) * len((argspec.defaults or [])))\n    arg_names = argspec.args[:(keyword_arg_index or None)]\n    kwarg_names = argspec.args[len(arg_names):]\n    functools = self._modules['functools']\n\n    @functools.wraps(method)\n    def method_wrapper(**kwargs):\n        'Wrapper that pulls values from openhtf.util.conf.'\n        for kwarg in kwarg_names:\n            if (kwarg in self):\n                self._logger.warning('Keyword arg %s not set from configuration, but is a configuration key', kwarg)\n        final_kwargs = {name: self[name] for name in arg_names if (name in self)}\n        for overridden in (set(kwargs) & set(final_kwargs)):\n            self._logger.warning('Overriding configuration value for kwarg %s (%s) with provided kwarg value: %s', overridden, self[overridden], kwargs[overridden])\n        final_kwargs.update(kwargs)\n        if inspect.ismethod(method):\n            name = ('%s.%s' % (method.__self__.__class__.__name__, method.__name__))\n        else:\n            name = method.__name__\n        self._logger.debug('Invoking %s with %s', name, final_kwargs)\n        return method(**final_kwargs)\n    if (argspec.args[0] == 'self'):\n\n        @functools.wraps(method)\n        def self_wrapper(self, **kwargs):\n            'Wrapper that pulls values from openhtf.util.conf.'\n            kwargs['self'] = self\n            return method_wrapper(**kwargs)\n        return self_wrapper\n    return method_wrapper", "docstring": "Decorator for injecting positional arguments from the configuration.\n\nThis decorator wraps the given method, so that any positional arguments are\npassed with corresponding values from the configuration.  The name of the\npositional argument must match the configuration key.\n\nKeyword arguments are *NEVER* modified, even if their names match\nconfiguration keys.  Avoid naming keyword args names that are also\nconfiguration keys to avoid confusion.\n\nAdditional positional arguments may be used that do not appear in the\nconfiguration, but those arguments *MUST* be specified as keyword arguments\nupon invocation of the method.  This is to avoid ambiguity in which\npositional arguments are getting which values.\n\nArgs:\nmethod: The method to wrap.\n\nReturns:\nA wrapper that, when invoked, will call the wrapped method, passing in\nconfiguration values for positional arguments.", "source": "codesearchnet"}
{"code": "def process_latest_result(self, latest_results, current_time_ms, recognize_element):\n    if latest_results.shape[0] != self._label_count:\n        raise ValueError('The results for recognition should contain {} elements, but there are {} produced'.format(self._label_count, latest_results.shape[0]))\n    if self._previous_results.__len__() != 0 and current_time_ms < self._previous_results[0][0]:\n        raise ValueError('Results must be fed in increasing time order, but receive a timestamp of {}, which was earlier than the previous one of {}'.format(current_time_ms, self._previous_results[0][0]))\n    self._previous_results.append([current_time_ms, latest_results])\n    time_limit = current_time_ms - self._average_window_duration_ms\n    while time_limit > self._previous_results[0][0]:\n        self._previous_results.popleft()\n    how_many_results = self._previous_results.__len__()\n    earliest_time = self._previous_results[0][0]\n    sample_duration = current_time_ms - earliest_time\n    if how_many_results < self._minimum_count or sample_duration < self._average_window_duration_ms / 4:\n        recognize_element.founded_command = self._previous_top_label\n        recognize_element.score = 0.0\n        recognize_element.is_new_command = False\n        return\n    average_scores = np.zeros(self._label_count)\n    for item in self._previous_results:\n        score = item[1]\n        for i in range(score.size):\n            average_scores[i] += score[i] / how_many_results\n    sorted_averaged_index_score = []\n    for i in range(self._label_count):\n        sorted_averaged_index_score.append([i, average_scores[i]])\n    sorted_averaged_index_score = sorted(sorted_averaged_index_score, key=lambda p: p[1], reverse=True)\n    current_top_index = sorted_averaged_index_score[0][0]\n    current_top_label = self._labels[current_top_index]\n    current_top_score = sorted_averaged_index_score[0][1]\n    time_since_last_top = 0\n    if self._previous_top_label == '_silence_' or self._previous_top_time == -np.inf:\n        time_since_last_top = np.inf\n    else:\n        time_since_last_top = current_time_ms - self._previous_top_time\n    if current_top_score > self._detection_threshold and current_top_label != self._previous_top_label and (time_since_last_top > self._suppression_ms):\n        self._previous_top_label = current_top_label\n        self._previous_top_time = current_time_ms\n        recognize_element.is_new_command = True\n    else:\n        recognize_element.is_new_command = False\n    recognize_element.founded_command = current_top_label\n    recognize_element.score = current_top_score", "docstring": "Smoothing the results in average window when a new result is added in.\n\nReceive a new result from inference and put the founded command into\na RecognizeResult instance after the smoothing procedure.\n\nArgs:\nlatest_results: A list containing the confidences of all labels.\ncurrent_time_ms: The start timestamp of the input audio clip.\nrecognize_element: An instance of RecognizeResult to store founded\ncommand, its scores and if it is a new command.\n\nRaises:\nValueError: The length of this result from inference doesn't match\nlabel count.\nValueError: The timestamp of this result is earlier than the most\nprevious one in the average window", "source": "github-repos"}
{"code": "def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._binary_op(x, y, tf.minimum, tf.float32)", "docstring": "Returns a TensorFluent for the minimum function.\n\nArgs:\nx: The first operand.\ny: The second operand.\n\nReturns:\nA TensorFluent wrapping the minimum function.", "source": "juraj-google-style"}
{"code": "def sanitize_filename(filename):\n    sanitized_filename = re.sub('[/\\\\\\\\:*?\"<>|]', '-', filename)\n    sanitized_filename = sanitized_filename.replace('&', 'and')\n    sanitized_filename = sanitized_filename.replace('\"', '')\n    sanitized_filename = sanitized_filename.replace(\"'\", '')\n    sanitized_filename = sanitized_filename.replace('/', '')\n    sanitized_filename = sanitized_filename.replace('\\\\', '')\n    if (sanitized_filename[0] == '.'):\n        sanitized_filename = (u'dot' + sanitized_filename[1:])\n    return sanitized_filename", "docstring": "Make sure filenames are valid paths.\n\nReturns:\nstr:", "source": "codesearchnet"}
{"code": "def recursively_convert_to_json_serializable(test_obj):\n    \n    \n    \n    \n    try:\n        if not isinstance(test_obj, list) and np.isnan(test_obj):\n            \n            \n            return None\n    except TypeError:\n        pass\n    except ValueError:\n        pass\n\n    if isinstance(test_obj, (string_types, integer_types, float, bool)):\n        \n        return test_obj\n\n    elif isinstance(test_obj, dict):\n        new_dict = {}\n        for key in test_obj:\n            \n            new_dict[str(key)] = recursively_convert_to_json_serializable(\n                test_obj[key])\n\n        return new_dict\n\n    elif isinstance(test_obj, (list, tuple, set)):\n        new_list = []\n        for val in test_obj:\n            new_list.append(recursively_convert_to_json_serializable(val))\n\n        return new_list\n\n    elif isinstance(test_obj, (np.ndarray, pd.Index)):\n        \n        \n        \n        return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]\n\n    \n    \n    elif test_obj is None:\n        \n        return test_obj\n\n    elif isinstance(test_obj, (datetime.datetime, datetime.date)):\n        return str(test_obj)\n\n    \n    \n    elif np.issubdtype(type(test_obj), np.bool_):\n        return bool(test_obj)\n\n    elif np.issubdtype(type(test_obj), np.integer) or np.issubdtype(type(test_obj), np.uint):\n        return int(test_obj)\n\n    elif np.issubdtype(type(test_obj), np.floating):\n        \n        return float(round(test_obj, sys.float_info.dig))\n\n    elif isinstance(test_obj, pd.DataFrame):\n        return recursively_convert_to_json_serializable(test_obj.to_dict(orient='records'))\n\n    \n        \n        \n        \n        \n        \n\n    elif isinstance(test_obj, decimal.Decimal):\n        return float(test_obj)\n\n    else:\n        raise TypeError('%s is of type %s which cannot be serialized.' % (\n            str(test_obj), type(test_obj).__name__))", "docstring": "Helper function to convert a dict object to one that is serializable\n\nArgs:\ntest_obj: an object to attempt to convert a corresponding json-serializable object\n\nReturns:\n(dict) A converted test_object\n\nWarning:\ntest_obj may also be converted in place.", "source": "juraj-google-style"}
{"code": "def validate_gcs_path(path, require_object):\n    (bucket, key) = datalab.storage._bucket.parse_name(path)\n    if (bucket is None):\n        raise Exception(('Invalid GCS path \"%s\"' % path))\n    if (require_object and (key is None)):\n        raise Exception(('It appears the GCS path \"%s\" is a bucket path but not an object path' % path))", "docstring": "Check whether a given path is a valid GCS path.\n\nArgs:\npath: the config to check.\nrequire_object: if True, the path has to be an object path but not bucket path.\n\nRaises:\nException if the path is invalid", "source": "codesearchnet"}
{"code": "def get_correct_answer(question, default=None, required=False, answer=None, is_answer_correct=None):\n    while 1:\n        if (default is None):\n            msg = u' - No Default Available'\n        else:\n            msg = u'\\n[DEFAULT] -> {}\\nPress Enter To Use Default'.format(default)\n        prompt = ((question + msg) + u'\\n--> ')\n        if (answer is None):\n            answer = six.moves.input(prompt)\n        if ((answer == '') and required and (default is not None)):\n            print(u'You have to enter a value\\n\\n')\n            six.moves.input(u'Press enter to continue')\n            print(u'\\n\\n')\n            answer = None\n            continue\n        if ((answer == u'') and (default is not None)):\n            answer = default\n        _ans = ask_yes_no(u'You entered {}, is this correct?'.format(answer), answer=is_answer_correct)\n        if _ans:\n            return answer\n        else:\n            answer = None", "docstring": "u\"\"\"Ask user a question and confirm answer\n\nArgs:\n\nquestion (str): Question to ask user\n\ndefault (str): Default answer if no input from user\n\nrequired (str): Require user to input answer\n\nanswer (str): Used for testing\n\nis_answer_correct (str): Used for testing", "source": "codesearchnet"}
{"code": "def _ParseAttribute(self, file_object):\n    \n    file_offset = file_object.tell()\n    attribute_map = self._GetDataTypeMap('cups_ipp_attribute')\n\n    try:\n      attribute, _ = self._ReadStructureFromFileObject(\n          file_object, file_offset, attribute_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse attribute with error: {0!s}'.format(exception))\n\n    value = None\n    if attribute.tag_value in self._INTEGER_TAG_VALUES:\n      \n      value = self._ParseIntegerValue(attribute.value_data, file_offset)\n\n    elif attribute.tag_value == self._TAG_VALUE_BOOLEAN:\n      value = self._ParseBooleanValue(attribute.value_data)\n\n    elif attribute.tag_value == self._TAG_VALUE_DATE_TIME:\n      \n      value = self._ParseDateTimeValue(attribute.value_data, file_offset)\n\n    elif attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES:\n      value = attribute.value_data.decode(self._last_charset_attribute)\n\n    elif attribute.tag_value in self._ASCII_STRING_VALUES:\n      value = attribute.value_data.decode('ascii')\n\n      if attribute.tag_value == self._TAG_VALUE_CHARSET:\n        self._last_charset_attribute = value\n\n    else:\n      value = attribute.value_data\n\n    return attribute.name, value", "docstring": "Parses a CUPS IPP attribute from a file-like object.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\ntuple[str, object]: attribute name and value.\n\nRaises:\nParseError: if the attribute cannot be parsed.", "source": "juraj-google-style"}
{"code": "def _get_val_from_ddb_data(data, keylist):\n    \n    next_type = None\n    \n    for k in keylist:\n        for k1 in k:\n            if next_type is None:\n                data = data[k[k1]]\n            else:\n                temp_dict = data[next_type]\n                data = temp_dict[k[k1]]\n            next_type = k1\n    if next_type == 'L':\n        \n        return _convert_ddb_list_to_list(data[next_type])\n    if next_type == 'N':\n        \n        \n        return int(data[next_type])\n    \n    return str(data[next_type])", "docstring": "Given a dictionary of dynamodb data (including the datatypes) and a\nproperly structured keylist, it will return the value of the lookup\n\nArgs:\ndata (dict): the raw dynamodb data\nkeylist(list): a list of keys to lookup. This must include the\ndatatype\n\nReturns:\nvarious: It returns the value from the dynamodb record, and casts it\nto a matching python datatype", "source": "juraj-google-style"}
{"code": "def next(self):\n    self._set_consumer_timeout_start()\n    while True:\n        try:\n            return six.next(self._get_message_iterator())\n        except StopIteration:\n            self._reset_message_iterator()\n        self._check_consumer_timeout()", "docstring": "Return the next available message\n\nBlocks indefinitely unless consumer_timeout_ms > 0\n\nReturns:\na single KafkaMessage from the message iterator\n\nRaises:\nConsumerTimeout after consumer_timeout_ms and no message\n\nNote:\nThis is also the method called internally during iteration", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all row access policies on the specified table.\n\nArgs:\nrequest: (BigqueryRowAccessPoliciesListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListRowAccessPoliciesResponse) The response message.", "source": "github-repos"}
{"code": "def username(self, value):\n        \n        self._username = value\n        self._connectionXML.set('username', value)", "docstring": "Set the connection's username property.\n\nArgs:\nvalue:  New username value. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def CheckMySQLConnection(db_options):\n  \n  for tries_left in range(_MYSQL_MAX_RETRIES, -1, -1):\n    try:\n      connection_options = dict(\n          host=db_options[\"Mysql.host\"],\n          port=db_options[\"Mysql.port\"],\n          db=db_options[\"Mysql.database_name\"],\n          user=db_options[\"Mysql.database_username\"],\n          passwd=db_options[\"Mysql.database_password\"],\n          charset=\"utf8\")\n\n      ssl_enabled = \"Mysql.client_key_path\" in db_options\n      if ssl_enabled:\n        connection_options[\"ssl\"] = {\n            \"key\": db_options[\"Mysql.client_key_path\"],\n            \"cert\": db_options[\"Mysql.client_cert_path\"],\n            \"ca\": db_options[\"Mysql.ca_cert_path\"],\n        }\n\n      connection = MySQLdb.connect(**connection_options)\n\n      if ssl_enabled:\n        cursor = connection.cursor()\n        cursor.execute(\"SHOW VARIABLES LIKE 'have_ssl'\")\n        res = cursor.fetchone()\n        if res[0] == \"have_ssl\" and res[1] == \"YES\":\n          print(\"SSL enabled successfully.\")\n        else:\n          print(\"Unable to establish SSL connection to MySQL.\")\n          return False\n\n      return True\n    except MySQLdb.OperationalError as mysql_op_error:\n      if len(mysql_op_error.args) < 2:\n        \n        \n        print(\"Unexpected exception type received from MySQL. %d attempts \"\n              \"left: %s\" % (tries_left, mysql_op_error))\n        time.sleep(_MYSQL_RETRY_WAIT_SECS)\n        continue\n      if mysql_op_error.args[0] == mysql_conn_errors.CONNECTION_ERROR:\n        print(\"Failed to connect to MySQL. Is it running? %d attempts left.\" %\n              tries_left)\n      elif mysql_op_error.args[0] == mysql_conn_errors.UNKNOWN_HOST:\n        print(\"Unknown-hostname error encountered while trying to connect to \"\n              \"MySQL.\")\n        return False  \n      elif mysql_op_error.args[0] == general_mysql_errors.BAD_DB_ERROR:\n        \n        \n        return True\n      elif mysql_op_error.args[0] in (\n          general_mysql_errors.ACCESS_DENIED_ERROR,\n          general_mysql_errors.DBACCESS_DENIED_ERROR):\n        print(\"Permission error encountered while trying to connect to \"\n              \"MySQL: %s\" % mysql_op_error)\n        return False  \n      else:\n        print(\"Unexpected operational error encountered while trying to \"\n              \"connect to MySQL. %d attempts left: %s\" %\n              (tries_left, mysql_op_error))\n    except MySQLdb.Error as mysql_error:\n      print(\"Unexpected error encountered while trying to connect to MySQL. \"\n            \"%d attempts left: %s\" % (tries_left, mysql_error))\n    time.sleep(_MYSQL_RETRY_WAIT_SECS)\n  return False", "docstring": "Checks whether a connection can be established to MySQL.\n\nArgs:\ndb_options: A dict mapping GRR MySQL config options to their values.\n\nReturns:\nA boolean indicating whether a connection could be made to a MySQL server\ninstance with the given options.", "source": "juraj-google-style"}
{"code": "def is_test_executed(self, test_name):\n        \n        for record in self.executed:\n            if record.test_name == test_name:\n                return True\n        return False", "docstring": "Checks if a specific test has been executed.\n\nArgs:\ntest_name: string, the name of the test to check.\n\nReturns:\nTrue if the test has been executed according to the test result,\nFalse otherwise.", "source": "juraj-google-style"}
{"code": "def Matches(self, file_entry):\n    \n    if not self._date_time_ranges:\n      return None\n\n    for date_time_range in self._date_time_ranges:\n      time_attribute = self._TIME_VALUE_MAPPINGS.get(\n          date_time_range.time_value, None)\n      if not time_attribute:\n        continue\n\n      timestamp = getattr(file_entry, time_attribute, None)\n      if timestamp is None:\n        continue\n\n      if (date_time_range.start_date_time is not None and\n          timestamp < date_time_range.start_date_time):\n        return False\n\n      if (date_time_range.end_date_time is not None and\n          timestamp > date_time_range.end_date_time):\n        return False\n\n    return True", "docstring": "Compares the file entry against the filter.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to compare.\n\nReturns:\nbool: True if the file entry matches the filter, False if not or\nNone if the filter does not apply.", "source": "juraj-google-style"}
{"code": "def list(self):\n        \n        self._initialize_list()\n        interested = True\n\n        response = self._cloudFormation.list_stacks()\n        print('Stack(s):')\n        while interested:\n            if 'StackSummaries' in response:\n                for stack in response['StackSummaries']:\n                    stack_status = stack['StackStatus']\n                    if stack_status != 'DELETE_COMPLETE':\n                        print('    [{}] - {}'.format(stack['StackStatus'], stack['StackName']))\n\n            next_token = response.get('NextToken', None)\n            if next_token:\n                response = self._cloudFormation.list_stacks(NextToken=next_token)\n            else:\n                interested = False\n\n        return True", "docstring": "List the existing stacks in the indicated region\n\nArgs:\nNone\n\nReturns:\nTrue if True\n\nTodo:\nFigure out what could go wrong and take steps\nto hanlde problems.", "source": "juraj-google-style"}
{"code": "def wait_for_compilation_job(self, job, poll=5):\n    desc = _wait_until((lambda : _compilation_job_status(self.sagemaker_client, job)), poll)\n    self._check_job_status(job, desc, 'CompilationJobStatus')\n    return desc", "docstring": "Wait for an Amazon SageMaker Neo compilation job to complete.\n\nArgs:\njob (str): Name of the compilation job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeCompilationJob`` API.\n\nRaises:\nValueError: If the compilation job fails.", "source": "codesearchnet"}
{"code": "def transform(self, col):\n        \n        out = pd.DataFrame()\n        out[self.col_name] = self.safe_datetime_cast(col)\n        out[self.col_name] = self.to_timestamp(out)\n\n        return out", "docstring": "Prepare the transformer to convert data and return the processed table.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def prefer_static_broadcast_shape(shape1,\n                                  shape2,\n                                  name=\"prefer_static_broadcast_shape\"):\n  \n  with tf.name_scope(name):\n\n    def make_shape_tensor(x):\n      return tf.convert_to_tensor(value=x, name=\"shape\", dtype=tf.int32)\n\n    def get_tensor_shape(s):\n      if isinstance(s, tf.TensorShape):\n        return s\n      s_ = tf.get_static_value(make_shape_tensor(s))\n      if s_ is not None:\n        return tf.TensorShape(s_)\n      return None\n\n    def get_shape_tensor(s):\n      if not isinstance(s, tf.TensorShape):\n        return make_shape_tensor(s)\n      if tensorshape_util.is_fully_defined(s):\n        return make_shape_tensor(tensorshape_util.as_list(s))\n      raise ValueError(\"Cannot broadcast from partially \"\n                       \"defined `TensorShape`.\")\n\n    shape1_ = get_tensor_shape(shape1)\n    shape2_ = get_tensor_shape(shape2)\n    if shape1_ is not None and shape2_ is not None:\n      return tf.broadcast_static_shape(shape1_, shape2_)\n\n    shape1_ = get_shape_tensor(shape1)\n    shape2_ = get_shape_tensor(shape2)\n    return tf.broadcast_dynamic_shape(shape1_, shape2_)", "docstring": "Convenience function which statically broadcasts shape when possible.\n\nArgs:\nshape1:  `1-D` integer `Tensor`.  Already converted to tensor!\nshape2:  `1-D` integer `Tensor`.  Already converted to tensor!\nname:  A string name to prepend to created ops.\n\nReturns:\nThe broadcast shape, either as `TensorShape` (if broadcast can be done\nstatically), or as a `Tensor`.", "source": "juraj-google-style"}
{"code": "def workspace_from_url(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None, download=False, baseurl=None):\n    if (dst_dir and (not dst_dir.startswith('/'))):\n        dst_dir = abspath(dst_dir)\n    if (mets_url is None):\n        if (baseurl is None):\n            raise Exception('Must pass mets_url and/or baseurl to workspace_from_url')\n        else:\n            mets_url = ('file:\n    if (baseurl is None):\n        baseurl = mets_url.rsplit('/', 1)[0]\n    log.debug(\"workspace_from_url\\nmets_url='%s'\\nbaseurl='%s'\\ndst_dir='%s'\", mets_url, baseurl, dst_dir)\n    if (':\n        mets_url = ('file:\n    if (dst_dir is None):\n        if mets_url.startswith('file:\n            dst_dir = dirname(mets_url[len('file:\n        else:\n            dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX)\n            log.debug(\"Creating workspace '%s' for METS @ <%s>\", dst_dir, mets_url)\n    if (mets_basename is None):\n        mets_basename = mets_url.rsplit('/', 1)[(- 1)].split('?')[0].split('\n    dst_mets = join(dst_dir, mets_basename)\n    log.debug(\"Copying mets url '%s' to '%s'\", mets_url, dst_mets)\n    if (('file:\n        log.debug('Target and source mets are identical')\n    elif (exists(dst_mets) and (not clobber_mets)):\n        raise Exception((\"File '%s' already exists but clobber_mets is false\" % dst_mets))\n    else:\n        self.download_to_directory(dst_dir, mets_url, basename=mets_basename)\n    workspace = Workspace(self, dst_dir, mets_basename=mets_basename, baseurl=baseurl)\n    if download:\n        for f in workspace.mets.find_files():\n            workspace.download_file(f)\n    return workspace", "docstring": "Create a workspace from a METS by URL.\n\nSets the mets.xml file\n\nArguments:\nmets_url (string): Source mets URL\ndst_dir (string, None): Target directory for the workspace\nclobber_mets (boolean, False): Whether to overwrite existing mets.xml. By default existing mets.xml will raise an exception.\ndownload (boolean, False): Whether to download all the files\nbaseurl (string, None): Base URL for resolving relative file locations\n\nReturns:\nWorkspace", "source": "codesearchnet"}
{"code": "def message_index(index_url):\n    idx = csv.reader(urllib2.urlopen(index_url), delimiter=':')\n    messages = []\n    for line in idx:\n        messages.append(line)\n    return messages", "docstring": "get message index of components for urllib2.\n\nArgs:\nurl(string):\n\nReturns:\nlist: messages", "source": "codesearchnet"}
{"code": "def batch_predict_async(training_dir, prediction_input_file, output_dir, mode, batch_size=16, shard_files=True, output_format='csv', cloud=False):\n    import google.datalab.utils as du\n    with warnings.catch_warnings():\n        warnings.simplefilter('ignore')\n        if cloud:\n            runner_results = cloud_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size, shard_files, output_format)\n            job = du.DataflowJob(runner_results)\n        else:\n            runner_results = local_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size, shard_files, output_format)\n            job = du.LambdaJob((lambda : runner_results.wait_until_finish()), job_id=None)\n    return job", "docstring": "Local and cloud batch prediction.\n\nArgs:\ntraining_dir: The output folder of training.\nprediction_input_file: csv file pattern to a file. File must be on GCS if\nrunning cloud prediction\noutput_dir: output location to save the results. Must be a GSC path if\nrunning cloud prediction.\nmode: 'evaluation' or 'prediction'. If 'evaluation', the input data must\ncontain a target column. If 'prediction', the input data must not\ncontain a target column.\nbatch_size: Int. How many instances to run in memory at once. Larger values\nmean better performace but more memeory consumed.\nshard_files: If False, the output files are not shardded.\noutput_format: csv or json. Json file are json-newlined.\ncloud: If ture, does cloud batch prediction. If False, runs batch prediction\nlocally.\n\nReturns:\nA google.datalab.utils.Job object that can be used to query state from or wait.", "source": "codesearchnet"}
{"code": "def get_q2(self, thetas=None, phis=None):\n    if ((thetas is not None) and (phis is not None)):\n        self.compute_trigonometric_terms(thetas, phis)\n    nnn = len(self._pow_sin_t[1])\n    nnn_range = range(nnn)\n    sqrt_15_2pi = sqrt((15.0 / (2.0 * pi)))\n    sqrt_5_pi = sqrt((5.0 / pi))\n    pre_y_2_2 = [((0.25 * sqrt_15_2pi) * val) for val in self._pow_sin_t[2]]\n    pre_y_2_1 = [(((0.5 * sqrt_15_2pi) * val[0]) * val[1]) for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])]\n    acc = 0.0\n    real = imag = 0.0\n    for i in nnn_range:\n        real += (pre_y_2_2[i] * self._cos_n_p[2][i])\n        imag -= (pre_y_2_2[i] * self._sin_n_p[2][i])\n    acc += ((real * real) + (imag * imag))\n    real = imag = 0.0\n    for i in nnn_range:\n        real += (pre_y_2_1[i] * self._cos_n_p[1][i])\n        imag -= (pre_y_2_1[i] * self._sin_n_p[1][i])\n    acc += ((real * real) + (imag * imag))\n    real = imag = 0.0\n    for i in nnn_range:\n        real += ((0.25 * sqrt_5_pi) * ((3.0 * self._pow_cos_t[2][i]) - 1.0))\n    acc += (real * real)\n    real = imag = 0.0\n    for i in nnn_range:\n        real -= (pre_y_2_1[i] * self._cos_n_p[1][i])\n        imag -= (pre_y_2_1[i] * self._sin_n_p[1][i])\n    acc += ((real * real) + (imag * imag))\n    real = imag = 0.0\n    for i in nnn_range:\n        real += (pre_y_2_2[i] * self._cos_n_p[2][i])\n        imag += (pre_y_2_2[i] * self._sin_n_p[2][i])\n    acc += ((real * real) + (imag * imag))\n    q2 = sqrt((((4.0 * pi) * acc) / (5.0 * float((nnn * nnn)))))\n    return q2", "docstring": "Calculates the value of the bond orientational order parameter of\nweight l=2.  If the function is called with non-empty lists of\npolar and azimuthal angles the corresponding trigonometric terms\nare computed afresh.  Otherwise, it is expected that the\ncompute_trigonometric_terms function has been just called.\n\nArgs:\nthetas ([float]): polar angles of all neighbors in radians.\nphis ([float]): azimuth angles of all neighbors in radians.\n\nReturns:\nfloat: bond orientational order parameter of weight l=2\ncorresponding to the input angles thetas and phis.", "source": "codesearchnet"}
{"code": "def cast_vdata(vdata=None, vtype='REG_SZ'):\n    registry = Registry()\n    vtype_value = registry.vtype[vtype]\n    if (vtype_value in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]):\n        return _to_unicode(vdata)\n    elif (vtype_value == win32con.REG_BINARY):\n        if isinstance(vdata, six.text_type):\n            return vdata.encode('utf-8')\n        return vdata\n    elif (vtype_value == win32con.REG_MULTI_SZ):\n        return [_to_unicode(i) for i in vdata]\n    elif (vtype_value == win32con.REG_QWORD):\n        return (vdata if six.PY3 else long(vdata))\n    else:\n        return int(vdata)", "docstring": "Cast the ``vdata` value to the appropriate data type for the registry type\nspecified in ``vtype``\n\nArgs:\n\nvdata (str, int, list, bytes): The data to cast\n\nvtype (str):\nThe type of data to be written to the registry. Must be one of the\nfollowing:\n\n- REG_BINARY\n- REG_DWORD\n- REG_EXPAND_SZ\n- REG_MULTI_SZ\n- REG_QWORD\n- REG_SZ\n\nReturns:\nThe vdata cast to the appropriate type. Will be unicode string, binary,\nlist of unicode strings, or int\n\nUsage:\n\n.. code-block:: python\n\nimport salt.utils.win_reg\nwinreg.cast_vdata(vdata='This is the string', vtype='REG_SZ')", "source": "codesearchnet"}
{"code": "def _get_resource(self, label: str, source: dict, resource_type: str):\n        \n        try:\n            return source[label]\n        except KeyError:\n            raise ValueError(\"Cannot find {0} with label '{1}'.\\nExisting {0} labels: {2}\".format(\n                resource_type, label, list(source.keys())))", "docstring": "Generic resoure fetcher handling errors.\n\nArgs:\nlabel (str): The label to fetch\nsource (dict): The dictionary to look up the label\nresource_type str: The display name of the resource type (used in errors)", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False):\n    \n    compressed_stream = resolver.Resolver.OpenFileObject(\n        path_spec, resolver_context=resolver_context)\n    if not compressed_stream:\n      raise errors.BackEndError(\n          'Unable to open compressed stream: {0:s}.'.format(\n              self.path_spec.comparable))\n\n    super(CompressedStreamFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._compressed_stream = compressed_stream\n    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\n\nRaises:\nBackEndError: when the compressed stream is missing.", "source": "juraj-google-style"}
{"code": "def get_aws_session(account):\n    from cloud_inquisitor.config import dbconfig\n    from cloud_inquisitor.plugins.types.accounts import AWSAccount\n    if (not isinstance(account, AWSAccount)):\n        raise InquisitorError('Non AWSAccount passed to get_aws_session, got {}'.format(account.__class__.__name__))\n    session = get_local_aws_session()\n    if (session.get_credentials().method in ['iam-role', 'env', 'explicit']):\n        sts = session.client('sts')\n    else:\n        temp_sts = session.client('sts')\n        audit_sts_role = temp_sts.assume_role(RoleArn=app_config.aws_api.instance_role_arn, RoleSessionName='inquisitor')\n        sts = boto3.session.Session(audit_sts_role['Credentials']['AccessKeyId'], audit_sts_role['Credentials']['SecretAccessKey'], audit_sts_role['Credentials']['SessionToken']).client('sts')\n    role = sts.assume_role(RoleArn='arn:aws:iam::{}:role/{}'.format(account.account_number, dbconfig.get('role_name', default='cinq_role')), RoleSessionName='inquisitor')\n    sess = boto3.session.Session(role['Credentials']['AccessKeyId'], role['Credentials']['SecretAccessKey'], role['Credentials']['SessionToken'])\n    return sess", "docstring": "Function to return a boto3 Session based on the account passed in the first argument.\n\nArgs:\naccount (:obj:`Account`): Account to create the session object for\n\nReturns:\n:obj:`boto3:boto3.session.Session`", "source": "codesearchnet"}
{"code": "def join(self):\n    c_api.TF_ServerJoin(self._server)", "docstring": "Blocks until the server has shut down.\n\nThis method currently blocks forever.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\njoining the TensorFlow server.", "source": "github-repos"}
{"code": "def response_list(data, key):\n    if (key not in data):\n        return None\n    if isinstance(data[key], list):\n        return data[key]\n    else:\n        return [data[key]]", "docstring": "Obtain the relevant response data in a list.\n\nIf the response does not already contain the result in a list, a new one\nwill be created to ease iteration in the parser methods.\n\nArgs:\ndata (dict): API response.\nkey (str): Attribute of the response that contains the result values.\n\nReturns:\nList of response items (usually dict) or None if the key is not present.", "source": "codesearchnet"}
{"code": "def initialize_plugs(self, plug_types=None):\n    \n    types = plug_types if plug_types is not None else self._plug_types\n    for plug_type in types:\n      \n      \n      plug_logger = self.logger.getChild(plug_type.__name__)\n      if plug_type in self._plugs_by_type:\n        continue\n      try:\n        if not issubclass(plug_type, BasePlug):\n          raise InvalidPlugError(\n              'Plug type \"%s\" is not an instance of BasePlug' % plug_type)\n        if plug_type.logger != _LOG:\n          \n          raise InvalidPlugError(\n              'Do not override \"logger\" in your plugs.', plug_type)\n\n        \n        plug_type.logger = plug_logger\n        try:\n          plug_instance = plug_type()\n        finally:\n          \n          plug_type.logger = _LOG\n        \n        \n        \n        if plug_instance.logger != _LOG:\n          raise InvalidPlugError(\n              'Do not set \"self.logger\" in __init__ in your plugs', plug_type)\n        else:\n          \n          plug_instance.logger = plug_logger\n      except Exception:  \n        plug_logger.exception('Exception instantiating plug type %s', plug_type)\n        self.tear_down_plugs()\n        raise\n      self.update_plug(plug_type, plug_instance)", "docstring": "Instantiate required plugs.\n\nInstantiates plug types and saves the instances in self._plugs_by_type for\nuse in provide_plugs().\n\nArgs:\nplug_types: Plug types may be specified here rather than passed\ninto the constructor (this is used primarily for unit testing\nphases).", "source": "juraj-google-style"}
{"code": "def AddFiles(self, hash_id_metadatas):\n    \n    for hash_id, metadata in iteritems(hash_id_metadatas):\n      self.AddFile(hash_id, metadata)", "docstring": "Adds multiple files to the file store.\n\nArgs:\nhash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple\nof hash client path and blob references).", "source": "juraj-google-style"}
{"code": "def create_cloudwatch_event(app_name, env, region, rules):\n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('events')\n    rule_name = rules.get('rule_name')\n    schedule = rules.get('schedule')\n    rule_description = rules.get('rule_description')\n    json_input = rules.get('json_input', {})\n    if (schedule is None):\n        LOG.critical('Schedule is required and no schedule is defined!')\n        raise InvalidEventConfiguration('Schedule is required and no schedule is defined!')\n    if (rule_name is None):\n        LOG.critical('Rule name is required and no rule_name is defined!')\n        raise InvalidEventConfiguration('Rule name is required and no rule_name is defined!')\n    else:\n        LOG.info('%s and %s', app_name, rule_name)\n        rule_name = '{}_{}'.format(app_name, rule_name.replace(' ', '_'))\n    if (rule_description is None):\n        rule_description = '{} - {}'.format(app_name, rule_name)\n    lambda_arn = get_lambda_arn(app=app_name, account=env, region=region)\n    account_id = get_env_credential(env=env)['accountId']\n    principal = 'events.amazonaws.com'\n    statement_id = '{}_cloudwatch_{}'.format(app_name, rule_name)\n    source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(region, account_id, rule_name)\n    add_lambda_permissions(function=lambda_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=source_arn, env=env, region=region)\n    cloudwatch_client.put_rule(Name=rule_name, ScheduleExpression=schedule, State='ENABLED', Description=rule_description)\n    targets = []\n    json_payload = '{}'.format(json.dumps(json_input))\n    target = {'Id': app_name, 'Arn': lambda_arn, 'Input': json_payload}\n    targets.append(target)\n    put_targets_response = cloudwatch_client.put_targets(Rule=rule_name, Targets=targets)\n    LOG.debug('Cloudwatch put targets response: %s', put_targets_response)\n    LOG.info('Created Cloudwatch event \"%s\" with schedule: %s', rule_name, schedule)", "docstring": "Create cloudwatch event for lambda from rules.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\nrules (dict): Trigger rules from the settings", "source": "codesearchnet"}
{"code": "def __init__(self, _args):\n        \n        super(TcExRun, self).__init__(_args)\n\n        \n        self._signal_handler_init()\n        self._config = None\n        self._profile = {}\n        self._staging_data = None\n        self.container = None\n        self.reports = Reports()\n        self.tcex = None\n        self.docker_image = 'tcintegrations/tci-dev:latest'\n\n        \n        self.log = self._logger()\n\n        self._clear_redis_tracker = []\n        self.json_report = {}\n        self.max_diff = 10\n        self.sleep = 0\n        \n        self.display_name = None\n        self.program_main = None\n        self.program_version = None\n        self.runtime_level = None\n\n        self.shell = False", "docstring": "Initialize Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def reschedule(cls,\n                 mapreduce_state,\n                 mapreduce_spec,\n                 serial_id,\n                 queue_name=None):\n    \n    task_name = ControllerCallbackHandler.get_task_name(\n        mapreduce_spec, serial_id)\n    task_params = ControllerCallbackHandler.controller_parameters(\n        mapreduce_spec, serial_id)\n    if not queue_name:\n      queue_name = os.environ.get(\"HTTP_X_APPENGINE_QUEUENAME\", \"default\")\n\n    controller_callback_task = model.HugeTask(\n        url=(mapreduce_spec.params[\"base_path\"] + \"/controller_callback/\" +\n             mapreduce_spec.mapreduce_id),\n        name=task_name, params=task_params,\n        countdown=parameters.config._CONTROLLER_PERIOD_SEC,\n        parent=mapreduce_state,\n        headers=util._get_task_headers(mapreduce_spec.mapreduce_id))\n\n    if not _run_task_hook(mapreduce_spec.get_hooks(),\n                          \"enqueue_controller_task\",\n                          controller_callback_task,\n                          queue_name):\n      try:\n        controller_callback_task.add(queue_name)\n      except (taskqueue.TombstonedTaskError,\n              taskqueue.TaskAlreadyExistsError), e:\n        logging.warning(\"Task %r with params %r already exists. %s: %s\",\n                        task_name, task_params, e.__class__, e)", "docstring": "Schedule new update status callback task.\n\nArgs:\nmapreduce_state: mapreduce state as model.MapreduceState\nmapreduce_spec: mapreduce specification as MapreduceSpec.\nserial_id: id of the invocation as int.\nqueue_name: The queue to schedule this task on. Will use the current\nqueue of execution if not supplied.", "source": "juraj-google-style"}
{"code": "def apply(self, func, *args, **kwargs):\n    ret = func(self._t, *args, **kwargs)\n    return LinearWrap(ret)", "docstring": "Apply a function on the wrapped tensor.\n\nReturns:\nLinearWrap: ``LinearWrap(func(self.tensor(), *args, **kwargs))``.", "source": "codesearchnet"}
{"code": "def collect_trajectories(env, policy_fun, num_trajectories=1, policy='greedy', max_timestep=None, epsilon=0.1):\n    trajectories = []\n    for t in range(num_trajectories):\n        t_start = time.time()\n        rewards = []\n        actions = []\n        done = False\n        observation = env.reset()\n        observation_history = observation[(np.newaxis, np.newaxis, :)]\n        ts = 0\n        while ((not done) and ((not max_timestep) or (observation_history.shape[1] < max_timestep))):\n            ts_start = time.time()\n            predictions = policy_fun(observation_history)\n            predictions = np.squeeze(predictions, axis=0)[(- 1)]\n            action = None\n            if (policy == 'greedy'):\n                action = np.argmax(predictions)\n            elif (policy == 'epsilon-greedy'):\n                if (onp.random.random() < epsilon):\n                    action = onp.random.randint(0, high=len(predictions))\n                else:\n                    action = np.argmax(predictions)\n            elif (policy == 'categorical-sampling'):\n                predictions = np.exp(predictions)\n                action = onp.argwhere((onp.random.multinomial(1, predictions) == 1))\n            else:\n                raise ValueError(('Unknown policy: %s' % policy))\n            try:\n                action = int(action)\n            except TypeError as err:\n                logging.error('Cannot convert action into an integer: [%s]', err)\n                logging.error('action.shape: [%s]', action.shape)\n                logging.error('action: [%s]', action)\n                logging.error('predictions.shape: [%s]', predictions.shape)\n                logging.error('predictions: [%s]', predictions)\n                logging.error('observation_history: [%s]', observation_history)\n                raise err\n            (observation, reward, done, _) = env.step(action)\n            observation_history = np.concatenate([observation_history, observation[(np.newaxis, np.newaxis, :)]], axis=1)\n            rewards.append(reward)\n            actions.append(action)\n            ts += 1\n            logging.vlog(2, '  Collected time-step[ %5d] of trajectory[ %5d] in [%0.2f] msec.', ts, t, get_time(ts_start))\n        logging.vlog(2, ' Collected trajectory[ %5d] in [%0.2f] msec.', t, get_time(t_start))\n        assert (done or (max_timestep and (max_timestep >= observation_history.shape[1])))\n        observation_history = np.squeeze(observation_history, axis=0)\n        trajectories.append((observation_history, np.stack(actions), np.stack(rewards)))\n    return trajectories", "docstring": "Collect trajectories with the given policy net and behaviour.\n\nArgs:\nenv: A gym env interface, for now this is not-batched.\npolicy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable.\nnum_trajectories: int, number of trajectories.\npolicy: string, \"greedy\", \"epsilon-greedy\", or \"categorical-sampling\" i.e.\nhow to use the policy_fun to return an action.\nmax_timestep: int or None, the index of the maximum time-step at which we\nreturn the trajectory, None for ending a trajectory only when env\nreturns done.\nepsilon: float, the epsilon for `epsilon-greedy` policy.\n\nReturns:\ntrajectory: list of (observation, action, reward) tuples, where each element\n`i` is a tuple of numpy arrays with shapes as follows:\nobservation[i] = (B, T_i + 1)\naction[i] = (B, T_i)\nreward[i] = (B, T_i)", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(APFSFileSystem, self).__init__(resolver_context)\n    self._fsapfs_volume = None", "docstring": "Initializes an APFS file system.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def validate_signature(self, signature, data, encoding='utf8'):\n    if isinstance(data, string_types):\n        data = bytearray(data, encoding)\n    if isinstance(signature, string_types):\n        signature = bytearray(signature, encoding)\n    secret_key = bytearray(self.secret_key, 'utf8')\n    hashed = hmac.new(secret_key, data, sha1)\n    encoded = b64encode(hashed.digest())\n    return (encoded.strip() == signature.strip())", "docstring": "Validate the signature for the provided data.\n\nArgs:\nsignature (str or bytes or bytearray): Signature that was provided\nfor the request.\ndata (str or bytes or bytearray): Data string to validate against\nthe signature.\nencoding (str, optional): If a string was provided for ``data`` or\n``signature``, this is the character encoding.\n\nReturns:\nbool: Whether the signature is valid for the provided data.", "source": "codesearchnet"}
{"code": "def add_nodes(self, root_id, current_node, indent=1):\n        \n        \n        if not current_node.children:\n            return\n\n        config.LOGGER.info(\"({count} of {total} uploaded) {indent}Processing {title} ({kind})\".format(\n            count=self.node_count_dict['upload_count'],\n            total=self.node_count_dict['total_count'],\n            indent=\"   \" * indent,\n            title=current_node.title,\n            kind=current_node.__class__.__name__)\n        )\n\n        \n        try:\n            chunks = [current_node.children[x:x+10] for x in range(0, len(current_node.children), 10)]\n            for chunk in chunks:\n                payload_children = []\n\n                for child in chunk:\n                    failed = [f for f in child.files if f.is_primary and (not f.filename or self.failed_uploads.get(f.filename))]\n                    if any(failed):\n                        if not self.failed_node_builds.get(root_id):\n                            error_message = \"\"\n                            for fail in failed:\n                                reason = fail.filename + \": \" + self.failed_uploads.get(fail.filename) if fail.filename else \"File failed to download\"\n                                error_message = error_message + reason + \", \"\n                            self.failed_node_builds[root_id] = {'node': current_node, 'error': error_message[:-2]}\n                    else:\n                        payload_children.append(child.to_dict())\n                payload = {\n                    'root_id': root_id,\n                    'content_data': payload_children\n                }\n\n                \n                \n\n                response = config.SESSION.post(config.add_nodes_url(), data=json.dumps(payload))\n                if response.status_code != 200:\n                    self.failed_node_builds[root_id] = {'node': current_node, 'error': response.reason}\n                else:\n                    response_json = json.loads(response._content.decode(\"utf-8\"))\n                    self.node_count_dict['upload_count'] += len(chunk)\n\n                    if response_json['root_ids'].get(child.get_node_id().hex):\n                        for child in chunk:\n                            self.add_nodes(response_json['root_ids'].get(child.get_node_id().hex), child, indent + 1)\n        except ConnectionError as ce:\n            self.failed_node_builds[root_id] = {'node': current_node, 'error': ce}", "docstring": "add_nodes: adds processed nodes to tree\nArgs:\nroot_id (str): id of parent node on Kolibri Studio\ncurrent_node (Node): node to publish children\nindent (int): level of indentation for printing\nReturns: link to uploadedchannel", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n        \n        num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])\n        if num_dag_qubits > self.coupling_map.size():\n            raise TranspilerError('Number of qubits greater than device.')\n        best_sub = self._best_subset(num_dag_qubits)\n        layout = Layout()\n        map_iter = 0\n        for qreg in dag.qregs.values():\n            for i in range(qreg.size):\n                layout[(qreg, i)] = int(best_sub[map_iter])\n                map_iter += 1\n        self.property_set['layout'] = layout", "docstring": "Pick a convenient layout depending on the best matching\nqubit connectivity, and set the property `layout`.\n\nArgs:\ndag (DAGCircuit): DAG to find layout for.\n\nRaises:\nTranspilerError: if dag wider than self.coupling_map", "source": "juraj-google-style"}
{"code": "def flatten(structure):\n    return tree_impl.flatten(structure)", "docstring": "Flattens a possibly nested structure into a list.\n\nIn the case of dict instances, the sequence consists of the values,\nsorted by key to ensure deterministic behavior. However, instances of\n`collections.OrderedDict` are handled differently: their sequence order is\nused instead of the sorted keys. The same convention is followed in\n`pack_sequence_as`. This correctly unflattens dicts and `OrderedDict` after\nthey have been flattened, or vice-versa.\n\nDictionaries with non-sortable keys are not supported.\n\nExamples:\n\n>>> keras.tree.flatten([[1, 2, 3], [4, [5], [[6]]]])\n[1, 2, 3, 4, 5, 6]\n>>> keras.tree.flatten(None)\n[None]\n>>> keras.tree.flatten(1)\n[1]\n>>> keras.tree.flatten({100: 'world!', 6: 'Hello'})\n['Hello', 'world!']\n\nArgs:\nstructure: An arbitrarily nested structure.\n\nReturns:\nA list, the flattened version of the input `structure`.", "source": "github-repos"}
{"code": "def _read_messages_until_true(self, predicate, timeout):\n    while (not predicate()):\n        self._message_received.acquire()\n        if self._reader_lock.acquire(False):\n            try:\n                self._message_received.release()\n                if predicate():\n                    return\n                self._handle_message(self.adb_connection.read_for_stream(self, timeout))\n                with self._message_received:\n                    self._message_received.notify_all()\n            finally:\n                self._reader_lock.release()\n        else:\n            try:\n                self._message_received.wait(timeout.remaining)\n                if timeout.has_expired():\n                    raise usb_exceptions.AdbTimeoutError('%s timed out reading messages.', self)\n            finally:\n                self._message_received.release()", "docstring": "Read a message from this stream and handle it.\n\nThis method tries to read a message from this stream, blocking until a\nmessage is read.  Once read, it will handle it accordingly by calling\nself._handle_message().\n\nThis is repeated as long as predicate() returns False.  There is some\nlocking used internally here so that we don't end up with multiple threads\nblocked on a call to read_for_stream when another thread has read the\nmessage that caused predicate() to become True.\n\nArgs:\npredicate: Callable, keep reading messages until it returns true.  Note\nthat predicate() should not block, as doing so may cause this method to\nhang beyond its timeout.\ntimeout: Timeout to use for this call.\n\nRaises:\nAdbStreamClosedError: If this stream is already closed.", "source": "codesearchnet"}
{"code": "def assert_is_compatible_with(self, other):\n    if not self.is_compatible_with(other):\n        raise ValueError('Dimensions %s and %s are not compatible' % (self, other))", "docstring": "Raises an exception if `other` is not compatible with this Dimension.\n\nArgs:\nother: Another Dimension.\n\nRaises:\nValueError: If `self` and `other` are not compatible (see\nis_compatible_with).", "source": "github-repos"}
{"code": "def _IsBase64(cls, s):\n    \n    try:\n      if base64.b64encode(base64.b64decode(s)).decode('utf-8') == s:\n        return True\n    except (TypeError, binascii.Error):\n      pass\n    return False", "docstring": "An imperfect but decent method for determining if a string is base64.\n\nArgs:\ns: A string with the data to test.\n\nReturns:\nTrue if s is base64, else False.", "source": "juraj-google-style"}
{"code": "def loss_l2(self, l2=0):\n    if isinstance(l2, (int, float)):\n        D = (l2 * torch.eye(self.d))\n    else:\n        D = torch.diag(torch.from_numpy(l2))\n    return (torch.norm((D @ (self.mu - self.mu_init))) ** 2)", "docstring": "L2 loss centered around mu_init, scaled optionally per-source.\n\nIn other words, diagonal Tikhonov regularization,\n||D(\\mu-\\mu_{init})||_2^2\nwhere D is diagonal.\n\nArgs:\n- l2: A float or np.array representing the per-source regularization\nstrengths to use", "source": "codesearchnet"}
{"code": "def get_table(e: exp.Expression) -> str:\n    table = e.find(exp.Table).args['this'].args['this']\n    if table in table_dataset_map:\n        table = table_dataset_map[table]\n    return table", "docstring": "Get the table name from an expression.\n\nArgs:\ne (Expression): The expression containing table information.\n\nReturns:\nstr: The table name.", "source": "github-repos"}
{"code": "def save_variation_for_experiment(self, experiment_id, variation_id):\n    \n\n    self.experiment_bucket_map.update({\n      experiment_id: {\n        self.VARIATION_ID_KEY: variation_id\n      }\n    })", "docstring": "Helper method to save new experiment/variation as part of the user's profile.\n\nArgs:\nexperiment_id: ID for experiment for which the decision is to be stored.\nvariation_id: ID for variation that the user saw.", "source": "juraj-google-style"}
{"code": "def callback_handler(self):\n    decorator = self\n\n    class OAuth2Handler(webapp.RequestHandler):\n        'Handler for the redirect_uri of the OAuth 2.0 dance.'\n\n        @login_required\n        def get(self):\n            error = self.request.get('error')\n            if error:\n                errormsg = self.request.get('error_description', error)\n                self.response.out.write('The authorization request failed: {0}'.format(_safe_html(errormsg)))\n            else:\n                user = users.get_current_user()\n                decorator._create_flow(self)\n                credentials = decorator.flow.step2_exchange(self.request.params)\n                decorator._storage_class(decorator._credentials_class, None, decorator._credentials_property_name, user=user).put(credentials)\n                redirect_uri = _parse_state_value(str(self.request.get('state')), user)\n                if (redirect_uri is None):\n                    self.response.out.write('The authorization request failed')\n                    return\n                if (decorator._token_response_param and credentials.token_response):\n                    resp_json = json.dumps(credentials.token_response)\n                    redirect_uri = _helpers._add_query_parameter(redirect_uri, decorator._token_response_param, resp_json)\n                self.redirect(redirect_uri)\n    return OAuth2Handler", "docstring": "RequestHandler for the OAuth 2.0 redirect callback.\n\nUsage::\n\napp = webapp.WSGIApplication([\n('/index', MyIndexHandler),\n...,\n(decorator.callback_path, decorator.callback_handler())\n])\n\nReturns:\nA webapp.RequestHandler that handles the redirect back from the\nserver during the OAuth 2.0 dance.", "source": "codesearchnet"}
{"code": "def get_uri(dir_name):\n    \n    fullpath = os.path.abspath(dir_name)\n    try:\n        hostname = socket.gethostbyaddr(socket.gethostname())[0]\n    except:\n        hostname = socket.gethostname()\n    return \"{}:{}\".format(hostname, fullpath)", "docstring": "Returns the URI path for a directory. This allows files hosted on\ndifferent file servers to have distinct locations.\n\nArgs:\ndir_name:\nA directory name.\n\nReturns:\nFull URI path, e.g., fileserver.host.com:/full/path/of/dir_name.", "source": "juraj-google-style"}
{"code": "def load_model(file_path: str) -> NormalizedModel:\n    with open(file_path) as f:\n        model = json.load(f)\n    model_flat = OrderedDict()\n    for category in model:\n        for item in model[category]:\n            model_flat['%s:%s' % (category, item)] = model[category][item]\n    weights = jnp.array(list(model_flat.values()))\n    weights = weights / weights.std()\n    weights = weights - weights.mean()\n    keys = list(model_flat.keys())\n    return NormalizedModel(keys, weights)", "docstring": "Loads a model as a pair of a features list and a normalized weight vector.\n\nArgs:\nfile_path: A file path for the model JSON file.\n\nReturns:\nA normalized model, which is a pair of a list of feature identifiers and a\nnormalized weight vector.", "source": "github-repos"}
{"code": "def parse_meta(filename, data):\n    \n    if \".\" not in filename:\n        raise MetaParsingException(\n            \"Can't recognize type of your metadata ('%s')!\" % filename\n        )\n\n    suffix = filename.rsplit(\".\", 1)[1].lower()\n\n    if suffix not in SUPPORTED_FILES:\n        raise MetaParsingException(\"Can't parse file of type '%s'!\" % suffix)\n\n    fp = validator.FieldParser()\n    for key, val in SUPPORTED_FILES[suffix](data).items():\n        fp.process(key, val)\n\n    return fp.get_epublication()", "docstring": "Parse `data` to EPublication.\n\nArgs:\nfilename (str): Used to choose right parser based at suffix.\ndata (str): Content of the metadata file.\n\nReturns:\nEPublication: object.", "source": "juraj-google-style"}
{"code": "def CheckTaskReadyForMerge(self, task):\n    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n        raise IOError('Unsupported storage type.')\n    if (not self._processed_task_storage_path):\n        raise IOError('Missing processed task storage path.')\n    processed_storage_file_path = self._GetProcessedStorageFilePath(task)\n    try:\n        stat_info = os.stat(processed_storage_file_path)\n    except (IOError, OSError):\n        return False\n    task.storage_file_size = stat_info.st_size\n    return True", "docstring": "Checks if a task is ready for merging with this session storage.\n\nIf the task is ready to be merged, this method also sets the task's\nstorage file size.\n\nArgs:\ntask (Task): task.\n\nReturns:\nbool: True if the task is ready to be merged.\n\nRaises:\nIOError: if the storage type is not supported or\nOSError: if the storage type is not supported or\nif the temporary path for the task storage does not exist.", "source": "codesearchnet"}
{"code": "def AddRow(self, values):\n    \n    super(CLITableView, self).AddRow(values)\n\n    value_length = len(values[0])\n    if value_length > self._column_width:\n      self._column_width = value_length", "docstring": "Adds a row of values.\n\nArgs:\nvalues (list[object]): values.\n\nRaises:\nValueError: if the number of values is out of bounds.", "source": "juraj-google-style"}
{"code": "def _register_and_parse_flags_with_usage(argv=None, flags_parser=parse_flags_with_usage):\n    if _register_and_parse_flags_with_usage.done:\n        raise SystemError('Flag registration can be done only once.')\n    define_help_flags()\n    original_argv = (sys.argv if (argv is None) else argv)\n    args_to_main = flags_parser(original_argv)\n    if (not FLAGS.is_parsed()):\n        raise Error('FLAGS must be parsed after flags_parser is called.')\n    if FLAGS.only_check_args:\n        sys.exit(0)\n    if FLAGS['verbosity'].using_default_value:\n        FLAGS.verbosity = 0\n    _register_and_parse_flags_with_usage.done = True\n    return args_to_main", "docstring": "Registers help flags, parses arguments and shows usage if appropriate.\n\nThis also calls sys.exit(0) if flag --only_check_args is True.\n\nArgs:\nargv: [str], a non-empty list of the command line arguments including\nprogram name, sys.argv is used if None.\nflags_parser: Callable[[List[Text]], Any], the function used to parse flags.\nThe return value of this function is passed to `main` untouched.\nIt must guarantee FLAGS is parsed after this function is called.\n\nReturns:\nThe return value of `flags_parser`. When using the default `flags_parser`,\nit returns the following:\n[str], a non-empty list of remaining command line arguments after parsing\nflags, including program name.\n\nRaises:\nError: Raised when flags_parser is called, but FLAGS is not parsed.\nSystemError: Raised when it's called more than once.", "source": "codesearchnet"}
{"code": "def _policy_equivalent_to_dtype(policy):\n    return type(policy) == Policy and list(policy.get_config().keys()) == ['name'] and (policy.name == '_infer' or _is_convertible_to_dtype(policy.name))", "docstring": "Returns True if the Policy is equivalent to a single dtype.\n\nA policy is equivalent to a single dtype if the policy's compute and variable\ndtypes are the same and the policy's type is Policy and not a subclass of\nPolicy (such as PolicyV1).\n\nThe \"_infer\" policy is considered equivalent to a single dtype.\n\nArgs:\npolicy: A Policy.\n\nReturns:\nTrue, if the policy is equivalent to a single dtype.", "source": "github-repos"}
{"code": "async def retry_async(func, attempts=5, sleeptime_callback=calculate_sleep_time, retry_exceptions=Exception, args=(), kwargs=None, sleeptime_kwargs=None):\n    kwargs = (kwargs or {})\n    attempt = 1\n    while True:\n        try:\n            return (await func(*args, **kwargs))\n        except retry_exceptions:\n            attempt += 1\n            if (attempt > attempts):\n                log.warning('retry_async: {}: too many retries!'.format(func.__name__))\n                raise\n            sleeptime_kwargs = (sleeptime_kwargs or {})\n            sleep_time = sleeptime_callback(attempt, **sleeptime_kwargs)\n            log.debug('retry_async: {}: sleeping {} seconds before retry'.format(func.__name__, sleep_time))\n            (await asyncio.sleep(sleep_time))", "docstring": "Retry ``func``, where ``func`` is an awaitable.\n\nArgs:\nfunc (function): an awaitable function.\nattempts (int, optional): the number of attempts to make.  Default is 5.\nsleeptime_callback (function, optional): the function to use to determine\nhow long to sleep after each attempt.  Defaults to ``calculateSleepTime``.\nretry_exceptions (list or exception, optional): the exception(s) to retry on.\nDefaults to ``Exception``.\nargs (list, optional): the args to pass to ``function``.  Defaults to ()\nkwargs (dict, optional): the kwargs to pass to ``function``.  Defaults to\n{}.\nsleeptime_kwargs (dict, optional): the kwargs to pass to ``sleeptime_callback``.\nIf None, use {}.  Defaults to None.\n\nReturns:\nobject: the value from a successful ``function`` call\n\nRaises:\nException: the exception from a failed ``function`` call, either outside\nof the retry_exceptions, or one of those if we pass the max\n``attempts``.", "source": "codesearchnet"}
{"code": "def GetSubkeyByPath(self, key_path):\n    \n    pyregf_key = self._pyregf_key.get_sub_key_by_path(key_path)\n    if not pyregf_key:\n      return None\n\n    key_path = key_paths.JoinKeyPath([self._key_path, key_path])\n    return REGFWinRegistryKey(pyregf_key, key_path=key_path)", "docstring": "Retrieves a subkey by path.\n\nArgs:\nkey_path (str): path of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.", "source": "juraj-google-style"}
{"code": "def __init__(self, dataFrame=None, editable=False):\n        \n        super(ColumnDtypeModel, self).__init__()\n        self.headers = ['column', 'data type']\n\n        self._editable = editable\n\n        self._dataFrame = pandas.DataFrame()\n        if dataFrame is not None:\n            self.setDataFrame(dataFrame)", "docstring": "the __init__ method.\n\nArgs:\ndataFrame (pandas.core.frame.DataFrame, optional): initializes the model with given DataFrame.\nIf none is given an empty DataFrame will be set. defaults to None.\neditable (bool, optional): apply changes while changing dtype. defaults to True.", "source": "juraj-google-style"}
{"code": "def set_tick(self, index, interval):\n        \n\n        name = self.tick_name(index)\n        if name is None:\n            return pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY)\n\n        self.ticks[name] = interval\n        return Error.NO_ERROR", "docstring": "Update the a tick's interval.\n\nArgs:\nindex (int): The index of the tick that you want to fetch.\ninterval (int): The number of seconds between ticks.\nSetting this to 0 will disable the tick.\n\nReturns:\nint: An error code.", "source": "juraj-google-style"}
{"code": "def save(self, data):\n        \n        if self.__nested:\n            raise ConfigLoaderException(\"Cannot save the config if the 'nested' paramter is True!\")\n\n        if self.__loaded_config_file is None:\n            raise ConfigLoaderException(\"Load not called yet!\")\n\n        try:\n            with open(self.__loaded_config_file, 'w') as f:\n                f.write(self.__formatter.encode(data))\n        except Exception as e:\n            raise ConfigLoaderException(\"Config data is not serializable: %s\" % e)", "docstring": "Save the config data\n\nArgs:\ndata: any serializable config data\n\nRaises:\nConfigLoaderException: if the ConfigLoader.load not called, so there is no config file name,\nor the data is not serializable or the loader is nested", "source": "juraj-google-style"}
{"code": "def _handle_location(self, location):\n        \n        if not isinstance(location, ElementTree.Element):\n            element = self.find(location)\n            if element is None:\n                raise ValueError(\"Invalid path!\")\n        else:\n            element = location\n        return element", "docstring": "Return an element located at location with flexible args.\n\nArgs:\nlocation: String xpath to use in an Element.find search OR\nan Element (which is simply returned).\n\nReturns:\nThe found Element.\n\nRaises:\nValueError if the location is a string that results in a\nfind of None.", "source": "juraj-google-style"}
{"code": "def do_post(self, uri, resource, timeout, custom_headers):\n        \n        self.validate_resource_uri(uri)\n\n        task, entity = self._connection.post(uri, resource, custom_headers=custom_headers)\n\n        if not task:\n            return entity\n\n        return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Helps to make post requests.\n\nArgs:\nuri: URI of  the resource.\nresource: Resource data to post.\ntimeout: Time out for the request in seconds.\ncutom_headers: Allows to add custom http headers.\n\nReturns:\nRetunrs Task object.", "source": "juraj-google-style"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        tstream = BytearrayStream()\n\n        self.extension_name.write(tstream, kmip_version=kmip_version)\n\n        if self.extension_tag is not None:\n            self.extension_tag.write(tstream, kmip_version=kmip_version)\n        if self.extension_type is not None:\n            self.extension_type.write(tstream, kmip_version=kmip_version)\n\n        self.length = tstream.length()\n        super(ExtensionInformation, self).write(\n            ostream,\n            kmip_version=kmip_version\n        )\n        ostream.write(tstream.buffer)", "docstring": "Write the data encoding the ExtensionInformation object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def replace_by_etree(self, root_el, el_idx=0):\n        \n        el = self.get_element_by_name(root_el.tag, el_idx)\n        el[:] = list(root_el)\n        el.attrib = root_el.attrib", "docstring": "Replace element.\n\nSelect element that has the same name as ``root_el``, then replace the selected\nelement with ``root_el``\n\n``root_el`` can be a single element or the root of an element tree.\n\nArgs:\nroot_el : element\nNew element that will replace the existing element.", "source": "juraj-google-style"}
{"code": "def label_sequential_regions(inlist):\n    import more_itertools as mit\n    df = pd.DataFrame(inlist).set_index(0)\n    labeled = {}\n    for label in df[1].unique():\n        iterable = df[(df[1] == label)].index.tolist()\n        labeled.update({'{}{}'.format(label, (i + 1)): items for (i, items) in enumerate([list(group) for group in mit.consecutive_groups(iterable)])})\n    return labeled", "docstring": "Input a list of labeled tuples and return a dictionary of sequentially labeled regions.\n\nArgs:\ninlist (list): A list of tuples with the first number representing the index and the second the index label.\n\nReturns:\ndict: Dictionary of labeled regions.\n\nExamples:\n\n>>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])\n{'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}", "source": "codesearchnet"}
{"code": "def aggregate_periods(self, periods):\n        \n        try:\n            fieldname = self.raster_field.name\n        except TypeError:\n            raise exceptions.FieldDoesNotExist('Raster field not found')\n        arrays = self.arrays(fieldname)\n        arr = arrays[0]\n        if len(arrays) > 1:\n            if getattr(arr, 'ndim', 0) > 2:\n                arrays = np.vstack(arrays)\n            fill = getattr(arr, 'fill_value', None)\n            arr = np.ma.masked_values(arrays, fill, copy=False)\n        \n        \n        try:\n            means = arr.reshape((periods, -1)).mean(axis=1)\n        except ValueError:\n            means = np.array([a.mean() for a in np.array_split(arr, periods)])\n        obj = self[0]\n        setattr(obj, fieldname, means)\n        return [obj]", "docstring": "Returns list of ndarrays averaged to a given number of periods.\n\nArguments:\nperiods -- desired number of periods as int", "source": "juraj-google-style"}
{"code": "def log_warning(self, msg):\n    if self.__logger:\n        self.__logger.warning(msg)\n    if self.__raise_exception_on_warning:\n        raise RuntimeError(msg)", "docstring": "Log a warning if ``logger`` exists.\n\nArgs:\nmsg: Warning to log.\n\nWarning:\nCan raise a ``RuntimeError`` if this was asked in the constructor.", "source": "codesearchnet"}
{"code": "def hgnc_genes(self, hgnc_symbol, build='37', search=False):\n    LOG.debug(('Fetching genes with symbol %s' % hgnc_symbol))\n    if search:\n        full_query = self.hgnc_collection.find({'$or': [{'aliases': hgnc_symbol}, {'hgnc_id': (int(hgnc_symbol) if hgnc_symbol.isdigit() else None)}], 'build': build})\n        if (full_query.count() != 0):\n            return full_query\n        return self.hgnc_collection.find({'aliases': {'$regex': hgnc_symbol, '$options': 'i'}, 'build': build})\n    return self.hgnc_collection.find({'build': build, 'aliases': hgnc_symbol})", "docstring": "Fetch all hgnc genes that match a hgnc symbol\n\nCheck both hgnc_symbol and aliases\n\nArgs:\nhgnc_symbol(str)\nbuild(str): The build in which to search\nsearch(bool): if partial searching should be used\n\nReturns:\nresult()", "source": "codesearchnet"}
{"code": "def get_index(uid, i):\n    return _SHARED_SEQUENCES[uid][i]", "docstring": "Get the value from the Sequence `uid` at index `i`.\n\nTo allow multiple Sequences to be used at the same time, we use `uid` to\nget a specific one. A single Sequence would cause the validation to\noverwrite the training Sequence.\n\nArgs:\nuid: int, Sequence identifier\ni: index\n\nReturns:\nThe value at index `i`.", "source": "github-repos"}
{"code": "def bfs(self, graph, start):\n        \n        newstatediag = {}\n\n        \n        queue = []\n        visited = []\n        \n        queue.append(start)\n        while queue:\n            \n            state = queue.pop(0)\n            \n            \n            visited.append(state.id)\n            \n            \n            for key in state.trans:\n                if state.trans[key] != []:\n                    if key not in visited:\n                        for nextstate in graph:\n                            if graph[nextstate].id == key:\n                                queue.append(graph[nextstate])\n                                break\n        i = 0\n        for state in graph:\n            if graph[state].id in visited:\n                newstatediag[i] = graph[state]\n                i = i + 1\n\n        return newstatediag", "docstring": "Performs BFS operation for eliminating useless loop transitions\nArgs:\ngraph (PDA): the PDA object\nstart (PDA state): The PDA initial state\nReturns:\nlist: A cleaned, smaller list of DFA states", "source": "juraj-google-style"}
{"code": "def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):\n    \n    if not _is_pil_image(img):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n    coeffs = _get_perspective_coeffs(startpoints, endpoints)\n    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)", "docstring": "Perform perspective transform of the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be transformed.\ncoeffs (tuple) : 8-tuple (a, b, c, d, e, f, g, h) which contains the coefficients.\nfor a perspective transform.\ninterpolation: Default- Image.BICUBIC\nReturns:\nPIL Image:  Perspectively transformed Image.", "source": "juraj-google-style"}
{"code": "def parse_options(cls, options):\n    d = {}\n    for (filename_check, dictionary) in cls.filename_checks.items():\n        filename_data = getattr(options, filename_check)\n        if (len(filename_data) != 0):\n            parsed_params = {}\n            for single_line in filename_data:\n                a = [s.strip() for s in single_line.split('=')]\n                if (a[0] in ['filter_regex', 'filename_regex']):\n                    parsed_params[a[0]] = a[1]\n            d[filename_check] = parsed_params\n    cls.filename_checks.update(d)\n    cls.filename_checks = {x: y for (x, y) in cls.filename_checks.items() if (len(y) > 0)}", "docstring": "Required by flake8\nparse the options, called after add_options\n\nArgs:\noptions (dict): options to be parsed", "source": "codesearchnet"}
{"code": "def predict_undirected_graph(self, data):\n        \n        graph = Graph()\n\n        for idx_i, i in enumerate(data.columns):\n            for idx_j, j in enumerate(data.columns[idx_i+1:]):\n                score = self.predict(data[i].values, data[j].values)\n                if abs(score) > 0.001:\n                    graph.add_edge(i, j, weight=score)\n\n        return graph", "docstring": "Build a skeleton using a pairwise independence criterion.\n\nArgs:\ndata (pandas.DataFrame): Raw data table\n\nReturns:\nnetworkx.Graph: Undirected graph representing the skeleton.", "source": "juraj-google-style"}
{"code": "def __process_node(self, node: yaml.Node,\n                       expected_type: Type) -> yaml.Node:\n        \n        logger.info('Processing node {} expecting type {}'.format(\n            node, expected_type))\n\n        \n        recognized_types, message = self.__recognizer.recognize(\n            node, expected_type)\n\n        if len(recognized_types) != 1:\n            raise RecognitionError(message)\n\n        recognized_type = recognized_types[0]\n\n        \n        logger.debug('Savorizing node {}'.format(node))\n        if recognized_type in self._registered_classes.values():\n            node = self.__savorize(node, recognized_type)\n        logger.debug('Savorized, now {}'.format(node))\n\n        \n        logger.debug('Recursing into subnodes')\n        if is_generic_list(recognized_type):\n            if node.tag != 'tag:yaml.org,2002:seq':\n                raise RecognitionError('{}{}Expected a {} here'.format(\n                    node.start_mark, os.linesep,\n                    type_to_desc(expected_type)))\n            for item in node.value:\n                self.__process_node(item,\n                                    generic_type_args(recognized_type)[0])\n        elif is_generic_dict(recognized_type):\n            if node.tag != 'tag:yaml.org,2002:map':\n                raise RecognitionError('{}{}Expected a {} here'.format(\n                    node.start_mark, os.linesep,\n                    type_to_desc(expected_type)))\n            for _, value_node in node.value:\n                self.__process_node(value_node,\n                                    generic_type_args(recognized_type)[1])\n\n        elif recognized_type in self._registered_classes.values():\n            if (not issubclass(recognized_type, enum.Enum)\n                    and not issubclass(recognized_type, str)\n                    and not issubclass(recognized_type, UserString)):\n                for attr_name, type_, _ in class_subobjects(recognized_type):\n                    cnode = Node(node)\n                    if cnode.has_attribute(attr_name):\n                        subnode = cnode.get_attribute(attr_name)\n                        new_subnode = self.__process_node(\n                            subnode.yaml_node, type_)\n                        cnode.set_attribute(attr_name, new_subnode)\n        else:\n            logger.debug('Not a generic class or a user-defined class, not'\n                         ' recursing')\n\n        node.tag = self.__type_to_tag(recognized_type)\n        logger.debug('Finished processing node {}'.format(node))\n        return node", "docstring": "Processes a node.\n\nThis is the main function that implements yatiml's \\\nfunctionality. It figures out how to interpret this node \\\n(recognition), then applies syntactic sugar, and finally \\\nrecurses to the subnodes, if any.\n\nArgs:\nnode: The node to process.\nexpected_type: The type we expect this node to be.\n\nReturns:\nThe transformed node, or a transformed copy.", "source": "juraj-google-style"}
{"code": "def line_line_collide(line1, line2):\n    \n    s, t, success = segment_intersection(\n        line1[:, 0], line1[:, 1], line2[:, 0], line2[:, 1]\n    )\n    if success:\n        return _helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(\n            t, 0.0, 1.0\n        )\n\n    else:\n        disjoint, _ = parallel_lines_parameters(\n            line1[:, 0], line1[:, 1], line2[:, 0], line2[:, 1]\n        )\n        return not disjoint", "docstring": "Determine if two line segments meet.\n\nThis is a helper for :func:`convex_hull_collide` in the\nspecial case that the two convex hulls are actually\njust line segments. (Even in this case, this is only\nproblematic if both segments are on a single line.)\n\nArgs:\nline1 (numpy.ndarray): ``2 x 2`` array of start and end nodes.\nline2 (numpy.ndarray): ``2 x 2`` array of start and end nodes.\n\nReturns:\nbool: Indicating if the line segments collide.", "source": "juraj-google-style"}
{"code": "def _convert_to_hashable(data, types=True):\n    if (data is None):\n        hashable = b'NONE'\n        prefix = b'NULL'\n    elif isinstance(data, six.binary_type):\n        hashable = data\n        prefix = b'TXT'\n    elif isinstance(data, six.text_type):\n        hashable = data.encode('utf-8')\n        prefix = b'TXT'\n    elif isinstance(data, _intlike):\n        hashable = _int_to_bytes(data)\n        prefix = b'INT'\n    elif isinstance(data, float):\n        (a, b) = float(data).as_integer_ratio()\n        hashable = ((_int_to_bytes(a) + b'/') + _int_to_bytes(b))\n        prefix = b'FLT'\n    else:\n        hash_func = _HASHABLE_EXTENSIONS.lookup(data)\n        (prefix, hashable) = hash_func(data)\n    if types:\n        return (prefix, hashable)\n    else:\n        return (b'', hashable)", "docstring": "r\"\"\"\nConverts `data` into a hashable byte representation if an appropriate\nhashing function is known.\n\nArgs:\ndata (object): ordered data with structure\ntypes (bool): include type prefixes in the hash\n\nReturns:\ntuple(bytes, bytes): prefix, hashable:\na prefix hinting the original data type and the byte representation\nof `data`.\n\nRaises:\nTypeError : if data has no registered hash methods\n\nExample:\n>>> assert _convert_to_hashable(None) == (b'NULL', b'NONE')\n>>> assert _convert_to_hashable('string') == (b'TXT', b'string')\n>>> assert _convert_to_hashable(1) == (b'INT', b'\\x01')\n>>> assert _convert_to_hashable(1.0) == (b'FLT', b'\\x01/\\x01')\n>>> assert _convert_to_hashable(_intlike[-1](1)) == (b'INT', b'\\x01')", "source": "codesearchnet"}
{"code": "def register(self, name, option):\n        \n        if name in self._options:\n\n            raise ValueError(\"Option {0} already exists.\".format(name))\n\n        if not isinstance(option, opt.Option):\n\n            raise TypeError(\"Options must be of type Option.\")\n\n        self._options[name] = option", "docstring": "Register a new option with the namespace.\n\nArgs:\nname (str): The name to register the option under.\noption (option.Option): The option object to register.\n\nRaises:\nTypeError: If the option is not an option.Option object.\nValueError: If the name is already registered.", "source": "juraj-google-style"}
{"code": "def _batch_static_inner_shape(old_shape: tensor_shape.TensorShape, batch_size: Optional[int]) -> tensor_shape.TensorShape:\n    head_dim = tensor_shape.dimension_at_index(old_shape, 0) * batch_size\n    return head_dim + old_shape[1:]", "docstring": "Returns a copy of old_shape with axis=0 multiplied by batch_size.\n\nOnly use if this is the inner_shape of a DynamicRaggedShape.Spec with one\nor more row partitions.\n\nArgs:\nold_shape: the original inner_shape.\nbatch_size: the batch size.\n\nReturns:\na new shape.", "source": "github-repos"}
{"code": "def _get_subclass_names(self, classname, namespace, deep_inheritance):\n    assert ((classname is None) or isinstance(classname, (six.string_types, CIMClassName)))\n    if isinstance(classname, CIMClassName):\n        classname = classname.classname\n    try:\n        classes = self.classes[namespace]\n    except KeyError:\n        classes = NocaseDict()\n    if (classname is None):\n        rtn_classnames = [cl.classname for cl in six.itervalues(classes) if (cl.superclass is None)]\n    else:\n        rtn_classnames = [cl.classname for cl in six.itervalues(classes) if (cl.superclass and (cl.superclass.lower() == classname.lower()))]\n    if deep_inheritance:\n        subclass_names = []\n        if rtn_classnames:\n            for cn in rtn_classnames:\n                subclass_names.extend(self._get_subclass_names(cn, namespace, deep_inheritance))\n        rtn_classnames.extend(subclass_names)\n    return rtn_classnames", "docstring": "Get class names that are subclasses of the\nclassname input parameter from the repository.\n\nIf DeepInheritance is False, get only classes in the\nrepository for the defined namespace for which this class is a\ndirect super class.\n\nIf deep_inheritance is `True`, get all direct and indirect\nsubclasses.  If false, get only a the next level of the\nhiearchy.\n\nReturns:\nlist of strings with the names of all subclasses of `classname`.", "source": "codesearchnet"}
{"code": "def get_generated_cols(X_original, X_transformed, to_transform):\n    \n    original_cols = list(X_original.columns)\n\n    if len(to_transform) > 0:\n        [original_cols.remove(c) for c in to_transform]\n\n    current_cols = list(X_transformed.columns)\n    if len(original_cols) > 0:\n        [current_cols.remove(c) for c in original_cols]\n\n    return current_cols", "docstring": "Returns a list of the generated/transformed columns.\n\nArguments:\nX_original: df\nthe original (input) DataFrame.\nX_transformed: df\nthe transformed (current) DataFrame.\nto_transform: [str]\na list of columns that were transformed (as in the original DataFrame), commonly self.cols.\n\nOutput:\na list of columns that were transformed (as in the current DataFrame).", "source": "juraj-google-style"}
{"code": "def _attach_files(filepaths, email_):\n    for filepath in filepaths:\n        base = os.path.basename(filepath)\n        with open(filepath, 'rb') as file:\n            part = MIMEApplication(file.read(), Name=base)\n            part['Content-Disposition'] = ('attachment; filename=\"%s\"' % base)\n            email_.attach(part)", "docstring": "Take a list of filepaths and attach the files to a MIMEMultipart.\n\nArgs:\nfilepaths (list(str)): A list of filepaths.\nemail_ (email.MIMEMultipart): A MIMEMultipart email_.", "source": "codesearchnet"}
{"code": "def _ContainsExactlyElementsIn(self, expected, warn_elements_in=False):\n    if not expected:\n        if self._actual:\n            self._FailWithProposition('is empty')\n        return _InOrder()\n    missing = _DuplicateCounter()\n    extra = _DuplicateCounter()\n    actual_iter = iter(self._actual)\n    expected_iter = iter(expected)\n    warning = ''\n    if warn_elements_in:\n        warning = ' Passing a single iterable to ContainsExactly(*expected) is often not the correct thing to do. Did you mean to call ContainsExactlyElementsIn(Iterable) instead?'\n    while True:\n        try:\n            actual_element = next(actual_iter)\n        except StopIteration:\n            break\n        try:\n            expected_element = next(expected_iter)\n        except StopIteration:\n            extra.Increment(actual_element)\n            break\n        if actual_element != expected_element:\n            missing.Increment(expected_element)\n            for m in expected_iter:\n                missing.Increment(m)\n            if actual_element in missing:\n                missing.Decrement(actual_element)\n            else:\n                extra.Increment(actual_element)\n            for e in actual_iter:\n                if e in missing:\n                    missing.Decrement(e)\n                else:\n                    extra.Increment(e)\n            if missing:\n                if extra:\n                    self._FailWithProposition('contains exactly <{0!r}>. It is missing <{1}> and has unexpected items <{2}>'.format(expected, missing, extra), suffix=warning)\n                else:\n                    self._FailWithBadResults('contains exactly', expected, 'is missing', missing, suffix=warning)\n            if extra:\n                self._FailWithBadResults('contains exactly', expected, 'has unexpected items', extra, suffix=warning)\n            return _NotInOrder(self._actual, 'contains exactly these elements in order', expected)\n    for e in actual_iter:\n        extra.Increment(e)\n    if extra:\n        self._FailWithBadResults('contains exactly', expected, 'has unexpected items', extra, suffix=warning)\n    for m in expected_iter:\n        missing.Increment(m)\n    if missing:\n        self._FailWithBadResults('contains exactly', expected, 'is missing', missing, suffix=warning)\n    return _InOrder()", "docstring": "Determines if the subject contains exactly the expected elements.\n\nHelper function for ContainsExactly() and ContainsExactlyElementsIn().\n\nArgs:\nexpected: iterable of objects that should be contained in the subject.\nwarn_elements_in: boolean, default False. If True, and the assertion\nfails, and the developer invoked ContainsExactly() with a single\niterable, warn that this usage is error-prone.\n\nReturns:\nIf the subject does contain exactly the expected elements, returns an\n_Ordered predicate on which .InOrder() can be subsequently called.\n\nRaises:\nTruthAssertionError: the subject is missing any of the expected elements,\nor the subject contains any element not in the expected elements.", "source": "github-repos"}
{"code": "def problem_id(self, value):\n        \n        if value == self._defaults['problemId'] and 'problemId' in self._values:\n            del self._values['problemId']\n        else:\n            self._values['problemId'] = value", "docstring": "The problem_id property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def get_inspection_units(logdir='', event_file='', tag=''):\n    if logdir:\n        subdirs = io_wrapper.GetLogdirSubdirectories(logdir)\n        inspection_units = []\n        for subdir in subdirs:\n            generator = itertools.chain(*[generator_from_event_file(os.path.join(subdir, f)) for f in tf.io.gfile.listdir(subdir) if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))])\n            inspection_units.append(InspectionUnit(name=subdir, generator=generator, field_to_obs=get_field_to_observations_map(generator, tag)))\n        if inspection_units:\n            print('Found event files in:\\n{}\\n'.format('\\n'.join([u.name for u in inspection_units])))\n        elif io_wrapper.IsTensorFlowEventsFile(logdir):\n            print('It seems that {} may be an event file instead of a logdir. If this is the case, use --event_file instead of --logdir to pass it in.'.format(logdir))\n        else:\n            print('No event files found within logdir {}'.format(logdir))\n        return inspection_units\n    elif event_file:\n        generator = generator_from_event_file(event_file)\n        return [InspectionUnit(name=event_file, generator=generator, field_to_obs=get_field_to_observations_map(generator, tag))]\n    return []", "docstring": "Returns a list of InspectionUnit objects given either logdir or event_file.\n\nIf logdir is given, the number of InspectionUnits should equal the\nnumber of directories or subdirectories that contain event files.\n\nIf event_file is given, the number of InspectionUnits should be 1.\n\nArgs:\nlogdir: A log directory that contains event files.\nevent_file: Or, a particular event file path.\ntag: An optional tag name to query for.\n\nReturns:\nA list of InspectionUnit objects.", "source": "codesearchnet"}
{"code": "def uniform_binning_correction(x, n_bits=8):\n  \n  n_bins = 2**n_bits\n  batch_size, height, width, n_channels = common_layers.shape_list(x)\n  hwc = float(height * width * n_channels)\n\n  x = x + tf.random_uniform(\n      shape=(batch_size, height, width, n_channels),\n      minval=0.0, maxval=1.0/n_bins)\n  objective = -np.log(n_bins) * hwc * tf.ones(batch_size)\n  return x, objective", "docstring": "Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).\n\nArgs:\nx: 4-D Tensor of shape (NHWC)\nn_bits: optional.\nReturns:\nx: x ~ U(x, x + 1.0 / 256)\nobjective: Equivalent to -q(x)*log(q(x)).", "source": "juraj-google-style"}
{"code": "def _ParseSourcePathOption(self, options):\n    self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)\n    if (not self._source_path):\n        raise errors.BadConfigOption('Missing source path.')\n    self._source_path = os.path.abspath(self._source_path)", "docstring": "Parses the source path option.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def provide(self, cls):\n    support.verify_class_type(cls, 'cls')\n    if (not self._is_injectable_fn(cls)):\n        provide_loc = locations.get_back_frame_loc()\n        raise errors.NonExplicitlyBoundClassError(provide_loc, cls)\n    try:\n        return self._obj_provider.provide_class(cls, self._injection_context_factory.new(cls.__init__), direct_init_pargs=[], direct_init_kwargs={})\n    except errors.Error as e:\n        if self._use_short_stack_traces:\n            raise e\n        else:\n            raise", "docstring": "Provides an instance of the given class.\n\nArgs:\ncls: a class (not an instance)\nReturns:\nan instance of cls\nRaises:\nError: an instance of cls is not providable", "source": "codesearchnet"}
{"code": "def transform_function(self, fn, user_context):\n    cache_subkey = self.get_caching_key(user_context)\n    if self._cache.has(fn, cache_subkey):\n        factory = self._cached_factory(fn, cache_subkey)\n    else:\n        with self._cache_lock:\n            if self._cache.has(fn, cache_subkey):\n                factory = self._cached_factory(fn, cache_subkey)\n            else:\n                logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey)\n                nodes, ctx = super(PyToPy, self).transform_function(fn, user_context)\n                if isinstance(nodes, gast.Lambda):\n                    nodes = gast.Assign(targets=[gast.Name(ctx.info.name, ctx=gast.Store(), annotation=None, type_comment=None)], value=nodes)\n                else:\n                    nodes.name = ctx.info.name\n                if logging.has_verbosity(2):\n                    logging.log(2, 'Transformed %s:\\n\\n%s\\n', fn, parser.unparse(nodes))\n                factory = _PythonFnFactory(ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals())\n                factory.create(nodes, ctx.namer, future_features=ctx.info.future_features)\n                self._cache[fn][cache_subkey] = factory\n    transformed_fn = factory.instantiate(globals_=fn.__globals__, closure=fn.__closure__ or (), defaults=fn.__defaults__, kwdefaults=getattr(fn, '__kwdefaults__', None))\n    return (transformed_fn, factory.module, factory.source_map)", "docstring": "Transforms a function. See GenericTranspiler.transform_function.\n\nThis overload wraps the parent's `transform_function`, adding caching and\nfacilities to instantiate the output as a Python object. It also\nadds facilities to make new symbols available to the generated Python code,\nvisible as local variables - see `get_extra_locals`.\n\nArgs:\nfn: A function or lambda.\nuser_context: An opaque object (may be None) that is forwarded to\ntransform_ast, through the ctx.user attribute.\n\nReturns:\nA tuple:\n* A function or lambda with the same signature and closure as `fn`\n* The temporary module into which the transformed function was loaded\n* The source map as a\nDict[origin_info.LineLocation, origin_info.OriginInfo]", "source": "github-repos"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = BytearrayStream()\n\n        if self._wrapping_method:\n            self._wrapping_method.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        else:\n            raise ValueError(\n                \"Invalid struct missing the wrapping method attribute.\"\n            )\n\n        if self._encryption_key_information:\n            self._encryption_key_information.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._mac_signature_key_information:\n            self._mac_signature_key_information.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._mac_signature:\n            self._mac_signature.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._iv_counter_nonce:\n            self._iv_counter_nonce.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._encoding_option:\n            self._encoding_option.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(KeyWrappingData, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the KeyWrappingData struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def _find_penultimate_layer(model, layer_idx, penultimate_layer_idx):\n    if (penultimate_layer_idx is None):\n        for (idx, layer) in utils.reverse_enumerate(model.layers[:(layer_idx - 1)]):\n            if isinstance(layer, Wrapper):\n                layer = layer.layer\n            if isinstance(layer, (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):\n                penultimate_layer_idx = idx\n                break\n    if (penultimate_layer_idx is None):\n        raise ValueError('Unable to determine penultimate `Conv` or `Pooling` layer for layer_idx: {}'.format(layer_idx))\n    if (layer_idx < 0):\n        layer_idx = (len(model.layers) + layer_idx)\n    if (penultimate_layer_idx > layer_idx):\n        raise ValueError('`penultimate_layer_idx` needs to be before `layer_idx`')\n    return model.layers[penultimate_layer_idx]", "docstring": "Searches for the nearest penultimate `Conv` or `Pooling` layer.\n\nArgs:\nmodel: The `keras.models.Model` instance.\nlayer_idx: The layer index within `model.layers`.\npenultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate\n`Conv` or `Pooling` layer is used.\n\nReturns:\nThe penultimate layer.", "source": "codesearchnet"}
{"code": "def Close(self, abort=False):\n    \n    if abort:\n      \n      self._queue.cancel_join_thread()\n\n    self._queue.close()\n    self._queue.join_thread()", "docstring": "Closes the queue.\n\nThis needs to be called from any process or thread putting items onto\nthe queue.\n\nArgs:\nabort (Optional[bool]): True if the close was issued on abort.", "source": "juraj-google-style"}
{"code": "def poll(self, transaction_hash: bytes):\n    if (len(transaction_hash) != 32):\n        raise ValueError('transaction_hash must be a 32 byte hash')\n    transaction_hash = encode_hex(transaction_hash)\n    last_result = None\n    while True:\n        transaction = self.web3.eth.getTransaction(transaction_hash)\n        if ((transaction is None) and (last_result is not None)):\n            raise Exception('invalid transaction, check gas price')\n        if (transaction and (transaction['blockNumber'] is not None)):\n            last_result = transaction\n            transaction_block = transaction['blockNumber']\n            confirmation_block = (transaction_block + self.default_block_num_confirmations)\n            block_number = self.block_number()\n            if (block_number >= confirmation_block):\n                return transaction\n        gevent.sleep(1.0)", "docstring": "Wait until the `transaction_hash` is applied or rejected.\n\nArgs:\ntransaction_hash: Transaction hash that we are waiting for.", "source": "codesearchnet"}
{"code": "def relevant_connections(n, _from, to):\n    cm = np.zeros((n, n))\n    if ((not _from) or (not to)):\n        return cm\n    cm[np.ix_(_from, to)] = 1\n    return cm", "docstring": "Construct a connectivity matrix.\n\nArgs:\nn (int): The dimensions of the matrix\n_from (tuple[int]): Nodes with outgoing connections to ``to``\nto (tuple[int]): Nodes with incoming connections from ``_from``\n\nReturns:\nnp.ndarray: An |n x n| connectivity matrix with the |i,jth| entry is\n``1`` if |i| is in ``_from`` and |j| is in ``to``, and 0 otherwise.", "source": "codesearchnet"}
{"code": "def _export_work_errors(self, work, output_file):\n    errors = set()\n    for v in itervalues(work.work):\n        if (v['is_completed'] and (v['error'] is not None)):\n            errors.add(v['error'])\n    with open(output_file, 'w') as f:\n        for e in sorted(errors):\n            f.write(e)\n            f.write('\\n')", "docstring": "Saves errors for given work pieces into file.\n\nArgs:\nwork: instance of either AttackWorkPieces or DefenseWorkPieces\noutput_file: name of the output file", "source": "codesearchnet"}
{"code": "async def do_run_task(context, run_cancellable, to_cancellable_process):\n    status = 0\n    try:\n        if context.config['verify_chain_of_trust']:\n            chain = ChainOfTrust(context, context.config['cot_job_type'])\n            (await run_cancellable(verify_chain_of_trust(chain)))\n        status = (await run_task(context, to_cancellable_process))\n        generate_cot(context)\n    except asyncio.CancelledError:\n        log.info('CoT cancelled asynchronously')\n        raise WorkerShutdownDuringTask\n    except ScriptWorkerException as e:\n        status = worst_level(status, e.exit_code)\n        log.error('Hit ScriptWorkerException: {}'.format(e))\n    except Exception as e:\n        log.exception('SCRIPTWORKER_UNEXPECTED_EXCEPTION task {}'.format(e))\n        raise\n    return status", "docstring": "Run the task logic.\n\nReturns the integer status of the task.\n\nargs:\ncontext (scriptworker.context.Context): the scriptworker context.\nrun_cancellable (typing.Callable): wraps future such that it'll cancel upon worker shutdown\nto_cancellable_process (typing.Callable): wraps ``TaskProcess`` such that it will stop if the worker is shutting\ndown\n\nRaises:\nException: on unexpected exception.\n\nReturns:\nint: exit status", "source": "codesearchnet"}
{"code": "def setNetworkIDTimeout(self, iNwkIDTimeOut):\n        \n        print '%s call setNetworkIDTimeout' % self.port\n        print iNwkIDTimeOut\n        iNwkIDTimeOut /= 1000\n        try:\n            cmd = 'networkidtimeout %s' % str(iNwkIDTimeOut)\n            print cmd\n            return self.__sendCommand(cmd)[0] == 'Done'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"setNetworkIDTimeout() Error: \" + str(e))", "docstring": "set networkid timeout for Thread device\n\nArgs:\niNwkIDTimeOut: a given NETWORK_ID_TIMEOUT\n\nReturns:\nTrue: successful to set NETWORK_ID_TIMEOUT\nFalse: fail to set NETWORK_ID_TIMEOUT", "source": "juraj-google-style"}
{"code": "def present(name, save=False, **kwargs):\n    ret = {'name': name, 'result': True, 'changes': {}, 'comment': []}\n    current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)\n    beacon_data = [{k: v} for (k, v) in six.iteritems(kwargs)]\n    if (name in current_beacons):\n        if (beacon_data == current_beacons[name]):\n            ret['comment'].append('Job {0} in correct state'.format(name))\n        elif (('test' in __opts__) and __opts__['test']):\n            kwargs['test'] = True\n            result = __salt__['beacons.modify'](name, beacon_data, **kwargs)\n            ret['comment'].append(result['comment'])\n            ret['changes'] = result['changes']\n        else:\n            result = __salt__['beacons.modify'](name, beacon_data, **kwargs)\n            if (not result['result']):\n                ret['result'] = result['result']\n                ret['comment'] = result['comment']\n                return ret\n            elif ('changes' in result):\n                ret['comment'].append('Modifying {0} in beacons'.format(name))\n                ret['changes'] = result['changes']\n            else:\n                ret['comment'].append(result['comment'])\n    elif (('test' in __opts__) and __opts__['test']):\n        kwargs['test'] = True\n        result = __salt__['beacons.add'](name, beacon_data, **kwargs)\n        ret['comment'].append(result['comment'])\n    else:\n        result = __salt__['beacons.add'](name, beacon_data, **kwargs)\n        if (not result['result']):\n            ret['result'] = result['result']\n            ret['comment'] = result['comment']\n            return ret\n        else:\n            ret['comment'].append('Adding {0} to beacons'.format(name))\n    if save:\n        __salt__['beacons.save'](**kwargs)\n        ret['comment'].append('Beacon {0} saved'.format(name))\n    ret['comment'] = '\\n'.join(ret['comment'])\n    return ret", "docstring": "Ensure beacon is configured with the included beacon data.\n\nArgs:\n\nname (str):\nThe name of the beacon ensure is configured.\n\nsave (bool):\n``True`` updates the beacons.conf. Default is ``False``.\n\nReturns:\ndict: A dictionary of information about the results of the state\n\nExample:\n\n.. code-block:: yaml\n\nps_beacon:\nbeacon.present:\n- name: ps\n- save: True\n- enable: False\n- services:\nsalt-master: running\napache2: stopped", "source": "codesearchnet"}
{"code": "def __ge__(self, other):\n        \n        if other.__class__ is not self.__class__:\n            return NotImplemented\n        return not self < other", "docstring": "Test if self is greater than or equal an object of the same class.\n\nArgs:\nother: The object to compare against.\n\nReturns:\nTrue if self is greater than or equal to other; else False.\n\nRaises:\nTypeError: Raised if the objects are not of the same class.", "source": "juraj-google-style"}
{"code": "def get_next_of_type(self, processor_type):\n    with self._condition:\n        if (processor_type not in self):\n            self.wait_for_registration(processor_type)\n        try:\n            processor = self[processor_type].next_processor()\n        except NoProcessorVacancyError:\n            processor = self.wait_for_vacancy(processor_type)\n        processor.inc_occupancy()\n        return processor", "docstring": "Get the next available processor of a particular type and increment\nits occupancy counter.\n\nArgs:\nprocessor_type (ProcessorType): The processor type associated with\na zmq identity.\n\nReturns:\n(Processor): Information about the transaction processor", "source": "codesearchnet"}
{"code": "def prop(pode, prop):\n    \n    form = pode[0][0]\n    if prop.startswith(form):\n        prop = prop[len(form):]\n    if prop[0] == ':':\n        prop = prop[1:]\n    return pode[1]['props'].get(prop)", "docstring": "Return the valu of a given property on the node.\n\nArgs:\npode (tuple): A packed node.\nprop (str): Property to retrieve.\n\nNotes:\nThe prop argument may be the full property name (foo:bar:baz), relative property name (:baz) , or the unadorned\nproperty name (baz).\n\nReturns:", "source": "juraj-google-style"}
{"code": "def initialize_write(self):\n    raise NotImplementedError", "docstring": "Initializes the sink before writing begins.\n\nInvoked before any data is written to the sink.\n\n\nPlease see documentation in ``iobase.Sink`` for an example.\n\nReturns:\nAn object that contains any sink specific state generated by\ninitialization. This object will be passed to open_writer() and\nfinalize_write() methods.", "source": "github-repos"}
{"code": "def create_graph_from_data(self, data, **kwargs):\n    self.arguments['{SCORE}'] = self.scores[self.score]\n    self.arguments['{CUTOFF}'] = str(self.cutoff)\n    self.arguments['{VARSEL}'] = str(self.variablesel).upper()\n    self.arguments['{SELMETHOD}'] = self.var_selection[self.selmethod]\n    self.arguments['{PRUNING}'] = str(self.pruning).upper()\n    self.arguments['{PRUNMETHOD}'] = self.var_selection[self.prunmethod]\n    self.arguments['{NJOBS}'] = str(self.nb_jobs)\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    results = self._run_cam(data, verbose=self.verbose)\n    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Apply causal discovery on observational data using CAM.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the CAM algorithm.", "source": "codesearchnet"}
{"code": "def set_db_row(db, start, size, _bytearray):\n    \n    client.db_write(db, start, size, _bytearray)", "docstring": "Here we replace a piece of data in a db block with new data\n\nArgs:\ndb (int): The db to use\nstart(int): The start within the db\nsize(int): The size of the data in bytes\n_butearray (enumerable): The data to put in the db", "source": "juraj-google-style"}
{"code": "def BuildTypeDescriptor(self, value_cls):\n    \n    result = ApiRDFValueDescriptor(\n        name=value_cls.__name__,\n        parents=[klass.__name__ for klass in value_cls.__mro__],\n        doc=value_cls.__doc__ or \"\",\n        kind=\"PRIMITIVE\")\n\n    result.default = self.BuildDefaultValue(value_cls)\n\n    return result", "docstring": "Renders metadata of a given value class.\n\nArgs:\nvalue_cls: Metadata of this class will be rendered. This class has to be\n(or to be a subclass of) a self.value_class (i.e. a class that this\nrenderer is capable of rendering).\n\nReturns:\nDictionary with class metadata.", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    shortcuts = match.get('UserShortcuts', {})\n    for search_text, data in iter(shortcuts.items()):\n      datetime_value = data.get('LAST_USED', None)\n      if not datetime_value:\n        continue\n\n      display_name = data.get('DISPLAY_NAME', '<DISPLAY_NAME>')\n      path = data.get('PATH', '<PATH>')\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.desc = (\n          'Spotlight term searched \"{0:s}\" associate to {1:s} ({2:s})').format(\n              search_text, display_name, path)\n      event_data.key = search_text\n      event_data.root = '/UserShortcuts'\n\n      event = time_events.PythonDatetimeEvent(\n          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Spotlight entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def switch(data, pred, dtype=None, name=None):\n    with ops.name_scope(name, 'Switch', [data, pred]) as name:\n        data = ops.internal_convert_to_tensor_or_composite(data, dtype=dtype, name='data', as_ref=True)\n        pred = ops.convert_to_tensor(pred, name='pred')\n        if isinstance(data, tensor_lib.Tensor):\n            return gen_control_flow_ops.switch(data, pred, name=name)\n        else:\n            if not isinstance(data, composite_tensor.CompositeTensor):\n                raise TypeError(f\"'data' must be a Tensor or CompositeTensor. Received: {type(data)}.\")\n            tensors = nest.flatten(data, expand_composites=True)\n            mapped = [gen_control_flow_ops.switch(tensor, pred) for tensor in tensors]\n            mapped_f, mapped_t = zip(*mapped)\n            return (nest.pack_sequence_as(data, mapped_f, expand_composites=True), nest.pack_sequence_as(data, mapped_t, expand_composites=True))", "docstring": "Forwards `data` to an output determined by `pred`.\n\nIf `pred` is false, the `data` input is forwarded to the first output.\nOtherwise, the data goes to the second output.\n\nThis op handles `Tensor`s and `IndexedSlices`.\n\nArgs:\ndata: The tensor to be forwarded to the appropriate output.\npred: A scalar that specifies which output port will receive data.\ndtype: Optional element type for the returned tensor. If missing, the type\nis inferred from the type of `value`.\nname: A name for this operation (optional).\n\nReturns:\n`(output_false, output_true)`: If `pred` is true, data will be forwarded\nto `output_true`, otherwise it goes to `output_false`.", "source": "github-repos"}
{"code": "def __init__(self, latent_size, hidden_size):\n    \n    super(EncoderStatic, self).__init__()\n    self.latent_size = latent_size\n    self.hidden_size = hidden_size\n    self.bilstm = tf.keras.layers.Bidirectional(\n        tf.keras.layers.LSTM(hidden_size),\n        merge_mode=\"sum\")\n    self.output_layer = tf.keras.layers.Dense(2*latent_size)", "docstring": "Constructs an encoder for `f`.\n\nArgs:\nlatent_size: An integer corresponding to the dimensionality of the\ndistribution.\nhidden_size: Dimensionality of the LSTM, RNN, and affine function\nparameters.", "source": "juraj-google-style"}
{"code": "def show_rules(cls, *names, attr=None):\n        \n        from qnet.printing import srepr\n        try:\n            if attr is None:\n                attr = cls._rules_attr()\n            rules = getattr(cls, attr)\n        except TypeError:\n            rules = {}\n        for (name, rule) in rules.items():\n            if len(names) > 0 and name not in names:\n                continue\n            pat, repl = rule\n            print(name)\n            print(\"    PATTERN:\")\n            print(textwrap.indent(\n                textwrap.dedent(srepr(pat, indented=True)),\n                prefix=\" \"*8))\n            print(\"    REPLACEMENT:\")\n            print(textwrap.indent(\n                textwrap.dedent(inspect.getsource(repl).rstrip()),\n                prefix=\" \"*8))", "docstring": "Print algebraic rules used by :class:`create`\n\nPrint a summary of the algebraic rules with the given names, or all\nrules if not names a given.\n\nArgs:\nnames (str): Names of rules to show\nattr (None or str): Name of the class attribute from which to get\nthe rules. Cf. :meth:`add_rule`.\n\nRaises:\nAttributeError: If invalid `attr`", "source": "juraj-google-style"}
{"code": "class Wav2Vec2DecoderWithLMOutput(ModelOutput):\n    text: Union[List[List[str]], List[str], str]\n    logit_score: Union[List[List[float]], List[float], float] = None\n    lm_score: Union[List[List[float]], List[float], float] = None\n    word_offsets: Union[List[List[ListOfDict]], List[ListOfDict], ListOfDict] = None", "docstring": "Output type of [`Wav2Vec2DecoderWithLM`], with transcription.\n\nArgs:\ntext (list of `str` or `str`):\nDecoded logits in text from. Usually the speech transcription.\nlogit_score (list of `float` or `float`):\nTotal logit score of the beams associated with produced text.\nlm_score (list of `float`):\nFused lm_score of the beams associated with produced text.\nword_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):\nOffsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets\ncan be used to compute time stamps for each word.", "source": "github-repos"}
{"code": "def start_tpot(automated_run, session, path):\n    module = functions.import_string_code_as_module(automated_run.source)\n    extraction = session.query(models.Extraction).first()\n    (X, y) = extraction.return_train_dataset()\n    tpot_learner = module.tpot_learner\n    tpot_learner.fit(X, y)\n    temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))\n    tpot_learner.export(temp_filename)\n    with open(temp_filename) as f:\n        base_learner_source = f.read()\n    base_learner_source = (constants.tpot_learner_docstring + base_learner_source)\n    try:\n        os.remove(temp_filename)\n    except OSError:\n        pass\n    blo = models.BaseLearnerOrigin(source=base_learner_source, name='TPOT Learner', meta_feature_generator='predict')\n    session.add(blo)\n    session.commit()", "docstring": "Starts a TPOT automated run that exports directly to base learner setup\n\nArgs:\nautomated_run (xcessiv.models.AutomatedRun): Automated run object\n\nsession: Valid SQLAlchemy session\n\npath (str, unicode): Path to project folder", "source": "codesearchnet"}
{"code": "def set_style(self, style):\n        \n        if style is not None:\n            try:\n                self.style.update(style)\n            except ValueError:\n                for s in style.split(';'):\n                    k, v = s.split(':', 1)\n                    self.style[k.strip()] = v.strip()", "docstring": "Allows to set style properties for the widget.\nArgs:\nstyle (str or dict): The style property dictionary or json string.", "source": "juraj-google-style"}
{"code": "def HasBalance(self, assetId):\n    for (key, fixed8) in self.Balances.items():\n        if (key == assetId):\n            return True\n    return False", "docstring": "Flag indicating if the asset has a balance.\n\nArgs:\nassetId (UInt256):\n\nReturns:\nbool: True if a balance is present. False otherwise.", "source": "codesearchnet"}
{"code": "def iter_variants_by_names(self, names):\n        \n        for name in names:\n            for result in self.get_variant_by_name(name):\n                yield result", "docstring": "Iterates over the genotypes for variants using a list of names.\n\nArgs:\nnames (list): The list of names for variant extraction.", "source": "juraj-google-style"}
{"code": "def authorize(self, scheme, **params):\n        \n        if scheme not in self.schemes:\n            return False\n\n        for field, value in iteritems(params):\n            setattr(self, field, value)\n            if field in self.schemes[scheme][u'params'].keys() and value:\n                self.schemes[scheme][u'params'][field] = value\n\n        return True", "docstring": "Store credentials required to satisfy a given auth scheme.\n\nArgs:\nscheme (str): The name of the Authentication scheme.\n**params: parameters for the specified scheme.\n\nReturns:\nTrue if parameters are set successfully (note that this doesn't mean\nthe credentials are valid)\nFalse if the scheme specified is not supported", "source": "juraj-google-style"}
{"code": "def separate_words(text, acronyms=None):\n    \n    words, _case, _sep = case_parse.parse_case(text, acronyms, preserve_case=True)\n    return ' '.join(words)", "docstring": "Return text in \"seperate words\" style.\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> separate_words(\"HELLO_WORLD\")\n'HELLO WORLD'\n>>> separate_words(\"helloHTMLWorld\", True, [\"HTML\"])\n'hello HTML World'", "source": "juraj-google-style"}
{"code": "async def _sync_all_conversations(client):\n    conv_states = []\n    sync_timestamp = None\n    request = hangouts_pb2.SyncRecentConversationsRequest(request_header=client.get_request_header(), max_conversations=CONVERSATIONS_PER_REQUEST, max_events_per_conversation=1, sync_filter=[hangouts_pb2.SYNC_FILTER_INBOX, hangouts_pb2.SYNC_FILTER_ARCHIVED])\n    for _ in range(MAX_CONVERSATION_PAGES):\n        logger.info('Requesting conversations page %s', request.last_event_timestamp)\n        response = (await client.sync_recent_conversations(request))\n        conv_states = (list(response.conversation_state) + conv_states)\n        sync_timestamp = parsers.from_timestamp(response.response_header.current_server_time)\n        if (response.continuation_end_timestamp == 0):\n            logger.info('Reached final conversations page')\n            break\n        else:\n            request.last_event_timestamp = response.continuation_end_timestamp\n    else:\n        logger.warning('Exceeded maximum number of conversation pages')\n    logger.info('Synced %s total conversations', len(conv_states))\n    return (conv_states, sync_timestamp)", "docstring": "Sync all conversations by making paginated requests.\n\nConversations are ordered by ascending sort timestamp.\n\nArgs:\nclient (Client): Connected client.\n\nRaises:\nNetworkError: If the requests fail.\n\nReturns:\ntuple of list of ``ConversationState`` messages and sync timestamp", "source": "codesearchnet"}
{"code": "def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):\n    return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`DetrConfig`] (or a derived class) from a pre-trained backbone model configuration.\n\nArgs:\nbackbone_config ([`PretrainedConfig`]):\nThe backbone configuration.\nReturns:\n[`DetrConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def gen_subject_info_tree(subject_info_pyxb, authn_subj, include_duplicates=False):\n\n    class State():\n        'self.'\n        pass\n    state = State()\n    state.subject_info_pyxb = subject_info_pyxb\n    state.include_duplicates = include_duplicates\n    state.visited_set = set()\n    state.tree = SubjectInfoNode('Root', TYPE_NODE_TAG)\n    _add_subject(state, state.tree, authn_subj)\n    symbolic_node = state.tree.add_child('Symbolic', TYPE_NODE_TAG)\n    _add_subject(state, symbolic_node, d1_common.const.SUBJECT_AUTHENTICATED)\n    _trim_tree(state)\n    return state.tree", "docstring": "Convert the flat, self referential lists in the SubjectInfo to a tree structure.\n\nArgs:\nsubject_info_pyxb: SubjectInfo PyXB object\n\nauthn_subj: str\nThe authenticated subject that becomes the root subject in the tree of\nsubjects built from the SubjectInfo.\n\nOnly subjects that are authenticated by a direct or indirect connection to\nthis subject are included in the tree.\n\ninclude_duplicates:\nInclude branches of the tree that contain subjects that have already been\nincluded via other branches.\n\nIf the tree is intended for rendering, including the duplicates will\nprovide a more complete view of the SubjectInfo.\n\nReturns:\nSubjectInfoNode : Tree of nodes holding information about subjects that are\ndirectly or indirectly connected to the authenticated subject in the root.", "source": "codesearchnet"}
{"code": "def _apply_conv(self, inputs, w):\n    outputs = tf.nn.convolution(inputs, w, strides=self._stride, padding=self._conv_op_padding, dilation_rate=self._rate, data_format=self._data_format)\n    return outputs", "docstring": "Apply a convolution operation on `inputs` using variable `w`.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\nw: A weight matrix of the same type as `inputs`.\n\nReturns:\noutputs: The result of the convolution operation on `inputs`.", "source": "codesearchnet"}
{"code": "def __init__(self, embedding_shape, initializer, weight_collections=None, trainable=True, name=None, **kwargs):\n    super(_EmbeddingColumnLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n    self._embedding_shape = embedding_shape\n    self._initializer = initializer\n    self._weight_collections = weight_collections", "docstring": "Constructor.\n\nArgs:\nembedding_shape: Shape of the embedding variable used for lookup.\ninitializer: A variable initializer function to be used in embedding\nvariable initialization.\nweight_collections: A list of collection names to which the Variable will\nbe added. Note that, variables will also be added to collections\n`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.\ntrainable: If `True` also add the variable to the graph collection\n`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\nname: Name of the layer\n**kwargs: keyword named properties.", "source": "github-repos"}
{"code": "def gene_filter(self, query, mongo_query):\n        \n        LOG.debug('Adding panel and genes-related parameters to the query')\n\n        gene_query = []\n\n        if query.get('hgnc_symbols') and query.get('gene_panels'):\n            gene_query.append({'hgnc_symbols': {'$in': query['hgnc_symbols']}})\n            gene_query.append({'panels': {'$in': query['gene_panels']}})\n            mongo_query['$or']=gene_query\n        else:\n            if query.get('hgnc_symbols'):\n                hgnc_symbols = query['hgnc_symbols']\n                mongo_query['hgnc_symbols'] = {'$in': hgnc_symbols}\n                LOG.debug(\"Adding hgnc_symbols: %s to query\" %\n                             ', '.join(hgnc_symbols))\n\n            if query.get('gene_panels'):\n                gene_panels = query['gene_panels']\n                mongo_query['panels'] = {'$in': gene_panels}\n\n        return gene_query", "docstring": "Adds gene-related filters to the query object\n\nArgs:\nquery(dict): a dictionary of query filters specified by the users\nmongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\nmongo_query(dict): returned object contains gene and panel-related filters", "source": "juraj-google-style"}
{"code": "async def shuffle_participants(self):\n    res = (await self.connection('POST', 'tournaments/{}/participants/randomize'.format(self._id)))\n    self._refresh_participants_from_json(res)", "docstring": "Shuffle participants' seeds\n\n|methcoro|\n\nNote:\n|from_api| Randomize seeds among participants. Only applicable before a tournament has started.\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def get_available_host_port():\n    logging.warning('The method mobly.utils.get_available_host_port is deprecated because it is unreliable. Pass \"tcp:0\" to adb forward instead.')\n    from mobly.controllers.android_device_lib import adb\n    port = portpicker.pick_unused_port()\n    if not adb.is_adb_available():\n        return port\n    for _ in range(MAX_PORT_ALLOCATION_RETRY):\n        if port not in adb.list_occupied_adb_ports():\n            return port\n        port = portpicker.pick_unused_port()\n    raise Error('Failed to find available port after {} retries'.format(MAX_PORT_ALLOCATION_RETRY))", "docstring": "Gets a host port number available for adb forward.\n\nDEPRECATED: This method is unreliable. Pass `tcp:0` to adb forward instead.\n\nReturns:\nAn integer representing a port number on the host available for adb\nforward.\n\nRaises:\nError: when no port is found after MAX_PORT_ALLOCATION_RETRY times.", "source": "github-repos"}
{"code": "def add_args(self, args):\n    for (key, value) in vars(args).items():\n        if (value is not None):\n            setattr(self, key.upper(), value)", "docstring": "Add the args\n\nArgs:\nargs (namespace): The commandline args", "source": "codesearchnet"}
{"code": "def get_message(self, block=False, timeout=None):\n        \n        try:\n            message = self._inbox.get(block=block, timeout=timeout)\n            return message\n        except Exception:\n            return None", "docstring": "Removes and returns a RTMMessage from self._inbox\n\nArgs:\nblock(bool): if True block until a RTMMessage is available,\nelse it will return None when self._inbox is empty\ntimeout(int): it blocks at most timeout seconds\n\nReturns:\nRTMMessage if self._inbox is not empty, else None", "source": "juraj-google-style"}
{"code": "def get_hash(self, handle):\n        \n        response = self.open_url(url=handle, suffix='.hash')\n        try:\n            return response.read()\n        finally:\n            response.close()", "docstring": "Get the associated hash for the given handle, the hash file must\nexist (``handle + '.hash'``).\n\nArgs:\nhandle (str): Path to the template to get the hash from\n\nReturns:\nstr: Hash for the given handle", "source": "juraj-google-style"}
{"code": "def top_1(x, reduced_dim, dtype=tf.int32, name=None):\n    reduced_dim = convert_to_dimension(reduced_dim)\n    with tf.name_scope(name, default_name='top_1'):\n        max_val = reduce_max(x, reduced_dim=reduced_dim)\n        is_max = to_float(equal(x, max_val))\n        pos = mtf_range(x.mesh, reduced_dim, tf.float32)\n        ret = reduce_max((is_max * pos), reduced_dim=reduced_dim)\n        ret = cast(ret, dtype)\n        return (ret, max_val)", "docstring": "Argmax and Max.\n\nArgs:\nx: a Tensor\nreduced_dim: a Dimension in x.shape.dims\ndtype: a tf.dtype (for the output)\nname: an optional string\nReturns:\nindices: a Tensor with given dtype\nvalues: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)", "source": "codesearchnet"}
{"code": "def from_json(cls, json_data):\n    \n    return cls(user=json_data['client_email'],\n               keydata=json_data['private_key'],\n               token_uri=json_data['token_uri'])", "docstring": "Create an uploader given (parsed) JSON data.\n\nNote that this is a JSON-formatted key file downloaded from Google when\nthe service account key is created, *NOT* a json-encoded\noauth2client.client.SignedJwtAssertionCredentials object.\n\nArgs:\njson_data: Dict containing the loaded JSON key data.\n\nReturns:\na MfgInspectorCallback with credentials.", "source": "juraj-google-style"}
{"code": "def _step(self, actions):\n    self.assert_common_preconditions()\n    assert (len(actions) == len(self._envs))\n    observations = []\n    rewards = []\n    dones = []\n    infos = []\n    for (env, action) in zip(self._envs, actions):\n        (observation, reward, done, info) = env.step(action)\n        observations.append(observation)\n        rewards.append(reward)\n        dones.append(done)\n        infos.append(info)\n    return tuple(map(np.stack, [observations, rewards, dones, infos]))", "docstring": "Takes a step in all environments, shouldn't pre-process or record.\n\nSubclasses should override this to do the actual step if something other\nthan the default implementation is desired.\n\nArgs:\nactions: (np.ndarray) with first dimension equal to the batch size.\n\nReturns:\na tuple of stacked raw observations, raw rewards, dones and infos.", "source": "codesearchnet"}
{"code": "def make_prior(num_topics, initial_value):\n  \n  def _softplus_inverse(x):\n    return np.log(np.expm1(x))\n\n  logit_concentration = tf.compat.v1.get_variable(\n      \"logit_concentration\",\n      shape=[1, num_topics],\n      initializer=tf.compat.v1.initializers.constant(\n          _softplus_inverse(initial_value)))\n  concentration = _clip_dirichlet_parameters(\n      tf.nn.softplus(logit_concentration))\n\n  def prior():\n    return tfd.Dirichlet(concentration=concentration,\n                         name=\"topics_prior\")\n\n  prior_variables = [logit_concentration]\n\n  return prior, prior_variables", "docstring": "Create the prior distribution.\n\nArgs:\nnum_topics: Number of topics.\ninitial_value: The starting value for the prior parameters.\n\nReturns:\nprior: A `callable` that returns a `tf.distribution.Distribution`\ninstance, the prior distribution.\nprior_variables: A `list` of `Variable` objects, the trainable parameters\nof the prior.", "source": "juraj-google-style"}
{"code": "def add_messages(self, validation):\n    if (not isinstance(validation, Validation)):\n        raise TypeError('Argument must be of type Validation')\n    self.messages.extend(validation.messages)", "docstring": "Adds all the messages in the specified `Validation` object to this instance's\nmessages array.\n\nArgs:\nvalidation (Validation): An object containing the messages to add to this instance's messages.", "source": "codesearchnet"}
{"code": "def get_public_tokens(self):\n        \n        r = self.remote_utils.get_url(self.url() + \"public_tokens/\")\n        return r.json()", "docstring": "Get a list of public tokens available on this server.\n\nArguments:\nNone\n\nReturns:\nstr[]: list of public tokens", "source": "juraj-google-style"}
{"code": "def should_execute_combination(self, kwargs):\n    del kwargs\n    return (True, None)", "docstring": "Indicates whether the combination of test arguments should be executed.\n\nIf the environment doesn't satisfy the dependencies of the test\ncombination, then it can be skipped.\n\nArgs:\nkwargs:  Arguments that are passed to the test combination.\n\nReturns:\nA tuple boolean and an optional string.  The boolean False indicates\nthat the test should be skipped.  The string would indicate a textual\ndescription of the reason.  If the test is going to be executed, then\nthis method returns `None` instead of the string.", "source": "github-repos"}
{"code": "def process_column(body: ProcessColumnRequest) -> ResponseReturnValue:\n    credentials = get_credentials(body.auth_config)\n    logger: Logger\n    if not body.log_table:\n        logger = PrintLogger()\n    else:\n        logger = BigQueryLogger(body.log_table, body.auth_config)\n    logger.set_base_log(__version__, body.workflow_execution_id, body.display_source_table, datetime.utcnow())\n    bq_read_client = get_bq_read_client(credentials)\n    parser, usable_rules = map_parser_to_rules(body.column_config['parser'])\n    rules = generate_selected_rules(body.column_config['rules'], usable_rules)\n    column_name = body.column_config['column']\n    cells_iterator = get_cells_iterator(bq_read_client, body.source_table, column_name)\n    row_counter = 0\n    parse_failures = 0\n    rule_errors = 0\n    check_violations = 0\n    for cell in cells_iterator:\n        try:\n            value = parser(cell)\n        except Exception as e:\n            logger.parser(column_name, parser.__name__, str(e), cell)\n            parse_failures += 1\n        else:\n            for rule in rules:\n                try:\n                    result = rule(value)\n                except Exception as e:\n                    logger.rule(column_name, rule.__name__, str(e), value, rule.__kwdefaults__)\n                    rule_errors += 1\n                else:\n                    if result is not None:\n                        logger.rule(column_name, rule.__name__, result, value, rule.__kwdefaults__)\n                        check_violations += 1\n        row_counter += 1\n    logger.flush(force=True)\n    if row_counter == 0:\n        raise RuntimeError('Source table was empty.')\n    message = f'DQM processed {row_counter} rows, with {parse_failures} parse failures, {rule_errors} rule errors, {check_violations} rule check violations.'\n    return (DQMResponse(name='', description=message, code=200), 200)", "docstring": "Process a given column from the specified table.\n\nArgs:\n* body: ProcessColumnRequest HTTP request body\n\nReturns:\n* DQMResponse for the run with a 200 status code\n\nRaises:\n* MalformedConfigError: if the request body was malformed", "source": "github-repos"}
{"code": "def merge_pot1_files(self, delete_source=True):\n        \n        natom = len(self[0].input.structure)\n        max_pertcase = 3 * natom\n\n        pot1_files = []\n        for task in self:\n            if not isinstance(task, DfptTask): continue\n            paths = task.outdir.list_filepaths(wildcard=\"*_POT*\")\n            for path in paths:\n                \n                i = path.rindex(\"_POT\")\n                pertcase = int(path[i+4:].replace(\".nc\", \"\"))\n                if pertcase <= max_pertcase:\n                    pot1_files.append(path)\n\n        \n        if not pot1_files: return None\n\n        self.history.info(\"Will call mrgdvdb to merge %s files:\" % len(pot1_files))\n\n        \n        out_dvdb = self.outdir.path_in(\"out_DVDB\")\n\n        if len(pot1_files) == 1:\n            \n            shutil.copy(pot1_files[0], out_dvdb)\n        else:\n            \n            \n            \n            mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)\n            mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)\n\n        return out_dvdb", "docstring": "This method is called when all the q-points have been computed.\nIt runs `mrgdvdb` in sequential on the local machine to produce\nthe final DVDB file in the outdir of the `Work`.\n\nArgs:\ndelete_source: True if POT1 files should be removed after (successful) merge.\n\nReturns:\npath to the output DVDB file. None if not DFPT POT file is found.", "source": "juraj-google-style"}
{"code": "def localization_diff(localizable_file, translated_file, excluded_strings_file, output_translation_file):\n    old_translated_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(translated_file)\n    if ((excluded_strings_file is not None) and os.path.isfile(excluded_strings_file)):\n        excluded_file_dictionary = generate_localization_key_to_entry_dictionary_from_file(excluded_strings_file)\n    else:\n        excluded_file_dictionary = {}\n    translated_list = old_translated_file_dictionary.keys()\n    output_dictionary = {}\n    output_file_elements = []\n    f = open_strings_file(localizable_file, 'r')\n    output_file_elements.append(Comment((u'\\n\\n' % (VALUE_PLACEHOLDER,))))\n    for (_header_comment, comments, key, value) in extract_header_comment_key_value_tuples_from_file(f):\n        if ((key in translated_list) or (key in excluded_file_dictionary)):\n            if (key in old_translated_file_dictionary):\n                old_translated_file_dictionary.pop(key)\n        elif (value in output_dictionary):\n            output_dictionary[value].add_comments(comments)\n            output_file_elements.append(Comment((u\"\\n\" % value)))\n        else:\n            loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER)\n            output_dictionary[value] = loc_obj\n            output_file_elements.append(loc_obj)\n    for (key, removed_trans) in old_translated_file_dictionary.items():\n        output_file_elements.append(Comment((u'\\n\\n' % (', '.join(removed_trans.comments), removed_trans.key, removed_trans.value))))\n    write_file_elements_to_strings_file(output_translation_file, output_file_elements)", "docstring": "Generates a strings file representing the strings that were yet to be translated.\n\nArgs:\nlocalizable_file (str): The path to the localization strings file, meaning the file that represents the strings\nthat require translation.\ntranslated_file (str): The path to the translated strings file, meaning the file containing the strings that\nwere already translated.\nexcluded_strings_file (str): The path to a file that contains all the strings we want to exclude from this and\nfrom future diffs.\noutput_translation_file (str): The path to the output file, which will contain the strings the require\ntranslation, but are not in the already given translation file.", "source": "codesearchnet"}
{"code": "def all_to_all_v3(communicator, t, group_assignment=None, timeout_seconds=None):\n    if group_assignment is None:\n        group_assignment = []\n    return gen_collective_ops.collective_all_to_all_v3(communicator=communicator, input=t, group_assignment=group_assignment, timeout_seconds=timeout_seconds)", "docstring": "Exchanges tensors mutually.\n\nArgs:\ncommunicator: the resource `tf.Tensor` returned from\n`initialize_communicator`.\nt: a `tf.Tensor`. The first dimension should have the length as the size of\nthe group. `t[i]` is sent to `rank i` within the group.\ngroup_assignment: Optional int32 `tf.Tensor` with shape [num_groups,\nnum_ranks_per_group]. `group_assignment[i]` represents the ranks in the\n`ith` subgroup.\ntimeout_seconds: If set to a non zero, set a completion timeout to detect\nstaleness. If the timer goes off, a DeadlineExceededError is raised. The\ntimeout value in seconds. This feature is experimental.\n\nReturns:\na `tf.Tensor`. `t[i]` is sent from `rank i` within the group.", "source": "github-repos"}
{"code": "def listen_forever(\n            self,\n            timeout_ms: int = 30000,\n            exception_handler: Callable[[Exception], None] = None,\n            bad_sync_timeout: int = 5,\n    ):\n        \n        _bad_sync_timeout = bad_sync_timeout\n        self.should_listen = True\n        while self.should_listen:\n            try:\n                \n                self._sync(timeout_ms)\n                _bad_sync_timeout = bad_sync_timeout\n            except MatrixRequestError as e:\n                log.warning('A MatrixRequestError occured during sync.')\n                if e.code >= 500:\n                    log.warning(\n                        'Problem occured serverside. Waiting',\n                        wait_for=_bad_sync_timeout,\n                    )\n                    gevent.sleep(_bad_sync_timeout)\n                    _bad_sync_timeout = min(_bad_sync_timeout * 2, self.bad_sync_timeout_limit)\n                else:\n                    raise\n            except MatrixHttpLibError:\n                log.exception('A MatrixHttpLibError occured during sync.')\n                if self.should_listen:\n                    gevent.sleep(_bad_sync_timeout)\n                    _bad_sync_timeout = min(_bad_sync_timeout * 2, self.bad_sync_timeout_limit)\n            except Exception as e:\n                log.exception('Exception thrown during sync')\n                if exception_handler is not None:\n                    exception_handler(e)\n                else:\n                    raise", "docstring": "Keep listening for events forever.\nArgs:\ntimeout_ms: How long to poll the Home Server for before retrying.\nexception_handler: Optional exception handler function which can\nbe used to handle exceptions in the caller thread.\nbad_sync_timeout: Base time to wait after an error before retrying.\nWill be increased according to exponential backoff.", "source": "juraj-google-style"}
{"code": "def extract(self, html_text: str, strategy: Strategy=Strategy.ALL_TEXT) \\\n            -> List[Extraction]:\n        \n\n        if html_text:\n            if strategy == Strategy.ALL_TEXT:\n                soup = BeautifulSoup(html_text, 'html.parser')\n                texts = soup.findAll(text=True)\n                visible_texts = filter(self._tag_visible, texts)\n                all_text = u\" \".join(t.strip() for t in visible_texts)\n                return [Extraction(all_text, self.name)]\n            else:\n                relax = strategy == Strategy.MAIN_CONTENT_RELAXED\n                readable = Document(html_text, recallPriority=relax).summary(html_partial=False)\n                clean_text = BeautifulSoup(readable.encode('utf-8'), 'lxml').strings\n                readability_text = ' '.join(clean_text)\n                return [Extraction(readability_text, self.name)]\n        else:\n            return []", "docstring": "Extracts text from an HTML page using a variety of strategies\n\nArgs:\nhtml_text (str): html page in string\nstrategy (enum[Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_RELAXED, Strategy.MAIN_CONTENT_STRICT]): one of\nStrategy.ALL_TEXT, Strategy.MAIN_CONTENT_STRICT and Strategy.MAIN_CONTENT_RELAXED\n\nReturns:\nList[Extraction]: typically a singleton list with the extracted text", "source": "juraj-google-style"}
{"code": "def write(self, b):\n        \n        if not self._writable:\n            raise UnsupportedOperation('write')\n\n        size = len(b)\n        b_view = memoryview(b)\n        size_left = size\n        buffer_size = self._buffer_size\n        max_buffers = self._max_buffers\n\n        with self._seek_lock:\n            end = self._buffer_seek\n            buffer_view = memoryview(self._write_buffer)\n\n            while size_left > 0:\n                \n                start = end\n                end = start + size_left\n\n                if end > buffer_size:\n                    \n                    end = buffer_size\n                    flush = True\n                else:\n                    flush = False\n\n                buffer_range = end - start\n\n                \n                b_start = size - size_left\n                size_left -= buffer_range\n\n                \n                buffer_view[start:end] = b_view[b_start: b_start + buffer_range]\n\n                \n                if flush:\n                    \n                    \n                    self._buffer_seek = end\n\n                    \n                    \n                    self._seek += 1\n\n                    \n                    \n                    if max_buffers:\n                        futures = self._write_futures\n                        flush_wait = self._FLUSH_WAIT\n                        while sum(1 for future in futures\n                                  if not future.done()) >= max_buffers:\n                            sleep(flush_wait)\n\n                    \n                    with handle_os_exceptions():\n                        self._flush()\n\n                    \n                    self._write_buffer = bytearray(buffer_size)\n                    buffer_view = memoryview(self._write_buffer)\n                    end = 0\n\n            \n            self._buffer_seek = end\n            return size", "docstring": "Write the given bytes-like object, b, to the underlying raw stream,\nand return the number of bytes written.\n\nArgs:\nb (bytes-like object): Bytes to write.\n\nReturns:\nint: The number of bytes written.", "source": "juraj-google-style"}
{"code": "def get_requires(self, build_requires=False, private_build_requires=False):\n        \n        requires = self.requires or []\n\n        if build_requires:\n            requires = requires + (self.build_requires or [])\n        if private_build_requires:\n            requires = requires + (self.private_build_requires or [])\n\n        return requires", "docstring": "Get the requirements of the variant.\n\nArgs:\nbuild_requires (bool): If True, include build requirements.\nprivate_build_requires (bool): If True, include private build\nrequirements.\n\nReturns:\nList of `Requirement` objects.", "source": "juraj-google-style"}
{"code": "def get_okeeffe_params(el_symbol):\n    \n\n    el = Element(el_symbol)\n    if el not in list(BV_PARAMS.keys()):\n        raise RuntimeError(\"Could not find O'Keeffe parameters for element\"\n                           \" \\\"{}\\\" in \\\"BV_PARAMS\\\"dictonary\"\n                           \" provided by pymatgen\".format(el_symbol))\n\n    return BV_PARAMS[el]", "docstring": "Returns the elemental parameters related to atom size and\nelectronegativity which are used for estimating bond-valence\nparameters (bond length) of pairs of atoms on the basis of data\nprovided in 'Atoms Sizes and Bond Lengths in Molecules and Crystals'\n(O'Keeffe & Brese, 1991).\n\nArgs:\nel_symbol (str): element symbol.\nReturns:\n(dict): atom-size ('r') and electronegativity-related ('c')\nparameter.", "source": "juraj-google-style"}
{"code": "def data_impl(self, request):\n    \n    run = request.args.get('run')\n    tool = request.args.get('tag')\n    host = request.args.get('host')\n    run_dir = self._run_dir(run)\n    \n    profile_run = os.path.basename(run_dir)\n\n    if tool not in TOOLS:\n      return None\n\n    self.start_grpc_stub_if_necessary()\n    if tool == 'trace_viewer@' and self.stub is not None:\n      from tensorflow.contrib.tpu.profiler import tpu_profiler_analysis_pb2\n      grpc_request = tpu_profiler_analysis_pb2.ProfileSessionDataRequest()\n      grpc_request.repository_root = run_dir\n      grpc_request.session_id = profile_run[:-1]\n      grpc_request.tool_name = 'trace_viewer'\n      \n      grpc_request.host_name = host.rstrip('.')\n\n      grpc_request.parameters['resolution'] = request.args.get('resolution')\n      if request.args.get('start_time_ms') is not None:\n        grpc_request.parameters['start_time_ms'] = request.args.get(\n            'start_time_ms')\n      if request.args.get('end_time_ms') is not None:\n        grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')\n      grpc_response = self.stub.GetSessionToolData(grpc_request)\n      return grpc_response.output\n\n    if tool not in TOOLS:\n      return None\n    tool_name = str(host) + TOOLS[tool]\n    asset_path = os.path.join(run_dir, tool_name)\n    raw_data = None\n    try:\n      with tf.io.gfile.GFile(asset_path, 'rb') as f:\n        raw_data = f.read()\n    except tf.errors.NotFoundError:\n      logger.warn('Asset path %s not found', asset_path)\n    except tf.errors.OpError as e:\n      logger.warn(\"Couldn't read asset path: %s, OpError %s\", asset_path, e)\n\n    if raw_data is None:\n      return None\n    if tool == 'trace_viewer':\n      return process_raw_trace(raw_data)\n    if tool in _RAW_DATA_TOOLS:\n      return raw_data\n    return None", "docstring": "Retrieves and processes the tool data for a run and a host.\n\nArgs:\nrequest: XMLHttpRequest\n\nReturns:\nA string that can be served to the frontend tool or None if tool,\nrun or host is invalid.", "source": "juraj-google-style"}
{"code": "def sub_index(self, sub, start=0, end=None):\n\t\t\n\t\tstart_index = self.index(sub[0], start, end)\n\t\tend = self._fix_end_index(end)\n\t\tif start_index + len(sub) > end:\n\t\t\traise ValueError\n\t\tfor i in range(1, len(sub)):\n\t\t\tif sub[i] != self[start_index + i]:\n\t\t\t\traise ValueError\n\t\treturn start_index", "docstring": "Return the index of a subsequence.\n\nThis runs in O(len(sub))\n\nArgs:\nsub (Sequence): An Iterable to search for\nReturns:\nint: The index of the first element of sub\nRaises:\nValueError: If sub isn't a subsequence\nTypeError: If sub isn't iterable\nIndexError: If start or end are out of range", "source": "juraj-google-style"}
{"code": "def training(loss_op):\n  \n  global_step = tf.Variable(0, name='global_step', trainable=False)\n  with tf.name_scope('train'):\n    optimizer = tf.train.AdamOptimizer(epsilon=0.001)\n    train_op = optimizer.minimize(loss_op, global_step)\n    return train_op, global_step", "docstring": "Calculates the loss from the logits and the labels.\n\nArgs:\nlogits: Logits tensor, float - [batch_size, NUM_CLASSES].\nlabels: Labels tensor, int32 - [batch_size].\nReturns:\nloss: Loss tensor of type float.", "source": "juraj-google-style"}
{"code": "def from_comm(cls, pub):\n        \n        filename = None\n        if pub.b64_data:\n            filename = cls._save_to_unique_filename(pub)\n\n        return cls(\n            isbn=pub.isbn,\n            uuid=pub.uuid,\n            aleph_id=pub.aleph_id,\n\n            dir_pointer=filename\n        )", "docstring": "Convert communication namedtuple to this class.\n\nArgs:\npub (obj): :class:`.Archive` instance which will be converted.\n\nReturns:\nobj: :class:`DBArchive` instance.", "source": "juraj-google-style"}
{"code": "def save_plot(code, elem):\n    \n    if 'plt' in elem.attributes:\n        figurewidth, figureheight = elem.attributes['plt'].split(',')\n    else:\n        try:\n            figureheight = elem.attributes['height']\n        except KeyError:\n            figureheight = '4cm'\n\n        try:\n            figurewidth = elem.attributes['width']\n        except KeyError:\n            figurewidth = '6cm'\n\n    return f", "docstring": "Converts matplotlib plots to tikz code.\n\nIf elem has either the plt attribute (format: plt=width,height) or the\nattributes width=width and/or height=height, the figurewidth and -height\nare set accordingly. If none are given, a height of 4cm and a width of 6cm\nis used as default.\n\nArgs:\ncode: The matplotlib code.\nelem: The element.\n\nReturns:\nThe code and some code to invoke matplotlib2tikz.", "source": "juraj-google-style"}
{"code": "def convert_exchange_to_compounds(model):\n    exchanges = set()\n    for reaction in model.reactions:\n        equation = reaction.properties.get('equation')\n        if (equation is None):\n            continue\n        if (len(equation.compounds) != 1):\n            if ((len(equation.left) == 0) != (len(equation.right) == 0)):\n                logger.warning('Exchange reaction {} has more than one compound, it was not converted to exchange compound'.format(reaction.id))\n            continue\n        exchanges.add(reaction.id)\n    for reaction_id in exchanges:\n        equation = model.reactions[reaction_id].equation\n        (compound, value) = equation.compounds[0]\n        if (compound.compartment != model.extracellular_compartment):\n            continue\n        if (compound in model.exchange):\n            logger.warning('Compound {} is already defined in the exchange definition'.format(compound))\n            continue\n        (lower_flux, upper_flux) = (None, None)\n        if (reaction_id in model.limits):\n            (_, lower, upper) = model.limits[reaction_id]\n            if (lower is not None):\n                lower_flux = (lower * abs(value))\n            if (upper is not None):\n                upper_flux = (upper * abs(value))\n        if ((lower_flux is None) and (equation.direction == Direction.Forward)):\n            lower_flux = 0\n        if ((upper_flux is None) and (equation.direction == Direction.Reverse)):\n            upper_flux = 0\n        if (value > 0):\n            (lower_flux, upper_flux) = (((- upper_flux) if (upper_flux is not None) else None), ((- lower_flux) if (lower_flux is not None) else None))\n        model.exchange[compound] = (compound, reaction_id, lower_flux, upper_flux)\n        model.reactions.discard(reaction_id)\n        model.limits.pop(reaction_id, None)", "docstring": "Convert exchange reactions in model to exchange compounds.\n\nOnly exchange reactions in the extracellular compartment are converted.\nThe extracelluar compartment must be defined for the model.\n\nArgs:\nmodel: :class:`NativeModel`.", "source": "codesearchnet"}
{"code": "def __init__(self, size: DurationTypes, offset: TimestampTypes=0):\n    if size <= 0:\n        raise ValueError('The size parameter must be strictly positive.')\n    self.size = Duration.of(size)\n    self.offset = Timestamp.of(offset) % self.size", "docstring": "Initialize a ``FixedWindows`` function for a given size and offset.\n\nArgs:\nsize (int): Size of the window in seconds.\noffset(int): Offset of this window as seconds. Windows start at\nt=N * size + offset where t=0 is the UNIX epoch. The offset must be a\nvalue in range [0, size). If it is not it will be normalized to this\nrange.", "source": "github-repos"}
{"code": "def seek(self, partition, offset):\n    if (not isinstance(partition, TopicPartition)):\n        raise TypeError('partition must be a TopicPartition namedtuple')\n    assert (isinstance(offset, int) and (offset >= 0)), 'Offset must be >= 0'\n    assert (partition in self._subscription.assigned_partitions()), 'Unassigned partition'\n    log.debug('Seeking to offset %s for partition %s', offset, partition)\n    self._subscription.assignment[partition].seek(offset)", "docstring": "Manually specify the fetch offset for a TopicPartition.\n\nOverrides the fetch offsets that the consumer will use on the next\n:meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same\npartition more than once, the latest offset will be used on the next\n:meth:`~kafka.KafkaConsumer.poll`.\n\nNote: You may lose data if this API is arbitrarily used in the middle of\nconsumption to reset the fetch offsets.\n\nArguments:\npartition (TopicPartition): Partition for seek operation\noffset (int): Message offset in partition\n\nRaises:\nAssertionError: If offset is not an int >= 0; or if partition is not\ncurrently assigned.", "source": "codesearchnet"}
{"code": "def _wait_creative_activation(self, creative_id, timeout=128):\n    if store.get('CREATIVE', creative_id):\n        creative = self._api_creatives().get(profileId=self.profile_id, id=str(creative_id)).execute()\n        wait = 2\n        while not creative['active'] and timeout > 0:\n            print('Waiting %s seconds for creative %s activation...' % (wait, creative_id))\n            time.sleep(wait)\n            timeout -= wait\n            wait *= 2\n            creative = self._api_creatives().get(profileId=self.profile_id, id=str(creative_id)).execute()\n        if not creative['active']:\n            raise Exception('Creative %s failed to activate within defined timeout' % creative['id'])", "docstring": "Waits for a creative to become active.\n\nThis function checks the if the creative is active in intervals that\nincrease exponentially (exponential backoff).\n\nArgs:\ncreative_id: Creative identifier.\ntimeout: Optional parameter, determines how many seconds to wait for the\nactivation.\n\nRaises:\nException: In case the creative doesn't activate within the specified\ntimeout", "source": "github-repos"}
{"code": "def configure_and_build(self, show_progress=True, optimized=True, skip_configuration=False):\n    if (not skip_configuration):\n        configuration_command = ['python', 'waf', 'configure', '--enable-examples', '--disable-gtk', '--disable-python']\n        if optimized:\n            configuration_command += ['--build-profile=optimized', '--out=build/optimized']\n        subprocess.call(configuration_command, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    build_process = subprocess.Popen(['python', 'waf', 'build'], cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    if show_progress:\n        line_iterator = self.get_build_output(build_process)\n        pbar = None\n        try:\n            [initial, total] = next(line_iterator)\n            pbar = tqdm(line_iterator, initial=initial, total=total, unit='file', desc='Building ns-3', smoothing=0)\n            for (current, total) in pbar:\n                pbar.n = current\n        except StopIteration:\n            if (pbar is not None):\n                pbar.n = pbar.total\n    else:\n        build_process.communicate()", "docstring": "Configure and build the ns-3 code.\n\nArgs:\nshow_progress (bool): whether or not to display a progress bar\nduring compilation.\noptimized (bool): whether to use an optimized build. If False, use\na standard ./waf configure.\nskip_configuration (bool): whether to skip the configuration step,\nand only perform compilation.", "source": "codesearchnet"}
{"code": "def parse_functions(bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors) -> Tuple[(Parsed, Errors)]:\n    parens = char_locs['parens']\n    if (not parens):\n        bels_len = (len(bels) - 1)\n        span = (0, bels_len)\n        parsed[span] = {'name': ''.join(bels), 'type': 'Function', 'span': span, 'name_span': span, 'function_level': 'top'}\n        return (parsed, errors)\n    for sp in sorted(parens):\n        (ep, function_level) = parens[sp]\n        if (bels[(sp - 1)] == ' '):\n            continue\n        for i in range((sp - 1), 0, (- 1)):\n            if (bels[i] in [' ', ',', '(']):\n                if (i < (sp - 1)):\n                    if (ep == (- 1)):\n                        span = ((i + 1), (len(bels) - 1))\n                    else:\n                        span = ((i + 1), ep)\n                    parsed[span] = {'name': ''.join(bels[(i + 1):sp]), 'type': 'Function', 'span': span, 'name_span': ((i + 1), (sp - 1)), 'parens_span': (sp, ep), 'function_level': function_level}\n                break\n        else:\n            if (ep == (- 1)):\n                span = (0, (len(bels) - 1))\n            else:\n                span = (0, ep)\n            parsed[span] = {'name': ''.join(bels[0:sp]), 'type': 'Function', 'span': span, 'name_span': (0, (sp - 1)), 'parens_span': (sp, ep), 'function_level': function_level}\n    return (parsed, errors)", "docstring": "Parse functions from BEL using paren, comma, quote character locations\n\nArgs:\nbels: BEL string as list of chars\nchar_locs: paren, comma, quote character locations\nerrors: Any error messages generated during the parse\n\nReturns:\n(functions, errors): function names and locations and error messages", "source": "codesearchnet"}
{"code": "def extract_bundle(self, resource, timeout=-1):\n        \n        return self._client.update(resource, timeout=timeout, custom_headers={\"Content-Type\": \"text/plain\"})", "docstring": "Extracts the existing bundle on the appliance and creates all the artifacts.\n\nArgs:\nresource (dict): Artifact Bundle to extract.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation in\nOneView, it just stops waiting for its completion.\n\nReturns:\ndict: The Artifact Bundle.", "source": "juraj-google-style"}
{"code": "def has_key(cls, *args):\n    key = (args if (len(args) > 1) else args[0])\n    return (key in cls._instances)", "docstring": "Check whether flyweight object with specified key has already been created.\n\nReturns:\nbool: True if already created, False if not", "source": "codesearchnet"}
{"code": "def emit(self, signal, message, analysis_id):\n    log.debug('kernel {} zmq send ({}): {}'.format(analysis_id, signal, message))\n    self.zmq_publish.send(json.dumps({'analysis_id': analysis_id, 'frame': {'signal': signal, 'load': message}}, default=json_encoder_default).encode('utf-8'))", "docstring": "Emit signal to main.\n\nArgs:\nsignal: Name of the signal to be emitted.\nmessage: Message to be sent.\nanalysis_id: Identifies the instance of this analysis.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_2_0):\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the Attributes object.'.format(kmip_version.value))\n    local_stream = BytearrayStream()\n    for attribute in self._attributes:\n        tag = attribute.tag\n        if (not enums.is_attribute(tag, kmip_version=kmip_version)):\n            raise exceptions.AttributeNotSupported('Attribute {} is not supported by KMIP {}.'.format(tag.name, kmip_version.value))\n        attribute.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(Attributes, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the Attributes structure encoding to the data stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode\nAttributes structure data, supporting a write method.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nAttributeNotSupported: Raised if an unsupported attribute is\nfound in the attribute list while encoding.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the Attributes object.", "source": "codesearchnet"}
{"code": "def list_projects(self, entity=None):\n        \n        query = gql()\n        return self._flatten_edges(self.gql(query, variable_values={\n            'entity': entity or self.settings('entity')})['models'])", "docstring": "Lists projects in W&B scoped by entity.\n\nArgs:\nentity (str, optional): The entity to scope this project to.\n\nReturns:\n[{\"id\",\"name\",\"description\"}]", "source": "juraj-google-style"}
{"code": "def get_mapping(self):\n    return {key: val for (key, val) in self.__dict__.iteritems() if val}", "docstring": "Convert the class to dict.\n\nReturns:\ndict: Copy of ``self.__dict__``.", "source": "codesearchnet"}
{"code": "def _check_required_fields(self, fields=None, either_fields=None):\n    for (key, value) in fields.items():\n        if (not value):\n            raise HSException((\"Field '%s' is required.\" % key))\n    if (either_fields is not None):\n        for field in either_fields:\n            if (not any(field.values())):\n                raise HSException(('One of the following fields is required: %s' % ', '.join(field.keys())))", "docstring": "Check the values of the fields\n\nIf no value found in `fields`, an exception will be raised.\n`either_fields` are the fields that one of them must have a value\n\nRaises:\nHSException: If no value found in at least one item of`fields`, or\nno value found in one of the items of `either_fields`\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def zip_fit_params(data):\n    \n    genes, cells = data.shape\n    m = data.mean(1)\n    v = data.var(1)\n    M = (v-m)/(m**2+v-m)\n    \n    \n    M = np.array([min(1.0, max(0.0, x)) for x in M])\n    L = m + v/m - 1.0\n    \n    L[np.isnan(L)] = 0.0\n    L = np.array([max(0.0, x) for x in L])\n    return L, M", "docstring": "Returns the ZIP parameters that best fit a given data set.\n\nArgs:\ndata (array): 2d array of genes x cells belonging to a given cluster\n\nReturns:\nL (array): 1d array of means\nM (array): 1d array of zero-inflation parameter", "source": "juraj-google-style"}
{"code": "def _GetRequestClass(self, method_descriptor):\n    \n    if method_descriptor.containing_service != self.descriptor:\n      raise RuntimeError(\n          'GetRequestClass() given method descriptor for wrong service type.')\n    return method_descriptor.input_type._concrete_class", "docstring": "Returns the class of the request protocol message.\n\nArgs:\nmethod_descriptor: Descriptor of the method for which to return the\nrequest protocol message class.\n\nReturns:\nA class that represents the input protocol message of the specified\nmethod.", "source": "juraj-google-style"}
{"code": "def diff_cleanupEfficiency(self, diffs):\n    changes = False\n    equalities = []\n    lastEquality = None\n    pointer = 0\n    pre_ins = False\n    pre_del = False\n    post_ins = False\n    post_del = False\n    while (pointer < len(diffs)):\n        if (diffs[pointer][0] == self.DIFF_EQUAL):\n            if ((len(diffs[pointer][1]) < self.Diff_EditCost) and (post_ins or post_del)):\n                equalities.append(pointer)\n                pre_ins = post_ins\n                pre_del = post_del\n                lastEquality = diffs[pointer][1]\n            else:\n                equalities = []\n                lastEquality = None\n            post_ins = post_del = False\n        else:\n            if (diffs[pointer][0] == self.DIFF_DELETE):\n                post_del = True\n            else:\n                post_ins = True\n            if (lastEquality and ((pre_ins and pre_del and post_ins and post_del) or ((len(lastEquality) < (self.Diff_EditCost / 2)) and ((((pre_ins + pre_del) + post_ins) + post_del) == 3)))):\n                diffs.insert(equalities[(- 1)], (self.DIFF_DELETE, lastEquality))\n                diffs[(equalities[(- 1)] + 1)] = (self.DIFF_INSERT, diffs[(equalities[(- 1)] + 1)][1])\n                equalities.pop()\n                lastEquality = None\n                if (pre_ins and pre_del):\n                    post_ins = post_del = True\n                    equalities = []\n                else:\n                    if len(equalities):\n                        equalities.pop()\n                    if len(equalities):\n                        pointer = equalities[(- 1)]\n                    else:\n                        pointer = (- 1)\n                    post_ins = post_del = False\n                changes = True\n        pointer += 1\n    if changes:\n        self.diff_cleanupMerge(diffs)", "docstring": "Reduce the number of edits by eliminating operationally trivial\nequalities.\n\nArgs:\ndiffs: Array of diff tuples.", "source": "codesearchnet"}
{"code": "def create_hammersley_samples(order, dim=1, burnin=(- 1), primes=()):\n    if (dim == 1):\n        return create_halton_samples(order=order, dim=1, burnin=burnin, primes=primes)\n    out = numpy.empty((dim, order), dtype=float)\n    out[:(dim - 1)] = create_halton_samples(order=order, dim=(dim - 1), burnin=burnin, primes=primes)\n    out[(dim - 1)] = numpy.linspace(0, 1, (order + 2))[1:(- 1)]\n    return out", "docstring": "Create samples from the Hammersley set.\n\nFor ``dim == 1`` the sequence falls back to Van Der Corput sequence.\n\nArgs:\norder (int):\nThe order of the Hammersley sequence. Defines the number of samples.\ndim (int):\nThe number of dimensions in the Hammersley sequence.\nburnin (int):\nSkip the first ``burnin`` samples. If negative, the maximum of\n``primes`` is used.\nprimes (tuple):\nThe (non-)prime base to calculate values along each axis. If\nempty, growing prime values starting from 2 will be used.\n\nReturns:\n(numpy.ndarray):\nHammersley set with ``shape == (dim, order)``.", "source": "codesearchnet"}
{"code": "def get_keys(keyfiles, signature_type):\n    \n    builtin_keys = {\n        ('release', 'sha1'): [mardor.mozilla.release1_sha1, mardor.mozilla.release2_sha1],\n        ('release', 'sha384'): [mardor.mozilla.release1_sha384, mardor.mozilla.release2_sha384],\n        ('nightly', 'sha1'): [mardor.mozilla.nightly1_sha1, mardor.mozilla.nightly2_sha1],\n        ('nightly', 'sha384'): [mardor.mozilla.nightly1_sha384, mardor.mozilla.nightly2_sha384],\n        ('dep', 'sha1'): [mardor.mozilla.dep1_sha1, mardor.mozilla.dep2_sha1],\n        ('dep', 'sha384'): [mardor.mozilla.dep1_sha384, mardor.mozilla.dep2_sha384],\n        ('autograph-stage', 'sha384'): [mardor.mozilla.autograph_stage_sha384],\n    }\n    keys = []\n    for keyfile in keyfiles:\n        if keyfile.startswith(':mozilla-'):\n            name = keyfile.split(':mozilla-')[1]\n            try:\n                keys.extend(builtin_keys[name, signature_type])\n            except KeyError:\n                raise ValueError('Invalid internal key name: {}'\n                                 .format(keyfile))\n        else:\n            key = open(keyfile, 'rb').read()\n            keys.append(key)\n    return keys", "docstring": "Get public keys for the given keyfiles.\n\nArgs:\nkeyfiles: List of filenames with public keys, or :mozilla- prefixed key\nnames\nsignature_type: one of 'sha1' or 'sha384'\n\nReturns:\nList of public keys as strings", "source": "juraj-google-style"}
{"code": "def convert_concat(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting concat ...')\n    concat_nodes = [layers[i] for i in inputs]\n\n    if len(concat_nodes) == 1:\n        \n        layers[scope_name] = concat_nodes[0]\n        return\n\n    if names == 'short':\n        tf_name = 'CAT' + random_string(5)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    cat = keras.layers.Concatenate(name=tf_name, axis=params['axis'])\n    layers[scope_name] = cat(concat_nodes)", "docstring": "Convert concatenation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def _create(self, monomer, mon_vector):\n    while (self.length != (self.n_units - 1)):\n        if self.linear_chain:\n            move_direction = (np.array(mon_vector) / np.linalg.norm(mon_vector))\n        else:\n            move_direction = self._next_move_direction()\n        self._add_monomer(monomer.copy(), mon_vector, move_direction)", "docstring": "create the polymer from the monomer\n\nArgs:\nmonomer (Molecule)\nmon_vector (numpy.array): molecule vector that starts from the\nstart atom index to the end atom index", "source": "codesearchnet"}
{"code": "def _ova_to_spec(self, filename):\n    ova_extracted_dir = os.path.splitext(filename)[0]\n    if (not os.path.exists(ova_extracted_dir)):\n        os.makedirs(ova_extracted_dir)\n        subprocess.check_output(['tar', '-xvf', filename, '-C', ova_extracted_dir], stderr=subprocess.STDOUT)\n    ovf = glob.glob((ova_extracted_dir + '/master/vms/*/*.ovf'))\n    if (len(ovf) != 1):\n        raise RuntimeError('We support only one vm in ova')\n    image_file = None\n    memory = None\n    vcpus = None\n    with open(ovf[0]) as fd:\n        obj = xmltodict.parse(fd.read())\n        hardware_items = [section for section in obj['ovf:Envelope']['Content']['Section'] if (section['@xsi:type'] == 'ovf:VirtualHardwareSection_Type')]\n        if (len(hardware_items) != 1):\n            raise RuntimeError('We support only one machine desc in ova')\n        hardware_items = hardware_items[0]\n        for item in hardware_items['Item']:\n            CPU_RESOURCE = 3\n            MEMORY_RESOURCE = 4\n            DISK_RESOURCE = 17\n            resource_type = int(item['rasd:ResourceType'])\n            if (resource_type == CPU_RESOURCE):\n                vcpus = (int(item['rasd:cpu_per_socket']) * int(item['rasd:num_of_sockets']))\n            elif (resource_type == MEMORY_RESOURCE):\n                memory = int(item['rasd:VirtualQuantity'])\n                if (item['rasd:AllocationUnits'] != 'MegaBytes'):\n                    raise TypeError('Fix me : we need to suport other units too')\n            elif (resource_type == DISK_RESOURCE):\n                image_file = item['rasd:HostResource']\n    if (image_file is not None):\n        disk_meta = {'root-partition': '/dev/sda1'}\n        disk_spec = [{'type': 'template', 'template_type': 'qcow2', 'format': 'qcow2', 'dev': 'vda', 'name': os.path.basename(image_file), 'path': ((ova_extracted_dir + '/images/') + image_file), 'metadata': disk_meta}]\n    return (disk_spec, memory, vcpus)", "docstring": "Retrieve the given ova and makes a template of it.\nCreates a disk from network provided ova.\nCalculates the needed memory from the ovf.\nThe disk will be cached in the template repo\n\nArgs:\nfilename(str): the url to retrive the data from\n\nTODO:\n* Add hash checking against the server\nfor faster download and latest version\n* Add config script running on host - other place\n* Add cloud init support - by using cdroms in other place\n* Handle cpu in some way - some other place need to pick it up\n* Handle the memory units properly - we just assume MegaBytes\n\nReturns:\nlist of dict: list with the disk specification\nint: VM memory, None if none defined\nint: Number of virtual cpus, None if none defined\n\nRaises:\nRuntimeError: If the ova format is not supported\nTypeError: If the memory units in the ova are noot supported\n(currently only 'MegaBytes')", "source": "codesearchnet"}
{"code": "def get_tensors_from_tensor_names(graph, tensor_names):\n    tensor_name_to_tensor = {}\n    for op in graph.get_operations():\n        for tensor in op.values():\n            tensor_name_to_tensor[get_tensor_name(tensor)] = tensor\n    tensors = []\n    invalid_tensors = []\n    for name in tensor_names:\n        if not isinstance(name, str):\n            raise ValueError(\"Invalid type for a tensor name in the provided graph. Expected type for a tensor name is 'str', instead got type '{}' for tensor name '{}'\".format(type(name), name))\n        tensor = tensor_name_to_tensor.get(name)\n        if tensor is None:\n            invalid_tensors.append(name)\n        else:\n            tensors.append(tensor)\n    if invalid_tensors:\n        raise ValueError(\"Invalid tensors '{}' were found.\".format(','.join(invalid_tensors)))\n    return tensors", "docstring": "Gets the Tensors associated with the `tensor_names` in the provided graph.\n\nArgs:\ngraph: TensorFlow Graph.\ntensor_names: List of strings that represent names of tensors in the graph.\n\nReturns:\nA list of Tensor objects in the same order the names are provided.\n\nRaises:\nValueError:\ntensor_names contains an invalid tensor name.", "source": "github-repos"}
{"code": "def mod(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"mod\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Mods this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the mod against this.\naxis: The axis to mod over.\nlevel: The Multilevel index level to apply mod over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the Mod applied.", "source": "juraj-google-style"}
{"code": "def save(self, config=None):\n        \n        if config is not None:\n            clist = [config]\n        else:\n            clist = [\n                self._system_config,\n                self._global_config,\n                self._repo_config,\n                self._local_config,\n            ]\n\n        for conf in clist:\n            if conf.filename is None:\n                continue\n\n            try:\n                logger.debug(\"Writing '{}'.\".format(conf.filename))\n                dname = os.path.dirname(os.path.abspath(conf.filename))\n                try:\n                    os.makedirs(dname)\n                except OSError as exc:\n                    if exc.errno != errno.EEXIST:\n                        raise\n                conf.write()\n            except Exception as exc:\n                msg = \"failed to write config '{}'\".format(conf.filename)\n                raise ConfigError(msg, exc)", "docstring": "Saves config to config files.\n\nArgs:\nconfig (configobj.ConfigObj): optional config object to save.\n\nRaises:\ndvc.config.ConfigError: thrown if failed to write config file.", "source": "juraj-google-style"}
{"code": "def slice_inputs(self, indices_dataset, inputs):\n    dataset = dataset_ops.DatasetV2.zip((indices_dataset, dataset_ops.DatasetV2.from_tensors(inputs).repeat()))\n\n    def grab_batch(i, data):\n        return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)\n    dataset = dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n    options = options_lib.Options()\n    options.experimental_optimization.apply_default_optimizations = False\n    if self._shuffle:\n        options.experimental_external_state_policy = options_lib.ExternalStatePolicy.IGNORE\n    dataset = dataset.with_options(options)\n    return dataset", "docstring": "Slice inputs into a Dataset of batches.\n\nGiven a Dataset of batch indices and the unsliced inputs,\nthis step slices the inputs in a parallelized fashion\nand produces a dataset of input batches.\n\nArgs:\nindices_dataset: A Dataset of batched indices\ninputs: A python data structure that contains the inputs, targets,\nand possibly sample weights.\n\nReturns:\nA Dataset of input batches matching the batch indices.", "source": "github-repos"}
{"code": "def __init__(self, group, tipe, kind, name, version):\n        \n        group = None if \"*\" == group else group \n        tipe = None if \"*\" == tipe else tipe\n        kind = None if \"*\" == kind else kind\n        name  = None if \"*\" == name else name\n        version = None if \"*\" == version else version\n        \n        self._group = group\n        self._type = tipe\n        self._kind = kind\n        self._name = name\n        self._version = version", "docstring": "Creates instance of a component descriptor\n\nArgs:\ngroup: logical group: 'pip-services-runtime', 'pip-services-logging'\ntype: external type: 'cache', 'services' or 'controllers'\nkind - implementation: 'memory', 'file' or 'memcached'\nname - internal content\nversion: compatibility version: '1.0'. '1.5' or '10.4'", "source": "juraj-google-style"}
{"code": "def _tf_core_packed_nest_with_indices(structure, flat, index, is_nested_fn, sequence_fn=None):\n    packed = []\n    sequence_fn = sequence_fn or sequence_like\n    for s in _tf_core_yield_value(structure):\n        if is_nested_fn(s):\n            new_index, child = _tf_core_packed_nest_with_indices(s, flat, index, is_nested_fn, sequence_fn)\n            packed.append(sequence_fn(s, child))\n            index = new_index\n        else:\n            packed.append(flat[index])\n            index += 1\n    return (index, packed)", "docstring": "Helper function for pack_sequence_as.\n\nArgs:\nstructure: structure to mimic.\nflat: Flattened values to output substructure for.\nindex: Index at which to start reading from flat.\nis_nested_fn: Function used to test if a value should be treated as a nested\nstructure.\nsequence_fn: Function used to generate a new structure instance.\n\nReturns:\nThe tuple (new_index, child), where:\n* new_index - the updated index into `flat` having processed `structure`.\n* packed - the subset of `flat` corresponding to `structure`,\nhaving started at `index`, and packed into the same nested\nformat.\n\nRaises:\nValueError: if `structure` contains more atoms than `flat`\n(assuming indexing starts from `index`).", "source": "github-repos"}
{"code": "def decode_response(data):\n    res = CaseInsensitiveDict()\n    for dataline in data.decode('utf-8').splitlines()[1:]:\n        dataline = dataline.strip()\n        if (not dataline):\n            continue\n        line_parts = dataline.split(':', 1)\n        if (len(line_parts) < 2):\n            line_parts = (line_parts[0], '')\n        res[line_parts[0].strip()] = line_parts[1].strip()\n    return res", "docstring": "Decodes the data from a SSDP response.\n\nArgs:\ndata (bytes): The encoded response.\n\nReturns:\ndict of string -> string: Case-insensitive dictionary of header name to\nheader value pairs extracted from the response.", "source": "codesearchnet"}
{"code": "def describe(self, **kwargs):\n    new_columns = pandas.DataFrame(columns=self.columns).astype(self.dtypes).describe(**kwargs).columns\n\n    def describe_builder(df, internal_indices=[], **kwargs):\n        return df.iloc[(:, internal_indices)].describe(**kwargs)\n    func = self._prepare_method(describe_builder, **kwargs)\n    new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)\n    new_index = self.compute_index(0, new_data, False)\n    return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Generates descriptive statistics.\n\nReturns:\nDataFrame object containing the descriptive statistics of the DataFrame.", "source": "codesearchnet"}
{"code": "def handle_message(self, msg, host):\n    logger.debug('Executing handle_message method.')\n    response = None\n    if (self.encryption and self.server_key):\n        msg_data = unserialize_data(msg, self.compression, self.encryption)\n    else:\n        msg_data = unserialize_data(msg, self.compression)\n    logger.debug(('Packet received: ' + pformat(msg_data)))\n    if (not msg_data):\n        return response\n    if ('method' in msg_data):\n        if (msg_data['method'] == 'OHAI Client'):\n            logger.debug(('<%s> Autodiscover response from server received from: %s' % (self.cuuid, host[0])))\n            self.discovered_servers[host] = [msg_data['version'], msg_data['server_name']]\n            if self.autoregistering:\n                self.register(host)\n                self.autoregistering = False\n        elif (msg_data['method'] == 'NOTIFY'):\n            self.event_notifies[msg_data['euuid']] = msg_data['event_data']\n            logger.debug(('<%s> Notify received' % self.cuuid))\n            logger.debug(('<%s> Notify event buffer: %s' % (self.cuuid, pformat(self.event_notifies))))\n            response = serialize_data({'cuuid': str(self.cuuid), 'method': 'OK NOTIFY', 'euuid': msg_data['euuid']}, self.compression, self.encryption, self.server_key)\n        elif (msg_data['method'] == 'OK REGISTER'):\n            logger.debug(('<%s> Ok register received' % self.cuuid))\n            self.registered = True\n            self.server = host\n            if (('encryption' in msg_data) and self.encryption):\n                self.server_key = PublicKey(msg_data['encryption'][0], msg_data['encryption'][1])\n        elif ((msg_data['method'] == 'LEGAL') or (msg_data['method'] == 'ILLEGAL')):\n            logger.debug(('<%s> Legality message received' % str(self.cuuid)))\n            self.legal_check(msg_data)\n            response = serialize_data({'cuuid': str(self.cuuid), 'method': 'OK EVENT', 'euuid': msg_data['euuid']}, self.compression, self.encryption, self.server_key)\n    logger.debug('Packet processing completed')\n    return response", "docstring": "Processes messages that have been delivered from the transport\nprotocol\n\nArgs:\nmsg (string): The raw packet data delivered from the transport\nprotocol.\nhost (tuple): A tuple containing the (address, port) combination of\nthe message's origin.\n\nReturns:\nA formatted response to the client with the results of the processed\nmessage.\n\nExamples:\n>>> msg\n{\"method\": \"OHAI Client\", \"version\": \"1.0\"}\n>>> host\n('192.168.0.20', 36545)", "source": "codesearchnet"}
{"code": "def isparent(path1, path2):\n    bits1 = path1.split('/')\n    bits2 = path2.split('/')\n    while (bits1 and (bits1[(- 1)] == '')):\n        bits1.pop()\n    if (len(bits1) > len(bits2)):\n        return False\n    for (bit1, bit2) in zip(bits1, bits2):\n        if (bit1 != bit2):\n            return False\n    return True", "docstring": "Check if ``path1`` is a parent directory of ``path2``.\n\nArguments:\npath1 (str): A PyFilesytem path.\npath2 (str): A PyFilesytem path.\n\nReturns:\nbool: `True` if ``path1`` is a parent directory of ``path2``\n\nExample:\n>>> isparent(\"foo/bar\", \"foo/bar/spam.txt\")\nTrue\n>>> isparent(\"foo/bar/\", \"foo/bar\")\nTrue\n>>> isparent(\"foo/barry\", \"foo/baz/bar\")\nFalse\n>>> isparent(\"foo/bar/baz/\", \"foo/baz/bar\")\nFalse", "source": "codesearchnet"}
{"code": "def doc_replace(match, sphinx_docs):\n    \n    sphinx_docs.append(match.group(\"path\"))\n    return \"`{}`_\".format(match.group(\"value\"))", "docstring": "Convert Sphinx ``:doc:`` to plain reST link.\n\nArgs:\nmatch (_sre.SRE_Match): A match (from ``re``) to be used\nin substitution.\nsphinx_docs (list): List to be track the documents that have been\nencountered.\n\nReturns:\nstr: The ``match`` converted to a link.", "source": "juraj-google-style"}
{"code": "def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):\n    if (not outname):\n        outname = self.project_name\n    if (not outdir):\n        outdir = ''\n    outname = op.join(outdir, outname)\n    self.out_sspro = '{}.ss'.format(outname)\n    self.out_sspro8 = '{}.ss8'.format(outname)\n    self.out_accpro = '{}.acc'.format(outname)\n    self.out_accpro20 = '{}.acc20'.format(outname)\n    ssbio.utils.command_runner(shell_command='{} {} {} {}'.format(path_to_scratch, self.seq_file, outname, num_cores), force_rerun_flag=force_rerun, outfile_checker='{}.ss'.format(outname))", "docstring": "Run SCRATCH on the sequence_file that was loaded into the class.\n\nArgs:\npath_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh\noutname: Prefix to name the output files\noutdir: Directory to store the output files\nforce_rerun: Flag to force rerunning of SCRATCH even if the output files exist\n\nReturns:", "source": "codesearchnet"}
{"code": "def get_sid_from_name(name):\n    \n    \n    if name is None:\n        name = 'NULL SID'\n\n    try:\n        sid = win32security.LookupAccountName(None, name)[0]\n    except pywintypes.error as exc:\n        raise CommandExecutionError(\n            'User {0} not found: {1}'.format(name, exc))\n\n    return win32security.ConvertSidToStringSid(sid)", "docstring": "This is a tool for getting a sid from a name. The name can be any object.\nUsually a user or a group\n\nArgs:\nname (str): The name of the user or group for which to get the sid\n\nReturns:\nstr: The corresponding SID", "source": "juraj-google-style"}
{"code": "def execute_by_options(args):\n    \n    if args['subcommand'] == 'sphinx':\n        s = Sphinx(proj_info)\n        if args['quickstart']:\n            s.quickstart()\n        elif args['gen_code_api']:\n            s.gen_code_api()\n        elif args['rst2html']:\n            s.rst2html()\n        pass\n    elif args['subcommand'] == 'offline_dist':\n        pod = PyOfflineDist()\n        if args['freeze_deps']:\n            pod.freeze_deps()\n        elif args['download_deps']:\n            pod.download_deps()\n        elif args['install_deps']:\n            pod.install_deps()\n        elif args['clean_deps']:\n            pod.clean_deps()\n        elif args['mkbinary']:\n            pod.pyinstaller_mkbinary(args['mkbinary'])\n        elif args['clean_binary']:\n            pod.clean_binary()\n\n    pass", "docstring": "execute by argument dictionary\n\nArgs:\nargs (dict): command line argument dictionary", "source": "juraj-google-style"}
{"code": "def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):\n    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)\n    model = AutoModelForSeq2SeqLM.from_config(cfg)\n    model.save_pretrained(save_dir)\n    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)\n    return model", "docstring": "Save a randomly initialized version of a model using a pretrained config.\nArgs:\nconfig_name: which config to use\nsave_dir: where to save the resulting model and tokenizer\nconfig_kwargs: Passed to AutoConfig\n\nUsage::\nsave_randomly_initialized_version(\"facebook/bart-large-cnn\", \"distilbart_random_cnn_6_3\", encoder_layers=6, decoder_layers=3, num_beams=3)", "source": "github-repos"}
{"code": "def rr_history(self, ips):\n    api_name = 'opendns-rr_history'\n    fmt_url_path = u'dnsdb/ip/a/{0}.json'\n    return self._multi_get(api_name, fmt_url_path, ips)", "docstring": "Get the domains related to input ips.\n\nArgs:\nips: an enumerable of strings as ips\nReturns:\nAn enumerable of resource records and features", "source": "codesearchnet"}
{"code": "def set_webconfiguration_settings(name, settings, location=''):\n    ps_cmd = []\n    if (not settings):\n        log.warning('No settings provided')\n        return False\n    settings = _prepare_settings(name, settings)\n    for (idx, setting) in enumerate(settings):\n        if (setting['name'].split('.')[(- 1)] != 'Collection'):\n            settings[idx]['value'] = six.text_type(setting['value'])\n    current_settings = get_webconfiguration_settings(name=name, settings=settings, location=location)\n    if (settings == current_settings):\n        log.debug('Settings already contain the provided values.')\n        return True\n    for setting in settings:\n        if (setting['name'].split('.')[(- 1)] != 'Collection'):\n            try:\n                complex(setting['value'])\n                value = setting['value']\n            except ValueError:\n                value = \"'{0}'\".format(setting['value'])\n        else:\n            configelement_list = []\n            for value_item in setting['value']:\n                configelement_construct = []\n                for (key, value) in value_item.items():\n                    configelement_construct.append(\"{0}='{1}'\".format(key, value))\n                configelement_list.append((('@{' + ';'.join(configelement_construct)) + '}'))\n            value = ','.join(configelement_list)\n        ps_cmd.extend(['Set-WebConfigurationProperty', '-PSPath', \"'{0}'\".format(name), '-Filter', \"'{0}'\".format(setting['filter']), '-Name', \"'{0}'\".format(setting['name']), '-Location', \"'{0}'\".format(location), '-Value', '{0};'.format(value)])\n    cmd_ret = _srvmgr(ps_cmd)\n    if (cmd_ret['retcode'] != 0):\n        msg = 'Unable to set settings for {0}'.format(name)\n        raise CommandExecutionError(msg)\n    new_settings = get_webconfiguration_settings(name=name, settings=settings, location=location)\n    failed_settings = []\n    for (idx, setting) in enumerate(settings):\n        is_collection = (setting['name'].split('.')[(- 1)] == 'Collection')\n        if (((not is_collection) and (six.text_type(setting['value']) != six.text_type(new_settings[idx]['value']))) or (is_collection and (list(map(dict, setting['value'])) != list(map(dict, new_settings[idx]['value']))))):\n            failed_settings.append(setting)\n    if failed_settings:\n        log.error('Failed to change settings: %s', failed_settings)\n        return False\n    log.debug('Settings configured successfully: %s', settings)\n    return True", "docstring": "r'''\nSet the value of the setting for an IIS container.\n\nArgs:\nname (str): The PSPath of the IIS webconfiguration settings.\nsettings (list): A list of dictionaries containing setting name, filter and value.\nlocation (str): The location of the settings (optional)\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.set_webconfiguration_settings name='IIS:\\' settings=\"[{'name': 'enabled', 'filter': 'system.webServer/security/authentication/anonymousAuthentication', 'value': False}]\"", "source": "codesearchnet"}
{"code": "def consult_hook(self, item_session: ItemSession, verdict: bool, reason: str, test_info: dict):\n    try:\n        reasons = {'filters': test_info['map'], 'reason': reason}\n        verdict = self.hook_dispatcher.call(PluginFunctions.accept_url, item_session, verdict, reasons)\n        reason = 'callback_hook'\n    except HookDisconnected:\n        pass\n    return (verdict, reason)", "docstring": "Consult the scripting hook.\n\nReturns:\ntuple: (bool, str)", "source": "codesearchnet"}
{"code": "def create_histogram(df):\n    \n    fig = Figure(\"/mg/histogram/\", \"mg_histogram\")\n    fig.layout.set_size(width=450, height=200)\n    fig.layout.set_margin(left=40, right=40)\n    fig.graphics.animate_on_load()\n\n    \n    return Histogram(df, fig, \"value\", 20, init_params={\"Data\": \"Steps\"})", "docstring": "create a mg line plot\n\nArgs:\ndf (pandas.DataFrame): data to plot", "source": "juraj-google-style"}
{"code": "def _on_channel_close(self, channel, reply_code_or_reason, reply_text=None):\n    if isinstance(reply_code_or_reason, pika_errs.ChannelClosed):\n        reply_code = reply_code_or_reason.reply_code\n        reply_text = reply_code_or_reason.reply_text\n    elif isinstance(reply_code_or_reason, int):\n        reply_code = reply_code_or_reason\n    else:\n        reply_code = 0\n        reply_text = str(reply_code_or_reason)\n    _log.info('Channel %r closed (%d): %s', channel, reply_code, reply_text)\n    self._channel = None", "docstring": "Callback invoked when the channel is closed.\n\nArgs:\nchannel (pika.channel.Channel): The channel that got closed.\nreply_code_or_reason (int|Exception): The reason why the channel\nwas closed. In older versions of pika, this is the AMQP code.\nreply_text (str): The human-readable reason for the channel's\nclosure (only in older versions of pika).", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):\n        \n        \n        \n        \n        \n        input_ = deepcopy(input_)\n        public_key = input_.owners_before[0]\n        message = sha3_256(message.encode())\n        if input_.fulfills:\n            message.update('{}{}'.format(\n                input_.fulfills.txid, input_.fulfills.output).encode())\n\n        try:\n            \n            \n            input_.fulfillment.sign(\n                message.digest(), base58.b58decode(key_pairs[public_key].encode()))\n        except KeyError:\n            raise KeypairMismatchException('Public key {} is not a pair to '\n                                           'any of the private keys'\n                                           .format(public_key))\n        return input_", "docstring": "Signs a Ed25519Fulfillment.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The input to be signed.\nmessage (str): The message to be signed\nkey_pairs (dict): The keys to sign the Transaction with.", "source": "juraj-google-style"}
{"code": "def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n    temperatures = np.linspace(tmin, tmax, ntemp)\n    if self.structure:\n        ylabel = '$\\\\Delta E$ (kJ/mol)'\n    else:\n        ylabel = '$\\\\Delta E$ (kJ/mol-c)'\n    fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim, factor=0.001, **kwargs)\n    return fig", "docstring": "Plots the vibrational internal energy in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "codesearchnet"}
{"code": "def export(self, composite=False):\n    if composite:\n        if (rname_rfc6680 is None):\n            raise NotImplementedError('Your GSSAPI implementation does not support RFC 6680 (the GSSAPI naming extensions)')\n        return rname_rfc6680.export_name_composite(self)\n    else:\n        return rname.export_name(self)", "docstring": "Export this name as a token.\n\nThis method exports the name into a byte string which can then be\nimported by using the `token` argument of the constructor.\n\nArgs:\ncomposite (bool): whether or not use to a composite token --\n:requires-ext:`rfc6680`\n\nReturns:\nbytes: the exported name in token form\n\nRaises:\nMechanismNameRequiredError\nBadNameTypeError\nBadNameError", "source": "codesearchnet"}
{"code": "async def send_script(self, conn_id, data):\n    self._ensure_connection(conn_id, True)\n    connection_string = self._get_property(conn_id, 'connection_string')\n    msg = dict(connection_string=connection_string, fragment_count=1, fragment_index=0, script=base64.b64encode(data))\n    (await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse))", "docstring": "Send a a script to this IOTile device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\ndata (bytes): the script to send to the device", "source": "codesearchnet"}
{"code": "def init(name, *args):\n    \n    matcher = get(name)\n    if not matcher:\n        raise ValueError('Cannot find matcher: {}'.format(name))\n    return matcher(*args)", "docstring": "Initializes a matcher instance passing variadic arguments to\nits constructor. Acts as a delegator proxy.\n\nArguments:\nname (str): matcher class name or alias to execute.\n*args (mixed): variadic argument\n\nReturns:\nmatcher: matcher instance.\n\nRaises:\nValueError: if matcher was not found.", "source": "juraj-google-style"}
{"code": "def list_vmss(access_token, subscription_id, resource_group):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VM Scale Sets in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body of a list of scale set model views.", "source": "codesearchnet"}
{"code": "def __init__(self, dfk, max_threads=10):\n        \n        self._scaling_enabled = False\n\n        self.label = 'data_manager'\n        self.dfk = dfk\n        self.max_threads = max_threads\n        self.globus = None\n        self.managed = True", "docstring": "Initialize the DataManager.\n\nArgs:\n- dfk (DataFlowKernel): The DataFlowKernel that this DataManager is managing data for.\n\nKwargs:\n- max_threads (int): Number of threads. Default is 10.\n- executors (list of Executors): Executors for which data transfer will be managed.", "source": "juraj-google-style"}
{"code": "def get_dimension(self, key, value, **kwargs):\n        \n        return self._get_object_by_name(self._DIMENSION_ENDPOINT_SUFFIX,\n                                        '{0}/{1}'.format(key, value),\n                                        **kwargs)", "docstring": "get a dimension by key and value\n\nArgs:\nkey (string): key of the dimension\nvalue (string): value of the dimension\n\nReturns:\ndictionary of response", "source": "juraj-google-style"}
{"code": "def in_template_path(fn):\n    \n    return os.path.join(\n        os.path.abspath(os.path.dirname(__file__)),\n        \"../templates\",\n        fn,\n    )", "docstring": "Return `fn` in template context, or in other words add `fn` to template\npath, so you don't need to write absolute path of `fn` in template\ndirectory manually.\n\nArgs:\nfn (str): Name of the file in template dir.\n\nReturn:\nstr: Absolute path to the file.", "source": "juraj-google-style"}
{"code": "def _expand_args(argv):\n\n    def _expand_single_arg(arg, result):\n        if arg.startswith('@'):\n            with open(arg[1:]) as f:\n                for earg in f.read().splitlines():\n                    _expand_single_arg(earg, result)\n        else:\n            result.append(arg)\n    expanded_args = []\n    for arg in argv:\n        _expand_single_arg(arg, expanded_args)\n    return expanded_args", "docstring": "Returns argv with flagfiles expanded.\n\nA flagfile is an argument starting with \"@\". The remainder of the argument is\ninterpreted as the path to a file containing a list of arguments, one per\nline. Flagfiles may contain references to other flagfiles.\n\nArgs:\nargv: Command line arguments.", "source": "github-repos"}
{"code": "def find_runner(program):\n    \n    if os.path.isfile(program) and not os.access(program, os.X_OK):\n        \n        try:\n            opened = open(program)\n        except PermissionError:\n            return None\n        first_line = opened.readline().strip()\n        if first_line.startswith('\n            return shlex.split(first_line[2:])\n        if program.endswith('.py'):\n            return [sys.executable]\n    return None", "docstring": "Return a command that will run program.\n\nArgs:\nprogram: The string name of the program to try to run.\nReturns:\ncommandline list of strings to run the program (eg. with subprocess.call()) or None", "source": "juraj-google-style"}
{"code": "def set_xlim(self, xlims, dx, xscale, reverse=False):\n    self._set_axis_limits('x', xlims, dx, xscale, reverse)\n    return", "docstring": "Set x limits for plot.\n\nThis will set the limits for the x axis\nfor the specific plot.\n\nArgs:\nxlims (len-2 list of floats): The limits for the axis.\ndx (float): Amount to increment by between the limits.\nxscale (str): Scale of the axis. Either `log` or `lin`.\nreverse (bool, optional): If True, reverse the axis tick marks. Default is False.", "source": "codesearchnet"}
{"code": "def Print(self, output_writer):\n    \n    if self._file_scanner:\n      output_writer.Write('\\tsignature identifiers: {0:s}\\n'.format(\n          ', '.join(self._signature_identifiers)))", "docstring": "Prints a human readable version of the filter.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, labels: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, bbox: tf.Tensor | None=None, pixel_values: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]]:\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    outputs = self.layoutlmv3(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, training=training)\n    sequence_output = outputs[0][:, 0, :]\n    logits = self.classifier(sequence_output, training=training)\n    loss = None if labels is None else self.hf_compute_loss(labels, logits)\n    if not return_dict:\n        output = (logits,) + outputs[1:]\n        return (loss,) + output if loss is not None else output\n    return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification\n>>> from datasets import load_dataset\n>>> import tensorflow as tf\n\n>>> processor = AutoProcessor.from_pretrained(\"microsoft/layoutlmv3-base\", apply_ocr=False)\n>>> model = TFAutoModelForSequenceClassification.from_pretrained(\"microsoft/layoutlmv3-base\")\n\n>>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\", trust_remote_code=True)\n>>> example = dataset[0]\n>>> image = example[\"image\"]\n>>> words = example[\"tokens\"]\n>>> boxes = example[\"bboxes\"]\n\n>>> encoding = processor(image, words, boxes=boxes, return_tensors=\"tf\")\n>>> sequence_label = tf.convert_to_tensor([1])\n\n>>> outputs = model(**encoding, labels=sequence_label)\n>>> loss = outputs.loss\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def get_instance(cls, device):\n    if (cls._nuis.get(device) is None):\n        cls._nuis[device] = AndroidUiautomationPoco(device)\n    return cls._nuis[device]", "docstring": "This is only a slot to store and get already initialized poco instance rather than initializing again. You can\nsimply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.\nIf no such AndroidUiautomationPoco instance, a new instance will be created and stored.\n\nArgs:\ndevice (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``\n\nReturns:\npoco instance", "source": "codesearchnet"}
{"code": "def _save_to_database(url, property_name, data):\n    \n    data = json.dumps([\n        d.to_dict() if hasattr(d, \"to_dict\") else d\n        for d in data\n    ])\n\n    logger.debug(\"_save_to_database() data: %s\" % repr(data))\n\n    requests.post(\n        _WEB_URL + _REQUEST_DB_SAVE,\n        timeout=REQUEST_TIMEOUT,\n        allow_redirects=True,\n        verify=False,\n        data={\n            \"url\": url,\n            \"value\": data,\n            \"property_name\": property_name,\n        }\n    )\n\n    logger.info(\n        \"`%s` for `%s` sent to REST DB.\" % (\n            property_name,\n            url,\n        )\n    )", "docstring": "Store `data` under `property_name` in the `url` key in REST API DB.\n\nArgs:\nurl (obj): URL of the resource to which `property_name` will be stored.\nproperty_name (str): Name of the property under which the `data` will\nbe stored.\ndata (obj): Any object.", "source": "juraj-google-style"}
{"code": "def dict_from_items_with_values(*dictionaries, **items):\n    \n    dict_list = list(dictionaries)\n    dict_list.append(items)\n    result = {}\n    for d in dict_list:\n        for key, value in d.items():\n            if value is not None:\n                result[key] = value\n    return result", "docstring": "Creates a dict with the inputted items; pruning any that are `None`.\n\nArgs:\n*dictionaries(dict): Dictionaries of items to be pruned and included.\n**items: Items to be pruned and included.\n\nReturns:\ndict: A dictionary containing all of the items with a 'non-None' value.", "source": "juraj-google-style"}
{"code": "def eager_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int=0, mask_function: Callable=causal_mask_function, attention_mask: Optional[torch.Tensor]=None, dtype: torch.dtype=torch.float32, **kwargs) -> torch.Tensor:\n    _ = kwargs.pop('allow_is_causal_skip', None)\n    mask = sdpa_mask(batch_size=batch_size, cache_position=cache_position, kv_length=kv_length, kv_offset=kv_offset, mask_function=mask_function, attention_mask=attention_mask, allow_is_causal_skip=False, allow_torch_fix=False, **kwargs)\n    min_dtype = torch.finfo(dtype).min\n    mask = torch.where(mask, torch.tensor(0.0, device=mask.device, dtype=dtype), min_dtype)\n    return mask", "docstring": "Create a 4D float mask of shape `(batch_size, 1, query_length, kv_length)` where a value of 0 indicates that\nthe element should take part in the attention computation, and -inf (minimum value for the given `dtype`) that\nit should not.\n\nArgs:\nbatch_size (`int`):\nThe batch size of the input sequence.\ncache_position (`torch.Tensor`):\nA tensor of shape (query_length,) indicating the current indices of the input sequence elements.\nkv_length (`int`):\nThe size that the key and value states will have during the attention computation.\nkv_offset (`int`, optional):\nAn optional offset to indicate at which first position the key and values states will refer to.\nmask_function (`Callable`):\nThe mask factory function describing the mask pattern.\nattention_mask (`torch.Tensor`, optional):\nThe 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)\ndtype (`torch.dtype`, optional):\nThe dtype to use for the mask. By default, `torch.float32`.", "source": "github-repos"}
{"code": "def run(self, dag):\n        \n        \n        for node in dag.op_nodes(self.gate):\n            \n            if not node.op.definition:\n                continue\n            \n            rule = node.op.definition\n            \n            \n            decomposition = DAGCircuit()\n            decomposition.add_qreg(rule[0][1][0][0])\n            if rule[0][2]:\n                decomposition.add_creg(rule[0][2][0][0])\n            for inst in rule:\n                decomposition.apply_operation_back(*inst)\n            dag.substitute_node_with_dag(node, decomposition)\n        return dag", "docstring": "Expand a given gate into its decomposition.\n\nArgs:\ndag(DAGCircuit): input dag\nReturns:\nDAGCircuit: output dag where gate was expanded.", "source": "juraj-google-style"}
{"code": "def inspect_service(self, service, insert_defaults=None):\n        \n        url = self._url('/services/{0}', service)\n        params = {}\n        if insert_defaults is not None:\n            if utils.version_lt(self._version, '1.29'):\n                raise errors.InvalidVersion(\n                    'insert_defaults is not supported in API version < 1.29'\n                )\n            params['insertDefaults'] = insert_defaults\n\n        return self._result(self._get(url, params=params), True)", "docstring": "Return information about a service.\n\nArgs:\nservice (str): Service name or ID.\ninsert_defaults (boolean): If true, default values will be merged\ninto the service inspect output.\n\nReturns:\n(dict): A dictionary of the server-side representation of the\nservice, including all relevant properties.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def data_group_type(self, group_data):\n    if isinstance(group_data, dict):\n        file_content = group_data.pop('fileContent', None)\n        if (file_content is not None):\n            self._files[group_data.get('xid')] = {'fileContent': file_content, 'type': group_data.get('type')}\n    else:\n        GROUPS_STRINGS_WITH_FILE_CONTENTS = ['Document', 'Report']\n        if (group_data.data.get('type') in GROUPS_STRINGS_WITH_FILE_CONTENTS):\n            self._files[group_data.data.get('xid')] = group_data.file_data\n        group_data = group_data.data\n    return group_data", "docstring": "Return dict representation of group data.\n\nArgs:\ngroup_data (dict|obj): The group data dict or object.\n\nReturns:\ndict: The group data in dict format.", "source": "codesearchnet"}
{"code": "def export_saved_model(sess, export_dir, tag_set, signatures):\n    import tensorflow as tf\n    g = sess.graph\n    g._unsafe_unfinalize()\n    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n    logging.info('===== signatures: {}'.format(signatures))\n    signature_def_map = {}\n    for (key, sig) in signatures.items():\n        signature_def_map[key] = tf.saved_model.signature_def_utils.build_signature_def(inputs={name: tf.saved_model.utils.build_tensor_info(tensor) for (name, tensor) in sig['inputs'].items()}, outputs={name: tf.saved_model.utils.build_tensor_info(tensor) for (name, tensor) in sig['outputs'].items()}, method_name=(sig['method_name'] if ('method_name' in sig) else key))\n    logging.info('===== signature_def_map: {}'.format(signature_def_map))\n    builder.add_meta_graph_and_variables(sess, tag_set.split(','), signature_def_map=signature_def_map, clear_devices=True)\n    g.finalize()\n    builder.save()", "docstring": "Convenience function to export a saved_model using provided arguments\n\nThe caller specifies the saved_model signatures in a simplified python dictionary form, as follows::\n\nsignatures = {\n'signature_def_key': {\n'inputs': { 'input_tensor_alias': input_tensor_name },\n'outputs': { 'output_tensor_alias': output_tensor_name },\n'method_name': 'method'\n}\n}\n\nAnd this function will generate the `signature_def_map` and export the saved_model.\n\nArgs:\n:sess: a tf.Session instance\n:export_dir: path to save exported saved_model\n:tag_set: string tag_set to identify the exported graph\n:signatures: simplified dictionary representation of a TensorFlow signature_def_map\n\nReturns:\nA saved_model exported to disk at ``export_dir``.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if (len(self._credentials) == 0):\n        raise ValueError('Authentication struct missing credentials.')\n    for credential in self._credentials:\n        credential.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(Authentication, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Authentication struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def single_gate_params(gate, params=None):\n    \n    if gate in ('U', 'u3'):\n        return params[0], params[1], params[2]\n    elif gate == 'u2':\n        return np.pi / 2, params[0], params[1]\n    elif gate == 'u1':\n        return 0, 0, params[0]\n    elif gate == 'id':\n        return 0, 0, 0\n    raise QiskitError('Gate is not among the valid types: %s' % gate)", "docstring": "Apply a single qubit gate to the qubit.\n\nArgs:\ngate(str): the single qubit gate name\nparams(list): the operation parameters op['params']\nReturns:\ntuple: a tuple of U gate parameters (theta, phi, lam)\nRaises:\nQiskitError: if the gate name is not valid", "source": "juraj-google-style"}
{"code": "def split_folder_and_path(filepath):\n    \n    dirname = op.dirname(filepath)\n    filename = op.basename(filepath)\n    splitext = op.splitext(filename)\n    filename_without_extension = splitext[0]\n    extension = splitext[1]\n\n    return dirname, filename_without_extension, extension", "docstring": "Split a file path into its folder, filename, and extension\n\nArgs:\npath (str): Path to a file\n\nReturns:\ntuple: of (folder, filename (without extension), extension)", "source": "juraj-google-style"}
{"code": "def add(self, payload=None):\n    try:\n        db = self._client[self.database]\n        col = db[WORKFLOW_DATA_COLLECTION_NAME]\n        return str(col.insert_one({DataStoreDocumentSection.Meta: (payload if isinstance(payload, dict) else {}), DataStoreDocumentSection.Data: {}}).inserted_id)\n    except ConnectionFailure:\n        raise DataStoreNotConnected()", "docstring": "Adds a new document to the data store and returns its id.\n\nArgs:\npayload (dict): Dictionary of initial data that should be stored\nin the new document in the meta section.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.\n\nReturns:\nstr: The id of the newly created document.", "source": "codesearchnet"}
{"code": "def NewPathSpec(cls, type_indicator, **kwargs):\n    if (type_indicator not in cls._path_spec_types):\n        raise KeyError('Path specification type: {0:s} not set.'.format(type_indicator))\n    if (('parent' in kwargs) and (kwargs['parent'] is None)):\n        del kwargs['parent']\n    path_spec_type = cls._path_spec_types[type_indicator]\n    return path_spec_type(**kwargs)", "docstring": "Creates a new path specification for the specific type indicator.\n\nArgs:\ntype_indicator (str): type indicator.\nkwargs (dict): keyword arguments depending on the path specification.\n\nReturns:\nPathSpec: path specification.\n\nRaises:\nKeyError: if path specification is not registered.", "source": "codesearchnet"}
{"code": "def get_typecast_value(self, value, type):\n    \n\n    if type == entities.Variable.Type.BOOLEAN:\n      return value == 'true'\n    elif type == entities.Variable.Type.INTEGER:\n      return int(value)\n    elif type == entities.Variable.Type.DOUBLE:\n      return float(value)\n    else:\n      return value", "docstring": "Helper method to determine actual value based on type of feature variable.\n\nArgs:\nvalue: Value in string form as it was parsed from datafile.\ntype: Type denoting the feature flag type.\n\nReturn:\nValue type-casted based on type of feature variable.", "source": "juraj-google-style"}
{"code": "def get_trace(self, trace_id, project_id=None):\n        \n        if project_id is None:\n            project_id = self.project\n\n        return self.trace_api.get_trace(project_id=project_id, trace_id=trace_id)", "docstring": "Gets a single trace by its ID.\n\nArgs:\ntrace_id (str): ID of the trace to return.\n\nproject_id (str): Required. ID of the Cloud project where the trace\ndata is stored.\n\nReturns:\nA Trace dict.", "source": "juraj-google-style"}
{"code": "def set_external_captures(self, captures):\n    self._captured_inputs = captures", "docstring": "Updates the function capture values.\n\nThe new values must have tensor types and shapes consistent with the\noriginal captures of the concrete function, but it is allowed to change a\nvalue captured with a deferred one and vice-versa.\n\nArgs:\ncaptures: A list of tensors or closures. Tensors are value captures, and\nclosures are call-time (deferred captures).", "source": "github-repos"}
{"code": "def __truediv__(self, other):\n    raise TypeError(\"unsupported operand type(s) for /: 'Dimension' and '{}', please use", "docstring": "Use `__floordiv__` via `x // y` instead.\n\nThis function exists only to have a better error message. Instead of:\n`TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`,\nthis function will explicitly call for usage of `//` instead.\n\nArgs:\nother: Another `Dimension`.\n\nRaises:\nTypeError.", "source": "github-repos"}
{"code": "def _collect_feature_info(self, candidate_feature_diffs):\n        \n        project_root = self.project.path\n        for diff in candidate_feature_diffs:\n            path = diff.b_path\n            modname = relpath_to_modname(path)\n            modpath = project_root.joinpath(path)\n            importer = partial(import_module_at_path, modname, modpath)\n            yield importer, modname, modpath", "docstring": "Collect feature info\n\nArgs:\ncandidate_feature_diffs (List[git.diff.Diff]): list of Diffs\ncorresponding to admissible file changes compared to\ncomparison ref\n\nReturns:\nList[Tuple]: list of tuple of importer, module name, and module\npath. The \"importer\" is a callable that returns a module", "source": "juraj-google-style"}
{"code": "def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):\n  \n  for flag_name in flag_names:\n    mark_flag_as_required(flag_name, flag_values)", "docstring": "Ensures that flags are not None during program execution.\n\nRecommended usage:\n\nif __name__ == '__main__':\nflags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])\napp.run()\n\nArgs:\nflag_names: Sequence[str], names of the flags.\nflag_values: flags.FlagValues, optional FlagValues instance where the flags\nare defined.\nRaises:\nAttributeError: If any of flag name has not already been defined as a flag.", "source": "juraj-google-style"}
{"code": "def __init__(self, state=None):\n    \n    self._state = state\n\n    self.job_config = map_job_config.JobConfig._to_map_job_config(\n        state.mapreduce_spec,\n        queue_name=state.mapreduce_spec.params.get(\"queue_name\"))", "docstring": "Init the job instance representing the job with id job_id.\n\nDo not directly call this method. Use class methods to construct\nnew instances.\n\nArgs:\nstate: model.MapreduceState.", "source": "juraj-google-style"}
{"code": "def measure_topology(script):\n    filter_xml = '  <xmlfilter name=\"Compute Topological Measures\"/>\\n'\n    util.write_filter(script, filter_xml)\n    if isinstance(script, mlx.FilterScript):\n        script.parse_topology = True\n    return None", "docstring": "Compute a set of topological measures over a mesh\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def get_history_by_flight_number(self, flight_number, page=1, limit=100):\n    url = FLT_BASE.format(flight_number, str(self.AUTH_TOKEN), page, limit)\n    return self._fr24.get_data(url)", "docstring": "Fetch the history of a flight by its number.\n\nThis method can be used to get the history of a flight route by the number.\nIt checks the user authentication and returns the data accordingly.\n\nArgs:\nflight_number (str): The flight number, e.g. AI101\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA list of dicts with the data; one dict for each row of data from flightradar24\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_history_by_flight_number('AI101')\nf.get_history_by_flight_number('AI101',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def reload(self, napps=None):\n    client = NAppsClient(self._config)\n    client.reload_napps(napps)", "docstring": "Reload a NApp or all NApps.\n\nArgs:\nnapps (list): NApp list to be reloaded.\nRaises:\nrequests.HTTPError: When there's a server error.", "source": "codesearchnet"}
{"code": "def softplus(x):\n    return math_ops.softplus(x)", "docstring": "Softplus activation function, `softplus(x) = log(exp(x) + 1)`.\n\nExample Usage:\n\n>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n>>> b = tf.keras.activations.softplus(a)\n>>> b.numpy()\narray([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,\n2.0000000e+01], dtype=float32)\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe softplus activation: `log(exp(x) + 1)`.", "source": "github-repos"}
{"code": "def extract(self, destination):\n    if os.path.exists(destination):\n        raise OSError(20, 'Destination exists', destination)\n    self.__extract_directory('.', self.files['files'], destination)", "docstring": "Extracts the contents of the archive to the specifed directory.\n\nArgs:\ndestination (str):\nPath to an empty directory to extract the files to.", "source": "codesearchnet"}
{"code": "def recall_at_precision(y_true, y_pred, precision):\n    \n    y_true, y_pred = _mask_value_nan(y_true, y_pred)\n    precision, recall, _ = skm.precision_recall_curve(y_true, y_pred)\n    return recall[np.searchsorted(precision - precision, 0)]", "docstring": "Recall at a certain precision threshold\n\nArgs:\ny_true: true labels\ny_pred: predicted labels\nprecision: resired precision level at which where to compute the recall", "source": "juraj-google-style"}
{"code": "def parse_pair_args(labels, argclass):\n  \n  label_data = set()\n  for arg in labels:\n    name, value = split_pair(arg, '=', nullable_idx=1)\n    label_data.add(argclass(name, value))\n  return label_data", "docstring": "Parse flags of key=value pairs and return a list of argclass.\n\nFor pair variables, we need to:\n* split the input into name=value pairs (value optional)\n* Create the EnvParam object\n\nArgs:\nlabels: list of 'key' or 'key=value' strings.\nargclass: Container class for args, must instantiate with argclass(k, v).\n\nReturns:\nlist of argclass objects.", "source": "juraj-google-style"}
{"code": "def end_day_to_datetime(end_day, config):\n    day_start_time = config['day_start']\n    day_end_time = get_day_end(config)\n    if (day_start_time == datetime.time(0, 0, 0)):\n        end = datetime.datetime.combine(end_day, day_end_time)\n    else:\n        end = (datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1))\n    return end", "docstring": "Convert a given end day to its proper datetime.\n\nThis is non trivial because of variable ``day_start``. We want to make sure\nthat even if an 'end day' is specified the actual point in time may reach into the following\nday.\n\nArgs:\nend (datetime.date): Raw end date that is to be adjusted.\nconfig: Controller config containing information on when a workday starts.\n\nReturns:\ndatetime.datetime: The endday as a adjusted datetime object.\n\nExample:\nGiven a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to\nconsider even points in time up to ``2015-04-02 5:29``. That is to represent that a\n*work day*\ndoes not match *calendar days*.\n\nNote:\nAn alternative implementation for the similar problem in legacy hamster:\n``hamster.storage.db.Storage.__get_todays_facts``.", "source": "codesearchnet"}
{"code": "def remove_section(self, name):\n        \n        existed = self.has_section(name)\n        if existed:\n            idx = self._get_section_idx(name)\n            del self._structure[idx]\n        return existed", "docstring": "Remove a file section.\n\nArgs:\nname: name of the section\n\nReturns:\nbool: whether the section was actually removed", "source": "juraj-google-style"}
{"code": "def SetEnvironmentVariable(self, name, value):\n    if isinstance(value, py2to3.STRING_TYPES):\n        value = self._PathStripPrefix(value)\n    if (value is not None):\n        self._environment_variables[name.upper()] = value", "docstring": "Sets an environment variable in the Windows path helper.\n\nArgs:\nname (str): name of the environment variable without enclosing\n%-characters, e.g. SystemRoot as in %SystemRoot%.\nvalue (str): value of the environment variable.", "source": "codesearchnet"}
{"code": "def download_and_uncompress(self, fileobj, dst_path):\n    \n    try:\n      with tarfile.open(mode=\"r|*\", fileobj=fileobj) as tgz:\n        for tarinfo in tgz:\n          abs_target_path = _merge_relative_path(dst_path, tarinfo.name)\n\n          if tarinfo.isfile():\n            self._extract_file(tgz, tarinfo, abs_target_path)\n          elif tarinfo.isdir():\n            tf_v1.gfile.MakeDirs(abs_target_path)\n          else:\n            \n            raise ValueError(\n                \"Unexpected object type in tar archive: %s\" % tarinfo.type)\n\n        total_size_str = tf_utils.bytes_to_readable_str(\n            self._total_bytes_downloaded, True)\n        self._print_download_progress_msg(\n            \"Downloaded %s, Total size: %s\" % (self._url, total_size_str),\n            flush=True)\n    except tarfile.ReadError:\n      raise IOError(\"%s does not appear to be a valid module.\" % self._url)", "docstring": "Streams the content for the 'fileobj' and stores the result in dst_path.\n\nArgs:\nfileobj: File handle pointing to .tar/.tar.gz content.\ndst_path: Absolute path where to store uncompressed data from 'fileobj'.\n\nRaises:\nValueError: Unknown object encountered inside the TAR file.", "source": "juraj-google-style"}
{"code": "def request_and_check(self, url, method='get',\n                          expected_content_type=None, **kwargs):\n        \n        assert method in ['get', 'post']\n        result = self.driver.request(method, url, **kwargs)\n        if result.status_code != requests.codes.ok:\n            raise RuntimeError('Error requesting %r, status = %d' %\n                               (url, result.status_code))\n        if expected_content_type is not None:\n            content_type = result.headers.get('content-type', '')\n            if not re.match(expected_content_type, content_type):\n                raise RuntimeError(\n                    'Error requesting %r, content type %r does not match %r' %\n                    (url, content_type, expected_content_type))\n        return result", "docstring": "Performs a request, and checks that the status is OK, and that the\ncontent-type matches expectations.\n\nArgs:\nurl: URL to request\nmethod: either 'get' or 'post'\nexpected_content_type: prefix to match response content-type against\n**kwargs: passed to the request method directly.\n\nRaises:\nRuntimeError if status_code does not match.", "source": "juraj-google-style"}
{"code": "def parse_exception(line):\n    \n    m = RAISES_REGEX.match(line)\n    if m is None:\n        raise CartoucheSyntaxError('Cartouche: Invalid argument syntax \"{line}\" for Raises block'.format(line=line))\n    return m.group(2), m.group(1)", "docstring": "Parse the first line of a Cartouche exception description.\n\nArgs:\nline (str): A single line Cartouche exception description.\n\nReturns:\nA 2-tuple containing the exception type and the first line of the description.", "source": "juraj-google-style"}
{"code": "def from_json(cls, json):\n    mapreduce_spec = cls(json['name'], json['mapreduce_id'], json['mapper_spec'], json.get('params'), json.get('hooks_class_name'))\n    return mapreduce_spec", "docstring": "Create new MapreduceSpec from the json, encoded by to_json.\n\nArgs:\njson: json representation of MapreduceSpec.\n\nReturns:\nan instance of MapreduceSpec with all data deserialized from json.", "source": "codesearchnet"}
{"code": "def _init_volume_service(self, version):\n        \n        volume_cfg = self._load_config_section(CONFIG_VOLUME_SECTION)\n        self._token_volume = volume_cfg[CONFIG_TOKEN]\n        proto = volume_cfg[CONFIG_PROTOCOL]\n        host = volume_cfg[CONFIG_HOST]\n\n        self._volume = VolumeService(host, version)\n        self._volume.base_protocol = proto\n        self._volume.set_auth(self._token_volume)", "docstring": "Method to initialize the Volume Service from the config data\n\nArgs:\nversion (string): Version of Boss API to use.\n\nReturns:\nNone\n\nRaises:\n(KeyError): if given invalid version.", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _get_dtype(dtype)\n    if not dtype.is_floating and (not dtype.is_integer):\n        raise ValueError('Expected float or integer dtype, got %s.' % dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point and integer\ntypes are supported. If not specified,\n`tf.keras.backend.floatx()` is used,\nwhich default to `float32` unless you configured it otherwise\n(via `tf.keras.backend.set_floatx(float_dtype)`).\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def _CreateDictReader(self, line_reader):\n    delimiter = self.DELIMITER\n    quotechar = self.QUOTE_CHAR\n    magic_test_string = self._MAGIC_TEST_STRING\n    if py2to3.PY_3:\n        delimiter = delimiter.decode(self._encoding)\n        quotechar = quotechar.decode(self._encoding)\n        magic_test_string = magic_test_string.decode(self._encoding)\n    return csv.DictReader(line_reader, delimiter=delimiter, fieldnames=self.COLUMNS, quotechar=quotechar, restkey=magic_test_string, restval=magic_test_string)", "docstring": "Returns a reader that processes each row and yields dictionaries.\n\ncsv.DictReader does this job well for single-character delimiters; parsers\nthat need multi-character delimiters need to override this method.\n\nArgs:\nline_reader (iter): yields lines from a file-like object.\n\nReturns:\niter: a reader of dictionaries, as returned by csv.DictReader().", "source": "codesearchnet"}
{"code": "def run_numerical_analysis(table, schema_list, args):\n  \n  import google.datalab.bigquery as bq\n\n  \n  numerical_columns = []\n  for col_schema in schema_list:\n    col_type = col_schema['type'].lower()\n    if col_type == 'integer' or col_type == 'float':\n      numerical_columns.append(col_schema['name'])\n\n  \n  if numerical_columns:\n    sys.stdout.write('Running numerical analysis...')\n    max_min = [\n        ('max({name}) as max_{name}, '\n         'min({name}) as min_{name}, '\n         'avg({name}) as avg_{name} ').format(name=name)\n        for name in numerical_columns]\n    if args.bigquery_table:\n      sql = 'SELECT  %s from `%s`' % (', '.join(max_min), parse_table_name(args.bigquery_table))\n      numerical_results = bq.Query(sql).execute().result().to_dataframe()\n    else:\n      sql = 'SELECT  %s from csv_table' % ', '.join(max_min)\n      query = bq.Query(sql, data_sources={'csv_table': table})\n      numerical_results = query.execute().result().to_dataframe()\n\n    \n    results_dict = {}\n    for name in numerical_columns:\n      results_dict[name] = {'max': numerical_results.iloc[0]['max_%s' % name],\n                            'min': numerical_results.iloc[0]['min_%s' % name],\n                            'mean': numerical_results.iloc[0]['avg_%s' % name]}\n\n    file_io.write_string_to_file(\n        os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),\n        json.dumps(results_dict, indent=2, separators=(',', ': ')))\n\n    sys.stdout.write('done.\\n')", "docstring": "Find min/max values for the numerical columns and writes a json file.\n\nArgs:\ntable: Reference to FederatedTable (if bigquery_table is false) or a\nregular Table (otherwise)\nschema_list: Bigquery schema json object\nargs: the command line args", "source": "juraj-google-style"}
{"code": "def save_hdf5(X, y, path):\n    with h5py.File(path, 'w') as f:\n        is_sparse = (1 if sparse.issparse(X) else 0)\n        f['issparse'] = is_sparse\n        f['target'] = y\n        if is_sparse:\n            if (not sparse.isspmatrix_csr(X)):\n                X = X.tocsr()\n            f['shape'] = np.array(X.shape)\n            f['data'] = X.data\n            f['indices'] = X.indices\n            f['indptr'] = X.indptr\n        else:\n            f['data'] = X", "docstring": "Save data as a HDF5 file.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector.\npath (str): Path to the HDF5 file to save data.", "source": "codesearchnet"}
{"code": "def create_from(cls, has_display_data):\n    if not isinstance(has_display_data, HasDisplayData):\n        raise ValueError('Element of class {}.{} does not subclass HasDisplayData'.format(has_display_data.__module__, has_display_data.__class__.__name__))\n    return cls(has_display_data._get_display_data_namespace(), has_display_data.display_data())", "docstring": "Creates :class:`~apache_beam.transforms.display.DisplayData` from a\n:class:`HasDisplayData` instance.\n\nReturns:\n~apache_beam.transforms.display.DisplayData:\nA :class:`~apache_beam.transforms.display.DisplayData` instance with\npopulated items.\n\nRaises:\nValueError: If the **has_display_data** argument is\nnot an instance of :class:`HasDisplayData`.", "source": "github-repos"}
{"code": "def require_config(config_model):\n    \n    def _decorator(func):\n        \n        @wraps(func)\n        def _inner(*args, **kwargs):\n            \n            if not config_model.current().enabled:\n                return HttpResponseNotFound()\n            return func(*args, **kwargs)\n        return _inner\n    return _decorator", "docstring": "View decorator that enables/disables a view based on configuration.\n\nArguments:\nconfig_model (ConfigurationModel subclass): The class of the configuration\nmodel to check.\n\nReturns:\nHttpResponse: 404 if the configuration model is disabled,\notherwise returns the response from the decorated view.", "source": "juraj-google-style"}
{"code": "def filter_curated_references(root, head, update):\n    if (('references' not in head) or ('references' not in update)):\n        return (root, head, update)\n    references_curated = are_references_curated(root.get('references', []), head.get('references', []))\n    if ('references' in root):\n        root = root.remove('references')\n    if references_curated:\n        update = update.remove('references')\n    else:\n        head = head.remove('references')\n    return (root, head, update)", "docstring": "Remove references from either ``head`` or ``update`` depending on curation.\n\nIf references have been curated, then it removes all references from the\nupdate to keep the existing ones. Otherwise, it removes all references from\nthe head to force replacement with the update ones.\n\nArgs:\nroot (pmap): the root record.\nhead (pmap): the head record.\nupdate (pmap): the update record.\n\nReturns:\ntuple: ``(root, head, update)`` with ``references`` removed from ``root`` and either\n``head`` or ``update``.", "source": "codesearchnet"}
{"code": "def tokenize(self, text: str, customize=True, disable=[]) -> List[Token]:\n        \n\n        \n        if not self.keep_multi_space:\n            text = re.sub(' +', ' ', text)\n        \n        tokens = self.nlp(text, disable=disable)\n        if customize:\n            tokens = [self.custom_token(a_token) for a_token in tokens]\n\n        return tokens", "docstring": "Tokenize the given text, returning a list of tokens. Type token: class spacy.tokens.Token\n\nArgs:\ntext (string):\n\nReturns: [tokens]", "source": "juraj-google-style"}
{"code": "def from_dict(cls, fields, mapping):\n        \n        iterable = [None] * len(fields)\n        for key, value in mapping.items():\n            try:\n                index = fields.index(key)\n            except KeyError:\n                raise ItsdbError('Invalid field name(s): ' + key)\n            iterable[index] = value\n        return cls(fields, iterable)", "docstring": "Create a Record from a dictionary of field mappings.\n\nThe *fields* object is used to determine the column indices\nof fields in the mapping.\n\nArgs:\nfields: the Relation schema for the table of this record\nmapping: a dictionary or other mapping from field names to\ncolumn values\nReturns:\na :class:`Record` object", "source": "juraj-google-style"}
{"code": "def _step(time, output_ta_t, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = tree.pack_sequence_as(inputs, current_input)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_new_state = tree.flatten(new_states)\n    flat_output = tree.flatten(output)\n    ta_index_to_write = time if return_all_outputs else 0\n    output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_output)))\n    new_states = tree.pack_sequence_as(initial_states, flat_new_state)\n    return (time + 1, output_ta_t) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1,output_ta_t) + tuple(new_states)`", "source": "github-repos"}
{"code": "def SetEventTag(self, event_tag):\n    event_identifier = event_tag.GetEventIdentifier()\n    lookup_key = event_identifier.CopyToString()\n    self._index[lookup_key] = event_tag.GetIdentifier()", "docstring": "Sets an event tag in the index.\n\nArgs:\nevent_tag (EventTag): event tag.", "source": "codesearchnet"}
{"code": "def GetSoapXMLForComplexType(self, type_name, value):\n    \n    schema = self.suds_client.wsdl.schema\n    definition_type = schema.elements[(type_name, self._namespace_override)]\n    marshaller = suds.mx.literal.Literal(schema)\n    content = suds.mx.Content(\n        tag=type_name, value=value,\n        name=type_name, type=definition_type)\n    data = marshaller.process(content)\n    return data", "docstring": "Return an XML string representing a SOAP complex type.\n\nArgs:\ntype_name: The name of the type with namespace prefix if necessary.\nvalue: A python dictionary to hydrate the type instance with.\n\nReturns:\nA string containing the SOAP XML for the type.", "source": "juraj-google-style"}
{"code": "def flag_all(self, thresh_dict=None, include=None, exclude=None):\n        \n        if thresh_dict is None:\n            thresh_dict = {}\n        row_idx = set()\n        col_idx = set()\n        include = self.results if include is None else include\n        include = list(\n            set(include) - set(exclude)) if exclude is not None else include\n        for diagnostic in include:\n            if diagnostic in thresh_dict:\n                flagged = self.flag(diagnostic, thresh_dict[diagnostic])\n            else:\n                flagged = self.flag(diagnostic)\n\n            if diagnostic == 'RowMahalanobisDistances':\n                row_idx = row_idx.union(flagged)\n            else:\n                col_idx = col_idx.union(flagged)\n\n        return sorted(list(row_idx)), sorted(list(col_idx))", "docstring": "Returns indices of (rows, columns) that satisfy flag() on any\ndiagnostic. Uses user-provided thresholds in thresh_dict/\n\nArgs:\nthresh_dict (dict): dictionary of diagnostic->threshold functions\ninclude (list): optional sublist of diagnostics to flag\nexclude (list): optional sublist of diagnostics to not flag", "source": "juraj-google-style"}
{"code": "def write_transcriptions(utterances: List[Utterance], tgt_dir: Path, ext: str, lazy: bool) -> None:\n    tgt_dir.mkdir(parents=True, exist_ok=True)\n    for utter in utterances:\n        out_path = (tgt_dir / '{}.{}'.format(utter.prefix, ext))\n        if (lazy and out_path.is_file()):\n            continue\n        with out_path.open('w') as f:\n            print(utter.text, file=f)", "docstring": "Write the utterance transcriptions to files in the tgt_dir. Is lazy and\nchecks if the file already exists.\n\nArgs:\nutterances: A list of Utterance objects to be written.\ntgt_dir: The directory in which to write the text of the utterances,\none file per utterance.\next: The file extension for the utterances. Typically something like\n\"phonemes\", or \"phonemes_and_tones\".", "source": "codesearchnet"}
{"code": "def get_tracks_for_album(self, artist, album, full_album_art_uri=False):\n        \n        subcategories = [artist, album]\n        result = self.get_album_artists(\n            full_album_art_uri=full_album_art_uri,\n            subcategories=subcategories,\n            complete_result=True)\n        result._metadata['search_type'] = 'tracks_for_album'\n        return result", "docstring": "Get the tracks of an artist's album.\n\nArgs:\nartist (str): an artist's name.\nalbum (str): an album name.\nfull_album_art_uri: whether the album art URI should be\nabsolute (i.e. including the IP address). Default `False`.\n\nReturns:\nA `SearchResult` instance.", "source": "juraj-google-style"}
{"code": "def on_delete(self, req, resp, handler=None, **kwargs):\n    self.handle((handler or self.delete), req, resp, **kwargs)\n    resp.status = falcon.HTTP_ACCEPTED", "docstring": "Respond on DELETE HTTP request assuming resource deletion flow.\n\nThis request handler assumes that DELETE requests are associated with\nresource deletion. Thus default flow for such requests is:\n\n* Delete existing resource instance.\n* Set response status code to ``202 Accepted``.\n\nArgs:\nreq (falcon.Request): request object instance.\nresp (falcon.Response): response object instance to be modified\nhandler (method): deletion method handler to be called. Defaults\nto ``self.delete``.\n**kwargs: additional keyword arguments retrieved from url template.", "source": "codesearchnet"}
{"code": "def _project_TH3(self, hist: Hist) -> Any:\n    if ((len(self.projection_axes) < 1) or (len(self.projection_axes) > 2)):\n        raise ValueError(len(self.projection_axes), 'Invalid number of axes')\n    projection_axis_name = ''\n    for axis in self.projection_axes:\n        proj_axis_name = axis.axis_type.name[:1]\n        if (proj_axis_name not in ['x', 'y', 'z']):\n            raise ValueError(f\"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration.\")\n        projection_axis_name += proj_axis_name\n    if (len(self.projection_axes) == 2):\n        projection_axis_name = projection_axis_name[::(- 1)]\n    logger.info(f'Projecting onto axes \"{projection_axis_name}\" from hist {hist.GetName()}')\n    projected_hist = hist.Project3D(projection_axis_name)\n    return projected_hist", "docstring": "Perform the actual TH3 -> TH1 projection.\n\nThis projection could be to 1D or 2D.\n\nArgs:\nhist (ROOT.TH3): Histogram from which the projections should be performed.\nReturns:\nROOT.TH1: The projected histogram.", "source": "codesearchnet"}
{"code": "def load_stopwords(self, path):\n\n        \n\n        if path:\n            with open(path) as f:\n                self.stopwords = set(f.read().splitlines())\n\n        else:\n            self.stopwords = set(\n                pkgutil\n                .get_data('textplot', 'data/stopwords.txt')\n                .decode('utf8')\n                .splitlines()\n            )", "docstring": "Load a set of stopwords.\n\nArgs:\npath (str): The stopwords file path.", "source": "juraj-google-style"}
{"code": "def _collapse_state(args: Dict[(str, Any)]):\n    index = args['index']\n    result = args['result']\n    prob_one = args['prob_one']\n    state = _state_shard(args)\n    normalization = np.sqrt((prob_one if result else (1 - prob_one)))\n    state *= ((_one_projector(args, index) * result) + ((1 - _one_projector(args, index)) * (1 - result)))\n    state /= normalization", "docstring": "Projects state shards onto the appropriate post measurement state.\n\nThis function makes no assumptions about the interpretation of quantum\ntheory.\n\nArgs:\nargs: The args from shard_num_args.", "source": "codesearchnet"}
{"code": "def org(self, notification_type, priority='Low'):\n        \n        self._notification_type = notification_type\n        self._recipients = None\n        self._priority = priority\n        self._is_organization = True", "docstring": "Set vars for the passed in data. Used for org notification.\n\n.. code-block:: javascript\n\n{\n\"notificationType\": notification_type,\n\"priority\": priority\n\"isOrganization\": true\n}\n\nArgs:\nnotification_type (str): The notification type.\npriority (str): The priority: Low, Medium, High.", "source": "juraj-google-style"}
{"code": "class DynamicBackend:\n\n    def __init__(self, backend=None):\n        self._backend = backend or backend_module.backend()\n\n    def set_backend(self, backend):\n        if backend not in ('tensorflow', 'jax', 'torch', 'numpy', 'openvino'):\n            raise ValueError(f\"Available backends are ('tensorflow', 'jax', 'torch', 'numpy' and 'openvino'). Received: backend={backend}\")\n        self._backend = backend\n\n    def reset(self):\n        self._backend = backend_module.backend()\n\n    @property\n    def name(self):\n        return self._backend\n\n    def __getattr__(self, name):\n        if self._backend == 'tensorflow':\n            module = importlib.import_module('keras.src.backend.tensorflow')\n            return getattr(module, name)\n        if self._backend == 'jax':\n            module = importlib.import_module('keras.src.backend.jax')\n            return getattr(module, name)\n        if self._backend == 'torch':\n            module = importlib.import_module('keras.src.backend.torch')\n            return getattr(module, name)\n        if self._backend == 'numpy':\n            if backend_module.backend() == 'numpy':\n                return getattr(backend_module, name)\n            else:\n                raise NotImplementedError('Currently, we cannot dynamically import the numpy backend because it would disrupt the namespace of the import.')\n        if self._backend == 'openvino':\n            module = importlib.import_module('keras.src.backend.openvino')\n            return getattr(module, name)", "docstring": "A class that can be used to switch from one backend to another.\n\nExample:\n\n```python\nbackend = DynamicBackend(\"tensorflow\")\ny = backend.square(tf.constant(...))\nbackend.set_backend(\"jax\")\ny = backend.square(jax.numpy.array(...))\n```\n\nArgs:\nbackend: Initial backend to use (string).", "source": "github-repos"}
{"code": "def delete_session_tensor(handle, name=None):\n    handle_device = TensorHandle._get_device_name(handle)\n    with ops.device(handle_device):\n        holder = array_ops.placeholder(dtypes.string)\n        deleter = gen_data_flow_ops.delete_session_tensor(holder, name=name)\n    return (holder, deleter)", "docstring": "Delete the tensor for the given tensor handle.\n\nThis is EXPERIMENTAL and subject to change.\n\nDelete the tensor of a given tensor handle. The tensor is produced\nin a previous run() and stored in the state of the session.\n\nArgs:\nhandle: The string representation of a persistent tensor handle.\nname: Optional name prefix for the return tensor.\n\nReturns:\nA pair of graph elements. The first is a placeholder for feeding a\ntensor handle and the second is a deletion operation.", "source": "github-repos"}
{"code": "def load_virt_stream(virt_fd):\n    \n    try:\n        virt_conf = json.load(virt_fd)\n    except ValueError:\n        virt_fd.seek(0)\n        virt_conf = yaml.load(virt_fd)\n\n    return deepcopy(virt_conf)", "docstring": "Loads the given conf stream into a dict, trying different formats if\nneeded\n\nArgs:\nvirt_fd (str): file like objcect with the virt config to load\n\nReturns:\ndict: Loaded virt config", "source": "juraj-google-style"}
{"code": "def declarations(cls, extra_defs=None):\n        \n        warnings.warn(\n            \"Factory.declarations is deprecated; use Factory._meta.pre_declarations instead.\",\n            DeprecationWarning,\n            stacklevel=2,\n        )\n        decls = cls._meta.pre_declarations.as_dict()\n        decls.update(extra_defs or {})\n        return decls", "docstring": "Retrieve a copy of the declared attributes.\n\nArgs:\nextra_defs (dict): additional definitions to insert into the\nretrieved DeclarationDict.", "source": "juraj-google-style"}
{"code": "def post(self, path, body, headers=None):\n        \n\n        response = requests.post(\n            self._url_for(path),\n            data=json.dumps(body),\n            headers=self._headers(headers)\n        )\n        self._handle_errors(response)\n        return response", "docstring": "Perform a POST request, providing a body, which will be JSON-encoded.\n\nArgs:\npath (str): A path that gets appended to ``base_url``.\nbody (dict): Dictionary that will be JSON-encoded and sent as the body.\n\nExample:\napi_client.post('/users', body={'name': 'Billy Jean'})\n\nReturns:\nA requests ``Response`` object.", "source": "juraj-google-style"}
{"code": "def merge(self, status: 'Status[Input, Output]') -> 'Status[Input, Output]':\n        \n        if status is None or status.farthest is None:\n            \n            pass\n        elif self.farthest is None:\n            \n            self.farthest = status.farthest\n            self.expected = status.expected\n        elif status.farthest.position < self.farthest.position:\n            \n            pass\n        elif status.farthest.position > self.farthest.position:\n            \n            self.farthest = status.farthest\n            self.expected = status.expected\n        else:\n            \n            self.expected = status.expected + self.expected\n\n        return self", "docstring": "Merge the failure message from another status into this one.\n\nWhichever status represents parsing that has gone the farthest is\nretained. If both statuses have gone the same distance, then the\nexpected values from both are retained.\n\nArgs:\nstatus: The status to merge into this one.\n\nReturns:\nThis ``Status`` which may have ``farthest`` and ``expected``\nupdated accordingly.", "source": "juraj-google-style"}
{"code": "def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):\n    \n    if not binary_data_value:\n      return None\n\n    try:\n      return binary_data_value.decode('utf-8')\n    except UnicodeDecodeError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid binary data string value: {0:s}'.format(\n              repr(binary_data_value)))\n      return None", "docstring": "Parses a binary data value as string\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nbinary_data_value (bytes): binary data value\n(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)\n\nReturns:\nstr: binary data value formatted as a string or None if no string could\nbe extracted or binary data value is None (NULL).", "source": "juraj-google-style"}
{"code": "def __init__(self, options, log):\n        \n        self.options = options\n        self.log = log\n        self.compile_pattern()", "docstring": "Initializer.  Subclass may override.\n\nArgs:\noptions: an dict containing the options passed to RefactoringTool\nthat could be used to customize the fixer through the command line.\nlog: a list to append warnings and other messages to.", "source": "juraj-google-style"}
{"code": "def from_label(cls, label):\n        r\n        z = np.zeros(len(label), dtype=np.bool)\n        x = np.zeros(len(label), dtype=np.bool)\n        for i, char in enumerate(label):\n            if char == 'X':\n                x[-i - 1] = True\n            elif char == 'Z':\n                z[-i - 1] = True\n            elif char == 'Y':\n                z[-i - 1] = True\n                x[-i - 1] = True\n            elif char != 'I':\n                raise QiskitError(\"Pauli string must be only consisted of 'I', 'X', \"\n                                  \"'Y' or 'Z' but you have {}.\".format(char))\n        return cls(z=z, x=x)", "docstring": "r\"\"\"Take pauli string to construct pauli.\n\nThe qubit index of pauli label is q_{n-1} ... q_0.\nE.g., a pauli is $P_{n-1} \\otimes ... \\otimes P_0$\n\nArgs:\nlabel (str): pauli label\n\nReturns:\nPauli: the constructed pauli\n\nRaises:\nQiskitError: invalid character in the label", "source": "juraj-google-style"}
{"code": "def size(self, name=None):\n    with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]):\n        return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)", "docstring": "Compute the number of elements in this table.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA scalar tensor containing the number of elements in this table.", "source": "github-repos"}
{"code": "def plot_normal_cdf(rbound=None, lbound=None, mean=0, sd=1):\n    \n    shade = rbound is not None or lbound is not None\n    shade_left = rbound is not None and lbound is not None\n    inf = 3.5 * sd\n    step = 0.1\n    rlabel = rbound\n    llabel = lbound\n    if rbound is None:\n        rbound = inf + mean\n        rlabel = \"$\\infty$\"\n    if lbound is None:\n        lbound = -inf + mean\n        llabel = \"-$\\infty$\"\n    pdf_range = np.arange(-inf + mean, inf + mean, step)\n    plt.plot(pdf_range, stats.norm.pdf(pdf_range, loc=mean, scale=sd), color='k', lw=1)\n    cdf_range = np.arange(lbound, rbound + step, step)\n    if shade:\n        plt.fill_between(cdf_range, stats.norm.pdf(cdf_range, loc=mean, scale=sd), color='gold')\n    if shade_left:\n        cdf_range = np.arange(-inf+mean, lbound + step, step)\n        plt.fill_between(cdf_range, stats.norm.pdf(cdf_range, loc=mean, scale=sd), color='darkblue')\n    plt.ylim(0, stats.norm.pdf(0, loc=0, scale=sd) * 1.25)\n    plt.xlabel('z')\n    plt.ylabel('$\\phi$(z)', rotation=90)\n    plt.title(\"Normal Curve ~ ($\\mu$ = {0}, $\\sigma$ = {1}) \"\n              \"{2} < z < {3}\".format(mean, sd, llabel, rlabel), fontsize=16)\n    plt.show()", "docstring": "Plots a normal curve with specified parameters and area below curve shaded\nbetween ``lbound`` and ``rbound``.\n\nArgs:\n``rbound`` (numeric): right boundary of shaded region\n\n``lbound`` (numeric): left boundary of shaded region; by default is negative infinity\n\n``mean`` (numeric): mean/expectation of normal distribution\n\n``sd`` (numeric): standard deviation of normal distribution", "source": "juraj-google-style"}
{"code": "def __init__(self, open_id_valid, jwks_uri):\n        \n        self._open_id_valid = open_id_valid\n        self._jwks_uri = jwks_uri", "docstring": "Create an instance of IsserUriConfig.\n\nArgs:\nopen_id_valid: indicates whether the corresponding issuer is valid for\nOpenId discovery.\njwks_uri: is the saved jwks_uri. Its value can be None if the OpenId\ndiscovery process has not begun or has already failed.", "source": "juraj-google-style"}
{"code": "def update(self, *names: str) -> 'ListTree':\n    for name in names:\n        parts = name.split(self._delimiter)\n        self._root.add(*parts)\n    return self", "docstring": "Add all the mailbox names to the tree, filling in any missing nodes.\n\nArgs:\nnames: The names of the mailboxes.", "source": "codesearchnet"}
{"code": "def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting transposed convolution ...')\n    if (names == 'short'):\n        tf_name = ('C' + random_string(7))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    bias_name = '{0}.bias'.format(w_name)\n    weights_name = '{0}.weight'.format(w_name)\n    if (len(weights[weights_name].numpy().shape) == 4):\n        W = weights[weights_name].numpy().transpose(2, 3, 1, 0)\n        (height, width, n_filters, channels) = W.shape\n        n_groups = params['group']\n        if (n_groups > 1):\n            raise AssertionError('Cannot convert conv1d with groups != 1')\n        if (params['dilations'][0] > 1):\n            raise AssertionError('Cannot convert conv1d with dilation_rate != 1')\n        if (bias_name in weights):\n            biases = weights[bias_name].numpy()\n            has_bias = True\n        else:\n            biases = None\n            has_bias = False\n        input_name = inputs[0]\n        if has_bias:\n            weights = [W, biases]\n        else:\n            weights = [W]\n        conv = keras.layers.Conv2DTranspose(filters=n_filters, kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', output_padding=0, weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name)\n        layers[scope_name] = conv(layers[input_name])\n        layers[scope_name].set_shape(layers[scope_name]._keras_shape)\n        pads = params['pads']\n        if (pads[0] > 0):\n            assert ((len(pads) == 2) or ((pads[2] == pads[0]) and (pads[3] == pads[1])))\n            crop = keras.layers.Cropping2D(pads[:2], name=(tf_name + '_crop'))\n            layers[scope_name] = crop(layers[scope_name])\n    else:\n        raise AssertionError('Layer is not supported for now')", "docstring": "Convert transposed convolution layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def _initialize_slots(self, seed, hashvalues):\n    self.seed = seed\n    self.hashvalues = self._parse_hashvalues(hashvalues)", "docstring": "Initialize the slots of the LeanMinHash.\n\nArgs:\nseed (int): The random seed controls the set of random\npermutation functions generated for this LeanMinHash.\nhashvalues: The hash values is the internal state of the LeanMinHash.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, result):\n        \n        CodeExpression.__init__(self, scope, parent, '(default)', result)", "docstring": "Constructor for default arguments.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nresult (str): The return type of the argument in the program.", "source": "juraj-google-style"}
{"code": "def classes_in_module(module) -> List:\n    md = module.__dict__\n    return [md[c] for c in md if (isinstance(md[c], type) and issubclass(md[c], ETKModule) and (md[c].__module__ == module.__name__))]", "docstring": "Return all classes with super class ExtractionModule\n\nArgs:\nmodule:\n\nReturns: List of classes", "source": "codesearchnet"}
{"code": "def read(self, n, echo=None):\n        \n\n        d = self.channel.read(n)\n        if echo or (echo is None and self.echo):\n            sys.stdout.write(d.decode('latin1'))\n            sys.stdout.flush()\n        return d", "docstring": "Read *n* bytes from the channel.\n\nArgs:\nn(int): The number of bytes to read from the channel.\necho(bool): Whether to write the read data to stdout.\n\nReturns:\nbytes: *n* bytes of data.\n\nRaises:\nEOFError: If the channel was closed.", "source": "juraj-google-style"}
{"code": "def _get_executor_init(self, workers):\n\n    def pool_fn(seqs):\n        pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed, get_worker_id_queue()))\n        _DATA_POOLS.add(pool)\n        return pool\n    return pool_fn", "docstring": "Gets the Pool initializer for multiprocessing.\n\nArgs:\nworkers: Number of works.\n\nReturns:\nA Function to initialize the pool", "source": "github-repos"}
{"code": "def quad_genz_keister_24 ( order ):\n    \n    order = sorted(GENZ_KEISTER_24.keys())[order]\n\n    abscissas, weights = GENZ_KEISTER_24[order]\n    abscissas = numpy.array(abscissas)\n    weights = numpy.array(weights)\n\n    weights /= numpy.sum(weights)\n    abscissas *= numpy.sqrt(2)\n\n    return abscissas, weights", "docstring": "Hermite Genz-Keister 24 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_24(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321  0.      1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "juraj-google-style"}
{"code": "def profile_args(_args):\n        \n        \n        if (\n            _args.get('app', {}).get('optional') is not None\n            or _args.get('app', {}).get('required') is not None\n        ):\n            \n            app_args_optional = _args.get('app', {}).get('optional', {})\n            app_args_required = _args.get('app', {}).get('required', {})\n            default_args = _args.get('default', {})\n            _args = {}\n            _args.update(app_args_optional)\n            _args.update(app_args_required)\n            _args.update(default_args)\n        elif _args.get('app') is not None and _args.get('default') is not None:\n            \n            app_args = _args.get('app', {})\n            default_args = _args.get('default', {})\n            _args = {}\n            _args.update(app_args)\n            _args.update(default_args)\n\n        return _args", "docstring": "Return args for v1, v2, or v3 structure.\n\nArgs:\n_args (dict): The args section from the profile.\n\nReturns:\ndict: A collapsed version of the args dict.", "source": "juraj-google-style"}
{"code": "def mkzip(archive, items, mode='w', save_full_paths=False):\n    close = False\n    try:\n        if (not isinstance(archive, zipfile.ZipFile)):\n            archive = zipfile.ZipFile(archive, mode, allowZip64=True)\n            close = True\n        logger.info('mkdzip: Creating %s, from: %s', archive.filename, items)\n        if isinstance(items, str):\n            items = [items]\n        for item in items:\n            item = os.path.abspath(item)\n            basename = os.path.basename(item)\n            if os.path.isdir(item):\n                for (root, directoires, filenames) in os.walk(item):\n                    for filename in filenames:\n                        path = os.path.join(root, filename)\n                        if save_full_paths:\n                            archive_path = path.encode('utf-8')\n                        else:\n                            archive_path = os.path.join(basename, path.replace(item, '').strip('\\\\/')).encode('utf-8')\n                        archive.write(path, archive_path)\n            elif os.path.isfile(item):\n                if save_full_paths:\n                    archive_name = item.encode('utf-8')\n                else:\n                    archive_name = basename.encode('utf-8')\n                archive.write(item, archive_name)\n        return True\n    except Exception as e:\n        logger.error(('Error occurred during mkzip: %s' % e))\n        return False\n    finally:\n        if close:\n            archive.close()", "docstring": "Recursively zip a directory.\n\nArgs:\narchive (zipfile.ZipFile or str): ZipFile object add to or path to the\noutput zip archive.\nitems (str or list of str): Single item or list of items (files and\ndirectories) to be added to zipfile.\nmode (str): w for create new and write a for append to.\nsave_full_paths (bool): Preserve full paths.", "source": "codesearchnet"}
{"code": "def increase_route_count(self, crawled_request):\n        \n\n        for route in self.__routing_options.routes:\n            if re.compile(route).match(crawled_request.url):\n                count_key = str(route) + crawled_request.method\n                \n                if count_key in self.__routing_count.keys():\n                    self.__routing_count[count_key] += 1\n                else:\n                    self.__routing_count[count_key] = 1\n\n                break", "docstring": "Increase the count that determines how many times a URL of a certain route has been crawled.\n\nArgs:\ncrawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route.", "source": "juraj-google-style"}
{"code": "def _get_entities(self, text, language=''):\n    \n    body = {\n        'document': {\n            'type': 'PLAIN_TEXT',\n            'content': text,\n        },\n        'encodingType': 'UTF32',\n    }\n    if language:\n      body['document']['language'] = language\n\n    request = self.service.documents().analyzeEntities(body=body)\n    response = request.execute()\n    result = []\n    for entity in response.get('entities', []):\n      mentions = entity.get('mentions', [])\n      if not mentions:\n        continue\n      entity_text = mentions[0]['text']\n      offset = entity_text['beginOffset']\n      for word in entity_text['content'].split():\n        result.append({'content': word, 'beginOffset': offset})\n        offset += len(word)\n    return result", "docstring": "Returns the list of entities retrieved from the given text.\n\nArgs:\ntext (str): Input text.\nlanguage (:obj:`str`, optional): Language code.\n\nReturns:\nList of entities.", "source": "juraj-google-style"}
{"code": "def design_stat_heating(self, value='Heating'):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `design_stat_heating`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `design_stat_heating`')\n        vals = set()\n        vals.add('Heating')\n        if (value not in vals):\n            raise ValueError('value {} is not an accepted value for field `design_stat_heating`'.format(value))\n    self._design_stat_heating = value", "docstring": "Corresponds to IDD Field `design_stat_heating`\n\nArgs:\nvalue (str): value for IDD Field `design_stat_heating`\nAccepted values are:\n- Heating\nDefault value: Heating\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _verify_required_claims_exist(jwt_claims):\n    \n    for claim_name in [u\"aud\", u\"exp\", u\"iss\", u\"sub\"]:\n        if claim_name not in jwt_claims:\n            raise suppliers.UnauthenticatedException(u'Missing \"%s\" claim' % claim_name)", "docstring": "Verifies that the required claims exist.\n\nArgs:\njwt_claims: the JWT claims to be verified.\n\nRaises:\nUnauthenticatedException: if some claim doesn't exist.", "source": "juraj-google-style"}
{"code": "def _GetPathSegmentIndexForOccurrenceWeights(self, occurrence_weights, value_weights):\n    largest_weight = occurrence_weights.GetLargestWeight()\n    if (largest_weight > 0):\n        occurrence_weight_indexes = occurrence_weights.GetIndexesForWeight(largest_weight)\n        number_of_occurrence_indexes = len(occurrence_weight_indexes)\n    else:\n        number_of_occurrence_indexes = 0\n    path_segment_index = None\n    if (number_of_occurrence_indexes == 0):\n        path_segment_index = self._GetPathSegmentIndexForValueWeights(value_weights)\n    elif (number_of_occurrence_indexes == 1):\n        path_segment_index = occurrence_weight_indexes[0]\n    else:\n        largest_weight = 0\n        for occurrence_index in occurrence_weight_indexes:\n            value_weight = value_weights.GetWeightForIndex(occurrence_index)\n            if ((not path_segment_index) or (largest_weight < value_weight)):\n                largest_weight = value_weight\n                path_segment_index = occurrence_index\n    return path_segment_index", "docstring": "Retrieves the index of the path segment based on occurrence weights.\n\nArgs:\noccurrence_weights: the occurrence weights object (instance of\n_PathSegmentWeights).\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.", "source": "codesearchnet"}
{"code": "def get_best_gain(mapping, candidate_mappings, weight_dict, instance_len, cur_match_num):\n    \n    largest_gain = 0\n    \n    use_swap = True\n    \n    node1 = None\n    \n    \n    node2 = None\n    \n    unmatched = set(range(instance_len))\n    \n    \n    for nid in mapping:\n        if nid in unmatched:\n            unmatched.remove(nid)\n    for i, nid in enumerate(mapping):\n        \n        for nm in unmatched:\n            if nm in candidate_mappings[i]:\n                \n                \n                if veryVerbose:\n                    print(\"Remap node\", i, \"from \", nid, \"to\", nm, file=DEBUG_LOG)\n                mv_gain = move_gain(mapping, i, nid, nm, weight_dict, cur_match_num)\n                if veryVerbose:\n                    print(\"Move gain:\", mv_gain, file=DEBUG_LOG)\n                    new_mapping = mapping[:]\n                    new_mapping[i] = nm\n                    new_match_num = compute_match(new_mapping, weight_dict)\n                    if new_match_num != cur_match_num + mv_gain:\n                        print(mapping, new_mapping, file=ERROR_LOG)\n                        print(\"Inconsistency in computing: move gain\", cur_match_num, mv_gain, new_match_num,\n                              file=ERROR_LOG)\n                if mv_gain > largest_gain:\n                    largest_gain = mv_gain\n                    node1 = i\n                    node2 = nm\n                    use_swap = False\n    \n    for i, m in enumerate(mapping):\n        for j in range(i + 1, len(mapping)):\n            m2 = mapping[j]\n            \n            \n            if veryVerbose:\n                print(\"Swap node\", i, \"and\", j, file=DEBUG_LOG)\n                print(\"Before swapping:\", i, \"-\", m, \",\", j, \"-\", m2, file=DEBUG_LOG)\n                print(mapping, file=DEBUG_LOG)\n                print(\"After swapping:\", i, \"-\", m2, \",\", j, \"-\", m, file=DEBUG_LOG)\n            sw_gain = swap_gain(mapping, i, m, j, m2, weight_dict, cur_match_num)\n            if veryVerbose:\n                print(\"Swap gain:\", sw_gain, file=DEBUG_LOG)\n                new_mapping = mapping[:]\n                new_mapping[i] = m2\n                new_mapping[j] = m\n                print(new_mapping, file=DEBUG_LOG)\n                new_match_num = compute_match(new_mapping, weight_dict)\n                if new_match_num != cur_match_num + sw_gain:\n                    print(mapping, new_mapping, file=ERROR_LOG)\n                    print(\"Inconsistency in computing: swap gain\", cur_match_num, sw_gain, new_match_num,\n                          file=ERROR_LOG)\n            if sw_gain > largest_gain:\n                largest_gain = sw_gain\n                node1 = i\n                node2 = j\n                use_swap = True\n    \n    cur_mapping = mapping[:]\n    if node1 is not None:\n        if use_swap:\n            if veryVerbose:\n                print(\"Use swap gain\", file=DEBUG_LOG)\n            temp = cur_mapping[node1]\n            cur_mapping[node1] = cur_mapping[node2]\n            cur_mapping[node2] = temp\n        else:\n            if veryVerbose:\n                print(\"Use move gain\", file=DEBUG_LOG)\n            cur_mapping[node1] = node2\n    else:\n        if veryVerbose:\n            print(\"no move/swap gain found\", file=DEBUG_LOG)\n    if veryVerbose:\n        print(\"Original mapping\", mapping, file=DEBUG_LOG)\n        print(\"Current mapping\", cur_mapping, file=DEBUG_LOG)\n    return largest_gain, cur_mapping", 
"docstring": "Hill-climbing method to return the best gain swap/move can get\nArguments:\nmapping: current node mapping\ncandidate_mappings: the candidates mapping list\nweight_dict: the weight dictionary\ninstance_len: the number of the nodes in AMR 2\ncur_match_num: current triple match number\nReturns:\nthe best gain we can get via swap/move operation", "source": "juraj-google-style"}
{"code": "def num_accumulated(self, name=None):\n    if name is None:\n        name = '%s_NumAccumulated' % self._name\n    return gen_data_flow_ops.accumulator_num_accumulated(self._accumulator_ref, name=name)", "docstring": "Number of gradients that have currently been aggregated in accumulator.\n\nArgs:\nname: Optional name for the operation.\n\nReturns:\nNumber of accumulated gradients currently in accumulator.", "source": "github-repos"}
{"code": "def gallery_section(images, title):\n    imgs = []\n    while True:\n        img = (yield marv.pull(images))\n        if (img is None):\n            break\n        imgs.append({'src': img.relpath})\n    if (not imgs):\n        return\n    widget = {'title': images.title, 'gallery': {'images': imgs}}\n    section = {'title': title, 'widgets': [widget]}\n    (yield marv.push(section))", "docstring": "Create detail section with gallery.\n\nArgs:\ntitle (str): Title to be displayed for detail section.\nimages: stream of marv image files\n\nReturns\nOne detail section.", "source": "codesearchnet"}
{"code": "def __strip_extra_attributes(self, node: yaml.Node, known_attrs: List[str]) -> None:\n    known_keys = list(known_attrs)\n    known_keys.remove('self')\n    if ('yatiml_extra' in known_keys):\n        known_keys.remove('yatiml_extra')\n    for (key_node, value_node) in node.value:\n        if ((not isinstance(key_node, yaml.ScalarNode)) or (key_node.tag != 'tag:yaml.org,2002:str')):\n            raise RecognitionError('{}{}Mapping keys that are not of type string are not supported by YAtiML.'.format(node.start_mark, os.linesep))\n        if (key_node.value not in known_keys):\n            self.__strip_tags(value_node)", "docstring": "Strips tags from extra attributes.\n\nThis prevents nodes under attributes that are not part of our \\\ndata model from being converted to objects. They'll be plain \\\nCommentedMaps instead, which then get converted to OrderedDicts \\\nfor the user.\n\nArgs:\nnode: The node to process\nknown_attrs: The attributes to not strip", "source": "codesearchnet"}
{"code": "def post(self, headers={}, body=''):\n    (code, message) = self.command('POST')\n    if (code != 340):\n        raise NNTPReplyError(code, message)\n    hdrs = utils.unparse_headers(headers)\n    self.socket.sendall(hdrs)\n    if isinstance(body, basestring):\n        body = cStringIO.StringIO(body)\n    illegal = False\n    for line in body:\n        if line.startswith('.'):\n            line = ('.' + line)\n        if line.endswith('\\r\\n'):\n            line = line[:(- 2)]\n        elif line.endswith('\\n'):\n            line = line[:(- 1)]\n        if any(((c in line) for c in '\\x00\\r')):\n            illegal = True\n            break\n        self.socket.sendall((line + '\\r\\n'))\n    self.socket.sendall('.\\r\\n')\n    (code, message) = self.status()\n    if illegal:\n        raise NNTPDataError('Illegal characters found')\n    if (code != 240):\n        raise NNTPReplyError(code, message)\n    message_id = message.split(None, 1)[0]\n    if (message_id.startswith('<') and message_id.endswith('>')):\n        return message_id\n    return True", "docstring": "POST command.\n\nArgs:\nheaders: A dictionary of headers.\nbody: A string or file like object containing the post content.\n\nRaises:\nNNTPDataError: If binary characters are detected in the message\nbody.\n\nReturns:\nA value that evaluates to true if posting the message succeeded.\n(See note for further details)\n\nNote:\n'\\\\n' line terminators are converted to '\\\\r\\\\n'\n\nNote:\nThough not part of any specification it is common for usenet servers\nto return the message-id for a successfully posted message. If a\nmessage-id is identified in the response from the server then that\nmessage-id will be returned by the function, otherwise True will be\nreturned.\n\nNote:\nDue to protocol issues if illegal characters are found in the body\nthe message will still be posted but will be truncated as soon as\nan illegal character is detected. No illegal characters will be sent\nto the server. For information illegal characters include embedded\ncarriage returns '\\\\r' and null characters '\\\\0' (because this\nfunction converts line feeds to CRLF, embedded line feeds are not an\nissue)", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, cls_token_id: Optional[int]=None, sep_token_id: Optional[int]=None) -> List[int]:\n    cls = [self.cls_token_id] if cls_token_id is None else [cls_token_id]\n    sep = [self.sep_token_id] if sep_token_id is None else [sep_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def handle_block(\n        mediator_state: MediatorTransferState,\n        state_change: Block,\n        channelidentifiers_to_channels: ChannelMap,\n        pseudo_random_generator: random.Random,\n) -> TransitionResult[MediatorTransferState]:\n    \n    expired_locks_events = events_to_remove_expired_locks(\n        mediator_state,\n        channelidentifiers_to_channels,\n        state_change.block_number,\n        pseudo_random_generator,\n    )\n\n    secret_reveal_events = events_for_onchain_secretreveal_if_dangerzone(\n        channelmap=channelidentifiers_to_channels,\n        secrethash=mediator_state.secrethash,\n        transfers_pair=mediator_state.transfers_pair,\n        block_number=state_change.block_number,\n        block_hash=state_change.block_hash,\n    )\n\n    unlock_fail_events = events_for_expired_pairs(\n        channelidentifiers_to_channels=channelidentifiers_to_channels,\n        transfers_pair=mediator_state.transfers_pair,\n        waiting_transfer=mediator_state.waiting_transfer,\n        block_number=state_change.block_number,\n    )\n\n    iteration = TransitionResult(\n        mediator_state,\n        unlock_fail_events + secret_reveal_events + expired_locks_events,\n    )\n\n    return iteration", "docstring": "After Raiden learns about a new block this function must be called to\nhandle expiration of the hash time locks.\nArgs:\nstate: The current state.\nReturn:\nTransitionResult: The resulting iteration", "source": "juraj-google-style"}
{"code": "def load_data(self):\n    units = ''\n    if (self.file_objects[0] is None):\n        raise IOError()\n    (var_name, z_index) = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys()))\n    ntimes = 0\n    if ('time' in self.file_objects[0].variables[var_name].dimensions):\n        ntimes = len(self.file_objects[0].dimensions['time'])\n    if (ntimes > 1):\n        if (z_index is None):\n            data = self.file_objects[0].variables[var_name][self.forecast_hours].astype(np.float32)\n        else:\n            data = self.file_objects[0].variables[var_name][(self.forecast_hours, z_index)].astype(np.float32)\n    else:\n        (y_dim, x_dim) = self.file_objects[0].variables[var_name].shape[(- 2):]\n        data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32)\n        for (f, file_object) in enumerate(self.file_objects):\n            if (file_object is not None):\n                if (z_index is None):\n                    data[f] = file_object.variables[var_name][0]\n                else:\n                    data[f] = file_object.variables[var_name][(0, z_index)]\n    if hasattr(self.file_objects[0].variables[var_name], 'units'):\n        units = self.file_objects[0].variables[var_name].units\n    return (data, units)", "docstring": "Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats.\n\nReturns:\nArray of data loaded from files in (time, y, x) dimensions, Units", "source": "codesearchnet"}
{"code": "def get_engine(self, filepath, kind=None):\n    if (not kind):\n        extension = os.path.splitext(filepath)[1]\n        if (not extension):\n            msg = 'Unable to discover settings format from an empty file extension: {}'\n            raise SettingsDiscoveryError(msg.format(filepath))\n        elif (extension[1:] not in self.extensions):\n            msg = 'Settings file extension is unknowed from available backends: {}'\n            raise SettingsDiscoveryError(msg.format(filepath))\n        kind = self.extensions[extension[1:]]\n    elif (kind not in self.engines):\n        msg = 'Given settings format is unknow: {}'\n        raise SettingsDiscoveryError(msg.format(kind))\n    return self.engines[kind]", "docstring": "From given filepath try to discover which backend format to use.\n\nDiscovering is pretty naive as it find format from file extension.\n\nArgs:\nfilepath (str): Settings filepath or filename.\n\nKeyword Arguments:\nkind (str): A format name to enforce a specific backend. Can be any\nvalue from attribute ``_kind_name`` of available backend\nengines.\n\nRaises:\nboussole.exceptions.SettingsDiscoveryError: If extension is\nunknowed or if given format name is unknowed.\n\nReturns:\nobject: Backend engine class.", "source": "codesearchnet"}
{"code": "def is_directory_v2(path):\n    try:\n        return _pywrap_file_io.IsDirectory(compat.path_to_bytes(path))\n    except errors.OpError:\n        return False", "docstring": "Returns whether the path is a directory or not.\n\nArgs:\npath: string, path to a potential directory\n\nReturns:\nTrue, if the path is a directory; False otherwise", "source": "github-repos"}
{"code": "def convert_prediction_values(values, serving_bundle, model_spec=None):\n    if (serving_bundle.model_type == 'classification'):\n        response = classification_pb2.ClassificationResponse()\n        for example_index in range(len(values)):\n            classification = response.result.classifications.add()\n            for class_index in range(len(values[example_index])):\n                class_score = classification.classes.add()\n                class_score.score = values[example_index][class_index]\n                class_score.label = str(class_index)\n    else:\n        response = regression_pb2.RegressionResponse()\n        for example_index in range(len(values)):\n            regression = response.result.regressions.add()\n            regression.value = values[example_index]\n    if model_spec:\n        response.model_spec.CopyFrom(model_spec)\n    return response", "docstring": "Converts tensor values into ClassificationResponse or RegressionResponse.\n\nArgs:\nvalues: For classification, a 2D list of numbers. The first dimension is for\neach example being predicted. The second dimension are the probabilities\nfor each class ID in the prediction. For regression, a 1D list of numbers,\nwith a regression score for each example being predicted.\nserving_bundle: A `ServingBundle` object that contains the information about\nthe serving request that the response was generated by.\nmodel_spec: Optional model spec to put into the response.\n\nReturns:\nA ClassificationResponse or RegressionResponse.", "source": "codesearchnet"}
{"code": "def filter_bboxes_by_visibility(original_shape, bboxes, transformed_shape, transformed_bboxes, threshold=0.0, min_area=0.0):\n    (img_height, img_width) = original_shape[:2]\n    (transformed_img_height, transformed_img_width) = transformed_shape[:2]\n    visible_bboxes = []\n    for (bbox, transformed_bbox) in zip(bboxes, transformed_bboxes):\n        if (not all(((0.0 <= value <= 1.0) for value in transformed_bbox[:4]))):\n            continue\n        bbox_area = calculate_bbox_area(bbox, img_height, img_width)\n        transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)\n        if (transformed_bbox_area < min_area):\n            continue\n        visibility = (transformed_bbox_area / bbox_area)\n        if (visibility >= threshold):\n            visible_bboxes.append(transformed_bbox)\n    return visible_bboxes", "docstring": "Filter bounding boxes and return only those boxes whose visibility after transformation is above\nthe threshold and minimal area of bounding box in pixels is more then min_area.\n\nArgs:\noriginal_shape (tuple): original image shape\nbboxes (list): original bounding boxes\ntransformed_shape(tuple): transformed image\ntransformed_bboxes (list): transformed bounding boxes\nthreshold (float): visibility threshold. Should be a value in the range [0.0, 1.0].\nmin_area (float): Minimal area threshold.", "source": "codesearchnet"}
{"code": "def inverse_transform(self, y, lengths=None):\n    y = np.argmax(y, (- 1))\n    inverse_y = [self._label_vocab.id2doc(ids) for ids in y]\n    if (lengths is not None):\n        inverse_y = [iy[:l] for (iy, l) in zip(inverse_y, lengths)]\n    return inverse_y", "docstring": "Return label strings.\n\nArgs:\ny: label id matrix.\nlengths: sentences length.\n\nReturns:\nlist: list of list of strings.", "source": "codesearchnet"}
{"code": "def partial_trace(tensor: np.ndarray, keep_indices: List[int]) -> np.ndarray:\n    ndim = (tensor.ndim \n    if (not all(((tensor.shape[i] == tensor.shape[(i + ndim)]) for i in range(ndim)))):\n        raise ValueError('Tensors must have shape (d_0,...,d_{{k-1}},d_0,...,d_{{k-1}}) but had shape ({}).'.format(tensor.shape))\n    if (not all(((i < ndim) for i in keep_indices))):\n        raise ValueError('keep_indices were {} but must be in first half, i.e. have index less that {}.'.format(keep_indices, ndim))\n    keep_set = set(keep_indices)\n    keep_map = dict(zip(keep_indices, sorted(keep_indices)))\n    left_indices = [(keep_map[i] if (i in keep_set) else i) for i in range(ndim)]\n    right_indices = [((ndim + i) if (i in keep_set) else i) for i in left_indices]\n    return np.einsum(tensor, (left_indices + right_indices))", "docstring": "Takes the partial trace of a given tensor.\n\nThe input tensor must have shape `(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`.\nThe trace is done over all indices that are not in keep_indices. The\nresulting tensor has shape `(d_{i_0}, ..., d_{i_r}, d_{i_0}, ..., d_{i_r})`\nwhere `i_j` is the `j`th element of `keep_indices`.\n\nArgs:\ntensor: The tensor to sum over. This tensor must have a shape\n`(d_0, ..., d_{k-1}, d_0, ..., d_{k-1})`.\nkeep_indices: Which indices to not sum over. These are only the indices\nof the first half of the tensors indices (i.e. all elements must\nbe between `0` and `tensor.ndims / 2 - 1` inclusive).\n\nRaises:\nValueError: if the tensor is not of the correct shape or the indices\nare not from the first half of valid indices for the tensor.", "source": "codesearchnet"}
{"code": "def _get_instance_attributes(self):\n    for (name, value) in self.__dict__.items():\n        if (name in map((lambda x: x[0]), self.get_class_attributes())):\n            (yield (name, value))", "docstring": "Return a generator for instance attributes' name and value.\n\n.. code-block:: python3\n\nfor _name, _value in self._get_instance_attributes():\nprint(\"attribute name: {}\".format(_name))\nprint(\"attribute value: {}\".format(_value))\n\nReturns:\ngenerator: tuples with attribute name and value.", "source": "codesearchnet"}
{"code": "def _set_label(self, which, label, **kwargs):\n    prop_default = {'fontsize': 18}\n    for (prop, default) in prop_default.items():\n        kwargs[prop] = kwargs.get(prop, default)\n    setattr(self.label, which, label)\n    setattr(self.label, (which + '_kwargs'), kwargs)\n    return", "docstring": "Private method for setting labels.\n\nArgs:\nwhich (str): The indicator of which part of the plots\nto adjust. This currently handles `xlabel`/`ylabel`,\nand `title`.\nlabel (str): The label to be added.\nfontsize (int, optional): Fontsize for associated label. Default\nis None.", "source": "codesearchnet"}
{"code": "def abs(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unary import abs\n    return abs(self)", "docstring": "Gets the absolute value of an [`EventSet`][temporian.EventSet]'s\nfeatures.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 3],\n...     features={\"M\":[np.nan, -1., 2.], \"N\":  [-1, -3, 5]},\n... )\n>>> a.abs()\nindexes: ...\n'M': [nan 1. 2.]\n'N': [1 3 5]\n...\n\n```\n\nReturns:\nEventSet with positive valued features.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(ObtainLeaseResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.LEASE_TIME, local_stream):\n        self._lease_time = primitives.Interval(tag=enums.Tags.LEASE_TIME)\n        self._lease_time.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.LAST_CHANGE_DATE, local_stream):\n        self._last_change_date = primitives.DateTime(tag=enums.Tags.LAST_CHANGE_DATE)\n        self._last_change_date.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the ObtainLease response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def add_dspam_headers(self, results):\n        \n        for header in self.headers:\n            hname = self.header_prefix + header\n            if header.lower() in results:\n                hvalue = results[header.lower()]\n                logger.debug(\n                    '<{}> Adding header {}: {}'.format(self.id, hname, hvalue))\n                self.addheader(hname, hvalue)\n            elif header == 'Processed':\n                \n                hvalue = datetime.datetime.now().strftime(\n                    '%a %b %d %H:%M:%S %Y')\n                logger.debug(\n                    '<{}> Adding header {}: {}'.format(self.id, hname, hvalue))\n                self.addheader(hname, hvalue)\n            else:\n                logger.warning(\n                    '<{}> Not adding header {}, no data available in '\n                    'DSPAM results'.format(self.id, hname))", "docstring": "Format DSPAM headers with passed results, and add them to the message.\n\nArgs:\nresults -- A results dictionary from DspamClient.", "source": "juraj-google-style"}
{"code": "def _SparseSliceGrad(op: ops.Operation, *grads):\n    backprop_val_grad = grads[1]\n    input_indices = op.inputs[0]\n    input_start = op.inputs[3]\n    output_indices = op.outputs[0]\n    val_grad = gen_sparse_ops.sparse_slice_grad(backprop_val_grad, input_indices, input_start, output_indices)\n    val_grad.set_shape(op.inputs[1].get_shape())\n    return (None, val_grad, None, None, None)", "docstring": "The backward operator for the SparseSlice op.\n\nThis op takes in the upstream gradient w.r.t. non-empty values of\nthe sliced `SparseTensor`, and outputs the gradients w.r.t.\nthe non-empty values of input `SparseTensor`.\n\nArgs:\nop: the SparseSlice op\n*grads: the incoming gradients, one element per output of `op`\n\nReturns:\nGradient for each of the 5 input tensors of SparseSlice:\n(indices, values, shape, start, size)\nThe gradients for the indices, shape, start and the size are None.", "source": "github-repos"}
{"code": "def regex(self, *patterns, **kwargs):\n        \n        start = kwargs.pop(\"start\", 0)\n        stop = kwargs.pop(\"stop\", None)\n        keys_only = kwargs.pop(\"keys_only\", False)\n        flags = kwargs.pop(\"flags\", 0)\n        results = {pattern: [] for pattern in patterns}\n        stop = stop if stop is not None else -1\n        for i, line in enumerate(self[start:stop]):\n            for pattern in patterns:\n                grps = re.search(pattern, line, flags=flags)\n                if grps and keys_only:\n                    results[pattern].append(i)\n                elif grps and grps.groups():\n                    for group in grps.groups():\n                        results[pattern].append((i, group))\n                elif grps:\n                    results[pattern].append((i, line))\n        if len(patterns) == 1:\n            return results[patterns[0]]\n        return results", "docstring": "Search the editor for lines matching the regular expression.\nre.MULTILINE is not currently supported.\n\nArgs:\n\\*patterns: Regular expressions to search each line for\nkeys_only (bool): Only return keys\nflags (re.FLAG): flags passed to re.search\n\nReturns:\nresults (dict): Dictionary of pattern keys, line values (or groups - default)", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.Dump = channel.unary_stream(\n        '/debug.Debug/Dump',\n        request_serializer=client_dot_debug_dot_debug__pb2.DumpRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )\n    self.Profile = channel.unary_stream(\n        '/debug.Debug/Profile',\n        request_serializer=client_dot_debug_dot_debug__pb2.ProfileRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )\n    self.Binary = channel.unary_stream(\n        '/debug.Debug/Binary',\n        request_serializer=client_dot_debug_dot_debug__pb2.BinaryRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent):\n    inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key)\n    outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key)\n    indent_str = '  ' * indent\n\n    def in_print(s):\n        print(indent_str + s)\n    in_print('The given SavedModel SignatureDef contains the following input(s):')\n    for input_key, input_tensor in sorted(inputs_tensor_info.items()):\n        in_print(\"  inputs['%s'] tensor_info:\" % input_key)\n        _print_tensor_info(input_tensor, indent + 1)\n    in_print('The given SavedModel SignatureDef contains the following output(s):')\n    for output_key, output_tensor in sorted(outputs_tensor_info.items()):\n        in_print(\"  outputs['%s'] tensor_info:\" % output_key)\n        _print_tensor_info(output_tensor, indent + 1)\n    in_print('Method name is: %s' % meta_graph_def.signature_def[signature_def_key].method_name)", "docstring": "Prints input and output TensorInfos.\n\nPrints the details of input and output TensorInfos for the SignatureDef mapped\nby the given signature_def_key.\n\nArgs:\nmeta_graph_def: MetaGraphDef to inspect.\nsignature_def_key: A SignatureDef key string.\nindent: How far (in increments of 2 spaces) to indent each line of output.", "source": "github-repos"}
{"code": "def read_blocks(file_path, start=0.0, end=float('inf'), buffer_size=5760000):\n    buffer = []\n    n_buffer = 0\n    n_samples = 0\n    with audioread.audio_open(file_path) as input_file:\n        n_channels = input_file.channels\n        sr_native = input_file.samplerate\n        start_sample = (int(np.round((sr_native * start))) * n_channels)\n        end_sample = end\n        if (end_sample != np.inf):\n            end_sample = (int(np.round((sr_native * end))) * n_channels)\n        for block in input_file:\n            block = librosa.util.buf_to_float(block)\n            n_prev = n_samples\n            n_samples += len(block)\n            if (n_samples < start_sample):\n                continue\n            if (n_prev > end_sample):\n                break\n            if (n_samples > end_sample):\n                block = block[:(end_sample - n_prev)]\n            if (n_prev <= start_sample <= n_samples):\n                block = block[(start_sample - n_prev):]\n            n_buffer += len(block)\n            buffer.append(block)\n            if (n_buffer >= buffer_size):\n                (yield process_buffer(buffer, n_channels))\n                buffer = []\n                n_buffer = 0\n        if (len(buffer) > 0):\n            (yield process_buffer(buffer, n_channels))", "docstring": "Read an audio file block after block. The blocks are yielded one by one.\n\nArgs:\nfile_path (str): Path to the file to read.\nstart (float): Start in seconds to read from.\nend (float): End in seconds to read to.\n``inf`` means to the end of the file.\nbuffer_size (int): Number of samples to load into memory at once and\nreturn as a single block. The exact number of loaded\nsamples depends on the block-size of the\naudioread library. So it can be of x higher,\nwhere the x is typically 1024 or 4096.\n\nReturns:\nGenerator: A generator yielding the samples for every block.", "source": "codesearchnet"}
{"code": "def metrics(expected_box_encodings, expected_scores, actual_box_encodings, actual_scores):\n    squashed_expected_scores = tf.math.divide(1.0, 1.0 + tf.math.exp(-expected_scores))\n    squashed_actual_scores = tf.math.divide(1.0, 1.0 + tf.math.exp(-actual_scores))\n    kld_metric = kl_divergence.symmetric_kl_divergence(expected_scores, actual_scores)\n    high_scoring_indices = tf.math.logical_or(tf.math.greater(squashed_expected_scores, 0.1), tf.math.greater(squashed_actual_scores, 0.1))\n    high_scoring_actual_boxes = tf.where(condition=tf.broadcast_to(input=high_scoring_indices, shape=tf.shape(actual_box_encodings)), x=actual_box_encodings, y=expected_box_encodings)\n    box_diff = high_scoring_actual_boxes - expected_box_encodings\n    box_squared_diff = tf.math.pow(box_diff, 2)\n    box_mse = tf.divide(tf.math.reduce_sum(box_squared_diff), tf.math.maximum(tf.math.count_nonzero(high_scoring_indices, dtype=tf.float32), 1.0))\n    ok = tf.logical_and(kld_metric < 0.1, box_mse < 0.01)\n    return [kld_metric, box_mse, ok]", "docstring": "Calculate metrics from expected and actual blazeface outputs.\n\nArgs:\nexpected_box_encodings: box encodings from model\nexpected_scores: classifications from model\nactual_box_encodings: golden box encodings\nactual_scores: golden classifications\n\nReturns:\ntwo-item list with classification error and localization error", "source": "github-repos"}
{"code": "def read_file(*components, **kwargs):\n    \n    must_exist = kwargs.get(\"must_exist\", True)\n\n    if must_exist:\n        path = fs.must_exist(*components)\n    else:\n        path = fs.path(*components)\n\n    try:\n        with open(path) as infile:\n            return loads(infile.read())\n    except ValueError as e:\n        raise ValueError(\n            \"malformed JSON file '{path}'. Message from parser: {err}\"\n            .format(path=fs.basename(path), err=str(e)))\n    except IOError as e:\n        if not must_exist:\n            return {}\n        else:\n            return e", "docstring": "Load a JSON data blob.\n\nArguments:\npath (str): Path to file.\nmust_exist (bool, otional): If False, return empty dict if file does\nnot exist.\n\nReturns:\narray or dict: JSON data.\n\nRaises:\nFile404: If path does not exist, and must_exist is True.\nInvalidFile: If JSON is malformed.", "source": "juraj-google-style"}
{"code": "def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:\n    \n    for name, observable in observables.items():\n        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:\n            return histogram.Histogram1D.from_existing_hist(observable.hist)\n\n    raise ValueError(\"Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}\")", "docstring": "Get a Histogram1D associated with the selected jet and track pt bins.\n\nThis is often used to retrieve data for fitting.\n\nArgs:\nobservables (dict): The observables from which the hist should be retrieved.\ntrack_pt_bin (int): Track pt bin of the desired hist.\njet_ptbin (int): Jet pt bin of the desired hist.\nReturns:\nHistogram1D: Converted TH1 or uproot histogram.\nRaises:\nValueError: If the requested observable couldn't be found.", "source": "juraj-google-style"}
{"code": "def encode(self, inputs, attention_bias):\n    \n    with tf.name_scope(\"encode\"):\n      \n      \n      embedded_inputs = self.embedding_softmax_layer(inputs)\n      inputs_padding = model_utils.get_padding(inputs)\n\n      with tf.name_scope(\"add_pos_encoding\"):\n        length = tf.shape(embedded_inputs)[1]\n        pos_encoding = model_utils.get_position_encoding(\n            length, self.params.hidden_size)\n        encoder_inputs = embedded_inputs + pos_encoding\n\n      if self.train:\n        mlperf_log.transformer_print(\n            key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,\n            value=self.params.layer_postprocess_dropout)\n        encoder_inputs = tf.nn.dropout(\n            encoder_inputs, 1 - self.params.layer_postprocess_dropout)\n\n      return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)", "docstring": "Generate continuous representation for inputs.\n\nArgs:\ninputs: int tensor with shape [batch_size, input_length].\nattention_bias: float tensor with shape [batch_size, 1, 1, input_length]\n\nReturns:\nfloat tensor with shape [batch_size, input_length, hidden_size]", "source": "juraj-google-style"}
{"code": "def Open(self, path, ascii_codepage='cp1252'):\n    path_specification = self._path_resolver.ResolvePath(path)\n    if (path_specification is None):\n        return None\n    return self._OpenPathSpec(path_specification)", "docstring": "Opens the Windows Registry file specified by the path.\n\nArgs:\npath (str): path of the Windows Registry file.\nascii_codepage (Optional[str]): ASCII string codepage.\n\nReturns:\nWinRegistryFile: Windows Registry file or None.", "source": "codesearchnet"}
{"code": "def generate_nb_data(P, R, n_cells, assignments=None):\n    \n    genes, clusters = P.shape\n    output = np.zeros((genes, n_cells))\n    if assignments is None:\n        cluster_probs = np.ones(clusters)/clusters\n    labels = []\n    for i in range(n_cells):\n        if assignments is None:\n            c = np.random.choice(range(clusters), p=cluster_probs)\n        else:\n            c = assignments[i]\n        labels.append(c)\n        \n        output[:,i] = np.random.negative_binomial(R[:,c], 1.0-P[:,c])\n    return output, np.array(labels)", "docstring": "Generates negative binomial data\n\nArgs:\nP (array): genes x clusters\nR (array): genes x clusters\nn_cells (int): number of cells\nassignments (list): cluster assignment of each cell. Default:\nrandom uniform\n\nReturns:\ndata array with shape genes x cells\nlabels - array of cluster labels", "source": "juraj-google-style"}
{"code": "def _FindAugmentingPath(self, queue):\n    while queue:\n        x = queue.popleft()\n        for y in self.right - self.t:\n            if not self._InEqualitySubgraph(x, y):\n                continue\n            if y not in self.matches:\n                return (True, x, y)\n            self.t.add(y)\n            queue.append(self.matches[y])\n            self._AddToTree(self.matches[y], x)\n    return (False, None, None)", "docstring": "Find an augmenting path for the current labeling.\n\nPerform a BFS to find an augmenting path for the current labeling.\n\nArgs:\nqueue: Queue for performing BFS traversal.\nReturns:\nfound: True if path was found.\nx: Left vertex of final path edge.\ny: Right vertex of final path edge.", "source": "github-repos"}
{"code": "def shannon_entropy(pvec, base=2):\n    if (base == 2):\n\n        def logfn(x):\n            return ((- x) * np.log2(x))\n    elif (base == np.e):\n\n        def logfn(x):\n            return ((- x) * np.log(x))\n    else:\n\n        def logfn(x):\n            return (((- x) * np.log(x)) / np.log(base))\n    h = 0.0\n    for x in pvec:\n        if (0 < x < 1):\n            h += logfn(x)\n    return h", "docstring": "Compute the Shannon entropy of a probability vector.\n\nThe shannon entropy of a probability vector pv is defined as\n$H(pv) = - \\\\sum_j pv[j] log_b (pv[j])$ where $0 log_b 0 = 0$.\n\nArgs:\npvec (array_like): a probability vector.\nbase (int): the base of the logarith\n\nReturns:\nfloat: The Shannon entropy H(pvec).", "source": "codesearchnet"}
{"code": "def parse_account(config, auth, account):\n    network_id = account\n    advertiser_ids = None\n    profile_id = None\n    try:\n        network_id, profile_id = network_id.split('@', 1)\n    except:\n        profile_id = None\n    try:\n        network_id, advertiser_ids = network_id.split(':', 1)\n    except:\n        pass\n    if network_id is not None:\n        network_id = int(network_id)\n    if advertiser_ids is not None:\n        advertiser_ids = [int(advertiser_id.strip()) for advertiser_id in advertiser_ids.split(',')]\n    return (network_id, advertiser_ids)", "docstring": "Breaks a [account:advertiser@profile] string into parts if supplied.\n\nThis function was created to accomodate supplying advertiser and profile\ninformation\nas a single token.  It needs to be refactored as this approach is messy.\n\nPossible variants include:\n* [account:advertiser@profile]\n* [account:advertiser]\n* [account@profile]\n\nArgs:\n* auth: (string) Either user or service.\n* account: (string) A string represeting [account:advertiser@profile]\n\nReturns:\n* ( network_id, advertiser_ids, profile_id) after parsing the account token.", "source": "github-repos"}
{"code": "def _minigui_report_search_status(self, leaves):\n        \n\n        root = self._player.get_root()\n\n        msg = {\n            \"id\": hex(id(root)),\n            \"n\": int(root.N),\n            \"q\": float(root.Q),\n        }\n\n        msg[\"childQ\"] = [int(round(q * 1000)) for q in root.child_Q]\n        msg[\"childN\"] = [int(n) for n in root.child_N]\n\n        ranked_children = root.rank_children()\n        variations = {}\n        for i in ranked_children[:15]:\n            if root.child_N[i] == 0 or i not in root.children:\n                break\n            c = coords.to_gtp(coords.from_flat(i))\n            child = root.children[i]\n            nodes = child.most_visited_path_nodes()\n            moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes]\n            variations[c] = {\n                \"n\": int(root.child_N[i]),\n                \"q\": float(root.child_Q[i]),\n                \"moves\": [c] + moves,\n            }\n\n        if leaves:\n            path = []\n            leaf = leaves[0]\n            while leaf != root:\n                path.append(leaf.fmove)\n                leaf = leaf.parent\n            if path:\n                path.reverse()\n                variations[\"live\"] = {\n                    \"n\": int(root.child_N[path[0]]),\n                    \"q\": float(root.child_Q[path[0]]),\n                    \"moves\": [coords.to_gtp(coords.from_flat(m)) for m in path]\n                }\n\n        if variations:\n            msg[\"variations\"] = variations\n\n        dbg(\"mg-update:%s\" % json.dumps(msg, sort_keys=True))", "docstring": "Prints the current MCTS search status to stderr.\n\nReports the current search path, root node's child_Q, root node's\nchild_N, the most visited path in a format that can be parsed by\none of the STDERR_HANDLERS in minigui.ts.\n\nArgs:\nleaves: list of leaf MCTSNodes returned by tree_search().", "source": "juraj-google-style"}
{"code": "def __init__(self, entries, elements=None):\n        \n        if elements is None:\n            elements = set()\n            for entry in entries:\n                elements.update(entry.composition.elements)\n        elements = list(elements)\n        dim = len(elements)\n\n        get_reduced_comp = lambda e: e.composition.reduced_composition\n\n        entries = sorted(entries, key=get_reduced_comp)\n\n        el_refs = {}\n        min_entries = []\n        all_entries = []\n        for c, g in itertools.groupby(entries, key=get_reduced_comp):\n            g = list(g)\n            min_entry = min(g, key=lambda e: e.energy_per_atom)\n            if c.is_element:\n                el_refs[c.elements[0]] = min_entry\n            min_entries.append(min_entry)\n            all_entries.extend(g)\n\n        if len(el_refs) != dim:\n            raise PhaseDiagramError(\n                \"There are no entries associated with a terminal element!.\")\n\n        data = np.array([\n            [e.composition.get_atomic_fraction(el) for el in elements] + [\n                e.energy_per_atom]\n            for e in min_entries\n        ])\n\n        \n        vec = [el_refs[el].energy_per_atom for el in elements] + [-1]\n        form_e = -np.dot(data, vec)\n        inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()\n\n        \n        inds.extend([min_entries.index(el) for el in el_refs.values()])\n\n        qhull_entries = [min_entries[i] for i in inds]\n        qhull_data = data[inds][:, 1:]\n\n        \n        \n        extra_point = np.zeros(dim) + 1 / dim\n        extra_point[-1] = np.max(qhull_data) + 1\n        qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)\n\n        if dim == 1:\n            self.facets = [qhull_data.argmin(axis=0)]\n        else:\n            facets = get_facets(qhull_data)\n            finalfacets = []\n            for facet in facets:\n                \n                if max(facet) == len(qhull_data) - 1:\n                    continue\n                m = qhull_data[facet]\n                m[:, -1] = 1\n                if abs(np.linalg.det(m)) > 1e-14:\n                    finalfacets.append(facet)\n            self.facets = finalfacets\n\n        self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]\n        self.all_entries = all_entries\n        self.qhull_data = qhull_data\n        self.dim = dim\n        self.el_refs = el_refs\n        self.elements = elements\n        self.qhull_entries = qhull_entries\n        self._stable_entries = set(self.qhull_entries[i] for i in\n                                   set(itertools.chain(*self.facets)))", "docstring": "Standard constructor for phase diagram.\n\nArgs:\nentries ([PDEntry]): A list of PDEntry-like objects having an\nenergy, energy_per_atom and composition.\nelements ([Element]): Optional list of elements in the phase\ndiagram. If set to None, the elements are determined from\nthe the entries themselves.", "source": "juraj-google-style"}
{"code": "def FindServiceByName(self, full_name):\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if (full_name not in self._service_descriptors):\n        self._FindFileContainingSymbolInDb(full_name)\n    return self._service_descriptors[full_name]", "docstring": "Loads the named service descriptor from the pool.\n\nArgs:\nfull_name: The full name of the service descriptor to load.\n\nReturns:\nThe service descriptor for the named service.\n\nRaises:\nKeyError: if the service cannot be found in the pool.", "source": "codesearchnet"}
{"code": "def get_all_checkpoints(rundir='runinfo'):\n    if (not os.path.isdir(rundir)):\n        return []\n    dirs = sorted(os.listdir(rundir))\n    checkpoints = []\n    for runid in dirs:\n        checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n        if os.path.isdir(checkpoint):\n            checkpoints.append(checkpoint)\n    return checkpoints", "docstring": "Finds the checkpoints from all last runs.\n\nNote that checkpoints are incremental, and this helper will not find\nprevious checkpoints from earlier than the most recent run. It probably\nshould be made to do so.\n\nKwargs:\n- rundir(str) : Path to the runinfo directory\n\nReturns:\n- a list suitable for the checkpointFiles parameter of DataFlowKernel\nconstructor", "source": "codesearchnet"}
{"code": "def merge_strings_files(old_strings_file, new_strings_file):\n    \n    old_localizable_dict = generate_localization_key_to_entry_dictionary_from_file(old_strings_file)\n    output_file_elements = []\n\n    f = open_strings_file(new_strings_file, \"r+\")\n\n    for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):\n        if len(header_comment) > 0:\n            output_file_elements.append(Comment(header_comment))\n\n        localize_value = value\n        if key in old_localizable_dict:\n            localize_value = old_localizable_dict[key].value\n\n        output_file_elements.append(LocalizationEntry(comments, key, localize_value))\n\n    f.close()\n\n    write_file_elements_to_strings_file(old_strings_file, output_file_elements)", "docstring": "Merges the old strings file with the new one.\n\nArgs:\nold_strings_file (str): The path to the old strings file (previously produced, and possibly altered)\nnew_strings_file (str): The path to the new strings file (newly produced).", "source": "juraj-google-style"}
{"code": "def stack_template_url(bucket_name, blueprint, endpoint):\n    \n    key_name = stack_template_key_name(blueprint)\n    return \"%s/%s/%s\" % (endpoint, bucket_name, key_name)", "docstring": "Produces an s3 url for a given blueprint.\n\nArgs:\nbucket_name (string): The name of the S3 bucket where the resulting\ntemplates are stored.\nblueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\nobject to create the URL to.\nendpoint (string): The s3 endpoint used for the bucket.\n\nReturns:\nstring: S3 URL.", "source": "juraj-google-style"}
{"code": "def merge_from(self, lam_dict, op):\n    for key, val in lam_dict.items():\n        if key in self:\n            self[key] = op(self[key], val, key)\n        else:\n            self[key] = val\n    for cur_id in range(lam_dict.aliases.latest_id):\n        parent_id = lam_dict.aliases.parent[cur_id]\n        cur_name = lam_dict.aliases.id2name[cur_id]\n        parent_name = lam_dict.aliases.id2name[parent_id]\n        if self.aliases.find_by_name(cur_name) != self.aliases.find_by_name(parent_name):\n            self.add_alias(cur_name, parent_name, op)", "docstring": "Merge the other `AliasingDict` into current class.\n\nArgs:\nlam_dict: The dict to merge from.\nop: The function used to merge the values.", "source": "github-repos"}
{"code": "def _update_album_art_to_full_uri(self, item):\n        \n        if getattr(item, 'album_art_uri', False):\n            item.album_art_uri = self.build_album_art_full_uri(\n                item.album_art_uri)", "docstring": "Update an item's Album Art URI to be an absolute URI.\n\nArgs:\nitem: The item to update the URI for", "source": "juraj-google-style"}
{"code": "def compose_tree_path(tree, issn=False):\n    if issn:\n        return join('/', ISSN_DOWNLOAD_KEY, basename(tree.issn))\n    return join('/', PATH_DOWNLOAD_KEY, quote_plus(tree.path).replace('%2F', '/'))", "docstring": "Compose absolute path for given `tree`.\n\nArgs:\npub (obj): :class:`.Tree` instance.\nissn (bool, default False): Compose URL using ISSN.\n\nReturns:\nstr: Absolute path of the tree, without server's address and protocol.", "source": "codesearchnet"}
{"code": "def _neigh_template(parameters, index, left=True, required=False, notfoundmsg=None):\n    fn_string = ('has_neigh(%s, left=%s)' % (repr(parameters.fn_params)[1:(- 1)], repr(left)))\n    output = (IND + 'el = dom.find(\\n')\n    output += ((IND + IND) + ('%s,\\n' % repr(parameters.tag_name)))\n    if parameters.params:\n        output += ((IND + IND) + ('%s,\\n' % repr(parameters.params)))\n    output += ((IND + IND) + ('fn=%s\\n' % fn_string))\n    output += (IND + ')\\n\\n')\n    if required:\n        return (output + _required_idiom(parameters.fn_params[0], index, notfoundmsg))\n    return (output + _index_idiom('el', index))", "docstring": "Generate neighbour matching call for HTMLElement, which returns only\nelements with required neighbours.\n\nArgs:\nparameters (list): List of parameters for ``.match()``.\nindex (int): Index of the item you want to get from ``.match()`` call.\nleft (bool, default True): Look for neigbour in the left side of el.\nrequired (bool, default False): Use :func:`_required_idiom` to returned\ndata.\nnotfoundmsg (str, default None): Message which will be used for\n:func:`_required_idiom` if the item is not found.\n\nReturns:\nstr: Python code.", "source": "codesearchnet"}
{"code": "def unpad_image(tensor, original_size):\n    if not isinstance(original_size, (list, tuple)):\n        if not isinstance(original_size, (torch.Tensor, np.ndarray)):\n            raise TypeError(f'image_size invalid type: {type(original_size)} not valid, should be either list, tuple, np.ndarray or tensor')\n        original_size = original_size.tolist()\n    original_height, original_width = original_size\n    current_height, current_width = tensor.shape[1:]\n    original_aspect_ratio = original_width / original_height\n    current_aspect_ratio = current_width / current_height\n    if original_aspect_ratio > current_aspect_ratio:\n        scale_factor = current_width / original_width\n        new_height = int(round(original_height * scale_factor, 7))\n        padding = (current_height - new_height) \n        unpadded_tensor = tensor[:, padding:current_height - padding, :]\n    else:\n        scale_factor = current_height / original_height\n        new_width = int(round(original_width * scale_factor, 7))\n        padding = (current_width - new_width) \n        unpadded_tensor = tensor[:, :, padding:current_width - padding]\n    return unpadded_tensor", "docstring": "Unpads a PyTorch tensor of a padded and resized image.\n\nArgs:\ntensor (`torch.Tensor`):\nThe image tensor, assumed to be of shape (num_channels, height, width).\noriginal_size (`tuple`):\nThe original size of the image (height, width).\n\nReturns:\n`torch.Tensor`: The unpadded image tensor.", "source": "github-repos"}
{"code": "def stop(self) -> None:\n    self._stop()", "docstring": "Stops the server.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\nstopping the server.", "source": "github-repos"}
{"code": "def one_hot(indices, num_classes):\n    return array_ops.one_hot(indices, depth=num_classes, axis=-1)", "docstring": "Computes the one-hot representation of an integer tensor.\n\nArgs:\nindices: nD integer tensor of shape\n`(batch_size, dim1, dim2, ... dim(n-1))`\nnum_classes: Integer, number of classes to consider.\n\nReturns:\n(n + 1)D one hot representation of the input\nwith shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`\n\nReturns:\nThe one-hot tensor.", "source": "github-repos"}
{"code": "def get_file_contents(self, file_key):\n\t\t\n\t\t\n\t\tself._raise_unimplemented_error()\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.files_suffix,\n\t\t\t\t\t\tfile_key,\n\t\t\t\t\t\tself.file_contents_suffix,\n\t\t\t\t\t\t])\n\t\treturn self._req('get', uri)", "docstring": "Gets file contents\nArgs:\nfile_key\t\tkey for the file\nreturn\t\t\t(status code, ?)", "source": "juraj-google-style"}
{"code": "def cmd_list(options):\n    (i_info, param_str) = gather_data(options)\n    if i_info:\n        awsc.get_all_aminames(i_info)\n        param_str = (('Instance List - ' + param_str) + '\\n')\n        list_instances(i_info, param_str)\n    else:\n        print('No instances found with parameters: {}'.format(param_str))", "docstring": "Gather data for instances matching args and call display func.\n\nArgs:\noptions (object): contains args and data from parser.", "source": "codesearchnet"}
{"code": "def load_data(path, dense=False):\n    \n\n    catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}\n\n    ext = os.path.splitext(path)[1]\n    func = catalog[ext]\n    X, y = func(path)\n\n    if dense and sparse.issparse(X):\n        X = X.todense()\n\n    return X, y", "docstring": "Load data from a CSV, LibSVM or HDF5 file based on the file extension.\n\nArgs:\npath (str): A path to the CSV, LibSVM or HDF5 format file containing data.\ndense (boolean): An optional variable indicating if the return matrix\nshould be dense.  By default, it is false.\n\nReturns:\nData matrix X and target vector y", "source": "juraj-google-style"}
{"code": "def kill_plasma_store(self, check_alive=True):\n    self._kill_process_type(ray_constants.PROCESS_TYPE_PLASMA_STORE, check_alive=check_alive)", "docstring": "Kill the plasma store.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "codesearchnet"}
{"code": "def ReadFileObject(self, artifacts_reader, file_object):\n    for artifact_definition in artifacts_reader.ReadFileObject(file_object):\n        self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from a file-like object.\n\nArgs:\nartifacts_reader (ArtifactsReader): an artifacts reader.\nfile_object (file): file-like object to read from.", "source": "codesearchnet"}
{"code": "def call_function_with_args(self, node, val, args):\n    assert isinstance(val.data, abstract.INTERPRETER_FUNCTION_TYPES)\n    with val.data.record_calls():\n        new_node, ret = self._call_function_in_frame(node, val, *attrs.astuple(args, recurse=False))\n    return (new_node, ret)", "docstring": "Call a function.\n\nArgs:\nnode: The given node.\nval: A cfg.Binding containing the function.\nargs: A function.Args object.\n\nReturns:\nA tuple of (1) a node and (2) a cfg.Variable of the return value.", "source": "github-repos"}
{"code": "def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None):\n    \n\n    variable_type = entities.Variable.Type.INTEGER\n    return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)", "docstring": "Returns value for a certain integer variable attached to a feature flag.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nInteger value of the variable. None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "juraj-google-style"}
{"code": "def create_image_lists(image_dir, testing_percentage, validation_percentage):\n    if (not tf.gfile.Exists(image_dir)):\n        tf.logging.error(((\"Image directory '\" + image_dir) + \"' not found.\"))\n        return None\n    result = collections.OrderedDict()\n    sub_dirs = sorted((x[0] for x in tf.gfile.Walk(image_dir)))\n    is_root_dir = True\n    for sub_dir in sub_dirs:\n        if is_root_dir:\n            is_root_dir = False\n            continue\n        extensions = sorted(set((os.path.normcase(ext) for ext in ['JPEG', 'JPG', 'jpeg', 'jpg', 'png'])))\n        file_list = []\n        dir_name = os.path.basename((sub_dir[:(- 1)] if sub_dir.endswith('/') else sub_dir))\n        if (dir_name == image_dir):\n            continue\n        tf.logging.info(((\"Looking for images in '\" + dir_name) + \"'\"))\n        for extension in extensions:\n            file_glob = os.path.join(image_dir, dir_name, ('*.' + extension))\n            file_list.extend(tf.gfile.Glob(file_glob))\n        if (not file_list):\n            tf.logging.warning('No files found')\n            continue\n        if (len(file_list) < 20):\n            tf.logging.warning('WARNING: Folder has less than 20 images, which may cause issues.')\n        elif (len(file_list) > MAX_NUM_IMAGES_PER_CLASS):\n            tf.logging.warning('WARNING: Folder {} has more than {} images. Some images will never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n        label_name = re.sub('[^a-z0-9]+', ' ', dir_name.lower())\n        training_images = []\n        testing_images = []\n        validation_images = []\n        for file_name in file_list:\n            base_name = os.path.basename(file_name)\n            hash_name = re.sub('_nohash_.*$', '', file_name)\n            hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()\n            percentage_hash = ((int(hash_name_hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) * (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n            if (percentage_hash < validation_percentage):\n                validation_images.append(base_name)\n            elif (percentage_hash < (testing_percentage + validation_percentage)):\n                testing_images.append(base_name)\n            else:\n                training_images.append(base_name)\n        result[label_name] = {'dir': dir_name, 'training': training_images, 'testing': testing_images, 'validation': validation_images}\n    return result", "docstring": "Builds a list of training images from the file system.\n\nAnalyzes the sub folders in the image directory, splits them into stable\ntraining, testing, and validation sets, and returns a data structure\ndescribing the lists of images for each label and their paths.\n\nArgs:\nimage_dir: String path to a folder containing subfolders of images.\ntesting_percentage: Integer percentage of the images to reserve for tests.\nvalidation_percentage: Integer percentage of images reserved for validation.\n\nReturns:\nAn OrderedDict containing an entry for each label subfolder, with images\nsplit into training, testing, and validation sets within each label.\nThe order of items defines the class indices.", "source": "codesearchnet"}
{"code": "def dump_data(data, filename=None, file_type='json', klazz=YapconfError, open_kwargs=None, dump_kwargs=None):\n    _check_file_type(file_type, klazz)\n    open_kwargs = (open_kwargs or {'encoding': 'utf-8'})\n    dump_kwargs = (dump_kwargs or {})\n    if filename:\n        with open(filename, 'w', **open_kwargs) as conf_file:\n            _dump(data, conf_file, file_type, **dump_kwargs)\n    else:\n        _dump(data, sys.stdout, file_type, **dump_kwargs)", "docstring": "Dump data given to file or stdout in file_type.\n\nArgs:\ndata (dict): The dictionary to dump.\nfilename (str, optional): Defaults to None. The filename to write\nthe data to. If none is provided, it will be written to STDOUT.\nfile_type (str, optional): Defaults to 'json'. Can be any of\nyapconf.FILE_TYPES\nklazz (optional): Defaults to YapconfError a special error to throw\nwhen something goes wrong.\nopen_kwargs (dict, optional): Keyword arguments to open.\ndump_kwargs (dict, optional): Keyword arguments to dump.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A LayoutLM sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def forward(self, hidden_state):\n    residual = hidden_state\n    hidden_state = self.norm(hidden_state)\n    if self.self_attn:\n        batch_size, n_vars, num_patches, d_model = hidden_state.shape\n        hidden_state_reshaped = hidden_state.reshape(batch_size * n_vars, num_patches, d_model)\n        x_attn, _, _ = self.self_attn_layer(hidden_state_reshaped, output_attentions=False)\n        x_attn = x_attn.reshape(batch_size, n_vars, num_patches, d_model)\n    hidden_state = hidden_state.transpose(2, 3)\n    hidden_state = self.mlp(hidden_state)\n    if self.gated_attn:\n        hidden_state = self.gating_block(hidden_state)\n    hidden_state = hidden_state.transpose(2, 3)\n    if self.self_attn:\n        hidden_state = self.norm_attn(hidden_state + x_attn)\n    out = hidden_state + residual\n    return out", "docstring": "Args:\nhidden_state (`torch.Tensor`): Input tensor.\n\nReturns:\n`torch.Tensor`: Transformed tensor.", "source": "github-repos"}
{"code": "def tokenize_numbers(text_array: List[str]) -> List[str]:\n    tokenized = []\n    for i in range(len(text_array)):\n        reg, sub = MATCH_NUMBERS\n        replaced = re.sub(reg, sub, text_array[i]).split()\n        tokenized.extend(replaced)\n    return tokenized", "docstring": "Splits large comma-separated numbers and floating point values. This is done by replacing commas with ' @,@ ' and\ndots with ' @.@ '.\n\nArgs:\ntext_array: An already tokenized text as list.\n\nReturns:\nA list of strings with tokenized numbers.\n\nExample:\n\n```python\n>>> tokenize_numbers([\"$\", \"5,000\", \"1.73\", \"m\"])\n['$', '5', '@,@', '000', '1', '@.@', '73', 'm']\n```", "source": "github-repos"}
{"code": "def fromkeys(cls, iterable, value=None):\n        \n        \n        \n        if not callable(value):\n            return cls(dict.fromkeys(iterable, value))\n\n        return cls((key, value(key)) for key in iterable)", "docstring": "Create a new d from\n\nArgs:\niterable: Iterable containing keys\nvalue: value to associate with each key.\nIf callable, will be value[key]\n\nReturns: new DictWrapper\n\nExample:\n\n>>> from ww import d\n>>> sorted(d.fromkeys('123', value=4).items())\n[('1', 4), ('2', 4), ('3', 4)]\n>>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items())\n[(0, 0), (1, 1), (2, 4)]", "source": "juraj-google-style"}
{"code": "def alignment(self, align):\n        \n        if align=='left':\n            align = '0'\n        elif align=='center':\n            align = '1'\n        elif align=='right':\n            align = '2'\n        elif align=='justified':\n            align = '3'\n        else:\n            raise RuntimeError('Invalid alignment in function alignment')\n        self.send(chr(27)+'a'+align)", "docstring": "Sets the alignment of the printer.\n\nArgs:\nalign: desired alignment. Options are 'left', 'center', 'right', and 'justified'. Anything else\nwill throw an error.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid alignment.", "source": "juraj-google-style"}
{"code": "def get_reference_points(spatial_shapes, valid_ratios, device):\n    reference_points_list = []\n    for lvl, (height, width) in enumerate(spatial_shapes):\n        ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device))\n        ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)\n        ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)\n        ref = torch.stack((ref_x, ref_y), -1)\n        reference_points_list.append(ref)\n    reference_points = torch.cat(reference_points_list, 1)\n    reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n    return reference_points", "docstring": "Get reference points for each feature map. Used in decoder.\n\nArgs:\nspatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):\nSpatial shapes of each feature map.\nvalid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):\nValid ratios of each feature map.\ndevice (`torch.device`):\nDevice on which to create the tensors.\nReturns:\n`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`", "source": "github-repos"}
{"code": "def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:\n    indices = [lag - shift for lag in self.config.lags_sequence]\n    sequence_length = sequence.shape[1]\n    if max(indices) + subsequences_length > sequence_length:\n        raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}')\n    lagged_values = []\n    for lag_index in indices:\n        begin_index = -lag_index - subsequences_length\n        end_index = -lag_index if lag_index > 0 else None\n        lagged_values.append(sequence[:, begin_index:end_index, ...])\n    return torch.stack(lagged_values, dim=-1)", "docstring": "Returns lagged subsequences of a given sequence. Returns a tensor of shape (batch_size, subsequences_length,\nfeature_size, indices_length), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i,\n-indices[k]-subsequences_length+j, :].\n\nArgs:\nsequence (`torch.Tensor` or shape `(batch_size, context_length,\nfeature_size)`): The sequence from which lagged subsequences should be extracted.\nsubsequences_length (`int`):\nLength of the subsequences to be extracted.\nshift (`int`, *optional* defaults to 0):\nShift the lags by this amount back in the time index.", "source": "github-repos"}
{"code": "def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int=1) -> Conv1D:\n    index = index.to(layer.weight.device)\n    W = layer.weight.index_select(dim, index).detach().clone()\n    if dim == 0:\n        b = layer.bias.detach().clone()\n    else:\n        b = layer.bias[index].detach().clone()\n    new_size = list(layer.weight.size())\n    new_size[dim] = len(index)\n    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)\n    new_layer.weight.requires_grad = False\n    new_layer.weight.copy_(W.contiguous())\n    new_layer.weight.requires_grad = True\n    new_layer.bias.requires_grad = False\n    new_layer.bias.copy_(b.contiguous())\n    new_layer.bias.requires_grad = True\n    return new_layer", "docstring": "Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights\nare transposed.\n\nUsed to remove heads.\n\nArgs:\nlayer ([`~pytorch_utils.Conv1D`]): The layer to prune.\nindex (`torch.LongTensor`): The indices to keep in the layer.\ndim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.\n\nReturns:\n[`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.", "source": "github-repos"}
{"code": "def get_setter(proto):\n    _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None))\n    if type_registrations is not None:\n        for type_registration in type_registrations:\n            if type_registration.should_load(proto):\n                return type_registration.setter\n    return None", "docstring": "Gets the registered setter function for the SavedUserObject proto.\n\nSee VersionedTypeRegistration for info about the setter function.\n\nArgs:\nproto: SavedUserObject proto\n\nReturns:\nsetter function", "source": "github-repos"}
{"code": "def _get_facet_chempots(self, facet):\n    complist = [self.qhull_entries[i].composition for i in facet]\n    energylist = [self.qhull_entries[i].energy_per_atom for i in facet]\n    m = [[c.get_atomic_fraction(e) for e in self.elements] for c in complist]\n    chempots = np.linalg.solve(m, energylist)\n    return dict(zip(self.elements, chempots))", "docstring": "Calculates the chemical potentials for each element within a facet.\n\nArgs:\nfacet: Facet of the phase diagram.\n\nReturns:\n{ element: chempot } for all elements in the phase diagram.", "source": "codesearchnet"}
{"code": "def create_branch(profile, name, branch_off):\n    branch_off_sha = get_branch_sha(profile, branch_off)\n    ref = ('heads/' + name)\n    data = refs.create_ref(profile, ref, branch_off_sha)\n    return data", "docstring": "Create a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the new branch.\n\nbranch_off\nThe name of a branch to create the new branch off of.\n\nReturns:\nA dict with data about the new branch.", "source": "codesearchnet"}
{"code": "def get_meshes_vec(step, var):\n    if step.geom.twod_xz:\n        (xmesh, ymesh) = (step.geom.x_mesh[(:, 0, :)], step.geom.z_mesh[(:, 0, :)])\n        vec1 = step.fields[(var + '1')][(:, 0, :, 0)]\n        vec2 = step.fields[(var + '3')][(:, 0, :, 0)]\n    elif (step.geom.cartesian and step.geom.twod_yz):\n        (xmesh, ymesh) = (step.geom.y_mesh[(0, :, :)], step.geom.z_mesh[(0, :, :)])\n        vec1 = step.fields[(var + '2')][(0, :, :, 0)]\n        vec2 = step.fields[(var + '3')][(0, :, :, 0)]\n    else:\n        (xmesh, ymesh) = (step.geom.x_mesh[(0, :, :)], step.geom.y_mesh[(0, :, :)])\n        pmesh = step.geom.p_mesh[(0, :, :)]\n        vec_phi = step.fields[(var + '2')][(0, :, :, 0)]\n        vec_r = step.fields[(var + '3')][(0, :, :, 0)]\n        vec1 = ((vec_r * np.cos(pmesh)) - (vec_phi * np.sin(pmesh)))\n        vec2 = ((vec_phi * np.cos(pmesh)) + (vec_r * np.sin(pmesh)))\n    return (xmesh, ymesh, vec1, vec2)", "docstring": "Return vector field components along with coordinates meshes.\n\nOnly works properly in 2D geometry.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): vector field name.\nReturns:\ntuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy\n2D arrays containing respectively the x position, y position, x\ncomponent and y component of the requested vector field.", "source": "codesearchnet"}
{"code": "def parse_args(arglist=None):\n    \n    climan = CLIManager(conf, **SUB_CMDS)\n\n    create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git',\n                          zsh_sourceable=True)\n\n    cmd_args, all_subs = climan.parse_args(arglist)\n    sub_cmd = cmd_args.loam_sub_name\n\n    if sub_cmd is None:\n        return cmd_args.func\n\n    if sub_cmd != 'config':\n        commands.report_parsing_problems(PARSING_OUT)\n\n    if conf.common.set:\n        set_conf_str(conf, conf.common.set)\n\n    if conf.common.config:\n        commands.config_pp(all_subs)\n\n    load_mplstyle()\n\n    try:\n        _steps_to_slices()\n    except AttributeError:\n        pass\n    return cmd_args.func", "docstring": "Parse cmd line arguments.\n\nUpdate :attr:`stagpy.conf` accordingly.\n\nArgs:\narglist (list of str): the list of cmd line arguments. If set to\nNone, the arguments are taken from :attr:`sys.argv`.\n\nReturns:\nfunction: the function implementing the sub command to be executed.", "source": "juraj-google-style"}
{"code": "def query_properties_with_values(self, query, include_defaults=True):\n        \n        themed_keys = set()\n        result = dict()\n        if include_defaults:\n            keys = self.properties()\n        else:\n            \n            \n            \n            \n            keys = set(self._property_values.keys()) | set(self._unstable_default_values.keys())\n            if self.themed_values():\n                themed_keys = set(self.themed_values().keys())\n                keys |= themed_keys\n\n        for key in keys:\n            descriptor = self.lookup(key)\n            if not query(descriptor):\n                continue\n\n            value = descriptor.serializable_value(self)\n            if not include_defaults and key not in themed_keys:\n                if isinstance(value, PropertyValueContainer) and key in self._unstable_default_values:\n                    continue\n            result[key] = value\n\n        return result", "docstring": "Query the properties values of |HasProps| instances with a\npredicate.\n\nArgs:\nquery (callable) :\nA callable that accepts property descriptors and returns True\nor False\n\ninclude_defaults (bool, optional) :\nWhether to include properties that have not been explicitly\nset by a user (default: True)\n\nReturns:\ndict : mapping of property names and values for matching properties", "source": "juraj-google-style"}
{"code": "def add(self, text, checked=False, sort=None):\n    node = ListItem(parent_id=self.id, parent_server_id=self.server_id)\n    node.checked = checked\n    node.text = text\n    if (sort is not None):\n        node.sort = sort\n    self.append(node, True)\n    self.touch(True)\n    return node", "docstring": "Add a new item to the list.\n\nArgs:\ntext (str): The text.\nchecked (bool): Whether this item is checked.\nsort (int): Item id for sorting.", "source": "codesearchnet"}
{"code": "def _get_full_name(self):\n    full_name_parts = [self._get_class(), self._get_name()]\n    return '", "docstring": "Gets the qualified name of the test method corresponding to the\ninstrumentation block.\n\nReturns:\nA string containing the fully qualified name of the\ninstrumentation test method. If parts are missing, then degrades\nsteadily.", "source": "github-repos"}
{"code": "def parse_metadata(lines):\n    \n    meta = defaultdict(list)\n    for line in lines:\n        line = line.rstrip()\n        if line.startswith(\"!\"):\n            if \"_table_begin\" in line or \"_table_end\" in line:\n                continue\n            key, value = __parse_entry(line)\n            meta[key].append(value)\n\n    return dict(meta)", "docstring": "Parse list of lines with metadata information from SOFT file.\n\nArgs:\nlines (:obj:`Iterable`): Iterator over the lines.\n\nReturns:\n:obj:`dict`: Metadata from SOFT file.", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n    for node in dag.op_nodes(self.gate):\n        if (not node.op.definition):\n            continue\n        rule = node.op.definition\n        decomposition = DAGCircuit()\n        decomposition.add_qreg(rule[0][1][0][0])\n        if rule[0][2]:\n            decomposition.add_creg(rule[0][2][0][0])\n        for inst in rule:\n            decomposition.apply_operation_back(*inst)\n        dag.substitute_node_with_dag(node, decomposition)\n    return dag", "docstring": "Expand a given gate into its decomposition.\n\nArgs:\ndag(DAGCircuit): input dag\nReturns:\nDAGCircuit: output dag where gate was expanded.", "source": "codesearchnet"}
{"code": "def get_tensor_num_entries(self, tensor_name, partial_layout=None,\n                             mesh_dimension_to_size=None):\n    \n    shape = self.get_tensor_shape(tensor_name)\n    \n    \n    num_entries = 1\n    for dim in shape.dims:\n      num_entries = num_entries * dim.value\n\n    if not partial_layout:\n      return num_entries\n\n    for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):\n      if mtf_dimension_name not in partial_layout:\n        continue\n      mesh_dimension_name = partial_layout[mtf_dimension_name]\n      mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]\n      num_entries = int(math.ceil(num_entries / mesh_dimension_size))\n\n    return num_entries", "docstring": "The number of entries in a tensor.\n\nIf partial_layout is specified, then mesh_dimension_to_size must also be. In\nthis case, the number of entries on a single device is returned.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\npartial_layout: an optional {string: string}, from MTF dimension name to\nmesh dimension name.\nmesh_dimension_to_size: an optional {string: int}, from mesh dimension\nname to size.\n\nReturns:\nan integer", "source": "juraj-google-style"}
{"code": "def _assert_float_dtype(dtype):\n    if not dtype.is_floating:\n        raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {dtype}.')\n    return dtype", "docstring": "Validate and return floating point type based on `dtype`.\n\n`dtype` must be a floating point type.\n\nArgs:\ndtype: The data type to validate.\n\nReturns:\nValidated type.\n\nRaises:\nValueError: if `dtype` is not a floating point type.", "source": "github-repos"}
{"code": "def get_data_location(self, catalog_id):\n        \n\n        try:\n            record = self.get(catalog_id)\n        except:\n            return None\n\n        \n        if 'Landsat8' in record['type'] and 'LandsatAcquisition' in record['type']:\n            bucket = record['properties']['bucketName']\n            prefix = record['properties']['bucketPrefix']\n            return 's3:\n\n        \n        if 'DigitalGlobeAcquisition' in record['type']:\n            o = Ordering()\n            res = o.location([catalog_id])\n            return res['acquisitions'][0]['location']\n\n        return None", "docstring": "Find and return the S3 data location given a catalog_id.\n\nArgs:\ncatalog_id: The catalog ID\n\nReturns:\nA string containing the s3 location of the data associated with a catalog ID.  Returns\nNone if the catalog ID is not found, or if there is no data yet associated with it.", "source": "juraj-google-style"}
{"code": "def group_systems(self, group_name, systems):\n    api_group_id = None\n    headers = {'Content-Type': 'application/json'}\n    group_path = (self.api_url + '/v1/groups')\n    group_get_path = (group_path + ('?display_name=%s' % quote(group_name)))\n    logger.debug('GET group: %s', group_get_path)\n    net_logger.info('GET %s', group_get_path)\n    get_group = self.session.get(group_get_path)\n    logger.debug('GET group status: %s', get_group.status_code)\n    if (get_group.status_code == 200):\n        api_group_id = get_group.json()['id']\n    if (get_group.status_code == 404):\n        logger.debug('POST group')\n        data = json.dumps({'display_name': group_name})\n        net_logger.info('POST', group_path)\n        post_group = self.session.post(group_path, headers=headers, data=data)\n        logger.debug('POST group status: %s', post_group.status_code)\n        logger.debug('POST Group: %s', post_group.json())\n        self.handle_fail_rcs(post_group)\n        api_group_id = post_group.json()['id']\n    logger.debug('PUT group')\n    data = json.dumps(systems)\n    net_logger.info('PUT %s', (group_path + ('/%s/systems' % api_group_id)))\n    put_group = self.session.put((group_path + ('/%s/systems' % api_group_id)), headers=headers, data=data)\n    logger.debug('PUT group status: %d', put_group.status_code)\n    logger.debug('PUT Group: %s', put_group.json())", "docstring": "Adds an array of systems to specified group\n\nArgs:\ngroup_name: Display name of group\nsystems: Array of {'machine_id': machine_id}", "source": "codesearchnet"}
{"code": "def _pull_response(self, namespace, req_type, **params):\n    self._validate_namespace(namespace)\n    context_id = params['EnumerationContext']\n    try:\n        context_data = self.enumeration_contexts[context_id]\n    except KeyError:\n        raise CIMError(CIM_ERR_INVALID_ENUMERATION_CONTEXT, _format('EnumerationContext {0!A} not found in mock server enumeration contexts.', context_id))\n    if (context_data['pull_type'] != req_type):\n        raise CIMError(CIM_ERR_INVALID_ENUMERATION_CONTEXT, _format('Invalid pull operations {0!A} does not match expected {1!A} for EnumerationContext {2!A}', context_data['pull_type'], req_type, context_id))\n    objs_list = context_data['data']\n    max_obj_cnt = params['MaxObjectCount']\n    if (not max_obj_cnt):\n        max_obj_cnt = _DEFAULT_MAX_OBJECT_COUNT\n    if (len(objs_list) <= max_obj_cnt):\n        eos = u'TRUE'\n        rtn_objs_list = objs_list\n        del self.enumeration_contexts[context_id]\n        context_id = ''\n    else:\n        eos = u'FALSE'\n        rtn_objs_list = objs_list[0:max_obj_cnt]\n        del objs_list[0:max_obj_cnt]\n    return self._make_pull_imethod_resp(rtn_objs_list, eos, context_id)", "docstring": "Common method for all of the Pull methods. Since all of the pull\nmethods operate independent of the type of data, this single function\nsevers as common code\n\nThis method validates the namespace, gets data on the enumeration\nsequence from the enumeration_contexts table, validates the pull\ntype, and returns the required number of objects.\n\nThis method assumes the same context_id throughout the sequence.\n\nRaises:\n\nCIMError: CIM_ERR_INVALID_ENUMERATION_CONTEXT", "source": "codesearchnet"}
{"code": "def find_duplicate_items(items, k=2):\n    r\n    import utool as ut\n    \n    duplicate_map = ut.ddict(list)\n    for count, item in enumerate(items):\n        duplicate_map[item].append(count)\n    \n    singleton_keys = []\n    for key in six.iterkeys(duplicate_map):\n        if len(duplicate_map[key]) == 1:\n            singleton_keys.append(key)\n    for key in singleton_keys:\n        del duplicate_map[key]\n    duplicate_map = dict(duplicate_map)\n    return duplicate_map", "docstring": "r\"\"\"\nArgs:\nitems (list):\n\nReturns:\ndict: duplicate_map of indexes\n\nCommandLine:\npython -m utool.util_list --test-find_duplicate_items\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> items = [0, 1, 2, 3, 3, 0, 12, 2, 9]\n>>> duplicate_map = find_duplicate_items(items)\n>>> result = str(duplicate_map)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def destroy_connection(self, connection):\n    log.debug('Destroying connection at <{0}>'.format(hex(id(connection))))\n    self._decontextualise_connection(connection)\n    connection.unbind()", "docstring": "Destroys a connection. Removes the connection from the appcontext, and\nunbinds it.\n\nArgs:\nconnection (ldap3.Connection):  The connnection to destroy", "source": "codesearchnet"}
{"code": "def Send(self, message):\n    if (not isinstance(message, common_pb2.Message)):\n        raise ValueError('Send requires a fleetspeak.Message')\n    if (message.destination.service_name == 'system'):\n        raise ValueError('Only predefined messages can have destination.service_name == \"system\"')\n    return self._SendImpl(message)", "docstring": "Send a message through Fleetspeak.\n\nArgs:\nmessage: A message protocol buffer.\nReturns:\nSize of the message in bytes.\nRaises:\nValueError: If message is not a common_pb2.Message.", "source": "codesearchnet"}
{"code": "def print_network_spec(mlmodel_spec, interface_only=False):\n    \n    inputs, outputs, layers_info = summarize_neural_network_spec(mlmodel_spec)\n\n    print('Inputs:')\n    for i in inputs:\n        name, description = i\n        print('  {} {}'.format(name, description))\n\n    print('Outputs:')\n    for o in outputs:\n        name, description = o\n        print('  {} {}'.format(name, description))\n\n    if layers_info is None:\n        print('\\n(This MLModel is not a neural network model or does not contain any layers)')\n\n    if layers_info and not interface_only:\n        print('\\nLayers:')\n        for idx, l in enumerate(layers_info):\n            layer_type, name, in_blobs, out_blobs, params_info = l\n            print('[{}] ({}) {}'.format(idx, layer_type, name))\n            print('  Input blobs: {}'.format(in_blobs))\n            print('  Output blobs: {}'.format(out_blobs))\n            if len(params_info) > 0:\n                print('  Parameters: ')\n            for param in params_info:\n                print('    {} = {}'.format(param[0], param[1]))\n\n    print('\\n')", "docstring": "Print the network information summary.\nArgs:\nmlmodel_spec : the mlmodel spec\ninterface_only : Shows only the input and output of the network", "source": "juraj-google-style"}
{"code": "def orient_graph(self, df_data, graph, nb_runs=6, printout=None, **kwargs):\n    if (type(graph) == nx.DiGraph):\n        edges = [a for a in list(graph.edges()) if ((a[1], a[0]) in list(graph.edges()))]\n        oriented_edges = [a for a in list(graph.edges()) if ((a[1], a[0]) not in list(graph.edges()))]\n        for a in edges:\n            if ((a[1], a[0]) in list(graph.edges())):\n                edges.remove(a)\n        output = nx.DiGraph()\n        for i in oriented_edges:\n            output.add_edge(*i)\n    elif (type(graph) == nx.Graph):\n        edges = list(graph.edges())\n        output = nx.DiGraph()\n    else:\n        raise TypeError('Data type not understood.')\n    res = []\n    for (idx, (a, b)) in enumerate(edges):\n        weight = self.predict_proba(df_data[a].values.reshape(((- 1), 1)), df_data[b].values.reshape(((- 1), 1)), idx=idx, nb_runs=nb_runs, **kwargs)\n        if (weight > 0):\n            output.add_edge(a, b, weight=weight)\n        else:\n            output.add_edge(b, a, weight=abs(weight))\n        if (printout is not None):\n            res.append([((str(a) + '-') + str(b)), weight])\n            DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(printout, index=False)\n    for node in list(df_data.columns.values):\n        if (node not in output.nodes()):\n            output.add_node(node)\n    return output", "docstring": "Orient an undirected graph using the pairwise method defined by the subclass.\n\nThe pairwise method is ran on every undirected edge.\n\nArgs:\ndf_data (pandas.DataFrame): Data\numg (networkx.Graph): Graph to orient\nnb_runs (int): number of times to rerun for each pair (bootstrap)\nprintout (str): (optional) Path to file where to save temporary results\n\nReturns:\nnetworkx.DiGraph: a directed graph, which might contain cycles\n\n.. warning:\nRequirement : Name of the nodes in the graph correspond to name of\nthe variables in df_data", "source": "codesearchnet"}
{"code": "def from_args(cls: Type[ConfigT], args: Namespace) -> ConfigT:\n        \n        parsed_args = cls.parse_args(args)\n        return cls(args, host=args.host, port=args.port, debug=args.debug,\n                   reject_insecure_auth=not args.insecure_login,\n                   cert_file=args.cert, key_file=args.key,\n                   **parsed_args)", "docstring": "Build and return a new :class:`IMAPConfig` using command-line\narguments.\n\nArgs:\nargs: The arguments parsed from the command-line.", "source": "juraj-google-style"}
{"code": "def sampling_query(sql, context, fields=None, count=5, sampling=None, udfs=None, data_sources=None):\n    return Query(_sampling.Sampling.sampling_query(sql, fields, count, sampling), context=context, udfs=udfs, data_sources=data_sources)", "docstring": "Returns a sampling Query for the SQL object.\n\nArgs:\nsql: the SQL statement (string) or Query object to sample.\ncontext: a Context object providing project_id and credentials.\nfields: an optional list of field names to retrieve.\ncount: an optional count of rows to retrieve which is used if a specific\nsampling is not specified.\nsampling: an optional sampling strategy to apply to the table.\nudfs: array of UDFs referenced in the SQL.\ndata_sources: dictionary of federated (external) tables referenced in the SQL.\nReturns:\nA Query object for sampling the table.", "source": "codesearchnet"}
{"code": "def filter_by_conditional_statement(self, statement):\n        \n        _filt_values, _filt_datetimes = self._filter_by_statement(statement)\n        if self._enumeration is None:\n            self._get_mutable_enumeration()\n        col_obj = self._enumeration['mutable'][self._collection_type]\n        collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)\n        collection._validated_a_period = self._validated_a_period\n        return collection", "docstring": "Filter the Data Collection based on a conditional statement.\n\nArgs:\nstatement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).\nThe variable should always be named as 'a' (without quotations).\n\nReturn:\nA new Data Collection containing only the filtered data", "source": "juraj-google-style"}
{"code": "class API:\n\n    def __init__(self, config, api):\n        self.config = config\n        self.api = api['api']\n        self.version = api['version']\n        self.auth = api['auth']\n        self.uri = api.get('uri')\n        self.key = api.get('key')\n        self.labels = api.get('labels')\n        self.function_stack = list(filter(None, api.get('function', '').split('.')))\n        self.function_kwargs = API.__clean__(api.get('kwargs', {}))\n        self.iterate = api.get('iterate', False)\n        self.limit = api.get('limit')\n        self.headers = api.get('headers', {})\n        self.function = None\n        self.job = None\n        self.response = None\n\n    def __str__(self):\n        return '%s.%s.%s' % (self.api, self.version, '.'.join(self.function_stack))\n\n    def __getattr__(self, function_name):\n        self.function_stack.append(function_name)\n\n        def function_call(**kwargs):\n            self.function_kwargs = API.__clean__(kwargs)\n            return self\n        return function_call\n\n    @staticmethod\n    def __clean__(struct: Union[dict, list]) -> Union[dict, list]:\n        \n        if isinstance(struct, dict):\n            for key, value in struct.items():\n                if isinstance(value, bytes):\n                    struct[key] = base64.standard_b64encode(value).decode('ascii')\n                elif isinstance(value, date):\n                    struct[key] = str(value)\n                else:\n                    API.__clean__(value)\n        elif isinstance(struct, list):\n            for index, value in enumerate(struct):\n                if isinstance(value, bytes):\n                    struct[index] = base64.standard_b64encode(value).decode('ascii')\n                elif isinstance(value, date):\n                    struct[index] = str(value)\n                else:\n                    API.__clean__(value)\n        return struct\n\n    def call(self, function_chain):\n        for function_name in function_chain.split('.'):\n            self.function_stack.append(function_name)\n        return self\n\n    def execute(self, run=True, iterate=False, limit=None):\n        self.function = get_service(config=self.config, api=self.api, version=self.version, auth=self.auth, headers=self.headers, key=self.key, labels=self.labels, uri_file=self.uri)\n        for f_n in self.function_stack:\n            self.function = getattr(self.function if isinstance(self.function, Resource) else self.function(), f_n)\n        self.job = self.function(**self.function_kwargs)\n        if run:\n            self.response = API_Retry(self.job)\n            if iterate or self.iterate:\n                return API_Iterator(self.function, self.function_kwargs, self.response, limit or self.limit)\n            else:\n                return self.response\n        else:\n            return self.job\n\n    def upload(self, retries=5, wait=61):\n        job = self.execute(run=False)\n        response = None\n        while response is None:\n            error = None\n            try:\n                print('Uploading file...')\n                status, response = job.next_chunk()\n                if 'id' in response:\n                    print(\"Object id '%s' was successfully uploaded.\" % response['id'])\n                else:\n                    exit('The upload failed with an unexpected response: %s' % response)\n            except HttpError as e:\n                if retries > 0 and e.resp.status in RETRIABLE_STATUS_CODES:\n                    error = 'A retriable 
HTTP error %d occurred:\\n%s' % (e.resp.status, e.content.decode())\n                else:\n                    raise\n            except RETRIABLE_EXCEPTIONS as e:\n                if retries > 0:\n                    error = 'A retriable error occurred: %s' % e\n                else:\n                    raise\n            if error is not None:\n                print(error)\n                retries -= 1\n                wait = wait * 2\n                print('Sleeping %d seconds and then retrying...' % wait)\n                time.sleep(wait)", "docstring": "A wrapper around Google API with built in helpers for StarThinker.\n\nThe wrapper mimics function calls, storing the m in a stack, until it\nencounters\nexecute().  Then it uses the stored stack and arguments to call the actual\nAPI.\nThis allows handlers on execute such as API_Retry and API_Iterator.\n\nSee module level description for wrapped changes to Google API.  The class\nis\ndesigned to be a connector to JSON, hence the configuraton is a JSON object.\n\napi = {\n\"api\":\"doubleclickbidmanager\",\n\"version\":\"v1.1\",\n\"auth\":\"user\",\n\"iterate\":False\n}\napi = API(config, api).placements().list(profile_id=1234,\narchived=False).execute()\n\nArgs:\nconfig: (json) see example above, configures all authentication parameters\napi: (json) see example above, configures all API parameters\n\nReturns:\nIf nextpageToken in result or iterate is True: return iterator of API\nresponse\nOtherwise: returns API response", "source": "github-repos"}
{"code": "def spliceext(filepath, s):\n    (root, ext) = os.path.splitext(safepath(filepath))\n    return ((root + s) + ext)", "docstring": "Add s into filepath before the extension\n\nArgs:\nfilepath (str, path): file path\ns (str): string to splice\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def __init__(self, json_data=None, **kwargs):\n        \n        if isinstance(json_data, OhPickle):\n            return\n        if isinstance(json_data, basestring):\n            json_data = json.loads(json_data)\n        if json_data is not None:\n            kwargs = type(self).json_to_initkwargs(json_data, kwargs)\n        super(JsonRecordList, self).__init__(**kwargs)", "docstring": "Build a new JsonRecord sub-class.\n\nArgs:\n``json_data=``\\ *LIST|other*\nJSON data (string or already ``json.loads``'d)\n\n``**kwargs``\nOther initializer attributes, for lists with extra\nattributes (eg, paging information)", "source": "juraj-google-style"}
{"code": "def __init__(self, url_formatter, mapsources):\n        \n        super().__init__(url_formatter)\n        self.map_folders = {\n            root: {\n                \"folders\": folders,\n                \"maps\": maps\n            } for root, folders, maps in walk_mapsources(mapsources)\n        }\n        self.add_maps(parent=self.kml_doc)", "docstring": "Create a KML master document.\n\nArgs:\nmapsources (list of MapSource):", "source": "juraj-google-style"}
{"code": "def _from_to_as_term(self, frm, to):\n        \n\n        \n        \n        \n        from_year = ''\n        to_year = ''\n\n        def year_or_empty(prefix, year, suffix):\n            try:\n                return prefix + str(int(year)) + suffix\n            except (ValueError, TypeError):\n                return ''\n\n        if frm:\n            from_year = year_or_empty('', frm, ' ')\n\n        if to:\n            to_year = year_or_empty(' ', to, '')\n\n        if bool(from_year) or bool(to_year):\n            return '[{}TO{}]'.format(from_year, to_year)\n        else:\n            return None", "docstring": "Turns from and to into the query format.\n\nArgs:\nfrm (str): from year\nto (str): to year\n\nReturns:\nFTS query str with years range.", "source": "juraj-google-style"}
{"code": "def _generate_splits(self, m, r):\n        \n        new_rects = []\n        \n        if r.left > m.left:\n            new_rects.append(Rectangle(m.left, m.bottom, r.left-m.left, m.height))\n        if r.right < m.right:\n            new_rects.append(Rectangle(r.right, m.bottom, m.right-r.right, m.height))\n        if r.top < m.top:\n            new_rects.append(Rectangle(m.left, r.top, m.width, m.top-r.top))\n        if r.bottom > m.bottom:\n            new_rects.append(Rectangle(m.left, m.bottom, m.width, r.bottom-m.bottom))\n        \n        return new_rects", "docstring": "When a rectangle is placed inside a maximal rectangle, it stops being one\nand up to 4 new maximal rectangles may appear depending on the placement.\n_generate_splits calculates them.\n\nArguments:\nm (Rectangle): max_rect rectangle\nr (Rectangle): rectangle placed\n\nReturns:\nlist : list containing new maximal rectangles or an empty list", "source": "juraj-google-style"}
{"code": "def enable_preset_args(include_all_preset_kwargs: bool=False, preset_name: str='global') -> Callable[[types.FunctionType], types.FunctionType]:\n\n    def decorator(func):\n        sig = inspect.signature(func)\n        positional_arg_names = [p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]\n        arg_defaults = {}\n        has_preset_value = False\n        has_varkw = False\n        for p in sig.parameters.values():\n            if p.kind == inspect.Parameter.VAR_KEYWORD:\n                has_varkw = True\n                continue\n            if p.kind == inspect.Parameter.VAR_POSITIONAL:\n                continue\n            if p.default == inspect.Parameter.empty:\n                continue\n            if isinstance(p.default, PresetArgValue):\n                has_preset_value = True\n            arg_defaults[p.name] = p.default\n        if has_preset_value:\n\n            @functools.wraps(func)\n            def _func(*args, **kwargs):\n                presets = utils.thread_local_peek(_TLS_KEY_PRESET_KWARGS, None)\n                preset_kwargs = presets.get_preset(preset_name) if presets else {}\n                args, kwargs = PresetArgValue.resolve_args(args, kwargs, positional_arg_names, arg_defaults, preset_kwargs, include_all_preset_kwargs=include_all_preset_kwargs and has_varkw)\n                return func(*args, **kwargs)\n            return _func\n        return func\n    return decorator", "docstring": "Decorator for functions that maybe use preset argument values.\n\nUsage::\n\n@pg.typing.enable_preset_args\ndef foo(x, y=pg.typing.PresetArgValue(default=1)):\nreturn x + y\n\nwith pg.typing.preset_args(y=2):\nprint(foo(x=1))  # 3: y=2\nprint(foo(x=1))  # 2: y=1\n\nArgs:\ninclude_all_preset_kwargs: Whether to include all preset kwargs (even\nnot makred as `PresetArgValue`) when callng the function.\npreset_name: The name of the preset to specify kwargs.\n\nReturns:\nA decorated function that could consume the preset argument values.", "source": "github-repos"}
{"code": "def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):\n    identifier_mappings = {}\n    for esedb_record in esedb_table.records:\n        if parser_mediator.abort:\n            break\n        (identifier, mapped_value) = self._ParseIdentifierMappingRecord(parser_mediator, esedb_table.name, esedb_record)\n        if ((identifier is None) or (mapped_value is None)):\n            continue\n        if (identifier in identifier_mappings):\n            parser_mediator.ProduceExtractionWarning('identifier: {0:d} already exists in mappings.'.format(identifier))\n            continue\n        identifier_mappings[identifier] = mapped_value\n    return identifier_mappings", "docstring": "Extracts identifier mappings from the SruDbIdMapTable table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nesedb_table (pyesedb.table): table.\n\nReturns:\ndict[int, str]: mapping of numeric identifiers to their string\nrepresentation.", "source": "codesearchnet"}
{"code": "def plot_seebeck_temp(self, doping='all', output='average'):\n    import matplotlib.pyplot as plt\n    if (output == 'average'):\n        sbk = self._bz.get_seebeck(output='average')\n    elif (output == 'eigs'):\n        sbk = self._bz.get_seebeck(output='eigs')\n    plt.figure(figsize=(22, 14))\n    tlist = sorted(sbk['n'].keys())\n    doping = (self._bz.doping['n'] if (doping == 'all') else doping)\n    for (i, dt) in enumerate(['n', 'p']):\n        plt.subplot((121 + i))\n        for dop in doping:\n            d = self._bz.doping[dt].index(dop)\n            sbk_temp = []\n            for temp in tlist:\n                sbk_temp.append(sbk[dt][temp][d])\n            if (output == 'average'):\n                plt.plot(tlist, sbk_temp, marker='s', label=(str(dop) + ' $cm^{-3}$'))\n            elif (output == 'eigs'):\n                for xyz in range(3):\n                    plt.plot(tlist, zip(*sbk_temp)[xyz], marker='s', label=(((str(xyz) + ' ') + str(dop)) + ' $cm^{-3}$'))\n        plt.title((dt + '-type'), fontsize=20)\n        if (i == 0):\n            plt.ylabel('Seebeck \\n coefficient  ($\\\\mu$V/K)', fontsize=30.0)\n        plt.xlabel('Temperature (K)', fontsize=30.0)\n        p = ('lower right' if (i == 0) else '')\n        plt.legend(loc=p, fontsize=15)\n        plt.grid()\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n    plt.tight_layout()\n    return plt", "docstring": "Plot the Seebeck coefficient in function of temperature for different\ndoping levels.\n\nArgs:\ndopings: the default 'all' plots all the doping levels in the analyzer.\nSpecify a list of doping levels if you want to plot only some.\noutput: with 'average' you get an average of the three directions\nwith 'eigs' you get all the three directions.\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def feature_path(self, gff_path):\n        \n        if not gff_path:\n            self.feature_dir = None\n            self.feature_file = None\n\n        else:\n            if not op.exists(gff_path):\n                raise OSError('{}: file does not exist!'.format(gff_path))\n\n            if not op.dirname(gff_path):\n                self.feature_dir = '.'\n            else:\n                self.feature_dir = op.dirname(gff_path)\n            self.feature_file = op.basename(gff_path)", "docstring": "Load a GFF file with information on a single sequence and store features in the ``features`` attribute\n\nArgs:\ngff_path: Path to GFF file.", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context):\n    \n    super(TSKPartitionFile, self).__init__(resolver_context)\n    self._file_system = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def run(self, input_dir, output_dir, epsilon):\n    \n    print('Running attack ', self.name)\n    cmd = [self.docker_binary(), 'run',\n           '-v', '{0}:/input_images'.format(input_dir),\n           '-v', '{0}:/output_images'.format(output_dir),\n           '-v', '{0}:/code'.format(self.directory),\n           '-w', '/code',\n           self.container,\n           './' + self.entry_point,\n           '/input_images',\n           '/output_images',\n           str(epsilon)]\n    print(' '.join(cmd))\n    subprocess.call(cmd)", "docstring": "Runs attack inside Docker.\n\nArgs:\ninput_dir: directory with input (dataset).\noutput_dir: directory where output (adversarial images) should be written.\nepsilon: maximum allowed size of adversarial perturbation,\nshould be in range [0, 255].", "source": "juraj-google-style"}
{"code": "def __init__(self, inputs, mesh=None, name=None):\n    \n    if mesh is None:\n      if not inputs:\n        raise ValueError(\"mesh must be specified if no inputs\")\n      mesh = inputs[0].mesh\n    self._inputs = inputs\n    self._outputs = []\n    self._mesh = mesh\n    \n    self._splittable_dims, self._unsplittable_dims = (\n        self._initialize_all_dimensions_as_splittable())\n    assert name is not None\n    self._name = mesh.graph.unique_name(name)\n    mesh.graph.operations.append(self)", "docstring": "Initializer.\n\nArgs:\ninputs: a list of Tensor\nmesh: an optional Mesh (if unspecified, will be inferred from first input)\nname: a string, which will get uniquified (in TensorFlow style)\n\nRaises:\nValueError: mesh was not provided and there were no inputs to infer from.", "source": "juraj-google-style"}
{"code": "def frame_counts(self,subsets=None):\n        \n        mergeon = self.cdf.frame_columns+['region_label']\n        if subsets is None:\n            cnts = self.groupby(mergeon+['phenotype_label']).count()[['cell_index']].\\\n                rename(columns={'cell_index':'count'})\n            mr = self.measured_regions\n            mr['_key'] =  1\n            mp = pd.DataFrame({'phenotype_label':self.measured_phenotypes})\n            mp['_key'] = 1\n            mr = mr.merge(mp,on='_key').drop(columns='_key')\n            cnts = mr.merge(cnts,on=mergeon+['phenotype_label'],how='left').fillna(0)\n        else:\n             \n            if isinstance(subsets,SL): subsets=[subsets]\n            cnts = []\n            labels = set([s.label for s in subsets])\n            for x in subsets: \n                if x.label is None: raise ValueError(\"Subsets must be named\")\n            if len(labels) != len(subsets): raise ValueError(\"Subsets must be uniquely named.\")\n            seen_labels = []\n            for sl in subsets:\n                if sl.label in seen_labels: raise ValueError(\"cannot use the same label twice in the subsets list\")\n                seen_labels.append(sl.label)\n\n                df = self.cdf.subset(sl)\n                df = df.groupby(mergeon).count()[['cell_index']].\\\n                    rename(columns={'cell_index':'count'}).reset_index()\n                df = self.measured_regions.merge(df,on=mergeon,how='left').fillna(0)\n                df['phenotype_label'] = sl.label\n                cnts.append(df)\n            cnts = pd.concat(cnts)\n        cnts = cnts[mergeon+['region_area_pixels','phenotype_label','count']]\n        cnts['region_area_mm2'] = cnts.apply(lambda x: \n            (x['region_area_pixels']/1000000)*(self.microns_per_pixel*self.microns_per_pixel),1)\n        cnts['density_mm2'] = cnts.apply(lambda x: np.nan if x['region_area_mm2'] == 0 else x['count']/x['region_area_mm2'],1)\n        \n        cnts.loc[cnts['region_area_pixels']<self.minimum_region_size_pixels,['count','density_mm2']] = np.nan\n        return cnts", "docstring": "Frame counts is the core of all the counting operations.  It counts on a per-frame/per-region basis.\n\nArgs:\nsubsets (list): a list of Subset Objects.  if not specified, the phenotypes are used.\n\nReturns:\npandas.DataFrame: A dataframe of count data", "source": "juraj-google-style"}
{"code": "def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  \n  match = Match(r'^(.*)\\[\\s*(?:=|&[^\\w])', line)\n  if match:\n    \n    \n    \n    line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))\n    if pos >= 0 and Match(r'^\\s*[{(]', line[pos:]):\n      error(filename, linenum, 'build/c++11',\n            4,  \n            'Default lambda captures are an unapproved C++ feature.')", "docstring": "Check that default lambda captures are not used.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def find_all(self, model_class, params={}):\n    url = '{host}/{namespace}/{model}{params}'.format(host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), params=self._build_param_string(params))\n    data = self._get_json(url)['data']\n    fresh_models = []\n    for item in data:\n        fresh_model = model_class(item['attributes'])\n        fresh_model.id = item['id']\n        fresh_model.validate()\n        fresh_models.append(fresh_model)\n        if (self._cache is not None):\n            self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)\n    return fresh_models", "docstring": "Return an list of models from the API and caches the result.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nparams (dict, optional): Description\n\nReturns:\nlist: A list of instances of you model_class or and empty list.", "source": "codesearchnet"}
{"code": "def range_index_map(batch_shape, num_segments, name='range_index_map'):\n    device = num_segments.device if torch.is_tensor(num_segments) else 'cpu'\n    batch_shape = torch.as_tensor(batch_shape, dtype=torch.long, device=device)\n    assert len(batch_shape.size()) == 1\n    num_segments = torch.as_tensor(num_segments, device=device)\n    assert len(num_segments.size()) == 0\n    indices = torch.arange(start=0, end=num_segments, device=num_segments.device)\n    new_tensor = torch.cat([torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)], dim=0)\n    new_shape = [int(x) for x in new_tensor.tolist()]\n    indices = indices.view(new_shape)\n    multiples = torch.cat([batch_shape, torch.as_tensor([1], device=device)], dim=0)\n    indices = indices.repeat(multiples.tolist())\n    return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0])", "docstring": "Constructs an index map equal to range(num_segments).\n\nArgs:\nbatch_shape (`torch.Size`):\nBatch shape\nnum_segments (`int`):\nNumber of segments\nname (`str`, *optional*, defaults to 'range_index_map'):\nName for the operation. Currently not used\n\nReturns:\n(`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).", "source": "github-repos"}
{"code": "def calculate_hashes(self):\n    hashers = []\n    if (not self.mardata.signatures):\n        return []\n    for s in self.mardata.signatures.sigs:\n        h = make_hasher(s.algorithm_id)\n        hashers.append((s.algorithm_id, h))\n    for block in get_signature_data(self.fileobj, self.mardata.signatures.filesize):\n        [h.update(block) for (_, h) in hashers]\n    return [(algo_id, h.finalize()) for (algo_id, h) in hashers]", "docstring": "Return hashes of the contents of this MAR file.\n\nThe hashes depend on the algorithms defined in the MAR file's signature block.\n\nReturns:\nA list of (algorithm_id, hash) tuples", "source": "codesearchnet"}
{"code": "def _create_partition_config(option: t.Tuple, config: Config) -> Config:\n    copy = cp.deepcopy(config.selection)\n    out = cp.deepcopy(config)\n    for idx, key in enumerate(config.partition_keys):\n        copy[key] = [option[idx]]\n    if 'hdate' in copy:\n        copy['hdate'] = [generate_hdate(copy['date'][0], v) for v in copy['hdate']]\n    out.selection = copy\n    return out", "docstring": "Create a config for a single partition option.\n\nOutput a config dictionary, overriding the range of values for\neach key with the partition instance in 'selection'.\nContinuing the example from prepare_partitions, the selection section\nwould be:\n{ 'foo': ..., 'year': ['2020'], 'month': ['01'], ... }\n{ 'foo': ..., 'year': ['2020'], 'month': ['02'], ... }\n{ 'foo': ..., 'year': ['2020'], 'month': ['03'], ... }\n\nArgs:\noption: A single item in the range of partition_keys.\nconfig: The download config, including the parameters and selection sections.\n\nReturns:\nA configuration with that selects a single download partition.", "source": "github-repos"}
{"code": "def from_nested_row_lengths(cls, flat_values, nested_row_lengths, name=None, validate=True):\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    if isinstance(nested_row_lengths, tensor_lib.Tensor):\n        raise TypeError(f'Argument `nested_row_lengths` must be a list of Tensors. Received {nested_row_lengths}.')\n    with ops.name_scope(name, 'RaggedFromNestedRowlengths', [flat_values] + list(nested_row_lengths)):\n        result = flat_values\n        for lengths in reversed(nested_row_lengths):\n            result = cls.from_row_lengths(result, lengths, validate=validate)\n        return result", "docstring": "Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.\n\nEquivalent to:\n\n```python\nresult = flat_values\nfor row_lengths in reversed(nested_row_lengths):\nresult = from_row_lengths(result, row_lengths)\n```\n\nArgs:\nflat_values: A potentially ragged tensor.\nnested_row_lengths: A list of 1-D integer tensors.  The `i`th tensor is\nused as the `row_lengths` for the `i`th ragged dimension.\nname: A name prefix for the RaggedTensor (optional).\nvalidate: If true, then use assertions to check that the arguments form\na valid `RaggedTensor`.  Note: these assertions incur a runtime cost,\nsince they must be checked for each tensor value.\n\nReturns:\nA `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).", "source": "github-repos"}
{"code": "def record(self, auth, resource, entries, options={}, defer=False):\n        \n        return self._call('record', auth, [resource, entries, options], defer)", "docstring": "Records a list of historical entries to the resource specified.\n\nNote: This API is depricated, use recordbatch instead.\n\nCalls a function that bulids a request that writes a list of historical entries to the\nspecified resource.\n\nArgs:\nauth: Takes the device cik\nresource: Takes the dataport alias or rid.\nentries: A list of entries to write to the resource.\noptions: Currently unused.", "source": "juraj-google-style"}
{"code": "def logs_urlpatterns(admin_view=lambda x: x):\n    \n    return [\n        url(r'^$',\n            admin_view(LogsMenu.as_view()),\n            name='logs'),\n        url(r'^status_codes$',\n            admin_view(LogsStatusCodes.as_view()),\n            name='logs_status_codes'),\n        url(r'^status_codes_by_date$',\n            admin_view(LogsStatusCodesByDate.as_view()),\n            name='logs_status_codes_by_date'),\n        url(r'^most_visited_pages$',\n            admin_view(LogsMostVisitedPages.as_view()),\n            name='logs_most_visited_pages')\n    ]", "docstring": "Return the URL patterns for the logs views.\n\nArgs:\nadmin_view (callable): admin_view method from an AdminSite instance.\n\nReturns:\nlist: the URL patterns for the logs views.", "source": "juraj-google-style"}
{"code": "def get_user_groups(name, sid=False):\n    if (name == 'SYSTEM'):\n        groups = [name]\n    else:\n        groups = win32net.NetUserGetLocalGroups(None, name)\n    if (not sid):\n        return groups\n    ret_groups = set()\n    for group in groups:\n        ret_groups.add(get_sid_from_name(group))\n    return ret_groups", "docstring": "Get the groups to which a user belongs\n\nArgs:\nname (str): The user name to query\nsid (bool): True will return a list of SIDs, False will return a list of\ngroup names\n\nReturns:\nlist: A list of group names or sids", "source": "codesearchnet"}
{"code": "def autocov(x):\n    acorr = autocorr(x)\n    varx = ((np.var(x, ddof=1) * (len(x) - 1)) / len(x))\n    acov = (acorr * varx)\n    return acov", "docstring": "Compute autocovariance estimates for every lag for the input array.\n\nArgs:\nx (array-like): An array containing MCMC samples.\n\nReturns:\nnp.ndarray: An array of the same size as the input array.", "source": "codesearchnet"}
{"code": "def lint(cls, document, is_saved, flags=''):\n    if (not is_saved):\n        return cls.last_diags[document.path]\n    path = document.path\n    if sys.platform.startswith('win'):\n        path = path.replace('\\\\', '/')\n    (out, _err) = py_run('{} -f json {}'.format(path, flags), return_std=True)\n    json_str = out.getvalue()\n    if (not json_str.strip()):\n        cls.last_diags[document.path] = []\n        return []\n    diagnostics = []\n    for diag in json.loads(json_str):\n        line = (diag['line'] - 1)\n        col = diag['column']\n        end_col = (len(document.lines[line]) if document.lines else 0)\n        err_range = {'start': {'line': line, 'character': col}, 'end': {'line': line, 'character': end_col}}\n        if (diag['type'] == 'convention'):\n            severity = lsp.DiagnosticSeverity.Information\n        elif (diag['type'] == 'error'):\n            severity = lsp.DiagnosticSeverity.Error\n        elif (diag['type'] == 'fatal'):\n            severity = lsp.DiagnosticSeverity.Error\n        elif (diag['type'] == 'refactor'):\n            severity = lsp.DiagnosticSeverity.Hint\n        elif (diag['type'] == 'warning'):\n            severity = lsp.DiagnosticSeverity.Warning\n        diagnostics.append({'source': 'pylint', 'range': err_range, 'message': '[{}] {}'.format(diag['symbol'], diag['message']), 'severity': severity, 'code': diag['message-id']})\n    cls.last_diags[document.path] = diagnostics\n    return diagnostics", "docstring": "Plugin interface to pyls linter.\n\nArgs:\ndocument: The document to be linted.\nis_saved: Whether or not the file has been saved to disk.\nflags: Additional flags to pass to pylint. Not exposed to\npyls_lint, but used for testing.\n\nReturns:\nA list of dicts with the following format:\n\n{\n'source': 'pylint',\n'range': {\n'start': {\n'line': start_line,\n'character': start_column,\n},\n'end': {\n'line': end_line,\n'character': end_column,\n},\n}\n'message': msg,\n'severity': lsp.DiagnosticSeverity.*,\n}", "source": "codesearchnet"}
{"code": "def validate_source_dir(script, directory):\n    \n    if directory:\n        if not os.path.isfile(os.path.join(directory, script)):\n            raise ValueError('No file named \"{}\" was found in directory \"{}\".'.format(script, directory))\n\n    return True", "docstring": "Validate that the source directory exists and it contains the user script\n\nArgs:\nscript (str):  Script filename.\ndirectory (str): Directory containing the source file.\n\nRaises:\nValueError: If ``directory`` does not exist, is not a directory, or does not contain ``script``.", "source": "juraj-google-style"}
{"code": "def __convertIp6PrefixStringToIp6Address(self, strIp6Prefix):\n        \n        prefix1 = strIp6Prefix.rstrip('L')\n        prefix2 = prefix1.lstrip(\"0x\")\n        hexPrefix = str(prefix2).ljust(16,'0')\n        hexIter = iter(hexPrefix)\n        finalMac = ':'.join(a + b + c + d for a,b,c,d in zip(hexIter, hexIter,hexIter,hexIter))\n        prefix = str(finalMac)\n        strIp6Prefix = prefix[:20]\n        return strIp6Prefix +':'", "docstring": "convert IPv6 prefix string to IPv6 dotted-quad format\nfor example:\n2001000000000000 -> 2001::\n\nArgs:\nstrIp6Prefix: IPv6 address string\n\nReturns:\nIPv6 address dotted-quad format", "source": "juraj-google-style"}
{"code": "def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):\n    \n    means = data.mean(1)\n    variances = data.var(1)\n    if (means > variances).any():\n        raise ValueError(\"For NB fit, means must be less than variances\")\n    genes, cells = data.shape\n    \n    P = 1.0 - means/variances\n    R = means*(1-P)/P\n    for i in range(genes):\n        result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),\n                bounds = [(0, 1), (eps, None)])\n        params = result.x\n        P[i] = params[0]\n        R[i] = params[1]\n        \n        \n    return P,R", "docstring": "Fits the NB distribution to data using method of moments.\n\nArgs:\ndata (array): genes x cells\nP_init (array, optional): NB success prob param - genes x 1\nR_init (array, optional): NB stopping param - genes x 1\n\nReturns:\nP, R - fit to data", "source": "juraj-google-style"}
{"code": "def yield_batch(iterable, batch_size, num_tensors=1):\n  \n  tensors = [[] for i in range(num_tensors)]\n  for item in iterable:\n    if item is None:\n      break\n    for i in range(num_tensors):\n      tmp = str(item[i]) if type(item[i]) is bytearray else item[i]\n      tensors[i].append(tmp)\n    if len(tensors[0]) >= batch_size:\n      yield tensors\n      tensors = [[] for i in range(num_tensors)]\n  if len(tensors[0]) > 0:\n      yield tensors", "docstring": "Generator that yields batches of a DataFrame iterator.\n\nArgs:\n:iterable: Spark partition iterator.\n:batch_size: number of items to retrieve per invocation.\n:num_tensors: number of tensors (columns) expected in each item.\n\nReturns:\nAn array of ``num_tensors`` arrays, each of length `batch_size`", "source": "juraj-google-style"}
{"code": "def count(self, axis=0, level=None, numeric_only=False):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        return self._reduce_dimension(\n            self._query_compiler.count(\n                axis=axis, level=level, numeric_only=numeric_only\n            )\n        )", "docstring": "Get the count of non-null objects in the DataFrame.\n\nArguments:\naxis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\nlevel: If the axis is a MultiIndex (hierarchical), count along a\nparticular level, collapsing into a DataFrame.\nnumeric_only: Include only float, int, boolean data\n\nReturns:\nThe count, in a Series (or DataFrame if level is specified).", "source": "juraj-google-style"}
{"code": "def get_metadata(self, key) -> str:\n    return (self.metadata[key] if (key in self.metadata) else None)", "docstring": "Get the value of a metadata. Returns None if metadata does not exist.\n\nArgs:\nkey (str): name of the metadata\n\nReturns:\nstr: the value of the metadata (or None)", "source": "codesearchnet"}
{"code": "def normalize_log_line_timestamp(log_line_timestamp):\n    return sanitize_filename(log_line_timestamp)", "docstring": "Replace special characters in log line timestamp with normal characters.\n\n.. deprecated:: 1.10\n\nThis method is obsolete with the more general `sanitize_filename` method\nand is only kept for backwards compatibility. In a future update, this\nmethod may be removed.\n\nArgs:\nlog_line_timestamp: A string in the log line timestamp format. Obtained\nwith get_log_line_timestamp.\n\nReturns:\nA string representing the same time as input timestamp, but without\nspecial characters.", "source": "github-repos"}
{"code": "def GetHashers(cls, hasher_names):\n    hashers = []\n    for (hasher_name, hasher_class) in iter(cls._hasher_classes.items()):\n        if (hasher_name in hasher_names):\n            hashers.append(hasher_class())\n    return hashers", "docstring": "Retrieves instances for all the specified hashers.\n\nArgs:\nhasher_names (list[str]): names of the hashers to retrieve.\n\nReturns:\nlist[BaseHasher]: hashers.", "source": "codesearchnet"}
{"code": "def operator(name=None, operators=None, aliases=None, kind=None):\n\n    def delegator(assertion, subject, expected, *args, **kw):\n        return assertion.test(subject, expected, *args, **kw)\n\n    def decorator(fn):\n        operator = Operator(fn=fn, aliases=aliases, kind=kind)\n        _name = (name if isinstance(name, six.string_types) else fn.__name__)\n        operator.operators = (_name,)\n        _operators = operators\n        if isinstance(_operators, list):\n            _operators = tuple(_operators)\n        if isinstance(_operators, tuple):\n            operator.operators += _operators\n        Engine.register(operator)\n        return functools.partial(delegator, operator)\n    return (decorator(name) if inspect.isfunction(name) else decorator)", "docstring": "Registers a new operator function in the test engine.\n\nArguments:\n*args: variadic arguments.\n**kw: variadic keyword arguments.\n\nReturns:\nfunction", "source": "codesearchnet"}
{"code": "def addAllowMAC(self, xEUI):\n        \n        print '%s call addAllowMAC' % self.port\n        print xEUI\n        if isinstance(xEUI, str):\n            macAddr = xEUI\n        else:\n            macAddr = self.__convertLongToString(xEUI)\n\n        try:\n            if self._addressfilterMode != 'whitelist':\n                if self.__setAddressfilterMode('Whitelist'):\n                    self._addressfilterMode = 'whitelist'\n\n            cmd = WPANCTL_CMD + 'insert MAC:Whitelist:Entries %s' % macAddr\n            ret = self.__sendCommand(cmd)[0] != 'Fail'\n\n            self._addressfilterSet.add(macAddr)\n            print 'current whitelist entries:'\n            for addr in self._addressfilterSet:\n                print addr\n            return ret\n\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('addAllowMAC() Error: ' + str(e))", "docstring": "add a given extended address to the whitelist addressfilter\n\nArgs:\nxEUI: a given extended address in hex format\n\nReturns:\nTrue: successful to add a given extended address to the whitelist entry\nFalse: fail to add a given extended address to the whitelist entry", "source": "juraj-google-style"}
{"code": "def build_example(label, param_dict_real, zip_path_label):\n    np.random.seed(RANDOM_SEED)\n    report = {'tflite_converter': report_lib.NOTRUN, 'tf': report_lib.FAILED}\n    report['tf_log'] = ''\n    report['tflite_converter_log'] = ''\n    tf.compat.v1.reset_default_graph()\n    with tf.Graph().as_default():\n        with tf.device('/cpu:0'):\n            try:\n                inputs, outputs = make_graph(param_dict_real)\n                inputs = [x for x in inputs if x is not None]\n            except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError):\n                report['tf_log'] += traceback.format_exc()\n                return (None, report)\n        sess = tf.compat.v1.Session()\n        try:\n            baseline_inputs, baseline_outputs = make_test_inputs(param_dict_real, sess, inputs, outputs)\n            baseline_inputs = [x for x in baseline_inputs if x is not None]\n            input_names = [_normalize_input_name(x.name) for x in inputs]\n            output_names = [_normalize_output_name(x.name) for x in outputs]\n            baseline_input_map = dict(zip(input_names, baseline_inputs))\n            baseline_output_map = dict(zip(output_names, baseline_outputs))\n        except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError, ValueError):\n            report['tf_log'] += traceback.format_exc()\n            return (None, report)\n        report['tflite_converter'] = report_lib.FAILED\n        report['tf'] = report_lib.SUCCESS\n        input_names, tensor_info_inputs = _get_tensor_info(inputs, 'input_', _normalize_input_name)\n        output_tensors, tensor_info_outputs = _get_tensor_info(outputs, 'output_', _normalize_output_name)\n        input_tensors = [(name, t.shape, t.dtype) for name, t in zip(input_names, inputs)]\n        inference_signature = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(inputs=tensor_info_inputs, outputs=tensor_info_outputs, method_name='op_test')\n        saved_model_dir = tempfile.mkdtemp('op_test')\n        saved_model_tags = [tf.saved_model.SERVING]\n        signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n        builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(saved_model_dir)\n        builder.add_meta_graph_and_variables(sess, saved_model_tags, signature_def_map={signature_key: inference_signature}, strip_default_attrs=True)\n        builder.save(as_text=False)\n        graph_def = freeze_graph(sess, tf.compat.v1.global_variables() + inputs + outputs) if use_frozen_graph else sess.graph_def\n    if 'split_tflite_lstm_inputs' in param_dict_real:\n        extra_convert_options.split_tflite_lstm_inputs = param_dict_real['split_tflite_lstm_inputs']\n    tflite_model_binary, converter_log = options.tflite_convert_function(options, saved_model_dir, input_tensors, output_tensors, extra_convert_options=extra_convert_options, test_params=param_dict_real)\n    report['tflite_converter'] = report_lib.SUCCESS if tflite_model_binary is not None else report_lib.FAILED\n    report['tflite_converter_log'] = converter_log\n    if options.save_graphdefs:\n        zipinfo = zipfile.ZipInfo(zip_path_label + '.pbtxt')\n        archive.writestr(zipinfo, text_format.MessageToString(graph_def), zipfile.ZIP_DEFLATED)\n    if tflite_model_binary:\n        if options.make_edgetpu_tests:\n            baseline_input_map, baseline_output_map = generate_inputs_outputs(tflite_model_binary, min_value=0, max_value=255)\n        zipinfo = zipfile.ZipInfo(zip_path_label 
+ '.bin')\n        if sys.byteorder == 'big':\n            tflite_model_binary = flatbuffer_utils.byte_swap_tflite_buffer(tflite_model_binary, 'big', 'little')\n        archive.writestr(zipinfo, tflite_model_binary, zipfile.ZIP_DEFLATED)\n        example = {'inputs': baseline_input_map, 'outputs': baseline_output_map}\n        example_fp = io.StringIO()\n        write_examples(example_fp, [example])\n        zipinfo = zipfile.ZipInfo(zip_path_label + '.inputs')\n        archive.writestr(zipinfo, example_fp.getvalue(), zipfile.ZIP_DEFLATED)\n        example_fp2 = io.StringIO()\n        write_test_cases(example_fp2, zip_path_label + '.bin', [example])\n        zipinfo = zipfile.ZipInfo(zip_path_label + '_tests.txt')\n        archive.writestr(zipinfo, example_fp2.getvalue(), zipfile.ZIP_DEFLATED)\n        zip_manifest_label = zip_path_label + ' ' + label\n        if zip_path_label == label:\n            zip_manifest_label = zip_path_label\n        zip_manifest.append(zip_manifest_label + '\\n')\n    return (tflite_model_binary, report)", "docstring": "Build the model with parameter values set in param_dict_real.\n\nArgs:\nlabel: Label of the model\nparam_dict_real: Parameter dictionary (arguments to the factories\nmake_graph and make_test_inputs)\nzip_path_label: Filename in the zip\n\nReturns:\n(tflite_model_binary, report) where tflite_model_binary is the\nserialized flatbuffer as a string and report is a dictionary with\nkeys `tflite_converter_log` (log of conversion), `tf_log` (log of tf\nconversion), `converter` (a string of success status of the\nconversion), `tf` (a string success status of the conversion).", "source": "github-repos"}
{"code": "def tokenize(self, text: TextInput, **kwargs) -> list[str]:\n    split_special_tokens = kwargs.pop('split_special_tokens', self.split_special_tokens)\n    text, kwargs = self.prepare_for_tokenization(text, **kwargs)\n    if kwargs:\n        logger.warning(f'Keyword arguments {kwargs} not recognized.')\n    if hasattr(self, 'do_lower_case') and self.do_lower_case:\n        escaped_special_toks = [re.escape(s_tok) for s_tok in self.all_special_tokens]\n        escaped_special_toks += [re.escape(s_tok.content) for s_tok in self._added_tokens_decoder.values() if not s_tok.special and s_tok.normalized]\n        pattern = '(' + '|'.join(escaped_special_toks) + ')|' + '(.+?)'\n        text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)\n    if split_special_tokens:\n        no_split_token = []\n        tokens = [text]\n    else:\n        no_split_token = self._added_tokens_encoder.keys()\n        tokens = self.tokens_trie.split(text)\n    for i, token in enumerate(tokens):\n        if token in no_split_token:\n            tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token], None)\n            left = tokens[i - 1] if i > 0 else None\n            right = tokens[i + 1] if i < len(tokens) - 1 else None\n            if isinstance(tok_extended, AddedToken):\n                if tok_extended.rstrip and right:\n                    tokens[i + 1] = right.lstrip()\n                if tok_extended.lstrip and left:\n                    tokens[i - 1] = left.rstrip()\n                if tok_extended.single_word and left and (left[-1] != ' '):\n                    tokens[i - 1] += token\n                    tokens[i] = ''\n                elif tok_extended.single_word and right and (right[0] != ' '):\n                    tokens[i + 1] = token + tokens[i + 1]\n                    tokens[i] = ''\n            else:\n                raise ValueError(f'{tok_extended} cannot be tokenized because it was not properly added to the tokenizer. This means that it is not an `AddedToken` but a {type(tok_extended)}')\n    tokenized_text = []\n    for token in tokens:\n        if not token:\n            continue\n        if token in no_split_token:\n            tokenized_text.append(token)\n        else:\n            tokenized_text.extend(self._tokenize(token))\n    return tokenized_text", "docstring": "Converts a string into a sequence of tokens, using the tokenizer.\n\nSplit in words for word-based vocabulary or sub-words for sub-word-based vocabularies\n(BPE/SentencePieces/WordPieces). Takes care of added tokens.\n\nArgs:\ntext (`str`):\nThe sequence to be encoded.\n**kwargs (additional keyword arguments):\nPassed along to the model-specific `prepare_for_tokenization` preprocessing method.\n\nReturns:\n`List[str]`: The list of tokens.", "source": "github-repos"}
{"code": "def new_reviewer(self, name, anomalous=None):\n        \n        n = self._reviewer_cls(\n            self, name=name, credibility=self.credibility, anomalous=anomalous)\n        self.graph.add_node(n)\n        self.reviewers.append(n)\n        return n", "docstring": "Create a new reviewer.\n\nArgs:\nname: name of the new reviewer.\nanomalous: initial anomalous score. (default: None)\n\nReturns:\nA new reviewer instance.", "source": "juraj-google-style"}
{"code": "def box_area(boxes: Tensor) -> Tensor:\n    boxes = _upcast(boxes)\n    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "docstring": "Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.\n\nArgs:\nboxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):\nBoxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1\n< x2` and `0 <= y1 < y2`.\n\nReturns:\n`torch.FloatTensor`: a tensor containing the area for each box.", "source": "github-repos"}
{"code": "def __init__(self, model, generation_config: GenerationConfig, manual_eviction: bool=False, max_queue_size=0, streaming: bool=True):\n    self.model = model\n    self.generation_config = generation_config\n    self.input_queue = queue.Queue(maxsize=max_queue_size)\n    self.output_queue = queue.Queue()\n    self.stop_event = threading.Event()\n    self.streaming = streaming\n    self.log_prob_generation = getattr(generation_config, 'log_prob_generation', False)\n    self._generation_thread = None\n    self._request_counter = 0\n    self._request_lock = threading.Lock()\n    self.model.generation_config.top_p = None\n    self.do_sample = getattr(generation_config, 'do_sample', True)\n    self.logit_processor = self.model._get_logits_processor(self.model.generation_config)\n    self.use_cuda_graph = getattr(generation_config, 'use_cuda_graph', True)\n    self.profile = getattr(generation_config, 'profile', False)\n    self.manual_eviction = manual_eviction\n    self.batch_processor: Optional[ContinuousBatchProcessor] = None", "docstring": "Initialize the continuous batching manager.\n\nArgs:\nmodel: The language model for generation\ngeneration_config: Configuration for generation parameters\nmax_queue_size: Maximum size of the request queue (0 = unlimited)\nstreaming: Whether to stream tokens as they are generated", "source": "github-repos"}
{"code": "def _IsMetadataFile(self, file_entry):\n    \n    if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and\n        file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK):\n      return True\n\n    return False", "docstring": "Determines if the file entry is a metadata file.\n\nArgs:\nfile_entry (dfvfs.FileEntry): a file entry object.\n\nReturns:\nbool: True if the file entry is a metadata file.", "source": "juraj-google-style"}
{"code": "def list_storage_accounts_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Storage/storageAccounts', '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "List the storage accounts in the specified subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body list of storage accounts.", "source": "codesearchnet"}
{"code": "def __str__(self):\n    return self.str_internal()", "docstring": "Generates a useful string for this object.\n\nCompactly displays interesting fields.  In particular, pickled\nfields are not displayed.  Note that we collapse the fields of the\ncontained Worker* object into this object, since there is a 1-1\nmapping between Operation and operation_specs.Worker*.\n\nReturns:\nCompact string representing this object.", "source": "github-repos"}
{"code": "def _broadcast(value, target):\n  \n  return tf.broadcast_to(\n      tf.convert_to_tensor(value=value, dtype=target.dtype),\n      distribution_util.prefer_static_shape(target)[:-1])", "docstring": "Broadcast a value to match the batching dimensions of a target.\n\nIf necessary the value is converted into a tensor. Both value and target\nshould be of the same dtype.\n\nArgs:\nvalue: A value to broadcast.\ntarget: A `Tensor` of shape [b1, ..., bn, d].\n\nReturns:\nA `Tensor` of shape [b1, ..., bn] and same dtype as the target.", "source": "juraj-google-style"}
{"code": "def get_section_header(self, section):\n        \n\n        self._ensure_section_headers_loaded()\n        if type(section) is int:\n            return self._section_headers_by_index[section]\n        else:\n            return self._section_headers_by_name[section]", "docstring": "Get a specific section header by index or name.\n\nArgs:\nsection(int or str): The index or name of the section header to return.\n\nReturns:\n:class:`~ELF.SectionHeader`: The section header.\n\nRaises:\nKeyError: The requested section header does not exist.", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._object_type:\n        self._object_type.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The DeriveKey request payload is missing the object type field.')\n    if self._unique_identifiers:\n        for unique_identifier in self._unique_identifiers:\n            unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The DeriveKey request payload is missing the unique identifiers field.')\n    if self._derivation_method:\n        self._derivation_method.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The DeriveKey request payload is missing the derivation method field.')\n    if self._derivation_parameters:\n        self._derivation_parameters.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The DeriveKey request payload is missing the derivation parameters field.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self._template_attribute:\n            self._template_attribute.write(local_buffer, kmip_version=kmip_version)\n        else:\n            raise exceptions.InvalidField('The DeriveKey request payload is missing the template attribute field.')\n    elif self._template_attribute:\n        attrs = objects.convert_template_attribute_to_attributes(self._template_attribute)\n        attrs.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The DeriveKey request payload is missing the template attribute field.')\n    self.length = local_buffer.length()\n    super(DeriveKeyRequestPayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the DeriveKey request payload to a stream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def edge(self, tail_name, head_name, label=None, _attributes=None, **attrs):\n        \n        tail_name = self._quote_edge(tail_name)\n        head_name = self._quote_edge(head_name)\n        attr_list = self._attr_list(label, attrs, _attributes)\n        line = self._edge % (tail_name, head_name, attr_list)\n        self.body.append(line)", "docstring": "Create an edge between two nodes.\n\nArgs:\ntail_name: Start node identifier.\nhead_name: End node identifier.\nlabel: Caption to be displayed near the edge.\nattrs: Any additional edge attributes (must be strings).", "source": "juraj-google-style"}
{"code": "def f():\n    return constant_op.constant(1)", "docstring": "First sentence.\n\nSecond sentence.\n\nReturns:\nSomething.", "source": "github-repos"}
{"code": "def set_precision(predictions, labels,\n                  weights_fn=common_layers.weights_nonzero):\n  \n  with tf.variable_scope(\"set_precision\", values=[predictions, labels]):\n    labels = tf.squeeze(labels, [2, 3])\n    weights = weights_fn(labels)\n    labels = tf.one_hot(labels, predictions.shape[-1])\n    labels = tf.reduce_max(labels, axis=1)\n    labels = tf.cast(labels, tf.bool)\n    return tf.to_float(tf.equal(labels, predictions)), weights", "docstring": "Precision of set predictions.\n\nArgs:\npredictions : A Tensor of scores of shape [batch, nlabels].\nlabels: A Tensor of int32s giving true set elements,\nof shape [batch, seq_length].\nweights_fn: A function to weight the elements.\n\nReturns:\nhits: A Tensor of shape [batch, nlabels].\nweights: A Tensor of shape [batch, nlabels].", "source": "juraj-google-style"}
{"code": "def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):\n    is_even = kernel_size % 2 == 0\n    half_size = kernel_size \n    delta_f = 4 * half_width\n    attenuation = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95\n    if attenuation > 50.0:\n        beta = 0.1102 * (attenuation - 8.7)\n    elif attenuation >= 21.0:\n        beta = 0.5842 * (attenuation - 21) ** 0.4 + 0.07886 * (attenuation - 21.0)\n    else:\n        beta = 0.0\n    kaiser_window = torch.kaiser_window(kernel_size, beta=beta, periodic=False, dtype=torch.float32)\n    if is_even:\n        time_indices = torch.arange(-half_size, half_size) + 0.5\n    else:\n        time_indices = torch.arange(kernel_size) - half_size\n    if cutoff == 0:\n        return torch.zeros((1, 1, kernel_size), dtype=torch.float32)\n    sinc_filter = torch.sinc(2 * cutoff * time_indices)\n    normalized_filter = 2 * cutoff * kaiser_window * sinc_filter\n    normalized_filter /= normalized_filter.sum()\n    return normalized_filter.view(1, 1, kernel_size)", "docstring": "Generates a 1D Kaiser-windowed sinc filter.\n\nArgs:\ncutoff (float): Normalized cutoff frequency (0 to 0.5).\nhalf_width (float): Transition bandwidth.\nkernel_size (int): Number of filter taps.\n\nReturns:\ntorch.Tensor: A tensor of shape (1, 1, kernel_size) representing the filter.", "source": "github-repos"}
{"code": "def transform(self, X, y=None):\n        \n        word_ids = [self._word_vocab.doc2id(doc) for doc in X]\n        word_ids = pad_sequences(word_ids, padding='post')\n\n        if self._use_char:\n            char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]\n            char_ids = pad_nested_sequences(char_ids)\n            features = [word_ids, char_ids]\n        else:\n            features = word_ids\n\n        if y is not None:\n            y = [self._label_vocab.doc2id(doc) for doc in y]\n            y = pad_sequences(y, padding='post')\n            y = to_categorical(y, self.label_size).astype(int)\n            \n            \n            \n            \n            \n            \n            y = y if len(y.shape) == 3 else np.expand_dims(y, axis=0)\n            return features, y\n        else:\n            return features", "docstring": "Transform documents to document ids.\n\nUses the vocabulary learned by fit.\n\nArgs:\nX : iterable\nan iterable which yields either str, unicode or file objects.\ny : iterabl, label strings.\n\nReturns:\nfeatures: document id matrix.\ny: label id matrix.", "source": "juraj-google-style"}
{"code": "def __init__(self, caption, content, enabled=True):\n    self._caption = caption\n    self._content = content\n    self._enabled = enabled", "docstring": "Menu constructor.\n\nTODO(cais): Nested menu is currently not supported. Support it.\n\nArgs:\ncaption: (str) caption of the menu item.\ncontent: Content of the menu item. For a menu item that triggers\na command, for example, content is the command string.\nenabled: (bool) whether this menu item is enabled.", "source": "github-repos"}
{"code": "def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):\n    neighbor_x = self.x[(::stride, ::stride)]\n    neighbor_y = self.y[(::stride, ::stride)]\n    neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)\n    neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))\n    print('Forecast Hours: {0}-{1}'.format(start_time, end_time))\n    for m in range(len(self.members)):\n        period_max = self.data[(m, start_time:end_time, :, :)].max(axis=0)\n        (valid_i, valid_j) = np.where((period_max >= threshold))\n        print(self.members[m], len(valid_i))\n        if (len(valid_i) > 0):\n            var_kd_tree = cKDTree(np.vstack((self.x[(valid_i, valid_j)], self.y[(valid_i, valid_j)])).T)\n            exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)\n            (exceed_i, exceed_j) = np.unravel_index(exceed_points, neighbor_x.shape)\n            neighbor_prob[m][(exceed_i, exceed_j)] = 1\n            if (smoothing > 0):\n                neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing, mode='constant')\n    return neighbor_prob", "docstring": "Calculate the neighborhood probability over the full period of the forecast\n\nArgs:\nradius: circular radius from each point in km\nsmoothing: width of Gaussian smoother in km\nthreshold: intensity of exceedance\nstride: number of grid points to skip for reduced neighborhood grid\n\nReturns:\n(neighborhood probabilities)", "source": "codesearchnet"}
{"code": "def generic_object_comparison(lhs, rhs, lhs_path, rhs_path, max_depth):\n    if id(lhs) == id(rhs):\n        return 0\n    if type(lhs) != type(rhs):\n        return compare(str(type(lhs)), str(type(rhs)))\n    if type(lhs) in [int, float, bool, str, bool, bytes, bytearray]:\n        return compare(lhs, rhs)\n    if isinstance(lhs, enum.Enum):\n        return compare(lhs.name, rhs.name)\n    max_depth -= 1\n    if max_depth < 0:\n        return 0\n    if id(lhs) in lhs_path or id(rhs) in rhs_path:\n        return 0\n    lhs_path.append(id(lhs))\n    rhs_path.append(id(rhs))\n    result = _generic_object_comparison_recursive_path(lhs, rhs, lhs_path, rhs_path, max_depth)\n    lhs_path.pop()\n    rhs_path.pop()\n    return result", "docstring": "Identifies which object goes first in an (almost) total order of objects.\n\nArgs:\nlhs: An arbitrary Python object or built-in type.\nrhs: An arbitrary Python object or built-in type.\nlhs_path: Traversal path from the root lhs object up to, but not including,\nlhs. The original contents of lhs_path are restored before the function\nreturns.\nrhs_path: Same as lhs_path except for the rhs.\nmax_depth: Maximum recursion depth.\n\nReturns:\n-1, 0, or 1 depending on whether lhs or rhs goes first in the total order.\n0 if max_depth is exhausted.\n0 if lhs is in lhs_path or rhs is in rhs_path (there is a cycle).", "source": "github-repos"}
{"code": "def publish(self, subject, msg, reply=None):\n        \n        if msg is None:\n            msg = ''\n\n        if reply is None:\n            command = 'PUB %s %d' % (subject, len(msg))\n        else:\n            command = 'PUB %s %s %d' % (subject, reply, len(msg))\n\n        self._send(command)\n        self._send(msg)", "docstring": "Publish publishes the data argument to the given subject.\n\nArgs:\nsubject (string): a string with the subject\nmsg (string): payload string\nreply (string): subject used in the reply", "source": "juraj-google-style"}
{"code": "def remove_alias(type_):\n    if isinstance(type_, cpptypes.type_t):\n        type_ref = type_\n    elif isinstance(type_, typedef.typedef_t):\n        type_ref = type_.decl_type\n    else:\n        return type_\n    if type_ref.cache.remove_alias:\n        return type_ref.cache.remove_alias\n    no_alias = __remove_alias(type_ref.clone())\n    type_ref.cache.remove_alias = no_alias\n    return no_alias", "docstring": "Returns `type_t` without typedef\n\nArgs:\ntype_ (type_t | declaration_t): type or declaration\n\nReturns:\ntype_t: the type associated to the inputted declaration", "source": "codesearchnet"}
{"code": "def _check_id(entity, entity_type):\n    \n\n    if entity is None:\n        raise ParseError('{} ID missing'.format(entity_type))\n    elif not isinstance(entity, string_types):\n        msg = '{} ID must be a string, id was {}.'.format(entity_type, entity)\n        if isinstance(entity, bool):\n            msg += (' You may have accidentally used an ID value that YAML'\n                    ' interprets as a boolean, such as \"yes\", \"no\", \"on\",'\n                    ' \"off\", \"true\" or \"false\". To use this ID, you have to'\n                    ' quote it with single or double quotes')\n        raise ParseError(msg)\n    elif len(entity) == 0:\n        raise ParseError('{} ID must not be empty'.format(entity_type))", "docstring": "Check whether the ID is valid.\n\nFirst check if the ID is missing, and then check if it is a qualified\nstring type, finally check if the string is empty. For all checks, it\nwould raise a ParseError with the corresponding message.\n\nArgs:\nentity: a string type object to be checked.\nentity_type: a string that shows the type of entities to check, usually\n`Compound` or 'Reaction'.", "source": "juraj-google-style"}
{"code": "def pauli_from_char(ch, n=0):\n    ch = ch.upper()\n    if (ch == 'I'):\n        return I\n    if (ch == 'X'):\n        return X(n)\n    if (ch == 'Y'):\n        return Y(n)\n    if (ch == 'Z'):\n        return Z(n)\n    raise ValueError('ch shall be X, Y, Z or I')", "docstring": "Make Pauli matrix from an character.\n\nArgs:\nch (str): \"X\" or \"Y\" or \"Z\" or \"I\".\nn (int, optional): Make Pauli matrix as n-th qubits.\n\nReturns:\nIf ch is \"X\" => X, \"Y\" => Y, \"Z\" => Z, \"I\" => I\n\nRaises:\nValueError: When ch is not \"X\", \"Y\", \"Z\" nor \"I\".", "source": "codesearchnet"}
{"code": "def match_variables(self, pattern, return_type='name'):\n        \n        pattern = re.compile(pattern)\n        vars_ = [v for v in self.variables.values() if pattern.search(v.name)]\n        return vars_ if return_type.startswith('var') \\\n            else [v.name for v in vars_]", "docstring": "Return columns whose names match the provided regex pattern.\n\nArgs:\npattern (str): A regex pattern to match all variable names against.\nreturn_type (str): What to return. Must be one of:\n'name': Returns a list of names of matching variables.\n'variable': Returns a list of Variable objects whose names\nmatch.", "source": "juraj-google-style"}
{"code": "def _create_controller_info_record(self, controller_module_name):\n    module = self._controller_modules[controller_module_name]\n    controller_info = None\n    try:\n        controller_info = module.get_info(copy.copy(self._controller_objects[controller_module_name]))\n    except AttributeError:\n        logging.warning('No optional debug info found for controller %s. To provide it, implement `get_info`.', controller_module_name)\n    try:\n        yaml.dump(controller_info)\n    except TypeError:\n        logging.warning('The info of controller %s in class \"%s\" is not YAML serializable! Coercing it to string.', controller_module_name, self._class_name)\n        controller_info = str(controller_info)\n    return records.ControllerInfoRecord(self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME, controller_info)", "docstring": "Creates controller info record for a particular controller type.\n\nInfo is retrieved from all the controller objects spawned from the\nspecified module, using the controller module's `get_info` function.\n\nArgs:\ncontroller_module_name: string, the name of the controller module\nto retrieve info from.\n\nReturns:\nA records.ControllerInfoRecord object.", "source": "codesearchnet"}
{"code": "def returnListOfConfigurationValues(util):\n    \n\n    VALUES = {}\n\n    \n    configPath = os.path.join(getConfigPath()[\"appPath\"], \"general.cfg\")\n\n    \n    if not os.path.exists(configPath):\n        \n        defaultConfigPath = os.path.join(getConfigPath()[\"appPathDefaults\"], \"general.cfg\")\n\n        try:\n            \n            with open(defaultConfigPath) as iF:\n                cont = iF.read()\n                \n                with open(configPath, \"w\") as oF:\n                    oF.write(cont)\n        except Exception as e:\n            raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath);\n\n    \n    config = ConfigParser.ConfigParser()\n    config.read(configPath)\n\n    LISTS = [\"tlds\", \"domains\", \"platforms\", \"extension\", \"exclude_platforms\", \"exclude_domains\"]\n\n    \n    for section in config.sections():\n        incomplete = False\n        if section.lower() == util.lower():\n            \n            for (param, value) in config.items(section):\n                if value == '':\n                    \n                    if param in LISTS:\n                        value = []\n                    else:\n                        value = \"\"\n                \n                elif param in LISTS:\n                    value = value.split(' ')\n                \n                elif param == \"threads\":\n                    try:\n                        value = int(value)\n                    except Exception as err:\n                        raise errors.ConfigurationParameterNotValidError(configPath, section, param, value)\n                elif param == \"debug\":\n                    try:\n                        if int(value) == 0:\n                            value = False\n                        else:\n                            value = True\n                    except Exception as err:\n                        print(\"Something happened when processing this debug option. Resetting to default.\")\n                        \n                        defaultConfigPath = os.path.join(getConfigPath()[\"appPathDefaults\"], \"general.cfg\")\n\n                        try:\n                            \n                            with open(defaultConfigPath) as iF:\n                                cont = iF.read()\n                                \n                                with open(configPath, \"w\") as oF:\n                                    oF.write(cont)\n                        except Exception as e:\n                            raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath);\n\n                        \n                VALUES[param] = value\n            break\n\n    return VALUES", "docstring": "Method that recovers the configuration information about each program\n\nTODO: Grab the default file from the package data instead of storing it in\nthe main folder.\n\nArgs:\n-----\nutil: Any of the utils that are contained in the framework: domainfy,\nentify, mailfy, phonefy, searchfy, usufy.\n\nReturns:\n--------\nA dictionary containing the default configuration.", "source": "juraj-google-style"}
{"code": "def get_message(self, metadata=False, asctime=True):\n        \n        msg = self.msg if is_string(self.msg) else str(self.msg)\n        if self.args:\n            try:\n                msg = msg % self.args\n            except:\n                msg += str(self.args)\n\n        if asctime: msg = \"[\" + self.asctime + \"] \" + msg\n\n        \n        if metadata:\n            msg += \"\\nCalled by %s at %s:%s\\n\" % (self.func_name, self.pathname, self.lineno)\n\n        return msg", "docstring": "Return the message after merging any user-supplied arguments with the message.\n\nArgs:\nmetadata: True if function and module name should be added.\nasctime: True if time string should be added.", "source": "juraj-google-style"}
{"code": "def segmentation_to_mask(polys, height, width):\n    \n    polys = [p.flatten().tolist() for p in polys]\n    assert len(polys) > 0, \"Polygons are empty!\"\n\n    import pycocotools.mask as cocomask\n    rles = cocomask.frPyObjects(polys, height, width)\n    rle = cocomask.merge(rles)\n    return cocomask.decode(rle)", "docstring": "Convert polygons to binary masks.\n\nArgs:\npolys: a list of nx2 float array. Each array contains many (x, y) coordinates.\n\nReturns:\na binary matrix of (height, width)", "source": "juraj-google-style"}
{"code": "def _build_document_scrapers(cls, session: AppSession):\n    html_parser = session.factory['HTMLParser']\n    element_walker = session.factory.new('ElementWalker')\n    scrapers = [session.factory.new('HTMLScraper', html_parser, element_walker, followed_tags=session.args.follow_tags, ignored_tags=session.args.ignore_tags, only_relative=session.args.relative, robots=session.args.robots, encoding_override=session.args.remote_encoding)]\n    if ('css' in session.args.link_extractors):\n        css_scraper = session.factory.new('CSSScraper', encoding_override=session.args.remote_encoding)\n        scrapers.append(css_scraper)\n        element_walker.css_scraper = css_scraper\n    if ('javascript' in session.args.link_extractors):\n        javascript_scraper = session.factory.new('JavaScriptScraper', encoding_override=session.args.remote_encoding)\n        scrapers.append(javascript_scraper)\n        element_walker.javascript_scraper = javascript_scraper\n    if session.args.sitemaps:\n        scrapers.append(session.factory.new('SitemapScraper', html_parser, encoding_override=session.args.remote_encoding))\n    return scrapers", "docstring": "Create the document scrapers.\n\nReturns:\nA list of document scrapers", "source": "codesearchnet"}
{"code": "def _set_state(self, shard_state, tstate, task_directive):\n    if (task_directive in (self._TASK_DIRECTIVE.RETRY_TASK, self._TASK_DIRECTIVE.DROP_TASK)):\n        return task_directive\n    if (task_directive == self._TASK_DIRECTIVE.ABORT_SHARD):\n        shard_state.set_for_abort()\n        return task_directive\n    if (task_directive == self._TASK_DIRECTIVE.PROCEED_TASK):\n        shard_state.advance_for_next_slice()\n        tstate.advance_for_next_slice()\n        return task_directive\n    if (task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE):\n        tstate.advance_for_next_slice(recovery_slice=True)\n        shard_state.advance_for_next_slice(recovery_slice=True)\n        return task_directive\n    if (task_directive == self._TASK_DIRECTIVE.RETRY_SLICE):\n        task_directive = self._attempt_slice_retry(shard_state, tstate)\n    if (task_directive == self._TASK_DIRECTIVE.RETRY_SHARD):\n        task_directive = self._attempt_shard_retry(shard_state, tstate)\n    if (task_directive == self._TASK_DIRECTIVE.FAIL_TASK):\n        shard_state.set_for_failure()\n    return task_directive", "docstring": "Set shard_state and tstate based on task_directive.\n\nArgs:\nshard_state: model.ShardState for current shard.\ntstate: model.TransientShardState for current shard.\ntask_directive: self._TASK_DIRECTIVE for current shard.\n\nReturns:\nA _TASK_DIRECTIVE enum.\nPROCEED_TASK if task should proceed normally.\nRETRY_SHARD if shard should be retried.\nRETRY_SLICE if slice should be retried.\nFAIL_TASK if sahrd should fail.\nRECOVER_SLICE if slice should be recovered.\nABORT_SHARD if shard should be aborted.\nRETRY_TASK if task should be retried.\nDROP_TASK if task should be dropped.", "source": "codesearchnet"}
{"code": "def add_multiple_to_queue(self, items, container=None):\n        \n        if container is not None:\n            container_uri = container.resources[0].uri\n            container_metadata = to_didl_string(container)\n        else:\n            container_uri = ''  \n            container_metadata = ''  \n\n        chunk_size = 16  \n        item_list = list(items)  \n        for index in range(0, len(item_list), chunk_size):\n            chunk = item_list[index:index + chunk_size]\n            uris = ' '.join([item.resources[0].uri for item in chunk])\n            uri_metadata = ' '.join([to_didl_string(item) for item in chunk])\n            self.avTransport.AddMultipleURIsToQueue([\n                ('InstanceID', 0),\n                ('UpdateID', 0),\n                ('NumberOfURIs', len(chunk)),\n                ('EnqueuedURIs', uris),\n                ('EnqueuedURIsMetaData', uri_metadata),\n                ('ContainerURI', container_uri),\n                ('ContainerMetaData', container_metadata),\n                ('DesiredFirstTrackNumberEnqueued', 0),\n                ('EnqueueAsNext', 0)\n            ])", "docstring": "Add a sequence of items to the queue.\n\nArgs:\nitems (list): A sequence of items to the be added to the queue\ncontainer (DidlObject, optional): A container object which\nincludes the items.", "source": "juraj-google-style"}
{"code": "def create_from(cls, backend):\n        \n        backend_config = backend.configuration()\n\n        \n        try:\n            backend_default = backend.defaults()\n        except ModelValidationError:\n            from collections import namedtuple\n            BackendDefault = namedtuple('BackendDefault', ('qubit_freq_est', 'meas_freq_est'))\n\n            backend_default = BackendDefault(\n                qubit_freq_est=backend_config.defaults['qubit_freq_est'],\n                meas_freq_est=backend_config.defaults['meas_freq_est']\n            )\n\n        \n        n_qubits = backend_config.n_qubits\n        n_registers = backend_config.n_registers\n        n_uchannels = backend_config.n_uchannels\n\n        if n_uchannels > 0 and n_uchannels != n_qubits:\n            raise PulseError(\"This version assumes no U-channels or \n\n        \n        qubit_lo_freqs = backend_default.qubit_freq_est\n        qubit_lo_ranges = backend_config.qubit_lo_range\n        meas_lo_freqs = backend_default.meas_freq_est\n        meas_lo_ranges = backend_config.meas_lo_range\n\n        \n        drives = [\n            DriveChannel(i, qubit_lo_freqs[i], tuple(qubit_lo_ranges[i]))\n            for i in range(n_qubits)\n        ]\n        measures = [\n            MeasureChannel(i, meas_lo_freqs[i], tuple(meas_lo_ranges[i]))\n            for i in range(n_qubits)\n        ]\n        acquires = [AcquireChannel(i) for i in range(n_qubits)]\n        controls = [ControlChannel(i) for i in range(n_uchannels)]\n\n        qubits = []\n        for i in range(n_qubits):\n            \n            qubit = Qubit(i,\n                          drive_channels=[drives[i]],\n                          control_channels=None if n_uchannels == 0 else controls[i],\n                          measure_channels=[measures[i]],\n                          acquire_channels=[acquires[i]])\n            qubits.append(qubit)\n\n        registers = [RegisterSlot(i) for i in range(n_registers)]\n        \n        mem_slots = [MemorySlot(i) for i in range(len(qubits))]\n\n        return DeviceSpecification(qubits, registers, mem_slots)", "docstring": "Create device specification with values in backend configuration.\nArgs:\nbackend(Backend): backend configuration\nReturns:\nDeviceSpecification: created device specification\nRaises:\nPulseError: when an invalid backend is specified", "source": "juraj-google-style"}
{"code": "def ParseDom(self, dom, feed):\n    \n    shape_num = 0\n    for node in dom.getElementsByTagName('Placemark'):\n      p = self.ParsePlacemark(node)\n      if p.IsPoint():\n        (lon, lat) = p.coordinates[0]\n        m = self.stopNameRe.search(p.name)\n        feed.AddStop(lat, lon, m.group(1))\n      elif p.IsLine():\n        self.ConvertPlacemarkToShape(p, feed)", "docstring": "Parses the given kml dom tree and updates the Google transit feed object.\n\nArgs:\ndom - kml dom tree\nfeed - an instance of Schedule class to be updated", "source": "juraj-google-style"}
{"code": "def reqAccountUpdatesMulti(\n            self, account: str = '', modelCode: str = ''):\n        \n        self._run(self.reqAccountUpdatesMultiAsync(account, modelCode))", "docstring": "It is recommended to use :meth:`.accountValues` instead.\n\nRequest account values of multiple accounts and keep updated.\n\nThis method is blocking.\n\nArgs:\naccount: If specified, filter for this account name.\nmodelCode: If specified, filter for this account model.", "source": "juraj-google-style"}
{"code": "def to_soft(self, path_or_handle, as_gzip=False):\n    if isinstance(path_or_handle, str):\n        if as_gzip:\n            with gzip.open(path_or_handle, 'wt') as outfile:\n                outfile.write(self._get_object_as_soft())\n        else:\n            with open(path_or_handle, 'w') as outfile:\n                outfile.write(self._get_object_as_soft())\n    else:\n        path_or_handle.write(self._get_object_as_soft())", "docstring": "Save the object in a SOFT format.\n\nArgs:\npath_or_handle (:obj:`str` or :obj:`file`): Path or handle to\noutput file\nas_gzip (:obj:`bool`): Save as gzip", "source": "codesearchnet"}
{"code": "def cut_setting(self, cut):\n        \n        \n        cut_settings = {'full' : 0b00000001,\n                        'half' : 0b00000010,\n                        'chain': 0b00000100,\n                        'special': 0b00001000\n                        }\n        if cut in cut_settings:\n            self.send(chr(27)+'iC'+chr(cut_settings[cut]))\n        else:\n            raise RuntimeError('Invalid cut type.')", "docstring": "Set cut setting for printer.\n\nArgs:\ncut: The type of cut setting we want. Choices are 'full', 'half', 'chain', and 'special'.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid cut type.", "source": "juraj-google-style"}
{"code": "def recode_curesim_reads(curesim_fastq_fo, rnf_fastq_fo, fai_fo, genome_id, number_of_read_tuples=(10 ** 9), recode_random=False):\n    curesim_pattern = re.compile('@(.*)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)')\n    '\\n\\t\\t\\tCuReSim read name format\\n\\n\\t\\t\\t@<\n    max_seq_len = 0\n    fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)\n    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))\n    fq_creator = rnftools.rnfformat.FqCreator(fastq_fo=rnf_fastq_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator='curesim')\n    read_tuple_id = 0\n    i = 0\n    for line in curesim_fastq_fo:\n        if ((i % 4) == 0):\n            m = curesim_pattern.search(line)\n            if (m is None):\n                rnftools.utils.error(\"Read '{}' was not generated by CuReSim.\".format(line[1:]), program='RNFtools', subprogram='MIShmash', exception=ValueError)\n            contig_name = m.group(1)\n            start_pos = int(m.group(2))\n            direction = ('R' if int(m.group(3)) else 'F')\n            random = bool(m.group(4))\n            ins_nb = int(m.group(5))\n            del_nb = int(m.group(6))\n            subst_nb = int(m.group(7))\n            rd_id = int(m.group(8))\n            end_pos = (((start_pos - 1) - ins_nb) + del_nb)\n            chr_id = 0\n            random = (contig_name[:4] == 'rand')\n        elif ((i % 4) == 1):\n            bases = line.strip()\n            end_pos += len(bases)\n            if recode_random:\n                left = 0\n                right = 0\n            else:\n                left = (start_pos + 1)\n                right = end_pos\n            segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction, left=left, right=right)\n        elif ((i % 4) == 2):\n            pass\n        elif ((i % 4) == 3):\n            qualities = line.strip()\n            if (random == recode_random):\n                fq_creator.add_read(read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment])\n            read_tuple_id += 1\n        i += 1\n    fq_creator.flush_read_tuple()", "docstring": "Recode CuReSim output FASTQ file to the RNF-compatible output FASTQ file.\n\nArgs:\ncuresim_fastq_fo (file object): File object of CuReSim FASTQ file.\nfastq_rnf_fo (file object): File object of RNF FASTQ.\nfai_fo (file object): File object for FAI file of the reference genome.\ngenome_id (int): RNF genome ID to be used.\nnumber_of_read_tuples (int): Expected number of read tuples (to estimate number of digits in RNF).\nrecode_random (bool): Recode random reads.\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def _txn_is_in_valid_batch(self, txn_id):\n        \n\n        batch = self._batches_by_txn_id[txn_id]\n\n        \n        \n        return all(\n            self._txn_results[sig].is_valid\n            for sig in set(self._txn_results).intersection(\n                (txn.header_signature for txn in batch.transactions)))", "docstring": "Returns whether the transaction is in a valid batch.\n\nArgs:\ntxn_id (str): The transaction header signature.\n\nReturns:\n(bool): True if the txn's batch is valid, False otherwise.", "source": "juraj-google-style"}
{"code": "def l2_distance_sq(t1, t2, name=None):\n  \n  with tf.name_scope(name, 'l2_distance_sq', [t1, t2]) as scope:\n    t1 = tf.convert_to_tensor(t1, name='t1')\n    t2 = tf.convert_to_tensor(t2, name='t2')\n    return length_squared(tf.subtract(t1, t2), name=scope)", "docstring": "Square of l2 distance between t1 and t2.\n\nArgs:\nt1: A tensor.\nt2: A tensor that is the same size as t1.\nname: Optional name for this op.\nReturns:\nThe l2 distance between t1 and t2.", "source": "juraj-google-style"}
{"code": "def _allocate_channel(self):\n    try:\n        channel = (yield self.channel())\n    except pika.exceptions.NoFreeChannels:\n        raise NoFreeChannels()\n    _std_log.debug('Created AMQP channel id %d', channel.channel_number)\n    if self._confirms:\n        (yield channel.confirm_delivery())\n    defer.returnValue(channel)", "docstring": "Allocate a new AMQP channel.\n\nRaises:\nNoFreeChannels: If this connection has reached its maximum number of channels.", "source": "codesearchnet"}
{"code": "def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=0.01):\n    muref = np.array([self.el_refs[e].energy_per_atom for e in self.elements if (e != dep_elt)])\n    chempot_ranges = self.get_chempot_range_map([e for e in self.elements if (e != dep_elt)])\n    for e in self.elements:\n        if (not (e in target_comp.elements)):\n            target_comp = (target_comp + Composition({e: 0.0}))\n    coeff = [(- target_comp[e]) for e in self.elements if (e != dep_elt)]\n    for e in chempot_ranges.keys():\n        if (e.composition.reduced_composition == target_comp.reduced_composition):\n            multiplicator = (e.composition[dep_elt] / target_comp[dep_elt])\n            ef = (e.energy / multiplicator)\n            all_coords = []\n            for s in chempot_ranges[e]:\n                for v in s._coords:\n                    elts = [e for e in self.elements if (e != dep_elt)]\n                    res = {}\n                    for i in range(len(elts)):\n                        res[elts[i]] = (v[i] + muref[i])\n                    res[dep_elt] = ((np.dot((v + muref), coeff) + ef) / target_comp[dep_elt])\n                    already_in = False\n                    for di in all_coords:\n                        dict_equals = True\n                        for k in di:\n                            if (abs((di[k] - res[k])) > tol_en):\n                                dict_equals = False\n                                break\n                        if dict_equals:\n                            already_in = True\n                            break\n                    if (not already_in):\n                        all_coords.append(res)\n    return all_coords", "docstring": "returns a set of chemical potentials corresponding to the vertices of\nthe simplex in the chemical potential phase diagram.\nThe simplex is built using all elements in the target_composition\nexcept dep_elt.\nThe chemical potential of dep_elt is computed from the target\ncomposition energy.\nThis method is useful to get the limiting conditions for\ndefects computations for instance.\n\nArgs:\ntarget_comp: A Composition object\ndep_elt: the element for which the chemical potential is computed\nfrom the energy of\nthe stable phase at the target composition\ntol_en: a tolerance on the energy to set\n\nReturns:\n[{Element:mu}]: An array of conditions on simplex vertices for\nwhich each element has a chemical potential set to a given\nvalue. \"absolute\" values (i.e., not referenced to element energies)", "source": "codesearchnet"}
{"code": "def password(message: Text, default: Text='', validate: Union[(Type[Validator], Callable[([Text], bool)], None)]=None, qmark: Text=DEFAULT_QUESTION_PREFIX, style: Optional[Style]=None, **kwargs: Any) -> Question:\n    return text.text(message, default, validate, qmark, style, is_password=True, **kwargs)", "docstring": "Question the user to enter a secret text not displayed in the prompt.\n\nThis question type can be used to prompt the user for information\nthat should not be shown in the command line. The typed text will be\nreplaced with `*`.\n\nArgs:\nmessage: Question text\n\ndefault: Default value will be returned if the user just hits\nenter.\n\nvalidate: Require the entered value to pass a validation. The\nvalue can not be submited until the validator accepts\nit (e.g. to check minimum password length).\n\nThis can either be a function accepting the input and\nreturning a boolean, or an class reference to a\nsubclass of the prompt toolkit Validator class.\n\nqmark: Question prefix displayed in front of the question.\nBy default this is a `?`\n\nstyle: A custom color and style for the question parts. You can\nconfigure colors as well as font types for different elements.\n\nReturns:\nQuestion: Question instance, ready to be prompted (using `.ask()`).", "source": "codesearchnet"}
{"code": "def clear_history(vcs):\n    \n    evidence_path = _get_committed_history_path(vcs)\n    if os.path.exists(evidence_path):\n        os.remove(evidence_path)", "docstring": "Clear (committed) test run history from this project.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)", "source": "juraj-google-style"}
{"code": "def set_s3_prefix(self, region, name):\n    ct = self.session.client('cloudtrail', region_name=region)\n    ct.update_trail(Name=name, S3KeyPrefix=self.account.account_name)\n    auditlog(event='cloudtrail.set_s3_prefix', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format(self.account.account_name, region))", "docstring": "Sets the S3 prefix for a CloudTrail Trail\n\nArgs:\nregion (`str`): Name of the AWS region\nname (`str`): Name of the CloudTrail Trail\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def qmhl(data: quantum_data.QuantumData, input_qhbm: qhbm.QHBM):\n    return data.expectation(input_qhbm.modular_hamiltonian) + input_qhbm.e_inference.log_partition()", "docstring": "Calculate the QMHL loss of the QHBM against the quantum data.\n\nSee equation 21 in the appendix.\n\nArgs:\ndata: The data mixed state to learn.\ninput_qhbm: QHBM being trained to approximate `data`.\n\nReturns:\nThe quantum cross-entropy between the data and the model.", "source": "github-repos"}
{"code": "def save_image(image_url, image_directory, image_name):\n    \n    image_type = get_image_type(image_url)\n    if image_type is None:\n        raise ImageErrorException(image_url)\n    full_image_file_name = os.path.join(image_directory, image_name + '.' + image_type)\n\n    \n    if os.path.exists(image_url):\n        shutil.copy(image_url, full_image_file_name)\n        return image_type\n\n    try:\n        \n        with open(full_image_file_name, 'wb') as f:\n            user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'\n            request_headers = {'User-Agent': user_agent}\n            requests_object = requests.get(image_url, headers=request_headers)\n            try:\n                content = requests_object.content\n                \n                f.write(content)\n            except AttributeError:\n                raise ImageErrorException(image_url)\n    except IOError:\n        raise ImageErrorException(image_url)\n    return image_type", "docstring": "Saves an online image from image_url to image_directory with the name image_name.\nReturns the extension of the image saved, which is determined dynamically.\n\nArgs:\nimage_url (str): The url of the image.\nimage_directory (str): The directory to save the image in.\nimage_name (str): The file name to save the image as.\n\nRaises:\nImageErrorException: Raised if unable to save the image at image_url", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def get_gene_associations(model):\n    \n\n    for reaction in model.reactions:\n        assoc = None\n        if reaction.genes is None:\n            continue\n        elif isinstance(reaction.genes, string_types):\n            assoc = boolean.Expression(reaction.genes)\n        else:\n            variables = [boolean.Variable(g) for g in reaction.genes]\n            assoc = boolean.Expression(boolean.And(*variables))\n        yield reaction.id, assoc", "docstring": "Create gene association for class :class:`.GeneDeletionStrategy`.\n\nReturn a dict mapping reaction IDs to\n:class:`psamm.expression.boolean.Expression` objects,\nrepresenting relationships between reactions and related genes. This helper\nfunction should be called when creating :class:`.GeneDeletionStrategy`\nobjects.\n\nArgs:\nmodel: :class:`psamm.datasource.native.NativeModel`.", "source": "juraj-google-style"}
{"code": "def add_reader(\n            self,\n            fd: IFileLike,\n            callback: typing.Callable[[IFileLike], typing.Any],\n    ) -> None:\n        \n        raise NotImplementedError()", "docstring": "Add a file descriptor to the processor and wait for READ.\n\nArgs:\nfd (IFileLike): Any obect that exposes a 'fileno' method that\nreturns a valid file descriptor integer.\ncallback (typing.Callable[[IFileLike], typing.Any]): A function\nthat consumes the IFileLike object whenever the READ event is\nfired.", "source": "juraj-google-style"}
{"code": "def save_op(self, filename_tensor, saveables):\n    tensor_names = []\n    tensors = []\n    tensor_slices = []\n    for saveable in saveables:\n        for spec in saveable.specs:\n            tensor_names.append(spec.name)\n            tensors.append(spec.tensor)\n            tensor_slices.append(spec.slice_spec)\n    if self._write_version == saver_pb2.SaverDef.V1:\n        return io_ops._save(filename=filename_tensor, tensor_names=tensor_names, tensors=tensors, tensor_slices=tensor_slices)\n    elif self._write_version == saver_pb2.SaverDef.V2:\n        return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices, tensors)\n    else:\n        raise RuntimeError('Unexpected write_version: ' + self._write_version)", "docstring": "Create an Op to save 'saveables'.\n\nThis is intended to be overridden by subclasses that want to generate\ndifferent Ops.\n\nArgs:\nfilename_tensor: String Tensor.\nsaveables: A list of BaseSaverBuilder.SaveableObject objects.\n\nReturns:\nAn Operation that save the variables.\n\nRaises:\nRuntimeError: (implementation detail) if \"self._write_version\" is an\nunexpected value.", "source": "github-repos"}
{"code": "def get_recipe(filepath=None, includepath=None, stringcontent=None):\n    if filepath:\n        with open(filepath) as recipe_file:\n            stringcontent = recipe_file.read()\n    try:\n        return recipe_includes(json.loads(stringcontent.replace('\\n', ' ')), includepath)\n    except ValueError as e:\n        pos = 0\n        for count, line in enumerate(stringcontent.splitlines(), 1):\n            pos += len(line)\n            if pos >= e.pos:\n                e.lineno = count\n                e.pos = pos\n                e.args = ('JSON ERROR: %s LINE: %s CHARACTER: %s ERROR: %s LINE: %s' % (filepath, count, pos - e.pos, str(e.msg), line.strip()),)\n                raise", "docstring": "Loads json for recipe, replaces newlines, and expands includes.\n\nArgs:\n- filepath: (string) The local file path to the recipe json file to load.\n\nReturns:\nDictionary of recipe file.", "source": "github-repos"}
{"code": "def RegisterPathSpec(cls, path_spec_type):\n    \n    type_indicator = path_spec_type.TYPE_INDICATOR\n    if type_indicator in cls._path_spec_types:\n      raise KeyError(\n          'Path specification type: {0:s} already set.'.format(\n              type_indicator))\n\n    cls._path_spec_types[type_indicator] = path_spec_type\n\n    if getattr(path_spec_type, '_IS_SYSTEM_LEVEL', False):\n      cls._system_level_type_indicators[type_indicator] = path_spec_type", "docstring": "Registers a path specification type.\n\nArgs:\npath_spec_type (type): path specification type.\n\nRaises:\nKeyError: if path specification is already registered.", "source": "juraj-google-style"}
{"code": "def _WriteHeader(self, output_writer):\n    header_string = ''\n    if self._title:\n        header_string = ' {0:s} '.format(self._title)\n    header_string = self._HEADER_FORMAT_STRING.format(header_string)\n    output_writer.Write(header_string)", "docstring": "Writes a header.\n\nArgs:\noutput_writer (OutputWriter): output writer.", "source": "codesearchnet"}
{"code": "def create_failover_dns(self, primary_region='us-east-1'):\n    dns_record = self.generated.dns()['global']\n    zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)\n    elb_dns_aws = find_elb(name=self.app_name, env=self.env, region=self.region)\n    elb_dns_zone_id = find_elb_dns_zone_id(name=self.app_name, env=self.env, region=self.region)\n    if (primary_region in elb_dns_aws):\n        failover_state = 'PRIMARY'\n    else:\n        failover_state = 'SECONDARY'\n    self.log.info('%s set as %s record', elb_dns_aws, failover_state)\n    self.log.info('Updating Application Failover URL: %s', dns_record)\n    dns_kwargs = {'dns_name': dns_record, 'elb_dns_zone_id': elb_dns_zone_id, 'elb_aws_dns': elb_dns_aws, 'dns_ttl': self.dns_ttl, 'failover_state': failover_state}\n    for zone_id in zone_ids:\n        self.log.debug('zone_id: %s', zone_id)\n        update_failover_dns_record(self.env, zone_id, **dns_kwargs)\n    return dns_record", "docstring": "Create dns entries in route53 for multiregion failover setups.\n\nArgs:\nprimary_region (str): primary AWS region for failover\nReturns:\nAuto-generated DNS name.", "source": "codesearchnet"}
{"code": "def from_file(cls, fp, is_outlook=False):\n    log.debug('Parsing email from file {!r}'.format(fp))\n    with ported_open(fp) as f:\n        message = email.message_from_file(f)\n    if is_outlook:\n        log.debug('Removing temp converted Outlook email {!r}'.format(fp))\n        os.remove(fp)\n    return cls(message)", "docstring": "Init a new object from a file path.\n\nArgs:\nfp (string): file path of raw email\nis_outlook (boolean): if True is an Outlook email\n\nReturns:\nInstance of MailParser", "source": "codesearchnet"}
{"code": "def block_embedding_to(self, device):\n    self.block_emb = self.block_emb.to(device)", "docstring": "Send `self.block_emb` to a specific device.\n\nArgs:\ndevice (`str` or `torch.device`):\nThe device to which `self.block_emb` will be sent.", "source": "github-repos"}
{"code": "def _ParseFileVersion(file_version):\n  \n  tokens = file_version.split('brain.Event:')\n  try:\n    return float(tokens[-1])\n  except ValueError:\n    \n    \n    logger.warn(\n        ('Invalid event.proto file_version. Defaulting to use of '\n         'out-of-order event.step logic for purging expired events.'))\n    return -1", "docstring": "Convert the string file_version in event.proto into a float.\n\nArgs:\nfile_version: String file_version from event.proto\n\nReturns:\nVersion number as a float.", "source": "juraj-google-style"}
{"code": "def comments_1(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `comments_1`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `comments_1`')\n\n        self._comments_1 = value", "docstring": "Corresponds to IDD Field `comments_1`\n\nArgs:\nvalue (str): value for IDD Field `comments_1`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def apply_to_miz(self, miz):\n        \n        miz.mission.day = self.date.day\n        miz.mission.month = self.date.month\n        miz.mission.year = self.date.year\n        miz.mission.mission_start_time = self.mission_start_time\n\n        return True", "docstring": "Applies this datetime to a Miz object (it will be mutated in place)\n\nArgs:\nmiz: MIZ object to mutate\n\nReturns: True", "source": "juraj-google-style"}
{"code": "def get_group_id(self, uuid=None):\n        \n        group_data = self.get_group(uuid)\n        try:\n            return group_data['response']['docs'][0]['id']\n        except (KeyError, IndexError):\n            failure_message = ('Error in get_group response data - '\n                               'got {0}'.format(group_data))\n            log.exception(failure_message)\n            raise PyLmodUnexpectedData(failure_message)", "docstring": "Get group id based on uuid.\n\nArgs:\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: No group data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\nint: numeric group id", "source": "juraj-google-style"}
{"code": "def convertDateTimeStrToDateStr(datetime):\n    if not datetime == None and 'T' in datetime:\n        datetime = datetime.split('T')[0]\n    return datetime", "docstring": "Convert a DateTime string (YYYY-MM-DDTHH:mm:SSZ) to just a Date string by removing the time (YYYY-MM-DD)\n\nArgs:\ndatetime: the datetime as a string\n\nReturns:\nA string representation of the date in the following\nformat YYYY-MM-DD", "source": "github-repos"}
{"code": "def list_vms_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Compute/virtualMachines',\n                        '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VMs in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of a list of VM model views.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               num_parameter_servers=0,\n               ps_device='/job:ps',\n               placement='CPU:0'):\n    \n    self._num_ps = num_parameter_servers\n    self._ps_device = ps_device\n    self._placement = placement if num_parameter_servers == 0 else 'CPU:0'\n    self._next_task_id = 0", "docstring": "Initialize VariableDeviceChooser.\n\nArgs:\nnum_parameter_servers: number of parameter servers.\nps_device: string representing the parameter server device.\nplacement: string representing the placement of the variable either CPU:0\nor GPU:0. When using parameter servers forced to CPU:0.", "source": "juraj-google-style"}
{"code": "def _getsize_from_header(self, header):\n        \n        \n        for key in self._SIZE_KEYS:\n            try:\n                return int(header.pop(key))\n            except KeyError:\n                continue\n        else:\n            raise UnsupportedOperation('getsize')", "docstring": "Return the size from header\n\nArgs:\nheader (dict): Object header.\n\nReturns:\nint: Size in bytes.", "source": "juraj-google-style"}
{"code": "def value_to_key_strokes(value):\n    \n    result = ''\n    if isinstance(value, Integral):\n        value = str(value)\n\n    for v in value:\n        if isinstance(v, Keys):\n            result += v.value\n        elif isinstance(v, Integral):\n            result += str(v)\n        else:\n            result += v\n    return [result]", "docstring": "Convert value to a list of key strokes\n>>> value_to_key_strokes(123)\n['123']\n>>> value_to_key_strokes('123')\n['123']\n>>> value_to_key_strokes([1, 2, 3])\n['123']\n>>> value_to_key_strokes(['1', '2', '3'])\n['123']\n\nArgs:\nvalue(int|str|list)\n\nReturns:\nA list of string.", "source": "juraj-google-style"}
{"code": "def call(self, x):\n    \n    with tf.name_scope(\"embedding\"):\n      embeddings = tf.gather(self.shared_weights, x)\n\n      \n      embeddings *= self.hidden_size ** 0.5\n\n      \n      \n      padding = model_utils.get_padding(x)\n\n      \n      embeddings *= tf.expand_dims(1 - padding, -1)\n      return embeddings", "docstring": "Get token embeddings of x.\n\nArgs:\nx: An int64 tensor with shape [batch_size, length]\nReturns:\nembeddings: float32 tensor with shape [batch_size, length, embedding_size]\npadding: float32 tensor with shape [batch_size, length] indicating the\nlocations of the padding tokens in x.", "source": "juraj-google-style"}
{"code": "def _awaitReset(self, utcTimeStamp, verbose=True):\n    resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp))\n    _vPrint(verbose, '--- Current Timestamp')\n    _vPrint(verbose, ('      %s' % time.strftime('%c')))\n    now = pytz.utc.localize(datetime.utcnow())\n    waitTime = (round((resetTime - now).total_seconds()) + 1)\n    _vPrint(verbose, '--- Current UTC Timestamp')\n    _vPrint(verbose, ('      %s' % now.strftime('%c')))\n    _vPrint(verbose, '--- GITHUB NEEDS A BREAK Until UTC Timestamp')\n    _vPrint(verbose, ('      %s' % resetTime.strftime('%c')))\n    self._countdown(waitTime, printString='--- Waiting %*d seconds...', verbose=verbose)\n    _vPrint(verbose, '--- READY!')", "docstring": "Wait until the given UTC timestamp.\n\nArgs:\nutcTimeStamp (int): A UTC format timestamp.\nverbose (Optional[bool]): If False, all extra printouts will be\nsuppressed. Defaults to True.", "source": "codesearchnet"}
{"code": "def _add_sub_parsers(self, top_level_parser, methods_to_parse, class_name):\n        \n        description = \"Accessible methods of {}\".format(class_name)\n        sub_parsers = top_level_parser.add_subparsers(description=description,\n                                                      dest=\"method\")\n        \n        \n        \n        parser_to_method = {}\n        for method_name, parser in methods_to_parse.items():\n            \n            \n            parser_name = parser.get_name() or method_name\n            \n            if parser_name.startswith(\"_\"):\n                if not self._parse_private:\n                    \n                    \n                    continue\n                \n                \n                parser_name = parser_name.strip(\"_\")\n            parser_name = parser_name.replace(\"_\", \"-\")\n            parser_to_method[parser_name] = method_name\n            sub_parsers.add_parser(parser_name, parents=[parser],\n                                   add_help=False,\n                                   description=parser.description)\n        return parser_to_method", "docstring": "Add all the sub-parsers to the top_level_parser.\n\nArgs:\ntop_level_parser: the top level parser\nmethods_to_parse: dict of method name pointing to their associated\nargument parser\nclass_name: name of the decorated class\n\nReturns:\na dict of registered name of the parser i.e. sub command name\npointing to the method real name", "source": "juraj-google-style"}
{"code": "def _check_type(obj, expected_types):\n    if not isinstance(obj, expected_types):\n        raise TypeError('Expected type %s; got type %s' % (expected_types, type(obj)))", "docstring": "Check if an object is of the expected type.\n\nArgs:\nobj: The object being checked.\nexpected_types: (`type` or an iterable of `type`s) The expected `type`(s)\nof obj.\n\nRaises:\nTypeError: If obj is not an instance of expected_type.", "source": "github-repos"}
{"code": "def __init__(self, identifier=None):\n    \n    super(SessionStart, self).__init__()\n    self.artifact_filters = None\n    self.command_line_arguments = None\n    self.debug_mode = False\n    self.enabled_parser_names = None\n    self.filter_file = None\n    self.identifier = identifier\n    self.parser_filter_expression = None\n    self.preferred_encoding = None\n    self.preferred_time_zone = None\n    self.preferred_year = None\n    self.product_name = None\n    self.product_version = None\n    self.timestamp = None", "docstring": "Initializes a session start attribute container.\n\nArgs:\nidentifier (Optional[str]): unique identifier of the session.\nThe identifier should match that of the corresponding\nsession completion information.", "source": "juraj-google-style"}
{"code": "def build_recursive_gcs_delocalize_env(source, outputs):\n  \n  filtered_outs = [\n      var for var in outputs\n      if var.recursive and var.file_provider == job_model.P_GCS\n  ]\n  return '\\n'.join([\n      'export {0}={1}/{2}'.format(var.name,\n                                  source.rstrip('/'),\n                                  var.docker_path.rstrip('/'))\n      for var in filtered_outs\n  ])", "docstring": "Return a multi-line string with export statements for the variables.\n\nArguments:\nsource: Folder with the data.\nFor example /mnt/data\noutputs: a list of OutputFileParam\n\nReturns:\na multi-line string with a shell script that sets environment variables\ncorresponding to the outputs.", "source": "juraj-google-style"}
{"code": "def parse_genotypes(variant, individuals, individual_positions):\n    genotypes = []\n    for ind in individuals:\n        pos = individual_positions[ind['individual_id']]\n        genotypes.append(parse_genotype(variant, ind, pos))\n    return genotypes", "docstring": "Parse the genotype calls for a variant\n\nArgs:\nvariant(cyvcf2.Variant)\nindividuals: List[dict]\nindividual_positions(dict)\nReturns:\ngenotypes(list(dict)): A list of genotypes", "source": "codesearchnet"}
{"code": "def optionally(self, entity_type, attribute_name=None):\n        \n        if not attribute_name:\n            attribute_name = entity_type\n        self.optional += [(entity_type, attribute_name)]\n        return self", "docstring": "Parsed intents from this parser can optionally include an entity of the provided type.\n\nArgs:\nentity_type(str): an entity type\nattribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.\n\nReturns:\nself: to continue modifications.", "source": "juraj-google-style"}
{"code": "def add_history(self, filename, color_scheme, font, wrap):\n        \n        filename = encoding.to_unicode_from_fs(filename)\n        if filename in self.filenames:\n            return\n        editor = codeeditor.CodeEditor(self)\n        if osp.splitext(filename)[1] == '.py':\n            language = 'py'\n        else:\n            language = 'bat'\n        editor.setup_editor(linenumbers=False,\n                            language=language,\n                            scrollflagarea=False,\n                            show_class_func_dropdown=False)\n        editor.focus_changed.connect(lambda: self.focus_changed.emit())\n        editor.setReadOnly(True)\n\n        editor.set_font(font, color_scheme)\n        editor.toggle_wrap_mode(wrap)\n\n        text, _ = encoding.read(filename)\n        editor.set_text(text)\n        editor.set_cursor_position('eof')\n\n        self.editors.append(editor)\n        self.filenames.append(filename)\n        index = self.tabwidget.addTab(editor, osp.basename(filename))\n        self.find_widget.set_editor(editor)\n        self.tabwidget.setTabToolTip(index, filename)\n        self.tabwidget.setCurrentIndex(index)", "docstring": "Add new history tab.\n\nArgs:\nfilename (str): file to be loaded in a new tab.", "source": "juraj-google-style"}
{"code": "def supported_view_classes(cls) -> Set[Type['View']]:\n    supported_view_classes = set()\n    view_class = pg_typing.get_outer_class(cls, base_cls=View, immediate=True)\n    if view_class is not None and (not inspect.isabstract(view_class)):\n        supported_view_classes.add(view_class)\n    for base_cls in cls.__bases__:\n        if issubclass(base_cls, View.Extension):\n            supported_view_classes.update(base_cls.supported_view_classes())\n    return supported_view_classes", "docstring": "Returns all non-abstract View classes that the current class supports.\n\nA class can inherit from multiple ``View.Extension`` classes. For example:\n\n.. code-block:: python\n\nclass MyObject(View1.Extension, View2.Extension):\n...\n\nIn this case, ``MyObject`` supports both ``View1`` and ``View2``.\n\nReturns:\nAll non-abstract View classes that the current class supports.", "source": "github-repos"}
{"code": "def GetDisplayNameForPathSpec(self, path_spec):\n    return path_helper.PathHelper.GetDisplayNameForPathSpec(path_spec, mount_path=self._mount_path, text_prepend=self._text_prepend)", "docstring": "Retrieves the display name for a path specification.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification.\n\nReturns:\nstr: human readable version of the path specification.", "source": "codesearchnet"}
{"code": "def fetch(self, settlement_id, data={}, **kwargs):\n    return super(Settlement, self).fetch(settlement_id, data, **kwargs)", "docstring": "Fetch Settlement data for given Id\n\nArgs:\nsettlement_id : Id for which settlement object has to be retrieved\n\nReturns:\nsettlement dict for given settlement id", "source": "codesearchnet"}
{"code": "def once(coro, raise_exception=False, return_value=None):\n    return times(coro, limit=1, return_value=return_value, raise_exception=raise_exception)", "docstring": "Wrap a given coroutine function that is restricted to one execution.\n\nRepeated calls to the coroutine function will return the value of the first\ninvocation.\n\nThis function can be used as decorator.\n\narguments:\ncoro (coroutinefunction): coroutine function to wrap.\nraise_exception (bool): raise exception if execution times exceeded.\nreturn_value (mixed): value to return when execution times exceeded,\ninstead of the memoized one from last invocation.\n\nRaises:\nTypeError: if coro argument is not a coroutine function.\n\nReturns:\ncoroutinefunction\n\nUsage::\n\nasync def mul_2(num):\nreturn num * 2\n\nonce = paco.once(mul_2)\nawait once(2)\n# => 4\nawait once(3)\n# => 4\n\nonce = paco.once(mul_2, return_value='exceeded')\nawait once(2)\n# => 4\nawait once(3)\n# => 'exceeded'", "source": "codesearchnet"}
{"code": "def _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    mean, var = nn.moments(x, reduction_axes, None, None, False)\n    normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n    return (normed, mean, var)", "docstring": "Non-fused version of `normalize_batch_in_training`.\n\nArgs:\nx: Input tensor or variable.\ngamma: Tensor by which to scale the input.\nbeta: Tensor with which to center the input.\nreduction_axes: iterable of integers,\naxes over which to normalize.\nepsilon: Fuzz factor.\n\nReturns:\nA tuple length of 3, `(normalized_tensor, mean, variance)`.", "source": "github-repos"}
{"code": "def get_query_columns(engine, query):\n    con = engine.connect()\n    result = con.execute(query).fetchone()\n    values = list(result)\n    cols_names = result.keys()\n    cols = OrderedDict()\n    for i in range(len(cols_names)):\n        cols[cols_names[i]] = type(values[i]).__name__\n    return cols", "docstring": "Extract columns names and python typos from query\n\nArgs:\nengine: SQLAlchemy connection engine\nquery: SQL query\n\nReturns:\ndict with columns names and python types", "source": "codesearchnet"}
{"code": "def transfer(self, transfer_payload=None, *, from_user, to_user):\n    if (self.persist_id is None):\n        raise EntityNotYetPersistedError('Entities cannot be transferred until they have been persisted')\n    return self.plugin.transfer(self.persist_id, transfer_payload, from_user=from_user, to_user=to_user)", "docstring": "Transfer this entity to another owner on the backing\npersistence layer\n\nArgs:\ntransfer_payload (dict): Payload for the transfer\nfrom_user (any): A user based on the model specified by the\npersistence layer\nto_user (any): A user based on the model specified by the\npersistence layer\n\nReturns:\nstr: Id of the resulting transfer action on the persistence\nlayer\n\nRaises:\n:exc:`~.EntityNotYetPersistedError`: If the entity being\ntransferred is not associated with an id on the\npersistence layer (:attr:`~Entity.persist_id`) yet\n:exc:`~.EntityNotFoundError`: If the entity could not be\nfound on the persistence layer\n:exc:`~.EntityTransferError`: If the entity fails to be\ntransferred on the persistence layer\n:exc:`~.PersistenceError`: If any other unhandled error\nin the plugin occurred", "source": "codesearchnet"}
{"code": "def blocking_reader(reader, input, buffer_size=_DEFAULT_BUFFER_SIZE):\n    \n    ion_event = None\n    while True:\n        read_event = (yield ion_event)\n        ion_event = reader.send(read_event)\n        while ion_event is not None and ion_event.event_type.is_stream_signal:\n            data = input.read(buffer_size)\n            if len(data) == 0:\n                \n                if ion_event.event_type is IonEventType.INCOMPLETE:\n                    ion_event = reader.send(NEXT_EVENT)\n                    continue\n                else:\n                    yield ION_STREAM_END_EVENT\n                    return\n            ion_event = reader.send(read_data_event(data))", "docstring": "Provides an implementation of using the reader co-routine with a file-like object.\n\nArgs:\nreader(Coroutine): A reader co-routine.\ninput(BaseIO): The file-like object to read from.\nbuffer_size(Optional[int]): The optional buffer size to use.", "source": "juraj-google-style"}
{"code": "def _generate_malformed_query(data):\n        \n        if isinstance(data, six.text_type):\n            \n            query_str = data.replace(':', ' ')\n        else:\n            query_str = ' '.join([word.strip(':') for word in data.children])\n\n        return {\n            'simple_query_string': {\n                'fields': ['_all'],\n                'query': query_str\n            }\n        }", "docstring": "Generates a query on the ``_all`` field with all the query content.\n\nArgs:\ndata (six.text_type or list): The query in the format of ``six.text_type`` (when used from parsing driver)\nor ``list`` when used from withing the ES visitor.", "source": "juraj-google-style"}
{"code": "def colored_block(text: str, block_start: str, block_end: str, color: Optional[str]=None, background: Optional[str]=None, styles: Optional[List[str]]=None) -> str:\n    if not color and (not background) and (not styles):\n        return text\n    s = []\n    start_index = 0\n    end_index = 0\n    previous_color = None\n\n    def write_nonblock_text(text: str, previous_color: Optional[str]):\n        if previous_color:\n            s.append(previous_color)\n        s.append(text)\n    while start_index < len(text):\n        start_index = text.find(block_start, end_index)\n        if start_index == -1:\n            write_nonblock_text(text[end_index:], previous_color)\n            break\n        since_last_block = text[end_index:start_index]\n        write_nonblock_text(since_last_block, previous_color)\n        colors = re.findall(_ANSI_COLOR_REGEX, since_last_block)\n        if colors:\n            previous_color = colors[-1]\n        end_index = text.find(block_end, start_index + len(block_start))\n        if end_index == -1:\n            write_nonblock_text(text[start_index:], previous_color)\n            break\n        end_index += len(block_end)\n        block = text[start_index:end_index]\n        block = colored(block, color=color, background=background, styles=styles)\n        s.append(block)\n    return ''.join(s)", "docstring": "Apply colors to text blocks.\n\nArgs:\ntext: A string that may or may not already has ANSI color characters.\nblock_start: A string that signals the start of a block. E.g. '{{'\nblock_end: A string that signals the end of a block. E.g. '}}'.\ncolor: A string for text colors. Applicable values are:\n'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'.\nbackground: A string for background colors. Applicable values are:\n'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'.\nstyles: A list of strings for applying styles on the text.\nApplicable values are:\n'bold', 'dark', 'underline', 'blink', 'reverse', 'concealed'.\n\nReturns:\nA string with ANSI color characters embracing the matched text blocks.", "source": "github-repos"}
{"code": "def generate_xid(identifier=None):\n    if (identifier is None):\n        identifier = str(uuid.uuid4())\n    elif isinstance(identifier, list):\n        identifier = '-'.join([str(i) for i in identifier])\n        identifier = hashlib.sha256(identifier.encode('utf-8')).hexdigest()\n    return hashlib.sha256(identifier.encode('utf-8')).hexdigest()", "docstring": "Generate xid from provided identifiers.\n\n.. Important::  If no identifier is provided a unique xid will be returned, but it will\nnot be reproducible. If a list of identifiers are provided they must be\nin the same order to generate a reproducible xid.\n\nArgs:\nidentifier (list|str):  Optional *string* value(s) to be used to make a unique and\nreproducible xid.", "source": "codesearchnet"}
{"code": "def start_simple_webserver(domain=None, port=5832):\n    import tornado.ioloop\n    import tornado.web\n    import tornado.httpserver\n    import tornado.wsgi\n    import flask\n    app = flask.Flask('__simple__')\n\n    @app.route('/', methods=['GET', 'POST', 'DELETE', 'PUT'])\n    def echo_args(*args, **kwargs):\n        from flask import request\n        print('Simple server was pinged')\n        print(('args = %r' % (args,)))\n        print(('kwargs = %r' % (kwargs,)))\n        print(('request.args = %r' % (request.args,)))\n        print(('request.form = %r' % (request.form,)))\n        return ''\n    if (domain is None):\n        domain = get_localhost()\n    app.server_domain = domain\n    app.server_port = port\n    app.server_url = ('http:\n    print(('app.server_url = %s' % (app.server_url,)))\n    http_server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))\n    http_server.listen(app.server_port)\n    tornado.ioloop.IOLoop.instance().start()", "docstring": "r\"\"\"\nsimple webserver that echos its arguments\n\nArgs:\ndomain (None): (default = None)\nport (int): (default = 5832)\n\nCommandLine:\npython -m utool.util_web --exec-start_simple_webserver:0\npython -m utool.util_web --exec-start_simple_webserver:1\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_web import *  # NOQA\n>>> domain = None\n>>> port = 5832\n>>> result = start_simple_webserver(domain, port)\n>>> print(result)", "source": "codesearchnet"}
{"code": "def _tf_flatten_batch_dims(x, num_nonbatch_dims):\n  \n  shape = x.shape.as_list()\n  assert None not in shape\n  new_shape = ([list_product(shape[:-num_nonbatch_dims])]\n               + shape[-num_nonbatch_dims:])\n  if new_shape != shape:\n    x = tf.reshape(x, new_shape)\n  return x", "docstring": "Flatten all but last num_nonbatch_dims into one dimension.\n\nArgs:\nx: a tf.Tensor:\nnum_nonbatch_dims: an integer\n\nReturns:\na tf.Tensor with 1 + num_nonbatch_dims dimensions.", "source": "juraj-google-style"}
{"code": "def _PrintSessionsOverview(self, storage_reader):\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Sessions')\n    for session in storage_reader.GetSessions():\n        start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)\n        session_identifier = uuid.UUID(hex=session.identifier)\n        session_identifier = '{0!s}'.format(session_identifier)\n        table_view.AddRow([session_identifier, start_time])\n    table_view.Write(self._output_writer)", "docstring": "Prints a sessions overview.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "codesearchnet"}
{"code": "def prepare_model_settings(label_count, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, feature_bin_count, preprocess):\n    desired_samples = int(sample_rate * clip_duration_ms / 1000)\n    window_size_samples = int(sample_rate * window_size_ms / 1000)\n    window_stride_samples = int(sample_rate * window_stride_ms / 1000)\n    length_minus_window = desired_samples - window_size_samples\n    if length_minus_window < 0:\n        spectrogram_length = 0\n    else:\n        spectrogram_length = 1 + int(length_minus_window / window_stride_samples)\n    if preprocess == 'average':\n        fft_bin_count = 1 + _next_power_of_two(window_size_samples) / 2\n        average_window_width = int(math.floor(fft_bin_count / feature_bin_count))\n        fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))\n    elif preprocess == 'mfcc':\n        average_window_width = -1\n        fingerprint_width = feature_bin_count\n    elif preprocess == 'micro':\n        average_window_width = -1\n        fingerprint_width = feature_bin_count\n    else:\n        raise ValueError('Unknown preprocess mode \"%s\" (should be \"mfcc\", \"average\", or \"micro\")' % preprocess)\n    fingerprint_size = fingerprint_width * spectrogram_length\n    return {'desired_samples': desired_samples, 'window_size_samples': window_size_samples, 'window_stride_samples': window_stride_samples, 'spectrogram_length': spectrogram_length, 'fingerprint_width': fingerprint_width, 'fingerprint_size': fingerprint_size, 'label_count': label_count, 'sample_rate': sample_rate, 'preprocess': preprocess, 'average_window_width': average_window_width}", "docstring": "Calculates common settings needed for all models.\n\nArgs:\nlabel_count: How many classes are to be recognized.\nsample_rate: Number of audio samples per second.\nclip_duration_ms: Length of each audio clip to be analyzed.\nwindow_size_ms: Duration of frequency analysis window.\nwindow_stride_ms: How far to move in time between frequency windows.\nfeature_bin_count: Number of frequency bins to use for analysis.\npreprocess: How the spectrogram is processed to produce features.\n\nReturns:\nDictionary containing common settings.\n\nRaises:\nValueError: If the preprocessing mode isn't recognized.", "source": "github-repos"}
{"code": "def remove(self, key):\n        \n        if self.prepickle:\n            key = pickle.dumps(key)\n        if key not in self.keys:\n            raise ValueError(\"The given key does not exist\")\n        for H, hashtable in zip(self.keys[key], self.hashtables):\n            hashtable.remove_val(H, key)\n            if not hashtable.get(H):\n                hashtable.remove(H)\n        self.keys.remove(key)", "docstring": "Remove the key from the index.\n\nArgs:\nkey (hashable): The unique identifier of a set.", "source": "juraj-google-style"}
{"code": "def create(self, params=None, headers=None):\n    path = '/creditor_bank_accounts'\n    if (params is not None):\n        params = {self._envelope_key(): params}\n    try:\n        response = self._perform_request('POST', path, params, headers, retry_failures=True)\n    except errors.IdempotentCreationConflictError as err:\n        return self.get(identity=err.conflicting_resource_id, params=params, headers=headers)\n    return self._resource_for(response)", "docstring": "Create a creditor bank account.\n\nCreates a new creditor bank account object.\n\nArgs:\nparams (dict, optional): Request body.\n\nReturns:\nListResponse of CreditorBankAccount instances", "source": "codesearchnet"}
{"code": "def assertNotAllEqual(self, a, b, msg=None):\n    try:\n        self.assertAllEqual(a, b)\n    except AssertionError:\n        return\n    msg = msg or ''\n    raise AssertionError('The two values are equal at all elements. %s' % msg)", "docstring": "Asserts that two numpy arrays or Tensors do not have the same values.\n\nArgs:\na: the expected numpy ndarray or anything can be converted to one.\nb: the actual numpy ndarray or anything can be converted to one.\nmsg: Optional message to report on failure.", "source": "github-repos"}
{"code": "def __init__(self, paths=None, separator='/'):\n    \n    if not paths:\n      raise errors.FormatError('Missing paths value.')\n\n    super(PathSourceType, self).__init__()\n    self.paths = paths\n    self.separator = separator", "docstring": "Initializes a source type.\n\nArgs:\npaths (Optional[str]): paths relative to the root of the file system.\nseparator (Optional[str]): path segment separator.\n\nRaises:\nFormatError: when paths is not set.", "source": "juraj-google-style"}
{"code": "def to_hdf(self,path,key,mode='a'):\n        \n        pd.DataFrame(self.serialize()).to_hdf(path,key,mode=mode,format='table',complib='zlib',complevel=9)\n        f = h5py.File(path,'r+')\n        f[key].attrs[\"microns_per_pixel\"] = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan\n        f.close()", "docstring": "Save the CellDataFrame to an hdf5 file.\n\nArgs:\npath (str): the path to save to\nkey (str): the name of the location to save it to\nmode (str): write mode", "source": "juraj-google-style"}
{"code": "def get_output_from_cache(name, filename):\n    \n    cache_filename = _get_cache_filename(name, filename)\n    if (os.path.exists(cache_filename)\n            and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):\n        with io.open(cache_filename) as f:\n            return f.read()\n\n    return None", "docstring": "Returns the output from the cache if still valid.\n\nIt checks that the cache file is defined and that its modification time is\nafter the modification time of the original file.\n\nArgs:\nname: string: name of the linter.\nfilename: string: path of the filename for which we are retrieving the\noutput.\n\nReturns: a string with the output, if it is still valid, or None otherwise.", "source": "juraj-google-style"}
{"code": "def abspath(fpath):\n    \n    from os import path, getcwd, chdir\n    original = getcwd()\n    chdir(reporoot)\n    result = path.abspath(path.expanduser(fpath))\n    chdir(original)\n    return result", "docstring": "Returns the absolute path to the specified file/folder *relative to the\nrepository root*.\n\nArgs:\nfpath (str): path to a file or folder; doesn't need to exist.", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[scipy.sparse.csr_matrix], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    return self._inference_fn(batch, model, inference_args)", "docstring": "Runs inferences on a batch of SciPy sparse matrices.\n\nArgs:\nbatch: A sequence of examples as Scipy sparse matrices.\nThe dimensions must match the dimensions of the data\nused to train the model.\nmodel: XGBoost booster or XBGModel (sklearn interface). Must implement\npredict(X). Where the parameter X is a SciPy sparse matrix.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def _GetFrameCodeObjectName(frame):\n  \n  \n  \n  if frame.f_code.co_argcount >= 1 and 'self' == frame.f_code.co_varnames[0]:\n    return (frame.f_locals['self'].__class__.__name__ +\n            '.' + frame.f_code.co_name)\n  else:\n    return frame.f_code.co_name", "docstring": "Gets the code object name for the frame.\n\nArgs:\nframe: the frame to get the name from\n\nReturns:\nThe function name if the code is a static function or the class name with\nthe method name if it is an member function.", "source": "juraj-google-style"}
{"code": "def whois_domains(self, domains):\n    api_name = 'opendns-whois-domain'\n    fmt_url_path = u'whois/{0}'\n    return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Calls WHOIS domain end point\n\nArgs:\ndomains: An enumerable of domains\nReturns:\nA dict of {domain: domain_result}", "source": "codesearchnet"}
{"code": "def get_child(self, injection_site_fn, binding):\n    child_scope_id = binding.scope_id\n    new_binding_stack = (self._binding_stack + [binding])\n    if (binding in self._binding_stack):\n        raise errors.CyclicInjectionError(new_binding_stack)\n    if (not self._is_scope_usable_from_scope_fn(child_scope_id, self._scope_id)):\n        raise errors.BadDependencyScopeError(self.get_injection_site_desc(), self._scope_id, child_scope_id, binding.binding_key)\n    return _InjectionContext(injection_site_fn, new_binding_stack, child_scope_id, self._is_scope_usable_from_scope_fn)", "docstring": "Creates a child injection context.\n\nA \"child\" injection context is a context for a binding used to\ninject something into the current binding's provided value.\n\nArgs:\ninjection_site_fn: the child function being injected into\nbinding: a Binding\nReturns:\na new _InjectionContext", "source": "codesearchnet"}
{"code": "def filterfalse_items(item_list, flag_list):\n    \n    assert len(item_list) == len(flag_list)\n    filtered_items = list(util_iter.ifilterfalse_items(item_list, flag_list))\n    return filtered_items", "docstring": "Returns items in item list where the corresponding item in flag list is true\n\nArgs:\nitem_list (list): list of items\nflag_list (list): list of truthy values\n\nReturns:\nfiltered_items : items where the corresponding flag was truthy\n\nSeeAlso:\nutil_iter.ifilterfalse_items", "source": "juraj-google-style"}
{"code": "def switch_to_window(page_class, webdriver):\n    window_list = list(webdriver.window_handles)\n    original_window = webdriver.current_window_handle\n    for window_handle in window_list:\n        webdriver.switch_to_window(window_handle)\n        try:\n            return PageFactory.create_page(page_class, webdriver)\n        except:\n            pass\n    webdriver.switch_to_window(original_window)\n    raise WindowNotFoundError(u('Window {0} not found.').format(page_class.__class__.__name__))", "docstring": "Utility method for switching between windows.  It will search through currently open\nwindows, then switch to the window matching the provided PageObject class.\n\nArgs:\npage_class (PageObject): Page class to search for/instantiate.\nwebdriver (WebDriver): Selenium webdriver.\n\nUsage::\n\nWebUtils.switch_to_window(DetailsPopUpPage, driver) # switches to the pop up window.", "source": "codesearchnet"}
{"code": "def relation_completions(\n    completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int\n) -> list:\n    \n\n    if bel_fmt == \"short\":\n        relation_list = bel_spec[\"relations\"][\"list_short\"]\n    else:\n        relation_list = bel_spec[\"relations\"][\"list_long\"]\n\n    matches = []\n    for r in relation_list:\n        if re.match(completion_text, r):\n            matches.append(r)\n\n    replace_list = []\n    for match in matches:\n        highlight = match.replace(completion_text, f\"<em>{completion_text}</em>\")\n        replace_list.append(\n            {\n                \"replacement\": match,\n                \"label\": match,\n                \"highlight\": highlight,\n                \"type\": \"Relation\",\n            }\n        )\n\n    return replace_list[:size]", "docstring": "Filter BEL relations by prefix\n\nArgs:\nprefix: completion string\nbel_fmt: short, medium, long BEL formats\nspec: BEL specification\n\nReturns:\nlist: list of BEL relations that match prefix", "source": "juraj-google-style"}
{"code": "def save(variable, filename):\n    \n    fileObj = open(filename, 'wb')\n    pickle.dump(variable, fileObj)\n    fileObj.close()", "docstring": "Save variable on given path using Pickle\n\nArgs:\nvariable: what to save\npath (str): path of the output", "source": "juraj-google-style"}
{"code": "def variant(self, document_id, gene_panels=None, case_id=None):\n        \n        query = {}\n        if case_id:\n            \n            query['case_id'] = case_id\n            query['variant_id'] = document_id\n        else:\n            \n            query['_id'] = document_id\n\n        variant_obj = self.variant_collection.find_one(query)\n        if variant_obj:\n            variant_obj = self.add_gene_info(variant_obj, gene_panels)\n            if variant_obj['chromosome'] in ['X', 'Y']:\n                \n                variant_obj['is_par'] = is_par(variant_obj['chromosome'],\n                                               variant_obj['position'])\n        return variant_obj", "docstring": "Returns the specified variant.\n\nArguments:\ndocument_id : A md5 key that represents the variant or \"variant_id\"\ngene_panels(List[GenePanel])\ncase_id (str): case id (will search with \"variant_id\")\n\nReturns:\nvariant_object(Variant): A odm variant object", "source": "juraj-google-style"}
{"code": "def _retry_failed_log(failed_trigger_log):\n    model = type(failed_trigger_log)\n    try:\n        failed_trigger_log = model.objects.select_for_update().get(id=failed_trigger_log.id, state=TRIGGER_LOG_STATE['FAILED'])\n    except model.DoesNotExist:\n        return False\n    failed_trigger_log.redo()\n    return True", "docstring": "Try to re-apply a failed trigger log action.\n\nMakes sure the argument trigger log is in a FAILED state and acquires a row lock on it.\n\nReturns:\nTrue if the operation succeeded", "source": "codesearchnet"}
{"code": "def __init__(self, additional_note=\"\", kwargs_dict=None):\n    \n    self._additional_note = additional_note\n    if kwargs_dict:\n      bullets = []\n      for key in sorted(kwargs_dict.keys()):\n        value = kwargs_dict[key]\n        if any(x.isspace() for x in key):\n          raise ValueError(\"Parameter name \\\"%s\\\" contains whitespace.\" % key)\n        value = value.lstrip()\n        if \"\\n\" in value:\n          raise ValueError(\n              \"Parameter description for \\\"%s\\\" contains newlines.\" % key)\n        bullets.append(\"*  `%s`: %s\" % (key, value))\n      self._additional_note += (\"\\n\\n", "docstring": "Initializes the AppendDocstring object.\n\nArgs:\nadditional_note: Python string added as additional docstring to public\nversion of function.\nkwargs_dict: Python string/string dictionary representing specific kwargs\nexpanded from the **kwargs input.\n\nRaises:\nValueError: if kwargs_dict.key contains whitespace.\nValueError: if kwargs_dict.value contains newlines.", "source": "juraj-google-style"}
{"code": "def control_status_ctx():\n    ret = _control_ctx()[-1]\n    return ret", "docstring": "Returns the current control context for autograph.\n\nThis method is useful when calling `tf.__internal__.autograph.tf_convert`,\nThe context will be used by tf_convert to determine whether it should convert\nthe input function. See the sample usage like below:\n\n```\ndef foo(func):\nreturn tf.__internal__.autograph.tf_convert(\ninput_fn, ctx=tf.__internal__.autograph.control_status_ctx())()\n```\n\nReturns:\nThe current control context of autograph.", "source": "github-repos"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    if event.data_type not in self._DATATYPES:\n      return\n\n    url = getattr(event, 'url', None)\n    if url is None:\n      return\n    parsed_url = urlparse.urlparse(url)\n    domain = getattr(parsed_url, 'netloc', None)\n    if domain in self._domains:\n      \n      return\n    self._domains.append(domain)", "docstring": "Analyzes an event and extracts domains from it.\n\nWe only evaluate straightforward web history events, not visits which can\nbe inferred by TypedURLs, cookies or other means.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"}
{"code": "def _delegate_method(keras_tensor_cls, method_name):\n\n    def delegate(self, *args, **kwargs):\n        return InstanceMethod(method_name)(self, args, kwargs)\n    setattr(keras_tensor_cls, method_name, delegate)", "docstring": "Register method on a KerasTensor class.\n\nCalling this function times with the same arguments should be a no-op.\n\nThis method exposes an instance method on the KerasTensor class that will use\nan `InstanceMethod` layer to run the desired method on the represented\nintermediate values in the model.\n\nArgs:\nkeras_tensor_cls: The KerasTensor subclass that should expose the property.\nmethod_name: The name of the method to expose and delegate to the\nrepresented (Composite)Tensor.", "source": "github-repos"}
{"code": "def asset(self, asset_id, asset_type, action='GET'):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    if (asset_type == 'PHONE'):\n        return self.tc_requests.victim_phone_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)\n    if (asset_type == 'EMAIL'):\n        return self.tc_requests.victim_email_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)\n    if (asset_type == 'NETWORK'):\n        return self.tc_requests.victim_network_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)\n    if (asset_type == 'SOCIAL'):\n        return self.tc_requests.victim_social_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)\n    if (asset_type == 'WEB'):\n        return self.tc_requests.victim_web_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)\n    self._tcex.handle_error(925, ['asset_type', 'asset', 'asset_type', 'asset_type', asset_type])\n    return None", "docstring": "Gets a asset of a Victim\n\nValid asset_type:\n+ PHONE\n+ EMAIL\n+ NETWORK\n+ SOCIAL\n+ WEB\n\nArgs:\nasset_type:\nasset_id:\naction:\n\nReturns:", "source": "codesearchnet"}
{"code": "def add_logs(self, logs):\n    self._log.extend(logs)\n    for log in logs:\n        print('%s line %d:%d: %s' % log)", "docstring": "Record a log and print it.\n\nThe log should be a tuple `(severity, lineno, col_offset, msg)`, which will\nbe printed and recorded. It is part of the log available in the `self.log`\nproperty.\n\nArgs:\nlogs: The logs to add. Must be a list of tuples\n`(severity, lineno, col_offset, msg)`.", "source": "github-repos"}
{"code": "def recipe_trends_places_to_bigquery_via_value(config, auth_write, secret, key, woeids, destination_dataset, destination_table):\n    twitter(config, {'auth': auth_write, 'secret': secret, 'key': key, 'trends': {'places': {'single_cell': True, 'values': woeids}}, 'out': {'bigquery': {'dataset': destination_dataset, 'table': destination_table}}})", "docstring": "Move using hard coded WOEID values.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nsecret (string) - NA\nkey (string) - NA\nwoeids (integer_list) - NA\ndestination_dataset (string) - NA\ndestination_table (string) - NA", "source": "github-repos"}
{"code": "def load_notebook_node(notebook_path):\n    nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)\n    if (not hasattr(nb.metadata, 'papermill')):\n        nb.metadata['papermill'] = {'parameters': dict(), 'environment_variables': dict(), 'version': __version__}\n    for cell in nb.cells:\n        if (not hasattr(cell.metadata, 'tags')):\n            cell.metadata['tags'] = []\n        if (not hasattr(cell.metadata, 'papermill')):\n            cell.metadata['papermill'] = dict()\n    return nb", "docstring": "Returns a notebook object with papermill metadata loaded from the specified path.\n\nArgs:\nnotebook_path (str): Path to the notebook file.\n\nReturns:\nnbformat.NotebookNode", "source": "codesearchnet"}
{"code": "def __init__(self, object_graph_proto, save_path, save_path_tensor, reader, restore_op_cache, graph_view, options, saveables_cache):\n    self.options = options\n    self.object_graph_proto = object_graph_proto\n    self.restore_uid = ops.uid()\n    self.unused_attributes = {}\n    self.object_by_proto_id = weakref.WeakValueDictionary()\n    self.matched_proto_ids = set()\n    self.all_python_objects = object_identity.ObjectIdentityWeakSet()\n    self.save_path_tensor = save_path_tensor\n    self.save_path_string = save_path\n    self.dtype_map = reader.get_variable_to_dtype_map()\n    self.shape_map = reader.get_variable_to_shape_map()\n    self.restore_ops = []\n    self.restore_ops_by_name = restore_op_cache\n    self.graph_view = graph_view\n    self.new_restore_ops_callback = None\n    self.deferred_slot_restorations = {}\n    self.slot_restorations = collections.defaultdict(list)\n    self.expect_partial_attr = False\n    if not self.options.experimental_skip_slot_variables:\n        for node_index, node in enumerate(self.object_graph_proto.nodes):\n            for slot_reference in node.slot_variables:\n                self.slot_restorations[slot_reference.original_variable_node_id].append(base._SlotVariableRestoration(optimizer_id=node_index, slot_variable_id=slot_reference.slot_variable_node_id, slot_name=slot_reference.slot_name))\n    self._deleter = _CheckpointRestoreCoordinatorDeleter(self.expect_partial_attr, self.object_graph_proto, self.matched_proto_ids, self.unused_attributes)\n    self.saveables_cache = saveables_cache", "docstring": "Specify the checkpoint being loaded.\n\nArgs:\nobject_graph_proto: The TrackableObjectGraph protocol buffer associated\nwith this checkpoint.\nsave_path: A string, the path to the checkpoint, as returned by\n`tf.train.latest_checkpoint`.\nsave_path_tensor: A string `Tensor` which contains or will be fed the save\npath.\nreader: A `CheckpointReader` for `save_path`. If None,\n`_CheckpointRestoreCoordinator` will initialize one itself.\nrestore_op_cache: A dictionary shared between\n`_CheckpointRestoreCoordinator`s for the same Python objects, used to\nlook up restore ops by name to avoid re-creating them across multiple\n`restore()` calls.\ngraph_view: A graph_view_lib.ObjectGraphView object for the restored\nobjects.\noptions: A CheckpointOptions object.\nsaveables_cache: An optional cache storing previously created\nSaveableObjects created for each Trackable. Maps Trackables to a\ndictionary of attribute names to Trackable.", "source": "github-repos"}
{"code": "def build_relative_position(query_size, key_size):\n    q_ids = tf.range(query_size, dtype=tf.int32)\n    k_ids = tf.range(key_size, dtype=tf.int32)\n    rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])\n    rel_pos_ids = rel_pos_ids[:query_size, :]\n    rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)\n    return tf.cast(rel_pos_ids, tf.int64)", "docstring": "Build relative position according to the query and key\n\nWe assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\nP_k\\)\n\nArgs:\nquery_size (int): the length of query\nkey_size (int): the length of key\n\nReturn:\n`tf.Tensor`: A tensor with shape [1, query_size, key_size]", "source": "github-repos"}
{"code": "def kernel_initrds(self):\n    kernels = []\n    initrds = []\n    name_values = [(k, v) for (k, v) in self.data.get('configs', [])]\n    for value in (self.data.get('title', []) + self.data.get('menuentry', [])):\n        name_values.extend(value)\n    for (name, value) in name_values:\n        if name.startswith('module'):\n            if ('vmlinuz' in value):\n                kernels.append(_parse_kernel_initrds_value(value))\n            elif (('initrd' in value) or ('initramfs' in value)):\n                initrds.append(_parse_kernel_initrds_value(value))\n        elif name.startswith(('kernel', 'linux')):\n            if ('ipxe.lkrn' in value):\n                return {}\n            elif ('xen.gz' not in value):\n                kernels.append(_parse_kernel_initrds_value(value))\n        elif (name.startswith('initrd') or name.startswith('initrd16')):\n            initrds.append(_parse_kernel_initrds_value(value))\n    return {GRUB_KERNELS: kernels, GRUB_INITRDS: initrds}", "docstring": "Get the `kernel` and `initrd` files referenced in GRUB configuration files\n\nReturns:\n(dict): Returns a dict of the `kernel` and `initrd` files referenced\nin GRUB configuration files", "source": "codesearchnet"}
{"code": "def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):\n    self._logger.info('Loading configuration from file: %s', yamlfile)\n    try:\n        parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())\n    except self._modules['yaml'].YAMLError:\n        self._logger.exception('Problem parsing YAML')\n        raise self.ConfigurationInvalidError(('Failed to load from %s as YAML' % yamlfile))\n    if (not isinstance(parsed_yaml, dict)):\n        raise self.ConfigurationInvalidError('YAML parsed, but wrong type, should be dict', parsed_yaml)\n    self._logger.debug('Configuration loaded from file: %s', parsed_yaml)\n    self.load_from_dict(parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)", "docstring": "Loads the configuration from a file.\n\nParsed contents must be a single dict mapping config key to value.\n\nArgs:\nyamlfile: The opened file object to load configuration from.\nSee load_from_dict() for other args' descriptions.\n\nRaises:\nConfigurationInvalidError: If configuration file can't be read, or can't\nbe parsed as either YAML (or JSON, which is a subset of YAML).", "source": "codesearchnet"}
{"code": "def _maybe_add_default_serving_output(export_outputs):\n    if len(export_outputs) == 1:\n        (key, value), = export_outputs.items()\n        if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n            export_outputs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value\n    if len(export_outputs) > 1:\n        if signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in export_outputs:\n            raise ValueError('Multiple export_outputs were provided, but none of them is specified as the default.  Do this by naming one of them with signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')\n    return export_outputs", "docstring": "Add a default serving output to the export_outputs if not present.\n\nArgs:\nexport_outputs: Describes the output signatures to be exported to\n`SavedModel` and used during serving. Should be a dict.\n\nReturns:\nexport_outputs dict with default serving signature added if necessary\n\nRaises:\nValueError: if multiple export_outputs were provided without a default\nserving key.", "source": "github-repos"}
{"code": "def BuildArtifactsRegistry(\n      cls, artifact_definitions_path, custom_artifacts_path):\n    \n    if artifact_definitions_path and not os.path.isdir(\n        artifact_definitions_path):\n      raise errors.BadConfigOption(\n          'No such artifacts filter file: {0:s}.'.format(\n              artifact_definitions_path))\n\n    if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):\n      raise errors.BadConfigOption(\n          'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))\n\n    registry = artifacts_registry.ArtifactDefinitionsRegistry()\n    reader = artifacts_reader.YamlArtifactsReader()\n\n    try:\n      registry.ReadFromDirectory(reader, artifact_definitions_path)\n\n    except (KeyError, artifacts_errors.FormatError) as exception:\n      raise errors.BadConfigOption((\n          'Unable to read artifact definitions from: {0:s} with error: '\n          '{1!s}').format(artifact_definitions_path, exception))\n\n    if custom_artifacts_path:\n      try:\n        registry.ReadFromFile(reader, custom_artifacts_path)\n\n      except (KeyError, artifacts_errors.FormatError) as exception:\n        raise errors.BadConfigOption((\n            'Unable to read artifact definitions from: {0:s} with error: '\n            '{1!s}').format(custom_artifacts_path, exception))\n\n    return registry", "docstring": "Build Find Specs from artifacts or filter file if available.\n\nArgs:\nartifact_definitions_path (str): path to artifact definitions file.\ncustom_artifacts_path (str): path to custom artifact definitions file.\n\nReturns:\nartifacts.ArtifactDefinitionsRegistry: artifact definitions registry.\n\nRaises:\nRuntimeError: if no valid FindSpecs are built.", "source": "juraj-google-style"}
{"code": "def cd(new_directory, clean_up=lambda: True):  \n    \n    previous_directory = os.getcwd()\n    os.chdir(os.path.expanduser(new_directory))\n    try:\n        yield\n    finally:\n        os.chdir(previous_directory)\n        clean_up()", "docstring": "Changes into a given directory and cleans up after it is done\n\nArgs:\nnew_directory: The directory to change to\nclean_up: A method to clean up the working directory once done", "source": "juraj-google-style"}
{"code": "def is_known_type(self, type_name):\n    type_name = str(type_name)\n    if (type_name in self.known_types):\n        return True\n    return False", "docstring": "Check if type is known to the type system.\n\nReturns:\nbool: True if the type is a known instantiated simple type, False otherwise", "source": "codesearchnet"}
{"code": "def transform(self, y):\n    if y.ndim == 1:\n        return y.reshape(-1, 1)\n    return y", "docstring": "Makes 1D y 2D.\n\nArgs:\ny : np.ndarray\nTarget y to be transformed.\n\nReturns:\nnp.ndarray\nA numpy array, of dimension at least 2.", "source": "github-repos"}
{"code": "def get_pipeline_yaml(file):\n    tag_representers = [PyString, SicString]\n    yaml_loader = get_yaml_parser_safe()\n    for representer in tag_representers:\n        yaml_loader.register_class(representer)\n    pipeline_definition = yaml_loader.load(file)\n    return pipeline_definition", "docstring": "Return pipeline yaml from open file object.\n\nUse specific custom representers to model the custom pypyr pipeline yaml\nformat, to load in special literal types like py and sic strings.\n\nIf looking to extend the pypyr pipeline syntax with special types, add\nthese to the tag_representers list.\n\nArgs:\nfile: open file-like object.\n\nReturns:\ndict-like representation of loaded yaml.", "source": "codesearchnet"}
{"code": "def add_feature(feature, package=None, source=None, limit_access=False, enable_parent=False, image=None, restart=False):\n    cmd = ['DISM', '/Quiet', ('/Image:{0}'.format(image) if image else '/Online'), '/Enable-Feature', '/FeatureName:{0}'.format(feature)]\n    if package:\n        cmd.append('/PackageName:{0}'.format(package))\n    if source:\n        cmd.append('/Source:{0}'.format(source))\n    if limit_access:\n        cmd.append('/LimitAccess')\n    if enable_parent:\n        cmd.append('/All')\n    if (not restart):\n        cmd.append('/NoRestart')\n    return __salt__['cmd.run_all'](cmd)", "docstring": "Install a feature using DISM\n\nArgs:\nfeature (str): The feature to install\npackage (Optional[str]): The parent package for the feature. You do not\nhave to specify the package if it is the Windows Foundation Package.\nOtherwise, use package to specify the parent package of the feature\nsource (Optional[str]): The optional source of the capability. Default\nis set by group policy and can be Windows Update\nlimit_access (Optional[bool]): Prevent DISM from contacting Windows\nUpdate for the source package\nenable_parent (Optional[bool]): True will enable all parent features of\nthe specified feature\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\nrestart (Optional[bool]): Reboot the machine if required by the install\n\nReturns:\ndict: A dictionary containing the results of the command\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.add_feature NetFx3", "source": "codesearchnet"}
{"code": "def DeregisterAnalyzer(cls, analyzer_class):\n    analyzer_name = analyzer_class.NAME.lower()\n    if (analyzer_name not in cls._analyzer_classes):\n        raise KeyError('analyzer class not set for name: {0:s}'.format(analyzer_class.NAME))\n    del cls._analyzer_classes[analyzer_name]", "docstring": "Deregisters a analyzer class.\n\nThe analyzer classes are identified based on their lower case name.\n\nArgs:\nanalyzer_class (type): class object of the analyzer.\n\nRaises:\nKeyError: if analyzer class is not set for the corresponding name.", "source": "codesearchnet"}
{"code": "def run_processor(processorClass, ocrd_tool=None, mets_url=None, resolver=None, workspace=None, page_id=None, log_level=None, input_file_grp=None, output_file_grp=None, parameter=None, working_dir=None):\n    workspace = _get_workspace(workspace, resolver, mets_url, working_dir)\n    if (parameter is not None):\n        if (not (':\n            fname = os.path.abspath(parameter)\n        else:\n            fname = workspace.download_url(parameter)\n        with open(fname, 'r') as param_json_file:\n            parameter = json.load(param_json_file)\n    else:\n        parameter = {}\n    log.debug('Running processor %s', processorClass)\n    processor = processorClass(workspace, ocrd_tool=ocrd_tool, page_id=page_id, input_file_grp=input_file_grp, output_file_grp=output_file_grp, parameter=parameter)\n    ocrd_tool = processor.ocrd_tool\n    name = ('%s v%s' % (ocrd_tool['executable'], processor.version))\n    otherrole = ocrd_tool['steps'][0]\n    log.debug('Processor instance %s (%s doing %s)', processor, name, otherrole)\n    processor.process()\n    workspace.mets.add_agent(name=name, _type='OTHER', othertype='SOFTWARE', role='OTHER', otherrole=otherrole)\n    workspace.save_mets()\n    return processor", "docstring": "Create a workspace for mets_url and run processor through it\n\nArgs:\nparameter (string): URL to the parameter", "source": "codesearchnet"}
{"code": "def is_generic_dict(type_: Type) -> bool:\n    \n    if hasattr(typing, '_GenericAlias'):\n        \n        return (isinstance(type_, typing._GenericAlias) and     \n                type_.__origin__ is dict)\n    else:\n        \n        return (isinstance(type_, typing.GenericMeta) and\n                type_.__origin__ is Dict)", "docstring": "Determines whether a type is a Dict[...].\n\nHow to do this varies for different Python versions, due to the\ntyping library not having a stable API. This functions smooths\nover the differences.\n\nArgs:\ntype_: The type to check.\n\nReturns:\nTrue iff it's a Dict[...something...].", "source": "juraj-google-style"}
{"code": "async def fetch_messages(self, selected: SelectedMailbox, sequence_set: SequenceSet, attributes: FrozenSet[FetchAttribute]) -> Tuple[(Iterable[Tuple[(int, MessageInterface)]], SelectedMailbox)]:\n    ...", "docstring": "Get a list of loaded message objects corresponding to given sequence\nset.\n\nArgs:\nselected: The selected mailbox session.\nsequence_set: Sequence set of message sequences or UIDs.\nattributes: Fetch attributes for the messages.\n\nRaises:\n:class:`~pymap.exceptions.MailboxNotFound`", "source": "codesearchnet"}
{"code": "def dump_database_as_insert_sql(engine: Engine, fileobj: TextIO=sys.stdout, include_ddl: bool=False, multirow: bool=False) -> None:\n    for tablename in get_table_names(engine):\n        dump_table_as_insert_sql(engine=engine, table_name=tablename, fileobj=fileobj, include_ddl=include_ddl, multirow=multirow)", "docstring": "Reads an entire database and writes SQL to replicate it to the output\nfile-like object.\n\nArgs:\nengine: SQLAlchemy :class:`Engine`\nfileobj: file-like object to write to\ninclude_ddl: if ``True``, include the DDL to create the table as well\nmultirow: write multi-row ``INSERT`` statements", "source": "codesearchnet"}
{"code": "def get_time(self, force_uptime=False):\n    if force_uptime:\n        return self.uptime\n    time = (self.uptime + self.time_offset)\n    if self.is_utc:\n        time |= (1 << 31)\n    return time", "docstring": "Get the current UTC time or uptime.\n\nBy default, this method will return UTC time if possible and fall back\nto uptime if not.  If you specify, force_uptime=True, it will always\nreturn uptime even if utc time is available.\n\nArgs:\nforce_uptime (bool): Always return uptime, defaults to False.\n\nReturns:\nint: The current uptime or encoded utc time.", "source": "codesearchnet"}
{"code": "def create_user_dsn(driver: str, **kw) -> bool:\n    \n    attributes = []  \n    for attr in kw.keys():\n        attributes.append(\"%s=%s\" % (attr, kw[attr]))\n    return bool(\n        ctypes.windll.ODBCCP32.SQLConfigDataSource(0, ODBC_ADD_DSN, driver,\n                                                   nul.join(attributes))\n    )", "docstring": "(Windows only.)\nCreate a user ODBC data source name (DSN).\n\nArgs:\ndriver: ODBC driver name\nkw: Driver attributes\n\nReturns:\nbool: was the DSN created?", "source": "juraj-google-style"}
{"code": "def wait_for_compilation_job(self, job, poll=5):\n        \n        desc = _wait_until(lambda: _compilation_job_status(self.sagemaker_client, job), poll)\n        self._check_job_status(job, desc, 'CompilationJobStatus')\n        return desc", "docstring": "Wait for an Amazon SageMaker Neo compilation job to complete.\n\nArgs:\njob (str): Name of the compilation job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeCompilationJob`` API.\n\nRaises:\nValueError: If the compilation job fails.", "source": "juraj-google-style"}
{"code": "def _preprocess_params(cls, kwargs):\n    for (attr, val) in kwargs.items():\n        if (cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_):\n            del kwargs[attr]\n            continue\n        if (val == ''):\n            kwargs[attr] = None\n            continue\n        if ((attr in class_mapper(cls).relationships) and (attr not in cls._no_overwrite_)):\n            rel = class_mapper(cls).relationships[attr]\n            if rel.uselist:\n                if isinstance(val, list):\n                    if all((isinstance(v, dict) for v in val)):\n                        rel_cls = cls.mapped_rel_class(attr)\n                        kwargs[attr] = rel_cls.update_or_new_all(list_of_kwargs=val, keys=[rel_cls.primary_key_name()])\n                elif isinstance(val, dict):\n                    rel_cls = cls.mapped_rel_class(attr)\n                    mapping_col = rel.collection_class().keyfunc.name\n                    list_of_kwargs = [merge(v, {mapping_col: k}) for (k, v) in val.items()]\n                    kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}\n            elif isinstance(val, dict):\n                rel_cls = cls.mapped_rel_class(attr)\n                kwargs[attr] = rel_cls.update_or_new(**merge(val, {'keys': [rel_cls.primary_key_name()]}))\n    return kwargs", "docstring": "Returns a preprocessed dictionary of parameters.\nUse this to filter the kwargs passed to `new`, `create`,\n`build` methods.\n\nArgs:\n\n**kwargs: a dictionary of parameters", "source": "codesearchnet"}
{"code": "def _expand_to_event_rank(self, x):\n    expanded_x = x\n    for _ in range(tensorshape_util.rank(self.event_shape)):\n        expanded_x = tf.expand_dims(expanded_x, (- 1))\n    return expanded_x", "docstring": "Expand the rank of x up to static_event_rank times for broadcasting.\n\nThe static event rank was checked to not be None at construction time.\n\nArgs:\nx: A tensor to expand.\nReturns:\nThe expanded tensor.", "source": "codesearchnet"}
{"code": "def GetShadowMap(self, since=None):\n    return ShadowUpdateGetter().GetUpdates(self, self.conf['shadow_url'], since)", "docstring": "Return the shadow map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of shadow.ShadowMap", "source": "github-repos"}
{"code": "def _resolve_task_logging(job_metadata, job_resources, task_descriptors):\n  \n  if not job_resources.logging:\n    return\n\n  for task_descriptor in task_descriptors:\n    logging_uri = provider_base.format_logging_uri(\n        job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)\n    logging_path = job_model.LoggingParam(logging_uri,\n                                          job_resources.logging.file_provider)\n\n    if task_descriptor.task_resources:\n      task_descriptor.task_resources = task_descriptor.task_resources._replace(\n          logging_path=logging_path)\n    else:\n      task_descriptor.task_resources = job_model.Resources(\n          logging_path=logging_path)", "docstring": "Resolve the logging path from job and task properties.\n\nArgs:\njob_metadata: Job metadata, such as job-id, job-name, and user-id.\njob_resources: Resources specified such as ram, cpu, and logging path.\ntask_descriptors: Task metadata, parameters, and resources.\n\nResolve the logging path, which may have substitution parameters such as\njob-id, task-id, user-id, and job-name.", "source": "juraj-google-style"}
{"code": "def target(self):\n    return c_api.TF_ServerTarget(self._server)", "docstring": "Returns the target for a `tf.compat.v1.Session` to connect to this server.\n\nTo create a\n`tf.compat.v1.Session` that\nconnects to this server, use the following snippet:\n\n```python\nserver = tf.distribute.Server(...)\nwith tf.compat.v1.Session(server.target):\n# ...\n```\n\nReturns:\nA string containing a session target for this server.", "source": "github-repos"}
{"code": "def restart_apppool(name):\n    \n    ps_cmd = ['Restart-WebAppPool', r\"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    return cmd_ret['retcode'] == 0", "docstring": "Restart an IIS application pool.\n\n.. versionadded:: 2016.11.0\n\nArgs:\nname (str): The name of the IIS application pool.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.restart_apppool name='MyTestPool'", "source": "juraj-google-style"}
{"code": "def __init__(self, meter_id=Meter.OFPM_ALL):\n        \n        super().__init__()\n        self.meter_id = meter_id", "docstring": "Create a MeterMultipartRequest with the optional parameters below.\n\nArgs:\nmeter_id(Meter): Meter Indentify.The value Meter.OFPM_ALL is used\nto refer to all Meters on the switch.", "source": "juraj-google-style"}
{"code": "def batch_reduce(self, reduce_op, value_destination_pairs, options=None):\n    if options is None:\n        options = collective_util.Options()\n    if not _validate_value_destination_pairs(value_destination_pairs):\n        value_destination_pairs = _normalize_value_destination_pairs(value_destination_pairs)\n    for _, d in value_destination_pairs:\n        validate_destinations(d)\n    if self._num_between_graph_workers == 1 and _all_devices_match(value_destination_pairs, self._canonicalize_devices) and (len(value_destination_pairs[0][0].values) == 1):\n        return [distribute_utils.regroup(v.values, wrap_class=value_lib.Mirrored) for v, _ in value_destination_pairs]\n    if options is None:\n        options = collective_util.Options()\n    return self.batch_reduce_implementation(reduce_op, value_destination_pairs, options)", "docstring": "Reduce values to destinations in batches.\n\nSee `tf.distribute.StrategyExtended.batch_reduce_to`. This can only be\ncalled in the cross-replica context.\n\nArgs:\nreduce_op: a `tf.distribute.ReduceOp` specifying how values should be\ncombined.\nvalue_destination_pairs: a sequence of (value, destinations) pairs. See\n`tf.distribute.CrossDeviceOps.reduce` for descriptions.\noptions: a `tf.distribute.experimental.CommunicationOptions`. See\n`tf.distribute.experimental.CommunicationOptions` for details.\n\nReturns:\nA list of `tf.Tensor` or `tf.distribute.DistributedValues`, one per pair\nin `value_destination_pairs`.\n\nRaises:\nValueError: if `value_destination_pairs` is not an iterable of\ntuples of `tf.distribute.DistributedValues` and destinations.", "source": "github-repos"}
{"code": "def put_headers_in_environ(headers, environ):\n    for (key, value) in headers:\n        environ[('HTTP_%s' % key.upper().replace('-', '_'))] = value", "docstring": "Given a list of headers, put them into environ based on PEP-333.\n\nThis converts headers to uppercase, prefixes them with 'HTTP_', and\nconverts dashes to underscores before adding them to the environ dict.\n\nArgs:\nheaders: A list of (header, value) tuples.  The HTTP headers to add to the\nenvironment.\nenviron: An environ dict for the request as defined in PEP-333.", "source": "codesearchnet"}
{"code": "def is_lambda(fun):\n    \n    return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__", "docstring": "Check whether the given function is a lambda function.\n\n.. testsetup::\n\nfrom proso.func import is_lambda\n\n.. testcode::\n\ndef not_lambda_fun():\nreturn 1\n\nlambda_fun = lambda: 1\n\nprint(\nis_lambda(not_lambda_fun),\nis_lambda(lambda_fun)\n)\n.. testoutput::\n\nFalse True\n\nArgs:\nfun (function)\n\nReturns:\nbool: True if the given function is a lambda function, False otherwise", "source": "juraj-google-style"}
{"code": "def __init__(self, report_interval: float = 5.0, max_pbcs: int = 4):\n        \n        LOG.info('Starting Processing Block Scheduler.')\n        self._queue = self._init_queue()\n        self._pb_events = ProcessingBlockList().subscribe(__service_name__)\n        self._report_interval = report_interval\n        self._num_pbcs = 0  \n        self._max_pbcs = max_pbcs\n        self._pb_list = ProcessingBlockList()", "docstring": "Initialise the Scheduler.\n\nArgs:\nreport_interval (float): Minimum interval between reports, in s\nmax_pbcs (int): Maximum number of concurrent PBCs\n(and therefore PBs) that can be running.", "source": "juraj-google-style"}
{"code": "def write_double(self, value, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.pack(('%sd' % endian), value)", "docstring": "Pack the value as a double and write 8 bytes to the stream.\n\nArgs:\nvalue (number): the value to write to the stream.\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def CreateSmartShoppingAdGroup(client, campaign_id):\n  \n  ad_group_service = client.GetService('AdGroupService', version='v201809')\n  \n  ad_group = {\n      'campaignId': campaign_id,\n      'name': 'Smart Shopping ad group \n      \n      'adGroupType': 'SHOPPING_GOAL_OPTIMIZED_ADS'\n  }\n\n  adgroup_operations = {\n      'operator': 'ADD',\n      'operand': ad_group\n  }\n\n  \n  ad_group = ad_group_service.mutate(adgroup_operations)['value'][0]\n  ad_group_id = ad_group['id']\n\n  print ('AdGroup with name \"%s\" and ID \"%s\" was added.'\n         % (ad_group['name'], ad_group_id))\n\n  return ad_group_id", "docstring": "Adds a new Smart Shopping ad group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: the str ID of a Smart Shopping campaign.\nReturns:\nAn ad group ID.", "source": "juraj-google-style"}
{"code": "class Pop2PianoProcessor(ProcessorMixin):\n    attributes = ['feature_extractor', 'tokenizer']\n    feature_extractor_class = 'Pop2PianoFeatureExtractor'\n    tokenizer_class = 'Pop2PianoTokenizer'\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n\n    def __call__(self, audio: Union[np.ndarray, List[float], List[np.ndarray]]=None, sampling_rate: Optional[Union[int, List[int]]]=None, steps_per_beat: int=2, resample: Optional[bool]=True, notes: Union[List, TensorType]=None, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, verbose: bool=True, **kwargs) -> Union[BatchFeature, BatchEncoding]:\n        \n        if (audio is None and sampling_rate is None) and notes is None:\n            raise ValueError('You have to specify at least audios and sampling_rate in order to use feature extractor or notes to use the tokenizer part.')\n        if audio is not None and sampling_rate is not None:\n            inputs = self.feature_extractor(audio=audio, sampling_rate=sampling_rate, steps_per_beat=steps_per_beat, resample=resample, **kwargs)\n        if notes is not None:\n            encoded_token_ids = self.tokenizer(notes=notes, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n        if notes is None:\n            return inputs\n        elif audio is None or sampling_rate is None:\n            return encoded_token_ids\n        else:\n            inputs['token_ids'] = encoded_token_ids['token_ids']\n            return inputs\n\n    def batch_decode(self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool=True) -> BatchEncoding:\n        \n        return self.tokenizer.batch_decode(token_ids=token_ids, feature_extractor_output=feature_extractor_output, return_midi=return_midi)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        feature_extractor_input_names = self.feature_extractor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))\n\n    def save_pretrained(self, save_directory, **kwargs):\n        if os.path.isfile(save_directory):\n            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')\n        os.makedirs(save_directory, exist_ok=True)\n        return super().save_pretrained(save_directory, **kwargs)\n\n    @classmethod\n    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):\n        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)\n        return cls(*args)", "docstring": "Constructs an Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single\nprocessor.\n\n[`Pop2PianoProcessor`] offers all the functionalities of [`Pop2PianoFeatureExtractor`] and [`Pop2PianoTokenizer`].\nSee the docstring of [`~Pop2PianoProcessor.__call__`] and [`~Pop2PianoProcessor.decode`] for more information.\n\nArgs:\nfeature_extractor (`Pop2PianoFeatureExtractor`):\nAn instance of [`Pop2PianoFeatureExtractor`]. The feature extractor is a required input.\ntokenizer (`Pop2PianoTokenizer`):\nAn instance of ['Pop2PianoTokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None):\n    return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)", "docstring": "Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`\n\nArgs:\nmask (`torch.Tensor`):\nA 2D attention mask of shape `(batch_size, key_value_length)`\ndtype (`torch.dtype`):\nThe torch dtype the created mask shall have.\ntgt_len (`int`):\nThe target length or query length the created mask shall have.", "source": "github-repos"}
{"code": "def add_vectors(self, vectors):\n        \n        if isinstance(vectors[0], (list, np.ndarray)):\n            for vec in vectors:\n                self.vectors.append(vec)\n        else:\n            self.vectors.append(vectors)", "docstring": "Add a list of vectors to Bloch sphere.\n\nArgs:\nvectors (array_like):\nArray with vectors of unit length or smaller.", "source": "juraj-google-style"}
{"code": "def from_Z(z: int):\n        \n        for sym, data in _pt_data.items():\n            if data[\"Atomic no\"] == z:\n                return Element(sym)\n        raise ValueError(\"No element with this atomic number %s\" % z)", "docstring": "Get an element from an atomic number.\n\nArgs:\nz (int): Atomic number\n\nReturns:\nElement with atomic number z.", "source": "juraj-google-style"}
{"code": "def _tensor_list_column_heads(self, parsed, max_timestamp_width, max_dump_size_width, max_op_type_width):\n    base_command = 'list_tensors'\n    if parsed.tensor_filter:\n        base_command += ' -f %s' % parsed.tensor_filter\n    if parsed.op_type_filter:\n        base_command += ' -t %s' % parsed.op_type_filter\n    if parsed.node_name_filter:\n        base_command += ' -n %s' % parsed.node_name_filter\n    attr_segs = {0: []}\n    row = self._TIMESTAMP_COLUMN_HEAD\n    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_TIMESTAMP)\n    if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and (not parsed.reverse):\n        command += ' -r'\n    attr_segs[0].append((0, len(row), [debugger_cli_common.MenuItem(None, command), 'bold']))\n    row += ' ' * (max_timestamp_width - len(row))\n    prev_len = len(row)\n    row += self._DUMP_SIZE_COLUMN_HEAD\n    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_DUMP_SIZE)\n    if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and (not parsed.reverse):\n        command += ' -r'\n    attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), 'bold']))\n    row += ' ' * (max_dump_size_width + max_timestamp_width - len(row))\n    prev_len = len(row)\n    row += self._OP_TYPE_COLUMN_HEAD\n    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_OP_TYPE)\n    if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and (not parsed.reverse):\n        command += ' -r'\n    attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), 'bold']))\n    row += ' ' * (max_op_type_width + max_dump_size_width + max_timestamp_width - len(row))\n    prev_len = len(row)\n    row += self._TENSOR_NAME_COLUMN_HEAD\n    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_TENSOR_NAME)\n    if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and (not parsed.reverse):\n        command += ' -r'\n    attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem('', command), 'bold']))\n    row += ' ' * (max_op_type_width + max_dump_size_width + max_timestamp_width - len(row))\n    return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)", "docstring": "Generate a line containing the column heads of the tensor list.\n\nArgs:\nparsed: Parsed arguments (by argparse) of the list_tensors command.\nmax_timestamp_width: (int) maximum width of the timestamp column.\nmax_dump_size_width: (int) maximum width of the dump size column.\nmax_op_type_width: (int) maximum width of the op type column.\n\nReturns:\nA RichTextLines object.", "source": "github-repos"}
{"code": "def get_full_description(self):\n    try:\n        time_segment = self.get_time_of_day_description()\n        day_of_month_desc = self.get_day_of_month_description()\n        month_desc = self.get_month_description()\n        day_of_week_desc = self.get_day_of_week_description()\n        year_desc = self.get_year_description()\n        description = '{0}{1}{2}{3}{4}'.format(time_segment, day_of_month_desc, day_of_week_desc, month_desc, year_desc)\n        description = self.transform_verbosity(description, self._options.verbose)\n        description = self.transform_case(description, self._options.casing_type)\n    except Exception:\n        description = _('An error occured when generating the expression description.  Check the cron expression syntax.')\n        if self._options.throw_exception_on_parse_error:\n            raise FormatException(description)\n    return description", "docstring": "Generates the FULL description\n\nReturns:\nThe FULL description\nRaises:\nFormatException: if formating fails and throw_exception_on_parse_error is True", "source": "codesearchnet"}
{"code": "def officers(self, num, **kwargs):\n        \n        baseuri = self._BASE_URI + \"company/{}/officers\".format(num)\n        res = self.session.get(baseuri, params=kwargs)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for a company's registered officers by company number.\n\nArgs:\nnum (str): Company number to search on.\nkwargs (dict): additional keywords passed into\nrequests.session.get *params* keyword.", "source": "juraj-google-style"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    \n    tsk_file = None\n    inode = getattr(path_spec, 'inode', None)\n    location = getattr(path_spec, 'location', None)\n\n    try:\n      if inode is not None:\n        tsk_file = self._tsk_file_system.open_meta(inode=inode)\n      elif location is not None:\n        tsk_file = self._tsk_file_system.open(location)\n\n    except IOError:\n      pass\n\n    return tsk_file is not None", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"}
{"code": "def get_configs(__pkg: str, __name: str='config') -> List[str]:\n    dirs = [user_config(__pkg)]\n    dirs.extend((path.expanduser(path.sep.join([d, __pkg])) for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':')))\n    configs = []\n    for dname in reversed(dirs):\n        test_path = path.join(dname, __name)\n        if path.exists(test_path):\n            configs.append(test_path)\n    return configs", "docstring": "Return all configs for given package.\n\nArgs:\n__pkg: Package name\n__name: Configuration file name", "source": "codesearchnet"}
{"code": "def convert_acquire(self, shift, instruction):\n    meas_level = self._run_config.get('meas_level', 2)\n    command_dict = {'name': 'acquire', 't0': (shift + instruction.start_time), 'duration': instruction.duration, 'qubits': [q.index for q in instruction.acquires], 'memory_slot': [m.index for m in instruction.mem_slots]}\n    if (meas_level == 2):\n        if instruction.command.discriminator:\n            command_dict.update({'discriminators': [QobjMeasurementOption(name=instruction.command.discriminator.name, params=instruction.command.discriminator.params)]})\n        command_dict.update({'register_slot': [regs.index for regs in instruction.reg_slots]})\n    if (meas_level >= 1):\n        if instruction.command.kernel:\n            command_dict.update({'kernels': [QobjMeasurementOption(name=instruction.command.kernel.name, params=instruction.command.kernel.params)]})\n    return self._qobj_model(**command_dict)", "docstring": "Return converted `AcquireInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (AcquireInstruction): acquire instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "codesearchnet"}
{"code": "def get_hosted_zone_by_name(client, zone_name):\n    p = client.get_paginator('list_hosted_zones')\n    for i in p.paginate():\n        for zone in i['HostedZones']:\n            if (zone['Name'] == zone_name):\n                return parse_zone_id(zone['Id'])\n    return None", "docstring": "Get the zone id of an existing zone by name.\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\nstring: The Id of the Hosted Zone.", "source": "codesearchnet"}
{"code": "def split_heads(self, x):\n    \n    with tf.name_scope(\"split_heads\"):\n      batch_size = tf.shape(x)[0]\n      length = tf.shape(x)[1]\n\n      \n      depth = (self.hidden_size \n\n      \n      x = tf.reshape(x, [batch_size, length, self.num_heads, depth])\n\n      \n      return tf.transpose(x, [0, 2, 1, 3])", "docstring": "Split x into different heads, and transpose the resulting value.\n\nThe tensor is transposed to insure the inner dimensions hold the correct\nvalues during the matrix multiplication.\n\nArgs:\nx: A tensor with shape [batch_size, length, hidden_size]\n\nReturns:\nA tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]", "source": "juraj-google-style"}
{"code": "def get_path(\n        self, start_x: int, start_y: int, goal_x: int, goal_y: int\n    ) -> List[Tuple[int, int]]:\n        \n        lib.TCOD_path_compute(self._path_c, start_x, start_y, goal_x, goal_y)\n        path = []\n        x = ffi.new(\"int[2]\")\n        y = x + 1\n        while lib.TCOD_path_walk(self._path_c, x, y, False):\n            path.append((x[0], y[0]))\n        return path", "docstring": "Return a list of (x, y) steps to reach the goal point, if possible.\n\nArgs:\nstart_x (int): Starting X position.\nstart_y (int): Starting Y position.\ngoal_x (int): Destination X position.\ngoal_y (int): Destination Y position.\nReturns:\nList[Tuple[int, int]]:\nA list of points, or an empty list if there is no valid path.", "source": "juraj-google-style"}
{"code": "def CopyToDict(self):\n    dictionary = {}\n    for (attribute_name, attribute_value) in self.GetAttributes():\n        if (attribute_value is None):\n            continue\n        dictionary[attribute_name] = attribute_value\n    return dictionary", "docstring": "Copies the attribute container to a dictionary.\n\nReturns:\ndict[str, object]: attribute values per name.", "source": "codesearchnet"}
{"code": "def parse_resource_type(self, response):\n\n\t\t\n\n\t\t\n\t\tlinks = [\n\t\t\tlink.split(\";\")[0].lstrip('<').rstrip('>')\n\t\t\tfor link in response.headers['Link'].split(', ')\n\t\t\tif link.startswith('<http:\n\n\t\t\n\t\tldp_resource_types = [\n\t\t\tself.repo.namespace_manager.compute_qname(resource_type)[2]\n\t\t\tfor resource_type in links]\n\n\t\tlogger.debug('Parsed LDP resource types from LINK header: %s' % ldp_resource_types)\n\n\t\t\n\t\t\n\t\tif 'NonRDFSource' in ldp_resource_types:\n\t\t\treturn NonRDFSource\n\t\t\n\t\telif 'BasicContainer' in ldp_resource_types:\n\t\t\treturn BasicContainer\n\t\t\n\t\telif 'DirectContainer' in ldp_resource_types:\n\t\t\treturn DirectContainer\n\t\t\n\t\telif 'IndirectContainer' in ldp_resource_types:\n\t\t\treturn IndirectContainer\n\t\telse:\n\t\t\tlogger.debug('could not determine resource type from Link header, returning False')\n\t\t\treturn False", "docstring": "parse resource type from self.http_request()\n\nNote: uses isinstance() as plugins may extend these base LDP resource type.\n\nArgs:\nresponse (requests.models.Response): response object\n\nReturns:\n[NonRDFSource, BasicContainer, DirectContainer, IndirectContainer]", "source": "juraj-google-style"}
{"code": "def _verify_output(self, submission_type):\n    \n    result = True\n    if submission_type == 'defense':\n      try:\n        image_classification = load_defense_output(\n            os.path.join(self._sample_output_dir, 'result.csv'))\n        expected_keys = [IMAGE_NAME_PATTERN.format(i)\n                         for i in range(BATCH_SIZE)]\n        if set(image_classification.keys()) != set(expected_keys):\n          logging.error('Classification results are not saved for all images')\n          result = False\n      except IOError as e:\n        logging.error('Failed to read defense output file: %s', e)\n        result = False\n    else:\n      for i in range(BATCH_SIZE):\n        image_filename = os.path.join(self._sample_output_dir,\n                                      IMAGE_NAME_PATTERN.format(i))\n        try:\n          img = np.array(Image.open(image_filename).convert('RGB'))\n          if list(img.shape) != [299, 299, 3]:\n            logging.error('Invalid image size %s for image %s',\n                          str(img.shape), image_filename)\n            result = False\n        except IOError as e:\n          result = False\n    return result", "docstring": "Verifies correctness of the submission output.\n\nArgs:\nsubmission_type: type of the submission\n\nReturns:\nTrue if output looks valid", "source": "juraj-google-style"}
{"code": "def __init__(self, reduce_to_device=None, accumulation_fn=None):\n    self.reduce_to_device = reduce_to_device\n    self.accumulation_fn = accumulation_fn or math_ops.add_n\n    super(ReductionToOneDevice, self).__init__()", "docstring": "Initializes with a device to reduce to and a way to accumulate.\n\nArgs:\nreduce_to_device: the intermediate device to reduce to. If None, reduce\nto the first device in `destinations` of the `reduce` method.\naccumulation_fn: a function that does accumulation.  If None,\n`tf.math.add_n` is used.", "source": "github-repos"}
{"code": "def minimize(f, start=None, smooth=False, log=None, array=False, **vargs):\n    if (start is None):\n        assert (not array), 'Please pass starting values explicitly when array=True'\n        arg_count = f.__code__.co_argcount\n        assert (arg_count > 0), 'Please pass starting values explicitly for variadic functions'\n        start = ([0] * arg_count)\n    if (not hasattr(start, '__len__')):\n        start = [start]\n    if array:\n        objective = f\n    else:\n\n        @functools.wraps(f)\n        def objective(args):\n            return f(*args)\n    if ((not smooth) and ('method' not in vargs)):\n        vargs['method'] = 'Powell'\n    result = optimize.minimize(objective, start, **vargs)\n    if (log is not None):\n        log(result)\n    if (len(start) == 1):\n        return result.x.item(0)\n    else:\n        return result.x", "docstring": "Minimize a function f of one or more arguments.\n\nArgs:\nf: A function that takes numbers and returns a number\n\nstart: A starting value or list of starting values\n\nsmooth: Whether to assume that f is smooth and use first-order info\n\nlog: Logging function called on the result of optimization (e.g. print)\n\nvargs: Other named arguments passed to scipy.optimize.minimize\n\nReturns either:\n(a) the minimizing argument of a one-argument function\n(b) an array of minimizing arguments of a multi-argument function", "source": "codesearchnet"}
{"code": "def _process_new(self, feed_item):\n    campaign = self.campaign_dao.get(feed_item, required=True)\n    placement_group = self.placement_group_dao.get(feed_item, required=True)\n    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n    if placement_group:\n        feed_item[FieldMap.PLACEMENT_GROUP_ID] = placement_group['id']\n        feed_item[FieldMap.PLACEMENT_GROUP_NAME] = placement_group['name']\n    result = {'name': feed_item.get(FieldMap.PLACEMENT_NAME, None), 'adBlockingOptOut': feed_item.get(FieldMap.PLACEMENT_AD_BLOCKING, False), 'campaignId': campaign['id'], 'placementGroupId': placement_group['id'] if placement_group else None, 'archived': feed_item.get(FieldMap.PLACEMENT_ARCHIVED, False), 'siteId': feed_item.get(FieldMap.SITE_ID, None), 'paymentSource': 'PLACEMENT_AGENCY_PAID', 'pricingSchedule': {'startDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.PLACEMENT_START_DATE, None)), 'endDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.PLACEMENT_END_DATE, None)), 'pricingType': feed_item.get(FieldMap.PLACEMENT_PRICING_SCHEDULE_COST_STRUCTURE, None) or 'PRICING_TYPE_CPM', 'pricingPeriods': [{'startDate': feed_item.get(FieldMap.PLACEMENT_START_DATE, None), 'endDate': feed_item.get(FieldMap.PLACEMENT_END_DATE, None)}]}}\n    self._process_skipability(feed_item, result)\n    if feed_item.get(FieldMap.PLACEMENT_ADDITIONAL_KEY_VALUES, None):\n        result['tagSetting'] = {'additionalKeyValues': feed_item.get(FieldMap.PLACEMENT_ADDITIONAL_KEY_VALUES, None)}\n    if feed_item.get(FieldMap.PLACEMENT_PRICING_TESTING_START, None):\n        result['pricingSchedule']['testingStartDate'] = feed_item.get(FieldMap.PLACEMENT_PRICING_TESTING_START, None)\n    self._process_active_view_and_verification(result, feed_item)\n    if feed_item.get(FieldMap.PLACEMENT_TYPE, None) == 'VIDEO' or feed_item[FieldMap.PLACEMENT_TYPE] == 'IN_STREAM_VIDEO':\n        result['compatibility'] = 'IN_STREAM_VIDEO'\n        result['size'] = {'width': '0', 'height': '0'}\n        result['tagFormats'] = ['PLACEMENT_TAG_INSTREAM_VIDEO_PREFETCH']\n    elif feed_item[FieldMap.PLACEMENT_TYPE] == 'IN_STREAM_AUDIO':\n        result['compatibility'] = 'IN_STREAM_AUDIO'\n        result['size'] = {'width': '0', 'height': '0'}\n        result['tagFormats'] = ['PLACEMENT_TAG_INSTREAM_VIDEO_PREFETCH']\n    else:\n        result['compatibility'] = 'DISPLAY'\n        width = 1\n        height = 1\n        raw_size = feed_item.get(FieldMap.ASSET_SIZE, '0x0')\n        if raw_size and 'x' in raw_size:\n            width, height = raw_size.strip().lower().split('x')\n        sizes = self.get_sizes(int(width), int(height))\n        if sizes:\n            result['size'] = {'id': sizes[0]['id']}\n        else:\n            result['size'] = {'width': int(width), 'height': int(height)}\n        result['tagFormats'] = ['PLACEMENT_TAG_STANDARD', 'PLACEMENT_TAG_JAVASCRIPT', 'PLACEMENT_TAG_IFRAME_JAVASCRIPT', 'PLACEMENT_TAG_IFRAME_ILAYER', 'PLACEMENT_TAG_INTERNAL_REDIRECT', 'PLACEMENT_TAG_TRACKING', 'PLACEMENT_TAG_TRACKING_IFRAME', 'PLACEMENT_TAG_TRACKING_JAVASCRIPT']\n    self._process_transcode(result, feed_item)\n    self._process_pricing_schedule(result, feed_item)\n    return result", "docstring": "Creates a new placement DCM object from a feed item representing an placement from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item 
representing the placement from the Bulkdozer feed.\n\nReturns:\nAn placement object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def get_table_metadata(engine, table):\n    metadata = MetaData()\n    metadata.reflect(bind=engine, only=[table])\n    table_metadata = Table(table, metadata, autoload=True)\n    return table_metadata", "docstring": "Extract all useful infos from the given table\n\nArgs:\nengine: SQLAlchemy connection engine\ntable: table name\n\nReturns:\nDictionary of infos", "source": "codesearchnet"}
{"code": "def join(path, *paths):\n    path_ = compat.as_str_any(compat.path_to_str(path))\n    if ':\n        return urljoin(path, *paths)\n    return os.path.join(path, *paths)", "docstring": "Join one or more path components intelligently.\n\nTensorFlow specific filesystems will be joined\nlike a url (using \"/\" as the path seperator) on all platforms:\n\nOn Windows or Linux/Unix-like:\n>>> tf.io.gfile.join(\"gcs://folder\", \"file.py\")\n'gcs://folder/file.py'\n\n>>> tf.io.gfile.join(\"ram://folder\", \"file.py\")\n'ram://folder/file.py'\n\nBut the native filesystem is handled just like os.path.join:\n\n>>> path = tf.io.gfile.join(\"folder\", \"file.py\")\n>>> if os.name == \"nt\":\n...   expected = \"folder\\\\file.py\"  # Windows\n... else:\n...   expected = \"folder/file.py\"  # Linux/Unix-like\n>>> path == expected\nTrue\n\nArgs:\npath: string, path to a directory\npaths: string, additional paths to concatenate\n\nReturns:\npath: the joined path.", "source": "github-repos"}
{"code": "def add_ldap_group_link(self, cn, group_access, provider, **kwargs):\n        \n        path = '/groups/%s/ldap_group_links' % self.get_id()\n        data = {'cn': cn, 'group_access': group_access, 'provider': provider}\n        self.manager.gitlab.http_post(path, post_data=data, **kwargs)", "docstring": "Add an LDAP group link.\n\nArgs:\ncn (str): CN of the LDAP group\ngroup_access (int): Minimum access level for members of the LDAP\ngroup\nprovider (str): LDAP provider for the LDAP group\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def propose(self, n=1):\n    proposed_params = []\n    for i in range(n):\n        candidate_params = self._create_candidates()\n        if (candidate_params is None):\n            return None\n        predictions = self.predict(candidate_params)\n        idx = self._acquire(predictions)\n        params = {}\n        for i in range(candidate_params[(idx, :)].shape[0]):\n            inverse_transformed = self.tunables[i][1].inverse_transform(candidate_params[(idx, i)])\n            params[self.tunables[i][0]] = inverse_transformed\n        proposed_params.append(params)\n    return (params if (n == 1) else proposed_params)", "docstring": "Use the trained model to propose a new set of parameters.\n\nArgs:\nn (int, optional): number of candidates to propose\n\nReturns:\nMapping of tunable name to proposed value. If called with n>1 then proposal is a list\nof dictionaries.", "source": "codesearchnet"}
{"code": "def is_duplicated(self, item):\n        \n        if isinstance(item, dict):\n            hashable_item = json.dumps(item, sort_keys=True)\n        elif isinstance(item, list):\n            hashable_item = frozenset(item)\n        else:\n            hashable_item = item\n        if hashable_item in self._cache:\n            return True\n        else:\n            if self.cache_capacity > 0 and len(\n                    self._cache) >= self.cache_capacity:\n                self._cache.popitem(False)\n            self._cache[hashable_item] = 1\n            return False", "docstring": "Check whether the item has been in the cache\n\nIf the item has not been seen before, then hash it and put it into\nthe cache, otherwise indicates the item is duplicated. When the cache\nsize exceeds capacity, discard the earliest items in the cache.\n\nArgs:\nitem (object): The item to be checked and stored in cache. It must\nbe immutable or a list/dict.\nReturns:\nbool: Whether the item has been in cache.", "source": "juraj-google-style"}
{"code": "def emit(self, name, *args, **kwargs):\n    e = self.__property_events.get(name)\n    if (e is None):\n        e = self.__events[name]\n    return e(*args, **kwargs)", "docstring": "Dispatches an event to any subscribed listeners\n\nNote:\nIf a listener returns :obj:`False`, the event will stop dispatching to\nother listeners. Any other return value is ignored.\n\nArgs:\nname (str): The name of the :class:`Event` to dispatch\n*args (Optional): Positional arguments to be sent to listeners\n**kwargs (Optional): Keyword arguments to be sent to listeners", "source": "codesearchnet"}
{"code": "def _validate_xoxp_token(self):\n    if self.token.startswith('xoxb'):\n        method_name = inspect.stack()[1][3]\n        msg = \"The method '{}' cannot be called with a Bot Token.\".format(method_name)\n        raise err.BotUserAccessError(msg)", "docstring": "Ensures that an xoxp token is used when the specified method is called.\n\nRaises:\nBotUserAccessError: If the API method is called with a Bot User OAuth Access Token.", "source": "codesearchnet"}
{"code": "def add_documents(self, docs):\n        \n        for sent in docs:\n            sent = map(self.process_token, sent)\n            self._token_count.update(sent)", "docstring": "Update dictionary from a collection of documents. Each document is a list\nof tokens.\n\nArgs:\ndocs (list): documents to add.", "source": "juraj-google-style"}
{"code": "def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):\n\n    def symbols_to_logits_fn(ids):\n        'Go from ids to logits.'\n        ids = tf.expand_dims(ids, axis=2)\n        latents_discrete = tf.pad(ids[(:, 1:)], [[0, 0], [0, 1], [0, 0]])\n        with tf.variable_scope(tf.get_variable_scope(), reuse=False):\n            latents_dense = embed(tf.one_hot(latents_discrete, depth=(2 ** hparams.bottleneck_bits)), hparams.hidden_size)\n            latents_pred = transformer_latent_decoder(latents_dense, inputs, ed, hparams, name='latent_prediction')\n            logits = tf.layers.dense(latents_pred, (2 ** hparams.bottleneck_bits), name='logits_dense')\n            current_output_position = (common_layers.shape_list(ids)[1] - 1)\n            logits = logits[(:, current_output_position, :)]\n        return logits\n    initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)\n    length = tf.shape(latents_dense_in)[1]\n    (ids, _, _) = beam_search.beam_search(symbols_to_logits_fn, initial_ids, 1, length, (2 ** hparams.bottleneck_bits), alpha=0.0, eos_id=(- 1), stop_early=False)\n    res = tf.expand_dims(ids[(:, 0, :)], axis=2)\n    return res[(:, 1:)]", "docstring": "Samples from the latent space in the autoencoder.\n\nArgs:\nlatents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of\nits first two dimensions are used. length_q is the latent length, which is\nheight * width * hparams.num_latents / (2**hparams.num_compress_steps).\ninputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings\nto attend to in decoder.\ned: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q,\nlength_kv]. Encoder-decoder attention bias.\nembed: Callable which embeds discrete latent hot-vectors and a hidden size\nand returns dense vectors.\nhparams: HParams.\n\nReturns:\nTensor of shape [batch, length].", "source": "codesearchnet"}
{"code": "def Begin(self, function_name):\n    \n    self.in_a_function = True\n    self.lines_in_function = 0\n    self.current_function = function_name", "docstring": "Start analyzing function body.\n\nArgs:\nfunction_name: The name of the function being tracked.", "source": "juraj-google-style"}
{"code": "def generate_hpo_gene_list(self, *hpo_terms):\n    genes = {}\n    for term in hpo_terms:\n        hpo_obj = self.hpo_term(term)\n        if hpo_obj:\n            for hgnc_id in hpo_obj['genes']:\n                if (hgnc_id in genes):\n                    genes[hgnc_id] += 1\n                else:\n                    genes[hgnc_id] = 1\n        else:\n            LOG.warning('Term %s could not be found', term)\n    sorted_genes = sorted(genes.items(), key=operator.itemgetter(1), reverse=True)\n    return sorted_genes", "docstring": "Generate a sorted list with namedtuples of hpogenes\n\nEach namedtuple of the list looks like (hgnc_id, count)\n\nArgs:\nhpo_terms(iterable(str))\n\nReturns:\nhpo_genes(list(HpoGene))", "source": "codesearchnet"}
{"code": "def recipe_bigquery_run_query(config, auth_write, query, legacy):\n    bigquery(config, {'auth': auth_write, 'run': {'query': query, 'legacy': legacy}})", "docstring": "Run query on a project.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nquery (text) - SQL with newlines and all.\nlegacy (boolean) - Query type must match table and query format.", "source": "github-repos"}
{"code": "def load_checkpoint(model,\n                    filename,\n                    map_location=None,\n                    strict=False,\n                    logger=None):\n    \n    \n    if filename.startswith('modelzoo:\n        import torchvision\n        model_urls = dict()\n        for _, name, ispkg in pkgutil.walk_packages(\n                torchvision.models.__path__):\n            if not ispkg:\n                _zoo = import_module('torchvision.models.{}'.format(name))\n                _urls = getattr(_zoo, 'model_urls')\n                model_urls.update(_urls)\n        model_name = filename[11:]\n        checkpoint = model_zoo.load_url(model_urls[model_name])\n    elif filename.startswith('open-mmlab:\n        model_name = filename[13:]\n        checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name])\n    elif filename.startswith(('http:\n        checkpoint = model_zoo.load_url(filename)\n    else:\n        if not osp.isfile(filename):\n            raise IOError('{} is not a checkpoint file'.format(filename))\n        checkpoint = torch.load(filename, map_location=map_location)\n    \n    if isinstance(checkpoint, OrderedDict):\n        state_dict = checkpoint\n    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:\n        state_dict = checkpoint['state_dict']\n    else:\n        raise RuntimeError(\n            'No state_dict found in checkpoint file {}'.format(filename))\n    \n    if list(state_dict.keys())[0].startswith('module.'):\n        state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()}\n    \n    if hasattr(model, 'module'):\n        load_state_dict(model.module, state_dict, strict, logger)\n    else:\n        load_state_dict(model, state_dict, strict, logger)\n    return checkpoint", "docstring": "Load checkpoint from a file or URI.\n\nArgs:\nmodel (Module): Module to load checkpoint.\nfilename (str): Either a filepath or URL or modelzoo://xxxxxxx.\nmap_location (str): Same as :func:`torch.load`.\nstrict (bool): Whether to allow different params for the model and\ncheckpoint.\nlogger (:mod:`logging.Logger` or None): The logger for error message.\n\nReturns:\ndict or OrderedDict: The loaded checkpoint.", "source": "juraj-google-style"}
{"code": "def invoke_process_batch(self, windowed_batch, additional_args=None, additional_kwargs=None):\n    raise NotImplementedError", "docstring": "Invokes the DoFn.process() function.\n\nArgs:\nwindowed_batch: a WindowedBatch object that gives a batch of elements for\nwhich process_batch() method should be invoked, along with\nthe window each element belongs to.\nadditional_args: additional arguments to be passed to the current\n`DoFn.process()` invocation, usually as side inputs.\nadditional_kwargs: additional keyword arguments to be passed to the\ncurrent `DoFn.process()` invocation.", "source": "github-repos"}
{"code": "def _make_hostport(conn, default_host, default_port, default_user='', default_password=None):\n    \n    parsed = urllib.parse.urlparse('\n    return Connection(\n        parsed.hostname or default_host,\n        parsed.port or default_port,\n        parsed.username if parsed.username is not None else default_user,\n        parsed.password if parsed.password is not None else default_password,\n    )", "docstring": "Convert a '[user[:pass]@]host:port' string to a Connection tuple.\n\nIf the given connection is empty, use defaults.\nIf no port is given, use the default.\n\nArgs:\nconn (str): the string describing the target hsot/port\ndefault_host (str): the host to use if ``conn`` is empty\ndefault_port (int): the port to use if not given in ``conn``.\n\nReturns:\n(str, int): a (host, port) tuple.", "source": "juraj-google-style"}
{"code": "def _parse_positive_int_param(request, param_name):\n  \n  param = request.args.get(param_name)\n  if not param:\n    return None\n  try:\n    param = int(param)\n    if param <= 0:\n      raise ValueError()\n    return param\n  except ValueError:\n    return -1", "docstring": "Parses and asserts a positive (>0) integer query parameter.\n\nArgs:\nrequest: The Werkzeug Request object\nparam_name: Name of the parameter.\n\nReturns:\nParam, or None, or -1 if parameter is not a positive integer.", "source": "juraj-google-style"}
{"code": "def get_entries(attr_name):\n    assert attr_name in ['inputs', 'outputs']\n    entries = {}\n    for op_type in ops._gradient_registry.list():\n        if op_type in _EXCLUDED_OPS:\n            continue\n        num_values = _get_num_inputs_outputs(op_type)[0 if attr_name == 'inputs' else 1]\n        gradient_fn = ops._gradient_registry.lookup(op_type)\n        if gradient_fn is None:\n            if num_values != -1:\n                entries[op_type] = '{\"%s\"},' % op_type\n            continue\n        used_tensors = _live_tensors(gradient_fn, attr_name=attr_name)\n        if used_tensors is _ALL:\n            continue\n        elif not used_tensors:\n            entries[op_type] = '{\"%s\"},' % op_type\n        else:\n            all_tensors = set(range(num_values))\n            unused_tensors = all_tensors - used_tensors\n            if unused_tensors:\n                unused_tensor_list = sorted(list(unused_tensors))\n                entries[op_type] = '{\"%s\", %d, {%s}},' % (op_type, len(unused_tensor_list), ', '.join((str(i) for i in unused_tensor_list)))\n    return entries", "docstring": "Returns the dict of entries.\n\nEach entry is of the form {op_name, {true|false, indices}}\n\ntrue: All values are unused.\nfalse: `indices` are the only unused indices.\n\nNote: ops for which all values are used are not printed.\n\nArgs:\nattr_name: inputs or outputs.\n\nReturns:\nA dict from op_type to formatted entry in the dict.", "source": "github-repos"}
{"code": "def ping(self, destination, length=20):\n        \n        print '%s call ping' % self.port\n        print 'destination: %s' %destination\n        try:\n            cmd = 'ping %s %s' % (destination, str(length))\n            print cmd\n            self._sendline(cmd)\n            self._expect(cmd)\n            \n            time.sleep(1)\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"ping() Error: \" + str(e))", "docstring": "send ICMPv6 echo request with a given length to a unicast destination\naddress\n\nArgs:\ndestination: the unicast destination address of ICMPv6 echo request\nlength: the size of ICMPv6 echo request payload", "source": "juraj-google-style"}
{"code": "def __init__(self, error_formatter):\n    self._formatter = error_formatter", "docstring": "Creates a ParserError instance.\n\nArgs:\nerror_formatter: An ErrorFormatter to format the parse errors.", "source": "github-repos"}
{"code": "def validate_id(tx_body):\n        \n        \n        tx_body = deepcopy(tx_body)\n        try:\n            proposed_tx_id = tx_body['id']\n        except KeyError:\n            raise InvalidHash('No transaction id found!')\n\n        tx_body['id'] = None\n\n        tx_body_serialized = Transaction._to_str(tx_body)\n        valid_tx_id = Transaction._to_hash(tx_body_serialized)\n\n        if proposed_tx_id != valid_tx_id:\n            err_msg = (\"The transaction's id '{}' isn't equal to \"\n                       \"the hash of its body, i.e. it's not valid.\")\n            raise InvalidHash(err_msg.format(proposed_tx_id))", "docstring": "Validate the transaction ID of a transaction\n\nArgs:\ntx_body (dict): The Transaction to be transformed.", "source": "juraj-google-style"}
{"code": "def to_hgnc(self, hgnc_alias, build='37'):\n        \n        result = self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build)\n        if result:\n            for gene in result:\n                return gene['hgnc_symbol']\n        else:\n            return None", "docstring": "Check if a hgnc symbol is an alias\n\nReturn the correct hgnc symbol, if not existing return None\n\nArgs:\nhgnc_alias(str)\n\nReturns:\nhgnc_symbol(str)", "source": "juraj-google-style"}
{"code": "def add(self, element, multiplicity=1):\n    if (multiplicity < 1):\n        raise ValueError('Multiplicity must be positive')\n    self._elements[element] += multiplicity\n    self._total += multiplicity", "docstring": "Adds an element to the multiset.\n\n>>> ms = Multiset()\n>>> ms.add('a')\n>>> sorted(ms)\n['a']\n\nAn optional multiplicity can be specified to define how many of the element are added:\n\n>>> ms.add('b', 2)\n>>> sorted(ms)\n['a', 'b', 'b']\n\nThis extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity.\n\nArgs:\nelement:\nThe element to add to the multiset.\nmultiplicity:\nThe multiplicity i.e. count of elements to add.", "source": "codesearchnet"}
{"code": "def poll(self, batch_id, retry_seconds=None, back_off=None, timeout=None, halt_on_error=True):\n    if (self.halt_on_poll_error is not None):\n        halt_on_error = self.halt_on_poll_error\n    if ((self._poll_interval is None) and (self._batch_data_count is not None)):\n        self._poll_interval = max(math.ceil((self._batch_data_count / 300)), 5)\n    elif (self._poll_interval is None):\n        self._poll_interval = 15\n    if (back_off is None):\n        poll_interval_back_off = 2.5\n    else:\n        poll_interval_back_off = float(back_off)\n    if (retry_seconds is None):\n        poll_retry_seconds = 5\n    else:\n        poll_retry_seconds = int(retry_seconds)\n    if (timeout is None):\n        timeout = self.poll_timeout\n    else:\n        timeout = int(timeout)\n    params = {'includeAdditional': 'true'}\n    poll_count = 0\n    poll_time_total = 0\n    data = {}\n    while True:\n        poll_count += 1\n        poll_time_total += self._poll_interval\n        time.sleep(self._poll_interval)\n        self.tcex.log.info('Batch poll time: {} seconds'.format(poll_time_total))\n        try:\n            r = self.tcex.session.get('/v2/batch/{}'.format(batch_id), params=params)\n            if ((not r.ok) or ('application/json' not in r.headers.get('content-type', ''))):\n                self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)\n                return data\n            data = r.json()\n            if (data.get('status') != 'Success'):\n                self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)\n        except Exception as e:\n            self.tcex.handle_error(540, [e], halt_on_error)\n        if (data.get('data', {}).get('batchStatus', {}).get('status') == 'Completed'):\n            modifier = (poll_time_total * 0.7)\n            self._poll_interval_times = (self._poll_interval_times[(- 4):] + [modifier])\n            weights = [1]\n            poll_interval_time_weighted_sum = 0\n            for poll_interval_time in self._poll_interval_times:\n                poll_interval_time_weighted_sum += (poll_interval_time * weights[(- 1)])\n                weights.append((weights[(- 1)] * 1.5))\n            weights.pop()\n            self._poll_interval = math.floor((poll_interval_time_weighted_sum / sum(weights)))\n            if (poll_count == 1):\n                self._poll_interval = (self._poll_interval * 0.85)\n            self.tcex.log.debug('Batch Status: {}'.format(data))\n            return data\n        self._poll_interval = min((poll_retry_seconds + int((poll_count * poll_interval_back_off))), 20)\n        if (poll_time_total >= timeout):\n            self.tcex.handle_error(550, [timeout], True)", "docstring": "Poll Batch status to ThreatConnect API.\n\n.. code-block:: javascript\n\n{\n\"status\": \"Success\",\n\"data\": {\n\"batchStatus\": {\n\"id\":3505,\n\"status\":\"Completed\",\n\"errorCount\":0,\n\"successCount\":0,\n\"unprocessCount\":0\n}\n}\n}\n\nArgs:\nbatch_id (str): The ID returned from the ThreatConnect API for the current batch job.\nretry_seconds (int): The base number of seconds used for retries when job is not\ncompleted.\nback_off (float): A multiplier to use for backing off on each poll attempt when job has\nnot completed.\ntimeout (int, optional): The number of seconds before the poll should timeout.\nhalt_on_error (bool, default:True): If True any exception will raise an error.\n\nReturns:\ndict: The batch status returned from the ThreatConnect API.", "source": "codesearchnet"}
{"code": "def isprocess(pid, error=False):\n    \n    try:\n        \n        \n        os.kill(pid, 0)\n        return True\n    except OSError:\n        return False", "docstring": "Check that a process is running.\n\nArguments:\n\npid (int): Process ID to check.\n\nReturns:\n\nTrue if the process is running, else false.", "source": "juraj-google-style"}
{"code": "def from_location(cls, location):\n        \n        if not location:\n            return cls()\n        try:\n            if hasattr(location, 'isLocation'):\n                \n                return location\n\n            elif hasattr(location, 'Latitude'):\n                \n                return cls(city=str(location.Name.replace(\",\", \" \")),\n                           latitude=location.Latitude,\n                           longitude=location.Longitude)\n\n            elif location.startswith('Site:'):\n                loc, city, latitude, longitude, time_zone, elevation = \\\n                    [x.strip() for x in re.findall(r'\\r*\\n*([^\\r\\n]*)[,|;]',\n                                                   location, re.DOTALL)]\n            else:\n                try:\n                    city, latitude, longitude, time_zone, elevation = \\\n                        [key.split(\":\")[-1].strip()\n                         for key in location.split(\",\")]\n                except ValueError:\n                    \n                    return cls(city=location)\n\n            return cls(city=city, country=None, latitude=latitude,\n                       longitude=longitude, time_zone=time_zone,\n                       elevation=elevation)\n\n        except Exception as e:\n            raise ValueError(\n                \"Failed to create a Location from %s!\\n%s\" % (location, e))", "docstring": "Try to create a Ladybug location from a location string.\n\nArgs:\nlocationString: Location string\n\nUsage:\n\nl = Location.from_location(locationString)", "source": "juraj-google-style"}
{"code": "def variable_accessed(variable):\n    variables = _variables_override(variable)\n    for var in variables:\n        pywrap_tfe.TFE_Py_TapeVariableAccessed(var)\n        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)", "docstring": "Notifies all tapes in the stack that a variable has been accessed.\n\nArgs:\nvariable: variable to be watched.", "source": "github-repos"}
{"code": "def tick(self):\n    self._handle_command_buffer()\n    self._client.release()\n    self._client.acquire()\n    return self._get_full_state()", "docstring": "Ticks the environment once. Normally used for multi-agent environments.\n\nReturns:\ndict: A dictionary from agent name to its full state. The full state is another dictionary\nfrom :obj:`holodeck.sensors.Sensors` enum to np.ndarray, containing the sensors information\nfor each sensor. The sensors always include the reward and terminal sensors.", "source": "codesearchnet"}
{"code": "def init_from_class_batches(self, class_batches, num_shards=None):\n    shards_for_submissions = {}\n    shard_idx = 0\n    for (idx, (batch_id, batch_val)) in enumerate(iteritems(class_batches)):\n        work_id = DEFENSE_WORK_ID_PATTERN.format(idx)\n        submission_id = batch_val['submission_id']\n        shard_id = None\n        if num_shards:\n            shard_id = shards_for_submissions.get(submission_id)\n            if (shard_id is None):\n                shard_id = (shard_idx % num_shards)\n                shards_for_submissions[submission_id] = shard_id\n                shard_idx += 1\n        self.work[work_id] = {'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, 'submission_id': submission_id, 'shard_id': shard_id, 'output_classification_batch_id': batch_id}", "docstring": "Initializes work pieces from classification batches.\n\nArgs:\nclass_batches: dict with classification batches, could be obtained\nas ClassificationBatches.data\nnum_shards: number of shards to split data into,\nif None then no sharding is done.", "source": "codesearchnet"}
{"code": "def decompress_decoder(inputs,\n                       hparams,\n                       strides=(2, 2),\n                       kernel=(3, 3),\n                       name=None):\n  \n  with tf.variable_scope(name, default_name=\"decompress\"):\n    x = inputs\n    x = tf.layers.dense(x, hparams.hidden_size, name=name + \"_dense\")\n    x = residual_block_layer(x, hparams)\n    for i in range(hparams.num_compress_steps \n      j = hparams.num_compress_steps \n      with tf.variable_scope(name + \"_%d\" % j):\n        if hparams.do_decompress_attend:\n          y = compress_self_attention_layer(\n              x, hparams, name=\"decompress_selfatt\")\n          x += y\n        y = tf.layers.conv2d_transpose(\n            x,\n            hparams.hidden_size,\n            kernel,\n            strides=strides,\n            padding=\"SAME\",\n            activation=tf.nn.relu if i > 0 else None,\n            name=\"decompress_conv\")\n        x = y\n    return x", "docstring": "Decoder that decompresses 2-D inputs by 2**num_compress_steps.\n\nArgs:\ninputs: Tensor of shape [batch, compress_height, compress_width, channels].\nhparams: HParams.\nstrides: Tuple, strides for conv block.\nkernel: Tuple, kernel window size for conv block.\nname: string, variable scope.\n\nReturns:\nTensor of shape [batch, height, width, hparams.hidden_size].", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(InvocationTransaction, self).__init__(*args, **kwargs)\n        self.Gas = Fixed8(0)\n        self.Type = TransactionType.InvocationTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None):\n    pixel_values_videos = pixel_values_videos.type(self.visual.dtype)\n    video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)\n    split_sizes = (video_grid_thw.prod(-1) \n    video_embeds = torch.split(video_embeds, split_sizes)\n    return video_embeds", "docstring": "Encodes videos into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input videos.\nvideo_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):\nThe temporal, height and width of feature shape of each video in LLM.", "source": "github-repos"}
{"code": "def request_openbus(self, service, endpoint, **kwargs):\n    if (service == 'bus'):\n        endpoints = ENDPOINTS_BUS\n    elif (service == 'geo'):\n        endpoints = ENDPOINTS_GEO\n    else:\n        return None\n    if (endpoint not in endpoints):\n        return None\n    url = (URL_OPENBUS + endpoints[endpoint])\n    kwargs['idClient'] = self._emt_id\n    kwargs['passKey'] = self._emt_pass\n    return requests.post(url, data=kwargs, verify=True).json()", "docstring": "Make a request to the given endpoint of the ``openbus`` server.\n\nThis returns the plain JSON (dict) response which can then be parsed\nusing one of the implemented types.\n\nArgs:\nservice (str): Service to fetch ('bus' or 'geo').\nendpoint (str): Endpoint to send the request to.\nThis string corresponds to the key in the ``ENDPOINTS`` dict.\n**kwargs: Request arguments.\n\nReturns:\nObtained response (dict) or None if the endpoint was not found.", "source": "codesearchnet"}
{"code": "def __init__(self, options):\n        \n        capacity = options[u\"capacity\"] if u\"capacity\" in options else 200\n        self._cache = pylru.lrucache(capacity)", "docstring": "Initializes an LruBackend.\n\nArgs:\noptions: a dictionary that contains configuration options.", "source": "juraj-google-style"}
{"code": "def build_grab_exception(ex, curl):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    if ex.args[0] == 23:\n        if getattr(curl, 'grab_callback_interrupted', None) is True:\n            \n            \n            \n            \n            return None\n        else:\n            return error.GrabNetworkError(ex.args[1], ex)\n    else:\n        if ex.args[0] == 28:\n            return error.GrabTimeoutError(ex.args[1], ex)\n        elif ex.args[0] == 7:\n            return error.GrabConnectionError(ex.args[1], ex)\n        elif ex.args[0] == 67:\n            return error.GrabAuthError(ex.args[1], ex)\n        elif ex.args[0] == 47:\n            return error.GrabTooManyRedirectsError(ex.args[1], ex)\n        elif ex.args[0] == 6:\n            return error.GrabCouldNotResolveHostError(ex.args[1], ex)\n        elif ex.args[0] == 3:\n            return error.GrabInvalidUrl(ex.args[1], ex)\n        else:\n            return error.GrabNetworkError(ex.args[1], ex)", "docstring": "Build Grab exception from the pycurl exception\n\nArgs:\nex - the original pycurl exception\ncurl - the Curl instance raised the exception", "source": "juraj-google-style"}
{"code": "def __init__(self, config_dict=None):\n        \n        self.config_dict = deepcopy(config_dict)\n        self.plugins = Config.load_installed_plugins()\n        self.analysis_groups = []\n\n        if not config_dict:\n            return\n\n        analysis = config_dict.get('analysis', {})\n\n        if isinstance(analysis, dict):\n            for group_key, group_def in analysis.items():\n                try:\n                    self.analysis_groups.append(\n                        self.inflate_analysis_group(group_key, group_def))\n                except ValueError as e:\n                    logger.error(\n                        'Error while inflating \"%s\" analysis group. '\n                        'The group will not be added to the list. '\n                        'Exception: %s.', group_key, e)\n        else:\n            raise ValueError('%s type is not supported for \"analysis\" key, '\n                             'use dict only' % type(analysis))", "docstring": "Initialization method.\n\nArgs:\nconfig_dict (dict): the configuration as a dictionary.", "source": "juraj-google-style"}
{"code": "def update(self, session, arrays=None, frame=None):\n    \n    new_config = self._get_config()\n\n    if self._enough_time_has_passed(self.previous_config['FPS']):\n      self.visualizer.update(new_config)\n      self.last_update_time = time.time()\n      final_image = self._update_frame(session, arrays, frame, new_config)\n      self._update_recording(final_image, new_config)", "docstring": "Creates a frame and writes it to disk.\n\nArgs:\narrays: a list of np arrays. Use the \"custom\" option in the client.\nframe: a 2D np array. This way the plugin can be used for video of any\nkind, not just the visualization that comes with the plugin.\n\nframe can also be a function, which only is evaluated when the\n\"frame\" option is selected by the client.", "source": "juraj-google-style"}
{"code": "def is_valid(self, value):\n    if (not self.is_array):\n        return self._valid(value)\n    if isinstance(value, (list, set, tuple)):\n        return all([self._valid(item) for item in value])\n    return self._valid(value)", "docstring": "Validate value before actual instance setting based on type.\n\nArgs:\nvalue (object): The value object for validation.\n\nReturns:\nTrue if value validation succeeds else False.", "source": "codesearchnet"}
{"code": "def find_from(path):\n        \n        realpath = os.path.realpath(path)\n        config_path = os.path.join(realpath, '.ensime')\n\n        if os.path.isfile(config_path):\n            return config_path\n        elif realpath == os.path.abspath('/'):\n            return None\n        else:\n            dirname = os.path.dirname(realpath)\n            return ProjectConfig.find_from(dirname)", "docstring": "Find path of an .ensime config, searching recursively upward from path.\n\nArgs:\npath (str): Path of a file or directory from where to start searching.\n\nReturns:\nstr: Canonical path of nearest ``.ensime``, or ``None`` if not found.", "source": "juraj-google-style"}
{"code": "def __call__(self, name, value):\n        \n        super(IntegerTypeChecker, self).__call__(name, value)\n        if isinstance(self.minimum, int):\n            if value < self.minimum:\n                raise ValueError(\"%s must be greater or equal %s\" % (name, self.minimum))\n        if isinstance(self.maximum, int):\n            if value > self.maximum:\n                raise ValueError(\"%s must be less or equal %s\" % (name, self.maximum))", "docstring": "Call method.\n\nArgs:\nname (str): the value's name.\nvalue (int): the value to check.\n\nRaises:\nValueError: if value is not type int.\nValueError: if value is less than minimum.\nValueError: if value is more than maximum.", "source": "juraj-google-style"}
{"code": "def do_put(self, uri, resource, timeout, custom_headers):\n    self.validate_resource_uri(uri)\n    (task, body) = self._connection.put(uri, resource, custom_headers=custom_headers)\n    if (not task):\n        return body\n    return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Helps to make put requests.\n\nArgs:\nuri: URI of the resource\ntimeout: Time out for the request in seconds.\ncustom_headers: Allows to set custom http headers.\n\nRetuns:\nReturns Task object", "source": "codesearchnet"}
{"code": "def pan_and_scan(self, image: np.ndarray, pan_and_scan_min_crop_size: int, pan_and_scan_max_num_crops: int, pan_and_scan_min_ratio_to_activate: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n    height, width = get_image_size(image)\n    if width >= height:\n        if width / height < pan_and_scan_min_ratio_to_activate:\n            return []\n        num_crops_w = int(math.floor(width / height + 0.5))\n        num_crops_w = min(int(math.floor(width / pan_and_scan_min_crop_size)), num_crops_w)\n        num_crops_w = max(2, num_crops_w)\n        num_crops_w = min(pan_and_scan_max_num_crops, num_crops_w)\n        num_crops_h = 1\n    else:\n        if height / width < pan_and_scan_min_ratio_to_activate:\n            return []\n        num_crops_h = int(math.floor(height / width + 0.5))\n        num_crops_h = min(int(math.floor(height / pan_and_scan_min_crop_size)), num_crops_h)\n        num_crops_h = max(2, num_crops_h)\n        num_crops_h = min(pan_and_scan_max_num_crops, num_crops_h)\n        num_crops_w = 1\n    crop_size_w = int(math.ceil(width / num_crops_w))\n    crop_size_h = int(math.ceil(height / num_crops_h))\n    if min(crop_size_w, crop_size_h) < pan_and_scan_min_crop_size:\n        return []\n    crop_positions_w = [crop_size_w * i for i in range(num_crops_w)]\n    crop_positions_h = [crop_size_h * i for i in range(num_crops_h)]\n    if input_data_format == ChannelDimension.LAST:\n        image_crops = [image[pos_h:pos_h + crop_size_h, pos_w:pos_w + crop_size_w] for pos_h, pos_w in itertools.product(crop_positions_h, crop_positions_w)]\n    else:\n        image_crops = [image[:, pos_h:pos_h + crop_size_h, pos_w:pos_w + crop_size_w] for pos_h, pos_w in itertools.product(crop_positions_h, crop_positions_w)]\n    return image_crops", "docstring": "Pan and Scan and image, by cropping into smaller images when the aspect ratio exceeds\nminimum allowed ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\npan_and_scan_min_crop_size (`int`, *optional*):\nMinimum size of each crop in pan and scan.\npan_and_scan_max_num_crops (`int`, *optional*):\nMaximum number of crops per image in pan and scan.\npan_and_scan_min_ratio_to_activate (`float`, *optional*):\nMinimum aspect ratio to activate pan and scan.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def ldap_sync(self, **kwargs):\n    path = ('/groups/%s/ldap_sync' % self.get_id())\n    self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Sync LDAP groups.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def _process_regular_parameters(sig, func, class_name, documented_params, indent_level, undocumented_parameters):\n    docstring = ''\n    source_args_dict = source_args_doc([ModelArgs, ImageProcessorArgs])\n    missing_args = {}\n    for param_name, param in sig.parameters.items():\n        if param_name in ARGS_TO_IGNORE or param.kind == inspect.Parameter.VAR_POSITIONAL or param.kind == inspect.Parameter.VAR_KEYWORD:\n            continue\n        param_type, optional = _process_parameter_type(param, param_name, func)\n        param_default = ''\n        if param.default != inspect._empty and param.default is not None:\n            param_default = f', defaults to `{str(param.default)}`'\n        param_type, optional_string, shape_string, additional_info, description, is_documented = _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional)\n        if is_documented:\n            if param_name == 'config':\n                if param_type == '':\n                    param_type = f'[`{class_name}`]'\n                else:\n                    param_type = f'[`{param_type.split('.')[-1]}`]'\n            elif param_type == '' and False:\n                print(f'🚨 {param_name} for {func.__qualname__} in file {func.__code__.co_filename} has no type')\n            param_type = param_type if '`' in param_type else f'`{param_type}`'\n            if additional_info:\n                param_docstring = f'{param_name} ({param_type}{additional_info}):{description}'\n            else:\n                param_docstring = f'{param_name} ({param_type}{shape_string}{optional_string}{param_default}):{description}'\n            docstring += set_min_indent(param_docstring, indent_level + 8)\n        else:\n            missing_args[param_name] = {'type': param_type if param_type else '<fill_type>', 'optional': optional, 'shape': shape_string, 'description': description if description else '\\n    <fill_description>', 'default': param_default}\n            undocumented_parameters.append(f\"🚨 `{param_name}` is part of {func.__qualname__}'s signature, but not documented. Make sure to add it to the docstring of the function in {func.__code__.co_filename}.\")\n    return (docstring, missing_args)", "docstring": "Process all regular parameters (not kwargs parameters) from the function signature.\n\nArgs:\nsig (`inspect.Signature`): Function signature\nfunc (`function`): Function the parameters belong to\nclass_name (`str`): Name of the class\ndocumented_params (`dict`): Dictionary of parameters that are already documented\nindent_level (`int`): Indentation level\nundocumented_parameters (`list`): List to append undocumented parameters to", "source": "github-repos"}
{"code": "def _ReadDefinitionFile(self, filename):\n    \n    if not filename:\n      return None\n\n    path = os.path.join(self._DEFINITION_FILES_PATH, filename)\n    with open(path, 'rb') as file_object:\n      definition = file_object.read()\n\n    return dtfabric_fabric.DataTypeFabric(yaml_definition=definition)", "docstring": "Reads a dtFabric definition file.\n\nArgs:\nfilename (str): name of the dtFabric definition file.\n\nReturns:\ndtfabric.DataTypeFabric: data type fabric which contains the data format\ndata type maps of the data type definition, such as a structure, that\ncan be mapped onto binary data or None if no filename is provided.", "source": "juraj-google-style"}
{"code": "def add_symbol(self, symbol_name, namespace_stack, node, module):\n    if namespace_stack:\n        last_namespace = self.namespaces\n        for namespace in namespace_stack:\n            last_namespace = last_namespace.setdefault(namespace, {})\n    else:\n        last_namespace = self.namespaces[None]\n    return self._add(symbol_name, last_namespace, node, module)", "docstring": "Adds symbol_name defined in namespace_stack to the symbol table.\n\nArgs:\nsymbol_name: 'name of the symbol to lookup'\nnamespace_stack: None or ['namespaces', 'symbol', 'defined', 'in']\nnode: ast.Node that defines this symbol\nmodule: module (any object) this symbol is defined in\n\nReturns:\nbool(if symbol was *not* already present)", "source": "codesearchnet"}
{"code": "def get_compound_pd(self):\n    entry1 = PDEntry(self.entry1.composition, 0)\n    entry2 = PDEntry(self.entry2.composition, 0)\n    cpd = CompoundPhaseDiagram((self.rxn_entries + [entry1, entry2]), [Composition(entry1.composition.reduced_formula), Composition(entry2.composition.reduced_formula)], normalize_terminal_compositions=False)\n    return cpd", "docstring": "Get the CompoundPhaseDiagram object, which can then be used for\nplotting.\n\nReturns:\n(CompoundPhaseDiagram)", "source": "codesearchnet"}
{"code": "def files_comments_edit(\n        self, *, comment: str, file: str, id: str, **kwargs\n    ) -> SlackResponse:\n        \n        kwargs.update({\"comment\": comment, \"file\": file, \"id\": id})\n        return self.api_call(\"files.comments.edit\", json=kwargs)", "docstring": "Edit an existing file comment.\n\nArgs:\ncomment (str): The body of the comment.\ne.g. 'Everyone should take a moment to read this file.'\nfile (str): The file id. e.g. 'F1234467890'\nid (str): The file comment id. e.g. 'Fc1234567890'", "source": "juraj-google-style"}
{"code": "def copartition_datasets(self, axis, other, left_func, right_func):\n    if (left_func is None):\n        new_self = self\n    else:\n        new_self = self.map_across_full_axis(axis, left_func)\n    if (right_func is None):\n        if ((axis == 0) and (not np.array_equal(other.block_lengths, new_self.block_lengths))):\n            new_other = other.manual_shuffle(axis, (lambda x: x), new_self.block_lengths)\n        elif ((axis == 1) and (not np.array_equal(other.block_widths, new_self.block_widths))):\n            new_other = other.manual_shuffle(axis, (lambda x: x), new_self.block_widths)\n        else:\n            new_other = other\n    else:\n        new_other = other.manual_shuffle(axis, right_func, (new_self.block_lengths if (axis == 0) else new_self.block_widths))\n    return (new_self, new_other)", "docstring": "Copartition two BlockPartitions objects.\n\nArgs:\naxis: The axis to copartition.\nother: The other BlockPartitions object to copartition with.\nleft_func: The function to apply to left. If None, just use the dimension\nof self (based on axis).\nright_func: The function to apply to right. If None, check the dimensions of\nother and use the identity function if splitting needs to happen.\n\nReturns:\nA tuple of BlockPartitions objects, left and right.", "source": "codesearchnet"}
{"code": "def get_unconditional_inputs(self, num_samples=1):\n    last_hidden_state = torch.zeros((num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype)\n    attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long)\n    return MusicgenUnconditionalInput(encoder_outputs=(last_hidden_state,), attention_mask=attention_mask, guidance_scale=1.0)", "docstring": "Helper function to get null inputs for unconditional generation, enabling the model to be used without the\nfeature extractor or tokenizer.\n\nArgs:\nnum_samples (int, *optional*):\nNumber of audio samples to unconditionally generate.\nmax_new_tokens (int, *optional*):\nNumber of tokens to generate for each sample. More tokens means longer audio samples, at the expense of\nlonger inference (since more audio tokens need to be generated per sample).\n\nExample:\n```python\n>>> from transformers import MusicgenForConditionalGeneration\n\n>>> model = MusicgenForConditionalGeneration.from_pretrained(\"facebook/musicgen-small\")\n\n>>> # get the unconditional (or 'null') inputs for the model\n>>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)\n>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)\n```", "source": "github-repos"}
{"code": "def DeregisterFormatter(cls, formatter_class):\n    \n    formatter_data_type = formatter_class.DATA_TYPE.lower()\n    if formatter_data_type not in cls._formatter_classes:\n      raise KeyError(\n          'Formatter class not set for data type: {0:s}.'.format(\n              formatter_class.DATA_TYPE))\n\n    del cls._formatter_classes[formatter_data_type]", "docstring": "Deregisters a formatter class.\n\nThe formatter classes are identified based on their lower case data type.\n\nArgs:\nformatter_class (type): class of the formatter.\n\nRaises:\nKeyError: if formatter class is not set for the corresponding data type.", "source": "juraj-google-style"}
{"code": "def w8a8_block_fp8_matmul_triton(A: torch.Tensor, B: torch.Tensor, As: torch.Tensor, Bs: torch.Tensor, block_size: List[int], output_dtype: torch.dtype=torch.float32) -> torch.Tensor:\n    assert len(block_size) == 2\n    block_n, block_k = (block_size[0], block_size[1])\n    assert A.shape[-1] == B.shape[-1]\n    assert A.shape[:-1] == As.shape[:-1] and A.is_contiguous()\n    assert triton.cdiv(A.shape[-1], block_k) == As.shape[-1]\n    M = A.numel() \n    assert B.ndim == 2 and B.is_contiguous() and (Bs.ndim == 2)\n    N, K = B.shape\n    assert triton.cdiv(N, block_n) == Bs.shape[0]\n    assert triton.cdiv(K, block_k) == Bs.shape[1]\n    C_shape = A.shape[:-1] + (N,)\n    C = A.new_empty(C_shape, dtype=output_dtype)\n    BLOCK_SIZE_M = 128\n    if M < BLOCK_SIZE_M:\n        BLOCK_SIZE_M = triton.next_power_of_2(M)\n        BLOCK_SIZE_M = max(BLOCK_SIZE_M, 16)\n    BLOCK_SIZE_K = block_k\n    assert block_k % BLOCK_SIZE_K == 0\n    BLOCK_SIZE_N = block_n\n\n    def grid(META):\n        return (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),)\n    _w8a8_block_fp8_matmul[grid](A, B, C, As, Bs, M, N, K, block_n, block_k, A.stride(-2), A.stride(-1), B.stride(1), B.stride(0), C.stride(-2), C.stride(-1), As.stride(-2), As.stride(-1), Bs.stride(1), Bs.stride(0), BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE_M=8)\n    return C", "docstring": "This function performs matrix multiplication with block-wise\nquantization.\nIt takes two input tensors `A` and `B` with scales `As` and `Bs`.\nThe output is returned in the specified `output_dtype`.\nArgs:\nA: The input tensor, e.g., activation.\nB: The input tensor, e.g., weight.\nAs: The per-token-group quantization scale for `A`.\nBs: The per-block quantization scale for `B`.\nblock_size: The block size for per-block quantization. It should\nbe 2-dim, e.g., [128, 128].\noutput_dytpe: The dtype of the returned tensor.\nReturns:\ntorch.Tensor: The result of matmul.", "source": "github-repos"}
{"code": "def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):\n        \n        \n        rendered_list = []\n        i = 0\n        total_attempts = 0\n        while True:\n            if i >= cnt:\n                break\n            if total_attempts > cnt * self.unique_attempts_factor:\n                raise StringGenerator.UniquenessError(u\"couldn't satisfy uniqueness\")\n            s = self.render(**kwargs)\n            if unique:\n                if not s in rendered_list:\n                    rendered_list.append(s)\n                    i += 1\n            else:\n                rendered_list.append(s)\n                i += 1\n            total_attempts += 1\n\n            \n            if progress_callback and callable(progress_callback):\n                progress_callback(i, cnt)\n\n        return rendered_list", "docstring": "Return a list of generated strings.\n\nArgs:\ncnt (int): length of list\nunique (bool): whether to make entries unique\n\nReturns:\nlist.\n\nWe keep track of total attempts because a template may\nspecify something impossible to attain, like [1-9]{} with cnt==1000", "source": "juraj-google-style"}
{"code": "def _write_to_hdx(self, action, data, id_field_name, file_to_upload=None):\n    file = None\n    try:\n        if file_to_upload:\n            file = open(file_to_upload, 'rb')\n            files = [('upload', file)]\n        else:\n            files = None\n        return self.configuration.call_remoteckan(self.actions()[action], data, files=files)\n    except Exception as e:\n        raisefrom(HDXError, ('Failed when trying to %s %s! (POST)' % (action, data[id_field_name])), e)\n    finally:\n        if (file_to_upload and file):\n            file.close()", "docstring": "Creates or updates an HDX object in HDX and return HDX object metadata dict\n\nArgs:\naction (str): Action to perform eg. 'create', 'update'\ndata (Dict): Data to write to HDX\nid_field_name (str): Name of field containing HDX object identifier or None\nfile_to_upload (Optional[str]): File to upload to HDX\n\nReturns:\nDict: HDX object metadata", "source": "codesearchnet"}
{"code": "def add(self, pattern: Union[(Pattern, FlatTerm)], final_label: T=None) -> int:\n    index = len(self._patterns)\n    self._patterns.append((pattern, final_label))\n    flatterm = (FlatTerm(pattern.expression) if (not isinstance(pattern, FlatTerm)) else pattern)\n    if (flatterm.is_syntactic or (len(flatterm) == 1)):\n        net = self._generate_syntactic_net(flatterm, index)\n    else:\n        net = self._generate_net(flatterm, index)\n    if self._root:\n        self._root = self._product_net(self._root, net)\n    else:\n        self._root = net\n    return index", "docstring": "Add a pattern to the discrimination net.\n\nArgs:\npattern:\nThe pattern which is added to the DiscriminationNet. If an expression is given, it will be converted to\na `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly.\nfinal_label:\nA label that is returned if the pattern matches when using :meth:`match`. This will default to the\npattern itself.\n\nReturns:\nThe index of the newly added pattern. This is used internally to later to get the pattern and its final\nlabel once a match is found.", "source": "codesearchnet"}
{"code": "def get_csr(self, bay_number=None):\n        \n        uri = \"{}/https/certificaterequest\".format(self.data['uri'])\n\n        if bay_number:\n            uri += \"?bayNumber=%d\" % (bay_number)\n\n        return self._helper.do_get(uri)", "docstring": "Get an enclosure's Certificate Signing Request (CSR) that was generated by previous POST to the same URI.\n\nArgs:\nbay_number: OA to retrieve the previously generated CSR.\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def send_data(data):\n    datalength = len(data)\n    csm1 = checksum1(data, datalength)\n    csm2 = checksum2(csm1)\n    data.insert(0, 255)\n    data.insert(1, 255)\n    data.insert(5, csm1)\n    data.insert(6, csm2)\n    stringtosend = ''\n    for i in range(len(data)):\n        byteformat = ('%02X' % data[i])\n        stringtosend = ((stringtosend + '\\\\x') + byteformat)\n    try:\n        SERPORT.write(stringtosend.decode('string-escape'))\n    except:\n        raise HerkulexError('could not communicate with motors')", "docstring": "Send data to herkulex\n\nPaketize & write the packet to serial port\n\nArgs:\ndata (list): the data to be sent\n\nRaises:\nSerialException: Error occured while opening serial port", "source": "codesearchnet"}
{"code": "def __init__(self, location=None, parent=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(CPIOPathSpec, self).__init__(\n        location=location, parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the CPIO file path specification must have a parent.\n\nArgs:\nlocation (Optional[str]): CPIO file internal location string prefixed\nwith a path separator character.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:\n    \n    wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')\n    header, body = wrapped_docstring.split('\\n', 1)\n    body = textwrap.indent(body, '                    ')\n    wrapped_docstring = header+body\n    updated_ds = .format(continuous_name=discretized_pulse.__name__,\n                           sampler_name=sampler_inst.__name__,\n                           continuous_doc=wrapped_docstring)\n\n    discretized_pulse.__doc__ = updated_ds\n    return discretized_pulse", "docstring": "Update annotations of discretized continuous pulse function.\n\nArgs:\ndiscretized_pulse: Discretized decorated continuous pulse.\nsampler_inst: Applied sampler.", "source": "juraj-google-style"}
{"code": "def fit_transform(self, X, y=None, **params):\n    return self.fit(X, y).transform(X, y)", "docstring": "Learn vocabulary and return document id matrix.\n\nThis is equivalent to fit followed by transform.\n\nArgs:\nX : iterable\nan iterable which yields either str, unicode or file objects.\n\nReturns:\nlist : document id matrix.\nlist: label id matrix.", "source": "codesearchnet"}
{"code": "def requested_packages(self, include_implicit=False):\n        \n        if include_implicit:\n            return self._package_requests + self.implicit_packages\n        else:\n            return self._package_requests", "docstring": "Get packages in the request.\n\nArgs:\ninclude_implicit (bool): If True, implicit packages are appended\nto the result.\n\nReturns:\nList of `PackageRequest` objects.", "source": "juraj-google-style"}
{"code": "def Query(args):\n  \n  query = args.query.encode(\"utf-8\")\n  timeout = args.timeout_millis / 1000  \n  \n  \n  try:\n    \n    \n    \n    \n    \n    command = [config.CONFIG[\"Osquery.path\"], \"--S\", \"--json\", query]\n    proc = subprocess.run(\n        command,\n        timeout=timeout,\n        check=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE)\n  \n  \n  \n  \n  except subprocess.TimeoutExpired as error:\n    raise TimeoutError(cause=error)\n  except subprocess.CalledProcessError as error:\n    raise Error(\"osquery invocation error\", cause=error)\n  \n\n  stdout = proc.stdout.decode(\"utf-8\")\n  stderr = proc.stderr.decode(\"utf-8\").strip()\n  return ProcOutput(stdout=stdout, stderr=stderr)", "docstring": "Calls osquery with given query and returns its output.\n\nArgs:\nargs: A query to call osquery with.\n\nReturns:\nA \"parsed JSON\" representation of the osquery output.\n\nRaises:\nQueryError: If the query is incorrect.\nTimeoutError: If a call to the osquery executable times out.\nError: If anything else goes wrong with the subprocess call.", "source": "juraj-google-style"}
{"code": "def plot_real_feature(df, feature_name, bins=50, figsize=(15, 15)):\n    \n\n    ix_negative_target = df[df.target == 0].index\n    ix_positive_target = df[df.target == 1].index\n\n    plt.figure(figsize=figsize)\n\n    ax_overall_dist = plt.subplot2grid((3, 2), (0, 0), colspan=2)\n    ax_target_conditional_dist = plt.subplot2grid((3, 2), (1, 0), colspan=2)\n\n    ax_botplot = plt.subplot2grid((3, 2), (2, 0))\n    ax_violin_plot = plt.subplot2grid((3, 2), (2, 1))\n\n    ax_overall_dist.set_title('Distribution of {}'.format(feature_name), fontsize=16)\n    sns.distplot(\n        df[feature_name],\n        bins=50,\n        ax=ax_overall_dist\n    )\n\n    sns.distplot(\n        df.loc[ix_positive_target][feature_name],\n        bins=bins,\n        ax=ax_target_conditional_dist,\n        label='Positive Target'\n    )\n    sns.distplot(\n        df.loc[ix_negative_target][feature_name],\n        bins=bins,\n        ax=ax_target_conditional_dist,\n        label='Negative Target'\n    )\n    ax_target_conditional_dist.legend(loc='upper right', prop={'size': 14})\n\n    sns.boxplot(\n        y=feature_name,\n        x='target',\n        data=df,\n        ax=ax_botplot\n    )\n    sns.violinplot(\n        y=feature_name,\n        x='target',\n        data=df,\n        ax=ax_violin_plot\n    )\n\n    plt.show()", "docstring": "Plot the distribution of a real-valued feature conditioned by the target.\n\nExamples:\n`plot_real_feature(X, 'emb_mean_euclidean')`\n\nArgs:\ndf: Pandas dataframe containing the target column (named 'target').\nfeature_name: The name of the feature to plot.\nbins: The number of histogram bins for the distribution plot.\nfigsize: The size of the plotted figure.", "source": "juraj-google-style"}
{"code": "def is_complex_format_str(node):\n    \n    inferred = utils.safe_infer(node)\n    if inferred is None or not isinstance(inferred.value, str):\n        return True\n    try:\n        parsed = list(string.Formatter().parse(inferred.value))\n    except ValueError:\n        \n        return False\n    for _, _, format_spec, _ in parsed:\n        if format_spec:\n            return True\n    return False", "docstring": "Checks if node represents a string with complex formatting specs.\n\nArgs:\nnode (astroid.node_classes.NodeNG): AST node to check\nReturns:\nbool: True if inferred string uses complex formatting, False otherwise", "source": "juraj-google-style"}
{"code": "def GetParserPluginsInformation(cls, parser_filter_expression=None):\n    \n    parser_plugins_information = []\n    for _, parser_class in cls.GetParsers(\n        parser_filter_expression=parser_filter_expression):\n      if parser_class.SupportsPlugins():\n        for plugin_name, plugin_class in parser_class.GetPlugins():\n          description = getattr(plugin_class, 'DESCRIPTION', '')\n          parser_plugins_information.append((plugin_name, description))\n\n    return parser_plugins_information", "docstring": "Retrieves the parser plugins information.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\nlist[tuple[str, str]]: pairs of parser plugin names and descriptions.", "source": "juraj-google-style"}
{"code": "def get_db_prep_value(self, value, connection, prepared=False):\n    if prepared:\n        return value\n    if (value is None):\n        return []\n    values = (value if self.multi_valued_field else [value])\n    prepared_values = [self.get_prep_value(v) for v in values]\n    return list(sorted(set((v for v in prepared_values if v))))", "docstring": "Prepare a value for DB interaction.\n\nReturns:\n- list(bytes) if not prepared\n- list(str) if prepared", "source": "codesearchnet"}
{"code": "def _forward_and_backward_functions(self, inference_args, input_tangents):\n    outputs = []\n    iteration_count = 0\n    while len(outputs) < len(self._func_graph.outputs) and any((backprop_util.IsTrainable(output) for output in self._func_graph.outputs[len(outputs):])):\n        iteration_count += 1\n        if iteration_count >= 20 and iteration_count % 5 == 0:\n            new_op_with_trainable_output = None\n            num_new_trainable_outputs = 0\n            for output in self._func_graph.outputs[len(outputs):]:\n                if backprop_util.IsTrainable(output):\n                    num_new_trainable_outputs += 1\n                    new_op_with_trainable_output = output.op\n            logging.warning(\"Determining side outputs for the function '{}' is taking longer than expected ({} iterations, typically this converges in 5 or so). This could indicate that a gradient registration is adding new ops to the forward pass every time gradients are generated. {} new trainable output(s) were added this iteration, one from the following op:\\n {}\\nThis may indicate a TensorFlow bug, or an issue in a tf.custom_gradient.\".format(self._func_graph.name, iteration_count, num_new_trainable_outputs, new_op_with_trainable_output))\n        outputs = list(self._func_graph.outputs)\n        self._build_functions_for_outputs(outputs, inference_args, input_tangents)\n    forward_function, forward_graph, backward_function, output_indices, num_output_tangents = self._build_functions_for_outputs(outputs, inference_args, input_tangents)\n    if len(self._func_graph.outputs) > len(outputs) and any((backprop_util.IsTrainable(output) for output in self._func_graph.outputs[len(outputs):])):\n        raise errors.InternalError(f'Unexpectedly added new outputs to the forward function when building the backward function: {self._func_graph.outputs[len(outputs):]}.')\n    return (forward_function, forward_graph, backward_function, output_indices, num_output_tangents)", "docstring": "Forward and backward functions suitable for higher-order gradients.\n\nUnlike in `_FirstOrderTapeGradientFunctions`, the backward function built by\nthis method accepts gradients for all of the outputs of the returned forward\nfunction, including side outputs.\n\nArgs:\ninference_args: A flat list of Tensors, arguments to the inference\nfunction.\ninput_tangents: A flat list of Tensors, jvps associated with\n`inference_args`.\n\nReturns:\nA tuple of (forward_function, backward_function):\nforward_function: Takes the same inputs as the inference function, but\nreturns side outputs used by backward_function in addition to the\ninference function's outputs.\nbackward_function: Takes side outputs from forward_function and\ngradients with respect to all of its outputs, real and side. Returns\ngradients with respect to the inputs.", "source": "github-repos"}
{"code": "def write_markdown_to_file(self, f):\n    print('---', file=f)\n    print('---', file=f)\n    print('<!-- This file is machine generated: DO NOT EDIT! -->', file=f)\n    print('', file=f)\n    print('\n    if self._prefix:\n        print(self._prefix, file=f)\n    print('[TOC]', file=f)\n    print('', file=f)\n    if (self._module is not None):\n        self._write_module_markdown_to_file(f, self._module)", "docstring": "Prints this library to file `f`.\n\nArgs:\nf: File to write to.\n\nReturns:\nDictionary of documented members.", "source": "codesearchnet"}
{"code": "def open_usb_handle(self, port_num):\n    serial = self.get_usb_serial(port_num)\n    return local_usb.LibUsbHandle.open(serial_number=serial)", "docstring": "open usb port\n\nArgs:\nport_num: port number on the Cambrionix unit\n\nReturn:\nusb handle", "source": "codesearchnet"}
{"code": "def get_volumes(blocks, layout_info):\n    \n    volumes = {}\n\n    vol_blocks_lists = sort.by_vol_id(blocks, layout_info[2])\n\n    for vol_rec in blocks[layout_info[0]].vtbl_recs:\n        vol_name = vol_rec.name.strip(b'\\x00').decode('utf-8')\n        if vol_rec.rec_index not in vol_blocks_lists:\n            vol_blocks_lists[vol_rec.rec_index] = []\n        volumes[vol_name] = description(vol_rec.rec_index, vol_rec, vol_blocks_lists[vol_rec.rec_index])\n            \n    return volumes", "docstring": "Get a list of UBI volume objects from list of blocks\n\nArguments:\nList:blocks            -- List of layout block objects\nList:layout_info    -- Layout info (indexes of layout blocks and\nassociated data blocks.)\n\nReturns:\nDict -- Of Volume objects by volume name, including any\nrelevant blocks.", "source": "juraj-google-style"}
{"code": "def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False):\n    \n    format_version = metadata_values.get('format_version', None)\n\n    if not format_version:\n      raise IOError('Missing format version.')\n\n    try:\n      format_version = int(format_version, 10)\n    except (TypeError, ValueError):\n      raise IOError('Invalid format version: {0!s}.'.format(format_version))\n\n    if not check_readable_only and format_version != cls._FORMAT_VERSION:\n      raise IOError('Format version: {0:d} is not supported.'.format(\n          format_version))\n\n    if format_version < cls._COMPATIBLE_FORMAT_VERSION:\n      raise IOError(\n          'Format version: {0:d} is too old and no longer supported.'.format(\n              format_version))\n\n    if format_version > cls._FORMAT_VERSION:\n      raise IOError(\n          'Format version: {0:d} is too new and not yet supported.'.format(\n              format_version))\n\n    metadata_values['format_version'] = format_version\n\n    compression_format = metadata_values.get('compression_format', None)\n    if compression_format not in definitions.COMPRESSION_FORMATS:\n      raise IOError('Unsupported compression format: {0:s}'.format(\n          compression_format))\n\n    serialization_format = metadata_values.get('serialization_format', None)\n    if serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n      raise IOError('Unsupported serialization format: {0:s}'.format(\n          serialization_format))\n\n    storage_type = metadata_values.get('storage_type', None)\n    if storage_type not in definitions.STORAGE_TYPES:\n      raise IOError('Unsupported storage type: {0:s}'.format(\n          storage_type))", "docstring": "Checks the storage metadata.\n\nArgs:\nmetadata_values (dict[str, str]): metadata values per key.\ncheck_readable_only (Optional[bool]): whether the store should only be\nchecked to see if it can be read. If False, the store will be checked\nto see if it can be read and written to.\n\nRaises:\nIOError: if the format version or the serializer format is not supported.\nOSError: if the format version or the serializer format is not supported.", "source": "juraj-google-style"}
{"code": "def _create_scalar_select(lhs_result: _sql_data_types.StandardSqlExpression, rhs_result: _sql_data_types.StandardSqlExpression, scalar_check_op: str, sql_data_type: _sql_data_types.StandardSqlDataType, sql_alias: str):\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(f'({lhs_result.as_operand()} {scalar_check_op} {rhs_result.as_operand()})', _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Construct a Spark SQL select statement for scalar values.\n\nArgs:\nlhs_result: The result of the left-hand side expression.\nrhs_result: The result of the right-hand side expression.\nscalar_check_op: The scalar operation to be applied ('=' or '!=').\nsql_data_type: The SQL data type for the result.\nsql_alias: The SQL alias for the result.\n\nReturns:\nA compiled Spark SQL select statement.", "source": "github-repos"}
{"code": "def get_book_metadata(self, asin):\n    kbm = self._get_api_call('get_book_metadata', ('\"%s\"' % asin))\n    return KindleCloudReaderAPI._kbm_to_book(kbm)", "docstring": "Returns a book's metadata.\n\nArgs:\nasin: The ASIN of the book to be queried.\n\nReturns:\nA `KindleBook` instance corresponding to the book associated with\n`asin`.", "source": "codesearchnet"}
{"code": "def model_config(instance_type, model, role=None, image=None):\n    s3_operations = {}\n    model.image = (image or model.image)\n    if isinstance(model, sagemaker.model.FrameworkModel):\n        container_def = prepare_framework_container_def(model, instance_type, s3_operations)\n    else:\n        container_def = model.prepare_container_def(instance_type)\n        base_name = utils.base_name_from_image(container_def['Image'])\n        model.name = (model.name or utils.name_from_base(base_name))\n    primary_container = session._expand_container_def(container_def)\n    config = {'ModelName': model.name, 'PrimaryContainer': primary_container, 'ExecutionRoleArn': (role or model.role)}\n    if model.vpc_config:\n        config['VpcConfig'] = model.vpc_config\n    if s3_operations:\n        config['S3Operations'] = s3_operations\n    return config", "docstring": "Export Airflow model config from a SageMaker model\n\nArgs:\ninstance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'\nmodel (sagemaker.model.FrameworkModel): The SageMaker model to export Airflow config from\nrole (str): The ``ExecutionRoleArn`` IAM Role ARN for the model\nimage (str): An container image to use for deploying the model\n\nReturns:\ndict: Model config that can be directly used by SageMakerModelOperator in Airflow. It can also be part\nof the config used by SageMakerEndpointOperator and SageMakerTransformOperator in Airflow.", "source": "codesearchnet"}
{"code": "def __init__(self, scandir_path, system, name, header, bytes_path):\n        \n        self._cache = dict()\n        self._system = system\n        self._name = name\n        self._header = header\n        self._path = ''.join((\n            scandir_path if scandir_path[-1] == '/' else (scandir_path + '/'),\n            name))\n        self._bytes_path = bytes_path", "docstring": "Should only be instantiated by \"scandir\".\n\nArgs:\nscandir_path (str): scandir path argument.\nsystem (pycosio._core.io_system.SystemBase subclass):\nStorage system.\nname (str): Name of the object relative to \"scandir_path\".\nheader (dict): Object header\nbytes_path (bool): True if path must be returned as bytes.", "source": "juraj-google-style"}
{"code": "def get_metric_parsers(metric_packages=tuple(), include_defaults=True):\n    metric_parsers = set()\n    if include_defaults:\n        import git_code_debt.metrics\n        metric_parsers.update(discover(git_code_debt.metrics, is_metric_cls))\n    for metric_package in metric_packages:\n        metric_parsers.update(discover(metric_package, is_metric_cls))\n    return metric_parsers", "docstring": "Gets all of the metric parsers.\n\nArgs:\nmetric_packages - Defaults to no extra packages. An iterable of\nmetric containing packages.  A metric inherits DiffParserBase\nand does not have __metric__ = False\nA metric package must be imported using import a.b.c\ninclude_defaults - Whether to include the generic metric parsers", "source": "codesearchnet"}
{"code": "def list_sites():\n    ret = dict()\n    ps_cmd = ['Get-ChildItem', '-Path', \"'IIS:\\\\Sites'\", '|', 'Select-Object applicationPool, applicationDefaults, Bindings, ID, Name, PhysicalPath, State']\n    keep_keys = ('certificateHash', 'certificateStoreName', 'protocol', 'sslFlags')\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n    for item in items:\n        bindings = dict()\n        for binding in item['bindings']['Collection']:\n            if (binding['protocol'] not in ['http', 'https']):\n                continue\n            filtered_binding = dict()\n            for key in binding:\n                if (key in keep_keys):\n                    filtered_binding.update({key.lower(): binding[key]})\n            binding_info = binding['bindingInformation'].split(':', 2)\n            (ipaddress, port, hostheader) = [element.strip() for element in binding_info]\n            filtered_binding.update({'hostheader': hostheader, 'ipaddress': ipaddress, 'port': port})\n            bindings[binding['bindingInformation']] = filtered_binding\n        application_defaults = dict()\n        for attribute in item['applicationDefaults']['Attributes']:\n            application_defaults.update({attribute['Name']: attribute['Value']})\n        ret[item['name']] = {'apppool': item['applicationPool'], 'bindings': bindings, 'applicationDefaults': application_defaults, 'id': item['id'], 'state': item['state'], 'sourcepath': item['physicalPath']}\n    if (not ret):\n        log.warning('No sites found in output: %s', cmd_ret['stdout'])\n    return ret", "docstring": "List all the currently deployed websites.\n\nReturns:\ndict: A dictionary of the IIS sites and their properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_sites", "source": "codesearchnet"}
{"code": "def set_name(self, name, anyway=False):\n    set_name(self.startEA, name, anyway=anyway)", "docstring": "Set Function Name.\n\nDefault behavior throws an exception when setting to a name that already exists in\nthe IDB. to make IDA automatically add a counter to the name (like in the GUI,)\nuse `anyway=True`.\n\nArgs:\nname: Desired name.\nanyway: `True` to set anyway.", "source": "codesearchnet"}
{"code": "def walknset_vars(self, task_class=None, *args, **kwargs):\n        \n        def change_task(task):\n            if task_class is not None and task.__class__ is not task_class: return False\n            return True\n\n        if self.is_work:\n            for task in self:\n                if not change_task(task): continue\n                task.set_vars(*args, **kwargs)\n\n        elif self.is_flow:\n            for task in self.iflat_tasks():\n                if not change_task(task): continue\n                task.set_vars(*args, **kwargs)\n\n        else:\n            raise TypeError(\"Don't know how to set variables for object class %s\"  % self.__class__.__name__)", "docstring": "Set the values of the ABINIT variables in the input files of the nodes\n\nArgs:\ntask_class: If not None, only the input files of the tasks belonging\nto class `task_class` are modified.\n\nExample:\n\nflow.walknset_vars(ecut=10, kptopt=4)", "source": "juraj-google-style"}
{"code": "def _wait_for_function(self, function_descriptor, driver_id, timeout=10):\n    start_time = time.time()\n    warning_sent = False\n    while True:\n        with self.lock:\n            if (self._worker.actor_id.is_nil() and (function_descriptor.function_id in self._function_execution_info[driver_id])):\n                break\n            elif ((not self._worker.actor_id.is_nil()) and (self._worker.actor_id in self._worker.actors)):\n                break\n        if ((time.time() - start_time) > timeout):\n            warning_message = 'This worker was asked to execute a function that it does not have registered. You may have to restart Ray.'\n            if (not warning_sent):\n                ray.utils.push_error_to_driver(self._worker, ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR, warning_message, driver_id=driver_id)\n            warning_sent = True\n        time.sleep(0.001)", "docstring": "Wait until the function to be executed is present on this worker.\n\nThis method will simply loop until the import thread has imported the\nrelevant function. If we spend too long in this loop, that may indicate\na problem somewhere and we will push an error message to the user.\n\nIf this worker is an actor, then this will wait until the actor has\nbeen defined.\n\nArgs:\nfunction_descriptor : The FunctionDescriptor of the function that\nwe want to execute.\ndriver_id (str): The ID of the driver to push the error message to\nif this times out.", "source": "codesearchnet"}
{"code": "def _model_setup():\n    context.set_log_device_placement(True)\n    batch_size = 64\n    steps = 2\n    with collective_strategy.CollectiveAllReduceStrategy().scope():\n        train_ds, _ = mnist_testing_utils.mnist_synthetic_dataset(batch_size, steps)\n        model = mnist_testing_utils.get_mnist_model((28, 28, 1))\n    return (batch_size, steps, train_ds, model)", "docstring": "Set up a MNIST Keras model for testing purposes.\n\nBuilds a MNIST Keras model and returns model information.\n\nReturns:\nA tuple of (batch_size, steps, train_dataset, mode)", "source": "github-repos"}
{"code": "def norm_zero_one(array, dim=None):\n    if (not util_type.is_float(array)):\n        array = array.astype(np.float32)\n    array_max = array.max(dim)\n    array_min = array.min(dim)\n    array_exnt = np.subtract(array_max, array_min)\n    array_norm = np.divide(np.subtract(array, array_min), array_exnt)\n    return array_norm", "docstring": "normalizes a numpy array from 0 to 1 based in its extent\n\nArgs:\narray (ndarray):\ndim   (int):\n\nReturns:\nndarray:\n\nCommandLine:\npython -m utool.util_alg --test-norm_zero_one\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_alg import *  # NOQA\n>>> import utool as ut\n>>> array = np.array([ 22, 1, 3, 2, 10, 42, ])\n>>> dim = None\n>>> array_norm = norm_zero_one(array, dim)\n>>> result = ut.repr2(list(array_norm), precision=3)\n>>> print(result)\n[0.512, 0.000, 0.049, 0.024, 0.220, 1.000]", "source": "codesearchnet"}
{"code": "def __init__(self, operation, shape, dtype, name=None, index=0):\n    \n    if not isinstance(shape, Shape):\n      raise ValueError(\"shape must be a Shape got %s\" % shape.to_string)\n    if not isinstance(dtype, tf.DType):\n      raise ValueError(\"dtype must be a tf.DType got %s\" % dtype)\n    self._mesh = operation.mesh\n    self._operation = operation\n    self._shape = shape\n    self._dtype = dtype\n    if name is None:\n      name = self.operation.name + \":\" + str(index)\n    self._name = name", "docstring": "Create a Tensor.\n\nArgs:\noperation: the Operation that outputs this tensor\nshape: a Shape\ndtype: a tf.DType\nname: an optional string\nindex: optional integer, the index among operation's output tensors", "source": "juraj-google-style"}
{"code": "def getSpatialReferenceId(self, session):\n        \n        statement = .format(self.geometryColumnName,\n                               self.tableName,\n                               self.id)\n\n        result = session.execute(statement)\n\n        for row in result:\n            return row.srid", "docstring": "Retrieve the spatial reference id by which the geometry column is registered.\n\nThis method is a veneer for an SQL query that calls the ``ST_SRID()`` function on the geometry column.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\n\nReturns:\nstr: PostGIS spatial reference ID.", "source": "juraj-google-style"}
{"code": "def expand(tmpl, *args, **kwargs):\n    replacer = functools.partial(_expand_variable_match, list(args), kwargs)\n    return _VARIABLE_RE.sub(replacer, tmpl)", "docstring": "Expand a path template with the given variables.\n\n..code-block:: python\n\n>>> expand('users/*/messages/*', 'me', '123')\nusers/me/messages/123\n>>> expand('/v1/{name=shelves/*/books/*}', name='shelves/1/books/3')\n/v1/shelves/1/books/3\n\nArgs:\ntmpl (str): The path template.\nargs: The positional variables for the path.\nkwargs: The named variables for the path.\n\nReturns:\nstr: The expanded path\n\nRaises:\nValueError: If a positional or named variable is required by the\ntemplate but not specified or if an unexpected template expression\nis encountered.", "source": "codesearchnet"}
{"code": "def add_logger(name, level=None, format=None):\n    \n    format = format or '%(filename)-11s %(lineno)-3d: %(message)s'\n    log = logging.getLogger(name)\n\n    \n    log.setLevel(level or logging.INFO)\n\n    ch = logging.StreamHandler(sys.stdout)\n    ch.setFormatter(logging.Formatter(format))\n    log.addHandler(ch)\n\n    return log", "docstring": "Set up a stdout logger.\n\nArgs:\nname (str): name of the logger\nlevel: defaults to logging.INFO\nformat (str): format string for logging output.\ndefaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.\n\nReturns:\nThe logger object.", "source": "juraj-google-style"}
{"code": "async def vsetup(self, author):\n    if self.vready:\n        logger.warning('Attempt to init voice when already initialised')\n        return\n    if (self.state != 'starting'):\n        logger.error(\"Attempt to init from wrong state ('{}'), must be 'starting'.\".format(self.state))\n        return\n    self.logger.debug('Setting up voice')\n    self.vchannel = author.voice.voice_channel\n    if self.vchannel:\n        self.statuslog.info('Connecting to voice')\n        try:\n            self.vclient = (await client.join_voice_channel(self.vchannel))\n        except discord.ClientException as e:\n            logger.exception(e)\n            self.statuslog.warning(\"I'm already connected to a voice channel.\")\n            return\n        except discord.opus.OpusNotLoaded as e:\n            logger.exception(e)\n            logger.error('Could not load Opus. This is an error with your FFmpeg setup.')\n            self.statuslog.error('Could not load Opus.')\n            return\n        except discord.DiscordException as e:\n            logger.exception(e)\n            self.statuslog.error(\"I couldn't connect to the voice channel. Check my permissions.\")\n            return\n        except Exception as e:\n            self.statuslog.error('Internal error connecting to voice, disconnecting.')\n            logger.error('Error connecting to voice {}'.format(e))\n            return\n    else:\n        self.statuslog.error(\"You're not connected to a voice channel.\")\n        return\n    self.vready = True", "docstring": "Creates the voice client\n\nArgs:\nauthor (discord.Member): The user that the voice ui will seek", "source": "codesearchnet"}
{"code": "def bind_to_uniform_block(self, binding=0, *, offset=0, size=(- 1)) -> None:\n    self.mglo.bind_to_uniform_block(binding, offset, size)", "docstring": "Bind the buffer to a uniform block.\n\nArgs:\nbinding (int): The uniform block binding.\n\nKeyword Args:\noffset (int): The offset.\nsize (int): The size. Value ``-1`` means all.", "source": "codesearchnet"}
{"code": "def get(\n            self,\n            section_name,\n            key_name,\n    ):\n        \n        value = None\n        try:\n            value = self.local_config.get(section_name, key_name)\n        except Exception as error_msg:\n            self.logger.warning(\n                '%s.%s not found in local config', section_name, key_name\n            )\n            try:\n                value = self.global_config.get(section_name, key_name)\n            except Exception as error_msg:\n                self.logger.error(\n                    '%s.%s not found in global config', section_name, key_name\n                )\n                raise KeyError('Could not find option in local/global config')\n\n        return value", "docstring": "Replicate configparser.get() functionality\n\nArgs:\nsection_name (str): section name in config\nkey_name (str): key name in config.section_name\n\nReturns:\nstr: do not check defaults, only return local value\n\nRaises:\nKeyError: unable to find option in either local or global config", "source": "juraj-google-style"}
{"code": "def _LogForwardedIpChanges(\n      self, configured, desired, to_add, to_remove, interface):\n    \n    if not to_add and not to_remove:\n      return\n    self.logger.info(\n        'Changing %s IPs from %s to %s by adding %s and removing %s.',\n        interface, configured or None, desired or None, to_add or None,\n        to_remove or None)", "docstring": "Log the planned IP address changes.\n\nArgs:\nconfigured: list, the IP address strings already configured.\ndesired: list, the IP address strings that will be configured.\nto_add: list, the forwarded IP address strings to configure.\nto_remove: list, the forwarded IP address strings to delete.\ninterface: string, the output device to modify.", "source": "juraj-google-style"}
{"code": "def ParseLSQuarantineRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = LsQuarantineEventData()\n    event_data.agent = self._GetRowValue(query_hash, row, 'Agent')\n    event_data.data = self._GetRowValue(query_hash, row, 'Data')\n    event_data.query = query\n    event_data.url = self._GetRowValue(query_hash, row, 'URL')\n    timestamp = self._GetRowValue(query_hash, row, 'Time')\n    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a launch services quarantine event row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def read(self, size):\n    data_list = []\n    bytes_read = 0\n    last_block_position = self.position\n    while bytes_read < size:\n        bytes_from_remaining = min(size - bytes_read, len(self.remaining))\n        data_list.append(self.remaining[0:bytes_from_remaining])\n        self.remaining = self.remaining[bytes_from_remaining:]\n        self.position += bytes_from_remaining\n        bytes_read += bytes_from_remaining\n        if not self.remaining:\n            try:\n                self.remaining = self.conn.recv_bytes()\n            except EOFError:\n                break\n    last_block = b''.join(data_list)\n    if last_block:\n        self.last_block_position = last_block_position\n        self.last_block = last_block\n    return last_block", "docstring": "Read data from the wrapped pipe connection.\n\nArgs:\nsize: Number of bytes to read. Actual number of bytes read is always\nequal to size unless EOF is reached.\n\nReturns:\ndata read as str.", "source": "github-repos"}
{"code": "def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> Tuple[torch.FloatTensor]:\n    pred_logits, image_class_embeds = self.class_head(image_feats, query_embeds, query_mask)\n    return (pred_logits, image_class_embeds)", "docstring": "Args:\nimage_feats:\nFeatures extracted from the `image_text_embedder`.\nquery_embeds:\nText query embeddings.\nquery_mask:\nMust be provided with query_embeddings. A mask indicating which query embeddings are valid.", "source": "github-repos"}
{"code": "def bessel_j1(x, name=None):\n    with ops.name_scope(name, 'bessel_j1', [x]):\n        return gen_special_math_ops.bessel_j1(x)", "docstring": "Computes the Bessel j1 function of `x` element-wise.\n\nModified Bessel function of order 1.\n\n>>> tf.math.special.bessel_j1([0.5, 1., 2., 4.]).numpy()\narray([ 0.24226846,  0.44005059,  0.57672481, -0.06604333], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.j1\n@end_compatibility", "source": "github-repos"}
{"code": "def build_data(data_path, size, dataset):\n    \n    image_size = 32\n    if dataset == \"cifar10\":\n        label_bytes = 1\n        label_offset = 0\n    elif dataset == \"cifar100\":\n        label_bytes = 1\n        label_offset = 1\n    depth = 3\n    image_bytes = image_size * image_size * depth\n    record_bytes = label_bytes + label_offset + image_bytes\n\n    def load_transform(value):\n        \n        record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])\n        label = tf.cast(tf.slice(record, [label_offset], [label_bytes]),\n                        tf.int32)\n        \n        \n        depth_major = tf.reshape(\n            tf.slice(record, [label_bytes], [image_bytes]),\n            [depth, image_size, image_size])\n        \n        image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)\n        return (image, label)\n    \n    data_files = tf.gfile.Glob(data_path)\n    data = tf.contrib.data.FixedLengthRecordDataset(data_files,\n                                                    record_bytes=record_bytes)\n    data = data.map(load_transform)\n    data = data.batch(size)\n    iterator = data.make_one_shot_iterator()\n    return iterator.get_next()", "docstring": "Creates the queue and preprocessing operations for the dataset.\n\nArgs:\ndata_path: Filename for cifar10 data.\nsize: The number of images in the dataset.\ndataset: The dataset we are using.\n\nReturns:\nqueue: A Tensorflow queue for extracting the images and labels.", "source": "juraj-google-style"}
{"code": "def constant(interval=1):\n    \n    try:\n        itr = iter(interval)\n    except TypeError:\n        itr = itertools.repeat(interval)\n\n    for val in itr:\n        yield val", "docstring": "Generator for constant intervals.\n\nArgs:\ninterval: A constant value to yield or an iterable of such values.", "source": "juraj-google-style"}
{"code": "def cancel(self):\n    raise NotImplementedError()", "docstring": "Cancels the pipeline execution.\n\nRaises:\nIOError: If there is a persistent problem getting job\ninformation.\nNotImplementedError: If the runner does not support this\noperation.\n\nReturns:\nThe final state of the pipeline.", "source": "github-repos"}
{"code": "def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):\n    logger.warning_once('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_semantic_segmentation`.')\n    out_logits, raw_masks = (outputs.logits, outputs.pred_masks)\n    empty_label = out_logits.shape[-1] - 1\n    preds = []\n\n    def to_tuple(tup):\n        if isinstance(tup, tuple):\n            return tup\n        return tuple(tup.tolist())\n    for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes):\n        cur_scores, cur_labels = cur_logits.softmax(-1).max(-1)\n        keep = cur_labels.ne(empty_label) & (cur_scores > threshold)\n        cur_scores = cur_scores[keep]\n        cur_labels = cur_labels[keep]\n        cur_masks = cur_masks[keep]\n        cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode='bilinear').squeeze(1)\n        cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1\n        predictions = {'scores': cur_scores, 'labels': cur_labels, 'masks': cur_masks}\n        preds.append(predictions)\n    return preds", "docstring": "Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrSegmentationOutput`]):\nRaw outputs of the model.\ntarget_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):\nTorch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.\nthreshold (`float`, *optional*, defaults to 0.9):\nThreshold to use to filter out queries.\nmask_threshold (`float`, *optional*, defaults to 0.5):\nThreshold to use when turning the predicted masks into binary values.\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def normalize_collaboration(collaboration):\n    if (not collaboration):\n        return []\n    collaboration = collaboration.strip()\n    if (collaboration.startswith('(') and collaboration.endswith(')')):\n        collaboration = collaboration[1:(- 1)]\n    collaborations = _RE_AND.split(collaboration)\n    collaborations = (_RE_COLLABORATION_LEADING.sub('', collab) for collab in collaborations)\n    collaborations = (_RE_COLLABORATION_TRAILING.sub('', collab) for collab in collaborations)\n    return [collab.strip() for collab in collaborations]", "docstring": "Normalize collaboration string.\n\nArgs:\ncollaboration: a string containing collaboration(s) or None\n\nReturns:\nlist: List of extracted and normalized collaborations\n\nExamples:\n>>> from inspire_schemas.utils import normalize_collaboration\n>>> normalize_collaboration('for the CMS and ATLAS Collaborations')\n['CMS', 'ATLAS']", "source": "codesearchnet"}
{"code": "def get_src_folder(self):\n    with open(('%s/settings.gradle' % self.path)) as f:\n        for line in f.readlines():\n            if line.startswith('include'):\n                matches = re.findall(\"\\\\'\\\\:?(.+?)\\\\'\", line)\n            if (len(matches) == 0):\n                continue\n            for folder in matches:\n                if self.is_app_folder(folder):\n                    return folder\n    return 'app'", "docstring": "Gets the app source folder from settings.gradle file.\n\nReturns:\nA string containing the project source folder name (default is \"app\")", "source": "codesearchnet"}
{"code": "def has_filename(self, filename):\n        \n        fixpath = lambda path: osp.normcase(osp.realpath(path))\n        for index, finfo in enumerate(self.data):\n            if fixpath(filename) == fixpath(finfo.filename):\n                return index\n        return None", "docstring": "Return the self.data index position for the filename.\n\nArgs:\nfilename: Name of the file to search for in self.data.\n\nReturns:\nThe self.data index for the filename.  Returns None\nif the filename is not found in self.data.", "source": "juraj-google-style"}
{"code": "def run_population(population, evolution, gpus):\n    \n    population_size = len(population)\n    for k in range(population_size \n        procs = []\n        for j in range(len(gpus)):\n            i = k * len(gpus) + j\n            if i < population_size:\n                save_path = expand_path(\n                    evolution.get_value_from_config(parse_config(population[i]),\n                                                    evolution.path_to_models_save_path))\n\n                save_path.mkdir(parents=True, exist_ok=True)\n                f_name = save_path / \"config.json\"\n                save_json(population[i], f_name)\n\n                with save_path.joinpath('out.txt').open('w', encoding='utf8') as outlog,\\\n                        save_path.joinpath('err.txt').open('w', encoding='utf8') as errlog:\n                    env = dict(os.environ)\n                    if len(gpus) > 1 or gpus[0] != -1:\n                        env['CUDA_VISIBLE_DEVICES'] = str(gpus[j])\n\n                    procs.append(Popen(\"{} -m deeppavlov train {}\".format(sys.executable, str(f_name)),\n                                       shell=True, stdout=outlog, stderr=errlog, env=env))\n        for j, proc in enumerate(procs):\n            i = k * len(gpus) + j\n            log.info(f'Waiting on {i}th proc')\n            if proc.wait() != 0:\n                save_path = expand_path(\n                    evolution.get_value_from_config(parse_config(population[i]),\n                                                    evolution.path_to_models_save_path))\n                with save_path.joinpath('err.txt').open(encoding='utf8') as errlog:\n                    log.warning(f'Population {i} returned an error code {proc.returncode} and an error log:\\n' +\n                                errlog.read())\n    return None", "docstring": "Change save and load paths for obtained population, save config.json with model config,\nrun population via current python executor (with which evolve.py already run)\nand on given devices (-1 means CPU, other integeres - visible for evolve.py GPUs)\nArgs:\npopulation: list of dictionaries - configs of current population\nevolution: ParamsEvolution\ngpus: list of given devices (list of integers)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, file_object, encoding='utf-8', end_of_line='\\n'):\n    \n    super(TextFile, self).__init__()\n    self._file_object = file_object\n    self._file_object_size = file_object.get_size()\n    self._encoding = encoding\n    self._end_of_line = end_of_line.encode(self._encoding)\n    self._end_of_line_length = len(self._end_of_line)\n    self._lines = []\n    self._lines_buffer = b''\n    self._lines_buffer_offset = 0\n    self._current_offset = 0", "docstring": "Initializes the text file.\n\nArgs:\nfile_object (FileIO): a file-like object to read from.\nencoding (Optional[str]): text encoding.\nend_of_line (Optional[str]): end of line indicator.", "source": "juraj-google-style"}
{"code": "def convert_legacy_structure(output_types, output_shapes, output_classes):\n    flat_types = nest.flatten(output_types)\n    flat_shapes = nest.flatten(output_shapes)\n    flat_classes = nest.flatten(output_classes)\n    flat_ret = []\n    for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes, flat_classes):\n        if isinstance(flat_class, type_spec.TypeSpec):\n            flat_ret.append(flat_class)\n        elif issubclass(flat_class, sparse_tensor.SparseTensor):\n            flat_ret.append(sparse_tensor.SparseTensorSpec(flat_shape, flat_type))\n        elif issubclass(flat_class, tensor_lib.Tensor):\n            flat_ret.append(tensor_lib.TensorSpec(flat_shape, flat_type))\n        elif issubclass(flat_class, tensor_array_ops.TensorArray):\n            flat_ret.append(tensor_array_ops.TensorArraySpec(flat_shape[2:], flat_type, dynamic_size=tensor_shape.dimension_value(flat_shape[0]), infer_shape=tensor_shape.dimension_value(flat_shape[1])))\n        else:\n            raise TypeError('Could not build a structure for output class {}. Make sure any component class in `output_classes` inherits from one of the following classes: `tf.TypeSpec`, `tf.sparse.SparseTensor`, `tf.Tensor`, `tf.TensorArray`.'.format(flat_class.__name__))\n    return nest.pack_sequence_as(output_classes, flat_ret)", "docstring": "Returns a `Structure` that represents the given legacy structure.\n\nThis method provides a way to convert from the existing `Dataset` and\n`Iterator` structure-related properties to a `Structure` object. A \"legacy\"\nstructure is represented by the `tf.data.Dataset.output_types`,\n`tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes`\nproperties.\n\nTODO(b/110122868): Remove this function once `Structure` is used throughout\n`tf.data`.\n\nArgs:\noutput_types: A nested structure of `tf.DType` objects corresponding to\neach component of a structured value.\noutput_shapes: A nested structure of `tf.TensorShape` objects\ncorresponding to each component a structured value.\noutput_classes: A nested structure of Python `type` objects corresponding\nto each component of a structured value.\n\nReturns:\nA `Structure`.\n\nRaises:\nTypeError: If a structure cannot be built from the arguments, because one of\nthe component classes in `output_classes` is not supported.", "source": "github-repos"}
{"code": "def guess_content_kind(path=None, web_video_data=None, questions=None):\n    \n    \n    if questions and len(questions) > 0:\n        return content_kinds.EXERCISE\n\n    \n    if path:\n        ext = os.path.splitext(path)[1][1:].lower()\n        if ext in content_kinds.MAPPING:\n            return content_kinds.MAPPING[ext]\n        raise InvalidFormatException(\"Invalid file type: Allowed formats are {0}\".format([key for key, value in content_kinds.MAPPING.items()]))\n    elif web_video_data:\n        return content_kinds.VIDEO\n    else:\n        return content_kinds.TOPIC", "docstring": "guess_content_kind: determines what kind the content is\nArgs:\nfiles (str or list): files associated with content\nReturns: string indicating node's kind", "source": "juraj-google-style"}
{"code": "def load(self, label_lookup_path, uid_lookup_path):\n    if (not tf.gfile.Exists(uid_lookup_path)):\n        tf.logging.fatal('File does not exist %s', uid_lookup_path)\n    if (not tf.gfile.Exists(label_lookup_path)):\n        tf.logging.fatal('File does not exist %s', label_lookup_path)\n    proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n    uid_to_human = {}\n    p = re.compile('[n\\\\d]*[ \\\\S,]*')\n    for line in proto_as_ascii_lines:\n        parsed_items = p.findall(line)\n        uid = parsed_items[0]\n        human_string = parsed_items[2]\n        uid_to_human[uid] = human_string\n    node_id_to_uid = {}\n    proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n    for line in proto_as_ascii:\n        if line.startswith('  target_class:'):\n            target_class = int(line.split(': ')[1])\n        if line.startswith('  target_class_string:'):\n            target_class_string = line.split(': ')[1]\n            node_id_to_uid[target_class] = target_class_string[1:(- 2)]\n    node_id_to_name = {}\n    for (key, val) in node_id_to_uid.items():\n        if (val not in uid_to_human):\n            tf.logging.fatal('Failed to locate: %s', val)\n        name = uid_to_human[val]\n        node_id_to_name[key] = name\n    return node_id_to_name", "docstring": "Loads a human readable English name for each softmax node.\n\nArgs:\nlabel_lookup_path: string UID to integer node ID.\nuid_lookup_path: string UID to human-readable string.\n\nReturns:\ndict from integer node ID to human-readable string.", "source": "codesearchnet"}
{"code": "def locator(self, value):\n        \n        self._locator = value\n        self._latitude, self._longitude = utils.from_grid_locator(value)", "docstring": "Update the locator, and trigger a latitude and longitude update.\n\nArgs:\nvalue (str): New Maidenhead locator string", "source": "juraj-google-style"}
{"code": "def center_slab(slab):\n    bdists = sorted([nn[1] for nn in slab.get_neighbors(slab[0], 10) if (nn[1] > 0)])\n    r = (bdists[0] * 3)\n    all_indices = [i for (i, site) in enumerate(slab)]\n    for site in slab:\n        if any([(nn[1] > slab.lattice.c) for nn in slab.get_neighbors(site, r)]):\n            shift = ((1 - site.frac_coords[2]) + 0.05)\n            slab.translate_sites(all_indices, [0, 0, shift])\n    weights = [s.species.weight for s in slab]\n    center_of_mass = np.average(slab.frac_coords, weights=weights, axis=0)\n    shift = (0.5 - center_of_mass[2])\n    slab.translate_sites(all_indices, [0, 0, shift])\n    return slab", "docstring": "The goal here is to ensure the center of the slab region\nis centered close to c=0.5. This makes it easier to\nfind the surface sites and apply operations like doping.\n\nThere are three cases where the slab in not centered:\n1. The slab region is completely between two vacuums in the\nbox but not necessarily centered. We simply shift the\nslab by the difference in its center of mass and 0.5\nalong the c direction.\n2. The slab completely spills outside the box from the bottom\nand into the top. This makes it incredibly difficult to\nlocate surface sites. We iterate through all sites that\nspill over (z>c) and shift all sites such that this specific\nsite is now on the other side. Repeat for all sites with z>c.\n3. This is a simpler case of scenario 2. Either the top or bottom\nslab sites are at c=0 or c=1. Treat as scenario 2.\nArgs:\nslab (Slab): Slab structure to center\nReturns:\nReturns a centered slab structure", "source": "codesearchnet"}
{"code": "def _check_default_values(method_signature, base_signature):\n    for base_param_name, base_default_value in base_signature.defaults.items():\n        if base_param_name in base_signature.kwonly_params:\n            if base_param_name not in method_signature.kwonly_params and base_param_name not in method_signature.param_names:\n                continue\n            method_param_name = base_param_name\n        else:\n            base_param_index = base_signature.param_names.index(base_param_name)\n            if base_param_index >= len(method_signature.param_names):\n                continue\n            method_param_name = method_signature.param_names[base_param_index]\n        try:\n            method_default_value = method_signature.defaults[method_param_name]\n        except KeyError:\n            return SignatureError(SignatureErrorType.DEFAULT_PARAMETER_MISMATCH, f\"Parameter '{method_param_name}' must have a default value.\")\n        try:\n            base_default = abstract_utils.get_atomic_python_constant(base_default_value)\n            method_default = abstract_utils.get_atomic_python_constant(method_default_value)\n        except abstract_utils.ConversionError:\n            continue\n        if base_default != method_default:\n            return SignatureError(SignatureErrorType.DEFAULT_VALUE_MISMATCH, f\"Parameter '{base_param_name}' must have the same default value.\")\n    return None", "docstring": "Checks that default parameter values of the overriding method match.\n\nArgs:\nmethod_signature: signature of the overriding method.\nbase_signature: signature of the overridden method.\n\nReturns:\nSignatureError if a mismatch is detected. Otherwise returns None.", "source": "github-repos"}
{"code": "def process_tfma(schema_file, big_query_table=None, eval_model_dir=None, max_eval_rows=None, pipeline_args=None, publish_to_bq=False, project=None, metrics_table=None, metrics_dataset=None):\n    if big_query_table is None:\n        raise ValueError('--big_query_table should be provided.')\n    slice_spec = [tfma.slicer.SingleSliceSpec(), tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])]\n    metrics_namespace = metrics_table\n    schema = taxi.read_schema(schema_file)\n    eval_shared_model = tfma.default_eval_shared_model(eval_saved_model_path=eval_model_dir, add_metrics_callbacks=[tfma.post_export_metrics.calibration_plot_and_prediction_histogram(), tfma.post_export_metrics.auc_plots()])\n    metrics_monitor = None\n    if publish_to_bq:\n        metrics_monitor = MetricsReader(publish_to_bq=publish_to_bq, project_name=project, bq_table=metrics_table, bq_dataset=metrics_dataset, namespace=metrics_namespace, filters=MetricsFilter().with_namespace(metrics_namespace))\n    pipeline = beam.Pipeline(argv=pipeline_args)\n    query = taxi.make_sql(big_query_table, max_eval_rows, for_eval=True)\n    raw_feature_spec = taxi.get_raw_feature_spec(schema)\n    raw_data = pipeline | 'ReadBigQuery' >> ReadFromBigQuery(query=query, project=project, use_standard_sql=True) | 'Measure time: Start' >> beam.ParDo(MeasureTime(metrics_namespace)) | 'CleanData' >> beam.Map(lambda x: taxi.clean_raw_data_dict(x, raw_feature_spec))\n    coder = taxi.make_proto_coder(schema)\n    extractors = tfma.default_extractors(eval_shared_model=eval_shared_model, slice_spec=slice_spec, desired_batch_size=None, materialize=False)\n    evaluators = tfma.default_evaluators(eval_shared_model=eval_shared_model, desired_batch_size=None, num_bootstrap_samples=1)\n    _ = raw_data | 'ToSerializedTFExample' >> beam.Map(coder.encode) | 'Extract Results' >> tfma.InputsToExtracts() | 'Extract and evaluate' >> tfma.ExtractAndEvaluate(extractors=extractors, evaluators=evaluators) | 'Map Evaluations to PCollection' >> MapEvalToPCollection() | 'Measure time: End' >> beam.ParDo(MeasureTime(metrics_namespace))\n    result = pipeline.run()\n    result.wait_until_finish()\n    if metrics_monitor:\n        metrics_monitor.publish_metrics(result)", "docstring": "Runs a batch job to evaluate the eval_model against the given input.\n\nArgs:\nschema_file: A file containing a text-serialized Schema that describes the\neval data.\nbig_query_table: A BigQuery table name specified as DATASET.TABLE which\nshould be the input for evaluation. This can only be set if input_csv is\nNone.\neval_model_dir: A directory where the eval model is located.\nmax_eval_rows: Number of rows to query from BigQuery.\npipeline_args: additional DataflowRunner or DirectRunner args passed to\nthe beam pipeline.\npublish_to_bq:\nproject:\nmetrics_dataset:\nmetrics_table:\n\nRaises:\nValueError: if input_csv and big_query_table are not specified correctly.", "source": "github-repos"}
{"code": "def _refresh(self, http):\n        \n        try:\n            self._retrieve_info(http)\n            self.access_token, self.token_expiry = _metadata.get_token(\n                http, service_account=self.service_account_email)\n        except http_client.HTTPException as err:\n            raise client.HttpAccessTokenRefreshError(str(err))", "docstring": "Refreshes the access token.\n\nSkip all the storage hoops and just refresh using the API.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\n\nRaises:\nHttpAccessTokenRefreshError: When the refresh fails.", "source": "juraj-google-style"}
{"code": "def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor:\n    if mask is None:\n        mask = x.new_ones(x.shape[:-1])\n    if not self.starting:\n        x = x.transpose(-2, -3)\n        mask = mask.transpose(-1, -2)\n    x = self.layer_norm(x)\n    mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]\n    triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))\n    triangle_bias = triangle_bias.unsqueeze(-4)\n    biases = [mask_bias, triangle_bias]\n    if chunk_size is not None:\n        x = self._chunk(x, biases, chunk_size, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma, inplace_safe=inplace_safe)\n    else:\n        x = self.mha(q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma)\n    if not self.starting:\n        x = x.transpose(-2, -3)\n    return x", "docstring": "Args:\nx:\n[*, I, J, C_in] input tensor (e.g. the pair representation)\nReturns:\n[*, I, J, C_in] output tensor", "source": "github-repos"}
{"code": "def find_vasp_calculations():\n    dir_list = [('./' + re.sub('vasprun\\\\.xml', '', path)) for path in glob.iglob('**/vasprun.xml', recursive=True)]\n    gz_dir_list = [('./' + re.sub('vasprun\\\\.xml\\\\.gz', '', path)) for path in glob.iglob('**/vasprun.xml.gz', recursive=True)]\n    return (dir_list + gz_dir_list)", "docstring": "Returns a list of all subdirectories that contain either a vasprun.xml file\nor a compressed vasprun.xml.gz file.\n\nArgs:\nNone\n\nReturns:\n(List): list of all VASP calculation subdirectories.", "source": "codesearchnet"}
{"code": "def disp(obj: Any, mode: str='') -> None:\n    if _Options.LINE in mode:\n        raise NotImplementedError('Line mode not supported in `disp()`')\n    _display_and_return(obj, options=mode)", "docstring": "Display the object.\n\nThis is the functional API for the `;` auto display magic.\n\nArgs:\nobj: The object to display\nmode: Any mode supported by `ecolab.auto_display()`", "source": "github-repos"}
{"code": "def find_all_documented_objects() -> List[str]:\n    documented_obj = []\n    documented_methods_map = {}\n    for doc_file in Path(PATH_TO_DOC).glob('**/*.md'):\n        with open(doc_file, 'r', encoding='utf-8', newline='\\n') as f:\n            content = f.read()\n        raw_doc_objs = re.findall('\\\\[\\\\[autodoc\\\\]\\\\]\\\\s+(\\\\S+)\\\\s+', content)\n        documented_obj += [obj.split('.')[-1] for obj in raw_doc_objs]\n        for obj in raw_doc_objs:\n            obj_public_methods = re.findall(f'\\\\[\\\\[autodoc\\\\]\\\\] {obj}((\\\\n\\\\s+-.*)+)', content)\n            if len(obj_public_methods) == 0:\n                continue\n            else:\n                documented_methods_map[obj] = re.findall('(?<=-\\\\s).*', obj_public_methods[0][0])\n    return (documented_obj, documented_methods_map)", "docstring": "Parse the content of all doc files to detect which classes and functions it documents.\n\nReturns:\n`List[str]`: The list of all object names being documented.\n`Dict[str, List[str]]`: A dictionary mapping the object name (full import path, e.g.\n`integrations.PeftAdapterMixin`) to its documented methods", "source": "github-repos"}
{"code": "def get_appliance_by_name(self, appliance_name):\n        \n        appliances = self.get_appliances()\n\n        if appliances:\n            for appliance in appliances:\n                if appliance['name'] == appliance_name:\n                    return appliance\n        return None", "docstring": "Gets the particular Image Streamer resource based on its name.\n\nArgs:\nappliance_name:\nThe Image Streamer resource name.\n\nReturns:\ndict: Image Streamer resource.", "source": "juraj-google-style"}
{"code": "def write(grp, out_path):\n    with open(out_path, 'w') as f:\n        for x in grp:\n            f.write((str(x) + '\\n'))", "docstring": "Write a GRP to a text file.\n\nArgs:\ngrp (list): GRP object to write to new-line delimited text file\nout_path (string): output path\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _has_valid_catchup_replies(self, seq_no: int, txns_to_process: List[Tuple[(int, Any)]]) -> Tuple[(bool, str, int)]:\n    assert (seq_no == txns_to_process[0][0])\n    (node_name, catchup_rep) = self._find_catchup_reply_for_seq_no(seq_no)\n    txns = catchup_rep.txns\n    txns = [self._provider.transform_txn_for_ledger(txn) for (s, txn) in txns_to_process[:len(txns)] if (str(s) in txns)]\n    temp_tree = self._ledger.treeWithAppliedTxns(txns)\n    proof = catchup_rep.consProof\n    final_size = self._catchup_till.final_size\n    final_hash = self._catchup_till.final_hash\n    try:\n        logger.info('{} verifying proof for {}, {}, {}, {}, {}'.format(self, temp_tree.tree_size, final_size, temp_tree.root_hash, final_hash, proof))\n        verified = self._provider.verifier(self._ledger_id).verify_tree_consistency(temp_tree.tree_size, final_size, temp_tree.root_hash, Ledger.strToHash(final_hash), [Ledger.strToHash(p) for p in proof])\n    except Exception as ex:\n        logger.info('{} could not verify catchup reply {} since {}'.format(self, catchup_rep, ex))\n        verified = False\n    return (bool(verified), node_name, len(txns))", "docstring": "Transforms transactions for ledger!\n\nReturns:\nWhether catchup reply corresponding to seq_no\nName of node from which txns came\nNumber of transactions ready to be processed", "source": "codesearchnet"}
{"code": "class FlaxImageClassifierOutputWithNoAttention(ModelOutput):\n    logits: Optional[jnp.ndarray] = None\n    hidden_states: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for outputs of image classification models.\n\nArgs:\nlogits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):\nClassification (or regression if config.num_labels==1) scores (before SoftMax).\nhidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when\n`config.output_hidden_states=True`):\nTuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one\nfor the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also\ncalled feature maps) of the model at the output of each stage.", "source": "github-repos"}
{"code": "def save_as(self, filename: str) -> None:\n        \n        lib.TCOD_image_save(self.image_c, filename.encode(\"utf-8\"))", "docstring": "Save the Image to a 32-bit .bmp or .png file.\n\nArgs:\nfilename (Text): File path to same this Image.", "source": "juraj-google-style"}
{"code": "def make_all(module_name, doc_string_modules=None):\n    if doc_string_modules is None:\n        doc_string_modules = [_sys.modules[module_name]]\n    cur_members = set((name for name, _ in _tf_inspect.getmembers(_sys.modules[module_name])))\n    results = set()\n    for doc_module in doc_string_modules:\n        results.update([m.group(1) for m in _reference_pattern.finditer(doc_module.__doc__) if m.group(1) in cur_members])\n    return list(results)", "docstring": "Generates `__all__` from the docstring of one or more modules.\n\nUsage: `make_all(__name__)` or\n`make_all(__name__, [sys.modules(__name__), other_module])`. The doc string\nmodules must each a docstring, and `__all__` will contain all symbols with\n`@@` references, where that symbol currently exists in the module named\n`module_name`.\n\nArgs:\nmodule_name: The name of the module (usually `__name__`).\ndoc_string_modules: a list of modules from which to take docstring.\nIf None, then a list containing only the module named `module_name` is used.\n\nReturns:\nA list suitable for use as `__all__`.", "source": "github-repos"}
{"code": "def matches(self, spec):\n        \n        if callable(spec) and not isinstance(spec, type): return spec(self)\n        elif isinstance(spec, type): return isinstance(self, spec)\n        specification = (self.__class__.__name__, self.group, self.label)\n        split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec\n        split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False)\n                                    for s in split_spec))\n        if all(nocompare): return True\n        match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc))\n        self_spec = match_fn(split_spec)\n        unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec\n        if unescaped_match: return True\n        sanitizers = [util.sanitize_identifier, util.group_sanitizer, util.label_sanitizer]\n        identifier_specification = tuple(fn(ident, escape=False)\n                                         for ident, fn in zip(specification, sanitizers))\n        identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec\n        return identifier_match", "docstring": "Whether the spec applies to this object.\n\nArgs:\nspec: A function, spec or type to check for a match\n* A 'type[[.group].label]' string which is compared\nagainst the type, group and label of this object\n* A function which is given the object and returns\na boolean.\n* An object type matched using isinstance.\n\nReturns:\nbool: Whether the spec matched this object.", "source": "juraj-google-style"}
{"code": "def _ParseQuery(self, parser_mediator, database, query, callback, cache):\n    \n    row_cache = cache.GetRowCache(query)\n\n    try:\n      rows = database.Query(query)\n\n    except sqlite3.DatabaseError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to run query: {0:s} on database with error: {1!s}'.format(\n              query, exception))\n      return\n\n    for index, row in enumerate(rows):\n      if parser_mediator.abort:\n        break\n\n      row_hash = self._HashRow(row)\n      if row_hash in row_cache:\n        continue\n\n      try:\n        callback(parser_mediator, query, row, cache=cache, database=database)\n\n      except Exception as exception:  \n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse row: {0:d} with callback: {1:s} on database '\n            'with error: {2!s}').format(\n                index, callback.__name__, exception))\n        \n        return\n\n      row_cache.add(row_hash)", "docstring": "Queries a database and parses the results.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ndatabase (SQLiteDatabase): database.\nquery (str): query.\ncallback (function): function to invoke to parse an individual row.\ncache (SQLiteCache): cache.", "source": "juraj-google-style"}
{"code": "def make_noise_surface(dims=DEFAULT_DIMS, blur=10, seed=None):\n    if (seed is not None):\n        np.random.seed(seed)\n    return gaussian_filter(np.random.normal(size=dims), blur)", "docstring": "Makes a surface by generating random noise and blurring it.\n\nArgs:\ndims (pair): the dimensions of the surface to create\nblur (float): the amount of Gaussian blur to apply\nseed (int): a random seed to use (optional)\n\nReturns:\nsurface: A surface.", "source": "codesearchnet"}
{"code": "def device_function(self, op):\n    if not self._merge_devices and op.device:\n        return op.device\n    current_device = pydev.DeviceSpec.from_string(op.device or '')\n    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def\n    if self._ps_tasks and self._ps_device and (node_def.op in self._ps_ops):\n        ps_device = pydev.DeviceSpec.from_string(self._ps_device)\n        current_job, ps_job = (current_device.job, ps_device.job)\n        if ps_job and (not current_job or current_job == ps_job):\n            ps_device = ps_device.replace(task=self._ps_strategy(op))\n        ps_device = ps_device.make_merged_spec(current_device)\n        return ps_device.to_string()\n    worker_device = pydev.DeviceSpec.from_string(self._worker_device or '')\n    worker_device = worker_device.make_merged_spec(current_device)\n    return worker_device.to_string()", "docstring": "Choose a device for `op`.\n\nArgs:\nop: an `Operation`.\n\nReturns:\nThe device to use for the `Operation`.", "source": "github-repos"}
{"code": "def GetValueRepresentation(cls, value,\n                             version=sorted(_SERVICE_MAP.keys())[-1]):\n    \n    if isinstance(value, str) or isinstance(value, unicode):\n      return {'value': value, 'xsi_type': 'TextValue'}\n    elif isinstance(value, bool):\n      return {'value': value, 'xsi_type': 'BooleanValue'}\n    elif isinstance(value, numbers.Number):\n      return {'value': value, 'xsi_type': 'NumberValue'}\n    \n    \n    elif isinstance(value, datetime.datetime):\n      if value.tzinfo is None:\n        raise googleads.errors.GoogleAdsValueError(\n            'Datetime %s is not timezone aware.' % value\n        )\n\n      return {\n          'xsi_type': 'DateTimeValue',\n          'value': {\n              'date': {\n                  'year': value.year,\n                  'month': value.month,\n                  'day': value.day,\n              },\n              'hour': value.hour,\n              'minute': value.minute,\n              'second': value.second,\n              'timeZoneId' if version >= 'v201811' else 'timeZoneID':\n                  value.tzinfo.zone,\n          }\n      }\n    elif isinstance(value, datetime.date):\n      return {\n          'xsi_type': 'DateValue',\n          'value': {\n              'year': value.year,\n              'month': value.month,\n              'day': value.day,\n          }\n      }\n    elif isinstance(value, list):\n      if value and not all(isinstance(x, type(value[0])) for x in value):\n        raise googleads.errors.GoogleAdsValueError('Cannot pass more than one '\n                                                   'type in a set.')\n\n      return {\n          'xsi_type': 'SetValue',\n          'values': [cls.GetValueRepresentation(v, version) for v in value]\n      }\n    else:\n      raise googleads.errors.GoogleAdsValueError(\n          'Can\\'t represent unknown type: %s.' % type(value))", "docstring": "Converts a single python value to its PQL representation.\n\nArgs:\nvalue: A python value.\nversion: A string identifying the Ad Manager version the value object\nis compatible with. This defaults to what is currently the latest\nversion. This will be updated in future releases to point to what is\nthen the latest version.\n\nReturns:\nThe value formatted for PQL statements which are compatible with a\nparticular API version.", "source": "juraj-google-style"}
{"code": "def absolute_name(self):\n    if (self.is_root() or self.parent.is_root()):\n        return utils.slugify(self.name)\n    return ':'.join([self.parent.absolute_name, utils.slugify(self.name)])", "docstring": "Get the absolute name of ``self``.\n\nReturns:\nstr: the absolute name.", "source": "codesearchnet"}
{"code": "def get_all_pattern_variables(self, patternnumber):\n        \n        _checkPatternNumber(patternnumber)\n        \n        outputstring = ''\n        for stepnumber in range(8):\n            outputstring += 'SP{0}: {1}  Time{0}: {2}\\n'.format(stepnumber, \\\n                self.get_pattern_step_setpoint( patternnumber, stepnumber), \\\n                self.get_pattern_step_time(     patternnumber, stepnumber)   )\n        \n        outputstring += 'Actual step:        {0}\\n'.format(self.get_pattern_actual_step(        patternnumber) )\n        outputstring += 'Additional cycles:  {0}\\n'.format(self.get_pattern_additional_cycles(  patternnumber) )\n        outputstring += 'Linked pattern:     {0}\\n'.format(self.get_pattern_link_topattern(     patternnumber) ) \n            \n        return outputstring", "docstring": "Get all variables for a given pattern at one time.\n\nArgs:\npatternnumber (integer): 0-7\n\nReturns:\nA descriptive multiline string.", "source": "juraj-google-style"}
{"code": "def _step(time, output_ta_t, prev_output, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = tree.pack_sequence_as(inputs, current_input)\n    mask_t = masking_fn(time)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_output = tree.flatten(output)\n    flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output)\n    flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n    flat_state = tree.flatten(states)\n    flat_new_state = tree.flatten(new_states)\n    flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n    new_states = tree.pack_sequence_as(new_states, flat_final_state)\n    ta_index_to_write = time if return_all_outputs else 0\n    output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_new_output)))\n    return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\nprev_output: tuple of outputs from time - 1.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1, output_ta_t, output) + tuple(new_states)`", "source": "github-repos"}
{"code": "def exit_code(self, code):\n        \n        if code is not None and code in [0, 1, 3]:\n            self._exit_code = code\n        else:\n            self.log.warning(u'Invalid exit code')", "docstring": "Set the App exit code.\n\nFor TC Exchange Apps there are 3 supported exit codes.\n* 0 indicates a normal exit\n* 1 indicates a failure during execution\n* 3 indicates a partial failure\n\nArgs:\ncode (integer): The exit code value for the app.", "source": "juraj-google-style"}
{"code": "def function_from_graph_def(graph_def, inputs, outputs, captures=None):\n\n    def _imports_graph_def():\n        importer.import_graph_def(graph_def, name='')\n        graph = ops.get_default_graph()\n        if captures is not None:\n            for c in captures:\n                graph.add_capture(captures[c], graph.get_tensor_by_name(str(c) + ':0'))\n    wrapped_import = wrap_function(_imports_graph_def, [])\n    import_graph = wrapped_import.graph\n    return wrapped_import.prune(nest.map_structure(import_graph.as_graph_element, inputs), nest.map_structure(import_graph.as_graph_element, outputs))", "docstring": "Creates a ConcreteFunction from a GraphDef.\n\nArgs:\ngraph_def: A GraphDef to make a function out of.\ninputs: A Tensor name or nested structure of names in `graph_def` which\nshould be inputs to the function.\noutputs: A Tensor name or nested structure of names in `graph_def` which\nshould be outputs of the function.\ncaptures: (Optional) A dictionary mapping node names in `graph_def` that\nshould be captured as inputs to tensors containing the value of the\ncaptured inputs.\n\nReturns:\nA ConcreteFunction.", "source": "github-repos"}
{"code": "def init_logger(name='', handler_path_levels=None, level=logging.INFO, formatter=None, formatter_str=None, datefmt='%Y-%m-%d %H:%M:%S'):\n    levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}\n    if (not formatter):\n        if formatter_str:\n            formatter_str = formatter_str\n        else:\n            formatter_str = '%(asctime)s %(levelname)-5s [%(name)s] %(filename)s(%(lineno)s): %(message)s'\n        formatter = logging.Formatter(formatter_str, datefmt=datefmt)\n    logger = (name if isinstance(name, logging.Logger) else logging.getLogger(str(name)))\n    logger.setLevel(level)\n    handler_path_levels = (handler_path_levels or [['', 'INFO']])\n    for each_handler in handler_path_levels:\n        (path, handler_level) = each_handler\n        handler = (logging.FileHandler(path) if path else logging.StreamHandler())\n        handler.setLevel((levels.get(handler_level.upper(), 1) if isinstance(handler_level, str) else handler_level))\n        handler.setFormatter(formatter)\n        logger.addHandler(handler)\n    return logger", "docstring": "Add a default handler for logger.\n\nArgs:\n\nname = '' or logger obj.\n\nhandler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]\n\nlevel = the least level for the logger.\n\nformatter = logging.Formatter(\n'%(levelname)-7s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',\n\"%Y-%m-%d %H:%M:%S\")\n\nformatter_str = '%(levelname)-7s %(asctime)s  %(name)s (%(funcName)s: %(lineno)s): %(message)s'\n\ncustom formatter:\n%(asctime)s  %(created)f  %(filename)s  %(funcName)s  %(levelname)s  %(levelno)s  %(lineno)s   %(message)s   %(module)s    %(name)s   %(pathname)s   %(process)s   %(relativeCreated)s   %(thread)s  %(threadName)s", "source": "codesearchnet"}
{"code": "def from_config(config_file, use_admin=False):\n    with open(config_file) as f:\n        d = json.load(f)\n        user = (d['admin_user'] if use_admin else d['readonly_user'])\n        password = (d['admin_password'] if use_admin else d['readonly_password'])\n        return QueryEngine(host=d['host'], port=d['port'], database=d['database'], user=user, password=password, collection=d['collection'], aliases_config=d.get('aliases_config', None))", "docstring": "Initialize a QueryEngine from a JSON config file generated using mgdb\ninit.\n\nArgs:\nconfig_file:\nFilename of config file.\nuse_admin:\nIf True, the admin user and password in the config file is\nused. Otherwise, the readonly_user and password is used.\nDefaults to False.\n\nReturns:\nQueryEngine", "source": "codesearchnet"}
{"code": "def split(self, n):\n    new_range_filters = []\n    name = self.start[0]\n    prop_cls = self.prop.__class__\n    if (prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS):\n        splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](self.start[2], self.end[2], n, (self.start[1] == '>='), (self.end[1] == '<='))\n        start_filter = (name, '>=', splitpoints[0])\n        for p in splitpoints[1:]:\n            end_filter = (name, '<', p)\n            new_range_filters.append([start_filter, end_filter])\n            start_filter = (name, '>=', p)\n    else:\n        splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](self.start[2], self.end[2], n)\n        start_filter = self.start\n        for p in splitpoints:\n            end_filter = (name, '<', p)\n            new_range_filters.append([start_filter, end_filter])\n            start_filter = (name, '>=', p)\n        new_range_filters.append([start_filter, self.end])\n    for f in new_range_filters:\n        f.extend(self._equality_filters)\n    return [self.__class__(f, self.model_class_path) for f in new_range_filters]", "docstring": "Evenly split this range into contiguous, non overlapping subranges.\n\nArgs:\nn: number of splits.\n\nReturns:\na list of contiguous, non overlapping sub PropertyRanges. Maybe less than\nn when not enough subranges.", "source": "codesearchnet"}
{"code": "def least_loaded_node(self):\n    nodes = [broker.nodeId for broker in self.cluster.brokers()]\n    random.shuffle(nodes)\n    inflight = float('inf')\n    found = None\n    for node_id in nodes:\n        conn = self._conns.get(node_id)\n        connected = ((conn is not None) and conn.connected())\n        blacked_out = ((conn is not None) and conn.blacked_out())\n        curr_inflight = (len(conn.in_flight_requests) if (conn is not None) else 0)\n        if (connected and (curr_inflight == 0)):\n            return node_id\n        elif ((not blacked_out) and (curr_inflight < inflight)):\n            inflight = curr_inflight\n            found = node_id\n    if (found is not None):\n        return found\n    return None", "docstring": "Choose the node with fewest outstanding requests, with fallbacks.\n\nThis method will prefer a node with an existing connection and no\nin-flight-requests. If no such node is found, a node will be chosen\nrandomly from disconnected nodes that are not \"blacked out\" (i.e.,\nare not subject to a reconnect backoff). If no node metadata has been\nobtained, will return a bootstrap node (subject to exponential backoff).\n\nReturns:\nnode_id or None if no suitable node was found", "source": "codesearchnet"}
{"code": "def recursive_create_dir(dirname):\n    recursive_create_dir_v2(dirname)", "docstring": "Creates a directory and all parent/intermediate directories.\n\nIt succeeds if dirname already exists and is writable.\n\nArgs:\ndirname: string, name of the directory to be created\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def populate(self, filename):\n    if os.path.isfile(filename):\n        fid_st = os.stat(filename)\n        self.name = os.path.abspath(filename)\n        self.full_name = filename\n        self.size = fid_st.st_size\n        self.last_modified = fid_st.st_mtime\n        self.last_accessed = fid_st.st_atime\n        self.last_info_changed = fid_st.st_ctime\n        self.location = os.path.dirname(filename)", "docstring": "Finds the file-stats and populates the class with stat values.\n\nArgs:\nfilename (str): name of the file.", "source": "codesearchnet"}
{"code": "def set(self, x: int, y: int, back_r: int, back_g: int, back_b: int, fore_r: int, fore_g: int, fore_b: int, char: str) -> None:\n    i = ((self.width * y) + x)\n    self.back_r[i] = back_r\n    self.back_g[i] = back_g\n    self.back_b[i] = back_b\n    self.fore_r[i] = fore_r\n    self.fore_g[i] = fore_g\n    self.fore_b[i] = fore_b\n    self.char[i] = ord(char)", "docstring": "Set the background color, foreground color and character of one cell.\n\nArgs:\nx (int): X position to change.\ny (int): Y position to change.\nback_r (int): Red background color, from 0 to 255.\nback_g (int): Green background color, from 0 to 255.\nback_b (int): Blue background color, from 0 to 255.\nfore_r (int): Red foreground color, from 0 to 255.\nfore_g (int): Green foreground color, from 0 to 255.\nfore_b (int): Blue foreground color, from 0 to 255.\nchar (AnyStr): A single character str or bytes object.", "source": "codesearchnet"}
{"code": "def compute_metrics(self, previous):\n        \n        delta_t = self.time_difference(previous)\n        delta_x = self.distance(previous)\n        vel = 0\n        delta_v = 0\n        acc = 0\n        if delta_t != 0:\n            vel = delta_x/delta_t\n            delta_v = vel - previous.vel\n            acc = delta_v/delta_t\n\n        self.dt = delta_t\n        self.dx = delta_x\n        self.acc = acc\n        self.vel = vel\n        return self", "docstring": "Computes the metrics of this point\n\nComputes and updates the dt, vel and acc attributes.\n\nArgs:\nprevious (:obj:`Point`): Point before\nReturns:\n:obj:`Point`: Self", "source": "juraj-google-style"}
{"code": "def _get_state_cache_size_bytes(options):\n    max_cache_memory_usage_mb = options.view_as(WorkerOptions).max_cache_memory_usage_mb\n    experiments = options.view_as(DebugOptions).experiments or []\n    for experiment in experiments:\n        if re.match('state_cache_size=', experiment):\n            _LOGGER.warning('--experiments=state_cache_size=X is deprecated and will be removed in future releases.Please use --max_cache_memory_usage_mb=X to set the cache size for user state API and side inputs.')\n            return int(re.match('state_cache_size=(?P<state_cache_size>.*)', experiment).group('state_cache_size')) << 20\n    return max_cache_memory_usage_mb << 20", "docstring": "Return the maximum size of state cache in bytes.\n\nReturns:\nan int indicating the maximum number of bytes to cache.", "source": "github-repos"}
{"code": "def set_memcache_policy(self, func):\n    \n    if func is None:\n      func = self.default_memcache_policy\n    elif isinstance(func, bool):\n      func = lambda unused_key, flag=func: flag\n    self._memcache_policy = func", "docstring": "Set the memcache policy function.\n\nArgs:\nfunc: A function that accepts a Key instance as argument and returns\na bool indicating if it should be cached.  May be None.", "source": "juraj-google-style"}
{"code": "def group_structures(self, s_list, anonymous=False):\n        \n        if self._subset:\n            raise ValueError(\"allow_subset cannot be used with\"\n                             \" group_structures\")\n\n        original_s_list = list(s_list)\n        s_list = self._process_species(s_list)\n\n        \n        if anonymous:\n            c_hash = lambda c: c.anonymized_formula\n        else:\n            c_hash = self._comparator.get_hash\n        s_hash = lambda s: c_hash(s[1].composition)\n        sorted_s_list = sorted(enumerate(s_list), key=s_hash)\n        all_groups = []\n\n        \n        for k, g in itertools.groupby(sorted_s_list, key=s_hash):\n            unmatched = list(g)\n            while len(unmatched) > 0:\n                i, refs = unmatched.pop(0)\n                matches = [i]\n                if anonymous:\n                    inds = filter(lambda i: self.fit_anonymous(refs,\n                            unmatched[i][1]), list(range(len(unmatched))))\n                else:\n                    inds = filter(lambda i: self.fit(refs, unmatched[i][1]),\n                                  list(range(len(unmatched))))\n                inds = list(inds)\n                matches.extend([unmatched[i][0] for i in inds])\n                unmatched = [unmatched[i] for i in range(len(unmatched))\n                             if i not in inds]\n                all_groups.append([original_s_list[i] for i in matches])\n\n        return all_groups", "docstring": "Given a list of structures, use fit to group\nthem by structural equality.\n\nArgs:\ns_list ([Structure]): List of structures to be grouped\nanonymous (bool): Wheher to use anonymous mode.\n\nReturns:\nA list of lists of matched structures\nAssumption: if s1 == s2 but s1 != s3, than s2 and s3 will be put\nin different groups without comparison.", "source": "juraj-google-style"}
{"code": "def _assert_not_running(self):\n    if self.is_alive:\n        raise Error(self._ad, 'Logcat thread is already running, cannot start another one.')", "docstring": "Asserts the logcat service is not running.\n\nRaises:\nError, if the logcat service is running.", "source": "github-repos"}
{"code": "def cmd_ssh_user(tar_aminame, inst_name):\n    if (tar_aminame == 'Unknown'):\n        tar_aminame = inst_name\n    userlu = {'ubunt': 'ubuntu', 'debia': 'admin', 'fedor': 'root', 'cento': 'centos', 'openb': 'root'}\n    usertemp = (['name'] + [value for (key, value) in list(userlu.items()) if (key in tar_aminame.lower())])\n    usertemp = dict(zip(usertemp[::2], usertemp[1::2]))\n    username = usertemp.get('name', 'ec2-user')\n    debg.dprint('loginuser Calculated: ', username)\n    return username", "docstring": "Calculate instance login-username based on image-name.\n\nArgs:\ntar_aminame (str): name of the image instance created with.\ninst_name (str): name of the instance.\nReturns:\nusername (str): name for ssh based on AMI-name.", "source": "codesearchnet"}
{"code": "def decrease_exponent_to(self, new_exp):\n    if (new_exp > self.exponent):\n        raise ValueError(('New exponent %i should be more negative than old exponent %i' % (new_exp, self.exponent)))\n    multiplied = (self * pow(EncodedNumber.BASE, (self.exponent - new_exp)))\n    multiplied.exponent = new_exp\n    return multiplied", "docstring": "Return an EncryptedNumber with same value but lower exponent.\n\nIf we multiply the encoded value by :attr:`EncodedNumber.BASE` and\ndecrement :attr:`exponent`, then the decoded value does not change.\nThus we can almost arbitrarily ratchet down the exponent of an\n`EncryptedNumber` - we only run into trouble when the encoded\ninteger overflows. There may not be a warning if this happens.\n\nWhen adding `EncryptedNumber` instances, their exponents must\nmatch.\n\nThis method is also useful for hiding information about the\nprecision of numbers - e.g. a protocol can fix the exponent of\nall transmitted `EncryptedNumber` instances to some lower bound(s).\n\nArgs:\nnew_exp (int): the desired exponent.\n\nReturns:\nEncryptedNumber: Instance with the same plaintext and desired\nexponent.\n\nRaises:\nValueError: You tried to increase the exponent.", "source": "codesearchnet"}
{"code": "def _evolve(self, state, qargs=None):\n        \n        state = self._format_state(state, density_matrix=True)\n        if qargs is None:\n            if state.shape[0] != self._input_dim:\n                raise QiskitError(\n                    \"QuantumChannel input dimension is not equal to state dimension.\"\n                )\n            shape_in = self._input_dim * self._input_dim\n            shape_out = (self._output_dim, self._output_dim)\n            \n            return np.reshape(\n                np.dot(self._data, np.reshape(state, shape_in, order='F')),\n                shape_out,\n                order='F')\n        \n        return self._evolve_subsystem(state, qargs)", "docstring": "Evolve a quantum state by the QuantumChannel.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nDensityMatrix: the output quantum state as a density matrix.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"}
{"code": "def deserialize_feature_column(config, custom_objects=None, columns_by_name=None):\n    if isinstance(config, six.string_types):\n        return config\n    module_feature_column_classes = {cls.__name__: cls for cls in _FEATURE_COLUMNS}\n    if columns_by_name is None:\n        columns_by_name = {}\n    cls, cls_config = _class_and_config_for_serialized_keras_object(config, module_objects=module_feature_column_classes, custom_objects=custom_objects, printable_module_name='feature_column_v2')\n    if not issubclass(cls, fc_types.FeatureColumn):\n        raise ValueError('Expected FeatureColumn class, instead found: {}'.format(cls))\n    new_instance = cls.from_config(cls_config, custom_objects=custom_objects, columns_by_name=columns_by_name)\n    return columns_by_name.setdefault(_column_name_with_class_name(new_instance), new_instance)", "docstring": "Deserializes a `config` generated with `serialize_feature_column`.\n\nThis method should only be used to deserialize parent FeatureColumns when\nimplementing FeatureColumn.from_config(), else deserialize_feature_columns()\nis preferable. Returns a FeatureColumn for this config.\n\nArgs:\nconfig: A Dict with the serialization of feature columns acquired by\n`serialize_feature_column`, or a string representing a raw column.\ncustom_objects: A Dict from custom_object name to the associated keras\nserializable objects (FeatureColumns, classes or functions).\ncolumns_by_name: A Dict[String, FeatureColumn] of existing columns in order\nto avoid duplication.\n\nRaises:\nValueError if `config` has invalid format (e.g: expected keys missing,\nor refers to unknown classes).\n\nReturns:\nA FeatureColumn corresponding to the input `config`.", "source": "github-repos"}
{"code": "def UsageText(component, trace=None, verbose=False):\n    if trace:\n        command = trace.GetCommand()\n        needs_separating_hyphen_hyphen = trace.NeedsSeparatingHyphenHyphen()\n    else:\n        command = None\n        needs_separating_hyphen_hyphen = False\n    if not command:\n        command = ''\n    continued_command = command\n    spec = inspectutils.GetFullArgSpec(component)\n    metadata = decorators.GetMetadata(component)\n    actions_grouped_by_kind = _GetActionsGroupedByKind(component, verbose=verbose)\n    possible_actions = _GetPossibleActions(actions_grouped_by_kind)\n    continuations = []\n    if possible_actions:\n        continuations.append(_GetPossibleActionsUsageString(possible_actions))\n    availability_lines = _UsageAvailabilityLines(actions_grouped_by_kind)\n    if callable(component):\n        callable_items = _GetCallableUsageItems(spec, metadata)\n        if callable_items:\n            continuations.append(' '.join(callable_items))\n        elif trace:\n            continuations.append(trace.separator)\n        availability_lines.extend(_GetCallableAvailabilityLines(spec))\n    if continuations:\n        continued_command += ' ' + ' | '.join(continuations)\n    help_command = command + (' -- ' if needs_separating_hyphen_hyphen else ' ') + '--help'\n    return f'Usage: {continued_command}\\n{''.join(availability_lines)}\\nFor detailed information on this command, run:\\n  {help_command}'", "docstring": "Returns usage text for the given component.\n\nArgs:\ncomponent: The component to determine the usage text for.\ntrace: The Fire trace object containing all metadata of current execution.\nverbose: Whether to display the usage text in verbose mode.\n\nReturns:\nString suitable for display in an error screen.", "source": "github-repos"}
{"code": "def from_string(cls, s, name=None, modules=None, active=None):\n    r = cls(name=name, modules=modules, active=active)\n    _parse_repp(s.splitlines(), r, None)\n    return r", "docstring": "Instantiate a REPP from a string.\n\nArgs:\nname (str, optional): the name of the REPP module\nmodules (dict, optional): a mapping from identifiers to\nREPP modules\nactive (iterable, optional): an iterable of default module\nactivations", "source": "codesearchnet"}
{"code": "def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):\n        \n        UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))\n        header = 'NCOLS %d\\n' \\\n                 'NROWS %d\\n' \\\n                 'XLLCENTER %f\\n' \\\n                 'YLLCENTER %f\\n' \\\n                 'CELLSIZE %f\\n' \\\n                 'NODATA_VALUE %f' % (xsize, ysize, geotransform[0] + 0.5 * geotransform[1],\n                                      geotransform[3] - (ysize - 0.5) * geotransform[1],\n                                      geotransform[1], nodata_value)\n\n        with open(filename, 'w', encoding='utf-8') as f:\n            f.write(header)\n            for i in range(0, ysize):\n                for j in range(0, xsize):\n                    f.write('%s\\t' % repr(data[i][j]))\n                f.write('\\n')\n        f.close()", "docstring": "Output Raster to ASCII file.\n\nArgs:\nfilename: output ASCII filename.\ndata: 2D array data.\nxsize: Col count.\nysize: Row count.\ngeotransform: geographic transformation.\nnodata_value: nodata_flow value.", "source": "juraj-google-style"}
{"code": "def clip_and_copy_attack_outputs(self, attack_name, is_targeted):\n    if is_targeted:\n        self._targeted_attack_names.add(attack_name)\n    else:\n        self._attack_names.add(attack_name)\n    attack_dir = os.path.join((self.targeted_attacks_output_dir if is_targeted else self.attacks_output_dir), attack_name)\n    for fname in os.listdir(attack_dir):\n        if (not (fname.endswith('.png') or fname.endswith('.jpg'))):\n            continue\n        image_id = fname[:(- 4)]\n        if (image_id not in self.dataset_max_clip):\n            continue\n        image_max_clip = self.dataset_max_clip[image_id]\n        image_min_clip = self.dataset_min_clip[image_id]\n        adversarial_image = np.array(Image.open(os.path.join(attack_dir, fname)).convert('RGB'))\n        clipped_adv_image = np.clip(adversarial_image, image_min_clip, image_max_clip)\n        output_basename = '{0:08d}'.format(self._output_image_idx)\n        self._output_image_idx += 1\n        self._output_to_attack_mapping[output_basename] = (attack_name, is_targeted, image_id)\n        if is_targeted:\n            self._targeted_attack_image_count += 1\n        else:\n            self._attack_image_count += 1\n        Image.fromarray(clipped_adv_image).save(os.path.join(self.all_adv_examples_dir, (output_basename + '.png')))", "docstring": "Clips results of attack and copy it to directory with all images.\n\nArgs:\nattack_name: name of the attack.\nis_targeted: if True then attack is targeted, otherwise non-targeted.", "source": "codesearchnet"}
{"code": "def get_servo_temperature(self):\n    data = []\n    data.append(9)\n    data.append(self.servoid)\n    data.append(RAM_READ_REQ)\n    data.append(TEMPERATURE_RAM)\n    data.append(BYTE2)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(13)\n        return ord(rxdata[9])\n    except HerkulexError:\n        raise HerkulexError('Could not communicate with motors')", "docstring": "Gets the current temperature of Herkulex\n\nArgs:\nnone\n\nReturns:\nint: the current temperature register of Herkulex\n\nRaises:\nSerialException: Error occured while opening serial port", "source": "codesearchnet"}
{"code": "def max_validator(max_value):\n\n    def validator(value):\n        if (value > max_value):\n            raise ValidationError('{} is not <= {}'.format(value, max_value))\n    return validator", "docstring": "Return validator function that ensures upper bound of a number.\n\nResult validation function will validate the internal value of resource\ninstance field with the ``value >= min_value`` check.\n\nArgs:\nmax_value: maximum value for new validator", "source": "codesearchnet"}
{"code": "def _GetAttributeScripts(self, attribute_data, dest_dir):\n    script_dict = {}\n    attribute_data = (attribute_data or {})\n    metadata_key = ('%s-script' % self.script_type)\n    metadata_value = attribute_data.get(metadata_key)\n    if metadata_value:\n        self.logger.info('Found %s in metadata.', metadata_key)\n        with tempfile.NamedTemporaryFile(mode='w', dir=dest_dir, delete=False) as dest:\n            dest.write(metadata_value.lstrip())\n            script_dict[metadata_key] = dest.name\n    metadata_key = ('%s-script-url' % self.script_type)\n    metadata_value = attribute_data.get(metadata_key)\n    if metadata_value:\n        self.logger.info('Found %s in metadata.', metadata_key)\n        script_dict[metadata_key] = self._DownloadScript(metadata_value, dest_dir)\n    return script_dict", "docstring": "Retrieve the scripts from attribute metadata.\n\nArgs:\nattribute_data: dict, the contents of the attributes metadata.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\ndict, a dictionary mapping metadata keys to files storing scripts.", "source": "codesearchnet"}
{"code": "def to(self, new_unit):\n    return FloatWithUnit((self * self.unit.get_conversion_factor(new_unit)), unit_type=self._unit_type, unit=new_unit)", "docstring": "Conversion to a new_unit. Right now, only supports 1 to 1 mapping of\nunits of each type.\n\nArgs:\nnew_unit: New unit type.\n\nReturns:\nA FloatWithUnit object in the new units.\n\nExample usage:\n>>> e = Energy(1.1, \"eV\")\n>>> e = Energy(1.1, \"Ha\")\n>>> e.to(\"eV\")\n29.932522246 eV", "source": "codesearchnet"}
{"code": "def get_sparsity_modes(model_object):\n    if not model_object or not model_object.metadata:\n        return []\n    result = set()\n    for subgraph in model_object.subgraphs:\n        for tensor in subgraph.tensors:\n            if not tensor.sparsity:\n                continue\n            if tensor.sparsity.blockMap.size == 0 or not tensor.sparsity.blockMap:\n                result.add(conversion_metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY)\n            else:\n                result.add(conversion_metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY)\n    return list(result)", "docstring": "Get sparsity modes used in a tflite model.\n\nThe sparsity modes are listed in conversion_metadata.fbs file.\n\nArgs:\nmodel_object: A tflite model in object form.\n\nReturns:\nThe list of sparsity modes used in the model.", "source": "github-repos"}
{"code": "def write(self, x: int, y: int, text: str, transposed_text: 'Optional[str]'=None):\n    entry = self.entries.get((x, y), _DiagramText('', ''))\n    self.entries[(x, y)] = _DiagramText((entry.text + text), (entry.transposed_text + (transposed_text if transposed_text else text)))", "docstring": "Adds text to the given location.\n\nArgs:\nx: The column in which to write the text.\ny: The row in which to write the text.\ntext: The text to write at location (x, y).\ntransposed_text: Optional text to write instead, if the text\ndiagram is transposed.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    win_registry_reader = FileObjectWinRegistryFileReader()\n    try:\n        registry_file = win_registry_reader.Open(file_object)\n    except IOError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to open Windows Registry file with error: {0!s}'.format(exception))\n        return\n    win_registry = dfwinreg_registry.WinRegistry()\n    key_path_prefix = win_registry.GetRegistryFileMapping(registry_file)\n    registry_file.SetKeyPathPrefix(key_path_prefix)\n    root_key = registry_file.GetRootKey()\n    if (not root_key):\n        return\n    registry_find_specs = getattr(parser_mediator.artifacts_filter_helper, 'registry_find_specs', None)\n    if (not registry_find_specs):\n        try:\n            self._ParseRecurseKeys(parser_mediator, root_key)\n        except IOError as exception:\n            parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))\n    else:\n        artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper\n        if (not artifacts_filter_helper.CheckKeyCompatibility(key_path_prefix)):\n            logger.warning('Artifacts filters are not supported for Windows Registry file with key path prefix: \"{0:s}\".'.format(key_path_prefix))\n        else:\n            try:\n                win_registry.MapFile(key_path_prefix, registry_file)\n                self._ParseKeysFromFindSpecs(parser_mediator, win_registry, registry_find_specs)\n            except IOError as exception:\n                parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))", "docstring": "Parses a Windows Registry file-like object.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): a file-like object.", "source": "codesearchnet"}
{"code": "def copy_image_on_background(image, color=WHITE):\n    \n    background = Image.new(\"RGB\", image.size, color)\n    background.paste(image, mask=image.split()[3])\n    return background", "docstring": "Create a new image by copying the image on a *color* background.\n\nArgs:\nimage (PIL.Image.Image): Image to copy\ncolor (tuple): Background color usually WHITE or BLACK\n\nReturns:\nPIL.Image.Image", "source": "juraj-google-style"}
{"code": "def _fill_table_entry(self, row, col):\n        \n        self.observation_table[row, col] = self._membership_query(row + col)", "docstring": "Fill an entry of the observation table.\nArgs:\nrow (str): The row of the observation table\ncol (str): The column of the observation table\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}):\n        \n        names = kwargs.get(\"names\", None)\n        index_col = kwargs.get(\"index_col\", None)\n        if names is None:\n            \n            \n            \n            \n            kwargs[\"index_col\"] = None\n            names = pandas.read_csv(\n                file_open(filepath, \"rb\"), **dict(kwargs, nrows=0, skipfooter=0)\n            ).columns\n            kwargs[\"index_col\"] = index_col\n\n        empty_pd_df = pandas.read_csv(\n            file_open(filepath, \"rb\"), **dict(kwargs, nrows=0, skipfooter=0)\n        )\n        column_names = empty_pd_df.columns\n        skipfooter = kwargs.get(\"skipfooter\", None)\n        skiprows = kwargs.pop(\"skiprows\", None)\n        parse_dates = kwargs.pop(\"parse_dates\", False)\n        partition_kwargs = dict(\n            kwargs,\n            header=None,\n            names=names,\n            skipfooter=0,\n            skiprows=None,\n            parse_dates=parse_dates,\n        )\n        with file_open(filepath, \"rb\") as f:\n            \n            prefix = b\"\"\n            if kwargs.get(\"encoding\", None) is not None:\n                prefix = f.readline()\n                partition_kwargs[\"skiprows\"] = 1\n                f.seek(0, os.SEEK_SET)  \n\n            prefix_id = ray.put(prefix)\n            partition_kwargs_id = ray.put(partition_kwargs)\n            \n            \n            kwargs[\"skiprows\"] = skiprows\n            cls._skip_header(f, kwargs)\n            \n            partition_ids = []\n            index_ids = []\n            total_bytes = file_size(f)\n            \n            num_parts = cls.frame_mgr_cls._compute_num_partitions()\n            \n            num_splits = min(len(column_names), num_parts)\n            \n            chunk_size = max(1, (total_bytes - f.tell()) \n\n            while f.tell() < total_bytes:\n                start = f.tell()\n                f.seek(chunk_size, os.SEEK_CUR)\n                f.readline()  \n                partition_id = cls.read_csv_remote_task._remote(\n                    args=(\n                        filepath,\n                        num_splits,\n                        start,\n                        f.tell(),\n                        partition_kwargs_id,\n                        prefix_id,\n                    ),\n                    num_return_vals=num_splits + 1,\n                )\n                partition_ids.append(\n                    [cls.frame_partition_cls(obj) for obj in partition_id[:-1]]\n                )\n                index_ids.append(partition_id[-1])\n\n        if index_col is None:\n            new_index = pandas.RangeIndex(sum(ray.get(index_ids)))\n        else:\n            new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids)\n            new_index = ray.get(new_index_ids)\n\n        \n        \n        \n        \n        if parse_dates is not None:\n            \n            if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):\n                for group in parse_dates:\n                    new_col_name = \"_\".join(group)\n                    column_names = column_names.drop(group).insert(0, new_col_name)\n            \n            elif isinstance(parse_dates, dict):\n                for new_col_name, group in parse_dates.items():\n                    column_names = column_names.drop(group).insert(0, new_col_name)\n\n        new_query_compiler = cls.query_compiler_cls(\n            
cls.frame_mgr_cls(np.array(partition_ids)), new_index, column_names\n        )\n\n        if skipfooter:\n            new_query_compiler = new_query_compiler.drop(\n                new_query_compiler.index[-skipfooter:]\n            )\n        if kwargs.get(\"squeeze\", False) and len(new_query_compiler.columns) == 1:\n            return new_query_compiler[new_query_compiler.columns[0]]\n        return new_query_compiler", "docstring": "Constructs a DataFrame from a CSV file.\n\nArgs:\nfilepath (str): path to the CSV file.\nnpartitions (int): number of partitions for the DataFrame.\nkwargs (dict): args excluding filepath provided to read_csv.\n\nReturns:\nDataFrame or Series constructed from CSV file.", "source": "juraj-google-style"}
{"code": "def resize_image(buf, width, height, num_channels, new_width, new_height):\n    \n    new_size = new_width * new_height * num_channels\n    input_pixels = ffi.from_buffer(buf)\n    output_pixels = ffi.new('unsigned char[]', new_size)\n\n    result = lib.stbir_resize_uint8(\n        ffi.cast('unsigned char*', input_pixels), width, height, 0,\n        output_pixels, new_width, new_height, 0, num_channels\n    )\n\n    if not result:\n        raise ResizeError()\n\n    return ffi.buffer(output_pixels, new_size)", "docstring": "Resize an image\n\nArgs:\nbuf (Buffer): Buffer coming from `load_image`\nwidth (int): Width of `buf`\nheight (int): Height of `buf`\nnum_channels (int): Number of channels in `buf` (RGBA=4)\nnew_width (int): Desired width\nnew_height (int): Desired height\n\nReturns:\nBuffer: Resized image\n\nRaises:\nResizeError: If an error occurs during resize", "source": "juraj-google-style"}
{"code": "def animate_cli(animation_, step, event):\n    \n    while True:  \n        time.sleep(step)\n        frame = next(animation_)\n        sys.stdout.write(frame)\n        sys.stdout.flush()\n        if event.is_set():\n            break\n    sys.stdout.write(animation_.get_erase_frame())\n    sys.stdout.flush()\n    animation_.reset()", "docstring": "Print out the animation cycle to stdout. This function is for use with\nsynchronous functions and must be run in a thread.\n\nArgs:\nanimation_ (generator): A generator that produces strings for the\nanimation. Should be endless.\nstep (float): Seconds between each animation frame.", "source": "juraj-google-style"}
{"code": "def get_template(self, template_id):\n        \n        request = self._get_request()\n        return request.get(self.TEMPLATE_GET_URL + template_id)", "docstring": "Gets a Template which includes a list of Accounts that can access it\n\nArgs:\n\ntemplate_id (str): The id of the template to retrieve\n\nReturns:\nA Template object", "source": "juraj-google-style"}
{"code": "def new_reviewer(self, name, anomalous=None):\n    n = self._reviewer_cls(self, name=name, credibility=self.credibility, anomalous=anomalous)\n    self.graph.add_node(n)\n    self.reviewers.append(n)\n    return n", "docstring": "Create a new reviewer.\n\nArgs:\nname: name of the new reviewer.\nanomalous: initial anomalous score. (default: None)\n\nReturns:\nA new reviewer instance.", "source": "codesearchnet"}
{"code": "def send_rpc_request(self, request):", "docstring": "Sends the JSON RPC request to the server and gets a response.\n\nNote that the request and response are both in string format. So if the\nconnection with server provides interfaces in bytes format, please\ntransform them to string in the implementation of this function.\n\nArgs:\nrequest: str, a string of the RPC request.\n\nReturns:\nA string of the RPC response.\n\nRaises:\nerrors.ProtocolError: something went wrong when exchanging data with the\nserver.", "source": "github-repos"}
{"code": "def assemble_buffer(self, buf_header, buf_payload):\n    if (self.header.get('num_buffers', 0) <= len(self._buffers)):\n        raise ProtocolError(('too many buffers received expecting ' + str(self.header['num_buffers'])))\n    self._buffers.append((buf_header, buf_payload))", "docstring": "Add a buffer header and payload that we read from the socket.\n\nThis differs from add_buffer() because we're validating vs.\nthe header's num_buffers, instead of filling in the header.\n\nArgs:\nbuf_header (``JSON``) : a buffer header\nbuf_payload (``JSON`` or bytes) : a buffer payload\n\nReturns:\nNone\n\nRaises:\nProtocolError", "source": "codesearchnet"}
{"code": "def upload_file(self, url, file, callback=None, extra_headers={}):\n        \n        extra_headers = extra_headers.copy()\n        response = None\n        if os.stat(file.name).st_size == 0:\n            raise CommError(\"%s is an empty file\" % file.name)\n        try:\n            progress = Progress(file, callback=callback)\n            response = requests.put(\n                url, data=progress, headers=extra_headers)\n            response.raise_for_status()\n        except requests.exceptions.RequestException as e:\n            total = progress.len\n            status = self._status_request(url, total)\n            \n            \n            if status.status_code in (308, 408, 500, 502, 503, 504):\n                util.sentry_reraise(retry.TransientException(exc=e))\n            else:\n                util.sentry_reraise(e)\n\n        return response", "docstring": "Uploads a file to W&B with failure resumption\n\nArgs:\nurl (str): The url to download\nfile (str): The path to the file you want to upload\ncallback (:obj:`func`, optional): A callback which is passed the number of\nbytes uploaded since the last time it was called, used to report progress\n\nReturns:\nThe requests library response object", "source": "juraj-google-style"}
{"code": "def get_padding(x, padding_value=0):\n    with tf.name_scope('padding'):\n        return tf.to_float(tf.equal(x, padding_value))", "docstring": "Return float tensor representing the padding values in x.\n\nArgs:\nx: int tensor with any shape\npadding_value: int value that\n\nReturns:\nflaot tensor with same shape as x containing values 0 or 1.\n0 -> non-padding, 1 -> padding", "source": "codesearchnet"}
{"code": "def _get_tensors(graph, signature_def_tensor_names=None, user_tensor_names=None):\n    tensors = []\n    if user_tensor_names:\n        user_tensor_names = sorted(user_tensor_names)\n        tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)\n    elif signature_def_tensor_names:\n        tensors = [graph.get_tensor_by_name(name) for name in sorted(signature_def_tensor_names)]\n    else:\n        raise ValueError('Specify either signature_def_tensor_names or user_tensor_names')\n    return tensors", "docstring": "Gets the tensors associated with the tensor names.\n\nEither signature_def_tensor_names or user_tensor_names should be provided. If\nthe user provides tensors, the tensors associated with the user provided\ntensor names are provided. Otherwise, the tensors associated with the names in\nthe SignatureDef are provided.\n\nArgs:\ngraph: GraphDef representing graph.\nsignature_def_tensor_names: Tensor names stored in either the inputs or\noutputs of a SignatureDef. (default None)\nuser_tensor_names: Tensor names provided by the user. (default None)\n\nReturns:\nList of tensors.\n\nRaises:\nValueError:\nsignature_def_tensors and user_tensor_names are undefined or empty.\nuser_tensor_names are not valid.", "source": "github-repos"}
{"code": "def generate_nearest_neighbour_lookup_table( self ):\n        \n        self.jump_probability = {}\n        for site_label_1 in self.connected_site_pairs:\n            self.jump_probability[ site_label_1 ] = {}\n            for site_label_2 in self.connected_site_pairs[ site_label_1 ]:\n                self.jump_probability[ site_label_1 ][ site_label_2 ] = {}\n                for coordination_1 in range( self.max_coordination_per_site[ site_label_1 ] ):\n                    self.jump_probability[ site_label_1 ][ site_label_2 ][ coordination_1 ] = {}\n                    for coordination_2 in range( 1, self.max_coordination_per_site[ site_label_2 ] + 1 ):\n                        self.jump_probability[ site_label_1 ][ site_label_2 ][ coordination_1 ][ coordination_2 ] = self.relative_probability( site_label_1, site_label_2, coordination_1, coordination_2 )", "docstring": "Construct a look-up table of relative jump probabilities for a nearest-neighbour interaction Hamiltonian.\n\nArgs:\nNone.\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def sample_static_prior(self, samples, batch_size, fixed=False):\n    dist = self.static_prior()\n    if fixed:\n        sample = (dist.sample((samples, 1)) + tf.zeros([batch_size, 1]))\n    else:\n        sample = dist.sample((samples, batch_size))\n    return (sample, dist)", "docstring": "Sample the static latent prior.\n\nArgs:\nsamples: Number of samples to draw from the latent distribution.\nbatch_size: Number of sequences to sample.\nfixed: Boolean for whether or not to share the same random\nsample across all sequences.\n\nReturns:\nA tuple of a sample tensor of shape [samples, batch_size,\nlatent_size], and a MultivariateNormalDiag distribution from which\nthe tensor was sampled, with event shape [latent_size], and batch\nshape [].", "source": "codesearchnet"}
{"code": "def read(self, size=(- 1)):\n    self._check_open()\n    if (not self._remaining()):\n        return ''\n    data_list = []\n    while True:\n        remaining = self._buffer.remaining()\n        if ((size >= 0) and (size < remaining)):\n            data_list.append(self._buffer.read(size))\n            self._offset += size\n            break\n        else:\n            size -= remaining\n            self._offset += remaining\n            data_list.append(self._buffer.read())\n            if (self._buffer_future is None):\n                if ((size < 0) or (size >= self._remaining())):\n                    needs = self._remaining()\n                else:\n                    needs = size\n                data_list.extend(self._get_segments(self._offset, needs))\n                self._offset += needs\n                break\n            if self._buffer_future:\n                self._buffer.reset(self._buffer_future.get_result())\n                self._buffer_future = None\n    if (self._buffer_future is None):\n        self._request_next_buffer()\n    return ''.join(data_list)", "docstring": "Read data from RAW file.\n\nArgs:\nsize: Number of bytes to read as integer. Actual number of bytes\nread is always equal to size unless EOF is reached. If size is\nnegative or unspecified, read the entire file.\n\nReturns:\ndata read as str.\n\nRaises:\nIOError: When this buffer is closed.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if not self.add_bos_token:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0)\n    return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def disease_terms(self, hgnc_id=None):\n        \n        query = {}\n        if hgnc_id:\n            LOG.debug(\"Fetching all diseases for gene %s\", hgnc_id)\n            query['genes'] = hgnc_id\n        else:\n            LOG.info(\"Fetching all disease terms\")\n\n        return list(self.disease_term_collection.find(query))", "docstring": "Return all disease terms that overlaps a gene\n\nIf no gene, return all disease terms\n\nArgs:\nhgnc_id(int)\n\nReturns:\niterable(dict): A list with all disease terms that match", "source": "juraj-google-style"}
{"code": "def list_address(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(LIST_ADDRESS, id=id, endpoint=endpoint)", "docstring": "Lists all the addresses in the current wallet.\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def build_backward_pass_step(get_transition_matrix_for_timestep):\n  \n\n  def backward_pass_step(state,\n                         filtered_parameters):\n    \n\n    (filtered_mean, filtered_cov,\n     predicted_mean, predicted_cov) = filtered_parameters\n    transition_matrix = get_transition_matrix_for_timestep(state.timestep)\n\n    next_posterior_mean = state.backward_mean\n    next_posterior_cov = state.backward_cov\n\n    posterior_mean, posterior_cov = backward_smoothing_update(\n        filtered_mean,\n        filtered_cov,\n        predicted_mean,\n        predicted_cov,\n        next_posterior_mean,\n        next_posterior_cov,\n        transition_matrix)\n\n    return BackwardPassState(backward_mean=posterior_mean,\n                             backward_cov=posterior_cov,\n                             timestep=state.timestep-1)\n\n  return backward_pass_step", "docstring": "Build a callable that perform one step for backward smoothing.\n\nArgs:\nget_transition_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[latent_size, latent_size]`.\n\nReturns:\nbackward_pass_step: a callable that updates a BackwardPassState\nfrom timestep `t` to `t-1`.", "source": "juraj-google-style"}
{"code": "def _validate(self):\n    for key in self:\n        if (key not in DEFAULTS):\n            raise exceptions.ConfigurationException('Unknown configuration key \"{}\"! Valid configuration keys are {}'.format(key, list(DEFAULTS.keys())))\n    validate_queues(self['queues'])\n    validate_bindings(self['bindings'])\n    validate_client_properties(self['client_properties'])", "docstring": "Perform checks on the configuration to assert its validity\n\nRaises:\nConfigurationException: If the configuration is invalid.", "source": "codesearchnet"}
{"code": "def remove_empty_keys(values, remove=({}, None, [], 'null')):\n    \n    if isinstance(values, dict):\n        return {key: remove_empty_keys(value, remove=remove)\n                for key, value in deepcopy(values).items() if value not in remove}\n    if isinstance(values, list):\n        return [remove_empty_keys(value, remove=remove)\n                for value in deepcopy(values) if value not in remove]\n\n    return values", "docstring": "Recursively remove key/value pairs where the value is in ``remove``.\n\nThis is targeted at comparing json-e rebuilt task definitions, since\njson-e drops key/value pairs with empty values.\n\nArgs:\nvalues (dict/list): the dict or list to remove empty keys from.\n\nReturns:\nvalues (dict/list): a dict or list copy, with empty keys removed.", "source": "juraj-google-style"}
{"code": "def aggregate(all_stats):\n    aggregate_stats = {'means': [], 'standard_deviations': []}\n    for optimizer_key in all_stats:\n        mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])\n        mean_stats['name'] = optimizer_key\n        aggregate_stats['means'].append(mean_stats)\n        sd_stats = copy.deepcopy(all_stats[optimizer_key]['standard_deviation'])\n        sd_stats['name'] = optimizer_key\n        aggregate_stats['standard_deviations'].append(sd_stats)\n    _add_mean_sd_to_stats(aggregate_stats, 'means')\n    return aggregate_stats", "docstring": "Combine stats for multiple optimizers to obtain one mean and sd.\n\nUseful for combining stats for the same optimizer class and multiple problems.\n\nArgs:\nall_stats: dict; output from compare.", "source": "codesearchnet"}
{"code": "def isfunc(x):\n    \n    return any([\n        inspect.isfunction(x) and not asyncio.iscoroutinefunction(x),\n        inspect.ismethod(x) and not asyncio.iscoroutinefunction(x)\n    ])", "docstring": "Returns `True` if the given value is a function or method object.\n\nArguments:\nx (mixed): value to check.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def JoinPath(self, path_segments):\n    \n    \n    \n\n    \n    path_segments = [\n        segment.split(self.PATH_SEPARATOR) for segment in path_segments]\n\n    \n    path_segments = [\n        element for sublist in path_segments for element in sublist]\n\n    \n    path_segments = list(filter(None, path_segments))\n\n    return '{0:s}{1:s}'.format(\n        self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))", "docstring": "Joins the path segments into a path.\n\nArgs:\npath_segments (list[str]): path segments.\n\nReturns:\nstr: joined path segments prefixed with the path separator.", "source": "juraj-google-style"}
{"code": "def __init__(self, string_table):\n    self._string_table = string_table\n    self._function_key_to_function = {}", "docstring": "Constructor.\n\nArgs:\nstring_table: A `StringTable` object.", "source": "github-repos"}
{"code": "def on_run_start(self, request):", "docstring": "Callback invoked on run() calls to the debug-wrapper session.\n\nThis is a blocking callback.\nThe invocation happens after the wrapper's run() call is entered,\nafter an increment of run call counter.\n\nArgs:\nrequest: (`OnRunStartRequest`) callback request object carrying\ninformation about the run call such as the fetches, feed dict, run\noptions, run metadata, and how many `run()` calls to this wrapper\nsession have occurred.\n\nReturns:\nAn instance of `OnRunStartResponse`, carrying information to\ndebug URLs used to watch the tensors.", "source": "github-repos"}
{"code": "def _create_topk_unique(inputs, k):\n  \n  height = inputs.shape[0]\n  width = inputs.shape[1]\n  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)\n  ones = tf.ones([height, width], dtype=tf.float32)\n  neg_inf_r2 = ones * neg_inf_r0\n  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)\n\n  \n  \n  \n  tmp = inputs\n  topk_r2 = tf.zeros([height, k], dtype=tf.float32)\n  for i in range(k):\n    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)\n    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),\n                     [height, 1])\n    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)\n    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))\n    tmp = tf.where(ge_r2, neg_inf_r2, inputs)\n\n  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))\n  next_power_of_two = 1 << log2_ceiling\n  count_mask = next_power_of_two - 1\n  mask_r0 = tf.constant(count_mask)\n  mask_r2 = tf.fill([height, k], mask_r0)\n  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)\n  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)\n  return topk_r2, topk_indices_r2", "docstring": "Creates the top k values in sorted order with indices.\n\nArgs:\ninputs: A tensor with rank of 2. [batch_size, original_size].\nk: An integer, number of top elements to select.\n\nReturns:\ntopk_r2: A tensor, the k largest elements. [batch_size, k].\ntopk_indices_r2: A tensor, indices of the top k values. [batch_size, k].", "source": "juraj-google-style"}
{"code": "def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):\n  \n  deps = set()\n  for source in rdf_artifact.sources:\n    \n    \n    \n    if source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT,\n                       rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP):\n      if source.attributes.GetItem(\"names\"):\n        deps.update(source.attributes.GetItem(\"names\"))\n\n  if depth > 10:\n    raise RuntimeError(\"Max artifact recursion depth reached.\")\n\n  deps_set = set(deps)\n  if recursive:\n    for dep in deps:\n      artifact_obj = REGISTRY.GetArtifact(dep)\n      new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)\n      if new_dep:\n        deps_set.update(new_dep)\n\n  return deps_set", "docstring": "Return a set of artifact dependencies.\n\nArgs:\nrdf_artifact: RDF object artifact.\nrecursive: If True recurse into dependencies to find their dependencies.\ndepth: Used for limiting recursion depth.\n\nReturns:\nA set of strings containing the dependent artifact names.\n\nRaises:\nRuntimeError: If maximum recursion depth reached.", "source": "juraj-google-style"}
{"code": "def print_live_output(self):\n    if self.block:\n        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)\n    else:\n        while (self.thread.is_alive() or (self.old_output_size < len(self.output)) or (self.old_error_size < len(self.error))):\n            if ((self._stdout is not None) and (len(self.output) > self.old_output_size)):\n                while (self.old_output_size < len(self.output)):\n                    self.logger.info(self.output[self.old_output_size])\n                    self.old_output_size += 1\n            if ((self._stderr is not None) and (len(self.error) > self.old_error_size)):\n                while (self.old_error_size < len(self.error)):\n                    self.logger.error(self.error[self.old_error_size])\n                    self.old_error_size += 1", "docstring": "Block and print the output of the command\n\nRaises:\nTypeError: If command is blocking", "source": "codesearchnet"}
{"code": "def execute_phase(self, phase):\n    repeat_count = 1\n    repeat_limit = (phase.options.repeat_limit or sys.maxsize)\n    while (not self._stopping.is_set()):\n        is_last_repeat = (repeat_count >= repeat_limit)\n        phase_execution_outcome = self._execute_phase_once(phase, is_last_repeat)\n        if (phase_execution_outcome.is_repeat and (not is_last_repeat)):\n            repeat_count += 1\n            continue\n        return phase_execution_outcome\n    return PhaseExecutionOutcome(None)", "docstring": "Executes a phase or skips it, yielding PhaseExecutionOutcome instances.\n\nArgs:\nphase: Phase to execute.\n\nReturns:\nThe final PhaseExecutionOutcome that wraps the phase return value\n(or exception) of the final phase run. All intermediary results, if any,\nare REPEAT and handled internally. Returning REPEAT here means the phase\nhit its limit for repetitions.", "source": "codesearchnet"}
{"code": "def render_text(text, preformatted=False):\n    return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))", "docstring": "Return text formatted as a HTML\n\nArgs:\ntext: the text to render\npreformatted: whether the text should be rendered as preformatted", "source": "codesearchnet"}
{"code": "def save_pretrained(self, save_directory: Union[str, os.PathLike], safe_serialization: bool=True, **kwargs):\n    use_auth_token = kwargs.pop('use_auth_token', None)\n    if use_auth_token is not None:\n        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n        if kwargs.get('token', None) is not None:\n            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n        kwargs['token'] = use_auth_token\n    if os.path.isfile(save_directory):\n        logger.error(f'Provided path ({save_directory}) should be a directory, not a file')\n        return\n    os.makedirs(save_directory, exist_ok=True)\n    if hasattr(self, '_registered_impl'):\n        pipeline_info = self._registered_impl.copy()\n        custom_pipelines = {}\n        for task, info in pipeline_info.items():\n            if info['impl'] != self.__class__:\n                continue\n            info = info.copy()\n            module_name = info['impl'].__module__\n            last_module = module_name.split('.')[-1]\n            info['impl'] = f'{last_module}.{info['impl'].__name__}'\n            info['pt'] = tuple((c.__name__ for c in info['pt']))\n            info['tf'] = tuple((c.__name__ for c in info['tf']))\n            custom_pipelines[task] = info\n        self.model.config.custom_pipelines = custom_pipelines\n        custom_object_save(self, save_directory)\n    kwargs['safe_serialization'] = safe_serialization\n    self.model.save_pretrained(save_directory, **kwargs)\n    if self.tokenizer is not None:\n        self.tokenizer.save_pretrained(save_directory, **kwargs)\n    if self.feature_extractor is not None:\n        self.feature_extractor.save_pretrained(save_directory, **kwargs)\n    if self.image_processor is not None:\n        self.image_processor.save_pretrained(save_directory, **kwargs)\n    if self.modelcard is not None:\n        self.modelcard.save_pretrained(save_directory)", "docstring": "Save the pipeline's model and tokenizer.\n\nArgs:\nsave_directory (`str` or `os.PathLike`):\nA path to the directory where to saved. It will be created if it doesn't exist.\nsafe_serialization (`str`):\nWhether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow.\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.", "source": "github-repos"}
{"code": "def set_position_p(self, pvalue):\n    pvalue_msb = (int(pvalue) >> 8)\n    pvalue_lsb = (int(pvalue) & 255)\n    data = []\n    data.append(11)\n    data.append(self.servoid)\n    data.append(RAM_WRITE_REQ)\n    data.append(POSITION_KP_RAM)\n    data.append(BYTE2)\n    data.append(pvalue_lsb)\n    data.append(pvalue_msb)\n    send_data(data)", "docstring": "Set the P gain of the  position PID\n\nArgs:\n\npvalue (int): P value", "source": "codesearchnet"}
{"code": "def _forward(self):\n    try:\n        self.current_token = next(self.tokens)\n    except StopIteration:\n        raise MissingTokensError(('Unexpected end of token stream at %d.' % self.current_pos))\n    self.current_pos += 1", "docstring": "Advance to the next token.\n\nInternal methods, updates:\n- self.current_token\n- self.current_pos\n\nRaises:\nMissingTokensError: when trying to advance beyond the end of the\ntoken flow.", "source": "codesearchnet"}
{"code": "def build_image(registry, image):\n    if (':' in image['name']):\n        (_, tag) = image['name'].split(':', 1)\n    else:\n        (_, tag) = (image['name'], None)\n    values = {'registry': ('' if (registry is None) else (registry + '/')), 'image': image['name'], 'tag': tag}\n    if (tag is None):\n        args = ['-t {registry}{image}'.format(**values), '-t {registry}{image}:{version}'.format(version=versioning.current(), **values)]\n    else:\n        args = ['-t {registry}{image}'.format(**values)]\n    if ('file' in image):\n        args.append('-f {}'.format(conf.proj_path(image['file'])))\n    with conf.within_proj_dir(image.get('path', '.')):\n        log.info('Building <33>{registry}<35>/{image}', **values)\n        shell.run('docker build {args} .'.format(args=' '.join(args)))", "docstring": "Build docker image.\n\nArgs:\nregistry (str):\nThe name of the registry this image belongs to. If not given, the\nresulting image will have a name without the registry.\nimage (dict[str, Any]):\nThe dict containing the information about the built image. This is\nthe same dictionary as defined in DOCKER_IMAGES variable.", "source": "codesearchnet"}
{"code": "def start(self, container, *args, **kwargs):\n    if (args or kwargs):\n        raise errors.DeprecatedMethod('Providing configuration in the start() method is no longer supported. Use the host_config param in create_container instead.')\n    url = self._url('/containers/{0}/start', container)\n    res = self._post(url)\n    self._raise_for_status(res)", "docstring": "Start a container. Similar to the ``docker start`` command, but\ndoesn't support attach options.\n\n**Deprecation warning:** Passing configuration options in ``start`` is\nno longer supported. Users are expected to provide host config options\nin the ``host_config`` parameter of\n:py:meth:`~ContainerApiMixin.create_container`.\n\n\nArgs:\ncontainer (str): The container to start\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n:py:class:`docker.errors.DeprecatedMethod`\nIf any argument besides ``container`` are provided.\n\nExample:\n\n>>> container = cli.create_container(\n...     image='busybox:latest',\n...     command='/bin/sleep 30')\n>>> cli.start(container=container.get('Id'))", "source": "codesearchnet"}
{"code": "def get(self, name):\n        \n        return self.prepare_model(self.client.api.inspect_image(name))", "docstring": "Gets an image.\n\nArgs:\nname (str): The name of the image.\n\nReturns:\n(:py:class:`Image`): The image.\n\nRaises:\n:py:class:`docker.errors.ImageNotFound`\nIf the image does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def __new__(mcs, cls, bases, dct):\n        \n        super_new = super(_Metaclass, mcs).__new__\n\n        \n        \n        parents = [b for b in bases if isinstance(b, _Metaclass)]\n        if not parents:\n            return super_new(mcs, cls, bases, dct)\n\n        new_attr = {}\n        _meta = dct.pop(\"Meta\", type(\"Meta\", (), {\"setting_prefix\": \"\"}))()\n        _meta.settings = {}\n\n        for name, setting in dct.items():\n            if isinstance(setting, Setting):\n                _meta.settings[name] = setting\n                \n                if setting.name == \"\":\n                    setting.name = name\n                \n                if setting.prefix == \"\":\n                    setting.prefix = _meta.setting_prefix\n            else:\n                new_attr[name] = setting\n        new_attr[\"_meta\"] = _meta\n        new_attr[\"settings\"] = _meta.settings\n\n        return super_new(mcs, cls, bases, new_attr)", "docstring": "New method.\n\nArgs:\ncls (str): class name.\nbases (tuple): base classes to inherit from.\ndct (dict): class attributes.\n\nReturns:\nclass: the new created class.", "source": "juraj-google-style"}
{"code": "def print_logs(redis_client, threads_stopped):\n    pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)\n    pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)\n    localhost = services.get_node_ip_address()\n    try:\n        num_consecutive_messages_received = 0\n        while True:\n            if threads_stopped.is_set():\n                return\n            msg = pubsub_client.get_message()\n            if (msg is None):\n                num_consecutive_messages_received = 0\n                threads_stopped.wait(timeout=0.01)\n                continue\n            num_consecutive_messages_received += 1\n            data = json.loads(ray.utils.decode(msg['data']))\n            if (data['ip'] == localhost):\n                for line in data['lines']:\n                    print('{}{}(pid={}){} {}'.format(colorama.Style.DIM, colorama.Fore.CYAN, data['pid'], colorama.Style.RESET_ALL, line))\n            else:\n                for line in data['lines']:\n                    print('{}{}(pid={}, ip={}){} {}'.format(colorama.Style.DIM, colorama.Fore.CYAN, data['pid'], data['ip'], colorama.Style.RESET_ALL, line))\n            if (((num_consecutive_messages_received % 100) == 0) and (num_consecutive_messages_received > 0)):\n                logger.warning(\"The driver may not be able to keep up with the stdout/stderr of the workers. To avoid forwarding logs to the driver, use 'ray.init(log_to_driver=False)'.\")\n    finally:\n        pubsub_client.close()", "docstring": "Prints log messages from workers on all of the nodes.\n\nArgs:\nredis_client: A client to the primary Redis shard.\nthreads_stopped (threading.Event): A threading event used to signal to\nthe thread that it should exit.", "source": "codesearchnet"}
{"code": "def filterfalse_items(item_list, flag_list):\n    assert (len(item_list) == len(flag_list))\n    filtered_items = list(util_iter.ifilterfalse_items(item_list, flag_list))\n    return filtered_items", "docstring": "Returns items in item list where the corresponding item in flag list is true\n\nArgs:\nitem_list (list): list of items\nflag_list (list): list of truthy values\n\nReturns:\nfiltered_items : items where the corresponding flag was truthy\n\nSeeAlso:\nutil_iter.ifilterfalse_items", "source": "codesearchnet"}
{"code": "def __get_labels(self):\n    labels = []\n    try:\n        with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'r') as file_desc:\n            for line in file_desc.readlines():\n                line = line.strip()\n                (label_name, label_color) = line.split(',', 1)\n                labels.append(Label(name=label_name, color=label_color))\n    except IOError:\n        pass\n    return labels", "docstring": "Read the label file of the documents and extract all the labels\n\nReturns:\nAn array of labels.Label objects", "source": "codesearchnet"}
{"code": "def apply_actions(self, actions):\n        \n        modified = []\n        for a in actions:\n            if \"dict\" in a:\n                k = a[\"dict\"]\n                modified.append(k)\n                self.feffinp[k] = self.modify_object(a[\"action\"], self.feffinp[k])\n            elif \"file\" in a:\n                self.modify(a[\"action\"], a[\"file\"])\n            else:\n                raise ValueError(\"Unrecognized format: {}\".format(a))\n        if modified:\n            feff = self.feffinp\n            feff_input = \"\\n\\n\".join(str(feff[k]) for k in\n                                     [\"HEADER\", \"PARAMETERS\", \"POTENTIALS\", \"ATOMS\"]\n                                     if k in feff)\n            for k, v in six.iteritems(feff):\n                with open(os.path.join('.', k), \"w\") as f:\n                    f.write(str(v))\n\n            with open(os.path.join('.', \"feff.inp\"), \"w\") as f:\n                f.write(feff_input)", "docstring": "Applies a list of actions to the FEFF Input Set and rewrites modified\nfiles.\n\nArgs:\nactions [dict]: A list of actions of the form {'file': filename,\n'action': moddermodification} or {'dict': feffinput_key,\n'action': moddermodification}", "source": "juraj-google-style"}
{"code": "def _ExportEvent(self, output_module, event, deduplicate_events=True):\n    if (event.timestamp != self._export_event_timestamp):\n        self._FlushExportBuffer(output_module, deduplicate_events=deduplicate_events)\n        self._export_event_timestamp = event.timestamp\n    self._export_event_heap.PushEvent(event)", "docstring": "Exports an event using an output module.\n\nArgs:\noutput_module (OutputModule): output module.\nevent (EventObject): event.\ndeduplicate_events (Optional[bool]): True if events should be\ndeduplicated.", "source": "codesearchnet"}
{"code": "def AssertDictType(dct, expected_key_type, expected_value_type):\n  \n  AssertType(dct, dict)\n  for key, value in iteritems(dct):\n    AssertType(key, expected_key_type)\n    AssertType(value, expected_value_type)", "docstring": "Ensures that given dictionary is actually a dictionary of specified type.\n\nArgs:\ndct: A dictionary to assert the type for.\nexpected_key_type: An expected type for dictionary keys.\nexpected_value_type: An expected type for dictionary values.\n\nRaises:\nTypeError: If given dictionary is not really a dictionary or not all its\nkeys and values have the expected type.", "source": "juraj-google-style"}
{"code": "def SetCredential(self, path_spec, identifier, data):\n    supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)\n    if (identifier not in supported_credentials.CREDENTIALS):\n        raise KeyError('Unsuppored credential: {0:s} for path specification type: {1:s}'.format(identifier, path_spec.type_indicator))\n    credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})\n    credentials[identifier] = data\n    self._credentials_per_path_spec[path_spec.comparable] = credentials", "docstring": "Sets a specific credential for the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nidentifier (str): credential identifier.\ndata (object): credential data.\n\nRaises:\nKeyError: if the credential is not supported by the path specification\ntype.", "source": "codesearchnet"}
{"code": "def MakeTokenRegex(meta_left, meta_right):\n    \n    key = meta_left, meta_right\n    if key not in _token_re_cache:\n        \n        \n        \n        \n        _token_re_cache[key] = re.compile(\n            r'(' +\n            re.escape(meta_left) +\n            r'\\S.*?' +\n            re.escape(meta_right) +\n            r')')\n    return _token_re_cache[key]", "docstring": "Return a (compiled) regular expression for tokenization.\n\nArgs:\nmeta_left, meta_right: e.g. '{' and '}'\n\n- The regular expressions are memoized.\n- This function is public so the syntax highlighter can use it.", "source": "juraj-google-style"}
{"code": "def get_resize_output_image_size(video, resolution_max_side: int) -> tuple[int, int]:\n    height, width = video.size()[-2:]\n    resolution_max_side = min(MAX_IMAGE_SIZE, resolution_max_side)\n    resolution_max_side = max(height, width) if resolution_max_side is None else resolution_max_side\n    aspect_ratio = width / height\n    if width >= height:\n        width = resolution_max_side\n        height = int(width / aspect_ratio)\n        if height % 2 != 0:\n            height += 1\n    elif height > width:\n        height = resolution_max_side\n        width = int(height * aspect_ratio)\n        if width % 2 != 0:\n            width += 1\n    height = max(height, 1)\n    width = max(width, 1)\n    return (height, width)", "docstring": "Get the output size of the video after resizing given a dictionary specifying the max and min sizes.\nArgs:\nvideo (`np.ndarray`):\nVideo to resize.\nresolution_max_side (`int`):\nThe longest edge of the video will be resized to this value. The shortest edge will be resized to keep the\ninput aspect ratio.\nReturns:\nThe output size of the video after resizing.", "source": "github-repos"}
{"code": "def _convert_scipy_sparse_tensor(value, expected_input):\n    if issparse is not None and issparse(value):\n        if backend.is_sparse(expected_input):\n            sparse_coo = value.tocoo()\n            row, col = (sparse_coo.row, sparse_coo.col)\n            data, shape = (sparse_coo.data, sparse_coo.shape)\n            indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1)\n            return sparse_tensor.SparseTensor(indices, data, shape)\n        else:\n            if ops.executing_eagerly_outside_functions():\n                raise ValueError('A SciPy sparse matrix was passed to a model that expects dense inputs. Please densify your inputs first, such as by calling `x.toarray().')\n            return value.toarray()\n    else:\n        return value", "docstring": "Handle scipy sparse tensor conversions.\n\nThis method takes a value 'value' and returns the proper conversion. If\nvalue is a scipy sparse tensor and the expected input is a dense tensor,\nwe densify 'value'. If value is a scipy sparse tensor and the expected input\nis a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is\nnot a scipy sparse tensor, or scipy is not imported, we pass it through\nunchanged.\n\nArgs:\nvalue: An object that may be a scipy sparse tensor\nexpected_input: The expected input placeholder.\n\nReturns:\nThe possibly-converted 'value'.", "source": "github-repos"}
{"code": "def GetPrototype(self, descriptor):\n    \n    if descriptor.full_name not in self._classes:\n      descriptor_name = descriptor.name\n      if str is bytes:  \n        descriptor_name = descriptor.name.encode('ascii', 'ignore')\n      result_class = reflection.GeneratedProtocolMessageType(\n          descriptor_name,\n          (message.Message,),\n          {'DESCRIPTOR': descriptor, '__module__': None})\n          \n      self._classes[descriptor.full_name] = result_class\n      for field in descriptor.fields:\n        if field.message_type:\n          self.GetPrototype(field.message_type)\n      for extension in result_class.DESCRIPTOR.extensions:\n        if extension.containing_type.full_name not in self._classes:\n          self.GetPrototype(extension.containing_type)\n        extended_class = self._classes[extension.containing_type.full_name]\n        extended_class.RegisterExtension(extension)\n    return self._classes[descriptor.full_name]", "docstring": "Builds a proto2 message class based on the passed in descriptor.\n\nPassing a descriptor with a fully qualified name matching a previous\ninvocation will cause the same class to be returned.\n\nArgs:\ndescriptor: The descriptor to build from.\n\nReturns:\nA class describing the passed in descriptor.", "source": "juraj-google-style"}
{"code": "def _GetMessage(self, message_file_key, lcid, message_identifier):\n    table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)\n    has_table = self._database_file.HasTable(table_name)\n    if (not has_table):\n        return None\n    column_names = ['message_string']\n    condition = 'message_identifier == \"0x{0:08x}\"'.format(message_identifier)\n    values = list(self._database_file.GetValues([table_name], column_names, condition))\n    number_of_values = len(values)\n    if (number_of_values == 0):\n        return None\n    if (number_of_values == 1):\n        return values[0]['message_string']\n    raise RuntimeError('More than one value found in database.')", "docstring": "Retrieves a specific message from a specific message table.\n\nArgs:\nmessage_file_key (int): message file key.\nlcid (int): language code identifier (LCID).\nmessage_identifier (int): message identifier.\n\nReturns:\nstr: message string or None if not available.\n\nRaises:\nRuntimeError: if more than one value is found in the database.", "source": "codesearchnet"}
{"code": "def normalize_width(layer):\n        \n        instructions = [instruction for instruction in filter(lambda x: x is not None, layer)]\n        longest = max([instruction.length for instruction in instructions])\n        for instruction in instructions:\n            instruction.layer_width = longest", "docstring": "When the elements of the layer have different widths, sets the width to the max elements.\nArgs:\nlayer (list): A list of elements.", "source": "juraj-google-style"}
{"code": "def image_summary(predictions, targets, hparams):\n    del hparams\n    results = tf.cast(tf.argmax(predictions, axis=(- 1)), tf.uint8)\n    gold = tf.cast(targets, tf.uint8)\n    summary1 = tf.summary.image('prediction', results, max_outputs=2)\n    summary2 = tf.summary.image('data', gold, max_outputs=2)\n    summary = tf.summary.merge([summary1, summary2])\n    return (summary, tf.zeros_like(predictions))", "docstring": "Reshapes predictions and passes it to tensorboard.\n\nArgs:\npredictions : The predicted image (logits).\ntargets : The ground truth.\nhparams: model hparams.\n\nReturns:\nsummary_proto: containing the summary images.\nweights: A Tensor of zeros of the same shape as predictions.", "source": "codesearchnet"}
{"code": "def get_multi(cls, blob_keys, **ctx_options):\n    \n    futs = cls.get_multi_async(blob_keys, **ctx_options)\n    return [fut.get_result() for fut in futs]", "docstring": "Multi-key version of get().\n\nArgs:\nblob_keys: A list of blob keys.\n**ctx_options: Context options for Model().get_by_id().\n\nReturns:\nA list whose items are each either a BlobInfo entity or None.", "source": "juraj-google-style"}
{"code": "def Read(self, file_object):\n    file_object.seek(self.last_read, os.SEEK_SET)\n    read_data = file_object.read(self._MAXIMUM_READ_SIZE)\n    self.last_read = file_object.get_offset()\n    compressed_data = b''.join([self._compressed_data, read_data])\n    (decompressed, extra_compressed) = self._decompressor.Decompress(compressed_data)\n    self._compressed_data = extra_compressed\n    self.uncompressed_offset += len(decompressed)\n    return decompressed", "docstring": "Reads the next uncompressed data from the gzip stream.\n\nArgs:\nfile_object (FileIO): file object that contains the compressed stream.\n\nReturns:\nbytes: next uncompressed data from the compressed stream.", "source": "codesearchnet"}
{"code": "def _StubMethod(self, stub, method_descriptor, rpc_controller, request, callback):\n    return stub.rpc_channel.CallMethod(method_descriptor, rpc_controller, request, method_descriptor.output_type._concrete_class, callback)", "docstring": "The body of all service methods in the generated stub class.\n\nArgs:\nstub: Stub instance.\nmethod_descriptor: Descriptor of the invoked method.\nrpc_controller: Rpc controller to execute the method.\nrequest: Request protocol message.\ncallback: A callback to execute when the method finishes.\nReturns:\nResponse message (in case of blocking call).", "source": "codesearchnet"}
{"code": "def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True):\n    if deepCopy:\n        d = copy.deepcopy(d)\n    newDict = {}\n    toCopy = [(k, v, newDict, ()) for (k, v) in d.iteritems()]\n    while (len(toCopy) > 0):\n        (k, v, d, prevKeys) = toCopy.pop()\n        prevKeys = (prevKeys + (k,))\n        if isinstance(v, dict):\n            d[k] = dict()\n            toCopy[0:0] = [(innerK, innerV, d[k], prevKeys) for (innerK, innerV) in v.iteritems()]\n        else:\n            newV = f(v, prevKeys)\n            if ((not discardNoneKeys) or (newV is not None)):\n                d[k] = newV\n    return newDict", "docstring": "Recursively copies a dict and returns the result.\n\nArgs:\nd: The dict to copy.\nf: A function to apply to values when copying that takes the value and the\nlist of keys from the root of the dict to the value and returns a value\nfor the new dict.\ndiscardNoneKeys: If True, discard key-value pairs when f returns None for\nthe value.\ndeepCopy: If True, all values in returned dict are true copies (not the\nsame object).\nReturns:\nA new dict with keys and values from d replaced with the result of f.", "source": "codesearchnet"}
{"code": "def load_case(adapter, case_obj, update=False):\n    \n    logger.info('Loading case {} into database'.format(case_obj['display_name']))\n\n    \n    existing_case = adapter.case(case_obj['_id'])\n\n    if existing_case:\n        if update:\n            adapter.update_case(case_obj)\n        else:\n            raise IntegrityError(\"Case {0} already exists in database\".format(case_obj['_id']))\n    else:\n        adapter.add_case(case_obj)\n    return case_obj", "docstring": "Load a case into the database\n\nIf the case already exists the function will exit.\nIf the user want to load a case that is already in the database\n'update' has to be 'True'\n\nArgs:\nadapter (MongoAdapter): connection to the database\ncase_obj (dict): case object to persist to the database\nupdate(bool): If existing case should be updated\n\nReturns:\ncase_obj(dict): A dictionary with the builded case", "source": "juraj-google-style"}
{"code": "def assert_parse_equals_golden(self, json_path: str, proto_path: str, proto_cls: Type[message.Message], *, parse_f: Callable[..., message.Message], json_delimiter: Optional[str]=None, proto_delimiter: Optional[str]=None, **parse_kwargs: Any) -> None:\n    testdata = self._read_json_and_protos(json_path, proto_path, proto_cls, json_delimiter=json_delimiter, proto_delimiter=proto_delimiter)\n    for json_str, proto in zip(testdata.json_strs, testdata.protos):\n        from_json = parse_f(json_str, proto_cls, **parse_kwargs)\n        self.assertEqual(from_json, proto)", "docstring": "Compare parser output against 'golden' file.\n\nNote that we perform a comparison between protobuf representations.\n\nIf json_delimiter and proto_delimiter are supplied, the cardinality of the\nresulting sequences must match exactly or an error will be thrown.\n\nArgs:\njson_path: The filepath to the .json file (loaded as a 'test case').\nproto_path: The filepath to the .prototxt file (loaded as a 'golden').\nproto_cls: The type of protobuf message to parse into.\nparse_f: The function responsible for parsing FHIR JSON to exmaine.\njson_delimiter: An optional delimiter for the .json file to load multiple\nrepresentations. Defaults to None.\nproto_delimiter: An optional delimiter for the .prototxt file to load\nmultiple representations. Defaults to None.\n**parse_kwargs: Optional key/value arguments to supply to parse_f.", "source": "github-repos"}
{"code": "def skip_on_exceptions(self, exceptions: Sequence[Union[Type[Exception], Tuple[Exception, str]]]):\n\n    def skip_on_exception(unused_error):\n        error_stack = traceback.format_exc()\n        logging.warning('Skipping trial on unhandled exception: %s', error_stack)\n        self.skip(error_stack)\n    return utils.catch_errors(exceptions, skip_on_exception)", "docstring": "Returns a context manager to skip trial on user-specified exceptions.\n\nUsages::\n\nwith feedback.skip_on_exceptions((ValueError, KeyError)):\n...\n\nwith feedback.skip_on_exceptions(((ValueError, 'bad value for .*'),\n(ValueError, '.* invalid range'),\nTypeError)):\n...\n\nArgs:\nexceptions: A sequence of (exception type, or exception type plus regular\nexpression for error message).\n\nReturns:\nA context manager for skipping trials on user-specified exceptions.", "source": "github-repos"}
{"code": "def add_loss(self, loss, name=None, regularization=False, add_summaries=True):\n    \n    \n    _ = name  \n    if regularization:\n      self._g.add_to_collection(GraphKeys.REGULARIZATION_LOSSES, loss)\n\n    tf.add_to_collection(GraphKeys.LOSSES, loss)\n    if add_summaries:\n      self.add_scalar_summary(loss, 'loss')\n      self.add_average_summary(loss, 'loss_average')", "docstring": "Append a loss to the total loss for the network.\n\nArgs:\nloss: append this loss operation\nname: The name for this loss, defaults to loss.op.name\nregularization: Set to True if this is a regularization loss.\nadd_summaries: Set to True if you want to see scalar and average summary.", "source": "juraj-google-style"}
{"code": "def _pick_inserted_ops_moment_indices(operations: Sequence[ops.Operation], start: int=0, frontier: Dict[(ops.Qid, int)]=None) -> Tuple[(Sequence[int], Dict[(ops.Qid, int)])]:\n    if (frontier is None):\n        frontier = defaultdict((lambda : 0))\n    moment_indices = []\n    for op in operations:\n        op_start = max(start, max((frontier[q] for q in op.qubits)))\n        moment_indices.append(op_start)\n        for q in op.qubits:\n            frontier[q] = max(frontier[q], (op_start + 1))\n    return (moment_indices, frontier)", "docstring": "Greedily assigns operations to moments.\n\nArgs:\noperations: The operations to assign to moments.\nstart: The first moment to consider assignment to.\nfrontier: The first moment to which an operation acting on a qubit\ncan be assigned. Updated in place as operations are assigned.\n\nReturns:\nThe frontier giving the index of the moment after the last one to\nwhich an operation that acts on each qubit is assigned. If a\nfrontier was specified as an argument, this is the same object.", "source": "codesearchnet"}
{"code": "def first(seq, key=(lambda x: bool(x)), default=None, apply=(lambda x: x)):\n    return next((apply(x) for x in seq if key(x)), (default() if callable(default) else default))", "docstring": "Give the first value that satisfies the key test.\n\nArgs:\nseq (iterable):\nkey (callable): test for each element of iterable\ndefault: returned when all elements fail test\napply (callable): applied to element before return, but not to default value\n\nReturns: first element in seq that passes key, mutated with optional apply\n\nExamples:\n>>> first([0, False, None, [], (), 42])\n42\n>>> first([0, False, None, [], ()]) is None\nTrue\n>>> first([0, False, None, [], ()], default='ohai')\n'ohai'\n>>> import re\n>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])\n>>> m.group(1)\n'bc'\n\nThe optional `key` argument specifies a one-argument predicate function\nlike that used for `filter()`.  The `key` argument, if supplied, must be\nin keyword form.  For example:\n>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)\n4", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        super(Header, self).Deserialize(reader)\n        if reader.ReadByte() != 0:\n            raise Exception('Incorrect Header Format')", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def double_sphere(cdata, sym):\n    \n    \n    nrows = cdata.shape[0]\n    ncols = cdata.shape[1]\n\n    ddata = np.zeros([nrows, ncols], dtype=np.complex128)\n\n    for n in xrange(0, nrows):\n        for m in xrange(0, ncols):\n            s = sym * cdata[np.mod(nrows - n, nrows),\n                          np.mod(int(np.floor(ncols / 2)) + m, ncols)]\n            t = cdata[n, m]\n\n            if s * t == 0:\n                ddata[n, m] = s + t\n            else:\n                ddata[n, m] = (s + t) / 2\n\n    return ddata", "docstring": "Ensures that the data within cdata has double sphere symmetry.\n\nExample::\n\n>>> spherepy.doublesphere(cdata, 1)\n\nArgs:\nsym (int): is 1 for scalar data and -1 for vector data\n\nReturns:\nnumpy.array([*,*], dtype=np.complex128) containing array with\ndoublesphere symmetry.", "source": "juraj-google-style"}
{"code": "def inverse_stft_window_fn_inner(frame_length, dtype):\n    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):\n        frame_step_ = ops.convert_to_tensor(frame_step, name='frame_step')\n        frame_step_.shape.assert_has_rank(0)\n        frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n        frame_length.shape.assert_has_rank(0)\n        forward_window = forward_window_fn(frame_length, dtype=dtype)\n        denom = math_ops.square(forward_window)\n        overlaps = -(-frame_length \n        denom = array_ops.pad(denom, [(0, overlaps * frame_step_ - frame_length)])\n        denom = array_ops.reshape(denom, [overlaps, frame_step_])\n        denom = math_ops.reduce_sum(denom, 0, keepdims=True)\n        denom = array_ops.tile(denom, [overlaps, 1])\n        denom = array_ops.reshape(denom, [overlaps * frame_step_])\n        return forward_window / denom[:frame_length]", "docstring": "Computes a window that can be used in `inverse_stft`.\n\nArgs:\nframe_length: An integer scalar `Tensor`. The window length in samples.\ndtype: Data type of waveform passed to `stft`.\n\nReturns:\nA window suitable for reconstructing original waveform in `inverse_stft`.\n\nRaises:\nValueError: If `frame_length` is not scalar, `forward_window_fn` is not a\ncallable that takes a window length and a `dtype` keyword argument and\nreturns a `[window_length]` `Tensor` of samples in the provided datatype\n`frame_step` is not scalar, or `frame_step` is not scalar.", "source": "github-repos"}
{"code": "def __init__(self, instance, pretty=False, expand=StringFormatType.error):\n        \n        self.instance = instance\n        self.pretty = pretty\n        self.expand = expand", "docstring": "Create a formatter.\n\nArgs:\ninstance: The object to format with.\npretty: If True, references to non-string attributes such as lists\nare converted to basic form, with characters such as brackets\nand parentheses removed.\nexpand: `StringFormatType`.", "source": "juraj-google-style"}
{"code": "def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix):\n    \n    attributed_strings = ui_element.getElementsByTagName('attributedString')\n    if attributed_strings.length == 0:\n        return False\n\n    attributed_element = attributed_strings[0]\n    fragment_index = 1\n    for fragment in attributed_element.getElementsByTagName('fragment'):\n        \n        \n        try:\n            label_entry_key = fragment.attributes['content'].value\n        except KeyError:\n            label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue\n\n        comment = \"%s Part %d\" % (comment_prefix, fragment_index)\n        results.append((label_entry_key, comment))\n        fragment_index += 1\n\n    return fragment_index > 1", "docstring": "Adds string pairs from a UI element with attributed text\n\nArgs:\nresults (list): The list to add the results to.\nattributed_element (element): The element from the xib that contains, to extract the fragments from.\ncomment_prefix (str): The prefix of the comment to use for extracted string\n(will be appended \"Part X\" suffices)\n\nReturns:\nbool: Whether or not an attributed string was found.", "source": "juraj-google-style"}
{"code": "def cos_distance(t1, t2, epsilon=1e-12, name=None):\n  \n  with tf.name_scope(name, 'cos_distance', [t1, t2]) as scope:\n    t1 = tf.convert_to_tensor(t1, name='t1')\n    t2 = tf.convert_to_tensor(t2, name='t2')\n    x_inv_norm = tf.rsqrt(tf.maximum(length_squared(t1) * length_squared(t2),\n                                     epsilon))\n    return tf.subtract(1.0, dot_product(t1, t2) * x_inv_norm, name=scope)", "docstring": "Cos distance between t1 and t2 and caps the gradient of the Square Root.\n\nArgs:\nt1: A tensor\nt2: A tensor that can be multiplied by t1.\nepsilon: A lower bound value for the distance. The square root is used as\nthe normalizer.\nname: Optional name for this op.\nReturns:\nThe cos distance between t1 and t2.", "source": "juraj-google-style"}
{"code": "def rating(self, **kwargs):\n        \n        path = self._get_id_path('rating')\n\n        payload = {\n            'value': kwargs.pop('value', None),\n        }\n\n        response = self._POST(path, kwargs, payload)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "This method lets users rate a movie. A valid session id or guest\nsession id is required.\n\nArgs:\nsession_id: see Authentication.\nguest_session_id: see Authentication.\nvalue: Rating value.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):\n    matches = {sample: [] for sample in profiles.keys()}\n    for case in adapter.cases():\n        for individual in case['individuals']:\n            for sample in profiles.keys():\n                if individual.get('profile'):\n                    similarity = compare_profiles(profiles[sample], individual['profile'])\n                    if (similarity >= hard_threshold):\n                        msg = f\"individual {sample} has a {similarity} similarity with individual {individual['ind_id']} in case {case['case_id']}\"\n                        LOG.critical(msg)\n                        raise ProfileError\n                    if (similarity >= soft_threshold):\n                        match = f\"{case['case_id']}.{individual['ind_id']}\"\n                        matches[sample].append(match)\n    return matches", "docstring": "given a dict of profiles, searches through all the samples in the DB\nfor a match. If a matching sample is found an exception is raised,\nand the variants will not be loaded into the database.\n\nArgs:\nadapter (MongoAdapter): Adapter to mongodb\nprofiles (dict(str)): The profiles (given as strings) for each sample in vcf.\nhard_threshold(float): Rejects load if hamming distance above this is found\nsoft_threshold(float): Stores similar samples if hamming distance above this is found\n\nReturns:\nmatches(dict(list)): list of similar samples for each sample in vcf.", "source": "codesearchnet"}
{"code": "def get(self, url, params=None, **kwargs):\n        \n        check_type(url, basestring, may_be_none=False)\n        check_type(params, dict)\n\n        \n        erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])\n\n        response = self.request('GET', url, erc, params=params, **kwargs)\n        return extract_and_parse_json(response)", "docstring": "Sends a GET request.\n\nArgs:\nurl(basestring): The URL of the API endpoint.\nparams(dict): The parameters for the HTTP GET request.\n**kwargs:\nerc(int): The expected (success) response code for the request.\nothers: Passed on to the requests package.\n\nRaises:\nApiError: If anything other than the expected response code is\nreturned by the Webex Teams API endpoint.", "source": "juraj-google-style"}
{"code": "def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1, train_global_model=False):\n    self._use_stored_folds = use_stored_folds\n    self._n_folds = n_folds\n    self._n_rows = X_feat.shape[0]\n    self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)\n    cv_obj = {}\n    if (id_vec is None):\n        id_vec = np.arange(1, (self._n_rows + 1))\n    best_val_acc_epoch_l = []\n    for (fold, train, test) in self._kf:\n        X_feat_train = X_feat[train]\n        X_seq_train = X_seq[train]\n        y_train = y[train]\n        X_feat_test = X_feat[test]\n        X_seq_test = X_seq[test]\n        y_test = y[test]\n        id_vec_test = id_vec[test]\n        print(fold, '/', n_folds)\n        dc = copy.deepcopy(self._concise_model)\n        dc.train(X_feat_train, X_seq_train, y_train, X_feat_test, X_seq_test, y_test, n_cores=n_cores)\n        dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)\n        cv_obj[fold] = dc\n        best_val_acc_epoch_l.append(dc.get_accuracy()['best_val_acc_epoch'])\n    self._cv_model = cv_obj\n    if train_global_model:\n        dc = copy.deepcopy(self._concise_model)\n        dc._param['n_epochs'] = int(np.array(best_val_acc_epoch_l).mean())\n        print(('tranining global model with n_epochs = ' + str(dc._param['n_epochs'])))\n        dc.train(X_feat, X_seq, y, n_cores=n_cores)\n        dc._test(X_feat, X_seq, y, id_vec)\n        self._concise_global_model = dc", "docstring": "Train the Concise model in cross-validation.\n\nArgs:\nX_feat: See :py:func:`concise.Concise.train`\nX_seq: See :py:func:`concise.Concise.train`\ny: See :py:func:`concise.Concise.train`\nid_vec: List of character id's used to differentiate the trainig samples. Returned by :py:func:`concise.prepare_data`.\nn_folds (int): Number of CV-folds to use.\nuse_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated.\nn_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.\ntrain_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).", "source": "codesearchnet"}
{"code": "def _DeepCopy(self, obj):\n    \n    precondition.AssertType(obj, rdfvalue.RDFValue)\n\n    return obj.__class__.FromSerializedString(obj.SerializeToString())", "docstring": "Creates an object copy by serializing/deserializing it.\n\nRDFStruct.Copy() doesn't deep-copy repeated fields which may lead to\nhard to catch bugs.\n\nArgs:\nobj: RDFValue to be copied.\n\nReturns:\nA deep copy of the passed RDFValue.", "source": "juraj-google-style"}
{"code": "def recipe_smartsheet_report_to_bigquery(config, auth_read, auth_write, token, report, dataset, table, schema):\n    smartsheet(config, {'auth': auth_read, 'token': token, 'report': report, 'out': {'bigquery': {'auth': auth_write, 'dataset': dataset, 'table': table, 'schema': schema}}})", "docstring": "Move report data into a BigQuery table.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\ntoken (string) - Retrieve from SmartSheet account settings.\nreport (string) - Retrieve from report properties.\ndataset (string) - Existing BigQuery dataset.\ntable (string) - Table to create from this report.\nschema (json) - Schema provided in JSON list format or leave empty to auto detect.", "source": "github-repos"}
{"code": "def create_doc_id_from_json(doc) -> str:\n        \n        return hashlib.sha256(json.dumps(doc, sort_keys=True).encode('utf-8')).hexdigest()", "docstring": "Docs with identical contents get the same ID.\nArgs:\ndoc:\n\nReturns: a string with the hash of the given document.", "source": "juraj-google-style"}
{"code": "def exp(vector):\n    \n    weld_type = None\n    if isinstance(vector, LazyOpResult):\n        weld_type = vector.weld_type\n        vector = vector.expr\n    elif isinstance(vector, np.ndarray):\n        weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[\n            str(vector.dtype)]\n    return NumpyArrayWeld(numpy_weld_impl.exp(vector, weld_type), WeldDouble())", "docstring": "Computes a per-element exponent of the passed-in vector.\n\nArgs:\nvector (TYPE): Description", "source": "juraj-google-style"}
{"code": "def savedata(self, output, location=None):\n    output.persist = True\n    if location:\n        output.persist_location = location", "docstring": "Save output data from any task in this workflow to S3\n\nArgs:\noutput: Reference task output (e.g. task.outputs.output1).\n\nlocation (optional): Subfolder under which the output will be saved.\nIt will be placed under the account directory in gbd-customer-data bucket:\ns3://gbd-customer-data/{account_id}/{location}\nLeave blank to save to: workflow_output/{workflow_id}/{task_name}/{port_name}\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _TerminateProcessByPid(self, pid):\n    \n    self._RaiseIfNotRegistered(pid)\n\n    process = self._processes_per_pid[pid]\n\n    self._TerminateProcess(process)\n    self._StopMonitoringProcess(process)", "docstring": "Terminate a process that's monitored by the engine.\n\nArgs:\npid (int): process identifier (PID).\n\nRaises:\nKeyError: if the process is not registered with and monitored by the\nengine.", "source": "juraj-google-style"}
{"code": "def should_trigger_for_step(self, step):\n    if self._last_triggered_step is None:\n        return True\n    if self._last_triggered_step == step:\n        return False\n    if self._every_secs is not None:\n        if time.time() >= self._last_triggered_time + self._every_secs:\n            return True\n    if self._every_steps is not None:\n        if step >= self._last_triggered_step + self._every_steps:\n            return True\n    return False", "docstring": "Return true if the timer should trigger for the specified step.\n\nArgs:\nstep: Training step to trigger on.\n\nReturns:\nTrue if the difference between the current time and the time of the last\ntrigger exceeds `every_secs`, or if the difference between the current\nstep and the last triggered step exceeds `every_steps`. False otherwise.", "source": "github-repos"}
{"code": "def _RegisterFlagByModule(self, module_name, flag):\n    \n    flags_by_module = self.FlagsByModuleDict()\n    flags_by_module.setdefault(module_name, []).append(flag)", "docstring": "Records the module that defines a specific flag.\n\nWe keep track of which flag is defined by which module so that we\ncan later sort the flags by module.\n\nArgs:\nmodule_name: A string, the name of a Python module.\nflag: A Flag object, a flag that is key to the module.", "source": "juraj-google-style"}
{"code": "def attribute(*args, **kw):\n    return operator(*args, kind=Operator.Type.ATTRIBUTE, **kw)", "docstring": "Registers a new attribute only operator function in the test engine.\n\nArguments:\n*args: variadic arguments.\n**kw: variadic keyword arguments.\n\nReturns:\nfunction", "source": "codesearchnet"}
{"code": "def registration_info_request(self, registration_id):\n    return self.requests_session.get((self.INFO_END_POINT + registration_id), params={'details': 'true'})", "docstring": "Makes a request for registration info and returns the response object\n\nArgs:\nregistration_id: id to be checked\n\nReturns:\nresponse of registration info request", "source": "codesearchnet"}
{"code": "def read_frame(self):\n    (ret, frame) = self.capture.read()\n    if (not ret):\n        self.event_source.stop()\n        try:\n            self.capture.release()\n        except AttributeError:\n            pass\n        return None\n    if ((self.convert_color != (- 1)) and is_color_image(frame)):\n        return cv2.cvtColor(frame, self.convert_color)\n    return frame", "docstring": "Reads a frame and converts the color if needed.\n\nIn case no frame is available, i.e. self.capture.read() returns False\nas the first return value, the event_source of the TimedAnimation is\nstopped, and if possible the capture source released.\n\nReturns:\nNone if stopped, otherwise the color converted source image.", "source": "codesearchnet"}
{"code": "def plot_scatter_matrix(self, freq=None, title=None,\n                            figsize=(10, 10), **kwargs):\n        \n        if title is None:\n            title = self._get_default_plot_title(\n                freq, 'Return Scatter Matrix')\n\n        plt.figure()\n        ser = self._get_series(freq).to_returns().dropna()\n        pd.scatter_matrix(ser, figsize=figsize, **kwargs)\n        return plt.suptitle(title)", "docstring": "Wrapper around pandas' scatter_matrix.\n\nArgs:\n* freq (str): Data frequency used for display purposes.\nRefer to pandas docs for valid freq strings.\n* figsize ((x,y)): figure size\n* title (str): Title if default not appropriate\n* kwargs: passed to pandas' scatter_matrix method", "source": "juraj-google-style"}
{"code": "def CopyToStatTimeTuple(self):\n    normalized_timestamp = self._GetNormalizedTimestamp()\n    if (normalized_timestamp is None):\n        return (None, None)\n    if (self._precision in (definitions.PRECISION_1_NANOSECOND, definitions.PRECISION_100_NANOSECONDS, definitions.PRECISION_1_MICROSECOND, definitions.PRECISION_1_MILLISECOND, definitions.PRECISION_100_MILLISECONDS)):\n        remainder = int(((normalized_timestamp % 1) * self._100NS_PER_SECOND))\n        return (int(normalized_timestamp), remainder)\n    return (int(normalized_timestamp), None)", "docstring": "Copies the date time value to a stat timestamp tuple.\n\nReturns:\ntuple[int, int]: a POSIX timestamp in seconds and the remainder in\n100 nano seconds or (None, None) on error.", "source": "codesearchnet"}
{"code": "def get_logging_metric_hook(benchmark_log_dir=None,\n                            tensors_to_log=None,\n                            every_n_secs=600,\n                            **kwargs):  \n  \n  if benchmark_log_dir is None:\n    raise ValueError(\"metric_log_dir should be provided to use metric logger\")\n  if tensors_to_log is None:\n    tensors_to_log = _TENSORS_TO_LOG\n  return metric_hook.LoggingMetricHook(\n      tensors=tensors_to_log,\n      log_dir=benchmark_log_dir,\n      every_n_secs=every_n_secs)", "docstring": "Function to get LoggingMetricHook.\n\nArgs:\nbenchmark_log_dir: `string`, directory path to save the metric log.\ntensors_to_log: List of tensor names or dictionary mapping labels to tensor\nnames. If not set, log _TENSORS_TO_LOG by default.\nevery_n_secs: `int`, the frequency for logging the metric. Default to every\n10 mins.\n\nReturns:\nReturns a ProfilerHook that writes out timelines that can be loaded into\nprofiling tools like chrome://tracing.", "source": "juraj-google-style"}
{"code": "def compose_args(self, action_name, in_argdict):\n    for action in self.actions:\n        if (action.name == action_name):\n            break\n    else:\n        raise AttributeError('Unknown Action: {0}'.format(action_name))\n    unexpected = (set(in_argdict) - set((argument.name for argument in action.in_args)))\n    if unexpected:\n        raise ValueError(\"Unexpected argument '{0}'. Method signature: {1}\".format(next(iter(unexpected)), str(action)))\n    composed = []\n    for argument in action.in_args:\n        name = argument.name\n        if (name in in_argdict):\n            composed.append((name, in_argdict[name]))\n            continue\n        if (name in self.DEFAULT_ARGS):\n            composed.append((name, self.DEFAULT_ARGS[name]))\n            continue\n        if (argument.vartype.default is not None):\n            composed.append((name, argument.vartype.default))\n        raise ValueError(\"Missing argument '{0}'. Method signature: {1}\".format(argument.name, str(action)))\n    return composed", "docstring": "Compose the argument list from an argument dictionary, with\nrespect for default values.\n\nArgs:\naction_name (str): The name of the action to be performed.\nin_argdict (dict): Arguments as a dict, eg\n``{'InstanceID': 0, 'Speed': 1}. The values\ncan be a string or something with a string representation.\n\nReturns:\nlist: a list of ``(name, value)`` tuples.\n\nRaises:\n`AttributeError`: If this service does not support the action.\n`ValueError`: If the argument lists do not match the action\nsignature.", "source": "codesearchnet"}
{"code": "def geosearch(self, latitude=None, longitude=None, radius=1000, title=None, auto_suggest=True, results=10):\n\n    def test_lat_long(val):\n        ' handle testing lat and long '\n        if (not isinstance(val, Decimal)):\n            error = 'Latitude and Longitude must be specified either as a Decimal or in formats that can be coerced into a Decimal.'\n            try:\n                return Decimal(val)\n            except (DecimalException, TypeError):\n                raise ValueError(error)\n        return val\n    params = {'list': 'geosearch', 'gsradius': radius, 'gslimit': results}\n    if (title is not None):\n        if auto_suggest:\n            title = self.suggest(title)\n        params['gspage'] = title\n    else:\n        lat = test_lat_long(latitude)\n        lon = test_lat_long(longitude)\n        params['gscoord'] = '{0}|{1}'.format(lat, lon)\n    raw_results = self.wiki_request(params)\n    self._check_error_response(raw_results, title)\n    return [d['title'] for d in raw_results['query']['geosearch']]", "docstring": "Search for pages that relate to the provided geocoords or near\nthe page\n\nArgs:\nlatitude (Decimal or None): Latitude geocoord; must be \\\ncoercable to decimal\nlongitude (Decimal or None): Longitude geocoord; must be \\\ncoercable to decimal\nradius (int): Radius around page or geocoords to pull back; \\\nin meters\ntitle (str): Page title to use as a geocoordinate; this has \\\nprecedence over lat/long\nauto_suggest (bool): Auto-suggest the page title\nresults (int): Number of pages within the radius to return\nReturns:\nlist: A listing of page titles\nRaises:\nValueError: If either the passed latitutde or longitude are \\\nnot coercable to a Decimal", "source": "codesearchnet"}
{"code": "def download_as_obj(\n    base_url=d1_common.const.URL_DATAONE_ROOT,\n    timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,\n):\n    \n    return decode_der(download_as_der(base_url, timeout_sec))", "docstring": "Download public certificate from a TLS/SSL web server as Certificate object.\n\nAlso see download_as_der().\n\nArgs:\nbase_url : str\nA full URL to a DataONE service endpoint or a server hostname\ntimeout_sec : int or float\nTimeout for the SSL socket operations\n\nReturns:\ncryptography.Certificate", "source": "juraj-google-style"}
{"code": "def _reload_config(self, reload_original_config):\n        \n        \n        if reload_original_config:\n            self.original_config = self.running_config\n            self.original_config.set_name('original')\n\n        paths = self.running_config.get_paths()\n\n        self.running_config = FortiConfig('running', vdom=self.vdom)\n\n        for path in paths:\n            self.load_config(path, empty_candidate=True)", "docstring": "This command will update the running config from the live device.\n\nArgs:\n* reload_original_config:\n* If ``True`` the original config will be loaded with the running config before reloading the\\\noriginal config.\n* If ``False`` the original config will remain untouched.", "source": "juraj-google-style"}
{"code": "def get_list(self, key, pipeline=False):\n    if pipeline:\n        return self._pipeline.lrange(key, 0, (- 1))\n    return self._db.lrange(key, 0, (- 1))", "docstring": "Get all the value in the list stored at key.\n\nArgs:\nkey (str): Key where the list is stored.\npipeline (bool): True, start a transaction block. Default false.\n\nReturns:\nlist: values in the list ordered by list index", "source": "codesearchnet"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)\n    y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)\n    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n    if not self._built:\n        self._build(y_true.shape, y_pred.shape)\n    if sample_weight is None:\n        sample_weight = 1\n    sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)\n    if len(sample_weight.shape) == 1:\n        sample_weight = ops.expand_dims(sample_weight, axis=1)\n    sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))\n    weighted_y_true = y_true * ops.cast(sample_weight, y_true.dtype)\n    self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))\n    self.squared_sum.assign(self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0))\n    self.total_mse.assign(self.total_mse + ops.sum((y_true - y_pred) ** 2 * ops.cast(sample_weight, y_true.dtype), axis=0))\n    self.count.assign(self.count + ops.sum(sample_weight, axis=0))\n    self.num_samples.assign(self.num_samples + ops.size(y_true))", "docstring": "Accumulates root mean squared error statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Can\nbe a `Tensor` whose rank is either 0, or the same rank as\n`y_true`, and must be broadcastable to `y_true`.\nDefaults to `1`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def _check_if_fenced(self, name):\n        \n        if name in object.__getattribute__(self, '_attributes_to_fence'):\n            raise TranspilerAccessError(\"The fenced %s has the property %s protected\" %\n                                        (type(object.__getattribute__(self, '_wrapped')), name))", "docstring": "Checks if the attribute name is in the list of attributes to protect. If so, raises\nTranspilerAccessError.\n\nArgs:\nname (string): the attribute name to check\n\nRaises:\nTranspilerAccessError: when name is the list of attributes to protect.", "source": "juraj-google-style"}
{"code": "def __init__(self, jids, _id = None):\n        \n        super(GetStatusesIqProtocolEntity, self).__init__(self.__class__.XMLNS, _id, _type = \"get\", to = YowConstants.WHATSAPP_SERVER)\n        self.setGetStatusesProps(jids)", "docstring": "Request the statuses of users. Should be sent once after login.\n\nArgs:\n- jids: A list of jids representing the users whose statuses you are\ntrying to get.", "source": "juraj-google-style"}
{"code": "def generate_version(max_major: int=1, max_minor: int=7, max_patch: int=15) -> str:\n    major = randint(0, max_major)\n    minor = randint(0, max_minor)\n    patch = randint(0, max_patch)\n    return '{:d}.{:d}.{:d}'.format(major, minor, patch)", "docstring": "Select a random version.\n\nArgs:\nmax_major (int, optional) maximum major version\nmax_minor (int, optional) maximum minor version\nmax_patch (int, optional) maximum patch version\n\nReturns:\nstr, Version String", "source": "codesearchnet"}
{"code": "def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS):\n    \n    handler = partial(_length_scalar_handler, scalar_factory)\n    return _bind_length_handlers(tids, handler, lns)", "docstring": "Binds a set of scalar handlers for an inclusive range of low-nibble values.\n\nArgs:\ntids (Sequence[int]): The Type IDs to bind to.\nscalar_factory (Callable): The factory for the scalar parsing function.\nThis function can itself return a function representing a thunk to defer the\nscalar parsing or a direct value.\nlns (Sequence[int]): The low-nibble lengths to bind to.", "source": "juraj-google-style"}
{"code": "def CopyAttributesFromSessionCompletion(self, session_completion):\n    if (self.identifier != session_completion.identifier):\n        raise ValueError('Session identifier mismatch.')\n    self.aborted = session_completion.aborted\n    if session_completion.analysis_reports_counter:\n        self.analysis_reports_counter = session_completion.analysis_reports_counter\n    self.completion_time = session_completion.timestamp\n    if session_completion.event_labels_counter:\n        self.event_labels_counter = session_completion.event_labels_counter\n    if session_completion.parsers_counter:\n        self.parsers_counter = session_completion.parsers_counter", "docstring": "Copies attributes from a session completion.\n\nArgs:\nsession_completion (SessionCompletion): session completion attribute\ncontainer.\n\nRaises:\nValueError: if the identifier of the session completion does not match\nthat of the session.", "source": "codesearchnet"}
{"code": "def fuzzy_index_match(possiblities, label, **kwargs):\n    possibilities = list(possiblities)\n    if isinstance(label, basestring):\n        return fuzzy_get(possibilities, label, **kwargs)\n    if isinstance(label, int):\n        return possibilities[label]\n    if isinstance(label, list):\n        return [fuzzy_get(possibilities, lbl) for lbl in label]", "docstring": "Find the closest matching column label, key, or integer indexed value\n\nReturns:\ntype(label): sequence of immutable objects corresponding to best matches to each object in label\nif label is an int returns the object (value) in the list of possibilities at that index\nif label is a str returns the closest str match in possibilities\n\n>>> from collections import OrderedDict as odict\n>>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b')\n'B'\n>>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2')\n'2'\n>>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1)\n'2'\n>>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1)\n'5'\n>>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4)\n0", "source": "codesearchnet"}
{"code": "def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):\n    if (not pdb_file_type):\n        pdb_file_type = self.pdb_file_type\n    counter = 0\n    for g in tqdm(self.genes):\n        pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)\n        if pdbs:\n            counter += len(pdbs)\n    log.info('Updated PDB metadata dataframe. See the \"df_pdb_metadata\" attribute for a summary dataframe.')\n    log.info('Saved {} structures total'.format(counter))", "docstring": "Download ALL mapped experimental structures to each protein's structures directory.\n\nArgs:\noutdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is\ndesired\npdb_file_type (str): Type of PDB file to download, if not already set or other format is desired\nforce_rerun (bool): If files should be re-downloaded if they already exist", "source": "codesearchnet"}
{"code": "def _scrub_method_name(self, method_name):\n    if (method_name not in self._scrubbed_method_names):\n        self._scrubbed_method_names[method_name] = scrub_method_name(method_name)\n    return self._scrubbed_method_names[method_name]", "docstring": "Scrubs a method name, returning result from local cache if available.\n\nThis method wraps fitparse.utils.scrub_method_name and memoizes results,\nas scrubbing a method name is expensive.\n\nArgs:\nmethod_name: Method name to scrub.\n\nReturns:\nScrubbed method name.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, name, result, paren=False):\n        \n        CodeEntity.__init__(self, scope, parent)\n        self.name = name\n        self.result = result\n        self.parenthesis = paren", "docstring": "Constructor for expressions.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the expression in the program.\nresult (str): The return type of the expression in the program.\n\nKwargs:\nparen (bool): Whether the expression is enclosed in parentheses.", "source": "juraj-google-style"}
{"code": "def wrap_embedded_keyvalue(self, data):\n        \n        if data is not None:\n            try:\n                data = u'{}'.format(data)\n                \n            except UnicodeEncodeError:\n                \n                pass\n\n            variables = []\n            for v in re.finditer(self._vars_keyvalue_embedded, data):\n                variables.append(v.group(0))\n\n            for var in set(variables):  \n                \n                variable_string = re.search(self._variable_parse, var).group(0)\n                \n                \n                data = data.replace(var, '\": \"{}\"'.format(variable_string))\n        return data", "docstring": "Wrap keyvalue embedded variable in double quotes.\n\nArgs:\ndata (string): The data with embedded variables.\n\nReturns:\n(string): Results retrieved from DB", "source": "juraj-google-style"}
{"code": "def _remove(self, removeList, selfValue):\n        \n        for removeValue in removeList:\n            print(removeValue, removeList)\n            \n            removeEverything(removeValue, selfValue)", "docstring": "Remove elements from a list by matching the elements in the other list.\n\nThis method only looks inside current instance's value, not recursive.\nThere is no need for a recursive one anyway.\nMatch by == operation.\n\nArgs:\nremoveList (list): The list of matching elements.\nselfValue (list): The list you remove value from. Usually ``self.value``", "source": "juraj-google-style"}
{"code": "def valid(self, value, include_name=True):\n\t\t\n\n\t\t\n\t\treturn super(Tree, self).valid(value, include_name and [self._name] or [])", "docstring": "Valid\n\nChecks if a value is valid based on the instance's values\n\nArguments:\nvalue {mixed} -- The value to validate\ninclude_name {bool} -- If true, Tree's name will be prepended to\nall error keys\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def position(self, partition):\n    if (not isinstance(partition, TopicPartition)):\n        raise TypeError('partition must be a TopicPartition namedtuple')\n    assert self._subscription.is_assigned(partition), 'Partition is not assigned'\n    offset = self._subscription.assignment[partition].position\n    if (offset is None):\n        self._update_fetch_positions([partition])\n        offset = self._subscription.assignment[partition].position\n    return offset", "docstring": "Get the offset of the next record that will be fetched\n\nArguments:\npartition (TopicPartition): Partition to check\n\nReturns:\nint: Offset", "source": "codesearchnet"}
{"code": "def merge(self, other):\n    for attr in self.attrs:\n        if (not (getattr(other, attr, None) is None)):\n            setattr(self, attr, getattr(other, attr))\n    if other.raw:\n        if (not self.raw):\n            self.raw = {}\n        self.raw.update(other.raw)", "docstring": "Copy properties from other into self, skipping ``None`` values.  Also merges the raw data.\n\nArgs:\nother (SkypeObj): second object to copy fields from", "source": "codesearchnet"}
{"code": "def GetOutputClass(cls, name):\n    \n    if not isinstance(name, py2to3.STRING_TYPES):\n      raise ValueError('Name attribute is not a string.')\n\n    name = name.lower()\n    if name not in cls._output_classes:\n      raise KeyError(\n          'Name: [{0:s}] not registered as an output module.'.format(name))\n\n    return cls._output_classes[name]", "docstring": "Retrieves the output class for a specific name.\n\nArgs:\nname (str): name of the output module.\n\nReturns:\ntype: output module class.\n\nRaises:\nKeyError: if there is no output class found with the supplied name.\nValueError: if name is not a string.", "source": "juraj-google-style"}
{"code": "def getexcfo(e):\n    \n    tb = sys.exc_info()[2]\n    tbinfo = traceback.extract_tb(tb)\n    path, line, name, src = '', '', '', None\n    if tbinfo:\n        path, line, name, sorc = tbinfo[-1]\n    retd = {\n        'msg': str(e),\n        'file': path,\n        'line': line,\n        'name': name,\n        'src': src\n    }\n\n    if isinstance(e, s_exc.SynErr):\n        retd['syn:err'] = e.errinfo\n\n    return (e.__class__.__name__, retd)", "docstring": "Get an err tufo from an exception.\n\nArgs:\ne (Exception): An Exception (or Exception subclass).\n\nNotes:\nThis can be called outside of the context of an exception handler,\nhowever details such as file, line, function name and source may be\nmissing.\n\nReturns:\n((str, dict)):", "source": "juraj-google-style"}
{"code": "def functional_from_config(cls, config, custom_objects=None):\n    created_layers = {}\n    unprocessed_nodes = {}\n\n    def add_unprocessed_node(layer, node_data):\n        \n        if layer not in unprocessed_nodes:\n            unprocessed_nodes[layer] = [node_data]\n        else:\n            unprocessed_nodes[layer].append(node_data)\n\n    def process_node(layer, node_data):\n        \n        args, kwargs = deserialize_node(node_data, created_layers)\n        layer(*args, **kwargs)\n\n    def process_layer(layer_data):\n        \n        layer_name = layer_data['name']\n        if 'module' not in layer_data:\n            layer = saving_utils.model_from_config(layer_data, custom_objects=custom_objects)\n        else:\n            layer = serialization_lib.deserialize_keras_object(layer_data, custom_objects=custom_objects)\n        if not isinstance(layer, Operation):\n            raise ValueError(f'Unexpected object from deserialization, expected a layer or operation, got a {type(layer)}')\n        created_layers[layer_name] = layer\n        inbound_nodes_data = layer_data['inbound_nodes']\n        for node_data in inbound_nodes_data:\n            add_unprocessed_node(layer, node_data)\n    functional_config = {}\n    for key in ['layers', 'input_layers', 'output_layers']:\n        functional_config[key] = config.pop(key)\n    for key in ['name', 'trainable']:\n        if key in config:\n            functional_config[key] = config.pop(key)\n        else:\n            functional_config[key] = None\n    for layer_data in functional_config['layers']:\n        process_layer(layer_data)\n    while unprocessed_nodes:\n        for layer_data in functional_config['layers']:\n            layer = created_layers[layer_data['name']]\n            if layer in unprocessed_nodes:\n                node_data_list = unprocessed_nodes[layer]\n                node_index = 0\n                while node_index < len(node_data_list):\n                    node_data = node_data_list[node_index]\n                    try:\n                        process_node(layer, node_data)\n                    except IndexError:\n                        break\n                    node_index += 1\n                if node_index < len(node_data_list):\n                    unprocessed_nodes[layer] = node_data_list[node_index:]\n                else:\n                    del unprocessed_nodes[layer]\n    name = functional_config['name']\n    trainable = functional_config['trainable']\n\n    def get_tensor(layer_name, node_index, tensor_index):\n        assert layer_name in created_layers\n        layer = created_layers[layer_name]\n        if isinstance(layer, Functional):\n            node_index -= 1\n        layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n        return layer_output_tensors[tensor_index]\n\n    def map_tensors(tensors):\n        if isinstance(tensors, list) and len(tensors) == 3 and isinstance(tensors[0], str):\n            return get_tensor(*tensors)\n        if isinstance(tensors, dict):\n            return {k: map_tensors(v) for k, v in tensors.items()}\n        if isinstance(tensors, tuple):\n            return tuple([map_tensors(v) for v in tensors])\n        return [map_tensors(v) for v in tensors]\n    input_tensors = map_tensors(functional_config['input_layers'])\n    output_tensors = map_tensors(functional_config['output_layers'])\n    return cls(inputs=input_tensors, outputs=output_tensors, name=name, trainable=trainable, **config)", "docstring": "Instantiates a 
Functional model from its config (from `get_config()`).\n\nArgs:\ncls: Class of the model, e.g. a custom subclass of `Model`.\nconfig: Output of `get_config()` for the original model instance.\ncustom_objects: Optional dict of custom objects.\n\nReturns:\nAn instance of `cls`.", "source": "github-repos"}
{"code": "def get_security_group_id(name='', env='', region=''):\n    \n    vpc_id = get_vpc_id(env, region)\n\n    LOG.info('Find %s sg in %s [%s] in %s', name, env, region, vpc_id)\n\n    url = '{0}/securityGroups/{1}/{2}/{3}?vpcId={4}'.format(API_URL, env, region, name, vpc_id)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert response.ok\n\n    result = response.json()\n    try:\n        security_group_id = result['id']\n    except KeyError:\n        msg = 'Security group ({0}) not found'.format(name)\n        raise SpinnakerSecurityGroupError(msg)\n\n    LOG.info('Found: %s', security_group_id)\n    return security_group_id", "docstring": "Get a security group ID.\n\nArgs:\nname (str): Security Group name to find.\nenv (str): Deployment environment to search.\nregion (str): AWS Region to search.\n\nReturns:\nstr: ID of Security Group, e.g. sg-xxxx.\n\nRaises:\nAssertionError: Call to Gate API was not successful.\nSpinnakerSecurityGroupError: Security Group _name_ was not found for\n_env_ in _region_.", "source": "juraj-google-style"}
{"code": "def stage_redis(self, variable, data):\n        \n        if isinstance(data, int):\n            data = str(data)\n        \n        if variable.endswith('Binary'):\n            try:\n                data = base64.b64decode(data)\n            except binascii.Error:\n                msg = 'The Binary staging data for variable {} is not properly base64 encoded.'\n                msg = msg.format(variable)\n                sys.exit(msg)\n        elif variable.endswith('BinaryArray'):\n            if isinstance(data, string_types):\n                data = json.loads(data)\n\n            try:\n                \n                decoded_data = []\n                for d in data:\n                    d_decoded = base64.b64decode(d)\n                    decoded_data.append(d_decoded)\n                data = decoded_data\n            except binascii.Error:\n                msg = 'The BinaryArray staging data for variable {} is not properly base64 encoded.'\n                msg = msg.format(variable)\n                sys.exit(msg)\n        self.log.info(u'[stage] Creating variable {}'.format(variable))\n        self.tcex.playbook.create(variable, data)", "docstring": "Stage data in Redis.\n\nArgs:\nvariable (str): The Redis variable name.\ndata (dict|list|str): The data to store in Redis.", "source": "juraj-google-style"}
{"code": "def id_by_index(index, resources):\n        \n        if index < 0 or index >= len(resources):\n            return ''\n\n        try:\n            return resources[index].header_signature\n        except AttributeError:\n            return resources[index].address", "docstring": "Helper method to fetch the id or address of a resource by its index\n\nArgs:\nresources (list of objects): The resources to be paginated\nindex (integer): The index of the target resource\n\nReturns:\nstr: The address or header_signature of the resource,\nreturns an empty string if not found", "source": "juraj-google-style"}
{"code": "def __init__(self, base_fd, handlers, pathspec=None, progress_callback=None):\n    \n    del pathspec  \n    self.base_fd = base_fd\n    self.progress_callback = progress_callback\n    self._handlers = handlers\n    if base_fd is None:\n      self.pathspec = rdf_paths.PathSpec()\n    else:\n      \n      self.pathspec = base_fd.pathspec.Copy()\n    self.metadata = {}", "docstring": "Constructor.\n\nArgs:\nbase_fd: A handler to the predecessor handler.\nhandlers: A mapping from rdf_paths.PathSpec.PathType to classes\nimplementing VFSHandler.\npathspec: The pathspec to open.\nprogress_callback: A callback to indicate that the open call is still\nworking but needs more time.\n\nRaises:\nIOError: if this handler can not be instantiated over the\nrequested path.", "source": "juraj-google-style"}
{"code": "def _merge_input_ids_with_input_values(self, input_ids: Optional[torch.Tensor]=None, input_values: Optional[torch.Tensor]=None, input_values_cutoffs: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None) -> Optional[torch.Tensor]:\n    inputs_embeds = self.embed_text_tokens(input_ids)\n    if input_values is not None:\n        input_values_cutoffs = nn.functional.pad(input_values_cutoffs, (1, 0))\n        audio_lengths = input_values_cutoffs[input_values_cutoffs >= 0].diff()\n        audio_lengths = audio_lengths[audio_lengths > 0]\n        input_values_mask = torch.arange(input_values_cutoffs.max(), device=input_values.device).expand(len(audio_lengths), -1)\n        input_values_mask = input_values_mask < audio_lengths.unsqueeze(1)\n        with torch.no_grad():\n            audio_tokens_list = []\n            for batch_input_values, batch_input_values_cutoffs in zip(input_values, input_values_cutoffs):\n                batch_input_values_cutoffs = batch_input_values_cutoffs[batch_input_values_cutoffs >= 0]\n                for i in range(batch_input_values_cutoffs.shape[0] - 1):\n                    start_idx = batch_input_values_cutoffs[i]\n                    end_idx = batch_input_values_cutoffs[i + 1]\n                    audio_batch = batch_input_values[..., start_idx:end_idx]\n                    codec_outputs = self.codec_model.encode(audio_batch.unsqueeze(0))\n                    codebook_ids = codec_outputs.audio_codes.transpose(1, -1)\n                    audio_tokens_list.append(codebook_ids[0])\n            max_audio_frames = max((el.shape[0] for el in audio_tokens_list))\n            batched_audio_token_ids = torch.stack([nn.functional.pad(el, (0, 0, 0, max_audio_frames - el.shape[0])) for el in audio_tokens_list])\n            audio_codes_mask = self.codec_model.get_audio_codes_mask(input_values_mask)\n        audio_token_id = self.config.audio_token_id\n        audio_token_mask = input_ids == audio_token_id\n        audio_embeds = self.backbone_model.embed_tokens(batched_audio_token_ids)\n        inputs_embeds[audio_token_mask] = audio_embeds[audio_codes_mask]\n        audio_eos_frame_ids = torch.ones((1, 1, self.config.num_codebooks), device=input_ids.device, dtype=torch.long) * self.config.codebook_eos_token_id\n        audio_eos_embeds = self.backbone_model.embed_tokens(audio_eos_frame_ids).squeeze(1)\n        audio_eos_token_mask = input_ids == self.config.audio_eos_token_id\n        inputs_embeds[audio_eos_token_mask] = audio_eos_embeds.repeat(audio_eos_token_mask.sum(), 1)\n        if labels is not None:\n            labels_expanded = labels.unsqueeze(-1).repeat(1, 1, self.config.num_codebooks)\n            labels_expanded[audio_token_mask] = batched_audio_token_ids[audio_codes_mask]\n            labels_expanded[audio_eos_token_mask] = audio_eos_frame_ids\n            depth_decoder_ignore_frames_idxs = (labels == -101).nonzero(as_tuple=True)\n            labels_expanded[depth_decoder_ignore_frames_idxs[0], depth_decoder_ignore_frames_idxs[1], 1:] = -100\n            labels = labels_expanded\n    return {'inputs_embeds': inputs_embeds, 'labels': labels}", "docstring": "Merges the input_ids and input_values to produce a single inputs_embeds tensor:\n1 - Infers the codec model on the input_values to retreive codebook token.\n2 - Embeds codebook tokens and places them at the correct positions in the inputs_embeds tensor.\n3 - If labels are provided, expands them to match codebook dimensions and position the target codebook tokens in the inputs_embeds 
tensor.\n\nArgs:\ninput_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):\nThe input ids to embed.\ninput_values (`torch.Tensor` of shape `(batch_size, channels, audio_sequence_length)`):\nThe audio input values to embed.\ninput_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`):\nThe cutoffs of the audio input values relative to its batch index, padded with -1 when no audio.", "source": "github-repos"}
{"code": "def GetDataStream(self, name, case_sensitive=True):\n    if (not isinstance(name, py2to3.STRING_TYPES)):\n        raise ValueError('Name is not a string.')\n    name_lower = name.lower()\n    matching_data_stream = None\n    for data_stream in self._GetDataStreams():\n        if (data_stream.name == name):\n            return data_stream\n        if ((not case_sensitive) and (data_stream.name.lower() == name_lower)):\n            if (not matching_data_stream):\n                matching_data_stream = data_stream\n    return matching_data_stream", "docstring": "Retrieves a data stream by name.\n\nArgs:\nname (str): name of the data stream.\ncase_sensitive (Optional[bool]): True if the name is case sensitive.\n\nReturns:\nDataStream: a data stream or None if not available.\n\nRaises:\nValueError: if the name is not string.", "source": "codesearchnet"}
{"code": "def get(self):\n    return dict(interfaces=self.interfaces.getall(), instances=self.instances.getall())", "docstring": "Returns the spanning-tree configuration as a dict object\n\nThe dictionary object represents the entire spanning-tree\nconfiguration derived from the nodes running config.  This\nincludes both globally configuration attributes as well as\ninterfaces and instances.  See the StpInterfaces and StpInstances\nclasses for the key/value pair definitions.\n\nNote:\nSee the individual classes for detailed message structures\n\nReturns:\nA Python dictionary object of key/value pairs the represent\nthe entire supported spanning-tree configuration::\n\n{\n\"mode\": [mstp, none],\n\"interfaces\": {...},\n\"instances\": {...}\n}", "source": "codesearchnet"}
{"code": "def AddProcessingOptions(self, argument_group):\n    \n    argument_group.add_argument(\n        '--single_process', '--single-process', dest='single_process',\n        action='store_true', default=False, help=(\n            'Indicate that the tool should run in a single process.'))\n\n    argument_helper_names = ['temporary_directory', 'workers', 'zeromq']\n    if self._CanEnforceProcessMemoryLimit():\n      argument_helper_names.append('process_resources')\n    helpers_manager.ArgumentHelperManager.AddCommandLineArguments(\n        argument_group, names=argument_helper_names)", "docstring": "Adds the processing options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def indent_xml(elem, level=0, more_sibs=False):\n    i = '\\n'\n    pad = '    '\n    if level:\n        i += ((level - 1) * pad)\n    num_kids = len(elem)\n    if num_kids:\n        if ((not elem.text) or (not elem.text.strip())):\n            elem.text = (i + pad)\n            if level:\n                elem.text += pad\n        count = 0\n        for kid in elem:\n            if (kid.tag == 'data'):\n                kid.text = '*DATA*'\n            indent_xml(kid, (level + 1), (count < (num_kids - 1)))\n            count += 1\n        if ((not elem.tail) or (not elem.tail.strip())):\n            elem.tail = i\n            if more_sibs:\n                elem.tail += pad\n    elif (level and ((not elem.tail) or (not elem.tail.strip()))):\n        elem.tail = i\n        if more_sibs:\n            elem.tail += pad", "docstring": "Indent an xml element object to prepare for pretty printing.\n\nTo avoid changing the contents of the original Element, it is\nrecommended that a copy is made to send to this function.\n\nArgs:\nelem: Element to indent.\nlevel: Int indent level (default is 0)\nmore_sibs: Bool, whether to anticipate further siblings.", "source": "codesearchnet"}
{"code": "def AddNewSpecification(self, identifier):\n    \n    if identifier in self._format_specifications:\n      raise KeyError(\n          'Format specification {0:s} is already defined in store.'.format(\n              identifier))\n\n    self._format_specifications[identifier] = FormatSpecification(identifier)\n\n    return self._format_specifications[identifier]", "docstring": "Adds a new format specification.\n\nArgs:\nidentifier (str): format identifier, which should be unique for the store.\n\nReturns:\nFormatSpecification: format specification.\n\nRaises:\nKeyError: if the store already contains a specification with\nthe same identifier.", "source": "juraj-google-style"}
{"code": "def createGroup(self, group, vendorSpecific=None):\n        \n        response = self.createGroupResponse(group, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: createGroupResponse()\n\nArgs:\ngroup:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def __init__(self, pyregf_key, key_path=''):\n    \n    super(REGFWinRegistryKey, self).__init__(key_path=key_path)\n    self._pyregf_key = pyregf_key", "docstring": "Initializes a Windows Registry key object.\n\nArgs:\npyregf_key (pyregf.key): pyregf key object.\nkey_path (Optional[str]): Windows Registry key path.", "source": "juraj-google-style"}
{"code": "class TFBaseModelOutputWithPoolingAndNoAttention(ModelOutput):\n    last_hidden_state: Optional[tf.Tensor] = None\n    pooler_output: Optional[tf.Tensor] = None\n    hidden_states: Optional[Tuple[tf.Tensor, ...]] = None", "docstring": "Base class for model's outputs that also contains a pooling of the last hidden states.\n\nArgs:\nlast_hidden_state (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\nSequence of hidden-states at the output of the last layer of the model.\npooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):\nLast layer hidden-state after a pooling operation on the spatial dimensions.\nhidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for\nthe output of each layer) of shape `(batch_size, num_channels, height, width)`.\n\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def run_step(context):\n    logger.debug('started')\n    context.assert_key_has_value(key='contextClear', caller=__name__)\n    for k in context['contextClear']:\n        logger.debug(f'removing {k} from context')\n        context.pop(k, None)\n        logger.info(f'removed {k} from context')\n    logger.debug('done')", "docstring": "Remove specified keys from context.\n\nArgs:\nContext is a dictionary or dictionary-like.\ncontext['contextClear'] must exist. It's a dictionary.\nWill iterate context['contextClear'] and remove those keys from\ncontext.\n\nFor example, say input context is:\nkey1: value1\nkey2: value2\nkey3: value3\nkey4: value4\ncontextClear:\n- key2\n- key4\n- contextClear\n\nThis will result in return context:\nkey1: value1\nkey3: value3", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):\n    for (root, key, datetime_value) in interface.RecurseKey(top_level):\n        if (not isinstance(datetime_value, datetime.datetime)):\n            continue\n        event_data = plist_event.PlistTimeEventData()\n        event_data.key = key\n        event_data.root = root\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Simple method to exact date values from a Plist.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntop_level (dict[str, object]): plist top-level key.", "source": "codesearchnet"}
{"code": "def CreateWithLock(self, urn, aff4_type, token=None, age=NEWEST_TIME, force_new_version=True, blocking=True, blocking_lock_timeout=10, blocking_sleep_interval=1, lease_time=100):\n    if (not data_store.AFF4Enabled()):\n        raise NotImplementedError('AFF4 data store has been disabled.')\n    transaction = self._AcquireLock(urn, blocking=blocking, blocking_lock_timeout=blocking_lock_timeout, blocking_sleep_interval=blocking_sleep_interval, lease_time=lease_time)\n    return self.Create(urn, aff4_type, mode='rw', token=token, age=age, force_new_version=force_new_version, transaction=transaction)", "docstring": "Creates a new object and locks it.\n\nSimilar to OpenWithLock below, this creates a locked object. The difference\nis that when you call CreateWithLock, the object does not yet have to exist\nin the data store.\n\nArgs:\nurn: The object to create.\naff4_type: The desired type for this object.\ntoken: The Security Token to use for opening this item.\nage: The age policy used to build this object. Only makes sense when mode\nhas \"r\".\nforce_new_version: Forces the creation of a new object in the data_store.\nblocking: When True, wait and repeatedly try to grab the lock.\nblocking_lock_timeout: Maximum wait time when sync is True.\nblocking_sleep_interval: Sleep time between lock grabbing attempts. Used\nwhen blocking is True.\nlease_time: Maximum time the object stays locked. Lock will be considered\nreleased when this time expires.\n\nReturns:\nAn AFF4 object of the desired type and mode.\n\nRaises:\nAttributeError: If the mode is invalid.", "source": "codesearchnet"}
{"code": "def from_config(cls, config):\n    \n    config = config.copy()\n    function_keys = [\n        'kernel_posterior_fn',\n        'kernel_posterior_tensor_fn',\n        'kernel_prior_fn',\n        'kernel_divergence_fn',\n        'bias_posterior_fn',\n        'bias_posterior_tensor_fn',\n        'bias_prior_fn',\n        'bias_divergence_fn',\n    ]\n    for function_key in function_keys:\n      serial = config[function_key]\n      function_type = config.pop(function_key + '_type')\n      if serial is not None:\n        config[function_key] = tfp_layers_util.deserialize_function(\n            serial,\n            function_type=function_type)\n    return cls(**config)", "docstring": "Creates a layer from its config.\n\nThis method is the reverse of `get_config`, capable of instantiating the\nsame layer from the config dictionary.\n\nArgs:\nconfig: A Python dictionary, typically the output of `get_config`.\n\nReturns:\nlayer: A layer instance.", "source": "juraj-google-style"}
{"code": "def HandleNetworkInterfaces(self, result):\n    network_interfaces = self._ExtractInterfaceMetadata(result)\n    if self.network_setup_enabled:\n        self.network_setup.EnableNetworkInterfaces([interface.name for interface in network_interfaces[1:]])\n    for interface in network_interfaces:\n        if self.ip_forwarding_enabled:\n            self.ip_forwarding.HandleForwardedIps(interface.name, interface.forwarded_ips, interface.ip)", "docstring": "Called when network interface metadata changes.\n\nArgs:\nresult: dict, the metadata response with the network interfaces.", "source": "codesearchnet"}
{"code": "def scatter_max(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_max(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", "docstring": "Updates this variable with the max of `tf.IndexedSlices` and itself.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to use as an argument of max with this\nvariable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe scattered maximization has completed.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def join(self, other, *args, **kwarg):\n    event = Event(*args, **kwarg)\n    if self.intersects(other):\n        if self.starts_within(other):\n            event.begin = other.begin\n        else:\n            event.begin = self.begin\n        if self.ends_within(other):\n            event.end = other.end\n        else:\n            event.end = self.end\n        return event\n    raise ValueError(\"Cannot join {} with {}: they don't intersect.\".format(self, other))", "docstring": "Create a new event which covers the time range of two intersecting events\n\nAll extra parameters are passed to the Event constructor.\n\nArgs:\nother: the other event\n\nReturns:\na new Event instance", "source": "codesearchnet"}
{"code": "def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False):\n    if (self.partitions.size == 0):\n        return np.array([[]])\n    if isinstance(indices, dict):\n        dict_indices = indices\n        indices = list(indices.keys())\n    else:\n        dict_indices = None\n    if (not isinstance(indices, list)):\n        indices = [indices]\n    partitions_dict = self._get_dict_of_block_index(axis, indices, ordered=(not keep_remaining))\n    if (not axis):\n        partitions_for_apply = self.partitions.T\n    else:\n        partitions_for_apply = self.partitions\n    if (dict_indices is not None):\n\n        def local_to_global_idx(partition_id, local_idx):\n            if (partition_id == 0):\n                return local_idx\n            if (axis == 0):\n                cumulative_axis = np.cumsum(self.block_widths)\n            else:\n                cumulative_axis = np.cumsum(self.block_lengths)\n            return (cumulative_axis[(partition_id - 1)] + local_idx)\n        if (not keep_remaining):\n            result = np.array([self._apply_func_to_list_of_partitions(func, partitions_for_apply[o_idx], func_dict={i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)] for i_idx in list_to_apply if (i_idx >= 0)}) for (o_idx, list_to_apply) in partitions_dict])\n        else:\n            result = np.array([(partitions_for_apply[i] if (i not in partitions_dict) else self._apply_func_to_list_of_partitions(func, partitions_for_apply[i], func_dict={idx: dict_indices[local_to_global_idx(i, idx)] for idx in partitions_dict[i] if (idx >= 0)})) for i in range(len(partitions_for_apply))])\n    elif (not keep_remaining):\n        result = np.array([self._apply_func_to_list_of_partitions(func, partitions_for_apply[idx], internal_indices=list_to_apply) for (idx, list_to_apply) in partitions_dict])\n    else:\n        result = np.array([(partitions_for_apply[i] if (i not in partitions_dict) else self._apply_func_to_list_of_partitions(func, partitions_for_apply[i], internal_indices=partitions_dict[i])) for i in range(len(partitions_for_apply))])\n    return (self.__constructor__(result.T) if (not axis) else self.__constructor__(result))", "docstring": "Applies a function to select indices.\n\nNote: Your internal function must take a kwarg `internal_indices` for\nthis to work correctly. This prevents information leakage of the\ninternal index to the external representation.\n\nArgs:\naxis: The axis to apply the func over.\nfunc: The function to apply to these indices.\nindices: The indices to apply the function to.\nkeep_remaining: Whether or not to keep the other partitions.\nSome operations may want to drop the remaining partitions and\nkeep only the results.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "codesearchnet"}
{"code": "def bucket(self, experiment, user_id, bucketing_id):\n    if (not experiment):\n        return None\n    if (experiment.groupPolicy in GROUP_POLICIES):\n        group = self.config.get_group(experiment.groupId)\n        if (not group):\n            return None\n        user_experiment_id = self.find_bucket(bucketing_id, experiment.groupId, group.trafficAllocation)\n        if (not user_experiment_id):\n            self.config.logger.info(('User \"%s\" is in no experiment.' % user_id))\n            return None\n        if (user_experiment_id != experiment.id):\n            self.config.logger.info(('User \"%s\" is not in experiment \"%s\" of group %s.' % (user_id, experiment.key, experiment.groupId)))\n            return None\n        self.config.logger.info(('User \"%s\" is in experiment %s of group %s.' % (user_id, experiment.key, experiment.groupId)))\n    variation_id = self.find_bucket(bucketing_id, experiment.id, experiment.trafficAllocation)\n    if variation_id:\n        variation = self.config.get_variation_from_id(experiment.key, variation_id)\n        self.config.logger.info(('User \"%s\" is in variation \"%s\" of experiment %s.' % (user_id, variation.key, experiment.key)))\n        return variation\n    self.config.logger.info(('User \"%s\" is in no variation.' % user_id))\n    return None", "docstring": "For a given experiment and bucketing ID determines variation to be shown to user.\n\nArgs:\nexperiment: Object representing the experiment for which user is to be bucketed.\nuser_id: ID for user.\nbucketing_id: ID to be used for bucketing the user.\n\nReturns:\nVariation in which user with ID user_id will be put in. None if no variation.", "source": "codesearchnet"}
{"code": "def from_np_datetimes(np_datetimes):\n    ordinals = tf.constant(np_datetimes, dtype=tf.int32) + _ORDINAL_OF_1_1_1970\n    return from_ordinals(ordinals, validate=False)", "docstring": "Creates DateTensor from a Numpy array of dtype datetime64.\n\nArgs:\nnp_datetimes: Numpy array of dtype datetime64.\n\nReturns:\nDateTensor object.\n\n#### Example\n\n```python\nimport datetime\nimport numpy as np\n\ndate_tensor_np = np.array(\n[[datetime.date(2019, 3, 25), datetime.date(2020, 6, 2)],\n[datetime.date(2020, 9, 15), datetime.date(2020, 12, 27)]],\ndtype=np.datetime64)\n\ndate_tensor = tff.datetime.dates_from_np_datetimes(date_tensor_np)\n```", "source": "github-repos"}
{"code": "def read_from_hdx(identifier, configuration=None):\n        \n        \n\n        showcase = Showcase(configuration=configuration)\n        result = showcase._load_from_hdx('showcase', identifier)\n        if result:\n            return showcase\n        return None", "docstring": "Reads the showcase given by identifier from HDX and returns Showcase object\n\nArgs:\nidentifier (str): Identifier of showcase\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Showcase]: Showcase object if successful read, None if not", "source": "juraj-google-style"}
{"code": "def fit(self, **kwargs):\n    if (self.fit_method is not None):\n        fit_args = self._fit_params.copy()\n        fit_args.update(kwargs)\n        getattr(self.instance, self.fit_method)(**fit_args)", "docstring": "Call the fit method of the primitive.\n\nThe given keyword arguments will be passed directly to the `fit`\nmethod of the primitive instance specified in the JSON annotation.\n\nIf any of the arguments expected by the produce method had been\ngiven during the MLBlock initialization, they will be passed as well.\n\nIf the fit method was not specified in the JSON annotation, or if\nthe primitive is a simple function, this will be a noop.\n\nArgs:\n**kwargs: Any given keyword argument will be directly passed\nto the primitive fit method.\n\nRaises:\nTypeError: A `TypeError` might be raised if any argument not\nexpected by the primitive fit method is given.", "source": "codesearchnet"}
{"code": "def _get_candidates(self):\n    candidates = np.where((self.dpp_vector == 0))\n    return (None if (len(candidates[0]) == 0) else candidates[0])", "docstring": "Finds the pipelines that are not yet tried.\n\nReturns:\nnp.array: Indices corresponding to columns in ``dpp_matrix`` that haven't been tried on\n``X``. ``None`` if all pipelines have been tried on X.", "source": "codesearchnet"}
{"code": "def bounter(size_mb=None, need_iteration=True, need_counts=True, log_counting=None):\n    if (not need_counts):\n        return CardinalityEstimator()\n    if (size_mb is None):\n        raise ValueError('Max size in MB must be provided.')\n    if need_iteration:\n        if log_counting:\n            raise ValueError('Log counting is only supported with CMS implementation (need_iteration=False).')\n        return HashTable(size_mb=size_mb)\n    else:\n        return CountMinSketch(size_mb=size_mb, log_counting=log_counting)", "docstring": "Factory method for bounter implementation.\n\nArgs:\nsize_mb (int): Desired memory footprint of the counter.\nneed_iteration (Bool): With `True`, create a `HashTable` implementation which can\niterate over inserted key/value pairs.\nWith `False`, create a `CountMinSketch` implementation which performs better in limited-memory scenarios,\nbut does not support iteration over elements.\nneed_counts (Bool): With `True`, construct the structure normally. With `False`, ignore all remaining\nparameters and create a minimalistic cardinality counter based on hyperloglog which only takes 64KB memory.\nlog_counting (int): Counting to use with `CountMinSketch` implementation. Accepted values are\n`None` (default counting with 32-bit integers), 1024 (16-bit), 8 (8-bit).\nSee `CountMinSketch` documentation for details.\nRaise ValueError if not `None `and `need_iteration` is `True`.", "source": "codesearchnet"}
{"code": "def mouse_event_callback(self, window, xpos, ypos):\n        \n        \n        self.example.mouse_position_event(xpos, ypos)", "docstring": "Mouse event callback from glfw.\nTranslates the events forwarding them to :py:func:`cursor_event`.\n\nArgs:\nwindow: The window\nxpos: viewport x pos\nypos: viewport y pos", "source": "juraj-google-style"}
{"code": "def _modeIsValid(self, mode):\n    try:\n        return (mode in self.modes.keys())\n    except AttributeError as e:\n        if (mode in self.isValidMode.keys()):\n            if (mode in self.isValidMode.keys()):\n                return True\n    return False", "docstring": "Verification of whether the mode is a correct option to be used.\n\nArgs:\n-----\nmode: Mode to be executed.\n\nReturn:\n-------\nTrue if the mode exists in the three main folders.", "source": "codesearchnet"}
{"code": "def _build_url_filters(cls, session: AppSession):\n    args = session.args\n    filters = [(HTTPSOnlyFilter() if args.https_only else SchemeFilter()), RecursiveFilter(enabled=args.recursive, page_requisites=args.page_requisites), FollowFTPFilter(follow=args.follow_ftp)]\n    if args.no_parent:\n        filters.append(ParentFilter())\n    if (args.domains or args.exclude_domains):\n        filters.append(BackwardDomainFilter(args.domains, args.exclude_domains))\n    if (args.hostnames or args.exclude_hostnames):\n        filters.append(HostnameFilter(args.hostnames, args.exclude_hostnames))\n    if args.tries:\n        filters.append(TriesFilter(args.tries))\n    if ((args.level and args.recursive) or args.page_requisites_level):\n        filters.append(LevelFilter(args.level, inline_max_depth=args.page_requisites_level))\n    if (args.accept_regex or args.reject_regex):\n        filters.append(RegexFilter(args.accept_regex, args.reject_regex))\n    if (args.include_directories or args.exclude_directories):\n        filters.append(DirectoryFilter(args.include_directories, args.exclude_directories))\n    if (args.accept or args.reject):\n        filters.append(BackwardFilenameFilter(args.accept, args.reject))\n    return filters", "docstring": "Create the URL filter instances.\n\nReturns:\nA list of URL filter instances", "source": "codesearchnet"}
{"code": "def CheckAddressState(self, script_hash):\n        \n        for key, contract in self._contracts.items():\n            if contract.ScriptHash.ToBytes() == script_hash.ToBytes():\n                return AddressState.InWallet\n        for watch in self._watch_only:\n            if watch == script_hash:\n                return AddressState.InWallet | AddressState.WatchOnly\n        return AddressState.NoState", "docstring": "Determine the address state of the provided script hash.\n\nArgs:\nscript_hash (UInt160): a script hash to determine the address state of.\n\nReturns:\nAddressState: the address state.", "source": "juraj-google-style"}
{"code": "def convert(self):\n    saved_model_convert_result = self._convert_as_saved_model()\n    if saved_model_convert_result:\n        return saved_model_convert_result\n    return super(TFLiteKerasModelConverter, self).convert()", "docstring": "Converts a Keras model based on instance variables.\n\nReturns:\nThe converted data in serialized format, either a TFLite Flatbuffer or\na Graphviz graph depending on value in `output_format`.\n\nRaises:\nValueError:\nInput shape is not specified.\nNone value for dimension in input_tensor.", "source": "github-repos"}
{"code": "def clean_registration_ids(self, registration_ids=[]):\n        \n        valid_registration_ids = []\n        for registration_id in registration_ids:\n            details = self.registration_info_request(registration_id)\n            if details.status_code == 200:\n                valid_registration_ids.append(registration_id)\n        return valid_registration_ids", "docstring": "Checks registration ids and excludes inactive ids\n\nArgs:\nregistration_ids (list, optional): list of ids to be cleaned\n\nReturns:\nlist: cleaned registration ids", "source": "juraj-google-style"}
{"code": "def GetKeyByPath(self, key_path):\n    \n    key_path_upper = key_path.upper()\n    if key_path_upper.startswith(self._key_path_prefix_upper):\n      relative_key_path = key_path[self._key_path_prefix_length:]\n    elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):\n      relative_key_path = key_path\n      key_path = ''.join([self._key_path_prefix, key_path])\n    else:\n      return None\n\n    try:\n      regf_key = self._regf_file.get_key_by_path(relative_key_path)\n    except IOError:\n      regf_key = None\n    if not regf_key:\n      return None\n\n    return REGFWinRegistryKey(regf_key, key_path=key_path)", "docstring": "Retrieves the key for a specific path.\n\nArgs:\nkey_path (str): Windows Registry key path.\n\nReturns:\nWinRegistryKey: Registry key or None if not available.", "source": "juraj-google-style"}
{"code": "def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view):\n    used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)\n    events = ''\n    if ((process_status.number_of_consumed_events is not None) and (process_status.number_of_consumed_events_delta is not None)):\n        events = '{0:d} ({1:d})'.format(process_status.number_of_consumed_events, process_status.number_of_consumed_events_delta)\n    event_tags = ''\n    if ((process_status.number_of_produced_event_tags is not None) and (process_status.number_of_produced_event_tags_delta is not None)):\n        event_tags = '{0:d} ({1:d})'.format(process_status.number_of_produced_event_tags, process_status.number_of_produced_event_tags_delta)\n    reports = ''\n    if ((process_status.number_of_produced_reports is not None) and (process_status.number_of_produced_reports_delta is not None)):\n        reports = '{0:d} ({1:d})'.format(process_status.number_of_produced_reports, process_status.number_of_produced_reports_delta)\n    table_view.AddRow([process_status.identifier, process_status.pid, process_status.status, used_memory, events, event_tags, reports])", "docstring": "Adds an analysis process status table row.\n\nArgs:\nprocess_status (ProcessStatus): processing status.\ntable_view (CLITabularTableView): table view.", "source": "codesearchnet"}
{"code": "def crop_image_to_patches(self, images: 'torch.Tensor', min_patches: int, max_patches: int, use_thumbnail: bool=True, patch_size: Optional[Union[Tuple, int, dict]]=None, interpolation: Optional['F.InterpolationMode']=None):\n    patch_size_height, patch_size_width = (patch_size.height, patch_size.width)\n    original_height, original_width = images.shape[-2:]\n    num_columns, num_rows = get_optimal_tiled_canvas((original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches)\n    target_width = patch_size_width * num_columns\n    target_height = patch_size_height * num_rows\n    num_blocks = num_columns * num_rows\n    resized_image = self.resize(images, SizeDict(height=target_height, width=target_width), interpolation=interpolation)\n    processed_images = []\n    for i in range(num_blocks):\n        column = i % num_columns\n        row = i \n        box = (column * patch_size_width, row * patch_size_height, (column + 1) * patch_size_width, (row + 1) * patch_size_height)\n        patch_image = resized_image[..., box[1]:box[3], box[0]:box[2]]\n        processed_images.append(patch_image)\n    if use_thumbnail and len(processed_images) != 1:\n        thumbnail_img = self.resize(images, patch_size, interpolation=interpolation)\n        processed_images.append(thumbnail_img)\n    processed_images = torch.stack(processed_images, dim=0).transpose(0, 1).contiguous()\n    return processed_images", "docstring": "Crop the images to patches and return a list of cropped images.\nThe number of patches and their grid arrangement are determined by the original image size,\nthe target patch size and the minimum and maximum number of patches.\nThe aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.\n\nArgs:\nimages (`torch.Tensor`):\nThe images to be cropped.\nmin_patches (`int`):\nThe minimum number of patches to be extracted from the image.\nmax_patches (`int`):\nThe maximum number of patches to be extracted from the image.\nuse_thumbnail (`bool`, *optional*, defaults to `True`):\nWhether to add a thumbnail image to the list of cropped patches.\npatch_size (`int`, `Tuple[int, int]`, `dict`, *optional*):\nThe size of the output patches.\nThe format of the image data. If `None`, the format is inferred from the input image.\n\nReturns:\nList[`PIL.Image.Image`] or List[np.ndarray]: The list of cropped images.", "source": "github-repos"}
{"code": "def get_parameter_bounds(self, include_frozen=False):\n        \n        if include_frozen:\n            return self.parameter_bounds\n        return list(p\n                    for p, f in zip(self.parameter_bounds, self.unfrozen_mask)\n                    if f)", "docstring": "Get a list of the parameter bounds\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "juraj-google-style"}
{"code": "def do_hook_actions(self, actions, hook_type):\n    logger.log_debug('call {} hook actions.'.format(hook_type))\n    for action in actions:\n        if (isinstance(action, dict) and (len(action) == 1)):\n            (var_name, hook_content) = list(action.items())[0]\n            hook_content_eval = self.session_context.eval_content(hook_content)\n            logger.log_debug('assignment with hook: {} = {} => {}'.format(var_name, hook_content, hook_content_eval))\n            self.session_context.update_test_variables(var_name, hook_content_eval)\n        else:\n            logger.log_debug('call hook function: {}'.format(action))\n            self.session_context.eval_content(action)", "docstring": "call hook actions.\n\nArgs:\nactions (list): each action in actions list maybe in two format.\n\nformat1 (dict): assignment, the value returned by hook function will be assigned to variable.\n{\"var\": \"${func()}\"}\nformat2 (str): only call hook functions.\n${func()}\n\nhook_type (enum): setup/teardown", "source": "codesearchnet"}
{"code": "def __dir__() -> list[str]:\n    return ['__all__', 'LAZY_MODULES', 'print_current_imports']", "docstring": "`lazy_imports` public API.\n\nBecause `globals()` contains hundreds of symbols, we overwrite `dir(module)`\nto avoid poluting the namespace during auto-completion.\n\nReturns:\npublic symbols", "source": "github-repos"}
{"code": "def reqs(amend: bool=False, stage: bool=False):\n    changed_files = CTX.repo.changed_files()\n    if (('requirements.txt' in changed_files) or ('requirements-dev.txt' in changed_files)):\n        LOGGER.error('Requirements have changed; cannot update them')\n        sys.exit((- 1))\n    _write_reqs(amend, stage)", "docstring": "Write requirements files\n\nArgs:\namend: amend last commit with changes\nstage: stage changes", "source": "codesearchnet"}
{"code": "def _transform_col(self, x, i):\n    labels = self.label_encoder._transform_col(x, i)\n    label_max = self.label_encoder.label_maxes[i]\n    index = np.array(range(len(labels)))\n    i = index[(labels > 0)]\n    j = (labels[(labels > 0)] - 1)\n    if (len(i) > 0):\n        return sparse.coo_matrix((np.ones_like(i), (i, j)), shape=(x.shape[0], label_max))\n    else:\n        return None", "docstring": "Encode one categorical column into sparse matrix with one-hot-encoding.\n\nArgs:\nx (pandas.Series): a categorical column to encode\ni (int): column index\n\nReturns:\nX (scipy.sparse.coo_matrix): sparse matrix encoding a categorical\nvariable into dummy variables", "source": "codesearchnet"}
{"code": "def _gen_sentence(self, assetid_body_tuple):\n    (asset_id, body) = assetid_body_tuple\n    text = self._process(body)\n    sentence = LabeledSentence(text, labels=[('DOC_%s' % str(asset_id))])\n    return sentence", "docstring": "Takes an assetid_body_tuple and returns a Doc2Vec LabeledSentence\n\nArgs:\nassetid_body_tuple (tuple): (assetid, bodytext) pair", "source": "codesearchnet"}
{"code": "def use_spec(self, spec: DNASpec) -> 'DNA':\n    if not isinstance(spec, DNASpec):\n        raise ValueError(f\"Argument 'spec' must be a `pg.DNASpec` object. Encountered: {spec!r}.\")\n    if self._spec is spec:\n        return self\n\n    def _use_spec_for_child_choices(spec: DNASpec, children: List[DNA]):\n        \n        assert spec.is_categorical, spec\n        if spec.num_choices != len(children):\n            raise ValueError(f'Number of choices ({spec.num_choices}) does not match with the number of child values (len(children)). Spec: {spec!r}, Children: {children!r}.')\n        for i, child in enumerate(children):\n            subchoice = spec.subchoice(i)\n            child.use_spec(subchoice)\n        child_values = [c.value for c in children]\n        if spec.sorted and sorted(child_values) != child_values:\n            raise ValueError(f'Child values {child_values!r} are not sorted. Spec: {spec!r}.')\n        if spec.distinct and len(set(child_values)) != len(child_values):\n            raise ValueError(f'Child values {child_values!r} are not distinct. Spec: {spec!r}.')\n    while spec.is_space and len(spec.elements) == 1:\n        spec = spec.elements[0]\n    if spec.is_space:\n        if self.value is not None:\n            raise ValueError(f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')\n        if len(spec.elements) != len(self.children):\n            raise ValueError(f'Length of DNA child values ({len(self.children)}) is different from the number of elements ({len(spec.elements)}) in Spec: {spec!r}.')\n        for i, elem_spec in enumerate(spec.elements):\n            self.children[i].use_spec(elem_spec)\n    elif spec.is_categorical:\n        if spec.num_choices == 1:\n            if not isinstance(self.value, int):\n                raise ValueError(f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')\n            if self.value >= len(spec.candidates):\n                raise ValueError(f'Value of DNA is out of range according to the DNA spec. Value: {self.value}, Spec: {spec!r}.')\n            chosen_candidate = spec.candidates[self.value]\n            assert chosen_candidate.is_space, chosen_candidate\n            if not chosen_candidate.elements and self.children:\n                raise ValueError(f'There is no DNA spec for child DNA values. 
Child values: {self.children}.')\n            if len(chosen_candidate.elements) > 1:\n                if len(chosen_candidate.elements) != len(self.children):\n                    raise ValueError(f'Number of elements in child templates ({len(chosen_candidate.elements)}) does not match with the length of children ({len(self.children)}) from DNA: {self!r}, Spec: {chosen_candidate}.')\n                for i, elem_spec in enumerate(chosen_candidate.elements):\n                    self.children[i].use_spec(elem_spec)\n            elif len(chosen_candidate.elements) == 1:\n                sub_spec = chosen_candidate\n                while sub_spec.is_space and len(sub_spec.elements) == 1:\n                    sub_spec = sub_spec.elements[0]\n                if sub_spec.is_numerical or sub_spec.is_custom_decision_point:\n                    if len(self.children) != 1:\n                        raise ValueError(f'Encountered more than 1 value.Child value: {self.children}, Spec: {sub_spec}.')\n                    self.children[0].use_spec(sub_spec)\n                else:\n                    assert sub_spec.is_categorical, sub_spec\n                    _use_spec_for_child_choices(sub_spec, self.children)\n        else:\n            if self.value is not None:\n                raise ValueError(f'Cannot apply multi-choice DNA spec on value {self.value}: {spec!r}.')\n            _use_spec_for_child_choices(spec, self.children)\n    elif spec.is_numerical:\n        if not isinstance(self.value, float):\n            raise ValueError(f'DNA value type mismatch. Value: {self.value}, Spec: {spec!r}.')\n        if self.value < spec.min_value:\n            raise ValueError(f'DNA value should be no less than {spec.min_value}. Encountered {self.value}, Spec: {spec!r}.')\n        if self.value > spec.max_value:\n            raise ValueError(f'DNA value should be no greater than {spec.max_value}. Encountered {self.value}, Spec: {spec!r}.')\n    else:\n        assert spec.is_custom_decision_point, spec\n        if not isinstance(self.value, str):\n            raise ValueError(f'DNA value type mismatch, Value: {self.value!r}, Spec: {spec!r}.')\n    self._spec = spec\n    return self", "docstring": "Use a DNA spec for this node and children recursively.\n\nArgs:\nspec: DNA spec.\n\nReturns:\nSelf.\n\nRaises:\nValueError: current DNA tree does not conform to the DNA spec.", "source": "github-repos"}
{"code": "def data_file(file_fmt, info=None, **kwargs):\n    if isinstance(info, dict):\n        kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()\n        kwargs.update(info)\n    return utils.fstr(fmt=file_fmt, **kwargs)", "docstring": "Data file name for given infomation\n\nArgs:\nfile_fmt: file format in terms of f-strings\ninfo: dict, to be hashed and then pass to f-string using 'hash_key'\nthese info will also be passed to f-strings\n**kwargs: arguments for f-strings\n\nReturns:\nstr: data file name", "source": "codesearchnet"}
{"code": "def _SkipFieldMessage(tokenizer):\n  \n\n  if tokenizer.TryConsume('<'):\n    delimiter = '>'\n  else:\n    tokenizer.Consume('{')\n    delimiter = '}'\n\n  while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):\n    _SkipField(tokenizer)\n\n  tokenizer.Consume(delimiter)", "docstring": "Skips over a field message.\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.", "source": "juraj-google-style"}
{"code": "def get_callable_name(func):\n    try:\n        return meta_util_six.get_funcname(func)\n    except AttributeError:\n        if isinstance(func, type):\n            return repr(func).replace(\"<type '\", '').replace(\"'>\", '')\n        elif hasattr(func, '__name__'):\n            return func.__name__\n        else:\n            raise NotImplementedError(('cannot get func_name of func=%rtype(func)=%r' % (func, type(func))))", "docstring": "Works on must functionlike objects including str, which has no func_name\n\nArgs:\nfunc (function):\n\nReturns:\nstr:\n\nCommandLine:\npython -m utool.util_str --exec-get_callable_name\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> func = len\n>>> result = get_callable_name(func)\n>>> print(result)\nlen", "source": "codesearchnet"}
{"code": "def _CreateLogicalLines(tokens):\n    formatted_tokens = []\n    prev_tok = None\n    for tok in tokens:\n        tok = TokenInfo(*tok)\n        if prev_tok and prev_tok.line.rstrip().endswith('\\\\') and (prev_tok.start[0] < tok.start[0]):\n            ctok = TokenInfo(type=CONTINUATION, string='\\\\', start=(prev_tok.start[0], prev_tok.start[1] + 1), end=(prev_tok.end[0], prev_tok.end[0] + 2), line=prev_tok.line)\n            ctok.lineno = ctok.start[0]\n            ctok.column = ctok.start[1]\n            ctok.value = '\\\\'\n            formatted_tokens.append(format_token.FormatToken(ctok, 'CONTINUATION'))\n        tok.lineno = tok.start[0]\n        tok.column = tok.start[1]\n        tok.value = tok.string\n        formatted_tokens.append(format_token.FormatToken(tok, token.tok_name[tok.type]))\n        prev_tok = tok\n    logical_lines, cur_logical_line = ([], [])\n    depth = 0\n    for tok in formatted_tokens:\n        if tok.type == tokenize.ENDMARKER:\n            break\n        if tok.type == tokenize.NEWLINE:\n            logical_lines.append(logical_line.LogicalLine(depth, cur_logical_line))\n            cur_logical_line = []\n        elif tok.type == tokenize.INDENT:\n            depth += 1\n        elif tok.type == tokenize.DEDENT:\n            depth -= 1\n        elif tok.type == tokenize.NL:\n            pass\n        else:\n            if cur_logical_line and (not tok.type == tokenize.COMMENT) and (cur_logical_line[0].type == tokenize.COMMENT):\n                logical_lines.append(logical_line.LogicalLine(depth, cur_logical_line))\n                cur_logical_line = []\n            cur_logical_line.append(tok)\n    for line in logical_lines:\n        previous = line.first\n        bracket_stack = [previous] if previous.OpensScope() else []\n        for tok in line.tokens[1:]:\n            tok.previous_token = previous\n            previous.next_token = tok\n            previous = tok\n            if tok.OpensScope():\n                bracket_stack.append(tok)\n            elif tok.ClosesScope():\n                bracket_stack[-1].matching_bracket = tok\n                tok.matching_bracket = bracket_stack.pop()\n    return logical_lines", "docstring": "Separate tokens into logical lines.\n\nArguments:\ntokens: (list of tokenizer.TokenInfo) Tokens generated by tokenizer.\n\nReturns:\nA list of LogicalLines.", "source": "github-repos"}
{"code": "def DecompressMessageList(cls, packed_message_list):\n    compression = packed_message_list.compression\n    if (compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED):\n        data = packed_message_list.message_list\n    elif (compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION):\n        try:\n            data = zlib.decompress(packed_message_list.message_list)\n        except zlib.error as e:\n            raise DecodingError(('Failed to decompress: %s' % e))\n    else:\n        raise DecodingError('Compression scheme not supported')\n    try:\n        result = rdf_flows.MessageList.FromSerializedString(data)\n    except rdfvalue.DecodeError:\n        raise DecodingError('RDFValue parsing failed.')\n    return result", "docstring": "Decompress the message data from packed_message_list.\n\nArgs:\npacked_message_list: A PackedMessageList rdfvalue with some data in it.\n\nReturns:\na MessageList rdfvalue.\n\nRaises:\nDecodingError: If decompression fails.", "source": "codesearchnet"}
{"code": "def add_method(self, m, **kwargs):\n        \n        if isinstance(m, types.FunctionType):\n            self['function', id(m)] = m\n        else:\n            f, obj = get_method_vars(m)\n            wrkey = (f, id(obj))\n            self[wrkey] = obj", "docstring": "Add an instance method or function\n\nArgs:\nm: The instance method or function to store", "source": "juraj-google-style"}
{"code": "def map_parser_to_rules(parser_name: str) -> Tuple[TypeParser, RulesMap]:\n    parser: TypeParser\n    usable_rules: dict[str, RuleWrapper]\n    if parser_name == 'parse_str':\n        parser = Parsers['parse_str']\n        usable_rules = TextRules\n    elif parser_name == 'parse_int':\n        parser = Parsers['parse_int']\n        usable_rules = NumericRules\n    elif parser_name == 'parse_float':\n        parser = Parsers['parse_float']\n        usable_rules = NumericRules\n    else:\n        raise ValueError('Invalid parser specified.')\n    parser.__name__ = parser_name\n    return (parser, usable_rules)", "docstring": "Check if the chosen parser exists and return the matching\nparser function and available rule mappings.\n\nArgs:\n* parser: string\n\nReturns: Tuple, with\n* TypeParser: Func that parses Any to Type\n* RulesMap: Dict of rule name to rule wrapper\n\nRaises:\n* ValueError: if non-existent parser name provided", "source": "github-repos"}
{"code": "def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):\n  \n  with tf.variable_scope(name, default_name=\"edge_vectors\"):\n    att_adj_vectors_shape = [num_edge_types, depth]\n    adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)\n    adj_vectors = (\n        tf.get_variable(\n            \"adj_vectors\",\n            att_adj_vectors_shape,\n            initializer=tf.random_normal_initializer(0, depth**-0.5)) *\n        (depth**0.5))\n    \n    \n    \n\n    adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types)\n\n    att_adj_vectors = tf.matmul(\n        tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]),\n        adj_vectors)\n    return tf.reshape(att_adj_vectors,\n                      [adjacency_matrix_shape[0], adjacency_matrix_shape[1],\n                       adjacency_matrix_shape[2], depth])", "docstring": "Gets edge vectors for the edge types in the adjacency matrix.\n\nArgs:\nadjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.\nnum_edge_types: Number of different edge types\ndepth: Number of channels\nname: a string\nReturns:\nA [batch, num_nodes, num_nodes, depth] vector of tensors", "source": "juraj-google-style"}
{"code": "def get_pattern_actual_step(self, patternnumber):\n    _checkPatternNumber(patternnumber)\n    address = _calculateRegisterAddress('actualstep', patternnumber)\n    return self.read_register(address, 0)", "docstring": "Get the 'actual step' parameter for a given pattern.\n\nArgs:\npatternnumber (integer): 0-7\n\nReturns:\nThe 'actual step' parameter (int).", "source": "codesearchnet"}
{"code": "def constant_value(pred):\n    if isinstance(pred, tensor.Tensor):\n        return tensor_util.constant_value(pred)\n    if pred in {0, 1}:\n        return bool(pred)\n    if isinstance(pred, bool):\n        return pred\n    if isinstance(pred, variables.Variable):\n        return None\n    raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. Found instead: %s' % type(pred))", "docstring": "Return the bool value for `pred`, or None if `pred` had a dynamic value.\n\nArgs:\npred: A scalar, either a Python bool or a TensorFlow boolean variable\nor tensor, or the Python integer 1 or 0.\n\nReturns:\nTrue or False if `pred` has a constant boolean value, None otherwise.\n\nRaises:\nTypeError: If `pred` is not a Variable, Tensor or bool, or Python\ninteger 1 or 0.", "source": "github-repos"}
{"code": "def _convert_from_saved_model(self, graph_def):\n    self._save_conversion_params_metric(graph_def)\n    quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n    self._validate_inference_input_output_types(quant_mode)\n    converter_kwargs = {'enable_tflite_resource_variables': self.experimental_enable_resource_variables}\n    converter_kwargs.update(self._get_base_converter_args())\n    converter_kwargs.update(quant_mode.converter_flags())\n    result = _convert_saved_model(**converter_kwargs)\n    return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Helper method that converts saved model.\n\nArgs:\ngraph_def: GraphDef object for the model, used only for stats.\n\nReturns:\nThe converted TFLite model.", "source": "github-repos"}
{"code": "def describe(self, **kwargs):\n    description = {'label': self.label, 'details': inspect.cleandoc(self.details), 'required': self.required, 'many': self.many, 'spec': self.spec, 'default': self.default, 'type': (self.type or 'unspecified')}\n    description.update(kwargs)\n    return description", "docstring": "Describe this parameter instance for purpose of self-documentation.\n\nArgs:\nkwargs (dict): dictionary of additional description items for\nextending default description\n\nReturns:\ndict: dictionary of description items\n\n\nSuggested way for overriding description fields or extending it with\nadditional items is calling super class method with new/overriden\nfields passed as keyword arguments like following:\n\n.. code-block:: python\n\nclass DummyParam(BaseParam):\ndef description(self, **kwargs):\nsuper().describe(is_dummy=True, **kwargs)", "source": "codesearchnet"}
{"code": "def console_from_file(filename: str) -> tcod.console.Console:\n    \n    return tcod.console.Console._from_cdata(\n        lib.TCOD_console_from_file(filename.encode(\"utf-8\"))\n    )", "docstring": "Return a new console object from a filename.\n\nThe file format is automactially determined.  This can load REXPaint `.xp`,\nASCII Paint `.apf`, or Non-delimited ASCII `.asc` files.\n\nArgs:\nfilename (Text): The path to the file, as a string.\n\nReturns: A new :any`Console` instance.", "source": "juraj-google-style"}
{"code": "def untar(file_path, extract_folder=None):\n    file_path = Path(file_path)\n    if (extract_folder is None):\n        extract_folder = file_path.parent\n    extract_folder = Path(extract_folder)\n    tar = tarfile.open(file_path)\n    tar.extractall(extract_folder)\n    tar.close()", "docstring": "Simple tar archive extractor\n\nArgs:\nfile_path: path to the tar file to be extracted\nextract_folder: folder to which the files will be extracted", "source": "codesearchnet"}
{"code": "def whois_emails(self, emails):\n    api_name = 'opendns-whois-emails'\n    fmt_url_path = u'whois/emails/{0}'\n    return self._multi_get(api_name, fmt_url_path, emails)", "docstring": "Calls WHOIS Email end point\n\nArgs:\nemails: An enumerable of string Emails\nReturns:\nA dict of {email: domain_result}", "source": "codesearchnet"}
{"code": "def _gen_rpc_request(self, rpc_id, rpc_func_name, *args, **kwargs):\n    data = {'id': rpc_id, 'method': rpc_func_name, 'params': args}\n    if kwargs:\n        data['kwargs'] = kwargs\n    return json.dumps(data, sort_keys=True)", "docstring": "Generates the JSON RPC request.\n\nIn the generated JSON string, the fields are sorted by keys in ascending\norder.\n\nArgs:\nrpc_id: int, the id of this RPC.\nrpc_func_name: str, the name of the snippet function to execute\non the server.\n*args: any, the positional arguments of the RPC.\n**kwargs: any, the keyword arguments of the RPC.\n\nReturns:\nA string of the JSON RPC request.", "source": "github-repos"}
{"code": "def load_local_config(filename):\n    if (not filename):\n        return imp.new_module('local_pylint_config')\n    module = imp.load_source('local_pylint_config', filename)\n    return module", "docstring": "Loads the pylint.config.py file.\n\nArgs:\nfilename (str): The python file containing the local configuration.\n\nReturns:\nmodule: The loaded Python module.", "source": "codesearchnet"}
{"code": "def threshold(image, block_size=DEFAULT_BLOCKSIZE, mask=None):\n    \n    if mask is None:\n        mask = np.zeros(image.shape[:2], dtype=np.uint8)\n        mask[:] = 255\n\n    if len(image.shape) > 2 and image.shape[2] == 4:\n        image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)\n    res = _calc_block_mean_variance(image, mask, block_size)\n    res = image.astype(np.float32) - res.astype(np.float32) + 255\n    _, res = cv2.threshold(res, 215, 255, cv2.THRESH_BINARY)\n    return res", "docstring": "Applies adaptive thresholding to the given image.\n\nArgs:\nimage: BGRA image.\nblock_size: optional int block_size to use for adaptive thresholding.\nmask: optional mask.\nReturns:\nThresholded image.", "source": "juraj-google-style"}
{"code": "def dict_from_file(filename, key_type=str):\n    \n    mapping = {}\n    with open(filename, 'r') as f:\n        for line in f:\n            items = line.rstrip('\\n').split()\n            assert len(items) >= 2\n            key = key_type(items[0])\n            val = items[1:] if len(items) > 2 else items[1]\n            mapping[key] = val\n    return mapping", "docstring": "Load a text file and parse the content as a dict.\n\nEach line of the text file will be two or more columns splited by\nwhitespaces or tabs. The first column will be parsed as dict keys, and\nthe following columns will be parsed as dict values.\n\nArgs:\nfilename(str): Filename.\nkey_type(type): Type of the dict's keys. str is user by default and\ntype conversion will be performed if specified.\n\nReturns:\ndict: The parsed contents.", "source": "juraj-google-style"}
{"code": "def set_cellpy_datadir(self, directory=None):\n    if (directory is None):\n        self.logger.info('no directory name given')\n        return\n    if (not os.path.isdir(directory)):\n        self.logger.info('directory does not exist')\n        return\n    self.cellpy_datadir = directory", "docstring": "Set the directory containing .hdf5-files.\n\nUsed for setting directory for looking for hdf5-files.\nA valid directory name is required.\n\nArgs:\ndirectory (str): path to hdf5-directory\n\nExample:\n>>> d = CellpyData()\n>>> directory = \"MyData/HDF5\"\n>>> d.set_raw_datadir(directory)", "source": "codesearchnet"}
{"code": "def transpose(self, *args, **kwargs):\n    new_data = self.data.transpose(*args, **kwargs)\n    new_manager = self.__constructor__(new_data, self.columns, self.index)\n    new_manager._is_transposed = (self._is_transposed ^ 1)\n    return new_manager", "docstring": "Transposes this DataManager.\n\nReturns:\nTransposed new DataManager.", "source": "codesearchnet"}
{"code": "def get_transition_chempots(self, element):\n    if (element not in self.elements):\n        raise ValueError('get_transition_chempots can only be called with elements in the phase diagram.')\n    critical_chempots = []\n    for facet in self.facets:\n        chempots = self._get_facet_chempots(facet)\n        critical_chempots.append(chempots[element])\n    clean_pots = []\n    for c in sorted(critical_chempots):\n        if (len(clean_pots) == 0):\n            clean_pots.append(c)\n        elif (abs((c - clean_pots[(- 1)])) > PhaseDiagram.numerical_tol):\n            clean_pots.append(c)\n    clean_pots.reverse()\n    return tuple(clean_pots)", "docstring": "Get the critical chemical potentials for an element in the Phase\nDiagram.\n\nArgs:\nelement: An element. Has to be in the PD in the first place.\n\nReturns:\nA sorted sequence of critical chemical potentials, from less\nnegative to more negative.", "source": "codesearchnet"}
{"code": "def save(self, branch, commit_message, **kwargs):\n        \n        self.branch = branch\n        self.commit_message = commit_message\n        self.file_path = self.file_path.replace('/', '%2F')\n        super(ProjectFile, self).save(**kwargs)", "docstring": "Save the changes made to the file to the server.\n\nThe object is updated to match what the server returns.\n\nArgs:\nbranch (str): Branch in which the file will be updated\ncommit_message (str): Message to send with the commit\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def __format_error(self, error_list_tag):\n    error = {'domain': self.domain(), 'reason': self.reason(), 'message': self.message()}\n    error.update((self.extra_fields() or {}))\n    return {'error': {error_list_tag: [error], 'code': self.status_code(), 'message': self.message()}}", "docstring": "Format this error into a JSON response.\n\nArgs:\nerror_list_tag: A string specifying the name of the tag to use for the\nerror list.\n\nReturns:\nA dict containing the reformatted JSON error response.", "source": "codesearchnet"}
{"code": "async def download_cot_artifacts(chain):\n    upstream_artifacts = chain.task['payload'].get('upstreamArtifacts', [])\n    all_artifacts_per_task_id = get_all_artifacts_per_task_id(chain, upstream_artifacts)\n    mandatory_artifact_tasks = []\n    optional_artifact_tasks = []\n    for (task_id, paths) in all_artifacts_per_task_id.items():\n        for path in paths:\n            coroutine = asyncio.ensure_future(download_cot_artifact(chain, task_id, path))\n            if is_artifact_optional(chain, task_id, path):\n                optional_artifact_tasks.append(coroutine)\n            else:\n                mandatory_artifact_tasks.append(coroutine)\n    mandatory_artifacts_paths = (await raise_future_exceptions(mandatory_artifact_tasks))\n    (succeeded_optional_artifacts_paths, failed_optional_artifacts) = (await get_results_and_future_exceptions(optional_artifact_tasks))\n    if failed_optional_artifacts:\n        log.warning('Could not download {} artifacts: {}'.format(len(failed_optional_artifacts), failed_optional_artifacts))\n    return (mandatory_artifacts_paths + succeeded_optional_artifacts_paths)", "docstring": "Call ``download_cot_artifact`` in parallel for each \"upstreamArtifacts\".\n\nOptional artifacts are allowed to not be downloaded.\n\nArgs:\nchain (ChainOfTrust): the chain of trust object\n\nReturns:\nlist: list of full paths to downloaded artifacts. Failed optional artifacts\naren't returned\n\nRaises:\nCoTError: on chain of trust sha validation error, on a mandatory artifact\nBaseDownloadError: on download error on a mandatory artifact", "source": "codesearchnet"}
{"code": "def update_media_assetfile(access_token, parent_asset_id, asset_id, content_length, name):\n    path = '/Files'\n    full_path = ''.join([path, \"('\", asset_id, \"')\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    body = (((((((('{ \\t\\t\"ContentFileSize\": \"' + str(content_length)) + '\", \\t\\t\"Id\": \"') + asset_id) + '\", \\t\\t\"MimeType\": \"video/mp4\", \\t\\t\"Name\": \"') + name) + '\", \\t\\t\"ParentAssetId\": \"') + parent_asset_id) + '\" \\t}')\n    return do_ams_patch(endpoint, full_path_encoded, body, access_token)", "docstring": "Update Media Service Asset File.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nparent_asset_id (str): A Media Service Asset Parent Asset ID.\nasset_id (str): A Media Service Asset Asset ID.\ncontent_length (str): A Media Service Asset Content Length.\nname (str): A Media Service Asset name.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def load_glossary(file_path: str, read_json=False) -> List[str]:\n        \n        if read_json:\n            if file_path.endswith(\".gz\"):\n                return json.load(gzip.open(file_path))\n            return json.load(open(file_path))\n\n        return open(file_path).read().splitlines()", "docstring": "A glossary is a text file, one entry per line.\n\nArgs:\nfile_path (str): path to a text file containing a glossary.\nread_json (bool): set True if the glossary is in json format\nReturns: List of the strings in the glossary.", "source": "juraj-google-style"}
{"code": "def typify(value, type_hint=None):\n    if isinstance(value, string_types):\n        value = value.strip()\n    elif (type_hint is None):\n        return value\n    if isiterable(type_hint):\n        if (isinstance(type_hint, type) and issubclass(type_hint, Enum)):\n            try:\n                return type_hint(value)\n            except ValueError:\n                return type_hint[value]\n        type_hint = set(type_hint)\n        if (not (type_hint - NUMBER_TYPES_SET)):\n            return numberify(value)\n        elif (not (type_hint - STRING_TYPES_SET)):\n            return text_type(value)\n        elif (not (type_hint - {bool, NoneType})):\n            return boolify(value, nullable=True)\n        elif (not (type_hint - (STRING_TYPES_SET | {bool}))):\n            return boolify(value, return_string=True)\n        elif (not (type_hint - (STRING_TYPES_SET | {NoneType}))):\n            value = text_type(value)\n            return (None if (value.lower() == 'none') else value)\n        elif (not (type_hint - {bool, int})):\n            return typify_str_no_hint(text_type(value))\n        else:\n            raise NotImplementedError()\n    elif (type_hint is not None):\n        try:\n            return (boolify(value) if (type_hint == bool) else type_hint(value))\n        except ValueError as e:\n            raise TypeCoercionError(value, text_type(e))\n    else:\n        return typify_str_no_hint(value)", "docstring": "Take a primitive value, usually a string, and try to make a more relevant type out of it.\nAn optional type_hint will try to coerce the value to that type.\n\nArgs:\nvalue (Any): Usually a string, not a sequence\ntype_hint (type or Tuple[type]):\n\nExamples:\n>>> typify('32')\n32\n>>> typify('32', float)\n32.0\n>>> typify('32.0')\n32.0\n>>> typify('32.0.0')\n'32.0.0'\n>>> [typify(x) for x in ('true', 'yes', 'on')]\n[True, True, True]\n>>> [typify(x) for x in ('no', 'FALSe', 'off')]\n[False, False, False]\n>>> [typify(x) for x in ('none', 'None', None)]\n[None, None, None]", "source": "codesearchnet"}
{"code": "def d_hkl(self, miller_index: Vector3Like) -> float:\n        \n\n        gstar = self.reciprocal_lattice_crystallographic.metric_tensor\n        hkl = np.array(miller_index)\n        return 1 / ((dot(dot(hkl, gstar), hkl.T)) ** (1 / 2))", "docstring": "Returns the distance between the hkl plane and the origin\n\nArgs:\nmiller_index ([h,k,l]): Miller index of plane\n\nReturns:\nd_hkl (float)", "source": "juraj-google-style"}
{"code": "def GetGtfsClassByFileName(self, filename):\n    \n    if filename not in self._file_mapping:\n      return None\n    mapping = self._file_mapping[filename]\n    class_list = mapping['classes']\n    if len(class_list) > 1:\n      raise problems.NonStandardMapping(filename)\n    else:\n      return self._class_mapping[class_list[0]]", "docstring": "Returns the transitfeed class corresponding to a GTFS file.\n\nArgs:\nfilename: The filename whose class is to be returned\n\nRaises:\nNonStandardMapping if the specified filename has more than one\ncorresponding class", "source": "juraj-google-style"}
{"code": "def piece_size(model_file=None, model_proto=None, name=None):\n  \n\n  return _gen_sentencepiece_processor_op.sentencepiece_get_piece_size(\n      model_file=model_file, model_proto=model_proto, name=name)", "docstring": "Returns the piece size (vocabulary size).\n\nArgs:\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA scalar representing the vocabulary size.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, file_entry_type=None,\n      is_root=False):\n    \n    super(FakeFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=True)\n    self._date_time = dfdatetime_fake_time.FakeTime()\n    self._name = None\n    self.entry_type = file_entry_type", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nfile_entry_type (Optional[str]): file entry type.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.", "source": "juraj-google-style"}
{"code": "def stats(data):\n    return {'len': len(data), 'mean': np.mean(data), 'sum': np.sum(data), 'std': np.std(data), 'min': np.min(data), 'max': np.max(data)}", "docstring": "Dictionary with summary stats for data\n\nReturns:\ndicitonary with length, mean, sum, standard deviation,\\\nmin and max of data", "source": "codesearchnet"}
{"code": "def user(self, user: str) -> 'ChildHTTPAPI':\n    if self.is_real_user:\n        raise ValueError(\"Can't get child of real user\")\n    try:\n        return self.children[user]\n    except KeyError:\n        child = ChildHTTPAPI(user, self)\n        self.children[user] = child\n        return child", "docstring": "Get a child HTTPAPI instance.\n\nArgs:\nuser: The Matrix ID of the user whose API to get.\n\nReturns:\nA HTTPAPI instance that always uses the given Matrix ID.", "source": "codesearchnet"}
{"code": "def _make_inc_temp(self, suffix=\"\", prefix=\"\", directory_name=\"/tmp/ray\"):\n        \n        directory_name = os.path.expanduser(directory_name)\n        index = self._incremental_dict[suffix, prefix, directory_name]\n        \n        \n        while index < tempfile.TMP_MAX:\n            if index == 0:\n                filename = os.path.join(directory_name, prefix + suffix)\n            else:\n                filename = os.path.join(directory_name,\n                                        prefix + \".\" + str(index) + suffix)\n            index += 1\n            if not os.path.exists(filename):\n                \n                self._incremental_dict[suffix, prefix, directory_name] = index\n                return filename\n\n        raise FileExistsError(errno.EEXIST,\n                              \"No usable temporary filename found\")", "docstring": "Return a incremental temporary file name. The file is not created.\n\nArgs:\nsuffix (str): The suffix of the temp file.\nprefix (str): The prefix of the temp file.\ndirectory_name (str) : The base directory of the temp file.\n\nReturns:\nA string of file name. If there existing a file having\nthe same name, the returned name will look like\n\"{directory_name}/{prefix}.{unique_index}{suffix}\"", "source": "juraj-google-style"}
{"code": "def get_index_mapping(index):\n    \n    \n    mappings_dir = get_setting(\"mappings_dir\")\n    filename = \"%s.json\" % index\n    path = os.path.join(mappings_dir, filename)\n    with open(path, \"r\") as f:\n        return json.load(f)", "docstring": "Return the JSON mapping file for an index.\n\nMappings are stored as JSON files in the mappings subdirectory of this\napp. They must be saved as {{index}}.json.\n\nArgs:\nindex: string, the name of the index to look for.", "source": "juraj-google-style"}
{"code": "def handle_encodnig(html):\n    \n    encoding = _get_encoding(\n        dhtmlparser.parseString(\n            html.split(\"</head>\")[0]\n        )\n    )\n\n    if encoding == \"utf-8\":\n        return html\n\n    return html.decode(encoding).encode(\"utf-8\")", "docstring": "Look for encoding in given `html`. Try to convert `html` to utf-8.\n\nArgs:\nhtml (str): HTML code as string.\n\nReturns:\nstr: HTML code encoded in UTF.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, buckets, description, *labels):\n    super(Sampler, self).__init__('Sampler', _sampler_methods, len(labels), name, buckets.buckets, description, *labels)", "docstring": "Creates a new Sampler.\n\nArgs:\nname: name of the new metric.\nbuckets: bucketing strategy of the new metric.\ndescription: description of the new metric.\n*labels: The label list of the new metric.", "source": "github-repos"}
{"code": "def propagate(self, date):\n        \n\n        if self.propagator.orbit is not self:\n            self.propagator.orbit = self\n\n        return self.propagator.propagate(date)", "docstring": "Propagate the orbit to a new date\n\nArgs:\ndate (Date)\nReturn:\nOrbit", "source": "juraj-google-style"}
{"code": "def _MakeParseFn(fn, metadata):\n    fn_spec = inspectutils.GetFullArgSpec(fn)\n    num_required_args = len(fn_spec.args) - len(fn_spec.defaults)\n    required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)\n\n    def _ParseFn(args):\n        \n        kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(args, fn_spec)\n        parsed_args, kwargs, remaining_args, capacity = _ParseArgs(fn_spec.args, fn_spec.defaults, num_required_args, kwargs, remaining_args, metadata)\n        if fn_spec.varargs or fn_spec.varkw:\n            capacity = True\n        extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)\n        if fn_spec.varkw is None and extra_kw:\n            raise FireError('Unexpected kwargs present:', extra_kw)\n        missing_kwonly = set(required_kwonly) - set(kwargs)\n        if missing_kwonly:\n            raise FireError('Missing required flags:', missing_kwonly)\n        if fn_spec.varargs is not None:\n            varargs, remaining_args = (remaining_args, [])\n        else:\n            varargs = []\n        for index, value in enumerate(varargs):\n            varargs[index] = _ParseValue(value, None, None, metadata)\n        varargs = parsed_args + varargs\n        remaining_args += remaining_kwargs\n        consumed_args = args[:len(args) - len(remaining_args)]\n        return ((varargs, kwargs), consumed_args, remaining_args, capacity)\n    return _ParseFn", "docstring": "Creates a parse function for fn.\n\nArgs:\nfn: The function or class to create the parse function for.\nmetadata: Additional metadata about the component the parse function is for.\nReturns:\nA parse function for fn. The parse function accepts a list of arguments\nand returns (varargs, kwargs), remaining_args. The original function fn\ncan then be called with fn(*varargs, **kwargs). The remaining_args are\nthe leftover args from the arguments to the parse function.", "source": "github-repos"}
{"code": "def tetragonal(a: float, c: float):\n    return Lattice.from_parameters(a, a, c, 90, 90, 90)", "docstring": "Convenience constructor for a tetragonal lattice.\n\nArgs:\na (float): *a* lattice parameter of the tetragonal cell.\nc (float): *c* lattice parameter of the tetragonal cell.\n\nReturns:\nTetragonal lattice of dimensions a x a x c.", "source": "codesearchnet"}
{"code": "def _GetFieldAttributes(field):\n    if (not isinstance(field, messages.Field)):\n        raise TypeError(('Field %r to be copied not a ProtoRPC field.' % (field,)))\n    positional_args = []\n    kwargs = {'required': field.required, 'repeated': field.repeated, 'variant': field.variant, 'default': field._Field__default}\n    if isinstance(field, messages.MessageField):\n        kwargs.pop('default')\n        if (not isinstance(field, message_types.DateTimeField)):\n            positional_args.insert(0, field.message_type)\n    elif isinstance(field, messages.EnumField):\n        positional_args.insert(0, field.type)\n    return (positional_args, kwargs)", "docstring": "Decomposes field into the needed arguments to pass to the constructor.\n\nThis can be used to create copies of the field or to compare if two fields\nare \"equal\" (since __eq__ is not implemented on messages.Field).\n\nArgs:\nfield: A ProtoRPC message field (potentially to be copied).\n\nRaises:\nTypeError: If the field is not an instance of messages.Field.\n\nReturns:\nA pair of relevant arguments to be passed to the constructor for the field\ntype. The first element is a list of positional arguments for the\nconstructor and the second is a dictionary of keyword arguments.", "source": "codesearchnet"}
{"code": "def f(options, expected_tf_failures=0):\n    test_parameters = [{'ksize': [[1, 1, 1, 1, 1], [1, 2, 2, 2, 1], [1, 2, 3, 4, 1]], 'strides': [[1, 1, 1, 1, 1], [1, 2, 1, 2, 1], [1, 2, 2, 4, 1]], 'input_shape': [[1, 1, 1, 1, 1], [1, 16, 15, 14, 1], [3, 16, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NDHWC']}]\n\n    def build_graph(parameters):\n        input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])\n        out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])\n        return ([input_tensor], [out])\n\n    def build_inputs(parameters, sess, inputs, outputs):\n        input_values = create_tensor_data(tf.float32, parameters['input_shape'])\n        return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))\n    extra_convert_options = ExtraConvertOptions()\n    extra_convert_options.allow_custom_ops = True\n    make_zip_of_tests(options, test_parameters, build_graph, build_inputs, extra_convert_options, expected_tf_failures=expected_tf_failures)", "docstring": "Actual function that generates examples.\n\nArgs:\noptions: An Options instance.\nexpected_tf_failures: number of expected tensorflow failures.", "source": "github-repos"}
{"code": "def run(argv=None, save_main_session=True):\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    with beam.Pipeline(options=pipeline_options) as pipeline:\n        _ = pipeline | 'Read Data' >> beam.io.ReadFromText(known_args.input) | 'Split data to make List' >> beam.Map(lambda x: x.split(',')) | 'Filter rows' >> beam.Filter(custom_filter) | 'Create Key' >> beam.ParDo(CreateKey()) | 'Group by education' >> beam.GroupByKey() | 'Prepare Data' >> beam.ParDo(PrepareDataforTraining()) | 'Train Model' >> beam.ParDo(TrainModel()) | 'Save' >> fileio.WriteToFiles(path=known_args.output, sink=ModelSink())", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.", "source": "github-repos"}
{"code": "def GetFormattedEventObject(cls, event):\n    time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)\n    lines_of_text = [('+-' * 40), '[Timestamp]:', '  {0:s}'.format(time_string)]\n    pathspec = getattr(event, 'pathspec', None)\n    if pathspec:\n        lines_of_text.append('[Pathspec]:')\n        attribute_string = pathspec.comparable.replace('\\n', '\\n  ')\n        attribute_string = '  {0:s}\\n'.format(attribute_string)\n        lines_of_text.append(attribute_string)\n    lines_of_text.append('[Reserved attributes]:')\n    out_additional = ['[Additional attributes]:']\n    for (attribute_name, attribute_value) in sorted(event.GetAttributes()):\n        if (attribute_name not in definitions.RESERVED_VARIABLE_NAMES):\n            attribute_string = '  {{{0!s}}} {1!s}'.format(attribute_name, attribute_value)\n            out_additional.append(attribute_string)\n        elif (attribute_name not in ('pathspec', 'tag')):\n            attribute_string = '  {{{0!s}}} {1!s}'.format(attribute_name, attribute_value)\n            lines_of_text.append(attribute_string)\n    lines_of_text.append('')\n    out_additional.append('')\n    lines_of_text.extend(out_additional)\n    return '\\n'.join(lines_of_text)", "docstring": "Retrieves a string representation of the event.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: string representation of the event.", "source": "codesearchnet"}
{"code": "def ParseIfaddrs(ifaddrs):\n    precondition.AssertOptionalType(ifaddrs, ctypes.POINTER(Ifaddrs))\n    ifaces = {}\n    for ifaddr in IterIfaddrs(ifaddrs):\n        ifname = ctypes.string_at(ifaddr.ifa_name).decode('utf-8')\n        iface = ifaces.setdefault(ifname, rdf_client_network.Interface())\n        iface.ifname = ifname\n        if (not ifaddr.ifa_addr):\n            continue\n        sockaddr = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddr))\n        iffamily = sockaddr.contents.sa_family\n        if (iffamily == AF_INET):\n            sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin))\n            address = rdf_client_network.NetworkAddress()\n            address.address_type = rdf_client_network.NetworkAddress.Family.INET\n            address.packed_bytes = struct.pack('=L', sockaddrin.contents.sin_addr)\n            iface.addresses.append(address)\n        elif (iffamily == AF_INET6):\n            sockaddrin = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrin6))\n            address = rdf_client_network.NetworkAddress()\n            address.address_type = rdf_client_network.NetworkAddress.Family.INET6\n            address.packed_bytes = bytes(list(sockaddrin.contents.sin6_addr))\n            iface.addresses.append(address)\n        elif (iffamily == AF_LINK):\n            sockaddrdl = ctypes.cast(ifaddr.ifa_addr, ctypes.POINTER(Sockaddrdl))\n            nlen = sockaddrdl.contents.sdl_nlen\n            alen = sockaddrdl.contents.sdl_alen\n            iface.mac_address = bytes(sockaddrdl.contents.sdl_data[nlen:(nlen + alen)])\n        else:\n            raise ValueError(('Unexpected socket address family: %s' % iffamily))\n    return itervalues(ifaces)", "docstring": "Parses contents of the intrusive linked list of `ifaddrs`.\n\nArgs:\nifaddrs: A pointer to the first node of `ifaddrs` linked list. Can be NULL.\n\nReturns:\nAn iterator over instances of `rdf_client_network.Interface`.", "source": "codesearchnet"}
{"code": "def block_matrix(A, B, C, D):\n    return vstackm((hstackm((A, B)), hstackm((C, D))))", "docstring": "Generate the operator matrix with quadrants\n\n.. math::\n\n\\begin{pmatrix} A & B \\\\ C & D \\end{pmatrix}\n\nArgs:\nA (Matrix): Matrix of shape ``(n, m)``\nB (Matrix): Matrix of shape ``(n, k)``\nC (Matrix): Matrix of shape ``(l, m)``\nD (Matrix): Matrix of shape ``(l, k)``\n\nReturns:\nMatrix: The combined block matrix ``[[A, B], [C, D]]``.", "source": "juraj-google-style"}
{"code": "def convert(in_file, out_file, in_fmt=\"\", out_fmt=\"\"):\n    # Expand user home markers in both paths.\n    in_file = os.path.expanduser(in_file)\n    out_file = os.path.expanduser(out_file)\n\n    if not os.path.exists(in_file):\n        raise IOError(\"Input file {0} does not exist, stopping...\"\n                      .format(in_file))\n\n    # Guess the formats from the file extensions when not given explicitly.\n    in_fmt = in_fmt.lower() or _guess_format_from_extension(\n        in_file.split('.')[-1].lower())\n    out_fmt = out_fmt.lower() or _guess_format_from_extension(\n        out_file.split('.')[-1].lower())\n\n    if not in_fmt or not out_fmt:\n        raise ValueError(\"Cannot determine conversion formats.\")\n\n    if in_fmt == out_fmt:\n        # Same format on both sides: just copy the file.\n        shutil.copy(in_file, out_file)\n        return out_file\n\n    # Load the input data with the appropriate reader.\n    if in_fmt == 'hdf5':\n        from . import hdf5\n        data = hdf5.load(in_file)\n    elif in_fmt == 'tiff':\n        from . import tiff\n        data = tiff.load(in_file)\n    elif in_fmt == 'png':\n        from . import png\n        data = png.load(in_file)\n    else:\n        return _fail_pair_conversion(in_fmt, out_fmt)\n\n    # Write the data out with the appropriate writer.\n    if out_fmt == 'hdf5':\n        from . import hdf5\n        return hdf5.save(out_file, data)\n    elif out_fmt == 'tiff':\n        from . import tiff\n        return tiff.save(out_file, data)\n    elif out_fmt == 'png':\n        from . import png\n        return png.export_png(out_file, data)\n    else:\n        return _fail_pair_conversion(in_fmt, out_fmt)", "docstring": "Converts in_file to out_file, guessing datatype in the absence of\nin_fmt and out_fmt.\n\nArguments:\nin_file:    The name of the (existing) datafile to read\nout_file:   The name of the file to create with converted data\nin_fmt:     Optional. The format of incoming data, if not guessable\nout_fmt:    Optional. The format of outgoing data, if not guessable\n\nReturns:\nString. Output filename", "source": "juraj-google-style"}
{"code": "def handle(self, handler, req, resp, **kwargs):\n    params = self.require_params(req)\n    if getattr(self, '_with_context', False):\n        handler = partial(handler, context=req.context)\n    (meta, content) = self.require_meta_and_content(handler, params, **kwargs)\n    self.make_body(resp, params, meta, content)\n    return content", "docstring": "Handle given resource manipulation flow in consistent manner.\n\nThis mixin is intended to be used only as a base class in new flow\nmixin classes. It ensures that regardless of resource manipulation\nsemantics (retrieve, get, delete etc.) the flow is always the same:\n\n1. Decode and validate all request parameters from the query string\nusing ``self.require_params()`` method.\n2. Use ``self.require_meta_and_content()`` method to construct ``meta``\nand ``content`` dictionaries that will be later used to create\nserialized response body.\n3. Construct serialized response body using ``self.make_body()`` method.\n\nArgs:\nhandler (method): resource manipulation method handler.\nreq (falcon.Request): request object instance.\nresp (falcon.Response): response object instance to be modified.\n**kwargs: additional keyword arguments retrieved from url\ntemplate.\n\nReturns:\nContent dictionary (preferably resource representation).", "source": "codesearchnet"}
{"code": "def set_current(self, current):\n    self.current = current\n    self.input = current.input\n    self.output = current.output\n    self.cmd = current.task_data['cmd']\n    if (self.cmd and (NEXT_CMD_SPLITTER in self.cmd)):\n        (self.cmd, self.next_cmd) = self.cmd.split(NEXT_CMD_SPLITTER)\n    else:\n        self.next_cmd = None", "docstring": "Creates some aliases for attributes of ``current``.\n\nArgs:\ncurrent: :attr:`~zengine.engine.WFCurrent` object.", "source": "codesearchnet"}
{"code": "def plogdet(K):\n    egvals = eigvalsh(K)\n    return npsum(log(egvals[(egvals > epsilon)]))", "docstring": "Log of the pseudo-determinant.\n\nIt assumes that ``K`` is a positive semi-definite matrix.\n\nArgs:\nK (array_like): matrix.\n\nReturns:\nfloat: log of the pseudo-determinant.", "source": "codesearchnet"}
{"code": "def query_string_to_dict(query):\n        \n\n        query_params = {}\n\n        for key_value in query.split(\"&\"):\n            key_value_pair = key_value.split(\"=\", 1)\n\n            key = key_value_pair[0] if len(key_value_pair) >= 1 else \"\"\n            value = key_value_pair[1] if len(key_value_pair) == 2 else \"\"\n\n            query_params[key] = value\n\n        return query_params", "docstring": "Convert a string to a query dict.\n\nArgs:\nquery (str): The query string.\n\nReturns:\nobj: The key value object with query params.\n\nNote:\nThis method does the same as urllib.parse.parse_qsl except\nthat it doesn't actually decode the values.", "source": "juraj-google-style"}
{"code": "def _benchmarkRunOpPrebuilt(self, name, target, iters):\n    times = []\n    with ops.Graph().as_default():\n        v = variables.Variable(random_ops.random_normal([]))\n        with session.Session(target) as sess:\n            sess.run(v.initializer)\n            runner = sess.make_callable(v.op)\n            runner()\n            for _ in range(iters):\n                start_time = time.time()\n                runner()\n                end_time = time.time()\n                times.append(end_time - start_time)\n    print('%s %f' % (name, np.median(times)))\n    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", "docstring": "Runs a microbenchmark to measure the cost of running an op.\n\nReports the median cost of running a trivial (Variable) op.\n\nArgs:\nname: A human-readable name for logging the output.\ntarget: The session target to use for the benchmark.\niters: The number of iterations to perform.", "source": "github-repos"}
{"code": "def _CreateArgItem(arg, docstring_info, spec):\n    max_str_length = LINE_LENGTH - SECTION_INDENTATION - SUBSECTION_INDENTATION\n    description = _GetArgDescription(arg, docstring_info)\n    arg_string = formatting.BoldUnderline(arg.upper())\n    arg_type = _GetArgType(arg, spec)\n    arg_type = f'Type: {arg_type}' if arg_type else ''\n    available_space = max_str_length - len(arg_type)\n    arg_type = formatting.EllipsisTruncate(arg_type, available_space, max_str_length)\n    description = '\\n'.join((part for part in (arg_type, description) if part))\n    return _CreateItem(arg_string, description, indent=SUBSECTION_INDENTATION)", "docstring": "Returns a string describing a positional argument.\n\nArgs:\narg: The name of the positional argument.\ndocstring_info: A docstrings.DocstringInfo namedtuple with information about\nthe containing function's docstring.\nspec: An instance of fire.inspectutils.FullArgSpec, containing type and\ndefault information about the arguments to a callable.\n\nReturns:\nA string to be used in constructing the help screen for the function.", "source": "github-repos"}
{"code": "def _operations_list(self, ops_filter, max_tasks, page_size, page_token):\n    \n\n    \n    \n    \n    \n    max_page_size = 128\n\n    \n    page_size = min(sz for sz in [page_size, max_page_size, max_tasks] if sz)\n\n    \n    api = self._service.projects().operations().list(\n        name='projects/{}/operations'.format(self._project),\n        filter=ops_filter,\n        pageToken=page_token,\n        pageSize=page_size)\n    response = google_base.Api.execute(api)\n\n    return [\n        GoogleOperation(op)\n        for op in response.get('operations', [])\n        if google_v2_operations.is_dsub_operation(op)\n    ], response.get('nextPageToken')", "docstring": "Gets the list of operations for the specified filter.\n\nArgs:\nops_filter: string filter of operations to return\nmax_tasks: the maximum number of job tasks to return or 0 for no limit.\npage_size: the number of operations to requested on each list operation to\nthe pipelines API (if 0 or None, the API default is used)\npage_token: page token returned by a previous _operations_list call.\n\nReturns:\nOperations matching the filter criteria.", "source": "juraj-google-style"}
{"code": "def mean(data, n=3, **kwargs):\n    \n    \n    if len(data[-n:]) < n:\n        forecast = np.nan\n    else:\n        \n        forecast = np.mean(data[-n:])\n    return forecast", "docstring": "The mean forecast for the next point is the mean value of the previous ``n`` points in\nthe series.\n\nArgs:\ndata (np.array): Observed data, presumed to be ordered in time.\nn (int): period over which to calculate the mean\n\nReturns:\nfloat: a single-valued forecast for the next value in the series.", "source": "juraj-google-style"}
{"code": "def add_node(self, node_descriptor):\n        \n\n        if self._max_nodes is not None and len(self.nodes) >= self._max_nodes:\n            raise ResourceUsageError(\"Maximum number of nodes exceeded\", max_nodes=self._max_nodes)\n\n        node, inputs, processor = parse_node_descriptor(node_descriptor, self.model)\n\n        in_root = False\n\n        for i, input_data in enumerate(inputs):\n            selector, trigger = input_data\n\n            walker = self.sensor_log.create_walker(selector)\n\n            \n            if walker.selector.inexhaustible:\n                walker.reading = IOTileReading(0xFFFFFFFF, walker.selector.as_stream(), 0)\n\n            node.connect_input(i, walker, trigger)\n\n            if selector.input and not in_root:\n                self.roots.append(node)\n                in_root = True  \n            else:\n                found = False\n                for other in self.nodes:\n                    if selector.matches(other.stream):\n                        other.connect_output(node)\n                        found = True\n\n                if not found and selector.buffered:\n                    raise NodeConnectionError(\"Node has input that refers to another node that has not been created yet\", node_descriptor=node_descriptor, input_selector=str(selector), input_index=i)\n\n        \n        \n        \n        for other_node in self.nodes:\n            for selector, trigger in other_node.inputs:\n                if selector.matches(node.stream):\n                    node.connect_output(other_node)\n\n        \n        func = self.find_processing_function(processor)\n        if func is None:\n            raise ProcessingFunctionError(\"Could not find processing function in installed packages\", func_name=processor)\n\n        node.set_func(processor, func)\n        self.nodes.append(node)", "docstring": "Add a node to the sensor graph based on the description given.\n\nThe node_descriptor must follow the sensor graph DSL and describe\na node whose input nodes already exist.\n\nArgs:\nnode_descriptor (str): A description of the node to be added\nincluding its inputs, triggering conditions, processing function\nand output stream.", "source": "juraj-google-style"}
{"code": "def __init__(self, module, method_name=None, **kwargs):\n    super(ModuleWrapper, self).__init__(**kwargs)\n    if method_name is None:\n        if hasattr(module, '__call__'):\n            method_name = '__call__'\n        elif hasattr(module, 'call'):\n            method_name = 'call'\n    if method_name is None or not hasattr(module, method_name):\n        raise ValueError('{} is not defined on object {}'.format(method_name, module))\n    self._module = module\n    self._method_name = method_name\n    method = getattr(module, method_name)\n    method_arg_spec = tf_inspect.getfullargspec(method)\n    self._expects_training_arg = 'training' in method_arg_spec.args or method_arg_spec.varkw is not None\n    self._expects_mask_arg = 'mask' in method_arg_spec.args or method_arg_spec.varkw is not None", "docstring": "Initializes the wrapper Layer for this module.\n\nArgs:\nmodule: The `tf.Module` instance to be wrapped.\nmethod_name: (Optional) str. The name of the method to use as the forward\npass of the module. If not set, defaults to '__call__' if defined, or\n'call'.\n**kwargs: Additional keyword arguments. See `tf.keras.layers.Layer`.\n\nRaises:\nValueError: If `method` is not defined on `module`.", "source": "github-repos"}
{"code": "def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool=True, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):\n    rescaled_image = rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)\n    if offset:\n        rescaled_image = rescaled_image - 1\n    return rescaled_image", "docstring": "Rescale an image by a scale factor.\n\nIf `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is\n1/127.5, the image is rescaled between [-1, 1].\nimage = image * scale - 1\n\nIf `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].\nimage = image * scale\n\nArgs:\nimage (`np.ndarray`):\nImage to rescale.\nscale (`int` or `float`):\nScale to apply to the image.\noffset (`bool`, *optional*):\nWhether to scale the image in both negative and positive directions.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _add_op_node(self, op, qargs, cargs, condition=None):\n        \n        node_properties = {\n            \"type\": \"op\",\n            \"op\": op,\n            \"name\": op.name,\n            \"qargs\": qargs,\n            \"cargs\": cargs,\n            \"condition\": condition\n        }\n\n        \n        self._max_node_id += 1\n        new_node = DAGNode(data_dict=node_properties, nid=self._max_node_id)\n        self._multi_graph.add_node(new_node)\n        self._id_to_node[self._max_node_id] = new_node", "docstring": "Add a new operation node to the graph and assign properties.\n\nArgs:\nop (Instruction): the operation associated with the DAG node\nqargs (list): list of quantum wires to attach to.\ncargs (list): list of classical wires to attach to.\ncondition (tuple or None): optional condition (ClassicalRegister, int)", "source": "juraj-google-style"}
{"code": "def in_top_k(targets, predictions, k):\n    if any_symbolic_tensors((targets, predictions)):\n        return InTopK(k).symbolic_call(targets, predictions)\n    return backend.math.in_top_k(targets, predictions, k)", "docstring": "Checks if the targets are in the top-k predictions.\n\nArgs:\ntargets: A tensor of true labels.\npredictions: A tensor of predicted labels.\nk: An integer representing the number of predictions to consider.\n\nReturns:\nA boolean tensor of the same shape as `targets`, where each element\nindicates whether the corresponding target is in the top-k predictions.\n\nExample:\n\n>>> targets = keras.ops.convert_to_tensor([2, 5, 3])\n>>> predictions = keras.ops.convert_to_tensor(\n... [[0.1, 0.4, 0.6, 0.9, 0.5],\n...  [0.1, 0.7, 0.9, 0.8, 0.3],\n...  [0.1, 0.6, 0.9, 0.9, 0.5]])\n>>> in_top_k(targets, predictions, k=3)\narray([ True False  True], shape=(3,), dtype=bool)", "source": "github-repos"}
{"code": "def read_geom_h5(xdmf_file, snapshot):\n    \n    header = {}\n    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n    if snapshot is None:\n        return None, xdmf_root\n\n    \n    \n    elt_snap = xdmf_root[0][0][snapshot]\n    header['ti_ad'] = float(elt_snap.find('Time').get('Value'))\n    header['mo_lambda'] = _maybe_get(elt_snap, 'mo_lambda', 'Value', float)\n    header['mo_thick_sol'] = _maybe_get(elt_snap, 'mo_thick_sol', 'Value',\n                                        float)\n    header['ntb'] = 1\n    coord_h5 = []  \n    coord_shape = []  \n    twod = None\n    for elt_subdomain in elt_snap.findall('Grid'):\n        if elt_subdomain.get('Name').startswith('meshYang'):\n            header['ntb'] = 2\n            break  \n        elt_geom = elt_subdomain.find('Geometry')\n        if elt_geom.get('Type') == 'X_Y' and twod is None:\n            twod = ''\n            for data_item in elt_geom.findall('DataItem'):\n                coord = data_item.text.strip()[-1]\n                if coord in 'XYZ':\n                    twod += coord\n        data_item = elt_geom.find('DataItem')\n        coord_shape.append(_get_dim(data_item))\n        coord_h5.append(\n            xdmf_file.parent / data_item.text.strip().split(':/', 1)[0])\n    _read_coord_h5(coord_h5, coord_shape, header, twod)\n    return header, xdmf_root", "docstring": "Extract geometry information from hdf5 files.\n\nArgs:\nxdmf_file (:class:`pathlib.Path`): path of the xdmf file.\nsnapshot (int): snapshot number.\nReturns:\n(dict, root): geometry information and root of xdmf document.", "source": "juraj-google-style"}
{"code": "def encoder_decoder_attention_loss(expected_attention_logits, actual_attentions, loss_type='kl_divergence', loss_multiplier=1.0):\n\n    def combine_attentions(attention_list):\n        'Combine different layer attentions and then average over layers/heads.'\n        attentions = tf.stack(attention_list)\n        return tf.reduce_mean(attentions, [0, 2])\n\n    def kl_divergence_loss(expected_logits, actual_logits):\n        p = tfp.distributions.Categorical(logits=expected_logits)\n        q = tfp.distributions.Categorical(logits=actual_logits)\n        return tfp.distributions.kl_divergence(p, q)\n\n    def mse_loss(expected_logits, actual_weights):\n        expected_weights = tf.nn.softmax(expected_logits)\n        return tf.losses.mean_squared_error(expected_weights, actual_weights)\n    loss = 0.0\n    if (loss_type == 'mse'):\n        actual_encdec_attention_weights = [t for (layer_key, t) in actual_attentions.items() if (('encdec_attention' in layer_key) and (not layer_key.endswith('/logits')))]\n        actual_attention_weights = combine_attentions(actual_encdec_attention_weights)\n        loss = mse_loss(expected_attention_logits, actual_attention_weights)\n    else:\n        actual_encdec_attention_logits = [t for (layer_key, t) in actual_attentions.items() if (('encdec_attention' in layer_key) and layer_key.endswith('/logits'))]\n        actual_attention_logits = combine_attentions(actual_encdec_attention_logits)\n        loss = kl_divergence_loss(expected_attention_logits, actual_attention_logits)\n    return (loss * loss_multiplier)", "docstring": "Computes encdec attention loss between expected and actual attentions.\n\nArgs:\nexpected_attention_logits: Tensor storing the expected encoder-decoder\nattention logits with shape [batch_size, target_length, input_length].\nactual_attentions: Dictionary with actual attention logits for different\nattention types and hidden layers.\nloss_type: type of the loss function.\nloss_multiplier: multiplier for the attention loss.\n\nReturns:\nKL_divergence loss between the actual and expected attention logits.", "source": "codesearchnet"}
{"code": "def get_volumes(blocks, layout_info):\n    volumes = {}\n    vol_blocks_lists = sort.by_vol_id(blocks, layout_info[2])\n    for vol_rec in blocks[layout_info[0]].vtbl_recs:\n        vol_name = vol_rec.name.strip(b'\\x00').decode('utf-8')\n        if (vol_rec.rec_index not in vol_blocks_lists):\n            vol_blocks_lists[vol_rec.rec_index] = []\n        volumes[vol_name] = description(vol_rec.rec_index, vol_rec, vol_blocks_lists[vol_rec.rec_index])\n    return volumes", "docstring": "Get a list of UBI volume objects from list of blocks\n\nArguments:\nList:blocks            -- List of layout block objects\nList:layout_info    -- Layout info (indexes of layout blocks and\nassociated data blocks.)\n\nReturns:\nDict -- Of Volume objects by volume name, including any\nrelevant blocks.", "source": "codesearchnet"}
{"code": "def parse_multiple_json(json_file, offset=None):\n    json_info_list = []\n    if not os.path.exists(json_file):\n        return json_info_list, offset\n\n    try:\n        with open(json_file, \"r\") as f:\n            if offset:\n                f.seek(offset)\n            else:\n                offset = 0\n            for line in f:\n                if line[-1] != \"\\n\":\n                    # Incomplete last line, stop parsing here.\n                    break\n                json_info = json.loads(line)\n                json_info_list.append(json_info)\n                offset += len(line)\n    except BaseException as e:\n        logging.error(str(e))\n\n    return json_info_list, offset", "docstring": "Parse multiple json records from the given file.\n\nSeek to the offset as the start point before parsing\nif offset is set. Return an empty list if the json file does\nnot exist or an exception occurs.\n\nArgs:\njson_file (str): File path to be parsed.\noffset (int): Initial seek position of the file.\n\nReturns:\nA list of parsed json records.\nNew offset after parsing.", "source": "juraj-google-style"}
{"code": "def evaluate(self, expression):\n    dump_tensors_iter = re.finditer(_DUMP_TENSOR_PATTERN, expression)\n    rewritten_expression = expression\n    for match in reversed(list(dump_tensors_iter)):\n        tensor_name = match.group(0)[1:-1].strip()\n        device_name, node_name, output_slot, debug_op, exec_index = _parse_debug_tensor_name(tensor_name)\n        if tensor_name not in self._cached_tensor_values:\n            try:\n                value = self._dump.get_tensors(node_name, output_slot, debug_op, device_name=device_name)[exec_index]\n            except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:\n                raise ValueError('Eval failed due to the value of %s:%d:DebugIdentity being unavailable' % (node_name, output_slot))\n            self._cached_tensor_values[tensor_name] = value\n        rewritten_expression = rewritten_expression[:match.start(0)] + \"self._cached_tensor_values['\" + tensor_name + \"']\" + rewritten_expression[match.end(0):]\n    return eval(rewritten_expression)", "docstring": "Parse an expression.\n\nArgs:\nexpression: the expression to be parsed.\n\nReturns:\nThe result of the evaluation.\n\nRaises:\nValueError: If the value of one or more of the debug tensors in the\nexpression are not available.", "source": "github-repos"}
{"code": "def replace_drive_enclosure(self, information):\n    uri = '{}/replaceDriveEnclosure'.format(self.data['uri'])\n    result = self._helper.create(information, uri)\n    self.refresh()\n    return result", "docstring": "When a drive enclosure has been physically replaced, initiate the replacement operation that enables the\nnew drive enclosure to take over as a replacement for the prior drive enclosure. The request requires\nspecification of both the serial numbers of the original drive enclosure and its replacement to be provided.\n\nArgs:\ninformation: Options to replace the drive enclosure.\n\nReturns:\ndict: SAS Logical Interconnect.", "source": "codesearchnet"}
{"code": "def get_parsed_context(pipeline, context_in_string):\n    logger.debug('starting')\n    if ('context_parser' in pipeline):\n        parser_module_name = pipeline['context_parser']\n        logger.debug(f'context parser found: {parser_module_name}')\n        parser_module = pypyr.moduleloader.get_module(parser_module_name)\n        try:\n            logger.debug(f'running parser {parser_module_name}')\n            result_context = parser_module.get_parsed_context(context_in_string)\n            logger.debug(f'step {parser_module_name} done')\n            if (result_context is None):\n                logger.debug(f'{parser_module_name} returned None. Using empty context instead')\n                return pypyr.context.Context()\n            else:\n                return pypyr.context.Context(result_context)\n        except AttributeError:\n            logger.error(f\"The parser {parser_module_name} doesn't have a get_parsed_context(context) function.\")\n            raise\n    else:\n        logger.debug('pipeline does not have custom context parser. Using empty context.')\n        logger.debug('done')\n        return pypyr.context.Context()", "docstring": "Execute get_parsed_context handler if specified.\n\nDynamically load the module specified by the context_parser key in pipeline\ndict and execute the get_parsed_context function on that module.\n\nArgs:\npipeline: dict. Pipeline object.\ncontext_in_string: string. Argument string used to initialize context.\n\nReturns:\npypyr.context.Context() instance.\n\nRaises:\nAttributeError: parser specified on pipeline missing get_parsed_context\nfunction.", "source": "codesearchnet"}
{"code": "def patch_on_type(src: symbolic.Symbolic, value_type: Union[Type[Any], Tuple[Type[Any], ...]], value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:\n    return _conditional_patch(src, lambda k, v, p: isinstance(v, value_type), value, value_fn, skip_notification)", "docstring": "Recursively patch values on matched types.\n\nExample::\n\nd = pg.Dict(a={'x': 1}, b=2)\nprint(pg.patching.patch_on_type(d, int, value_fn=lambda x: x * 2))\n# {a={x=2}, b=4}\n\nArgs:\nsrc: symbolic value to patch.\nvalue_type: Value type to match.\nvalue: New value for field that satisfy `condition`.\nvalue_fn: Callable object that produces new value based on old value.\nIf not None, `value` must be None.\nskip_notification: If True, `on_change` event will not be triggered for this\noperation. If None, the behavior is decided by `pg.notify_on_rebind`.\nPlease see `symbolic.Symbolic.rebind` for details.\n\nReturns:\n`src` after being patched.", "source": "github-repos"}
{"code": "def create_van_der_corput_samples(idx, number_base=2):\n    assert number_base > 1\n\n    idx = numpy.asarray(idx).flatten() + 1\n    out = numpy.zeros(len(idx), dtype=float)\n\n    base = float(number_base)\n    active = numpy.ones(len(idx), dtype=bool)\n    while numpy.any(active):\n        out[active] += (idx[active] % number_base)/base\n        idx = idx // number_base\n        base *= number_base\n        active = idx > 0\n    return out", "docstring": "Van der Corput samples.\n\nArgs:\nidx (int, numpy.ndarray):\nThe index of the sequence. If an array is provided, all values in\nthe array are returned.\nnumber_base (int):\nThe numerical base to create the samples from.\n\nReturns (float, numpy.ndarray):\nVan der Corput samples.", "source": "juraj-google-style"}
{"code": "def anti_clobber_dir_path(dir_path, suffix='.d'):\n    dir_path = os.path.normpath(dir_path)\n    parts = dir_path.split(os.sep)\n    for index in range(len(parts)):\n        test_path = os.sep.join(parts[:(index + 1)])\n        if os.path.isfile(test_path):\n            parts[index] += suffix\n            return os.sep.join(parts)\n    return dir_path", "docstring": "Return a directory path free of filenames.\n\nArgs:\ndir_path (str): A directory path.\nsuffix (str): The suffix to append to the part of the path that is\na file.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _watch(self, primals, tangents):\n\n    def _watch(primal, tangent):\n        if not primal.dtype.is_floating:\n            logging.log_first_n(logging.WARN, 'The dtype of the watched primal must be floating (e.g. tf.float32), got %r', 5, primal.dtype)\n        tangent = ops.convert_to_tensor(tangent, dtype=primal.dtype)\n        if hasattr(primal, 'handle'):\n            primal = ops.convert_to_tensor(primal.handle)\n        pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, primal, tangent)\n    nest.map_structure(_watch, primals, tangents)", "docstring": "Ensures that `primals` are being traced by this accumulator.\n\nMathematically, `tangents` is a vector right-multiplying the Jacobian matrix\n(a Jacobian-vector product) for the function computed while this accumulator\nis active. Since JVPs are computed in forward mode as the computation\nhappens, this vector must be supplied in advance.\n\nWatching a single tensor multiple times sums each of its `tangents`. Any\nun-watched tensor has zeros for its tangent vector.\n\nArgs:\nprimals: A Tensor or list of Tensors.\ntangents: A Tensor or list of Tensors matching `primals`.", "source": "github-repos"}
{"code": "def download(self, temp_ver, store_metadata=True):\n    dest = self._prefixed(temp_ver.name)\n    temp_dest = ('%s.tmp' % dest)\n    with utils.LockFile((dest + '.lock')):\n        if os.path.exists(dest):\n            return\n        temp_ver.download(temp_dest)\n        if store_metadata:\n            with open(('%s.metadata' % dest), 'w') as f:\n                utils.json_dump(temp_ver.get_metadata(), f)\n        sha1 = utils.get_hash(temp_dest)\n        if (temp_ver.get_hash() != sha1):\n            raise RuntimeError(('Image %s does not match the expected hash %s' % (temp_ver.name, sha1)))\n        with open(('%s.hash' % dest), 'w') as f:\n            f.write(sha1)\n        with log_utils.LogTask('Convert image', logger=LOGGER):\n            result = utils.run_command(['qemu-img', 'convert', '-O', 'raw', temp_dest, dest])\n            os.unlink(temp_dest)\n            if result:\n                raise RuntimeError(result.err)", "docstring": "Retrieve the given template version\n\nArgs:\ntemp_ver (TemplateVersion): template version to retrieve\nstore_metadata (bool): If set to ``False``, will not refresh the\nlocal metadata with the retrieved one\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def fit(weights: Array, train_dataset: Dataset, iters: int, learning_rate: float, log_span: int, val_dataset: typing.Optional[Dataset]=None) -> Array:\n    grad_loss = jit(grad(cross_entropy_loss, argnums=0))\n    for t in range(iters):\n        weights = weights - learning_rate * grad_loss(weights, train_dataset.X, train_dataset.Y)\n        if (t + 1) % log_span != 0:\n            continue\n        metrics_train = jit(get_metrics)(weights, train_dataset)\n        print()\n        print('iter:\\t%d' % (t + 1))\n        print()\n        print('train accuracy:\\t%.5f' % metrics_train.accuracy)\n        print('train prec.:\\t%.5f' % metrics_train.precision)\n        print('train recall:\\t%.5f' % metrics_train.recall)\n        print('train fscore:\\t%.5f' % metrics_train.fscore)\n        print('train loss:\\t%.5f' % metrics_train.loss)\n        print()\n        if val_dataset is None:\n            continue\n        metrics_val = jit(get_metrics)(weights, val_dataset)\n        print('val accuracy:\\t%.5f' % metrics_val.accuracy)\n        print('val prec.:\\t%.5f' % metrics_val.precision)\n        print('val recall:\\t%.5f' % metrics_val.recall)\n        print('val fscore:\\t%.5f' % metrics_val.fscore)\n        print('val loss:\\t%.5f' % metrics_val.loss)\n        print()\n    return weights", "docstring": "Updates the weights with the given dataset.\n\nArgs:\nweights: A weight vector.\ntrain_dataset: A train dataset.\niters: A number of iterations.\nlearning_rate: A learning rate.\nlog_span: A span to log metrics.\nval_dataset: A validation dataset (optional).\n\nReturns:\nAn updated weight vector.", "source": "github-repos"}
{"code": "def run_conditional_decorators(self, context):\n    logger.debug('starting')\n    run_me = context.get_formatted_as_type(self.run_me, out_type=bool)\n    skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)\n    swallow_me = context.get_formatted_as_type(self.swallow_me, out_type=bool)\n    if run_me:\n        if (not skip_me):\n            try:\n                if self.retry_decorator:\n                    self.retry_decorator.retry_loop(context, self.invoke_step)\n                else:\n                    self.invoke_step(context=context)\n            except Exception as ex_info:\n                if swallow_me:\n                    logger.error(f'{self.name} Ignoring error because swallow is True for this step. {ex_info}')\n                else:\n                    raise\n        else:\n            logger.info(f'{self.name} not running because skip is True.')\n    else:\n        logger.info(f'{self.name} not running because run is False.')\n    logger.debug('done')", "docstring": "Evaluate the step decorators to decide whether to run step or not.\n\nUse pypyr.dsl.Step.run_step if you intend on executing the step the\nsame way pypyr does.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "codesearchnet"}
{"code": "def add_input(self, **kwargs):\n    self._closed()\n\n    def _get_item(args):\n        'Get a single item from args.'\n        if (not args):\n            raise ValueError('No parameter specified.')\n        item = args.popitem()\n        if args:\n            raise ValueError('Too many parameters, not clear what to do with {}'.format(kwargs))\n        return item\n    symbols = None\n    input_dict = CommentedMap()\n    if ('default' in kwargs):\n        input_dict['default'] = kwargs.pop('default')\n    if ('label' in kwargs):\n        input_dict['label'] = kwargs.pop('label')\n    if ('symbols' in kwargs):\n        symbols = kwargs.pop('symbols')\n    (name, input_type) = _get_item(kwargs)\n    if (input_type == 'enum'):\n        typ = CommentedMap()\n        typ['type'] = 'enum'\n        if (symbols is None):\n            raise ValueError(\"Please specify the enum's symbols.\")\n        if (symbols == []):\n            raise ValueError(\"The enum's symbols cannot be empty.\")\n        if (type(symbols) != list):\n            raise ValueError('Symbols should be a list.')\n        symbols = [str(s) for s in symbols]\n        typ['symbols'] = symbols\n        input_dict['type'] = typ\n    elif bool(input_dict):\n        input_dict['type'] = input_type\n    msg = ('\"{}\" is already used as a workflow input. Please use a ' + 'different name.')\n    if (name in self.wf_inputs):\n        raise ValueError(msg.format(name))\n    if isinstance(input_type, dict):\n        input_dict['type'] = input_type\n    if bool(input_dict):\n        self.wf_inputs[name] = input_dict\n    else:\n        self.wf_inputs[name] = input_type\n    return Reference(input_name=name)", "docstring": "Add workflow input.\n\nArgs:\nkwargs (dict): A dict with a `name: type` item\nand optionally a `default: value` item, where name is the\nname (id) of the workflow input (e.g., `dir_in`) and type is\nthe type of the input (e.g., `'Directory'`).\nThe type of input parameter can be learned from\n`step.inputs(step_name=input_name)`.\n\nReturns:\ninputname\n\nRaises:\nValueError: No or multiple parameter(s) have been specified.", "source": "codesearchnet"}
{"code": "def flowshow(flow, win_name='', wait_time=0):\n    \n    flow = flowread(flow)\n    flow_img = flow2rgb(flow)\n    imshow(rgb2bgr(flow_img), win_name, wait_time)", "docstring": "Show optical flow.\n\nArgs:\nflow (ndarray or str): The optical flow to be displayed.\nwin_name (str): The window name.\nwait_time (int): Value of waitKey param.", "source": "juraj-google-style"}
{"code": "def unstem(self, term):\n\n        \n\n        originals = []\n        for i in self.terms[term]:\n            originals.append(self.tokens[i]['unstemmed'])\n\n        mode = Counter(originals).most_common(1)\n        return mode[0][0]", "docstring": "Given a stemmed term, get the most common unstemmed variant.\n\nArgs:\nterm (str): A stemmed term.\n\nReturns:\nstr: The unstemmed token.", "source": "juraj-google-style"}
{"code": "def find_invalid_filenames(filenames, repository_root):\n    errors = []\n    for filename in filenames:\n        if (not os.path.abspath(filename).startswith(repository_root)):\n            errors.append((filename, ('Error: File %s does not belong to repository %s' % (filename, repository_root))))\n        if (not os.path.exists(filename)):\n            errors.append((filename, ('Error: File %s does not exist' % (filename,))))\n        if os.path.isdir(filename):\n            errors.append((filename, ('Error: %s is a directory. Directories are not yet supported' % (filename,))))\n    return errors", "docstring": "Find files that does not exist, are not in the repo or are directories.\n\nArgs:\nfilenames: list of filenames to check\nrepository_root: the absolute path of the repository's root.\n\nReturns: A list of errors.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs) -> 'PygalleBaseClass':  \n        \n        self.options = kwargs\n        self.init_properties() \\\n            .set_uid() \\\n            .set_class_name() \\\n            .set_category()", "docstring": "Create a new instance of :class:`PygalleBaseClass`\n\n# Arguments\nargs:\nkwargs:\n\n# Returns:\nPygalleBaseClass: An instance of :class:`PygalleBaseClass`", "source": "juraj-google-style"}
{"code": "def reciprocal_lattice_from_outcar( filename ): \n    \n    outcar = open(filename, \"r\").read()\n    \n    recLat = re.findall(r\"reciprocal\\s*lattice\\s*vectors\\s*([-.\\s\\d]*)\",\n                        outcar)[-1]\n    recLat = recLat.split()\n    recLat = np.array(recLat, dtype=float)\n    \n    recLat.shape = (3, 6)\n    recLat = recLat[:, 3:]\n    return recLat", "docstring": "Finds and returns the reciprocal lattice vectors, if more than\none set present, it just returns the last one.\nArgs:\nfilename (Str): The name of the outcar file to be read\n\nReturns:\nList(Float): The reciprocal lattice vectors.", "source": "juraj-google-style"}
{"code": "class Speech2Text2Processor(ProcessorMixin):\n    feature_extractor_class = 'AutoFeatureExtractor'\n    tokenizer_class = 'Speech2Text2Tokenizer'\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False\n\n    def __call__(self, *args, **kwargs):\n        \n        if self._in_target_context_manager:\n            return self.current_processor(*args, **kwargs)\n        if 'raw_speech' in kwargs:\n            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')\n            audio = kwargs.pop('raw_speech')\n        else:\n            audio = kwargs.pop('audio', None)\n        sampling_rate = kwargs.pop('sampling_rate', None)\n        text = kwargs.pop('text', None)\n        if len(args) > 0:\n            audio = args[0]\n            args = args[1:]\n        if audio is None and text is None:\n            raise ValueError('You need to specify either an `audio` or `text` input to process.')\n        if audio is not None:\n            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)\n        if text is not None:\n            encodings = self.tokenizer(text, **kwargs)\n        if text is None:\n            return inputs\n        elif audio is None:\n            return encodings\n        else:\n            inputs['labels'] = encodings['input_ids']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @contextmanager\n    def as_target_processor(self):\n        \n        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')\n        self._in_target_context_manager = True\n        self.current_processor = self.tokenizer\n        yield\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False", "docstring": "Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into\na single processor.\n\n[`Speech2Text2Processor`] offers all the functionalities of [`AutoFeatureExtractor`] and [`Speech2Text2Tokenizer`].\nSee the [`~Speech2Text2Processor.__call__`] and [`~Speech2Text2Processor.decode`] for more information.\n\nArgs:\nfeature_extractor (`AutoFeatureExtractor`):\nAn instance of [`AutoFeatureExtractor`]. The feature extractor is a required input.\ntokenizer (`Speech2Text2Tokenizer`):\nAn instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def report_filter(config, auth, body, filters):\n    new_body = body.copy()\n    for f, d in filters.items():\n        for v in get_rows(config, auth, d):\n            new_body['params'].setdefault('filters', []).append({'type': f, 'value': v})\n    return new_body", "docstring": "Adds filters to a report body\n\nFilters cannot be easily added to the reports without templating; this allows\nfilters to be passed as lists.\nValues are specified using get_rows(...) helper, see\nstarthinker/util/data/__init__.py.\nTo specify a filter, use the official filter name and a list of values.\n\nFor example:\n\n```\nfilters = {\n\"FILTER_PARTNER\": {\n\"values\":789\n},\n\"FILTER_ADVERTISER\": {\n\"values\":[1234, 5678, 91011]\n}\n}\n```\n\nArgs:\n* auth: (string) Either user or service.\n* body: (json) the report body ( with or without filters )\n* filters: (json) a dictionary of filters to apply ( see above examples )\n\nReturns:\n* body: ( json ) modified report body", "source": "github-repos"}
{"code": "def get_percentage_volume_change(self):\n    initial_vol = self.initial.lattice.volume\n    final_vol = self.final.lattice.volume\n    return ((final_vol / initial_vol) - 1)", "docstring": "Returns the percentage volume change.\n\nReturns:\nVolume change in percentage, e.g., 0.055 implies a 5.5% increase.", "source": "codesearchnet"}
{"code": "def stat(self, follow_symlinks=True):\n        \n        if follow_symlinks:\n            if self._statresult_symlink is None:\n                file_object = self._filesystem.resolve(self.path)\n                if self._filesystem.is_windows_fs:\n                    file_object.st_nlink = 0\n                self._statresult_symlink = file_object.stat_result.copy()\n            return self._statresult_symlink\n\n        if self._statresult is None:\n            file_object = self._filesystem.lresolve(self.path)\n            self._inode = file_object.st_ino\n            if self._filesystem.is_windows_fs:\n                file_object.st_nlink = 0\n            self._statresult = file_object.stat_result.copy()\n        return self._statresult", "docstring": "Return a stat_result object for this entry.\n\nArgs:\nfollow_symlinks: If False and the entry is a symlink, return the\nresult for the symlink, otherwise for the object it points to.", "source": "juraj-google-style"}
{"code": "def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'):\n    if (not isinstance(token_encoder_model, SequenceEncoderBase)):\n        raise ValueError('`token_encoder_model` should be an instance of `{}`'.format(SequenceEncoderBase))\n    if ((not token_encoder_model.allows_dynamic_length()) and (self.max_tokens is None)):\n        raise ValueError('The provided `token_encoder_model` does not allow variable length mini-batches. You need to provide `max_tokens`')\n    if (self.embeddings_index is None):\n        embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings)\n    else:\n        embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights(self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings)\n    sequence_input = Input(shape=(self.max_tokens,), dtype='int32')\n    x = embedding_layer(sequence_input)\n    x = token_encoder_model(x)\n    x = Dense(self.num_classes, activation=output_activation)(x)\n    return Model(sequence_input, x)", "docstring": "Builds a model using the given `text_model`\n\nArgs:\ntoken_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document.\nThis encoding is then fed into a final `Dense` layer for classification.\ntrainable_embeddings: Whether or not to fine tune embeddings.\noutput_activation: The output activation to use. (Default value: 'softmax')\nUse:\n- `softmax` for binary or multi-class.\n- `sigmoid` for multi-label classification.\n- `linear` for regression output.\n\nReturns:\nThe model output tensor.", "source": "codesearchnet"}
{"code": "def where(self, predicate):\n    if self.closed():\n        raise ValueError('Attempt to call where() on a closed Queryable.')\n    if (not is_callable(predicate)):\n        raise TypeError('where() parameter predicate={predicate} is not callable'.format(predicate=repr(predicate)))\n    return self._create(ifilter(predicate, self))", "docstring": "Filters elements according to whether they match a predicate.\n\nNote: This method uses deferred execution.\n\nArgs:\npredicate: A unary function which is applied to each element in the\nsource sequence. Source elements for which the predicate\nreturns True will be present in the result.\n\nReturns:\nA Queryable over those elements of the source sequence for which\nthe predicate is True.\n\nRaises:\nValueError: If the Queryable is closed.\nTypeError: If the predicate is not callable.", "source": "codesearchnet"}
{"code": "def call(self, *args, **kwargs):\n    if (not self.is_connected()):\n        if self.autoconnect:\n            return self._call_with_autoconnect(*args, **kwargs)\n        else:\n            error = ConnectionError('you are not connected and autoconnect=False')\n            return tornado.gen.maybe_future(error)\n    return self._call(*args, **kwargs)", "docstring": "Calls a redis command and returns a Future of the reply.\n\nArgs:\n*args: full redis command as variable length argument list or\na Pipeline object (as a single argument).\n**kwargs: internal private options (do not use).\n\nReturns:\na Future with the decoded redis reply as result (when available) or\na ConnectionError object in case of connection error.\n\nRaises:\nClientError: your Pipeline object is empty.\n\nExamples:\n\n>>> @tornado.gen.coroutine\ndef foobar():\nclient = Client()\nresult = yield client.call(\"HSET\", \"key\", \"field\", \"val\")", "source": "codesearchnet"}
{"code": "def patch_addPadding(self, patches):\n    \n    paddingLength = self.Patch_Margin\n    nullPadding = \"\"\n    for x in range(1, paddingLength + 1):\n      nullPadding += chr(x)\n\n    \n    for patch in patches:\n      patch.start1 += paddingLength\n      patch.start2 += paddingLength\n\n    \n    patch = patches[0]\n    diffs = patch.diffs\n    if not diffs or diffs[0][0] != self.DIFF_EQUAL:\n      \n      diffs.insert(0, (self.DIFF_EQUAL, nullPadding))\n      patch.start1 -= paddingLength  \n      patch.start2 -= paddingLength  \n      patch.length1 += paddingLength\n      patch.length2 += paddingLength\n    elif paddingLength > len(diffs[0][1]):\n      \n      extraLength = paddingLength - len(diffs[0][1])\n      newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]\n      diffs[0] = (diffs[0][0], newText)\n      patch.start1 -= extraLength\n      patch.start2 -= extraLength\n      patch.length1 += extraLength\n      patch.length2 += extraLength\n\n    \n    patch = patches[-1]\n    diffs = patch.diffs\n    if not diffs or diffs[-1][0] != self.DIFF_EQUAL:\n      \n      diffs.append((self.DIFF_EQUAL, nullPadding))\n      patch.length1 += paddingLength\n      patch.length2 += paddingLength\n    elif paddingLength > len(diffs[-1][1]):\n      \n      extraLength = paddingLength - len(diffs[-1][1])\n      newText = diffs[-1][1] + nullPadding[:extraLength]\n      diffs[-1] = (diffs[-1][0], newText)\n      patch.length1 += extraLength\n      patch.length2 += extraLength\n\n    return nullPadding", "docstring": "Add some padding on text start and end so that edges can match\nsomething.  Intended to be called only from within patch_apply.\n\nArgs:\npatches: Array of Patch objects.\n\nReturns:\nThe padding string added to each side.", "source": "juraj-google-style"}
{"code": "def _parse_flowcontrol_receive(self, config):\n        \n        value = 'off'\n        match = re.search(r'flowcontrol receive (\\w+)$', config, re.M)\n        if match:\n            value = match.group(1)\n        return dict(flowcontrol_receive=value)", "docstring": "Scans the config block and returns the flowcontrol receive value\n\nArgs:\nconfig (str): The interface config block to scan\n\nReturns:\ndict: Returns a dict object with the flowcontrol receive value\nretrieved from the config block.  The returned dict object\nis intended to be merged into the interface resource dict", "source": "juraj-google-style"}
{"code": "def plot(self, tag, mpl_plt, step=None, close_plot=True):\n    if (step is None):\n        step = self._step\n    else:\n        self._step = step\n    fig = mpl_plt.get_current_fig_manager()\n    (img_w, img_h) = fig.canvas.get_width_height()\n    image_buf = io.BytesIO()\n    mpl_plt.savefig(image_buf, format='png')\n    image_summary = Summary.Image(encoded_image_string=image_buf.getvalue(), colorspace=4, height=img_h, width=img_w)\n    summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])\n    self.add_summary(summary, step)\n    if close_plot:\n        mpl_plt.close()", "docstring": "Saves matplotlib plot output to summary image.\n\nArgs:\ntag: str: label for this data\nmpl_plt: matplotlib stateful pyplot object with prepared plotting state\nstep: int: training step\nclose_plot: bool: automatically closes plot", "source": "codesearchnet"}
{"code": "def uncompress(element, output_spec):\n    flat_types = structure.get_flat_tensor_types(output_spec)\n    flat_shapes = structure.get_flat_tensor_shapes(output_spec)\n    tensor_list = ged_ops.uncompress_element(element, output_types=flat_types, output_shapes=flat_shapes)\n    return structure.from_tensor_list(output_spec, tensor_list)", "docstring": "Uncompress a compressed dataset element.\n\nArgs:\nelement: A scalar variant tensor to uncompress. The element should have been\ncreated by calling `compress`.\noutput_spec: A nested structure of `tf.TypeSpec` representing the type(s) of\nthe uncompressed element.\n\nReturns:\nThe uncompressed element.", "source": "github-repos"}
{"code": "def get_mim_genes(genemap_lines, mim2gene_lines):\n    \n    LOG.info(\"Get the mim genes\")\n    \n    genes = {}\n    hgnc_genes = {}\n    \n    gene_nr = 0\n    no_hgnc = 0\n    \n    for entry in parse_mim2gene(mim2gene_lines):\n        if 'gene' in entry['entry_type']:\n            mim_nr = entry['mim_number']\n            gene_nr += 1\n            if not 'hgnc_symbol' in entry:\n                no_hgnc += 1\n            else:\n                genes[mim_nr] = entry\n    \n    LOG.info(\"Number of genes without hgnc symbol %s\", str(no_hgnc))\n    \n    for entry in parse_genemap2(genemap_lines):\n        mim_number = entry['mim_number']\n        inheritance = entry['inheritance']\n        phenotype_info = entry['phenotypes']\n        hgnc_symbol = entry['hgnc_symbol']\n        hgnc_symbols = entry['hgnc_symbols']\n        if mim_number in genes:\n            genes[mim_number]['inheritance'] = inheritance\n            genes[mim_number]['phenotypes'] = phenotype_info\n            genes[mim_number]['hgnc_symbols'] = hgnc_symbols\n\n    for mim_nr in genes:\n        gene_info = genes[mim_nr]\n        hgnc_symbol = gene_info['hgnc_symbol']\n        \n        if hgnc_symbol in hgnc_genes:\n            existing_info = hgnc_genes[hgnc_symbol]\n            if not existing_info['phenotypes']:\n                hgnc_genes[hgnc_symbol] = gene_info\n            \n        else:\n            hgnc_genes[hgnc_symbol] = gene_info\n    \n    return hgnc_genes", "docstring": "Get a dictionary with genes and their omim information\n\nArgs:\ngenemap_lines(iterable(str))\nmim2gene_lines(iterable(str))\n\nReturns.\nhgnc_genes(dict): A dictionary with hgnc_symbol as keys", "source": "juraj-google-style"}
{"code": "def are_values_same_type(first_val, second_val):\n    first_val_type = type(first_val)\n    second_val_type = type(second_val)\n    if (isinstance(first_val, string_types) and isinstance(second_val, string_types)):\n        return True\n    if (isinstance(first_val, bool) or isinstance(second_val, bool)):\n        return (first_val_type == second_val_type)\n    if (isinstance(first_val, (numbers.Integral, float)) and isinstance(second_val, (numbers.Integral, float))):\n        return True\n    return False", "docstring": "Method to verify that both values belong to same type. Float and integer are\nconsidered as same type.\n\nArgs:\nfirst_val: Value to validate.\nsecond_Val: Value to validate.\n\nReturns:\nBoolean: True if both values belong to same type. Otherwise False.", "source": "codesearchnet"}
{"code": "def _url_dirname(self, url_or_path):\n    return os.path.dirname(url_or_path)", "docstring": "Pass through to os.path.dirname.\n\nThis version uses os.path instead of posixpath to be compatible with the\nhost OS.\n\nArgs:\nurl_or_path: A string in the form of /some/path.", "source": "github-repos"}
{"code": "def __getattr__(self, name: str) -> column_expression_builder.ColumnExpressionBuilder:\n    lookup = name[:-1] if name.endswith('_') and keyword.iskeyword(name[:-1]) else name\n    expression = None\n    if self._fields:\n        for field in self._fields:\n            if field.column_name == lookup:\n                expression = field.builder\n    else:\n        expression = getattr(self._root_resource.builder, lookup)\n    if expression is None:\n        raise AttributeError(f'No such field {name}')\n    return column_expression_builder.ColumnExpressionBuilder.from_fhir_path_builder(expression)", "docstring": "Used to support building expressions directly off of the base view.\n\nSee the class-level documentation for guidance on use.\n\nArgs:\nname: the name of the FHIR field to start with in the builder.\n\nReturns:\nA ColumnExpressionBuilder for the field in question", "source": "github-repos"}
{"code": "def _cumprod(l):\n  \n  ret = [1]\n  for item in l:\n    ret.append(ret[-1] * item)\n  return ret", "docstring": "Cumulative product of a list.\n\nArgs:\nl: a list of integers\nReturns:\na list with one more element (starting with 1)", "source": "juraj-google-style"}
{"code": "def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:\n        \n        pass", "docstring": "Puts multiple objects of the same type into the data sink.\n\nArgs:\ntype: The type of the objects being inserted.\nitems: The objects to be inserted.\ncontext: The context of the insertion (mutable).", "source": "juraj-google-style"}
{"code": "def export_as_tfhub_module(model_name, hparams, decode_hparams, problem, checkpoint_path, export_dir):\n\n    def hub_module_fn():\n        'Creates the TF graph for the hub module.'\n        model_fn = t2t_model.T2TModel.make_estimator_model_fn(model_name, hparams, decode_hparams=decode_hparams, use_tpu=FLAGS.use_tpu)\n        features = problem.serving_input_fn(hparams, decode_hparams, use_tpu=FLAGS.use_tpu).features\n        original_features = features.copy()\n        spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)\n        hub.add_signature(inputs=original_features, outputs=spec.export_outputs['serving_default'].outputs)\n    drop_collections = [tf.GraphKeys.LOSSES, tf.GraphKeys.SUMMARIES, tf.GraphKeys.LOCAL_VARIABLES]\n    module_spec = hub.create_module_spec(hub_module_fn, drop_collections=drop_collections)\n    export_module_spec_with_checkpoint(module_spec, checkpoint_path=checkpoint_path, export_path=export_dir, scope_prefix='')", "docstring": "Exports the last checkpoint from the directory as tfhub module.\n\nIt creates the Module spec and signature (based on T2T problem information),\nwhich is later used to create and export the hub module.\nModule will be saved inside the ckpt_dir.\n\nArgs:\nmodel_name: name of the model to be exported.\nhparams: T2T parameters, model graph will be based on them.\ndecode_hparams: T2T parameters for decoding.\nproblem: the name of the problem\ncheckpoint_path: path to the checkpoint to be exported.\nexport_dir: Directory to write the exported model to.", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    return {'all_set': self._is_all_set(), 'progress': self.progress(), 'values': {property_name: (getattr(self, property_name) or []) for property_name in worker_mapping().keys()}}", "docstring": "This method is used in with connection to REST API. It basically\nconverts all important properties to dictionary, which may be used by\nfrontend.\n\nReturns:\ndict: ``{\"all_set\": bool, \"progress\": [int(done), int(how_many)], \\\n\"values\": {\"property\": [values], ..}}``", "source": "codesearchnet"}
{"code": "def get_output(self):\n    template_function = TEMPLATE_WRAPPER.format(function_name=self.js_function_name, template_code=self.output.getvalue()).strip()\n    module_format = JS_MODULE_FORMATS[self.js_module_format]\n    return module_format(self.dependencies, template_function)", "docstring": "Returns the generated JavaScript code.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str):\n    source, line_number = inspect.getsourcelines(obj)\n    idx = 0\n    while idx < len(source) and _re_args.search(source[idx]) is None:\n        idx += 1\n    if idx == len(source):\n        return\n    indent = find_indent(source[idx])\n    idx += 1\n    start_idx = idx\n    while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent):\n        idx += 1\n    idx -= 1\n    while len(source[idx].strip()) == 0:\n        idx -= 1\n    idx += 1\n    if ''.join(source[start_idx:idx])[:-1] != old_doc_args:\n        return\n    obj_file = find_source_file(obj)\n    with open(obj_file, 'r', encoding='utf-8') as f:\n        content = f.read()\n    lines = content.split('\\n')\n    lines = lines[:line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1:]\n    print(f'Fixing the docstring of {obj.__name__} in {obj_file}.')\n    with open(obj_file, 'w', encoding='utf-8') as f:\n        f.write('\\n'.join(lines))", "docstring": "Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature.\n\nArgs:\nobj (`Any`):\nThe object whose dostring we are fixing.\nold_doc_args (`str`):\nThe current documentation of the parameters of `obj` in the docstring (as returned by\n`match_docstring_with_signature`).\nnew_doc_args (`str`):\nThe documentation of the parameters of `obj` matched with its signature (as returned by\n`match_docstring_with_signature`).", "source": "github-repos"}
{"code": "def create_summary_metadata(hparams_plugin_data_pb):\n  \n  if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData):\n    raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.'\n                    ' Got: %s' % type(hparams_plugin_data_pb))\n  content = plugin_data_pb2.HParamsPluginData()\n  content.CopyFrom(hparams_plugin_data_pb)\n  content.version = PLUGIN_DATA_VERSION\n  return tf.compat.v1.SummaryMetadata(\n      plugin_data=tf.compat.v1.SummaryMetadata.PluginData(\n          plugin_name=PLUGIN_NAME, content=content.SerializeToString()))", "docstring": "Returns a summary metadata for the HParams plugin.\n\nReturns a summary_pb2.SummaryMetadata holding a copy of the given\nHParamsPluginData message in its plugin_data.content field.\nSets the version field of the hparams_plugin_data_pb copy to\nPLUGIN_DATA_VERSION.\n\nArgs:\nhparams_plugin_data_pb: the HParamsPluginData protobuffer to use.", "source": "juraj-google-style"}
{"code": "def construct_lanczos_params(self):\n    \n    \n    \n    self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval)\n\n    def _m_vector_prod_fn(x):\n      return self.get_psd_product(x, dtype=self.lanczos_dtype)\n    def _h_vector_prod_fn(x):\n      return self.get_h_product(x, dtype=self.lanczos_dtype)\n\n    \n    self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64)\n    zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64)\n    self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m,\n                                                    shape=(self.matrix_m_dimension, 1),\n                                                    name='m_min_vec_ph')\n    self.m_min_eig, self.m_min_vec = self.min_eigen_vec(_m_vector_prod_fn,\n                                                        self.matrix_m_dimension,\n                                                        self.m_min_vec_ph,\n                                                        self.lzs_params['max_iter'],\n                                                        dtype=self.lanczos_dtype)\n    self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype)\n    self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype)\n\n    self.h_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=np.float64)\n    zeros_h = tf.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=tf.float64)\n    self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h,\n                                                    shape=(self.matrix_m_dimension - 1, 1),\n                                                    name='h_min_vec_ph')\n    self.h_min_eig, self.h_min_vec = self.min_eigen_vec(_h_vector_prod_fn,\n                                                        self.matrix_m_dimension-1,\n                                                        self.h_min_vec_ph,\n                                                        self.lzs_params['max_iter'],\n                                                        dtype=self.lanczos_dtype)\n    self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype)\n    self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)", "docstring": "Computes matrices T and V using the Lanczos algorithm.\n\nArgs:\nk: number of iterations and dimensionality of the tridiagonal matrix\nReturns:\neig_vec: eigen vector corresponding to min eigenvalue", "source": "juraj-google-style"}
{"code": "def IsDevice(self):\n    if (self._stat_object is None):\n        self._stat_object = self._GetStat()\n    if (self._stat_object is not None):\n        self.entry_type = self._stat_object.type\n    return (self.entry_type == definitions.FILE_ENTRY_TYPE_DEVICE)", "docstring": "Determines if the file entry is a device.\n\nReturns:\nbool: True if the file entry is a device.", "source": "codesearchnet"}
{"code": "class GeneratorEnqueuer(SequenceEnqueuer):\n\n    def __init__(self, generator, use_multiprocessing=False, random_seed=None):\n        super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)\n        self.random_seed = random_seed\n\n    def _get_executor_init(self, workers):\n        \n\n        def pool_fn(seqs):\n            pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed, get_worker_id_queue()))\n            _DATA_POOLS.add(pool)\n            return pool\n        return pool_fn\n\n    def _run(self):\n        \n        self._send_sequence()\n        with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n            while True:\n                if self.stop_signal.is_set():\n                    return\n                self.queue.put(executor.apply_async(next_sample, (self.uid,)), block=True)\n\n    def get(self):\n        \n        try:\n            while self.is_running():\n                inputs = self.queue.get(block=True).get()\n                self.queue.task_done()\n                if inputs is not None:\n                    yield inputs\n        except StopIteration:\n            last_ones = []\n            while self.queue.qsize() > 0:\n                last_ones.append(self.queue.get(block=True))\n            for f in last_ones:\n                f.wait()\n            last_ones = [future.get() for future in last_ones if future.successful()]\n            for inputs in last_ones:\n                if inputs is not None:\n                    yield inputs\n        except Exception as e:\n            self.stop()\n            if 'generator already executing' in str(e):\n                raise RuntimeError('Your generator is NOT thread-safe. Keras requires a thread-safe generator when `use_multiprocessing=False, workers > 1`. ')\n            raise e", "docstring": "Builds a queue out of a data generator.\n\nThe provided generator can be finite in which case the class will throw\na `StopIteration` exception.\n\nArgs:\ngenerator: a generator function which yields data\nuse_multiprocessing: use multiprocessing if True, otherwise threading\nrandom_seed: Initial seed for workers,\nwill be incremented by one for each worker.", "source": "github-repos"}
{"code": "def __init__(self, obj):\n        \n        if distob.engine is None:\n            setup_engines()\n        if isinstance(obj, Ref):\n            self._ref = obj\n            self.is_local = (self._ref.id.engine is distob.engine.eid)\n        else:\n            self._ref = Ref(obj)\n            self.is_local = True\n        if self.is_local:\n            self._dv = None\n            self._obcache = distob.engine[self._ref.id]\n            self._obcache_current = True\n        else:\n            self._dv = distob.engine._client[self._ref.id.engine]\n            self._dv.use_dill()\n            self._obcache = None\n            self._obcache_current = False\n        self._id = self._ref.id\n        \n        self.prefer_local = True \n        \n        instance_methods, instance_attribs, size = call(\n                _scan_instance, self, self.__class__._include_underscore,\n                self.__class__._exclude, prefer_local=False)\n        for name, doc in instance_methods:\n            setattr(self, name, _make_proxy_method(name, doc))\n        for name, doc in instance_attribs:\n            setattr(self.__class__, name, _make_proxy_property(name, doc))\n        self.__engine_affinity__ = (self._ref.id.engine, size)", "docstring": "Set up the Remote* proxy object to access an already-existing object,\nwhich may be local or remote.\n\nArgs:\nobj (Ref or object): either a Ref reference to the (possibly remote)\nobject to be controlled, or else an actual (local) object to be\ncontrolled.", "source": "juraj-google-style"}
{"code": "async def update_state(self, short_name, state):\n    if (short_name not in self.services):\n        raise ArgumentError('Service name is unknown', short_name=short_name)\n    if (state not in states.KNOWN_STATES):\n        raise ArgumentError('Invalid service state', state=state)\n    serv = self.services[short_name]['state']\n    if (serv.state == state):\n        return\n    update = {}\n    update['old_status'] = serv.state\n    update['new_status'] = state\n    update['new_status_string'] = states.KNOWN_STATES[state]\n    serv.state = state\n    (await self._notify_update(short_name, 'state_change', update))", "docstring": "Set the current state of a service.\n\nIf the state is unchanged from a previous attempt, this routine does\nnothing.\n\nArgs:\nshort_name (string): The short name of the service\nstate (int): The new stae of the service", "source": "codesearchnet"}
{"code": "def add_institute(self, institute_obj):\n        \n        internal_id = institute_obj['internal_id']\n        display_name = institute_obj['internal_id']\n\n        \n        if self.institute(institute_id=internal_id):\n            raise IntegrityError(\"Institute {0} already exists in database\"\n                                 .format(display_name))\n\n        LOG.info(\"Adding institute with internal_id: {0} and \"\n                    \"display_name: {1}\".format(internal_id,\n                                               display_name))\n\n        insert_info = self.institute_collection.insert_one(institute_obj)\n        \n        LOG.info(\"Institute saved\")", "docstring": "Add a institute to the database\n\nArgs:\ninstitute_obj(Institute)", "source": "juraj-google-style"}
{"code": "def load(self, key_filter=None, header_preproc=None):\n    df = pd.read_csv(self.input_file, sep='\\t', dtype=object)\n    if (key_filter is not None):\n        df = df[df[df.columns[0]].str.match(key_filter)]\n    meta_col = df.columns[0]\n    df[meta_col] = df[meta_col].str.split(',').str[(- 1)]\n    for col_name in df.columns[1:]:\n        stripped = df[col_name].str.replace('[a-z]', '')\n        df[col_name] = pd.to_numeric(stripped, errors='coerce')\n    if (header_preproc is not None):\n        df.columns = (list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]])\n    df.columns = (['key'] + [int(y) for y in df.columns[1:]])\n    return df", "docstring": "Load data table from tsv file, from default location\n\nArgs:\nkey_filter (str): additional filter for key column - regex matching\nkey values to include; None for no filter\n\nheader_preproc (func): function to apply to column headers to extract year numbers (as strings)\n\nReturns:\npd.DataFrame: data", "source": "codesearchnet"}
{"code": "def json_to_url(json, symbol):\n    start = json[0]['date']\n    end = json[(- 1)]['date']\n    diff = (end - start)\n    periods = [300, 900, 1800, 7200, 14400, 86400]\n    diffs = {}\n    for p in periods:\n        diffs[p] = abs((1 - (p / (diff / len(json)))))\n    period = min(diffs, key=diffs.get)\n    url = 'https:\n    return url", "docstring": "Converts a JSON to a URL by the Poloniex API\n\nArgs:\njson: JSON data as a list of dict dates, where the keys are\nthe raw market statistics.\nsymbol: String of currency pair, like a ticker symbol.\n\nReturns:\nString URL to Poloniex API representing the given JSON.", "source": "codesearchnet"}
{"code": "def validate_filename(filename, white_list_formats):\n    return filename.lower().endswith(white_list_formats) and os.path.isfile(filename)", "docstring": "Check if a filename refers to a valid file.\n\nArgs:\nfilename: String, absolute path to a file\nwhite_list_formats: Set, allowed file extensions\nReturns:\nA boolean value indicating if the filename is valid or not", "source": "github-repos"}
{"code": "def CopyToDateTimeStringISO8601(self):\n    date_time_string = self.CopyToDateTimeString()\n    if date_time_string:\n        date_time_string = date_time_string.replace(' ', 'T')\n        date_time_string = '{0:s}Z'.format(date_time_string)\n    return date_time_string", "docstring": "Copies the date time value to an ISO 8601 date and time string.\n\nReturns:\nstr: date and time value formatted as an ISO 8601 date and time string or\nNone if the timestamp cannot be copied to a date and time string.", "source": "codesearchnet"}
{"code": "def loadfile(method=True, writable=False, create=False):\n    \n\n    def convert_file_args(args, kwargs):\n        filething = args[0] if args else None\n        filename = kwargs.pop(\"filename\", None)\n        fileobj = kwargs.pop(\"fileobj\", None)\n        return filething, filename, fileobj, args[1:], kwargs\n\n    def wrap(func):\n\n        @wraps(func)\n        def wrapper(self, *args, **kwargs):\n            filething, filename, fileobj, args, kwargs = \\\n                convert_file_args(args, kwargs)\n            with _openfile(self, filething, filename, fileobj,\n                           writable, create) as h:\n                return func(self, h, *args, **kwargs)\n\n        @wraps(func)\n        def wrapper_func(*args, **kwargs):\n            filething, filename, fileobj, args, kwargs = \\\n                convert_file_args(args, kwargs)\n            with _openfile(None, filething, filename, fileobj,\n                           writable, create) as h:\n                return func(h, *args, **kwargs)\n\n        return wrapper if method else wrapper_func\n\n    return wrap", "docstring": "A decorator for functions taking a `filething` as a first argument.\n\nPasses a FileThing instance as the first argument to the wrapped function.\n\nArgs:\nmethod (bool): If the wrapped functions is a method\nwritable (bool): If a filename is passed opens the file readwrite, if\npassed a file object verifies that it is writable.\ncreate (bool): If passed a filename that does not exist will create\na new empty file.", "source": "juraj-google-style"}
{"code": "def _ParseKeywordArgs(args, fn_spec):\n    kwargs = {}\n    remaining_kwargs = []\n    remaining_args = []\n    fn_keywords = fn_spec.varkw\n    fn_args = fn_spec.args + fn_spec.kwonlyargs\n    if not args:\n        return (kwargs, remaining_kwargs, remaining_args)\n    skip_argument = False\n    for index, argument in enumerate(args):\n        if skip_argument:\n            skip_argument = False\n            continue\n        if _IsFlag(argument):\n            contains_equals = '=' in argument\n            stripped_argument = argument.lstrip('-')\n            if contains_equals:\n                key, value = stripped_argument.split('=', 1)\n            else:\n                key = stripped_argument\n                value = None\n            key = key.replace('-', '_')\n            is_bool_syntax = not contains_equals and (index + 1 == len(args) or _IsFlag(args[index + 1]))\n            keyword = ''\n            if key in fn_args or (is_bool_syntax and key.startswith('no') and (key[2:] in fn_args)) or fn_keywords:\n                keyword = key\n            elif len(key) == 1:\n                matching_fn_args = [arg for arg in fn_args if arg[0] == key]\n                if len(matching_fn_args) == 1:\n                    keyword = matching_fn_args[0]\n                elif len(matching_fn_args) > 1:\n                    raise FireError(f\"The argument '{argument}' is ambiguous as it could refer to any of the following arguments: {matching_fn_args}\")\n            if not keyword:\n                got_argument = False\n            elif contains_equals:\n                got_argument = True\n            elif is_bool_syntax:\n                got_argument = True\n                if keyword in fn_args:\n                    value = 'True'\n                elif keyword.startswith('no'):\n                    keyword = keyword[2:]\n                    value = 'False'\n                else:\n                    value = 'True'\n            else:\n                assert index + 1 < len(args)\n                value = args[index + 1]\n                got_argument = True\n            skip_argument = not contains_equals and (not is_bool_syntax)\n            if got_argument:\n                kwargs[keyword] = value\n            else:\n                remaining_kwargs.append(argument)\n                if skip_argument:\n                    remaining_kwargs.append(args[index + 1])\n        else:\n            remaining_args.append(argument)\n    return (kwargs, remaining_kwargs, remaining_args)", "docstring": "Parses the supplied arguments for keyword arguments.\n\nGiven a list of arguments, finds occurrences of --name value, and uses 'name'\nas the keyword and 'value' as the value. Constructs and returns a dictionary\nof these keyword arguments, and returns a list of the remaining arguments.\n\nOnly if fn_keywords is None, this only finds argument names used by the\nfunction, specified through fn_args.\n\nThis returns the values of the args as strings. They are later processed by\n_ParseArgs, which converts them to the appropriate type.\n\nArgs:\nargs: A list of arguments.\nfn_spec: The inspectutils.FullArgSpec describing the given callable.\nReturns:\nkwargs: A dictionary mapping keywords to values.\nremaining_kwargs: A list of the unused kwargs from the original args.\nremaining_args: A list of the unused arguments from the original args.\nRaises:\nFireError: If a single-character flag is passed that could refer to multiple\npossible args.", "source": "github-repos"}
{"code": "def get_instance(self):\n    return Instance(self.rest_client.make_request(self.instance), self.rest_client)", "docstring": "Get the Streams instance that owns this view.\n\nReturns:\nInstance: Streams instance owning this view.", "source": "codesearchnet"}
{"code": "def get_adif_id(self, callsign, timestamp=timestamp_now):\n        \n        return self.get_all(callsign, timestamp)[const.ADIF]", "docstring": "Returns ADIF id of a callsign's country\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: containing the country ADIF id\n\nRaises:\nKeyError: No Country found for callsign", "source": "juraj-google-style"}
{"code": "def check_output(self, want, got, optionflags):\n    if got and (not want):\n        return True\n    if want is None:\n        want = ''\n    if want == got:\n        return True\n    want = self._ADDRESS_RE.sub('at ...>', want)\n    want, want_changed = self._tf_tensor_numpy_output(want)\n    if want_changed:\n        got, _ = self._tf_tensor_numpy_output(got)\n    want_text_parts, self.want_floats = self.extract_floats(want)\n    want_text_parts = [part.strip(' ') for part in want_text_parts]\n    want_text_wild = '...'.join(want_text_parts)\n    if '....' in want_text_wild:\n        want_text_wild = re.sub('\\\\.\\\\.\\\\.\\\\.+', '...', want_text_wild)\n    _, self.got_floats = self.extract_floats(got)\n    self.text_good = super(TfDoctestOutputChecker, self).check_output(want=want_text_wild, got=got, optionflags=optionflags)\n    if not self.text_good:\n        return False\n    if self.want_floats.size == 0:\n        return True\n    self.float_size_good = self.want_floats.size == self.got_floats.size\n    if self.float_size_good:\n        return self._allclose(self.want_floats, self.got_floats)\n    else:\n        return False", "docstring": "Compares the docstring output to the output gotten by running the code.\n\nPython addresses in the output are replaced with wildcards.\n\nFloat values in the output compared as using `np.allclose`:\n\n* Float values are extracted from the text and replaced with wildcards.\n* The wildcard text is compared to the actual output.\n* The float values are compared using `np.allclose`.\n\nThe method returns `True` if both the text comparison and the numeric\ncomparison are successful.\n\nThe numeric comparison will fail if either:\n\n* The wrong number of floats are found.\n* The float values are not within tolerence.\n\nArgs:\nwant: The output in the docstring.\ngot: The output generated after running the snippet.\noptionflags: Flags passed to the doctest.\n\nReturns:\nA bool, indicating if the check was successful or not.", "source": "github-repos"}
{"code": "def suggest(self, query):\n        \n        res, suggest = self.search(query, results=1, suggestion=True)\n        try:\n            title = suggest or res[0]\n        except IndexError:  \n            title = None\n        return title", "docstring": "Gather suggestions based on the provided title or None if no\nsuggestions found\n\nArgs:\nquery (str): Page title\nReturns:\nString or None: Suggested page title or **None** if no \\\nsuggestion found", "source": "juraj-google-style"}
{"code": "def _QueryProcessStatus(self, process):\n    \n    process_is_alive = process.is_alive()\n    if process_is_alive:\n      rpc_client = self._rpc_clients_per_pid.get(process.pid, None)\n      process_status = rpc_client.CallFunction()\n    else:\n      process_status = None\n    return process_status", "docstring": "Queries a process to determine its status.\n\nArgs:\nprocess (MultiProcessBaseProcess): process to query for its status.\n\nReturns:\ndict[str, str]: status values received from the worker process.", "source": "juraj-google-style"}
{"code": "def get_end_start_epochs(year, month, day, direction, unit, count):\n    if (year or month or day):\n        if (not year):\n            year = 2017\n        if (not month):\n            month = 1\n        if (not day):\n            day = 1\n        initial_delorean = date_to_delorean(year, month, day)\n    else:\n        count += 1\n        initial_delorean = now_delorean()\n    initial_epoch = int(initial_delorean.epoch)\n    shifted_epoch = shift_epoch(initial_delorean, direction, unit, count)\n    return {'initial': initial_epoch, 'shifted': shifted_epoch}", "docstring": "Gets epoch from a start date and epoch from a shifted date\n\nArgs:\nyear: Int between 1 and 9999.\nmonth: Int between 1 and 12.\nday: Int between 1 and 31.\ndirection: String to shift time forwards or backwards.\nValid values: 'last', 'next'.\nunit: String of time period unit for count argument.\nHow far back to check historical market data.\nValid values: 'hour', 'day', 'week', 'month', 'year'.\ncount: Int of units.\nHow far back to check historical market data?\n\nReturns:\nDict of int epochs in UTC with keys 'initial' and 'shifted'", "source": "codesearchnet"}
{"code": "def _MakeServiceDescriptor(self, service_proto, service_index, scope, package, file_desc):\n    if package:\n        service_name = '.'.join((package, service_proto.name))\n    else:\n        service_name = service_proto.name\n    methods = [self._MakeMethodDescriptor(method_proto, service_name, package, scope, index) for (index, method_proto) in enumerate(service_proto.method)]\n    desc = descriptor.ServiceDescriptor(name=service_proto.name, full_name=service_name, index=service_index, methods=methods, options=_OptionsOrNone(service_proto), file=file_desc)\n    self._service_descriptors[service_name] = desc\n    return desc", "docstring": "Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.\n\nArgs:\nservice_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message.\nservice_index: The index of the service in the File.\nscope: Dict mapping short and full symbols to message and enum types.\npackage: Optional package name for the new message EnumDescriptor.\nfile_desc: The file containing the service descriptor.\n\nReturns:\nThe added descriptor.", "source": "codesearchnet"}
{"code": "def _GetContainerTypes(self):\n    self._cursor.execute(self._TABLE_NAMES_QUERY)\n    table_names = [row[0] for row in self._cursor.fetchall()]\n    return [table_name for table_name in self._CONTAINER_TYPES if (table_name in table_names)]", "docstring": "Retrieves the container types to merge.\n\nContainer types not defined in _CONTAINER_TYPES are ignored and not merged.\n\nSpecific container types reference other container types, such\nas event referencing event data. The names are ordered to ensure the\nattribute containers are merged in the correct order.\n\nReturns:\nlist[str]: names of the container types to merge.", "source": "codesearchnet"}
{"code": "def squeeze_batch_dims(inp, op, inner_rank):\n    with ops.name_scope_v2('squeeze_batch_dims'):\n        shape = inp.shape\n        inner_shape = shape[-inner_rank:]\n        if not inner_shape.is_fully_defined():\n            inner_shape = array_ops.shape(inp)[-inner_rank:]\n        batch_shape = shape[:-inner_rank]\n        if not batch_shape.is_fully_defined():\n            batch_shape = array_ops.shape(inp)[:-inner_rank]\n        if isinstance(inner_shape, tensor_shape.TensorShape):\n            inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n        else:\n            inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))\n        out_reshaped = op(inp_reshaped)\n        out_inner_shape = out_reshaped.shape[-inner_rank:]\n        if not out_inner_shape.is_fully_defined():\n            out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n        out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n        out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n        return out", "docstring": "Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.\n\nWhere `squeeze_batch` reshapes `inp` to shape\n`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`\nand `unsqueeze_batch` does the reverse reshape but on the output.\n\nArgs:\ninp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`\nis length `inner_rank`.\nop: A callable that takes a single input tensor and returns a single.\noutput tensor.\ninner_rank: A python integer.\n\nReturns:\n`unsqueeze_batch_op(squeeze_batch(inp))`.", "source": "github-repos"}
{"code": "def _set_initial_contents(self, contents):\n        \n        contents = self._encode_contents(contents)\n        changed = self._byte_contents != contents\n        st_size = len(contents)\n\n        if self._byte_contents:\n            self.size = 0\n        current_size = self.st_size or 0\n        self.filesystem.change_disk_usage(\n            st_size - current_size, self.name, self.st_dev)\n        self._byte_contents = contents\n        self.st_size = st_size\n        self.epoch += 1\n        return changed", "docstring": "Sets the file contents and size.\nCalled internally after initial file creation.\n\nArgs:\ncontents: string, new content of file.\n\nReturns:\nTrue if the contents have been changed.\n\nRaises:\nIOError: if the st_size is not a non-negative integer,\nor if st_size exceeds the available file system space", "source": "juraj-google-style"}
{"code": "def attach(self, droplet_id, region):\n    return self.get_data(('volumes/%s/actions/' % self.id), type=POST, params={'type': 'attach', 'droplet_id': droplet_id, 'region': region})", "docstring": "Attach a Volume to a Droplet.\n\nArgs:\ndroplet_id: int - droplet id\nregion: string - slug identifier for the region", "source": "codesearchnet"}
{"code": "def read(file_path):\n    actual_file_path = os.path.expanduser(file_path)\n    with open(actual_file_path, 'r') as f:\n        lines = f.readlines()\n    gmt = []\n    for (line_num, line) in enumerate(lines):\n        fields = line.split('\\t')\n        assert (len(fields) > 2), ('Each line must have at least 3 tab-delimited items. ' + 'line_num: {}, fields: {}').format(line_num, fields)\n        fields[(- 1)] = fields[(- 1)].rstrip()\n        entries = fields[2:]\n        entries = [x for x in entries if x]\n        assert (len(set(entries)) == len(entries)), ('There should not be duplicate entries for the same set. ' + 'line_num: {}, entries: {}').format(line_num, entries)\n        line_dict = {SET_IDENTIFIER_FIELD: fields[0], SET_DESC_FIELD: fields[1], SET_MEMBERS_FIELD: entries}\n        gmt.append(line_dict)\n    verify_gmt_integrity(gmt)\n    return gmt", "docstring": "Read a gmt file at the path specified by file_path.\n\nArgs:\nfile_path (string): path to gmt file\n\nReturns:\ngmt (GMT object): list of dicts, where each dict corresponds to one\nline of the GMT file", "source": "codesearchnet"}
{"code": "def __init__(self, estimator, logdir=None):\n        \n        threading.Thread.__init__(self)\n        self.event = threading.Event()\n        self.estimator = estimator\n        self.logdir = logdir or tempfile.mkdtemp()", "docstring": "Initialize ``Tensorboard`` instance.\n\nArgs:\nestimator (sagemaker.estimator.Framework): A SageMaker ``Estimator``.\nlogdir (str): Directory for logs (default: None). If not specified, a temporary directory is made.", "source": "juraj-google-style"}
{"code": "def dp020(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dp020`'.format(value))\n\n        self._dp020 = value", "docstring": "Corresponds to IDD Field `dp020`\nDew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `dp020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def array(self, dimensions=None):\n    if (dimensions is None):\n        dims = [d for d in (self.kdims + self.vdims)]\n    else:\n        dims = [self.get_dimension(d, strict=True) for d in dimensions]\n    (columns, types) = ([], [])\n    for dim in dims:\n        column = self.dimension_values(dim)\n        columns.append(column)\n        types.append(column.dtype.kind)\n    if (len(set(types)) > 1):\n        columns = [c.astype('object') for c in columns]\n    return np.column_stack(columns)", "docstring": "Convert dimension values to columnar array.\n\nArgs:\ndimensions: List of dimensions to return\n\nReturns:\nArray of columns corresponding to each dimension", "source": "codesearchnet"}
{"code": "def _find_cellid(self, code):\n        \n        from difflib import SequenceMatcher\n        maxvalue = 0.\n        maxid = None\n        \n        for cellid, c in self.cellids.items():\n            matcher = SequenceMatcher(a=c, b=code)\n            ratio = matcher.quick_ratio()\n            if ratio > maxvalue and ratio > 0.5:\n                maxid, maxvalue = cellid, ratio\n\n        return maxid", "docstring": "Determines the most similar cell (if any) to the specified code. It\nmust have at least 50% overlap ratio and have been a loop-intercepted\ncell previously.\n\nArgs:\ncode (str): contents of the code cell that were executed.", "source": "juraj-google-style"}
{"code": "def add_input(self, *args, **kwargs):\n    return self._inputs.add(*args, **kwargs)", "docstring": "Add a wrapped input argument to the hint.\n\nArgs:\n*args: The input tensor.\n**kwargs:\n\"name\" label\n\"tag\" a tag to group multiple arguments that will be aggregated. I.e.\na string like 'cool_input'. Basically multiple inputs can be added\nto the same hint for parallel operations that will eventually be\ncombined. An example would be static_rnn which creates multiple copies\nof state or inputs.\n\"aggregate\" aggregation strategy that is valid only for tag non None.\nAcceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,\nand OpHint.AGGREGATE_STACK.\n\"index_override\" The global index to use. This corresponds to the\nargument order in the final stub that will be generated.\nReturns:\nThe wrapped input tensor.", "source": "github-repos"}
{"code": "def fullpath(self):\n    return str(os.path.join(self.path, self.directory))", "docstring": "Full path to the Mackup configuration files.\n\nThe full path to the directory when Mackup is storing the configuration\nfiles.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def absolute_url(self):\n    if self.is_root():\n        return utils.concat_urls(self.url)\n    return utils.concat_urls(self.parent.absolute_url, self.url)", "docstring": "Get the absolute url of ``self``.\n\nReturns:\nstr: the absolute url.", "source": "codesearchnet"}
{"code": "def extract_github_repo_owner_and_name(url):\n    \n    _check_github_url_is_supported(url)\n\n    parts = get_parts_of_url_path(url)\n    repo_owner = parts[0]\n    repo_name = parts[1]\n\n    return repo_owner, _strip_trailing_dot_git(repo_name)", "docstring": "Given an URL, return the repo name and who owns it.\n\nArgs:\nurl (str): The URL to the GitHub repository\n\nRaises:\nValueError: on url that aren't from github\n\nReturns:\nstr, str: the owner of the repository, the repository name", "source": "juraj-google-style"}
{"code": "def restore_collection(backup):\n    \n    for k, v in six.iteritems(backup):\n        del tf.get_collection_ref(k)[:]\n        tf.get_collection_ref(k).extend(v)", "docstring": "Restore from a collection backup.\n\nArgs:\nbackup (dict):", "source": "juraj-google-style"}
{"code": "def fragment_search(self, fragement:str) -> List[dict]:\n        \n        fragement = self.extract_fragment(fragement)\n        ilx_rows = self.fragment2rows.get(fragement)\n        if not ilx_rows:\n            return None\n        else:\n            return ilx_rows", "docstring": "Returns the rows in InterLex associated with the fragment\n\nNote:\nPressumed to have duplicate fragements in InterLex\nArgs:\nfragment: The fragment_id of the curie pertaining to the ontology\nReturns:\nNone or List[dict]", "source": "juraj-google-style"}
{"code": "def predict_proba(self, x, y=None, **kwargs):\n    if (self.clf is None):\n        raise ValueError('Model has to be trained before making predictions.')\n    if (x is pandas.Series):\n        input_ = self.featurize_row(x.iloc[0], x.iloc[1]).reshape((1, (- 1)))\n    elif (x is pandas.DataFrame):\n        input_ = np.array([self.featurize_row(x.iloc[0], x.iloc[1]) for row in x])\n    elif (y is not None):\n        input_ = self.featurize_row(x, y).reshape((1, (- 1)))\n    else:\n        raise TypeError('DataType not understood.')\n    return self.clf.predict(input_)", "docstring": "Predict the causal score using a trained RCC model\n\nArgs:\nx (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.\nargs (numpy.array): second variable (optional depending on the 1st argument).\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "codesearchnet"}
{"code": "def round_f1(y_true, y_predicted):\n    \n    try:\n        predictions = [np.round(x) for x in y_predicted]\n    except TypeError:\n        predictions = y_predicted\n\n    return f1_score(y_true, predictions)", "docstring": "Calculates F1 (binary) measure.\n\nArgs:\ny_true: list of true values\ny_predicted: list of predicted values\n\nReturns:\nF1 score", "source": "juraj-google-style"}
{"code": "def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):\n    if (not isinstance(rdf_value, cls.rdf_type)):\n        raise ValueError(('This collection only accepts values of type %s.' % cls.rdf_type.__name__))\n    if (mutation_pool is None):\n        raise ValueError(\"Mutation pool can't be none.\")\n    timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()\n    if (not isinstance(queue_urn, rdfvalue.RDFURN)):\n        queue_urn = rdfvalue.RDFURN(queue_urn)\n    mutation_pool.QueueAddItem(queue_urn, rdf_value, timestamp)", "docstring": "Adds an rdf value the queue.\n\nAdds an rdf value to a queue. Does not require that the queue be locked, or\neven open. NOTE: The caller is responsible for ensuring that the queue\nexists and is of the correct type.\n\nArgs:\nqueue_urn: The urn of the queue to add to.\n\nrdf_value: The rdf value to add to the queue.\n\nmutation_pool: A MutationPool object to write to.\n\nRaises:\nValueError: rdf_value has unexpected type.", "source": "codesearchnet"}
{"code": "def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):\n    ksizes = [1] + ksizes + [1]\n    strides = [1] + strides + [1]\n    rates = [1] + rates + [1]\n    for dtype in [np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype]:\n        out_tensor = array_ops.extract_image_patches(constant_op.constant(image, dtype=dtype), ksizes=ksizes, strides=strides, rates=rates, padding=padding, name='im2col')\n        self.assertAllClose(np.array(patches, dtype=dtype), self.evaluate(out_tensor))", "docstring": "Tests input-output pairs for the ExtractImagePatches op.\n\nArgs:\nimage: Input tensor with shape: [batch, in_rows, in_cols, depth].\nksizes: Patch size specified as: [ksize_rows, ksize_cols].\nstrides: Output strides, specified as [stride_rows, stride_cols].\nrates: Atrous rates, specified as [rate_rows, rate_cols].\npadding: Padding type.\npatches: Expected output.", "source": "github-repos"}
{"code": "def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):\n    with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):\n        if not isinstance(x, sparse_tensor.SparseTensor):\n            x = ops.convert_to_tensor(x, name='x')\n        rank = ops.convert_to_tensor(rank, name='rank')\n        message = _message_prefix(message)\n        static_condition = lambda actual_rank, given_rank: actual_rank == given_rank\n        dynamic_condition = math_ops.equal\n        if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):\n            name = ''\n        else:\n            name = x.name\n        if data is None:\n            data = [message, 'Tensor %s must have rank' % name, rank, 'Received shape: ', array_ops.shape(x)]\n        try:\n            assert_op = _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize)\n        except ValueError as e:\n            if e.args[0] == 'Static rank condition failed':\n                raise ValueError('%sTensor %s must have rank %d.  Received rank %d, shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))\n            else:\n                raise ValueError(e.args[0])\n    return assert_op", "docstring": "Assert `x` has rank equal to `rank`.\n\nExample of adding a dependency to an operation:\n\n```python\nwith tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]):\noutput = tf.reduce_sum(x)\n```\n\nArgs:\nx:  Numeric `Tensor`.\nrank:  Scalar integer `Tensor`.\ndata:  The tensors to print out if the condition is False.  Defaults to\nerror message and the shape of `x`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).  Defaults to \"assert_rank\".\n\nReturns:\nOp raising `InvalidArgumentError` unless `x` has specified rank.\nIf static checks determine `x` has correct rank, a `no_op` is returned.\n\nRaises:\nValueError:  If static checks determine `x` has wrong rank.", "source": "github-repos"}
{"code": "def anti_join(df, other, **kwargs):\n    (left_on, right_on, suffixes) = get_join_parameters(kwargs)\n    if (not right_on):\n        right_on = [col_name for col_name in df.columns.values.tolist() if (col_name in other.columns.values.tolist())]\n        left_on = right_on\n    elif (not isinstance(right_on, (list, tuple))):\n        right_on = [right_on]\n    other_reduced = other[right_on].drop_duplicates()\n    joined = df.merge(other_reduced, how='left', left_on=left_on, right_on=right_on, suffixes=('', '_y'), indicator=True).query('_merge==\"left_only\"')[df.columns.values.tolist()]\n    return joined", "docstring": "Returns all of the rows in the left DataFrame that do not have a\nmatch in the right DataFrame.\n\nArgs:\ndf (pandas.DataFrame): Left DataFrame (passed in via pipe)\nother (pandas.DataFrame): Right DataFrame\n\nKwargs:\nby (str or list): Columns to join on. If a single string, will join\non that column. If a list of lists which contain strings or\nintegers, the right/left columns to join on.\n\nExample:\na >> anti_join(b, by='x1')\n\nx1  x2\n2  C   3", "source": "codesearchnet"}
{"code": "def get_config(filepath=None, default_loader=None, on_missing=None):\n    cache_key = (filepath, default_loader, on_missing)\n    if (CACHE.get(cache_key) is not None):\n        return CACHE.get(cache_key)\n    logger = logging.getLogger('birding')\n    if (filepath is None):\n        filepath = BIRDING_CONF\n    if (default_loader is None):\n        default_loader = get_defaults_file\n    if (on_missing is None):\n        on_missing = logger.info\n    logger.info('Looking for configuration file: {}'.format(os.path.abspath(filepath)))\n    if (not os.path.exists(filepath)):\n        on_missing('No {} configuration file found.'.format(filepath))\n        if (filepath != BIRDING_CONF_DEFAULT):\n            os.stat(filepath)\n    config = yaml.safe_load(default_loader())\n    tv.validate(SCHEMA, config)\n    if os.path.exists(filepath):\n        file_config = yaml.safe_load(open(filepath))\n        if file_config:\n            config = overlay(file_config, config)\n            tv.validate(SCHEMA, config)\n    CACHE.put(cache_key, config)\n    return config", "docstring": "Get a dict for the current birding configuration.\n\nThe resulting dictionary is fully populated with defaults, such that all\nvalid keys will resolve to valid values. Invalid and extra values in the\nconfiguration result in an exception.\n\nSee :ref:`config` (module-level docstring) for discussion on how birding\nconfiguration works, including filepath loading. Note that a non-default\nfilepath set via env results in a :py:exc:`OSError` when the file is\nmissing, but the default filepath is ignored when missing.\n\nThis function caches its return values as to only parse configuration once\nper set of inputs. As such, treat the resulting dictionary as read-only as\nnot to accidentally write values which will be seen by other handles of the\ndictionary.\n\nArgs:\nfilepath (str): path to birding configuration YAML file.\ndefault_loader (callable):\ncallable which returns file descriptor with YAML data of default\nconfiguration values\non_missing (callable): callback to call when file is missing.\nReturns:\ndict: dict of current birding configuration; treat as read-only.", "source": "codesearchnet"}
{"code": "def cancelRealTimeBars(self, bars: RealTimeBarList):\n        \n        self.client.cancelRealTimeBars(bars.reqId)\n        self.wrapper.endSubscription(bars)", "docstring": "Cancel the realtime bars subscription.\n\nArgs:\nbars: The bar list that was obtained from ``reqRealTimeBars``.", "source": "juraj-google-style"}
{"code": "def getConfigPath(configFileName=None):\n    paths = {}\n    applicationPath = './'\n    if (sys.platform == 'win32'):\n        applicationPath = os.path.expanduser(os.path.join('~\\\\', 'OSRFramework'))\n    else:\n        applicationPath = os.path.expanduser(os.path.join('~/', '.config', 'OSRFramework'))\n    paths = {'appPath': applicationPath, 'appPathData': os.path.join(applicationPath, 'data'), 'appPathDefaults': os.path.join(applicationPath, 'default'), 'appPathPlugins': os.path.join(applicationPath, 'plugins'), 'appPathWrappers': os.path.join(applicationPath, 'plugins', 'wrappers'), 'appPathPatterns': os.path.join(applicationPath, 'plugins', 'patterns')}\n    for path in paths.keys():\n        if (not os.path.exists(paths[path])):\n            os.makedirs(paths[path])\n    return paths", "docstring": "Auxiliar function to get the configuration paths depending on the system\n\nArgs:\n-----\nconfigFileName: TODO.\n\nReturns:\n--------\nA dictionary with the following keys: appPath, appPathDefaults,\nappPathTransforms, appPathPlugins, appPathPatterns, appPathPatterns.", "source": "codesearchnet"}
{"code": "def summarize(self, geom, stat=None):\n        \n        if not hasattr(geom, 'num_coords'):\n            raise TypeError('Need OGR or GEOS geometry, %s found' % type(geom))\n        clone = self._clone()\n        for obj in clone:\n            arr = obj.array(geom)\n            if arr is not None:\n                if stat:\n                    arr = agg_dims(arr, stat)\n                try:\n                    arr = arr.squeeze()\n                except ValueError:\n                    pass\n            obj.image = arr\n        return clone", "docstring": "Returns a new RasterQuerySet with subsetted/summarized ndarrays.\n\nArguments:\ngeom -- geometry for masking or spatial subsetting\nKeyword args:\nstat -- any numpy summary stat method as str (min/max/mean/etc)", "source": "juraj-google-style"}
{"code": "def save_with_exif_info(img, *args, **kwargs):\n    if ('exif' in kwargs):\n        exif = kwargs.pop('exif')\n    else:\n        exif = img.info.get('exif')\n    img.save(*args, exif=exif, **kwargs)", "docstring": "Saves an image using PIL, preserving the exif information.\n\nArgs:\nimg (PIL.Image.Image):\n*args: The arguments for the `save` method of the Image class.\n**kwargs: The keywords for the `save` method of the Image class.", "source": "codesearchnet"}
{"code": "def Shell(self, command, timeout_ms=None):\n        \n        return self.protocol_handler.Command(\n            self._handle, service=b'shell', command=command,\n            timeout_ms=timeout_ms)", "docstring": "Run command on the device, returning the output.\n\nArgs:\ncommand: Shell command to run\ntimeout_ms: Maximum time to allow the command to run.", "source": "juraj-google-style"}
{"code": "def _write_reqs(amend: bool=False, stage: bool=False):\n    LOGGER.info('writing requirements')\n    base_cmd = 'pipenv lock -r'\n    _write_reqs_file(f'{base_cmd}', 'requirements.txt')\n    _write_reqs_file(f'{base_cmd} -d', 'requirements-dev.txt')\n    files_to_add = ['Pipfile', 'requirements.txt', 'requirements-dev.txt']\n    if amend:\n        CTX.repo.amend_commit(append_to_msg='update requirements [auto]', files_to_add=files_to_add)\n    elif stage:\n        CTX.repo.stage_subset(*files_to_add)", "docstring": "Writes the requirement files\n\nArgs:\namend: amend last commit with changes\nstage: stage changes", "source": "codesearchnet"}
{"code": "def _HashRow(cls, row):\n    \n    values = []\n    for value in row:\n      try:\n        value = '{0!s}'.format(value)\n      except UnicodeDecodeError:\n        \n        \n        \n        \n        value = repr(value)\n\n      values.append(value)\n\n    return hash(' '.join(values))", "docstring": "Hashes the given row.\n\nArgs:\nrow (sqlite3.Row): row.\n\nReturns:\nint: hash value of the given row.", "source": "juraj-google-style"}
{"code": "def apply(self, var, props, reverse=False):\n        \n        vs, vid = sort_vid_split(var)\n        if reverse:\n            \n            \n            tms = []\n        else:\n            tms = [(a, op, b) for a, op, b in self._typemap if op in _LR_OPS]\n        for src, op, tgt in tms:\n            if _valmatch([vs], src, op, None, self._semi, 'variables'):\n                vs = vs if tgt == ['*'] else tgt[0]\n                break\n        newvar = '{}{}'.format(vs, vid)\n\n        newprops = {}\n        for featsets, valmap in self._propmap:\n            if reverse:\n                tgtfeats, srcfeats = featsets\n                pms = [(b, op, a) for a, op, b in valmap if op in _RL_OPS]\n            else:\n                srcfeats, tgtfeats = featsets\n                pms = [(a, op, b) for a, op, b in valmap if op in _LR_OPS]\n            vals = [props.get(f) for f in srcfeats]\n            for srcvals, op, tgtvals in pms:\n                if _valmatch(vals, srcvals, op, vs, self._semi, 'properties'):\n                    for i, featval in enumerate(zip(tgtfeats, tgtvals)):\n                        k, v = featval\n                        if v == '*':\n                            print(i, len(vals), vals, k, v)\n                            if i < len(vals) and vals[i] is not None:\n                                newprops[k] = vals[i]\n                        elif v != '!':\n                            newprops[k] = v\n                    break\n\n        return newvar, newprops", "docstring": "Apply the VPM to variable *var* and properties *props*.\n\nArgs:\nvar: a variable\nprops: a dictionary mapping properties to values\nreverse: if `True`, apply the rules in reverse (e.g. from\ngrammar-external to grammar-internal forms)\nReturns:\na tuple (v, p) of the mapped variable and properties", "source": "juraj-google-style"}
{"code": "def _IsIdentifier(cls, string):\n    return (string and (not string[0].isdigit()) and all(((character.isalnum() or (character == '_')) for character in string)))", "docstring": "Checks if a string contains an identifier.\n\nArgs:\nstring (str): string to check.\n\nReturns:\nbool: True if the string contains an identifier, False otherwise.", "source": "codesearchnet"}
{"code": "def flatten(repertoire, big_endian=False):\n    \n    if repertoire is None:\n        return None\n\n    order = 'C' if big_endian else 'F'\n    \n    \n    return repertoire.squeeze().ravel(order=order)", "docstring": "Flatten a repertoire, removing empty dimensions.\n\nBy default, the flattened repertoire is returned in little-endian order.\n\nArgs:\nrepertoire (np.ndarray or None): A repertoire.\n\nKeyword Args:\nbig_endian (boolean): If ``True``, flatten the repertoire in big-endian\norder.\n\nReturns:\nnp.ndarray: The flattened repertoire.", "source": "juraj-google-style"}
{"code": "def _faster_to_representation(self, instance):\n        \n\n        ret = {}\n        fields = self._readable_fields\n\n        is_fast = isinstance(instance, prefetch.FastObject)\n        id_fields = self._readable_id_fields\n\n        for field in fields:\n            attribute = None\n\n            \n            \n            if (\n                is_fast and\n                not isinstance(\n                    field,\n                    (DynamicGenericRelationField, DynamicRelationField)\n                )\n            ):\n                if field in id_fields and field.source not in instance:\n                    \n                    attribute = instance.get(field.source + '_id')\n                    ret[field.field_name] = attribute\n                    continue\n                else:\n                    try:\n                        attribute = instance[field.source]\n                    except KeyError:\n                        \n                        \n                        if hasattr(instance, field.source):\n                            attribute = getattr(instance, field.source)\n                        else:\n                            \n                            attribute = field.get_attribute(instance)\n                            print(\n                                'Missing %s from %s' % (\n                                    field.field_name,\n                                    self.__class__.__name__\n                                )\n                            )\n            else:\n                try:\n                    attribute = field.get_attribute(instance)\n                except SkipField:\n                    continue\n\n            if attribute is None:\n                \n                \n                ret[field.field_name] = None\n            else:\n                ret[field.field_name] = field.to_representation(attribute)\n\n        return ret", "docstring": "Modified to_representation with optimizations.\n\n1) Returns a plain old dict as opposed to OrderedDict.\n(Constructing ordered dict is ~100x slower than `{}`.)\n2) Ensure we use a cached list of fields\n(this optimization exists in DRF 3.2 but not 3.1)\n\nArguments:\ninstance: a model instance or data object\nReturns:\nDict of primitive datatypes.", "source": "juraj-google-style"}
{"code": "def _CreateIndexIfNotExists(self, index_name, mappings):\n    \n    try:\n      if not self._client.indices.exists(index_name):\n        self._client.indices.create(\n            body={'mappings': mappings}, index=index_name)\n\n    except elasticsearch.exceptions.ConnectionError as exception:\n      raise RuntimeError(\n          'Unable to create Elasticsearch index with error: {0!s}'.format(\n              exception))", "docstring": "Creates an Elasticsearch index if it does not exist.\n\nArgs:\nindex_name (str): mame of the index.\nmappings (dict[str, object]): mappings of the index.\n\nRaises:\nRuntimeError: if the Elasticsearch index cannot be created.", "source": "juraj-google-style"}
{"code": "def make_flat_list_of_images(images: Union[list[ImageInput], ImageInput]) -> ImageInput:\n    if isinstance(images, (list, tuple)) and all((isinstance(images_i, (list, tuple)) for images_i in images)) and all((is_valid_list_of_images(images_i) for images_i in images)):\n        return [img for img_list in images for img in img_list]\n    if isinstance(images, (list, tuple)) and is_valid_list_of_images(images):\n        if is_pil_image(images[0]) or images[0].ndim == 3:\n            return images\n        if images[0].ndim == 4:\n            return [img for img_list in images for img in img_list]\n    if is_valid_image(images):\n        if is_pil_image(images) or images.ndim == 3:\n            return [images]\n        if images.ndim == 4:\n            return list(images)\n    raise ValueError(f'Could not make a flat list of images from {images}')", "docstring": "Ensure that the output is a flat list of images. If the input is a single image, it is converted to a list of length 1.\nIf the input is a nested list of images, it is converted to a flat list of images.\nArgs:\nimages (`Union[List[ImageInput], ImageInput]`):\nThe input image.\nReturns:\nlist: A list of images or a 4d array of images.", "source": "github-repos"}
{"code": "def run(self, gin):\n    with ScratchDir('.'):\n        p = subprocess.Popen(self._gulp_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n        (out, err) = p.communicate(bytearray(gin, 'utf-8'))\n        out = out.decode('utf-8')\n        err = err.decode('utf-8')\n        if (('Error' in err) or ('error' in err)):\n            print(gin)\n            print('----output_0---------')\n            print(out)\n            print('----End of output_0------\\n\\n\\n')\n            print('----output_1--------')\n            print(out)\n            print('----End of output_1------')\n            raise GulpError(err)\n        if ('ERROR' in out):\n            raise GulpError(out)\n        conv_err_string = 'Conditions for a minimum have not been satisfied'\n        if (conv_err_string in out):\n            raise GulpConvergenceError()\n        gout = ''\n        for line in out.split('\\n'):\n            gout = ((gout + line) + '\\n')\n        return gout", "docstring": "Run GULP using the gin as input\n\nArgs:\ngin: GULP input string\n\nReturns:\ngout: GULP output string", "source": "codesearchnet"}
{"code": "def freeze(self, permanent_value: Any=utils.MISSING_VALUE, apply_before_use: bool=True) -> 'ValueSpec':", "docstring": "Sets the default value using a permanent value and freezes current spec.\n\nA frozen value spec will not accept any value that is not the default\nvalue. A frozen value spec is useful when a subclass fixes the value of a\nsymoblic attribute and want to prevent it from being modified.\n\nArgs:\npermanent_value: A permanent value used for current spec.\nIf MISSING_VALUE, freeze the value spec with current default value.\napply_before_use: If True, invoke `apply` on permanent value\nwhen permanent_value is provided, otherwise use it as is.\n\nReturns:\nValueSpec itself.\n\nRaises:\nValueError if current default value is MISSING_VALUE and the permanent\nvalue is not specified.", "source": "github-repos"}
{"code": "def _fork_children_processes(name, successors):\n    logging.info('Process \"%s\" started, PID: %d!', name, os.getpid())\n    children_process = [multiprocessing.Process(target=_fork_children_processes, args=args) for args in successors]\n    for child_process in children_process:\n        child_process.start()\n    if 'child' in name:\n        time.sleep(4)\n    for child_process in children_process:\n        child_process.join()\n    logging.info('Process \"%s\" exit.', name)", "docstring": "Forks children processes and its descendants recursively.\n\nArgs:\nname: The name of this process.\nsuccessors: The args for the descendant processes.", "source": "github-repos"}
{"code": "def _check_error(self, response, json_response=None):\n    if (response.status_code >= 400):\n        json_response = (json_response or self._get_json_response(response))\n        err_cls = self._check_http_error_code(response.status_code)\n        try:\n            raise err_cls(('%s error: %s' % (response.status_code, json_response['error']['error_msg'])), response.status_code)\n        except TypeError:\n            raise err_cls(('%s error: %s' % (response.status_code, json_response['error_description'])), response.status_code)\n    return True", "docstring": "Check for HTTP error code from the response, raise exception if there's any\n\nArgs:\nresponse (object): Object returned by requests' `get` and `post`\nmethods\n\njson_response (dict): JSON response, if applicable\n\nRaises:\nHTTPError: If the status code of response is either 4xx or 5xx\n\nReturns:\nTrue if status code is not error code", "source": "codesearchnet"}
{"code": "def resolve_one_of(tags, at_least_one):\n    if (len(tags) < len(at_least_one)):\n        return None\n    for possible_resolution in choose_1_from_each(at_least_one):\n        resolution = {}\n        pr = possible_resolution[:]\n        for entity_type in pr:\n            last_end_index = (- 1)\n            if (entity_type in resolution):\n                last_end_index = resolution.get[entity_type][(- 1)].get('end_token')\n            (tag, value, c) = find_first_tag(tags, entity_type, after_index=last_end_index)\n            if (not tag):\n                break\n            else:\n                if (entity_type not in resolution):\n                    resolution[entity_type] = []\n                resolution[entity_type].append(tag)\n        if (len(resolution) == len(possible_resolution)):\n            return resolution\n    return None", "docstring": "This searches tags for Entites in at_least_one and returns any match\n\nArgs:\ntags(list): List of tags with Entities to search for Entities\nat_least_one(list): List of Entities to find in tags\n\nReturns:\nobject: returns None if no match is found but returns any match as an object", "source": "codesearchnet"}
{"code": "def _shape_invariant_to_type_spec(var, shape=None):\n    var = _convert_tensorarray_to_flow(var)\n    if shape is None:\n        return type_spec.type_spec_from_value(var)\n    elif isinstance(shape, type_spec.TypeSpec):\n        if not shape.is_compatible_with(var):\n            raise TypeError('TypeSpec %r is not compatible with %r' % (shape, var))\n        return shape\n    elif not isinstance(shape, tensor_shape.TensorShape):\n        raise TypeError(f\"'shape' must be one of TypeSpec, TensorShape or None. Received: {type(shape)}\")\n    if isinstance(var, tensor_lib.Tensor):\n        return tensor_lib.TensorSpec(shape, var.dtype)\n    else:\n        try:\n            return var._shape_invariant_to_type_spec(shape)\n        except NotImplementedError as e:\n            raise TypeError(f'To describe or constrain a {type(var).__name__}, use a {type(var._type_spec).__name__} instead of a TensorShape.') from e", "docstring": "Converts a shape invariant to a TypeSpec.\n\nIf `var` is a TensorArray, it will first be converted to its flow.\n\nArgs:\nvar: The tensor, tensor array or composite tensor whose shape is described\nby the shape invariant.\nshape: A `TypeSpec` or `TensorShape`.  If `shape` is already a `TypeSpec`,\nthen it is simply returned as-is.\n\nReturns:\nA `TypeSpec` for `var`, consistent with the given shape.\n\nRaises:\nTypeError: If `shape` is a TypeSpec and not compatible with `var`.\nTypeError: If `shape` is not None, a TypeSpec, or a TensorShape.\nTypeError: If `shape` is a TensorShape, `var` is a CompositeTensor, and\n`var` doesn't implement the `_shape_invariant_to_type_spec` method.", "source": "github-repos"}
{"code": "def _collect_feature_info(self, candidate_feature_diffs):\n    project_root = self.project.path\n    for diff in candidate_feature_diffs:\n        path = diff.b_path\n        modname = relpath_to_modname(path)\n        modpath = project_root.joinpath(path)\n        importer = partial(import_module_at_path, modname, modpath)\n        (yield (importer, modname, modpath))", "docstring": "Collect feature info\n\nArgs:\ncandidate_feature_diffs (List[git.diff.Diff]): list of Diffs\ncorresponding to admissible file changes compared to\ncomparison ref\n\nReturns:\nList[Tuple]: list of tuple of importer, module name, and module\npath. The \"importer\" is a callable that returns a module", "source": "codesearchnet"}
{"code": "def query_blockchain_events(\n        web3: Web3,\n        contract_manager: ContractManager,\n        contract_address: Address,\n        contract_name: str,\n        topics: List,\n        from_block: BlockNumber,\n        to_block: BlockNumber,\n) -> List[Dict]:\n    \n    filter_params = {\n        'fromBlock': from_block,\n        'toBlock': to_block,\n        'address': to_checksum_address(contract_address),\n        'topics': topics,\n    }\n\n    events = web3.eth.getLogs(filter_params)\n\n    contract_abi = contract_manager.get_contract_abi(contract_name)\n    return [\n        decode_event(\n            abi=contract_abi,\n            log_=raw_event,\n        )\n        for raw_event in events\n    ]", "docstring": "Returns events emmitted by a contract for a given event name, within a certain range.\n\nArgs:\nweb3: A Web3 instance\ncontract_manager: A contract manager\ncontract_address: The address of the contract to be filtered, can be `None`\ncontract_name: The name of the contract\ntopics: The topics to filter for\nfrom_block: The block to start search events\nto_block: The block to stop searching for events\n\nReturns:\nAll matching events", "source": "juraj-google-style"}
{"code": "def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    coeffs = _get_perspective_coeffs(startpoints, endpoints)\n    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)", "docstring": "Perform perspective transform of the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be transformed.\ncoeffs (tuple) : 8-tuple (a, b, c, d, e, f, g, h) which contains the coefficients.\nfor a perspective transform.\ninterpolation: Default- Image.BICUBIC\nReturns:\nPIL Image:  Perspectively transformed Image.", "source": "codesearchnet"}
{"code": "def query_band(self, value):\n    self._query_band = value\n    if (value is None):\n        try:\n            del self._connectionXML.attrib['query-band-spec']\n        except KeyError:\n            pass\n    else:\n        self._connectionXML.set('query-band-spec', value)", "docstring": "Set the connection's query_band property.\n\nArgs:\nvalue:  New query_band value. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def get_connection_string(params, hide_password=True):\n        \n        connection_string = params['driver'] + ':\n\n        user = params.get('user', None)\n        password = params.get('password', None)\n        host = params.get('host', None)\n        port = params.get('port', None)\n        database = params.get('database', None)\n\n        if database is None:\n            raise ValueError(\"Field 'database' of connection parameters cannot be None.\")\n\n        \n        if password is None and user is not None:\n            \n            password = Client._get_password(params)\n\n            if password is None:\n                raise RuntimeError(\"Password not defined and not available in keyring.\")\n\n        \n        if host is not None:\n\n            \n            if user is not None:\n                connection_string += user\n\n                \n                if len(password) > 0:\n                    if hide_password:\n                        connection_string += \":[password hidden]\"\n                    else:\n                        connection_string += \":\" + password\n\n                connection_string += \"@\"\n\n            connection_string += host\n\n            if port is not None:\n                connection_string += ':' + str(port)\n\n        \n        connection_string += '/' + database\n\n        return connection_string", "docstring": "Get a database connection string\n\nArgs:\nparams (dict): database configuration, as defined in :mod:`ozelot.config`\nhide_password (bool): if True, the password is hidden in the returned string\n(use this for logging purposes).\n\nReturns:\nstr: connection string", "source": "juraj-google-style"}
{"code": "def add_tags(self):\n    session = boto3.session.Session(profile_name=self.env, region_name=self.region)\n    resource = session.resource('ec2')\n    group_id = get_security_group_id(self.app_name, self.env, self.region)\n    security_group = resource.SecurityGroup(group_id)\n    try:\n        tag = security_group.create_tags(DryRun=False, Tags=[{'Key': 'app_group', 'Value': self.group}, {'Key': 'app_name', 'Value': self.app_name}])\n        self.log.debug('Security group has been tagged: %s', tag)\n    except botocore.exceptions.ClientError as error:\n        self.log.warning(error)\n    return True", "docstring": "Add tags to security group.\n\nReturns:\nTrue: Upon successful completion.", "source": "codesearchnet"}
{"code": "def insert(self, iterable, index=0, data=None, weight=1.0):\n    if (index == len(iterable)):\n        self.is_terminal = True\n        self.key = iterable\n        self.weight = weight\n        if data:\n            self.data.add(data)\n    else:\n        if (iterable[index] not in self.children):\n            self.children[iterable[index]] = TrieNode()\n        self.children[iterable[index]].insert(iterable, (index + 1), data)", "docstring": "Insert new node into tree\n\nArgs:\niterable(hashable): key used to find in the future.\ndata(object): data associated with the key\nindex(int): an index used for insertion.\nweight(float): the wait given for the item added.", "source": "codesearchnet"}
{"code": "def int(name, default=None, allow_none=False, fallback=None):\n    \n    value = read(name, default, allow_none, fallback=fallback)\n    if isinstance(value, builtins.str):\n        value = value.strip()\n\n    if value is None and allow_none:\n        return None\n    else:\n        return builtins.int(value)", "docstring": "Get a string environment value or the default.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)", "source": "juraj-google-style"}
{"code": "def plot_state_hinton(rho, title='', figsize=None):\n    \n    if not HAS_MATPLOTLIB:\n        raise ImportError('Must have Matplotlib installed.')\n    rho = _validate_input_state(rho)\n    if figsize is None:\n        figsize = (8, 5)\n    num = int(np.log2(len(rho)))\n    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)\n    max_weight = 2 ** np.ceil(np.log(np.abs(rho).max()) / np.log(2))\n    datareal = np.real(rho)\n    dataimag = np.imag(rho)\n    column_names = [bin(i)[2:].zfill(num) for i in range(2**num)]\n    row_names = [bin(i)[2:].zfill(num) for i in range(2**num)]\n    lx = len(datareal[0])            \n    ly = len(datareal[:, 0])\n    \n    ax1.patch.set_facecolor('gray')\n    ax1.set_aspect('equal', 'box')\n    ax1.xaxis.set_major_locator(plt.NullLocator())\n    ax1.yaxis.set_major_locator(plt.NullLocator())\n\n    for (x, y), w in np.ndenumerate(datareal):\n        color = 'white' if w > 0 else 'black'\n        size = np.sqrt(np.abs(w) / max_weight)\n        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,\n                             facecolor=color, edgecolor=color)\n        ax1.add_patch(rect)\n\n    ax1.set_xticks(np.arange(0, lx+0.5, 1))\n    ax1.set_yticks(np.arange(0, ly+0.5, 1))\n    ax1.set_yticklabels(row_names, fontsize=14)\n    ax1.set_xticklabels(column_names, fontsize=14, rotation=90)\n    ax1.autoscale_view()\n    ax1.invert_yaxis()\n    ax1.set_title('Real[rho]', fontsize=14)\n    \n    ax2.patch.set_facecolor('gray')\n    ax2.set_aspect('equal', 'box')\n    ax2.xaxis.set_major_locator(plt.NullLocator())\n    ax2.yaxis.set_major_locator(plt.NullLocator())\n\n    for (x, y), w in np.ndenumerate(dataimag):\n        color = 'white' if w > 0 else 'black'\n        size = np.sqrt(np.abs(w) / max_weight)\n        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,\n                             facecolor=color, edgecolor=color)\n        ax2.add_patch(rect)\n    if np.any(dataimag != 0):\n        ax2.set_xticks(np.arange(0, lx+0.5, 1))\n        ax2.set_yticks(np.arange(0, ly+0.5, 1))\n        ax2.set_yticklabels(row_names, fontsize=14)\n        ax2.set_xticklabels(column_names, fontsize=14, rotation=90)\n    ax2.autoscale_view()\n    ax2.invert_yaxis()\n    ax2.set_title('Imag[rho]', fontsize=14)\n    if title:\n        fig.suptitle(title, fontsize=16)\n    plt.tight_layout()\n    plt.close(fig)\n    return fig", "docstring": "Plot a hinton diagram for the quanum state.\n\nArgs:\nrho (ndarray): Numpy array for state vector or density matrix.\ntitle (str): a string that represents the plot title\nfigsize (tuple): Figure size in inches.\nReturns:\nmatplotlib.Figure: The matplotlib.Figure of the visualization\n\nRaises:\nImportError: Requires matplotlib.", "source": "juraj-google-style"}
{"code": "def save_image(tensor, filename, nrow=8, padding=2,\n               normalize=False, range=None, scale_each=False, pad_value=0):\n    \n    from PIL import Image\n    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n                     normalize=normalize, range=range, scale_each=scale_each)\n    \n    ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n    im = Image.fromarray(ndarr)\n    im.save(filename)", "docstring": "Save a given Tensor into an image file.\n\nArgs:\ntensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\nsaves the tensor as a grid of images by calling ``make_grid``.\n**kwargs: Other arguments are documented in ``make_grid``.", "source": "juraj-google-style"}
{"code": "def get_token(self):\n    if (self.token == None):\n        self.token = self.authenticate(self.username, self.password)\n    try:\n        return str(self.token, 'utf-8')\n    except TypeError:\n        return self.token", "docstring": "Method to retrieve an auth token.\n\nThe cached global token is looked up and returned if it exists. If it\nis `None` a new one is requested and returned.\n\nReturns:\nSimplenote API token as string", "source": "codesearchnet"}
{"code": "def is_diagonal_scale(scale):\n  \n  if not isinstance(scale, tf.linalg.LinearOperator):\n    raise TypeError(\"Expected argument 'scale' to be instance of LinearOperator\"\n                    \". Found: %s\" % scale)\n  return (isinstance(scale, tf.linalg.LinearOperatorIdentity) or\n          isinstance(scale, tf.linalg.LinearOperatorScaledIdentity) or\n          isinstance(scale, tf.linalg.LinearOperatorDiag))", "docstring": "Returns `True` if `scale` is a `LinearOperator` that is known to be diag.\n\nArgs:\nscale:  `LinearOperator` instance.\n\nReturns:\nPython `bool`.\n\nRaises:\nTypeError:  If `scale` is not a `LinearOperator`.", "source": "juraj-google-style"}
{"code": "def __init__(self, receive_port):\n    \n    super(InteractiveDebuggerDataServer, self).__init__(\n        receive_port, InteractiveDebuggerDataStreamHandler)\n\n    self._incoming_channel = queue.Queue()\n    self._outgoing_channel = comm_channel_lib.CommChannel()\n    self._run_states = RunStates(breakpoints_func=lambda: self.breakpoints)\n    self._tensor_store = tensor_store_lib.TensorStore()\n    self._source_manager = SourceManager()\n\n    curried_handler_constructor = functools.partial(\n        InteractiveDebuggerDataStreamHandler,\n        self._incoming_channel, self._outgoing_channel, self._run_states,\n        self._tensor_store)\n    grpc_debug_server.EventListenerBaseServicer.__init__(\n        self, receive_port, curried_handler_constructor)", "docstring": "Receives health pills from a debugger and writes them to disk.\n\nArgs:\nreceive_port: The port at which to receive health pills from the\nTensorFlow debugger.\nalways_flush: A boolean indicating whether the EventsWriter will be\nflushed after every write. Can be used for testing.", "source": "juraj-google-style"}
{"code": "def add_streamer(self, binary_descriptor):\n    streamer = streamer_descriptor.parse_binary_descriptor(binary_descriptor)\n    try:\n        self.graph.add_streamer(streamer)\n        self.streamer_status[(len(self.graph.streamers) - 1)] = StreamerStatus()\n        return Error.NO_ERROR\n    except ResourceUsageError:\n        return _pack_sgerror(SensorGraphError.NO_MORE_STREAMER_RESOURCES)", "docstring": "Add a streamer to the sensor_graph using a binary streamer descriptor.\n\nArgs:\nbinary_descriptor (bytes): An encoded binary streamer descriptor.\n\nReturns:\nint: A packed error code", "source": "codesearchnet"}
{"code": "def get_config_parameter(config: ConfigParser,\n                         section: str,\n                         param: str,\n                         fn: Callable[[Any], Any],\n                         default: Any) -> Any:\n    \n    try:\n        value = fn(config.get(section, param))\n    except (TypeError, ValueError, NoOptionError):\n        log.warning(\n            \"Configuration variable {} not found or improper in section [{}]; \"\n            \"using default of {!r}\", param, section, default)\n        if default is None:\n            value = default\n        else:\n            value = fn(default)\n    return value", "docstring": "Fetch parameter from ``configparser`` ``.INI`` file.\n\nArgs:\nconfig: :class:`ConfigParser` object\nsection: section name within config file\nparam: name of parameter within section\nfn: function to apply to string parameter (e.g. ``int``)\ndefault: default value\n\nReturns:\nparameter value, or ``None`` if ``default is None``, or ``fn(default)``", "source": "juraj-google-style"}
{"code": "def log_error(cls, msg):\n    cls.error_logger.error(msg)\n    cls.debug_logger.debug(msg)", "docstring": "Logs the provided error message to both the error logger and the debug logger logging\ninstances.\n\nArgs:\nmsg: `str`. The error message to log.", "source": "codesearchnet"}
{"code": "def element(self, using, value):\n    return self._execute(Command.FIND_CHILD_ELEMENT, {'using': using, 'value': value})", "docstring": "find an element in the current element.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nWebElement Object.\n\nRaises:\nWebDriverException.", "source": "codesearchnet"}
{"code": "def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n    del weight_collections\n    del trainable\n    return inputs.get(self)", "docstring": "Returns dense `Tensor` representing numeric feature.\n\nArgs:\ninputs: A `_LazyBuilder` object to access inputs.\nweight_collections: Unused `weight_collections` since no variables are\ncreated in this function.\ntrainable: Unused `trainable` bool since no variables are created in this\nfunction.\n\nReturns:\nDense `Tensor` created within `_transform_feature`.", "source": "github-repos"}
{"code": "def stringize(self, rnf_profile):\n    coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right)))\n    return '({},{},{},{},{})'.format(str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width))", "docstring": "Create RNF representation of this segment.\n\nArgs:\nrnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).", "source": "codesearchnet"}
{"code": "def get_kpoint_weights(self, kpoints, atol=1e-05):\n    kpts = np.array(kpoints)\n    shift = []\n    mesh = []\n    for i in range(3):\n        nonzero = [i for i in kpts[(:, i)] if (abs(i) > 1e-05)]\n        if (len(nonzero) != len(kpts)):\n            if (not nonzero):\n                mesh.append(1)\n            else:\n                m = np.abs(np.round((1 / np.array(nonzero))))\n                mesh.append(int(max(m)))\n            shift.append(0)\n        else:\n            m = np.abs(np.round((0.5 / np.array(nonzero))))\n            mesh.append(int(max(m)))\n            shift.append(1)\n    (mapping, grid) = spglib.get_ir_reciprocal_mesh(np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)\n    mapping = list(mapping)\n    grid = ((np.array(grid) + (np.array(shift) * (0.5, 0.5, 0.5))) / mesh)\n    weights = []\n    mapped = defaultdict(int)\n    for k in kpoints:\n        for (i, g) in enumerate(grid):\n            if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):\n                mapped[tuple(g)] += 1\n                weights.append(mapping.count(mapping[i]))\n                break\n    if ((len(mapped) != len(set(mapping))) or (not all([(v == 1) for v in mapped.values()]))):\n        raise ValueError('Unable to find 1:1 corresponding between input kpoints and irreducible grid!')\n    return [(w / sum(weights)) for w in weights]", "docstring": "Calculate the weights for a list of kpoints.\n\nArgs:\nkpoints (Sequence): Sequence of kpoints. np.arrays is fine. Note\nthat the code does not check that the list of kpoints\nprovided does not contain duplicates.\natol (float): Tolerance for fractional coordinates comparisons.\n\nReturns:\nList of weights, in the SAME order as kpoints.", "source": "codesearchnet"}
{"code": "def additive_coupling(name, x, mid_channels=512, reverse=False,\n                      activation=\"relu\", dropout=0.0):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    output_channels = common_layers.shape_list(x)[-1] \n    x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)\n\n    z1 = x1\n    shift = conv_stack(\"nn\", x1, mid_channels, output_channels=output_channels,\n                       activation=activation, dropout=dropout)\n\n    if not reverse:\n      z2 = x2 + shift\n    else:\n      z2 = x2 - shift\n    return tf.concat([z1, z2], axis=3), 0.0", "docstring": "Reversible additive coupling layer.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor, shape=(NHWC).\nmid_channels: number of channels in the coupling layer.\nreverse: Forward or reverse operation.\nactivation: \"relu\" or \"gatu\"\ndropout: default, 0.0\nReturns:\noutput: 4-D Tensor, shape=(NHWC)\nobjective: 0.0", "source": "juraj-google-style"}
{"code": "def TrimVariableTable(self, new_size):\n    \n\n    def ProcessBufferFull(variables):\n      for variable in variables:\n        var_index = variable.get('varTableIndex')\n        if var_index is not None and (var_index >= new_size):\n          variable['varTableIndex'] = 0  \n        members = variable.get('members')\n        if members is not None:\n          ProcessBufferFull(members)\n\n    del self._var_table[new_size:]\n    ProcessBufferFull(self.breakpoint['evaluatedExpressions'])\n    for stack_frame in self.breakpoint['stackFrames']:\n      ProcessBufferFull(stack_frame['arguments'])\n      ProcessBufferFull(stack_frame['locals'])\n    ProcessBufferFull(self._var_table)", "docstring": "Trims the variable table in the formatted breakpoint message.\n\nRemoves trailing entries in variables table. Then scans the entire\nbreakpoint message and replaces references to the trimmed variables to\npoint to var_index of 0 (\"buffer full\").\n\nArgs:\nnew_size: desired size of variables table.", "source": "juraj-google-style"}
{"code": "def get_course_final_price(self, mode, currency='$', enterprise_catalog_uuid=None):\n    try:\n        price_details = self.client.baskets.calculate.get(sku=[mode['sku']], username=self.user.username, catalog=enterprise_catalog_uuid)\n    except (SlumberBaseException, ConnectionError, Timeout) as exc:\n        LOGGER.exception('Failed to get price details for sku %s due to: %s', mode['sku'], str(exc))\n        price_details = {}\n    price = price_details.get('total_incl_tax', mode['min_price'])\n    if (price != mode['min_price']):\n        return format_price(price, currency)\n    return mode['original_price']", "docstring": "Get course mode's SKU discounted price after applying any entitlement available for this user.\n\nReturns:\nstr: Discounted price of the course mode.", "source": "codesearchnet"}
{"code": "def ExpandWindowsEnvironmentVariables(data_string, knowledge_base):\n  r\n  win_environ_regex = re.compile(r\"%([^%]+?)%\")\n  components = []\n  offset = 0\n  for match in win_environ_regex.finditer(data_string):\n    components.append(data_string[offset:match.start()])\n\n    \n    kb_value = getattr(knowledge_base, \"environ_%s\" % match.group(1).lower(),\n                       None)\n    if isinstance(kb_value, string_types) and kb_value:\n      components.append(kb_value)\n    else:\n      \n      components.append(\"%%%s%%\" % match.group(1))\n    offset = match.end()\n  components.append(data_string[offset:])  \n  return \"\".join(components)", "docstring": "r\"\"\"Take a string and expand any windows environment variables.\n\nArgs:\ndata_string: A string, e.g. \"%SystemRoot%\\\\LogFiles\"\nknowledge_base: A knowledgebase object.\n\nReturns:\nA string with available environment variables expanded. If we can't expand\nwe just return the string with the original variables.", "source": "juraj-google-style"}
{"code": "def HasOutputClass(cls, name):\n    \n    if not isinstance(name, py2to3.STRING_TYPES):\n      return False\n\n    return name.lower() in cls._output_classes", "docstring": "Determines if a specific output class is registered with the manager.\n\nArgs:\nname (str): name of the output module.\n\nReturns:\nbool: True if the output class is registered.", "source": "juraj-google-style"}
{"code": "def get_country_info_from_m49(cls, m49, use_live=True, exception=None):\n        \n        \n        iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)\n        if iso3 is not None:\n            return cls.get_country_info_from_iso3(iso3, exception=exception)\n        return None", "docstring": "Get country name from M49 code\n\nArgs:\nm49 (int): M49 numeric code for which to get country information\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[Dict[str]]: Country information", "source": "juraj-google-style"}
{"code": "async def remove_participant(self, p: Participant):\n    (await self.connection('DELETE', 'tournaments/{}/participants/{}'.format(self._id, p._id)))\n    if (p in self.participants):\n        self.participants.remove(p)", "docstring": "remove a participant from the tournament\n\n|methcoro|\n\nArgs:\np: the participant to remove\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def from_raw(self, raw: RawScalar) -> Optional[ScalarValue]:\n    if isinstance(raw, str):\n        return raw", "docstring": "Return a cooked value of the receiver type.\n\nArgs:\nraw: Raw value obtained from JSON parser.", "source": "codesearchnet"}
{"code": "def userinfo(self, access_token):\n    return self.get(url='https:", "docstring": "Returns the user information based on the Auth0 access token.\nThis endpoint will work only if openid was granted as a scope for the access_token.\n\nArgs:\naccess_token (str): Auth0 access token (obtained during login).\n\nReturns:\nThe user profile.", "source": "codesearchnet"}
{"code": "def missing_values(self, flatten: bool=True) -> Dict[Union[str, int], Any]:", "docstring": "Returns missing values from this object.\n\nArgs:\nflatten: If True, convert nested structures into a flattened dict using\nkey path (delimited by '.' and '[]') as key.\n\nReturns:\nA dict of key to MISSING_VALUE.", "source": "github-repos"}
{"code": "def _MaybeCaptured(t):\n    if not isinstance(t, ops.EagerTensor) and _IsFunction(t.op.graph) and (t.op.type == 'Placeholder'):\n        for input_t, placeholder_t in _Captures(t.op.graph):\n            if t is placeholder_t:\n                return _MaybeCaptured(input_t)\n    return t", "docstring": "If t is a captured value placeholder, returns the original captured value.\n\nArgs:\nt: Tensor\n\nReturns:\nA tensor, potentially from a different Graph/FuncGraph.", "source": "github-repos"}
{"code": "def sanity_check_type(self, other):\n    if type(self) is not type(other):\n        raise ValueError('No TypeSpec is compatible with both %s and %s' % (self, other))\n    if self._input_workers.serialize() != other._input_workers.serialize():\n        raise ValueError('_input_workers is not compatible with both %s and %s' % (self, other))\n    if self._strategy is not other._strategy:\n        raise ValueError('tf.distribute strategy is not compatible with both %s and %s' % (self, other))", "docstring": "Returns the most specific TypeSpec compatible with `self` and `other`.\n\nArgs:\nother: A `TypeSpec`.\n\nRaises:\nValueError: If there is no TypeSpec that is compatible with both `self`\nand `other`.", "source": "github-repos"}
{"code": "def _DownloadScript(self, url, dest_dir):\n    \n    \n    \n    if url.startswith(r'gs:\n      \n      url = re.sub('^gs:\n      return self._DownloadAuthUrl(url, dest_dir)\n\n    header = r'http[s]?:\n    domain = r'storage\\.googleapis\\.com'\n\n    \n    \n    \n    bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'\n\n    \n    obj = r'(?P<obj>[^\\*\\?]+)'\n\n    \n    \n    \n    gs_regex = re.compile(r'\\A%s%s\\.%s/%s\\Z' % (header, bucket, domain, obj))\n    match = gs_regex.match(url)\n    if match:\n      return self._DownloadAuthUrl(url, dest_dir)\n\n    \n    \n    \n    \n    \n    \n    \n    gs_regex = re.compile(\n        r'\\A%s(commondata)?%s/%s/%s\\Z' % (header, domain, bucket, obj))\n    match = gs_regex.match(url)\n    if match:\n      return self._DownloadAuthUrl(url, dest_dir)\n\n    \n    return self._DownloadUrl(url, dest_dir)", "docstring": "Download the contents of the URL to the destination.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "juraj-google-style"}
{"code": "def update_branch(profile, name, sha):\n    ref = ('heads/' + name)\n    data = refs.update_ref(profile, ref, sha)\n    return data", "docstring": "Move a branch's HEAD to a new SHA.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the branch to update.\n\nsha\nThe commit SHA to point the branch's HEAD to.\n\nReturns:\nA dict with data about the branch.", "source": "codesearchnet"}
{"code": "def check_email_exists_by_subject(self, subject, match_recipient=None):\n        \n        \n        self._mail.select(\"inbox\")\n\n        try:\n            matches = self.__search_email_by_subject(subject, match_recipient)\n            if len(matches) <= 0:\n                return False\n            else:\n                return True\n        except Exception as e:\n            raise e", "docstring": "Searches for Email by Subject.  Returns True or False.\n\nArgs:\nsubject (str): Subject to search for.\n\nKwargs:\nmatch_recipient (str) : Recipient to match exactly. (don't care if not specified)\n\nReturns:\nTrue - email found, False - email not found", "source": "juraj-google-style"}
{"code": "def ConfigsToTest():\n\n    def Config(input_size, filter_size, out_size, stride=1, padding='SAME', dilations=None):\n        return (input_size, filter_size, out_size, stride, padding, dilations)\n    return [Config([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96]), Config([4, 8, 8, 84], [1, 3, 84, 1], [4, 8, 8, 84]), Config([4, 17, 17, 48], [3, 1, 48, 4], [4, 17, 17, 192]), Config([4, 9, 27, 8], [3, 3, 8, 1], [4, 9, 27, 8]), Config([4, 31, 31, 7], [3, 3, 7, 1], [4, 31, 31, 7]), Config([4, 35, 35, 2], [5, 5, 2, 1], [4, 35, 35, 2]), Config([4, 147, 147, 2], [3, 3, 2, 8], [4, 49, 49, 16], 3, padding='VALID'), Config([3, 299, 299, 3], [3, 2, 3, 8], [3, 150, 150, 24], 2), Config([5, 183, 183, 1], [5, 5, 1, 2], [5, 92, 92, 2], 2), Config([5, 183, 183, 1], [5, 5, 1, 2], [5, 183, 183, 2], dilations=[2, 2]), Config([5, 41, 35, 2], [4, 7, 2, 2], [5, 32, 23, 4], padding='VALID', dilations=[3, 2])]", "docstring": "Iterator for different convolution shapes, strides and paddings.\n\nReturns:\nList of tuples (input_size, filter_size, out_size, stride, padding,\ndilations), the depthwise convolution parameters.", "source": "github-repos"}
{"code": "def notify_by(self, invoice_id, medium, **kwargs):\n        \n        url = \"{}/{}/notify_by/{}\".format(self.base_url, invoice_id, medium)\n        return self.post_url(url, {}, **kwargs)", "docstring": "Send/Resend notifications to customer via email/sms\n\nArgs:\ninvoice_id : Id for trigger notify\nmedium : Medium for triggering notification via email or sms\n\nReturns:\n{\"success\": true}", "source": "juraj-google-style"}
{"code": "def __init__(self, kind=None, project=None, namespace=None, ancestor=None, filters=(), projection=(), order=(), distinct_on=(), limit=None):\n    self.kind = kind\n    self.project = project\n    self.namespace = namespace\n    self.ancestor = ancestor\n    self.filters = filters or ()\n    self.projection = projection\n    self.order = order\n    self.distinct_on = distinct_on\n    self.limit = limit", "docstring": "Represents a Datastore query.\n\nArgs:\nkind: (str) The kind to query.\nproject: (str) Required. Project associated with query.\nnamespace: (str, ValueProvider(str)) (Optional) Namespace to restrict\nresults to.\nancestor: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`)\n(Optional) key of the ancestor to which this query's results are\nrestricted.\nfilters: (sequence of tuple[str, str, str],\nsequence of\ntuple[ValueProvider(str), ValueProvider(str), ValueProvider(str)])\nProperty filters applied by this query.\nThe sequence is ``(property_name, operator, value)``.\nprojection: (sequence of string) fields returned as part of query results.\norder: (sequence of string) field names used to order query results.\nPrepend ``-`` to a field name to sort it in descending order.\ndistinct_on: (sequence of string) field names used to group query\nresults.\nlimit: (int) Maximum amount of results to return.", "source": "github-repos"}
{"code": "def _post_process(self, feed_item, item):\n    campaign = self._campaign_dao.get(feed_item, required=True)\n    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n    landing_page = self._landing_page_dao.get(feed_item, required=True)\n    if landing_page:\n        feed_item[FieldMap.AD_LANDING_PAGE_ID] = landing_page['id']\n    self._sub_entity_map(feed_item['creative_assignment'], item, campaign)\n    self._sub_entity_map(feed_item['placement_assignment'], item, campaign)\n    self._sub_entity_map(feed_item['event_tag_assignment'], item, campaign)", "docstring": "Maps ids and names of related entities so they can be updated in the Bulkdozer feed.\n\nWhen Bulkdozer is done processing an item, it writes back the updated names\nand ids of related objects, this method makes sure those are updated in the\nad feed.\n\nArgs:\nfeed_item: Feed item representing the ad from the Bulkdozer feed.\nitem: The DCM ad being updated or created.", "source": "github-repos"}
{"code": "def read_at(self, d, index=False):\n        \n        for i, iv in enumerate(self):\n            if iv.spans(d):\n                return i if index else iv\n        return None", "docstring": "Get the index of the interval at a particular 'depth' (though this\nmight be an elevation or age or anything).\n\nArgs:\nd (Number): The 'depth' to query.\nindex (bool): Whether to return the index instead of the interval.\n\nReturns:\nInterval: The interval, or if ``index==True`` the index of the\ninterval, at the specified 'depth', or ``None`` if the depth is\noutside the striplog's range.", "source": "juraj-google-style"}
{"code": "def from_keras_log(csv_path, output_dir_path, **kwargs):\n    data = pd.read_csv(csv_path, sep=None, engine='python')\n    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)", "docstring": "Plot accuracy and loss from a Keras CSV log.\n\nArgs:\ncsv_path: The path to the CSV log with the actual data.\noutput_dir_path: The path to the directory where the resultings plots\nshould end up.", "source": "codesearchnet"}
{"code": "def transform_and_overwrite_file(self, file_path: str, transformation: Optional[Callable[[Iterator[str]], Iterator[str]]]=None) -> None:\n    if transformation is None:\n        transformation = self.annotate_test_file\n    with open(file_path, mode='r') as original_file, tempfile.NamedTemporaryFile(mode='w', delete=False) as transformed_file:\n        transformed_file.writelines(transformation(original_file))\n    shutil.move(transformed_file.name, file_path)", "docstring": "Transforms the contents of `file_path`, overwriting the file.\n\nArgs:\nfile_path: The path to the file whose contents are to be transformed.\ntransformation: A function that takes an iterator over the lines of an HLO\nfile and returns an iterator over the lines of the transformed file. If\nthis is left as `None`, `self.annotate_test_file` will be used.", "source": "github-repos"}
{"code": "def restore(cdiff, a):\n    \n    left = a.splitlines(1) if isinstance(a, string_types) else a\n    lrest = []\n    iline = 0\n    \n    for i, line in enumerate(left):\n        if iline not in cdiff:\n            lrest.append(\"  \" + line)\n            iline += 1\n        else:\n            cs = [l[0] for l in cdiff[iline]]\n            add = cs.count('+') - cs.count('-')\n            lrest.extend(cdiff[iline])\n            iline += add + 1\n            \n    for i in sorted(cdiff.keys()):\n        if i >= len(left):\n            lrest.extend(cdiff[i])\n\n    from difflib import restore\n    return list(restore(lrest, 2))", "docstring": "Restores the full text of either the edited text using the\ncompressed diff.\n\nArgs:\ncdiff (dict): compressed diff returned by\n:func:`~acorn.logging.diff.compress`.\na (str or list): *original* string or list of strings to use as a\nreference to restore the edited version.", "source": "juraj-google-style"}
{"code": "def get_table_schema(schema):\n    if schema is None:\n        return schema\n    elif isinstance(schema, str):\n        return bigquery_tools.parse_table_schema_from_json(schema)\n    elif isinstance(schema, dict):\n        return bigquery_tools.parse_table_schema_from_json(json.dumps(schema))\n    else:\n        raise TypeError('Unexpected schema argument: %s.' % schema)", "docstring": "Transform the table schema into a bigquery.TableSchema instance.\n\nArgs:\nschema: The schema to be used if the BigQuery table to write has to be\ncreated. This is a dictionary object created in the WriteToBigQuery\ntransform.\nReturns:\ntable_schema: The schema to be used if the BigQuery table to write has\nto be created but in the bigquery.TableSchema format.", "source": "github-repos"}
{"code": "def replace_code(code: str, replace_pattern: str) -> str:\n    if len(replace_pattern) > 0:\n        patterns = replace_pattern.replace('with', '').split(',')\n        patterns = [_re_replace_pattern.search(p) for p in patterns]\n        for pattern in patterns:\n            if pattern is None:\n                continue\n            obj1, obj2, option = pattern.groups()\n            code = re.sub(obj1, obj2, code)\n            if option.strip() == 'all-casing':\n                code = re.sub(obj1.lower(), obj2.lower(), code)\n                code = re.sub(obj1.upper(), obj2.upper(), code)\n    return code", "docstring": "Replace `code` by a pattern of the form `with X1->X2,Y1->Y2,Z1->Z2`.\n\nArgs:\ncode (`str`): The code to be modified.\nreplace_pattern (`str`): The pattern used to modify `code`.\n\nReturns:\n`str`: The modified code.", "source": "github-repos"}
{"code": "def _assert_sparse_compatible(sparse_tensors):\n    checks = []\n    first = sparse_tensors[0]\n    for t in sparse_tensors[1:]:\n        checks.append(check_ops.assert_equal(first.dense_shape, t.dense_shape, message='Mismatched shapes!'))\n        checks.append(check_ops.assert_equal(first.indices, t.indices, message='Mismatched indices!'))\n    return checks", "docstring": "Check that all of `sparse_tensors` have same `indices` and `dense_shape`.\n\nArgs:\nsparse_tensors: A list of sparse tensors.\n\nReturns:\nAn op to be used as a control dependency.", "source": "github-repos"}
{"code": "def print_summary(model, line_length=None, positions=None, print_fn=None):\n    if print_fn is None:\n        print_fn = print\n    if model.__class__.__name__ == 'Sequential':\n        sequential_like = True\n    elif not model._is_graph_network:\n        sequential_like = True\n    else:\n        sequential_like = True\n        nodes_by_depth = model._nodes_by_depth.values()\n        nodes = []\n        for v in nodes_by_depth:\n            if len(v) > 1 or (len(v) == 1 and len(nest.flatten(v[0].keras_inputs)) > 1):\n                sequential_like = False\n                break\n            nodes += v\n        if sequential_like:\n            for layer in model.layers:\n                flag = False\n                for node in layer._inbound_nodes:\n                    if node in nodes:\n                        if flag:\n                            sequential_like = False\n                            break\n                        else:\n                            flag = True\n                if not sequential_like:\n                    break\n    if sequential_like:\n        line_length = line_length or 65\n        positions = positions or [0.45, 0.85, 1.0]\n        if positions[-1] <= 1:\n            positions = [int(line_length * p) for p in positions]\n        to_display = ['Layer (type)', 'Output Shape', 'Param \n    else:\n        line_length = line_length or 98\n        positions = positions or [0.33, 0.55, 0.67, 1.0]\n        if positions[-1] <= 1:\n            positions = [int(line_length * p) for p in positions]\n        to_display = ['Layer (type)', 'Output Shape', 'Param \n        relevant_nodes = []\n        for v in model._nodes_by_depth.values():\n            relevant_nodes += v\n\n    def print_row(fields, positions):\n        line = ''\n        for i in range(len(fields)):\n            if i > 0:\n                line = line[:-1] + ' '\n            line += str(fields[i])\n            line = line[:positions[i]]\n            line += ' ' * (positions[i] - len(line))\n        print_fn(line)\n    print_fn('Model: \"{}\"'.format(model.name))\n    print_fn('_' * line_length)\n    print_row(to_display, positions)\n    print_fn('=' * line_length)\n\n    def print_layer_summary(layer):\n        \n        try:\n            output_shape = layer.output_shape\n        except AttributeError:\n            output_shape = 'multiple'\n        except RuntimeError:\n            output_shape = '?'\n        name = layer.name\n        cls_name = layer.__class__.__name__\n        if not layer.built and (not getattr(layer, '_is_graph_network', False)):\n            params = '0 (unused)'\n        else:\n            params = layer.count_params()\n        fields = [name + ' (' + cls_name + ')', output_shape, params]\n        print_row(fields, positions)\n\n    def print_layer_summary_with_connections(layer):\n        \n        try:\n            output_shape = layer.output_shape\n        except AttributeError:\n            output_shape = 'multiple'\n        connections = []\n        for node in layer._inbound_nodes:\n            if relevant_nodes and node not in relevant_nodes:\n                continue\n            for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():\n                connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index))\n        name = layer.name\n        cls_name = layer.__class__.__name__\n        if not connections:\n            first_connection = ''\n        else:\n            first_connection = connections[0]\n     
   fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection]\n        print_row(fields, positions)\n        if len(connections) > 1:\n            for i in range(1, len(connections)):\n                fields = ['', '', '', connections[i]]\n                print_row(fields, positions)\n    layers = model.layers\n    for i in range(len(layers)):\n        if sequential_like:\n            print_layer_summary(layers[i])\n        else:\n            print_layer_summary_with_connections(layers[i])\n        if i == len(layers) - 1:\n            print_fn('=' * line_length)\n        else:\n            print_fn('_' * line_length)\n    if hasattr(model, '_collected_trainable_weights'):\n        trainable_count = count_params(model._collected_trainable_weights)\n    else:\n        trainable_count = count_params(model.trainable_weights)\n    non_trainable_count = count_params(model.non_trainable_weights)\n    print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))\n    print_fn('Trainable params: {:,}'.format(trainable_count))\n    print_fn('Non-trainable params: {:,}'.format(non_trainable_count))\n    print_fn('_' * line_length)", "docstring": "Prints a summary of a model.\n\nArgs:\nmodel: Keras model instance.\nline_length: Total length of printed lines\n(e.g. set this to adapt the display to different\nterminal window sizes).\npositions: Relative or absolute positions of log elements in each line.\nIf not provided, defaults to `[.33, .55, .67, 1.]`.\nprint_fn: Print function to use.\nIt will be called on each line of the summary.\nYou can set it to a custom function\nin order to capture the string summary.\nIt defaults to `print` (prints to stdout).", "source": "github-repos"}
{"code": "def __init__(self, graph, fetches, feeds, feed_handles=None):\n    with graph.as_default():\n        self._fetch_mapper = _FetchMapper.for_fetch(fetches)\n    self._fetches = []\n    self._targets = []\n    self._feeds = feeds\n    self._feed_handles = feed_handles or {}\n    self._ops = []\n    self._fetch_handles = {}\n    for fetch in self._fetch_mapper.unique_fetches():\n        if isinstance(fetch, ops.Operation):\n            self._assert_fetchable(graph, fetch)\n            self._targets.append(fetch)\n            self._ops.append(True)\n        else:\n            self._assert_fetchable(graph, fetch.op)\n            self._fetches.append(fetch)\n            self._ops.append(False)\n        if isinstance(fetch, tensor.Tensor) and (fetch.op.type == 'GetSessionHandle' or fetch.op.type == 'GetSessionHandleV2'):\n            self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype\n    self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]", "docstring": "Creates a fetch handler.\n\nArgs:\ngraph: Graph of the fetches.   Used to check for fetchability and to\nconvert all fetches to tensors or ops as needed.\nfetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,\nor dict.\nfeeds: A feed dict where keys are Tensors.\nfeed_handles: A dict from feed Tensors to TensorHandle objects used as\ndirect feeds.", "source": "github-repos"}
{"code": "def group(self, group_type, name, **kwargs):\n        \n        group_obj = Group(group_type, name, **kwargs)\n        return self._group(group_obj)", "docstring": "Add Group data to Batch object.\n\nArgs:\ngroup_type (str): The ThreatConnect define Group type.\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Group.", "source": "juraj-google-style"}
{"code": "def _get_elmt_amt_in_rxt(self, rxt):\n        \n        return sum([rxt.get_el_amount(e) for e in self.pd.elements])", "docstring": "Computes total number of atoms in a reaction formula for elements\nnot in external reservoir. This method is used in the calculation\nof reaction energy per mol of reaction formula.\n\nArgs:\nrxt (Reaction): a reaction.\n\nReturns:\nTotal number of atoms for non_reservoir elements.", "source": "juraj-google-style"}
{"code": "def query(self, connection, query, fetch=True):\n        \n\n        self.install_module(connection)\n\n        statements = sqlparse.parse(sqlparse.format(query, strip_comments=True))\n\n        \n        \n        logger.debug('Finding and installing all partitions from query. \\n    query: {}'.format(query))\n        new_query = []\n\n        if len(statements) > 1:\n            raise BadSQLError(\"Can only query a single statement\")\n\n        if len(statements) == 0:\n            raise BadSQLError(\"DIdn't get any statements in '{}'\".format(query))\n\n        statement = statements[0]\n\n        logger.debug( 'Searching statement for partition ref.\\n    statement: {}'.format(statement.to_unicode()))\n\n        \n\n        logger.debug(\n            'Executing updated query after partition install.'\n            '\\n    query before update: {}\\n    query to execute (updated query): {}'\n            .format(statement, new_query))\n\n        return self._execute(connection, statement.to_unicode(), fetch=fetch)", "docstring": "Creates virtual tables for all partitions found in the query and executes query.\n\nArgs:\nquery (str): sql query\nfetch (bool): fetch result from database if True, do not fetch overwise.", "source": "juraj-google-style"}
{"code": "def process_result_value(self, value, dialect):\n        \n        masks = list()\n        if value:\n            for e in enums.CryptographicUsageMask:\n                if e.value & value:\n                    masks.append(e)\n        return masks", "docstring": "Returns a new list of enums.CryptographicUsageMask Enums. This converts\nthe integer value into the list of enums.\n\nArgs:\nvalue(int): The integer value stored in the database that is used\nto create the list of enums.CryptographicUsageMask Enums.\ndialect(string): SQL dialect", "source": "juraj-google-style"}
{"code": "def get_metrics(self, name=None):\n    return self._get_elements(self.metrics, 'metrics', Metric, name=name)", "docstring": "Get metrics for this operator.\n\nArgs:\nname(str, optional): Only return metrics matching `name`, where `name` can be a regular expression.  If\n`name` is not supplied, then all metrics for this operator are returned.\n\nReturns:\nlist(Metric): List of matching metrics.\n\nRetrieving a list of metrics whose name contains the string \"temperatureSensor\" could be performed as followed\nExample:\n>>> from streamsx import rest\n>>> sc = rest.StreamingAnalyticsConnection()\n>>> instances = sc.get_instances()\n>>> operator = instances[0].get_operators()[0]\n>>> metrics = op.get_metrics(name='*temperatureSensor*')", "source": "codesearchnet"}
{"code": "def get_available_transcript_languages(video_id):\n    \n    available_languages = VideoTranscript.objects.filter(\n        video__edx_video_id=video_id\n    ).values_list(\n        'language_code', flat=True\n    )\n    return list(available_languages)", "docstring": "Get available transcript languages\n\nArguments:\nvideo_id(unicode): An id identifying the Video.\n\nReturns:\nA list containing transcript language codes for the Video.", "source": "juraj-google-style"}
{"code": "def iter(self):\n    page = 1\n    fetch_all = True\n    url = '{}/{}'.format(__endpoint__, self.type.RESOURCE)\n    if ('page' in self.params):\n        page = self.params['page']\n        fetch_all = False\n    response = RestClient.get(url, self.params)[self.type.RESOURCE]\n    while len(response):\n        for item in response:\n            (yield self.type(item))\n        if (not fetch_all):\n            break\n        else:\n            page += 1\n            self.where(page=page)\n        response = RestClient.get(url, self.params)[self.type.RESOURCE]", "docstring": "Gets all resources, automating paging through data\n\nReturns:\niterable of object: Iterable of resource objects", "source": "codesearchnet"}
{"code": "def _ProcessAMCacheFileKey(self, am_entry, parser_mediator):\n    \n    amcache_datetime = am_entry.get_value_by_name(\n        self._AMCACHE_DATETIME).get_data_as_integer()\n    event_data = AmcacheEventData()\n\n    event_data.full_path = am_entry.get_value_by_name(\n        self._AMCACHE_FULL_PATH).get_data_as_string()\n    \n    event_data.sha1 = am_entry.get_value_by_name(\n        self._AMCACHE_SHA1).get_data_as_string()[4:]\n\n    productname = am_entry.get_value_by_name(self._AMCACHE_PRODUCTNAME)\n    if productname:\n      event_data.productname = productname.get_data_as_string()\n\n    companyname = am_entry.get_value_by_name(self._AMCACHE_COMPANYNAME)\n    if companyname:\n      event_data.companyname = companyname.get_data_as_string()\n\n    fileversion = am_entry.get_value_by_name(self._AMCACHE_FILEVERSION)\n    if fileversion:\n      event_data.fileversion = fileversion.get_data_as_string()\n\n    languagecode = am_entry.get_value_by_name(self._AMCACHE_LANGUAGECODE)\n    if languagecode:\n      event_data.languagecode = languagecode.get_data_as_integer()\n\n    filesize = am_entry.get_value_by_name(self._AMCACHE_FILESIZE)\n    if filesize:\n      event_data.filesize = filesize.get_data_as_integer()\n\n    filedescription = am_entry.get_value_by_name(self._AMCACHE_FILEDESCRIPTION)\n    if filedescription:\n      event_data.filedescription = filedescription.get_data_as_string()\n\n    linkerts = am_entry.get_value_by_name(self._AMCACHE_LINKERTS)\n    if linkerts:\n      event_data.linkerts = linkerts.get_data_as_integer()\n\n    lastmodifiedts = am_entry.get_value_by_name(self._AMCACHE_LASTMODIFIEDTS)\n    if lastmodifiedts:\n      event_data.lastmodifiedts = lastmodifiedts.get_data_as_integer()\n\n    createdts = am_entry.get_value_by_name(self._AMCACHE_CREATEDTS)\n    if createdts:\n      event_data.createdts = createdts.get_data_as_integer()\n\n    programid = am_entry.get_value_by_name(self._AMCACHE_PROGRAMID)\n    if programid:\n      event_data.programid = programid.get_data_as_string()\n\n    event = time_events.DateTimeValuesEvent(\n        filetime.Filetime(amcache_datetime),\n        definitions.TIME_DESCRIPTION_MODIFICATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if event_data.createdts:\n      event = time_events.DateTimeValuesEvent(\n          filetime.Filetime(event_data.createdts),\n          definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if event_data.lastmodifiedts:\n      event = time_events.DateTimeValuesEvent(\n          filetime.Filetime(event_data.lastmodifiedts),\n          definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if event_data.linkerts:\n      event = time_events.DateTimeValuesEvent(\n          posix_time.PosixTime(event_data.linkerts),\n          definitions.TIME_DESCRIPTION_CHANGE)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Amcache Root/File key for events.\n\nArgs:\nam_entry (pyregf.key): amcache File key.\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def ask_stories(self, raw=False, limit=None):\n    ask_stories = self._get_stories('askstories', limit)\n    if raw:\n        ask_stories = [story.raw for story in ask_stories]\n    return ask_stories", "docstring": "Returns list of item ids of latest Ask HN stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of Ask HN stories.", "source": "codesearchnet"}
{"code": "def ListPlugins(logdir):\n  \n  plugins_dir = os.path.join(logdir, _PLUGINS_DIR)\n  try:\n    entries = tf.io.gfile.listdir(plugins_dir)\n  except tf.errors.NotFoundError:\n    return []\n  \n  \n  return [x.rstrip('/') for x in entries\n          if x.endswith('/') or _IsDirectory(plugins_dir, x)]", "docstring": "List all the plugins that have registered assets in logdir.\n\nIf the plugins_dir does not exist, it returns an empty list. This maintains\ncompatibility with old directories that have no plugins written.\n\nArgs:\nlogdir: A directory that was created by a TensorFlow events writer.\n\nReturns:\na list of plugin names, as strings", "source": "juraj-google-style"}
{"code": "def get_descriptor_defaults(self, api_info, hostname=None):\n    \n    if self.__request:\n      hostname = self.__request.reconstruct_hostname()\n      protocol = self.__request.url_scheme\n    else:\n      hostname = (hostname or util.get_app_hostname() or\n                  api_info.hostname)\n      protocol = 'http' if ((hostname and hostname.startswith('localhost')) or\n                            util.is_running_on_devserver()) else 'https'\n    full_base_path = '{0}{1}/{2}/'.format(api_info.base_path,\n                                          api_info.name,\n                                          api_info.path_version)\n    base_url = '{0}:\n    root_url = '{0}:\n    defaults = {\n        'kind': 'discovery\n        'discoveryVersion': 'v1',\n        'id': '{0}:{1}'.format(api_info.name, api_info.path_version),\n        'name': api_info.name,\n        'version': api_info.api_version,\n        'icons': {\n            'x16': 'https:\n            'x32': 'https:\n        },\n        'protocol': 'rest',\n        'servicePath': '{0}/{1}/'.format(api_info.name, api_info.path_version),\n        'batchPath': 'batch',\n        'basePath': full_base_path,\n        'rootUrl': root_url,\n        'baseUrl': base_url,\n        'description': 'This is an API',\n    }\n    if api_info.description:\n        defaults['description'] = api_info.description\n    if api_info.title:\n        defaults['title'] = api_info.title\n    if api_info.documentation:\n        defaults['documentationLink'] = api_info.documentation\n    if api_info.canonical_name:\n        defaults['canonicalName'] = api_info.canonical_name\n\n    return defaults", "docstring": "Gets a default configuration for a service.\n\nArgs:\napi_info: _ApiInfo object for this service.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\nA dictionary with the default configuration.", "source": "juraj-google-style"}
{"code": "def synthesize(self, duration):\n    sr = self.samplerate.samples_per_second\n    seconds = (duration / Seconds(1))\n    samples = np.random.uniform(low=(- 1.0), high=1.0, size=int((sr * seconds)))\n    return AudioSamples(samples, self.samplerate)", "docstring": "Synthesize white noise\n\nArgs:\nduration (numpy.timedelta64): The duration of the synthesized sound", "source": "codesearchnet"}
{"code": "def populations():\n    city_pops = {}\n    fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')\n    with open(fname, 'rU') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        for row in reader:\n            city_pops[row[0]] = int(row[1])\n    return city_pops", "docstring": "Get a dictionary of Backpage city names mapped to their citizen populations.\n\nReturns:\ndictionary of Backpage city names mapped to their populations (integers)", "source": "codesearchnet"}
{"code": "def unique_name(self, name, mark_as_used=True):\n    scope_name = tf.get_variable_scope().name\n    if scope_name:\n        name = ((scope_name + '/') + name)\n    name_key = name.lower()\n    i = self._names_in_use.get(name_key, 0)\n    if mark_as_used:\n        self._names_in_use[name_key] = (i + 1)\n    if (i > 0):\n        base_name_key = name_key\n        while (name_key in self._names_in_use):\n            name_key = ('%s_%d' % (base_name_key, i))\n            i += 1\n        if mark_as_used:\n            self._names_in_use[name_key] = 1\n        name = ('%s_%d' % (name, (i - 1)))\n    return name", "docstring": "Like tf.Graph.unique_name, returns a unique operation name for `name`.\n\nArgs:\nname: The name for an operation.\nmark_as_used: whether to mark this name as being used.\n\nReturns:\nA string to use as the name for the operation.", "source": "codesearchnet"}
{"code": "def get_events(self, event_title, regex=False):\n    regex_val = 0\n    if regex:\n        regex_val = 1\n    r = requests.get('{0}/events/?api_key={1}&username={2}&c-title={3}&regex={4}'.format(self.url, self.api_key, self.username, event_title, regex_val), verify=self.verify)\n    if (r.status_code == 200):\n        json_obj = json.loads(r.text)\n        return json_obj\n    else:\n        log.error('Non-200 status code from get_event: {}'.format(r.status_code))\n        return None", "docstring": "Search for events with the provided title\n\nArgs:\nevent_title: The title of the event\nReturns:\nAn event JSON object returned from the server with the following:\n{\n\"meta\":{\n\"limit\": 20, \"next\": null, \"offset\": 0,\n\"previous\": null, \"total_count\": 3\n},\n\"objects\": [{}, {}, etc]\n}\nor None if an error occurred.", "source": "codesearchnet"}
{"code": "def load_requires_from_file(filepath):\n    with open(filepath) as fp:\n        return [pkg_name.strip() for pkg_name in fp.readlines()]", "docstring": "Read a package list from a given file path.\n\nArgs:\nfilepath: file path of the package list.\n\nReturns:\na list of package names.", "source": "codesearchnet"}
{"code": "def __init__(self, _channel, loop=None, executor=None, standalone_pool_for_streaming=False):\n        \n        self._channel = _channel\n        if loop is None:\n            loop = _asyncio.get_event_loop()\n        self._loop = loop\n        self._executor = executor\n        self._standalone_pool = standalone_pool_for_streaming\n        self._subscribe_map = {}", "docstring": "Constructor.\n\nArgs:\n_channel: wrapped grpc.Channel\nloop: asyncio event loop\nexecutor: a thread pool, or None to use the default pool of the loop\nstandalone_pool_for_streaming: create a new thread pool (with 1 thread) for each streaming\nmethod", "source": "juraj-google-style"}
{"code": "def reward(self, action=None):\n        \n        reward = 0.\n\n        \n        if self._check_success():\n            reward = 1.0\n\n        \n        if self.reward_shaping:\n\n            \n            cube_pos = self.sim.data.body_xpos[self.cube_body_id]\n            gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]\n            dist = np.linalg.norm(gripper_site_pos - cube_pos)\n            reaching_reward = 1 - np.tanh(10.0 * dist)\n            reward += reaching_reward\n\n            \n            touch_left_finger = False\n            touch_right_finger = False\n            for i in range(self.sim.data.ncon):\n                c = self.sim.data.contact[i]\n                if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id:\n                    touch_left_finger = True\n                if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids:\n                    touch_left_finger = True\n                if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id:\n                    touch_right_finger = True\n                if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids:\n                    touch_right_finger = True\n            if touch_left_finger and touch_right_finger:\n                reward += 0.25\n\n        return reward", "docstring": "Reward function for the task.\n\nThe dense reward has three components.\n\nReaching: in [0, 1], to encourage the arm to reach the cube\nGrasping: in {0, 0.25}, non-zero if arm is grasping the cube\nLifting: in {0, 1}, non-zero if arm has lifted the cube\n\nThe sparse reward only consists of the lifting component.\n\nArgs:\naction (np array): unused for this task\n\nReturns:\nreward (float): the reward", "source": "juraj-google-style"}
{"code": "def exists(self, filename):\n        \n        if is_package(filename):\n            filepath = os.path.join(self.connection[\"mount_point\"],\n                                    \"Packages\", filename)\n        else:\n            filepath = os.path.join(self.connection[\"mount_point\"],\n                                    \"Scripts\", filename)\n        return os.path.exists(filepath)", "docstring": "Report whether a file exists on the distribution point.\n\nDetermines file type by extension.\n\nArgs:\nfilename: Filename you wish to check. (No path! e.g.:\n\"AdobeFlashPlayer-14.0.0.176.pkg\")", "source": "juraj-google-style"}
{"code": "def parse(self, arguments):\n    \n    new_values = self._parse(arguments)\n    if self.present:\n      self.value.extend(new_values)\n    else:\n      self.value = new_values\n    self.present += len(new_values)", "docstring": "Parses one or more arguments with the installed parser.\n\nArgs:\narguments: a single argument or a list of arguments (typically a\nlist of default values); a single argument is converted\ninternally into a list containing one item.", "source": "juraj-google-style"}
{"code": "def xxd_output_to_bytes(input_cc_file):\n    pattern = re.compile('\\\\W*(0x[0-9a-fA-F,x ]+).*')\n    model_bytearray = bytearray()\n    with open(input_cc_file) as file_handle:\n        for line in file_handle:\n            values_match = pattern.match(line)\n            if values_match is None:\n                continue\n            list_text = values_match.group(1)\n            values_text = filter(None, list_text.split(','))\n            values = [int(x, base=16) for x in values_text]\n            model_bytearray.extend(values)\n    return bytes(model_bytearray)", "docstring": "Converts xxd output C++ source file to bytes (immutable).\n\nArgs:\ninput_cc_file: Full path name to th C++ source file dumped by xxd\n\nRaises:\nRuntimeError: If input_cc_file path is invalid.\nIOError: If input_cc_file cannot be opened.\n\nReturns:\nA bytearray corresponding to the input cc file array.", "source": "github-repos"}
{"code": "def __fetch_route53_zone_records(self, zone_id):\n    route53 = self.session.client('route53')\n    done = False\n    nextName = nextType = None\n    records = {}\n    try:\n        while (not done):\n            if (nextName and nextType):\n                response = route53.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=nextName, StartRecordType=nextType)\n            else:\n                response = route53.list_resource_record_sets(HostedZoneId=zone_id)\n            if response['IsTruncated']:\n                nextName = response['NextRecordName']\n                nextType = response['NextRecordType']\n            else:\n                done = True\n            if ('ResourceRecordSets' in response):\n                for record in response['ResourceRecordSets']:\n                    record_id = self._get_resource_hash(zone_id, record)\n                    if ('AliasTarget' in record):\n                        value = record['AliasTarget']['DNSName']\n                        records[record_id] = {'id': record_id, 'name': record['Name'].rstrip('.'), 'type': 'ALIAS', 'ttl': 0, 'value': [value]}\n                    else:\n                        value = [y['Value'] for y in record['ResourceRecords']]\n                        records[record_id] = {'id': record_id, 'name': record['Name'].rstrip('.'), 'type': record['Type'], 'ttl': record['TTL'], 'value': value}\n        return list(records.values())\n    finally:\n        del route53", "docstring": "Return all resource records for a specific Route53 zone\n\nArgs:\nzone_id (`str`): Name / ID of the hosted zone\n\nReturns:\n`dict`", "source": "codesearchnet"}
{"code": "def peek(self, iroute: \"InstanceRoute\") -> Optional[Value]:\n        \n        val = self.value\n        sn = self.schema_node\n        for sel in iroute:\n            val, sn = sel.peek_step(val, sn)\n            if val is None:\n                return None\n        return val", "docstring": "Return a value within the receiver's subtree.\n\nArgs:\niroute: Instance route (relative to the receiver).", "source": "juraj-google-style"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n    logits = outputs.logits\n    if target_sizes is not None:\n        if len(logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        if is_torch_tensor(target_sizes):\n            target_sizes = target_sizes.numpy()\n        semantic_segmentation = []\n        for idx in range(len(logits)):\n            resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = logits.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`MobileNetV2ForSemanticSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple]` of length `batch_size`, *optional*):\nList of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\npredictions will not be resized.\n\nReturns:\nsemantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\nsegmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\nspecified). Each entry of each `torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def fixity(self, response_format=None):\n    if (not response_format):\n        response_format = self.repo.default_serialization\n    response = self.repo.api.http_request('GET', ('%s/fcr:fixity' % self.uri))\n    fixity_graph = self.repo.api.parse_rdf_payload(response.content, response.headers)\n    for outcome in fixity_graph.objects(None, self.rdf.prefixes.premis.hasEventOutcome):\n        if (outcome.toPython() == 'SUCCESS'):\n            verdict = True\n        else:\n            verdict = False\n    return {'verdict': verdict, 'premis_graph': fixity_graph}", "docstring": "Issues fixity check, return parsed graph\n\nArgs:\nNone\n\nReturns:\n(dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check)", "source": "codesearchnet"}
{"code": "def _add_value(self, field_name: str, value, provenance_path=None) -> bool:\n        \n        if not isinstance(value, list):\n            value = [value]\n\n        all_valid = True\n        for x in value:\n            valid = self._add_single_value(field_name, x, provenance_path=provenance_path)\n            all_valid = all_valid and valid\n        return all_valid", "docstring": "Helper function to add values to a knowledge graph\nArgs:\nfield_name: a field in the knowledge graph, assumed correct\nvalue: any Python type\n\nReturns: True if the value is compliant with the field schema, False otherwise", "source": "juraj-google-style"}
{"code": "def _execute(self, command, params=None):\n    if (not params):\n        params = {}\n    params['id'] = self._id\n    return self._parent.execute(command, params)", "docstring": "Executes a command against the underlying HTML element.\n\nArgs:\ncommand: The name of the command to _execute as a string.\nparams: A dictionary of named parameters to send with the command.\n\nReturns:\nThe command's JSON response loaded into a dictionary object.", "source": "codesearchnet"}
{"code": "def read_var_bytes(self, max_size=sys.maxsize) -> bytes:\n    length = self.read_var_int(max_size)\n    return self.read_bytes(length)", "docstring": "Read a variable length of bytes from the stream.\n\nArgs:\nmax_size (int): (Optional) maximum number of bytes to read.\n\nReturns:\nbytes:", "source": "codesearchnet"}
{"code": "def content_metadata_uploads(self, mirror=False):\n        \n        excludes_str = ''\n        includes_cmds = []\n        cmd_base = self._get_upload_cmd(mirror=mirror)\n\n        for content in self.s3props.get('content_metadata'):\n            full_path = os.path.join(self.artifact_path, content['path'])\n            if not os.listdir(full_path):\n                raise S3ArtifactNotFound\n\n            excludes_str += '--exclude \"{}/*\" '.format(content['path'])\n            include_cmd = '{} --exclude \"*\", --include \"{}/*\"'.format(cmd_base, content['path'])\n            include_cmd += ' --content-encoding {} --metadata-directive REPLACE'.format(content['content-encoding'])\n            includes_cmds.append(include_cmd)\n\n        exclude_cmd = '{} {}'.format(cmd_base, excludes_str)\n        result = subprocess.run(exclude_cmd, check=True, shell=True, stdout=subprocess.PIPE)\n        LOG.info(\"Uploaded files without metadata with command: %s\", exclude_cmd)\n        LOG.debug(\"Upload Command Output: %s\", result.stdout)\n\n        for include_cmd in includes_cmds:\n            result = subprocess.run(include_cmd, check=True, shell=True, stdout=subprocess.PIPE)\n            LOG.info(\"Uploaded files with metadata with command: %s\", include_cmd)\n            LOG.debug(\"Upload Command Output: %s\", result.stdout)\n\n        return True", "docstring": "Finds all specified encoded directories and uploads in multiple parts,\nsetting metadata for objects.\n\nArgs:\nmirror (bool): If true, uses a flat directory structure instead of nesting under a version.\n\nReturns:\nbool: True if uploaded", "source": "juraj-google-style"}
{"code": "def _create_graph(structure_dict):\n    graph = pydot.Dot()\n    for node in structure_dict['nodes']:\n        graph.add_node(pydot.Node(node))\n    for (node1, node2) in structure_dict['edges']:\n        graph.add_edge(pydot.Edge(node1, node2))\n    return graph", "docstring": "Creates pydot graph from the pipeline structure dict.\n\nArgs:\nstructure_dict (dict): dict returned by step.upstream_structure\n\nReturns:\ngraph (pydot.Dot): object representing upstream pipeline structure (with regard to the current Step).", "source": "codesearchnet"}
{"code": "def loop(coord, timer_interval_secs, target, args=None, kwargs=None):\n    looper = LooperThread(coord, timer_interval_secs, target=target, args=args, kwargs=kwargs)\n    looper.start()\n    return looper", "docstring": "Start a LooperThread that calls a function periodically.\n\nIf `timer_interval_secs` is None the thread calls `target(args)`\nrepeatedly.  Otherwise `target(args)` is called every `timer_interval_secs`\nseconds.  The thread terminates when a stop of the coordinator is\nrequested.\n\nArgs:\ncoord: A Coordinator.\ntimer_interval_secs: Number. Time boundaries at which to call `target`.\ntarget: A callable object.\nargs: Optional arguments to pass to `target` when calling it.\nkwargs: Optional keyword arguments to pass to `target` when calling it.\n\nReturns:\nThe started thread.", "source": "github-repos"}
{"code": "def get(self, request, *args, **kwargs):\n        \n        context = self.get_context_data(**kwargs)\n        context.update(self.extra_context)\n        context['crumbs'] = self.get_crumbs()\n        context['title'] = self.title\n        context['suit'] = 'suit' in settings.INSTALLED_APPS\n        if context.get('dashboard_grid', None) is None and self.grid:\n            context['dashboard_grid'] = self.grid\n        return self.render_to_response(context)", "docstring": "Django view get function.\n\nAdd items of extra_context, crumbs and grid to context.\n\nArgs:\nrequest (): Django's request object.\n*args (): request args.\n**kwargs (): request kwargs.\n\nReturns:\nresponse: render to response with context.", "source": "juraj-google-style"}
{"code": "def add_extension_to_message(extension: message.Message, msg: message.Message) -> None:\n    desc = msg.DESCRIPTOR\n    fields_by_url = {get_inlined_extension_url(field): field for field in desc.fields if field.name != 'id'}\n    id_field = desc.fields_by_name.get('id')\n    if proto_utils.field_is_set(extension, id_field):\n        proto_utils.set_value_at_field(msg, id_field, cast(Any, extension).id)\n    if proto_utils.field_is_set(extension, 'value'):\n        if len(fields_by_url) != 1:\n            raise fhir_errors.InvalidFhirError(f'Expected a single field, found {len(fields_by_url)}; {desc.full_name} is an invalid extension type.')\n        field = list(fields_by_url.items())[0][1]\n        if proto_utils.field_is_repeated(field):\n            raise fhir_errors.InvalidFhirError(f'Expected {field.full_name} to be a singular field. {desc.full_name} is an invalid extension type.')\n        _add_extension_value_to_message(extension, msg, field)\n        return\n    child_extensions = proto_utils.get_value_at_field(extension, 'extension')\n    for child_extension in child_extensions:\n        field = fields_by_url.get(child_extension.url.value)\n        if field is None:\n            raise ValueError(f'Message of type: {desc.full_name} has no field with name: {child_extension.url.value}.')\n        if proto_utils.field_is_set(child_extension, 'value'):\n            _add_extension_value_to_message(child_extension, msg, field)\n            continue\n        if not proto_utils.field_is_repeated(field):\n            if proto_utils.field_is_set(msg, field):\n                raise ValueError(f'Field: {field.full_name} is already set on message: {desc.full_name}.')\n            if proto_utils.field_content_length(child_extension, 'extension') > 1:\n                raise ValueError(f'Cardinality mismatch between field: {field.full_name} and extension: {desc.full_name}.')\n        child_message = proto_utils.set_in_parent_or_add(msg, field)\n        add_extension_to_message(child_extension, child_message)", "docstring": "Recursively parses extension and adds to message.\n\nArgs:\nextension: The FHIR extension to serialize and add.\nmsg: The message to add the extension onto\n\nRaises:\nInvalidFhirError: In the event that a value is set on the extension, but the\ncorresponding message field to copy it to is repeated (extension values are\nsingular only).", "source": "github-repos"}
{"code": "def _as_document(self, dataset):\n        \n\n        \n\n        assert isinstance(dataset, Dataset)\n\n        execute = object_session(dataset).connection().execute\n\n        query = text()\n\n        columns = u('\\n').join(\n            [u(' ').join(list(text_type(e) for e in t)) for t in execute(query, dataset_vid=str(dataset.identity.vid))])\n\n        doc = '\\n'.join([u('{}').format(x) for x in [dataset.config.metadata.about.title,\n                                                     dataset.config.metadata.about.summary,\n                                                     dataset.identity.id_,\n                                                     dataset.identity.vid,\n                                                     dataset.identity.source,\n                                                     dataset.identity.name,\n                                                     dataset.identity.vname,\n                                                     columns]])\n\n        \n        \n        parts = u('{}').format(dataset.identity.source).split('.')\n        sources = (['.'.join(g) for g in [parts[-i:] for i in range(2, len(parts) + 1)]]\n                   + ['.'.join(g) for g in [parts[:i] for i in range(0, len(parts))]])\n\n        \n        \n        def resum(g):\n            try:\n                return str(GVid.parse(g).summarize())\n            except (KeyError, ValueError):\n                return g\n\n        def as_list(value):\n            \n            if not value:\n                return []\n            if isinstance(value, string_types):\n                lst = [value]\n            else:\n                try:\n                    lst = list(value)\n                except TypeError:\n                    lst = [value]\n            return lst\n\n        about_time = as_list(dataset.config.metadata.about.time)\n        about_grain = as_list(dataset.config.metadata.about.grain)\n\n        keywords = (\n            list(dataset.config.metadata.about.groups) +\n            list(dataset.config.metadata.about.tags) +\n            about_time +\n            [resum(g) for g in about_grain] +\n            sources)\n\n        document = dict(\n            vid=u('{}').format(dataset.identity.vid),\n            title=u('{} {}').format(dataset.identity.name, dataset.config.metadata.about.title),\n            doc=u('{}').format(doc),\n            keywords=' '.join(u('{}').format(x) for x in keywords)\n        )\n\n        return document", "docstring": "Converts dataset to document indexed by to FTS index.\n\nArgs:\ndataset (orm.Dataset): dataset to convert.\n\nReturns:\ndict with structure matches to BaseDatasetIndex._schema.", "source": "juraj-google-style"}
{"code": "def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):\n        \n        for i, ext in enumerate(exclude_exts):\n            if not ext.strip().startswith(\".\"):\n                exclude_exts[i] =  \".\" + ext.strip()\n\n        \n        paths = []\n        for fname in os.listdir(dirname):\n            root, ext = os.path.splitext(fname)\n            path = os.path.join(dirname, fname)\n            if (ext in exclude_exts or fname in exclude_fnames or\n                fname.startswith(\".\") or not os.path.isfile(path)): continue\n            paths.append(path)\n\n        pseudos = []\n        for path in paths:\n            \n            try:\n                pseudo = self.parse(path)\n            except:\n                pseudo = None\n\n            if pseudo is not None:\n                pseudos.append(pseudo)\n                self._parsed_paths.extend(path)\n            else:\n                self._wrong_paths.extend(path)\n\n        return pseudos", "docstring": "Analyze the files contained in directory dirname.\n\nArgs:\ndirname: directory path\nexclude_exts: list of file extensions that should be skipped.\nexclude_fnames: list of file names that should be skipped.\n\nReturns:\nList of pseudopotential objects.", "source": "juraj-google-style"}
{"code": "def as_vartype(vartype):\n    \n    if isinstance(vartype, Vartype):\n        return vartype\n\n    try:\n        if isinstance(vartype, str):\n            vartype = Vartype[vartype]\n        elif isinstance(vartype, frozenset):\n            vartype = Vartype(vartype)\n        else:\n            vartype = Vartype(frozenset(vartype))\n\n    except (ValueError, KeyError):\n        raise TypeError((\"expected input vartype to be one of: \"\n                         \"Vartype.SPIN, 'SPIN', {-1, 1}, \"\n                         \"Vartype.BINARY, 'BINARY', or {0, 1}.\"))\n\n    return vartype", "docstring": "Cast various inputs to a valid vartype object.\n\nArgs:\nvartype (:class:`.Vartype`/str/set):\nVariable type. Accepted input values:\n\n* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\nReturns:\n:class:`.Vartype`: Either :class:`.Vartype.SPIN` or\n:class:`.Vartype.BINARY`.\n\nSee also:\n:func:`~dimod.decorators.vartype_argument`", "source": "juraj-google-style"}
{"code": "def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n    if not os.path.isdir(save_directory):\n        logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n        return\n    out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab'])\n    with open(out_vocab_file, 'w') as file:\n        file.write(json.dumps(self.encoder))\n    return (out_vocab_file,)", "docstring": "Saves the tokenizer's vocabulary dictionary to the provided save_directory.\n\nArgs:\nsave_directory (`str`):\nA path to the directory where to saved. It will be created if it doesn't exist.\nfilename_prefix (`Optional[str]`, *optional*):\nA prefix to add to the names of the files saved by the tokenizer.", "source": "github-repos"}
{"code": "def _as_variant_tensor(self):\n    raise NotImplementedError(f'{type(self)}.as_variant_tensor()')", "docstring": "Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.\n\nReturns:\nA scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.", "source": "github-repos"}
{"code": "def prepare_image_question_encoder(image_feat, question, hparams):\n  \n\n  encoder_input = tf.concat([image_feat, question], axis=1)\n  encoder_padding = common_attention.embedding_to_padding(encoder_input)\n  ignore_padding = common_attention.attention_bias_ignore_padding(\n      encoder_padding)\n  encoder_self_attention_bias = ignore_padding\n  encoder_decoder_attention_bias = ignore_padding\n  \n  if hparams.pos == \"timing\":\n    question = common_attention.add_timing_signal_1d(question)\n  elif hparams.pos == \"emb\":\n    question = common_attention.add_positional_embedding(\n        question, hparams.max_length, \"inputs_positional_embedding\",\n        None)\n  encoder_input = tf.concat([image_feat, question], axis=1)\n\n  return (encoder_input, encoder_self_attention_bias,\n          encoder_decoder_attention_bias)", "docstring": "Prepare encoder.\n\nArgs:\nimage_feat: a Tensor.\nquestion: a Tensor.\nhparams: run hyperparameters\n\nReturns:\nencoder_input: a Tensor, bottom of encoder stack\nencoder_self_attention_bias: a bias tensor for use in encoder self-attention", "source": "juraj-google-style"}
{"code": "def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):\n    \n    if hgnc_id:\n        gene_obj = adapter.hgnc_gene(hgnc_id)\n        if not gene_obj:\n            ValueError(\"Gene {} does not exist in database\".format(hgnc_id))\n        chrom = gene_obj['chromosome']\n        start = gene_obj['start']\n        end = gene_obj['end']\n\n    case_obj = adapter.case(case_id=case_id)\n    if not case_obj:\n        raise ValueError(\"Case {} does not exist in database\".format(case_id))\n\n    log.info(\"Load clinical SNV variants for case: {0} region: chr {1}, start\"\n             \" {2}, end {3}\".format(case_obj['_id'], chrom, start, end))\n\n    adapter.load_variants(case_obj=case_obj, variant_type='clinical',\n                          category='snv', chrom=chrom, start=start, end=end)\n\n    vcf_sv_file = case_obj['vcf_files'].get('vcf_sv')\n    if vcf_sv_file:\n        log.info(\"Load clinical SV variants for case: {0} region: chr {1}, \"\n                 \"start {2}, end {3}\".format(case_obj['_id'], chrom, start, end))\n        adapter.load_variants(case_obj=case_obj, variant_type='clinical',\n                              category='sv', chrom=chrom, start=start, end=end)\n\n    vcf_str_file = case_obj['vcf_files'].get('vcf_str')\n    if vcf_str_file: \n        log.info(\"Load clinical STR variants for case: {0} region: chr {1}, \"\n                 \"start {2}, end {3}\".format(case_obj['_id'], chrom, start, end))\n        adapter.load_variants(case_obj=case_obj, variant_type='clinical',\n                              category='str', chrom=chrom, start=start, end=end)\n\n    if case_obj['is_research']:\n        log.info(\"Load research SNV variants for case: {0} region: chr {1}, \"\n                 \"start {2}, end {3}\".format(case_obj['_id'], chrom, start, end))\n        adapter.load_variants(case_obj=case_obj, variant_type='research',\n                              category='snv', chrom=chrom, start=start, end=end)\n\n        vcf_sv_research = case_obj['vcf_files'].get('vcf_sv_research')\n        if vcf_sv_research:\n            log.info(\"Load research SV variants for case: {0} region: chr {1},\"\n                     \" start {2}, end {3}\".format(case_obj['_id'], chrom, start, end))\n            adapter.load_variants(case_obj=case_obj, variant_type='research',\n                                  category='sv', chrom=chrom, start=start, end=end)", "docstring": "Load all variants in a region defined by a HGNC id\n\nArgs:\nadapter (MongoAdapter)\ncase_id (str): Case id\nhgnc_id (int): If all variants from a gene should be uploaded\nchrom (str): If variants from coordinates should be uploaded\nstart (int): Start position for region\nend (int): Stop position for region", "source": "juraj-google-style"}
{"code": "def _StopAnalysisProcesses(self, abort=False):\n    \n    logger.debug('Stopping analysis processes.')\n    self._StopMonitoringProcesses()\n\n    \n    \n    \n\n    if abort:\n      \n      self._AbortTerminate()\n\n    if not self._use_zeromq:\n      logger.debug('Emptying queues.')\n      for event_queue in self._event_queues.values():\n        event_queue.Empty()\n\n    \n    \n    for event_queue in self._event_queues.values():\n      event_queue.PushItem(plaso_queue.QueueAbort(), block=False)\n\n    \n    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n    for event_queue in self._event_queues.values():\n      event_queue.Close(abort=abort)\n\n    if abort:\n      \n      self._AbortKill()\n    else:\n      \n      self._AbortTerminate()\n      self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n\n      for event_queue in self._event_queues.values():\n        event_queue.Close(abort=True)", "docstring": "Stops the analysis processes.\n\nArgs:\nabort (bool): True to indicated the stop is issued on abort.", "source": "juraj-google-style"}
{"code": "def get(self, uri):\n        \n        uri = self.URI + uri\n        return self._client.get(uri)", "docstring": "Gets an index resource by URI.\n\nArgs:\nuri: The resource URI.\n\nReturns:\ndict: The index resource.", "source": "juraj-google-style"}
{"code": "def __remove_alias(type_):\n    \n    if isinstance(type_, cpptypes.declarated_t) and \\\n            isinstance(type_.declaration, typedef.typedef_t):\n        return __remove_alias(type_.declaration.decl_type)\n    if isinstance(type_, cpptypes.compound_t):\n        type_.base = __remove_alias(type_.base)\n        return type_\n    return type_", "docstring": "Implementation detail.\n\nArgs:\ntype_ (type_t): type\n\nReturns:\ntype_t: the type associated to the inputted type", "source": "juraj-google-style"}
{"code": "def get_fieldset_index(fieldsets, index_or_name):\n    \n    if isinstance(index_or_name, six.integer_types):\n        return index_or_name\n\n    for key, value in enumerate(fieldsets):\n        if value[0] == index_or_name:\n            return key\n\n    raise KeyError(\"Key not found: '{}'.\".format(index_or_name))", "docstring": "Return the index of a fieldset in the ``fieldsets`` list.\n\nArgs:\nfieldsets (list): The original ``fieldsets`` list.\nindex_or_name (int or str): The value of the reference element, or directly its numeric index.\n\nReturns:\n(int) The index of the fieldset in the ``fieldsets`` list.", "source": "juraj-google-style"}
{"code": "def PushEvent(self, event):\n    \n    event_string = event.GetAttributeValuesString()\n    heap_values = (event.timestamp, event.timestamp_desc, event_string, event)\n    heapq.heappush(self._heap, heap_values)", "docstring": "Pushes an event onto the heap.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def execute(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n        alloc = parent.allocator\n\n        \n        output = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)\n\n        trigger_stream, trigger_cond = parent.trigger_chain()\n        streamer_const = alloc.allocate_stream(DataStream.ConstantType, attach=True)\n\n        sensor_graph.add_node(u\"({} {} && {} always) => {} using trigger_streamer\".format(trigger_stream, trigger_cond, streamer_const, output))\n        sensor_graph.add_constant(streamer_const, self.index)", "docstring": "Execute this statement on the sensor_graph given the current scope tree.\n\nThis adds a single node to the sensor graph with the trigger_streamer function\nas is processing function.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def params_size(num_components, component_params_size, name=None):\n    with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size', [num_components, component_params_size]):\n        num_components = tf.convert_to_tensor(value=num_components, name='num_components', dtype_hint=tf.int32)\n        component_params_size = tf.convert_to_tensor(value=component_params_size, name='component_params_size')\n        num_components = dist_util.prefer_static_value(num_components)\n        component_params_size = dist_util.prefer_static_value(component_params_size)\n        return (num_components + (num_components * component_params_size))", "docstring": "Number of `params` needed to create a `MixtureSameFamily` distribution.\n\nArguments:\nnum_components: Number of component distributions in the mixture\ndistribution.\ncomponent_params_size: Number of parameters needed to create a single\ncomponent distribution.\nname: The name to use for the op to compute the number of parameters\n(if such an op needs to be created).\n\nReturns:\nparams_size: The number of parameters needed to create the mixture\ndistribution.", "source": "codesearchnet"}
{"code": "def arctan(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unary import arctan\n    return arctan(self)", "docstring": "Calculates the inverse tangent of an [`EventSet`][temporian.EventSet]'s features.\n\nCan only be used on floating point features.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 3, 4],\n...     features={\"M\": [0, 1.0, -1.0, 5.0]},\n... )\n>>> a.arctan()\nindexes: ...\ntimestamps: [1. 2. 3. 4.]\n'M': [ 0.      0.7854 -0.7854  1.3734]\n...\n\n```\n\nReturns:\nEventSetOrNode with inverse tangent of input features.", "source": "github-repos"}
{"code": "def update_email_asset(self, asset_id, name, asset_type):\n        \n        self.update_asset('EMAIL', asset_id, name, asset_type)", "docstring": "Updates a Email Asset\nArgs:\nname: The name provided to the email asset\nasset_type: The type provided to the email asset\nasset_id:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def add_handler(self, handler):\n        \n        handler['logger'] = self._get_logger(handler)\n        handler['reads'] = 0\n        handler['data_read'] = 0\n\n        self.capture_handlers.append(handler)", "docstring": "Add an additional handler\n\nArgs:\nhandler:\nA dictionary of handler configuration for the handler\nthat should be added. See :func:`__init__` for details\non valid parameters.", "source": "juraj-google-style"}
{"code": "def _RunIpRoute(self, args=None, options=None):\n    \n    args = args or []\n    options = options or {}\n    command = ['ip', 'route']\n    command.extend(args)\n    for item in options.items():\n      command.extend(item)\n    try:\n      process = subprocess.Popen(\n          command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n      stdout, stderr = process.communicate()\n    except OSError as e:\n      self.logger.warning('Exception running %s. %s.', command, str(e))\n    else:\n      if process.returncode:\n        message = 'Non-zero exit status running %s. %s.'\n        self.logger.warning(message, command, stderr.strip())\n      else:\n        return stdout.decode('utf-8', 'replace')\n    return ''", "docstring": "Run a command with ip route and return the response.\n\nArgs:\nargs: list, the string ip route command args to execute.\noptions: dict, the string parameters to append to the ip route command.\n\nReturns:\nstring, the standard output from the ip route command execution.", "source": "juraj-google-style"}
{"code": "def backward_propagation(parameters, cache, X, Y):\n    \n    m = X.shape[1]\n\n    \n    W1 = parameters[\"W1\"]\n    W2 = parameters[\"W2\"]\n\n    \n    A1 = cache[\"A1\"]\n    A2 = cache[\"A2\"]\n\n    \n    dZ2 = A2 - Y\n    dW2 = 1.0 / m * np.dot(dZ2, A1.T)\n    db2 = 1.0 / m * np.sum(dZ2, axis=1, keepdims=True)\n    dZ1 = W2.T * dZ2 * (1 - np.power(A1, 2))\n    dW1 = 1.0 / m * np.dot(dZ1, X.T)\n    db1 = 1.0 / m * np.sum(dZ1, axis=1, keepdims=True)\n\n    grads = {\"dW1\": dW1,\n             \"db1\": db1,\n             \"dW2\": dW2,\n             \"db2\": db2}\n\n    return grads", "docstring": "Implement the backward propagation using the instructions above.\n\nArguments:\nparameters -- python dictionary containing our parameters\ncache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\nX -- input data of shape (2, number of examples)\nY -- \"true\" labels vector of shape (1, number of examples)\n\nReturns:\ngrads -- python dictionary containing your gradients with respect to different parameters", "source": "juraj-google-style"}
{"code": "def cmd_list(options):\n    \n    (i_info, param_str) = gather_data(options)\n    if i_info:\n        awsc.get_all_aminames(i_info)\n        param_str = \"Instance List - \" + param_str + \"\\n\"\n        list_instances(i_info, param_str)\n    else:\n        print(\"No instances found with parameters: {}\".format(param_str))", "docstring": "Gather data for instances matching args and call display func.\n\nArgs:\noptions (object): contains args and data from parser.", "source": "juraj-google-style"}
{"code": "def numbafy(fn, args, compiler='jit', **nbkws):\n    kwargs = {}\n    if (not isinstance(args, (tuple, list))):\n        args = (args,)\n    if isinstance(compiler, six.string_types):\n        compiler_ = getattr(nb, compiler, None)\n        if (compiler is None):\n            raise AttributeError('No numba function with name {}.'.format(compiler))\n        compiler = compiler_\n    if (compiler in (nb.jit, nb.njit, nb.autojit)):\n        kwargs.update(jitkwargs)\n        sig = nbkws.pop('signature', None)\n    else:\n        kwargs.update(veckwargs)\n        sig = nbkws.pop('signatures', None)\n        if (sig is None):\n            warn(\"Vectorization without 'signatures' can lead to wrong results!\")\n    kwargs.update(nbkws)\n    if isinstance(fn, sy.Expr):\n        fn = sy.expand_func(fn)\n    func = sy.lambdify(args, fn, modules='numpy')\n    if (sig is None):\n        try:\n            func = compiler(**kwargs)(func)\n        except RuntimeError:\n            kwargs['cache'] = False\n            func = compiler(**kwargs)(func)\n    else:\n        try:\n            func = compiler(sig, **kwargs)(func)\n        except RuntimeError:\n            kwargs['cache'] = False\n            func = compiler(sig, **kwargs)(func)\n    return func", "docstring": "Compile a string, sympy expression or symengine expression using numba.\n\nNot all functions are supported by Python's numerical package (numpy). For\ndifficult cases, valid Python code (as string) may be more suitable than\nsymbolic expressions coming from sympy, symengine, etc. When compiling\nvectorized functions, include valid signatures (see `numba`_ documentation).\n\nArgs:\nfn: Symbolic expression as sympy/symengine expression or string\nargs (iterable): Symbolic arguments\ncompiler: String name or callable numba compiler\nnbkws: Compiler keyword arguments (if none provided, smart defaults are used)\n\nReturns:\nfunc: Compiled function\n\nWarning:\nFor vectorized functions, valid signatures are (almost always) required.", "source": "codesearchnet"}
{"code": "def _apply_base_theme(app):\n    \n\n    if QT_VERSION < (5,):\n        app.setStyle('plastique')\n    else:\n        app.setStyle('Fusion')\n\n    with open(_STYLESHEET) as stylesheet:\n        app.setStyleSheet(stylesheet.read())", "docstring": "Apply base theme to the application.\n\nArgs:\napp (QApplication): QApplication instance.", "source": "juraj-google-style"}
{"code": "def switch(condition, then_expression, else_expression):\n    if condition.dtype != dtypes_module.bool:\n        condition = math_ops.cast(condition, 'bool')\n    cond_ndim = ndim(condition)\n    if not cond_ndim:\n        if not callable(then_expression):\n\n            def then_expression_fn():\n                return then_expression\n        else:\n            then_expression_fn = then_expression\n        if not callable(else_expression):\n\n            def else_expression_fn():\n                return else_expression\n        else:\n            else_expression_fn = else_expression\n        x = cond.cond(condition, then_expression_fn, else_expression_fn)\n    else:\n        if callable(then_expression):\n            then_expression = then_expression()\n        if callable(else_expression):\n            else_expression = else_expression()\n        expr_ndim = ndim(then_expression)\n        if cond_ndim > expr_ndim:\n            raise ValueError('Rank of `condition` should be less than or equal to rank of `then_expression` and `else_expression`. ndim(condition)=' + str(cond_ndim) + ', ndim(then_expression)=' + str(expr_ndim))\n        if cond_ndim > 1:\n            ndim_diff = expr_ndim - cond_ndim\n            cond_shape = array_ops.concat([array_ops.shape(condition), [1] * ndim_diff], axis=0)\n            condition = array_ops.reshape(condition, cond_shape)\n            expr_shape = array_ops.shape(then_expression)\n            shape_diff = expr_shape - cond_shape\n            tile_shape = array_ops.where_v2(shape_diff > 0, expr_shape, array_ops.ones_like(expr_shape))\n            condition = array_ops.tile(condition, tile_shape)\n        x = array_ops.where_v2(condition, then_expression, else_expression)\n    return x", "docstring": "Switches between two operations depending on a scalar value.\n\nNote that both `then_expression` and `else_expression`\nshould be symbolic tensors of the *same shape*.\n\nArgs:\ncondition: tensor (`int` or `bool`).\nthen_expression: either a tensor, or a callable that returns a tensor.\nelse_expression: either a tensor, or a callable that returns a tensor.\n\nReturns:\nThe selected tensor.\n\nRaises:\nValueError: If rank of `condition` is greater than rank of expressions.", "source": "github-repos"}
{"code": "def put_async(self, path, value):\n        \n        request = Put(self._get_next_id(), path, value)\n        request.set_callback(self._q.put)\n        future = self._dispatch_request(request)\n        return future", "docstring": "Puts a value to a path and returns immediately\n\nArgs:\npath (list): The path to put to\nvalue (object): The value to set\n\nReturns:\nFuture: A single Future which will resolve to the result", "source": "juraj-google-style"}
{"code": "def show_tricky_tasks(self, verbose=0):\n        \n        nids, tasks = [], []\n        for task in self.iflat_tasks():\n            if task.num_launches > 1 or any(n > 0 for n in (task.num_restarts, task.num_corrections)):\n                nids.append(task.node_id)\n                tasks.append(task)\n\n        if not nids:\n            cprint(\"Everything's fine, no tricky tasks found\", color=\"green\")\n        else:\n            self.show_status(nids=nids)\n            if not verbose:\n                print(\"Use --verbose to print task history.\")\n                return\n\n            for nid, task in zip(nids, tasks):\n                cprint(repr(task), **task.status.color_opts)\n                self.show_history(nids=[nid], full_history=False, metadata=False)\n                \n                \n                if task.num_corrections:\n                    self.show_corrections(nids=[nid])", "docstring": "Print list of tricky tasks i.e. tasks that have been restarted or\nlaunched more than once or tasks with corrections.\n\nArgs:\nverbose: Verbosity level. If > 0, task history and corrections (if any) are printed.", "source": "juraj-google-style"}
{"code": "def add_args(self, args):\n        \n        for key, value in vars(args).items():\n            if value is not None:\n                setattr(self, key.upper(), value)", "docstring": "Add the args\n\nArgs:\nargs (namespace): The commandline args", "source": "juraj-google-style"}
{"code": "def getRow(self, key):\n    return Row(self._impl.getRow(Tuple(key)._impl))", "docstring": "Get a row by value of the indexing columns. If the index is not\nspecified, gets the only row of a dataframe with no indexing columns.\n\nArgs:\nkey: Tuple representing the index of the desired row.\n\nReturns:\nThe row.", "source": "codesearchnet"}
{"code": "def get_inlined_extension_url(field: descriptor.FieldDescriptor) -> str:\n    options = annotation_utils.get_options(field)\n    if options.HasExtension(annotations_pb2.fhir_inlined_extension_url):\n        return options.Extensions[annotations_pb2.fhir_inlined_extension_url]\n    return field.camelcase_name", "docstring": "Returns the FHIR inlined extension URL for a field.\n\nArgs:\nfield: The FieldDescriptor to examine.\n\nReturns:\nThe FHIR inlined extension URL, if one exists, otherwise returns the camel-\ncase name of the FieldDescriptor.", "source": "github-repos"}
{"code": "def __init__(self, binary_line_reader, delimiter):\n    \n    super(BinaryDSVReader, self).__init__()\n    self._line_reader = binary_line_reader\n    self._delimiter = delimiter", "docstring": "Initializes the delimited separated values reader.\n\nArgs:\nbinary_line_reader (BinaryLineReader): a binary file reader\ndelimiter (bytes): field delimiter.", "source": "juraj-google-style"}
{"code": "def query(self, rank):\n    self._flush()\n    current = self._head\n    if (not current):\n        return 0\n    mid_rank = math.floor((rank * self._observations))\n    max_rank = (mid_rank + math.floor((self._invariant(mid_rank, self._observations) / 2)))\n    rank = 0.0\n    while current._successor:\n        rank += current._rank\n        if (((rank + current._successor._rank) + current._successor._delta) > max_rank):\n            return current._value\n        current = current._successor\n    return current._value", "docstring": "Retrieves the value estimate for the requested quantile rank.\n\nThe requested quantile rank must be registered in the estimator's\ninvariants a priori!\n\nArgs:\nrank: A floating point quantile rank along the interval [0, 1].\n\nReturns:\nA numeric value for the quantile estimate.", "source": "codesearchnet"}
{"code": "def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None):\n    return self.__StreamMedia(callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=False)", "docstring": "Send this resumable upload in a single request.\n\nArgs:\ncallback: Progress callback function with inputs\n(http_wrapper.Response, transfer.Upload)\nfinish_callback: Final callback function with inputs\n(http_wrapper.Response, transfer.Upload)\nadditional_headers: Dict of headers to include with the upload\nhttp_wrapper.Request.\n\nReturns:\nhttp_wrapper.Response of final response.", "source": "codesearchnet"}
{"code": "def contains(self, sub):\n    sub = sub.lower()\n    found_words = set()\n    res = cgaddag.gdg_contains(self.gdg, sub.encode(encoding='ascii'))\n    tmp = res\n    while tmp:\n        word = tmp.contents.str.decode('ascii')\n        found_words.add(word)\n        tmp = tmp.contents.next\n    cgaddag.gdg_destroy_result(res)\n    return list(found_words)", "docstring": "Find all words containing a substring.\n\nArgs:\nsub: A substring to be searched for.\n\nReturns:\nA list of all words found.", "source": "codesearchnet"}
{"code": "def _TypeCompatibilityCheck(self, type_params):\n    type_params = {t for t in type_params if not isinstance(t, pytd.AnythingType)}\n    if not all((isinstance(t, pytd.ClassType) for t in type_params)):\n        return False\n    mro_list = [set(mro.GetBasesInMRO(t.cls)) for t in type_params]\n    mro_list.sort(key=len)\n    prev = set()\n    for cur in mro_list:\n        if not cur.issuperset(prev):\n            return False\n        prev = cur\n    return True", "docstring": "Check if the types are compatible.\n\nIt is used to handle the case:\nclass A(Sequence[A]): pass\nclass B(A, Sequence[B]): pass\nclass C(B, Sequence[C]): pass\nIn class `C`, the type parameter `_T` of Sequence could be `A`, `B` or `C`.\nNext we will check they have a linear inheritance relationship:\n`A` -> `B` -> `C`.\n\nArgs:\ntype_params: The class type params.\n\nReturns:\nTrue if all the types are compatible.", "source": "github-repos"}
{"code": "def __init__(self, options={}):\n        \n        settings = {\n            'currency': {\n                'symbol': \"$\",\n                'format': \"%s%v\",\n                'decimal': \".\",\n                'thousand': \",\",\n                'precision': 2,\n                'grouping': 3\n            },\n            'number': {\n                'precision': 0,\n                'grouping': 3,\n                'thousand': \",\",\n                'decimal': \".\"\n            }\n        }\n        if options:\n            settings.update(options)\n\n        self.settings = settings", "docstring": "Summary.\n\nArgs:\noptions (dict, optional): settings configuration object.", "source": "juraj-google-style"}
{"code": "def set_inheritance(obj_name, enabled, obj_type='file', clear=False):\n    if (obj_type not in ['file', 'registry', 'registry32']):\n        raise SaltInvocationError('obj_type called with incorrect parameter: {0}'.format(obj_name))\n    if clear:\n        obj_dacl = dacl(obj_type=obj_type)\n    else:\n        obj_dacl = dacl(obj_name, obj_type)\n    return obj_dacl.save(obj_name, (not enabled))", "docstring": "Enable or disable an objects inheritance.\n\nArgs:\n\nobj_name (str):\nThe name of the object\n\nenabled (bool):\nTrue to enable inheritance, False to disable\n\nobj_type (Optional[str]):\nThe type of object. Only three objects allow inheritance. Valid\nobjects are:\n\n- file (default): This is a file or directory\n- registry\n- registry32 (for WOW64)\n\nclear (Optional[bool]):\nTrue to clear existing ACEs, False to keep existing ACEs.\nDefault is False\n\nReturns:\nbool: True if successful, otherwise an Error\n\nUsage:\n\n.. code-block:: python\n\nsalt.utils.win_dacl.set_inheritance('C:\\\\Temp', False)", "source": "codesearchnet"}
{"code": "def is_int(string):\n    \n    try:\n        a = float(string)\n        b = int(a)\n    except ValueError:\n        return False\n    else:\n        return a == b", "docstring": "Checks if a string is an integer. If the string value is an integer\nreturn True, otherwise return False.\n\nArgs:\nstring: a string to test.\n\nReturns:\nboolean", "source": "juraj-google-style"}
{"code": "def sign(mv):\n    md5 = hashlib.md5()\n    update_hash(md5, mv)\n    return md5.digest()", "docstring": "Obtains a signature for a `MetricValue`\n\nArgs:\nmv (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricValue`): a\nMetricValue that's part of an operation\n\nReturns:\nstring: a unique signature for that operation", "source": "codesearchnet"}
{"code": "def __rmfile(path):\n    \n    logger.info(\"rmfile: %s\" % path)\n    try:\n        os.remove(path)\n        return True\n    except Exception as e:\n        logger.error(\"rmfile: %s failed! Error: %s\" % (path, e))\n        return False", "docstring": "Delete a file.\n\nArgs:\npath (str): Path to the file that needs to be deleted.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def get_messages(module):\n    answer = collections.OrderedDict()\n    for name in dir(module):\n        candidate = getattr(module, name)\n        if (inspect.isclass(candidate) and issubclass(candidate, message.Message)):\n            answer[name] = candidate\n    return answer", "docstring": "Discovers all protobuf Message classes in a given import module.\n\nArgs:\nmodule (module): A Python module; :func:`dir` will be run against this\nmodule to find Message subclasses.\n\nReturns:\ndict[str, google.protobuf.message.Message]: A dictionary with the\nMessage class names as keys, and the Message subclasses themselves\nas values.", "source": "codesearchnet"}
{"code": "def deserialize_function(serial, function_type):\n    if (function_type == 'function'):\n        function = tf.keras.utils.deserialize_keras_object(serial)\n    elif (function_type == 'lambda'):\n        function = generic_utils.func_load(serial)\n    else:\n        raise TypeError('Unknown function type:', function_type)\n    return function", "docstring": "Deserializes the Keras-serialized function.\n\n(De)serializing Python functions from/to bytecode is unsafe. Therefore we\nalso use the function's type as an anonymous function ('lambda') or named\nfunction in the Python environment ('function'). In the latter case, this lets\nus use the Python scope to obtain the function rather than reload it from\nbytecode. (Note that both cases are brittle!)\n\nKeras-deserialized functions do not perform lexical scoping. Any modules that\nthe function requires must be imported within the function itself.\n\nThis serialization mimicks the implementation in `tf.keras.layers.Lambda`.\n\nArgs:\nserial: Serialized Keras object: typically a dict, string, or bytecode.\nfunction_type: Python string denoting 'function' or 'lambda'.\n\nReturns:\nfunction: Function the serialized Keras object represents.\n\n#### Examples\n\n```python\nserial, function_type = serialize_function(lambda x: x)\nfunction = deserialize_function(serial, function_type)\nassert function(2.3) == 2.3  # function is identity\n```", "source": "codesearchnet"}
{"code": "def UpdateNumberOfEventSources(self, number_of_consumed_sources, number_of_produced_sources):\n    consumed_sources_delta = 0\n    if (number_of_consumed_sources is not None):\n        if (number_of_consumed_sources < self.number_of_consumed_sources):\n            raise ValueError('Number of consumed sources smaller than previous update.')\n        consumed_sources_delta = (number_of_consumed_sources - self.number_of_consumed_sources)\n        self.number_of_consumed_sources = number_of_consumed_sources\n        self.number_of_consumed_sources_delta = consumed_sources_delta\n    produced_sources_delta = 0\n    if (number_of_produced_sources is not None):\n        if (number_of_produced_sources < self.number_of_produced_sources):\n            raise ValueError('Number of produced sources smaller than previous update.')\n        produced_sources_delta = (number_of_produced_sources - self.number_of_produced_sources)\n        self.number_of_produced_sources = number_of_produced_sources\n        self.number_of_produced_sources_delta = produced_sources_delta\n    return ((consumed_sources_delta > 0) or (produced_sources_delta > 0))", "docstring": "Updates the number of event sources.\n\nArgs:\nnumber_of_consumed_sources (int): total number of event sources consumed\nby the process.\nnumber_of_produced_sources (int): total number of event sources produced\nby the process.\n\nReturns:\nbool: True if either number of event sources has increased.\n\nRaises:\nValueError: if the consumed or produced number of event sources is\nsmaller than the value of the previous update.", "source": "codesearchnet"}
{"code": "def conv(self, conv_input: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = nn_ops.conv2d(conv_input, self.conv_filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')\n    return {'output': out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\nconv_input: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def get_pmg_structure(phonopy_structure):\n    lattice = phonopy_structure.get_cell()\n    frac_coords = phonopy_structure.get_scaled_positions()\n    symbols = phonopy_structure.get_chemical_symbols()\n    masses = phonopy_structure.get_masses()\n    mms = phonopy_structure.get_magnetic_moments()\n    mms = (mms or ([0] * len(symbols)))\n    return Structure(lattice, symbols, frac_coords, site_properties={'phonopy_masses': masses, 'magnetic_moments': mms})", "docstring": "Convert a PhonopyAtoms object to pymatgen Structure object.\n\nArgs:\nphonopy_structure (PhonopyAtoms): A phonopy structure object.", "source": "codesearchnet"}
{"code": "def set_name(self, name):\n        \n        if not self._campfire.get_user().admin:\n            return False\n\n        result = self._connection.put(\"room/%s\" % self.id, {\"room\": {\"name\": name}})\n        if result[\"success\"]:\n            self._load()\n        return result[\"success\"]", "docstring": "Set the room name.\n\nArgs:\nname (str): Name\n\nReturns:\nbool. Success", "source": "juraj-google-style"}
{"code": "def __init__(self, application_namespace=None, application_data=None):\n        \n        super(ApplicationSpecificInformation, self).__init__(\n            Tags.APPLICATION_SPECIFIC_INFORMATION)\n\n        if application_namespace is None:\n            self.application_namespace = ApplicationNamespace()\n        else:\n            self.application_namespace = application_namespace\n\n        if application_data is None:\n            self.application_data = ApplicationData()\n        else:\n            self.application_data = application_data\n\n        self.validate()", "docstring": "Construct an ApplicationSpecificInformation object.\n\nArgs:\napplication_namespace (ApplicationNamespace): The name of a\nnamespace supported by the server. Optional, defaults to None.\napplication_data (ApplicationData): String data relevant to the\nspecified namespace. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def get_config_bool_option(parser: ConfigParser, section: str, option: str, default: bool=None) -> bool:\n    if (not parser.has_section(section)):\n        raise ValueError(('config missing section: ' + section))\n    return parser.getboolean(section, option, fallback=default)", "docstring": "Retrieves a boolean value from a parser.\n\nArgs:\nparser: instance of :class:`ConfigParser`\nsection: section name within config file\noption: option (variable) name within that section\ndefault: value to return if option is absent\n\nReturns:\nstring value\n\nRaises:\nValueError: if the section is absent", "source": "codesearchnet"}
{"code": "def initial_value(self):\n    raise NotImplementedError", "docstring": "Returns the Tensor used as the initial value for the variable.\n\nNote that this is different from `initialized_value()` which runs\nthe op that initializes the variable before returning its value.\nThis method returns the tensor that is used by the op that initializes\nthe variable.\n\nReturns:\nA `Tensor`.", "source": "github-repos"}
{"code": "def kill_pid(self, pid):\n        \n        try:\n\n            p = psutil.Process(pid)\n\n            p.terminate()\n\n            self.info_log('Killed [pid:%s][name:%s]' % (p.pid, p.name()))\n        except psutil.NoSuchProcess:\n            self.error_log('No such process: [pid:%s]' % pid)", "docstring": "Kill process by pid\n\nArgs:\npid (int)", "source": "juraj-google-style"}
{"code": "def _generate_shape(word: str) -> str:\n        \n\n        def counting_stars(w) -> List[int]:\n            count = [1]\n            for i in range(1, len(w)):\n                if w[i - 1] == w[i]:\n                    count[-1] += 1\n                else:\n                    count.append(1)\n\n            return count\n\n        shape = \"\"\n        p = 0\n        for c in counting_stars(word):\n            if c > 4:\n                shape += word[p:p + 4]\n            else:\n                shape += word[p:p + c]\n            p = p + c\n\n        return shape", "docstring": "Recreate shape from a token input by user\nArgs:\nword: str\n\nReturns: str", "source": "juraj-google-style"}
{"code": "def qr(x, mode='reduced'):\n    if any_symbolic_tensors((x,)):\n        return Qr(mode=mode).symbolic_call(x)\n    x = backend.convert_to_tensor(x)\n    return backend.linalg.qr(x, mode=mode)", "docstring": "Computes the QR decomposition of a tensor.\n\nArgs:\nx: Input tensor of shape `(..., M, N)`.\nmode: A string specifying the mode of the QR decomposition.\n- 'reduced': Returns the reduced QR decomposition. (default)\n- 'complete': Returns the complete QR decomposition.\n\nReturns:\nA tuple containing two tensors. The first tensor of shape `(..., M, K)`\nis the orthogonal matrix `q` and the second tensor of shape\n`(..., K, N)` is the upper triangular matrix `r`, where `K = min(M, N)`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]])\n>>> q, r = qr(x)\n>>> print(q)\narray([[-0.16903079  0.897085]\n[-0.5070925   0.2760267 ]\n[-0.8451542  -0.34503305]], shape=(3, 2), dtype=float32)", "source": "github-repos"}
{"code": "def set_expected_update_frequency(self, update_frequency):\n        \n        \n        try:\n            int(update_frequency)\n        except ValueError:\n            update_frequency = Dataset.transform_update_frequency(update_frequency)\n        if not update_frequency:\n            raise HDXError('Invalid update frequency supplied!')\n        self.data['data_update_frequency'] = update_frequency", "docstring": "Set expected update frequency\n\nArgs:\nupdate_frequency (str): Update frequency\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def compare_profiles(profile1, profile2):\n\n    \n\n    length = len(profile1)\n\n    profile1 = np.array(list(profile1))\n    profile2 = np.array(list(profile2))\n\n    similarity_array = profile1 == profile2\n\n    matches = np.sum(similarity_array)\n\n    similarity_ratio = matches/length\n\n    return similarity_ratio", "docstring": "Given two profiles, determine the ratio of similarity, i.e.\nthe hamming distance between the strings.\n\nArgs:\nprofile1/2 (str): profile string\nReturns:\nsimilarity_ratio (float): the ratio of similiarity (0-1)", "source": "juraj-google-style"}
{"code": "def plot(self, data):\n    \n\n    import IPython\n\n    if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):\n      raise ValueError('Expect a dictionary where the values are all dataframes.')\n\n    gfsg = GenericFeatureStatisticsGenerator()\n    data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]\n    data_proto = gfsg.ProtoFromDataFrames(data)\n    protostr = base64.b64encode(data_proto.SerializeToString()).decode(\"utf-8\")\n    html_id = 'f' + datalab.utils.commands.Html.next_id()\n\n    HTML_TEMPLATE = \n    html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)\n    return IPython.core.display.HTML(html)", "docstring": "Plots an overview in a list of dataframes\n\nArgs:\ndata: a dictionary with key the name, and value the dataframe.", "source": "juraj-google-style"}
{"code": "def fail_request(self, orig_request, message, start_response):\n    \n    cors_handler = self._create_cors_handler(orig_request)\n    return util.send_wsgi_error_response(\n        message, start_response, cors_handler=cors_handler)", "docstring": "Write an immediate failure response to outfile, no redirect.\n\nThis calls start_response and returns the error body.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nmessage: A string containing the error message to be displayed to user.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the body of the error response.", "source": "juraj-google-style"}
{"code": "def _decode_doubles(message):\n    binary = base64.b64decode(message)\n    return struct.unpack(('<' + ('d' * (len(binary)", "docstring": "Helper for decode_qp, decodes a double array.\n\nThe double array is stored as little endian 64 bit doubles.\nThe array has then been base64 encoded. Since we are decoding we do these\nsteps in reverse.\n\nArgs:\nmessage: the double array\n\nReturns:\ndecoded double array", "source": "codesearchnet"}
{"code": "def unitary(val: Any, default: TDefault=RaiseTypeErrorIfNotProvided) -> Union[(np.ndarray, TDefault)]:\n    from cirq import Gate, Operation\n    getter = getattr(val, '_unitary_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if (result is not NotImplemented):\n        return result\n    if isinstance(val, (Gate, Operation)):\n        decomposed_unitary = _decompose_and_get_unitary(val)\n        if (decomposed_unitary is not None):\n            return decomposed_unitary\n    if (default is not RaiseTypeErrorIfNotProvided):\n        return default\n    if (getter is None):\n        raise TypeError(\"object of type '{}' has no _unitary_ method.\".format(type(val)))\n    raise TypeError(\"object of type '{}' does have a _unitary_ method, but it returned NotImplemented.\".format(type(val)))", "docstring": "Returns a unitary matrix describing the given value.\n\nArgs:\nval: The value to describe with a unitary matrix.\ndefault: Determines the fallback behavior when `val` doesn't have\na unitary matrix. If `default` is not set, a TypeError is raised. If\ndefault is set to a value, that value is returned.\n\nReturns:\nIf `val` has a _unitary_ method and its result is not NotImplemented,\nthat result is returned. Otherwise, if `val` is a cirq.Gate or\ncirq.Operation, decomposition will be attempted and the resulting\nunitary is returned if unitaries exist for all operations of the\ndecompostion. If the result is still NotImplemented and a default value\nwas specified, the default value is returned.\n\nRaises:\nTypeError: `val` doesn't have a _unitary_ method (or that method\nreturned NotImplemented) and also no default value was specified.", "source": "codesearchnet"}
{"code": "class RunThresholdCriterion(beam.PTransform[beam.PCollection[NestedKeyedOutputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, threshold_criterion: ThresholdFn):\n        self._threshold_fn = threshold_criterion\n\n    def expand(self, input: beam.PCollection[NestedKeyedOutputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        if self._threshold_fn.is_stateful:\n            return input | beam.ParDo(_StatefulThresholdDoFn(self._threshold_fn.to_spec()))\n        else:\n            return input | beam.ParDo(_StatelessThresholdDoFn(self._threshold_fn.to_spec()))", "docstring": "Applies a threshold criterion to anomaly detection results.\n\nThis PTransform applies a `ThresholdFn` to the anomaly scores in\n`AnomalyResult` objects, updating the prediction labels. It handles both\nstateful and stateless `ThresholdFn` implementations.\n\nArgs:\nthreshold_criterion: The `ThresholdFn` to apply.", "source": "github-repos"}
{"code": "def get_visualizations():\n    if (not hasattr(g, 'visualizations')):\n        g.visualizations = {}\n        for VisClass in _get_visualization_classes():\n            vis = VisClass(get_model())\n            g.visualizations[vis.__class__.__name__] = vis\n    return g.visualizations", "docstring": "Get the available visualizations from the request context.  Put the\nvisualizations in the request context if they are not yet there.\n\nReturns:\n:obj:`list` of instances of :class:`.BaseVisualization` or\nderived class", "source": "codesearchnet"}
{"code": "def from_text(cls, text, lexicon, required=None, first_only=True):\n    component = lexicon.get_component(text, first_only=first_only)\n    if (required and (required not in component)):\n        return None\n    else:\n        return cls(component)", "docstring": "Generate a Component from a text string, using a Lexicon.\n\nArgs:\ntext (str): The text string to parse.\nlexicon (Lexicon): The dictionary to use for the\ncategories and lexemes.\nrequired (str): An attribute that we must have. If a required\nattribute is missing from the component, then None is returned.\nfirst_only (bool): Whether to only take the first\nmatch of a lexeme against the text string.\n\nReturns:\nComponent: A Component object, or None if there was no\nmust-have field.", "source": "codesearchnet"}
{"code": "def validlocations(configuration=None):\n        \n        \n        if Locations._validlocations is None:\n            if configuration is None:\n                configuration = Configuration.read()\n            Locations._validlocations = configuration.call_remoteckan('group_list', {'all_fields': True})\n        return Locations._validlocations", "docstring": "Read valid locations from HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nList[Dict]: A list of valid locations", "source": "juraj-google-style"}
{"code": "def handle_arguments(self, string, root, opening, closing):\n    args = string[(opening + 1):closing].replace(' ', '')\n    if ((opening > 0) or (not self.arguments.match(args))):\n        if (opening == 0):\n            raise errors.ParseError('Invalid argument sequence!')\n        (string, meta) = self.escape_meta(string, opening)\n        (string, meta) = self.escape_meta(string, meta.start())\n        return (string, root, meta)\n    if ('!' in args):\n        root.override = True\n        args = args.replace('!', '')\n    if ('+' in args):\n        root.increment = True\n        args = args.replace('+', '')\n    root.arguments = [int(i) for i in args.split(',') if i]\n    string = string[(closing + 1):]\n    meta = self.meta.search(string)\n    return (string, root, meta)", "docstring": "Handles phrase-arguments.\n\nSets the override and increment flags if found. Also makes\nsure that the argument sequence is at the start of the phrase\nand else warns about the unescaped meta characters. If the\narguments are indeed at the start but do not match the arguments\nregular expression, an error is raised.\n\nArguments:\nstring (str): The string being parsed.\nroot (str): The current root phrase.\nopening (int): The index of the opening paranthese.\nclosing (int): The index of the closing paranthese.\n\nReturns:\nThe (possibly escaped) string, the root phrase (if no escaping,\nthen with arguments and flags) and the next meta match.\n\nRaises:\nerrors.ParseError: If the arguments are invalid.", "source": "codesearchnet"}
{"code": "def retrieve_review(self, reviewer, product):\n        \n        if not isinstance(reviewer, self._reviewer_cls):\n            raise TypeError(\n                \"Type of given reviewer isn't acceptable:\", reviewer,\n                \", expected:\", self._reviewer_cls)\n        elif not isinstance(product, self._product_cls):\n            raise TypeError(\n                \"Type of given product isn't acceptable:\", product,\n                \", expected:\", self._product_cls)\n\n        try:\n            return self.graph[reviewer][product][\"review\"]\n        except TypeError:\n            raise KeyError(\n                \"{0} does not review {1}.\".format(reviewer, product))", "docstring": "Retrieve review that the given reviewer put the given product.\n\nArgs:\nreviewer: An instance of Reviewer.\nproduct: An instance of Product.\n\nReturns:\nA review object.\n\nRaises:\nTypeError: when given reviewer and product aren't instance of\nspecified reviewer and product class when this graph is constructed.\nKeyError: When the reviewer does not review the product.", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        images_kwargs = Qwen2_5_VLProcessorKwargs._defaults.get('images_kwargs', {})\n        images_kwargs.update(kwargs)\n        merge_size = images_kwargs.get('merge_size', None) or self.image_processor.merge_size\n        num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]\n        num_image_tokens = [num_patches \n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    if video_sizes is not None:\n        videos_kwargs = Qwen2_5_VLProcessorKwargs._defaults.get('videos_kwargs', {})\n        videos_kwargs.update(kwargs)\n        num_video_patches = [self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs) for video_size in video_sizes]\n        num_video_tokens = [num_patches \n        vision_data['num_video_tokens'] = num_video_tokens\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\nvideo_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (num_frames, height, width) per each video.\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def sackin(self, normalize='leaves'):\n    num_nodes_from_root = dict()\n    sackin = 0\n    num_leaves = 0\n    for node in self.traverse_preorder():\n        num_nodes_from_root[node] = 1\n        if (not node.is_root()):\n            num_nodes_from_root[node] += num_nodes_from_root[node.parent]\n        if node.is_leaf():\n            num_nodes_from_root[node] -= 1\n            sackin += num_nodes_from_root[node]\n            num_leaves += 1\n    if ((normalize is None) or (normalize is False)):\n        return sackin\n    elif (not isinstance(normalize, str)):\n        raise TypeError('normalize must be None or a string')\n    normalize = normalize.lower()\n    if (normalize == 'leaves'):\n        return (float(sackin) / num_leaves)\n    elif (normalize == 'yule'):\n        x = sum(((1.0 / i) for i in range(2, (num_leaves + 1))))\n        return ((sackin - ((2 * num_leaves) * x)) / num_leaves)\n    elif (normalize == 'pda'):\n        return (sackin / (num_leaves ** 1.5))\n    else:\n        raise RuntimeError(\"normalize must be None, 'leaves', 'yule', or 'pda'\")", "docstring": "Compute the Sackin balance index of this ``Tree``\n\nArgs:\n``normalize`` (``str``): How to normalize the Sackin index (if at all)\n\n* ``None`` to not normalize\n\n* ``\"leaves\"`` to normalize by the number of leaves\n\n* ``\"yule\"`` to normalize to the Yule model\n\n* ``\"pda\"`` to normalize to the Proportional to Distinguishable Arrangements model\n\nReturns:\n``float``: Sackin index (either normalized or not)", "source": "codesearchnet"}
{"code": "def Collect(self, knowledge_base, artifact_definition, searcher):\n    for source in artifact_definition.sources:\n        if (source.type_indicator not in (artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY, artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE)):\n            continue\n        if (source.type_indicator == artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n            key_value_pairs = [{'key': key} for key in source.keys]\n        else:\n            key_value_pairs = source.key_value_pairs\n        for key_value_pair in key_value_pairs:\n            key_path = key_value_pair['key']\n            key_path_upper = key_path.upper()\n            if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):\n                key_path = '{0:s}{1:s}'.format('HKEY_LOCAL_MACHINE\\\\System\\\\CurrentControlSet', key_path[23:])\n            find_spec = registry_searcher.FindSpec(key_path_glob=key_path)\n            for key_path in searcher.Find(find_specs=[find_spec]):\n                try:\n                    registry_key = searcher.GetKeyByPath(key_path)\n                except IOError as exception:\n                    raise errors.PreProcessFail('Unable to retrieve Windows Registry key: {0:s} with error: {1!s}'.format(key_path, exception))\n                if registry_key:\n                    value_name = key_value_pair.get('value', None)\n                    self._ParseKey(knowledge_base, registry_key, value_name)", "docstring": "Collects values using a Windows Registry value artifact definition.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nartifact_definition (artifacts.ArtifactDefinition): artifact definition.\nsearcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to\npreprocess the Windows Registry.\n\nRaises:\nPreProcessFail: if the Windows Registry key or value cannot be read.", "source": "codesearchnet"}
{"code": "def save_headers(cls, filename: str, response: HTTPResponse):\n        \n        new_filename = filename + '-new'\n\n        with open('wb') as new_file:\n            new_file.write(response.header())\n\n            with wpull.util.reset_file_offset(response.body):\n                response.body.seek(0)\n                shutil.copyfileobj(response.body, new_file)\n\n        os.remove(filename)\n        os.rename(new_filename, filename)", "docstring": "Prepend the HTTP response header to the file.\n\nArgs:\nfilename: The path of the file\nresponse: Response", "source": "juraj-google-style"}
{"code": "def insort_event_right(self, event, lo=0, hi=None):\n    if (lo < 0):\n        raise ValueError('lo must be non-negative')\n    if (hi is None):\n        hi = len(self.queue)\n    while (lo < hi):\n        mid = ((lo + hi) \n        if (event[0] < self.queue[mid][0]):\n            hi = mid\n        else:\n            lo = (mid + 1)\n    self.queue.insert(lo, event)", "docstring": "Insert event in queue, and keep it sorted assuming queue is sorted.\n\nIf event is already in queue, insert it to the right of the rightmost\nevent (to keep FIFO order).\n\nOptional args lo (default 0) and hi (default len(a)) bound the\nslice of a to be searched.\n\nArgs:\nevent: a (time in sec since unix epoch, callback, args, kwds) tuple.", "source": "codesearchnet"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(MACSignatureKeyInformation, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid struct missing the unique identifier attribute.')\n    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream):\n        self._cryptographic_parameters = CryptographicParameters()\n        self._cryptographic_parameters.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the MACSignatureKeyInformation struct and\ndecode it into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def retrieve_artifacts(self, compose_data, output_data_config, job_name):\n    artifacts = os.path.join(self.container_root, 'artifacts')\n    compressed_artifacts = os.path.join(self.container_root, 'compressed_artifacts')\n    os.mkdir(artifacts)\n    model_artifacts = os.path.join(artifacts, 'model')\n    output_artifacts = os.path.join(artifacts, 'output')\n    artifact_dirs = [model_artifacts, output_artifacts, compressed_artifacts]\n    for d in artifact_dirs:\n        os.mkdir(d)\n    for host in self.hosts:\n        volumes = compose_data['services'][str(host)]['volumes']\n        for volume in volumes:\n            (host_dir, container_dir) = volume.split(':')\n            if (container_dir == '/opt/ml/model'):\n                sagemaker.local.utils.recursive_copy(host_dir, model_artifacts)\n            elif (container_dir == '/opt/ml/output'):\n                sagemaker.local.utils.recursive_copy(host_dir, output_artifacts)\n    model_files = [os.path.join(model_artifacts, name) for name in os.listdir(model_artifacts)]\n    output_files = [os.path.join(output_artifacts, name) for name in os.listdir(output_artifacts)]\n    sagemaker.utils.create_tar_file(model_files, os.path.join(compressed_artifacts, 'model.tar.gz'))\n    sagemaker.utils.create_tar_file(output_files, os.path.join(compressed_artifacts, 'output.tar.gz'))\n    if (output_data_config['S3OutputPath'] == ''):\n        output_data = ('file:\n    else:\n        output_data = sagemaker.local.utils.move_to_destination(compressed_artifacts, output_data_config['S3OutputPath'], job_name, self.sagemaker_session)\n    _delete_tree(model_artifacts)\n    _delete_tree(output_artifacts)\n    return os.path.join(output_data, 'model.tar.gz')", "docstring": "Get the model artifacts from all the container nodes.\n\nUsed after training completes to gather the data from all the individual containers. As the\nofficial SageMaker Training Service, it will override duplicate files if multiple containers have\nthe same file names.\n\nArgs:\ncompose_data(dict): Docker-Compose configuration in dictionary format.\n\nReturns: Local path to the collected model artifacts.", "source": "codesearchnet"}
{"code": "def victim_asset_associations(\n        self, main_type, sub_type, unique_id, branch_type, owner=None, params=None\n    ):\n        \n        params = params or {}\n\n        if owner:\n            params['owner'] = owner\n\n        if not sub_type:\n            url = '/v2/{}/{}/victimAssets/{}'.format(main_type, unique_id, branch_type)\n        else:\n            url = '/v2/{}/{}/{}/victimAssets/{}'.format(main_type, sub_type, unique_id, branch_type)\n\n        for vaa in self._iterate(url, params, 'victimAsset'):\n            yield vaa", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nbranch_type:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_scan_plot(self, coords=None):\n        \n        from pymatgen.util.plotting import pretty_plot\n\n        plt = pretty_plot(12, 8)\n\n        d = self.read_scan()\n\n        if coords and coords in d[\"coords\"]:\n            x = d[\"coords\"][coords]\n            plt.xlabel(coords)\n        else:\n            x = range(len(d[\"energies\"]))\n            plt.xlabel(\"points\")\n\n        plt.ylabel(\"Energy (eV)\")\n\n        e_min = min(d[\"energies\"])\n        y = [(e - e_min) * Ha_to_eV for e in d[\"energies\"]]\n\n        plt.plot(x, y, \"ro--\")\n        return plt", "docstring": "Get a matplotlib plot of the potential energy surface.\n\nArgs:\ncoords: internal coordinate name to use as abcissa.", "source": "juraj-google-style"}
{"code": "def _add_genotypes(self, variant_obj, gemini_variant, case_id, individual_objs):\n    for ind in individual_objs:\n        index = ind.ind_index\n        variant_obj.add_individual(Genotype(sample_id=ind.ind_id, genotype=gemini_variant['gts'][index], case_id=case_id, phenotype=ind.phenotype, ref_depth=gemini_variant['gt_ref_depths'][index], alt_depth=gemini_variant['gt_alt_depths'][index], depth=gemini_variant['gt_depths'][index], genotype_quality=gemini_variant['gt_quals'][index]))", "docstring": "Add the genotypes for a variant for all individuals\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ngemini_variant (GeminiQueryRow): The gemini variant\ncase_id (str): related case id\nindividual_objs (list(dict)): A list of Individuals", "source": "codesearchnet"}
{"code": "def get_descriptor_defaults(self, api_info, hostname=None, x_google_api_name=False):\n    \n    hostname = (hostname or util.get_app_hostname() or\n                api_info.hostname)\n    protocol = 'http' if ((hostname and hostname.startswith('localhost')) or\n                          util.is_running_on_devserver()) else 'https'\n    base_path = api_info.base_path\n    if base_path != '/':\n        base_path = base_path.rstrip('/')\n    defaults = {\n        'swagger': '2.0',\n        'info': {\n            'version': api_info.api_version,\n            'title': api_info.name\n        },\n        'host': hostname,\n        'consumes': ['application/json'],\n        'produces': ['application/json'],\n        'schemes': [protocol],\n        'basePath': base_path,\n    }\n\n    if x_google_api_name:\n        defaults['x-google-api-name'] = _validate_api_name(api_info.name)\n\n    return defaults", "docstring": "Gets a default configuration for a service.\n\nArgs:\napi_info: _ApiInfo object for this service.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\nA dictionary with the default configuration.", "source": "juraj-google-style"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states)\n    pooled_output = text_outputs.pooler_output\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`CLIPTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, CLIPModel\n\n>>> model = CLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-base-patch32\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def get_config(self):\n    config = {}\n    for (_, curriculum) in self.brains_to_curriculums.items():\n        curr_config = curriculum.get_config()\n        config.update(curr_config)\n    return config", "docstring": "Get the combined configuration of all curriculums in this\nMetaCurriculum.\n\nReturns:\nA dict from parameter to value.", "source": "codesearchnet"}
{"code": "def getOption(self, name):\n        \n        try:\n            value = lock_and_call(\n                lambda: self._impl.getOption(name).value(),\n                self._lock\n            )\n        except RuntimeError:\n            return None\n        else:\n            try:\n                return int(value)\n            except ValueError:\n                try:\n                    return float(value)\n                except ValueError:\n                    return value", "docstring": "Get the current value of the specified option. If the option does not\nexist, returns None.\n\nArgs:\nname: Option name.\n\nReturns:\nValue of the option.\n\nRaises:\nInvalidArgumet: if the option name is not valid.", "source": "juraj-google-style"}
{"code": "def download_file(bucket_name, path, target, sagemaker_session):\n    \n    path = path.lstrip('/')\n    boto_session = sagemaker_session.boto_session\n\n    s3 = boto_session.resource('s3')\n    bucket = s3.Bucket(bucket_name)\n    bucket.download_file(path, target)", "docstring": "Download a Single File from S3 into a local path\n\nArgs:\nbucket_name (str): S3 bucket name\npath (str): file path within the bucket\ntarget (str): destination directory for the downloaded file.\nsagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.", "source": "juraj-google-style"}
{"code": "def add_file(self, path, compress):\n    if (not os.path.isfile(path)):\n        raise ValueError('{} is not a file'.format(path))\n    self.fileobj.seek(self.last_offset)\n    with open(path, 'rb') as f:\n        flags = (os.stat(path).st_mode & 511)\n        self.add_fileobj(f, path, compress, flags)", "docstring": "Add a single file to the MAR file.\n\nArgs:\npath (str): path to a file to add to this MAR file.\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.", "source": "codesearchnet"}
{"code": "def union(*schedules: List[Union[ScheduleComponent, Tuple[int, ScheduleComponent]]],\n          name: str = None) -> Schedule:\n    \n    if name is None and schedules:\n        sched = schedules[0]\n        if isinstance(sched, (list, tuple)):\n            name = sched[1].name\n        else:\n            name = sched.name\n    return Schedule(*schedules, name=name)", "docstring": "Create a union (and also shift if desired) of all input `Schedule`s.\n\nArgs:\n*schedules: Schedules to take the union of\nname: Name of the new schedule. Defaults to first element of `schedules`", "source": "juraj-google-style"}
{"code": "def __init__(self, location=None, parent=None, store_index=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(VShadowPathSpec, self).__init__(parent=parent, **kwargs)\n    self.location = location\n    self.store_index = store_index", "docstring": "Initializes a path specification.\n\nNote that the VSS path specification must have a parent.\n\nArgs:\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\nstore_index (Optional[int]): store index.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def tritonast2arybo(e, use_exprs=True, use_esf=False, context=None):\n    \n\n    children_ = e.getChildren()\n    children = (tritonast2arybo(c,use_exprs,use_esf,context) for c in children_)\n    reversed_children = (tritonast2arybo(c,use_exprs,use_esf,context) for c in reversed(children_))\n\n    Ty = e.getType()\n    if Ty == TAstN.ZX:\n        n = next(children)\n        v = next(children)\n        n += v.nbits\n        if n == v.nbits:\n            return v\n        return v.zext(n)\n    if Ty == TAstN.SX:\n        n = next(children)\n        v = next(children)\n        n += v.nbits\n        if n == v.nbits:\n            return v\n        return v.sext(n)\n    if Ty == TAstN.INTEGER:\n        return e.getInteger()\n    if Ty == TAstN.BV:\n        cst = next(children)\n        nbits = next(children)\n        if use_exprs:\n            return EX.ExprCst(cst, nbits)\n        else:\n            return _get_mba(nbits,use_esf).from_cst(cst)\n    if Ty == TAstN.EXTRACT:\n        last = next(children)\n        first = next(children)\n        v = next(children)\n        return v[first:last+1]\n    if Ty == TAstN.CONCAT:\n        if use_exprs:\n            return EX.ExprConcat(*list(reversed_children))\n        else:\n            return flatten(reversed_children)\n    if Ty == TAstN.VARIABLE:\n        name = e.getSymbolicVariable().getName()\n        ret = _get_mba(e.getBitvectorSize(),use_esf).var(name)\n        if use_exprs:\n            ret = EX.ExprBV(ret)\n        return ret\n    if Ty == TAstN.REFERENCE:\n        if context is None:\n            raise ValueError(\"reference node without context can't be resolved\")\n        id_ = e.getSymbolicExpression().getId()\n        ret = context.get(id_, None)\n        if ret is None:\n            raise ValueError(\"expression id %d not found in context\" % id_)\n        return ret\n    if Ty == TAstN.LET:\n        \n        \n        raise ValueError(\"unsupported LET operation\")\n\n    \n    shifts = {\n        TAstN.BVASHR: lambda a,b: a.ashr(b),\n        TAstN.BVLSHR: lambda a,b: a.lshr(b),\n        TAstN.BVSHL:  operator.lshift,\n        TAstN.BVROL:  lambda x,n: x.rol(n),\n        TAstN.BVROR:  lambda x,n: x.ror(n)\n    }\n    shift = shifts.get(Ty, None)\n    if not shift is None:\n        v = next(children)\n        n = next(children)\n        return shift(v,n)\n\n    \n    unops = {\n        TAstN.BVNOT: lambda x: ~x,\n        TAstN.LNOT:  lambda x: ~x,\n        TAstN.BVNEG: operator.neg\n    }\n    unop = unops.get(Ty, None)\n    if not unop is None:\n        return unop(next(children))\n\n    binops = {\n        TAstN.BVADD:  operator.add,\n        TAstN.BVSUB:  operator.sub,\n        TAstN.BVAND:  operator.and_,\n        TAstN.BVOR:   operator.or_,\n        TAstN.BVXOR:  operator.xor,\n        TAstN.BVMUL:  operator.mul,\n        TAstN.BVNAND: lambda x,y: ~(x&y),\n        TAstN.BVNOR:  lambda x,y: ~(x|y),\n        TAstN.BVXNOR: lambda x,y: ~(x^y),\n        TAstN.BVUDIV: lambda x,y: x.udiv(y),\n        TAstN.BVSDIV: lambda x,y: x.sdiv(y),\n        TAstN.BVUREM: lambda x,y: x.urem(y),\n        TAstN.BVSREM: lambda x,y: x.srem(y),\n        TAstN.LAND:   operator.and_,\n        TAstN.LOR:    operator.or_\n    }\n    binop = binops.get(Ty, None)\n    if not binop is None:\n        return reduce(binop, children)\n\n    \n    lops = {\n        TAstN.EQUAL:    lambda x,y: EX.ExprCmpEq(x,y),\n        TAstN.DISTINCT: lambda x,y: EX.ExprCmpNeq(x,y),\n        TAstN.BVUGE:    lambda x,y: EX.ExprCmpGte(x,y,False),\n        TAstN.BVUGT:    
lambda x,y: EX.ExprCmpGt(x,y,False),\n        TAstN.BVULE:    lambda x,y: EX.ExprCmpLte(x,y,False),\n        TAstN.BVULT:    lambda x,y: EX.ExprCmpLt(x,y,False),\n        TAstN.BVSGE:    lambda x,y: EX.ExprCmpGte(x,y,True),\n        TAstN.BVSGT:    lambda x,y: EX.ExprCmpGt(x,y,True),\n        TAstN.BVSLE:    lambda x,y: EX.ExprCmpLte(x,y,True),\n        TAstN.BVSLT:    lambda x,y: EX.ExprCmpLt(x,y,True)\n    }\n    lop = lops.get(Ty, None)\n    if not lop is None:\n        return reduce(lop, children)\n\n    \n    if Ty != TAstN.ITE:\n        raise ValueError(\"unsupported node type %s\" % str(Ty))\n    return EX.ExprCond(next(children), next(children), next(children))", "docstring": "Convert a subset of Triton's AST into Arybo's representation\n\nArgs:\ne: Triton AST\nuse_esf: use ESFs when creating the final expression\ncontext: dictionnary that associates Triton expression ID to arybo expressions\n\nReturns:\nAn :class:`arybo.lib.MBAVariable` object", "source": "juraj-google-style"}
{"code": "def _copy_delpoy_scripts(self, scripts):\n        \n        if not os.path.exists(self.paths.scripts()):\n            os.makedirs(self.paths.scripts())\n\n        new_scripts = []\n        for script in scripts:\n            script = os.path.expandvars(script)\n            if not os.path.exists(script):\n                raise RuntimeError('Script %s does not exist' % script)\n\n            sanitized_name = script.replace('/', '_')\n            new_script_cur_path = os.path.expandvars(\n                self.paths.scripts(sanitized_name)\n            )\n            shutil.copy(script, new_script_cur_path)\n\n            new_script_init_path = os.path.join(\n                '$LAGO_PREFIX_PATH',\n                os.path.basename(self.paths.scripts()),\n                sanitized_name,\n            )\n            new_scripts.append(new_script_init_path)\n\n        return new_scripts", "docstring": "Copy the given deploy scripts to the scripts dir in the prefix\n\nArgs:\nscripts(list of str): list of paths of the scripts to copy to the\nprefix\n\nReturns:\nlist of str: list with the paths to the copied scripts, with a\nprefixed with $LAGO_PREFIX_PATH so the full path is not\nhardcoded", "source": "juraj-google-style"}
{"code": "def reconstruct_non_debug_graph_def(debug_graph_def):\n    return DebugGraph(debug_graph_def).non_debug_graph_def", "docstring": "Reconstruct original (non-debugger-decorated) partition GraphDef.\n\nThis method strips the input `tf.compat.v1.GraphDef` of the Copy* and\nDebug*-type nodes inserted by the debugger.\n\nThe reconstructed partition graph is identical to the original (i.e.,\nnon-debugger-decorated) partition graph except in the following respects:\n1) The exact names of the runtime-inserted internal nodes may differ.\nThese include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.\n2) As a consequence of 1, the nodes that receive input directly from such\nsend- and recv-type ops will have different input names.\n3) The parallel_iteration attribute of while-loop Enter ops are set to 1.\n\nArgs:\ndebug_graph_def: The debugger-decorated `tf.compat.v1.GraphDef`, with the\ndebugger-inserted Copy* and Debug* nodes.\n\nReturns:\nThe reconstructed `tf.compat.v1.GraphDef` stripped of the debugger-inserted\nnodes.", "source": "github-repos"}
{"code": "def _connect_to_device(self, uuid, key, client):\n        \n\n        slug = self._build_device_slug(uuid)\n        message = {'client': client, 'type': 'response', 'operation': 'connect'}\n\n        self._logger.info(\"Connection attempt for device %d\", uuid)\n\n        \n        if uuid in self._connections:\n            message['success'] = False\n            message['failure_reason'] = 'Someone else is connected to the device'\n\n            self._publish_status(slug, message)\n            return\n\n        \n        resp = yield self._manager.connect(uuid)\n        message['success'] = resp['success']\n        if resp['success']:\n            conn_id = resp['connection_id']\n            self._connections[uuid] = {'key': key, 'client': client, 'connection_id': conn_id, 'last_touch': monotonic(),\n                                       'script': [], 'trace_accum': bytes(), 'last_trace': None, 'trace_scheduled': False,\n                                       'last_progress': None}\n        else:\n            message['failure_reason'] = resp['reason']\n            self._connections[uuid] = {}\n\n        connection = self._connections[uuid]\n        connection['report_monitor'] = self._manager.register_monitor(uuid, ['report'], self._notify_report)\n        connection['trace_monitor'] = self._manager.register_monitor(uuid, ['trace'], self._notify_trace)\n\n        self._publish_status(slug, message)", "docstring": "Connect to a device given its uuid\n\nArgs:\nuuid (int): The unique id of the device\nkey (string): A 64 byte string used to secure this connection\nclient (string): The client id for who is trying to connect\nto the device.", "source": "juraj-google-style"}
{"code": "def cxx(project, detect_project=False):\n    from benchbuild.utils import cmd\n    cxx_name = str(CFG['compiler']['cxx'])\n    wrap_cc(cxx_name, compiler(cxx_name), project, detect_project=detect_project)\n    return cmd['./{name}'.format(name=cxx_name)]", "docstring": "Return a clang++ that hides CFLAGS and LDFLAGS.\n\nThis will generate a wrapper script in the current directory\nand return a complete plumbum command to it.\n\nArgs:\ncflags: The CFLAGS we want to hide.\nldflags: The LDFLAGS we want to hide.\nfunc (optional): A function that will be pickled alongside the compiler.\nIt will be called before the actual compilation took place. This\nway you can intercept the compilation process with arbitrary python\ncode.\n\nReturns (benchbuild.utils.cmd):\nPath to the new clang command.", "source": "codesearchnet"}
{"code": "def compose_full_url(pub, uuid_url=False):\n    url = compose_path(pub, uuid_url)\n    if (WEB_PORT == 80):\n        return ('%s:\n    return ('%s:", "docstring": "Compose full url for given `pub`, with protocol, server's address and port.\n\nArgs:\npub (obj): :class:`.DBPublication` instance.\nuuid_url (bool, default False): Compose URL using UUID.\n\nReturns:\nstr: Absolute url of the publication.\nRaises:\nPrivatePublicationError: When the `pub` is private publication.", "source": "codesearchnet"}
{"code": "def _GetISO8601String(self, structure):\n    fraction_of_second_length = len(structure.fraction_of_second)\n    if (fraction_of_second_length not in (3, 6, 7)):\n        raise ValueError('unsupported time fraction of second length: {0:d}'.format(fraction_of_second_length))\n    try:\n        fraction_of_second = int(structure.fraction_of_second, 10)\n    except (TypeError, ValueError) as exception:\n        raise ValueError('unable to determine fraction of second with error: {0!s}'.format(exception))\n    if (fraction_of_second_length == 7):\n        (fraction_of_second, _) = divmod(fraction_of_second, 10)\n    date_time_string = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format(structure.year, structure.month, structure.day, structure.hour, structure.minute, structure.second)\n    if (fraction_of_second_length > 0):\n        date_time_string = '{0:s}.{1:d}'.format(date_time_string, fraction_of_second)\n    utc_offset_minutes = structure.get('utc_offset_minutes', None)\n    if (utc_offset_minutes is not None):\n        try:\n            time_zone_offset = int(utc_offset_minutes[1:], 10)\n        except (IndexError, ValueError) as exception:\n            raise ValueError('Unable to parse time zone offset with error: {0!s}.'.format(exception))\n        (time_zone_hours, time_zone_minutes) = divmod(time_zone_offset, 60)\n        date_time_string = '{0:s}{1:s}{2:02d}:{3:02d}'.format(date_time_string, utc_offset_minutes[0], time_zone_hours, time_zone_minutes)\n    return date_time_string", "docstring": "Retrieves an ISO8601 date time string from the structure.\n\nThe date and time values in the SCCM log are formatted as:\ntime=\"19:33:19.766-330\" date=\"11-28-2014\"\n\nArgs:\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nReturns:\nstr: ISO 8601 date time string.\n\nRaises:\nValueError: if the structure cannot be converted into a date time string.", "source": "codesearchnet"}
{"code": "def __init__(self, initial_structure, final_structure):\n        \n        if final_structure.formula != initial_structure.formula:\n            raise ValueError(\"Initial and final structures have different \" +\n                             \"formulas!\")\n        self.initial = initial_structure\n        self.final = final_structure", "docstring": "Please note that the input and final structures should have the same\nordering of sites. This is typically the case for most computational\ncodes.\n\nArgs:\ninitial_structure (Structure): Initial input structure to\ncalculation.\nfinal_structure (Structure): Final output structure from\ncalculation.", "source": "juraj-google-style"}
{"code": "def add(self, rid, data, raise_on_error=True):\n        \n        return self.post(rid, data, raise_on_error)", "docstring": "Write data to the DataStore. Alias for post() method.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): The record data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_header_map = self._GetDataTypeMap('java_idx_file_header')\n\n    try:\n      file_header, file_offset = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse file header with error: {0!s}'.format(\n              exception))\n\n    if not file_header.format_version in self._SUPPORTED_FORMAT_VERSIONS:\n      raise errors.UnableToParseFile('Unsupported format version.')\n\n    if file_header.format_version == 602:\n      section1_map = self._GetDataTypeMap('java_idx_602_section1')\n    elif file_header.format_version in (603, 604):\n      section1_map = self._GetDataTypeMap('java_idx_603_section1')\n    elif file_header.format_version == 605:\n      section1_map = self._GetDataTypeMap('java_idx_605_section1')\n\n    try:\n      section1, data_size = self._ReadStructureFromFileObject(\n          file_object, file_offset, section1_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Unable to parse section 1 (format version: {0:d}) with error: '\n          '{1!s}').format(file_header.format_version, exception))\n\n    file_offset += data_size\n\n    if file_header.format_version == 602:\n      section2_map = self._GetDataTypeMap('java_idx_602_section2')\n    elif file_header.format_version in (603, 604, 605):\n      file_offset = 128\n      section2_map = self._GetDataTypeMap('java_idx_603_section2')\n\n    try:\n      section2, data_size = self._ReadStructureFromFileObject(\n          file_object, file_offset, section2_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Unable to parse section 2 (format version: {0:d}) with error: '\n          '{1!s}').format(file_header.format_version, exception))\n\n    file_offset += data_size\n\n    if not section2.url:\n      raise errors.UnableToParseFile('URL not found in file.')\n\n    date_http_header = None\n    for _ in range(section2.number_of_http_headers):\n      http_header_map = self._GetDataTypeMap('java_idx_http_header')\n      try:\n        http_header, data_size = self._ReadStructureFromFileObject(\n            file_object, file_offset, http_header_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'Unable to parse HTTP header value at offset: 0x{0:08x}'.format(\n                file_offset))\n        break\n\n      file_offset += data_size\n\n      if http_header.name == 'date':\n        date_http_header = http_header\n        break\n\n    event_data = JavaIDXEventData()\n    event_data.idx_version = file_header.format_version\n    event_data.ip_address = getattr(section2, 'ip_address', None)\n    event_data.url = section2.url\n\n    date_time = dfdatetime_java_time.JavaTime(\n        timestamp=section1.modification_time)\n    \n    event = time_events.DateTimeValuesEvent(date_time, 'File Hosted Date')\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if section1.expiration_time:\n      date_time = dfdatetime_java_time.JavaTime(\n          timestamp=section1.expiration_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if date_http_header:\n      \n   
   \n      \n      \n      try:\n        download_date = timelib.Timestamp.FromTimeString(\n            date_http_header.value, gmt_as_timezone=False)\n      except errors.TimestampError:\n        parser_mediator.ProduceExtractionWarning(\n            'Unable to parse date HTTP header value: {0:s}'.format(\n                date_http_header.value))\n\n      if download_date:\n        event = time_events.TimestampEvent(\n            download_date, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Java WebStart Cache IDX file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dvfvs.FileIO): a file-like object to parse.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def pose2mat(pose):\n    homo_pose_mat = np.zeros((4, 4), dtype=np.float32)\n    homo_pose_mat[(:3, :3)] = quat2mat(pose[1])\n    homo_pose_mat[(:3, 3)] = np.array(pose[0], dtype=np.float32)\n    homo_pose_mat[(3, 3)] = 1.0\n    return homo_pose_mat", "docstring": "Converts pose to homogeneous matrix.\n\nArgs:\npose: a (pos, orn) tuple where pos is vec3 float cartesian, and\norn is vec4 float quaternion.\n\nReturns:\n4x4 homogeneous matrix", "source": "codesearchnet"}
{"code": "def from_api_repr(cls, resource):\n        \n        config = cls(resource[\"sourceFormat\"])\n        for optcls in _OPTION_CLASSES:\n            opts = resource.get(optcls._RESOURCE_NAME)\n            if opts is not None:\n                config._options = optcls.from_api_repr(opts)\n                break\n        config._properties = copy.deepcopy(resource)\n        return config", "docstring": "Factory: construct an :class:`~.external_config.ExternalConfig`\ninstance given its API representation.\n\nArgs:\nresource (Dict[str, Any]):\nDefinition of an :class:`~.external_config.ExternalConfig`\ninstance in the same representation as is returned from the\nAPI.\n\nReturns:\n:class:`~.external_config.ExternalConfig`:\nConfiguration parsed from ``resource``.", "source": "juraj-google-style"}
{"code": "def __init__(self, low, high, output_shape):\n        \n        self.__low = low\n        self.__high = high\n        self.__output_shape = output_shape", "docstring": "Init.\n\nArgs:\nlow:            Lower boundary of the output interval.\nAll values generated will be greater than or equal to low.\n\nhigh:           Upper boundary of the output interval.\nAll values generated will be less than high.\n\noutput_shape:   Output shape.\nthe shape is `(batch size, d1, d2, d3, ...)`.", "source": "juraj-google-style"}
{"code": "def verify(self, obj):\n        \n\n        if not isinstance(obj, int):\n            raise ValidationError(\"Object is not a int\", reason='object is not a int', object=obj,\n                                  type=type(obj), int_type=int)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def list_autoscale_settings(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/microsoft.insights/',\n                        '/autoscaleSettings?api-version=', INSIGHTS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the autoscale settings in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of autoscale settings.", "source": "juraj-google-style"}
{"code": "def normalize(code):\n    \n    if len(code) == 3:\n        return code\n\n    normalized = translate(code)\n\n    if normalized:\n        return normalized\n\n    country = countries.get(code, None)\n\n    if country:\n        return country.alpha3.lower()\n\n    return code", "docstring": "Normalize language codes to ISO 639-2. If all conversions fails, return the\n`code` as it was given.\n\nArgs:\ncode (str): Language / country code.\n\nReturns:\nstr: ISO 639-2 country code.", "source": "juraj-google-style"}
{"code": "def get_log_id(cls, id):\n        \n        conn = Qubole.agent()\n        r = conn.get_raw(cls.element_path(id) + \"/logs\")\n        return r.text", "docstring": "Fetches log for the command represented by this id\n\nArgs:\n`id`: command id", "source": "juraj-google-style"}
{"code": "def __call__(self, environ, start_response):\n        \n        start_time = datetime.datetime.utcnow()\n        name = environ.get('PATH_INFO') or '/'\n        closure = {'status': '200 OK'}\n        http_method = environ.get('REQUEST_METHOD', 'GET')\n\n        self.client.context.operation.id = str(uuid.uuid4())\n        \n        self.client.context.operation.name = http_method + ' ' + name\n\n        def status_interceptor(status_string, headers_array, exc_info=None):\n            closure['status'] = status_string\n            start_response(status_string, headers_array, exc_info)\n\n        for part in self._wsgi_application(environ, status_interceptor):\n            yield part\n\n        success = True\n        response_match = re.match(r'\\s*(?P<code>\\d+)', closure['status'])\n        if response_match:\n            response_code = response_match.group('code')\n            if int(response_code) >= 400:\n                success = False\n        else:\n            response_code = closure['status']\n            success = False\n            \n        url = name\n        query_string = environ.get('QUERY_STRING')\n        if query_string:\n            url += '?' + query_string\n\n        scheme = environ.get('wsgi.url_scheme', 'http')\n        host =  environ.get('HTTP_HOST', environ.get('SERVER_NAME', 'unknown'))\n\n        url = scheme + ':\n\n        end_time = datetime.datetime.utcnow()\n        duration = int((end_time - start_time).total_seconds() * 1000)\n\n        self.client.track_request(name, url, success, start_time.isoformat() + 'Z', duration, response_code, http_method, self._common_properties)", "docstring": "Callable implementation for WSGI middleware.\n\nArgs:\nenviron (dict). a dictionary containing all WSGI environment properties for this request.\\n\nstart_response (func). a function used to store the status, HTTP headers to be sent to the client and optional exception information.\n\nReturns:\n(obj). the response to send back to the client.", "source": "juraj-google-style"}
{"code": "def outer_definition_name(cls):\n    outer_definition = cls.message_definition()\n    if (not outer_definition):\n        return util.get_package_for_module(cls.__module__)\n    return outer_definition.definition_name()", "docstring": "Helper method for creating outer definition name.\n\nReturns:\nIf definition is nested, will return the outer definitions\nname, else the package name.", "source": "codesearchnet"}
{"code": "def get_student_current_grade(self, username, course_id):\n    resp = self.requester.get(urljoin(self.base_url, '/api/grades/v1/courses/{course_key}/?username={username}'.format(username=username, course_key=course_id)))\n    resp.raise_for_status()\n    return CurrentGrade(resp.json()[0])", "docstring": "Returns an CurrentGrade object for the user in a course\n\nArgs:\nusername (str): an edx user's username\ncourse_id (str): an edX course id.\n\nReturns:\nCurrentGrade: object representing the student current grade for a course", "source": "codesearchnet"}
{"code": "def _strict_match(self, struct1, struct2, fu, s1_supercell=True,\n                      use_rms=False, break_on_match=False):\n        \n        if fu < 1:\n            raise ValueError(\"fu cannot be less than 1\")\n\n        mask, s1_t_inds, s2_t_ind = self._get_mask(struct1, struct2,\n                                                   fu, s1_supercell)\n\n        if mask.shape[0] > mask.shape[1]:\n            raise ValueError('after supercell creation, struct1 must '\n                             'have more sites than struct2')\n\n        \n        if (not self._subset) and mask.shape[1] != mask.shape[0]:\n            return None\n\n        if LinearAssignment(mask).min_cost > 0:\n            return None\n\n        best_match = None\n        \n        for s1fc, s2fc, avg_l, sc_m in \\\n                self._get_supercells(struct1, struct2, fu, s1_supercell):\n            \n            normalization = (len(s1fc) / avg_l.volume) ** (1/3)\n            inv_abc = np.array(avg_l.reciprocal_lattice.abc)\n            frac_tol = inv_abc * self.stol / (np.pi * normalization)\n            \n            for s1i in s1_t_inds:\n                t = s1fc[s1i] - s2fc[s2_t_ind]\n                t_s2fc = s2fc + t\n                if self._cmp_fstruct(s1fc, t_s2fc, frac_tol, mask):\n                    inv_lll_abc = np.array(avg_l.get_lll_reduced_lattice().reciprocal_lattice.abc)\n                    lll_frac_tol = inv_lll_abc * self.stol / (np.pi * normalization)\n                    dist, t_adj, mapping = self._cart_dists(\n                        s1fc, t_s2fc, avg_l, mask, normalization, lll_frac_tol)\n                    if use_rms:\n                        val = np.linalg.norm(dist) / len(dist) ** 0.5\n                    else:\n                        val = max(dist)\n                    if best_match is None or val < best_match[0]:\n                        total_t = t + t_adj\n                        total_t -= np.round(total_t)\n                        best_match = val, dist, sc_m, total_t, mapping\n                        if (break_on_match or val < 1e-5) and val < self.stol:\n                            return best_match\n\n        if best_match and best_match[0] < self.stol:\n            return best_match", "docstring": "Matches struct2 onto struct1 (which should contain all sites in\nstruct2).\n\nArgs:\nstruct1, struct2 (Structure): structures to be matched\nfu (int): size of supercell to create\ns1_supercell (bool): whether to create the supercell of\nstruct1 (vs struct2)\nuse_rms (bool): whether to minimize the rms of the matching\nbreak_on_match (bool): whether to stop search at first\nvalid match", "source": "juraj-google-style"}
{"code": "def is_param_method(obj, has_deps=False):\n    parameterized = (inspect.ismethod(obj) and isinstance(get_method_owner(obj), param.Parameterized))\n    if (parameterized and has_deps):\n        return getattr(obj, '_dinfo', {}).get('dependencies')\n    return parameterized", "docstring": "Whether the object is a method on a parameterized object.\n\nArgs:\nobj: Object to check\nhas_deps (boolean, optional): Check for dependencies\nWhether to also check whether the method has been annotated\nwith param.depends\n\nReturns:\nA boolean value indicating whether the object is a method\non a Parameterized object and if enabled whether it has any\ndependencies", "source": "codesearchnet"}
{"code": "def gcd_float(numbers, tol=1e-08):\n\n    def pair_gcd_tol(a, b):\n        'Calculate the Greatest Common Divisor of a and b.\\n\\n        Unless b==0, the result will have the same sign as b (so that when\\n        b is divided by it, the result comes out positive).\\n        '\n        while (b > tol):\n            (a, b) = (b, (a % b))\n        return a\n    n = numbers[0]\n    for i in numbers:\n        n = pair_gcd_tol(n, i)\n    return n", "docstring": "Returns the greatest common divisor for a sequence of numbers.\nUses a numerical tolerance, so can be used on floats\n\nArgs:\nnumbers: Sequence of numbers.\ntol: Numerical tolerance\n\nReturns:\n(int) Greatest common divisor of numbers.", "source": "codesearchnet"}
{"code": "def remove_import_statements(code):\n    new_code = []\n    for line in code.splitlines():\n        if ((not line.lstrip().startswith('import ')) and (not line.lstrip().startswith('from '))):\n            new_code.append(line)\n    while (new_code and (new_code[0] == '')):\n        new_code.pop(0)\n    while (new_code and (new_code[(- 1)] == '')):\n        new_code.pop()\n    return '\\n'.join(new_code)", "docstring": "Removes lines with import statements from the code.\n\nArgs:\ncode: The code to be stripped.\n\nReturns:\nThe code without import statements.", "source": "codesearchnet"}
{"code": "def GetCacheSize(self):\n    if ((not self._cache_start_offset) or (not self._cache_end_offset)):\n        return 0\n    return (self._cache_end_offset - self._cache_start_offset)", "docstring": "Determines the size of the uncompressed cached data.\n\nReturns:\nint: number of cached bytes.", "source": "codesearchnet"}
{"code": "def get_config_status():\n    cmd = 'Get-DscConfigurationStatus | Select-Object -Property HostName, Status, MetaData, @{Name=\"StartDate\";Expression={Get-Date ($_.StartDate) -Format g}}, Type, Mode, RebootRequested, NumberofResources'\n    try:\n        return _pshell(cmd, ignore_retcode=True)\n    except CommandExecutionError as exc:\n        if ('No status information available' in exc.info['stderr']):\n            raise CommandExecutionError('Not Configured')\n        raise", "docstring": "Get the status of the current DSC Configuration\n\nReturns:\ndict: A dictionary representing the status of the current DSC\nConfiguration on the machine\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dsc.get_config_status", "source": "codesearchnet"}
{"code": "def _parse_price(html_chunk):\n    \n    price = get_first_content(\n        html_chunk.find(\"div\", {\"class\": \"prices\"})\n    )\n\n    if not price:\n        return None\n\n    \n    price = dhtmlparser.removeTags(price)\n    price = price.split(\"\\n\")[-1]\n\n    return price", "docstring": "Parse price of the book.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr/None: Price as string with currency or None if not found.", "source": "juraj-google-style"}
{"code": "def default_output_fn(prediction, accept):\n    \n    return _worker.Response(response=_encoders.encode(prediction, accept), mimetype=accept)", "docstring": "Function responsible to serialize the prediction for the response.\n\nArgs:\nprediction (obj): prediction returned by predict_fn .\naccept (str): accept content-type expected by the client.\n\nReturns:\n(worker.Response): a Flask response object with the following args:\n\n* Args:\nresponse: the serialized data to return\naccept: the content-type that the data was transformed to.", "source": "juraj-google-style"}
{"code": "def _GetScanner(self, specification_store, signature_identifiers):\n    if (not specification_store):\n        return None\n    scanner_object = pysigscan.scanner()\n    for format_specification in specification_store.specifications:\n        if (format_specification.identifier not in signature_identifiers):\n            continue\n        for signature in format_specification.signatures:\n            pattern_offset = signature.offset\n            if (pattern_offset is None):\n                signature_flags = pysigscan.signature_flags.NO_OFFSET\n            elif (pattern_offset < 0):\n                pattern_offset *= (- 1)\n                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END\n            else:\n                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START\n            scanner_object.add_signature(signature.identifier, pattern_offset, signature.pattern, signature_flags)\n        self._signature_identifiers.append(format_specification.identifier)\n    return scanner_object", "docstring": "Initializes the scanner form the specification store.\n\nArgs:\nspecification_store (FormatSpecificationStore): a specification store.\nsignature_identifiers (list[str]): signature identifiers.\n\nReturns:\npysigscan.scanner: signature scanner or None.", "source": "codesearchnet"}
{"code": "def encode_field(self, field, value):\n    for encoder in _GetFieldCodecs(field, 'encoder'):\n        result = encoder(field, value)\n        value = result.value\n        if result.complete:\n            return value\n    if isinstance(field, messages.EnumField):\n        if field.repeated:\n            remapped_value = [(GetCustomJsonEnumMapping(field.type, python_name=e.name) or e.name) for e in value]\n        else:\n            remapped_value = GetCustomJsonEnumMapping(field.type, python_name=value.name)\n        if remapped_value:\n            return remapped_value\n    if (isinstance(field, messages.MessageField) and (not isinstance(field, message_types.DateTimeField))):\n        value = json.loads(self.encode_message(value))\n    return super(_ProtoJsonApiTools, self).encode_field(field, value)", "docstring": "Encode the given value as JSON.\n\nArgs:\nfield: a messages.Field for the field we're encoding.\nvalue: a value for field.\n\nReturns:\nA python value suitable for json.dumps.", "source": "codesearchnet"}
{"code": "def destroy_elb(app='', env='dev', region='us-east-1', **_):\n    \n    task_json = get_template(\n        template_file='destroy/destroy_elb.json.j2',\n        app=app,\n        env=env,\n        region=region,\n        vpc=get_vpc_id(account=env, region=region))\n\n    wait_for_task(task_json)\n\n    return True", "docstring": "Destroy ELB Resources.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\n\nReturns:\nTrue upon successful completion.", "source": "juraj-google-style"}
{"code": "def add_gene_info(self, variant_obj, gene_panels=None):\n        \n        gene_panels = gene_panels or []\n\n        \n        variant_obj['has_refseq'] = False\n\n        \n\n        \n        \n        extra_info = {}\n        for panel_obj in gene_panels:\n            for gene_info in panel_obj['genes']:\n                hgnc_id = gene_info['hgnc_id']\n                if hgnc_id not in extra_info:\n                    extra_info[hgnc_id] = []\n\n                extra_info[hgnc_id].append(gene_info)\n\n        \n        \n        for variant_gene in variant_obj.get('genes', []):\n            hgnc_id = variant_gene['hgnc_id']\n            \n            hgnc_gene = self.hgnc_gene(hgnc_id)\n\n            if not hgnc_gene:\n                continue\n\n            \n            \n            transcripts_dict = {}\n            \n            for transcript in hgnc_gene.get('transcripts', []):\n                tx_id = transcript['ensembl_transcript_id']\n                transcripts_dict[tx_id] = transcript\n\n            \n            hgnc_gene['transcripts_dict'] = transcripts_dict\n\n            if hgnc_gene.get('incomplete_penetrance'):\n                variant_gene['omim_penetrance'] = True\n\n            \n            \n            panel_info = extra_info.get(hgnc_id, [])\n\n            \n            disease_associated = set()\n            \n            disease_associated_no_version = set()\n            manual_penetrance = False\n            mosaicism = False\n            manual_inheritance = set()\n\n            \n            for gene_info in panel_info:\n                \n                for tx in gene_info.get('disease_associated_transcripts', []):\n                    \n                    stripped = re.sub(r'\\.[0-9]', '', tx)\n                    disease_associated_no_version.add(stripped)\n                    disease_associated.add(tx)\n\n                if gene_info.get('reduced_penetrance'):\n                    manual_penetrance = True\n\n                if gene_info.get('mosaicism'):\n                    mosaicism = True\n\n                manual_inheritance.update(gene_info.get('inheritance_models', []))\n\n            variant_gene['disease_associated_transcripts'] = list(disease_associated)\n            variant_gene['manual_penetrance'] = manual_penetrance\n            variant_gene['mosaicism'] = mosaicism\n            variant_gene['manual_inheritance'] = list(manual_inheritance)\n\n            \n            \n\n            \n            for transcript in variant_gene.get('transcripts', []):\n                tx_id = transcript['transcript_id']\n                if not tx_id in transcripts_dict:\n                    continue\n\n                \n                hgnc_transcript = transcripts_dict[tx_id]\n\n                \n                if hgnc_transcript.get('is_primary'):\n                    transcript['is_primary'] = True\n                \n                \n                if not hgnc_transcript.get('refseq_id'):\n                    continue\n\n                refseq_id = hgnc_transcript['refseq_id']\n                transcript['refseq_id'] = refseq_id\n                variant_obj['has_refseq'] = True\n                \n                if refseq_id in disease_associated_no_version:\n                    transcript['is_disease_associated'] = True\n\n                \n                \n                transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers',[])\n\n            variant_gene['common'] = hgnc_gene\n            \n            
variant_gene['disease_terms'] = self.disease_terms(hgnc_id)\n\n        return variant_obj", "docstring": "Add extra information about genes from gene panels\n\nArgs:\nvariant_obj(dict): A variant from the database\ngene_panels(list(dict)): List of panels from database", "source": "juraj-google-style"}
{"code": "def stop_condition(self, condition):\n        \n\n        \n        \n        for cond_format in self._known_conditions:\n            try:\n                cond = cond_format.FromString(condition)\n                self.stop_conditions.append(cond)\n                return\n            except ArgumentError:\n                continue\n\n        raise ArgumentError(\"Stop condition could not be processed by any known StopCondition type\", condition=condition, suggestion=\"It may be mistyped or otherwise invalid.\")", "docstring": "Add a stop condition to this simulation.\n\nStop conditions are specified as strings and parsed into\nthe appropriate internal structures.\n\nArgs:\ncondition (str): a string description of the stop condition", "source": "juraj-google-style"}
{"code": "def _get_path_params(match):\n    \n    result = {}\n    for var_name, value in match.groupdict().iteritems():\n      actual_var_name = ApiConfigManager._from_safe_path_param_name(var_name)\n      result[actual_var_name] = urllib.unquote_plus(value)\n    return result", "docstring": "Gets path parameters from a regular expression match.\n\nArgs:\nmatch: A regular expression Match object for a path.\n\nReturns:\nA dictionary containing the variable names converted from base64.", "source": "juraj-google-style"}
{"code": "def predict_proba(self, x, y=None, **kwargs):\n        \n        if self.clf is None:\n            raise ValueError(\"Model has to be trained before making predictions.\")\n        if x is pandas.Series:\n            input_ = self.featurize_row(x.iloc[0], x.iloc[1]).reshape((1, -1))\n        elif x is pandas.DataFrame:\n            input_ = np.array([self.featurize_row(x.iloc[0], x.iloc[1]) for row in x])\n        elif y is not None:\n            input_ = self.featurize_row(x, y).reshape((1, -1))\n        else:\n            raise TypeError(\"DataType not understood.\")\n        return self.clf.predict(input_)", "docstring": "Predict the causal score using a trained RCC model\n\nArgs:\nx (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.\nargs (numpy.array): second variable (optional depending on the 1st argument).\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "juraj-google-style"}
{"code": "def stop_apppool(name):\n    ps_cmd = ['Stop-WebAppPool', \"'{0}'\".format(name)]\n    cmd_ret = _srvmgr(ps_cmd)\n    return (cmd_ret['retcode'] == 0)", "docstring": "Stop an IIS application pool.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the App Pool to stop.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.stop_apppool name='MyTestPool'", "source": "codesearchnet"}
{"code": "def cycle_find(key, width=4):\n    key_len = len(key)\n    buf = ''\n    it = deBruijn(width, 26)\n    for i in range(key_len):\n        buf += chr((ord('A') + next(it)))\n    if (buf == key):\n        return 0\n    for (i, c) in enumerate(it):\n        buf = (buf[1:] + chr((ord('A') + c)))\n        if (buf == key):\n            return (i + 1)\n    return (- 1)", "docstring": "Given an element of a de Bruijn sequence, find its index in that sequence.\n\nArgs:\nkey(str): The piece of the de Bruijn sequence to find.\nwidth(int): The width of each element in the sequence.\n\nReturns:\nint: The index of ``key`` in the de Bruijn sequence.", "source": "codesearchnet"}
{"code": "def get_client_kwargs(self, path):\n        \n        bucket_name, key = self.split_locator(path)\n        kwargs = dict(Bucket=bucket_name)\n        if key:\n            kwargs['Key'] = key\n        return kwargs", "docstring": "Get base keyword arguments for client for a\nspecific path.\n\nArgs:\npath (str): Absolute path or URL.\n\nReturns:\ndict: client args", "source": "juraj-google-style"}
{"code": "def GetVolumeSystemTypeIndicators(cls, path_spec, resolver_context=None):\n    \n    if (cls._volume_system_remainder_list is None or\n        cls._volume_system_store is None):\n      specification_store, remainder_list = cls._GetSpecificationStore(\n          definitions.FORMAT_CATEGORY_VOLUME_SYSTEM)\n      cls._volume_system_remainder_list = remainder_list\n      cls._volume_system_store = specification_store\n\n    if cls._volume_system_scanner is None:\n      cls._volume_system_scanner = cls._GetSignatureScanner(\n          cls._volume_system_store)\n\n    return cls._GetTypeIndicators(\n        cls._volume_system_scanner, cls._volume_system_store,\n        cls._volume_system_remainder_list, path_spec,\n        resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported volume system types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "juraj-google-style"}
{"code": "def unpackStruct(self, data, def_buf):\n        \n        struct_str = \"=\"\n        for fld in def_buf:\n            if not def_buf[fld][MeterData.CalculatedFlag]:\n                struct_str = struct_str + str(def_buf[fld][MeterData.SizeValue]) + \"s\"\n        if len(data) == 255:\n            contents = struct.unpack(struct_str, str(data))\n        else:\n            self.writeCmdMsg(\"Length error.  Len() size = \" + str(len(data)))\n            contents = ()\n        return contents", "docstring": "Wrapper for struct.unpack with SerialBlock buffer definitionns.\n\nArgs:\ndata (str): Implicit cast bytes to str, serial port return.\ndef_buf (SerialBlock): Block object holding field lengths.\n\nReturns:\ntuple: parsed result of struct.unpack() with field definitions.", "source": "juraj-google-style"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = DetrFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DetrFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def logdet(x):\n    if any_symbolic_tensors((x,)):\n        return Logdet().symbolic_call(x)\n    return backend.math.logdet(x)", "docstring": "Computes log of the determinant of a hermitian positive definite matrix.\n\nArgs:\nx: Input matrix. It must 2D and square.\n\nReturns:\nThe natural log of the determinant of matrix.", "source": "github-repos"}
{"code": "def _locate_elements_in_line(line, indices_list, ref_indices):\n    batch_size = len(indices_list)\n    offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]\n    start_columns = [None] * batch_size\n    end_columns = [None] * batch_size\n    if _NUMPY_OMISSION in line:\n        ellipsis_index = line.find(_NUMPY_OMISSION)\n    else:\n        ellipsis_index = len(line)\n    matches_iter = re.finditer(_NUMBER_REGEX, line)\n    batch_pos = 0\n    offset_counter = 0\n    for match in matches_iter:\n        if match.start() > ellipsis_index:\n            break\n        if offset_counter == offsets[batch_pos]:\n            start_columns[batch_pos] = match.start()\n            end_columns[batch_pos] = match.end() - 1\n            batch_pos += 1\n            if batch_pos >= batch_size:\n                break\n        offset_counter += 1\n    return (start_columns, end_columns)", "docstring": "Determine the start and end indices of an element in a line.\n\nArgs:\nline: (str) the line in which the element is to be sought.\nindices_list: (list of list of int) list of indices of the element to\nsearch for. Assumes that the indices in the batch are unique and sorted\nin ascending order.\nref_indices: (list of int) reference indices, i.e., the indices of the\nfirst element represented in the line.\n\nReturns:\nstart_columns: (list of int) start column indices, if found. If not found,\nNone.\nend_columns: (list of int) end column indices, if found. If not found,\nNone.\nIf found, the element is represented in the left-closed-right-open interval\n[start_column, end_column].", "source": "github-repos"}
{"code": "def match_hail_size_step_distributions(self, model_tracks, obs_tracks, track_pairings):\n        \n        label_columns = [\"Matched\", \"Max_Hail_Size\", \"Num_Matches\", \"Shape\", \"Location\", \"Scale\"]\n        s = 0\n        for m, model_track in enumerate(model_tracks):\n            model_track.observations = pd.DataFrame(index=model_track.times, columns=label_columns, dtype=np.float64)\n            model_track.observations.loc[:, :] = 0\n            model_track.observations[\"Matched\"] = model_track.observations[\"Matched\"].astype(np.int32)\n            for t, time in enumerate(model_track.times):\n                model_track.observations.loc[time, \"Matched\"] = track_pairings.loc[s, \"Matched\"]\n                if model_track.observations.loc[time, \"Matched\"] > 0:\n                    all_hail_sizes = []\n                    step_pairs = track_pairings.loc[s, \"Pairings\"]\n                    for step_pair in step_pairs:\n                        obs_step = obs_tracks[step_pair[0]].timesteps[step_pair[1]].ravel()\n                        obs_mask = obs_tracks[step_pair[0]].masks[step_pair[1]].ravel()\n                        all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_thresh)])\n                    combined_hail_sizes = np.concatenate(all_hail_sizes)\n                    min_hail = combined_hail_sizes.min() - 0.1\n                    model_track.observations.loc[time, \"Max_Hail_Size\"] = combined_hail_sizes.max()\n                    model_track.observations.loc[time, \"Num_Matches\"] = step_pairs.shape[0]\n                    model_track.observations.loc[time, [\"Shape\", \"Location\", \"Scale\"]] = gamma.fit(combined_hail_sizes,\n                                                                                                   floc=min_hail)\n                s += 1", "docstring": "Given a matching set of observed tracks for each model track,\n\nArgs:\nmodel_tracks:\nobs_tracks:\ntrack_pairings:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def transpose(self):\n    graph = self.graph\n    transposed = DAG()\n    for (node, edges) in graph.items():\n        transposed.add_node(node)\n    for (node, edges) in graph.items():\n        for edge in edges:\n            transposed.add_edge(edge, node)\n    return transposed", "docstring": "Builds a new graph with the edges reversed.\n\nReturns:\n:class:`stacker.dag.DAG`: The transposed graph.", "source": "codesearchnet"}
{"code": "def get_memberships(self):\n    response = self._get_xml((self.rest_url + '/group/membership'))\n    if (not response.ok):\n        return None\n    xmltree = etree.fromstring(response.content)\n    memberships = {}\n    for mg in xmltree.findall('membership'):\n        group = u'{}'.format(mg.get('group'))\n        users = [u'{}'.format(u.get('name')) for u in mg.find('users').findall('user')]\n        groups = [u'{}'.format(g.get('name')) for g in mg.find('groups').findall('group')]\n        memberships[group] = {u'users': users, u'groups': groups}\n    return memberships", "docstring": "Fetches all group memberships.\n\nReturns:\ndict:\nkey: group name\nvalue: (array of users, array of groups)", "source": "codesearchnet"}
{"code": "def distance_and_image_from_frac_coords(self, fcoords, jimage=None):\n    return self.lattice.get_distance_and_image(self.frac_coords, fcoords, jimage=jimage)", "docstring": "Gets distance between site and a fractional coordinate assuming\nperiodic boundary conditions. If the index jimage of two sites atom j\nis not specified it selects the j image nearest to the i atom and\nreturns the distance and jimage indices in terms of lattice vector\ntranslations. If the index jimage of atom j is specified it returns the\ndistance between the i atom and the specified jimage atom, the given\njimage is also returned.\n\nArgs:\nfcoords (3x1 array): fcoords to get distance from.\njimage (3x1 array): Specific periodic image in terms of\nlattice translations, e.g., [1,0,0] implies to take periodic\nimage that is one a-lattice vector away. If jimage is None,\nthe image that is nearest to the site is found.\n\nReturns:\n(distance, jimage): distance and periodic lattice translations\nof the other site for which the distance applies.", "source": "codesearchnet"}
{"code": "def push(self, targets, jobs=None, remote=None, show_checksums=False):\n        \n        return self.repo.cache.local.push(\n            targets,\n            jobs=jobs,\n            remote=self._get_cloud(remote, \"push\"),\n            show_checksums=show_checksums,\n        )", "docstring": "Push data items in a cloud-agnostic way.\n\nArgs:\ntargets (list): list of targets to push to the cloud.\njobs (int): number of jobs that can be running simultaneously.\nremote (dvc.remote.base.RemoteBase): optional remote to push to.\nBy default remote from core.remote config option is used.\nshow_checksums (bool): show checksums instead of file names in\ninformation messages.", "source": "juraj-google-style"}
{"code": "def regression_signature_def(examples, predictions):\n    if examples is None:\n        raise ValueError('Regression `examples` cannot be None.')\n    if not isinstance(examples, tensor_lib.Tensor):\n        raise ValueError(f'Expected regression `examples` to be of type Tensor. Found `examples` of type {type(examples)}.')\n    if predictions is None:\n        raise ValueError('Regression `predictions` cannot be None.')\n    input_tensor_info = utils.build_tensor_info(examples)\n    if input_tensor_info.dtype != types_pb2.DT_STRING:\n        raise ValueError(f'Regression input tensors must be of type string. Found tensors with type {input_tensor_info.dtype}.')\n    signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}\n    output_tensor_info = utils.build_tensor_info(predictions)\n    if output_tensor_info.dtype != types_pb2.DT_FLOAT:\n        raise ValueError(f'Regression output tensors must be of type float. Found tensors with type {output_tensor_info.dtype}.')\n    signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}\n    signature_def = build_signature_def(signature_inputs, signature_outputs, signature_constants.REGRESS_METHOD_NAME)\n    return signature_def", "docstring": "Creates regression signature from given examples and predictions.\n\nThis function produces signatures intended for use with the TensorFlow Serving\nRegress API (tensorflow_serving/apis/prediction_service.proto), and so\nconstrains the input and output types to those allowed by TensorFlow Serving.\n\nArgs:\nexamples: A string `Tensor`, expected to accept serialized tf.Examples.\npredictions: A float `Tensor`.\n\nReturns:\nA regression-flavored signature_def.\n\nRaises:\nValueError: If examples is `None`.", "source": "github-repos"}
{"code": "def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):\n  \n  output, target = iterator.get_next()\n  metrics = psnr_and_ssim(output, target)\n\n  with tf.Session() as sess:\n    sess.run(tf.local_variables_initializer())\n    initalizer = iterator._initializer  \n    if initalizer is not None:\n      sess.run(initalizer, feed_dict=feed_dict)\n\n    all_psnr, all_ssim = [], []\n    for i in range(num_videos):\n      print(\"Computing video: %d\" % i)\n      psnr_np, ssim_np = sess.run(metrics)\n      all_psnr.append(psnr_np)\n      all_ssim.append(ssim_np)\n    all_psnr = np.array(all_psnr)\n    all_ssim = np.array(all_ssim)\n    return all_psnr, all_ssim", "docstring": "Computes the average of all the metric for one decoding.\n\nArgs:\niterator: dataset iterator.\nfeed_dict: feed dict to initialize iterator.\nnum_videos: number of videos.\n\nReturns:\nall_psnr: 2-D Numpy array, shape=(num_samples, num_frames)\nall_ssim: 2-D Numpy array, shape=(num_samples, num_frames)", "source": "juraj-google-style"}
{"code": "def compute_mel_filterbank_features(waveforms, sample_rate=16000, dither=(1.0 / np.iinfo(np.int16).max), preemphasis=0.97, frame_length=25, frame_step=10, fft_length=None, window_fn=functools.partial(tf.contrib.signal.hann_window, periodic=True), lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80, log_noise_floor=0.001, apply_mask=True):\n    wav_lens = (tf.reduce_max((tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) * tf.to_int32(tf.not_equal(waveforms, 0.0))), axis=(- 1)) + 1)\n    if (dither > 0):\n        waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)\n    if (preemphasis > 0):\n        waveforms = (waveforms[(:, 1:)] - (preemphasis * waveforms[(:, :(- 1))]))\n        wav_lens -= 1\n    frame_length = int(((frame_length * sample_rate) / 1000.0))\n    frame_step = int(((frame_step * sample_rate) / 1000.0))\n    if (fft_length is None):\n        fft_length = int((2 ** np.ceil(np.log2(frame_length))))\n    stfts = tf.contrib.signal.stft(waveforms, frame_length=frame_length, frame_step=frame_step, fft_length=fft_length, window_fn=window_fn, pad_end=True)\n    stft_lens = ((wav_lens + (frame_step - 1)) \n    masks = tf.to_float(tf.less_equal(tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0), tf.expand_dims(stft_lens, 1)))\n    magnitude_spectrograms = tf.abs(stfts)\n    num_spectrogram_bins = magnitude_spectrograms.shape[(- 1)].value\n    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, upper_edge_hertz)\n    mel_spectrograms = tf.tensordot(magnitude_spectrograms, linear_to_mel_weight_matrix, 1)\n    mel_spectrograms.set_shape(magnitude_spectrograms.shape[:(- 1)].concatenate(linear_to_mel_weight_matrix.shape[(- 1):]))\n    log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))\n    if apply_mask:\n        log_mel_sgram *= tf.expand_dims(tf.to_float(masks), (- 1))\n    return tf.expand_dims(log_mel_sgram, (- 1), name='mel_sgrams')", "docstring": "Implement mel-filterbank extraction using tf ops.\n\nArgs:\nwaveforms: float32 tensor with shape [batch_size, max_len]\nsample_rate: sampling rate of the waveform\ndither: stddev of Gaussian noise added to waveform to prevent quantization\nartefacts\npreemphasis: waveform high-pass filtering constant\nframe_length: frame length in ms\nframe_step: frame_Step in ms\nfft_length: number of fft bins\nwindow_fn: windowing function\nlower_edge_hertz: lowest frequency of the filterbank\nupper_edge_hertz: highest frequency of the filterbank\nnum_mel_bins: filterbank size\nlog_noise_floor: clip small values to prevent numeric overflow in log\napply_mask: When working on a batch of samples, set padding frames to zero\nReturns:\nfilterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]", "source": "codesearchnet"}
{"code": "def get_loss_reduction():\n    if not distribute_lib.get_strategy()._scale_loss_for_estimator:\n        return ReduceOp.SUM\n    last_reduction = ops.get_default_graph()._last_loss_reduction\n    if last_reduction == losses_impl.Reduction.SUM or last_reduction == 'sum':\n        return ReduceOp.SUM\n    return ReduceOp.MEAN", "docstring": "`tf.distribute.ReduceOp` corresponding to the last loss reduction.\n\nReturns:\n`tf.distribute.ReduceOp` corresponding to the last loss reduction for\nestimator and v1 optimizer use case. `tf.distribute.ReduceOp.SUM` otherwise.", "source": "github-repos"}
{"code": "def call(command, collect_missing=False, silent=True):\n    return (_execCommand if silent else execCommand)(shlex.split(command), collect_missing)", "docstring": "r\"\"\"Calls a task, as if it were called from the command line.\n\nArgs:\ncommand (str): A route followed by params (as if it were entered in the shell).\ncollect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False.\n\nReturns:\nThe return value of the called command.", "source": "codesearchnet"}
{"code": "def parse_numpy_printoption(kv_str):\n    k_v_str = kv_str.split('=', 1)\n    if len(k_v_str) != 2 or not k_v_str[0]:\n        raise argparse.ArgumentTypeError(\"'%s' is not in the form k=v.\" % kv_str)\n    k, v_str = k_v_str\n    printoptions = np.get_printoptions()\n    if k not in printoptions:\n        raise argparse.ArgumentTypeError(\"'%s' is not a valid printoption.\" % k)\n    v_type = type(printoptions[k])\n    if v_type is type(None):\n        raise argparse.ArgumentTypeError(\"Setting '%s' from the command line is not supported.\" % k)\n    try:\n        v = v_type(v_str) if v_type is not bool else flags.BooleanParser().parse(v_str)\n    except ValueError as e:\n        raise argparse.ArgumentTypeError(e.message)\n    np.set_printoptions(**{k: v})", "docstring": "Sets a single numpy printoption from a string of the form 'x=y'.\n\nSee documentation on numpy.set_printoptions() for details about what values\nx and y can take. x can be any option listed there other than 'formatter'.\n\nArgs:\nkv_str: A string of the form 'x=y', such as 'threshold=100000'\n\nRaises:\nargparse.ArgumentTypeError: If the string couldn't be used to set any\nnumpy printoption.", "source": "github-repos"}
{"code": "def execute_code_block(elem, doc):\n    command = select_executor(elem, doc).split(' ')\n    code = elem.text\n    if (('plt' in elem.attributes) or ('plt' in elem.classes)):\n        code = save_plot(code, elem)\n    command.append(code)\n    if ('args' in elem.attributes):\n        for arg in elem.attributes['args'].split():\n            command.append(arg)\n    cwd = (elem.attributes['wd'] if ('wd' in elem.attributes) else None)\n    return subprocess.run(command, encoding='utf8', stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd).stdout", "docstring": "Executes a code block by passing it to the executor.\n\nArgs:\nelem The AST element.\ndoc  The document.\n\nReturns:\nThe output of the command.", "source": "codesearchnet"}
{"code": "async def do_upload(context, files):\n    status = 0\n    try:\n        (await upload_artifacts(context, files))\n    except ScriptWorkerException as e:\n        status = worst_level(status, e.exit_code)\n        log.error('Hit ScriptWorkerException: {}'.format(e))\n    except aiohttp.ClientError as e:\n        status = worst_level(status, STATUSES['intermittent-task'])\n        log.error('Hit aiohttp error: {}'.format(e))\n    except Exception as e:\n        log.exception('SCRIPTWORKER_UNEXPECTED_EXCEPTION upload {}'.format(e))\n        raise\n    return status", "docstring": "Upload artifacts and return status.\n\nReturns the integer status of the upload.\n\nargs:\ncontext (scriptworker.context.Context): the scriptworker context.\nfiles (list of str): list of files to be uploaded as artifacts\n\nRaises:\nException: on unexpected exception.\n\nReturns:\nint: exit status", "source": "codesearchnet"}
{"code": "def new(cls, access_token, environment='prod'):\n    api_client = ApiClient.new(access_token, environment)\n    return cls(api_client)", "docstring": "Create new storage service client.\n\nArguments:\nenvironment(str): The service environment to be used for the client.\n'prod' or 'dev'.\naccess_token(str): The access token used to authenticate with the\nservice\n\nReturns:\nA storage_service.Client instance", "source": "codesearchnet"}
{"code": "def set_status(self, status):\n    text = ''\n    colour = '\n    if (status == 0):\n        text = 'OFFLINE'\n        colour = '\n    elif (status == 1):\n        text = 'STARTING'\n        colour = '\n    elif (status == 2):\n        text = 'ONLINE'\n        colour = '\n    self.status.set(text)\n    self.statusbar.config(background=colour)", "docstring": "Updates the status text\n\nArgs:\nstatus (int): The offline/starting/online status of Modis\n0: offline, 1: starting, 2: online", "source": "codesearchnet"}
{"code": "def scroll(self, direction='vertical', percent=0.6, duration=2.0):\n    if (direction not in ('vertical', 'horizontal')):\n        raise ValueError('Argument `direction` should be one of \"vertical\" or \"horizontal\". Got {}'.format(repr(direction)))\n    focus1 = (self._focus or [0.5, 0.5])\n    focus2 = list(focus1)\n    half_distance = (percent / 2)\n    if (direction == 'vertical'):\n        focus1[1] += half_distance\n        focus2[1] -= half_distance\n    else:\n        focus1[0] += half_distance\n        focus2[0] -= half_distance\n    return self.focus(focus1).drag_to(self.focus(focus2), duration=duration)", "docstring": "Simply touch down from point A and move to point B then release up finally. This action is performed within\nspecific motion range and duration.\n\nArgs:\ndirection (:py:obj:`str`): scrolling direction. \"vertical\" or \"horizontal\"\npercent (:py:obj:`float`): scrolling distance percentage of selected UI height or width according to\ndirection\nduration (:py:obj:`float`): time interval in which the action is performed\n\nRaises:\nPocoNoSuchNodeException: raised when the UI element does not exist", "source": "codesearchnet"}
{"code": "def _unicode_def_src_to_str(srclist: List[Union[str, int]]) -> str:\n    \n    charlist = []  \n    for src in srclist:\n        if isinstance(src, int):\n            charlist.append(chr(src))\n        else:\n            \n            first, last = [int(x, 16) for x in src.split(\"-\")]\n            charlist += [chr(x) for x in range(first, last + 1)]\n    return \"\".join(charlist)", "docstring": "Used to create :data:`UNICODE_CATEGORY_STRINGS`.\n\nArgs:\nsrclist: list of integers or hex range strings like ``\"0061-007A\"``\n\nReturns:\na string with all characters described by ``srclist``: either the\ncharacter corresponding to the integer Unicode character number, or\nall characters corresponding to the inclusive range described", "source": "juraj-google-style"}
{"code": "def bytestring_to_tar_tuple(filename, bytes):\n    \n    info = tarfile.TarInfo(filename)\n    info.size = len(bytes)\n    return info, BytesIO(bytes)", "docstring": "Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion.\n\nArgs:\nbytes (bstring): Bytestring representation of the filedata.\nfilename (string): Filepath relative to tarfile root.\nReturns:\ntuple: (tarfile.TarInfo,io.BytesIO).\nThis can be passed directly to TarFile.addfile().", "source": "juraj-google-style"}
{"code": "def tar_add_bytes(tf, filename, bytestring):\n    if (not isinstance(bytestring, bytes)):\n        bytestring = bytestring.encode('ascii')\n    buff = io.BytesIO(bytestring)\n    tarinfo = tarfile.TarInfo(filename)\n    tarinfo.size = len(bytestring)\n    tf.addfile(tarinfo, buff)", "docstring": "Add a file to a tar archive\n\nArgs:\ntf (tarfile.TarFile): tarfile to add the file to\nfilename (str): path within the tar file\nbytestring (bytes or str): file contents. Must be :class:`bytes` or\nascii-encodable :class:`str`", "source": "codesearchnet"}
{"code": "def _lookup_in_all_namespaces(self, symbol):\n        \n        namespace = self.namespaces\n        \n        namespace_stack = []\n        for current in symbol.namespace_stack:\n            namespace = namespace.get(current)\n            if namespace is None or not isinstance(namespace, dict):\n                break\n            namespace_stack.append(namespace)\n\n        \n        \n        for namespace in reversed(namespace_stack):\n            try:\n                return self._lookup_namespace(symbol, namespace)\n            except Error:\n                pass\n        return None", "docstring": "Helper for lookup_symbol that looks for symbols in all namespaces.\n\nArgs:\nsymbol: Symbol", "source": "juraj-google-style"}
{"code": "def CheckSupportedFormat(cls, path, check_readable_only=False):\n    \n    try:\n      connection = sqlite3.connect(\n          path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n\n      cursor = connection.cursor()\n\n      query = 'SELECT * FROM metadata'\n      cursor.execute(query)\n\n      metadata_values = {row[0]: row[1] for row in cursor.fetchall()}\n\n      cls._CheckStorageMetadata(\n          metadata_values, check_readable_only=check_readable_only)\n\n      connection.close()\n      result = True\n\n    except (IOError, sqlite3.DatabaseError):\n      result = False\n\n    return result", "docstring": "Checks if the storage file format is supported.\n\nArgs:\npath (str): path to the storage file.\ncheck_readable_only (Optional[bool]): whether the store should only be\nchecked to see if it can be read. If False, the store will be checked\nto see if it can be read and written to.\n\nReturns:\nbool: True if the format is supported.", "source": "juraj-google-style"}
{"code": "def _GetStructureValue(self, structure, key):\n    value = structure.get(key)\n    return (value if (not isinstance(value, pyparsing.ParseResults)) else None)", "docstring": "Retrieves a value from a parsed log line, removing empty results.\n\nArgs:\nstructure (pyparsing.ParseResults): parsed log line.\nkey (str): results key to retrieve from the parsed log line.\n\nReturns:\ntype or None: the value of the named key in the parsed log line, or None\nif the value is a ParseResults object.", "source": "codesearchnet"}
{"code": "def objects(self, prefix=None, delimiter=None):\n    return _object.Objects(self._name, prefix, delimiter, context=self._context)", "docstring": "Get an iterator for the objects within this bucket.\n\nArgs:\nprefix: an optional prefix to match objects.\ndelimiter: an optional string to simulate directory-like semantics. The returned objects\nwill be those whose names do not contain the delimiter after the prefix. For\nthe remaining objects, the names will be returned truncated after the delimiter\nwith duplicates removed (i.e. as pseudo-directories).\nReturns:\nAn iterable list of objects within this bucket.", "source": "codesearchnet"}
{"code": "def _check(self, check, radl):\n        \n\n        \n        \n        if check[0] == float:\n            if not isinstance(self.value, int) and not isinstance(self.value, float):\n                raise RADLParseException(\"Invalid type; expected %s\" % check[0],\n                                         line=self.line)\n        elif check[0] == str:\n            if not isinstance(self.value, str) and not isinstance(self.value, unicode):\n                raise RADLParseException(\"Invalid type; expected %s\" % check[0],\n                                         line=self.line)\n        else:\n            if not isinstance(self.value, check[0]):\n                raise RADLParseException(\"Invalid type; expected %s\" % check[0],\n                                         line=self.line)\n        \n        if (isinstance(self.value, str) or isinstance(self.value, unicode)) and self.prop.find('version') == -1:\n            if self.operator != \"=\":\n                raise RADLParseException(\"Invalid operator; expected '='\",\n                                         line=self.line)\n        elif isinstance(self.value, int) or isinstance(self.value, float) or self.prop.find('version') >= 0:\n            if self.operator not in [\"=\", \"<=\", \">=\", \">\", \"<\"]:\n                raise RADLParseException(\"Invalid operator; expected '=', '<=', \" +\n                                         \"'>=', '>' or '<'\", line=self.line)\n        elif isinstance(self.value, Features):\n            if self.operator != \"contains\":\n                raise RADLParseException(\n                    \"Invalid operator; expected 'contains'\", line=self.line)\n        \n        if isinstance(check[1], list):\n            if self.value.upper() not in check[1]:\n                raise RADLParseException(\"Invalid value; expected one of %s\" % check[1],\n                                         line=self.line)\n        elif callable(check[1]):\n            if not check[1](self, radl):\n                raise RADLParseException(\"Invalid value in property '%s'\" % self.prop, line=self.line)\n        \n        if len(check) < 3 or check[2] is None:\n            if self.unit:\n                raise RADLParseException(\"Invalid unit; expected none\", line=self.line)\n        elif len(check) > 2 and check[2]:\n            if self.unit.upper() not in check[2]:\n                raise RADLParseException(\n                    \"Invalid unit; expected one of %s\" % check[2], line=self.line)\n        return True", "docstring": "Check type, operator and unit in a feature.\n\nArgs:\n- check(tuple):\n- v[0]: expected type of the feature value.\n- v[1]: can be a list of possible values or a function to test the value or None.\n- v[2] (optional): can be a list of possible units; if None or not set the\nunit valid is none.\n- radl: second argument passed when calling v[1].", "source": "juraj-google-style"}
{"code": "def setup_test_logger(log_path, prefix=None, filename=None):\n    \n    utils.create_dir(log_path)\n    _setup_test_logger(log_path, prefix)\n    logging.info('Test output folder: \"%s\"', log_path)\n    create_latest_log_alias(log_path)", "docstring": "Customizes the root logger for a test run.\n\nArgs:\nlog_path: Location of the report file.\nprefix: A prefix for each log line in terminal.\nfilename: Name of the files. The default is the time the objects\nare requested.", "source": "juraj-google-style"}
{"code": "def _CreateDictReader(self, line_reader):\n    \n    delimiter = self.DELIMITER\n    quotechar = self.QUOTE_CHAR\n    magic_test_string = self._MAGIC_TEST_STRING\n    \n    if py2to3.PY_3:\n      delimiter = delimiter.decode(self._encoding)\n      quotechar = quotechar.decode(self._encoding)\n      magic_test_string = magic_test_string.decode(self._encoding)\n\n    return csv.DictReader(\n        line_reader, delimiter=delimiter, fieldnames=self.COLUMNS,\n        quotechar=quotechar, restkey=magic_test_string,\n        restval=magic_test_string)", "docstring": "Returns a reader that processes each row and yields dictionaries.\n\ncsv.DictReader does this job well for single-character delimiters; parsers\nthat need multi-character delimiters need to override this method.\n\nArgs:\nline_reader (iter): yields lines from a file-like object.\n\nReturns:\niter: a reader of dictionaries, as returned by csv.DictReader().", "source": "juraj-google-style"}
{"code": "def run_example(example_cls: Example, args=None):\n    \n    values = parse_args(args)\n    window_cls = get_window_cls(values.window)\n\n    window = window_cls(\n        title=example_cls.title,\n        size=example_cls.window_size,\n        fullscreen=values.fullscreen,\n        resizable=example_cls.resizable,\n        gl_version=example_cls.gl_version,\n        aspect_ratio=example_cls.aspect_ratio,\n        vsync=values.vsync,\n        samples=values.samples,\n        cursor=values.cursor,\n    )\n\n    window.example = example_cls(ctx=window.ctx, wnd=window)\n\n    start_time = time.time()\n    current_time = start_time\n    prev_time = start_time\n    frame_time = 0\n\n    while not window.is_closing:\n        current_time, prev_time = time.time(), current_time\n        frame_time = max(current_time - prev_time, 1 / 1000)\n\n        window.render(current_time - start_time, frame_time)\n        window.swap_buffers()\n\n    duration = time.time() - start_time\n    window.destroy()\n    print(\"Duration: {0:.2f}s @ {1:.2f} FPS\".format(duration, window.frames / duration))", "docstring": "Run an example entering a blocking main loop\n\nArgs:\nexample_cls: The exmaple class to render\nargs: Override sys.args", "source": "juraj-google-style"}
{"code": "def _FormatSubjectOrProcessToken(self, token_data):\n    \n    ip_address = self._FormatPackedIPv4Address(token_data.ip_address)\n    return {\n        'aid': token_data.audit_user_identifier,\n        'euid': token_data.effective_user_identifier,\n        'egid': token_data.effective_group_identifier,\n        'uid': token_data.real_user_identifier,\n        'gid': token_data.real_group_identifier,\n        'pid': token_data.process_identifier,\n        'session_id': token_data.session_identifier,\n        'terminal_port': token_data.terminal_port,\n        'terminal_ip': ip_address}", "docstring": "Formats a subject or process token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_subject32|bsm_token_data_subject64):\nAUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token\ndata.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def add_residues_highlight_to_nglview(view, structure_resnums, chain, res_color='red'):\n    \n    chain = ssbio.utils.force_list(chain)\n\n    if isinstance(structure_resnums, list):\n        structure_resnums = list(set(structure_resnums))\n    elif isinstance(structure_resnums, int):\n        structure_resnums = ssbio.utils.force_list(structure_resnums)\n    else:\n        raise ValueError('Input must either be a residue number of a list of residue numbers')\n\n    to_show_chains = '( '\n    for c in chain:\n        to_show_chains += ':{} or'.format(c)\n    to_show_chains = to_show_chains.strip(' or ')\n    to_show_chains += ' )'\n\n    to_show_res = '( '\n    for m in structure_resnums:\n        to_show_res += '{} or '.format(m)\n    to_show_res = to_show_res.strip(' or ')\n    to_show_res += ' )'\n\n    log.info('Selection: {} and not hydrogen and {}'.format(to_show_chains, to_show_res))\n\n    view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res), color=res_color)", "docstring": "Add a residue number or numbers to an NGLWidget view object.\n\nArgs:\nview (NGLWidget): NGLWidget view object\nstructure_resnums (int, list): Residue number(s) to highlight, structure numbering\nchain (str, list): Chain ID or IDs of which residues are a part of. If not provided, all chains in the\nmapped_chains attribute will be used. If that is also empty, and exception is raised.\nres_color (str): Color to highlight residues with", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    devices = match.get('Devices', {})\n    for device_identifier, device_information in iter(devices.items()):\n      datetime_value = device_information.get('Connected', None)\n      if not datetime_value:\n        continue\n\n      event_data = IPodPlistEventData()\n      event_data.device_id = device_identifier\n\n      \n      for key, value in iter(device_information.items()):\n        if key == 'Connected':\n          continue\n        attribute_name = key.lower().replace(' ', '_')\n        setattr(event_data, attribute_name, value)\n\n      event = time_events.PythonDatetimeEvent(\n          datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract device information from the iPod plist.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def extract_random_video_patch(videos, num_frames=-1):\n  \n  if num_frames == -1:\n    return videos\n  batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)\n  if num_total_frames < num_frames:\n    raise ValueError(\"Expected num_frames <= %d, got %d\" %\n                     (num_total_frames, num_frames))\n\n  \n  frame_start = tf.random_uniform(\n      shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1,\n      dtype=tf.int32)\n\n  \n  \n  range_inds = tf.expand_dims(tf.range(num_frames), axis=0)\n  frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)\n  frame_inds = tf.reshape(frame_inds, [-1])\n\n  \n  batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)\n  batch_inds = tf.tile(batch_inds, [1, num_frames])\n  batch_inds = tf.reshape(batch_inds, [-1])\n\n  gather_inds = tf.stack((batch_inds, frame_inds), axis=1)\n  video_patches = tf.gather_nd(videos, gather_inds)\n  return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))", "docstring": "For every video, extract a random consecutive patch of num_frames.\n\nArgs:\nvideos: 5-D Tensor, (NTHWC)\nnum_frames: Integer, if -1 then the entire video is returned.\nReturns:\nvideo_patch: 5-D Tensor, (NTHWC) with T = num_frames.\nRaises:\nValueError: If num_frames is greater than the number of total frames in\nthe video.", "source": "juraj-google-style"}
{"code": "def analogy_rank_score(analogies, word_vectors, no_threads=1):\n    input_vectors = ((word_vectors[analogies[(:, 1)]] + word_vectors[analogies[(:, 2)]]) - word_vectors[analogies[(:, 0)]])\n    word_vector_norms = np.linalg.norm(word_vectors, axis=1)\n    rank_violations = np.zeros(input_vectors.shape[0], dtype=np.int32)\n    compute_rank_violations(word_vectors, word_vector_norms, input_vectors, analogies[(:, 3)], analogies, rank_violations, no_threads)\n    return (rank_violations / float(word_vectors.shape[0]))", "docstring": "Calculate the analogy rank score for the given set of analogies.\n\nA rank of zero denotes a perfect score; with random word vectors\nwe would expect a rank of 0.5.\n\nArguments:\n- analogies: a numpy array holding the ids of the words in the analogy tasks,\nas constructed by `construct_analogy_test_set`.\n- word_vectors: numpy array holding the word vectors to use.\n- num_threads: number of parallel threads to use in the calculation.\n\nReturns:\n- ranks: a numpy array holding the normalized rank of the target word\nin each analogy task. Rank 0 means that the target words was\nreturned first; rank 1 means it was returned last.", "source": "codesearchnet"}
{"code": "def find(pcoll, regex, group=0):\n    regex = Regex._regex_compile(regex)\n\n    def _process(element):\n        r = regex.search(element)\n        if r:\n            yield r.group(group)\n    return pcoll | FlatMap(_process)", "docstring": "Returns the matches if a portion of the line matches the Regex. Returns\nthe entire group (group 0 by default). Group can be integer value or a\nstring value.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\ngroup: (optional) name of the group, it can be integer or a string value.", "source": "github-repos"}
{"code": "def get_drives(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri=id_or_uri) + self.DRIVES_PATH)\n    return self._client.get(id_or_uri=uri)", "docstring": "Gets the list of drives allocated to this SAS logical JBOD.\n\nArgs:\nid_or_uri: Can be either the SAS logical JBOD ID or the SAS logical JBOD URI.\n\nReturns:\nlist: A list of Drives", "source": "codesearchnet"}
{"code": "def list_street_poi_parking(self, **kwargs):\n        \n        \n        url_args = {\n            'language': util.language_code(kwargs.get('lang')),\n            'address': kwargs.get('address', '')\n        }\n\n        \n        result = self.make_request('list_street_poi_parking', url_args)\n\n        if not util.check_result(result):\n            return False, result.get('message', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'Data')\n        return True, [emtype.ParkingPoi(**a) for a in values]", "docstring": "Obtain a list of addresses and POIs.\n\nThis endpoint uses an address to perform the search\n\nArgs:\nlang (str): Language code (*es* or *en*).\naddress (str): Address in which to perform the search.\n\nReturns:\nStatus boolean and parsed response (list[ParkingPoi]), or message\nstring in case of error.", "source": "juraj-google-style"}
{"code": "def save_json(obj, filename, **kwargs):\n    \n\n    with open(filename, 'w', encoding='utf-8') as f:\n        json.dump(obj, f, **kwargs)", "docstring": "Save an object as a JSON file.\n\nArgs:\nobj: The object to save. Must be JSON-serializable.\nfilename: Path to the output file.\n**kwargs: Additional arguments to `json.dump`.", "source": "juraj-google-style"}
{"code": "def __init__(self, graph=None, op_log=None):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    self._coverage = 0.0\n    self._graph = graph\n    op_log = tfprof_logger.merge_default_with_oplog(self._graph, op_log=op_log)\n    print_mdl.NewProfiler(_graph_string(self._graph), op_log.SerializeToString())", "docstring": "Constructor.\n\nArgs:\ngraph: tf.Graph. If None and eager execution is not enabled, use default\ngraph.\nop_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define\nextra op types.", "source": "github-repos"}
{"code": "def __init__(self,\n                 domain_mapper,\n                 mode='classification',\n                 class_names=None,\n                 random_state=None):\n        \n        self.random_state = random_state\n        self.mode = mode\n        self.domain_mapper = domain_mapper\n        self.local_exp = {}\n        self.intercept = {}\n        self.score = None\n        self.local_pred = None\n        self.scaled_data = None\n        if mode == 'classification':\n            self.class_names = class_names\n            self.top_labels = None\n            self.predict_proba = None\n        elif mode == 'regression':\n            self.class_names = ['negative', 'positive']\n            self.predicted_value = None\n            self.min_value = 0.0\n            self.max_value = 1.0\n            self.dummy_label = 1\n        else:\n            raise LimeError('Invalid explanation mode \"{}\". '\n                            'Should be either \"classification\" '\n                            'or \"regression\".'.format(mode))", "docstring": "Initializer.\n\nArgs:\ndomain_mapper: must inherit from DomainMapper class\ntype: \"classification\" or \"regression\"\nclass_names: list of class names (only used for classification)\nrandom_state: an integer or numpy.RandomState that will be used to\ngenerate random numbers. If None, the random state will be\ninitialized using the internal numpy seed.", "source": "juraj-google-style"}
{"code": "def load(self, train=True, test=True, shuffle=True) -> tuple:\n        \n        return self.__load(self.__load_files, train, test, shuffle=shuffle)", "docstring": "Load the vectorized representations of the stored data files\nArgs:\ntrain: Whether to load train data\ntest: Whether to load test data", "source": "juraj-google-style"}
{"code": "def get_transcript_credentials_state_for_org(org, provider=None):\n    \n    query_filter = {'org': org}\n    if provider:\n        query_filter['provider'] = provider\n\n    return {\n        credential.provider: credential.exists\n        for credential in ThirdPartyTranscriptCredentialsState.objects.filter(**query_filter)\n    }", "docstring": "Returns transcript credentials state for an org\n\nArguments:\norg (unicode): course organization\nprovider (unicode): transcript provider\n\nReturns:\ndict: provider name and their credential existance map\n\n{\nu'Cielo24': True\n}\n{\nu'3PlayMedia': False,\nu'Cielo24': True\n}", "source": "juraj-google-style"}
{"code": "def _init_from_bool(self, z, x):\n        \n        if z is None:\n            raise QiskitError(\"z vector must not be None.\")\n        if x is None:\n            raise QiskitError(\"x vector must not be None.\")\n        if len(z) != len(x):\n            raise QiskitError(\"length of z and x vectors must be \"\n                              \"the same. (z: {} vs x: {})\".format(len(z), len(x)))\n\n        z = _make_np_bool(z)\n        x = _make_np_bool(x)\n        self._z = z\n        self._x = x\n\n        return self", "docstring": "Construct pauli from boolean array.\n\nArgs:\nz (numpy.ndarray): boolean, z vector\nx (numpy.ndarray): boolean, x vector\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: if z or x are None or the length of z and x are different.", "source": "juraj-google-style"}
{"code": "def set_smartplug_state(self, device_label, state):\n    response = None\n    try:\n        response = requests.post(urls.smartplug(self._giid), headers={'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps([{'deviceLabel': device_label, 'state': state}]))\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)", "docstring": "Turn on or off smartplug\n\nArgs:\ndevice_label (str): Smartplug device label\nstate (boolean): new status, 'True' or 'False'", "source": "codesearchnet"}
{"code": "def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):\n    callsign = callsign.strip().upper()\n    if (self._lookuptype == 'clublogxml'):\n        return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)\n    elif (self._lookuptype == 'redis'):\n        (data_dict, index) = self._get_dicts_from_redis('_zone_ex_', '_zone_ex_index_', self._redis_prefix, callsign)\n        return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)\n    raise KeyError", "docstring": "Returns a CQ Zone if an exception exists for the given callsign\n\nArgs:\ncallsign (string): Amateur radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: Value of the the CQ Zone exception which exists for this callsign (at the given time)\n\nRaises:\nKeyError: No matching callsign found\nAPIKeyMissingError: API Key for Clublog missing or incorrect\n\nExample:\nThe following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.\n\n>>> from pyhamtools import LookupLib\n>>> my_lookuplib = LookupLib(lookuptype=\"clublogxml\", apikey=\"myapikey\")\n>>> print my_lookuplib.lookup_zone_exception(\"DP0GVN\")\n38\n\nThe prefix \"DP\" It is assigned to Germany, but the station is located in Antarctica, and therefore\nin CQ Zone 38\n\nNote:\nThis method is available for\n\n- clublogxml\n- redis", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0) + [1]\n    return [1] + [0] * len(token_ids_0) + [1] + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def pairwise_iou(boxlist1, boxlist2):\n    \n    intersections = pairwise_intersection(boxlist1, boxlist2)\n    areas1 = area(boxlist1)\n    areas2 = area(boxlist2)\n    unions = (\n        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)\n    return tf.where(\n        tf.equal(intersections, 0.0),\n        tf.zeros_like(intersections), tf.truediv(intersections, unions))", "docstring": "Computes pairwise intersection-over-union between box collections.\n\nArgs:\nboxlist1: Nx4 floatbox\nboxlist2: Mx4\n\nReturns:\na tensor with shape [N, M] representing pairwise iou scores.", "source": "juraj-google-style"}
{"code": "def read_from_directory(self, dataset_info_dir):\n    \n    if not dataset_info_dir:\n      raise ValueError(\n          \"Calling read_from_directory with undefined dataset_info_dir.\")\n\n    json_filename = self._dataset_info_filename(dataset_info_dir)\n\n    \n    parsed_proto = read_from_json(json_filename)\n\n    \n    self._set_splits(splits_lib.SplitDict.from_proto(parsed_proto.splits))\n\n    \n    if self.features:\n      self.features.load_metadata(dataset_info_dir)\n\n    \n    \n    \n    for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():\n      field_value = getattr(self._info_proto, field_name)\n      field_value_restored = getattr(parsed_proto, field_name)\n\n      try:\n        is_defined = self._info_proto.HasField(field_name)\n      except ValueError:\n        is_defined = bool(field_value)\n\n      try:\n        is_defined_in_restored = parsed_proto.HasField(field_name)\n      except ValueError:\n        is_defined_in_restored = bool(field_value_restored)\n\n      \n      if is_defined:\n        if field_value != field_value_restored:\n          logging.info(\n              \"Field info.%s from disk and from code do not match. Keeping \"\n              \"the one from code.\", field_name)\n        continue\n      \n      if not is_defined_in_restored:\n        continue\n      \n      if field.type == field.TYPE_MESSAGE:\n        field_value.MergeFrom(field_value_restored)\n      else:\n        setattr(self._info_proto, field_name, field_value_restored)\n\n    if self._builder._version != self.version:  \n      raise AssertionError(\n          \"The constructed DatasetInfo instance and the restored proto version \"\n          \"do not match. Builder version: {}. Proto version: {}\".format(\n              self._builder._version, self.version))  \n\n    \n    self._fully_initialized = True", "docstring": "Update DatasetInfo from the JSON file in `dataset_info_dir`.\n\nThis function updates all the dynamically generated fields (num_examples,\nhash, time of creation,...) of the DatasetInfo.\n\nThis will overwrite all previous metadata.\n\nArgs:\ndataset_info_dir: `str` The directory containing the metadata file. This\nshould be the root directory of a specific dataset version.", "source": "juraj-google-style"}
{"code": "def input_streams(self):\n    streams = []\n    for (walker, _trigger) in self.inputs:\n        if ((walker.selector is None) or (not walker.selector.singular)):\n            continue\n        streams.append(walker.selector.as_stream())\n    return streams", "docstring": "Return a list of DataStream objects for all singular input streams.\n\nThis function only returns individual streams, not the streams that would\nbe selected from a selector like 'all outputs' for example.\n\nReturns:\nlist(DataStream): A list of all of the individual DataStreams that are inputs\nof the node.  Input selectors that select multiple streams are not included", "source": "codesearchnet"}
{"code": "def webhook(self, webhook_url):\n        \n        if not webhook_url:\n            raise Exception('Url can not be None')\n\n        matcher = re.match(self.__webhook_url_format, webhook_url)\n        if not matcher:\n            raise Exception('Invalid url format, looking for: ' + self.__webhook_url_format)\n\n        self.api_keys(int(matcher.group(1)), matcher.group(2))", "docstring": "Load object with webhook_url\n\nArgs:\nwebhook_url (str): full webhook url given by Discord 'create webhook' func", "source": "juraj-google-style"}
{"code": "def WriteRow(self, values):\n    \n    precondition.AssertDictType(values, text, text)\n\n    row = []\n    for column in self._columns:\n      try:\n        value = values[column]\n      except KeyError:\n        raise ValueError(\"Row does not contain required column `%s`\" % column)\n\n      row.append(value)\n\n    self._writer.WriteRow(row)", "docstring": "Writes a single row to the underlying buffer.\n\nArgs:\nvalues: A dictionary mapping column names to values to be inserted into\nthe CSV output.", "source": "juraj-google-style"}
{"code": "def check_configuration(ctx, base_key, needed_keys):\n    \n\n    \n    if base_key not in ctx.keys():\n        exit(\"[{}ERROR{}] missing configuration for '{}'\"\n             .format(ERROR_COLOR, RESET_COLOR, base_key))\n        \n    if ctx.releaser is None:\n        exit(\"[{}ERROR{}] empty configuration for '{}' found\"\n             .format(ERROR_COLOR, RESET_COLOR, base_key))\n        \n\n    \n    for my_key in needed_keys:\n        if my_key not in ctx[base_key].keys():\n            exit(\"[{}ERROR{}] missing configuration key '{}.{}'\"\n                 .format(ERROR_COLOR, RESET_COLOR, base_key, my_key))", "docstring": "Confrim a valid configuration.\n\nArgs:\nctx (invoke.context):\nbase_key (str): the base configuration key everything is under.\nneeded_keys (list): sub-keys of the base key that are checked to make\nsure they exist.", "source": "juraj-google-style"}
{"code": "def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):\n    key_flags = self._GetKeyFlagsForModule(module)\n    if key_flags:\n        self.__RenderModuleFlags(module, key_flags, output_lines, prefix)", "docstring": "Generates a help string for the key flags of a given module.\n\nArgs:\nmodule: A module object or a module name (a string).\noutput_lines: A list of strings.  The generated help message\nlines will be appended to this list.\nprefix: A string that is prepended to each generated help line.", "source": "codesearchnet"}
{"code": "def find_by(cls, payload, require=False):\n    if (not isinstance(payload, dict)):\n        raise ValueError(\"The 'payload' parameter must be provided a dictionary object.\")\n    url = os.path.join(cls.URL, 'find_by')\n    payload = {'find_by': payload}\n    cls.debug_logger.debug('Searching Pulsar {} for {}'.format(cls.__name__, json.dumps(payload, indent=4)))\n    res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)\n    res.raise_for_status()\n    res_json = res.json()\n    if res_json:\n        try:\n            res_json = res_json[cls.MODEL_NAME]\n        except KeyError:\n            pass\n    elif require:\n        raise RecordNotFound(\"Can't find any {} records with search criteria: '{}'.\".format(cls.__name__, payload))\n    return res_json", "docstring": "Searches the model in question by AND joining the query parameters.\n\nImplements a Railsy way of looking for a record using a method by the same name and passing\nin the query as a dict. as well. Only the first hit is returned, and there is no particular\nordering specified in the server-side API method.\n\nArgs:\npayload: `dict`. The attributes of a record to restrict the search to.\nrequire: `bool`. True means to raise a `pulsarpy.models.RecordNotFound` exception if no\nrecord is found.\n\nReturns:\n`dict`: The JSON serialization of the record, if any, found by the API call.\n`None`: If the API call didnt' return any results.\n\nRaises:\n`pulsarpy.models.RecordNotFound`: No records were found, and the `require` parameter is\nTrue.", "source": "codesearchnet"}
{"code": "def _GetTimeValues(self, number_of_seconds):\n    \n    number_of_seconds = int(number_of_seconds)\n    number_of_minutes, seconds = divmod(number_of_seconds, 60)\n    number_of_hours, minutes = divmod(number_of_minutes, 60)\n    number_of_days, hours = divmod(number_of_hours, 24)\n    return number_of_days, hours, minutes, seconds", "docstring": "Determines time values.\n\nArgs:\nnumber_of_seconds (int|decimal.Decimal): number of seconds.\n\nReturns:\ntuple[int, int, int, int]: days, hours, minutes, seconds.", "source": "juraj-google-style"}
{"code": "class _BaseThresholdDoFn(beam.DoFn):\n\n    def __init__(self, threshold_fn_spec: Spec):\n        self._threshold_fn_spec = threshold_fn_spec\n\n    def _apply_threshold_to_predictions(self, result: AnomalyResult) -> AnomalyResult:\n        \n        predictions = [dataclasses.replace(p, label=self._threshold_fn.apply(p.score), threshold=self._threshold_fn.threshold) for p in result.predictions]\n        return dataclasses.replace(result, predictions=predictions)", "docstring": "Applies a ThresholdFn to anomaly detection results.\n\nThis abstract base class defines the structure for DoFns that use a\n`ThresholdFn` to convert anomaly scores into anomaly labels (e.g., normal\nor outlier). It handles the core logic of applying the threshold function\nand updating the prediction labels within `AnomalyResult` objects.\n\nArgs:\nthreshold_fn_spec (Spec): Specification defining the `ThresholdFn` to be\nused.", "source": "github-repos"}
{"code": "def CreateDefaultPartition(client, ad_group_id):\n  \n  ad_group_criterion_service = client.GetService('AdGroupCriterionService',\n                                                 version='v201809')\n\n  operations = [{\n      'operator': 'ADD',\n      'operand': {\n          'xsi_type': 'BiddableAdGroupCriterion',\n          'adGroupId': ad_group_id,\n          \n          \n          \n          'criterion': {\n              'xsi_type': 'ProductPartition',\n              'partitionType': 'UNIT'\n          },\n          'biddingStrategyConfiguration': {\n              'bids': [{\n                  'xsi_type': 'CpcBid',\n                  'bid': {\n                      'microAmount': 500000\n                  }\n              }]\n          }\n      }\n  }]\n\n  ad_group_criterion = ad_group_criterion_service.mutate(operations)['value'][0]\n\n  print ('Ad group criterion with ID \"%d\" in ad group with ID \"%d\" was added.'\n         % (ad_group_criterion['criterion']['id'],\n            ad_group_criterion['adGroupId']))", "docstring": "Creates a default partition.\n\nArgs:\nclient: an AdWordsClient instance.\nad_group_id: an integer ID for an ad group.", "source": "juraj-google-style"}
{"code": "def subspace_index(self, little_endian_bits_int: int) -> Tuple[(Union[(slice, int, 'ellipsis')], ...)]:\n    return linalg.slice_for_qubits_equal_to(self.axes, little_endian_bits_int)", "docstring": "An index for the subspace where the target axes equal a value.\n\nArgs:\nlittle_endian_bits_int: The desired value of the qubits at the\ntargeted `axes`, packed into an integer. The least significant\nbit of the integer is the desired bit for the first axis, and\nso forth in increasing order.\n\nReturns:\nA value that can be used to index into `target_tensor` and\n`available_buffer`, and manipulate only the part of Hilbert space\ncorresponding to a given bit assignment.\n\nExample:\nIf `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and\nthen this method will return the following when given\n`little_endian_bits=0b01`:\n\n`(slice(None), 0, slice(None), 1, Ellipsis)`\n\nTherefore the following two lines would be equivalent:\n\nargs.target_tensor[args.subspace_index(0b01)] += 1\n\nargs.target_tensor[:, 0, :, 1] += 1", "source": "codesearchnet"}
{"code": "def __strip_extra_attributes(self, node: yaml.Node,\n                                 known_attrs: List[str]) -> None:\n        \n        known_keys = list(known_attrs)\n        known_keys.remove('self')\n        if 'yatiml_extra' in known_keys:\n            known_keys.remove('yatiml_extra')\n\n        for key_node, value_node in node.value:\n            if (not isinstance(key_node, yaml.ScalarNode)\n                    or key_node.tag != 'tag:yaml.org,2002:str'):\n                raise RecognitionError(\n                    ('{}{}Mapping keys that are not of type'\n                     ' string are not supported by YAtiML.').format(\n                         node.start_mark, os.linesep))\n            if key_node.value not in known_keys:\n                self.__strip_tags(value_node)", "docstring": "Strips tags from extra attributes.\n\nThis prevents nodes under attributes that are not part of our \\\ndata model from being converted to objects. They'll be plain \\\nCommentedMaps instead, which then get converted to OrderedDicts \\\nfor the user.\n\nArgs:\nnode: The node to process\nknown_attrs: The attributes to not strip", "source": "juraj-google-style"}
{"code": "def rename_keys(d: Dict[(str, Any)], mapping: Dict[(str, str)]) -> Dict[(str, Any)]:\n    result = {}\n    for (k, v) in d.items():\n        if (k in mapping):\n            k = mapping[k]\n        result[k] = v\n    return result", "docstring": "Returns a copy of the dictionary ``d`` with its keys renamed according to\n``mapping``.\n\nArgs:\nd: the starting dictionary\nmapping: a dictionary of the format ``{old_key_name: new_key_name}``\n\nReturns:\na new dictionary\n\nKeys that are not in ``mapping`` are left unchanged.\nThe input parameters are not modified.", "source": "codesearchnet"}
{"code": "def calc_checksum(sentence):\n    \n    if sentence.startswith('$'):\n        sentence = sentence[1:]\n    sentence = sentence.split('*')[0]\n    return reduce(xor, map(ord, sentence))", "docstring": "Calculate a NMEA 0183 checksum for the given sentence.\n\nNMEA checksums are a simple XOR of all the characters in the sentence\nbetween the leading \"$\" symbol, and the \"*\" checksum separator.\n\nArgs:\nsentence (str): NMEA 0183 formatted sentence", "source": "juraj-google-style"}
{"code": "def reset_sequence(cls, value=None, force=False):\n        \n        cls._meta.reset_sequence(value, force=force)", "docstring": "Reset the sequence counter.\n\nArgs:\nvalue (int or None): the new 'next' sequence value; if None,\nrecompute the next value from _setup_next_sequence().\nforce (bool): whether to force-reset parent sequence counters\nin a factory inheritance chain.", "source": "juraj-google-style"}
{"code": "def get_session_tensor(handle, dtype, name=None):\n    handle_device = TensorHandle._get_device_name(handle)\n    with ops.device(handle_device):\n        holder = array_ops.placeholder(dtypes.string)\n        _register_handle_feeder(holder.graph, holder, dtype)\n        tensor = gen_data_flow_ops.get_session_tensor(holder, dtype, name=name)\n    return (holder, tensor)", "docstring": "Get the tensor of type `dtype` by feeding a tensor handle.\n\nThis is EXPERIMENTAL and subject to change.\n\nGet the value of the tensor from a tensor handle. The tensor\nis produced in a previous run() and stored in the state of the\nsession.\n\nArgs:\nhandle: The string representation of a persistent tensor handle.\ndtype: The type of the output tensor.\nname: Optional name prefix for the return tensor.\n\nReturns:\nA pair of tensors. The first is a placeholder for feeding a\ntensor handle and the second is the tensor in the session state\nkeyed by the tensor handle.\n\nExample:\n\n```python\nc = tf.multiply(a, b)\nh = tf.compat.v1.get_session_handle(c)\nh = sess.run(h)\n\np, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32)\nb = tf.multiply(a, 10)\nc = sess.run(b, feed_dict={p: h.handle})\n```", "source": "github-repos"}
{"code": "def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):\n    raise NotImplementedError", "docstring": "Creates the TensorFlow operations for calculating the loss per batch instance.\n\nArgs:\nstates: Dict of state tensors.\ninternals: Dict of prior internal state tensors.\nactions: Dict of action tensors.\nterminal: Terminal boolean tensor.\nreward: Reward tensor.\nnext_states: Dict of successor state tensors.\nnext_internals: List of posterior internal state tensors.\nupdate: Boolean tensor indicating whether this call happens during an update.\nreference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\nLoss per instance tensor.", "source": "codesearchnet"}
{"code": "def merge_from(self, dev):\n    self.job, self.replica, self.task, self.device_type, self.device_index = self._get_combined_properties(dev)", "docstring": "Merge the properties of \"dev\" into this `DeviceSpec`.\n\nNote: Will be removed in TensorFlow 2.x since DeviceSpecs will become\nimmutable.\n\nArgs:\ndev: a `DeviceSpec`.", "source": "github-repos"}
{"code": "def __init__(self, request_builder, upload_url, current_content_length=0,\n               is_last=False):\n    \n    self._request_builder = request_builder\n    if current_content_length < 0:\n      raise googleads.errors.GoogleAdsValueError(\n          'Current content length %s is < 0.' % current_content_length)\n    self._current_content_length = current_content_length\n    self._is_last = is_last\n    self._url_opener = urllib2.build_opener(\n        *self._request_builder.client.proxy_config.GetHandlers())\n    if self._request_builder.client.custom_http_headers:\n      self._url_opener.addheaders.extend(\n          self._request_builder.client.custom_http_headers.items())\n\n    self._upload_url = self._InitializeURL(upload_url, current_content_length)", "docstring": "Initializes the IncrementalUpload.\n\nArgs:\nrequest_builder: an AbstractUploadRequestBuilder instance.\nupload_url: a string url provided by the BatchJobService.\ncurrent_content_length: an integer identifying the current content length\nof data uploaded to the Batch Job.\nis_last: a boolean indicating whether this is the final increment.\nRaises:\nGoogleAdsValueError: if the content length is lower than 0.", "source": "juraj-google-style"}
{"code": "def SetDayOfWeekHasService(self, dow, has_service=True):\n    \n    assert(dow >= 0 and dow < 7)\n    self.day_of_week[dow] = has_service", "docstring": "Set service as running (or not) on a day of the week. By default the\nservice does not run on any days.\n\nArgs:\ndow: 0 for Monday through 6 for Sunday\nhas_service: True if this service operates on dow, False if it does not.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _get_shoulds(options):\n    if (options.version == '2.0'):\n        return shoulds20.list_shoulds(options)\n    else:\n        return shoulds21.list_shoulds(options)", "docstring": "Return the list of 'SHOULD' validators for the correct version of STIX.\n\nArgs:\noptions: ValidationOptions instance with validation options for this\nvalidation run, including the STIX spec version.", "source": "codesearchnet"}
{"code": "def trivial_reward(example):\n    return example", "docstring": "Reward for the trivial search space.\n\nThe reward (i.e. fitness) is the value itself. The goal of the search,\ntherefore, is to find the value 1.\n\nArgs:\nexample: a materialized value.\n\nReturns:\nThe corresponding reward.", "source": "github-repos"}
{"code": "def find_files(paths, file_predicate):\n    \n    file_list = []\n    for path in paths:\n        p = abs_path(path)\n        for dirPath, _, fileList in os.walk(p):\n            for fname in fileList:\n                name, ext = os.path.splitext(fname)\n                if file_predicate(name, ext):\n                    file_list.append((dirPath, name, ext))\n    return file_list", "docstring": "Locate files whose names and extensions match the given predicate in\nthe specified directories.\n\nArgs:\npaths: A list of directory paths where to find the files.\nfile_predicate: A function that returns True if the file name and\nextension are desired.\n\nReturns:\nA list of files that match the predicate.", "source": "juraj-google-style"}
{"code": "def format_arguments(*args):\n    positional_args = []\n    kwargs = {}\n    split_key = None\n    for arg in args:\n        if arg.startswith('--'):\n            arg = arg[2:]\n            if ('=' in arg):\n                (key, value) = arg.split('=', 1)\n                kwargs[key.replace('-', '_')] = value\n            else:\n                split_key = arg.replace('-', '_')\n        elif split_key:\n            kwargs[split_key] = arg\n            split_key = None\n        else:\n            positional_args.append(arg)\n    return (positional_args, kwargs)", "docstring": "Converts a list of arguments from the command line into a list of\npositional arguments and a dictionary of keyword arguments.\n\nHandled formats for keyword arguments are:\n* --argument=value\n* --argument value\n\nArgs:\n*args (list): a list of arguments\n\nReturns:\n([positional_args], {kwargs})", "source": "codesearchnet"}
{"code": "def __add__(self, other):\n        \n        sum_roc = DistributedROC(self.thresholds, self.obs_threshold)\n        sum_roc.contingency_tables = self.contingency_tables + other.contingency_tables\n        return sum_roc", "docstring": "Add two DistributedROC objects together and combine their contingency table values.\n\nArgs:\nother: Another DistributedROC object.", "source": "juraj-google-style"}
{"code": "def maybe_download(self, filename, work_directory, source_url):\n        \n        if not os.path.exists(work_directory):\n            os.makedirs(work_directory)\n        filepath = os.path.join(work_directory, filename)\n        if not os.path.exists(filepath):\n            temp_file_name, _ = urllib.request.urlretrieve(source_url)\n            copyfile(temp_file_name, filepath)\n            print('Successfully downloaded', filename)\n        return filepath", "docstring": "Download the data from source url, unless it's already here.\nArgs:\nfilename: string, name of the file in the directory.\nwork_directory: string, path to working directory.\nsource_url: url to download from if file doesn't exist.\nReturns:\nPath to resulting file.", "source": "juraj-google-style"}
{"code": "def write_uint16(self, value, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.pack(('%sH' % endian), value)", "docstring": "Pack the value as an unsigned integer and write 2 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def ConvertMessage(self, value, message):\n    message_descriptor = message.DESCRIPTOR\n    full_name = message_descriptor.full_name\n    if _IsWrapperMessage(message_descriptor):\n        self._ConvertWrapperMessage(value, message)\n    elif (full_name in _WKTJSONMETHODS):\n        methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)\n    else:\n        self._ConvertFieldValuePair(value, message)", "docstring": "Convert a JSON object into a message.\n\nArgs:\nvalue: A JSON object.\nmessage: A WKT or regular protocol message to record the data.\n\nRaises:\nParseError: In case of convert problems.", "source": "codesearchnet"}
{"code": "def parse_docs(docs, marks):\n    \n    if docs is None:\n        return {}\n    indexs = []\n    for mark in marks:\n        i = docs.find(mark)\n        if i >= 0:\n            indexs.append(i)\n    if not indexs:\n        return {\"$desc\": textwrap.dedent(docs).strip()}\n    start = min(indexs)\n    start = docs.rfind(\"\\n\", 0, start)\n    yamltext = textwrap.dedent(docs[start + 1:])\n    meta = yaml.load(yamltext)\n    meta[\"$desc\"] = textwrap.dedent(docs[:start]).strip()\n    return meta", "docstring": "Parse YAML syntax content from docs\n\nIf docs is None, return {}\nIf docs has no YAML content, return {\"$desc\": docs}\nElse, parse YAML content, return {\"$desc\": docs, YAML}\n\nArgs:\ndocs (str): docs to be parsed\nmarks (list): list of which indicate YAML content starts\nReturns:\nA dict contains information of docs", "source": "juraj-google-style"}
{"code": "def RegisterHelper(cls, resolver_helper):\n    \n    if resolver_helper.type_indicator in cls._resolver_helpers:\n      raise KeyError((\n          'Resolver helper object already set for type indicator: '\n          '{0!s}.').format(resolver_helper.type_indicator))\n\n    cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper", "docstring": "Registers a path specification resolver helper.\n\nArgs:\nresolver_helper (ResolverHelper): resolver helper.\n\nRaises:\nKeyError: if resolver helper object is already set for the corresponding\ntype indicator.", "source": "juraj-google-style"}
{"code": "def GetFrequencyStopTimes(self, problems=None):\n    stoptimes_list = []\n    stoptime_pattern = self.GetStopTimes()\n    first_secs = stoptime_pattern[0].arrival_secs\n    stoptime_class = self.GetGtfsFactory().StopTime\n    for run_secs in self.GetFrequencyStartTimes():\n        stoptimes = []\n        for st in stoptime_pattern:\n            (arrival_secs, departure_secs) = (None, None)\n            if (st.arrival_secs != None):\n                arrival_secs = ((st.arrival_secs - first_secs) + run_secs)\n            if (st.departure_secs != None):\n                departure_secs = ((st.departure_secs - first_secs) + run_secs)\n            stoptimes.append(stoptime_class(problems=problems, stop=st.stop, arrival_secs=arrival_secs, departure_secs=departure_secs, stop_headsign=st.stop_headsign, pickup_type=st.pickup_type, drop_off_type=st.drop_off_type, shape_dist_traveled=st.shape_dist_traveled, stop_sequence=st.stop_sequence, timepoint=st.timepoint))\n        stoptimes_list.append(stoptimes)\n    return stoptimes_list", "docstring": "Return a list of StopTime objects for each headway-based run.\n\nReturns:\na list of list of StopTime objects. Each list of StopTime objects\nrepresents one run. If this trip doesn't have headways returns an empty\nlist.", "source": "codesearchnet"}
{"code": "def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n    dummies = {}\n    for key, spec in self.input_signature.items():\n        dummy_shape = [dim if dim is not None else 2 for dim in spec.shape]\n        if spec.shape[0] is None:\n            dummy_shape[0] = 1\n        dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype)\n        if key == 'token_type_ids':\n            dummies[key] = tf.zeros_like(dummies[key])\n    if self.config.add_cross_attention and 'encoder_hidden_states' in inspect.signature(self.call).parameters:\n        if 'encoder_hidden_states' not in dummies:\n            if self.main_input_name == 'input_ids':\n                dummies['encoder_hidden_states'] = tf.ones(shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name='encoder_hidden_states')\n            else:\n                raise NotImplementedError(\"Model has cross-attention but we couldn't infer the shape for the encoder hidden states. Please manually override dummy_inputs!\")\n    return dummies", "docstring": "Dummy inputs to build the network.\n\nReturns:\n`Dict[str, tf.Tensor]`: The dummy inputs.", "source": "github-repos"}
{"code": "def select_segments(self, jsonpath: str) -> List[Segment]:\n        \n        path = self.etk.parse_json_path(jsonpath)\n        matches = path.find(self.cdr_document)\n\n        segments = list()\n        for a_match in matches:\n            this_segment = Segment(str(a_match.full_path), a_match.value, self)\n            segments.append(this_segment)\n\n        return segments", "docstring": "Dereferences the json_path inside the document and returns the selected elements.\nThis method should compile and cache the compiled json_path in case the same path\nis reused by multiple extractors.\n\nArgs:\njsonpath (str): a valid JSON path.\n\nReturns: A list of Segments object that contains the elements selected by the json path.", "source": "juraj-google-style"}
{"code": "def fetch(self, customer_id, data={}, **kwargs):\n    return super(Customer, self).fetch(customer_id, data, **kwargs)", "docstring": "Fetch Customer for given Id\n\nArgs:\ncustomer_id : Id for which customer object has to be retrieved\n\nReturns:\nOrder dict for given customer Id", "source": "codesearchnet"}
{"code": "def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, caching_device=None, validate_shape=True, constraint=None, synchronization=vs.VariableSynchronization.AUTO, aggregation=vs.VariableAggregation.NONE):\n    initializing_from_value = False\n    if initializer is not None and (not callable(initializer)):\n        initializing_from_value = True\n    if shape is not None and initializing_from_value:\n        raise ValueError('If initializer is a constant, do not specify shape.')\n    dtype = dtypes.as_dtype(dtype)\n    shape = as_shape(shape)\n    if name in self._vars:\n        if reuse is False:\n            err_msg = 'Variable %s already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?' % name\n            raise ValueError(err_msg)\n        found_var = self._vars[name]\n        if not shape.is_compatible_with(found_var.get_shape()):\n            raise ValueError('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape()))\n        if not dtype.is_compatible_with(found_var.dtype):\n            dtype_str = dtype.name\n            found_type_str = found_var.dtype.name\n            raise ValueError('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str))\n        return found_var\n    if reuse is True:\n        raise ValueError('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=tf.AUTO_REUSE in VarScope?' % name)\n    if initializer is None:\n        initializer, initializing_from_value = self._get_default_initializer(name=name, shape=shape, dtype=dtype)\n    with ops.init_scope():\n        if initializing_from_value:\n            init_val = initializer\n            variable_dtype = None\n        else:\n            if tf_inspect.isclass(initializer):\n                initializer = initializer()\n            if shape.is_fully_defined():\n                if 'partition_info' in tf_inspect.getargspec(initializer).args:\n                    init_val = functools.partial(initializer, shape.as_list(), dtype=dtype, partition_info=partition_info)\n                else:\n                    init_val = functools.partial(initializer, shape.as_list(), dtype=dtype)\n                variable_dtype = dtype.base_dtype\n            else:\n                init_val = initializer\n                variable_dtype = None\n    with ops.init_scope():\n        v = variables.Variable(initial_value=init_val, name=name, trainable=trainable, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, synchronization=synchronization, aggregation=aggregation)\n    self._vars[name] = v\n    logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer)\n    if regularizer:\n        self.add_regularizer(v, regularizer)\n    return v", "docstring": "Get or create a single Variable (e.g.\n\na shard or entire variable).\n\nSee the documentation of get_variable above (ignore partitioning components)\nfor details.\n\nArgs:\nname: see get_variable.\nshape: see get_variable.\ndtype: see get_variable.\ninitializer: see get_variable.\nregularizer: see get_variable.\npartition_info: _PartitionInfo object.\nreuse: see get_variable.\ntrainable: see get_variable.\ncaching_device: see get_variable.\nvalidate_shape: see get_variable.\nconstraint: see 
get_variable.\nsynchronization: see get_variable.\naggregation: see get_variable.\n\nReturns:\nA Variable.  See documentation of get_variable above.\n\nRaises:\nValueError: See documentation of get_variable above.", "source": "github-repos"}
{"code": "def generate_contour_data(pid):\n    \n    \n    \n    if isinstance(pid, GenInput):\n        pid = pid.return_dict()\n\n    begin_time = time.time()\n\n    WORKING_DIRECTORY = '.'\n    if 'WORKING_DIRECTORY' not in pid['general'].keys():\n        pid['general']['WORKING_DIRECTORY'] = WORKING_DIRECTORY\n\n    \n    running_process = GenProcess(**{**pid, **pid['generate_info']})\n    running_process.set_parameters()\n    running_process.run_snr()\n\n    \n    file_out = FileReadOut(running_process.xvals, running_process.yvals,\n                           running_process.final_dict,\n                           **{**pid['general'], **pid['generate_info'], **pid['output_info']})\n\n    print('outputing file:', pid['general']['WORKING_DIRECTORY'] + '/'\n          + pid['output_info']['output_file_name'])\n    getattr(file_out, file_out.output_file_type + '_read_out')()\n\n    print(time.time()-begin_time)\n    return", "docstring": "Main function for this program.\n\nThis will read in sensitivity_curves and binary parameters; calculate snrs\nwith a matched filtering approach; and then read the contour data out to a file.\n\nArgs:\npid (obj or dict): GenInput class or dictionary containing all of the input information for\nthe generation. See BOWIE documentation and example notebooks for usage of\nthis class.", "source": "juraj-google-style"}
{"code": "def delete(filething):\n    \n\n    t = OggFLAC(filething)\n    filething.fileobj.seek(0)\n    t.delete(filething)", "docstring": "delete(filething)\n\nArguments:\nfilething (filething)\nRaises:\nmutagen.MutagenError\n\nRemove tags from a file.", "source": "juraj-google-style"}
{"code": "def _determine_hpp_url(self, platform, action):\n    base_uri = settings.BASE_HPP_URL.format(platform)\n    service = (action + '.shtml')\n    result = '/'.join([base_uri, service])\n    return result", "docstring": "This returns the Adyen HPP endpoint based on the provided platform,\nand action.\n\nArgs:\nplatform (str): Adyen platform, ie 'live' or 'test'.\naction (str):   the HPP action to perform.\npossible actions: select, pay, skipDetails, directory", "source": "codesearchnet"}
{"code": "def expand_char_ngrams(source, minn, maxn, itself='ASIS', name=None):\n    with ops.name_scope(name, 'ExpandCharNgrams', [source]):\n        source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string)\n        if isinstance(source, tf.SparseTensor):\n            (child_indices, child_values, child_shape) = ops_module.expand_char_ngrams(source.values, minn, maxn, itself)\n            result = _combine_sparse_successor(source.indices, source.dense_shape, child_indices, child_values, child_shape)\n        else:\n            (indices, values, shape) = ops_module.expand_char_ngrams(source, minn, maxn, itself)\n            result = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)\n        return result", "docstring": "Split unicode strings into char ngrams.\nNgrams size configures with minn and max\n\nArgs:\nsource: `Tensor` or `SparseTensor` of any shape, strings to split\nminn: Minimum length of char ngram\nminn: Maximum length of char ngram\nitself: Scalar value, strategy for source word preserving.\nOne of `\"ASIS\"`, `\"NEVER\"`, `\"ALWAYS\"`, `\"ALONE\"`.\nname: A name for the operation (optional).\nReturns:\n`SparseTensor` with an additional dimension of size 1 added.", "source": "codesearchnet"}
{"code": "def unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor=False):\n    if not with_loss_tensor:\n        return flatten_per_replica_values(distribution_strategy, grouped_outputs)\n    if not isinstance(grouped_outputs, list):\n        grouped_outputs = [grouped_outputs]\n    loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM, grouped_outputs[0], axis=None)\n    all_outputs = flatten_per_replica_values(distribution_strategy, grouped_outputs[1:])\n    if backend.is_tpu_strategy(distribution_strategy) and ops.executing_eagerly_outside_functions():\n        all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync]\n    return [loss] + all_outputs", "docstring": "Unwrap the list of outputs contained in the PerReplica parameters.\n\nThis function calls `flatten_per_replica_values` to parse each of the input\nparameters into a list of outputs on the different devices. If we set\n`with_loss_tensor` to be True, we also call `reduce` on the list of losses on\nthe different devices to give us one loss tensor.\n\nArgs:\ndistribution_strategy: DistributionStrategy used to distribute training and\nvalidation.\ngrouped_outputs: PerReplica outputs returned from the train or test function\nthat we ran on each device.\nwith_loss_tensor: Boolean that indicates if we need to add the reduced loss\ntensor as one of the outputs.\n\nReturns:\nValues of each of the PerReplica outputs.", "source": "github-repos"}
{"code": "def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):\n    real_labels = convert_rgb_to_symmetric_real(labels)\n    dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels)\n    weights = weights_fn(labels)\n    loss_num = (weights * dml_loss_value)\n    loss_den = weights_nonzero(weights)\n    if reduce_sum:\n        loss_num = tf.reduce_sum(loss_num)\n        loss_den = tf.reduce_sum(loss_den)\n    return (loss_num, loss_den)", "docstring": "Discretized mixture of logistics loss.\n\nArgs:\npred: A [batch, height, width, num_mixtures*10] tensor of floats\ncomprising one unconstrained mixture probability, three means\n(one per channel), three standard deviations (one per channel),\nand three coefficients which linearly parameterize dependence across\nchannels.\nlabels: A [batch, height, width, channels] tensor of 8-bit pixel\nintensities. The computation assumes channels is 3.\nweights_fn: A function of labels, returning a Tensor of shape\n[batch, height, width] which weights each loss term. Default is to scale\neach loss term by 1/3 so that they capture the average across channels.\nreduce_sum: A boolean, to return scalar loss instead of per position.\n\nReturns:\nTuple of loss tensors for numerator and denominator, each a scalar if\nreduce_sum else of shape [batch, height, width]. The sum of their divisions\nis the number of nats for each pixel in labels.", "source": "codesearchnet"}
{"code": "def __init__(self, filename: str, mode: str = 'r+', *, validate: bool = True, spec_version: str = \"2.0.1\") -> None:\n\t\t\n\t\tif not os.path.exists(filename):\n\t\t\traise IOError(f\"File '{filename}' not found\")\n\t\t\n\t\tif mode != 'r+' and mode != 'r':\n\t\t\traise ValueError(\"Mode must be either 'r' or 'r+'\")\n\t\tself.filename = filename  \n\n\t\t\n\t\tif validate:\n\t\t\tlv = loompy.LoomValidator(version=spec_version)\n\t\t\tif not lv.validate(filename):\n\t\t\t\traise ValueError(\"\\n\".join(lv.errors) + f\"\\n{filename} does not appead to be a valid Loom file according to Loom spec version '{spec_version}'\")\n\n\t\tself._file = h5py.File(filename, mode)\n\t\tself._closed = False\n\t\tif \"matrix\" in self._file:\n\t\t\tself.shape = self._file[\"/matrix\"].shape  \n\t\telse:\n\t\t\tself.shape = (0, 0)\n\t\tself.layers = loompy.LayerManager(self)\n\t\tself.view = loompy.ViewManager(self)  \n\t\tself.ra = loompy.AttributeManager(self, axis=0)  \n\t\tself.ca = loompy.AttributeManager(self, axis=1)  \n\t\tself.attrs = loompy.FileAttributeManager(self._file)  \n\t\tself.row_graphs = loompy.GraphManager(self, axis=0)  \n\t\tself.col_graphs = loompy.GraphManager(self, axis=1)  \n\n\t\t\n\t\tself.layer = self.layers\n\t\tself.row_attrs = self.ra\n\t\tself.col_attrs = self.ca", "docstring": "Establish a connection to a Loom file.\n\nArgs:\nfilename:\t\t\tName of the .loom file to open\nmode:\t\t\t\tread/write mode, accepts 'r+' (read/write) or\n'r' (read-only), defaults to 'r+' without arguments,\nand to 'r' with incorrect arguments\nvalidate:\t\t\tValidate that the file conforms with the Loom specification\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def create_workspace(self, did, name, version_id=None):\n        \n\n        payload = {\n            'isPublic': True,\n            'name': name,\n        }\n\n        if version_id:\n            payload['versionId'] = version_id\n\n        return self._api.request('post', '/api/documents/d/' + did + '/workspaces', body=payload)", "docstring": "Create a workspace in the specified document.\n\nArgs:\n- did (str): the document id of where to create the new workspace\n- name (str): the new name of the copied workspace.\n- version_id (str): the ID of the version to be copied into a new workspace\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def post_file(self, url, filename, file_stream, *args, **kwargs):\n        \n        res = self._conn.post(url, files={filename: file_stream},\n                              headers=self._prepare_headers(**kwargs))\n        if res.status_code == 200 or res.status_code == 201:\n            return res.text\n        else:\n            return None", "docstring": "Uploads file to provided url.\n\nReturns contents as text\n\nArgs:\n**url**: address where to upload file\n\n**filename**: Name of the uploaded file\n\n**file_stream**: file like object to upload\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nstring", "source": "juraj-google-style"}
{"code": "def __init__(self, python_function, name, input_signature=None, autograph=True, jit_compile=None, reduce_retracing=False, experimental_implements=None, experimental_autograph_options=None, experimental_attributes=None):\n    self._lock = threading.RLock()\n    self._python_function = python_function\n    self._function_type, self._default_values = function_type_utils.make_function_type(python_function, input_signature)\n    self._function_cache = function_cache.FunctionCache()\n    self._function_captures = capture_container.FunctionCaptures()\n    self._attributes = {}\n    if experimental_implements is not None:\n        self._attributes = self._create_implements_attribute(experimental_implements)\n    if experimental_attributes is not None:\n        self._attributes.update(experimental_attributes)\n    for attribute in self._attributes:\n        if attribute not in attributes_lib.POLYMORPHIC_FUNCTION_ALLOWLIST:\n            raise ValueError(f'`{attribute} is not supported by tf.function as an attribute.')\n    self._is_pure = self._attributes and attributes_lib.IMPLEMENTS in self._attributes\n    self._shared_rendezvous = None\n    self._autograph = autograph\n    self._experimental_autograph_options = experimental_autograph_options\n    self._reduce_retracing = reduce_retracing\n    self._jit_compile = jit_compile\n    self._created_variables = None\n    self._variable_creation_config = None\n    self._no_variable_creation_config = None\n    self._descriptor_cache = weakref.WeakKeyDictionary()\n    self._name = name\n    self._key_for_call_stats = self._get_key_for_call_stats()\n    self._omit_frequent_tracing_warning = False\n    ops._tf_function_api_gauge.get_cell().set(True)", "docstring": "Initializes a `Function`.\n\nArgs:\npython_function: the function to be wrapped.\nname: the name given to it.\ninput_signature: See the documentation for `tf.function`.\nautograph: See the documentation for `tf.function`.\njit_compile: See the documentation for `tf.function`.\nreduce_retracing: See the documentation for `tf.function`.\nexperimental_implements: See the documentation for `tf.function`.\nexperimental_autograph_options: See the documentation for `tf.function`.\nexperimental_attributes: See the documentation for `tf.function`.\n\nRaises:\nValueError: if `input_signature` is not None and the `python_function`'s\nargspec has keyword arguments.", "source": "github-repos"}
{"code": "def _read_signer(key_filename):\n    \n    filename = key_filename\n    if filename is None:\n        filename = os.path.join(os.path.expanduser('~'),\n                                '.sawtooth',\n                                'keys',\n                                getpass.getuser() + '.priv')\n\n    try:\n        with open(filename, 'r') as key_file:\n            signing_key = key_file.read().strip()\n    except IOError as e:\n        raise CliException('Unable to read key file: {}'.format(str(e)))\n\n    try:\n        private_key = Secp256k1PrivateKey.from_hex(signing_key)\n    except ParseError as e:\n        raise CliException('Unable to read key in file: {}'.format(str(e)))\n\n    context = create_context('secp256k1')\n    crypto_factory = CryptoFactory(context)\n    return crypto_factory.new_signer(private_key)", "docstring": "Reads the given file as a hex key.\n\nArgs:\nkey_filename: The filename where the key is stored. If None,\ndefaults to the default key for the current user.\n\nReturns:\nSigner: the signer\n\nRaises:\nCliException: If unable to read the file.", "source": "juraj-google-style"}
{"code": "def synchronize_task(self, func, *args, **kwargs):\n\n    async def _runner():\n        return func(*args, **kwargs)\n    return self.emulator.run_task_external(_runner())", "docstring": "Run callable in the rpc thread and wait for it to finish.\n\nThe callable ``func`` will be passed into the EmulationLoop and run\nthere.  This method will block until ``func`` is finished and\nreturn/raise whatever that callable returns/raises.\n\nThis method is mainly useful for performing an activity that needs to\nbe synchronized with the rpc thread for safety reasons.\n\nIf this method is called from the rpc thread itself, it will just\nrun the task and return its result.\n\nArgs:\nfunc (callable): A method with signature callable(*args, **kwargs),\nthat will be called with the optional *args and **kwargs passed\nto this method.\n*args: Arguments that will be passed to callable.\n**kwargs: Keyword arguments that will be passed to callable.\n\nReturns:\nobject: Whatever callable returns after it runs.", "source": "codesearchnet"}
{"code": "def _calculateCrcString(inputstring):\n    \n    _checkString(inputstring, description='input CRC string')\n \n    \n    register = 0xFFFF\n\n    for char in inputstring:\n        register = (register >> 8) ^ _CRC16TABLE[(register ^ ord(char)) & 0xFF]\n \n    return _numToTwoByteString(register, LsbFirst=True)", "docstring": "Calculate CRC-16 for Modbus.\n\nArgs:\ninputstring (str): An arbitrary-length message (without the CRC).\n\nReturns:\nA two-byte CRC string, where the least significant byte is first.", "source": "juraj-google-style"}
{"code": "def extract_keywords(self, sentence, span_info=False):\n    keywords_extracted = []\n    if (not sentence):\n        return keywords_extracted\n    if (not self.case_sensitive):\n        sentence = sentence.lower()\n    current_dict = self.keyword_trie_dict\n    sequence_start_pos = 0\n    sequence_end_pos = 0\n    reset_current_dict = False\n    idx = 0\n    sentence_len = len(sentence)\n    while (idx < sentence_len):\n        char = sentence[idx]\n        if (char not in self.non_word_boundaries):\n            if ((self._keyword in current_dict) or (char in current_dict)):\n                sequence_found = None\n                longest_sequence_found = None\n                is_longer_seq_found = False\n                if (self._keyword in current_dict):\n                    sequence_found = current_dict[self._keyword]\n                    longest_sequence_found = current_dict[self._keyword]\n                    sequence_end_pos = idx\n                if (char in current_dict):\n                    current_dict_continued = current_dict[char]\n                    idy = (idx + 1)\n                    while (idy < sentence_len):\n                        inner_char = sentence[idy]\n                        if ((inner_char not in self.non_word_boundaries) and (self._keyword in current_dict_continued)):\n                            longest_sequence_found = current_dict_continued[self._keyword]\n                            sequence_end_pos = idy\n                            is_longer_seq_found = True\n                        if (inner_char in current_dict_continued):\n                            current_dict_continued = current_dict_continued[inner_char]\n                        else:\n                            break\n                        idy += 1\n                    else:\n                        if (self._keyword in current_dict_continued):\n                            longest_sequence_found = current_dict_continued[self._keyword]\n                            sequence_end_pos = idy\n                            is_longer_seq_found = True\n                    if is_longer_seq_found:\n                        idx = sequence_end_pos\n                current_dict = self.keyword_trie_dict\n                if longest_sequence_found:\n                    keywords_extracted.append((longest_sequence_found, sequence_start_pos, idx))\n                reset_current_dict = True\n            else:\n                current_dict = self.keyword_trie_dict\n                reset_current_dict = True\n        elif (char in current_dict):\n            current_dict = current_dict[char]\n        else:\n            current_dict = self.keyword_trie_dict\n            reset_current_dict = True\n            idy = (idx + 1)\n            while (idy < sentence_len):\n                char = sentence[idy]\n                if (char not in self.non_word_boundaries):\n                    break\n                idy += 1\n            idx = idy\n        if ((idx + 1) >= sentence_len):\n            if (self._keyword in current_dict):\n                sequence_found = current_dict[self._keyword]\n                keywords_extracted.append((sequence_found, sequence_start_pos, sentence_len))\n        idx += 1\n        if reset_current_dict:\n            reset_current_dict = False\n            sequence_start_pos = idx\n    if span_info:\n        return keywords_extracted\n    return [value[0] for value in keywords_extracted]", "docstring": "Searches in the string for all keywords present in corpus.\nKeywords present are 
added to a list `keywords_extracted` and returned.\n\nArgs:\nsentence (str): Line of text where we will search for keywords\n\nReturns:\nkeywords_extracted (list(str)): List of terms/keywords found in sentence that match our corpus\n\nExamples:\n>>> from flashtext import KeywordProcessor\n>>> keyword_processor = KeywordProcessor()\n>>> keyword_processor.add_keyword('Big Apple', 'New York')\n>>> keyword_processor.add_keyword('Bay Area')\n>>> keywords_found = keyword_processor.extract_keywords('I love Big Apple and Bay Area.')\n>>> keywords_found\n>>> ['New York', 'Bay Area']", "source": "codesearchnet"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_PyTorch'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def service_status(self, short_name):\n        \n\n        if short_name not in self.services:\n            raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n        info = {}\n\n        service = self.services[short_name]['state']\n\n        info['heartbeat_age'] = monotonic() - service.last_heartbeat\n        info['numeric_status'] = service.state\n        info['string_status'] = service.string_state\n\n        return info", "docstring": "Get the current status of a service.\n\nReturns information about the service such as the length since the last\nheartbeat, any status messages that have been posted about the service\nand whether the heartbeat should be considered out of the ordinary.\n\nArgs:\nshort_name (string): The short name of the service to query\n\nReturns:\ndict: A dictionary with the status of the service", "source": "juraj-google-style"}
{"code": "def values_override(self) -> Optional[Mapping[str, Any]]:\n    if hasattr(self._config, 'use_cache'):\n        return {'use_cache': False}\n    return None", "docstring": "Dictionary of keys to override in the model's config before exporting\n\nReturns:\nDictionary with the keys (and their corresponding values) to override", "source": "github-repos"}
{"code": "def nic_b(msg):\n    \n    tc = typecode(msg)\n\n    if tc < 9 or tc > 18:\n        raise RuntimeError(\"%s: Not a airborne position message, expecting 8<TC<19\" % msg)\n\n    msgbin = common.hex2bin(msg)\n    nic_b = int(msgbin[39])\n\n    return nic_b", "docstring": "Obtain NICb, navigation integrity category supplement-b\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: NICb number (0 or 1)", "source": "juraj-google-style"}
{"code": "def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        scope = x.scope.as_list()\n        batch = x.batch\n        return TensorFluent(tf.stop_gradient(x.tensor), scope, batch)", "docstring": "Returns a copy of the input fluent with stop_gradient at tensor level.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent that stops backpropagation of gradient computations.", "source": "juraj-google-style"}
{"code": "def get_operation_device(self, operation_name):\n    \n    operation = self._name_to_operation(operation_name)\n    if isinstance(operation, tf.Operation):\n      return operation.device\n    else:  \n      return None", "docstring": "The device of an operation.\n\nNote that only tf operations have device assignments.\n\nArgs:\noperation_name: a string, name of an operation in the graph.\n\nReturns:\na string or None, representing the device name.", "source": "juraj-google-style"}
{"code": "def path_get(p: tcod.path.AStar, idx: int) -> Tuple[(int, int)]:\n    x = ffi.new('int *')\n    y = ffi.new('int *')\n    lib.TCOD_path_get(p._path_c, idx, x, y)\n    return (x[0], y[0])", "docstring": "Get a point on a path.\n\nArgs:\np (AStar): An AStar instance.\nidx (int): Should be in range: 0 <= inx < :any:`path_size`", "source": "codesearchnet"}
{"code": "def _GetParserFilters(cls, parser_filter_expression):\n    if (not parser_filter_expression):\n        return ({}, {})\n    includes = {}\n    excludes = {}\n    preset_names = cls._presets.GetNames()\n    for parser_filter in parser_filter_expression.split(','):\n        parser_filter = parser_filter.strip()\n        if (not parser_filter):\n            continue\n        if parser_filter.startswith('!'):\n            parser_filter = parser_filter[1:]\n            active_dict = excludes\n        else:\n            active_dict = includes\n        parser_filter = parser_filter.lower()\n        if (parser_filter in preset_names):\n            for parser_in_category in cls._GetParsersFromPresetCategory(parser_filter):\n                (parser, _, plugin) = parser_in_category.partition('/')\n                active_dict.setdefault(parser, [])\n                if plugin:\n                    active_dict[parser].append(plugin)\n        else:\n            (parser, _, plugin) = parser_filter.partition('/')\n            active_dict.setdefault(parser, [])\n            if plugin:\n                active_dict[parser].append(plugin)\n    cls._ReduceParserFilters(includes, excludes)\n    return (includes, excludes)", "docstring": "Retrieves the parsers and plugins to include and exclude.\n\nTakes a comma separated string and splits it up into two dictionaries,\nof parsers and plugins to include and to exclude from selection. If a\nparticular filter is prepended with an exclamation point it will be\nadded to the exclude section, otherwise in the include.\n\nArgs:\nparser_filter_expression (str): parser filter expression, where None\nrepresents all parsers and plugins.\n\nReturns:\ntuple: containing:\n\n* dict[str, BaseParser]: included parsers and plugins by name.\n* dict[str, BaseParser]: excluded parsers and plugins by name.", "source": "codesearchnet"}
{"code": "def clean_for_storage(self, data):\n        \n        data = self.data_to_unicode(data)\n        if isinstance(data, dict):\n            for k in dict(data).keys():\n                if k == '_id':\n                    del data[k]\n                    continue\n                if '.' in k:\n                    new_k = k.replace('.', '_')\n                    data[new_k] = data[k]\n                    del data[k]\n                    k = new_k\n                if isinstance(data[k], dict):\n                    data[k] = self.clean_for_storage(data[k])\n                elif isinstance(data[k], list):\n                    data[k] = [self.clean_for_storage(item) for item in data[k]]\n        return data", "docstring": "Clean data in preparation for storage.\n\nDeletes items with key having a '.' or is '_id'. Also deletes those items\nwhose value is a dictionary or a list.\n\nArgs:\ndata: Sample data dictionary to be cleaned.\n\nReturns:\nCleaned data dictionary.", "source": "juraj-google-style"}
{"code": "def remind_signature_request(self, signature_request_id, email_address):\n    request = self._get_request()\n    return request.post((self.SIGNATURE_REQUEST_REMIND_URL + signature_request_id), data={'email_address': email_address})", "docstring": "Sends an email to the signer reminding them to sign the signature request\n\nSends an email to the signer reminding them to sign the signature\nrequest. You cannot send a reminder within 1 hours of the last reminder\nthat was sent. This includes manual AND automatic reminders.\n\nArgs:\n\nsignature_request_id (str): The id of the SignatureRequest to send a reminder for\n\nemail_address (str):        The email address of the signer to send a reminder to\n\nReturns:\nA SignatureRequest object", "source": "codesearchnet"}
{"code": "def iplot_state_paulivec(rho, figsize=None, slider=False, show_legend=False):\n    \n\n    \n    html_template = Template()\n\n    \n    javascript_template = Template()\n    rho = _validate_input_state(rho)\n    \n    if figsize is None:\n        figsize = (7, 5)\n\n    options = {'width': figsize[0], 'height': figsize[1],\n               'slider': int(slider), 'show_legend': int(show_legend)}\n\n    \n    div_number = str(time.time())\n    div_number = re.sub('[.]', '', div_number)\n\n    data_to_plot = []\n    rho_data = process_data(rho)\n    data_to_plot.append(dict(\n        data=rho_data\n    ))\n\n    html = html_template.substitute({\n        'divNumber': div_number\n    })\n\n    javascript = javascript_template.substitute({\n        'divNumber': div_number,\n        'executions': data_to_plot,\n        'options': options\n    })\n\n    display(HTML(html + javascript))", "docstring": "Create a paulivec representation.\n\nGraphical representation of the input array.\n\nArgs:\nrho (array): State vector or density matrix.\nfigsize (tuple): Figure size in pixels.\nslider (bool): activate slider\nshow_legend (bool): show legend of graph content", "source": "juraj-google-style"}
{"code": "def Log(self, frame):\n    if (not self._log_message):\n        return {'isError': True, 'description': {'format': LOG_ACTION_NOT_SUPPORTED}}\n    if self._quota_recovery_start_time:\n        ms_elapsed = ((time.time() - self._quota_recovery_start_time) * 1000)\n        if (ms_elapsed > self.quota_recovery_ms):\n            self._quota_recovery_start_time = None\n        else:\n            return\n    message = ('LOGPOINT: ' + _FormatMessage(self._definition.get('logMessageFormat', ''), self._EvaluateExpressions(frame)))\n    line = self._definition['location']['line']\n    cdbg_logging_location = (NormalizePath(frame.f_code.co_filename), line, _GetFrameCodeObjectName(frame))\n    if native.ApplyDynamicLogsQuota(len(message)):\n        self._log_message(message)\n    else:\n        self._quota_recovery_start_time = time.time()\n        self._log_message(DYNAMIC_LOG_OUT_OF_QUOTA)\n    del cdbg_logging_location\n    return None", "docstring": "Captures the minimal application states, formats it and logs the message.\n\nArgs:\nframe: Python stack frame of breakpoint hit.\n\nReturns:\nNone on success or status message on error.", "source": "codesearchnet"}
{"code": "def save_screenshot(self, path=None, **kwargs):\n    path = _prepare_path(path, 'png')\n    self.driver.save_screenshot(path, **kwargs)\n    return path", "docstring": "Save a screenshot of the page.\n\nIf invoked without arguments, it will save a file to :data:`capybara.save_path` and the\nfile will be given a randomly generated filename. If invoked with a relative path, the path\nwill be relative to :data:`capybara.save_path`.\n\nArgs:\npath (str, optional): The path to where it should be saved.\n**kwargs: Arbitrary keywords arguments for the driver.\n\nReturns:\nstr: The path to which the file was saved.", "source": "codesearchnet"}
{"code": "def _inverse_document_frequency(self, token_document_counts, num_documents):\n    return tf.math.log(1 + num_documents / (1 + token_document_counts))", "docstring": "Computes the inverse-document-frequency (IDF) component of \"tf_idf\".\nArgs:\ntoken_document_counts: An array of the # of documents each token\nappears in.\nnum_documents: An int representing the total number of documents\n\nReturns:\nAn array of \"inverse document frequency\" weights.", "source": "github-repos"}
{"code": "def __init__(self, stream_number, entry_index):\n    \n    super(SerializedStreamIdentifier, self).__init__()\n    self.entry_index = entry_index\n    self.stream_number = stream_number", "docstring": "Initializes a serialized stream attribute container identifier.\n\nArgs:\nstream_number (int): number of the serialized attribute container stream.\nentry_index (int): number of the serialized event within the stream.", "source": "juraj-google-style"}
{"code": "def render_build_args(options, ns):\n    build_args = options.get('buildArgs', {})\n    for (key, value) in build_args.items():\n        build_args[key] = value.format(**ns)\n    return build_args", "docstring": "Get docker build args dict, rendering any templated args.\n\nArgs:\noptions (dict):\nThe dictionary for a given image from chartpress.yaml.\nFields in `options['buildArgs']` will be rendered and returned,\nif defined.\nns (dict): the namespace used when rendering templated arguments", "source": "codesearchnet"}
{"code": "def init_logging(log_filename, verbose, quiet):\n    \n    \n    \n    logger = logging.getLogger('')\n    logger.setLevel(logging.DEBUG)\n\n    \n    if log_filename:\n        file_handler = logging.FileHandler(log_filename)\n        file_handler.setLevel(logging.DEBUG)\n        \n        file_handler.setFormatter(\n            logging.Formatter(\n                fmt=\"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\\t%(message)s\"\n            ))\n        logger.addHandler(file_handler)\n\n    \n    console_handler = logging.StreamHandler(sys.stdout)\n    stderr_hdl = logging.StreamHandler(sys.stderr)\n\n    \n    fmt_verbose = logging.Formatter(\n        fmt=\"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\\t%(message)s\",\n        datefmt='%Y-%m-%d,%H:%M:%S.%f'\n    )\n    fmt_regular = logging.Formatter(\n        \"%(asctime)s [%(levelname).4s] [%(filename).8s] %(message)s\", \"%H:%M:%S\")\n\n    \n    if verbose:\n        console_handler.setLevel(logging.DEBUG)\n        console_handler.setFormatter(fmt_verbose)\n        stderr_hdl.setFormatter(fmt_verbose)\n    elif quiet:\n        console_handler.setLevel(logging.WARNING)\n        console_handler.setFormatter(fmt_regular)\n        stderr_hdl.setFormatter(fmt_regular)\n    else:\n        console_handler.setLevel(logging.INFO)\n        console_handler.setFormatter(fmt_regular)\n        stderr_hdl.setFormatter(fmt_regular)\n\n    \n    \n    f_err = SingleLevelFilter(logging.ERROR, True)\n    f_warn = SingleLevelFilter(logging.WARNING, True)\n    f_crit = SingleLevelFilter(logging.CRITICAL, True)\n    console_handler.addFilter(f_err)\n    console_handler.addFilter(f_warn)\n    console_handler.addFilter(f_crit)\n    logger.addHandler(console_handler)\n\n    f_info = SingleLevelFilter(logging.INFO, True)\n    f_debug = SingleLevelFilter(logging.DEBUG, True)\n    stderr_hdl.addFilter(f_info)\n    stderr_hdl.addFilter(f_debug)\n    logger.addHandler(stderr_hdl)", "docstring": "Set up logging with default parameters:\n* default console logging level is INFO\n* ERROR, WARNING and CRITICAL are redirected to stderr\n\nArgs:\nlog_filename (str): if set, will write DEBUG log there\nverbose (bool): DEBUG level in console, overrides 'quiet'\nquiet (bool): WARNING level in console", "source": "juraj-google-style"}
{"code": "def size(self, name=None):\n    if name is None:\n        name = '%s_size' % self._name\n    return self._size_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)", "docstring": "Returns the number of elements in the staging area.\n\nArgs:\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"}
{"code": "def create_dummy_class(klass, dependency):\n    assert (not building_rtfd())\n\n    class _DummyMetaClass(type):\n\n        def __getattr__(_, __):\n            raise AttributeError(\"Cannot import '{}', therefore '{}' is not available\".format(dependency, klass))\n\n    @six.add_metaclass(_DummyMetaClass)\n    class _Dummy(object):\n\n        def __init__(self, *args, **kwargs):\n            raise ImportError(\"Cannot import '{}', therefore '{}' is not available\".format(dependency, klass))\n    return _Dummy", "docstring": "When a dependency of a class is not available, create a dummy class which throws ImportError when used.\n\nArgs:\nklass (str): name of the class.\ndependency (str): name of the dependency.\n\nReturns:\nclass: a class object", "source": "codesearchnet"}
{"code": "def set_fig_size(self, width, height=None):\n        \n        self.figure.figure_width = width\n        self.figure.figure_height = height\n        return", "docstring": "Set the figure size in inches.\n\nSets the figure size with a call to fig.set_size_inches.\nDefault in code is 8 inches for each.\n\nArgs:\nwidth (float): Dimensions for figure width in inches.\nheight (float, optional): Dimensions for figure height in inches. Default is None.", "source": "juraj-google-style"}
{"code": "def find_copy_constructor(type_):\n    \n    copy_ = type_.constructors(\n        lambda x: is_copy_constructor(x),\n        recursive=False,\n        allow_empty=True)\n    if copy_:\n        return copy_[0]\n\n    return None", "docstring": "Returns reference to copy constructor.\n\nArgs:\ntype_ (declarations.class_t): the class to be searched.\n\nReturns:\ndeclarations.constructor_t: the copy constructor", "source": "juraj-google-style"}
{"code": "def stack_template_url(bucket_name, blueprint, endpoint):\n    key_name = stack_template_key_name(blueprint)\n    return ('%s/%s/%s' % (endpoint, bucket_name, key_name))", "docstring": "Produces an s3 url for a given blueprint.\n\nArgs:\nbucket_name (string): The name of the S3 bucket where the resulting\ntemplates are stored.\nblueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\nobject to create the URL to.\nendpoint (string): The s3 endpoint used for the bucket.\n\nReturns:\nstring: S3 URL.", "source": "codesearchnet"}
{"code": "def configure_attributes(self, json_data):\n    env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n    elbclient = env.client('elb')\n    elb_settings = self.properties['elb']\n    LOG.debug('Block ELB Settings Pre Configure Load Balancer Attributes:\\n%s', pformat(elb_settings))\n    for job in json.loads(json_data)['job']:\n        load_balancer_attributes = {'CrossZoneLoadBalancing': {'Enabled': True}, 'AccessLog': {'Enabled': False}, 'ConnectionDraining': {'Enabled': False}, 'ConnectionSettings': {'IdleTimeout': 60}}\n        if elb_settings.get('connection_draining_timeout'):\n            connection_draining_timeout = int(elb_settings['connection_draining_timeout'])\n            LOG.info('Applying Custom Load Balancer Connection Draining Timeout: %d', connection_draining_timeout)\n            load_balancer_attributes['ConnectionDraining'] = {'Enabled': True, 'Timeout': connection_draining_timeout}\n        if elb_settings.get('idle_timeout'):\n            idle_timeout = int(elb_settings['idle_timeout'])\n            LOG.info('Applying Custom Load Balancer Idle Timeout: %d', idle_timeout)\n            load_balancer_attributes['ConnectionSettings'] = {'IdleTimeout': idle_timeout}\n        if elb_settings.get('access_log'):\n            access_log_bucket_name = elb_settings['access_log']['bucket_name']\n            access_log_bucket_prefix = elb_settings['access_log']['bucket_prefix']\n            access_log_emit_interval = int(elb_settings['access_log']['emit_interval'])\n            LOG.info('Applying Custom Load Balancer Access Log: %s/%s every %d minutes', access_log_bucket_name, access_log_bucket_prefix, access_log_emit_interval)\n            load_balancer_attributes['AccessLog'] = {'Enabled': True, 'S3BucketName': access_log_bucket_name, 'EmitInterval': access_log_emit_interval, 'S3BucketPrefix': access_log_bucket_prefix}\n        LOG.info('Applying Load Balancer Attributes')\n        LOG.debug('Load Balancer Attributes:\\n%s', pformat(load_balancer_attributes))\n        elbclient.modify_load_balancer_attributes(LoadBalancerName=self.app, LoadBalancerAttributes=load_balancer_attributes)", "docstring": "Configure load balancer attributes such as idle timeout, connection draining, etc\n\nArgs:\njson_data (json): return data from ELB upsert", "source": "codesearchnet"}
{"code": "def present(name, parent=None, vlan=None):\n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n    comment_bridge_created = 'Bridge {0} created.'.format(name)\n    comment_bridge_notcreated = 'Unable to create bridge: {0}.'.format(name)\n    comment_bridge_exists = 'Bridge {0} already exists.'.format(name)\n    comment_bridge_mismatch = 'Bridge {0} already exists, but has a different parent or VLAN ID.'.format(name)\n    changes_bridge_created = {name: {'old': 'Bridge {0} does not exist.'.format(name), 'new': 'Bridge {0} created'.format(name)}}\n    bridge_exists = __salt__['openvswitch.bridge_exists'](name)\n    if bridge_exists:\n        current_parent = __salt__['openvswitch.bridge_to_parent'](name)\n        if (current_parent == name):\n            current_parent = None\n        current_vlan = __salt__['openvswitch.bridge_to_vlan'](name)\n        if (current_vlan == 0):\n            current_vlan = None\n    if __opts__['test']:\n        if bridge_exists:\n            if ((current_parent == parent) and (current_vlan == vlan)):\n                ret['result'] = True\n                ret['comment'] = comment_bridge_exists\n            else:\n                ret['result'] = False\n                ret['comment'] = comment_bridge_mismatch\n        else:\n            ret['result'] = None\n            ret['comment'] = comment_bridge_created\n        return ret\n    if bridge_exists:\n        if ((current_parent == parent) and (current_vlan == vlan)):\n            ret['result'] = True\n            ret['comment'] = comment_bridge_exists\n        else:\n            ret['result'] = False\n            ret['comment'] = comment_bridge_mismatch\n    else:\n        bridge_create = __salt__['openvswitch.bridge_create'](name, parent=parent, vlan=vlan)\n        if bridge_create:\n            ret['result'] = True\n            ret['comment'] = comment_bridge_created\n            ret['changes'] = changes_bridge_created\n        else:\n            ret['result'] = False\n            ret['comment'] = comment_bridge_notcreated\n    return ret", "docstring": "Ensures that the named bridge exists, eventually creates it.\n\nArgs:\nname: The name of the bridge.\nparent: The name of the parent bridge (if the bridge shall be created\nas a fake bridge). If specified, vlan must also be specified.\nvlan: The VLAN ID of the bridge (if the bridge shall be created as a\nfake bridge). If specified, parent must also be specified.", "source": "codesearchnet"}
{"code": "def get_otp(self, message_list):\n        \n        if isinstance(message_list, six.string_types):\n            message_list = [message_list, ]\n        for x in message_list:\n            if self.separator in x:\n                raise ValueError('Messages cannot contain separator')\n        message_list = self.separator.join(message_list)\n        dt = int(time.time())\n        prefix = ''.join([random.choice(string.ascii_letters) for x in range(random.randint(0, 20))])\n        tail = ''.join([random.choice(string.ascii_letters) for x in range(random.randint(0, 20))])\n        message_list = f'{message_list}{self.separator}{prefix}{dt}{tail}'\n        message_list = self.encryption_suite.encrypt(message_list.encode())\n        return base64.urlsafe_b64encode(message_list)", "docstring": "Generates a url-safe base64 encoded encypted message together with current timestamp (to the second).\nThrows in some random number of characters to prenvent ecryption chill exploit\nArgs:\nmessage_list: the message to be encrypted\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _parse_local_interface(self, config):\n        \n        match = re.search(r'local-interface (\\w+)', config)\n        value = match.group(1) if match else None\n        return dict(local_interface=value)", "docstring": "Scans the config block and parses the local-interface value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "juraj-google-style"}
{"code": "def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None, to_unit_cell=True):\n    from numpy.linalg import norm\n    from numpy import cross, eye\n    from scipy.linalg import expm\n    if (indices is None):\n        indices = range(len(self))\n    if (axis is None):\n        axis = [0, 0, 1]\n    if (anchor is None):\n        anchor = [0, 0, 0]\n    anchor = np.array(anchor)\n    axis = np.array(axis)\n    theta %= (2 * np.pi)\n    rm = expm((cross(eye(3), (axis / norm(axis))) * theta))\n    for i in indices:\n        site = self._sites[i]\n        coords = (np.dot(rm, np.array((site.coords - anchor)).T).T + anchor).ravel()\n        new_site = PeriodicSite(site.species, coords, self._lattice, to_unit_cell=to_unit_cell, coords_are_cartesian=True, properties=site.properties)\n        self._sites[i] = new_site", "docstring": "Rotate specific sites by some angle around vector at anchor.\n\nArgs:\nindices (list): List of site indices on which to perform the\ntranslation.\ntheta (float): Angle in radians\naxis (3x1 array): Rotation axis vector.\nanchor (3x1 array): Point of rotation.\nto_unit_cell (bool): Whether new sites are transformed to unit\ncell", "source": "codesearchnet"}
{"code": "def index_2d(seqs: List[List[Any]], target: Any) -> Tuple[int, int]:\n    \n    for i in range(len(seqs)):\n        for j in range(len(seqs[i])):\n            if seqs[i][j] == target:\n                return i, j\n    raise ValueError('Item not present.')", "docstring": "Finds the first index of a target item within a list of lists.\n\nArgs:\nseqs: The list of lists to search.\ntarget: The item to find.\n\nRaises:\nValueError: Item is not present.", "source": "juraj-google-style"}
{"code": "def add_error(self, error):\n        \n        self._count += 1\n        self._record.add_error('expect@%s+%s' % (time.time(), self._count),\n                               error)", "docstring": "Record an error from expect APIs.\n\nThis method generates a position stamp for the expect. The stamp is\ncomposed of a timestamp and the number of errors recorded so far.\n\nArgs:\nerror: Exception or signals.ExceptionRecord, the error to add.", "source": "juraj-google-style"}
{"code": "def extract_features(data_path: str, thres: int) -> typing.List[str]:\n    counter: typing.Counter[str] = Counter()\n    with open(data_path) as f:\n        for row in f:\n            cols = row.strip().split('\\t')\n            if len(cols) < 2:\n                continue\n            counter.update(cols[1:])\n    return [item[0] for item in counter.most_common() if item[1] > thres]", "docstring": "Extracts a features list from the given encoded data file. This filters out\nfeatures whose number of occurrences does not exceed the threshold.\n\nArgs:\ndata_path (str): The path to the encoded data file that contains the\nfeatures to be extracted, which is typically a training data file.\nthres (int): A threshold to filter out features  whose number of occurrences\ndoes not exceed the threshold.\n\nReturns:\nA list of features", "source": "github-repos"}
{"code": "def get_template_path(filename):\n    \n    if os.path.isfile(filename):\n        return os.path.abspath(filename)\n    for i in sys.path:\n        if os.path.isfile(os.path.join(i, filename)):\n            return os.path.abspath(os.path.join(i, filename))\n\n    return None", "docstring": "Find raw template in working directory or in sys.path.\n\ntemplate_path from config may refer to templates colocated with the Stacker\nconfig, or files in remote package_sources. Here, we emulate python module\nloading to find the path to the template.\n\nArgs:\nfilename (str): Template filename.\n\nReturns:\nOptional[str]: Path to file, or None if no file found", "source": "juraj-google-style"}
{"code": "def list(self, orgId=None, **request_parameters):\n    check_type(orgId, basestring)\n    params = dict_from_items_with_values(request_parameters, orgId=orgId)\n    items = self._session.get_items(API_ENDPOINT, params=params)\n    for item in items:\n        (yield self._object_factory(OBJECT_TYPE, item))", "docstring": "List all licenses for a given organization.\n\nIf no orgId is specified, the default is the organization of the\nauthenticated user.\n\nArgs:\norgId(basestring): Specify the organization, by ID.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nGeneratorContainer: A GeneratorContainer which, when iterated,\nyields the licenses returned by the Webex Teams query.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def CreatePrecisionHelper(cls, precision):\n    precision_helper_class = cls._PRECISION_CLASSES.get(precision, None)\n    if (not precision_helper_class):\n        raise ValueError('Unsupported precision: {0!s}'.format(precision))\n    return precision_helper_class", "docstring": "Creates a precision helper.\n\nArgs:\nprecision (str): precision of the date and time value, which should\nbe one of the PRECISION_VALUES in definitions.\n\nReturns:\nclass: date time precision helper class.\n\nRaises:\nValueError: if the precision value is unsupported.", "source": "codesearchnet"}
{"code": "def set_func(self, name, func):\n    self.func_name = name\n    self.func = func", "docstring": "Set the processing function to use for this node.\n\nArgs:\nname (str): The name of the function to use.  This is\njust stored for reference in case we need to serialize\nthe node later.\nfunc (callable): A function that is called to process inputs\nfor this node.  It should have the following signature:\ncallable(input1_walker, input2_walker, ...)\nIt should return a list of IOTileReadings that are then pushed into\nthe node's output stream", "source": "codesearchnet"}
{"code": "def query(self, time_indices):\n    if self._disposed:\n        raise ValueError('Cannot query: this _WatchStore instance is already disposed')\n    if (not isinstance(time_indices, (tuple, list))):\n        time_indices = [time_indices]\n    output = []\n    for time_index in time_indices:\n        if isinstance(self._data[time_index], _TensorValueDiscarded):\n            output.append(None)\n        else:\n            data_item = self._data[time_index]\n            if (hasattr(data_item, 'dtype') and (tensor_helper.translate_dtype(data_item.dtype) == 'string')):\n                (_, _, data_item) = tensor_helper.array_view(data_item)\n                data_item = np.array(tensor_helper.process_buffers_for_display(data_item), dtype=np.object)\n            output.append(data_item)\n    return output", "docstring": "Query the values at given time indices.\n\nArgs:\ntime_indices: 0-based time indices to query, as a `list` of `int`.\n\nReturns:\nValues as a list of `numpy.ndarray` (for time indices in memory) or\n`None` (for time indices discarded).", "source": "codesearchnet"}
{"code": "def __init__(self, skype=None, raw=None):\n        \n        self.skype = skype\n        self.raw = raw", "docstring": "Instantiate a plain instance of this class, and store a reference to the Skype object for later API calls.\n\nNormally this method won't be called or implemented directly.\n\nImplementers should make use of :meth:`fromRaw` and the :meth:`initAttrs` decorator instead.\n\nArgs:\nskype (Skype): parent Skype instance\nraw (dict): raw object, as provided by the API", "source": "juraj-google-style"}
{"code": "def _SetupDatabase(host=None, port=None, user=None, password=None, database=None, client_key_path=None, client_cert_path=None, ca_cert_path=None):\n    with contextlib.closing(_Connect(host=host, port=port, user=user, password=password, database=None, client_key_path=client_key_path, client_cert_path=client_cert_path, ca_cert_path=ca_cert_path)) as conn:\n        with contextlib.closing(conn.cursor()) as cursor:\n            try:\n                cursor.execute(CREATE_DATABASE_QUERY.format(database))\n            except MySQLdb.MySQLError as e:\n                if (e.args[0] != mysql_error_constants.DB_CREATE_EXISTS):\n                    raise\n            cursor.execute('USE {}'.format(database))\n            _CheckCollation(cursor)\n\n    def _MigrationConnect():\n        return _Connect(host=host, port=port, user=user, password=password, database=database, client_key_path=client_key_path, client_cert_path=client_cert_path, ca_cert_path=ca_cert_path)\n    mysql_migration.ProcessMigrations(_MigrationConnect, config.CONFIG['Mysql.migrations_dir'])", "docstring": "Connect to the given MySQL host and create a utf8mb4_unicode_ci database.\n\nArgs:\nhost: The hostname to connect to.\nport: The port to connect to.\nuser: The username to connect as.\npassword: The password to connect with.\ndatabase: The database name to create.\nclient_key_path: The path of the client private key file.\nclient_cert_path: The path of the client public key certificate file.\nca_cert_path: The path of the Certificate Authority (CA) certificate file.", "source": "codesearchnet"}
{"code": "def cancelPnL(self, account, modelCode: str=''):\n    key = (account, modelCode)\n    reqId = self.wrapper.pnlKey2ReqId.pop(key, None)\n    if reqId:\n        self.client.cancelPnL(reqId)\n        self.wrapper.pnls.pop(reqId, None)\n    else:\n        self._logger.error(f'cancelPnL: No subscription for account {account}, modelCode {modelCode}')", "docstring": "Cancel PnL subscription.\n\nArgs:\naccount: Cancel for this account.\nmodelCode: If specified, cancel for this account model.", "source": "codesearchnet"}
{"code": "def get_py_internals(version=None, default=None):\n    \n\n    if version is None:\n        version = default\n\n    if isinstance(version, dict):\n        return version\n    elif version in PY_INTERNALS:\n        return PY_INTERNALS[version]\n    else:\n        return ValueError('Unsupported python version %r requested.' % version)", "docstring": "Given a version specification. It can be any dict which is returned\nverbatim, an index into :data:`PY_INTERNALS` or ``None``.\n\nArguments:\nversion: The python version to return the internals of.\ndefault: The python version that will be looked up if ``version`` is\nNone.\n\nReturns:\ndict: The python internals for the requested version.", "source": "juraj-google-style"}
{"code": "def assert_same_rank(self, other):\n    other = as_shape(other)\n    if self.rank is not None and other.rank is not None:\n        if self.rank != other.rank:\n            raise ValueError('Shapes %s and %s must have the same rank' % (self, other))", "docstring": "Raises an exception if `self` and `other` do not have compatible ranks.\n\nArgs:\nother: Another `TensorShape`.\n\nRaises:\nValueError: If `self` and `other` do not represent shapes with the\nsame rank.", "source": "github-repos"}
{"code": "def convert_variables_to_constants_v2_as_graph(func, lower_control_flow=True, aggressive_inlining=False):\n    converter_data = _FunctionConverterDataInEager(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining)\n    output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n    frozen_func = _construct_concrete_function(func, output_graph_def, converted_input_indices)\n    return (frozen_func, output_graph_def)", "docstring": "Replaces all the variables in a graph with constants of the same values.\n\nThis function works as same as convert_variables_to_constants_v2, but it\nreturns the intermediate `GraphDef` as well. This `GraphDef` contains all the\ndebug information after all the transformations in the frozen phase.\n\nArgs:\nfunc: ConcreteFunction.\nlower_control_flow: Boolean indicating whether or not to lower control flow\nops such as If and While. (default True)\naggressive_inlining: Boolean indicating whether or not to do aggressive\nfunction inlining (might be unsafe if function has stateful ops, not\nproperly connected to control outputs).\n\nReturns:\nConcreteFunction containing a simplified version of the original, and also\nthe intermediate GraphDef containing the node debug information for the\ntransformations in the frozen phase.", "source": "github-repos"}
{"code": "def wsgi_simple_responder(result: Union[(str, bytes)], handler: Callable[([Union[(str, bytes)]], WSGI_TUPLE_TYPE)], start_response: TYPE_WSGI_START_RESPONSE, status: str='200 OK', extraheaders: TYPE_WSGI_RESPONSE_HEADERS=None) -> TYPE_WSGI_APP_RESULT:\n    extraheaders = (extraheaders or [])\n    (contenttype, extraheaders2, output) = handler(result)\n    response_headers = [('Content-Type', contenttype), ('Content-Length', str(len(output)))]\n    response_headers.extend(extraheaders)\n    if (extraheaders2 is not None):\n        response_headers.extend(extraheaders2)\n    start_response(status, response_headers)\n    return [output]", "docstring": "Simple WSGI app.\n\nArgs:\nresult: the data to be processed by ``handler``\nhandler: a function returning a ``(contenttype, extraheaders, data)``\ntuple, e.g. ``text_result``, ``html_result``\nstart_response: standard WSGI ``start_response`` function\nstatus: status code (default ``\"200 OK\"``)\nextraheaders: optional extra HTTP headers\n\nReturns:\nWSGI application result", "source": "codesearchnet"}
{"code": "def DeserializeUnsigned(self, reader):\n    txtype = reader.ReadByte()\n    if (txtype != int.from_bytes(self.Type, 'little')):\n        raise Exception('incorrect type {}, wanted {}'.format(txtype, int.from_bytes(self.Type, 'little')))\n    self.DeserializeUnsignedWithoutType(reader)", "docstring": "Deserialize object.\n\nArgs:\nreader (neo.IO.BinaryReader):\n\nRaises:\nException: if transaction type is incorrect.", "source": "codesearchnet"}
{"code": "def save_aggregate_reports_to_splunk(self, aggregate_reports):\n    logger.debug('Saving aggregate reports to Splunk')\n    if (type(aggregate_reports) == dict):\n        aggregate_reports = [aggregate_reports]\n    if (len(aggregate_reports) < 1):\n        return\n    data = self._common_data.copy()\n    json_str = ''\n    for report in aggregate_reports:\n        for record in report['records']:\n            new_report = dict()\n            for metadata in report['report_metadata']:\n                new_report[metadata] = report['report_metadata'][metadata]\n            new_report['published_policy'] = report['policy_published']\n            new_report['source_ip_address'] = record['source']['ip_address']\n            new_report['source_country'] = record['source']['country']\n            new_report['source_reverse_dns'] = record['source']['reverse_dns']\n            new_report['source_base_domain'] = record['source']['base_domain']\n            new_report['message_count'] = record['count']\n            new_report['disposition'] = record['policy_evaluated']['disposition']\n            new_report['spf_aligned'] = record['alignment']['spf']\n            new_report['dkim_aligned'] = record['alignment']['dkim']\n            new_report['passed_dmarc'] = record['alignment']['dmarc']\n            new_report['header_from'] = record['identifiers']['header_from']\n            new_report['envelope_from'] = record['identifiers']['envelope_from']\n            if ('dkim' in record['auth_results']):\n                new_report['dkim_results'] = record['auth_results']['dkim']\n            if ('spf' in record['auth_results']):\n                new_report['spf_results'] = record['auth_results']['spf']\n            data['sourcetype'] = 'dmarc:aggregate'\n            timestamp = human_timestamp_to_timestamp(new_report['begin_date'])\n            data['time'] = timestamp\n            data['event'] = new_report.copy()\n            json_str += '{0}\\n'.format(json.dumps(data))\n    if (not self.session.verify):\n        logger.debug('Skipping certificate verification for Splunk HEC')\n    try:\n        response = self.session.post(self.url, data=json_str, timeout=self.timeout)\n        response = response.json()\n    except Exception as e:\n        raise SplunkError(e.__str__())\n    if (response['code'] != 0):\n        raise SplunkError(response['text'])", "docstring": "Saves aggregate DMARC reports to Splunk\n\nArgs:\naggregate_reports: A list of aggregate report dictionaries\nto save in Splunk", "source": "codesearchnet"}
{"code": "def __ne__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.not_equal, tf.float32)", "docstring": "Returns a TensorFluent for the not-equal relational operator.\n\nArgs:\nself: The first operand.\nother: The second operand.", "source": "juraj-google-style"}
{"code": "def collect_variables(self, variables: MultisetOfVariables) -> None:\n    if (self.variable_name is not None):\n        variables.add(self.variable_name)", "docstring": "Recursively adds all variables occuring in the expression to the given multiset.\n\nThis is used internally by `variables`. Needs to be overwritten by inheriting container expression classes.\nThis method can be used when gathering the `variables` of multiple expressions, because only one multiset\nneeds to be created and that is more efficient.\n\nArgs:\nvariables:\nMultiset of variables. All variables contained in the expression are recursively added to this multiset.", "source": "codesearchnet"}
{"code": "def apply_strain(self, strain):\n    s = ((1 + np.array(strain)) * np.eye(3))\n    self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)", "docstring": "Apply a strain to the lattice.\n\nArgs:\nstrain (float or list): Amount of strain to apply. Can be a float,\nor a sequence of 3 numbers. E.g., 0.01 means all lattice\nvectors are increased by 1%. This is equivalent to calling\nmodify_lattice with a lattice with lattice parameters that\nare 1% larger.", "source": "codesearchnet"}
{"code": "def form_out(self, _form=None):\n    _form = (_form or self.object_form)\n    self.output['forms'] = _form.serialize()\n    self._add_meta_props(_form)\n    self.output['forms']['grouping'] = _form.Meta.grouping\n    self.output['forms']['constraints'] = _form.Meta.constraints\n    self._patch_form(self.output['forms'])\n    self.set_client_cmd('form')", "docstring": "Renders form. Applies form modifiers, then writes\nresult to response payload. If supplied, given form\nobject instance will be used instead of view's\ndefault ObjectForm.\n\nArgs:\n_form (:py:attr:`~zengine.forms.json_form.JsonForm`):\nForm object to override `self.object_form`", "source": "codesearchnet"}
{"code": "def _pack(formatstring, value):\n    _checkString(formatstring, description='formatstring', minlength=1)\n    try:\n        result = struct.pack(formatstring, value)\n    except:\n        errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'\n        errortext += ' Value: {0!r} Struct format code is: {1}'\n        raise ValueError(errortext.format(value, formatstring))\n    if (sys.version_info[0] > 2):\n        return str(result, encoding='latin1')\n    return result", "docstring": "Pack a value into a bytestring.\n\nUses the built-in :mod:`struct` Python module.\n\nArgs:\n* formatstring (str): String for the packing. See the :mod:`struct` module for details.\n* value (depends on formatstring): The value to be packed\n\nReturns:\nA bytestring (str).\n\nRaises:\nValueError\n\nNote that the :mod:`struct` module produces byte buffers for Python3,\nbut bytestrings for Python2. This is compensated for automatically.", "source": "codesearchnet"}
{"code": "def __init__(self, timestamp=None):\n    \n    super(PosixTimeInNanoseconds, self).__init__()\n    self._precision = definitions.PRECISION_1_NANOSECOND\n    self._timestamp = timestamp", "docstring": "Initializes a POSIX timestamp in nanoseconds.\n\nArgs:\ntimestamp (Optional[int]): POSIX timestamp in nanoseconds.", "source": "juraj-google-style"}
{"code": "def add(self, *args: Any, **kwargs: Any) -> Optional[Callable]:\n    self.items = {**self.items, **{m.__name__: validate(m) for m in args}, **{k: validate(v) for (k, v) in kwargs.items()}}\n    if len(args):\n        return args[0]\n    return None", "docstring": "Register a function to the list.\n\nArgs:\n*args: Set/Sequence of positional arguments.\n**kwargs: Mapping of named arguments.\n\nRaises:\nAttributeError: Raised if the method being added has no name. (i.e. it has\nno `__name__` property, and no `name` argument was given.)\n\nExamples:\nmethods = Methods()\n@methods.add\ndef subtract(minuend, subtrahend):\nreturn minuend - subtrahend", "source": "codesearchnet"}
{"code": "def adist(self, codes):\n        \n\n        assert codes.ndim == 2\n        N, M = codes.shape\n        assert M == self.dtable.shape[0]\n\n        \n        dists = np.sum(self.dtable[range(M), codes], axis=1)\n\n        \n        \n        \n        \n        \n\n        return dists", "docstring": "Given PQ-codes, compute Asymmetric Distances between the query (self.dtable)\nand the PQ-codes.\n\nArgs:\ncodes (np.ndarray): PQ codes with shape=(N, M) and\ndtype=pq.code_dtype where pq is a pq instance that creates the codes\n\nReturns:\nnp.ndarray: Asymmetric Distances with shape=(N, ) and dtype=np.float32", "source": "juraj-google-style"}
{"code": "def ReleaseFileSystem(self, file_system):\n    \n    identifier, cache_value = self._file_system_cache.GetCacheValueByObject(\n        file_system)\n\n    if not identifier:\n      raise RuntimeError('Object not cached.')\n\n    if not cache_value:\n      raise RuntimeError('Invalid cache value.')\n\n    self._file_system_cache.ReleaseObject(identifier)\n\n    result = cache_value.IsDereferenced()\n    if result:\n      self._file_system_cache.RemoveObject(identifier)\n\n    return result", "docstring": "Releases a cached file system object.\n\nArgs:\nfile_system (FileSystem): file system object.\n\nReturns:\nbool: True if the file system object can be closed.\n\nRaises:\nPathSpecError: if the path specification is incorrect.\nRuntimeError: if the file system object is not cached or an inconsistency\nis detected in the cache.", "source": "juraj-google-style"}
{"code": "def _has_data(self):\n    return 'end' in self.result and 'sum' in self.result['end']", "docstring": "Checks if the iperf result has valid throughput data.\n\nReturns:\nTrue if the result contains throughput data. False otherwise.", "source": "github-repos"}
{"code": "def pretty_print_config_to_json(self, services, hostname=None, x_google_api_name=False):\n    descriptor = self.get_openapi_dict(services, hostname, x_google_api_name=x_google_api_name)\n    return json.dumps(descriptor, sort_keys=True, indent=2, separators=(',', ': '))", "docstring": "JSON string description of a protorpc.remote.Service in OpenAPI format.\n\nArgs:\nservices: Either a single protorpc.remote.Service or a list of them\nthat implements an api/version.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\nstring, The OpenAPI descriptor document as a JSON string.", "source": "codesearchnet"}
{"code": "def _curvature_range(self):\n    self._curv_win = tf.get_variable('curv_win', dtype=tf.float32, trainable=False, shape=[self.curvature_window_width], initializer=tf.zeros_initializer)\n    self._curv_win = tf.scatter_update(self._curv_win, (self._step % self.curvature_window_width), tf.log(self._grad_norm_squared))\n    valid_window = tf.slice(self._curv_win, tf.constant([0]), tf.expand_dims(tf.minimum(tf.constant(self.curvature_window_width), (self._step + 1)), dim=0))\n    self._h_min_t = tf.reduce_min(valid_window)\n    self._h_max_t = tf.reduce_max(valid_window)\n    curv_range_ops = []\n    with tf.control_dependencies([self._h_min_t, self._h_max_t]):\n        avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])\n        with tf.control_dependencies([avg_op]):\n            self._h_min = tf.exp(tf.identity(self._moving_averager.average(self._h_min_t)))\n            self._h_max = tf.exp(tf.identity(self._moving_averager.average(self._h_max_t)))\n            if self._sparsity_debias:\n                self._h_min *= self._sparsity_avg\n                self._h_max *= self._sparsity_avg\n    curv_range_ops.append(avg_op)\n    return curv_range_ops", "docstring": "Curvature range.\n\nReturns:\nh_max_t, h_min_t ops", "source": "codesearchnet"}
{"code": "def _create_file_if_needed(filename):\n    if os.path.exists(filename):\n        return False\n    else:\n        open(filename, 'a+b').close()\n        logger.info('Credential file {0} created'.format(filename))\n        return True", "docstring": "Creates the an empty file if it does not already exist.\n\nReturns:\nTrue if the file was created, False otherwise.", "source": "codesearchnet"}
{"code": "def get_repo_url(pypirc, repository):\n    \n    pypirc = os.path.abspath(os.path.expanduser(pypirc))\n    pypi_config = base.PyPIConfig(pypirc)\n    repo_config = pypi_config.get_repo_config(repository)\n    if repo_config:\n        return repo_config.get_clean_url()\n    else:\n        return base.RepositoryURL(repository)", "docstring": "Fetch the RepositoryURL for a given repository, reading info from pypirc.\n\nWill try to find the repository in the .pypirc, including username/password.\n\nArgs:\npypirc (str): path to the .pypirc config file\nrepository (str): URL or alias for the repository\n\nReturns:\nbase.RepositoryURL for the repository", "source": "juraj-google-style"}
{"code": "def AddPathSegment(self, path_segment, scan_object):\n    if (path_segment in self._path_segments):\n        raise ValueError('Path segment already set.')\n    if isinstance(scan_object, PathFilterScanTreeNode):\n        scan_object.parent = self\n    self._path_segments[path_segment] = scan_object", "docstring": "Adds a path segment.\n\nArgs:\npath_segment: a string containing the path segment.\nscan_object: a scan object, either a scan tree sub node (instance of\nPathFilterScanTreeNode) or a string containing a path.\n\nRaises:\nValueError: if the node already contains a scan object for\nthe path segment.", "source": "codesearchnet"}
{"code": "def transactional(func, args, kwds, **options):\n    return transactional_async.wrapped_decorator(func, args, kwds, **options).get_result()", "docstring": "Decorator to make a function automatically run in a transaction.\n\nArgs:\n**ctx_options: Transaction options (see transaction(), but propagation\ndefault to TransactionOptions.ALLOWED).\n\nThis supports two forms:\n\n(1) Vanilla:\n@transactional\ndef callback(arg):\n...\n\n(2) With options:\n@transactional(retries=1)\ndef callback(arg):\n...", "source": "codesearchnet"}
{"code": "def GetTestConfigs():\n    test_configs = [('NDHWC', False), ('NDHWC', True)]\n    if test.is_gpu_available(cuda_only=True):\n        test_configs += [('NCDHW', True)]\n    return test_configs", "docstring": "Get all the valid tests configs to run.\n\nReturns:\nall the valid test configs as tuples of data_format and use_gpu.", "source": "github-repos"}
{"code": "def parse_exception(line):\n    m = RAISES_REGEX.match(line)\n    if (m is None):\n        raise CartoucheSyntaxError('Cartouche: Invalid argument syntax \"{line}\" for Raises block'.format(line=line))\n    return (m.group(2), m.group(1))", "docstring": "Parse the first line of a Cartouche exception description.\n\nArgs:\nline (str): A single line Cartouche exception description.\n\nReturns:\nA 2-tuple containing the exception type and the first line of the description.", "source": "codesearchnet"}
{"code": "def write_uint64(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sQ' % endian, value)", "docstring": "Pack the value as an unsigned integer and write 8 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    regvalue = event_values.get('regvalue', {})\n    string_parts = []\n    for key, value in sorted(regvalue.items()):\n      string_parts.append('{0:s}: {1!s}'.format(key, value))\n    event_values['text'] = ' '.join(string_parts)\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def process(self, feed_item):\n    if feed_item.get(FieldMap.ADVERTISER_ID, None) and feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None):\n        if not self._key_exists(feed_item.get(FieldMap.ADVERTISER_ID, None), feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None)):\n            self._create_key(feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None), 'OBJECT_ADVERTISER', feed_item.get(FieldMap.ADVERTISER_ID, None))\n        object_type = feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_OBJECT_TYPE, None)\n        entity_id = feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_OBJECT_ID, None)\n        if object_type and len(object_type) > 7:\n            entity = object_type[7:]\n            translated_id = store.translate(entity, entity_id)\n            entity_id = translated_id or entity_id\n        self._create_key(feed_item.get(FieldMap.DYNAMIC_TARGETING_KEY_NAME, None), object_type, entity_id)\n        feed_item[FieldMap.DYNAMIC_TARGETING_KEY_OBJECT_ID] = entity_id\n    else:\n        raise Exception('Dynamic targeting key, %s and %s are required' % (FieldMap.ADVERTISER_ID, FieldMap.DYNAMIC_TARGETING_KEY_NAME))", "docstring": "Processes a Bulkdozer feed item.\n\nThis method identifies if the dyanmic targeting key already exists in CM, if\nit doesn't it creates it associated with the advertiser, and then inserts an\nassociation with the identified object.\n\nArgs:\nfeed_item: Bulkdozer feed item to process.\n\nReturns:\nNewly created or updated CM object.", "source": "github-repos"}
{"code": "def _read_content_or_path(content_or_path):\n    \n    if \"\\n\" in content_or_path.strip():\n        return content_or_path\n\n    if not os.path.exists(content_or_path):\n        raise IOError(\"File '%s' doesn't exists!\" % content_or_path)\n\n    with open(content_or_path) as f:\n        return f.read()", "docstring": "If `content_or_path` contains ``\\\\n``, return it. Else assume, that it is\npath and read file at that path.\n\nArgs:\ncontent_or_path (str): Content or path to the file.\n\nReturns:\nstr: Content.\n\nRaises:\nIOError: whhen the file is not found.", "source": "juraj-google-style"}
{"code": "def Generate(self, *args):", "docstring": "Generage a valid value.\n\nArgs:\n*args: External arguments necessary for generation.\n\nReturns: The generated value.", "source": "github-repos"}
{"code": "def check_version_info(redis_client):\n    redis_reply = redis_client.get('VERSION_INFO')\n    if (redis_reply is None):\n        return\n    true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))\n    version_info = _compute_version_info()\n    if (version_info != true_version_info):\n        node_ip_address = ray.services.get_node_ip_address()\n        error_message = (((((((((((((('Version mismatch: The cluster was started with:\\n    Ray: ' + true_version_info[0]) + '\\n    Python: ') + true_version_info[1]) + '\\n    Pyarrow: ') + str(true_version_info[2])) + '\\nThis process on node ') + node_ip_address) + ' was started with:') + '\\n    Ray: ') + version_info[0]) + '\\n    Python: ') + version_info[1]) + '\\n    Pyarrow: ') + str(version_info[2]))\n        if (version_info[:2] != true_version_info[:2]):\n            raise Exception(error_message)\n        else:\n            logger.warning(error_message)", "docstring": "Check if various version info of this process is correct.\n\nThis will be used to detect if workers or drivers are started using\ndifferent versions of Python, pyarrow, or Ray. If the version\ninformation is not present in Redis, then no check is done.\n\nArgs:\nredis_client: A client for the primary Redis shard.\n\nRaises:\nException: An exception is raised if there is a version mismatch.", "source": "codesearchnet"}
{"code": "def merge(self, other):\n        \n        if self.m != other.m or self.p != other.p:\n            raise ValueError(\"Cannot merge HyperLogLog with different\\\n                    precisions.\")\n        self.reg = np.maximum(self.reg, other.reg)", "docstring": "Merge the other HyperLogLog with this one, making this the union of the\ntwo.\n\nArgs:\nother (datasketch.HyperLogLog):", "source": "juraj-google-style"}
{"code": "def edit_distance_2(self, word):\n        \n        word = word.lower()\n        return [\n            e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)\n        ]", "docstring": "Compute all strings that are two edits away from `word` using only\nthe letters in the corpus\n\nArgs:\nword (str): The word for which to calculate the edit distance\nReturns:\nset: The set of strings that are edit distance two from the \\\nprovided word", "source": "juraj-google-style"}
{"code": "def create_sonos_playlist_from_queue(self, title):\n        \n        \n        \n        \n        response = self.avTransport.SaveQueue([\n            ('InstanceID', 0),\n            ('Title', title),\n            ('ObjectID', '')\n        ])\n        item_id = response['AssignedObjectID']\n        obj_id = item_id.split(':', 2)[1]\n        uri = \"file:\n        res = [DidlResource(uri=uri, protocol_info=\"x-rincon-playlist:*:*:*\")]\n        return DidlPlaylistContainer(\n            resources=res, title=title, parent_id='SQ:', item_id=item_id)", "docstring": "Create a new Sonos playlist from the current queue.\n\nArgs:\ntitle: Name of the playlist\n\n:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`", "source": "juraj-google-style"}
{"code": "def line_init(xo: int, yo: int, xd: int, yd: int) -> None:\n    \n    lib.TCOD_line_init(xo, yo, xd, yd)", "docstring": "Initilize a line whose points will be returned by `line_step`.\n\nThis function does not return anything on its own.\n\nDoes not include the origin point.\n\nArgs:\nxo (int): X starting point.\nyo (int): Y starting point.\nxd (int): X destination point.\nyd (int): Y destination point.\n\n.. deprecated:: 2.0\nUse `line_iter` instead.", "source": "juraj-google-style"}
{"code": "def _init_exception_logging(self, app):\n        \n        enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)\n\n        if not enabled:\n            return\n\n        exception_telemetry_client = TelemetryClient(\n            self._key, telemetry_channel=self._channel)\n\n        @app.errorhandler(Exception)\n        def exception_handler(exception):\n            if HTTPException and isinstance(exception, HTTPException):\n                return exception\n\n            try:\n                raise exception\n            except Exception:\n                exception_telemetry_client.track_exception()\n            finally:\n                raise exception\n\n        self._exception_telemetry_client = exception_telemetry_client", "docstring": "Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``\nis set in the Flask config.\n\nArgs:\napp (flask.Flask). the Flask application for which to initialize the extension.", "source": "juraj-google-style"}
{"code": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmEmbedderOutput]:\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    realm_outputs = self.realm(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooler_output = realm_outputs[1]\n    projected_score = self.cls(pooler_output)\n    if not return_dict:\n        return (projected_score,) + realm_outputs[2:4]\n    else:\n        return RealmEmbedderOutput(projected_score=projected_score, hidden_states=realm_outputs.hidden_states, attentions=realm_outputs.attentions)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, RealmEmbedder\n>>> import torch\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/realm-cc-news-pretrained-embedder\")\n>>> model = RealmEmbedder.from_pretrained(\"google/realm-cc-news-pretrained-embedder\")\n\n>>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n\n>>> projected_score = outputs.projected_score\n```", "source": "github-repos"}
{"code": "def set_uri(self, uri, resource_nr=0, protocol_info=None):\n    try:\n        self.resources[resource_nr].uri = uri\n        if (protocol_info is not None):\n            self.resources[resource_nr].protocol_info = protocol_info\n    except IndexError:\n        if (protocol_info is None):\n            protocol_info = (uri[:uri.index(':')] + ':*:*:*')\n        self.resources.append(DidlResource(uri, protocol_info))", "docstring": "Set a resource uri for this instance. If no resource exists, create\na new one with the given protocol info.\n\nArgs:\nuri (str): The resource uri.\nresource_nr (int): The index of the resource on which to set the\nuri. If it does not exist, a new resource is added to the list.\nNote that by default, only the uri of the first resource is\nused for playing the item.\nprotocol_info (str): Protocol info for the resource. If none is\ngiven and the resource does not exist yet, a default protocol\ninfo is constructed as '[uri prefix]:*:*:*'.", "source": "codesearchnet"}
{"code": "def __rsub__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(other.value - self._value)", "docstring": "Returns the subtraction of `self` from `other`.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is the subtraction of `self` from `other`.", "source": "github-repos"}
{"code": "def MakeMixture(metapmf, name='mix'):\n    \n    mix = Pmf(name=name)\n    for pmf, p1 in metapmf.Items():\n        for x, p2 in pmf.Items():\n            mix.Incr(x, p1 * p2)\n    return mix", "docstring": "Make a mixture distribution.\n\nArgs:\nmetapmf: Pmf that maps from Pmfs to probs.\nname: string name for the new Pmf.\n\nReturns: Pmf object.", "source": "juraj-google-style"}
{"code": "def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):\n    cw = self.session.client('cloudwatch', region_name=bucket_region)\n    try:\n        obj_stats = cw.get_metric_statistics(Namespace='AWS/S3', MetricName=statistic, Dimensions=[{'Name': 'StorageType', 'Value': storage_type}, {'Name': 'BucketName', 'Value': bucket_name}], Period=86400, StartTime=(datetime.utcnow() - timedelta(days=days)), EndTime=datetime.utcnow(), Statistics=['Average'])\n        stat_value = (obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA')\n        return stat_value\n    except Exception as e:\n        self.log.error('Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name, bucket_name, e))\n    finally:\n        del cw", "docstring": "Returns datapoints from cloudwatch for bucket statistics.\n\nArgs:\nbucket_name `(str)`: The name of the bucket\nstatistic `(str)`: The statistic you want to fetch from\ndays `(int)`: Sample period for the statistic", "source": "codesearchnet"}
{"code": "def makedirs(path):\n    if path:\n        if (not os.path.exists(path)):\n            log.debug('makedirs({})'.format(path))\n            os.makedirs(path)\n        else:\n            realpath = os.path.realpath(path)\n            if (not os.path.isdir(realpath)):\n                raise ScriptWorkerException('makedirs: {} already exists and is not a directory!'.format(path))", "docstring": "Equivalent to mkdir -p.\n\nArgs:\npath (str): the path to mkdir -p\n\nRaises:\nScriptWorkerException: if path exists already and the realpath is not a dir.", "source": "codesearchnet"}
{"code": "def __hash__(self) -> int:\n    return self._text.__hash__()", "docstring": "Hash function.\n\nNOTE(daiyip): ConstStrKey shares the same hash with its text, which\nmakes it easy to lookup a dict of string by an ConstStrKey object, and\nvice versa.\n\nReturns:\nHash code.", "source": "github-repos"}
{"code": "def plot_waterfall(self, f_start=None, f_stop=None, if_id=0, logged=True, cb=True, MJD_time=False, **kwargs):\n        \n\n        plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)\n\n        \n        if self.header[b'foff'] < 0:\n            plot_data = plot_data[..., ::-1] \n            plot_f = plot_f[::-1]\n\n        if logged:\n            plot_data = db(plot_data)\n\n        \n        dec_fac_x, dec_fac_y = 1, 1\n        if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:\n            dec_fac_x = int(plot_data.shape[0] / MAX_IMSHOW_POINTS[0])\n\n        if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:\n            dec_fac_y =  int(plot_data.shape[1] /  MAX_IMSHOW_POINTS[1])\n\n        plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)\n\n        try:\n            plt.title(self.header[b'source_name'])\n        except KeyError:\n            plt.title(self.filename)\n\n        extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)\n\n        plt.imshow(plot_data,\n            aspect='auto',\n            origin='lower',\n            rasterized=True,\n            interpolation='nearest',\n            extent=extent,\n            cmap='viridis',\n            **kwargs\n        )\n        if cb:\n            plt.colorbar()\n        plt.xlabel(\"Frequency [MHz]\")\n        if MJD_time:\n            plt.ylabel(\"Time [MJD]\")\n        else:\n            plt.ylabel(\"Time [s]\")", "docstring": "Plot waterfall of data\n\nArgs:\nf_start (float): start frequency, in MHz\nf_stop (float): stop frequency, in MHz\nlogged (bool): Plot in linear (False) or dB units (True),\ncb (bool): for plotting the colorbar\nkwargs: keyword args to be passed to matplotlib imshow()", "source": "juraj-google-style"}
{"code": "def RegisterParser(cls, parser_class):\n    parser_name = parser_class.NAME.lower()\n    if (parser_name in cls._parser_classes):\n        raise KeyError('Parser class already set for name: {0:s}.'.format(parser_class.NAME))\n    cls._parser_classes[parser_name] = parser_class", "docstring": "Registers a parser class.\n\nThe parser classes are identified based on their lower case name.\n\nArgs:\nparser_class (type): parser class (subclass of BaseParser).\n\nRaises:\nKeyError: if parser class is already set for the corresponding name.", "source": "codesearchnet"}
{"code": "def intersection(boxes1, boxes2):\n  \n  [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)\n  [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)\n\n  all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))\n  all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))\n  intersect_heights = np.maximum(\n      np.zeros(all_pairs_max_ymin.shape, dtype='f4'),\n      all_pairs_min_ymax - all_pairs_max_ymin)\n  all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))\n  all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))\n  intersect_widths = np.maximum(\n      np.zeros(all_pairs_max_xmin.shape, dtype='f4'),\n      all_pairs_min_xmax - all_pairs_max_xmin)\n  return intersect_heights * intersect_widths", "docstring": "Compute pairwise intersection areas between boxes.\n\nArgs:\nboxes1: a numpy array with shape [N, 4] holding N boxes\nboxes2: a numpy array with shape [M, 4] holding M boxes\n\nReturns:\na numpy array with shape [N*M] representing pairwise intersection area", "source": "juraj-google-style"}
{"code": "def make_sine_surface(dims=DEFAULT_DIMS, offset=0.5, scale=1.0):\n    \n    gradients = (np.array(make_gradients(dims)) - offset) * scale * np.pi\n    return np.sin(np.linalg.norm(gradients, axis=0))", "docstring": "Makes a surface from the 3D sine function.\n\nArgs:\ndims (pair): the dimensions of the surface to create\noffset (float): an offset applied to the function\nscale (float): a scale applied to the sine frequency\n\nReturns:\nsurface: A surface.", "source": "juraj-google-style"}
{"code": "def _FractionalAvgPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):\n    return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0, op.outputs[1], op.outputs[2], op.get_attr('overlapping'))", "docstring": "Returns gradient for FractionalAvgPool.\n\nSince FractionalAvgPool has three outputs, there are three gradients passed in\nfor each of the outputs. Only the first one is useful, the other two gradients\nare empty.\n\nArgs:\nop: The FractionalAvgPoolOp.\ngrad_0: Gradient with respect to op.outputs[0]\nunused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.\nunused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.\n\nReturns:\nInput backprop for FractionalAvgPool op.", "source": "github-repos"}
{"code": "def assert_that(actual, matcher, label='assert_that', reify_windows=False, use_global_window=True):\n    assert isinstance(actual, pvalue.PCollection), '%s is not a supported type for Beam assert' % type(actual)\n    pipeline = actual.pipeline\n    if getattr(actual.pipeline, 'result', None):\n        raise RuntimeError('assert_that must be used within a beam.Pipeline context. ' + 'Prior to Beam 2.60.0, asserts outside of the context of a pipeline ' + 'were silently ignored, starting with Beam 2.60.0 this is no longer ' + 'allowed. To fix, move your assert_that call into your pipeline ' + 'context so that it is added before the pipeline is run. For more ' + 'information, see https:\n    if label in pipeline.applied_labels:\n        label_idx = 2\n        while f'{label}_{label_idx}' in pipeline.applied_labels:\n            label_idx += 1\n        label = f'{label}_{label_idx}'\n    if isinstance(matcher, _EqualToPerWindowMatcher):\n        reify_windows = True\n        use_global_window = True\n\n    class ReifyTimestampWindow(DoFn):\n\n        def process(self, element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam, pane_info=DoFn.PaneInfoParam):\n            return [TestWindowedValue(element, timestamp, [window], pane_info)]\n\n    class AddWindow(DoFn):\n\n        def process(self, element, window=DoFn.WindowParam):\n            yield (element, window)\n\n    class AssertThat(PTransform):\n\n        def expand(self, pcoll):\n            if reify_windows:\n                pcoll = pcoll | ParDo(ReifyTimestampWindow())\n            keyed_singleton = pcoll.pipeline | Create([(None, None)])\n            keyed_singleton.is_bounded = True\n            if use_global_window:\n                pcoll = pcoll | WindowInto(window.GlobalWindows())\n            keyed_actual = pcoll | 'ToVoidKey' >> Map(lambda v: (None, v))\n            keyed_actual.is_bounded = True\n            plain_actual = (keyed_singleton, keyed_actual) | 'Group' >> CoGroupByKey() | 'Unkey' >> Map(lambda k_values: list(k_values[1][1]))\n            if not use_global_window:\n                plain_actual = plain_actual | 'AddWindow' >> ParDo(AddWindow())\n            return plain_actual | 'Match' >> Map(matcher)\n\n        def default_label(self):\n            return label\n    return actual | AssertThat()", "docstring": "A PTransform that checks a PCollection has an expected value.\n\nNote that assert_that should be used only for testing pipelines since the\ncheck relies on materializing the entire PCollection being checked.\n\nArgs:\nactual: A PCollection.\nmatcher: A matcher function taking as argument the actual value of a\nmaterialized PCollection. The matcher validates this actual value against\nexpectations and raises BeamAssertException if they are not met.\nlabel: Optional string label. This is needed in case several assert_that\ntransforms are introduced in the same pipeline.\nreify_windows: If True, matcher is passed a list of TestWindowedValue.\nuse_global_window: If False, matcher is passed a dictionary of\n(k, v) = (window, elements in the window).\n\nReturns:\nIgnored.", "source": "github-repos"}
{"code": "def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):\n    meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n    _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent)", "docstring": "Prints input and output TensorInfos.\n\nPrints the details of input and output TensorInfos for the SignatureDef mapped\nby the given signature_def_key.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect.\ntag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by\n','. For tag-set contains multiple tags, all tags must be passed in.\nsignature_def_key: A SignatureDef key string.\nindent: How far (in increments of 2 spaces) to indent each line of output.", "source": "github-repos"}
{"code": "def _add_to(self, db, index, item, default=OOSet):\n        \n        row = db.get(index, None)\n\n        if row is None:\n            row = default()\n            db[index] = row\n\n        row.add(item)", "docstring": "Add `item` to `db` under `index`. If `index` is not yet in `db`, create\nit using `default`.\n\nArgs:\ndb (dict-obj): Dict-like object used to connect to database.\nindex (str): Index used to look in `db`.\nitem (obj): Persistent object, which may be stored in DB.\ndefault (func/obj): Reference to function/object, which will be\nused to create the object under `index`.\nDefault :class:`OOSet`.", "source": "juraj-google-style"}
{"code": "def _assert_struct_type(self, struct, name, types, path=None, extra_info=None):\n    wanted_yaml_typenames = set()\n    for t in types:\n        wanted_yaml_typenames.add(self._get_yaml_typename(t))\n    wanted_yaml_typenames = ' or '.join(wanted_yaml_typenames)\n    actual_yaml_typename = self._get_yaml_typename(type(struct))\n    if (not isinstance(struct, types)):\n        err = []\n        if path:\n            err.append(self._format_error_path((path + [name])))\n        err.append('  Expected {w} value for \"{n}\", got value of type {a}: \"{v}\"'.format(w=wanted_yaml_typenames, n=name, a=actual_yaml_typename, v=struct))\n        if extra_info:\n            err.append(('Tip: ' + extra_info))\n        raise exceptions.YamlTypeError('\\n'.join(err))", "docstring": "Asserts that given structure is of any of given types.\n\nArgs:\nstruct: structure to check\nname: displayable name of the checked structure (e.g. \"run_foo\" for section run_foo)\ntypes: list/tuple of types that are allowed for given struct\npath: list with a source file as a first element and previous names\n(as in name argument to this method) as other elements\nextra_info: extra information to print if error is found (e.g. hint how to fix this)\nRaises:\nYamlTypeError: if given struct is not of any given type; error message contains\nsource file and a \"path\" (e.g. args -> somearg -> flags) specifying\nwhere the problem is", "source": "codesearchnet"}
{"code": "def writeProject(self, session, directory, name):\n    self.project_directory = directory\n    with tmp_chdir(directory):\n        batchDirectory = self._getBatchDirectory(directory)\n        replaceParamFile = self.replaceParamFile\n        self._writeReplacementFiles(session=session, directory=directory, name=name)\n        self.write(session=session, directory=directory, name=name)\n        self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)\n        self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)\n        self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile)\n        self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name)", "docstring": "Write all files for a project from the database to file.\n\nUse this method to write all GsshaPy supported files back into their native file formats. If writing to execute\nthe model, increase efficiency by using the writeInput method to write only the file needed to run the model.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\ndirectory (str): Directory where the files will be written.\nname (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project\nnaming convention will be given this name with the appropriate extension (e.g.: 'example.prj',\n'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original\nfile names.", "source": "codesearchnet"}
{"code": "def _PathStripPrefix(self, path):\n    if (path.startswith('\\\\\\\\.\\\\') or path.startswith('\\\\\\\\?\\\\')):\n        if ((len(path) < 7) or (path[5] != ':') or (path[6] != self._PATH_SEPARATOR)):\n            return None\n        path = path[7:]\n    elif path.startswith('\\\\\\\\'):\n        return None\n    elif ((len(path) >= 3) and (path[1] == ':')):\n        if (path[2] != self._PATH_SEPARATOR):\n            return None\n        path = path[3:]\n    elif path.startswith('\\\\'):\n        path = path[1:]\n    else:\n        return None\n    return path", "docstring": "Strips the prefix from a path.\n\nArgs:\npath (str): Windows path to strip the prefix from.\n\nReturns:\nstr: path without the prefix or None if the path is not supported.", "source": "codesearchnet"}
{"code": "def get_example_from_tensor_dict(self, tensor_dict):\n    raise NotImplementedError()", "docstring": "Gets an example from a dict with tensorflow tensors.\n\nArgs:\ntensor_dict: Keys and values should match the corresponding Glue\ntensorflow_dataset examples.", "source": "github-repos"}
{"code": "def back_up(self, epoch):\n    backend.set_value(self._ckpt_saved_epoch, epoch)\n    if self.write_checkpoint_manager.save():\n        distributed_file_utils.remove_temp_dirpath(self.write_checkpoint_manager.directory, self._model.distribute_strategy)", "docstring": "Back up the current state of training into a checkpoint file.\n\nArgs:\nepoch: The current epoch information to be saved.", "source": "github-repos"}
{"code": "def get_vertices_to_edges_matrix(self, want_xyz=True):\n        \n        import numpy as np\n        import scipy.sparse as sp\n\n        vpe = np.asarray(self.vertices_per_edge, dtype=np.int32)\n        IS = np.repeat(np.arange(len(vpe)), 2)\n        JS = vpe.flatten()\n        data = np.ones_like(vpe)\n        data[:, 1] = -1\n        data = data.flatten()\n\n        if want_xyz:\n            IS = np.concatenate((IS*3, IS*3+1, IS*3+2))\n            JS = np.concatenate((JS*3, JS*3+1, JS*3+2))\n            data = np.concatenate((data, data, data))\n\n        ij = np.vstack((IS.flatten(), JS.flatten()))\n        return sp.csc_matrix((data, ij))", "docstring": "Returns a matrix M, which if multiplied by vertices,\ngives back edges (so \"e = M.dot(v)\"). Note that this generates\none edge per edge, *not* two edges per triangle.\n\nArgs:\nwant_xyz: if true, takes and returns xyz coordinates, otherwise\ntakes and returns x *or* y *or* z coordinates", "source": "juraj-google-style"}
{"code": "def get(self, po):\n    name = po.name\n    typ = po.typ\n    default = po.default\n    handler = getattr(self, '_get_{}'.format(typ), None)\n    if (handler is None):\n        raise ValueError(typ)\n    self.seen.add(name)\n    if (not self.parser.has_option(self.section, name)):\n        if (default is REQUIRED):\n            raise NameError(self.section, name)\n        if isinstance(default, INHERIT_GLOBAL):\n            return handler('global', name, default.default)\n    return handler(self.section, name, default)", "docstring": "Lookup value for a PluginOption instance\n\nArgs:\npo: PluginOption\n\nReturns: converted value", "source": "codesearchnet"}
{"code": "def __init__(self, maxsize, ttl, out_deque=None, **kw):\n        \n        super(DequeOutTTLCache, self).__init__(maxsize, ttl, **kw)\n        if out_deque is None:\n            out_deque = collections.deque()\n        elif not isinstance(out_deque, collections.deque):\n            raise ValueError(u'out_deque should be a collections.deque')\n        self._out_deque = out_deque\n        self._tracking = {}", "docstring": "Constructor.\n\nArgs:\nmaxsize (int): the maximum number of entries in the queue\nttl (int): the ttl for entries added to the cache\nout_deque :class:`collections.deque`: a `deque` in which to add items\nthat expire from the cache\n**kw: the other keyword args supported by the constructor to\n:class:`cachetools.TTLCache`\n\nRaises:\nValueError: if out_deque is not a collections.deque", "source": "juraj-google-style"}
{"code": "def from_session(cls, sess, input_tensors, output_tensors):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_SESSION)\n    graph_def = _freeze_graph(sess, input_tensors, output_tensors)\n    return cls(graph_def, input_tensors, output_tensors, experimental_debug_info_func=_build_debug_info_func(sess.graph))", "docstring": "Creates a TFLiteConverter class from a TensorFlow Session.\n\nArgs:\nsess: TensorFlow Session.\ninput_tensors: List of input tensors. Type and shape are computed using\n`foo.shape` and `foo.dtype`.\noutput_tensors: List of output tensors (only .name is used from this).\n\nReturns:\nTFLiteConverter class.", "source": "github-repos"}
{"code": "def __init__(self, emt_id='', emt_pass=''):\n        \n        if emt_id and emt_pass:\n            self.initialize(emt_id, emt_pass)", "docstring": "Initialize the interface attributes.\n\nInitialization may also be performed at a later point by manually\ncalling the ``initialize()`` method.\n\nArgs:\nemt_id (str): ID given by the server upon registration\nemt_pass (str): Token given by the server upon registration", "source": "juraj-google-style"}
{"code": "def marginal_counts(counts, meas_qubits):\n    num_of_qubits = len(list(counts.keys())[0])\n    qs = sorted(meas_qubits, reverse=True)\n    meas_keys = count_keys(len(qs))\n    rgx = [reduce((lambda x, y: ((key[qs.index(y)] if (y in qs) else '\\\\d') + x)), range(num_of_qubits), '') for key in meas_keys]\n    meas_counts = []\n    for m in rgx:\n        c = 0\n        for (key, val) in counts.items():\n            if match(m, key):\n                c += val\n        meas_counts.append(c)\n    return dict(zip(meas_keys, meas_counts))", "docstring": "Compute the marginal counts for a subset of measured qubits.\n\nArgs:\ncounts (dict): the counts returned from a backend ({str: int}).\nmeas_qubits (list[int]): the qubits to return the marginal\ncounts distribution for.\n\nReturns:\ndict: A counts dict for the meas_qubits.abs\nExample: if `counts = {'00': 10, '01': 5}`\n`marginal_counts(counts, [0])` returns `{'0': 15, '1': 0}`.\n`marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.", "source": "codesearchnet"}
{"code": "def transform(self, X):\n        \n\n        for i, col in enumerate(X.columns):\n            X_col = self._transform_col(X[col], i)\n            if X_col is not None:\n                if i == 0:\n                    X_new = X_col\n                else:\n                    X_new = sparse.hstack((X_new, X_col))\n\n            logger.debug('{} --> {} features'.format(\n                col, self.label_encoder.label_maxes[i])\n            )\n\n        return X_new", "docstring": "Encode categorical columns into sparse matrix with one-hot-encoding.\n\nArgs:\nX (pandas.DataFrame): categorical columns to encode\n\nReturns:\nX_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical\nvariables into dummy variables", "source": "juraj-google-style"}
{"code": "def call(self, args: Args[FrameType]) -> _HasReturnT:", "docstring": "Calls this function with the given arguments.\n\nArgs:\nargs: The function arguments.\n\nReturns:\nAn object with information about the result of the function call, with a\nget_return_value() method that retrieves the return value.", "source": "github-repos"}
{"code": "def _get_client_address(self, req):\n        \n        try:\n            forwarded_for = req.get_header('X-Forwarded-For', True)\n            return forwarded_for.split(',')[0].strip()\n        except (KeyError, HTTPMissingHeader):\n            return (\n                req.env.get('REMOTE_ADDR') if self.remote_address_fallback\n                else None\n            )", "docstring": "Get address from ``X-Forwarded-For`` header or use remote address.\n\nRemote address is used if the ``X-Forwarded-For`` header is not\navailable. Note that this may not be safe to depend on both without\nproper authorization backend.\n\nArgs:\nreq (falcon.Request): falcon.Request object.\n\nReturns:\nstr: client address.", "source": "juraj-google-style"}
{"code": "def create_blob(profile, content):\n    \n    resource = \"/blobs\"\n    payload = {\"content\": content}\n    data = api.post_request(profile, resource, payload)\n    return data", "docstring": "Create a blob.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\ncontent\nThe (UTF-8 encoded) content to create in the blob.\n\nReturns:\nA dict with data about the newly created blob.", "source": "juraj-google-style"}
{"code": "def value_container(self, value):\n    raise NotImplementedError('must be implemented in descendants')", "docstring": "Returns the container that this per-replica `value` belongs to.\n\nArgs:\nvalue: A value returned by `run()` or a variable created in `scope()`.\n\nReturns:\nA container that `value` belongs to.\nIf value does not belong to any container (including the case of\ncontainer having been destroyed), returns the value itself.\n`value in experimental_local_results(value_container(value))` will\nalways be true.", "source": "github-repos"}
{"code": "def UpdateUser(self, user, ssh_keys):\n    \n    if not bool(USER_REGEX.match(user)):\n      self.logger.warning('Invalid user account name %s.', user)\n      return False\n    if not self._GetUser(user):\n      \n      \n      if not (self._AddUser(user)\n              and self._UpdateUserGroups(user, self.groups)):\n        return False\n    \n    if not self._UpdateSudoer(user, sudoer=True):\n      return False\n\n    \n    \n    \n    pw_entry = self._GetUser(user)\n    if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin':\n      message = 'Not updating user %s. User set `nologin` as login shell.'\n      self.logger.debug(message, user)\n      return True\n\n    try:\n      self._UpdateAuthorizedKeys(user, ssh_keys)\n    except (IOError, OSError) as e:\n      message = 'Could not update the authorized keys file for user %s. %s.'\n      self.logger.warning(message, user, str(e))\n      return False\n    else:\n      return True", "docstring": "Update a Linux user with authorized SSH keys.\n\nArgs:\nuser: string, the name of the Linux user account.\nssh_keys: list, the SSH key strings associated with the user.\n\nReturns:\nbool, True if the user account updated successfully.", "source": "juraj-google-style"}
{"code": "def sign(mv):\n    \n    md5 = hashlib.md5()\n    update_hash(md5, mv)\n    return md5.digest()", "docstring": "Obtains a signature for a `MetricValue`\n\nArgs:\nmv (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricValue`): a\nMetricValue that's part of an operation\n\nReturns:\nstring: a unique signature for that operation", "source": "juraj-google-style"}
{"code": "def _save_tf_record_dataset(self, repr_ds: RepresentativeDataset, signature_def_key: str) -> _RepresentativeDatasetFile:\n    if not context.executing_eagerly():\n        with session.Session() as sess:\n            repr_ds = replace_tensors_by_numpy_ndarrays(repr_ds, sess)\n    expected_input_keys = self.expected_input_key_map.get(signature_def_key, None)\n    tfrecord_file_path = self.path_map[signature_def_key]\n    with python_io.TFRecordWriter(tfrecord_file_path) as writer:\n        for repr_sample in repr_ds:\n            if expected_input_keys is not None and set(repr_sample.keys()) != expected_input_keys:\n                raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(repr_sample.keys())}. Please provide correct input keys for representative samples.')\n            sample = _RepresentativeDataSample()\n            for input_name, input_value in repr_sample.items():\n                sample.tensor_proto_inputs[input_name].CopyFrom(tensor_util.make_tensor_proto(input_value))\n            writer.write(sample.SerializeToString())\n    logging.info('Saved representative dataset for signature def: %s to: %s', signature_def_key, tfrecord_file_path)\n    return _RepresentativeDatasetFile(tfrecord_file_path=str(tfrecord_file_path))", "docstring": "Saves `repr_ds` to a TFRecord file.\n\nEach sample in `repr_ds` is serialized as `RepresentativeDataSample`.\n\nArgs:\nrepr_ds: `RepresentativeDataset` to save.\nsignature_def_key: The signature def key associated with `repr_ds`.\n\nReturns:\na RepresentativeDatasetFile instance contains the path to the saved file.\n\nRaises:\nKeyError: If the set of input keys in the dataset samples doesn't match\nthe set of expected input keys.", "source": "github-repos"}
{"code": "def get_sessions(self, app_path):\n        \n        if app_path not in self._applications:\n            raise ValueError(\"Application %s does not exist on this server\" % app_path)\n        return list(self._applications[app_path].sessions)", "docstring": "Gets all currently active sessions for an application.\n\nArgs:\napp_path (str) :\nThe configured application path for the application to return\nsessions for.\n\nReturns:\nlist[ServerSession]", "source": "juraj-google-style"}
{"code": "def email(self, name, subject, header, body, **kwargs):\n    group_obj = Email(name, subject, header, body, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Email data to Batch object.\n\nArgs:\nname (str): The name for this Group.\nsubject (str): The subject for this Email.\nheader (str): The header for this Email.\nbody (str): The body for this Email.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nfrom_addr (str, kwargs): The **from** address for this Email.\nto_addr (str, kwargs): The **to** address for this Email.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Email.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n        \n        self.action_type = UBInt16(enum_ref=ActionType)\n        self.action_type.unpack(buff, offset)\n\n        for cls in ActionHeader.__subclasses__():\n            if self.action_type.value in cls.get_allowed_types():\n                self.__class__ = cls\n                break\n\n        super().unpack(buff, offset)", "docstring": "Unpack a binary message into this object's attributes.\n\nUnpack the binary value *buff* and update this object attributes based\non the results.\n\nArgs:\nbuff (bytes): Binary data package to be unpacked.\noffset (int): Where to begin unpacking.\n\nRaises:\nException: If there is a struct unpacking error.", "source": "juraj-google-style"}
{"code": "def group_molecules(self, mol_list):\n    mol_hash = [(i, self._mapper.get_molecule_hash(m)) for (i, m) in enumerate(mol_list)]\n    mol_hash.sort(key=(lambda x: x[1]))\n    raw_groups = tuple([tuple([m[0] for m in g]) for (k, g) in itertools.groupby(mol_hash, key=(lambda x: x[1]))])\n    group_indices = []\n    for rg in raw_groups:\n        mol_eq_test = [(p[0], p[1], self.fit(mol_list[p[0]], mol_list[p[1]])) for p in itertools.combinations(sorted(rg), 2)]\n        mol_eq = set([(p[0], p[1]) for p in mol_eq_test if p[2]])\n        not_alone_mols = set(itertools.chain.from_iterable(mol_eq))\n        alone_mols = (set(rg) - not_alone_mols)\n        group_indices.extend([[m] for m in alone_mols])\n        while (len(not_alone_mols) > 0):\n            current_group = {not_alone_mols.pop()}\n            while (len(not_alone_mols) > 0):\n                candidate_pairs = set([tuple(sorted(p)) for p in itertools.product(current_group, not_alone_mols)])\n                mutual_pairs = (candidate_pairs & mol_eq)\n                if (len(mutual_pairs) == 0):\n                    break\n                mutual_mols = set(itertools.chain.from_iterable(mutual_pairs))\n                current_group |= mutual_mols\n                not_alone_mols -= mutual_mols\n            group_indices.append(sorted(current_group))\n    group_indices.sort(key=(lambda x: (len(x), (- x[0]))), reverse=True)\n    all_groups = [[mol_list[i] for i in g] for g in group_indices]\n    return all_groups", "docstring": "Group molecules by structural equality.\n\nArgs:\nmol_list: List of OpenBabel OBMol or pymatgen objects\n\nReturns:\nA list of lists of matched molecules\nAssumption: if s1=s2 and s2=s3, then s1=s3\nThis may not be true for small tolerances.", "source": "codesearchnet"}
{"code": "def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):\n  \n  del targets  \n  with tf.variable_scope(\"image_channel_compress_modality\"):\n    hidden_size = model_hparams.hidden_size\n    img_len = model_hparams.img_len\n    channels = 3  \n    batch = common_layers.shape_list(body_output)[0]\n    x = tf.layers.conv2d(\n        body_output,\n        hidden_size * channels,\n        kernel_size=(1, 1),\n        strides=(1, 1),\n        padding=\"VALID\",\n        activation=tf.nn.relu,\n        name=\"decompress_conv\")\n    x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size])\n    x = common_layers.layer_preprocess(x, model_hparams)\n    x = tf.layers.dense(x,\n                        vocab_size,\n                        use_bias=True,\n                        activation=None,\n                        name=\"output_conv\")\n    x = tf.reshape(\n        x, [batch, img_len, img_len, channels, vocab_size])\n    return x", "docstring": "Transforms body output to return logits.\n\nArgs:\nbody_output: Tensor of shape [batch, img_len, img_len, depth].\ntargets:\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nTensor of shape [batch, img_len, img_len, channels, vocab_size].", "source": "juraj-google-style"}
{"code": "def add_compound(self, compound):\n        \n        logger.debug(\"Adding compound {0} to variant {1}\".format(\n            compound, self['variant_id']))\n        self['compounds'].append(compound)", "docstring": "Add the information of a compound variant\n\nThis adds a compound dict to variant['compounds']\n\nArgs:\ncompound (dict): A compound dictionary", "source": "juraj-google-style"}
{"code": "def process_streamer(self, streamer, callback=None):\n    index = streamer.index\n    if (index in self._in_progress_streamers):\n        raise InternalError('You cannot add a streamer again until it has finished streaming.')\n    queue_item = QueuedStreamer(streamer, callback)\n    self._in_progress_streamers.add(index)\n    self._logger.debug('Streamer %d: queued to send %d readings', index, queue_item.initial_count)\n    self._queue.put_nowait(queue_item)", "docstring": "Start streaming a streamer.\n\nArgs:\nstreamer (DataStreamer): The streamer itself.\ncallback (callable): An optional callable that will be called as:\ncallable(index, success, highest_id_received_from_other_side)", "source": "codesearchnet"}
{"code": "def make_connection(transport, **kwargs):\n    if (transport not in TRANSPORTS):\n        raise TypeError('invalid transport specified')\n    klass = TRANSPORTS[transport]\n    return klass(**kwargs)", "docstring": "Creates a connection instance based on the transport\n\nThis function creates the EapiConnection object based on the desired\ntransport.  It looks up the transport class in the TRANSPORTS global\ndictionary.\n\nArgs:\ntransport (string): The transport to use to create the instance.\n**kwargs: Arbitrary keyword arguments.\n\nReturns:\nAn instance of a connection object based on the transport\n\nRaises:\nTypeError: A TypeError is raised if the transport keyword is not\nfound in the list (keys) of available transports.", "source": "codesearchnet"}
{"code": "def ragged_assert_compatible_and_get_flat_values(values, mask=None):\n    if isinstance(values, list):\n        is_all_ragged = all((isinstance(rt, ragged_tensor.RaggedTensor) for rt in values))\n        is_any_ragged = any((isinstance(rt, ragged_tensor.RaggedTensor) for rt in values))\n    else:\n        is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor)\n        is_any_ragged = is_all_ragged\n    if is_all_ragged and (mask is None or isinstance(mask, ragged_tensor.RaggedTensor)):\n        to_be_stripped = False\n        if not isinstance(values, list):\n            values = [values]\n            to_be_stripped = True\n        nested_row_split_list = [rt.nested_row_splits for rt in values]\n        assertion_list = _assert_splits_match(nested_row_split_list)\n        if isinstance(mask, ragged_tensor.RaggedTensor):\n            assertion_list_for_mask = _assert_splits_match([nested_row_split_list[0], mask.nested_row_splits])\n            with ops.control_dependencies(assertion_list_for_mask):\n                mask = array_ops.expand_dims(mask.flat_values, -1)\n        flat_values = []\n        for value in values:\n            with ops.control_dependencies(assertion_list):\n                flat_values.append(array_ops.expand_dims(value.flat_values, -1))\n        values = flat_values[0] if to_be_stripped else flat_values\n    elif is_any_ragged:\n        raise TypeError('One of the inputs does not have acceptable types.')\n    elif isinstance(mask, ragged_tensor.RaggedTensor):\n        raise TypeError('Ragged mask is not allowed with non-ragged inputs.')\n    return (values, mask)", "docstring": "If ragged, it checks the compatibility and then returns the flat_values.\n\nNote: If two tensors are dense, it does not check their compatibility.\nNote: Although two ragged tensors with different ragged ranks could have\nidentical overall rank and dimension sizes and hence be compatible,\nwe do not support those cases.\nArgs:\nvalues: A list of potentially ragged tensor of the same ragged_rank.\nmask: A potentially ragged tensor of the same ragged_rank as elements in\nValues.\n\nReturns:\nA tuple in which the first element is the list of tensors and the second\nis the mask tensor. ([Values], mask). Mask and the element in Values\nare equal to the flat_values of the input arguments (if they were ragged).", "source": "github-repos"}
{"code": "def download_tile(map_layer, zoom, x, y):\n    try:\n        tile_url = map_layer.get_tile_url(zoom, x, y)\n        (tmp_file, headers) = urllib.request.urlretrieve(tile_url)\n        return ((x, y), tmp_file)\n    except URLError as e:\n        app.logger.info('Error downloading tile x={}, y={}, z={} for layer {}: {}'.format(x, y, zoom, map_layer, e.reason))\n        return ((x, y), pkg_resources.resource_filename('geos', 'static/empty_tile.png'))", "docstring": "Download a given tile from the tile server.\n\nArgs:\nmap_layer (MapLayer): MapLayer object which provides the tile-url.\nzoom (int): zoom level\nx (int): Tile-x-coordinate\ny (int): Tile-y-coordinate\n\nReturns:\nfile: temporary file containing the downloaded image.", "source": "codesearchnet"}
{"code": "def is50or60(msg, spd_ref, trk_ref, alt_ref):\n    \n    def vxy(v, angle):\n        vx = v * np.sin(np.radians(angle))\n        vy = v * np.cos(np.radians(angle))\n        return vx, vy\n\n    if not (bds50.is50(msg) and bds60.is60(msg)):\n        return None\n\n    h50 = bds50.trk50(msg)\n    v50 = bds50.gs50(msg)\n\n    if h50 is None or v50 is None:\n        return 'BDS50,BDS60'\n\n    h60 = bds60.hdg60(msg)\n    m60 = bds60.mach60(msg)\n    i60 = bds60.ias60(msg)\n\n    if h60 is None or (m60 is None and i60 is None):\n        return 'BDS50,BDS60'\n\n    m60 = np.nan if m60 is None else m60\n    i60 = np.nan if i60 is None else i60\n\n    XY5 = vxy(v50*aero.kts, h50)\n    XY6m = vxy(aero.mach2tas(m60, alt_ref*aero.ft), h60)\n    XY6i = vxy(aero.cas2tas(i60*aero.kts, alt_ref*aero.ft), h60)\n\n    allbds = ['BDS50', 'BDS60', 'BDS60']\n\n    X = np.array([XY5, XY6m, XY6i])\n    Mu = np.array(vxy(spd_ref*aero.kts, trk_ref))\n\n    \n    \n    \n    \n\n    \n    \n    try:\n        dist = np.linalg.norm(X-Mu, axis=1)\n        BDS = allbds[np.nanargmin(dist)]\n    except ValueError:\n        return 'BDS50,BDS60'\n\n    return BDS", "docstring": "Use reference ground speed and trk to determine BDS50 and DBS60.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\nspd_ref (float): reference speed (ADS-B ground speed), kts\ntrk_ref (float): reference track (ADS-B track angle), deg\nalt_ref (float): reference altitude (ADS-B altitude), ft\n\nReturns:\nString or None: BDS version, or possible versions, or None if nothing matches.", "source": "juraj-google-style"}
{"code": "def from_string(key, password='notasecret'):\n        \n        parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))\n        if parsed_pem_key:\n            pkey = RSA.importKey(parsed_pem_key)\n        else:\n            raise NotImplementedError(\n                'No key in PEM format was detected. This implementation '\n                'can only use the PyCrypto library for keys in PEM '\n                'format.')\n        return PyCryptoSigner(pkey)", "docstring": "Construct a Signer instance from a string.\n\nArgs:\nkey: string, private key in PEM format.\npassword: string, password for private key file. Unused for PEM\nfiles.\n\nReturns:\nSigner instance.\n\nRaises:\nNotImplementedError if the key isn't in PEM format.", "source": "juraj-google-style"}
{"code": "def _KillProcess(self, pid):\n    \n    if sys.platform.startswith('win'):\n      process_terminate = 1\n      handle = ctypes.windll.kernel32.OpenProcess(\n          process_terminate, False, pid)\n      ctypes.windll.kernel32.TerminateProcess(handle, -1)\n      ctypes.windll.kernel32.CloseHandle(handle)\n\n    else:\n      try:\n        os.kill(pid, signal.SIGKILL)\n      except OSError as exception:\n        logger.error('Unable to kill process {0:d} with error: {1!s}'.format(\n            pid, exception))", "docstring": "Issues a SIGKILL or equivalent to the process.\n\nArgs:\npid (int): process identifier (PID).", "source": "juraj-google-style"}
{"code": "def _isbn_cleaner(fn):\n    \n    @wraps(fn)\n    def wrapper(isbn):\n        return fn(_clean_isbn(isbn))\n\n    return wrapper", "docstring": "Decorator for calling other functions from this module.\n\nPurpose of this decorator is to clean the ISBN string from garbage and\nreturn list of digits.\n\nArgs:\nfn (function): function in which will be :func:`_clean_isbn(isbn)` call\nwrapped.", "source": "juraj-google-style"}
{"code": "def download(dest_file_path: [List[Union[(str, Path)]]], source_url: str, force_download=True):\n    if isinstance(dest_file_path, list):\n        dest_file_paths = [Path(path) for path in dest_file_path]\n    else:\n        dest_file_paths = [Path(dest_file_path).absolute()]\n    if (not force_download):\n        to_check = list(dest_file_paths)\n        dest_file_paths = []\n        for p in to_check:\n            if p.exists():\n                log.info(f'File already exists in {p}')\n            else:\n                dest_file_paths.append(p)\n    if dest_file_paths:\n        cache_dir = os.getenv('DP_CACHE_DIR')\n        cached_exists = False\n        if cache_dir:\n            first_dest_path = (Path(cache_dir) / md5(source_url.encode('utf8')).hexdigest()[:15])\n            cached_exists = first_dest_path.exists()\n        else:\n            first_dest_path = dest_file_paths.pop()\n        if (not cached_exists):\n            first_dest_path.parent.mkdir(parents=True, exist_ok=True)\n            simple_download(source_url, first_dest_path)\n        else:\n            log.info(f'Found cached {source_url} in {first_dest_path}')\n        for dest_path in dest_file_paths:\n            dest_path.parent.mkdir(parents=True, exist_ok=True)\n            shutil.copy(str(first_dest_path), str(dest_path))", "docstring": "Download a file from URL to one or several target locations\n\nArgs:\ndest_file_path: path or list of paths to the file destination files (including file name)\nsource_url: the source URL\nforce_download: download file if it already exists, or not", "source": "codesearchnet"}
{"code": "def __init__(self, corruption_type=None, severity=1, **kwargs):\n    \n    super(Imagenet2012CorruptedConfig, self).__init__(**kwargs)\n    self.corruption_type = corruption_type\n    self.severity = severity", "docstring": "BuilderConfig for Imagenet2012Corrupted.\n\nArgs:\ncorruption_type: string, must be one of the items in TYPE_LIST.\nseverity: integer, bewteen 1 and 5.\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def StopService(service_name, service_binary_name=None):\n    try:\n        status = win32serviceutil.QueryServiceStatus(service_name)[1]\n    except pywintypes.error as e:\n        if (getattr(e, 'winerror', None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST):\n            logging.debug(\"Tried to stop '%s', but the service is not installed.\", service_name)\n        else:\n            logging.exception(\"Unable to query status of service '%s':\", service_name)\n        return\n    for _ in range(20):\n        if (status == win32service.SERVICE_STOPPED):\n            break\n        elif (status != win32service.SERVICE_STOP_PENDING):\n            try:\n                win32serviceutil.StopService(service_name)\n            except pywintypes.error:\n                logging.exception(\"Unable to stop service '%s':\", service_name)\n        time.sleep(1)\n        status = win32serviceutil.QueryServiceStatus(service_name)[1]\n    if (status == win32service.SERVICE_STOPPED):\n        logging.info(\"Service '%s' stopped.\", service_name)\n        return\n    elif (not service_binary_name):\n        return\n    output = subprocess.check_output(['taskkill', '/im', ('%s*' % service_binary_name), '/f'], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n    logging.debug('%s', output)\n    time.sleep(2)", "docstring": "Stop a Windows service with the given name.\n\nArgs:\nservice_name: string The name of the service to be stopped.\nservice_binary_name: string If given, also kill this binary as a best effort\nfallback solution.", "source": "codesearchnet"}
{"code": "def verify_link_in_task_graph(chain, decision_link, task_link):\n    \n    log.info(\"Verifying the {} {} task definition is part of the {} {} task graph...\".format(\n        task_link.name, task_link.task_id, decision_link.name, decision_link.task_id\n    ))\n    if task_link.task_id in decision_link.task_graph:\n        graph_defn = deepcopy(decision_link.task_graph[task_link.task_id])\n        verify_task_in_task_graph(task_link, graph_defn)\n        log.info(\"Found {} in the graph; it's a match\".format(task_link.task_id))\n        return\n    raise_on_errors([\"Can't find task {} {} in {} {} task-graph.json!\".format(\n        task_link.name, task_link.task_id, decision_link.name, decision_link.task_id\n    )])", "docstring": "Compare the runtime task definition against the decision task graph.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\ndecision_link (LinkOfTrust): the decision task link\ntask_link (LinkOfTrust): the task link we're testing\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def group(text, size):\n    \n    if size <= 0:\n        raise ValueError(\"n must be a positive integer\")\n\n    return [text[i:i + size] for i in range(0, len(text), size)]", "docstring": "Group ``text`` into blocks of ``size``.\n\nExample:\n>>> group(\"test\", 2)\n['te', 'st']\n\nArgs:\ntext (str): text to separate\nsize (int): size of groups to split the text into\n\nReturns:\nList of n-sized groups of text\n\nRaises:\nValueError: If n is non positive", "source": "juraj-google-style"}
{"code": "def __init__(self, unique_identifier=None, attribute_names=None):\n        \n\n        super(GetAttributeListResponsePayload, self).__init__(\n            enums.Tags.RESPONSE_PAYLOAD\n        )\n\n        self._unique_identifier = None\n        self._attribute_names = list()\n\n        self.unique_identifier = unique_identifier\n        self.attribute_names = attribute_names", "docstring": "Construct a GetAttributeList response payload.\n\nArgs:\nunique_identifier (string): The ID of the managed object with\nwhich the retrieved attribute names should be associated.\nOptional, defaults to None.\nattribute_names: A list of strings identifying the names of the\nattributes associated with the managed object. Optional,\ndefaults to None.", "source": "juraj-google-style"}
{"code": "def collapse_phenotypes(self,input_phenotype_labels,output_phenotype_label,verbose=True):\n        \n        if isinstance(input_phenotype_labels,str): input_phenotype_labels = [input_phenotype_labels]\n        bad_phenotypes = set(input_phenotype_labels)-set(self.phenotypes)\n        if len(bad_phenotypes) > 0: raise ValueError(\"Error phenotype(s) \"+str(bad_phenotypes)+\" are not in the data.\")\n        data = self.copy()\n        if len(input_phenotype_labels) == 0: return data\n        def _swap_in(d,inputs,output):\n            \n            overlap = set(d.keys()).intersection(inputs)\n            \n            if len(overlap) == 0: return d\n            keepers = [(k,v) for k,v in d.items() if k not in inputs]\n            \n            return dict(keepers+\\\n                        [(output_phenotype_label,max([d[x] for x in overlap]))])\n        data['phenotype_calls'] = data.apply(lambda x:\n            _swap_in(x['phenotype_calls'],input_phenotype_labels,output_phenotype_label)\n            ,1)\n        def _set_label(d):\n            vals = [k for k,v in d.items() if v==1]\n            return np.nan if len(vals) == 0 else vals[0]\n        data['phenotype_label'] = data.apply(lambda x:\n                _set_label(x['phenotype_calls']),1)\n        return data", "docstring": "Rename one or more input phenotypes to a single output phenotype\n\nArgs:\ninput_phenotype_labels (list): A str name or list of names to combine\noutput_phenotype_label (list): A str name to change the phenotype names to\nverbose (bool): output more details\n\nReturns:\nCellDataFrame: The CellDataFrame modified.", "source": "juraj-google-style"}
{"code": "def GetFormatsWithSignatures(cls, parser_filter_expression=None):\n    specification_store = specification.FormatSpecificationStore()\n    remainder_list = []\n    for (parser_name, parser_class) in cls.GetParsers(parser_filter_expression=parser_filter_expression):\n        format_specification = parser_class.GetFormatSpecification()\n        if (format_specification and format_specification.signatures):\n            specification_store.AddSpecification(format_specification)\n            if (parser_name == 'plist'):\n                remainder_list.append(parser_name)\n        else:\n            remainder_list.append(parser_name)\n    return (specification_store, remainder_list)", "docstring": "Retrieves the format specifications that have signatures.\n\nThis method will create a specification store for parsers that define\na format specification with signatures and a list of parser names for\nthose that do not.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\ntuple: containing:\n\n* FormatSpecificationStore: format specifications with signatures.\n* list[str]: names of parsers that do not have format specifications with\nsignatures, or have signatures but also need to be applied 'brute\nforce'.", "source": "codesearchnet"}
{"code": "def flip_back(output_flipped, flip_pairs, target_type='gaussian-heatmap'):\n    if target_type not in ['gaussian-heatmap', 'combined-target']:\n        raise ValueError('target_type should be gaussian-heatmap or combined-target')\n    if output_flipped.ndim != 4:\n        raise ValueError('output_flipped should be [batch_size, num_keypoints, height, width]')\n    batch_size, num_keypoints, height, width = output_flipped.shape\n    channels = 1\n    if target_type == 'combined-target':\n        channels = 3\n        output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]\n    output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width)\n    output_flipped_back = output_flipped.clone()\n    for left, right in flip_pairs.tolist():\n        output_flipped_back[:, left, ...] = output_flipped[:, right, ...]\n        output_flipped_back[:, right, ...] = output_flipped[:, left, ...]\n    output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width))\n    output_flipped_back = output_flipped_back.flip(-1)\n    return output_flipped_back", "docstring": "Flip the flipped heatmaps back to the original form.\n\nArgs:\noutput_flipped (`torch.tensor` of shape `(batch_size, num_keypoints, height, width)`):\nThe output heatmaps obtained from the flipped images.\nflip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`):\nPairs of keypoints which are mirrored (for example, left ear -- right ear).\ntarget_type (`str`, *optional*, defaults to `\"gaussian-heatmap\"`):\nTarget type to use. Can be gaussian-heatmap or combined-target.\ngaussian-heatmap: Classification target with gaussian distribution.\ncombined-target: The combination of classification target (response map) and regression target (offset map).\nPaper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n\nReturns:\ntorch.Tensor: heatmaps that flipped back to the original image", "source": "github-repos"}
{"code": "def __setitem__(self, key, value):\n        \n        if isinstance(value, np.ndarray):\n            dtype = str(value.dtype)\n            weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]\n            self.unmaterialized_cols[key] = SeriesWeld(\n                value,\n                weld_type,\n                self,\n                key\n            )\n        elif isinstance(value, SeriesWeld):\n            self.unmaterialized_cols[key] = value\n        elif isinstance(value, LazyOpResult):\n            self.unmaterialized_cols[key] = SeriesWeld(\n                value.expr,\n                value.weld_type,\n                self,\n                key\n            )", "docstring": "Summary\n\nArgs:\nkey (TYPE): Description\nvalue (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def do(self,\n           resource,\n           method,\n           params=None,\n           data=None,\n           json=None,\n           headers=None):\n        \n        uri = \"{0}/{1}\".format(self._api_base, resource)\n        if not params:\n            params = {}\n        params.update({'token': self._token})\n\n        req = Request(\n            method=method,\n            url=uri,\n            params=params,\n            headers=headers,\n            data=data,\n            json=json)\n        s = Session()\n        prepped = s.prepare_request(req)\n        resp = s.send(prepped)\n\n        return RTMResponse(resp)", "docstring": "Does the request job\n\nArgs:\nresource(str): resource uri(relative path)\nmethod(str): HTTP method\nparams(dict): uri queries\ndata(dict): HTTP body(form)\njson(dict): HTTP body(json)\nheaders(dict): HTTP headers\n\nReturns:\nRTMResponse", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.capture_realtime = channel.unary_stream(\n        '/ClearlyServer/capture_realtime',\n        request_serializer=protos_dot_clearly__pb2.CaptureRequest.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.RealtimeEventMessage.FromString,\n        )\n    self.filter_tasks = channel.unary_stream(\n        '/ClearlyServer/filter_tasks',\n        request_serializer=protos_dot_clearly__pb2.FilterTasksRequest.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.TaskMessage.FromString,\n        )\n    self.filter_workers = channel.unary_stream(\n        '/ClearlyServer/filter_workers',\n        request_serializer=protos_dot_clearly__pb2.FilterWorkersRequest.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.WorkerMessage.FromString,\n        )\n    self.find_task = channel.unary_unary(\n        '/ClearlyServer/find_task',\n        request_serializer=protos_dot_clearly__pb2.FindTaskRequest.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.TaskMessage.FromString,\n        )\n    self.seen_tasks = channel.unary_unary(\n        '/ClearlyServer/seen_tasks',\n        request_serializer=protos_dot_clearly__pb2.Empty.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.SeenTasksMessage.FromString,\n        )\n    self.reset_tasks = channel.unary_unary(\n        '/ClearlyServer/reset_tasks',\n        request_serializer=protos_dot_clearly__pb2.Empty.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.Empty.FromString,\n        )\n    self.get_stats = channel.unary_unary(\n        '/ClearlyServer/get_stats',\n        request_serializer=protos_dot_clearly__pb2.Empty.SerializeToString,\n        response_deserializer=protos_dot_clearly__pb2.StatsMessage.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _on_disconnect(self):\n        \n\n        self._logger.info(\"Connection to device %s was interrupted\", self.connection_string)\n        self.connection_interrupted = True", "docstring": "Callback when a device is disconnected unexpectedly.\n\nArgs:\nadapter_id (int): An ID for the adapter that was connected to the device\nconnection_id (int): An ID for the connection that has become disconnected", "source": "juraj-google-style"}
{"code": "def ToScriptHash(data, unhex=True):\n    if ((len(data) > 1) and unhex):\n        data = binascii.unhexlify(data)\n    return UInt160(data=binascii.unhexlify(bytes(Crypto.Hash160(data), encoding='utf-8')))", "docstring": "Get a script hash of the data.\n\nArgs:\ndata (bytes): data to hash.\nunhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'\n\nReturns:\nUInt160: script hash.", "source": "codesearchnet"}
{"code": "def parse_arguments(argv):\n  \n  parser = argparse.ArgumentParser(\n      formatter_class=argparse.RawDescriptionHelpFormatter,\n      description=textwrap.dedent())\n\n  source_group = parser.add_mutually_exclusive_group(required=True)\n\n  source_group.add_argument(\n      '--csv',\n      metavar='FILE',\n      required=False,\n      action='append',\n      help='CSV data to transform.')\n\n  source_group.add_argument(\n      '--bigquery',\n      metavar='PROJECT_ID.DATASET.TABLE_NAME',\n      type=str,\n      required=False,\n      help=('Must be in the form `project.dataset.table_name`. BigQuery '\n            'data to transform'))\n\n  parser.add_argument(\n      '--analysis',\n      metavar='ANALYSIS_OUTPUT_DIR',\n      required=True,\n      help='The output folder of analyze')\n\n  parser.add_argument(\n      '--prefix',\n      metavar='OUTPUT_FILENAME_PREFIX',\n      required=True,\n      type=str)\n\n  parser.add_argument(\n      '--output',\n      metavar='DIR',\n      default=None,\n      required=True,\n      help=('Google Cloud Storage or Local directory in which '\n            'to place outputs.'))\n\n  parser.add_argument(\n      '--shuffle',\n      action='store_true',\n      default=False,\n      help='If used, data source is shuffled. This is recommended for training data.')\n\n  parser.add_argument(\n      '--batch-size',\n      metavar='N',\n      type=int,\n      default=100,\n      help='Larger values increase performance and peak memory usage.')\n\n  cloud_group = parser.add_argument_group(\n      title='Cloud Parameters',\n      description='These parameters are only used if --cloud is used.')\n\n  cloud_group.add_argument(\n      '--cloud',\n      action='store_true',\n      help='Run preprocessing on the cloud.')\n\n  cloud_group.add_argument(\n      '--job-name',\n      type=str,\n      help='Unique dataflow job name.')\n\n  cloud_group.add_argument(\n      '--project-id',\n      help='The project to which the job will be submitted.')\n\n  cloud_group.add_argument(\n      '--num-workers',\n      metavar='N',\n      type=int,\n      default=0,\n      help='Set to 0 to use the default size determined by the Dataflow service.')\n\n  cloud_group.add_argument(\n      '--worker-machine-type',\n      metavar='NAME',\n      type=str,\n      help='A machine name from https:\n           ' If not given, the service uses the default machine type.')\n\n  cloud_group.add_argument(\n      '--async',\n      action='store_true',\n      help='If used, this script returns before the dataflow job is completed.')\n\n  args = parser.parse_args(args=argv[1:])\n\n  if args.cloud and not args.project_id:\n    raise ValueError('--project-id is needed for --cloud')\n\n  if args.async and not args.cloud:\n    raise ValueError('--async should only be used with --cloud')\n\n  if not args.job_name:\n    args.job_name = ('dataflow-job-{}'.format(\n        datetime.datetime.now().strftime('%Y%m%d%H%M%S')))\n  return args", "docstring": "Parse command line arguments.\nArgs:\nargv: list of command line arguments including program name.\nReturns:\nThe parsed arguments as returned by argparse.ArgumentParser.", "source": "juraj-google-style"}
{"code": "def ReadClientPostingLists(self, keywords):\n    \n\n    start_time, end_time, filtered_keywords, _ = self._AnalyzeKeywords(keywords)\n\n    \n    \n    return self.ReadPostingLists(\n        filtered_keywords,\n        start_time=start_time.AsMicrosecondsSinceEpoch(),\n        end_time=end_time.AsMicrosecondsSinceEpoch())", "docstring": "Looks up all clients associated with any of the given keywords.\n\nArgs:\nkeywords: A list of keywords we are interested in.\n\nReturns:\nA dict mapping each keyword to a list of matching clients.", "source": "juraj-google-style"}
{"code": "def gene_to_panels(self, case_obj):\n    LOG.info('Building gene to panels')\n    gene_dict = {}\n    for panel_info in case_obj.get('panels', []):\n        panel_name = panel_info['panel_name']\n        panel_version = panel_info['version']\n        panel_obj = self.gene_panel(panel_name, version=panel_version)\n        if (not panel_obj):\n            LOG.warning('Panel: {0}, version {1} does not exist in database'.format(panel_name, panel_version))\n        for gene in panel_obj['genes']:\n            hgnc_id = gene['hgnc_id']\n            if (hgnc_id not in gene_dict):\n                gene_dict[hgnc_id] = set([panel_name])\n                continue\n            gene_dict[hgnc_id].add(panel_name)\n    LOG.info('Gene to panels done')\n    return gene_dict", "docstring": "Fetch all gene panels and group them by gene\n\nArgs:\ncase_obj(scout.models.Case)\nReturns:\ngene_dict(dict): A dictionary with gene as keys and a set of\npanel names as value", "source": "codesearchnet"}
{"code": "def get_publications():\n    data = DOWNER.download(URL)\n    dom = dhtmlparser.parseString(handle_encodnig(data))\n    book_list = dom.find('div', {'class': 'polozka'})\n    books = []\n    for book in book_list:\n        books.append(_process_book(book))\n    return books", "docstring": "Get list of publication offered by cpress.cz.\n\nReturns:\nlist: List of :class:`.Publication` objects.", "source": "codesearchnet"}
{"code": "def __ne__(self, other):\n    \n    if not isinstance(other, SemanticTime):\n      return True\n\n    return self._SORT_ORDER != other._SORT_ORDER", "docstring": "Determines if the date time values are not equal to other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are not equal to other.", "source": "juraj-google-style"}
{"code": "async def put(self, cid):\n    if settings.SIGNATURE_VERIFICATION:\n        super().verify()\n    try:\n        body = json.loads(self.request.body)\n    except:\n        self.set_status(400)\n        self.write({'error': 400, 'reason': 'Unexpected data format. JSON required'})\n        raise tornado.web.Finish\n    public_key = body.get('public_key', None)\n    if isinstance(body['message'], str):\n        message = json.loads(body['message'])\n    elif isinstance(body['message'], dict):\n        message = body['message']\n    descr = message.get('description')\n    coinid = message.get('coinid')\n    if (not (coinid in settings.bridges.keys())):\n        self.set_status(400)\n        self.write({'error': 400, 'reason': 'Unknown coin id'})\n        raise tornado.web.Finish\n    if (not all([public_key, descr, coinid])):\n        self.set_status(400)\n        self.write({'error': 400, 'reason': 'Missed required fields'})\n        raise tornado.web.Finish\n    owneraddr = self.account.validator[coinid](public_key)\n    response = (await self.account.blockchain.ownerbycid(cid=cid))\n    if isinstance(response, dict):\n        if ('error' in response.keys()):\n            error_code = response['error']\n            self.set_status(error_code)\n            self.write({'error': error_code, 'reason': response['error']})\n            raise tornado.web.Finish\n    if (response != owneraddr):\n        self.set_status(403)\n        self.write({'error': 403, 'reason': 'Owner does not match.'})\n        raise tornado.web.Finish\n    fee = (await billing.update_description_fee(owneraddr=owneraddr, cid=cid, description=descr))\n    if (coinid in settings.bridges.keys()):\n        self.account.blockchain.setendpoint(settings.bridges[coinid])\n    else:\n        self.set_status(400)\n        self.write({'error': 400, 'reason': 'Invalid coinid'})\n        raise tornado.web.Finish\n    request = (await self.account.blockchain.setdescrforcid(cid=cid, descr=descr, owneraddr=owneraddr))\n    if ('error' in request.keys()):\n        self.set_status(request['error'])\n        self.write(request)\n        raise tornado.web.Finish\n    self.write({'cid': cid, 'description': descr, 'coinid': coinid, 'owneraddr': owneraddr})", "docstring": "Update description for content\n\nAccepts:\nQuery string args:\n- \"cid\" - int\nRequest body parameters:\n- message (signed dict):\n- \"description\" - str\n- \"coinid\" - str\n\nReturns:\ndict with following fields:\n- \"confirmed\": None\n- \"txid\" - str\n- \"description\" - str\n- \"content\" - str\n- \"read_access\" - int\n- \"write_access\" - int\n- \"cid\" - int\n- \"txid\" - str\n- \"seller_pubkey\" - str\n- \"seller_access_string\": None or str\n\nVerified: True", "source": "codesearchnet"}
{"code": "def get_samples(self, md5='', sha1='', sha256=''):\n        \n        params = {'api_key': self.api_key, 'username': self.username}\n        if md5:\n            params['c-md5'] = md5\n        if sha1:\n            params['c-sha1'] = sha1\n        if sha256:\n            params['c-sha256'] = sha256\n        r = requests.get('{0}/samples/'.format(self.url),\n                         params=params,\n                         verify=self.verify,\n                         proxies=self.proxies)\n        if r.status_code == 200:\n            result_data = json.loads(r.text)\n            if 'meta' in result_data:\n                if 'total_count' in result_data['meta']:\n                    if result_data['meta']['total_count'] > 0:\n                        return result_data\n        else:\n            log.error('Non-200 status code: {}'.format(r.status_code))\n        return None", "docstring": "Searches for a sample in CRITs. Currently only hashes allowed.\n\nArgs:\nmd5: md5sum\nsha1: sha1sum\nsha256: sha256sum\nReturns:\nJSON response or None if not found", "source": "juraj-google-style"}
{"code": "def get_age(dob: PotentialDatetimeType,\n            when: PotentialDatetimeType,\n            default: str = \"\") -> Union[int, str]:\n    \n    dob = coerce_to_pendulum_date(dob)\n    when = coerce_to_pendulum_date(when)\n    if dob is None or when is None:\n        return default\n    return (when - dob).years", "docstring": "Age (in whole years) at a particular date, or ``default``.\n\nArgs:\ndob: date of birth\nwhen: date/time at which to calculate age\ndefault: value to return if either input is ``None``\n\nReturns:\nage in whole years (rounded down), or ``default``", "source": "juraj-google-style"}
{"code": "def _SetCredentials(self, **kwds):\n        \n        args = {\n            'api_key': self._API_KEY,\n            'client': self,\n            'client_id': self._CLIENT_ID,\n            'client_secret': self._CLIENT_SECRET,\n            'package_name': self._PACKAGE,\n            'scopes': self._SCOPES,\n            'user_agent': self._USER_AGENT,\n        }\n        args.update(kwds)\n        \n        from apitools.base.py import credentials_lib\n        \n        \n        \n        \n        \n        self._credentials = credentials_lib.GetCredentials(**args)", "docstring": "Fetch credentials, and set them for this client.\n\nNote that we can't simply return credentials, since creating them\nmay involve side-effecting self.\n\nArgs:\n**kwds: Additional keyword arguments are passed on to GetCredentials.\n\nReturns:\nNone. Sets self._credentials.", "source": "juraj-google-style"}
{"code": "def sample_frames(self, video: 'torch.Tensor', frame_factor: int, min_frames: int, max_frames: int, metadata: Optional[Union[VideoMetadata, dict]]=None, num_frames: Optional[int]=None, fps: Optional[int]=None):\n    if fps is not None and num_frames is not None:\n        raise ValueError('`num_frames` and `fps` are mutually exclusive arguments, please use only one!')\n    num_frames = num_frames if num_frames is not None else self.num_frames\n    fps = fps if fps is not None else self.fps\n    total_num_frames = video.shape[0]\n    if num_frames is not None:\n        num_frames = round(num_frames / frame_factor) * frame_factor\n    elif fps is not None:\n        if metadata is None:\n            raise ValueError('Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video')\n        max_frames = math.floor(min(max_frames, total_num_frames) / frame_factor) * frame_factor\n        num_frames = total_num_frames / metadata['fps'] * fps\n        num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)\n        num_frames = math.floor(num_frames / frame_factor) * frame_factor\n    if num_frames > total_num_frames:\n        raise ValueError(f\"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. Decrease `num_frames` or `fps` for sampling.\")\n    if num_frames is not None:\n        indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()\n    else:\n        indices = torch.arange(0, total_num_frames).int()\n    video = video[indices].contiguous()\n    return video", "docstring": "Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.\nIf `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`\nand `fps` are mutually exclusive.\n\nArgs:\nvideo (`torch.Tensor`):\nVideo that need to be sampled.\nframe_factor (`int`):\nThe temporal patch size of the vision encoder. Number of sampled frames will be rounded to be divisible by frame factor.\nmin_frames (`int`):\nThe minimum number of frames that can be sampled.\nmax_frames (`int`):\nThe maximum number of frames that can be sampled.\nmetadata (`VideoMetadata`, *optional*):\nMetadata of the video containing information about total duration, fps and total number of frames.\nnum_frames (`int`, *optional*):\nMaximum number of frames to sample. Defaults to `self.num_frames`.\nfps (`int`, *optional*):\nTarget frames to sample per second. Defaults to `self.fps`.\n\nReturns:\ntorch.Tensor:\nSampled video frames.", "source": "github-repos"}
{"code": "def _CheckCacheFileForMatch(self, cache_filename, scopes):\n    creds = {'scopes': (sorted(list(scopes)) if scopes else None), 'svc_acct_name': self.__service_account_name}\n    cache_file = _MultiProcessCacheFile(cache_filename)\n    try:\n        cached_creds_str = cache_file.LockedRead()\n        if (not cached_creds_str):\n            return None\n        cached_creds = json.loads(cached_creds_str)\n        if (creds['svc_acct_name'] == cached_creds['svc_acct_name']):\n            if (creds['scopes'] in (None, cached_creds['scopes'])):\n                return cached_creds['scopes']\n    except KeyboardInterrupt:\n        raise\n    except:\n        pass", "docstring": "Checks the cache file to see if it matches the given credentials.\n\nArgs:\ncache_filename: Cache filename to check.\nscopes: Scopes for the desired credentials.\n\nReturns:\nList of scopes (if cache matches) or None.", "source": "codesearchnet"}
{"code": "def zip(self, destination: typing.Union[(str, Path)]=None, encode: bool=True) -> str:\n    if encode:\n        self._encode()\n    if (destination is None):\n        destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')\n    else:\n        destination_path = elib.path.ensure_file(destination, must_exist=False)\n    LOGGER.debug('zipping mission to: %s', destination_path)\n    destination_path.write_bytes(dummy_miz)\n    with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:\n        for (root, _, items) in os.walk(self.temp_dir.absolute()):\n            for item in items:\n                item_abs_path = Path(root, item).absolute()\n                item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)\n                zip_file.write(item_abs_path, arcname=item_rel_path)\n    return str(destination_path)", "docstring": "Write mission, dictionary etc. to a MIZ file\n\nArgs:\ndestination: target MIZ file (if none, defaults to source MIZ + \"_EMIZ\"\n\nReturns: destination file", "source": "codesearchnet"}
{"code": "def engine_from_environment() -> Engine:\n    api_key = os.environ.get(ENV_API_KEY)\n    if (not api_key):\n        raise EnvironmentError('Environment variable {} is not set.'.format(ENV_API_KEY))\n    default_project_id = os.environ.get(ENV_DEFAULT_PROJECT_ID)\n    return Engine(api_key=api_key, default_project_id=default_project_id)", "docstring": "Returns an Engine instance configured using environment variables.\n\nIf the environment variables are set, but incorrect, an authentication\nfailure will occur when attempting to run jobs on the engine.\n\nRequired Environment Variables:\nQUANTUM_ENGINE_PROJECT: The name of a google cloud project, with the\nquantum engine enabled, that you have access to.\nQUANTUM_ENGINE_API_KEY: An API key for the google cloud project named\nby QUANTUM_ENGINE_PROJECT.\n\nRaises:\nEnvironmentError: The environment variables are not set.", "source": "codesearchnet"}
{"code": "def search_orcid(orcid):\n    \n    url = 'https:\n    r = requests.get(url, headers=headers)\n    if r.status_code != 200:\n        r.raise_for_status()\n    return r.json()", "docstring": "Search the ORCID public API\n\nSpecfically, return a dictionary with the personal details\n(name, etc.) of the person associated with the given ORCID\n\nArgs:\norcid (`str`): The ORCID to be searched\n\nReturns:\n`dict`: Dictionary with the JSON response from the API\n\nRaises:\n`~requests.HTTPError`: If the given ORCID cannot be found, an `~requests.HTTPError`\nis raised with status code 404", "source": "juraj-google-style"}
{"code": "def get_item(target, i, opts):\n    assert isinstance(opts, GetItemOpts)\n    if isinstance(target, tensor_array_ops.TensorArray):\n        return _tf_tensorarray_get_item(target, i)\n    elif tensor_util.is_tf_type(target):\n        if target.dtype == dtypes.variant:\n            return _tf_tensor_list_get_item(target, i, opts)\n        elif target.dtype == dtypes.string and target.shape.ndims == 0:\n            return _tf_tensor_string_get_item(target, i)\n        else:\n            return _tf_tensor_get_item(target, i)\n    else:\n        return _py_get_item(target, i)", "docstring": "The slice read operator (i.e. __getitem__).\n\nNote: it is unspecified whether target will be mutated or not. In general,\nif target is mutable (like Python lists), it will be mutated.\n\nArgs:\ntarget: An entity that supports getitem semantics.\ni: Index to read from.\nopts: A GetItemOpts object.\n\nReturns:\nThe read element.\n\nRaises:\nValueError: if target is not of a supported type.", "source": "github-repos"}
{"code": "def cumprod(x, axis=None, dtype=None):\n    return Cumprod(axis=axis, dtype=dtype)(x)", "docstring": "Return the cumulative product of elements along a given axis.\n\nArgs:\nx: Input tensor.\naxis: Axis along which the cumulative product is computed.\nBy default the input is flattened.\ndtype: dtype of returned tensor. Defaults to x.dtype.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def parse_name(name):\n  \n  bucket = None\n  item = None\n  m = re.match(_STORAGE_NAME, name)\n  if m:\n    \n    bucket = m.group(1)\n    item = m.group(2)\n    if item is not None:\n      item = item[1:]  \n  else:\n    m = re.match('(' + _OBJECT_NAME + ')', name)\n    if m:\n      item = m.group(1)\n  return bucket, item", "docstring": "Parse a gs:// URL into the bucket and item names.\n\nArgs:\nname: a GCS URL of the form gs://bucket or gs://bucket/item\nReturns:\nThe bucket name (with no gs:// prefix), and the item name if present. If the name\ncould not be parsed returns None for both.", "source": "juraj-google-style"}
{"code": "def ToHashArray(self):\n    hashes = set()\n    MerkleTree.__DepthFirstSearch(self.Root, hashes)\n    return list(hashes)", "docstring": "Turn the tree into a list of hashes.\n\nReturns:\nlist:", "source": "codesearchnet"}
{"code": "def quantile_gaussianize(x):\n    from scipy.stats import norm, rankdata\n    x = asarray(x, float).copy()\n    ok = isfinite(x)\n    x[ok] *= (- 1)\n    y = empty_like(x)\n    y[ok] = rankdata(x[ok])\n    y[ok] = norm.isf((y[ok] / (sum(ok) + 1)))\n    y[(~ ok)] = x[(~ ok)]\n    return y", "docstring": "Normalize a sequence of values via rank and Normal c.d.f.\n\nArgs:\nx (array_like): sequence of values.\n\nReturns:\nGaussian-normalized values.\n\nExample:\n\n.. doctest::\n\n>>> from scipy_sugar.stats import quantile_gaussianize\n>>> print(quantile_gaussianize([-1, 0, 2]))\n[-0.67448975  0.          0.67448975]", "source": "codesearchnet"}
{"code": "def listdir(path='.'):\n    return [name.rstrip('/') for (name, _) in get_instance(path).list_objects(path, first_level=True)]", "docstring": "Return a list containing the names of the entries in the directory given by\npath.\n\nEquivalent to \"os.listdir\".\n\nArgs:\npath (path-like object): Path or URL.\n\nReturns:\nlist of str: Entries names.", "source": "codesearchnet"}
{"code": "def _separate_hdxobjects(self, hdxobjects, hdxobjects_name, id_field, hdxobjectclass):\n    new_hdxobjects = self.data.get(hdxobjects_name, list())\n    ':type : List[HDXObjectUpperBound]'\n    if new_hdxobjects:\n        hdxobject_names = set()\n        for hdxobject in hdxobjects:\n            hdxobject_name = hdxobject[id_field]\n            hdxobject_names.add(hdxobject_name)\n            for new_hdxobject in new_hdxobjects:\n                if (hdxobject_name == new_hdxobject[id_field]):\n                    merge_two_dictionaries(hdxobject, new_hdxobject)\n                    break\n        for new_hdxobject in new_hdxobjects:\n            if (not (new_hdxobject[id_field] in hdxobject_names)):\n                hdxobjects.append(hdxobjectclass(new_hdxobject, configuration=self.configuration))\n        del self.data[hdxobjects_name]", "docstring": "Helper function to take a list of HDX objects contained in the internal dictionary and add them to a\nsupplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in\nthe internal dictionary is then deleted.\n\nArgs:\nhdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones\nhdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects\nid_field (str): Field on which to match to determine if object already exists in list\nhdxobjectclass (type): Type of the HDX Object to be added/updated\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def circuit_to_latex_using_qcircuit(circuit: circuits.Circuit, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> str:\n    diagram = circuit.to_text_diagram_drawer(qubit_namer=qcircuit_qubit_namer, qubit_order=qubit_order, get_circuit_diagram_info=get_qcircuit_diagram_info)\n    return _render(diagram)", "docstring": "Returns a QCircuit-based latex diagram of the given circuit.\n\nArgs:\ncircuit: The circuit to represent in latex.\nqubit_order: Determines the order of qubit wires in the diagram.\n\nReturns:\nLatex code for the diagram.", "source": "codesearchnet"}
{"code": "def commit_output(cls, shard_ctx, iterator):\n    \n    \n    outs = tuple(iterator)\n    shard_ctx._state.writer_state[\"outs\"] = outs", "docstring": "Saves output references when a shard finishes.\n\nInside end_shard(), an output writer can optionally use this method\nto persist some references to the outputs from this shard\n(e.g a list of filenames)\n\nArgs:\nshard_ctx: map_job_context.ShardContext for this shard.\niterator: an iterator that yields json serializable\nreferences to the outputs from this shard.\nContents from the iterator can be accessible later via\nmap_job.Job.get_outputs.", "source": "juraj-google-style"}
{"code": "def run_categorical_analysis(table, schema_list, args):\n  \n  import google.datalab.bigquery as bq\n\n  \n  categorical_columns = []\n  for col_schema in schema_list:\n    col_type = col_schema['type'].lower()\n    if col_type == 'string':\n      categorical_columns.append(col_schema['name'])\n\n  if categorical_columns:\n    sys.stdout.write('Running categorical analysis...')\n    for name in categorical_columns:\n      if args.bigquery_table:\n        table_name = parse_table_name(args.bigquery_table)\n      else:\n        table_name = 'table_name'\n\n      sql = .format(name=name, table=table_name)\n      out_file = os.path.join(args.output_dir,\n                              CATEGORICAL_ANALYSIS_FILE % name)\n\n      \n      \n      if args.bigquery_table:\n        df = bq.Query(sql).execute().result().to_dataframe()\n      else:\n        query = bq.Query(sql, data_sources={'table_name': table})\n        df = query.execute().result().to_dataframe()\n\n      \n      string_buff = six.StringIO()\n      df.to_csv(string_buff, index=False, header=False)\n      file_io.write_string_to_file(out_file, string_buff.getvalue())\n\n    sys.stdout.write('done.\\n')", "docstring": "Find vocab values for the categorical columns and writes a csv file.\n\nThe vocab files are in the from\nlabel1\nlabel2\nlabel3\n...\n\nArgs:\ntable: Reference to FederatedTable (if bigquery_table is false) or a\nregular Table (otherwise)\nschema_list: Bigquery schema json object\nargs: the command line args", "source": "juraj-google-style"}
{"code": "def named(self, name: str) -> 'ColumnExpressionBuilder':\n    if self._children:\n        raise AttributeError(f'named() must not be called on a builder with child selects. Got named called on {str(self)}.')\n    return ColumnExpressionBuilder(self._builder, name, self._children, self._needs_unnest, True)", "docstring": "The named() function.\n\nSets the column name of a given FHIR path in the View. Once the column\nname is set, the FHIR path is sealed to be immutable.\n\nArgs:\nname: The column name as a string.\n\nReturns:\nA new ColumnExpressionBuilder with the given alias name.", "source": "github-repos"}
{"code": "def get_compile_config(self):\n    if self.compiled and hasattr(self, '_compile_config'):\n        return self._compile_config.serialize()\n    return {}", "docstring": "Returns a serialized config with information for compiling the model.\n\nThis method returns a config dictionary containing all the information\n(optimizer, loss, metrics, etc.) with which the model was compiled.\n\nReturns:\nA dict containing information for compiling the model.", "source": "github-repos"}
{"code": "def get_image_patches(self, image: np.array, grid_pinpoints: List[Tuple[int, int]], patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> List[np.array]:\n    if not isinstance(grid_pinpoints, list):\n        raise TypeError('grid_pinpoints must be a list of possible resolutions.')\n    possible_resolutions = grid_pinpoints\n    image_size = get_image_size(image, channel_dim=input_data_format)\n    best_resolution = select_best_resolution(image_size, possible_resolutions)\n    resized_image = self._resize_for_patching(image, best_resolution, resample=resample, input_data_format=input_data_format)\n    padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)\n    patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)\n    patches = [to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches]\n    return patches", "docstring": "Process an image with variable resolutions by dividing it into patches.\n\nArgs:\nimage (`np.array`):\nThe input image to be processed.\ngrid_pinpoints (List[Tuple[int, int]]):\nA list of possible resolutions as tuples.\npatch_size (`int`):\nSize of the patches to divide the image into.\nresample (`PILImageResampling`):\nResampling filter to use if resizing the image.\ndata_format (`ChannelDimension` or `str`):\nThe channel dimension format for the output image.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\n`List[np.array]`: A list of NumPy arrays containing the processed image patches.", "source": "github-repos"}
{"code": "def update(self, rid, data, raise_on_error=True):\n        \n        return self.put(rid, data, raise_on_error)", "docstring": "Update the for the provided Id. Alias for put() method.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): The record data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def find_vasp_calculations():\n    \n    dir_list = [ './' + re.sub( r'vasprun\\.xml', '', path ) for path in glob.iglob( '**/vasprun.xml', recursive=True ) ]\n    gz_dir_list = [ './' + re.sub( r'vasprun\\.xml\\.gz', '', path ) for path in glob.iglob( '**/vasprun.xml.gz', recursive=True ) ]\n    return dir_list + gz_dir_list", "docstring": "Returns a list of all subdirectories that contain either a vasprun.xml file\nor a compressed vasprun.xml.gz file.\n\nArgs:\nNone\n\nReturns:\n(List): list of all VASP calculation subdirectories.", "source": "juraj-google-style"}
{"code": "def parse_newsgroup(line):\n    parts = line.split()\n    try:\n        group = parts[0]\n        low = int(parts[1])\n        high = int(parts[2])\n        status = parts[3]\n    except (IndexError, ValueError):\n        raise ValueError('Invalid newsgroup info')\n    return (group, low, high, status)", "docstring": "Parse a newsgroup info line to python types.\n\nArgs:\nline: An info response line containing newsgroup info.\n\nReturns:\nA tuple of group name, low-water as integer, high-water as integer and\nposting status.\n\nRaises:\nValueError: If the newsgroup info cannot be parsed.\n\nNote:\nPosting status is a character is one of (but not limited to):\n\"y\" posting allowed\n\"n\" posting not allowed\n\"m\" posting is moderated", "source": "codesearchnet"}
{"code": "def createTemplate(data):\n    conn = Qubole.agent()\n    return conn.post(Template.rest_entity_path, data)", "docstring": "Create a new template.\n\nArgs:\n`data`: json data required for creating a template\nReturns:\nDictionary containing the details of the template with its ID.", "source": "codesearchnet"}
{"code": "def get_bit_mask_from_enumerations(enumerations):\n    return functools.reduce((lambda x, y: (x | y)), [z.value for z in enumerations])", "docstring": "A utility function that computes a bit mask from a collection of\nenumeration values.\n\nArgs:\nenumerations (list): A list of enumeration values to be combined in a\ncomposite bit mask.\n\nReturns:\nint: The composite bit mask.", "source": "codesearchnet"}
{"code": "def firmware_version(self):\n    namespace = 'urn:brocade.com:mgmt:brocade-firmware-ext'\n    request_ver = ET.Element('show-firmware-version', xmlns=namespace)\n    ver = self._callback(request_ver, handler='get')\n    return ver.find(('.", "docstring": "Returns firmware version.\n\nArgs:\nNone\n\nReturns:\nDictionary\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, field, value, **kwargs):\n        \n        return super(DomainCondition, self).__init__(\n            field=field, value=value, **kwargs\n        )", "docstring": "Initialize a new generic query condition.\n\nArgs:\nfield (str): Field name to search on. This should be the\nPythonified name as in the internal models, not the\nname as provided in the API e.g. ``first_name`` for\nthe Customer's first name instead of ``firstName``.\nvalue (mixed): The value of the field.", "source": "juraj-google-style"}
{"code": "def cumulative_distribution(self, X):\n        \n        self.check_fit()\n\n        U, V = self.split_matrix(X)\n\n        if self.theta == 1:\n            return np.multiply(U, V)\n\n        else:\n            h = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta)\n            h = -np.power(h, 1.0 / self.theta)\n            cdfs = np.exp(h)\n            return cdfs", "docstring": "Computes the cumulative distribution function for the copula, :math:`C(u, v)`\n\nArgs:\nX: `np.ndarray`\n\nReturns:\nnp.array: cumulative probability", "source": "juraj-google-style"}
{"code": "def add(self, index, value):\n    self.buf.append(value)\n    if ((index - self.flush_at) < self.interval):\n        return\n    value = np.mean(self.buf)\n    if self.verbose:\n        logger.info('iter={} {{{}}}={}'.format(index, self.name, value))\n    if (self.fd is not None):\n        print('{} {:g}'.format(index, value), file=self.fd)\n    self.flush_at = index\n    self.buf = []", "docstring": "Add a value to the series.\n\nArgs:\nindex (int): Index.\nvalue (float): Value.", "source": "codesearchnet"}
{"code": "def get_student_current_grade(self, username, course_id):\n        \n        \n        resp = self.requester.get(\n            urljoin(\n                self.base_url,\n                '/api/grades/v1/courses/{course_key}/?username={username}'.format(\n                    username=username,\n                    course_key=course_id\n                )\n            )\n        )\n\n        resp.raise_for_status()\n\n        return CurrentGrade(resp.json()[0])", "docstring": "Returns an CurrentGrade object for the user in a course\n\nArgs:\nusername (str): an edx user's username\ncourse_id (str): an edX course id.\n\nReturns:\nCurrentGrade: object representing the student current grade for a course", "source": "juraj-google-style"}
{"code": "def create_cloudwatch_log_event(app_name, env, region, rules):\n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('logs')\n    log_group = rules.get('log_group')\n    filter_name = rules.get('filter_name')\n    filter_pattern = rules.get('filter_pattern')\n    if (not log_group):\n        LOG.critical('Log group is required and no \"log_group\" is defined!')\n        raise InvalidEventConfiguration('Log group is required and no \"log_group\" is defined!')\n    if (not filter_name):\n        LOG.critical('Filter name is required and no filter_name is defined!')\n        raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')\n    if (filter_pattern is None):\n        LOG.critical('Filter pattern is required and no filter_pattern is defined!')\n        raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')\n    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)\n    statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(' ', '_'))\n    principal = 'logs.{}.amazonaws.com'.format(region)\n    account_id = get_env_credential(env=env)['accountId']\n    source_arn = 'arn:aws:logs:{0}:{1}:log-group:{2}:*'.format(region, account_id, log_group)\n    add_lambda_permissions(function=lambda_alias_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=source_arn, env=env, region=region)\n    cloudwatch_client.put_subscription_filter(logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn)\n    LOG.info('Created Cloudwatch log event with filter: %s', filter_pattern)", "docstring": "Create cloudwatch log event for lambda from rules.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\nrules (str): Trigger rules from the settings", "source": "codesearchnet"}
{"code": "def delete_tag(self, key, update_session=True):\n        \n        existing_tags = {x.key: x for x in self.tags}\n        if key in existing_tags:\n            if update_session:\n                db.session.delete(existing_tags[key])\n\n            self.tags.remove(existing_tags[key])\n            return True\n\n        return False", "docstring": "Removes a tag from a resource based on the tag key. Returns `True` if the tag was removed or `False` if the\ntag didn't exist\n\nArgs:\nkey (str): Key of the tag to delete\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. Default: True\n\nReturns:", "source": "juraj-google-style"}
{"code": "def ParseBookmarkFolderRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    title = self._GetRowValue(query_hash, row, 'title')\n\n    event_data = FirefoxPlacesBookmarkFolderEventData()\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = title or 'N/A'\n\n    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_ADDED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastModified')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a bookmark folder row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def parse_dict(self, args: dict[str, Any], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:\n    unused_keys = set(args.keys())\n    outputs = []\n    for dtype in self.dataclass_types:\n        keys = {f.name for f in dataclasses.fields(dtype) if f.init}\n        inputs = {k: v for k, v in args.items() if k in keys}\n        unused_keys.difference_update(inputs.keys())\n        obj = dtype(**inputs)\n        outputs.append(obj)\n    if not allow_extra_keys and unused_keys:\n        raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}')\n    return tuple(outputs)", "docstring": "Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass\ntypes.\n\nArgs:\nargs (`dict`):\ndict containing config values\nallow_extra_keys (`bool`, *optional*, defaults to `False`):\nDefaults to False. If False, will raise an exception if the dict contains keys that are not parsed.\n\nReturns:\nTuple consisting of:\n\n- the dataclass instances in the same order as they were passed to the initializer.", "source": "github-repos"}
{"code": "def copy_directory_structure(destination_directory, relative_path):\n    full_path = os.path.join(destination_directory, relative_path)\n    if os.path.exists(full_path):\n        return\n    os.makedirs(destination_directory, relative_path)", "docstring": "Create all the intermediate directories required for relative_path to exist within destination_directory.\nThis assumes that relative_path is a directory located within root_dir.\n\nExamples:\ndestination_directory: /tmp/destination\nrelative_path: test/unit/\n\nwill create:  /tmp/destination/test/unit\n\nArgs:\ndestination_directory (str): root of the destination directory where the directory structure will be created.\nrelative_path (str): relative path that will be created within destination_directory", "source": "codesearchnet"}
{"code": "def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False) -> ContextManager[None]:\n    if context.executing_eagerly():\n        if op is not None:\n            if not hasattr(op, 'device'):\n                op = convert_to_tensor(op)\n            return device(op.device)\n        else:\n            return NullContextmanager()\n    else:\n        default_graph = get_default_graph()\n        if isinstance(op, EagerTensor):\n            if default_graph.building_function:\n                return default_graph.device(op.device)\n            else:\n                raise ValueError('Encountered an Eager-defined Tensor during graph construction, but a function was not being built.')\n        return default_graph._colocate_with_for_gradient(op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)", "docstring": "Returns a context manager for colocating op gradients with an op.\n\nInternal API. In eager mode, returns a context manager that sets the default\ndevice for new ops to the same device as the given op. Does the same if a\nfunction is currently being built (i.e. the current mode is graph, but the\noverall mode is eager).\n\nIn all other cases, returns a `Graph.colocate_with()` context manager,\noptionally accounting for gradients (if a gradient UID is specified).\n\nArgs:\nop: Operation or Tensor with which to colocate.\ngradient_uid: Optional gradient UID to enable colocation of gradients during\ncompilation.\nignore_existing: See `Graph.colocate_with()`.\n\nReturns:\nA context manager used to colocate ops and gradients with the specified\noperation.", "source": "github-repos"}
{"code": "def assert_scalar(tensor, name=None, message=None):\n    with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:\n        tensor = ops.convert_to_tensor(tensor, name=name_scope)\n        shape = tensor.get_shape()\n        message = _message_prefix(message)\n        if shape.ndims != 0:\n            if context.executing_eagerly():\n                raise ValueError('%sExpected scalar shape, saw shape: %s.' % (message, shape))\n            else:\n                raise ValueError('%sExpected scalar shape for %s, saw shape: %s.' % (message, tensor.name, shape))\n        return tensor", "docstring": "Asserts that the given `tensor` is a scalar (i.e. zero-dimensional).\n\nThis function raises `ValueError` unless it can be certain that the given\n`tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is\nunknown.\n\nArgs:\ntensor: A `Tensor`.\nname:  A name for this operation. Defaults to \"assert_scalar\"\nmessage: A string to prefix to the default message.\n\nReturns:\nThe input tensor (potentially converted to a `Tensor`).\n\nRaises:\nValueError: If the tensor is not scalar (rank 0), or if its shape is\nunknown.", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        if not isinstance(other, self.__class__):\n            return False\n\n        headers = self._headers.copy()\n        other_headers = other._headers.copy()\n        try:\n            del headers[\"sent-at\"]\n        except KeyError:\n            pass\n        try:\n            del other_headers[\"sent-at\"]\n        except KeyError:\n            pass\n\n        return (\n            self.topic == other.topic\n            and self.body == other.body\n            and headers == other_headers\n        )", "docstring": "Two messages of the same class with the same topic, headers, and body are equal.\n\nThe \"sent-at\" header is excluded from the equality check as this is set\nautomatically and is dependent on when the object is created.\n\nArgs:\nother (object): The object to check for equality.\n\nReturns:\nbool: True if the messages are equal.", "source": "juraj-google-style"}
{"code": "def DEFINE_boolean(flag_name, default_value, docstring):  \n    \n\n    \n    def str2bool(bool_str):\n        \n        return bool_str.lower() in ('true', 't', '1')\n\n    get_context_parser().add_argument(\n        '--' + flag_name,\n        nargs='?',\n        const=True,\n        help=docstring,\n        default=default_value,\n        type=str2bool)\n\n    \n    \n    get_context_parser().add_argument(\n        '--no' + flag_name,\n        action='store_false',\n        dest=flag_name.replace('-', '_'))", "docstring": "Defines a flag of type 'boolean'.\nArgs:\nflag_name: The name of the flag as a string.\ndefault_value: The default value the flag should take as a boolean.\ndocstring: A helpful message explaining the use of the flag.", "source": "juraj-google-style"}
{"code": "def wait_stopped(self, timeout=None, force=False):\n    self.join(timeout)\n    if (self.is_alive() and (force is False)):\n        raise TimeoutExpiredError('Error waiting for background thread to exit', timeout=timeout)", "docstring": "Wait for the thread to stop.\n\nYou must have previously called signal_stop or this function will\nhang.\n\nArgs:\n\ntimeout (float): The maximum time to wait for the thread to stop\nbefore raising a TimeoutExpiredError.  If force is True,\nTimeoutExpiredError is not raised and the thread is just\nmarked as a daemon thread so that it does not block cleanly\nexiting the process.\nforce (bool): If true and the thread does not exit in timeout seconds\nno error is raised since the thread is marked as daemon and will\nbe killed when the process exits.", "source": "codesearchnet"}
{"code": "class FixedThreshold(ThresholdFn):\n\n    def __init__(self, cutoff: float, **kwargs):\n        super().__init__(**kwargs)\n        self._cutoff = cutoff\n\n    @property\n    def is_stateful(self) -> bool:\n        \n        return False\n\n    @property\n    def threshold(self) -> float:\n        \n        return self._cutoff\n\n    def apply(self, score: Optional[float]) -> Optional[int]:\n        \n        if score is None:\n            return None\n        if math.isnan(score):\n            return self._missing_label\n        if score < self.threshold:\n            return self._normal_label\n        return self._outlier_label", "docstring": "Applies a fixed cutoff value to anomaly scores.\n\nThis `ThresholdFn` is stateless and uses a pre-defined cutoff value to\nclassify anomaly scores. Scores below the cutoff are considered normal, while\nscores at or above the cutoff are classified as outliers.\n\nArgs:\ncutoff (float): The fixed threshold value. Anomaly scores at or above this\nvalue will be labeled as outliers.\n**kwargs: Additional keyword arguments to be passed to the base\n`ThresholdFn` constructor.", "source": "github-repos"}
{"code": "def circuit_diagram_info(val: Any, args: Optional[CircuitDiagramInfoArgs]=None, default=RaiseTypeErrorIfNotProvided):\n    if (args is None):\n        args = CircuitDiagramInfoArgs.UNINFORMED_DEFAULT\n    getter = getattr(val, '_circuit_diagram_info_', None)\n    result = (NotImplemented if (getter is None) else getter(args))\n    if isinstance(result, str):\n        return CircuitDiagramInfo(wire_symbols=(result,))\n    if isinstance(result, collections.Iterable):\n        return CircuitDiagramInfo(wire_symbols=tuple(result))\n    if (result is not NotImplemented):\n        return result\n    if (default is not RaiseTypeErrorIfNotProvided):\n        return default\n    if (getter is None):\n        raise TypeError(\"object of type '{}' has no _circuit_diagram_info_ method.\".format(type(val)))\n    raise TypeError(\"object of type '{}' does have a _circuit_diagram_info_ method, but it returned NotImplemented.\".format(type(val)))", "docstring": "Requests information on drawing an operation in a circuit diagram.\n\nCalls _circuit_diagram_info_ on `val`. If `val` doesn't have\n_circuit_diagram_info_, or it returns NotImplemented, that indicates that\ndiagram information is not available.\n\nArgs:\nval: The operation or gate that will need to be drawn.\nargs: A CircuitDiagramInfoArgs describing the desired drawing style.\ndefault: A default result to return if the value doesn't have circuit\ndiagram information. If not specified, a TypeError is raised\ninstead.\n\nReturns:\nIf `val` has no _circuit_diagram_info_ method or it returns\nNotImplemented, then `default` is returned (or a TypeError is\nraised if no `default` is specified).\n\nOtherwise, the value returned by _circuit_diagram_info_ is returned.\n\nRaises:\nTypeError:\n`val` doesn't have circuit diagram information and `default` was\nnot specified.", "source": "codesearchnet"}
{"code": "def GetHostname(self, session_identifier=CURRENT_SESSION):\n    \n    hostname_artifact = self._hostnames.get(session_identifier, None)\n    if not hostname_artifact:\n      return ''\n\n    return hostname_artifact.name or ''", "docstring": "Retrieves the hostname related to the event.\n\nIf the hostname is not stored in the event it is determined based\non the preprocessing information that is stored inside the storage file.\n\nArgs:\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nReturns:\nstr: hostname.", "source": "juraj-google-style"}
{"code": "def diff_parameters(old_params, new_params):\n    \n    [changes, diff] = diff_dictionaries(old_params, new_params)\n    if changes == 0:\n        return []\n    return diff", "docstring": "Compares the old vs. new parameters and returns a \"diff\"\n\nIf there are no changes, we return an empty list.\n\nArgs:\nold_params(dict): old paramters\nnew_params(dict): new parameters\n\nReturns:\nlist: A list of differences", "source": "juraj-google-style"}
{"code": "def _get_events_data(object_key: str) -> List[dict]:\n    \n    events_data = []\n    key = _keys.events_data(object_key)\n    for event_id in _get_events_list(object_key):\n        event_dict = literal_eval(DB.get_hash_value(key, event_id))\n        events_data.append(event_dict)\n    return events_data", "docstring": "Get the list of event data for the object with the specified key.\n\nArgs:\nobject_key (str): Key of an object in the database.", "source": "juraj-google-style"}
{"code": "def _restore_output_tensor_names(graph_def: graph_pb2.GraphDef) -> graph_pb2.GraphDef:\n    output_renaming_map = {}\n    with session.Session(graph=ops.Graph()):\n        importer.import_graph_def(graph_def, name='')\n        graph = ops.get_default_graph()\n        for op in graph.get_operations():\n            if op.type == '_Retval':\n                expected_node_name = op.name\n                if op.get_attr('tf_saved_model.index_path') is not None:\n                    index_path_name = op.get_attr('tf_saved_model.index_path')[0]\n                    index_path_name = index_path_name.decode('utf-8').split(':')[0]\n                    try:\n                        index_path_node = graph.get_operation_by_name(index_path_name)\n                        if index_path_node.type == '_Retval':\n                            expected_node_name = index_path_name\n                    except KeyError:\n                        pass\n                retval_input_node_name = op.inputs[0].op.name\n                output_renaming_map[retval_input_node_name] = expected_node_name\n    for node in reversed(graph_def.node):\n        if node.name in output_renaming_map:\n            node.name = output_renaming_map[node.name]\n        elif node.op == '_Retval':\n            graph_def.node.remove(node)\n        else:\n            for idx, input_name in enumerate(node.input):\n                if input_name in output_renaming_map:\n                    node.input[idx] = output_renaming_map[input_name]\n            updating_inputs = []\n            for input_name in reversed(node.input):\n                if input_name.startswith('^') and input_name[1:] in output_renaming_map:\n                    updating_inputs.append(input_name[1:])\n                    node.input.remove(input_name)\n            for updating_input in updating_inputs:\n                node.input.append('^' + output_renaming_map[updating_input])\n    return graph_def", "docstring": "Restores the output tensor names of the converted model.\n\nDuring the conversion, the output tensor names of the original model are\nembedded in the `tf_saved_model.index_path` attribute of the RetVal nodes and\nmight become the name of Retval nodes as well (with an index suffix if there\nare multiple output tensors from one node). Since Retval nodes are not used in\nSavedModel, this function removes them and restore the names to the actual\noutput tensors.\n\nArgs:\ngraph_def: the converted GraphDef.\n\nReturns:\nThe GraphDef with Retval nodes removed and output tensor names restored.", "source": "github-repos"}
{"code": "def _ParseHeader(self, parser_mediator, file_object):\n    header_map = self._GetDataTypeMap('cups_ipp_header')\n    try:\n        (header, _) = self._ReadStructureFromFileObject(file_object, 0, header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('[{0:s}] Unable to parse header with error: {1!s}'.format(self.NAME, exception))\n    format_version = '{0:d}.{1:d}'.format(header.major_version, header.minor_version)\n    if (format_version not in self._SUPPORTED_FORMAT_VERSIONS):\n        raise errors.UnableToParseFile('[{0:s}] Unsupported format version {1:s}.'.format(self.NAME, format_version))\n    if (header.operation_identifier != 5):\n        display_name = parser_mediator.GetDisplayName()\n        logger.debug('[{0:s}] Non-standard operation identifier: 0x{1:08x} in file header of: {2:s}.'.format(self.NAME, header.operation_identifier, display_name))", "docstring": "Parses a CUPS IPP header from a file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the header cannot be parsed.", "source": "codesearchnet"}
{"code": "def parameterized_send(self, request, parameter_list):\n        \n        response_queues = OrderedDict()\n        for parameter in parameter_list:\n            response_queues[parameter] = self.send(request % parameter)\n        return response_queues", "docstring": "Send batched requests for a list of parameters\n\nArgs:\nrequest (str): Request to send, like \"%s.*?\\n\"\nparameter_list (list): parameters to format with, like\n[\"TTLIN\", \"TTLOUT\"]\n\nReturns:\ndict: {parameter: response_queue}", "source": "juraj-google-style"}
{"code": "def write_structure(times=None):\n    \n    if times is None:\n        return report_loc.write_structure(f.root.times)\n    else:\n        if not isinstance(times, Times):\n            raise TypeError(\"Expected Times instance for param 'times' (default is root).\")\n        return report_loc.write_structure(times)", "docstring": "Produce a formatted record of a times data structure.\n\nArgs:\ntimes (Times, optional): If not provided, uses the current root timer.\n\nReturns:\nstr: Timer tree hierarchy in a formatted string.\n\nRaises:\nTypeError: If provided argument is not a Times object.", "source": "juraj-google-style"}
{"code": "def __init__(self, encoding=None, suppress_output=False):\n    if not encoding:\n        encoding = self._GetConsoleEncoding()\n    elif encoding == 'win':\n        encoding = 'cp437'\n    self._encoding = encoding or 'ascii'\n    self._term = '' if suppress_output else os.getenv('TERM', '').lower()\n    if self.SupportsAnsi():\n        self._csi = '\\x1b['\n        self._font_bold = '1'\n        self._font_italic = '4'\n    else:\n        self._csi = None\n        self._font_bold = ''\n        self._font_italic = ''\n    is_screen_reader = False\n    if self._encoding == 'utf8' and (not is_screen_reader):\n        self._box_line_characters = BoxLineCharactersUnicode()\n        self._bullets = self._BULLETS_UNICODE\n        self._progress_tracker_symbols = ProgressTrackerSymbolsUnicode()\n    elif self._encoding == 'cp437' and (not is_screen_reader):\n        self._box_line_characters = BoxLineCharactersUnicode()\n        self._bullets = self._BULLETS_WINDOWS\n        self._progress_tracker_symbols = ProgressTrackerSymbolsAscii()\n    else:\n        self._box_line_characters = BoxLineCharactersAscii()\n        if is_screen_reader:\n            self._box_line_characters = BoxLineCharactersScreenReader()\n        self._bullets = self._BULLETS_ASCII\n        self._progress_tracker_symbols = ProgressTrackerSymbolsAscii()\n    self._get_raw_key = [console_attr_os.GetRawKeyFunction()]\n    self._term_size = (0, 0) if suppress_output else console_attr_os.GetTermSize()\n    self._display_width_cache = {}", "docstring": "Constructor.\n\nArgs:\nencoding: Encoding override.\nascii -- ASCII art. This is the default.\nutf8 -- UTF-8 unicode.\nwin -- Windows code page 437.\nsuppress_output: True to create a ConsoleAttr that doesn't want to output\nanything.", "source": "github-repos"}
{"code": "def build_byte_align_buff(bits):\n    bitmod = (len(bits) % 8)\n    if (bitmod == 0):\n        rdiff = bitarray()\n    else:\n        rdiff = bitarray((8 - bitmod))\n        rdiff.setall(False)\n    return (rdiff + bits)", "docstring": "Pad the left side of a bitarray with 0s to align its length with byte boundaries.\n\nArgs:\nbits: A bitarray to be padded and aligned.\n\nReturns:\nA newly aligned bitarray.", "source": "codesearchnet"}
{"code": "def _ValidateDataTypeDefinition(cls, data_type_definition):\n    \n    if not cls._IsIdentifier(data_type_definition.name):\n      raise ValueError(\n          'Data type definition name: {0!s} not a valid identifier'.format(\n              data_type_definition.name))\n\n    if keyword.iskeyword(data_type_definition.name):\n      raise ValueError(\n          'Data type definition name: {0!s} matches keyword'.format(\n              data_type_definition.name))\n\n    members = getattr(data_type_definition, 'members', None)\n    if not members:\n      raise ValueError(\n          'Data type definition name: {0!s} missing members'.format(\n              data_type_definition.name))\n\n    defined_attribute_names = set()\n\n    for member_definition in members:\n      attribute_name = member_definition.name\n\n      if not cls._IsIdentifier(attribute_name):\n        raise ValueError('Attribute name: {0!s} not a valid identifier'.format(\n            attribute_name))\n\n      if attribute_name.startswith('_'):\n        raise ValueError('Attribute name: {0!s} starts with underscore'.format(\n            attribute_name))\n\n      if keyword.iskeyword(attribute_name):\n        raise ValueError('Attribute name: {0!s} matches keyword'.format(\n            attribute_name))\n\n      if attribute_name in defined_attribute_names:\n        raise ValueError('Attribute name: {0!s} already defined'.format(\n            attribute_name))\n\n      defined_attribute_names.add(attribute_name)", "docstring": "Validates the data type definition.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nValueError: if the data type definition is not considered valid.", "source": "juraj-google-style"}
{"code": "def convert(self):\n    if not _jit:\n        raise ImportError('Cannot import jit from jax.')\n    if not self._serving_funcs:\n        raise ValueError('No serving func is specified.')\n    if not self._inputs:\n        raise ValueError('Input tensors are not specified.')\n    if len(self._inputs) != len(self._serving_funcs):\n        msg = 'Input tensor mapping len {} does not match serving func len {}.'.format(len(self._inputs), len(self._serving_funcs))\n        raise ValueError(msg)\n    if not isinstance(self._inputs, (tuple, list)):\n        raise ValueError('Input tensors should be pass in a tuple list wrapped in an array.')\n    if len(self._serving_funcs) > 1:\n        raise ValueError('Currently only support single serving function.')\n    if not isinstance(self._inputs[0], (tuple, list)):\n        raise ValueError('The input placeholders are not a dictionary.')\n    input_names = []\n    ordered_inputs = []\n    for input_name, tensor in self._inputs[0]:\n        input_names.append(input_name)\n        ordered_inputs.append(tensor)\n    try:\n        hlo_proto = _jit(self._serving_funcs[0]).trace(*ordered_inputs).lower(lowering_platforms=('cpu',)).compiler_ir('hlo').as_serialized_hlo_module_proto()\n    except Exception:\n        raise ValueError('Failed to convert the given Jax function to hlo.')\n    converter_kwargs = {'input_content': hlo_proto, 'input_names': input_names, 'is_proto_format': True}\n    converter_kwargs.update(self._get_base_converter_args())\n    quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, None, experimental_qdq_annotation=self._experimental_strict_qdq)\n    self._validate_inference_input_output_types(quant_mode)\n    converter_kwargs.update(quant_mode.converter_flags())\n    result = _convert_jax_hlo(**converter_kwargs)\n    return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Converts a Jax serving func based on instance variables.\n\nReturns:\nThe converted data in serialized format.\n\nRaises:\nImportError:\nIf cannot import the jit from jax.\nValueError:\nNo serving function is specified.\nInput tensors are not specified.\nThe truth value of an array with more than one element is ambiguous.\nFailed to convert the given Jax function to hlo.", "source": "github-repos"}
{"code": "class CustomObjectScope(object):\n\n    def __init__(self, *args):\n        self.custom_objects = args\n        self.backup = None\n\n    def __enter__(self):\n        self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()\n        for objects in self.custom_objects:\n            _GLOBAL_CUSTOM_OBJECTS.update(objects)\n        return self\n\n    def __exit__(self, *args, **kwargs):\n        _GLOBAL_CUSTOM_OBJECTS.clear()\n        _GLOBAL_CUSTOM_OBJECTS.update(self.backup)", "docstring": "Exposes custom classes/functions to Keras deserialization internals.\n\nUnder a scope `with custom_object_scope(objects_dict)`, Keras methods such\nas `tf.keras.models.load_model` or `tf.keras.models.model_from_config`\nwill be able to deserialize any custom object referenced by a\nsaved config (e.g. a custom layer or metric).\n\nExample:\n\nConsider a custom regularizer `my_regularizer`:\n\n```python\nlayer = Dense(3, kernel_regularizer=my_regularizer)\nconfig = layer.get_config()  # Config contains a reference to `my_regularizer`\n...\n# Later:\nwith custom_object_scope({'my_regularizer': my_regularizer}):\nlayer = Dense.from_config(config)\n```\n\nArgs:\n*args: Dictionary or dictionaries of `{name: object}` pairs.", "source": "github-repos"}
{"code": "def _adjust_block(p, ip, filters, block_id=None):\n    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1\n    img_dim = 2 if backend.image_data_format() == 'channels_first' else -2\n    with backend.name_scope('adjust_block'):\n        if p is None:\n            p = ip\n        elif p.shape[img_dim] != ip.shape[img_dim]:\n            with backend.name_scope(f'adjust_reduction_block_{block_id}'):\n                p = layers.Activation('relu', name=f'adjust_relu_1_{block_id}')(p)\n                p1 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name=f'adjust_avg_pool_1_{block_id}')(p)\n                p1 = layers.Conv2D(filters \n                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)\n                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)\n                p2 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name=f'adjust_avg_pool_2_{block_id}')(p2)\n                p2 = layers.Conv2D(filters \n                p = layers.concatenate([p1, p2], axis=channel_dim)\n                p = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'adjust_bn_{block_id}')(p)\n        elif p.shape[channel_dim] != filters:\n            with backend.name_scope(f'adjust_projection_block_{block_id}'):\n                p = layers.Activation('relu')(p)\n                p = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'adjust_conv_projection_{block_id}', use_bias=False, kernel_initializer='he_normal')(p)\n                p = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'adjust_bn_{block_id}')(p)\n    return p", "docstring": "Adjusts the input `previous path` to match the shape of the `input`.\n\nUsed in situations where the output number of filters needs to be changed.\n\nArgs:\np: Input tensor which needs to be modified\nip: Input tensor whose shape needs to be matched\nfilters: Number of output filters to be matched\nblock_id: String block_id\n\nReturns:\nAdjusted Keras tensor", "source": "github-repos"}
{"code": "def to_image(dataset):\n    \n    dataset = dataset.squeeze()\n    if dataset.ndim < 2:\n        raise ValueError(\"Need at least a 2D array to make an image.\")\n    else:\n        return XRImage(dataset)", "docstring": "convert ``dataset`` into a :class:`~trollimage.xrimage.XRImage` instance.\n\nConvert the ``dataset`` into an instance of the\n:class:`~trollimage.xrimage.XRImage` class.  This function makes no other\nchanges.  To get an enhanced image, possibly with overlays and decoration,\nsee :func:`~get_enhanced_image`.\n\nArgs:\ndataset (xarray.DataArray): Data to be converted to an image.\n\nReturns:\nInstance of :class:`~trollimage.xrimage.XRImage`.", "source": "juraj-google-style"}
{"code": "def _validate_all_blocks_supported(ir_blocks, query_metadata_table):\n    if (len(ir_blocks) < 3):\n        raise AssertionError(u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal query is expected to have at least a QueryRoot, GlobalOperationsStart, and ConstructResult block. The query metadata table is {}.'.format(query_metadata_table))\n    construct_result = _get_construct_result(ir_blocks)\n    unsupported_blocks = []\n    unsupported_fields = []\n    for block in ir_blocks[:(- 1)]:\n        if isinstance(block, constants.SUPPORTED_BLOCK_TYPES):\n            continue\n        if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES):\n            continue\n        unsupported_blocks.append(block)\n    for (field_name, field) in six.iteritems(construct_result.fields):\n        if (not isinstance(field, constants.SUPPORTED_OUTPUT_EXPRESSION_TYPES)):\n            unsupported_fields.append((field_name, field))\n        elif (field.location.field in constants.UNSUPPORTED_META_FIELDS):\n            unsupported_fields.append((field_name, field))\n    if ((len(unsupported_blocks) > 0) or (len(unsupported_fields) > 0)):\n        raise NotImplementedError(u'Encountered unsupported blocks {} and unsupported fields {} during construction of SQL query tree for IR blocks {} with query metadata table {}.'.format(unsupported_blocks, unsupported_fields, ir_blocks, query_metadata_table))", "docstring": "Validate that all IR blocks and ConstructResult fields passed to the backend are supported.\n\nArgs:\nir_blocks: List[BasicBlock], IR blocks to validate.\nquery_metadata_table: QueryMetadataTable, object containing all metadata collected during\nquery processing, including location metadata (e.g. which locations\nare folded or optional).\n\nRaises:\nNotImplementedError, if any block or ConstructResult field is unsupported.", "source": "codesearchnet"}
{"code": "def remove_room_alias(self, room_alias):\n    try:\n        self.api.remove_room_alias(room_alias)\n        return True\n    except MatrixRequestError:\n        return False", "docstring": "Remove mapping of an alias\n\nArgs:\nroom_alias(str): The alias to be removed.\n\nReturns:\nbool: True if the alias is removed, False otherwise.", "source": "codesearchnet"}
{"code": "def _log_score(score):\n    logger.info('Score of ({}/{}) set for submission {}'.format(score.points_earned, score.points_possible, score.submission.uuid))", "docstring": "Log the creation of a score.\n\nArgs:\nscore (Score): The score model.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def sg_float(tensor, opt):\n    r\n    return tf.cast(tensor, tf.sg_floatx, name=opt.name)", "docstring": "r\"\"\"Casts a tensor to floatx.\n\nSee `tf.cast()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` or `SparseTensor` (automatically given by chain).\nopt:\nname : If provided, it replaces current tensor's name\n\nReturns:\nA `Tensor` or `SparseTensor` with same shape as `tensor`.", "source": "juraj-google-style"}
{"code": "def usufyToXlsExport(d, fPath):\n    \n    from pyexcel_xls import get_data\n    try:\n        \n        \n        oldData = {\"OSRFramework\": get_data(fPath) }\n    except:\n        \n        oldData = {\"OSRFramework\":[]}\n\n    \n    tabularData = _generateTabularData(d, oldData)\n    from pyexcel_xls import save_data\n    \n    save_data(fPath, tabularData)", "docstring": "Workaround to export to a .xls file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "juraj-google-style"}
{"code": "def build_chain(self, source, chain):\n        \n\n        for group in WalkByGroup(source, chain.order+1):\n            pre = group[:-1]\n            res = group[-1]\n\n            if pre not in chain.content:\n                chain.content[pre] = {res: 1}\n            else:\n                if res not in chain.content[pre]:\n                    chain.content[pre][res] = 1\n                else:\n                    chain.content[pre][res] += 1\n\n        chain.decache()", "docstring": "Build markov chain from source on top of existin chain\n\nArgs:\nsource: iterable which will be used to build chain\nchain: MarkovChain in currently loaded shelve file that\nwill be extended by source", "source": "juraj-google-style"}
{"code": "def list(self, accountID, **kwargs):\n    request = Request('GET', '/v3/accounts/{accountID}/orders')\n    request.set_path_param('accountID', accountID)\n    request.set_param('ids', kwargs.get('ids'))\n    request.set_param('state', kwargs.get('state'))\n    request.set_param('instrument', kwargs.get('instrument'))\n    request.set_param('count', kwargs.get('count'))\n    request.set_param('beforeID', kwargs.get('beforeID'))\n    response = self.ctx.request(request)\n    if (response.content_type is None):\n        return response\n    if (not response.content_type.startswith('application/json')):\n        return response\n    jbody = json.loads(response.raw_body)\n    parsed_body = {}\n    if (str(response.status) == '200'):\n        if (jbody.get('orders') is not None):\n            parsed_body['orders'] = [self.ctx.order.Order.from_dict(d, self.ctx) for d in jbody.get('orders')]\n        if (jbody.get('lastTransactionID') is not None):\n            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')\n    elif (str(response.status) == '400'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '404'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '405'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    else:\n        parsed_body = jbody\n    response.body = parsed_body\n    return response", "docstring": "Get a list of Orders for an Account\n\nArgs:\naccountID:\nAccount Identifier\nids:\nList of Order IDs to retrieve\nstate:\nThe state to filter the requested Orders by\ninstrument:\nThe instrument to filter the requested orders by\ncount:\nThe maximum number of Orders to return\nbeforeID:\nThe maximum Order ID to return. If not provided the most recent\nOrders in the Account are returned\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def create(self, name, domain_name):\n        \n        name = self.wrap(self.resource.create(dict(name=name,\n                                                   domain_name=domain_name)))\n\n        self.add(name)\n        return name", "docstring": "Register a url (e.g. wallet.gem.co) for\n\nArgs:\nname (str): human-readable wallet name (e.g. wallet)\ndomain_name (str): the domain name to create subdomain on (e.g. gem.co)\nthis domain must already be registered with Gem\n\nReturns: The new round.NetkiName", "source": "juraj-google-style"}
{"code": "def console_print_rect_ex(con: tcod.console.Console, x: int, y: int, w: int, h: int, flag: int, alignment: int, fmt: str) -> int:\n    return int(lib.TCOD_console_printf_rect_ex(_console(con), x, y, w, h, flag, alignment, _fmt(fmt)))", "docstring": "Print a string constrained to a rectangle with blend and alignment.\n\nReturns:\nint: The number of lines of text once word-wrapped.\n\n.. deprecated:: 8.5\nUse :any:`Console.print_rect` instead.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    encoding = (self._ENCODING or parser_mediator.codepage)\n    text_file_object = text_file.TextFile(file_object, encoding=encoding)\n    if (not self._ParseAndValidateRecord(parser_mediator, text_file_object)):\n        raise errors.UnableToParseFile('Unable to parse as Opera global_history.dat.')\n    while self._ParseRecord(parser_mediator, text_file_object):\n        pass", "docstring": "Parses an Opera global history file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def convert_strain_to_deformation(strain, shape=\"upper\"):\n    \n    strain = SquareTensor(strain)\n    ftdotf = 2*strain + np.eye(3)\n    if shape == \"upper\":\n        result = scipy.linalg.cholesky(ftdotf)\n    elif shape == \"symmetric\":\n        result = scipy.linalg.sqrtm(ftdotf)\n    else:\n        raise ValueError(\"shape must be \\\"upper\\\" or \\\"symmetric\\\"\")\n    return Deformation(result)", "docstring": "This function converts a strain to a deformation gradient that will\nproduce that strain.  Supports three methods:\n\nArgs:\nstrain (3x3 array-like): strain matrix\nshape: (string): method for determining deformation, supports\n\"upper\" produces an upper triangular defo\n\"lower\" produces a lower triangular defo\n\"symmetric\" produces a symmetric defo", "source": "juraj-google-style"}
{"code": "def GetFileSystemReferenceCount(self, path_spec):\n    identifier = self._GetFileSystemCacheIdentifier(path_spec)\n    cache_value = self._file_system_cache.GetCacheValue(identifier)\n    if (not cache_value):\n        return None\n    return cache_value.reference_count", "docstring": "Retrieves the reference count of a cached file system object.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: reference count or None if there is no file system object for\nthe corresponding path specification cached.", "source": "codesearchnet"}
{"code": "def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples):\n    self.assertTrue(low < high)\n    self.assertTrue(radius >= 0)\n    num_anchors = len(anchors)\n    self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low))\n    anchors = np.reshape(anchors, num_anchors)\n    samples = []\n    while len(samples) < num_samples:\n        sample = np.random.uniform(low, high)\n        if np.all(np.fabs(sample - anchors) > radius):\n            samples.append(sample)\n    return samples", "docstring": "Generate samples that are far enough from a set of anchor points.\n\nWe generate uniform samples in [low, high], then reject those that are less\nthan radius away from any point in anchors. We stop after we have accepted\nnum_samples samples.\n\nArgs:\nlow: The lower end of the interval.\nhigh: The upper end of the interval.\nanchors: A list of length num_crops with anchor points to avoid.\nradius: Distance threshold for the samples from the anchors.\nnum_samples: How many samples to produce.\n\nReturns:\nsamples: A list of length num_samples with the accepted samples.", "source": "github-repos"}
{"code": "def nic_v1(msg, NICs):\n    \n    if typecode(msg) < 5 or typecode(msg) > 22:\n        raise RuntimeError(\n            \"%s: Not a surface position message (5<TC<8), \\\n            airborne position message (8<TC<19), \\\n            or airborne position with GNSS height (20<TC<22)\" % msg\n        )\n\n    tc = typecode(msg)\n    NIC = uncertainty.TC_NICv1_lookup[tc]\n\n    if isinstance(NIC, dict):\n        NIC = NIC[NICs]\n\n    try:\n        Rc = uncertainty.NICv1[NIC][NICs]['Rc']\n        VPL = uncertainty.NICv1[NIC][NICs]['VPL']\n    except KeyError:\n        Rc, VPL = uncertainty.NA, uncertainty.NA\n\n    return Rc, VPL", "docstring": "Calculate NIC, navigation integrity category, for ADS-B version 1\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\nNICs (int or string): NIC supplement\n\nReturns:\nint or string: Horizontal Radius of Containment\nint or string: Vertical Protection Limit", "source": "juraj-google-style"}
{"code": "def run_server(cls, args=None, **kwargs):\n    if (args is None):\n        args = sys.argv[1:]\n    args = ([cls.__name__] + list(args))\n    green_mode = getattr(cls, 'green_mode', None)\n    kwargs.setdefault('green_mode', green_mode)\n    return run((cls,), args, **kwargs)", "docstring": "Run the class as a device server.\nIt is based on the tango.server.run method.\n\nThe difference is that the device class\nand server name are automatically given.\n\nArgs:\nargs (iterable): args as given in the tango.server.run method\nwithout the server name. If None, the sys.argv\nlist is used\nkwargs: the other keywords argument are as given\nin the tango.server.run method.", "source": "codesearchnet"}
{"code": "def is_legal_subject(self, c: OntologyClass) -> bool:\n        \n        domains = self.included_domains()\n        return c and (not domains or c in domains or c.super_classes_closure() & domains)", "docstring": "is_legal_subject(c) = true if\n- c in included_domains(self) or\n- super_classes_closure(c) intersection included_domains(self) is not empty\n\nThere is no need to check the included_domains(super_properties_closure(self)) because\nincluded_domains(super_properties_closure(self)) is subset of super_classes_closure(included_domains(self))\n\nArgs:\nc:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def interm_fluent_ordering(self) -> List[str]:\n    interm_fluents = self.intermediate_fluents.values()\n    key = (lambda pvar: (pvar.level, pvar.name))\n    return [str(pvar) for pvar in sorted(interm_fluents, key=key)]", "docstring": "The list of intermediate-fluent names in canonical order.\n\nReturns:\nList[str]: A list of fluent names.", "source": "codesearchnet"}
{"code": "async def download_file(context, url, abs_filename, session=None, chunk_size=128):\n    \n    session = session or context.session\n    loggable_url = get_loggable_url(url)\n    log.info(\"Downloading %s\", loggable_url)\n    parent_dir = os.path.dirname(abs_filename)\n    async with session.get(url) as resp:\n        if resp.status == 404:\n            await _log_download_error(resp, \"404 downloading %(url)s: %(status)s; body=%(body)s\")\n            raise Download404(\"{} status {}!\".format(loggable_url, resp.status))\n        elif resp.status != 200:\n            await _log_download_error(resp, \"Failed to download %(url)s: %(status)s; body=%(body)s\")\n            raise DownloadError(\"{} status {} is not 200!\".format(loggable_url, resp.status))\n        makedirs(parent_dir)\n        with open(abs_filename, \"wb\") as fd:\n            while True:\n                chunk = await resp.content.read(chunk_size)\n                if not chunk:\n                    break\n                fd.write(chunk)\n    log.info(\"Done\")", "docstring": "Download a file, async.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nurl (str): the url to download\nabs_filename (str): the path to download to\nsession (aiohttp.ClientSession, optional): the session to use.  If\nNone, use context.session.  Defaults to None.\nchunk_size (int, optional): the chunk size to read from the response\nat a time.  Default is 128.", "source": "juraj-google-style"}
{"code": "def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None, projections=False):\n    if os.path.exists(os.path.join(dir_name, 'branch_0')):\n        branch_dir_names = [os.path.abspath(d) for d in glob.glob('{i}/branch_*'.format(i=dir_name)) if os.path.isdir(d)]\n        sort_by = (lambda x: int(x.split('_')[(- 1)]))\n        sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)\n        branches = []\n        for dir_name in sorted_branch_dir_names:\n            xml_file = os.path.join(dir_name, 'vasprun.xml')\n            if os.path.exists(xml_file):\n                run = Vasprun(xml_file, parse_projected_eigen=projections)\n                branches.append(run.get_band_structure(efermi=efermi))\n            else:\n                warnings.warn('Skipping {}. Unable to find {}'.format(d=dir_name, f=xml_file))\n        return get_reconstructed_band_structure(branches, efermi)\n    else:\n        xml_file = os.path.join(dir_name, 'vasprun.xml')\n        if os.path.exists(xml_file):\n            return Vasprun(xml_file, parse_projected_eigen=projections).get_band_structure(kpoints_filename=None, efermi=efermi)\n        else:\n            return None", "docstring": "This method is used to get band structure info from a VASP directory. It\ntakes into account that the run can be divided in several branches named\n\"branch_x\". If the run has not been divided in branches the method will\nturn to parsing vasprun.xml directly.\n\nThe method returns None is there\"s a parsing error\n\nArgs:\ndir_name: Directory containing all bandstructure runs.\nefermi: Efermi for bandstructure.\nprojections: True if you want to get the data on site projections if\nany. Note that this is sometimes very large\n\nReturns:\nA BandStructure Object", "source": "codesearchnet"}
{"code": "def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:\n    if attention_mask.dim() == 3:\n        extended_attention_mask = attention_mask[:, None, :, :]\n    elif attention_mask.dim() == 2:\n        extended_attention_mask = attention_mask[:, None, None, :]\n    else:\n        raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape))\n    extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n    return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArguments:\nattention_mask (`torch.Tensor`):\nMask with ones indicating tokens to attend to, zeros for tokens to ignore.\ninput_shape (`Tuple[int]`):\nThe shape of the input to the model.\ndevice (`torch.device`):\nThe device of the input to the model.\n\nReturns:\n`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github-repos"}
{"code": "def end_at(self, document_fields):\n    query = query_mod.Query(self)\n    return query.end_at(document_fields)", "docstring": "End query at a cursor with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.end_at` for\nmore information on this method.\n\nArgs:\ndocument_fields (Union[~.firestore_v1beta1.\\\ndocument.DocumentSnapshot, dict, list, tuple]): a document\nsnapshot or a dictionary/list/tuple of fields representing a\nquery results cursor. A cursor is a collection of values that\nrepresent a position in a query result set.\n\nReturns:\n~.firestore_v1beta1.query.Query: A query with cursor.", "source": "codesearchnet"}
{"code": "def get_base_branch():\n    base_branch = git.guess_base_branch()\n    if (base_branch is None):\n        log.info(\"Can't guess the base branch, you have to pick one yourself:\")\n        base_branch = choose_branch()\n    return base_branch", "docstring": "Return the base branch for the current branch.\n\nThis function will first try to guess the base branch and if it can't it\nwill let the user choose the branch from the list of all local branches.\n\nReturns:\nstr: The name of the branch the current branch is based on.", "source": "codesearchnet"}
{"code": "def __init__(self, samplerate, nframes, wavpath):\n        \n        imp = minidom.getDOMImplementation()\n        dt = imp.createDocumentType('sonic-visualiser', None, None)\n        self.doc = doc = imp.createDocument(None,'sv', dt)\n        root = doc.documentElement\n        self.__dname = dict()\n\n        self.data = root.appendChild(doc.createElement('data'))\n        self.display = root.appendChild(doc.createElement('display'))\n        window = self.display.appendChild(doc.createElement('window'))\n        self.defwidth = 900\n        window.setAttribute('width', str(self.defwidth))\n        window.setAttribute('height', str(856))\n        self.selections = root.appendChild(doc.createElement('selections'))\n\n\n        self.nbdata = 0\n\n        \n        self.samplerate =  samplerate\n        self.nframes = nframes\n\n        self.__setMainWaveModel(wavpath)", "docstring": "Init a sonic visualiser environment structure based on\nthe attributes of the main audio file\n\nArgs:\nsamplerate(int): media sample rate (Hz)\nnframes(int): number of samples\nwavpath(str): Full path to the wav file used in the current environment", "source": "juraj-google-style"}
{"code": "def _page_to_text(page):\n  \n  \n  start_pos = page.find(u\"<text\")\n  assert start_pos != -1\n  end_tag_pos = page.find(u\">\", start_pos)\n  assert end_tag_pos != -1\n  end_tag_pos += len(u\">\")\n  end_pos = page.find(u\"</text>\")\n  if end_pos == -1:\n    return u\"\"\n  return page[end_tag_pos:end_pos]", "docstring": "Extract the text from a page.\n\nArgs:\npage: a unicode string\nReturns:\na unicode string", "source": "juraj-google-style"}
{"code": "def _CreateProcessingConfiguration(self, knowledge_base):\n    configuration = configurations.ProcessingConfiguration()\n    configuration.artifact_filters = self._artifact_filters\n    configuration.credentials = self._credential_configurations\n    configuration.debug_output = self._debug_mode\n    configuration.event_extraction.text_prepend = self._text_prepend\n    configuration.extraction.hasher_file_size_limit = self._hasher_file_size_limit\n    configuration.extraction.hasher_names_string = self._hasher_names_string\n    configuration.extraction.process_archives = self._process_archives\n    configuration.extraction.process_compressed_streams = self._process_compressed_streams\n    configuration.extraction.yara_rules_string = self._yara_rules_string\n    configuration.filter_file = self._filter_file\n    configuration.input_source.mount_path = self._mount_path\n    configuration.log_filename = self._log_file\n    configuration.parser_filter_expression = self._parser_filter_expression\n    configuration.preferred_year = self._preferred_year\n    configuration.profiling.directory = self._profiling_directory\n    configuration.profiling.sample_rate = self._profiling_sample_rate\n    configuration.profiling.profilers = self._profilers\n    configuration.temporary_directory = self._temporary_directory\n    if (not configuration.parser_filter_expression):\n        operating_system = knowledge_base.GetValue('operating_system')\n        operating_system_product = knowledge_base.GetValue('operating_system_product')\n        operating_system_version = knowledge_base.GetValue('operating_system_version')\n        preset_definitions = parsers_manager.ParsersManager.GetPresetsForOperatingSystem(operating_system, operating_system_product, operating_system_version)\n        if preset_definitions:\n            preset_names = [preset_definition.name for preset_definition in preset_definitions]\n            filter_expression = ','.join(preset_names)\n            logger.info('Parser filter expression set to: {0:s}'.format(filter_expression))\n            configuration.parser_filter_expression = filter_expression\n    return configuration", "docstring": "Creates a processing configuration.\n\nArgs:\nknowledge_base (KnowledgeBase): contains information from the source\ndata needed for parsing.\n\nReturns:\nProcessingConfiguration: processing configuration.\n\nRaises:\nBadConfigOption: if more than 1 parser and parser plugins preset\nwas found for the detected operating system.", "source": "codesearchnet"}
{"code": "def plot_timestream(array, kidid, xtick='time', scantypes=None, ax=None, **kwargs):\n    if (ax is None):\n        ax = plt.gca()\n    index = np.where((array.kidid == kidid))[0]\n    if (len(index) == 0):\n        raise KeyError('Such a kidid does not exist.')\n    index = int(index)\n    if (scantypes is None):\n        if (xtick == 'time'):\n            ax.plot(array.time, array[(:, index)], label='ALL', **kwargs)\n        elif (xtick == 'index'):\n            ax.plot(np.ogrid[:len(array.time)], array[(:, index)], label='ALL', **kwargs)\n    else:\n        for scantype in scantypes:\n            if (xtick == 'time'):\n                ax.plot(array.time[(array.scantype == scantype)], array[(:, index)][(array.scantype == scantype)], label=scantype, **kwargs)\n            elif (xtick == 'index'):\n                ax.plot(np.ogrid[:len(array.time[(array.scantype == scantype)])], array[(:, index)][(array.scantype == scantype)], label=scantype, **kwargs)\n    ax.set_xlabel('{}'.format(xtick))\n    ax.set_ylabel(str(array.datatype.values))\n    ax.legend()\n    kidtpdict = {0: 'wideband', 1: 'filter', 2: 'blind'}\n    try:\n        kidtp = kidtpdict[int(array.kidtp[index])]\n    except KeyError:\n        kidtp = 'filter'\n    ax.set_title('ch \n    logger.info('timestream data (ch={}) has been plotted.'.format(kidid))", "docstring": "Plot timestream data.\n\nArgs:\narray (xarray.DataArray): Array which the timestream data are included.\nkidid (int): Kidid.\nxtick (str): Type of x axis.\n'time': Time.\n'index': Time index.\nscantypes (list): Scantypes. If None, all scantypes are used.\nax (matplotlib.axes): Axis you want to plot on.\nkwargs (optional): Plot options passed to ax.plot().", "source": "codesearchnet"}
{"code": "def sendline(self, text):\n        \n        logger.debug(\"Sending input '{0}' to '{1}'\".format(text, self.name))\n        try:\n            return self._spawn.sendline(text)\n        except pexpect.exceptions.EOF as e:\n            logger.debug(\"Raising termination exception.\")\n            raise TerminationException(instance=self, real_exception=e, output=self.get_output())\n        except pexpect.exceptions.TIMEOUT as e:\n            logger.debug(\"Raising timeout exception.\")\n            raise TimeoutException(instance=self, real_exception=e, output=self.get_output())\n        except Exception as e:\n            logger.debug(\"Sending input failed: \" + str(e))\n            raise NestedException(instance=self, real_exception=e, output=self.get_output())", "docstring": "Sends an input line to the running program, including os.linesep.\n\nArgs:\ntext (str): The input text to be send.\n\nRaises:\nTerminationException: The program terminated before / while / after sending the input.\nNestedException: An internal problem occured while waiting for the output.", "source": "juraj-google-style"}
{"code": "def _simplify_non_context_field_binary_composition(expression):\n    if any((isinstance(expression.left, ContextField), isinstance(expression.right, ContextField))):\n        raise AssertionError(u'Received a BinaryComposition {} with a ContextField operand. This should never happen.'.format(expression))\n    if (expression.operator == u'||'):\n        if ((expression.left == TrueLiteral) or (expression.right == TrueLiteral)):\n            return TrueLiteral\n        else:\n            return expression\n    elif (expression.operator == u'&&'):\n        if (expression.left == TrueLiteral):\n            return expression.right\n        if (expression.right == TrueLiteral):\n            return expression.left\n        else:\n            return expression\n    else:\n        return expression", "docstring": "Return a simplified BinaryComposition if either operand is a TrueLiteral.\n\nArgs:\nexpression: BinaryComposition without any ContextField operand(s)\n\nReturns:\nsimplified expression if the given expression is a disjunction/conjunction\nand one of it's operands is a TrueLiteral,\nand the original expression otherwise", "source": "codesearchnet"}
{"code": "def __init__(self, cell):\n    self._cell = cell", "docstring": "Creates a new CounterCell.\n\nArgs:\ncell: A c pointer of TFE_MonitoringCounterCell.", "source": "github-repos"}
{"code": "def generate_key_pair(secret=None):\n    \n    if secret:\n        keypair_raw = ed25519_generate_key_pair_from_secret(secret)\n        return CryptoKeypair(\n            *(k.decode() for k in keypair_raw))\n    else:\n        return generate_keypair()", "docstring": "Generates a cryptographic key pair.\nArgs:\nsecret (:class:`string`): A secret that serves as a seed\nReturns:\n:class:`~bigchaindb.common.crypto.CryptoKeypair`: A\n:obj:`collections.namedtuple` with named fields\n:attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and\n:attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n        \n        assert 'header' in data, 'Required keyword \"header\" is missing!'\n        assert 'values' in data, 'Required keyword \"values\" is missing!'\n        assert 'datetimes' in data, 'Required keyword \"datetimes\" is missing!'\n        coll = cls(Header.from_json(data['header']), data['values'], data['datetimes'])\n        if 'validated_a_period' in data:\n            coll._validated_a_period = data['validated_a_period']\n        return coll", "docstring": "Create a Data Collection from a dictionary.\n\nArgs:\n{\n\"header\": A Ladybug Header,\n\"values\": An array of values,\n\"datetimes\": An array of datetimes,\n\"validated_a_period\": Boolean for whether header analysis_period is valid\n}", "source": "juraj-google-style"}
{"code": "def set_time(self, value: float):\n        \n        if value < 0:\n            value = 0\n\n        self.controller.row = self.rps * value", "docstring": "Set the current time jumping in the timeline.\n\nArgs:\nvalue (float): The new time", "source": "juraj-google-style"}
{"code": "def all_to_all(x, concat_dimension, split_dimension, split_count, group_assignment=None, name=None):\n    if group_assignment is None:\n        group_assignment = _create_default_group_assignment()\n    return gen_tpu_ops.all_to_all(x, group_assignment, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count, name=name)", "docstring": "Exchange data across TPU replicas.\n\nArgs:\nx: The local tensor.\nconcat_dimension: The dimension number to concatenate.\nsplit_dimension: The dimension number to split.\nsplit_count: The number of splits, this number must equal to the sub-group\nsize(group_assignment.get_shape()[1])\ngroup_assignment: Optional 2d int32 lists with shape [num_groups,\nnum_replicas_per_group]. `group_assignment[i]` represents the replica ids\nin the ith subgroup.\nname: Optional op name.\n\nReturns:\nA `Tensor` which is concatenated by data from different replicas.", "source": "github-repos"}
{"code": "def query_source_file_line(self, file_path, lineno):\n    if not self._source_files:\n        raise ValueError('This debug server has not received any source file contents yet.')\n    for source_files in self._source_files:\n        for source_file_proto in source_files.source_files:\n            if source_file_proto.file_path == file_path:\n                return source_file_proto.lines[lineno - 1]\n    raise ValueError('Source file at path %s has not been received by the debug server', file_path)", "docstring": "Query the content of a given line in a source file.\n\nArgs:\nfile_path: Path to the source file.\nlineno: Line number as an `int`.\n\nReturns:\nContent of the line as a string.\n\nRaises:\nValueError: If no source file is found at the given file_path.", "source": "github-repos"}
{"code": "def __init__(self, stream):\n        \n        super(BinaryReader, self).__init__()\n        self.stream = stream", "docstring": "Create an instance.\n\nArgs:\nstream (BytesIO): a stream to operate on. i.e. a neo.IO.MemoryStream or raw BytesIO.", "source": "juraj-google-style"}
{"code": "def inspect_swarm(self):\n    url = self._url('/swarm')\n    return self._result(self._get(url), True)", "docstring": "Retrieve low-level information about the current swarm.\n\nReturns:\nA dictionary containing data about the swarm.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _DropCommonSuffixes(filename):\n    for suffix in itertools.chain((('%s.%s' % (test_suffix.lstrip('_'), ext)) for (test_suffix, ext) in itertools.product(_test_suffixes, GetNonHeaderExtensions())), (('%s.%s' % (suffix, ext)) for (suffix, ext) in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):\n        if (filename.endswith(suffix) and (len(filename) > len(suffix)) and (filename[((- len(suffix)) - 1)] in ('-', '_'))):\n            return filename[:((- len(suffix)) - 1)]\n    return os.path.splitext(filename)[0]", "docstring": "Drops common suffixes like _test.cc or -inl.h from filename.\n\nFor example:\n>>> _DropCommonSuffixes('foo/foo-inl.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/bar/foo.cc')\n'foo/bar/foo'\n>>> _DropCommonSuffixes('foo/foo_internal.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n'foo/foo_unusualinternal'\n\nArgs:\nfilename: The input filename.\n\nReturns:\nThe filename with the common suffix removed.", "source": "codesearchnet"}
{"code": "def GetValueByName(self, name):\n    \n    if not self._registry_key and self._registry:\n      self._GetKeyFromRegistry()\n\n    if not self._registry_key:\n      return None\n\n    return self._registry_key.GetValueByName(name)", "docstring": "Retrieves a value by name.\n\nArgs:\nname (str): name of the value or an empty string for the default value.\n\nReturns:\nWinRegistryValue: Windows Registry value or None if not found.", "source": "juraj-google-style"}
{"code": "def find_one(self, collection, query):\n    obj = getattr(self.db, collection)\n    result = obj.find_one(query)\n    return result", "docstring": "Search a collection for the query provided and return one result. Just\na raw interface to mongo to do any query you want.\n\nArgs:\ncollection: The db collection. See main class documentation.\nquery: A mongo find query.\nReturns:\npymongo Cursor object with the results.", "source": "codesearchnet"}
{"code": "def sequence_equal(self, second_iterable, equality_comparer=operator.eq):\n    if self.closed():\n        raise ValueError('Attempt to call to_tuple() on a closed Queryable.')\n    if (not is_iterable(second_iterable)):\n        raise TypeError('Cannot compute sequence_equal() with second_iterable of non-iterable {type}'.format(type=str(type(second_iterable))[7:(- 1)]))\n    if (not is_callable(equality_comparer)):\n        raise TypeError('aggregate() parameter equality_comparer={equality_comparer} is not callable'.format(equality_comparer=repr(equality_comparer)))\n    try:\n        if (len(self._iterable) != len(second_iterable)):\n            return False\n    except TypeError:\n        pass\n    sentinel = object()\n    for (first, second) in izip_longest(self, second_iterable, fillvalue=sentinel):\n        if ((first is sentinel) or (second is sentinel)):\n            return False\n        if (not equality_comparer(first, second)):\n            return False\n    return True", "docstring": "Determine whether two sequences are equal by elementwise comparison.\n\nSequence equality is defined as the two sequences being equal length\nand corresponding elements being equal as determined by the equality\ncomparer.\n\nNote: This method uses immediate execution.\n\nArgs:\nsecond_iterable: The sequence which will be compared with the\nsource sequence.\n\nequality_comparer: An optional binary predicate function which is\nused to compare corresponding elements. Should return True if\nthe elements are equal, otherwise False.  The default equality\ncomparer is operator.eq which calls __eq__ on elements of the\nsource sequence with the corresponding element of the second\nsequence as a parameter.\n\nReturns:\nTrue if the sequences are equal, otherwise False.\n\nRaises:\nValueError: If the Queryable is closed.\nTypeError: If second_iterable is not in fact iterable.\nTypeError: If equality_comparer is not callable.", "source": "codesearchnet"}
{"code": "def codemirror_instance(config_name, varname, element_id, assets=True):\n    output = io.StringIO()\n    manifesto = CodemirrorAssetTagRender()\n    manifesto.register(config_name)\n    if assets:\n        output.write(manifesto.css_html())\n        output.write(manifesto.js_html())\n    html = manifesto.codemirror_html(config_name, varname, element_id)\n    output.write(html)\n    content = output.getvalue()\n    output.close()\n    return mark_safe(content)", "docstring": "Return HTML to init a CodeMirror instance for an element.\n\nThis will output the whole HTML needed to initialize a CodeMirror instance\nwith needed assets loading. Assets can be omitted with the ``assets``\noption.\n\nExample:\n::\n\n{% load djangocodemirror_tags %}\n{% codemirror_instance 'a-config-name' 'foo_codemirror' 'foo' %}\n\nArguments:\nconfig_name (string): A registred config name.\nvarname (string): A Javascript variable name.\nelement_id (string): An HTML element identifier (without\nleading ``#``) to attach to a CodeMirror instance.\n\nKeyword Arguments:\nassets (Bool): Adds needed assets before the HTML if ``True``, else\nonly CodeMirror instance will be outputed. Default value is\n``True``.\n\nReturns:\nstring: HTML.", "source": "codesearchnet"}
{"code": "def _remove_string_from_commastring(self, field, string):\n    commastring = self.data.get(field, '')\n    if (string in commastring):\n        self.data[field] = commastring.replace(string, '')\n        return True\n    return False", "docstring": "Remove a string from a comma separated list of strings\n\nArgs:\nfield (str): Field containing comma separated list\nstring (str): String to remove\n\nReturns:\nbool: True if string removed or False if not", "source": "codesearchnet"}
{"code": "def warning_handler(self, handler):\n        \n        if not self.opened():\n            handler = handler or util.noop\n            self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n            self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)", "docstring": "Setter for the warning handler function.\n\nIf the DLL is open, this function is a no-op, so it should be called\nprior to calling ``open()``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhandler (function): function to call on warning messages\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def selected(self):\n    query_results = self.map((lambda el: el.is_selected()), 'selected').results\n    if query_results:\n        return all(query_results)\n    return False", "docstring": "Check whether all the matched elements are selected.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name, link=None):\n    if (link is None):\n        endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/virtualMachines?$expand=instanceView&$select=instanceView', '&api-version=', COMP_API])\n    else:\n        endpoint = link\n    return do_get(endpoint, access_token)", "docstring": "Gets one page of a paginated list of scale set VM instance views.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\nlink (str): Optional link to URI to get list (as part of a paginated API query).\n\nReturns:\nHTTP response. JSON body of list of VM instance views.", "source": "codesearchnet"}
{"code": "def submit(self, func, *args, executors='all', fn_hash=None, cache=False, **kwargs):\n    if self.cleanup_called:\n        raise ValueError('Cannot submit to a DFK that has been cleaned up')\n    task_id = self.task_count\n    self.task_count += 1\n    if (isinstance(executors, str) and (executors.lower() == 'all')):\n        choices = list((e for e in self.executors if (e != 'data_manager')))\n    elif isinstance(executors, list):\n        choices = executors\n    executor = random.choice(choices)\n    (args, kwargs) = self._add_input_deps(executor, args, kwargs)\n    task_def = {'depends': None, 'executor': executor, 'func': func, 'func_name': func.__name__, 'args': args, 'kwargs': kwargs, 'fn_hash': fn_hash, 'memoize': cache, 'callback': None, 'exec_fu': None, 'checkpoint': None, 'fail_count': 0, 'fail_history': [], 'env': None, 'status': States.unsched, 'id': task_id, 'time_submitted': None, 'time_returned': None, 'app_fu': None}\n    if (task_id in self.tasks):\n        raise DuplicateTaskError('internal consistency error: Task {0} already exists in task list'.format(task_id))\n    else:\n        self.tasks[task_id] = task_def\n    (dep_cnt, depends) = self._gather_all_deps(args, kwargs)\n    self.tasks[task_id]['depends'] = depends\n    task_stdout = kwargs.get('stdout')\n    task_stderr = kwargs.get('stderr')\n    logger.info('Task {} submitted for App {}, waiting on tasks {}'.format(task_id, task_def['func_name'], [fu.tid for fu in depends]))\n    self.tasks[task_id]['task_launch_lock'] = threading.Lock()\n    app_fu = AppFuture(tid=task_id, stdout=task_stdout, stderr=task_stderr)\n    self.tasks[task_id]['app_fu'] = app_fu\n    app_fu.add_done_callback(partial(self.handle_app_update, task_id))\n    self.tasks[task_id]['status'] = States.pending\n    logger.debug('Task {} set to pending state with AppFuture: {}'.format(task_id, task_def['app_fu']))\n    for d in depends:\n\n        def callback_adapter(dep_fut):\n            self.launch_if_ready(task_id)\n        try:\n            d.add_done_callback(callback_adapter)\n        except Exception as e:\n            logger.error('add_done_callback got an exception {} which will be ignored'.format(e))\n    self.launch_if_ready(task_id)\n    return task_def['app_fu']", "docstring": "Add task to the dataflow system.\n\nIf the app task has the executors attributes not set (default=='all')\nthe task will be launched on a randomly selected executor from the\nlist of executors. If the app task specifies a particular set of\nexecutors, it will be targeted at the specified executors.\n\n>>> IF all deps are met:\n>>>   send to the runnable queue and launch the task\n>>> ELSE:\n>>>   post the task in the pending queue\n\nArgs:\n- func : A function object\n- *args : Args to the function\n\nKWargs :\n- executors (list or string) : List of executors this call could go to.\nDefault='all'\n- fn_hash (Str) : Hash of the function and inputs\nDefault=None\n- cache (Bool) : To enable memoization or not\n- kwargs (dict) : Rest of the kwargs to the fn passed as dict.\n\nReturns:\n(AppFuture) [DataFutures,]", "source": "codesearchnet"}
{"code": "def install_package(tar_url, folder, md5_url='{tar_url}.md5',\n                    on_download=lambda: None, on_complete=lambda: None):\n    \n    data_file = join(folder, basename(tar_url))\n\n    md5_url = md5_url.format(tar_url=tar_url)\n    try:\n        remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]\n    except (UnicodeDecodeError, URLError):\n        raise ValueError('Invalid MD5 url: ' + md5_url)\n    if remote_md5 != calc_md5(data_file):\n        on_download()\n        if isfile(data_file):\n            try:\n                with tarfile.open(data_file) as tar:\n                    for i in reversed(list(tar)):\n                        try:\n                            os.remove(join(folder, i.path))\n                        except OSError:\n                            pass\n            except (OSError, EOFError):\n                pass\n\n        download_extract_tar(tar_url, folder, data_file)\n        on_complete()\n        if remote_md5 != calc_md5(data_file):\n            raise ValueError('MD5 url does not match tar: ' + md5_url)\n        return True\n    return False", "docstring": "Install or update a tar package that has an md5\n\nArgs:\ntar_url (str): URL of package to download\nfolder (str): Location to extract tar. Will be created if doesn't exist\nmd5_url (str): URL of md5 to use to check for updates\non_download (Callable): Function that gets called when downloading a new update\non_complete (Callable): Function that gets called when a new download is complete\n\nReturns:\nbool: Whether the package was updated", "source": "juraj-google-style"}
{"code": "def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):\n    \n    hostname = None\n    try:\n        address = dns.reversename.from_address(ip_address)\n        hostname = query_dns(address, \"PTR\", cache=cache,\n                             nameservers=nameservers,\n                             timeout=timeout)[0]\n\n    except dns.exception.DNSException:\n        pass\n\n    return hostname", "docstring": "Resolves an IP address to a hostname using a reverse DNS query\n\nArgs:\nip_address (str): The IP address to resolve\ncache (ExpiringDict): Cache storage\nnameservers (list): A list of one or more nameservers to use\n(Cloudflare's public DNS resolvers by default)\ntimeout (float): Sets the DNS query timeout in seconds\n\nReturns:\nstr: The reverse DNS hostname (if any)", "source": "juraj-google-style"}
{"code": "def delete_object(self, ref, delete_arguments=None):\n        \n        opts = self._get_request_options()\n        if not isinstance(delete_arguments, dict):\n            delete_arguments = {}\n        url = self._construct_url(ref, query_params=delete_arguments)\n        self._log_request('delete', url, opts)\n        r = self.session.delete(url, **opts)\n\n        self._validate_authorized(r)\n\n        if r.status_code != requests.codes.ok:\n            self._check_service_availability('delete', r, ref)\n\n            raise ib_ex.InfobloxCannotDeleteObject(\n                response=jsonutils.loads(r.content),\n                ref=ref,\n                content=r.content,\n                code=r.status_code)\n\n        return self._parse_reply(r)", "docstring": "Remove an Infoblox object\n\nArgs:\nref               (str): Object reference\ndelete_arguments (dict): Extra delete arguments\nReturns:\nThe object reference of the removed object\nRaises:\nInfobloxException", "source": "juraj-google-style"}
{"code": "def pcolls_from_streaming_cache(user_pipeline: beam.Pipeline, query_pipeline: beam.Pipeline, name_to_pcoll: Dict[str, beam.PCollection]) -> Dict[str, beam.PCollection]:\n\n    def exception_handler(e):\n        _LOGGER.error(str(e))\n        return True\n    cache_manager = ie.current_env().get_cache_manager(user_pipeline, create_if_absent=True)\n    test_stream_service = ie.current_env().get_test_stream_service_controller(user_pipeline)\n    if not test_stream_service:\n        test_stream_service = TestStreamServiceController(cache_manager, exception_handler=exception_handler)\n        test_stream_service.start()\n        ie.current_env().set_test_stream_service_controller(user_pipeline, test_stream_service)\n    tag_to_name = {}\n    for name, pcoll in name_to_pcoll.items():\n        key = CacheKey.from_pcoll(name, pcoll).to_str()\n        tag_to_name[key] = name\n    output_pcolls = query_pipeline | test_stream.TestStream(output_tags=set(tag_to_name.keys()), coder=cache_manager._default_pcoder, endpoint=test_stream_service.endpoint)\n    sql_source = {}\n    for tag, output in output_pcolls.items():\n        name = tag_to_name[tag]\n        output.element_type = name_to_pcoll[name].element_type\n        sql_source[name] = output\n    return sql_source", "docstring": "Reads PCollection cache through the TestStream.\n\nArgs:\nuser_pipeline: The beam.Pipeline object defined by the user in the\nnotebook.\nquery_pipeline: The beam.Pipeline object built by the magic to execute the\nSQL query.\nname_to_pcoll: PCollections with variable names used in the SQL query.\n\nReturns:\nA Dict[str, beam.PCollection], where each PCollection is tagged with\ntheir PCollection variable names, read from the cache.\n\nWhen the user_pipeline has unbounded sources, we force all cache reads to go\nthrough the TestStream even if they are bounded sources.", "source": "github-repos"}
{"code": "def _validate_bool(value):\n    if isinstance(value, six.text_type):\n        if (value.strip().lower() == 'true'):\n            value = True\n        elif (value.strip().lower() == 'false'):\n            value = False\n        else:\n            raise ValueError('\"{}\" must be a boolean (\"True\" or \"False\")'.format(value))\n    if (not isinstance(value, bool)):\n        raise ValueError('\"{}\" is not a boolean value.'.format(value))\n    return value", "docstring": "Validate a setting is a bool.\n\nReturns:\nbool: The value as a boolean.\n\nRaises:\nValueError: If the value can't be parsed as a bool string or isn't already bool.", "source": "codesearchnet"}
{"code": "def load_schema(schema_name, resolved=False):\n    schema_data = ''\n    with open(get_schema_path(schema_name, resolved)) as schema_fd:\n        schema_data = json.loads(schema_fd.read())\n    return schema_data", "docstring": "Load the given schema from wherever it's installed.\n\nArgs:\nschema_name(str): Name of the schema to load, for example 'authors'.\nresolved(bool): If True will return the resolved schema, that is with\nall the $refs replaced by their targets.\n\nReturns:\ndict: the schema with the given name.", "source": "codesearchnet"}
{"code": "def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):\n    try:\n        notify_info2('Updating: ', oldobj)\n        if (oldobj is newobj):\n            return\n        if (type(oldobj) is not type(newobj)):\n            notify_error(('Type of: %s changed... Skipping.' % (oldobj,)))\n            return\n        if isinstance(newobj, types.FunctionType):\n            self._update_function(oldobj, newobj)\n            return\n        if isinstance(newobj, types.MethodType):\n            self._update_method(oldobj, newobj)\n            return\n        if isinstance(newobj, classmethod):\n            self._update_classmethod(oldobj, newobj)\n            return\n        if isinstance(newobj, staticmethod):\n            self._update_staticmethod(oldobj, newobj)\n            return\n        if hasattr(types, 'ClassType'):\n            classtype = (types.ClassType, type)\n        else:\n            classtype = type\n        if isinstance(newobj, classtype):\n            self._update_class(oldobj, newobj)\n            return\n        if (hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and (newobj.__metaclass__ == newobj.__class__)):\n            self._update_class(oldobj, newobj)\n            return\n        if (namespace is not None):\n            if ((oldobj != newobj) and (str(oldobj) != str(newobj)) and (repr(oldobj) != repr(newobj))):\n                xreload_old_new = None\n                if is_class_namespace:\n                    xreload_old_new = getattr(namespace, '__xreload_old_new__', None)\n                    if (xreload_old_new is not None):\n                        self.found_change = True\n                        xreload_old_new(name, oldobj, newobj)\n                elif ('__xreload_old_new__' in namespace):\n                    xreload_old_new = namespace['__xreload_old_new__']\n                    xreload_old_new(namespace, name, oldobj, newobj)\n                    self.found_change = True\n    except:\n        notify_error(('Exception found when updating %s. Proceeding for other items.' % (name,)))\n        pydev_log.exception()", "docstring": "Update oldobj, if possible in place, with newobj.\n\nIf oldobj is immutable, this simply returns newobj.\n\nArgs:\noldobj: the object to be updated\nnewobj: the object used as the source for the update", "source": "codesearchnet"}
{"code": "def CheckPrintf(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    match = Search('snprintf\\\\s*\\\\(([^,]*),\\\\s*([0-9]*)\\\\s*,', line)\n    if (match and (match.group(2) != '0')):\n        error(filename, linenum, 'runtime/printf', 3, ('If you can, use sizeof(%s) instead of %s as the 2nd arg to snprintf.' % (match.group(1), match.group(2))))\n    if Search('\\\\bsprintf\\\\s*\\\\(', line):\n        error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.')\n    match = Search('\\\\b(strcpy|strcat)\\\\s*\\\\(', line)\n    if match:\n        error(filename, linenum, 'runtime/printf', 4, ('Almost always, snprintf is better than %s' % match.group(1)))", "docstring": "Check for printf related issues.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def create_bq_dataset(project, dataset_base_name):\n    client = bigquery.Client(project=project)\n    unique_dataset_name = '%s%d%s' % (dataset_base_name, int(time.time()), secrets.token_hex(3))\n    dataset_ref = client.dataset(unique_dataset_name, project=project)\n    dataset = bigquery.Dataset(dataset_ref)\n    client.create_dataset(dataset)\n    return dataset_ref", "docstring": "Creates an empty BigQuery dataset.\n\nArgs:\nproject: Project to work in.\ndataset_base_name: Prefix for dataset id.\n\nReturns:\nA ``google.cloud.bigquery.dataset.DatasetReference`` object pointing to the\nnew dataset.", "source": "github-repos"}
{"code": "def _update_triplestore(self, es_result, action_list, **kwargs):\n        \n        idx_time = XsdDatetime(datetime.datetime.utcnow())\n        uri_keys = {}\n        bnode_keys = {}\n        for item in action_list:\n            try:\n                uri_keys[item['_id']] = item['_source'][\"uri\"]\n            except KeyError:\n                bnode_keys[item['_id']] = item['_id']\n        error_dict = {}\n        error_bnodes = {}\n        if es_result[1]:\n            for result in es_result[1]:\n                err_item = list(result.values())[0]\n                try:\n                    error_dict[uri_keys.pop(err_item['_id'])] = \\\n                            XsdString(err_item['error']['reason'])\n                except KeyError:\n                    error_bnodes[bnode_keys.pop(err_item['_id'])] = \\\n                            XsdString(err_item['error']['reason'])\n        if uri_keys:\n            sparql_good = .format(idx_time=idx_time.sparql,\n                           subj_list=\"<%s>\" % \">\\n<\".join(uri_keys.values()))\n            self.tstore_conn.update_query(sparql_good)\n        \n        if not error_dict:\n            return\n        \n        sparql_error = .format(subj_list=\"<%s>\" % \">\\n<\".join(error_dict.keys()))\n        self.tstore_conn.update_query(sparql_error)\n        del sparql_error\n        sparql_update = .format(\n                    idx_time=idx_time.sparql,\n                    error_list=\"\\n\".join([\"(<%s> %s)\" % (key, val.sparql)\n                                          for key, val in error_dict.items()]))\n\n        \n        \n        self.tstore_conn.update_query(sparql_update)\n        del sparql_update", "docstring": "updates the triplestore with success of saves and failues of indexing\n\nArgs:\n-----\nes_result: the elasticsearch result list\naction_list: list of elasticsearch action items that were indexed", "source": "juraj-google-style"}
{"code": "def full_name(decl, with_defaults=True):\n    if (None is decl):\n        raise RuntimeError('Unable to generate full name for None object!')\n    if with_defaults:\n        if (not decl.cache.full_name):\n            path = declaration_path(decl)\n            if (path == ['']):\n                decl.cache.full_name = ''\n            else:\n                decl.cache.full_name = full_name_from_declaration_path(path)\n        return decl.cache.full_name\n    else:\n        if (not decl.cache.full_partial_name):\n            path = partial_declaration_path(decl)\n            if (path == ['']):\n                decl.cache.full_partial_name = ''\n            else:\n                decl.cache.full_partial_name = full_name_from_declaration_path(path)\n        return decl.cache.full_partial_name", "docstring": "Returns declaration full qualified name.\n\nIf `decl` belongs to anonymous namespace or class, the function will return\nC++ illegal qualified name.\n\nArgs:\ndecl (declaration_t): declaration for which the full qualified name\nshould be calculated.\n\nReturns:\nlist[(str | basestring)]: full name of the declaration.", "source": "codesearchnet"}
{"code": "def account_states(self, **kwargs):\n    path = self._get_id_path('account_states')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "This method lets users get the status of whether or not the movie has\nbeen rated or added to their favourite or watch lists. A valid session\nid is required.\n\nArgs:\nsession_id: see Authentication.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def __field_to_parameter_type_and_format(self, field):\n    \n    \n    variant = field.variant\n    if variant == messages.Variant.MESSAGE:\n      raise TypeError('A message variant can\\'t be used in a parameter.')\n\n    \n    \n    \n\n    custom_variant_map = {\n        messages.Variant.DOUBLE: ('number', 'double'),\n        messages.Variant.FLOAT: ('number', 'float'),\n        messages.Variant.INT64: ('string', 'int64'),\n        messages.Variant.SINT64: ('string', 'int64'),\n        messages.Variant.UINT64: ('string', 'uint64'),\n        messages.Variant.INT32: ('integer', 'int32'),\n        messages.Variant.SINT32: ('integer', 'int32'),\n        messages.Variant.UINT32: ('integer', 'uint32'),\n        messages.Variant.BOOL: ('boolean', None),\n        messages.Variant.STRING: ('string', None),\n        messages.Variant.BYTES: ('string', 'byte'),\n        messages.Variant.ENUM: ('string', None),\n    }\n    return custom_variant_map.get(variant) or (variant.name.lower(), None)", "docstring": "Converts the field variant type into a tuple describing the parameter.\n\nArgs:\nfield: An instance of a subclass of messages.Field.\n\nReturns:\nA tuple with the type and format of the field, respectively.\n\nRaises:\nTypeError: if the field variant is a message variant.", "source": "juraj-google-style"}
{"code": "def parse_global_args(argv):\n    parser = create_parser()\n    args = parser.parse_args(argv)\n    should_log = (args.include or args.exclude or (args.verbose > 0))\n    verbosity = args.verbose\n    root = logging.getLogger()\n    if should_log:\n        formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s', '%y-%m-%d %H:%M:%S')\n        if args.logfile:\n            handler = logging.FileHandler(args.logfile)\n        else:\n            handler = logging.StreamHandler()\n        handler.setFormatter(formatter)\n        if (args.include and args.exclude):\n            print('You cannot combine whitelisted (-i) and blacklisted (-e) loggers, you must use one or the other.')\n            sys.exit(1)\n        loglevels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]\n        if (verbosity >= len(loglevels)):\n            verbosity = (len(loglevels) - 1)\n        level = loglevels[verbosity]\n        if args.include:\n            for name in args.include:\n                logger = logging.getLogger(name)\n                logger.setLevel(level)\n                logger.addHandler(handler)\n            root.addHandler(logging.NullHandler())\n        else:\n            for name in args.exclude:\n                logger = logging.getLogger(name)\n                logger.disabled = True\n            root.setLevel(level)\n            root.addHandler(handler)\n    else:\n        root.addHandler(logging.NullHandler())\n    return args", "docstring": "Parse all global iotile tool arguments.\n\nAny flag based argument at the start of the command line is considered as\na global flag and parsed.  The first non flag argument starts the commands\nthat are passed to the underlying hierarchical shell.\n\nArgs:\nargv (list): The command line for this command\n\nReturns:\nNamespace: The parsed arguments, with all of the commands that should\nbe executed in an iotile shell as the attribute 'commands'", "source": "codesearchnet"}
{"code": "def extract_formats(config_handle):\n    configurations = dict(config_handle)\n    formats = dict(configurations.get('formats', {}))\n    return formats", "docstring": "Get application formats.\n\nSee :class:`gogoutils.Formats` for available options.\n\nArgs:\nconfig_handle (configparser.ConfigParser): Instance of configurations.\n\nReturns:\ndict: Formats in ``{$format_type: $format_pattern}``.", "source": "codesearchnet"}
{"code": "def base_path(self):\n    path = self.request.path\n    base_path = path[:path.rfind('/')]\n    if (not base_path.endswith('/command')):\n        raise BadRequestPathError('Json handlers should have /command path prefix')\n    return base_path[:base_path.rfind('/')]", "docstring": "Base path for all mapreduce-related urls.\n\nJSON handlers are mapped to /base_path/command/command_name thus they\nrequire special treatment.\n\nRaises:\nBadRequestPathError: if the path does not end with \"/command\".\n\nReturns:\nThe base path.", "source": "codesearchnet"}
{"code": "def get_current_and_head_revision(\n        database_url: str,\n        alembic_config_filename: str,\n        alembic_base_dir: str = None,\n        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]:\n    \n    \n    head_revision = get_head_revision_from_alembic(\n        alembic_config_filename=alembic_config_filename,\n        alembic_base_dir=alembic_base_dir,\n        version_table=version_table\n    )\n    log.info(\"Intended database version: {}\", head_revision)\n\n    \n    current_revision = get_current_revision(\n        database_url=database_url,\n        version_table=version_table\n    )\n    log.info(\"Current database version: {}\", current_revision)\n\n    \n    return current_revision, head_revision", "docstring": "Returns a tuple of ``(current_revision, head_revision)``; see\n:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.\n\nArguments:\ndatabase_url: SQLAlchemy URL for the database\nalembic_config_filename: config filename\nalembic_base_dir: directory to start in, so relative paths in the\nconfig file work.\nversion_table: table name for Alembic versions", "source": "juraj-google-style"}
{"code": "def getVariable(self, name):\n        \n        return lock_and_call(\n            lambda: Variable(self._impl.getVariable(name)),\n            self._lock\n        )", "docstring": "Get the variable with the corresponding name.\n\nArgs:\nname: Name of the variable to be found.\n\nRaises:\nTypeError: if the specified variable does not exist.", "source": "juraj-google-style"}
{"code": "def plot_spectrum(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):\n        \n        if self.header[b'nbits'] <=2:\n            logged = False\n            t='all'\n        ax = plt.gca()\n\n        plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)\n\n        \n        if self.header[b'foff'] < 0:\n            plot_data = plot_data[..., ::-1] \n            plot_f = plot_f[::-1]\n\n        if isinstance(t, int):\n            print(\"extracting integration %i...\" % t)\n            plot_data = plot_data[t]\n        elif t == b'all':\n            print(\"averaging along time axis...\")\n            \n            if len(plot_data.shape) > 1:\n                plot_data = plot_data.mean(axis=0)\n            else:\n                plot_data = plot_data.mean()\n        else:\n            raise RuntimeError(\"Unknown integration %s\" % t)\n\n        \n        dec_fac_x = 1\n        if plot_data.shape[0] > MAX_PLT_POINTS:\n            dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)\n\n        plot_data = rebin(plot_data, dec_fac_x, 1)\n        plot_f    = rebin(plot_f, dec_fac_x, 1)\n\n        if not c:\n            kwargs['c'] = '\n\n        if logged:\n            plt.plot(plot_f, db(plot_data),label='Stokes I', **kwargs)\n            plt.ylabel(\"Power [dB]\")\n        else:\n\n            plt.plot(plot_f, plot_data,label='Stokes I', **kwargs)\n            plt.ylabel(\"Power [counts]\")\n        plt.xlabel(\"Frequency [MHz]\")\n        plt.legend()\n\n        try:\n            plt.title(self.header[b'source_name'])\n        except KeyError:\n            plt.title(self.filename)\n\n        plt.xlim(plot_f[0], plot_f[-1])", "docstring": "Plot frequency spectrum of a given file\n\nArgs:\nt (int): integration number to plot (0 -> len(data))\nlogged (bool): Plot in linear (False) or dB units (True)\nif_id (int): IF identification (if multiple IF signals in file)\nc: color for line\nkwargs: keyword args to be passed to matplotlib plot()", "source": "juraj-google-style"}
{"code": "def _validate_clientsecrets(clientsecrets_dict):\n    _INVALID_FILE_FORMAT_MSG = 'Invalid file format. See https:\n    if (clientsecrets_dict is None):\n        raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)\n    try:\n        ((client_type, client_info),) = clientsecrets_dict.items()\n    except (ValueError, AttributeError):\n        raise InvalidClientSecretsError((_INVALID_FILE_FORMAT_MSG + ' Expected a JSON object with a single property for a \"web\" or \"installed\" application'))\n    if (client_type not in VALID_CLIENT):\n        raise InvalidClientSecretsError('Unknown client type: {0}.'.format(client_type))\n    for prop_name in VALID_CLIENT[client_type]['required']:\n        if (prop_name not in client_info):\n            raise InvalidClientSecretsError('Missing property \"{0}\" in a client type of \"{1}\".'.format(prop_name, client_type))\n    for prop_name in VALID_CLIENT[client_type]['string']:\n        if client_info[prop_name].startswith('[['):\n            raise InvalidClientSecretsError('Property \"{0}\" is not configured.'.format(prop_name))\n    return (client_type, client_info)", "docstring": "Validate parsed client secrets from a file.\n\nArgs:\nclientsecrets_dict: dict, a dictionary holding the client secrets.\n\nReturns:\ntuple, a string of the client type and the information parsed\nfrom the file.", "source": "codesearchnet"}
{"code": "def __init__(self, controller,\n                 device_initializer=\\\n                 lambda sc, idcode: JTAGDevice(sc,idcode),\n                 ignore_jtag_enabled=False, debug=False,\n                 collect_compiler_artifacts=False,\n                 collect_compiler_merge_artifacts=False,\n                 print_statistics=False):\n        \n        self._debug = debug\n        self._collect_compiler_artifacts = collect_compiler_artifacts\n        self._collect_compiler_merge_artifacts = collect_compiler_merge_artifacts\n        self._print_statistics = print_statistics\n        self._fitted_lv1_prim_cache = {}\n        self._devices = []\n        self._hasinit = False\n        self._sm = JTAGStateMachine()\n        self._ignore_jtag_enabled = ignore_jtag_enabled\n        self._desired_speed = None\n\n        self.initialize_device_from_id = device_initializer\n        self.get_descriptor_for_idcode = \\\n                    jtagDeviceDescription.get_descriptor_for_idcode\n\n        if isinstance(controller, InaccessibleController):\n            raise DevicePermissionDeniedError()\n        self._controller = controller\n        \n        self._controller._scanchain = self\n\n        self._command_queue = CommandQueue(self)\n\n        default_prims = {RunInstruction,\n                         TransitionTAP, RWReg, RWDR, RWIR, Sleep,\n                         RWDevDR, RWDevIR}\n        self._chain_primitives = {}\n        self._device_primitives = {}\n        self._lv1_chain_primitives = []\n\n        for prim in default_prims:\n            assert issubclass(prim, Primitive)\n            if issubclass(prim, DeviceTarget):\n                self._device_primitives[prim._function_name] = prim\n            else:\n                self._chain_primitives[prim._function_name] = prim\n\n        for prim in self._controller._primitives:\n            if not issubclass(prim, Primitive):\n                raise Exception(\"Registered Controller Prim has \"\n                                \"unknown type. (%s)\"%prim)\n            if issubclass(prim, DeviceTarget):\n                self._device_primitives[prim._function_name] = prim\n            else:\n                self._chain_primitives[prim._function_name] = prim\n                if issubclass(prim, Level1Primitive):\n                    self._lv1_chain_primitives.append(prim)\n\n        for func_name, prim in self._chain_primitives.items():\n            if not self._gen_prim_adder(prim):\n                raise Exception(\"Failed adding primitive %s, \"\\\n                                \"primitive with name %s \"\\\n                                \"already exists on scanchain\"%\\\n                                (prim, prim._function_name))", "docstring": "Create a new JTAGScanChain to track and control a real chain.\n\nArgs:\ncontroller: The CableDriver that this ScanChain will control.\ndevice_initializer: A callable that can map a (JTAGScanChain, Bitarray) to an instance of a JTAGDevice (Allows custom classes to be used).\nignore_jtag_enabled: A boolean on if errors should be ignored when JTA is already enabled on the controller.\ndebug: A boolean to enable extra debug printing.", "source": "juraj-google-style"}
{"code": "def __init__(self, address=\"0.0.0.0/32\", netmask=None):\n        \n        if '/' in address:\n            address, netmask = address.split('/')\n        else:\n            netmask = 32 if netmask is None else netmask\n\n        super().__init__(address)\n        self.netmask = int(netmask)", "docstring": "Create an IPAddress with the parameters below.\n\nArgs:\naddress (str): IP Address using ipv4. Defaults to '0.0.0.0/32'", "source": "juraj-google-style"}
{"code": "def replace_urls(status):\n    text = status.text\n    if (not has_url(status)):\n        return text\n    urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]\n    urls.sort(key=(lambda x: x[0][0]), reverse=True)\n    for ((start, end), url) in urls:\n        text = ((text[:start] + url) + text[end:])\n    return text", "docstring": "Replace shorturls in a status with expanded urls.\n\nArgs:\nstatus (tweepy.status): A tweepy status object\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):\n  \n  diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)\n  return np.maximum(diff - min_diff, 0).astype(dtype)", "docstring": "Point-wise, hinge loss-like, difference between arrays.\n\nArgs:\narr1: integer array to compare.\narr2: integer array to compare.\nmin_diff: minimal difference taken into consideration.\ndtype: dtype of returned array.\n\nReturns:\narray", "source": "juraj-google-style"}
{"code": "def mask_to_rgb(self, image: np.ndarray, palette: Optional[List[Tuple[int, int]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    return mask_to_rgb(image, palette=palette, data_format=data_format)", "docstring": "Converts a segmentation map to RGB format.\n\nArgs:\nimage (`np.ndarray`):\nSegmentation map with dimensions (height, width) where pixel values represent the class index.\npalette (`List[Tuple[int, int]]`, *optional*, defaults to `None`):\nPalette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel\ndimension.\ndata_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used. Can be one of:\n- `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n\nReturns:\n`np.ndarray`: The mask in RGB format.", "source": "github-repos"}
{"code": "def has_sample(self, md5):\n    sample = self.get_sample(md5)\n    return (True if sample else False)", "docstring": "Checks if data store has this sample.\n\nArgs:\nmd5: The md5 digest of the required sample.\n\nReturns:\nTrue if sample with this md5 is present, else False.", "source": "codesearchnet"}
{"code": "def number(digit):\n    spoken = str(digit)\n    if (spoken.startswith('8') or (spoken[:(len(spoken) % 3)] == '11')):\n        article = 'an '\n    else:\n        article = 'a '\n    if (spoken.endswith('1') and (spoken != '11')):\n        suffix = 'st'\n    elif (spoken.endswith('2') and (spoken != '12')):\n        suffix = 'nd'\n    elif (spoken.endswith('3') and (spoken != '13')):\n        suffix = 'rd'\n    else:\n        suffix = 'th'\n    if (digit > 999):\n        prefix = (len(spoken) % 3)\n        separated = spoken[:prefix]\n        for n in range(prefix, len(spoken), 3):\n            separated += (',' + spoken[n:(n + 3)])\n        spoken = separated\n    return ((article + spoken) + suffix)", "docstring": "Gets a spoken-word representation for a number.\n\nArguments:\ndigit (int): An integer to convert into spoken-word.\n\nReturns:\nA spoken-word representation for a digit,\nincluding an article ('a' or 'an') and a suffix,\ne.g. 1 -> 'a 1st', 11 -> \"an 11th\". Adittionally\ndelimits characters in pairs of three for values > 999.", "source": "codesearchnet"}
{"code": "def validate_definition(self, definition_name, dict_to_test, definition=None):\n    if ((definition_name not in self.specification['definitions'].keys()) and (definition is None)):\n        return False\n    spec_def = (definition or self.specification['definitions'][definition_name])\n    all_required_keys_present = all(((req in dict_to_test.keys()) for req in spec_def.get('required', {})))\n    if (('required' in spec_def) and (not all_required_keys_present)):\n        return False\n    properties_dict = spec_def.get('properties', {})\n    for (key, value) in dict_to_test.items():\n        if (value is not None):\n            if (key not in properties_dict):\n                return False\n            elif (not self._validate_type(properties_dict[key], value)):\n                return False\n    return True", "docstring": "Validate the given dict according to the given definition.\n\nArgs:\ndefinition_name: name of the the definition.\ndict_to_test: dict to test.\n\nReturns:\nTrue if the given dict match the definition, False otherwise.", "source": "codesearchnet"}
{"code": "def int64_user_distribution(namespace, name, metric, ptransform=None) -> metrics_pb2.MonitoringInfo:\n    labels = create_labels(ptransform=ptransform, namespace=namespace, name=name)\n    payload = _encode_distribution(coders.VarIntCoder(), metric.count, metric.sum, metric.min, metric.max)\n    return create_monitoring_info(USER_DISTRIBUTION_URN, DISTRIBUTION_INT64_TYPE, payload, labels)", "docstring": "Return the distribution monitoring info for the URN, metric and labels.\n\nArgs:\nurn: The URN of the monitoring info/metric.\nmetric: The DistributionData for the metric.\nptransform: The ptransform id used as a label.", "source": "github-repos"}
{"code": "def get_relative_import_files(module_file: Union[str, os.PathLike]) -> list[str]:\n    no_change = False\n    files_to_check = [module_file]\n    all_relative_imports = []\n    while not no_change:\n        new_imports = []\n        for f in files_to_check:\n            new_imports.extend(get_relative_imports(f))\n        module_path = Path(module_file).parent\n        new_import_files = [str(module_path / m) for m in new_imports]\n        new_import_files = [f for f in new_import_files if f not in all_relative_imports]\n        files_to_check = [f'{f}.py' for f in new_import_files]\n        no_change = len(new_import_files) == 0\n        all_relative_imports.extend(files_to_check)\n    return all_relative_imports", "docstring": "Get the list of all files that are needed for a given module. Note that this function recurses through the relative\nimports (if a imports b and b imports c, it will return module files for b and c).\n\nArgs:\nmodule_file (`str` or `os.PathLike`): The module file to inspect.\n\nReturns:\n`list[str]`: The list of all relative imports a given module needs (recursively), which will give us the list\nof module files a given module needs.", "source": "github-repos"}
{"code": "def to_tensor_7(self) -> torch.Tensor:\n    tensor = self._trans.new_zeros((*self.shape, 7))\n    tensor[..., :4] = self._rots.get_quats()\n    tensor[..., 4:] = self._trans\n    return tensor", "docstring": "Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the\ntranslation.\n\nReturns:\nA [*, 7] tensor representation of the transformation", "source": "github-repos"}
{"code": "def loads(self, config_str, as_defaults=False):\n        \n        self._rw.load_config_from_string(self._config, config_str, as_defaults=as_defaults)", "docstring": "Load configuration values from the specified source string.\n\nArgs:\nconfig_str:\nas_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.", "source": "juraj-google-style"}
{"code": "class RunAggregationStrategy(beam.PTransform[beam.PCollection[NestedKeyedOutputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, aggregation_strategy: Optional[AggregationFn], agg_model_id: str):\n        self._aggregation_fn = aggregation_strategy\n        self._agg_model_id = agg_model_id\n\n    def expand(self, input: beam.PCollection[NestedKeyedOutputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        post_gbk = input | beam.MapTuple(lambda k, v: ((k, v[0]), v[1])) | beam.GroupByKey()\n        if self._aggregation_fn is None:\n            ret = post_gbk | beam.MapTuple(lambda k, v: (k[0], (k[1], AnomalyResult(example=v[0].example, predictions=[prediction for result in v for prediction in result.predictions]))))\n            return ret\n        aggregation_fn_spec = self._aggregation_fn.to_spec()\n        aggregation_fn_spec.config['_run_init'] = True\n        aggregation_fn = Specifiable.from_spec(aggregation_fn_spec)\n        if isinstance(aggregation_fn, aggregations._AggModelIdMixin):\n            aggregation_fn._set_agg_model_id_if_unset(self._agg_model_id)\n        ret = post_gbk | beam.MapTuple(lambda k, v, agg=aggregation_fn: (k[0], (k[1], AnomalyResult(example=v[0].example, predictions=[agg.apply([prediction for result in v for prediction in result.predictions])]))))\n        return ret", "docstring": "Applies an aggregation strategy to grouped anomaly detection results.\n\nThis PTransform aggregates anomaly predictions from multiple models or\ndata points using an `AggregationFn`. It handles both custom and simple\naggregation strategies.\n\nArgs:\naggregation_strategy: The `AggregationFn` to use.\nagg_model_id: The model ID for aggregation.", "source": "github-repos"}
{"code": "def _update_size(self, size, future):\n    with self._size_lock:\n        if ((size > self._size) and future.done):\n            self._size = size", "docstring": "Keep track of the file size during writing.\n\nIf specified size value is greater than the current size, update the\ncurrent size using specified value.\n\nUsed as callback in default \"_flush\" implementation for files supporting\nrandom write access.\n\nArgs:\nsize (int): Size value.\nfuture (concurrent.futures._base.Future): future.", "source": "codesearchnet"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    \n    if server is not None and author != channel.server.me:\n        prefix = data[\"discord\"][\"servers\"][server.id][\"prefix\"]\n        \n        if channel.server.me in message.mentions:\n            await client.send_typing(channel)\n            response = \"The current server prefix is `{0}`. Type `{0}help` for help.\".format(prefix)\n            await client.send_message(channel, response)\n\n        \n        if content.startswith(prefix):\n            \n            package = content.split(\" \")\n            command = package[0][len(prefix):]\n            args = package[1:]\n            arg = ' '.join(args)\n\n            \n            if command not in [\"prefix\", \"activate\", \"deactivate\", \"warnmax\", \"warn\", \"ban\"]:\n                return\n\n            is_admin = author == server.owner\n            for role in message.author.roles:\n                if role.permissions.administrator:\n                    is_admin = True\n\n            if not is_admin:\n                await client.send_typing(channel)\n                reason = \"You must have a role that has the permission 'Administrator'\"\n                embed = ui_embed.error(channel, \"Insufficient Permissions\", reason)\n                await embed.send()\n                return\n\n            if command == \"prefix\" and args:\n                new_prefix = arg.replace(\" \", \"\").strip()\n                data[\"discord\"][\"servers\"][server.id][\"prefix\"] = new_prefix\n                \n                datatools.write_data(data)\n\n                await client.send_typing(channel)\n                embed = ui_embed.modify_prefix(channel, new_prefix)\n                await embed.send()\n\n            if command == \"warnmax\" and args:\n                try:\n                    warn_max = int(arg)\n                    if warn_max > 0:\n                        data[\"discord\"][\"servers\"][server.id][_data.modulename][\"warnings_max\"] = warn_max\n                        datatools.write_data(data)\n                        await client.send_typing(channel)\n                        embed = ui_embed.warning_max_changed(channel, warn_max)\n                        await embed.send()\n                    else:\n                        reason = \"Maximum warnings must be greater than 0\"\n                        embed = ui_embed.error(channel, \"Error\", reason)\n                        await embed.send()\n                except (ValueError, TypeError):\n                    reason = \"Warning maximum must be a number\"\n                    embed = ui_embed.error(channel, \"Error\", reason)\n                    await embed.send()\n                except Exception as e:\n                    logger.exception(e)\n\n            if command == \"warn\" and args:\n                for user in message.mentions:\n                    await api_manager.warn_user(channel, user)\n\n            if command == \"ban\" and args:\n                for user in message.mentions:\n                    await api_manager.ban_user(channel, user)\n\n            if command == \"activate\" and args:\n                await api_manager.activate_module(channel, arg, True)\n            elif command == \"deactivate\" and args:\n                await api_manager.activate_module(channel, arg, False)", "docstring": "The on_message event 
handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def __init__(self, key, iv):\n    \n    self.key = key.RawBytes()\n    self.iv = iv.RawBytes()", "docstring": "Init.\n\nArgs:\nkey: The key, a rdf_crypto.EncryptionKey instance.\niv: The iv, a rdf_crypto.EncryptionKey instance.", "source": "juraj-google-style"}
{"code": "def list_datasets(self, get_global_public):\n    appending = ''\n    if get_global_public:\n        appending = 'public'\n    url = (self.url() + '/resource/{}dataset/'.format(appending))\n    req = self.remote_utils.get_url(url)\n    if (req.status_code is not 200):\n        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n    else:\n        return req.json()", "docstring": "Lists datasets in resources. Setting 'get_global_public' to 'True'\nwill retrieve all public datasets in cloud. 'False' will get user's\npublic datasets.\n\nArguments:\nget_global_public (bool): True if user wants all public datasets in\ncloud. False if user wants only their\npublic datasets.\n\nReturns:\ndict: Returns datasets in JSON format", "source": "codesearchnet"}
{"code": "def create_chebyshev_samples(order, dim=1):\n    \n    x_data = .5*numpy.cos(numpy.arange(order, 0, -1)*numpy.pi/(order+1)) + .5\n    x_data = chaospy.quad.combine([x_data]*dim)\n    return x_data.T", "docstring": "Chebyshev sampling function.\n\nArgs:\norder (int):\nThe number of samples to create along each axis.\ndim (int):\nThe number of dimensions to create samples for.\n\nReturns:\nsamples following Chebyshev sampling scheme mapped to the\n``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.", "source": "juraj-google-style"}
{"code": "def get_key_by_job_id(cls, mapreduce_id):\n    return db.Key.from_path(cls.kind(), ('%s:%s' % (mapreduce_id, cls._KEY_NAME)))", "docstring": "Retrieves the Key for a mapreduce ID.\n\nArgs:\nmapreduce_id: The job to fetch.\n\nReturns:\nDatastore Key for the command for the given job ID.", "source": "codesearchnet"}
{"code": "def verify_signature(public_key, signature, hash, hash_algo):\n    hash_algo = _hash_algorithms[hash_algo]\n    try:\n        return (get_publickey(public_key).verify(signature, hash, padding.PKCS1v15(), utils.Prehashed(hash_algo)) is None)\n    except InvalidSignature:\n        return False", "docstring": "Verify the given signature is correct for the given hash and public key.\n\nArgs:\npublic_key (str): PEM encoded public key\nsignature (bytes): signature to verify\nhash (bytes): hash of data\nhash_algo (str): hash algorithm used\n\nReturns:\nTrue if the signature is valid, False otherwise", "source": "codesearchnet"}
{"code": "def csv_to_numpy(string_like, dtype=None):\n    stream = StringIO(string_like)\n    return np.genfromtxt(stream, dtype=dtype, delimiter=',')", "docstring": "Convert a CSV object to a numpy array.\n\nArgs:\nstring_like (str): CSV string.\ndtype (dtype, optional):  Data type of the resulting array. If None, the dtypes will be determined by the\ncontents of each column, individually. This argument can only be used to\n'upcast' the array.  For downcasting, use the .astype(t) method.\nReturns:\n(np.array): numpy array", "source": "codesearchnet"}
{"code": "def get_converter(in_type, out_type, *args, **kwargs):\n    convs = pliers.converters.__all__\n    out_type = listify(out_type)[::(- 1)]\n    default_convs = config.get_option('default_converters')\n    for ot in out_type:\n        conv_str = ('%s->%s' % (in_type.__name__, ot.__name__))\n        if (conv_str in default_convs):\n            convs = (list(default_convs[conv_str]) + convs)\n    for name in convs:\n        cls = getattr(pliers.converters, name)\n        if (not issubclass(cls, Converter)):\n            continue\n        available = (cls.available if issubclass(cls, EnvironmentKeyMixin) else True)\n        if ((cls._input_type == in_type) and (cls._output_type in out_type) and available):\n            conv = cls(*args, **kwargs)\n            return conv\n    return None", "docstring": "Scans the list of available Converters and returns an instantiation\nof the first one whose input and output types match those passed in.\n\nArgs:\nin_type (type): The type of input the converter must have.\nout_type (type): The type of output the converter must have.\nargs, kwargs: Optional positional and keyword arguments to pass onto\nmatching Converter's initializer.", "source": "codesearchnet"}
{"code": "def convert_to_row_table(self, add_units=True):\n    rtable = []\n    if add_units:\n        relavent_units = self.get_relavent_units()\n    for row_index in range(self.start[0], self.end[0]):\n        for column_index in range(self.start[1], self.end[1]):\n            cell = self.table[row_index][column_index]\n            if ((cell != None) and isinstance(cell, (int, float, long))):\n                titles = self._find_titles(row_index, column_index)\n                titles.append(cell)\n                if add_units:\n                    titles.append(relavent_units.get((row_index, column_index)))\n                rtable.append(titles)\n    if (not rtable):\n        for row_index in range(self.start[0], self.end[0]):\n            row = []\n            rtable.append(row)\n            for column_index in range(self.start[1], self.end[1]):\n                row.append(self.table[row_index][column_index])\n            if add_units:\n                row.append(relavent_units.get((row_index, column_index)))\n    return rtable", "docstring": "Converts the block into row titled elements. These elements are copied into the return\ntable, which can be much longer than the  original block.\n\nArgs:\nadd_units: Indicates if units should be appened to each row item.\n\nReturns:\nA row-titled table representing the data in the block.", "source": "codesearchnet"}
{"code": "def lowres_tensor(shape, underlying_shape, offset=None, sd=None):\n    sd = (sd or 0.01)\n    init_val = (sd * np.random.randn(*underlying_shape).astype('float32'))\n    underlying_t = tf.Variable(init_val)\n    t = resize_bilinear_nd(underlying_t, shape)\n    if (offset is not None):\n        if (not isinstance(offset, list)):\n            offset = (len(shape) * [offset])\n        for n in range(len(offset)):\n            if (offset[n] is True):\n                offset[n] = ((shape[n] / underlying_shape[n]) / 2)\n            if (offset[n] is False):\n                offset[n] = 0\n            offset[n] = int(offset[n])\n        padding = [(pad, 0) for pad in offset]\n        t = tf.pad(t, padding, 'SYMMETRIC')\n        begin = (len(shape) * [0])\n        t = tf.slice(t, begin, shape)\n    return t", "docstring": "Produces a tensor paramaterized by a interpolated lower resolution tensor.\n\nThis is like what is done in a laplacian pyramid, but a bit more general. It\ncan be a powerful way to describe images.\n\nArgs:\nshape: desired shape of resulting tensor\nunderlying_shape: shape of the tensor being resized into final tensor\noffset: Describes how to offset the interpolated vector (like phase in a\nFourier transform). If None, apply no offset. If a scalar, apply the same\noffset to each dimension; if a list use each entry for each dimension.\nIf a int, offset by that much. If False, do not offset. If True, offset by\nhalf the ratio between shape and underlying shape (analagous to 90\ndegrees).\nsd: Standard deviation of initial tensor variable.\n\nReturns:\nA tensor paramaterized by a lower resolution tensorflow variable.", "source": "codesearchnet"}
{"code": "def _parse_control_fields(self, fields, tag_id='tag'):\n    for field in fields:\n        params = field.params\n        if (tag_id not in params):\n            continue\n        self.controlfields[params[tag_id]] = field.getContent().strip()", "docstring": "Parse control fields.\n\nArgs:\nfields (list): list of HTMLElements\ntag_id (str):  parameter name, which holds the information, about\nfield name this is normally \"tag\", but in case of\noai_marc \"id\".", "source": "codesearchnet"}
{"code": "def get_public_api(api_mapping_files: Sequence[str], file_prefixes_to_strip: Sequence[str], packages_to_ignore: Sequence[str], output_package: str, module_prefix: str) -> PublicAPI:\n    ea = exported_api.ExportedApi()\n    for f in api_mapping_files:\n        ea.read(f)\n    v1_entrypoints_by_module = collections.defaultdict(set)\n    v2_entrypoints_by_module = collections.defaultdict(set)\n\n    def add_exported_symbols(api_names: list[str], s: exported_api.ExportedSymbol, entrypoints_by_module: Mapping[str, set[_Entrypoint]]):\n        for api_name in api_names:\n            index_of_last_dot = api_name.rfind('.')\n            index_of_first_dot = api_name.find('.')\n            module = output_package\n            if index_of_first_dot + 1 < index_of_last_dot:\n                module += f'.{api_name[index_of_first_dot + 1:index_of_last_dot]}'\n            name = api_name[index_of_last_dot + 1:]\n            entrypoints_by_module[module].add(_Entrypoint(module, name, s))\n    for s in ea.symbols:\n        if _should_skip_file(s.file_name, file_prefixes_to_strip, packages_to_ignore, module_prefix):\n            continue\n        add_exported_symbols(s.v1_apis, s, v1_entrypoints_by_module)\n        add_exported_symbols(s.v2_apis, s, v2_entrypoints_by_module)\n    v1_generated_imports_by_module = collections.defaultdict(set)\n    v2_generated_imports_by_module = collections.defaultdict(set)\n\n    def add_generated_imports(entrypoints_by_module: Mapping[str, set[_Entrypoint]], generated_imports_by_module: Mapping[str, set[str]]):\n        for module in entrypoints_by_module:\n            i = module.rfind('.')\n            if i == -1:\n                continue\n            while i != -1:\n                parent = module[:i]\n                generated_imports_by_module[parent].add(module)\n                module = parent\n                i = module.rfind('.')\n    add_generated_imports(v1_entrypoints_by_module, v1_generated_imports_by_module)\n    add_generated_imports(v2_entrypoints_by_module, v2_generated_imports_by_module)\n    docs_by_module = {}\n    for d in ea.docs:\n        for m in d.modules:\n            if m in docs_by_module:\n                raise DocExportedTwiceError(f'Docstring at {d.file_name}:{d.line_no} is registered for {m}, which already has a registered docstring.')\n            docs_by_module[m] = d.docstring\n    return PublicAPI(v1_entrypoints_by_module=v1_entrypoints_by_module, v2_entrypoints_by_module=v2_entrypoints_by_module, v1_generated_imports_by_module=v1_generated_imports_by_module, v2_generated_imports_by_module=v2_generated_imports_by_module, docs_by_module=docs_by_module)", "docstring": "Generates the structure of the public API from the given files.\n\nArgs:\napi_mapping_files: List of files containing the exported API mappings and\ndocstrings.\nfile_prefixes_to_strip: A list of prefixes to strip from files when\ndetermining the packages to ignore.\npackages_to_ignore: A list of python packages that should be ignored when\nsearching for tf_exports.\noutput_package: The package to use for the imports.\nmodule_prefix: A prefix to add to the non-generated imports.\n\nRaises:\nDocExportedTwiceError: Two docstrings are registered for the same module.\n\nReturns:\nThe public API structure.", "source": "github-repos"}
{"code": "def valid_config_exists(config_path=CONFIG_PATH):\n    \n    if os.path.isfile(config_path):\n        try:\n            config = read_config(config_path)\n            check_config(config)\n        except (ConfigurationError, IOError):\n            return False\n    else:\n        return False\n    return True", "docstring": "Verify that a valid config file exists.\n\nArgs:\nconfig_path (str): Path to the config file.\n\nReturns:\nboolean: True if there is a valid config file, false if not.", "source": "juraj-google-style"}
{"code": "def default_pass_manager_simulator(basis_gates):\n    pass_manager = PassManager()\n    pass_manager.append(Unroller(basis_gates))\n    pass_manager.append([RemoveResetInZeroState(), Depth(), FixedPoint('depth')], do_while=(lambda property_set: (not property_set['depth_fixed_point'])))\n    return pass_manager", "docstring": "The default pass manager without a coupling map.\n\nArgs:\nbasis_gates (list[str]): list of basis gate names to unroll to.\n\nReturns:\nPassManager: A passmanager that just unrolls, without any optimization.", "source": "codesearchnet"}
{"code": "def decode(self, tx):\n    if (not isinstance(self._service, BitcoinBlockrService)):\n        raise NotImplementedError('Currently only supported for \"blockr.io\"')\n    return self._service.decode(tx)", "docstring": "Decodes the given transaction.\n\nArgs:\ntx: hex of transaction\nReturns:\ndecoded transaction\n\n.. note:: Only supported for blockr.io at the moment.", "source": "codesearchnet"}
{"code": "def are_same(self, path_1, path_2):\n    if (path_1 == path_2):\n        return True\n    repo_1 = self.get_repository(path_1)\n    repo_2 = self.get_repository(path_2)\n    return (repo_1.uid == repo_2.uid)", "docstring": "Test that `path_1` and `path_2` refer to the same repository.\n\nThis is more reliable than testing that the strings match, since slightly\ndifferent strings might refer to the same repository (consider small\ndifferences in a filesystem path for example, eg '//svr/foo', '/svr/foo').\n\nReturns:\nTrue if the paths refer to the same repository, False otherwise.", "source": "codesearchnet"}
{"code": "def potential_purviews(self, direction, mechanism):\n    all_purviews = utils.powerset(self._node_indices)\n    return irreducible_purviews(self.cm, direction, mechanism, all_purviews)", "docstring": "All purviews which are not clearly reducible for mechanism.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The mechanism which all purviews are\nchecked for reducibility over.\n\nReturns:\nlist[tuple[int]]: All purviews which are irreducible over\n``mechanism``.", "source": "codesearchnet"}
{"code": "def _create_per_worker_resources(self, fn, args=None, kwargs=None):\n    results = []\n    for w in self._cluster.workers:\n        results.append(w.create_resource(fn, args=args, kwargs=kwargs))\n    return PerWorkerValues(tuple(results))", "docstring": "Synchronously create resources on the workers.\n\nThe resources are represented by\n`tf.distribute.experimental.coordinator.RemoteValue`s.\n\nArgs:\nfn: The function to be dispatched to all workers for execution\nasynchronously.\nargs: Positional arguments for `fn`.\nkwargs: Keyword arguments for `fn`.\n\nReturns:\nA `tf.distribute.experimental.coordinator.PerWorkerValues` object, which\nwraps a tuple of `tf.distribute.experimental.coordinator.RemoteValue`\nobjects.", "source": "github-repos"}
{"code": "def repository_compare(self, from_, to, **kwargs):\n        \n        path = '/projects/%s/repository/compare' % self.get_id()\n        query_data = {'from': from_, 'to': to}\n        return self.manager.gitlab.http_get(path, query_data=query_data,\n                                            **kwargs)", "docstring": "Return a diff between two branches/commits.\n\nArgs:\nfrom_(str): Source branch/SHA\nto(str): Destination branch/SHA\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\nstr: The diff", "source": "juraj-google-style"}
{"code": "def create_column_token_type_ids_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:\n    table_column_ids = list(zip(*table_values))[1] if table_values else []\n    return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)", "docstring": "Creates the column token type IDs according to the query token IDs and a list of table values.\n\nArgs:\nquery_ids (`List[int]`): list of token IDs corresponding to the ID.\ntable_values (`List[TableValue]`): lift of table values, which are named tuples containing the\ntoken value, the column ID and the row ID of said token.\n\nReturns:\n`List[int]`: List of ints containing the column token type IDs values.", "source": "github-repos"}
{"code": "def log_deprecated(name=\"\", text=\"\", eos=\"\"):\n    \n    assert name or text\n    if eos:\n        eos = \"after \" + datetime(*map(int, eos.split(\"-\"))).strftime(\"%d %b\")\n    if name:\n        if eos:\n            warn_msg = \"%s will be deprecated %s. %s\" % (name, eos, text)\n        else:\n            warn_msg = \"%s was deprecated. %s\" % (name, text)\n    else:\n        warn_msg = text\n        if eos:\n            warn_msg += \" Legacy period ends %s\" % eos\n    logger.warn(\"[Deprecated] \" + warn_msg)", "docstring": "Log deprecation warning.\n\nArgs:\nname (str): name of the deprecated item.\ntext (str, optional): information about the deprecation.\neos (str, optional): end of service date such as \"YYYY-MM-DD\".", "source": "juraj-google-style"}
{"code": "def compile_sgf(in_path, optimize=True, model=None):\n    \n\n    if model is None:\n        model = DeviceModel()\n\n    parser = SensorGraphFileParser()\n    parser.parse_file(in_path)\n    parser.compile(model)\n\n    if optimize:\n        opt = SensorGraphOptimizer()\n        opt.optimize(parser.sensor_graph, model=model)\n\n    return parser.sensor_graph", "docstring": "Compile and optionally optimize an SGF file.\n\nArgs:\nin_path (str): The input path to the sgf file to compile.\noptimize (bool): Whether to optimize the compiled result,\ndefaults to True if not passed.\nmodel (DeviceModel): Optional device model if we are\ncompiling for a nonstandard device.  Normally you should\nleave this blank.\n\nReturns:\nSensorGraph: The compiled sensorgraph object", "source": "juraj-google-style"}
{"code": "def cast_naive_datetime_to_tz(dt, tz=UTC()):\n    if has_tz(dt):\n        return dt\n    return dt.replace(tzinfo=tz)", "docstring": "If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it\nunmodified.\n\nArgs:\ndt : datetime\ntz-naive or tz-aware datetime.\n\ntz : datetime.tzinfo\nThe timezone to which to adjust tz-naive datetime.\n\nReturns:\ndatetime\ntz-aware datetime.\n\nWarning:\nThis will change the actual moment in time that is represented if the datetime is\nnaive and represents a date and time not in ``tz``.\n\nSee Also:\n``normalize_datetime_to_utc()``", "source": "codesearchnet"}
{"code": "def __init__(self, test=None, t_node=None, f_node=None):\n        \n        self.test = test\n        self.t_node = t_node\n        self.f_node = f_node", "docstring": "Construct a BoolTree object\n\nArgs:\ntest (bool): test for whether to traverse the true-node or the\nfalse-node (`BoolTree.t_node` or `BoolTree.f_node`)\nt_node (BoolTree/Int): node to follow if test is `True`\nf_node (BoolTree/Int): node to follow if test is `False`", "source": "juraj-google-style"}
{"code": "def _make_lcdproc(lcd_host, lcd_port, retry_config, charset=DEFAULT_LCDPROC_CHARSET, lcdd_debug=False):\n\n    class ServerSpawner(utils.AutoRetryCandidate):\n        'Spawn the server, using auto-retry.'\n\n        @utils.auto_retry\n        def connect(self):\n            return lcdrunner.LcdProcServer(lcd_host, lcd_port, charset=charset, debug=lcdd_debug)\n    spawner = ServerSpawner(retry_config=retry_config, logger=logger)\n    try:\n        return spawner.connect()\n    except socket.error as e:\n        logger.error('Unable to connect to lcdproc %s:%s : %r', lcd_host, lcd_port, e)\n        raise SystemExit(1)", "docstring": "Create and connect to the LCDd server.\n\nArgs:\nlcd_host (str): the hostname to connect to\nlcd_prot (int): the port to connect to\ncharset (str): the charset to use when sending messages to lcdproc\nlcdd_debug (bool): whether to enable full LCDd debug\nretry_attempts (int): the number of connection attempts\nretry_wait (int): the time to wait between connection attempts\nretry_backoff (int): the backoff for increasing inter-attempt delay\n\nReturns:\nlcdproc.server.Server", "source": "codesearchnet"}
{"code": "def get_group(self, name, user_name=None):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_group(name, user_name)", "docstring": "Get information on the given group or whether or not a user is a member\nof the group.\n\nArgs:\nname (string): Name of group to query.\nuser_name (optional[string]): Supply None if not interested in\ndetermining if user is a member of the given group.\n\nReturns:\n(mixed): Dictionary if getting group information or bool if a user\nname is supplied.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def deregister(cls, name: str) -> None:\n    if (name not in cls.available):\n        raise ConnectionPluginNotRegistered(f'Connection {name!r} is not registered')\n    cls.available.pop(name)", "docstring": "Deregisters a registered connection plugin by its name\n\nArgs:\nname: name of the connection plugin to deregister\n\nRaises:\n:obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`", "source": "codesearchnet"}
{"code": "def encode_configuration(self, did, eid, parameters):\n    parameters = [{'parameterId': k, 'parameterValue': v} for (k, v) in parameters.items()]\n    payload = {'parameters': parameters}\n    req_headers = {'Accept': 'application/vnd.onshape.v1+json', 'Content-Type': 'application/json'}\n    res = self._api.request('post', (((('/api/elements/d/' + did) + '/e/') + eid) + '/configurationencodings'), body=payload, headers=req_headers)\n    return json.loads(res.content.decode('utf-8'))['encodedId']", "docstring": "Encode parameters as a URL-ready string\n\nArgs:\n- did (str): Document ID\n- eid (str): Element ID\n- parameters (dict): key-value pairs of the parameters to be encoded\nReturns:\n- configuration (str): the url-ready configuration string.", "source": "codesearchnet"}
{"code": "def print_format_output(dataframe):\n    print_df = pd.DataFrame()\n    dropped_cols = []\n    empty_cols = []\n    for (i, col) in enumerate(dataframe):\n        if dataframe[col].isnull().all():\n            empty_cols += [col]\n            continue\n        print_df[col] = dataframe[col]\n        test_table = tabulate(print_df, headers='keys', tablefmt='psql')\n        if (str(test_table).index('\\n') > TERM_WIDTH):\n            print_df.drop(col, axis=1, inplace=True)\n            dropped_cols += list(dataframe.columns)[i:]\n            break\n    table = tabulate(print_df, headers='keys', tablefmt='psql', showindex='never')\n    print(table)\n    if dropped_cols:\n        print('Dropped columns:', dropped_cols)\n        print('Please increase your terminal size to view remaining columns.')\n    if empty_cols:\n        print('Empty columns:', empty_cols)\n    return (table, dropped_cols, empty_cols)", "docstring": "Prints output of given dataframe to fit into terminal.\n\nReturns:\ntable (pd.DataFrame): Final outputted dataframe.\ndropped_cols (list): Columns dropped due to terminal size.\nempty_cols (list): Empty columns (dropped on default).", "source": "codesearchnet"}
{"code": "def from_version(cls, version, op=None):\n    lower = None\n    upper = None\n    if (op is None):\n        lower = _LowerBound(version, True)\n        upper = _UpperBound(version.next(), False)\n    elif (op in ('eq', '==')):\n        lower = _LowerBound(version, True)\n        upper = _UpperBound(version, True)\n    elif (op in ('gt', '>')):\n        lower = _LowerBound(version, False)\n    elif (op in ('gte', '>=')):\n        lower = _LowerBound(version, True)\n    elif (op in ('lt', '<')):\n        upper = _UpperBound(version, False)\n    elif (op in ('lte', '<=')):\n        upper = _UpperBound(version, True)\n    else:\n        raise VersionError((\"Unknown bound operation '%s'\" % op))\n    bound = _Bound(lower, upper)\n    range = cls(None)\n    range.bounds = [bound]\n    return range", "docstring": "Create a range from a version.\n\nArgs:\nversion: Version object. This is used as the upper/lower bound of\nthe range.\nop: Operation as a string. One of 'gt'/'>', 'gte'/'>=', lt'/'<',\n'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created\nthat contains the version superset.\n\nReturns:\n`VersionRange` object.", "source": "codesearchnet"}
{"code": "def process(self, feed_item):\n    if not feed_item.get(FieldMap.CAMPAIGN_CREATIVE_ASSOCIATION_ID, None):\n        campaign = self.campaign_dao.get(feed_item, required=True)\n        creative = self.creative_dao.get(feed_item, required=True)\n        if campaign and creative:\n            if campaign:\n                feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n                feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n            association = {'creativeId': str(creative['id'])}\n            result = self._api().insert(profileId=self.profile_id, campaignId=str(campaign['id']), body=association).execute()\n            feed_item[FieldMap.CAMPAIGN_CREATIVE_ASSOCIATION_ID] = '%s|%s' % (campaign['id'], creative['id'])\n            return result\n    return None", "docstring": "Processes a feed item by creating the creative association in DCM.\n\nArgs:\nfeed_item: Feed item representing the creative association from the\nBulkdozer feed.\n\nReturns:\nThe newly created object from DCM.", "source": "github-repos"}
{"code": "def cancel(self, force=False):\n    return self.rest_client._sc._delegator._cancel_job(self, force)", "docstring": "Cancel this job.\n\nArgs:\nforce (bool, optional): Forcefully cancel this job.\n\nReturns:\nbool: True if the job was cancelled, otherwise False if an error occurred.", "source": "codesearchnet"}
{"code": "def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):\n    assert (stop >= start)\n    startRows = []\n    copyRows = []\n    ts = None\n    inc = None\n    if (tsCol is None):\n        tsCol = reader.getTimestampFieldIdx()\n    for (i, row) in enumerate(reader):\n        if (ts is None):\n            ts = row[tsCol]\n        elif (inc is None):\n            inc = (row[tsCol] - ts)\n        if ((i >= start) and (i <= stop)):\n            copyRows.append(row)\n        startRows.append(row)\n    if (insertLocation is None):\n        insertLocation = (stop + 1)\n    startRows[insertLocation:insertLocation] = copyRows\n    for row in startRows:\n        row[tsCol] = ts\n        writer.appendRecord(row)\n        ts += inc", "docstring": "Copies a range of values to a new location in the data set.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream object to write output data to.\nstart: The first row in the range to copy.\nstop: The last row in the range to copy.\ninsertLocation: The location to insert the copied range. If not specified,\nthe range is inserted immediately following itself.", "source": "codesearchnet"}
{"code": "def render_trees(trees, path_composer):\n    \n    trees = list(trees)  \n\n    def create_pub_cache(trees):\n        \n        sub_pubs_uuids = sum((x.collect_publications() for x in trees), [])\n\n        uuid_mapping = {\n            uuid: search_pubs_by_uuid(uuid)\n            for uuid in set(sub_pubs_uuids)\n        }\n\n        \n        return {\n            uuid: pub[0]\n            for uuid, pub in uuid_mapping.iteritems()\n            if pub\n        }\n\n    \n    pub_cache = create_pub_cache(trees)\n\n    def render_tree(tree, ind=1):\n        \n        if not tree.is_public:\n            return \"\"\n\n        rendered_tree = SimpleTemplate(TREE_TEMPLATE).render(\n            tree=tree,\n            render_tree=render_tree,\n            ind=ind,\n            path_composer=path_composer,\n            pub_cache=pub_cache,\n        )\n\n        \n        ind_txt = ind * \"  \"\n        return ind_txt + (\"\\n\" + ind_txt).join(rendered_tree.splitlines())\n\n    \n    parent = tree_handler().get_parent(trees[0])\n    link_up = path_composer(parent) if parent else None\n\n    return SimpleTemplate(TREES_TEMPLATE).render(\n        trees=trees,\n        render_tree=render_tree,\n        link_up=link_up,\n    )", "docstring": "Render list of `trees` to HTML.\n\nArgs:\ntrees (list): List of :class:`.Tree`.\npath_composer (fn reference): Function used to compose paths from UUID.\nLook at :func:`.compose_tree_path` from :mod:`.web_tools`.\n\nReturns:\nstr: HTML representation of trees.", "source": "juraj-google-style"}
{"code": "def _ParseRecord(self, parser_mediator, text_file_object):\n    try:\n        title = text_file_object.readline()\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to read and decode title')\n        return False\n    if (not title):\n        return False\n    try:\n        url = text_file_object.readline()\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to read and decode url')\n        return False\n    try:\n        timestamp = text_file_object.readline()\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to read and decode timestamp')\n        return False\n    try:\n        popularity_index = text_file_object.readline()\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to read and decode popularity index')\n        return False\n    event_data = OperaGlobalHistoryEventData()\n    event_data.url = url.strip()\n    title = title.strip()\n    if (title != event_data.url):\n        event_data.title = title\n    popularity_index = popularity_index.strip()\n    try:\n        event_data.popularity_index = int(popularity_index, 10)\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('unable to convert popularity index: {0:s}'.format(popularity_index))\n    if (event_data.popularity_index < 0):\n        event_data.description = 'First and Only Visit'\n    else:\n        event_data.description = 'Last Visit'\n    timestamp = timestamp.strip()\n    try:\n        timestamp = int(timestamp, 10)\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('unable to convert timestamp: {0:s}'.format(timestamp))\n        timestamp = None\n    if (timestamp is None):\n        date_time = dfdatetime_semantic_time.SemanticTime('Invalid')\n    else:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    return True", "docstring": "Parses an Opera global history record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntext_file_object (dfvfs.TextFile): text file.\n\nReturns:\nbool: True if the record was successfully parsed.", "source": "codesearchnet"}
{"code": "def add_fileobj(self, fileobj, path, compress, flags=None):\n    f = file_iter(fileobj)\n    flags = (flags or (os.stat(path) & 511))\n    return self.add_stream(f, path, compress, flags)", "docstring": "Add the contents of a file object to the MAR file.\n\nArgs:\nfileobj (file-like object): open file object\npath (str): name of this file in the MAR file\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.\nflags (int): permission of this file in the MAR file. Defaults to the permissions of `path`", "source": "codesearchnet"}
{"code": "def _get_ops_in_metagraph(meta_graph_def):\n    return set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))", "docstring": "Returns a set of the ops in the MetaGraph.\n\nReturns the set of all the ops used in the MetaGraphDef indicated by the\ntag_set stored in SavedModel directory.\n\nArgs:\nmeta_graph_def: MetaGraphDef to list the ops of.\n\nReturns:\nA set of ops.", "source": "github-repos"}
{"code": "def create(self, name, command_to_run, description='', environment_variables=None, required_arguments=None, required_arguments_default_values=None, extra_data_to_post=None):\n    if (environment_variables is None):\n        environment_variables = []\n    if (required_arguments is None):\n        required_arguments = []\n    if (required_arguments_default_values is None):\n        required_arguments_default_values = {}\n    request_url = (self._client.base_api_url + self.list_url)\n    data_to_post = {'name': name, 'description': description, 'command_to_run': command_to_run, 'environment_variables': json.dumps(environment_variables), 'required_arguments': json.dumps(required_arguments), 'required_arguments_default_values': json.dumps(required_arguments_default_values)}\n    if (extra_data_to_post is not None):\n        data_to_post.update(extra_data_to_post)\n    response = self._client.session.post(request_url, data=data_to_post)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Create a task type.\n\nArgs:\nname (str): The name of the task.\ncommand_to_run (str): The command to run to execute the task.\ndescription (str, optional): The description of the task type.\nenvironment_variables (list, optional): The environment\nvariables required on the host to execute the task.\nrequired_arguments (list, optional): The argument names for\nthe task type.\nrequired_arguments_default_values (dict, optional): Default\nvalues for the tasks required arguments.\nextra_data_to_post (dict, optional): Extra key-value pairs\nto add to the request data. This is useful for\nsubclasses which require extra parameters.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskType`:\nA task type model instance representing the task type\njust created.", "source": "codesearchnet"}
{"code": "def add_phenotype(self, institute, case, user, link, hpo_term=None, omim_term=None, is_group=False):\n    hpo_results = []\n    try:\n        if hpo_term:\n            hpo_results = [hpo_term]\n        elif omim_term:\n            LOG.debug('Fetching info for mim term {0}'.format(omim_term))\n            disease_obj = self.disease_term(omim_term)\n            if disease_obj:\n                for hpo_term in disease_obj.get('hpo_terms', []):\n                    hpo_results.append(hpo_term)\n        else:\n            raise ValueError('Must supply either hpo or omim term')\n    except ValueError as e:\n        raise e\n    existing_terms = set((term['phenotype_id'] for term in case.get('phenotype_terms', [])))\n    updated_case = case\n    phenotype_terms = []\n    for hpo_term in hpo_results:\n        LOG.debug('Fetching info for hpo term {0}'.format(hpo_term))\n        hpo_obj = self.hpo_term(hpo_term)\n        if (hpo_obj is None):\n            raise ValueError(('Hpo term: %s does not exist in database' % hpo_term))\n        phenotype_id = hpo_obj['_id']\n        description = hpo_obj['description']\n        if (phenotype_id not in existing_terms):\n            phenotype_term = dict(phenotype_id=phenotype_id, feature=description)\n            phenotype_terms.append(phenotype_term)\n            LOG.info('Creating event for adding phenotype term for case {0}'.format(case['display_name']))\n            self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='add_phenotype', subject=case['display_name'], content=phenotype_id)\n        if is_group:\n            updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$addToSet': {'phenotype_terms': {'$each': phenotype_terms}, 'phenotype_groups': {'$each': phenotype_terms}}}, return_document=pymongo.ReturnDocument.AFTER)\n        else:\n            updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$addToSet': {'phenotype_terms': {'$each': phenotype_terms}}}, return_document=pymongo.ReturnDocument.AFTER)\n    LOG.debug('Case updated')\n    return updated_case", "docstring": "Add a new phenotype term to a case\n\nCreate a phenotype term and event with the given information\n\nArgs:\ninstitute (Institute): A Institute object\ncase (Case): Case object\nuser (User): A User object\nlink (str): The url to be used in the event\nhpo_term (str): A hpo id\nomim_term (str): A omim id\nis_group (bool): is phenotype term a group?", "source": "codesearchnet"}
{"code": "def EnumValueName(self, enum, value):\n    return self.enum_types_by_name[enum].values_by_number[value].name", "docstring": "Returns the string name of an enum value.\n\nThis is just a small helper method to simplify a common operation.\n\nArgs:\nenum: string name of the Enum.\nvalue: int, value of the enum.\n\nReturns:\nstring name of the enum value.\n\nRaises:\nKeyError if either the Enum doesn't exist or the value is not a valid\nvalue for the enum.", "source": "codesearchnet"}
{"code": "def handle(self, connection_id, message_content):\n    try:\n        request = self._request_proto()\n        request.ParseFromString(message_content)\n    except DecodeError:\n        LOGGER.info('Protobuf %s failed to deserialize', request)\n        return self._wrap_result(self._status.INTERNAL_ERROR)\n    try:\n        response = self._respond(request)\n    except _ResponseFailed as e:\n        response = e.status\n    return self._wrap_result(response)", "docstring": "Handles parsing incoming requests, and wrapping the final response.\n\nArgs:\nconnection_id (str): ZMQ identity sent over ZMQ socket\nmessage_content (bytes): Byte encoded request protobuf to be parsed\n\nReturns:\nHandlerResult: result to be sent in response back to client", "source": "codesearchnet"}
{"code": "def solve(self, print_solution=False):\n    self._cp_solver = cp_model.CpSolver()\n    status = self._cp_solver.Solve(self._model)\n    if (status != cp_model.OPTIMAL):\n        if (status == cp_model.FEASIBLE):\n            logging.warning('A potentially suboptimal solution was found.')\n        else:\n            logging.error('Solver returned status %d.', status)\n            raise SolverError('The solver could not solve the problem and returned status {}.'.format(status))\n    if print_solution:\n        print_cp_model_solution.print_solution(self._model, self._cp_solver)\n    layout = []\n    for mtf_dimension_name in self._layout_validator.splittable_mtf_dimension_names:\n        for mesh_dimension_name in self._layout_validator.mesh_dimension_name_to_size:\n            value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name, mesh_dimension_name)])\n            if value:\n                layout.append(((mtf_dimension_name + ':') + mesh_dimension_name))\n    layout.sort()\n    return ';'.join(layout)", "docstring": "Solves the current integer program and returns the computed layout.\n\nArgs:\nprint_solution: An optional boolean indicating whether to print the full\nsolution in human-readable format.\n\nReturns:\nThe computed layout (as a string).\n\nRaises:\nSolverError: the internal solver could not find a solution, or the\nsolution found is infeasible.", "source": "codesearchnet"}
{"code": "def _get_energy(self, x):\n        \n        return self.pd.get_hull_energy(self.comp1 * x + self.comp2 * (1-x)) - \\\n            self.e1 * x - self.e2 * (1-x)", "docstring": "Computes reaction energy in eV/atom at mixing ratio x : (1-x) for\nself.comp1 : self.comp2.\n\nArgs:\nx (float): Mixing ratio x of reactants, a float between 0 and 1.\n\nReturns:\nReaction energy.", "source": "juraj-google-style"}
{"code": "def estimate_size(self) -> Optional[int]:\n    raise NotImplementedError", "docstring": "Estimates the size of source in bytes.\n\nAn estimate of the total size (in bytes) of the data that would be read\nfrom this source. This estimate is in terms of external storage size,\nbefore performing decompression or other processing.\n\nReturns:\nestimated size of the source if the size can be determined, ``None``\notherwise.", "source": "github-repos"}
{"code": "def GetEventTagByIdentifier(self, identifier):\n    \n    event_tag = self._GetAttributeContainerByIndex(\n        self._CONTAINER_TYPE_EVENT_TAG, identifier.row_identifier - 1)\n    if event_tag:\n      event_identifier = identifiers.SQLTableIdentifier(\n          self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)\n      event_tag.SetEventIdentifier(event_identifier)\n\n      del event_tag.event_row_identifier\n\n    return event_tag", "docstring": "Retrieves a specific event tag.\n\nArgs:\nidentifier (SQLTableIdentifier): event tag identifier.\n\nReturns:\nEventTag: event tag or None if not available.", "source": "juraj-google-style"}
{"code": "def setMood(self, mood):\n        \n        self.conn(\"POST\", \"{0}/users/{1}/profile/partial\".format(SkypeConnection.API_USER, self.userId),\n                  auth=SkypeConnection.Auth.SkypeToken, json={\"payload\": {\"mood\": mood or \"\"}})\n        self.user.mood = SkypeUser.Mood(plain=mood) if mood else None", "docstring": "Update the activity message for the current user.\n\nArgs:\nmood (str): new mood message", "source": "juraj-google-style"}
{"code": "def get_legacy_output_types(dataset_or_iterator):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), get_structure(dataset_or_iterator))", "docstring": "Returns the output shapes for elements of the input dataset / iterator.\n\nArgs:\ndataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\nReturns:\nA (nested) structure of `tf.DType` objects matching the structure of\ndataset / iterator elements and specifying the shape of the individual\ncomponents.\n\n@compatibility(TF2)\nThis is a legacy API for inspecting the type signature of dataset elements. In\nTF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.\n@end_compatibility", "source": "github-repos"}
{"code": "def get_num_of_video_patches(self, num_frames: int, height: int, width: int, videos_kwargs=None):\n    min_pixels = videos_kwargs.get('min_pixels', None) or self.size['shortest_edge']\n    max_pixels = videos_kwargs.get('max_pixels', None) or self.size['longest_edge']\n    patch_size = videos_kwargs.get('patch_size', None) or self.patch_size\n    merge_size = videos_kwargs.get('merge_size', None) or self.merge_size\n    temporal_patch_size = videos_kwargs.get('temporal_patch_size', None) or self.temporal_patch_size\n    factor = patch_size * merge_size\n    resized_height, resized_width = smart_resize(height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels)\n    grid_h, grid_w = (resized_height \n    grid_t = num_frames \n    return grid_t * grid_h * grid_w", "docstring": "A utility that returns number of video patches a given video size.\n\nArgs:\nnum_frames (`int`):\nNumber of frames in the input video.\nheight (`int`):\nHeight of the input video.\nwidth (`int`):\nWidth of the input video.\nvideos_kwargs (`dict`, *optional*)\nAny kwargs to override defaults of the video processor.\nReturns:\n`Tuple(int, int)`: Number of placeholder tokens required and number of patches per image.", "source": "github-repos"}
{"code": "def __init__(self, values, row_splits):\n    if not (isinstance(row_splits, (np.ndarray, np.generic)) and row_splits.dtype in (np.int64, np.int32) and (row_splits.ndim == 1)):\n        raise TypeError('row_splits must be a 1D int32 or int64 numpy array')\n    if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):\n        raise TypeError('values must be a numpy array or a RaggedTensorValue')\n    if isinstance(values, RaggedTensorValue) and row_splits.dtype != values.row_splits.dtype:\n        raise ValueError('row_splits and values.row_splits must have the same dtype')\n    self._values = values\n    self._row_splits = row_splits", "docstring": "Creates a `RaggedTensorValue`.\n\nArgs:\nvalues: A numpy array of any type and shape; or a RaggedTensorValue.\nrow_splits: A 1-D int32 or int64 numpy array.", "source": "github-repos"}
{"code": "def _resolve_non_literal_route(self, method, path):\n    for route_dict in (self._wildcard, self._regex):\n        if (method in route_dict):\n            for route in reversed(route_dict[method]):\n                callback_data = route.match(path)\n                if (callback_data is not None):\n                    return callback_data\n    return None", "docstring": "Resolve a request to a wildcard or regex route handler.\n\nArguments:\nmethod (str): HTTP method name, e.g. GET, POST, etc.\npath (str): Request path\n\nReturns:\ntuple or None: A tuple of three items:\n\n1. Route handler (callable)\n2. Positional arguments (list)\n3. Keyword arguments (dict)\n\n``None`` if no route matches the request.", "source": "codesearchnet"}
{"code": "def heightmap_get_normal(hm: np.ndarray, x: float, y: float, waterLevel: float) -> Tuple[(float, float, float)]:\n    cn = ffi.new('float[3]')\n    lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)\n    return tuple(cn)", "docstring": "Return the map normal at given coordinates.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nx (float): The x coordinate.\ny (float): The y coordinate.\nwaterLevel (float): The heightmap is considered flat below this value.\n\nReturns:\nTuple[float, float, float]: An (x, y, z) vector normal.", "source": "codesearchnet"}
{"code": "def create_normal_matrix(self, modelview):\n    normal_m = Matrix33.from_matrix44(modelview)\n    normal_m = normal_m.inverse\n    normal_m = normal_m.transpose()\n    return normal_m", "docstring": "Creates a normal matrix from modelview matrix\n\nArgs:\nmodelview: The modelview matrix\n\nReturns:\nA 3x3 Normal matrix as a :py:class:`numpy.array`", "source": "codesearchnet"}
{"code": "def setup(self, artifacts, use_tsk, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):\n    super(GRRHuntArtifactCollector, self).setup(reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify)\n    self.artifacts = [item.strip() for item in artifacts.strip().split(',')]\n    if (not artifacts):\n        self.state.add_error('No artifacts were specified.', critical=True)\n    self.use_tsk = use_tsk", "docstring": "Initializes a GRR Hunt artifact collector.\n\nArgs:\nartifacts: str, comma-separated list of GRR-defined artifacts.\nuse_tsk: toggle for use_tsk flag.\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: str, comma-separated list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "codesearchnet"}
{"code": "def delete_snl(self, snl_ids):\n    try:\n        payload = {'ids': json.dumps(snl_ids)}\n        response = self.session.post('{}/snl/delete'.format(self.preamble), data=payload)\n        if (response.status_code in [200, 400]):\n            resp = json.loads(response.text, cls=MontyDecoder)\n            if resp['valid_response']:\n                if resp.get('warning'):\n                    warnings.warn(resp['warning'])\n                return resp\n            else:\n                raise MPRestError(resp['error'])\n        raise MPRestError('REST error with status code {} and error {}'.format(response.status_code, response.text))\n    except Exception as ex:\n        raise MPRestError(str(ex))", "docstring": "Delete earlier submitted SNLs.\n\n.. note::\n\nAs of now, this MP REST feature is open only to a select group of\nusers. Opening up submissions to all users is being planned for\nthe future.\n\nArgs:\nsnl_ids: List of SNL ids.\n\nRaises:\nMPRestError", "source": "codesearchnet"}
{"code": "def _ensure_servable(input_tensors, names_to_output_tensor_infos):\n    plain_input_tensors = nest.flatten(input_tensors, expand_composites=True)\n    graph = op_selector.get_unique_graph(plain_input_tensors)\n    output_tensors = [utils.get_tensor_from_tensor_info(tensor, graph=graph) for tensor in names_to_output_tensor_infos.values()]\n    plain_output_tensors = nest.flatten(output_tensors, expand_composites=True)\n    dependency_ops = op_selector.get_backward_walk_ops(plain_output_tensors, stop_at_ts=plain_input_tensors)\n    fed_tensors = object_identity.ObjectIdentitySet(plain_input_tensors)\n    for dependency_op in dependency_ops:\n        if _must_be_fed(dependency_op) and (not all((output in fed_tensors for output in dependency_op.outputs))):\n            input_tensor_names = [tensor.name for tensor in plain_input_tensors]\n            output_tensor_keys = list(names_to_output_tensor_infos.keys())\n            output_tensor_names = [tensor.name for tensor in plain_output_tensors]\n            dependency_path = op_selector.show_path(dependency_op, plain_output_tensors, plain_input_tensors)\n            raise ValueError(f\"The signature's input tensors {input_tensor_names} are insufficient to compute its output keys {output_tensor_keys} (respectively, tensors {output_tensor_names}) because of the dependency on `{dependency_op.name}` which is not given as a signature input, as illustrated by the following dependency path: {dependency_path}\")", "docstring": "Check that the signature outputs don't depend on unreachable placeholders.\n\nArgs:\ninput_tensors: An iterable of `Tensor`s specified as the signature's inputs.\nnames_to_output_tensor_infos: An mapping from output names to respective\n`TensorInfo`s corresponding to the signature's output tensors.\n\nRaises:\nValueError: If any of the signature's outputs depend on placeholders not\nprovided as signature's inputs.", "source": "github-repos"}
{"code": "def __discover_node(self, node, depth):\n        \n        if (node == None):\n            return\n\n        if (depth >= self.max_depth):\n            return\n\n        if (node.discovered > 0):\n            return\n        node.discovered = 1\n\n        \n        \n        \n        if (node.ip[0] == '0.0.0.0'):\n            return\n\n        \n        if (node.snmpobj.success == 0):\n            return\n\n        \n        dcodes = DCODE_STEP_INTO\n        if (depth == 0):\n            dcodes |= DCODE_ROOT\n        self.__print_step(node.ip[0], node.name, depth, dcodes)\n\n        \n        snmpobj = node.snmpobj\n\n        \n        valid_neighbors = []\n\n        \n        cdp_neighbors  = node.get_cdp_neighbors()\n        lldp_neighbors = node.get_lldp_neighbors()\n        neighbors      = cdp_neighbors + lldp_neighbors\n        if (len(neighbors) == 0):\n            return\n\n        for n in neighbors:\n            \n            if (n.remote_ip == None):\n                n.remote_ip = '0.0.0.0'\n\n            \n            acl_action = self.__match_node_acl(n.remote_ip, n.remote_name)\n            if (acl_action == 'deny'):\n                \n                continue\n            \n            dcodes = DCODE_DISCOVERED\n            child = None\n            if (acl_action == 'include'):\n                \n                child    = natlas_node()\n                child.ip = [n.remote_ip]\n                dcodes  |= DCODE_INCLUDE\n            else:\n                \n                child, query_result = self.__query_node(n.remote_ip, n.remote_name)\n\n            \n            if (child.snmpobj.success == 0):\n                child.name = util.shorten_host_name(n.remote_name, self.config.host_domains)\n                dcodes  |= DCODE_ERR_SNMP\n            \n            \n            acl_action = self.__match_node_acl(n.remote_ip, n.remote_name, n.remote_plat, n.remote_ios, child.serial)\n            if (acl_action == 'deny'):\n                continue\n\n            if (query_result == NODE_NEW):\n                self.nodes.append(child)\n                if (acl_action == 'leaf'):          dcodes |= DCODE_LEAF\n                if (n.discovered_proto == 'cdp'):   dcodes |= DCODE_CDP\n                if (n.discovered_proto == 'lldp'):  dcodes |= DCODE_LLDP\n                self.__print_step(n.remote_ip, n.remote_name, depth+1, dcodes)\n\n            \n            child.plat = n.remote_plat\n            child.ios  = n.remote_ios\n            \n            \n            n.node = child\n            self.__add_link(node, n)\n\n            \n            if ((query_result == NODE_NEW) & (acl_action != 'leaf') & (acl_action != 'include')):\n                valid_neighbors.append(child)\n\n        \n        for n in valid_neighbors:\n            self.__discover_node(n, depth+1)", "docstring": "Given a node, recursively enumerate its adjacencies\nuntil we reach the specified depth (>0).\n\nArgs:\nnode:   natlas_node object to enumerate.\ndepth:  The depth left that we can go further away from the root.", "source": "juraj-google-style"}
{"code": "def listen(self, message_consumer):\n        \n        while not self._rfile.closed:\n            request_str = self._read_message()\n\n            if request_str is None:\n                break\n\n            try:\n                message_consumer(json.loads(request_str.decode('utf-8')))\n            except ValueError:\n                log.exception(\"Failed to parse JSON message %s\", request_str)\n                continue", "docstring": "Blocking call to listen for messages on the rfile.\n\nArgs:\nmessage_consumer (fn): function that is passed each message as it is read off the socket.", "source": "juraj-google-style"}
{"code": "def db_update_record(self, table_name, column, value):\n        \n        sql = 'UPDATE {} SET {} = \\'{}\\''.format(table_name, column, value)\n        cur = self.db_conn.cursor()\n        cur.execute(sql)", "docstring": "Insert records into DB.\n\nArgs:\ntable_name (str): The name of the table.\ncolumn (str): The column name in which the value is to be updated.\nvalue (str): The value to update in the column.", "source": "juraj-google-style"}
{"code": "def _build_shuffle_scatter(reduced_shards, dst_devices):\n    num_devices = len(dst_devices)\n    out_tensors = []\n    for d in range(0, num_devices):\n        with ops.device(dst_devices[d]):\n            out_tensors.append(array_ops.concat(reduced_shards, 0))\n    return out_tensors", "docstring": "Build the scatter phase of shuffle all-reduce.\n\nArgs:\nreduced_shards:  list of `tf.Tensor` fully reduced shards\ndst_devices: list of names of devices at which the fully-reduced value\nshould be reconstituted.\n\nReturns:\nlist of `tf.Tensor` scattered tensors.", "source": "github-repos"}
{"code": "def parse_instrumentation_options(self, parameters=None):\n    if parameters is None:\n        return {}\n    filtered_parameters = {}\n    for parameter_key, parameter_value in parameters.items():\n        if parameter_key.startswith(self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):\n            option_key = parameter_key[len(self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):]\n            filtered_parameters[option_key] = parameter_value\n    return filtered_parameters", "docstring": "Returns the options for the instrumentation test from user_params.\n\nBy default, this method assume that the correct instrumentation options\nall start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX.\n\nArgs:\nparameters: dict, the key value pairs representing an assortment\nof parameters including instrumentation options. Usually,\nthis argument will be from self.user_params.\n\nReturns:\nA dictionary of options/parameters for the instrumentation tst.", "source": "github-repos"}
{"code": "def _get_input_readers(self, state):\n    serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY % state.key().id_or_name())\n    serialized_input_readers = model._HugeTaskPayload.get_by_key_name(serialized_input_readers_key, parent=state)\n    input_reader_class = state.mapreduce_spec.mapper.input_reader_class()\n    split_param = state.mapreduce_spec.mapper\n    if issubclass(input_reader_class, map_job.InputReader):\n        split_param = map_job.JobConfig._to_map_job_config(state.mapreduce_spec, os.environ.get('HTTP_X_APPENGINE_QUEUENAME'))\n    if (serialized_input_readers is None):\n        readers = input_reader_class.split_input(split_param)\n    else:\n        readers = [input_reader_class.from_json_str(_json) for _json in json.loads(zlib.decompress(serialized_input_readers.payload))]\n    if (not readers):\n        return (None, None)\n    state.mapreduce_spec.mapper.shard_count = len(readers)\n    state.active_shards = len(readers)\n    if (serialized_input_readers is None):\n        serialized_input_readers = model._HugeTaskPayload(key_name=serialized_input_readers_key, parent=state)\n        readers_json_str = [i.to_json_str() for i in readers]\n        serialized_input_readers.payload = zlib.compress(json.dumps(readers_json_str))\n    return (readers, serialized_input_readers)", "docstring": "Get input readers.\n\nArgs:\nstate: a MapreduceState model.\n\nReturns:\nA tuple: (a list of input readers, a model._HugeTaskPayload entity).\nThe payload entity contains the json serialized input readers.\n(None, None) when input reader inplitting returned no data to process.", "source": "codesearchnet"}
{"code": "def diff_str(a: str | object, b: str | object) -> str:\n    if not isinstance(a, str):\n        a = pretty_repr(a).split('\\n')\n    if not isinstance(b, str):\n        b = pretty_repr(b).split('\\n')\n    diff = difflib.ndiff(a, b)\n    return '\\n'.join(diff)", "docstring": "Pretty diff between 2 objects.\n\nArgs:\na: Object/str to compare\nb: Object/str to compare\n\nReturns:\nThe diff string", "source": "github-repos"}
{"code": "def volatility(self, strike: types.FloatTensor, expiry_dates: Optional[types.DateTensor]=None, expiry_times: Optional[types.FloatTensor]=None, term: Optional[types.Period]=None) -> types.FloatTensor:\n    del term\n    if expiry_dates is not None and expiry_times is not None:\n        raise ValueError('Unexpected inputs: Both expiry_dates and expiry times are specified')\n    if expiry_times is None:\n        expiry_dates = dateslib.convert_to_date_tensor(expiry_dates)\n        expiries = self._day_count_fn(start_date=self._valuation_date, end_date=expiry_dates, dtype=self._dtype)\n    else:\n        expiries = tf.convert_to_tensor(expiry_times, dtype=self._dtype)\n    strike = tf.convert_to_tensor(strike, dtype=self._dtype, name='strike')\n    return self._interpolator(expiries, strike)", "docstring": "Returns the interpolated volatility on a specified set of expiries.\n\nArgs:\nstrike: The strikes for which the interpolation is desired.\nexpiry_dates: Optional input specifying the expiry dates for which\ninterpolation is desired. The user should supply either `expiry_dates`\nor `expiry_times` for interpolation.\nexpiry_times: Optional real `Tensor` containing the time to expiration\nfor which interpolation is desired. The user should supply either\n`expiry_dates` or `expiry_times` for interpolation.\nterm: Optional input specifying the term of the underlying rate for\nwhich the interpolation is desired. Relevant for interest rate implied\nvolatility data.\n\nReturns:\nA `Tensor` of the same shape as `expiry` with the interpolated volatility\nfrom the volatility surface.\n\nRaises:\nValueError is both `expiry_dates` and `expiry_times`  are specified.", "source": "github-repos"}
{"code": "def load_disease_term(self, disease_obj):\n        \n        LOG.debug(\"Loading disease term %s into database\", disease_obj['_id'])\n        try:\n            self.disease_term_collection.insert_one(disease_obj)\n        except DuplicateKeyError as err:\n            raise IntegrityError(\"Disease term %s already exists in database\".format(disease_obj['_id']))\n\n        LOG.debug(\"Disease term saved\")", "docstring": "Load a disease term into the database\n\nArgs:\ndisease_obj(dict)", "source": "juraj-google-style"}
{"code": "def get_ref(profile, ref):\n    \n    resource = \"/refs/\" + ref\n    data = api.get_request(profile, resource)\n    return prepare(data)", "docstring": "Fetch a ref.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nref\nThe ref to fetch, e.g., ``heads/my-feature-branch``.\n\nReturns\nA dict with data about the ref.", "source": "juraj-google-style"}
{"code": "def _parse_trunk_groups(self, config):\n        \n        values = TRUNK_GROUP_RE.findall(config)\n        return dict(trunk_groups=values)", "docstring": "_parse_trunk_groups scans the provided configuration block and\nextracts all the vlan trunk groups.  If no trunk groups are configured\nan empty List is returned as the vlaue.  The return dict is intended\nto be merged into the response dict.\n\nArgs:\nconfig (str): The vlan configuration block form the node's\nrunning configuration\n\nReturns:\ndict: resource dict attribute", "source": "juraj-google-style"}
{"code": "def lower_and_check_unique(dict_to_check):\n    if (dict_to_check == None):\n        return None\n    else:\n        to_return = {}\n        for key in dict_to_check:\n            new_key = key.lower()\n            if (new_key == 'jobtype'):\n                new_key = 'job_type'\n            if (new_key in to_return):\n                raise Exception((('Multiple instances of key ' + new_key) + ' found!'))\n            else:\n                try:\n                    to_return[new_key] = dict_to_check.get(key).lower()\n                except AttributeError:\n                    to_return[new_key] = dict_to_check.get(key)\n        return to_return", "docstring": "Takes a dictionary and makes all the keys lower case. Also replaces\n\"jobtype\" with \"job_type\" just so that key specifically can be called\nelsewhere without ambiguity. Finally, ensures that multiple identical\nkeys, that differed only due to different capitalizations, are not\npresent. If there are multiple equivalent keys, an Exception is raised.\n\nArgs:\ndict_to_check (dict): The dictionary to check and standardize\n\nReturns:\nto_return (dict): An identical dictionary but with all keys made\nlower case and no identical keys present.", "source": "codesearchnet"}
{"code": "def top_1_tpu(inputs):\n    inputs_max = tf.reduce_max(inputs, axis=(- 1), keepdims=True)\n    mask = tf.to_int32(tf.equal(inputs_max, inputs))\n    index = (tf.range(tf.shape(inputs)[(- 1)]) * mask)\n    return (tf.squeeze(inputs_max, (- 1)), tf.reduce_max(index, axis=(- 1)))", "docstring": "find max and argmax over the last dimension.\n\nWorks well on TPU\n\nArgs:\ninputs: A tensor with shape [..., depth]\n\nReturns:\nvalues: a Tensor with shape [...]\nindices: a Tensor with shape [...]", "source": "codesearchnet"}
{"code": "def _from_yang_library(self, yang_lib: Dict[(str, Any)]) -> None:\n    try:\n        for item in yang_lib['ietf-yang-library:modules-state']['module']:\n            name = item['name']\n            rev = item['revision']\n            mid = (name, rev)\n            mdata = ModuleData(mid)\n            self.modules[mid] = mdata\n            if (item['conformance-type'] == 'implement'):\n                if (name in self.implement):\n                    raise MultipleImplementedRevisions(name)\n                self.implement[name] = rev\n            mod = self._load_module(name, rev)\n            mdata.statement = mod\n            if ('feature' in item):\n                mdata.features.update(item['feature'])\n            locpref = mod.find1('prefix', required=True).argument\n            mdata.prefix_map[locpref] = mid\n            if ('submodule' in item):\n                for s in item['submodule']:\n                    sname = s['name']\n                    smid = (sname, s['revision'])\n                    sdata = ModuleData(mid)\n                    self.modules[smid] = sdata\n                    mdata.submodules.add(smid)\n                    submod = self._load_module(*smid)\n                    sdata.statement = submod\n                    bt = submod.find1('belongs-to', name, required=True)\n                    locpref = bt.find1('prefix', required=True).argument\n                    sdata.prefix_map[locpref] = mid\n    except KeyError as e:\n        raise BadYangLibraryData(('missing ' + str(e))) from None\n    self._process_imports()\n    self._check_feature_dependences()", "docstring": "Set the schema structures from YANG library data.\n\nArgs:\nyang_lib: Dictionary with YANG library data.\n\nRaises:\nBadYangLibraryData: If YANG library data is invalid.\nFeaturePrerequisiteError: If a pre-requisite feature isn't\nsupported.\nMultipleImplementedRevisions: If multiple revisions of an\nimplemented module are listed in YANG library.\nModuleNotFound: If a YANG module wasn't found in any of the\ndirectories specified in `mod_path`.", "source": "codesearchnet"}
{"code": "def count(self, event):\n        \n        return len(self._listeners[event]) + len(self._once[event])", "docstring": "Get the number of listeners for the event.\n\nArgs:\nevent (str): The event for which to count all listeners.\n\nThe resulting count is a combination of listeners added using\n'on'/'add_listener' and 'once'.", "source": "juraj-google-style"}
{"code": "def get_all_voronoi_polyhedra(self, structure):\n    if (len(structure) == 1):\n        return [self.get_voronoi_polyhedra(structure, 0)]\n    if (self.targets is None):\n        targets = structure.composition.elements\n    else:\n        targets = self.targets\n    sites = [x.to_unit_cell() for x in structure]\n    indices = [(i, 0, 0, 0) for (i, _) in enumerate(structure)]\n    all_neighs = structure.get_all_neighbors(self.cutoff, include_index=True, include_image=True)\n    for neighs in all_neighs:\n        sites.extend([x[0] for x in neighs])\n        indices.extend([((x[2],) + x[3]) for x in neighs])\n    indices = np.array(indices, dtype=np.int)\n    (indices, uniq_inds) = np.unique(indices, return_index=True, axis=0)\n    sites = np.array(sites)[uniq_inds]\n    (root_images,) = np.nonzero((np.abs(indices[(:, 1:)]).max(axis=1) == 0))\n    del indices\n    qvoronoi_input = [s.coords for s in sites]\n    voro = Voronoi(qvoronoi_input)\n    return [self._extract_cell_info(structure, i, sites, targets, voro, self.compute_adj_neighbors) for i in root_images.tolist()]", "docstring": "Get the Voronoi polyhedra for all site in a simulation cell\n\nArgs:\nstructure (Structure): Structure to be evaluated\nReturns:\nA dict of sites sharing a common Voronoi facet with the site\nn mapped to a directory containing statistics about the facet:\n- solid_angle - Solid angle subtended by face\n- angle_normalized - Solid angle normalized such that the\nfaces with the largest\n- area - Area of the facet\n- face_dist - Distance between site n and the facet\n- volume - Volume of Voronoi cell for this face\n- n_verts - Number of vertices on the facet", "source": "codesearchnet"}
{"code": "def render_diagram(root_task, out_base, max_param_len=20, horizontal=False, colored=False):\n    import re\n    import codecs\n    import subprocess\n    from ozelot import config\n    from ozelot.etl.tasks import get_task_name, get_task_param_string\n    lines = [u'digraph G {']\n    if horizontal:\n        lines.append(u'rankdir=LR;')\n\n    def get_id(task):\n        s = ((get_task_name(task) + '_') + get_task_param_string(task))\n        return re.sub('\\\\W+', '', re.sub(' ', '_', s))\n    existing_nodes = set()\n    existing_edges = set()\n\n    def _build(task, parent_id=None):\n        tid = get_id(task)\n        if (tid not in existing_nodes):\n            params = task.to_str_params()\n            param_list = ''\n            for (k, v) in params.items():\n                if (len(v) > max_param_len):\n                    v = (v[:max_param_len] + '...')\n                param_list += '<TR><TD ALIGN=\"LEFT\"><FONT POINT-SIZE=\"10\">{:s}</FONT></TD><TD ALIGN=\"LEFT\"><FONT POINT-SIZE=\"10\">{:s}</FONT></TD></TR>'.format(k, v)\n            label = (('<TABLE BORDER=\"0\" CELLSPACING=\"1\" CELLPADDING=\"1\"><TR><TD COLSPAN=\"2\" ALIGN=\"CENTER\"><FONT POINT-SIZE=\"12\">{:s}</FONT></TD></TR>'.format(get_task_name(task)) + param_list) + '</TABLE>')\n            style = getattr(task, 'diagram_style', [])\n            if colored:\n                color = ', color=\"{:s}\"'.format(('green' if task.complete() else 'red'))\n            else:\n                color = ''\n            lines.append(u'{name:s} [label=< {label:s} >, shape=\"rect\" {color:s}, style=\"{style:s}\"];\\n'.format(name=tid, label=label, color=color, style=','.join(style)))\n            existing_nodes.add(tid)\n            for req in task.requires():\n                _build(req, parent_id=tid)\n        if ((parent_id is not None) and ((tid, parent_id) not in existing_edges)):\n            lines.append(u'{source:s} -> {target:s};\\n'.format(source=tid, target=parent_id))\n    _build(root_task)\n    lines.append(u'}')\n    with codecs.open((out_base + '.dot'), 'w', encoding='utf-8') as f:\n        f.write(u'\\n'.join(lines))\n    if (not hasattr(config, 'DOT_EXECUTABLE')):\n        raise RuntimeError(\"Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'\")\n    if (not os.path.exists(config.DOT_EXECUTABLE)):\n        raise IOError((\"Could not find file pointed to by 'DOT_EXECUTABLE': \" + str(config.DOT_EXECUTABLE)))\n    subprocess.check_call([config.DOT_EXECUTABLE, '-T', 'png', '-o', (out_base + '.png'), (out_base + '.dot')])", "docstring": "Render a diagram of the ETL pipeline\n\nAll upstream tasks (i.e. requirements) of :attr:`root_task` are rendered.\n\nNodes are, by default, styled as simple rects. This style is augmented by any\n:attr:`diagram_style` attributes of the tasks.\n\n.. note:: This function requires the 'dot' executable from the GraphViz package to be installed\nand its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.\n\nArgs:\nroot_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline\nout_base (str): base output file name (file endings will be appended)\nmax_param_len (int): Maximum shown length of task parameter values\nhorizontal (bool): If True, layout graph left-to-right instead of top-to-bottom\ncolored (bool): If True, show task completion status by color of nodes", "source": "codesearchnet"}
{"code": "def from_Z(z: int):\n    for (sym, data) in _pt_data.items():\n        if (data['Atomic no'] == z):\n            return Element(sym)\n    raise ValueError(('No element with this atomic number %s' % z))", "docstring": "Get an element from an atomic number.\n\nArgs:\nz (int): Atomic number\n\nReturns:\nElement with atomic number z.", "source": "codesearchnet"}
{"code": "def parse_zip(zipfilename: str, regex: Pattern, invert_match: bool, files_with_matches: bool, files_without_match: bool, grep_inner_file_name: bool, show_inner_file: bool) -> None:\n    assert (not (files_without_match and files_with_matches))\n    report_lines = ((not files_without_match) and (not files_with_matches))\n    report_hit_lines = (report_lines and (not invert_match))\n    report_miss_lines = (report_lines and invert_match)\n    log.debug(('Checking ZIP: ' + zipfilename))\n    found_in_zip = False\n    try:\n        with ZipFile(zipfilename, 'r') as zf:\n            for contentsfilename in zf.namelist():\n                log.debug(('... checking file: ' + contentsfilename))\n                if grep_inner_file_name:\n                    found_in_filename = bool(regex.search(contentsfilename))\n                    found_in_zip = (found_in_zip or found_in_filename)\n                    if (files_with_matches and found_in_zip):\n                        report_hit_filename(zipfilename, contentsfilename, show_inner_file)\n                        return\n                    if ((report_hit_lines and found_in_filename) or (report_miss_lines and (not found_in_filename))):\n                        report_line(zipfilename, contentsfilename, contentsfilename, show_inner_file)\n                else:\n                    try:\n                        with zf.open(contentsfilename, 'r') as file:\n                            try:\n                                for line in file.readlines():\n                                    found_in_line = bool(regex.search(line))\n                                    found_in_zip = (found_in_zip or found_in_line)\n                                    if (files_with_matches and found_in_zip):\n                                        report_hit_filename(zipfilename, contentsfilename, show_inner_file)\n                                        return\n                                    if ((report_hit_lines and found_in_line) or (report_miss_lines and (not found_in_line))):\n                                        report_line(zipfilename, contentsfilename, line, show_inner_file)\n                            except EOFError:\n                                pass\n                    except RuntimeError as e:\n                        log.warning('RuntimeError whilst processing {} [{}]: probably encrypted contents; error was {!r}', zipfilename, contentsfilename, e)\n    except (zlib.error, BadZipFile) as e:\n        log.debug('Invalid zip: {}; error was {!r}', zipfilename, e)\n    if (files_without_match and (not found_in_zip)):\n        report_miss_filename(zipfilename)", "docstring": "Implement a \"grep within an OpenXML file\" for a single OpenXML file, which\nis by definition a ``.zip`` file.\n\nArgs:\nzipfilename: name of the OpenXML (zip) file\nregex: regular expression to match\ninvert_match: find files that do NOT match, instead of ones that do?\nfiles_with_matches: show filenames of files with a match?\nfiles_without_match: show filenames of files with no match?\ngrep_inner_file_name: search the names of \"inner\" files, rather than\ntheir contents?\nshow_inner_file: show the names of the \"inner\" files, not just the\n\"outer\" (OpenXML) file?", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\nobject_queries (`torch.FloatTensor`, *optional*):\nObject queries (also called content embeddings), to be added to the hidden states.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def check_correct(state, check, diagnose):\n    feedback = None\n    try:\n        multi(state, check)\n    except TestFail as e:\n        feedback = e.feedback\n    try:\n        multi(state, diagnose)\n    except TestFail as e:\n        if ((feedback is not None) or state.force_diagnose):\n            feedback = e.feedback\n    if (feedback is not None):\n        state.report(feedback)\n    return state", "docstring": "Allows feedback from a diagnostic SCT, only if a check SCT fails.\n\nArgs:\nstate: State instance describing student and solution code. Can be omitted if used with Ex().\ncheck: An sct chain that must succeed.\ndiagnose: An sct chain to run if the check fails.\n\n:Example:\nThe SCT below tests whether students query result is correct, before running diagnostic SCTs.. ::\n\nEx().check_correct(\ncheck_result(),\ncheck_node('SelectStmt')\n)", "source": "codesearchnet"}
{"code": "def evaluate_rpn(rpn):\n    vals_stack = []\n    for item in rpn:\n        if (item in _ALL_OPS):\n            v2 = vals_stack.pop()\n            if (item in _UNARY_OPS):\n                res = _UNARY_OPS[item](v2)\n            elif (item in _BIN_OPS):\n                v1 = vals_stack.pop()\n                res = _BIN_OPS[item](v1, v2)\n            else:\n                raise ValueError(('%s not in unary_ops or bin_ops' % str(item)))\n            vals_stack.append(res)\n        else:\n            vals_stack.append(item)\n    assert (len(vals_stack) == 1)\n    assert isinstance(vals_stack[0], bool)\n    return vals_stack[0]", "docstring": "Evaluates the RPN form produced my map2rpn.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def _peer_bfd_tx(self, **kwargs):\n        \n        method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \\\n                      'neighbor_neighbor_ips_neighbor_addr_bfd_interval_min_tx'\n        bfd_tx = getattr(self._rbridge, method_name)\n        config = bfd_tx(**kwargs)\n        if kwargs['delete']:\n            tag = 'min-tx'\n            config.find('.\n        return config", "docstring": "Return the BFD minimum transmit interval XML.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\npeer_ip (str): Peer IPv4 address for BFD setting.\nmin_tx (str): BFD transmit interval in milliseconds (300, 500, etc)\ndelete (bool): Remove the configuration if ``True``.\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def append(self, data):\n        \n        if isinstance(data, list) and len(data) > 0:\n            self.nodes.append(data)\n        else:\n            self.nodes.append([data])", "docstring": "Appends items or lists to the Lattice\n\nArgs:\ndata (item,list) : The Item or List to be added to the Lattice", "source": "juraj-google-style"}
{"code": "def v4_int_to_packed(address):\n    \n    if address > _BaseV4._ALL_ONES:\n        raise ValueError('Address too large for IPv4')\n    return Bytes(struct.pack('!I', address))", "docstring": "The binary representation of this address.\n\nArgs:\naddress: An integer representation of an IPv4 IP address.\n\nReturns:\nThe binary representation of this address.\n\nRaises:\nValueError: If the integer is too large to be an IPv4 IP\naddress.", "source": "juraj-google-style"}
{"code": "def __init__(self, pipeline: Union[beam_runner_api_pb2.Pipeline, beam.Pipeline], default_vertex_attrs={'shape': 'box'}, default_edge_attrs=None, render_option=None):\n    self._lock = threading.Lock()\n    self._graph: pydot.Dot = None\n    self._pipeline_instrument = None\n    if isinstance(pipeline, beam.Pipeline):\n        self._pipeline_instrument = inst.PipelineInstrument(pipeline, pipeline._options)\n        self._pipeline_instrument.preprocess()\n    if isinstance(pipeline, beam_runner_api_pb2.Pipeline):\n        self._pipeline_proto = pipeline\n    elif isinstance(pipeline, beam.Pipeline):\n        self._pipeline_proto = pipeline.to_runner_api()\n    else:\n        raise TypeError('pipeline should either be a %s or %s, while %s is given' % (beam_runner_api_pb2.Pipeline, beam.Pipeline, type(pipeline)))\n    self._consumers: DefaultDict[str, List[str]] = collections.defaultdict(list)\n    self._producers: Dict[str, str] = {}\n    for transform_id, transform_proto in self._top_level_transforms():\n        for pcoll_id in transform_proto.inputs.values():\n            self._consumers[pcoll_id].append(transform_id)\n        for pcoll_id in transform_proto.outputs.values():\n            self._producers[pcoll_id] = transform_id\n    default_vertex_attrs = default_vertex_attrs or {'shape': 'box'}\n    if 'color' not in default_vertex_attrs:\n        default_vertex_attrs['color'] = 'blue'\n    if 'fontcolor' not in default_vertex_attrs:\n        default_vertex_attrs['fontcolor'] = 'blue'\n    vertex_dict, edge_dict = self._generate_graph_dicts()\n    self._construct_graph(vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs)\n    self._renderer = pipeline_graph_renderer.get_renderer(render_option)", "docstring": "Constructor of PipelineGraph.\n\nExamples:\ngraph = pipeline_graph.PipelineGraph(pipeline_proto)\ngraph.get_dot()\n\nor\n\ngraph = pipeline_graph.PipelineGraph(pipeline)\ngraph.get_dot()\n\nArgs:\npipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.\ndefault_vertex_attrs: (Dict[str, str]) a dict of default vertex attributes\ndefault_edge_attrs: (Dict[str, str]) a dict of default edge attributes\nrender_option: (str) this parameter decides how the pipeline graph is\nrendered. See display.pipeline_graph_renderer for available options.", "source": "github-repos"}
{"code": "def getaccountaddress(self, user_id=''):\n    address = self.rpc.call('getaccountaddress', user_id)\n    self.logger.debug('Your', self.coin, 'address is', address)\n    return address", "docstring": "Get the coin address associated with a user id.\n\nIf the specified user id does not yet have an address for this\ncoin, then generate one.\n\nArgs:\nuser_id (str): this user's unique identifier\n\nReturns:\nstr: Base58Check address for this account", "source": "codesearchnet"}
{"code": "def init_from_dataset_and_submissions_write_to_datastore(\n      self, dataset_batches, attack_submission_ids):\n    \n    batches_x_attacks = itertools.product(dataset_batches.data.keys(),\n                                          attack_submission_ids)\n    for idx, (dataset_batch_id, attack_id) in enumerate(batches_x_attacks):\n      adv_batch_id = ADVERSARIAL_BATCH_ID_PATTERN.format(idx)\n      self.add_batch(adv_batch_id,\n                     {'dataset_batch_id': dataset_batch_id,\n                      'submission_id': attack_id})\n    self.write_to_datastore()", "docstring": "Init list of adversarial batches from dataset batches and submissions.\n\nArgs:\ndataset_batches: instances of DatasetBatches\nattack_submission_ids: iterable with IDs of all (targeted and nontargeted)\nattack submissions, could be obtains as\nCompetitionSubmissions.get_all_attack_ids()", "source": "juraj-google-style"}
{"code": "def hasReservation(self, pid, subject, vendorSpecific=None):\n        \n        response = self.hasReservationResponse(pid, subject, vendorSpecific)\n        return self._read_boolean_404_response(response)", "docstring": "See Also: hasReservationResponse()\n\nArgs:\npid:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _init_summary_op(self, summary_op=USE_DEFAULT):\n    if summary_op is Supervisor.USE_DEFAULT:\n        summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)\n        if summary_op is None:\n            summary_op = _summary.merge_all()\n            if summary_op is not None:\n                ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)\n    self._summary_op = summary_op", "docstring": "Initializes summary_op.\n\nArgs:\nsummary_op: An Operation that returns a Summary for the event logs. If set\nto USE_DEFAULT, create an op that merges all the summaries.", "source": "github-repos"}
{"code": "def ReadFrom(self, byte_stream):\n    try:\n        return self._struct.unpack_from(byte_stream)\n    except (TypeError, struct.error) as exception:\n        raise IOError('Unable to read byte stream with error: {0!s}'.format(exception))", "docstring": "Read values from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\ntuple[object, ...]: values copies from the byte stream.\n\nRaises:\nIOError: if byte stream cannot be read.\nOSError: if byte stream cannot be read.", "source": "codesearchnet"}
{"code": "def resume():\n    t = timer()\n    if f.t.stopped:\n        raise StoppedError('Cannot resume stopped timer.')\n    if (not f.t.paused):\n        raise PausedError('Cannot resume timer that is not paused.')\n    f.t.paused = False\n    f.t.start_t = t\n    f.t.last_t = t\n    return t", "docstring": "Resume a paused timer, re-activating it.  Subsequent time accumulates in\nthe total.\n\nReturns:\nfloat: The current time.\n\nRaises:\nPausedError: If timer was not in paused state.\nStoppedError: If timer was already stopped.", "source": "codesearchnet"}
{"code": "def _translate(pattern, case_sensitive=True):\n    \n    \n    if not case_sensitive:\n        pattern = pattern.lower()\n    i, n = 0, len(pattern)\n    res = \"\"\n    while i < n:\n        c = pattern[i]\n        i = i + 1\n        if c == \"*\":\n            res = res + \"[^/]*\"\n        elif c == \"?\":\n            res = res + \".\"\n        elif c == \"[\":\n            j = i\n            if j < n and pattern[j] == \"!\":\n                j = j + 1\n            if j < n and pattern[j] == \"]\":\n                j = j + 1\n            while j < n and pattern[j] != \"]\":\n                j = j + 1\n            if j >= n:\n                res = res + \"\\\\[\"\n            else:\n                stuff = pattern[i:j].replace(\"\\\\\", \"\\\\\\\\\")\n                i = j + 1\n                if stuff[0] == \"!\":\n                    stuff = \"^\" + stuff[1:]\n                elif stuff[0] == \"^\":\n                    stuff = \"\\\\\" + stuff\n                res = \"%s[%s]\" % (res, stuff)\n        else:\n            res = res + re.escape(c)\n    return res", "docstring": "Translate a wildcard pattern to a regular expression.\n\nThere is no way to quote meta-characters.\n\nArguments:\npattern (str): A wildcard pattern.\ncase_sensitive (bool): Set to `False` to use a case\ninsensitive regex (default `True`).\n\nReturns:\nstr: A regex equivalent to the given pattern.", "source": "juraj-google-style"}
{"code": "def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):\n    \n    search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1]\n    functions = util.make_list(fn)\n    search = list(product(functions, util.dict_product(kwargs)))\n\n    results = []\n    for fn, kw in search:\n        if not pairwise:\n            r = self.index.to_series().apply(lambda step: fn(step, **kw))\n        else:\n            r = apply_pairwise(self, fn,\n                               symmetric=symmetric, diagonal=diagonal, block=block,\n                               **kw)\n\n        name = [] if len(functions) == 1 else [fn.__name__]\n        name += util.dict_subset(kw, search_keys).values()\n\n        if isinstance(r, pd.DataFrame):\n            columns = pd.MultiIndex.from_tuples(\n                    [tuple(name + util.make_list(c)) for c in r.columns])\n            r.columns = columns\n        else:\n            r.name = tuple(name)\n        results.append(r)\n\n    if len(results) > 1:\n        result = pd.concat(results, axis=1)\n        \n        column_names = [] if len(functions) == 1 else [None]\n        column_names += search_keys\n        column_names += [None]*(len(result.columns.names)-len(column_names))\n        result.columns.names = column_names\n\n        return StepFrame(result)\n    else:\n        result = results[0]\n        if isinstance(result, pd.DataFrame):\n            return StepFrame(result)\n        else:\n            result.name = functions[0].__name__\n            return StepSeries(result)", "docstring": "Apply function to each step object in the index\n\nArgs:\nfn: function to apply. If a list then each function is applied\npairwise: whether to apply the function to pairs of steps\nsymmetric, diagonal, block: passed to apply_pairwise when pairwise=True\nkwargs: a keyword arguments to pass to each function. Arguments\nwith list value are grid searched using util.dict_product.\n\nReturns: a StepFrame or StepSeries", "source": "juraj-google-style"}
{"code": "def _validate_inputs(self, graph_def, input_tensors):\n    self._save_conversion_params_metric(graph_def)\n    self._quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n    self._validate_inference_input_output_types(self._quant_mode)\n    if not self._is_unknown_shapes_allowed():\n        for tensor in input_tensors:\n            shape_list = tensor.shape.as_list()\n            if None in shape_list[1:]:\n                raise ValueError(\"None is only supported in the 1st dimension. Tensor '{0}' has invalid shape '{1}'.\".format(_get_tensor_name(tensor), shape_list))\n            elif shape_list and shape_list[0] is None:\n                shape = tensor.shape.as_list()\n                shape[0] = 1\n                tensor.set_shape(shape)\n    if self._trackable_obj is None or not hasattr(self._trackable_obj, 'graph_debug_info'):\n        self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)\n    else:\n        self._debug_info = _get_debug_info(_convert_debug_info_func(self._trackable_obj.graph_debug_info), graph_def)", "docstring": "Validate the input parameters.\n\nArgs:\ngraph_def: The TensorFlow GraphDef.\ninput_tensors: List of input tensors.\n\nRaise:\nValueError: Input shape is not specified. Invalid quantization parameters.", "source": "github-repos"}
{"code": "def size(input: ragged_tensor.Ragged, out_type=dtypes.int32, name=None):\n    if ragged_tensor.is_ragged(input):\n        return array_ops.size(input.flat_values, out_type=out_type, name=name)\n    else:\n        return array_ops.size(input, out_type=out_type, name=name)", "docstring": "Returns the size of a potentially ragged tensor.\n\nThe size of a ragged tensor is the size of its inner values.\n\n#### Example:\n\n>>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy().item()\n3\n\nArgs:\ninput: A potentially ragged `Tensor`.\nout_type: The numeric output type for the operation.\nname: A name for the operation (optional).\n\nReturns:\nA Tensor of type `out_type`.", "source": "github-repos"}
{"code": "def __init__(self, meter_id=Meter.OFPM_ALL):\n        \n        super().__init__(InstructionType.OFPIT_METER)\n        self.meter_id = meter_id", "docstring": "Create a InstructionMeter with the optional parameters below.\n\nArgs:\nmeter_id (int): Meter instance.", "source": "juraj-google-style"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(RevocationReason, self).read(istream, kmip_version=kmip_version)\n        tstream = BytearrayStream(istream.read(self.length))\n\n        self.revocation_code = RevocationReasonCode()\n        self.revocation_code.read(tstream, kmip_version=kmip_version)\n\n        if self.is_tag_next(Tags.REVOCATION_MESSAGE, tstream):\n            self.revocation_message = TextString()\n            self.revocation_message.read(tstream, kmip_version=kmip_version)\n\n        self.is_oversized(tstream)\n        self.validate()", "docstring": "Read the data encoding the RevocationReason object and decode it\ninto its constituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    event_type = event_values.get('event_type', None)\n    if event_type is not None:\n      event_values['event_type'] = self.GetEventTypeString(event_type)\n\n    \n\n    severity = event_values.get('severity', None)\n    if severity is not None:\n      event_values['severity'] = self.GetSeverityString(severity)\n\n    source_name = event_values.get('source_name', None)\n    message_identifier = event_values.get('message_identifier', None)\n    strings = event_values.get('strings', [])\n    if source_name and message_identifier:\n      message_string = formatter_mediator.GetWindowsEventMessage(\n          source_name, message_identifier)\n      if message_string:\n        try:\n          event_values['message_string'] = message_string.format(*strings)\n        except IndexError:\n          \n          pass\n\n    message_strings = []\n    for string in strings:\n      message_strings.append('\\'{0:s}\\''.format(string))\n    message_string = ', '.join(message_strings)\n    event_values['strings'] = '[{0:s}]'.format(message_string)\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions between\nformatters and other components, such as storage and Windows EventLog\nresources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def GetValueByPath(self, path_segments):\n    key = self.root_key\n    for path_segment in path_segments:\n        if isinstance(key, dict):\n            try:\n                key = key[path_segment]\n            except KeyError:\n                return None\n        elif isinstance(key, list):\n            try:\n                list_index = int(path_segment, 10)\n            except ValueError:\n                return None\n            key = key[list_index]\n        else:\n            return None\n        if (not key):\n            return None\n    return key", "docstring": "Retrieves a plist value by path.\n\nArgs:\npath_segments (list[str]): path segment strings relative to the root\nof the plist.\n\nReturns:\nobject: The value of the key specified by the path or None.", "source": "codesearchnet"}
{"code": "def get_counters(counter_list):\n    \n    if not isinstance(counter_list, list):\n        raise CommandExecutionError('counter_list must be a list of tuples')\n\n    try:\n        \n        query = win32pdh.OpenQuery()\n\n        \n        counters = build_counter_list(counter_list)\n\n        \n        for counter in counters:\n            counter.add_to_query(query)\n\n        \n        win32pdh.CollectQueryData(query)\n        \n        \n        time.sleep(1)\n        win32pdh.CollectQueryData(query)\n        ret = {}\n\n        for counter in counters:\n            try:\n                ret.update({counter.path: counter.value()})\n            except pywintypes.error as exc:\n                if exc.strerror == 'No data to return.':\n                    \n                    \n                    continue\n                else:\n                    raise\n\n    finally:\n        win32pdh.CloseQuery(query)\n\n    return ret", "docstring": "Get the values for the passes list of counters\n\nArgs:\ncounter_list (list):\nA list of counters to lookup\n\nReturns:\ndict: A dictionary of counters and their values", "source": "juraj-google-style"}
{"code": "def add_defaults(self, ctype: ContentType=None) -> 'InstanceNode':\n    val = self.value\n    if (not (isinstance(val, StructuredValue) and self.is_internal())):\n        return self\n    res = self\n    if isinstance(val, ObjectValue):\n        if val:\n            for mn in self._member_names():\n                m = (res._member(mn) if (res is self) else res.sibling(mn))\n                res = m.add_defaults(ctype)\n            res = res.up()\n        return self.schema_node._add_defaults(res, ctype)\n    if (not val):\n        return res\n    en = res[0]\n    while True:\n        res = en.add_defaults(ctype)\n        try:\n            en = res.next()\n        except NonexistentInstance:\n            break\n    return res.up()", "docstring": "Return the receiver with defaults added recursively to its value.\n\nArgs:\nctype: Content type of the defaults to be added. If it is\n``None``, the content type will be the same as receiver's.", "source": "codesearchnet"}
{"code": "def get_imagery(cls, lat, lon, date=None, dim=None, cloud_score=False):\n    instance = cls('planetary/earth/imagery')\n    filters = {'lat': lat, 'lon': lon, 'date': date, 'dim': dim, 'cloud_score': cloud_score}\n    return instance.get_resource(**filters)", "docstring": "Returns satellite image\n\nArgs:\nlat: latitude float\nlon: longitude float\ndate: date instance of available date from `get_assets`\ndim: width and height of image in degrees as float\ncloud_score: boolean to calculate the percentage of the image covered by clouds\n\nReturns:\njson", "source": "codesearchnet"}
{"code": "def iplot_state_qsphere(rho, figsize=None):\n    \n\n    \n    html_template = Template()\n\n    \n    javascript_template = Template()\n    rho = _validate_input_state(rho)\n    if figsize is None:\n        options = {}\n    else:\n        options = {'width': figsize[0], 'height': figsize[1]}\n\n    qspheres_data = []\n    \n    num = int(np.log2(len(rho)))\n\n    \n    weig, stateall = linalg.eigh(rho)\n\n    for _ in range(2**num):\n        \n        probmix = weig.max()\n        prob_location = weig.argmax()\n        if probmix > 0.001:\n            \n            \n            state = stateall[:, prob_location]\n            loc = np.absolute(state).argmax()\n            \n            for j in range(2**num):\n                test = np.absolute(np.absolute(state[j]) -\n                                   np.absolute(state[loc]))\n                if test < 0.001:\n                    loc = j\n                    break\n            \n            angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)\n            angleset = np.exp(-1j*angles)\n            state = angleset*state\n            state.flatten()\n\n            spherepoints = []\n            for i in range(2**num):\n                \n\n                element = bin(i)[2:].zfill(num)\n                weight = element.count(\"1\")\n\n                number_of_divisions = n_choose_k(num, weight)\n                weight_order = bit_string_index(element)\n\n                angle = weight_order * 2 * np.pi / number_of_divisions\n\n                zvalue = -2 * weight / num + 1\n                xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)\n                yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)\n\n                \n                prob = np.real(np.dot(state[i], state[i].conj()))\n                angles = (np.angle(state[i]) + 2 * np.pi) % (2 * np.pi)\n                qpoint = {\n                    'x': xvalue,\n                    'y': yvalue,\n                    'z': zvalue,\n                    'prob': prob,\n                    'phase': angles\n                }\n                spherepoints.append(qpoint)\n\n            \n            sphere = {\n                'points': spherepoints,\n                'eigenvalue': probmix\n            }\n\n            \n            qspheres_data.append(sphere)\n            weig[prob_location] = 0\n\n    div_number = str(time.time())\n    div_number = re.sub('[.]', '', div_number)\n\n    html = html_template.substitute({\n        'divNumber': div_number\n    })\n\n    javascript = javascript_template.substitute({\n        'data': qspheres_data,\n        'divNumber': div_number,\n        'options': options\n    })\n\n    display(HTML(html + javascript))", "docstring": "Create a Q sphere representation.\n\nGraphical representation of the input array, using a Q sphere for each\neigenvalue.\n\nArgs:\nrho (array): State vector or density matrix.\nfigsize (tuple): Figure size in pixels.", "source": "juraj-google-style"}
{"code": "def _GetMessageFromFactory(factory, full_name):\n    proto_descriptor = factory.pool.FindMessageTypeByName(full_name)\n    proto_cls = factory.GetPrototype(proto_descriptor)\n    return proto_cls", "docstring": "Get a proto class from the MessageFactory by name.\n\nArgs:\nfactory: a MessageFactory instance.\nfull_name: str, the fully qualified name of the proto type.\nReturns:\nA class, for the type identified by full_name.\nRaises:\nKeyError, if the proto is not found in the factory's descriptor pool.", "source": "codesearchnet"}
{"code": "def _kl_laplace_laplace(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_laplace_laplace\"):\n    \n    \n    distance = tf.abs(a.loc - b.loc)\n    ratio = a.scale / b.scale\n\n    return (-tf.math.log(ratio) - 1 + distance / b.scale +\n            ratio * tf.exp(-distance / a.scale))", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Laplace.\n\nArgs:\na: instance of a Laplace distribution object.\nb: instance of a Laplace distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_laplace_laplace\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "def set_privilege(self, name, value=None):\n        \n        cmd = 'username %s' % name\n        if value is not None:\n            if not isprivilege(value):\n                raise TypeError('priviledge value must be between 0 and 15')\n            cmd += ' privilege %s' % value\n        else:\n            cmd += ' privilege 1'\n        return self.configure(cmd)", "docstring": "Configures the user privilege value in EOS\n\nArgs:\nname (str): The name of the user to craete\n\nvalue (int): The privilege value to assign to the user.  Valid\nvalues are in the range of 0 to 15\n\nReturns:\nTrue if the operation was successful otherwise False\n\nRaises:\nTypeError: if the value is not in the valid range", "source": "juraj-google-style"}
{"code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n    outputs, new_state = cell_call_fn(inputs, state, **kwargs)\n\n    def assert_shape_match(inp, out):\n        inp.get_shape().assert_is_compatible_with(out.get_shape())\n\n    def default_residual_fn(inputs, outputs):\n        nest.assert_same_structure(inputs, outputs)\n        nest.map_structure(assert_shape_match, inputs, outputs)\n        return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)\n    res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)\n    return (res_outputs, new_state)", "docstring": "Run the cell and then apply the residual_fn on its inputs to its outputs.\n\nArgs:\ninputs: cell inputs.\nstate: cell state.\ncell_call_fn: Wrapped cell's method to use for step computation (cell's\n`__call__` or 'call' method).\n**kwargs: Additional arguments passed to the wrapped cell's `call`.\n\nReturns:\nTuple of cell outputs and new state.\n\nRaises:\nTypeError: If cell inputs and outputs have different structure (type).\nValueError: If cell inputs and outputs have different structure (value).", "source": "github-repos"}
{"code": "def bessel_i1(x, name=None):\n    with ops.name_scope(name, 'bessel_i1', [x]):\n        return gen_special_math_ops.bessel_i1(x)", "docstring": "Computes the Bessel i1 function of `x` element-wise.\n\nModified Bessel function of order 1.\n\nIt is preferable to use the numerically stabler function `i1e(x)` instead.\n\n>>> tf.math.special.bessel_i1([-1., -0.5, 0.5, 1.]).numpy()\narray([-0.5651591 , -0.25789431,  0.25789431,  0.5651591 ], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.i1\n@end_compatibility", "source": "github-repos"}
{"code": "def get_variation_for_experiment(self, experiment_id):\n    return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY)", "docstring": "Helper method to retrieve variation ID for given experiment.\n\nArgs:\nexperiment_id: ID for experiment for which variation needs to be looked up for.\n\nReturns:\nVariation ID corresponding to the experiment. None if no decision available.", "source": "codesearchnet"}
{"code": "def create_dir(self, directory_path, perm_bits=PERM_DEF):\n        \n        directory_path = self.make_string_path(directory_path)\n        directory_path = self.absnormpath(directory_path)\n        self._auto_mount_drive_if_needed(directory_path)\n        if self.exists(directory_path, check_link=True):\n            self.raise_os_error(errno.EEXIST, directory_path)\n        path_components = self._path_components(directory_path)\n        current_dir = self.root\n\n        new_dirs = []\n        for component in path_components:\n            directory = self._directory_content(current_dir, component)[1]\n            if not directory:\n                new_dir = FakeDirectory(component, filesystem=self)\n                new_dirs.append(new_dir)\n                current_dir.add_entry(new_dir)\n                current_dir = new_dir\n            else:\n                if S_ISLNK(directory.st_mode):\n                    directory = self.resolve(directory.contents)\n                current_dir = directory\n                if directory.st_mode & S_IFDIR != S_IFDIR:\n                    self.raise_os_error(errno.ENOTDIR, current_dir.path)\n\n        \n        \n        for new_dir in new_dirs:\n            new_dir.st_mode = S_IFDIR | perm_bits\n\n        self._last_ino += 1\n        current_dir.st_ino = self._last_ino\n        return current_dir", "docstring": "Create `directory_path`, and all the parent directories.\n\nHelper method to set up your test faster.\n\nArgs:\ndirectory_path: The full directory path to create.\nperm_bits: The permission bits as set by `chmod`.\n\nReturns:\nThe newly created FakeDirectory object.\n\nRaises:\nOSError: if the directory already exists.", "source": "juraj-google-style"}
{"code": "def get_momentum_variable(self):\n    optimizer = self.get_optimizer()\n    if hasattr(optimizer, 'rho'):\n        return optimizer.rho\n    elif hasattr(optimizer, 'beta_1'):\n        return optimizer.beta_1\n    return None", "docstring": "Extract values of momentum variables from optimizer\n\nReturns:\noptimizer's `rho` or `beta_1`", "source": "codesearchnet"}
{"code": "def __getitem__(self, key: Union[int, slice, str, utils.KeyPath, 'DecisionPoint']) -> Union[None, 'DNA', List[Optional['DNA']]]:\n    if isinstance(key, (int, slice)):\n        return self.children[key]\n    if isinstance(key, DNASpec):\n        key = key.id\n        return self._decision_by_id[key]\n    else:\n        v = self.named_decisions.get(key, None)\n        if v is None:\n            v = self._decision_by_id[key]\n        return v", "docstring": "Get an immediate child DNA or DNA in the sub-tree.\n\nArgs:\nkey: The key for retrieving the sub-DNA or sub-DNA list. The key should\nbe one of:\n1) An integer as the index of an immediate child DNA.\n2) A name (string) for named decisions whose DNASpec has a not-None\n`name` argument.\n3) An ID (string or KeyPath) for the decision point to retrieve.\nSee `DNASpec.id` for details.\n4) A DecisionPoint object whose decision value will be retrived.\n\nReturns:\nThe return value should be one of the following:\n1) A DNA object if the key only maps to a single DNA object.\n2) None if the decision point exists but it's inactive.\n3) A list of DNA or None if there are multiple decision points associated\nwith the key.", "source": "github-repos"}
{"code": "def _ExtractOAuth2Client(product_yaml_key, product_data, proxy_config):\n    oauth2_kwargs = {'proxy_config': proxy_config}\n    if all(((config in product_data) for config in _OAUTH2_INSTALLED_APP_KEYS)):\n        oauth2_args = [product_data['client_id'], product_data['client_secret'], product_data['refresh_token']]\n        oauth2_client = googleads.oauth2.GoogleRefreshTokenClient\n        for key in _OAUTH2_INSTALLED_APP_KEYS:\n            del product_data[key]\n    elif all(((config in product_data) for config in _OAUTH2_SERVICE_ACCT_KEYS)):\n        oauth2_args = [product_data['path_to_private_key_file'], googleads.oauth2.GetAPIScope(product_yaml_key)]\n        oauth2_kwargs.update({'sub': product_data.get('delegated_account')})\n        oauth2_client = googleads.oauth2.GoogleServiceAccountClient\n        for key in _OAUTH2_SERVICE_ACCT_KEYS:\n            del product_data[key]\n        for optional_key in _OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL:\n            if (optional_key in product_data):\n                del product_data[optional_key]\n    else:\n        raise googleads.errors.GoogleAdsValueError(('Your yaml file is incorrectly configured for OAuth2. You need to specify credentials for either the installed application flow (%s) or service account flow (%s).' % (_OAUTH2_INSTALLED_APP_KEYS, _OAUTH2_SERVICE_ACCT_KEYS)))\n    return oauth2_client(*oauth2_args, **oauth2_kwargs)", "docstring": "Generates an GoogleOAuth2Client subclass using the given product_data.\n\nArgs:\nproduct_yaml_key: a string key identifying the product being configured.\nproduct_data: a dict containing the configurations for a given product.\nproxy_config: a ProxyConfig instance.\n\nReturns:\nAn instantiated GoogleOAuth2Client subclass.\n\nRaises:\nA GoogleAdsValueError if the OAuth2 configuration for the given product is\nmisconfigured.", "source": "codesearchnet"}
{"code": "def mme_nodes(mme_base_url, token):\n    nodes = []\n    if ((not mme_base_url) or (not token)):\n        return nodes\n    url = ''.join([mme_base_url, '/nodes'])\n    nodes = matchmaker_request(url=url, token=token, method='GET')\n    LOG.info('Matchmaker has the following connected nodes:{}'.format(nodes))\n    return nodes", "docstring": "Return the available MatchMaker nodes\n\nArgs:\nmme_base_url(str): base URL of MME service\ntoken(str): MME server authorization token\n\nReturns:\nnodes(list): a list of node disctionaries", "source": "codesearchnet"}
{"code": "def validate_allowed_values(allowed_values, value):\n    \n    \n    if not allowed_values or isinstance(value, CFNParameter):\n        return True\n\n    return value in allowed_values", "docstring": "Support a variable defining which values it allows.\n\nArgs:\nallowed_values (Optional[list]): A list of allowed values from the\nvariable definition\nvalue (obj): The object representing the value provided for the\nvariable\n\nReturns:\nbool: Boolean for whether or not the value is valid.", "source": "juraj-google-style"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n        return\n\n    \n    if server is not None and author != channel.server.me:\n        \n        prefix = data[\"discord\"][\"servers\"][server.id][\"prefix\"]\n        if content.startswith(prefix):\n            \n            package = content.split(\" \")\n            command = package[0][len(prefix):]\n            args = package[1:]\n            arg = ' '.join(args)\n\n            \n            if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':\n                _data.cache[server.id] = _musicplayer.MusicPlayer(server.id)\n\n            \n            if command in ['play', 'playnext', 'playnow', 'playshuffle', 'insert',\n                           'pause', 'resume', 'skip', 'remove',\n                           'rewind', 'restart', 'shuffle', 'volume',\n                           'stop', 'destroy', 'front', 'movehere',\n                           'settopic', 'cleartopic', 'notopic', 'loop']:\n                try:\n                    await client.delete_message(message)\n                except discord.errors.NotFound:\n                    logger.warning(\"Could not delete music player command message - NotFound\")\n                except discord.errors.Forbidden:\n                    logger.warning(\"Could not delete music player command message - Forbidden\")\n\n            \n            if command == 'play':\n                await _data.cache[server.id].play(author, channel, arg)\n\n            if command == 'playnext':\n                await _data.cache[server.id].play(author, channel, arg, index=1)\n\n            if command == 'playnow':\n                await _data.cache[server.id].play(author, channel, arg, index=1, stop_current=True)\n\n            if command == 'playshuffle':\n                await _data.cache[server.id].play(author, channel, arg, shuffle=True)\n\n            if command == 'insert':\n                if len(args) >= 2:\n                    index = args[0]\n                    query = ' '.join(args[1:])\n                    await _data.cache[server.id].play(author, channel, query, index=index)\n                else:\n                    await _data.cache[server.id].play(author, channel, arg)\n\n            elif command == 'pause':\n                await _data.cache[server.id].pause()\n\n            elif command == 'resume':\n                await _data.cache[server.id].resume()\n\n            elif command == 'skip':\n                await _data.cache[server.id].skip(query=arg)\n\n            elif command == 'remove':\n                await _data.cache[server.id].remove(index=arg)\n\n            elif command == 'rewind':\n                await _data.cache[server.id].rewind(query=arg)\n\n            elif command == 'restart':\n                await _data.cache[server.id].rewind(query=\"0\")\n\n            elif command == 'shuffle':\n                await _data.cache[server.id].shuffle()\n\n            elif command == 'loop':\n                await _data.cache[server.id].set_loop(arg)\n\n            elif command == 'stop':\n                await _data.cache[server.id].stop(log_stop=True)\n\n            elif command == 'volume':\n                await _data.cache[server.id].setvolume(arg)\n\n            elif command == 'settopic':\n         
       await _data.cache[server.id].set_topic_channel(channel)\n\n            elif command == 'cleartopic' or command == 'notopic':\n                await _data.cache[server.id].clear_topic_channel(channel)\n\n            elif command == 'nowplaying':\n                await _data.cache[server.id].nowplaying_info(channel)\n\n            elif command == 'destroy':\n                await _data.cache[server.id].destroy()\n\n            elif command == 'front' or command == 'movehere':\n                await _data.cache[server.id].movehere(channel)", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):\n    flat_inputs = nest.flatten(inputs)\n    output_tensor_shape = output.shape\n    output_shape = array_ops.shape(output)\n    output = array_ops.reshape(output, [-1])\n\n    def loop_fn(i):\n        y = array_ops.gather(output, i)\n        return gradient_ops.gradients(y, flat_inputs)\n    try:\n        output_size = int(output.shape[0])\n    except TypeError:\n        output_size = array_ops.shape(output)[0]\n    if use_pfor:\n        pfor_outputs = control_flow_ops.pfor(loop_fn, output_size, parallel_iterations=parallel_iterations)\n    else:\n        pfor_outputs = control_flow_ops.for_loop(loop_fn, [output.dtype] * len(flat_inputs), output_size, parallel_iterations=parallel_iterations)\n    for i, out in enumerate(pfor_outputs):\n        if isinstance(out, tensor.Tensor):\n            new_shape = array_ops.concat([output_shape, array_ops.shape(out)[1:]], axis=0)\n            out = array_ops.reshape(out, new_shape)\n            out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))\n            pfor_outputs[i] = out\n    return nest.pack_sequence_as(inputs, pfor_outputs)", "docstring": "Computes jacobian of `output` w.r.t. `inputs`.\n\nArgs:\noutput: A tensor.\ninputs: A tensor or a nested structure of tensor objects.\nuse_pfor: If true, uses pfor for computing the jacobian. Else uses\ntf.while_loop.\nparallel_iterations: A knob to control how many iterations and dispatched in\nparallel. This knob can be used to control the total memory usage.\n\nReturns:\nA tensor or a nested structure of tensors with the same structure as\n`inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding\nvalue in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has\nshape [x_1, ..., x_m], the corresponding jacobian has shape\n[y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is\nsparse (IndexedSlices), jacobian function currently makes it dense and\nreturns a Tensor instead. This may change in the future.", "source": "github-repos"}
{"code": "def set_dimension(tensor, axis, value):\n    shape = tensor.shape.as_list()\n    if (shape[axis] not in (value, None)):\n        message = 'Cannot set dimension {} of tensor {} to {}; is already {}.'\n        raise ValueError(message.format(axis, tensor.name, value, shape[axis]))\n    shape[axis] = value\n    tensor.set_shape(shape)", "docstring": "Set the length of a tensor along the specified dimension.\n\nArgs:\ntensor: Tensor to define shape of.\naxis: Dimension to set the static shape for.\nvalue: Integer holding the length.\n\nRaises:\nValueError: When the tensor already has a different length specified.", "source": "codesearchnet"}
{"code": "def money(s, thousand_sep=\".\", decimal_sep=\",\"):\n        \n        s = s.replace(thousand_sep, \"\")\n        s = s.replace(decimal_sep, \".\")\n        return Decimal(s)", "docstring": "Converts money amount in string to a Decimal object.\n\nWith the default arguments, the format is expected to be\n``-38.500,00``, where dots separate thousands and comma the decimals.\n\nArgs:\nthousand_sep: Separator for thousands.\ndecimal_sep: Separator for decimals.\n\nReturns:\nA ``Decimal`` object of the string encoded money amount.", "source": "juraj-google-style"}
{"code": "def add_real_directory(self, source_path, read_only=True, lazy_read=True, target_path=None):\n    source_path = self._path_without_trailing_separators(source_path)\n    if (not os.path.exists(source_path)):\n        self.raise_io_error(errno.ENOENT, source_path)\n    target_path = (target_path or source_path)\n    if lazy_read:\n        parent_path = os.path.split(target_path)[0]\n        if self.exists(parent_path):\n            parent_dir = self.get_object(parent_path)\n        else:\n            parent_dir = self.create_dir(parent_path)\n        new_dir = FakeDirectoryFromRealDirectory(source_path, self, read_only, target_path)\n        parent_dir.add_entry(new_dir)\n        self._last_ino += 1\n        new_dir.st_ino = self._last_ino\n    else:\n        new_dir = self.create_dir(target_path)\n        for (base, _, files) in os.walk(source_path):\n            new_base = os.path.join(new_dir.path, os.path.relpath(base, source_path))\n            for fileEntry in files:\n                self.add_real_file(os.path.join(base, fileEntry), read_only, os.path.join(new_base, fileEntry))\n    return new_dir", "docstring": "Create a fake directory corresponding to the real directory at the\nspecified path.  Add entries in the fake directory corresponding to\nthe entries in the real directory.\n\nArgs:\nsource_path: The path to the existing directory.\nread_only: If set, all files under the directory are treated as\nread-only, e.g. a write access raises an exception;\notherwise, writing to the files changes the fake files only\nas usually.\nlazy_read: If set (default), directory contents are only read when\naccessed, and only until the needed subdirectory level.\n\n.. note:: This means that the file system size is only updated\nat the time the directory contents are read; set this to\n`False` only if you are dependent on accurate file system\nsize in your test\ntarget_path: If given, the target directory, otherwise,\nthe target directory is the same as `source_path`.\n\nReturns:\nthe newly created FakeDirectory object.\n\nRaises:\nOSError: if the directory does not exist in the real file system.\nIOError: if the directory already exists in the fake file system.", "source": "codesearchnet"}
{"code": "def is_diagonal_scale(scale):\n    if (not isinstance(scale, tf.linalg.LinearOperator)):\n        raise TypeError((\"Expected argument 'scale' to be instance of LinearOperator. Found: %s\" % scale))\n    return (isinstance(scale, tf.linalg.LinearOperatorIdentity) or isinstance(scale, tf.linalg.LinearOperatorScaledIdentity) or isinstance(scale, tf.linalg.LinearOperatorDiag))", "docstring": "Returns `True` if `scale` is a `LinearOperator` that is known to be diag.\n\nArgs:\nscale:  `LinearOperator` instance.\n\nReturns:\nPython `bool`.\n\nRaises:\nTypeError:  If `scale` is not a `LinearOperator`.", "source": "codesearchnet"}
{"code": "def _slot_dict(self, slot_name):\n    named_slots = self._slots.get(slot_name, None)\n    if named_slots is None:\n        named_slots = {}\n        self._slots[slot_name] = named_slots\n    return named_slots", "docstring": "Returns a dict for caching slots created under the given name.\n\nArgs:\nslot_name: Name for the slot.\n\nReturns:\nA dict that maps primary `Variable` objects to the slot created\nfor that variable, under the given slot name.", "source": "github-repos"}
{"code": "def get_cohp_by_label(self, label):\n        \n        if label.lower() == \"average\":\n            return Cohp(efermi=self.efermi, energies=self.energies,\n                        cohp=self.cohp, are_coops=self.are_coops, icohp=self.icohp)\n        else:\n            try:\n                return Cohp(efermi=self.efermi, energies=self.energies,\n                            cohp=self.all_cohps[label].get_cohp(spin=None, integrated=False),\n                            are_coops=self.are_coops,\n                            icohp=self.all_cohps[label].get_icohp(spin=None))\n\n            except KeyError:\n                print(\"The label does not exist\")", "docstring": "Get specific COHP object.\n\nArgs:\nlabel: string (for newer Lobster versions: a number)\n\nReturns:\nReturns the COHP object to simplify plotting", "source": "juraj-google-style"}
{"code": "def getWhoisInfo(domain):\n    \n    new = []\n\n    \n    try:\n        emails = {}\n        emails[\"type\"] = \"i3visio.alias\"\n        emails[\"value\"] = str(domain.split(\".\")[0])\n        emails[\"attributes\"] = []\n        new.append(emails)\n    except:\n        pass\n\n    info = whois.whois(domain)\n\n    if info.status == None:\n        raise Exception(\"UnknownDomainError: \" + domain + \" could not be resolved.\")\n\n    \n    try:\n        emails = {}\n        emails[\"type\"] = \"i3visio.email\"\n        if type(info.emails) is not list:\n            aux = [info.emails]\n            emails[\"value\"] = json.dumps(aux)\n        else:\n            emails[\"value\"] = json.dumps(info.emails)\n        emails[\"attributes\"] = []\n        new.append(emails)\n    except:\n        pass\n\n    \n    try:\n        tmp = {}\n        tmp[\"type\"] = \"i3visio.location.country\"\n        tmp[\"value\"] = str(info.country)\n        tmp[\"attributes\"] = []\n        new.append(tmp)\n    except:\n        pass\n\n    \n    try:\n        tmp = {}\n        tmp[\"type\"] = \"i3visio.registrar\"\n        tmp[\"value\"] = str(info.registrar)\n        tmp[\"attributes\"] = []\n        new.append(tmp)\n    except:\n        pass\n\n    \n    try:\n        tmp = {}\n        tmp[\"type\"] = \"i3visio.fullname\"\n        try:\n            tmp[\"value\"] = str(info.name)\n        except:\n            tmp[\"value\"] = info.name\n        tmp[\"attributes\"] = []\n        new.append(tmp)\n    except:\n        pass\n\n    return new", "docstring": "Method that trie to recover the whois info from a domain.\n\nArgs:\n-----\ndomain: The domain to verify.\n\nReturns:\n--------\ndict: A dictionary containing the result as an i3visio entity with its\n`value`, `type` and `attributes`.", "source": "juraj-google-style"}
{"code": "def read(self, vals):\n        \n        i = 0\n        count = int(vals[i])\n        i += 1\n        for _ in range(count):\n            obj = GroundTemperature()\n            obj.read(vals[i:i + obj.field_count])\n            self.add_ground_temperature(obj)\n            i += obj.field_count", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def get_keyvault(access_token, subscription_id, rgname, vault_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API])\n    return do_get(endpoint, access_token)", "docstring": "Gets details about the named key vault.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nvault_name (str): Name of the key vault.\n\nReturns:\nHTTP response. JSON body of key vault properties.", "source": "codesearchnet"}
{"code": "def find_in_coord_list(coord_list, coord, atol=1e-8):\n    \n    if len(coord_list) == 0:\n        return []\n    diff = np.array(coord_list) - np.array(coord)[None, :]\n    return np.where(np.all(np.abs(diff) < atol, axis=1))[0]", "docstring": "Find the indices of matches of a particular coord in a coord_list.\n\nArgs:\ncoord_list: List of coords to test\ncoord: Specific coordinates\natol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and\narray.\n\nReturns:\nIndices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.", "source": "juraj-google-style"}
{"code": "def _as_graph_element(obj):\n    conv_fn = getattr(obj, '_as_graph_element', None)\n    if conv_fn and callable(conv_fn):\n        return conv_fn()\n    return None", "docstring": "Convert `obj` to a graph element if possible, otherwise return `None`.\n\nArgs:\nobj: Object to convert.\n\nReturns:\nThe result of `obj._as_graph_element()` if that method is available;\notherwise `None`.", "source": "github-repos"}
{"code": "def _open_debug_interface(self, conn_id, callback, connection_string=None):\n    self._try_connect(connection_string)\n    callback(conn_id, self.id, True, None)", "docstring": "Enable debug interface for this IOTile device\n\nArgs:\nconn_id (int): the unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "codesearchnet"}
{"code": "def read_hdf(cls, path, key=None):\n    df = pd.read_hdf(path, key)\n    df['scored_calls'] = df['scored_calls'].apply((lambda x: json.loads(x)))\n    df['channel_values'] = df['channel_values'].apply((lambda x: json.loads(x)))\n    df['regions'] = df['regions'].apply((lambda x: json.loads(x)))\n    df['phenotype_calls'] = df['phenotype_calls'].apply((lambda x: json.loads(x)))\n    df['neighbors'] = df['neighbors'].apply((lambda x: json.loads(x)))\n    df['neighbors'] = df['neighbors'].apply((lambda x: (np.nan if (not isinstance(x, dict)) else dict(zip([int(y) for y in x.keys()], x.values())))))\n    df['frame_shape'] = df['frame_shape'].apply((lambda x: tuple(json.loads(x))))\n    df = cls(df)\n    f = h5py.File(path, 'r')\n    mpp = f[key].attrs['microns_per_pixel']\n    if (not np.isnan(mpp)):\n        df.microns_per_pixel = mpp\n    f.close()\n    return df", "docstring": "Read a CellDataFrame from an hdf5 file.\n\nArgs:\npath (str): the path to read from\nkey (str): the name of the location to read from\n\nReturns:\nCellDataFrame", "source": "codesearchnet"}
{"code": "def select_executor(elem, doc):\n    \n    executor = EXECUTORS['default']\n\n    if 'cmd' in elem.attributes.keys():\n        executor = elem.attributes['cmd']\n    elif 'runas' in elem.attributes.keys():\n        executor = EXECUTORS[elem.attributes['runas']]\n    elif elem.classes[0] != 'exec':\n        executor = EXECUTORS[elem.classes[0]]\n\n    return executor", "docstring": "Determines the executor for the code in `elem.text`.\n\nThe elem attributes and classes select the executor in this order (highest\nto lowest):\n- custom commands (cmd=...)\n- runas (runas=...) takes a key for the executors\n- first element class (.class) determines language and thus executor\n\nArgs:\nelem The AST element.\ndoc  The document.\n\nReturns:\nThe command to execute code.", "source": "juraj-google-style"}
{"code": "async def init(self, *, advertise_addr: str=None, listen_addr: str='0.0.0.0:2377', force_new_cluster: bool=False, swarm_spec: Mapping=None) -> str:\n    data = {'AdvertiseAddr': advertise_addr, 'ListenAddr': listen_addr, 'ForceNewCluster': force_new_cluster, 'Spec': swarm_spec}\n    response = (await self.docker._query_json('swarm/init', method='POST', data=data))\n    return response", "docstring": "Initialize a new swarm.\n\nArgs:\nListenAddr: listen address used for inter-manager communication\nAdvertiseAddr: address advertised to other nodes.\nForceNewCluster: Force creation of a new swarm.\nSwarmSpec: User modifiable swarm configuration.\n\nReturns:\nid of the swarm node", "source": "codesearchnet"}
{"code": "def Remove(self, row):\n    if ((row == 0) or (row > self.size)):\n        raise TableError('Attempt to remove header row')\n    new_table = []\n    for t_row in self._table:\n        if (t_row.row != row):\n            new_table.append(t_row)\n            if (t_row.row > row):\n                t_row.row -= 1\n    self._table = new_table", "docstring": "Removes a row from the table.\n\nArgs:\nrow: int, the row number to delete. Must be >= 1, as the header\ncannot be removed.\n\nRaises:\nTableError: Attempt to remove nonexistent or header row.", "source": "codesearchnet"}
{"code": "def delete_url(self, url, token=''):\n        \n        if (token == ''):\n            token = self._user_token\n\n        return requests.delete(url,\n                               headers={\n                                   'Authorization': 'Token {}'.format(token)},\n                               verify=False,)", "docstring": "Returns a delete resquest object taking in a url and user token.\n\nArguments:\nurl (str): The url to make post to\ntoken (str): The authentication token\n\nReturns:\nobj: Delete request object", "source": "juraj-google-style"}
{"code": "def plot_state_paulivec(rho, title='', figsize=None, color=None):\n    if (not HAS_MATPLOTLIB):\n        raise ImportError('Must have Matplotlib installed.')\n    rho = _validate_input_state(rho)\n    if (figsize is None):\n        figsize = (7, 5)\n    num = int(np.log2(len(rho)))\n    labels = list(map((lambda x: x.to_label()), pauli_group(num)))\n    values = list(map((lambda x: np.real(np.trace(np.dot(x.to_matrix(), rho)))), pauli_group(num)))\n    numelem = len(values)\n    if (color is None):\n        color = '\n    ind = np.arange(numelem)\n    width = 0.5\n    (fig, ax) = plt.subplots(figsize=figsize)\n    ax.grid(zorder=0, linewidth=1, linestyle='--')\n    ax.bar(ind, values, width, color=color, zorder=2)\n    ax.axhline(linewidth=1, color='k')\n    ax.set_ylabel('Expectation value', fontsize=14)\n    ax.set_xticks(ind)\n    ax.set_yticks([(- 1), (- 0.5), 0, 0.5, 1])\n    ax.set_xticklabels(labels, fontsize=14, rotation=70)\n    ax.set_xlabel('Pauli', fontsize=14)\n    ax.set_ylim([(- 1), 1])\n    ax.set_facecolor('\n    for tick in (ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks()):\n        tick.label.set_fontsize(14)\n    ax.set_title(title, fontsize=16)\n    plt.close(fig)\n    return fig", "docstring": "Plot the paulivec representation of a quantum state.\n\nPlot a bargraph of the mixed state rho over the pauli matrices\n\nArgs:\nrho (ndarray): Numpy array for state vector or density matrix\ntitle (str): a string that represents the plot title\nfigsize (tuple): Figure size in inches.\ncolor (list or str): Color of the expectation value bars.\nReturns:\nmatplotlib.Figure: The matplotlib.Figure of the visualization\nRaises:\nImportError: Requires matplotlib.", "source": "codesearchnet"}
{"code": "def _unique_parameters(self) -> 'list[cfg.Variable]':\n    return []", "docstring": "Get unique parameter subtypes as variables.\n\nThis will retrieve 'children' of this value that contribute to the\ntype of it. So it will retrieve type parameters, but not attributes. To\nkeep the number of possible combinations reasonable, when we encounter\nmultiple instances of the same type, we include only one.\n\nReturns:\nA list of variables.", "source": "github-repos"}
{"code": "def metric(self, name, description, data_type, interval, keyed=False):\n    from .tcex_metrics_v2 import TcExMetricsV2\n    return TcExMetricsV2(self, name, description, data_type, interval, keyed)", "docstring": "Get instance of the Metrics module.\n\nArgs:\nname (string): The name for the metric.\ndescription (string): The description of the metric.\ndata_type (string): The type of metric: Sum, Count, Min, Max, First, Last, and Average.\ninterval (string): The metric interval: Hourly, Daily, Weekly, Monthly, and Yearly.\nkeyed (boolean): Indicates whether the data will have a keyed value.\n\nReturns:\n(object): An instance of the Metrics Class.", "source": "codesearchnet"}
{"code": "class FlaxBeamSearchOutput(ModelOutput):\n    sequences: Optional[jnp.ndarray] = None\n    scores: Optional[jnp.ndarray] = None", "docstring": "Flax Base class for outputs of decoder-only generation models using greedy search.\n\n\nArgs:\nsequences (`jnp.ndarray` of shape `(batch_size, max_length)`):\nThe generated sequences.\nscores (`jnp.ndarray` of shape `(batch_size,)`):\nThe scores (log probabilities) of the generated sequences.", "source": "github-repos"}
{"code": "def _dict_to_tensor(self, x, k1, k2):\n    return array_ops_stack.stack([array_ops_stack.stack([x[i, j] for j in range(k2)]) for i in range(k1)])", "docstring": "Convert a dictionary to a tensor.\n\nArgs:\nx: A k1 * k2 dictionary.\nk1: First dimension of x.\nk2: Second dimension of x.\n\nReturns:\nA k1 * k2 tensor.", "source": "github-repos"}
{"code": "def urlretrieve(url, filename, reporthook=None, data=None):\n\n    def chunk_read(response, chunk_size=8192, reporthook=None):\n        content_type = response.info().get('Content-Length')\n        total_size = -1\n        if content_type is not None:\n            total_size = int(content_type.strip())\n        count = 0\n        while True:\n            chunk = response.read(chunk_size)\n            count += 1\n            if reporthook is not None:\n                reporthook(count, chunk_size, total_size)\n            if chunk:\n                yield chunk\n            else:\n                break\n    response = urlopen(url, data)\n    with open(filename, 'wb') as fd:\n        for chunk in chunk_read(response, reporthook=reporthook):\n            fd.write(chunk)", "docstring": "Replacement for `urlretrieve` for Python 2.\n\nUnder Python 2, `urlretrieve` relies on `FancyURLopener` from legacy\n`urllib` module, known to have issues with proxy management.\n\nArgs:\nurl: url to retrieve.\nfilename: where to store the retrieved data locally.\nreporthook: a hook function that will be called once on establishment of\nthe network connection and once after each block read thereafter. The\nhook will be passed three arguments; a count of blocks transferred so\nfar, a block size in bytes, and the total size of the file.\ndata: `data` argument passed to `urlopen`.", "source": "github-repos"}
{"code": "def by_type(blocks, slist=None):\n    layout = []\n    data = []\n    int_vol = []\n    unknown = []\n    for i in blocks:\n        if (slist and (i not in slist)):\n            continue\n        if (blocks[i].is_vtbl and blocks[i].is_valid):\n            layout.append(i)\n        elif (blocks[i].is_internal_vol and blocks[i].is_valid):\n            int_vol.append(i)\n        elif blocks[i].is_valid:\n            data.append(i)\n        else:\n            unknown.append(i)\n    return (layout, data, int_vol, unknown)", "docstring": "Sort blocks into layout, internal volume, data or unknown\n\nArguments:\nObj:blocks   -- List of block objects.\nList:slist   -- (optional) List of block indexes.\n\nReturns:\nList:layout  -- List of block indexes of blocks containing the\nvolume table records.\nList:data    -- List of block indexes containing filesystem data.\nList:int_vol -- List of block indexes  containing volume ids\ngreater than UBI_INTERNAL_VOL_START that are not\nlayout volumes.\nList:unknown -- List of block indexes of blocks that failed validation\nof crc in ed_hdr or vid_hdr.", "source": "codesearchnet"}
{"code": "def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):\n        \n        import matplotlib.pyplot as plt\n        plt.figure(figsize=(9, 7))\n        seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[\n            temp]\n        plt.plot(self._bz.mu_steps, seebeck,\n                 linewidth=3.0)\n\n        self._plot_bg_limits()\n        self._plot_doping(temp)\n        if output == 'eig':\n            plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])\n        if xlim is None:\n            plt.xlim(-0.5, self._bz.gap + 0.5)\n        else:\n            plt.xlim(xlim[0], xlim[1])\n        plt.ylabel(\"Seebeck \\n coefficient  ($\\\\mu$V/K)\", fontsize=30.0)\n        plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30)\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n        plt.tight_layout()\n        return plt", "docstring": "Plot the seebeck coefficient in function of Fermi level\n\nArgs:\ntemp:\nthe temperature\nxlim:\na list of min and max fermi energy by default (0, and band gap)\nReturns:\na matplotlib object", "source": "juraj-google-style"}
{"code": "def are_symmetrically_related(self, point_a, point_b, tol=0.001):\n    if np.allclose(self.operate(point_a), point_b, atol=tol):\n        return True\n    if np.allclose(self.operate(point_b), point_a, atol=tol):\n        return True\n    return False", "docstring": "Checks if two points are symmetrically related.\n\nArgs:\npoint_a (3x1 array): First point.\npoint_b (3x1 array): Second point.\ntol (float): Absolute tolerance for checking distance.\n\nReturns:\nTrue if self.operate(point_a) == point_b or vice versa.", "source": "codesearchnet"}
{"code": "def Write(self, string):\n    \n    if sys.version_info[0] < 3:\n      super(StdoutOutputWriter, self).Write(string)\n    else:\n      \n      \n      sys.stdout.write(string)", "docstring": "Writes a string to the output.\n\nArgs:\nstring (str): output.", "source": "juraj-google-style"}
{"code": "def unique(ar):\n    r\n\n    import dask.array as da\n\n    if isinstance(ar, da.core.Array):\n        return da.unique(ar)\n\n    return _unique(ar)", "docstring": "r\"\"\"Find the unique elements of an array.\n\nIt uses ``dask.array.unique`` if necessary.\n\nArgs:\nar (array_like): Input array.\n\nReturns:\narray_like: the sorted unique elements.", "source": "juraj-google-style"}
{"code": "def _RemoveDefaultAttrs(producer_op_list, graph_def):\n    producer_op_dict = {op.name: op for op in producer_op_list.op}\n    for node in graph_def.node:\n        if node.op in producer_op_dict:\n            op_def = op_def_registry.get(node.op)\n            if op_def is None:\n                continue\n            producer_op_def = producer_op_dict[node.op]\n            for key in list(node.attr):\n                if _FindAttrInOpDef(key, op_def) is None:\n                    attr_def = _FindAttrInOpDef(key, producer_op_def)\n                    if attr_def and attr_def.HasField('default_value') and (node.attr[key] == attr_def.default_value):\n                        del node.attr[key]", "docstring": "Removes unknown default attrs according to `producer_op_list`.\n\nRemoves any unknown attrs in `graph_def` (i.e. attrs that do not appear in\nregistered OpDefs) that have a default value in `producer_op_list`.\n\nArgs:\nproducer_op_list: OpList proto.\ngraph_def: GraphDef proto", "source": "github-repos"}
{"code": "def SetHasherNames(self, hasher_names_string):\n    hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(hasher_names_string)\n    debug_hasher_names = ', '.join(hasher_names)\n    logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))\n    self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)\n    self._hasher_names_string = hasher_names_string", "docstring": "Sets the hashers that should be enabled.\n\nArgs:\nhasher_names_string (str): comma separated names of hashers to enable.", "source": "codesearchnet"}
{"code": "def _set_auditpol_data(option, value):\n    auditpol_values = {'None': 'No Auditing', '0': 'No Auditing', '1': 'Success', '2': 'Failure', '3': 'Success and Failure'}\n    defaults = _get_audit_defaults(option)\n    return __utils__['auditpol.set_setting'](name=defaults['Auditpol Name'], value=auditpol_values[value])", "docstring": "Helper function that updates the current applied settings to match what has\njust been set in the audit.csv files. We're doing it this way instead of\nrunning `gpupdate`\n\nArgs:\noption (str): The name of the option to set\nvalue (str): The value to set. ['None', '0', '1', '2', '3']\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``", "source": "codesearchnet"}
{"code": "def prepare_xml_read(data, objectify=False):\n    mod = (_objectify if objectify else etree)\n    if hasattr(data, 'readlines'):\n        data = mod.parse(data).getroot()\n    elif isinstance(data, list):\n        data = mod.fromstring(''.join(data))\n    elif isinstance(data, basestring):\n        data = mod.parse(open(data)).getroot()\n    else:\n        raise TypeError(('Unable to handle data of type %r' % type(data)))\n    return data", "docstring": "Prepare various input types for XML parsing.\n\nArgs:\ndata (iter): Data to read\nobjectify (bool): Parse using lxml's objectify data binding\n\nReturns:\netree.ElementTree: Tree suitable for parsing\n\nRaises:\nTypeError: Invalid value for data", "source": "codesearchnet"}
{"code": "def _parse_single_video(self, example_proto):\n    context_features = {'game_duration_loops': tf.io.FixedLenFeature([1], tf.int64), 'game_duration_seconds': tf.io.FixedLenFeature([1], tf.float32), 'n_steps': tf.io.FixedLenFeature([1], tf.int64), 'screen_size': tf.io.FixedLenFeature([2], tf.int64)}\n    sequence_features = {'rgb_screen': tf.io.FixedLenSequenceFeature([], tf.string)}\n    (_, seq_feat) = tf.io.parse_single_sequence_example(example_proto, context_features=context_features, sequence_features=sequence_features)\n    video_frames = tf.map_fn(tf.image.decode_png, seq_feat['rgb_screen'], dtype=tf.uint8)\n    return video_frames", "docstring": "Parses single video from the input tfrecords.\n\nArgs:\nexample_proto: tfExample proto with a single video.\n\nReturns:\ndict with all frames, positions and actions.", "source": "codesearchnet"}
{"code": "def on_snapshot(self, proto):\n        \n        TargetChange = firestore_pb2.TargetChange\n\n        target_changetype_dispatch = {\n            TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change,\n            TargetChange.ADD: self._on_snapshot_target_change_add,\n            TargetChange.REMOVE: self._on_snapshot_target_change_remove,\n            TargetChange.RESET: self._on_snapshot_target_change_reset,\n            TargetChange.CURRENT: self._on_snapshot_target_change_current,\n        }\n\n        target_change = proto.target_change\n        if str(target_change):\n            target_change_type = target_change.target_change_type\n            _LOGGER.debug(\"on_snapshot: target change: \" + str(target_change_type))\n            meth = target_changetype_dispatch.get(target_change_type)\n            if meth is None:\n                _LOGGER.info(\n                    \"on_snapshot: Unknown target change \" + str(target_change_type)\n                )\n                self.close(\n                    reason=\"Unknown target change type: %s \" % str(target_change_type)\n                )\n            else:\n                try:\n                    meth(proto)\n                except Exception as exc2:\n                    _LOGGER.debug(\"meth(proto) exc: \" + str(exc2))\n                    raise\n\n            \n            \n            \n\n        elif str(proto.document_change):\n            _LOGGER.debug(\"on_snapshot: document change\")\n\n            \n            \n            target_ids = proto.document_change.target_ids or []\n            removed_target_ids = proto.document_change.removed_target_ids or []\n            changed = False\n            removed = False\n\n            if WATCH_TARGET_ID in target_ids:\n                changed = True\n\n            if WATCH_TARGET_ID in removed_target_ids:\n                removed = True\n\n            if changed:\n                _LOGGER.debug(\"on_snapshot: document change: CHANGED\")\n\n                \n                document_change = proto.document_change\n                \n                document = document_change.document\n\n                data = _helpers.decode_dict(document.fields, self._firestore)\n\n                \n                \n                \n                document_name = document.name\n                db_str = self._firestore._database_string\n                db_str_documents = db_str + \"/documents/\"\n                if document_name.startswith(db_str_documents):\n                    document_name = document_name[len(db_str_documents) :]\n\n                document_ref = self._firestore.document(document_name)\n\n                snapshot = self.DocumentSnapshot(\n                    reference=document_ref,\n                    data=data,\n                    exists=True,\n                    read_time=None,\n                    create_time=document.create_time,\n                    update_time=document.update_time,\n                )\n                self.change_map[document.name] = snapshot\n\n            elif removed:\n                _LOGGER.debug(\"on_snapshot: document change: REMOVED\")\n                document = proto.document_change.document\n                self.change_map[document.name] = ChangeType.REMOVED\n\n        \n        \n\n        elif str(proto.document_delete):\n            _LOGGER.debug(\"on_snapshot: document change: DELETE\")\n            name = proto.document_delete.document\n            self.change_map[name] = ChangeType.REMOVED\n\n        elif 
str(proto.document_remove):\n            _LOGGER.debug(\"on_snapshot: document change: REMOVE\")\n            name = proto.document_remove.document\n            self.change_map[name] = ChangeType.REMOVED\n\n        elif proto.filter:\n            _LOGGER.debug(\"on_snapshot: filter update\")\n            if proto.filter.count != self._current_size():\n                \n                self._reset_docs()\n                \n                \n                \n\n        else:\n            _LOGGER.debug(\"UNKNOWN TYPE. UHOH\")\n            self.close(reason=ValueError(\"Unknown listen response type: %s\" % proto))", "docstring": "Called everytime there is a response from listen. Collect changes\nand 'push' the changes in a batch to the customer when we receive\n'current' from the listen response.\n\nArgs:\nlisten_response(`google.cloud.firestore_v1beta1.types.ListenResponse`):\nCallback method that receives a object to", "source": "juraj-google-style"}
{"code": "def get_tick(self, index):\n        \n\n        name = self.tick_name(index)\n        if name is None:\n            return [pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY), 0]\n\n        return [Error.NO_ERROR, self.ticks[name]]", "docstring": "Get a tick's interval.\n\nArgs:\nindex (int): The index of the tick that you want to fetch.\n\nReturns:\nint, int: Error code and The tick's interval in seconds.\n\nA value of 0 means that the tick is disabled.", "source": "juraj-google-style"}
{"code": "def data_in_db(db_data, user_data):\n    if isinstance(user_data, list):\n        if (db_data in user_data):\n            return True\n    return False", "docstring": "Validate db data in user data.\n\nArgs:\ndb_data (str): The data store in Redis.\nuser_data (list): The user provided data.\n\nReturns:\nbool: True if the data passed validation.", "source": "codesearchnet"}
{"code": "def add_timestamps(with_ms: bool=False, substream_name: str | None=None) -> processor.Processor:\n    if substream_name is None:\n        substream_name = ''\n    return processor.processor_function(functools.partial(_add_timestamps, with_ms=with_ms, substream_name=substream_name))", "docstring": "Adds timestamps to image chunks.\n\nBy default the timestamps are added with the format `mm:ss` where\n`mm` is the number of minutes, `ss` is the number of seconds.\n\nArgs:\nwith_ms: Whether to add milliseconds to the timestamp. When `True`, the\ntimestamp is added with the format `mm:ss.SSS` where `SSS` is the number\nof milliseconds.\nsubstream_name: The substream name to use for the timestamps.\n\nReturns:\nA processor that adds timestamps after each image chunk.", "source": "github-repos"}
{"code": "def set(config, section, opt, value):\n        \n        if section not in config.keys():\n            config[section] = {}\n\n        config[section][opt] = value", "docstring": "Sets specified option in the config.\n\nArgs:\nconfig (configobj.ConfigObj): config to work on.\nsection (str): section name.\nopt (str): option name.\nvalue: value to set option to.", "source": "juraj-google-style"}
{"code": "def override(state, solution):\n    old_ast = state.solution_ast\n    new_ast = ast.parse(solution)\n    if ((not isinstance(old_ast, ast.Module)) and (len(new_ast.body) == 1)):\n        expr = new_ast.body[0]\n        candidates = ([expr, expr.value] if isinstance(expr, ast.Expr) else [expr])\n        for node in candidates:\n            if isinstance(node, old_ast.__class__):\n                new_ast = node\n                break\n    kwargs = (state.messages[(- 1)] if state.messages else {})\n    child = state.to_child(solution_ast=new_ast, student_ast=state.student_ast, highlight=state.highlight, append_message={'msg': '', 'kwargs': kwargs})\n    return child", "docstring": "Override the solution code with something arbitrary.\n\nThere might be cases in which you want to temporarily override the solution code\nso you can allow for alternative ways of solving an exercise.\nWhen you use ``override()`` in an SCT chain, the remainder of that SCT chain will\nrun as if the solution code you specified is the only code that was in the solution.\n\nCheck the glossary for an example (pandas plotting)\n\nArgs:\nsolution: solution code as a string that overrides the original solution code.\nstate: State instance describing student and solution code. Can be omitted if used with Ex().", "source": "codesearchnet"}
{"code": "def get_pipeline_options(project: str, job_name: str, mode: str, num_workers: int=cfg.NUM_WORKERS, streaming: bool=True) -> PipelineOptions:\n    job_name = f'{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}'\n    staging_bucket = f'gs:\n    dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': cfg.REGION, 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py', 'streaming': streaming}\n    if num_workers:\n        dataflow_options.update({'num_workers': num_workers})\n    return PipelineOptions(flags=[], **dataflow_options)", "docstring": "Function to retrieve the pipeline options.\nArgs:\nproject: GCP project to run on\nmode: Indicator to run local, cloud or template\nnum_workers: Number of Workers for running the job parallely\nmax_num_workers: Maximum number of workers running the job parallely\nReturns:\nDataflow pipeline options", "source": "github-repos"}
{"code": "def check_provider_healthcheck(settings, default_provider='Discovery'):\n    ProviderHealthCheck = collections.namedtuple('ProviderHealthCheck', ['providers', 'has_healthcheck'])\n    eureka_enabled = settings['app']['eureka_enabled']\n    providers = settings['asg']['provider_healthcheck']\n    LOG.debug('Template defined Health Check Providers: %s', providers)\n    health_check_providers = []\n    has_healthcheck = False\n    normalized_default_provider = default_provider.capitalize()\n    if eureka_enabled:\n        LOG.info('Eureka enabled, enabling default Provider Health Check: %s', normalized_default_provider)\n        for (provider, active) in providers.items():\n            if (provider.lower() == normalized_default_provider.lower()):\n                providers[provider] = True\n                LOG.debug('Override defined Provider Health Check: %s -> %s', active, providers[provider])\n                break\n        else:\n            LOG.debug('Adding default Provider Health Check: %s', normalized_default_provider)\n            providers[normalized_default_provider] = True\n    for (provider, active) in providers.items():\n        if active:\n            health_check_providers.append(provider.capitalize())\n    LOG.info('Provider healthchecks: %s', health_check_providers)\n    if health_check_providers:\n        has_healthcheck = True\n    return ProviderHealthCheck(providers=health_check_providers, has_healthcheck=has_healthcheck)", "docstring": "Set Provider Health Check when specified.\n\nReturns:\ncollections.namedtuple: **ProviderHealthCheck** with attributes:\n\n* providers (list): Providers set to use native Health Check.\n* has_healthcheck (bool): If any native Health Checks requested.", "source": "codesearchnet"}
{"code": "def __init__(self, raise_warnings=False):\n    \n    self.raise_warnings = raise_warnings\n    self.accumulator = SimpleProblemAccumulator()", "docstring": "Initialise.\n\nArgs:\nraise_warnings: If this is True then warnings are also raised as\nexceptions.\nIf it is false, warnings are printed to the console using\nSimpleProblemAccumulator.", "source": "juraj-google-style"}
{"code": "def get_group(self, group_id):\n    \n\n    group = self.group_id_map.get(group_id)\n\n    if group:\n      return group\n\n    self.logger.error('Group ID \"%s\" is not in datafile.' % group_id)\n    self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID_ERROR))\n    return None", "docstring": "Get group for the provided group ID.\n\nArgs:\ngroup_id: Group ID for which group is to be determined.\n\nReturns:\nGroup corresponding to the provided group ID.", "source": "juraj-google-style"}
{"code": "def ParseFileEntry(self, parser_mediator, file_entry):\n    \n    stat_object = file_entry.GetStat()\n    if not stat_object:\n      return\n\n    file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)\n\n    event_data = FileStatEventData()\n    event_data.file_entry_type = stat_object.type\n    event_data.file_size = getattr(stat_object, 'size', None)\n    event_data.file_system_type = file_system_type\n    event_data.is_allocated = file_entry.IsAllocated()\n\n    if file_entry.access_time:\n      event = time_events.DateTimeValuesEvent(\n          file_entry.access_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if file_entry.creation_time:\n      event = time_events.DateTimeValuesEvent(\n          file_entry.creation_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if file_entry.change_time:\n      event = time_events.DateTimeValuesEvent(\n          file_entry.change_time, definitions.TIME_DESCRIPTION_CHANGE)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if file_entry.modification_time:\n      event = time_events.DateTimeValuesEvent(\n          file_entry.modification_time,\n          definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    for time_attribute, usage in self._TIMESTAMP_DESCRIPTIONS.items():\n      posix_time = getattr(stat_object, time_attribute, None)\n      if posix_time is None:\n        continue\n\n      nano_time_attribute = '{0:s}_nano'.format(time_attribute)\n      nano_time_attribute = getattr(stat_object, nano_time_attribute, None)\n\n      timestamp = posix_time * 1000000\n      if nano_time_attribute is not None:\n        \n        micro_time_attribute, _ = divmod(nano_time_attribute, 10)\n        timestamp += micro_time_attribute\n\n      \n      if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and\n          not timestamp):\n        continue\n\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, usage)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a file entry.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_entry (dfvfs.FileEntry): a file entry.", "source": "juraj-google-style"}
{"code": "def __verify_server_version(self):\n    if (compare_versions('.'.join([_lib_major_version, _lib_minor_version]), self.product_version) > 0):\n        logger.warning('Client version {} connecting to server with newer minor release {}.'.format(_lib_full_version, self.product_version))\n    if (compare_versions(_lib_major_version, self.product_version) != 0):\n        raise InvalidSwimlaneProductVersion(self, '{}.0'.format(_lib_major_version), '{}.0'.format(str((int(_lib_major_version) + 1))))", "docstring": "Verify connected to supported server product version\n\nNotes:\nLogs warning if connecting to a newer minor server version\n\nRaises:\nswimlane.exceptions.InvalidServerVersion: If server major version is higher than package major version", "source": "codesearchnet"}
{"code": "def cosmic_link(variant_obj):\n    cosmic_ids = variant_obj.get('cosmic_ids')\n    if (not cosmic_ids):\n        return None\n    else:\n        cosmic_id = cosmic_ids[0]\n        url_template = 'https:\n    return url_template.format(cosmic_id)", "docstring": "Compose link to COSMIC Database.\n\nArgs:\nvariant_obj(scout.models.Variant)\n\nReturns:\nurl_template(str): Link to COSMIIC database if cosmic id is present", "source": "codesearchnet"}
{"code": "def call_fn(fn: TransitionOperator, args: Union[(Tuple[Any], Any)]) -> Any:\n    if (isinstance(args, (list, tuple)) and (not mcmc_util.is_namedtuple_like(args))):\n        args = args\n        return fn(*args)\n    else:\n        return fn(args)", "docstring": "Calls a transition operator with args, unpacking args if its a sequence.\n\nArgs:\nfn: A `TransitionOperator`.\nargs: Arguments to `fn`\n\nReturns:\nret: Return value of `fn`.", "source": "codesearchnet"}
{"code": "def get_counter(self, name, combine_fn):\n    with self._lock:\n        counter = self.counters.get(name, None)\n        if counter:\n            assert counter.combine_fn == combine_fn\n        else:\n            if isinstance(combine_fn, cy_combiners.AccumulatorCombineFn):\n                counter = AccumulatorCombineFnCounter(name, combine_fn)\n            else:\n                counter = Counter(name, combine_fn)\n            self.counters[name] = counter\n        return counter", "docstring": "Returns a counter with the requested name.\n\nPassing in the same name will return the same counter; the\ncombine_fn must agree.\n\nArgs:\nname: the name of this counter.  Typically has three parts:\n\"step-output-counter\".\ncombine_fn: the CombineFn to use for aggregation\nReturns:\nA new or existing counter with the requested name.", "source": "github-repos"}
{"code": "def _CreateEventTag(self, event, comment, labels):\n    \n    event_identifier = event.GetIdentifier()\n\n    event_tag = events.EventTag(comment=comment)\n    event_tag.SetEventIdentifier(event_identifier)\n    event_tag.AddLabels(labels)\n\n    event_identifier_string = event_identifier.CopyToString()\n    logger.debug('Created event tag: {0:s} for event: {1:s}'.format(\n        comment, event_identifier_string))\n\n    return event_tag", "docstring": "Creates an event tag.\n\nArgs:\nevent (EventObject): event to tag.\ncomment (str): event tag comment.\nlabels (list[str]): event tag labels.\n\nReturns:\nEventTag: the event tag.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def nr_cases(self, snv_cases=None, sv_cases=None):\n        \n        query = {}\n        \n        if snv_cases:\n            query = {'vcf_path': {'$exists':True}}\n        if sv_cases:\n            query = {'vcf_sv_path': {'$exists':True}}\n        if snv_cases and sv_cases:\n            query = None\n\n        return self.db.case.count_documents(query)", "docstring": "Return the number of cases in the database\n\nArgs:\nsnv_cases(bool): If only snv cases should be searched\nsv_cases(bool): If only snv cases should be searched\n\nReturns:\ncases (Iterable(Case)): A iterable with mongo cases", "source": "juraj-google-style"}
{"code": "async def _open_connection_http(self, location):\n        \n        sock = await connect_tcp(location[0], location[1], bind_host=self.source_address)\n        sock._active = True\n        return sock", "docstring": "Creates a normal async socket, returns it.\nArgs:\nlocation (tuple(str, int)): A tuple of net location (eg\n'127.0.0.1' or 'example.org') and port (eg 80 or 25000).", "source": "juraj-google-style"}
{"code": "def importGurobiSolution(self, grbmodel):\n    self.eval(''.join(('let {} := {};'.format(var.VarName, var.X) for var in grbmodel.getVars() if ('$' not in var.VarName))))", "docstring": "Import the solution from a gurobipy.Model object.\n\nArgs:\ngrbmodel: A :class:`gurobipy.Model` object with the model solved.", "source": "codesearchnet"}
{"code": "def get_use_xla_spmd(device_type):\n    return device_type == 'TPU' and '0' != os.environ.get('DTENSOR_TEST_USE_XLA_SPMD', '0')", "docstring": "Returns True when device_type is TPU and environment variable is set.\n\nArgs:\ndevice_type: A str representing the type of device on the mesh.\n\nReturns:\nbool: True when device_type is TPU and environment variable is set.", "source": "github-repos"}
{"code": "def __add__(self, other):\n        \n        if isinstance(other, FieldPath):\n            parts = self.parts + other.parts\n            return FieldPath(*parts)\n        elif isinstance(other, six.string_types):\n            parts = self.parts + FieldPath.from_string(other).parts\n            return FieldPath(*parts)\n        else:\n            return NotImplemented", "docstring": "Adds `other` field path to end of this field path.\n\nArgs:\nother (~google.cloud.firestore_v1beta1._helpers.FieldPath, str):\nThe field path to add to the end of this `FieldPath`.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    if token_ids_1 is None:\n        return len(sep + token_ids_0) * [0]\n    return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does\nnot make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def CheckAddressState(self, script_hash):\n    for (key, contract) in self._contracts.items():\n        if (contract.ScriptHash.ToBytes() == script_hash.ToBytes()):\n            return AddressState.InWallet\n    for watch in self._watch_only:\n        if (watch == script_hash):\n            return (AddressState.InWallet | AddressState.WatchOnly)\n    return AddressState.NoState", "docstring": "Determine the address state of the provided script hash.\n\nArgs:\nscript_hash (UInt160): a script hash to determine the address state of.\n\nReturns:\nAddressState: the address state.", "source": "codesearchnet"}
{"code": "def mds(means, weights, d):\n    X = dim_reduce(means, weights, d)\n    if (X.shape[0] == 2):\n        return X.dot(weights)\n    else:\n        return X.T.dot(weights)", "docstring": "Dimensionality reduction using MDS.\n\nArgs:\nmeans (array): genes x clusters\nweights (array): clusters x cells\nd (int): desired dimensionality\n\nReturns:\nW_reduced (array): array of shape (d, cells)", "source": "codesearchnet"}
{"code": "def memory_write32(self, addr, data, zone=None):\n    return self.memory_write(addr, data, zone, 32)", "docstring": "Writes words to memory of a target system.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of words to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of words written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "codesearchnet"}
{"code": "def set_status(self, status, msg):\n    if (len(msg) > 2000):\n        msg = msg[:2000]\n        msg += '\\n... snip ...\\n'\n    if ((self.status == self.S_LOCKED) or (status == self.S_LOCKED)):\n        err_msg = ('Locked files must be explicitly unlocked before calling set_status but\\ntask.status = %s, input status = %s' % (self.status, status))\n        raise RuntimeError(err_msg)\n    status = Status.as_status(status)\n    changed = True\n    if hasattr(self, '_status'):\n        changed = (status != self._status)\n    self._status = status\n    if (status == self.S_RUN):\n        if (self.datetimes.start is None):\n            self.datetimes.start = datetime.datetime.now()\n    if changed:\n        if (status == self.S_SUB):\n            self.datetimes.submission = datetime.datetime.now()\n            self.history.info(('Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s ' % (self.mpi_procs, self.omp_threads, self.mem_per_proc.to('Gb'), msg)))\n        elif (status == self.S_OK):\n            self.history.info('Task completed %s', msg)\n        elif (status == self.S_ABICRITICAL):\n            self.history.info('Status set to S_ABI_CRITICAL due to: %s', msg)\n        else:\n            self.history.info('Status changed to %s. msg: %s', status, msg)\n    if (status == self.S_DONE):\n        self._on_done()\n    if (status == self.S_OK):\n        if (not self.finalized):\n            self._on_ok()\n            if ((self.gc is not None) and (self.gc.policy == 'task')):\n                self.clean_output_files()\n        if (self.status == self.S_OK):\n            self.send_signal(self.S_OK)\n    return status", "docstring": "Set and return the status of the task.\n\nArgs:\nstatus: Status object or string representation of the status\nmsg: string with human-readable message used in the case of errors.", "source": "codesearchnet"}
{"code": "def deploy_ray_func(func, partition, kwargs):  \n    \n    try:\n        return func(partition, **kwargs)\n    \n    \n    \n    except ValueError:\n        return func(partition.copy(), **kwargs)", "docstring": "Deploy a function to a partition in Ray.\n\nNote: Ray functions are not detected by codecov (thus pragma: no cover)\n\nArgs:\nfunc: The function to apply.\npartition: The partition to apply the function to.\nkwargs: A dictionary of keyword arguments for the function.\n\nReturns:\nThe result of the function.", "source": "juraj-google-style"}
{"code": "def __send_ses_email(self, recipients, subject, body_html, body_text):\n    source_arn = dbconfig.get('source_arn', NS_EMAIL)\n    return_arn = dbconfig.get('return_path_arn', NS_EMAIL)\n    session = get_local_aws_session()\n    ses = session.client('ses', region_name=dbconfig.get('ses_region', NS_EMAIL, 'us-west-2'))\n    body = {}\n    if body_html:\n        body['Html'] = {'Data': body_html}\n    if body_text:\n        body['Text'] = {'Data': body_text}\n    ses_options = {'Source': self.sender, 'Destination': {'ToAddresses': recipients}, 'Message': {'Subject': {'Data': subject}, 'Body': body}}\n    if (source_arn and return_arn):\n        ses_options.update({'SourceArn': source_arn, 'ReturnPathArn': return_arn})\n    ses.send_email(**ses_options)", "docstring": "Send an email using SES\n\nArgs:\nrecipients (`1ist` of `str`): List of recipient email addresses\nsubject (str): Subject of the email\nbody_html (str): HTML body of the email\nbody_text (str): Text body of the email\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def _create_events_writer(self, directory):\n    total_size = 0\n    events_files = self._fetch_events_files_on_disk()\n    for file_name in events_files:\n        file_path = os.path.join(self._events_directory, file_name)\n        total_size += tf.io.gfile.stat(file_path).length\n    if (total_size >= self.total_file_size_cap_bytes):\n        for file_name in events_files:\n            if (total_size < self.total_file_size_cap_bytes):\n                break\n            file_path = os.path.join(self._events_directory, file_name)\n            file_size = tf.io.gfile.stat(file_path).length\n            try:\n                tf.io.gfile.remove(file_path)\n                total_size -= file_size\n                logger.info('Deleted %s because events files take up over %d bytes', file_path, self.total_file_size_cap_bytes)\n            except IOError as err:\n                logger.error('Deleting %s failed: %s', file_path, err)\n    self._events_file_count += 1\n    file_path = ('%s.%d.%d' % (os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT), time.time(), self._events_file_count))\n    logger.info('Creating events file %s', file_path)\n    return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))", "docstring": "Creates a new events writer.\n\nArgs:\ndirectory: The directory in which to write files containing events.\n\nReturns:\nA new events writer, which corresponds to a new events file.", "source": "codesearchnet"}
{"code": "def register_list(self):\n    num_items = self.MAX_NUM_CPU_REGISTERS\n    buf = (ctypes.c_uint32 * num_items)()\n    num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items)\n    return buf[:num_regs]", "docstring": "Returns a list of the indices for the CPU registers.\n\nThe returned indices can be used to read the register content or grab\nthe register name.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nList of registers.", "source": "codesearchnet"}
{"code": "def GetUpdates(self, s3_client, bucket, obj, since):\n    try:\n        if since is not None:\n            response = s3_client.get_object(Bucket=bucket, IfModifiedSince=timestamps.FromTimestampToDateTime(since), Key=obj)\n        else:\n            response = s3_client.get_object(Bucket=bucket, Key=obj)\n        body = response['Body']\n        last_modified_ts = timestamps.FromDateTimeToTimestamp(response['LastModified'])\n    except ClientError as e:\n        error_code = int(e.response['Error']['Code'])\n        if error_code == 304:\n            return []\n        self.log.error('error getting S3 object ({}): {}'.format(obj, e))\n        raise error.SourceUnavailable('unable to download object from S3')\n    data_map = self.GetMap(cache_info=body)\n    data_map.SetModifyTimestamp(last_modified_ts)\n    return data_map", "docstring": "Get updates from a source.\n\nArgs:\ns3_client: initialized s3 client\nbucket: s3 bucket\nobj: object with the data\nsince: a timestamp representing the last change (None to force-get)\n\nReturns:\nA tuple containing the map of updates and a maximum timestamp\n\nRaises:\nValueError: an object in the source map is malformed\nConfigurationError:", "source": "github-repos"}
{"code": "def to_hour(num) -> str:\n    \n    to_str = str(int(num))\n    return pd.Timestamp(f'{to_str[:-2]}:{to_str[-2:]}').strftime('%H:%M')", "docstring": "Convert YAML input to hours\n\nArgs:\nnum: number in YMAL file, e.g., 900, 1700, etc.\n\nReturns:\nstr\n\nExamples:\n>>> to_hour(900)\n'09:00'\n>>> to_hour(1700)\n'17:00'", "source": "juraj-google-style"}
{"code": "def make_initializable_iterator(self):\n    return self._make_initializable_iterator()", "docstring": "Get an initializable iterator for DistributedDatasetV1.\n\nNote: This API is deprecated. Please use\n`tf.compat.v1.data.make_initializable_iterator(dataset)` to create an\ninitializable iterator.\n\nReturns:\nA DistributedIteratorV1 instance.", "source": "github-repos"}
{"code": "def _fluent_range_type(cls, fluents, ordering) -> Sequence[str]:\n    range_types = []\n    for name in ordering:\n        fluent = fluents[name]\n        range_type = fluent.range\n        range_types.append(range_type)\n    return tuple(range_types)", "docstring": "Returns the range types of `fluents` following the given `ordering`.\n\nReturns:\nSequence[str]: A tuple of range types representing\nthe range of each fluent.", "source": "codesearchnet"}
{"code": "def _scalar_to_vector(self, m):\n        \n        if not isinstance(m.y0, numbers.Number):\n            return m\n        else:\n            m = copy.deepcopy(m)\n            t0 = 0.0\n            if isinstance(m.y0, numbers.Integral):\n                numtype = np.float64\n            else:\n                numtype = type(m.y0)\n            y0_orig = m.y0\n            m.y0 = np.array([m.y0], dtype=numtype)\n            def make_vector_fn(fn):\n                def newfn(y, t):\n                    return np.array([fn(y[0], t)], dtype=numtype)\n                newfn.__name__ = fn.__name__\n                return newfn\n            def make_matrix_fn(fn):\n                def newfn(y, t):\n                    return np.array([[fn(y[0], t)]], dtype=numtype)\n                newfn.__name__ = fn.__name__\n                return newfn\n            def make_coupling_fn(fn):\n                def newfn(source_y, target_y, weight):\n                    return np.array([fn(source_y[0], target_y[0], weight)])\n                newfn.__name__ = fn.__name__\n                return newfn\n            if isinstance(m.f(y0_orig, t0), numbers.Number):\n                m.f = make_vector_fn(m.f)\n            if hasattr(m, 'G') and isinstance(m.G(y0_orig,t0), numbers.Number):\n                m.G = make_matrix_fn(m.G)\n            if (hasattr(m, 'coupling') and\n                    isinstance(m.coupling(y0_orig, y0_orig, 0.5),\n                               numbers.Number)):\n                m.coupling = make_coupling_fn(m.coupling)\n            return m", "docstring": "Allow submodels with scalar equations. Convert to 1D vector systems.\nArgs:\nm (Model)", "source": "juraj-google-style"}
{"code": "def main():\n    parser = argparse.ArgumentParser(description='Cherry picking automation.')\n    parser.add_argument('--filename', help='path to whl file we are copying', required=True)\n    parser.add_argument('--new_py_ver', help='two digit py version eg. 27 or 33', required=True)\n    args = parser.parse_args()\n    args.filename = os.path.abspath(args.filename)\n    check_existence(args.filename)\n    regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)\n    directory = regex_groups.group(1)\n    package = regex_groups.group(2)\n    version = regex_groups.group(3)\n    origin_tag = regex_groups.group(4)\n    old_py_ver = re.search('(cp\\\\d\\\\d)', origin_tag).group(1)\n    new_tag = origin_tag.replace(old_py_ver, 'cp' + args.new_py_ver)\n    copy_binary(directory, origin_tag, new_tag, version, package)", "docstring": "This script copies binaries.\n\nRequirements:\nfilename: The path to the whl file\nAND\nnew_py_ver: Create a nightly tag with current date\n\nRaises:\nRuntimeError: If the whl file was not found", "source": "github-repos"}
{"code": "def delete(self, option=None):\n    write_pb = _helpers.pb_for_delete(self._document_path, option)\n    commit_response = self._client._firestore_api.commit(self._client._database_string, [write_pb], transaction=None, metadata=self._client._rpc_metadata)\n    return commit_response.commit_time", "docstring": "Delete the current document in the Firestore database.\n\nArgs:\noption (Optional[~.firestore_v1beta1.client.WriteOption]): A\nwrite option to make assertions / preconditions on the server\nstate of the document before applying changes.\n\nReturns:\ngoogle.protobuf.timestamp_pb2.Timestamp: The time that the delete\nrequest was received by the server. If the document did not exist\nwhen the delete was sent (i.e. nothing was deleted), this method\nwill still succeed and will still return the time that the\nrequest was received by the server.", "source": "codesearchnet"}
{"code": "def make_message(self, text, channel):\n    try:\n        channel_id = self.slack.channel_from_name(channel)['id']\n    except ValueError:\n        channel_id = channel\n    return pack({'text': text, 'type': 'message', 'channel': channel_id, 'id': self.message_id})", "docstring": "High-level function for creating messages. Return packed bytes.\n\nArgs:\ntext: {str}\nchannel: {str} Either name or ID", "source": "codesearchnet"}
{"code": "def extract(self, destdir, decompress='auto'):\n    for e in self.mardata.index.entries:\n        name = e.name\n        entry_path = safejoin(destdir, name)\n        entry_dir = os.path.dirname(entry_path)\n        mkdir(entry_dir)\n        with open(entry_path, 'wb') as f:\n            write_to_file(self.extract_entry(e, decompress), f)\n            os.chmod(entry_path, e.flags)", "docstring": "Extract the entire MAR file into a directory.\n\nArgs:\ndestdir (str): A local directory on disk into which the contents of\nthis MAR file will be extracted. Required parent directories\nwill be created as necessary.\ndecompress (obj, optional): Controls whether files are decompressed\nwhen extracted. Must be one of 'auto' or None. Defaults to\n'auto'.", "source": "codesearchnet"}
{"code": "def linear_extrapolation_plot(log_prob_adv_array, y, file_name, min_epsilon=(- 10), max_epsilon=10, num_points=21):\n    import matplotlib\n    matplotlib.use('Agg')\n    import matplotlib.pyplot as plt\n    figure = plt.figure()\n    figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')\n    correct_idx = np.argmax(y, axis=0)\n    fig = plt.figure()\n    plt.xlabel('Epsilon')\n    plt.ylabel('Logits')\n    x_axis = np.linspace(min_epsilon, max_epsilon, num_points)\n    plt.xlim((min_epsilon - 1), (max_epsilon + 1))\n    for i in range(y.shape[0]):\n        if (i == correct_idx):\n            ls = '-'\n            linewidth = 5\n        else:\n            ls = '--'\n            linewidth = 2\n        plt.plot(x_axis, log_prob_adv_array[(:, i)], ls=ls, linewidth=linewidth, label='{}'.format(i))\n    plt.legend(loc='best', fontsize=14)\n    plt.show()\n    fig.savefig(file_name)\n    plt.clf()\n    return figure", "docstring": "Generate linear extrapolation plot.\n\nArgs:\nlog_prob_adv_array: Numpy array containing log probabilities\ny: Tf placeholder for the labels\nfile_name: Plot filename\nmin_epsilon: Minimum value of epsilon over the interval\nmax_epsilon: Maximum value of epsilon over the interval\nnum_points: Number of points used to interpolate", "source": "codesearchnet"}
{"code": "def build_bird_configuration(config):\n    bird_configuration = {}\n    if config.getboolean('daemon', 'ipv4'):\n        if os.path.islink(config.get('daemon', 'bird_conf')):\n            config_file = os.path.realpath(config.get('daemon', 'bird_conf'))\n            print(\"'bird_conf' is set to a symbolic link ({s} -> {d}, but we will use the canonical path of that link\".format(s=config.get('daemon', 'bird_conf'), d=config_file))\n        else:\n            config_file = config.get('daemon', 'bird_conf')\n        dummy_ip_prefix = config.get('daemon', 'dummy_ip_prefix')\n        if (not valid_ip_prefix(dummy_ip_prefix)):\n            raise ValueError('invalid dummy IPv4 prefix: {i}'.format(i=dummy_ip_prefix))\n        bird_configuration[4] = {'config_file': config_file, 'variable_name': config.get('daemon', 'bird_variable'), 'dummy_ip_prefix': dummy_ip_prefix, 'reconfigure_cmd': config.get('daemon', 'bird_reconfigure_cmd'), 'keep_changes': config.getboolean('daemon', 'bird_keep_changes'), 'changes_counter': config.getint('daemon', 'bird_changes_counter')}\n    if config.getboolean('daemon', 'ipv6'):\n        if os.path.islink(config.get('daemon', 'bird6_conf')):\n            config_file = os.path.realpath(config.get('daemon', 'bird6_conf'))\n            print(\"'bird6_conf' is set to a symbolic link ({s} -> {d}, but we will use the canonical path of that link\".format(s=config.get('daemon', 'bird6_conf'), d=config_file))\n        else:\n            config_file = config.get('daemon', 'bird6_conf')\n        dummy_ip_prefix = config.get('daemon', 'dummy_ip6_prefix')\n        if (not valid_ip_prefix(dummy_ip_prefix)):\n            raise ValueError('invalid dummy IPv6 prefix: {i}'.format(i=dummy_ip_prefix))\n        bird_configuration[6] = {'config_file': config_file, 'variable_name': config.get('daemon', 'bird6_variable'), 'dummy_ip_prefix': dummy_ip_prefix, 'reconfigure_cmd': config.get('daemon', 'bird6_reconfigure_cmd'), 'keep_changes': config.getboolean('daemon', 'bird6_keep_changes'), 'changes_counter': config.getint('daemon', 'bird6_changes_counter')}\n    return bird_configuration", "docstring": "Build bird configuration structure.\n\nFirst it performs a sanity check against bird settings and then builds a\ndictionary structure with bird configuration per IP version.\n\nArguments:\nconfig (obj): A configparser object which holds our configuration.\n\nReturns:\nA dictionary\n\nRaises:\nValueError if sanity check fails.", "source": "codesearchnet"}
{"code": "def add_object(self, file_path, file_object, error_fct=None):\n    error_fct = (error_fct or self.raise_os_error)\n    if (not file_path):\n        target_directory = self.root\n    else:\n        target_directory = self.resolve(file_path)\n        if (not S_ISDIR(target_directory.st_mode)):\n            error = (errno.ENOENT if self.is_windows_fs else errno.ENOTDIR)\n            error_fct(error, file_path)\n    target_directory.add_entry(file_object)", "docstring": "Add a fake file or directory into the filesystem at file_path.\n\nArgs:\nfile_path: The path to the file to be added relative to self.\nfile_object: File or directory to add.\nerror_class: The error class to be thrown if file_path does\nnot correspond to a directory (used internally(\n\nRaises:\nIOError or OSError: if file_path does not correspond to a\ndirectory.", "source": "codesearchnet"}
{"code": "def inverse_guass(self, mu: float, sigma: float) -> float:\n        \n        return float(\n            lib.TCOD_random_get_gaussian_double_inv(self.random_c, mu, sigma)\n        )", "docstring": "Return a random Gaussian number using the Box-Muller transform.\n\nArgs:\nmu (float): The median returned value.\nsigma (float): The standard deviation.\n\nReturns:\nfloat: A random float.", "source": "juraj-google-style"}
{"code": "def get_max_instability(self, min_voltage=None, max_voltage=None):\n    data = []\n    for pair in self._select_in_voltage_range(min_voltage, max_voltage):\n        if (pair.decomp_e_charge is not None):\n            data.append(pair.decomp_e_charge)\n        if (pair.decomp_e_discharge is not None):\n            data.append(pair.decomp_e_discharge)\n    return (max(data) if (len(data) > 0) else None)", "docstring": "The maximum instability along a path for a specific voltage range.\n\nArgs:\nmin_voltage: The minimum allowable voltage.\nmax_voltage: The maximum allowable voltage.\n\nReturns:\nMaximum decomposition energy of all compounds along the insertion\npath (a subset of the path can be chosen by the optional arguments)", "source": "codesearchnet"}
{"code": "def add_adsorbate_atom(self, indices, specie, distance):\n        \n        \n        center = np.sum([self[i].coords for i in indices], axis=0) / len(\n            indices)\n\n        coords = center + self.normal * distance / np.linalg.norm(self.normal)\n\n        self.append(specie, coords, coords_are_cartesian=True)", "docstring": "Gets the structure of single atom adsorption.\nslab structure from the Slab class(in [0, 0, 1])\n\nArgs:\nindices ([int]): Indices of sites on which to put the absorbate.\nAbsorbed atom will be displaced relative to the center of\nthese sites.\nspecie (Specie/Element/str): adsorbed atom species\ndistance (float): between centers of the adsorbed atom and the\ngiven site in Angstroms.", "source": "juraj-google-style"}
{"code": "def set_property_filter(filter_proto, name, op, value):\n  \n  filter_proto.Clear()\n  pf = filter_proto.property_filter\n  pf.property.name = name\n  pf.op = op\n  set_value(pf.value, value)\n  return filter_proto", "docstring": "Set property filter contraint in the given datastore.Filter proto message.\n\nArgs:\nfilter_proto: datastore.Filter proto message\nname: property name\nop: datastore.PropertyFilter.Operation\nvalue: property value\n\nReturns:\nthe same datastore.Filter.\n\nUsage:\n>>> set_property_filter(filter_proto, 'foo',\n...   datastore.PropertyFilter.EQUAL, 'a')  # WHERE 'foo' = 'a'", "source": "juraj-google-style"}
{"code": "def from_hyperplane(basis, origin, point, internal = True):\n        \n        basis = np.array(basis)\n        assert basis.shape[0] + 1 == basis.shape[1]\n\n        big_basis = np.zeros((basis.shape[1], basis.shape[1]))\n        big_basis[:basis.shape[0],:basis.shape[1]] = basis\n\n        u, s, vh = np.linalg.svd(big_basis)\n        null_mask = (s <= 1e-8)\n        normal = np.compress(null_mask, vh, axis=0)[0]\n\n        if np.inner(np.array(point)-np.array(origin), normal) > 0:\n            if internal:\n                normal *= -1\n        else:\n            if not internal:\n                normal *= -1\n        offset = -np.dot(origin, normal)\n        return Halfspace(normal, offset)", "docstring": "Returns a Halfspace defined by a list of vectors parallel to the\nbounding hyperplane.\n\nArgs:\nbasis: basis for the hyperplane (array with vector rows)\norigin: point on the hyperplane\npoint: point not on the hyperplane\ninternal: whether point is inside the halfspace", "source": "juraj-google-style"}
{"code": "def get_decor(self, c, match_only=None):\n        \n        if isinstance(c, Component):\n            if c:\n                if match_only:\n                    \n                    c = Component({k: getattr(c, k, None) for k in match_only})\n                for decor in self.__list:\n                    try:\n                        if c == decor.component:\n                            return decor\n                    except AttributeError:\n                        continue\n        else:\n            for decor in self.__list:\n                try:\n                    if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:\n                        return decor\n                except AttributeError:\n                    continue\n        return Decor({'colour': '", "docstring": "Get the decor for a component.\n\nArgs:\nc (component): The component to look up.\nmatch_only (list of str): The component attributes to include in the\ncomparison. Default: All of them.\n\nReturns:\nDecor. The matching Decor from the Legend, or None if not found.", "source": "juraj-google-style"}
{"code": "def __init__(self, system_time_tuple=None):\n    \n    super(Systemtime, self).__init__()\n    self._number_of_seconds = None\n    self._precision = definitions.PRECISION_1_MILLISECOND\n    self.day_of_month = None\n    self.day_of_week = None\n    self.hours = None\n    self.milliseconds = None\n    self.minutes = None\n    self.month = None\n    self.seconds = None\n    self.year = None\n\n    if system_time_tuple:\n      if len(system_time_tuple) < 8:\n        raise ValueError('Invalid system time tuple 8 elements required.')\n\n      if system_time_tuple[0] < 1601 or system_time_tuple[0] > 30827:\n        raise ValueError('Year value out of bounds.')\n\n      if system_time_tuple[1] not in range(1, 13):\n        raise ValueError('Month value out of bounds.')\n\n      if system_time_tuple[2] not in range(0, 7):\n        raise ValueError('Day of week value out of bounds.')\n\n      days_per_month = self._GetDaysPerMonth(\n          system_time_tuple[0], system_time_tuple[1])\n      if system_time_tuple[3] < 1 or system_time_tuple[3] > days_per_month:\n        raise ValueError('Day of month value out of bounds.')\n\n      if system_time_tuple[4] not in range(0, 24):\n        raise ValueError('Hours value out of bounds.')\n\n      if system_time_tuple[5] not in range(0, 60):\n        raise ValueError('Minutes value out of bounds.')\n\n      \n      if system_time_tuple[6] not in range(0, 60):\n        raise ValueError('Seconds value out of bounds.')\n\n      if system_time_tuple[7] < 0 or system_time_tuple[7] > 999:\n        raise ValueError('Milliseconds value out of bounds.')\n\n      self.day_of_month = system_time_tuple[3]\n      self.day_of_week = system_time_tuple[2]\n      self.hours = system_time_tuple[4]\n      self.milliseconds = system_time_tuple[7]\n      self.minutes = system_time_tuple[5]\n      self.month = system_time_tuple[1]\n      self.seconds = system_time_tuple[6]\n      self.year = system_time_tuple[0]\n\n      self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n          self.year, self.month, self.day_of_month, self.hours, self.minutes,\n          self.seconds)", "docstring": "Initializes a SYSTEMTIME structure.\n\nArgs:\nsystem_time_tuple\n(Optional[tuple[int, int, int, int, int, int, int, int]]):\nsystem time, contains year, month, day of week, day of month,\nhours, minutes, seconds and milliseconds.\n\nRaises:\nValueError: if the system time is invalid.", "source": "juraj-google-style"}
{"code": "def get_product_value(self, value_name, wanted_type=None):\n    if (not self.__reg_products_handle):\n        return None\n    (subkey, search_value_name) = os.path.split(value_name)\n    try:\n        if subkey:\n            handle = win32api.RegOpenKeyEx(self.__reg_products_handle, subkey, 0, (win32con.KEY_READ | self.__reg_32bit_access))\n            (item_value, item_type) = self.__reg_query_value(handle, search_value_name)\n            win32api.RegCloseKey(handle)\n        else:\n            (item_value, item_type) = win32api.RegQueryValueEx(self.__reg_products_handle, value_name)\n    except pywintypes.error as exc:\n        if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):\n            return None\n        raise\n    if (wanted_type and (item_type not in self.__reg_types[wanted_type])):\n        item_value = None\n    return item_value", "docstring": "For the product section of the registry return the name value.\n\nArgs:\nvalue_name (str): Registry value name.\nwanted_type (str):\nThe type of value wanted if the type does not match\nNone is return. wanted_type support values are\n``str`` ``int`` ``list`` ``bytes``.\n\nReturns:\nvalue: Value requested or ``None`` if not found.", "source": "codesearchnet"}
{"code": "def union(self, other, recursive=True, overwrite=False):\n    if (not isinstance(other, composite)):\n        raise AssertionError('Cannot union composite and {} types'.format(type(other)))\n    if (self.meta_type != other.meta_type):\n        return composite([self, other])\n    if (self.meta_type == 'list'):\n        keep = []\n        for item in self._list:\n            keep.append(item)\n        for item in other._list:\n            if (item not in self._list):\n                keep.append(item)\n        return composite(keep)\n    elif (self.meta_type == 'dict'):\n        keep = {}\n        for key in list(set((list(self._dict.keys()) + list(other._dict.keys())))):\n            left = self._dict.get(key)\n            right = other._dict.get(key)\n            if (recursive and isinstance(left, composite) and isinstance(right, composite)):\n                keep[key] = left.union(right, recursive=recursive, overwrite=overwrite)\n            elif (left == right):\n                keep[key] = left\n            elif (left is None):\n                keep[key] = right\n            elif (right is None):\n                keep[key] = left\n            elif overwrite:\n                keep[key] = right\n            else:\n                keep[key] = composite([left, right])\n        return composite(keep)\n    return", "docstring": "Recursively compute union of data. For dictionaries, items\nfor specific keys will be combined into a list, depending on the\nstatus of the overwrite= parameter. For lists, items will be appended\nand reduced to unique items. This method is meant to be analogous\nto set.union for composite objects.\n\nArgs:\nother (composite): Other composite object to union with.\nrecursive (bool): Whether or not to perform the operation recursively,\nfor all nested composite objects.\noverwrite (bool): Whether or not to overwrite entries with the same\nkey in a nested dictionary.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    fixed_section_data_map = self._GetDataTypeMap(\n        'job_fixed_length_data_section')\n\n    try:\n      fixed_length_section, file_offset = self._ReadStructureFromFileObject(\n          file_object, 0, fixed_section_data_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse fixed-length data section with error: {0!s}'.format(\n              exception))\n\n    if not fixed_length_section.product_version in self._PRODUCT_VERSIONS:\n      raise errors.UnableToParseFile(\n          'Unsupported product version in: 0x{0:04x}'.format(\n              fixed_length_section.product_version))\n\n    if not fixed_length_section.format_version == 1:\n      raise errors.UnableToParseFile(\n          'Unsupported format version in: {0:d}'.format(\n              fixed_length_section.format_version))\n\n    variable_section_data_map = self._GetDataTypeMap(\n        'job_variable_length_data_section')\n\n    try:\n      variable_length_section, data_size = self._ReadStructureFromFileObject(\n          file_object, file_offset, variable_section_data_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Unable to parse variable-length data section with error: '\n          '{0!s}').format(exception))\n\n    file_offset += data_size\n\n    event_data = self._ParseEventData(variable_length_section)\n\n    date_time = self._ParseLastRunTime(parser_mediator, fixed_length_section)\n    if date_time:\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    trigger_data_map = self._GetDataTypeMap('job_trigger')\n\n    for trigger_index in range(0, variable_length_section.number_of_triggers):\n      try:\n        trigger, data_size = self._ReadStructureFromFileObject(\n            file_object, file_offset, trigger_data_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile((\n            'Unable to parse trigger: {0:d} with error: {2!s}').format(\n                trigger_index, exception))\n\n      file_offset += data_size\n\n      event_data.trigger_type = trigger.trigger_type\n\n      date_time = self._ParseTriggerStartTime(parser_mediator, trigger)\n      if date_time:\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START,\n            time_zone=parser_mediator.timezone)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      date_time = self._ParseTriggerEndTime(parser_mediator, trigger)\n      if date_time:\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START,\n            time_zone=parser_mediator.timezone)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Windows job file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def load(self, read_tuple_name):\n    self.prefix_width = 0\n    self.read_tuple_id_width = 0\n    self.genome_id_width = 0\n    self.chr_id_width = 0\n    self.coor_width = 0\n    parts = read_tuple_name.split('__')\n    self.prefix_width = len(parts[0])\n    self.read_tuple_id_width = len(parts[1])\n    segments = parts[2][1:(- 1)].split('),(')\n    for segment in segments:\n        int_widths = list(map(len, segment.split(',')))\n        self.genome_id_width = max(self.genome_id_width, int_widths[0])\n        self.chr_id_width = max(self.chr_id_width, int_widths[1])\n        self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])", "docstring": "Load RNF values from a read tuple name.\n\nArgs:\nread_tuple_name (str): Read tuple name which the values are taken from.", "source": "codesearchnet"}
{"code": "def struct_member_error(err, sid, name, offset, size):\n    \n    exception, msg = STRUCT_ERROR_MAP[err]\n    struct_name = idc.GetStrucName(sid)\n    return exception(('AddStructMember(struct=\"{}\", member=\"{}\", offset={}, size={}) '\n                      'failed: {}').format(\n        struct_name,\n        name,\n        offset,\n        size,\n        msg\n    ))", "docstring": "Create and format a struct member exception.\n\nArgs:\nerr: The error value returned from struct member creation\nsid: The struct id\nname: The member name\noffset: Memeber offset\nsize: Member size\n\nReturns:\nA ``SarkErrorAddStructMemeberFailed`` derivative exception, with an\ninformative message.", "source": "juraj-google-style"}
{"code": "def _get_message(self, target_message, indices, pending, timeout, condition):\n    start_time = time.time()\n    target_id = self._get_message_id(target_message)\n    if (target_id not in indices):\n        for (i, incoming) in enumerate(self._incoming):\n            if (incoming.id > target_id):\n                indices[target_id] = i\n                break\n        else:\n            indices[target_id] = len(self._incoming)\n    future = self._client.loop.create_future()\n    last_idx = indices[target_id]\n    if (last_idx < len(self._incoming)):\n        incoming = self._incoming[last_idx]\n        if condition(incoming, target_id):\n            indices[target_id] += 1\n            future.set_result(incoming)\n            return future\n    pending[target_id] = future\n    return self._get_result(future, start_time, timeout)", "docstring": "Gets the next desired message under the desired condition.\n\nArgs:\ntarget_message (`object`):\nThe target message for which we want to find another\nresponse that applies based on `condition`.\n\nindices (`dict`):\nThis dictionary remembers the last ID chosen for the\ninput `target_message`.\n\npending (`dict`):\nThis dictionary remembers {msg_id: Future} to be set\nonce `condition` is met.\n\ntimeout (`int`):\nThe timeout (in seconds) override to use for this operation.\n\ncondition (`callable`):\nThe condition callable that checks if an incoming\nmessage is a valid response.", "source": "codesearchnet"}
{"code": "def whois_domains_history(self, domains):\n    api_name = 'opendns-whois-domain-history'\n    fmt_url_path = u'whois/{0}/history'\n    return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Calls WHOIS domain history end point\n\nArgs:\ndomains: An enumerable of domains\nReturns:\nA dict of {domain: domain_history_result}", "source": "codesearchnet"}
{"code": "def copy(source_file_names, destination_file_names):\n    if len(source_file_names) == 0:\n        return\n    filesystem = FileSystems.get_filesystem(source_file_names[0])\n    return filesystem.copy(source_file_names, destination_file_names)", "docstring": "Recursively copy the file list from the source to the destination\n\nArgs:\nsource_file_names: list of source file objects that needs to be copied\ndestination_file_names: list of destination of the new object\n\nRaises:\n``BeamIOError``: if any of the copy operations fail", "source": "github-repos"}
{"code": "def _is_in_targets(self, site, targets):\n        \n        elems = self._get_elements(site)\n        for elem in elems:\n            if elem not in targets:\n                return False\n        return True", "docstring": "Test whether a site contains elements in the target list\n\nArgs:\nsite (Site): Site to assess\ntargets ([Element]) List of elements\nReturns:\n(boolean) Whether this site contains a certain list of elements", "source": "juraj-google-style"}
{"code": "def __init__(self, max_attempts, *args, **kwargs):\n        \n        Exception.__init__(self, *args, **kwargs)\n        self.max_attempts = max_attempts", "docstring": "Initializer.\n\nArgs:\nmax_attempts: Maximum number of attempts to make for this task,\ninclusive. So 2 means try two times and then retire the task.\n*args, **kwargs: Optional Exception arguments.", "source": "juraj-google-style"}
{"code": "def simple_layer_stack(include_encdec_attention, num_layers=6, d_ff=2048, num_heads=8, d_kv=128, dropout_rate=0.1):\n    ret = []\n    for _ in xrange(num_layers):\n        ret.append(transformer_layers.SelfAttention(num_heads=num_heads, key_value_size=d_kv, attention_kwargs={'dropout_rate': dropout_rate}))\n        if include_encdec_attention:\n            ret.append(transformer_layers.EncDecAttention(num_heads=num_heads, key_value_size=d_kv, attention_kwargs={'dropout_rate': dropout_rate}))\n        ret.append(transformer_layers.DenseReluDense(hidden_size=d_ff, dropout_rate=dropout_rate))\n    return transformer.LayerStack(ret)", "docstring": "Create a layer stack.\n\nArgs:\ninclude_encdec_attention: a boolean\nnum_layers: an integer\nd_ff: an integer\nnum_heads: an integer\nd_kv: an integer\ndropout_rate: a float\n\nReturns:\na LayerStack", "source": "codesearchnet"}
{"code": "def generate_index(fn, cols=None, names=None, sep=' '):\n    assert (cols is not None), \"'cols' was not set\"\n    assert (names is not None), \"'names' was not set\"\n    assert (len(cols) == len(names))\n    (bgzip, open_func) = get_open_func(fn, return_fmt=True)\n    data = pd.read_csv(fn, sep=sep, engine='c', usecols=cols, names=names, compression=('gzip' if bgzip else None))\n    f = open_func(fn, 'rb')\n    data['seek'] = np.fromiter(_seek_generator(f), dtype=np.uint)[:(- 1)]\n    f.close()\n    write_index(get_index_fn(fn), data)\n    return data", "docstring": "Build a index for the given file.\n\nArgs:\nfn (str): the name of the file.\ncols (list): a list containing column to keep (as int).\nnames (list): the name corresponding to the column to keep (as str).\nsep (str): the field separator.\n\nReturns:\npandas.DataFrame: the index.", "source": "codesearchnet"}
{"code": "def split(self, path):\n    path = path.strip()\n    if not path.startswith(S3FileSystem.S3_PREFIX):\n        raise ValueError('Path %r must be S3 path.' % path)\n    prefix_len = len(S3FileSystem.S3_PREFIX)\n    last_sep = path[prefix_len:].rfind('/')\n    if last_sep >= 0:\n        last_sep += prefix_len\n    if last_sep > 0:\n        return (path[:last_sep], path[last_sep + 1:])\n    elif last_sep < 0:\n        return (path, '')\n    else:\n        raise ValueError('Invalid path: %s' % path)", "docstring": "Splits the given path into two parts.\n\nSplits the path into a pair (head, tail) such that tail contains the last\ncomponent of the path and head contains everything up to that.\n\nHead will include the S3 prefix ('s3://').\n\nArgs:\npath: path as a string\nReturns:\na pair of path components as strings.", "source": "github-repos"}
{"code": "def baby_names(max_length=15):\n  \n  names = []\n  lengths = []\n  targets = []\n  with open(os.path.join(os.path.dirname(sys.modules[__name__].__file__),\n                         'baby_names.csv'), 'rb') as f:\n    first = True\n    for l in csv.reader(f, delimiter=','):\n      if first:\n        first = False\n        continue\n      assert len(l) == 4, l\n      name = l[0]\n      if max_length < len(name):\n        raise ValueError('Max length is too small: %d > %d' %\n                         (max_length, len(name)))\n      chars = [convert_to_int(c) for c in name]\n      names.append(chars + ([EOS] * (max_length - len(chars))))\n      lengths.append([len(name)])\n      values = [float(l[2]), float(l[3])]\n      if abs(sum(values) - 1) > 0.001:\n        raise ValueError('Each row must sum to 1: %s' % l)\n      targets.append(values)\n  return np.array(names), np.array(targets), np.array(lengths)", "docstring": "Opens the baby_names csv file and produces numpy array.\n\nArgs:\nmax_length: The maximum length, 15 was the longest name when this was\nwritten.  Short entries will be padded with the EOS marker.\nReturns:\nA numpy array of the names converted to ascii codes, the labels and an\narray of lengths.\nRaises:\nValueError: if max_length is too small.", "source": "juraj-google-style"}
{"code": "def name(self, name):\n        \n        self._data['name'] = name\n        request = self._base_request\n        request['name'] = name\n        return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels name.\n\nArgs:\nname:", "source": "juraj-google-style"}
{"code": "def vocabulary_size(self):\n    return self._lookup_layer.vocabulary_size()", "docstring": "Gets the current size of the layer's vocabulary.\n\nReturns:\nThe integer size of the vocabulary, including optional\nmask and OOV indices.", "source": "github-repos"}
{"code": "def closed_by(self, **kwargs):\n        \n        path = '%s/%s/closed_by' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List merge requests that will close the issue when merged.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetErrot: If the merge requests could not be retrieved\n\nReturns:\nlist: The list of merge requests.", "source": "juraj-google-style"}
{"code": "def modify_user_power_levels(self, users=None, users_default=None):\n        \n        try:\n            content = self.client.api.get_power_levels(self.room_id)\n            if users_default:\n                content[\"users_default\"] = users_default\n\n            if users:\n                if \"users\" in content:\n                    content[\"users\"].update(users)\n                else:\n                    content[\"users\"] = users\n\n                \n                for user, power_level in list(content[\"users\"].items()):\n                    if power_level is None:\n                        del content[\"users\"][user]\n            self.client.api.set_power_levels(self.room_id, content)\n            return True\n        except MatrixRequestError:\n            return False", "docstring": "Modify the power level for a subset of users\n\nArgs:\nusers(dict): Power levels to assign to specific users, in the form\n{\"@name0:host0\": 10, \"@name1:host1\": 100, \"@name3:host3\", None}\nA level of None causes the user to revert to the default level\nas specified by users_default.\nusers_default(int): Default power level for users in the room\n\nReturns:\nTrue if successful, False if not", "source": "juraj-google-style"}
{"code": "def parse_clnsig(acc, sig, revstat, transcripts):\n    \n    clnsig_accsessions = []\n\n    if acc:\n    \n        try:\n            acc = int(acc)\n        except ValueError:\n            pass\n        \n        \n        if isinstance(acc, int):\n            revstat_groups = []\n            if revstat:\n                revstat_groups = [rev.lstrip('_') for rev in revstat.split(',')]\n\n            sig_groups = []\n            if sig:\n                for significance in sig.split('/'):\n                    splitted_word = significance.split('_')\n                    sig_groups.append(' '.join(splitted_word[:2]))\n\n            for sign_term in sig_groups:\n                clnsig_accsessions.append({\n                    'value': sign_term,\n                    'accession': int(acc),\n                    'revstat': ', '.join(revstat_groups),\n                })\n        else:\n            \n            \n            acc_groups = acc.split('|')\n            sig_groups = sig.split('|')\n            revstat_groups = revstat.split('|')\n            for acc_group, sig_group, revstat_group in zip(acc_groups, sig_groups, revstat_groups):\n                accessions = acc_group.split(',')\n                significances = sig_group.split(',')\n                revstats = revstat_group.split(',')\n                for accession, significance, revstat in zip(accessions, significances, revstats):\n                    clnsig_accsessions.append({\n                        'value': int(significance),\n                        'accession': accession,\n                        'revstat': revstat,\n                    })\n\n    elif transcripts:\n        clnsig = set()\n        for transcript in transcripts:\n            for annotation in transcript.get('clinsig', []):\n                clnsig.add(annotation)\n        for annotation in clnsig:\n            clnsig_accsessions.append({'value': annotation})\n\n    return clnsig_accsessions", "docstring": "Get the clnsig information\n\nArgs:\nacc(str): The clnsig accession number, raw from vcf\nsig(str): The clnsig significance score, raw from vcf\nrevstat(str): The clnsig revstat, raw from vcf\ntranscripts(iterable(dict))\n\nReturns:\nclnsig_accsessions(list): A list with clnsig accessions", "source": "juraj-google-style"}
{"code": "def angle(x, y):\n    dot = np.dot(x, y)\n    x_mod = np.linalg.norm(x)\n    y_mod = np.linalg.norm(y)\n    cos_angle = (dot / (x_mod * y_mod))\n    return np.degrees(np.arccos(cos_angle))", "docstring": "Calculate the angle between two vectors, in degrees.\n\nArgs:\nx (np.array): one vector.\ny (np.array): the other vector.\n\nReturns:\n(float):      the angle between x and y in degrees.", "source": "codesearchnet"}
{"code": "def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:\n    if attention_mask.dim() == 3:\n        extended_attention_mask = attention_mask[:, None, :, :]\n    elif attention_mask.dim() == 2:\n        if is_decoder:\n            batch_size, seq_length = input_shape\n            seq_ids = torch.arange(seq_length, device=device)\n            causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n            causal_mask = causal_mask.to(attention_mask.dtype)\n            if causal_mask.shape[1] < attention_mask.shape[1]:\n                prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]\n                causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1)\n            extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n        else:\n            extended_attention_mask = attention_mask[:, None, None, :]\n    else:\n        raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape))\n    extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n    return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArguments:\nattention_mask (`torch.Tensor`):\nMask with ones indicating tokens to attend to, zeros for tokens to ignore.\ninput_shape (`Tuple[int]`):\nThe shape of the input to the model.\ndevice (`torch.device`):\nThe device of the input to the model.\n\nReturns:\n`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github-repos"}
{"code": "def load(file_path, parse_line_fn):\n  \n  vocabulary = []\n  embeddings = []\n  embeddings_dim = None\n  for line in tf.gfile.GFile(file_path):\n    token, embedding = parse_line_fn(line)\n    if not embeddings_dim:\n      embeddings_dim = len(embedding)\n    elif embeddings_dim != len(embedding):\n      raise ValueError(\n          \"Inconsistent embedding dimension detected, %d != %d for token %s\",\n          embeddings_dim, len(embedding), token)\n\n    vocabulary.append(token)\n    embeddings.append(embedding)\n\n  return vocabulary, np.array(embeddings)", "docstring": "Loads a text embedding into memory as a numpy matrix.\n\nArgs:\nfile_path: Path to the text embedding file.\nparse_line_fn: callback function to parse each file line.\n\nReturns:\nA tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).\n\nRaises:\nValueError: if the data in the sstable is inconsistent.", "source": "juraj-google-style"}
{"code": "def from_sr_code(code):\n    code = str(code)\n    proj4 = utils.crscode_to_string('sr-org', code, 'proj4')\n    crs = from_proj4(proj4)\n    return crs", "docstring": "Load crs object from sr-org code, via spatialreference.org.\nParses based on the proj4 representation.\n\nArguments:\n\n- *code*: The SR-ORG code as an integer.\n\nReturns:\n\n- A CS instance of the indicated type.", "source": "codesearchnet"}
{"code": "def CompileFilter(self, filter_expression):\n    \n    filter_parser = pfilter.BaseParser(filter_expression).Parse()\n    matcher = filter_parser.Compile(pfilter.PlasoAttributeFilterImplementation)\n\n    self._filter_expression = filter_expression\n    self._matcher = matcher", "docstring": "Compiles the filter expression.\n\nThe filter expression contains an object filter expression.\n\nArgs:\nfilter_expression (str): filter expression.\n\nRaises:\nParseError: if the filter expression cannot be parsed.", "source": "juraj-google-style"}
{"code": "def clear(self, color: Tuple[int, int, int]) -> None:\n        \n        lib.TCOD_image_clear(self.image_c, color)", "docstring": "Fill this entire Image with color.\n\nArgs:\ncolor (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.", "source": "juraj-google-style"}
{"code": "def _channel_flatten_input(x, data_format):\n    graph = ops.get_default_graph()\n    cache_key = (graph, x.ref(), data_format)\n    if cache_key not in _channel_flatten_input_cache:\n        x_shape = array_ops.shape(x)\n        neg_ones = constant_op.constant([-1], dtype=x_shape.dtype)\n        if data_format == b'NCHW':\n            order = [1, 0, 2, 3, 4]\n            shape = array_ops.concat([x_shape[1:2], neg_ones, x_shape[3:]], axis=0)\n            reverse_order = order\n        else:\n            order = [1, 2, 3, 0, 4]\n            shape = array_ops.concat([x_shape[1:4], neg_ones], axis=0)\n            reverse_order = [3, 0, 1, 2, 4]\n        x = array_ops.transpose(x, order)\n        reverse_shape = array_ops.shape(x)\n        x = array_ops.reshape(x, shape)\n        outputs = (x, reverse_order, reverse_shape)\n        _channel_flatten_input_cache[cache_key] = outputs\n    else:\n        outputs = _channel_flatten_input_cache[cache_key]\n    return outputs", "docstring": "Merge the stack dimension with the channel dimension.\n\nIf S is pfor's stacking dimension, then,\n- for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose\nshould be cheap.\n- for SNHWC, we transpose to NHWSC.\nWe then merge the S and C dimension.\n\nArgs:\nx: tensor_lib.Tensor to transform.\ndata_format: \"NCHW\" or \"NHWC\".\n\nReturns:\nA 3-element tuple with the transformed value, along with the shape for\nreshape and order for transpose required to transform back.", "source": "github-repos"}
{"code": "def __extract_file(self, path, fileinfo, destination):\n        \n\n        if 'offset' not in fileinfo:\n            self.__copy_extracted(path, destination)\n            return\n\n        self.asarfile.seek(\n            self.__absolute_offset(fileinfo['offset'])\n        )\n\n        \n        contents = self.asarfile.read(\n            self.__absolute_offset(fileinfo['size'])\n        )\n\n        destination_path = os.path.join(destination, path)\n\n        with open(destination_path, 'wb') as fp:\n            fp.write(contents)\n\n        LOGGER.debug('Extracted %s to %s', path, destination_path)", "docstring": "Extracts the specified file to the specified destination.\n\nArgs:\npath (str):\nRelative (to the root of the archive) path of the\nfile to extract.\n\nfileinfo (dict):\nDictionary containing the offset and size of the file\n(Extracted from the header).\n\ndestination (str):\nDirectory to extract the archive to.", "source": "juraj-google-style"}
{"code": "def get_help_usage(command):\n    if (not command):\n        doc = get_primary_command_usage()\n    elif (command in ('-a', '--all')):\n        subcommands = [k for k in settings.subcommands if (k is not None)]\n        available_commands = (subcommands + ['help'])\n        command_doc = '\\nAvailable commands:\\n{}\\n'.format('\\n'.join(('  {}'.format(c) for c in sorted(available_commands))))\n        doc = get_primary_command_usage(command_doc)\n    elif command.startswith('-'):\n        raise ValueError(\"Unrecognized option '{}'.\".format(command))\n    elif (command in settings.subcommands):\n        subcommand = settings.subcommands[command]\n        doc = format_usage(subcommand.__doc__)\n    docopt.docopt(doc, argv=('--help',))", "docstring": "Print out a help message and exit the program.\n\nArgs:\ncommand: If a command value is supplied then print the help message for\nthe command module if available. If the command is '-a' or '--all',\nthen print the standard help message but with a full list of\navailable commands.\n\nRaises:\nValueError: Raised if the help message is requested for an invalid\ncommand or an unrecognized option is passed to help.", "source": "codesearchnet"}
{"code": "def _buildTraitCovar(self, trait_covar_type='freeform', rank=1, fixed_trait_covar=None, jitter=0.0001):\n    assert (trait_covar_type in ['freeform', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id', 'block_diag', 'fixed']), 'VarianceDecomposition:: trait_covar_type not valid'\n    if (trait_covar_type == 'freeform'):\n        cov = FreeFormCov(self.P, jitter=jitter)\n    elif (trait_covar_type == 'fixed'):\n        assert (fixed_trait_covar is not None), 'VarianceDecomposition:: set fixed_trait_covar'\n        assert (fixed_trait_covar.shape[0] == self.P), 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'\n        assert (fixed_trait_covar.shape[1] == self.P), 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'\n        cov = FixedCov(fixed_trait_covar)\n    elif (trait_covar_type == 'diag'):\n        cov = DiagonalCov(self.P)\n    elif (trait_covar_type == 'lowrank'):\n        cov = LowRankCov(self.P, rank=rank)\n    elif (trait_covar_type == 'lowrank_id'):\n        cov = SumCov(LowRankCov(self.P, rank=rank), FixedCov(sp.eye(self.P)))\n    elif (trait_covar_type == 'lowrank_diag'):\n        cov = SumCov(LowRankCov(self.P, rank=rank), DiagonalCov(self.P))\n    elif (trait_covar_type == 'block'):\n        cov = FixedCov(sp.ones([self.P, self.P]))\n    elif (trait_covar_type == 'block_id'):\n        cov1 = FixedCov(sp.ones([self.P, self.P]))\n        cov2 = FixedCov(sp.eye(self.P))\n        cov = SumCov(cov1, cov2)\n    elif (trait_covar_type == 'block_diag'):\n        cov1 = FixedCov(sp.ones([self.P, self.P]))\n        cov2 = FixedCov(sp.eye(self.P))\n        cov = SumCov(cov1, cov2)\n    return cov", "docstring": "Internal functions that builds the trait covariance matrix using the LIMIX framework\n\nArgs:\ntrait_covar_type: type of covaraince to use. Default 'freeform'. possible values are\nrank:                rank of a possible lowrank component (default 1)\nfixed_trait_covar:   PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used\njitter:              diagonal contribution added to freeform covariance matrices for regularization\nReturns:\nLIMIX::Covariance for Trait covariance matrix", "source": "codesearchnet"}
{"code": "def point_consensus(self, consensus_type):\n    if ('mean' in consensus_type):\n        consensus_data = np.mean(self.data, axis=0)\n    elif ('std' in consensus_type):\n        consensus_data = np.std(self.data, axis=0)\n    elif ('median' in consensus_type):\n        consensus_data = np.median(self.data, axis=0)\n    elif ('max' in consensus_type):\n        consensus_data = np.max(self.data, axis=0)\n    elif ('percentile' in consensus_type):\n        percentile = int(consensus_type.split('_')[1])\n        consensus_data = np.percentile(self.data, percentile, axis=0)\n    else:\n        consensus_data = np.zeros(self.data.shape[1:])\n    consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name, self.run_date, self.variable, self.start_date, self.end_date, self.units)\n    return consensus", "docstring": "Calculate grid-point statistics across ensemble members.\n\nArgs:\nconsensus_type: mean, std, median, max, or percentile_nn\n\nReturns:\nEnsembleConsensus containing point statistic", "source": "codesearchnet"}
{"code": "def all_min(tensors):\n    return _apply_all_reduce('min', tensors)", "docstring": "Returns a list of tensors with the all-reduce min across `tensors`.\n\nThe computation is done with an all-reduce operation, so if only some of the\nreturned tensors are evaluated then the computation will hang.\n\nArgs:\ntensors: The input tensors across which to reduce; must be assigned\nto GPU devices.\n\nReturns:\nList of tensors, each with the minimum of the input tensors, where tensor i\nhas the same device as `tensors[i]`.", "source": "github-repos"}
{"code": "def parse(filename, encoding=None):\n    \n\n    with open(filename, encoding=encoding) as source:\n        for line in source:\n            for word in line.split():\n                yield word", "docstring": "!DEMO!\nSimple file parsing generator\n\nArgs:\nfilename: absolute or relative path to file on disk\nencoding: encoding string that is passed to open function", "source": "juraj-google-style"}
{"code": "def _GetSourceFileSystem(self, source_path_spec, resolver_context=None):\n    \n    if not source_path_spec:\n      raise RuntimeError('Missing source.')\n\n    file_system = path_spec_resolver.Resolver.OpenFileSystem(\n        source_path_spec, resolver_context=resolver_context)\n\n    type_indicator = source_path_spec.type_indicator\n    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(type_indicator):\n      mount_point = source_path_spec\n    else:\n      mount_point = source_path_spec.parent\n\n    return file_system, mount_point", "docstring": "Retrieves the file system of the source.\n\nArgs:\nsource_path_spec (dfvfs.PathSpec): source path specification of the file\nsystem.\nresolver_context (dfvfs.Context): resolver context.\n\nReturns:\ntuple: containing:\n\ndfvfs.FileSystem: file system.\ndfvfs.PathSpec: mount point path specification that refers\nto the base location of the file system.\n\nRaises:\nRuntimeError: if source path specification is not set.", "source": "juraj-google-style"}
{"code": "def abort_all_if(expr, reason, extras=None):\n    if expr:\n        abort_all(reason, extras)", "docstring": "Abort all subsequent tests, if the expression evaluates to True.\n\nArgs:\nexpr: The expression that is evaluated.\nreason: The reason to abort.\nextras: An optional field for extra information to be included in\ntest result.\n\nRaises:\nsignals.TestAbortAll: Abort all subsequent tests.", "source": "github-repos"}
{"code": "def __init__(self, paths, case_sensitive=True, path_segment_separator='/'):\n    \n    super(PathFilterScanTree, self).__init__()\n    self._case_sensitive = case_sensitive\n    self._path_segment_separator = path_segment_separator\n    self._root_node = None\n\n    if not self._case_sensitive:\n      paths = [path.lower() for path in paths]\n\n    path_filter_table = _PathFilterTable(\n        paths, [], path_segment_separator=self._path_segment_separator)\n\n    if path_filter_table.paths:\n      self._root_node = self._BuildScanTreeNode(path_filter_table, [])", "docstring": "Initializes and builds a path filter scan tree.\n\nArgs:\npaths: a list of strings containing the paths.\ncase_sensitive: optional boolean value to indicate string matches should\nbe case sensitive.\npath_segment_separator: optional string containing the path segment\nseparator.", "source": "juraj-google-style"}
{"code": "def find_required_filehandlers(self, requirements, filename_info):\n    req_fh = []\n    filename_info = set(filename_info.items())\n    if requirements:\n        for requirement in requirements:\n            for fhd in self.file_handlers[requirement]:\n                if set(fhd.filename_info.items()).issubset(filename_info):\n                    req_fh.append(fhd)\n                    break\n            else:\n                raise RuntimeError('No matching requirement file of type {}'.format(requirement))\n    return req_fh", "docstring": "Find the necessary file handlers for the given requirements.\n\nWe assume here requirements are available.\n\nRaises:\nKeyError, if no handler for the given requirements is available.\nRuntimeError, if there is a handler for the given requirements,\nbut it doesn't match the filename info.", "source": "codesearchnet"}
{"code": "def make_connection(self):", "docstring": "Makes a connection to the snippet server on the remote device.\n\nThis function makes a connection to the server and sends a handshake\nrequest to ensure the server is available for upcoming RPCs.\n\nThere are two types of connections used by snippet clients:\n* The client makes a new connection each time it needs to send an RPC.\n* The client makes a connection in this stage and uses it for all the RPCs.\nIn this case, the client should implement `close_connection` to close\nthe connection.\n\nRaises:\nerrors.ProtocolError: something went wrong when exchanging data with the\nserver.", "source": "github-repos"}
{"code": "def _get_attributes(self, attributes):\n    \n\n    params = []\n\n    if isinstance(attributes, dict):\n      for attribute_key in attributes.keys():\n        attribute_value = attributes.get(attribute_key)\n        \n        if validator.is_attribute_valid(attribute_key, attribute_value):\n          attribute_id = self.config.get_attribute_id(attribute_key)\n          if attribute_id:\n            params.append({\n              'entity_id': attribute_id,\n              'key': attribute_key,\n              'type': self.EventParams.CUSTOM,\n              'value': attribute_value\n            })\n\n    \n    bot_filtering_value = self._get_bot_filtering()\n    if isinstance(bot_filtering_value, bool):\n      params.append({\n          'entity_id': enums.ControlAttributes.BOT_FILTERING,\n          'key': enums.ControlAttributes.BOT_FILTERING,\n          'type': self.EventParams.CUSTOM,\n          'value': bot_filtering_value\n      })\n\n    return params", "docstring": "Get attribute(s) information.\n\nArgs:\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nList consisting of valid attributes for the user. Empty otherwise.", "source": "juraj-google-style"}
{"code": "def CheckFlowCanBeStartedOnClient(flow_name):\n    flow_cls = flow.GRRFlow.GetPlugin(flow_name)\n    if flow_cls.category:\n        return True\n    else:\n        raise access_control.UnauthorizedAccess((\"Flow %s can't be started on a client by non-suid users.\" % flow_name))", "docstring": "Checks if flow can be started on a particular client.\n\nOnly flows with a category can bestarted. Having a category means that the\nflow will be accessible from the UI.\n\nArgs:\nflow_name: Name of the flow to check access for.\n\nReturns:\nTrue if flow is externally accessible.\nRaises:\naccess_control.UnauthorizedAccess: if flow is not externally accessible.", "source": "codesearchnet"}
{"code": "def AddFile(self, fd, external=True):\n    \n    files_for_write = []\n\n    for sub_store in self.GetChildrenByPriority(allow_external=external):\n      new_file = sub_store.AddFile(fd)\n      if new_file:\n        files_for_write.append(new_file)\n\n    fd.Seek(0)\n    while files_for_write:\n      \n      data = fd.Read(self.CHUNK_SIZE)\n      if not data:\n        break\n\n      for child in files_for_write:\n        child.Write(data)\n\n    for child in files_for_write:\n      child.Close()", "docstring": "Create a new file in the file store.\n\nWe delegate the actual file addition to our contained\nimplementations. Implementations can either implement the AddFile() method,\nreturning a file like object which will be written on, or directly support\nthe AddBlobToStore() method which can copy the VFSBlobImage efficiently.\n\nArgs:\nfd: An AFF4 object open for read/write.\nexternal: If true, attempt to add files to stores defined as EXTERNAL.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.Predict = channel.unary_unary(\n            \"/google.cloud.automl.v1beta1.PredictionService/Predict\",\n            request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_prediction__service__pb2.PredictRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_prediction__service__pb2.PredictResponse.FromString,\n        )\n        self.BatchPredict = channel.unary_unary(\n            \"/google.cloud.automl.v1beta1.PredictionService/BatchPredict\",\n            request_serializer=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_prediction__service__pb2.BatchPredictRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def __init__(self, **namespaces):\n        \n        super(Configuration, self).__init__()\n        for key, entry in compat.iteritems(namespaces):\n\n            self.register(key, entry)", "docstring": "Initialize a configuration with a series of namespaces.\n\nArgs:\n**namespaces: Each keyword should be a Namespace object which will\nbe added to the configuration file.\n\nRaises:\nTypeError: If an entry is not a Namespace object.\nValueError: If the namespace is already registered.", "source": "juraj-google-style"}
{"code": "def persist_compilestats(run, session, stats):\n    \n    for stat in stats:\n        stat.run_id = run.id\n        session.add(stat)", "docstring": "Persist the run results in the database.\n\nArgs:\nrun: The run we attach the compilestats to.\nsession: The db transaction we belong to.\nstats: The stats we want to store in the database.", "source": "juraj-google-style"}
{"code": "def find_divisors(n):\n\n    \n\n    if not isinstance(n, int):\n        raise TypeError(\"Expecting a strictly positive integer\")\n    if n <= 0:\n        raise ValueError(\"Expecting a strictly positive integer\")\n\n    for i in range(1, int(n**0.5) + 1):\n        if n % i == 0:\n            divisors = {i, n\n            for divisor in divisors:\n                yield divisor", "docstring": "Find all the positive divisors of the given integer n.\n\nArgs:\nn (int): strictly positive integer\n\nReturns:\nA generator of all the positive divisors of n\n\nRaises:\nTypeError: if n is not an integer\nValueError: if n is negative", "source": "juraj-google-style"}
{"code": "def _ParseCmdItem(self, cmd_input, template_file=None):\n    \n    \n    fsm = textfsm.TextFSM(template_file)\n    if not self._keys:\n      self._keys = set(fsm.GetValuesByAttrib('Key'))\n\n    \n    table = texttable.TextTable()\n    table.header = fsm.header\n\n    \n    for record in fsm.ParseText(cmd_input):\n      table.Append(record)\n    return table", "docstring": "Creates Texttable with output of command.\n\nArgs:\ncmd_input: String, Device response.\ntemplate_file: File object, template to parse with.\n\nReturns:\nTextTable containing command output.\n\nRaises:\nCliTableError: A template was not found for the given command.", "source": "juraj-google-style"}
{"code": "def _rand_dtype(rand, shape, dtype, scale=1.0, post=lambda x: x):\n    r = lambda: numpy_compat.np_asarray(scale * rand(*_dims_of_shape(shape)), dtype)\n    if onp.issubdtype(dtype, onp.complexfloating):\n        vals = r() + 1j * r()\n    else:\n        vals = r()\n    return _cast_to_shape(numpy_compat.np_asarray(post(vals), dtype), shape, dtype)", "docstring": "Produce random values given shape, dtype, scale, and post-processor.\n\nArgs:\nrand: a function for producing random values of a given shape, e.g. a\nbound version of either onp.RandomState.randn or onp.RandomState.rand.\nshape: a shape value as a tuple of positive integers.\ndtype: a numpy dtype.\nscale: optional, a multiplicative scale for the random values (default 1).\npost: optional, a callable for post-processing the random values (default\nidentity).\n\nReturns:\nAn ndarray of the given shape and dtype using random values based on a call\nto rand but scaled, converted to the appropriate dtype, and post-processed.", "source": "github-repos"}
{"code": "def GetLogicalLines(self):\n    self._StartNewLine()\n    return self._logical_lines", "docstring": "Fetch the result of the tree walk.\n\nNote: only call this after visiting the whole tree.\n\nReturns:\nA list of LogicalLine objects.", "source": "github-repos"}
{"code": "def _ParseCachedEntry2003(self, value_data, cached_entry_offset):\n    try:\n        cached_entry = self._ReadStructureFromByteStream(value_data[cached_entry_offset:], cached_entry_offset, self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse cached entry value with error: {0!s}'.format(exception))\n    path_size = cached_entry.path_size\n    maximum_path_size = cached_entry.maximum_path_size\n    path_offset = cached_entry.path_offset\n    if ((path_offset > 0) and (path_size > 0)):\n        path_size += path_offset\n        maximum_path_size += path_offset\n        try:\n            path = value_data[path_offset:path_size].decode('utf-16-le')\n        except UnicodeDecodeError:\n            raise errors.ParseError('Unable to decode cached entry path to string')\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = self._cached_entry_data_type_map.GetByteSize()\n    cached_entry_object.file_size = getattr(cached_entry, 'file_size', None)\n    cached_entry_object.last_modification_time = cached_entry.last_modification_time\n    cached_entry_object.path = path\n    return cached_entry_object", "docstring": "Parses a Windows 2003 cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "codesearchnet"}
{"code": "def object_hook(self, object_dict):\n    \n    instance = self.decoder(object_dict)\n    self.condition_list.append(instance)\n    self.index += 1\n    return self.index", "docstring": "Hook which when passed into a json.JSONDecoder will replace each dict\nin a json string with its index and convert the dict to an object as defined\nby the passed in condition_decoder. The newly created condition object is\nappended to the conditions_list.\n\nArgs:\nobject_dict: Dict representing an object.\n\nReturns:\nAn index which will be used as the placeholder in the condition_structure", "source": "juraj-google-style"}
{"code": "def diff(self, sym: Symbol, n: int = 1, expand_simplify: bool = True):\n        \n        if not isinstance(sym, sympy.Basic):\n            raise TypeError(\"%s needs to be a Sympy symbol\" % sym)\n        if sym.free_symbols.issubset(self.free_symbols):\n            \n            \n            \n            deriv = QuantumDerivative.create(self, derivs={sym: n}, vals=None)\n            if not deriv.is_zero and expand_simplify:\n                deriv = deriv.expand().simplify_scalar()\n            return deriv\n        else:\n            \n            \n            \n            \n            return self.__class__._zero", "docstring": "Differentiate by scalar parameter `sym`.\n\nArgs:\nsym: What to differentiate by.\nn: How often to differentiate\nexpand_simplify: Whether to simplify the result.\n\nReturns:\nThe n-th derivative.", "source": "juraj-google-style"}
{"code": "def put(self, item, *args, **kwargs):\n    if (not self.enabled):\n        return\n    timeout = kwargs.pop('timeout', None)\n    if (timeout is None):\n        timeout = self.default_timeout\n    cache_key = self.make_key(args, kwargs)\n    with self._cache_lock:\n        self._cache[cache_key] = ((time() + timeout), item)", "docstring": "Put an item into the cache, for this combination of args and kwargs.\n\nArgs:\n*args: any arguments.\n**kwargs: any keyword arguments. If ``timeout`` is specified as one\nof the keyword arguments, the item will remain available\nfor retrieval for ``timeout`` seconds. If ``timeout`` is\n`None` or not specified, the ``default_timeout`` for this\ncache will be used. Specify a ``timeout`` of 0 (or ensure that\nthe ``default_timeout`` for this cache is 0) if this item is\nnot to be cached.", "source": "codesearchnet"}
{"code": "def render_header(image: np.ndarray, header: str, input_data_format: Optional[Union[str, ChildProcessError]]=None, **kwargs):\n    requires_backends(render_header, 'vision')\n    image = to_pil_image(image, input_data_format=input_data_format)\n    header_image = render_text(header, **kwargs)\n    new_width = max(header_image.width, image.width)\n    new_height = int(image.height * (new_width / image.width))\n    new_header_height = int(header_image.height * (new_width / header_image.width))\n    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white')\n    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))\n    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))\n    new_image = to_numpy_array(new_image)\n    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:\n        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)\n    return new_image", "docstring": "Renders the input text as a header on the input image.\n\nArgs:\nimage (`np.ndarray`):\nThe image to render the header on.\nheader (`str`):\nThe header text.\ndata_format (`Union[ChannelDimension, str]`, *optional*):\nThe data format of the image. Can be either \"ChannelDimension.channels_first\" or\n\"ChannelDimension.channels_last\".\n\nReturns:\n`np.ndarray`: The image with the header rendered.", "source": "github-repos"}
{"code": "def get_asset_filename_to_add(asset_filepath, asset_filename_map):\n    asset_filename = os.path.basename(asset_filepath)\n    if asset_filename not in asset_filename_map:\n        return asset_filename\n    other_asset_filepath = asset_filename_map[asset_filename]\n    if other_asset_filepath == asset_filepath:\n        return asset_filename\n    if not file_io.filecmp(asset_filepath, other_asset_filepath):\n        return _get_unique_asset_filename(asset_filename, asset_filename_map)\n    return asset_filename", "docstring": "Get a unique basename to add to the SavedModel if this file is unseen.\n\nAssets come from users as full paths, and we save them out to the\nSavedModel as basenames. In some cases, the basenames collide. Here,\nwe dedupe asset basenames by first checking if the file is the same,\nand, if different, generate and return an index-suffixed basename\nthat can be used to add the asset to the SavedModel.\n\nArgs:\nasset_filepath: the full path to the asset that is being saved\nasset_filename_map: a dict of filenames used for saving the asset in\nthe SavedModel to full paths from which the filenames were derived.\n\nReturns:\nUniquified filename string if the file is not a duplicate, or the original\nfilename if the file has already been seen and saved.", "source": "github-repos"}
{"code": "def resolve_variables(variables, context, provider):\n    for variable in variables:\n        variable.resolve(context, provider)", "docstring": "Given a list of variables, resolve all of them.\n\nArgs:\nvariables (list of :class:`stacker.variables.Variable`): list of\nvariables\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of the\nbase provider", "source": "codesearchnet"}
{"code": "def matrices_to_flat_transforms(transform_matrices):\n    with ops.name_scope('matrices_to_flat_transforms'):\n        transform_matrices = ops.convert_to_tensor(transform_matrices, name='transform_matrices')\n        if transform_matrices.shape.ndims not in (2, 3):\n            raise ValueError('Matrices should be 2D or 3D, got: %s' % transform_matrices)\n        transforms = array_ops.reshape(transform_matrices, constant_op.constant([-1, 9]))\n        transforms /= transforms[:, 8:9]\n        return transforms[:, :8]", "docstring": "Converts affine matrices to `tf.contrib.image` projective transforms.\n\nNote that we expect matrices that map output coordinates to input coordinates.\nTo convert forward transformation matrices, call `tf.linalg.inv` on the\nmatrices and use the result here.\n\nArgs:\ntransform_matrices: One or more affine transformation matrices, for the\nreverse transformation in homogeneous coordinates. Shape `(3, 3)` or `(N,\n3, 3)`.\n\nReturns:\n2D tensor of flat transforms with shape `(N, 8)`, which may be passed into\n`tf.contrib.image.transform`.\n\nRaises:\nValueError: If `transform_matrices` have an invalid shape.", "source": "github-repos"}
{"code": "def loop_until_valid_response(prompt):\n    \n    responses = {\"Y\": True, \"YES\": True, \"TRUE\": True,\n                 \"N\": False, \"NO\": False, \"FALSE\": False}\n    response = \"\"\n    while response.upper() not in responses:\n        response = raw_input(prompt)\n\n    return responses[response.upper()]", "docstring": "Loop over entering input until it is a valid bool-ish response.\n\nArgs:\nprompt: Text presented to user.\n\nReturns:\nThe bool value equivalent of what was entered.", "source": "juraj-google-style"}
{"code": "def path(self, source, target):\n    visited = set(source.split('+'))\n    targets = (set(target.split('+')) - visited)\n    for tablename in visited.union(targets):\n        self[tablename]\n    if (len(targets) == 0):\n        return []\n    paths = [[(tablename, None)] for tablename in visited]\n    while True:\n        newpaths = []\n        for path in paths:\n            (laststep, pivot) = path[(- 1)]\n            if (laststep in targets):\n                return path[1:]\n            else:\n                for key in self[laststep].keys():\n                    for step in (set(self.find(key)) - visited):\n                        visited.add(step)\n                        newpaths.append((path + [(step, key)]))\n        if newpaths:\n            paths = newpaths\n        else:\n            break\n    raise ItsdbError('no relation path found from {} to {}'.format(source, target))", "docstring": "Find the path of id fields connecting two tables.\n\nThis is just a basic breadth-first-search. The relations file\nshould be small enough to not be a problem.\n\nReturns:\nlist: (table, fieldname) pairs describing the path from\nthe source to target tables\nRaises:\n:class:`delphin.exceptions.ItsdbError`: when no path is\nfound\nExample:\n>>> relations.path('item', 'result')\n[('parse', 'i-id'), ('result', 'parse-id')]\n>>> relations.path('parse', 'item')\n[('item', 'i-id')]\n>>> relations.path('item', 'item')\n[]", "source": "codesearchnet"}
{"code": "def graph(self, as_dot=False):\n        \n        if not self.has_graph:\n            return None\n\n        if not as_dot:\n            if self.graph_ is None:\n                \n                self.graph_ = read_graph_from_string(self.graph_string)\n            return self.graph_\n\n        if self.graph_string:\n            if self.graph_string.startswith('{'):  \n                self.graph_ = read_graph_from_string(self.graph_string)\n            else:\n                \n                \n                \n                return self.graph_string\n\n        return write_dot(self.graph_)", "docstring": "Get the resolve graph.\n\nArgs:\nas_dot: If True, get the graph as a dot-language string. Otherwise,\na pygraph.digraph object is returned.\n\nReturns:\nA string or `pygraph.digraph` object, or None if there is no graph\nassociated with the resolve.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        self.SerializeUnsigned(writer)\n        writer.WriteSerializableArray(self.scripts)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def event_stream(app, *, filter_by_prefix=None):\n    q = Queue()\n\n    def handle_event(event):\n        if ((filter_by_prefix is None) or ((filter_by_prefix is not None) and event['type'].startswith(filter_by_prefix))):\n            q.put(event)\n\n    def receive_events():\n        with app.connection() as connection:\n            recv = app.events.Receiver(connection, handlers={'*': handle_event})\n            recv.capture(limit=None, timeout=None, wakeup=True)\n    t = threading.Thread(target=receive_events)\n    t.start()\n    while True:\n        (yield q.get(block=True))", "docstring": "Generator function that returns celery events.\n\nThis function turns the callback based celery event handling into a generator.\n\nArgs:\napp: Reference to a celery application object.\nfilter_by_prefix (str): If not None, only allow events that have a type that\nstarts with this prefix to yield an generator event.\n\nReturns:\ngenerator: A generator that returns celery events.", "source": "codesearchnet"}
{"code": "def _get_sample_generator(samples):\n    if isinstance(samples, Mapping):\n\n        def samples_generator():\n            for ind in range(samples[list(samples.keys())[0]].shape[0]):\n                (yield np.array([samples[s][(ind, :)] for s in sorted(samples)]))\n    elif isinstance(samples, np.ndarray):\n\n        def samples_generator():\n            for ind in range(samples.shape[0]):\n                (yield samples[ind])\n    else:\n        samples_generator = samples\n    return samples_generator", "docstring": "Get a sample generator from the given polymorphic input.\n\nArgs:\nsamples (ndarray, dict or generator): either an matrix of shape (d, p, n) with d problems, p parameters and\nn samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,\na generator function that yields sample arrays of shape (p, n).\n\nReturns:\ngenerator: a generator that yields a matrix of size (p, n) for every problem in the input.", "source": "codesearchnet"}
{"code": "def _chglog(amend: bool = False, stage: bool = False, next_version: str = None, auto_next_version: bool = False):\n    \n    if config.CHANGELOG_DISABLE():\n        LOGGER.info('skipping changelog update as per config')\n    else:\n        epab.utils.ensure_exe('git')\n        epab.utils.ensure_exe('gitchangelog')\n        LOGGER.info('writing changelog')\n        if auto_next_version:\n            next_version = epab.utils.get_next_version()\n        with gitchangelog_config():\n            with temporary_tag(next_version):\n                changelog, _ = elib_run.run('gitchangelog', mute=True)\n        \n        changelog = re.sub(BOGUS_LINE_PATTERN, '\\\\1\\n', changelog)\n        Path(config.CHANGELOG_FILE_PATH()).write_text(changelog, encoding='utf8')\n        if amend:\n            CTX.repo.amend_commit(\n                append_to_msg='update changelog [auto]', files_to_add=str(config.CHANGELOG_FILE_PATH())\n            )\n        elif stage:\n            CTX.repo.stage_subset(str(config.CHANGELOG_FILE_PATH()))", "docstring": "Writes the changelog\n\nArgs:\namend: amend last commit with changes\nstage: stage changes", "source": "juraj-google-style"}
{"code": "def to_representation(self, instance):\n    updated_course = copy.deepcopy(instance)\n    enterprise_customer_catalog = self.context['enterprise_customer_catalog']\n    updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(updated_course['key'])\n    for course_run in updated_course['course_runs']:\n        course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(course_run['key'])\n    return updated_course", "docstring": "Return the updated course data dictionary.\n\nArguments:\ninstance (dict): The course data.\n\nReturns:\ndict: The updated course data.", "source": "codesearchnet"}
{"code": "def guided_registration(request, page_number=None):\n    PAGE_PROFILE = 1\n    PAGE_TICKET = 2\n    PAGE_PRODUCTS = 3\n    PAGE_PRODUCTS_MAX = 4\n    TOTAL_PAGES = 4\n    ticket_category = inventory.Category.objects.get(id=settings.TICKET_PRODUCT_CATEGORY)\n    cart = CartController.for_user(request.user)\n    attendee = people.Attendee.get_instance(request.user)\n    if attendee.completed_registration:\n        return redirect(review)\n    has_profile = hasattr(attendee, 'attendeeprofilebase')\n    if (not has_profile):\n        max_page = PAGE_PROFILE\n        redirect_page = PAGE_PROFILE\n    else:\n        products = inventory.Product.objects.filter(productitem__cart=cart.cart)\n        products = products.filter(category=ticket_category)\n        if (products.count() == 0):\n            max_page = PAGE_TICKET\n            redirect_page = PAGE_TICKET\n        else:\n            max_page = PAGE_PRODUCTS_MAX\n            redirect_page = PAGE_PRODUCTS\n    if ((page_number is None) or (int(page_number) > max_page)):\n        return redirect('guided_registration', redirect_page)\n    page_number = int(page_number)\n    next_step = redirect('guided_registration', (page_number + 1))\n    with BatchController.batch(request.user):\n        available = ProductController.available_products(request.user, category=ticket_category)\n        if (not available):\n            messages.error(request, 'There are no more tickets available.')\n            return redirect('dashboard')\n        sections = []\n        if (page_number == PAGE_PROFILE):\n            title = 'Attendee information'\n            sections = _guided_registration_profile_and_voucher(request)\n        elif (page_number == PAGE_TICKET):\n            title = 'Select ticket type'\n            sections = _guided_registration_products(request, GUIDED_MODE_TICKETS_ONLY)\n        elif (page_number == PAGE_PRODUCTS):\n            title = 'Additional items'\n            sections = _guided_registration_products(request, GUIDED_MODE_ALL_ADDITIONAL)\n        elif (page_number == PAGE_PRODUCTS_MAX):\n            title = 'More additional items'\n            sections = _guided_registration_products(request, GUIDED_MODE_EXCLUDE_COMPLETE)\n        if (not sections):\n            attendee.completed_registration = True\n            attendee.save()\n            return redirect('review')\n        if (sections and (request.method == 'POST')):\n            for section in sections:\n                if section.form.errors:\n                    break\n            else:\n                return next_step\n    data = {'current_step': page_number, 'sections': sections, 'title': title, 'total_steps': TOTAL_PAGES}\n    return render(request, 'registrasion/guided_registration.html', data)", "docstring": "Goes through the registration process in order, making sure user sees\nall valid categories.\n\nThe user must be logged in to see this view.\n\nParameter:\npage_number:\n1) Profile form (and e-mail address?)\n2) Ticket type\n3) Remaining products\n4) Mark registration as complete\n\nReturns:\nrender: Renders ``registrasion/guided_registration.html``,\nwith the following data::\n\n{\n\"current_step\": int(),  # The current step in the\n# registration\n\"sections\": sections,   # A list of\n# GuidedRegistrationSections\n\"title\": str(),         # The title of the page\n\"total_steps\": int(),   # The total number of steps\n}", "source": "codesearchnet"}
{"code": "def _get_required_params_for_conversion(self, event_key, event_tags):\n    \n    snapshot = {}\n\n    event_dict = {\n      self.EventParams.EVENT_ID: self.config.get_event(event_key).id,\n      self.EventParams.TIME: self._get_time(),\n      self.EventParams.KEY: event_key,\n      self.EventParams.UUID: str(uuid.uuid4())\n    }\n\n    if event_tags:\n      revenue_value = event_tag_utils.get_revenue_value(event_tags)\n      if revenue_value is not None:\n        event_dict[event_tag_utils.REVENUE_METRIC_TYPE] = revenue_value\n\n      numeric_value = event_tag_utils.get_numeric_value(event_tags, self.config.logger)\n      if numeric_value is not None:\n        event_dict[event_tag_utils.NUMERIC_METRIC_TYPE] = numeric_value\n\n      if len(event_tags) > 0:\n        event_dict[self.EventParams.TAGS] = event_tags\n\n    snapshot[self.EventParams.EVENTS] = [event_dict]\n    return snapshot", "docstring": "Get parameters that are required for the conversion event to register.\n\nArgs:\nevent_key: Key representing the event which needs to be recorded.\nevent_tags: Dict representing metadata associated with the event.\n\nReturns:\nDict consisting of the decisions and events info for conversion event.", "source": "juraj-google-style"}
{"code": "def loadfile(method=True, writable=False, create=False):\n\n    def convert_file_args(args, kwargs):\n        filething = (args[0] if args else None)\n        filename = kwargs.pop('filename', None)\n        fileobj = kwargs.pop('fileobj', None)\n        return (filething, filename, fileobj, args[1:], kwargs)\n\n    def wrap(func):\n\n        @wraps(func)\n        def wrapper(self, *args, **kwargs):\n            (filething, filename, fileobj, args, kwargs) = convert_file_args(args, kwargs)\n            with _openfile(self, filething, filename, fileobj, writable, create) as h:\n                return func(self, h, *args, **kwargs)\n\n        @wraps(func)\n        def wrapper_func(*args, **kwargs):\n            (filething, filename, fileobj, args, kwargs) = convert_file_args(args, kwargs)\n            with _openfile(None, filething, filename, fileobj, writable, create) as h:\n                return func(h, *args, **kwargs)\n        return (wrapper if method else wrapper_func)\n    return wrap", "docstring": "A decorator for functions taking a `filething` as a first argument.\n\nPasses a FileThing instance as the first argument to the wrapped function.\n\nArgs:\nmethod (bool): If the wrapped functions is a method\nwritable (bool): If a filename is passed opens the file readwrite, if\npassed a file object verifies that it is writable.\ncreate (bool): If passed a filename that does not exist will create\na new empty file.", "source": "codesearchnet"}
{"code": "def Collect(self, top_frame):\n    frame = top_frame\n    top_line = self.breakpoint['location']['line']\n    breakpoint_frames = self.breakpoint['stackFrames']\n    try:\n        if ('expressions' in self.breakpoint):\n            self.breakpoint['evaluatedExpressions'] = [self._CaptureExpression(top_frame, expression) for expression in self.breakpoint['expressions']]\n        while (frame and (len(breakpoint_frames) < self.max_frames)):\n            line = (top_line if (frame == top_frame) else frame.f_lineno)\n            code = frame.f_code\n            if (len(breakpoint_frames) < self.max_expand_frames):\n                (frame_arguments, frame_locals) = self.CaptureFrameLocals(frame)\n            else:\n                frame_arguments = []\n                frame_locals = []\n            breakpoint_frames.append({'function': _GetFrameCodeObjectName(frame), 'location': {'path': NormalizePath(code.co_filename), 'line': line}, 'arguments': frame_arguments, 'locals': frame_locals})\n            frame = frame.f_back\n    except BaseException as e:\n        self.breakpoint['status'] = {'isError': True, 'description': {'format': 'INTERNAL ERROR: Failed while capturing locals of frame $0: $1', 'parameters': [str(len(breakpoint_frames)), str(e)]}}\n    num_vars = 1\n    while ((num_vars < len(self._var_table)) and (self._total_size < self.max_size)):\n        self._var_table[num_vars] = self.CaptureVariable(self._var_table[num_vars], 0, self.default_capture_limits, can_enqueue=False)\n        num_vars += 1\n    self.TrimVariableTable(num_vars)\n    self._CaptureEnvironmentLabels()\n    self._CaptureRequestLogId()\n    self._CaptureUserId()", "docstring": "Collects call stack, local variables and objects.\n\nStarts collection from the specified frame. We don't start from the top\nframe to exclude the frames due to debugger. Updates the content of\nself.breakpoint.\n\nArgs:\ntop_frame: top frame to start data collection.", "source": "codesearchnet"}
{"code": "def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):\n    \n    return len(find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)) > 0", "docstring": "Tests if a particular fractional coord is within a fractional coord_list.\n\nArgs:\nfcoord_list: List of fractional coords to test\nfcoord: A specific fractional coord to test.\natol: Absolute tolerance. Defaults to 1e-8.\n\nReturns:\nTrue if coord is in the coord list.", "source": "juraj-google-style"}
{"code": "def invoke_string(self, line):\n        \n\n        \n        line = str(line)\n\n        \n        if len(line) == 0:\n            return True\n\n        if line[0] == u'\n            return True\n\n        args = self._split_line(line)\n        return self.invoke(args)", "docstring": "Parse and invoke a string line.\n\nArgs:\nline (str): The line that we want to parse and invoke.\n\nReturns:\nbool: A boolean specifying if the last function created a new context\n(False if a new context was created) and a list with the remainder of the\ncommand line if this function did not consume all arguments.)", "source": "juraj-google-style"}
{"code": "def altitude_diff(msg):\n    tc = common.typecode(msg)\n    if (tc != 19):\n        raise RuntimeError(('%s: Not a airborne velocity message, expecting TC=19' % msg))\n    msgbin = common.hex2bin(msg)\n    sign = ((- 1) if int(msgbin[80]) else 1)\n    value = common.bin2int(msgbin[81:88])\n    if ((value == 0) or (value == 127)):\n        return None\n    else:\n        return ((sign * (value - 1)) * 25)", "docstring": "Decode the differece between GNSS and barometric altitude\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC=19\n\nReturns:\nint: Altitude difference in ft. Negative value indicates GNSS altitude\nbelow barometric altitude.", "source": "codesearchnet"}
{"code": "def execute(self, triple_map, output, **kwargs):\n    subjects = []\n    found_elements = self.source.xpath(str(triple_map.logicalSource.iterator), namespaces=self.xml_ns)\n    for element in found_elements:\n        subject = self.generate_term(term_map=triple_map.subjectMap, element=element, **kwargs)\n        start = len(output)\n        for row in triple_map.predicateObjectMap:\n            predicate = row.predicate\n            if (row.template is not None):\n                obj_ = self.generate_term(term_map=row, **kwargs)\n                output.add((subject, predicate, obj_))\n            if (row.parentTriplesMap is not None):\n                self.__handle_parents__(output, parent_map=row.parentTriplesMap, subject=subject, predicate=predicate, **kwargs)\n            new_subjects = self.__reference_handler__(output, predicate_obj_map=row, element=element, subject=subject)\n            subjects.extend(new_subjects)\n            if (row.constant is not None):\n                output.add((subject, predicate, row.constant))\n        if (start < len(output)):\n            if (triple_map.subjectMap.class_ is not None):\n                output.add((subject, NS_MGR.rdf.type.rdflib, triple_map.subjectMap.class_))\n            subjects.append(subject)\n    return subjects", "docstring": "Method executes mapping between source\n\nArgs:\n\n-----\ntriple_map: SimpleNamespace, Triple Map", "source": "codesearchnet"}
{"code": "def delete(self, service):\n        \n        url = self._url_format(service)\n        return self.rest_action(\n            self._session.delete, url\n        )", "docstring": "Generic DELETE operation for Learning Modules API.\n\nArgs:\nservice (str): The endpoint service to use, i.e. gradebook\n\nRaises:\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\nlist: the json-encoded content of the response", "source": "juraj-google-style"}
{"code": "def transformer_prepare_decoder(targets, hparams, features=None):\n  \n  if hparams.causal_decoder_self_attention:\n    \n    if hparams.prepend_mode == \"prepend_inputs_full_attention\":\n      decoder_self_attention_bias = (\n          common_attention.attention_bias_prepend_inputs_full_attention(\n              common_attention.embedding_to_padding(targets)))\n    else:\n      decoder_self_attention_bias = (\n          common_attention.attention_bias_lower_triangle(\n              common_layers.shape_list(targets)[1]))\n  else:\n    \n    decoder_padding = common_attention.embedding_to_padding(targets)\n    decoder_self_attention_bias = (\n        common_attention.attention_bias_ignore_padding(decoder_padding))\n\n  if features and \"targets_segmentation\" in features:\n    \n    targets_segmentation = features[\"targets_segmentation\"]\n    targets_position = features[\"targets_position\"]\n    decoder_self_attention_bias += common_attention.attention_bias_same_segment(\n        targets_segmentation, targets_segmentation)\n  else:\n    targets_position = None\n  if hparams.proximity_bias:\n    decoder_self_attention_bias += common_attention.attention_bias_proximal(\n        common_layers.shape_list(targets)[1])\n  decoder_input = common_layers.shift_right_3d(targets)\n  if hparams.pos == \"timing\":\n    if targets_position is not None:\n      decoder_input = common_attention.add_timing_signal_1d_given_position(\n          decoder_input, targets_position)\n    else:\n      decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n  elif hparams.pos == \"emb\":\n    decoder_input = common_attention.add_positional_embedding(\n        decoder_input, hparams.max_length, \"targets_positional_embedding\",\n        targets_position)\n\n  if hparams.activation_dtype == \"bfloat16\":\n    decoder_self_attention_bias = tf.cast(decoder_self_attention_bias,\n                                          tf.bfloat16)\n  return (decoder_input, decoder_self_attention_bias)", "docstring": "Prepare one shard of the model for the decoder.\n\nArgs:\ntargets: a Tensor.\nhparams: run hyperparameters\nfeatures: optionally pass the entire features dictionary as well. This is\nneeded now for \"packed\" datasets.\n\nReturns:\ndecoder_input: a Tensor, bottom of decoder stack\ndecoder_self_attention_bias: a bias tensor for use in decoder self-attention", "source": "juraj-google-style"}
{"code": "def create_or_update_video_transcript(video_id, language_code, metadata, file_data=None):\n    \n    \n    metadata = {\n        prop: value\n        for prop, value in six.iteritems(metadata)\n        if prop in ['provider', 'language_code', 'file_name', 'file_format'] and value\n    }\n\n    file_format = metadata.get('file_format')\n    if file_format and file_format not in list(dict(TranscriptFormat.CHOICES).keys()):\n        raise InvalidTranscriptFormat('{} transcript format is not supported'.format(file_format))\n\n    provider = metadata.get('provider')\n    if provider and provider not in list(dict(TranscriptProviderType.CHOICES).keys()):\n        raise InvalidTranscriptProvider('{} transcript provider is not supported'.format(provider))\n\n    try:\n        \n        video = Video.objects.get(edx_video_id=video_id)\n        video_transcript, __ = VideoTranscript.create_or_update(video, language_code, metadata, file_data)\n    except Video.DoesNotExist:\n        return None\n\n    return video_transcript.url()", "docstring": "Create or Update video transcript for an existing video.\n\nArguments:\nvideo_id: it can be an edx_video_id or an external_id extracted from external sources in a video component.\nlanguage_code: language code of a video transcript\nmetadata (dict): A dict containing (to be overwritten) properties\nfile_data (InMemoryUploadedFile): Transcript data to be saved for a course video.\n\nReturns:\nvideo transcript url", "source": "juraj-google-style"}
{"code": "def f2format(filename):\n    print(('Now converting %r...' % filename))\n    encoding = os.getenv('F2FORMAT_ENCODING', LOCALE_ENCODING)\n    lineno = dict()\n    content = list()\n    with open(filename, 'r', encoding=encoding) as file:\n        lineno[1] = 0\n        for (lnum, line) in enumerate(file, start=1):\n            content.append(line)\n            lineno[(lnum + 1)] = (lineno[lnum] + len(line))\n    string = ''.join(content)\n    text = convert(string, lineno)\n    with open(filename, 'w', encoding=encoding) as file:\n        file.write(text)", "docstring": "Wrapper works for conversion.\n\nArgs:\n- filename -- str, file to be converted", "source": "codesearchnet"}
{"code": "def add_arguments(self, parser):\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('-l', '--list', nargs='?', type=str.lower, default='_', choices=['usb', 'ip'], help='list all the connected emulators')\n    group.add_argument('-s', '--supported', nargs=1, help='query whether a device is supported')\n    group.add_argument('-t', '--test', action='store_true', help='perform a self-test')\n    return None", "docstring": "Adds the arguments for the emulator command.\n\nArgs:\nself (EmulatorCommand): the ``EmulatorCommand`` instance\nparser (argparse.ArgumentParser): parser to add the commands to\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def list_documents(project_id, knowledge_base_id):\n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.DocumentsClient()\n    knowledge_base_path = client.knowledge_base_path(project_id, knowledge_base_id)\n    print('Documents for Knowledge Id: {}'.format(knowledge_base_id))\n    for document in client.list_documents(knowledge_base_path):\n        print(' - Display Name: {}'.format(document.display_name))\n        print(' - Knowledge ID: {}'.format(document.name))\n        print(' - MIME Type: {}'.format(document.mime_type))\n        print(' - Knowledge Types:')\n        for knowledge_type in document.knowledge_types:\n            print('    - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))\n        print(' - Source: {}\\n'.format(document.content_uri))", "docstring": "Lists the Documents belonging to a Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.", "source": "codesearchnet"}
{"code": "def convert_upsample_bilinear(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting upsample...')\n\n    if names == 'short':\n        tf_name = 'UPSL' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    output_size = params['output_size']\n    align_corners = params['align_corners'] > 0\n\n    def target_layer(x, size=output_size, align_corners=align_corners):\n        import tensorflow as tf\n        x = tf.transpose(x, [0, 2, 3, 1])\n        x = tf.image.resize_images(x, size, align_corners=align_corners)\n        x = tf.transpose(x, [0, 3, 1, 2])\n        return x\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert upsample_bilinear2d layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def repsep(parser: Union[(Parser, Sequence[Input])], separator: Union[(Parser, Sequence[Input])]) -> RepeatedSeparatedParser:\n    if isinstance(parser, str):\n        parser = lit(parser)\n    if isinstance(separator, str):\n        separator = lit(separator)\n    return RepeatedSeparatedParser(parser, separator)", "docstring": "Match a parser zero or more times separated by another parser.\n\nThis matches repeated sequences of ``parser`` separated by ``separator``. A\nlist is returned containing the value from each match of ``parser``. The\nvalues from ``separator`` are discarded. If there are no matches, an empty\nlist is returned.\n\nArgs:\nparser: Parser or literal\nseparator: Parser or literal", "source": "codesearchnet"}
{"code": "def encode(self, object_):\n        \n        if self.enforce_reversible:\n            self.enforce_reversible = False\n            if self.decode(self.encode(object_)) != object_:\n                raise ValueError('Encoding is not reversible for \"%s\"' % object_)\n            self.enforce_reversible = True\n\n        return object_", "docstring": "Encodes an object.\n\nArgs:\nobject_ (object): Object to encode.\n\nReturns:\nobject: Encoding of the object.", "source": "juraj-google-style"}
{"code": "def get(self, name: str) -> Optional[ListEntry]:\n        \n        parts = name.split(self._delimiter)\n        try:\n            node = self._find(self._root, *parts)\n        except KeyError:\n            return None\n        else:\n            marked = self._marked.get(name)\n            return ListEntry(name, node.exists, marked, bool(node.children))", "docstring": "Return the named entry in the list tree.\n\nArgs:\nname: The entry name.", "source": "juraj-google-style"}
{"code": "def notebook_content(model, notebook_comms_target=None, theme=FromCurdoc):\n    if (not isinstance(model, Model)):\n        raise ValueError('notebook_content expects a single Model instance')\n    with OutputDocumentFor([model], apply_theme=theme, always_new=True) as new_doc:\n        (docs_json, [render_item]) = standalone_docs_json_and_render_items([model])\n    div = div_for_render_item(render_item)\n    render_item = render_item.to_json()\n    if notebook_comms_target:\n        render_item['notebook_comms_target'] = notebook_comms_target\n    script = DOC_NB_JS.render(docs_json=serialize_json(docs_json), render_items=serialize_json([render_item]))\n    return (encode_utf8(script), encode_utf8(div), new_doc)", "docstring": "Return script and div that will display a Bokeh plot in a Jupyter\nNotebook.\n\nThe data for the plot is stored directly in the returned HTML.\n\nArgs:\nmodel (Model) : Bokeh object to render\n\nnotebook_comms_target (str, optional) :\nA target name for a Jupyter Comms object that can update\nthe document that is rendered to this notebook div\n\ntheme (Theme, optional) :\nDefaults to the ``Theme`` instance in the current document.\nSetting this to ``None`` uses the default theme or the theme\nalready specified in the document. Any other value must be an\ninstance of the ``Theme`` class.\n\nReturns:\nscript, div, Document\n\n.. note::\nAssumes :func:`~bokeh.io.notebook.load_notebook` or the equivalent\nhas already been executed.", "source": "codesearchnet"}
{"code": "def is44(msg):\n    if allzeros(msg):\n        return False\n    d = hex2bin(data(msg))\n    if wrongstatus(d, 5, 6, 23):\n        return False\n    if wrongstatus(d, 35, 36, 46):\n        return False\n    if wrongstatus(d, 47, 48, 49):\n        return False\n    if wrongstatus(d, 50, 51, 56):\n        return False\n    if (bin2int(d[0:4]) > 4):\n        return False\n    vw = wind44(msg)\n    if ((vw is not None) and (vw[0] > 250)):\n        return False\n    (temp, temp2) = temp44(msg)\n    if ((min(temp, temp2) > 60) or (max(temp, temp2) < (- 80))):\n        return False\n    return True", "docstring": "Check if a message is likely to be BDS code 4,4.\n\nMeteorological routine air report\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"}
{"code": "def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):\n    line = clean_lines.elided[linenum]\n    match = Search(pattern, line)\n    if (not match):\n        return False\n    context = line[0:(match.start(1) - 1)]\n    if Match('.*\\\\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\\\\s*$', context):\n        return False\n    if (linenum > 0):\n        for i in xrange((linenum - 1), max(0, (linenum - 5)), (- 1)):\n            context = (clean_lines.elided[i] + context)\n    if Match('.*\\\\b[_A-Z][_A-Z0-9]*\\\\s*\\\\((?:\\\\([^()]*\\\\)|[^()])*$', context):\n        return False\n    if (context.endswith(' operator++') or context.endswith(' operator--')):\n        return False\n    remainder = line[match.end(0):]\n    if Match('^\\\\s*(?:;|const\\\\b|throw\\\\b|final\\\\b|override\\\\b|[=>{),]|->)', remainder):\n        if Match('^\\\\s*>', remainder):\n            return False\n        matched_zero = Match('^\\\\s=\\\\s*(\\\\S+)\\\\s*;', remainder)\n        if (matched_zero and (matched_zero.group(1) != '0')):\n            return False\n        if Match('.*\\\\)\\\\s*$', line[0:match.start(0)]):\n            return False\n        raw_line = clean_lines.raw_lines[linenum]\n        if ('/*' in raw_line):\n            return False\n        error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function')\n        return True\n    error(filename, linenum, 'readability/casting', 4, ('Using C-style cast.  Use %s<%s>(...) instead' % (cast_type, match.group(1))))\n    return True", "docstring": "Checks for a C-style cast by looking for the pattern.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\ncast_type: The string for the C++ cast to recommend.  This is either\nreinterpret_cast, static_cast, or const_cast, depending.\npattern: The regular expression used to find C-style casts.\nerror: The function to call with any errors found.\n\nReturns:\nTrue if an error was emitted.\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int:\n    return num_parameters * dtype.size", "docstring": "Compute the size taken by all the parameters in the given the storage format when serializing the model\n\nArgs:\nnum_parameters: Number of parameters to be saved\ndtype: The data format each parameter will be saved\n\nReturns:\nSize (in byte) taken to save all the parameters", "source": "github-repos"}
{"code": "def _compute_static_batch_dim(self):\n    new_batch_dim = tensor_util.constant_value(self._batch_sizes)\n    if new_batch_dim is None:\n        return None\n    if isinstance(new_batch_dim, np.ndarray):\n        if len(new_batch_dim.shape) == 1:\n            if np.all(new_batch_dim == new_batch_dim[0]):\n                new_batch_dim = new_batch_dim[0]\n            else:\n                return None\n        elif len(new_batch_dim.shape) > 1:\n            raise ValueError(f'Invalid `batch_sizes`. Expected `batch_sizes` to be a scalar or a vector. Received `batch_sizes` of rank {len(new_batch_dim.shape)}.')\n    if self._may_form_partial_batches(new_batch_dim):\n        return None\n    return new_batch_dim", "docstring": "Computes the static batch dimension of a dataset if it can be determined.\n\nGiven the RebatchDataset parameters, determines the batch dimension of this\ndataset statically. Returns None if this cannot be determined or is\nvariable.\n\nReturns:\nAn integer representing the batch dimension of the dataset. If it cannot\nbe determined statically, returns None.\n\nRaises:\nValueError: The batch_sizes parameter is malformed, input_dataset is\nnot batched, or input_dataset batch sizes are incompatible with each\nother.", "source": "github-repos"}
{"code": "def from_series(self, series, add_index_column=True):\n        \n\n        if series.name:\n            self.headers = [series.name]\n        else:\n            self.headers = [\"value\"]\n\n        self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]\n\n        if add_index_column:\n            self.headers = [\"\"] + self.headers\n            if self.type_hints:\n                self.type_hints = [None] + self.type_hints\n            self.value_matrix = [\n                [index] + [value] for index, value in zip(series.index.tolist(), series.tolist())\n            ]\n        else:\n            self.value_matrix = [[value] for value in series.tolist()]", "docstring": "Set tabular attributes to the writer from :py:class:`pandas.Series`.\nFollowing attributes are set by the method:\n\n- :py:attr:`~.headers`\n- :py:attr:`~.value_matrix`\n- :py:attr:`~.type_hints`\n\nArgs:\nseries(pandas.Series):\nInput pandas.Series object.\nadd_index_column(bool, optional):\nIf |True|, add a column of ``index`` of the ``series``.\nDefaults to |True|.", "source": "juraj-google-style"}
{"code": "def _cache_form_details(self, form):\n        \n        cache = FormCache()\n        form['model']['form_key'] = cache.form_id\n        form['model']['form_name'] = self.__class__.__name__\n        cache.set(\n            {\n                'model': list(form['model'].keys()),  \n                'non_data_fields': self.non_data_fields\n            }\n        )", "docstring": "Caches some form details to lates process and validate incoming (response) form data\n\nArgs:\nform: form dict", "source": "juraj-google-style"}
{"code": "def __init__(self, fut, file_obj, tid=None):\n        \n        super().__init__()\n        self._tid = tid\n        if isinstance(file_obj, str):\n            self.file_obj = File(file_obj)\n        elif isinstance(file_obj, File):\n            self.file_obj = file_obj\n        else:\n            raise ValueError(\"DataFuture must be initialized with a str or File\")\n        self.parent = fut\n        self._exception = None\n\n        if fut is None:\n            logger.debug(\"Setting result to filepath since no future was passed\")\n            self.set_result(self.file_obj)\n\n        else:\n            if isinstance(fut, Future):\n                self.parent.add_done_callback(self.parent_callback)\n            else:\n                raise NotFutureError(\"DataFuture can be created only with a FunctionFuture on None\")\n\n        logger.debug(\"Creating DataFuture with parent: %s\", self.parent)\n        logger.debug(\"Filepath: %s\", self.filepath)", "docstring": "Construct the DataFuture object.\n\nIf the file_obj is a string convert to a File.\n\nArgs:\n- fut (AppFuture) : AppFuture that this DataFuture will track\n- file_obj (string/File obj) : Something representing file(s)\n\nKwargs:\n- tid (task_id) : Task id that this DataFuture tracks", "source": "juraj-google-style"}
{"code": "def _reset_build_compile_trackers(model):\n    model.built = False\n    model.inputs = None\n    model.outputs = None\n    model._is_compiled = False\n    if not ops.executing_eagerly_outside_functions():\n        model._v1_compile_was_called = False\n    model.optimizer = None", "docstring": "Reset state trackers for model.\n\nNote that we do not actually zero out attributes such as optimizer,\nbut instead rely on the expectation that all of the attrs will be\nover-written on calling build/compile/etc. This is somewhat fragile,\ninsofar as we check elsewhere for the presence of these attributes as\nevidence of having been built/compiled/etc. Pending a better way to do this,\nwe reset key attributes here to allow building and compiling.\n\nArgs:\nmodel: the model that is being reset", "source": "github-repos"}
{"code": "def substring_evaluator(self, index):\n    \n    condition_name = self.condition_data[index][0]\n    condition_value = self.condition_data[index][1]\n    user_value = self.attributes.get(condition_name)\n\n    if not isinstance(condition_value, string_types):\n      self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(\n        self._get_condition_json(index),\n      ))\n      return None\n\n    if not isinstance(user_value, string_types):\n      self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(\n          self._get_condition_json(index),\n          type(user_value),\n          condition_name\n      ))\n      return None\n\n    return condition_value in user_value", "docstring": "Evaluate the given substring match condition for the given user attributes.\n\nArgs:\nindex: Index of the condition to be evaluated.\n\nReturns:\nBoolean:\n- True if the condition value is a substring of the user attribute value.\n- False if the condition value is not a substring of the user attribute value.\nNone: if the condition value isn't a string or the user attribute value isn't a string.", "source": "juraj-google-style"}
{"code": "def create_unique_autosave_filename(self, filename, autosave_dir):\n    basename = osp.basename(filename)\n    autosave_filename = osp.join(autosave_dir, basename)\n    if (autosave_filename in self.name_mapping.values()):\n        counter = 0\n        (root, ext) = osp.splitext(basename)\n        while (autosave_filename in self.name_mapping.values()):\n            counter += 1\n            autosave_basename = '{}-{}{}'.format(root, counter, ext)\n            autosave_filename = osp.join(autosave_dir, autosave_basename)\n    return autosave_filename", "docstring": "Create unique autosave file name for specified file name.\n\nArgs:\nfilename (str): original file name\nautosave_dir (str): directory in which autosave files are stored", "source": "codesearchnet"}
{"code": "def _start_job(self, request: 'bigquery.BigqueryJobsInsertRequest', stream=None):\n    try:\n        upload = None\n        if stream:\n            upload = Upload.FromStream(stream, mime_type=UNKNOWN_MIME_TYPE)\n        response = self.client.jobs.Insert(request, upload=upload)\n        _LOGGER.info('Started BigQuery job: %s\\n bq show -j --format=prettyjson --project_id=%s %s', response.jobReference, response.jobReference.projectId, response.jobReference.jobId)\n        return response\n    except HttpError as exn:\n        if exn.status_code == 409:\n            jobId = request.job.jobReference.jobId\n            _LOGGER.info('BigQuery job %s already exists, will not retry inserting it: %s', request.job.jobReference, exn)\n            job_location = self._parse_location_from_exc(exn.content, jobId)\n            response = request.job\n            if not response.jobReference.location and job_location:\n                response.jobReference.location = job_location\n            return response\n        else:\n            _LOGGER.info('Failed to insert job %s: %s', request.job.jobReference, exn)\n            raise", "docstring": "Inserts a BigQuery job.\n\nIf the job exists already, it returns it.\n\nArgs:\nrequest (bigquery.BigqueryJobsInsertRequest): An insert job request.\nstream (IO[bytes]): A bytes IO object open for reading.", "source": "github-repos"}
{"code": "def setKstar(self, term_i, Ks):\n    assert (Ks.shape[0] == self.N)\n    self.vd.getTerm(term_i).getKcf().setK0cross(Ks)", "docstring": "Set the kernel for predictions\n\nArgs:\nterm_i:     index of the term we are interested in\nKs:         (TODO: is this the covariance between train and test or the covariance between test points?)", "source": "codesearchnet"}
{"code": "def get_single_item_from_sequence(sequence, condition, ErrorClass=ValueError, no_item_error_message='No item matched condition', too_many_item_error_message='Too many items matched condition', append_sequence_to_error_message=True):\n    filtered_sequence = [item for item in sequence if condition(item)]\n    number_of_items_in_filtered_sequence = len(filtered_sequence)\n    if (number_of_items_in_filtered_sequence == 0):\n        error_message = no_item_error_message\n    elif (number_of_items_in_filtered_sequence > 1):\n        error_message = too_many_item_error_message\n    else:\n        return filtered_sequence[0]\n    if append_sequence_to_error_message:\n        error_message = '{}. Given: {}'.format(error_message, sequence)\n    raise ErrorClass(error_message)", "docstring": "Return an item from a python sequence based on the given condition.\n\nArgs:\nsequence (sequence): The sequence to filter\ncondition: A function that serves to filter items from `sequence`. Function\nmust have one argument (a single item from the sequence) and return a boolean.\nErrorClass (Exception): The error type raised in case the item isn't unique\nno_item_error_message (str): The message raised when no item matched the condtion\ntoo_many_item_error_message (str): The message raised when more than one item matched the condition\nappend_sequence_to_error_message (bool): Show or hide what was the tested sequence in the error message.\nHiding it may prevent sensitive data (such as password) to be exposed to public logs\n\nReturns:\nThe only item in the sequence which matched the condition", "source": "codesearchnet"}
{"code": "def start(component, exact):\n    \n    \n    version_file = conf.get_path('version_file', 'VERSION')\n\n    develop = conf.get('git.devel_branch', 'develop')\n    common.assert_on_branch(develop)\n\n    with conf.within_proj_dir():\n        out = shell.run('git status --porcelain', capture=True).stdout\n        lines = out.split(os.linesep)\n        has_changes = any(\n            not l.startswith('??') for l in lines if l.strip()\n        )\n\n    if has_changes:\n        log.info(\"Cannot release: there are uncommitted changes\")\n        exit(1)\n\n    old_ver, new_ver = versioning.bump(component, exact)\n\n    log.info(\"Bumping package version\")\n    log.info(\"  old version: <35>{}\".format(old_ver))\n    log.info(\"  new version: <35>{}\".format(new_ver))\n\n    with conf.within_proj_dir():\n        branch = 'release/' + new_ver\n\n        common.git_checkout(branch, create=True)\n\n        log.info(\"Creating commit for the release\")\n        shell.run('git add {ver_file} && git commit -m \"{msg}\"'.format(\n            ver_file=version_file,\n            msg=\"Releasing v{}\".format(new_ver)\n        ))", "docstring": "Create a new release branch.\n\nArgs:\ncomponent (str):\nVersion component to bump when creating the release. Can be *major*,\n*minor* or *patch*.\nexact (str):\nThe exact version to set for the release. Overrides the component\nargument. This allows to re-release a version if something went\nwrong with the release upload.", "source": "juraj-google-style"}
{"code": "def _TSKFileTimeCopyToStatTimeTuple(self, tsk_file, time_value):\n    if ((not tsk_file) or (not tsk_file.info) or (not tsk_file.info.meta) or (not tsk_file.info.fs_info)):\n        raise errors.BackEndError('Missing TSK File .info, .info.meta. or .info.fs_info')\n    stat_time = getattr(tsk_file.info.meta, time_value, None)\n    stat_time_nano = None\n    if (self._file_system_type in self._TSK_HAS_NANO_FS_TYPES):\n        time_value_nano = '{0:s}_nano'.format(time_value)\n        stat_time_nano = getattr(tsk_file.info.meta, time_value_nano, None)\n    if ((stat_time_nano is not None) and (pytsk3.TSK_VERSION_NUM >= 67240191)):\n        stat_time_nano /= 100\n    return (stat_time, stat_time_nano)", "docstring": "Copies a SleuthKit file object time value to a stat timestamp tuple.\n\nArgs:\ntsk_file (pytsk3.File): TSK file.\ntime_value (str): name of the time value.\n\nReturns:\ntuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction\nof second in 100 nano seconds intervals. The number of seconds is None\non error, or if the file system does not include the requested\ntimestamp. The fraction of second is None on error, or if the file\nsystem does not support sub-second precision.\n\nRaises:\nBackEndError: if the TSK File .info, .info.meta or info.fs_info\nattribute is missing.", "source": "codesearchnet"}
{"code": "def has_entities(status):\n    \n    try:\n        if sum(len(v) for v in status.entities.values()) > 0:\n            return True\n\n    except AttributeError:\n        if sum(len(v) for v in status['entities'].values()) > 0:\n            return True\n\n    return False", "docstring": "Returns true if a Status object has entities.\n\nArgs:\nstatus: either a tweepy.Status object or a dict returned from Twitter API", "source": "juraj-google-style"}
{"code": "def update_paths_and_config(self, config, pkg_dir_name, pkg_cache_dir=None):\n    if (pkg_cache_dir is None):\n        pkg_cache_dir = self.package_cache_dir\n    cached_dir_path = os.path.join(pkg_cache_dir, pkg_dir_name)\n    if config.get('paths'):\n        for path in config['paths']:\n            path_to_append = os.path.join(cached_dir_path, path)\n            logger.debug('Appending \"%s\" to python sys.path', path_to_append)\n            sys.path.append(path_to_append)\n    else:\n        sys.path.append(cached_dir_path)\n    if config.get('configs'):\n        for config_filename in config['configs']:\n            self.configs_to_merge.append(os.path.join(cached_dir_path, config_filename))", "docstring": "Handle remote source defined sys.paths & configs.\n\nArgs:\nconfig (dict): git config dictionary\npkg_dir_name (string): directory name of the stacker archive\npkg_cache_dir (string): fully qualified path to stacker cache\ncache directory", "source": "codesearchnet"}
{"code": "def load_file(file_path, credentials=None):\n    if file_path.startswith('gs:\n        return _load_file_from_gcs(file_path, credentials)\n    else:\n        return open(file_path, 'r')", "docstring": "Load a file from either local or gcs.\n\nArgs:\nfile_path: The target file path, which should have the prefix 'gs://' if\nto be loaded from gcs.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nA python File object if loading file from local or a StringIO object if\nloading from gcs.", "source": "codesearchnet"}
{"code": "def init_from_class_batches(self, class_batches, num_shards=None):\n    \n    shards_for_submissions = {}\n    shard_idx = 0\n    for idx, (batch_id, batch_val) in enumerate(iteritems(class_batches)):\n      work_id = DEFENSE_WORK_ID_PATTERN.format(idx)\n      submission_id = batch_val['submission_id']\n      shard_id = None\n      if num_shards:\n        shard_id = shards_for_submissions.get(submission_id)\n        if shard_id is None:\n          shard_id = shard_idx % num_shards\n          shards_for_submissions[submission_id] = shard_id\n          shard_idx += 1\n      \n      \n      self.work[work_id] = {\n          'claimed_worker_id': None,\n          'claimed_worker_start_time': None,\n          'is_completed': False,\n          'error': None,\n          'elapsed_time': None,\n          'submission_id': submission_id,\n          'shard_id': shard_id,\n          'output_classification_batch_id': batch_id,\n      }", "docstring": "Initializes work pieces from classification batches.\n\nArgs:\nclass_batches: dict with classification batches, could be obtained\nas ClassificationBatches.data\nnum_shards: number of shards to split data into,\nif None then no sharding is done.", "source": "juraj-google-style"}
{"code": "def ColumnTypeParser(description):\n    if (not description):\n        raise DataTableException('Description error: empty description given')\n    if (not isinstance(description, (six.string_types, tuple))):\n        raise DataTableException(('Description error: expected either string or tuple, got %s.' % type(description)))\n    if isinstance(description, six.string_types):\n        description = (description,)\n    for elem in description[:3]:\n        if (not isinstance(elem, six.string_types)):\n            raise DataTableException(('Description error: expected tuple of strings, current element of type %s.' % type(elem)))\n    desc_dict = {'id': description[0], 'label': description[0], 'type': 'string', 'custom_properties': {}}\n    if (len(description) > 1):\n        desc_dict['type'] = description[1].lower()\n        if (len(description) > 2):\n            desc_dict['label'] = description[2]\n            if (len(description) > 3):\n                if (not isinstance(description[3], dict)):\n                    raise DataTableException(('Description error: expected custom properties of type dict, current element of type %s.' % type(description[3])))\n                desc_dict['custom_properties'] = description[3]\n                if (len(description) > 4):\n                    raise DataTableException('Description error: tuple of length > 4')\n    if (desc_dict['type'] not in ['string', 'number', 'boolean', 'date', 'datetime', 'timeofday']):\n        raise DataTableException((\"Description error: unsupported type '%s'\" % desc_dict['type']))\n    return desc_dict", "docstring": "Parses a single column description. Internal helper method.\n\nArgs:\ndescription: a column description in the possible formats:\n'id'\n('id',)\n('id', 'type')\n('id', 'type', 'label')\n('id', 'type', 'label', {'custom_prop1': 'custom_val1'})\nReturns:\nDictionary with the following keys: id, label, type, and\ncustom_properties where:\n- If label not given, it equals the id.\n- If type not given, string is used by default.\n- If custom properties are not given, an empty dictionary is used by\ndefault.\n\nRaises:\nDataTableException: The column description did not match the RE, or\nunsupported type was passed.", "source": "codesearchnet"}
{"code": "def run(self, row, **kwargs):\n        \n        self.source = row\n        kwargs['output'] = self.__graph__()\n        super(CSVRowProcessor, self).run(**kwargs)\n        return kwargs['output']", "docstring": "Methods takes a row and depending if a dict or list,\nruns RML rules.\n\nArgs:\n-----\nrow(Dict, List): Row from CSV Reader", "source": "juraj-google-style"}
{"code": "def pb(scalars_layout):\n  \n  \n  import tensorflow.compat.v1 as tf\n\n  assert isinstance(scalars_layout, layout_pb2.Layout)\n  tensor = tf.make_tensor_proto(\n      scalars_layout.SerializeToString(), dtype=tf.string)\n  tf_summary_metadata = tf.SummaryMetadata.FromString(\n      metadata.create_summary_metadata().SerializeToString())\n  summary = tf.Summary()\n  summary.value.add(tag=metadata.CONFIG_SUMMARY_TAG,\n                    metadata=tf_summary_metadata,\n                    tensor=tensor)\n  return summary", "docstring": "Creates a summary that contains a layout.\n\nWhen users navigate to the custom scalars dashboard, they will see a layout\nbased on the proto provided to this function.\n\nArgs:\nscalars_layout: The scalars_layout_pb2.Layout proto that specifies the\nlayout.\n\nReturns:\nA summary proto containing the layout.", "source": "juraj-google-style"}
{"code": "def get_func_graphs(op):\n\n    def _get_func_graph_for_branch(name_attr_list, cached_attr_name=None):\n        \n        func_graph = None\n        if cached_attr_name is not None:\n            func_graph = getattr(op, cached_attr_name, None)\n        inputs = op.inputs[1:]\n        if func_graph is None:\n            input_shapes = [t.shape for t in inputs]\n            func_graph = util.get_func_graph(op, input_shapes, name_attr_list.name)\n        for external_t, internal_t in zip(inputs, func_graph.inputs):\n            handle_data_util.copy_handle_data(external_t, internal_t)\n        func_graph.function_captures.reset_captures(inputs, func_graph.inputs)\n        func_graph._forward_cond = op\n        return func_graph\n    if op.type in ['If', 'StatelessIf']:\n        return (_get_func_graph_for_branch(op.get_attr('then_branch'), '_true_graph'), _get_func_graph_for_branch(op.get_attr('else_branch'), '_false_graph'))\n    elif op.type in ['Case', 'StatelessCase']:\n        return [_get_func_graph_for_branch(branch_fn, '_branch_graph_{}'.format(i)) for i, branch_fn in enumerate(op.get_attr('branches'))]\n    else:\n        raise ValueError('Unsupported op type: {}'.format(op.type))", "docstring": "Returns `FuncGraph`s for the input op branches.\n\nArgs:\nop: The If or Case Operation.\n\nReturns:\nA tuple of the `FuncGraph`s of the then_branch and else_branch (all branches\nfor Case).", "source": "github-repos"}
{"code": "def get_filename(self, tag):\n        \n        if tag.find('filename', recursive=False) is not None:\n            return tag.filename.contents[0]\n        elif tag.find('anchorfile', recursive=False) is not None:\n            return tag.anchorfile.contents[0] + '", "docstring": "Extract and return a documentation filename from a tag.\n\nOverride as necessary, though this default implementation probably\ncovers all the cases of interest.\n\nArgs:\ntag: A BeautifulSoup Tag that satisfies match_criterion.\n\nReturns:\nA string that would be appropriate to use as the documentation\nfilename for an entry in a Zeal database.", "source": "juraj-google-style"}
{"code": "def convert_to_rgb(self, video: 'torch.Tensor') -> VideoInput:\n    video = F.grayscale_to_rgb(video)\n    if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any():\n        return video\n    alpha = video[..., 3, :, :] / 255.0\n    video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]\n    return video", "docstring": "Converts a video to RGB format.\n\nArgs:\nvideo (`\"torch.Tensor\"`):\nThe video to convert.\n\nReturns:\n`torch.Tensor`: The converted video.", "source": "github-repos"}
{"code": "def __init__(self, regex: str, option_suffix: str):\n        \n        super().__init__(option_suffix)\n        self._regex = self._build_matcher(regex)", "docstring": "Create a new instance.\n\nArgs:\nregex:\nThe regular expression describing the entry line to match. The\nfirst matching line is selected. The expression must contain a\nsingle capture group that contains the data to return.\noption_suffix:\nSuffix for each configuration option", "source": "juraj-google-style"}
{"code": "def build_grab_exception(ex, curl):\n    if (ex.args[0] == 23):\n        if (getattr(curl, 'grab_callback_interrupted', None) is True):\n            return None\n        else:\n            return error.GrabNetworkError(ex.args[1], ex)\n    elif (ex.args[0] == 28):\n        return error.GrabTimeoutError(ex.args[1], ex)\n    elif (ex.args[0] == 7):\n        return error.GrabConnectionError(ex.args[1], ex)\n    elif (ex.args[0] == 67):\n        return error.GrabAuthError(ex.args[1], ex)\n    elif (ex.args[0] == 47):\n        return error.GrabTooManyRedirectsError(ex.args[1], ex)\n    elif (ex.args[0] == 6):\n        return error.GrabCouldNotResolveHostError(ex.args[1], ex)\n    elif (ex.args[0] == 3):\n        return error.GrabInvalidUrl(ex.args[1], ex)\n    else:\n        return error.GrabNetworkError(ex.args[1], ex)", "docstring": "Build Grab exception from the pycurl exception\n\nArgs:\nex - the original pycurl exception\ncurl - the Curl instance raised the exception", "source": "codesearchnet"}
{"code": "def search(self,limit,start_date=None,end_date=None,clipper=None):\n        \n\n        search_string = self._query_builder(start_date,\n                                            end_date,\n                                            clipper\n                                            )\n\n        \n        \n        \n        \n        \n        \n        try:\n            r = requests.get('%s?%s&&maxRecords=%s' % (self.api_url,\n                                                    search_string,\n                                                    limit)) \n            r.raise_for_status()\n        except requests.HTTPError, e:\n            exit (\"site is not available\")\n            \n        r_dict = json.loads(r.text)\n        \n        result={}\n        \n        if  (r_dict['features'] == 0):\n            result['status'] = u'error'\n            result['message'] = \"error while loading datas\"\n\n        else:\n            result['status'] = u'SUCCESS'\n            result['total'] = len(r_dict['features'])\n            result['limit'] = limit\n            result['ID']=[i['id'] for i in r_dict['features']]\n            result['downloads']=[{\"download\" : i['properties']['services']['download']['url'],\n                                 \"id\" : i['id']}\n                                  for i in r_dict['features']]\n            result['results'] = {\n                                \"features\": [{\n                                  'properties':{'sceneID': i['id'],\n                                  'sat_type': i['properties']['platform'],\n                                  'thumbnail': i['properties']['thumbnail'],\n                                  'date': i['properties']['completionDate'],\n                                  'download': i['properties']['services']['download']['url']}\n                                  ,\n                                  'geometry': i['geometry'],\n                                  \"type\": \"Feature\"}\n                                 for i in r_dict['features']],\n                                 \"type\": \"FeatureCollection\"\n                                 }\n\n\n        \n        return result", "docstring": "The main method of Search class. It searches tTheia Landsat API\nReturns python dictionary\n\nArguments:\nstart_date -- date string. format: YYYY-MM-DD\nend_date -- date string. format: YYYY-MM-DD\nlimit -- integer specigying the maximum results return.\nclipper -- clipper object : clipper.bbox / clipper.town", "source": "juraj-google-style"}
{"code": "def check_supported_model_or_raise(model: Union['PreTrainedModel', 'TFPreTrainedModel'], feature: str='default') -> Tuple[str, Callable]:\n    model_type = model.config.model_type.replace('_', '-')\n    model_name = getattr(model, 'name', '')\n    model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name)\n    if feature not in model_features:\n        raise ValueError(f\"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}\")\n    return (model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature])", "docstring": "Check whether or not the model has the requested features.\n\nArgs:\nmodel: The model to export.\nfeature: The name of the feature to check if it is available.\n\nReturns:\n(str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties.", "source": "github-repos"}
{"code": "def _analyze_indexed_fields(indexed_fields):\n    result = {}\n    for field_name in indexed_fields:\n        if (not isinstance(field_name, basestring)):\n            raise TypeError(('Field names must be strings; got %r' % (field_name,)))\n        if ('.' not in field_name):\n            if (field_name in result):\n                raise ValueError(('Duplicate field name %s' % field_name))\n            result[field_name] = None\n        else:\n            (head, tail) = field_name.split('.', 1)\n            if (head not in result):\n                result[head] = [tail]\n            elif (result[head] is None):\n                raise ValueError(('Field name %s conflicts with ancestor %s' % (field_name, head)))\n            else:\n                result[head].append(tail)\n    return result", "docstring": "Internal helper to check a list of indexed fields.\n\nArgs:\nindexed_fields: A list of names, possibly dotted names.\n\n(A dotted name is a string containing names separated by dots,\ne.g. 'foo.bar.baz'.  An undotted name is a string containing no\ndots, e.g. 'foo'.)\n\nReturns:\nA dict whose keys are undotted names.  For each undotted name in\nthe argument, the dict contains that undotted name as a key with\nNone as a value.  For each dotted name in the argument, the dict\ncontains the first component as a key with a list of remainders as\nvalues.\n\nExample:\nIf the argument is ['foo.bar.baz', 'bar', 'foo.bletch'], the return\nvalue is {'foo': ['bar.baz', 'bletch'], 'bar': None}.\n\nRaises:\nTypeError if an argument is not a string.\nValueError for duplicate arguments and for conflicting arguments\n(when an undotted name also appears as the first component of\na dotted name).", "source": "codesearchnet"}
{"code": "def wait_for_task(self, task, timeout=(- 1)):\n    self.__wait_task_completion(task, timeout)\n    task = self.get(task)\n    logger.debug(('Waiting for task. Percentage complete: ' + str(task.get('computedPercentComplete'))))\n    logger.debug(('Waiting for task. Task state: ' + str(task.get('taskState'))))\n    task_response = self.__get_task_response(task)\n    logger.debug('Task completed')\n    return task_response", "docstring": "Wait for task execution and return associated resource.\n\nArgs:\ntask: task dict\ntimeout: timeout in seconds\n\nReturns:\nAssociated resource when creating or updating; True when deleting.", "source": "codesearchnet"}
{"code": "def get_list(self, obj_class, data, subset):\n        \n        url = obj_class.get_url(data)\n        if obj_class.can_list and obj_class.can_get:\n            if (subset and len(subset) == 1 and subset[0].upper() ==\n                    \"BASIC\") and obj_class is jssobjects.Computer:\n                url += \"/subset/basic\"\n\n            result = self.jss.get(url)\n\n            if obj_class.container:\n                result = result.find(obj_class.container)\n\n            return self._build_jss_object_list(result, obj_class)\n\n        \n\n        elif obj_class.can_get:\n            xmldata = self.jss.get(url)\n            return obj_class(self.jss, xmldata)\n        else:\n            raise JSSMethodNotAllowedError(\n                obj_class.__class__.__name__)", "docstring": "Get a list of objects as JSSObjectList.\n\nArgs:\nobj_class: The JSSObject subclass type to search for.\ndata: None\nsubset: Some objects support a subset for listing; namely\nComputer, with subset=\"basic\".\n\nReturns:\nJSSObjectList", "source": "juraj-google-style"}
{"code": "def get_subscript(self, sub_script_name):\n        \n\n        \n        tree = self.treeWidget()\n\n        items = tree.findItems(sub_script_name, QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive)\n\n        if len(items) >= 1:\n            \n            subscript_item = [sub_item for sub_item in items if isinstance(sub_item.value, Script)\n                               and sub_item.parent() is self]\n\n            subscript_item = subscript_item[0]\n        else:\n            raise ValueError('several elements with name ' + sub_script_name)\n\n\n        return subscript_item", "docstring": "finds the item that contains the sub_script with name sub_script_name\nArgs:\nsub_script_name: name of subscript\nReturns: B26QTreeItem in QTreeWidget which is a script", "source": "juraj-google-style"}
{"code": "def reverse_transform_table(self, table, table_meta, missing=None):\n        \n\n        if missing is None:\n            missing = self.missing\n\n        else:\n            self.missing = missing\n            warnings.warn(\n                DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning)\n\n        result = pd.DataFrame(index=table.index)\n        table_name = table_meta['name']\n\n        for field in table_meta['fields']:\n            new_column = self._reverse_transform_column(table, field, table_name)\n            if new_column is not None:\n                result[field['name']] = new_column\n\n        return result", "docstring": "Transform a `table` back to its original format.\n\nArgs:\ntable(pandas.DataFrame):     Contents of the table to be transformed.\n\ntable_meta(dict):   Metadata for the given table.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\npandas.DataFrame: Table in original format.", "source": "juraj-google-style"}
{"code": "def cudnn_gru(units, n_hidden, n_layers=1, trainable_initial_states=False, seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):\n    with tf.variable_scope(name, reuse=reuse):\n        gru = tf.contrib.cudnn_rnn.CudnnGRU(num_layers=n_layers, num_units=n_hidden)\n        if trainable_initial_states:\n            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])\n            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))\n        else:\n            init_h = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])\n        initial_h = (input_initial_h or init_h)\n        (h, h_last) = gru(tf.transpose(units, (1, 0, 2)), (initial_h,))\n        h = tf.transpose(h, (1, 0, 2))\n        h_last = tf.squeeze(h_last, axis=0)[(- 1)]\n        if (seq_lengths is not None):\n            indices = tf.stack([tf.range(tf.shape(h)[0]), (seq_lengths - 1)], axis=1)\n            h_last = tf.gather_nd(h, indices)\n        return (h, h_last)", "docstring": "Fast CuDNN GRU implementation\n\nArgs:\nunits: tf.Tensor with dimensions [B x T x F], where\nB - batch size\nT - number of tokens\nF - features\n\nn_hidden: dimensionality of hidden state\ntrainable_initial_states: whether to create a special trainable variable\nto initialize the hidden states of the network or use just zeros\nseq_lengths: tensor of sequence lengths with dimension [B]\nn_layers: number of layers\ninput_initial_h: initial hidden state, tensor\nname: name of the variable scope to use\nreuse:whether to reuse already initialized variable\n\nReturns:\nh - all hidden states along T dimension,\ntf.Tensor with dimensionality [B x T x F]\nh_last - last hidden state, tf.Tensor with dimensionality [B x H]", "source": "codesearchnet"}
{"code": "def _PrintWarningCounters(self, storage_counters):\n    \n    warnings_by_pathspec = storage_counters.get('warnings_by_path_spec', {})\n    warnings_by_parser_chain = storage_counters.get(\n        'warnings_by_parser_chain', {})\n    if not warnings_by_parser_chain:\n      self._output_writer.Write('No warnings stored.\\n\\n')\n      return\n\n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type, title='Warnings generated per parser',\n        column_names=['Parser (plugin) name', 'Number of warnings'])\n    for parser_chain, count in warnings_by_parser_chain.items():\n      parser_chain = parser_chain or '<No parser>'\n      table_view.AddRow([parser_chain, '{0:d}'.format(count)])\n    table_view.Write(self._output_writer)\n\n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type, title='Pathspecs with most warnings',\n        column_names=['Number of warnings', 'Pathspec'])\n\n    top_pathspecs = warnings_by_pathspec.most_common(10)\n    for pathspec, count in top_pathspecs:\n      for path_index, line in enumerate(pathspec.split('\\n')):\n        if not line:\n          continue\n\n        if path_index == 0:\n          table_view.AddRow(['{0:d}'.format(count), line])\n        else:\n          table_view.AddRow(['', line])\n\n    table_view.Write(self._output_writer)", "docstring": "Prints a summary of the warnings.\n\nArgs:\nstorage_counters (dict): storage counters.", "source": "juraj-google-style"}
{"code": "def auto_repr(obj: Any, with_addr: bool = False,\n              sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str:\n    \n    if sort_attrs:\n        keys = sorted(obj.__dict__.keys())\n    else:\n        keys = obj.__dict__.keys()\n    elements = [\"{}={}\".format(k, repr(getattr(obj, k))) for k in keys]\n    return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)", "docstring": "Convenience function for :func:`__repr__`.\nWorks its way through the object's ``__dict__`` and reports accordingly.\n\nArgs:\nobj: object to display\nwith_addr: include the memory address of ``obj``\nsort_attrs: sort the attributes into alphabetical order?\njoiner: string with which to join the elements\n\nReturns:\nstring: :func:`repr`-style representation", "source": "juraj-google-style"}
{"code": "def pull_datapackage(descriptor, name, backend, **backend_options):\n    warnings.warn('Functions \"push/pull_datapackage\" are deprecated. Please use \"Package\" class', UserWarning)\n    datapackage_name = name\n    plugin = import_module(('jsontableschema.plugins.%s' % backend))\n    storage = plugin.Storage(**backend_options)\n    resources = []\n    for table in storage.buckets:\n        schema = storage.describe(table)\n        base = os.path.dirname(descriptor)\n        (path, name) = _restore_path(table)\n        fullpath = os.path.join(base, path)\n        helpers.ensure_dir(fullpath)\n        with io.open(fullpath, 'wb') as file:\n            model = Schema(deepcopy(schema))\n            data = storage.iter(table)\n            writer = csv.writer(file, encoding='utf-8')\n            writer.writerow(model.headers)\n            for row in data:\n                writer.writerow(row)\n        resource = {'schema': schema, 'path': path}\n        if (name is not None):\n            resource['name'] = name\n        resources.append(resource)\n    mode = 'w'\n    encoding = 'utf-8'\n    if six.PY2:\n        mode = 'wb'\n        encoding = None\n    resources = _restore_resources(resources)\n    helpers.ensure_dir(descriptor)\n    with io.open(descriptor, mode=mode, encoding=encoding) as file:\n        descriptor = {'name': datapackage_name, 'resources': resources}\n        json.dump(descriptor, file, indent=4)\n    return storage", "docstring": "Pull Data Package from storage.\n\nAll parameters should be used as keyword arguments.\n\nArgs:\ndescriptor (str): path where to store descriptor\nname (str): name of the pulled datapackage\nbackend (str): backend name like `sql` or `bigquery`\nbackend_options (dict): backend options mentioned in backend docs", "source": "codesearchnet"}
{"code": "def update_location_centroid(point, cluster, max_distance, min_samples):\n    \n    cluster.append(point)\n    points = [p.gen2arr() for p in cluster]\n\n    \n    eps = estimate_meters_to_deg(max_distance, precision=6)\n\n    p_cluster = DBSCAN(eps=eps, min_samples=min_samples)\n    p_cluster.fit(points)\n\n    clusters = {}\n    for i, label in enumerate(p_cluster.labels_):\n        if label in clusters.keys():\n            clusters[label].append(points[i])\n        else:\n            clusters[label] = [points[i]]\n\n    centroids = []\n    biggest_centroid_l = -float(\"inf\")\n    biggest_centroid = None\n\n    for label, n_cluster in clusters.items():\n        centroid = compute_centroid(n_cluster)\n        centroids.append(centroid)\n\n        if label >= 0 and len(n_cluster) >= biggest_centroid_l:\n            biggest_centroid_l = len(n_cluster)\n            biggest_centroid = centroid\n\n    if biggest_centroid is None:\n        biggest_centroid = compute_centroid(points)\n\n    return biggest_centroid, cluster", "docstring": "Updates the centroid of a location cluster with another point\n\nArgs:\npoint (:obj:`Point`): Point to add to the cluster\ncluster (:obj:`list` of :obj:`Point`): Location cluster\nmax_distance (float): Max neighbour distance\nmin_samples (int): Minimum number of samples\nReturns:\n(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid\nand new point cluster (given cluster + given point)", "source": "juraj-google-style"}
{"code": "def _FormatDateTime(self, event):\n    \n    if not event.timestamp:\n      return 'N/A'\n\n    \n    \n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=event.timestamp)\n\n    year, month, day_of_month = date_time.GetDate()\n    hours, minutes, seconds = date_time.GetTimeOfDay()\n\n    try:\n      return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(\n          year, month, day_of_month, hours, minutes, seconds)\n    except (TypeError, ValueError):\n      self._ReportEventError(event, (\n          'unable to copy timestamp: {0!s} to a human readable date and '\n          'time. Defaulting to: \"0000-00-00 00:00:00\"').format(event.timestamp))\n\n      return '0000-00-00 00:00:00'", "docstring": "Formats the date and time.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: date and time string or \"N/A\" if no event timestamp is available.", "source": "juraj-google-style"}
{"code": "def on_message(self, event):\n        \n\n        metadata = self._parse_metadata(event)\n        message = Message(text=metadata['text'],\n                          metadata=metadata).__dict__\n        if message.get('text'):\n            message['text'] = self.find_and_replace_userids(message['text'])\n            message['text'] = self.find_and_replace_channel_refs(\n                message['text']\n            )\n        return message", "docstring": "Runs when a message event is received\n\nArgs:\nevent: RTM API event.\n\nReturns:\nLegobot.messge", "source": "juraj-google-style"}
{"code": "def write_rtt(jlink):\n    \n    try:\n        while jlink.connected():\n            bytes = list(bytearray(input(), \"utf-8\") + b\"\\x0A\\x00\")\n            bytes_written = jlink.rtt_write(0, bytes)\n    except Exception:\n        print(\"IO write thread exception, exiting...\")\n        thread.interrupt_main()\n        raise", "docstring": "Writes kayboard input to JLink RTT buffer #0.\n\nThis method is a loop that blocks waiting on stdin. When enter is pressed,\nLF and NUL bytes are added to the input and transmitted as a byte list.\nIf the JLink is disconnected, it will exit gracefully. If any other\nexceptions are raised, they will be caught and re-raised after interrupting\nthe main thread.\n\nArgs:\njlink (pylink.JLink): The JLink to write to.\n\nRaises:\nException on error.", "source": "juraj-google-style"}
{"code": "def __get_valid_form_data_elements(self, soup):\n    elements = []\n    for element in soup.find_all(['input', 'button', 'textarea', 'select']):\n        if element.has_attr('name'):\n            elements.append(element)\n    return elements", "docstring": "Get all valid form input elements.\n\nNote:\nAn element is valid when the value can be updated client-side\nand the element has a name attribute.\n\nArgs:\nsoup (obj): The BeautifulSoup form.\n\nReturns:\nlist(obj): Soup elements.", "source": "codesearchnet"}
{"code": "def make_target(url, extra_opts=None):\n    parts = compat.urlparse(url, allow_fragments=False)\n    scheme = parts.scheme.lower()\n    if (scheme in ['ftp', 'ftps']):\n        creds = (parts.username, parts.password)\n        tls = (scheme == 'ftps')\n        from ftpsync import ftp_target\n        target = ftp_target.FtpTarget(parts.path, parts.hostname, parts.port, username=creds[0], password=creds[1], tls=tls, timeout=None, extra_opts=extra_opts)\n    else:\n        target = FsTarget(url, extra_opts)\n    return target", "docstring": "Factory that creates `_Target` objects from URLs.\n\nFTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.\n\nNote:\nTLS is only supported on Python 2.7/3.2+.\nArgs:\nurl (str):\nextra_opts (dict, optional): Passed to Target constructor. Default: None.\nReturns:\n:class:`_Target`", "source": "codesearchnet"}
{"code": "def undo_last_change(self):\n    if (len(self.history) == 0):\n        raise IndexError(\"Can't undo. Already at oldest change.\")\n    if ('input_structure' not in self.history[(- 1)]):\n        raise IndexError(\"Can't undo. Latest history has no input_structure\")\n    h = self.history.pop()\n    self._undone.append((h, self.final_structure))\n    s = h['input_structure']\n    if isinstance(s, dict):\n        s = Structure.from_dict(s)\n    self.final_structure = s", "docstring": "Undo the last change in the TransformedStructure.\n\nRaises:\nIndexError: If already at the oldest change.", "source": "codesearchnet"}
{"code": "def create_context(self, state_hash, base_contexts, inputs, outputs):\n    for address in inputs:\n        if (not self.namespace_is_valid(address)):\n            raise CreateContextException('Address or namespace {} listed in inputs is not valid'.format(address))\n    for address in outputs:\n        if (not self.namespace_is_valid(address)):\n            raise CreateContextException('Address or namespace {} listed in outputs is not valid'.format(address))\n    addresses_to_find = [add for add in inputs if (len(add) == 70)]\n    (address_values, reads) = self._find_address_values_in_chain(base_contexts=base_contexts, addresses_to_find=addresses_to_find)\n    context = ExecutionContext(state_hash=state_hash, read_list=inputs, write_list=outputs, base_context_ids=base_contexts)\n    contexts_asked_not_found = [cid for cid in base_contexts if (cid not in self._contexts)]\n    if contexts_asked_not_found:\n        raise KeyError('Basing a new context off of context ids {} that are not in context manager'.format(contexts_asked_not_found))\n    context.create_initial(address_values)\n    self._contexts[context.session_id] = context\n    if reads:\n        context.create_prefetch(reads)\n        self._address_queue.put_nowait((context.session_id, state_hash, reads))\n    return context.session_id", "docstring": "Create a ExecutionContext to run a transaction against.\n\nArgs:\nstate_hash: (str): Merkle root to base state on.\nbase_contexts (list of str): Context ids of contexts that will\nhave their state applied to make this context.\ninputs (list of str): Addresses that can be read from.\noutputs (list of str): Addresses that can be written to.\nReturns:\ncontext_id (str): the unique context_id of the session", "source": "codesearchnet"}
{"code": "def subscribe(self, peer_jid):\n        \n        self.roster.subscribe(aioxmpp.JID.fromstr(peer_jid).bare())", "docstring": "Asks for subscription\n\nArgs:\npeer_jid (str): the JID you ask for subscriptiion", "source": "juraj-google-style"}
{"code": "def daylight_saving_start_day(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `daylight_saving_start_day`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `daylight_saving_start_day`')\n\n        self._daylight_saving_start_day = value", "docstring": "Corresponds to IDD Field `daylight_saving_start_day`\n\nArgs:\nvalue (str): value for IDD Field `daylight_saving_start_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def fix(self, value=None):\n        \n\n        if value is None:\n            self._impl.fix()\n        else:\n            self._impl.fix(value)", "docstring": "Fix all instances of this variable to a value if provided or to\ntheir current value otherwise.\n\nArgs:\nvalue: value to be set.", "source": "juraj-google-style"}
{"code": "def build_estimator(tf_transform_output, config, hidden_units=None):\n    transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy()\n    transformed_feature_spec.pop(taxi.transformed_name(taxi.LABEL_KEY))\n    real_valued_columns = [tf.feature_column.numeric_column(key, shape=()) for key in taxi.transformed_names(taxi.DENSE_FLOAT_FEATURE_KEYS)]\n    categorical_columns = [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.VOCAB_SIZE + taxi.OOV_SIZE, default_value=0) for key in taxi.transformed_names(taxi.VOCAB_FEATURE_KEYS)]\n    categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.FEATURE_BUCKET_COUNT, default_value=0) for key in taxi.transformed_names(taxi.BUCKET_FEATURE_KEYS)]\n    categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip(taxi.transformed_names(taxi.CATEGORICAL_FEATURE_KEYS), taxi.MAX_CATEGORICAL_FEATURE_VALUES)]\n    return tf_estimator.DNNLinearCombinedClassifier(config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25])", "docstring": "Build an estimator for predicting the tipping behavior of taxi riders.\n\nArgs:\ntf_transform_output: A TFTransformOutput.\nconfig: tf.contrib.learn.RunConfig defining the runtime environment for the\nestimator (including model_dir).\nhidden_units: [int], the layer sizes of the DNN (input layer first)\n\nReturns:\nResulting DNNLinearCombinedClassifier.", "source": "github-repos"}
{"code": "def delete_container_service(access_token, subscription_id, resource_group, service_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', resource_group,\n                        '/providers/Microsoft.ContainerService/ContainerServices/', service_name,\n                        '?api-version=', ACS_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a named container.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nservice_name (str): Name of container service.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "async def logs(self, service_id: str, *, details: bool=False, follow: bool=False, stdout: bool=False, stderr: bool=False, since: int=0, timestamps: bool=False, is_tty: bool=False, tail: str='all') -> Union[(str, AsyncIterator[str])]:\n    if ((stdout is False) and (stderr is False)):\n        raise TypeError('Need one of stdout or stderr')\n    params = {'details': details, 'follow': follow, 'stdout': stdout, 'stderr': stderr, 'since': since, 'timestamps': timestamps, 'tail': tail}\n    response = (await self.docker._query('services/{service_id}/logs'.format(service_id=service_id), method='GET', params=params))\n    return (await multiplexed_result(response, follow, is_tty=is_tty))", "docstring": "Retrieve logs of the given service\n\nArgs:\ndetails: show service context and extra details provided to logs\nfollow: return the logs as a stream.\nstdout: return logs from stdout\nstderr: return logs from stderr\nsince: return logs since this time, as a UNIX timestamp\ntimestamps: add timestamps to every log line\nis_tty: the service has a pseudo-TTY allocated\ntail: only return this number of log lines\nfrom the end of the logs, specify as an integer\nor `all` to output all log lines.", "source": "codesearchnet"}
{"code": "def _transform_value_range(self, images, original_range, target_range, dtype='float32'):\n    if original_range[0] == target_range[0] and original_range[1] == target_range[1]:\n        return images\n    images = self.backend.cast(images, dtype=dtype)\n    original_min_value, original_max_value = self._unwrap_value_range(original_range, dtype=dtype)\n    target_min_value, target_max_value = self._unwrap_value_range(target_range, dtype=dtype)\n    images = (images - original_min_value) / (original_max_value - original_min_value)\n    scale_factor = target_max_value - target_min_value\n    return images * scale_factor + target_min_value", "docstring": "Convert input values from `original_range` to `target_range`.\n\nThis function is intended to be used in preprocessing layers that\nrely upon color values. This allows us to assume internally that\nthe input tensor is always in the range `(0, 255)`.\n\nArgs:\nimages: the set of images to transform to the target range.\noriginal_range: the value range to transform from.\ntarget_range: the value range to transform to.\ndtype: the dtype to compute the conversion with,\ndefaults to \"float32\".\n\nReturns:\na new Tensor with values in the target range.\n\nExample:\n\n```python\noriginal_range = [0, 1]\ntarget_range = [0, 255]\nimages = layer.preprocessing.transform_value_range(\nimages,\noriginal_range,\ntarget_range\n)\nimages = ops.minimum(images + 10, 255)\nimages = layer.preprocessing.transform_value_range(\nimages,\ntarget_range,\noriginal_range\n)\n```", "source": "github-repos"}
{"code": "def RemapOperatorType(operator_type):\n    old_to_new = {'PoolOptions': 'Pool2DOptions', 'DepthwiseConvolutionOptions': 'DepthwiseConv2DOptions', 'ConvolutionOptions': 'Conv2DOptions', 'LocalResponseNormOptions': 'LocalResponseNormalizationOptions', 'BasicRNNOptions': 'RNNOptions'}\n    return old_to_new[operator_type] if operator_type in old_to_new else operator_type", "docstring": "Remap operator structs from old names to new names.\n\nArgs:\noperator_type: String representing the builtin operator data type\nstring. (see :schema.fbs).\nRaises:\nValueError: When the model has consistency problems.\nReturns:\nUpgraded builtin operator data type as a string.", "source": "github-repos"}
{"code": "def DownloadCollection(coll_path, target_path, token=None, overwrite=False, dump_client_info=False, flatten=False, max_threads=10):\n    completed_clients = set()\n    coll = _OpenCollectionPath(coll_path)\n    if (coll is None):\n        logging.error('%s is not a valid collection. Typo? Are you sure something was written to it?', coll_path)\n        return\n    thread_pool = threadpool.ThreadPool.Factory('Downloader', max_threads)\n    thread_pool.Start()\n    try:\n        collection_urn = coll.collection_id\n    except AttributeError:\n        collection_urn = coll.urn\n    try:\n        original_client_id = rdf_client.ClientURN(collection_urn.Split()[0])\n    except IOError:\n        original_client_id = None\n    logging.info('Expecting to download %s files', len(coll))\n    for grr_message in coll:\n        source = None\n        if isinstance(grr_message, rdf_flows.GrrMessage):\n            source = grr_message.source\n            grr_message = grr_message.payload\n        if isinstance(grr_message, rdfvalue.RDFURN):\n            urn = grr_message\n        elif isinstance(grr_message, rdf_client_fs.StatEntry):\n            urn = rdfvalue.RDFURN(grr_message.AFF4Path((source or original_client_id)))\n        elif isinstance(grr_message, rdf_file_finder.FileFinderResult):\n            urn = rdfvalue.RDFURN(grr_message.stat_entry.AFF4Path((source or original_client_id)))\n        elif isinstance(grr_message, collectors.ArtifactFilesDownloaderResult):\n            if grr_message.HasField('downloaded_file'):\n                urn = grr_message.downloaded_file.AFF4Path((source or original_client_id))\n            else:\n                continue\n        elif isinstance(grr_message, rdfvalue.RDFBytes):\n            try:\n                os.makedirs(target_path)\n            except OSError:\n                pass\n            try:\n                client_id = source.Split()[0]\n                with open(os.path.join(target_path, client_id), 'wb') as fd:\n                    fd.write(str(grr_message))\n            except AttributeError:\n                pass\n            continue\n        else:\n            continue\n        if dump_client_info:\n            client_id = urn.Split()[0]\n            re_match = aff4_grr.VFSGRRClient.CLIENT_ID_RE.match(client_id)\n            if (re_match and (client_id not in completed_clients)):\n                args = (rdf_client.ClientURN(client_id), target_path, token, overwrite)\n                thread_pool.AddTask(target=DumpClientYaml, args=args, name='ClientYamlDownloader')\n                completed_clients.add(client_id)\n        args = (urn, target_path, token, overwrite)\n        if flatten:\n            target = CopyAndSymlinkAFF4ToLocal\n        else:\n            target = CopyAFF4ToLocal\n        thread_pool.AddTask(target=target, args=args, name='Downloader')\n    thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)", "docstring": "Iterate through a Collection object downloading all files.\n\nArgs:\ncoll_path: Path to an AFF4 collection.\ntarget_path: Base directory to write to.\ntoken: Token for access.\noverwrite: If True, overwrite existing files.\ndump_client_info: If True, this will detect client paths, and dump a yaml\nversion of the client object to the root path. This is useful for seeing\nthe hostname/users of the machine the client id refers to.\nflatten: If True, produce a \"files\" flat folder with links to all the found\nfiles.\nmax_threads: Use this many threads to do the downloads.", "source": "codesearchnet"}
{"code": "def gather_data(options):\n    (qry_string, param_str) = qry_create(options)\n    qry_results = awsc.get_inst_info(qry_string)\n    i_info = process_results(qry_results)\n    return (i_info, param_str)", "docstring": "Get Data specific for command selected.\n\nCreate ec2 specific query and output title based on\noptions specified, retrieves the raw response data\nfrom aws, then processes it into the i_info dict,\nwhich is used throughout this module.\n\nArgs:\noptions (object): contains args and data from parser,\nthat has been adjusted by the command\nspecific functions as appropriate.\nReturns:\ni_info (dict): information on instances and details.\nparam_str (str): the title to display before the list.", "source": "codesearchnet"}
{"code": "def _collect_data(directory):\n  \n  \n  data_files = []\n  transcripts = [\n      filename for filename in os.listdir(directory)\n      if filename.endswith(\".csv\")\n  ]\n  for transcript in transcripts:\n    transcript_path = os.path.join(directory, transcript)\n    with open(transcript_path, \"r\") as transcript_file:\n      transcript_reader = csv.reader(transcript_file)\n      \n      _ = next(transcript_reader)\n      for transcript_line in transcript_reader:\n        media_name, label = transcript_line[0:2]\n        filename = os.path.join(directory, media_name)\n        data_files.append((media_name, filename, label))\n  return data_files", "docstring": "Traverses directory collecting input and target files.\n\nArgs:\ndirectory: base path to extracted audio and transcripts.\nReturns:\nlist of (media_base, media_filepath, label) tuples", "source": "juraj-google-style"}
{"code": "def create(self, data, **kwargs):\n        \n        self._check_missing_create_attrs(data)\n        server_data = self.gitlab.http_post(self.path, post_data=data,\n                                            **kwargs)\n        source_issue = ProjectIssue(self._parent.manager,\n                                    server_data['source_issue'])\n        target_issue = ProjectIssue(self._parent.manager,\n                                    server_data['target_issue'])\n        return source_issue, target_issue", "docstring": "Create a new object.\n\nArgs:\ndata (dict): parameters to send to the server to create the\nresource\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nReturns:\nRESTObject, RESTObject: The source and target issues\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def fillna(self: EventSetOrNode, value: float=0.0) -> EventSetOrNode:\n    from temporian.core.operators.fillna import fillna\n    return fillna(self, value)", "docstring": "Replaces all the NaN values with `value`.\n\nFeatures that cannot contain NaN values (e.g. integer or bytes features)\nare not impacted.\n\nUsage example:\n```python\n>>> import math\n>>> a = tp.event_set(\n...     timestamps=[0, 1, 3],\n...     features={\n...         \"f1\": [0., 10., math.nan],\n...         \"f2\": [\"a\",\"b\",\"\"]},\n... )\n\n>>> a.fillna()\nindexes: []\nfeatures: [('f1', float64), ('f2', str_)]\nevents:\n(3 events):\ntimestamps: [0. 1. 3.]\n'f1': [ 0. 10.  0.]\n'f2': [b'a' b'b' b'']\n...\n\n```\n\nArgs:\nvalue: Value to replace Nans with?\n\nReturns:\nEventSet without NaNs.", "source": "github-repos"}
{"code": "def merge_resources(resource1, resource2):\n    \n    merged = resource1.copy()\n    merged.update(resource2)\n    return merged", "docstring": "Updates a copy of resource1 with resource2 values and returns the merged dictionary.\n\nArgs:\nresource1: original resource\nresource2: resource to update resource1\n\nReturns:\ndict: merged resource", "source": "juraj-google-style"}
{"code": "def _restore_and_log_checkpoint(self, actor):\n    actor_id = self._worker.actor_id\n    try:\n        checkpoints = ray.actor.get_checkpoints_for_actor(actor_id)\n        if (len(checkpoints) > 0):\n            checkpoint_id = actor.load_checkpoint(actor_id, checkpoints)\n            if (checkpoint_id is not None):\n                msg = ('`load_checkpoint` must return a checkpoint id that ' + 'exists in the `available_checkpoints` list, or eone.')\n                assert any(((checkpoint_id == checkpoint.checkpoint_id) for checkpoint in checkpoints)), msg\n                self._worker.raylet_client.notify_actor_resumed_from_checkpoint(actor_id, checkpoint_id)\n    except Exception:\n        traceback_str = ray.utils.format_error_message(traceback.format_exc())\n        ray.utils.push_error_to_driver(self._worker, ray_constants.CHECKPOINT_PUSH_ERROR, traceback_str, driver_id=self._worker.task_driver_id)", "docstring": "Restore an actor from a checkpoint if available and log any errors.\n\nThis should only be called on workers that have just executed an actor\ncreation task.\n\nArgs:\nactor: The actor to restore from a checkpoint.", "source": "codesearchnet"}
{"code": "def get(self):\n    return self._get_helper(self._sorted_items, self._q)", "docstring": "Returns the current quantile value using the sorted list.\n\nCalculates the quantile using linear interpolation on the sorted values.\n\nReturns:\nfloat: The calculated quantile value. Returns NaN if the window is empty.", "source": "github-repos"}
{"code": "def matches_to_marker_results(df):\n    assert isinstance(df, pd.DataFrame)\n    from collections import defaultdict\n    d = defaultdict(list)\n    for (idx, row) in df.iterrows():\n        marker = row['marker']\n        d[marker].append(row)\n    marker_results = {}\n    for (k, v) in d.items():\n        if (len(v) > 1):\n            logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)\n            df_marker = pd.DataFrame(v)\n            df_marker.sort_values('slen', ascending=False, inplace=True)\n            for (i, r) in df_marker.iterrows():\n                allele = r['allele_name']\n                slen = r['slen']\n                logging.debug('Selecting allele %s from contig with length %s', allele, slen)\n                seq = r['sseq']\n                if ('-' in seq):\n                    logging.warning('Gaps found in allele. Removing gaps. %s', r)\n                    seq = seq.replace('-', '').upper()\n                    allele = allele_name(seq)\n                marker_results[k] = allele_result_dict(allele, seq, r.to_dict())\n                break\n        elif (len(v) == 1):\n            row = v[0]\n            seq = row['sseq']\n            if ('-' in seq):\n                logging.warning('Gaps found in allele. Removing gaps. %s', row)\n                seq = seq.replace('-', '').upper()\n            allele = allele_name(seq)\n            marker_results[k] = allele_result_dict(allele, seq, row.to_dict())\n        else:\n            err_msg = 'Empty list of matches for marker {}'.format(k)\n            logging.error(err_msg)\n            raise Exception(err_msg)\n    return marker_results", "docstring": "Perfect BLAST matches to marker results dict\n\nParse perfect BLAST matches to marker results dict.\n\n\nArgs:\ndf (pandas.DataFrame): DataFrame of perfect BLAST matches\n\nReturns:\ndict: cgMLST330 marker names to matching allele numbers", "source": "codesearchnet"}
{"code": "def apply_transformations(collection, transformations, select=None):\n    \n    for t in transformations:\n        kwargs = dict(t)\n        func = kwargs.pop('name')\n        cols = kwargs.pop('input', None)\n\n        if isinstance(func, string_types):\n            if func in ('and', 'or'):\n                func += '_'\n            if not hasattr(transform, func):\n                raise ValueError(\"No transformation '%s' found!\" % func)\n            func = getattr(transform, func)\n            func(collection, cols, **kwargs)\n\n    if select is not None:\n        transform.Select(collection, select)\n\n    return collection", "docstring": "Apply all transformations to the variables in the collection.\n\nArgs:\ntransformations (list): List of transformations to apply.\nselect (list): Optional list of names of variables to retain after all\ntransformations are applied.", "source": "juraj-google-style"}
{"code": "def get_el(el):\n    tag_name = el.elt.tagName.lower()\n    if (tag_name in {'input', 'textarea', 'select'}):\n        return el.value\n    else:\n        raise ValueError(('Getter for %s (%s) not implemented!' % (tag_name, el.id)))", "docstring": "Get value of given `el` tag element.\n\nAutomatically choose proper method to set the `value` based on the type\nof the `el`.\n\nArgs:\nel (obj): Element reference to the input you want to convert to\ntypeahead.\n\nReturns:\nstr: Value of the object.", "source": "codesearchnet"}
{"code": "def __init__(self, candidates: typing.Sequence[ValueSpecOrAnnotation], default: typing.Any=MISSING_VALUE, is_noneable: bool=False, frozen: bool=False):\n    if not isinstance(candidates, (tuple, list)) or len(candidates) < 2:\n        raise ValueError(f\"Argument 'candidates' must be a list of at least 2 elements. Encountered {candidates}.\")\n    candidates = [ValueSpec.from_annotation(c, auto_typing=True) for c in candidates]\n    candidates_by_type = {}\n    has_noneable_candidate = False\n    for i, c in enumerate(candidates):\n        if not isinstance(c, ValueSpec):\n            raise ValueError(f\"Items in 'candidates' must be ValueSpec objects.Encountered {c} at {i}.\")\n        if c.is_noneable:\n            has_noneable_candidate = True\n        spec_type = (c.__class__, getattr(c, '_value_type'))\n        if spec_type not in candidates_by_type:\n            candidates_by_type[spec_type] = []\n        candidates_by_type[spec_type].append(c)\n    for spec_type, cs in candidates_by_type.items():\n        if len(cs) > 1:\n            raise ValueError(f'Found {len(cs)} value specs of the same type {spec_type}.')\n    candidate_types = set()\n    no_value_type_check = False\n    for c in candidates:\n        child_value_type = getattr(c, '_value_type')\n        if child_value_type is None:\n            no_value_type_check = True\n        elif isinstance(child_value_type, tuple):\n            candidate_types.update(child_value_type)\n        else:\n            candidate_types.add(child_value_type)\n    self._candidates = candidates\n    union_value_type = None if no_value_type_check else tuple(candidate_types)\n    super().__init__(union_value_type, default, is_noneable=is_noneable or has_noneable_candidate, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\ncandidates: A sequence of value spec objects or their equivalence as the\nspec for candidate types.\ndefault: (Optional) default value of this spec.\nis_noneable: (Optional) If True, None is acceptable for this spec.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def address(self, compressed=True, testnet=False):\n        \n        return self._key.address(True, testnet)", "docstring": "Address property that returns the Base58Check\nencoded version of the HASH160.\n\nArgs:\ncompressed (bool): Whether or not the compressed key should\nbe used.\ntestnet (bool): Whether or not the key is intended for testnet\nusage. False indicates mainnet usage.\n\nReturns:\nbytes: Base58Check encoded string", "source": "juraj-google-style"}
{"code": "def _PrintTasksStatus(self, processing_status):\n    if (processing_status and processing_status.tasks_status):\n        tasks_status = processing_status.tasks_status\n        table_view = views.CLITabularTableView(column_names=['Tasks:', 'Queued', 'Processing', 'Merging', 'Abandoned', 'Total'], column_sizes=[15, 7, 15, 15, 15, 0])\n        table_view.AddRow(['', tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks])\n        self._output_writer.Write('\\n')\n        table_view.Write(self._output_writer)", "docstring": "Prints the status of the tasks.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "def GetMessages(file_protos):\n    for file_proto in file_protos:\n        _FACTORY.pool.Add(file_proto)\n    return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])", "docstring": "Builds a dictionary of all the messages available in a set of files.\n\nArgs:\nfile_protos: A sequence of file protos to build messages out of.\n\nReturns:\nA dictionary mapping proto names to the message classes. This will include\nany dependent messages as well as any messages defined in the same file as\na specified message.", "source": "codesearchnet"}
{"code": "def __init__(self, field_types):\n        \n        assert False not in [isinstance(e, WeldType) for e in field_types]\n        self.field_types = field_types", "docstring": "Summary\n\nArgs:\nfield_types (TYPE): Description", "source": "juraj-google-style"}
{"code": "def apply(self, score: Optional[float]) -> Optional[int]:\n    if score is None:\n        self._tracker.push(float('NaN'))\n        return None\n    self._tracker.push(score)\n    if math.isnan(score):\n        return self._missing_label\n    if score < self.threshold:\n        return self._normal_label\n    return self._outlier_label", "docstring": "Applies the quantile-based threshold to an anomaly score.\n\nUpdates the quantile tracker with the given score and classifies the score\nas normal or outlier based on the current quantile threshold.\n\nArgs:\nscore (Optional[float]): The input anomaly score.\n\nReturns:\nOptional[int]: The anomaly label:\n- `normal_label` if the score is less than the threshold.\n- `outlier_label` if the score is at or above the threshold.\n- `missing_label` if the score is `NaN` (detector not ready).\n- `None` if the score is `None` (detector ready, but unable to produce\nscore).", "source": "github-repos"}
{"code": "def top_k_with_unique(inputs, k):\n    unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))\n    (top_values, indices) = _create_topk_unique(unique_inputs, k)\n    top_values = tf.cast(top_values, inputs.dtype)\n    return (top_values, indices)", "docstring": "Finds the values and indices of the k largests entries.\n\nInstead of doing sort like tf.nn.top_k, this function finds the max value\nk times. The running time is proportional to k, which is be faster when k\nis small. The current implementation supports only inputs of rank 2.\nIn addition, iota is used to replace the lower bits of each element, this\nmakes the selection more stable when there are equal elements. The\noverhead is that output values are approximated.\n\nArgs:\ninputs: A tensor with rank of 2. [batch_size, original_size].\nk: An integer, number of top elements to select.\n\nReturns:\ntop_values: A tensor, the k largest elements in sorted order.\n[batch_size, k].\nindices: A tensor, indices of the top_values. [batch_size, k].", "source": "codesearchnet"}
{"code": "def AddFileEntry(self, path, file_entry_type=definitions.FILE_ENTRY_TYPE_FILE, file_data=None, link_data=None):\n    if (path in self._paths):\n        raise KeyError('File entry already set for path: {0:s}.'.format(path))\n    if (file_data and (file_entry_type != definitions.FILE_ENTRY_TYPE_FILE)):\n        raise ValueError('File data set for non-file file entry type.')\n    if (link_data and (file_entry_type != definitions.FILE_ENTRY_TYPE_LINK)):\n        raise ValueError('Link data set for non-link file entry type.')\n    if (file_data is not None):\n        path_data = file_data\n    elif (link_data is not None):\n        path_data = link_data\n    else:\n        path_data = None\n    self._paths[path] = (file_entry_type, path_data)", "docstring": "Adds a fake file entry.\n\nArgs:\npath (str): path of the file entry.\nfile_entry_type (Optional[str]): type of the file entry object.\nfile_data (Optional[bytes]): data of the fake file-like object.\nlink_data (Optional[bytes]): link data of the fake file entry object.\n\nRaises:\nKeyError: if the path already exists.\nValueError: if the file data is set but the file entry type is not a file\nor if the link data is set but the file entry type is not a link.", "source": "codesearchnet"}
{"code": "def refresh(self, token, timeout):\n    assert (token in self._dict), 'Lock must exist'\n    assert ((timeout == (- 1)) or (timeout > 0))\n    if ((timeout < 0) or (timeout > LockStorageDict.LOCK_TIME_OUT_MAX)):\n        timeout = LockStorageDict.LOCK_TIME_OUT_MAX\n    self._lock.acquire_write()\n    try:\n        lock = self._dict[token]\n        lock['timeout'] = timeout\n        lock['expire'] = (time.time() + timeout)\n        self._dict[token] = lock\n        self._flush()\n    finally:\n        self._lock.release()\n    return lock", "docstring": "Modify an existing lock's timeout.\n\ntoken:\nValid lock token.\ntimeout:\nSuggested lifetime in seconds (-1 for infinite).\nThe real expiration time may be shorter than requested!\nReturns:\nLock dictionary.\nRaises ValueError, if token is invalid.", "source": "codesearchnet"}
{"code": "def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:\n    if (not data['ok']):\n        raise exceptions.SlackAPIError(data.get('error', 'unknow_error'), headers, data)\n    if ('warning' in data):\n        LOG.warning('Slack API WARNING: %s', data['warning'])", "docstring": "Check request response for Slack API error\n\nArgs:\nheaders: Response headers\ndata: Response data\n\nRaises:\n:class:`slack.exceptions.SlackAPIError`", "source": "codesearchnet"}
{"code": "def styled_plot(*style_sheets):\n\n    def decorator(get_plot):\n\n        def wrapper(*args, fonts=None, style=None, no_base_style=False, **kwargs):\n            if no_base_style:\n                list_style = []\n            else:\n                list_style = list(style_sheets)\n            if (style is not None):\n                if isinstance(style, list):\n                    list_style += style\n                else:\n                    list_style += [style]\n            if (fonts is not None):\n                list_style += [{'font.family': 'sans-serif', 'font.sans-serif': fonts}]\n            matplotlib.pyplot.style.use(list_style)\n            return get_plot(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "Return a decorator that will apply matplotlib style sheets to a plot.\n\n``style_sheets`` is a base set of styles, which will be ignored if\n``no_base_style`` is set in the decorated function arguments.\n\nThe style will further be overwritten by any styles in the ``style``\noptional argument of the decorated function.\n\nArgs:\nstyle_sheets (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib\nsupported definition of a style sheet. Can be a list of style of\nstyle sheets.", "source": "codesearchnet"}
{"code": "def search(self, search_phrase, limit=None):\n    query_parts = ['SELECT identifier, type, name, similarity(name, :word) AS sml', 'FROM identifier_index', 'WHERE name % :word', 'ORDER BY sml DESC, name']\n    query_params = {'word': search_phrase}\n    if limit:\n        query_parts.append('LIMIT :limit')\n        query_params['limit'] = limit\n    query_parts.append(';')\n    query = text('\\n'.join(query_parts))\n    self.backend.library.database.set_connection_search_path()\n    results = self.execute(query, **query_params).fetchall()\n    for result in results:\n        (vid, type, name, score) = result\n        (yield IdentifierSearchResult(score=score, vid=vid, type=type, name=name))", "docstring": "Finds identifiers by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to return. None means without limit.\n\nReturns:\nlist of IdentifierSearchResult instances.", "source": "codesearchnet"}
{"code": "def from_origin_axis_angle(origin, axis, angle, angle_in_radians=False):\n    theta = (((angle * pi) / 180) if (not angle_in_radians) else angle)\n    a = origin[0]\n    b = origin[1]\n    c = origin[2]\n    u = axis[0]\n    v = axis[1]\n    w = axis[2]\n    u2 = (u * u)\n    v2 = (v * v)\n    w2 = (w * w)\n    cos_t = cos(theta)\n    sin_t = sin(theta)\n    l2 = ((u2 + v2) + w2)\n    l = sqrt(l2)\n    m11 = ((u2 + ((v2 + w2) * cos_t)) / l2)\n    m12 = ((((u * v) * (1 - cos_t)) - ((w * l) * sin_t)) / l2)\n    m13 = ((((u * w) * (1 - cos_t)) + ((v * l) * sin_t)) / l2)\n    m14 = (((((a * (v2 + w2)) - (u * ((b * v) + (c * w)))) + (((u * ((b * v) + (c * w))) - (a * (v2 + w2))) * cos_t)) + ((((b * w) - (c * v)) * l) * sin_t)) / l2)\n    m21 = ((((u * v) * (1 - cos_t)) + ((w * l) * sin_t)) / l2)\n    m22 = ((v2 + ((u2 + w2) * cos_t)) / l2)\n    m23 = ((((v * w) * (1 - cos_t)) - ((u * l) * sin_t)) / l2)\n    m24 = (((((b * (u2 + w2)) - (v * ((a * u) + (c * w)))) + (((v * ((a * u) + (c * w))) - (b * (u2 + w2))) * cos_t)) + ((((c * u) - (a * w)) * l) * sin_t)) / l2)\n    m31 = ((((u * w) * (1 - cos_t)) - ((v * l) * sin_t)) / l2)\n    m32 = ((((v * w) * (1 - cos_t)) + ((u * l) * sin_t)) / l2)\n    m33 = ((w2 + ((u2 + v2) * cos_t)) / l2)\n    m34 = (((((c * (u2 + v2)) - (w * ((a * u) + (b * v)))) + (((w * ((a * u) + (b * v))) - (c * (u2 + v2))) * cos_t)) + ((((a * v) - (b * u)) * l) * sin_t)) / l2)\n    return SymmOp([[m11, m12, m13, m14], [m21, m22, m23, m24], [m31, m32, m33, m34], [0, 0, 0, 1]])", "docstring": "Generates a SymmOp for a rotation about a given axis through an\norigin.\n\nArgs:\norigin (3x1 array): The origin which the axis passes through.\naxis (3x1 array): The axis of rotation in cartesian space. For\nexample, [1, 0, 0]indicates rotation about x-axis.\nangle (float): Angle of rotation.\nangle_in_radians (bool): Set to True if angles are given in\nradians. Or else, units of degrees are assumed.\n\nReturns:\nSymmOp.", "source": "codesearchnet"}
{"code": "def markdown(self, text, gfm=False, project=None, **kwargs):\n    post_data = {'text': text, 'gfm': gfm}\n    if (project is not None):\n        post_data['project'] = project\n    data = self.http_post('/markdown', post_data=post_data, **kwargs)\n    return data['html']", "docstring": "Render an arbitrary Markdown document.\n\nArgs:\ntext (str): The markdown text to render\ngfm (bool): Render text using GitLab Flavored Markdown. Default is\nFalse\nproject (str): Full path of a project used a context when `gfm` is\nTrue\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMarkdownError: If the server cannot perform the request\n\nReturns:\nstr: The HTML rendering of the markdown text.", "source": "codesearchnet"}
{"code": "def map_structure_with_atomic(is_atomic_fn, map_fn, nested):\n    if is_atomic_fn(nested):\n        return map_fn(nested)\n    if not nest.is_nested(nested):\n        raise ValueError('Received non-atomic and non-sequence element: {}'.format(nested))\n    if nest.is_mapping(nested):\n        values = [nested[k] for k in sorted(nested.keys())]\n    elif nest.is_attrs(nested):\n        values = _astuple(nested)\n    else:\n        values = nested\n    mapped_values = [map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values]\n    return nest._sequence_like(nested, mapped_values)", "docstring": "Maps the atomic elements of a nested structure.\n\nArgs:\nis_atomic_fn: A function that determines if an element of `nested` is\natomic.\nmap_fn: The function to apply to atomic elements of `nested`.\nnested: A nested structure.\n\nReturns:\nThe nested structure, with atomic elements mapped according to `map_fn`.\n\nRaises:\nValueError: If an element that is neither atomic nor a sequence is\nencountered.", "source": "github-repos"}
{"code": "def set_fog_density(self, density):\n    if ((density < 0) or (density > 1)):\n        raise HolodeckException('Fog density should be between 0 and 1')\n    self._should_write_to_command_buffer = True\n    command_to_send = ChangeFogDensityCommand(density)\n    self._commands.add_command(command_to_send)", "docstring": "Queue up a change fog density command. It will be applied when `tick` or `step` is called next.\nBy the next tick, the exponential height fog in the world will have the new density. If there is no fog in the\nworld, it will be automatically created with the given density.\n\nArgs:\ndensity (float): The new density value, between 0 and 1. The command will not be sent if the given\ndensity is invalid.", "source": "codesearchnet"}
{"code": "def _check_validity(cls, text):\n        \n\n        if not text[0].lstrip().startswith('1 ') or not text[1].lstrip().startswith('2 '):\n            raise ValueError(\"Line number check failed\")\n\n        for line in text:\n            line = line.strip()\n            if str(cls._checksum(line)) != line[-1]:\n                raise ValueError(\"Checksum validation failed\")", "docstring": "Check the validity of a TLE\n\nArgs:\ntext (tuple of str)\nRaise:\nValueError", "source": "juraj-google-style"}
{"code": "def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:\n    return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch.", "source": "github-repos"}
{"code": "def attention_mask_same_segment(query_segment, memory_segment=None, dtype=tf.float32):\n    memory_segment = rename_length_to_memory_length((memory_segment or query_segment))\n    return (mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * (- 1000000000.0))", "docstring": "Bias for attention where attention between segments is disallowed.\n\nArgs:\nquery_segment: a mtf.Tensor with shape [..., length_dim]\nmemory_segment: a mtf.Tensor with shape [..., memory_length_dim]\ndtype: a tf.dtype\n\nReturns:\na mtf.Tensor with shape [..., length_dim, memory_length_dim]", "source": "codesearchnet"}
{"code": "def references_json(references):\n    references_json = []\n    for r in references:\n        ref = r.ref\n        ref['attributes'] = r._to_json_like(include_defaults=False)\n        references_json.append(ref)\n    return references_json", "docstring": "Given a list of all models in a graph, return JSON representing\nthem and their properties.\n\nArgs:\nreferences (seq[Model]) :\nA list of models to convert to JSON\n\nReturns:\nlist", "source": "codesearchnet"}
{"code": "def lock(vcs, lock_object, wait=True):\n    \n    if wait:\n        timeout = -1\n    else:\n        timeout = 0\n    lock_path = _get_lock_path(vcs, lock_object)\n    lock = filelock.FileLock(lock_path)\n    with lock.acquire(timeout=timeout):\n        yield", "docstring": "A context manager that grabs the lock and releases it when done.\n\nThis blocks until the lock can be acquired.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nlock_object (Lock)\nwait (boolean) - whether to wait for the lock or error out\n\nRaises:\nTimeout", "source": "juraj-google-style"}
{"code": "def ScanSource(self, source_path):\n    if os.path.islink(source_path):\n        source_path = os.path.realpath(source_path)\n    if ((not source_path.startswith('\\\\\\\\.\\\\')) and (not os.path.exists(source_path))):\n        raise errors.SourceScannerError('No such device, file or directory: {0:s}.'.format(source_path))\n    scan_context = source_scanner.SourceScannerContext()\n    scan_context.OpenSourcePath(source_path)\n    try:\n        self._source_scanner.Scan(scan_context)\n    except (ValueError, dfvfs_errors.BackEndError) as exception:\n        raise errors.SourceScannerError('Unable to scan source with error: {0!s}.'.format(exception))\n    if (scan_context.source_type not in (scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE, scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE)):\n        scan_node = scan_context.GetRootScanNode()\n        self._source_path_specs.append(scan_node.path_spec)\n        return scan_context\n    scan_node = scan_context.GetRootScanNode()\n    while (len(scan_node.sub_nodes) == 1):\n        scan_node = scan_node.sub_nodes[0]\n    base_path_specs = []\n    if (scan_node.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):\n        self._ScanVolume(scan_context, scan_node, base_path_specs)\n    else:\n        partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node)\n        if (not partition_identifiers):\n            raise errors.SourceScannerError('No partitions found.')\n        for partition_identifier in partition_identifiers:\n            location = '/{0:s}'.format(partition_identifier)\n            sub_scan_node = scan_node.GetSubNodeByLocation(location)\n            self._ScanVolume(scan_context, sub_scan_node, base_path_specs)\n    if (not base_path_specs):\n        raise errors.SourceScannerError('No supported file system found in source.')\n    self._source_path_specs = base_path_specs\n    return scan_context", "docstring": "Scans the source path for volume and file systems.\n\nThis function sets the internal source path specification and source\ntype values.\n\nArgs:\nsource_path (str): path to the source.\n\nReturns:\ndfvfs.SourceScannerContext: source scanner context.\n\nRaises:\nSourceScannerError: if the format of or within the source is\nnot supported.", "source": "codesearchnet"}
{"code": "def step_preprocess(x, step, hparams):\n  \n  original_channel_size = common_layers.shape_list(x)[-1]\n\n  if hparams.add_position_timing_signal:\n    x = add_position_timing_signal(x, step, hparams)\n\n  if hparams.add_step_timing_signal:\n    x = add_step_timing_signal(x, step, hparams)\n\n  if ((hparams.add_position_timing_signal or hparams.add_position_timing_signal)\n      and hparams.add_or_concat_timing_signal == \"concat\"):\n    \n    x = common_layers.dense(\n        x, original_channel_size, activation=None, use_bias=False)\n\n  if hparams.add_sru:\n    x = common_layers.sru(x)\n\n  return x", "docstring": "Preprocess the input at the beginning of each step.\n\nArgs:\nx: input tensor\nstep: step\nhparams: model hyper-parameters\n\nReturns:\npreprocessed input.", "source": "juraj-google-style"}
{"code": "def output(self):\n    return self._get_node_attribute_at_index(0, 'output_tensors', 'output')", "docstring": "Retrieves the output tensor(s) of a layer.\n\nOnly returns the tensor(s) corresponding to the *first time*\nthe operation was called.\n\nReturns:\nOutput tensor or list of output tensors.", "source": "github-repos"}
{"code": "def convert_argument(self, arg_name, arg_value):\n        \n\n        self._ensure_loaded()\n\n        type_name = self.param_type(arg_name)\n        if type_name is None:\n            return arg_value\n\n        val = typeinfo.type_system.convert_to_type(arg_value, type_name)\n\n        validators = self.annotated_params[arg_name].validators\n        if len(validators) == 0:\n            return val\n\n        type_obj = typeinfo.type_system.get_type(type_name)\n\n        \n        \n        \n        try:\n            for validator_name, extra_args in validators:\n                if not hasattr(type_obj, validator_name):\n                    raise ValidationError(\"Could not find validator specified for argument\", argument=arg_name, validator_name=validator_name, type=str(type_obj), method=dir(type_obj))\n\n                validator = getattr(type_obj, validator_name)\n                validator(val, *extra_args)\n        except (ValueError, TypeError) as exc:\n            raise ValidationError(exc.args[0], argument=arg_name, arg_value=val)\n\n        return val", "docstring": "Given a parameter with type information, convert and validate it.\n\nArgs:\narg_name (str): The name of the argument to convert and validate\narg_value (object): The value to convert and validate\n\nReturns:\nobject: The converted value.", "source": "juraj-google-style"}
{"code": "def get_variables_in_scope(scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n  \n  scope_name = get_variable_scope_name(scope)\n\n  if scope_name:\n    \n    \n    \n    scope_name = re.escape(scope_name) + \"/\"\n\n  return tuple(tf.get_collection(collection, scope_name))", "docstring": "Returns a tuple `tf.Variable`s in a scope for a given collection.\n\nArgs:\nscope: `tf.VariableScope` or string to retrieve variables from.\ncollection: Collection to restrict query to. By default this is\n`tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable\nvariables such as moving averages.\n\nReturns:\nA tuple of `tf.Variable` objects.", "source": "juraj-google-style"}
{"code": "class Identity(Initializer):\n\n    @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n    def __init__(self, gain=1.0, dtype=dtypes.float32):\n        self.gain = gain\n        self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n\n    def __call__(self, shape, dtype=None, partition_info=None):\n        full_shape = shape if partition_info is None else partition_info.full_shape\n        if len(full_shape) != 2:\n            raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')\n        if dtype is None:\n            dtype = self.dtype\n        if isinstance(full_shape, tensor_shape.TensorShape):\n            full_shape = full_shape.as_list()\n        initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)\n        if partition_info is not None:\n            initializer = array_ops.slice(initializer, partition_info.var_offset, shape)\n        return self.gain * initializer\n\n    def get_config(self):\n        return {'gain': self.gain, 'dtype': self.dtype.name}", "docstring": "Initializer that generates the identity matrix.\n\nOnly use for 2D matrices.\n\nArgs:\ngain: Multiplicative factor to apply to the identity matrix.\ndtype: Default data type, used if no `dtype` argument is provided when\ncalling the initializer. Only floating point types are supported.", "source": "github-repos"}
{"code": "def __init__(self, encoding='utf-8'):\n    \n    super(CLIOutputWriter, self).__init__()\n    self._encoding = encoding", "docstring": "Initializes an output writer.\n\nArgs:\nencoding (Optional[str]): output encoding.", "source": "juraj-google-style"}
{"code": "def serialize(obj):\n    LOGGER.debug('serialize(%s)', obj)\n    if isinstance(obj, datetime.date):\n        return simplejson.dumps(obj, default=encoders.as_date)\n    elif hasattr(obj, '__dict__'):\n        return simplejson.dumps(obj, default=encoders.as_object)\n    return simplejson.dumps(obj)", "docstring": "Serialize the given object into JSON.\n\nArgs:\nobj: the object to be serialized.\n\nReturns:\n(str): JSON representation of the given object.", "source": "codesearchnet"}
{"code": "def pr_curves_impl(self, runs, tag):\n    \n    if self._db_connection_provider:\n      \n      db = self._db_connection_provider()\n\n      \n      \n      cursor = db.execute( % ','.join(['?'] * len(runs)), runs + [tag, metadata.PLUGIN_NAME])\n      response_mapping = {}\n      for (run, step, wall_time, data, dtype, shape, plugin_data) in cursor:\n        if run not in response_mapping:\n          response_mapping[run] = []\n        buf = np.frombuffer(data, dtype=tf.DType(dtype).as_numpy_dtype)\n        data_array = buf.reshape([int(i) for i in shape.split(',')])\n        plugin_data_proto = plugin_data_pb2.PrCurvePluginData()\n        string_buffer = np.frombuffer(plugin_data, dtype=np.dtype('b'))\n        plugin_data_proto.ParseFromString(tf.compat.as_bytes(\n            string_buffer.tostring()))\n        thresholds = self._compute_thresholds(plugin_data_proto.num_thresholds)\n        entry = self._make_pr_entry(step, wall_time, data_array, thresholds)\n        response_mapping[run].append(entry)\n    else:\n      \n      response_mapping = {}\n      for run in runs:\n        try:\n          tensor_events = self._multiplexer.Tensors(run, tag)\n        except KeyError:\n          raise ValueError(\n              'No PR curves could be found for run %r and tag %r' % (run, tag))\n\n        content = self._multiplexer.SummaryMetadata(\n            run, tag).plugin_data.content\n        pr_curve_data = metadata.parse_plugin_metadata(content)\n        thresholds = self._compute_thresholds(pr_curve_data.num_thresholds)\n        response_mapping[run] = [\n            self._process_tensor_event(e, thresholds) for e in tensor_events]\n    return response_mapping", "docstring": "Creates the JSON object for the PR curves response for a run-tag combo.\n\nArguments:\nruns: A list of runs to fetch the curves for.\ntag: The tag to fetch the curves for.\n\nRaises:\nValueError: If no PR curves could be fetched for a run and tag.\n\nReturns:\nThe JSON object for the PR curves route response.", "source": "juraj-google-style"}
{"code": "def write_csv(data, file_name, encoding='utf-8'):\n    name_extension = (len(data) > 1)\n    (root, ext) = os.path.splitext(file_name)\n    for (i, sheet) in enumerate(data):\n        fname = (file_name if (not name_extension) else (((root + '_') + str(i)) + ext))\n        with open(fname, 'wb') as date_file:\n            csv_file = csv.writer(date_file, encoding=encoding)\n            for line in sheet:\n                csv_file.writerow(line)", "docstring": "Writes out to csv format.\n\nArgs:\ndata: 2D list of tables/worksheets.\nfile_name: Name of the output file.", "source": "codesearchnet"}
{"code": "def go_from(self, vertex):\n        \n        if self.vertex_out:\n            self.vertex_out.edges_out.remove(self)\n        self.vertex_out = vertex\n        vertex.edges_out.add(self)", "docstring": "Tell the edge to go out from this vertex.\n\nArgs:\nvertex (Vertex): vertex to go from.", "source": "juraj-google-style"}
{"code": "def plot_projectors(self, ax=None, fontsize=12, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax)\n    title = kwargs.pop('title', 'Projectors')\n    ax.grid(True)\n    ax.set_xlabel('r [Bohr]')\n    ax.set_ylabel('$r\\\\tilde p\\\\, [Bohr]^{-\\\\frac{1}{2}}$')\n    for (state, rfunc) in self.projector_functions.items():\n        ax.plot(rfunc.mesh, (rfunc.mesh * rfunc.values), label=('TPROJ: ' + state))\n    ax.legend(loc='best', shadow=True, fontsize=fontsize)\n    return fig", "docstring": "Plot the PAW projectors.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns: `matplotlib` figure", "source": "codesearchnet"}
{"code": "def __init__(self, X, y, batch_size, process_fn=None):\n        \n        self.X = X\n        self.y = y\n        self.batch_size = batch_size\n        self.process_fn = process_fn or (lambda x: x)\n\n        self.pos_indices = np.where(y == 1)[0]\n        self.neg_indices = np.where(y == 0)[0]\n        self.n = min(len(self.pos_indices), len(self.neg_indices))\n        self._index_array = None", "docstring": "A `Sequence` implementation that returns balanced `y` by undersampling majority class.\n\nArgs:\nX: The numpy array of inputs.\ny: The numpy array of targets.\nbatch_size: The generator mini-batch size.\nprocess_fn: The preprocessing function to apply on `X`", "source": "juraj-google-style"}
{"code": "def add_timeline_to_sketch(self, sketch_id, index_id):\n    \n    resource_url = '{0:s}/sketches/{1:d}/timelines/'.format(\n        self.api_base_url, sketch_id)\n    form_data = {'timeline': [index_id]}\n    self.session.post(resource_url, json=form_data)", "docstring": "Associate the specified timeline and sketch.\n\nArgs:\nsketch_id (int): ID of sketch\nindex_id (int): ID of timeline to add to sketch", "source": "juraj-google-style"}
{"code": "def fetch_local_package(self, config):\n        \n        \n        self.update_paths_and_config(config=config,\n                                     pkg_dir_name=config['source'],\n                                     pkg_cache_dir=os.getcwd())", "docstring": "Make a local path available to current stacker config.\n\nArgs:\nconfig (dict): 'local' path config dictionary", "source": "juraj-google-style"}
{"code": "def word_score(word, input_letters, questions=0):\n    \n\n    score = 0\n    bingo = 0\n    filled_by_blanks = []\n    rack = list(input_letters)  \n    for letter in word:\n        if letter in rack:\n            bingo += 1\n            score += letter_score(letter)\n            rack.remove(letter)\n        else:\n            filled_by_blanks.append(letter_score(letter))\n\n    \n    \n    for blank_score in sorted(filled_by_blanks, reverse=True):\n        if questions > 0:\n            score += blank_score\n            questions -= 1\n\n    \n    if bingo > 6:\n        score += 50\n\n    return score", "docstring": "Checks the Scrabble score of a single word.\n\nArgs:\nword: a string to check the Scrabble score of\ninput_letters: the letters in our rack\nquestions: integer of the tiles already on the board to build on\n\nReturns:\nan integer Scrabble score amount for the word", "source": "juraj-google-style"}
{"code": "def create_virtual_env(venv_path: str,\n                       requirements_paths: Iterable[str],\n                       python_path: str,\n                       verbose: bool) -> None:\n    \n    shell_tools.run_cmd('virtualenv',\n                        None if verbose else '--quiet',\n                        '-p',\n                        python_path,\n                        venv_path,\n                        out=sys.stderr)\n    pip_path = os.path.join(venv_path, 'bin', 'pip')\n    for req_path in requirements_paths:\n        shell_tools.run_cmd(pip_path,\n                            'install',\n                            None if verbose else '--quiet',\n                            '-r',\n                            req_path,\n                            out=sys.stderr)", "docstring": "Creates a new virtual environment and then installs dependencies.\n\nArgs:\nvenv_path: Where to put the virtual environment's state.\nrequirements_paths: Location of requirements files to -r install.\npython_path: The python binary to use.\nverbose: When set, more progress output is produced.", "source": "juraj-google-style"}
{"code": "def FoldByteStream(self, mapped_value, **unused_kwargs):  \n    \n    raise errors.FoldingError(\n        'Unable to fold {0:s} data type into byte stream'.format(\n            self._data_type_definition.TYPE_INDICATOR))", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def _ParseTokenType(self, file_object, file_offset):\n    \n    token_type_map = self._GetDataTypeMap('uint8')\n\n    token_type, _ = self._ReadStructureFromFileObject(\n        file_object, file_offset, token_type_map)\n\n    return token_type", "docstring": "Parses a token type.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\nfile_offset (int): offset of the token relative to the start of\nthe file-like object.\n\nReturns:\nint: token type", "source": "juraj-google-style"}
{"code": "def _get_color(self, age):\n    if (age == self.tree.age):\n        return self.leaf_color\n    color = self.stem_color\n    tree = self.tree\n    if (len(color) == 3):\n        return color\n    diff = [(color[(i + 3)] - color[i]) for i in range(3)]\n    per_age = [(diff[i] / (tree.age - 1)) for i in range(3)]\n    return tuple([int((color[i] + (per_age[i] * age))) for i in range(3)])", "docstring": "Get the fill color depending on age.\n\nArgs:\nage (int): The age of the branch/es\n\nReturns:\ntuple: (r, g, b)", "source": "codesearchnet"}
{"code": "def next(self):\n    try:\n        return six.next(self._wrapped)\n    except grpc.RpcError as exc:\n        six.raise_from(exceptions.from_grpc_error(exc), exc)", "docstring": "Get the next response from the stream.\n\nReturns:\nprotobuf.Message: A single response from the stream.", "source": "codesearchnet"}
{"code": "def _create_rand_mask_from_inputs(self, from_blocked_mask, to_blocked_mask, broadcasted_rand_attn, num_attention_heads, num_random_blocks, batch_size, from_seq_length, from_block_size):\n    num_windows = from_seq_length \n    rand_mask = self.jax_gather(to_blocked_mask, broadcasted_rand_attn, batch_dims=1)\n    rand_mask = rand_mask.reshape(batch_size, num_attention_heads, num_windows, num_random_blocks * from_block_size)\n    rand_mask = jnp.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask)\n    return rand_mask", "docstring": "Create 3D attention mask from a 2D tensor mask.\n\nArgs:\nfrom_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size].\nto_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size].\nbroadcasted_rand_attn:\n[batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks]\nnum_attention_heads: int. Number of attention heads.\nnum_random_blocks: int. Number of random chunks per row.\nbatch_size: int. Batch size for computation.\nfrom_seq_length: int. length of from sequence.\nfrom_block_size: int. size of block in from sequence.\n\nReturns:\nfloat Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,\nfrom_block_size, num_rand_blocks*to_block_size].", "source": "github-repos"}
{"code": "def destroy_elb(app='', env='dev', region='us-east-1', **_):\n    task_json = get_template(template_file='destroy/destroy_elb.json.j2', app=app, env=env, region=region, vpc=get_vpc_id(account=env, region=region))\n    wait_for_task(task_json)\n    return True", "docstring": "Destroy ELB Resources.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\n\nReturns:\nTrue upon successful completion.", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = DFineFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DFineFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def _spanner_io_read_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n    if (pipeline := test_spec.get('pipeline', None)):\n        for transform in pipeline.get('transforms', []):\n            if transform.get('type', '').startswith('ReadFromSpanner'):\n                config = transform['config']\n                instance, database = (config['instance_id'], config['database_id'])\n                if (table := (config.get('table', None) is None)):\n                    table = config.get('query', '').split('FROM')[-1].strip()\n                transform['type'] = 'Create'\n                transform['config'] = {k: v for k, v in config.items() if k.startswith('__')}\n                elements = INPUT_TABLES[str(instance), str(database), str(table)]\n                if config.get('query', None):\n                    config['query'].replace('select ', 'SELECT ').replace(' from ', ' FROM ')\n                    columns = set(''.join(config['query'].split('SELECT ')[1:]).split(' FROM', maxsplit=1)[0].split(', '))\n                    if columns != {'*'}:\n                        elements = [{column: element[column] for column in element if column in columns} for element in elements]\n                transform['config']['elements'] = elements\n    return test_spec", "docstring": "Preprocessor for tests that involve reading from Spanner.\n\nThis preprocessor replaces any ReadFromSpanner transform with a Create\ntransform that reads from a predefined in-memory dictionary. This allows\nthe test to verify the pipeline's correctness without relying on external\nSpanner instances.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with ReadFromSpanner transforms replaced.", "source": "github-repos"}
{"code": "def check_end_blocks(frame):\n    \n    try:\n        try:\n            module_name = frame.f_globals['__name__']\n        except KeyError:\n            warnings.warn(\n                'Can not get the source of an uknown module. '\n                'End-of-block syntax check is skipped.',\n                EndSyntaxWarning)\n            return\n        end\n\n        filename = frame.f_globals.get('__file__', '<unknown>')\n        try:\n            source = inspect.getsource(sys.modules[module_name])\n        except Exception:\n            warnings.warn(\n                'Can not get the source of module \"%s\". '\n                'End-of-block syntax check is skipped.' % (module_name,),\n                EndSyntaxWarning)\n            return\n        end\n    finally:\n        del frame\n    end\n\n    root = ast.parse(source)\n    for node in ast.walk(root):\n        bodies = get_compound_bodies(node)\n        if not bodies:\n            continue\n        end\n\n        \n        if (isinstance(node, ast.If) and\n                len(node.orelse) == 1 and\n                isinstance(node.orelse[0], ast.If)):\n            continue\n        end\n\n        \n        \n        \n        if (PY2 and\n                isinstance(node, ast.TryFinally) and\n                len(node.body) == 1 and\n                isinstance(node.body[0], ast.TryExcept)):\n            continue\n        end\n\n        for body in bodies:\n            skip_next = False\n            for i, child in enumerate(body):\n                if skip_next:\n                    skip_next = False\n                elif is_end_node(child):\n                    raise SyntaxError(\n                        '\"end\" does not close a block.',\n                        [filename, child.lineno, child.col_offset,\n                         source.splitlines()[child.lineno - 1] + '\\n'])\n                elif get_compound_bodies(child):\n                    try:\n                        ok = is_end_node(body[i + 1])\n                    except IndexError:\n                        ok = False\n                    end\n                    if not ok:\n                        raise SyntaxError(\n                            'This block is not closed with \"end\".',\n                            [filename, child.lineno, child.col_offset,\n                             source.splitlines()[child.lineno - 1] + '\\n'])\n                    end\n                    skip_next = True\n                end\n            end\n        end\n    end", "docstring": "Performs end-block check.\n\nArgs:\nframe: A frame object of the module to be checked.\n\nRaises:\nSyntaxError: If check failed.", "source": "juraj-google-style"}
{"code": "def AddForwardedIp(self, address, interface):\n    \n    for ip in list(netaddr.IPNetwork(address)):\n      self._RunIfconfig(args=[interface, 'alias', '%s/32' % str(ip)])", "docstring": "Configure a new IP address on the network interface.\n\nArgs:\naddress: string, the IP address to configure.\ninterface: string, the output device to use.", "source": "juraj-google-style"}
{"code": "def get_license_from_url(url):\n    \n    if not url:\n        return\n\n    split_url = urlsplit(url, scheme='http')\n\n    if split_url.netloc.lower() == 'creativecommons.org':\n        if 'publicdomain' in split_url.path:\n            match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path)\n            if match is None:\n                license = ['public domain']\n            else:\n                license = ['CC0']\n                license.extend(part for part in match.groups() if part)\n        else:\n            license = ['CC']\n            match = _RE_LICENSE_URL.match(split_url.path)\n            license.extend(part.upper() for part in match.groups() if part)\n    elif split_url.netloc == 'arxiv.org':\n        license = ['arXiv']\n        match = _RE_LICENSE_URL.match(split_url.path)\n        license.extend(part for part in match.groups() if part)\n    else:\n        raise ValueError('Unknown license URL')\n\n    return u' '.join(license)", "docstring": "Get the license abbreviation from an URL.\n\nArgs:\nurl(str): canonical url of the license.\n\nReturns:\nstr: the corresponding license abbreviation.\n\nRaises:\nValueError: when the url is not recognized", "source": "juraj-google-style"}
{"code": "def deepgetattr(obj, name, default=_UNSPECIFIED):\n    try:\n        if ('.' in name):\n            (attr, subname) = name.split('.', 1)\n            return deepgetattr(getattr(obj, attr), subname, default)\n        else:\n            return getattr(obj, name)\n    except AttributeError:\n        if (default is _UNSPECIFIED):\n            raise\n        else:\n            return default", "docstring": "Try to retrieve the given attribute of an object, digging on '.'.\n\nThis is an extended getattr, digging deeper if '.' is found.\n\nArgs:\nobj (object): the object of which an attribute should be read\nname (str): the name of an attribute to look up.\ndefault (object): the default value to use if the attribute wasn't found\n\nReturns:\nthe attribute pointed to by 'name', splitting on '.'.\n\nRaises:\nAttributeError: if obj has no 'name' attribute.", "source": "codesearchnet"}
{"code": "def process(self, tensor):\n    for processor in self.preprocessors:\n        tensor = processor.process(tensor=tensor)\n    return tensor", "docstring": "Process state.\n\nArgs:\ntensor: tensor to process\n\nReturns: processed state", "source": "codesearchnet"}
{"code": "def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, kwargs: Optional[Dict[str, Any]]=None, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], source_caller: Optional[Caller[RequestT, ResponseT]]=None):\n    self.request_coder = request_coder\n    self.response_coder = response_coder\n    self.redis_caller = _RedisCaller(host, port, time_to_live, request_coder=self.request_coder, response_coder=self.response_coder, kwargs=kwargs, source_caller=source_caller, mode=_RedisMode.READ)", "docstring": "Args:\nhost (str): The hostname or IP address of the Redis server.\nport (int): The port number of the Redis server.\ntime_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for\nrecords stored in Redis. Provide an integer (in seconds) or a\n`datetime.timedelta` object.\nkwargs: Optional(Dict[str, Any]) additional keyword arguments that\nare required to connect to your redis server. Same as `redis.Redis()`.\nrequest_coder: (Optional[`coders.Coder`]) coder for requests stored\nin Redis.\nresponse_coder: (Optional[`coders.Coder`]) coder for decoding responses\nreceived from Redis.\nsource_caller: (Optional[`Caller`]): The source caller using this Redis\ncache in case of fetching the cache request to store in Redis.", "source": "github-repos"}
{"code": "def __init__(self, status, reason, message):\n        \n        msg = \"{0}: {1} - {2}\".format(status.name, reason.name, message)\n        super(KmipOperationFailure, self).__init__(msg)\n        self.status = status\n        self.reason = reason\n        self.message = message", "docstring": "Construct the error message and attributes for the KMIP operation\nfailure.\n\nArgs:\nstatus: a ResultStatus enumeration\nreason: a ResultReason enumeration\nmessage: a string providing additional error information", "source": "juraj-google-style"}
{"code": "def evaluate(self, index):\n    if (self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE):\n        self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index)))\n        return None\n    condition_match = self.condition_data[index][3]\n    if (condition_match is None):\n        condition_match = ConditionMatchTypes.EXACT\n    if (condition_match not in self.EVALUATORS_BY_MATCH_TYPE):\n        self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index)))\n        return None\n    if (condition_match != ConditionMatchTypes.EXISTS):\n        attribute_key = self.condition_data[index][0]\n        if (attribute_key not in self.attributes):\n            self.logger.debug(audience_logs.MISSING_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))\n            return None\n        if (self.attributes.get(attribute_key) is None):\n            self.logger.debug(audience_logs.NULL_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))\n            return None\n    return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index)", "docstring": "Given a custom attribute audience condition and user attributes, evaluate the\ncondition against the attributes.\n\nArgs:\nindex: Index of the condition to be evaluated.\n\nReturns:\nBoolean:\n- True if the user attributes match the given condition.\n- False if the user attributes don't match the given condition.\nNone: if the user attributes and condition can't be evaluated.", "source": "codesearchnet"}
{"code": "def _convert_param_to_tensor(param):\n    param_t = tf.convert_to_tensor(param, dtype=dtype)\n    return param_t * tf.ones(shape=dim, dtype=dtype)", "docstring": "Converts `param` to `Tesnor`.\n\nArgs:\nparam: `Scalar` or `Tensor` with shape `batch_shape` + [1].\n\nReturns:\n`param` if it `Tensor`, if it is `Scalar` convert it to `Tensor` with\n[1] shape.", "source": "github-repos"}
{"code": "def check_version(version, range_=None):\n    \n    if range_ and version not in range_:\n        raise RezBindError(\"found version %s is not within range %s\"\n                           % (str(version), str(range_)))", "docstring": "Check that the found software version is within supplied range.\n\nArgs:\nversion: Version of the package as a Version object.\nrange_: Allowable version range as a VersionRange object.", "source": "juraj-google-style"}
{"code": "def set_lacp_mode(self, name, mode):\n    if (mode not in ['on', 'passive', 'active']):\n        return False\n    grpid = re.search('(\\\\d+)', name).group()\n    remove_commands = list()\n    add_commands = list()\n    for member in self.get_members(name):\n        remove_commands.append(('interface %s' % member))\n        remove_commands.append(('no channel-group %s' % grpid))\n        add_commands.append(('interface %s' % member))\n        add_commands.append(('channel-group %s mode %s' % (grpid, mode)))\n    return self.configure((remove_commands + add_commands))", "docstring": "Configures the LACP mode of the member interfaces\n\nArgs:\nname(str): The Port-Channel interface name to configure the\nLACP mode\n\nmode(str): The LACP mode to configure the member interfaces to.\nValid values are 'on, 'passive', 'active'\n\nReturns:\nTrue if the operation succeeds otherwise False", "source": "codesearchnet"}
{"code": "def __init__(self, channel, service_name, stub=None):\n    \n    if stub:\n      self._stub = stub\n    else:\n      self._stub = admin_pb2_grpc.AdminStub(channel)\n\n    self._service_name = service_name\n\n    self._shutdown = False\n    self._shutdown_cv = threading.Condition()\n    self._keep_alive_thread = threading.Thread(target=self._KeepAliveLoop)\n    self._keep_alive_thread.daemon = True\n    self._keep_alive_thread.start()", "docstring": "Create a Sender.\n\nArgs:\nchannel: The grpc.Channel over which we should send messages.\nservice_name: The name of the service that we are running as.\nstub: If set, used instead of AdminStub(channel). Intended to ease\nunit tests.", "source": "juraj-google-style"}
{"code": "def add_tree(self, tree, parent=None):\n    if (tree.path in self.path_db):\n        self.remove_tree_by_path(tree.path)\n    for index in tree.indexes:\n        if (not getattr(tree, index)):\n            continue\n        self._add_to(getattr(self, (index + '_db')), getattr(tree, index), tree)\n    if parent:\n        self._add_to(self.parent_db, tree.path, parent)\n    for sub_tree in tree.sub_trees:\n        assert sub_tree.path.startswith(tree.path)\n    for sub_tree in tree.sub_trees:\n        self.add_tree(sub_tree, parent=tree)", "docstring": "Add `tree` into database.\n\nArgs:\ntree (obj): :class:`.Tree` instance.\nparent (ref, default None): Reference to parent tree. This is used\nfor all sub-trees in recursive call.", "source": "codesearchnet"}
{"code": "def _resolve_attribute(self, attribute):\n        \n        value = self.attributes[attribute]\n        if not value:\n            return None\n        resolved_value = re.sub('\\$\\((.*?)\\)',self._resolve_attribute_match, value)\n        return resolved_value", "docstring": "Recursively replaces references to other attributes with their value.\n\nArgs:\nattribute (str): The name of the attribute to resolve.\n\nReturns:\nstr: The resolved value of 'attribute'.", "source": "juraj-google-style"}
{"code": "def angle(x):\n    if any_symbolic_tensors((x,)):\n        return Angle().symbolic_call(x)\n    return backend.numpy.angle(x)", "docstring": "Element-wise angle of a complex tensor.\n\nArguments:\nx: Input tensor. Can be real or complex.\n\nReturns:\nOutput tensor of same shape as x. containing the angle of each element\n(in radians).\n\nExample:\n>>> x = keras.ops.convert_to_tensor([[1 + 3j, 2 - 5j], [4 - 3j, 3 + 2j]])\n>>> keras.ops.angle(x)\narray([[ 1.2490457, -1.19029  ],\n[-0.6435011,  0.5880026]], dtype=float32)", "source": "github-repos"}
{"code": "def cancel(self, invoice_id, **kwargs):\n    url = '{}/{}/cancel'.format(self.base_url, invoice_id)\n    return self.post_url(url, {}, **kwargs)", "docstring": "Cancel an unpaid Invoice with given ID via API\nIt can only be called on an invoice that is not in the paid state.\n\nArgs:\ninvoice_id : Id for cancel the invoice\nReturns:\nThe response for the API will be the invoice entity, similar to create/update API response, with status attribute's value as cancelled", "source": "codesearchnet"}
{"code": "def mkdir_p(dirname):\n    assert (dirname is not None)\n    if ((dirname == '') or os.path.isdir(dirname)):\n        return\n    try:\n        os.makedirs(dirname)\n    except OSError as e:\n        if (e.errno != errno.EEXIST):\n            raise e", "docstring": "Like \"mkdir -p\", make a dir recursively, but do nothing if the dir exists\n\nArgs:\ndirname(str):", "source": "codesearchnet"}
{"code": "def configure_bigchaindb(command):\n    \n    @functools.wraps(command)\n    def configure(args):\n        config_from_cmdline = None\n        try:\n            if args.log_level is not None:\n                config_from_cmdline = {\n                    'log': {\n                        'level_console': args.log_level,\n                        'level_logfile': args.log_level,\n                    },\n                    'server': {'loglevel': args.log_level},\n                }\n        except AttributeError:\n            pass\n        bigchaindb.config_utils.autoconfigure(\n            filename=args.config, config=config_from_cmdline, force=True)\n        command(args)\n\n    return configure", "docstring": "Decorator to be used by command line functions, such that the\nconfiguration of bigchaindb is performed before the execution of\nthe command.\n\nArgs:\ncommand: The command to decorate.\n\nReturns:\nThe command wrapper function.", "source": "juraj-google-style"}
{"code": "def triangle(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:\n    \n    return amp*(-2*np.abs(sawtooth(times, 1, period, (phase-np.pi/2)/2)) + 1).astype(np.complex_)", "docstring": "Continuous triangle wave.\n\nArgs:\ntimes: Times to output wave for.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt.\nphase: Pulse phase.", "source": "juraj-google-style"}
{"code": "def snake_to_camel(name):\n    \n    ret = \"\".join(x.title() for x in name.split(\"_\"))\n    ret = ret[0].lower() + ret[1:]\n    return ret", "docstring": "Takes a snake_field_name and returns a camelCaseFieldName\n\nArgs:\nname (str): E.g. snake_field_name or SNAKE_FIELD_NAME\n\nReturns:\nstr: camelCase converted name. E.g. capsFieldName", "source": "juraj-google-style"}
{"code": "def basic_train_loop(supervisor, train_step_fn, args=None, kwargs=None, master=''):\n    if args is None:\n        args = []\n    if kwargs is None:\n        kwargs = {}\n    should_retry = True\n    while should_retry:\n        try:\n            should_retry = False\n            with supervisor.managed_session(master) as sess:\n                while not supervisor.should_stop():\n                    train_step_fn(sess, *args, **kwargs)\n        except errors.AbortedError:\n            should_retry = True", "docstring": "Basic loop to train a model.\n\nCalls `train_step_fn` in a loop to train a model.  The function is called as:\n\n```python\ntrain_step_fn(session, *args, **kwargs)\n```\n\nIt is passed a `tf.compat.v1.Session` in addition to `args` and `kwargs`.  The\nfunction\ntypically runs one training step in the session.\n\nArgs:\nsupervisor: `tf.compat.v1.train.Supervisor` to run the training services.\ntrain_step_fn: Callable to execute one training step.  Called repeatedly as\n`train_step_fn(session, *args **kwargs)`.\nargs: Optional positional arguments passed to `train_step_fn`.\nkwargs: Optional keyword arguments passed to `train_step_fn`.\nmaster: Master to use to create the training session.  Defaults to `\"\"`\nwhich causes the session to be created in the local process.", "source": "github-repos"}
{"code": "def _build_commands(self, ip_dest, next_hop, **kwargs):\n    commands = ('ip route %s %s' % (ip_dest, next_hop))\n    next_hop_ip = kwargs.get('next_hop_ip', None)\n    distance = kwargs.get('distance', None)\n    tag = kwargs.get('tag', None)\n    route_name = kwargs.get('route_name', None)\n    if (next_hop_ip is not None):\n        commands += (' %s' % next_hop_ip)\n    if (distance is not None):\n        commands += (' %s' % distance)\n    if (tag is not None):\n        commands += (' tag %s' % tag)\n    if (route_name is not None):\n        commands += (' name %s' % route_name)\n    return commands", "docstring": "Build the EOS command string for ip route interactions.\n\nArgs:\nip_dest (string): The ip address of the destination in the\nform of A.B.C.D/E\nnext_hop (string): The next hop interface or ip address\n**kwargs['next_hop_ip'] (string): The next hop address on\ndestination interface\n**kwargs['distance'] (string): Administrative distance for this\nroute\n**kwargs['tag'] (string): Route tag\n**kwargs['route_name'] (string): Route name\n\nReturns the ip route command string to be sent to the switch for\nthe given set of parameters.", "source": "codesearchnet"}
{"code": "def GetStatus(self):\n    STATUS_FORMAT = '>BBBhhhHhhhHBBBxBbHBHHHHBbbHHBBBbbbbbbbbbBH'\n    STATUS_FIELDS = ['packetType', 'firmwareVersion', 'protocolVersion', 'mainFineCurrent', 'usbFineCurrent', 'auxFineCurrent', 'voltage1', 'mainCoarseCurrent', 'usbCoarseCurrent', 'auxCoarseCurrent', 'voltage2', 'outputVoltageSetting', 'temperature', 'status', 'leds', 'mainFineResistor', 'serialNumber', 'sampleRate', 'dacCalLow', 'dacCalHigh', 'powerUpCurrentLimit', 'runTimeCurrentLimit', 'powerUpTime', 'usbFineResistor', 'auxFineResistor', 'initialUsbVoltage', 'initialAuxVoltage', 'hardwareRevision', 'temperatureLimit', 'usbPassthroughMode', 'mainCoarseResistor', 'usbCoarseResistor', 'auxCoarseResistor', 'defMainFineResistor', 'defUsbFineResistor', 'defAuxFineResistor', 'defMainCoarseResistor', 'defUsbCoarseResistor', 'defAuxCoarseResistor', 'eventCode', 'eventData']\n    self._SendStruct('BBB', 1, 0, 0)\n    while 1:\n        read_bytes = self._ReadPacket()\n        if (not read_bytes):\n            return None\n        calsize = struct.calcsize(STATUS_FORMAT)\n        if ((len(read_bytes) != calsize) or (read_bytes[0] != 16)):\n            logging.warning('Wanted status, dropped type=0x%02x, len=%d', read_bytes[0], len(read_bytes))\n            continue\n        status = dict(zip(STATUS_FIELDS, struct.unpack(STATUS_FORMAT, read_bytes)))\n        p_type = status['packetType']\n        if (p_type != 16):\n            raise MonsoonError(('Package type %s is not 0x10.' % p_type))\n        for k in status.keys():\n            if k.endswith('VoltageSetting'):\n                status[k] = (2.0 + (status[k] * 0.01))\n            elif k.endswith('FineCurrent'):\n                pass\n            elif k.endswith('CoarseCurrent'):\n                pass\n            elif (k.startswith('voltage') or k.endswith('Voltage')):\n                status[k] = (status[k] * 0.000125)\n            elif k.endswith('Resistor'):\n                status[k] = (0.05 + (status[k] * 0.0001))\n                if (k.startswith('aux') or k.startswith('defAux')):\n                    status[k] += 0.05\n            elif k.endswith('CurrentLimit'):\n                status[k] = ((8 * (1023 - status[k])) / 1023.0)\n        return status", "docstring": "Requests and waits for status.\n\nReturns:\nstatus dictionary.", "source": "codesearchnet"}
{"code": "def get_unique_graph(tops, check_types=None, none_if_empty=False):\n    if isinstance(tops, ops.Graph):\n        return tops\n    if not is_iterable(tops):\n        raise TypeError('{} is not iterable'.format(type(tops)))\n    if check_types is None:\n        check_types = (ops.Operation, tensor_lib.Tensor)\n    elif not is_iterable(check_types):\n        check_types = (check_types,)\n    g = None\n    for op in tops:\n        if not isinstance(op, check_types):\n            raise TypeError('Expected a type in ({}), got: {}'.format(', '.join([str(t) for t in check_types]), type(op)))\n        if g is None:\n            g = op.graph\n        elif g._graph_key != op.graph._graph_key:\n            raise ValueError('Operation {} does not belong to given graph'.format(op))\n    if g is None and (not none_if_empty):\n        raise ValueError(\"Can't find the unique graph of an empty list\")\n    return g", "docstring": "Return the unique graph used by the all the elements in tops.\n\nArgs:\ntops: iterable of elements to check (usually a list of tf.Operation and/or\ntf.Tensor). Or a tf.Graph.\ncheck_types: check that the element in tops are of given type(s). If None,\nthe types (tf.Operation, tf.Tensor) are used.\nnone_if_empty: don't raise an error if tops is an empty list, just return\nNone.\nReturns:\nThe unique graph used by all the tops.\nRaises:\nTypeError: if tops is not a iterable of tf.Operation.\nValueError: if the graph is not unique.", "source": "github-repos"}
{"code": "def easeInOutQuart(n):\n    \n    _checkRange(n)\n    n = 2 * n\n    if n < 1:\n        return 0.5 * n**4\n    else:\n        n = n - 2\n        return -0.5 * (n**4 - 2)", "docstring": "A quartic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def fetch_friends(self, user):\n    if USING_ALLAUTH:\n        social_app = SocialApp.objects.get_current('twitter')\n        consumer_key = social_app.key\n        consumer_secret = social_app.secret\n        oauth_token = SocialToken.objects.get(account=user, app=social_app).token\n        oauth_token_secret = SocialToken.objects.get(account=user, app=social_app).token_secret\n    else:\n        t = TwitterBackend()\n        tokens = t.tokens(user)\n        oauth_token_secret = tokens['oauth_token_secret']\n        oauth_token = tokens['oauth_token']\n        consumer_key = settings.TWITTER_CONSUMER_KEY\n        consumer_secret = settings.TWITTER_CONSUMER_SECRET\n    api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=oauth_token, access_token_secret=oauth_token_secret)\n    return api.GetFriends()", "docstring": "fetches the friends from twitter using the\ninformation on django-social-auth models\nuser is an instance of UserSocialAuth\n\nReturns:\ncollection of friend objects fetched from facebook", "source": "codesearchnet"}
{"code": "def _get_mpr_table(self, connection, partition):\n        \n        \n        \n        \n        \n        \n\n        \n        \n        \n        virtual_table = partition.vid\n        table = '{}_v'.format(virtual_table)\n        logger.debug(\n            'Looking for materialized table of the partition.\\n    partition: {}'.format(partition.name))\n        table_exists = self._relation_exists(connection, table)\n        if table_exists:\n            logger.debug(\n                'Materialized table of the partition found.\\n    partition: {}, table: {}'\n                .format(partition.name, table))\n            return table\n\n        \n        logger.debug(\n            'Looking for a virtual table of the partition.\\n    partition: {}'.format(partition.name))\n        virtual_exists = self._relation_exists(connection, virtual_table)\n        if virtual_exists:\n            logger.debug(\n                'Virtual table of the partition found.\\n    partition: {}, table: {}'\n                .format(partition.name, table))\n            return virtual_table\n        raise MissingTableError('sqlite database does not have table for mpr of {} partition.'\n                                .format(partition.vid))", "docstring": "Returns name of the sqlite table who stores mpr data.\n\nArgs:\nconnection (apsw.Connection): connection to sqlite database who stores mpr data.\npartition (orm.Partition):\n\nReturns:\nstr:\n\nRaises:\nMissingTableError: if partition table not found in the db.", "source": "juraj-google-style"}
{"code": "def trace(fun: Callable[[], Any], *, where: Optional[Callable[[base.HyperPrimitive], bool]]=None, require_hyper_name: bool=False, per_thread: bool=True) -> DynamicEvaluationContext:\n    context = DynamicEvaluationContext(where=where, require_hyper_name=require_hyper_name, per_thread=per_thread)\n    with context.collect():\n        fun()\n    return context", "docstring": "Trace the hyper primitives called within a function by executing it.\n\nSee examples in :class:`pyglove.hyper.DynamicEvaluationContext`.\n\nArgs:\nfun: Function in which the search space is defined.\nwhere: A callable object that decide whether a hyper primitive should be\nincluded when being instantiated under `collect`.\nIf None, all hyper primitives under `collect` will be included.\nrequire_hyper_name: If True, all hyper primitives defined in this scope\nwill need to carry their names, which is usually a good idea when the\nfunction that instantiates the hyper primtives need to be called multiple\ntimes.\nper_thread: If True, the context manager will be applied to current thread\nonly. Otherwise, it will be applied on current process.\n\nReturns:\nAn DynamicEvaluationContext that can be passed to `pg.sample`.", "source": "github-repos"}
{"code": "def price(self, valuation_date, market, model=None):\n    del model, valuation_date\n    reference_curve = market.reference_curve\n    discount_curve = market.discount_curve\n    fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction)\n    discount_at_settlement = discount_curve.get_discount_factor(self._settlement_date)\n    return discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)", "docstring": "Returns the present value of the instrument on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the FRA instrument.\nmodel: Reserved for future use.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each FRA\ncontract based on the input market data.", "source": "github-repos"}
{"code": "def __init__(self, src_file, src_line, message):\n        \n        \n        self.message = message\n        self.src_file = src_file\n        self.src_line = src_line", "docstring": "Basic constructor for :class:`AbinitEvent`.\n\nArgs:\nmessage: String with human-readable message providing info on the event.\nsrc_file: String with the name of the Fortran file where the event is raised.\nsrc_line Integer giving the line number in src_file.", "source": "juraj-google-style"}
{"code": "def spence(x, name=None):\n    with ops.name_scope(name, 'spence', [x]):\n        return gen_special_math_ops.spence(x)", "docstring": "Computes Spence's integral of `x` element-wise.\n\nSpence's integral is defined as the integral of `log(t) / (1 - t)` from\n`1` to `x`, with the domain of definition all non-negative real numbers.\n\n>>> tf.math.special.spence([0.5, 1., 2., 3.]).numpy()\narray([ 0.58224034,  0.        , -0.82246685, -1.4367464], dtype=float32)\n\nThis implementation is based off of the Cephes math library.\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types:\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.spence\n@end_compatibility", "source": "github-repos"}
{"code": "def run(self, **kwargs):\n    if (not super().run(**kwargs)):\n        return\n    if kwargs['list']:\n        self.log.info('--- List of Scheduler Modules ---')\n        for (name, scheduler) in list(self.scheduler_plugins.items()):\n            if (self.active_scheduler == name):\n                self.log.info('{} (active)'.format(name))\n            else:\n                self.log.info(name)\n        self.log.info('--- End list of Scheduler Modules ---')\n        return\n    scheduler = self.scheduler_plugins[self.active_scheduler]()\n    scheduler.execute_scheduler()", "docstring": "Execute the scheduler.\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def tftp_update_bios(server=None, path=None):\n    \n\n    if not server:\n        raise salt.exceptions.CommandExecutionError(\"The server name must be specified.\")\n\n    if not path:\n        raise salt.exceptions.CommandExecutionError(\"The TFTP path must be specified.\")\n\n    dn = \"sys/rack-unit-1/bios/fw-updatable\"\n\n    inconfig = .format(server, path)\n\n    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)\n\n    return ret", "docstring": "Update the BIOS firmware through TFTP.\n\nArgs:\nserver(str): The IP address or hostname of the TFTP server.\n\npath(str): The TFTP path and filename for the BIOS image.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' cimc.tftp_update_bios foo.bar.com HP-SL2.cap", "source": "juraj-google-style"}
{"code": "def device_path_to_device_name(device_dir):\n    path_items = os.path.basename(device_dir)[len(METADATA_FILE_PREFIX) + len(DEVICE_TAG):].split(',')\n    return '/'.join([path_item.replace('device_', 'device:').replace('_', ':', 1) for path_item in path_items])", "docstring": "Parse device name from device path.\n\nArgs:\ndevice_dir: (str) a directory name for the device.\n\nReturns:\n(str) parsed device name.", "source": "github-repos"}
{"code": "def mutual_info(rho: Density, qubits0: Qubits, qubits1: Qubits=None, base: float=None) -> float:\n    if (qubits1 is None):\n        qubits1 = tuple((set(rho.qubits) - set(qubits0)))\n    rho0 = rho.partial_trace(qubits1)\n    rho1 = rho.partial_trace(qubits0)\n    ent = entropy(rho, base)\n    ent0 = entropy(rho0, base)\n    ent1 = entropy(rho1, base)\n    return ((ent0 + ent1) - ent)", "docstring": "Compute the bipartite von-Neumann mutual information of a mixed\nquantum state.\n\nArgs:\nrho:    A density matrix of the complete system\nqubits0: Qubits of system 0\nqubits1: Qubits of system 1. If none, taken to be all remaining qubits\nbase:   Optional logarithm base. Default is base e\n\nReturns:\nThe bipartite von-Neumann mutual information.", "source": "codesearchnet"}
{"code": "def hwvtep_set_overlaygw_type(self, **kwargs):\n    name = kwargs.pop('name')\n    type = kwargs.pop('type')\n    ip_args = dict(name=name, gw_type=type)\n    method_name = 'overlay_gateway_gw_type'\n    method_class = self._brocade_tunnels\n    gw_attr = getattr(method_class, method_name)\n    config = gw_attr(**ip_args)\n    output = self._callback(config)\n    return output", "docstring": "Set gateway type\n\nArgs:\nname  (str): gateway-name\ntype (str): gateway-type\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def generate_output_nir(self, json_data=None, hr=True, show_name=False, colorize=True):\n    if (json_data is None):\n        json_data = {}\n    output = generate_output(line='0', short=(HR_WHOIS_NIR['nets']['_short'] if hr else 'nir_nets'), name=(HR_WHOIS_NIR['nets']['_name'] if (hr and show_name) else None), is_parent=True, colorize=colorize)\n    count = 0\n    if json_data['nir']:\n        for net in json_data['nir']['nets']:\n            if (count > 0):\n                output += self.generate_output_newline(line='1', colorize=colorize)\n            count += 1\n            output += generate_output(line='1', short=net['handle'], is_parent=True, colorize=colorize)\n            for (key, val) in net.items():\n                if (val and (isinstance(val, dict) or ('\\n' in val) or (key == 'nameservers'))):\n                    output += generate_output(line='2', short=(HR_WHOIS_NIR['nets'][key]['_short'] if hr else key), name=(HR_WHOIS_NIR['nets'][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)\n                    if (key == 'contacts'):\n                        for (k, v) in val.items():\n                            if v:\n                                output += generate_output(line='3', is_parent=(False if (len(v) == 0) else True), name=k, colorize=colorize)\n                                for (contact_key, contact_val) in v.items():\n                                    if (v is not None):\n                                        tmp_out = '{0}{1}{2}'.format(contact_key, ': ', contact_val)\n                                        output += generate_output(line='4', value=tmp_out, colorize=colorize)\n                    elif (key == 'nameservers'):\n                        for v in val:\n                            output += generate_output(line='3', value=v, colorize=colorize)\n                    else:\n                        for v in val.split('\\n'):\n                            output += generate_output(line='3', value=v, colorize=colorize)\n                else:\n                    output += generate_output(line='2', short=(HR_WHOIS_NIR['nets'][key]['_short'] if hr else key), name=(HR_WHOIS_NIR['nets'][key]['_name'] if (hr and show_name) else None), value=val, colorize=colorize)\n    else:\n        output += 'None'\n    return output", "docstring": "The function for generating CLI output NIR network results.\n\nArgs:\njson_data (:obj:`dict`): The data to process. Defaults to None.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def SetFileContext(self, file_name, row_num, row, headers):\n    self._context = (file_name, row_num, row, headers)", "docstring": "Save the current context to be output with any errors.\n\nArgs:\nfile_name: string\nrow_num: int\nrow: list of strings\nheaders: list of column headers, its order corresponding to row's", "source": "codesearchnet"}
{"code": "def assign(var, new_val, assign_fn=assign_slice):\n    if isinstance(var, Tensor):\n        var = var.operation\n    if (not isinstance(var, Variable)):\n        raise ValueError('var must be a mtf.Variable or its output Tensor.')\n    return Assign([var], [new_val], assign_fn=assign_fn)", "docstring": "Assign a new value to a variable.\n\nArgs:\nvar: either a Variable operation or its output Tensor.\nnew_val: a Tensor\nassign_fn: a function from\n(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation\nReturns:\nan Operation\nRaises:\nValueError: if var is not a Variable and var.operation is not a Variable", "source": "codesearchnet"}
{"code": "def any(self, predicate=None):\n    if self.closed():\n        raise ValueError('Attempt to call any() on a closed Queryable.')\n    if (predicate is None):\n        predicate = (lambda x: True)\n    if (not is_callable(predicate)):\n        raise TypeError('any() parameter predicate={predicate} is not callable'.format(predicate=repr(predicate)))\n    for item in self.select(predicate):\n        if item:\n            return True\n    return False", "docstring": "Determine if the source sequence contains any elements which satisfy\nthe predicate.\n\nOnly enough of the sequence to satisfy the predicate once is consumed.\n\nNote: This method uses immediate execution.\n\nArgs:\npredicate: An optional single argument function used to test each\nelement. If omitted, or None, this method returns True if there\nis at least one element in the source.\n\nReturns:\nTrue if the sequence contains at least one element which satisfies\nthe predicate, otherwise False.\n\nRaises:\nValueError: If the Queryable is closed()", "source": "codesearchnet"}
{"code": "def unlock_kinetis(jlink):\n    if (not jlink.connected()):\n        raise ValueError('No target to unlock.')\n    method = UNLOCK_METHODS.get(jlink.tif, None)\n    if (method is None):\n        raise NotImplementedError('Unsupported target interface for unlock.')\n    return method(jlink)", "docstring": "Unlock for Freescale Kinetis K40 or K60 device.\n\nArgs:\njlink (JLink): an instance of a J-Link that is connected to a target.\n\nReturns:\n``True`` if the device was successfully unlocked, otherwise ``False``.\n\nRaises:\nValueError: if the J-Link is not connected to a target.", "source": "codesearchnet"}
{"code": "def extract_block(content: str, indent_level: int=0) -> str:\n    current_object = []\n    lines = content.split('\\n')\n    end_markers = [')', ']', '}', '\"\"\"']\n    for idx, line in enumerate(lines):\n        if idx == 0 and indent_level > 0 and (not is_empty_line(line)) and (find_indent(line) != indent_level):\n            raise ValueError(f'When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. Got {find_indent(line)} instead.')\n        if find_indent(line) < indent_level and (not is_empty_line(line)):\n            break\n        is_valid_object = len(current_object) > 0\n        if not is_empty_line(line) and (not line.endswith(':')) and (find_indent(line) == indent_level) and is_valid_object:\n            if line.lstrip() in end_markers:\n                current_object.append(line)\n            return '\\n'.join(current_object)\n        else:\n            current_object.append(line)\n    if len(current_object) > 0:\n        return '\\n'.join(current_object)", "docstring": "Return the first block in `content` with the indent level `indent_level`.\n\nThe first line in `content` should be indented at `indent_level` level, otherwise an error will be thrown.\n\nThis method will immediately stop the search when a (non-empty) line with indent level less than `indent_level` is\nencountered.\n\nArgs:\ncontent (`str`): The content to parse\nindent_level (`int`, *optional*, default to 0): The indent level of the blocks to search for\n\nReturns:\n`str`: The first block in `content` with the indent level `indent_level`.", "source": "github-repos"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self._major:\n        self._major.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid struct missing the major protocol version number.')\n    if self._minor:\n        self._minor.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid struct missing the minor protocol version number.')\n    self.length = local_stream.length()\n    super(ProtocolVersion, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the ProtocolVersion struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(EncodedStreamFileSystem, self).__init__(resolver_context)\n    self._encoding_method = None", "docstring": "Initializes an encoded file system.\n\nArgs:\nresolver_context (Context): a resolver context.", "source": "juraj-google-style"}
{"code": "def _json_clean(d):\n    result = {}\n    compkeys = {}\n    for (k, v) in d.items():\n        if (not isinstance(k, tuple)):\n            result[k] = v\n        else:\n            key = 'c.{}'.format(id(k))\n            result[key] = v\n            compkeys[key] = k\n    return (result, compkeys)", "docstring": "Cleans the specified python `dict` by converting any tuple keys to\nstrings so that they can be serialized by JSON.\n\nArgs:\nd (dict): python dictionary to clean up.\n\nReturns:\ndict: cleaned-up dictionary.", "source": "codesearchnet"}
{"code": "def read_core_state_eigen(self):\n    with zopen(self.filename, 'rt') as foutcar:\n        line = foutcar.readline()\n        while (line != ''):\n            line = foutcar.readline()\n            if ('NIONS =' in line):\n                natom = int(line.split('NIONS =')[1])\n                cl = [defaultdict(list) for i in range(natom)]\n            if ('the core state eigen' in line):\n                iat = (- 1)\n                while (line != ''):\n                    line = foutcar.readline()\n                    if ('E-fermi' in line):\n                        break\n                    data = line.split()\n                    if ((len(data) % 2) == 1):\n                        iat += 1\n                        data = data[1:]\n                    for i in range(0, len(data), 2):\n                        cl[iat][data[i]].append(float(data[(i + 1)]))\n    return cl", "docstring": "Read the core state eigenenergies at each ionic step.\n\nReturns:\nA list of dict over the atom such as [{\"AO\":[core state eig]}].\nThe core state eigenenergie list for each AO is over all ionic\nstep.\n\nExample:\nThe core state eigenenergie of the 2s AO of the 6th atom of the\nstructure at the last ionic step is [5][\"2s\"][-1]", "source": "codesearchnet"}
{"code": "def tokenize_to_spacy_doc(self, text: str) -> Doc:\n    if (not self.keep_multi_space):\n        text = re.sub(' +', ' ', text)\n    doc = self.nlp(text, disable=['parser'])\n    for a_token in doc:\n        self.custom_token(a_token)\n    return doc", "docstring": "Tokenize the given text, returning a spacy doc. Used for spacy rule extractor\n\nArgs:\ntext (string):\n\nReturns: Doc", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not path_spec:\n      raise ValueError('Missing path specification.')\n\n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    table_name = getattr(path_spec, 'table_name', None)\n    if table_name is None:\n      raise errors.PathSpecError('Path specification missing table name.')\n\n    column_name = getattr(path_spec, 'column_name', None)\n    if column_name is None:\n      raise errors.PathSpecError('Path specification missing column name.')\n\n    row_condition = getattr(path_spec, 'row_condition', None)\n    if row_condition:\n      if not isinstance(row_condition, tuple) or len(row_condition) != 3:\n        raise errors.PathSpecError((\n            'Unsupported row_condition not a tuple in the form: '\n            '(column_name, operator, value).'))\n\n    row_index = getattr(path_spec, 'row_index', None)\n    if row_index is not None:\n      if not isinstance(row_index, py2to3.INTEGER_TYPES):\n        raise errors.PathSpecError(\n            'Unsupported row_index not of integer type.')\n\n    if not row_condition and row_index is None:\n      raise errors.PathSpecError(\n          'Path specification requires either a row_condition or row_index.')\n\n    if self._database_object:\n      raise IOError('Database file already set.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      database_object = sqlite_database.SQLiteDatabaseFile()\n      database_object.Open(file_object)\n    finally:\n      file_object.close()\n\n    \n    error_string = ''\n    if not database_object.HasTable(table_name):\n      error_string = 'Missing table: {0:s}'.format(table_name)\n\n    elif not database_object.HasColumn(table_name, column_name):\n      error_string = 'Missing column: {0:s} in table: {1:s}'.format(\n          column_name, table_name)\n\n    elif not row_condition:\n      query = 'SELECT {0:s} FROM {1:s} LIMIT 1 OFFSET {2:d}'.format(\n          column_name, table_name, row_index)\n      rows = database_object.Query(query)\n\n    elif not database_object.HasColumn(table_name, row_condition[0]):\n      error_string = (\n          'Missing row condition column: {0:s} in table: {1:s}'.format(\n              row_condition[0], table_name))\n\n    elif row_condition[1] not in self._OPERATORS:\n      error_string = (\n          'Unsupported row condition operator: {0:s}.'.format(\n              row_condition[1]))\n\n    else:\n      query = 'SELECT {0:s} FROM {1:s} WHERE {2:s} {3:s} ?'.format(\n          column_name, table_name, row_condition[0], row_condition[1])\n      rows = database_object.Query(query, parameters=(row_condition[2], ))\n\n    \n    \n    if not error_string and (len(rows) != 1 or len(rows[0]) != 1):\n      if not row_condition:\n        error_string = (\n            'Unable to open blob in table: {0:s} and column: {1:s} '\n            'for row: {2:d}.').format(table_name, column_name, row_index)\n\n      else:\n        row_condition_string = ' '.join([\n            '{0!s}'.format(value) for value in iter(row_condition)])\n        error_string = (\n            'Unable to open blob in table: {0:s} and column: {1:s} '\n            'where: {2:s}.').format(\n                table_name, column_name, row_condition_string)\n\n    if error_string:\n      database_object.Close()\n      raise IOError(error_string)\n\n    self._blob = rows[0][0]\n    
self._current_offset = 0\n    self._database_object = database_object\n    self._size = len(self._blob)\n    self._table_name = table_name", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def _get_typed_value(self, key, target_type, type_convert, is_optional=False, is_secret=False, is_local=False, default=None, options=None):\n    try:\n        value = self._get(key)\n    except KeyError:\n        if (not is_optional):\n            raise RheaError('No value was provided for the non optional key `{}`.'.format(key))\n        return default\n    if isinstance(value, six.string_types):\n        try:\n            self._add_key(key, is_secret=is_secret, is_local=is_local)\n            self._check_options(key=key, value=value, options=options)\n            return type_convert(value)\n        except ValueError:\n            raise RheaError('Cannot convert value `{}` (key: `{}`) to `{}`'.format(value, key, target_type))\n    if isinstance(value, target_type):\n        self._add_key(key, is_secret=is_secret, is_local=is_local)\n        self._check_options(key=key, value=value, options=options)\n        return value\n    raise RheaError('Cannot convert value `{}` (key: `{}`) to `{}`'.format(value, key, target_type))", "docstring": "Return the value corresponding to the key converted to the given type.\n\nArgs:\nkey: the dict key.\ntarget_type: The type we expect the variable or key to be in.\ntype_convert: A lambda expression that converts the key to the desired type.\nis_optional: To raise an error if key was not found.\nis_secret: If the key is a secret.\nis_local: If the key is a local to this service.\ndefault: default value if is_optional is True.\noptions: list/tuple if provided, the value must be one of these values.\n\nReturns:\nThe corresponding value of the key converted.", "source": "codesearchnet"}
{"code": "def _AddSaveOps(self, filename_tensor, saveables):\n    save = self.save_op(filename_tensor, saveables)\n    return control_flow_ops.with_dependencies([save], filename_tensor)", "docstring": "Add ops to save variables that are on the same shard.\n\nArgs:\nfilename_tensor: String Tensor.\nsaveables: A list of SaveableObject objects.\n\nReturns:\nA tensor with the filename used to save.", "source": "github-repos"}
{"code": "def inputs(self, name):\n    self._closed()\n    step = self._get_step(name, make_copy=False)\n    return step.list_inputs()", "docstring": "List input names and types of a step in the steps library.\n\nArgs:\nname (str): name of a step in the steps library.", "source": "codesearchnet"}
{"code": "def dynamic_rope_update(rope_forward):\n\n    def longrope_frequency_update(self, position_ids, device):\n        \n        seq_len = torch.max(position_ids) + 1\n        if hasattr(self.config, 'original_max_position_embeddings'):\n            original_max_position_embeddings = self.config.original_max_position_embeddings\n        else:\n            original_max_position_embeddings = self.config.max_position_embeddings\n        if seq_len > original_max_position_embeddings:\n            if not hasattr(self, 'long_inv_freq'):\n                self.long_inv_freq, _ = self.rope_init_fn(self.config, device, seq_len=original_max_position_embeddings + 1)\n            self.register_buffer('inv_freq', self.long_inv_freq, persistent=False)\n        else:\n            self.original_inv_freq = self.original_inv_freq.to(device)\n            self.register_buffer('inv_freq', self.original_inv_freq, persistent=False)\n\n    def dynamic_frequency_update(self, position_ids, device):\n        \n        seq_len = torch.max(position_ids) + 1\n        if seq_len > self.max_seq_len_cached:\n            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)\n            self.register_buffer('inv_freq', inv_freq, persistent=False)\n            self.max_seq_len_cached = seq_len\n        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:\n            self.original_inv_freq = self.original_inv_freq.to(device)\n            self.register_buffer('inv_freq', self.original_inv_freq, persistent=False)\n            self.max_seq_len_cached = self.original_max_seq_len\n\n    @wraps(rope_forward)\n    def wrapper(self, x, position_ids):\n        if 'dynamic' in self.rope_type:\n            dynamic_frequency_update(self, position_ids, device=x.device)\n        elif self.rope_type == 'longrope':\n            longrope_frequency_update(self, position_ids, device=x.device)\n        return rope_forward(self, x, position_ids)\n    return wrapper", "docstring": "Decorator function to update the RoPE parameters in the forward pass, if the model is using a dynamic RoPE\n(i.e. a RoPE implementation that may recompute its frequencies in the forward pass).\n\nArgs:\nrope_forward (Callable):\nThe forward pass of the RoPE implementation.\n\nReturns:\nThe decorated forward pass.", "source": "github-repos"}
{"code": "def _create_vocab_table_lookup_model_tf1(self, sess: session.Session) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:\n    asset_dir = self.create_tempdir('assets').full_path\n    asset_file = os.path.join(asset_dir, 'vocab_file.txt')\n    file_io.write_string_to_file(filename=asset_file, file_content='hello,model,quantization\\n')\n    vocab_file = asset.Asset(asset_file)\n    raw_vocab = io_ops.read_file(vocab_file)\n    vocabs = ragged_string_ops.string_split_v2(string_ops.string_strip(raw_vocab), sep=',')\n    kv_init = lookup_ops.KeyValueTensorInitializer(keys=vocabs, values=np.array([0, 1, 2]), value_dtype=dtypes.int64)\n    table = lookup_ops.StaticVocabularyTable(kv_init, num_oov_buckets=5)\n    input_vocabs_placeholder = array_ops.placeholder(dtypes.string, shape=(None,), name='input_vocabs')\n    lookup_vals = math_ops.cast(table.lookup(input_vocabs_placeholder), dtypes.float32)\n    matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])\n    weight_row = array_ops.ones(shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32)\n    weight = array_ops.transpose_v2(array_ops_stack.stack([weight_row, weight_row]))\n    output_tensor = math_ops.matmul(matmul_input, weight)\n    return (input_vocabs_placeholder, lookup_vals, output_tensor)", "docstring": "Creates a simple model that initializes and lookups a vocab table.\n\nThis model creates an asset file at \"vocab_file.txt\" containing\ncomma-separated vocabularies.  It also initializes a `StaticVocabularyTable`\nand performs a lookup with the input vocabs, which is a 1D tensor of\nstrings.\n\nArgs:\nsess: Tensorflow Session to create the model in.\n\nReturns:\n(input_vocabs_placeholder, lookup_vals, output_tensor), where\n* input_vocabs_placeholder is a placeholder tensor of 1D strings\n* lookup_vals is an output tensor that is a direct result of table lookup\n* output_tensor is a float 2x2 matrix", "source": "github-repos"}
{"code": "def _StartWorkerProcess(self, process_name, storage_writer):\n    \n    analysis_plugin = self._analysis_plugins.get(process_name, None)\n    if not analysis_plugin:\n      logger.error('Missing analysis plugin: {0:s}'.format(process_name))\n      return None\n\n    if self._use_zeromq:\n      queue_name = '{0:s} output event queue'.format(process_name)\n      output_event_queue = zeromq_queue.ZeroMQPushBindQueue(\n          name=queue_name, timeout_seconds=self._QUEUE_TIMEOUT)\n      \n      \n      output_event_queue.Open()\n\n    else:\n      output_event_queue = multi_process_queue.MultiProcessingQueue(\n          timeout=self._QUEUE_TIMEOUT)\n\n    self._event_queues[process_name] = output_event_queue\n\n    if self._use_zeromq:\n      queue_name = '{0:s} input event queue'.format(process_name)\n      input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(\n          name=queue_name, delay_open=True, port=output_event_queue.port,\n          timeout_seconds=self._QUEUE_TIMEOUT)\n\n    else:\n      input_event_queue = output_event_queue\n\n    process = analysis_process.AnalysisProcess(\n        input_event_queue, storage_writer, self._knowledge_base,\n        analysis_plugin, self._processing_configuration,\n        data_location=self._data_location,\n        event_filter_expression=self._event_filter_expression,\n        name=process_name)\n\n    process.start()\n\n    logger.info('Started analysis plugin: {0:s} (PID: {1:d}).'.format(\n        process_name, process.pid))\n\n    try:\n      self._StartMonitoringProcess(process)\n    except (IOError, KeyError) as exception:\n      logger.error((\n          'Unable to monitor analysis plugin: {0:s} (PID: {1:d}) '\n          'with error: {2!s}').format(process_name, process.pid, exception))\n\n      process.terminate()\n      return None\n\n    self._RegisterProcess(process)\n    return process", "docstring": "Creates, starts, monitors and registers a worker process.\n\nArgs:\nprocess_name (str): process name.\nstorage_writer (StorageWriter): storage writer for a session storage used\nto create task storage.\n\nReturns:\nMultiProcessWorkerProcess: extraction worker process or None on error.", "source": "juraj-google-style"}
{"code": "def versions_from_trove(trove):\n    \n    versions = set()\n    for classifier in trove:\n        if 'Programming Language :: Python ::' in classifier:\n            ver = classifier.split('::')[-1]\n            major = ver.split('.')[0].strip()\n            if major:\n                versions.add(major)\n    return sorted(\n        set([v for v in versions if v.replace('.', '', 1).isdigit()]))", "docstring": "Finds out python version from list of trove classifiers.\nArgs:\ntrove: list of trove classifiers\nReturns:\npython version string", "source": "juraj-google-style"}
{"code": "def add_record_references(self, app_id, record_id, field_id, target_record_ids):\n        \n\n        self._swimlane.request(\n            'post',\n            'app/{0}/record/{1}/add-references'.format(app_id, record_id),\n            json={\n                'fieldId': field_id,\n                'targetRecordIds': target_record_ids\n            }\n        )", "docstring": "Bulk operation to directly add record references without making any additional requests\n\nWarnings:\nDoes not perform any app, record, or target app/record validation\n\nArgs:\napp_id (str): Full App ID string\nrecord_id (str): Full parent Record ID string\nfield_id (str): Full field ID to target reference field on parent Record string\ntarget_record_ids (List(str)): List of full target reference Record ID strings", "source": "juraj-google-style"}
{"code": "def select_rows(self, rows):\n    self.values = self.values.iloc[rows]\n    self.index = self.index.iloc[(rows, :)]\n    for prop in self._property_columns:\n        vals = getattr(self, prop)[rows]\n        setattr(self, prop, vals)", "docstring": "Truncate internal arrays to keep only the specified rows.\n\nArgs:\nrows (array): An integer or boolean array identifying the indices\nof rows to keep.", "source": "codesearchnet"}
{"code": "def all_folders(path_name, keyword='', has_date=False, date_fmt=DATE_FMT) -> list:\n    if (not os.path.exists(path=path_name)):\n        return []\n    path_name = path_name.replace('\\\\', '/')\n    if keyword:\n        folders = sort_by_modified([f.replace('\\\\', '/') for f in glob.iglob(f'{path_name}/*{keyword}*') if (os.path.isdir(f) and (f.replace('\\\\', '/').split('/')[(- 1)][0] != '~'))])\n    else:\n        folders = sort_by_modified([f'{path_name}/{f}' for f in os.listdir(path=path_name) if (os.path.isdir(f'{path_name}/{f}') and (f[0] != '~'))])\n    if has_date:\n        folders = filter_by_dates(folders, date_fmt=date_fmt)\n    return folders", "docstring": "Search all folders with criteria\nReturned list will be sorted by last modified\n\nArgs:\npath_name: full path name\nkeyword: keyword to search\nhas_date: whether has date in file name (default False)\ndate_fmt: date format to check for has_date parameter\n\nReturns:\nlist: all folder names fulfilled criteria", "source": "codesearchnet"}
{"code": "def get_node_angle(self, node):\n        \n        return atan2(self.pos[0]-node.pos[0], self.pos[1]-node.pos[1]) - pi / 2", "docstring": "Get the angle beetween 2 nodes relative to the horizont.\n\nArgs:\nnode (object): The other node.\n\nReturns:\nrad: The angle", "source": "juraj-google-style"}
{"code": "def exec_resize(self, exec_id, height=None, width=None):\n    if isinstance(exec_id, dict):\n        exec_id = exec_id.get('Id')\n    params = {'h': height, 'w': width}\n    url = self._url('/exec/{0}/resize', exec_id)\n    res = self._post(url, params=params)\n    self._raise_for_status(res)", "docstring": "Resize the tty session used by the specified exec command.\n\nArgs:\nexec_id (str): ID of the exec instance\nheight (int): Height of tty session\nwidth (int): Width of tty session", "source": "codesearchnet"}
{"code": "def query(self, batch=False, query_functions=None, credential=None):\n    batch_item = self._build_query_batch_item(query_functions)\n    if batch:\n        self.batch_items.append(batch_item)\n    else:\n        request = self._build_request_message(credential, [batch_item])\n        response = self._send_and_receive_message(request)\n        results = self._process_batch_items(response)\n        return results[0]", "docstring": "Send a Query request to the server.\n\nArgs:\nbatch (boolean): A flag indicating if the operation should be sent\nwith a batch of additional operations. Defaults to False.\nquery_functions (list): A list of QueryFunction enumerations\nindicating what information the client wants from the server.\nOptional, defaults to None.\ncredential (Credential): A Credential object containing\nauthentication information for the server. Optional, defaults\nto None.", "source": "codesearchnet"}
{"code": "def render(self, trajectories: Tuple[(NonFluents, Fluents, Fluents, Fluents, np.array)], batch: Optional[int]=None) -> None:\n    (non_fluents, initial_state, states, actions, interms, rewards) = trajectories\n    non_fluents = dict(non_fluents)\n    states = dict(((name, fluent[0]) for (name, fluent) in states))\n    actions = dict(((name, fluent[0]) for (name, fluent) in actions))\n    rewards = rewards[0]\n    idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1')\n    start = initial_state[idx][0]\n    g = non_fluents['GOAL/1']\n    path = states['location/1']\n    deltas = actions['move/1']\n    centers = non_fluents['DECELERATION_ZONE_CENTER/2']\n    decays = non_fluents['DECELERATION_ZONE_DECAY/1']\n    zones = [(x, y, d) for ((x, y), d) in zip(centers, decays)]\n    self._ax1 = plt.gca()\n    self._render_state_space()\n    self._render_start_and_goal_positions(start, g)\n    self._render_deceleration_zones(zones)\n    self._render_state_action_trajectory(start, path, deltas)\n    plt.title('Navigation', fontweight='bold')\n    plt.legend(loc='lower right')\n    plt.show()", "docstring": "Render the simulated state-action `trajectories` for Navigation domain.\n\nArgs:\nstats: Performance statistics.\ntrajectories: NonFluents, states, actions, interms and rewards.\nbatch: Number of batches to render.", "source": "codesearchnet"}
{"code": "def raster_erosion(rasterfile):\n        \n        if is_string(rasterfile):\n            origin_raster = RasterUtilClass.read_raster(str(rasterfile))\n        elif isinstance(rasterfile, Raster):\n            origin_raster = rasterfile.data\n        elif isinstance(rasterfile, numpy.ndarray):\n            origin_raster = rasterfile\n        else:\n            return \"Your rasterfile has a wrong type. Type must be string or \" \\\n                   \"numpy.array or class Raster in pygeoc.\"\n        max_value_raster = origin_raster.max()\n        erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))\n        \n        \n        \n        add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)\n        temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))\n        add_col = numpy.full((origin_raster.shape[0] + 2, 1), max_value_raster)\n        expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))\n        \n        for i in range(origin_raster.shape[0]):\n            for j in range(origin_raster.shape[1]):\n                min_pixel_value = max_value_raster\n                \n                for k in range(3):\n                    for l in range(3):\n                        if expand_origin_raster[i + k, j + l] <= min_pixel_value:\n                            min_pixel_value = expand_origin_raster[i + k, j + l]\n                            \n                            \n                            \n                    erosion_raster[i, j] = min_pixel_value\n        \n        return erosion_raster", "docstring": "Erode the raster image.\n\nFind the min pixel's value in 8-neighborhood. Then change the compute\npixel's value into the min pixel's value.\n\nArgs:\nrasterfile: input original raster image, type can be filename(string,\nlike \"test1.tif\"), rasterfile(class Raster) or numpy.ndarray.\n\nReturns:\nerosion_raster: raster image after erosion, type is numpy.ndarray.", "source": "juraj-google-style"}
{"code": "def _create_tensor_watch_maps(self, device_name):\n    self._watch_key_to_datum[device_name] = {}\n    self._watch_key_to_rel_time[device_name] = {}\n    self._watch_key_to_dump_size_bytes[device_name] = {}\n    for datum in self._dump_tensor_data[device_name]:\n        if datum.watch_key not in self._watch_key_to_devices:\n            self._watch_key_to_devices[datum.watch_key] = {device_name}\n        else:\n            self._watch_key_to_devices[datum.watch_key].add(device_name)\n        if datum.watch_key not in self._watch_key_to_datum[device_name]:\n            self._watch_key_to_datum[device_name][datum.watch_key] = [datum]\n            self._watch_key_to_rel_time[device_name][datum.watch_key] = [datum.timestamp - self._t0]\n            self._watch_key_to_dump_size_bytes[device_name][datum.watch_key] = [datum.dump_size_bytes]\n        else:\n            self._watch_key_to_datum[device_name][datum.watch_key].append(datum)\n            self._watch_key_to_rel_time[device_name][datum.watch_key].append(datum.timestamp - self._t0)\n            self._watch_key_to_dump_size_bytes[device_name][datum.watch_key].append(datum.dump_size_bytes)", "docstring": "Create maps from tensor watch keys to datum and to timestamps.\n\nCreate a map from watch key (tensor name + debug op) to `DebugTensorDatum`\nitem. Also make a map from watch key to relative timestamp.\n\"relative\" means (absolute timestamp - t0).\n\nArgs:\ndevice_name: (str) name of the device.", "source": "github-repos"}
{"code": "def delete_s3_bucket(client, resource):\n    \n\n    if dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False):\n        client.delete_bucket(Bucket=resource.id)\n    return ActionStatus.SUCCEED, resource.metrics()", "docstring": "Delete an S3 bucket\n\nThis function will try to delete an S3 bucket\n\nArgs:\nclient (:obj:`boto3.session.Session.client`): A boto3 client object\nresource (:obj:`Resource`): The resource object to terminate\n\nReturns:\n`ActionStatus`", "source": "juraj-google-style"}
{"code": "def deserialize(doc_xml, pyxb_binding=None):\n    \n    pyxb_binding = pyxb_binding or d1_common.types.dataoneTypes\n    try:\n        return pyxb_binding.CreateFromDocument(doc_xml)\n    except pyxb.ValidationError as e:\n        raise ValueError(\n            'Unable to deserialize XML to PyXB. error=\"{}\" xml=\"{}\"'.format(\n                e.details(), doc_xml\n            )\n        )\n    except (pyxb.PyXBException, xml.sax.SAXParseException, Exception) as e:\n        raise ValueError(\n            'Unable to deserialize XML to PyXB. error=\"{}\" xml=\"{}\"'.format(\n                str(e), doc_xml\n            )\n        )", "docstring": "Deserialize DataONE XML types to PyXB.\n\nArgs:\ndoc_xml: UTF-8 encoded ``bytes``\n\npyxb_binding: PyXB binding object. If not specified, the correct one should be\nselected automatically.\n\nReturns:\nPyXB object\n\nSee Also:\n``deserialize_d1_exception()`` for deserializing DataONE Exception types.", "source": "juraj-google-style"}
{"code": "def _show_status_for_work(self, work):\n    work_count = len(work.work)\n    work_completed = {}\n    work_completed_count = 0\n    for v in itervalues(work.work):\n        if v['is_completed']:\n            work_completed_count += 1\n            worker_id = v['claimed_worker_id']\n            if (worker_id not in work_completed):\n                work_completed[worker_id] = {'completed_count': 0, 'last_update': 0.0}\n            work_completed[worker_id]['completed_count'] += 1\n            work_completed[worker_id]['last_update'] = max(work_completed[worker_id]['last_update'], v['claimed_worker_start_time'])\n    print('Completed {0}/{1} work'.format(work_completed_count, work_count))\n    for k in sorted(iterkeys(work_completed)):\n        last_update_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(work_completed[k]['last_update']))\n        print('Worker {0}: completed {1}   last claimed work at {2}'.format(k, work_completed[k]['completed_count'], last_update_time))", "docstring": "Shows status for given work pieces.\n\nArgs:\nwork: instance of either AttackWorkPieces or DefenseWorkPieces", "source": "codesearchnet"}
{"code": "def run_compiler(self, compiler=GCC, inputs=None, output=None):\n        \n        \n        prog = RunningProgram(self, *compiler_cmdline(compiler=compiler,\n                                                      inputs=inputs,\n                                                      output=output))\n        prog.expect_exit_status(0)", "docstring": "Runs a compiler in the working directory.\n\nArgs:\ncompiler (tuple): The compiler program and its command-line arguments,\nincluding placeholders for output and input files.\ninputs (tuple):   The list of input files for the compiler.\noutput (str):     The name of the output file.", "source": "juraj-google-style"}
{"code": "def load_scatter_table(self, fn):\n    data = pickle.load(file(fn))\n    if (('version' not in data) or (data['version'] != tmatrix_aux.VERSION)):\n        warnings.warn('Loading data saved with another version.', Warning)\n    (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table, self._angular_table, self._m_table, self.geometries) = data['psd_scatter']\n    return (data['time'], data['description'])", "docstring": "Load the scattering lookup tables.\n\nLoad the scattering lookup tables saved with save_scatter_table.\n\nArgs:\nfn: The name of the scattering table file.", "source": "codesearchnet"}
{"code": "def _MakeServiceDescriptor(self, service_proto, service_index, scope,\n                             package, file_desc):\n    \n\n    if package:\n      service_name = '.'.join((package, service_proto.name))\n    else:\n      service_name = service_proto.name\n\n    methods = [self._MakeMethodDescriptor(method_proto, service_name, package,\n                                          scope, index)\n               for index, method_proto in enumerate(service_proto.method)]\n    desc = descriptor.ServiceDescriptor(name=service_proto.name,\n                                        full_name=service_name,\n                                        index=service_index,\n                                        methods=methods,\n                                        options=_OptionsOrNone(service_proto),\n                                        file=file_desc)\n    self._service_descriptors[service_name] = desc\n    return desc", "docstring": "Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.\n\nArgs:\nservice_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message.\nservice_index: The index of the service in the File.\nscope: Dict mapping short and full symbols to message and enum types.\npackage: Optional package name for the new message EnumDescriptor.\nfile_desc: The file containing the service descriptor.\n\nReturns:\nThe added descriptor.", "source": "juraj-google-style"}
{"code": "def config(self, name='skype'):\n    self.conn('PUT', '{0}/users/ME/endpoints/{1}/presenceDocs/messagingService'.format(self.conn.msgsHost, self.id), auth=SkypeConnection.Auth.RegToken, json={'id': 'messagingService', 'type': 'EndpointPresenceDoc', 'selfLink': 'uri', 'privateInfo': {'epname': name}, 'publicInfo': {'capabilities': '', 'type': 1, 'skypeNameVersion': 'skype.com', 'nodeInfo': 'xx', 'version': '908/1.30.0.128'}})", "docstring": "Configure this endpoint to allow setting presence.\n\nArgs:\nname (str): display name for this endpoint", "source": "codesearchnet"}
{"code": "def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):\n    input_ = deepcopy(input_)\n    message = sha3_256(message.encode())\n    if input_.fulfills:\n        message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode())\n    for owner_before in set(input_.owners_before):\n        ccffill = input_.fulfillment\n        subffills = ccffill.get_subcondition_from_vk(base58.b58decode(owner_before))\n        if (not subffills):\n            raise KeypairMismatchException('Public key {} cannot be found in the fulfillment'.format(owner_before))\n        try:\n            private_key = key_pairs[owner_before]\n        except KeyError:\n            raise KeypairMismatchException('Public key {} is not a pair to any of the private keys'.format(owner_before))\n        for subffill in subffills:\n            subffill.sign(message.digest(), base58.b58decode(private_key.encode()))\n    return input_", "docstring": "Signs a ThresholdSha256.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The Input to be signed.\nmessage (str): The message to be signed\nkey_pairs (dict): The keys to sign the Transaction with.", "source": "codesearchnet"}
{"code": "def files_sharedPublicURL(self, *, id: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"id\": id})\n        return self.api_call(\"files.sharedPublicURL\", json=kwargs)", "docstring": "Enables a file for public/external sharing.\n\nArgs:\nid (str): The file id. e.g. 'F1234467890'", "source": "juraj-google-style"}
{"code": "def _render_our_module_key_flags(self, module, output_lines, prefix=''):\n    key_flags = self.get_key_flags_for_module(module)\n    if key_flags:\n        self._render_module_flags(module, key_flags, output_lines, prefix)", "docstring": "Returns a help string for the key flags of a given module.\n\nArgs:\nmodule: module|str, the module to render key flags for.\noutput_lines: [str], a list of strings.  The generated help message\nlines will be appended to this list.\nprefix: str, a string that is prepended to each generated help line.", "source": "codesearchnet"}
{"code": "def check_model_doc(overwrite: bool=False):\n    with open(PATH_TO_TOC, encoding='utf-8') as f:\n        content = yaml.safe_load(f.read())\n    api_idx = 0\n    while content[api_idx]['title'] != 'API':\n        api_idx += 1\n    api_doc = content[api_idx]['sections']\n    model_idx = 0\n    while api_doc[model_idx]['title'] != 'Models':\n        model_idx += 1\n    model_doc = api_doc[model_idx]['sections']\n    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]\n    diff = False\n    for idx, modality_doc in modalities_docs:\n        old_modality_doc = modality_doc['sections']\n        new_modality_doc = clean_model_doc_toc(old_modality_doc)\n        if old_modality_doc != new_modality_doc:\n            diff = True\n            if overwrite:\n                model_doc[idx]['sections'] = new_modality_doc\n    if diff:\n        if overwrite:\n            api_doc[model_idx]['sections'] = model_doc\n            content[api_idx]['sections'] = api_doc\n            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:\n                f.write(yaml.dump(content, allow_unicode=True))\n        else:\n            raise ValueError('The model doc part of the table of content is not properly sorted, run `make style` to fix this.')", "docstring": "Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model\nAPI doc) and potentially auto-cleans it.\n\nArgs:\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).", "source": "github-repos"}
{"code": "def saturate_kwargs(keys, **kwargs):\n    if isinstance(keys, str):\n        keys = [keys]\n    keys = [k for k in keys if ((k in kwargs) and hasattr(kwargs.get(k, None), '__iter__'))]\n    if (len(keys) == 0):\n        return []\n    kw_corr = list(product(*(range(len(kwargs[k])) for k in keys)))\n    kw_arr = []\n    for corr in kw_corr:\n        kw_arr.append(dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))])))\n    for k in keys:\n        kwargs.pop(k, None)\n    kw_arr = [{**k, **kwargs} for k in kw_arr]\n    return kw_arr", "docstring": "Saturate all combinations of kwargs\n\nArgs:\nkeys: keys in kwargs that want to use process\n**kwargs: kwargs for func", "source": "codesearchnet"}
{"code": "def evaluate(dataset, predictions, output_folder, **kwargs):\n    \n    args = dict(\n        dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs\n    )\n    if isinstance(dataset, datasets.COCODataset):\n        return coco_evaluation(**args)\n    elif isinstance(dataset, datasets.PascalVOCDataset):\n        return voc_evaluation(**args)\n    else:\n        dataset_name = dataset.__class__.__name__\n        raise NotImplementedError(\"Unsupported dataset type {}.\".format(dataset_name))", "docstring": "evaluate dataset using different methods based on dataset type.\nArgs:\ndataset: Dataset object\npredictions(list[BoxList]): each item in the list represents the\nprediction results for one image.\noutput_folder: output folder, to save evaluation files or results.\n**kwargs: other args.\nReturns:\nevaluation result", "source": "juraj-google-style"}
{"code": "def walk(self, walk_func):\n        \n        nodes = self.topological_sort()\n        \n        nodes.reverse()\n\n        for n in nodes:\n            walk_func(n)", "docstring": "Walks each node of the graph in reverse topological order.\nThis can be used to perform a set of operations, where the next\noperation depends on the previous operation. It's important to note\nthat walking happens serially, and is not paralellized.\n\nArgs:\nwalk_func (:class:`types.FunctionType`): The function to be called\non each node of the graph.", "source": "juraj-google-style"}
{"code": "def fulfill_order(self, order_number, site_code=None, email_opt_in=False):\n    max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)\n    api = get_ecommerce_client(site_code=site_code)\n    try:\n        logger.info('Requesting fulfillment of order [%s].', order_number)\n        api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)\n    except exceptions.HttpClientError as exc:\n        status_code = exc.response.status_code\n        if (status_code == 406):\n            logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)\n            raise Ignore()\n        else:\n            logger.warning('Fulfillment of order [%s] failed because of HttpClientError. Retrying', order_number, exc_info=True)\n            _retry_order(self, exc, max_fulfillment_retries, order_number)\n    except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:\n        _retry_order(self, exc, max_fulfillment_retries, order_number)", "docstring": "Fulfills an order.\n\nArguments:\norder_number (str): Order number indicating which order to fulfill.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def launch(self, task, **kwargs):\n        \n        if task.status == task.S_LOCKED:\n            raise ValueError(\"You shall not submit a locked task!\")\n\n        \n        task.build()\n\n        \n        if isinstance(task, AbinitTask):\n            args = kwargs.get(\"exec_args\", [])\n            if args is None: args = []\n            args = args[:]\n            args.append(\"--timelimit %s\" % qu.time2slurm(self.qadapter.timelimit))\n            kwargs[\"exec_args\"] = args\n\n        \n        script_file = self.write_jobfile(task, **kwargs)\n\n        \n        try:\n            qjob, process = self.qadapter.submit_to_queue(script_file)\n            task.set_status(task.S_SUB, msg='Submitted to queue')\n            task.set_qjob(qjob)\n            return process\n\n        except self.qadapter.MaxNumLaunchesError as exc:\n            \n            \n            \n            \n            \n            task.set_status(task.S_ERROR, msg=\"max_num_launches reached: %s\" % str(exc))\n            raise", "docstring": "Build the input files and submit the task via the :class:`Qadapter`\n\nArgs:\ntask: :class:`TaskObject`\n\nReturns:\nProcess object.", "source": "juraj-google-style"}
{"code": "def SerializeExclusiveData(self, writer):\n        \n        writer.WriteVarBytes(self.Script)\n        if self.Version >= 1:\n            writer.WriteFixed8(self.Gas)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def handle_metrics(split, metrics, output_dir):\n    logger.info(f'***** {split} metrics *****')\n    for key in sorted(metrics.keys()):\n        logger.info(f'  {key} = {metrics[key]}')\n    save_json(metrics, os.path.join(output_dir, f'{split}_results.json'))", "docstring": "Log and save metrics\n\nArgs:\n- split: one of train, val, test\n- metrics: metrics dict\n- output_dir: where to save the metrics", "source": "github-repos"}
{"code": "def print_middleware_tree(self, *, EOL=os.linesep, **kwargs):  \n        \n\n        def mask_to_method_name(mask):\n            if mask == HTTPMethod.ALL:\n                return 'ALL'\n            methods = set(HTTPMethod) - {HTTPMethod.ALL}\n            names = (method.name for method in methods if method.value & mask)\n            return '+'.join(names)\n\n        def path_to_str(path):\n            if isinstance(path, str):\n                return path\n            return path.pattern.replace('\\\\', '')\n\n        def decend_into_tree(chain, level):\n            lines_ = []\n            for mw in chain:\n                info = (mask_to_method_name(mw.mask),\n                        path_to_str(mw.path),\n                        mw.func)\n                prefix = \"│   \" * level\n                lines_ += [prefix + \"├── %s %s %s\" % info]\n                if mw.is_subchain:\n                    lines_ += decend_into_tree(mw.func, level + 1)\n            if level:\n                lines_[-1] = lines_[-1].replace('├', '└')\n            return lines_\n\n        lines = [self.name]\n        lines += decend_into_tree(self.middleware, 0)\n        lines.append('┴')\n        print(EOL.join(lines), **kwargs)", "docstring": "Prints a unix-tree-like output of the structure of the web\napplication to the file specified (stdout by default).\n\nArgs:\nEOL (str): The character or string that ends the line.\n**kwargs: Arguments pass to the standard print function.\nThis allows specifying the file to write to and the\nability to flush output upon creation.", "source": "juraj-google-style"}
{"code": "def errors(self):\n    ret_errs = list()\n    errors = (self.get('error').get('errors', None) or list())\n    assert isinstance(errors, list)\n    for err in errors:\n        when = parse_datetime(err.get('when', None))\n        msg = err.get('message', '')\n        e = ErrorEvent(when, msg)\n        ret_errs.append(e)\n    return ret_errs", "docstring": "Returns the list of recent errors.\n\nReturns:\nlist: of :obj:`.ErrorEvent` tuples.", "source": "codesearchnet"}
{"code": "def remove(self, workflow_id):\n    try:\n        db = self._client[self.database]\n        fs = GridFSProxy(GridFS(db.unproxied_object))\n        for grid_doc in fs.find({'workflow_id': workflow_id}, no_cursor_timeout=True):\n            fs.delete(grid_doc._id)\n        col = db[WORKFLOW_DATA_COLLECTION_NAME]\n        return col.delete_one({'_id': ObjectId(workflow_id)})\n    except ConnectionFailure:\n        raise DataStoreNotConnected()", "docstring": "Removes a document specified by its id from the data store.\n\nAll associated GridFs documents are deleted as well.\n\nArgs:\nworkflow_id (str): The id of the document that represents a workflow run.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.", "source": "codesearchnet"}
{"code": "def stop(self, wait=True):\n        \n\n        \n        for context in self._applications.values():\n            context.run_unload_hook()\n\n        self._stats_job.stop()\n        if self._mem_job is not None:\n            self._mem_job.stop()\n        self._cleanup_job.stop()\n        if self._ping_job is not None:\n            self._ping_job.stop()\n\n        self._clients.clear()", "docstring": "Stop the Bokeh Server application.\n\nArgs:\nwait (bool): whether to wait for orderly cleanup (default: True)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def response_data_to_model_instance(self, response_data):\n    response_data['datetime_created'] = dateutil.parser.parse(response_data['datetime_created'])\n    return super(BaseTaskTypeManager, self).response_data_to_model_instance(response_data)", "docstring": "Convert response data to a task type model.\n\nArgs:\nresponse_data (dict): The data from the request's response.\n\nReturns:\n:class:`saltant.models.base_task_type.BaseTaskType`:\nA model instance representing the task type from the\nreponse data.", "source": "codesearchnet"}
{"code": "def char_matches(s1, s2, n=3):\n    \n    return __matches(s1, s2, char_ngrams, n=n)", "docstring": "Character-level n-grams that match between two strings\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nset: the n-grams found in both strings", "source": "juraj-google-style"}
{"code": "def add_to_collection(self, name, value) -> None:\n    self._check_not_finalized()\n    with self._lock:\n        if name not in self._collections:\n            self._collections[name] = [value]\n        else:\n            self._collections[name].append(value)", "docstring": "Stores `value` in the collection with the given `name`.\n\nNote that collections are not sets, so it is possible to add a value to\na collection several times.\n\nArgs:\nname: The key for the collection. The `GraphKeys` class contains many\nstandard names for collections.\nvalue: The value to add to the collection.", "source": "github-repos"}
{"code": "def _get_connection(self, uri, headers=None):\n    \n    connection = None\n    if uri.scheme == 'https':\n      if not uri.port:\n        connection = httplib.HTTPSConnection(uri.host)\n      else:\n        connection = httplib.HTTPSConnection(uri.host, int(uri.port))\n    else:\n      if not uri.port:\n        connection = httplib.HTTPConnection(uri.host)\n      else:\n        connection = httplib.HTTPConnection(uri.host, int(uri.port))\n    return connection", "docstring": "Opens a socket connection to the server to set up an HTTP request.\n\nArgs:\nuri: The full URL for the request as a Uri object.\nheaders: A dict of string pairs containing the HTTP headers for the\nrequest.", "source": "juraj-google-style"}
{"code": "def read(self, length, timeout):\n    self._read_messages_until_true((lambda : (self._buffer_size and (self._buffer_size >= length))), timeout)\n    with self._read_buffer_lock:\n        (data, push_back) = (''.join(self._read_buffer), '')\n        if length:\n            (data, push_back) = (data[:length], data[length:])\n        self._read_buffer.clear()\n        self._buffer_size = len(push_back)\n        if push_back:\n            self._read_buffer.appendleft(push_back)\n    return data", "docstring": "Read 'length' bytes from this stream transport.\n\nArgs:\nlength: If not 0, read this many bytes from the stream, otherwise read all\navailable data (at least one byte).\ntimeout: timeouts.PolledTimeout to use for this read operation.\n\nReturns:\nThe bytes read from this stream.", "source": "codesearchnet"}
{"code": "def as_tmpfile(self, tmpdir=None):\n        \n        import tempfile, shutil\n        tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir\n        new_path = os.path.join(tmpdir, self.basename)\n        shutil.copy(self.filepath, new_path)\n\n        \n        root, ext = os.path.splitext(self.filepath)\n        djrepo = root + \".djrepo\"\n        if os.path.exists(djrepo):\n            shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))\n\n        \n        new = self.__class__.from_file(new_path)\n        if self.has_dojo_report: new.dojo_report = self.dojo_report.deepcopy()\n\n        return new", "docstring": "Copy the pseudopotential to a temporary a file and returns a new pseudopotential object.\nUseful for unit tests in which we have to change the content of the file.\n\nArgs:\ntmpdir: If None, a new temporary directory is created and files are copied here\nelse tmpdir is used.", "source": "juraj-google-style"}
{"code": "def beginning_offsets(self, partitions):\n    offsets = self._fetcher.beginning_offsets(partitions, self.config['request_timeout_ms'])\n    return offsets", "docstring": "Get the first offset for the given partitions.\n\nThis method does not change the current consumer position of the\npartitions.\n\nNote:\nThis method may block indefinitely if the partition does not exist.\n\nArguments:\npartitions (list): List of TopicPartition instances to fetch\noffsets for.\n\nReturns:\n``{TopicPartition: int}``: The earliest available offsets for the\ngiven partitions.\n\nRaises:\nUnsupportedVersionError: If the broker does not support looking\nup the offsets by timestamp.\nKafkaTimeoutError: If fetch failed in request_timeout_ms.", "source": "codesearchnet"}
{"code": "def pearson_correlation(y_true, y_pred, axis=-1):\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)\n    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n    y_true_norm = y_true - ops.mean(y_true, axis=axis, keepdims=True)\n    y_pred_norm = y_pred - ops.mean(y_pred, axis=axis, keepdims=True)\n    y_true_norm = y_true_norm / ops.std(y_true_norm, axis=axis, keepdims=True)\n    y_pred_norm = y_pred_norm / ops.std(y_pred_norm, axis=axis, keepdims=True)\n    return ops.mean(y_true_norm * y_pred_norm, axis=axis)", "docstring": "Computes the Pearson coefficient between labels and predictions.\n\nFormula:\n\n```python\nloss = mean(l2norm(y_true - mean(y_true) * l2norm(y_pred - mean(y_pred)))\n```\n\nArgs:\ny_true: Tensor of true targets.\ny_pred: Tensor of predicted targets.\naxis: Axis along which to determine similarity. Defaults to `-1`.\n\nReturns:\nPearson Correlation Coefficient tensor.\n\nExample:\n\n>>> y_true = [[0, 1, 0.5], [1, 1, 0.2]]\n>>> y_pred = [[0.1, 0.9, 0.5], [1, 0.9, 0.2]]\n>>> loss = keras.losses.concordance_correlation(\n...     y_true, y_pred, axis=-1\n... ).numpy()\n[1.         0.99339927]", "source": "github-repos"}
{"code": "def unpack(self, gpsd_socket_response):\n        \n        try:\n            fresh_data = json.loads(gpsd_socket_response)  \n            package_name = fresh_data.pop('class', 'ERROR')  \n            package = getattr(self, package_name, package_name)  \n            for key in package.keys():\n                package[key] = fresh_data.get(key, 'n/a')  \n\n        except AttributeError:  \n            sys.stderr.write('There is an unexpected exception in DataStream.unpack')\n            return\n\n        except (ValueError, KeyError) as error:\n            sys.stderr.write(str(error))  \n            return", "docstring": "Sets new socket data as DataStream attributes in those initialised dictionaries\nArguments:\ngpsd_socket_response (json object):\nProvides:\nself attribute dictionaries, e.g., self.TPV['lat'], self.SKY['gdop']\nRaises:\nAttributeError: 'str' object has no attribute 'keys' when the device falls out of the system\nValueError, KeyError: most likely extra, or mangled JSON data, should not happen, but that\napplies to a lot of things.", "source": "juraj-google-style"}
{"code": "def _relative_position_to_absolute_position_unmasked(x):\n  \n  x_shape = common_layers.shape_list(x)\n  batch = x_shape[0]\n  heads = x_shape[1]\n  length = x_shape[2]\n  \n  col_pad = tf.zeros((batch, heads, length, 1))\n  x = tf.concat([x, col_pad], axis=3)\n\n  \n  flat_x = tf.reshape(x, [batch, heads, length * 2 * length])\n  flat_pad = tf.zeros((batch, heads, length-1))\n  flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)\n\n  \n  final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1])\n  final_x = final_x[:, :, :, length-1:]\n  final_x = final_x[:, :, :length, :]\n  return final_x", "docstring": "Converts tensor from relative to aboslute indexing for local attention.\n\nArgs:\nx: a Tensor of shape [batch (or batch*num_blocks), heads,\nlength, 2 * length - 1]\n\nReturns:\nA Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]", "source": "juraj-google-style"}
{"code": "def Deserialize(self, reader):\n        \n        sv = reader.ReadByte()\n        if sv != self.StateVersion:\n            raise Exception(\"Incorrect State format\")", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):\n\nRaises:\nException: if the state version is incorrect.", "source": "juraj-google-style"}
{"code": "def _TrimNode(node, index, depth, flags):\n    if ((depth == 1) or (node.LeftChild is None)):\n        return\n    if (depth == 2):\n        if ((not flags[(index * 2)]) and (not flags[((index * 2) + 1)])):\n            node.LeftChild = None\n            node.RightChild = None\n    else:\n        MerkleTree._TrimNode(node.LeftChild, (index * 2), (depth - 1), flags)\n        MerkleTree._TrimNode(node.RightChild, (index * 2), (depth - 1), flags)\n        if ((node.LeftChild.LeftChild is None) and (node.RightChild.RightChild is None)):\n            node.LeftChild = None\n            node.RightChild = None", "docstring": "Internal helper method to trim a node.\n\nArgs:\nnode (MerkleTreeNode):\nindex (int): flag index.\ndepth (int): node tree depth to start trim from.\nflags (bytearray): of left/right pairs. 1 byte for the left node, 1 byte for the right node.\n00 to erase, 11 to keep. Will keep the node if either left or right is not-0", "source": "codesearchnet"}
{"code": "def state_probability(self, direction, repertoire, purview):\n    purview_state = self.purview_state(direction)\n    index = tuple(((node_state if (node in purview) else 0) for (node, node_state) in enumerate(purview_state)))\n    return repertoire[index]", "docstring": "Compute the probability of the purview in its current state given\nthe repertoire.\n\nCollapses the dimensions of the repertoire that correspond to the\npurview nodes onto their state. All other dimension are already\nsingular and thus receive 0 as the conditioning index.\n\nReturns:\nfloat: A single probabilty.", "source": "codesearchnet"}
{"code": "def add(self, arg, tag=None, name=None, aggregate=None, index_override=None):\n    if tag is None:\n        if aggregate is not None:\n            raise ValueError('You must specify `tag` if using aggregate.')\n        global_index = self._get_new_global_index(index_override)\n        sort_index = None\n    else:\n        if aggregate is None:\n            raise ValueError('You must specify `aggregate` if using tag.')\n        if tag not in self._tag_to_global_index:\n            self._tag_to_global_index[tag] = self._get_new_global_index(index_override)\n            self._tag_to_next_sort_index[tag] = 0\n        elif index_override and index_override != self._tag_to_global_index[tag]:\n            raise ValueError('Tag %r was called with two indices %r and %r' % (tag, index_override, self._tag_to_global_index[tag]))\n        global_index = self._tag_to_global_index[tag]\n        sort_index = self._tag_to_next_sort_index[tag]\n        self._tag_to_next_sort_index[tag] += 1\n    uuid = self._unique_function_id\n    name = '%s-%s-%s-%r-%r-%s' % (self._node_name_prefix, self._function_name, uuid, global_index, sort_index, name)\n    identity_op = _array_ops.identity(arg, name=name)\n    identity_op.op._set_attr(OpHint.FUNCTION_NAME_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(self._function_name)))\n    identity_op.op._set_attr(OpHint.FUNCTION_UUID_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(self._unique_function_id)))\n    identity_op.op._set_attr(self._attr_name, _attr_value_pb2.AttrValue(i=global_index))\n    identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR, _attr_value_pb2.AttrValue(i=self._level))\n    if self._children_inputs_mappings:\n        identity_op.op._set_attr(OpHint.CHILDREN_INPUTS_MAPPINGS, _attr_value_pb2.AttrValue(s=_compat.as_bytes(_json.dumps(self._children_inputs_mappings))))\n    if sort_index is not None:\n        identity_op.op._set_attr(OpHint.FUNCTION_SORT_INDEX_ATTR, _attr_value_pb2.AttrValue(i=sort_index))\n    if aggregate is not None:\n        identity_op.op._set_attr(OpHint.FUNCTION_AGGREGATE_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(aggregate)))\n    return identity_op", "docstring": "Return a wrapped tensor of an input tensor as an argument.\n\nArgs:\narg: A TensorFlow tensor that should be considered an argument.\ntag: String tag to identify arguments that should be packed.\nname: Name of argument. This is included in the Identity hint op names.\naggregate: Strategy to aggregate.\nAcceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,\nand OpHint.AGGREGATE_STACK.\nNote, aggregate is only valid if tag is specified.\nindex_override: Specify what input/output index should this be in the\nfinal stub. i.e. add(arg0, index=1); add(arg1, index=0) will make the\nfinal stub be as stub_func(inputs[arg1, arg0], outputs=[]) rather than\nthe default call order based ordering.\n\nReturns:\nA tensor representing the wrapped argument.\n\nRaises:\nValueError: When indices are not consistent.", "source": "github-repos"}
{"code": "def get_filename_safe_string(string):\n    \n    invalid_filename_chars = ['\\\\', '/', ':', '\"', '*', '?', '|', '\\n',\n                              '\\r']\n    if string is None:\n        string = \"None\"\n    for char in invalid_filename_chars:\n        string = string.replace(char, \"\")\n    string = string.rstrip(\".\")\n\n    return string", "docstring": "Converts a string to a string that is safe for a filename\nArgs:\nstring (str): A string to make safe for a filename\n\nReturns:\nstr: A string safe for a filename", "source": "juraj-google-style"}
{"code": "def create(self, group, grouptype):\n    try:\n        self.client.add(self.__distinguished_name(group), API.__object_class(), self.__ldap_attr(group, grouptype))\n    except ldap3.core.exceptions.LDAPNoSuchObjectResult:\n        print('Error creating LDAP Group.\\nRequest: ', self.__ldap_attr(group, grouptype), '\\nDistinguished Name: ', self.__distinguished_name(group), file=sys.stderr)\n    except ldap3.core.exceptions.LDAPEntryAlreadyExistsResult:\n        print('Error creating LDAP Group. Group already exists. \\nRequest: ', self.__ldap_attr(group, grouptype), '\\nDistinguished Name: ', self.__distinguished_name(group), file=sys.stderr)", "docstring": "Create an LDAP Group.\n\nRaises:\nldap3.core.exceptions.LDAPNoSuchObjectResult:\nan object involved with the request is missing\n\nldap3.core.exceptions.LDAPEntryAlreadyExistsResult:\nthe entity being created already exists", "source": "codesearchnet"}
{"code": "def convert_gru_weights(weights, from_cudnn=True):\n    kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)\n    recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n    biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)\n    return [kernels, recurrent_kernels, biases]", "docstring": "Converts the weights between CuDNNGRU and GRU.\n\nArgs:\nweights: Original weights.\nfrom_cudnn: Indicates whether original weights are from CuDNN layer.\n\nReturns:\nUpdated weights compatible with GRU.", "source": "github-repos"}
{"code": "def set_vocabulary(self, vocabulary, idf_weights=None):\n    self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)", "docstring": "Sets vocabulary (and optionally document frequency) for this layer.\n\nThis method sets the vocabulary and IDF weights for this layer directly,\ninstead of analyzing a dataset through `adapt()`. It should be used\nwhenever the vocab (and optionally document frequency) information is\nalready known. If vocabulary data is already present in the layer, this\nmethod will replace it.\n\nArgs:\nvocabulary: Either an array or a string path to a text file.\nIf passing an array, can pass a tuple, list, 1D NumPy array,\nor 1D tensor containing the vocabulary terms.\nIf passing a file path, the file should contain one line\nper term in the vocabulary.\nidf_weights: A tuple, list, 1D NumPy array, or 1D tensor of inverse\ndocument frequency weights with equal length to vocabulary.\nMust be set if `output_mode` is `\"tf_idf\"`.\nShould not be set otherwise.", "source": "github-repos"}
{"code": "def export_verified_variants(aggregate_variants, unique_callers):\n    \n    document_lines = []\n    for variant in aggregate_variants:\n        \n        samples = []\n        for sample in variant['samples']:\n            line = [] \n            line.append(variant['institute'])\n            line.append(variant['_id']) \n            line.append(variant['category'])\n            line.append(variant['variant_type'])\n            line.append(variant['display_name'][:30]) \n            \n            case_name = variant['case_obj']['display_name']  \n            local_link = '/'.join([ '', variant['institute'], case_name, variant['_id'] ])\n            line.append(local_link)\n            line.append(variant.get('validation'))\n            line.append(case_name)\n            case_individual = next(ind for ind in variant['case_obj']['individuals'] if ind['individual_id'] == sample['sample_id'])\n            if case_individual['phenotype'] == 2:\n                line.append(' '.join([sample.get('display_name'),'(A)'])) \n            else:\n                line.append(sample.get('display_name'))\n            line.append(''.join(['chr',variant['chromosome'],':',str(variant['position'])])) \n            line.append('>'.join([variant.get('reference')[:10],variant.get('alternative')[:10]])) \n            genes = []\n            prot_effect = []\n            funct_anno = []\n            for gene in variant.get('genes'): \n                genes.append(gene.get('hgnc_symbol',''))\n                funct_anno.append(gene.get('functional_annotation'))\n                for transcript in gene.get('transcripts'):\n                    if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):\n                        prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))\n            line.append(','.join(prot_effect))\n            line.append(','.join(funct_anno))\n            line.append(','.join(genes))\n            line.append(variant.get('rank_score'))\n            line.append(variant.get('cadd_score'))\n            line.append(sample.get('genotype_call'))\n            line.append(sample['allele_depths'][0])\n            line.append(sample['allele_depths'][1])\n            line.append(sample['genotype_quality'])\n\n            \n            for caller in unique_callers:\n                if variant.get(caller):\n                    line.append(variant.get(caller))\n                else:\n                    line.append('-')\n            document_lines.append(line)\n    return document_lines", "docstring": "Create the lines for an excel file with verified variants for\nan institute\n\nArgs:\naggregate_variants(list): a list of variants with aggregates case data\nunique_callers(set): a unique list of available callers\n\nReturns:\ndocument_lines(list): list of lines to include in the document", "source": "juraj-google-style"}
{"code": "def convert_attribute_tag_to_name(value):\n    \n    if not isinstance(value, Tags):\n        raise ValueError(\"The attribute tag must be a Tags enumeration.\")\n\n    for entry in attribute_name_tag_table:\n        if value == entry[1]:\n            return entry[0]\n\n    raise ValueError(\"Unrecognized attribute tag: {}\".format(value))", "docstring": "A utility function that converts an attribute tag into the corresponding\nattribute name string.\n\nFor example: enums.Tags.STATE -> 'State'\n\nArgs:\nvalue (enum): The Tags enumeration value of the attribute.\n\nReturns:\nstring: The attribute name string that corresponds to the attribute\ntag.\n\nRaises:\nValueError: if the attribute tag is not a Tags enumeration or if it\nis unrecognized attribute tag", "source": "juraj-google-style"}
{"code": "def call_rpc(self, rpc_id, payload=bytes()):\n        \n        if rpc_id < 0 or rpc_id > 0xFFFF:\n            raise RPCInvalidIDError(\"Invalid RPC ID: {}\".format(rpc_id))\n\n        if rpc_id not in self._rpcs:\n            raise RPCNotFoundError(\"rpc_id: {}\".format(rpc_id))\n\n        return self._rpcs[rpc_id](payload)", "docstring": "Call an RPC by its ID.\n\nArgs:\nrpc_id (int): The number of the RPC\npayload (bytes): A byte string of payload parameters up to 20 bytes\n\nReturns:\nbytes: The response payload from the RPC", "source": "juraj-google-style"}
{"code": "def create(provider, count=1, name=None, **kwargs):\n    r\n    count = int(count)\n    provider = provider_by_name(provider)\n    options = provider.create_server_defaults\n    options.update(kwargs)\n    names = [name] * count\n    provider.validate_create_options(**options)\n    return provider.create_servers(count, names, **options)", "docstring": "r'''\nCreate one or more cloud servers\n\nArgs:\n* provider (str): Cloud provider, e.g. ec2, digitalocean\n* count (int) =1: Number of instances\n* name (str) =None: Name of server(s)\n* \\**kwargs: Provider-specific flags", "source": "juraj-google-style"}
{"code": "def GetSubkeyByPath(self, key_path):\n    \n    subkey = self\n    for path_segment in key_paths.SplitKeyPath(key_path):\n      subkey = subkey.GetSubkeyByName(path_segment)\n      if not subkey:\n        break\n\n    return subkey", "docstring": "Retrieves a subkey by path.\n\nArgs:\nkey_path (str): path of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, num_qubits, num_clbits, params):\n        \n        if not isinstance(num_qubits, int) or not isinstance(num_clbits, int):\n            raise QiskitError(\"num_qubits and num_clbits must be integer.\")\n        if num_qubits < 0 or num_clbits < 0:\n            raise QiskitError(\n                \"bad instruction dimensions: %d qubits, %d clbits.\" %\n                num_qubits, num_clbits)\n        self.name = name\n        self.num_qubits = num_qubits\n        self.num_clbits = num_clbits\n\n        self._params = []  \n\n        \n        self.control = None\n        \n        \n        self._definition = None\n        self.params = params", "docstring": "Create a new instruction.\nArgs:\nname (str): instruction name\nnum_qubits (int): instruction's qubit width\nnum_clbits (int): instructions's clbit width\nparams (list[sympy.Basic|qasm.Node|int|float|complex|str|ndarray]): list of parameters\nRaises:\nQiskitError: when the register is not in the correct format.", "source": "juraj-google-style"}
{"code": "def download_file_content(self, file_id, etag=None):\n    if (not is_valid_uuid(file_id)):\n        raise StorageArgumentException('Invalid UUID for file_id: {0}'.format(file_id))\n    headers = {'Accept': '*/*'}\n    if etag:\n        headers['If-None-Match'] = etag\n    resp = self._authenticated_request.to_endpoint('file/{}/content/'.format(file_id)).with_headers(headers).get()\n    if (resp.status_code == 304):\n        return (None, None)\n    if ('ETag' not in resp.headers):\n        raise StorageException('No ETag received from the service with the download')\n    return (resp.headers['ETag'], resp.content)", "docstring": "Download file content.\n\nArgs:\nfile_id (str): The UUID of the file whose content is requested\netag (str): If the content is not changed since the provided ETag,\nthe content won't be downloaded. If the content is changed, it\nwill be downloaded and returned with its new ETag.\n\nNote:\nETags should be enclosed in double quotes::\n\nmy_etag = '\"71e1ed9ee52e565a56aec66bc648a32c\"'\n\n\nReturns:\nA tuple of ETag and content (etag, content) if the content was\nretrieved. If an etag was provided, and content didn't change\nreturns (None, None)::\n\n('\"71e1ed9ee52e565a56aec66bc648a32c\"', 'Hello world!')\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def sg_inject(path, mod_name):\n    r\n    \n    import sys\n    if path not in list(sys.path):\n        sys.path.append(path)\n    globals()[mod_name] = importlib.import_module(mod_name)\n    \n    for func_name in dir(globals()[mod_name]):\n        if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):\n            if not func_name.startswith('_'):\n                \n                exec('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name))\n                \n                exec('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name))", "docstring": "r\"\"\"Converts all functions in the given Python module to sugar functions\nso that they can be used in a chainable manner.\n\nArgs:\npath: A string. Path to the Python module\nmod_name: A string. The name of the Python module to inject.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "class StackedRNNCells(Layer):\n\n    def __init__(self, cells, **kwargs):\n        for cell in cells:\n            if not 'call' in dir(cell):\n                raise ValueError('All cells must have a `call` method. received cells:', cells)\n            if not 'state_size' in dir(cell):\n                raise ValueError('All cells must have a `state_size` attribute. received cells:', cells)\n        self.cells = cells\n        self.reverse_state_order = kwargs.pop('reverse_state_order', False)\n        if self.reverse_state_order:\n            logging.warning('reverse_state_order=True in StackedRNNCells will soon be deprecated. Please update the code to work with the natural order of states if you rely on the RNN states, eg RNN(return_state=True).')\n        super(StackedRNNCells, self).__init__(**kwargs)\n\n    @property\n    def state_size(self):\n        return tuple((c.state_size for c in (self.cells[::-1] if self.reverse_state_order else self.cells)))\n\n    @property\n    def output_size(self):\n        if getattr(self.cells[-1], 'output_size', None) is not None:\n            return self.cells[-1].output_size\n        elif _is_multiple_state(self.cells[-1].state_size):\n            return self.cells[-1].state_size[0]\n        else:\n            return self.cells[-1].state_size\n\n    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n        initial_states = []\n        for cell in self.cells[::-1] if self.reverse_state_order else self.cells:\n            get_initial_state_fn = getattr(cell, 'get_initial_state', None)\n            if get_initial_state_fn:\n                initial_states.append(get_initial_state_fn(inputs=inputs, batch_size=batch_size, dtype=dtype))\n            else:\n                initial_states.append(_generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype))\n        return tuple(initial_states)\n\n    def call(self, inputs, states, constants=None, training=None, **kwargs):\n        state_size = self.state_size[::-1] if self.reverse_state_order else self.state_size\n        nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))\n        new_nested_states = []\n        for cell, states in zip(self.cells, nested_states):\n            states = states if nest.is_nested(states) else [states]\n            is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None\n            states = states[0] if len(states) == 1 and is_tf_rnn_cell else states\n            if generic_utils.has_arg(cell.call, 'training'):\n                kwargs['training'] = training\n            else:\n                kwargs.pop('training', None)\n            cell_call_fn = cell.__call__ if callable(cell) else cell.call\n            if generic_utils.has_arg(cell.call, 'constants'):\n                inputs, states = cell_call_fn(inputs, states, constants=constants, **kwargs)\n            else:\n                inputs, states = cell_call_fn(inputs, states, **kwargs)\n            new_nested_states.append(states)\n        return (inputs, nest.pack_sequence_as(state_size, nest.flatten(new_nested_states)))\n\n    @tf_utils.shape_type_conversion\n    def build(self, input_shape):\n        if isinstance(input_shape, list):\n            input_shape = input_shape[0]\n        for cell in self.cells:\n            if isinstance(cell, Layer) and (not cell.built):\n                with backend.name_scope(cell.name):\n                    cell.build(input_shape)\n                    cell.built = True\n            if getattr(cell, 'output_size', 
None) is not None:\n                output_dim = cell.output_size\n            elif _is_multiple_state(cell.state_size):\n                output_dim = cell.state_size[0]\n            else:\n                output_dim = cell.state_size\n            input_shape = tuple([input_shape[0]] + tensor_shape.TensorShape(output_dim).as_list())\n        self.built = True\n\n    def get_config(self):\n        cells = []\n        for cell in self.cells:\n            cells.append(generic_utils.serialize_keras_object(cell))\n        config = {'cells': cells}\n        base_config = super(StackedRNNCells, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    @classmethod\n    def from_config(cls, config, custom_objects=None):\n        from tensorflow.python.keras.layers import deserialize as deserialize_layer\n        cells = []\n        for cell_config in config.pop('cells'):\n            cells.append(deserialize_layer(cell_config, custom_objects=custom_objects))\n        return cls(cells, **config)", "docstring": "Wrapper allowing a stack of RNN cells to behave as a single cell.\n\nUsed to implement efficient stacked RNNs.\n\nArgs:\ncells: List of RNN cell instances.\n\nExamples:\n\n```python\nbatch_size = 3\nsentence_max_length = 5\nn_features = 2\nnew_shape = (batch_size, sentence_max_length, n_features)\nx = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32)\n\nrnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)]\nstacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)\nlstm_layer = tf.keras.layers.RNN(stacked_lstm)\n\nresult = lstm_layer(x)\n```", "source": "github-repos"}
{"code": "def _get_profile_data_generator(self):\n    node_to_file_path = {}\n    node_to_line_number = {}\n    node_to_func_name = {}\n    node_to_op_type = {}\n    for op in self._graph.get_operations():\n        for trace_entry in reversed(op.traceback):\n            file_path = trace_entry[0]\n            line_num = trace_entry[1]\n            func_name = trace_entry[2]\n            if not source_utils.guess_is_tensorflow_py_library(file_path):\n                break\n        node_to_file_path[op.name] = file_path\n        node_to_line_number[op.name] = line_num\n        node_to_func_name[op.name] = func_name\n        node_to_op_type[op.name] = op.type\n\n    def profile_data_generator(device_step_stats):\n        for node_stats in device_step_stats.node_stats:\n            if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n                continue\n            yield profiling.ProfileDatum(device_step_stats.device, node_stats, node_to_file_path.get(node_stats.node_name, ''), node_to_line_number.get(node_stats.node_name, 0), node_to_func_name.get(node_stats.node_name, ''), node_to_op_type.get(node_stats.node_name, ''))\n    return profile_data_generator", "docstring": "Get function that generates `ProfileDatum` objects.\n\nReturns:\nA function that generates `ProfileDatum` objects.", "source": "github-repos"}
{"code": "def delete(self, resource, force=False, timeout=(- 1)):\n    return self._client.delete(resource, force=force, timeout=timeout)", "docstring": "Deletes a Deployment Server object based on its UUID or URI.\n\nArgs:\nresource (dict):\nObject to delete.\nforce:\nIf set to true, the operation completes despite any problems with\nnetwork connectivity or errors on the resource itself. The default is false.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nbool: Indicates if the volume was successfully deleted.", "source": "codesearchnet"}
{"code": "def _get_output_columns(nodes, context):\n    columns = []\n    for node in nodes:\n        for sql_output in sql_context_helpers.get_outputs(node, context):\n            field_name = sql_output.field_name\n            column = sql_context_helpers.get_column(field_name, node, context)\n            column = column.label(sql_output.output_name)\n            columns.append(column)\n    return columns", "docstring": "Get the output columns for a list of SqlNodes.\n\nArgs:\nnodes: List[SqlNode], the nodes to get output columns from.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nList[Column], list of SqlAlchemy Columns to output for this query.", "source": "codesearchnet"}
{"code": "def inspect_commit(self, commit):\n        \n        req = proto.InspectCommitRequest(commit=commit_from(commit))\n        return self.stub.InspectCommit(req, metadata=self.metadata)", "docstring": "Returns info about a specific Commit.\n\nParams:\n* commit: A tuple, string, or Commit object representing the commit.", "source": "juraj-google-style"}
{"code": "def greedy_coloring(adj):\n    coloring = {}\n    colors = {}\n    possible_colors = {n: set(range(len(adj))) for n in adj}\n    while possible_colors:\n        n = min(possible_colors, key=(lambda n: len(possible_colors[n])))\n        color = min(possible_colors[n])\n        coloring[n] = color\n        if (color not in colors):\n            colors[color] = {n}\n        else:\n            colors[color].add(n)\n        for neighbor in adj[n]:\n            if ((neighbor in possible_colors) and (color in possible_colors[neighbor])):\n                possible_colors[neighbor].remove(color)\n        del possible_colors[n]\n    return (coloring, colors)", "docstring": "Determines a vertex coloring.\n\nArgs:\nadj (dict): The edge structure of the graph to be colored.\n`adj` should be of the form {node: neighbors, ...} where\nneighbors is a set.\n\nReturns:\ndict: the coloring {node: color, ...}\ndict: the colors {color: [node, ...], ...}\n\nNote:\nThis is a greedy heuristic: the resulting coloring is not\nnecessarily minimal.", "source": "codesearchnet"}
{"code": "def _load_info(self):\n    url = ('%s/prefix?duration=36000' % self.base_url)\n    r = self.gbdx_connection.get(url)\n    r.raise_for_status()\n    return r.json()", "docstring": "Get user info for GBDX S3, put into instance vars for convenience.\n\nArgs:\nNone.\n\nReturns:\nDictionary with S3 access key, S3 secret key, S3 session token,\nuser bucket and user prefix (dict).", "source": "codesearchnet"}
{"code": "def typing(self, *, channel: str):\n        \n        payload = {\"id\": self._next_msg_id(), \"type\": \"typing\", \"channel\": channel}\n        self.send_over_websocket(payload=payload)", "docstring": "Sends a typing indicator to the specified channel.\n\nThis indicates that this app is currently\nwriting a message to send to a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C024BE91L'\n\nRaises:\nSlackClientNotConnectedError: Websocket connection is closed.", "source": "juraj-google-style"}
{"code": "def sd(line, cell=None):\n  \n  parser = google.datalab.utils.commands.CommandParser(prog='%sd', description=(\n      'Execute various Stackdriver related operations. Use \"%sd '\n      '<stackdriver_product> -h\" for help on a specific Stackdriver product.'))\n\n  \n  _create_monitoring_subparser(parser)\n  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the stackdriver cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the storage line.\nReturns:\nThe results of executing the cell.", "source": "juraj-google-style"}
{"code": "def report_uninitialized_variables(var_list=None, name='report_uninitialized_variables'):\n    if var_list is None:\n        var_list = global_variables() + local_variables()\n        if not var_list:\n            var_list = []\n            for op in ops.get_default_graph().get_operations():\n                if op.type in ['Variable', 'VariableV2', 'AutoReloadVariable']:\n                    var_list.append(op.outputs[0])\n    with ops.name_scope(name):\n        if var_list:\n            init_vars = [state_ops.is_variable_initialized(v) for v in var_list]\n        local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu:0')\n        with ops.device(local_device):\n            if not var_list:\n                return array_ops.constant([], dtype=dtypes.string)\n            else:\n                variables_mask = math_ops.logical_not(array_ops_stack.stack(init_vars))\n                variable_names_tensor = array_ops.constant([s.op.name for s in var_list])\n                return array_ops.boolean_mask(variable_names_tensor, variables_mask)", "docstring": "Adds ops to list the names of uninitialized variables.\n\nWhen run, it returns a 1-D tensor containing the names of uninitialized\nvariables if there are any, or an empty array if there are none.\n\nArgs:\nvar_list: List of `Variable` objects to check. Defaults to the value of\n`global_variables() + local_variables()`\nname: Optional name of the `Operation`.\n\nReturns:\nA 1-D tensor containing names of the uninitialized variables, or an empty\n1-D tensor if there are no variables or no uninitialized variables.", "source": "github-repos"}
{"code": "def query_properties_with_values(self, query, include_defaults=True):\n    themed_keys = set()\n    result = dict()\n    if include_defaults:\n        keys = self.properties()\n    else:\n        keys = (set(self._property_values.keys()) | set(self._unstable_default_values.keys()))\n        if self.themed_values():\n            themed_keys = set(self.themed_values().keys())\n            keys |= themed_keys\n    for key in keys:\n        descriptor = self.lookup(key)\n        if (not query(descriptor)):\n            continue\n        value = descriptor.serializable_value(self)\n        if ((not include_defaults) and (key not in themed_keys)):\n            if (isinstance(value, PropertyValueContainer) and (key in self._unstable_default_values)):\n                continue\n        result[key] = value\n    return result", "docstring": "Query the properties values of |HasProps| instances with a\npredicate.\n\nArgs:\nquery (callable) :\nA callable that accepts property descriptors and returns True\nor False\n\ninclude_defaults (bool, optional) :\nWhether to include properties that have not been explicitly\nset by a user (default: True)\n\nReturns:\ndict : mapping of property names and values for matching properties", "source": "codesearchnet"}
{"code": "def add(self, method_mask, path, func):\n        \n        is_err = len(signature(func).parameters) == 3\n        is_subchain = isinstance(func, MiddlewareChain)\n        tup = MiddlewareNode(func=func,\n                             mask=method_mask,\n                             path=path,\n                             is_errorhandler=is_err,\n                             is_subchain=is_subchain,)\n        self.mw_list.append(tup)", "docstring": "Add a function to the middleware chain.\nThis function is returned when iterating over the chain with matching method and path.\n\nArgs:\nmethod_mask (growler.http.HTTPMethod): A bitwise mask intended to match specific\nrequest methods.\npath (str or regex): An object with which to compare request urls\nfunc (callable): The function to be yieled from the generator upon a request\nmatching the method_mask and path", "source": "juraj-google-style"}
{"code": "def trigger(self, target: str, trigger: str, parameters: Dict[str, Any]={}):\n\t\t\n\t\tpass", "docstring": "Calls the specified Trigger of another Area with the optionally given parameters.\n\nArgs:\ntarget: The name of the target Area.\ntrigger: The name of the Trigger.\nparameters: The parameters of the function call.", "source": "juraj-google-style"}
{"code": "def synthesize(self, duration, tick_frequency):\n    sr = self.samplerate.samples_per_second\n    tick = np.random.uniform(low=(- 1.0), high=1.0, size=int((sr * 0.1)))\n    tick *= np.linspace(1, 0, len(tick))\n    samples = np.zeros(int((sr * (duration / Seconds(1)))))\n    ticks_per_second = (Seconds(1) / tick_frequency)\n    step = int((sr \n    for i in range(0, len(samples), step):\n        size = len(samples[i:(i + len(tick))])\n        samples[i:(i + len(tick))] += tick[:size]\n    return AudioSamples(samples, self.samplerate)", "docstring": "Synthesize periodic \"ticks\", generated from white noise and an envelope\n\nArgs:\nduration (numpy.timedelta64): The total duration of the sound to be\nsynthesized\ntick_frequency (numpy.timedelta64): The frequency of the ticking\nsound", "source": "codesearchnet"}
{"code": "def _update_fetch_positions(self, partitions):\n        \n        \n        \n        \n        \n        \n        self._fetcher.reset_offsets_if_needed(partitions)\n\n        if not self._subscription.has_all_fetch_positions():\n            \n            \n            if (self.config['api_version'] >= (0, 8, 1) and\n                self.config['group_id'] is not None):\n                \n                self._coordinator.refresh_committed_offsets_if_needed()\n\n            \n            self._fetcher.update_fetch_positions(partitions)", "docstring": "Set the fetch position to the committed position (if there is one)\nor reset it using the offset reset policy the user has configured.\n\nArguments:\npartitions (List[TopicPartition]): The partitions that need\nupdating fetch positions.\n\nRaises:\nNoOffsetForPartitionError: If no offset is stored for a given\npartition and no offset reset policy is defined.", "source": "juraj-google-style"}
{"code": "def WriteRow(self, values):\n    \n    precondition.AssertIterableType(values, text)\n\n    if compatibility.PY2:\n      self._csv.writerow([value.encode(\"utf-8\") for value in values])\n    else:\n      self._csv.writerow(values)", "docstring": "Writes a single row to the underlying buffer.\n\nArgs:\nvalues: A list of string values to be inserted into the CSV output.", "source": "juraj-google-style"}
{"code": "def hotkey(*args, **kwargs):\n    interval = float(kwargs.get('interval', 0.0))\n    _failSafeCheck()\n    for c in args:\n        if (len(c) > 1):\n            c = c.lower()\n        platformModule._keyDown(c)\n        time.sleep(interval)\n    for c in reversed(args):\n        if (len(c) > 1):\n            c = c.lower()\n        platformModule._keyUp(c)\n        time.sleep(interval)\n    _autoPause(kwargs.get('pause', None), kwargs.get('_pause', True))", "docstring": "Performs key down presses on the arguments passed in order, then performs\nkey releases in reverse order.\n\nThe effect is that calling hotkey('ctrl', 'shift', 'c') would perform a\n\"Ctrl-Shift-C\" hotkey/keyboard shortcut press.\n\nArgs:\nkey(s) (str): The series of keys to press, in order. This can also be a\nlist of key strings to press.\ninterval (float, optional): The number of seconds in between each press.\n0.0 by default, for no pause in between presses.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        writer.write(self.to_json_string(use_diff=use_diff))", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (`str` or `os.PathLike`):\nPath to the JSON file in which this configuration instance's parameters will be saved.\nuse_diff (`bool`, *optional*, defaults to `True`):\nIf set to `True`, only the difference between the config instance and the default `GenerationConfig()`\nis serialized to JSON file.", "source": "github-repos"}
{"code": "def CheckOperatorSpacing(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  \n  \n  \n  \n  \n  while True:\n    match = Match(r'^(.*\\boperator\\b)(\\S+)(\\s*\\(.*)$', line)\n    if match:\n      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)\n    else:\n      break\n\n  \n  \n  \n  \n  if ((Search(r'[\\w.]=', line) or\n       Search(r'=[\\w.]', line))\n      and not Search(r'\\b(if|while|for) ', line)\n      \n      and not Search(r'(>=|<=|==|!=|&=|\\^=|\\|=|\\+=|\\*=|\\/=|\\%=)', line)\n      and not Search(r'operator=', line)):\n    error(filename, linenum, 'whitespace/operators', 4,\n          'Missing spaces around =')\n\n  \n  \n  \n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  match = Search(r'[^<>=!\\s](==|!=|<=|>=|\\|\\|)[^<>=!\\s,;\\)]', line)\n  if match:\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around %s' % match.group(1))\n  elif not Match(r'\n    \n    \n    \n    \n    match = Match(r'^(.*[^\\s<])<[^\\s=<,]', line)\n    if match:\n      (_, _, end_pos) = CloseExpression(\n          clean_lines, linenum, len(match.group(1)))\n      if end_pos <= -1:\n        error(filename, linenum, 'whitespace/operators', 3,\n              'Missing spaces around <')\n\n    \n    \n    \n    match = Match(r'^(.*[^-\\s>])>[^\\s=>,]', line)\n    if match:\n      (_, _, start_pos) = ReverseCloseExpression(\n          clean_lines, linenum, len(match.group(1)))\n      if start_pos <= -1:\n        error(filename, linenum, 'whitespace/operators', 3,\n              'Missing spaces around >')\n\n  \n  \n  \n  \n  \n  match = Search(r'(operator|[^\\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\\s,=<])', line)\n  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and\n      not (match.group(1) == 'operator' and match.group(2) == ';')):\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around <<')\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  match = Search(r'>>[a-zA-Z_]', line)\n  if match:\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around >>')\n\n  \n  match = Search(r'(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line)\n  if match:\n    error(filename, linenum, 'whitespace/operators', 4,\n          'Extra space for operator %s' % match.group(1))", "docstring": "Checks for horizontal spacing around operators.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def _get_current_tf_device():\n    graph = get_graph()\n    op = _TfDeviceCaptureOp()\n    graph._apply_device_functions(op)\n    if tf2.enabled():\n        return device_spec.DeviceSpecV2.from_string(op.device)\n    else:\n        return device_spec.DeviceSpecV1.from_string(op.device)", "docstring": "Return explicit device of current context, otherwise returns `None`.\n\nReturns:\nIf the current device scope is explicitly set, it returns a string with\nthe device (`CPU` or `GPU`). If the scope is not explicitly set, it will\nreturn `None`.", "source": "github-repos"}
{"code": "def _ExpectedKeysForEntry(self, entry):\n    return [entry.name]", "docstring": "Generate a list of expected cache keys for this type of map.\n\nArgs:\nentry: A ShadowMapEntry\n\nReturns:\nA list of strings", "source": "github-repos"}
{"code": "def get_firmware_version(self, cached=True):\n    if (cached and (self.firmware_version != 'unknown')):\n        return self.firmware_version\n    firmware_version = self.get_characteristic_handle_from_uuid(UUID_FIRMWARE_REVISION)\n    if (firmware_version is None):\n        logger.warn('Failed to find handle for firmware version')\n        return None\n    self.firmware_version = self.dongle._read_attribute(self.conn_handle, firmware_version)\n    return self.firmware_version", "docstring": "Returns the SK8 device firmware version.\n\nArgs:\ncached (bool): if True, returns the locally cached copy of the firmware version.\nIf this is set to False, or the version is not cached, it will read from\nthe device instead.\n\nReturns:\nstr. The current firmware version string. May be `None` if an error occurs.", "source": "codesearchnet"}
{"code": "def market_normal(self, session, after_open, before_close) -> Session:\n    logger = logs.get_logger(self.market_normal)\n    if (session not in self.exch):\n        return SessNA\n    ss = self.exch[session]\n    s_time = shift_time(ss[0], (int(after_open) + 1))\n    e_time = shift_time(ss[(- 1)], (- int(before_close)))\n    request_cross = (pd.Timestamp(s_time) >= pd.Timestamp(e_time))\n    session_cross = (pd.Timestamp(ss[0]) >= pd.Timestamp(ss[1]))\n    if (request_cross and (not session_cross)):\n        logger.warning(f'end time {e_time} is earlier than {s_time} ...')\n        return SessNA\n    return Session(s_time, e_time)", "docstring": "Time intervals between market\n\nArgs:\nsession: [allday, day, am, pm, night]\nafter_open: mins after open\nbefore_close: mins before close\n\nReturns:\nSession of start_time and end_time", "source": "codesearchnet"}
{"code": "def _setweights(self):\n        \n        for name_w in self.weights:\n            raw_w = getattr(self.module, name_w + '_raw')\n            w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)\n            if hasattr(self.module, name_w):\n                delattr(self.module, name_w)\n            setattr(self.module, name_w, w)", "docstring": "Uses pytorch's built-in dropout function to apply dropout to the parameters of\nthe wrapped module.\n\nArgs:\nNone\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_compatible_systems(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/compatible-systems\"\n        return self._client.get(uri)", "docstring": "Retrieves a collection of all storage systems that is applicable to this storage volume template.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\n\nReturns:\nlist: Storage systems.", "source": "juraj-google-style"}
{"code": "def __init__(self, saved_model_checksum: int=None, graph_def_program_hash: int=None, signature_def_hash: int=None, saved_object_graph_hash: int=None, checkpoint_hash: int=None, version: int=None):\n    self.saved_model_checksum = saved_model_checksum\n    self.graph_def_program_hash = graph_def_program_hash\n    self.signature_def_hash = signature_def_hash\n    self.saved_object_graph_hash = saved_object_graph_hash\n    self.checkpoint_hash = checkpoint_hash\n    self.version = version", "docstring": "Initializes the instance based on values in the SavedModel fingerprint.\n\nArgs:\nsaved_model_checksum: Value of the`saved_model_checksum`.\ngraph_def_program_hash: Value of the `graph_def_program_hash`.\nsignature_def_hash: Value of the `signature_def_hash`.\nsaved_object_graph_hash: Value of the `saved_object_graph_hash`.\ncheckpoint_hash: Value of the `checkpoint_hash`.\nversion: Value of the producer field of the VersionDef.", "source": "github-repos"}
{"code": "def pivot_samples(self, values, index=\"ID_REF\"):\n        \n        data = []\n        for gsm in self.gsms.values():\n            tmp_data = gsm.table.copy()\n            tmp_data[\"name\"] = gsm.name\n            data.append(tmp_data)\n        ndf = concat(data).pivot(index=index, values=values, columns=\"name\")\n        return ndf", "docstring": "Pivot samples by specified column.\n\nConstruct a table in which columns (names) are the samples, index\nis a specified column eg. ID_REF and values in the columns are of one\nspecified type.\n\nArgs:\nvalues (:obj:`str`): Column name present in all GSMs.\nindex (:obj:`str`, optional): Column name that will become an index in\npivoted table. Defaults to \"ID_REF\".\n\nReturns:\n:obj:`pandas.DataFrame`: Pivoted data", "source": "juraj-google-style"}
{"code": "def from_list(cls, vals: List[Value]=[], reverse: bool=False) -> 'LinkedList':\n    res = EmptyList()\n    for v in (vals if reverse else vals[::(- 1)]):\n        res = cls(v, res)\n    return res", "docstring": "Create an instance from a standard list.\n\nArgs:\nvals: Python list of instance values.", "source": "codesearchnet"}
{"code": "def n_feature_hash(feature, dims, seeds):\n    \n    vec = np.zeros(sum(dims))\n    offset = 0\n\n    for seed, dim in zip(seeds, dims):\n        vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)\n        offset += dim\n\n    return vec", "docstring": "N-hot-encoded feature hashing.\n\nArgs:\nfeature (str): Target feature represented as string.\ndims (list of int): Number of dimensions for each hash value.\nseeds (list of float): Seed of each hash function (mmh3).\n\nReturns:\nnumpy 1d array: n-hot-encoded feature vector for `s`.", "source": "juraj-google-style"}
{"code": "def get_arrays(self, ji_win):\n        \n        if isinstance(ji_win, dict):\n            ji_windows = ji_win\n        else:\n            ji_windows = self.ji_windows(ji_win)\n\n        arrays = []\n        for filename, res in zip(self._layer_files, self._layer_resolution):\n            with rasterio.open(filename) as src:\n                arr = src.read(1, window=ji_windows[res])\n            arrays.append(arr)\n        if self.dst_res is not None:\n            arrays = self._resample(arrays=arrays, ji_windows=ji_windows)\n        return arrays", "docstring": "Get the data of the a window given the ji_windows derived with :method:`ji_windows`.\n\nArguments:\nji_win {[type]} -- The index of the window or the (multi-resolution) windows returned by :meth:`ji_window`.\n\nReturns:\n(list of) array(s) -- List of 2D arrays in native resolution in case `dst_res` is `None`\nor a 3D array where all layers are resampled to `dst_res` resolution.", "source": "juraj-google-style"}
{"code": "def _test_connection(url):\n        \n        import psycopg2\n        try:\n            with closing(psycopg2.connect(dsn=url)) as conn:\n                conn.cursor()\n        except psycopg2.OperationalError as e:\n            raise ValidationError(e)", "docstring": "Attempt to connect to postgres\n\nArgs:\nurl: string in the form \"postgres://[user]:[password]@[host][:port][/database]\"", "source": "juraj-google-style"}
{"code": "def _encode_choice_type_exclusivity(self, builder: expressions.Builder) -> List[validation_pb2.SqlRequirement]:\n    if not builder.return_type.returns_polymorphic():\n        return []\n    field_name = _last_path_token(builder)\n    constraint_key = f'{field_name}-choice-type-exclusivity'\n    if constraint_key in self._options.skip_keys:\n        return []\n    type_codes = _utils.element_type_codes(builder.return_type.root_element_definition)\n    if len(type_codes) <= 1:\n        return []\n    num_choices_exist: expressions.Builder = _num_fields_exist((builder.ofType(choice_field) for choice_field in type_codes))\n    exclusivity_constraint: expressions.Builder = num_choices_exist <= 1\n    parent_builder = builder.get_parent_builder()\n    result = self._encode_fhir_path_builder_constraint(exclusivity_constraint, parent_builder)\n    if result is None:\n        return []\n    choice_type_path = self._abs_path_invocation(builder)\n    column_name = _path_to_sql_column_name(choice_type_path)\n    parent_path = self._abs_path_invocation(parent_builder)\n    description = f'Choice type {choice_type_path} has more than one of its possible choice data types set.'\n    return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_CHOICE_TYPE, element_path=parent_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[field_name])]", "docstring": "Encodes a constraint ensuring the choice type has only one value set.\n\nIf `builder` represents a choice type, encodes SQL ensuring that at most one\nof the columns representing that choice type's possible data types is not\nnull.\n\nArgs:\nbuilder: The builder representing a path to a choice type.\n\nReturns:\nAn empty sequence if `builder` is not a path to a choice type or the\nconstraint can not be encoded for other reasons. Otherwise, a sequence\ncontaining a single `SqlRequirement` for the choice type.", "source": "github-repos"}
{"code": "def register_watched_variable_resolver(resolver):\n    global _variables_override\n    assert _variables_override is default_get_variables\n    _variables_override = resolver", "docstring": "Registers the resolver to be used to get the list of variables to watch.\n\nArgs:\nresolver: callable, takes a Variable and returns a list of Variables that\nshall be watched.", "source": "github-repos"}
{"code": "def GetMessages(file_protos):\n  \n  for file_proto in file_protos:\n    _FACTORY.pool.Add(file_proto)\n  return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])", "docstring": "Builds a dictionary of all the messages available in a set of files.\n\nArgs:\nfile_protos: A sequence of file protos to build messages out of.\n\nReturns:\nA dictionary mapping proto names to the message classes. This will include\nany dependent messages as well as any messages defined in the same file as\na specified message.", "source": "juraj-google-style"}
{"code": "def preprocess_examples(self, texts: Union[TextInput, List[TextInput]], images: ImageInput=None, bboxes: BboxInput=None, num_image_tokens: Optional[int]=64) -> Union[str, List[str]]:\n    img_tokens = [self.boi_token] * num_image_tokens\n    img_info_tokens = ' '.join([self.boi_token] + img_tokens + [self.eoi_token])\n    batched = True\n    if isinstance(texts, str):\n        batched = False\n        texts = [texts]\n    if images is None:\n        images = [None] * len(texts)\n    elif not is_batched(images):\n        images = [images]\n    if len(texts) != len(images):\n        raise ValueError(f'The number of examples in `texts` and `images` should be the same. Got {len(texts)} v.s. {len(images)} instead.')\n    if not batched:\n        self._check_bboxes_for_single_text(bboxes)\n        bboxes = [bboxes]\n    elif bboxes is not None:\n        if not isinstance(bboxes, list):\n            raise ValueError('`bboxes` should be `None` or a list (as a batch) when `texts` is passed as a batch.')\n        for x in bboxes:\n            self._check_bboxes_for_single_text(x)\n    else:\n        bboxes = [None] * len(texts)\n    if len(bboxes) != len(texts):\n        raise ValueError(f'The number of examples in `texts` and `bboxes` should be the same. Got {len(texts)} v.s. {len(bboxes)} instead.')\n    result = [self._preprocess_single_example(text, image, bbox, img_info_tokens) for text, image, bbox in zip(texts, images, bboxes)]\n    if not batched:\n        result = result[0]\n    return result", "docstring": "Add image and bounding box information to `texts` as image and patch index tokens.\n\nArgs:\ntexts (`Union[TextInput, List[TextInput]]`): The texts to be processed.\nimages (`ImageInput`, *optional*): The images associated to `texts`.\nbboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*):\nThe bounding bboxes associated to `texts`.\nnum_image_tokens (`int`, *optional*, defaults to 64):\nThe number of image tokens (used as latent queries). This should corresponds to the `latent_query_num`\nattribute in `Kosmos2Config`.\n\nReturns:\n`Union[TextInput, List[TextInput]]`: The processed texts with image and patch index tokens.", "source": "github-repos"}
{"code": "def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):\n    ends_with_sep = self.ends_with_path_separator(dir_name)\n    dir_name = self.absnormpath(dir_name)\n    if (ends_with_sep and self.is_macos and self.exists(dir_name, check_link=True) and (not self.exists(dir_name))):\n        self.remove_object(dir_name)\n    path_components = self._path_components(dir_name)\n    current_dir = self.root\n    for component in path_components:\n        if ((component not in current_dir.contents) or (not isinstance(current_dir.contents, dict))):\n            break\n        else:\n            current_dir = current_dir.contents[component]\n    try:\n        self.create_dir(dir_name, (mode & (~ self.umask)))\n    except (IOError, OSError) as e:\n        if ((not exist_ok) or (not isinstance(self.resolve(dir_name), FakeDirectory))):\n            if (self.is_windows_fs and (e.errno == errno.ENOTDIR)):\n                e.errno = errno.ENOENT\n            self.raise_os_error(e.errno, e.filename)", "docstring": "Create a leaf Fake directory and create any non-existent\nparent dirs.\n\nArgs:\ndir_name: (str) Name of directory to create.\nmode: (int) Mode to create directory (and any necessary parent\ndirectories) with. This argument defaults to 0o777.\nThe umask is applied to this mode.\nexist_ok: (boolean) If exist_ok is False (the default), an OSError is\nraised if the target directory already exists.\nNew in Python 3.2.\n\nRaises:\nOSError: if the directory already exists and exist_ok=False,\nor as per :py:meth:`create_dir`.", "source": "codesearchnet"}
{"code": "def add_file_locations(self, file_locations=[]):\n        \n        if not hasattr(self, '__file_locations__'):\n            self.__file_locations__ = copy.copy(file_locations)\n        else:\n            self.__file_locations__ += copy.copy(file_locations)", "docstring": "Adds a list of file locations to the current list\n\nArgs:\nfile_locations: list of file location tuples", "source": "juraj-google-style"}
{"code": "def _publish_response(self, slug, message):\n    resp_topic = self.topics.gateway_topic(slug, 'data/response')\n    self._logger.debug('Publishing response message: (topic=%s) (message=%s)', resp_topic, message)\n    self.client.publish(resp_topic, message)", "docstring": "Publish a response message for a device\n\nArgs:\nslug (string): The device slug that we are publishing on behalf of\nmessage (dict): A set of key value pairs that are used to create the message\nthat is sent.", "source": "codesearchnet"}
{"code": "def _ExtractExtensionInstallEvents(self, settings_dict, parser_mediator):\n    \n    for extension_id, extension in sorted(settings_dict.items()):\n      install_time = extension.get('install_time', None)\n      if not install_time:\n        parser_mediator.ProduceExtractionWarning(\n            'installation time missing for extension ID {0:s}'.format(\n                extension_id))\n        continue\n\n      try:\n        install_time = int(install_time, 10)\n      except ValueError:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to convert installation time for extension ID '\n            '{0:s}').format(extension_id))\n        continue\n\n      manifest = extension.get('manifest', None)\n      if not manifest:\n        parser_mediator.ProduceExtractionWarning(\n            'manifest missing for extension ID {0:s}'.format(extension_id))\n        continue\n\n      event_data = ChromeExtensionInstallationEventData()\n      event_data.extension_id = extension_id\n      event_data.extension_name = manifest.get('name', None)\n      event_data.path = extension.get('path', None)\n\n      date_time = dfdatetime_webkit_time.WebKitTime(timestamp=install_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_ADDED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract extension installation events.\n\nArgs:\nsettings_dict (dict[str: object]): settings data from a Preferences file.\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:\n    \n\n    if not data[\"ok\"]:\n        raise exceptions.SlackAPIError(data.get(\"error\", \"unknow_error\"), headers, data)\n\n    if \"warning\" in data:\n        LOG.warning(\"Slack API WARNING: %s\", data[\"warning\"])", "docstring": "Check request response for Slack API error\n\nArgs:\nheaders: Response headers\ndata: Response data\n\nRaises:\n:class:`slack.exceptions.SlackAPIError`", "source": "juraj-google-style"}
{"code": "def start(self, interval_s):\n    \n    if self.running:\n      return False\n\n    self.stopped.clear()\n\n    def _execute():\n      \n      if not self.method() and self.stop_if_false:\n        return\n      while not self.stopped.wait(interval_s):\n        if not self.method() and self.stop_if_false:\n          return\n\n    self.thread = threading.Thread(target=_execute)\n    self.thread.daemon = True\n    self.thread.start()\n    return True", "docstring": "Starts executing the method at the specified interval.\n\nArgs:\ninterval_s: The amount of time between executions of the method.\nReturns:\nFalse if the interval was already running.", "source": "juraj-google-style"}
{"code": "def EnableNetworkInterfaces(\n      self, interfaces, logger, dhclient_script=None):\n    \n    interfaces_to_up = [i for i in interfaces if i != 'eth0']\n    if interfaces_to_up:\n      logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)\n      self._WriteIfcfg(interfaces_to_up, logger)\n      self._Ifup(interfaces_to_up, logger)", "docstring": "Enable the list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.\ndhclient_script: string, the path to a dhclient script used by dhclient.", "source": "juraj-google-style"}
{"code": "def get_embedded_object(self, signature_id):\n        \n        request = self._get_request()\n        return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id)", "docstring": "Retrieves a embedded signing object\n\nRetrieves an embedded object containing a signature url that can be opened in an iFrame.\n\nArgs:\n\nsignature_id (str): The id of the signature to get a signature url for\n\nReturns:\nAn Embedded object", "source": "juraj-google-style"}
{"code": "def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:\n    return Rigid(fn(self._rots), self._trans)", "docstring": "Applies a Rotation -> Rotation function to the stored rotation object.\n\nArgs:\nfn: A function of type Rotation -> Rotation\nReturns:\nA transformation object with a transformed rotation.", "source": "github-repos"}
{"code": "def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:\n    \n\n    \n    spec_dict[\"functions\"][\"list\"] = []\n    spec_dict[\"functions\"][\"list_long\"] = []\n    spec_dict[\"functions\"][\"list_short\"] = []\n\n    spec_dict[\"functions\"][\"primary\"] = {}\n    spec_dict[\"functions\"][\"primary\"][\"list_long\"] = []\n    spec_dict[\"functions\"][\"primary\"][\"list_short\"] = []\n\n    spec_dict[\"functions\"][\"modifier\"] = {}\n    spec_dict[\"functions\"][\"modifier\"][\"list_long\"] = []\n    spec_dict[\"functions\"][\"modifier\"][\"list_short\"] = []\n\n    spec_dict[\"functions\"][\"to_short\"] = {}\n    spec_dict[\"functions\"][\"to_long\"] = {}\n\n    for func_name in spec_dict[\"functions\"][\"info\"]:\n\n        abbreviated_name = spec_dict[\"functions\"][\"info\"][func_name][\"abbreviation\"]\n\n        spec_dict[\"functions\"][\"list\"].extend((func_name, abbreviated_name))\n\n        spec_dict[\"functions\"][\"list_long\"].append(func_name)\n        spec_dict[\"functions\"][\"list_short\"].append(abbreviated_name)\n\n        if spec_dict[\"functions\"][\"info\"][func_name][\"type\"] == \"primary\":\n            spec_dict[\"functions\"][\"primary\"][\"list_long\"].append(func_name)\n            spec_dict[\"functions\"][\"primary\"][\"list_short\"].append(abbreviated_name)\n        else:\n            spec_dict[\"functions\"][\"modifier\"][\"list_long\"].append(func_name)\n            spec_dict[\"functions\"][\"modifier\"][\"list_short\"].append(abbreviated_name)\n\n        spec_dict[\"functions\"][\"to_short\"][abbreviated_name] = abbreviated_name\n        spec_dict[\"functions\"][\"to_short\"][func_name] = abbreviated_name\n\n        spec_dict[\"functions\"][\"to_long\"][abbreviated_name] = func_name\n        spec_dict[\"functions\"][\"to_long\"][func_name] = func_name\n\n    return spec_dict", "docstring": "Add function keys to spec_dict\n\nArgs:\nspec_dict (Mapping[str, Any]): bel specification dictionary\n\nReturns:\nMapping[str, Any]: bel specification dictionary with added function keys", "source": "juraj-google-style"}
{"code": "def _EncodeString(self, string):\n    \n    try:\n      \n      \n      encoded_string = string.encode(self._encoding, errors=self._errors)\n    except UnicodeEncodeError:\n      if self._errors == 'strict':\n        logging.error(\n            'Unable to properly write output due to encoding error. '\n            'Switching to error tolerant encoding which can result in '\n            'non Basic Latin (C0) characters to be replaced with \"?\" or '\n            '\"\\\\ufffd\".')\n        self._errors = 'replace'\n\n      encoded_string = string.encode(self._encoding, errors=self._errors)\n\n    return encoded_string", "docstring": "Encodes the string.\n\nArgs:\nstring (str): string to encode.\n\nReturns:\nbytes: encoded string.", "source": "juraj-google-style"}
{"code": "def forward_ports(remote_host, local_host, local_listen_ports,\n                  remote_listen_ports):\n  \n  if \":\" in local_host and not local_host.startswith(\"[\"):\n    local_host = \"[%s]\" % local_host\n\n  ssh = whichcraft.which(\"ssh\") or whichcraft.which(\"plink\")\n  if not ssh:\n    raise ValueError(\"Couldn't find an ssh client.\")\n\n  args = [ssh, remote_host]\n  for local_port in local_listen_ports:\n    args += [\"-L\", \"%s:%s:%s:%s\" % (local_host, local_port,\n                                    local_host, local_port)]\n  for remote_port in remote_listen_ports:\n    args += [\"-R\", \"%s:%s:%s:%s\" % (local_host, remote_port,\n                                    local_host, remote_port)]\n\n  logging.info(\"SSH port forwarding: %s\", \" \".join(args))\n  return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n                          stdin=subprocess.PIPE, close_fds=(os.name == \"posix\"))", "docstring": "Forwards ports such that multiplayer works between machines.\n\nArgs:\nremote_host: Where to ssh to.\nlocal_host: \"127.0.0.1\" or \"::1\".\nlocal_listen_ports: Which ports to listen on locally to forward remotely.\nremote_listen_ports: Which ports to listen on remotely to forward locally.\n\nReturns:\nThe ssh process.\n\nRaises:\nValueError: if it can't find ssh.", "source": "juraj-google-style"}
{"code": "def generate_build(self, image, targetname, rebuilds=None, cache_repo='', cache_tag='', buildargs=None, **kwargs):\n    from_image = self.get_external_base_image(image)\n    if (cache_repo or cache_tag):\n        cache_from = utils.generate_name(image, cache_repo, cache_tag)\n    else:\n        cache_from = None\n    if (from_image is None):\n        raise errors.NoBaseError((\"No base image found in %s's dependencies\" % image))\n    if isinstance(from_image, ExternalDockerfile):\n        build_first = from_image\n        base_image = from_image.tag\n    else:\n        base_image = from_image\n        build_first = None\n    build_steps = []\n    istep = 0\n    sourceimages = set()\n    if (rebuilds is None):\n        rebuilds = []\n    else:\n        rebuilds = set(rebuilds)\n    for base_name in self.sort_dependencies(image):\n        istep += 1\n        buildname = ('dmkbuild_%s_%d' % (image, istep))\n        secret_files = self.ymldefs[base_name].get('secret_files', None)\n        squash = self.ymldefs[base_name].get('squash', bool(secret_files))\n        build_steps.append(dockermake.step.BuildStep(base_name, base_image, self.ymldefs[base_name], buildname, bust_cache=(base_name in rebuilds), build_first=build_first, cache_from=cache_from, buildargs=buildargs, squash=squash, secret_files=secret_files))\n        base_image = buildname\n        build_first = None\n        for (sourceimage, files) in iteritems(self.ymldefs[base_name].get('copy_from', {})):\n            sourceimages.add(sourceimage)\n            for (sourcepath, destpath) in iteritems(files):\n                istep += 1\n                buildname = ('dmkbuild_%s_%d' % (image, istep))\n                build_steps.append(dockermake.step.FileCopyStep(sourceimage, sourcepath, destpath, base_name, base_image, self.ymldefs[base_name], buildname, bust_cache=(base_name in rebuilds), build_first=build_first, cache_from=cache_from))\n                base_image = buildname\n    sourcebuilds = [self.generate_build(img, img, cache_repo=cache_repo, cache_tag=cache_tag, **kwargs) for img in sourceimages]\n    return builds.BuildTarget(imagename=image, targetname=targetname, steps=build_steps, sourcebuilds=sourcebuilds, from_image=from_image, **kwargs)", "docstring": "Separate the build into a series of one or more intermediate steps.\nEach specified build directory gets its own step\n\nArgs:\nimage (str): name of the image as defined in the dockermake.py file\ntargetname (str): name to tag the final built image with\nrebuilds (List[str]): list of image layers to rebuild (i.e., without docker's cache)\ncache_repo (str): repository to get images for caches in builds\ncache_tag (str): tags to use from repository for caches in builds\nbuildargs (dict): build-time dockerfile arugments\n**kwargs (dict): extra keyword arguments for the BuildTarget object", "source": "codesearchnet"}
{"code": "def _collect_paths(element):\n    output = []\n    path = vectors.el_to_path_vector(element)\n    root = path[0]\n    params = (element.params if element.params else None)\n    match = root.find(element.getTagName(), params)\n    if (len(match) == 1):\n        output.append(PathCall('find', 0, [element.getTagName(), params]))\n    output.extend(path_patterns.neighbours_pattern(element))\n    output.extend(path_patterns.predecesors_pattern(element, root))\n    index_backtrack = []\n    last_index_backtrack = []\n    params_backtrack = []\n    last_params_backtrack = []\n    for el in reversed(path):\n        if (not el.parent):\n            continue\n        tag_name = el.getTagName()\n        match = el.parent.wfind(tag_name).childs\n        index = match.index(el)\n        index_backtrack.append(PathCall('wfind', index, [tag_name]))\n        last_index_backtrack.append(PathCall('wfind', (index - len(match)), [tag_name]))\n        if el.params:\n            match = el.parent.wfind(tag_name, el.params).childs\n            index = match.index(el)\n            params_backtrack.append(PathCall('wfind', index, [tag_name, el.params]))\n            last_params_backtrack.append(PathCall('wfind', (index - len(match)), [tag_name, el.params]))\n        else:\n            params_backtrack.append(PathCall('wfind', index, [tag_name]))\n            last_params_backtrack.append(PathCall('wfind', (index - len(match)), [tag_name]))\n    output.extend([Chained(reversed(params_backtrack)), Chained(reversed(last_params_backtrack)), Chained(reversed(index_backtrack)), Chained(reversed(last_index_backtrack))])\n    return output", "docstring": "Collect all possible path which leads to `element`.\n\nFunction returns standard path from root element to this, reverse path,\nwhich uses negative indexes for path, also some pattern matches, like\n\"this is element, which has neighbour with id 7\" and so on.\n\nArgs:\nelement (obj): HTMLElement instance.\n\nReturns:\nlist: List of :class:`.PathCall` and :class:`.Chained` objects.", "source": "codesearchnet"}
{"code": "def max_error(grad1, grad2):\n    error = 0\n    for j_t, j_n in zip(grad1, grad2):\n        if j_t.size or j_n.size:\n            error = np.maximum(error, np.fabs(j_t - j_n).max())\n    return error", "docstring": "Computes maximum elementwise gap.\n\nComputes the maximum elementwise gap between two lists of tensors of the same\nshape.\n\nArgs:\ngrad1: a lists of tensors.\ngrad2: a lists of tensors with the same shape as grad1.\n\nReturns:\nThe maximum elementwise gap between the two.", "source": "github-repos"}
{"code": "def correlation_matrix(df):\n    columns = df.columns.tolist()\n    corr = pd.DataFrame(np.corrcoef(df, rowvar=0), columns=columns, index=columns)\n    return corr", "docstring": "Returns a pandas DataFrame with the pair-wise correlations of the columns.\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "codesearchnet"}
{"code": "def SetDocumentType(self, document_type):\n    \n    self._document_type = document_type\n    logger.debug('Elasticsearch document type: {0:s}'.format(document_type))", "docstring": "Sets the document type.\n\nArgs:\ndocument_type (str): document type.", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context):\n    \n    super(NTFSFile, self).__init__(resolver_context)\n    self._file_system = None\n    self._fsntfs_data_stream = None\n    self._fsntfs_file_entry = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def section(self, regex, config='running_config'):\n    if (config in ['running_config', 'startup_config']):\n        config = getattr(self, config)\n    match = re.search(regex, config, re.M)\n    if (not match):\n        raise TypeError('config section not found')\n    (block_start, line_end) = match.regs[0]\n    match = re.search('^[^\\\\s]', config[line_end:], re.M)\n    if (not match):\n        raise TypeError('could not find end block')\n    (_, block_end) = match.regs[0]\n    block_end = (line_end + block_end)\n    return config[block_start:block_end]", "docstring": "Returns a section of the config\n\nArgs:\nregex (str): A valid regular expression used to select sections\nof configuration to return\nconfig (str): The configuration to return.  Valid values for config\nare \"running_config\" or \"startup_config\".  The default value\nis \"running_config\"\n\nReturns:\nThe configuration section as a string object.", "source": "codesearchnet"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = math_ops.cast(y_true, self._dtype)\n    y_pred = math_ops.cast(y_pred, self._dtype)\n    if y_pred.shape.ndims > 1:\n        y_pred = array_ops.reshape(y_pred, [-1])\n    if y_true.shape.ndims > 1:\n        y_true = array_ops.reshape(y_true, [-1])\n    if sample_weight is not None:\n        sample_weight = math_ops.cast(sample_weight, self._dtype)\n        if sample_weight.shape.ndims > 1:\n            sample_weight = array_ops.reshape(sample_weight, [-1])\n    current_cm = confusion_matrix.confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self._dtype)\n    return self.total_cm.assign_add(current_cm)", "docstring": "Accumulates the confusion matrix statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):\n    \n    tokens = token_fn(s)\n    return __ngrams(tokens, n=min(len(tokens), n))", "docstring": "Word-level n-grams in a string\n\nBy default, whitespace is assumed to be a word boundary.\n\n>>> ng.word_ngrams('This is not a test!')\n[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]\n\nIf the sequence's length is less than or equal to n, the n-grams are\nsimply the sequence itself.\n\n>>> ng.word_ngrams('Test!')\n[('Test!')]\n\nArgs:\ns: a string\n\nReturns:\nlist: tuples of word-level n-grams", "source": "juraj-google-style"}
{"code": "async def get_pushlog_info(decision_link):\n    \n    source_env_prefix = decision_link.context.config['source_env_prefix']\n    repo = get_repo(decision_link.task, source_env_prefix)\n    rev = get_revision(decision_link.task, source_env_prefix)\n    context = decision_link.context\n    pushlog_url = context.config['pushlog_url'].format(\n        repo=repo, revision=rev\n    )\n    log.info(\"Pushlog url {}\".format(pushlog_url))\n    file_path = os.path.join(context.config[\"work_dir\"], \"{}_push_log.json\".format(decision_link.name))\n    pushlog_info = await load_json_or_yaml_from_url(\n        context, pushlog_url, file_path, overwrite=False\n    )\n    if len(pushlog_info['pushes']) != 1:\n        log.warning(\"Pushlog error: expected a single push at {} but got {}!\".format(\n            pushlog_url, pushlog_info['pushes']\n        ))\n    return pushlog_info", "docstring": "Get pushlog info for a decision LinkOfTrust.\n\nArgs:\ndecision_link (LinkOfTrust): the decision link to get pushlog info about.\n\nReturns:\ndict: pushlog info.", "source": "juraj-google-style"}
{"code": "def getSimilarTerms(self, textOrFingerprint):\n        \n        expression = self._createDictionary(textOrFingerprint)\n        terms = self._fullClient.getSimilarTermsForExpression(json.dumps(expression), maxResults=20)\n        return [t.term for t in terms]", "docstring": "Get the similar terms for a given text or fingerprint\nArgs:\ntextOrFingerprint, str OR list of integers\nReturns:\nlist of str: the 20 most similar terms\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def _load_element_spec(path: str) -> Any:\n    dataset_spec_filename = os.path.join(path, dataset_ops.DATASET_SPEC_FILENAME)\n    if not gfile.Exists(dataset_spec_filename):\n        raise errors.NotFoundError(node_def=None, op=None, message=f'tf.data snapshot element_spec file not found: {dataset_spec_filename}.')\n    with gfile.GFile(dataset_spec_filename, 'rb') as f:\n        encoded_spec = f.read()\n    try:\n        return _parse_element_spec(encoded_spec)\n    except nested_structure_coder.NotEncodableError as e:\n        raise errors.NotFoundError(node_def=None, op=None, message=f'tf.data snapshot element_spec file not found or invalid: {dataset_spec_filename}.') from e", "docstring": "Loads the dataset element spec.\n\nArgs:\npath: Base path of the snapshot.\n\nReturns:\nDataset element_spec.\n\nRaises:\nNotFoundError if the element spec file does not exist or cannot be decoded.", "source": "github-repos"}
{"code": "def start_server_on_separate_thread(dump_to_filesystem=True, server_start_delay_sec=0.0, poll_server=False, blocking=True, toggle_watch_on_core_metadata=None):\n    server_port = portpicker.pick_unused_port()\n    debug_server_url = 'grpc:\n    server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None\n    server = EventListenerTestServicer(server_port=server_port, dump_dir=server_dump_dir, toggle_watch_on_core_metadata=toggle_watch_on_core_metadata)\n\n    def delay_then_run_server():\n        time.sleep(server_start_delay_sec)\n        server.run_server(blocking=blocking)\n    server_thread = threading.Thread(target=delay_then_run_server)\n    server_thread.start()\n    if poll_server:\n        if not _poll_server_till_success(50, 0.2, debug_server_url, server_dump_dir, server, gpu_memory_fraction=0.1):\n            raise ValueError('Failed to start test gRPC debug server at port %d' % server_port)\n        server.clear_data()\n    return (server_port, debug_server_url, server_dump_dir, server_thread, server)", "docstring": "Create a test gRPC debug server and run on a separate thread.\n\nArgs:\ndump_to_filesystem: (bool) whether the debug server will dump debug data\nto the filesystem.\nserver_start_delay_sec: (float) amount of time (in sec) to delay the server\nstart up for.\npoll_server: (bool) whether the server will be polled till success on\nstartup.\nblocking: (bool) whether the server should be started in a blocking mode.\ntoggle_watch_on_core_metadata: A list of\n(node_name, output_slot, debug_op) tuples to toggle the\nwatchpoint status during the on_core_metadata calls (optional).\n\nReturns:\nserver_port: (int) Port on which the server runs.\ndebug_server_url: (str) grpc:// URL to the server.\nserver_dump_dir: (str) The debug server's dump directory.\nserver_thread: The server Thread object.\nserver: The `EventListenerTestServicer` object.\n\nRaises:\nValueError: If polling the server process for ready state is not successful\nwithin maximum polling count.", "source": "github-repos"}
{"code": "def _register_preallocated_ips(self, conf):\n    for (dom_name, dom_spec) in conf.get('domains', {}).items():\n        for (idx, nic) in enumerate(dom_spec.get('nics', [])):\n            if ('ip' not in nic):\n                continue\n            net = conf['nets'][nic['net']]\n            if self._subnet_store.is_leasable_subnet(net['gw']):\n                nic['ip'] = _create_ip(net['gw'], int(nic['ip'].split('.')[(- 1)]))\n            dom_name = dom_spec['name']\n            if (not _ip_in_subnet(net['gw'], nic['ip'])):\n                raise RuntimeError((\"%s:nic%d's IP [%s] is outside the subnet [%s]\" % (dom_name, dom_spec['nics'].index(nic), nic['ip'], net['gw'])))\n            if (nic['ip'] in net['mapping'].values()):\n                conflict_list = [name for (name, ip) in net['mapping'].items() if (ip == net['ip'])]\n                raise RuntimeError(('IP %s was to several domains: %s %s' % (nic['ip'], dom_name, ' '.join(conflict_list))))\n            self._add_nic_to_mapping(net, dom_spec, nic)", "docstring": "Parse all the domains in the given conf and preallocate all their ips\ninto the networks mappings, raising exception on duplicated ips or ips\nout of the allowed ranges\n\nSee Also:\n:mod:`lago.subnet_lease`\n\nArgs:\nconf (dict): Configuration spec to parse\n\nReturns:\nNone\n\nRaises:\nRuntimeError: if there are any duplicated ips or any ip out of the\nallowed range", "source": "codesearchnet"}
{"code": "def make_es_id(uri):\n    \n    try:\n        uri = uri.clean_uri\n    except AttributeError:\n        pass\n    return sha1(uri.encode()).hexdigest()", "docstring": "Creates the id based off of the uri value\n\nArgs:\n-----\nuri: the uri to conver to an elasticsearch id", "source": "juraj-google-style"}
{"code": "def extract_ranges(index_list, range_size_limit=32):\n    \n    if not index_list:\n        return [], []\n    first = index_list[0]\n    last = first\n    ranges = []\n    singles = []\n    for i in index_list[1:]:\n        if i == last + 1 and (last - first) <= range_size_limit:\n            last = i\n        else:\n            if last > first:\n                ranges.append([first, last])\n            else:\n                singles.append(first)\n            first = i\n            last = i\n    if last > first:\n        ranges.append([first, last])\n    else:\n        singles.append(first)\n    return ranges, singles", "docstring": "Extract consecutive ranges and singles from index_list.\n\nArgs:\nindex_list: List of monotone increasing non-negative integers.\nrange_size_limit: Largest size range to return.  If a larger\nconsecutive range exists it will be returned as multiple\nranges.\n\nReturns:\nranges, singles where ranges is a list of [first, last] pairs of\nconsecutive elements in index_list, and singles is all of the\nother elements, in original order.", "source": "juraj-google-style"}
{"code": "def SetCredential(self, path_spec, identifier, data):\n    \n    supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)\n\n    if identifier not in supported_credentials.CREDENTIALS:\n      raise KeyError((\n          'Unsuppored credential: {0:s} for path specification type: '\n          '{1:s}').format(identifier, path_spec.type_indicator))\n\n    credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})\n    credentials[identifier] = data\n    self._credentials_per_path_spec[path_spec.comparable] = credentials", "docstring": "Sets a specific credential for the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nidentifier (str): credential identifier.\ndata (object): credential data.\n\nRaises:\nKeyError: if the credential is not supported by the path specification\ntype.", "source": "juraj-google-style"}
{"code": "def files_comments_delete(self, *, file: str, id: str, **kwargs) -> SlackResponse:\n    kwargs.update({'file': file, 'id': id})\n    return self.api_call('files.comments.delete', json=kwargs)", "docstring": "Deletes an existing comment on a file.\n\nArgs:\nfile (str): The file id. e.g. 'F1234467890'\nid (str): The file comment id. e.g. 'Fc1234567890'", "source": "codesearchnet"}
{"code": "def _build_graph(self, tags):\n        \n        graph = SimpleGraph()\n        for tag_index in xrange(len(tags)):\n            for entity_index in xrange(len(tags[tag_index].get('entities'))):\n                a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)\n                tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))\n                for tag in tags[tag_index + 1:]:\n                    start_token = tag.get('start_token')\n                    if start_token >= tags[tag_index].get('start_token') + len(tokens):\n                        for b_entity_index in xrange(len(tag.get('entities'))):\n                            b_entity_name = graph_key_from_tag(tag, b_entity_index)\n                            graph.add_edge(a_entity_name, b_entity_name)\n\n        return graph", "docstring": "Builds a graph from the entities included in the tags.\nNote this is used internally.\n\nArgs:\ntags (list): A list of the tags to include in graph\n\nReturns:\ngraph : this is the resulting graph of the tagged entities.", "source": "juraj-google-style"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--user', dest='username', type=str, action='store',\n        default=cls._DEFAULT_USERNAME, metavar='USERNAME', required=False,\n        help='The username used to connect to the database.')\n    argument_group.add_argument(\n        '--password', dest='password', type=str, action='store',\n        default=cls._DEFAULT_PASSWORD, metavar='PASSWORD', help=(\n            'The password for the database user.'))\n    argument_group.add_argument(\n        '--db_name', '--db-name', dest='db_name', action='store',\n        type=str, default=cls._DEFAULT_NAME, required=False, help=(\n            'The name of the database to connect to.'))\n\n    server_config.ServerArgumentsHelper.AddArguments(argument_group)", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def _ParseFileVersion(file_version):\n    tokens = file_version.split('brain.Event:')\n    try:\n        return float(tokens[(- 1)])\n    except ValueError:\n        logger.warn('Invalid event.proto file_version. Defaulting to use of out-of-order event.step logic for purging expired events.')\n        return (- 1)", "docstring": "Convert the string file_version in event.proto into a float.\n\nArgs:\nfile_version: String file_version from event.proto\n\nReturns:\nVersion number as a float.", "source": "codesearchnet"}
{"code": "def sync(self, raw_data, row_change_callback=None):\n        \n        return self._update(raw_data, row_change_callback, delete_rows=True)", "docstring": "Equivalent to the inject method but will delete rows from the\ngoogle spreadsheet if their key is not found in the input (raw_data)\ndictionary.\n\nArgs:\nraw_data (dict): See inject method\nrow_change_callback (Optional) (func): See inject method\n\nReturns:\nUpdateResults (object): See inject method", "source": "juraj-google-style"}
{"code": "def transpose(self, permutation: Optional[List[int]]=None) -> 'TensorFluent':\n    if (permutation == []):\n        return self\n    t = (tf.transpose(self.tensor, permutation) if (permutation != []) else self.tensor)\n    scope = self.scope.as_list()\n    batch = self.batch\n    return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the transpose operation with given `permutation`.\n\nArgs:\npermutation: The output's shape permutation.\n\nReturns:\nA TensorFluent wrapping the transpose operation.", "source": "codesearchnet"}
{"code": "def __init__(self, wait_until_step):\n    self._wait_until_step = wait_until_step", "docstring": "Initializes a `GlobalStepWaiterHook`.\n\nArgs:\nwait_until_step: an `int` shows until which global step should we wait.", "source": "github-repos"}
{"code": "def add_block_parser(subparsers, parent_parser):\n    parser = subparsers.add_parser('block', description='Provides subcommands to display information about the blocks in the current blockchain.', help='Displays information on blocks in the current blockchain')\n    grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')\n    grand_parsers.required = True\n    description = 'Displays information for all blocks on the current blockchain, including the block id and number, public keys all of allsigners, and number of transactions and batches.'\n    list_parser = grand_parsers.add_parser('list', help='Displays information for all blocks on the current blockchain', description=description, parents=[base_http_parser(), base_list_parser()], formatter_class=argparse.RawDescriptionHelpFormatter)\n    list_parser.add_argument('-n', '--count', default=100, type=int, help='the number of blocks to list')\n    description = 'Displays information about the specified block on the current blockchain'\n    show_parser = grand_parsers.add_parser('show', help=description, description=(description + '.'), parents=[base_http_parser(), base_show_parser()], formatter_class=argparse.RawDescriptionHelpFormatter)\n    show_parser.add_argument('block_id', type=str, help='id (header_signature) of the block')", "docstring": "Adds arguments parsers for the block list and block show commands\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "codesearchnet"}
{"code": "def _GetDistinctValues(self, field_name):\n    \n    self._cursor.execute(\n        'SELECT {0:s}, COUNT({0:s}) FROM log2timeline GROUP BY {0:s}'.format(\n            field_name))\n\n    result = {}\n    row = self._cursor.fetchone()\n    while row:\n      if row[0]:\n        result[row[0]] = row[1]\n      row = self._cursor.fetchone()\n    return result", "docstring": "Query database for unique field types.\n\nArgs:\nfield_name (str): name of the filed to retrieve.\n\nReturns:\ndict[str, int]: counts of field types by name.", "source": "juraj-google-style"}
{"code": "def configure(screen_name=None, config_file=None, app=None, **kwargs):\n    dirs = kwargs.pop('default_directories', None)\n    bases = kwargs.pop('default_bases', None)\n    file_config = {}\n    if (config_file is not False):\n        config_file = find_file(config_file, dirs, bases)\n        file_config = parse(config_file)\n    config = {k: v for (k, v) in file_config.items() if (k not in ('apps', 'users'))}\n    user_conf = file_config.get('users', {}).get(screen_name, {})\n    app = (app or user_conf.get('app'))\n    app_conf = file_config.get('apps', {}).get(app, {})\n    config.update(app_conf)\n    config.update(user_conf)\n    config.update({k: v for (k, v) in kwargs.items() if (v is not None)})\n    return config", "docstring": "Set up a config dictionary using a bots.yaml config file and optional keyword args.\n\nArgs:\nscreen_name (str): screen_name of user to search for in config file\nconfig_file (str): Path to read for the config file\napp (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.\ndefault_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.\ndefault_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES.", "source": "codesearchnet"}
{"code": "def _ReplaceByOuterIfNecessary(self, item, substitutions):\n    containing_union = self._AllContaining(item.type_param)\n    if not containing_union:\n        return [item]\n    class_type_parameters = [type_param for type_param in containing_union if self.IsClassTypeParameter(type_param)]\n    if class_type_parameters:\n        substitutions[item.type_param] = pytd_utils.JoinTypes(class_type_parameters)\n        return []\n    else:\n        return [item]", "docstring": "Potentially replace a function type param with a class type param.\n\nArgs:\nitem: A pytd.TemplateItem\nsubstitutions: A dictionary to update with what we replaced.\n\nReturns:\nEither [item] or [].", "source": "github-repos"}
{"code": "def unitized(unit):\n\n    def wrap(f):\n\n        def wrapped_f(*args, **kwargs):\n            val = f(*args, **kwargs)\n            unit_type = _UNAME2UTYPE[unit]\n            if (isinstance(val, FloatWithUnit) or isinstance(val, ArrayWithUnit)):\n                return val.to(unit)\n            elif isinstance(val, collections.abc.Sequence):\n                return val.__class__([FloatWithUnit(i, unit_type=unit_type, unit=unit) for i in val])\n            elif isinstance(val, collections.Mapping):\n                for (k, v) in val.items():\n                    val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)\n            elif isinstance(val, numbers.Number):\n                return FloatWithUnit(val, unit_type=unit_type, unit=unit)\n            elif (val is None):\n                pass\n            else:\n                raise TypeError((\"Don't know how to assign units to %s\" % str(val)))\n            return val\n        return wrapped_f\n    return wrap", "docstring": "Useful decorator to assign units to the output of a function. You can also\nuse it to standardize the output units of a function that already returns\na FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences\nare assigned the same unit. It works with Python sequences only. The creation\nof numpy arrays loses all unit information. For mapping types, the values\nare assigned units.\n\nArgs:\nunit: Specific unit (eV, Ha, m, ang, etc.).\n\nExample usage::\n\n@unitized(unit=\"kg\")\ndef get_mass():\nreturn 123.45", "source": "codesearchnet"}
{"code": "def get_vpc_id(account, region):\n    url = '{0}/networks/aws'.format(API_URL)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    if (not response.ok):\n        raise SpinnakerVPCNotFound(response.text)\n    vpcs = response.json()\n    for vpc in vpcs:\n        LOG.debug('VPC: %(name)s, %(account)s, %(region)s => %(id)s', vpc)\n        if (('name' in vpc) and all([(vpc['name'] == 'vpc'), (vpc['account'] == account), (vpc['region'] == region)])):\n            LOG.info('Found VPC ID for %s in %s: %s', account, region, vpc['id'])\n            vpc_id = vpc['id']\n            break\n    else:\n        LOG.fatal('VPC list: %s', vpcs)\n        raise SpinnakerVPCIDNotFound('No VPC available for {0} [{1}].'.format(account, region))\n    return vpc_id", "docstring": "Get VPC ID configured for ``account`` in ``region``.\n\nArgs:\naccount (str): AWS account name.\nregion (str): Region name, e.g. us-east-1.\n\nReturns:\nstr: VPC ID for the requested ``account`` in ``region``.\n\nRaises:\n:obj:`foremast.exceptions.SpinnakerVPCIDNotFound`: VPC ID not found for\n``account`` in ``region``.\n:obj:`foremast.exceptions.SpinnakerVPCNotFound`: Spinnaker has no VPCs\nconfigured.", "source": "codesearchnet"}
{"code": "def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):\n    img = _imsave_before(img, channel_first, auto_scale)\n    if ((img.dtype == np.uint16) or as_uint16):\n        raise ValueError('Pillow only supports uint8 image to save. Cast img to uint8.If you want to save image as uint16, install pypng or cv2 and nnabla.utils.image_utils automatically change backend to use these module.')\n    if (auto_scale and (img.dtype != np.uint8)):\n        img = (img * 255).astype(np.uint8)\n    Image.fromarray(img).save(path)", "docstring": "Save image by pillow module.\nCurrently, pillow supports only uint8 to save.\n\nArgs:\npath (str): output filename\nimg (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.\nchannel_first (bool):\nThis argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).\nDefault value is False, which means the img shape is considered as (height, width, channel)\nas_uint16 (bool):\nIn this backend, this argument is always False because pillow dose not support uint16.\nIf True, exception will be raised.\nauto_scale (bool) :\nWhether upscale pixel values or not.\nIf you want to save float image, this argument must be True.\nIn pillow backend, only float ([0, 1]) to uint8 ([0, 255]) is supported.", "source": "codesearchnet"}
{"code": "def delete_rule(name, localport=None, protocol=None, dir=None, remoteip=None):\n    ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}\n    if __salt__['firewall.rule_exists'](name):\n        ret['changes'] = {'delete rule': name}\n    else:\n        ret['comment'] = 'A rule with that name does not exist'\n        return ret\n    if __opts__['test']:\n        ret['result'] = ((not ret['changes']) or None)\n        ret['comment'] = ret['changes']\n        ret['changes'] = {}\n        return ret\n    try:\n        __salt__['firewall.delete_rule'](name, localport, protocol, dir, remoteip)\n    except CommandExecutionError:\n        ret['comment'] = 'Could not delete rule'\n    return ret", "docstring": "Delete an existing firewall rule identified by name and optionally by ports,\nprotocols, direction, and remote IP.\n\n.. versionadded:: Neon\n\nArgs:\n\nname (str): The name of the rule to delete. If the name ``all`` is used\nyou must specify additional parameters.\n\nlocalport (Optional[str]): The port of the rule. If protocol is not\nspecified, protocol will be set to ``tcp``\n\nprotocol (Optional[str]): The protocol of the rule. Default is ``tcp``\nwhen ``localport`` is specified\n\ndir (Optional[str]): The direction of the rule.\n\nremoteip (Optional[str]): The remote IP of the rule.\n\nExample:\n\n.. code-block:: yaml\n\ndelete_smb_port_rule:\nwin_firewall.delete_rule:\n- name: SMB (445)", "source": "codesearchnet"}
{"code": "def _get_apis(self, apis):\n    ret = []\n    for data in apis:\n        ret.append(SpecificationAPI(specification=self, data=data))\n    return sorted(ret, key=(lambda x: x.rest_name[1:]))", "docstring": "Process apis for the given model\n\nArgs:\nmodel: the model processed\napis: the list of apis availble for the current model\nrelations: dict containing all relations between resources", "source": "codesearchnet"}
{"code": "def snapshot(self, filename='tmp.png'):\n    if (not filename):\n        filename = 'tmp.png'\n    if self.handle:\n        try:\n            screenshot(filename, self.handle)\n        except win32gui.error:\n            self.handle = None\n            screenshot(filename)\n    else:\n        screenshot(filename)\n    img = aircv.imread(filename)\n    os.remove(filename)\n    return img", "docstring": "Take a screenshot and save it to `tmp.png` filename by default\n\nArgs:\nfilename: name of file where to store the screenshot\n\nReturns:\ndisplay the screenshot", "source": "codesearchnet"}
{"code": "def add_pop_block_targets(bytecode: list[opcodes.Opcode]) -> None:\n    if not bytecode:\n        return\n    for op in bytecode:\n        op.block_target = None\n    setup_except_op = (opcodes.SETUP_FINALLY, opcodes.SETUP_EXCEPT_311)\n    todo = [(bytecode[0], ())]\n    seen = set()\n    while todo:\n        op, block_stack = todo.pop()\n        if op in seen:\n            continue\n        else:\n            seen.add(op)\n        if isinstance(op, opcodes.POP_BLOCK):\n            assert block_stack, 'POP_BLOCK without block.'\n            op.block_target = block_stack[-1].target\n            block_stack = block_stack[0:-1]\n        elif isinstance(op, opcodes.RAISE_VARARGS):\n            for b in reversed(block_stack):\n                if isinstance(b, setup_except_op):\n                    op.block_target = b.target\n                    break\n        elif isinstance(op, opcodes.BREAK_LOOP):\n            for i in reversed(range(len(block_stack))):\n                b = block_stack[i]\n                if isinstance(b, opcodes.SETUP_LOOP):\n                    op.block_target = b.target\n                    assert b.target != op\n                    todo.append((op.block_target, block_stack[0:i]))\n                    break\n        elif isinstance(op, setup_except_op):\n            todo.append((op.target, block_stack))\n            block_stack += (op,)\n        elif op.pushes_block():\n            assert op.target, f'{op.name} without target'\n            block_stack += (op,)\n        elif op.does_jump() and op.target:\n            if op.push_exc_block:\n                setup_op = op.target\n                while not isinstance(setup_op, setup_except_op):\n                    setup_op = setup_op.prev\n                block_stack += (setup_op,)\n            todo.append((op.target, block_stack))\n        if not op.no_next():\n            assert op.next, f'Bad instruction at end of bytecode: {op!r}.'\n            todo.append((op.next, block_stack))", "docstring": "Modifies bytecode so that each POP_BLOCK has a block_target.\n\nThis is to achieve better initial ordering of try/except and try/finally code.\ntry:\ni = 1\na[i]\nexcept IndexError:\nreturn i\nBy connecting a CFG edge from the end of the block (after the \"a[i]\") to the\nexcept handler, our basic block ordering algorithm knows that the except block\nneeds to be scheduled last, whereas if there only was an edge before the\n\"i = 1\", it would be able to schedule it too early and thus encounter an\nundefined variable. This is only for ordering. The actual analysis of the\ncode happens later, in vm.py.\n\nArgs:\nbytecode: An array of bytecodes.", "source": "github-repos"}
{"code": "def _open_tracing_interface(self, connection_id, callback):\n        \n\n        try:\n            context = self.connections.get_context(connection_id)\n        except ArgumentError:\n            callback(connection_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self._logger.info(\"Attempting to enable tracing\")\n        self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))\n\n        try:\n            characteristic = context['services'][TileBusService][TracingChar]\n        except KeyError:\n            self.connections.finish_operation(\n                connection_id,\n                False,\n                \"Can't find characteristic to open tracing interface\"\n            )\n            return\n\n        \n        \n        self._register_notification_callback(\n            context['connection_handle'],\n            characteristic.value_handle,\n            lambda trace_chunk: self._trigger_callback('on_trace', connection_id, bytearray(trace_chunk))\n        )\n\n        self.bable.set_notification(\n            enabled=True,\n            connection_handle=context['connection_handle'],\n            characteristic=characteristic,\n            on_notification_set=[self._on_interface_opened, context],\n            on_notification_received=self._on_notification_received,\n            timeout=1.0,\n            sync=False\n        )", "docstring": "Enable the tracing interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def read(self, n):\n    if self._EOF:\n        return ''\n    while (self._seg_index <= self._last_seg_index):\n        result = self._read_from_seg(n)\n        if (result != ''):\n            return result\n        else:\n            self._next_seg()\n    self._EOF = True\n    return ''", "docstring": "Read data from file segs.\n\nArgs:\nn: max bytes to read. Must be positive.\n\nReturns:\nsome bytes. May be smaller than n bytes. \"\" when no more data is left.", "source": "codesearchnet"}
{"code": "def set_task(project_, task_):\n    global project, task\n    project = project_\n    task = task_\n    msg.okay('Set project name to {}.{}'.format(project, task), 2)", "docstring": "Sets the active project and task. All subsequent logging will be saved to\nthe database with that project and task.\n\nArgs:\nproject_ (str): active project name; a project can have multiple tasks.\ntask_ (str): active task name. Logging is separated at the project and task\nlevel.", "source": "codesearchnet"}
{"code": "def disassemble_instruction(self, instruction):\n    if (not util.is_integer(instruction)):\n        raise TypeError('Expected instruction to be an integer.')\n    buf_size = self.MAX_BUF_SIZE\n    buf = (ctypes.c_char * buf_size)()\n    res = self._dll.JLINKARM_DisassembleInst(ctypes.byref(buf), buf_size, instruction)\n    if (res < 0):\n        raise errors.JLinkException('Failed to disassemble instruction.')\n    return ctypes.string_at(buf).decode()", "docstring": "Disassembles and returns the assembly instruction string.\n\nArgs:\nself (JLink): the ``JLink`` instance.\ninstruction (int): the instruction address.\n\nReturns:\nA string corresponding to the assembly instruction string at the\ngiven instruction address.\n\nRaises:\nJLinkException: on error.\nTypeError: if ``instruction`` is not a number.", "source": "codesearchnet"}
{"code": "def parseInt(self, words):\n    words = words.replace(' and ', ' ').lower()\n    words = re.sub('(\\\\b)a(\\\\b)', '\\\\g<1>one\\\\g<2>', words)\n\n    def textToNumber(s):\n        '\\n            Converts raw number string to an integer.\\n            Based on text2num.py by Greg Hewill.\\n            '\n        a = re.split('[\\\\s-]+', s)\n        n = 0\n        g = 0\n        for w in a:\n            x = NumberService.__small__.get(w, None)\n            if (x is not None):\n                g += x\n            elif (w == 'hundred'):\n                g *= 100\n            else:\n                x = NumberService.__magnitude__.get(w, None)\n                if (x is not None):\n                    n += (g * x)\n                    g = 0\n                else:\n                    raise NumberService.NumberException(('Unknown number: ' + w))\n        return (n + g)\n    return textToNumber(words)", "docstring": "Parses words to the integer they describe.\n\nArgs:\nwords (str): Description of the integer.\n\nReturns:\nAn integer representation of the words.", "source": "codesearchnet"}
{"code": "def is_attribute_supported(self, attribute):\n        \n        if attribute not in self._attribute_rule_sets.keys():\n            return False\n\n        rule_set = self._attribute_rule_sets.get(attribute)\n        if self._version >= rule_set.version_added:\n            return True\n        else:\n            return False", "docstring": "Check if the attribute is supported by the current KMIP version.\n\nArgs:\nattribute (string): The name of the attribute\n(e.g., 'Cryptographic Algorithm'). Required.\nReturns:\nbool: True if the attribute is supported by the current KMIP\nversion. False otherwise.", "source": "juraj-google-style"}
{"code": "def __getIp6Address(self, addressType):\n        \n        addrType = ['link local', 'global', 'rloc', 'mesh EID']\n        addrs = []\n        globalAddr = []\n        linkLocal64Addr = ''\n        rlocAddr = ''\n        meshEIDAddr = ''\n\n        addrs = self.__sendCommand(WPANCTL_CMD + 'getprop -v IPv6:AllAddresses')\n        for ip6AddrItem in addrs:\n            if re.match('\\[|\\]', ip6AddrItem):\n                continue\n            if re.match(WPAN_CARRIER_PROMPT, ip6AddrItem, re.M|re.I):\n                break\n            ip6AddrItem = ip6AddrItem.strip()\n            ip6Addr = self.__stripValue(ip6AddrItem).split(' ')[0]\n            ip6AddrPrefix = ip6Addr.split(':')[0]\n            if ip6AddrPrefix == 'fe80':\n                \n                if ip6Addr.split(':')[4] != '0':\n                    linkLocal64Addr = ip6Addr\n            elif ip6Addr.startswith(self.meshLocalPrefix):\n                \n                if ip6Addr.split(':')[4] == '0':\n                    \n                    rlocAddr = ip6Addr\n                else:\n                    \n                    meshEIDAddr = ip6Addr\n                    print 'meshEIDAddr:' + meshEIDAddr\n            else:\n                \n                if ip6Addr:\n                    print 'globalAddr: ' + ip6Addr\n                    globalAddr.append(ip6Addr)\n                else:\n                    pass\n\n        if addressType == addrType[0]:\n            return linkLocal64Addr\n        elif addressType == addrType[1]:\n            return globalAddr\n        elif addressType == addrType[2]:\n            return rlocAddr\n        elif addressType == addrType[3]:\n            return meshEIDAddr\n        else:\n            pass", "docstring": "get specific type of IPv6 address configured on OpenThread_WpanCtl\n\nArgs:\naddressType: the specific type of IPv6 address\n\nlink local: link local unicast IPv6 address that's within one-hop scope\nglobal: global unicast IPv6 address\nrloc: mesh local unicast IPv6 address for routing in thread network\nmesh EID: mesh Endpoint Identifier\n\nReturns:\nIPv6 address string", "source": "juraj-google-style"}
{"code": "def get_user_info(self, dn, _connection=None):\n    return self.get_object(dn=dn, filter=self.config.get('LDAP_USER_OBJECT_FILTER'), attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'), _connection=_connection)", "docstring": "Gets info about a user specified at dn.\n\nArgs:\ndn (str): The dn of the user to find\n_connection (ldap3.Connection): A connection object to use when\nsearching. If not given, a temporary connection will be\ncreated, and destroyed after use.\n\nReturns:\ndict: A dictionary of the user info from LDAP", "source": "codesearchnet"}
{"code": "def dbmin20years(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmin20years`'.format(value))\n    self._dbmin20years = value", "docstring": "Corresponds to IDD Field `dbmin20years`\n20-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin20years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def check_valid_values(function):\n\n    def decorated(self, X, *args, **kwargs):\n        if isinstance(X, pd.DataFrame):\n            W = X.values\n        else:\n            W = X\n        if (not len(W)):\n            raise ValueError('Your dataset is empty.')\n        if (W.dtype not in [np.dtype('float64'), np.dtype('int64')]):\n            raise ValueError('There are non-numerical values in your data.')\n        if np.isnan(W).any().any():\n            raise ValueError('There are nan values in your data.')\n        return function(self, X, *args, **kwargs)\n    return decorated", "docstring": "Raises an exception if the given values are not supported.\n\nArgs:\nfunction(callable): Method whose unique argument is a numpy.array-like object.\n\nReturns:\ncallable: Decorated function\n\nRaises:\nValueError: If there are missing or invalid values or if the dataset is empty.", "source": "codesearchnet"}
{"code": "def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):\n        \n        \n\n        geojson = load_wkt(wkt).__geo_interface__\n        vector = {\n            'type': \"Feature\",\n            'geometry': geojson,\n            'properties': {\n                'item_type': item_type,\n                'ingest_source': ingest_source,\n                'attributes': attributes\n            }\n        }\n\n        return self.create(vector)[0]", "docstring": "Create a single vector in the vector service\n\nArgs:\nwkt (str): wkt representation of the geometry\nitem_type (str): item_type of the vector\ningest_source (str): source of the vector\nattributes: a set of key-value pairs of attributes\n\nReturns:\nid (str): string identifier of the vector created", "source": "juraj-google-style"}
{"code": "async def run_tasks(context):\n    \n    running_tasks = RunTasks()\n    context.running_tasks = running_tasks\n    status = await running_tasks.invoke(context)\n    context.running_tasks = None\n    return status", "docstring": "Run any tasks returned by claimWork.\n\nReturns the integer status of the task that was run, or None if no task was\nrun.\n\nargs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nRaises:\nException: on unexpected exception.\n\nReturns:\nint: exit status\nNone: if no task run.", "source": "juraj-google-style"}
{"code": "def reboot(self):\n    if self.is_bootloader:\n        self.fastboot.reboot()\n        return\n    with self.handle_reboot():\n        self.adb.reboot()", "docstring": "Reboots the device.\n\nGenerally one should use this method to reboot the device instead of\ndirectly calling `adb.reboot`. Because this method gracefully handles\nthe teardown and restoration of running services.\n\nThis method is blocking and only returns when the reboot has completed\nand the services restored.\n\nRaises:\nError: Waiting for completion timed out.", "source": "github-repos"}
{"code": "def __setRouterUpgradeThreshold(self, iThreshold):\n        \n        print 'call __setRouterUpgradeThreshold'\n        try:\n            cmd = 'routerupgradethreshold %s' % str(iThreshold)\n            print cmd\n            return self.__sendCommand(cmd) == 'Done'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"setRouterUpgradeThreshold() Error: \" + str(e))", "docstring": "set router upgrade threshold\n\nArgs:\niThreshold: the number of active routers on the Thread network\npartition below which a REED may decide to become a Router.\n\nReturns:\nTrue: successful to set the ROUTER_UPGRADE_THRESHOLD\nFalse: fail to set ROUTER_UPGRADE_THRESHOLD", "source": "juraj-google-style"}
{"code": "def make_2d_block_raster_mask(query_shape, memory_flange):\n    query_triangle = common_layers.ones_matrix_band_part(np.prod(query_shape), np.prod(query_shape), (- 1), 0)\n    split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)\n    mask_pieces = [tf.concat([tf.ones([np.prod(query_shape), memory_flange[1]]), split_query_masks[i], tf.zeros([np.prod(query_shape), memory_flange[1]])], axis=1) for i in range(query_shape[0])]\n    final_mask = tf.concat([tf.ones([np.prod(query_shape), ((query_shape[1] + (2 * memory_flange[1])) * memory_flange[0])]), tf.concat(mask_pieces, axis=1)], axis=1)\n    return (1.0 - final_mask)", "docstring": "Creates a mask for 2d block raster scan.\n\nThe query mask can look to the left, top left, top, and top right, but\nnot to the right. Inside the query, we have the standard raster scan\nmasking.\nArgs:\nquery_shape: A tuple of ints (query_height, query_width)\nmemory_flange: A tuple of ints\n(memory_flange_height, memory_flange_width)\n\nReturns:\nA tensor of shape query_size, memory_size", "source": "codesearchnet"}
{"code": "def start_app_and_connect(self):", "docstring": "Starts the server app on the android device and connects to it.\n\nAfter this, the self.host_port and self.device_port attributes must be\nset.\n\nMust be implemented by subclasses.\n\nRaises:\nAppStartError: When the app was not able to be started.", "source": "github-repos"}
{"code": "def __init__(self, enum_type, name=None, default=None, choices=None, **kwds):\n    \n    self._enum_type = enum_type\n    if default is not None:\n      self._validate(default)\n    if choices is not None:\n      map(self._validate, choices)\n    super(EnumProperty, self).__init__(name, default=default,\n                                       choices=choices, **kwds)", "docstring": "Constructor.\n\nArgs:\nenum_type: A subclass of protorpc.messages.Enum.\nname: Optional datastore name (defaults to the property name).\n\nAdditional keywords arguments specify the same options as\nsupported by IntegerProperty.", "source": "juraj-google-style"}
{"code": "def primitive_wrapper_from_json_value(self, json_value: Optional[Any], primitive_cls: Type[message.Message], *, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> _primitive_wrappers.PrimitiveWrapper:", "docstring": "Parses json_value into a FHIR protobuf primitive and wraps.\n\nThe wrapper provides necessary information on how to parse json_value into a\ncorresponding FHIR protobuf message. Afterwards, this is wrapped to provide\nstateful information to the parent parser and/or printer.\n\nArgs:\njson_value: The FHIR json value to parse and wrap.\nprimitive_cls: The type of FHIR primitive to parse json_value into.\ndefault_timezone: The default timezone string to use when parsing date/\ntime-like primitives when there is no timezone information available.\nDefaults to 'Z'.\n\nRaises:\nValueError: In the event that primitive_cls is not actually a primitive\nFHIR type.\n\nReturns:\nA wrapper around an instance of primitive_cls parsed from json_value.", "source": "github-repos"}
{"code": "def unlock_kinetis_identified(identity, flags):\n    if (flags.version_code != identity.version_code):\n        return False\n    if (flags.part_no != identity.part_no):\n        return False\n    return flags.valid", "docstring": "Checks whether the given flags are a valid identity.\n\nArgs:\nidentity (Identity): the identity to validate against\nflags (register.IDCodeRegisterFlags): the set idcode flags\n\nReturns:\n``True`` if the given ``flags`` correctly identify the the debug\ninterface, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def __init__(self, logdir, options=None):\n    self._logdir = logdir\n    self._options = options", "docstring": "Creates a context manager object for profiler API.\n\nArgs:\nlogdir: profile data will save to this directory.\noptions: An optional `tf.profiler.experimental.ProfilerOptions` can be\nprovided to fine tune the profiler's behavior.", "source": "github-repos"}
{"code": "def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms, estimated_size=0):\n    assert isinstance(tp, TopicPartition), 'not TopicPartition'\n    assert (not self._closed), 'RecordAccumulator is closed'\n    self._appends_in_progress.increment()\n    try:\n        if (tp not in self._tp_locks):\n            with self._tp_locks[None]:\n                if (tp not in self._tp_locks):\n                    self._tp_locks[tp] = threading.Lock()\n        with self._tp_locks[tp]:\n            dq = self._batches[tp]\n            if dq:\n                last = dq[(- 1)]\n                future = last.try_append(timestamp_ms, key, value, headers)\n                if (future is not None):\n                    batch_is_full = ((len(dq) > 1) or last.records.is_full())\n                    return (future, batch_is_full, False)\n        size = max(self.config['batch_size'], estimated_size)\n        log.debug('Allocating a new %d byte message buffer for %s', size, tp)\n        buf = self._free.allocate(size, max_time_to_block_ms)\n        with self._tp_locks[tp]:\n            assert (not self._closed), 'RecordAccumulator is closed'\n            if dq:\n                last = dq[(- 1)]\n                future = last.try_append(timestamp_ms, key, value, headers)\n                if (future is not None):\n                    self._free.deallocate(buf)\n                    batch_is_full = ((len(dq) > 1) or last.records.is_full())\n                    return (future, batch_is_full, False)\n            records = MemoryRecordsBuilder(self.config['message_version'], self.config['compression_attrs'], self.config['batch_size'])\n            batch = ProducerBatch(tp, records, buf)\n            future = batch.try_append(timestamp_ms, key, value, headers)\n            if (not future):\n                raise Exception()\n            dq.append(batch)\n            self._incomplete.add(batch)\n            batch_is_full = ((len(dq) > 1) or batch.records.is_full())\n            return (future, batch_is_full, True)\n    finally:\n        self._appends_in_progress.decrement()", "docstring": "Add a record to the accumulator, return the append result.\n\nThe append result will contain the future metadata, and flag for\nwhether the appended batch is full or a new batch is created\n\nArguments:\ntp (TopicPartition): The topic/partition to which this record is\nbeing sent\ntimestamp_ms (int): The timestamp of the record (epoch ms)\nkey (bytes): The key for the record\nvalue (bytes): The value for the record\nheaders (List[Tuple[str, bytes]]): The header fields for the record\nmax_time_to_block_ms (int): The maximum time in milliseconds to\nblock for buffer memory to be available\n\nReturns:\ntuple: (future, batch_is_full, new_batch_created)", "source": "codesearchnet"}
{"code": "def save_json(py_obj, json_path):\n    with open(json_path, 'w', encoding='utf-8') as f:\n        f.write(serialize_to_normalized_pretty_json(py_obj))", "docstring": "Serialize a native object to JSON and save it normalized, pretty printed to a\nfile.\n\nThe JSON string is normalized by sorting any dictionary keys.\n\nArgs:\npy_obj: object\nAny object that can be represented in JSON. Some types, such as datetimes are\nautomatically converted to strings.\n\njson_path: str\nFile path to which to write the JSON file. E.g.: The path must exist. The\nfilename will normally end with \".json\".\n\nSee Also:\nToJsonCompatibleTypes()", "source": "codesearchnet"}
{"code": "def complete(command_line,\n             current_token,\n             position,\n             shell: arg(choices=('bash', 'fish'))):\n    \n    position = int(position)\n    tokens = shlex.split(command_line[:position])\n\n    all_argv, run_argv, command_argv = run.partition_argv(tokens[1:])\n    run_args = run.parse_args(run_argv)\n\n    module = run_args.get('commands_module')\n    module = module or DEFAULT_COMMANDS_MODULE\n    module = normalize_path(module)\n\n    try:\n        collection = Collection.load_from_module(module)\n    except Exception:\n        collection = {}\n\n    found_command = find_command(collection, tokens) or run\n\n    if current_token:\n        \n        if current_token.startswith('-'):\n            if current_token not in found_command.option_map:\n                print_command_options(found_command, current_token)\n        else:\n            print_commands(collection, shell)\n            path = os.path.expanduser(current_token)\n            path = os.path.expandvars(path)\n            paths = glob.glob('%s*' % path)\n            if paths:\n                for entry in paths:\n                    if os.path.isdir(entry):\n                        print('%s/' % entry)\n                    else:\n                        print(entry)\n    else:\n        \n        \n        \n        option = found_command.option_map.get(tokens[-1])\n\n        if option and option.takes_value:\n            if option.choices:\n                for choice in option.choices:\n                    print(choice)\n            else:\n                for entry in os.listdir():\n                    if os.path.isdir(entry):\n                        print('%s/' % entry)\n                    else:\n                        print(entry)\n        else:\n            print_command_options(found_command)\n            print_commands(collection, shell)", "docstring": "Find completions for current command.\n\nThis assumes that we'll handle all completion logic here and that\nthe shell's automatic file name completion is disabled.\n\nArgs:\ncommand_line: Command line\ncurrent_token: Token at cursor\nposition: Current cursor position\nshell: Name of shell", "source": "juraj-google-style"}
{"code": "def get_definition_name_from_ref(ref):\n        \n        p = re.compile('\n        definition_name = re.sub(p, r'\\1', ref)\n        return definition_name", "docstring": "Get the definition name of the given $ref value(Swagger value).\n\nArgs:\nref: ref value (ex: \"#/definitions/CustomDefinition\")\n\nReturns:\nThe definition name corresponding to the ref.", "source": "juraj-google-style"}
{"code": "def verify_unused_iterator(self, ds_fn, num_outputs, sparse_tensors=False, verify_exhausted=True, assert_items_equal=False):\n    self.verify_run_with_breaks(ds_fn, [0], num_outputs, sparse_tensors=sparse_tensors, verify_exhausted=verify_exhausted, assert_items_equal=assert_items_equal)", "docstring": "Verifies that saving and restoring an unused iterator works.\n\nArgs:\nds_fn: 0-argument function that returns a Dataset.\nnum_outputs: Total number of outputs expected from this Dataset.\nsparse_tensors: Whether dataset is built from SparseTensor(s).\nverify_exhausted: Whether to verify that the iterator has been exhausted\nafter producing `num_outputs` elements.\nassert_items_equal: Tests the output has the expected elements regardless\nof order.\n\nRaises:\nAssertionError if any test fails.", "source": "github-repos"}
{"code": "def check(self, key, value):\n        \n        key = key.lower().strip()\n\n        \n        try:\n            key = key.decode(\"utf-8\")\n        except UnicodeEncodeError:\n            pass\n\n        key = self._remove_accents(key)\n\n        if self.keyword in key.split():\n            self.value = value\n            return True\n\n        return False", "docstring": "Check whether `key` matchs the :attr:`keyword`. If so, set the\n:attr:`value` to `value`.\n\nArgs:\nkey (str): Key which will be matched with :attr:`keyword`.\nvalue (str): Value which will be assigned to :attr:`value` if keys\nmatches.\n\nReturns:\nTrue/False: Whether the key matched :attr:`keyword`.", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, is_index_masked: tf.Tensor, is_index_global_attn: tf.Tensor, is_global_attn: bool, training=False):\n    residual = hidden_states\n    layer_outputs = self.self_attn([hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], training=training)\n    hidden_states = layer_outputs[0]\n    tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    return (hidden_states,) + layer_outputs[1:]", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*\nattention_mask (`tf.Tensor`): attention mask of size\n*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n*(config.encoder_attention_heads,)*.", "source": "github-repos"}
{"code": "def read_passwd_file(pass_file):\n    \n    with open(pass_file) as fin:\n        passwd = fin.read().strip()\n    return passwd", "docstring": "Read password from external file and retrun as string. The file should\ncontain just single line. Prevents hard-coding password anywhere in this\nscript. IMPORTANT! Password is stored as plain text! Do NOT use with your\npersonal account!\"\n\nArgs:\npass_file (str): /path/to/pass_file", "source": "juraj-google-style"}
{"code": "def _CheckUnresolved(cls):\n    with cls._unresolved_subjects_lock:\n        if cls._unresolved_subjects:\n            msg = ['The following assertions were unresolved. Perhaps you called \"AssertThat(thing.IsEmpty())\" instead of \"AssertThat(thing).IsEmpty()\".']\n            for u in sorted(cls._unresolved_subjects):\n                msg.append('    * {0}'.format(u))\n            raise UnresolvedAssertionError('\\n'.join(msg))", "docstring": "Ensures that all created subjects were eventually resolved.\n\nA subject is considered resolved what at least one proposition has been\nexecuted on it. An unresolved or dangling assertion is almost certainly a\ntest author error.\n\nRaises:\nUnresolvedAssertionError: if any subjects remain unresolved at the time of\nthis function call.", "source": "github-repos"}
{"code": "def add_weights(self, object_name, weights):\n    if not isinstance(weights, dict):\n        raise ValueError(f\"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}\")\n\n    def add_weight_fn(weights_dict, source_name, target_name=None):\n        weights_dict[source_name].update(weights)\n    self._edit_object(add_weight_fn, object_name)", "docstring": "Add one or more new weights to an existing object.\n\nArgs:\nobject_name: String, name or path of the\nobject to add the weights to\n(e.g. `\"dense_2\"` or `\"layers/dense_2\"`).\nweights: Dict mapping weight names to weight\nvalues (arrays),\ne.g. `{\"0\": kernel_value, \"1\": bias_value}`.", "source": "github-repos"}
{"code": "def add_layer(self, label, change_layer=True):\n        \n        self.layer_stack.insert(self.last_layer() + 1, label)\n        if change_layer:\n            self.set_current_layer(self.last_layer())\n        return None", "docstring": "Add new mesh layer to the end of the stack\n\nArgs:\nlabel (str): new label for the mesh layer\nchange_layer (bool): change to the newly created layer", "source": "juraj-google-style"}
{"code": "def mtf_transformer_paper_tr(size):\n  \n  n = 2 ** size\n  hparams = mtf_transformer_base()\n  hparams.label_smoothing = 0.1\n  hparams.batch_size = 128\n  hparams.d_model = 1024\n  hparams.d_ff = int(4096 * n)\n  hparams.num_heads = int(8 * n)\n  hparams.shared_embedding_and_softmax_weights = False\n  \n  hparams.learning_rate_decay_steps = 51400\n  return hparams", "docstring": "Config for translation experiments.\n\nTrain these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)\n\nThe size parameter is an integer that controls the number of heads and the\nsize of the size of the feedforward hidden layers.  Increasing size by 1\ndoubles each of these.\n\nArgs:\nsize: an integer\nReturns:\na hparams object", "source": "juraj-google-style"}
{"code": "def min_count(self, n=1):\n    word_count = {w: c for (w, c) in iteritems(self.word_count) if (c >= n)}\n    return CountedVocabulary(word_count=word_count)", "docstring": "Returns a vocabulary after eliminating the words that appear < `n`.\n\nArgs:\nn (integer): specifies the minimum word frequency allowed.", "source": "codesearchnet"}
{"code": "def get(self):\n    resource = dict()\n    resource.update(self._parse_config())\n    resource.update(self._parse_interfaces())\n    return resource", "docstring": "Returns the Mlag configuration as a resource dict\n\nReturns:\ndict: A dict ojbect containing the Mlag resource attributes.", "source": "codesearchnet"}
{"code": "def select_update_method(self, force_interactive, force_change_set):\n    if (self.interactive or force_interactive):\n        return self.interactive_update_stack\n    elif force_change_set:\n        return self.noninteractive_changeset_update\n    else:\n        return self.default_update_stack", "docstring": "Select the correct update method when updating a stack.\n\nArgs:\nforce_interactive (str): Whether or not to force interactive mode\nno matter what mode the provider is in.\nforce_change_set (bool): Whether or not to force change set use.\n\nReturns:\nfunction: The correct object method to use when updating.", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if size is None:\n      size = self._size - self._file_object.tell()\n\n    return self._file_object.read(size)", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def get_all(self, include_archived=False):\n        \n        return [conv for conv in self._conv_dict.values()\n                if not conv.is_archived or include_archived]", "docstring": "Get all the conversations.\n\nArgs:\ninclude_archived (bool): (optional) Whether to include archived\nconversations. Defaults to ``False``.\n\nReturns:\nList of all :class:`.Conversation` objects.", "source": "juraj-google-style"}
{"code": "def get_struct(name):\n    sid = idc.GetStrucIdByName(name)\n    if (sid == idaapi.BADADDR):\n        raise exceptions.SarkStructNotFound()\n    return sid", "docstring": "Get a struct by it's name.\n\nArgs:\nname: The name of the struct\n\nReturns:\nThe struct's id\n\nRaises:\nexceptions.SarkStructNotFound: is the struct does not exist.", "source": "codesearchnet"}
{"code": "def encode(self, label):\n        \n        label = super().encode(label)\n\n        return torch.tensor(self.stoi.get(label, self.unknown_index))", "docstring": "Encodes a ``label``.\n\nArgs:\nlabel (object): Label to encode.\n\nReturns:\ntorch.Tensor: Encoding of the label.", "source": "juraj-google-style"}
{"code": "def convert(self, inp):\n    inp = self._preprocess(inp)\n    n = NumberService().longestNumber(inp)\n    units = self.extractUnits(inp)\n    quantity = pq.Quantity(float(n), units[0])\n    quantity.units = units[1]\n    return quantity", "docstring": "Converts a string representation of some quantity of units into a\nquantities object.\n\nArgs:\ninp (str): A textual representation of some quantity of units,\ne.g., \"fifty kilograms\".\n\nReturns:\nA quantities object representing the described quantity and its\nunits.", "source": "codesearchnet"}
{"code": "def search_env_paths(fname, key_list=None, verbose=None):\n    import utool as ut\n    if (key_list is None):\n        key_list = [key for key in os.environ if (key.find('PATH') > (- 1))]\n        print(('key_list = %r' % (key_list,)))\n    found = ut.ddict(list)\n    for key in key_list:\n        dpath_list = os.environ[key].split(os.pathsep)\n        for dpath in dpath_list:\n            matches = ut.glob(dpath, fname)\n            found[key].extend(matches)\n    return dict(found)", "docstring": "r\"\"\"\nSearches your PATH to see if fname exists\n\nArgs:\nfname (str): file name to search for (can be glob pattern)\n\nCommandLine:\npython -m utool search_env_paths --fname msvcr*.dll\npython -m utool search_env_paths --fname '*flann*'\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_cplat import *  # NOQA\n>>> import utool as ut\n>>> fname = 'opencv2/highgui/libopencv_highgui.so'\n>>> fname = ut.get_argval('--fname', default='*')\n>>> print('fname = %r' % (fname,))\n>>> key_list = None # ['PATH']\n>>> found = search_env_paths(fname, key_list)\n>>> print(ut.repr4(found, nl=True, strvals=True))\n\nIgnore:\nOpenCV_DIR:PATH={share_opencv}\nOpenCV_CONFIG_PATH:FILEPATH={share_opencv}", "source": "codesearchnet"}
{"code": "def count_nonzero(x, axis=None):\n    if any_symbolic_tensors((x,)):\n        return CountNonzero(axis=axis).symbolic_call(x)\n    return backend.numpy.count_nonzero(x, axis=axis)", "docstring": "Counts the number of non-zero values in `x` along the given `axis`.\n\nIf no axis is specified then all non-zeros in the tensor are counted.\n\nArgs:\nx: Input tensor.\naxis: Axis or tuple of axes along which to count the number of\nnon-zeros. Defaults to `None`.\n\nReturns:\nint or tensor of ints.\n\nExamples:\n>>> x = keras.ops.array([[0, 1, 7, 0], [3, 0, 2, 19]])\n>>> keras.ops.count_nonzero(x)\n5\n>>> keras.ops.count_nonzero(x, axis=0)\narray([1, 1, 2, 1], dtype=int64)\n>>> keras.ops.count_nonzero(x, axis=1)\narray([2, 3], dtype=int64)", "source": "github-repos"}
{"code": "def get_image_features(self, pixel_values_images: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    if vision_feature_select_strategy not in ['default', 'full']:\n        raise ValueError(f'Unexpected select feature strategy: {self.config.vision_feature_select_strategy}')\n    image_outputs = self.image_tower(pixel_values_images, output_hidden_states=True)\n    if isinstance(vision_feature_layer, int):\n        image_outputs = image_outputs.hidden_states[vision_feature_layer]\n        if vision_feature_select_strategy == 'default':\n            image_outputs = image_outputs[:, 1:]\n    else:\n        hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        if vision_feature_select_strategy == 'default':\n            hs_pool = [hs[:, 1:] for hs in hs_pool]\n        image_outputs = torch.cat(hs_pool, dim=-1)\n    image_features = self.multi_modal_projector(image_outputs)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values_images (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nThe tensors corresponding to the input images.\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`, *optional*):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def split(cls, n, contiguous, can_query=itertools.chain(itertools.repeat(True, 50), itertools.repeat(False)).next, _app=None):\n    if (n < 1):\n        raise ValueError('n must be >= 1')\n    ranges = None\n    if can_query():\n        if (not contiguous):\n            ns_keys = get_namespace_keys(_app, (n + 1))\n            if (not ns_keys):\n                return []\n            else:\n                if (len(ns_keys) <= n):\n                    ns_range = []\n                    for ns_key in ns_keys:\n                        ns_range.append(NamespaceRange((ns_key.name() or ''), (ns_key.name() or ''), _app=_app))\n                    return sorted(ns_range, key=(lambda ns_range: ns_range.namespace_start))\n                ranges = [NamespaceRange((ns_keys[0].name() or ''), _app=_app)]\n        else:\n            ns_range = NamespaceRange(_app=_app).normalized_start()\n            if (ns_range is None):\n                return [NamespaceRange(_app=_app)]\n            ranges = [ns_range]\n    else:\n        ranges = [NamespaceRange(_app=_app)]\n    singles = []\n    while (ranges and ((len(ranges) + len(singles)) < n)):\n        namespace_range = ranges.pop(0)\n        if namespace_range.is_single_namespace:\n            singles.append(namespace_range)\n        else:\n            (left, right) = namespace_range.split_range()\n            if can_query():\n                right = right.normalized_start()\n            if (right is not None):\n                ranges.append(right)\n            ranges.append(left)\n    ns_ranges = sorted((singles + ranges), key=(lambda ns_range: ns_range.namespace_start))\n    if contiguous:\n        if (not ns_ranges):\n            return [NamespaceRange(_app=_app)]\n        continuous_ns_ranges = []\n        for i in range(len(ns_ranges)):\n            if (i == 0):\n                namespace_start = MIN_NAMESPACE\n            else:\n                namespace_start = ns_ranges[i].namespace_start\n            if (i == (len(ns_ranges) - 1)):\n                namespace_end = MAX_NAMESPACE\n            else:\n                namespace_end = _ord_to_namespace((_namespace_to_ord(ns_ranges[(i + 1)].namespace_start) - 1))\n            continuous_ns_ranges.append(NamespaceRange(namespace_start, namespace_end, _app=_app))\n        return continuous_ns_ranges\n    else:\n        return ns_ranges", "docstring": "Splits the complete NamespaceRange into n equally-sized NamespaceRanges.\n\nArgs:\nn: The maximum number of NamespaceRanges to return. Fewer than n\nnamespaces may be returned.\ncontiguous: If True then the returned NamespaceRanges will cover the\nentire space of possible namespaces (i.e. from MIN_NAMESPACE to\nMAX_NAMESPACE) without gaps. If False then the returned\nNamespaceRanges may exclude namespaces that don't appear in the\ndatastore.\ncan_query: A function that returns True if split() can query the datastore\nto generate more fair namespace range splits, and False otherwise.\nIf not set then split() is allowed to make 50 datastore queries.\n\nReturns:\nA list of at most n NamespaceRanges representing a near-equal distribution\nof actual existant datastore namespaces. The returned list will be sorted\nlexographically.\n\nRaises:\nValueError: if n is < 1.", "source": "codesearchnet"}
{"code": "def get_raw_entry(self, variant_line=None, variant_dict=None, \n    vcf_header=None, individual_id=None, dict_key=None):\n        \n        if variant_line:\n            variant_line = variant_line.rstrip().split()\n        \n        entry = None\n        \n        if self.field == 'CHROM':\n            if variant_line:\n                entry = variant_line[0]\n            elif variant_dict:\n                entry = variant_dict['CHROM']\n                \n        elif self.field == 'POS':\n            if variant_line:\n                entry = variant_line[1]\n            elif variant_dict:\n                entry = variant_dict['POS']\n            \n        elif self.field == 'ID':\n            if variant_line:\n                entry = variant_line[2]\n            elif variant_dict:\n                entry = variant_dict['ID']\n        \n        elif self.field == 'REF':\n            if variant_line:\n                entry = variant_line[3]\n            elif variant_dict:\n                entry = variant_dict['REF']\n        \n        elif self.field == 'ALT':\n            if variant_line:\n                entry = variant_line[4]\n            elif variant_dict:\n                entry = variant_dict['ALT']\n        \n        elif self.field == 'QUAL':\n            if variant_line:\n                entry = variant_line[5]\n            elif variant_dict:\n                entry = variant_dict['QUAL']\n        \n        elif self.field == 'FILTER':\n            if variant_line:\n                entry = variant_line[6]\n            elif variant_dict:\n                entry = variant_dict['FILTER']\n        \n        elif self.field == 'INFO':\n            if variant_line:\n                for info_annotation in variant_line[7].split(';'):\n                    splitted_annotation = info_annotation.split('=')\n                    if self.info_key == splitted_annotation[0]:\n                        if len(splitted_annotation) == 2:\n                            entry = splitted_annotation[1]\n                            \n            elif variant_dict:\n                entry = variant_dict.get('info_dict',{}).get(self.info_key)\n            \n            if self.dict_entry and entry:\n                \n                first_split = entry.split(self.separators[0])\n                for annotation in first_split:\n                    \n                    splitted_entry = annotation.split(self.separators[1])\n                    key = splitted_entry[0] \n                    value = splitted_entry[1]\n                    if dict_key:\n                        if key == dict_key:\n                            entry = value\n                    \n                    else:\n                        entry = value\n            \n        \n        elif self.field == 'FORMAT':\n            if variant_line:\n                entry = variant_line[8]\n            elif variant_dict:\n                entry = variant_dict['FORMAT']\n        \n        elif self.field == \"sample_id\":\n            \n            if not individual_id:\n                raise IOError(\"If 'sample_id' a individual id must be provided\")\n            if not self.gt_key:\n                raise IOError(\"If 'sample_id' a genotype key must be provided\")\n            \n            if variant_line:\n                if not vcf_header:\n                    raise IOError(\"If 'sample_id' the vcf header must be provided\")\n                \n                format_info = variant_line[8]\n                \n                for i, head in 
enumerate(vcf_header):\n                    if head == individual_id:\n                        raw_gt_call = variant_line[i]\n            elif variant_dict:\n                format_info = variant_dict['FORMAT']\n                raw_gt_call = variant_dict[individual_id]\n            \n            entry_dict = dict(zip(\n                format_info.split(':'), raw_gt_call.split(':')\n            ))\n            entry = entry_dict.get(self.gt_key, '.')\n        \n        return entry", "docstring": "Return the raw entry from the vcf field\n\nIf no entry was found return None\n\nArgs:\nvariant_line (str): A vcf formated variant line\nvcf_header (list): A list with the vcf header line\nindividual_id (str): The individual id to get gt call\nReturns:\nThe raw entry found in variant line", "source": "juraj-google-style"}
{"code": "def tf_initialize(self, x_init, base_value, target_value, estimated_improvement):\n    self.base_value = base_value\n    if (estimated_improvement is None):\n        estimated_improvement = tf.abs(x=base_value)\n    first_step = super(LineSearch, self).tf_initialize(x_init)\n    improvement = tf.divide(x=(target_value - self.base_value), y=tf.maximum(x=estimated_improvement, y=util.epsilon))\n    last_improvement = (improvement - 1.0)\n    if (self.mode == 'linear'):\n        deltas = [((- t) * self.parameter) for t in x_init]\n        self.estimated_incr = ((- estimated_improvement) * self.parameter)\n    elif (self.mode == 'exponential'):\n        deltas = [((- t) * self.parameter) for t in x_init]\n    return (first_step + (deltas, improvement, last_improvement, estimated_improvement))", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body.\n\nArgs:\nx_init: Initial solution guess $x_0$.\nbase_value: Value $f(x')$ at $x = x'$.\ntarget_value: Value $f(x_0)$ at $x = x_0$.\nestimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.\n\nReturns:\nInitial arguments for tf_step.", "source": "codesearchnet"}
{"code": "def split(self):\n    ranges = []\n    for bound in self.bounds:\n        range = VersionRange(None)\n        range.bounds = [bound]\n        ranges.append(range)\n    return ranges", "docstring": "Split into separate contiguous ranges.\n\nReturns:\nA list of VersionRange objects. For example, the range \"3|5+\" will\nbe split into [\"3\", \"5+\"].", "source": "codesearchnet"}
{"code": "def MakeZip(self, input_dir, output_file):\n    \n    logging.info(\"Generating zip template file at %s\", output_file)\n    basename, _ = os.path.splitext(output_file)\n    \n    \n    shutil.make_archive(\n        basename, \"zip\", base_dir=\".\", root_dir=input_dir, verbose=True)", "docstring": "Creates a ZIP archive of the files in the input directory.\n\nArgs:\ninput_dir: the name of the input directory.\noutput_file: the name of the output ZIP archive without extension.", "source": "juraj-google-style"}
{"code": "def sheets_tab_id(config, auth, sheet_url_or_name, sheet_tab):\n    sheet_id = None\n    tab_id = None\n    spreadsheet = sheets_get(config, auth, sheet_url_or_name)\n    if spreadsheet:\n        sheet_id = spreadsheet['spreadsheetId']\n        for tab in spreadsheet.get('sheets', []):\n            if tab['properties']['title'] == sheet_tab:\n                tab_id = tab['properties']['sheetId']\n                break\n    return (sheet_id, tab_id)", "docstring": "Pull sheet tab id from URL, name, or id itself.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nurl_or_name - one of: URL, document title, or id\nsheet_tab - name of tab to get id for\n\nReturns:\nPair of sheet id and tab id.", "source": "github-repos"}
{"code": "def locked_get(self):\n    credentials = None\n    if self._cache:\n        json = self._cache.get(self._key_name)\n        if json:\n            credentials = client.Credentials.new_from_json(json)\n    if (credentials is None):\n        entity = self._get_entity()\n        if (entity is not None):\n            credentials = getattr(entity, self._property_name)\n            if self._cache:\n                self._cache.set(self._key_name, credentials.to_json())\n    if (credentials and hasattr(credentials, 'set_store')):\n        credentials.set_store(self)\n    return credentials", "docstring": "Retrieve Credential from datastore.\n\nReturns:\noauth2client.Credentials", "source": "codesearchnet"}
{"code": "def _count_nonzero(input_tensor, dtype=dtypes.int64):\n    with ops.name_scope('count_nonzero', values=[input_tensor]):\n        zero = array_ops.zeros([], dtype=input_tensor.dtype)\n        nonzero_count = math_ops.reduce_sum(math_ops.cast(math_ops.not_equal(input_tensor, zero), dtype=dtype), name='nonzero_count')\n        return nonzero_count", "docstring": "Same as math_ops.count_nonzero.\n\nThe reduction is done in dtype, which can be faster for 32-bit dtypes.\n\nArgs:\ninput_tensor: numeric tensor\ndtype: reduction dtype\n\nReturns:\nnumber of nonzero values with type dtype", "source": "github-repos"}
{"code": "def one_hot_encoding(labels, num_classes, scope=None):\n    with tf.name_scope(scope, 'OneHotEncoding', [labels]):\n        batch_size = labels.get_shape()[0]\n        indices = tf.expand_dims(tf.range(0, batch_size), 1)\n        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)\n        concated = tf.concat(axis=1, values=[indices, labels])\n        onehot_labels = tf.sparse_to_dense(concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)\n        onehot_labels.set_shape([batch_size, num_classes])\n        return onehot_labels", "docstring": "Transform numeric labels into onehot_labels.\n\nArgs:\nlabels: [batch_size] target labels.\nnum_classes: total number of classes.\nscope: Optional scope for name_scope.\nReturns:\none hot encoding of the labels.", "source": "codesearchnet"}
{"code": "def db_en010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_en010`'.format(value))\n    self._db_en010 = value", "docstring": "Corresponds to IDD Field `db_en010`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_en010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def fold_point(p, lattice, coords_are_cartesian=False):\n    if coords_are_cartesian:\n        p = lattice.get_fractional_coords(p)\n    else:\n        p = np.array(p)\n    p = ((np.mod(((p + 0.5) - 1e-10), 1) - 0.5) + 1e-10)\n    p = lattice.get_cartesian_coords(p)\n    closest_lattice_point = None\n    smallest_distance = 10000\n    for i in ((- 1), 0, 1):\n        for j in ((- 1), 0, 1):\n            for k in ((- 1), 0, 1):\n                lattice_point = np.dot((i, j, k), lattice.matrix)\n                dist = np.linalg.norm((p - lattice_point))\n                if ((closest_lattice_point is None) or (dist < smallest_distance)):\n                    closest_lattice_point = lattice_point\n                    smallest_distance = dist\n    if (not np.allclose(closest_lattice_point, (0, 0, 0))):\n        p = (p - closest_lattice_point)\n    return p", "docstring": "Folds a point with coordinates p inside the first Brillouin zone of the lattice.\n\nArgs:\np: coordinates of one point\nlattice: Lattice object used to convert from reciprocal to cartesian coordinates\ncoords_are_cartesian: Set to True if you are providing\ncoordinates in cartesian coordinates. Defaults to False.\n\nReturns:\nThe cartesian coordinates folded inside the first Brillouin zone", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(VShadowFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._vshadow_volume = None", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    stores = match.get('Stores', {})\n    for volume_name, volume in iter(stores.items()):\n      datetime_value = volume.get('CreationDate', None)\n      if not datetime_value:\n        continue\n\n      partial_path = volume['PartialPath']\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(\n          volume_name, partial_path)\n      event_data.key = ''\n      event_data.root = '/Stores'\n\n      event = time_events.PythonDatetimeEvent(\n          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Volume Configuration Spotlight entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "async def update_server_data(server):\n    \n\n    data = datatools.get_data()\n    \n    send_welcome_message = False\n    if server.id not in data[\"discord\"][\"servers\"]:\n        logger.debug(\"Adding new server to serverdata\")\n        data[\"discord\"][\"servers\"][server.id] = {\"prefix\": \"!\"}\n        if \"mute_intro\" not in data or not data[\"mute_intro\"]:\n            send_welcome_message = True\n\n    \n    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n    _dir_modules = \"{}/../\".format(_dir)\n    for module_name in os.listdir(_dir_modules):\n        if module_name.startswith(\"_\") or module_name.startswith(\"!\"):\n            continue\n\n        if not os.path.isfile(\"{}/{}/_data.py\".format(_dir_modules, module_name)):\n            logger.warning(\"No _data.py file found for module {}\".format(module_name))\n            continue\n\n        try:\n            import_name = \".discord_modis.modules.{}.{}\".format(module_name, \"_data\")\n            _data = importlib.import_module(import_name, \"modis\")\n\n            if _data.modulename not in data[\"discord\"][\"servers\"][server.id]:\n                data[\"discord\"][\"servers\"][server.id][_data.modulename] = _data.sd_structure\n                datatools.write_data(data)\n        except Exception as e:\n            logger.error(\"Could not initialise module {}\".format(module_name))\n            logger.exception(e)\n\n    datatools.write_data(data)\n\n    \n    if send_welcome_message:\n        default_channel = server.default_channel\n        if not default_channel:\n            for channel in server.channels:\n                if channel.name == \"general\":\n                    default_channel = channel\n                    break\n        if not default_channel:\n            for channel in server.channels:\n                if \"general\" in channel.name:\n                    default_channel = channel\n                    break\n        if not default_channel:\n            for channel in server.channels:\n                if channel.type == discord.ChannelType.text:\n                    default_channel = channel\n                    break\n\n        \n        if default_channel:\n            hello_message = \"Hello! I'm Modis.\\n\\n\" + \\\n                            \"The prefix is currently `!`, and can be changed at any time using `!prefix`\\n\\n\" + \\\n                            \"You can use `!help` to get help commands for all modules, \" + \\\n                            \"or {} me to get the server prefix and help commands.\".format(server.me.mention)\n            await client.send_message(default_channel, hello_message)", "docstring": "Updates the server info for the given server\n\nArgs:\nserver: The Discord server to update info for", "source": "juraj-google-style"}
{"code": "def release(self):\n        \n        if not self.acquired:\n            return False\n\n        os.close(self.fd)\n\n        if os.path.exists(self.path):\n            os.remove(self.path)\n\n        self.acquired = False\n        return True", "docstring": "Cleans up the lockfile if it was acquired.\n\nArgs:\nself (JLock): the ``JLock`` instance\n\nReturns:\n``False`` if the lock was not released or the lock is not acquired,\notherwise ``True``.", "source": "juraj-google-style"}
{"code": "def scan_message(self, message, regex):\n    for line in message.split('\\n'):\n        if bool(re.search(regex, line, flags=(re.IGNORECASE | re.MULTILINE))):\n            return line\n    return ''", "docstring": "Scans regex from msg and returns the line that matches\n\nKeyword arguments:\nmessage -- A (long) string, e.g. email body that will be\nscanned.\n\nregex -- A regular expression string that the message will be\nscanned against.\n\nReturns:\nMatching line or empty string", "source": "codesearchnet"}
{"code": "def join(self, *args, **kwargs):\n    super(ThreadReturn, self).join(*args, **kwargs)\n    return self._return", "docstring": "Joins the thread.\n\nArgs:\nself (ThreadReturn): the ``ThreadReturn`` instance\nargs: optional list of arguments\nkwargs: optional key-word arguments\n\nReturns:\nThe return value of the exited thread.", "source": "codesearchnet"}
{"code": "def get_type(name, env, non_generic):\n    \n    if name in env:\n        if isinstance(env[name], MultiType):\n            return clone(env[name])\n        return fresh(env[name], non_generic)\n    else:\n        print(\"W: Undefined symbol {0}\".format(name))\n        return TypeVariable()", "docstring": "Get the type of identifier name from the type environment env.\n\nArgs:\nname: The identifier name\nenv: The type environment mapping from identifier names to types\nnon_generic: A set of non-generic TypeVariables\n\nRaises:\nParseError: Raised if name is an undefined symbol in the type\nenvironment.", "source": "juraj-google-style"}
{"code": "def generate(passphrase, trees=['primary']):\n    (seeds, multi_wallet) = MultiWallet.generate(trees, entropy=True)\n    result = {}\n    for tree in trees:\n        result[tree] = dict(private_seed=seeds[tree], public_seed=multi_wallet.public_wif(tree), encrypted_seed=PassphraseBox.encrypt(passphrase, seeds[tree]))\n    return result", "docstring": "Generate a seed for the primary tree of a Gem wallet.\n\nYou may choose to store the passphrase for a user so the user doesn't have\nto type it in every time. This is okay (although the security risks should\nbe obvious) but Gem strongly discourages storing even the encrypted private\nseed, and storing both the passphrase and the private seed is completely\ninsane. Don't do it.\n\nArgs:\npassphrase (str): The passphrase that will be used to encrypt the seed\nbefore it's send to Gem. Key-stretching is done with PBDKF2 and\nencryption is done with nacl's SecretBox.\ntrees (list of str): A list of names to generate trees for. For User\nWallets this will be ['primary'], for Application Wallets it will be\n['primary', 'backup'].\n\nReturns:\nA dict of dicts containing the serialized public master node, and\na sub-dict with the encrypted private seed for each tree in `trees`.", "source": "codesearchnet"}
{"code": "def binary_n(total_N, min_n=50):\n    max_exp = np.log2(((1.0 * total_N) / min_n))\n    max_exp = int(np.floor(max_exp))\n    return [int(np.floor(((1.0 * total_N) / (2 ** i)))) for i in range(1, (max_exp + 1))]", "docstring": "Creates a list of values by successively halving the total length total_N\nuntil the resulting value is less than min_n.\n\nNon-integer results are rounded down.\n\nArgs:\ntotal_N (int):\ntotal length\nKwargs:\nmin_n (int):\nminimal length after division\n\nReturns:\nlist of integers:\ntotal_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n", "source": "codesearchnet"}
{"code": "def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):\n    summary_ops = ops.get_collection(key)\n    if not summary_ops:\n        return None\n    else:\n        return merge_summary(summary_ops)", "docstring": "Merges all summaries collected in the default graph.\n\nThis op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which\nhas\nidentical behavior.\n\nArgs:\nkey: `GraphKey` used to collect the summaries.  Defaults to\n`GraphKeys.SUMMARIES`.\n\nReturns:\nIf no summaries were collected, returns None.  Otherwise returns a scalar\n`Tensor` of type `string` containing the serialized `Summary` protocol\nbuffer resulting from the merging.", "source": "github-repos"}
{"code": "def _maybe_download_corpora(tmp_dir, dataset_split):\n    cnn_filename = 'cnn_stories.tgz'\n    cnn_finalpath = os.path.join(tmp_dir, 'cnn/stories/')\n    dailymail_filename = 'dailymail_stories.tgz'\n    dailymail_finalpath = os.path.join(tmp_dir, 'dailymail/stories/')\n    if (not tf.gfile.Exists(cnn_finalpath)):\n        cnn_file = generator_utils.maybe_download_from_drive(tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n        with tarfile.open(cnn_file, 'r:gz') as cnn_tar:\n            cnn_tar.extractall(tmp_dir)\n    if (not tf.gfile.Exists(dailymail_finalpath)):\n        dailymail_file = generator_utils.maybe_download_from_drive(tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)\n        with tarfile.open(dailymail_file, 'r:gz') as dailymail_tar:\n            dailymail_tar.extractall(tmp_dir)\n    cnn_files = tf.gfile.Glob((cnn_finalpath + '*'))\n    dailymail_files = tf.gfile.Glob((dailymail_finalpath + '*'))\n    all_files = (cnn_files + dailymail_files)\n    if (dataset_split == problem.DatasetSplit.TRAIN):\n        urls_path = generator_utils.maybe_download(tmp_dir, 'all_train.txt', _TRAIN_URLS)\n    elif (dataset_split == problem.DatasetSplit.EVAL):\n        urls_path = generator_utils.maybe_download(tmp_dir, 'all_val.txt', _DEV_URLS)\n    else:\n        urls_path = generator_utils.maybe_download(tmp_dir, 'all_test.txt', _TEST_URLS)\n    return (all_files, urls_path)", "docstring": "Download corpora if necessary and unzip them.\n\nArgs:\ntmp_dir: directory containing dataset.\ndataset_split: whether we're in train/dev/test mode.\n\nReturns:\nList of all files generated and path to file containing\ntrain/dev/test split info.", "source": "codesearchnet"}
{"code": "def validate(self):\n    if (not isinstance(self.value, bytes)):\n        raise TypeError('opaque value must be bytes')\n    elif (not isinstance(self.opaque_type, enums.OpaqueDataType)):\n        raise TypeError('opaque data type must be an OpaqueDataType enumeration')\n    name_count = len(self.names)\n    for i in range(name_count):\n        name = self.names[i]\n        if (not isinstance(name, six.string_types)):\n            position = '({0} in list)'.format(i)\n            raise TypeError('opaque data name {0} must be a string'.format(position))", "docstring": "Verify that the contents of the OpaqueObject are valid.\n\nRaises:\nTypeError: if the types of any OpaqueObject attributes are invalid.", "source": "codesearchnet"}
{"code": "def _upload_artifacts_to_path(self, mirror=False):\n    if ((not os.listdir(self.artifact_path)) or (not self.artifact_path)):\n        raise S3ArtifactNotFound\n    uploaded = False\n    if self.s3props.get('content_metadata'):\n        LOG.info('Uploading in multiple parts to set metadata')\n        uploaded = self.content_metadata_uploads(mirror=mirror)\n    if (not uploaded):\n        cmd = self._get_upload_cmd(mirror=mirror)\n        result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)\n        LOG.debug('Upload Command Ouput: %s', result.stdout)\n    LOG.info('Uploaded artifacts to %s bucket', self.bucket)", "docstring": "Recursively upload directory contents to S3.\n\nArgs:\nmirror (bool): If true, uses a flat directory structure instead of nesting under a version.", "source": "codesearchnet"}
{"code": "def from_file(cls, path, directory=None, modules=None, active=None):\n    name = basename(path)\n    if name.endswith('.rpp'):\n        name = name[:(- 4)]\n    lines = _repp_lines(path)\n    directory = (dirname(path) if (directory is None) else directory)\n    r = cls(name=name, modules=modules, active=active)\n    _parse_repp(lines, r, directory)\n    return r", "docstring": "Instantiate a REPP from a `.rpp` file.\n\nThe *path* parameter points to the top-level module. Submodules\nare loaded from *directory*. If *directory* is not given, it is\nthe directory part of *path*.\n\nA REPP module may utilize external submodules, which may be\ndefined in two ways. The first method is to map a module name\nto an instantiated REPP instance in *modules*. The second\nmethod assumes that an external group call `>abc` corresponds\nto a file `abc.rpp` in *directory* and loads that file. The\nsecond method only happens if the name (e.g., `abc`) does not\nappear in *modules*. Only one module may define a tokenization\npattern.\n\nArgs:\npath (str): the path to the base REPP file to load\ndirectory (str, optional): the directory in which to search\nfor submodules\nmodules (dict, optional): a mapping from identifiers to\nREPP modules\nactive (iterable, optional): an iterable of default module\nactivations", "source": "codesearchnet"}
{"code": "def delete(script, layer_num=None):\n    \n    filter_xml = '  <filter name=\"Delete Current Mesh\"/>\\n'\n    if isinstance(script, mlx.FilterScript):\n        if (layer_num is None) or (layer_num == script.current_layer()):\n            util.write_filter(script, filter_xml)\n            script.del_layer(script.current_layer())\n        else:\n            cur_layer = script.current_layer()\n            change(script, layer_num)\n            util.write_filter(script, filter_xml)\n            if layer_num < script.current_layer():\n                change(script, cur_layer - 1)\n            else:\n                change(script, cur_layer)\n            script.del_layer(layer_num)\n    else:\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Delete layer\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlayer_num (int): the number of the layer to delete. Default is the\ncurrent layer. Not supported on the file base API.\n\nLayer stack:\nDeletes a layer\nwill change current layer if deleted layer is lower in the stack\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def post_command(self, command, args):\n    self._loop.log_coroutine(self.send_command(command, args, Verifier()))", "docstring": "Post a command asynchronously and don't wait for a response.\n\nThere is no notification of any error that could happen during\ncommand execution.  A log message will be generated if an error\noccurred.  The command's response is discarded.\n\nThis method is thread-safe and may be called from inside or ouside\nof the background event loop.  If there is no websockets connection,\nno error will be raised (though an error will be logged).\n\nArgs:\ncommand (string): The command name\nargs (dict): Optional arguments", "source": "codesearchnet"}
{"code": "def change(script, layer_num=None):\n    \n    if layer_num is None:\n        if isinstance(script, mlx.FilterScript):\n            layer_num = script.last_layer()\n        else:\n            layer_num = 0\n    filter_xml = ''.join([\n        '  <filter name=\"Change the current layer\">\\n',\n        '    <Param name=\"mesh\" ',\n        'value=\"{:d}\" '.format(layer_num),\n        'description=\"Mesh\" ',\n        'type=\"RichMesh\" ',\n        '/>\\n',\n        '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    if isinstance(script, mlx.FilterScript):\n        script.set_current_layer(layer_num)\n        \n    return None", "docstring": "Change the current layer by specifying the new layer number.\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlayer_num (int): the number of the layer to change to. Default is the\nlast layer if script is a mlx.FilterScript object; if script is a\nfilename the default is the first layer.\n\nLayer stack:\nModifies current layer\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def push(self, x):\n    self._queue.append(x)", "docstring": "Adds a new value to the data window.\n\nArgs:\nx: The value to be added to the window.", "source": "github-repos"}
{"code": "def _shuffle_single(fname, extra_fn=None):\n    records = read_records(fname)\n    random.shuffle(records)\n    if (extra_fn is not None):\n        records = extra_fn(records)\n    out_fname = fname.replace(UNSHUFFLED_SUFFIX, '')\n    write_records(records, out_fname)\n    tf.gfile.Remove(fname)", "docstring": "Shuffle a single file of records.\n\nArgs:\nfname: a string\nextra_fn: an optional function from list of TFRecords to list of TFRecords\nto be called after shuffling.", "source": "codesearchnet"}
{"code": "def poll(self, timeout=None):\n        \n        p = select.poll()\n        p.register(self._fd, select.POLLIN | select.POLLPRI)\n        events = p.poll(int(timeout * 1000))\n\n        if len(events) > 0:\n            return True\n\n        return False", "docstring": "Poll for data available for reading from the serial port.\n\n`timeout` can be positive for a timeout in seconds, 0 for a\nnon-blocking poll, or negative or None for a blocking poll. Default is\na blocking poll.\n\nArgs:\ntimeout (int, float, None): timeout duration in seconds.\n\nReturns:\nbool: ``True`` if data is available for reading from the serial port, ``False`` if not.", "source": "juraj-google-style"}
{"code": "def update_variant_compounds(self, variant, variant_objs = None):\n        \n        compound_objs = []\n        for compound in variant.get('compounds', []):\n            not_loaded = True\n            gene_objs = []\n            \n            if variant_objs:\n                variant_obj = variant_objs.get(compound['variant'])\n            else:\n                variant_obj = self.variant_collection.find_one({'_id': compound['variant']})\n            if variant_obj:\n                \n                not_loaded = False\n                compound['rank_score'] = variant_obj['rank_score']\n                for gene in variant_obj.get('genes', []):\n                    gene_obj = {\n                        'hgnc_id': gene['hgnc_id'],\n                        'hgnc_symbol': gene.get('hgnc_symbol'),\n                        'region_annotation': gene.get('region_annotation'),\n                        'functional_annotation': gene.get('functional_annotation'),\n                    }\n                    gene_objs.append(gene_obj)\n                    compound['genes'] = gene_objs\n\n            compound['not_loaded'] = not_loaded\n            compound_objs.append(compound)\n\n        return compound_objs", "docstring": "Update compounds for a variant.\n\nThis will add all the necessary information of a variant on a compound object.\n\nArgs:\nvariant(scout.models.Variant)\nvariant_objs(dict): A dictionary with _ids as keys and variant objs as values.\n\nReturns:\ncompound_objs(list(dict)): A dictionary with updated compound objects.", "source": "juraj-google-style"}
{"code": "def einsum_vecmul_index(gate_indices, number_of_qubits):\n    (mat_l, mat_r, tens_lin, tens_lout) = _einsum_matmul_index_helper(gate_indices, number_of_qubits)\n    return ('{mat_l}{mat_r}, '.format(mat_l=mat_l, mat_r=mat_r) + '{tens_lin}->{tens_lout}'.format(tens_lin=tens_lin, tens_lout=tens_lout))", "docstring": "Return the index string for Numpy.eignsum matrix-vector multiplication.\n\nThe returned indices are to perform a matrix multiplication A.v where\nthe matrix A is an M-qubit matrix, vector v is an N-qubit vector, and\nM <= N, and identity matrices are implied on the subsystems where A has no\nsupport on v.\n\nArgs:\ngate_indices (list[int]): the indices of the right matrix subsystems\nto contract with the left matrix.\nnumber_of_qubits (int): the total number of qubits for the right matrix.\n\nReturns:\nstr: An indices string for the Numpy.einsum function.", "source": "codesearchnet"}
{"code": "def is_dsub_operation(op):\n  \n  if not is_pipeline(op):\n    return False\n\n  for name in ['dsub-version', 'job-id', 'job-name', 'user-id']:\n    if not get_label(op, name):\n      return False\n\n  return True", "docstring": "Determine if a pipelines operation is a dsub request.\n\nWe don't have a rigorous way to identify an operation as being submitted\nby dsub. Our best option is to check for certain fields that have always\nbeen part of dsub operations.\n\n- labels: job-id, job-name, and user-id have always existed. The dsub-version\nlabel has always existed for the google-v2 provider.\n\nArgs:\nop: a pipelines operation.\n\nReturns:\nBoolean, true if the pipeline run was generated by dsub.", "source": "juraj-google-style"}
{"code": "def get_export_outputs(export_outputs, predictions):\n    if export_outputs is None:\n        default_output = export_output_lib.PredictOutput(predictions)\n        export_outputs = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output}\n    if not isinstance(export_outputs, dict):\n        raise TypeError('export_outputs must be dict, given: {}'.format(export_outputs))\n    for v in export_outputs.values():\n        if not isinstance(v, export_output_lib.ExportOutput):\n            raise TypeError('Values in export_outputs must be ExportOutput objects. Given: {}'.format(export_outputs))\n    _maybe_add_default_serving_output(export_outputs)\n    return export_outputs", "docstring": "Validate export_outputs or create default export_outputs.\n\nArgs:\nexport_outputs: Describes the output signatures to be exported to\n`SavedModel` and used during serving. Should be a dict or None.\npredictions:  Predictions `Tensor` or dict of `Tensor`.\n\nReturns:\nValid export_outputs dict\n\nRaises:\nTypeError: if export_outputs is not a dict or its values are not\nExportOutput instances.", "source": "github-repos"}
{"code": "def set_timer(self, num_secs):\n        \n        \n\n        \n        status = self.status()\n        devices = status['dps']\n        devices_numbers = list(devices.keys())\n        devices_numbers.sort()\n        dps_id = devices_numbers[-1]\n\n        payload = self.generate_payload(SET, {dps_id:num_secs})\n\n        data = self._send_receive(payload)\n        log.debug('set_timer received data=%r', data)\n        return data", "docstring": "Set a timer.\n\nArgs:\nnum_secs(int): Number of seconds", "source": "juraj-google-style"}
{"code": "def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:\n    \n    sx, sz = size\n    rx, rz = resolution\n    dx, dz = sx / rx, sz / rz  \n    ox, oz = -sx / 2, -sz / 2  \n\n    def gen_pos():\n        for z in range(rz):\n            for x in range(rx):\n                yield ox + x * dx\n                yield 0\n                yield oz + z * dz\n\n    def gen_uv():\n        for z in range(rz):\n            for x in range(rx):\n                yield x / (rx - 1)\n                yield 1 - z / (rz - 1)\n\n    def gen_normal():\n        for _ in range(rx * rz):\n            yield 0.0\n            yield 1.0\n            yield 0.0\n\n    def gen_index():\n        for z in range(rz - 1):\n            for x in range(rx - 1):\n                \n                yield z * rz + x + 1\n                yield z * rz + x\n                yield z * rz + x + rx\n                \n                yield z * rz + x + 1\n                yield z * rz + x + rx\n                yield z * rz + x + rx + 1\n\n    pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)\n    uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)\n    normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)\n    index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)\n\n    vao = VAO(\"plane_xz\", mode=moderngl.TRIANGLES)\n\n    vao.buffer(pos_data, '3f', ['in_position'])\n    vao.buffer(uv_data, '2f', ['in_uv'])\n    vao.buffer(normal_data, '3f', ['in_normal'])\n\n    vao.index_buffer(index_data, index_element_size=4)\n\n    return vao", "docstring": "Generates a plane on the xz axis of a specific size and resolution.\nNormals and texture coordinates are also included.\n\nArgs:\nsize: (x, y) tuple\nresolution: (x, y) tuple\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance", "source": "juraj-google-style"}
{"code": "def log2(x):\n    if any_symbolic_tensors((x,)):\n        return Log2().symbolic_call(x)\n    return backend.numpy.log2(x)", "docstring": "Base-2 logarithm of `x`, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise base-2 logarithm of `x`.", "source": "github-repos"}
{"code": "def _ProcessEvent(self, mediator, event):\n    try:\n        self._analysis_plugin.ExamineEvent(mediator, event)\n    except Exception as exception:\n        self.SignalAbort()\n        if self._debug_output:\n            logger.warning('Unhandled exception while processing event object.')\n            logger.exception(exception)", "docstring": "Processes an event.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event.", "source": "codesearchnet"}
{"code": "def get_default_padding(self):\n    high = ((1024 * 10) + (self.size \n    low = (1024 + (self.size \n    if (self.padding >= 0):\n        if (self.padding > high):\n            return low\n        return self.padding\n    else:\n        return low", "docstring": "The default implementation which tries to select a reasonable\namount of padding and which might change in future versions.\n\nReturns:\nint: Amount of padding after saving", "source": "codesearchnet"}
{"code": "def get_definition_directive(self, node, directive, arg, default):\n    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())\n    if not defs:\n        return default\n    arg_values_found = []\n    for def_ in defs:\n        if directive in def_.directives and arg in def_.directives[directive]:\n            arg_values_found.append(def_.directives[directive][arg])\n    if not arg_values_found:\n        return default\n    if len(arg_values_found) == 1:\n        return arg_values_found[0]\n    first_value = arg_values_found[0]\n    for other_value in arg_values_found[1:]:\n        if not ast_util.matches(first_value, other_value):\n            qn = anno.getanno(node, anno.Basic.QN)\n            raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' % (qn, directive.__name__, arg, parser.unparse(other_value).strip(), parser.unparse(first_value).strip()))\n    return first_value", "docstring": "Returns the unique directive argument for a symbol.\n\nSee lang/directives.py for details on directives.\n\nExample:\n# Given a directive in the code:\nag.foo_directive(bar, baz=1)\n\n# One can write for an AST node Name(id='bar'):\nget_definition_directive(node, ag.foo_directive, 'baz')\n\nArgs:\nnode: ast.AST, the node representing the symbol for which the directive\nargument is needed.\ndirective: Callable[..., Any], the directive to search.\narg: str, the directive argument to return.\ndefault: Any\n\nRaises:\nValueError: if conflicting annotations have been found", "source": "github-repos"}
{"code": "def integers(start, count):\n    \n    if count < 0:\n        raise ValueError(\"integers() count cannot be negative\")\n    return query(irange(start, start + count))", "docstring": "Generates in sequence the integral numbers within a range.\n\nNote: This method uses deferred execution.\n\nArgs:\nstart: The first integer in the sequence.\ncount: The number of sequential integers to generate.\n\nReturns:\nA Queryable over the specified range of integers.\n\nRaises:\nValueError: If count is negative.", "source": "juraj-google-style"}
{"code": "def find(self, id):\n        \n        url = \"{}/{}/{}\".format(__endpoint__, self.type.RESOURCE, id)\n        response = RestClient.get(url)[self.type.RESOURCE[:-1]]\n        return self.type(response)", "docstring": "Get a resource by its id\n\nArgs:\nid (string): Resource id\nReturns:\nobject: Instance of the resource type", "source": "juraj-google-style"}
{"code": "def call(self, inputs, **kwargs):\n    return inputs", "docstring": "This is where the layer's logic lives.\n\nArgs:\ninputs: Input tensor, or list/tuple of input tensors.\n**kwargs: Additional keyword arguments.\n\nReturns:\nA tensor or list/tuple of tensors.", "source": "github-repos"}
{"code": "def status(self, job_ids):\n        \n        if job_ids:\n            self._status()\n        return [self.resources[jid]['status'] for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by the job identifiers\nreturned from the submit request.\n\nArgs:\n- job_ids (list) : A list of job identifiers\n\nReturns:\n- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\nRaises:\n- ExecutionProviderException or its subclasses", "source": "juraj-google-style"}
{"code": "def verify_abort(func, *args, **kwargs):\n    \n    expected_exception = kwargs.pop(\"expected_exception\", runez.system.AbortException)\n    with CaptureOutput() as logged:\n        try:\n            value = func(*args, **kwargs)\n            assert False, \"%s did not raise, but returned %s\" % (func, value)\n\n        except expected_exception:\n            return str(logged)", "docstring": "Convenient wrapper around functions that should exit or raise an exception\n\nExample:\nassert \"Can't create folder\" in verify_abort(ensure_folder, \"/dev/null/not-there\")\n\nArgs:\nfunc (callable): Function to execute\n*args: Args to pass to 'func'\n**kwargs: Named args to pass to 'func'\n\nReturns:\n(str): Chatter from call to 'func', if it did indeed raise", "source": "juraj-google-style"}
{"code": "def prepare_background_data(self):\n    self.background_data = []\n    background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)\n    if not gfile.Exists(background_dir):\n        return self.background_data\n    with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n        wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])\n        wav_loader = io_ops.read_file(wav_filename_placeholder)\n        wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n        search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME, '*.wav')\n        for wav_path in gfile.Glob(search_path):\n            wav_data = sess.run(wav_decoder, feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()\n            self.background_data.append(wav_data)\n        if not self.background_data:\n            raise Exception('No background wav files were found in ' + search_path)", "docstring": "Searches a folder for background noise audio, and loads it into memory.\n\nIt's expected that the background audio samples will be in a subdirectory\nnamed '_background_noise_' inside the 'data_dir' folder, as .wavs that match\nthe sample rate of the training data, but can be much longer in duration.\n\nIf the '_background_noise_' folder doesn't exist at all, this isn't an\nerror, it's just taken to mean that no background noise augmentation should\nbe used. If the folder does exist, but it's empty, that's treated as an\nerror.\n\nReturns:\nList of raw PCM-encoded audio samples of background noise.\n\nRaises:\nException: If files aren't found in the folder.", "source": "github-repos"}
{"code": "def load_file_to_str(path):\n    with open(path, 'rt') as f:\n        string = f.read().replace(linesep, '')\n    if (not string):\n        raise LoadError(('%s file is empty!' % path))\n    return string", "docstring": "Load file into a string removing newlines\n\nArgs:\npath (str): Path to file\n\nReturns:\nstr: String contents of file", "source": "codesearchnet"}
{"code": "def sort_dependencies(self, image, dependencies=None):\n    if (dependencies is None):\n        dependencies = OrderedDict()\n    if (image in dependencies):\n        return\n    requires = self.ymldefs[image].get('requires', [])\n    for dep in requires:\n        self.sort_dependencies(dep, dependencies)\n    dependencies[image] = None\n    return dependencies.keys()", "docstring": "Topologically sort the docker commands by their requirements\n\nNote:\nCircular \"requires\" dependencies are assumed to have already been checked in\nget_external_base_image, they are not checked here\n\nArgs:\nimage (str): process this docker image's dependencies\ndependencies (OrderedDict): running cache of sorted dependencies (ordered dict)\n\nReturns:\nList[str]: list of dependencies a topologically-sorted build order", "source": "codesearchnet"}
{"code": "def histogram(self, tag, values, bins, step=None):\n    if (step is None):\n        step = self._step\n    else:\n        self._step = step\n    values = onp.array(values)\n    bins = onp.array(bins)\n    values = onp.reshape(values, (- 1))\n    (counts, limits) = onp.histogram(values, bins=bins)\n    cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))\n    (start, end) = onp.searchsorted(cum_counts, [0, (cum_counts[(- 1)] - 1)], side='right')\n    (start, end) = (int(start), (int(end) + 1))\n    counts = (counts[(start - 1):end] if (start > 0) else onp.concatenate([[0], counts[:end]]))\n    limits = limits[start:(end + 1)]\n    sum_sq = values.dot(values)\n    histo = HistogramProto(min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limit=limits.tolist(), bucket=counts.tolist())\n    summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])\n    self.add_summary(summary, step)", "docstring": "Saves histogram of values.\n\nArgs:\ntag: str: label for this data\nvalues: ndarray: will be flattened by this routine\nbins: number of bins in histogram, or array of bins for onp.histogram\nstep: int: training step", "source": "codesearchnet"}
{"code": "def __replaceSpecialValues(self, decisions):\n    error = []\n    for (row, line) in enumerate(decisions):\n        if ('.' in line):\n            for (i, element) in enumerate(line):\n                if (row == 0):\n                    error.append(\"Row: {}colume: {}==> don't have parent value\".format(str(row).ljust(4), str(i).ljust(4)))\n                if (element == self.__parentSymbol):\n                    if (decisions[(row - 1)][i] == '.'):\n                        error.append(\"Row: {}Colume: {}==> don't have parent value\".format(str(row).ljust(4), str(i).ljust(4)))\n                    decisions[row][i] = decisions[(row - 1)][i]\n    if error:\n        view.Tli.showErrors('ReplaceSpecialValuesError', error)\n    else:\n        return decisions", "docstring": "Will replace special values in decisions array.\n\nArgs:\ndecisions (array of array of str): Standard decision array format.\nRaises:\nValueError: Row element don't have parent value.\n\nReturns:\nNew decision array with updated values.", "source": "codesearchnet"}
{"code": "def iter_archive(self, resource):\n    \n    if isinstance(resource, six.string_types):\n      resource = resource_lib.Resource(path=resource)\n    return extractor.iter_archive(resource.path, resource.extract_method)", "docstring": "Returns iterator over files within archive.\n\n**Important Note**: caller should read files as they are yielded.\nReading out of order is slow.\n\nArgs:\nresource: path to archive or `tfds.download.Resource`.\n\nReturns:\nGenerator yielding tuple (path_within_archive, file_obj).", "source": "juraj-google-style"}
{"code": "def _flatten_tensors(tensors):\n    if not tensors:\n        raise ValueError('tensors cannot be empty')\n    shape = tensors[0].shape\n    for tensor in tensors:\n        shape = shape.merge_with(tensor.shape)\n    if not shape.is_fully_defined():\n        raise ValueError('Tensors must have statically known shape.')\n    if len(shape) != 1:\n        reshaped = []\n        for t in tensors:\n            with ops.colocate_with(t):\n                reshaped.append(array_ops.reshape(t, [-1]))\n        tensors = reshaped\n    return (tensors, shape)", "docstring": "Check tensors for isomorphism and flatten.\n\nArgs:\ntensors: list of `tf.Tensor` which must all have the same shape.\n\nReturns:\ntensors: a list of `tf.Tensor` which are flattened (1D) views of tensors\nshape: the original shape of each element of input tensors\n\nRaises:\nValueError: tensors are empty or non-isomorphic or have unknown shape.", "source": "github-repos"}
{"code": "def get_kerberos_ticket(username, password):\n    cache = ('/tmp/ion-%s' % uuid.uuid4())\n    logger.debug(\"Setting KRB5CCNAME to 'FILE:{}'\".format(cache))\n    os.environ['KRB5CCNAME'] = ('FILE:' + cache)\n    try:\n        realm = settings.CSL_REALM\n        kinit = pexpect.spawnu('/usr/bin/kinit {}@{}'.format(username, realm), timeout=settings.KINIT_TIMEOUT)\n        kinit.expect(':')\n        kinit.sendline(password)\n        returned = kinit.expect([pexpect.EOF, 'password:'])\n        if (returned == 1):\n            logger.debug('Password for {}@{} expired, needs reset'.format(username, realm))\n            return 'reset'\n        kinit.close()\n        exitstatus = kinit.exitstatus\n    except pexpect.TIMEOUT:\n        KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)\n        exitstatus = 1\n    if (exitstatus != 0):\n        try:\n            realm = settings.AD_REALM\n            kinit = pexpect.spawnu('/usr/bin/kinit {}@{}'.format(username, realm), timeout=settings.KINIT_TIMEOUT)\n            kinit.expect(':')\n            kinit.sendline(password)\n            returned = kinit.expect([pexpect.EOF, 'password:'])\n            if (returned == 1):\n                return False\n            kinit.close()\n            exitstatus = kinit.exitstatus\n        except pexpect.TIMEOUT:\n            KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)\n            exitstatus = 1\n    if ('KRB5CCNAME' in os.environ):\n        subprocess.check_call(['kdestroy', '-c', os.environ['KRB5CCNAME']])\n        del os.environ['KRB5CCNAME']\n    if (exitstatus == 0):\n        logger.debug('Kerberos authorized {}@{}'.format(username, realm))\n        return True\n    else:\n        logger.debug('Kerberos failed to authorize {}'.format(username))\n        return False", "docstring": "Attempts to create a Kerberos ticket for a user.\n\nArgs:\nusername\nThe username.\npassword\nThe password.\n\nReturns:\nBoolean indicating success or failure of ticket creation", "source": "codesearchnet"}
{"code": "def post_process(self, dir_name, d):\n    logger.info('Post-processing dir:{}'.format(dir_name))\n    fullpath = os.path.abspath(dir_name)\n    transformations = {}\n    filenames = glob.glob(os.path.join(fullpath, 'transformations.json*'))\n    if (len(filenames) >= 1):\n        with zopen(filenames[0], 'rt') as f:\n            transformations = json.load(f)\n            try:\n                m = re.match('(\\\\d+)-ICSD', transformations['history'][0]['source'])\n                if m:\n                    d['icsd_id'] = int(m.group(1))\n            except Exception as ex:\n                logger.warning('Cannot parse ICSD from transformations file.')\n                pass\n    else:\n        logger.warning('Transformations file does not exist.')\n    other_parameters = transformations.get('other_parameters')\n    new_tags = None\n    if other_parameters:\n        new_tags = other_parameters.pop('tags', None)\n        new_author = other_parameters.pop('author', None)\n        if new_author:\n            d['author'] = new_author\n        if (not other_parameters):\n            transformations.pop('other_parameters')\n    d['transformations'] = transformations\n    filenames = glob.glob(os.path.join(fullpath, 'custodian.json*'))\n    if (len(filenames) >= 1):\n        with zopen(filenames[0], 'rt') as f:\n            d['custodian'] = json.load(f)\n    try:\n        run_stats = {}\n        for filename in glob.glob(os.path.join(fullpath, 'OUTCAR*')):\n            outcar = Outcar(filename)\n            i = (1 if re.search('relax2', filename) else 0)\n            taskname = ('relax2' if re.search('relax2', filename) else 'relax1')\n            d['calculations'][i]['output']['outcar'] = outcar.as_dict()\n            run_stats[taskname] = outcar.run_stats\n    except:\n        logger.error('Bad OUTCAR for {}.'.format(fullpath))\n    try:\n        overall_run_stats = {}\n        for key in ['Total CPU time used (sec)', 'User time (sec)', 'System time (sec)', 'Elapsed time (sec)']:\n            overall_run_stats[key] = sum([v[key] for v in run_stats.values()])\n        run_stats['overall'] = overall_run_stats\n    except:\n        logger.error('Bad run stats for {}.'.format(fullpath))\n    d['run_stats'] = run_stats\n    if self.use_full_uri:\n        d['dir_name'] = get_uri(dir_name)\n    if new_tags:\n        d['tags'] = new_tags\n    logger.info(('Post-processed ' + fullpath))", "docstring": "Simple post-processing for various files other than the vasprun.xml.\nCalled by generate_task_doc. Modify this if your runs have other\nkinds of processing requirements.\n\nArgs:\ndir_name:\nThe dir_name.\nd:\nCurrent doc generated.", "source": "codesearchnet"}
{"code": "def argsort(*args, **kwargs):\n    if ((len(args) == 1) and isinstance(args[0], dict)):\n        dict_ = args[0]\n        index_list = list(dict_.keys())\n        value_list = list(dict_.values())\n        return sortedby2(index_list, value_list)\n    else:\n        index_list = list(range(len(args[0])))\n        return sortedby2(index_list, *args, **kwargs)", "docstring": "like np.argsort but for lists\n\nArgs:\n*args: multiple lists to sort by\n**kwargs:\nreverse (bool): sort order is descending if True else acscending\n\nCommandLine:\npython -m utool.util_list argsort\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> result = ut.argsort({'a': 3, 'b': 2, 'c': 100})\n>>> print(result)", "source": "codesearchnet"}
{"code": "def items(self, prefix=None, delimiter=None):\n    return _item.Items(self._name, prefix, delimiter, context=self._context)", "docstring": "Get an iterator for the items within this bucket.\n\nArgs:\nprefix: an optional prefix to match items.\ndelimiter: an optional string to simulate directory-like semantics. The returned items\nwill be those whose names do not contain the delimiter after the prefix. For\nthe remaining items, the names will be returned truncated after the delimiter\nwith duplicates removed (i.e. as pseudo-directories).\nReturns:\nAn iterable list of items within this bucket.", "source": "codesearchnet"}
{"code": "def add_primitives_path(path):\n    if (path not in _PRIMITIVES_PATHS):\n        if (not os.path.isdir(path)):\n            raise ValueError('Invalid path: {}'.format(path))\n        LOGGER.debug('Adding new primitives path %s', path)\n        _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))", "docstring": "Add a new path to look for primitives.\n\nThe new path will be inserted in the first place of the list,\nso any primitive found in this new folder will take precedence\nover any other primitive with the same name that existed in the\nsystem before.\n\nArgs:\npath (str): path to add\n\nRaises:\nValueError: A `ValueError` will be raised if the path is not valid.", "source": "codesearchnet"}
{"code": "def zero_state(self, batch_size, dtype):\n    state_size = self.state_size\n    is_eager = context.executing_eagerly()\n    if is_eager and _hasattr(self, '_last_zero_state'):\n        last_state_size, last_batch_size, last_dtype, last_output = getattr(self, '_last_zero_state')\n        if last_batch_size == batch_size and last_dtype == dtype and (last_state_size == state_size):\n            return last_output\n    with backend.name_scope(type(self).__name__ + 'ZeroState'):\n        output = _zero_state_tensors(state_size, batch_size, dtype)\n    if is_eager:\n        self._last_zero_state = (state_size, batch_size, dtype, output)\n    return output", "docstring": "Return zero-filled state tensor(s).\n\nArgs:\nbatch_size: int, float, or unit Tensor representing the batch size.\ndtype: the data type to use for the state.\n\nReturns:\nIf `state_size` is an int or TensorShape, then the return value is a\n`N-D` tensor of shape `[batch_size, state_size]` filled with zeros.\n\nIf `state_size` is a nested list or tuple, then the return value is\na nested list or tuple (of the same structure) of `2-D` tensors with\nthe shapes `[batch_size, s]` for each s in `state_size`.", "source": "github-repos"}
{"code": "def clean_headers(headers):\n    clean = {}\n    try:\n        for (k, v) in six.iteritems(headers):\n            if (not isinstance(k, six.binary_type)):\n                k = str(k)\n            if (not isinstance(v, six.binary_type)):\n                v = str(v)\n            clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)\n    except UnicodeEncodeError:\n        from oauth2client.client import NonAsciiHeaderError\n        raise NonAsciiHeaderError(k, ': ', v)\n    return clean", "docstring": "Forces header keys and values to be strings, i.e not unicode.\n\nThe httplib module just concats the header keys and values in a way that\nmay make the message header a unicode string, which, if it then tries to\ncontatenate to a binary request body may result in a unicode decode error.\n\nArgs:\nheaders: dict, A dictionary of headers.\n\nReturns:\nThe same dictionary but with all the keys converted to strings.", "source": "codesearchnet"}
{"code": "def _get_linear_trajectory(x0, velocity, t):\n    x0 = tf.convert_to_tensor(x0)\n    velocity = tf.convert_to_tensor(velocity)\n    t = tf.convert_to_tensor(t)\n    if (x0.shape.ndims != 1):\n        raise ValueError('x0 must be a rank 1 tensor')\n    if (velocity.shape.ndims != 1):\n        raise ValueError('velocity must be a rank 1 tensor')\n    if (t.shape.ndims != 1):\n        raise ValueError('t must be a rank 1 tensor')\n    x0 = tf.expand_dims(x0, axis=0)\n    velocity = tf.expand_dims(velocity, axis=0)\n    dx = (velocity * tf.expand_dims(t, axis=(- 1)))\n    linear_trajectories = (x0 + dx)\n    assert (linear_trajectories.shape.ndims == 2), 'linear_trajectories should be a rank 2 tensor'\n    return linear_trajectories", "docstring": "Construct a linear trajectory from x0.\n\nArgs:\nx0: N-D float tensor.\nvelocity: N-D float tensor\nt: [sequence_length]-length float tensor\n\nReturns:\nx: [sequence_length, ndims] float tensor.", "source": "codesearchnet"}
{"code": "def get_item(self, *key):\n        \n        item = self._get_item_or_section(key)\n        if not item.is_item:\n            raise RuntimeError('{} is a section, not an item'.format(key))\n        return item", "docstring": "The recommended way of retrieving an item by key when extending configmanager's behaviour.\nAttribute and dictionary key access is configurable and may not always return items\n(see PlainConfig for example), whereas this method will always return the corresponding\nItem as long as NOT_FOUND hook callbacks don't break this convention.\n\nArgs:\n*key\n\nReturns:\nitem (:class:`.Item`):", "source": "juraj-google-style"}
{"code": "def export_msdt(self, filename):\n    fmt = ('csv' if filename.lower().endswith('.csv') else 'dat')\n    delimiter = (', ' if (fmt == 'csv') else ' ')\n    with open(filename, 'wt') as f:\n        if (fmt == 'dat'):\n            f.write('\n        f.write(delimiter.join(['t', 'MSD', 'MSD_a', 'MSD_b', 'MSD_c', 'MSCD']))\n        f.write('\\n')\n        for (dt, msd, msdc, mscd) in zip(self.dt, self.msd, self.msd_components, self.mscd):\n            f.write(delimiter.join([('%s' % v) for v in (([dt, msd] + list(msdc)) + [mscd])]))\n            f.write('\\n')", "docstring": "Writes MSD data to a csv file that can be easily plotted in other\nsoftware.\n\nArgs:\nfilename (str): Filename. Supported formats are csv and dat. If\nthe extension is csv, a csv file is written. Otherwise,\na dat format is assumed.", "source": "codesearchnet"}
{"code": "def convert_to_shape(x):\n    if (x is None):\n        return None\n    if isinstance(x, Shape):\n        return x\n    if isinstance(x, str):\n        x = _parse_string_to_list_of_pairs(x, seconds_to_int=True)\n    return Shape(x)", "docstring": "Converts input to a Shape.\n\nArgs:\nx: Shape, str, or None.\n\nReturns:\nShape or None.\n\nRaises:\nValueError: If x cannot be converted to a Shape.", "source": "codesearchnet"}
{"code": "def _ReduceParserFilters(cls, includes, excludes):\n    if ((not includes) or (not excludes)):\n        return\n    for parser_name in set(includes).intersection(excludes):\n        if (includes[parser_name] == excludes[parser_name]):\n            logger.warning('Parser {0:s} was in both the inclusion and exclusion lists. Ignoring included parser.'.format(parser_name))\n            includes.pop(parser_name)\n            continue\n        plugin_includes = includes[parser_name]\n        plugin_excludes = excludes[parser_name]\n        intersection = set(plugin_includes).intersection(plugin_excludes)\n        if (not intersection):\n            continue\n        logger.warning('Parser {0:s} plugins: {1:s} in both the inclusion and exclusion lists. Ignoring included plugins.'.format(parser_name, ', '.join(intersection)))\n        plugins_list = list(set(plugin_includes).difference(intersection))\n        includes[parser_name] = plugins_list\n    parsers_to_pop = []\n    for parser_name in excludes:\n        if (parser_name in includes):\n            continue\n        logger.warning('The excluded parser: {0:s} is not associated with the included parsers: {1:s}. Ignoring excluded parser.'.format(parser_name, ', '.join(includes.keys())))\n        parsers_to_pop.append(parser_name)\n    for parser_name in parsers_to_pop:\n        excludes.pop(parser_name)", "docstring": "Reduces the parsers and plugins to include and exclude.\n\nIf an intersection is found, the parser or plugin is removed from\nthe inclusion set. If a parser is not in inclusion set there is no need\nto have it in the exclusion set.\n\nArgs:\nincludes (dict[str, BaseParser]): included parsers and plugins by name.\nexcludes (dict[str, BaseParser]): excluded parsers and plugins by name.", "source": "codesearchnet"}
{"code": "def playback_trajectory(env, ep_dir):\n    \n\n    \n    xml_path = os.path.join(ep_dir, \"model.xml\")\n    with open(xml_path, \"r\") as f:\n        env.reset_from_xml_string(f.read())\n\n    state_paths = os.path.join(ep_dir, \"state_*.npz\")\n\n    \n    t = 0\n    for state_file in sorted(glob(state_paths)):\n        print(state_file)\n        dic = np.load(state_file)\n        states = dic[\"states\"]\n        for state in states:\n            env.sim.set_state_from_flattened(state)\n            env.sim.forward()\n            env.render()\n            t += 1\n            if t % 100 == 0:\n                print(t)", "docstring": "Playback data from an episode.\n\nArgs:\nep_dir: The path to the directory containing data for an episode.", "source": "juraj-google-style"}
{"code": "def remove_node(self, node_id, force=False):\n    url = self._url('/nodes/{0}', node_id)\n    params = {'force': force}\n    res = self._delete(url, params=params)\n    self._raise_for_status(res)\n    return True", "docstring": "Remove a node from the swarm.\n\nArgs:\nnode_id (string): ID of the node to be removed.\nforce (bool): Force remove an active node. Default: `False`\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the node referenced doesn't exist in the swarm.\n\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\nReturns:\n`True` if the request was successful.", "source": "codesearchnet"}
{"code": "def from_dict(d):\n        \n        i = Tags()\n        for k, v in d.items():\n            if k not in (\"@module\", \"@class\"):\n                i[k] = v\n        return i", "docstring": "Creates Tags object from a dictionary.\n\nArgs:\nd: Dict of feff parameters and values.\n\nReturns:\nTags object", "source": "juraj-google-style"}
{"code": "def _get_ngram_counter(ids, n):\n    ids = [token_id for token_id in ids if (token_id != 0)]\n    ngram_list = [tuple(ids[i:(i + n)]) for i in range(((len(ids) + 1) - n))]\n    ngrams = set(ngram_list)\n    counts = collections.Counter()\n    for ngram in ngrams:\n        counts[ngram] = 1\n    return counts", "docstring": "Get a Counter with the ngrams of the given ID list.\n\nArgs:\nids: np.array or a list corresponding to a single sentence\nn: n-gram size\n\nReturns:\ncollections.Counter with ID tuples as keys and 1s as values.", "source": "codesearchnet"}
{"code": "def first(self):\n\n    def _transform(xs):\n        try:\n            return [six.next(iter(xs))]\n        except StopIteration:\n            return []\n    return self.transform(_transform, 'first')", "docstring": "Return a Query that selects only the first element of this Query.\nIf no elements are available, returns a query with no results.\n\nExample usage:\n\n.. code:: python\n\n>> q = Query(lambda: list(range(5)))\n>> q.first.results\n[0]\n\nReturns:\nQuery", "source": "codesearchnet"}
{"code": "def _define_loop(graph, eval_steps):\n  \n  loop = tools.Loop(\n      None, graph.step, graph.should_log, graph.do_report, graph.force_reset)\n  loop.add_phase(\n      'eval', graph.done, graph.score, graph.summary, eval_steps,\n      report_every=eval_steps,\n      log_every=None,\n      checkpoint_every=None,\n      feed={graph.is_training: False})\n  return loop", "docstring": "Create and configure an evaluation loop.\n\nArgs:\ngraph: Object providing graph elements via attributes.\neval_steps: Number of evaluation steps per epoch.\n\nReturns:\nLoop object.", "source": "juraj-google-style"}
{"code": "def option(self, key, value=None, **kwargs):\n    if (not isinstance(self._container, Section)):\n        raise ValueError('Options can only be added inside a section!')\n    option = Option(key, value, container=self._container, **kwargs)\n    option.value = value\n    self._container.structure.insert(self._idx, option)\n    self._idx += 1\n    return self", "docstring": "Creates a new option inside a section\n\nArgs:\nkey (str): key of the option\nvalue (str or None): value of the option\n**kwargs: are passed to the constructor of :class:`Option`\n\nReturns:\nself for chaining", "source": "codesearchnet"}
{"code": "def _reraise_with_traceback(f):\n    \n\n    def wrap(*args, **kwargs):\n        try:\n            return f(*args, **kwargs)\n        except Exception as e:\n            traceback_str = traceback.format_exc()\n            e.traceback = traceback_str\n            raise e\n\n    return wrap", "docstring": "Call the function normally. But if the function raises an error, attach the str(traceback)\ninto the function.traceback attribute, then reraise the error.\nArgs:\nf: The function to run.\n\nReturns: A function that wraps f, attaching the traceback if an error occurred.", "source": "juraj-google-style"}
{"code": "def diff_commonPrefix(self, text1, text2):\n    \n    \n    if not text1 or not text2 or text1[0] != text2[0]:\n      return 0\n    \n    \n    pointermin = 0\n    pointermax = min(len(text1), len(text2))\n    pointermid = pointermax\n    pointerstart = 0\n    while pointermin < pointermid:\n      if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:\n        pointermin = pointermid\n        pointerstart = pointermin\n      else:\n        pointermax = pointermid\n      pointermid = (pointermax - pointermin) \n    return pointermid", "docstring": "Determine the common prefix of two strings.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nThe number of characters common to the start of each string.", "source": "juraj-google-style"}
{"code": "def postprocess_image(x, rows, cols, hparams):\n    batch = common_layers.shape_list(x)[0]\n    x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size])\n    likelihood = getattr(hparams, 'likelihood', DistributionType.CAT)\n    if (likelihood == DistributionType.DMOL):\n        depth = (hparams.num_mixtures * 10)\n        targets = tf.layers.dense(x, depth, use_bias=False, activation=None, name='output_conv')\n    else:\n        depth = 256\n        targets = tf.layers.dense(x, depth, use_bias=True, activation=None, name='output_conv')\n    if ((hparams.mode == tf.estimator.ModeKeys.PREDICT) and hparams.block_raster_scan):\n        y = targets\n        yshape = common_layers.shape_list(y)\n        block_length = hparams.query_shape[0]\n        block_width = hparams.query_shape[1]\n        y = tf.reshape(y, [batch, (yshape[1] \n        yshape = common_layers.shape_list(y)\n        y_blocks = tf.reshape(y, [batch, yshape[1], yshape[2], (yshape[3] \n        targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5])\n    return targets", "docstring": "Postprocessing after decoding.\n\nArgs:\nx: Tensor of shape [batch, ...], where ... can be any rank such that the\nnumber of elements in x is batch * rows * cols * hparams.hidden_size.\nrows: Integer representing number of rows in a 2-D data point.\ncols: Integer representing number of columns in a 2-D data point.\nhparams: HParams set.\n\nReturns:\nTensor of shape [batch, rows, cols, depth], where depth is\nhparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In\nthe special case of inference and block raster scan order, it is a Tensor\nof shape [batch, num_blocks_rows, num_block_cols, block_length, block_width,\ndepth].", "source": "codesearchnet"}
{"code": "def enable(self, information, id_or_uri, timeout=(- 1)):\n    uri = self._client.build_uri(id_or_uri)\n    return self._client.update(information, uri, timeout=timeout)", "docstring": "Enables or disables a range.\n\nArgs:\ninformation (dict): Information to update.\nid_or_uri: ID or URI of range.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "codesearchnet"}
{"code": "def cosine_similarity(y_true, y_pred, axis=-1):\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)\n    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n    y_pred = normalize(y_pred, axis=axis)\n    y_true = normalize(y_true, axis=axis)\n    return ops.sum(y_true * y_pred, axis=axis)", "docstring": "Computes the cosine similarity between labels and predictions.\n\nFormula:\n\n```python\nloss = sum(l2_norm(y_true) * l2_norm(y_pred))\n```\n\nArgs:\ny_true: Tensor of true targets.\ny_pred: Tensor of predicted targets.\naxis: Axis along which to determine similarity. Defaults to `-1`.\n\nReturns:\nCosine similarity tensor.\n\nExample:\n\n>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]\n>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]\n>>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)\n[0., 0.99999994, -0.99999994]", "source": "github-repos"}
{"code": "def read(self, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1') -> bytes:\n    return self.mglo.read(viewport, components, attachment, alignment, dtype)", "docstring": "Read the content of the framebuffer.\n\nArgs:\nviewport (tuple): The viewport.\ncomponents (int): The number of components to read.\n\nKeyword Args:\nattachment (int): The color attachment.\nalignment (int): The byte alignment of the pixels.\ndtype (str): Data type.\n\nReturns:\nbytes", "source": "codesearchnet"}
{"code": "def get_outputs_filtered(self, owner, spent=None):\n        \n        outputs = self.fastquery.get_outputs_by_public_key(owner)\n        if spent is None:\n            return outputs\n        elif spent is True:\n            return self.fastquery.filter_unspent_outputs(outputs)\n        elif spent is False:\n            return self.fastquery.filter_spent_outputs(outputs)", "docstring": "Get a list of output links filtered on some criteria\n\nArgs:\nowner (str): base58 encoded public_key.\nspent (bool): If ``True`` return only the spent outputs. If\n``False`` return only unspent outputs. If spent is\nnot specified (``None``) return all outputs.\n\nReturns:\n:obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s\npointing to another transaction's condition", "source": "juraj-google-style"}
{"code": "def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]]=None):\n    if tensor_type is None:\n        return self\n    is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)\n    for key, value in self.items():\n        try:\n            if not is_tensor(value):\n                tensor = as_tensor(value)\n                self[key] = tensor\n        except:\n            if key == 'overflowing_values':\n                raise ValueError('Unable to create tensor returning overflowing values of different lengths. ')\n            raise ValueError(\"Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.\")\n    return self", "docstring": "Convert the inner content to tensors.\n\nArgs:\ntensor_type (`str` or [`~utils.TensorType`], *optional*):\nThe type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If\n`None`, no modification is done.", "source": "github-repos"}
{"code": "def joinpaths(self, *paths):\n        \n        if sys.version_info >= (3, 6):\n            paths = [os.fspath(path) for path in paths]\n        if len(paths) == 1:\n            return paths[0]\n        if self.is_windows_fs:\n            return self._join_paths_with_drive_support(*paths)\n        joined_path_segments = []\n        sep = self._path_separator(paths[0])\n        for path_segment in paths:\n            if self._starts_with_root_path(path_segment):\n                \n                joined_path_segments = [path_segment]\n            else:\n                if (joined_path_segments and\n                        not joined_path_segments[-1].endswith(sep)):\n                    joined_path_segments.append(sep)\n                if path_segment:\n                    joined_path_segments.append(path_segment)\n        return self._matching_string(paths[0], '').join(joined_path_segments)", "docstring": "Mimic os.path.join using the specified path_separator.\n\nArgs:\n*paths:  (str) Zero or more paths to join.\n\nReturns:\n(str) The paths joined by the path separator, starting with\nthe last absolute path in paths.", "source": "juraj-google-style"}
{"code": "def requires(self, require=None):\n\t\t\n\n\t\t\n\t\tif require is None:\n\t\t\treturn self._requires\n\n\t\t\n\t\tif not isinstance(require, dict):\n\t\t\traise ValueError('__require__')\n\n\t\t\n\t\tfor k,v in iteritems(require):\n\n\t\t\t\n\t\t\tif k not in self._nodes:\n\t\t\t\traise ValueError('__require__[%s]' % str(k))\n\n\t\t\t\n\t\t\tif isinstance(v, basestring):\n\t\t\t\tv = [v]\n\n\t\t\t\n\t\t\telif not isinstance(v, (tuple,list)):\n\t\t\t\traise ValueError('__require__[%s]' % str(k))\n\n\t\t\t\n\t\t\tfor s in v:\n\t\t\t\tif s not in self._nodes:\n\t\t\t\t\traise ValueError('__require__[%s]: %s' % (str(k), str(v)))\n\n\t\t\t\n\t\t\tself._requires[k] = v", "docstring": "Requires\n\nSets the require rules used to validate the Parent\n\nArguments:\nrequire {dict} -- A dictionary expressing requirements of fields\n\nRaises:\nValueError\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def ip_mask(ip_addr_and_mask, return_tuple=True):\n    \n    regex_ip_and_mask = __re.compile(\"^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$\")\n    if return_tuple:\n        while not regex_ip_and_mask.match(ip_addr_and_mask):\n            print(\"Not a good IP and CIDR mask combo.\")\n            print(\"Please try again.\")\n            ip_addr_and_mask = input(\"Please enter a IP address and mask in the follwing format x.x.x.x/x: \")\n        ip_cidr_split = ip_addr_and_mask.split(\"/\")\n        ip_addr = ip_cidr_split[0]\n        cidr = ip_cidr_split[1]\n        return ip_addr, cidr\n    elif not return_tuple:\n        if not regex_ip_and_mask.match(ip_addr_and_mask):\n            return False\n        else:\n            return True", "docstring": "Function to check if a address and CIDR mask is good\nArgs:\nip_addr_and_mask: IP address and mask in the following format 192.168.1.1/24\nreturn_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False\n\nReturns: see return_tuple for return options", "source": "juraj-google-style"}
{"code": "def set_weights(self, weights):\n    params = self.weights\n    if len(params) != len(weights):\n        raise ValueError('Length of the specified weight list (' + str(len(weights)) + ') does not match the number of weights of the optimizer (' + str(len(params)) + ')')\n    weight_value_tuples = []\n    param_values = backend.batch_get_value(params)\n    for pv, p, w in zip(param_values, params, weights):\n        if pv.shape != w.shape:\n            raise ValueError('Optimizer weight shape ' + str(pv.shape) + ' not compatible with provided weight shape ' + str(w.shape))\n        weight_value_tuples.append((p, w))\n    backend.batch_set_value(weight_value_tuples)", "docstring": "Sets the weights of the optimizer, from Numpy arrays.\n\nShould only be called after computing the gradients\n(otherwise the optimizer has no weights).\n\nArgs:\nweights: a list of Numpy arrays. The number of arrays and their shape\nmust match number of the dimensions of the weights of the optimizer\n(i.e. it should match the output of `get_weights`).\n\nRaises:\nValueError: in case of incompatible weight shapes.", "source": "github-repos"}
{"code": "def get_inst_info(qry_string):\n    \n    qry_prefix = \"EC2C.describe_instances(\"\n    qry_real = qry_prefix + qry_string + \")\"\n    qry_results = eval(qry_real)     \n    return qry_results", "docstring": "Get details for instances that match the qry_string.\n\nExecute a query against the AWS EC2 client object, that is\nbased on the contents of qry_string.\n\nArgs:\nqry_string (str): the query to be used against the aws ec2 client.\nReturns:\nqry_results (dict): raw information returned from AWS.", "source": "juraj-google-style"}
{"code": "def is_scalar(value):\n    return (np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0)))", "docstring": "Test if the given value is a scalar.\n\nThis function also works with memory mapped array values, in contrast to the numpy is_scalar method.\n\nArgs:\nvalue: the value to test for being a scalar value\n\nReturns:\nboolean: if the given value is a scalar or not", "source": "codesearchnet"}
{"code": "def _obj_to_path(obj):\n  \n  if obj is None:\n    return obj\n\n  if inspect.isclass(obj) or inspect.isfunction(obj):\n    fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)\n    if fetched is None:\n      raise ValueError(\n          \"Object %r must be defined on the top level of a module.\" % obj)\n    return \"%s.%s\" % (obj.__module__, obj.__name__)\n  raise TypeError(\"Unexpected type %s.\" % type(obj))", "docstring": "Returns the fully qualified path to the object.\n\nArgs:\nobj: obj must be a new style top level class, or a top level function.\nNo inner function or static method.\n\nReturns:\nFully qualified path to the object.\n\nRaises:\nTypeError: when argument obj has unsupported type.\nValueError: when obj can't be discovered on the top level.", "source": "juraj-google-style"}
{"code": "def get_sequence_length_feature_key_name_from_feature_key_name(feature_name):\n    return feature_name + _SEQUENCE_FEATURE_LENGTH_POSTFIX", "docstring": "Gets the name of the sequence length feature from that of the base feature.\n\nArgs:\nfeature_name: The feature key of a sequence column.\n\nReturns:\nA string which is the feature key for the associated feature length column.", "source": "github-repos"}
{"code": "def _output_dir(\n        self,\n        ext,\n        is_instance=False,\n        interpolatable=False,\n        autohinted=False,\n        is_variable=False,\n    ):\n        \n\n        assert not (is_variable and any([is_instance, interpolatable]))\n        \n        if is_variable:\n            dir_prefix = \"variable_\"\n        elif is_instance:\n            dir_prefix = \"instance_\"\n        else:\n            dir_prefix = \"master_\"\n        dir_suffix = \"_interpolatable\" if interpolatable else \"\"\n        output_dir = dir_prefix + ext + dir_suffix\n        if autohinted:\n            output_dir = os.path.join(\"autohinted\", output_dir)\n        return output_dir", "docstring": "Generate an output directory.\n\nArgs:\next: extension string.\nis_instance: The output is instance font or not.\ninterpolatable: The output is interpolatable or not.\nautohinted: The output is autohinted or not.\nis_variable: The output is variable font or not.\nReturn:\noutput directory string.", "source": "juraj-google-style"}
{"code": "def convert_elementwise_sub(\n    params, w_name, scope_name, inputs, layers, weights, names\n):\n    \n    print('Converting elementwise_sub ...')\n    model0 = layers[inputs[0]]\n    model1 = layers[inputs[1]]\n\n    if names == 'short':\n        tf_name = 'S' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    sub = keras.layers.Subtract(name=tf_name)\n    layers[scope_name] = sub([model0, model1])", "docstring": "Convert elementwise subtraction.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def _flag_value_as_list(self, wanted_flag_name):\n    string_value_list = []\n    found, flag_value = self.get_flag_value(wanted_flag_name)\n    if found:\n        assert flag_value is not None\n        string_value_list = flag_value.split(',')\n    return string_value_list", "docstring": "Returns the string list of a TensorTracer flag.\n\nArgs:\nwanted_flag_name: the name of the flag we are looking for.\n\nReturns:\nThe list value of the flag.", "source": "github-repos"}
{"code": "def map_shape_structure(func, structure):\n    return tree_impl.map_shape_structure(func, structure)", "docstring": "Variant of keras.tree.map_structure that operates on shape tuples.\n\nTuples containing ints and Nones are considered shapes and passed to `func`.\n\nArgs:\nstructure: Arbitrarily nested structure.\n\nReturns:\nThe same structure with `func` applied.", "source": "github-repos"}
{"code": "def detect_response_encoding(response, is_html=False, peek=131072):\n    \n    encoding = get_heading_encoding(response)\n\n    encoding = wpull.string.detect_encoding(\n        wpull.util.peek_file(response.body, peek), encoding=encoding, is_html=is_html\n    )\n\n    _logger.debug(__('Got encoding: {0}', encoding))\n\n    return encoding", "docstring": "Return the likely encoding of the response document.\n\nArgs:\nresponse (Response): An instance of :class:`.http.Response`.\nis_html (bool): See :func:`.util.detect_encoding`.\npeek (int): The maximum number of bytes of the document to be analyzed.\n\nReturns:\n``str``, ``None``: The codec name.", "source": "juraj-google-style"}
{"code": "def _build_case(branch_index, branch_graphs, branch_inputs, name=None, lower_using_switch_merge=None):\n    _make_indexed_slices_indices_types_match(_CASE, branch_graphs)\n    _check_same_outputs(_CASE, branch_graphs)\n    case_inputs = _make_inputs_match(branch_graphs, branch_inputs)\n    stateful_ops = []\n    for bg in branch_graphs:\n        stateful_ops.extend([op for op in bg.get_operations() if auto_control_deps.op_is_stateful(op)])\n    if stateful_ops:\n        op_fn = gen_functional_ops.case\n    else:\n        op_fn = gen_functional_ops.stateless_case\n    with ops.control_dependencies(sum((list(bg.function_captures.control) for bg in branch_graphs), [])):\n\n        def _make_op(inputs):\n            case_op, tensors = util.get_op_and_outputs(op_fn(branch_index, inputs, [t.dtype for t in branch_graphs[0].outputs], [util.create_new_tf_function(g) for g in branch_graphs], output_shapes=_get_output_shapes(*[g.outputs for g in branch_graphs]), name=name))\n            _copy_handle_data(tensors, *[g.outputs for g in branch_graphs])\n            if case_op is not None:\n                util.maybe_set_lowering_attr(case_op, lower_using_switch_merge)\n                util.maybe_propagate_compile_time_consts_in_xla(case_op)\n                _set_read_only_resource_inputs_attr(case_op, branch_graphs)\n                case_op.graph.prevent_fetching(case_op)\n                for i, bg in enumerate(branch_graphs):\n                    bg.outer_graph = ops.get_default_graph()\n                    setattr(case_op, '_branch_graph_{}'.format(i), bg)\n            return tensors\n        tensors = util.run_as_function_for_tape_gradients(_make_op, case_inputs)\n    tensors = [array_ops.identity(t) for t in tensors]\n    return _pack_sequence_as(branch_graphs[0].structured_outputs, tensors)", "docstring": "Creates an `Case` op from `branch_index`, branch graphs and inputs.\n\nNote that this modifies `branch_graphs` to make the inputs match, and to\noutput all intermediates values so they're available for the gradient\ncomputation.\n\n`branch_graphs` need not have the same input types, but they must\nhave the same output types.\n\nArgs:\nbranch_index: integer Tensor\nbranch_graphs: List of FuncGraph\nbranch_inputs: List of lists of Tensors to be passed to corresponding\nbranch_graph as input.\nname: the name for the Case op.\nlower_using_switch_merge: Lower this op using switch merge ops (optional).\n\nReturns:\nA list of Tensors which are the outputs of the Case op. Does not include\nadded intermediate outputs.", "source": "github-repos"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Delete an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsGithubEnterpriseConfigsDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def Process(self, parser_mediator, plist_name, top_level, **kwargs):\n    \n    if not plist_name.startswith(self.PLIST_PATH):\n      raise errors.WrongPlistPlugin(self.NAME, plist_name)\n    super(AppleAccountPlugin, self).Process(\n        parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)", "docstring": "Check if it is a valid Apple account plist file name.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nplist_name (str): name of the plist.\ntop_level (dict[str, object]): plist top-level key.", "source": "juraj-google-style"}
{"code": "def _parse_ospf_process_id(self, config):\n    match = re.search('^router ospf (\\\\d+)', config)\n    return dict(ospf_process_id=int(match.group(1)))", "docstring": "Parses config file for the OSPF proc ID\n\nArgs:\nconfig(str):  Running configuration\nReturns:\ndict: key: ospf_process_id (int)", "source": "codesearchnet"}
{"code": "def __init__(self, action, *, payload=None):\n        \n        self.action = action\n        self.payload = payload if payload is not None else {}\n        self.uid = uuid.uuid4()", "docstring": "Initialise the request object.\n\nArgs:\naction (str): A string representing the requested action that should be\nexecuted by the server.\npayload (dict): A dictionary with data that is available to the action.", "source": "juraj-google-style"}
{"code": "def replace_symbols(text, form='NFKD', excluded=None, replacement=''):\n    if (excluded is None):\n        excluded = set()\n    categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])\n    return ''.join(((c if ((unicodedata.category(c) not in categories) or (c in excluded)) else replacement) for c in unicodedata.normalize(form, text)))", "docstring": "Replace symbols in text.\n\nRemoves symbols from input text or replaces them with a\nstring if specified.\n\nArgs:\ntext: The text to be processed.\nform: Unicode form.\nexcluded: Set of unicode characters to exclude.\nreplacement: New text that will replace symbols.\n\nReturns:\nThe text without symbols.", "source": "codesearchnet"}
{"code": "def pop(self, name, defval=None):\n    valu = self.info.pop(name, defval)\n    lkey = (self.pref + name.encode('utf8'))\n    self.slab.pop(lkey, db=self.db)\n    return valu", "docstring": "Pop a name from the SlabDict.\n\nArgs:\nname (str): The name to remove.\ndefval (obj): The default value to return if the name is not present.\n\nReturns:\nobject: The object stored in the SlabDict, or defval if the object was not present.", "source": "codesearchnet"}
{"code": "def _embedding_dim(vocab_size):\n    if ((not vocab_size) or (vocab_size <= 0)):\n        raise ValueError(('Invalid vocab_size %g.' % vocab_size))\n    return int(round((6.0 * math.sqrt(math.sqrt(vocab_size)))))", "docstring": "Calculate a reasonable embedding size for a vocabulary.\n\nRule of thumb is 6 * 4th root of vocab_size.\n\nArgs:\nvocab_size: Size of the input vocabulary.\nReturns:\nThe embedding size to use.\nRaises:\nValueError: if `vocab_size` is invalid.", "source": "codesearchnet"}
{"code": "def mlir_sparsify(input_data_str):\n    return wrap_converter.wrapped_experimental_mlir_sparsify(input_data_str)", "docstring": "Sparsify `input_data_str` to encode sparse tensor with proper format.\n\nArgs:\ninput_data_str: Input data in serialized form (e.g. a TFLITE model).\n\nReturns:\nSparsified model in serialized form (e.g. a TFLITE model).", "source": "github-repos"}
{"code": "def _eager_run_fn(fn: PartFn, part: _T) -> AsyncIterable[_T]:\n    q = asyncio.Queue[_T | _FinishedT]()\n\n    async def call_fn():\n        async for c in fn(part):\n            q.put_nowait(c)\n        q.put_nowait(_Finished)\n    context.create_task(call_fn())\n\n    async def result_iter():\n        while (c := (await q.get())) is not _Finished:\n            yield c\n    return result_iter()", "docstring": "Executes fn on part in an asyncio.task.\n\nMust be called called in an async context. It eagerly schedules a task on\nthe event loop to execute the whole of `fn` on the part. Results from the\nAsyncIterable returned by `fn` can be retrieved via the AsyncIterable returned\nby this method.\n\nArgs:\nfn: the part function to execute on the part.\npart: the part to execute the function on.\n\nReturns:\nAn AsyncIterable that can be used to retrieve the results of `fn` on `part`\nin order.\n\nNOTE: this method is non-blocking.", "source": "github-repos"}
{"code": "def trading_dates(start, end, calendar='US'):\n    \n    kw = dict(start=pd.Timestamp(start, tz='UTC').date(), end=pd.Timestamp(end, tz='UTC').date())\n    us_cal = getattr(sys.modules[__name__], f'{calendar}TradingCalendar')()\n    return pd.bdate_range(**kw).drop(us_cal.holidays(**kw))", "docstring": "Trading dates for given exchange\n\nArgs:\nstart: start date\nend: end date\ncalendar: exchange as string\n\nReturns:\npd.DatetimeIndex: datetime index\n\nExamples:\n>>> bus_dates = ['2018-12-24', '2018-12-26', '2018-12-27']\n>>> trd_dates = trading_dates(start='2018-12-23', end='2018-12-27')\n>>> assert len(trd_dates) == len(bus_dates)\n>>> assert pd.Series(trd_dates == pd.DatetimeIndex(bus_dates)).all()", "source": "juraj-google-style"}
{"code": "def unbroadcast_numpy_to(array, shape):\n  \n  axis = create_unbroadcast_axis(shape, numpy.shape(array))\n  return numpy.reshape(numpy.sum(array, axis=axis), shape)", "docstring": "Reverse the broadcasting operation.\n\nArgs:\narray: An array.\nshape: A shape that could have been broadcasted to the shape of array.\n\nReturns:\nArray with dimensions summed to match `shape`.", "source": "juraj-google-style"}
{"code": "def post(fqdn, package, result, entry, bound, ekey, *argl, **argd):\n    global _atdepth_call, _cstack_call\n    _cstack_call.pop()\n    if (len(_cstack_call) == 0):\n        _atdepth_call = False\n    r = _post_call(_atdepth_call, package, fqdn, result, entry, bound, ekey, argl, argd)\n    return r", "docstring": "Adds logging for the post-call result of calling the method externally.\n\nArgs:\nfqdn (str): fully-qualified domain name of the function being logged.\npackage (str): name of the package we are logging for. Usually the first\nelement of `fqdn.split('.')`.\nresult: returned from calling the method we are logging.\nentry (dict): one of the values returned by :func:`pre`.\nbound (bool): true if the method is bound.\nekey (str): key under which to store the entry in the database.", "source": "codesearchnet"}
{"code": "def earliest_date(dates, full_date=False):\n    \n    min_date = min(PartialDate.loads(date) for date in dates)\n    if not min_date.month and full_date:\n        min_date.month = 1\n    if not min_date.day and full_date:\n        min_date.day = 1\n    return min_date.dumps()", "docstring": "Return the earliest among the schema-compliant dates.\n\nThis is a convenience wrapper around :ref:`PartialDate`, which should be\nused instead if more features are needed.\n\nArgs:\ndates(list): List of dates from which oldest/earliest one will be returned\nfull_date(bool): Adds month and/or day as \"01\" if they are missing\nReturns:\nstr: Earliest date from provided list", "source": "juraj-google-style"}
{"code": "def get_pattern_step_time(self, patternnumber, stepnumber):\n        \n        _checkPatternNumber(patternnumber)\n        _checkStepNumber(stepnumber)\n        \n        address = _calculateRegisterAddress('time', patternnumber, stepnumber)\n        return self.read_register(address, 0)", "docstring": "Get the step time.\n\nArgs:\n* patternnumber (integer): 0-7\n* stepnumber (integer): 0-7\n\nReturns:\nThe step time (int??).", "source": "juraj-google-style"}
{"code": "def assemble(cls, header_json, metadata_json, content_json):\n        \n\n        try:\n            header = json_decode(header_json)\n        except ValueError:\n            raise MessageError(\"header could not be decoded\")\n\n        try:\n            metadata = json_decode(metadata_json)\n        except ValueError:\n            raise MessageError(\"metadata could not be decoded\")\n\n        try:\n            content = json_decode(content_json)\n        except ValueError:\n            raise MessageError(\"content could not be decoded\")\n\n        msg = cls(header, metadata, content)\n\n        msg._header_json = header_json\n        msg._metadata_json = metadata_json\n        msg._content_json = content_json\n\n        return msg", "docstring": "Creates a new message, assembled from JSON fragments.\n\nArgs:\nheader_json (``JSON``) :\n\nmetadata_json (``JSON``) :\n\ncontent_json (``JSON``) :\n\nReturns:\nMessage subclass\n\nRaises:\nMessageError", "source": "juraj-google-style"}
{"code": "def _remove_squeezable_dimensions(labels, predictions, weights=None, expected_rank_diff=0):\n    labels, predictions = confusion_matrix.remove_squeezable_dimensions(labels, predictions, expected_rank_diff=expected_rank_diff)\n    if weights is not None:\n        weights = ops.convert_to_tensor(weights)\n        labels_rank = labels.get_shape().ndims\n        weights_shape = weights.get_shape()\n        weights_rank = weights_shape.ndims\n        if labels_rank is not None and weights_rank is not None:\n            rank_diff = weights_rank - labels_rank\n            if rank_diff == 1:\n                weights = array_ops.squeeze(weights, [-1])\n            return (labels, predictions, weights)\n        rank_diff = array_ops.rank(weights) - array_ops.rank(labels)\n        if weights_rank is None or (weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):\n            weights = cond.cond(math_ops.equal(1, rank_diff), lambda: array_ops.squeeze(weights, [-1]), lambda: weights)\n    return (labels, predictions, weights)", "docstring": "Internal version of _remove_squeezable_dimensions which handles weights.\n\nSqueezes `predictions` and `labels` if their ranks differ from expected by\nexactly 1.\nSqueezes `weights` if its rank is 1 more than the new rank of `predictions`\n\nThis will use static shape if available. Otherwise, it will add graph\noperations, which could result in a performance hit.\n\nArgs:\nlabels: Label values, a `Tensor` whose dimensions match `predictions`.\npredictions: Predicted values, a `Tensor` of arbitrary dimensions.\nweights: Optional weight `Tensor`. It will be squeezed if it's not scalar,\nand its rank is 1 more than the new rank of `labels`.\nexpected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.\n\nReturns:\nTuple of `predictions`, `labels` and `weights`, possibly with the last\ndimension squeezed.", "source": "github-repos"}
{"code": "def fleet_id_to_slug(did):\n    \n\n    try:\n        fleet_slug = IOTileFleetSlug(did)\n    except ValueError:\n        raise ArgumentError(\"Unable to recognize {} as a fleet id\".format(did))\n\n    return str(fleet_slug)", "docstring": "Converts a fleet id into a correct fleet slug.\n\nArgs:\ndid (long) : A fleet id\ndid (string) : A device slug in the form of XXXX, XXXX-XXXX-XXXX, g--XXXX, g--XXXX-XXXX-XXXX\nReturns:\nstr: The device slug in the g--XXXX-XXXX-XXX format\nRaises:\nArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string", "source": "juraj-google-style"}
{"code": "def partitions_for_topic(self, topic):\n        \n        if topic not in self._partitions:\n            return None\n        return set(self._partitions[topic].keys())", "docstring": "Return set of all partitions for topic (whether available or not)\n\nArguments:\ntopic (str): topic to check for partitions\n\nReturns:\nset: {partition (int), ...}", "source": "juraj-google-style"}
{"code": "def _get_fields(mcs, bases, namespace):\n    fields = [(name, namespace.pop(name)) for (name, attribute) in list(namespace.items()) if isinstance(attribute, BaseField)]\n    for base in reversed(bases):\n        if hasattr(base, mcs._fields_storage_key):\n            fields = (list(getattr(base, mcs._fields_storage_key).items()) + fields)\n    return OrderedDict(fields)", "docstring": "Create fields dictionary to be used in resource class namespace.\n\nPop all field objects from attributes dict (namespace) and store them\nunder _field_storage_key atrribute. Also collect all fields from base\nclasses in order that ensures fields can be overriden.\n\nArgs:\nbases: all base classes of created serializer class\nnamespace (dict): namespace as dictionary of attributes", "source": "codesearchnet"}
{"code": "def _load_chunk(dat_path, cat_path, info_path):\n    dat_array = read_binary_matrix(dat_path)\n    dat_array = np.expand_dims(dat_array, (- 1))\n    cat_array = read_binary_matrix(cat_path)\n    info_array = read_binary_matrix(info_path)\n    info_array = np.copy(info_array)\n    info_array[(:, 2)] = (info_array[(:, 2)] / 2)\n    return (dat_array, cat_array, info_array)", "docstring": "Loads a data chunk as specified by the paths.\n\nArgs:\ndat_path: Path to dat file of the chunk.\ncat_path: Path to cat file of the chunk.\ninfo_path: Path to info file of the chunk.\n\nReturns:\nTuple with the dat, cat, info_arrays.", "source": "codesearchnet"}
{"code": "def plogdet(K):\n    r\n    egvals = eigvalsh(K)\n    return npsum(log(egvals[egvals > epsilon]))", "docstring": "r\"\"\"Log of the pseudo-determinant.\n\nIt assumes that ``K`` is a positive semi-definite matrix.\n\nArgs:\nK (array_like): matrix.\n\nReturns:\nfloat: log of the pseudo-determinant.", "source": "juraj-google-style"}
{"code": "def make_fake_movie(nframes, mask_shape=(64, 64), mask_center=None, bg_intensity=0.1, mask_sigma=10, dt=0.02, rate=1.0, tau=1.0, sigma=0.001, seed=None):\n    gen = np.random.RandomState(seed)\n    n = gen.poisson((rate * dt), size=nframes)\n    gamma = np.exp(((- dt) / tau))\n    c = signal.lfilter(np.r_[1], np.r_[(1, (- gamma))], n, axis=0)\n    (nr, nc) = mask_shape\n    npix = (nr * nc)\n    if (mask_center is None):\n        mask_center = ((nc \n    (a, b) = mask_center\n    (y, x) = np.ogrid[(:nr, :nc)]\n    xs = ((x - a) ** 2.0)\n    ys = ((y - b) ** 2.0)\n    twoss = (2.0 * (mask_sigma ** 2.0))\n    alpha = np.exp(((- 1) * ((xs / twoss) + (ys / twoss)))).ravel()\n    alpha /= alpha.sum()\n    beta = (gen.randn(npix) * bg_intensity)\n    lamb = rate\n    epsilon = (gen.randn(npix, nframes) * sigma)\n    F = (((c[(None, :)] * alpha[(:, None)]) + beta[(:, None)]) + epsilon)\n    theta = (sigma, alpha, beta, lamb, gamma)\n    return (F, c, n, theta)", "docstring": "Generate 2D fake fluorescence movie\n\nArguments:\n---------------------------------------------------------------------------\nnframes:        number of timebins to simulate\nmask_shape:     tuple (nrows, ncols), shape of a single movie frame\nmask_center:    tuple (x, y), pixel coords of cell center\nbg_intensity:   scalar, amplitude of (static) baseline fluorescence\nmask_sigma:     scalar, standard deviation of Gaussian mask\ndt:             timestep (s)\nrate:           mean spike rate (Hz)\ntau:            time constant of decay in calcium concentration (s)\nsigma:          SD of additive noise on fluorescence\nseed:           Seed for RNG\n\nReturns:\n---------------------------------------------------------------------------\nF:          fluorescence [npixels, nframes]\nc:          calcium concentration [nframes,]\nn:          spike train [nframes,]\ntheta:      tuple of true model parameters:\n(sigma, alpha, beta, lambda, gamma)", "source": "codesearchnet"}
{"code": "def single_slice_dim(self, shape):\n    if not isinstance(shape, (tuple, list)):\n        raise TypeError('`shape` must be a sequence (like tuple or list) instead of ' + type(shape).__name__)\n    if len(shape) != len(self.full_shape):\n        raise ValueError('Expected equal length, but received shape={} of length {} while self.full_shape={} is of length {}.'.format(shape, len(shape), self.full_shape, len(self.full_shape)))\n    for i in range(len(shape)):\n        if self.var_offset[i] + shape[i] > self.full_shape[i]:\n            raise ValueError('With self.var_offset={}, a partition of shape={} would exceed self.full_shape={} in dimension {}.'.format(self.var_offset, shape, self.full_shape, i))\n    slice_dim = None\n    for i in range(len(shape)):\n        if shape[i] == self.full_shape[i]:\n            continue\n        if slice_dim is not None:\n            raise ValueError('Cannot use single_slice_dim() with shape={} and self.full_shape={} since slice dim could be either dimension {} or {}.'.format(shape, self.full_shape, i, slice_dim))\n        slice_dim = i\n    return slice_dim", "docstring": "Returns the slice dim when the variable is partitioned only in one dim.\n\nArgs:\nshape: Tuple or list of `int` indicating the shape of one specific\nvariable partition.\n\nReturns:\n`int` representing the dimension that the variable is partitioned in, or\n`None` if the variable doesn't seem to be partitioned at all.\n\nRaises:\nTypeError: If `shape` is not a sequence.\nValueError: If `shape` is not the same length as `self.full_shape`. If\nthe variable is partitioned in more than one dimension.", "source": "github-repos"}
{"code": "def __setitem__(self, key: Union[str, int], value: Any) -> None:\n    if not hasattr(self, '_sym_parent'):\n        return\n    if base.treats_as_sealed(self):\n        raise base.WritePermissionError(self._error_message('Cannot modify field of a sealed Dict.'))\n    if not base.writtable_via_accessors(self):\n        raise base.WritePermissionError(self._error_message(\"Cannot modify Dict field by attribute or key while accessor_writable is set to False. Use 'rebind' method instead.\"))\n    update = self._set_item_without_permission_check(key, value)\n    if flags.is_change_notification_enabled() and update:\n        self._notify_field_updates([update])", "docstring": "Set item in this Dict.\n\nArgs:\nkey: String key. (Please be noted that key path is not supported.)\nvalue: Value to be inserted.\n\nRaises:\nWritePermissionError: when Dict cannot be modified by accessor or\nis sealed.\nKeyError: Key is not allowed according to the value spec.\nValueError: Value is not acceptable according to the value spec.", "source": "github-repos"}
{"code": "def put(self, dash_id=0):\n    data = request.get_json()\n    updated = self._update_dash(dash_id, data)\n    return build_response(dict(data=updated, code=200))", "docstring": "Update a dash meta and content, return updated dash content.\n\nArgs:\ndash_id: dashboard id.\n\nReturns:\nA dict containing the updated content of that dashboard, not include the meta info.", "source": "codesearchnet"}
{"code": "def get_student_current_grades(self, username, course_ids=None):\n    if (course_ids is None):\n        enrollments_client = CourseEnrollments(self.requester, self.base_url)\n        enrollments = enrollments_client.get_student_enrollments()\n        course_ids = list(enrollments.get_enrolled_course_ids())\n    all_current_grades = []\n    for course_id in course_ids:\n        try:\n            all_current_grades.append(self.get_student_current_grade(username, course_id))\n        except HTTPError as error:\n            if (error.response.status_code >= 500):\n                raise\n    return CurrentGradesByUser(all_current_grades)", "docstring": "Returns a CurrentGradesByUser object with the user current grades.\n\nArgs:\nusername (str): an edx user's username\ncourse_ids (list): a list of edX course ids.\n\nReturns:\nCurrentGradesByUser: object representing the student current grades", "source": "codesearchnet"}
{"code": "def _construct_location_to_filter_list(match_query):\n    location_to_filters = {}\n    for match_traversal in match_query.match_traversals:\n        for match_step in match_traversal:\n            current_filter = match_step.where_block\n            if (current_filter is not None):\n                current_location = match_step.as_block.location\n                location_to_filters.setdefault(current_location, []).append(current_filter)\n    return location_to_filters", "docstring": "Return a dict mapping location -> list of filters applied at that location.\n\nArgs:\nmatch_query: MatchQuery object from which to extract location -> filters dict\n\nReturns:\ndict mapping each location in match_query to a list of\nFilter objects applied at that location", "source": "codesearchnet"}
{"code": "def remove_all_servers(self):\n    for server_id in list(self._servers.keys()):\n        self.remove_server(server_id)", "docstring": "Remove all registered WBEM servers from the subscription manager. This\nalso unregisters listeners from these servers and removes all owned\nindication subscriptions, owned indication filters, and owned listener\ndestinations.\n\nRaises:\n\nExceptions raised by :class:`~pywbem.WBEMConnection`.", "source": "codesearchnet"}
{"code": "def humanize_time_delta(sec):\n    if (sec < 0):\n        logger.warn('humanize_time_delta() obtains negative seconds!')\n        return '{:.3g} seconds'.format(sec)\n    if (sec == 0):\n        return '0 second'\n    time = (datetime(2000, 1, 1) + timedelta(seconds=int(sec)))\n    units = ['day', 'hour', 'minute', 'second']\n    vals = [int((sec \n    if (sec < 60):\n        vals[(- 1)] = sec\n\n    def _format(v, u):\n        return '{:.3g} {}{}'.format(v, u, ('s' if (v > 1) else ''))\n    ans = []\n    for (v, u) in zip(vals, units):\n        if (v > 0):\n            ans.append(_format(v, u))\n    return ' '.join(ans)", "docstring": "Humanize timedelta given in seconds\n\nArgs:\nsec (float): time difference in seconds. Must be positive.\n\nReturns:\nstr - time difference as a readable string\n\nExample:\n\n.. code-block:: python\n\nprint(humanize_time_delta(1))                                   # 1 second\nprint(humanize_time_delta(60 + 1))                              # 1 minute 1 second\nprint(humanize_time_delta(87.6))                                # 1 minute 27 seconds\nprint(humanize_time_delta(0.01))                                # 0.01 seconds\nprint(humanize_time_delta(60 * 60 + 1))                         # 1 hour 1 second\nprint(humanize_time_delta(60 * 60 * 24 + 1))                    # 1 day 1 second\nprint(humanize_time_delta(60 * 60 * 24 + 60 * 2 + 60*60*9 + 3)) # 1 day 9 hours 2 minutes 3 seconds", "source": "codesearchnet"}
{"code": "def delete_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,\n                        '/securityRules/', nsg_rule_name,\n                        '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete network security group rule.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnsg_name (str): Name of the Network Security Group.\nnsg_rule_name (str): Name of the NSG rule.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def help_members(obj, use_other=False):\n    import utool as ut\n    attrnames = dir(obj)\n    attr_list = [getattr(obj, attrname) for attrname in attrnames]\n    attr_types = ut.lmap(ut.type_str, map(type, attr_list))\n    (unique_types, groupxs) = ut.group_indices(attr_types)\n    type_to_items = ut.dzip(unique_types, ut.apply_grouping(attr_list, groupxs))\n    type_to_itemname = ut.dzip(unique_types, ut.apply_grouping(attrnames, groupxs))\n    memtypes = ['instancemethod']\n    func_mems = ut.dict_subset(type_to_items, memtypes, [])\n    func_list = ut.flatten(func_mems.values())\n    defsig_list = []\n    num_unbound_args_list = []\n    num_args_list = []\n    for func in func_list:\n        argspec = ut.get_func_argspec(func)\n        args = argspec.args\n        unbound_args = get_unbound_args(argspec)\n        defsig = ut.func_defsig(func)\n        defsig_list.append(defsig)\n        num_unbound_args_list.append(len(unbound_args))\n        num_args_list.append(len(args))\n    group = ut.hierarchical_group_items(defsig_list, [num_unbound_args_list, num_args_list])\n    print(repr(obj))\n    print(ut.repr3(group, strvals=True))\n    if use_other:\n        other_mems = ut.delete_keys(type_to_items.copy(), memtypes)\n        other_mems_attrnames = ut.dict_subset(type_to_itemname, other_mems.keys())\n        named_other_attrs = ut.dict_union_combine(other_mems_attrnames, other_mems, (lambda x, y: list(zip(x, y))))\n        print(ut.repr4(named_other_attrs, nl=2, strvals=True))", "docstring": "r\"\"\"\nInspects members of a class\n\nArgs:\nobj (class or module):\n\nCommandLine:\npython -m utool.util_inspect help_members\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_inspect import *  # NOQA\n>>> import utool as ut\n>>> obj = ut.DynStruct\n>>> result = help_members(obj)\n>>> print(result)", "source": "codesearchnet"}
{"code": "def is_supergroup(self, subgroup):\n        \n        warnings.warn(\"This is not fully functional. Only trivial subsets are \"\n                      \"tested right now. \")\n        return set(subgroup.symmetry_ops).issubset(self.symmetry_ops)", "docstring": "True if this group is a supergroup of the supplied group.\n\nArgs:\nsubgroup (SymmetryGroup): Subgroup to test.\n\nReturns:\nTrue if this group is a supergroup of the supplied group.", "source": "juraj-google-style"}
{"code": "def description(self, description):\n    self._data['description'] = description\n    request = self._base_request\n    request['description'] = description\n    return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels description.\n\nArgs:\ndescription:", "source": "codesearchnet"}
{"code": "def _exec_procedure_func(self, func, tr_record):\n    func_name = func.__name__\n    procedure_name = func_name[1:] if func_name[0] == '_' else func_name\n    with self._log_test_stage(procedure_name):\n        try:\n            func(copy.deepcopy(tr_record))\n        except signals.TestAbortSignal:\n            raise\n        except Exception as e:\n            logging.exception('Exception happened when executing %s for %s.', procedure_name, self.current_test_info.name)\n            tr_record.add_error(procedure_name, e)", "docstring": "Executes a procedure function like on_pass, on_fail etc.\n\nThis function will alter the 'Result' of the test's record if\nexceptions happened when executing the procedure function, but\nprevents procedure functions from altering test records themselves\nby only passing in a copy.\n\nThis will let signals.TestAbortAll through so abort_all works in all\nprocedure functions.\n\nArgs:\nfunc: The procedure function to be executed.\ntr_record: The TestResultRecord object associated with the test\nexecuted.", "source": "github-repos"}
{"code": "def solid_named(self, name):\n    check.str_param(name, 'name')\n    if (name not in self._solid_dict):\n        raise DagsterInvariantViolationError('Pipeline {pipeline_name} has no solid named {name}.'.format(pipeline_name=self.name, name=name))\n    return self._solid_dict[name]", "docstring": "Return the solid named \"name\". Throws if it does not exist.\n\nArgs:\nname (str): Name of solid\n\nReturns:\nSolidDefinition: SolidDefinition with correct name.", "source": "codesearchnet"}
{"code": "def parse_objective_coefficient(entry):\n    \n    for parameter in entry.kinetic_law_reaction_parameters:\n        pid, name, value, units = parameter\n        if (pid == 'OBJECTIVE_COEFFICIENT' or\n                name == 'OBJECTIVE_COEFFICIENT'):\n            return value\n\n    return None", "docstring": "Return objective value for reaction entry.\n\nDetect objectives that are specified using the non-standardized\nkinetic law parameters which are used by many pre-FBC SBML models. The\nobjective coefficient is returned for the given reaction, or None if\nundefined.\n\nArgs:\nentry: :class:`SBMLReactionEntry`.", "source": "juraj-google-style"}
{"code": "def sample_frame_indices(clip_len, frame_sample_rate, seg_len):\n    converted_len = int(clip_len * frame_sample_rate)\n    end_idx = np.random.randint(converted_len, seg_len)\n    start_idx = end_idx - converted_len\n    indices = np.linspace(start_idx, end_idx, num=clip_len)\n    indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)\n    return indices", "docstring": "Sample a given number of frame indices from the video.\n\nArgs:\nclip_len (`int`): Total number of frames to sample.\nframe_sample_rate (`int`): Sample every n-th frame.\nseg_len (`int`): Maximum allowed index of sample's last frame.\n\nReturns:\nindices (`List[int]`): List of sampled frame indices", "source": "github-repos"}
{"code": "def get_plugin(self, identifier, cls=None):\n    if (((cls is None) or (cls == 'provider')) and (identifier in self.available_providers)):\n        return self.available_providers[identifier]\n    elif (((cls is None) or (cls == 'checker')) and (identifier in self.available_checkers)):\n        return self.available_checkers[identifier]\n    return Config.load_local_plugin(identifier)", "docstring": "Return the plugin corresponding to the given identifier and type.\n\nArgs:\nidentifier (str): identifier of the plugin.\ncls (str): one of checker / provider.\n\nReturns:\nChecker/Provider: plugin class.", "source": "codesearchnet"}
{"code": "def smoothing_cross_entropy_factored(a, b, labels, confidence):\n  \n  num_splits = 16\n  vocab_size = shape_list(b)[0]\n  labels = approximate_split(labels, num_splits)\n  a = approximate_split(a, num_splits)\n  parts = []\n  for part in range(num_splits):\n    with tf.control_dependencies(parts[-1:]):\n      logits = tf.matmul(a[part], b, transpose_b=True)\n      parts.append(\n          smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))\n  return tf.concat(parts, 0)", "docstring": "Memory-efficient computation of smoothing cross-entropy.\n\nAvoids realizing the entire logits matrix at once.\n\nArgs:\na: a Tensor with shape [batch, inner_dim]\nb: a Tensor with shape [vocab_size, inner_dim]\nlabels: an integer Tensor with shape [batch]\nconfidence: a float\n\nReturns:\nA Tensor with shape [batch]", "source": "juraj-google-style"}
{"code": "def _finalize_func(string_handle):\n    iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(string_handle, **self._input_dataset._flat_structure)\n    with ops.control_dependencies([resource_variable_ops.destroy_resource_op(iterator_resource, ignore_lookup_error=True)]):\n        return array_ops.constant(0, dtypes.int64)", "docstring": "Destroys the iterator resource created.\n\nArgs:\nstring_handle: An iterator string handle created by _init_func\nReturns:\nTensor constant 0", "source": "github-repos"}
{"code": "def _add_scalar(self, scalar):\n        \n        encoded = EncodedNumber.encode(self.public_key, scalar,\n                                       max_exponent=self.exponent)\n\n        return self._add_encoded(encoded)", "docstring": "Returns E(a + b), given self=E(a) and b.\n\nArgs:\nscalar: an int or float b, to be added to `self`.\n\nReturns:\nEncryptedNumber: E(a + b), calculated by encrypting b and\ntaking the product of E(a) and E(b) modulo\n:attr:`~PaillierPublicKey.n` ** 2.\n\nRaises:\nValueError: if scalar is out of range or precision.", "source": "juraj-google-style"}
{"code": "def maybe_download_and_extract_dataset(self, data_url, dest_directory):\n    if not data_url:\n        return\n    if not gfile.Exists(dest_directory):\n        os.makedirs(dest_directory)\n    filename = data_url.split('/')[-1]\n    filepath = os.path.join(dest_directory, filename)\n    if not gfile.Exists(filepath):\n\n        def _progress(count, block_size, total_size):\n            sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))\n            sys.stdout.flush()\n        try:\n            filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n        except:\n            tf.compat.v1.logging.error('Failed to download URL: {0} to folder: {1}. Please make sure you have enough free space and an internet connection'.format(data_url, filepath))\n            raise\n        print()\n        statinfo = os.stat(filepath)\n        tf.compat.v1.logging.info('Successfully downloaded {0} ({1} bytes)'.format(filename, statinfo.st_size))\n        tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "docstring": "Download and extract data set tar file.\n\nIf the data set we're using doesn't already exist, this function\ndownloads it from the TensorFlow.org website and unpacks it into a\ndirectory.\nIf the data_url is none, don't download anything and expect the data\ndirectory to contain the correct files already.\n\nArgs:\ndata_url: Web location of the tar file containing the data set.\ndest_directory: File path to extract data to.", "source": "github-repos"}
{"code": "def _prepare_feed_values(model, inputs, targets, sample_weights, mode):\n    strategy = model._distribution_strategy\n    inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)\n    if backend.is_tpu_strategy(strategy):\n        if sample_weights is not None:\n            raise ValueError('TPUStrategy does not support sample weights.')\n    if isinstance(inputs, dict):\n        inputs = [inputs[key] for key in model._feed_input_names]\n    if is_distributing_by_cloning(model):\n        inputs = flatten_per_replica_values(strategy, inputs)\n        targets = flatten_per_replica_values(strategy, targets)\n        inputs, targets = nest.map_structure(training_utils_v1.standardize_single_array, (inputs, targets))\n    else:\n        inputs = training_utils_v1.ModelInputs(inputs).as_list()\n    if mode == ModeKeys.PREDICT:\n        sample_weights = []\n        targets = []\n    elif sample_weights is not None and is_distributing_by_cloning(model):\n        if context.executing_eagerly() and (not model._compile_distribution):\n            raise NotImplementedError('`sample_weight` is not supported when using tf.distribute.Strategy in eager mode and cloning=True.')\n        sample_weights = flatten_per_replica_values(strategy, sample_weights)\n    ins = [inputs, targets, sample_weights]\n    return tuple(ins)", "docstring": "Prepare feed values to the model execution function.\n\nArgs:\nmodel: Model to prepare feed values for.\ninputs: List or dict of model inputs.\ntargets: Optional list of model targets.\nsample_weights: Optional list of sample weight arrays.\nmode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.\n\nReturns:\nFeed values for the model in the given mode.", "source": "github-repos"}
{"code": "def _get_num_nvidia_gpus():\n    try:\n        return len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))\n    except KeyError:\n        pass\n    try:\n        output = subprocess.check_output(['nvidia-smi', '--list-gpus'], encoding='utf-8')\n        return sum((l.startswith('GPU ') for l in output.strip().split('\\n')))\n    except subprocess.CalledProcessError as e:\n        raise RuntimeError('Could not get number of GPUs from nvidia-smi. Maybe it is missing?\\nOutput: %s' % e.output)", "docstring": "Gets the number of NVIDIA GPUs by using CUDA_VISIBLE_DEVICES and nvidia-smi.\n\nReturns:\nNumber of GPUs available on the node\nRaises:\nRuntimeError if executing nvidia-smi failed", "source": "github-repos"}
{"code": "def _init_volume_service(self, version):\n    volume_cfg = self._load_config_section(CONFIG_VOLUME_SECTION)\n    self._token_volume = volume_cfg[CONFIG_TOKEN]\n    proto = volume_cfg[CONFIG_PROTOCOL]\n    host = volume_cfg[CONFIG_HOST]\n    self._volume = VolumeService(host, version)\n    self._volume.base_protocol = proto\n    self._volume.set_auth(self._token_volume)", "docstring": "Method to initialize the Volume Service from the config data\n\nArgs:\nversion (string): Version of Boss API to use.\n\nReturns:\nNone\n\nRaises:\n(KeyError): if given invalid version.", "source": "codesearchnet"}
{"code": "def post_process_travis_macos(journal_filename):\n    travis_build_dir = os.environ.get('TRAVIS_BUILD_DIR', '')\n    with open(journal_filename, 'r') as file_obj:\n        content = file_obj.read()\n    processed = content.replace(travis_build_dir, '${TRAVIS_BUILD_DIR}')\n    with open(journal_filename, 'w') as file_obj:\n        file_obj.write(processed)", "docstring": "Post-process a generated journal file on Travis macOS.\n\nArgs:\njournal_filename (str): The name of the journal file.", "source": "codesearchnet"}
{"code": "def predict(self, documents, **kwargs):\n        \n        if isinstance(documents, (str, bytes, unicode_, np.unicode_)):\n            return self._predict_one(documents, **kwargs)\n        else:\n            return np.concatenate([self._predict_one(doc, **kwargs) for doc in documents])", "docstring": "Predict class (content=1 or not-content=0) of the blocks in one or many\nHTML document(s).\n\nArgs:\ndocuments (str or List[str]): HTML document(s)\n\nReturns:\n``np.ndarray`` or List[``np.ndarray``]: array of binary predictions\nfor content (1) or not-content (0).", "source": "juraj-google-style"}
{"code": "def handle_subscribe(self, request, path):\n        \n        \n        ret = []\n        if path:\n            \n            name = path[0]\n            if name not in self.children:\n                self.children[name] = NotifierNode(\n                    getattr(self.data, name, None), self)\n            ret += self.children[name].handle_subscribe(request, path[1:])\n        else:\n            \n            serialized = serialize_object(self.data)\n            if request.delta:\n                self.delta_requests.append(request)\n                ret.append(request.delta_response([[[], serialized]]))\n            else:\n                self.update_requests.append(request)\n                ret.append(request.update_response(serialized))\n        return ret", "docstring": "Add to the list of request to notify, and notify the initial value of\nthe data held\n\nArgs:\nrequest (Subscribe): The subscribe request\npath (list): The relative path from ourself\n\nReturns:\nlist: [(callback, Response)] that need to be called", "source": "juraj-google-style"}
{"code": "def format_sec_to_dhm(sec):\n    (rem_int, s_int) = divmod(int(sec), 60)\n    (rem_int, m_int) = divmod(rem_int, 60)\n    (d_int, h_int) = divmod(rem_int, 24)\n    return '{}d{:02d}h{:02d}m'.format(d_int, h_int, m_int)", "docstring": "Format seconds to days, hours, minutes.\n\nArgs:\nsec: float or int\nNumber of seconds in a period of time\n\nReturns:\nPeriod of time represented as a string on the form ``0d:00h:00m``.", "source": "codesearchnet"}
{"code": "def get_data(self, url, *args, **kwargs):\n    res = self._conn.get(url, headers=self._prepare_headers(**kwargs))\n    if (res.status_code == 200):\n        return res.text\n    else:\n        return None", "docstring": "Gets data from url as text\n\nReturns content under the provided url as text\n\nArgs:\n**url**: address of the wanted data\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nstring", "source": "codesearchnet"}
{"code": "def __init__(self, root, attached_dependencies=None):\n    trackable_view.TrackableView.__init__(self, root)\n    self._root_ref = root if isinstance(root, weakref.ref) else weakref.ref(root)\n    self._attached_dependencies = attached_dependencies", "docstring": "Configure the graph view.\n\nArgs:\nroot: A `Trackable` object whose variables (including the variables of\ndependencies, recursively) should be saved. May be a weak reference.\nattached_dependencies: List of dependencies to attach to the root object.\nUsed when saving a Checkpoint with a defined root object. To avoid\nreference cycles, this should use the WeakTrackableReference class.", "source": "github-repos"}
{"code": "def set_defaults(self, defaults: Sequence[cfg.Variable]) -> 'PyTDSignature':\n    defaults = list(defaults)\n    params = []\n    for param in reversed(self.pytd_sig.params):\n        if defaults:\n            defaults.pop()\n            params.append(pytd.Parameter(name=param.name, type=param.type, kind=param.kind, optional=True, mutated_type=param.mutated_type))\n        else:\n            params.append(pytd.Parameter(name=param.name, type=param.type, kind=param.kind, optional=False, mutated_type=param.mutated_type))\n    new_sig = pytd.Signature(params=tuple(reversed(params)), starargs=self.pytd_sig.starargs, starstarargs=self.pytd_sig.starstarargs, return_type=self.pytd_sig.return_type, exceptions=self.pytd_sig.exceptions, template=self.pytd_sig.template)\n    self.pytd_sig = new_sig\n    self.param_types = [self.ctx.convert.constant_to_value(p.type, subst=datatypes.AliasingDict(), node=self.ctx.root_node) for p in self.pytd_sig.params]\n    self.signature = function.Signature.from_pytd(self.ctx, self.name, self.pytd_sig)\n    return self", "docstring": "Set signature's default arguments. Requires rebuilding PyTD signature.\n\nArgs:\ndefaults: An iterable of function argument defaults.\n\nReturns:\nSelf with an updated signature.", "source": "github-repos"}
{"code": "def add_roles(self, databaseName, roleNames, collectionName=None):\n        \n        for roleName in roleNames:\n            self.add_role(databaseName, roleName, collectionName)", "docstring": "Add multiple roles\n\nArgs:\ndatabaseName (str): Database Name\nroleNames (list of RoleSpecs): roles\n\nKeyword Args:\ncollectionName (str): Collection\n\nRaises:\nErrRoleException: role not compatible with the databaseName and/or collectionName", "source": "juraj-google-style"}
{"code": "def PrintMessage(self, message):\n    fields = message.ListFields()\n    if self.use_index_order:\n        fields.sort(key=(lambda x: x[0].index))\n    for (field, value) in fields:\n        if _IsMapEntry(field):\n            for key in sorted(value):\n                entry_submsg = field.message_type._concrete_class(key=key, value=value[key])\n                self.PrintField(field, entry_submsg)\n        elif (field.label == descriptor.FieldDescriptor.LABEL_REPEATED):\n            for element in value:\n                self.PrintField(field, element)\n        else:\n            self.PrintField(field, value)", "docstring": "Convert protobuf message to text format.\n\nArgs:\nmessage: The protocol buffers message.", "source": "codesearchnet"}
{"code": "def unsubscribe(self, subscription, max=None):\n    if (max is None):\n        self._send(('UNSUB %d' % subscription.sid))\n        self._subscriptions.pop(subscription.sid)\n    else:\n        subscription.max = max\n        self._send(('UNSUB %d %s' % (subscription.sid, max)))", "docstring": "Unsubscribe will remove interest in the given subject. If max is\nprovided an automatic Unsubscribe that is processed by the server\nwhen max messages have been received\n\nArgs:\nsubscription (pynats.Subscription): a Subscription object\nmax (int=None): number of messages", "source": "codesearchnet"}
{"code": "def describe_file_set(modules):\n    descriptor = FileSet()\n    file_descriptors = []\n    for module in modules:\n        file_descriptors.append(describe_file(module))\n    if file_descriptors:\n        descriptor.files = file_descriptors\n    return descriptor", "docstring": "Build a file set from a specified Python modules.\n\nArgs:\nmodules: Iterable of Python module to describe.\n\nReturns:\nInitialized FileSet instance describing the modules.", "source": "codesearchnet"}
{"code": "def month(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `month`'.format(value))\n        if (value < 1):\n            raise ValueError('value need to be greater or equal 1 for field `month`')\n        if (value > 12):\n            raise ValueError('value need to be smaller 12 for field `month`')\n    self._month = value", "docstring": "Corresponds to IDD Field `month`\n\nArgs:\nvalue (int): value for IDD Field `month`\nvalue >= 1\nvalue <= 12\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def defaults(cls, *options, **kwargs):\n    if (kwargs and (len(kwargs) != 1) and (list(kwargs.keys())[0] != 'backend')):\n        raise Exception('opts.defaults only accepts \"backend\" keyword argument')\n    cls._linemagic(cls._expand_options(merge_options_to_dict(options)), backend=kwargs.get('backend'))", "docstring": "Set default options for a session.\n\nSet default options for a session. whether in a Python script or\na Jupyter notebook.\n\nArgs:\n*options: Option objects used to specify the defaults.\nbackend:  The plotting extension the options apply to", "source": "codesearchnet"}
{"code": "def create(cls, hashing_algorithm=HashingAlgorithmEnum.SHA_256, digest_value=b'', key_format_type=KeyFormatTypeEnum.RAW):\n    algorithm = HashingAlgorithm(hashing_algorithm)\n    value = DigestValue(bytearray(digest_value))\n    format_type = KeyFormatType(key_format_type)\n    return Digest(hashing_algorithm=algorithm, digest_value=value, key_format_type=format_type)", "docstring": "Construct a Digest object from provided digest values.\n\nArgs:\nhashing_algorithm (HashingAlgorithm): An enumeration representing\nthe hash algorithm used to compute the digest. Optional,\ndefaults to HashingAlgorithm.SHA_256.\ndigest_value (byte string): The bytes of the digest hash. Optional,\ndefaults to the empty byte string.\nkey_format_type (KeyFormatType): An enumeration representing the\nformat of the key corresponding to the digest. Optional,\ndefaults to KeyFormatType.RAW.\n\nReturns:\nDigest: The newly created Digest.\n\nExample:\n>>> x = Digest.create(HashingAlgorithm.MD5, b'\\x00',\n... KeyFormatType.RAW)\n>>> x.hashing_algorithm\nHashingAlgorithm(value=HashingAlgorithm.MD5)\n>>> x.digest_value\nDigestValue(value=bytearray(b'\\x00'))\n>>> x.key_format_type\nKeyFormatType(value=KeyFormatType.RAW)", "source": "codesearchnet"}
{"code": "def get_kpoint_weights(self, kpoints, atol=1e-5):\n        \n        kpts = np.array(kpoints)\n        shift = []\n        mesh = []\n        for i in range(3):\n            nonzero = [i for i in kpts[:, i] if abs(i) > 1e-5]\n            if len(nonzero) != len(kpts):\n                \n                if not nonzero:\n                    mesh.append(1)\n                else:\n                    m = np.abs(np.round(1/np.array(nonzero)))\n                    mesh.append(int(max(m)))\n                shift.append(0)\n            else:\n                \n                m = np.abs(np.round(0.5/np.array(nonzero)))\n                mesh.append(int(max(m)))\n                shift.append(1)\n\n        mapping, grid = spglib.get_ir_reciprocal_mesh(\n            np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)\n        mapping = list(mapping)\n        grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh\n        weights = []\n        mapped = defaultdict(int)\n        for k in kpoints:\n            for i, g in enumerate(grid):\n                if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):\n                    mapped[tuple(g)] += 1\n                    weights.append(mapping.count(mapping[i]))\n                    break\n        if (len(mapped) != len(set(mapping))) or (\n                not all([v == 1 for v in mapped.values()])):\n            raise ValueError(\"Unable to find 1:1 corresponding between input \"\n                             \"kpoints and irreducible grid!\")\n        return [w/sum(weights) for w in weights]", "docstring": "Calculate the weights for a list of kpoints.\n\nArgs:\nkpoints (Sequence): Sequence of kpoints. np.arrays is fine. Note\nthat the code does not check that the list of kpoints\nprovided does not contain duplicates.\natol (float): Tolerance for fractional coordinates comparisons.\n\nReturns:\nList of weights, in the SAME order as kpoints.", "source": "juraj-google-style"}
{"code": "def rotate_view(self, axis_ind=0, angle=0):\n    camera = self.ren.GetActiveCamera()\n    if (axis_ind == 0):\n        camera.Roll(angle)\n    elif (axis_ind == 1):\n        camera.Azimuth(angle)\n    else:\n        camera.Pitch(angle)\n    self.ren_win.Render()", "docstring": "Rotate the camera view.\n\nArgs:\naxis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.\nangle: Angle to rotate by. Defaults to 0.", "source": "codesearchnet"}
{"code": "async def get_random_popular_person(self, limit=500):\n        \n        index = random.randrange(limit)\n        data = await self._get_popular_people_page()\n        if data is None:\n            return\n        if index >= len(data['results']):\n            \n            page, index = self._calculate_page_index(index, data)\n            data = await self._get_popular_people_page(page)\n        if data is None:\n            return\n        json_data = data['results'][index]\n        details = await self._get_person_json(json_data['id'])\n        details.update(**json_data)\n        return Person.from_json(details, self.config['data'].get('images'))", "docstring": "Randomly select a popular person.\n\nNotes:\nRequires at least two API calls. May require three API calls\nif the randomly-selected index isn't within the first page of\nrequired data.\n\nArguments:\nlimit (:py:class:`int`, optional): How many of the most\npopular people to make random choice from (defaults to top\n``500``).\n\nReturns:\n:py:class:`~.Person`: A randomly-selected popular person.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not self._file_object_set_in_init and not path_spec:\n      raise ValueError('Missing path specification.')\n\n    if self._file_object_set_in_init:\n      return\n\n    self._file_object = self._OpenFileObject(path_spec)\n    if not self._file_object:\n      raise IOError('Unable to open missing file-like object.')", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (Optional[PathSpec]): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def Artifacts(self, os_name=None, cpe=None, label=None):\n    \n    return [\n        c.artifact for c in self.conditions if c.Artifacts(os_name, cpe, label)\n    ]", "docstring": "Find the artifacts that correspond with other trigger conditions.\n\nArgs:\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nA list of artifacts to be processed.", "source": "juraj-google-style"}
{"code": "def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'stateful_normal', [shape, mean, stddev]) as name:\n        shape = _shape_tensor(shape)\n        mean = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        rnd = self._standard_normal(shape, dtype=dtype)\n        return math_ops.add(rnd * stddev, mean, name=name)", "docstring": "Outputs random values from a normal distribution.\n\nArgs:\nshape: A 1-D integer Tensor or Python array. The shape of the output\ntensor.\nmean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal\ndistribution.\nstddev: A 0-D Tensor or Python value of type `dtype`. The standard\ndeviation of the normal distribution.\ndtype: The type of the output.\nname: A name for the operation (optional).\n\nReturns:\nA tensor of the specified shape filled with random normal values.", "source": "github-repos"}
{"code": "def add_string_parameters(self, string):\n        \n        if isinstance(string, list):\n            for x in string:\n                self.add_string_parameters(x)\n            return\n        self._parameters.append(\"{ \\\"value\\\": \\\"\" + string + \"\\\" }\")", "docstring": "Add given string parameters to the internal list.\n\nArgs:\nstring (list of str or str): A string or list of strings to add to the parameters.", "source": "juraj-google-style"}
{"code": "def __call__(self, y_true, y_pred, sample_weight=None, regularization_losses=None):\n    y_true = self._conform_to_outputs(y_pred, y_true)\n    sample_weight = self._conform_to_outputs(y_pred, sample_weight)\n    if not self._built:\n        self.build(y_pred)\n    y_pred = nest.flatten(y_pred)\n    y_true = nest.flatten(y_true)\n    sample_weight = nest.flatten(sample_weight)\n    loss_values = []\n    loss_metric_values = []\n    batch_dim = None\n    zip_args = (y_true, y_pred, sample_weight, self._losses, self._loss_weights, self._per_output_metrics)\n    for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args):\n        if y_t is None or loss_obj is None:\n            continue\n        y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)\n        sw = apply_mask(y_p, sw, get_mask(y_p))\n        loss_value = loss_obj(y_t, y_p, sample_weight=sw)\n        loss_metric_value = loss_value\n        if loss_obj.reduction == losses_utils.ReductionV2.SUM:\n            loss_metric_value *= distribute_lib.get_strategy().num_replicas_in_sync\n        if batch_dim is None:\n            if tf_utils.is_ragged(y_t):\n                batch_dim = y_t.nrows()\n            else:\n                batch_dim = array_ops.shape(y_t)[0]\n        if metric_obj is not None:\n            metric_obj.update_state(loss_metric_value, sample_weight=batch_dim)\n        if loss_weight is not None:\n            loss_value *= loss_weight\n            loss_metric_value *= loss_weight\n        if loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or loss_obj.reduction == losses_utils.ReductionV2.AUTO:\n            loss_value = losses_utils.scale_loss_for_distribution(loss_value)\n        loss_values.append(loss_value)\n        loss_metric_values.append(loss_metric_value)\n    if regularization_losses:\n        regularization_losses = losses_utils.cast_losses_to_common_dtype(regularization_losses)\n        reg_loss = math_ops.add_n(regularization_losses)\n        loss_metric_values.append(reg_loss)\n        loss_values.append(losses_utils.scale_loss_for_distribution(reg_loss))\n    if loss_values:\n        loss_metric_values = losses_utils.cast_losses_to_common_dtype(loss_metric_values)\n        total_loss_metric_value = math_ops.add_n(loss_metric_values)\n        self._loss_metric.update_state(total_loss_metric_value, sample_weight=batch_dim)\n        loss_values = losses_utils.cast_losses_to_common_dtype(loss_values)\n        total_loss = math_ops.add_n(loss_values)\n        return total_loss\n    else:\n        return array_ops.zeros(shape=())", "docstring": "Computes the overall loss.\n\nArgs:\ny_true: An arbitrary structure of Tensors representing the ground truth.\ny_pred: An arbitrary structure of Tensors representing a Model's outputs.\nsample_weight: An arbitrary structure of Tensors representing the\nper-sample loss weights. If one Tensor is passed, it is used for all\nlosses. If multiple Tensors are passed, the structure should match\n`y_pred`.\nregularization_losses: Additional losses to be added to the total loss.\n\nReturns:\nTuple of `(total_loss, per_output_loss_list)`", "source": "github-repos"}
{"code": "def ch_start_time(self, *channels: List[Channel]) -> int:\n        \n        return self.timeslots.ch_start_time(*channels)", "docstring": "Return minimum start time for supplied channels.\n\nArgs:\n*channels: Supplied channels", "source": "juraj-google-style"}
{"code": "def _create_datadict(cls, internal_name):\n    if (internal_name == 'LOCATION'):\n        return Location()\n    if (internal_name == 'DESIGN CONDITIONS'):\n        return DesignConditions()\n    if (internal_name == 'TYPICAL/EXTREME PERIODS'):\n        return TypicalOrExtremePeriods()\n    if (internal_name == 'GROUND TEMPERATURES'):\n        return GroundTemperatures()\n    if (internal_name == 'HOLIDAYS/DAYLIGHT SAVINGS'):\n        return HolidaysOrDaylightSavings()\n    if (internal_name == 'COMMENTS 1'):\n        return Comments1()\n    if (internal_name == 'COMMENTS 2'):\n        return Comments2()\n    if (internal_name == 'DATA PERIODS'):\n        return DataPeriods()\n    raise ValueError('No DataDictionary known for {}'.format(internal_name))", "docstring": "Creates an object depending on `internal_name`\n\nArgs:\ninternal_name (str): IDD name\n\nRaises:\nValueError: if `internal_name` cannot be matched to a data dictionary object", "source": "codesearchnet"}
{"code": "def SetHasherNames(self, hasher_names_string):\n    \n    hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(\n        hasher_names_string)\n\n    debug_hasher_names = ', '.join(hasher_names)\n    logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))\n\n    self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)\n    self._hasher_names_string = hasher_names_string", "docstring": "Sets the hashers that should be enabled.\n\nArgs:\nhasher_names_string (str): comma separated names of hashers to enable.", "source": "juraj-google-style"}
{"code": "def valUserCert(self, byts, cacerts=None):\n        \n        cert = crypto.load_certificate(crypto.FILETYPE_PEM, byts)\n\n        if cacerts is None:\n            cacerts = self.getCaCerts()\n\n        store = crypto.X509Store()\n        [store.add_cert(cacert) for cacert in cacerts]\n\n        ctx = crypto.X509StoreContext(store, cert)\n        ctx.verify_certificate()  \n        return cert", "docstring": "Validate the PEM encoded x509 user certificate bytes and return it.\n\nArgs:\nbyts (bytes): The bytes for the User Certificate.\ncacerts (tuple): A tuple of OpenSSL.crypto.X509 CA Certificates.\n\nRaises:\nOpenSSL.crypto.X509StoreContextError: If the certificate is not valid.\n\nReturns:\nOpenSSL.crypto.X509: The certificate, if it is valid.", "source": "juraj-google-style"}
{"code": "def get(self, client_id, client_secret, code, redirect_uri):\n    check_type(client_id, basestring, may_be_none=False)\n    check_type(client_secret, basestring, may_be_none=False)\n    check_type(code, basestring, may_be_none=False)\n    check_type(redirect_uri, basestring, may_be_none=False)\n    post_data = dict_from_items_with_values(grant_type='authorization_code', client_id=client_id, client_secret=client_secret, code=code, redirect_uri=redirect_uri)\n    response = requests.post(self._endpoint_url, data=post_data, **self._request_kwargs)\n    check_response_code(response, EXPECTED_RESPONSE_CODE['POST'])\n    json_data = extract_and_parse_json(response)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Exchange an Authorization Code for an Access Token.\n\nExchange an Authorization Code for an Access Token that can be used to\ninvoke the APIs.\n\nArgs:\nclient_id(basestring): Provided when you created your integration.\nclient_secret(basestring): Provided when you created your\nintegration.\ncode(basestring): The Authorization Code provided by the user\nOAuth process.\nredirect_uri(basestring): The redirect URI used in the user OAuth\nprocess.\n\nReturns:\nAccessToken: An AccessToken object with the access token provided\nby the Webex Teams cloud.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def GetVSSStoreIdentifiers(self, volume_system, volume_identifiers):\n    print_header = True\n    while True:\n        if print_header:\n            self._PrintVSSStoreIdentifiersOverview(volume_system, volume_identifiers)\n            print_header = False\n        self._output_writer.Write('\\n')\n        lines = self._textwrapper.wrap(self._USER_PROMPT_VSS)\n        self._output_writer.Write('\\n'.join(lines))\n        self._output_writer.Write('\\n\\nVSS identifier(s): ')\n        try:\n            selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='vss')\n            if ((not selected_volumes) or (not set(selected_volumes).difference(volume_identifiers))):\n                break\n        except ValueError:\n            pass\n        self._output_writer.Write('\\n')\n        lines = self._textwrapper.wrap('Unsupported VSS identifier(s), please try again or abort with Ctrl^C.')\n        self._output_writer.Write('\\n'.join(lines))\n        self._output_writer.Write('\\n\\n')\n    return selected_volumes", "docstring": "Retrieves VSS store identifiers.\n\nThis method can be used to prompt the user to provide VSS store identifiers.\n\nArgs:\nvolume_system (VShadowVolumeSystem): volume system.\nvolume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix or None.", "source": "codesearchnet"}
{"code": "def get_display_name(self, room=None):\n        \n        if room:\n            try:\n                return room.members_displaynames[self.user_id]\n            except KeyError:\n                return self.user_id\n        if not self.displayname:\n            self.displayname = self.api.get_display_name(self.user_id)\n        return self.displayname or self.user_id", "docstring": "Get this user's display name.\n\nArgs:\nroom (Room): Optional. When specified, return the display name of the user\nin this room.\n\nReturns:\nThe display name. Defaults to the user ID if not set.", "source": "juraj-google-style"}
{"code": "def graph_op_digests(self, op_type=None):\n    if op_type is not None:\n        return [digest for digest in self._graph_op_digests if digest.op_type == op_type]\n    else:\n        return self._graph_op_digests", "docstring": "Get the list of the digests for graph-op creation so far.\n\nArgs:\nop_type: Optional op type to filter the creation events with.\n\nReturns:\nA list of `GraphOpCreationDigest` objects.", "source": "github-repos"}
{"code": "def _initialize_memory(self, policy_params):\n    template = (self._batch_env.observ[0], self._batch_env.action[0], tools.nested.map((lambda x: x[(0, 0)]), policy_params), self._batch_env.reward[0])\n    with tf.variable_scope('ppo_temporary'):\n        self._current_episodes = parts.EpisodeMemory(template, len(self._batch_env), self._config.max_length, 'episodes')\n    self._finished_episodes = parts.EpisodeMemory(template, self._config.update_every, self._config.max_length, 'memory')\n    self._num_finished_episodes = tf.Variable(0, False)", "docstring": "Initialize temporary and permanent memory.\n\nArgs:\npolicy_params: Nested tuple of policy parameters with all dimensions set.\n\nInitializes the attributes `self._current_episodes`,\n`self._finished_episodes`, and `self._num_finished_episodes`. The episodes\nmemory serves to collect multiple episodes in parallel. Finished episodes\nare copied into the next free slot of the second memory. The memory index\npoints to the next free slot.", "source": "codesearchnet"}
{"code": "def add_header(self, key, value, **params):\n    key = self.escape(key)\n    ci_key = key.casefold()\n\n    def quoted_params(items):\n        for p in items:\n            param_name = self.escape(p[0])\n            param_val = self.de_quote(self.escape(p[1]))\n            (yield (param_name, param_val))\n    sorted_items = sorted(params.items())\n    quoted_iter = (('%s=\"%s\"' % p) for p in quoted_params(sorted_items))\n    param_str = ' '.join(quoted_iter)\n    if param_str:\n        value = ('%s; %s' % (value, param_str))\n    self._header_data[ci_key] = (key, value)", "docstring": "Add a header to the collection, including potential parameters.\n\nArgs:\nkey (str): The name of the header\nvalue (str): The value to store under that key\nparams: Option parameters to be appended to the value,\nautomatically formatting them in a standard way", "source": "codesearchnet"}
{"code": "def render_chart_data(data):\n    \n    builder = HtmlBuilder()\n    builder._render_objects(data, datatype='chartdata')\n    return builder._to_html()", "docstring": "Return a dictionary list formatted as a HTML table.\n\nArgs:\ndata: data in the form consumed by Google Charts.", "source": "juraj-google-style"}
{"code": "def _create(cls, model_class, *args, **kwargs):\n        \n        manager = cls._get_manager(model_class)\n\n        return manager.create_user(*args, **kwargs)", "docstring": "Create a new user instance.\n\nArgs:\nmodel_class:\nThe type of model to create an instance of.\nargs:\nPositional arguments to create the instance with.\nkwargs:\nKeyword arguments to create the instance with.\n\nReturns:\nA new user instance of the type specified by\n``model_class``.", "source": "juraj-google-style"}
{"code": "def open_image(fn):\n    \n    flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR\n    if not os.path.exists(fn) and not str(fn).startswith(\"http\"):\n        raise OSError('No such file or directory: {}'.format(fn))\n    elif os.path.isdir(fn) and not str(fn).startswith(\"http\"):\n        raise OSError('Is a directory: {}'.format(fn))\n    elif isdicom(fn):\n        slice = pydicom.read_file(fn)\n        if slice.PhotometricInterpretation.startswith('MONOCHROME'):\n            \n            im = np.stack([slice.pixel_array]*3,-1)\n            return im / ((1 << slice.BitsStored)-1)\n        else:\n            \n            \n            raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation))\n    else:\n        \n        \n        \n        try:\n            if str(fn).startswith(\"http\"):\n                req = urllib.urlopen(str(fn))\n                image = np.asarray(bytearray(req.read()), dtype=\"uint8\")\n                im = cv2.imdecode(image, flags).astype(np.float32)/255\n            else:\n                im = cv2.imread(str(fn), flags).astype(np.float32)/255\n            if im is None: raise OSError(f'File not recognized by opencv: {fn}')\n            return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n        except Exception as e:\n            raise OSError('Error handling image at: {}'.format(fn)) from e", "docstring": "Opens an image using OpenCV given the file path.\n\nArguments:\nfn: the file path of the image\n\nReturns:\nThe image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0", "source": "juraj-google-style"}
{"code": "def get_model_field(model, field_name):\n    \n    meta = model._meta\n    try:\n        if DJANGO19:\n            field = meta.get_field(field_name)\n        else:\n            field = meta.get_field_by_name(field_name)[0]\n        return field\n    except:\n        if DJANGO19:\n            related_objs = (\n                f for f in meta.get_fields()\n                if (f.one_to_many or f.one_to_one)\n                and f.auto_created and not f.concrete\n            )\n            related_m2m_objs = (\n                f for f in meta.get_fields(include_hidden=True)\n                if f.many_to_many and f.auto_created\n            )\n        else:\n            related_objs = meta.get_all_related_objects()\n            related_m2m_objs = meta.get_all_related_many_to_many_objects()\n\n        related_objects = {\n            o.get_accessor_name(): o\n            for o in chain(related_objs, related_m2m_objs)\n        }\n        if field_name in related_objects:\n            return related_objects[field_name]\n        else:\n            \n            if hasattr(meta, 'virtual_fields'):\n                for field in meta.virtual_fields:\n                    if field.name == field_name:\n                        return field\n\n            raise AttributeError(\n                '%s is not a valid field for %s' % (field_name, model)\n            )", "docstring": "Return a field given a model and field name.\n\nArguments:\nmodel: a Django model\nfield_name: the name of a field\n\nReturns:\nA Django field if `field_name` is a valid field for `model`,\nNone otherwise.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent=None):\n        \n        super(SupportedDtypesTranslator, self).__init__(parent)\n\n        \n        self._strs = [(np.dtype(object), self.tr('text'))]\n\n        self._ints = [(np.dtype(np.int8), self.tr('small integer (8 bit)')),\n                      (np.dtype(np.int16), self.tr('small integer (16 bit)')),\n                      (np.dtype(np.int32), self.tr('integer (32 bit)')),\n                      (np.dtype(np.int64), self.tr('integer (64 bit)'))]\n\n        self._uints = [(np.dtype(np.uint8), self.tr('unsigned small integer (8 bit)')),\n                       (np.dtype(np.uint16), self.tr('unsigned small integer (16 bit)')),\n                       (np.dtype(np.uint32), self.tr('unsigned integer (32 bit)')),\n                       (np.dtype(np.uint64), self.tr('unsigned integer (64 bit)'))]\n\n        self._floats = [(np.dtype(np.float16), self.tr('floating point number (16 bit)')),\n                      (np.dtype(np.float32), self.tr('floating point number (32 bit)')),\n                      (np.dtype(np.float64), self.tr('floating point number (64 bit)'))]\n\n        self._datetime = [(np.dtype('<M8[ns]'), self.tr('date and time'))]\n\n        self._bools = [(np.dtype(bool), self.tr('true/false value'))]\n\n        self._all = self._strs + self._ints + self._uints + self._floats + self._bools + self._datetime", "docstring": "Constructs the object with the given parent.\n\nArgs:\nparent (QtCore.QObject, optional): Causes the objected to be owned\nby `parent` instead of Qt. Defaults to `None`.", "source": "juraj-google-style"}
{"code": "def __init__(self, vertex_out, vertex_in, weight=1):\n        \n        self.vertex_out = None\n        self.vertex_in = None\n        self.weight = weight\n        self.go_from(vertex_out)\n        self.go_in(vertex_in)", "docstring": "Initialization method.\n\nArgs:\nvertex_out (Vertex): source vertex (edge going out).\nvertex_in (Vertex): target vertex (edge going in).\nweight (int): weight of the edge.", "source": "juraj-google-style"}
{"code": "def rgb_to_grayscale(images, name=None):\n    with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:\n        images = ops.convert_to_tensor(images, name='images')\n        orig_dtype = images.dtype\n        flt_image = convert_image_dtype(images, dtypes.float32)\n        rgb_weights = [0.2989, 0.587, 0.114]\n        gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])\n        gray_float = array_ops.expand_dims(gray_float, -1)\n        return convert_image_dtype(gray_float, orig_dtype, name=name)", "docstring": "Converts one or more images from RGB to Grayscale.\n\nOutputs a tensor of the same `DType` and rank as `images`.  The size of the\nlast dimension of the output is 1, containing the Grayscale value of the\npixels.\n\n>>> original = tf.constant([[[1.0, 2.0, 3.0]]])\n>>> converted = tf.image.rgb_to_grayscale(original)\n>>> print(converted.numpy())\n[[[1.81...]]]\n\nArgs:\nimages: The RGB tensor to convert. The last dimension must have size 3 and\nshould contain RGB values.\nname: A name for the operation (optional).\n\nReturns:\nThe converted grayscale image(s).", "source": "github-repos"}
{"code": "def _wrap_2d_function(inputs, compute_op, dim=-1, name=None):\n\n    def _swap_axis(input_tensor, dim_index, last_index, name=None):\n        \n        return array_ops.transpose(input_tensor, array_ops.concat([math_ops.range(dim_index), [last_index], math_ops.range(dim_index + 1, last_index), [dim_index]], 0), name=name)\n    inputs = ops.convert_to_tensor(inputs)\n    shape = inputs.get_shape()\n    is_last_dim = dim == -1 or dim == shape.ndims - 1\n    if is_last_dim:\n        return compute_op(inputs, name=name)\n    dim_val = dim\n    if isinstance(dim, tensor_lib.Tensor):\n        dim_val = tensor_util.constant_value(dim)\n    if dim_val is not None and (not -shape.ndims <= dim_val < shape.ndims):\n        raise errors_impl.InvalidArgumentError(None, None, f'`dim` must be in the range [{-shape.ndims}, {shape.ndims}) where {shape.ndims} is the number of dimensions in the input. Received: dim={dim_val}')\n    ndims = array_ops.rank(inputs)\n    if not isinstance(dim, tensor_lib.Tensor):\n        if dim < 0:\n            dim += ndims\n    else:\n        dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)\n    input_rank = array_ops.rank(inputs)\n    dim_axis = dim % shape.ndims\n    inputs = _swap_axis(inputs, dim_axis, math_ops.subtract(input_rank, 1))\n\n    def fix_output(output):\n        output = _swap_axis(output, dim_axis, math_ops.subtract(input_rank, 1), name=name)\n        output.set_shape(shape)\n        return output\n    outputs = compute_op(inputs)\n    if isinstance(outputs, tuple):\n        return tuple((fix_output(output) for output in outputs))\n    else:\n        return fix_output(outputs)", "docstring": "Helper function for ops that accept and return 2d inputs of same shape.\n\nIt reshapes and transposes the inputs into a 2-D Tensor and then invokes\nthe given function. The output would be transposed and reshaped back.\nIf the given function returns a tuple of tensors, each of them will be\ntransposed and reshaped.\n\nArgs:\ninputs: A non-empty `Tensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\ncompute_op: The function to wrap. Must accept the input tensor as its first\narugment, and a second keyword argument `name`.\ndim: The dimension softmax would be performed on. The default is -1 which\nindicates the last dimension.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`. Has the same shape as inputs. If compute_op returns multiple\ntensors, each of them have the same shape as the input.\nRaises:\nInvalidArgumentError: if `inputs` is empty or `dim` is beyond the last\ndimension of `inputs`.", "source": "github-repos"}
{"code": "def get_invalid_txn_info(self, batch_id):\n    with self._lock:\n        return [info.copy() for info in self._invalid.get(batch_id, [])]", "docstring": "Fetches the id of the Transaction that failed within a particular\nBatch, as well as any error message or other data about the failure.\n\nArgs:\nbatch_id (str): The id of the Batch containing an invalid txn\n\nReturns:\nlist of dict: A list of dicts with three possible keys:\n* 'id' - the header_signature of the invalid Transaction\n* 'message' - the error message sent by the TP\n* 'extended_data' - any additional data sent by the TP", "source": "codesearchnet"}
{"code": "def _get_overlaps_tensor(self, L):\n    (n, m) = L.shape\n    LY = np.array([np.where((L == y), 1, 0) for y in range(self.k_0, (self.k + 1))])\n    O = (np.einsum('abc,dbe,fbg->cegadf', LY, LY, LY) / n)\n    return torch.from_numpy(O).float()", "docstring": "Transforms the input label matrix to a three-way overlaps tensor.\n\nArgs:\nL: (np.array) An n x m array of LF output labels, in {0,...,k} if\nself.abstains, else in {1,...,k}, generated by m conditionally\nindependent LFs on n data points\n\nOutputs:\nO: (torch.Tensor) A (m, m, m, k, k, k) tensor of the label-specific\nempirical overlap rates; that is,\n\nO[i,j,k,y1,y2,y3] = P(\\lf_i = y1, \\lf_j = y2, \\lf_k = y3)\n\nwhere this quantity is computed empirically by this function, based\non the label matrix L.", "source": "codesearchnet"}
{"code": "def derive_annotations(self, annotations):\n        \n        cls = type(self)\n        \n        return cls(\n            self[0],\n            self[1],\n            self[2],\n            self[3],\n            annotations,\n            self[5]\n        )", "docstring": "Derives a new event from this one setting the ``annotations`` attribute.\n\nArgs:\nannotations: (Sequence[Union[amazon.ion.symbols.SymbolToken, unicode]]):\nThe annotations associated with the derived event.\n\nReturns:\nIonEvent: The newly generated event.", "source": "juraj-google-style"}
{"code": "def register_key_flag_for_module(self, module_name, flag):\n    \n    key_flags_by_module = self.key_flags_by_module_dict()\n    \n    key_flags = key_flags_by_module.setdefault(module_name, [])\n    \n    if flag not in key_flags:\n      key_flags.append(flag)", "docstring": "Specifies that a flag is a key flag for a module.\n\nArgs:\nmodule_name: str, the name of a Python module.\nflag: Flag, the Flag instance that is key to the module.", "source": "juraj-google-style"}
{"code": "def filter(self, scored_list):\n    top_n_key = ((- 1) * self.top_n)\n    top_n_list = sorted(scored_list, key=(lambda x: x[1]))[top_n_key:]\n    result_list = sorted(top_n_list, key=(lambda x: x[0]))\n    return result_list", "docstring": "Filtering with top-n ranking.\n\nArgs:\nscored_list:    The list of scoring.\n\nRetruns:\nThe list of filtered result.", "source": "codesearchnet"}
{"code": "def peek(self, index, name=None):\n    if name is None:\n        name = '%s_peek' % self._name\n    fn = lambda: gen_data_flow_ops.stage_peek(index, dtypes=self._dtypes, shared_name=self._name, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n    return self.__internal_get(fn, name)", "docstring": "Peeks at an element in the staging area.\n\nIf the staging area is too small to contain the element at\nthe specified index, it will block until enough elements\nare inserted to complete the operation.\n\nThe placement of the returned tensor will be determined by\nthe current device scope when this function is called.\n\nArgs:\nindex: The index of the tensor within the staging area\nto look up.\nname: A name for the operation (optional).\n\nReturns:\nThe tuple of tensors that was gotten.", "source": "github-repos"}
{"code": "def get_numpy_iterator(self):\n    raise NotImplementedError", "docstring": "Get a Python iterable for the `DataAdapter`, that yields NumPy\narrays.\n\nReturns:\nA Python iterator.", "source": "github-repos"}
{"code": "def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):\n    if h5py is None:\n        raise ImportError('`save_model` requires h5py.')\n    if len(model.weights) != len(model._undeduplicated_weights):\n        logging.warning('Found duplicated `Variable`s in Model\\'s `weights`. This is usually caused by `Variable`s being shared by Layers in the Model. These `Variable`s will be treated as separate `Variable`s when the Model is restored. To avoid this, please save with `save_format=\"tf\"`.')\n    if not isinstance(filepath, h5py.File):\n        if not overwrite and os.path.isfile(filepath):\n            proceed = ask_to_proceed_with_overwrite(filepath)\n            if not proceed:\n                return\n        dirpath = os.path.dirname(filepath)\n        if not os.path.exists(dirpath):\n            gfile.MakeDirs(dirpath)\n        f = h5py.File(filepath, mode='w')\n        opened_new_file = True\n    else:\n        f = filepath\n        opened_new_file = False\n    try:\n        model_metadata = saving_utils.model_metadata(model, include_optimizer)\n        for k, v in model_metadata.items():\n            if isinstance(v, (dict, list, tuple)):\n                f.attrs[k] = json.dumps(v, default=json_utils.get_json_type).encode('utf8')\n            else:\n                f.attrs[k] = v\n        model_weights_group = f.create_group('model_weights')\n        model_layers = model.layers\n        save_weights_to_hdf5_group(model_weights_group, model_layers)\n        if include_optimizer and model.optimizer and (not isinstance(model.optimizer, optimizer_v1.TFOptimizer)):\n            save_optimizer_weights_to_hdf5_group(f, model.optimizer)\n        f.flush()\n    finally:\n        if opened_new_file:\n            f.close()", "docstring": "Saves a model to a HDF5 file.\n\nThe saved model contains:\n- the model's configuration (topology)\n- the model's weights\n- the model's optimizer's state (if any)\n\nThus the saved model can be reinstantiated in\nthe exact same state, without any of the code\nused for model definition or training.\n\nArgs:\nmodel: Keras model instance to be saved.\nfilepath: One of the following:\n- String, path where to save the model\n- `h5py.File` object where to save the model\noverwrite: Whether we should overwrite any existing\nmodel at the target location, or instead\nask the user with a manual prompt.\ninclude_optimizer: If True, save optimizer's state together.\n\nRaises:\nImportError: if h5py is not available.", "source": "github-repos"}
{"code": "def convert_ids_to_tokens(self, ids: Union[int, list[int]], skip_special_tokens: bool=False) -> Union[str, list[str]]:\n    if isinstance(ids, int):\n        return self._tokenizer.id_to_token(ids)\n    tokens = []\n    ids_to_skip = set(self.all_special_ids) if skip_special_tokens else set()\n    for index in ids:\n        index = int(index)\n        if index in ids_to_skip:\n            continue\n        tokens.append(self._tokenizer.id_to_token(index))\n    return tokens", "docstring": "Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and\nadded tokens.\n\nArgs:\nids (`int` or `List[int]`):\nThe token id (or token ids) to convert to tokens.\nskip_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not to remove special tokens in the decoding.\n\nReturns:\n`str` or `List[str]`: The decoded token(s).", "source": "github-repos"}
{"code": "def record_batch_metrics(self, requests_in_batch: List) -> None:\n    if not _has_opentelemetry or not requests_in_batch:\n        return\n    decode_tokens = 0\n    prefill_tokens = 0\n    for state in requests_in_batch:\n        if state.status == RequestStatus.DECODING:\n            decode_tokens += 1\n        elif state.status in [RequestStatus.PREFILLING, RequestStatus.PREFILLING_SPLIT]:\n            prefill_tokens += len(state.prompt_ids)\n    total_batch_tokens = decode_tokens + prefill_tokens\n    try:\n        if prefill_tokens > 0:\n            self.prefill_tokens_counter.add(prefill_tokens)\n        if decode_tokens > 0:\n            self.decode_tokens_counter.add(decode_tokens)\n        if prefill_tokens > 0:\n            ratio = decode_tokens / prefill_tokens\n            self.decode_prefill_ratio_gauge.set(ratio)\n        fill_percentage = total_batch_tokens / self.max_batch_tokens * 100.0\n        self.batch_fill_percentage_histogram.record(fill_percentage)\n        logger.debug(f'Batch metrics: {decode_tokens} decode tokens, {prefill_tokens} prefill tokens, batch fill: {fill_percentage:.2f}% ({total_batch_tokens}/{self.max_batch_tokens})')\n    except Exception as e:\n        logger.warning(f'Failed to record batch metrics: {e}')", "docstring": "Record metrics about the batch composition including decode/prefill ratio and batch fill percentage.\n\nArgs:\nrequests_in_batch: List of request states in the current batch", "source": "github-repos"}
{"code": "def remove(self, *dic):\n        \n        dicList = list(flatten(dic))\n        for d in dicList:\n            di = []\n            for k in d:\n                \n                di.append(Pair(k, IntegerSingle(d[k])))\n            dictSingle = DictSingle(di)\n            \n            self._remove([dictSingle], self.l)", "docstring": "remove a calendar config.\n\nArgs:\n*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.", "source": "juraj-google-style"}
{"code": "def get_random_numeric_tensor(self, dtype=None, min_size=_MIN_SIZE, max_size=_MAX_SIZE, min_val=_MIN_INT, max_val=_MAX_INT):\n    if max_size > 8:\n        raise tf.errors.InvalidArgumentError(None, None, 'Given size of {} will result in an OOM error'.format(max_size))\n    seed = self.get_int()\n    shape = self.get_int_list(min_length=min_size, max_length=max_size, min_int=min_size, max_int=max_size)\n    if dtype is None:\n        dtype = self.get_tf_dtype(allowed_set=_TF_RANDOM_DTYPES)\n    elif dtype not in _TF_RANDOM_DTYPES:\n        raise tf.errors.InvalidArgumentError(None, None, 'Given dtype {} is not accepted in get_random_numeric_tensor'.format(dtype))\n    return tf.random.uniform(shape=shape, minval=min_val, maxval=max_val, dtype=dtype, seed=seed)", "docstring": "Return a tensor of random shape and values.\n\nGenerated tensors are capped at dimension sizes of 8, as 2^32 bytes of\nrequested memory crashes the fuzzer (see b/34190148).\nReturns only type that tf.random.uniform can generate. If you need a\ndifferent type, consider using tf.cast.\n\nArgs:\ndtype: Type of tensor, must of one of the following types: float16,\nfloat32, float64, int32, or int64\nmin_size: Minimum size of returned tensor\nmax_size: Maximum size of returned tensor\nmin_val: Minimum value in returned tensor\nmax_val: Maximum value in returned tensor\n\nReturns:\nTensor of random shape filled with uniformly random numeric values.", "source": "github-repos"}
{"code": "def export_gpx_file(self):\n    gpx = create_elem('gpx', GPX_ELEM_ATTRIB)\n    if (not self.metadata.bounds):\n        self.metadata.bounds = [j for i in self for j in i]\n    gpx.append(self.metadata.togpx())\n    track = create_elem('trk')\n    gpx.append(track)\n    for segment in self:\n        chunk = create_elem('trkseg')\n        track.append(chunk)\n        for place in segment:\n            chunk.append(place.togpx())\n    return etree.ElementTree(gpx)", "docstring": "Generate GPX element tree from ``Trackpoints``.\n\nReturns:\netree.ElementTree: GPX element tree depicting ``Trackpoints``\nobjects", "source": "codesearchnet"}
{"code": "def __init__(self, visitor):\n    self._visitor = visitor\n    self._root_name = 'tf'\n    self._private_map = {'tf': ['compiler', 'core', 'security', 'dtensor', 'python', 'tsl'], 'tf.flags': ['cpp_flags']}\n    self._do_not_descend_map = {'tf': ['examples', 'flags', 'platform', 'pywrap_tensorflow', 'user_ops', 'tools', 'tensorboard'], 'tf.app': ['flags'], 'tf.test': ['mock']}", "docstring": "Constructor.\n\n`visitor` should be a callable suitable as a visitor for `traverse`. It will\nbe called only for members of the public TensorFlow API.\n\nArgs:\nvisitor: A visitor to call for the public API.", "source": "github-repos"}
{"code": "def from_arrays(cls, path, trn, val, bs=64, tfms=(None, None), classes=None, num_workers=4, test=None, continuous=False):\n    f = (ArraysIndexRegressionDataset if continuous else ArraysIndexDataset)\n    datasets = cls.get_ds(f, trn, val, tfms, test=test)\n    return cls(path, datasets, bs, num_workers, classes=classes)", "docstring": "Read in images and their labels given as numpy arrays\n\nArguments:\npath: a root path of the data (used for storing trained models, precomputed values, etc)\ntrn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the\nshape of `(5000, 784)` and `y` has the shape of `(5000,)`)\nval: a tuple of validation data matrix and target label/classification array.\nbs: batch size\ntfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\nclasses: a list of all labels/classifications\nnum_workers: a number of workers\ntest: a matrix of test data (the shape should match `trn[0]`)\n\nReturns:\nImageClassifierData", "source": "codesearchnet"}
{"code": "def start_dag(self, dag, *, data=None):\n        \n        return self._client.send(\n            Request(\n                action='start_dag',\n                payload={'name': dag.name if isinstance(dag, Dag) else dag,\n                         'data': data if isinstance(data, MultiTaskData) else None}\n            )\n        ).payload['dag_name']", "docstring": "Schedule the execution of a dag by sending a signal to the workflow.\n\nArgs:\ndag (Dag, str): The dag object or the name of the dag that should be started.\ndata (MultiTaskData): The data that should be passed on to the new dag.\n\nReturns:\nstr: The name of the successfully started dag.", "source": "juraj-google-style"}
{"code": "def forward(self, device_port, local_port=None):\n        \n        port = self._adb_device.forward(device_port, local_port)\n        return (self._host, port)", "docstring": "Forward device port to local\nArgs:\ndevice_port: port inside device\nlocal_port: port on PC, if this value is None, a port will random pick one.\n\nReturns:\ntuple, (host, local_port)", "source": "juraj-google-style"}
{"code": "def pixel_image(shape, sd=None, init_val=None):\n    \n    if sd is not None and init_val is not None:\n        warnings.warn(\n            \"`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value.\"\n        )\n\n    sd = sd or 0.01\n    init_val = init_val or np.random.normal(size=shape, scale=sd).astype(np.float32)\n    return tf.Variable(init_val)", "docstring": "A naive, pixel-based image parameterization.\nDefaults to a random initialization, but can take a supplied init_val argument\ninstead.\n\nArgs:\nshape: shape of resulting image, [batch, width, height, channels].\nsd: standard deviation of param initialization noise.\ninit_val: an initial value to use instead of a random initialization. Needs\nto have the same shape as the supplied shape argument.\n\nReturns:\ntensor with shape from first argument.", "source": "juraj-google-style"}
{"code": "def _get_bit(self, n, hash_bytes):\n        \n\n        if hash_bytes[n \n            return True\n\n        return False", "docstring": "Determines if the n-th bit of passed bytes is 1 or 0.\n\nArguments:\n\nhash_bytes - List of hash byte values for which the n-th bit value\nshould be checked. Each element of the list should be an integer from\n0 to 255.\n\nReturns:\n\nTrue if the bit is 1. False if the bit is 0.", "source": "juraj-google-style"}
{"code": "def update_dns_zone_record(env, zone_id, **kwargs):\n    client = boto3.Session(profile_name=env).client('route53')\n    response = {}\n    hosted_zone_info = client.get_hosted_zone(Id=zone_id)\n    zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')\n    dns_name = kwargs.get('dns_name')\n    if (dns_name and dns_name.endswith(zone_name)):\n        dns_name_aws = kwargs.get('dns_name_aws')\n        dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs)\n        LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)\n        try:\n            response = client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json))\n            LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)\n        except botocore.exceptions.ClientError as error:\n            LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)\n            LOG.debug(error)\n    else:\n        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)\n    LOG.debug('Route53 JSON Response: \\n%s', pformat(response))", "docstring": "Create a Route53 CNAME record in _env_ zone.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\n\nKeyword Args:\ndns_name (str): FQDN of application's dns entry to add/update.\ndns_name_aws (str): FQDN of AWS resource\ndns_ttl (int): DNS time-to-live (ttl)", "source": "codesearchnet"}
{"code": "def __init__(self, input_queue, output_queue):\n        \n        super(WorkflowThread, self).__init__(input_queue, output_queue)\n        self.pending = PendingBarriers()\n        self.worker_threads = []\n        self.register(WorkflowItem, input_queue)", "docstring": "Initializer.\n\nArgs:\ninput_queue: Queue this worker consumes work from. These should be\nWorkflowItems to process, or any WorkItems registered with this\nclass using the register() method.\noutput_queue: Queue where this worker puts finished work items,\nif any.", "source": "juraj-google-style"}
{"code": "def __init__(self, chain):\n        \n        self.queue = []\n        self._fsm = JTAGStateMachine()\n        self._chain = chain", "docstring": "Create a new CommandQueue to manage, compile, and run Primitives.\n\nArgs:\nchain: A JTAGScanChain instance that this queue will be associated with.", "source": "juraj-google-style"}
{"code": "def _last_path_token(builder: expressions.Builder) -> str:\n    if isinstance(builder.node, _evaluation.RootMessageNode):\n        return ''\n    return builder.node.to_path_token()", "docstring": "Returns `builder`'s last path token less the resource type.\n\nFor example:\n* \"Foo\" returns \"\" (empty string)\n* \"Foo.bar\" returns \"bar\"\n* \"Foo.bar.bats\" returns \"bats\"\n\nArgs:\nbuilder: The `builder` whose relative path to return.", "source": "github-repos"}
{"code": "def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):\n  \n  \n  prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])\n  image_height = int(prev_image.get_shape()[1])\n  image_width = int(prev_image.get_shape()[2])\n\n  inputs = []\n  for xkern in range(dna_kernel_size):\n    for ykern in range(dna_kernel_size):\n      inputs.append(\n          tf.expand_dims(\n              tf.slice(prev_image_pad, [0, xkern, ykern, 0],\n                       [-1, image_height, image_width, -1]), [3]))\n  inputs = tf.concat(axis=3, values=inputs)\n\n  \n  kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift\n  kernel = tf.expand_dims(\n      kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])\n  return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)", "docstring": "Apply dynamic neural advection to previous image.\n\nArgs:\nprev_image: previous image to be transformed.\ndna_input: hidden lyaer to be used for computing DNA transformation.\ndna_kernel_size: dna kernel size.\nrelu_shift: shift for ReLU function.\nReturns:\nList of images transformed by the predicted CDNA kernels.", "source": "juraj-google-style"}
{"code": "def serialize_quantity(o):\n    \n    return dict(\n        _type='astropy.units.Quantity',\n        value=o.value,\n        unit=o.unit.to_string())", "docstring": "Serializes an :obj:`astropy.units.Quantity`, for JSONification.\n\nArgs:\no (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.\n\nReturns:\nA dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj-google-style"}
{"code": "def normalize_json(template):\n    obj = parse_cloudformation_template(template)\n    json_str = json.dumps(obj, sort_keys=True, indent=4, default=str, separators=(',', ': '))\n    result = []\n    lines = json_str.split('\\n')\n    for line in lines:\n        result.append((line + '\\n'))\n    return result", "docstring": "Normalize our template for diffing.\n\nArgs:\ntemplate(str): string representing the template\n\nReturns:\nlist: json representation of the parameters", "source": "codesearchnet"}
{"code": "def _from_yaml_v0(cls, job):\n    job_metadata = {}\n    for key in ['job-id', 'job-name', 'create-time']:\n        job_metadata[key] = job.get(key)\n    job_metadata['create-time'] = dsub_util.replace_timezone(datetime.datetime.strptime(job['create-time'], '%Y-%m-%d %H:%M:%S.%f'), tzlocal())\n    job_resources = Resources()\n    params = {}\n    labels = job.get('labels', {})\n    if ('dsub-version' in labels):\n        job_metadata['dsub-version'] = labels['dsub-version']\n        del labels['dsub-version']\n    params['labels'] = cls._label_params_from_dict(labels)\n    params['envs'] = cls._env_params_from_dict(job.get('envs', {}))\n    params['inputs'] = cls._input_file_params_from_dict(job.get('inputs', {}), False)\n    params['outputs'] = cls._output_file_params_from_dict(job.get('outputs', {}), False)\n    if (job.get('task-id') is None):\n        job_params = params\n        task_metadata = {'task-id': None}\n        task_params = {}\n    else:\n        job_params = {}\n        task_metadata = {'task-id': str(job.get('task-id'))}\n        task_params = params\n    task_resources = Resources(logging_path=job.get('logging'))\n    task_descriptors = [TaskDescriptor.get_complete_descriptor(task_metadata, task_params, task_resources)]\n    return JobDescriptor.get_complete_descriptor(job_metadata, job_params, job_resources, task_descriptors)", "docstring": "Populate a JobDescriptor from the local provider's original meta.yaml.\n\nThe local job provider had the first incarnation of a YAML file for each\ntask. That idea was extended here in the JobDescriptor and the local\nprovider adopted the JobDescriptor.to_yaml() call to write its meta.yaml.\n\nThe JobDescriptor.from_yaml() detects if it receives a local provider's\n\"v0\" meta.yaml and calls this function.\n\nArgs:\njob: an object produced from decoding meta.yaml.\n\nReturns:\nA JobDescriptor populated as best we can from the old meta.yaml.", "source": "codesearchnet"}
{"code": "def _iterate_through_class(self, class_dict):\n    output_dict = {}\n    for key in class_dict:\n        val = class_dict[key]\n        try:\n            val = val.__dict__\n        except AttributeError:\n            pass\n        if (type(val) is dict):\n            val = self._iterate_through_class(val)\n        if (type(val) is list):\n            temp_val = []\n            for val_i in val:\n                try:\n                    val_i = val_i.__dict__\n                except AttributeError:\n                    pass\n                if (type(val_i) is dict):\n                    val_i = self._iterate_through_class(val_i)\n                temp_val.append(val_i)\n            val = temp_val\n        output_dict[key] = val\n    return output_dict", "docstring": "Recursive function for output dictionary creation.\n\nFunction will check each value in a dictionary to see if it is a\nclass, list, or dictionary object. The idea is to turn all class objects into\ndictionaries. If it is a class object it will pass its ``class.__dict__``\nrecursively through this function again. If it is a dictionary,\nit will pass the dictionary recursively through this functin again.\n\nIf the object is a list, it will iterate through entries checking for class\nor dictionary objects and pass them recursively through this function.\nThis uses the knowledge of the list structures in the code.\n\nArgs:\nclass_dict (obj): Dictionary to iteratively check.\n\nReturns:\nDictionary with all class objects turned into dictionaries.", "source": "codesearchnet"}
{"code": "def cdna_transformation(prev_image, cdna_input, num_masks, color_channels, dna_kernel_size, relu_shift):\n    batch_size = tf.shape(cdna_input)[0]\n    height = int(prev_image.get_shape()[1])\n    width = int(prev_image.get_shape()[2])\n    cdna_kerns = tfl.dense(cdna_input, ((dna_kernel_size * dna_kernel_size) * num_masks), name='cdna_params', activation=None)\n    cdna_kerns = tf.reshape(cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks])\n    cdna_kerns = (tf.nn.relu((cdna_kerns - relu_shift)) + relu_shift)\n    norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)\n    cdna_kerns /= norm_factor\n    cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])\n    cdna_kerns = tf.reshape(cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks])\n    prev_image = tf.transpose(prev_image, [3, 1, 2, 0])\n    transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')\n    transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])\n    transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])\n    transformed = tf.unstack(transformed, axis=(- 1))\n    return transformed", "docstring": "Apply convolutional dynamic neural advection to previous image.\n\nArgs:\nprev_image: previous image to be transformed.\ncdna_input: hidden lyaer to be used for computing CDNA kernels.\nnum_masks: number of masks and hence the number of CDNA transformations.\ncolor_channels: the number of color channels in the images.\ndna_kernel_size: dna kernel size.\nrelu_shift: shift for ReLU function.\nReturns:\nList of images transformed by the predicted CDNA kernels.", "source": "codesearchnet"}
{"code": "class _ImageEmbeddingHandler(_EmbeddingHandler):\n\n    def _validate_column_data(self, batch):\n        if isinstance(batch[0], (int, str, float, bool)):\n            raise TypeError(f'Embeddings can only be generated on dict[str, Image].Got dict[str, {type(batch[0])}] instead.')\n\n    def get_metrics_namespace(self) -> str:\n        return self._underlying.get_metrics_namespace() or 'BeamML_ImageEmbeddingHandler'", "docstring": "A ModelHandler intended to be work on list[dict[str, Image]] inputs.\n\nThe inputs to the model handler are expected to be a list of dicts.\n\nFor example, if the original mode is used with RunInference to take a\nPCollection[E] to a PCollection[P], this ModelHandler would take a\nPCollection[dict[str, E]] to a PCollection[dict[str, P]].\n\n_ImageEmbeddingHandler will accept an EmbeddingsManager instance, which\ncontains the details of the model to be loaded and the inference_fn to be\nused. The purpose of _ImageEmbeddingHandler is to generate embeddings for\nimage inputs using the EmbeddingsManager instance.\n\nIf the input is not an Image representation column, a RuntimeError will be\nraised.\n\nThis is an internal class and offers no backwards compatibility guarantees.\n\nArgs:\nembeddings_manager: An EmbeddingsManager instance.", "source": "github-repos"}
{"code": "def plugin_test_validation(self, handler):\n        \n        methods = {name:func for name, func in inspect.getmembers(handler, callable)}\n        if 'test' not in methods.keys():\n            print 'Failure for plugin: %s' % (handler.__name__)\n            print 'Validation Error: The file must have a top level test() method'\n            return None\n        else:\n            return methods['test']", "docstring": "Plugin validation.\n\nEvery workbench plugin must have top level test method.\n\nArgs:\nhandler: The loaded plugin.\n\nReturns:\nNone if the test fails or the test function.", "source": "juraj-google-style"}
{"code": "def end_entry(self):\n    if (self.in_progress is None):\n        return Error.NO_ERROR\n    if (self.in_progress.data_space() == 2):\n        return Error.INPUT_BUFFER_WRONG_SIZE\n    for entry in self.entries:\n        if ((entry.target == self.in_progress.target) and (entry.var_id == self.in_progress.var_id)):\n            entry.valid = False\n    self.entries.append(self.in_progress)\n    self.data_index += (self.in_progress.data_space() - 2)\n    self.in_progress = None\n    return Error.NO_ERROR", "docstring": "Finish a previously started config database entry.\n\nThis commits the currently in progress entry.  The expected flow is\nthat start_entry() is called followed by 1 or more calls to add_data()\nfollowed by a single call to end_entry().\n\nReturns:\nint: An error code", "source": "codesearchnet"}
{"code": "def add(self, private_key):\n        \n        if not isinstance(private_key, PaillierPrivateKey):\n            raise TypeError(\"private_key should be of type PaillierPrivateKey, \"\n                            \"not %s\" % type(private_key))\n        self.__keyring[private_key.public_key] = private_key", "docstring": "Add a key to the keyring.\n\nArgs:\nprivate_key (PaillierPrivateKey): a key to add to this keyring.", "source": "juraj-google-style"}
{"code": "def load_fasta_file_as_dict_of_seqrecords(filename):\n    \n\n    results = {}\n    records = load_fasta_file(filename)\n    for r in records:\n        results[r.id] = r\n\n    return results", "docstring": "Load a FASTA file and return the sequences as a dict of {ID: SeqRecord}\n\nArgs:\nfilename (str): Path to the FASTA file to load\n\nReturns:\ndict: Dictionary of IDs to their SeqRecords", "source": "juraj-google-style"}
{"code": "def __method_descriptor(self, service, method_info, operation_id,\n                          protorpc_method_info, security_definitions):\n    \n    descriptor = {}\n\n    request_message_type = (resource_container.ResourceContainer.\n                            get_request_message(protorpc_method_info.remote))\n    request_kind = self.__get_request_kind(method_info)\n    remote_method = protorpc_method_info.remote\n\n    path = method_info.get_path(service.api_info)\n\n    descriptor['parameters'] = self.__request_message_descriptor(\n        request_kind, request_message_type,\n        method_info.method_id(service.api_info),\n        path)\n    descriptor['responses'] = self.__response_message_descriptor(\n        remote_method.response_type(), method_info.method_id(service.api_info))\n    descriptor['operationId'] = operation_id\n\n    \n    api_key_required = method_info.is_api_key_required(service.api_info)\n    if method_info.audiences is not None:\n      descriptor['security'] = self.__security_descriptor(\n          method_info.audiences, security_definitions,\n          api_key_required=api_key_required)\n    elif service.api_info.audiences is not None or api_key_required:\n      descriptor['security'] = self.__security_descriptor(\n          service.api_info.audiences, security_definitions,\n          api_key_required=api_key_required)\n\n    \n    if method_info.metric_costs:\n      descriptor['x-google-quota'] = self.__x_google_quota_descriptor(\n          method_info.metric_costs)\n\n    return descriptor", "docstring": "Describes a method.\n\nArgs:\nservice: endpoints.Service, Implementation of the API as a service.\nmethod_info: _MethodInfo, Configuration for the method.\noperation_id: string, Operation ID of the method\nprotorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC\ndescription of the method.\nsecurity_definitions: list of dicts, security definitions for the API.\n\nReturns:\nDictionary describing the method.", "source": "juraj-google-style"}
{"code": "def fetch_time_output(marker, format_s, ins):\n    \n    from parse import parse\n\n    timings = [x for x in ins if marker in x]\n    res = [parse(format_s, t) for t in timings]\n    return [_f for _f in res if _f]", "docstring": "Fetch the output /usr/bin/time from a.\n\nArgs:\nmarker: The marker that limits the time output\nformat_s: The format string used to parse the timings\nins: A list of lines we look for the output.\n\nReturns:\nA list of timing tuples", "source": "juraj-google-style"}
{"code": "def post(self, json=None):\n        \n        return self._call('post', url=self.endpoint, json=json)", "docstring": "Send a POST request and return the JSON decoded result.\n\nArgs:\njson (dict, optional): Object to encode and send in request.\n\nReturns:\nmixed: JSON decoded response data.", "source": "juraj-google-style"}
{"code": "def get_variants(self, chromosome=None, start=None, end=None):\n    query = {}\n    if chromosome:\n        query['chrom'] = chromosome\n    if start:\n        query['start'] = {'$lte': end}\n        query['end'] = {'$gte': start}\n    LOG.info('Find all variants {}'.format(query))\n    return self.db.variant.find(query).sort([('start', ASCENDING)])", "docstring": "Return all variants in the database\nIf no region is specified all variants will be returned.\n\nArgs:\nchromosome(str)\nstart(int)\nend(int)\n\n\nReturns:\nvariants(Iterable(Variant))", "source": "codesearchnet"}
{"code": "def generate_hpo_gene_list(self, *hpo_terms):\n        \n        genes = {}\n        for term in hpo_terms:\n            hpo_obj = self.hpo_term(term)\n            if hpo_obj:\n                for hgnc_id in hpo_obj['genes']:\n                    if hgnc_id in genes:\n                        genes[hgnc_id] += 1\n                    else:\n                        genes[hgnc_id] = 1\n            else:\n                LOG.warning(\"Term %s could not be found\", term)\n\n        sorted_genes = sorted(genes.items(), key=operator.itemgetter(1), reverse=True)\n        return sorted_genes", "docstring": "Generate a sorted list with namedtuples of hpogenes\n\nEach namedtuple of the list looks like (hgnc_id, count)\n\nArgs:\nhpo_terms(iterable(str))\n\nReturns:\nhpo_genes(list(HpoGene))", "source": "juraj-google-style"}
{"code": "def get_username(self, userid):\n        \n\n        username = self.user_map.get(userid)\n        if not username:\n            users = self.get_users()\n            if users:\n                members = {\n                    m['id']: m['name']\n                    for m in users.get('members', [{}])\n                    if m.get('id')\n                    and m.get('name')\n                }\n                if members:\n                    self.user_map.update(members)\n\n                username = self.user_map.get(userid, userid)\n\n        return username", "docstring": "Perform a lookup of users to resolve a userid to a username\n\nArgs:\nuserid (string): Slack userid to lookup.\n\nReturns:\nstring: Human-friendly name of the user", "source": "juraj-google-style"}
{"code": "def classifier_factory(clf):\n    \n    required_methods = ['fit', 'score', 'predict']\n\n    for method in required_methods:\n        if not hasattr(clf, method):\n            raise TypeError('\"{}\" is not in clf. Did you pass a '\n                            'classifier instance?'.format(method))\n\n    optional_methods = ['predict_proba']\n\n    for method in optional_methods:\n        if not hasattr(clf, method):\n            warnings.warn('{} not in clf. Some plots may '\n                          'not be possible to generate.'.format(method))\n\n    additional_methods = {\n        'plot_learning_curve': plot_learning_curve,\n        'plot_confusion_matrix': plot_confusion_matrix_with_cv,\n        'plot_roc_curve': plot_roc_curve_with_cv,\n        'plot_ks_statistic': plot_ks_statistic_with_cv,\n        'plot_precision_recall_curve': plot_precision_recall_curve_with_cv,\n        'plot_feature_importances': plot_feature_importances\n    }\n\n    for key, fn in six.iteritems(additional_methods):\n        if hasattr(clf, key):\n            warnings.warn('\"{}\" method already in clf. '\n                          'Overriding anyway. This may '\n                          'result in unintended behavior.'.format(key))\n        setattr(clf, key, types.MethodType(fn, clf))\n    return clf", "docstring": "Embeds scikit-plot instance methods in an sklearn classifier.\n\nArgs:\nclf: Scikit-learn classifier instance\n\nReturns:\nThe same scikit-learn classifier instance passed in **clf**\nwith embedded scikit-plot instance methods.\n\nRaises:\nValueError: If **clf** does not contain the instance methods\nnecessary for scikit-plot instance methods.", "source": "juraj-google-style"}
{"code": "def exe_cmd(*cmds):\n    \n    cmd = ' '.join(cmds)\n    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n    (out, err) = proc.communicate()\n    if not err:\n        return out\n    return err", "docstring": "Executes commands in a new shell. Directing stderr to PIPE.\n\nThis is fastboot's own exe_cmd because of its peculiar way of writing\nnon-error info to stderr.\n\nArgs:\ncmds: A sequence of commands and arguments.\n\nReturns:\nThe output of the command run.\n\nRaises:\nException: An error occurred during the command execution.", "source": "juraj-google-style"}
{"code": "def clear(self, size=(- 1), *, offset=0, chunk=None) -> None:\n    self.mglo.clear(size, offset, chunk)", "docstring": "Clear the content.\n\nArgs:\nsize (int): The size. Value ``-1`` means all.\n\nKeyword Args:\noffset (int): The offset.\nchunk (bytes): The chunk to use repeatedly.", "source": "codesearchnet"}
{"code": "def make_encoder(base_depth, activation, latent_size, code_size):\n  \n  conv = functools.partial(\n      tf.keras.layers.Conv2D, padding=\"SAME\", activation=activation)\n\n  encoder_net = tf.keras.Sequential([\n      conv(base_depth, 5, 1),\n      conv(base_depth, 5, 2),\n      conv(2 * base_depth, 5, 1),\n      conv(2 * base_depth, 5, 2),\n      conv(4 * latent_size, 7, padding=\"VALID\"),\n      tf.keras.layers.Flatten(),\n      tf.keras.layers.Dense(latent_size * code_size, activation=None),\n      tf.keras.layers.Reshape([latent_size, code_size])\n  ])\n\n  def encoder(images):\n    \n    images = 2 * tf.cast(images, dtype=tf.float32) - 1\n    codes = encoder_net(images)\n    return codes\n\n  return encoder", "docstring": "Creates the encoder function.\n\nArgs:\nbase_depth: Layer base depth in encoder net.\nactivation: Activation function in hidden layers.\nlatent_size: The number of latent variables in the code.\ncode_size: The dimensionality of each latent variable.\n\nReturns:\nencoder: A `callable` mapping a `Tensor` of images to a `Tensor` of shape\n`[..., latent_size, code_size]`.", "source": "juraj-google-style"}
{"code": "def num_embedding_devices_per_chip(self):\n    return self.tpu_hardware_feature_proto.num_embedding_devices_per_chip", "docstring": "Number of embedding accelerator devices per chip.\n\nReturns:\nNumber of embedding devices per chip.", "source": "github-repos"}
{"code": "def __init__(self, projection=None, orientation0=(0, 0, -1), **kwargs):\n        \n        kwargs['orientation0'] = orientation0\n        super(Camera, self).__init__(**kwargs)\n        self.projection = PerspectiveProjection() if not projection else projection\n        self.reset_uniforms()", "docstring": "Returns a camera object\n\nArgs:\nprojection (obj): the projection type for the camera. It can either be an instance of OrthoProjection or PerspeectiveProjection\norientation0 (tuple):\n\nReturns:\nCamera instance", "source": "juraj-google-style"}
{"code": "def set_sflow(self, name, value=None, default=False, disable=False):\n        \n        if value not in [True, False, None]:\n            raise ValueError\n\n        commands = ['interface %s' % name]\n        commands.append(self.command_builder('sflow enable', value=value,\n                                             default=default, disable=disable))\n        return self.configure(commands)", "docstring": "Configures the sFlow state on the interface\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\n\nvalue (boolean): True if sFlow should be enabled otherwise False\n\ndefault (boolean): Specifies the default value for sFlow\n\ndisable (boolean): Specifies to disable sFlow\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "juraj-google-style"}
{"code": "def _ParseOriginalFilename(self, file_object, format_version):\n    file_offset = file_object.tell()\n    if (format_version == 1):\n        data_type_map = self._GetDataTypeMap('recycle_bin_metadata_utf16le_string')\n    else:\n        data_type_map = self._GetDataTypeMap('recycle_bin_metadata_utf16le_string_with_size')\n    try:\n        (original_filename, _) = self._ReadStructureFromFileObject(file_object, file_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse original filename with error: {0!s}'.format(exception))\n    if (format_version == 1):\n        return original_filename.rstrip('\\x00')\n    return original_filename.string.rstrip('\\x00')", "docstring": "Parses the original filename.\n\nArgs:\nfile_object (FileIO): file-like object.\nformat_version (int): format version.\n\nReturns:\nstr: filename or None on error.\n\nRaises:\nParseError: if the original filename cannot be read.", "source": "codesearchnet"}
{"code": "def _assert_weights_created(self):\n    if self.dynamic:\n        return\n    if 'build' in self.__class__.__dict__ and self.__class__ != Model and (not self.built):\n        raise ValueError('Weights for model %s have not yet been created. Weights are created when the Model is first called on inputs or `build()` is called with an `input_shape`.' % self.name)", "docstring": "Asserts that all the weights for the model have been created.\n\nFor a non-dynamic model, the weights must already be created after the\nlayer has been called. For a dynamic model, the exact list of weights can\nnever be known for certain since it may change at any time during execution.\n\nWe run this check right before accessing weights or getting the Numpy value\nfor the current weights. Otherwise, if the layer has never been called,\nthe user would just get an empty list, which is misleading.\n\nRaises:\nValueError: if the weights of the network has not yet been created.", "source": "github-repos"}
{"code": "def output(self, _filename):\n        \n\n        txt = ''\n        for contract in self.slither.contracts_derived:\n            txt += '\\n{}:\\n'.format(contract.name)\n            table = PrettyTable(['Name', 'Type'])\n            for variable in contract.state_variables:\n                if not variable.is_constant:\n                    table.add_row([variable.name, str(variable.type)])\n            txt += str(table) + '\\n'\n\n        self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'):\n    triplet = []\n    try:\n        if (len(hexdigits) == 3):\n            for digit in hexdigits:\n                digit = int(digit, 16)\n                triplet.append(((digit * 16) + digit))\n        elif (len(hexdigits) == 6):\n            triplet.extend((int(hexdigits[i:(i + 2)], 16) for i in (0, 2, 4)))\n        else:\n            raise ValueError(('wrong length: %r' % hexdigits))\n    except ValueError:\n        return None\n    return find_nearest_color_index(*triplet, color_table=color_table, method=method)", "docstring": "Given a three or six-character hex digit string, return the nearest\ncolor index.\n\nArguments:\nhexdigits:  a three/6 digit hex string, e.g. 'b0b', '123456'\n\nReturns:\nint, None: index, or None on error.", "source": "codesearchnet"}
{"code": "def is_github_task(task):\n    \n    return any((\n        \n        \n        task.get('schedulerId') == 'taskcluster-github',\n        \n        task.get('extra', {}).get('tasks_for', '').startswith('github-'),\n        is_github_url(task.get('metadata', {}).get('source', '')),\n    ))", "docstring": "Determine if a task is related to GitHub.\n\nThis function currently looks into the ``schedulerId``, ``extra.tasks_for``, and\n``metadata.source``.\n\nArgs:\ntask (dict): the task definition to check.\n\nReturns:\nbool: True if a piece of data refers to GitHub", "source": "juraj-google-style"}
{"code": "def inputs(self) -> Mapping[str, Mapping[int, str]]:\n    raise NotImplementedError()", "docstring": "Mapping containing the axis definition of the input tensors to provide to the model\n\nReturns:\nFor each input: its name associated to the axes symbolic name and the axis position within the tensor", "source": "github-repos"}
{"code": "def set_pattern_additional_cycles(self, patternnumber, value):\n        \n        _checkPatternNumber(patternnumber)\n        minimalmodbus._checkInt(value, minvalue=0, maxvalue=99, description='number of additional cycles') \n            \n        address = _calculateRegisterAddress('cycles', patternnumber)\n        self.write_register(address, value, 0)", "docstring": "Set the number of additional cycles for a given pattern.\n\nArgs:\n* patternnumber (integer): 0-7\n* value (integer): 0-99", "source": "juraj-google-style"}
{"code": "def generate_files(generator, output_filenames, max_cases=None, cycle_every_n=1):\n    if outputs_exist(output_filenames):\n        tf.logging.info('Skipping generator because outputs files exists at {}'.format(output_filenames))\n        return\n    tmp_filenames = [(fname + '.incomplete') for fname in output_filenames]\n    num_shards = len(output_filenames)\n    if (num_shards > 0):\n        if ('-train' in output_filenames[0]):\n            tag = 'train'\n        elif ('-dev' in output_filenames[0]):\n            tag = 'eval'\n        else:\n            tag = 'other'\n    writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]\n    (counter, shard) = (0, 0)\n    for case in generator:\n        if (case is None):\n            continue\n        if ((counter % 100000) == 0):\n            tf.logging.info(('Generating case %d.' % counter))\n        counter += 1\n        if (max_cases and (counter > max_cases)):\n            break\n        example = to_example(case)\n        writers[shard].write(example.SerializeToString())\n        if ((counter % cycle_every_n) == 0):\n            shard = ((shard + 1) % num_shards)\n    for writer in writers:\n        writer.close()\n    for (tmp_name, final_name) in zip(tmp_filenames, output_filenames):\n        tf.gfile.Rename(tmp_name, final_name)\n    if (num_shards > 0):\n        if (tag == 'train'):\n            mlperf_log.transformer_print(key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)\n        elif (tag == 'eval'):\n            mlperf_log.transformer_print(key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)\n    tf.logging.info('Generated %s Examples', counter)", "docstring": "Generate cases from a generator and save as TFRecord files.\n\nGenerated cases are transformed to tf.Example protos and saved as TFRecords\nin sharded files named output_dir/output_name-00..N-of-00..M=num_shards.\n\nArgs:\ngenerator: a generator yielding (string -> int/float/str list) dictionaries.\noutput_filenames: List of output file paths.\nmax_cases: maximum number of cases to get from the generator;\nif None (default), we use the generator until StopIteration is raised.\ncycle_every_n: how many cases from the generator to take before\nswitching to the next shard; by default set to 1, switch every case.", "source": "codesearchnet"}
{"code": "def calc_checksum(sentence):\n    if sentence.startswith('$'):\n        sentence = sentence[1:]\n    sentence = sentence.split('*')[0]\n    return reduce(xor, map(ord, sentence))", "docstring": "Calculate a NMEA 0183 checksum for the given sentence.\n\nNMEA checksums are a simple XOR of all the characters in the sentence\nbetween the leading \"$\" symbol, and the \"*\" checksum separator.\n\nArgs:\nsentence (str): NMEA 0183 formatted sentence", "source": "codesearchnet"}
{"code": "def create_gpu_capa_map(match_list, generate_csv=False, filename='compute_capability'):\n    gpu_capa = collections.OrderedDict()\n    include = False\n    gpu = ''\n    cnt = 0\n    mismatch_cnt = 0\n    for match in match_list:\n        if 'Products' in match:\n            if not include:\n                include = True\n            continue\n        elif 'www' in match:\n            include = False\n            break\n        if include:\n            if gpu:\n                if gpu in gpu_capa:\n                    gpu_capa[gpu].append(match)\n                else:\n                    gpu_capa[gpu] = [match]\n                gpu = ''\n                cnt += 1\n                if len(list(gpu_capa.keys())) < cnt:\n                    mismatch_cnt += 1\n                    cnt = len(list(gpu_capa.keys()))\n            else:\n                gpu = match\n    if generate_csv:\n        f_name = filename + '.csv'\n        write_csv_from_dict(f_name, gpu_capa)\n    return gpu_capa", "docstring": "Generates a map between GPU types and corresponding compute capability.\n\nThis method is used for retrieving CUDA compute capability from the web only.\n\nArgs:\nmatch_list: List of all CUDA compute capability detected from the webpage.\ngenerate_csv: Boolean for creating csv file to store results.\nfilename: String that is the name of the csv file (without `.csv` ending).\n\nReturns:\nOrderedDict that lists in the incoming order of all CUDA compute capability\nprovided as `match_list`.", "source": "github-repos"}
{"code": "def pop(self):\n    return self._queue.popleft()", "docstring": "Removes and returns the oldest value from the data window (FIFO).\n\nReturns:\nThe oldest value from the window.", "source": "github-repos"}
{"code": "def get_qa_logit_layer(self) -> nn.Module:\n    if hasattr(self, 'answer_head'):\n        return self.answer_head.logit_fc[-1]", "docstring": "Returns the linear layer that produces question answering logits.\n\nReturns:\n`nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT\ndoes not have a visual answering head.", "source": "github-repos"}
{"code": "def bulk_load_docs(es, docs):\n    \n\n    chunk_size = 200\n\n    try:\n        results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size)\n        log.debug(f\"Elasticsearch documents loaded: {results[0]}\")\n\n        \n        if len(results[1]) > 0:\n            log.error(\"Bulk load errors {}\".format(results))\n    except elasticsearch.ElasticsearchException as e:\n        log.error(\"Indexing error: {}\\n\".format(e))", "docstring": "Bulk load docs\n\nArgs:\nes: elasticsearch handle\ndocs: Iterator of doc objects - includes index_name", "source": "juraj-google-style"}
{"code": "def get_parameter_bounds(self, include_frozen=False):\n    if include_frozen:\n        return self.parameter_bounds\n    return list((p for (p, f) in zip(self.parameter_bounds, self.unfrozen_mask) if f))", "docstring": "Get a list of the parameter bounds\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "codesearchnet"}
{"code": "def wait_for_plug_update(self, plug_name, remote_state, timeout_s):\n    plug = self._plugs_by_name.get(plug_name)\n    if (plug is None):\n        raise InvalidPlugError(('Cannot wait on unknown plug \"%s\".' % plug_name))\n    if (not isinstance(plug, FrontendAwareBasePlug)):\n        raise InvalidPlugError(('Cannot wait on a plug %s that is not an subclass of FrontendAwareBasePlug.' % plug_name))\n    (state, update_event) = plug.asdict_with_event()\n    if (state != remote_state):\n        return state\n    if update_event.wait(timeout_s):\n        return plug._asdict()", "docstring": "Wait for a change in the state of a frontend-aware plug.\n\nArgs:\nplug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.\nremote_state: The last observed state.\ntimeout_s: Number of seconds to wait for an update.\n\nReturns:\nAn updated state, or None if the timeout runs out.\n\nRaises:\nInvalidPlugError: The plug can't be waited on either because it's not in\nuse or it's not a frontend-aware plug.", "source": "codesearchnet"}
{"code": "def generate_index(fn, cols=None, names=None, sep=\" \"):\n    \n    \n    assert cols is not None, \"'cols' was not set\"\n    assert names is not None, \"'names' was not set\"\n    assert len(cols) == len(names)\n\n    \n    bgzip, open_func = get_open_func(fn, return_fmt=True)\n\n    \n    data = pd.read_csv(fn, sep=sep, engine=\"c\", usecols=cols, names=names,\n                       compression=\"gzip\" if bgzip else None)\n\n    \n    f = open_func(fn, \"rb\")\n    data[\"seek\"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]\n    f.close()\n\n    \n    write_index(get_index_fn(fn), data)\n\n    return data", "docstring": "Build a index for the given file.\n\nArgs:\nfn (str): the name of the file.\ncols (list): a list containing column to keep (as int).\nnames (list): the name corresponding to the column to keep (as str).\nsep (str): the field separator.\n\nReturns:\npandas.DataFrame: the index.", "source": "juraj-google-style"}
{"code": "def get_axis_value_discrete(self, axis):\n    if (self.type != EventType.POINTER_AXIS):\n        raise AttributeError(_wrong_meth.format(self.type))\n    return self._libinput.libinput_event_pointer_get_axis_value_discrete(self._handle, axis)", "docstring": "Return the axis value in discrete steps for a given axis event.\n\nHow a value translates into a discrete step depends on the source.\nIf the source is :attr:`~libinput.constant.PointerAxisSource.WHEEL`,\nthe discrete value correspond to the number of physical mouse wheel\nclicks.\n\nIf the source is :attr:`~libinput.constant.PointerAxisSource.CONTINUOUS`\nor :attr:`~libinput.constant.PointerAxisSource.FINGER`, the discrete\nvalue is always 0.\n\nArgs:\naxis (~libinput.constant.PointerAxis): The axis who's value to get.\nReturns:\nfloat: The discrete value for the given event.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def _DeserializeResponse(self, payload):\n        \n        \n        status_line, payload = payload.split('\\n', 1)\n        _, status, _ = status_line.split(' ', 2)\n\n        \n        parser = email_parser.Parser()\n        msg = parser.parsestr(payload)\n\n        \n        info = dict(msg)\n        info['status'] = status\n\n        \n        content = msg.get_payload()\n\n        return http_wrapper.Response(info, content, self.__batch_url)", "docstring": "Convert string into Response and content.\n\nArgs:\npayload: Header and body string to be deserialized.\n\nReturns:\nA Response object", "source": "juraj-google-style"}
{"code": "def ReadLine(self, file_object):\n    (line, _, self.lines) = self.lines.partition('\\n')\n    if (not line):\n        self.ReadLines(file_object)\n        (line, _, self.lines) = self.lines.partition('\\n')\n    return line", "docstring": "Reads a line.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nstr: line read from the lines buffer.", "source": "codesearchnet"}
{"code": "def is_scheduled(configuration, task={}):\n    days = configuration.recipe.get('setup', {}).get('day', [])\n    hours = [int(h) for h in task.get('hour', configuration.recipe.get('setup', {}).get('hour', []))]\n    if days == [] or configuration.date.strftime('%a') in days:\n        if hours == [] or configuration.hour in hours:\n            return True\n    return False", "docstring": "Wrapper for day_hour_scheduled that returns True if current time zone safe hour is in recipe schedule.\n\nUsed as a helper for any cron job running projects.  Keeping this logic in\nproject\nhelps avoid time zone detection issues and scheduling discrepencies between\nmachines.\n\nArgs:\n* recipe: (Recipe JSON) The JSON of a recipe.\n* task: ( dictionary / JSON ) The specific task being considered for execution.\n\nReturns:\n- True if task is scheduled to run current hour, else False.", "source": "github-repos"}
{"code": "def make_test_function(self):\n    if self.test_function is not None:\n        return self.test_function\n\n    def step_function(model, iterator):\n        \n\n        def run_step(data):\n            outputs = model.test_step(data)\n            with ops.control_dependencies(_minimum_control_deps(outputs)):\n                model._test_counter.assign_add(1)\n            return outputs\n        data = next(iterator)\n        outputs = model.distribute_strategy.run(run_step, args=(data,))\n        outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='first')\n        return outputs\n    if self._steps_per_execution.numpy().item() == 1:\n\n        def test_function(iterator):\n            \n            return step_function(self, iterator)\n    else:\n\n        def test_function(iterator):\n            \n            for _ in math_ops.range(self._steps_per_execution):\n                outputs = step_function(self, iterator)\n            return outputs\n    if not self.run_eagerly:\n        test_function = def_function.function(test_function, experimental_relax_shapes=True)\n    self.test_function = test_function\n    if self._cluster_coordinator:\n        self.test_function = lambda iterator: self._cluster_coordinator.schedule(test_function, args=(iterator,))\n    return self.test_function", "docstring": "Creates a function that executes one step of evaluation.\n\nThis method can be overridden to support custom evaluation logic.\nThis method is called by `Model.evaluate` and `Model.test_on_batch`.\n\nTypically, this method directly controls `tf.function` and\n`tf.distribute.Strategy` settings, and delegates the actual evaluation\nlogic to `Model.test_step`.\n\nThis function is cached the first time `Model.evaluate` or\n`Model.test_on_batch` is called. The cache is cleared whenever\n`Model.compile` is called.\n\nReturns:\nFunction. The function created by this method should accept a\n`tf.data.Iterator`, and return a `dict` containing values that will\nbe passed to `tf.keras.Callbacks.on_test_batch_end`.", "source": "github-repos"}
{"code": "def __init__(self, key, items):\n        \n        self._key = key\n        sequence = list(items)\n        super(Grouping, self).__init__(sequence)", "docstring": "Create a Grouping with a given key and a collection of members.\n\nArgs:\nkey: The key corresponding to this Grouping\n\nitems: An iterable collection of the members of the group.", "source": "juraj-google-style"}
{"code": "def to_dms(angle, style='dms'):\n    sign = (1 if (angle >= 0) else (- 1))\n    angle = (abs(angle) * 3600)\n    (minutes, seconds) = divmod(angle, 60)\n    (degrees, minutes) = divmod(minutes, 60)\n    if (style == 'dms'):\n        return tuple(((sign * abs(i)) for i in (int(degrees), int(minutes), seconds)))\n    elif (style == 'dm'):\n        return tuple(((sign * abs(i)) for i in (int(degrees), (minutes + (seconds / 60)))))\n    else:\n        raise ValueError(('Unknown style type %r' % style))", "docstring": "Convert decimal angle to degrees, minutes and possibly seconds.\n\nArgs:\nangle (float): Angle to convert\nstyle (str): Return fractional or whole minutes values\n\nReturns:\ntuple of int: Angle converted to degrees, minutes and possibly seconds\n\nRaises:\nValueError: Unknown value for ``style``", "source": "codesearchnet"}
{"code": "def showbox(self, force_rerun=False):\n        \n        log.debug('{}: running box maker...'.format(self.id))\n\n        if not self.sphsel_path:\n            return ValueError('Please run sphere_selector_using_residues')\n\n        boxfile = op.join(self.dock_dir, \"{}_box.pdb\".format(self.id))\n        boxscript = op.join(self.dock_dir, \"{}_box.in\".format(self.id))\n\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=boxfile):\n            with open(boxscript, \"w\") as f:\n                f.write(\"Y\\n\")\n                f.write(\"0\\n\")\n                f.write(\"{}\\n\".format(op.basename(self.sphsel_path)))\n                f.write(\"1\\n\")\n                f.write(\"{}\".format(op.basename(boxfile)))\n\n            cmd = \"showbox < {}\".format(boxscript)\n            os.chdir(self.dock_dir)\n            os.system(cmd)\n\n        if ssbio.utils.is_non_zero_file(boxfile):\n            self.box_path = boxfile\n            log.debug('{}: successful box creation'.format(self.box_path))\n        else:\n            log.critical('{}: showbox failed to run on selected spheres file'.format(self.sphsel_path))", "docstring": "Create the dummy PDB box around the selected spheres.\n\nArgs:\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"}
{"code": "def process_result_value(self, value, dialect):\n    masks = list()\n    if value:\n        for e in enums.CryptographicUsageMask:\n            if (e.value & value):\n                masks.append(e)\n    return masks", "docstring": "Returns a new list of enums.CryptographicUsageMask Enums. This converts\nthe integer value into the list of enums.\n\nArgs:\nvalue(int): The integer value stored in the database that is used\nto create the list of enums.CryptographicUsageMask Enums.\ndialect(string): SQL dialect", "source": "codesearchnet"}
{"code": "def _set_mode(self, discover_mode, connect_mode):\n    payload = struct.pack('<BB', discover_mode, connect_mode)\n    response = self._send_command(6, 1, payload)\n    (result,) = unpack('<H', response.payload)\n    if (result != 0):\n        return (False, {'reason': 'Error code from BLED112 setting mode', 'code': result})\n    return (True, None)", "docstring": "Set the mode of the BLED112, used to enable and disable advertising\n\nTo enable advertising, use 4, 2.\nTo disable advertising use 0, 0.\n\nArgs:\ndiscover_mode (int): The discoverability mode, 0 for off, 4 for on (user data)\nconnect_mode (int): The connectability mode, 0 for of, 2 for undirected connectable", "source": "codesearchnet"}
{"code": "def EnqueueBreakpointUpdate(self, breakpoint):\n    \n    with self._transmission_thread_startup_lock:\n      if self._transmission_thread is None:\n        self._transmission_thread = threading.Thread(\n            target=self._TransmissionThreadProc)\n        self._transmission_thread.name = 'Cloud Debugger transmission thread'\n        self._transmission_thread.daemon = True\n        self._transmission_thread.start()\n\n    self._transmission_queue.append((breakpoint, 0))\n    self._new_updates.set()", "docstring": "Asynchronously updates the specified breakpoint on the backend.\n\nThis function returns immediately. The worker thread is actually doing\nall the work. The worker thread is responsible to retry the transmission\nin case of transient errors.\n\nArgs:\nbreakpoint: breakpoint in either final or non-final state.", "source": "juraj-google-style"}
{"code": "def create_primes(threshold):\n    if (threshold == 2):\n        return [2]\n    elif (threshold < 2):\n        return []\n    numbers = list(range(3, (threshold + 1), 2))\n    root_of_threshold = (threshold ** 0.5)\n    half = int((((threshold + 1) / 2) - 1))\n    idx = 0\n    counter = 3\n    while (counter <= root_of_threshold):\n        if numbers[idx]:\n            idy = int((((counter * counter) - 3) / 2))\n            numbers[idy] = 0\n            while (idy < half):\n                numbers[idy] = 0\n                idy += counter\n        idx += 1\n        counter = ((2 * idx) + 3)\n    return ([2] + [number for number in numbers if number])", "docstring": "Generate prime values using sieve of Eratosthenes method.\n\nArgs:\nthreshold (int):\nThe upper bound for the size of the prime values.\n\nReturns (List[int]):\nAll primes from 2 and up to ``threshold``.", "source": "codesearchnet"}
{"code": "def convert_item_to_command_line_arg(self, action, key, value):\n    args = []\n    if (action is None):\n        command_line_key = self.get_command_line_key_for_unknown_config_file_setting(key)\n    else:\n        command_line_key = action.option_strings[(- 1)]\n    if ((action is not None) and isinstance(action, ACTION_TYPES_THAT_DONT_NEED_A_VALUE)):\n        if (value.lower() in ('true', 'yes', '1')):\n            args.append(command_line_key)\n        elif (value.lower() in ('false', 'no', '0')):\n            pass\n        else:\n            self.error((\"Unexpected value for %s: '%s'. Expecting 'true', 'false', 'yes', 'no', '1' or '0'\" % (key, value)))\n    elif isinstance(value, list):\n        if ((action is None) or isinstance(action, argparse._AppendAction)):\n            for list_elem in value:\n                args.append(command_line_key)\n                args.append(str(list_elem))\n        elif ((isinstance(action, argparse._StoreAction) and (action.nargs in ('+', '*'))) or (isinstance(action.nargs, int) and (action.nargs > 1))):\n            args.append(command_line_key)\n            for list_elem in value:\n                args.append(str(list_elem))\n        else:\n            self.error((\"%s can't be set to a list '%s' unless its action type is changed to 'append' or nargs is set to '*', '+', or > 1\" % (key, value)))\n    elif isinstance(value, str):\n        args.append(command_line_key)\n        args.append(value)\n    else:\n        raise ValueError(('Unexpected value type %s for value: %s' % (type(value), value)))\n    return args", "docstring": "Converts a config file or env var key + value to a list of\ncommandline args to append to the commandline.\n\nArgs:\naction: The argparse Action object for this setting, or None if this\nconfig file setting doesn't correspond to any defined\nconfigargparse arg.\nkey: string (config file key or env var name)\nvalue: parsed value of type string or list", "source": "codesearchnet"}
{"code": "def build_ring_all_reduce(input_tensors, num_workers, num_subchunks, gpu_perm, red_op, un_op=None):\n    if len(input_tensors) < 2:\n        raise ValueError('input_tensors must be length 2 or longer')\n    input_tensors, shape = _flatten_tensors(input_tensors)\n    devices = [t.device for t in input_tensors]\n    pred_by_s_d, rank_by_s_d = _ring_permutations(num_workers, num_subchunks, gpu_perm)\n    chunks_by_dev, pad_len = _build_ring_gather(input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op)\n    if un_op:\n        chunks_by_dev = _apply_unary_to_chunks(un_op, chunks_by_dev)\n    output_tensors = _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev)\n    if pad_len > 0:\n        output_tensors = _strip_padding(output_tensors, pad_len)\n    if len(shape) != 1:\n        output_tensors = _reshape_tensors(output_tensors, shape)\n    return output_tensors", "docstring": "Construct a subgraph performing a ring-style all-reduce of input_tensors.\n\nArgs:\ninput_tensors: a list of `tf.Tensor` objects, which must all\nhave the same shape and type.\nnum_workers: number of worker tasks spanned by input_tensors.\nnum_subchunks: number of subchunks each device should process in one tick.\ngpu_perm: a list of ints giving a ring-wise rank ordering of GPUs at\neach worker.  All workers must have the same number of\nGPUs with the same rank ordering.  If NVLINK is available, this should\nbe a ring order supported by NVLINK edges.\nred_op: a binary operator for elementwise reduction.\nun_op: an optional unary operator to apply to fully reduced values.\n\nRaises:\nValueError: empty input_tensors or they don't all have same\nsize.\n\nReturns:\na list of `tf.Tensor` identical sum-reductions of input_tensors.", "source": "github-repos"}
{"code": "def close(self, file_des):\n    file_handle = self.filesystem.get_open_file(file_des)\n    file_handle.close()", "docstring": "Close a file descriptor.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\n\nRaises:\nOSError: bad file descriptor.\nTypeError: if file descriptor is not an integer.", "source": "codesearchnet"}
{"code": "def delete_case(self, case_id=None, institute_id=None, display_name=None):\n    query = {}\n    if case_id:\n        query['_id'] = case_id\n        LOG.info('Deleting case %s', case_id)\n    else:\n        if (not (institute_id and display_name)):\n            raise ValueError('Have to provide both institute_id and display_name')\n        LOG.info('Deleting case %s institute %s', display_name, institute_id)\n        query['owner'] = institute_id\n        query['display_name'] = display_name\n    result = self.case_collection.delete_one(query)\n    return result", "docstring": "Delete a single case from database\n\nArgs:\ninstitute_id(str)\ncase_id(str)\n\nReturns:\ncase_obj(dict): The case that was deleted", "source": "codesearchnet"}
{"code": "def WriteEventMACBGroup(self, event_macb_group):\n    \n    output_values = self._GetOutputValues(event_macb_group[0])\n\n    timestamp_descriptions = [\n        event.timestamp_desc for event in event_macb_group]\n    output_values[3] = (\n        self._output_mediator.GetMACBRepresentationFromDescriptions(\n            timestamp_descriptions))\n    \n    output_values[6] = '; '.join(timestamp_descriptions)\n\n    self._WriteOutputValues(output_values)", "docstring": "Writes an event MACB group to the output.\n\nArgs:\nevent_macb_group (list[EventObject]): event MACB group.", "source": "juraj-google-style"}
{"code": "def set_napp(self, user, napp, version=None):\n    self.user = user\n    self.napp = napp\n    self.version = (version or 'latest')", "docstring": "Set info about NApp.\n\nArgs:\nuser (str): NApps Server username.\nnapp (str): NApp name.\nversion (str): NApp version.", "source": "codesearchnet"}
{"code": "def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, name=None):\n    self._filenames = filenames\n    self._record_bytes = ops.convert_to_tensor(record_bytes, dtype=dtypes.int64, name='record_bytes')\n    self._header_bytes = convert.optional_param_to_tensor('header_bytes', header_bytes)\n    self._footer_bytes = convert.optional_param_to_tensor('footer_bytes', footer_bytes)\n    self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)\n    self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n    self._name = name\n    variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(self._filenames, self._header_bytes, self._record_bytes, self._footer_bytes, self._buffer_size, self._compression_type, metadata=self._metadata.SerializeToString())\n    super(_FixedLengthRecordDataset, self).__init__(variant_tensor)", "docstring": "Creates a `FixedLengthRecordDataset`.\n\nArgs:\nfilenames: A `tf.string` tensor containing one or more filenames.\nrecord_bytes: A `tf.int64` scalar representing the number of bytes in each\nrecord.\nheader_bytes: (Optional.) A `tf.int64` scalar representing the number of\nbytes to skip at the start of a file.\nfooter_bytes: (Optional.) A `tf.int64` scalar representing the number of\nbytes to ignore at the end of a file.\nbuffer_size: (Optional.) A `tf.int64` scalar representing the number of\nbytes to buffer when reading.\ncompression_type: (Optional.) A `tf.string` scalar evaluating to one of\n`\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\nname: (Optional.) A name for the tf.data operation.", "source": "github-repos"}
{"code": "def _assert_at_most_n_true(predicates, n, msg):\n    preds_c = array_ops_stack.stack(predicates, name='preds_c')\n    num_true_conditions = math_ops.reduce_sum(math_ops.cast(preds_c, dtypes.int32), name='num_true_conds')\n    condition = math_ops.less_equal(num_true_conditions, constant_op.constant(n, name='n_true_conds'))\n    preds_names = ', '.join((getattr(p, 'name', '?') for p in predicates))\n    error_msg = ['%s: more than %d conditions (%s) evaluated as True:' % (msg, n, preds_names), preds_c]\n    return control_flow_assert.Assert(condition, data=error_msg, summarize=len(predicates))", "docstring": "Returns an Assert op that checks that at most n predicates are True.\n\nArgs:\npredicates: list of bool scalar tensors.\nn: maximum number of true predicates allowed.\nmsg: Error message.", "source": "github-repos"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    version = match.get('LastAttemptSystemVersion', 'N/A')\n    pending = match.get('LastUpdatesAvailable', None)\n\n    event_data = plist_event.PlistTimeEventData()\n    event_data.desc = 'Last MacOS {0:s} full update.'.format(version)\n    event_data.key = ''\n    event_data.root = '/'\n\n    datetime_value = match.get('LastFullSuccessfulDate', None)\n    if datetime_value:\n      event = time_events.PythonDatetimeEvent(\n          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    datetime_value = match.get('LastSuccessfulDate', None)\n    if datetime_value and pending:\n      software = []\n      for update in match.get('RecommendedUpdates', []):\n        identifier = update.get('Identifier', '<IDENTIFIER>')\n        product_key = update.get('Product Key', '<PRODUCT_KEY>')\n\n        software.append('{0:s}({1:s})'.format(identifier, product_key))\n\n      if not software:\n        return\n\n      software = ','.join(software)\n      event_data.desc = (\n          'Last Mac OS {0!s} partially update, pending {1!s}: '\n          '{2:s}.').format(version, pending, software)\n\n      event = time_events.PythonDatetimeEvent(\n          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant MacOS update entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def __init__(self, data_file, vocab_data_file):\n    \n    def reading_function(file_name):\n      for root in self.ROOTS:\n        file_path = os.path.join(root, file_name)\n        if os.path.exists(file_path):\n          break\n        file_path = None\n      assert file_path is not None, (\"Couldn't locate %s in %r\" %\n                                     (file_name, self.ROOTS))\n      with open(file_path, mode=\"rb\") as fp:\n        return list(fp.read().decode().replace(\"\\n\", self.CHAR_EOS))\n\n    self._vocab_dict = {}\n    self._inv_vocab_dict = {}\n\n    token_list = reading_function(vocab_data_file)\n    self.vocab_size = 0\n    for token in self.DEFAULT_START_TOKENS + token_list:\n      if token not in self._vocab_dict:\n        self._vocab_dict[token] = self.vocab_size\n        self._inv_vocab_dict[self.vocab_size] = token\n        self.vocab_size += 1\n\n    raw_data = reading_function(data_file)\n    self.flat_data = np.array(self.tokenize(raw_data), dtype=np.int32)\n    self.num_tokens = self.flat_data.shape[0]", "docstring": "Creates a TokenDataSource instance.\n\nArgs:\ndata_file: file object containing text data to be tokenized.\nvocab_data_file: file object containing text data used to initialize\nthe vocabulary.", "source": "juraj-google-style"}
{"code": "def add_edge_bias(x, filter_size):\n    x_shape = common_layers.shape_list(x)\n    if ((filter_size[0] == 1) and (filter_size[1] == 1)):\n        return x\n    a = ((filter_size[0] - 1) \n    b = ((filter_size[1] - 1) \n    padding = [[0, 0], [a, a], [b, b], [0, 0]]\n    x_bias = tf.zeros((x_shape[:(- 1)] + [1]))\n    x = tf.pad(x, padding)\n    x_pad = tf.pad(x_bias, padding, constant_values=1)\n    return tf.concat([x, x_pad], axis=3)", "docstring": "Pad x and concatenates an edge bias across the depth of x.\n\nThe edge bias can be thought of as a binary feature which is unity when\nthe filter is being convolved over an edge and zero otherwise.\n\nArgs:\nx: Input tensor, shape (NHWC)\nfilter_size: filter_size to determine padding.\nReturns:\nx_pad: Input tensor, shape (NHW(c+1))", "source": "codesearchnet"}
{"code": "def Add(self, other):\n    \n    if len(self.data) != len(other.data):\n      raise RuntimeError(\"Can only add series of identical lengths.\")\n    for i in range(len(self.data)):\n      if self.data[i][1] != other.data[i][1]:\n        raise RuntimeError(\"Timestamp mismatch.\")\n      if self.data[i][0] is None and other.data[i][0] is None:\n        continue\n      self.data[i][0] = (self.data[i][0] or 0) + (other.data[i][0] or 0)", "docstring": "Add other to self pointwise.\n\nRequires that both self and other are of the same length, and contain\nidentical timestamps. Typically this means that Normalize has been called\non both with identical time parameters.\n\nArgs:\nother: The sequence to add to self.\n\nRaises:\nRuntimeError: other does not contain the same timestamps as self.", "source": "juraj-google-style"}
{"code": "def MakeCdfFromPmf(pmf, name=None):\n    if (name == None):\n        name = pmf.name\n    return MakeCdfFromItems(pmf.Items(), name)", "docstring": "Makes a CDF from a Pmf object.\n\nArgs:\npmf: Pmf.Pmf object\nname: string name for the data.\n\nReturns:\nCdf object", "source": "codesearchnet"}
{"code": "def save(self, items):\n    rows = []\n    indx = self.indx\n    size = 0\n    tick = s_common.now()\n    for item in items:\n        byts = s_msgpack.en(item)\n        size += len(byts)\n        lkey = s_common.int64en(indx)\n        indx += 1\n        rows.append((lkey, byts))\n    self.slab.putmulti(rows, append=True, db=self.db)\n    took = (s_common.now() - tick)\n    origindx = self.indx\n    self.indx = indx\n    return {'indx': indx, 'size': size, 'count': len(items), 'time': tick, 'took': took}\n    return origindx", "docstring": "Save a series of items to a sequence.\n\nArgs:\nitems (tuple): The series of items to save into the sequence.\n\nReturns:\nThe index of the first item", "source": "codesearchnet"}
{"code": "def emit(self, record):\n    if (record.levelno < logging.getLevelName(self.min_level)):\n        return\n    evt = LogEvent()\n    evt.level = record.levelname\n    evt.levelno = record.levelno\n    evt.timestamp = datetime.fromtimestamp(record.created)\n    evt.message = record.message\n    evt.filename = record.filename\n    evt.lineno = record.lineno\n    evt.module = record.module\n    evt.funcname = record.funcName\n    evt.pathname = record.pathname\n    evt.process_id = record.process\n    if (record.levelno >= 40):\n        evt.stacktrace = traceback.format_exc()\n    try:\n        db.session.add(evt)\n        db.session.commit()\n    except Exception:\n        db.session.rollback()", "docstring": "Persist a record into the database\n\nArgs:\nrecord (`logging.Record`): The logging.Record object to store\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def get_structure_from_prev_run(vasprun, outcar=None, sym_prec=0.1, international_monoclinic=True):\n    structure = vasprun.final_structure\n    site_properties = {}\n    if vasprun.is_spin:\n        if (outcar and outcar.magnetization):\n            site_properties.update({'magmom': [i['tot'] for i in outcar.magnetization]})\n        else:\n            site_properties.update({'magmom': vasprun.parameters['MAGMOM']})\n    if vasprun.parameters.get('LDAU', False):\n        for k in ('LDAUU', 'LDAUJ', 'LDAUL'):\n            vals = vasprun.incar[k]\n            m = {}\n            l = []\n            s = 0\n            for site in structure:\n                if (site.specie.symbol not in m):\n                    m[site.specie.symbol] = vals[s]\n                    s += 1\n                l.append(m[site.specie.symbol])\n            if (len(l) == len(structure)):\n                site_properties.update({k.lower(): l})\n            else:\n                raise ValueError('length of list {} not the same asstructure'.format(l))\n    structure = structure.copy(site_properties=site_properties)\n    if sym_prec:\n        sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)\n        new_structure = sym_finder.get_primitive_standard_structure(international_monoclinic=international_monoclinic)\n        vpa_old = (structure.volume / structure.num_sites)\n        vpa_new = (new_structure.volume / new_structure.num_sites)\n        if ((abs((vpa_old - vpa_new)) / vpa_old) > 0.02):\n            raise ValueError('Standardizing cell failed! VPA old: {}, VPA new: {}'.format(vpa_old, vpa_new))\n        sm = StructureMatcher()\n        if (not sm.fit(structure, new_structure)):\n            raise ValueError(\"Standardizing cell failed! Old structure doesn't match new.\")\n        structure = new_structure\n    return structure", "docstring": "Process structure from previous run.\n\nArgs:\nvasprun (Vasprun): Vasprun that contains the final structure\nfrom previous run.\noutcar (Outcar): Outcar that contains the magnetization info from\nprevious run.\nsym_prec (float): Tolerance for symmetry finding for standardization. If\nno standardization is desired, set to 0 or a False.\ninternational_monoclinic (bool): Whether to use international\nconvention (vs Curtarolo) for monoclinic. Defaults True.\n\nReturns:\nReturns the magmom-decorated structure that can be passed to get\nVasp input files, e.g. get_kpoints.", "source": "codesearchnet"}
{"code": "def is_in_path(program):\n    if (sys.version_info.major == 2):\n        path = os.getenv('PATH')\n        if (os.name == 'nt'):\n            path = path.split(';')\n        else:\n            path = path.split(':')\n    else:\n        path = os.get_exec_path()\n    for i in path:\n        if os.path.isdir(i):\n            if (program in os.listdir(i)):\n                return True", "docstring": "Check if a program is in the system ``PATH``.\n\nChecks if a given program is in the user's ``PATH`` or not.\n\nArgs:\nprogram (str): The program to try to find in ``PATH``.\n\nReturns:\nbool: Is the program in ``PATH``?", "source": "codesearchnet"}
{"code": "def _compute_edge_nodes(nodes, degree):\n    (dimension, _) = np.shape(nodes)\n    nodes1 = np.empty((dimension, (degree + 1)), order='F')\n    nodes2 = np.empty((dimension, (degree + 1)), order='F')\n    nodes3 = np.empty((dimension, (degree + 1)), order='F')\n    curr2 = degree\n    curr3 = (- 1)\n    for i in six.moves.xrange((degree + 1)):\n        nodes1[(:, i)] = nodes[(:, i)]\n        nodes2[(:, i)] = nodes[(:, curr2)]\n        nodes3[(:, i)] = nodes[(:, curr3)]\n        curr2 += (degree - i)\n        curr3 -= (i + 2)\n    return (nodes1, nodes2, nodes3)", "docstring": "Compute the nodes of each edges of a surface.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): Control point nodes that define the surface.\ndegree (int): The degree of the surface define by ``nodes``.\n\nReturns:\nTuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes in\nthe edges of the surface.", "source": "codesearchnet"}
{"code": "def item_status(self, **kwargs):\n        \n        path = self._get_id_path('item_status')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Check to see if a movie id is already added to a list.\n\nArgs:\nmovie_id: The id of the movie.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.AuthEnable = channel.unary_unary(\n        '/etcdserverpb.Auth/AuthEnable',\n        request_serializer=rpc__pb2.AuthEnableRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthEnableResponse.FromString,\n        )\n    self.AuthDisable = channel.unary_unary(\n        '/etcdserverpb.Auth/AuthDisable',\n        request_serializer=rpc__pb2.AuthDisableRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthDisableResponse.FromString,\n        )\n    self.Authenticate = channel.unary_unary(\n        '/etcdserverpb.Auth/Authenticate',\n        request_serializer=rpc__pb2.AuthenticateRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthenticateResponse.FromString,\n        )\n    self.UserAdd = channel.unary_unary(\n        '/etcdserverpb.Auth/UserAdd',\n        request_serializer=rpc__pb2.AuthUserAddRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserAddResponse.FromString,\n        )\n    self.UserGet = channel.unary_unary(\n        '/etcdserverpb.Auth/UserGet',\n        request_serializer=rpc__pb2.AuthUserGetRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserGetResponse.FromString,\n        )\n    self.UserList = channel.unary_unary(\n        '/etcdserverpb.Auth/UserList',\n        request_serializer=rpc__pb2.AuthUserListRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserListResponse.FromString,\n        )\n    self.UserDelete = channel.unary_unary(\n        '/etcdserverpb.Auth/UserDelete',\n        request_serializer=rpc__pb2.AuthUserDeleteRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserDeleteResponse.FromString,\n        )\n    self.UserChangePassword = channel.unary_unary(\n        '/etcdserverpb.Auth/UserChangePassword',\n        request_serializer=rpc__pb2.AuthUserChangePasswordRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserChangePasswordResponse.FromString,\n        )\n    self.UserGrantRole = channel.unary_unary(\n        '/etcdserverpb.Auth/UserGrantRole',\n        request_serializer=rpc__pb2.AuthUserGrantRoleRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserGrantRoleResponse.FromString,\n        )\n    self.UserRevokeRole = channel.unary_unary(\n        '/etcdserverpb.Auth/UserRevokeRole',\n        request_serializer=rpc__pb2.AuthUserRevokeRoleRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthUserRevokeRoleResponse.FromString,\n        )\n    self.RoleAdd = channel.unary_unary(\n        '/etcdserverpb.Auth/RoleAdd',\n        request_serializer=rpc__pb2.AuthRoleAddRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthRoleAddResponse.FromString,\n        )\n    self.RoleGet = channel.unary_unary(\n        '/etcdserverpb.Auth/RoleGet',\n        request_serializer=rpc__pb2.AuthRoleGetRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthRoleGetResponse.FromString,\n        )\n    self.RoleList = channel.unary_unary(\n        '/etcdserverpb.Auth/RoleList',\n        request_serializer=rpc__pb2.AuthRoleListRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthRoleListResponse.FromString,\n        )\n    self.RoleDelete = channel.unary_unary(\n        '/etcdserverpb.Auth/RoleDelete',\n        request_serializer=rpc__pb2.AuthRoleDeleteRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthRoleDeleteResponse.FromString,\n        )\n    
self.RoleGrantPermission = channel.unary_unary(\n        '/etcdserverpb.Auth/RoleGrantPermission',\n        request_serializer=rpc__pb2.AuthRoleGrantPermissionRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthRoleGrantPermissionResponse.FromString,\n        )\n    self.RoleRevokePermission = channel.unary_unary(\n        '/etcdserverpb.Auth/RoleRevokePermission',\n        request_serializer=rpc__pb2.AuthRoleRevokePermissionRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AuthRoleRevokePermissionResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def make_train_function(self):\n    if self.train_function is not None:\n        return self.train_function\n\n    def step_function(model, iterator):\n        \n\n        def run_step(data):\n            outputs = model.train_step(data)\n            with ops.control_dependencies(_minimum_control_deps(outputs)):\n                model._train_counter.assign_add(1)\n            return outputs\n        data = next(iterator)\n        outputs = model.distribute_strategy.run(run_step, args=(data,))\n        outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='first')\n        write_scalar_summaries(outputs, step=model._train_counter)\n        return outputs\n    if self._steps_per_execution.numpy().item() == 1:\n\n        def train_function(iterator):\n            \n            return step_function(self, iterator)\n    else:\n\n        def train_function(iterator):\n            \n            for _ in math_ops.range(self._steps_per_execution):\n                outputs = step_function(self, iterator)\n            return outputs\n    if not self.run_eagerly:\n        train_function = def_function.function(train_function, experimental_relax_shapes=True)\n        self.train_tf_function = train_function\n    self.train_function = train_function\n    if self._cluster_coordinator:\n        self.train_function = lambda iterator: self._cluster_coordinator.schedule(train_function, args=(iterator,))\n    return self.train_function", "docstring": "Creates a function that executes one step of training.\n\nThis method can be overridden to support custom training logic.\nThis method is called by `Model.fit` and `Model.train_on_batch`.\n\nTypically, this method directly controls `tf.function` and\n`tf.distribute.Strategy` settings, and delegates the actual training\nlogic to `Model.train_step`.\n\nThis function is cached the first time `Model.fit` or\n`Model.train_on_batch` is called. The cache is cleared whenever\n`Model.compile` is called.\n\nReturns:\nFunction. The function created by this method should accept a\n`tf.data.Iterator`, and return a `dict` containing values that will\nbe passed to `tf.keras.Callbacks.on_train_batch_end`, such as\n`{'loss': 0.2, 'accuracy': 0.7}`.", "source": "github-repos"}
{"code": "def sendResponse(self, message, UUID, routing_key):\n        \n        self.sendMessage(\n            exchange=self.output_exchange,\n            routing_key=routing_key,\n            message=message,\n            UUID=UUID\n        )", "docstring": "Send `message` to ``self.output_exchange`` with routing key\n``self.output_key``, ``self.content_type`` in ``delivery_mode=2``.\n\nArgs:\nmessage (str): message which will be sent\nUUID: unique identification of message\nrouting_key (str): which routing key to use to send message back", "source": "juraj-google-style"}
{"code": "def start_trial(self, trial, checkpoint=None):\n    self._commit_resources(trial.resources)\n    try:\n        self._start_trial(trial, checkpoint)\n    except Exception as e:\n        logger.exception('Error starting runner for Trial %s', str(trial))\n        error_msg = traceback.format_exc()\n        time.sleep(2)\n        self._stop_trial(trial, error=True, error_msg=error_msg)\n        if isinstance(e, AbortTrialExecution):\n            return\n        try:\n            trial.clear_checkpoint()\n            logger.info('Trying to start runner for Trial %s without checkpoint.', str(trial))\n            self._start_trial(trial)\n        except Exception:\n            logger.exception('Error starting runner for Trial %s, aborting!', str(trial))\n            error_msg = traceback.format_exc()\n            self._stop_trial(trial, error=True, error_msg=error_msg)", "docstring": "Starts the trial.\n\nWill not return resources if trial repeatedly fails on start.\n\nArgs:\ntrial (Trial): Trial to be started.\ncheckpoint (Checkpoint): A Python object or path storing the state\nof trial.", "source": "codesearchnet"}
{"code": "def _select_forward_and_backward_functions(self, args, possible_gradient_type, executing_eagerly):\n    if executing_eagerly:\n        input_tangents = forwardprop_util.pack_tangents(args)\n    else:\n        input_tangents = forwardprop_util.TangentInfo()\n    need_gradients_for_jvps = record.should_record_backprop(input_tangents.tangents)\n    cache_key = (need_gradients_for_jvps, input_tangents.indices)\n    if possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_FIRST_ORDER:\n        if input_tangents.indices or executing_eagerly:\n            functions = self._first_order_tape_functions.get(cache_key, None)\n            if functions is None:\n                functions = _FirstOrderTapeGradientFunctions(self._func_graph, self._attrs, self._garbage_collector, forwardprop_input_indices=input_tangents.indices, delayed_rewrite_functions=self._delayed_rewrite_functions, need_gradients_for_jvps=need_gradients_for_jvps)\n                self._first_order_tape_functions[cache_key] = functions\n            return _ForwardBackwardCall(functions, args, input_tangents.tangents, tape_watching=True)\n        else:\n            return _ForwardBackwardCall(self._delayed_rewrite_functions, args, input_tangents.tangents, tape_watching=True)\n    elif possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER:\n        functions = self._higher_order_tape_functions.get(cache_key, None)\n        if functions is None:\n            functions = _HigherOrderTapeGradientFunctions(self._func_graph, self._attrs, self._garbage_collector, forwardprop_input_indices=input_tangents.indices, delayed_rewrite_functions=self._delayed_rewrite_functions, need_gradients_for_jvps=need_gradients_for_jvps)\n            self._higher_order_tape_functions[cache_key] = functions\n        return _ForwardBackwardCall(functions, args, input_tangents.tangents, tape_watching=True)\n    return _ForwardBackwardCall(self._delayed_rewrite_functions, args, input_tangents.tangents, tape_watching=False)", "docstring": "Selects forward and backward functions based on the calling context.\n\nThe forward function computes the \"real\" function outputs, `self._outputs`,\nand any extra values needed by the corresponding backward function.\n\nArgs:\nargs: A flat list of Tensors with all of the inputs to the forward\nfunction (including user-specified and captured inputs).\npossible_gradient_type: One of gradients_util.POSSIBLE_GRADIENT_TYPES_*.\nexecuting_eagerly: Boolean, the value of context.executing_eagerly().\n\nReturns:\nAn object with a `forward` method returning a tuple of (forward_function :\nAtomicFunction, augmented_arguments : List), and a corresponding\n`record` method which takes outputs from the forward function and records\nthe operation. forward_function should be called with augmented_arguments.", "source": "github-repos"}
{"code": "def MessageToRepr(msg, multiline=False, **kwargs):\n    indent = kwargs.get('indent', 0)\n\n    def IndentKwargs(kwargs):\n        kwargs = dict(kwargs)\n        kwargs['indent'] = (kwargs.get('indent', 0) + 4)\n        return kwargs\n    if isinstance(msg, list):\n        s = '['\n        for item in msg:\n            if multiline:\n                s += ('\\n' + (' ' * (indent + 4)))\n            s += (MessageToRepr(item, multiline=multiline, **IndentKwargs(kwargs)) + ',')\n        if multiline:\n            s += ('\\n' + (' ' * indent))\n        s += ']'\n        return s\n    if isinstance(msg, messages.Message):\n        s = (type(msg).__name__ + '(')\n        if (not kwargs.get('no_modules')):\n            s = ((msg.__module__ + '.') + s)\n        names = sorted([field.name for field in msg.all_fields()])\n        for name in names:\n            field = msg.field_by_name(name)\n            if multiline:\n                s += ('\\n' + (' ' * (indent + 4)))\n            value = getattr(msg, field.name)\n            s += (((field.name + '=') + MessageToRepr(value, multiline=multiline, **IndentKwargs(kwargs))) + ',')\n        if multiline:\n            s += ('\\n' + (' ' * indent))\n        s += ')'\n        return s\n    if isinstance(msg, six.string_types):\n        if (kwargs.get('shortstrings') and (len(msg) > 100)):\n            msg = msg[:100]\n    if isinstance(msg, datetime.datetime):\n\n        class SpecialTZInfo(datetime.tzinfo):\n\n            def __init__(self, offset):\n                super(SpecialTZInfo, self).__init__()\n                self.offset = offset\n\n            def __repr__(self):\n                s = (('TimeZoneOffset(' + repr(self.offset)) + ')')\n                if (not kwargs.get('no_modules')):\n                    s = ('apitools.base.protorpclite.util.' + s)\n                return s\n        msg = datetime.datetime(msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second, msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))\n    return repr(msg)", "docstring": "Return a repr-style string for a protorpc message.\n\nprotorpc.Message.__repr__ does not return anything that could be considered\npython code. Adding this function lets us print a protorpc message in such\na way that it could be pasted into code later, and used to compare against\nother things.\n\nArgs:\nmsg: protorpc.Message, the message to be repr'd.\nmultiline: bool, True if the returned string should have each field\nassignment on its own line.\n**kwargs: {str:str}, Additional flags for how to format the string.\n\nKnown **kwargs:\nshortstrings: bool, True if all string values should be\ntruncated at 100 characters, since when mocking the contents\ntypically don't matter except for IDs, and IDs are usually\nless than 100 characters.\nno_modules: bool, True if the long module name should not be printed with\neach type.\n\nReturns:\nstr, A string of valid python (assuming the right imports have been made)\nthat recreates the message passed into this function.", "source": "codesearchnet"}
{"code": "def _make_spec_file(self):\n    if issubclass(BdistRPMCommand, object):\n        spec_file = super(BdistRPMCommand, self)._make_spec_file()\n    else:\n        spec_file = bdist_rpm._make_spec_file(self)\n    if (sys.version_info[0] < 3):\n        python_package = 'python'\n    else:\n        python_package = 'python3'\n    description = []\n    summary = ''\n    in_description = False\n    python_spec_file = []\n    for line in spec_file:\n        if line.startswith('Summary: '):\n            summary = line\n        elif line.startswith('BuildRequires: '):\n            line = 'BuildRequires: {0:s}-setuptools'.format(python_package)\n        elif line.startswith('Requires: '):\n            if (python_package == 'python3'):\n                line = line.replace('python', 'python3')\n        elif line.startswith('%description'):\n            in_description = True\n        elif line.startswith('%files'):\n            line = '%files -f INSTALLED_FILES -n {0:s}-%{{name}}'.format(python_package)\n        elif line.startswith('%prep'):\n            in_description = False\n            python_spec_file.append('%package -n {0:s}-%{{name}}'.format(python_package))\n            python_spec_file.append('{0:s}'.format(summary))\n            python_spec_file.append('')\n            python_spec_file.append('%description -n {0:s}-%{{name}}'.format(python_package))\n            python_spec_file.extend(description)\n        elif in_description:\n            if ((not description) and (not line)):\n                continue\n            description.append(line)\n        python_spec_file.append(line)\n    return python_spec_file", "docstring": "Generates the text of an RPM spec file.\n\nReturns:\nA list of strings containing the lines of text.", "source": "codesearchnet"}
{"code": "def observation_spec(self):\n    obs_spec = named_array.NamedDict({'action_result': (0,), 'alerts': (0,), 'available_actions': (0,), 'build_queue': (0, len(UnitLayer)), 'cargo': (0, len(UnitLayer)), 'cargo_slots_available': (1,), 'control_groups': (10, 2), 'game_loop': (1,), 'last_actions': (0,), 'multi_select': (0, len(UnitLayer)), 'player': (len(Player),), 'score_cumulative': (len(ScoreCumulative),), 'score_by_category': (len(ScoreByCategory), len(ScoreCategories)), 'score_by_vital': (len(ScoreByVital), len(ScoreVitals)), 'single_select': (0, len(UnitLayer))})\n    aif = self._agent_interface_format\n    if aif.feature_dimensions:\n        obs_spec['feature_screen'] = (len(SCREEN_FEATURES), aif.feature_dimensions.screen.y, aif.feature_dimensions.screen.x)\n        obs_spec['feature_minimap'] = (len(MINIMAP_FEATURES), aif.feature_dimensions.minimap.y, aif.feature_dimensions.minimap.x)\n    if aif.rgb_dimensions:\n        obs_spec['rgb_screen'] = (aif.rgb_dimensions.screen.y, aif.rgb_dimensions.screen.x, 3)\n        obs_spec['rgb_minimap'] = (aif.rgb_dimensions.minimap.y, aif.rgb_dimensions.minimap.x, 3)\n    if aif.use_feature_units:\n        obs_spec['feature_units'] = (0, len(FeatureUnit))\n    if aif.use_raw_units:\n        obs_spec['raw_units'] = (0, len(FeatureUnit))\n    if aif.use_unit_counts:\n        obs_spec['unit_counts'] = (0, len(UnitCounts))\n    if aif.use_camera_position:\n        obs_spec['camera_position'] = (2,)\n    return obs_spec", "docstring": "The observation spec for the SC2 environment.\n\nIt's worth noting that the image-like observations are in y,x/row,column\norder which is different than the actions which are in x,y order. This is\ndue to conflicting conventions, and to facilitate printing of the images.\n\nReturns:\nThe dict of observation names to their tensor shapes. Shapes with a 0 can\nvary in length, for example the number of valid actions depends on which\nunits you have selected.", "source": "codesearchnet"}
{"code": "def plot_probabilities_histogram(Y_p, title=None):\n    \n    if Y_p.ndim > 1:\n        msg = (\n            f\"Arg Y_p should be a 1-dimensional np.ndarray, not of shape \"\n            f\"{Y_p.shape}.\"\n        )\n        raise ValueError(msg)\n    plt.hist(Y_p, bins=20)\n    plt.xlim((0, 1.025))\n    plt.xlabel(\"Probability\")\n    plt.ylabel(\"\n    if isinstance(title, str):\n        plt.title(title)\n    plt.show()", "docstring": "Plot a histogram from a numpy array of probabilities\n\nArgs:\nY_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])", "source": "juraj-google-style"}
{"code": "def get_accounts_for_service(cls, service_type):\n        \n        return [\n            a for a in cls.get_accounts().values()\n            if a.service_type == service_type\n        ]", "docstring": "Get a list of accounts for a given music service.\n\nArgs:\nservice_type (str): The service_type to use.\n\nReturns:\nlist: A list of `Account` instances.", "source": "juraj-google-style"}
{"code": "def discovery(self, logfile=None, tracefile=None):\n        \n        self._enable_logging(logfile=logfile, tracefile=tracefile)\n\n        self.log(\"'discovery' method is deprecated. Please 'connect' with force_discovery=True.\")\n        self.log(\"Device discovery process started\")\n        self.connect(logfile=logfile, force_discovery=True, tracefile=tracefile)\n        self.disconnect()", "docstring": "Discover the device details.\n\nThis method discover several device attributes.\n\nArgs:\nlogfile (file): Optional file descriptor for session logging. The file must be open for write.\nThe session is logged only if ``log_session=True`` was passed to the constructor.\nIt the parameter is not passed then the default *session.log* file is created in `log_dir`.", "source": "juraj-google-style"}
{"code": "def stage_tc_create_tag(self, tag, resource):\n    tag_resource = resource.tags(self.tcex.safetag(tag))\n    tag_resource.http_method = 'POST'\n    t_response = tag_resource.request()\n    if (t_response.get('status') != 'Success'):\n        self.log.warning('[tcex] Failed adding tag \"{}\" ({}).'.format(tag, t_response.get('response').text))", "docstring": "Add a tag to a resource.\n\nArgs:\ntag (str): The tag to be added to the resource.\nresource (obj): An instance of tcex resource class.", "source": "codesearchnet"}
{"code": "def get_legacy_output_classes(dataset_or_iterator):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), get_structure(dataset_or_iterator))", "docstring": "Returns the output classes for elements of the input dataset / iterator.\n\nArgs:\ndataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.\n\nReturns:\nA (nested) structure of Python `type` objects matching the structure of the\ndataset / iterator elements and specifying the class of the individual\ncomponents.\n\n@compatibility(TF2)\nThis is a legacy API for inspecting the type signature of dataset elements. In\nTF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.\n@end_compatibility", "source": "github-repos"}
{"code": "def register_binary_elementwise_assert_api(func):\n    _BINARY_ELEMENTWISE_ASSERT_APIS.append(func)\n    for args, handler in _ELEMENTWISE_API_HANDLERS.items():\n        if len(args) == 3 and args[2] is _ASSERT_API_TAG:\n            _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler)\n    return func", "docstring": "Decorator that registers a TensorFlow op as a binary elementwise assert API.\n\nDifferent from `dispatch_for_binary_elementwise_apis`, this decorator is used\nfor assert apis, such as assert_equal, assert_none_equal, etc, which return\nNone in eager mode and an op in graph mode.\n\nArgs:\nfunc: The function that implements the binary elementwise assert API.\n\nReturns:\n`func`", "source": "github-repos"}
{"code": "def global_env_valid(env):\n  \n  if env not in EFConfig.ACCOUNT_SCOPED_ENVS:\n    raise ValueError(\"Invalid global env: {}; global envs are: {}\".format(env, EFConfig.ACCOUNT_SCOPED_ENVS))\n  return True", "docstring": "Given an env, determine if it's a valid \"global\" or \"mgmt\" env as listed in EFConfig\nArgs:\nenv: the env to check\nReturns:\nTrue if the env is a valid global env in EFConfig\nRaises:\nValueError with message if the env is not valid", "source": "juraj-google-style"}
{"code": "def _bit_list_to_bytes(bit_list):\n    \n    num_bits = len(bit_list)\n    byte_vals = bytearray()\n    for start in six.moves.xrange(0, num_bits, 8):\n        curr_bits = bit_list[start:start + 8]\n        char_val = sum(\n            val * digit for val, digit in six.moves.zip(_POW2, curr_bits))\n        byte_vals.append(char_val)\n    return bytes(byte_vals)", "docstring": "Converts an iterable of 1s and 0s to bytes.\n\nCombines the list 8 at a time, treating each group of 8 bits\nas a single byte.\n\nArgs:\nbit_list (Sequence): Sequence of 1s and 0s.\n\nReturns:\nbytes: The decoded bytes.", "source": "juraj-google-style"}
{"code": "def get_energy_buckingham(structure, gulp_cmd='gulp', keywords=('optimise', 'conp', 'qok'), valence_dict=None):\n    gio = GulpIO()\n    gc = GulpCaller(gulp_cmd)\n    gin = gio.buckingham_input(structure, keywords, valence_dict=valence_dict)\n    gout = gc.run(gin)\n    return gio.get_energy(gout)", "docstring": "Compute the energy of a structure using Buckingham potential.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\ngulp_cmd: GULP command if not in standard place\nkeywords: GULP first line keywords\nvalence_dict: {El: valence}. Needed if the structure is not charge\nneutral.", "source": "codesearchnet"}
{"code": "def get_dataset(self, dsid, dsinfo):\n        \n        data = self[dsinfo.get('file_key', dsid.name)]\n        data.attrs.update(dsinfo)\n\n        data.attrs[\"platform_name\"] = self['/attr/satellite_name']\n        data.attrs[\"sensor\"] = self['/attr/instrument_name']\n\n        return data", "docstring": "Get dataset function\n\nArgs:\ndsid: Dataset ID\nparam2: Dataset Information\n\nReturns:\nDask DataArray: Data", "source": "juraj-google-style"}
{"code": "def is_evenly_distributed_thresholds(thresholds):\n    num_thresholds = len(thresholds)\n    if num_thresholds < 3:\n        return False\n    even_thresholds = np.arange(num_thresholds, dtype=np.float32) / (num_thresholds - 1)\n    return np.allclose(thresholds, even_thresholds, atol=backend.epsilon())", "docstring": "Check if the thresholds list is evenly distributed.\n\nWe could leverage evenly distributed thresholds to use less memory when\ncalculate metrcis like AUC where each individual threshold need to be\nevaluated.\n\nArgs:\nthresholds: A python list or tuple, or 1D numpy array whose value is ranged\nin [0, 1].\n\nReturns:\nboolean, whether the values in the inputs are evenly distributed.", "source": "github-repos"}
{"code": "def __init__(self, config, start=True):\n    if config.dispatcher_address is None:\n        raise ValueError('Must specify a `dispatcher_address` in the `config` passed to `WorkerServer`.')\n    if isinstance(config, service_config_pb2.WorkerConfig):\n        config_proto = config\n    else:\n        config_proto = service_config_pb2.WorkerConfig(dispatcher_address=config.dispatcher_address, worker_address=config.worker_address, port=config.port, protocol=config.protocol, heartbeat_interval_ms=config.heartbeat_interval_ms, dispatcher_timeout_ms=config.dispatcher_timeout_ms, data_transfer_protocol=config.data_transfer_protocol, data_transfer_address=config.data_transfer_address)\n    self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer(config_proto.SerializeToString())\n    if start:\n        self._server.start()", "docstring": "Creates a new worker server.\n\nArgs:\nconfig: A `tf.data.experimental.service.WorkerConfig` configuration.\nstart: (Optional.) Boolean, indicating whether to start the server after\ncreating it. Defaults to True.", "source": "github-repos"}
{"code": "def from_symmop(cls, symmop, time_reversal):\n        \n        magsymmop = cls(symmop.affine_matrix, time_reversal, symmop.tol)\n        return magsymmop", "docstring": "Initialize a MagSymmOp from a SymmOp and time reversal operator.\n\nArgs:\nsymmop (SymmOp): SymmOp\ntime_reversal (int): Time reversal operator, +1 or -1.\n\nReturns:\nMagSymmOp object", "source": "juraj-google-style"}
{"code": "def _ring_2d(m, n):\n  \n  if m == 1:\n    return [(0, i) for i in range(n)]\n  if n == 1:\n    return [(i, 0) for i in range(m)]\n  if m % 2 != 0:\n    tf.logging.warning(\"Odd dimension\")\n    return [(i % m, i \n  ret = [(0, 0)]\n  for i in range(m \n    for j in range(1, n):\n      ret.append((2 * i, j))\n    for j in range(n-1, 0, -1):\n      ret.append((2 * i + 1, j))\n  for i in range(m-1, 0, -1):\n    ret.append((i, 0))\n  return ret", "docstring": "Ring-order of a mxn mesh.\n\nArgs:\nm: an integer\nn: an integer\nReturns:\na list of mxn pairs", "source": "juraj-google-style"}
{"code": "def _check_for_fail_message(self, transport, exc_info, timeout):\n    try:\n        transport.read_message(timeout)\n    except usb_exceptions.CommonUsbError:\n        if (sys.exc_info()[0] is usb_exceptions.AdbRemoteError):\n            raise\n    raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2])", "docstring": "Check for a 'FAIL' message from transport.\n\nThis method always raises, if 'FAIL' was read, it will raise an\nAdbRemoteError with the message, otherwise it will raise based on\nexc_info, which should be a tuple as per sys.exc_info().\n\nArgs:\ntransport: Transport from which to read for a 'FAIL' message.\nexc_info: Exception info to raise if no 'FAIL' is read.\ntimeout: Timeout to use for the read operation.\n\nRaises:\nAdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info.", "source": "codesearchnet"}
{"code": "def create_dummy_class(klass, dependency):\n    \n    assert not building_rtfd()\n\n    class _DummyMetaClass(type):\n        \n        def __getattr__(_, __):\n            raise AttributeError(\"Cannot import '{}', therefore '{}' is not available\".format(dependency, klass))\n\n    @six.add_metaclass(_DummyMetaClass)\n    class _Dummy(object):\n        \n        def __init__(self, *args, **kwargs):\n            raise ImportError(\"Cannot import '{}', therefore '{}' is not available\".format(dependency, klass))\n\n    return _Dummy", "docstring": "When a dependency of a class is not available, create a dummy class which throws ImportError when used.\n\nArgs:\nklass (str): name of the class.\ndependency (str): name of the dependency.\n\nReturns:\nclass: a class object", "source": "juraj-google-style"}
{"code": "def _filter_out_metaclasses(bases, ctx):\n    non_meta = []\n    meta = None\n    for base in bases:\n        with_metaclass = False\n        for b in base.data:\n            if isinstance(b, metaclass.WithMetaclassInstance):\n                with_metaclass = True\n                if not meta:\n                    meta = b.cls.to_variable(ctx.root_node)\n                non_meta.extend(b.bases)\n        if not with_metaclass:\n            non_meta.append(base)\n    return (meta, non_meta)", "docstring": "Process the temporary classes created by six.with_metaclass.\n\nsix.with_metaclass constructs an anonymous class holding a metaclass and a\nlist of base classes; if we find instances in `bases`, store the first\nmetaclass we find and remove all metaclasses from `bases`.\n\nArgs:\nbases: The list of base classes for the class being constructed.\nctx: The current context.\n\nReturns:\nA tuple of (metaclass, base classes)", "source": "github-repos"}
{"code": "def get_min_muO2(self, min_voltage=None, max_voltage=None):\n        \n        data = []\n        for pair in self._select_in_voltage_range(min_voltage, max_voltage):\n            if pair.muO2_discharge is not None:\n                data.extend([d['chempot'] for d in pair.muO2_discharge])\n            if pair.muO2_charge is not None:\n                data.extend([d['chempot'] for d in pair.muO2_discharge])\n        return min(data) if len(data) > 0 else None", "docstring": "Minimum critical oxygen chemical potential along path.\n\nArgs:\nmin_voltage: The minimum allowable voltage for a given step\nmax_voltage: The maximum allowable voltage allowable for a given\nstep\n\nReturns:\nMinimum critical oxygen chemical of all compounds along the\ninsertion path (a subset of the path can be chosen by the optional\narguments).", "source": "juraj-google-style"}
{"code": "def grad_dot(dy, x1, x2):\n    if (len(numpy.shape(x1)) == 1):\n        dy = numpy.atleast_2d(dy)\n    elif (len(numpy.shape(x2)) == 1):\n        dy = numpy.transpose(numpy.atleast_2d(dy))\n        x2 = numpy.transpose(numpy.atleast_2d(x2))\n    x2_t = numpy.transpose(numpy.atleast_2d(numpy.sum(x2, axis=tuple(numpy.arange((numpy.ndim(x2) - 2))))))\n    dy_x2 = numpy.sum(dy, axis=tuple(((- numpy.arange((numpy.ndim(x2) - 2))) - 2)))\n    return numpy.reshape(numpy.dot(dy_x2, x2_t), numpy.shape(x1))", "docstring": "Gradient of NumPy dot product w.r.t. to the left hand side.\n\nArgs:\ndy: The gradient with respect to the output.\nx1: The left hand side of the `numpy.dot` function.\nx2: The right hand side\n\nReturns:\nThe gradient with respect to `x1` i.e. `x2.dot(dy.T)` with all the\nbroadcasting involved.", "source": "codesearchnet"}
{"code": "def print_stack_info(self):\n    try:\n        rest_api_id = None\n        deployment_found = False\n        response = self._cf_client.describe_stack_resources(StackName=self._stack_name)\n        print('\\nThe following resources were created:')\n        rows = []\n        for resource in response['StackResources']:\n            if (resource['ResourceType'] == 'AWS::ApiGateway::RestApi'):\n                rest_api_id = resource['PhysicalResourceId']\n            elif (resource['ResourceType'] == 'AWS::ApiGateway::Deployment'):\n                deployment_found = True\n            row = []\n            row.append(resource['ResourceType'])\n            row.append(resource['LogicalResourceId'])\n            row.append(resource['PhysicalResourceId'])\n            rows.append(row)\n            \"\\n                print('\\t{}\\t{}\\t{}'.format(\\n                        resource['ResourceType'],\\n                        resource['LogicalResourceId'],\\n                        resource['PhysicalResourceId']\\n                    )\\n                )\\n                \"\n        print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))\n        if (rest_api_id and deployment_found):\n            url = 'https:\n            print('\\nThe deployed service can be found at this URL:')\n            print('\\t{}\\n'.format(url))\n        return response\n    except Exception as wtf:\n        print(wtf)\n        return None", "docstring": "List resources from the given stack\n\nArgs:\nNone\n\nReturns:\nA dictionary filled resources or None if things went sideways", "source": "codesearchnet"}
{"code": "def solid_angle(center, coords):\n    \n    o = np.array(center)\n    r = [np.array(c) - o for c in coords]\n    r.append(r[0])\n    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]\n    n.append(np.cross(r[1], r[0]))\n    vals = []\n    for i in range(len(n) - 1):\n        v = -np.dot(n[i], n[i + 1]) \\\n            / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))\n        vals.append(acos(abs_cap(v)))\n    phi = sum(vals)\n    return phi + (3 - len(r)) * pi", "docstring": "Helper method to calculate the solid angle of a set of coords from the\ncenter.\n\nArgs:\ncenter (3x1 array): Center to measure solid angle from.\ncoords (Nx3 array): List of coords to determine solid angle.\n\nReturns:\nThe solid angle.", "source": "juraj-google-style"}
{"code": "def capture_widget(widget, path=None):\n    \n    if use_qt5:\n        pixmap = widget.grab()\n    else:\n        pixmap = QtGui.QPixmap.grabWidget(widget)\n\n    if path:\n        pixmap.save(path)\n\n    else:\n        image_buffer = QtCore.QBuffer()\n        image_buffer.open(QtCore.QIODevice.ReadWrite)\n\n        pixmap.save(image_buffer, \"PNG\")\n\n        return image_buffer.data().data()", "docstring": "Grab an image of a Qt widget\n\nArgs:\nwidget: The Qt Widget to capture\npath (optional): The path to save to. If not provided - will return image data.\n\nReturns:\nIf a path is provided, the image will be saved to it.\nIf not, the PNG buffer will be returned.", "source": "juraj-google-style"}
{"code": "def analyse(self, path_and_filename, pattern):\n        \n        with open(path_and_filename) as handle:\n            content = handle.read()\n            loc = content.count('\\n') + 1\n            com = 0\n            for match in re.findall(pattern, content, re.DOTALL):\n                com += match.count('\\n') + 1\n\n            return max(0, loc - com), com", "docstring": "Find out lines of code and lines of comments.\n\nArgs:\npath_and_filename (str): path and filename to parse  for loc and com.\npattern (str): regex to search for line commens and block comments\n\nReturns:\nint, int: loc and com for given file.", "source": "juraj-google-style"}
{"code": "def apply_instance_data(designspace, include_filenames=None, Font=defcon.Font):\n    from fontTools.designspaceLib import DesignSpaceDocument\n    from os.path import normcase, normpath\n    if hasattr(designspace, '__fspath__'):\n        designspace = designspace.__fspath__()\n    if isinstance(designspace, basestring):\n        designspace = DesignSpaceDocument.fromfile(designspace)\n    basedir = os.path.dirname(designspace.path)\n    instance_ufos = []\n    if (include_filenames is not None):\n        include_filenames = {normcase(normpath(p)) for p in include_filenames}\n    for designspace_instance in designspace.instances:\n        fname = designspace_instance.filename\n        assert (fname is not None), ('instance %r missing required filename' % getattr(designspace_instance, 'name', designspace_instance))\n        if (include_filenames is not None):\n            fname = normcase(normpath(fname))\n            if (fname not in include_filenames):\n                continue\n        logger.debug('Applying instance data to %s', fname)\n        ufo = Font(normpath(os.path.join(basedir, fname)))\n        set_weight_class(ufo, designspace, designspace_instance)\n        set_width_class(ufo, designspace, designspace_instance)\n        glyphs_instance = InstanceDescriptorAsGSInstance(designspace_instance)\n        to_ufo_custom_params(None, ufo, glyphs_instance)\n        ufo.save()\n        instance_ufos.append(ufo)\n    return instance_ufos", "docstring": "Open UFO instances referenced by designspace, apply Glyphs instance\ndata if present, re-save UFOs and return updated UFO Font objects.\n\nArgs:\ndesignspace: DesignSpaceDocument object or path (str or PathLike) to\na designspace file.\ninclude_filenames: optional set of instance filenames (relative to\nthe designspace path) to be included. By default all instaces are\nprocessed.\nFont: the class used to load the UFO (default: defcon.Font).\nReturns:\nList of opened and updated instance UFOs.", "source": "codesearchnet"}
{"code": "def vasp_version_from_outcar( filename='OUTCAR' ):\n    \n    with open( filename ) as f:\n        line = f.readline().strip()\n    return line", "docstring": "Returns the first line from a VASP OUTCAR file, to get the VASP source version string.\n\nArgs:\nfilename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\nReturns:\n(Str): The first line read from the OUTCAR file.", "source": "juraj-google-style"}
{"code": "def _create_moving_sequence(image, pad_lefts, total_padding):\n    with tf.name_scope('moving_sequence'):\n\n        def get_padded_image(args):\n            (pad_left,) = args\n            pad_right = (total_padding - pad_left)\n            padding = tf.stack([pad_left, pad_right], axis=(- 1))\n            z = tf.zeros((1, 2), dtype=pad_left.dtype)\n            padding = tf.concat([padding, z], axis=0)\n            return tf.pad(image, padding)\n        padded_images = tf.map_fn(get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False, back_prop=False)\n    return padded_images", "docstring": "Create a moving image sequence from the given image a left padding values.\n\nArgs:\nimage: [in_h, in_w, n_channels] uint8 array\npad_lefts: [sequence_length, 2] int32 array of left padding values\ntotal_padding: tensor of padding values, (pad_h, pad_w)\n\nReturns:\n[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where\nout_h = in_h + pad_h, out_w = in_w + out_w", "source": "codesearchnet"}
{"code": "def refresh(self, **kwargs):\n        \n        if self._id_attr:\n            path = '%s/%s' % (self.manager.path, self.id)\n        else:\n            path = self.manager.path\n        server_data = self.manager.gitlab.http_get(path, **kwargs)\n        self._update_attrs(server_data)", "docstring": "Refresh a single object from server.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nReturns None (updates the object)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def format(sql, args=None):\n    resolved_vars = {}\n    code = []\n    SqlStatement._find_recursive_dependencies(sql, args, code=code, resolved_vars=resolved_vars)\n    parts = []\n    for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):\n        if escape:\n            parts.append('$')\n        elif placeholder:\n            variable = placeholder[1:]\n            try:\n                value = resolved_vars[variable]\n            except KeyError as e:\n                raise Exception(('Invalid sql. Unable to substitute $%s.' % e.args[0]))\n            if isinstance(value, types.ModuleType):\n                value = _utils.get_default_query_from_module(value)\n            if isinstance(value, SqlStatement):\n                sql = value.format(value._sql, resolved_vars)\n                value = ('(%s)' % sql)\n            elif ('_repr_sql_' in dir(value)):\n                value = value._repr_sql_()\n            elif isinstance(value, basestring):\n                value = SqlStatement._escape_string(value)\n            elif (isinstance(value, list) or isinstance(value, tuple)):\n                if isinstance(value, tuple):\n                    value = list(value)\n                expansion = '('\n                for v in value:\n                    if (len(expansion) > 1):\n                        expansion += ', '\n                    if isinstance(v, basestring):\n                        expansion += SqlStatement._escape_string(v)\n                    else:\n                        expansion += str(v)\n                expansion += ')'\n                value = expansion\n            else:\n                value = str(value)\n            parts.append(value)\n        elif literal:\n            parts.append(literal)\n    expanded = ''.join(parts)\n    return expanded", "docstring": "Resolve variable references in a query within an environment.\n\nThis computes and resolves the transitive dependencies in the query and raises an\nexception if that fails due to either undefined or circular references.\n\nArgs:\nsql: query to format.\nargs: a dictionary of values to use in variable expansion.\n\nReturns:\nThe resolved SQL text with variables expanded.\n\nRaises:\nException on failure.", "source": "codesearchnet"}
{"code": "def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:\n        \n        return pd.concat([\n            self._validate_input(table, failed_only=failed_only),\n            self._validate_output(table, failed_only=failed_only),\n        ]).fillna(True)", "docstring": "Return a dataframe of validation results for the appropriate series vs the vector of validators.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply validation logic.\nfailed_only (bool): If ``True``: return only the indexes that failed to validate.", "source": "juraj-google-style"}
{"code": "def vector_projection(v1, v2):\n    \n    return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)", "docstring": "compute the vector projection of v1 upon v2\n\nArgs:\nv1, v2: iterable\nindices 0, 1, 2 corresponding to cartesian coordinates\n\nReturns:\n3-vector of the projection of point p onto the direction of v", "source": "juraj-google-style"}
{"code": "def get_group(self, uuid=None):\n        \n        if uuid is None:\n            uuid = self.uuid\n        group_data = self.get('group', params={'uuid': uuid})\n        return group_data", "docstring": "Get group data based on uuid.\n\nArgs:\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: No data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\ndict: group json", "source": "juraj-google-style"}
{"code": "def piola_kirchoff_1(self, def_grad):\n    if (not self.is_symmetric):\n        raise ValueError('The stress tensor is not symmetric,                              PK stress is based on a symmetric stress tensor.')\n    def_grad = SquareTensor(def_grad)\n    return (def_grad.det * np.dot(self, def_grad.inv.trans))", "docstring": "calculates the first Piola-Kirchoff stress\n\nArgs:\ndef_grad (3x3 array-like): deformation gradient tensor", "source": "codesearchnet"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes a `BuildTrigger` by its project ID and trigger ID. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Empty) The response message.", "source": "github-repos"}
{"code": "def _maybe_add_main_op(self, main_op):\n    if main_op is None:\n        return\n    if not isinstance(main_op, ops.Operation):\n        raise TypeError(f'Expected {main_op} to be an Operation but got type {type(main_op)} instead.')\n    for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY):\n        if ops.get_collection(init_op_key):\n            raise ValueError(f'Graph already contains one or more main ops under the collection {init_op_key}.')\n    ops.add_to_collection(constants.MAIN_OP_KEY, main_op)", "docstring": "Adds main op to the SavedModel.\n\nArgs:\nmain_op: Main op to run as part of graph initialization. If None, no main\nop will be added to the graph.\n\nRaises:\nTypeError: If the main op is provided but is not of type `Operation`.\nValueError: if the Graph already contains an init op.", "source": "github-repos"}
{"code": "def log_batch(self, log_data):\n    url = uri_join(self.base_url, 'log')\n    attachments = []\n    for log_item in log_data:\n        log_item['item_id'] = self.stack[(- 1)]\n        attachment = log_item.get('attachment', None)\n        if ('attachment' in log_item):\n            del log_item['attachment']\n        if attachment:\n            if (not isinstance(attachment, collections.Mapping)):\n                attachment = {'data': attachment}\n            name = attachment.get('name', str(uuid.uuid4()))\n            log_item['file'] = {'name': name}\n            attachments.append(('file', (name, attachment['data'], attachment.get('mime', 'application/octet-stream'))))\n    files = [('json_request_part', (None, json.dumps(log_data), 'application/json'))]\n    files.extend(attachments)\n    from reportportal_client import POST_LOGBATCH_RETRY_COUNT\n    for i in range(POST_LOGBATCH_RETRY_COUNT):\n        try:\n            r = self.session.post(url=url, files=files, verify=self.verify_ssl)\n        except KeyError:\n            if (i < (POST_LOGBATCH_RETRY_COUNT - 1)):\n                continue\n            else:\n                raise\n        break\n    logger.debug('log_batch - Stack: %s', self.stack)\n    logger.debug('log_batch response: %s', r.text)\n    return _get_data(r)", "docstring": "Logs batch of messages with attachment.\n\nArgs:\nlog_data: list of log records.\nlog record is a dict of;\ntime, message, level, attachment\nattachment is a dict of:\nname: name of attachment\ndata: fileobj or content\nmime: content type for attachment", "source": "codesearchnet"}
{"code": "def _add_validator(fv, validator_instance):\n    for flag_name in validator_instance.get_flags_names():\n        fv[flag_name].validators.append(validator_instance)", "docstring": "Register new flags validator to be checked.\n\nArgs:\nfv: flags.FlagValues, the FlagValues instance to add the validator.\nvalidator_instance: validators.Validator, the validator to add.\nRaises:\nKeyError: Raised when validators work with a non-existing flag.", "source": "codesearchnet"}
{"code": "def create_transcripts_xml(video_id, video_el, resource_fs, static_dir):\n    video_transcripts = VideoTranscript.objects.filter(video__edx_video_id=video_id).order_by('language_code')\n    if video_transcripts.exists():\n        transcripts_el = SubElement(video_el, 'transcripts')\n    transcript_files_map = {}\n    for video_transcript in video_transcripts:\n        language_code = video_transcript.language_code\n        file_format = video_transcript.file_format\n        try:\n            transcript_filename = create_transcript_file(video_id=video_id, language_code=language_code, file_format=file_format, resource_fs=resource_fs.delegate_fs(), static_dir=combine(u'course', static_dir))\n            transcript_files_map[language_code] = transcript_filename\n        except TranscriptsGenerationException:\n            logger.exception('[VAL] Error while generating \"%s\" transcript for video[\"%s\"].', language_code, video_id)\n            continue\n        SubElement(transcripts_el, 'transcript', {'language_code': language_code, 'file_format': Transcript.SRT, 'provider': video_transcript.provider})\n    return dict(xml=video_el, transcripts=transcript_files_map)", "docstring": "Creates xml for transcripts.\nFor each transcript element, an associated transcript file is also created in course OLX.\n\nArguments:\nvideo_id (str): Video id of the video.\nvideo_el (Element): lxml Element object\nstatic_dir (str): The Directory to store transcript file.\nresource_fs (SubFS): The file system to store transcripts.\n\nReturns:\nlxml Element object with transcripts information", "source": "codesearchnet"}
{"code": "def _exec_one_test_with_retry(self, test_name, test_method, max_count):\n\n    def should_retry(record):\n        return record.result in [records.TestResultEnums.TEST_RESULT_FAIL, records.TestResultEnums.TEST_RESULT_ERROR]\n    previous_record = self.exec_one_test(test_name, test_method)\n    if not should_retry(previous_record):\n        return\n    for i in range(max_count - 1):\n        retry_name = f'{test_name}_retry_{i + 1}'\n        new_record = records.TestResultRecord(retry_name, self.TAG)\n        new_record.retry_parent = previous_record\n        new_record.parent = (previous_record, records.TestParentType.RETRY)\n        previous_record = self.exec_one_test(retry_name, test_method, new_record)\n        if not should_retry(previous_record):\n            break", "docstring": "Executes one test and retry the test if needed.\n\nRepeatedly execute a test case until it passes or the maximum count of\niteration has been reached.\n\nArgs:\ntest_name: string, Name of the test.\ntest_method: function, The test method to execute.\nmax_count: int, the maximum number of iterations to execute the test for.", "source": "github-repos"}
{"code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Updates this variable with the min of `tf.IndexedSlices` and itself.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to use as an argument of min with this\nvariable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def __init__(self, fsapi, filename, line_prepend='', prepend_timestamp=False):\n        \n        self._fsapi = fsapi\n        self._filename = filename\n        if line_prepend:\n            line_prepend += ' '\n        self._line_prepend = line_prepend\n        self._prepend_timestamp = prepend_timestamp\n        self._line_buffer = LineBuffer()", "docstring": "Constructor.\n\nArgs:\nfsapi: api.FileStreamApi instance\nfilename: Name of the file this stream is pushed to.\nline_prepend: string to prepend to every line for this stream.\nprepend_timestamp: If true a timestamp will be prepended to each line\n(after line_prepend).", "source": "juraj-google-style"}
{"code": "def estimate_blocktime(self, oldest: int = 256) -> float:\n        \n        last_block_number = self.block_number()\n        \n        if last_block_number < 1:\n            return 15\n        \n        if last_block_number < oldest:\n            interval = (last_block_number - 1) or 1\n        else:\n            interval = last_block_number - oldest\n        assert interval > 0\n        last_timestamp = self.get_block_header(last_block_number)['timestamp']\n        first_timestamp = self.get_block_header(last_block_number - interval)['timestamp']\n        delta = last_timestamp - first_timestamp\n        return delta / interval", "docstring": "Calculate a blocktime estimate based on some past blocks.\nArgs:\noldest: delta in block numbers to go back.\nReturn:\naverage block time in seconds", "source": "juraj-google-style"}
{"code": "def EnableNetworkInterfaces(\n      self, interfaces, logger, dhclient_script=None):\n    \n    interfaces_to_up = [i for i in interfaces if i != 'eth0']\n    if interfaces_to_up:\n      logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)\n      self._Dhcpcd(interfaces_to_up, logger)", "docstring": "Enable the list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.\ndhclient_script: string, the path to a dhclient script used by dhclient.", "source": "juraj-google-style"}
{"code": "async def change_url(self, url: str, description: str = None):\n        \n        await self._change(url=url, description=description)", "docstring": "change the url of that attachment\n\n|methcoro|\n\nArgs:\nurl: url you want to change\ndescription: *optional* description for your attachment\n\nRaises:\nValueError: url must not be None\nAPIException", "source": "juraj-google-style"}
{"code": "def _ParseHTTPHeaders(self, header_data, offset, display_name):\n    header_string = header_data.decode('ascii', errors='replace')\n    try:\n        http_header_start = header_string.index('request-method')\n    except ValueError:\n        logger.debug('No request method in header: \"{0:s}\"'.format(header_string))\n        return (None, None)\n    http_headers = header_string[http_header_start:]\n    header_parts = http_headers.split('\\x00')\n    request_method = header_parts[1]\n    if (request_method not in self._REQUEST_METHODS):\n        logger.debug(\"[{0:s}] {1:s}:{2:d}: Unknown HTTP method '{3:s}'. Response headers: '{4:s}'\".format(self.NAME, display_name, offset, request_method, header_string))\n    try:\n        response_head_start = http_headers.index('response-head')\n    except ValueError:\n        logger.debug('No response head in header: \"{0:s}\"'.format(header_string))\n        return (request_method, None)\n    response_head = http_headers[response_head_start:]\n    response_head_parts = response_head.split('\\x00')\n    response_head_text = response_head_parts[1]\n    response_head_text_parts = response_head_text.split('\\r\\n')\n    response_code = response_head_text_parts[0]\n    if (not response_code.startswith('HTTP')):\n        logger.debug(\"[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. Response headers: '{3:s}'.\".format(self.NAME, display_name, offset, header_string))\n    return (request_method, response_code)", "docstring": "Extract relevant information from HTTP header.\n\nArgs:\nheader_data (bytes): HTTP header data.\noffset (int): offset of the cache record, relative to the start of\nthe Firefox cache file.\ndisplay_name (str): display name of the Firefox cache file.\n\nReturns:\ntuple: containing:\n\nstr: HTTP request method or None if the value cannot be extracted.\nstr: HTTP response code or None if the value cannot be extracted.", "source": "codesearchnet"}
{"code": "def getZernike(self, index):\n        \n        if index not in list(self._dictCache.keys()):\n            self._dictCache[index]= self._polar(index, self._rhoMap,\n                                                self._thetaMap)\n        return self._dictCache[index]", "docstring": "getZernike\n\nRetrieve a map representing the index-th Zernike polynomial\n\nArgs:\nindex (int): The index of Zernike map to be generated,\nfollowing Noll 1976 ordering.\n\nReturns:\nnp.array: A map representing the index-th Zernike polynomial", "source": "juraj-google-style"}
{"code": "def parse_author(cls, marc):\n    name = None\n    code = None\n    linked_forms = None\n    is_corporation = None\n    record = None\n    if marc['100a']:\n        name = _first_or_none(marc['100a'])\n        code = _first_or_none(marc['1007'])\n        is_corporation = False\n        record = marc.datafields['100'][0]\n    elif marc['110a']:\n        name = _first_or_none(marc['110a'])\n        code = _first_or_none(marc['1107'])\n        linked_forms = marc['410a2 ']\n        is_corporation = True\n        record = marc.datafields['110'][0]\n    else:\n        return None\n    linked_forms = marc['410a2 ']\n    type_descriptor = ['osoba', 'organizace']\n    alt_name = ('%s [%s]' % (name, type_descriptor[is_corporation]))\n    if linked_forms:\n        alt_name += ((' (' + ', '.join(linked_forms)) + ')')\n    return cls(name=name, code=code, linked_forms=linked_forms, is_corporation=is_corporation, record=record, alt_name=alt_name)", "docstring": "Parse author from `marc` data.\n\nArgs:\nmarc (obj): :class:`.MARCXMLRecord` instance. See module\n:mod:`.marcxml_parser` for details.\n\nReturns:\nobj: :class:`Author`.", "source": "codesearchnet"}
{"code": "def fit(self, sents, **kwargs):\n    tokens = list(itertools.chain.from_iterable(sents))\n    counter = Counter(tokens)\n    self.vocab = self.build_vocab(counter, **kwargs)", "docstring": "Builds a vocabulary object based on the tokens in the input.\n\nArgs:\nsents: A list of lists of tokens (representing sentences)\n\nVocab kwargs include:\nmax_size\nmin_freq\nspecials\nunk_init", "source": "codesearchnet"}
{"code": "def delete(self):\n    config = self.get()\n    if (not config):\n        return True\n    command = 'no router ospf {}'.format(config['ospf_process_id'])\n    return self.configure(command)", "docstring": "Removes the entire ospf process from the running configuration\n\nArgs:\nNone\nReturns:\nbool: True if the command completed succssfully", "source": "codesearchnet"}
{"code": "def _get_section(name, source):\n    pattern = re.compile('^([^\\n]*{name}[^\\n]*\\n?(?:[ \\t].*?(?:\\n|$))*)'.format(name=name), (re.IGNORECASE | re.MULTILINE))\n    usage = None\n    for section in pattern.findall(source):\n        usage = _merge_section(usage, section.strip())\n    return usage", "docstring": "Extract the named section from the source.\n\nArgs:\nname: The name of the section to extract (e.g. \"Usage\").\nsource: The usage string to parse.\n\nReturns:\nA string containing only the requested section. If the section appears\nmultiple times, each instance will be merged into a single section.", "source": "codesearchnet"}
{"code": "def download(url, file=None):\n    import urllib.request\n    import shutil\n    if isinstance(file, str):\n        file = open(file, 'wb')\n    try:\n        with urllib.request.urlopen(url) as response:\n            if file:\n                shutil.copyfileobj(response, file)\n            else:\n                return response.read()\n    finally:\n        if file:\n            file.close()", "docstring": "Pass file as a filename, open file object, or None to return the request bytes\n\nArgs:\nurl (str): URL of file to download\nfile (Union[str, io, None]): One of the following:\n- Filename of output file\n- File opened in binary write mode\n- None: Return raw bytes instead\n\nReturns:\nUnion[bytes, None]: Bytes of file if file is None", "source": "codesearchnet"}
{"code": "def GetValueByName(self, name):\n    \n    pyregf_value = self._pyregf_key.get_value_by_name(name)\n    if not pyregf_value:\n      return None\n\n    return REGFWinRegistryValue(pyregf_value)", "docstring": "Retrieves a value by name.\n\nValue names are not unique and pyregf provides first match for the value.\n\nArgs:\nname (str): name of the value or an empty string for the default value.\n\nReturns:\nWinRegistryValue: Windows Registry value if a corresponding value was\nfound or None if not.", "source": "juraj-google-style"}
{"code": "def hdg60(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[0] == '0':\n        return None\n\n    sign = int(d[1])    \n    value = bin2int(d[2:12])\n\n    if sign:\n        value = value - 1024\n\n    hdg = value * 90 / 512.0  \n\n    \n    if hdg < 0:\n        hdg = 360 + hdg\n\n    return round(hdg, 3)", "docstring": "Megnetic heading of aircraft\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nfloat: heading in degrees to megnetic north (from 0 to 360)", "source": "juraj-google-style"}
{"code": "def power(self, n):\n    if ((not isinstance(n, (int, np.integer))) or (n < 1)):\n        raise QiskitError('Can only power with positive integer powers.')\n    if (self._input_dim != self._output_dim):\n        raise QiskitError('Can only power with input_dim = output_dim.')\n    ret = self.copy()\n    for _ in range(1, n):\n        ret = ret.compose(self)\n    return ret", "docstring": "Return the compose of a operator with itself n times.\n\nArgs:\nn (int): the number of times to compose with self (n>0).\n\nReturns:\nBaseOperator: the n-times composed operator.\n\nRaises:\nQiskitError: if the input and output dimensions of the operator\nare not equal, or the power is not a positive integer.", "source": "codesearchnet"}
{"code": "def get_values(js_dict, value='value'):\n    values = js_dict[value]\n    if (type(values) is list):\n        if ((type(values[0]) is not dict) or tuple):\n            return values\n    values = {int(key): value for (key, value) in values.items()}\n    if js_dict.get('size'):\n        max_val = np.prod(np.array(js_dict['size']))\n    else:\n        max_val = np.prod(np.array(js_dict['dimension']['size']))\n    vals = (max_val * [None])\n    for (key, value) in values.items():\n        vals[key] = value\n    values = vals\n    return values", "docstring": "Get values from input data.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\nvalue (string, optional): name of the value column. Defaults to 'value'.\n\nReturns:\nvalues (list): list of dataset values.", "source": "codesearchnet"}
{"code": "def match_criterion(self, tag):\n    return ((tag.name == self.reference_tag_name) and (tag.attrs.get('kind', '') == self.reference_tag_kind))", "docstring": "Override. Determine if a tag has the desired name and kind attribute\nvalue.\n\nArgs:\ntag: A BeautifulSoup Tag.\n\nReturns:\nTrue if tag has the desired name and kind, otherwise False.", "source": "codesearchnet"}
{"code": "def create_border(video, color='blue', border_percent=2):\n    if (video.shape[(- 1)] != 3):\n        return video\n    color_to_axis = {'blue': 2, 'red': 0, 'green': 1}\n    axis = color_to_axis[color]\n    (_, _, height, width, _) = video.shape\n    border_height = np.ceil(((border_percent * height) / 100.0)).astype(np.int)\n    border_width = np.ceil(((border_percent * width) / 100.0)).astype(np.int)\n    video[(:, :, :border_height, :, axis)] = 255\n    video[(:, :, (- border_height):, :, axis)] = 255\n    video[(:, :, :, :border_width, axis)] = 255\n    video[(:, :, :, (- border_width):, axis)] = 255\n    return video", "docstring": "Creates a border around each frame to differentiate input and target.\n\nArgs:\nvideo: 5-D NumPy array.\ncolor: string, \"blue\", \"red\" or \"green\".\nborder_percent: Percentarge of the frame covered by the border.\nReturns:\nvideo: 5-D NumPy array.", "source": "codesearchnet"}
{"code": "def get_version():\n    if all([VERSION, UPDATED, any([isinstance(UPDATED, date), isinstance(UPDATED, datetime)])]):\n        return FORMAT_STRING.format(**{'version': VERSION, 'updated': UPDATED})\n    elif VERSION:\n        return VERSION\n    elif UPDATED:\n        return (localize(UPDATED) if any([isinstance(UPDATED, date), isinstance(UPDATED, datetime)]) else '')\n    else:\n        return ''", "docstring": "Return formatted version string.\n\nReturns:\nstr: string with project version or empty string.", "source": "codesearchnet"}
{"code": "def ExamineEvent(self, mediator, event):\n    if (event.data_type not in self._DATATYPES):\n        return\n    url = getattr(event, 'url', None)\n    if (url is None):\n        return\n    parsed_url = urlparse.urlparse(url)\n    domain = getattr(parsed_url, 'netloc', None)\n    if (domain in self._domains):\n        return\n    self._domains.append(domain)", "docstring": "Analyzes an event and extracts domains from it.\n\nWe only evaluate straightforward web history events, not visits which can\nbe inferred by TypedURLs, cookies or other means.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "codesearchnet"}
{"code": "def Items(self, key):\n    with self._mutex:\n        if (key not in self._buckets):\n            raise KeyError(('Key %s was not found in Reservoir' % key))\n        bucket = self._buckets[key]\n    return bucket.Items()", "docstring": "Return items associated with given key.\n\nArgs:\nkey: The key for which we are finding associated items.\n\nRaises:\nKeyError: If the key is not found in the reservoir.\n\nReturns:\n[list, of, items] associated with that key.", "source": "codesearchnet"}
{"code": "def _ParseDataObject(self, file_object, file_offset):\n    data_object_map = self._GetDataTypeMap('systemd_journal_data_object')\n    try:\n        (data_object, _) = self._ReadStructureFromFileObject(file_object, file_offset, data_object_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse data object at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    if (data_object.object_type != self._OBJECT_TYPE_DATA):\n        raise errors.ParseError('Unsupported object type: {0:d}.'.format(data_object.object_type))\n    if (data_object.object_flags not in (0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4)):\n        raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(data_object.object_flags))\n    data_size = (data_object.data_size - 64)\n    data = file_object.read(data_size)\n    if (data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ):\n        data = lzma.decompress(data)\n    elif (data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4):\n        uncompressed_size_map = self._GetDataTypeMap('uint32le')\n        try:\n            uncompressed_size = self._ReadStructureFromByteStream(data, (file_offset + 64), uncompressed_size_map)\n        except (ValueError, errors.ParseError) as exception:\n            raise errors.ParseError('Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with error: {1!s}'.format((file_offset + 64), exception))\n        data = lz4.block.decompress(data[8:], uncompressed_size=uncompressed_size)\n    return data", "docstring": "Parses a data object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the data object relative to the start\nof the file-like object.\n\nReturns:\nbytes: data.\n\nRaises:\nParseError: if the data object cannot be parsed.", "source": "codesearchnet"}
{"code": "def SetTimeZone(self, time_zone):\n    try:\n        self._time_zone = pytz.timezone(time_zone)\n    except (AttributeError, pytz.UnknownTimeZoneError):\n        raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))", "docstring": "Sets the time zone.\n\nArgs:\ntime_zone (str): time zone.\n\nRaises:\nValueError: if the timezone is not supported.", "source": "codesearchnet"}
{"code": "def _segment_reduce(values, index, segment_reduce_fn, name):\n    flat_index = flatten(index)\n    vector_shape = tf.shape(values)[index.indices.shape.rank:]\n    flattened_shape = tf.concat([[-1], vector_shape], axis=0)\n    flat_values = tf.reshape(values, flattened_shape)\n    segment_means = segment_reduce_fn(data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments)\n    new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0)\n    output_values = tf.reshape(segment_means, new_shape)\n    output_index = range_index_map(index.batch_shape(), index.num_segments)\n    return (output_values, output_index)", "docstring": "Applies a segment reduction segment-wise.\n\nArgs:\nvalues (`tf.Tensor`):\nTensor with segment values.\nindex (`IndexMap`):\nIndexMap.\nsegment_reduce_fn (`str`):\nName for the reduce operation. One of \"sum\", \"mean\", \"max\" or \"min\".\nname (`str`):\nName for the operation. Currently not used\n\nReturns:\n(`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).", "source": "github-repos"}
{"code": "def SetEnvironmentVariable(self, name, value):\n    \n    if isinstance(value, py2to3.STRING_TYPES):\n      value = self._PathStripPrefix(value)\n\n    if value is not None:\n      self._environment_variables[name.upper()] = value", "docstring": "Sets an environment variable in the Windows path helper.\n\nArgs:\nname (str): name of the environment variable without enclosing\n%-characters, e.g. SystemRoot as in %SystemRoot%.\nvalue (str): value of the environment variable.", "source": "juraj-google-style"}
{"code": "def delete_object(self, object_name):\n\n    def delete_fn(weights_dict, source_name, target_name=None):\n        weights_dict.pop(source_name)\n    self._edit_object(delete_fn, object_name)", "docstring": "Removes an object from the file (e.g. a layer).\n\nArgs:\nobject_name: String, name or path of the\nobject to delete (e.g. `\"dense_2\"` or\n`\"layers/dense_2\"`).", "source": "github-repos"}
{"code": "def list_refs(profile, ref_type=None):\n    resource = '/refs'\n    if ref_type:\n        resource += ('/' + ref_type)\n    data = api.get_request(profile, resource)\n    result = [prepare(x) for x in data]\n    return result", "docstring": "List all refs.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nref_type\nThe type of ref you want. For heads, it's ``heads``. For tags,\nit's ``tags``. That sort of thing. If you don't specify a type,\nall refs are returned.\n\nReturns:\nA list of dicts with data about each ref.", "source": "codesearchnet"}
{"code": "def fetch_ensembl_exons(build='37'):\n    \n    LOG.info(\"Fetching ensembl exons build %s ...\", build)\n    if build == '37':\n        url = 'http:\n    else:\n        url = 'http:\n    \n    dataset_name = 'hsapiens_gene_ensembl'\n    \n    dataset = pybiomart.Dataset(name=dataset_name, host=url)\n    \n    attributes = [\n        'chromosome_name',\n        'ensembl_gene_id',\n        'ensembl_transcript_id',\n        'ensembl_exon_id',\n        'exon_chrom_start',\n        'exon_chrom_end',\n        '5_utr_start',\n        '5_utr_end',\n        '3_utr_start',\n        '3_utr_end',\n        'strand',\n        'rank'\n    ]\n    \n    filters = {\n        'chromosome_name': CHROMOSOMES,\n    }\n    \n    result = dataset.query(\n        attributes = attributes,\n        filters = filters\n    )\n    \n    return result", "docstring": "Fetch the ensembl genes\n\nArgs:\nbuild(str): ['37', '38']", "source": "juraj-google-style"}
{"code": "def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:\n        \n\n        return self._members.get(key, default)", "docstring": "Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.\n\nArgs:\ndefault: This is the value to be returned in case key does not exist.\n\nReturns:\n:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,\n:py:class:`Attribute` or :py:class:`Varying`", "source": "juraj-google-style"}
{"code": "def trace(self, data, callback=None):\n    if (self._push_channel is None):\n        return\n    self._push_channel.trace(data, callback=callback)", "docstring": "Trace data asynchronously.\n\nIf no one is listening for traced data, it will be dropped\notherwise it will be queued for sending.\n\nArgs:\ndata (bytearray, string): Unstructured data to trace to any\nconnected client.\ncallback (callable): Optional callback to get notified when\nthis data is actually sent.", "source": "codesearchnet"}
{"code": "def copartition(self, axis, other, how_to_join, sort, force_repartition=False):\n    if isinstance(other, type(self)):\n        other = [other]\n    index_obj = ([o.index for o in other] if (axis == 0) else [o.columns for o in other])\n    joined_index = self._join_index_objects((axis ^ 1), index_obj, how_to_join, sort=sort)\n    left_old_idx = (self.index if (axis == 0) else self.columns)\n    right_old_idxes = index_obj\n    reindexed_self = self.data\n    reindexed_other_list = []\n\n    def compute_reindex(old_idx):\n        'Create a function based on the old index and axis.\\n\\n            Args:\\n                old_idx: The old index/columns\\n\\n            Returns:\\n                A function that will be run in each partition.\\n            '\n\n        def reindex_partition(df):\n            if (axis == 0):\n                df.index = old_idx\n                new_df = df.reindex(index=joined_index)\n                new_df.index = pandas.RangeIndex(len(new_df.index))\n            else:\n                df.columns = old_idx\n                new_df = df.reindex(columns=joined_index)\n                new_df.columns = pandas.RangeIndex(len(new_df.columns))\n            return new_df\n        return reindex_partition\n    for i in range(len(other)):\n        if ((i != 0) or (left_old_idx.equals(joined_index) and (not force_repartition))):\n            reindex_left = None\n        else:\n            reindex_left = self._prepare_method(compute_reindex(left_old_idx))\n        if (right_old_idxes[i].equals(joined_index) and (not force_repartition)):\n            reindex_right = None\n        else:\n            reindex_right = other[i]._prepare_method(compute_reindex(right_old_idxes[i]))\n        (reindexed_self, reindexed_other) = reindexed_self.copartition_datasets(axis, other[i].data, reindex_left, reindex_right)\n        reindexed_other_list.append(reindexed_other)\n    return (reindexed_self, reindexed_other_list, joined_index)", "docstring": "Copartition two QueryCompiler objects.\n\nArgs:\naxis: The axis to copartition along.\nother: The other Query Compiler(s) to copartition against.\nhow_to_join: How to manage joining the index object (\"left\", \"right\", etc.)\nsort: Whether or not to sort the joined index.\nforce_repartition: Whether or not to force the repartitioning. By default,\nthis method will skip repartitioning if it is possible. This is because\nreindexing is extremely inefficient. Because this method is used to\n`join` or `append`, it is vital that the internal indices match.\n\nReturns:\nA tuple (left query compiler, right query compiler list, joined index).", "source": "codesearchnet"}
{"code": "def duplicate_module(module_file: Union[str, os.PathLike], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[str]=None, add_copied_from: bool=True, attrs_to_remove: Optional[List[str]]=None):\n    if dest_file is None:\n        dest_file = str(module_file).replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased)\n    with open(module_file, 'r', encoding='utf-8') as f:\n        content = f.read()\n    content = re.sub('\n    objects = parse_module_content(content)\n    new_objects = []\n    for obj in objects:\n        special_pattern = False\n        for pattern, attr in SPECIAL_PATTERNS.items():\n            if pattern in obj:\n                obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))\n                new_objects.append(obj)\n                special_pattern = True\n                break\n        if special_pattern:\n            continue\n        old_obj = obj\n        obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)\n        has_copied_from = re.search('^\n        if add_copied_from and (not has_copied_from) and (_re_class_func.search(obj) is not None) and (len(replacement) > 0):\n            module_name = get_module_from_file(module_file)\n            old_object_name = _re_class_func.search(old_obj).groups()[0]\n            obj = add_content_to_text(obj, f'\n        obj = re.sub('\\n[ ]+\n        new_objects.append(obj)\n    content = '\\n'.join(new_objects)\n    if attrs_to_remove is not None:\n        for attr in attrs_to_remove:\n            content = remove_attributes(content, target_attr=attr)\n    with open(dest_file, 'w', encoding='utf-8') as f:\n        f.write(content)", "docstring": "Create a new module from an existing one and adapting all function and classes names from old patterns to new ones.\n\nArgs:\nmodule_file (`str` or `os.PathLike`): Path to the module to duplicate.\nold_model_patterns (`ModelPatterns`): The patterns for the old model.\nnew_model_patterns (`ModelPatterns`): The patterns for the new model.\ndest_file (`str` or `os.PathLike`, *optional*): Path to the new module.\nadd_copied_from (`bool`, *optional*, defaults to `True`):\nWhether or not to add `# Copied from` statements in the duplicated module.", "source": "github-repos"}
{"code": "def fit(self, X):\n    self.constant_value = self._get_constant_value(X)\n    if (self.constant_value is None):\n        self.model = scipy.stats.gaussian_kde(X)\n    else:\n        self._replace_constant_methods()\n    self.fitted = True", "docstring": "Fit Kernel density estimation to an list of values.\n\nArgs:\nX: 1-d `np.ndarray` or `pd.Series` or `list` datapoints to be estimated from.\n\nThis function will fit a gaussian_kde model to a list of datapoints\nand store it as a class attribute.", "source": "codesearchnet"}
{"code": "def __init__(self, job=None, replica=None, task=None, device_type=None, device_index=None):\n    self._job = _as_str_or_none(job)\n    self._replica = _as_int_or_none(replica)\n    self._task = _as_int_or_none(task)\n    self._device_type = _as_device_str_or_none(device_type)\n    self._device_index = _as_int_or_none(device_index)\n    self._as_string = self._components_to_string(job=self._job, replica=self._replica, task=self._task, device_type=self._device_type, device_index=self._device_index)\n    self._hash = hash(self.to_string())", "docstring": "Create a new `DeviceSpec` object.\n\nArgs:\njob: string.  Optional job name.\nreplica: int.  Optional replica index.\ntask: int.  Optional task index.\ndevice_type: Optional device type string (e.g. \"CPU\" or \"GPU\")\ndevice_index: int.  Optional device index.  If left unspecified, device\nrepresents 'any' device_index.", "source": "github-repos"}
{"code": "def list_indexes(cls):\n        \n\n        cls_list = cls.list_mapped_classes()\n        rtn_obj = {}\n        for key, value in cls_list.items():\n            idx = value.es_defs.get('kds_esIndex')[0]\n            try:\n                rtn_obj[idx].append(value)\n            except KeyError:\n                rtn_obj[idx] = [value]\n        return rtn_obj", "docstring": "Returns a dictionary with the key as the es_index name and the\nobject is a list of rdfclasses for that index\n\nargs:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, path_spec):\n    \n    super(SourceScanNode, self).__init__()\n    self.path_spec = path_spec\n    self.parent_node = None\n    self.scanned = False\n    self.sub_nodes = []", "docstring": "Initializes a source scan node.\n\nArgs:\npath_spec (PathSpec): path specification.", "source": "juraj-google-style"}
{"code": "def __init__(self, command = None):\n\t\t\n\t\tself._output = None\n\t\tself._errors = None\n\t\tself._command = None\n\t\tself.command = command", "docstring": "Class constructor.\n\nArgs:\ncommand (str): Command to execute", "source": "juraj-google-style"}
{"code": "def set_triple(self, p, o, auto_refresh=True):\n    self.rdf.graph.set((self.uri, p, self._handle_object(o)))\n    self._handle_triple_refresh(auto_refresh)", "docstring": "Assuming the predicate or object matches a single triple, sets the other for that triple.\n\nArgs:\np (rdflib.term.URIRef): predicate\no (): object\nauto_refresh (bool): whether or not to update object-like self.rdf.triples\n\nReturns:\nNone: modifies pre-existing triple in self.rdf.graph", "source": "codesearchnet"}
{"code": "def exists(self, path):\n    self.__validate_storage_path(path)\n    try:\n        metadata = self.api_client.get_entity_by_query(path=path)\n    except StorageNotFoundException:\n        return False\n    return (metadata and ('uuid' in metadata))", "docstring": "Check if a certain path exists in the storage service.\n\nArgs:\npath (str): The path to be checked\n\nReturns:\nTrue if the path exists, False otherwise\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def unembed_samples(samples, embedding, chain_break_method=None):\n    if (chain_break_method is None):\n        chain_break_method = majority_vote\n    return list(itertools.chain(*(chain_break_method(sample, embedding) for sample in samples)))", "docstring": "Return samples over the variables in the source graph.\n\nArgs:\nsamples (iterable): An iterable of samples where each sample\nis a dict of the form {v: val, ...} where v is a variable\nin the target model and val is the associated value as\ndetermined by a binary quadratic model sampler.\nembedding (dict): The mapping from the source graph to the target graph.\nShould be of the form {v: {s, ...}, ...} where v is a node in the\nsource graph and s is a node in the target graph.\nchain_break_method (function, optional): The method used to resolve chain\nbreaks. Default is :method:`majority_vote`.\n\nReturns:\nlist: A list of unembedded samples. Each sample is a dict of the form\n{v: val, ...} where v is a variable in the source graph and val\nis the value associated with the variable.", "source": "codesearchnet"}
{"code": "def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta]=DEFAULT_CACHE_ENTRY_TTL_SEC, *, request_coder: Optional[coders.Coder]=None, response_coder: Optional[coders.Coder]=None, **kwargs):\n    self._host = host\n    self._port = port\n    self._time_to_live = time_to_live\n    self._request_coder = request_coder\n    self._response_coder = response_coder\n    self._kwargs = kwargs if kwargs else {}\n    self._source_caller = None", "docstring": "Args:\nhost (str): The hostname or IP address of the Redis server.\nport (int): The port number of the Redis server.\ntime_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for\nrecords stored in Redis. Provide an integer (in seconds) or a\n`datetime.timedelta` object.\nrequest_coder: (Optional[`coders.Coder`]) coder for encoding requests.\nresponse_coder: (Optional[`coders.Coder`]) coder for decoding responses\nreceived from Redis.\nkwargs: Optional additional keyword arguments that\nare required to connect to your redis server. Same as `redis.Redis()`.", "source": "github-repos"}
{"code": "def prefixlen_to_mask(prefixlen):\n    \n    prefixlen = prefixlen or '32'\n    addr = '0.0.0.0/%s' % prefixlen\n    return str(netaddr.IPNetwork(addr).netmask)", "docstring": "Converts a prefix length to a dotted decimal subnet mask\n\nArgs:\nprefixlen (str): The prefix length value to convert\n\nReturns:\nstr: The subt mask as a dotted decimal string", "source": "juraj-google-style"}
{"code": "def exec_one_test(self, test_name, test_method, record=None):\n    tr_record = record or records.TestResultRecord(test_name, self.TAG)\n    tr_record.uid = getattr(test_method, 'uid', None)\n    tr_record.test_begin()\n    self.current_test_info = runtime_test_info.RuntimeTestInfo(test_name, self.log_path, tr_record)\n    expects.recorder.reset_internal_states(tr_record)\n    logging.info('%s %s', TEST_CASE_TOKEN, test_name)\n    teardown_test_failed = False\n    try:\n        try:\n            try:\n                self._setup_test(test_name)\n            except signals.TestFailure as e:\n                _, _, traceback = sys.exc_info()\n                raise signals.TestError(e.details, e.extras).with_traceback(traceback)\n            test_method()\n        except (signals.TestPass, signals.TestAbortSignal, signals.TestSkip):\n            raise\n        except Exception:\n            logging.exception('Exception occurred in %s.', self.current_test_info.name)\n            raise\n        finally:\n            before_count = expects.recorder.error_count\n            try:\n                self._teardown_test(test_name)\n            except signals.TestAbortSignal:\n                raise\n            except Exception as e:\n                logging.exception('Exception occurred in %s of %s.', STAGE_NAME_TEARDOWN_TEST, self.current_test_info.name)\n                tr_record.test_error()\n                tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)\n                teardown_test_failed = True\n            else:\n                if before_count < expects.recorder.error_count:\n                    tr_record.test_error()\n                    teardown_test_failed = True\n    except (signals.TestFailure, AssertionError) as e:\n        tr_record.test_fail(e)\n    except signals.TestSkip as e:\n        tr_record.test_skip(e)\n    except signals.TestAbortSignal as e:\n        tr_record.test_fail(e)\n        raise\n    except signals.TestPass as e:\n        tr_record.test_pass(e)\n    except Exception as e:\n        tr_record.test_error(e)\n    else:\n        if expects.recorder.has_error and (not teardown_test_failed):\n            tr_record.test_fail()\n        elif not teardown_test_failed:\n            tr_record.test_pass()\n    finally:\n        tr_record.update_record()\n        try:\n            if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR, records.TestResultEnums.TEST_RESULT_FAIL):\n                self._exec_procedure_func(self._on_fail, tr_record)\n            elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:\n                self._exec_procedure_func(self._on_pass, tr_record)\n            elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:\n                self._exec_procedure_func(self._on_skip, tr_record)\n        finally:\n            logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name, tr_record.result)\n            self.results.add_record(tr_record)\n            self.summary_writer.dump(tr_record.to_dict(), records.TestSummaryEntryType.RECORD)\n            self.current_test_info = None\n    return tr_record", "docstring": "Executes one test and update test results.\n\nExecutes setup_test, the test method, and teardown_test; then creates a\nrecords.TestResultRecord object with the execution information and adds\nthe record to the test class's test results.\n\nArgs:\ntest_name: string, Name of the test.\ntest_method: function, The test method to execute.\nrecord: records.TestResultRecord, optional arg for injecting a 
record\nobject to use for this test execution. If not set, a new one is created\ncreated. This is meant for passing information between consecutive test\ncase execution for retry purposes. Do NOT abuse this for \"magical\"\nfeatures.\n\nReturns:\nTestResultRecord, the test result record object of the test execution.\nThis object is strictly for read-only purposes. Modifying this record\nwill not change what is reported in the test run's summary yaml file.", "source": "github-repos"}
{"code": "def _black_objective_and_vega(volatilities):\n    vol_t = volatilities * sqrt_t\n    d1 = lnz / vol_t + vol_t / 2\n    d2 = d1 - vol_t\n    implied_prices = norm_forwards * _cdf(d1) - norm_strikes * _cdf(d2)\n    if is_call_options is not None:\n        put_prices = implied_prices - norm_forwards + norm_strikes\n        implied_prices = tf.where(tf.broadcast_to(is_call_options, tf.shape(put_prices)), implied_prices, put_prices)\n    vega = norm_forwards * _pdf(d1) * sqrt_t / discount_factors\n    return (implied_prices - normalized_prices, vega)", "docstring": "Calculate the Black Scholes price and vega for a given volatility.\n\nThis method returns normalized results.\n\nArgs:\nvolatilities: A real `Tensor` of same shape and dtype as `forwards`. The\nvolatility to expiry.\n\nReturns:\nA tuple containing (value, gradient) of the black scholes price, both of\nwhich are `Tensor`s of the same shape and dtype as `volatilities`.", "source": "github-repos"}
{"code": "def add_transition(self, source: str, dest: str):\n        \n        self._transitions[source].append(dest)", "docstring": "Adds a transition from one state to another.\n\nArgs:\nsource (str): the name of the state from where the transition starts\ndest (str): the name of the state where the transition ends", "source": "juraj-google-style"}
{"code": "def argmin(x, axis=-1):\n    return math_ops.argmin(x, axis)", "docstring": "Returns the index of the minimum value along an axis.\n\nArgs:\nx: Tensor or variable.\naxis: axis along which to perform the reduction.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def check_line_split(code_line):\n    return re.search('\\\\\\\\\\\\s*\\\\n$', code_line)", "docstring": "Checks if a line was split with `\\`.\n\nArgs:\ncode_line: A line of Python code\n\nReturns:\nIf the line was split with `\\`\n\n>>> skip_magic(\"!gcloud ml-engine models create ${MODEL} \\\\\\n\")\nTrue", "source": "github-repos"}
{"code": "def _set_read_only_resource_inputs_attr(op: ops.Operation, func_graph: func_graph_module.FuncGraph):\n    read_only_indices = acd.get_read_only_resource_input_indices_graph(func_graph)\n    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, read_only_indices)", "docstring": "Sets the list of resource inputs which are read-only.\n\nThis is used by AutomaticControlDependencies.\n\nArgs:\nop: PartitionedCall Operation.\nfunc_graph: FuncGraph.", "source": "github-repos"}
{"code": "def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):\n    output_file = open_strings_file(file_path, 'a')\n    write_section_header_to_file(output_file, section_name)\n    for (entry_key, entry_comment) in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):\n        output_file.write(u'\\n')\n        write_entry_to_file(output_file, entry_comment, entry_key)\n    output_file.close()", "docstring": "Appends dictionary of localization keys and comments to a file\n\nArgs:\nlocalization_key_to_comment (dict): A mapping between localization keys and comments.\nfile_path (str): The path of the file to append to.\nsection_name (str): The name of the section.", "source": "codesearchnet"}
{"code": "def wait_for_jobs(jobs):\n    \n\n    all_running = False\n    while not all_running:\n        all_running = True\n        time.sleep(5)\n        for job in jobs:\n            job.refresh()\n            scheduled = getattr(job, \"scheduled_at\", None)\n            if scheduled is not None:\n                logger.info(\"Waiting for %s on %s [%s]\" % (job.uid,\n                                                           job.site,\n                                                           _date2h(scheduled)))\n            all_running = all_running and job.state == \"running\"\n            if job.state == \"error\":\n                raise Exception(\"The job %s is in error state\" % job)\n    logger.info(\"All jobs are Running !\")", "docstring": "Waits for all the jobs to be runnning.\n\nArgs:\njobs(list): list of the python-grid5000 jobs to wait for\n\n\nRaises:\nException: if one of the job gets in error state.", "source": "juraj-google-style"}
{"code": "def format_dict(dic, format_list, separator=',', default_value=str):\n    \n\n    dic = collections.defaultdict(default_value, dic)\n\n    str_format = separator.join([\"{\" + \"{}\".format(head) + \"}\" for head in format_list])\n\n    return str_format.format(**dic)", "docstring": "Format dict to string passing a list of keys as order\nArgs:\nlista: List with elements to clean duplicates.", "source": "juraj-google-style"}
{"code": "def postprocess_periodical(marc_xml, mods, uuid, counter, url):\n    \n    dom = double_linked_dom(mods)\n\n    \n\n    add_missing_xml_attributes(dom, counter)\n\n    if uuid:\n        add_uuid(dom, uuid)\n\n    return dom.prettify()", "docstring": "Some basic postprocessing of the periodical publications.\n\nArgs:\nmarc_xml (str): Original Aleph record.\nmods (str): XML string generated by XSLT template.\nuuid (str): UUID of the package.\ncounter (int): Number of record, is added to XML headers.\nurl (str): URL of the publication (public or not).\n\nReturns:\nstr: Updated XML.", "source": "juraj-google-style"}
{"code": "def add_cookie_header(self, request, referrer_host=None):\n    new_request = convert_http_request(request, referrer_host)\n    self._cookie_jar.add_cookie_header(new_request)\n    request.fields.clear()\n    for (name, value) in new_request.header_items():\n        request.fields.add(name, value)", "docstring": "Wrapped ``add_cookie_header``.\n\nArgs:\nrequest: An instance of :class:`.http.request.Request`.\nreferrer_host (str): An hostname or IP address of the referrer\nURL.", "source": "codesearchnet"}
{"code": "def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http', sslflags=None):\n    protocol = six.text_type(protocol).lower()\n    name = _get_binding_info(hostheader, ipaddress, port)\n    if (protocol not in _VALID_PROTOCOLS):\n        message = \"Invalid protocol '{0}' specified. Valid formats: {1}\".format(protocol, _VALID_PROTOCOLS)\n        raise SaltInvocationError(message)\n    if sslflags:\n        sslflags = int(sslflags)\n        if (sslflags not in _VALID_SSL_FLAGS):\n            message = \"Invalid sslflags '{0}' specified. Valid sslflags range: {1}..{2}\".format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[(- 1)])\n            raise SaltInvocationError(message)\n    current_bindings = list_bindings(site)\n    if (name in current_bindings):\n        log.debug('Binding already present: %s', name)\n        return True\n    if sslflags:\n        ps_cmd = ['New-WebBinding', '-Name', \"'{0}'\".format(site), '-HostHeader', \"'{0}'\".format(hostheader), '-IpAddress', \"'{0}'\".format(ipaddress), '-Port', \"'{0}'\".format(port), '-Protocol', \"'{0}'\".format(protocol), '-SslFlags', '{0}'.format(sslflags)]\n    else:\n        ps_cmd = ['New-WebBinding', '-Name', \"'{0}'\".format(site), '-HostHeader', \"'{0}'\".format(hostheader), '-IpAddress', \"'{0}'\".format(ipaddress), '-Port', \"'{0}'\".format(port), '-Protocol', \"'{0}'\".format(protocol)]\n    cmd_ret = _srvmgr(ps_cmd)\n    if (cmd_ret['retcode'] != 0):\n        msg = 'Unable to create binding: {0}\\nError: {1}'.format(site, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n    if (name in list_bindings(site)):\n        log.debug('Binding created successfully: %s', site)\n        return True\n    log.error('Unable to create binding: %s', site)\n    return False", "docstring": "Create an IIS Web Binding.\n\n.. note::\n\nThis function only validates against the binding\nipaddress:port:hostheader combination, and will return True even if the\nbinding already exists with a different configuration. It will not\nmodify the configuration of an existing binding.\n\nArgs:\nsite (str): The IIS site name.\nhostheader (str): The host header of the binding. Usually a hostname.\nipaddress (str): The IP address of the binding.\nport (int): The TCP port of the binding.\nprotocol (str): The application protocol of the binding.\nsslflags (str): The flags representing certificate type and storage of\nthe binding.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'", "source": "codesearchnet"}
{"code": "def ParseSearchRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TwitterAndroidSearchEventData()\n    event_data.query = query\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.search_query = self._GetRowValue(query_hash, row, 'query')\n\n    timestamp = self._GetRowValue(query_hash, row, 'time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a search row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def get_or_create_hosted_zone(client, zone_name):\n    \n    zone_id = get_hosted_zone_by_name(client, zone_name)\n    if zone_id:\n        return zone_id\n\n    logger.debug(\"Zone %s does not exist, creating.\", zone_name)\n\n    reference = uuid.uuid4().hex\n\n    response = client.create_hosted_zone(Name=zone_name,\n                                         CallerReference=reference)\n\n    return parse_zone_id(response[\"HostedZone\"][\"Id\"])", "docstring": "Get the Id of an existing zone, or create it.\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\nstring: The Id of the Hosted Zone.", "source": "juraj-google-style"}
{"code": "def _CreateRouteOptions(self, **kwargs):\n    \n    options = {\n        'proto': self.proto_id,\n        'scope': 'host',\n    }\n    options.update(kwargs)\n    return options", "docstring": "Create a dictionary of parameters to append to the ip route command.\n\nArgs:\n**kwargs: dict, the string parameters to update in the ip route command.\n\nReturns:\ndict, the string parameters to append to the ip route command.", "source": "juraj-google-style"}
{"code": "def _sparse_block_diag(sp_a):\n  \n  \n  \n  \n  \n  \n  sp_a_shape = tf.convert_to_tensor(value=_get_shape(sp_a, tf.int64))\n  ind_mat = tf.concat([[sp_a_shape[-2:]], tf.eye(2, dtype=tf.int64)], axis=0)\n  indices = tf.matmul(sp_a.indices, ind_mat)\n  dense_shape = sp_a_shape[0] * sp_a_shape[1:]\n  return tf.SparseTensor(\n      indices=indices, values=sp_a.values, dense_shape=dense_shape)", "docstring": "Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors.\n\nArgs:\nsp_a: A rank 3 `SparseTensor` representing a batch of matrices.\n\nReturns:\nsp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype\nas `sparse_or_matrix`, of shape [B * M, B * N] where `sp_a` has shape\n[B, M, N]. Each [M, N] batch of `sp_a` is lined up along the diagonal.", "source": "juraj-google-style"}
{"code": "def validate_task_schema(context, schema_key='schema_file'):\n    schema_path = context.config\n    schema_keys = schema_key.split('.')\n    for key in schema_keys:\n        schema_path = schema_path[key]\n    task_schema = load_json_or_yaml(schema_path, is_path=True)\n    log.debug('Task is validated against this schema: {}'.format(task_schema))\n    try:\n        validate_json_schema(context.task, task_schema)\n    except ScriptWorkerTaskException as e:\n        raise TaskVerificationError('Cannot validate task against schema. Task: {}.'.format(context.task)) from e", "docstring": "Validate the task definition.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context. It must contain a task and\nthe config pointing to the schema file\nschema_key: the key in `context.config` where the path to the schema file is. Key can contain\ndots (e.g.: 'schema_files.file_a'), in which case\n\nRaises:\nTaskVerificationError: if the task doesn't match the schema", "source": "codesearchnet"}
{"code": "def qemu_rebase(target, backing_file, safe=True, fail_on_error=True):\n    \n    cmd = ['qemu-img', 'rebase', '-b', backing_file, target]\n    if not safe:\n        cmd.insert(2, '-u')\n\n    return run_command_with_validation(\n        cmd,\n        fail_on_error,\n        msg='Failed to rebase {target} onto {backing_file}'.format(\n            target=target, backing_file=backing_file\n        )\n    )", "docstring": "changes the backing file of 'source' to 'backing_file'\nIf backing_file is specified as \"\" (the empty string),\nthen the image is rebased onto no backing file\n(i.e. it will exist independently of any backing file).\n(Taken from qemu-img man page)\n\nArgs:\ntarget(str): Path to the source disk\nbacking_file(str): path to the base disk\nsafe(bool): if false, allow unsafe rebase\n(check qemu-img docs for more info)", "source": "juraj-google-style"}
{"code": "def recode_dwgsim_reads(\n        dwgsim_prefix,\n        fastq_rnf_fo,\n        fai_fo,\n        genome_id,\n        estimate_unknown_values,\n        number_of_read_tuples=10**9,\n    ):\n        \n\n        dwgsim_pattern = re.compile(\n            '@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)'\n        )\n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)\n        read_tuple_id_width = len(format(number_of_read_tuples, 'x'))\n\n        \n        read_tuple_id = 0\n        last_read_tuple_name = None\n        old_fq = \"{}.bfast.fastq\".format(dwgsim_prefix)\n\n        fq_creator = rnftools.rnfformat.FqCreator(\n            fastq_fo=fastq_rnf_fo,\n            read_tuple_id_width=read_tuple_id_width,\n            genome_id_width=2,\n            chr_id_width=fai_index.chr_id_width,\n            coor_width=fai_index.coor_width,\n            info_reads_in_tuple=True,\n            info_simulator=\"dwgsim\",\n        )\n\n        i = 0\n        with open(old_fq, \"r+\") as f1:\n            for line in f1:\n                if i % 4 == 0:\n                    read_tuple_name = line[1:].strip()\n                    if read_tuple_name != last_read_tuple_name:\n                        new_tuple = True\n                        if last_read_tuple_name is not None:\n                            read_tuple_id += 1\n                    else:\n                        new_tuple = False\n\n                    last_read_tuple_name = read_tuple_name\n                    m = dwgsim_pattern.search(line)\n                    if m is None:\n                        rnftools.utils.error(\n                            \"Read tuple '{}' was not created by DwgSim.\".format(line[1:]),\n                            program=\"RNFtools\",\n                            subprogram=\"MIShmash\",\n                            exception=ValueError,\n                        )\n\n                    contig_name = m.group(1)\n                    start_1 = int(m.group(2))\n                    start_2 = int(m.group(3))\n                    direction_1 = \"F\" if int(m.group(4)) == 0 else \"R\"\n                    direction_2 = \"F\" if int(m.group(5)) == 0 else \"R\"\n                    \n                    \n                    \n                    \n                    \n                    \n                    \n                    \n                    \n\n                    chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else \"0\"\n\n                elif i % 4 == 1:\n                    bases = line.strip()\n\n                    if new_tuple:\n\n                        segment = rnftools.rnfformat.Segment(\n                            genome_id=genome_id,\n                            chr_id=chr_id,\n                            direction=direction_1,\n                            left=start_1,\n                            right=start_1 + len(bases) - 1 if estimate_unknown_values else 0,\n                        )\n\n                    else:\n\n                        segment = rnftools.rnfformat.Segment(\n                            genome_id=genome_id,\n                            chr_id=chr_id,\n                            direction=direction_2,\n                            left=start_2,\n                            
right=start_2 + len(bases) - 1 if estimate_unknown_values else 0,\n                        )\n\n                elif i % 4 == 2:\n                    pass\n\n                elif i % 4 == 3:\n                    qualities = line.strip()\n                    fq_creator.add_read(\n                        read_tuple_id=read_tuple_id,\n                        bases=bases,\n                        qualities=qualities,\n                        segments=[segment],\n                    )\n\n                i += 1\n\n        fq_creator.flush_read_tuple()", "docstring": "Convert DwgSim FASTQ file to RNF FASTQ file.\n\nArgs:\ndwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).\nfastq_rnf_fo (file): File object of RNF FASTQ.\nfai_fo (file): File object for FAI file of the reference genome.\ngenome_id (int): RNF genome ID to be used.\nestimate_unknown_values (bool): Estimate unknown values (right coordinate of each end).\nnumber_of_read_tuples (int): Estimate of number of simulated read tuples (to set width).", "source": "juraj-google-style"}
{"code": "def received(self, messages):\n        \n        if messages:\n            if self._queue:\n                self._queue.put_nowait(messages)\n\n            if self._callback:\n                self._callback(messages)", "docstring": "Called when new messages arrive.\n\nArgs:\nmessages (tuple): Messages", "source": "juraj-google-style"}
{"code": "def _build_vocab(filename, vocab_dir, vocab_name):\n  \n  vocab_path = os.path.join(vocab_dir, vocab_name)\n  if not tf.gfile.Exists(vocab_path):\n    with tf.gfile.GFile(filename, \"r\") as f:\n      data = f.read().split()\n    counter = collections.Counter(data)\n    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n    words, _ = list(zip(*count_pairs))\n    encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)\n    encoder.store_to_file(vocab_path)\n  else:\n    encoder = text_encoder.TokenTextEncoder(vocab_path)\n  return encoder", "docstring": "Reads a file to build a vocabulary.\n\nArgs:\nfilename: file to read list of words from.\nvocab_dir: directory where to save the vocabulary.\nvocab_name: vocab file name.\n\nReturns:\ntext encoder.", "source": "juraj-google-style"}
{"code": "def foo(self, a: int, *args, b: str='x', **kwargs) -> str:\n    del a, args, kwargs\n    return b", "docstring": "Function foo.\n\nArgs:\na: An int.\n*args: Varargs.\nb: A str.\n**kwargs: Kwargs.\n\nReturns:\nA str.", "source": "github-repos"}
{"code": "def read_locations(filename):\n    data = ConfigParser()\n    if (filename == '-'):\n        data.read_file(sys.stdin)\n    else:\n        data.read(filename)\n    if (not data.sections()):\n        logging.debug('Config file is empty')\n    locations = {}\n    for name in data.sections():\n        if data.has_option(name, 'locator'):\n            (latitude, longitude) = utils.from_grid_locator(data.get(name, 'locator'))\n        else:\n            latitude = data.getfloat(name, 'latitude')\n            longitude = data.getfloat(name, 'longitude')\n        locations[name] = (latitude, longitude)\n    return locations", "docstring": "Pull locations from a user's config file.\n\nArgs:\nfilename (str): Config file to parse\n\nReturns:\ndict: List of locations from config file", "source": "codesearchnet"}
{"code": "def LessThan(self, value):\n    self._awql = self._CreateSingleValueCondition(value, '<')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"less than\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def _CalculateHashDataStream(self, file_entry, data_stream_name):\n    hash_context = hashlib.sha256()\n    try:\n        file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n    except IOError as exception:\n        logging.warning('Unable to open path specification:\\n{0:s}with error: {1!s}'.format(file_entry.path_spec.comparable, exception))\n        return None\n    if (not file_object):\n        return None\n    try:\n        data = file_object.read(self._READ_BUFFER_SIZE)\n        while data:\n            hash_context.update(data)\n            data = file_object.read(self._READ_BUFFER_SIZE)\n    except IOError as exception:\n        logging.warning('Unable to read from path specification:\\n{0:s}with error: {1!s}'.format(file_entry.path_spec.comparable, exception))\n        return None\n    finally:\n        file_object.close()\n    return hash_context.hexdigest()", "docstring": "Calculates a message digest hash of the data of the file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): name of the data stream.\n\nReturns:\nbytes: digest hash or None.", "source": "codesearchnet"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    \n    fsapfs_file_entry = None\n    location = getattr(path_spec, 'location', None)\n    identifier = getattr(path_spec, 'identifier', None)\n\n    if (location == self.LOCATION_ROOT or\n        identifier == self.ROOT_DIRECTORY_IDENTIFIER):\n      fsapfs_file_entry = self._fsapfs_volume.get_root_directory()\n      return apfs_file_entry.APFSFileEntry(\n          self._resolver_context, self, path_spec,\n          fsapfs_file_entry=fsapfs_file_entry, is_root=True)\n\n    try:\n      if identifier is not None:\n        fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(\n            identifier)\n      elif location is not None:\n        fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)\n\n    except IOError as exception:\n      raise errors.BackEndError(exception)\n\n    if fsapfs_file_entry is None:\n      return None\n\n    return apfs_file_entry.APFSFileEntry(\n        self._resolver_context, self, path_spec,\n        fsapfs_file_entry=fsapfs_file_entry)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nAPFSFileEntry: file entry or None if not available.\n\nRaises:\nBackEndError: if the file entry cannot be opened.", "source": "juraj-google-style"}
{"code": "def GetParsersInformation(cls):\n    parsers_information = []\n    for (_, parser_class) in cls.GetParsers():\n        description = getattr(parser_class, 'DESCRIPTION', '')\n        parsers_information.append((parser_class.NAME, description))\n    return parsers_information", "docstring": "Retrieves the parsers information.\n\nReturns:\nlist[tuple[str, str]]: parser names and descriptions.", "source": "codesearchnet"}
{"code": "def __init__(\n            self, keys: Dict[Tuple[YangIdentifier, Optional[YangIdentifier]], str]):\n        \n        self.keys = keys", "docstring": "Initialize the class instance.\n\nArgs:\nkeys: Dictionary with keys of an entry.", "source": "juraj-google-style"}
{"code": "def signature_cert_chain_url(url):\n    r = urlparse(url)\n    if (not (r.scheme.lower() == 'https')):\n        warnings.warn('Certificate URL scheme is invalid.')\n        return False\n    if (not (r.hostname.lower() == 's3.amazonaws.com')):\n        warnings.warn('Certificate URL hostname is invalid.')\n        return False\n    if (not os.path.normpath(r.path).startswith('/echo.api/')):\n        warnings.warn('Certificate URL path is invalid.')\n        return False\n    if (r.port and (not (r.port == 443))):\n        warnings.warn('Certificate URL port is invalid.')\n        return False\n    return True", "docstring": "Validate URL specified by SignatureCertChainUrl.\n\nSee `validate.request` for additional info.\n\nArgs:\nurl: str. SignatureCertChainUrl header value sent by request.\n\nReturns:\nbool: True if valid, False otherwise.", "source": "codesearchnet"}
{"code": "def list_file_extensions(path: str, reportevery: int=1) -> List[str]:\n    extensions = set()\n    count = 0\n    for (root, dirs, files) in os.walk(path):\n        count += 1\n        if ((count % reportevery) == 0):\n            log.debug('Walking directory {}: {!r}', count, root)\n        for file in files:\n            (filename, ext) = os.path.splitext(file)\n            extensions.add(ext)\n    return sorted(list(extensions))", "docstring": "Returns a sorted list of every file extension found in a directory\nand its subdirectories.\n\nArgs:\npath: path to scan\nreportevery: report directory progress after every *n* steps\n\nReturns:\nsorted list of every file extension found", "source": "codesearchnet"}
{"code": "def GetForwardedIps(self, interface, interface_ip=None):\n    \n    try:\n      ips = netifaces.ifaddresses(interface)\n      ips = ips[netifaces.AF_INET]\n    except (ValueError, IndexError):\n      return []\n    forwarded_ips = []\n    for ip in ips:\n      if ip['addr'] != interface_ip:\n        full_addr = '%s/%d' % (ip['addr'], netaddr.IPAddress(ip['netmask']).netmask_bits())\n        forwarded_ips.append(full_addr)\n    return self.ParseForwardedIps(forwarded_ips)", "docstring": "Retrieve the list of configured forwarded IP addresses.\n\nArgs:\ninterface: string, the output device to query.\ninterface_ip: string, current interface ip address.\n\nReturns:\nlist, the IP address strings.", "source": "juraj-google-style"}
{"code": "def all(self, **kwargs):\n    path = ('%s/all' % self.path)\n    obj = self.gitlab.http_list(path, **kwargs)\n    return [self._obj_cls(self, item) for item in obj]", "docstring": "List all the members, included inherited ones.\n\nArgs:\nall (bool): If True, return all the items, without pagination\nper_page (int): Number of items to retrieve per request\npage (int): ID of the page to return (starts with page 1)\nas_list (bool): If set to False and no pagination option is\ndefined, return a generator instead of a list\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: The list of members", "source": "codesearchnet"}
{"code": "def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=None):\n    table = table.copy()\n    filter_invalid_unicode_from_table(table)\n    for row_index, row in table.iterrows():\n        for col_index, cell in enumerate(row):\n            table.iloc[row_index, col_index] = Cell(text=cell)\n    for col_index, column in enumerate(table.columns):\n        column_values = _consolidate_numeric_values(_get_column_values(table, col_index), min_consolidation_fraction=min_consolidation_fraction, debug_info=(debug_info, column))\n        for row_index, numeric_value in column_values.items():\n            table.iloc[row_index, col_index].numeric_value = numeric_value\n    return table", "docstring": "Parses text in table column-wise and adds the consolidated values. Consolidation refers to finding values with a\ncommon types (date or number)\n\nArgs:\ntable:\nTable to annotate.\nmin_consolidation_fraction:\nFraction of cells in a column that need to have consolidated value.\ndebug_info:\nAdditional information used for logging.", "source": "github-repos"}
{"code": "def block(self, cutoffs=None, values=None, n_bins=0, right=False, function=None):\n    params = self.__dict__.copy()\n    if ((values is not None) and (cutoffs is None)):\n        cutoffs = values[1:]\n    if ((cutoffs is None) and (n_bins == 0)):\n        cutoffs = np.mean(self)\n    if ((n_bins != 0) and (cutoffs is None)):\n        (mi, ma) = (np.amin(self), np.amax(self))\n        cutoffs = np.linspace(mi, ma, (n_bins + 1))\n        cutoffs = cutoffs[:(- 1)]\n    try:\n        data = np.digitize(self, cutoffs, right)\n    except ValueError:\n        data = np.digitize(self, [cutoffs], right)\n    if ((function is None) and (values is None)):\n        return Curve(data, params=params)\n    data = data.astype(float)\n    f = (function or utils.null)\n    (tops, vals) = utils.find_edges(data)\n    if (values is None):\n        for (top, base) in zip(tops[:(- 1)], tops[1:]):\n            data[top:base] = f(np.copy(self[top:base]))\n        data[base:] = f(np.copy(self[base:]))\n    else:\n        for (top, base, val) in zip(tops[:(- 1)], tops[1:], vals[:(- 1)]):\n            data[top:base] = values[int(val)]\n        data[base:] = values[int(vals[(- 1)])]\n    return Curve(data, params=params)", "docstring": "Block a log based on number of bins, or on cutoffs.\n\nArgs:\ncutoffs (array)\nvalues (array): the values to map to. Defaults to [0, 1, 2,...]\nn_bins (int)\nright (bool)\nfunction (function): transform the log if you want.\n\nReturns:\nCurve.", "source": "codesearchnet"}
{"code": "def get_table_columns(metadata):\n    cols = OrderedDict()\n    for col in metadata.c:\n        name = str(col).rpartition('.')[2]\n        cols[name] = col.type.python_type.__name__\n    return cols", "docstring": "Extract columns names and python typos from metadata\n\nArgs:\nmetadata: Table metadata\n\nReturns:\ndict with columns names and python types", "source": "codesearchnet"}
{"code": "def set_hparam(self, name, value):\n    (param_type, is_list) = self._hparam_types[name]\n    if isinstance(value, list):\n        if (not is_list):\n            raise ValueError(('Must not pass a list for single-valued parameter: %s' % name))\n        setattr(self, name, [_cast_to_type_if_compatible(name, param_type, v) for v in value])\n    else:\n        if is_list:\n            raise ValueError(('Must pass a list for multi-valued parameter: %s.' % name))\n        setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))", "docstring": "Set the value of an existing hyperparameter.\n\nThis function verifies that the type of the value matches the type of the\nexisting hyperparameter.\n\nArgs:\nname: Name of the hyperparameter.\nvalue: New value of the hyperparameter.\n\nRaises:\nKeyError: If the hyperparameter doesn't exist.\nValueError: If there is a type mismatch.", "source": "codesearchnet"}
{"code": "def GetLaunchedFlows(self, flow_type=\"outstanding\"):\n    \n    result = None\n    all_clients = set(self.ListAllClients())\n    finished_clients = set(self.ListFinishedClients())\n    outstanding_clients = all_clients - finished_clients\n\n    if flow_type == \"all\":\n      result = all_clients\n    elif flow_type == \"finished\":\n      result = finished_clients\n    elif flow_type == \"outstanding\":\n      result = outstanding_clients\n\n    \n    flows = aff4.FACTORY.MultiListChildren(\n        [self.urn.Add(x.Basename()) for x in result])\n\n    return [x[0] for _, x in flows]", "docstring": "Returns the session IDs of all the flows we launched.\n\nArgs:\nflow_type: The type of flows to fetch. Can be \"all\", \"outstanding\" or\n\"finished\".\n\nReturns:\nA list of flow URNs.", "source": "juraj-google-style"}
{"code": "def force_string(val=None):\n    if (val is None):\n        return ''\n    if isinstance(val, list):\n        newval = [str(x) for x in val]\n        return ';'.join(newval)\n    if isinstance(val, str):\n        return val\n    else:\n        return str(val)", "docstring": "Force a string representation of an object\n\nArgs:\nval: object to parse into a string\n\nReturns:\nstr: String representation", "source": "codesearchnet"}
{"code": "def plot_state_qsphere(rho, figsize=None):\n    \n    if not HAS_MATPLOTLIB:\n        raise ImportError('Must have Matplotlib installed.')\n    rho = _validate_input_state(rho)\n    if figsize is None:\n        figsize = (7, 7)\n    num = int(np.log2(len(rho)))\n    \n    we, stateall = linalg.eigh(rho)\n    for _ in range(2**num):\n        \n        probmix = we.max()\n        prob_location = we.argmax()\n        if probmix > 0.001:\n            \n            state = stateall[:, prob_location]\n            loc = np.absolute(state).argmax()\n            \n            for j in range(2**num):\n                test = np.absolute(np.absolute(state[j]) -\n                                   np.absolute(state[loc]))\n                if test < 0.001:\n                    loc = j\n                    break\n            \n            angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)\n            angleset = np.exp(-1j*angles)\n            \n            \n            state = angleset*state\n            \n            state.flatten()\n            \n            fig = plt.figure(figsize=figsize)\n            ax = fig.add_subplot(111, projection='3d')\n            ax.axes.set_xlim3d(-1.0, 1.0)\n            ax.axes.set_ylim3d(-1.0, 1.0)\n            ax.axes.set_zlim3d(-1.0, 1.0)\n            ax.set_aspect(\"equal\")\n            ax.axes.grid(False)\n            \n            u = np.linspace(0, 2 * np.pi, 25)\n            v = np.linspace(0, np.pi, 25)\n            x = np.outer(np.cos(u), np.sin(v))\n            y = np.outer(np.sin(u), np.sin(v))\n            z = np.outer(np.ones(np.size(u)), np.cos(v))\n            ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k',\n                            alpha=0.05, linewidth=0)\n            \n            \n            ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n            ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n            ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n            \n            ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n            ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n            ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n            \n            ax.set_xticks([])\n            ax.set_yticks([])\n            ax.set_zticks([])\n\n            d = num\n            for i in range(2**num):\n                \n                element = bin(i)[2:].zfill(num)\n                weight = element.count(\"1\")\n                zvalue = -2 * weight / d + 1\n                number_of_divisions = n_choose_k(d, weight)\n                weight_order = bit_string_index(element)\n                \n                \n                \n                \n                \n                angle = weight_order * 2 * np.pi / number_of_divisions\n                xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)\n                yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)\n                ax.plot([xvalue], [yvalue], [zvalue],\n                        markerfacecolor=(.5, .5, .5),\n                        markeredgecolor=(.5, .5, .5),\n                        marker='o', markersize=10, alpha=1)\n                \n                prob = np.real(np.dot(state[i], state[i].conj()))\n                colorstate = phase_to_color_wheel(state[i])\n                a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue],\n                            mutation_scale=20, alpha=prob, arrowstyle=\"-\",\n                            color=colorstate, lw=10)\n                ax.add_artist(a)\n            \n            for weight in 
range(d + 1):\n                theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n                z = -2 * weight / d + 1\n                r = np.sqrt(1 - z**2)\n                x = r * np.cos(theta)\n                y = r * np.sin(theta)\n                ax.plot(x, y, z, color=(.5, .5, .5))\n            \n            ax.plot([0], [0], [0], markerfacecolor=(.5, .5, .5),\n                    markeredgecolor=(.5, .5, .5), marker='o', markersize=10,\n                    alpha=1)\n            we[prob_location] = 0\n        else:\n            break\n    plt.tight_layout()\n    plt.close(fig)\n    return fig", "docstring": "Plot the qsphere representation of a quantum state.\n\nArgs:\nrho (ndarray): State vector or density matrix representation.\nof quantum state.\nfigsize (tuple): Figure size in inches.\n\nReturns:\nFigure: A matplotlib figure instance.\n\nRaises:\nImportError: Requires matplotlib.", "source": "juraj-google-style"}
{"code": "def write_json(self, fh, pretty=True):\n    sjson = json.JSONEncoder().encode(self.json())\n    if pretty:\n        json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)\n    else:\n        json.dump(json.loads(sjson), fh)\n    return", "docstring": "Write composite object to file handle in JSON format.\n\nArgs:\nfh (file): File handle to write to.\npretty (bool): Sort keys and indent in output.", "source": "codesearchnet"}
{"code": "def summary_writer_function(name, tensor, function, family=None):\n    name_scope = ops.get_name_scope()\n    if name_scope:\n        name_scope += '/'\n\n    def record():\n        with ops.name_scope(name_scope), summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):\n            with ops.control_dependencies([function(tag, scope)]):\n                return constant_op.constant(True)\n    if _summary_state.writer is None:\n        return control_flow_ops.no_op()\n    with ops.device('cpu:0'):\n        op = smart_cond.smart_cond(_legacy_contrib_should_record_summaries(), record, _nothing, name='')\n        if not context.executing_eagerly():\n            ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n    return op", "docstring": "Helper function to write summaries.\n\nArgs:\nname: name of the summary\ntensor: main tensor to form the summary\nfunction: function taking a tag and a scope which writes the summary\nfamily: optional, the summary's family\n\nReturns:\nThe result of writing the summary.", "source": "github-repos"}
{"code": "def transform_rest_response(self, response_body):\n    body_json = json.loads(response_body)\n    return json.dumps(body_json, indent=1, sort_keys=True)", "docstring": "Translates an apiserving REST response so it's ready to return.\n\nCurrently, the only thing that needs to be fixed here is indentation,\nso it's consistent with what the live app will return.\n\nArgs:\nresponse_body: A string containing the backend response.\n\nReturns:\nA reformatted version of the response JSON.", "source": "codesearchnet"}
{"code": "def check_constraint(type_constraint, object_instance):\n    if type_constraint is None and object_instance is None:\n        return\n    elif isinstance(type_constraint, TypeConstraint):\n        type_constraint.type_check(object_instance)\n    elif type_constraint is None:\n        pass\n    elif not isinstance(type_constraint, type):\n        raise RuntimeError('bad type: %s' % (type_constraint,))\n    elif not isinstance(object_instance, type_constraint):\n        raise SimpleTypeHintError", "docstring": "Determine if the passed type instance satisfies the TypeConstraint.\n\nWhen examining a candidate type for constraint satisfaction in\n'type_check', all CompositeTypeHint's eventually call this function. This\nfunction may end up being called recursively if the hinted type of a\nCompositeTypeHint is another CompositeTypeHint.\n\nArgs:\ntype_constraint: An instance of a TypeConstraint or a built-in Python type.\nobject_instance: An object instance.\n\nRaises:\nSimpleTypeHintError: If 'type_constraint' is a one of the allowed primitive\nPython types and 'object_instance' isn't an instance of this type.\nCompositeTypeHintError: If 'type_constraint' is a TypeConstraint object and\n'object_instance' does not satisfy its constraint.", "source": "github-repos"}
{"code": "def _deconstruct_single_qubit_matrix_into_gate_turns(\n        mat: np.ndarray) -> Tuple[float, float, float]:\n    \n    pre_phase, rotation, post_phase = (\n        linalg.deconstruct_single_qubit_matrix_into_angles(mat))\n\n    \n    tau = 2 * np.pi\n    xy_turn = rotation / tau\n    xy_phase_turn = 0.25 - pre_phase / tau\n    total_z_turn = (post_phase + pre_phase) / tau\n\n    \n    return (_signed_mod_1(xy_turn), _signed_mod_1(xy_phase_turn),\n            _signed_mod_1(total_z_turn))", "docstring": "Breaks down a 2x2 unitary into gate parameters.\n\nArgs:\nmat: The 2x2 unitary matrix to break down.\n\nReturns:\nA tuple containing the amount to rotate around an XY axis, the phase of\nthat axis, and the amount to phase around Z. All results will be in\nfractions of a whole turn, with values canonicalized into the range\n[-0.5, 0.5).", "source": "juraj-google-style"}
{"code": "def parse_filepath(self, filepath=None):\n    filepath = (filepath or self._default_filename)\n    (path, filename) = os.path.split(filepath)\n    if (not path):\n        path = self.basedir\n    elif (not os.path.isabs(path)):\n        path = os.path.join(self.basedir, path)\n    return (os.path.normpath(path), filename)", "docstring": "Parse given filepath to split possible path directory from filename.\n\n* If path directory is empty, will use ``basedir`` attribute as base\nfilepath;\n* If path directory is absolute, ignore ``basedir`` attribute;\n* If path directory is relative, join it to ``basedir`` attribute;\n\nKeyword Arguments:\nfilepath (str): Filepath to use to search for settings file. Will\nuse value from ``_default_filename`` class attribute if empty.\n\nIf filepath contain a directory path, it will be splitted from\nfilename and used as base directory (and update object\n``basedir`` attribute).\n\nReturns:\ntuple: Separated path directory and filename.", "source": "codesearchnet"}
{"code": "def get_conflicting_tools(self, request_only=False):\n        \n        from collections import defaultdict\n\n        tool_sets = defaultdict(set)\n        tools_dict = self.get_tools(request_only=request_only)\n        for variant, tools in tools_dict.itervalues():\n            for tool in tools:\n                tool_sets[tool].add(variant)\n\n        conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)\n        return conflicts", "docstring": "Returns tools of the same name provided by more than one package.\n\nArgs:\nrequest_only: If True, only return the key from resolved packages\nthat were also present in the request.\n\nReturns:\nDict of {tool-name: set([Variant])}.", "source": "juraj-google-style"}
{"code": "def _list_samples(self, predicate=None):\n    cursor = self.database[self.sample_collection].find(predicate, {'_id': 0, 'md5': 1})\n    return [item['md5'] for item in cursor]", "docstring": "List all samples that meet the predicate or all if predicate is not specified.\n\nArgs:\npredicate: Match samples against this predicate (or all if not specified)\n\nReturns:\nList of the md5s for the matching samples", "source": "codesearchnet"}
{"code": "def InitFromAff4Object(self, file_obj, stat_entry=None, hash_entry=None, with_details=False):\n    self.name = file_obj.urn.Basename()\n    self.path = '/'.join(file_obj.urn.Path().split('/')[2:])\n    self.is_directory = ('Container' in file_obj.behaviours)\n    self.stat = (stat_entry or file_obj.Get(file_obj.Schema.STAT))\n    self.hash = (hash_entry or file_obj.Get(file_obj.Schema.HASH, None))\n    if (not self.is_directory):\n        try:\n            self.last_collected = file_obj.GetContentAge()\n        except AttributeError:\n            logging.debug(\"File-like object %s doesn't have GetContentAge defined.\", file_obj.__class__.__name__)\n        if self.last_collected:\n            self.last_collected_size = file_obj.Get(file_obj.Schema.SIZE)\n    type_obj = file_obj.Get(file_obj.Schema.TYPE)\n    if (type_obj is not None):\n        self.age = type_obj.age\n    if with_details:\n        self.details = ApiAff4ObjectRepresentation().InitFromAff4Object(file_obj)\n    return self", "docstring": "Initializes the current instance from an Aff4Stream.\n\nArgs:\nfile_obj: An Aff4Stream representing a file.\nstat_entry: An optional stat entry object to be used. If none is provided,\nthe one stored in the AFF4 data store is used.\nhash_entry: An optional hash entry object to be used. If none is provided,\nthe one stored in the AFF4 data store is used.\nwith_details: True if all details of the Aff4Object should be included,\nfalse otherwise.\n\nReturns:\nA reference to the current instance.", "source": "codesearchnet"}
{"code": "def __init__( self, label, r ): \n        \n        self.label = label\n        self.r = r", "docstring": "Initialise an Atom instance\n\nArgs:\nlabel (Str): a label for this atom\nr (numpy.array): the atom coordinates\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):\n        \n        self.tf_optimizer_type = optimizer\n        self.tf_optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)\n\n        super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)", "docstring": "Creates a new optimizer instance of a TensorFlow optimizer.\n\nArgs:\noptimizer: The name of the optimizer. Must be one of the keys of the tf_optimizers dict.\n**kwargs: Arguments passed on to the TensorFlow optimizer constructor as **kwargs.", "source": "juraj-google-style"}
{"code": "def choose(self, locator=None, allow_label_click=None, **kwargs):\n        \n\n        self._check_with_label(\n            \"radio_button\", True, locator=locator, allow_label_click=allow_label_click, **kwargs)", "docstring": "Find a radio button and mark it as checked. The radio button can be found via name, id, or\nlabel text. ::\n\npage.choose(\"Male\")\n\nArgs:\nlocator (str, optional): Which radio button to choose.\nallow_label_click (bool, optional): Attempt to click the label to toggle state if\nelement is non-visible. Defaults to :data:`capybara.automatic_label_click`.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.", "source": "juraj-google-style"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0)\n    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1)", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def forward(self, hidden_features):\n    hidden_features = self.flatten(hidden_features)\n    hidden_features = self.dropout_layer(hidden_features)\n    forecast = self.base_forecast_block(hidden_features)\n    if isinstance(forecast, tuple):\n        forecast = tuple((z.transpose(-1, -2) for z in forecast))\n    else:\n        forecast = forecast.transpose(-1, -2)\n    if self.prediction_channel_indices is not None:\n        if isinstance(forecast, tuple):\n            forecast = tuple((z[..., self.prediction_channel_indices] for z in forecast))\n        else:\n            forecast = forecast[..., self.prediction_channel_indices]\n    return forecast", "docstring": "Args:\nhidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode\nor `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden\nfeatures.\n\nReturns:\n`torch.Tensor` of shape `(batch_size, prediction_length, nvars)`.", "source": "github-repos"}
{"code": "def parse(ifp, pb_cls, **kwargs):\n    mode = 'rb'\n    if isinstance(ifp, str):\n        istream = open(ifp, mode=mode, **kwargs)\n    else:\n        istream = open(fileobj=ifp, mode=mode, **kwargs)\n    with istream:\n        for data in istream:\n            pb_obj = pb_cls()\n            pb_obj.ParseFromString(data)\n            (yield pb_obj)", "docstring": "Parse a stream.\n\nArgs:\nifp (string or file-like object): input stream.\npb_cls (protobuf.message.Message.__class__): The class object of\nthe protobuf message type encoded in the stream.", "source": "codesearchnet"}
{"code": "def _resolve_and_add(nodes1, s_val, final_s, nodes2, t_val, final_t):\n    (s_val, t_val) = _intersection_helpers.newton_refine(s_val, nodes1, t_val, nodes2)\n    (s_val, success_s) = _helpers.wiggle_interval(s_val)\n    (t_val, success_t) = _helpers.wiggle_interval(t_val)\n    if (not (success_s and success_t)):\n        return\n    final_s.append(s_val)\n    final_t.append(t_val)", "docstring": "Resolve a computed intersection and add to lists.\n\nWe perform one Newton step to deal with any residual issues of\nhigh-degree polynomial solves (one of which depends on the already\napproximate ``x_val, y_val``).\n\nArgs:\nnodes1 (numpy.ndarray): The nodes in the first curve.\ns_val (float): The approximate intersection parameter\nalong ``nodes1``.\nfinal_s (List[float]): The list of accepted intersection\nparameters ``s``.\nnodes2 (numpy.ndarray): The nodes in the second curve.\nt_val (float): The approximate intersection parameter\nalong ``nodes2``.\nfinal_t (List[float]): The list of accepted intersection\nparameters ``t``.", "source": "codesearchnet"}
{"code": "def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask):\n    batch_size, from_seq_length = (from_tensor.shape[0], from_tensor.shape[1])\n    to_seq_length = to_mask.shape[1]\n    to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float()\n    broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device)\n    mask = broadcast_ones * to_mask\n    return mask", "docstring": "Create 3D attention mask from a 2D tensor mask.\n\nArgs:\nfrom_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\nto_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\nReturns:\nfloat Tensor of shape [batch_size, from_seq_length, to_seq_length].", "source": "github-repos"}
{"code": "def register_from_fields(self, *args):\n    names = []\n    for field in args:\n        widget = self.resolve_widget(field)\n        self.register(widget.config_name)\n        if (widget.config_name not in names):\n            names.append(widget.config_name)\n    return names", "docstring": "Register config name from field widgets\n\nArguments:\n*args: Fields that contains widget\n:class:`djangocodemirror.widget.CodeMirrorWidget`.\n\nReturns:\nlist: List of registered config names from fields.", "source": "codesearchnet"}
{"code": "def AppendPathEntries(cls, path, path_separator, number_of_wildcards, skip_first):\n    if (path[(- 1)] == path_separator):\n        path = path[:(- 1)]\n    if skip_first:\n        path = ''.join([path, path_separator, '*'])\n        number_of_wildcards -= 1\n    paths = []\n    for _ in range(0, number_of_wildcards):\n        path = ''.join([path, path_separator, '*'])\n        paths.append(path)\n    return paths", "docstring": "Appends glob wildcards to a path.\n\nThis function will append glob wildcards \"*\" to a path, returning paths\nwith an additional glob wildcard up to the specified number. E.g. given\nthe path \"/tmp\" and a number of 2 wildcards, this function will return\n\"tmp/*\", \"tmp/*/*\". When skip_first is true the path with the first\nwildcard is not returned as a result.\n\nArgs:\npath (str): path to append glob wildcards to.\npath_separator (str): path segment separator.\nnumber_of_wildcards (int): number of glob wildcards to append.\nskip_first (bool): True if the the first path with glob wildcard should\nbe skipped as a result.\n\nReturns:\nlist[str]: paths with glob wildcards.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    if self._lease_time:\n        self._lease_time.write(local_stream, kmip_version=kmip_version)\n    if self._last_change_date:\n        self._last_change_date.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(ObtainLeaseResponsePayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the ObtainLease response payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def _fill_meta_graph_def(meta_graph_def: meta_graph_pb2.MetaGraphDef, saveable_view: _SaveableView, signature_functions: Dict[str, Callable[..., Any]], namespace_whitelist: List[str], save_custom_gradients: bool, create_saver: bool, enable_debug_stripper: bool, defaults=None) -> Tuple[_AssetInfo, ops.Graph]:\n    resource_initializers = saveable_view.get_concrete_resource_initializers()\n    exported_graph = ops.Graph()\n    resource_initializer_ops = []\n    with exported_graph.as_default():\n        object_map, tensor_map, asset_info = saveable_view.map_resources()\n        signatures = _generate_signatures(signature_functions, object_map, defaults)\n    if save_custom_gradients:\n        _trace_gradient_functions(exported_graph, saveable_view)\n    with exported_graph.as_default():\n        for resource_initializer_function in resource_initializers:\n            asset_dependencies = []\n            for capture in resource_initializer_function.graph.external_captures:\n                asset_initializer = asset_info.asset_initializers_by_resource.get(capture, None)\n                if asset_initializer is not None:\n                    asset_dependencies.append(asset_initializer)\n            with ops.control_dependencies(asset_dependencies):\n                mapped_initializer = object_map[resource_initializer_function]\n                resource_initializer_ops.append(mapped_initializer())\n        resource_initializer_ops.extend(asset_info.asset_initializers_by_resource.values())\n        with ops.control_dependencies(resource_initializer_ops):\n            init_op = control_flow_ops.no_op()\n        meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(init_op.name)\n        meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(signature_def_utils.op_signature_def(init_op, constants.INIT_OP_SIGNATURE_KEY))\n\n    def call_with_mapped_captures(function, args):\n        if function in object_map:\n            return object_map[function](*args)\n        return saved_model_exported_concrete.ExportedConcreteFunction(function, tensor_map)(*args)\n    for obj in object_map.values():\n        obj._maybe_initialize_trackable()\n    named_saveable_objects, registered_savers = save_util_v1.frozen_saveables_and_savers(graph_view=saveable_view.augmented_graph_view, object_map=object_map, to_graph=exported_graph, call_with_mapped_captures=call_with_mapped_captures)\n    if create_saver:\n        saver = functional_saver.MultiDeviceSaver.from_saveables(named_saveable_objects, registered_savers, call_with_mapped_captures)\n        with exported_graph.as_default():\n            saver_def = saver.to_proto()\n            meta_graph_def.saver_def.CopyFrom(saver_def)\n    _dependency_sorted_node_ids(saveable_view)\n    graph_def, _ = exported_graph._as_graph_def(add_shapes=True, use_pybind11_proto=False)\n    graph_def.library.registered_gradients.extend(saveable_view.gradient_defs)\n    _verify_ops(graph_def, namespace_whitelist)\n    meta_graph_def.graph_def.CopyFrom(graph_def)\n    meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)\n    if saveable_view.options.extra_tags:\n        for tag in saveable_view.options.extra_tags:\n            meta_graph_def.meta_info_def.tags.append(tag)\n    meta_graph_def.meta_info_def.tensorflow_version = versions.__version__\n    meta_graph_def.meta_info_def.tensorflow_git_version = versions.__git_version__\n    meta_graph_def.meta_info_def.stripped_default_attrs = True\n    
meta_graph_def.asset_file_def.extend(asset_info.asset_defs)\n    for signature_key, signature in signatures.items():\n        meta_graph_def.signature_def[signature_key].CopyFrom(signature)\n    meta_graph.strip_graph_default_valued_attrs(meta_graph_def)\n    if sys.byteorder == 'big':\n        utils_impl.swap_function_tensor_content(meta_graph_def, 'big', 'little')\n    if enable_debug_stripper:\n        _strip_debug_nodes(meta_graph_def)\n    meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))\n    return (asset_info, exported_graph)", "docstring": "Generates a MetaGraph which calls `signature_functions`.\n\nArgs:\nmeta_graph_def: The MetaGraphDef proto to fill.\nsaveable_view: The _SaveableView being exported.\nsignature_functions: A dictionary mapping signature keys to concrete\nfunctions containing signatures to add to the MetaGraph.\nnamespace_whitelist: List of strings containing whitelisted op namespaces.\nsave_custom_gradients: Whether to save custom gradients.\ncreate_saver: Whether to add SavedModel's native save and restore ops.\nenable_debug_stripper: Whether to strip the debug nodes from the graph.\ndefaults: A dictionary mapping signature_key to dictionary of\nuser_specified_name to Tensor representing default values.\n\nReturns:\nA tuple of (_AssetInfo, Graph) containing the captured assets and\nexported Graph generated from tracing the saveable_view.", "source": "github-repos"}
{"code": "def get_run_short_description(run_call_count, fetches, feed_dict, is_callable_runner=False):\n    if is_callable_runner:\n        return 'runner from make_callable()'\n    description = 'run \n    if isinstance(fetches, (tensor_lib.Tensor, ops.Operation, variables.Variable)):\n        description += '1 fetch (%s); ' % common.get_graph_element_name(fetches)\n    else:\n        num_fetches = len(common.get_flattened_names(fetches))\n        if num_fetches > 1:\n            description += '%d fetches; ' % num_fetches\n        else:\n            description += '%d fetch; ' % num_fetches\n    if not feed_dict:\n        description += '0 feeds'\n    elif len(feed_dict) == 1:\n        for key in feed_dict:\n            description += '1 feed (%s)' % (key if isinstance(key, str) or not hasattr(key, 'name') else key.name)\n    else:\n        description += '%d feeds' % len(feed_dict)\n    return description", "docstring": "Get a short description of the run() call.\n\nArgs:\nrun_call_count: (int) Run call counter.\nfetches: Fetches of the `Session.run()` call. See doc of `Session.run()`\nfor more details.\nfeed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`\nfor more details.\nis_callable_runner: (bool) whether a runner returned by\nSession.make_callable is being run.\n\nReturns:\n(str) A short description of the run() call, including information about\nthe fetche(s) and feed(s).", "source": "github-repos"}
{"code": "def assert_equal_graph_def_v2(expected: graph_pb2.GraphDef, actual: graph_pb2.GraphDef) -> None:\n    assert_equal_graph_def(actual, expected, checkpoint_v2=True, hash_table_shared_name=True)", "docstring": "Asserts that two `GraphDef`s are (mostly) the same.\n\nCompares two `GraphDef` protos for equality, ignoring versions and ordering of\nnodes, attrs, and control inputs.  Node names are used to match up nodes\nbetween the graphs, so the naming of nodes must be consistent. This function\nignores randomized attribute values that may appear in V2 checkpoints.\n\nArgs:\nexpected: The `GraphDef` we expected.\nactual: The `GraphDef` we have.\n\nRaises:\nAssertionError: If the `GraphDef`s do not match.\nTypeError: If either argument is not a `GraphDef`.", "source": "github-repos"}
{"code": "def from_directory(input_dir, optional_files=None):\n        \n        sub_d = {}\n        for fname, ftype in [(\"INCAR\", Incar), (\"KPOINTS\", Kpoints),\n                             (\"POSCAR\", Poscar), (\"POTCAR\", Potcar)]:\n            fullzpath = zpath(os.path.join(input_dir, fname))\n            sub_d[fname.lower()] = ftype.from_file(fullzpath)\n        sub_d[\"optional_files\"] = {}\n        if optional_files is not None:\n            for fname, ftype in optional_files.items():\n                sub_d[\"optional_files\"][fname] = \\\n                    ftype.from_file(os.path.join(input_dir, fname))\n        return VaspInput(**sub_d)", "docstring": "Read in a set of VASP input from a directory. Note that only the\nstandard INCAR, POSCAR, POTCAR and KPOINTS files are read unless\noptional_filenames is specified.\n\nArgs:\ninput_dir (str): Directory to read VASP input from.\noptional_files (dict): Optional files to read in as well as a\ndict of {filename: Object type}. Object type must have a\nstatic method from_file.", "source": "juraj-google-style"}
{"code": "def is_user_enrolled(cls, user, course_id, course_mode):\n        \n        enrollment_client = EnrollmentApiClient()\n        try:\n            enrollments = enrollment_client.get_course_enrollment(user.username, course_id)\n            if enrollments and course_mode == enrollments.get('mode'):\n                return True\n        except HttpClientError as exc:\n            logging.error(\n                'Error while checking enrollment status of user %(user)s: %(message)s',\n                dict(user=user.username, message=str(exc))\n            )\n        except KeyError as exc:\n            logging.warning(\n                'Error while parsing enrollment data of user %(user)s: %(message)s',\n                dict(user=user.username, message=str(exc))\n            )\n        return False", "docstring": "Query the enrollment API and determine if a learner is enrolled in a given course run track.\n\nArgs:\nuser: The user whose enrollment needs to be checked\ncourse_mode: The mode with which the enrollment should be checked\ncourse_id: course id of the course where enrollment should be checked.\n\nReturns:\nBoolean: Whether or not enrollment exists", "source": "juraj-google-style"}
{"code": "def _error_messages(self, driver_id):\n    assert isinstance(driver_id, ray.DriverID)\n    message = self.redis_client.execute_command('RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.ERROR_INFO, '', driver_id.binary())\n    if (message is None):\n        return []\n    gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)\n    error_messages = []\n    for i in range(gcs_entries.EntriesLength()):\n        error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(gcs_entries.Entries(i), 0)\n        assert (driver_id.binary() == error_data.DriverId())\n        error_message = {'type': decode(error_data.Type()), 'message': decode(error_data.ErrorMessage()), 'timestamp': error_data.Timestamp()}\n        error_messages.append(error_message)\n    return error_messages", "docstring": "Get the error messages for a specific driver.\n\nArgs:\ndriver_id: The ID of the driver to get the errors for.\n\nReturns:\nA list of the error messages for this driver.", "source": "codesearchnet"}
{"code": "def _ReadN(self, n):\n    ret = ''\n    while True:\n        chunk = self._read_file.read((n - len(ret)))\n        ret += chunk\n        if ((len(ret) == n) or (not chunk)):\n            return ret", "docstring": "Reads n characters from the input stream, or until EOF.\n\nThis is equivalent to the current CPython implementation of read(n), but\nnot guaranteed by the docs.\n\nArgs:\nn: int\n\nReturns:\nstring", "source": "codesearchnet"}
{"code": "def recompose(src: Path, target_file: Path):\n        \n        mission_folder, assets_folder = NewMiz._get_subfolders(src)\n        \n        base_info = ujson.loads(Path(mission_folder, 'base_info.json').read_text(encoding=ENCODING))\n        version = base_info['__version__']\n        with Miz(target_file) as miz:\n            LOGGER.info('re-composing mission table from folder: \"%s\"', mission_folder)\n            miz.mission.d = NewMiz._recreate_dict_from_folder(mission_folder, version)\n            for item in assets_folder.iterdir():\n                target = Path(miz.temp_dir, item.name).absolute()\n                if item.is_dir():\n                    if target.exists():\n                        shutil.rmtree(target)\n                    shutil.copytree(item.absolute(), target)\n                elif item.is_file():\n                    shutil.copy(item.absolute(), target)\n            miz.zip(target_file, encode=False)", "docstring": "Recompose a Miz from json object\n\nArgs:\nsrc: folder containing the json structure\ntarget_file: target Miz file", "source": "juraj-google-style"}
{"code": "def get_value_spec(self, name: str) -> Optional[class_schema.ValueSpec]:\n    for arg in self.named_args:\n        if arg.name == name:\n            return arg.value_spec\n    if self.varkw is not None:\n        return self.varkw.value_spec.schema.dynamic_field.value\n    return None", "docstring": "Returns Value spec for an argument name.\n\nArgs:\nname: Argument name.\n\nReturns:\nValueSpec for the requested argument. If name is not found, value spec of\nwildcard keyword argument will be used. None will be returned if name\ndoes not exist in signature and wildcard keyword is not accepted.", "source": "github-repos"}
{"code": "def subtract_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n    return self.add_business_days(date_tensor, -num_days, roll_convention)", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `subtract_period_and_roll` with\nPeriodType.DAY. For example, subtracting 5 business days from Friday gives\nthe previous Friday (unless there are holidays on this week or previous\nFriday). Subtracting 5 days and rolling means landing on Sunday and then\nrolling either to Monday or to Friday, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the subtraction. If `roll_convention`\nis `NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\ndate_tensor: DateTensor of dates to advance from.\nnum_days: Tensor of int32 type broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def add_citations(voevent, event_ivorns):\n    if (not voevent.xpath('Citations')):\n        etree.SubElement(voevent, 'Citations')\n    voevent.Citations.extend(_listify(event_ivorns))", "docstring": "Add citations to other voevents.\n\nThe schema mandates that the 'Citations' section must either be entirely\nabsent, or non-empty - hence we require this wrapper function for its\ncreation prior to listing the first citation.\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\nevent_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn\nelements to add to citation list.", "source": "codesearchnet"}
{"code": "def _CopyDateFromString(self, date_string):\n    \n    date_string_length = len(date_string)\n\n    \n    if date_string_length < 10:\n      raise ValueError('Date string too short.')\n\n    if date_string[4] != '-' or date_string[7] != '-':\n      raise ValueError('Invalid date string.')\n\n    try:\n      year = int(date_string[0:4], 10)\n    except ValueError:\n      raise ValueError('Unable to parse year.')\n\n    try:\n      month = int(date_string[5:7], 10)\n    except ValueError:\n      raise ValueError('Unable to parse month.')\n\n    try:\n      day_of_month = int(date_string[8:10], 10)\n    except ValueError:\n      raise ValueError('Unable to parse day of month.')\n\n    days_per_month = self._GetDaysPerMonth(year, month)\n    if day_of_month < 1 or day_of_month > days_per_month:\n      raise ValueError('Day of month value out of bounds.')\n\n    return year, month, day_of_month", "docstring": "Copies a date from a string.\n\nArgs:\ndate_string (str): date value formatted as: YYYY-MM-DD\n\nReturns:\ntuple[int, int, int]: year, month, day of month.\n\nRaises:\nValueError: if the date string is invalid or not supported.", "source": "juraj-google-style"}
{"code": "def equal_distribution_folds(y, folds=2):\n    (n, classes) = y.shape\n    dist = y.sum(axis=0).astype('float')\n    dist /= dist.sum()\n    index_list = []\n    fold_dist = np.zeros((folds, classes), dtype='float')\n    for _ in range(folds):\n        index_list.append([])\n    for i in range(n):\n        if (i < folds):\n            target_fold = i\n        else:\n            normed_folds = (fold_dist.T / fold_dist.sum(axis=1))\n            how_off = (normed_folds.T - dist)\n            target_fold = np.argmin(np.dot((y[i] - 0.5).reshape(1, (- 1)), how_off.T))\n        fold_dist[target_fold] += y[i]\n        index_list[target_fold].append(i)\n    logger.debug('Fold distributions:')\n    logger.debug(fold_dist)\n    return index_list", "docstring": "Creates `folds` number of indices that has roughly balanced multi-label distribution.\n\nArgs:\ny: The multi-label outputs.\nfolds: The number of folds to create.\n\nReturns:\n`folds` number of indices that have roughly equal multi-label distributions.", "source": "codesearchnet"}
{"code": "def to_json(self, **kwargs):\n    config = self.get_config()\n    timeseries_generator_config = {'class_name': self.__class__.__name__, 'config': config}\n    return json.dumps(timeseries_generator_config, **kwargs)", "docstring": "Returns a JSON string containing the generator's configuration.\n\nArgs:\n**kwargs: Additional keyword arguments to be passed\nto `json.dumps()`.\n\nReturns:\nA JSON string containing the tokenizer configuration.", "source": "github-repos"}
{"code": "def _process_sum_prod(self, func, **kwargs):\n    axis = kwargs.get('axis', 0)\n    min_count = kwargs.get('min_count', 0)\n\n    def sum_prod_builder(df, **kwargs):\n        return func(df, **kwargs)\n    if (min_count <= 1):\n        return self._full_reduce(axis, sum_prod_builder)\n    else:\n        return self._full_axis_reduce(axis, sum_prod_builder)", "docstring": "Calculates the sum or product of the DataFrame.\n\nArgs:\nfunc: Pandas func to apply to DataFrame.\nignore_axis: Whether to ignore axis when raising TypeError\nReturn:\nA new QueryCompiler object with sum or prod of the object.", "source": "codesearchnet"}
{"code": "def start(logdir):\n    if logdir.startswith('gs:\n        datalab.storage._api.Api.verify_permitted_to_read(logdir)\n    port = datalab.utils.pick_unused_port()\n    args = ['tensorboard', ('--logdir=' + logdir), ('--port=' + str(port))]\n    p = subprocess.Popen(args)\n    retry = 10\n    while (retry > 0):\n        if datalab.utils.is_http_running_on(port):\n            basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')\n            url = ('%s/_proxy/%d/' % (basepath.rstrip('/'), port))\n            html = ('<p>TensorBoard was started successfully with pid %d. ' % p.pid)\n            html += ('Click <a href=\"%s\" target=\"_blank\">here</a> to access it.</p>' % url)\n            IPython.display.display_html(html, raw=True)\n            return p.pid\n        time.sleep(1)\n        retry -= 1\n    raise Exception('Cannot start TensorBoard.')", "docstring": "Start a TensorBoard instance.\n\nArgs:\nlogdir: the logdir to run TensorBoard on.\nRaises:\nException if the instance cannot be started.", "source": "codesearchnet"}
{"code": "def _compile_function_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:\n    etype = expr.etype\n    args = expr.args\n    if (len(args) == 1):\n        etype2func = {'abs': TensorFluent.abs, 'exp': TensorFluent.exp, 'log': TensorFluent.log, 'sqrt': TensorFluent.sqrt, 'cos': TensorFluent.cos, 'sin': TensorFluent.sin, 'tan': TensorFluent.tan, 'acos': TensorFluent.acos, 'arccos': TensorFluent.acos, 'asin': TensorFluent.asin, 'arcsin': TensorFluent.asin, 'atan': TensorFluent.atan, 'arctan': TensorFluent.atan, 'round': TensorFluent.round, 'ceil': TensorFluent.ceil, 'floor': TensorFluent.floor}\n        if (etype[1] not in etype2func):\n            raise ValueError('Invalid unary function expression:\\n{}'.format(expr))\n        op = etype2func[etype[1]]\n        x = self._compile_expression(args[0], scope, batch_size, noise)\n        fluent = op(x)\n    else:\n        etype2func = {'pow': TensorFluent.pow, 'max': TensorFluent.max, 'min': TensorFluent.min}\n        if (etype[1] not in etype2func):\n            raise ValueError('Invalid binary function expression:\\n{}'.format(expr))\n        op = etype2func[etype[1]]\n        x = self._compile_expression(args[0], scope, batch_size, noise)\n        y = self._compile_expression(args[1], scope, batch_size, noise)\n        fluent = op(x, y)\n    return fluent", "docstring": "Compile a function expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "codesearchnet"}
{"code": "def fixminimized(self, alphabet):\n        \n        endstate = len(list(self.states))\n        for state in self.states:\n            for char in alphabet:\n                found = 0\n                for arc in state.arcs:\n                    if self.isyms.find(arc.ilabel) == char:\n                        found = 1\n                        break\n                if found == 0:\n                    self.add_arc(state.stateid, endstate, char)\n\n        self[endstate].final = TropicalWeight(float('inf'))\n\n        for char in alphabet:\n            self.add_arc(endstate, endstate, char)", "docstring": "After pyfst minimization,\nall unused arcs are removed,\nand all sink states are removed.\nHowever this may break compatibility.\nArgs:\nalphabet (list): The input alphabet\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"email\": email})\n        return self.api_call(\"users.lookupByEmail\", http_verb=\"GET\", params=kwargs)", "docstring": "Find a user with an email address.\n\nArgs:\nemail (str): An email address belonging to a user in the workspace.\ne.g. 'spengler@ghostbusters.example.com'", "source": "juraj-google-style"}
{"code": "def sg_mean(tensor, opt):\n    r\n    return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the mean of elements across axis of a tensor.\n\nSee `tf.reduce_mean()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def safe_sum(x, alt_value=(- np.inf), name=None):\n    with tf.compat.v1.name_scope(name, 'safe_sum', [x, alt_value]):\n        if (not is_list_like(x)):\n            raise TypeError('Expected list input.')\n        if (not x):\n            raise ValueError('Input should not be empty.')\n        in_shape = x[0].shape\n        x = tf.stack(x, axis=(- 1))\n        x = tf.reduce_sum(input_tensor=x, axis=(- 1))\n        alt_value = np.array(alt_value, x.dtype.as_numpy_dtype)\n        alt_fill = tf.fill(tf.shape(input=x), value=alt_value)\n        x = tf.where(tf.math.is_finite(x), x, alt_fill)\n        x.set_shape(x.shape.merge_with(in_shape))\n        return x", "docstring": "Elementwise adds list members, replacing non-finite results with alt_value.\n\nTypically the `alt_value` is chosen so the `MetropolisHastings`\n`TransitionKernel` always rejects the proposal.\n\nArgs:\nx: Python `list` of `Tensors` to elementwise add.\nalt_value: Python scalar used to replace any elementwise sums which would\notherwise be non-finite.\nname: Python `str` name prefixed to Ops created by this function.\nDefault value: `None` (i.e., \"safe_sum\").\n\nReturns:\nsafe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s\n`x` or `alt_value` where sums are non-finite.\n\nRaises:\nTypeError: if `x` is not list-like.\nValueError: if `x` is empty.", "source": "codesearchnet"}
{"code": "def _GetTaskStorageFilePath(self, task):\n    \n    filename = '{0:s}.plaso'.format(task.identifier)\n    return os.path.join(self._task_storage_path, filename)", "docstring": "Retrieves the path of a task storage file in the temporary directory.\n\nArgs:\ntask (Task): task.\n\nReturns:\nstr: path of a task storage file in the temporary directory.", "source": "juraj-google-style"}
{"code": "def get_column_names(self, X):\n    if isinstance(X, pd.DataFrame):\n        return X.columns\n    return range(X.shape[1])", "docstring": "Return iterable containing columns for the given array X.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\n\nReturns:\niterable: columns for the given matrix.", "source": "codesearchnet"}
{"code": "def garbage_collect_exports(export_dir_base, exports_to_keep):\n    if (exports_to_keep is None):\n        return\n    version_paths = []\n    for filename in tf_v1.gfile.ListDirectory(export_dir_base):\n        path = os.path.join(tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(filename))\n        if ((len(filename) == 10) and filename.isdigit()):\n            version_paths.append((int(filename), path))\n    oldest_version_path = sorted(version_paths)[:(- exports_to_keep)]\n    for (_, path) in oldest_version_path:\n        try:\n            tf_v1.gfile.DeleteRecursively(path)\n        except tf.errors.NotFoundError as e:\n            logging.warn('Can not delete %s recursively: %s', path, e)", "docstring": "Deletes older exports, retaining only a given number of the most recent.\n\nExport subdirectories are assumed to be named with monotonically increasing\nintegers; the most recent are taken to be those with the largest values.\n\nArgs:\nexport_dir_base: the base directory under which each export is in a\nversioned subdirectory.\nexports_to_keep: Number of exports to keep. Older exports will be garbage\ncollected. Set to None to disable.", "source": "codesearchnet"}
{"code": "def exists(path):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.exists(path)", "docstring": "Check if the provided path exists on the FileSystem.\n\nArgs:\npath: string path that needs to be checked.\n\nReturns: boolean flag indicating if path exists", "source": "github-repos"}
{"code": "def limit(self, accountID, **kwargs):\n    return self.create(accountID, order=LimitOrderRequest(**kwargs))", "docstring": "Shortcut to create a Limit Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a LimitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def Parse(text):\n  \n  precondition.AssertType(text, Text)\n\n  if compatibility.PY2:\n    text = text.encode(\"utf-8\")\n\n  return yaml.safe_load(text)", "docstring": "Parses a YAML source into a Python object.\n\nArgs:\ntext: A YAML source to parse.\n\nReturns:\nA Python data structure corresponding to the YAML source.", "source": "juraj-google-style"}
{"code": "def verify(self, obj):\n        \n\n        if self.encoding == 'none' and not isinstance(obj, (bytes, bytearray)):\n            raise ValidationError('Byte object was not either bytes or a bytearray', type=obj.__class__.__name__)\n        elif self.encoding == 'base64':\n            try:\n                data = base64.b64decode(obj)\n                return data\n            except TypeError:\n                raise ValidationError(\"Could not decode base64 encoded bytes\", obj=obj)\n        elif self.encoding == 'hex':\n            try:\n                data = binascii.unhexlify(obj)\n                return data\n            except TypeError:\n                raise ValidationError(\"Could not decode hex encoded bytes\", obj=obj)\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nReturns:\nbytes or byterray: The decoded byte buffer\n\nRaises:\nValidationError: If there is a problem verifying the object, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def extract_class(jar, name):\n    \n\n    with jar.open(name) as entry:\n        return LinkableClass(javatools.unpack_class(entry))", "docstring": "Extracts a LinkableClass from a jar.\n\nArgs:\njar: An open ZipFile instance.\nname: A string containing the binary name of a class.\n\nRaises:\nKeyError: The class does not exist in the jar.", "source": "juraj-google-style"}
{"code": "def save(self, path=None, complevel=1, complib='zlib'):\n    if (path is None):\n        path = (self.hexuid + '.hdf5')\n    elif os.path.isdir(path):\n        path += ((os.sep + self.hexuid) + '.hdf5')\n    elif (not (path.endswith('.hdf5') or path.endswith('.hdf'))):\n        raise ValueError('File path must have a \".hdf5\" or \".hdf\" extension.')\n    with pd.HDFStore(path, 'w', complevel=complevel, complib=complib) as store:\n        store['kwargs'] = pd.Series()\n        store.get_storer('kwargs').attrs.metadata = self._rel()\n        fc = 0\n        for (name, data) in self._data().items():\n            if hasattr(data, '_revert_categories'):\n                data._revert_categories()\n            name = (name[1:] if name.startswith('_') else name)\n            if isinstance(data, Field):\n                fname = (('FIELD{}_'.format(fc) + name) + '/')\n                store[(fname + 'data')] = pd.DataFrame(data)\n                for (i, field) in enumerate(data.field_values):\n                    ffname = ((fname + 'values') + str(i))\n                    if isinstance(field, pd.Series):\n                        store[ffname] = pd.Series(field)\n                    else:\n                        store[ffname] = pd.DataFrame(field)\n                fc += 1\n            elif isinstance(data, Series):\n                s = pd.Series(data)\n                if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):\n                    s = s.astype('O')\n                store[name] = s\n            elif isinstance(data, DataFrame):\n                store[name] = pd.DataFrame(data)\n            elif isinstance(data, SparseSeries):\n                s = pd.SparseSeries(data)\n                if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):\n                    s = s.astype('O')\n                store[name] = s\n            elif isinstance(data, SparseDataFrame):\n                store[name] = pd.SparseDataFrame(data)\n            else:\n                if (hasattr(data, 'dtype') and isinstance(data.dtype, pd.types.dtypes.CategoricalDtype)):\n                    data = data.astype('O')\n                else:\n                    for col in data:\n                        if isinstance(data[col].dtype, pd.types.dtypes.CategoricalDtype):\n                            data[col] = data[col].astype('O')\n                store[name] = data\n            if hasattr(data, '_set_categories'):\n                data._set_categories()", "docstring": "Save the container as an HDF5 archive.\n\nArgs:\npath (str): Path where to save the container", "source": "codesearchnet"}
{"code": "def write_jsonl_file(fname, data):\n    if (not isinstance(data, list)):\n        print('warning: malformed json data for file', fname)\n        return\n    with open(fname, 'w') as of:\n        for row in data:\n            if row.strip():\n                of.write(('%s\\n' % row.strip()))", "docstring": "Writes a jsonl file.\n\nArgs:\ndata: list of json encoded data", "source": "codesearchnet"}
{"code": "def get_pointgroup(self, tolerance=0.3):\n    PA = self._get_point_group_analyzer(tolerance=tolerance)\n    return PointGroupOperations(PA.sch_symbol, PA.symmops)", "docstring": "Returns a PointGroup object for the molecule.\n\nArgs:\ntolerance (float): Tolerance to generate the full set of symmetry\noperations.\n\nReturns:\n:class:`~PointGroupOperations`", "source": "codesearchnet"}
{"code": "def __init__(self, idx):\n    \n    self.idx = idx\n    self.in_edges = []\n    self.out_edges = []", "docstring": "Initialize the Vertex.\n\nArgs:\nidx: The index of the vertex.", "source": "juraj-google-style"}
{"code": "def bounding_box_from(points, i, i1, thr):\n    \n    pi = points[i]\n    pi1 = points[i1]\n\n    min_lat = min(pi.lat, pi1.lat)\n    min_lon = min(pi.lon, pi1.lon)\n    max_lat = max(pi.lat, pi1.lat)\n    max_lon = max(pi.lon, pi1.lon)\n\n    return min_lat-thr, min_lon-thr, max_lat+thr, max_lon+thr", "docstring": "Creates bounding box for a line segment\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\ni (int): Line segment start, index in points array\ni1 (int): Line segment end, index in points array\nReturns:\n(float, float, float, float): with bounding box min x, min y, max x and max y", "source": "juraj-google-style"}
{"code": "def start_worker(node_ip_address, object_store_name, raylet_name, redis_address, worker_path, temp_dir, stdout_file=None, stderr_file=None):\n    command = [sys.executable, '-u', worker_path, ('--node-ip-address=' + node_ip_address), ('--object-store-name=' + object_store_name), ('--raylet-name=' + raylet_name), ('--redis-address=' + str(redis_address)), ('--temp-dir=' + temp_dir)]\n    process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_WORKER, stdout_file=stdout_file, stderr_file=stderr_file)\n    return process_info", "docstring": "This method starts a worker process.\n\nArgs:\nnode_ip_address (str): The IP address of the node that this worker is\nrunning on.\nobject_store_name (str): The socket name of the object store.\nraylet_name (str): The socket name of the raylet server.\nredis_address (str): The address that the Redis server is listening on.\nworker_path (str): The path of the source code which the worker process\nwill run.\ntemp_dir (str): The path of the temp dir.\nstdout_file: A file handle opened for writing to redirect stdout to. If\nno redirection should happen, then this should be None.\nstderr_file: A file handle opened for writing to redirect stderr to. If\nno redirection should happen, then this should be None.\n\nReturns:\nProcessInfo for the process that was started.", "source": "codesearchnet"}
{"code": "def run(command, num_retries=1, timeout=(- 1), **kwargs):\n    last_error = None\n    for _ in range(num_retries):\n        try:\n            process = Subprocess(command, **kwargs)\n            return process.run(timeout)\n        except Exception as err:\n            last_error = err\n    raise last_error", "docstring": "Run a command with optional timeout and retries.\n\nProvides a convenience method for executing a subprocess with\nadditional error handling.\n\nArguments:\ncommand (list of str): The command to execute.\nnum_retries (int, optional): If the subprocess fails, the number of\nattempts to execute it before failing.\ntimeout (float, optional): If positive, the number of seconds to wait\nfor subprocess completion before failing.\n**kwargs: Additional args to pass to Subprocess.__init__()\n\nReturns:\nTuple of (int, str, str): Where the variables represent\n(exit status, stdout, stderr).\n\nRaises:\nSubprocessError: If the command fails after the given number of\nretries.", "source": "codesearchnet"}
{"code": "def monitoring(line, cell=None):\n    parser = datalab.utils.commands.CommandParser(prog='monitoring', description='Execute various Monitoring-related operations. Use \"%monitoring <command> -h\" for help on a specific command.')\n    list_parser = parser.subcommand('list', 'List the metrics or resource types in a monitored project.')\n    list_metric_parser = list_parser.subcommand('metrics', 'List the metrics that are available through the Monitoring API.')\n    list_metric_parser.add_argument('-t', '--type', help='The type of metric(s) to list; can include wildchars.')\n    list_metric_parser.add_argument('-p', '--project', help='The project on which to execute the request.')\n    list_metric_parser.set_defaults(func=_list_metric_descriptors)\n    list_resource_parser = list_parser.subcommand('resource_types', 'List the monitored resource types that are available through the Monitoring API.')\n    list_resource_parser.add_argument('-p', '--project', help='The project on which to execute the request.')\n    list_resource_parser.add_argument('-t', '--type', help='The resource type(s) to list; can include wildchars.')\n    list_resource_parser.set_defaults(func=_list_resource_descriptors)\n    list_group_parser = list_parser.subcommand('groups', 'List the Stackdriver groups in this project.')\n    list_group_parser.add_argument('-p', '--project', help='The project on which to execute the request.')\n    list_group_parser.add_argument('-n', '--name', help='The name of the group(s) to list; can include wildchars.')\n    list_group_parser.set_defaults(func=_list_groups)\n    return datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the monitoring cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the storage line.\nReturns:\nThe results of executing the cell.", "source": "codesearchnet"}
{"code": "def int64_gauge(urn, metric, ptransform=None) -> metrics_pb2.MonitoringInfo:\n    labels = create_labels(ptransform=ptransform)\n    if isinstance(metric, int):\n        value = metric\n        time_ms = int(time.time()) * 1000\n    else:\n        raise TypeError('Expected int metric type but received %s with value %s' % (type(metric), metric))\n    coder = coders.VarIntCoder()\n    payload = coder.encode(time_ms) + coder.encode(value)\n    return create_monitoring_info(urn, LATEST_INT64_TYPE, payload, labels)", "docstring": "Return the gauge monitoring info for the URN, metric and labels.\n\nArgs:\nurn: The URN of the monitoring info/metric.\nmetric: An int representing the value. The current time will be used for\nthe timestamp.\nptransform: The ptransform id used as a label.", "source": "github-repos"}
{"code": "def _call_concrete_function(function, inputs):\n    expected_structure = function.graph.structured_input_signature\n    flatten_inputs = nest.flatten_up_to(expected_structure, inputs, expand_composites=True)\n    flatten_expected = nest.flatten(expected_structure, expand_composites=True)\n    tensor_inputs = []\n    for arg, expected in zip(flatten_inputs, flatten_expected):\n        if isinstance(expected, tensor.TensorSpec):\n            tensor_inputs.append(ops.convert_to_tensor(arg, dtype_hint=expected.dtype))\n        elif isinstance(expected, resource_variable_ops.VariableSpec):\n            tensor_inputs.append(arg.handle)\n    result = function._call_flat(tensor_inputs, function.captured_inputs)\n    if isinstance(result, ops.Operation):\n        return None\n    return result", "docstring": "Calls a restored Function with structured inputs.\n\nThis differs from `function.__call__` in that inputs and outputs are\nstructured and that it casts inputs to tensors if needed.\n\nNote: this does not checks that non-tensor inputs match. That should be\ndone before via `_concrete_function_callable_with`.\n\nArgs:\nfunction: ConcreteFunction to call.\ninputs: Structured inputs compatible with\n`function.graph.structured_input_signature`.\n\nReturns:\nThe structured function output.", "source": "github-repos"}
{"code": "def method_not_allowed(cls, errors=None):\n        \n        if cls.expose_status:  \n            cls.response.content_type = 'application/json'\n            cls.response._status_line = '405 Method Not Allowed'\n\n        return cls(405, None, errors).to_json", "docstring": "Shortcut API for HTTP 405 `Method not allowed` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "juraj-google-style"}
{"code": "def most_frequent(self, k):\n    \n    word_count = {w:self.word_count[w] for w in self.words[:k]}\n    return CountedVocabulary(word_count=word_count)", "docstring": "Returns a vocabulary with the most frequent `k` words.\n\nArgs:\nk (integer): specifies the top k most frequent words to be returned.", "source": "juraj-google-style"}
{"code": "def get_cached_filename(self, filename, extention, settings_list=None):\n        \n        cached_name = \"_\".join([filename, self.get_hash()])\n        return \".\".join([cached_name, extention])", "docstring": "Creates a filename with md5 cache string based on settings list\n\nArgs:\nfilename (str): the filename without extention\nextention (str): the file extention without dot. (i.e. 'pkl')\nsettings_list (dict|list): the settings list as list (optional)\nNB! The dictionaries have to be sorted or hash id will change\narbitrarely.", "source": "juraj-google-style"}
{"code": "def _log_band_gap_information(bs):\n    bg_data = bs.get_band_gap()\n    if (not bg_data['direct']):\n        logging.info('Indirect band gap: {:.3f} eV'.format(bg_data['energy']))\n    direct_data = bs.get_direct_band_gap_dict()\n    if bs.is_spin_polarized:\n        direct_bg = min((spin_data['value'] for spin_data in direct_data.values()))\n        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))\n        for (spin, spin_data) in direct_data.items():\n            direct_kindex = spin_data['kpoint_index']\n            direct_kpoint = bs.kpoints[direct_kindex].frac_coords\n            direct_kpoint = kpt_str.format(k=direct_kpoint)\n            eq_kpoints = bs.get_equivalent_kpoints(direct_kindex)\n            k_indices = ', '.join(map(str, eq_kpoints))\n            b_indices = ', '.join([str((i + 1)) for i in spin_data['band_indices']])\n            logging.info('  {}:'.format(spin.name.capitalize()))\n            logging.info('    k-point: {}'.format(direct_kpoint))\n            logging.info('    k-point indices: {}'.format(k_indices))\n            logging.info('    Band indices: {}'.format(b_indices))\n    else:\n        direct_bg = direct_data[Spin.up]['value']\n        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))\n        direct_kindex = direct_data[Spin.up]['kpoint_index']\n        direct_kpoint = kpt_str.format(k=bs.kpoints[direct_kindex].frac_coords)\n        k_indices = ', '.join(map(str, bs.get_equivalent_kpoints(direct_kindex)))\n        b_indices = ', '.join([str((i + 1)) for i in direct_data[Spin.up]['band_indices']])\n        logging.info('  k-point: {}'.format(direct_kpoint))\n        logging.info('  k-point indices: {}'.format(k_indices))\n        logging.info('  Band indices: {}'.format(b_indices))", "docstring": "Log data about the direct and indirect band gaps.\n\nArgs:\nbs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):", "source": "codesearchnet"}
{"code": "def install(self, connection, partition, table_name=None, columns=None, materialize=False, logger=None):\n    partition.localize()\n    self._add_partition(connection, partition)\n    fdw_table = partition.vid\n    view_table = '{}_v'.format(fdw_table)\n    if materialize:\n        with connection.cursor() as cursor:\n            view_exists = self._relation_exists(connection, view_table)\n            if view_exists:\n                logger.debug('Materialized view of the partition already exists.\\n    partition: {}, view: {}'.format(partition.name, view_table))\n            else:\n                query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'.format(view_table, fdw_table)\n                logger.debug('Creating new materialized view of the partition.\\n    partition: {}, view: {}, query: {}'.format(partition.name, view_table, query))\n                cursor.execute(query)\n                cursor.execute('COMMIT;')\n    final_table = (view_table if materialize else fdw_table)\n    with connection.cursor() as cursor:\n        view_q = 'CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} '.format(partition.vid, final_table)\n        cursor.execute(view_q)\n        cursor.execute('COMMIT;')\n    return partition.vid", "docstring": "Creates FDW or materialize view for given partition.\n\nArgs:\nconnection: connection to postgresql\npartition (orm.Partition):\nmaterialize (boolean): if True, create read-only table. If False create virtual table.\n\nReturns:\nstr: name of the created table.", "source": "codesearchnet"}
{"code": "def linear_interpolate(tensor1, tensor2, coeffs):\n  \n  interp_tensors = []\n  for coeff in coeffs:\n    interp_tensor = tensor1 + coeff * (tensor2 - tensor1)\n    interp_tensors.append(interp_tensor)\n  return tf.concat(interp_tensors, axis=0)", "docstring": "Linearly interpolate between two tensors at coeff.\n\nArgs:\ntensor1: 4-D Tensor, shape=(NHWC)\ntensor2: 4-D Tensor, shape=(NHWC)\ncoeffs: list of floats.\nReturns:\ninterp_latents: 5-D Tensor, with interp_latents[i] representing\ninterpolations at coeffs[i].\nshape=(len(coeffs), NHWC)", "source": "juraj-google-style"}
{"code": "def CheckAltTokens(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    if Match('^\\\\s*\n        return\n    if ((line.find('') >= 0)):\n        return\n    for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):\n        error(filename, linenum, 'readability/alt_tokens', 2, ('Use operator %s instead of %s' % (_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))))", "docstring": "Check alternative keywords being used in boolean expressions.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def remove(path):\n    \n    if os.path.isdir(path):\n        return __rmtree(path)\n    else:\n        return __rmfile(path)", "docstring": "Delete a file or directory.\n\nArgs:\npath (str): Path to the file or directory that needs to be deleted.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def parse(self, **global_args):\n    if (self.build_file not in ParseContext._parsed):\n        butcher_context = {}\n        for str_to_exec in self._strs_to_exec:\n            ast = compile(str_to_exec, '<string>', 'exec')\n            exec_function(ast, butcher_context)\n        with ParseContext.activate(self):\n            startdir = os.path.abspath(os.curdir)\n            try:\n                os.chdir(self.build_file.path_on_disk)\n                if (self.build_file not in ParseContext._parsed):\n                    ParseContext._parsed.add(self.build_file)\n                    eval_globals = copy.copy(butcher_context)\n                    eval_globals.update({'ROOT_DIR': self.build_file.path_on_disk, '__file__': 'bogus please fix this'})\n                    eval_globals.update(global_args)\n                    exec_function(self.build_file.code, eval_globals)\n            finally:\n                os.chdir(startdir)", "docstring": "Entry point to parsing a BUILD file.\n\nArgs:\n**global_args: Variables to include in the parsing environment.", "source": "codesearchnet"}
{"code": "def variable_type(self, variable):\n    var_type = 'String'\n    if (variable is not None):\n        variable = variable.strip()\n        if re.match(self._variable_match, variable):\n            var_type = re.search(self._variable_parse, variable).group(4)\n    return var_type", "docstring": "Get the Type from the variable string or default to String type.\n\nThe default type is \"String\" for those cases when the input variable is\ncontains not \"DB variable\" and is just a String.\n\n**Example Variable**::\n\n#App:1234:output!StringArray returns **StringArray**\n\n**Example String**::\n\n\"My Data\" returns **String**\n\nArgs:\nvariable (string): The variable to be parsed\n\nReturns:\n(string): The variable type.", "source": "codesearchnet"}
{"code": "def FillDepressions(dem, epsilon=False, in_place=False, topology='D8'):\n    if (type(dem) is not rdarray):\n        raise Exception('A richdem.rdarray or numpy.ndarray is required!')\n    if (topology not in ['D8', 'D4']):\n        raise Exception('Unknown topology!')\n    if (not in_place):\n        dem = dem.copy()\n    _AddAnalysis(dem, 'FillDepressions(dem, epsilon={0})'.format(epsilon))\n    demw = dem.wrap()\n    if epsilon:\n        if (topology == 'D8'):\n            _richdem.rdPFepsilonD8(demw)\n        elif (topology == 'D4'):\n            _richdem.rdPFepsilonD4(demw)\n    elif (topology == 'D8'):\n        _richdem.rdFillDepressionsD8(demw)\n    elif (topology == 'D4'):\n        _richdem.rdFillDepressionsD4(demw)\n    dem.copyFromWrapped(demw)\n    if (not in_place):\n        return dem", "docstring": "Fills all depressions in a DEM.\n\nArgs:\ndem     (rdarray): An elevation model\nepsilon (float):   If True, an epsilon gradient is imposed to all flat regions.\nThis ensures that there is always a local gradient.\nin_place (bool):   If True, the DEM is modified in place and there is\nno return; otherwise, a new, altered DEM is returned.\ntopology (string): A topology indicator\n\nReturns:\nDEM without depressions.", "source": "codesearchnet"}
{"code": "def getHeaders(self):\n    headers = self._impl.getHeaders()\n    return tuple((headers.getIndex(i) for i in range(self._impl.getNumCols())))", "docstring": "Get the headers of this DataFrame.\n\nReturns:\nThe headers of this DataFrame.", "source": "codesearchnet"}
{"code": "def PyParseRangeCheck(lower_bound, upper_bound):\n  \n  \n  def CheckRange(string, location, tokens):\n    \n    try:\n      check_number = tokens[0]\n    except IndexError:\n      check_number = -1\n\n    if check_number < lower_bound:\n      raise pyparsing.ParseException(\n          'Value: {0:d} precedes lower bound: {1:d}'.format(\n              check_number, lower_bound))\n\n    if check_number > upper_bound:\n      raise pyparsing.ParseException(\n          'Value: {0:d} exceeds upper bound: {1:d}'.format(\n              check_number, upper_bound))\n\n  \n  \n  \n  return CheckRange", "docstring": "Verify that a number is within a defined range.\n\nThis is a callback method for pyparsing setParseAction\nthat verifies that a read number is within a certain range.\n\nTo use this method it needs to be defined as a callback method\nin setParseAction with the upper and lower bound set as parameters.\n\nArgs:\nlower_bound (int): lower bound of the range.\nupper_bound (int): upper bound of the range.\n\nReturns:\nFunction: callback method that can be used by pyparsing setParseAction.", "source": "juraj-google-style"}
{"code": "def _rolling_window(self, window_length, func1d, step=1, return_rolled=False):\n        \n        \n        if window_length % 2 == 0:\n            window_length += 1\n\n        shape = self.shape[:-1] + (self.shape[-1], window_length)\n        strides = self.strides + (step*self.strides[-1],)\n        data = np.nan_to_num(self)\n        data = np.pad(data, int(step*window_length\n        rolled = np.lib.stride_tricks.as_strided(data,\n                                                 shape=shape,\n                                                 strides=strides)\n        result = np.apply_along_axis(func1d, -1, rolled)\n        result[np.isnan(self)] = np.nan\n\n        if return_rolled:\n            return result, rolled\n        else:\n            return result", "docstring": "Private function. Smoother for other smoothing/conditioning functions.\n\nArgs:\nwindow_length (int): the window length.\nfunc1d (function): a function that takes a 1D array and returns a\nscalar.\nstep (int): if you want to skip samples in the shifted versions.\nDon't use this for smoothing, you will get strange results.\n\nReturns:\nndarray: the resulting array.", "source": "juraj-google-style"}
{"code": "def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor:\n    projected_group_tokens = self.mlp_inter(group_tokens)\n    projected_group_tokens = self.norm_post_tokens(projected_group_tokens)\n    return projected_group_tokens", "docstring": "Args:\ngroup_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels]\n\nReturns:\nprojected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels]", "source": "github-repos"}
{"code": "def parseSemver(text):\n    txt = text.strip().lstrip('vV')\n    ret = {}\n    m = semver_re.match(txt)\n    if (not m):\n        return None\n    d = m.groupdict()\n    ret['major'] = int(d.get('maj'))\n    ret['minor'] = int(d.get('min'))\n    ret['patch'] = int(d.get('pat'))\n    pre = d.get('pre')\n    bld = d.get('bld')\n    if pre:\n        parts = pre.split('.')\n        for part in parts:\n            if (not part):\n                return None\n            try:\n                int(part)\n            except ValueError:\n                continue\n            else:\n                if ((part[0] == '0') and (len(part) > 1)):\n                    return None\n        ret['pre'] = pre\n    if bld:\n        parts = bld.split('.')\n        for part in parts:\n            if (not part):\n                return None\n        ret['build'] = bld\n    return ret", "docstring": "Parse a Semantic Version string into is component parts.\n\nArgs:\ntext (str): A text string to parse into semver components. This string has whitespace and leading 'v'\ncharacters stripped off of it.\n\nExamples:\nParse a string into it semvar parts::\n\nparts = parseSemver('v1.2.3')\n\nReturns:\ndict: The dictionary will contain the keys 'major', 'minor' and 'patch' pointing to integer values.\nThe dictionary may also contain keys for 'build' and 'pre' information if that data is parsed out\nof a semver string. None is returned if the string is not a valid Semver string.", "source": "codesearchnet"}
{"code": "def get_execution_role(sagemaker_session=None):\n    \n    if not sagemaker_session:\n        sagemaker_session = Session()\n    arn = sagemaker_session.get_caller_identity_arn()\n\n    if ':role/' in arn:\n        return arn\n    message = 'The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role'\n    raise ValueError(message.format(arn))", "docstring": "Return the role ARN whose credentials are used to call the API.\nThrows an exception if\nArgs:\nsagemaker_session(Session): Current sagemaker session\nReturns:\n(str): The role ARN", "source": "juraj-google-style"}
{"code": "def createEditor(self, parent, option, index):\n        \n        editor = QtGui.QLineEdit(parent)\n        return editor", "docstring": "Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.\n\nArgs:\nparent (QWidget): parent widget.\noption (QStyleOptionViewItem): controls how editor widget appears.\nindex (QModelIndex): model data index.", "source": "juraj-google-style"}
{"code": "def validator(sample: rd.RepresentativeSample) -> rd.RepresentativeSample:\n    if not isinstance(sample, Mapping):\n        raise ValueError(f'Invalid representative sample type. Provide a mapping (usually a dict) of {{input_key: input_value}}. Got type: {type(sample)} instead.')\n    if set(sample.keys()) != expected_input_keys:\n        raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(sample.keys())}. Please provide correct input keys for representative samples.')\n    return sample", "docstring": "Validates a single instance of representative sample.\n\nThis provides a simple check for `sample` that this is a mapping of\n{input_key: input_value}.\n\nArgs:\nsample: A `RepresentativeSample` to validate.\n\nReturns:\n`sample` iff it is valid.\n\nRaises:\nValueError: iff the sample isn't an instance of `Mapping`.\nKeyError: iff the sample does not have the set of input keys that match\nthe input keys of the function.", "source": "github-repos"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0) + [1]\n    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]", "docstring": "Args:\nRetrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def threshold(x, threshold, default_value):\n    return ops.threshold(x, threshold, default_value)", "docstring": "Threshold activation function.\n\nIt is defined as:\n\n`threshold(x) = x` if `x > threshold`,\n`threshold(x) = default_value` otherwise.\n\nArgs:\nx: Input tensor.\nthreshold: The value that decides when to retain or replace x.\ndefault_value: Value to assign when `x <= threshold`.", "source": "github-repos"}
{"code": "def get_repo(task, source_env_prefix):\n    \n    repo = _extract_from_env_in_payload(task, source_env_prefix + '_HEAD_REPOSITORY')\n    if repo is not None:\n        repo = repo.rstrip('/')\n    return repo", "docstring": "Get the repo for a task.\n\nArgs:\ntask (ChainOfTrust or LinkOfTrust): the trust object to inspect\nsource_env_prefix (str): The environment variable prefix that is used\nto get repository information.\n\nReturns:\nstr: the source url.\nNone: if not defined for this task.", "source": "juraj-google-style"}
{"code": "def preprocessing_fn(inputs):\n    outputs = {}\n    for key in taxi.DENSE_FLOAT_FEATURE_KEYS:\n        outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(_fill_in_missing(inputs[key]))\n    for key in taxi.VOCAB_FEATURE_KEYS:\n        outputs[taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(_fill_in_missing(inputs[key]), top_k=taxi.VOCAB_SIZE, num_oov_buckets=taxi.OOV_SIZE)\n    for key in taxi.BUCKET_FEATURE_KEYS:\n        outputs[taxi.transformed_name(key)] = transform.bucketize(_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)\n    for key in taxi.CATEGORICAL_FEATURE_KEYS:\n        outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])\n    taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])\n    tips = _fill_in_missing(inputs[taxi.LABEL_KEY])\n    outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(tf.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), tf.cast(tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))\n    return outputs", "docstring": "tf.transform's callback function for preprocessing inputs.\n\nArgs:\ninputs: map from feature keys to raw not-yet-transformed features.\n\nReturns:\nMap from string feature key to transformed feature operations.", "source": "github-repos"}
{"code": "def __init__(self, resolver_context, file_object=None):\n    \n    super(DataRange, self).__init__(resolver_context)\n    self._current_offset = 0\n    self._file_object = file_object\n\n    if file_object:\n      self._file_object_set_in_init = True\n      self._range_offset = 0\n      self._range_size = file_object.get_size()\n    else:\n      self._file_object_set_in_init = False\n      self._range_offset = -1\n      self._range_size = -1", "docstring": "Initializes a file-like object.\n\nIf the file-like object is chained do not separately use the parent\nfile-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_object (Optional[file]): parent file-like object.", "source": "juraj-google-style"}
{"code": "def fetch(self, addon_id, data={}, **kwargs):\n        \n        return super(Addon, self).fetch(addon_id, data, **kwargs)", "docstring": "Fetch addon for given Id\n\nArgs:\naddon_id : Id for which addon object has to be retrieved\n\nReturns:\naddon dict for given subscription Id", "source": "juraj-google-style"}
{"code": "def get_context(self, max_frames=None, missing_entities=[]):\n    if ((not max_frames) or (max_frames > len(self.frame_stack))):\n        max_frames = len(self.frame_stack)\n    missing_entities = list(missing_entities)\n    context = []\n    for i in xrange(max_frames):\n        frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]\n        for entity in frame_entities:\n            entity['confidence'] = (entity.get('confidence', 1.0) / (2.0 + i))\n        context += frame_entities\n    result = []\n    if (len(missing_entities) > 0):\n        for entity in context:\n            if (entity.get('data') in missing_entities):\n                result.append(entity)\n                missing_entities.remove(entity.get('data'))\n    else:\n        result = context\n    return result", "docstring": "Constructs a list of entities from the context.\n\nArgs:\nmax_frames(int): maximum number of frames to look back\nmissing_entities(list of str): a list or set of tag names, as strings\n\nReturns:\nlist: a list of entities", "source": "codesearchnet"}
{"code": "def mean_pooling(self, model_output, attention_mask):\n    token_embeddings = model_output[0]\n    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-09)", "docstring": "The function calculates the mean of token embeddings\n\nArgs:\nmodel_output: The output of the model.\nattention_mask: This is a tensor that contains 1s for all input tokens and\n0s for all padding tokens.\n\nReturns:\nThe mean of the token embeddings.", "source": "github-repos"}
{"code": "def by_location(self, location, cc=None):\n        \n\n        header, content = self._http_request(self.BASE_URL, location=location, cc=cc)\t\t\n        return json.loads(content)", "docstring": "Perform a Yelp Neighborhood API Search based on a location specifier.\n\nArgs:\nlocation - textual location specifier of form: \"address, city, state or zip, optional country\"\ncc       - ISO 3166-1 alpha-2 country code. (Optional)", "source": "juraj-google-style"}
{"code": "def show_backref(target, max_depth=3):\n    if objgraph is None:\n        raise NotImplementedError('objgraph is not installed.')\n    string_io = io.StringIO()\n    objgraph.show_backrefs(target, max_depth=max_depth, output=string_io)\n    graph = string_io.getvalue()\n    string_io.close()\n    return graph", "docstring": "Returns a dot graph of all the objects that are referencing the target.\n\nA object referencing graph is useful to debug memory leak like circular\nreference. objgraph provides a good visualization of the memory graph than\nmost python built-in utilities like gc.get_referrers(), which are not\nhuman-readable sometimes.\n\nThe dot graph will be written to a string IO object, and can be rendered with\ngraphviz in operating system.\nE.g. dot -Tpng {$dot_graph} -o output.png\nArgs:\ntarget: The target object for the memory graph.\nmax_depth: The maximum depth of the graph. By default 3 layers of references\nare used. Increases this a lot may result in the graph growing too big.\n\nReturns:\nA string that contains the object reference graph.\nRaises:\nNotImplementedError: if objgraph is not installed.", "source": "github-repos"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    mru_values_dict = {}\n    for subkey in registry_key.GetSubkeys():\n      username_value = subkey.GetValueByName('UsernameHint')\n\n      if (username_value and username_value.data and\n          username_value.DataIsString()):\n        username = username_value.GetDataAsObject()\n      else:\n        username = 'N/A'\n\n      mru_values_dict[subkey.name] = username\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = subkey.path\n      event_data.offset = subkey.offset\n      event_data.regvalue = {'Username hint': username}\n      event_data.source_append = self._SOURCE_APPEND\n\n      event = time_events.DateTimeValuesEvent(\n          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = mru_values_dict\n    event_data.source_append = self._SOURCE_APPEND\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Terminal Server Client Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def get_victim_web_asset(self, main_type, sub_type, unique_id, asset_id, params=None):\n        \n        params = params or {}\n\n        return self.victim_web_asset(main_type, sub_type, unique_id, asset_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nasset_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def forward(self, g_values: torch.Tensor) -> torch.Tensor:\n    p_one_unique_token, p_two_unique_tokens = self._compute_latents(g_values)\n    return 0.5 * ((g_values + 0.5) * p_two_unique_tokens + p_one_unique_token)", "docstring": "Computes the likelihoods P(g_values|watermarked).\n\nArgs:\ng_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`):\ng-values (values 0 or 1)\n\nReturns:\np(g_values|watermarked) of shape [batch_size, seq_len, watermarking_depth].", "source": "github-repos"}
{"code": "def _bitResponseToValue(bytestring):\n    _checkString(bytestring, description='bytestring', minlength=1, maxlength=1)\n    RESPONSE_ON = '\\x01'\n    RESPONSE_OFF = '\\x00'\n    if (bytestring == RESPONSE_ON):\n        return 1\n    elif (bytestring == RESPONSE_OFF):\n        return 0\n    else:\n        raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))", "docstring": "Convert a response string to a numerical value.\n\nArgs:\nbytestring (str): A string of length 1. Can be for example ``\\\\x01``.\n\nReturns:\nThe converted value (int).\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def tseries_between(self, tstart=None, tend=None):\n        \n        if self.tseries is None:\n            return None\n\n        ndat = self.tseries.shape[0]\n\n        if tstart is None:\n            istart = 0\n        else:\n            igm = 0\n            igp = ndat - 1\n            while igp - igm > 1:\n                istart = igm + (igp - igm) \n                if self.tseries.iloc[istart]['t'] >= tstart:\n                    igp = istart\n                else:\n                    igm = istart\n            istart = igp\n\n        if tend is None:\n            iend = None\n        else:\n            igm = 0\n            igp = ndat - 1\n            while igp - igm > 1:\n                iend = igm + (igp - igm) \n                if self.tseries.iloc[iend]['t'] > tend:\n                    igp = iend\n                else:\n                    igm = iend\n            iend = igm + 1\n\n        return self.tseries.iloc[istart:iend]", "docstring": "Return time series data between requested times.\n\nArgs:\ntstart (float): starting time. Set to None to start at the\nbeginning of available data.\ntend (float): ending time. Set to None to stop at the end of\navailable data.\nReturns:\n:class:`pandas.DataFrame`: slice of :attr:`tseries`.", "source": "juraj-google-style"}
{"code": "def count(cls, cur, table:str, where_keys: list=None):\n        \n\n        if where_keys:\n            where_clause, values = cls._get_where_clause_with_values(where_keys)\n            query = cls._count_query_where.format(table, where_clause)\n            q, t = query, values\n        else:\n            query = cls._count_query.format(table)\n            q, t = query, ()\n        yield from cur.execute(q, t)\n        result = yield from cur.fetchone()\n        return int(result[0])", "docstring": "gives the number of records in the table\n\nArgs:\ntable: a string indicating the name of the table\n\nReturns:\nan integer indicating the number of records in the table", "source": "juraj-google-style"}
{"code": "def _postprocess_flat_outputs(outputs: Any, need_spmd_partitioning: bool) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:\n    if outputs is None:\n        outputs = tuple()\n    pack_template = nest.flatten(outputs, expand_composites=False)\n    outputs = nest.flatten(outputs, expand_composites=True)\n    outputs += (control_flow_ops.no_op(),)\n    maybe_convert = lambda x: None if x is None else ops.convert_to_tensor(x)\n    try:\n        if need_spmd_partitioning:\n            outputs = [o if isinstance(o, ops.Operation) else maybe_convert(o) for o in outputs]\n        else:\n            with ops.device(core(0)):\n                outputs = [o if isinstance(o, ops.Operation) else maybe_convert(o) for o in outputs]\n    except Exception as e:\n        raise ValueError(f'TPU function return values must all either be Operations or convertible to Tensors. Got error: {e}')\n    output_operations = [o for o in outputs if isinstance(o, ops.Operation)]\n    output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]\n    if outputs != output_tensors + output_operations:\n        raise ValueError('TPU functions must return zero-or more Tensor values followed by zero or more Operations.')\n    if len(output_operations) > 1:\n        pack_template = pack_template[:1 - len(output_operations)]\n    new_output_tensors = []\n    for t in output_tensors:\n        if t is None:\n            new_output_tensors.append(None)\n        elif need_spmd_partitioning:\n            o = array_ops.identity(t)\n            o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True))\n            new_output_tensors.append(o)\n        else:\n            with ops.device(t.device if t.device else core(0)):\n                o = array_ops.identity(t)\n                o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True))\n                new_output_tensors.append(o)\n    return (new_output_tensors, output_operations, pack_template)", "docstring": "Validates non-flat outputs, add backs device assignments and other attrs.\n\nArgs:\noutputs: Output from `computation` inside `tpu.rewrite`.\nneed_spmd_partitioning: Whether XLA SPMD partitioning is needed.\n\nReturns:\n- Tensors extracted from outputs.\n- Operations extracted from outputs.\n- A pack template for use with nest.pack_sequence_as to pack the tensors.", "source": "github-repos"}
{"code": "def HandleMessageBundles(self, request_comms, response_comms):\n    (messages, source, timestamp) = self._communicator.DecodeMessages(request_comms)\n    now = time.time()\n    if messages:\n        self.ReceiveMessages(source, messages)\n    required_count = max(0, (self.max_queue_size - request_comms.queue_size))\n    tasks = []\n    message_list = rdf_flows.MessageList()\n    if ((time.time() - now) < 10):\n        tasks = self.DrainTaskSchedulerQueueForClient(source, required_count)\n        message_list.job = tasks\n    self._communicator.EncodeMessages(message_list, response_comms, destination=source, timestamp=timestamp, api_version=request_comms.api_version)\n    return (source, len(messages))", "docstring": "Processes a queue of messages as passed from the client.\n\nWe basically dispatch all the GrrMessages in the queue to the task scheduler\nfor backend processing. We then retrieve from the TS the messages destined\nfor this client.\n\nArgs:\nrequest_comms: A ClientCommunication rdfvalue with messages sent by the\nclient. source should be set to the client CN.\nresponse_comms: A ClientCommunication rdfvalue of jobs destined to this\nclient.\n\nReturns:\ntuple of (source, message_count) where message_count is the number of\nmessages received from the client with common name source.", "source": "codesearchnet"}
{"code": "def scatterplot_matrix(df, features, downsample_frac=None, figsize=(15, 15)):\n    \n\n    if downsample_frac:\n        df = df.sample(frac=downsample_frac)\n\n    plt.figure(figsize=figsize)\n    sns.pairplot(df[features], hue='target')\n    plt.show()", "docstring": "Plot a scatterplot matrix for a list of features, colored by target value.\n\nExample: `scatterplot_matrix(X, X.columns.tolist(), downsample_frac=0.01)`\n\nArgs:\ndf: Pandas dataframe containing the target column (named 'target').\nfeatures: The list of features to include in the correlation plot.\ndownsample_frac: Dataframe downsampling rate (0.1 to include 10% of the dataset).\nfigsize: The size of the plot.", "source": "juraj-google-style"}
{"code": "def get_module(module_abs_import):\n    \n    logger.debug(\"starting\")\n    logger.debug(f\"loading module {module_abs_import}\")\n    try:\n        imported_module = importlib.import_module(module_abs_import)\n        logger.debug(\"done\")\n        return imported_module\n    except ModuleNotFoundError as err:\n        msg = (\"The module doesn't exist. Looking for a file like this: \"\n               f\"{module_abs_import}\")\n\n        extended_msg = (f\"{module_abs_import}.py should be in your working \"\n                        \"dir or it should be installed to the python path.\"\n                        \"\\nIf you have 'package.sub.mod' your current working \"\n                        \"dir should contain ./package/sub/mod.py\\n\"\n                        \"If you specified 'mymodulename', your current \"\n                        \"working dir should contain ./mymodulename.py\\n\"\n                        \"If the module is not in your current working dir, it \"\n                        \"must exist in your current python path - so you \"\n                        \"should have run pip install or setup.py\")\n        logger.error(msg)\n        raise PyModuleNotFoundError(extended_msg) from err", "docstring": "Use importlib to get the module dynamically.\n\nGet instance of the module specified by the module_abs_import.\nThis means that module_abs_import must be resolvable from this package.\n\nArgs:\nmodule_abs_import: string. Absolute name of module to import.\n\nRaises:\nPyModuleNotFoundError: if module not found.", "source": "juraj-google-style"}
{"code": "def get_qos_aggregated_configuration(self):\n    uri = '{}{}'.format(self.data['uri'], self.QOS_AGGREGATED_CONFIGURATION)\n    return self._helper.do_get(uri)", "docstring": "Gets the QoS aggregated configuration for the logical interconnect.\n\nReturns:\ndict: QoS Configuration.", "source": "codesearchnet"}
{"code": "def _length(self):\n    self._build_chunk_headers()\n    length = 0\n    if self._data:\n        for field in self._data:\n            length += len(self._chunk_headers[field])\n            length += len(self._data[field])\n            length += 2\n    if self._files:\n        for field in self._files:\n            length += len(self._chunk_headers[field])\n            length += self._file_size(field)\n            length += 2\n    length += len(self.boundary)\n    length += 6\n    return length", "docstring": "Returns total length for this request.\n\nReturns:\nint. Length", "source": "codesearchnet"}
{"code": "class _TrainingTarget(object):\n\n    def __init__(self, target, feedable=False, skip_target_weights=True):\n        self._target = target\n        self._feedable = feedable\n        self._skip_target_weights = skip_target_weights\n\n    @property\n    def target(self):\n        return self._target\n\n    @property\n    def feedable(self):\n        return self._feedable\n\n    @property\n    def skip_target_weights(self):\n        return self._skip_target_weights", "docstring": "Container for a target tensor (y_true) and its metadata (shape, loss...).\n\nArgs:\ntarget: A target tensor for the model. It may be `None` if the\noutput is excluded from loss computation. It is still kept as None\nsince each output of the model should have a corresponding target. If\nthe target is None, the rest of the attributes will be None as well.\nfeedable: Boolean, whether the target is feedable (requires data to be\npassed in `fit` or `train_on_batch`), or not (model compiled with\n`target_tensors` argument).\nskip_target_weights: Boolean, whether the target should be skipped during\nweights calculation.", "source": "github-repos"}
{"code": "def energy(self, sample_like, dtype=np.float):\n    (energy,) = self.energies(sample_like, dtype=dtype)\n    return energy", "docstring": "The energy of the given sample.\n\nArgs:\nsample_like (samples_like):\nA raw sample. `sample_like` is an extension of\nNumPy's array_like structure. See :func:`.as_samples`.\n\ndtype (:class:`numpy.dtype`, optional):\nThe data type of the returned energies. Defaults to float.\n\nReturns:\nThe energy.", "source": "codesearchnet"}
{"code": "def Unlock(fd, path):\n  \n  try:\n    fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB)\n  except IOError as e:\n    if e.errno == errno.EWOULDBLOCK:\n      raise IOError('Exception unlocking %s. Locked by another process.' % path)\n    else:\n      raise IOError('Exception unlocking %s. %s.' % (path, str(e)))", "docstring": "Release the lock on the file.\n\nArgs:\nfd: int, the file descriptor of the file to unlock.\npath: string, the name of the file to lock.\n\nRaises:\nIOError, raised from flock while attempting to release a file lock.", "source": "juraj-google-style"}
{"code": "def ParseOptions(self, options):\n    \n    \n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=['data_location'])\n\n    \n    signature_identifiers = self.ParseStringOption(\n        options, 'signature_identifiers')\n    if signature_identifiers == 'list':\n      self.list_signature_identifiers = True\n\n    if self.list_signature_identifiers:\n      return\n\n    self._ParseInformationalOptions(options)\n    self._ParseLogFileOptions(options)\n\n    self._ParseStorageMediaOptions(options)\n\n    self._destination_path = self.ParseStringOption(\n        options, 'path', default_value='export')\n\n    if not self._data_location:\n      logger.warning('Unable to automatically determine data location.')\n\n    argument_helper_names = ['artifact_definitions', 'process_resources']\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=argument_helper_names)\n\n    self._ParseFilterOptions(options)\n\n    if (getattr(options, 'no_vss', False) or\n        getattr(options, 'include_duplicates', False)):\n      self._skip_duplicates = False\n\n    self._EnforceProcessMemoryLimit(self._process_memory_limit)", "docstring": "Parses the options and initializes the front-end.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def convert_and_export_with_cache(model: PreTrainedModel, example_input_ids: Optional[torch.Tensor]=None, example_cache_position: Optional[torch.Tensor]=None, dynamic_shapes: Optional[dict]=None, strict: Optional[bool]=None):\n    if not is_torch_greater_or_equal_than_2_3:\n        raise ImportError('torch >= 2.3 is required.')\n    import torch.export._trace\n    ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap)\n    ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa'])\n    model.config._attn_implementation = 'sdpa_without_vmap'\n    with torch.no_grad():\n        example_input_ids = example_input_ids if example_input_ids is not None else torch.tensor([[1]], dtype=torch.long)\n        example_cache_position = example_cache_position if example_cache_position is not None else torch.tensor([0], dtype=torch.long)\n        if is_torch_greater_or_equal('2.6.0'):\n            exported_program = torch.export.export(TorchExportableModuleWithStaticCache(model), args=(example_input_ids, example_cache_position), kwargs={}, dynamic_shapes=dynamic_shapes, strict=strict if strict is not None else True)\n        else:\n            if dynamic_shapes is not None:\n                logging.warning('Dynamic shapes spec will be ignored by convert_and_export_with_cache for torch < 2.6.0.')\n            if strict is not None:\n                logging.warning('The strict flag will be ignored by convert_and_export_with_cache for torch < 2.6.0.')\n            exported_program = torch.export._trace._export(TorchExportableModuleWithStaticCache(model), args=(example_input_ids,), kwargs={'cache_position': example_cache_position}, pre_dispatch=False, strict=True)\n        return exported_program", "docstring": "Convert a `PreTrainedModel` into an exportable module and export it using `torch.export`,\nensuring the exported model is compatible with `ExecuTorch`.\n\nArgs:\nmodel (`PreTrainedModel`): The pretrained model to be exported.\nexample_input_ids (`Optional[torch.Tensor]`): Example input token id used by `torch.export`.\nexample_cache_position (`Optional[torch.Tensor]`): Example current cache position used by `torch.export`.\ndynamic_shapes(`Optional[dict]`): Dynamic shapes used by `torch.export`.\nstrict(`Optional[bool]`): Flag to instruct `torch.export` to use `torchdynamo`.\n\nReturns:\nExported program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`.", "source": "github-repos"}
{"code": "def write(self, brightness):\n    if (not isinstance(brightness, (bool, int))):\n        raise TypeError('Invalid brightness type, should be bool or int.')\n    if isinstance(brightness, bool):\n        brightness = (self._max_brightness if brightness else 0)\n    elif (not (0 <= brightness <= self._max_brightness)):\n        raise ValueError(('Invalid brightness value, should be between 0 and %d.' % self._max_brightness))\n    try:\n        os.write(self._fd, (b'%d\\n' % brightness))\n    except OSError as e:\n        raise LEDError(e.errno, ('Writing LED brightness: ' + e.strerror))\n    try:\n        os.lseek(self._fd, 0, os.SEEK_SET)\n    except OSError as e:\n        raise LEDError(e.errno, ('Rewinding LED brightness: ' + e.strerror))", "docstring": "Set the brightness of the LED to `brightness`.\n\n`brightness` can be a boolean for on/off, or integer value for a\nspecific brightness.\n\nArgs:\nbrightness (bool, int): Brightness value to set.\n\nRaises:\nLEDError: if an I/O or OS error occurs.\nTypeError: if `brightness` type is not bool or int.", "source": "codesearchnet"}
{"code": "def InitFromNotification(self, notification, is_pending=False):\n    \n    self.timestamp = notification.timestamp\n    self.message = notification.message\n    self.subject = str(notification.subject)\n    self.is_pending = is_pending\n\n    reference_type_enum = ApiNotificationReference.Type\n\n    \n    \n    \n    \n    \n    legacy_type = None\n    if \":\" in notification.type:\n      legacy_type, new_type = notification.type.split(\":\", 2)\n      self.notification_type = new_type\n    else:\n      legacy_type = notification.type\n\n    \n    \n    \n    components = self._GetUrnComponents(notification)\n    if legacy_type == \"Discovery\":\n      self.reference.type = reference_type_enum.CLIENT\n      self.reference.client = ApiNotificationClientReference(\n          client_id=components[0])\n    elif legacy_type == \"ViewObject\":\n      if len(components) >= 2 and components[0] == \"hunts\":\n        self.reference.type = reference_type_enum.HUNT\n        self.reference.hunt.hunt_id = components[1]\n      elif len(components) >= 2 and components[0] == \"cron\":\n        self.reference.type = reference_type_enum.CRON\n        self.reference.cron.cron_job_id = components[1]\n      elif len(components) >= 3 and components[1] == \"flows\":\n        self.reference.type = reference_type_enum.FLOW\n        self.reference.flow.flow_id = components[2]\n        self.reference.flow.client_id = components[0]\n      elif len(components) == 1 and rdf_client.ClientURN.Validate(\n          components[0]):\n        self.reference.type = reference_type_enum.CLIENT\n        self.reference.client.client_id = components[0]\n      else:\n        if notification.subject:\n          path = notification.subject.Path()\n          for prefix in itervalues(rdf_paths.PathSpec.AFF4_PREFIXES):\n            part = \"/%s%s\" % (components[0], prefix)\n            if path.startswith(part):\n              self.reference.type = reference_type_enum.VFS\n              self.reference.vfs.client_id = components[0]\n              self.reference.vfs.vfs_path = (prefix +\n                                             path[len(part):]).lstrip(\"/\")\n              break\n\n        if self.reference.type != reference_type_enum.VFS:\n          self.reference.type = reference_type_enum.UNKNOWN\n          self.reference.unknown.subject_urn = notification.subject\n\n    elif legacy_type == \"FlowStatus\":\n      if not components or not rdf_client.ClientURN.Validate(components[0]):\n        self.reference.type = reference_type_enum.UNKNOWN\n        self.reference.unknown.subject_urn = notification.subject\n      else:\n        self.reference.type = reference_type_enum.FLOW\n        self.reference.flow.flow_id = notification.source.Basename()\n        self.reference.flow.client_id = components[0]\n\n    \n    \n    elif legacy_type == \"GrantAccess\":\n      if rdf_client.ClientURN.Validate(components[1]):\n        self.reference.type = reference_type_enum.CLIENT_APPROVAL\n        self.reference.client_approval.client_id = components[1]\n        self.reference.client_approval.approval_id = components[-1]\n        self.reference.client_approval.username = components[-2]\n      elif components[1] == \"hunts\":\n        self.reference.type = reference_type_enum.HUNT_APPROVAL\n        self.reference.hunt_approval.hunt_id = components[2]\n        self.reference.hunt_approval.approval_id = components[-1]\n        self.reference.hunt_approval.username = components[-2]\n      elif components[1] == \"cron\":\n        
self.reference.type = reference_type_enum.CRON_JOB_APPROVAL\n        self.reference.cron_job_approval.cron_job_id = components[2]\n        self.reference.cron_job_approval.approval_id = components[-1]\n        self.reference.cron_job_approval.username = components[-2]\n\n    else:\n      self.reference.type = reference_type_enum.UNKNOWN\n      self.reference.unknown.subject_urn = notification.subject\n      self.reference.unknown.source_urn = notification.source\n\n    return self", "docstring": "Initializes this object from an existing notification.\n\nArgs:\nnotification: A rdfvalues.flows.Notification object.\nis_pending: Indicates whether the user has already seen this notification\nor not.\n\nReturns:\nThe current instance.", "source": "juraj-google-style"}
{"code": "def napalm_configure(task: Task, dry_run: Optional[bool]=None, filename: Optional[str]=None, configuration: Optional[str]=None, replace: bool=False) -> Result:\n    device = task.host.get_connection('napalm', task.nornir.config)\n    if replace:\n        device.load_replace_candidate(filename=filename, config=configuration)\n    else:\n        device.load_merge_candidate(filename=filename, config=configuration)\n    diff = device.compare_config()\n    dry_run = task.is_dry_run(dry_run)\n    if ((not dry_run) and diff):\n        device.commit_config()\n    else:\n        device.discard_config()\n    return Result(host=task.host, diff=diff, changed=(len(diff) > 0))", "docstring": "Loads configuration into a network devices using napalm\n\nArguments:\ndry_run: Whether to apply changes or not\nfilename: filename containing the configuration to load into the device\nconfiguration: configuration to load into the device\nreplace: whether to replace or merge the configuration\n\nReturns:\nResult object with the following attributes set:\n* changed (``bool``): whether the task is changing the system or not\n* diff (``string``): change in the system", "source": "codesearchnet"}
{"code": "def get_linux_config(browser: str) -> dict:\n    \n    \n    if browser.lower() == 'chrome':\n        cookie_file = '~/.config/google-chrome/Default/Cookies'\n    elif browser.lower() == \"chromium\":\n        cookie_file = '~/.config/chromium/Default/Cookies'\n    else:\n        raise ValueError(\"Browser must be either Chrome or Chromium.\")\n\n    \n    config = {\n        'my_pass': 'peanuts',\n        'iterations': 1,\n        'cookie_file': cookie_file,\n    }\n\n    \n    \n    try:\n        import gi\n        gi.require_version('Secret', '1')\n        from gi.repository import Secret\n    except ImportError:\n        pass\n    else:\n        flags = Secret.ServiceFlags.LOAD_COLLECTIONS\n        service = Secret.Service.get_sync(flags)\n\n        gnome_keyring = service.get_collections()\n        unlocked_keyrings = service.unlock_sync(gnome_keyring).unlocked\n\n        keyring_name = \"{} Safe Storage\".format(browser.capitalize())\n\n        for unlocked_keyring in unlocked_keyrings:\n            for item in unlocked_keyring.get_items():\n                if item.get_label() == keyring_name:\n                    item.load_secret_sync()\n                    config['my_pass'] = item.get_secret().get_text()\n                    break\n            else:\n                \n                continue\n\n            \n            break\n\n    return config", "docstring": "Get the settings for Chrome/Chromium cookies on Linux.\n\nArgs:\nbrowser: Either \"Chrome\" or \"Chromium\"\nReturns:\nConfig dictionary for Chrome/Chromium cookie decryption", "source": "juraj-google-style"}
{"code": "def record(self, flat_outputs, inference_args, input_tangents):\n    backward_function, to_record = self._backward(flat_outputs)\n    record.record_operation(self._inference_function.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)", "docstring": "Record the function call operation.\n\n_DelayedRewriteGradientFunctions supports only first-order backprop tape\ngradients (and then only when graph building). It does not work with\nhigher-order tape gradients or forward autodiff, but does work with\nhigher-order symbolic gradients (tf.gradients).\n\nArgs:\nflat_outputs: The result of running `forward`.\ninference_args: A flat list of Tensors with inference inputs to the\noperation.\ninput_tangents: A flat list of Tensors with input tangents consumed by the\noperation.", "source": "github-repos"}
{"code": "def _MultipleModulesFoundError(path, candidates):\n    assert (len(candidates) > 1)\n    params = ([path] + _StripCommonPathPrefix(candidates[:2]))\n    if (len(candidates) == 2):\n        fmt = ERROR_LOCATION_MULTIPLE_MODULES_3\n    else:\n        fmt = ERROR_LOCATION_MULTIPLE_MODULES_4\n        params.append(str((len(candidates) - 2)))\n    return (fmt, params)", "docstring": "Generates an error message to be used when multiple matches are found.\n\nArgs:\npath: The breakpoint location path that the user provided.\ncandidates: List of paths that match the user provided path. Must\ncontain at least 2 entries (throws AssertionError otherwise).\n\nReturns:\nA (format, parameters) tuple that should be used in the description\nfield of the breakpoint error status.", "source": "codesearchnet"}
{"code": "def isexe(*components):\n    \n    _path = path(*components)\n    return isfile(_path) and os.access(_path, os.X_OK)", "docstring": "Return whether a path is an executable file.\n\nArguments:\n\npath (str): Path of the file to check.\n\nExamples:\n\n>>> fs.isexe(\"/bin/ls\")\nTrue\n\n>>> fs.isexe(\"/home\")\nFalse\n\n>>> fs.isexe(\"/not/a/real/path\")\nFalse\n\nReturns:\n\nbool: True if file is executable, else false.", "source": "juraj-google-style"}
{"code": "def _find_dtype(value, preferred):\n    result = _find_dtype_helper(value, preferred)\n    if result == dtypes.int64 or result == dtypes.int32 or result is None:\n        return result\n    raise ValueError('Illegal dtype: ' + str(result))", "docstring": "Returns the preferred dtype of value or preferred if preferred != None.\n\nThis is used as an operator to pass over multiple objects in decreasing order\nof priority until there is a preferred dtype for one. For example, if you were\nadding three tensor-ish things (some tensors, some lists), and needed a\npreferred dtype, you could use this as:\n\ndef adding(a, b, c, dtype = None):\ndtype = _find_dtype(a, dtype)\ndtype = _find_dtype(b, dtype)\ndtype = _find_dtype(c, dtype)\nif dtype is None:\ndtype = tf.float32\n...Code continues here...\n\nArgs:\nvalue: a list, value, RowPartition, or tensor.\npreferred: a given dtype. If not None, this will be returned.\n\nReturns:\nan optional dtype.", "source": "github-repos"}
{"code": "def incoming_edges(self, node):\n        \n        edges = self.edges()\n        in_edges = []\n        for out_node, in_node in edges:\n            if node is in_node:\n                in_edges.append((out_node, in_node))\n        return tuple(in_edges)", "docstring": "Returns a ``tuple`` of incoming edges for a **node object**.\n\nArguments:\n\n- node(``object``) **node object** present in the graph to be queried\nfor incoming edges.", "source": "juraj-google-style"}
{"code": "def get_lock_request(name, version, patch_lock, weak=True):\n    ch = ('~' if weak else '')\n    if (patch_lock == PatchLock.lock):\n        s = ('%s%s==%s' % (ch, name, str(version)))\n        return PackageRequest(s)\n    elif ((patch_lock == PatchLock.no_lock) or (not version)):\n        return None\n    version_ = version.trim(patch_lock.rank)\n    s = ('%s%s-%s' % (ch, name, str(version_)))\n    return PackageRequest(s)", "docstring": "Given a package and patch lock, return the equivalent request.\n\nFor example, for object 'foo-1.2.1' and lock type 'lock_3', the equivalent\nrequest is '~foo-1.2'. This restricts updates to foo to patch-or-lower\nversion changes only.\n\nFor objects not versioned down to a given lock level, the closest possible\nlock is applied. So 'lock_3' applied to 'foo-1' would give '~foo-1'.\n\nArgs:\nname (str): Package name.\nversion (Version): Package version.\npatch_lock (PatchLock): Lock type to apply.\n\nReturns:\n`PackageRequest` object, or None if there is no equivalent request.", "source": "codesearchnet"}
{"code": "def _tpu_service(self):\n    if self._service:\n        return self._service\n    if not _GOOGLE_API_CLIENT_INSTALLED:\n        raise RuntimeError('Missing runtime dependency on the Google API client. Run `pip install cloud-tpu-client` to fix.')\n    credentials = self._credentials\n    if credentials is None or credentials == 'default':\n        credentials = client.GoogleCredentials.get_application_default()\n    if self._discovery_url:\n        return discovery.build('tpu', 'v1', credentials=credentials, discoveryServiceUrl=self._discovery_url, cache_discovery=False)\n    else:\n        return discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False)", "docstring": "Creates a new Cloud TPU API object.\n\nThis works around an issue where the underlying HTTP connection sometimes\ntimes out when the script has been running for too long. Other methods in\nthis object call this method to get a new API object whenever they need\nto communicate with the Cloud API.\n\nRaises:\nRuntimeError: If the dependent Python packages are missing.\n\nReturns:\nA Google Cloud TPU API object.", "source": "github-repos"}
{"code": "def build_gemini_query(self, query, extra_info):\n        \n        if 'WHERE' in query:\n            return \"{0} AND {1}\".format(query, extra_info)\n        else:\n            return \"{0} WHERE {1}\".format(query, extra_info)", "docstring": "Append sql to a gemini query\n\nArgs:\nquery(str): The gemini query\nextra_info(str): The text that should be added\n\nReturn:\nextended_query(str)", "source": "juraj-google-style"}
{"code": "def sample_variants(self, variants, sample_name, category = 'snv'):\n        \n        LOG.info('Retrieving variants for subject : {0}'.format(sample_name))\n        has_allele = re.compile('1|2') \n\n        query = {\n            '$and': [\n                {'_id' : { '$in' : variants}},\n                {'category' : category},\n                {'samples': {\n                    '$elemMatch': { 'display_name' : sample_name, 'genotype_call': { '$regex' : has_allele } }\n                }}\n            ]\n        }\n\n        result = self.variant_collection.find(query)\n        return result", "docstring": "Given a list of variants get variant objects found in a specific patient\n\nArgs:\nvariants(list): a list of variant ids\nsample_name(str): a sample display name\ncategory(str): 'snv', 'sv' ..\n\nReturns:\nresult(iterable(Variant))", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = _assert_float_dtype(dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point types are\nsupported.\n**kwargs: Additional keyword arguments.\n\nRaises:\nValueError: If the dtype is not floating point", "source": "github-repos"}
{"code": "def recipe_bulkdozer(config, recipe_timezone, account_id, dcm_profile_id, sheet_url):\n    traffic(config, {'hour': [], 'account_id': account_id, 'dcm_profile_id': dcm_profile_id, 'auth': 'user', 'sheet_url': sheet_url, 'timezone': recipe_timezone})", "docstring": "Bulkdozer is a tool that can reduce trafficking time in Campaign Manager by up\nto 80%% by providing automated bulk editing capabilities.\n\nArgs:\nrecipe_timezone (timezone) - Timezone for report dates.\naccount_id (string) - Campaign Manager Network ID (optional if profile id provided)\ndcm_profile_id (string) - Campaign Manager Profile ID (optional if account id provided)\nsheet_url (string) - Feed Sheet URL", "source": "github-repos"}
{"code": "def kron_with_controls(*matrices: np.ndarray) -> np.ndarray:\n    product = kron(*matrices)\n    for i in range(product.shape[0]):\n        for j in range(product.shape[1]):\n            if np.isnan(product[(i, j)]):\n                product[(i, j)] = (1 if (i == j) else 0)\n    return product", "docstring": "Computes the kronecker product of a sequence of matrices and controls.\n\nUse linalg.CONTROL_TAG to represent controls. Any entry of the output\nmatrix corresponding to a situation where the control is not satisfied will\nbe overwritten by identity matrix elements.\n\nThe control logic works by imbuing NaN with the meaning \"failed to meet one\nor more controls\". The normal kronecker product then spreads the per-item\nNaNs to all the entries in the product that need to be replaced by identity\nmatrix elements. This method rewrites those NaNs. Thus CONTROL_TAG can be\nthe matrix [[NaN, 0], [0, 1]] or equivalently [[NaN, NaN], [NaN, 1]].\n\nBecause this method re-interprets NaNs as control-failed elements, it won't\npropagate error-indicating NaNs from its input to its output in the way\nyou'd otherwise expect.\n\nArgs:\n*matrices: The matrices and controls to combine with the kronecker\nproduct.\n\nReturns:\nThe resulting matrix.", "source": "codesearchnet"}
{"code": "def cloud_train(train_dataset,\n                eval_dataset,\n                analysis_dir,\n                output_dir,\n                features,\n                model_type,\n                max_steps,\n                num_epochs,\n                train_batch_size,\n                eval_batch_size,\n                min_eval_frequency,\n                top_n,\n                layer_sizes,\n                learning_rate,\n                epsilon,\n                job_name,\n                job_name_prefix,\n                config):\n  \n  import google.datalab.ml as ml\n\n  if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:\n    raise ValueError('CsvDataSets must be built with a file pattern, not list '\n                     'of files.')\n\n  if file_io.file_exists(output_dir):\n    raise ValueError('output_dir already exist. Use a new output path.')\n\n  if isinstance(features, dict):\n    \n    if not file_io.file_exists(output_dir):\n      file_io.recursive_create_dir(output_dir)\n    features_file = os.path.join(output_dir, 'features_file.json')\n    file_io.write_string_to_file(\n        features_file,\n        json.dumps(features))\n  else:\n    features_file = features\n\n  if not isinstance(config, ml.CloudTrainingConfig):\n    raise ValueError('cloud should be an instance of '\n                     'google.datalab.ml.CloudTrainingConfig for cloud training.')\n\n  _assert_gcs_files([output_dir, train_dataset.input_files[0], eval_dataset.input_files[0],\n                     features_file, analysis_dir])\n\n  args = ['--train-data-paths=%s' % train_dataset.input_files[0],\n          '--eval-data-paths=%s' % eval_dataset.input_files[0],\n          '--preprocess-output-dir=%s' % analysis_dir,\n          '--transforms-file=%s' % features_file,\n          '--model-type=%s' % model_type,\n          '--max-steps=%s' % str(max_steps),\n          '--train-batch-size=%s' % str(train_batch_size),\n          '--eval-batch-size=%s' % str(eval_batch_size),\n          '--min-eval-frequency=%s' % str(min_eval_frequency),\n          '--learning-rate=%s' % str(learning_rate),\n          '--epsilon=%s' % str(epsilon)]\n  if num_epochs:\n    args.append('--num-epochs=%s' % str(num_epochs))\n  if top_n:\n    args.append('--top-n=%s' % str(top_n))\n  if layer_sizes:\n    for i in range(len(layer_sizes)):\n      args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))\n\n  job_request = {\n    'package_uris': [_package_to_staging(output_dir), _TF_GS_URL, _PROTOBUF_GS_URL],\n    'python_module': 'mltoolbox._structured_data.trainer.task',\n    'job_dir': output_dir,\n    'args': args\n  }\n  job_request.update(dict(config._asdict()))\n\n  if not job_name:\n    job_name = job_name_prefix or 'structured_data_train'\n    job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')\n  job = ml.Job.submit_training(job_request, job_name)\n  print('Job request send. View status of job at')\n  print('https:\n        _default_project())\n\n  return job", "docstring": "Train model using CloudML.\n\nSee local_train() for a description of the args.\nArgs:\nconfig: A CloudTrainingConfig object.\njob_name: Training job name. A default will be picked if None.", "source": "juraj-google-style"}
{"code": "def to_diff_dict(self) -> dict[str, Any]:\n    config_dict = self.to_dict()\n    default_config_dict = PretrainedConfig().to_dict()\n    class_config_dict = self.__class__().to_dict() if not self.has_no_defaults_at_init else {}\n    serializable_config_dict = {}\n    for key, value in config_dict.items():\n        if isinstance(getattr(self, key, None), PretrainedConfig) and key in class_config_dict and isinstance(class_config_dict[key], dict) or key in self.sub_configs:\n            diff = recursive_diff_dict(value, default_config_dict, config_obj=getattr(self, key, None))\n            if 'model_type' in value:\n                diff['model_type'] = value['model_type']\n            serializable_config_dict[key] = diff\n        elif key not in default_config_dict or key == 'transformers_version' or key == 'vocab_file' or (value != default_config_dict[key]) or (key in default_config_dict and value != class_config_dict.get(key, value)):\n            serializable_config_dict[key] = value\n    self._remove_keys_not_serialized(serializable_config_dict)\n    if '_name_or_path' in serializable_config_dict:\n        del serializable_config_dict['_name_or_path']\n    if hasattr(self, 'quantization_config'):\n        serializable_config_dict['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config\n    self.dict_torch_dtype_to_str(serializable_config_dict)\n    return serializable_config_dict", "docstring": "Removes all attributes from the configuration that correspond to the default config attributes for\nbetter readability, while always retaining the `config` attribute from the class. Serializes to a\nPython dictionary.\n\nReturns:\nDict[str, Any]: Dictionary of all the attributes that make up this configuration instance.", "source": "github-repos"}
{"code": "def service_messages(self, short_name):\n        \n\n        if short_name not in self.services:\n            raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n        return list(self.services[short_name]['state'].messages)", "docstring": "Get the messages stored for a service.\n\nArgs:\nshort_name (string): The short name of the service to get messages for\n\nReturns:\nlist(ServiceMessage): A list of the ServiceMessages stored for this service", "source": "juraj-google-style"}
{"code": "def _PrintExtractionStatusUpdateWindow(self, processing_status):\n    if self._stdout_output_writer:\n        self._ClearScreen()\n    output_text = 'plaso - {0:s} version {1:s}\\n\\n'.format(self._tool_name, plaso.__version__)\n    self._output_writer.Write(output_text)\n    self.PrintExtractionStatusHeader(processing_status)\n    table_view = views.CLITabularTableView(column_names=['Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events', 'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])\n    self._AddExtractionProcessStatusTableRow(processing_status.foreman_status, table_view)\n    for worker_status in processing_status.workers_status:\n        self._AddExtractionProcessStatusTableRow(worker_status, table_view)\n    table_view.Write(self._output_writer)\n    self._output_writer.Write('\\n')\n    if processing_status.aborted:\n        self._output_writer.Write('Processing aborted - waiting for clean up.\\n\\n')\n    if self._stdout_output_writer:\n        sys.stdout.flush()", "docstring": "Prints an extraction status update in window mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "class StackedRNNCells(Layer):\n\n    def __init__(self, cells, **kwargs):\n        super().__init__(**kwargs)\n        for cell in cells:\n            if 'call' not in dir(cell):\n                raise ValueError(f'All cells must have a `call` method. Received cell without a `call` method: {cell}')\n            if 'state_size' not in dir(cell):\n                raise ValueError(f'All cells must have a `state_size` attribute. Received cell without a `state_size`: {cell}')\n        self.cells = cells\n\n    @property\n    def state_size(self):\n        return [c.state_size for c in self.cells]\n\n    @property\n    def output_size(self):\n        if getattr(self.cells[-1], 'output_size', None) is not None:\n            return self.cells[-1].output_size\n        elif isinstance(self.cells[-1].state_size, (list, tuple)):\n            return self.cells[-1].state_size[0]\n        else:\n            return self.cells[-1].state_size\n\n    def get_initial_state(self, batch_size=None):\n        initial_states = []\n        for cell in self.cells:\n            get_initial_state_fn = getattr(cell, 'get_initial_state', None)\n            if get_initial_state_fn:\n                initial_states.append(get_initial_state_fn(batch_size=batch_size))\n            elif isinstance(cell.state_size, int):\n                initial_states.append(ops.zeros((batch_size, cell.state_size), dtype=self.compute_dtype))\n            else:\n                initial_states.append([ops.zeros((batch_size, d), dtype=self.compute_dtype) for d in cell.state_size])\n        return initial_states\n\n    def call(self, inputs, states, training=False, **kwargs):\n        new_states = []\n        for cell, states in zip(self.cells, states):\n            state_is_list = tree.is_nested(states)\n            states = list(states) if tree.is_nested(states) else [states]\n            if isinstance(cell, Layer) and cell._call_has_training_arg:\n                kwargs['training'] = training\n            else:\n                kwargs.pop('training', None)\n            cell_call_fn = cell.__call__ if callable(cell) else cell.call\n            inputs, states = cell_call_fn(inputs, states, **kwargs)\n            if len(states) == 1 and (not state_is_list):\n                states = states[0]\n            new_states.append(states)\n        if len(new_states) == 1:\n            new_states = new_states[0]\n        return (inputs, new_states)\n\n    def build(self, input_shape):\n        for cell in self.cells:\n            if isinstance(cell, Layer) and (not cell.built):\n                cell.build(input_shape)\n                cell.built = True\n            if getattr(cell, 'output_size', None) is not None:\n                output_dim = cell.output_size\n            elif isinstance(cell.state_size, (list, tuple)):\n                output_dim = cell.state_size[0]\n            else:\n                output_dim = cell.state_size\n            batch_size = tree.flatten(input_shape)[0]\n            input_shape = (batch_size, output_dim)\n\n    def get_config(self):\n        cells = []\n        for cell in self.cells:\n            cells.append(serialization_lib.serialize_keras_object(cell))\n        config = {'cells': cells}\n        base_config = super().get_config()\n        return {**base_config, **config}\n\n    @classmethod\n    def from_config(cls, config, custom_objects=None):\n        cells = []\n        for cell_config in config.pop('cells'):\n            cells.append(serialization_lib.deserialize_keras_object(cell_config, 
custom_objects=custom_objects))\n        return cls(cells, **config)", "docstring": "Wrapper allowing a stack of RNN cells to behave as a single cell.\n\nUsed to implement efficient stacked RNNs.\n\nArgs:\ncells: List of RNN cell instances.\n\nExample:\n\n```python\nbatch_size = 3\nsentence_length = 5\nnum_features = 2\nnew_shape = (batch_size, sentence_length, num_features)\nx = np.reshape(np.arange(30), new_shape)\n\nrnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)]\nstacked_lstm = keras.layers.StackedRNNCells(rnn_cells)\nlstm_layer = keras.layers.RNN(stacked_lstm)\n\nresult = lstm_layer(x)\n```", "source": "github-repos"}
{"code": "def _update_data(self, data):\n    self.data = data\n    child_change_dict = {}\n    for name in self.children:\n        child_data = getattr(data, name, None)\n        if (child_data is None):\n            child_change_dict[name] = [[]]\n        else:\n            child_change_dict[name] = [[], child_data]\n    return child_change_dict", "docstring": "Set our data and notify any subscribers of children what has changed\n\nArgs:\ndata (object): The new data\n\nReturns:\ndict: {child_name: [path_list, optional child_data]} of the change\nthat needs to be passed to a child as a result of this", "source": "codesearchnet"}
{"code": "def _BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training=True):\n    x_dtype = x.dtype.base_dtype\n    if x_dtype == dtypes.float16 or x_dtype == dtypes.bfloat16:\n        x = math_ops.cast(x, dtypes.float32)\n        grad_y = math_ops.cast(grad_y, dtypes.float32)\n    if is_training:\n        if data_format == b'NHWC':\n            keepdims = False\n            reduce_axis = [0, 1, 2]\n        elif data_format == b'NDHWC':\n            keepdims = False\n            reduce_axis = [0, 1, 2, 3]\n        elif data_format == b'NCHW':\n            keepdims = True\n            reduce_axis = [0, 2, 3]\n            shape = [1, array_ops.size(scale), 1, 1]\n            scale = array_ops.reshape(scale, shape)\n        else:\n            keepdims = True\n            reduce_axis = [0, 2, 3, 4]\n            shape = [1, array_ops.size(scale), 1, 1, 1]\n            scale = array_ops.reshape(scale, shape)\n        mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)\n        mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)\n        var_x = math_ops.reduce_mean(math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)), reduce_axis, keepdims=keepdims)\n        grad_y_offset = grad_y - mean_grad_y\n        x_offset = x - mean_x\n        mean = math_ops.reduce_mean(grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)\n        grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)\n        grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)\n        if data_format == b'NCHW' or data_format == b'NCDHW':\n            grad_scale = array_ops.squeeze(grad_scale)\n        grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)\n        return (math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset)\n    else:\n        if data_format == b'NHWC':\n            reduce_axis = [0, 1, 2]\n        elif data_format == b'NDHWC':\n            reduce_axis = [0, 1, 2, 3]\n        elif data_format == b'NCHW':\n            reduce_axis = [0, 2, 3]\n            shape = [1, array_ops.size(pop_mean), 1, 1]\n            pop_mean = array_ops.reshape(pop_mean, shape)\n            pop_var = array_ops.reshape(pop_var, shape)\n            scale = array_ops.reshape(scale, shape)\n        else:\n            reduce_axis = [0, 2, 3, 4]\n            shape = [1, array_ops.size(pop_mean), 1, 1, 1]\n            pop_mean = array_ops.reshape(pop_mean, shape)\n            pop_var = array_ops.reshape(pop_var, shape)\n            scale = array_ops.reshape(scale, shape)\n        grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)\n        var_rsqrt = math_ops.rsqrt(pop_var + epsilon)\n        grad_scale = math_ops.reduce_sum(grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)\n        grad_x = grad_y * scale * var_rsqrt\n        return (math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset)", "docstring": "Returns the gradients for the 3 inputs of BatchNorm.\n\nArgs:\ngrad_y: A `Tensor` of 4 or 5 dimensions for gradient for y.\nx: A `Tensor` of 4 or 5 dimensions for x.\nscale: A `Tensor` of 1 dimension for scaling.\npop_mean: A `Tensor` of 1 dimension for the population mean. Only used when\nis_training=False.\npop_var: A `Tensor` of 1 dimension for the population variance. Only used\nwhen is_training=False.\nepsilon: A small float number added to the variance of x.\ndata_format: The data format for input. 
Either b\"NHWC\" or b\"NCHW\".\nis_training: A bool value to indicate the operation is for training\n(default) or inference.\n\nReturns:\nA tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient\nfor x, grad_scale the gradient for scale, and grad_offset the gradient\nfor offset.", "source": "github-repos"}
{"code": "def _predictResponseSize(mode, functioncode, payloadToSlave):\n    \n    MIN_PAYLOAD_LENGTH = 4  \n    BYTERANGE_FOR_GIVEN_SIZE = slice(2, 4)  \n\n    NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4\n    NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1\n\n    RTU_TO_ASCII_PAYLOAD_FACTOR = 2\n\n    NUMBER_OF_RTU_RESPONSE_STARTBYTES   = 2\n    NUMBER_OF_RTU_RESPONSE_ENDBYTES     = 2\n    NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5\n    NUMBER_OF_ASCII_RESPONSE_ENDBYTES   = 4\n\n    \n    _checkMode(mode)\n    _checkFunctioncode(functioncode, None)\n    _checkString(payloadToSlave, description='payload', minlength=MIN_PAYLOAD_LENGTH)\n\n    \n    if functioncode in [5, 6, 15, 16]:\n        response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION\n\n    elif functioncode in [1, 2, 3, 4]:\n        given_size = _twoByteStringToNum(payloadToSlave[BYTERANGE_FOR_GIVEN_SIZE])\n        if functioncode == 1 or functioncode == 2:\n            \n            number_of_inputs = given_size\n            response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + \\\n                                    number_of_inputs \n\n        elif functioncode == 3 or functioncode == 4:\n            number_of_registers = given_size\n            response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + \\\n                                    number_of_registers * _NUMBER_OF_BYTES_PER_REGISTER\n\n    else:\n        raise ValueError('Wrong functioncode: {}. The payload is: {!r}'.format( \\\n            functioncode, payloadToSlave))\n\n    \n    if mode == MODE_ASCII:\n        return NUMBER_OF_ASCII_RESPONSE_STARTBYTES + \\\n            response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR + \\\n            NUMBER_OF_ASCII_RESPONSE_ENDBYTES\n    else:\n        return NUMBER_OF_RTU_RESPONSE_STARTBYTES + \\\n            response_payload_size + \\\n            NUMBER_OF_RTU_RESPONSE_ENDBYTES", "docstring": "Calculate the number of bytes that should be received from the slave.\n\nArgs:\n* mode (str): The modbus protcol mode (MODE_RTU or MODE_ASCII)\n* functioncode (int): Modbus function code.\n* payloadToSlave (str): The raw request that is to be sent to the slave (not hex encoded string)\n\nReturns:\nThe preducted number of bytes (int) in the response.\n\nRaises:\nValueError, TypeError.", "source": "juraj-google-style"}
{"code": "def after_request(response):\n    \n    response.headers.add('Access-Control-Allow-Origin', '*')\n    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')\n    return response", "docstring": "Modifies the response object prior to sending it to the client. Used to add CORS headers to the request\n\nArgs:\nresponse (response): Flask response object\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def decode_bu64(b):\n    \n    s = b\n    s = s.replace(b'-', b'+')\n    s = s.replace(b'_', b'/')\n    p = len(s) % 4\n    if p == 0:\n        pass\n    elif p == 2:\n        s += b'=='\n    elif p == 3:\n        s += b'='\n    else:\n        raise ValueError('Illegal Base64url string')\n    return base64.standard_b64decode(s)", "docstring": "Encode bytes to a URL safe flavor of Base64 used by JWTs.\n\n- Reverse of encode_bu64().\n\nArgs:\nb: bytes\nURL safe Base64 encoded bytes to encode.\n\nReturns:\nbytes: Decoded bytes.", "source": "juraj-google-style"}
{"code": "def get_init_tokens_op(self, num_tokens=-1):\n    if self._gradients_applied is False:\n        raise ValueError('get_init_tokens_op() should be called after apply_gradients().')\n    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas\n    if num_tokens == -1:\n        num_tokens = self._replicas_to_aggregate\n    elif num_tokens < tokens_needed:\n        raise ValueError('Too few tokens to finish the first step: %d (given) vs %d (needed)' % (num_tokens, tokens_needed))\n    if num_tokens > 0:\n        with ops.device(self._global_step.device), ops.name_scope(''):\n            tokens = array_ops.fill([num_tokens], self._global_step)\n            init_tokens = self._sync_token_queue.enqueue_many((tokens,))\n    else:\n        init_tokens = control_flow_ops.no_op(name='no_init_tokens')\n    return init_tokens", "docstring": "Returns the op to fill the sync_token_queue with the tokens.\n\nThis is supposed to be executed in the beginning of the chief/sync thread\nso that even if the total_num_replicas is less than replicas_to_aggregate,\nthe model can still proceed as the replicas can compute multiple steps per\nvariable update. Make sure:\n`num_tokens >= replicas_to_aggregate - total_num_replicas`.\n\nArgs:\nnum_tokens: Number of tokens to add to the queue.\n\nReturns:\nAn op for the chief/sync replica to fill the token queue.\n\nRaises:\nValueError: If this is called before apply_gradients().\nValueError: If num_tokens are smaller than replicas_to_aggregate -\ntotal_num_replicas.", "source": "github-repos"}
{"code": "def _GetFieldByName(message_descriptor, field_name):\n    try:\n        return message_descriptor.fields_by_name[field_name]\n    except KeyError:\n        raise ValueError(('Protocol message %s has no \"%s\" field.' % (message_descriptor.name, field_name)))", "docstring": "Returns a field descriptor by field name.\n\nArgs:\nmessage_descriptor: A Descriptor describing all fields in message.\nfield_name: The name of the field to retrieve.\nReturns:\nThe field descriptor associated with the field name.", "source": "codesearchnet"}
{"code": "def reflection(normal, origin=(0, 0, 0)):\n        \n        \n        n = np.array(normal, dtype=float) / np.linalg.norm(normal)\n\n        u, v, w = n\n\n        translation = np.eye(4)\n        translation[0:3, 3] = -np.array(origin)\n\n        xx = 1 - 2 * u ** 2\n        yy = 1 - 2 * v ** 2\n        zz = 1 - 2 * w ** 2\n        xy = -2 * u * v\n        xz = -2 * u * w\n        yz = -2 * v * w\n        mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0],\n                      [0, 0, 0, 1]]\n\n        if np.linalg.norm(origin) > 1e-6:\n            mirror_mat = np.dot(np.linalg.inv(translation),\n                                np.dot(mirror_mat, translation))\n        return SymmOp(mirror_mat)", "docstring": "Returns reflection symmetry operation.\n\nArgs:\nnormal (3x1 array): Vector of the normal to the plane of\nreflection.\norigin (3x1 array): A point in which the mirror plane passes\nthrough.\n\nReturns:\nSymmOp for the reflection about the plane", "source": "juraj-google-style"}
{"code": "def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):\n    \n    spec = tstate.mapreduce_spec\n\n    if task_directive == self._TASK_DIRECTIVE.DROP_TASK:\n      return\n    if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,\n                          self._TASK_DIRECTIVE.RETRY_TASK):\n      \n      return self.retry_task()\n    elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:\n      logging.info(\"Aborting shard %d of job '%s'\",\n                   shard_state.shard_number, shard_state.mapreduce_id)\n      task = None\n    elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:\n      logging.critical(\"Shard %s failed permanently.\", shard_state.shard_id)\n      task = None\n    elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:\n      logging.warning(\"Shard %s is going to be attempted for the %s time.\",\n                      shard_state.shard_id,\n                      shard_state.retries + 1)\n      task = self._state_to_task(tstate, shard_state)\n    elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:\n      logging.warning(\"Shard %s slice %s is being recovered.\",\n                      shard_state.shard_id,\n                      shard_state.slice_id)\n      task = self._state_to_task(tstate, shard_state)\n    else:\n      assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK\n      countdown = self._get_countdown_for_next_slice(spec)\n      task = self._state_to_task(tstate, shard_state, countdown=countdown)\n\n    \n    queue_name = os.environ.get(\"HTTP_X_APPENGINE_QUEUENAME\",\n                                \n                                \n                                \"default\")\n    config = util.create_datastore_write_config(spec)\n\n    @db.transactional(retries=5)\n    def _tx():\n      \n      fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)\n      if not fresh_shard_state:\n        raise db.Rollback()\n      if (not fresh_shard_state.active or\n          \"worker_active_state_collision\" in _TEST_INJECTED_FAULTS):\n        logging.warning(\"Shard %s is not active. Possible spurious task \"\n                        \"execution. Dropping this task.\", tstate.shard_id)\n        logging.warning(\"Datastore's %s\", str(fresh_shard_state))\n        logging.warning(\"Slice's %s\", str(shard_state))\n        return\n      fresh_shard_state.copy_from(shard_state)\n      fresh_shard_state.put(config=config)\n      \n      \n      \n      \n      if fresh_shard_state.active:\n        \n        \n        self._add_task(task, spec, queue_name)\n\n    try:\n      _tx()\n    except (datastore_errors.Error,\n            taskqueue.Error,\n            runtime.DeadlineExceededError,\n            apiproxy_errors.Error), e:\n      logging.warning(\n          \"Can't transactionally continue shard. \"\n          \"Will retry slice %s %s for the %s time.\",\n          tstate.shard_id,\n          tstate.slice_id,\n          self.task_retry_count() + 1)\n      self._try_free_lease(shard_state)\n      raise e", "docstring": "Save state and schedule task.\n\nSave shard state to datastore.\nSchedule next slice if needed.\nSet HTTP response code.\nNo modification to any shard_state or tstate.\n\nArgs:\nshard_state: model.ShardState for current shard.\ntstate: model.TransientShardState for current shard.\ntask_directive: enum _TASK_DIRECTIVE.\n\nReturns:\nThe task to retry if applicable.", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    raise NotImplementedError", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor.\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def tarfile_extract(fileobj, dest_path):\n    tar = tarfile.open(mode='r|', fileobj=fileobj, bufsize=pipebuf.PIPE_BUF_BYTES)\n    dest_path = os.path.realpath(dest_path)\n    extracted_files = []\n    for member in tar:\n        assert (not member.name.startswith('/'))\n        relpath = os.path.join(dest_path, member.name)\n        if member.issym():\n            target_path = os.path.join(dest_path, member.name)\n            try:\n                os.symlink(member.linkname, target_path)\n            except OSError as e:\n                if (e.errno == errno.EEXIST):\n                    os.remove(target_path)\n                    os.symlink(member.linkname, target_path)\n                else:\n                    raise\n            continue\n        if (member.isreg() and (member.size >= pipebuf.PIPE_BUF_BYTES)):\n            cat_extract(tar, member, relpath)\n        else:\n            tar.extract(member, path=dest_path)\n        filename = os.path.realpath(relpath)\n        extracted_files.append(filename)\n        if (len(extracted_files) > 1000):\n            _fsync_files(extracted_files)\n            del extracted_files[:]\n    tar.close()\n    _fsync_files(extracted_files)", "docstring": "Extract a tarfile described by a file object to a specified path.\n\nArgs:\nfileobj (file): File object wrapping the target tarfile.\ndest_path (str): Path to extract the contents of the tarfile to.", "source": "codesearchnet"}
{"code": "def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:\n    \n    logger.debug(\"Yielding from append iterator\")\n    if not isinstance(variables, list):\n        raise ValueError(\n            f\"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}\"\n        )\n\n    \n    yield list(\n        chain.from_iterable(\n            variable_matrix(item, parent, \"product\") for item in variables\n        )\n    )", "docstring": "This successively appends each element of an array to a single list of values.\n\nThis takes a list of values and puts all the values generated for each element in\nthe list into a single list of values. It uses the :func:`itertools.chain` function to\nachieve this. This function is particularly useful for specifying multiple types of\nsimulations with different parameters.\n\nArgs:\nvariables: The variables object\nparent: Unused", "source": "juraj-google-style"}
{"code": "def process_rewards(self, rewards):\n    (min_reward, max_reward) = self.reward_range\n    rewards = np.clip(rewards, min_reward, max_reward)\n    rewards = np.around(rewards, decimals=0).astype(np.int64)\n    return rewards", "docstring": "Clips, rounds, and changes to integer type.\n\nArgs:\nrewards: numpy array of raw (float) rewards.\n\nReturns:\nprocessed_rewards: numpy array of np.int64", "source": "codesearchnet"}
{"code": "def get_arrays(self, type_img):\n        \n\n        if type_img.lower() == 'lola':\n            return LolaMap(self.ppdlola, *self.window, path_pdsfile=self.path_pdsfiles).image()\n        elif type_img.lower() == 'wac':\n            return WacMap(self.ppdwac, *self.window, path_pdsfile=self.path_pdsfiles).image()\n        else:\n            raise ValueError('The img type has to be either \"Lola\" or \"Wac\"')", "docstring": "Return arrays the region of interest\n\nArgs:\ntype_img (str): Either lola or wac.\n\nReturns:\nA tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\nlongitudes, ``Y`` contains the latitude and ``Z`` the values\nextracted for the region of interest.\n\nNote:\nThe argument has to be either lola or wac. Note case sensitive.\nAll return arrays have the same size.\n\nAll coordinates are in degree.", "source": "juraj-google-style"}
{"code": "def _get_object_checkpoint_renames(path, variable_names):\n    fname = checkpoint_utils._get_checkpoint_filename(path)\n    try:\n        names_to_keys = saver_lib.object_graph_key_mapping(fname)\n    except errors.NotFoundError:\n        return {}\n    missing_names = set(variable_names) - set(names_to_keys.keys())\n    if missing_names:\n        raise ValueError('Attempting to warm-start from an object-based checkpoint, but found that the checkpoint did not contain values for all variables. The following variables were missing: {}'.format(missing_names))\n    return {name: names_to_keys[name] for name in variable_names}", "docstring": "Returns a dictionary mapping variable names to checkpoint keys.\n\nThe warm-starting utility expects variable names to match with the variable\nnames in the checkpoint. For object-based checkpoints, the variable names\nand names in the checkpoint are different. Thus, for object-based checkpoints,\nthis function is used to obtain the map from variable names to checkpoint\nkeys.\n\nArgs:\npath: path to checkpoint directory or file.\nvariable_names: list of variable names to load from the checkpoint.\n\nReturns:\nIf the checkpoint is object-based, this function returns a map from variable\nnames to their corresponding checkpoint keys.\nIf the checkpoint is name-based, this returns an empty dict.\n\nRaises:\nValueError: If the object-based checkpoint is missing variables.", "source": "github-repos"}
{"code": "def emit(self, record):\n        \n        record.task = self.cur_task\n\n        if record.levelno >= self.dump_level and self.cur_task:\n            self.tasks[self.cur_task].failed = True\n            self.tasks[self.cur_task].force_show = True\n\n        \n        is_start = START_TASK_REG.match(str(record.msg))\n        if is_start:\n            self.handle_new_task(is_start.groupdict()['task_name'], record)\n            return\n\n        is_end = END_TASK_REG.match(str(record.msg))\n        if is_end:\n            self.handle_closed_task(is_end.groupdict()['task_name'], record)\n            return\n\n        force_show_record = ALWAYS_SHOW_REG.match(str(record.msg))\n        if force_show_record:\n            record.msg = force_show_record.groupdict()['message']\n            self.pretty_emit(record)\n\n        if (\n            not force_show_record and self.should_show_by_level(record)\n            and self.should_show_by_depth()\n        ):\n            self.pretty_emit(record)\n            return\n\n        if self.cur_task:\n            self.tasks[self.cur_task].append(record)", "docstring": "Handle the given record, this is the entry point from the python\nlogging facility\n\nParams:\nrecord (logging.LogRecord): log record to handle\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def indent(self, node, dirty=True):\n        \n        if node.subitems:\n            return\n\n        self._subitems[node.id] = node\n        node.super_list_item_id = self.id\n        node.parent_item = self\n        if dirty:\n            node.touch(True)", "docstring": "Indent an item. Does nothing if the target has subitems.\n\nArgs:\nnode (gkeepapi.node.ListItem): Item to indent.\ndirty (bool): Whether this node should be marked dirty.", "source": "juraj-google-style"}
{"code": "def __init__(self, details):\n\t\t\n\n\t\t\n\t\tif not isinstance(details, dict):\n\t\t\traise ValueError('details')\n\n\t\t\n\t\tif '__hash__' not in details:\n\t\t\traise KeyError('__hash__')\n\n\t\t\n\t\tif '__optional__' in details:\n\t\t\tbOptional = details['__optional__']\n\t\t\tdel details['__optional__']\n\t\telse:\n\t\t\tbOptional = None\n\n\t\t\n\t\t\n\t\tif details['__hash__'] is True:\n\t\t\tdetails['__hash__'] = {\"__type__\":\"string\"}\n\n\t\t\n\t\tself._key = Node(details['__hash__'])\n\n\t\t\n\t\tdel details['__hash__']\n\n\t\t\n\t\tself._node = _child(details)\n\n\t\t\n\t\tif bOptional:\n\t\t\tdetails['__optional__'] = bOptional\n\n\t\t\n\t\tsuper(HashNode, self).__init__(details, 'HashNode')", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of values allowed for\nthe node\n\nRaises:\nKeyError\nValueError\n\nReturns:\nHashNode", "source": "juraj-google-style"}
{"code": "def validate(bo, error_level: str = \"WARNING\") -> Tuple[bool, List[Tuple[str, str]]]:\n    \n\n    if bo.ast:\n        bo = validate_functions(bo.ast, bo)  \n        if error_level == \"WARNING\":\n            bo = validate_arg_values(bo.ast, bo)  \n\n    else:\n        bo.validation_messages.append((\"ERROR\", \"Invalid BEL Statement - cannot parse\"))\n\n    for msg in bo.validation_messages:\n        if msg[0] == \"ERROR\":\n            bo.parse_valid = False\n            break\n\n    return bo", "docstring": "Semantically validate BEL AST\n\nAdd errors and warnings to bel_obj.validation_messages\n\nError Levels are similar to log levels - selecting WARNING includes both\nWARNING and ERROR, selecting ERROR just includes ERROR\n\nArgs:\nbo: main BEL language object\nerror_level: return ERRORs only or also WARNINGs\n\nReturns:\nTuple[bool, List[Tuple[str, str]]]: (is_valid, messages)", "source": "juraj-google-style"}
{"code": "def verify_tensor_all_finite(t=None, msg=None, name=None, x=None, message=None):\n    x = deprecation.deprecated_argument_lookup('x', x, 't', t)\n    message = deprecation.deprecated_argument_lookup('message', message, 'msg', msg)\n    return verify_tensor_all_finite_v2(x, message, name)", "docstring": "Assert that the tensor does not contain any NaN's or Inf's.\n\nArgs:\nt: Tensor to check.\nmsg: Message to log on failure.\nname: A name for this operation (optional).\nx: Alias for t.\nmessage: Alias for msg.\n\nReturns:\nSame tensor as `t`.", "source": "github-repos"}
{"code": "def get_svg_layers(svg_sources):\n    layers = []\n    (width, height) = (None, None)\n\n    def extract_length(attr):\n        'Extract length in pixels.'\n        match = CRE_MM_LENGTH.match(attr)\n        if match:\n            return (INKSCAPE_PPmm.magnitude * float(match.group('length')))\n        else:\n            return float(attr)\n    for svg_source_i in svg_sources:\n        xml_root = etree.parse(svg_source_i)\n        svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]\n        width = max(extract_length(svg_root.attrib['width']), width)\n        height = max(extract_length(svg_root.attrib['height']), height)\n        layers += svg_root.xpath('\n    for (i, layer_i) in enumerate(layers):\n        layer_i.attrib['id'] = ('layer%d' % (i + 1))\n    return ((width, height), layers)", "docstring": "Collect layers from input svg sources.\n\nArgs:\n\nsvg_sources (list) : A list of file-like objects, each containing\none or more XML layers.\n\nReturns\n-------\n(width, height), layers : (int, int), list\nThe first item in the tuple is the shape of the largest layer, and the\nsecond item is a list of ``Element`` objects (from :mod:`lxml.etree`\nmodule), one per SVG layer.", "source": "codesearchnet"}
{"code": "def normalize_keypoints(keypoints: torch.Tensor, height: int, width: int) -> torch.Tensor:\n    size = torch.tensor([width, height], device=keypoints.device, dtype=keypoints.dtype)[None]\n    center = size / 2\n    scaling = size.max(1, keepdim=True).values * 0.7\n    return (keypoints - center[:, None, :]) / scaling[:, None, :]", "docstring": "Normalize keypoints locations based on image image_shape\n\nArgs:\nkeypoints (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`):\nKeypoints locations in (x, y) format.\nheight (`int`):\nImage height.\nwidth (`int`):\nImage width.\n\nReturns:\nNormalized keypoints locations of shape (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`).", "source": "github-repos"}
{"code": "def read_string_array(self, key, embedded=True):\n        \n        data = None\n        if key is not None:\n            key_type = self.variable_type(key)\n            data = self.db.read(key.strip())\n            if embedded:\n                data = self.read_embedded(data, key_type)\n            if data is not None:\n                try:\n                    data = json.loads(data, object_pairs_hook=OrderedDict)\n                except ValueError as e:\n                    err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)\n                    self.tcex.log.error(err)\n                    self.tcex.message_tc(err)\n                    self.tcex.exit(1)\n        else:\n            self.tcex.log.warning(u'The key field was None.')\n        return data", "docstring": "Read method of CRUD operation for string array data.\n\nArgs:\nkey (string): The variable to read from the DB.\nembedded (boolean): Resolve embedded variables.\n\nReturns:\n(list): Results retrieved from DB.", "source": "juraj-google-style"}
{"code": "def _OpenFile(self, path):\n    \n    if not self._registry_file_reader:\n      return None\n\n    return self._registry_file_reader.Open(\n        path, ascii_codepage=self._ascii_codepage)", "docstring": "Opens a Windows Registry file.\n\nArgs:\npath (str): path of the Windows Registry file.\n\nReturns:\nWinRegistryFile: Windows Registry file or None if not available.", "source": "juraj-google-style"}
{"code": "def _validate_oneof_field_multi_mapping(src_pb, dest_pb, ignored_fields):\n    ignored_fields_set = set(ignored_fields)\n    src_oneof_names_dict = src_pb.DESCRIPTOR.oneofs_by_name\n    dest_oneof_dict = _get_fields_to_oneof_dict(dest_pb.DESCRIPTOR.oneofs_by_name)\n    dest_field_names = set(dest_pb.DESCRIPTOR.fields_by_name.keys())\n    for src_oneof_name, src_oneof_field in src_oneof_names_dict.items():\n        mapped_field = set()\n        for src_field in src_oneof_field.fields:\n            src_field_name = src_field.name\n            if src_field_name in ignored_fields_set:\n                continue\n            if src_field_name in dest_oneof_dict:\n                mapped_field.add(dest_oneof_dict[src_field_name])\n            elif src_field_name in dest_field_names:\n                mapped_field.add(src_field_name)\n        if len(mapped_field) > 1:\n            raise NotImplementedError('Oneof field {} in proto {} maps to more than one field, all fields in the oneof must be explicitly handled or ignored.'.format(src_oneof_name, src_pb.DESCRIPTOR.name))", "docstring": "Validates if the oneof field on src_pb maps to multiple fields.\n\nArgs:\nsrc_pb: the proto to check oneof from.\ndest_pb: the proto to check oneof against.\nignored_fields: fields that skip the check.\nException: Raises NotImplementedError if any oneof field in src_pb maps to\nmultiple fields from dest_pb.", "source": "github-repos"}
{"code": "def is_likely_link(text):\n    text = text.lower()\n    if (text.startswith('http:\n        return True\n    (dummy, dot, file_extension) = text.rpartition('.')\n    if (dot and file_extension and (len(file_extension) <= 4)):\n        file_extension_set = frozenset(file_extension)\n        if (file_extension_set and (file_extension_set <= ALPHANUMERIC_CHARS) and (not (file_extension_set <= NUMERIC_CHARS))):\n            if (file_extension in COMMON_TLD):\n                return False\n            file_type = mimetypes.guess_type(text, strict=False)[0]\n            if file_type:\n                return True\n            else:\n                return False", "docstring": "Return whether the text is likely to be a link.\n\nThis function assumes that leading/trailing whitespace has already been\nremoved.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def GetMerger(self, cls):\n    for merger in self._mergers:\n        if isinstance(merger, cls):\n            return merger\n    raise LookupError('No matching DataSetMerger found')", "docstring": "Looks for an added DataSetMerger derived from the given class.\n\nArgs:\ncls: A class derived from DataSetMerger.\n\nReturns:\nThe matching DataSetMerger instance.\n\nRaises:\nLookupError: No matching DataSetMerger has been added.", "source": "codesearchnet"}
{"code": "def __init__(self, nrows=None, nvals=None, uniform_row_length=None, dtype=dtypes.int64):\n    nrows = tensor_shape.TensorShape([nrows])\n    nvals = tensor_shape.TensorShape([nvals])\n    if not isinstance(uniform_row_length, tensor_shape.TensorShape):\n        uniform_row_length = tensor_shape.TensorShape([uniform_row_length])\n    else:\n        uniform_row_length = uniform_row_length.with_rank(1)\n    self._nrows = nrows\n    self._nvals = nvals\n    self._uniform_row_length = uniform_row_length\n    self._dtype = dtypes.as_dtype(dtype)\n    if self._dtype not in (dtypes.int32, dtypes.int64):\n        raise ValueError('dtype must be tf.int32 or tf.int64')\n    nrows = tensor_shape.dimension_value(nrows[0])\n    nvals = tensor_shape.dimension_value(nvals[0])\n    ncols = tensor_shape.dimension_value(uniform_row_length[0])\n    if nrows == 0:\n        if nvals is None:\n            self._nvals = tensor_shape.TensorShape([0])\n        elif nvals != 0:\n            raise ValueError('nvals=%s is not compatible with nrows=%s' % (nvals, nrows))\n    if ncols == 0:\n        if nvals is None:\n            self._nvals = tensor_shape.TensorShape([0])\n        elif nvals != 0:\n            raise ValueError('nvals=%s is not compatible with uniform_row_length=%s' % (nvals, uniform_row_length))\n    if ncols is not None and nvals is not None:\n        if ncols != 0 and nvals % ncols != 0:\n            raise ValueError(\"nvals=%s is not compatible with uniform_row_length=%s (doesn't divide evenly)\" % (nvals, ncols))\n        if nrows is not None and nvals != ncols * nrows:\n            raise ValueError('nvals=%s is not compatible with nrows=%s and uniform_row_length=%s' % (nvals, nrows, ncols))\n        if nrows is None and ncols != 0:\n            self._nrows = tensor_shape.TensorShape([nvals \n    if ncols is not None and nrows is not None and (nvals is None):\n        self._nvals = tensor_shape.TensorShape([ncols * nrows])", "docstring": "Constructs a new RowPartitionSpec.\n\nArgs:\nnrows: The number of rows in the RowPartition, or `None` if unspecified.\nnvals: The number of values partitioned by the RowPartition, or `None` if\nunspecified.\nuniform_row_length: The number of values in each row for this\nRowPartition, or `None` if rows are ragged or row length is unspecified.\ndtype: The data type used to encode the partition.  One of `tf.int64` or\n`tf.int32`.", "source": "github-repos"}
{"code": "def _check_dep(self, depinfo, deptile, resolver):\n    try:\n        settings = self._load_depsettings(deptile)\n    except IOError:\n        return False\n    if (settings['resolver'] != resolver.__class__.__name__):\n        return None\n    resolver_settings = {}\n    if ('settings' in settings):\n        resolver_settings = settings['settings']\n    return resolver.check(depinfo, deptile, resolver_settings)", "docstring": "Check if a dependency tile is up to date\n\nReturns:\nbool: True if it is up to date, False if it not and None if this resolver\ncannot assess whether or not it is up to date.", "source": "codesearchnet"}
{"code": "def git_clone(prettyname: str, url: str, directory: str,\n              branch: str = None,\n              commit: str = None,\n              clone_options: List[str] = None,\n              run_func: Callable[[List[str]], Any] = None) -> bool:\n    \n    run_func = run_func or subprocess.check_call\n    clone_options = clone_options or []  \n    if os.path.isdir(directory):\n        log.info(\"Not re-cloning {} Git repository: using existing source \"\n                 \"in {}\".format(prettyname, directory))\n        return False\n    log.info(\"Fetching {} source from {} into {}\",\n             prettyname, url, directory)\n    require_executable(GIT)\n    gitargs = [GIT, \"clone\"] + clone_options\n    if branch:\n        gitargs += [\"--branch\", branch]\n    gitargs += [url, directory]\n    run_func(gitargs)\n    if commit:\n        log.info(\"Resetting {} local Git repository to commit {}\",\n                 prettyname, commit)\n        run_func([GIT,\n                  \"-C\", directory,\n                  \"reset\", \"--hard\", commit])\n        \n        \n    return True", "docstring": "Fetches a Git repository, unless we have it already.\n\nArgs:\nprettyname: name to display to user\nurl: URL\ndirectory: destination directory\nbranch: repository branch\ncommit: repository commit tag\nclone_options: additional options to pass to ``git clone``\nrun_func: function to use to call an external command\n\nReturns:\ndid we need to do anything?", "source": "juraj-google-style"}
{"code": "def _GetDistinctValues(self, field_name):\n    self._cursor.execute('SELECT {0:s}, COUNT({0:s}) FROM log2timeline GROUP BY {0:s}'.format(field_name))\n    result = {}\n    row = self._cursor.fetchone()\n    while row:\n        if row[0]:\n            result[row[0]] = row[1]\n        row = self._cursor.fetchone()\n    return result", "docstring": "Query database for unique field types.\n\nArgs:\nfield_name (str): name of the filed to retrieve.\n\nReturns:\ndict[str, int]: counts of field types by name.", "source": "codesearchnet"}
{"code": "def HasStorage(self):\n    from neo.Core.State.ContractState import ContractPropertyState\n    return ((self.ContractProperties & ContractPropertyState.HasStorage) > 0)", "docstring": "Flag indicating if storage is available.\n\nReturns:\nbool: True if available. False otherwise.", "source": "codesearchnet"}
{"code": "def download_artifact_bundle(self, id_or_uri, file_path):\n    uri = ((self.DOWNLOAD_PATH + '/') + extract_id_from_uri(id_or_uri))\n    return self._client.download(uri, file_path)", "docstring": "Download the Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\nfile_path(str): Destination file path.\n\nReturns:\nbool: Successfully downloaded.", "source": "codesearchnet"}
{"code": "def time_range_to_frame_range(self, start, end, sr):\n    start_sample = seconds_to_sample(start, sr)\n    end_sample = seconds_to_sample(end, sr)\n    return (self.sample_to_frame_range(start_sample)[0], self.sample_to_frame_range((end_sample - 1))[1])", "docstring": "Calculate the frames containing samples from the given time range in seconds.\n\nArgs:\nstart (float): Start time in seconds.\nend (float): End time in seconds.\nsr (int): The sampling rate to use for time-to-sample conversion.\n\nReturns:\ntuple: A tuple containing the start and end (exclusive) frame indices.", "source": "codesearchnet"}
{"code": "def canonicalize(self, namespace_targets: Mapping[(str, List[str])]=None) -> 'BEL':\n    if (not self.ast):\n        return self\n    if (not self.ast.collected_nsarg_norms):\n        self = self.collect_nsarg_norms()\n    self.ast.canonicalize()\n    return self", "docstring": "Takes an AST and returns a canonicalized BEL statement string.\n\nArgs:\nnamespace_targets (Mapping[str, List[str]]): override default canonicalization\nsettings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings\n\nReturns:\nBEL: returns self", "source": "codesearchnet"}
{"code": "def mod(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Mod().symbolic_call(x1, x2)\n    return backend.numpy.mod(x1, x2)", "docstring": "Returns the element-wise remainder of division.\n\nArgs:\nx1: First tensor.\nx2: Second tensor.\n\nReturns:\nOutput tensor, element-wise remainder of division.", "source": "github-repos"}
{"code": "async def snap(self, user=None, view=None):\n    if (view is None):\n        view = self.view\n    if (user is None):\n        user = self.auth.getUserByName('root')\n    snap = (await view.snap(user))\n    return snap", "docstring": "Return a transaction object for the default view.\n\nArgs:\nwrite (bool): Set to True for a write transaction.\n\nReturns:\n(synapse.lib.snap.Snap)\n\nNOTE: This must be used in a with block.", "source": "codesearchnet"}
{"code": "def set_json(self, obj, status=HttpStatusCodes.HTTP_200):\n        \n        obj = json.dumps(obj, sort_keys=True, default=lambda x: str(x))\n        self.set_status(status)\n        self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json')\n        self.set_content(obj)", "docstring": "Helper method to set a JSON response.\n\nArgs:\nobj (:obj:`object`): JSON serializable object\nstatus (:obj:`str`, optional): Status code of the response", "source": "juraj-google-style"}
{"code": "def placeOrder(self, contract: Contract, order: Order) -> Trade:\n    orderId = (order.orderId or self.client.getReqId())\n    self.client.placeOrder(orderId, contract, order)\n    now = datetime.datetime.now(datetime.timezone.utc)\n    key = self.wrapper.orderKey(self.wrapper.clientId, orderId, order.permId)\n    trade = self.wrapper.trades.get(key)\n    if trade:\n        assert (trade.orderStatus.status not in OrderStatus.DoneStates)\n        logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')\n        trade.log.append(logEntry)\n        self._logger.info(f'placeOrder: Modify order {trade}')\n        trade.modifyEvent.emit(trade)\n        self.orderModifyEvent.emit(trade)\n    else:\n        order.clientId = self.wrapper.clientId\n        order.orderId = orderId\n        orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)\n        logEntry = TradeLogEntry(now, orderStatus.status, '')\n        trade = Trade(contract, order, orderStatus, [], [logEntry])\n        self.wrapper.trades[key] = trade\n        self._logger.info(f'placeOrder: New order {trade}')\n        self.newOrderEvent.emit(trade)\n    return trade", "docstring": "Place a new order or modify an existing order.\nReturns a Trade that is kept live updated with\nstatus changes, fills, etc.\n\nArgs:\ncontract: Contract to use for order.\norder: The order to be placed.", "source": "codesearchnet"}
{"code": "def _ParseLastRunTime(self, parser_mediator, fixed_length_section):\n    \n    systemtime_struct = fixed_length_section.last_run_time\n    system_time_tuple = (\n        systemtime_struct.year, systemtime_struct.month,\n        systemtime_struct.weekday, systemtime_struct.day_of_month,\n        systemtime_struct.hours, systemtime_struct.minutes,\n        systemtime_struct.seconds, systemtime_struct.milliseconds)\n\n    date_time = None\n    if system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE:\n      try:\n        date_time = dfdatetime_systemtime.Systemtime(\n            system_time_tuple=system_time_tuple)\n      except ValueError:\n        parser_mediator.ProduceExtractionWarning(\n            'invalid last run time: {0!s}'.format(system_time_tuple))\n\n    return date_time", "docstring": "Parses the last run time from a fixed-length data section.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfixed_length_section (job_fixed_length_data_section): a Windows\nScheduled Task job fixed-length data section.\n\nReturns:\ndfdatetime.DateTimeValues: last run date and time or None if not\navailable.", "source": "juraj-google-style"}
{"code": "def add_notification_listener(self, notification_type, notification_callback):\n    if (notification_type not in self.notifications):\n        self.notifications[notification_type] = [(self.notification_id, notification_callback)]\n    else:\n        if (reduce((lambda a, b: (a + 1)), filter((lambda tup: (tup[1] == notification_callback)), self.notifications[notification_type]), 0) > 0):\n            return (- 1)\n        self.notifications[notification_type].append((self.notification_id, notification_callback))\n    ret_val = self.notification_id\n    self.notification_id += 1\n    return ret_val", "docstring": "Add a notification callback to the notification center.\n\nArgs:\nnotification_type: A string representing the notification type from .helpers.enums.NotificationTypes\nnotification_callback: closure of function to call when event is triggered.\n\nReturns:\nInteger notification id used to remove the notification or -1 if the notification has already been added.", "source": "codesearchnet"}
{"code": "def list_permissions(self, group_name=None, resource=None):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.list_permissions(group_name, resource)", "docstring": "List permission sets associated filtering by group and/or resource.\n\nArgs:\ngroup_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data\nmodel object to operate on.\n\nReturns:\n(list): List of permissions.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _Upgrade2To3(self, data):\n    buffers = [{'data': []}]\n    for subgraph in data['subgraphs']:\n        if 'tensors' not in subgraph:\n            continue\n        for tensor in subgraph['tensors']:\n            if 'data_buffer' not in tensor:\n                tensor['buffer'] = 0\n            else:\n                if tensor['data_buffer']:\n                    tensor[u'buffer'] = len(buffers)\n                    buffers.append({'data': tensor['data_buffer']})\n                else:\n                    tensor['buffer'] = 0\n                del tensor['data_buffer']\n    data['buffers'] = buffers", "docstring": "Upgrade data from Version 2 to Version 3.\n\nChanged actual read-only tensor data to be in a buffers table instead\nof inline with the tensor.\n\nArgs:\ndata: Dictionary representing the TensorFlow lite data to be upgraded.\nThis will be modified in-place to be an upgraded version.", "source": "github-repos"}
{"code": "def resume(self, email, master_token, state=None, sync=True):\n        \n        auth = APIAuth(self.OAUTH_SCOPES)\n\n        ret = auth.load(email, master_token, android_id=get_mac())\n        if ret:\n            self.load(auth, state, sync)\n\n        return ret", "docstring": "Authenticate to Google with the provided master token & sync.\n\nArgs:\nemail (str): The account to use.\nmaster_token (str): The master token.\nstate (dict): Serialized state to load.\n\nRaises:\nLoginException: If there was a problem logging in.", "source": "juraj-google-style"}
{"code": "class Embedding:\n    dense_embedding: Optional[List[float]] = None\n    sparse_embedding: Optional[Tuple[List[int], List[float]]] = None", "docstring": "Represents vector embeddings.\n\nArgs:\ndense_embedding: Dense vector representation\nsparse_embedding: Optional sparse vector representation for hybrid\nsearch", "source": "github-repos"}
{"code": "def VisitFunction(self, f):\n    signatures = tuple((ex for s in f.signatures for ex in ExpandSignature(s)))\n    return f.Replace(signatures=signatures)", "docstring": "Rebuild the function with the new signatures.\n\nThis is called after its children (i.e. when VisitSignature has already\nconverted each signature into a list) and rebuilds the function using the\nnew signatures.\n\nArguments:\nf: A pytd.Function instance.\n\nReturns:\nFunction with the new signatures.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n    self.NewSession = channel.unary_unary('/tensorflow.ProfileAnalysis/NewSession', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionResponse.FromString)\n    self.EnumSessions = channel.unary_unary('/tensorflow.ProfileAnalysis/EnumSessions', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsResponse.FromString)\n    self.GetSessionToolData = channel.unary_unary('/tensorflow.ProfileAnalysis/GetSessionToolData', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataResponse.FromString)", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "github-repos"}
{"code": "def traverse_inorder(self, leaves=True, internal=True):\n    c = self\n    s = deque()\n    done = False\n    while (not done):\n        if (c is None):\n            if (len(s) == 0):\n                done = True\n            else:\n                c = s.pop()\n                if ((leaves and c.is_leaf()) or (internal and (not c.is_leaf()))):\n                    (yield c)\n                if (len(c.children) == 0):\n                    c = None\n                elif (len(c.children) == 2):\n                    c = c.children[1]\n                else:\n                    raise RuntimeError(INORDER_NONBINARY)\n        else:\n            s.append(c)\n            if (len(c.children) == 0):\n                c = None\n            elif (len(c.children) == 2):\n                c = c.children[0]\n            else:\n                raise RuntimeError(INORDER_NONBINARY)", "docstring": "Perform an inorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"}
{"code": "def find_in_mailbox(cls, session, mailbox_or_id):\n        \n        if hasattr(mailbox_or_id, 'id'):\n            mailbox_or_id = mailbox_or_id.id\n        return cls(\n            '/mailboxes/%d/users.json' % mailbox_or_id,\n            session=session,\n        )", "docstring": "Get the users that are associated to a Mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox_or_id (MailboxRef or int): Mailbox of the ID of the\nmailbox to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.User): Users\niterator.", "source": "juraj-google-style"}
{"code": "def make_df_from_batch(batch_name, batch_col=\"b01\", reader=None, reader_label=None):\n    \n\n    batch_name = batch_name\n    batch_col = batch_col\n    logger.debug(f\"batch_name, batch_col: {batch_name}, {batch_col}\")\n    if reader is None:\n        reader_obj = get_db_reader(reader_label)\n        reader = reader_obj()\n\n    srnos = reader.select_batch(batch_name, batch_col)\n    logger.debug(\"srnos:\" + str(srnos))\n    info_dict = _create_info_dict(reader, srnos)\n    info_df = pd.DataFrame(info_dict)\n    info_df = info_df.sort_values([\"groups\", \"filenames\"])\n    info_df = _make_unique_groups(info_df)\n    info_df[\"labels\"] = info_df[\"filenames\"].apply(create_labels)\n    info_df.set_index(\"filenames\", inplace=True)\n\n    return info_df", "docstring": "Create a pandas DataFrame with the info needed for ``cellpy`` to load\nthe runs.\n\nArgs:\nbatch_name (str): Name of the batch.\nbatch_col (str): The column where the batch name is in the db.\nreader (method): the db-loader method.\nreader_label (str): the label for the db-loader (if db-loader method is\nnot given)\n\nReturns: info_df (pandas DataFrame)", "source": "juraj-google-style"}
{"code": "def remove_config(reset=False):\n    cmd = 'Stop-DscConfiguration'\n    log.info('DSC: Stopping Running Configuration')\n    try:\n        _pshell(cmd)\n    except CommandExecutionError as exc:\n        if (exc.info['retcode'] != 0):\n            raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info)\n        log.info('DSC: %s', exc.info['stdout'])\n    cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous -Force'\n    log.info('DSC: Removing Configuration')\n    try:\n        _pshell(cmd)\n    except CommandExecutionError as exc:\n        if (exc.info['retcode'] != 0):\n            raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info)\n        log.info('DSC: %s', exc.info['stdout'])\n    if (not reset):\n        return True\n\n    def _remove_fs_obj(path):\n        if os.path.exists(path):\n            log.info('DSC: Removing %s', path)\n            if (not __salt__['file.remove'](path)):\n                error = 'Failed to remove {0}'.format(path)\n                log.error('DSC: %s', error)\n                raise CommandExecutionError(error)\n    dsc_config_dir = '{0}\\\\System32\\\\Configuration'.format(os.getenv('SystemRoot', 'C:\\\\Windows'))\n    _remove_fs_obj('{0}\\\\DSCStatusHistory.mof'.format(dsc_config_dir))\n    _remove_fs_obj('{0}\\\\DSCEngineCache.mof'.format(dsc_config_dir))\n    _remove_fs_obj('{0}\\\\ConfigurationStatus'.format(dsc_config_dir))\n    return True", "docstring": "Remove the current DSC Configuration. Removes current, pending, and previous\ndsc configurations.\n\n.. versionadded:: 2017.7.5\n\nArgs:\nreset (bool):\nAttempts to reset the DSC configuration by removing the following\nfrom ``C:\\\\Windows\\\\System32\\\\Configuration``:\n\n- File: DSCStatusHistory.mof\n- File: DSCEngineCache.mof\n- Dir: ConfigurationStatus\n\nDefault is False\n\n.. warning::\n``remove_config`` may fail to reset the DSC environment if any\nof the files in the ``ConfigurationStatus`` directory. If you\nwait a few minutes and run again, it may complete successfully.\n\nReturns:\nbool: True if successful\n\nRaises:\nCommandExecutionError: On failure\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dsc.remove_config True", "source": "codesearchnet"}
{"code": "def json(self, json):\n        \n        self._request.json = json\n        self.add_matcher(matcher('JSONMatcher', json))", "docstring": "Defines the JSON body to match.\n\n``json`` argument can be an JSON string, a JSON serializable\nPython structure, such as a ``dict`` or ``list`` or it can be\na regular expression used to match the body.\n\nArguments:\njson (str|dict|list|regex): body JSON to match.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def get_gpus(num_gpu=1, worker_index=-1):\n  \n  \n  list_gpus = subprocess.check_output([\"nvidia-smi\", \"--list-gpus\"]).decode()\n  logging.debug(\"all GPUs:\\n{0}\".format(list_gpus))\n\n  \n  gpus = [x for x in list_gpus.split('\\n') if len(x) > 0]\n\n  def parse_gpu(gpu_str):\n    cols = gpu_str.split(' ')\n    return cols[5].split(')')[0], cols[1].split(':')[0]\n  gpu_list = [parse_gpu(gpu) for gpu in gpus]\n\n  free_gpus = []\n  retries = 0\n  while len(free_gpus) < num_gpu and retries < MAX_RETRIES:\n    smi_output = subprocess.check_output([\"nvidia-smi\", \"--format=csv,noheader,nounits\", \"--query-compute-apps=gpu_uuid\"]).decode()\n    logging.debug(\"busy GPUs:\\n{0}\".format(smi_output))\n    busy_uuids = [x for x in smi_output.split('\\n') if len(x) > 0]\n    for uuid, index in gpu_list:\n      if uuid not in busy_uuids:\n        free_gpus.append(index)\n\n    if len(free_gpus) < num_gpu:\n      logging.warn(\"Unable to find available GPUs: requested={0}, available={1}\".format(num_gpu, len(free_gpus)))\n      retries += 1\n      time.sleep(30 * retries)\n      free_gpus = []\n\n  logging.info(\"Available GPUs: {}\".format(free_gpus))\n\n  \n  if len(free_gpus) < num_gpu:\n    smi_output = subprocess.check_output([\"nvidia-smi\", \"--format=csv\", \"--query-compute-apps=gpu_uuid,pid,process_name,used_gpu_memory\"]).decode()\n    logging.info(\": {0}\".format(smi_output))\n    raise Exception(\"Unable to find {} free GPU(s)\\n{}\".format(num_gpu, smi_output))\n\n  \n  num_available = len(free_gpus)\n  if worker_index == -1:\n    \n    random.shuffle(free_gpus)\n    proposed_gpus = free_gpus[:num_gpu]\n  else:\n    \n    if worker_index * num_gpu + num_gpu > num_available:\n      worker_index = worker_index * num_gpu % num_available\n    proposed_gpus = free_gpus[worker_index * num_gpu:(worker_index * num_gpu + num_gpu)]\n  logging.info(\"Proposed GPUs: {}\".format(proposed_gpus))\n\n  return ','.join(str(x) for x in proposed_gpus)", "docstring": "Get list of free GPUs according to nvidia-smi.\n\nThis will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.\n\nArgs:\n:num_gpu: number of GPUs desired.\n:worker_index: index \"hint\" for allocation of available GPUs.\n\nReturns:\nComma-delimited string of GPU ids, or raises an Exception if the requested number of GPUs could not be found.", "source": "juraj-google-style"}
{"code": "def prettyprint_cfg_tree(root, decorate_after_node=0, full=False, forward=False):\n    if forward:\n        children = lambda node: node.outgoing\n    else:\n        children = lambda node: node.incoming\n    desc = lambda node: prettyprint_cfg_node(node, decorate_after_node, full)\n    return ascii_tree(root, get_children=children, get_description=desc)", "docstring": "Pretty print a cfg tree with the bindings at each node.\n\nArgs:\nroot: The root node.\ndecorate_after_node: Don't print bindings unless node_id > this.\nfull: Print the full string representation of a binding's data\nforward: Traverse the tree forwards if true.\n\nReturns:\nA prettyprinted tree.", "source": "github-repos"}
{"code": "def _convert_bytes_to_cc_source(data, array_name, max_line_width=80, include_guard=None, include_path=None, use_tensorflow_license=False):\n    starting_pad = '   '\n    array_lines = []\n    array_line = starting_pad\n    for value in bytearray(data):\n        if len(array_line) + 4 > max_line_width:\n            array_lines.append(array_line + '\\n')\n            array_line = starting_pad\n        array_line += ' 0x%02x,' % value\n    if len(array_line) > len(starting_pad):\n        array_lines.append(array_line + '\\n')\n    array_values = ''.join(array_lines)\n    if include_guard is None:\n        include_guard = 'TENSORFLOW_LITE_UTIL_' + array_name.upper() + '_DATA_H_'\n    if include_path is not None:\n        include_line = '\n    else:\n        include_line = ''\n    if use_tensorflow_license:\n        license_text = '\\n/* Copyright {year} The TensorFlow Authors. All Rights Reserved.\\n\\nLicensed under the Apache License, Version 2.0 (the \"License\");\\nyou may not use this file except in compliance with the License.\\nYou may obtain a copy of the License at\\n\\n    http:\n    else:\n        license_text = ''\n    source_template = '{license_text}\\n\n    source_text = source_template.format(array_name=array_name, array_length=len(data), array_values=array_values, license_text=license_text, include_line=include_line)\n    header_template = '\\n{license_text}\\n\\n\n    header_text = header_template.format(array_name=array_name, include_guard=include_guard, license_text=license_text)\n    return (source_text, header_text)", "docstring": "Returns strings representing a C++ constant array containing `data`.\n\nArgs:\ndata: Byte array that will be converted into a C++ constant.\narray_name: String to use as the variable name for the constant array.\nmax_line_width: The longest line length, for formatting purposes.\ninclude_guard: Name to use for the include guard macro definition.\ninclude_path: Optional path to include in the source file.\nuse_tensorflow_license: Whether to include the standard TensorFlow Apache2\nlicense in the generated files.\n\nReturns:\nText that can be compiled as a C++ source file to link in the data as a\nliteral array of values.\nText that can be used as a C++ header file to reference the literal array.", "source": "github-repos"}
{"code": "def create_from_binary(cls, binary_view):\n        \n        nw_obj = cls()\n        offset = 0\n        previous_dr_offset = 0\n        header_size = cls._INFO.size \n\n        while binary_view[offset] != 0:   \n            header = cls._INFO.unpack(binary_view[offset:offset+header_size])[0]\n            length_len = header & 0x0F\n            length_offset = (header & 0xF0) >> 4\n\n            temp_len = offset+header_size+length_len \n            dr_length = int.from_bytes(binary_view[offset+header_size:temp_len], \"little\", signed=False)\n            if length_offset: \n                dr_offset = int.from_bytes(binary_view[temp_len:temp_len+length_offset], \"little\", signed=True) + previous_dr_offset\n                previous_dr_offset = dr_offset\n            else: \n                dr_offset = None\n            offset += header_size + length_len + length_offset\n            nw_obj.data_runs.append((dr_length, dr_offset))\n            \n\n        _MOD_LOGGER.debug(\"DataRuns object created successfully\")\n\n        return nw_obj", "docstring": "Creates a new object DataRuns from a binary stream. The binary\nstream can be represented by a byte string, bytearray or a memoryview of the\nbytearray.\n\nArgs:\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\n\nReturns:\nDataRuns: New object using hte binary stream as source", "source": "juraj-google-style"}
{"code": "def create_nsg(access_token, subscription_id, resource_group, nsg_name, location):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,\n                        '?api-version=', NETWORK_API])\n    nsg_body = {'location': location}\n    body = json.dumps(nsg_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create network security group (use create_nsg_rule() to add rules to it).\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnsg_name (str): Name of the new NSG.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. NSG JSON body.", "source": "juraj-google-style"}
{"code": "def build_frontend(self, frontend_node):\n    proxy_name = frontend_node.frontend_header.proxy_name.text\n    service_address_node = frontend_node.frontend_header.service_address\n    config_block_lines = self.__build_config_block(frontend_node.config_block)\n    (host, port) = ('', '')\n    if isinstance(service_address_node, pegnode.ServiceAddress):\n        host = service_address_node.host.text\n        port = service_address_node.port.text\n    else:\n        for line in config_block_lines:\n            if isinstance(line, config.Bind):\n                (host, port) = (line.host, line.port)\n                break\n        else:\n            raise Exception('Not specify host and port in `frontend` definition')\n    return config.Frontend(name=proxy_name, host=host, port=port, config_block=config_block_lines)", "docstring": "parse `frontend` sections, and return a config.Frontend\n\nArgs:\nfrontend_node (TreeNode): Description\n\nRaises:\nException: Description\n\nReturns:\nconfig.Frontend: an object", "source": "codesearchnet"}
{"code": "def compose(*parameter_functions):\n  \n  def composed_fn(var_name, variable, phase):\n    for fn in parameter_functions:\n      variable = fn(var_name, variable, phase)\n    return variable\n  return composed_fn", "docstring": "Composes multiple modification functions in order.\n\nArgs:\n*parameter_functions: The functions to compose.\n\nReturns:\nA parameter modification function that consists of applying all the provided\nfunctions.", "source": "juraj-google-style"}
{"code": "def get_import(self, file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool) -> str:\n    module_import_path = _get_import_path(self.exported_symbol.file_name, file_prefixes_to_strip, module_prefix)\n    alias = ''\n    symbol_name = self.exported_symbol.symbol_name\n    if self.name != symbol_name:\n        alias = f' as {self.name}'\n    if not use_lazy_loading:\n        return f'from {module_import_path} import {symbol_name}{alias} \n    else:\n        return f\"  '{self.name}': ('{module_import_path}', '{symbol_name}'),", "docstring": "Returns the import statement for this entrypoint.\n\nArgs:\nfile_prefixes_to_strip: List of prefixes to strip from the file name.\nmodule_prefix: A prefix to add to the import.\nuse_lazy_loading: Whether to use lazy loading or not.", "source": "github-repos"}
{"code": "def extract_issuer_ca_cert_url(cert_obj):\n    for extension in cert_obj.extensions:\n        if (extension.oid.dotted_string == AUTHORITY_INFO_ACCESS_OID):\n            authority_info_access = extension.value\n            for access_description in authority_info_access:\n                if (access_description.access_method.dotted_string == CA_ISSUERS_OID):\n                    return access_description.access_location.value", "docstring": "Extract issuer CA certificate URL from certificate.\n\nCertificates may include a URL where the root certificate for the CA which was used\nfor signing the certificate can be downloaded. This function returns the URL if\npresent.\n\nThe primary use for this is to fix validation failure due to non-trusted issuer by\ndownloading the root CA certificate from the URL and installing it in the local\ntrust store.\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nstr: Issuer certificate URL if present, else None", "source": "codesearchnet"}
{"code": "def WriteEvent(self, event):\n    self.WriteEventStart()\n    try:\n        self.WriteEventBody(event)\n    except errors.NoFormatterFound as exception:\n        error_message = 'unable to retrieve formatter with error: {0!s}'.format(exception)\n        self._ReportEventError(event, error_message)\n    except errors.WrongFormatter as exception:\n        error_message = 'wrong formatter with error: {0!s}'.format(exception)\n        self._ReportEventError(event, error_message)\n    self.WriteEventEnd()", "docstring": "Writes the event to the output.\n\nArgs:\nevent (EventObject): event.", "source": "codesearchnet"}
{"code": "def parse_float(value: Any) -> Numeric:\n    return float(value)", "docstring": "Attempts to parse a valid floating point value from the provided value.\n\nArgs:\n* value: of Any type\n\nReturns:\n* float value: if valid\n\nRaises:\n* ValueError: if parsing failed", "source": "github-repos"}
{"code": "def time_to_readable_str(value_us, force_time_unit=None):\n    if not value_us:\n        return '0'\n    if force_time_unit:\n        if force_time_unit not in TIME_UNITS:\n            raise ValueError('Invalid time unit: %s' % force_time_unit)\n        order = TIME_UNITS.index(force_time_unit)\n        time_unit = force_time_unit\n        return '{:.10g}{}'.format(value_us / math.pow(10.0, 3 * order), time_unit)\n    else:\n        order = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))\n        time_unit = TIME_UNITS[order]\n        return '{:.3g}{}'.format(value_us / math.pow(10.0, 3 * order), time_unit)", "docstring": "Convert time value to human-readable string.\n\nArgs:\nvalue_us: time value in microseconds.\nforce_time_unit: force the output to use the specified time unit. Must be\nin TIME_UNITS.\n\nReturns:\nHuman-readable string representation of the time value.\n\nRaises:\nValueError: if force_time_unit value is not in TIME_UNITS.", "source": "github-repos"}
{"code": "def write_weights(file_path: str, weights: Array, features: typing.List[str]) -> None:\n    with open(file_path, 'w') as f:\n        f.write('\\n'.join(['%s\\t%.6f' % (feature, weights[i]) for i, feature in enumerate(features)]))", "docstring": "Writes learned weights and corresponsing features to a file.\n\nArgs:\nfile_path: A file path for the weights file.\nweights: A weight vector.\nfeatures: A list of feature identifiers.", "source": "github-repos"}
{"code": "def probe_services(self, handle, conn_id, callback):\n    self._command_task.async_command(['_probe_services', handle], callback, {'connection_id': conn_id, 'handle': handle})", "docstring": "Given a connected device, probe for its GATT services and characteristics\n\nArgs:\nhandle (int): a handle to the connection on the BLED112 dongle\nconn_id (int): a unique identifier for this connection on the DeviceManager\nthat owns this adapter.\ncallback (callable): Callback to be called when this procedure finishes", "source": "codesearchnet"}
{"code": "def AddFilesWithUnknownHashes(client_path_blob_refs, use_external_stores=True):\n    hash_id_blob_refs = dict()\n    client_path_hash_id = dict()\n    metadatas = dict()\n    all_client_path_blob_refs = list()\n    for (client_path, blob_refs) in iteritems(client_path_blob_refs):\n        if (len(blob_refs) <= 1):\n            if blob_refs:\n                hash_id = rdf_objects.SHA256HashID.FromBytes(blob_refs[0].blob_id.AsBytes())\n            else:\n                hash_id = rdf_objects.SHA256HashID.FromData(b'')\n            client_path_hash_id[client_path] = hash_id\n            hash_id_blob_refs[hash_id] = blob_refs\n            metadatas[hash_id] = FileMetadata(client_path=client_path, blob_refs=blob_refs)\n        else:\n            for blob_ref in blob_refs:\n                all_client_path_blob_refs.append((client_path, blob_ref))\n    client_path_offset = collections.defaultdict((lambda : 0))\n    client_path_sha256 = collections.defaultdict(hashlib.sha256)\n    verified_client_path_blob_refs = collections.defaultdict(list)\n    client_path_blob_ref_batches = collection.Batch(items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)\n    for client_path_blob_ref_batch in client_path_blob_ref_batches:\n        blob_id_batch = set((blob_ref.blob_id for (_, blob_ref) in client_path_blob_ref_batch))\n        blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)\n        for (client_path, blob_ref) in client_path_blob_ref_batch:\n            blob = blobs[blob_ref.blob_id]\n            if (blob is None):\n                message = 'Could not find one of referenced blobs: {}'.format(blob_ref.blob_id)\n                raise BlobNotFoundError(message)\n            offset = client_path_offset[client_path]\n            if (blob_ref.size != len(blob)):\n                raise ValueError(('Got conflicting size information for blob %s: %d vs %d.' % (blob_ref.blob_id, blob_ref.size, len(blob))))\n            if (blob_ref.offset != offset):\n                raise ValueError(('Got conflicting offset information for blob %s: %d vs %d.' % (blob_ref.blob_id, blob_ref.offset, offset)))\n            verified_client_path_blob_refs[client_path].append(blob_ref)\n            client_path_offset[client_path] = (offset + len(blob))\n            client_path_sha256[client_path].update(blob)\n    for client_path in iterkeys(client_path_sha256):\n        sha256 = client_path_sha256[client_path].digest()\n        hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)\n        client_path_hash_id[client_path] = hash_id\n        hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]\n    data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)\n    if use_external_stores:\n        for client_path in iterkeys(verified_client_path_blob_refs):\n            metadatas[client_path_hash_id[client_path]] = FileMetadata(client_path=client_path, blob_refs=verified_client_path_blob_refs[client_path])\n        EXTERNAL_FILE_STORE.AddFiles(metadatas)\n    return client_path_hash_id", "docstring": "Adds new files consisting of given blob references.\n\nArgs:\nclient_path_blob_refs: A dictionary mapping `db.ClientPath` instances to\nlists of blob references.\nuse_external_stores: A flag indicating if the files should also be added to\nexternal file stores.\n\nReturns:\nA dictionary mapping `db.ClientPath` to hash ids of the file.\n\nRaises:\nBlobNotFoundError: If one of the referenced blobs cannot be found.", "source": "codesearchnet"}
{"code": "def delete_document(project_id, knowledge_base_id, document_id):\n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.DocumentsClient()\n    document_path = client.document_path(project_id, knowledge_base_id, document_id)\n    response = client.delete_document(document_path)\n    print('operation running:\\n {}'.format(response.operation))\n    print('Waiting for results...')\n    print('Done.\\n {}'.format(response.result()))", "docstring": "Deletes a Document.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.\ndocument_id: Id of the Document.", "source": "codesearchnet"}
{"code": "def __init__(self, size=3, **kwargs):\n        \n        self.size = size\n        self.kwargs = kwargs\n        self._in_use = set()\n        self._lock = threading.Lock()", "docstring": "Initializes the pool.\n\nArgs:\nsize: size of pool (default 3)\n**kwargs: arguments for Browser(...)", "source": "juraj-google-style"}
{"code": "def __init__(self, rfile, maxlen):\n        \n        self.rfile = rfile\n        self.maxlen = maxlen\n        self.bytes_read = 0", "docstring": "Initialize SizeCheckWrapper instance.\n\nArgs:\nrfile (file): file of a limited size\nmaxlen (int): maximum length of the file being read", "source": "juraj-google-style"}
{"code": "def _plot_depth_track(self, ax, md, kind='MD'):\n    if (kind == 'MD'):\n        ax.set_yscale('bounded', vmin=md.min(), vmax=md.max())\n    elif (kind == 'TVD'):\n        tvd = self.location.md2tvd(md)\n        ax.set_yscale('piecewise', x=tvd, y=md)\n    else:\n        raise Exception('Kind must be MD or TVD')\n    for sp in ax.spines.values():\n        sp.set_color('gray')\n    if ax.is_first_col():\n        pad = (- 10)\n        ax.spines['left'].set_color('none')\n        ax.yaxis.set_ticks_position('right')\n        for label in ax.get_yticklabels():\n            label.set_horizontalalignment('right')\n    elif ax.is_last_col():\n        pad = (- 10)\n        ax.spines['right'].set_color('none')\n        ax.yaxis.set_ticks_position('left')\n        for label in ax.get_yticklabels():\n            label.set_horizontalalignment('left')\n    else:\n        pad = (- 30)\n        for label in ax.get_yticklabels():\n            label.set_horizontalalignment('center')\n    ax.tick_params(axis='y', colors='gray', labelsize=12, pad=pad)\n    ax.set_xticks([])\n    ax.set(xticks=[])\n    ax.depth_track = True\n    return ax", "docstring": "Private function. Depth track plotting.\n\nArgs:\nax (ax): A matplotlib axis.\nmd (ndarray): The measured depths of the track.\nkind (str): The kind of track to plot.\n\nReturns:\nax.", "source": "codesearchnet"}
{"code": "class QuantileThreshold(ThresholdFn):\n\n    def __init__(self, quantile: Optional[float]=0.95, quantile_tracker: Optional[QuantileTracker]=None, **kwargs):\n        super().__init__(**kwargs)\n        if quantile_tracker is not None:\n            self._tracker = quantile_tracker\n        else:\n            self._tracker = BufferedSlidingQuantileTracker(window_size=100, q=quantile)\n\n    @property\n    def is_stateful(self) -> bool:\n        \n        return True\n\n    @property\n    def threshold(self) -> float:\n        \n        return self._tracker.get()\n\n    def apply(self, score: Optional[float]) -> Optional[int]:\n        \n        if score is None:\n            self._tracker.push(float('NaN'))\n            return None\n        self._tracker.push(score)\n        if math.isnan(score):\n            return self._missing_label\n        if score < self.threshold:\n            return self._normal_label\n        return self._outlier_label", "docstring": "Applies a quantile-based dynamic threshold to anomaly scores.\n\nThis `ThresholdFn` is stateful and uses a quantile tracker to dynamically\ndetermine the threshold for anomaly detection. It estimates the specified\nquantile of the incoming anomaly scores and uses this quantile value as the\nthreshold.\n\nThe threshold adapts over time as more data is processed, making it suitable\nfor scenarios where the distribution of anomaly scores may change.\n\nArgs:\nquantile (Optional[float]): The quantile to be tracked (e.g., 0.95 for the\n95th percentile). This value determines the dynamic threshold. Defaults to\n0.95.\nquantile_tracker (Optional[BufferedQuantileTracker]): An optional\npre-initialized quantile tracker. If provided, this tracker will be used;\notherwise, a `BufferedSlidingQuantileTracker` will be created with a\ndefault window size of 100.\n**kwargs: Additional keyword arguments to be passed to the base\n`ThresholdFn` constructor.", "source": "github-repos"}
{"code": "def _ReadFloatingPointDataTypeDefinition(\n      self, definitions_registry, definition_values, definition_name,\n      is_member=False):\n    \n    return self._ReadFixedSizeDataTypeDefinition(\n        definitions_registry, definition_values,\n        data_types.FloatingPointDefinition, definition_name,\n        self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,\n        is_member=is_member, supported_size_values=(4, 8))", "docstring": "Reads a floating-point data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nFloatingPointDefinition: floating-point data type definition.", "source": "juraj-google-style"}
{"code": "def create_context(pip_version=None, python_version=None):\n    \n    \n    if pip_version:\n        pip_req = \"pip-%s\" % str(pip_version)\n    else:\n        pip_req = \"pip\"\n\n    if python_version:\n        ver = Version(str(python_version))\n        major_minor_ver = ver.trim(2)\n        py_req = \"python-%s\" % str(major_minor_ver)\n    else:\n        \n        package = get_latest_package(\"python\")\n        if package:\n            major_minor_ver = package.version.trim(2)\n        else:\n            \n            \n            major_minor_ver = '.'.join(map(str, sys.version_info[:2]))\n\n        py_req = \"python-%s\" % str(major_minor_ver)\n\n    \n    request = [pip_req, py_req]\n\n    with convert_errors(from_=(PackageFamilyNotFoundError, PackageNotFoundError),\n                        to=BuildError, msg=\"Cannot run - pip or python rez \"\n                        \"package is not present\"):\n        context = ResolvedContext(request)\n\n    \n    pip_variant = context.get_resolved_package(\"pip\")\n    pip_package = pip_variant.parent\n    print_info(\"Using %s (%s)\" % (pip_package.qualified_name, pip_variant.uri))\n\n    return context", "docstring": "Create a context containing the specific pip and python.\n\nArgs:\npip_version (str or `Version`): Version of pip to use, or latest if None.\npython_version (str or `Version`): Python version to use, or latest if\nNone.\n\nReturns:\n`ResolvedContext`: Context containing pip and python.", "source": "juraj-google-style"}
{"code": "async def update_notifications(self, on_match_open: bool = None, on_tournament_end: bool = None):\n        \n        params = {}\n        if on_match_open is not None:\n            params['notify_users_when_matches_open'] = on_match_open\n        if on_tournament_end is not None:\n            params['notify_users_when_the_tournament_ends'] = on_tournament_end\n        assert_or_raise(len(params) > 0, ValueError, 'At least one of the notifications must be given')\n        await self.update(**params)", "docstring": "update participants notifications for this tournament\n\n|methcoro|\n\nArgs:\non_match_open: Email registered Challonge participants when matches open up for them\non_tournament_end: Email registered Challonge participants the results when this tournament ends\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def roll_to_business_day(self, date_tensor, roll_convention):\n    if roll_convention == constants.BusinessDayConvention.NONE:\n        return date_tensor\n    ordinals = dt.convert_to_date_tensor(date_tensor).ordinal()\n    biz_days, is_bizday = self._to_biz_space(ordinals)\n    biz_days_rolled = self._apply_roll_biz_space(date_tensor, biz_days, is_bizday, roll_convention)\n    return dt.from_ordinals(self._from_biz_space(biz_days_rolled))", "docstring": "Rolls the given dates to business dates according to given convention.\n\nArgs:\ndate_tensor: `DateTensor` of dates to roll from.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting `DateTensor`.", "source": "github-repos"}
{"code": "async def getTempCoreCmdr(mods=None, outp=None):\n    \n    acm = genTempCoreProxy(mods)\n    prox = await acm.__aenter__()\n    cmdrcore = await CmdrCore.anit(prox, outp=outp)\n    cmdrcore.acm = acm\n    return cmdrcore", "docstring": "Get a CmdrCore instance which is backed by a temporary Cortex.\n\nArgs:\nmods (list): A list of additional CoreModules to load in the Cortex.\noutp: A output helper.  Will be used for the Cmdr instance.\n\nNotes:\nThe CmdrCore returned by this should be fini()'d to tear down the temporary Cortex.\n\nReturns:\nCmdrCore: A CmdrCore instance.", "source": "juraj-google-style"}
{"code": "def _file_io_read_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n    if (pipeline := test_spec.get('pipeline', None)):\n        for transform in pipeline.get('transforms', []):\n            if transform.get('type', '').startswith('ReadFrom'):\n                file_name = transform['config']['path'].split('/')[-1]\n                return replace_recursive(test_spec, transform['type'], 'path', env.input_file(file_name, INPUT_FILES[file_name]))\n    return test_spec", "docstring": "This preprocessor replaces any file IO ReadFrom transform with a Create\ntransform that reads from a predefined in-memory dictionary. This allows\nthe test to verify the pipeline's correctness without relying on external\nfiles.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with ReadFrom transforms replaced.", "source": "github-repos"}
{"code": "def insert_at_frontier(self, operations: ops.OP_TREE, start: int, frontier: Dict[(ops.Qid, int)]=None) -> Dict[(ops.Qid, int)]:\n    if (frontier is None):\n        frontier = defaultdict((lambda : 0))\n    operations = tuple(ops.flatten_op_tree(operations))\n    if (not operations):\n        return frontier\n    qubits = set((q for op in operations for q in op.qubits))\n    if any(((frontier[q] > start) for q in qubits)):\n        raise ValueError('The frontier for qubits on which the operationsto insert act cannot be after start.')\n    next_moments = self.next_moments_operating_on(qubits, start)\n    (insertion_indices, _) = self._pick_inserted_ops_moment_indices(operations, start, frontier)\n    self._push_frontier(frontier, next_moments)\n    self._insert_operations(operations, insertion_indices)\n    return frontier", "docstring": "Inserts operations inline at frontier.\n\nArgs:\noperations: the operations to insert\nstart: the moment at which to start inserting the operations\nfrontier: frontier[q] is the earliest moment in which an operation\nacting on qubit q can be placed.", "source": "codesearchnet"}
{"code": "def rename(script, label='blank', layer_num=None):\n    filter_xml = ''.join(['  <filter name=\"Rename Current Mesh\">\\n', '    <Param name=\"newName\" ', 'value=\"{}\" '.format(label), 'description=\"New Label\" ', 'type=\"RichString\" ', '/>\\n', '  </filter>\\n'])\n    if isinstance(script, mlx.FilterScript):\n        if ((layer_num is None) or (layer_num == script.current_layer())):\n            util.write_filter(script, filter_xml)\n            script.layer_stack[script.current_layer()] = label\n        else:\n            cur_layer = script.current_layer()\n            change(script, layer_num)\n            util.write_filter(script, filter_xml)\n            change(script, cur_layer)\n            script.layer_stack[layer_num] = label\n    else:\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Rename layer label\n\nCan be useful for outputting mlp files, as the output file names use\nthe labels.\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlabel (str): new label for the mesh layer\nlayer_num (int): layer number to rename. Default is the\ncurrent layer. Not supported on the file base API.\n\nLayer stack:\nRenames a layer\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def range(self, dim, data_range=True, dimension_range=True):\n        \n        iskdim = self.get_dimension(dim) not in self.vdims\n        return super(StatisticsElement, self).range(dim, iskdim, dimension_range)", "docstring": "Return the lower and upper bounds of values along dimension.\n\nArgs:\ndimension: The dimension to compute the range on.\ndata_range (bool): Compute range from data values\ndimension_range (bool): Include Dimension ranges\nWhether to include Dimension range and soft_range\nin range calculation\n\nReturns:\nTuple containing the lower and upper bound", "source": "juraj-google-style"}
{"code": "def _read_range(self, start, end=0):\n        \n        if start >= self._size:\n            \n            return bytes()\n\n        \n        with _handle_oss_error():\n            response = self._bucket.get_object(key=self._key, headers=dict(\n                Range=self._http_range(\n                    \n                    start, end if end <= self._size else self._size)))\n\n        \n        return response.read()", "docstring": "Read a range of bytes in stream.\n\nArgs:\nstart (int): Start stream position.\nend (int): End stream position.\n0 To not specify end.\n\nReturns:\nbytes: number of bytes read", "source": "juraj-google-style"}
{"code": "def add(self, username, user_api, filename=None):\n        \n        keys = API.__get_keys(filename)\n        user = user_api.find(username)[0]\n        distinguished_name = user.entry_dn\n        if 'ldapPublicKey' not in user.objectClass:\n            raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(\n                'LDAP Public Key Object Class not found. ' +\n                'Please ensure user was created correctly.')\n        else:\n            for key in list(set(keys)):  \n                print(key)\n                try:\n                    SSHKey(key).parse()\n                except Exception as err:\n                    raise err from None\n                else:\n                    operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}\n                    self.client.modify(distinguished_name, operation)", "docstring": "Add SSH public key to a user's profile.\n\nArgs:\nusername: Username to attach SSH public key to\nfilename: Filename containing keys to add (optional)\n\nRaises:\nldap3.core.exceptions.LDAPNoSuchAttributeResult:\nldapPublicKey isn't attached to objectClass", "source": "juraj-google-style"}
{"code": "def conversations_invite(\n        self, *, channel: str, users: List[str], **kwargs\n    ) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"channel\": channel, \"users\": users})\n        return self.api_call(\"conversations.invite\", json=kwargs)", "docstring": "Invites users to a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nusers (list): An list of user id's to invite. e.g. ['U2345678901', 'U3456789012']", "source": "juraj-google-style"}
{"code": "def set_weather_from_metar(\n        metar: typing.Union[Metar.Metar, str],\n        in_file: typing.Union[str, Path],\n        out_file: typing.Union[str, Path] = None\n) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]:\n    \n    error, metar = custom_metar.CustomMetar.get_metar(metar)\n\n    if error:\n        return error, None\n\n    if metar:\n        LOGGER.debug('METAR: %s', metar.code)\n\n    in_file = elib.path.ensure_file(in_file)\n\n    if out_file is None:\n        out_file = in_file\n    else:\n        out_file = elib.path.ensure_file(out_file, must_exist=False)\n    LOGGER.debug('applying metar: %s -> %s', in_file, out_file)\n\n    try:\n        LOGGER.debug('building MissionWeather')\n        _mission_weather = mission_weather.MissionWeather(metar)\n\n        with Miz(str(in_file)) as miz:\n            _mission_weather.apply_to_miz(miz)\n            miz.zip(str(out_file))\n            return None, f'successfully applied METAR to {in_file}'\n\n    except ValueError:\n        error = f'Unable to apply METAR string to the mission.\\n' \\\n                f'This is most likely due to a freak value, this feature is still experimental.\\n' \\\n                f'I will fix it ASAP !'\n        return error, None", "docstring": "Applies the weather from a METAR object to a MIZ file\n\nArgs:\nmetar: metar object\nin_file: path to MIZ file\nout_file: path to output MIZ file (will default to in_file)\n\nReturns: tuple of error, success", "source": "juraj-google-style"}
{"code": "def split(self, file):\n        \n        with open(file, 'rb') as f:\n            for record in sagemaker.amazon.common.read_recordio(f):\n                yield record", "docstring": "Split a file into records using a specific strategy\n\nThis RecordIOSplitter splits the data into individual RecordIO records.\n\nArgs:\nfile (str): path to the file to split\n\nReturns: generator for the individual records that were split from the file", "source": "juraj-google-style"}
{"code": "def siblings(self, as_resources=False):\n\n\t\t\n\n\t\tsiblings = set()\n\n\t\t\n\t\tfor parent in self.parents(as_resources=True):\n\t\t\tfor sibling in parent.children(as_resources=as_resources):\n\t\t\t\tsiblings.add(sibling)\n\n\t\t\n\t\tif as_resources:\n\t\t\tsiblings.remove(self)\n\t\tif not as_resources:\n\t\t\tsiblings.remove(self.uri)\n\n\t\treturn list(siblings)", "docstring": "method to return hierarchical siblings of this resource.\n\nArgs:\nas_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n(list): list of resources", "source": "juraj-google-style"}
{"code": "class InstructBlipForConditionalGenerationModelOutput(ModelOutput):\n    loss: Optional[Tuple[torch.FloatTensor]] = None\n    logits: Optional[Tuple[torch.FloatTensor]] = None\n    vision_outputs: Optional[torch.FloatTensor] = None\n    qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None\n    language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None\n\n    def to_tuple(self) -> Tuple[Any]:\n        return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Class defining the outputs of [`InstructBlipForConditionalGeneration`].\n\nArgs:\nloss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):\nLanguage modeling loss from the language model.\nlogits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\nPrediction scores of the language modeling head of the language model.\nvision_outputs (`BaseModelOutputWithPooling`):\nOutputs of the vision encoder.\nqformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):\nOutputs of the Q-Former (Querying Transformer).\nlanguage_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):\nOutputs of the language model.", "source": "github-repos"}
{"code": "def from_serializable(cls, obj):\n    if (obj.get('version', {'bqm_schema': '1.0.0'})['bqm_schema'] != '2.0.0'):\n        return cls._from_serializable_v1(obj)\n    variables = [(tuple(v) if isinstance(v, list) else v) for v in obj['variable_labels']]\n    if obj['use_bytes']:\n        ldata = bytes2array(obj['linear_biases'])\n        qdata = bytes2array(obj['quadratic_biases'])\n        irow = bytes2array(obj['quadratic_head'])\n        icol = bytes2array(obj['quadratic_tail'])\n    else:\n        ldata = obj['linear_biases']\n        qdata = obj['quadratic_biases']\n        irow = obj['quadratic_head']\n        icol = obj['quadratic_tail']\n    offset = obj['offset']\n    vartype = obj['variable_type']\n    bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset, str(vartype), variable_order=variables)\n    bqm.info.update(obj['info'])\n    return bqm", "docstring": "Deserialize a binary quadratic model.\n\nArgs:\nobj (dict):\nA binary quadratic model serialized by :meth:`~.BinaryQuadraticModel.to_serializable`.\n\nReturns:\n:obj:`.BinaryQuadraticModel`\n\nExamples:\n\nEncode and decode using JSON\n\n>>> import dimod\n>>> import json\n...\n>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)\n>>> s = json.dumps(bqm.to_serializable())\n>>> new_bqm = dimod.BinaryQuadraticModel.from_serializable(json.loads(s))\n\nSee also:\n:meth:`~.BinaryQuadraticModel.to_serializable`\n\n:func:`json.loads`, :func:`json.load` JSON deserialization functions", "source": "codesearchnet"}
{"code": "def _parse_string(self, xml):\n    if (not isinstance(xml, HTMLElement)):\n        xml = dhtmlparser.parseString(str(xml))\n    record = xml.find('record')\n    if (not record):\n        raise ValueError('There is no <record> in your MARC XML document!')\n    record = record[0]\n    self.oai_marc = (len(record.find('oai_marc')) > 0)\n    if (not self.oai_marc):\n        leader = record.find('leader')\n        if (len(leader) >= 1):\n            self.leader = leader[0].getContent()\n    if self.oai_marc:\n        self._parse_control_fields(record.find('fixfield'), 'id')\n        self._parse_data_fields(record.find('varfield'), 'id', 'label')\n    else:\n        self._parse_control_fields(record.find('controlfield'), 'tag')\n        self._parse_data_fields(record.find('datafield'), 'tag', 'code')\n    if (self.oai_marc and ('LDR' in self.controlfields)):\n        self.leader = self.controlfields['LDR']", "docstring": "Parse MARC XML document to dicts, which are contained in\nself.controlfields and self.datafields.\n\nArgs:\nxml (str or HTMLElement): input data\n\nAlso detect if this is oai marc format or not (see elf.oai_marc).", "source": "codesearchnet"}
{"code": "def column_type(self, agencyId: int, advertiserId: int, column: str) -> str:\n    if column not in self.columns:\n        for saved_column in API_SearchAds(self.config, self.auth, iterate=True).savedcolumns().list(agencyId=agencyId, advertiserId=advertiserId).execute():\n            self.columns[saved_column['savedColumnName']] = SA_TYPES.get(saved_column['type'], 'STRING')\n    return self.columns.get(column, 'STRING')", "docstring": "Return the column type for the given column name.\n\nIntended mostly as an internl helper function but left open for convenience.\nLeverages both saved columns and standard columns.\nDoes not distinguish saved from standard, will this be a problem?\n\nArgs:\nagencyId - required only for saved columns, usually derived from report\nadvertiserid - required only for saved columns, usually derived from report\n\nReturns:\nColumn type as defnined by BigQuery. Defaults to STRING if not found.", "source": "github-repos"}
{"code": "def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):\n    \n    pos_A_in_B = pose_A_in_B[:3, 3]\n    rot_A_in_B = pose_A_in_B[:3, :3]\n    skew_symm = _skew_symmetric_translation(pos_A_in_B)\n    force_B = rot_A_in_B.T.dot(force_A)\n    torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A)\n    return force_B, torque_B", "docstring": "Converts linear and rotational force at a point in frame A to the equivalent in frame B.\n\nArgs:\nforce_A: 3-dim iterable for linear force in A\ntorque_A: 3-dim iterable for rotational force (moment) in A\npose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B\n\nReturns:\nforce_B, torque_B: two numpy arrays of shape (3,) for the forces in B", "source": "juraj-google-style"}
{"code": "def member_del(self, member_id, reconfig=True):\n        \n        server_id = self._servers.host_to_server_id(\n            self.member_id_to_host(member_id))\n        if reconfig and member_id in [member['_id'] for member in self.members()]:\n            config = self.config\n            config['members'].pop(member_id)\n            self.repl_update(config)\n        self._servers.remove(server_id)\n        return True", "docstring": "remove member from replica set\nArgs:\nmember_id - member index\nreconfig - is need reconfig replica\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int, old_root: bytes, new_root: bytes, proof: Sequence[bytes]):\n    old_size = old_tree_size\n    new_size = new_tree_size\n    if ((old_size < 0) or (new_size < 0)):\n        raise ValueError('Negative tree size')\n    if (old_size > new_size):\n        raise ValueError(('Older tree has bigger size (%d vs %d), did you supply inputs in the wrong order?' % (old_size, new_size)))\n    if (old_size == new_size):\n        if (old_root == new_root):\n            if proof:\n                logging.debug('Trees are identical, ignoring proof')\n            return True\n        else:\n            raise error.ConsistencyError('Inconsistency: different root hashes for the same tree size')\n    if (old_size == 0):\n        if proof:\n            logging.debug('Ignoring non-empty consistency proof for empty tree.')\n        return True\n    node = (old_size - 1)\n    last_node = (new_size - 1)\n    while (node % 2):\n        node \n        last_node \n    p = iter(proof)\n    try:\n        if node:\n            new_hash = old_hash = next(p)\n        else:\n            new_hash = old_hash = old_root\n        while node:\n            if (node % 2):\n                next_node = next(p)\n                old_hash = self.hasher.hash_children(next_node, old_hash)\n                new_hash = self.hasher.hash_children(next_node, new_hash)\n            elif (node < last_node):\n                new_hash = self.hasher.hash_children(new_hash, next(p))\n            node \n            last_node \n        while last_node:\n            n = next(p)\n            new_hash = self.hasher.hash_children(new_hash, n)\n            last_node \n        if (new_hash != new_root):\n            raise error.ProofError(('Bad Merkle proof: second root hash does not match. Expected hash: %s , computed hash: %s' % (hexlify(new_root).strip(), hexlify(new_hash).strip())))\n        elif (old_hash != old_root):\n            raise error.ConsistencyError(('Inconsistency: first root hash does not match. Expected hash: %s, computed hash: %s' % (hexlify(old_root).strip(), hexlify(old_hash).strip())))\n    except StopIteration:\n        raise error.ProofError('Merkle proof is too short')\n    try:\n        next(p)\n    except StopIteration:\n        pass\n    else:\n        logging.debug('Proof has extra nodes')\n    return True", "docstring": "Verify the consistency between two root hashes.\n\nold_tree_size must be <= new_tree_size.\n\nArgs:\nold_tree_size: size of the older tree.\nnew_tree_size: size of the newer_tree.\nold_root: the root hash of the older tree.\nnew_root: the root hash of the newer tree.\nproof: the consistency proof.\n\nReturns:\nTrue. The return value is enforced by a decorator and need not be\nchecked by the caller.\n\nRaises:\nConsistencyError: the proof indicates an inconsistency\n(this is usually really serious!).\nProofError: the proof is invalid.\nValueError: supplied tree sizes are invalid.", "source": "codesearchnet"}
{"code": "def _ParseFilterOptions(self, options):\n    \n    names = ['artifact_filters', 'date_filters', 'filter_file']\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=names)\n\n    extensions_string = self.ParseStringOption(options, 'extensions_string')\n    self._ParseExtensionsString(extensions_string)\n\n    names_string = getattr(options, 'names_string', None)\n    self._ParseNamesString(names_string)\n\n    signature_identifiers = getattr(options, 'signature_identifiers', None)\n    try:\n      self._ParseSignatureIdentifiers(\n          self._data_location, signature_identifiers)\n    except (IOError, ValueError) as exception:\n      raise errors.BadConfigOption(exception)\n\n    if self._artifact_filters or self._filter_file:\n      self.has_filters = True\n    else:\n      self.has_filters = self._filter_collection.HasFilters()", "docstring": "Parses the filter options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, flags=ConfigFlag.OFPC_FRAG_NORMAL,\n                 miss_send_len=ControllerMaxLen.OFPCML_NO_BUFFER):\n        \n        super().__init__(xid)\n        self.flags = flags\n        self.miss_send_len = miss_send_len", "docstring": "Create a SwitchConfig with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\nflags (ConfigFlag): OFPC_* flags.\nmiss_send_len (int): UBInt16 max bytes of new flow that the\ndatapath should send to the controller.", "source": "juraj-google-style"}
{"code": "def __init__(self, root):\n    self._root_ref = root if isinstance(root, weakref.ref) else weakref.ref(root)", "docstring": "Configure the trackable view.\n\nArgs:\nroot: A `Trackable` object whose variables (including the variables of\ndependencies, recursively) should be saved. May be a weak reference.", "source": "github-repos"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    scale = self.scale\n    fan_in, fan_out = _compute_fans(shape)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    if self.mode == 'fan_in':\n        scale /= max(1.0, fan_in)\n    elif self.mode == 'fan_out':\n        scale /= max(1.0, fan_out)\n    else:\n        scale /= max(1.0, (fan_in + fan_out) / 2.0)\n    if self.distribution == 'truncated_normal':\n        stddev = math.sqrt(scale) / 0.8796256610342398\n        return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)\n    elif self.distribution == 'untruncated_normal':\n        stddev = math.sqrt(scale)\n        return self._random_generator.random_normal(shape, 0.0, stddev, dtype)\n    else:\n        limit = math.sqrt(3.0 * scale)\n        return self._random_generator.random_uniform(shape, -limit, limit, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point types are\nsupported. If not specified, `tf.keras.backend.floatx()` is used, which\ndefault to `float32` unless you configured it otherwise (via\n`tf.keras.backend.set_floatx(float_dtype)`)\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def ParseFileTransfer(\n      self, parser_mediator, query, row, cache=None, database=None,\n      **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    source_dict = cache.GetResults('source')\n    if not source_dict:\n      results = database.Query(self.QUERY_SOURCE_FROM_TRANSFER)\n\n      cache.CacheQueryResults(\n          results, 'source', 'pk_id', ('skypeid', 'skypename'))\n      source_dict = cache.GetResults('source')\n\n    dest_dict = cache.GetResults('destination')\n    if not dest_dict:\n      results = database.Query(self.QUERY_DEST_FROM_TRANSFER)\n\n      cache.CacheQueryResults(\n          results, 'destination', 'parent_id', ('skypeid', 'skypename'))\n      dest_dict = cache.GetResults('destination')\n\n    source = 'Unknown'\n    destination = 'Unknown'\n\n    parent_id = self._GetRowValue(query_hash, row, 'parent_id')\n    partner_dispname = self._GetRowValue(query_hash, row, 'partner_dispname')\n    partner_handle = self._GetRowValue(query_hash, row, 'partner_handle')\n\n    if parent_id:\n      destination = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)\n      skype_id, skype_name = source_dict.get(parent_id, [None, None])\n      if skype_name:\n        source = '{0:s} <{1:s}>'.format(skype_id, skype_name)\n    else:\n      source = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)\n\n      pk_id = self._GetRowValue(query_hash, row, 'pk_id')\n      if pk_id:\n        skype_id, skype_name = dest_dict.get(pk_id, [None, None])\n        if skype_name:\n          destination = '{0:s} <{1:s}>'.format(skype_id, skype_name)\n\n    filename = self._GetRowValue(query_hash, row, 'filename')\n    filesize = self._GetRowValue(query_hash, row, 'filesize')\n\n    try:\n      file_size = int(filesize, 10)\n    except (ValueError, TypeError):\n      parser_mediator.ProduceExtractionWarning(\n          'unable to convert file size: {0!s} of file: {1:s}'.format(\n              filesize, filename))\n      file_size = 0\n\n    event_data = SkypeTransferFileEventData()\n    event_data.destination = destination\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.source = source\n    event_data.transferred_filename = filename\n    event_data.transferred_filepath = self._GetRowValue(\n        query_hash, row, 'filepath')\n    event_data.transferred_filesize = file_size\n\n    status = self._GetRowValue(query_hash, row, 'status')\n    starttime = self._GetRowValue(query_hash, row, 'starttime')\n\n    if status == 2:\n      if starttime:\n        event_data.action_type = 'SENDSOLICITUDE'\n\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'File transfer from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    elif status == 8:\n      if starttime:\n        event_data.action_type = 'GETSOLICITUDE'\n\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'File transfer from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      accepttime = self._GetRowValue(query_hash, row, 'accepttime')\n      if accepttime:\n        event_data.action_type = 'ACCEPTED'\n\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=accepttime)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'File transfer from Skype')\n        
parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      finishtime = self._GetRowValue(query_hash, row, 'finishtime')\n      if finishtime:\n        event_data.action_type = 'FINISHED'\n\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=finishtime)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'File transfer from Skype')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a file transfer.\n\nThere is no direct relationship between who sends the file and\nwho accepts the file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.\ncache (Optional[SQLiteCache]): cache.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "juraj-google-style"}
{"code": "def dzip(items1, items2, cls=dict):\n    try:\n        len(items1)\n    except TypeError:\n        items1 = list(items1)\n    try:\n        len(items2)\n    except TypeError:\n        items2 = list(items2)\n    if ((len(items1) == 0) and (len(items2) == 1)):\n        items2 = []\n    if ((len(items2) == 1) and (len(items1) > 1)):\n        items2 = (items2 * len(items1))\n    if (len(items1) != len(items2)):\n        raise ValueError(('out of alignment len(items1)=%r, len(items2)=%r' % (len(items1), len(items2))))\n    return cls(zip(items1, items2))", "docstring": "Zips elementwise pairs between items1 and items2 into a dictionary. Values\nfrom items2 can be broadcast onto items1.\n\nArgs:\nitems1 (Iterable): full sequence\nitems2 (Iterable): can either be a sequence of one item or a sequence\nof equal length to `items1`\ncls (Type[dict]): dictionary type to use. Defaults to dict, but could\nbe ordered dict instead.\n\nReturns:\ndict: similar to dict(zip(items1, items2))\n\nExample:\n>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}\n>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}\n>>> assert dzip([], [4]) == {}", "source": "codesearchnet"}
{"code": "def sample(self, num_rows):\n        \n\n        sampled_values = []\n        for i in range(num_rows):\n            sampled_values.append(self._sample_row())\n\n        return pd.DataFrame(sampled_values, columns=self.columns)", "docstring": "Sample new rows.\n\nArgs:\nnum_rows(int): Number of rows to sample\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "class BundleFactory(object):\n\n    def __init__(self, stacked: bool) -> None:\n        self._stacked = stacked\n\n    def create_bundle(self, output_pcollection: Union[pvalue.PBegin, pvalue.PCollection]) -> '_Bundle':\n        return _Bundle(output_pcollection, self._stacked)\n\n    def create_empty_committed_bundle(self, output_pcollection: Union[pvalue.PBegin, pvalue.PCollection]) -> '_Bundle':\n        bundle = self.create_bundle(output_pcollection)\n        bundle.commit(None)\n        return bundle", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nBundleFactory creates output bundles to be used by transform evaluators.\n\nArgs:\nstacked: whether or not to stack the WindowedValues within the bundle\nin case consecutive ones share the same timestamp and windows.\nDirectRunnerOptions.direct_runner_use_stacked_bundle controls this option.", "source": "github-repos"}
{"code": "def compute_output_shape(self, input_shape):\n    \n    input_shape = tf.TensorShape(input_shape)\n    input_shape = input_shape.with_rank_at_least(2)\n    if tf.compat.dimension_value(input_shape[-1]) is None:\n      raise ValueError(\n          'The innermost dimension of `input_shape` must be defined, '\n          'but saw: {}'.format(input_shape))\n    return input_shape[:-1].concatenate(self.units)", "docstring": "Computes the output shape of the layer.\n\nArgs:\ninput_shape: Shape tuple (tuple of integers) or list of shape tuples\n(one per output tensor of the layer). Shape tuples can include None for\nfree dimensions, instead of an integer.\n\nReturns:\noutput_shape: A tuple representing the output shape.\n\nRaises:\nValueError: If innermost dimension of `input_shape` is not defined.", "source": "juraj-google-style"}
{"code": "def experimental_local_results(self, value):\n    return super(OneDeviceStrategy, self).experimental_local_results(value)", "docstring": "Returns the list of all local per-replica values contained in `value`.\n\nIn `OneDeviceStrategy`, the `value` is always expected to be a single\nvalue, so the result is just the value in a tuple.\n\nArgs:\nvalue: A value returned by `experimental_run()`, `run()`,\n`extended.call_for_each_replica()`, or a variable created in `scope`.\n\nReturns:\nA tuple of values contained in `value`. If `value` represents a single\nvalue, this returns `(value,).`", "source": "github-repos"}
{"code": "def list(self, pattern='*'):\n    if (self._group_dict is None):\n        self._group_dict = collections.OrderedDict(((group.id, group) for group in self._client.list_groups()))\n    return [group for group in self._group_dict.values() if fnmatch.fnmatch(group.display_name, pattern)]", "docstring": "Returns a list of groups that match the filters.\n\nArgs:\npattern: An optional pattern to filter the groups based on their display\nname. This can include Unix shell-style wildcards. E.g.\n``\"Production*\"``.\n\nReturns:\nA list of Group objects that match the filters.", "source": "codesearchnet"}
{"code": "def one_step(self, current_state, previous_kernel_results):\n    with tf.compat.v1.name_scope(name=mcmc_util.make_name(self.name, 'slice', 'one_step'), values=[self.step_size, self.max_doublings, self._seed_stream, current_state, previous_kernel_results.target_log_prob]):\n        with tf.compat.v1.name_scope('initialize'):\n            [current_state_parts, step_sizes, current_target_log_prob] = _prepare_args(self.target_log_prob_fn, current_state, self.step_size, previous_kernel_results.target_log_prob, maybe_expand=True)\n            max_doublings = tf.convert_to_tensor(value=self.max_doublings, dtype=tf.int32, name='max_doublings')\n        independent_chain_ndims = distribution_util.prefer_static_rank(current_target_log_prob)\n        [next_state_parts, next_target_log_prob, bounds_satisfied, direction, upper_bounds, lower_bounds] = _sample_next(self.target_log_prob_fn, current_state_parts, step_sizes, max_doublings, current_target_log_prob, independent_chain_ndims, seed=self._seed_stream())\n\n        def maybe_flatten(x):\n            return (x if mcmc_util.is_list_like(current_state) else x[0])\n        return [maybe_flatten(next_state_parts), SliceSamplerKernelResults(target_log_prob=next_target_log_prob, bounds_satisfied=bounds_satisfied, direction=direction, upper_bounds=upper_bounds, lower_bounds=lower_bounds)]", "docstring": "Runs one iteration of Slice Sampler.\n\nArgs:\ncurrent_state: `Tensor` or Python `list` of `Tensor`s representing the\ncurrent state(s) of the Markov chain(s). The first `r` dimensions\nindex independent chains,\n`r = tf.rank(target_log_prob_fn(*current_state))`.\nprevious_kernel_results: `collections.namedtuple` containing `Tensor`s\nrepresenting values from previous calls to this function (or from the\n`bootstrap_results` function.)\n\nReturns:\nnext_state: Tensor or Python list of `Tensor`s representing the state(s)\nof the Markov chain(s) after taking exactly one step. Has same type and\nshape as `current_state`.\nkernel_results: `collections.namedtuple` of internal calculations used to\nadvance the chain.\n\nRaises:\nValueError: if there isn't one `step_size` or a list with same length as\n`current_state`.\nTypeError: if `not target_log_prob.dtype.is_floating`.", "source": "codesearchnet"}
{"code": "def _base_query(self, session):\n    return session.query(ORMTargetMarker).filter((ORMTargetMarker.name == self.name)).filter((ORMTargetMarker.params == self.params))", "docstring": "Base query for a target.\n\nArgs:\nsession: database session to query in", "source": "codesearchnet"}
{"code": "def get_scaled_loss(self, loss):\n    if callable(loss):\n\n        def new_loss():\n            loss_val = loss()\n            return loss_val * math_ops.cast(self.loss_scale, loss_val.dtype)\n        return new_loss\n    else:\n        return loss * math_ops.cast(self.loss_scale, loss.dtype)", "docstring": "Scales the loss by the loss scale.\n\nThis method is only needed if you compute gradients manually, e.g. with\n`tf.GradientTape`. In that case, call this method to scale the loss before\npassing the loss to `tf.GradientTape`. If you use\n`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss\nscaling is automatically applied and this method is unneeded.\n\nIf this method is called, `get_unscaled_gradients` should also be called.\nSee the `tf.keras.mixed_precision.LossScaleOptimizer` doc for\nan example.\n\nArgs:\nloss: The loss, which will be multiplied by the loss scale. Can either be\na tensor or a callable returning a tensor.\n\nReturns:\n`loss` multiplied by `LossScaleOptimizer.loss_scale`.", "source": "github-repos"}
{"code": "def _GenOpenApiSpecCallback(args, openapi_func=_GenOpenApiSpec):\n  \n  openapi_paths = openapi_func(args.service, args.output,\n                               hostname=args.hostname,\n                               application_path=args.application,\n                               x_google_api_name=args.x_google_api_name)\n  for openapi_path in openapi_paths:\n    print 'OpenAPI spec written to %s' % openapi_path", "docstring": "Generate OpenAPI (Swagger) specs to files.\n\nArgs:\nargs: An argparse.Namespace object to extract parameters from\nopenapi_func: A function that generates OpenAPI specs and stores them to\nfiles, accepting a list of service names and an output directory.", "source": "juraj-google-style"}
{"code": "def list_workers(config, *, filter_by_queues=None):\n    celery_app = create_app(config)\n    worker_stats = celery_app.control.inspect().stats()\n    queue_stats = celery_app.control.inspect().active_queues()\n    if (worker_stats is None):\n        return []\n    workers = []\n    for (name, w_stat) in worker_stats.items():\n        queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]\n        add_worker = (filter_by_queues is None)\n        if (not add_worker):\n            for queue in queues:\n                if (queue.name in filter_by_queues):\n                    add_worker = True\n                    break\n        if add_worker:\n            workers.append(WorkerStats.from_celery(name, w_stat, queues))\n    return workers", "docstring": "Return a list of all available workers.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings are retrieved.\nfilter_by_queues (list): Restrict the returned workers to workers that listen to\nat least one of the queue names in this list.\n\nReturns:\nlist: A list of WorkerStats objects.", "source": "codesearchnet"}
{"code": "def destroy(self):\n    if (not self._is_live()):\n        raise RuntimeError('A unit must be submitted to fleet before it can destroyed.')\n    return self._client.destroy_unit(self.name)", "docstring": "Remove a unit from the fleet cluster\n\nReturns:\nTrue: The unit was removed\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "codesearchnet"}
{"code": "def _tp__get_typed_properties(self):\n    try:\n        return tuple((getattr(self, p) for p in self._tp__typed_properties))\n    except AttributeError:\n        raise NotImplementedError", "docstring": "Return a tuple of typed attrs that can be used for comparisons.\n\nRaises:\nNotImplementedError: Raised if this class was mixed into a class\nthat was not created by _AnnotatedObjectMeta.", "source": "codesearchnet"}
{"code": "def create(self, master_course_id, coach_email, max_students_allowed, title, modules=None):\n    payload = {'master_course_id': master_course_id, 'coach_email': coach_email, 'max_students_allowed': max_students_allowed, 'display_name': title}\n    if (modules is not None):\n        payload['course_modules'] = modules\n    resp = self.requester.post(parse.urljoin(self.base_url, '/api/ccx/v0/ccx/'), json=payload)\n    try:\n        resp.raise_for_status()\n    except:\n        log.error(resp.json())\n        raise\n    return resp.json()['ccx_course_id']", "docstring": "Creates a CCX\n\nArgs:\nmaster_course_id (str): edx course id of the master course\ncoach_email (str): email of the user to make a coach. This user must exist on edx.\nmax_students_allowed (int): Maximum number of students to allow in this ccx.\ntitle (str): Title of the CCX to be created\nmodules (optional list): A list of locator_ids (str) for the modules to enable.\n\nReturns:\nccx_id (str): The ID of the ccx.", "source": "codesearchnet"}
{"code": "def parse_name(name):\n  \n  bucket = None\n  obj = None\n  m = re.match(_STORAGE_NAME, name)\n  if m:\n    \n    bucket = m.group(1)\n    obj = m.group(2)\n    if obj is not None:\n      obj = obj[1:]  \n  else:\n    m = re.match('(' + _OBJECT_NAME + ')', name)\n    if m:\n      obj = m.group(1)\n  return bucket, obj", "docstring": "Parse a gs:// URL into the bucket and object names.\n\nArgs:\nname: a GCS URL of the form gs://bucket or gs://bucket/object\nReturns:\nThe bucket name (with no gs:// prefix), and the object name if present. If the name\ncould not be parsed returns None for both.", "source": "juraj-google-style"}
{"code": "def start_listing(self, request: Request) -> ListingResponse:\n    if (self._session_state != SessionState.ready):\n        raise RuntimeError('Session not ready')\n    response = ListingResponse()\n    (yield from self._prepare_fetch(request, response))\n    (yield from self._open_data_stream())\n    mlsd_command = Command('MLSD', self._request.file_path)\n    list_command = Command('LIST', self._request.file_path)\n    try:\n        (yield from self._begin_stream(mlsd_command))\n        self._listing_type = 'mlsd'\n    except FTPServerError as error:\n        if (error.reply_code in (ReplyCodes.syntax_error_command_unrecognized, ReplyCodes.command_not_implemented)):\n            self._listing_type = None\n        else:\n            raise\n    if (not self._listing_type):\n        (yield from self._begin_stream(list_command))\n        self._listing_type = 'list'\n    _logger.debug('Listing type is %s', self._listing_type)\n    self._session_state = SessionState.directory_request_sent\n    return response", "docstring": "Fetch a file listing.\n\nArgs:\nrequest: Request.\n\nReturns:\nA listing response populated with the initial data connection\nreply.\n\nOnce the response is received, call :meth:`download_listing`.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def get_version_string(version):\n    \n    version_len = len(version)\n    if version_len == 3:\n        version_string = '%d.%d.%d' % version\n    elif version_len == 4:\n        version_string = '%d.%d.%d-%s' % version\n    else:\n        raise Exception(\n            'Version tuple is non-semver-compliant {} length!'.format(version_len)\n        )\n    return version_string", "docstring": "Translate a version tuple into a string.\n\nSpecify the __version__ as a tuple for more precise comparisons, and\ntranslate it to __version_string__ for when that's needed.\n\nThis function exists primarily for easier unit testing.\n\nArgs:\nversion (Tuple[int, int, int, str]): three ints and an optional string.\n\nReturns:\nversion_string (str): the tuple translated into a string per semver.org", "source": "juraj-google-style"}
{"code": "def _summary(self, name, tensor):\n    \n    if tensor.shape.ndims == 0:\n      return tf.summary.scalar(name, tensor)\n    else:\n      return tf.summary.histogram(name, tensor)", "docstring": "Create a scalar or histogram summary matching the rank of the tensor.\n\nArgs:\nname: Name for the summary.\ntensor: Tensor to summarize.\n\nReturns:\nSummary tensor.", "source": "juraj-google-style"}
{"code": "def __init__(self, debug=False):\n    \n    self.debug = debug\n    facility = logging.handlers.SysLogHandler.LOG_DAEMON\n    self.logger = logger.Logger(\n        name='instance-setup', debug=self.debug, facility=facility)\n    self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)\n    self.metadata_dict = None\n    self.instance_config = instance_config.InstanceConfig(logger=self.logger)\n\n    if self.instance_config.GetOptionBool('InstanceSetup', 'network_enabled'):\n      self.metadata_dict = self.watcher.GetMetadata()\n      instance_config_metadata = self._GetInstanceConfig()\n      self.instance_config = instance_config.InstanceConfig(\n          logger=self.logger, instance_config_metadata=instance_config_metadata)\n      if self.instance_config.GetOptionBool('InstanceSetup', 'set_host_keys'):\n        host_key_types = self.instance_config.GetOptionString(\n            'InstanceSetup', 'host_key_types')\n        self._SetSshHostKeys(host_key_types=host_key_types)\n      if self.instance_config.GetOptionBool('InstanceSetup', 'set_boto_config'):\n        self._SetupBotoConfig()\n    if self.instance_config.GetOptionBool(\n        'InstanceSetup', 'optimize_local_ssd'):\n      self._RunScript('google_optimize_local_ssd')\n    if self.instance_config.GetOptionBool('InstanceSetup', 'set_multiqueue'):\n      self._RunScript('google_set_multiqueue')\n    try:\n      self.instance_config.WriteConfig()\n    except (IOError, OSError) as e:\n      self.logger.warning(str(e))", "docstring": "Constructor.\n\nArgs:\ndebug: bool, True if debug output should write to the console.", "source": "juraj-google-style"}
{"code": "def _normalize_angle(angle, range, step):\n    while (angle <= range[0]):\n        angle += step\n    while (angle >= range[1]):\n        angle -= step\n    return angle", "docstring": "Finds an angle that matches the given one modulo step.\n\nIncrements and decrements the given value with a given step.\n\nArgs:\nrange: a 2-tuple of min and max target values.\nstep: tuning step.\n\nReturns:\nNormalized value within a given range.", "source": "codesearchnet"}
{"code": "def __rfloordiv__(self, other):\n        \n        other = as_dimension(other)\n        if self._value is None or other.value is None:\n            return Dimension(None)\n        else:\n            return Dimension(other.value", "docstring": "Returns the quotient of `other` and `self` rounded down.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA `Dimension` whose value is the integer quotient of `self` and `other`.", "source": "juraj-google-style"}
{"code": "def to_tensor(pic):\n    if (not (_is_pil_image(pic) or _is_numpy_image(pic))):\n        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n    if isinstance(pic, np.ndarray):\n        if (pic.ndim == 2):\n            pic = pic[(:, :, None)]\n        img = torch.from_numpy(pic.transpose((2, 0, 1)))\n        if isinstance(img, torch.ByteTensor):\n            return img.float().div(255)\n        else:\n            return img\n    if ((accimage is not None) and isinstance(pic, accimage.Image)):\n        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)\n        pic.copyto(nppic)\n        return torch.from_numpy(nppic)\n    if (pic.mode == 'I'):\n        img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n    elif (pic.mode == 'I;16'):\n        img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n    elif (pic.mode == 'F'):\n        img = torch.from_numpy(np.array(pic, np.float32, copy=False))\n    elif (pic.mode == '1'):\n        img = (255 * torch.from_numpy(np.array(pic, np.uint8, copy=False)))\n    else:\n        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n    if (pic.mode == 'YCbCr'):\n        nchannel = 3\n    elif (pic.mode == 'I;16'):\n        nchannel = 1\n    else:\n        nchannel = len(pic.mode)\n    img = img.view(pic.size[1], pic.size[0], nchannel)\n    img = img.transpose(0, 1).transpose(0, 2).contiguous()\n    if isinstance(img, torch.ByteTensor):\n        return img.float().div(255)\n    else:\n        return img", "docstring": "Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\nSee ``ToTensor`` for more details.\n\nArgs:\npic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\nReturns:\nTensor: Converted image.", "source": "codesearchnet"}
{"code": "def residual_block(cnn, depth, stride, pre_activation):\n    input_layer = cnn.top_layer\n    in_size = cnn.top_size\n    if (in_size != depth):\n        shortcut = cnn.apool(1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size)\n        padding = ((depth - in_size) \n        if (cnn.channel_pos == 'channels_last'):\n            shortcut = tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [padding, padding]])\n        else:\n            shortcut = tf.pad(shortcut, [[0, 0], [padding, padding], [0, 0], [0, 0]])\n    else:\n        shortcut = input_layer\n    if pre_activation:\n        res = cnn.batch_norm(input_layer)\n        res = tf.nn.relu(res)\n    else:\n        res = input_layer\n    cnn.conv(depth, 3, 3, stride, stride, input_layer=res, num_channels_in=in_size, use_batch_norm=True, bias=None)\n    if pre_activation:\n        res = cnn.conv(depth, 3, 3, 1, 1, activation=None, use_batch_norm=False, bias=None)\n        output = (shortcut + res)\n    else:\n        res = cnn.conv(depth, 3, 3, 1, 1, activation=None, use_batch_norm=True, bias=None)\n        output = tf.nn.relu((shortcut + res))\n    cnn.top_layer = output\n    cnn.top_size = depth", "docstring": "Residual block with identity short-cut.\n\nArgs:\ncnn: the network to append residual blocks.\ndepth: the number of output filters for this residual block.\nstride: Stride used in the first layer of the residual block.\npre_activation: use pre_activation structure or not.", "source": "codesearchnet"}
{"code": "def __init__(self, key_uri_supplier):\n        \n        self._key_uri_supplier = key_uri_supplier\n        self._jwks_cache = cache.make_region().configure(\n            u\"dogpile.cache.memory\", expiration_time=datetime.timedelta(minutes=5))", "docstring": "Constructs an instance of JwksSupplier.\n\nArgs:\nkey_uri_supplier: a KeyUriSupplier instance that returns the `jwks_uri`\nbased on the given issuer.", "source": "juraj-google-style"}
{"code": "def MakePmfFromDict(d, name=''):\n    pmf = Pmf(d, name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a PMF from a map from values to probabilities.\n\nArgs:\nd: dictionary that maps values to probabilities\nname: string name for this PMF\n\nReturns:\nPmf object", "source": "codesearchnet"}
{"code": "def __init__(self, env, actions):\n        \n        super(BinarySpaceToDiscreteSpaceEnv, self).__init__(env)\n        \n        self.action_space = gym.spaces.Discrete(len(actions))\n        \n        self._action_map = {}\n        self._action_meanings = {}\n        \n        for action, button_list in enumerate(actions):\n            \n            byte_action = 0\n            \n            for button in button_list:\n                byte_action |= self._button_map[button]\n            \n            self._action_map[action] = byte_action\n            self._action_meanings[action] = ' '.join(button_list)", "docstring": "Initialize a new binary to discrete action space wrapper.\n\nArgs:\nenv (gym.Env): the environment to wrap\nactions (list): an ordered list of actions (as lists of buttons).\nThe index of each button list is its discrete coded value\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def relative_probability(self, l1, l2, c1, c2):\n    if self.site_energies:\n        site_delta_E = (self.site_energies[l2] - self.site_energies[l1])\n    else:\n        site_delta_E = 0.0\n    if self.nn_energy:\n        delta_nn = ((c2 - c1) - 1)\n        site_delta_E += (delta_nn * self.nn_energy)\n    return metropolis(site_delta_E)", "docstring": "The relative probability for a jump between two sites with specific site types and coordination numbers.\n\nArgs:\nl1 (Str): Site label for the initial site.\nl2 (Str): Site label for the final site.\nc1 (Int): Coordination number for the initial site.\nc2 (Int): Coordination number for the final site.\n\nReturns:\n(Float): The relative probability of this jump occurring.", "source": "codesearchnet"}
{"code": "def total_purge_developed_repo(repodir):\n    assert (repodir is not None)\n    import utool as ut\n    import os\n    repo = ut.util_git.Repo(dpath=repodir)\n    user = os.environ['USER']\n    fmtdict = dict(user=user, modname=repo.modname, reponame=repo.reponame, dpath=repo.dpath, global_site_pkgs=ut.get_global_dist_packages_dir(), local_site_pkgs=ut.get_local_dist_packages_dir(), venv_site_pkgs=ut.get_site_packages_dir())\n    commands = [_.format(**fmtdict) for _ in ['pip uninstall {modname}', 'sudo -H pip uninstall {modname}', 'sudo pip uninstall {modname}', 'easy_install -m {modname}', 'cd {dpath} && python setup.py develop --uninstall', 'sudo chown -R {user}:{user} {dpath}']]\n    print('Normal uninstall commands')\n    print('\\n'.join(commands))\n    possible_link_paths = [_.format(**fmtdict) for _ in ['{dpath}/{modname}.egg-info', '{dpath}/build', '{venv_site_pkgs}/{reponame}.egg-info', '{local_site_pkgs}/{reponame}.egg-info', '{venv_site_pkgs}/{reponame}.egg-info']]\n    from os.path import exists, basename\n    existing_link_paths = [path for path in possible_link_paths]\n    print('\n    for path in existing_link_paths:\n        if exists(path):\n            if (ut.get_file_info(path)['owner'] != user):\n                print('sudo /bin/rm -rf {path}'.format(path=path))\n            else:\n                print('/bin/rm -rf {path}'.format(path=path))\n    print('\n    easyinstall_paths = [_.format(**fmtdict) for _ in ['{venv_site_pkgs}/easy-install.pth', '{local_site_pkgs}/easy-install.pth', '{venv_site_pkgs}/easy-install.pth']]\n    for path in easyinstall_paths:\n        if exists(path):\n            easy_install_list = ut.readfrom(path, verbose=False).strip().split('\\n')\n            easy_install_list_ = [basename(p) for p in easy_install_list]\n            index1 = ut.listfind(easy_install_list_, repo.reponame)\n            index2 = ut.listfind(easy_install_list_, repo.modname)\n            if ((index1 is not None) or (index2 is not None)):\n                print(('Found at index1=%r, index=%r' % (index1, index2)))\n                if (ut.get_file_info(path)['owner'] != user):\n                    print('sudo gvim {path}'.format(path=path))\n                else:\n                    print('gvim {path}'.format(path=path))\n    checkcmds = [_.format(**fmtdict) for _ in ['python -c \"import {modname}; print({modname}.__file__)\"']]\n    import sys\n    assert (repo.modname not in sys.modules)\n    print('\n    for cmd in checkcmds:\n        print(cmd)", "docstring": "r\"\"\"\nOutputs commands to help purge a repo\n\nArgs:\nrepodir (str): path to developed repository\n\nCommandLine:\npython -m utool.util_sysreq total_purge_installed_repo --show\n\nIgnore:\nrepodir = ut.truepath('~/code/Lasagne')\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_sysreq import *  # NOQA\n>>> import utool as ut\n>>> repodir = ut.get_argval('--repodir', default=None)\n>>> result = total_purge_installed_repo(repodir)", "source": "codesearchnet"}
{"code": "def update_subscription(self, *, subscription_id, credit_card_token):\n    payload = {'creditCardToken': credit_card_token}\n    fmt = 'subscriptions/{}'.format(subscription_id)\n    return self.client._put((self.url + fmt), json=payload, headers=self.get_headers())", "docstring": "Update information associated with the specified subscription. At the moment it is only possible\nto update the token of the credit card to which the charge of the subscription is made.\n\nArgs:\nsubscription_id: Identification of the subscription.\ncredit_card_token:\n\nReturns:", "source": "codesearchnet"}
{"code": "def add_event(self, event):\n    if not self._closed:\n        event_pb = event.SerializeToString()\n        self._session.run(self._add_event_op, feed_dict={self._event_placeholder: event_pb})", "docstring": "Adds an event to the event file.\n\nArgs:\nevent: An `Event` protocol buffer.", "source": "github-repos"}
{"code": "def get_canonical_import(import_set):\n    import_list = sorted(import_set, key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0]))\n    return import_list[0][0]", "docstring": "Obtain one single import from a set of possible sources of a symbol.\n\nOne symbol might come from multiple places as it is being imported and\nreexported. To simplify API changes, we always use the same import for the\nsame module, and give preference based on higher priority and alphabetical\nordering.\n\nArgs:\nimport_set: (set) Imports providing the same symbol. This is a set of tuples\nin the form (import, priority). We want to pick an import with highest\npriority.\n\nReturns:\nA module name to import", "source": "github-repos"}
{"code": "def run_query_series(queries, conn):\n    \n    results = []\n    for item in queries:\n        qry = item\n        kwargs = {}\n        if isinstance(item, tuple):\n            qry = item[0]\n            kwargs = item[1]\n        result = conn.update_query(qry, **kwargs)\n        \n        results.append(result)\n    return results", "docstring": "Iterates through a list of queries and runs them through the connection\n\nArgs:\n-----\nqueries: list of strings or tuples containing (query_string, kwargs)\nconn: the triplestore connection to use", "source": "juraj-google-style"}
{"code": "def GetMapLocation(self):\n    raise NotImplementedError('%s must implement this method!' % self.__class__.__name__)", "docstring": "Return the location of the Map in this cache.\n\nThis is used by automount maps so far, and must be implemented in the\nchild class only if it is to support automount maps.\n\nRaises:\nNotImplementedError:  We should have been implemented by child.", "source": "github-repos"}
{"code": "def get_sequence_sliding_window_properties(self, scale, window, representative_only=True):\n        \n        if representative_only:\n            \n            if not self.representative_sequence:\n                log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))\n                return\n\n            \n            if not self.representative_sequence.seq:\n                log.warning('{}: representative sequence {} set, but no sequence stored. '\n                            'Cannot get sequence properties.'.format(self.id, self.representative_sequence.id))\n                return\n\n            self.representative_sequence.get_sliding_window_properties(scale=scale, window=window)\n\n        if not representative_only:\n            for s in self.sequences:\n                \n                if not s.seq:\n                    log.warning('{}: no sequence stored. '\n                                'Cannot get sequence properties.'.format(s.id))\n                    continue\n\n                else:\n                    s.get_sliding_window_properties(scale=scale, window=window)", "docstring": "Run Biopython ProteinAnalysis with a sliding window to calculate a given property.\nResults are stored in the protein's respective SeqProp objects at ``.letter_annotations``\n\nArgs:\nscale (str): Scale name\nwindow (int): Sliding window size\nrepresentative_only (bool): If analysis should only be run on the representative sequence", "source": "juraj-google-style"}
{"code": "def reserve_ids(self, token, channel, quantity):\n        \n        quantity = str(quantity)\n        url = self.url(\"{}/{}/reserve/{}/\".format(token, channel, quantity))\n        req = self.remote_utils.get_url(url)\n        if req.status_code is not 200:\n            raise RemoteDataNotFoundError('Invalid req: ' + req.status_code)\n        out = req.json()\n        return [out[0] + i for i in range(out[1])]", "docstring": "Requests a list of next-available-IDs from the server.\n\nArguments:\nquantity (int): The number of IDs to reserve\n\nReturns:\nint[quantity]: List of IDs you've been granted", "source": "juraj-google-style"}
{"code": "def match_urls_to_resources(self, url_values):\n        \n        valid_values = {}\n        for resource in self.Meta.related_resources:\n            for k, v in url_values.items():\n                resource_url = resource.get_resource_url(\n                    resource, resource.Meta.base_url)\n                if isinstance(v, list):\n                    if all([resource_url in i for i in v]):\n                        self.set_related_method(resource, v)\n                        valid_values[k] = v\n                elif resource_url in v:\n                    self.set_related_method(resource, v)\n                    valid_values[k] = v\n        return valid_values", "docstring": "For the list of valid URLs, try and match them up\nto resources in the related_resources attribute.\n\nArgs:\nurl_values: A dictionary of keys and URL strings that\ncould be related resources.\nReturns:\nvalid_values: The values that are valid", "source": "juraj-google-style"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets the specified routine resource by routine ID.\n\nArgs:\nrequest: (BigqueryRoutinesGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Routine) The response message.", "source": "github-repos"}
{"code": "def SetUseSSL(self, use_ssl):\n    \n    self._use_ssl = use_ssl\n    logger.debug('Elasticsearch use_ssl: {0!s}'.format(use_ssl))", "docstring": "Sets the use of ssl.\n\nArgs:\nuse_ssl (bool): enforces use of ssl.", "source": "juraj-google-style"}
{"code": "def set_flat(self, new_weights):\n        \n        self._check_sess()\n        shapes = [v.get_shape().as_list() for v in self.variables.values()]\n        arrays = unflatten(new_weights, shapes)\n        placeholders = [\n            self.placeholders[k] for k, v in self.variables.items()\n        ]\n        self.sess.run(\n            list(self.assignment_nodes.values()),\n            feed_dict=dict(zip(placeholders, arrays)))", "docstring": "Sets the weights to new_weights, converting from a flat array.\n\nNote:\nYou can only set all weights in the network using this function,\ni.e., the length of the array must match get_flat_size.\n\nArgs:\nnew_weights (np.ndarray): Flat array containing weights.", "source": "juraj-google-style"}
{"code": "def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):\n    if self.is_requestable():\n        self._check_required_fields('dataset-requestable', ignore_fields)\n    else:\n        self._check_required_fields('dataset', ignore_fields)\n        if ((len(self.resources) == 0) and (not allow_no_resources)):\n            raise HDXError('There are no resources! Please add at least one resource!')\n        for resource in self.resources:\n            ignore_fields = ['package_id']\n            resource.check_required_fields(ignore_fields=ignore_fields)", "docstring": "Check that metadata for dataset and its resources is complete. The parameter ignore_fields\nshould be set if required to any fields that should be ignored for the particular operation.\n\nArgs:\nignore_fields (List[str]): Fields to ignore. Default is [].\nallow_no_resources (bool): Whether to allow no resources. Defaults to False.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_colour_handler(extranames: List[str]=None, with_process_id: bool=False, with_thread_id: bool=False, stream: TextIO=None) -> logging.StreamHandler:\n    fmt = '%(white)s%(asctime)s.%(msecs)03d'\n    if (with_process_id or with_thread_id):\n        procinfo = []\n        if with_process_id:\n            procinfo.append('p%(process)d')\n        if with_thread_id:\n            procinfo.append('t%(thread)d')\n        fmt += ' [{}]'.format('.'.join(procinfo))\n    extras = ((':' + ':'.join(extranames)) if extranames else '')\n    fmt += ' %(name)s{extras}:%(levelname)s: '.format(extras=extras)\n    fmt += '%(reset)s%(log_color)s%(message)s'\n    cf = ColoredFormatter(fmt, datefmt=LOG_DATEFMT, reset=True, log_colors=LOG_COLORS, secondary_log_colors={}, style='%')\n    ch = logging.StreamHandler(stream)\n    ch.setFormatter(cf)\n    return ch", "docstring": "Gets a colour log handler using a standard format.\n\nArgs:\nextranames: additional names to append to the logger's name\nwith_process_id: include the process ID in the logger's name?\nwith_thread_id: include the thread ID in the logger's name?\nstream: ``TextIO`` stream to send log output to\n\nReturns:\nthe :class:`logging.StreamHandler`", "source": "codesearchnet"}
{"code": "def grid_destroy_from_name(job_name):\n    jobs = grid_reload_from_name(job_name)\n    for job in jobs:\n        job.delete()\n        logger.info(('Killing the job (%s, %s)' % (job.site, job.uid)))", "docstring": "Destroy all the jobs with a given name.\n\nArgs:\njob_name (str): the job name", "source": "codesearchnet"}
{"code": "def __init__(self, message, error_list, launched_job):\n    \n    super(JobError, self).__init__(message)\n    self.message = message\n    self.error_list = error_list\n    self.launched_job = launched_job", "docstring": "Create a JobError to indicate something went wrong.\n\nArgs:\nmessage: user-friendly message\nerror_list: what went wrong\nlaunched_job: if the job is launched, but has errors in\n\"--wait\"ing on the tasks.", "source": "juraj-google-style"}
{"code": "def handle_worker_messages(self, timeout):\n        \n        msgs = self.messaging_backend.popn(self.incoming_mailbox, n=20)\n\n        for msg in msgs:\n            self.handle_single_message(msg)", "docstring": "Read messages that are placed in self.incoming_mailbox,\nand then update the job states corresponding to each message.\n\nArgs:\ntimeout: How long to wait for an incoming message, if the mailbox is empty right now.\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def condensed(network, state):\n    \n    result = []\n    covered_nodes = set()\n\n    for c in reversed(sorted(complexes(network, state))):\n        if not any(n in covered_nodes for n in c.subsystem.node_indices):\n            result.append(c)\n            covered_nodes = covered_nodes | set(c.subsystem.node_indices)\n\n    return result", "docstring": "Return a list of maximal non-overlapping complexes.\n\nArgs:\nnetwork (Network): The |Network| of interest.\nstate (tuple[int]): The state of the network (a binary tuple).\n\nReturns:\nlist[SystemIrreducibilityAnalysis]: A list of |SIA| for non-overlapping\ncomplexes with maximal |big_phi| values.", "source": "juraj-google-style"}
{"code": "def enable(self, key_id, **kwargs):\n    path = ('%s/%s/enable' % (self.path, key_id))\n    self.gitlab.http_post(path, **kwargs)", "docstring": "Enable a deploy key for a project.\n\nArgs:\nkey_id (int): The ID of the key to enable\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabProjectDeployKeyError: If the key could not be enabled", "source": "codesearchnet"}
{"code": "def create_error_response(code, message, status=status.BAD_REQUEST):\n    errors = dict(code=code, message=message)\n    return Response(errors=errors, status=status)", "docstring": "Create a fail response.\n\nArgs:\ncode (str): the code of the error. The title should be lowercase and\nunderscore separated.\nmessage (dict, list, str): the message of the error.\nThis can be a list, dictionary or simple string.\nstatus (int): the status code. Defaults to 400.\n\nReturns:\nResponse: the response with the error. The format of the error is the\nfollowing: code and message. The code could be `user_error` or\n`internal_error`. The message contains either a string, or a list\nor a dictionary. If not specify, the status will be a 400.", "source": "codesearchnet"}
{"code": "def __init__(self, use_variable):\n    super().__init__()\n    w_val = np.random.randn(128, 32).astype('f4')\n    if use_variable:\n        self.w = variables.Variable(w_val)\n    else:\n        self.w = w_val", "docstring": "Initializes a GatherModel.\n\nArgs:\nuse_variable: If True, creates a variable for weight.", "source": "github-repos"}
{"code": "def get_scene(self, label: str) -> Scene:\n        \n        return self._get_resource(label, self._scenes, \"scene\")", "docstring": "Gets a scene by label\n\nArgs:\nlabel (str): The label for the scene to fetch\n\nReturns:\nScene instance", "source": "juraj-google-style"}
{"code": "def default_compute_objective(metrics: dict[str, float]) -> float:\n    metrics = copy.deepcopy(metrics)\n    loss = metrics.pop('eval_loss', None)\n    _ = metrics.pop('epoch', None)\n    speed_metrics = [m for m in metrics.keys() if m.endswith('_runtime') or m.endswith('_per_second') or m.endswith('_compilation_time')]\n    for sm in speed_metrics:\n        _ = metrics.pop(sm, None)\n    return loss if len(metrics) == 0 else sum(metrics.values())", "docstring": "The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no\nmetrics are provided to the [`Trainer`], the sum of all metrics otherwise.\n\nArgs:\nmetrics (`Dict[str, float]`): The metrics returned by the evaluate method.\n\nReturn:\n`float`: The objective to minimize or maximize", "source": "github-repos"}
{"code": "def stop(self, wait=True):\n    for context in self._applications.values():\n        context.run_unload_hook()\n    self._stats_job.stop()\n    if (self._mem_job is not None):\n        self._mem_job.stop()\n    self._cleanup_job.stop()\n    if (self._ping_job is not None):\n        self._ping_job.stop()\n    self._clients.clear()", "docstring": "Stop the Bokeh Server application.\n\nArgs:\nwait (bool): whether to wait for orderly cleanup (default: True)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, host: str, port: int, command: Optional[str]=None, batch_size: int=100):\n    self._host = host\n    self._port = port\n    self._command = command\n    self._batch_size = batch_size", "docstring": "Args:\nhost (str): The redis host\nport (int): The redis port\ncommand (str): command to be executed with redis client\nbatch_size(int): Number of key, values pairs to write at once\n\nReturns:\n:class:`~apache_beam.transforms.ptransform.PTransform`", "source": "github-repos"}
{"code": "def _update_service_current_state(service: ServiceState):\n    LOG.debug('Setting current state from target state for %s', service.id)\n    service.update_current_state(service.target_state)", "docstring": "Update the current state of a service.\n\nUpdates the current state of services after their target state has changed.\n\nArgs:\nservice (ServiceState): Service state object to update", "source": "codesearchnet"}
{"code": "def _collect_leaf_level_keys(cross):\n    leaf_level_keys = []\n    for k in cross.keys:\n        if isinstance(k, CrossedColumn):\n            leaf_level_keys.extend(_collect_leaf_level_keys(k))\n        else:\n            leaf_level_keys.append(k)\n    return leaf_level_keys", "docstring": "Collects base keys by expanding all nested crosses.\n\nArgs:\ncross: A `CrossedColumn`.\n\nReturns:\nA list of strings or `CategoricalColumn` instances.", "source": "github-repos"}
{"code": "def _get_or_make_slot(self, var, val, slot_name, op_name):\n    named_slots = self._slot_dict(slot_name)\n    if _var_key(var) not in named_slots:\n        new_slot_variable = slot_creator.create_slot(var, val, op_name, copy_xla_sharding=True)\n        self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n        named_slots[_var_key(var)] = new_slot_variable\n    return named_slots[_var_key(var)]", "docstring": "Find or create a slot for a variable.\n\nArgs:\nvar: A `Variable` object.\nval: A `Tensor`.  The initial value of the slot.\nslot_name: Name for the slot.\nop_name: Name to use when scoping the Variable that\nneeds to be created for the slot.\n\nReturns:\nA `Variable` object.", "source": "github-repos"}
{"code": "def parse_responses(self):\n    response_dict = {'multicast_ids': [], 'success': 0, 'failure': 0, 'canonical_ids': 0, 'results': [], 'topic_message_id': None}\n    for response in self.send_request_responses:\n        if (response.status_code == 200):\n            if (('content-length' in response.headers) and (int(response.headers['content-length']) <= 0)):\n                raise FCMServerError('FCM server connection error, the response is empty')\n            else:\n                parsed_response = response.json()\n                multicast_id = parsed_response.get('multicast_id', None)\n                success = parsed_response.get('success', 0)\n                failure = parsed_response.get('failure', 0)\n                canonical_ids = parsed_response.get('canonical_ids', 0)\n                results = parsed_response.get('results', [])\n                message_id = parsed_response.get('message_id', None)\n                if message_id:\n                    success = 1\n                if multicast_id:\n                    response_dict['multicast_ids'].append(multicast_id)\n                response_dict['success'] += success\n                response_dict['failure'] += failure\n                response_dict['canonical_ids'] += canonical_ids\n                response_dict['results'].extend(results)\n                response_dict['topic_message_id'] = message_id\n        elif (response.status_code == 401):\n            raise AuthenticationError('There was an error authenticating the sender account')\n        elif (response.status_code == 400):\n            raise InvalidDataError(response.text)\n        else:\n            raise FCMServerError('FCM server is temporarily unavailable')\n    return response_dict", "docstring": "Parses the json response sent back by the server and tries to get out the important return variables\n\nReturns:\ndict: multicast_ids (list), success (int), failure (int), canonical_ids (int),\nresults (list) and optional topic_message_id (str but None by default)\n\nRaises:\nFCMServerError: FCM is temporary not available\nAuthenticationError: error authenticating the sender account\nInvalidDataError: data passed to FCM was incorrecly structured", "source": "codesearchnet"}
{"code": "def infer_location(self, location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit):\n    self.location_from = infer_location(self.points[0], location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit)\n    self.location_to = infer_location(self.points[(- 1)], location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit)\n    return self", "docstring": "In-place location inferring\n\nSee infer_location function\n\nArgs:\nReturns:\n:obj:`Segment`: self", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    use_zeromq = getattr(options, 'use_zeromq', True)\n\n    setattr(configuration_object, '_use_zeromq', use_zeromq)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def is_possible_workdir(path):\n        \n        res = False\n        trails = ['initialized', 'uuid']\n        try:\n            res = all(\n                os.path.isfile(os.path.join(path, 'current', trail))\n                for trail in trails\n            )\n        except:\n            pass\n        return res", "docstring": "A quick method to suggest if the path is a possible workdir.\nThis does not guarantee that the workdir is not malformed, only that by\nsimple heuristics it might be one.\nFor a full check use :func:`is_workdir`.\n\nArgs:\npath(str): Path\n\nReturns:\nbool: True if ``path`` might be a work dir.", "source": "juraj-google-style"}
{"code": "def copy(x):\n    if any_symbolic_tensors((x,)):\n        return Copy().symbolic_call(x)\n    return backend.numpy.copy(x)", "docstring": "Returns a copy of `x`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA copy of `x`.", "source": "github-repos"}
{"code": "def get_userid_from_botid(self, botid):\n        \n        botinfo = self.slack_client.api_call('bots.info', bot=botid)\n        if botinfo['ok'] is True:\n            return botinfo['bot'].get('user_id')\n        else:\n            return botid", "docstring": "Perform a lookup of bots.info to resolve a botid to a userid\n\nArgs:\nbotid (string): Slack botid to lookup.\nReturns:\nstring: userid value", "source": "juraj-google-style"}
{"code": "def set_monitor_timeout(timeout, power='ac', scheme=None):\n    return _set_powercfg_value(scheme=scheme, sub_group='SUB_VIDEO', setting_guid='VIDEOIDLE', power=power, value=timeout)", "docstring": "Set the monitor timeout in minutes for the given power scheme\n\nArgs:\ntimeout (int):\nThe amount of time in minutes before the monitor will timeout\n\npower (str):\nSet the value for AC or DC power. Default is ``ac``. Valid options\nare:\n\n- ``ac`` (AC Power)\n- ``dc`` (Battery)\n\nscheme (str):\nThe scheme to use, leave as ``None`` to use the current. Default is\n``None``. This can be the GUID or the Alias for the Scheme. Known\nAliases are:\n\n- ``SCHEME_BALANCED`` - Balanced\n- ``SCHEME_MAX`` - Power saver\n- ``SCHEME_MIN`` - High performance\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\n# Sets the monitor timeout to 30 minutes\nsalt '*' powercfg.set_monitor_timeout 30", "source": "codesearchnet"}
{"code": "def list(self):\n    resp = self.client.api.plugins()\n    return [self.prepare_model(r) for r in resp]", "docstring": "List plugins installed on the server.\n\nReturns:\n(list of :py:class:`Plugin`): The plugins.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def export_aliases(export_path=None, exclusions=None):\n    if (not export_path):\n        export_path = os.path.abspath(ALIAS_FILE_NAME)\n    alias_table = get_alias_table()\n    for exclusion in (exclusions or []):\n        if (exclusion not in alias_table.sections()):\n            raise CLIError(ALIAS_NOT_FOUND_ERROR.format(exclusion))\n        alias_table.remove_section(exclusion)\n    _commit_change(alias_table, export_path=export_path, post_commit=False)\n    logger.warning(POST_EXPORT_ALIAS_MSG, export_path)", "docstring": "Export all registered aliases to a given path, as an INI configuration file.\n\nArgs:\nexport_path: The path of the alias configuration file to export to.\nexclusions: Space-separated aliases excluded from export.", "source": "codesearchnet"}
{"code": "def _ParsePage(self, parser_mediator, file_offset, page_data):\n    page_header_map = self._GetDataTypeMap('binarycookies_page_header')\n    try:\n        page_header = self._ReadStructureFromByteStream(page_data, file_offset, page_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map page header data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    for record_offset in page_header.offsets:\n        if parser_mediator.abort:\n            break\n        self._ParseRecord(parser_mediator, page_data, record_offset)", "docstring": "Parses a page.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_offset (int): offset of the data relative from the start of\nthe file-like object.\npage_data (bytes): page data.\n\nRaises:\nParseError: when the page cannot be parsed.", "source": "codesearchnet"}
{"code": "def create_executable_script(filepath, body, program=None):\n    \n    program = program or \"python\"\n    if callable(body):\n        from rez.utils.sourcecode import SourceCode\n        code = SourceCode(func=body)\n        body = code.source\n\n    if not body.endswith('\\n'):\n        body += '\\n'\n\n    with open(filepath, 'w') as f:\n        \n        f.write(\"\n        f.write(body)\n\n    \n    \n    \n    \n    if os.name == \"posix\":\n    \tos.chmod(filepath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH\n             | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)", "docstring": "Create an executable script.\n\nArgs:\nfilepath (str): File to create.\nbody (str or callable): Contents of the script. If a callable, its code\nis used as the script body.\nprogram (str): Name of program to launch the script, 'python' if None", "source": "juraj-google-style"}
{"code": "def _split_into_groups(n, max_group_size, mesh_dim_size):\n    if ((n % mesh_dim_size) != 0):\n        raise ValueError(('n=%d is not a multiple of mesh_dim_size=%d' % (n, mesh_dim_size)))\n    num_groups = max(1, (n \n    while (((num_groups % mesh_dim_size) != 0) or ((n % num_groups) != 0)):\n        num_groups += 1\n    group_size = (n \n    tf.logging.info(('_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d) = (num_groups=%d group_size=%d)' % (n, max_group_size, mesh_dim_size, num_groups, group_size)))\n    return (num_groups, group_size)", "docstring": "Helper function for figuring out how to split a dimensino into groups.\n\nWe have a dimension with size n and we want to split it into\ntwo dimensions: n = num_groups * group_size\n\ngroup_size should be the largest possible value meeting the constraints:\ngroup_size <= max_group_size\n(num_groups = n/group_size) is a multiple of mesh_dim_size\n\nArgs:\nn: an integer\nmax_group_size: an integer\nmesh_dim_size: an integer\n\nReturns:\nnum_groups: an integer\ngroup_size: an integer\n\nRaises:\nValueError: if n is not a multiple of mesh_dim_size", "source": "codesearchnet"}
{"code": "class DepthAnythingFeatureFusionLayer(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)\n        self.residual_layer1 = DepthAnythingPreActResidualLayer(config)\n        self.residual_layer2 = DepthAnythingPreActResidualLayer(config)\n\n    def forward(self, hidden_state, residual=None, size=None):\n        if residual is not None:\n            if hidden_state.shape != residual.shape:\n                residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False)\n            hidden_state = hidden_state + self.residual_layer1(residual)\n        hidden_state = self.residual_layer2(hidden_state)\n        modifier = {'scale_factor': 2} if size is None else {'size': size}\n        hidden_state = nn.functional.interpolate(hidden_state, **modifier, mode='bilinear', align_corners=True)\n        hidden_state = self.projection(hidden_state)\n        return hidden_state", "docstring": "Feature fusion layer, merges feature maps from different stages.\n\nArgs:\nconfig (`[DepthAnythingConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "class LogElements(PTransform):\n\n    class _LoggingFn(DoFn):\n\n        def __init__(self, prefix='', with_timestamp=False, with_window=False, level=None):\n            super().__init__()\n            self.prefix = prefix\n            self.with_timestamp = with_timestamp\n            self.with_window = with_window\n            self.level = level\n\n        def process(self, element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam, **kwargs):\n            log_line = self.prefix + str(element)\n            if self.with_timestamp:\n                log_line += ', timestamp=' + repr(timestamp.to_rfc3339())\n            if self.with_window:\n                log_line += ', window(start=' + window.start.to_rfc3339()\n                log_line += ', end=' + window.end.to_rfc3339() + ')'\n            if self.level == logging.DEBUG:\n                logging.debug(log_line)\n            elif self.level == logging.INFO:\n                logging.info(log_line)\n            elif self.level == logging.WARNING:\n                logging.warning(log_line)\n            elif self.level == logging.ERROR:\n                logging.error(log_line)\n            elif self.level == logging.CRITICAL:\n                logging.critical(log_line)\n            else:\n                print(log_line)\n            yield element\n\n    def __init__(self, label=None, prefix='', with_timestamp=False, with_window=False, level=None):\n        super().__init__(label)\n        self.prefix = prefix\n        self.with_timestamp = with_timestamp\n        self.with_window = with_window\n        self.level = level\n\n    def expand(self, input):\n        return input | ParDo(self._LoggingFn(self.prefix, self.with_timestamp, self.with_window, self.level))", "docstring": "PTransform for printing the elements of a PCollection.\n\nArgs:\nlabel (str): (optional) A custom label for the transform.\nprefix (str): (optional) A prefix string to prepend to each logged element.\nwith_timestamp (bool): (optional) Whether to include element's timestamp.\nwith_window (bool): (optional) Whether to include element's window.\nlevel: (optional) The logging level for the output (e.g. `logging.DEBUG`,\n`logging.INFO`, `logging.WARNING`, `logging.ERROR`). If not specified,\nthe log is printed to stdout.", "source": "github-repos"}
{"code": "def create_timer(cb: Callable[([float], None)], interval: float, delay_policy: TimerDelayPolicy=TimerDelayPolicy.DEFAULT, loop: Optional[asyncio.BaseEventLoop]=None) -> asyncio.Task:\n    if (not loop):\n        loop = asyncio.get_event_loop()\n\n    async def _timer():\n        fired_tasks = []\n        try:\n            while True:\n                if (delay_policy == TimerDelayPolicy.CANCEL):\n                    for t in fired_tasks:\n                        if (not t.done()):\n                            t.cancel()\n                            (await t)\n                    fired_tasks.clear()\n                else:\n                    fired_tasks[:] = [t for t in fired_tasks if (not t.done())]\n                t = loop.create_task(cb(interval=interval))\n                fired_tasks.append(t)\n                (await asyncio.sleep(interval))\n        except asyncio.CancelledError:\n            for t in fired_tasks:\n                t.cancel()\n            (await asyncio.gather(*fired_tasks))\n    return loop.create_task(_timer())", "docstring": "Schedule a timer with the given callable and the interval in seconds.\nThe interval value is also passed to the callable.\nIf the callable takes longer than the timer interval, all accumulated\ncallable's tasks will be cancelled when the timer is cancelled.\n\nArgs:\ncb: TODO - fill argument descriptions\n\nReturns:\nYou can stop the timer by cancelling the returned task.", "source": "codesearchnet"}
{"code": "def build_from_items(self, items: list[_ItemType] | None) -> imports_map.ImportsMap | None:\n    if not items:\n        return None\n    imports_multimap = self._build_multimap(items)\n    assert imports_multimap is not None\n    return self._finalize(imports_multimap)", "docstring": "Create a file mapping from a list of (short path, path) tuples.\n\nBuilds a dict of short_path to full name\n(e.g. \"path/to/file.py\" =>\n\"$GENDIR/rulename~~pytype-gen/path_to_file.py~~pytype\"\nArgs:\nitems: A list of (short_path, full_path) tuples.\n\nReturns:\nDict of .py short_path to list of .pytd path or None if no items", "source": "github-repos"}
{"code": "def mg_refractive(m, mix):\n    if (len(m) == 2):\n        cF = (((float(mix[1]) / (mix[0] + mix[1])) * ((m[1] ** 2) - (m[0] ** 2))) / ((m[1] ** 2) + (2 * (m[0] ** 2))))\n        er = (((m[0] ** 2) * (1.0 + (2.0 * cF))) / (1.0 - cF))\n        m = np.sqrt(er)\n    else:\n        m_last = mg_refractive(m[(- 2):], mix[(- 2):])\n        mix_last = (mix[(- 2)] + mix[(- 1)])\n        m = mg_refractive((m[:(- 2)] + (m_last,)), (mix[:(- 2)] + (mix_last,)))\n    return m", "docstring": "Maxwell-Garnett EMA for the refractive index.\n\nArgs:\nm: Tuple of the complex refractive indices of the media.\nmix: Tuple of the volume fractions of the media, len(mix)==len(m)\n(if sum(mix)!=1, these are taken relative to sum(mix))\n\nReturns:\nThe Maxwell-Garnett approximation for the complex refractive index of\nthe effective medium\n\nIf len(m)==2, the first element is taken as the matrix and the second as\nthe inclusion. If len(m)>2, the media are mixed recursively so that the\nlast element is used as the inclusion and the second to last as the\nmatrix, then this mixture is used as the last element on the next\niteration, and so on.", "source": "codesearchnet"}
{"code": "def pretty_str(something, indent=0):\n    \n    if isinstance(something, CodeEntity):\n        return something.pretty_str(indent=indent)\n    else:\n        return (' ' * indent) + repr(something)", "docstring": "Return a human-readable string representation of an object.\n\nUses `pretty_str` if the given value is an instance of\n`CodeEntity` and `repr` otherwise.\n\nArgs:\nsomething: Some value to convert.\n\nKwargs:\nindent (int): The amount of spaces to use as indentation.", "source": "juraj-google-style"}
{"code": "def plot_series(filename, plot_kwargs=None):\n    import matplotlib.pyplot as plt\n    if (plot_kwargs is None):\n        plot_kwargs = {}\n    data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])\n    index = data['k']\n    values = data['v']\n    plt.plot(index, values, **plot_kwargs)", "docstring": "Plot series data from MonitorSeries output text file.\n\nArgs:\nfilename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.\nplot_kwags (dict, optional):\nKeyward arguments passed to :function:`matplotlib.pyplot.plot`.\n\nNote:\nmatplotlib package is required.", "source": "codesearchnet"}
{"code": "def empty_like(array, dtype=None, keepmeta=True):\n    if keepmeta:\n        return dc.empty(array.shape, dtype, tcoords=array.dca.tcoords, chcoords=array.dca.chcoords, scalarcoords=array.dca.scalarcoords, attrs=array.attrs, name=array.name)\n    else:\n        return dc.empty(array.shape, dtype)", "docstring": "Create an array of empty with the same shape and type as the input array.\n\nArgs:\narray (xarray.DataArray): The shape and data-type of it define\nthese same attributes of the output array.\ndtype (data-type, optional): If spacified, this function overrides\nthe data-type of the output array.\nkeepmeta (bool, optional): Whether *coords, attrs, and name of the input\narray are kept in the output one. Default is True.\n\nReturns:\narray (decode.array): Decode array without initializing entries.", "source": "codesearchnet"}
{"code": "def sendmail(subject, text, mailto, sender=None):\n\n    def user_at_host():\n        from socket import gethostname\n        return ((os.getlogin() + '@') + gethostname())\n    try:\n        sender = (user_at_host() if (sender is None) else sender)\n    except OSError:\n        sender = 'abipyscheduler@youknowwhere'\n    if is_string(mailto):\n        mailto = [mailto]\n    from email.mime.text import MIMEText\n    mail = MIMEText(text)\n    mail['Subject'] = subject\n    mail['From'] = sender\n    mail['To'] = ', '.join(mailto)\n    msg = mail.as_string()\n    from subprocess import Popen, PIPE\n    import sys\n    sendmail = which('sendmail')\n    if (sendmail is None):\n        return (- 1)\n    if (sys.version_info[0] < 3):\n        p = Popen([sendmail, '-t'], stdin=PIPE, stderr=PIPE)\n    else:\n        p = Popen([sendmail, '-t'], stdin=PIPE, stderr=PIPE, universal_newlines=True)\n    (outdata, errdata) = p.communicate(msg)\n    return len(errdata)", "docstring": "Sends an e-mail with unix sendmail.\n\nArgs:\nsubject: String with the subject of the mail.\ntext: String with the body of the mail.\nmailto: String or list of string with the recipients.\nsender: string with the sender address.\nIf sender is None, username@hostname is used.\n\nReturns:\nExit status", "source": "codesearchnet"}
{"code": "def _init_request_logging(self, app):\n    enabled = (not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False))\n    if (not enabled):\n        return\n    self._requests_middleware = WSGIApplication(self._key, app.wsgi_app, telemetry_channel=self._channel)\n    app.wsgi_app = self._requests_middleware", "docstring": "Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``\nis set in the Flask config.\n\nArgs:\napp (flask.Flask). the Flask application for which to initialize the extension.", "source": "codesearchnet"}
{"code": "def readme_verify():\n    expected = populate_readme(REVISION, RTD_VERSION)\n    with open(README_FILE, 'r') as file_obj:\n        contents = file_obj.read()\n    if (contents != expected):\n        err_msg = ('\\n' + get_diff(contents, expected, 'README.rst.actual', 'README.rst.expected'))\n        raise ValueError(err_msg)\n    else:\n        print('README contents are as expected.')", "docstring": "Populate the template and compare to ``README``.\n\nRaises:\nValueError: If the current README doesn't agree with the expected\nvalue computed from the template.", "source": "codesearchnet"}
{"code": "def update_clinvar_id(self, clinvar_id, submission_id ):\n        \n        updated_submission = self.clinvar_submission_collection.find_one_and_update( {'_id': ObjectId(submission_id)}, { '$set' : {'clinvar_subm_id' : clinvar_id, 'updated_at': datetime.now()} }, upsert=True, return_document=pymongo.ReturnDocument.AFTER )\n        return updated_submission", "docstring": "saves an official clinvar submission ID in a clinvar submission object\n\nArgs:\nclinvar_id(str): a string with a format: SUB[0-9]. It is obtained from clinvar portal when starting a new submission\nsubmission_id(str): submission_id(str) : id of the submission to be updated\n\nReturns:\nupdated_submission(obj): a clinvar submission object, updated", "source": "juraj-google-style"}
{"code": "def shutdown(self, wait=True):\n        \n        self.scheduler_thread.stop()\n        self.worker_message_handler_thread.stop()\n\n        if wait:\n            self.scheduler_thread.join()\n            self.worker_message_handler_thread.join()", "docstring": "Shut down the worker message handler and scheduler threads.\nArgs:\nwait: If true, block until both threads have successfully shut down. If False, return immediately.\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def sg_min(tensor, opt):\n    return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the minimum of elements across axis of a tensor.\n\nSee `tf.reduce_min()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "codesearchnet"}
{"code": "def get_tabular_rows(self, url, dict_rows=False, **kwargs):\n    return self.get_tabular_stream(url, **kwargs).iter(keyed=dict_rows)", "docstring": "Get iterator for reading rows from tabular data. Each row is returned as a dictionary.\n\nArgs:\nurl (str): URL to download\ndict_rows (bool): Return dict (requires headers parameter) or list for each row. Defaults to False (list).\n**kwargs:\nheaders (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers\nfile_type (Optional[str]): Type of file. Defaults to inferring.\ndelimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.\n\nReturns:\nIterator[Union[List,Dict]]: Iterator where each row is returned as a list or dictionary.", "source": "codesearchnet"}
{"code": "def get(account_id, account_type_id=None):\n    if (type(account_id) == str):\n        args = {'account_name': account_id}\n    else:\n        args = {'account_id': account_id}\n    if account_type_id:\n        args['account_type_id'] = account_type_id\n    return db.Account.find_one(**args)", "docstring": "Return account by ID and type\n\nArgs:\naccount_id (`int`, `str`): Unique Account identifier\naccount_type_id (str): Type of account to get\n\nReturns:\n:obj:`Account`: Returns an Account object if found, else None", "source": "codesearchnet"}
{"code": "def decrypt(self, cipherText):\n    decryptedResult = ''\n    for index in range(0, len(cipherText), BLOCK_SIZE):\n        block = cipherText[index:(index + BLOCK_SIZE)]\n        if (len(block) < BLOCK_SIZE):\n            block = zero_pad(block, BLOCK_SIZE)\n        decryptedResult += self.decrypt_block(block)\n    return decryptedResult", "docstring": "Decrypt an arbitrary-length block of data.\n\nNOTE: This function formerly worked only on 16-byte blocks of `cipherText`.\ncode that assumed this should still work fine, but can optionally be\nmodified to call `decrypt_block` instead.\n\nArgs:\ncipherText (str): data to decrypt. If the data is not a multiple of 16\nbytes long, it will be padded with null (0x00) bytes until it is.\nWARNING: This is almost certainty never need to happen for\ncorrectly-encrypted data.\n\nReturns:\ndecrypted data. Note that this will always be a multiple of 16 bytes\nlong. If the original data was not a multiple of 16 bytes, the\nresult will contain trailing null bytes, which can be removed with\n`.rstrip('\\x00')`", "source": "codesearchnet"}
{"code": "def tensor_float_32_execution_enabled():\n    return _pywrap_tensor_float_32_execution.is_enabled()", "docstring": "Returns whether TensorFloat-32 is enabled.\n\nBy default, TensorFloat-32 is enabled, but this can be changed with\n`tf.config.experimental.enable_tensor_float_32_execution`.\n\nReturns:\nTrue if TensorFloat-32 is enabled (the default) and False otherwise", "source": "github-repos"}
{"code": "def resolve(self, sourcepath, paths, library_paths=None):\n    (basedir, filename) = os.path.split(sourcepath)\n    basepaths = [basedir]\n    resolved_paths = []\n    if (library_paths and isinstance(library_paths, string_types) and (library_paths not in basepaths)):\n        basepaths.append(library_paths)\n    elif library_paths:\n        for k in list(library_paths):\n            if (k not in basepaths):\n                basepaths.append(k)\n    for import_rule in paths:\n        candidates = self.candidate_paths(import_rule)\n        stack = []\n        for (i, basepath) in enumerate(basepaths):\n            checked = self.check_candidate_exists(basepath, candidates)\n            if checked:\n                stack.extend(checked)\n        if (len(stack) > 1):\n            raise UnclearResolution(\"rule '{}' This is not clear for these paths: {}\".format(import_rule, ', '.join(stack)))\n        elif (len(stack) == 1):\n            resolved_paths.append(os.path.normpath(stack[0]))\n        elif self.STRICT_PATH_VALIDATION:\n            raise UnresolvablePath(\"Imported path '{}' does not exist in '{}'\".format(import_rule, basedir))\n    return resolved_paths", "docstring": "Resolve given paths from given base paths\n\nReturn resolved path list.\n\nNote:\nResolving strategy is made like libsass do, meaning paths in\nimport rules are resolved from the source file where the import\nrules have been finded.\n\nIf import rule is not explicit enough and two file are candidates\nfor the same rule, it will raises an error. But contrary to\nlibsass, this happen also for files from given libraries in\n``library_paths`` (oposed to libsass just silently taking the\nfirst candidate).\n\nArgs:\nsourcepath (str): Source file path, its directory is used to\nresolve given paths. The path must be an absolute path to\navoid errors on resolving.\npaths (list): Relative paths (from ``sourcepath``) to resolve.\nlibrary_paths (list): List of directory paths for libraries to\nresolve paths if resolving fails on the base source path.\nDefault to None.\n\nRaises:\nUnresolvablePath: If a path does not exist and\n``STRICT_PATH_VALIDATION`` attribute is ``True``.\n\nReturns:\nlist: List of resolved path.", "source": "codesearchnet"}
{"code": "def capability_installed(name, source=None, limit_access=False, image=None, restart=False):\n    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}\n    old = __salt__['dism.installed_capabilities']()\n    if (name in old):\n        ret['comment'] = 'The capability {0} is already installed'.format(name)\n        return ret\n    if __opts__['test']:\n        ret['changes']['capability'] = '{0} will be installed'.format(name)\n        ret['result'] = None\n        return ret\n    status = __salt__['dism.add_capability'](name, source, limit_access, image, restart)\n    if (status['retcode'] not in [0, 1641, 3010]):\n        ret['comment'] = 'Failed to install {0}: {1}'.format(name, status['stdout'])\n        ret['result'] = False\n    new = __salt__['dism.installed_capabilities']()\n    changes = salt.utils.data.compare_lists(old, new)\n    if changes:\n        ret['comment'] = 'Installed {0}'.format(name)\n        ret['changes'] = status\n        ret['changes']['capability'] = changes\n    return ret", "docstring": "Install a DISM capability\n\nArgs:\nname (str): The capability to install\nsource (str): The optional source of the capability\nlimit_access (bool): Prevent DISM from contacting Windows Update for\nonline images\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\nrestart (Optional[bool]): Reboot the machine if required by the install\n\nExample:\nRun ``dism.available_capabilities`` to get a list of available\ncapabilities. This will help you get the proper name to use.\n\n.. code-block:: yaml\n\ninstall_dotnet35:\ndism.capability_installed:\n- name: NetFX3~~~~", "source": "codesearchnet"}
{"code": "def observe(self, value):\n        \n        self._buffer.append(value)\n        if len(self._buffer) == _BUFFER_SIZE:\n            self._flush()", "docstring": "Samples an observation's value.\n\nArgs:\nvalue: A numeric value signifying the value to be sampled.", "source": "juraj-google-style"}
{"code": "def write(self, originalPrefix, newPrefix=None):\n        \n        \n        numSpaces = max(2, 25 - len(self.name))\n\n        \n        if self.value is None:\n            line = '%s\\n' % self.name\n        else:\n            if self.name == 'WMS':\n                line = '%s %s\\n' % (self.name, self.value)\n            elif newPrefix is None:\n                line = '%s%s%s\\n' % (self.name, ' ' * numSpaces, self.value)\n            elif originalPrefix in self.value:\n                line = '%s%s%s\\n' % (self.name, ' ' * numSpaces, self.value.replace(originalPrefix, newPrefix))\n            else:\n                line = '%s%s%s\\n' % (self.name, ' ' * numSpaces, self.value)\n        return line", "docstring": "Write project card to string.\n\nArgs:\noriginalPrefix (str): Original name to give to files that follow the project naming convention\n(e.g: prefix.gag).\nnewPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None.\n\nReturns:\nstr: Card and value as they would be written to the project file.", "source": "juraj-google-style"}
{"code": "def get_num_image_channels(module_or_spec, signature=None, input_name=None):\n    if (input_name is None):\n        input_name = 'images'\n    input_info_dict = module_or_spec.get_input_info_dict(signature)\n    try:\n        shape = input_info_dict[input_name].get_shape()\n    except KeyError:\n        raise ValueError((\"Module is missing input '%s' in signature '%s'.\" % (input_name, (signature or 'default'))))\n    try:\n        (_, _, _, num_channels) = shape.as_list()\n        if (num_channels is None):\n            raise ValueError\n    except ValueError:\n        raise ValueError(('Shape of module input is %s, expected [batch_size, height, width, num_channels] with known num_channels' % shape))\n    return num_channels", "docstring": "Returns expected num_channels dimensions of an image input.\n\nThis is for advanced users only who expect to handle modules with\nimage inputs that might not have the 3 usual RGB channels.\n\nArgs:\nmodule_or_spec: a Module or ModuleSpec that accepts image inputs.\nsignature: a string with the key of the signature in question.\nIf None, the default signature is used.\ninput_name: a string with the input name for images. If None, the\nconventional input name `images` for the default signature is used.\n\nReturns:\nAn integer with the number of input channels to the module.\n\nRaises:\nValueError: If the channel information is missing or malformed.", "source": "codesearchnet"}
{"code": "def slice_batch_indices(indices):\n    num_in_full_batch = num_full_batches * batch_size\n    first_k_indices = tf.slice(indices, [0], [num_in_full_batch])\n    first_k_indices = tf.reshape(first_k_indices, [num_full_batches, batch_size])\n    flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices)\n    if self._partial_batch_size:\n        index_remainder = tf.data.Dataset.from_tensors(tf.slice(indices, [num_in_full_batch], [self._partial_batch_size]))\n        flat_dataset = flat_dataset.concatenate(index_remainder)\n    return flat_dataset", "docstring": "Convert a Tensor of indices into a dataset of batched indices.\n\nThis step can be accomplished in several ways. The most natural is\nto slice the Tensor in a Dataset map. (With a condition on the upper\nindex to handle the partial batch.) However it turns out that\ncoercing the Tensor into a shape which is divisible by the batch\nsize (and handling the last partial batch separately) allows for a\nmuch more favorable memory access pattern and improved performance.\n\nArgs:\nindices: Tensor which determines the data order for an entire\nepoch.\n\nReturns:\nA Dataset of batched indices.", "source": "github-repos"}
{"code": "def save_aggregate_reports_to_splunk(self, aggregate_reports):\n        \n        logger.debug(\"Saving aggregate reports to Splunk\")\n        if type(aggregate_reports) == dict:\n            aggregate_reports = [aggregate_reports]\n\n        if len(aggregate_reports) < 1:\n            return\n\n        data = self._common_data.copy()\n        json_str = \"\"\n        for report in aggregate_reports:\n            for record in report[\"records\"]:\n                new_report = dict()\n                for metadata in report[\"report_metadata\"]:\n                    new_report[metadata] = report[\"report_metadata\"][metadata]\n                new_report[\"published_policy\"] = report[\"policy_published\"]\n                new_report[\"source_ip_address\"] = record[\"source\"][\n                    \"ip_address\"]\n                new_report[\"source_country\"] = record[\"source\"][\"country\"]\n                new_report[\"source_reverse_dns\"] = record[\"source\"][\n                    \"reverse_dns\"]\n                new_report[\"source_base_domain\"] = record[\"source\"][\n                    \"base_domain\"]\n                new_report[\"message_count\"] = record[\"count\"]\n                new_report[\"disposition\"] = record[\"policy_evaluated\"][\n                    \"disposition\"\n                ]\n                new_report[\"spf_aligned\"] = record[\"alignment\"][\"spf\"]\n                new_report[\"dkim_aligned\"] = record[\"alignment\"][\"dkim\"]\n                new_report[\"passed_dmarc\"] = record[\"alignment\"][\"dmarc\"]\n                new_report[\"header_from\"] = record[\"identifiers\"][\n                    \"header_from\"]\n                new_report[\"envelope_from\"] = record[\"identifiers\"][\n                    \"envelope_from\"]\n                if \"dkim\" in record[\"auth_results\"]:\n                    new_report[\"dkim_results\"] = record[\"auth_results\"][\n                        \"dkim\"]\n                if \"spf\" in record[\"auth_results\"]:\n                    new_report[\"spf_results\"] = record[\"auth_results\"][\n                        \"spf\"]\n\n                data[\"sourcetype\"] = \"dmarc:aggregate\"\n                timestamp = human_timestamp_to_timestamp(\n                    new_report[\"begin_date\"])\n                data[\"time\"] = timestamp\n                data[\"event\"] = new_report.copy()\n                json_str += \"{0}\\n\".format(json.dumps(data))\n\n        if not self.session.verify:\n            logger.debug(\"Skipping certificate verification for Splunk HEC\")\n        try:\n            response = self.session.post(self.url, data=json_str,\n                                         timeout=self.timeout)\n            response = response.json()\n        except Exception as e:\n            raise SplunkError(e.__str__())\n        if response[\"code\"] != 0:\n            raise SplunkError(response[\"text\"])", "docstring": "Saves aggregate DMARC reports to Splunk\n\nArgs:\naggregate_reports: A list of aggregate report dictionaries\nto save in Splunk", "source": "juraj-google-style"}
{"code": "def IoU(cm, ignore_index=None):\n    if (not isinstance(cm, ConfusionMatrix)):\n        raise TypeError('Argument cm should be instance of ConfusionMatrix, but given {}'.format(type(cm)))\n    if (ignore_index is not None):\n        if (not (isinstance(ignore_index, numbers.Integral) and (0 <= ignore_index < cm.num_classes))):\n            raise ValueError('ignore_index should be non-negative integer, but given {}'.format(ignore_index))\n    cm = cm.type(torch.float64)\n    iou = (cm.diag() / (((cm.sum(dim=1) + cm.sum(dim=0)) - cm.diag()) + 1e-15))\n    if (ignore_index is not None):\n\n        def ignore_index_fn(iou_vector):\n            if (ignore_index >= len(iou_vector)):\n                raise ValueError('ignore_index {} is larger than the length of IoU vector {}'.format(ignore_index, len(iou_vector)))\n            indices = list(range(len(iou_vector)))\n            indices.remove(ignore_index)\n            return iou_vector[indices]\n        return MetricsLambda(ignore_index_fn, iou)\n    else:\n        return iou", "docstring": "Calculates Intersection over Union\n\nArgs:\ncm (ConfusionMatrix): instance of confusion matrix metric\nignore_index (int, optional): index to ignore, e.g. background index\n\nReturns:\nMetricsLambda\n\nExamples:\n\n.. code-block:: python\n\ntrain_evaluator = ...\n\ncm = ConfusionMatrix(num_classes=num_classes)\nIoU(cm, ignore_index=0).attach(train_evaluator, 'IoU')\n\nstate = train_evaluator.run(train_dataset)\n# state.metrics['IoU'] -> tensor of shape (num_classes - 1, )", "source": "codesearchnet"}
{"code": "def _EuclidianDistances(self,slist):\n    \n    e_dists2 = [transitfeed.ApproximateDistanceBetweenStops(stop, tail) for\n                (stop,tail) in itertools.izip(slist, slist[1:])]\n\n    return e_dists2", "docstring": "Calculate euclidian distances between stops.\n\nUses the stoplists long/lats to approximate distances\nbetween stations and build a list with y-coordinates for the\nhorizontal lines in the graph.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstoplist: [Stop, Stop, ...]\n\nReturns:\n# One integer for each pair of stations\n# indicating the approximate distance\n[0,33,140, ... ,X]", "source": "juraj-google-style"}
{"code": "def from_json(cls, fh):\n        \n        if isinstance(fh, str):\n            return cls(json.loads(fh))\n        else:\n            return cls(json.load(fh))", "docstring": "Load json from file handle.\n\nArgs:\nfh (file): File handle to load from.\n\nExamlple:\n>>> with open('data.json', 'r') as json:\n>>>    data = composite.load(json)", "source": "juraj-google-style"}
{"code": "def convert(cls, content, input_format, output_format):\n        \n        assert input_format in ('srt', 'sjson')\n        assert output_format in ('srt', 'sjson')\n\n        \n        \n        content = content.decode('utf-8-sig')\n\n        if input_format == output_format:\n            return content\n\n        if input_format == 'srt':\n\n            if output_format == 'sjson':\n                try:\n                    \n                    \n                    srt_subs = SubRipFile.from_string(content, error_handling=SubRipFile.ERROR_RAISE)\n                except Error as ex:  \n                    raise TranscriptsGenerationException(text_type(ex))\n\n                return json.dumps(cls.generate_sjson_from_srt(srt_subs))\n\n        if input_format == 'sjson':\n\n            if output_format == 'srt':\n                return cls.generate_srt_from_sjson(json.loads(content))", "docstring": "Convert transcript `content` from `input_format` to `output_format`.\n\nArguments:\ncontent: Transcript content byte-stream.\ninput_format: Input transcript format.\noutput_format: Output transcript format.\n\nAccepted input formats: sjson, srt.\nAccepted output format: srt, sjson.\n\nRaises:\nTranscriptsGenerationException: On parsing the invalid srt\ncontent during conversion from srt to sjson.", "source": "juraj-google-style"}
{"code": "def write(self, fb):\n    print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)\n    print('class = {}'.format(fb.func_ins.name), file=self.file)\n    print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)\n    print('func_args = {}'.format(repr(fb.func_args)), file=self.file)\n    print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)\n    print('ext = ({}, {})'.format(repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)\n    if (self.setup_stat is not None):\n        self._write_a_stat('setup', self.setup_stat)\n    if (self.foward_stat is not None):\n        self._write_a_stat('forward', self.forward_stat)\n    if (self.backward_stat is not None):\n        self._write_a_stat('backward', self.backward_stat)", "docstring": "Write a single function benchmark.\n\nArgs:\nfb (FunctionBenchmark): FunctionBenchmark class instance.\nBefore passing to this, you should call ``fb.benchmark()``.", "source": "codesearchnet"}
{"code": "def SetSerializersProfiler(self, serializers_profiler):\n    \n    self._serializers_profiler = serializers_profiler\n    if self._storage_file:\n      self._storage_file.SetSerializersProfiler(serializers_profiler)", "docstring": "Sets the serializers profiler.\n\nArgs:\nserializers_profiler (SerializersProfiler): serializers profiler.", "source": "juraj-google-style"}
{"code": "def flatten(dictionary, separator='.', prefix=''):\n    new_dict = {}\n    for (key, value) in dictionary.items():\n        new_key = (((prefix + separator) + key) if prefix else key)\n        if isinstance(value, collections.MutableMapping):\n            new_dict.update(flatten(value, separator, new_key))\n        elif isinstance(value, list):\n            new_value = []\n            for item in value:\n                if isinstance(item, collections.MutableMapping):\n                    new_value.append(flatten(item, separator, new_key))\n                else:\n                    new_value.append(item)\n            new_dict[new_key] = new_value\n        else:\n            new_dict[new_key] = value\n    return new_dict", "docstring": "Flatten the dictionary keys are separated by separator\n\nArguments:\ndictionary {dict} -- The dictionary to be flattened.\n\nKeyword Arguments:\nseparator {str} -- The separator to use (default is '.'). It will\ncrush items with key conflicts.\nprefix {str} -- Used for recursive calls.\n\nReturns:\ndict -- The flattened dictionary.", "source": "codesearchnet"}
{"code": "def addRow(self, *value):\n        \n        if len(value) == 1 and isinstance(value[0], (tuple, list)):\n            value = value[0]\n        assert len(value) == self.getNumCols()\n        self._impl.addRow(Tuple(value)._impl)", "docstring": "Add a row to the DataFrame. The size of the tuple must be equal to the\ntotal number of columns in the dataframe.\n\nArgs:\nvalue: A single argument with a tuple containing all the values\nfor the row to be added, or multiple arguments with the values for\neach column.", "source": "juraj-google-style"}
{"code": "def is_param_method(obj, has_deps=False):\n    \n    parameterized = (inspect.ismethod(obj) and\n                     isinstance(get_method_owner(obj), param.Parameterized))\n    if parameterized and has_deps:\n        return getattr(obj, \"_dinfo\", {}).get('dependencies')\n    return parameterized", "docstring": "Whether the object is a method on a parameterized object.\n\nArgs:\nobj: Object to check\nhas_deps (boolean, optional): Check for dependencies\nWhether to also check whether the method has been annotated\nwith param.depends\n\nReturns:\nA boolean value indicating whether the object is a method\non a Parameterized object and if enabled whether it has any\ndependencies", "source": "juraj-google-style"}
{"code": "def insert(self, key, value, name=None):\n    with tf.name_scope(name or '%s_lookup_table_insert' % self._name):\n        key = tf.convert_to_tensor(key, self._key_dtype, name='key')\n        value = tf.convert_to_tensor(value, self._value_dtype, name='value')\n        op = gen_simple_hash_table_op.examples_simple_hash_table_insert(self.resource_handle, key, value)\n        return op", "docstring": "Associates `key` with `value`.\n\nArgs:\nkey: Scalar key to insert.\nvalue: Scalar value to be associated with key.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `key` or `value` doesn't match the table data\ntypes.", "source": "github-repos"}
{"code": "def is_hermitian(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:\n    return ((matrix.shape[0] == matrix.shape[1]) and np.allclose(matrix, np.conj(matrix.T), rtol=rtol, atol=atol))", "docstring": "Determines if a matrix is approximately Hermitian.\n\nA matrix is Hermitian if it's square and equal to its adjoint.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is Hermitian within the given tolerance.", "source": "codesearchnet"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns details of a `WorkerPool`.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsWorkerPoolsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(WorkerPool) The response message.", "source": "github-repos"}
{"code": "def rh45(msg):\n    \n    d = hex2bin(data(msg))\n    if d[38] == '0':\n        return None\n    rh = bin2int(d[39:51]) * 16\n    return rh", "docstring": "Radio height.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: radio height in ft", "source": "juraj-google-style"}
{"code": "def local_predict(training_dir, data):\n    from .prediction import predict as predict_module\n    tmp_dir = tempfile.mkdtemp()\n    (_, input_file_path) = tempfile.mkstemp(dir=tmp_dir, suffix='.csv', prefix='input')\n    try:\n        if isinstance(data, pd.DataFrame):\n            data.to_csv(input_file_path, header=False, index=False)\n        else:\n            with open(input_file_path, 'w') as f:\n                for line in data:\n                    f.write((line + '\\n'))\n        model_dir = os.path.join(training_dir, 'model')\n        if (not file_io.file_exists(model_dir)):\n            raise ValueError('training_dir should contain the folder model')\n        cmd = ['predict.py', ('--predict-data=%s' % input_file_path), ('--trained-model-dir=%s' % model_dir), ('--output-dir=%s' % tmp_dir), '--output-format=csv', '--batch-size=16', '--mode=prediction', '--no-shard-files']\n        runner_results = predict_module.main(cmd)\n        runner_results.wait_until_finish()\n        schema_file = os.path.join(tmp_dir, 'csv_schema.json')\n        with open(schema_file, 'r') as f:\n            schema = json.loads(f.read())\n        errors_file = glob.glob(os.path.join(tmp_dir, 'errors*'))\n        if (errors_file and (os.path.getsize(errors_file[0]) > 0)):\n            print('Warning: there are errors. See below:')\n            with open(errors_file[0], 'r') as f:\n                text = f.read()\n                print(text)\n        prediction_file = glob.glob(os.path.join(tmp_dir, 'predictions*'))\n        if (not prediction_file):\n            raise FileNotFoundError('Prediction results not found')\n        predictions = pd.read_csv(prediction_file[0], header=None, names=[col['name'] for col in schema])\n        return predictions\n    finally:\n        shutil.rmtree(tmp_dir)", "docstring": "Runs local prediction on the prediction graph.\n\nRuns local prediction and returns the result in a Pandas DataFrame. For\nrunning prediction on a large dataset or saving the results, run\nlocal_batch_prediction or batch_prediction. Input data should fully match\nthe schema that was used at training, except the target column should not\nexist.\n\nArgs:\ntraining_dir: local path to the trained output folder.\ndata: List of csv strings or a Pandas DataFrame that match the model schema.\n\nRaises:\nValueError: if training_dir does not contain the folder 'model'.\nFileNotFoundError: if the prediction data is not found.", "source": "codesearchnet"}
{"code": "def available_writers(as_dict=False):\n    \n    writers = []\n    for writer_configs in configs_for_writer():\n        try:\n            writer_info = read_writer_config(writer_configs)\n        except (KeyError, IOError, yaml.YAMLError):\n            LOG.warning(\"Could not import writer config from: %s\", writer_configs)\n            LOG.debug(\"Error loading YAML\", exc_info=True)\n            continue\n        writers.append(writer_info if as_dict else writer_info['name'])\n    return writers", "docstring": "Available writers based on current configuration.\n\nArgs:\nas_dict (bool): Optionally return writer information as a dictionary.\nDefault: False\n\nReturns: List of available writer names. If `as_dict` is `True` then\na list of dictionaries including additionally writer information\nis returned.", "source": "juraj-google-style"}
{"code": "def compose_tree_url(tree, issn_url=False):\n    \n    url = compose_tree_path(tree, issn_url)\n\n    if WEB_PORT == 80:\n        return \"%s:\n\n    return \"%s:", "docstring": "Compose full url for given `tree`, with protocol, server's address and\nport.\n\nArgs:\ntree (obj): :class:`.Tree` instance.\nissn_url (bool, default False): Compose URL using ISSN.\n\nReturns:\nstr: URL of the tree", "source": "juraj-google-style"}
{"code": "def _check_classes(var: 'cfg.Variable | None', check: 'Callable[[_base.BaseValue], bool]') -> bool:\n    if not var:\n        return False\n    for v in var.data:\n        if isinstance(v, class_mixin.Class):\n            if not check(v):\n                return False\n        elif isinstance(v.cls, class_mixin.Class) and v.cls != v:\n            if not check(v.cls):\n                return False\n    return True", "docstring": "Check whether the cls of each value in `var` is a class and passes `check`.\n\nArgs:\nvar: A cfg.Variable or empty.\ncheck: (BaseValue) -> bool.\n\nReturns:\nWhether the check passes.", "source": "github-repos"}
{"code": "def __init__(self, path, mode):\n        \n\n        if (mode == WorkDB.Mode.open) and (not os.path.exists(path)):\n            raise FileNotFoundError('Requested file {} not found'.format(path))\n\n        self._path = path\n        self._conn = sqlite3.connect(path)\n\n        self._init_db()", "docstring": "Open a DB in file `path` in mode `mode`.\n\nArgs:\npath: The path to the DB file.\nmode: The mode in which to open the DB. See the `Mode` enum for\ndetails.\n\nRaises:\nFileNotFoundError: If `mode` is `Mode.open` and `path` does not\nexist.", "source": "juraj-google-style"}
{"code": "def ProduceEventSource(self, event_source):\n    if (not self._storage_writer):\n        raise RuntimeError('Storage writer not set.')\n    self._storage_writer.AddEventSource(event_source)\n    self._number_of_event_sources += 1\n    self.last_activity_timestamp = time.time()", "docstring": "Produces an event source.\n\nArgs:\nevent_source (EventSource): an event source.\n\nRaises:\nRuntimeError: when storage writer is not set.", "source": "codesearchnet"}
{"code": "def set_output_embeddings(self, value):\n    if self.get_lm_head() is not None:\n        lm_head = self.get_lm_head()\n        try:\n            lm_head.set_output_embeddings(value)\n        except AttributeError:\n            logger.info('Building the model')\n            self.build_in_name_scope()\n            lm_head.set_output_embeddings(value)", "docstring": "Set model's output embeddings\n\nArgs:\nvalue (`tf.Variable`):\nThe new weights mapping hidden states to vocabulary.", "source": "github-repos"}
{"code": "def slice_arrays(arrays, start=None, stop=None):\n    if arrays is None:\n        return [None]\n    if isinstance(start, list) and stop is not None:\n        raise ValueError('The stop argument has to be None if the value of start is a list.')\n    elif isinstance(arrays, list):\n        if hasattr(start, '__len__'):\n            if hasattr(start, 'shape'):\n                start = start.tolist()\n            return [None if x is None else x[start] for x in arrays]\n        return [None if x is None else None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays]\n    else:\n        if hasattr(start, '__len__'):\n            if hasattr(start, 'shape'):\n                start = start.tolist()\n            return arrays[start]\n        if hasattr(start, '__getitem__'):\n            return arrays[start:stop]\n        return [None]", "docstring": "Slice an array or list of arrays.\n\nThis takes an array-like, or a list of\narray-likes, and outputs:\n- arrays[start:stop] if `arrays` is an array-like\n- [x[start:stop] for x in arrays] if `arrays` is a list\n\nCan also work on list/array of indices: `slice_arrays(x, indices)`\n\nArgs:\narrays: Single array or list of arrays.\nstart: can be an integer index (start index) or a list/array of indices\nstop: integer (stop index); should be None if `start` was a list.\n\nReturns:\nA slice of the array(s).\n\nRaises:\nValueError: If the value of start is a list and stop is not None.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(attention_heads,)`.\npast_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _ReadSpecificationFile(self, path):\n    specification_store = specification.FormatSpecificationStore()\n    with io.open(path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:\n        for line in file_object.readlines():\n            line = line.strip()\n            if ((not line) or line.startswith('\n                continue\n            try:\n                (identifier, offset, pattern) = line.split()\n            except ValueError:\n                logger.error('[skipping] invalid line: {0:s}'.format(line))\n                continue\n            try:\n                offset = int(offset, 10)\n            except ValueError:\n                logger.error('[skipping] invalid offset in line: {0:s}'.format(line))\n                continue\n            try:\n                pattern = codecs.escape_decode(pattern)[0]\n            except ValueError:\n                logger.error('[skipping] invalid pattern in line: {0:s}'.format(line))\n                continue\n            format_specification = specification.FormatSpecification(identifier)\n            format_specification.AddNewSignature(pattern, offset=offset)\n            specification_store.AddSpecification(format_specification)\n    return specification_store", "docstring": "Reads the format specification file.\n\nArgs:\npath (str): path of the format specification file.\n\nReturns:\nFormatSpecificationStore: format specification store.", "source": "codesearchnet"}
{"code": "def with_organisation(self, organisation):\n    if (organisation is None):\n        organisation = ''\n    organisation = slugify(organisation)\n    self._validate_organisation(organisation)\n    self.organisation = organisation\n    return self", "docstring": "Add an organisation segment.\n\nArgs:\norganisation (str): Official name of an administrative body\nholding an election.\n\nReturns:\nIdBuilder\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def streaming_client(self, tasks_regex, tasks_negate, workers_regex, workers_negate):\n    cc = CapturingClient(Queue(), re.compile(tasks_regex), tasks_negate, re.compile(workers_regex), workers_negate)\n    self.observers.append(cc)\n    (yield cc.queue)\n    self.observers.remove(cc)", "docstring": "Connects a client to the streaming capture, filtering the events that are sent\nto it.\n\nArgs:\ntasks_regex (str): a pattern to filter tasks to capture.\nex.: '^dispatch|^email' to filter names starting with that\nor 'dispatch.*123456' to filter that exact name and number\nor even '123456' to filter that exact number anywhere.\ntasks_negate (bool): if True, finds tasks that do not match criteria\nworkers_regex (str): a pattern to filter workers to capture.\nex.: 'service|priority' to filter names containing that\nworkers_negate (bool): if True, finds workers that do not match criteria", "source": "codesearchnet"}
{"code": "def log_(\n    message: str,\n    logger: logging.Logger,\n    level: str = \"info\",\n    extra: Optional[Dict] = None,\n    trim: bool = False,\n) -> None:\n    \n    if extra is None:\n        extra = {}\n    \n    if message:\n        message = message.replace(\"\\n\", \"\").replace(\"  \", \" \").replace(\"{ \", \"{\")\n    if trim:\n        message = _trim_message(message)\n    \n    getattr(logger, level)(message, extra=extra)", "docstring": "Log a request or response\n\nArgs:\nmessage: JSON-RPC request or response string.\nlevel: Log level.\nextra: More details to include in the log entry.\ntrim: Abbreviate log messages.", "source": "juraj-google-style"}
{"code": "def save_and_return_nodes(obj, export_dir, signatures=None, options: save_options.SaveOptions=None, experimental_skip_checkpoint=False):\n    options = options or save_options.SaveOptions()\n    saved_model = saved_model_pb2.SavedModel()\n    meta_graph_def = saved_model.meta_graphs.add()\n    _, exported_graph, object_saver, asset_info, saved_nodes, node_paths = _build_meta_graph(obj, signatures, options, meta_graph_def)\n    saved_model.saved_model_schema_version = constants.SAVED_MODEL_SCHEMA_VERSION\n    if not experimental_skip_checkpoint:\n        path_helpers.get_or_create_variables_dir(export_dir)\n        ckpt_options = checkpoint_options.CheckpointOptions(experimental_io_device=options.experimental_io_device, experimental_sharding_callback=options.experimental_sharding_callback)\n        object_saver.save(path_helpers.get_variables_path(export_dir), options=ckpt_options)\n    builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map, export_dir)\n    if context.executing_eagerly():\n        try:\n            context.async_wait()\n        except errors.NotFoundError as err:\n            raise FileNotFoundError(f\"{err}\\n You may be trying to save on a different device from the computational device. Consider setting the `experimental_io_device` option in `tf.saved_model.SaveOptions` to the io_device such as '/job:localhost'.\") from err\n    pywrap_saved_model.Save(export_dir)\n    if options.experimental_image_format:\n        prefix = file_io.join(compat.as_str(export_dir), 'saved_model')\n        proto_splitter.SavedModelSplitter(saved_model).write(prefix)\n    else:\n        path = file_io.join(compat.as_str(export_dir), compat.as_str(constants.SAVED_MODEL_FILENAME_PB))\n        file_io.atomic_write_string_to_file(path, saved_model.SerializeToString(deterministic=True))\n    fingerprinting_utils.write_fingerprint(export_dir)\n    if options.save_debug_info:\n        _export_debug_info(exported_graph, export_dir)\n    metrics.SetWritePath(saved_model_path=str(export_dir))\n    ops.dismantle_graph(exported_graph)\n    return (saved_nodes, node_paths)", "docstring": "Saves a SavedModel while returning all saved nodes and their paths.\n\nPlease see `tf.saved_model.save` for details.\n\nArgs:\nobj: A trackable object to export.\nexport_dir: A directory in which to write the SavedModel.\nsignatures: A function or dictionary of functions to save in the SavedModel\nas signatures.\noptions: `tf.saved_model.SaveOptions` object for configuring save options.\nexperimental_skip_checkpoint: If set to `True`, the checkpoint will not be\nwritten.\n\nReturns:\nA tuple of (a list of saved nodes in the order they are serialized to the\n`SavedObjectGraph`, dictionary mapping nodes to one possible path from\nthe root node to the key node)", "source": "github-repos"}
{"code": "def set_exception(self, exc_class, exc_info, exc_stack):\n    if self.is_finished():\n        raise InternalError('set_exception called on finished AsynchronousResponse', result=self._result, exception=self._exception)\n    self._exception = (exc_class, exc_info, exc_stack)\n    self.finish()", "docstring": "Set an exception as the result of this operation.\n\nArgs:\nexc_class (object): The exception type", "source": "codesearchnet"}
{"code": "def on_run_end(self, request):", "docstring": "Callback invoked on run() calls to the debug-wrapper session.\n\nThis is a blocking callback.\nThe invocation happens right before the wrapper exits its run() call.\n\nArgs:\nrequest: (`OnRunEndRequest`) callback request object carrying information\nsuch as the actual action performed by the session wrapper for the\nrun() call.\n\nReturns:\nAn instance of `OnRunStartResponse`.", "source": "github-repos"}
{"code": "def schedule(\n        time: Union[datetime.time, datetime.datetime],\n        callback: Callable, *args):\n    \n    dt = _fillDate(time)\n    now = datetime.datetime.now(dt.tzinfo)\n    delay = (dt - now).total_seconds()\n    loop = asyncio.get_event_loop()\n    loop.call_later(delay, callback, *args)", "docstring": "Schedule the callback to be run at the given time with\nthe given arguments.\n\nArgs:\ntime: Time to run callback. If given as :py:class:`datetime.time`\nthen use today as date.\ncallback: Callable scheduled to run.\nargs: Arguments for to call callback with.", "source": "juraj-google-style"}
{"code": "def _select_mgmt_networks(self, conf):\n    nets = conf['nets']\n    mgmts = sorted([name for (name, net) in nets.iteritems() if (net.get('management') is True)])\n    if (len(mgmts) == 0):\n        mgmt_name = sorted(nets.keys())[0]\n        LOGGER.debug('No management network configured, selecting network %s', mgmt_name)\n        nets[mgmt_name]['management'] = True\n        mgmts.append(mgmt_name)\n    for mgmt_name in mgmts:\n        if (nets[mgmt_name].get('dns_domain_name', None) is None):\n            nets[mgmt_name]['dns_domain_name'] = 'lago.local'\n    return mgmts", "docstring": "Select management networks. If no management network is found, it will\nmark the first network found by sorted the network lists. Also adding\ndefault DNS domain, if none is set.\n\nArgs:\nconf(spec): spec", "source": "codesearchnet"}
{"code": "def AddColumn(self, column, default='', col_index=(- 1)):\n    if (column in self.table):\n        raise TableError(('Column %r already in table.' % column))\n    if (col_index == (- 1)):\n        self._table[0][column] = column\n        for i in range(1, len(self._table)):\n            self._table[i][column] = default\n    else:\n        self._table[0].Insert(column, column, col_index)\n        for i in range(1, len(self._table)):\n            self._table[i].Insert(column, default, col_index)", "docstring": "Appends a new column to the table.\n\nArgs:\ncolumn: A string, name of the column to add.\ndefault: Default value for entries. Defaults to ''.\ncol_index: Integer index for where to insert new column.\n\nRaises:\nTableError: Column name already exists.", "source": "codesearchnet"}
{"code": "def read(self, x):\n    \n    access_logits = self._address_content(x)\n    weights = tf.nn.softmax(access_logits)\n    retrieved_mem = tf.reduce_sum(\n        tf.multiply(tf.expand_dims(weights, 3),\n                    tf.expand_dims(self.mem_vals, axis=1)), axis=2)\n    return access_logits, retrieved_mem", "docstring": "Read from the memory.\n\nAn external component can use the results via a simple MLP,\ne.g., fn(x W_x + retrieved_mem W_m).\n\nArgs:\nx: a tensor in the shape of [batch_size, length, depth].\nReturns:\naccess_logits: the logits for accessing the memory in shape of\n[batch_size, length, memory_size].\nretrieved_mem: the retrieved results in the shape of\n[batch_size, length, val_depth].", "source": "juraj-google-style"}
{"code": "def save(self):\n    try:\n        email = models.EmailAddress.objects.get(email=self.validated_data['email'], is_verified=True)\n    except models.EmailAddress.DoesNotExist:\n        return None\n    token = models.PasswordResetToken.objects.create(email=email)\n    token.send()\n    return token", "docstring": "Send out a password reset if the provided data is valid.\n\nIf the provided email address exists and is verified, a reset\nemail is sent to the address.\n\nReturns:\nThe password reset token if it was returned and ``None``\notherwise.", "source": "codesearchnet"}
{"code": "def as_vartype(vartype):\n    if isinstance(vartype, Vartype):\n        return vartype\n    try:\n        if isinstance(vartype, str):\n            vartype = Vartype[vartype]\n        elif isinstance(vartype, frozenset):\n            vartype = Vartype(vartype)\n        else:\n            vartype = Vartype(frozenset(vartype))\n    except (ValueError, KeyError):\n        raise TypeError(\"expected input vartype to be one of: Vartype.SPIN, 'SPIN', {-1, 1}, Vartype.BINARY, 'BINARY', or {0, 1}.\")\n    return vartype", "docstring": "Cast various inputs to a valid vartype object.\n\nArgs:\nvartype (:class:`.Vartype`/str/set):\nVariable type. Accepted input values:\n\n* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\nReturns:\n:class:`.Vartype`: Either :class:`.Vartype.SPIN` or\n:class:`.Vartype.BINARY`.\n\nSee also:\n:func:`~dimod.decorators.vartype_argument`", "source": "codesearchnet"}
{"code": "def __init__(self, msg):\n        \n        super(CoTError, self).__init__(\n            msg, exit_code=STATUSES['malformed-payload']\n        )", "docstring": "Initialize CoTError.\n\nArgs:\nmsg (string): the error message", "source": "juraj-google-style"}
{"code": "def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:\n    quat = quat[..., None] * quat[..., None, :]\n    mat = _get_quat('_QTR_MAT', dtype=quat.dtype, device=quat.device)\n    shaped_qtr_mat = mat.view((1,) * len(quat.shape[:-2]) + mat.shape)\n    quat = quat[..., None, None] * shaped_qtr_mat\n    return torch.sum(quat, dim=(-3, -4))", "docstring": "Converts a quaternion to a rotation matrix.\n\nArgs:\nquat: [*, 4] quaternions\nReturns:\n[*, 3, 3] rotation matrices", "source": "github-repos"}
{"code": "def _process_stack_frames(self):\n    stack_frames = tf_stack.extract_stack()\n    stack_frame_ids = []\n    writer = None\n    for file_path, lineno, func, _ in stack_frames:\n        abs_path = os.path.abspath(file_path)\n        if (abs_path, lineno, func) in self._stack_frame_to_id:\n            stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])\n            continue\n        with self._stack_frame_to_id_lock:\n            if (abs_path, lineno, func) not in self._stack_frame_to_id:\n                stack_frame_id = _get_id()\n                self._stack_frame_to_id[abs_path, lineno, func] = stack_frame_id\n                file_index = self._write_source_file_content(abs_path)\n                file_line_col = graph_debug_info_pb2.GraphDebugInfo.FileLineCol(file_index=file_index, line=lineno, func=func)\n                stack_frame_with_id = debug_event_pb2.StackFrameWithId(id=stack_frame_id, file_line_col=file_line_col)\n                writer = self.get_writer()\n                writer.WriteStackFrameWithId(stack_frame_with_id)\n            stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])\n    code_location = debug_event_pb2.CodeLocation(host_name=self._hostname, stack_frame_ids=stack_frame_ids)\n    return code_location", "docstring": "Process stack frames.\n\nSend the content of source-files, on a best-effort basis.\n\nReturns:\nA list of stack frame IDs.", "source": "github-repos"}
{"code": "def add_entry(self, path_object):\n        \n        if (not is_root() and not self.st_mode & PERM_WRITE and\n                not self.filesystem.is_windows_fs):\n            exception = IOError if IS_PY2 else OSError\n            raise exception(errno.EACCES, 'Permission Denied', self.path)\n\n        if path_object.name in self.contents:\n            self.filesystem.raise_os_error(errno.EEXIST, self.path)\n\n        self.contents[path_object.name] = path_object\n        path_object.parent_dir = self\n        self.st_nlink += 1\n        path_object.st_nlink += 1\n        path_object.st_dev = self.st_dev\n        if path_object.st_nlink == 1:\n            self.filesystem.change_disk_usage(\n                path_object.size, path_object.name, self.st_dev)", "docstring": "Adds a child FakeFile to this directory.\n\nArgs:\npath_object: FakeFile instance to add as a child of this directory.\n\nRaises:\nOSError: if the directory has no write permission (Posix only)\nOSError: if the file or directory to be added already exists", "source": "juraj-google-style"}
{"code": "def predict(self, X, break_ties=\"random\", return_probs=False, **kwargs):\n        \n        Y_s = self._to_numpy(self.predict_proba(X, **kwargs))\n        Y_p = self._break_ties(Y_s, break_ties).astype(np.int)\n        if return_probs:\n            return Y_p, Y_s\n        else:\n            return Y_p", "docstring": "Predicts (int) labels for an input X on all tasks\n\nArgs:\nX: The input for the predict_proba method\nbreak_ties: A tie-breaking policy (see Classifier._break_ties())\nreturn_probs: Return the predicted probabilities as well\n\nReturns:\nY_p: An n-dim np.ndarray of predictions in {1,...k}\n[Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]", "source": "juraj-google-style"}
{"code": "def delete(self, version_name):\n    name = ('%s/versions/%s' % (self._full_model_name, version_name))\n    response = self._api.projects().models().versions().delete(name=name).execute()\n    if ('name' not in response):\n        raise Exception('Invalid response from service. \"name\" is not found.')\n    _util.wait_for_long_running_operation(response['name'])", "docstring": "Delete a version of model.\n\nArgs:\nversion_name: the name of the version in short form, such as \"v1\".", "source": "codesearchnet"}
{"code": "def _validate_alias_file_content(alias_file_path, url=''):\n    \n    alias_table = get_config_parser()\n    try:\n        alias_table.read(alias_file_path)\n        for alias_name, alias_command in reduce_alias_table(alias_table):\n            _validate_alias_name(alias_name)\n            _validate_alias_command(alias_command)\n            _validate_alias_command_level(alias_name, alias_command)\n            _validate_pos_args_syntax(alias_name, alias_command)\n    except Exception as exception:  \n        error_msg = CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception)\n        error_msg = error_msg.replace(alias_file_path, url or alias_file_path)\n        raise CLIError(error_msg)", "docstring": "Make sure the alias name and alias command in the alias file is in valid format.\n\nArgs:\nThe alias file path to import aliases from.", "source": "juraj-google-style"}
{"code": "def get_gpu_ids():\n    if (_mode() == LOCAL_MODE):\n        raise Exception('ray.get_gpu_ids() currently does not work in PYTHON MODE.')\n    all_resource_ids = global_worker.raylet_client.resource_ids()\n    assigned_ids = [resource_id for (resource_id, _) in all_resource_ids.get('GPU', [])]\n    if (global_worker.original_gpu_ids is not None):\n        assigned_ids = [global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids]\n    return assigned_ids", "docstring": "Get the IDs of the GPUs that are available to the worker.\n\nIf the CUDA_VISIBLE_DEVICES environment variable was set when the worker\nstarted up, then the IDs returned by this method will be a subset of the\nIDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range\n[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.\n\nReturns:\nA list of GPU IDs.", "source": "codesearchnet"}
{"code": "def if_features(self, stmt: Statement, mid: ModuleId) -> bool:\n    iffs = stmt.find_all('if-feature')\n    if (not iffs):\n        return True\n    for i in iffs:\n        if (not FeatureExprParser(i.argument, self, mid).parse()):\n            return False\n    return True", "docstring": "Evaluate ``if-feature`` substatements on a statement, if any.\n\nArgs:\nstmt: Yang statement that is tested on if-features.\nmid: Identifier of the module in which `stmt` is present.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nInvalidFeatureExpression: If a if-feature expression is not\nsyntactically correct.\nUnknownPrefix: If a prefix specified in a feature name is not\ndeclared.", "source": "codesearchnet"}
{"code": "def enable_traceback_filtering():\n    if sys.version_info.major != 3 or sys.version_info.minor < 7:\n        raise RuntimeError(f'Traceback filtering is only available with Python 3.7 or higher. This Python version: {sys.version}')\n    global _ENABLE_TRACEBACK_FILTERING\n    _ENABLE_TRACEBACK_FILTERING.value = True", "docstring": "Enable filtering out TensorFlow-internal frames in exception stack traces.\n\nRaw TensorFlow stack traces involve many internal frames, which can be\nchallenging to read through, while not being actionable for end users.\nBy default, TensorFlow filters internal frames in most exceptions that it\nraises, to keep stack traces short, readable, and focused on what's\nactionable for end users (their own code).\n\nIf you have previously disabled traceback filtering via\n`tf.debugging.disable_traceback_filtering()`, you can re-enable it via\n`tf.debugging.enable_traceback_filtering()`.\n\nRaises:\nRuntimeError: If Python version is not at least 3.7.", "source": "github-repos"}
{"code": "def section(title, element_list):\n    \n    sect = {\n            'Type': 'Section',\n            'Title': title,\n            }\n\n    if isinstance(element_list, list):\n        sect['Elements'] = element_list\n    else:\n        sect['Elements'] = [element_list]\n    return sect", "docstring": "Returns a dictionary representing a new section.  Sections\ncontain a list of elements that are displayed separately from\nthe global elements on the page.\n\nArgs:\ntitle: The title of the section to be displayed\nelement_list: The list of elements to display within the section\n\nReturns:\nA dictionary with metadata specifying that it is to be rendered as\na section containing multiple elements", "source": "juraj-google-style"}
{"code": "def add_request(self, request):\n        \n\n        queue_item = QueueItem(request, Response(request.url))\n        self.add(queue_item)\n        return queue_item", "docstring": "Add a request to the queue.\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The request to add.\n\nReturns:\n:class:`nyawc.QueueItem`: The created queue item.", "source": "juraj-google-style"}
{"code": "def _rescale(vector):\n    \n    \n    min_val = min(vector)\n    vector = [v - min_val for v in vector]\n\n    \n    max_val = float(max(vector))\n    try:\n        return [v / max_val for v in vector]\n    except ZeroDivisionError:  \n        return [1.0] * len(vector)", "docstring": "Scale values in vector to the range [0, 1].\n\nArgs:\nvector: A list of real values.", "source": "juraj-google-style"}
{"code": "def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool=False):\n    if skip_special_tokens:\n        prompt_token_id = self.convert_tokens_to_ids('<|startofprev|>')\n        decoder_start_token_id = self.convert_tokens_to_ids('<|startoftranscript|>')\n        token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)\n    return token_ids", "docstring": "Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids.\n\nArgs:\ntoken_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\nList of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer.\nskip_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be\nremoved.", "source": "github-repos"}
{"code": "def do_check(func, files, status):\n    \n\n    for file_name in files:\n        with open(file_name, 'r') as f:\n            output = func.parse(f.read(), file_name)\n\n        if output:\n            status.append(\"{0}: {1}\".format(file_name, output))\n\n    return status", "docstring": "Generic do_check helper method\n\nArgs:\nfunc (function): Specific function to call\nfiles (list): list of files to run against\nstatus (list): list of pre-receive check failures to eventually print\nto the user\n\nReturns:\nstatus list of current pre-redeive check failures. Might be an empty\nlist.", "source": "juraj-google-style"}
{"code": "def get_custom_objects():\n    return _GLOBAL_CUSTOM_OBJECTS", "docstring": "Retrieves a live reference to the global dictionary of custom objects.\n\nUpdating and clearing custom objects using `custom_object_scope`\nis preferred, but `get_custom_objects` can\nbe used to directly access the current collection of custom objects.\n\nExample:\n\n```python\nget_custom_objects().clear()\nget_custom_objects()['MyObject'] = MyObject\n```\n\nReturns:\nGlobal dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).", "source": "github-repos"}
{"code": "def create_from_settings(settings):\n        \n        return Connection(\n            settings[\"url\"], \n            settings[\"base_url\"],\n            settings[\"user\"],\n            settings[\"password\"],\n            authorizations = settings[\"authorizations\"],\n            debug = settings[\"debug\"]\n        )", "docstring": "Create a connection with given settings.\n\nArgs:\nsettings (dict): A dictionary of settings\n\nReturns:\n:class:`Connection`. The connection", "source": "juraj-google-style"}
{"code": "def codeblocks(start=None, end=None, full=True):\n    \n    if full:\n        for function in functions(start, end):\n            fc = FlowChart(f=function.func_t)\n            for block in fc:\n                yield block\n\n    else:\n        start, end = fix_addresses(start, end)\n\n        for code_block in FlowChart(bounds=(start, end)):\n            yield code_block", "docstring": "Get all `CodeBlock`s in a given range.\n\nArgs:\nstart - start address of the range. If `None` uses IDB start.\nend - end address of the range. If `None` uses IDB end.\nfull - `True` is required to change node info (e.g. color). `False` causes faster iteration.", "source": "juraj-google-style"}
{"code": "def get_accuracy(targets, outputs, k=1, ignore_index=None):\n    n_correct = 0.0\n    for (target, output) in zip(targets, outputs):\n        if ((not torch.is_tensor(target)) or is_scalar(target)):\n            target = torch.LongTensor([target])\n        if ((not torch.is_tensor(output)) or is_scalar(output)):\n            output = torch.LongTensor([[output]])\n        predictions = output.topk(k=min(k, len(output)), dim=0)[0]\n        for prediction in predictions:\n            if torch_equals_ignore_index(target.squeeze(), prediction.squeeze(), ignore_index=ignore_index):\n                n_correct += 1\n                break\n    return ((n_correct / len(targets)), int(n_correct), len(targets))", "docstring": "Get the accuracy top-k accuracy between two tensors.\n\nArgs:\ntargets (1 - 2D :class:`torch.Tensor`): Target or true vector against which to measure\nsaccuracy\noutputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector\nignore_index (int, optional): Specifies a target index that is ignored\n\nReturns:\n:class:`tuple` consisting of accuracy (:class:`float`), number correct (:class:`int`) and\ntotal (:class:`int`)\n\nExample:\n\n>>> import torch\n>>> from torchnlp.metrics import get_accuracy\n>>> targets = torch.LongTensor([1, 2, 3, 4, 5])\n>>> outputs = torch.LongTensor([1, 2, 2, 3, 5])\n>>> accuracy, n_correct, n_total = get_accuracy(targets, outputs, ignore_index=3)\n>>> accuracy\n0.8\n>>> n_correct\n4\n>>> n_total\n5", "source": "codesearchnet"}
{"code": "def setDirname(self, dirname):\n    sep = utils._getPathSep(dirname)\n    if (not dirname.endswith(sep)):\n        dirname += sep\n    self._dir = utils.asString(dirname)", "docstring": "Set a new directory name for the sequence.\n\nArgs:\ndirname (str): the new directory name", "source": "codesearchnet"}
{"code": "def get_sites_in_sphere(self, pt, r):\n    neighbors = []\n    for site in self._sites:\n        dist = site.distance_from_point(pt)\n        if (dist <= r):\n            neighbors.append((site, dist))\n    return neighbors", "docstring": "Find all sites within a sphere from a point.\n\nArgs:\npt (3x1 array): Cartesian coordinates of center of sphere.\nr (float): Radius of sphere.\n\nReturns:\n[(site, dist) ...] since most of the time, subsequent processing\nrequires the distance.", "source": "codesearchnet"}
{"code": "def automatic_gamma_density(structure, kppa):\n    latt = structure.lattice\n    lengths = latt.abc\n    ngrid = (kppa / structure.num_sites)\n    mult = ((((ngrid * lengths[0]) * lengths[1]) * lengths[2]) ** (1 / 3))\n    num_div = [int(round((mult / l))) for l in lengths]\n    num_div = [(i if (i > 0) else 1) for i in num_div]\n    num_div = [((i + (i % 2)) if (i <= 8) else ((i - (i % 2)) + 1)) for i in num_div]\n    style = Kpoints.supported_modes.Gamma\n    comment = ('pymatgen 4.7.6+ generated KPOINTS with grid density = ' + '{} / atom'.format(kppa))\n    num_kpts = 0\n    return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])", "docstring": "Returns an automatic Kpoint object based on a structure and a kpoint\ndensity. Uses Gamma centered meshes always. For GW.\n\nAlgorithm:\nUses a simple approach scaling the number of divisions along each\nreciprocal lattice vector proportional to its length.\n\nArgs:\nstructure:\nInput structure\nkppa:\nGrid density", "source": "codesearchnet"}
{"code": "def dict_from_items_with_values(*dictionaries, **items):\n    dict_list = list(dictionaries)\n    dict_list.append(items)\n    result = {}\n    for d in dict_list:\n        for (key, value) in d.items():\n            if (value is not None):\n                result[key] = value\n    return result", "docstring": "Creates a dict with the inputted items; pruning any that are `None`.\n\nArgs:\n*dictionaries(dict): Dictionaries of items to be pruned and included.\n**items: Items to be pruned and included.\n\nReturns:\ndict: A dictionary containing all of the items with a 'non-None' value.", "source": "codesearchnet"}
{"code": "def bipartition_indices(N):\n    result = []\n    if (N <= 0):\n        return result\n    for i in range((2 ** (N - 1))):\n        part = [[], []]\n        for n in range(N):\n            bit = ((i >> n) & 1)\n            part[bit].append(n)\n        result.append((tuple(part[1]), tuple(part[0])))\n    return result", "docstring": "Return indices for undirected bipartitions of a sequence.\n\nArgs:\nN (int): The length of the sequence.\n\nReturns:\nlist: A list of tuples containing the indices for each of the two\nparts.\n\nExample:\n>>> N = 3\n>>> bipartition_indices(N)\n[((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]", "source": "codesearchnet"}
{"code": "def trk50(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[11] == '0':\n        return None\n\n    sign = int(d[12])   \n    value = bin2int(d[13:23])\n\n    if sign:\n        value = value - 1024\n\n    trk = value * 90.0 / 512.0\n\n    \n    if trk < 0:\n        trk = 360 + trk\n\n    return round(trk, 3)", "docstring": "True track angle, BDS 5,0 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS50) string\n\nReturns:\nfloat: angle in degrees to true north (from 0 to 360)", "source": "juraj-google-style"}
{"code": "def handle_api_static_request(self, request, start_response):\n    \n    if request.path == PROXY_PATH:\n      return util.send_wsgi_response('200 OK',\n                                     [('Content-Type',\n                                       'text/html')],\n                                     PROXY_HTML, start_response)\n    else:\n      _logger.debug('Unknown static url requested: %s',\n                    request.relative_url)\n      return util.send_wsgi_response('404 Not Found', [('Content-Type',\n                                       'text/plain')], 'Not Found',\n                                     start_response)", "docstring": "Handler for requests to {base_path}/static/.*.\n\nThis calls start_response and returns the response body.\n\nArgs:\nrequest: An ApiRequest, the request from the user.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body.", "source": "juraj-google-style"}
{"code": "def _to_row_partitions_from_lengths(lengths: Sequence[Union[int, Sequence[int]]]) -> Sequence[RowPartition]:\n    result, _ = dynamic_ragged_shape._to_row_partitions_and_nvals_from_lengths(lengths)\n    return result", "docstring": "Allow ragged and uniform shapes to be specified.\n\nFor example, [2, [2,1], 2] represents a shape like:\n[[[0, 0], [0, 0]], [[0, 0]]]\n\nArgs:\nlengths: a list of integers and lists of integers.\n\nReturns:\na sequence of RowPartitions.", "source": "github-repos"}
{"code": "def base64url_decode(input):\n    \n    rem = len(input) % 4\n\n    if rem > 0:\n        input += b'=' * (4 - rem)\n\n    return base64.urlsafe_b64decode(input)", "docstring": "Helper method to base64url_decode a string.\n\nArgs:\ninput (str): A base64url_encoded string to decode.", "source": "juraj-google-style"}
{"code": "def _unpad_modernbert_input(inputs: torch.Tensor, attention_mask: torch.Tensor, position_ids: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, Optional[torch.Tensor], Optional[torch.Tensor]]:\n    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n    max_seqlen_in_batch = int(seqlens_in_batch.max().item())\n    cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))\n    if inputs.dim() == 2:\n        unpadded_inputs = inputs.flatten()[indices]\n    else:\n        batch, seqlen, *rest = inputs.shape\n        shape = batch * seqlen\n        unpadded_inputs = inputs.view(shape, *rest)[indices]\n    unpadded_position_ids = position_ids.flatten()[indices] if position_ids is not None else None\n    unpadded_labels = labels.flatten()[indices] if labels is not None else None\n    return (unpadded_inputs, indices, cu_seqlens, max_seqlen_in_batch, unpadded_position_ids, unpadded_labels)", "docstring": "Remove padding from input sequences.\n\nArgs:\ninputs: (batch, seqlen, ...) or (batch, seqlen)\nattention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.\nposition_ids: (batch, seqlen), int, position ids\nlabels: (batch, seqlen), int, labels\n\nReturns:\nunpadded_inputs: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.\nindices: (total_nnz)\ncu_seqlens: (batch + 1), the cumulative sequence lengths\nmax_seqlen_in_batch: int\nunpadded_position_ids: (total_nnz) or None\nunpadded_labels: (total_nnz) or None", "source": "github-repos"}
{"code": "def rho_hv(scatterer):\n    \n    Z = scatterer.get_Z()\n    a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2\n    b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n    c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])\n    return np.sqrt(a / (b*c))", "docstring": "Copolarized correlation (rho_hv) for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\n\nReturns:\nrho_hv.", "source": "juraj-google-style"}
{"code": "def copy_files_to_folder(files, target_folder, overwrite=True):\n    if (not files):\n        return []\n    for f in files:\n        target = os.path.join(target_folder, os.path.split(f)[(- 1)])\n        if (target == f):\n            return target\n        if os.path.exists(target):\n            if overwrite:\n                try:\n                    os.remove(target)\n                except Exception:\n                    raise IOError(('Failed to remove %s' % f))\n                else:\n                    shutil.copy(f, target)\n            else:\n                continue\n        else:\n            print(('Copying %s to %s' % (os.path.split(f)[(- 1)], os.path.normpath(target_folder))))\n            shutil.copy(f, target)\n    return [os.path.join(target_folder, os.path.split(f)[(- 1)]) for f in files]", "docstring": "Copy a list of files to a new target folder.\n\nReturns:\nA list of fullpath of the new files.", "source": "codesearchnet"}
{"code": "def conversation(self, name=None, **kwargs):\n        \n        convo = Conversation(self, **kwargs)\n        super().conversation(name, convo)\n        return convo", "docstring": "Make a new conversation.\n\nArguments:\nname: The key for the dictionary the conversation will be stored as\nin conversations. If None the conversation will be stored as a\nlist instead. Mixing both types results in an error.\n**kwargs: Keyword arguments to pass into the new conversation.\nThese accept the same arguments as Cleverbot.\n\nReturns:\nThe new conversation.", "source": "juraj-google-style"}
{"code": "def embedding_layer(token_indices=None, token_embedding_matrix=None, n_tokens=None, token_embedding_dim=None, name: str=None, trainable=True):\n    if (token_embedding_matrix is not None):\n        tok_mat = token_embedding_matrix\n        if trainable:\n            Warning('Matrix of embeddings is passed to the embedding_layer, possibly there is a pre-trained embedding matrix. Embeddings paramenters are set to Trainable!')\n    else:\n        tok_mat = (np.random.randn(n_tokens, token_embedding_dim).astype(np.float32) / np.sqrt(token_embedding_dim))\n    tok_emb_mat = tf.Variable(tok_mat, name=name, trainable=trainable)\n    embedded_tokens = tf.nn.embedding_lookup(tok_emb_mat, token_indices)\n    return embedded_tokens", "docstring": "Token embedding layer. Create matrix of for token embeddings.\nCan be initialized with given matrix (for example pre-trained\nwith word2ve algorithm\n\nArgs:\ntoken_indices: token indices tensor of type tf.int32\ntoken_embedding_matrix: matrix of embeddings with dimensionality\n[n_tokens, embeddings_dimension]\nn_tokens: total number of unique tokens\ntoken_embedding_dim: dimensionality of embeddings, typical 100..300\nname: embedding matrix name (variable name)\ntrainable: whether to set the matrix trainable or not\n\nReturns:\nembedded_tokens: tf tensor of size [B, T, E], where B - batch size\nT - number of tokens, E - token_embedding_dim", "source": "codesearchnet"}
{"code": "def _update_workflow_definition(pb_config: dict):\n        \n        known_workflows = get_workflows()\n        workflow_id = pb_config['workflow']['id']\n        workflow_version = pb_config['workflow']['version']\n        if workflow_id not in known_workflows or \\\n           workflow_version not in known_workflows[workflow_id]:\n            raise RuntimeError(\"Unknown workflow definition: {}:{}\"\n                               .format(workflow_id, workflow_version))\n        workflow = get_workflow(workflow_id, workflow_version)\n        for stage in workflow['stages']:\n            stage['status'] = 'none'\n        pb_config['workflow_parameters'] = pb_config['workflow']['parameters']\n        pb_config['workflow_id'] = pb_config['workflow']['id']\n        pb_config['workflow_version'] = pb_config['workflow']['version']\n        pb_config['workflow_stages'] = workflow['stages']\n        pb_config.pop('workflow', None)", "docstring": "Update the PB configuration workflow definition.\n\nArgs:\npb_config (dict): PB configuration dictionary\n\nRaises:\nRunTimeError, if the workflow definition (id, version)\nspecified in the sbi_config is not known.", "source": "juraj-google-style"}
{"code": "def in_builddir(sub='.'):\n    from functools import wraps\n\n    def wrap_in_builddir(func):\n        'Wrap the function for the new build directory.'\n\n        @wraps(func)\n        def wrap_in_builddir_func(self, *args, **kwargs):\n            'The actual function inside the wrapper for the new builddir.'\n            p = (local.path(self.builddir) / sub)\n            if (not p.exists()):\n                LOG.error('%s does not exist.', p)\n            if (p == local.cwd):\n                LOG.debug('CWD already is %s', p)\n                return func(self, *args, *kwargs)\n            with local.cwd(p):\n                return func(self, *args, **kwargs)\n        return wrap_in_builddir_func\n    return wrap_in_builddir", "docstring": "Decorate a project phase with a local working directory change.\n\nArgs:\nsub: An optional subdirectory to change into.", "source": "codesearchnet"}
{"code": "def __handle_variable(self, shell_entry, output):\n        \n        if 'variable' in shell_entry:\n            variable_name = shell_entry['variable']\n            self.pipeline.variables[variable_name] = \"\\n\".join(output)", "docstring": "Saving output for configured variable name.\n\nArgs:\nshell_entry(dict): shell based configuration (shell, docker container or Python).\noutput: list of strings representing output of last shell", "source": "juraj-google-style"}
{"code": "def capitalcase(string):\n    string = str(string)\n    if (not string):\n        return string\n    return (uppercase(string[0]) + string[1:])", "docstring": "Convert string into capital case.\nFirst letters will be uppercase.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Capital case string.", "source": "codesearchnet"}
{"code": "def log_handler(self, handler):\n    if (not self.opened()):\n        handler = (handler or util.noop)\n        self._log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n        self._dll.JLINKARM_EnableLog(self._log_handler)", "docstring": "Setter for the log handler function.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def __savorize(self, node: yaml.Node, expected_type: Type) -> yaml.Node:\n    logger.debug('Savorizing node assuming type {}'.format(expected_type.__name__))\n    for base_class in expected_type.__bases__:\n        if (base_class in self._registered_classes.values()):\n            node = self.__savorize(node, base_class)\n    if hasattr(expected_type, 'yatiml_savorize'):\n        logger.debug('Calling {}.yatiml_savorize()'.format(expected_type.__name__))\n        cnode = Node(node)\n        expected_type.yatiml_savorize(cnode)\n        node = cnode.yaml_node\n    return node", "docstring": "Removes syntactic sugar from the node.\n\nThis calls yatiml_savorize(), first on the class's base \\\nclasses, then on the class itself.\n\nArgs:\nnode: The node to modify.\nexpected_type: The type to assume this type is.", "source": "codesearchnet"}
{"code": "def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, kwargs: Optional[Dict[str, Any]]=None, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], source_caller: Optional[Caller[RequestT, ResponseT]]=None):\n    self.request_coder = request_coder\n    self.response_coder = response_coder\n    self.redis_caller = _RedisCaller(host, port, time_to_live, request_coder=self.request_coder, response_coder=self.response_coder, kwargs=kwargs, source_caller=source_caller, mode=_RedisMode.WRITE)", "docstring": "Args:\nhost (str): The hostname or IP address of the Redis server.\nport (int): The port number of the Redis server.\ntime_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for\nrecords stored in Redis. Provide an integer (in seconds) or a\n`datetime.timedelta` object.\nkwargs: Optional(Dict[str, Any]) additional keyword arguments that\nare required to connect to your redis server. Same as `redis.Redis()`.\nrequest_coder: (Optional[`coders.Coder`]) coder for requests stored\nin Redis.\nresponse_coder: (Optional[`coders.Coder`]) coder for decoding responses\nreceived from Redis.\nsource_caller: (Optional[`Caller`]): The source caller using this Redis\ncache in case of fetching the cache request to store in Redis.", "source": "github-repos"}
{"code": "def store_container(self, container):\n    \n    with self._store_lock:\n      self.store.setdefault(container.CONTAINER_TYPE, []).append(container)", "docstring": "Thread-safe method to store data in the state's store.\n\nArgs:\ncontainer (containers.interface.AttributeContainer): The data to store.", "source": "juraj-google-style"}
{"code": "def list_groups(refresh=False):\n    \n    if 'group.list_groups' in __context__ and not refresh:\n        return __context__['group.list_groups']\n\n    results = _get_all_groups()\n\n    ret = []\n\n    for result in results:\n        ret.append(result.Name)\n\n    __context__['group.list_groups'] = ret\n\n    return ret", "docstring": "Return a list of groups\n\nArgs:\n\nrefresh (bool):\nRefresh the info for all groups in ``__context__``. If False only\nthe groups in ``__context__`` will be returned. If True, the\n``__context__`` will be refreshed with current data and returned.\nDefault is False\n\nReturns:\nlist: A list of groups on the machine\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.list_groups", "source": "juraj-google-style"}
{"code": "def merge_collections(collections, force_dense=False, sampling_rate='auto'):\n    if (len(listify(collections)) == 1):\n        return collections\n    levels = set([c.level for c in collections])\n    if (len(levels) > 1):\n        raise ValueError((\"At the moment, it's only possible to merge Collections at the same level of analysis. You passed collections at levels: %s.\" % levels))\n    variables = list(chain(*[c.variables.values() for c in collections]))\n    cls = collections[0].__class__\n    variables = cls.merge_variables(variables, sampling_rate=sampling_rate)\n    if isinstance(collections[0], BIDSRunVariableCollection):\n        if (sampling_rate == 'auto'):\n            rates = [var.sampling_rate for var in variables if isinstance(var, DenseRunVariable)]\n            sampling_rate = (rates[0] if rates else None)\n        return cls(variables, sampling_rate)\n    return cls(variables)", "docstring": "Merge two or more collections at the same level of analysis.\n\nArgs:\ncollections (list): List of Collections to merge.\nsampling_rate (int, str): Sampling rate to use if it becomes necessary\nto resample DenseRunVariables. Either an integer or 'auto' (see\nmerge_variables docstring for further explanation).\n\nReturns:\nA BIDSVariableCollection or BIDSRunVariableCollection, depending\non the type of the input collections.", "source": "codesearchnet"}
{"code": "def get_container_instance_group(access_token, subscription_id, resource_group,\n                                 container_group_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', resource_group,\n                        '/providers/Microsoft.ContainerInstance/ContainerGroups/',\n                        container_group_name,\n                        '?api-version=', CONTAINER_API])\n    return do_get(endpoint, access_token)", "docstring": "Get the JSON definition of a container group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ncontainer_group_name (str): Name of container instance group.\n\nReturns:\nHTTP response. JSON body of container group.", "source": "juraj-google-style"}
{"code": "def file_modified_time(file_name) -> pd.Timestamp:\n    return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))", "docstring": "File modified time in python\n\nArgs:\nfile_name: file name\n\nReturns:\npd.Timestamp", "source": "codesearchnet"}
{"code": "def deregister_context(self, context_words):\n    for context_word in context_words:\n        if context_word not in self._comp_dict:\n            raise KeyError('Cannot deregister unregistered context word \"%s\"' % context_word)\n    for context_word in context_words:\n        del self._comp_dict[context_word]", "docstring": "Deregister a list of context words.\n\nArgs:\ncontext_words: A list of context words to deregister, as a list of str.\n\nRaises:\nKeyError: if there are word(s) in context_words that do not correspond\nto any registered contexts.", "source": "github-repos"}
{"code": "def to_geojson(self, filename, proj, metadata=None):\n        \n        if metadata is None:\n            metadata = {}\n        json_obj = {\"type\": \"FeatureCollection\", \"features\": [], \"properties\": {}}\n        json_obj['properties']['times'] = self.times.tolist()\n        json_obj['properties']['dx'] = self.dx\n        json_obj['properties']['step'] = self.step\n        json_obj['properties']['u'] = self.u.tolist()\n        json_obj['properties']['v'] = self.v.tolist()\n        for k, v in metadata.items():\n            json_obj['properties'][k] = v\n        for t, time in enumerate(self.times):\n            feature = {\"type\": \"Feature\",\n                       \"geometry\": {\"type\": \"Polygon\"},\n                       \"properties\": {}}\n            boundary_coords = self.boundary_polygon(time)\n            lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True))\n            lonlat_list = lonlat.T.tolist()\n            if len(lonlat_list) > 0:\n                lonlat_list.append(lonlat_list[0])\n            feature[\"geometry\"][\"coordinates\"] = [lonlat_list]\n            for attr in [\"timesteps\", \"masks\", \"x\", \"y\", \"i\", \"j\"]:\n                feature[\"properties\"][attr] = getattr(self, attr)[t].tolist()\n            feature[\"properties\"][\"attributes\"] = {}\n            for attr_name, steps in self.attributes.items():\n                feature[\"properties\"][\"attributes\"][attr_name] = steps[t].tolist()\n            json_obj['features'].append(feature)\n        file_obj = open(filename, \"w\")\n        json.dump(json_obj, file_obj, indent=1, sort_keys=True)\n        file_obj.close()\n        return", "docstring": "Output the data in the STObject to a geoJSON file.\n\nArgs:\nfilename: Name of the file\nproj: PyProj object for converting the x and y coordinates back to latitude and longitue values.\nmetadata: Metadata describing the object to be included in the top-level properties.", "source": "juraj-google-style"}
{"code": "def set(msg_or_dict, key, value):\n    if (not isinstance(msg_or_dict, (collections_abc.MutableMapping, message.Message))):\n        raise TypeError('set() expected a dict or protobuf message, got {!r}.'.format(type(msg_or_dict)))\n    (basekey, subkey) = _resolve_subkeys(key)\n    if (subkey is not None):\n        if isinstance(msg_or_dict, collections_abc.MutableMapping):\n            msg_or_dict.setdefault(basekey, {})\n        set(get(msg_or_dict, basekey), subkey, value)\n        return\n    if isinstance(msg_or_dict, collections_abc.MutableMapping):\n        msg_or_dict[key] = value\n    else:\n        _set_field_on_message(msg_or_dict, key, value)", "docstring": "Set a key's value on a protobuf Message or dictionary.\n\nArgs:\nmsg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the\nobject.\nkey (str): The key to set.\nvalue (Any): The value to set.\n\nRaises:\nTypeError: If ``msg_or_dict`` is not a Message or dictionary.", "source": "codesearchnet"}
{"code": "def delete_meta_features(self, path):\n        \n        if os.path.exists(self.meta_features_path(path)):\n            os.remove(self.meta_features_path(path))", "docstring": "Deletes meta-features of base learner if it exists\n\nArgs:\npath (str): Absolute/local path of xcessiv folder", "source": "juraj-google-style"}
{"code": "def register(self, name, namespace):\n    if (name in self._NAMESPACES):\n        raise ValueError('Namespace {0} already exists.'.format(name))\n    if (not isinstance(namespace, ns.Namespace)):\n        raise TypeError('Namespaces must be of type Namespace.')\n    self._NAMESPACES[name] = namespace", "docstring": "Register a new namespace with the Configuration object.\n\nArgs:\nname (str): The name of the section/namespace.\nnamespace (namespace.Namespace): The Namespace object to store.\n\nRaises:\nTypeError: If the namespace is not a Namespace object.\nValueError: If the namespace is already registered.", "source": "codesearchnet"}
{"code": "def extract_objects_from_source(self, text, type_filter=None):\n    \n    objects = parse_verilog(text)\n\n    if type_filter:\n      objects = [o for o in objects if isinstance(o, type_filter)]\n\n    return objects", "docstring": "Extract object declarations from a text buffer\n\nArgs:\ntext (str): Source code to parse\ntype_filter (class, optional): Object class to filter results\nReturns:\nList of parsed objects.", "source": "juraj-google-style"}
{"code": "def _read_callback(self, data=None):\n    try:\n        if (data is not None):\n            self.__reader.feed(data)\n            while True:\n                reply = self.__reader.gets()\n                if (reply is not False):\n                    try:\n                        callback = self.__callback_queue.popleft()\n                        callback(reply)\n                    except IndexError:\n                        self._reply_list.append(reply)\n                        self._condition.notify_all()\n                else:\n                    break\n    except hiredis.ProtocolError:\n        LOG.warning('corrupted stream => disconnect')\n        self.disconnect()", "docstring": "Callback called when some data are read on the socket.\n\nThe buffer is given to the hiredis parser. If a reply is complete,\nwe put the decoded reply to on the reply queue.\n\nArgs:\ndata (str): string (buffer) read on the socket.", "source": "codesearchnet"}
{"code": "def encode(self):\n    match_id = self.match_id\n    if (match_id is None):\n        match_id = ((1 << 11) - 1)\n    return (((self.match_type << 12) | DataStreamSelector.SpecifierEncodings[self.match_spec]) | match_id)", "docstring": "Encode this stream as a packed 16-bit unsigned integer.\n\nReturns:\nint: The packed encoded stream", "source": "codesearchnet"}
{"code": "def forward(self, outputs, targets):\n    outputs_without_aux = {k: v for k, v in outputs.items() if k not in ('auxiliary_outputs', 'enc_outputs')}\n    if self.assign_second_stage:\n        indices = self.stg2_assigner(outputs_without_aux, targets)\n    else:\n        indices = self.matcher(outputs_without_aux, targets)\n    num_boxes = sum((len(t['class_labels']) for t in targets))\n    num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n    world_size = 1\n    if is_accelerate_available():\n        if PartialState._shared_state != {}:\n            num_boxes = reduce(num_boxes)\n            world_size = PartialState().num_processes\n    num_boxes = torch.clamp(num_boxes / world_size, min=1).item()\n    losses = {}\n    for loss in self.losses:\n        losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n    if 'auxiliary_outputs' in outputs:\n        for i, auxiliary_outputs in enumerate(outputs['auxiliary_outputs']):\n            if not self.assign_second_stage:\n                indices = self.matcher(auxiliary_outputs, targets)\n            for loss in self.losses:\n                l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)\n                l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n                losses.update(l_dict)\n    if 'enc_outputs' in outputs:\n        enc_outputs = outputs['enc_outputs']\n        bin_targets = copy.deepcopy(targets)\n        for bt in bin_targets:\n            bt['class_labels'] = torch.zeros_like(bt['class_labels'])\n        if self.assign_first_stage:\n            indices = self.stg1_assigner(enc_outputs, bin_targets)\n        else:\n            indices = self.matcher(enc_outputs, bin_targets)\n        for loss in self.losses:\n            l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)\n            l_dict = {k + '_enc': v for k, v in l_dict.items()}\n            losses.update(l_dict)\n    return losses", "docstring": "This performs the loss computation.\n\nArgs:\noutputs (`dict`, *optional*):\nDictionary of tensors, see the output specification of the model for the format.\ntargets (`List[dict]`, *optional*):\nList of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the\nlosses applied, see each loss' doc.", "source": "github-repos"}
{"code": "def find_stream(cls, fileobj, max_bytes):\n        \n\n        r = BitReader(fileobj)\n        stream = cls(r)\n        if stream.sync(max_bytes):\n            stream.offset = (r.get_position() - 12) \n            return stream", "docstring": "Returns a possibly valid _ADTSStream or None.\n\nArgs:\nmax_bytes (int): maximum bytes to read", "source": "juraj-google-style"}
{"code": "def block_start(self, previous_block):\n        \n\n        previous_header_bytes = previous_block.header\n        previous_header = BlockHeader()\n        previous_header.ParseFromString(previous_header_bytes)\n\n        block_info = BlockInfo(\n            block_num=previous_header.block_num,\n            previous_block_id=previous_header.previous_block_id,\n            signer_public_key=previous_header.signer_public_key,\n            header_signature=previous_block.header_signature,\n            timestamp=int(time.time()))\n\n        return [self.create_batch(block_info)]", "docstring": "Returns an ordered list of batches to inject at the beginning of the\nblock. Can also return None if no batches should be injected.\n\nArgs:\nprevious_block (Block): The previous block.\n\nReturns:\nA list of batches to inject.", "source": "juraj-google-style"}
{"code": "def classify_tables_by_dependency_type(\n        metadata: MetaData,\n        extra_dependencies: List[TableDependency] = None,\n        sort: bool = True) \\\n        -> List[TableDependencyClassification]:\n    \n    tables = list(metadata.tables.values())  \n    all_deps = get_all_dependencies(metadata, extra_dependencies)\n    tdcmap = {}  \n    for table in tables:\n        parents = [td.parent_table for td in all_deps\n                   if td.child_table == table]\n        children = [td.child_table for td in all_deps\n                    if td.parent_table == table]\n        tdcmap[table] = TableDependencyClassification(\n            table, parents=parents, children=children\n        )\n\n    \n    def parents_contain(start: Table,\n                        probe: Table) -> Tuple[bool, List[Table]]:\n        tdc_ = tdcmap[start]\n        if probe in tdc_.parents:\n            return True, [start, probe]\n        for parent in tdc_.parents:\n            contains_, chain_ = parents_contain(start=parent, probe=probe)\n            if contains_:\n                return True, [start] + chain_\n        return False, []\n\n    def children_contain(start: Table,\n                         probe: Table) -> Tuple[bool, List[Table]]:\n        tdc_ = tdcmap[start]\n        if probe in tdc_.children:\n            return True, [start, probe]\n        for child in tdc_.children:\n            contains_, chain_ = children_contain(start=child, probe=probe)\n            if contains_:\n                return True, [start] + chain_\n        return False, []\n\n    for table in tables:\n        tdc = tdcmap[table]\n        contains, chain = parents_contain(start=table, probe=table)\n        if contains:\n            tdc.set_circular(contains, chain)\n        else:\n            contains, chain = children_contain(start=table, probe=table)\n            if contains:\n                tdc.set_circular(contains, chain)\n            else:\n                tdc.set_circular(False)\n\n    classifications = list(tdcmap.values())\n    if sort:\n        classifications.sort(key=lambda c: c.tablename)\n    return classifications", "docstring": "Inspects a metadata object (optionally adding other specified dependencies)\nand returns a list of objects describing their dependencies.\n\nArgs:\nmetadata: the :class:`MetaData` to inspect\nextra_dependencies: additional dependencies\nsort: sort the results by table name?\n\nReturns:\nlist of :class:`TableDependencyClassification` objects, one for each\ntable", "source": "juraj-google-style"}
{"code": "def dataframe_from_excel(path, sheetname=0, header=0, skiprows=None):  \n    \n    sheetname = sheetname or 0\n    if isinstance(sheetname, (basestring, float)):\n        try:\n            sheetname = int(sheetname)\n        except (TypeError, ValueError, OverflowError):\n            sheetname = str(sheetname)\n    wb = xlrd.open_workbook(path)\n    \n    \n    \n    \n    \n    \n    return pd.io.excel.read_excel(wb, sheetname=sheetname, header=header, skiprows=skiprows, engine='xlrd')", "docstring": "Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name\n\nArguments:\npath (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from\next (str): file name extension (to filter files by)\ndate_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used\n\nReturns:\ndict of DataFrame: { file_path: flattened_data_frame }", "source": "juraj-google-style"}
{"code": "def db_wb004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_wb004`'.format(value))\n\n        self._db_wb004 = value", "docstring": "Corresponds to IDD Field `db_wb004`\nmean coincident dry-bulb temperature to\nWet-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_wb004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def call(self, input_ids=None, inputs_embeds=None, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    assert not (input_ids is not None and inputs_embeds is not None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(self.weight, input_ids)\n    final_embeddings = self.LayerNorm(inputs=inputs_embeds)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def ConsumeByteString(self):\n    the_list = [self._ConsumeSingleByteString()]\n    while (self.token and (self.token[0] in _QUOTES)):\n        the_list.append(self._ConsumeSingleByteString())\n    return b''.join(the_list)", "docstring": "Consumes a byte array value.\n\nReturns:\nThe array parsed (as a string).\n\nRaises:\nParseError: If a byte array value couldn't be consumed.", "source": "codesearchnet"}
{"code": "def do_IDENT(self, service_name: str, source: list, *args, **kwargs) -> None:\n    self.logger.info(' IDENT %s as %s', service_name, source)\n    self.messaging._address_map[service_name] = source", "docstring": "Perform identification of a service to a binary representation.\n\nArgs:\nservice_name: human readable name for service\nsource: zmq representation for the socket source", "source": "codesearchnet"}
{"code": "def _PreparedData(self, order_by=()):\n    if (not order_by):\n        return self.__data\n    sorted_data = self.__data[:]\n    if (isinstance(order_by, six.string_types) or (isinstance(order_by, tuple) and (len(order_by) == 2) and (order_by[1].lower() in ['asc', 'desc']))):\n        order_by = (order_by,)\n    for key in reversed(order_by):\n        if isinstance(key, six.string_types):\n            sorted_data.sort(key=(lambda x: x[0].get(key)))\n        elif (isinstance(key, (list, tuple)) and (len(key) == 2) and (key[1].lower() in ('asc', 'desc'))):\n            key_func = (lambda x: x[0].get(key[0]))\n            sorted_data.sort(key=key_func, reverse=(key[1].lower() != 'asc'))\n        else:\n            raise DataTableException(\"Expected tuple with second value: 'asc' or 'desc'\")\n    return sorted_data", "docstring": "Prepares the data for enumeration - sorting it by order_by.\n\nArgs:\norder_by: Optional. Specifies the name of the column(s) to sort by, and\n(optionally) which direction to sort in. Default sort direction\nis asc. Following formats are accepted:\n\"string_col_name\"  -- For a single key in default (asc) order.\n(\"string_col_name\", \"asc|desc\") -- For a single key.\n[(\"col_1\",\"asc|desc\"), (\"col_2\",\"asc|desc\")] -- For more than\none column, an array of tuples of (col_name, \"asc|desc\").\n\nReturns:\nThe data sorted by the keys given.\n\nRaises:\nDataTableException: Sort direction not in 'asc' or 'desc'", "source": "codesearchnet"}
{"code": "def compute_jaccard_index(x_set, y_set):\n    \n    if not x_set or not y_set:\n        return 0.0\n\n    intersection_cardinal = len(x_set & y_set)\n    union_cardinal = len(x_set | y_set)\n\n    return intersection_cardinal / float(union_cardinal)", "docstring": "Return the Jaccard similarity coefficient of 2 given sets.\n\nArgs:\nx_set (set): first set.\ny_set (set): second set.\n\nReturns:\nfloat: Jaccard similarity coefficient.", "source": "juraj-google-style"}
{"code": "def set(self, key, samples, sampling_rate):\n        \n        if not np.issubdtype(samples.dtype, np.floating):\n            raise ValueError('Samples are required as np.float32!')\n\n        if len(samples.shape) > 1:\n            raise ValueError('Only single channel supported!')\n\n        self.raise_error_if_not_open()\n\n        if key in self._file:\n            del self._file[key]\n\n        samples = (samples * MAX_INT16_VALUE).astype(np.int16)\n\n        dset = self._file.create_dataset(key, data=samples)\n        dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate", "docstring": "Set the samples and sampling-rate for the given key.\nExisting data will be overwritten.\nThe samples have to have ``np.float32`` datatype and values in\nthe range of -1.0 and 1.0.\n\nArgs:\nkey (str): A key to store the data for.\nsamples (numpy.ndarray): 1-D array of audio samples (np.float32).\nsampling_rate (int): The sampling-rate of the audio samples.\n\nNote:\nThe container has to be opened in advance.", "source": "juraj-google-style"}
{"code": "def update(self, task_name, result):\n    with open(self.filepath, 'rb') as f:\n        existing_results = pickle.load(f)\n    if (task_name not in self.tasks):\n        self._add_task(task_name)\n        existing_results['tasks'].append(task_name)\n        existing_results['results'].append([])\n    task_name_idx = existing_results['tasks'].index(task_name)\n    results = existing_results['results'][task_name_idx]\n    results.append(result)\n    with open(self.filepath, 'wb') as f:\n        pickle.dump(existing_results, f)", "docstring": "Update the results file with new information.\n\nArgs:\ntask_name (str): Name of the currently running task. A previously unseen\n``task_name`` will create a new entry in both :attr:`tasks`\nand :attr:`results`.\nresult: This will be appended to the list in :attr:`results` which\ncorresponds to the ``task_name`` in ``task_name``:attr:`tasks`.", "source": "codesearchnet"}
{"code": "def _get_index_points(self, index_points=None):\n    if ((self._index_points is None) and (index_points is None)):\n        raise ValueError(\"This GaussianProcess instance was not instantiated with a value for index_points. One must therefore be provided when calling sample, log_prob, and other such methods. In particular, one can't compute KL divergences to/from an instance of `GaussianProccess` with unspecified `index_points` directly. Instead, use the `get_marginal_distribution` function, which takes `index_points` as an argument and returns a `Normal` or `MultivariateNormalLinearOperator` instance, whose KL can be computed.\")\n    return (index_points if (index_points is not None) else self._index_points)", "docstring": "Return `index_points` if not None, else `self._index_points`.\n\nArgs:\nindex_points: if given, this is what is returned; else,\n`self._index_points`\n\nReturns:\nindex_points: the given arg, if not None, else the class member\n`self._index_points`.\n\nRases:\nValueError: if `index_points` and `self._index_points` are both `None`.", "source": "codesearchnet"}
{"code": "def from_file(cls, filename):\n        \n        filename = str(filename)\n        from pymatgen.io.gaussian import GaussianOutput\n        with zopen(filename) as f:\n            contents = f.read()\n        fname = filename.lower()\n        if fnmatch(fname, \"*.xyz*\"):\n            return cls.from_str(contents, fmt=\"xyz\")\n        elif any([fnmatch(fname.lower(), \"*.{}*\".format(r))\n                  for r in [\"gjf\", \"g03\", \"g09\", \"com\", \"inp\"]]):\n            return cls.from_str(contents, fmt=\"g09\")\n        elif any([fnmatch(fname.lower(), \"*.{}*\".format(r))\n                  for r in [\"out\", \"lis\", \"log\"]]):\n            return GaussianOutput(filename).final_structure\n        elif fnmatch(fname, \"*.json*\") or fnmatch(fname, \"*.mson*\"):\n            return cls.from_str(contents, fmt=\"json\")\n        elif fnmatch(fname, \"*.yaml*\"):\n            return cls.from_str(contents, fmt=\"yaml\")\n        else:\n            from pymatgen.io.babel import BabelMolAdaptor\n            m = re.search(r\"\\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)\",\n                          filename.lower())\n            if m:\n                new = BabelMolAdaptor.from_file(filename,\n                                                m.group(1)).pymatgen_mol\n                new.__class__ = cls\n                return new\n\n        raise ValueError(\"Unrecognized file extension!\")", "docstring": "Reads a molecule from a file. Supported formats include xyz,\ngaussian input (gjf|g03|g09|com|inp), Gaussian output (.out|and\npymatgen's JSON serialized molecules. Using openbabel,\nmany more extensions are supported but requires openbabel to be\ninstalled.\n\nArgs:\nfilename (str): The filename to read from.\n\nReturns:\nMolecule", "source": "juraj-google-style"}
{"code": "def from_string(string):\n    lines = list(clean_lines(string.splitlines()))\n    params = {}\n    for line in lines:\n        for sline in line.split(';'):\n            m = re.match('(\\\\w+)\\\\s*=\\\\s*(.*)', sline.strip())\n            if m:\n                key = m.group(1).strip()\n                val = m.group(2).strip()\n                val = Incar.proc_val(key, val)\n                params[key] = val\n    return Incar(params)", "docstring": "Reads an Incar object from a string.\n\nArgs:\nstring (str): Incar string\n\nReturns:\nIncar object", "source": "codesearchnet"}
{"code": "def get_branch_length(self, age=None, pos=0):\n        \n        if age is None:\n            age = self.age\n\n        return self.length * pow(self.branches[pos][0], age)", "docstring": "Get the length of a branch.\n\nThis method calculates the length of a branch in specific age.\nThe used formula: length * scale^age.\n\nArgs:\nage (int): The age, for which you want to know the branch length.\nReturns:\nfloat: The length of the branch", "source": "juraj-google-style"}
{"code": "def generate_packer_filename(provider, region, builder):\n    \n    filename = '{0}_{1}_{2}.json'.format(provider, region, builder)\n    return filename", "docstring": "Generate a filename to be used by packer.\n\nArgs:\nprovider (str): Name of Spinnaker provider.\nregion (str): Name of provider region to use.\nbuilder (str): Name of builder process type.\n\nReturns:\nstr: Generated filename based on parameters.", "source": "juraj-google-style"}
{"code": "def to_string(self, verbose=0, title=None, **kwargs):\n    from pprint import pformat\n    s = pformat(self, **kwargs)\n    if (title is not None):\n        return '\\n'.join([marquee(title, mark='='), s])\n    return s", "docstring": "String representation. kwargs are passed to `pprint.pformat`.\n\nArgs:\nverbose: Verbosity level\ntitle: Title string.", "source": "codesearchnet"}
{"code": "def WaitForFlow(flow_urn, token=None, timeout=DEFAULT_TIMEOUT, max_sleep_time=1, min_sleep_time=0.2, dampening_multiplier=0.9):\n    start_time = time.time()\n    sleep_time = max_sleep_time\n    while True:\n        with aff4.FACTORY.Open(flow_urn, token=token, aff4_type=flow.GRRFlow) as flow_obj:\n            if ((time.time() - start_time) > timeout):\n                logging.warning('Timed out after waiting %ss for %s!', timeout, flow_obj)\n                raise IOError('Timed out trying to access client! Is it connected?')\n            if (not flow_obj.GetRunner().IsRunning()):\n                break\n        sleep_time = max((sleep_time * dampening_multiplier), min_sleep_time)\n        time.sleep(sleep_time)\n        logging.debug('Waiting for %s, sleeping for %.3fs', flow_obj, sleep_time)", "docstring": "Waits for a flow to finish, polling while we wait.\n\nArgs:\nflow_urn: The urn of the flow to wait for.\ntoken: The datastore access token.\ntimeout: How long to wait before giving up, usually because the client has\ngone away.\nmax_sleep_time: The initial and longest time to wait in between polls.\nmin_sleep_time: The final and shortest time to wait in between polls.\ndampening_multiplier: The current sleep time is multiplied by this number on\neach iteration. Controls how fast the polling reaches its minimum sleep\ntime. You probably want this to be less than 1, unless you want to wait an\nincreasing amount of time in between flows.\n\nRaises:\nIOError: If we time out while waiting for the client.", "source": "codesearchnet"}
{"code": "def get_chempot_range_map(self, elements, referenced=True, joggle=True):\n    all_chempots = []\n    pd = self\n    facets = pd.facets\n    for facet in facets:\n        chempots = self._get_facet_chempots(facet)\n        all_chempots.append([chempots[el] for el in pd.elements])\n    inds = [pd.elements.index(el) for el in elements]\n    el_energies = {el: 0.0 for el in elements}\n    if referenced:\n        el_energies = {el: pd.el_refs[el].energy_per_atom for el in elements}\n    chempot_ranges = collections.defaultdict(list)\n    vertices = [list(range(len(self.elements)))]\n    if (len(all_chempots) > len(self.elements)):\n        vertices = get_facets(all_chempots, joggle=joggle)\n    for ufacet in vertices:\n        for combi in itertools.combinations(ufacet, 2):\n            data1 = facets[combi[0]]\n            data2 = facets[combi[1]]\n            common_ent_ind = set(data1).intersection(set(data2))\n            if (len(common_ent_ind) == len(elements)):\n                common_entries = [pd.qhull_entries[i] for i in common_ent_ind]\n                data = np.array([[(all_chempots[i][j] - el_energies[pd.elements[j]]) for j in inds] for i in combi])\n                sim = Simplex(data)\n                for entry in common_entries:\n                    chempot_ranges[entry].append(sim)\n    return chempot_ranges", "docstring": "Returns a chemical potential range map for each stable entry.\n\nArgs:\nelements: Sequence of elements to be considered as independent\nvariables. E.g., if you want to show the stability ranges\nof all Li-Co-O phases wrt to uLi and uO, you will supply\n[Element(\"Li\"), Element(\"O\")]\nreferenced: If True, gives the results with a reference being the\nenergy of the elemental phase. If False, gives absolute values.\njoggle (boolean): Whether to joggle the input to avoid precision\nerrors.\n\nReturns:\nReturns a dict of the form {entry: [simplices]}. The list of\nsimplices are the sides of the N-1 dim polytope bounding the\nallowable chemical potential range of each entry.", "source": "codesearchnet"}
{"code": "def word_to_vector_list(self, word, numeric=False, xsampa=False):\n        \n        if xsampa:\n            word = self.xsampa.convert(word)\n        tensor = list(map(self.segment_to_vector, self.segs(word)))\n        if numeric:\n            return self.tensor_to_numeric(tensor)\n        else:\n            return tensor", "docstring": "Return a list of feature vectors, given a Unicode IPA word.\n\nArgs:\nword (unicode): string in IPA\nnumeric (bool): if True, return features as numeric values instead\nof strings\n\nReturns:\nlist: a list of lists of '+'/'-'/'0' or 1/-1/0", "source": "juraj-google-style"}
{"code": "def _padding_to_conv_op_padding(padding):\n  \n  if not isinstance(padding, tuple):\n    raise ValueError(\"padding should be a tuple.\")\n  if all(p == SAME for p in padding):\n    \n    \n    return SAME\n  else:\n    \n    \n    \n    return VALID", "docstring": "Whether to use SAME or VALID for the underlying convolution op.\n\nArgs:\npadding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from\n`_fill_and_verify_padding`.\n\nReturns:\nOne of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the\nunderlying convolution op.\n\nRaises:\nValueError: If padding is not a tuple.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.Analyze = channel.unary_unary(\n        '/pulumirpc.Analyzer/Analyze',\n        request_serializer=analyzer__pb2.AnalyzeRequest.SerializeToString,\n        response_deserializer=analyzer__pb2.AnalyzeResponse.FromString,\n        )\n    self.GetPluginInfo = channel.unary_unary(\n        '/pulumirpc.Analyzer/GetPluginInfo',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=plugin__pb2.PluginInfo.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _SparseSoftmaxGrad(op: ops.Operation, grad):\n    indices, shape = (op.inputs[0], op.inputs[2])\n    out_vals = op.outputs[0]\n    sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)\n    sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)\n    sp_product = sparse_tensor.SparseTensor(indices, sp_output.values * sp_grad.values, shape)\n    sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keepdims=True)\n    sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)\n    grad_x = sp_sum.values * sp_output.values\n    return [None, grad_x, None]", "docstring": "Gradients for SparseSoftmax.\n\nThe calculation is the same as SoftmaxGrad:\n\ngrad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax\n\nwhere we now only operate on the non-zero values present in the SparseTensors.\n\nArgs:\nop: the SparseSoftmax op.\ngrad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.\n\nReturns:\nGradients w.r.t. the input (sp_indices, sp_values, sp_shape).", "source": "github-repos"}
{"code": "def _isValidQuery(self, query, mode=\"phonefy\"):\n        \n        try:\n            \n            validator = self.modes[mode].get(\"query_validator\")\n            if validator:\n                try:\n                    compiledRegexp = re.compile(\n                        \"^{expr}$\".format(\n                            expr=validator\n                        )\n                    )\n                    return compiledRegexp.match(query)\n                except AttributeError as e:\n                    return True\n\n        except AttributeError as e:\n            \n            compiledRegexp = re.compile(\"^{r}$\".format(r=self.validQuery[mode]))\n            return compiledRegexp.match(query)", "docstring": "Method to verify if a given query is processable by the platform.\n\nThe system looks for the forbidden characters in self.Forbidden list.\n\nArgs:\n-----\nquery: The query to be launched.\nmode: To be chosen amongst mailfy, phonefy, usufy, searchfy.\nReturn:\n-------\nTrue | False", "source": "juraj-google-style"}
{"code": "def build_filter_stack(stack, options):\n    \n    \n    if options.get('keyword_case'):\n        stack.preprocess.append(\n            filters.KeywordCaseFilter(options['keyword_case']))\n\n    if options.get('identifier_case'):\n        stack.preprocess.append(\n            filters.IdentifierCaseFilter(options['identifier_case']))\n\n    if options.get('truncate_strings'):\n        stack.preprocess.append(filters.TruncateStringFilter(\n            width=options['truncate_strings'], char=options['truncate_char']))\n\n    if options.get('use_space_around_operators', False):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.SpacesAroundOperatorsFilter())\n\n    \n    if options.get('strip_comments'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.StripCommentsFilter())\n\n    if options.get('strip_whitespace') or options.get('reindent'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.StripWhitespaceFilter())\n\n    if options.get('reindent'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(\n            filters.ReindentFilter(\n                char=options['indent_char'],\n                width=options['indent_width'],\n                indent_after_first=options['indent_after_first'],\n                indent_columns=options['indent_columns'],\n                wrap_after=options['wrap_after'],\n                comma_first=options['comma_first']))\n\n    if options.get('reindent_aligned', False):\n        stack.enable_grouping()\n        stack.stmtprocess.append(\n            filters.AlignedIndentFilter(char=options['indent_char']))\n\n    if options.get('right_margin'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(\n            filters.RightMarginFilter(width=options['right_margin']))\n\n    \n    if options.get('output_format'):\n        frmt = options['output_format']\n        if frmt.lower() == 'php':\n            fltr = filters.OutputPHPFilter()\n        elif frmt.lower() == 'python':\n            fltr = filters.OutputPythonFilter()\n        else:\n            fltr = None\n        if fltr is not None:\n            stack.postprocess.append(fltr)\n\n    return stack", "docstring": "Setup and return a filter stack.\n\nArgs:\nstack: :class:`~sqlparse.filters.FilterStack` instance\noptions: Dictionary with options validated by validate_options.", "source": "juraj-google-style"}
{"code": "def bounds(self, thr=0):\n    min_lat = float('inf')\n    min_lon = float('inf')\n    max_lat = (- float('inf'))\n    max_lon = (- float('inf'))\n    for segment in self.segments:\n        (milat, milon, malat, malon) = segment.bounds(thr=thr)\n        min_lat = min(milat, min_lat)\n        min_lon = min(milon, min_lon)\n        max_lat = max(malat, max_lat)\n        max_lon = max(malon, max_lon)\n    return (min_lat, min_lon, max_lat, max_lon)", "docstring": "Gets the bounds of this segment\n\nReturns:\n(float, float, float, float): Bounds, with min latitude, min longitude,\nmax latitude and max longitude", "source": "codesearchnet"}
{"code": "def RunPlugins(cls, artifacts_registry, file_system, mount_point, knowledge_base):\n    searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)\n    cls.CollectFromFileSystem(artifacts_registry, knowledge_base, searcher, file_system)\n    environment_variables = None\n    if knowledge_base:\n        environment_variables = knowledge_base.GetEnvironmentVariables()\n    registry_file_reader = FileSystemWinRegistryFileReader(file_system, mount_point, environment_variables=environment_variables)\n    win_registry = dfwinreg_registry.WinRegistry(registry_file_reader=registry_file_reader)\n    searcher = registry_searcher.WinRegistrySearcher(win_registry)\n    cls.CollectFromWindowsRegistry(artifacts_registry, knowledge_base, searcher)\n    cls.CollectFromKnowledgeBase(knowledge_base)\n    if (not knowledge_base.HasUserAccounts()):\n        logger.warning('Unable to find any user accounts on the system.')", "docstring": "Runs the preprocessing plugins.\n\nArgs:\nartifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts\ndefinitions registry.\nfile_system (dfvfs.FileSystem): file system to be preprocessed.\nmount_point (dfvfs.PathSpec): mount point path specification that refers\nto the base location of the file system.\nknowledge_base (KnowledgeBase): to fill with preprocessing information.", "source": "codesearchnet"}
{"code": "def _CreateConfig(self, project_id):\n    \n    project_id = project_id or self._GetNumericProjectId()\n\n    \n    if not project_id:\n      return\n\n    self.boto_config_header %= (\n        self.boto_config_script, self.boto_config_template)\n    config = config_manager.ConfigManager(\n        config_file=self.boto_config_template,\n        config_header=self.boto_config_header)\n    boto_dir = os.path.dirname(self.boto_config_script)\n\n    config.SetOption('GSUtil', 'default_project_id', project_id)\n    config.SetOption('GSUtil', 'default_api_version', '2')\n    config.SetOption('GoogleCompute', 'service_account', 'default')\n    config.SetOption('Plugin', 'plugin_directory', boto_dir)\n    config.WriteConfig(config_file=self.boto_config)", "docstring": "Create the boto config to support standalone GSUtil.\n\nArgs:\nproject_id: string, the project ID to use in the config file.", "source": "juraj-google-style"}
{"code": "def has_extana(self, cached=True):\n        \n        if cached and self.hardware != -1:\n            return True if (self.hardware & EXT_HW_EXTANA) else False\n\n        result = self._check_hardware()\n        return True if (result & EXT_HW_EXTANA) != 0 else False", "docstring": "Can be used to check if an SK8-ExtAna device is currently connected.\nNOTE: do not attempt to call while data streaming is active!\n\nArgs:\ncached (bool): if True, use the cached value of the connected hardware\nstate rather than querying the device. Set to False to force a query.\n\nReturns:\nbool. True if the SK8 currently has an SK8-ExtAna device attached, False otherwise.", "source": "juraj-google-style"}
{"code": "def find_exception_by_code(code):\n    errorName = None\n    for error in WebDriverError:\n        if (error.value.code == code):\n            errorName = error\n            break\n    return errorName", "docstring": "Find name of exception by WebDriver defined error code.\n\nArgs:\ncode(str): Error code defined in protocol.\n\nReturns:\nThe error name defined in protocol.", "source": "codesearchnet"}
{"code": "def dbmax_mean(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmax_mean`'.format(value))\n\n        self._dbmax_mean = value", "docstring": "Corresponds to IDD Field `dbmax_mean`\nMean of extreme annual maximum dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax_mean`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_num_bytes(self, batch: Sequence[tf.Tensor]) -> int:\n    return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch of Tensors.", "source": "github-repos"}
{"code": "def __init__(self, settings, room_id):\n        \n        StreamProcess.__init__(self, settings, room_id)\n        self._reactor = self._connection.get_twisted_reactor()\n        self._protocol = None", "docstring": "Initialize.\n\nArgs:\nsettings (dict): Settings used to create a :class:`Connection` instance\nroom_id (int): Room ID", "source": "juraj-google-style"}
{"code": "def _somethingFound(self, data, mode='phonefy'):\n    if data:\n        try:\n            for text in self.notFoundText[mode]:\n                if (text in data):\n                    return False\n            return True\n        except AttributeError as e:\n            verifier = self.modes.get(mode)\n            if verifier:\n                if (verifier.get('not_found_text', '') in data):\n                    return False\n                else:\n                    return True\n    return False", "docstring": "Verifying if something was found.\n\nArgs:\n-----\ndata: Data where the self.notFoundText will be searched.\nmode: Mode to be executed.\n\nReturn:\n-------\nTrue if exists.", "source": "codesearchnet"}
{"code": "def consume(self, callback, queue):\n    self.consumers[queue] = callback\n    if self._client_ready.called:\n        return self.client.consume(callback, queue)", "docstring": "Register a new consumer.\n\nThis consumer will be configured for every protocol this factory\nproduces so it will be reconfigured on network failures. If a connection\nis already active, the consumer will be added to it.\n\nArgs:\ncallback (callable): The callback to invoke when a message arrives.\nqueue (str): The name of the queue to consume from.", "source": "codesearchnet"}
{"code": "def findAll(self, selfValue):\n        \n        resultList = []\n        for element in selfValue:\n            if isinstance(element, Single):\n                resultList += element.findAll(element.value)\n            else:\n                resultList.append(element)\n        return resultList", "docstring": "Looks for all the non single values(str, int) *recursively* and returns a list of them\n\nArgs:\nselfValue: A list of single, str, int. Normally just ``self.value``\n\nReturns:\nlist: A list contains only non singles(str, int).", "source": "juraj-google-style"}
{"code": "def _subdivide_nodes(nodes, degree):\n    if (degree == 1):\n        nodes_a = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_A)\n        nodes_b = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_B)\n        nodes_c = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_C)\n        nodes_d = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_D)\n    elif (degree == 2):\n        nodes_a = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_A)\n        nodes_b = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_B)\n        nodes_c = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_C)\n        nodes_d = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_D)\n    elif (degree == 3):\n        nodes_a = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_A)\n        nodes_b = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_B)\n        nodes_c = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_C)\n        nodes_d = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_D)\n    elif (degree == 4):\n        nodes_a = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_A)\n        nodes_b = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_B)\n        nodes_c = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_C)\n        nodes_d = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_D)\n    else:\n        nodes_a = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE0, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE2)\n        nodes_b = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE1)\n        nodes_c = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE4, _WEIGHTS_SUBDIVIDE3)\n        nodes_d = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE5)\n    return (nodes_a, nodes_b, nodes_c, nodes_d)", "docstring": "Subdivide a surface into four sub-surfaces.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nDoes so by taking the unit triangle (i.e. the domain of the surface) and\nsplitting it into four sub-triangles by connecting the midpoints of each\nside.\n\nArgs:\nnodes (numpy.ndarray): Control points for a surface.\ndegree (int): The degree of the surface.\n\nReturns:\nTuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]: The\nnodes for the four sub-surfaces.", "source": "codesearchnet"}
{"code": "def setMinimum(self, minimum):\n        \n        if not isinstance(minimum, int):\n            raise TypeError(\"Argument is not of type int or long\")\n        self._minimum = minimum", "docstring": "setter to _minimum.\n\nArgs:\nminimum (int or long): new _minimum value.\n\nRaises:\nTypeError: If the given argument is not an integer.", "source": "juraj-google-style"}
{"code": "def get_polling_override(self):\n    polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)\n    if (polling_override is None):\n        logger.warn('Failed to find handle for polling override')\n        return None\n    override_ms = self.dongle._read_attribute(self.conn_handle, polling_override, True)\n    return (None if (override_ms is None) else ord(override_ms))", "docstring": "Get the current polling override value in milliseconds.\n\nSee :meth:`set_polling_override` for more information.\n\nReturns:\nNone on error, otherwise the current override period in milliseconds\n(0 = disabled).", "source": "codesearchnet"}
{"code": "def _DescriptionSection(component, info):\n    if custom_descriptions.NeedsCustomDescription(component):\n        available_space = LINE_LENGTH - SECTION_INDENTATION\n        description = custom_descriptions.GetDescription(component, available_space, LINE_LENGTH)\n        summary = custom_descriptions.GetSummary(component, available_space, LINE_LENGTH)\n    else:\n        description = _GetDescription(info)\n        summary = _GetSummary(info)\n    text = description or summary or None\n    if text:\n        return ('DESCRIPTION', text)\n    else:\n        return None", "docstring": "The \"Description\" sections of the help string.\n\nArgs:\ncomponent: The component to produce the description section for.\ninfo: The info dict for the component of interest.\n\nReturns:\nReturns the description if available. If not, returns the summary.\nIf neither are available, returns None.", "source": "github-repos"}
{"code": "def get_snapshot(self, snapshot_id_or_uri, volume_id_or_uri=None):\n        \n        uri = self.__build_volume_snapshot_uri(volume_id_or_uri, snapshot_id_or_uri)\n        return self._client.get(uri)", "docstring": "Gets a snapshot of a volume.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume ID or the volume URI. It is optional if it is passed a snapshot URI,\nbut required if it passed a snapshot ID.\nsnapshot_id_or_uri:\nCan be either the snapshot ID or the snapshot URI.\n\nReturns:\ndict: The snapshot.", "source": "juraj-google-style"}
{"code": "def dataset(self, mode, hparams=None, global_step=None, **kwargs):\n    \n    datasets = [p.dataset(mode, **kwargs) for p in self.problems]\n    datasets = [\n        d.map(lambda x, i=j: self.normalize_example(  \n            dict(x, problem_id=tf.constant([i])), hparams))\n        for j, d in enumerate(datasets)  \n    ]\n    if mode is problem.DatasetSplit.TRAIN:\n      if global_step is None:\n        global_step = tf.train.get_or_create_global_step()\n      pmf = get_schedule_distribution(self.schedule, global_step)\n      return get_multi_dataset(datasets, pmf)\n    elif self.only_eval_first_problem:\n      return datasets[0]\n    else:\n      datasets = [d.repeat() for d in datasets]\n      return tf.data.Dataset.zip(tuple(datasets)).flat_map(\n          lambda *x: functools.reduce(  \n              tf.data.Dataset.concatenate,\n              map(tf.data.Dataset.from_tensors, x)))", "docstring": "Returns a dataset containing examples from multiple problems.\n\nArgs:\nmode: A member of problem.DatasetSplit.\nhparams: A tf.HParams object, the model hparams.\nglobal_step: A scalar tensor used to compute the sampling distribution.\nIf global_step is None, we call tf.train.get_or_create_global_step by\ndefault.\n**kwargs: Keywords for problem.Problem.Dataset.\n\nReturns:\nA dataset containing examples from multiple problems.", "source": "juraj-google-style"}
{"code": "def make_instance(cls, data):\n    schema = cls()\n    if (not hasattr(schema.Meta, 'model')):\n        raise AttributeError('In order to make an instance, a model for the schema must be defined in the Meta class.')\n    serialized_data = schema.load(data).data\n    return cls.Meta.model(**serialized_data)", "docstring": "Validate the data and create a model instance from the data.\n\nArgs:\ndata (dict): The unserialized data to insert into the new model\ninstance through it's constructor.\n\nReturns:\npeewee.Model|sqlalchemy.Model: The model instance with it's data\ninserted into it.\n\nRaises:\nAttributeError: This is raised if ``Meta.model`` isn't set on the\nschema's definition.", "source": "codesearchnet"}
{"code": "def merge(self, other_rel):\n    if ((other_rel.thresholds.size == self.thresholds.size) and np.all((other_rel.thresholds == self.thresholds))):\n        self.frequencies += other_rel.frequencies\n    else:\n        print('Input table thresholds do not match.')", "docstring": "Ingest another DistributedReliability and add its contents to the current object.\n\nArgs:\nother_rel: a Distributed reliability object.", "source": "codesearchnet"}
{"code": "def fetch_woeid(self, location):\n    rss = self._fetch_xml(WOEID_LOOKUP_URL.format(quote(location)))\n    try:\n        woeid = rss.find('results/Result/woeid').text\n    except AttributeError:\n        return None\n    return woeid", "docstring": "Fetch a location's corresponding WOEID.\n\nArgs:\nlocation: (string) a location (e.g. 23454 or Berlin, Germany).\n\nReturns:\na string containing the location's corresponding WOEID or None if\nthe WOEID could not be found.\n\nRaises:\nurllib.error.URLError: urllib.request could not open the URL\n(Python 3).\nurllib2.URLError: urllib2 could not open the URL (Python 2).\nxml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to\nparse the XML document.", "source": "codesearchnet"}
{"code": "def dim_reduce_data(data, d):\n    (genes, cells) = data.shape\n    distances = np.zeros((cells, cells))\n    for i in range(cells):\n        for j in range(cells):\n            distances[(i, j)] = poisson_dist(data[(:, i)], data[(:, j)])\n    proximity = (distances ** 2)\n    J = (np.eye(cells) - (1.0 / cells))\n    B = ((- 0.5) * np.dot(J, np.dot(proximity, J)))\n    (e_val, e_vec) = np.linalg.eigh(B)\n    lam = np.diag(e_val[(- d):])[::(- 1)]\n    E = e_vec[(:, (- d):)][::(- 1)]\n    X = np.dot(E, (lam ** 0.5))\n    return X", "docstring": "Does a MDS on the data directly, not on the means.\n\nArgs:\ndata (array): genes x cells\nd (int): desired dimensionality\n\nReturns:\nX, a cells x d matrix", "source": "codesearchnet"}
{"code": "def set_dft_grid(self, radical_points=128, angular_points=302,\n                     grid_type=\"Lebedev\"):\n        \n        available_lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,\n                                            170, 194, 230, 266, 302, 350, 434,\n                                            590, 770, 974, 1202, 1454, 1730,\n                                            2030, 2354, 2702, 3074, 3470, 3890,\n                                            4334, 4802, 5294}\n        if grid_type.lower() == \"sg-0\":\n            self.params[\"rem\"][\"xc_grid\"] = 0\n        elif grid_type.lower() == \"sg-1\":\n            self.params[\"rem\"][\"xc_grid\"] = 1\n        elif grid_type.lower() == \"lebedev\":\n            if angular_points not in available_lebedev_angular_points:\n                raise ValueError(str(angular_points) + \" is not a valid \"\n                                 \"Lebedev angular points number\")\n            self.params[\"rem\"][\"xc_grid\"] = \"{rp:06d}{ap:06d}\".format(\n                rp=radical_points, ap=angular_points)\n        elif grid_type.lower() == \"gauss-legendre\":\n            self.params[\"rem\"][\"xc_grid\"] = \"-{rp:06d}{ap:06d}\".format(\n                rp=radical_points, ap=angular_points)\n        else:\n            raise ValueError(\"Grid type \" + grid_type + \" is not supported \"\n                                                        \"currently\")", "docstring": "Set the grid for DFT numerical integrations.\n\nArgs:\nradical_points: Radical points. (Integer)\nangular_points: Angular points. (Integer)\ngrid_type: The type of of the grid. There are two standard grids:\nSG-1 and SG-0. The other two supported grids are \"Lebedev\" and\n\"Gauss-Legendre\"", "source": "juraj-google-style"}
{"code": "def on_epoch_begin(self, epoch, logs=None):", "docstring": "Called at the start of an epoch.\n\nSubclasses should override for any actions to run. This function should\nonly be called during TRAIN mode.\n\nArgs:\nepoch: Integer, index of epoch.\nlogs: Dict. Currently no data is passed to this argument for this\nmethod but that may change in the future.", "source": "github-repos"}
{"code": "def dedent(text: str) -> str:\n    return textwrap.dedent(text).strip()", "docstring": "Wrapper around `textwrap.dedent` which also `strip()` the content.\n\nBefore:\n\n```python\ntext = textwrap.dedent(\n\\\"\\\"\\\"\\\\\nA(\nx=1,\n)\\\"\\\"\\\"\n)\n```\n\nAfter:\n\n```python\ntext = epy.dedent(\n\\\"\\\"\\\"\nA(\nx=1,\n)\n\\\"\\\"\\\"\n)\n```\n\nArgs:\ntext: The text to dedent\n\nReturns:\nThe dedented text", "source": "github-repos"}
{"code": "def get_desired():\n    public_members = get_public_members()\n    if public_members:\n        members = '\\n    :members: {}'.format(', '.join(public_members))\n    else:\n        members = ''\n    return DESIRED_TEMPLATE.format(members=members)", "docstring": "Populate ``DESIRED_TEMPLATE`` with public members.\n\nIf there are no members, does nothing.\n\nReturns:\nstr: The \"desired\" contents of ``bezier.rst``.", "source": "codesearchnet"}
{"code": "def get_evaluations(self, variant_obj):\n    query = dict(variant_id=variant_obj['variant_id'])\n    res = self.acmg_collection.find(query).sort([('created_at', pymongo.DESCENDING)])\n    return res", "docstring": "Return all evaluations for a certain variant.\n\nArgs:\nvariant_obj (dict): variant dict from the database\n\nReturns:\npymongo.cursor: database cursor", "source": "codesearchnet"}
{"code": "def _delete_gridfs_data(self, data):\n    if isinstance(data, ObjectId):\n        if self._gridfs.exists({'_id': data}):\n            self._gridfs.delete(data)\n        else:\n            raise DataStoreGridfsIdInvalid()\n    elif isinstance(data, list):\n        for item in data:\n            self._delete_gridfs_data(item)\n    elif isinstance(data, dict):\n        for (key, item) in data.items():\n            self._delete_gridfs_data(item)", "docstring": "Delete all GridFS data that is linked by fields in the specified data.\n\nArgs:\ndata: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object\nfor any ObjectID is deleted.", "source": "codesearchnet"}
{"code": "def convert_maxpool3(params, w_name, scope_name, inputs, layers, weights, names):\n    \n\n    print('Converting pooling ...')\n\n    if names == 'short':\n        tf_name = 'P' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    if 'kernel_shape' in params:\n        height, width, depth = params['kernel_shape']\n    else:\n        height, width, depth = params['kernel_size']\n\n    if 'strides' in params:\n        stride_height, stride_width, stride_depth = params['strides']\n    else:\n        stride_height, stride_width, stride_depth = params['stride']\n\n    if 'pads' in params:\n        padding_h, padding_w, padding_d, _, _ = params['pads']\n    else:\n        padding_h, padding_w, padding_d = params['padding']\n\n    input_name = inputs[0]\n    if padding_h > 0 and padding_w > 0 and padding_d > 0:\n        padding_name = tf_name + '_pad'\n        padding_layer = keras.layers.ZeroPadding3D(\n            padding=(padding_h, padding_w, padding_d),\n            name=padding_name\n        )\n        layers[padding_name] = padding_layer(layers[inputs[0]])\n        input_name = padding_name\n\n    \n    pooling = keras.layers.MaxPooling3D(\n        pool_size=(height, width, depth),\n        strides=(stride_height, stride_width, stride_depth),\n        padding='valid',\n        name=tf_name\n    )\n\n    layers[scope_name] = pooling(layers[input_name])", "docstring": "Convert 3d Max pooling.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def _add_arg(self, key, value, mask=False):\n    if (self.lang == 'python'):\n        self._add_arg_python(key, value, mask)\n    elif (self.lang == 'java'):\n        self._add_arg_java(key, value, mask)", "docstring": "Add CLI Arg for the correct language.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).\nmask (boolean, default:False): Indicates whether no mask value.", "source": "codesearchnet"}
{"code": "def van(first_enc, first_frame, current_enc, gt_image, reuse=False, scope_prefix='', hparams=None):\n    with tf.variable_scope((scope_prefix + 'van'), reuse=reuse):\n        output_shape = first_frame.get_shape().as_list()\n        output_shape[0] = (- 1)\n        first_depth = 64\n        (f_first_enc, _) = van_enc_2d(first_enc, first_depth)\n        (f_first_frame, image_enc_history) = van_image_enc_2d(first_frame, first_depth, hparams=hparams)\n        (f_current_enc, van_higher_level) = van_enc_2d(current_enc, first_depth, reuse=True)\n        (f_gt_image, _) = van_image_enc_2d(gt_image, first_depth, True, hparams=hparams)\n        analogy_t = analogy_computation_2d(f_first_enc, f_first_frame, f_current_enc, first_depth)\n        enc_img = (f_current_enc + analogy_t)\n        img = van_dec_2d(enc_img, image_enc_history, output_shape, first_depth, hparams=hparams)\n        batch_size = tf.to_float(tf.shape(first_enc)[0])\n        r_loss = (tf.nn.l2_loss(((f_gt_image - f_current_enc) - analogy_t)) / batch_size)\n        return (img, r_loss, van_higher_level)", "docstring": "Implements a VAN.\n\nArgs:\nfirst_enc: The first encoding.\nfirst_frame: The first ground truth frame.\ncurrent_enc: The encoding of the frame to generate.\ngt_image: The ground truth image, only used for regularization.\nreuse: To reuse in variable scope or not.\nscope_prefix: The prefix before the scope name.\nhparams: The python hparams.\n\nReturns:\nThe generated image.", "source": "codesearchnet"}
{"code": "def recipe_cm360_report_replicate(config, auth_read, recipe_name, auth_write, account, recipe_slug, report_id, report_name, delete, Aggregate):\n    drive(config, {'auth': 'user', 'copy': {'source': 'https:\n    dataset(config, {'auth': auth_write, 'dataset': recipe_slug})\n    cm_report_replicate(config, {'auth': auth_read, 'report': {'account': account, 'id': report_id, 'name': report_name, 'delete': delete}, 'replicate': {'sheets': {'sheet': recipe_name, 'tab': 'Accounts', 'range': ''}}, 'write': {'bigquery': {'dataset': recipe_slug, 'is_incremental_load': Aggregate}}})", "docstring": "Replicate a report across multiple networks and advertisers.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nrecipe_name (string) - Sheet to read ids from.\nauth_write (authentication) - Credentials used for writing data.\naccount (integer) - CM network id.\nrecipe_slug (string) - NA\nreport_id (integer) - CM template report id, for template\nreport_name (string) - CM template report name, empty if using id instead.\ndelete (boolean) - Use only to reset the reports if setup changes.\nAggregate (boolean) - Append report data to existing table, requires Date column.", "source": "github-repos"}
{"code": "def __init__(self, fail_silently=False, aws_access_key_id=None,\n                 aws_secret_access_key=None, **kwargs):\n        \n        super(EmailBackend, self).__init__(fail_silently=fail_silently)\n\n        \n        access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)\n        secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)\n        region_name = getattr(settings, 'AWS_DEFAULT_REGION', 'us-east-1')\n\n        \n        access_key_id = getattr(settings, 'AWS_SES_ACCESS_KEY_ID',\n                                access_key_id)\n        secret_access_key = getattr(settings, 'AWS_SES_SECRET_ACCESS_KEY',\n                                    secret_access_key)\n        region_name = getattr(settings, 'AWS_SES_REGION', region_name)\n\n        \n        \n        if aws_access_key_id is not None and aws_secret_access_key is not None:\n            access_key_id = aws_access_key_id\n            secret_access_key = aws_secret_access_key\n\n        self.conn = boto3.client(\n            'ses',\n            aws_access_key_id=access_key_id,\n            aws_secret_access_key=secret_access_key,\n            region_name=region_name,\n        )", "docstring": "Creates a client for the Amazon SES API.\n\nArgs:\nfail_silently: Flag that determines whether Amazon SES\nclient errors should throw an exception.", "source": "juraj-google-style"}
{"code": "def config_file(self, filename):\n        \n        if os.path.isfile(filename):\n            with open(filename, 'r') as fh:\n                self._config_data = json.load(fh)\n        else:\n            self.tcex.log.error('Could not load configuration file \"{}\".'.format(filename))", "docstring": "Load configuration data from provided file and inject values into sys.argv.\n\nArgs:\nconfig (str): The configuration file name.", "source": "juraj-google-style"}
{"code": "def download(self, folder=None):\n    url = self.data.get('url', None)\n    if (not url):\n        raise HDXError('No URL to download!')\n    logger.debug(('Downloading %s' % url))\n    filename = self.data['name']\n    format = ('.%s' % self.data['format'])\n    if (format not in filename):\n        filename = ('%s%s' % (filename, format))\n    with Download(full_agent=self.configuration.get_user_agent()) as downloader:\n        path = downloader.download_file(url, folder, filename)\n        return (url, path)", "docstring": "Download resource store to provided folder or temporary folder if no folder supplied\n\nArgs:\nfolder (Optional[str]): Folder to download resource to. Defaults to None.\n\nReturns:\nTuple[str, str]: (URL downloaded, Path to downloaded file)", "source": "codesearchnet"}
{"code": "def get(cls, issue_type):\n    if isinstance(issue_type, str):\n        obj = getattr(db, cls.__name__).find_one((cls.issue_type == issue_type))\n    elif isinstance(issue_type, int):\n        obj = getattr(db, cls.__name__).find_one((cls.issue_type_id == issue_type))\n    elif isinstance(issue_type, cls):\n        return issue_type\n    else:\n        obj = None\n    if (not obj):\n        obj = cls()\n        obj.issue_type = issue_type\n        db.session.add(obj)\n        db.session.commit()\n        db.session.refresh(obj)\n    return obj", "docstring": "Returns the IssueType object for `issue_type`. If no existing object was found, a new type will\nbe created in the database and returned\n\nArgs:\nissue_type (str,int,IssueType): Issue type name, id or class\n\nReturns:\n:obj:`IssueType`", "source": "codesearchnet"}
{"code": "def acquire(self):\n    self._fd = open(self._path, mode='w+')\n    os.chmod(self._path, 432)\n    fcntl.flock(self._fd, self._op)", "docstring": "Acquire the lock\n\nRaises:\nIOError: if the call to flock fails", "source": "codesearchnet"}
{"code": "def get_user(self, user_id=None, user_name=None):\n        \n\n        if user_id:\n            endpoint = '/api/user_id/{0}'.format(user_id)\n        elif user_name:\n            endpoint = '/api/user_name/{0}'.format(user_name)\n        else:\n            \n            endpoint = '/api/user'\n\n        data = self._make_request(verb=\"GET\", endpoint=endpoint)\n\n        try:\n            return User.NewFromJSON(data)\n        except:\n            return data", "docstring": "Get a user object from the API. If no ``user_id`` or ``user_name``\nis specified, it will return the User object for the currently\nauthenticated user.\n\nArgs:\nuser_id (int): User ID of the user for whom you want to get\ninformation. [Optional]\nuser_name(str): Username for the user for whom you want to get\ninformation. [Optional]\n\nReturns:\nA User object.", "source": "juraj-google-style"}
{"code": "def refresh_db(**kwargs):\n    salt.utils.pkg.clear_rtag(__opts__)\n    retcodes = {100: True, 0: None, 1: False}\n    ret = True\n    check_update_ = kwargs.pop('check_update', True)\n    options = _get_options(**kwargs)\n    clean_cmd = ['--quiet', '--assumeyes', 'clean', 'expire-cache']\n    clean_cmd.extend(options)\n    _call_yum(clean_cmd, ignore_retcode=True)\n    if check_update_:\n        update_cmd = ['--quiet', '--assumeyes', 'check-update']\n        if ((__grains__.get('os_family') == 'RedHat') and (__grains__.get('osmajorrelease') == 7)):\n            update_cmd.append('--setopt=autocheck_running_kernel=false')\n        update_cmd.extend(options)\n        ret = retcodes.get(_call_yum(update_cmd, ignore_retcode=True)['retcode'], False)\n    return ret", "docstring": "Check the yum repos for updated packages\n\nReturns:\n\n- ``True``: Updates are available\n- ``False``: An error occurred\n- ``None``: No updates are available\n\nrepo\nRefresh just the specified repo\n\ndisablerepo\nDo not refresh the specified repo\n\nenablerepo\nRefresh a disabled repo using this option\n\nbranch\nAdd the specified branch when refreshing\n\ndisableexcludes\nDisable the excludes defined in your config files. Takes one of three\noptions:\n- ``all`` - disable all excludes\n- ``main`` - disable excludes defined in [main] in yum.conf\n- ``repoid`` - disable excludes defined for that repo\n\nsetopt\nA comma-separated or Python list of key=value options. This list will\nbe expanded and ``--setopt`` prepended to each in the yum/dnf command\nthat is run.\n\n.. versionadded:: 2019.2.0\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' pkg.refresh_db", "source": "codesearchnet"}
{"code": "def Approve(self, request, global_params=None):\n    config = self.GetMethodConfig('Approve')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Approves or rejects a pending build. If approved, the returned LRO will be analogous to the LRO returned from a CreateBuild call. If rejected, the returned LRO will be immediately done.\n\nArgs:\nrequest: (CloudbuildProjectsBuildsApproveRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def _wrap_any(cls, self, obj: Any):\n    if isinstance(obj, expressions.Builder):\n        return cls.from_fhir_path_builder(obj)\n    if isinstance(obj, list):\n        return [cls._wrap_any(self, item) for item in obj]\n    if isinstance(obj, tuple):\n        return (cls._wrap_any(self, item) for item in obj)\n    if isinstance(obj, dict):\n        return {cls._wrap_any(self, key): cls._wrap_any(self, value) for key, value in obj.items()}\n    if callable(obj):\n        return cls._wrap_function(self, obj)\n    return obj", "docstring": "Wraps any object with the logic below.\n\nArgs:\nself: self instance reference.\nobj: any object.\n\nReturns:\nIf the object is:\n- an expressions.Builder: returns this class to wrap it;\n- a list: returns a new list with each item in the list wrapped;\n- a tuple: returns a new tuple with each item in the list wrapped;\n- a dictionary: returns a new dictionary with each key/value pair in\nthe dictionary wrapped.\n- a callable function: returns a wrapper function with the return result\nof the function wrapped.\n- anything else: returns the object itself.", "source": "github-repos"}
{"code": "def from_json_value(cls, json_value: Optional[Any], primitive_cls: Type[message.Message], context: Context) -> 'PrimitiveWrapper':\n    if isinstance(json_value, (list, tuple)):\n        raise ValueError('Error, unable to wrap sequence.')\n    if json_value is None or isinstance(json_value, (dict,)):\n        return cls(no_value_primitive(primitive_cls), context)\n    if not isinstance(json_value, cast(Tuple[Type[Any], ...], cls._PARSABLE_TYPES)):\n        raise fhir_errors.InvalidFhirError(f'Unable to parse JSON. {type(json_value)} is invalid FHIR JSON.')\n    return cls.from_json_str(str(json_value), primitive_cls, context)", "docstring": "Parses json_value into an instance of primitive_cls and wraps.\n\nArgs:\njson_value: The optional raw json_value to parse and wrap.\nprimitive_cls: The type of FHIR primitive message to create and validate.\ncontext: Related primitive information to use for printing/parsing a\nwrapped primitive.\n\nReturns:\nAn instance of PrimitiveWrapper.", "source": "github-repos"}
{"code": "def children(self, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):\n    children = {}\n    for name, ref in self.list_children(obj, **kwargs):\n        children[name] = ref\n    return children", "docstring": "Returns all child trackables attached to obj.\n\nArgs:\nobj: A `Trackable` object.\nsave_type: A string, can be 'savedmodel' or 'checkpoint'.\n**kwargs: kwargs to use when retrieving the object's children.\n\nReturns:\nDictionary of all children attached to the object with name to trackable.", "source": "github-repos"}
{"code": "def _set_bearer_user_vars(allowed_client_ids, scopes):\n  \n  all_scopes, sufficient_scopes = _process_scopes(scopes)\n  try:\n    authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes))\n  except oauth.Error:\n    _logger.debug('Unable to get authorized scopes.', exc_info=True)\n    return\n  if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes):\n    _logger.warning('Authorized scopes did not satisfy scope requirements.')\n    return\n  client_id = oauth.get_client_id(authorized_scopes)\n\n  \n  \n  \n  if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and\n      client_id not in allowed_client_ids):\n    _logger.warning('Client ID is not allowed: %s', client_id)\n    return\n\n  os.environ[_ENV_USE_OAUTH_SCOPE] = ' '.join(authorized_scopes)\n  _logger.debug('get_current_user() will return user from matched oauth_user.')", "docstring": "Validate the oauth bearer token and set endpoints auth user variables.\n\nIf the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE.  This\nprovides enough information that our endpoints.get_current_user() function\ncan get the user.\n\nArgs:\nallowed_client_ids: List of client IDs that are acceptable.\nscopes: List of acceptable scopes.", "source": "juraj-google-style"}
{"code": "def create_combination(list_of_sentences):\n    num_sentences = (len(list_of_sentences) - 1)\n    combinations = []\n    for (i, _) in enumerate(list_of_sentences):\n        if (i == num_sentences):\n            break\n        num_pairs = (num_sentences - i)\n        populated = (num_pairs * [list_of_sentences[i]])\n        zipped = list(zip(populated, list_of_sentences[(i + 1):]))\n        combinations += zipped\n    return combinations", "docstring": "Generates all possible pair combinations for the input list of sentences.\n\nFor example:\n\ninput = [\"paraphrase1\", \"paraphrase2\", \"paraphrase3\"]\n\noutput = [(\"paraphrase1\", \"paraphrase2\"),\n(\"paraphrase1\", \"paraphrase3\"),\n(\"paraphrase2\", \"paraphrase3\")]\n\nArgs:\nlist_of_sentences: the list of input sentences.\nReturns:\nthe list of all possible sentence pairs.", "source": "codesearchnet"}
{"code": "def create_index(index_name, index_config, client):\n    \n    client.create(index=index_name, body=index_config)", "docstring": "Creates an index with a given configuration\n\nArgs:\nindex_name (str): Name of the index you want to create\nindex_config (dict) configuration for the index\nclient (Elasticsearch.IndicesClient) the Elasticsearch client", "source": "juraj-google-style"}
{"code": "def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):\n\n    def _apply_fn(dataset):\n        \n        return dataset.rejection_resample(class_func=class_func, target_dist=target_dist, initial_dist=initial_dist, seed=seed)\n    return _apply_fn", "docstring": "A transformation that resamples a dataset to achieve a target distribution.\n\n**NOTE** Resampling is performed via rejection sampling; some fraction\nof the input values will be dropped.\n\nArgs:\nclass_func: A function mapping an element of the input dataset to a scalar\n`tf.int32` tensor. Values should be in `[0, num_classes)`.\ntarget_dist: A floating point type tensor, shaped `[num_classes]`.\ninitial_dist: (Optional.)  A floating point type tensor, shaped\n`[num_classes]`.  If not provided, the true class distribution is\nestimated live in a streaming fashion.\nseed: (Optional.) Python integer seed for the resampler.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def _dist_to_opt(self):\n    dist_to_opt_ops = []\n    self._grad_norm = tf.sqrt(self._grad_norm_squared)\n    avg_op = self._moving_averager.apply([self._grad_norm])\n    dist_to_opt_ops.append(avg_op)\n    with tf.control_dependencies([avg_op]):\n        self._grad_norm_avg = self._moving_averager.average(self._grad_norm)\n        self._d_t = (self._grad_norm_avg / self._grad_norm_squared_avg)\n    avg_op = self._moving_averager.apply([self._d_t])\n    dist_to_opt_ops.append(avg_op)\n    with tf.control_dependencies([avg_op]):\n        self._dist_to_opt_avg = tf.identity(self._moving_averager.average(self._d_t))\n        if self._sparsity_debias:\n            self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)\n    return dist_to_opt_ops", "docstring": "Distance to optimum.\n\nReturns:\nD_t ops", "source": "codesearchnet"}
{"code": "def explore(config, mutations, resample_probability, custom_explore_fn):\n    new_config = copy.deepcopy(config)\n    for (key, distribution) in mutations.items():\n        if isinstance(distribution, dict):\n            new_config.update({key: explore(config[key], mutations[key], resample_probability, None)})\n        elif isinstance(distribution, list):\n            if ((random.random() < resample_probability) or (config[key] not in distribution)):\n                new_config[key] = random.choice(distribution)\n            elif (random.random() > 0.5):\n                new_config[key] = distribution[max(0, (distribution.index(config[key]) - 1))]\n            else:\n                new_config[key] = distribution[min((len(distribution) - 1), (distribution.index(config[key]) + 1))]\n        else:\n            if (random.random() < resample_probability):\n                new_config[key] = distribution()\n            elif (random.random() > 0.5):\n                new_config[key] = (config[key] * 1.2)\n            else:\n                new_config[key] = (config[key] * 0.8)\n            if (type(config[key]) is int):\n                new_config[key] = int(new_config[key])\n    if custom_explore_fn:\n        new_config = custom_explore_fn(new_config)\n        assert (new_config is not None), 'Custom explore fn failed to return new config'\n    logger.info('[explore] perturbed config from {} -> {}'.format(config, new_config))\n    return new_config", "docstring": "Return a config perturbed as specified.\n\nArgs:\nconfig (dict): Original hyperparameter configuration.\nmutations (dict): Specification of mutations to perform as documented\nin the PopulationBasedTraining scheduler.\nresample_probability (float): Probability of allowing resampling of a\nparticular variable.\ncustom_explore_fn (func): Custom explore fn applied after built-in\nconfig perturbations are.", "source": "codesearchnet"}
{"code": "def fmt_partition(partition):\n    \n    if not partition:\n        return ''\n\n    parts = [fmt_part(part, partition.node_labels).split('\\n')\n             for part in partition]\n\n    times = ('   ',\n             ' {} '.format(MULTIPLY),\n             '   ')\n    breaks = ('\\n', '\\n', '')  \n    between = [times] * (len(parts) - 1) + [breaks]\n\n    \n    elements = chain.from_iterable(zip(parts, between))\n\n    \n    return ''.join(chain.from_iterable(zip(*elements)))", "docstring": "Format a |Bipartition|.\n\nThe returned string looks like::\n\n0,1    ∅\n─── ✕ ───\n2    0,1\n\nArgs:\npartition (Bipartition): The partition in question.\n\nReturns:\nstr: A human-readable string representation of the partition.", "source": "juraj-google-style"}
{"code": "def get_cases(variant_source, case_lines=None, case_type='ped', variant_type='snv', variant_mode='vcf'):\n    individuals = get_individuals(variant_source=variant_source, case_lines=case_lines, case_type=case_type, variant_mode=variant_mode)\n    case_objs = []\n    case_ids = set()\n    compressed = False\n    tabix_index = False\n    if variant_source.endswith('.gz'):\n        logger.debug('Found compressed variant source')\n        compressed = True\n        tabix_file = '.'.join([variant_source, 'tbi'])\n        if os.path.exists(tabix_file):\n            logger.debug('Found index file')\n            tabix_index = True\n    if (len(individuals) > 0):\n        for individual in individuals:\n            case_ids.add(individual.case_id)\n    else:\n        case_ids = [os.path.basename(variant_source)]\n    for case_id in case_ids:\n        logger.info('Found case {0}'.format(case_id))\n        case = Case(case_id=case_id, name=case_id, variant_source=variant_source, variant_type=variant_type, variant_mode=variant_mode, compressed=compressed, tabix_index=tabix_index)\n        for individual in individuals:\n            if (individual.case_id == case_id):\n                logger.info('Adding ind {0} to case {1}'.format(individual.name, individual.case_id))\n                case.add_individual(individual)\n        case_objs.append(case)\n    return case_objs", "docstring": "Create a cases and populate it with individuals\n\nArgs:\nvariant_source (str): Path to vcf files\ncase_lines (Iterable): Ped like lines\ncase_type (str): Format of case lines\n\nReturns:\ncase_objs (list(puzzle.models.Case))", "source": "codesearchnet"}
{"code": "def _batch(self, batch_size) -> TypeSpec:\n    raise NotImplementedError(f'{type(self).__name__}._batch')", "docstring": "Returns a TypeSpec representing a batch of objects with this TypeSpec.\n\nArgs:\nbatch_size: An `int` representing the number of elements in a batch, or\n`None` if the batch size may vary.\n\nReturns:\nA `TypeSpec` representing a batch of objects with this TypeSpec.", "source": "github-repos"}
{"code": "def _get_events_list(object_key: str) -> List[str]:\n    \n    return DB.get_list(_keys.events_list(object_key))", "docstring": "Get list of event ids for the object with the specified key.\n\nArgs:\nobject_key (str): Key of an object in the database.", "source": "juraj-google-style"}
{"code": "def from_dlpack(dlcapsule):\n    context.context().ensure_initialized()\n    return pywrap_tfe.TFE_FromDlpackCapsule(dlcapsule, context.context()._handle)", "docstring": "Returns the Tensorflow eager tensor.\n\nThe returned tensor uses the memory shared by dlpack capsules from other\nframework.\n\n```python\na = tf.experimental.dlpack.from_dlpack(dlcapsule)\n# `a` uses the memory shared by dlpack\n```\n\nArgs:\ndlcapsule: A PyCapsule named as dltensor\n\nReturns:\nA Tensorflow eager tensor", "source": "github-repos"}
{"code": "def rand_ascii_str(length):\n    \n    letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]\n    return ''.join(letters)", "docstring": "Generates a random string of specified length, composed of ascii letters\nand digits.\n\nArgs:\nlength: The number of characters in the string.\n\nReturns:\nThe random string generated.", "source": "juraj-google-style"}
{"code": "def hash_file(fpath, algorithm='sha256', chunk_size=65535):\n    if isinstance(algorithm, str):\n        hasher = resolve_hasher(algorithm)\n    else:\n        hasher = algorithm\n    with open(fpath, 'rb') as fpath_file:\n        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n            hasher.update(chunk)\n    return hasher.hexdigest()", "docstring": "Calculates a file sha256 or md5 hash.\n\nExample:\n\n>>> hash_file('/path/to/file.zip')\n'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'\n\nArgs:\nfpath: Path to the file being validated.\nalgorithm: Hash algorithm, one of `\"auto\"`, `\"sha256\"`, or `\"md5\"`.\nThe default `\"auto\"` detects the hash algorithm in use.\nchunk_size: Bytes to read at a time, important for large files.\n\nReturns:\nThe file hash.", "source": "github-repos"}
{"code": "def split_leading_dim(tensor, inputs, n_dims=2):\n    input_shape_static = inputs.get_shape()\n    input_shape_list = input_shape_static.as_list()\n    tensor_shape_static = tensor.get_shape()\n    tensor_shape_list = tensor_shape_static.as_list()\n    if (input_shape_static.is_fully_defined() and tensor_shape_static.is_fully_defined()):\n        new_shape = (input_shape_list[:n_dims] + tensor_shape_list[1:])\n        return tf.reshape(tensor, new_shape)\n    dims_after_first = tf.shape(tensor)[1:]\n    split_sizes = tf.shape(inputs)[:n_dims]\n    known_split_sizes = input_shape_list[:n_dims]\n    known_dims_after_first = tensor_shape_list[1:]\n    output_size = tf.concat([split_sizes, dims_after_first], 0)\n    result = tf.reshape(tensor, output_size)\n    result.set_shape((known_split_sizes + known_dims_after_first))\n    return result", "docstring": "Split the first dimension of a tensor.\n\nArgs:\ntensor: Tensor to have its first dimension split.\ninputs: Original reference input to look the dimensions of.\nn_dims: Number of dimensions to split.\n\nReturns:\nThe input tensor, with its first dimension split.", "source": "codesearchnet"}
{"code": "def easeOutElastic(n, amplitude=1, period=0.3):\n    \n    _checkRange(n)\n\n    if amplitude < 1:\n        amplitude = 1\n        s = period / 4\n    else:\n        s = period / (2 * math.pi) * math.asin(1 / amplitude)\n\n    return amplitude * 2**(-10*n) * math.sin((n-s)*(2*math.pi / period)) + 1", "docstring": "An elastic tween function that overshoots the destination and then \"rubber bands\" into the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def swo_flush(self, num_bytes=None):\n    if (num_bytes is None):\n        num_bytes = self.swo_num_bytes()\n    buf = ctypes.c_uint32(num_bytes)\n    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.FLUSH, ctypes.byref(buf))\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Flushes data from the SWO buffer.\n\nAfter this method is called, the flushed part of the SWO buffer is\nempty.\n\nIf ``num_bytes`` is not present, flushes all data currently in the SWO\nbuffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\nnum_bytes (int): the number of bytes to flush\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def confirm_iam_role(self, account):\n        \n        try:\n            iam = self.session.client('iam')\n            rolearn = iam.get_role(RoleName=self.role_name)['Role']['Arn']\n            return rolearn\n\n        except ClientError as e:\n            if e.response['Error']['Code'] == 'NoSuchEntity':\n                self.create_iam_role(account)\n            else:\n                raise\n\n        except Exception as e:\n            self.log.exception('Failed validating IAM role for VPC Flow Log Auditing for {}'.format(e))", "docstring": "Return the ARN of the IAM Role on the provided account as a string. Returns an `IAMRole` object from boto3\n\nArgs:\naccount (:obj:`Account`): Account where to locate the role\n\nReturns:\n:obj:`IAMRole`", "source": "juraj-google-style"}
{"code": "def add_text(self, coords, text, color=(0, 0, 0)):\n        \n        source = vtk.vtkVectorText()\n        source.SetText(text)\n        mapper = vtk.vtkPolyDataMapper()\n        mapper.SetInputConnection(source.GetOutputPort())\n        follower = vtk.vtkFollower()\n        follower.SetMapper(mapper)\n        follower.GetProperty().SetColor(color)\n        follower.SetPosition(coords)\n        follower.SetScale(0.5)\n        self.ren.AddActor(follower)\n        follower.SetCamera(self.ren.GetActiveCamera())", "docstring": "Add text at a coordinate.\n\nArgs:\ncoords: Coordinates to add text at.\ntext: Text to place.\ncolor: Color for text as RGB. Defaults to black.", "source": "juraj-google-style"}
{"code": "def SetActiveBreakpoints(self, breakpoints_data):\n    with self._lock:\n        ids = set([x['id'] for x in breakpoints_data])\n        for breakpoint_id in (six.viewkeys(self._active) - ids):\n            self._active.pop(breakpoint_id).Clear()\n        self._active.update([(x['id'], python_breakpoint.PythonBreakpoint(x, self._hub_client, self, self.data_visibility_policy)) for x in breakpoints_data if (x['id'] in ((ids - six.viewkeys(self._active)) - self._completed))])\n        self._completed &= ids\n        if self._active:\n            self._next_expiration = datetime.min\n        else:\n            self._next_expiration = datetime.max", "docstring": "Adds new breakpoints and removes missing ones.\n\nArgs:\nbreakpoints_data: updated list of active breakpoints.", "source": "codesearchnet"}
{"code": "def get_module_class_from_name(module, name):\n    modules_children = list(module.children())\n    if module.__class__.__name__ == name:\n        return module.__class__\n    elif len(modules_children) == 0:\n        return\n    else:\n        for child_module in modules_children:\n            module_class = get_module_class_from_name(child_module, name)\n            if module_class is not None:\n                return module_class", "docstring": "Gets a class from a module by its name.\n\nArgs:\nmodule (`torch.nn.Module`): The module to get the class from.\nname (`str`): The name of the class.", "source": "github-repos"}
{"code": "def _AddPropertiesForField(field, cls):\n  \n  \n  \n  assert _FieldDescriptor.MAX_CPPTYPE == 10\n\n  constant_name = field.name.upper() + \"_FIELD_NUMBER\"\n  setattr(cls, constant_name, field.number)\n\n  if field.label == _FieldDescriptor.LABEL_REPEATED:\n    _AddPropertiesForRepeatedField(field, cls)\n  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:\n    _AddPropertiesForNonRepeatedCompositeField(field, cls)\n  else:\n    _AddPropertiesForNonRepeatedScalarField(field, cls)", "docstring": "Adds a public property for a protocol message field.\nClients can use this property to get and (in the case\nof non-repeated scalar fields) directly set the value\nof a protocol message field.\n\nArgs:\nfield: A FieldDescriptor for this field.\ncls: The class we're constructing.", "source": "juraj-google-style"}
{"code": "def doc2id(self, doc):\n        \n        doc = map(self.process_token, doc)\n        return [self.token_to_id(token) for token in doc]", "docstring": "Get the list of token_id given doc.\n\nArgs:\ndoc (list): document.\n\nReturns:\nlist: int id of doc.", "source": "juraj-google-style"}
{"code": "def from_path(cls, path):\n        \n        stat_res = os.stat(path)\n        return cls.from_int(stat.S_IMODE(stat_res.st_mode))", "docstring": "Make a new :class:`FilePerms` object based on the permissions\nassigned to the file or directory at *path*.\n\nArgs:\npath (str): Filesystem path of the target file.\n\n>>> from os.path import expanduser\n>>> 'r' in FilePerms.from_path(expanduser('~')).user  # probably\nTrue", "source": "juraj-google-style"}
{"code": "def saveplot(fig, *name_args, close=True, **name_kwargs):\n    \n    oname = out_name(*name_args, **name_kwargs)\n    fig.savefig('{}.{}'.format(oname, conf.plot.format),\n                format=conf.plot.format, bbox_inches='tight')\n    if close:\n        plt.close(fig)", "docstring": "Save matplotlib figure.\n\nYou need to provide :data:`stem` as a positional or keyword argument (see\n:func:`out_name`).\n\nArgs:\nfig (:class:`matplotlib.figure.Figure`): matplotlib figure.\nclose (bool): whether to close the figure.\nname_args: positional arguments passed on to :func:`out_name`.\nname_kwargs: keyword arguments passed on to :func:`out_name`.", "source": "juraj-google-style"}
{"code": "def _add_tags(self, tags):\n        \n        \n        alltagsadded = True\n        for tag in tags:\n            if not self._add_tag(tag):\n                alltagsadded = False\n        return alltagsadded", "docstring": "Add a list of tag\n\nArgs:\ntags (List[str]): list of tags to add\n\nReturns:\nbool: True if all tags added or False if any already present.", "source": "juraj-google-style"}
{"code": "def numeric_task_id(task_id):\n    if (task_id is not None):\n        if task_id.startswith('task-'):\n            return int(task_id[len('task-'):])\n        else:\n            return int(task_id)", "docstring": "Converts a task-id to the numeric task-id.\n\nArgs:\ntask_id: task-id in either task-n or n format\n\nReturns:\nn", "source": "codesearchnet"}
{"code": "def pack_container(in_container, out_file):\n    container_filename = local.path(out_file).basename\n    out_container = ((local.cwd / 'container-out') / container_filename)\n    out_dir = out_container.dirname\n    with local.cwd(in_container):\n        tar('cjf', out_container, '.')\n    c_hash = download.update_hash(out_container)\n    if out_dir.exists():\n        mkdir('-p', out_dir)\n    mv(out_container, out_file)\n    mv((out_container + '.hash'), (out_file + '.hash'))\n    new_container = {'path': out_file, 'hash': str(c_hash)}\n    CFG['container']['known'] += new_container", "docstring": "Pack a container image into a .tar.bz2 archive.\n\nArgs:\nin_container (str): Path string to the container image.\nout_file (str): Output file name.", "source": "codesearchnet"}
{"code": "def entitlements(self, request, pk=None):  \n        \n        enterprise_customer_user = self.get_object()\n        instance = {\"entitlements\": enterprise_customer_user.entitlements}\n        serializer = serializers.EnterpriseCustomerUserEntitlementSerializer(instance, context={'request': request})\n        return Response(serializer.data)", "docstring": "Retrieve the list of entitlements available to this learner.\n\nOnly those entitlements are returned that satisfy enterprise customer's data sharing setting.\n\nArguments:\nrequest (HttpRequest): Reference to in-progress request instance.\npk (Int): Primary key value of the selected enterprise learner.\n\nReturns:\n(HttpResponse): Response object containing a list of learner's entitlements.", "source": "juraj-google-style"}
{"code": "def cos(x):\n    return math_ops.cos(x)", "docstring": "Computes cos of x element-wise.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def main(argv: Optional[Sequence[str]]=None) -> None:\n    args = parse_arguments(argv=argv)\n    if args.logging:\n        logging.basicConfig(level=logging.DEBUG)\n    handle_skip()\n    action = args.action\n    request = parse_request()\n    LOGGER.debug('Received action %s with request:\\n%s', action, request)\n    try:\n        mapping = parse_mapping(args.mapping)\n    except Exception as error:\n        LOGGER.critical('Unable to parse mapping file', exc_info=True)\n        print('Unable to parse mapping file: {error}'.format(error=error), file=sys.stderr)\n        sys.exit(1)\n    if (action == 'get'):\n        get_password(request, mapping)\n    else:\n        LOGGER.info('Action %s is currently not supported', action)\n        sys.exit(1)", "docstring": "Start the pass-git-helper script.\n\nArgs:\nargv:\nIf not ``None``, use the provided command line arguments for\nparsing. Otherwise, extract them automatically.", "source": "codesearchnet"}
{"code": "def add_done_callback(self, fn):\n        \n        if self._result_set:\n            _helpers.safe_invoke_callback(fn, self)\n            return\n\n        self._done_callbacks.append(fn)\n\n        if self._polling_thread is None:\n            \n            \n            self._polling_thread = _helpers.start_daemon_thread(\n                target=self._blocking_poll\n            )", "docstring": "Add a callback to be executed when the operation is complete.\n\nIf the operation is not already complete, this will start a helper\nthread to poll for the status of the operation in the background.\n\nArgs:\nfn (Callable[Future]): The callback to execute when the operation\nis complete.", "source": "juraj-google-style"}
{"code": "def decode(self, images, save=None, round=4, names=None, **kwargs):\n    if isinstance(images, string_types):\n        images = [images]\n    if isinstance(images, list):\n        imgs_to_decode = imageutils.load_imgs(images, self.masker)\n    else:\n        imgs_to_decode = images\n    methods = {'pearson': self._pearson_correlation, 'dot': self._dot_product, 'roi': self._roi_association}\n    result = np.around(methods[self.method](imgs_to_decode, **kwargs), round)\n    if (names is None):\n        if (type(images).__module__ == np.__name__):\n            names = [('image_%d' % i) for i in range(images.shape[1])]\n        elif (self.method == 'roi'):\n            names = [('cluster_%d' % i) for i in range(result.shape[1])]\n        else:\n            names = images\n    result = pd.DataFrame(result, columns=names, index=self.feature_names)\n    if (save is not None):\n        result.to_csv(save, index_label='Feature')\n    return result", "docstring": "Decodes a set of images.\n\nArgs:\nimages: The images to decode. Can be:\n- A single String specifying the filename of the image to decode\n- A list of filenames\n- A single NumPy array containing the image data\nsave: Optional filename to save results to. If None (default), returns\nall results as an array.\nround: Optional integer indicating number of decimals to round result\nto. Defaults to 4.\nnames: Optional list of names corresponding to the images in filenames.\nIf passed, must be of same length and in same order as filenames.\nBy default, the columns in the output will be named using the image\nfilenames.\n\nReturns:\nAn n_features x n_files numpy array, where each feature is a row and\neach image is a column. The meaning of the values depends on the\ndecoding method used.", "source": "codesearchnet"}
{"code": "def _CalculateDigestHash(self, file_entry, data_stream_name):\n    file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n    if (not file_object):\n        return None\n    try:\n        file_object.seek(0, os.SEEK_SET)\n        hasher_object = hashers_manager.HashersManager.GetHasher('sha256')\n        data = file_object.read(self._READ_BUFFER_SIZE)\n        while data:\n            hasher_object.Update(data)\n            data = file_object.read(self._READ_BUFFER_SIZE)\n    finally:\n        file_object.close()\n    return hasher_object.GetStringDigest()", "docstring": "Calculates a SHA-256 digest of the contents of the file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry whose content will be hashed.\ndata_stream_name (str): name of the data stream whose content is to be\nhashed.\n\nReturns:\nstr: hexadecimal representation of the SHA-256 hash or None if the digest\ncannot be determined.", "source": "codesearchnet"}
{"code": "def post_cutout(self, token, channel,\n                    x_start,\n                    y_start,\n                    z_start,\n                    data,\n                    resolution=0):\n        \n        return self.data.post_cutout(token, channel,\n                                     x_start,\n                                     y_start,\n                                     z_start,\n                                     data,\n                                     resolution)", "docstring": "Post a cutout to the server.\n\nArguments:\ntoken (str)\nchannel (str)\nx_start (int)\ny_start (int)\nz_start (int)\ndata (numpy.ndarray): A numpy array of data. Pass in (x, y, z)\nresolution (int : 0): Resolution at which to insert the data\n\nReturns:\nbool: True on success\n\nRaises:\nRemoteDataUploadError: if there's an issue during upload.", "source": "juraj-google-style"}
{"code": "def _get_populate_from_value(instance, field_name: Union[(str, Tuple[str])], language: str):\n    if callable(field_name):\n        return field_name(instance)\n\n    def get_field_value(name):\n        value = resolve_object_property(instance, name)\n        with translation.override(language):\n            return str(value)\n    if (isinstance(field_name, tuple) or isinstance(field_name, list)):\n        value = '-'.join([value for value in [get_field_value(name) for name in field_name] if value])\n        return value\n    return get_field_value(field_name)", "docstring": "Gets the value to create a slug from in the specified language.\n\nArguments:\ninstance:\nThe model that the field resides on.\n\nfield_name:\nThe name of the field to generate a slug for.\n\nlanguage:\nThe language to generate the slug for.\n\nReturns:\nThe text to generate a slug for.", "source": "codesearchnet"}
{"code": "def compute_v(self, memory_antecedent):\n    \n    if self.shared_kv:\n      raise ValueError(\"compute_v cannot be called with shared_kv\")\n    ret = mtf.einsum(\n        [memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim])\n    if self.combine_dims:\n      ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims)\n    return ret", "docstring": "Compute value Tensor v.\n\nArgs:\nmemory_antecedent: a Tensor with dimensions\n{memory_input_dim} + other_dims\nReturns:\na Tensor with dimensions\nmemory_heads_dims + {value_dim} + other_dims", "source": "juraj-google-style"}
{"code": "def log_power_spectrum(frames, fft_points=512, normalize=True):\n    power_spec = power_spectrum(frames, fft_points)\n    power_spec[(power_spec <= 1e-20)] = 1e-20\n    log_power_spec = (10 * np.log10(power_spec))\n    if normalize:\n        return (log_power_spec - np.max(log_power_spec))\n    else:\n        return log_power_spec", "docstring": "Log power spectrum of each frame in frames.\n\nArgs:\nframes (array): The frame array in which each row is a frame.\nfft_points (int): The length of FFT. If fft_length is greater than\nframe_len, the frames will be zero-padded.\nnormalize (bool): If normalize=True, the log power spectrum\nwill be normalized.\n\nReturns:\narray: The power spectrum - If frames is an\nnum_frames x sample_per_frame matrix, output will be\nnum_frames x fft_length.", "source": "codesearchnet"}
{"code": "def read_video_pyav(video_path: str, sample_indices_fn: Callable, **kwargs):\n    requires_backends(read_video_pyav, ['av'])\n    import av\n    container = av.open(video_path)\n    total_num_frames = container.streams.video[0].frames\n    video_fps = container.streams.video[0].average_rate\n    duration = total_num_frames / video_fps if video_fps else 0\n    metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='pyav')\n    indices = sample_indices_fn(metadata=metadata, **kwargs)\n    frames = []\n    container.seek(0)\n    end_index = indices[-1]\n    for i, frame in enumerate(container.decode(video=0)):\n        if i > end_index:\n            break\n        if i >= 0 and i in indices:\n            frames.append(frame)\n    video = np.stack([x.to_ndarray(format='rgb24') for x in frames])\n    metadata.frames_indices = indices\n    return (video, metadata)", "docstring": "Decode the video with PyAV decoder.\n\nArgs:\nvideo_path (`str`):\nPath to the video file.\nsample_indices_fn (`Callable`, *optional*):\nA callable function that will return indices at which the video should be sampled. If the video has to be loaded using\nby a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.\nIf not provided, simple uniform sampling with fps is performed.\nExample:\ndef sample_indices_fn(metadata, **kwargs):\nreturn np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)\n\nReturns:\nTuple[`np.array`, `VideoMetadata`]: A tuple containing:\n- Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).\n- `VideoMetadata` object.", "source": "github-repos"}
{"code": "async def set_notification_level(self, level):\n    (await self._client.set_conversation_notification_level(hangouts_pb2.SetConversationNotificationLevelRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), level=level)))", "docstring": "Set the notification level of this conversation.\n\nArgs:\nlevel: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or\n``NOTIFICATION_LEVEL_RING`` to enable them.\n\nRaises:\n.NetworkError: If the request fails.", "source": "codesearchnet"}
{"code": "def put(self, values, name=None):\n    with ops.name_scope(name, '%s_put' % self._name, self._scope_vals(values)) as scope:\n        if not isinstance(values, (list, tuple, dict)):\n            values = [values]\n        indices = list(range(len(values)))\n        vals, _ = self._check_put_dtypes(values, indices)\n        with ops.colocate_with(self._coloc_op):\n            op = gen_data_flow_ops.stage(values=vals, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit)\n        return op", "docstring": "Create an op that places a value into the staging area.\n\nThis operation will block if the `StagingArea` has reached\nits capacity.\n\nArgs:\nvalues: A single tensor, a list or tuple of tensors, or a dictionary with\ntensor values. The number of elements must match the length of the\nlist provided to the dtypes argument when creating the StagingArea.\nname: A name for the operation (optional).\n\nReturns:\nThe created op.\n\nRaises:\nValueError: If the number or type of inputs don't match the staging area.", "source": "github-repos"}
{"code": "def get(self, id_or_url, default=None):\n        \n        if '/' in id_or_url:\n            id = urls.SheetUrl.from_string(id_or_url).id\n        else:\n            id = id_or_url\n        try:\n            return self[id]\n        except KeyError:\n            return default", "docstring": "Fetch and return the spreadsheet with the given id or url.\n\nArgs:\nid_or_url (str): unique alphanumeric id or URL of the spreadsheet\nReturns:\nNew SpreadSheet instance or given default if none is found\nRaises:\nValueError: if an URL is given from which no id could be extracted", "source": "juraj-google-style"}
{"code": "def encode(self, input_features: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None, **kwargs):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_features, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_features, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_features=jnp.array(input_features, dtype='f4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration\n>>> from datasets import load_dataset\n\n>>> processor = WhisperProcessor.from_pretrained(\"openai/whisper-tiny.en\")\n>>> model = FlaxWhisperForConditionalGeneration.from_pretrained(\"openai/whisper-tiny.en\", from_pt=True)\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> inputs = processor(ds[0][\"audio\"][\"array\"], return_tensors=\"np\")\n>>> input_features = inputs.input_features\n>>> encoder_outputs = model.encode(input_features=input_features)\n```", "source": "github-repos"}
{"code": "def init(library: typing.Union[(str, types.ModuleType)]) -> None:\n    if isinstance(library, types.ModuleType):\n        library = library.__name__\n    if (library not in manager._handlers):\n        raise ValueError('Possible values are <{}>, not <{}>'.format(manager._handlers.keys(), library))\n    manager.init(library, asynclib)\n    asynclib.lib_name = library\n    asynclib._init = True", "docstring": "Must be called at some point after import and before your event loop\nis run.\n\nPopulates the asynclib instance of _AsyncLib with methods relevant to the\nasync library you are using.\n\nThe supported libraries at the moment are:\n- curio\n- trio\n\nArgs:\nlibrary (str or module): Either the module name as a string or the\nimported module itself. E.g. ``multio.init(curio)``.", "source": "codesearchnet"}
{"code": "def start(self, on_done):\n        \n        genesis_file = os.path.join(self._data_dir, 'genesis.batch')\n        try:\n            with open(genesis_file, 'rb') as batch_file:\n                genesis_data = genesis_pb2.GenesisData()\n                genesis_data.ParseFromString(batch_file.read())\n            LOGGER.info('Producing genesis block from %s', genesis_file)\n        except IOError:\n            raise InvalidGenesisStateError(\n                \"Genesis File {} specified, but unreadable\".format(\n                    genesis_file))\n\n        initial_state_root = self._context_manager.get_first_root()\n\n        genesis_batches = [batch for batch in genesis_data.batches]\n        if genesis_batches:\n            scheduler = SerialScheduler(\n                self._context_manager.get_squash_handler(),\n                initial_state_root,\n                always_persist=True)\n\n            LOGGER.debug('Adding %s batches', len(genesis_data.batches))\n            for batch in genesis_data.batches:\n                scheduler.add_batch(batch)\n\n            self._transaction_executor.execute(scheduler)\n\n            scheduler.finalize()\n            scheduler.complete(block=True)\n\n        txn_receipts = []\n        state_hash = initial_state_root\n        for batch in genesis_batches:\n            result = scheduler.get_batch_execution_result(\n                batch.header_signature)\n            if result is None or not result.is_valid:\n                raise InvalidGenesisStateError(\n                    'Unable to create genesis block, due to batch {}'\n                    .format(batch.header_signature))\n            if result.state_hash is not None:\n                state_hash = result.state_hash\n\n            txn_results = scheduler.get_transaction_execution_results(\n                batch.header_signature)\n            txn_receipts += self._make_receipts(txn_results)\n\n        settings_view = SettingsView(\n            self._state_view_factory.create_view(state_hash))\n        name = settings_view.get_setting('sawtooth.consensus.algorithm.name')\n        version = settings_view.get_setting(\n            'sawtooth.consensus.algorithm.version')\n        if name is None or version is None:\n            raise LocalConfigurationError(\n                'Unable to start validator; sawtooth.consensus.algorithm.name '\n                'and sawtooth.consensus.algorithm.version must be set in the '\n                'genesis block.')\n\n        LOGGER.debug('Produced state hash %s for genesis block.', state_hash)\n\n        block_builder = self._generate_genesis_block()\n        block_builder.add_batches(genesis_batches)\n        block_builder.set_state_hash(state_hash)\n\n        block_publisher = self._get_block_publisher(initial_state_root)\n        if not block_publisher.initialize_block(block_builder.block_header):\n            LOGGER.error('Consensus refused to initialize consensus block.')\n            raise InvalidGenesisConsensusError(\n                'Consensus refused to initialize genesis block.')\n\n        if not block_publisher.finalize_block(block_builder.block_header):\n            LOGGER.error('Consensus refused to finalize genesis block.')\n            raise InvalidGenesisConsensusError(\n                'Consensus refused to finalize genesis block.')\n\n        self._sign_block(block_builder)\n\n        block = block_builder.build_block()\n\n        blkw = BlockWrapper(block=block)\n\n        LOGGER.info('Genesis block created: %s', blkw)\n\n        
self._block_manager.put([blkw.block])\n        self._block_manager.persist(blkw.identifier, \"commit_store\")\n\n        self._txn_receipt_store.chain_update(block, txn_receipts)\n        self._chain_id_manager.save_block_chain_id(block.header_signature)\n\n        LOGGER.debug('Deleting genesis data.')\n        os.remove(genesis_file)\n\n        if on_done is not None:\n            on_done()", "docstring": "Starts the genesis block creation process.  Will call the given\n`on_done` callback on successful completion.\n\nArgs:\non_done (function): a function called on completion\n\nRaises:\nInvalidGenesisStateError: raises this error if a genesis block is\nunable to be produced, or the resulting block-chain-id saved.", "source": "juraj-google-style"}
{"code": "def insert_and_get(self, **fields):\n        \n\n        if not self.conflict_target and not self.conflict_action:\n            \n            return super().create(**fields)\n\n        compiler = self._build_insert_compiler([fields])\n        rows = compiler.execute_sql(return_id=False)\n\n        columns = rows[0]\n\n        \n        \n        model_columns = {}\n        for field in self.model._meta.local_concrete_fields:\n            model_columns[field.column] = field.attname\n\n        \n        \n        model_init_fields = {}\n        for column_name, column_value in columns.items():\n            try:\n                model_init_fields[model_columns[column_name]] = column_value\n            except KeyError:\n                pass\n\n        return self.model(**model_init_fields)", "docstring": "Creates a new record in the database and then gets\nthe entire row.\n\nThis allows specifying custom conflict behavior using .on_conflict().\nIf no special behavior was specified, this uses the normal Django create(..)\n\nArguments:\nfields:\nThe fields of the row to create.\n\nReturns:\nThe model instance representing the row that was created.", "source": "juraj-google-style"}
{"code": "def append_item(self, item):\n        \n        if isinstance(item, SubmenuItem):\n            raise TypeError(\"SubmenuItems cannot be added to a MultiSelectMenu\")\n        super(MultiSelectMenu, self).append_item(item)", "docstring": "Add an item to the end of the menu before the exit item.\n\nNote that Multi-Select Menus will not allow a SubmenuItem to be added, as multi-select menus\nare expected to be used only for executing multiple actions.\n\nArgs:\nitem (:obj:`MenuItem`): The item to be added\n\nRaises:\nTypeError: If the specified MenuIem is a SubmenuItem.", "source": "juraj-google-style"}
{"code": "def build_from_queue(cls, input_queue, replay_size, batch_size):\n    return cls((lambda : input_queue.dequeue_many(batch_size)), replay_size, batch_size=batch_size)", "docstring": "Builds a `ReplayableQueue` that draws from a regular `input_queue`.\n\nArgs:\ninput_queue: The queue to draw from.\nreplay_size: The size of the replay buffer.\nbatch_size: The size of each batch.\n\nReturns:\nA ReplayableQueue.", "source": "codesearchnet"}
{"code": "def pair_wise_sigmoid_focal_loss(inputs: Tensor, labels: Tensor, alpha: float=0.25, gamma: float=2.0) -> Tensor:\n    if alpha < 0:\n        raise ValueError('alpha must be positive')\n    height_and_width = inputs.shape[1]\n    criterion = nn.BCEWithLogitsLoss(reduction='none')\n    prob = inputs.sigmoid()\n    cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs))\n    focal_pos = (1 - prob) ** gamma * cross_entropy_loss_pos\n    focal_pos *= alpha\n    cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs))\n    focal_neg = prob ** gamma * cross_entropy_loss_neg\n    focal_neg *= 1 - alpha\n    loss = torch.matmul(focal_pos, labels.T) + torch.matmul(focal_neg, (1 - labels).T)\n    return loss / height_and_width", "docstring": "A pair wise version of the focal loss, see `sigmoid_focal_loss` for usage.\n\nArgs:\ninputs (`torch.Tensor`):\nA tensor representing a mask.\nlabels (`torch.Tensor`):\nA tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs\n(0 for the negative class and 1 for the positive class).\nalpha (float, *optional*, defaults to 0.25):\nWeighting factor in range (0,1) to balance positive vs negative examples.\ngamma (float, *optional*, defaults to 2.0):\nExponent of the modulating factor \\\\(1 - p_t\\\\) to balance easy vs hard examples.\n\nReturns:\n`torch.Tensor`: The computed loss between each pairs.", "source": "github-repos"}
{"code": "def __init__(self, shape, dtype):\n    \n    self._dtype = dtype\n    self._sum = tf.Variable(lambda: tf.zeros(shape, dtype), False)\n    self._count = tf.Variable(lambda: 0, trainable=False)", "docstring": "Specify the shape and dtype of the mean to be estimated.\n\nNote that a float mean to zero submitted elements is NaN, while computing\nthe integer mean of zero elements raises a division by zero error.\n\nArgs:\nshape: Shape of the mean to compute.\ndtype: Data type of the mean to compute.", "source": "juraj-google-style"}
{"code": "def batch_slice(linop, params_overrides, slices):\n    if not isinstance(slices, collections.abc.Sequence):\n        slices = (slices,)\n    if len(slices) == 1 and slices[0] is Ellipsis:\n        override_dict = {}\n    else:\n        batch_shape = linop.batch_shape_tensor()\n        override_dict = {}\n        for param_name, param_ndims_to_matrix_ndims in linop._experimental_parameter_ndims_to_matrix_ndims.items():\n            param = getattr(linop, param_name)\n            if param is not None:\n                override_dict[param_name] = nest.map_structure_up_to(param, functools.partial(_slice_single_param, slices=slices, batch_shape=batch_shape), param, param_ndims_to_matrix_ndims)\n    override_dict.update(params_overrides)\n    parameters = dict(linop.parameters, **override_dict)\n    return type(linop)(**parameters)", "docstring": "Slices `linop` along its batch dimensions.\n\nArgs:\nlinop: A `LinearOperator` instance.\nparams_overrides: A `dict` of parameter overrides.\nslices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple`\nthereof. (e.g. the argument of a `__getitem__` method).\n\nReturns:\nnew_linop: A batch-sliced `LinearOperator`.", "source": "github-repos"}
{"code": "def __gt__(self, other):\n    \n    if not isinstance(other, DateTimeValues):\n      raise ValueError('Other not an instance of DateTimeValues')\n\n    normalized_timestamp = self._GetNormalizedTimestamp()\n    other_normalized_timestamp = other._GetNormalizedTimestamp()  \n\n    if normalized_timestamp is None:\n      return False\n\n    if other_normalized_timestamp is None:\n      return True\n\n    return normalized_timestamp > other_normalized_timestamp", "docstring": "Determines if the date time values are greater than other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are greater than other.\n\nRaises:\nValueError: if other is not an instance of DateTimeValues.", "source": "juraj-google-style"}
{"code": "def device(self):\n    hdevice = self._libinput.libinput_event_get_device(self._hevent)\n    return Device(hdevice, self._libinput)", "docstring": "The device associated with this event.\n\nFor device added/removed events this is the device added or removed.\nFor all other device events, this is the device that generated the\nevent.\n\nReturns:\n~libinput.define.Device: Device object.", "source": "codesearchnet"}
{"code": "def __init__(self, default_model=None, id_resolver=None):\n    \n    \n    \n    try:\n      super(ModelAdapter, self).__init__(id_resolver)\n    except:\n      pass\n    self.default_model = default_model\n    self.want_pbs = 0", "docstring": "Constructor.\n\nArgs:\ndefault_model: If an implementation for the kind cannot be found, use\nthis model class.  If none is specified, an exception will be thrown\n(default).\nid_resolver: A datastore_pbs.IdResolver that can resolve\napplication ids. This is only necessary when running on the Cloud\nDatastore v1 API.", "source": "juraj-google-style"}
{"code": "def UploadAccount(self, hash_algorithm, hash_key, accounts):\n    \n    param = {\n        'hashAlgorithm': hash_algorithm,\n        'signerKey': hash_key,\n        'users': accounts\n    }\n    \n    \n    return self._InvokeGitkitApi('uploadAccount', param)", "docstring": "Uploads multiple accounts to Gitkit server.\n\nArgs:\nhash_algorithm: string, algorithm to hash password.\nhash_key: string, base64-encoded key of the algorithm.\naccounts: array of accounts to be uploaded.\n\nReturns:\nResponse of the API.", "source": "juraj-google-style"}
{"code": "def lint(self, targets):\n        \n        LinterRunner.targets = targets\n        linters = self._config.get_linter_classes()\n        with Pool() as pool:\n            out_err_none = pool.map(LinterRunner.run, linters)\n        out_err = [item for item in out_err_none if item is not None]\n        stdout, stderr = zip(*out_err)\n        return sorted(chain.from_iterable(stdout)), chain.from_iterable(stderr)", "docstring": "Run linters in parallel and sort all results.\n\nArgs:\ntargets (list): List of files and folders to lint.", "source": "juraj-google-style"}
{"code": "def acquire(self):\n    if os.path.exists(self.path):\n        try:\n            pid = None\n            with open(self.path, 'r') as f:\n                line = f.readline().strip()\n                pid = int(line)\n            if (not psutil.pid_exists(pid)):\n                os.remove(self.path)\n        except ValueError as e:\n            os.remove(self.path)\n        except IOError as e:\n            pass\n    try:\n        self.fd = os.open(self.path, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR))\n        to_write = ('%s%s' % (os.getpid(), os.linesep))\n        os.write(self.fd, to_write.encode())\n    except OSError as e:\n        if (not os.path.exists(self.path)):\n            raise\n        return False\n    self.acquired = True\n    return True", "docstring": "Attempts to acquire a lock for the J-Link lockfile.\n\nIf the lockfile exists but does not correspond to an active process,\nthe lockfile is first removed, before an attempt is made to acquire it.\n\nArgs:\nself (Jlock): the ``JLock`` instance\n\nReturns:\n``True`` if the lock was acquired, otherwise ``False``.\n\nRaises:\nOSError: on file errors.", "source": "codesearchnet"}
{"code": "def delete_branch(profile, name):\n    \n    ref = \"heads/\" + name\n    data = refs.delete_ref(profile, ref)\n    return data", "docstring": "Delete a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the branch to delete.\n\nReturns:\nThe response of the DELETE request.", "source": "juraj-google-style"}
{"code": "def _continue_search(self, state):\n    \n    i = state[_StateKeys.CUR_INDEX]\n    alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]\n    finished_scores = state[_StateKeys.FINISHED_SCORES]\n    finished_flags = state[_StateKeys.FINISHED_FLAGS]\n\n    not_at_max_decode_length = tf.less(i, self.max_decode_length)\n\n    \n    max_length_norm = _length_normalization(self.alpha, self.max_decode_length)\n    \n    best_alive_scores = alive_log_probs[:, 0] / max_length_norm\n\n    \n    finished_scores *= tf.to_float(finished_flags)  \n    lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)\n\n    \n    \n    finished_batches = tf.reduce_any(finished_flags, 1)\n    lowest_finished_scores += (1. - tf.to_float(finished_batches)) * -INF\n\n    worst_finished_score_better_than_best_alive_score = tf.reduce_all(\n        tf.greater(lowest_finished_scores, best_alive_scores)\n    )\n\n    return tf.logical_and(\n        not_at_max_decode_length,\n        tf.logical_not(worst_finished_score_better_than_best_alive_score)\n    )", "docstring": "Return whether to continue the search loop.\n\nThe loops should terminate when\n1) when decode length has been reached, or\n2) when the worst score in the finished sequences is better than the best\nscore in the alive sequences (i.e. the finished sequences are provably\nunchanging)\n\nArgs:\nstate: A dictionary with the current loop state.\n\nReturns:\nBool tensor with value True if loop should continue, False if loop should\nterminate.", "source": "juraj-google-style"}
{"code": "def _record_op_seen_by_control_dependencies(self, op) -> None:\n    for controller in self._control_dependencies_stack:\n        controller.add_op(op)", "docstring": "Record that the given op depends on all registered control dependencies.\n\nArgs:\nop: An Operation.", "source": "github-repos"}
{"code": "def user_exists(self, username):\n    response = self._get((self.rest_url + '/user'), params={'username': username})\n    if (not response.ok):\n        return None\n    return True", "docstring": "Determines if the user exists.\n\nArgs:\nusername: The user name.\n\n\nReturns:\nbool:\nTrue if the user exists in the Crowd application.", "source": "codesearchnet"}
{"code": "def find_files(directory, pattern, recursively=True):\n    \n\n    for root, dirs, files in os.walk(directory):\n        for basename in files:\n            if fnmatch.fnmatch(basename, pattern):\n                yield root, basename\n        if not recursively:\n            break", "docstring": "Yield a list of files with their base directories, recursively or not.\n\nReturns:\nA list of (base_directory, filename)\n\nArgs:\ndirectory: base directory to start the search.\npattern: fnmatch pattern for filenames.\ncomplete_filename: return complete filename or not?\nrecursively: do we recurse or not?", "source": "juraj-google-style"}
{"code": "def readinto(self, b):\n        \n        if not self._readable:\n            raise UnsupportedOperation('read')\n\n        \n        size = len(b)\n        with self._seek_lock:\n            start = self._seek\n            end = start + size\n            self._seek = end\n\n        \n        with handle_os_exceptions():\n            read_data = self._read_range(start, end)\n\n        \n        read_size = len(read_data)\n        if read_size:\n            memoryview(b)[:read_size] = read_data\n\n        \n        if read_size != size:\n            with self._seek_lock:\n                self._seek = start + read_size\n\n        \n        return read_size", "docstring": "Read bytes into a pre-allocated, writable bytes-like object b,\nand return the number of bytes read.\n\nArgs:\nb (bytes-like object): buffer.\n\nReturns:\nint: number of bytes read", "source": "juraj-google-style"}
{"code": "def fit(self, X):\n        \n\n        training_signal = X\n\n        self.window_design(self.window_length, self.beta)\n\n        if self.method == 'std_dev':\n            self.fit_freq_std_dev(training_signal)\n        elif self.method == 'min_max':\n            self.fit_freq_min_max(training_signal)\n        else:\n            raise ValueError('Unknown method: {}'.format(self.method))", "docstring": "Defines a spectral mask based on training data\n\nArgs:\nX: Training data", "source": "juraj-google-style"}
{"code": "def list_members(self, name, type='USER', recurse=True, max_results=1000):\n    results = self.client.service.getListMembership(name, type, recurse, max_results, self.proxy_id)\n    return [item['member'] for item in results]", "docstring": "Look up all the members of a list.\n\nArgs:\nname (str): The name of the list\ntype (str): The type of results to return. \"USER\" to get users,\n\"LIST\" to get lists.\nrecurse (bool): Presumably, whether to recurse into member lists\nwhen retrieving users.\nmax_results (int): Maximum number of results to return.\n\nReturns:\nlist of strings: names of the members of the list", "source": "codesearchnet"}
{"code": "async def is_change_done(self, zone, change_id):\n        \n        zone_id = self.get_managed_zone(zone)\n        url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'\n        resp = await self.get_json(url)\n        return resp['status'] == self.DNS_CHANGES_DONE", "docstring": "Check if a DNS change has completed.\n\nArgs:\nzone (str): DNS zone of the change.\nchange_id (str): Identifier of the change.\nReturns:\nBoolean", "source": "juraj-google-style"}
{"code": "def unsubscribe(self, peer_jid):\n        \n        self.roster.unsubscribe(aioxmpp.JID.fromstr(peer_jid).bare())", "docstring": "Asks for unsubscription\n\nArgs:\npeer_jid (str): the JID you ask for unsubscriptiion", "source": "juraj-google-style"}
{"code": "async def apply(self, sender: str, recipient: str, mailbox: str, append_msg: AppendMessage) -> Tuple[(Optional[str], AppendMessage)]:\n    ...", "docstring": "Run the filter and return the mailbox where it should be appended,\nor None to discard, and the message to be appended, which is usually\nthe same as ``append_msg``.\n\nArgs:\nsender: The envelope sender of the message.\nrecipient: The envelope recipient of the message.\nmailbox: The intended mailbox to append the message.\nappend_msg: The message to be appended.\n\nraises:\n:exc:`~pymap.exceptions.AppendFailure`", "source": "codesearchnet"}
{"code": "def GetUpdateTimestamp(self):\n    if self.update_time is None:\n        self.update_time = self._ReadTimestamp(self.update_file)\n    return self.update_time", "docstring": "Return the timestamp of the last cache update.\n\nReturns:\nAn int with the number of seconds since epoch, or None if the timestamp\nfile doesn't exist or has errors.", "source": "github-repos"}
{"code": "def one_step(self, current_state, previous_kernel_results):\n\n    @tfp.mcmc.internal.util.make_innermost_setter\n    def set_num_leapfrog_steps(kernel_results, num_leapfrog_steps):\n        return kernel_results._replace(accepted_results=kernel_results.accepted_results._replace(num_leapfrog_steps=num_leapfrog_steps))\n    step_size = previous_kernel_results.new_step_size\n    previous_kernel_results = set_num_leapfrog_steps(previous_kernel_results, self._num_leapfrog_steps(step_size))\n    (new_state, kernel_results) = self._kernel.one_step(self._flatten_state(current_state), previous_kernel_results)\n    return (self._unflatten_state(new_state), kernel_results)", "docstring": "Runs one iteration of NeuTra.\n\nArgs:\ncurrent_state: `Tensor` or Python `list` of `Tensor`s representing the\ncurrent state(s) of the Markov chain(s). The first `r` dimensions index\nindependent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\nprevious_kernel_results: `collections.namedtuple` containing `Tensor`s\nrepresenting values from previous calls to this function (or from the\n`bootstrap_results` function.)\n\nReturns:\nnext_state: Tensor or Python list of `Tensor`s representing the state(s)\nof the Markov chain(s) after taking exactly one step. Has same type and\nshape as `current_state`.\nkernel_results: `collections.namedtuple` of internal calculations used to\nadvance the chain.", "source": "codesearchnet"}
{"code": "def get_worksheet_keys(data_dict, result_info_key):\n    \n    keys = set(data_dict.keys())\n    keys.remove(result_info_key)\n    if 'meta' in keys:\n        keys.remove('meta')\n    return sorted(keys)", "docstring": "Gets sorted keys from the dict, ignoring result_info_key and 'meta' key\nArgs:\ndata_dict: dict to pull keys from\n\nReturns:\nlist of keys in the dict other than the result_info_key", "source": "juraj-google-style"}
{"code": "def config_str2dict(option_value):\n    dict = {}\n    for key in option_value.split(','):\n        if (':' in key):\n            (key, value) = pair.split(':')\n            value = float(value)\n        else:\n            value = 0\n        dict[key] = value\n    return dict", "docstring": "Parse the value of a config option and convert it to a dictionary.\n\nThe configuration allows lines formatted like:\nfoo = Bar:1,Baz,Flub:0.75\nThis gets converted to a dictionary:\nfoo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 }\n\nArgs:\noption_value -- The config string to parse.", "source": "codesearchnet"}
{"code": "def __init__(self, name, code, return_type, params=None, language='js', imports=None):\n    \n    if not isinstance(return_type, basestring):\n      raise TypeError('Argument return_type should be a string. Instead got: ', type(return_type))\n    if params and not isinstance(params, list):\n      raise TypeError('Argument params should be a list of parameter names and types')\n    if imports and not isinstance(imports, list):\n      raise TypeError('Argument imports should be a list of GCS string paths')\n    if imports and language != 'js':\n      raise Exception('Imports are available for Javascript UDFs only')\n\n    self._name = name\n    self._code = code\n    self._return_type = return_type\n    self._params = params or []\n    self._language = language\n    self._imports = imports or []\n    self._sql = None", "docstring": "Initializes a UDF object from its pieces.\n\nArgs:\nname: the name of the javascript function\ncode: function body implementing the logic.\nreturn_type: BigQuery data type of the function return. See supported data types in\nthe BigQuery docs\nparams: list of parameter tuples: (name, type)\nlanguage: see list of supported languages in the BigQuery docs\nimports: a list of GCS paths containing further support code.", "source": "juraj-google-style"}
{"code": "def merge(metric_kind, prior, latest):\n    \n    prior_type, _ = _detect_value(prior)\n    latest_type, _ = _detect_value(latest)\n    if prior_type != latest_type:\n        _logger.warn(u'Metric values are not compatible: %s, %s',\n                     prior, latest)\n        raise ValueError(u'Incompatible delta metric values')\n    if prior_type is None:\n        _logger.warn(u'Bad metric values, types not known for : %s, %s',\n                     prior, latest)\n        raise ValueError(u'Unsupported delta metric types')\n\n    if metric_kind == MetricKind.DELTA:\n        return _merge_delta_metric(prior, latest)\n    else:\n        return _merge_cumulative_or_gauge_metrics(prior, latest)", "docstring": "Merges `prior` and `latest`\n\nArgs:\nmetric_kind (:class:`MetricKind`): indicates the kind of metrics\nbeing merged\nprior (:class:`MetricValue`): an prior instance of the metric\nlatest (:class:`MetricValue`: the latest instance of the metric", "source": "juraj-google-style"}
{"code": "def colourise(__text: str, *args, **kwargs) -> str:\n    \n    if sys.stdout.isatty():\n        __text = style(__text, *args, **kwargs)\n    return __text", "docstring": "Colourise text using click’s style function.\n\nReturns text untouched if colour output is not enabled, or ``stdout`` is\nnot a tty.\n\nSee :func:`click.style` for parameters\n\nArgs:\n__text: Text to colourise\nReturns:\nColourised text, when possible", "source": "juraj-google-style"}
{"code": "def main():\n    windows_libraries = list(pylink.Library.find_library_windows())\n    latest_library = None\n    for lib in windows_libraries:\n        if os.path.dirname(lib).endswith('JLinkARM'):\n            latest_library = lib\n            break\n        elif (latest_library is None):\n            latest_library = lib\n        elif (os.path.dirname(lib) > os.path.dirname(latest_library)):\n            latest_library = lib\n    if (latest_library is None):\n        raise OSError('No J-Link library found.')\n    library = pylink.Library(latest_library)\n    jlink = pylink.JLink(lib=library)\n    print(('Found version: %s' % jlink.version))\n    for emu in jlink.connected_emulators():\n        jlink.disable_dialog_boxes()\n        jlink.open(serial_no=emu.SerialNumber)\n        jlink.sync_firmware()\n        print(('Updated emulator with serial number %s' % emu.SerialNumber))\n    return None", "docstring": "Upgrades the firmware of the J-Links connected to a Windows device.\n\nReturns:\nNone.\n\nRaises:\nOSError: if there are no J-Link software packages.", "source": "codesearchnet"}
{"code": "def restore_state(self, state):\n        \n\n        config_vars = state.get('config_variables', {})\n\n        for str_name, str_value in config_vars.items():\n            name = int(str_name)\n            value = base64.b64decode(str_value)\n\n            if name in self._config_variables:\n                self._config_variables[name].current_value = value", "docstring": "Restore the current state of this emulated object.\n\nArgs:\nstate (dict): A previously dumped state produced by dump_state.", "source": "juraj-google-style"}
{"code": "def economic_qs_linear(G):\n    r\n    import dask.array as da\n\n    if not isinstance(G, da.Array):\n        G = asarray(G, float)\n\n    if G.shape[0] > G.shape[1]:\n        (Q, Ssq, _) = svd(G, full_matrices=True)\n        S0 = Ssq ** 2\n        rank = len(S0)\n        Q0, Q1 = Q[:, :rank], Q[:, rank:]\n        return ((Q0, Q1), S0)\n\n    return economic_qs(G.dot(G.T))", "docstring": "r\"\"\"Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``.\n\nIt is theoretically equivalent to ``economic_qs(dot(G, G.T))``.\nRefer to :func:`numpy_sugar.economic_qs` for further information.\n\nArgs:\nG (array_like): Matrix.\n\nReturns:\ntuple: ``((Q0, Q1), S0)``.", "source": "juraj-google-style"}
{"code": "def _dataset_load_from_hdx(self, id_or_name):\n        \n        \n\n        if not self._load_from_hdx('dataset', id_or_name):\n            return False\n        self._dataset_create_resources()\n        return True", "docstring": "Loads the dataset given by either id or name from HDX\n\nArgs:\nid_or_name (str): Either id or name of dataset\n\nReturns:\nbool: True if loaded, False if not", "source": "juraj-google-style"}
{"code": "def thread(self, value: str):\n    if ((value is not None) and (not isinstance(value, str))):\n        raise TypeError(\"'thread' MUST be a string\")\n    self._thread = value", "docstring": "Set thread id of the message\n\nArgs:\nvalue (str): the thread id", "source": "codesearchnet"}
{"code": "def add_argument(self, parser, bootstrap=False):\n    if self.cli_expose:\n        if isinstance(self.child, YapconfBoolItem):\n            original_default = self.child.default\n            self.child.default = True\n            args = self.child._get_argparse_names(parser.prefix_chars)\n            kwargs = self._get_argparse_kwargs(bootstrap)\n            parser.add_argument(*args, **kwargs)\n            self.child.default = False\n            args = self.child._get_argparse_names(parser.prefix_chars)\n            kwargs = self._get_argparse_kwargs(bootstrap)\n            parser.add_argument(*args, **kwargs)\n            self.child.default = original_default\n        else:\n            super(YapconfListItem, self).add_argument(parser, bootstrap)", "docstring": "Add list-style item as an argument to the given parser.\n\nGenerally speaking, this works mostly like the normal append\naction, but there are special rules for boolean cases. See the\nAppendReplace action for more details.\n\nExamples:\nA non-nested list value with the name 'values' and a child name of\n'value' will result in a command-line argument that will correctly\nhandle arguments like the following:\n\n['--value', 'VALUE1', '--value', 'VALUE2']\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add this item to.\nbootstrap (bool): Flag to indicate whether you only want to mark\nthis item as required or not.", "source": "codesearchnet"}
{"code": "def update_object(self, ref, payload, return_fields=None):\n        \n        query_params = self._build_query_params(return_fields=return_fields)\n\n        opts = self._get_request_options(data=payload)\n        url = self._construct_url(ref, query_params)\n        self._log_request('put', url, opts)\n        r = self.session.put(url, **opts)\n\n        self._validate_authorized(r)\n\n        if r.status_code != requests.codes.ok:\n            self._check_service_availability('update', r, ref)\n\n            raise ib_ex.InfobloxCannotUpdateObject(\n                response=jsonutils.loads(r.content),\n                ref=ref,\n                content=r.content,\n                code=r.status_code)\n\n        return self._parse_reply(r)", "docstring": "Update an Infoblox object\n\nArgs:\nref      (str): Infoblox object reference\npayload (dict): Payload with data to send\nReturns:\nThe object reference of the updated object\nRaises:\nInfobloxException", "source": "juraj-google-style"}
{"code": "def intersection(L1, L2):\n    \n    D = L1[0] * L2[1] - L1[1] * L2[0]\n    Dx = L1[2] * L2[1] - L1[1] * L2[2]\n    Dy = L1[0] * L2[2] - L1[2] * L2[0]\n    if D != 0:\n        x = Dx / D\n        y = Dy / D\n        return x, y\n    else:\n        return False", "docstring": "Intersects two line segments\n\nArgs:\nL1 ([float, float]): x and y coordinates\nL2 ([float, float]): x and y coordinates\nReturns:\nbool: if they intersect\n(float, float): x and y of intersection, if they do", "source": "juraj-google-style"}
{"code": "def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):\n        \n        if kpath_bounds is None:\n            \n            from pymatgen.symmetry.bandstructure import HighSymmKpath\n            sp = HighSymmKpath(structure)\n\n            \n            kpath_labels = []\n            for labels in sp.kpath[\"path\"]:\n                kpath_labels.extend(labels)\n\n            kpath_bounds = []\n            for label in kpath_labels:\n                red_coord = sp.kpath[\"kpoints\"][label]\n                \n                kpath_bounds.append(red_coord)\n\n        return cls(mode=KSamplingModes.path, num_kpts=ndivsm, kpts=kpath_bounds,\n                   comment=comment if comment else \"K-Path scheme\")", "docstring": "Static constructor for path in k-space.\n\nArgs:\nstructure: :class:`Structure` object.\nkpath_bounds: List with the reduced coordinates of the k-points defining the path.\nndivsm: Number of division for the smallest segment.\ncomment: Comment string.\n\nReturns:\n:class:`KSampling` object.", "source": "juraj-google-style"}
{"code": "def set_trunk_groups(self, intf, value=None, default=False, disable=False):\n        \n        if default:\n            cmd = 'default switchport trunk group'\n            return self.configure_interface(intf, cmd)\n\n        if disable:\n            cmd = 'no switchport trunk group'\n            return self.configure_interface(intf, cmd)\n\n        current_value = self.get(intf)['trunk_groups']\n        failure = False\n\n        value = make_iterable(value)\n\n        for name in set(value).difference(current_value):\n            if not self.add_trunk_group(intf, name):\n                failure = True\n\n        for name in set(current_value).difference(value):\n            if not self.remove_trunk_group(intf, name):\n                failure = True\n\n        return not failure", "docstring": "Configures the switchport trunk group value\n\nArgs:\nintf (str): The interface identifier to configure.\nvalue (str): The set of values to configure the trunk group\ndefault (bool): Configures the trunk group default value\ndisable (bool): Negates all trunk group settings\n\nReturns:\nTrue if the config operation succeeds otherwise False", "source": "juraj-google-style"}
{"code": "def time_stats(self, **kwargs):\n        \n        \n        \n        if 'time_stats' in self.attributes:\n            return self.attributes['time_stats']\n\n        path = '%s/%s/time_stats' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get time stats for the object.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "juraj-google-style"}
{"code": "def __register_class(self, parsed_config):\n    methods = parsed_config.get('methods')\n    if (not methods):\n        return\n    service_classes = set()\n    for method in methods.itervalues():\n        rosy_method = method.get('rosyMethod')\n        if (rosy_method and ('.' in rosy_method)):\n            method_class = rosy_method.split('.', 1)[0]\n            service_classes.add(method_class)\n    for service_class in service_classes:\n        if (service_class in self.__registered_classes):\n            raise api_exceptions.ApiConfigurationError(('API class %s has already been registered.' % service_class))\n        self.__registered_classes.add(service_class)", "docstring": "Register the class implementing this config, so we only add it once.\n\nArgs:\nparsed_config: The JSON object with the API configuration being added.\n\nRaises:\nApiConfigurationError: If the class has already been registered.", "source": "codesearchnet"}
{"code": "def Relay(self, inventory):\n        \n        if type(inventory) is MinerTransaction:\n            return False\n\n        if inventory.Hash.ToBytes() in self.KnownHashes:\n            return False\n\n        self.KnownHashes.append(inventory.Hash.ToBytes())\n\n        if type(inventory) is Block:\n            pass\n\n        elif type(inventory) is Transaction or issubclass(type(inventory), Transaction):\n            if not self.AddTransaction(inventory):\n                \n                try:\n                    self.KnownHashes.remove(inventory.Hash.ToBytes())\n                except ValueError:\n                    \n                    pass\n                return False\n        else:\n            \n            pass\n\n        relayed = self.RelayDirectly(inventory)\n        return relayed", "docstring": "Relay the inventory to the remote client.\n\nArgs:\ninventory (neo.Network.Inventory):\n\nReturns:\nbool: True if relayed successfully. False otherwise.", "source": "juraj-google-style"}
{"code": "def toTFExample(dtypes):\n\n    def _toTFExample(iter):\n        float_dtypes = ['float', 'double']\n        int64_dtypes = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'long']\n        bytes_dtypes = ['binary', 'string']\n        float_list_dtypes = ['array<float>', 'array<double>']\n        int64_list_dtypes = ['array<boolean>', 'array<tinyint>', 'array<smallint>', 'array<int>', 'array<bigint>', 'array<long>']\n\n        def _toTFFeature(name, dtype, row):\n            feature = None\n            if (dtype in float_dtypes):\n                feature = (name, tf.train.Feature(float_list=tf.train.FloatList(value=[row[name]])))\n            elif (dtype in int64_dtypes):\n                feature = (name, tf.train.Feature(int64_list=tf.train.Int64List(value=[row[name]])))\n            elif (dtype in bytes_dtypes):\n                if (dtype == 'binary'):\n                    feature = (name, tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(row[name])])))\n                else:\n                    feature = (name, tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(row[name]).encode('utf-8')])))\n            elif (dtype in float_list_dtypes):\n                feature = (name, tf.train.Feature(float_list=tf.train.FloatList(value=row[name])))\n            elif (dtype in int64_list_dtypes):\n                feature = (name, tf.train.Feature(int64_list=tf.train.Int64List(value=row[name])))\n            else:\n                raise Exception('Unsupported dtype: {0}'.format(dtype))\n            return feature\n        results = []\n        for row in iter:\n            features = dict([_toTFFeature(name, dtype, row) for (name, dtype) in dtypes])\n            example = tf.train.Example(features=tf.train.Features(feature=features))\n            results.append((bytearray(example.SerializeToString()), None))\n        return results\n    return _toTFExample", "docstring": "mapPartition function to convert a Spark RDD of Row into an RDD of serialized tf.train.Example bytestring.\n\nNote that tf.train.Example is a fairly flat structure with limited datatypes, e.g. tf.train.FloatList,\ntf.train.Int64List, and tf.train.BytesList, so most DataFrame types will be coerced into one of these types.\n\nArgs:\n:dtypes: the DataFrame.dtypes of the source DataFrame.\n\nReturns:\nA mapPartition function which converts the source DataFrame into tf.train.Example bytestrings.", "source": "codesearchnet"}
{"code": "def ProcessAllReadyRequests(self):\n    request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(self.rdf_flow.client_id, self.rdf_flow.flow_id, next_needed_request=self.rdf_flow.next_request_to_process)\n    if (not request_dict):\n        return 0\n    processed = 0\n    while (self.rdf_flow.next_request_to_process in request_dict):\n        (request, responses) = request_dict[self.rdf_flow.next_request_to_process]\n        self.RunStateMethod(request.next_state, request, responses)\n        self.rdf_flow.next_request_to_process += 1\n        processed += 1\n        self.completed_requests.append(request)\n    if (processed and self.IsRunning() and (not self.outstanding_requests)):\n        self.RunStateMethod('End')\n        if ((self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING) and (not self.outstanding_requests)):\n            self.MarkDone()\n    self.PersistState()\n    if (not self.IsRunning()):\n        self._ClearAllRequestsAndResponses()\n    return processed", "docstring": "Processes all requests that are due to run.\n\nReturns:\nThe number of processed requests.", "source": "codesearchnet"}
{"code": "def __init__(self, dom, path):\n        \n        self._dom = dom\n        self._providers = {\n            name: self._get_provider(spec)\n            for name, spec in self._dom.get('sources', {}).items()\n        }\n        self._path = path", "docstring": "You would usually use the\n:func:`TemplateRepository.from_url` method instead of\ndirectly using this\n\nArgs:\ndom (dict): Specification of the template repository (not confuse\nwith xml dom)", "source": "juraj-google-style"}
{"code": "def _parse_section(name, source):\n    section = textwrap.dedent(_get_section(name, source)[7:])\n    commands = []\n    for line in section.splitlines():\n        if ((not commands) or (line[:1].isalpha() and line[:1].islower())):\n            commands.append(line)\n        else:\n            commands[(- 1)] = '{} {}'.format(commands[(- 1)].strip(), line.strip())\n    return commands", "docstring": "Yield each section line.\n\nNote:\nDepending on how it is wrapped, a section line can take up more than\none physical line.\n\nArgs:\nname: The name of the section to extract (e.g. \"Usage\").\nsource: The usage string to parse.\n\nReturns:\nA list containing each line, de-wrapped by whitespace from the source\ncode.\nIf the section is defined multiple times in the source code, all lines\nfrom all sections with that name will be returned.", "source": "codesearchnet"}
{"code": "def sg_min(tensor, opt):\n    r\n    return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the minimum of elements across axis of a tensor.\n\nSee `tf.reduce_min()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def train(self, resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union['optuna.Trial', dict[str, Any], None]=None, ignore_keys_for_eval: Optional[list[str]]=None, **kwargs):\n    if resume_from_checkpoint is False:\n        resume_from_checkpoint = None\n    self._memory_tracker.start()\n    args = self.args\n    self.is_in_train = True\n    if self.neftune_noise_alpha is not None:\n        self.model = self._activate_neftune(self.model)\n    if (args.fp16_full_eval or args.bf16_full_eval) and (not args.do_train) and (not self.is_model_parallel) and (self.model_init is None):\n        self._move_model_to_device(self.model, args.device)\n    if 'model_path' in kwargs:\n        resume_from_checkpoint = kwargs.pop('model_path')\n        warnings.warn('`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` instead.', FutureWarning)\n    if len(kwargs) > 0:\n        raise TypeError(f'train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.')\n    self._hp_search_setup(trial)\n    self._train_batch_size = self.args.train_batch_size\n    model_reloaded = False\n    if self.model_init is not None:\n        enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)\n        self.model = self.call_model_init(trial)\n        model_reloaded = True\n        self.optimizer, self.lr_scheduler = (None, None)\n    if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:\n        resume_from_checkpoint = get_last_checkpoint(args.output_dir)\n        if resume_from_checkpoint is None:\n            raise ValueError(f'No valid checkpoint found in output directory ({args.output_dir})')\n    if resume_from_checkpoint is not None:\n        if not is_sagemaker_mp_enabled() and (not self.is_deepspeed_enabled) and (not self.is_fsdp_enabled):\n            self._load_from_checkpoint(resume_from_checkpoint)\n        state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))\n        if state.train_batch_size is not None:\n            self._train_batch_size = state.train_batch_size\n    if model_reloaded:\n        if self.place_model_on_device:\n            self._move_model_to_device(self.model, args.device)\n        self.model_wrapped = self.model\n    inner_training_loop = find_executable_batch_size(self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size)\n    if args.push_to_hub:\n        try:\n            hf_hub_utils.disable_progress_bars()\n            return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval)\n        finally:\n            hf_hub_utils.enable_progress_bars()\n    else:\n        return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval)", "docstring": "Main training entry point.\n\nArgs:\nresume_from_checkpoint (`str` or `bool`, *optional*):\nIf a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a\n`bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance\nof [`Trainer`]. 
If present, training will resume from the model/optimizer/scheduler states loaded here.\ntrial (`optuna.Trial` or `Dict[str, Any]`, *optional*):\nThe trial run or the hyperparameter dictionary for hyperparameter search.\nignore_keys_for_eval (`List[str]`, *optional*)\nA list of keys in the output of your model (if it is a dictionary) that should be ignored when\ngathering predictions for evaluation during the training.\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional keyword arguments used to hide deprecated arguments", "source": "github-repos"}
{"code": "def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:\n    upper_bound1 = abs(up[0]) * abs(reg_scale)\n    upper_bound2 = abs(up[0]) * abs(reg_scale) * 2\n    step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))\n    left_values = [-step ** i + 1 for i in range(max_num_bins \n    right_values = [step ** i - 1 for i in range(1, max_num_bins \n    values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2]\n    values = [v if v.dim() > 0 else v.unsqueeze(0) for v in values]\n    values = torch.cat(values, 0)\n    return values", "docstring": "Generates the non-uniform Weighting Function W(n) for bounding box regression.\n\nArgs:\nmax_num_bins (int): Max number of the discrete bins.\nup (Tensor): Controls upper bounds of the sequence,\nwhere maximum offset is ±up * H / W.\nreg_scale (float): Controls the curvature of the Weighting Function.\nLarger values result in flatter weights near the central axis W(max_num_bins/2)=0\nand steeper weights at both ends.\nReturns:\nTensor: Sequence of Weighting Function.", "source": "github-repos"}
{"code": "def _ParseBooleanValue(self, byte_stream):\n    \n    if byte_stream == b'\\x00':\n      return False\n\n    if byte_stream == b'\\x01':\n      return True\n\n    raise errors.ParseError('Unsupported boolean value.')", "docstring": "Parses a boolean value.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\nbool: boolean value.\n\nRaises:\nParseError: when the boolean value cannot be parsed.", "source": "juraj-google-style"}
{"code": "def get_entry(self, pathname_name):\n        \n        pathname_name = self._normalized_entryname(pathname_name)\n        return self.contents[pathname_name]", "docstring": "Retrieves the specified child file or directory entry.\n\nArgs:\npathname_name: The basename of the child object to retrieve.\n\nReturns:\nThe fake file or directory object.\n\nRaises:\nKeyError: if no child exists by the specified name.", "source": "juraj-google-style"}
{"code": "def predict_next_action(self, state_key, next_action_list):\n    if (self.q_df is not None):\n        next_action_q_df = self.q_df[(self.q_df.state_key == state_key)]\n        next_action_q_df = next_action_q_df[next_action_q_df.action_key.isin(next_action_list)]\n        if (next_action_q_df.shape[0] == 0):\n            return random.choice(next_action_list)\n        else:\n            if (next_action_q_df.shape[0] == 1):\n                max_q_action = next_action_q_df['action_key'].values[0]\n            else:\n                next_action_q_df = next_action_q_df.sort_values(by=['q_value'], ascending=False)\n                max_q_action = next_action_q_df.iloc[(0, :)]['action_key']\n            return max_q_action\n    else:\n        return random.choice(next_action_list)", "docstring": "Predict next action by Q-Learning.\n\nArgs:\nstate_key:          The key of state in `self.t+1`.\nnext_action_list:   The possible action in `self.t+1`.\n\nReturns:\nThe key of action.", "source": "codesearchnet"}
{"code": "def from_array(array):\n    try:\n        raw_data = blosc.pack_array(array)\n    except Exception as e:\n        raise ValueError('Could not compress data from array. {}'.format(e))\n    return raw_data", "docstring": "Export a numpy array to a blosc array.\n\nArguments:\narray: The numpy array to compress to blosc array\n\nReturns:\nBytes/String. A blosc compressed array", "source": "codesearchnet"}
{"code": "def replace_with_spqr_linear(model, quantization_config=None, modules_to_not_convert=None, current_key_name=None, has_been_replaced=False):\n    if modules_to_not_convert is None:\n        modules_to_not_convert = []\n    if is_accelerate_available():\n        from accelerate import init_empty_weights\n    if is_spqr_available():\n        from spqr_quant import QuantizedLinear\n    for name, module in model.named_children():\n        if current_key_name is None:\n            current_key_name = []\n        current_key_name.append(name)\n        if isinstance(module, nn.Linear):\n            if '.'.join(current_key_name) + '.weight' not in modules_to_not_convert:\n                with init_empty_weights():\n                    tensor_name = '.'.join(current_key_name)\n                    shapes = quantization_config.shapes\n                    shapes_keys = shapes.keys()\n                    shapes_valid = f'{tensor_name}.dense_weights.shape' in shapes_keys and f'{tensor_name}.row_offsets.shape' in shapes_keys and (f'{tensor_name}.col_vals.shape' in shapes_keys) and (f'{tensor_name}.in_perm.shape' in shapes_keys)\n                    if not shapes_valid:\n                        raise ValueError(f'The SpQR quantization config does not contain the shape configuration for {tensor_name}. This indicates that the configuration is either invalid or corrupted.')\n                    dense_weights_shape = shapes[f'{tensor_name}.dense_weights.shape']\n                    row_offsets_shape = shapes[f'{tensor_name}.row_offsets.shape']\n                    col_vals_shape = shapes[f'{tensor_name}.col_vals.shape']\n                    in_perm_shape = shapes[f'{tensor_name}.in_perm.shape']\n                    in_features = module.in_features\n                    out_features = module.out_features\n                    model._modules[name] = QuantizedLinear.create_placehodler(rows=out_features, cols=in_features, bits=quantization_config.bits, beta1=quantization_config.beta1, beta2=quantization_config.beta2, dense_weights_shape=dense_weights_shape, row_offsets_shape=row_offsets_shape, col_vals_shape=col_vals_shape, in_perm_shape=in_perm_shape)\n                    has_been_replaced = True\n                    model._modules[name].source_cls = type(module)\n                    model._modules[name].requires_grad_(False)\n            else:\n                pass\n        if len(list(module.children())) > 0:\n            _, has_been_replaced = replace_with_spqr_linear(module, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, has_been_replaced=has_been_replaced)\n        current_key_name.pop(-1)\n    return (model, has_been_replaced)", "docstring": "Public method that recursively replaces the Linear layers of the given model with SpQR quantized layers.\n`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the\nconversion has been successful or not.\n\nArgs:\nmodel (`torch.nn.Module`):\nThe model to convert, can be any `torch.nn.Module` instance.\nquantization_config (`SpQRConfig`):\nThe quantization config object that contains the quantization parameters.\nmodules_to_not_convert (`list[str]`, *optional*):\nA list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be\nconverted.\ncurrent_key_name (`list`, *optional*):\nA list that contains the current key name. 
This is used for recursion and should not be passed by the user.\nhas_been_replaced (`bool`, *optional*):\nA boolean that indicates if the conversion has been successful or not. This is used for recursion and\nshould not be passed by the user.", "source": "github-repos"}
{"code": "def unprotect(self, **kwargs):\n        \n        id = self.get_id().replace('/', '%2F')\n        path = '%s/%s/unprotect' % (self.manager.path, id)\n        self.manager.gitlab.http_put(path, **kwargs)\n        self._attrs['protected'] = False", "docstring": "Unprotect the branch.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabProtectError: If the branch could not be unprotected", "source": "juraj-google-style"}
{"code": "def _parse_local_interface(self, config):\n    match = re.search('local-interface (\\\\w+)', config)\n    value = (match.group(1) if match else None)\n    return dict(local_interface=value)", "docstring": "Scans the config block and parses the local-interface value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "codesearchnet"}
{"code": "def read_excel(\n    filename,\n    dataset_class=dataset.pandas_dataset.PandasDataset,\n    expectations_config=None,\n    autoinspect_func=None,\n    *args, **kwargs\n):\n    \n    df = pd.read_excel(filename, *args, **kwargs)\n    if isinstance(df, dict):\n        for key in df:\n            df[key] = _convert_to_dataset_class(\n                df[key], dataset_class, expectations_config, autoinspect_func)\n    else:\n        df = _convert_to_dataset_class(\n            df, dataset_class, expectations_config, autoinspect_func)\n    return df", "docstring": "Read a file using Pandas read_excel and return a great_expectations dataset.\n\nArgs:\nfilename (string): path to file to read\ndataset_class (Dataset class): class to which to convert resulting Pandas df\nexpectations_config (string): path to great_expectations config file\n\nReturns:\ngreat_expectations dataset or ordered dict of great_expectations datasets,\nif multiple worksheets are imported", "source": "juraj-google-style"}
{"code": "def reduced_shape(input_shape, axes):\n    constant_input_shape = tensor_util.constant_value(input_shape)\n    if constant_input_shape is not None:\n        constant_axes = tensor_util.constant_value(axes)\n        if constant_axes is not None:\n            constant_axes = np.array(constant_axes, dtype=np.int32)\n            constant_input_shape = np.array(constant_input_shape, dtype=np.int32)\n            constant_input_shape[constant_axes] = 1\n            return constant_input_shape\n    axes = ops.convert_to_tensor(axes)\n    input_rank = array_ops.size(input_shape, out_type=axes.dtype)\n    axes = (axes + input_rank) % input_rank\n    axes_shape = array_ops.shape(axes)\n    return gen_data_flow_ops.dynamic_stitch([range(input_rank), axes], [input_shape, array_ops.ones(axes_shape, dtype=input_shape.dtype)])", "docstring": "Helper function for reduction ops.\n\nArgs:\ninput_shape: 1-D Tensor, the shape of the Tensor being reduced.\naxes: 1-D Tensor, the reduction axes.\n\nReturns:\nA 1-D Tensor, the output shape as if keepdims were set to True.", "source": "github-repos"}
{"code": "def __init__(self, key_value_pairs=None):\n    \n    if not key_value_pairs:\n      raise errors.FormatError('Missing key value pairs value.')\n\n    if not isinstance(key_value_pairs, list):\n      raise errors.FormatError('key_value_pairs must be a list')\n\n    for pair in key_value_pairs:\n      if not isinstance(pair, dict):\n        raise errors.FormatError('key_value_pair must be a dict')\n\n      if set(pair.keys()) != set(['key', 'value']):\n        key_value_pairs = ', '.join([\n            '{0:s}: {1:s}'.format(key, value) for key, value in key_value_pairs\n        ])\n        error_message = (\n            'key_value_pair missing \"key\" and \"value\" keys, got: '\n            '{0:s}').format(key_value_pairs)\n        raise errors.FormatError(error_message)\n\n      WindowsRegistryKeySourceType.ValidateKey(pair['key'])\n\n    super(WindowsRegistryValueSourceType, self).__init__()\n    self.key_value_pairs = key_value_pairs", "docstring": "Initializes a source type.\n\nArgs:\nkey_value_pairs (Optional[list[tuple[str, str]]]): key path and value\nname pairs, where key paths are relative to the root of the Windows\nRegistry.\n\nRaises:\nFormatError: when key value pairs is not set.", "source": "juraj-google-style"}
{"code": "def is17(msg):\n    if allzeros(msg):\n        return False\n    d = hex2bin(data(msg))\n    if (bin2int(d[28:56]) != 0):\n        return False\n    caps = cap17(msg)\n    if ('BDS20' not in caps):\n        return False\n    return True", "docstring": "Check if a message is likely to be BDS code 1,7\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"}
{"code": "def fetch(self, plan_id, data={}, **kwargs):\n        \n        return super(Plan, self).fetch(plan_id, data, **kwargs)", "docstring": "Fetch Plan for given Id\n\nArgs:\nplan_id : Id for which Plan object has to be retrieved\n\nReturns:\nPlan dict for given subscription Id", "source": "juraj-google-style"}
{"code": "def get_committed_signatures(vcs):\n    committed_path = _get_committed_history_path(vcs)\n    known_signatures = []\n    if os.path.exists(committed_path):\n        with open(committed_path, 'r') as f:\n            known_signatures = f.read().split()\n    return known_signatures", "docstring": "Get the list of committed signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nlist(basestring) - list of signatures", "source": "codesearchnet"}
{"code": "def find_contacts(self, geoms_1, geoms_2):\n    for contact in self.sim.data.contact[0:self.sim.data.ncon]:\n        c1_in_g1 = (self.sim.model.geom_id2name(contact.geom1) in geoms_1)\n        c2_in_g2 = (self.sim.model.geom_id2name(contact.geom2) in geoms_2)\n        c2_in_g1 = (self.sim.model.geom_id2name(contact.geom2) in geoms_1)\n        c1_in_g2 = (self.sim.model.geom_id2name(contact.geom1) in geoms_2)\n        if ((c1_in_g1 and c2_in_g2) or (c1_in_g2 and c2_in_g1)):\n            (yield contact)", "docstring": "Finds contact between two geom groups.\n\nArgs:\ngeoms_1: a list of geom names (string)\ngeoms_2: another list of geom names (string)\n\nReturns:\niterator of all contacts between @geoms_1 and @geoms_2", "source": "codesearchnet"}
{"code": "def construct_lanczos_params(self):\n    self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval)\n\n    def _m_vector_prod_fn(x):\n        return self.get_psd_product(x, dtype=self.lanczos_dtype)\n\n    def _h_vector_prod_fn(x):\n        return self.get_h_product(x, dtype=self.lanczos_dtype)\n    self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64)\n    zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64)\n    self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m, shape=(self.matrix_m_dimension, 1), name='m_min_vec_ph')\n    (self.m_min_eig, self.m_min_vec) = self.min_eigen_vec(_m_vector_prod_fn, self.matrix_m_dimension, self.m_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype)\n    self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype)\n    self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype)\n    self.h_min_vec_estimate = np.zeros(shape=((self.matrix_m_dimension - 1), 1), dtype=np.float64)\n    zeros_h = tf.zeros(shape=((self.matrix_m_dimension - 1), 1), dtype=tf.float64)\n    self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h, shape=((self.matrix_m_dimension - 1), 1), name='h_min_vec_ph')\n    (self.h_min_eig, self.h_min_vec) = self.min_eigen_vec(_h_vector_prod_fn, (self.matrix_m_dimension - 1), self.h_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype)\n    self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype)\n    self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)", "docstring": "Computes matrices T and V using the Lanczos algorithm.\n\nArgs:\nk: number of iterations and dimensionality of the tridiagonal matrix\nReturns:\neig_vec: eigen vector corresponding to min eigenvalue", "source": "codesearchnet"}
{"code": "def trace_set_format(self, fmt):\n        \n        cmd = enums.JLinkTraceCommand.SET_FORMAT\n        data = ctypes.c_uint32(fmt)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n        if (res == 1):\n            raise errors.JLinkException('Failed to set trace format.')\n        return None", "docstring": "Sets the format for the trace buffer to use.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nfmt (int): format for the trace buffer; this is one of the attributes\nof ``JLinkTraceFormat``.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def campaign(self, name, **kwargs):\n    group_obj = Campaign(name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Campaign data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nfirst_seen (str, kwargs): The first seen datetime expression for this Group.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Campaign.", "source": "codesearchnet"}
{"code": "def GetFormattedSources(self, event):\n    \n    event_formatter = self.GetEventFormatter(event)\n    if not event_formatter:\n      return None, None\n\n    return event_formatter.GetSources(event)", "docstring": "Retrieves the formatted sources related to the event.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple: containing:\n\nstr: full source string or None if no event formatter was found.\nstr: short source string or None if no event formatter was found.", "source": "juraj-google-style"}
{"code": "def _initializer(shape, dtype=dtypes.float32, partition_info=None):\n    if dtype != dtypes.float32:\n        raise TypeError('Currently, only float32 is supported. Received dtype: {}'.format(dtype))\n    if len(shape) != 2:\n        raise ValueError('Expected 2-dim shape, but received: {}'.format(shape))\n    if shape[0] <= 0:\n        raise ValueError('Expected 1st dim of shape to be > 0, but received shape: {}'.format(shape))\n    if shape[1] != new_col_vocab_size + num_col_oov_buckets:\n        raise ValueError('Expected 2nd dim of shape to be new_col_vocab_size ({}) + num_col_oov_buckets ({}) = {}, but received shape: {}'.format(new_col_vocab_size, num_col_oov_buckets, new_col_vocab_size + num_col_oov_buckets, shape))\n    offset = 0\n    if partition_info is not None:\n        offset = partition_info.single_offset(shape)\n    if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets:\n        raise ValueError('Trying to initialize {} additional rows after {} rows have already been initialized, which would exceed expected total row count of new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.'.format(shape[0], offset, new_row_vocab_size, num_row_oov_buckets, new_row_vocab_size + num_row_oov_buckets))\n    row_oov_buckets_to_use = min(shape[0], max(0, offset + shape[0] - new_row_vocab_size))\n    num_rows_to_load = shape[0] - row_oov_buckets_to_use\n    if offset > new_row_vocab_size:\n        if shape[0] != row_oov_buckets_to_use:\n            raise ValueError('Partitioned variable offset is greater than new vocab size and not operating on OOV-only partition.')\n        return initializer(shape)\n    return _load_and_remap_matrix(ckpt_path=ckpt_path, old_tensor_name=old_tensor_name, new_row_vocab_offset=offset, num_rows_to_load=num_rows_to_load, new_col_vocab_size=new_col_vocab_size, initializer=initializer, old_row_vocab_size=old_row_vocab_size, old_row_vocab_file=old_row_vocab_file, new_row_vocab_file=new_row_vocab_file, old_col_vocab_file=old_col_vocab_file, new_col_vocab_file=new_col_vocab_file, num_row_oov_buckets=row_oov_buckets_to_use, num_col_oov_buckets=num_col_oov_buckets, max_rows_in_memory=max_rows_in_memory)", "docstring": "Variable initializer.\n\nArgs:\nshape: Shape of `Tensor` to return. Should include OOV on both axes.\ndtype: Must be float32.\npartition_info: variable_scope._PartitionInfo.\n\nReturns:\n`Tensor` of shape `shape`.\n\nRaises:\nTypeError: If `dtype` is anything other than float32.\nValueError: For shape mismatch upon invocation.", "source": "github-repos"}
{"code": "def rotate_capture_handler_log(self, name):\n        \n        for sc_key, sc in self._stream_capturers.iteritems():\n            for h in sc[0].capture_handlers:\n                if h['name'] == name:\n                    sc[0]._rotate_log(h)", "docstring": "Force a rotation of a handler's log file\n\nArgs:\nname:\nThe name of the handler who's log file should be rotated.", "source": "juraj-google-style"}
{"code": "def has_mixture(val: Any) -> bool:\n    getter = getattr(val, '_has_mixture_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if (result is not NotImplemented):\n        return result\n    return (mixture(val, None) is not None)", "docstring": "Returns whether the value has a mixture representation.\n\nReturns:\nIf `val` has a `_has_mixture_` method and its result is not\nNotImplemented, that result is returned. Otherwise, if the value\nhas a `_mixture_` method return True if that has a non-default value.\nReturns False if neither function exists.", "source": "codesearchnet"}
{"code": "def run_step(self, context):\n        \n        logger.debug(\"starting\")\n        \n        self.set_step_input_context(context)\n\n        if self.while_decorator:\n            self.while_decorator.while_loop(context,\n                                            self.run_foreach_or_conditional)\n        else:\n            self.run_foreach_or_conditional(context)\n\n        logger.debug(\"done\")", "docstring": "Run a single pipeline step.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "juraj-google-style"}
{"code": "class CounterMetric(Metric):\n\n    def __init__(self, counter_metric, submit_timestamp, metric_id):\n        value = counter_metric.result\n        super().__init__(submit_timestamp, metric_id, value, counter_metric)", "docstring": "The Counter Metric in ready-to-publish format.\n\nArgs:\ncounter_metric (object): counter metric object from MetricResult\nsubmit_timestamp (float): date-time of saving metric to database\nmetric_id (uuid): unique id to identify test run", "source": "github-repos"}
{"code": "def _WritesString(self, content):\n    content_bytes = codecs.encode(content, 'utf-8')\n    self._sample_file.write(content_bytes)", "docstring": "Writes a string to the sample file.\n\nArgs:\ncontent (str): content to write to the sample file.", "source": "codesearchnet"}
{"code": "def _parse_authors(details):\n    \n    authors = details.find(\n        \"tr\",\n        {\"id\": \"ctl00_ContentPlaceHolder1_tblRowAutor\"}\n    )\n\n    if not authors:\n        return []  \n\n    \n    author_list = []\n    for author in authors[0].find(\"a\"):\n        author_obj = Author(author.getContent())\n\n        if \"href\" in author.params:\n            author_obj.URL = author.params[\"href\"]\n\n        author_list.append(author_obj)\n\n    return author_list", "docstring": "Parse authors of the book.\n\nArgs:\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nlist: List of :class:`structures.Author` objects. Blank if no author \\\nfound.", "source": "juraj-google-style"}
{"code": "def remove_long_seq(maxlen, seq, label):\n    new_seq, new_label = ([], [])\n    for x, y in zip(seq, label):\n        if len(x) < maxlen:\n            new_seq.append(x)\n            new_label.append(y)\n    return (new_seq, new_label)", "docstring": "Removes sequences that exceed the maximum length.\n\nArgs:\nmaxlen: Int, maximum length of the output sequences.\nseq: List of lists, where each sublist is a sequence.\nlabel: List where each element is an integer.\n\nReturns:\nnew_seq, new_label: shortened lists for `seq` and `label`.", "source": "github-repos"}
{"code": "def _single_request(self, method, *args, **kwargs):\n    _method = self._service\n    for item in method.split('.'):\n        if method.endswith(item):\n            _method = getattr(_method, item)(*args, **kwargs)\n        else:\n            _method = getattr(_method, item)()\n    _method.uri = _method.uri.replace('$ENDPOINT', self._endpoint)\n    try:\n        return _method.execute(http=self._http)\n    except googleapiclient.errors.HttpError as exc:\n        response = json.loads(exc.content.decode('utf-8'))['error']\n        raise APIError(code=response['code'], message=response['message'], http_error=exc)", "docstring": "Make a single request to the fleet API endpoint\n\nArgs:\nmethod (str): A dot delimited string indicating the method to call.  Example: 'Machines.List'\n*args: Passed directly to the method being called.\n**kwargs: Passed directly to the method being called.\n\nReturns:\ndict: The response from the method called.\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "codesearchnet"}
{"code": "def stage(self, pipeline_name, stage_name, pipeline_counter=None):\n        \n        return Stage(self, pipeline_name, stage_name, pipeline_counter=pipeline_counter)", "docstring": "Returns an instance of :class:`Stage`\n\nArgs:\npipeline_name (str): Name of the pipeline the stage belongs to\nstage_name (str): Name of the stage to act on\npipeline_counter (int): The pipeline instance the stage is for.\n\nReturns:\nStage: an instantiated :class:`Stage`.", "source": "juraj-google-style"}
{"code": "def _CalculateNTFSTimeHash(self, file_entry):\n    date_time_values = []\n    access_time = getattr(file_entry, 'access_time', None)\n    if access_time:\n        date_time_string = access_time.CopyToDateTimeString()\n        date_time_values.append('atime:{0:s}'.format(date_time_string))\n    creation_time = getattr(file_entry, 'creation_time', None)\n    if creation_time:\n        date_time_string = creation_time.CopyToDateTimeString()\n        date_time_values.append('crtime:{0:s}'.format(date_time_string))\n    modification_time = getattr(file_entry, 'modification_time', None)\n    if modification_time:\n        date_time_string = modification_time.CopyToDateTimeString()\n        date_time_values.append('mtime:{0:s}'.format(date_time_string))\n    change_time = getattr(file_entry, 'change_time', None)\n    if change_time:\n        date_time_string = change_time.CopyToDateTimeString()\n        date_time_values.append('ctime:{0:s}'.format(date_time_string))\n    date_time_values = ''.join(date_time_values)\n    date_time_values = date_time_values.encode('ascii')\n    hash_value = hashlib.md5()\n    hash_value.update(date_time_values)\n    return hash_value.hexdigest()", "docstring": "Calculates an MD5 from the date and time value of a NTFS file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry.\n\nReturns:\nstr: hexadecimal representation of the MD5 hash value of the date and\ntime values of the file entry.", "source": "codesearchnet"}
{"code": "def max_sequence_length(self, dataset_split):\n    return {problem.DatasetSplit.TRAIN: 64, problem.DatasetSplit.EVAL: 128, problem.DatasetSplit.TEST: 128}[dataset_split]", "docstring": "Determine the maximum sequence length given a dataset_split.\n\nArgs:\ndataset_split: A problem.DatasetSplit.\n\nReturns:\nThe maximum length that a sequence can be for this dataset_split.", "source": "codesearchnet"}
{"code": "def resize(x, mode, factor=4):\n    \n    assert mode in ['bilinear', 'nearest'], mode\n    shp = tf.shape(x)[2:] * factor\n    \n    x = tf.transpose(x, [0, 2, 3, 1])\n    if mode == 'bilinear':\n        x = tf.image.resize_bilinear(x, shp, align_corners=True)\n    else:\n        \n        x = tf.image.resize_nearest_neighbor(x, shp, align_corners=False)\n    \n    return tf.transpose(x, [0, 3, 1, 2])", "docstring": "Resize input tensor with unkown input-shape by a factor\n\nArgs:\nx (tf.Tensor): tensor NCHW\nfactor (int, optional): resize factor for H, W\n\nNote:\nDifferences here against Caffe have huge impacts on the\nquality of the predictions.\n\nReturns:\ntf.Tensor: resized tensor NCHW", "source": "juraj-google-style"}
{"code": "def download_and_extract(self, url_or_urls):\n    with self._downloader.tqdm():\n        with self._extractor.tqdm():\n            return _map_promise(self._download_extract, url_or_urls)", "docstring": "Download and extract given url_or_urls.\n\nIs roughly equivalent to:\n\n```\nextracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))\n```\n\nArgs:\nurl_or_urls: url or `list`/`dict` of urls to download and extract. Each\nurl can be a `str` or `tfds.download.Resource`.\n\nIf not explicitly specified in `Resource`, the extraction method will\nautomatically be deduced from downloaded file name.\n\nReturns:\nextracted_path(s): `str`, extracted paths of given URL(s).", "source": "codesearchnet"}
{"code": "def create_statement_inspection_table(sts: List[Influence]):\n    \n\n    columns = [\n        \"un_groundings\",\n        \"subj_polarity\",\n        \"obj_polarity\",\n        \"Sentence\",\n        \"Source API\",\n    ]\n\n    polarity_to_str = lambda x: \"+\" if x == 1 else \"-\" if x == -1 else \"None\"\n    l = []\n    for s in sts:\n        subj_un_grounding = s.subj.db_refs[\"UN\"][0][0].split(\"/\")[-1]\n        obj_un_grounding = s.obj.db_refs[\"UN\"][0][0].split(\"/\")[-1]\n        subj_polarity = s.subj_delta[\"polarity\"]\n        obj_polarity = s.obj_delta[\"polarity\"]\n        subj_adjectives = s.subj_delta[\"adjectives\"]\n        for e in s.evidence:\n            l.append(\n                (\n                    (subj_un_grounding, obj_un_grounding),\n                    subj_polarity,\n                    obj_polarity,\n                    e.text,\n                    e.source_api,\n                )\n            )\n\n    df = pd.DataFrame(l, columns=columns)\n    df = df.pivot_table(index=[\"un_groundings\", \"Source API\", \"Sentence\"])\n\n    def hover(hover_color=\"\n        return dict(\n            selector=\"tr:hover\",\n            props=[(\"background-color\", \"%s\" % hover_color)],\n        )\n\n    styles = [\n        hover(),\n        dict(props=[(\"font-size\", \"100%\"), (\"font-family\", \"Gill Sans\")]),\n    ]\n\n    return df.style.set_table_styles(styles)", "docstring": "Display an HTML representation of a table with INDRA statements to\nmanually inspect for validity.\n\nArgs:\nsts: A list of INDRA statements to be manually inspected for validity.", "source": "juraj-google-style"}
{"code": "def notify_owner(func):\n\n    def wrapper(self, *args, **kwargs):\n        old = self._saved_copy()\n        result = func(self, *args, **kwargs)\n        self._notify_owners(old)\n        return result\n    wrapper.__doc__ = ('Container method ``%s`` instrumented to notify property owners' % func.__name__)\n    return wrapper", "docstring": "A decorator for mutating methods of property container classes\nthat notifies owners of the property container about mutating changes.\n\nArgs:\nfunc (callable) : the container method to wrap in a notification\n\nReturns:\nwrapped method\n\nExamples:\n\nA ``__setitem__`` could be wrapped like this:\n\n.. code-block:: python\n\n# x[i] = y\n@notify_owner\ndef __setitem__(self, i, y):\nreturn super(PropertyValueDict, self).__setitem__(i, y)\n\nThe returned wrapped method will have a docstring indicating what\noriginal method it is wrapping.", "source": "codesearchnet"}
{"code": "def write_to_file(src, dst):\n    \n    n = 0\n    for block in src:\n        dst.write(block)\n        n += len(block)\n    return n", "docstring": "Write data from `src` into `dst`.\n\nArgs:\nsrc (iterable): iterable that yields blocks of data to write\ndst (file-like object): file-like object that must support\n.write(block)\n\nReturns:\nnumber of bytes written to `dst`", "source": "juraj-google-style"}
{"code": "def get_user(self, user_id):\n        \n        try:\n            return self._user_dict[user_id]\n        except KeyError:\n            logger.warning('UserList returning unknown User for UserID %s',\n                           user_id)\n            return User(user_id, None, None, None, [], False)", "docstring": "Get a user by its ID.\n\nArgs:\nuser_id (~hangups.user.UserID): The ID of the user.\n\nRaises:\nKeyError: If no such user is known.\n\nReturns:\n:class:`~hangups.user.User` with the given ID.", "source": "juraj-google-style"}
{"code": "def _sign_operation(op):\n    \n    md5 = hashlib.md5()\n    md5.update(op.consumerId.encode('utf-8'))\n    md5.update(b'\\x00')\n    md5.update(op.operationName.encode('utf-8'))\n    if op.labels:\n        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n    return md5.digest()", "docstring": "Obtains a signature for an operation in a ReportRequest.\n\nArgs:\nop (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an\noperation used in a `ReportRequest`\n\nReturns:\nstring: a unique signature for that operation", "source": "juraj-google-style"}
{"code": "def handler_for_name(fq_name):\n    resolved_name = for_name(fq_name)\n    if isinstance(resolved_name, (type, types.ClassType)):\n        return resolved_name()\n    elif isinstance(resolved_name, types.MethodType):\n        return getattr(resolved_name.im_class(), resolved_name.__name__)\n    else:\n        return resolved_name", "docstring": "Resolves and instantiates handler by fully qualified name.\n\nFirst resolves the name using for_name call. Then if it resolves to a class,\ninstantiates a class, if it resolves to a method - instantiates the class and\nbinds method to the instance.\n\nArgs:\nfq_name: fully qualified name of something to find.\n\nReturns:\nhandler instance which is ready to be called.", "source": "codesearchnet"}
{"code": "def htmlcolor_to_rgb(str_color):\n    \n    if not (str_color.startswith('\n        raise ValueError(\"Bad html color format. Expected: '\n    result = [1.0 * int(n, 16) / 255 for n in (str_color[1:3], str_color[3:5], str_color[5:])]\n    return result", "docstring": "function to convert HTML-styly color string to RGB values\n\nArgs:\ns: Color in HTML format\n\nReturns:\nlist of three RGB color components", "source": "juraj-google-style"}
{"code": "def trim_wav_ms(in_path: Path, out_path: Path, start_time: int, end_time: int) -> None:\n    try:\n        trim_wav_sox(in_path, out_path, start_time, end_time)\n    except FileNotFoundError:\n        trim_wav_pydub(in_path, out_path, start_time, end_time)\n    except subprocess.CalledProcessError:\n        trim_wav_pydub(in_path, out_path, start_time, end_time)", "docstring": "Extracts part of a WAV File.\n\nFirst attempts to call sox. If sox is unavailable, it backs off to\npydub+ffmpeg.\n\nArgs:\nin_path: A path to the source file to extract a portion of\nout_path: A path describing the to-be-created WAV file.\nstart_time: The point in the source WAV file at which to begin\nextraction.\nend_time: The point in the source WAV file at which to end extraction.", "source": "codesearchnet"}
{"code": "def UnlockScanNode(self, path_spec):\n    \n    if not self.HasScanNode(path_spec):\n      raise KeyError('Scan node does not exist.')\n\n    if path_spec not in self._locked_scan_nodes:\n      raise KeyError('Scan node is not locked.')\n\n    del self._locked_scan_nodes[path_spec]\n\n    \n    self._scan_nodes[path_spec].scanned = False", "docstring": "Marks a scan node as unlocked.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nRaises:\nKeyError: if the scan node does not exists or is not locked.", "source": "juraj-google-style"}
{"code": "def get_tensor_file_paths(self, node_name, output_slot, debug_op, device_name=None):\n    device_name = self._infer_device_name(device_name, node_name)\n    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n    if watch_key not in self._watch_key_to_datum[device_name]:\n        raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump of device %s' % (watch_key, device_name))\n    return [datum.file_path for datum in self._watch_key_to_datum[device_name][watch_key]]", "docstring": "Get the file paths from a debug-dumped tensor.\n\nArgs:\nnode_name: (`str`) name of the node that the tensor is produced by.\noutput_slot: (`int`) output slot index of tensor.\ndebug_op: (`str`) name of the debug op.\ndevice_name: (`str`) name of the device. If there is only one device or if\nthe specified debug_watch_key exists on only one device, this argument\nis optional.\n\nReturns:\nList of file path(s) loaded. This is a list because each debugged tensor\nmay be dumped multiple times.\n\nRaises:\nWatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in\nthe debug-dump data.", "source": "github-repos"}
{"code": "def load_actor_class(self, driver_id, function_descriptor):\n    function_id = function_descriptor.function_id\n    actor_class = self._loaded_actor_classes.get(function_id, None)\n    if (actor_class is None):\n        if self._worker.load_code_from_local:\n            driver_id = ray.DriverID.nil()\n            actor_class = self._load_actor_from_local(driver_id, function_descriptor)\n        else:\n            actor_class = self._load_actor_class_from_gcs(driver_id, function_descriptor)\n        self._loaded_actor_classes[function_id] = actor_class\n        module_name = function_descriptor.module_name\n        actor_class_name = function_descriptor.class_name\n        actor_methods = inspect.getmembers(actor_class, predicate=is_function_or_method)\n        for (actor_method_name, actor_method) in actor_methods:\n            method_descriptor = FunctionDescriptor(module_name, actor_method_name, actor_class_name)\n            method_id = method_descriptor.function_id\n            executor = self._make_actor_method_executor(actor_method_name, actor_method, actor_imported=True)\n            self._function_execution_info[driver_id][method_id] = FunctionExecutionInfo(function=executor, function_name=actor_method_name, max_calls=0)\n            self._num_task_executions[driver_id][method_id] = 0\n        self._num_task_executions[driver_id][function_id] = 0\n    return actor_class", "docstring": "Load the actor class.\n\nArgs:\ndriver_id: Driver ID of the actor.\nfunction_descriptor: Function descriptor of the actor constructor.\n\nReturns:\nThe actor class.", "source": "codesearchnet"}
{"code": "def SetFlushInterval(self, flush_interval):\n    self._flush_interval = flush_interval\n    logger.debug('Elasticsearch flush interval: {0:d}'.format(flush_interval))", "docstring": "Set the flush interval.\n\nArgs:\nflush_interval (int): number of events to buffer before doing a bulk\ninsert.", "source": "codesearchnet"}
{"code": "def intersection_update(self, *others):\n    for other in map(self._as_mapping, others):\n        for (element, current_count) in list(self.items()):\n            multiplicity = other.get(element, 0)\n            if (multiplicity < current_count):\n                self[element] = multiplicity", "docstring": "r\"\"\"Update the multiset, keeping only elements found in it and all others.\n\n>>> ms = Multiset('aab')\n>>> ms.intersection_update('bc')\n>>> sorted(ms)\n['b']\n\nYou can also use the ``&=`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aabc')\n>>> ms &= Multiset('abbd')\n>>> sorted(ms)\n['a', 'b']\n\nFor a variant of the operation which does not modify the multiset, but returns a new\nmultiset instead see :meth:`intersection`.\n\nArgs:\nothers: The other sets to intersect this multiset with. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].", "source": "codesearchnet"}
{"code": "def proc_val(key, val):\n        \n        list_keys = (\"LDAUU\", \"LDAUL\", \"LDAUJ\", \"MAGMOM\", \"DIPOL\",\n                     \"LANGEVIN_GAMMA\", \"QUAD_EFG\", \"EINT\")\n        bool_keys = (\"LDAU\", \"LWAVE\", \"LSCALU\", \"LCHARG\", \"LPLANE\", \"LUSE_VDW\",\n                     \"LHFCALC\", \"ADDGRID\", \"LSORBIT\", \"LNONCOLLINEAR\")\n        float_keys = (\"EDIFF\", \"SIGMA\", \"TIME\", \"ENCUTFOCK\", \"HFSCREEN\",\n                      \"POTIM\", \"EDIFFG\", \"AGGAC\", \"PARAM1\", \"PARAM2\")\n        int_keys = (\"NSW\", \"NBANDS\", \"NELMIN\", \"ISIF\", \"IBRION\", \"ISPIN\",\n                    \"ICHARG\", \"NELM\", \"ISMEAR\", \"NPAR\", \"LDAUPRINT\", \"LMAXMIX\",\n                    \"ENCUT\", \"NSIM\", \"NKRED\", \"NUPDOWN\", \"ISPIND\", \"LDAUTYPE\",\n                    \"IVDW\")\n\n        def smart_int_or_float(numstr):\n            if numstr.find(\".\") != -1 or numstr.lower().find(\"e\") != -1:\n                return float(numstr)\n            else:\n                return int(numstr)\n\n        try:\n            if key in list_keys:\n                output = []\n                toks = re.findall(\n                    r\"(-?\\d+\\.?\\d*)\\*?(-?\\d+\\.?\\d*)?\\*?(-?\\d+\\.?\\d*)?\", val)\n                for tok in toks:\n                    if tok[2] and \"3\" in tok[0]:\n                        output.extend(\n                            [smart_int_or_float(tok[2])] * int(tok[0])\n                            * int(tok[1]))\n                    elif tok[1]:\n                        output.extend([smart_int_or_float(tok[1])] *\n                                      int(tok[0]))\n                    else:\n                        output.append(smart_int_or_float(tok[0]))\n                return output\n            if key in bool_keys:\n                m = re.match(r\"^\\.?([T|F|t|f])[A-Za-z]*\\.?\", val)\n                if m:\n                    if m.group(1) == \"T\" or m.group(1) == \"t\":\n                        return True\n                    else:\n                        return False\n                raise ValueError(key + \" should be a boolean type!\")\n\n            if key in float_keys:\n                return float(re.search(r\"^-?\\d*\\.?\\d*[e|E]?-?\\d*\", val).group(0))\n\n            if key in int_keys:\n                return int(re.match(r\"^-?[0-9]+\", val).group(0))\n\n        except ValueError:\n            pass\n\n        \n        try:\n            val = int(val)\n            return val\n        except ValueError:\n            pass\n\n        try:\n            val = float(val)\n            return val\n        except ValueError:\n            pass\n\n        if \"true\" in val.lower():\n            return True\n\n        if \"false\" in val.lower():\n            return False\n\n        return val.strip().capitalize()", "docstring": "Static helper method to convert INCAR parameters to proper types, e.g.,\nintegers, floats, lists, etc.\n\nArgs:\nkey: INCAR parameter key\nval: Actual value of INCAR parameter.", "source": "juraj-google-style"}
{"code": "def api_request(self, method_name, params):\n    url = self._method_url(method_name)\n    data = json.dumps(params)\n    return self._make_request(url=url, method='post', data=data)", "docstring": "Execute an arbitrary method.\n\nArgs:\nmethod_name (str): include the controller name: 'devices/search'\nparams (dict): the method parameters\nReturns:\nA dict with the response\nRaises:\nrequests.exceptions.HTTPError", "source": "codesearchnet"}
{"code": "def ensure_app_data_dir(appname, *args):\n    \n    from ubelt import util_path\n    dpath = get_app_data_dir(appname, *args)\n    util_path.ensuredir(dpath)\n    return dpath", "docstring": "Calls `get_app_data_dir` but ensures the directory exists.\n\nArgs:\nappname (str): the name of the application\n*args: any other subdirectories may be specified\n\nSeeAlso:\nget_app_data_dir\n\nExample:\n>>> import ubelt as ub\n>>> dpath = ub.ensure_app_data_dir('ubelt')\n>>> assert exists(dpath)", "source": "juraj-google-style"}
{"code": "def _get_schema(cls, schema):\n    if isinstance(schema, string_types):\n        schema = cls._get_object_from_python_path(schema)\n    if isclass(schema):\n        schema = schema()\n    if (not isinstance(schema, Schema)):\n        raise TypeError('The schema must be a path to a Marshmallow schema or a Marshmallow schema.')\n    return schema", "docstring": "Method that will fetch a Marshmallow schema flexibly.\n\nArgs:\nschema (marshmallow.Schema|str): Either the schema class, an\ninstance of a schema, or a Python path to a schema.\n\nReturns:\nmarshmallow.Schema: The desired schema.\n\nRaises:\nTypeError: This is raised if the provided object isn't\na Marshmallow schema.", "source": "codesearchnet"}
{"code": "def fwd(self, x_data):\n    x_data = numpy.asfarray(x_data)\n    shape = x_data.shape\n    x_data = x_data.reshape(len(self), (- 1))\n    (lower, upper) = evaluation.evaluate_bound(self, x_data)\n    q_data = numpy.zeros(x_data.shape)\n    indices = (x_data > upper)\n    q_data[indices] = 1\n    indices = ((~ indices) & (x_data >= lower))\n    q_data[indices] = numpy.clip(evaluation.evaluate_forward(self, x_data), a_min=0, a_max=1)[indices]\n    q_data = q_data.reshape(shape)\n    return q_data", "docstring": "Forward Rosenblatt transformation.\n\nArgs:\nx_data (numpy.ndarray):\nLocation for the distribution function. ``x_data.shape`` must\nbe compatible with distribution shape.\n\nReturns:\n(numpy.ndarray):\nEvaluated distribution function values, where\n``out.shape==x_data.shape``.", "source": "codesearchnet"}
{"code": "def delete_asset(self, asset_id, asset_type):\n    return self.asset(asset_id, asset_type=asset_type, action='DELETE')", "docstring": "Delete the asset with the provided asset_id.\n\nArgs:\nasset_id: The id of the asset.\nasset_type: The asset type.\n\nReturns:", "source": "codesearchnet"}
{"code": "def translate_node_id(self, ni: PrefName, sctx: SchemaContext) -> QualName:\n        \n        p, s, loc = ni.partition(\":\")\n        if not s:\n            return (ni, sctx.default_ns)\n        try:\n            mdata = self.modules[sctx.text_mid]\n        except KeyError:\n            raise ModuleNotRegistered(*sctx.text_mid) from None\n        try:\n            return (loc, self.namespace(mdata.prefix_map[p]))\n        except KeyError:\n            raise UnknownPrefix(p, sctx.text_mid) from None", "docstring": "Translate node identifier to a qualified name.\n\nArgs:\nni: Node identifier (with optional prefix).\nsctx: SchemaContext.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If the prefix specified in `ni` is not declared.", "source": "juraj-google-style"}
{"code": "def generate(self, descriptors):\n        \n        model_ids = self.search_tree.adj_list.keys()\n\n        target_graph = None\n        father_id = None\n        descriptors = deepcopy(descriptors)\n        elem_class = Elem\n        if self.optimizemode is OptimizeMode.Maximize:\n            elem_class = ReverseElem\n\n        \n        pq = PriorityQueue()\n        temp_list = []\n        for model_id in model_ids:\n            metric_value = self.searcher.get_metric_value_by_id(model_id)\n            temp_list.append((metric_value, model_id))\n        temp_list = sorted(temp_list)\n        for metric_value, model_id in temp_list:\n            graph = self.searcher.load_model_by_id(model_id)\n            graph.clear_operation_history()\n            graph.clear_weights()\n            pq.put(elem_class(metric_value, model_id, graph))\n\n        t = 1.0\n        t_min = self.t_min\n        alpha = 0.9\n        opt_acq = self._get_init_opt_acq_value()\n        while not pq.empty() and t > t_min:\n            elem = pq.get()\n            if self.optimizemode is OptimizeMode.Maximize:\n                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)\n            else:\n                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)\n            ap = math.exp(temp_exp)\n            if ap >= random.uniform(0, 1):\n                for temp_graph in transform(elem.graph):\n                    if contain(descriptors, temp_graph.extract_descriptor()):\n                        continue\n\n                    temp_acq_value = self.acq(temp_graph)\n                    pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))\n                    descriptors.append(temp_graph.extract_descriptor())\n                    if self._accept_new_acq_value(opt_acq, temp_acq_value):\n                        opt_acq = temp_acq_value\n                        father_id = elem.father_id\n                        target_graph = deepcopy(temp_graph)\n            t *= alpha\n\n        \n        if father_id is None:\n            return None, None\n        nm_graph = self.searcher.load_model_by_id(father_id)\n        for args in target_graph.operation_history:\n            getattr(nm_graph, args[0])(*list(args[1:]))\n        return nm_graph, father_id", "docstring": "Generate new architecture.\nArgs:\ndescriptors: All the searched neural architectures.\nReturns:\ngraph: An instance of Graph. A morphed neural network with weights.\nfather_id: The father node ID in the search tree.", "source": "juraj-google-style"}
{"code": "def read_as_base64(fn):\n    with open(fn) as unpacked_file:\n        with tempfile.TemporaryFile() as b64_file:\n            base64.encode(unpacked_file, b64_file)\n            b64_file.flush()\n            b64_file.seek(0)\n            return b64_file.read()", "docstring": "Convert given `fn` to base64 and return it. This method does the process\nin not-so-much memory consuming way.\n\nArgs:\nfn (str): Path to the file which should be converted.\n\nReturns:\nstr: File encoded as base64.", "source": "codesearchnet"}
{"code": "def __init__(self, cumulative=IGNORED, name=IGNORED, scalar=IGNORED, kind=IGNORED):\n    if name != IGNORED and (not isinstance(name, MetricStructuredNameMatcher)):\n        raise ValueError('name must be a MetricStructuredNameMatcher.')\n    self.cumulative = cumulative\n    self.name = name\n    self.scalar = scalar\n    self.kind = kind", "docstring": "Creates a MetricUpdateMatcher.\n\nAny property not passed in to the constructor will be ignored when matching.\n\nArgs:\ncumulative: A boolean.\nname: A MetricStructuredNameMatcher object that matches the name.\nscalar: An integer with the metric update.\nkind: A string defining the kind of counter.", "source": "github-repos"}
{"code": "def get_config(self):\n    all_args = tf_inspect.getfullargspec(self.__init__).args\n    config = {'name': self.name, 'trainable': self.trainable}\n    if hasattr(self, '_batch_input_shape'):\n        config['batch_input_shape'] = self._batch_input_shape\n    config['dtype'] = policy.serialize(self._dtype_policy)\n    if hasattr(self, 'dynamic'):\n        if self.dynamic:\n            config['dynamic'] = self.dynamic\n        elif 'dynamic' in all_args:\n            all_args.remove('dynamic')\n    expected_args = config.keys()\n    extra_args = [arg for arg in all_args if arg not in expected_args]\n    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n        raise NotImplementedError('Layers with arguments in `__init__` must override `get_config`.')\n    return config", "docstring": "Returns the config of the layer.\n\nA layer config is a Python dictionary (serializable)\ncontaining the configuration of a layer.\nThe same layer can be reinstantiated later\n(without its trained weights) from this configuration.\n\nThe config of a layer does not include connectivity\ninformation, nor the layer class name. These are handled\nby `Network` (one layer of abstraction above).\n\nReturns:\nPython dictionary.", "source": "github-repos"}
{"code": "def _apply_credentials(auto_refresh=True, credentials=None, headers=None):\n    token = credentials.get_credentials().access_token\n    if (auto_refresh is True):\n        if (token is None):\n            token = credentials.refresh(access_token=None, timeout=10)\n        elif credentials.jwt_is_expired():\n            token = credentials.refresh(timeout=10)\n    headers.update({'Authorization': 'Bearer {}'.format(token)})", "docstring": "Update Authorization header.\n\nUpdate request headers with latest `access_token`. Perform token\n`refresh` if token is ``None``.\n\nArgs:\nauto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.\ncredentials (class): Read-only credentials.\nheaders (class): Requests `CaseInsensitiveDict`.", "source": "codesearchnet"}
{"code": "def set_large_file_size(self, st_size):\n    self._check_positive_int(st_size)\n    if self.st_size:\n        self.size = 0\n    if self.filesystem:\n        self.filesystem.change_disk_usage(st_size, self.name, self.st_dev)\n    self.st_size = st_size\n    self._byte_contents = None", "docstring": "Sets the self.st_size attribute and replaces self.content with None.\n\nProvided specifically to simulate very large files without regards\nto their content (which wouldn't fit in memory).\nNote that read/write operations with such a file raise\n:py:class:`FakeLargeFileIoException`.\n\nArgs:\nst_size: (int) The desired file size\n\nRaises:\nIOError: if the st_size is not a non-negative integer,\nor if st_size exceeds the available file system space", "source": "codesearchnet"}
{"code": "def _attach_debugger_logic(model, debug_path: Optional[str]='.', do_prune_layers: Optional[bool]=True, use_repr: bool=True):\n    class_name = model.__class__.__name__\n    model._call_tree = {'module_path': class_name, 'inputs': None, 'outputs': None, 'children': []}\n    model._debugger_model_call_stack = []\n    model._debugger_module_dump_name = class_name\n    if debug_path:\n        try:\n            os.makedirs(debug_path, exist_ok=True)\n        except Exception as e:\n            raise ValueError(f'Unexpected or existing debug_path={debug_path}.') from e\n\n    def wrap_forward(module, full_path):\n        orig_forward = module.forward\n\n        @functools.wraps(orig_forward)\n        def wrapped_forward(*inps, **kws):\n            if _is_rank_zero():\n                dict_inputs = {'args': inps, 'kwargs': kws}\n                dict_inputs = {k: dict_inputs[k] for k in dict_inputs if len(dict_inputs[k]) > 0}\n                node = {'module_path': full_path, 'inputs': _serialize_io(dict_inputs, debug_path=debug_path, use_repr=use_repr, path_to_value=f'{full_path}_inputs'), 'outputs': None, 'children': []}\n                model._debugger_model_call_stack.append(node)\n            with torch.no_grad():\n                out = orig_forward(*inps, **kws)\n            if _is_rank_zero():\n                if sum((1 for _ in module.named_children())) > 0:\n                    node['outputs'] = None\n                else:\n                    node['outputs'] = _serialize_io(out, debug_path=debug_path, use_repr=use_repr, path_to_value=f'{full_path}_outputs')\n                finished = model._debugger_model_call_stack.pop()\n                if not finished['children']:\n                    finished.pop('children')\n                if model._debugger_model_call_stack:\n                    model._debugger_model_call_stack[-1]['children'].append(finished)\n            return out\n        module.forward = wrapped_forward\n    for name, submodule in model.named_modules():\n        if name == '':\n            continue\n        wrap_forward(submodule, f'{class_name}.{name}')\n    real_top_forward = model.forward\n\n    @functools.wraps(real_top_forward)\n    def top_wrapped_forward(*inps, **kws):\n        if _is_rank_zero():\n            top_node = {'module_path': f'{class_name} (top-level)', 'inputs': _serialize_io({'args': inps, 'kwargs': kws}, debug_path=debug_path, use_repr=use_repr, path_to_value=f'{class_name}_inputs'), 'outputs': None, 'children': []}\n            model._debugger_model_call_stack.append(top_node)\n        out = real_top_forward(*inps, **kws)\n        if _is_rank_zero() and model._debugger_model_call_stack:\n            top_node['outputs'] = _serialize_io(out, debug_path=debug_path, use_repr=use_repr, path_to_value=f'{class_name}_outputs')\n            finished = model._debugger_model_call_stack.pop()\n            model._call_tree['inputs'] = finished['inputs']\n            model._call_tree['outputs'] = finished['outputs']\n            model._call_tree['children'] = finished['children']\n            [model._call_tree.pop(k, None) for k in list(model._call_tree.keys()) if not model._call_tree[k]]\n            if do_prune_layers:\n                prune_intermediate_layers(model._call_tree)\n            log_model_debug_trace(debug_path=debug_path, model=model)\n        return out\n    model.forward = top_wrapped_forward", "docstring": "Attaches a debugging wrapper to every module in the model.\n\nThis records structured inputs and outputs during the forward pass 
into a call tree.\n\nArgs:\nmodel (`PreTrainedModel`, `nn.Module`): Model to wrap.\ndebug_path (`str`): Optional directory to dump debug JSON files.\ndo_prune_layers (`bool`, *optional*, defaults to `True`): Whether to prune intermediate layers.\nuse_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensors as the\n`value` property in the asscoiated FULL_TENSORS.json file, or to store full tensors in separate SafeTensors\nfiles and store the relative path to that file in the `value` property.", "source": "github-repos"}
{"code": "def save(self, clean=True):\n    ret = {}\n    if clean:\n        self._dirty = False\n    else:\n        ret['_dirty'] = self._dirty\n    return ret", "docstring": "Serialize into raw representation. Clears the dirty bit by default.\n\nArgs:\nclean (bool): Whether to clear the dirty bit.\n\nReturns:\ndict: Raw.", "source": "codesearchnet"}
{"code": "def evaluate(self, node: InstanceNode) -> XPathValue:\n        \n        return self._eval(XPathContext(node, node, 1, 1))", "docstring": "Evaluate the receiver and return the result.\n\nArgs:\nnode: Context node for XPath evaluation.\n\nRaises:\nXPathTypeError: If a subexpression of the receiver is of a wrong\ntype.", "source": "juraj-google-style"}
{"code": "def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices, forward_input, loop_vars):\n    init_slices = _create_grad_indexed_slices_init(grad_output_slices, forward_input)\n    with body_grad_graph.as_default():\n        input_slices = indexed_slices.IndexedSlices(values=body_grad_graph.capture(init_slices.values, allowlisted=True), indices=body_grad_graph.capture(init_slices.indices, allowlisted=True), dense_shape=body_grad_graph.capture(init_slices.dense_shape, allowlisted=True))\n        for t in _flatten(init_slices):\n            captured_t = body_grad_graph.captures.pop(t)\n            body_grad_graph.inputs.remove(captured_t)\n        new_output_slices = _rewrite_grad_indexed_slices_output(grad_output_slices, input_slices)\n    return _update_indexed_slices_param(body_grad_graph, loop_vars, init_slices, input_slices, new_output_slices, grad_output_slices)", "docstring": "Rewrites grad_output_slices's corresponding input to be an IndexedSlices.\n\nThis rewrite requires that forward_input was captured in the forward loop,\ni.e. is not a user-specified loop variable. This is important because the\nrewrite assumes that forward_input is passed through to its corresponding\noutput unchanged. This assumption is used in _rewrite_input_as_indexed_slices,\nwhich depends on the exact gradient structure produced by the input's fanout.\n\nThis can yield a more efficient computation than using\n_rewrite_output_as_tensor, since it preserves the IndexedSlices structure\ninstead of converting the IndexedSlices to a dense Tensor.\n\nArgs:\nbody_grad_graph: _WhileBodyGradFuncGraph.\ngrad_output_slices: IndexedSlices output of body_grad_graph.\nforward_input: the corresponding Tensor input to the forward loop.\nloop_vars: list of Tensors. The inputs to body_grad_graph.\n\nReturns:\nThe new loop_vars to pass to body_grad_graph.", "source": "github-repos"}
{"code": "def chat(self, id):\n        \n        json = self.skype.conn(\"GET\", \"{0}/users/ME/conversations/{1}\".format(self.skype.conn.msgsHost, id),\n                               auth=SkypeConnection.Auth.RegToken, params={\"view\": \"msnp24Equivalent\"}).json()\n        cls = SkypeSingleChat\n        if \"threadProperties\" in json:\n            info = self.skype.conn(\"GET\", \"{0}/threads/{1}\".format(self.skype.conn.msgsHost, json.get(\"id\")),\n                                   auth=SkypeConnection.Auth.RegToken, params={\"view\": \"msnp24Equivalent\"}).json()\n            json.update(info)\n            cls = SkypeGroupChat\n        return self.merge(cls.fromRaw(self.skype, json))", "docstring": "Get a single conversation by identifier.\n\nArgs:\nid (str): single or group chat identifier", "source": "juraj-google-style"}
{"code": "def get_sequence_sliding_window_properties(self, scale, window, representative_only=True):\n    if representative_only:\n        if (not self.representative_sequence):\n            log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))\n            return\n        if (not self.representative_sequence.seq):\n            log.warning('{}: representative sequence {} set, but no sequence stored. Cannot get sequence properties.'.format(self.id, self.representative_sequence.id))\n            return\n        self.representative_sequence.get_sliding_window_properties(scale=scale, window=window)\n    if (not representative_only):\n        for s in self.sequences:\n            if (not s.seq):\n                log.warning('{}: no sequence stored. Cannot get sequence properties.'.format(s.id))\n                continue\n            else:\n                s.get_sliding_window_properties(scale=scale, window=window)", "docstring": "Run Biopython ProteinAnalysis with a sliding window to calculate a given property.\nResults are stored in the protein's respective SeqProp objects at ``.letter_annotations``\n\nArgs:\nscale (str): Scale name\nwindow (int): Sliding window size\nrepresentative_only (bool): If analysis should only be run on the representative sequence", "source": "codesearchnet"}
{"code": "def range_dimension_tensor(self, name='range_dimension_tensor'):\n    with self._name_scope(name):\n        return self._range_dimension_tensor()", "docstring": "Dimension (in the sense of vector spaces) of the range of this operator.\n\nDetermined at runtime.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`int32` `Tensor`", "source": "github-repos"}
{"code": "def create_trial_from_spec(spec, output_path, parser, **trial_kwargs):\n    try:\n        args = parser.parse_args(to_argv(spec))\n    except SystemExit:\n        raise TuneError('Error parsing args, see above message', spec)\n    if ('resources_per_trial' in spec):\n        trial_kwargs['resources'] = json_to_resources(spec['resources_per_trial'])\n    return Trial(trainable_name=spec['run'], config=spec.get('config', {}), local_dir=os.path.join(args.local_dir, output_path), stopping_criterion=spec.get('stop', {}), checkpoint_freq=args.checkpoint_freq, checkpoint_at_end=args.checkpoint_at_end, keep_checkpoints_num=args.keep_checkpoints_num, checkpoint_score_attr=args.checkpoint_score_attr, export_formats=spec.get('export_formats', []), restore_path=spec.get('restore'), upload_dir=args.upload_dir, trial_name_creator=spec.get('trial_name_creator'), loggers=spec.get('loggers'), sync_function=spec.get('sync_function'), max_failures=args.max_failures, **trial_kwargs)", "docstring": "Creates a Trial object from parsing the spec.\n\nArguments:\nspec (dict): A resolved experiment specification. Arguments should\nThe args here should correspond to the command line flags\nin ray.tune.config_parser.\noutput_path (str); A specific output path within the local_dir.\nTypically the name of the experiment.\nparser (ArgumentParser): An argument parser object from\nmake_parser.\ntrial_kwargs: Extra keyword arguments used in instantiating the Trial.\n\nReturns:\nA trial object with corresponding parameters to the specification.", "source": "codesearchnet"}
{"code": "def from_text_files(cls, path, field, train, validation, test=None, bs=64, bptt=70, **kwargs):\n    (trn_ds, val_ds, test_ds) = ConcatTextDataset.splits(path, text_field=field, train=train, validation=validation, test=test)\n    return cls(path, field, trn_ds, val_ds, test_ds, bs, bptt, **kwargs)", "docstring": "Method used to instantiate a LanguageModelData object that can be used for a\nsupported nlp task.\n\nArgs:\npath (str): the absolute path in which temporary model data will be saved\nfield (Field): torchtext field\ntrain (str): file location of the training data\nvalidation (str): file location of the validation data\ntest (str): file location of the testing data\nbs (int): batch size to use\nbptt (int): back propagation through time hyper-parameter\nkwargs: other arguments\n\nReturns:\na LanguageModelData instance, which most importantly, provides us the datasets for training,\nvalidation, and testing\n\nNote:\nThe train, validation, and test path can be pointed to any file (or folder) that contains a valid\ntext corpus.", "source": "codesearchnet"}
{"code": "def remove_file(profile, branch, file_path, commit_message=None):\n    branch_sha = get_branch_sha(profile, branch)\n    tree = get_files_in_branch(profile, branch_sha)\n    new_tree = remove_file_from_tree(tree, file_path)\n    data = trees.create_tree(profile, new_tree)\n    sha = data.get('sha')\n    if (not commit_message):\n        commit_message = (('Deleted ' + file_path) + '.')\n    parents = [branch_sha]\n    commit_data = commits.create_commit(profile, commit_message, sha, parents)\n    commit_sha = commit_data.get('sha')\n    ref_data = refs.update_ref(profile, ('heads/' + branch), commit_sha)\n    return ref_data", "docstring": "Remove a file from a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch\nThe name of a branch.\n\nfile_path\nThe path of the file to delete.\n\ncommit_message\nA commit message to give to the commit.\n\nReturns:\nA dict with data about the branch's new ref (it includes the new SHA\nthe branch's HEAD points to, after committing the new file).", "source": "codesearchnet"}
{"code": "def resolve_topic(topic):\n    try:\n        (module_name, _, class_name) = topic.partition('\n        module = importlib.import_module(module_name)\n    except ImportError as e:\n        raise TopicResolutionError('{}: {}'.format(topic, e))\n    try:\n        cls = resolve_attr(module, class_name)\n    except AttributeError as e:\n        raise TopicResolutionError('{}: {}'.format(topic, e))\n    return cls", "docstring": "Return class described by given topic.\n\nArgs:\ntopic: A string describing a class.\n\nReturns:\nA class.\n\nRaises:\nTopicResolutionError: If there is no such class.", "source": "codesearchnet"}
{"code": "def copy_function(func, name=None):\n    \n    code = func.__code__\n    newname = name or func.__name__\n    newcode = CodeType(\n        code.co_argcount,\n        code.co_kwonlyargcount,\n        code.co_nlocals,\n        code.co_stacksize,\n        code.co_flags,\n        code.co_code,\n        code.co_consts,\n        code.co_names,\n        code.co_varnames,\n        code.co_filename,\n        newname,\n        code.co_firstlineno,\n        code.co_lnotab,\n        code.co_freevars,\n        code.co_cellvars,\n    )\n    newfunc = FunctionType(\n        newcode,\n        func.__globals__,\n        newname,\n        func.__defaults__,\n        func.__closure__,\n    )\n    newfunc.__dict__.update(func.__dict__)\n    return newfunc", "docstring": "Copy a function object with different name.\n\nArgs:\nfunc (function): Function to be copied.\nname (string, optional): Name of the new function.\nIf not spacified, the same name of `func` will be used.\n\nReturns:\nnewfunc (function): New function with different name.", "source": "juraj-google-style"}
{"code": "def __init__(self, experimenter=None, exp_type=None):\n        \n        super().__init__()\n        self.experimenter = experimenter\n        self.exp_type = exp_type", "docstring": "Create a ExperimenterMultipartHeader with the parameters below.\n\nArgs:\nexperimenter: Experimenter ID which takes the same form as in\nstruct ofp_experimenter_header (\n:class:`~pyof.v0x04.symmetric.experimenter.ExperimenterHeader`)\nexp_type: Experimenter defined.", "source": "juraj-google-style"}
{"code": "def get_symmetry_operations(self, cartesian=False):\n    (rotation, translation) = self._get_symmetry()\n    symmops = []\n    mat = self._structure.lattice.matrix.T\n    invmat = np.linalg.inv(mat)\n    for (rot, trans) in zip(rotation, translation):\n        if cartesian:\n            rot = np.dot(mat, np.dot(rot, invmat))\n            trans = np.dot(trans, self._structure.lattice.matrix)\n        op = SymmOp.from_rotation_and_translation(rot, trans)\n        symmops.append(op)\n    return symmops", "docstring": "Return symmetry operations as a list of SymmOp objects.\nBy default returns fractional coord symmops.\nBut cartesian can be returned too.\n\nReturns:\n([SymmOp]): List of symmetry operations.", "source": "codesearchnet"}
{"code": "def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor:\n    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))", "docstring": "Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to\nmake the model jitable.\n\nArgs:\nx (`torch.tensor`):\ninput hidden states", "source": "github-repos"}
{"code": "def add_listener(self, callback, event_type=None):\n        \n        listener_uid = uuid4()\n        \n        \n        \n        self.listeners.append(\n            {\n                'uid': listener_uid,\n                'callback': callback,\n                'event_type': event_type\n            }\n        )\n        return listener_uid", "docstring": "Add a listener that will send a callback when the client recieves\nan event.\n\nArgs:\ncallback (func(roomchunk)): Callback called when an event arrives.\nevent_type (str): The event_type to filter for.\n\nReturns:\nuuid.UUID: Unique id of the listener, can be used to identify the listener.", "source": "juraj-google-style"}
{"code": "def find_certs() -> str:\n    bundle = path.realpath(path.dirname(httplib2.CA_CERTS))\n    if (not bundle.startswith(path.dirname(httplib2.__file__))):\n        return bundle\n    for (platform, files) in PLATFORM_FILES.items():\n        if sys.platform.startswith(platform):\n            for cert_file in files:\n                if path.exists(cert_file):\n                    return cert_file\n    if path.exists(getenv('CURL_CA_BUNDLE', '')):\n        return getenv('CURL_CA_BUNDLE')\n    if ALLOW_FALLBACK:\n        warnings.warn('No system certs detected, falling back to bundled', RuntimeWarning)\n        return httplib2.CA_CERTS\n    else:\n        raise RuntimeError('No system certs detected!')", "docstring": "Find suitable certificates for ``httplib2``.\n\nWarning:\nThe default behaviour is to fall back to the bundled certificates when\nno system certificates can be found.  If you're packaging ``jnrbase``\n*please* set ``ALLOW_FALLBACK`` to ``False`` to disable this very much\nunwanted behaviour, but please maintain the option so that downstream\nusers can inspect the configuration easily.\n\nSee also: :pypi:`httplib2`\n\nReturns:\nPath to SSL certificates\nRaises:\nRuntimeError: When no suitable certificates are found", "source": "codesearchnet"}
{"code": "def find_wells_without_curve(self, mnemonic, alias=None):\n    return Project([w for w in self if (w.get_curve(mnemonic, alias=alias) is None)])", "docstring": "Returns a new Project with only the wells which DO NOT have the named curve.\n\nArgs:\nmenmonic (str): the name of the curve to look for.\nalias (dict): a welly alias dictionary.\n\nReturns:\nproject.", "source": "codesearchnet"}
{"code": "def __init__(self, path, auto_reboot_args=None, keep_explorer=False, add_all_devices=False):\n        \n        super(SimpleTestResult, self).__init__()\n        self.path = path\n        self.auto_reboot_args = auto_reboot_args\n        self.result = json.load(open(self.path, 'r'))\n        self.log_handler = None\n        self.started = None\n        self.keep_explorer = keep_explorer\n        self.add_all_devices = add_all_devices\n        SimpleTestResult.executions += 1\n        logger.info('Initial state is %s', json.dumps(self.result, indent=2))", "docstring": "Record test results in json file\n\nArgs:\npath (str): File path to record the results\nauto_reboot (bool): Whether reboot when harness die", "source": "juraj-google-style"}
{"code": "def group_sub_entities(self, entities: List[dict]) -> dict:\n    entity = entities[0]['entity'].split('-', 1)[-1]\n    scores = np.nanmean([entity['score'] for entity in entities])\n    tokens = [entity['word'] for entity in entities]\n    entity_group = {'entity_group': entity, 'score': np.mean(scores), 'word': self.tokenizer.convert_tokens_to_string(tokens), 'start': entities[0]['start'], 'end': entities[-1]['end']}\n    return entity_group", "docstring": "Group together the adjacent tokens with the same entity predicted.\n\nArgs:\nentities (`dict`): The entities predicted by the pipeline.", "source": "github-repos"}
{"code": "def assertRaisesWithPredicateMatch(self, exception_type, expected_err_re_or_predicate):\n    if callable(expected_err_re_or_predicate):\n        predicate = expected_err_re_or_predicate\n    else:\n\n        def predicate(e):\n            if isinstance(e, errors.OpError):\n                e = cast(errors.OpError, e)\n                err_str = cast(str, e.message)\n                op = e.op\n            else:\n                err_str = str(e)\n                op = None\n            while op is not None:\n                err_str += '\\nCaused by: ' + op.name\n                op = op._original_op\n            logging.info(\"Searching within error strings: '%s' within '%s'\", expected_err_re_or_predicate, err_str)\n            return re.search(expected_err_re_or_predicate, err_str)\n    try:\n        yield\n        self.fail(exception_type.__name__ + ' not raised')\n    except Exception as e:\n        if not isinstance(e, exception_type) or not predicate(e):\n            raise AssertionError('Exception of type %s: %s' % (str(type(e)), str(e)))", "docstring": "Returns a context manager to enclose code expected to raise an exception.\n\nIf the exception is an OpError, the op stack is also included in the message\npredicate search.\n\nArgs:\nexception_type: The expected type of exception that should be raised.\nexpected_err_re_or_predicate: If this is callable, it should be a function\nof one argument that inspects the passed-in exception and returns True\n(success) or False (please fail the test). Otherwise, the error message\nis expected to match this regular expression partially.\n\nReturns:\nA context manager to surround code that is expected to raise an\nexception.", "source": "github-repos"}
{"code": "def __call__(self, request: Union[Chunk, List[Chunk]], *args, **kwargs) -> List[Tuple[Chunk, Dict[str, Any]]]:\n    requests = request if isinstance(request, list) else [request]\n    query = self.vector_search_parameters.format_query(requests)\n    if self.log_query:\n        _LOGGER.info('Executing query %s', query)\n    query_job = self.client.query(query)\n    results = query_job.result()\n    results_by_id = {}\n    for result_row in results:\n        result_dict = dict(result_row.items())\n        results_by_id[result_row.id] = result_dict\n    response = []\n    for chunk in requests:\n        result_dict = results_by_id.get(chunk.id, {})\n        response.append((chunk, result_dict))\n    return response", "docstring": "Process request(s) using BigQuery vector search.\n\nArgs:\nrequest: Single Chunk with embedding or list of Chunk's with\nembeddings to process\n\nReturns:\nChunk(s) where chunk.metadata['enrichment_output'] contains the\ndata retrieved via BigQuery VECTOR_SEARCH.", "source": "github-repos"}
{"code": "def min_count(self, n=1):\n    \n    word_count = {w:c for w,c in iteritems(self.word_count) if c >= n}\n    return CountedVocabulary(word_count=word_count)", "docstring": "Returns a vocabulary after eliminating the words that appear < `n`.\n\nArgs:\nn (integer): specifies the minimum word frequency allowed.", "source": "juraj-google-style"}
{"code": "def rename(self, source_file_names, destination_file_names):\n    raise NotImplementedError", "docstring": "Rename the files at the source list to the destination list.\nSource and destination lists should be of the same size.\n\nArgs:\nsource_file_names: List of file paths that need to be moved\ndestination_file_names: List of destination_file_names for the files\n\nRaises:\n``BeamIOError``: if any of the rename operations fail", "source": "github-repos"}
{"code": "def _init_metadata_service(self, version):\n        \n        metadata_cfg = self._load_config_section(CONFIG_METADATA_SECTION)\n        self._token_metadata = metadata_cfg[CONFIG_TOKEN]\n        proto = metadata_cfg[CONFIG_PROTOCOL]\n        host = metadata_cfg[CONFIG_HOST]\n\n        self._metadata = MetadataService(host, version)\n        self._metadata.base_protocol = proto\n        self._metadata.set_auth(self._token_metadata)", "docstring": "Method to initialize the Metadata Service from the config data\n\nArgs:\nversion (string): Version of Boss API to use.\n\nReturns:\nNone\n\nRaises:\n(KeyError): if given invalid version.", "source": "juraj-google-style"}
{"code": "def get_op_name(tensor_name):\n    if not tensor_name:\n        raise ValueError(f'Tensor name cannot be empty or None. Received: {tensor_name}.')\n    if tensor_name.startswith('^'):\n        tensor_name = tensor_name[1:]\n    if ':' in tensor_name:\n        op_name, _ = tensor_name.split(':')\n        return op_name\n    return tensor_name", "docstring": "Extract the Op name from a Tensor name.\n\nThe Op name is everything before a colon, if present,\nnot including any ^ prefix denoting a control dependency.\n\nArgs:\ntensor_name: the full name of a Tensor in the graph.\nReturns:\nThe name of the Op of which the given Tensor is an output.\nRaises:\nValueError: if tensor_name is None or empty.", "source": "github-repos"}
{"code": "def job_history(backend):\n    \n    year = widgets.Output(layout=widgets.Layout(display='flex-inline',\n                                                align_items='center',\n                                                min_height='400px'))\n\n    month = widgets.Output(layout=widgets.Layout(display='flex-inline',\n                                                 align_items='center',\n                                                 min_height='400px'))\n\n    week = widgets.Output(layout=widgets.Layout(display='flex-inline',\n                                                align_items='center',\n                                                min_height='400px'))\n\n    tabs = widgets.Tab(layout=widgets.Layout(max_height='620px'))\n    tabs.children = [year, month, week]\n    tabs.set_title(0, 'Year')\n    tabs.set_title(1, 'Month')\n    tabs.set_title(2, 'Week')\n    tabs.selected_index = 1\n\n    _build_job_history(tabs, backend)\n    return tabs", "docstring": "Widget for displaying job history\n\nArgs:\nbackend (IBMQbackend): The backend.\n\nReturns:\nTab: A tab widget for history images.", "source": "juraj-google-style"}
{"code": "def create(cls, **kwargs):\n        \n        try:\n            return cls.add(cls.new(**kwargs))\n        except:\n            cls.session.rollback()\n            raise", "docstring": "Initializes a new instance, adds it to the db and commits\nthe transaction.\n\nArgs:\n\n**kwargs: The keyword arguments for the init constructor.\n\nExamples:\n\n>>> user = User.create(name=\"Vicky\", email=\"vicky@h.com\")\n>>> user.id\n35", "source": "juraj-google-style"}
{"code": "def getSwarmModelParams(modelID):\n    cjDAO = ClientJobsDAO.get()\n    (jobID, description) = cjDAO.modelsGetFields(modelID, ['jobId', 'genDescription'])\n    (baseDescription,) = cjDAO.jobGetFields(jobID, ['genBaseDescription'])\n    descriptionDirectory = tempfile.mkdtemp()\n    try:\n        baseDescriptionFilePath = os.path.join(descriptionDirectory, 'base.py')\n        with open(baseDescriptionFilePath, mode='wb') as f:\n            f.write(baseDescription)\n        descriptionFilePath = os.path.join(descriptionDirectory, 'description.py')\n        with open(descriptionFilePath, mode='wb') as f:\n            f.write(description)\n        expIface = helpers.getExperimentDescriptionInterfaceFromModule(helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))\n        return json.dumps(dict(modelConfig=expIface.getModelDescription(), inferenceArgs=expIface.getModelControl().get('inferenceArgs', None)))\n    finally:\n        shutil.rmtree(descriptionDirectory, ignore_errors=True)", "docstring": "Retrieve the Engine-level model params from a Swarm model\n\nArgs:\nmodelID - Engine-level model ID of the Swarm model\n\nReturns:\nJSON-encoded string containing Model Params", "source": "codesearchnet"}
{"code": "def close(self):\n    if self._session and (not self._closed):\n        self._closed = True\n        tf_session.TF_CloseSession(self._session)", "docstring": "Closes this session.\n\nCalling this method frees all resources associated with the session.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\nclosing the TensorFlow session.", "source": "github-repos"}
{"code": "def y_score(estimator, X):\n    \n    try:\n        y = estimator.predict_proba(X)\n        return y[:, 1]\n    except(AttributeError):\n        return estimator.decision_function(X)", "docstring": "Score examples from a new matrix X\nArgs:\nestimator: an sklearn estimator object\nX: design matrix with the same features that the estimator was trained on\n\nReturns: a vector of scores of the same length as X\n\nNote that estimator.predict_proba is preferred but when unavailable\n(e.g. SVM without probability calibration) decision_function is used.", "source": "juraj-google-style"}
{"code": "def has_inf_or_nan(datum, tensor):\n    _ = datum\n    if isinstance(tensor, InconvertibleTensorProto):\n        return False\n    elif np.issubdtype(tensor.dtype, np.floating) or np.issubdtype(tensor.dtype, np.complexfloating) or np.issubdtype(tensor.dtype, np.integer):\n        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))\n    else:\n        return False", "docstring": "A predicate for whether a tensor consists of any bad numerical values.\n\nThis predicate is common enough to merit definition in this module.\nBad numerical values include `nan`s and `inf`s.\nThe signature of this function follows the requirement of the method\n`DebugDumpDir.find()`.\n\nArgs:\ndatum: (`DebugTensorDatum`) Datum metadata.\ntensor: (`numpy.ndarray` or None) Value of the tensor. None represents\nan uninitialized tensor.\n\nReturns:\n(`bool`) True if and only if tensor consists of any nan or inf values.", "source": "github-repos"}
{"code": "def _handle_port_request(self, client_data, writer):\n        \n        try:\n            pid = int(client_data)\n        except ValueError as error:\n            self._client_request_errors += 1\n            log.warning('Could not parse request: %s', error)\n            return\n\n        log.info('Request on behalf of pid %d.', pid)\n        log.info('cmdline: %s', _get_process_command_line(pid))\n\n        if not _should_allocate_port(pid):\n            self._denied_allocations += 1\n            return\n\n        port = self._port_pool.get_port_for_process(pid)\n        if port > 0:\n            self._total_allocations += 1\n            writer.write('{:d}\\n'.format(port).encode('utf-8'))\n            log.debug('Allocated port %d to pid %d', port, pid)\n        else:\n            self._denied_allocations += 1", "docstring": "Given a port request body, parse it and respond appropriately.\n\nArgs:\nclient_data: The request bytes from the client.\nwriter: The asyncio Writer for the response to be written to.", "source": "juraj-google-style"}
{"code": "def GetShadowMap(self, since=None):\n    return ShadowUpdateGetter().GetUpdates(self._GetClient(), self.conf['bucket'], self.conf['shadow_object'], since)", "docstring": "Return the shadow map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of shadow.ShadowMap", "source": "github-repos"}
{"code": "def _write_session(self):\n    base_name = ('%ssession' % self._product_accronym.lower())\n    filename = ('%s%s.py' % (self._class_prefix.lower(), base_name))\n    override_content = self._extract_override_content(base_name)\n    self.write(destination=self.output_directory, filename=filename, template_name='session.py.tpl', version=self.api_version, product_accronym=self._product_accronym, class_prefix=self._class_prefix, root_api=self.api_root, api_prefix=self.api_prefix, override_content=override_content, header=self.header_content)", "docstring": "Write SDK session file\n\nArgs:\nversion (str): the version of the server", "source": "codesearchnet"}
{"code": "def input_waiting(self):\n    buf = array.array('I', [0])\n    try:\n        fcntl.ioctl(self._fd, termios.TIOCINQ, buf, True)\n    except OSError as e:\n        raise SerialError(e.errno, ('Querying input waiting: ' + e.strerror))\n    return buf[0]", "docstring": "Query the number of bytes waiting to be read from the serial port.\n\nReturns:\nint: number of bytes waiting to be read.\n\nRaises:\nSerialError: if an I/O or OS error occurs.", "source": "codesearchnet"}
{"code": "def loss(logits, labels):\n  \n  labels = tf.to_int64(labels)\n  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n      logits=logits, labels=labels, name='xentropy')\n  return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "docstring": "Calculates the loss from the logits and the labels.\n\nArgs:\nlogits: Logits tensor, float - [batch_size, NUM_CLASSES].\nlabels: Labels tensor, int32 - [batch_size].\nReturns:\nloss: Loss tensor of type float.", "source": "juraj-google-style"}
{"code": "def path(self, goal):\n        \n        if goal == self.name:\n            return [self]\n\n        if goal not in self.routes:\n            raise ValueError(\"Unknown '{0}'\".format(goal))\n\n        obj = self\n        path = [obj]\n        while True:\n            obj = obj.routes[goal].direction\n            path.append(obj)\n            if obj.name == goal:\n                break\n        return path", "docstring": "Get the shortest way between two nodes of the graph\n\nArgs:\ngoal (str): Name of the targeted node\nReturn:\nlist of Node", "source": "juraj-google-style"}
{"code": "def _lookup_model(cls, kind, default_model=None):\n    \n    modelclass = cls._kind_map.get(kind, default_model)\n    if modelclass is None:\n      raise KindError(\n          \"No model class found for kind '%s'. Did you forget to import it?\" %\n          kind)\n    return modelclass", "docstring": "Get the model class for the kind.\n\nArgs:\nkind: A string representing the name of the kind to lookup.\ndefault_model: The model class to use if the kind can't be found.\n\nReturns:\nThe model class for the requested kind.\nRaises:\nKindError: The kind was not found and no default_model was provided.", "source": "juraj-google-style"}
{"code": "def vectorize(density_matrix, method='col'):\n    density_matrix = np.array(density_matrix)\n    if (method == 'col'):\n        return density_matrix.flatten(order='F')\n    elif (method == 'row'):\n        return density_matrix.flatten(order='C')\n    elif (method in ['pauli', 'pauli_weights']):\n        num = int(np.log2(len(density_matrix)))\n        if (len(density_matrix) != (2 ** num)):\n            raise Exception('Input state must be n-qubit state')\n        if (method == 'pauli_weights'):\n            pgroup = pauli_group(num, case='weight')\n        else:\n            pgroup = pauli_group(num, case='tensor')\n        vals = [np.trace(np.dot(p.to_matrix(), density_matrix)) for p in pgroup]\n        return np.array(vals)\n    return None", "docstring": "Flatten an operator to a vector in a specified basis.\n\nArgs:\ndensity_matrix (ndarray): a density matrix.\nmethod (str): the method of vectorization. Allowed values are\n- 'col' (default) flattens to column-major vector.\n- 'row' flattens to row-major vector.\n- 'pauli'flattens in the n-qubit Pauli basis.\n- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by\nweight.\n\nReturns:\nndarray: the resulting vector.\nRaises:\nException: if input state is not a n-qubit state", "source": "codesearchnet"}
{"code": "def has_request(self, request):\n    queue_item = QueueItem(request, Response(request.url))\n    key = queue_item.get_hash()\n    for status in QueueItem.STATUSES:\n        if (key in self.__get_var(('items_' + status)).keys()):\n            return True\n    return False", "docstring": "Check if the given request already exists in the queue.\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The request to check.\n\nReturns:\nbool: True if already exists, False otherwise.", "source": "codesearchnet"}
{"code": "def command(self, cmd_name, callback, *args):\n    cmd = JLinkCommand(cmd_name, args, callback)\n    self._commands.put(cmd)", "docstring": "Run an asynchronous command.\n\nArgs:\ncmd_name (int): The unique code for the command to execute.\ncallback (callable): The optional callback to run when the command finishes.\nThe signature should be callback(cmd_name, result, exception)\n*args: Any arguments that are passed to the underlying command handler", "source": "codesearchnet"}
{"code": "def get_model(self, opt_fn, emb_sz, n_hid, n_layers, **kwargs):\n    m = get_language_model(self.nt, emb_sz, n_hid, n_layers, self.pad_idx, **kwargs)\n    model = SingleModel(to_gpu(m))\n    return RNN_Learner(self, model, opt_fn=opt_fn)", "docstring": "Method returns a RNN_Learner object, that wraps an instance of the RNN_Encoder module.\n\nArgs:\nopt_fn (Optimizer): the torch optimizer function to use\nemb_sz (int): embedding size\nn_hid (int): number of hidden inputs\nn_layers (int): number of hidden layers\nkwargs: other arguments\n\nReturns:\nAn instance of the RNN_Learner class.", "source": "codesearchnet"}
{"code": "def prefetch_users(persistent_course_grades):\n        \n        users = User.objects.filter(\n            id__in=[grade.user_id for grade in persistent_course_grades]\n        )\n        return {\n            user.id: user for user in users\n        }", "docstring": "Prefetch Users from the list of user_ids present in the persistent_course_grades.\n\nArguments:\npersistent_course_grades (list): A list of PersistentCourseGrade.\n\nReturns:\n(dict): A dictionary containing user_id to user mapping.", "source": "juraj-google-style"}
{"code": "def Deserialize(self, reader):\n        \n        self.name = reader.ReadVarString().decode('utf-8')\n        self.symbol = reader.ReadVarString().decode('utf-8')\n        self.decimals = reader.ReadUInt8()", "docstring": "Read serialized data from byte stream\nArgs:\nreader (neocore.IO.BinaryReader): reader to read byte data from", "source": "juraj-google-style"}
{"code": "def from_file(cls, filename, directory=None,\n                  format=None, engine=None, encoding=File._encoding):\n        \n        filepath = os.path.join(directory or '', filename)\n        if encoding is None:\n            encoding = locale.getpreferredencoding()\n        with io.open(filepath, encoding=encoding) as fd:\n            source = fd.read()\n        return cls(source, filename, directory, format, engine, encoding)", "docstring": "Return an instance with the source string read from the given file.\n\nArgs:\nfilename: Filename for loading/saving the source.\ndirectory: (Sub)directory for source loading/saving and rendering.\nformat: Rendering output format (``'pdf'``, ``'png'``, ...).\nengine: Layout command used (``'dot'``, ``'neato'``, ...).\nencoding: Encoding for loading/saving the source.", "source": "juraj-google-style"}
{"code": "def train(self, X_train, Y_train, X_test, Y_test):\n    while True:\n        print(1)\n        time.sleep(1)\n        if (random.randint(0, 9) >= 5):\n            break", "docstring": "Train and validate the LR on a train and test dataset\n\nArgs:\nX_train (np.array): Training data\nY_train (np.array): Training labels\nX_test (np.array): Test data\nY_test (np.array): Test labels", "source": "codesearchnet"}
{"code": "def stitch_map(tiles, width, height, bbox, dpi):\n    size = (int((width * dpi_to_dpmm(dpi))), int((height * dpi_to_dpmm(dpi))))\n    background = Image.new('RGBA', size, (255, 255, 255))\n    for layer in tiles:\n        layer_img = Image.new('RGBA', size)\n        for ((x, y), tile_path) in layer.items():\n            tile = Image.open(tile_path)\n            layer_img.paste(tile, (((x - bbox.min.x) * TILE_SIZE), ((y - bbox.min.y) * TILE_SIZE)))\n        background = Image.alpha_composite(background, layer_img)\n    add_scales_bar(background, bbox)\n    return background.convert('RGB')", "docstring": "Merge tiles together into one image.\n\nArgs:\ntiles (list of dict of file): tiles for each layer\nwidth (float): page width in mm\nheight (height): page height in mm\ndpi (dpi): resolution in dots per inch\n\nReturns:\nPIL.Image: merged map.", "source": "codesearchnet"}
{"code": "def subproc_call(cmd, timeout=None):\n    try:\n        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout)\n        return (output, 0)\n    except subprocess.TimeoutExpired as e:\n        logger.warn(\"Command '{}' timeout!\".format(cmd))\n        logger.warn(e.output.decode('utf-8'))\n        return (e.output, (- 1))\n    except subprocess.CalledProcessError as e:\n        logger.warn(\"Command '{}' failed, return code={}\".format(cmd, e.returncode))\n        logger.warn(e.output.decode('utf-8'))\n        return (e.output, e.returncode)\n    except Exception:\n        logger.warn(\"Command '{}' failed to run.\".format(cmd))\n        return ('', (- 2))", "docstring": "Execute a command with timeout, and return STDOUT and STDERR\n\nArgs:\ncmd(str): the command to execute.\ntimeout(float): timeout in seconds.\n\nReturns:\noutput(bytes), retcode(int). If timeout, retcode is -1.", "source": "codesearchnet"}
{"code": "def get_variant_type(variant_source):\n    \n    file_type = get_file_type(variant_source)\n    variant_type = 'sv'\n    if file_type == 'vcf':\n        variants = VCF(variant_source)\n    elif file_type == 'gemini':\n        variants = GeminiQuery(variant_source)\n        gemini_query = \"SELECT * from variants\"\n        variants.run(gemini_query)\n    \n    \n    for i,variant in enumerate(variants):\n        if file_type == 'vcf':\n            if variant.is_snp:\n                variant_type = 'snv'\n        elif file_type == 'gemini':\n            if variant['type'] == 'snp':\n                variant_type = 'snv'\n            \n        if i > 1000:\n            break\n    \n    return variant_type", "docstring": "Try to find out what type of variants that exists in a variant source\n\nArgs:\nvariant_source (str): Path to variant source\nsource_mode (str): 'vcf' or 'gemini'\n\nReturns:\nvariant_type (str): 'sv' or 'snv'", "source": "juraj-google-style"}
{"code": "async def client_event_handler(self, client_id, event_tuple, user_data):\n    (conn_string, event_name, event) = event_tuple\n    if (event_name == 'report'):\n        report = event.serialize()\n        report['encoded_report'] = base64.b64encode(report['encoded_report'])\n        msg_payload = dict(connection_string=conn_string, serialized_report=report)\n        msg_name = OPERATIONS.NOTIFY_REPORT\n    elif (event_name == 'trace'):\n        encoded_payload = base64.b64encode(event)\n        msg_payload = dict(connection_string=conn_string, payload=encoded_payload)\n        msg_name = OPERATIONS.NOTIFY_TRACE\n    elif (event_name == 'progress'):\n        msg_payload = dict(connection_string=conn_string, operation=event.get('operation'), done_count=event.get('finished'), total_count=event.get('total'))\n        msg_name = OPERATIONS.NOTIFY_PROGRESS\n    elif (event_name == 'device_seen'):\n        msg_payload = event\n        msg_name = OPERATIONS.NOTIFY_DEVICE_FOUND\n    elif (event_name == 'broadcast'):\n        report = event.serialize()\n        report['encoded_report'] = base64.b64encode(report['encoded_report'])\n        msg_payload = dict(connection_string=conn_string, serialized_report=report)\n        msg_name = OPERATIONS.NOTIFY_BROADCAST\n    else:\n        self._logger.debug('Not forwarding unknown event over websockets: %s', event_tuple)\n        return\n    try:\n        self._logger.debug('Sending event %s: %s', msg_name, msg_payload)\n        (await self.server.send_event(user_data, msg_name, msg_payload))\n    except websockets.exceptions.ConnectionClosed:\n        self._logger.debug('Could not send notification because connection was closed for client %s', client_id)", "docstring": "Forward an event on behalf of a client.\n\nThis method is called by StandardDeviceServer when it has an event that\nshould be sent to a client.\n\nArgs:\nclient_id (str): The client that we should send this event to\nevent_tuple (tuple): The conn_string, event_name and event\nobject passed from the call to notify_event.\nuser_data (object): The user data passed in the call to\n:meth:`setup_client`.", "source": "codesearchnet"}
{"code": "def _write_session(self):\n        \n        base_name = \"%ssession\" % self._product_accronym.lower()\n        filename = \"%s%s.py\" % (self._class_prefix.lower(), base_name)\n        override_content = self._extract_override_content(base_name)\n\n        self.write(destination=self.output_directory, filename=filename, template_name=\"session.py.tpl\",\n                   version=self.api_version,\n                   product_accronym=self._product_accronym,\n                   class_prefix=self._class_prefix,\n                   root_api=self.api_root,\n                   api_prefix=self.api_prefix,\n                   override_content=override_content,\n                   header=self.header_content)", "docstring": "Write SDK session file\n\nArgs:\nversion (str): the version of the server", "source": "juraj-google-style"}
{"code": "def get_port_from_port_server(portserver_address, pid=None):\n    if (not portserver_address):\n        return None\n    if (portserver_address[0] == '@'):\n        portserver_address = ('\\x00' + portserver_address[1:])\n    if (pid is None):\n        pid = os.getpid()\n    try:\n        if hasattr(socket, 'AF_UNIX'):\n            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n        else:\n            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            sock.connect(portserver_address)\n            sock.sendall(('%d\\n' % pid).encode('ascii'))\n            buf = sock.recv(1024)\n        finally:\n            sock.close()\n    except socket.error as e:\n        print('Socket error when connecting to portserver:', e, file=sys.stderr)\n        return None\n    try:\n        port = int(buf.split(b'\\n')[0])\n    except ValueError:\n        print('Portserver failed to find a port.', file=sys.stderr)\n        return None\n    _owned_ports.add(port)\n    return port", "docstring": "Request a free a port from a system-wide portserver.\n\nThis follows a very simple portserver protocol:\nThe request consists of our pid (in ASCII) followed by a newline.\nThe response is a port number and a newline, 0 on failure.\n\nThis function is an implementation detail of pick_unused_port().\nIt should not normally be called by code outside of this module.\n\nArgs:\nportserver_address: The address (path) of a unix domain socket\nwith which to connect to the portserver.  A leading '@'\ncharacter indicates an address in the \"abstract namespace.\"\nOn systems without socket.AF_UNIX, this is an AF_INET address.\npid: The PID to tell the portserver to associate the reservation with.\nIf None, the current process's PID is used.\n\nReturns:\nThe port number on success or None on failure.", "source": "codesearchnet"}
{"code": "def __init__(self, interface, logger, base_configs=None):\n    raise NotImplementedError('Base class should not be called directly!')", "docstring": "The constructor for the Sniffer. It constructs a sniffer and\nconfigures it to be ready for capture.\n\nArgs:\ninterface: A string specifying the interface used to configure the\nsniffer.\nlogger: Mobly logger object.\nbase_configs: A dictionary containing baseline configurations of the\nsniffer. These can be overridden when staring a capture. The\nkeys are specified by Sniffer.CONFIG_KEY_*.\n\nReturns:\nself: A configured sniffer.\n\nRaises:\nInvalidDataError: if the config_path is invalid.\nNoPermissionError: if an error occurs while configuring the\nsniffer.", "source": "github-repos"}
{"code": "def flatten_per_replica_values(distribution_strategy, per_replica_values):\n    return [e for flattened in nest.flatten(per_replica_values) for e in distribution_strategy.unwrap(flattened)]", "docstring": "Unwraps and flattens a nest of PerReplica parameters.\n\nPerReplica values have one value associated with each device. Each entry in\nthe PerReplica dict has a device `key` and the corresponding value on the\ndevice as the `value`. In this function we take a PerReplica value or a list\nof PerReplica values and return all the values in the PerReplica dict.\n\nArgs:\ndistribution_strategy: DistributionStrategy used to distribute training and\nvalidation.\nper_replica_values: List of PerReplica object or a single PerReplica object.\n\nReturns:\nList of values of all the PerReplica objects.", "source": "github-repos"}
{"code": "def rename_style(self, old_name, new_name):\n    if (old_name not in self.styles):\n        raise KeyError(('Style %r not found' % old_name))\n    if (new_name in self.styles):\n        raise ValueError(('There is already a style called %r' % new_name))\n    if (not is_valid_field_content(new_name)):\n        raise ValueError(('%r is not a valid name' % new_name))\n    self.styles[new_name] = self.styles[old_name]\n    del self.styles[old_name]\n    for line in self:\n        if (line.style == old_name):\n            line.style = new_name", "docstring": "Rename a style, including references to it.\n\nArguments:\nold_name (str): Style to be renamed.\nnew_name (str): New name for the style (must be unused).\n\nRaises:\nKeyError: No style named old_name.\nValueError: new_name is not a legal name (cannot use commas)\nor new_name is taken.", "source": "codesearchnet"}
{"code": "def ssh(cmd=''):\n    \n    with settings(warn_only=True):\n        local('ssh -A -o StrictHostKeyChecking=no -i \"%s\" %s@%s \"%s\"' % (\n            env.key_filename, env.user, env.host, cmd))", "docstring": "SSH into the server(s) (sequentially if more than one)\n\nArgs:\ncmd (str) ='': Command to run on the server", "source": "juraj-google-style"}
{"code": "def encode_corpus(self, corpus, output_path):\n        \n\n        out_container = containers.Container(output_path)\n        out_container.open()\n\n        for utterance in corpus.utterances.values():\n            data = self.encode_utterance(utterance, corpus=corpus)\n            out_container.set(utterance.idx, data)\n\n        out_container.close()\n        return out_container", "docstring": "Encode all utterances of the given corpus and store them in a :class:`audiomate.container.Container`.\n\nArgs:\ncorpus (Corpus): The corpus to process.\noutput_path (str): The path to store the container with the encoded data.\n\nReturns:\nContainer: The container with the encoded data.", "source": "juraj-google-style"}
{"code": "def intersect(df, other, index=False, keep='first'):\n    \n\n    validate_set_ops(df, other)\n    if index:\n        df_reset_index = df.reset_index()\n        other_reset_index = other.reset_index()\n        index_cols = [col for col in df_reset_index.columns if col not in df.columns]\n        df_index_names = df.index.names\n        return_df = (pd.merge(df_reset_index, other_reset_index,\n                              how='inner',\n                              left_on=df_reset_index.columns.values.tolist(),\n                              right_on=df_reset_index.columns.values.tolist())\n                     .set_index(index_cols))\n        return_df.index.names = df_index_names\n        return_df = return_df.drop_duplicates(keep=keep)\n        return return_df\n    else:\n        return_df = pd.merge(df, other,\n                             how='inner',\n                             left_on=df.columns.values.tolist(),\n                             right_on=df.columns.values.tolist())\n        return_df = return_df.drop_duplicates(keep=keep)\n        return return_df", "docstring": "Returns rows that appear in both DataFrames.\n\nArgs:\ndf (pandas.DataFrame): data passed in through the pipe.\nother (pandas.DataFrame): other DataFrame to use for set operation with\nthe first.\n\nKwargs:\nindex (bool): Boolean indicating whether to consider the pandas index\nas part of the set operation (default `False`).\nkeep (str): Indicates which duplicate should be kept. Options are `'first'`\nand `'last'`.", "source": "juraj-google-style"}
{"code": "def add_implem(self, transition, attribute, function, **kwargs):\n    implem = ImplementationProperty(field_name=self.state_field, transition=transition, workflow=self.workflow, implementation=function, **kwargs)\n    self.implementations[transition.name] = implem\n    self.transitions_at[transition.name] = attribute\n    return implem", "docstring": "Add an implementation.\n\nArgs:\ntransition (Transition): the transition for which the implementation\nis added\nattribute (str): the name of the attribute where the implementation\nwill be available\nfunction (callable): the actual implementation function\n**kwargs: extra arguments for the related ImplementationProperty.", "source": "codesearchnet"}
{"code": "def set_image(self, text):\n        \n        \n        if exercises.CONTENT_STORAGE_PLACEHOLDER in text:\n            return text, []\n        \n        stripped_text = text.strip().replace('\\\\n', '')\n        \n        graphie_regex = re.compile(WEB_GRAPHIE_URL_REGEX, flags=re.IGNORECASE)\n        graphie_match = graphie_regex.match(stripped_text)\n        if graphie_match:\n            is_web_plus_graphie = True\n            graphie_rawpath = graphie_match.groupdict()['rawpath']\n            graphie_path = graphie_rawpath.replace(\"\n            exercise_image_file = _ExerciseGraphieFile(graphie_path)\n        elif get_base64_encoding(stripped_text):\n            is_web_plus_graphie = False\n            exercise_image_file = _ExerciseBase64ImageFile(stripped_text)\n        else:\n            is_web_plus_graphie = False\n            exercise_image_file = _ExerciseImageFile(stripped_text)\n        \n        exercise_image_file.assessment_item = self\n        \n        _filename = exercise_image_file.process_file()\n        \n        new_text = exercises.CONTENT_STORAGE_FORMAT.format(exercise_image_file.get_replacement_str())\n        if is_web_plus_graphie:     \n            new_text = \"web+graphie:\" + new_text\n        return new_text, [exercise_image_file]", "docstring": "Save image resource at `text` (path or url) to storage, then return the\nreplacement string and the necessary exercicse image file object.\nArgs:\n- text (str): path or url to parse as an exercise image resource\nReturns: (new_text, files)\n- `new_text` (str): replacement string for the original `text` string\n- `files` (list): list of files that were downloaded from `text`", "source": "juraj-google-style"}
{"code": "def texture3d(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'Texture3D':\n        \n\n        res = Texture3D.__new__(Texture3D)\n        res.mglo, res._glo = self.mglo.texture3d(size, components, data, alignment, dtype)\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Create a :py:class:`Texture3D` object.\n\nArgs:\nsize (tuple): The width, height and depth of the texture.\ncomponents (int): The number of components 1, 2, 3 or 4.\ndata (bytes): Content of the texture.\n\nKeyword Args:\nalignment (int): The byte alignment 1, 2, 4 or 8.\ndtype (str): Data type.\n\nReturns:\n:py:class:`Texture3D` object", "source": "juraj-google-style"}
{"code": "def _SetExtractionPreferredTimeZone(self, knowledge_base):\n    if self._preferred_time_zone:\n        try:\n            knowledge_base.SetTimeZone(self._preferred_time_zone)\n        except ValueError:\n            logger.warning('Unsupported time zone: {0:s}, defaulting to {1:s}'.format(self._preferred_time_zone, knowledge_base._time_zone.zone))", "docstring": "Sets the preferred time zone before extraction.\n\nArgs:\nknowledge_base (KnowledgeBase): contains information from the source\ndata needed for parsing.", "source": "codesearchnet"}
{"code": "def filter_devices(ads, func):\n    results = []\n    for ad in ads:\n        if func(ad):\n            results.append(ad)\n    return results", "docstring": "Finds the AndroidDevice instances from a list that match certain\nconditions.\n\nArgs:\nads: A list of AndroidDevice instances.\nfunc: A function that takes an AndroidDevice object and returns True\nif the device satisfies the filter condition.\n\nReturns:\nA list of AndroidDevice instances that satisfy the filter condition.", "source": "codesearchnet"}
{"code": "def _prune_heads(self, heads_to_prune):\n    for layer, heads in heads_to_prune.items():\n        self.encoder.layer[layer].attention.prune_heads(heads)", "docstring": "Prunes heads of the model.\n\nArgs:\nheads_to_prune:\ndict of {layer_num: list of heads to prune in this layer}", "source": "github-repos"}
{"code": "def design_stat_extremes(self, value=\"Extremes\"):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `design_stat_extremes`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `design_stat_extremes`')\n            vals = set()\n            vals.add(\"Extremes\")\n            if value not in vals:\n                raise ValueError('value {} is not an accepted value for '\n                                 'field `design_stat_extremes`'.format(value))\n\n        self._design_stat_extremes = value", "docstring": "Corresponds to IDD Field `design_stat_extremes`\n\nArgs:\nvalue (str): value for IDD Field `design_stat_extremes`\nAccepted values are:\n- Extremes\nDefault value: Extremes\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_block(self, height_or_hash, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_BLOCK, params=[height_or_hash, 1], id=id, endpoint=endpoint)", "docstring": "Look up a block by the height or hash of the block.\nArgs:\nheight_or_hash: (int or str) either the height of the desired block or its hash in the form '1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f'\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\nblock: a json object or the ``neorpc.Core.Block.Block`` object", "source": "juraj-google-style"}
{"code": "def _Matches(path, pattern_list):\n  \n  \n  return any(fnmatch.fnmatchcase(path, pattern) for pattern in pattern_list)", "docstring": "Returns true if path matches any patten found in pattern_list.\n\nArgs:\npath: A dot separated path to a package, class, method or variable\npattern_list: A list of wildcard patterns\n\nReturns:\nTrue if path matches any wildcard found in pattern_list.", "source": "juraj-google-style"}
{"code": "def parse_rule(cls, txt):\n    types = {'glob': GlobRule, 'regex': RegexRule, 'range': RangeRule, 'before': TimestampRule, 'after': TimestampRule}\n    (label, txt) = Rule._parse_label(txt)\n    if (label is None):\n        if ('*' in txt):\n            label = 'glob'\n        else:\n            label = 'range'\n    elif (label not in types):\n        raise ConfigurationError((\"'%s' is not a valid package filter type\" % label))\n    rule_cls = types[label]\n    txt_ = ('%s(%s)' % (label, txt))\n    try:\n        rule = rule_cls._parse(txt_)\n    except Exception as e:\n        raise ConfigurationError((\"Error parsing package filter '%s': %s: %s\" % (txt_, e.__class__.__name__, str(e))))\n    return rule", "docstring": "Parse a rule from a string.\n\nSee rezconfig.package_filter for an overview of valid strings.\n\nArgs:\ntxt (str): String to parse.\n\nReturns:\n`Rule` instance.", "source": "codesearchnet"}
{"code": "def update(self, other):\n        \n        if isinstance(other, NdMapping):\n            dims = [d for d in other.kdims if d not in self.kdims]\n            if len(dims) == other.ndims:\n                raise KeyError(\"Cannot update with NdMapping that has\"\n                               \" a different set of key dimensions.\")\n            elif dims:\n                other = other.drop_dimension(dims)\n            other = other.data\n        for key, data in other.items():\n            self._add_item(key, data, sort=False)\n        if self.sort:\n            self._resort()", "docstring": "Merges other item with this object\n\nArgs:\nother: Object containing items to merge into this object\nMust be a dictionary or NdMapping type", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data):\n        \n        \n        self.arguments['{SCORE}'] = self.scores[self.score]\n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n\n        results = self._run_gies(data, verbose=self.verbose)\n\n        return nx.relabel_nodes(nx.DiGraph(results),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Run the GIES algorithm.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the GIES algorithm.", "source": "juraj-google-style"}
{"code": "def has_register(self, register):\n        \n        has_reg = False\n        if (isinstance(register, QuantumRegister) and\n                register in self.qregs):\n            has_reg = True\n        elif (isinstance(register, ClassicalRegister) and\n              register in self.cregs):\n            has_reg = True\n        return has_reg", "docstring": "Test if this circuit has the register r.\n\nArgs:\nregister (Register): a quantum or classical register.\n\nReturns:\nbool: True if the register is contained in this circuit.", "source": "juraj-google-style"}
{"code": "def get(self, key, state_manager, training=None):\n    if key in self._feature_tensors:\n        return self._feature_tensors[key]\n    if key in self._features:\n        feature_tensor = self._get_raw_feature_as_tensor(key)\n        self._feature_tensors[key] = feature_tensor\n        return feature_tensor\n    if isinstance(key, six.string_types):\n        raise ValueError('Feature {} is not in features dictionary.'.format(key))\n    if not isinstance(key, fc_types.FeatureColumn):\n        raise TypeError('\"key\" must be either a \"str\" or \"FeatureColumn\". Provided: {}'.format(key))\n    column = key\n    logging.debug('Transforming feature_column %s.', column)\n    try:\n        transformed = column.transform_feature(self, state_manager, training=training)\n    except TypeError:\n        transformed = column.transform_feature(self, state_manager)\n    if transformed is None:\n        raise ValueError('Column {} is not supported.'.format(column.name))\n    self._feature_tensors[column] = transformed\n    return transformed", "docstring": "Returns a `Tensor` for the given key.\n\nA `str` key is used to access a base feature (not-transformed). When a\n`FeatureColumn` is passed, the transformed feature is returned if it\nalready exists, otherwise the given `FeatureColumn` is asked to provide its\ntransformed output, which is then cached.\n\nArgs:\nkey: a `str` or a `FeatureColumn`.\nstate_manager: A StateManager object that holds the FeatureColumn state.\ntraining: Boolean indicating whether to the column is being used in\ntraining mode. This argument is passed to the transform_feature method\nof any `FeatureColumn` that takes a `training` argument. For example, if\na `FeatureColumn` performed dropout, it could expose a `training`\nargument to control whether the dropout should be applied.\n\nReturns:\nThe transformed `Tensor` corresponding to the `key`.\n\nRaises:\nValueError: if key is not found or a transformed `Tensor` cannot be\ncomputed.", "source": "github-repos"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._object_type:\n        self._object_type.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The Create response payload is missing the object type field.')\n    if self._unique_identifier:\n        self._unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The Create response payload is missing the unique identifier field.')\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self._template_attribute:\n            self._template_attribute.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(CreateResponsePayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the Create response payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidField: Raised if the object type attribute or unique\nidentifier is not defined.", "source": "codesearchnet"}
{"code": "def update_additional_charge(self, *, recurring_billing_id, description, plan_value, plan_tax, plan_tax_return_base, currency):\n    payload = {'description': description, 'additionalValues': [{'name': 'ITEM_VALUE', 'value': plan_value, 'currency': currency}, {'name': 'ITEM_TAX', 'value': plan_tax, 'currency': currency}, {'name': 'ITEM_TAX_RETURN_BASE', 'value': plan_tax_return_base, 'currency': currency}]}\n    fmt = 'recurringBillItems/{}'.format(recurring_billing_id)\n    return self.client._put((self.url + fmt), payload=payload, headers=self.get_headers())", "docstring": "Updates the information from an additional charge in an invoice.\n\nArgs:\nrecurring_billing_id: Identifier of the additional charge.\ndescription:\nplan_value:\nplan_tax:\nplan_tax_return_base:\ncurrency:\n\nReturns:", "source": "codesearchnet"}
{"code": "def filter_publication(publication, cmp_authors=True):\n    \n    query = None\n    isbn_query = False\n\n    \n    if publication.optionals and publication.optionals.ISBN:\n        query = aleph.ISBNQuery(publication.optionals.ISBN)\n        isbn_query = True\n    else:\n        query = aleph.TitleQuery(publication.title)\n\n    result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), \"\")\n\n    if not result.records:\n        return publication  \n\n    \n    \n    \n    if isbn_query:\n        for record in result.records:\n            epub = record.epublication\n\n            \n            if compare_names(epub.nazev, publication.title) >= 80:\n                return None  \n\n        return publication\n\n    \n    for record in result.records:\n        epub = record.epublication\n\n        \n        if not compare_names(epub.nazev, publication.title) >= 80:\n            continue\n\n        if not cmp_authors:\n            return None  \n\n        \n        for author in epub.autori:\n            \n            author_str = \"%s %s %s\" % (\n                author.firstName,\n                author.lastName,\n                author.title\n            )\n\n            \n            pub_authors = map(lambda x: x.name, publication.authors)\n            if type(pub_authors) not in [list, tuple, set]:\n                pub_authors = [pub_authors]\n\n            \n            for pub_author in pub_authors:\n                if compare_names(author_str, pub_author) >= 50:\n                    return None  \n\n    return publication", "docstring": "Filter publications based at data from Aleph.\n\nArgs:\npublication (obj): :class:`.Publication` instance.\n\nReturns:\nobj/None: None if the publication was found in Aleph or `publication` \\\nif not.", "source": "juraj-google-style"}
{"code": "def approximate_density(dist, xloc, parameters=None, cache=None, eps=1e-07):\n    if (parameters is None):\n        parameters = dist.prm.copy()\n    if (cache is None):\n        cache = {}\n    xloc = numpy.asfarray(xloc)\n    (lo, up) = (numpy.min(xloc), numpy.max(xloc))\n    mu = (0.5 * (lo + up))\n    eps = (numpy.where((xloc < mu), eps, (- eps)) * xloc)\n    floc = evaluation.evaluate_forward(dist, xloc, parameters=parameters.copy(), cache=cache.copy())\n    for d in range(len(dist)):\n        xloc[d] += eps[d]\n        tmp = evaluation.evaluate_forward(dist, xloc, parameters=parameters.copy(), cache=cache.copy())\n        floc[d] -= tmp[d]\n        xloc[d] -= eps[d]\n    floc = numpy.abs((floc / eps))\n    return floc", "docstring": "Approximate the probability density function.\n\nArgs:\ndist : Dist\nDistribution in question. May not be an advanced variable.\nxloc : numpy.ndarray\nLocation coordinates. Requires that xloc.shape=(len(dist), K).\neps : float\nAcceptable error level for the approximations\nretall : bool\nIf True return Graph with the next calculation state with the\napproximation.\n\nReturns:\nnumpy.ndarray: Local probability density function with\n``out.shape == xloc.shape``. To calculate actual density function,\nevaluate ``numpy.prod(out, 0)``.\n\nExample:\n>>> distribution = chaospy.Normal(1000, 10)\n>>> xloc = numpy.array([[990, 1000, 1010]])\n>>> print(numpy.around(approximate_density(distribution, xloc), 4))\n[[0.0242 0.0399 0.0242]]\n>>> print(numpy.around(distribution.pdf(xloc), 4))\n[[0.0242 0.0399 0.0242]]", "source": "codesearchnet"}
{"code": "def _compile_property_ast(schema, current_schema_type, ast, location, context, unique_local_directives):\n    validate_property_directives(unique_local_directives)\n    if (location.field == COUNT_META_FIELD_NAME):\n        if (not is_in_fold_scope(context)):\n            raise GraphQLCompilationError(u'Cannot use the \"{}\" meta field when not within a @fold vertex field, as counting elements only makes sense in a fold. Location: {}'.format(COUNT_META_FIELD_NAME, location))\n    tag_directive = unique_local_directives.get('tag', None)\n    if tag_directive:\n        if is_in_fold_scope(context):\n            raise GraphQLCompilationError(u'Tagging values within a @fold vertex field is not allowed! Location: {}'.format(location))\n        if (location.field == COUNT_META_FIELD_NAME):\n            raise AssertionError(u'Tags are prohibited within @fold, but unexpectedly found use of a tag on the {} meta field that is only allowed within a @fold!Location: {}'.format(COUNT_META_FIELD_NAME, location))\n        tag_name = tag_directive.arguments[0].value.value\n        if (tag_name in context['tags']):\n            raise GraphQLCompilationError(u'Cannot reuse tag name: {}'.format(tag_name))\n        validate_safe_string(tag_name)\n        context['tags'][tag_name] = {'location': location, 'optional': is_in_optional_scope(context), 'type': strip_non_null_from_type(current_schema_type)}\n        context['metadata'].record_tag_info(tag_name, TagInfo(location=location))\n    output_directive = unique_local_directives.get('output', None)\n    if output_directive:\n        output_name = output_directive.arguments[0].value.value\n        if (output_name in context['outputs']):\n            raise GraphQLCompilationError(u'Cannot reuse output name: {}, {}'.format(output_name, context))\n        validate_safe_string(output_name)\n        validate_output_name(output_name)\n        graphql_type = strip_non_null_from_type(current_schema_type)\n        if is_in_fold_scope(context):\n            set_fold_innermost_scope(context)\n            if (location.field != COUNT_META_FIELD_NAME):\n                graphql_type = GraphQLList(graphql_type)\n        context['outputs'][output_name] = {'location': location, 'optional': is_in_optional_scope(context), 'type': graphql_type, 'fold': context.get('fold', None)}", "docstring": "Process property directives at this AST node, updating the query context as appropriate.\n\nArgs:\nschema: GraphQL schema object, obtained from the graphql library\ncurrent_schema_type: GraphQLType, the schema type at the current location\nast: GraphQL AST node, obtained from the graphql library. Only for function signature\nuniformity at the moment -- it is currently not used.\nlocation: Location object representing the current location in the query\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nunique_local_directives: dict, directive name string -> directive object, containing\nunique directives present on the current AST node *only*", "source": "codesearchnet"}
{"code": "def range(self, start_date=None, stop_date=None, field=(lambda x: x.xfer)):\n    assert (start_date <= stop_date), 'Start date must be earlier than end date.'\n    out = Transactions()\n    for t in self.trans:\n        date = field(t)\n        if ((start_date is not None) and (not (date >= start_date))):\n            continue\n        if ((stop_date is not None) and (not (date <= stop_date))):\n            continue\n        out.append(t)\n    return out", "docstring": "Return a ``Transactions`` object in an inclusive date range.\n\nArgs:\nstart_date: A ``datetime.Date`` object that marks the inclusive\nstart date for the range.\n\nstop_date: A ``datetime.Date`` object that marks the inclusive end\ndate for the range.\n\nfield: The field to compare start and end dates to. Default is the\n``xfer`` field.\n\nReturns:\nA ``Transactions`` object.", "source": "codesearchnet"}
{"code": "def __init__(self, path_segment_index):\n    \n    super(PathFilterScanTreeNode, self).__init__()\n    self._path_segments = {}\n    self.default_value = None\n    self.parent = None\n    self.path_segment_index = path_segment_index", "docstring": "Initializes a path filter scan tree node.\n\nArgs:\npath_segment_index: an integer containing the path segment index.", "source": "juraj-google-style"}
{"code": "def create_projection(self, fov: float=75.0, near: float=1.0, far: float=100.0, aspect_ratio: float=None):\n    return matrix44.create_perspective_projection_matrix(fov, (aspect_ratio or self.window.aspect_ratio), near, far, dtype='f4')", "docstring": "Create a projection matrix with the following parameters.\nWhen ``aspect_ratio`` is not provided the configured aspect\nratio for the window will be used.\n\nArgs:\nfov (float): Field of view (float)\nnear (float): Camera near value\nfar (float): Camrea far value\n\nKeyword Args:\naspect_ratio (float): Aspect ratio of the viewport\n\nReturns:\nThe projection matrix as a float32 :py:class:`numpy.array`", "source": "codesearchnet"}
{"code": "def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:\n    \n    url = _BASE_METAR_URL.format(station=station_icao)\n    with requests.get(url) as resp:\n        if not resp.ok:\n            return f'unable to obtain METAR for station {station_icao}\\n' \\\n                   f'Got to \"http:\n                   f'for a list of valid stations', None\n        return None, resp.content.decode().split('\\n')[1]", "docstring": "Retrieves a METAR string from an online database\n\nArgs:\nstation_icao: ICAO of the station\n\nReturns:\ntuple of error, metar_str", "source": "juraj-google-style"}
{"code": "def validate_config(config, required_keys, optional_keys=None):\n    if (optional_keys is None):\n        optional_keys = []\n    if (not isinstance(config, dict)):\n        raise Exception('config is not dict type')\n    invalid_keys = (set(config) - set((required_keys + optional_keys)))\n    if (len(invalid_keys) > 0):\n        raise Exception(('Invalid config with unexpected keys \"%s\"' % ', '.join((e for e in invalid_keys))))\n    missing_keys = (set(required_keys) - set(config))\n    if (len(missing_keys) > 0):\n        raise Exception(('Invalid config with missing keys \"%s\"' % ', '.join(missing_keys)))", "docstring": "Validate a config dictionary to make sure it includes all required keys\nand does not include any unexpected keys.\n\nArgs:\nconfig: the config to validate.\nrequired_keys: the names of the keys that the config must have.\noptional_keys: the names of the keys that the config can have.\n\nRaises:\nException if the config is not a dict or invalid.", "source": "codesearchnet"}
{"code": "def Embed(variables, verbose=False):\n    print(_AvailableString(variables, verbose))\n    try:\n        _EmbedIPython(variables)\n    except ImportError:\n        _EmbedCode(variables)", "docstring": "Drops into a Python REPL with variables available as local variables.\n\nArgs:\nvariables: A dict of variables to make available. Keys are variable names.\nValues are variable values.\nverbose: Whether to include 'hidden' members, those keys starting with _.", "source": "github-repos"}
{"code": "def __init__(self, callbacks=None, add_history=False, add_progbar=False, model=None, **params):\n    self.callbacks = nest.flatten(callbacks) if callbacks else []\n    self._add_default_callbacks(add_history, add_progbar)\n    if model:\n        self.set_model(model)\n    if params:\n        self.set_params(params)\n    self._supports_tf_logs = all((getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks))\n    self._batch_hooks_support_tf_logs = all((getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks if cb._implements_train_batch_hooks() or cb._implements_test_batch_hooks() or cb._implements_predict_batch_hooks()))\n    self._should_call_train_batch_hooks = any((cb._implements_train_batch_hooks() for cb in self.callbacks))\n    self._should_call_test_batch_hooks = any((cb._implements_test_batch_hooks() for cb in self.callbacks))\n    self._should_call_predict_batch_hooks = any((cb._implements_predict_batch_hooks() for cb in self.callbacks))\n    self._disallow_batch_hooks_in_ps_strategy()\n    self._check_timing = any((cbk.__class__.__name__ not in globals() for cbk in self.callbacks))\n    self._num_batches_for_timing_check = 5\n    self._hook_times = {}\n    self._batch_start_time = None\n    self._batch_times = []", "docstring": "Container for `Callback` instances.\n\nThis object wraps a list of `Callback` instances, making it possible\nto call them all at once via a single endpoint\n(e.g. `callback_list.on_epoch_end(...)`).\n\nArgs:\ncallbacks: List of `Callback` instances.\nadd_history: Whether a `History` callback should be added, if one does not\nalready exist in the `callbacks` list.\nadd_progbar: Whether a `ProgbarLogger` callback should be added, if one\ndoes not already exist in the `callbacks` list.\nmodel: The `Model` these callbacks are used with.\n**params: If provided, parameters will be passed to each `Callback` via\n`Callback.set_params`.", "source": "github-repos"}
{"code": "async def change_votes(self, player1_votes: int=None, player2_votes: int=None, add: bool=False):\n    assert_or_raise(((player1_votes is not None) or (player2_votes is not None)), ValueError, 'One of the votes must not be None')\n    if add:\n        res = (await self.connection('GET', 'tournaments/{}/matches/{}'.format(self._tournament_id, self._id)))\n        self._refresh_from_json(res)\n        if (player1_votes is not None):\n            player1_votes += (self._player1_votes or 0)\n        if (player2_votes is not None):\n            player2_votes += (self._player2_votes or 0)\n    params = {}\n    if (player1_votes is not None):\n        params.update({'player1_votes': player1_votes})\n    if (player2_votes is not None):\n        params.update({'player2_votes': player2_votes})\n    res = (await self.connection('PUT', 'tournaments/{}/matches/{}'.format(self._tournament_id, self._id), 'match', **params))\n    self._refresh_from_json(res)", "docstring": "change the votes for either player\n\n|methcoro|\nThe votes will be overriden by default,\nIf `add` is set to True, another API request call will be made to ensure the local is up to date with\nthe Challonge server. Then the votes given in argument will be added to those on the server\n\nArgs:\nplayer1_votes: if set, the player 1 votes will be changed to this value, or added to the current value if `add` is set\nplayer1_votes: if set, the player 2 votes will be changed to this value, or added to the current value if `add` is set\nadd: if set, votes in parameters will be added instead of overriden\n\nRaises:\nValueError: one of the votes arguments must not be None\nAPIException", "source": "codesearchnet"}
{"code": "def snow_depth(self, value=999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `snow_depth`'.format(value))\n\n        self._snow_depth = value", "docstring": "Corresponds to IDD Field `snow_depth`\n\nArgs:\nvalue (float): value for IDD Field `snow_depth`\nUnit: cm\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def format_tensor(tensor, tensor_label, include_metadata=False, auxiliary_message=None, include_numeric_summary=False, np_printoptions=None, highlight_options=None):\n    lines = []\n    font_attr_segs = {}\n    if tensor_label is not None:\n        lines.append('Tensor \"%s\":' % tensor_label)\n        suffix = tensor_label.split(':')[-1]\n        if suffix.isdigit():\n            font_attr_segs[0] = [(8, 8 + len(tensor_label), 'bold')]\n        else:\n            debug_op_len = len(suffix)\n            proper_len = len(tensor_label) - debug_op_len - 1\n            font_attr_segs[0] = [(8, 8 + proper_len, 'bold'), (8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, 'yellow')]\n    if isinstance(tensor, debug_data.InconvertibleTensorProto):\n        if lines:\n            lines.append('')\n        lines.extend(str(tensor).split('\\n'))\n        return debugger_cli_common.RichTextLines(lines)\n    elif not isinstance(tensor, np.ndarray):\n        if lines:\n            lines.append('')\n        lines.extend(repr(tensor).split('\\n'))\n        return debugger_cli_common.RichTextLines(lines)\n    if include_metadata:\n        lines.append('  dtype: %s' % str(tensor.dtype))\n        lines.append('  shape: %s' % str(tensor.shape).replace('L', ''))\n    if lines:\n        lines.append('')\n    formatted = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)\n    if auxiliary_message:\n        formatted.extend(auxiliary_message)\n    if include_numeric_summary:\n        formatted.append('Numeric summary:')\n        formatted.extend(numeric_summary(tensor))\n        formatted.append('')\n    if np_printoptions is not None:\n        np.set_printoptions(**np_printoptions)\n    array_lines = repr(tensor).split('\\n')\n    if tensor.dtype.type is not np.bytes_:\n        annotations = _annotate_ndarray_lines(array_lines, tensor, np_printoptions=np_printoptions)\n    else:\n        annotations = None\n    formatted_array = debugger_cli_common.RichTextLines(array_lines, annotations=annotations)\n    formatted.extend(formatted_array)\n    if highlight_options is not None:\n        indices_list = list(np.argwhere(highlight_options.criterion(tensor)))\n        total_elements = np.size(tensor)\n        highlight_summary = 'Highlighted%s: %d of %d element(s) (%.2f%%)' % ('(%s)' % highlight_options.description if highlight_options.description else '', len(indices_list), total_elements, len(indices_list) / float(total_elements) * 100.0)\n        formatted.lines[0] += ' ' + highlight_summary\n        if indices_list:\n            indices_list = [list(indices) for indices in indices_list]\n            are_omitted, rows, start_cols, end_cols = locate_tensor_element(formatted, indices_list)\n            for is_omitted, row, start_col, end_col in zip(are_omitted, rows, start_cols, end_cols):\n                if is_omitted or start_col is None or end_col is None:\n                    continue\n                if row in formatted.font_attr_segs:\n                    formatted.font_attr_segs[row].append((start_col, end_col, highlight_options.font_attr))\n                else:\n                    formatted.font_attr_segs[row] = [(start_col, end_col, highlight_options.font_attr)]\n    return formatted", "docstring": "Generate a RichTextLines object showing a tensor in formatted style.\n\nArgs:\ntensor: The tensor to be displayed, as a numpy ndarray or other\nappropriate format (e.g., None representing uninitialized tensors).\ntensor_label: A label for the tensor, as a string. 
If set to None, will\nsuppress the tensor name line in the return value.\ninclude_metadata: Whether metadata such as dtype and shape are to be\nincluded in the formatted text.\nauxiliary_message: An auxiliary message to display under the tensor label,\ndtype and shape information lines.\ninclude_numeric_summary: Whether a text summary of the numeric values (if\napplicable) will be included.\nnp_printoptions: A dictionary of keyword arguments that are passed to a\ncall of np.set_printoptions() to set the text format for display numpy\nndarrays.\nhighlight_options: (HighlightOptions) options for highlighting elements\nof the tensor.\n\nReturns:\nA RichTextLines object. Its annotation field has line-by-line markups to\nindicate which indices in the array the first element of each line\ncorresponds to.", "source": "github-repos"}
{"code": "def load_library(lib, name=None, lib_cls=None):\n    \n    try:\n        if lib_cls:\n            return lib_cls(lib)\n        else:\n            return ctypes.CDLL(lib)\n    except Exception:\n        if name:\n            lib_msg = '%s (%s)' % (name, lib)\n        else:\n            lib_msg = lib\n\n        lib_msg += ' could not be loaded'\n\n        if sys.platform == 'cygwin':\n            lib_msg += ' in cygwin'\n        _LOGGER.error(lib_msg, exc_info=True)\n        return None", "docstring": "Loads a library. Catches and logs exceptions.\n\nReturns: the loaded library or None\n\narguments:\n* lib        -- path to/name of the library to be loaded\n* name       -- the library's identifier (for logging)\nDefaults to None.\n* lib_cls    -- library class. Defaults to None (-> ctypes.CDLL).", "source": "juraj-google-style"}
{"code": "def single_conv_dist(name, x, output_channels=None):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    x_shape = common_layers.shape_list(x)\n    if output_channels is None:\n      output_channels = x_shape[-1]\n    mean_log_scale = conv(\"conv2d\", x, output_channels=2*output_channels,\n                          conv_init=\"zeros\", apply_actnorm=False)\n    mean = mean_log_scale[:, :, :, 0::2]\n    log_scale = mean_log_scale[:, :, :, 1::2]\n    return tf.distributions.Normal(mean, tf.exp(log_scale))", "docstring": "A 3x3 convolution mapping x to a standard normal distribution at init.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor.\noutput_channels: number of channels of the mean and std.", "source": "juraj-google-style"}
{"code": "def make_persister(self, to_persist):\n        \n\n        if not self.meta_data:\n            raise Exception(\"Root not set. Can't create persister.\")\n\n        def persister(c, broker):\n            if c in to_persist:\n                self.dehydrate(c, broker)\n        return persister", "docstring": "Returns a function that hydrates components as they are evaluated. The\nfunction should be registered as an observer on a Broker just before\nexecution.\n\nArgs:\nto_persist (set): Set of components to persist. Skip everything\nelse.", "source": "juraj-google-style"}
{"code": "def _compute_gradients_wrt_embedding_table(self, gradient_wrt_activation, embedding_table, feature_indices, feature_values, combiner):\n    if combiner not in ('mean', 'sum'):\n        raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))\n    grads_shape = gradient_wrt_activation.shape[:-1] + embedding_table.shape\n    grads = np.zeros(shape=grads_shape)\n    count = np.zeros(shape=grads_shape)\n    for feature_indice, vocabulary_id in zip(feature_indices, feature_values):\n        batch_index = tuple(feature_indice[:-1])\n        grads[batch_index][vocabulary_id] += gradient_wrt_activation[batch_index]\n        count[batch_index] += 1\n    count[count == 0] = 1\n    if combiner == 'mean':\n        grads = grads / count\n    return np.reshape(grads, (-1, *embedding_table.shape))", "docstring": "Compute gradients wrt embedding_table.\n\nArgs:\ngradient_wrt_activation: `np.array` with shape `batch_size` by embedding\n`dimension`.\nembedding_table: `np.array` with shape `vocabulary_size` by embedding\n`dimension`.\nfeature_indices: `indices` as used to construct `SparseTensor`.\nfeature_values: `values` as used to construct `SparseTensor`.\ncombiner: `String`, 'mean' or 'sum'.\n\nReturns:\nGradients wrt `embedding_table`, an `np.array`s with shape\n`batch_size` by `vocabulary_size` by\nembedding `dimension`.\n\nRaises:\nValueError: if `combiner` is not one of 'mean' or 'sum'.", "source": "github-repos"}
{"code": "def _normalize_batch_coordinates(self, inputs, original_sizes, is_bounding_box=False):\n    if len(original_sizes) != len(inputs):\n        return [self._normalize_coordinates(self.target_size, item, original_sizes[0], is_bounding_box=is_bounding_box) for item in inputs]\n    else:\n        return [self._normalize_coordinates(self.target_size, item, size, is_bounding_box=is_bounding_box) for item, size in zip(inputs, original_sizes)]", "docstring": "Normalize coordinates based on original sizes.\nArgs:\ninputs: List of coordinate arrays\noriginal_sizes: Original sizes of the images\nis_bounding_box: Whether inputs are bounding boxes\nReturns:\nNormalized coordinates as list", "source": "github-repos"}
{"code": "def trace_min_buffer_capacity(self):\n    cmd = enums.JLinkTraceCommand.GET_MIN_CAPACITY\n    data = ctypes.c_uint32(0)\n    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n    if (res == 1):\n        raise errors.JLinkException('Failed to get min trace buffer size.')\n    return data.value", "docstring": "Retrieves the minimum capacity the trace buffer can be configured with.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe minimum configurable capacity for the trace buffer.", "source": "codesearchnet"}
{"code": "def _get_type(points, soma_class):\n    assert (soma_class in (SOMA_CONTOUR, SOMA_CYLINDER))\n    npoints = len(points)\n    if (soma_class == SOMA_CONTOUR):\n        return {0: None, 1: SomaSinglePoint, 2: None}.get(npoints, SomaSimpleContour)\n    if ((npoints == 3) and (points[0][COLS.P] == (- 1)) and (points[1][COLS.P] == 1) and (points[2][COLS.P] == 1)):\n        L.warning('Using neuromorpho 3-Point soma')\n        return SomaNeuromorphoThreePointCylinders\n    return {0: None, 1: SomaSinglePoint}.get(npoints, SomaCylinders)", "docstring": "get the type of the soma\n\nArgs:\npoints: Soma points\nsoma_class(str): one of 'contour' or 'cylinder' to specify the type", "source": "codesearchnet"}
{"code": "def on_session_init(self, request):", "docstring": "Callback invoked during construction of the debug-wrapper session.\n\nThis is a blocking callback.\nThe invocation happens right before the constructor ends.\n\nArgs:\nrequest: (`OnSessionInitRequest`) callback request carrying information\nsuch as the session being wrapped.\n\nReturns:\nAn instance of `OnSessionInitResponse`.", "source": "github-repos"}
{"code": "def serialize_object_graph(self, saveables_cache=None):\n    named_saveable_objects, object_graph_proto, feed_additions, _ = save_util_v1.serialize_object_graph_with_registered_savers(self, saveables_cache)\n    return (named_saveable_objects, object_graph_proto, feed_additions)", "docstring": "Determine checkpoint keys for variables and build a serialized graph.\n\nNon-slot variables are keyed based on a shortest path from the root saveable\nto the object which owns the variable (i.e. the one which called\n`Trackable._add_variable` to create it).\n\nSlot variables are keyed based on a shortest path to the variable being\nslotted for, a shortest path to their optimizer, and the slot name.\n\nArgs:\nsaveables_cache: An optional cache storing previously created\nSaveableObjects created for each Trackable. Maps Trackables to a\ndictionary of attribute names to Trackable.\n\nReturns:\nA tuple of (named_variables, object_graph_proto, feed_additions):\nnamed_variables: A dictionary mapping names to variable objects.\nobject_graph_proto: A TrackableObjectGraph protocol buffer\ncontaining the serialized object graph and variable references.\nfeed_additions: A dictionary mapping from Tensors to values which should\nbe fed when saving.\n\nRaises:\nValueError: If there are invalid characters in an optimizer's slot names.", "source": "github-repos"}
{"code": "def no_gradient(op_type: str) -> None:\n    if not isinstance(op_type, str):\n        raise TypeError('op_type must be a string')\n    gradient_registry.register(None, op_type)", "docstring": "Specifies that ops of type `op_type` is not differentiable.\n\nThis function should *not* be used for operations that have a\nwell-defined gradient that is not yet implemented.\n\nThis function is only used when defining a new op type. It may be\nused for ops such as `tf.size()` that are not differentiable.  For\nexample:\n\n```python\ntf.no_gradient(\"Size\")\n```\n\nThe gradient computed for 'op_type' will then propagate zeros.\n\nFor ops that have a well-defined gradient but are not yet implemented,\nno declaration should be made, and an error *must* be thrown if\nan attempt to request its gradient is made.\n\nArgs:\nop_type: The string type of an operation. This corresponds to the\n`OpDef.name` field for the proto that defines the operation.\n\nRaises:\nTypeError: If `op_type` is not a string.", "source": "github-repos"}
{"code": "def _process_book(html_chunk):\n    \n    title, book_url = _parse_title_url(html_chunk)\n\n    \n    data = DOWNER.download(book_url)\n    dom = dhtmlparser.parseString(\n        handle_encodnig(data)\n    )\n    details = dom.find(\"div\", {\"id\": \"kniha_detail\"})[0]\n\n    \n    pub = Publication(\n        title=title,\n        authors=_parse_authors(html_chunk),\n        price=_parse_price(details),\n        publisher=\"CPress\"\n    )\n\n    \n    pub.optionals.URL = book_url\n    pub.optionals.EAN = _parse_ean(details)\n    pub.optionals.format = _parse_format(details)\n    pub.optionals.pub_date = _parse_date(details)\n    pub.optionals.description = _parse_description(details)\n\n    return pub", "docstring": "Parse available informations about book from the book details page.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nobj: :class:`structures.Publication` instance with book details.", "source": "juraj-google-style"}
{"code": "def _check_docstring_quotes(self, quote_record):\n    (_, triple, row, col) = quote_record\n    if (triple != TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote)):\n        self._invalid_docstring_quote(triple, row, col)", "docstring": "Check if the docstring quote from tokenization is valid.\n\nArgs:\nquote_record: a tuple containing the info about the string\nfrom tokenization, giving the (token, quote, row number).", "source": "codesearchnet"}
{"code": "def wait_till_change_set_complete(cfn_client, change_set_id, try_count=25, sleep_time=0.5, max_sleep=3):\n    complete = False\n    response = None\n    for i in range(try_count):\n        response = cfn_client.describe_change_set(ChangeSetName=change_set_id)\n        complete = (response['Status'] in ('FAILED', 'CREATE_COMPLETE'))\n        if complete:\n            break\n        if (sleep_time == max_sleep):\n            logger.debug('Still waiting on changeset for another %s seconds', sleep_time)\n        time.sleep(sleep_time)\n        sleep_time = min((sleep_time * 2), max_sleep)\n    if (not complete):\n        raise exceptions.ChangesetDidNotStabilize(change_set_id)\n    return response", "docstring": "Checks state of a changeset, returning when it is in a complete state.\n\nSince changesets can take a little bit of time to get into a complete\nstate, we need to poll it until it does so. This will try to get the\nstate `try_count` times, waiting `sleep_time` * 2 seconds between each try\nup to the `max_sleep` number of seconds. If, after that time, the changeset\nis not in a complete state it fails. These default settings will wait a\nlittle over one minute.\n\nArgs:\ncfn_client (:class:`botocore.client.CloudFormation`): Used to query\ncloudformation.\nchange_set_id (str): The unique changeset id to wait for.\ntry_count (int): Number of times to try the call.\nsleep_time (int): Time to sleep between attempts.\nmax_sleep (int): Max time to sleep during backoff\n\nReturn:\ndict: The response from cloudformation for the describe_change_set\ncall.", "source": "codesearchnet"}
{"code": "def unlock(self, passphrase, encrypted_seed=None):\n    wallet = self.resource\n    if (not encrypted_seed):\n        encrypted_seed = wallet.primary_private_seed\n    try:\n        if encrypted_seed['nonce']:\n            primary_seed = NaclPassphraseBox.decrypt(passphrase, encrypted_seed)\n        else:\n            primary_seed = PassphraseBox.decrypt(passphrase, encrypted_seed)\n    except:\n        raise InvalidPassphraseError()\n    self.multi_wallet = MultiWallet(private_seeds={'primary': primary_seed}, public={'cosigner': wallet.cosigner_public_seed, 'backup': wallet.backup_public_seed})\n    return self", "docstring": "Unlock the Wallet by decrypting the primary_private_seed with the\nsupplied passphrase. Once unlocked, the private seed is accessible in\nmemory and calls to `account.pay` will succeed. This is a necessary step\nfor creating transactions.\n\nArgs:\npassphrase (str): The passphrase the User used to encrypt this wallet.\nencrypted_seed (dict): A dictionary of the form\n{'ciphertext': longhexvalue,\n'iterations': integer of pbkdf2 derivations,\n'nonce': 24-byte hex value\n'salt': 16-byte hex value}\nthis dict represents an private seed (not a master key) encrypted\nwith the `passphrase` using pbkdf2. You can obtain this value with\nwallet.generate. If this value is supplied, it overwrites (locally\nonly) the encrypted primary_private_seed value, allowing you to load\nin a primary key that you didn't store with Gem. Note that the key\nMUST match the pubkey that this wallet was created with.\nReturns:\nself", "source": "codesearchnet"}
{"code": "def parse_flux_bounds(entry):\n    lower_bound = None\n    upper_bound = None\n    for parameter in entry.kinetic_law_reaction_parameters:\n        (pid, name, value, units) = parameter\n        if ((pid == 'UPPER_BOUND') or (name == 'UPPER_BOUND')):\n            upper_bound = value\n        elif ((pid == 'LOWER_BOUND') or (name == 'LOWER_BOUND')):\n            lower_bound = value\n    return (lower_bound, upper_bound)", "docstring": "Return flux bounds for reaction entry.\n\nDetect flux bounds that are specified using the non-standardized\nkinetic law parameters which are used by many pre-FBC SBML models. The\nflux bounds are returned as a pair of lower, upper bounds. The returned\nbound is None if undefined.\n\nArgs:\nentry: :class:`SBMLReactionEntry`.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(OSFile, self).__init__(resolver_context)\n    self._file_object = None\n    self._size = 0", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "class PatchTSMixerChannelFeatureMixerBlock(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        self.norm = PatchTSMixerNormLayer(config)\n        self.gated_attn = config.gated_attn\n        self.mlp = PatchTSMixerMLP(in_features=config.num_input_channels, out_features=config.num_input_channels, config=config)\n        if config.gated_attn:\n            self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_input_channels, out_size=config.num_input_channels)\n\n    def forward(self, inputs: torch.Tensor):\n        \n        residual = inputs\n        inputs = self.norm(inputs)\n        inputs = inputs.permute(0, 3, 2, 1)\n        if self.gated_attn:\n            inputs = self.gating_block(inputs)\n        inputs = self.mlp(inputs)\n        inputs = inputs.permute(0, 3, 2, 1)\n        out = inputs + residual\n        return out", "docstring": "This module mixes the features in the channel dimension.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def flatten(input_layer, preserve_batch=True):\n    if preserve_batch:\n        return reshape(input_layer, [DIM_SAME, (- 1)])\n    else:\n        return reshape(input_layer, [(- 1)])", "docstring": "Flattens this.\n\nIf preserve_batch is True, the result is rank 2 and the first dim (batch) is\nunchanged. Otherwise the result is rank 1.\n\nArgs:\ninput_layer: The Pretty Tensor object, supplied.\npreserve_batch: If True (the default), then preserve the first dimension.\nReturns:\nA LayerWrapper with the flattened tensor.", "source": "codesearchnet"}
{"code": "def to_diff_dict(self) -> Dict[str, Any]:\n    config_dict = self.to_dict()\n    default_config_dict = HqqConfig().to_dict()\n    serializable_config_dict = {}\n    for key, value in config_dict.items():\n        if value != default_config_dict[key]:\n            serializable_config_dict[key] = value\n    return serializable_config_dict", "docstring": "Removes all attributes from config which correspond to the default config attributes for better readability and\nserializes to a Python dictionary.\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def gaussian_square(times: np.ndarray, amp: complex, center: float, width: float, sigma: float, zeroed_width: Union[(None, float)]=None) -> np.ndarray:\n    square_start = (center - (width / 2))\n    square_stop = (center + (width / 2))\n    if zeroed_width:\n        zeroed_width = min(width, zeroed_width)\n        gauss_zeroed_width = (zeroed_width - width)\n    else:\n        gauss_zeroed_width = None\n    funclist = [functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma, zeroed_width=gauss_zeroed_width, rescale_amp=True), functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma, zeroed_width=gauss_zeroed_width, rescale_amp=True), functools.partial(constant, amp=amp)]\n    condlist = [(times <= square_start), (times >= square_stop)]\n    return np.piecewise(times.astype(np.complex_), condlist, funclist)", "docstring": "r\"\"\"Continuous gaussian square pulse.\n\nArgs:\ntimes: Times to output pulse for.\namp: Pulse amplitude.\ncenter: Center of the square pulse component.\nwidth: Width of the square pulse component.\nsigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.\nzeroed_width: Subtract baseline of gaussian square pulse\nto enforce $\\OmegaSquare(center \\pm zeroed_width/2)=0$.", "source": "codesearchnet"}
{"code": "def get_all_nn_info(self, structure):\n    return [self.get_nn_info(structure, n) for n in range(len(structure))]", "docstring": "Get a listing of all neighbors for all sites in a structure\n\nArgs:\nstructure (Structure): Input structure\nReturn:\nList of NN site information for each site in the structure. Each\nentry has the same format as `get_nn_info`", "source": "codesearchnet"}
{"code": "def peak_signal_to_noise_ratio(true, pred):\n    return ((10.0 * tf.log((1.0 / mean_squared_error(true, pred)))) / tf.log(10.0))", "docstring": "Image quality metric based on maximal signal power vs. power of the noise.\n\nArgs:\ntrue: the ground truth image.\npred: the predicted image.\nReturns:\npeak signal to noise ratio (PSNR)", "source": "codesearchnet"}
{"code": "def verify_directory(directory_name, directory_location, directory_create=False):\n    \n    if not directory_create:\n        return __os.path.exists(__os.path.join(directory_location, directory_name))\n    elif directory_create:\n        good = __os.path.exists(__os.path.join(directory_location, directory_name))\n        if not good:\n            __os.mkdir(__os.path.join(directory_location, directory_name))", "docstring": "Function to verify if a directory exists\nArgs:\ndirectory_name: The name of directory to check\ndirectory_location: The location of the directory, derive from the os module\ndirectory_create: If you want to create the directory\n\nReturns: returns boolean True or False, but if you set directory_create to True it will create the directory", "source": "juraj-google-style"}
{"code": "def point_dist2(p1, p2):\n    v = vector(p1, p2)\n    return np.dot(v, v)", "docstring": "compute the square of the euclidian distance between two 3D points\n\nArgs:\np1, p2: indexable objects with\nindices 0, 1, 2 corresponding to 3D cartesian coordinates.\nReturns:\nThe square of the euclidian distance between the points.", "source": "codesearchnet"}
{"code": "def get_var(environ_cp, var_name, query_item, enabled_by_default, question=None, yes_reply=None, no_reply=None):\n    if not question:\n        question = 'Do you wish to build TensorFlow with {} support?'.format(query_item)\n    if not yes_reply:\n        yes_reply = '{} support will be enabled for TensorFlow.'.format(query_item)\n    if not no_reply:\n        no_reply = 'No {}'.format(yes_reply)\n    yes_reply += '\\n'\n    no_reply += '\\n'\n    if enabled_by_default:\n        question += ' [Y/n]: '\n    else:\n        question += ' [y/N]: '\n    var = environ_cp.get(var_name)\n    if var is not None:\n        var_content = var.strip().lower()\n        true_strings = ('1', 't', 'true', 'y', 'yes')\n        false_strings = ('0', 'f', 'false', 'n', 'no')\n        if var_content in true_strings:\n            var = True\n        elif var_content in false_strings:\n            var = False\n        else:\n            raise UserInputError('Environment variable %s must be set as a boolean indicator.\\nThe following are accepted as TRUE : %s.\\nThe following are accepted as FALSE: %s.\\nCurrent value is %s.' % (var_name, ', '.join(true_strings), ', '.join(false_strings), var))\n    while var is None:\n        user_input_origin = get_input(question)\n        user_input = user_input_origin.strip().lower()\n        if user_input == 'y':\n            print(yes_reply)\n            var = True\n        elif user_input == 'n':\n            print(no_reply)\n            var = False\n        elif not user_input:\n            if enabled_by_default:\n                print(yes_reply)\n                var = True\n            else:\n                print(no_reply)\n                var = False\n        else:\n            print('Invalid selection: {}'.format(user_input_origin))\n    return var", "docstring": "Get boolean input from user.\n\nIf var_name is not set in env, ask user to enable query_item or not. If the\nresponse is empty, use the default.\n\nArgs:\nenviron_cp: copy of the os.environ.\nvar_name: string for name of environment variable, e.g. \"TF_NEED_CUDA\".\nquery_item: string for feature related to the variable, e.g. \"CUDA for\nNvidia GPUs\".\nenabled_by_default: boolean for default behavior.\nquestion: optional string for how to ask for user input.\nyes_reply: optional string for reply when feature is enabled.\nno_reply: optional string for reply when feature is disabled.\n\nReturns:\nboolean value of the variable.\n\nRaises:\nUserInputError: if an environment variable is set, but it cannot be\ninterpreted as a boolean indicator, assume that the user has made a\nscripting error, and will continue to provide invalid input.\nRaise the error to avoid infinitely looping.", "source": "github-repos"}
{"code": "def get_variant_genotypes(self, variant):\n        \n        \n        try:\n            plink_chrom = CHROM_STR_TO_INT[variant.chrom.name]\n        except KeyError:\n            raise ValueError(\n                \"Invalid chromosome ('{}') for Plink.\".format(variant.chrom)\n            )\n\n        info = self.bim.loc[\n            (self.bim.chrom == plink_chrom) &\n            (self.bim.pos == variant.pos), :\n        ]\n\n        if info.shape[0] == 0:\n            logging.variant_not_found(variant)\n            return []\n\n        elif info.shape[0] == 1:\n            return self._get_biallelic_variant(variant, info)\n\n        else:\n            return self._get_multialleic_variant(variant, info)", "docstring": "Get the genotypes from a well formed variant instance.\n\nArgs:\nmarker (Variant): A Variant instance.\n\nReturns:\nA list of Genotypes instance containing a pointer to the variant as\nwell as a vector of encoded genotypes.\n\nNote\n====\nIf the sample IDs are not unique, the index is changed to be the\nsample family ID and individual ID (i.e. fid_iid).", "source": "juraj-google-style"}
{"code": "def _ParseRecord(self, parser_mediator, file_object):\n    \n    header_record_offset = file_object.tell()\n\n    \n    \n    token_type = self._ParseTokenType(file_object, header_record_offset)\n    if token_type not in self._HEADER_TOKEN_TYPES:\n      raise errors.ParseError(\n          'Unsupported header token type: 0x{0:02x}'.format(token_type))\n\n    token_type, token_data = self._ParseToken(file_object, header_record_offset)\n\n    if token_data.format_version != 11:\n      raise errors.ParseError('Unsupported format version type: {0:d}'.format(\n          token_data.format_version))\n\n    timestamp = token_data.microseconds + (\n        token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)\n\n    event_type = token_data.event_type\n    header_record_size = token_data.record_size\n    record_end_offset = header_record_offset + header_record_size\n\n    event_tokens = []\n    return_token_values = None\n\n    file_offset = file_object.tell()\n    while file_offset < record_end_offset:\n      token_type, token_data = self._ParseToken(file_object, file_offset)\n      if not token_data:\n        raise errors.ParseError('Unsupported token type: 0x{0:02x}'.format(\n            token_type))\n\n      file_offset = file_object.tell()\n\n      if token_type == self._TOKEN_TYPE_AUT_TRAILER:\n        break\n\n      token_type_string = self._TOKEN_TYPES.get(token_type, 'UNKNOWN')\n      token_values = self._FormatTokenData(token_type, token_data)\n      event_tokens.append({token_type_string: token_values})\n\n      if token_type in (\n          self._TOKEN_TYPE_AUT_RETURN32, self._TOKEN_TYPE_AUT_RETURN64):\n        return_token_values = token_values\n\n    if token_data.signature != self._TRAILER_TOKEN_SIGNATURE:\n      raise errors.ParseError('Unsupported signature in trailer token.')\n\n    if token_data.record_size != header_record_size:\n      raise errors.ParseError(\n          'Mismatch of event record size between header and trailer token.')\n\n    event_data = BSMEventData()\n    event_data.event_type = event_type\n    event_data.extra_tokens = event_tokens\n    event_data.offset = header_record_offset\n    event_data.record_length = header_record_size\n    event_data.return_value = return_token_values\n\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an event record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nParseError: if the event record cannot be read.", "source": "juraj-google-style"}
{"code": "def __init__(self, timestamp=None):\n    \n    super(CocoaTime, self).__init__()\n    self._precision = definitions.PRECISION_1_SECOND\n    self._timestamp = timestamp", "docstring": "Initializes a Cocoa timestamp.\n\nArgs:\ntimestamp (Optional[float]): Cocoa timestamp.", "source": "juraj-google-style"}
{"code": "def animate(func: types.AnyFunction = None,\n            *,\n            animation: types.AnimationGenerator = _default_animation(),\n            step: float = 0.1) -> types.AnyFunction:\n    \n    if callable(func):\n        return _animate_no_kwargs(func, animation, step)\n    elif func is None:\n        return _animate_with_kwargs(animation_gen=animation, step=step)\n    else:\n        raise TypeError(\"argument 'func' must either be None or callable\")", "docstring": "Wrapper function for the _Animate wrapper class.\n\nArgs:\nfunc: A function to run while animation is showing.\nanimation: An AnimationGenerator that yields animation frames.\nstep: Approximate timestep (in seconds) between frames.\nReturns:\nAn animated version of func if func is not None. Otherwise, a function\nthat takes a function and returns an animated version of that.", "source": "juraj-google-style"}
{"code": "def num_memory_zones(self):\n        \n        count = self._dll.JLINK_GetMemZones(0, 0)\n        if count < 0:\n            raise errors.JLinkException(count)\n        return count", "docstring": "Returns the number of memory zones supported by the target.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nAn integer count of the number of memory zones supported by the\ntarget.\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"}
{"code": "def run_census(flags_obj, ctx):\n    train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE)\n    test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE)\n\n    def train_input_fn():\n        return census_dataset.input_fn(train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size)\n\n    def eval_input_fn():\n        return census_dataset.input_fn(test_file, 1, False, flags_obj.batch_size)\n    tensors_to_log = {'average_loss': '{loss_prefix}head/truediv', 'loss': '{loss_prefix}head/weighted_loss/Sum'}\n    model_helpers.apply_clean(flags.FLAGS)\n    model = build_estimator(model_dir=flags_obj.model_dir, model_type=flags_obj.model_type, model_column_fn=census_dataset.build_model_columns, inter_op=flags_obj.inter_op_parallelism_threads, intra_op=flags_obj.intra_op_parallelism_threads, ctx=ctx)\n    loss_prefix = LOSS_PREFIX.get(flags_obj.model_type, '')\n    tensors_to_log = {k: v.format(loss_prefix=loss_prefix) for (k, v) in tensors_to_log.items()}\n    train_hooks = hooks_helper.get_train_hooks(flags_obj.hooks, model_dir=flags_obj.model_dir, batch_size=flags_obj.batch_size, tensors_to_log=tensors_to_log)\n    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, hooks=train_hooks)\n    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)\n    tf.estimator.train_and_evaluate(model, train_spec, eval_spec)", "docstring": "Construct all necessary functions and call run_loop.\n\nArgs:\nflags_obj: Object containing user specified flags.", "source": "codesearchnet"}
{"code": "def sort_recursive(data):\n    \n\n    newdict = {}\n\n    for i in data.items():\n        if type(i[1]) is dict:\n            newdict[i[0]] = sort_recursive(i[1])\n        else:\n            newdict[i[0]] = i[1]\n\n    return OrderedDict(sorted(newdict.items(), key=lambda item: (compare_type(type(item[1])), item[0])))", "docstring": "Recursively sorts all elements in a dictionary\n\nArgs:\ndata (dict): The dictionary to sort\n\nReturns:\nsorted_dict (OrderedDict): The sorted data dict", "source": "juraj-google-style"}
{"code": "def _process_string_token(self, token, start_row, start_col):\n        \n        for i, char in enumerate(token):\n            if char in QUOTES:\n                break\n\n        \n        \n        norm_quote = token[i:]\n\n        \n        if len(norm_quote) >= 3 and norm_quote[:3] in TRIPLE_QUOTE_OPTS.values():\n            self._tokenized_triple_quotes[start_row] = (token, norm_quote[:3], start_row, start_col)\n            return\n\n        \n\n        preferred_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)\n\n        \n        if self.config.string_quote in SMART_CONFIG_OPTS:\n            other_quote = next(q for q in QUOTES if q != preferred_quote)\n            \n            if preferred_quote in token[i + 1:-1] and other_quote not in token[i + 1:-1]:\n                preferred_quote = other_quote\n\n        if norm_quote[0] != preferred_quote:\n            self._invalid_string_quote(\n                quote=norm_quote[0],\n                row=start_row,\n                correct_quote=preferred_quote,\n                col=start_col,\n            )", "docstring": "Internal method for identifying and checking string tokens\nfrom the token stream.\n\nArgs:\ntoken: the token to check.\nstart_row: the line on which the token was found.\nstart_col: the column on which the token was found.", "source": "juraj-google-style"}
{"code": "def before_starting_server(self):\n    self._validate_snippet_app_on_device()\n    self._disable_hidden_api_blocklist()", "docstring": "Performs the preparation steps before starting the remote server.\n\nThis function performs following preparation steps:\n* Validate that the Mobly Snippet app is available on the device.\n* Disable hidden api blocklist if necessary and possible.\n\nRaises:\nerrors.ServerStartPreCheckError: if the server app is not installed\nfor the current user.", "source": "github-repos"}
{"code": "def get(self, blob):\n        \n        return self._send(\n            url=self._base_url + blob.parent.server_id + '/' + blob.server_id + '?s=0',\n            method='GET',\n            allow_redirects=False\n        ).headers.get('Location')", "docstring": "Get the canonical link to a media blob.\n\nArgs:\nblob (gkeepapi.node.Blob): The blob.\n\nReturns:\nstr: A link to the media.", "source": "juraj-google-style"}
{"code": "def _read_variable_op(self, no_copy=False):\n    variable_accessed(self)\n    self._variable_read = True\n\n    def read_and_set_handle(no_copy):\n        if no_copy and forward_compat.forward_compatible(2022, 5, 3):\n            gen_resource_variable_ops.disable_copy_on_read(self.handle)\n        result = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype)\n        _maybe_set_handle_data(self._dtype, self.handle, result)\n        return result\n    if getattr(self, '_caching_device', None) is not None:\n        with ops.colocate_with(None, ignore_existing=True):\n            with ops.device(self._caching_device):\n                result = read_and_set_handle(no_copy)\n    else:\n        result = read_and_set_handle(no_copy)\n    if not context.executing_eagerly():\n        record.record_operation('ReadVariableOp', [result], [self.handle], backward_function=lambda x: [x], forward_function=lambda x: [x])\n    if context.xla_sharding_for_resource_variables_enabled() and (not context.executing_eagerly()) and (self._xla_sharding is not None):\n        sharding_string = self._xla_sharding.SerializeToString()\n        with ops.colocate_with(result):\n            result = gen_xla_ops.xla_sharding(result, sharding=sharding_string)\n        result.op._set_attr('_XlaSharding', attr_value_pb2.AttrValue(s=sharding_string))\n    return result", "docstring": "Reads the value of the variable.\n\nIf the variable is in copy-on-read mode and `no_copy` is True, the variable\nis converted to copy-on-write mode before it is read.\n\nArgs:\nno_copy: Whether to prevent a copy of the variable.\n\nReturns:\nThe value of the variable.", "source": "github-repos"}
{"code": "def __init__(self, context=None, queue=None):\n        \n        self._context = context or TelemetryContext()\n        self._queue = queue or SynchronousQueue(SynchronousSender())", "docstring": "Initializes a new instance of the class.\n\nArgs:\ncontext (:class:`TelemetryContext') the telemetry context to use when sending telemetry data.\\n\nqueue (:class:`QueueBase`) the queue to enqueue the resulting :class:`contracts.Envelope` to.", "source": "juraj-google-style"}
{"code": "def enable_logging(main):\n    \n    @functools.wraps(main)\n    def wrapper(*args, **kwargs):\n        import argparse\n        parser = argparse.ArgumentParser()\n\n        parser.add_argument(\n            '--loglevel', default=\"ERROR\", type=str,\n            help=\"Set the loglevel. Possible values: CRITICAL, ERROR (default),\"\n                 \"WARNING, INFO, DEBUG\")\n\n        options = parser.parse_args()\n\n        \n        \n        \n        \n        numeric_level = getattr(logging, options.loglevel.upper(), None)\n        if not isinstance(numeric_level, int):\n            raise ValueError('Invalid log level: %s' % options.loglevel)\n        logging.basicConfig(level=numeric_level)\n\n        retcode = main(*args, **kwargs)\n        return retcode\n\n    return wrapper", "docstring": "This decorator is used to decorate main functions.\nIt adds the initialization of the logger and an argument parser that allows\none to select the loglevel.\nUseful if we are writing simple main functions that call libraries where\nthe logging module is used\n\nArgs:\nmain:\nmain function.", "source": "juraj-google-style"}
{"code": "def normalize_in_place(sysmeta_pyxb, reset_timestamps=False):\n    if (sysmeta_pyxb.accessPolicy is not None):\n        sysmeta_pyxb.accessPolicy = d1_common.wrap.access_policy.get_normalized_pyxb(sysmeta_pyxb.accessPolicy)\n    if getattr(sysmeta_pyxb, 'mediaType', False):\n        d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.mediaType.property_)\n    if getattr(sysmeta_pyxb, 'replicationPolicy', False):\n        d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.replicationPolicy.preferredMemberNode)\n        d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.replicationPolicy.blockedMemberNode)\n    d1_common.xml.sort_elements_by_child_values(sysmeta_pyxb.replica, ['replicaVerified', 'replicaMemberNode', 'replicationStatus'])\n    sysmeta_pyxb.archived = bool(sysmeta_pyxb.archived)\n    if reset_timestamps:\n        epoch_dt = datetime.datetime(1970, 1, 1, tzinfo=d1_common.date_time.UTC())\n        sysmeta_pyxb.dateUploaded = epoch_dt\n        sysmeta_pyxb.dateSysMetadataModified = epoch_dt\n        for replica_pyxb in getattr(sysmeta_pyxb, 'replica', []):\n            replica_pyxb.replicaVerified = epoch_dt\n    else:\n        sysmeta_pyxb.dateUploaded = d1_common.date_time.round_to_nearest(sysmeta_pyxb.dateUploaded)\n        sysmeta_pyxb.dateSysMetadataModified = d1_common.date_time.round_to_nearest(sysmeta_pyxb.dateSysMetadataModified)\n        for replica_pyxb in getattr(sysmeta_pyxb, 'replica', []):\n            replica_pyxb.replicaVerified = d1_common.date_time.round_to_nearest(replica_pyxb.replicaVerified)", "docstring": "Normalize SystemMetadata PyXB object in-place.\n\nArgs:\nsysmeta_pyxb:\nSystemMetadata PyXB object to normalize.\n\nreset_timestamps: bool\n``True``: Timestamps in the SystemMetadata are set to a standard value so that\nobjects that are compared after normalization register as equivalent if only\ntheir timestamps differ.\n\nNotes:\nThe SystemMetadata is normalized by removing any redundant information and\nordering all sections where there are no semantics associated with the order. The\nnormalized SystemMetadata is intended to be semantically equivalent to the\nun-normalized one.", "source": "codesearchnet"}
{"code": "def SubtractFromBalance(self, assetId, fixed8_val):\n        \n        found = False\n        for key, balance in self.Balances.items():\n            if key == assetId:\n                self.Balances[assetId] = self.Balances[assetId] - fixed8_val\n                found = True\n        if not found:\n            self.Balances[assetId] = fixed8_val * Fixed8(-1)", "docstring": "Subtract amount to the specified balance.\n\nArgs:\nassetId (UInt256):\nfixed8_val (Fixed8): amount to add.", "source": "juraj-google-style"}
{"code": "def to_struct(self, from_api: dict=None, from_json: dict=None, indent: int=2) -> str:\n    if from_api:\n        from_json = self.to_json(from_api=from_api)\n    fields = []\n    spaces = ' ' * indent\n    for key, value in from_json.items():\n        if not isinstance(value, dict):\n            continue\n        if value.get('type', 'record') == 'record':\n            fields.append('%sSTRUCT(\\n%s\\n%s) AS %s' % (spaces, self.to_struct(from_json=value, indent=indent + 2), spaces, key))\n        elif value['type'] == 'array':\n            if 'enum' in value['items']:\n                fields.append('%s[%s\\n%s] AS %s' % (spaces, 'STRING', spaces, key))\n            else:\n                fields.append('%s[STRUCT(\\n%s\\n%s)] AS %s' % (spaces, self.to_struct(from_json=value['items'], indent=indent + 2), spaces, key))\n        else:\n            fields.append('%s%s AS %s' % (spaces, value['type'].upper(), key))\n    return ',\\n'.join(fields)", "docstring": "Translates a Discovery API Document schema to a BigQuery STRUCT.\n\nRecursively crawls the discovery document reference tree to build struct.\nLeverages recursion depth passed in constructor to stop if necessary.\n\nArgs:\nfrom_api: the api schema to extrapolate\nfrom_json: new object with references replaced, not passed by caller\nparents: used to track recursion depth for a specific schema branch\n\nReturns:\nA BigQuery STRUCT object that can be pasted into a query.", "source": "github-repos"}
{"code": "def export_run_metadata():\n    return context().export_run_metadata()", "docstring": "Returns a RunMetadata proto with accumulated information.\n\nThe returned protocol buffer contains information since the most recent call\nto either enable_run_metadata or export_run_metadata.\n\nReturns:\nA RunMetadata protocol buffer.", "source": "github-repos"}
{"code": "def correct_segmentation(segments, clusters, min_time):\n    result_segments = []\n    prev_segment = None\n    for (i, segment) in enumerate(segments):\n        if (len(segment) >= 1):\n            continue\n        cluster = clusters[i]\n        if (prev_segment is None):\n            prev_segment = segment\n        else:\n            cluster_dt = 0\n            if (len(cluster) > 0):\n                cluster_dt = abs(cluster[0].time_difference(cluster[(- 1)]))\n            if (cluster_dt <= min_time):\n                prev_segment.extend(segment)\n            else:\n                prev_segment.append(segment[0])\n                result_segments.append(prev_segment)\n                prev_segment = segment\n    if (prev_segment is not None):\n        result_segments.append(prev_segment)\n    return result_segments", "docstring": "Corrects the predicted segmentation\n\nThis process prevents over segmentation\n\nArgs:\nsegments (:obj:`list` of :obj:`list` of :obj:`Point`):\nsegments to correct\nmin_time (int): minimum required time for segmentation", "source": "codesearchnet"}
{"code": "def convert_http_request(request, referrer_host=None):\n    new_request = urllib.request.Request(request.url_info.url, origin_req_host=referrer_host)\n    for (name, value) in request.fields.get_all():\n        new_request.add_header(name, value)\n    return new_request", "docstring": "Convert a HTTP request.\n\nArgs:\nrequest: An instance of :class:`.http.request.Request`.\nreferrer_host (str): The referrering hostname or IP address.\n\nReturns:\nRequest: An instance of :class:`urllib.request.Request`", "source": "codesearchnet"}
{"code": "def _find_all_line_split(self, begin_line: int, end_line: int) -> list[int]:\n    curr_line = 0\n    curr_idx = 0\n    point_idx = []\n    while curr_line < begin_line:\n        curr_idx = self._src.find('\\n', curr_idx) + 1\n        curr_line += 1\n    point_idx.append(curr_idx)\n    while curr_line < end_line:\n        curr_idx = self._src.find('\\n', curr_idx) + 1\n        point_idx.append(curr_idx)\n        curr_line += 1\n    curr_idx = self._src.find('\\n', curr_idx)\n    curr_idx = len(self._src) if curr_idx == -1 else curr_idx\n    point_idx.append(curr_idx)\n    return point_idx", "docstring": "Finds all index of line boundaries between begin_line and end_line.\n\nDue to the possibility of the endline of the error message being on the\nlast line of the source code, last line will be the last index, and not past\nthe last index.\n\nArgs:\nbegin_line: The begin line of which we want to find the index of.\nend_line: The end line of which we want to find the index of.\n\nReturns:\nA list of indicies for the line boundaries.", "source": "github-repos"}
{"code": "def multi_log_probs_from_logits_and_actions(policy_logits, actions):\n    log_probs = []\n    for i in range(len(policy_logits)):\n        log_probs.append((- tf.nn.sparse_softmax_cross_entropy_with_logits(logits=policy_logits[i], labels=actions[i])))\n    return log_probs", "docstring": "Computes action log-probs from policy logits and actions.\n\nIn the notation used throughout documentation and comments, T refers to the\ntime dimension ranging from 0 to T-1. B refers to the batch size and\nACTION_SPACE refers to the list of numbers each representing a number of\nactions.\n\nArgs:\npolicy_logits: A list with length of ACTION_SPACE of float32\ntensors of shapes\n[T, B, ACTION_SPACE[0]],\n...,\n[T, B, ACTION_SPACE[-1]]\nwith un-normalized log-probabilities parameterizing a softmax policy.\nactions: A list with length of ACTION_SPACE of int32\ntensors of shapes\n[T, B],\n...,\n[T, B]\nwith actions.\n\nReturns:\nA list with length of ACTION_SPACE of float32\ntensors of shapes\n[T, B],\n...,\n[T, B]\ncorresponding to the sampling log probability\nof the chosen action w.r.t. the policy.", "source": "codesearchnet"}
{"code": "def wiki_request(self, params):\n    params['format'] = 'json'\n    if ('action' not in params):\n        params['action'] = 'query'\n    limit = self._rate_limit\n    last_call = self._rate_limit_last_call\n    if (limit and last_call and ((last_call + self._min_wait) > datetime.now())):\n        wait_time = ((last_call + self._min_wait) - datetime.now())\n        time.sleep(int(wait_time.total_seconds()))\n    req = self._get_response(params)\n    if self._rate_limit:\n        self._rate_limit_last_call = datetime.now()\n    return req", "docstring": "Make a request to the MediaWiki API using the given search\nparameters\n\nArgs:\nparams (dict): Request parameters\nReturns:\nA parsed dict of the JSON response\nNote:\nUseful when wanting to query the MediaWiki site for some \\\nvalue that is not part of the wrapper API", "source": "codesearchnet"}
{"code": "def fit_transform(self, data):\n    if data:\n        assert isinstance(data, dict), 'Step {}, \"data\" argument in the \"fit_transform()\" method must be dict, got {} instead.'.format(self.name, type(data))\n    logger.info('Step {}, working in \"{}\" mode'.format(self.name, self._mode))\n    if (self._mode == 'inference'):\n        ValueError('Step {}, you are in \"{}\" mode, where you cannot run \"fit\".Please change mode to \"train\" to enable fitting.Use: \"step.set_mode_train()\" then \"step.fit_transform()\"'.format(self.name, self._mode))\n    if (self.output_is_cached and (not self.force_fitting)):\n        logger.info('Step {} using cached output'.format(self.name))\n        step_output_data = self.output\n    elif (self.output_is_persisted and self.load_persisted_output and (not self.force_fitting)):\n        logger.info('Step {} loading persisted output from {}'.format(self.name, self.experiment_directory_output_step))\n        step_output_data = self._load_output(self.experiment_directory_output_step)\n    else:\n        step_inputs = {}\n        if (self.input_data is not None):\n            for input_data_part in self.input_data:\n                step_inputs[input_data_part] = data[input_data_part]\n        for input_step in self.input_steps:\n            step_inputs[input_step.name] = input_step.fit_transform(data)\n        if self.adapter:\n            step_inputs = self._adapt(step_inputs)\n        else:\n            step_inputs = self._unpack(step_inputs)\n        step_output_data = self._fit_transform_operation(step_inputs)\n    logger.info('Step {}, fit and transform completed'.format(self.name))\n    return step_output_data", "docstring": "Fit the model and transform data or load already processed data.\n\nLoads cached or persisted output or adapts data for the current transformer and\nexecutes ``transformer.fit_transform``.\n\nArgs:\ndata (dict): data dictionary with keys as input names and values as dictionaries of\nkey-value pairs that can be passed to the ``self.transformer.fit_transform`` method.\nExample:\n\n.. code-block:: python\n\ndata = {'input_1': {'X': X,\n'y': y},\n'input_2': {'X': X,\n'y': y}\n}\n\nReturns:\ndict: Step output from the ``self.transformer.fit_transform`` method", "source": "codesearchnet"}
{"code": "def __init__(self, max_sza=95.0, **kwargs):\n        \n        self.max_sza = max_sza\n        self.max_sza_cos = np.cos(np.deg2rad(max_sza)) if max_sza is not None else None\n        super(SunZenithCorrectorBase, self).__init__(**kwargs)", "docstring": "Collect custom configuration values.\n\nArgs:\nmax_sza (float): Maximum solar zenith angle in degrees that is\nconsidered valid and correctable. Default 95.0.", "source": "juraj-google-style"}
{"code": "def _NeedsClassParam(self, sig):\n    if self.class_name and self.function_name and sig.params:\n        safe_class_name = pytd_utils.Print(pytd.NamedType(self.class_name))\n        return pytd_utils.Print(sig.return_type) == safe_class_name and pytd_utils.Print(sig.params[0].type) in (f'type[{safe_class_name}]', f'Type[{safe_class_name}]', safe_class_name)\n    return False", "docstring": "Whether the signature needs a bounded type param for the class.\n\nWe detect the signatures\n(cls: Type[X][, ...]) -> X\nand\n(self: X[, ...]) -> X\nso that we can replace X with a bounded TypeVar. This heuristic\nisn't perfect; for example, in this naive copy method:\nclass X:\ndef copy(self):\nreturn X()\nwe should have left X alone. But it prevents a number of false\npositives by enabling us to infer correct types for common\nimplementations of __new__ and __enter__.\n\nArgs:\nsig: A pytd.Signature.\n\nReturns:\nTrue if the signature needs a class param, False otherwise.", "source": "github-repos"}
{"code": "def _relation_exists(self, connection, relation):\n        \n        query = 'SELECT 1 FROM sqlite_master WHERE (type=\\'table\\' OR type=\\'view\\') AND name=?;'\n        cursor = connection.cursor()\n        cursor.execute(query, [relation])\n        result = cursor.fetchall()\n        return result == [(1,)]", "docstring": "Returns True if relation (table or view) exists in the sqlite db. Otherwise returns False.\n\nArgs:\nconnection (apsw.Connection): connection to sqlite database who stores mpr data.\npartition (orm.Partition):\n\nReturns:\nboolean: True if relation exists, False otherwise.", "source": "juraj-google-style"}
{"code": "def unshuffle_from_sc_to_cpu(t: tensor.Tensor, num_sparse_cores: int, offset_in_shard: int, size_in_shard: int, shard_rotation: int=0) -> tensor.Tensor:\n    old_shape = t.shape\n    if t.shape[0] % num_sparse_cores != 0:\n        raise ValueError('The dim of table ({}) should be multiple of number of sparse cores ({})'.format(t.shape[1], num_sparse_cores))\n    shards_t = array_ops.reshape(t, (num_sparse_cores, t.shape[0] \n    shards = shards_t[:, offset_in_shard:offset_in_shard + size_in_shard, :]\n    if shard_rotation:\n        shards = manip_ops.roll(shards, -shard_rotation, axis=0)\n    intermediate_tensor = array_ops.transpose(shards, (1, 0, 2))\n    new_shape = (size_in_shard * num_sparse_cores, old_shape[1])\n    return array_ops.reshape(intermediate_tensor, new_shape)", "docstring": "Unshuffles the sparse core sharded embedding tables to unsharded.\n\nThis converts an input tensor respresenting stacked and sharded embedding\ntable into a specific embedding table variable by using the provided\nmetadata about the said table within the stacked, sharded embedding table.\nArgs:\nt: The input stacked and sharded embedding table from sparsecore.\nnum_sparse_cores: The number of sparsecores, this determines the number of\nshards that are present in the input t.\noffset_in_shard: Offset within a shard where the queried table starts.\nsize_in_shard: size (number of rows) of this queried table within each shard\nof the input t.\nshard_rotation: The rotation of this table's shards.\n\nReturns:\nAn embedding table which is part of the stacked embedding table t.", "source": "github-repos"}
{"code": "def generate_branches(scales=None, angles=None, shift_angle=0):\n    \n    branches = []\n    for pos, scale in enumerate(scales):\n        angle = -sum(angles)/2 + sum(angles[:pos]) + shift_angle\n        branches.append([scale, angle])\n    return branches", "docstring": "Generates branches with alternative system.\n\nArgs:\nscales (tuple/array): Indicating how the branch/es length/es develop/s from age to age.\nangles (tuple/array): Holding the branch and shift angle in radians.\nshift_angle (float): Holding the rotation angle for all branches.\n\nReturns:\nbranches (2d-array): A array constits of arrays holding scale and angle for every branch.", "source": "juraj-google-style"}
{"code": "def summary(self):\n    with tf.name_scope((self._name + '/summary')):\n        mean_summary = tf.cond((self._count > 0), (lambda : self._summary('mean', self._mean)), str)\n        std_summary = tf.cond((self._count > 1), (lambda : self._summary('stddev', self._std())), str)\n        return tf.summary.merge([mean_summary, std_summary])", "docstring": "Summary string of mean and standard deviation.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def _get_channel(host, timeout):\n    \n    connection = create_blocking_connection(host)\n\n    \n    if timeout >= 0:\n        connection.add_timeout(\n            timeout,\n            lambda: sys.stderr.write(\"Timeouted!\\n\") or sys.exit(1)\n        )\n\n    return connection.channel()", "docstring": "Create communication channel for given `host`.\n\nArgs:\nhost (str): Specified --host.\ntimeout (int): Set `timeout` for returned `channel`.\n\nReturns:\nObject: Pika channel object.", "source": "juraj-google-style"}
{"code": "def get_item(dictionary, tuple_key, default_value):\n    (u, v) = tuple_key\n    tuple1 = dictionary.get((u, v), None)\n    tuple2 = dictionary.get((v, u), None)\n    return (tuple1 or tuple2 or default_value)", "docstring": "Grab values from a dictionary using an unordered tuple as a key.\n\nDictionary should not contain None, 0, or False as dictionary values.\n\nArgs:\ndictionary: Dictionary that uses two-element tuple as keys\ntuple_key: Unordered tuple of two elements\ndefault_value: Value that is returned when the tuple_key is not found in the dictionary", "source": "codesearchnet"}
{"code": "def dict_to_xml(spec, full_document=False):\n    middle = xmltodict.unparse(spec, full_document=full_document, pretty=True)\n    return lxml.etree.fromstring(middle)", "docstring": "Convert dict to XML\n\nArgs:\nspec(dict): dict to convert\nfull_document(bool): whether to add XML headers\n\nReturns:\nlxml.etree.Element: XML tree", "source": "codesearchnet"}
{"code": "def traced(func=None, *, span_name=None, standalone=False, additional_attributes: Optional[List[Tuple[str, str, Union[Any, Callable[[Any], Any]]]]]=None):\n\n    def decorator(func):\n        if not _has_opentelemetry:\n            return func\n        import functools\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            instance = args[0] if args and (hasattr(func, '__self__') and func.__self__ is not None) else None\n            is_method = instance is not None\n            if is_method and hasattr(instance, 'tracer'):\n                tracer = instance.tracer\n            else:\n                tracer = get_tracer(f'transformers.{func.__module__}.{func.__name__}')\n            name = span_name or func.__name__\n            span_fn = tracer.start_span if standalone else tracer.start_as_current_span\n            with span_fn(name) as span:\n                span.set_attribute('function.name', func.__name__)\n                span.set_attribute('function.module', func.__module__)\n                span.set_attribute('function.is_method', is_method)\n                if args:\n                    for i, arg in enumerate(args):\n                        if isinstance(arg, (str, int, float, bool)) or arg is None:\n                            span.set_attribute(f'args.{i}', str(arg))\n                        else:\n                            span.set_attribute(f'args.{i}', str(type(arg)))\n                if kwargs:\n                    for key, value in kwargs.items():\n                        if isinstance(value, (str, int, float, bool)) or value is None:\n                            span.set_attribute(f'kwargs.{key}', str(value))\n                        else:\n                            span.set_attribute(f'kwargs.{key}', str(type(value)))\n                if additional_attributes and is_method:\n                    for attr_config in additional_attributes:\n                        instance_attribute_name, span_attribute_key, value_or_transform_function = attr_config\n                        if hasattr(instance, instance_attribute_name):\n                            attribute_value = getattr(instance, instance_attribute_name)\n                            if callable(value_or_transform_function):\n                                transformed_value = value_or_transform_function(attribute_value)\n                            else:\n                                transformed_value = value_or_transform_function\n                            span.set_attribute(span_attribute_key, transformed_value)\n                try:\n                    result = func(*args, **kwargs)\n                    return result\n                except Exception as e:\n                    span.set_status(Status(StatusCode.ERROR))\n                    span.record_exception(e)\n                    raise\n        return wrapper\n    if func is None:\n        return decorator\n    return decorator(func)", "docstring": "Decorator to trace function calls with OpenTelemetry.\n\nCan be used as @traced or @traced(span_name=\"custom_name\")\n\nArgs:\nfunc: The function to trace\nspan_name: Optional custom name for the span (defaults to function name)\nstandalone: If True, creates a parentless span\nadditional_attributes: Optional list of additional attributes to set on the span.\nEach item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function)\nwhere:\n- instance_attribute_name: Name of the attribute to get from the class instance\n- span_attribute_key: Key 
to use when setting the attribute on the span\n- value_or_transform_function: Either a raw value to use directly, or a function to transform\nthe attribute value before setting it on the span\n\nReturns:\nDecorated function with tracing", "source": "github-repos"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    \n    \n    fsntfs_file_entry = None\n    location = getattr(path_spec, 'location', None)\n    mft_attribute = getattr(path_spec, 'mft_attribute', None)\n    mft_entry = getattr(path_spec, 'mft_entry', None)\n\n    try:\n      if mft_attribute is not None and mft_entry is not None:\n        fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)\n      elif location is not None:\n        fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)\n\n    except IOError as exception:\n      raise errors.BackEndError(exception)\n\n    return fsntfs_file_entry is not None", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file entry exists.\n\nRaises:\nBackEndError: if the file entry cannot be opened.", "source": "juraj-google-style"}
{"code": "def discriminator(self, x, is_training, reuse=False):\n    \n    hparams = self.hparams\n    with tf.variable_scope(\n        \"discriminator\", reuse=reuse,\n        initializer=tf.random_normal_initializer(stddev=0.02)):\n      batch_size, height, width = common_layers.shape_list(x)[:3]\n      \n      net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2),\n                             padding=\"SAME\", name=\"d_conv1\")\n      \n      net = lrelu(net)\n      net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2),\n                             padding=\"SAME\", name=\"d_conv2\")\n      \n      if hparams.discriminator_batchnorm:\n        net = tf.layers.batch_normalization(net, training=is_training,\n                                            momentum=0.999, name=\"d_bn2\")\n      net = lrelu(net)\n      size = height * width\n      net = tf.reshape(net, [batch_size, size * 8])  \n      net = tf.layers.dense(net, 1024, name=\"d_fc3\")  \n      if hparams.discriminator_batchnorm:\n        net = tf.layers.batch_normalization(net, training=is_training,\n                                            momentum=0.999, name=\"d_bn3\")\n      net = lrelu(net)\n      return net", "docstring": "Discriminator architecture based on InfoGAN.\n\nArgs:\nx: input images, shape [bs, h, w, channels]\nis_training: boolean, are we in train or eval model.\nreuse: boolean, should params be re-used.\n\nReturns:\nout_logit: the output logits (before sigmoid).", "source": "juraj-google-style"}
{"code": "def build(self):\n    if self.exists:\n        self._build('requirements', self.requirements_last_modified, ('pip install -U -r %s' % self.requirements_file))\n        try:\n            self._build('requirements (dev)', self.dev_requirements_last_modified, ('pip install -U -r %s' % self.dev_requirements_file))\n        except Exception as e:\n            if ('No such file' not in str(e)):\n                raise e\n            self.stdout.write(style.yellow('Could not find dev requirements'))\n        try:\n            self._build('requirements (local)', self.local_requirements_last_modified, ('pip install -U -r %s' % self.local_requirements_file))\n        except Exception as e:\n            if ('No such file' not in str(e)):\n                raise e\n            self.stdout.write(style.yellow('Could not find local requirements'))\n        self._build('application', self.setup_last_modified, ('python %s develop' % self.setup_file))", "docstring": "Builds the app in the app's environment.\n\nOnly builds if the build is out-of-date and is non-empty.\nBuilds in 3 stages: requirements, dev requirements, and app.\npip is used to install requirements, and setup.py is used to\ninstall the app itself.\n\nRaises:\nValidationError if the app fails to build.", "source": "codesearchnet"}
{"code": "def get_field_value(self, key, default=MISSING):\n    meta_value = self.metadata.get(key)\n    context_value = self.context.get(key)\n    if (context_value is not None):\n        return context_value\n    elif (meta_value is not None):\n        return meta_value\n    return default", "docstring": "Method to fetch a value from either the fields metadata or the\nschemas context, in that order.\n\nArgs:\nkey (str): The name of the key to grab the value for.\n\nKeyword Args:\ndefault (object, optional): If the value doesn't exist in the\nschema's ``context`` or the field's ``metadata``, this value\nwill be returned. By default this will be ``MISSING``.\n\nReturns:\nobject: This will be the correct value to use given the parameters.", "source": "codesearchnet"}
{"code": "def add(self, val):\n        \n        return cache.lpush(self.key, json.dumps(val) if self.serialize else val)", "docstring": "Add given value to item (list)\n\nArgs:\nval: A JSON serializable object.\n\nReturns:\nCache backend response.", "source": "juraj-google-style"}
{"code": "def autodiscover(self, message):\n    if (message['version'] in self.allowed_versions):\n        logger.debug(('<%s> Client version matches server version.' % message['cuuid']))\n        response = serialize_data({'method': 'OHAI Client', 'version': self.version, 'server_name': self.server_name}, self.compression, encryption=False)\n    else:\n        logger.warning(('<%s> Client version %s does not match allowed server versions %s' % (message['cuuid'], message['version'], self.version)))\n        response = serialize_data({'method': 'BYE REGISTER'}, self.compression, encryption=False)\n    return response", "docstring": "This function simply returns the server version number as a response\nto the client.\n\nArgs:\nmessage (dict): A dictionary of the autodiscover message from the\nclient.\n\nReturns:\nA JSON string of the \"OHAI Client\" server response with the server's\nversion number.\n\nExamples:\n>>> response\n'{\"method\": \"OHAI Client\", \"version\": \"1.0\"}'", "source": "codesearchnet"}
{"code": "def join(self, timeout_s=None):\n    \n    if not self.thread:\n      return False\n    self.thread.join(timeout_s)\n    return self.running", "docstring": "Joins blocking until the interval ends or until timeout is reached.\n\nArgs:\ntimeout_s: The time in seconds to wait, defaults to forever.\nReturns:\nTrue if the interval is still running and we reached the timeout.", "source": "juraj-google-style"}
{"code": "def detokenize_numbers(text: str) -> str:\n    for reg, sub in DETOKENIZE_NUMBERS:\n        text = re.sub(reg, sub, text)\n    return text", "docstring": "Inverts the operation of *tokenize_numbers*. This is replacing ' @,@ ' and ' @.@' by ',' and '.'.\n\nArgs:\ntext: A string where the number should be detokenized.\n\nReturns:\nA detokenized string.\n\nExample:\n\n```python\n>>> detokenize_numbers(\"$ 5 @,@ 000 1 @.@ 73 m\")\n'$ 5,000 1.73 m'\n```", "source": "github-repos"}
{"code": "def transform_action(self, obs, func_call, skip_available=False):\n    func_id = func_call.function\n    try:\n        func = actions.FUNCTIONS[func_id]\n    except KeyError:\n        raise ValueError(('Invalid function id: %s.' % func_id))\n    if (not (skip_available or (func_id in self.available_actions(obs)))):\n        raise ValueError(('Function %s/%s is currently not available' % (func_id, func.name)))\n    if (len(func_call.arguments) != len(func.args)):\n        raise ValueError(('Wrong number of arguments for function: %s, got: %s' % (func, func_call.arguments)))\n    aif = self._agent_interface_format\n    for (t, arg) in zip(func.args, func_call.arguments):\n        if (t.name in ('screen', 'screen2')):\n            sizes = aif.action_dimensions.screen\n        elif (t.name == 'minimap'):\n            sizes = aif.action_dimensions.minimap\n        else:\n            sizes = t.sizes\n        if (len(sizes) != len(arg)):\n            raise ValueError(('Wrong number of values for argument of %s, got: %s' % (func, func_call.arguments)))\n        for (s, a) in zip(sizes, arg):\n            if (not (0 <= a < s)):\n                raise ValueError(('Argument is out of range for %s, got: %s' % (func, func_call.arguments)))\n    kwargs = {type_.name: type_.fn(a) for (type_, a) in zip(func.args, func_call.arguments)}\n    sc2_action = sc_pb.Action()\n    kwargs['action'] = sc2_action\n    kwargs['action_space'] = aif.action_space\n    if func.ability_id:\n        kwargs['ability_id'] = func.ability_id\n    actions.FUNCTIONS[func_id].function_type(**kwargs)\n    return sc2_action", "docstring": "Tranform an agent-style action to one that SC2 can consume.\n\nArgs:\nobs: a `sc_pb.Observation` from the previous frame.\nfunc_call: a `FunctionCall` to be turned into a `sc_pb.Action`.\nskip_available: If True, assume the action is available. This should only\nbe used for testing or if you expect to make actions that weren't\nvalid at the last observation.\n\nReturns:\na corresponding `sc_pb.Action`.\n\nRaises:\nValueError: if the action doesn't pass validation.", "source": "codesearchnet"}
{"code": "def sg_restore(sess, save_path, category=''):\n    r\n    \n    if not isinstance(category, (tuple, list)):\n        category = [category]\n\n    \n    var_list = {}\n    for cat in category:\n        for t in tf.global_variables():\n            if t.name.startswith(cat):\n                var_list[t.name[:-2]] = t\n\n    \n    saver = tf.train.Saver(var_list)\n    saver.restore(sess, save_path)", "docstring": "r\"\"\" Restores previously saved variables.\n\nArgs:\nsess: A `Session` to use to restore the parameters.\nsave_path: Path where parameters were previously saved.\ncategory: A `String` to filter variables starts with given category.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _mouseUp(x, y, button):\n    \n    if button == 'left':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_LEFTUP, x, y)\n        except (PermissionError, OSError): \n            pass\n    elif button == 'middle':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_MIDDLEUP, x, y)\n        except (PermissionError, OSError): \n            pass\n    elif button == 'right':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_RIGHTUP, x, y)\n        except (PermissionError, OSError): \n            pass\n    else:\n        assert False, \"button argument not in ('left', 'middle', 'right')\"", "docstring": "Send the mouse up event to Windows by calling the mouse_event() win32\nfunction.\n\nArgs:\nx (int): The x position of the mouse event.\ny (int): The y position of the mouse event.\nbutton (str): The mouse button, either 'left', 'middle', or 'right'\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def perform_check(self,\n                      env: env_tools.PreparedEnv,\n                      verbose: bool) -> Tuple[bool, str]:", "docstring": "Evaluates the status check and returns a pass/fail with message.\n\nArgs:\nenv: Describes a prepared python 3 environment in which to run.\nverbose: When set, more progress output is produced.\n\nReturns:\nA tuple containing a pass/fail boolean and then a details message.", "source": "juraj-google-style"}
{"code": "def get_reduced_symbols(symbols):\n    \n\n    reduced_symbols = []\n\n    for ss in symbols:\n        if not (ss in reduced_symbols):\n            reduced_symbols.append(ss)\n\n    return reduced_symbols", "docstring": "Reduces expanded list of symbols.\n\nArgs:\nsymbols: list containing any chemical symbols as often as\nthe atom appears in the structure\n\nReturns:\nreduced_symbols: any symbols appears only once", "source": "juraj-google-style"}
{"code": "def compute_memory_contents_under_schedule(self, schedule):\n    out_degree = self._compute_initial_out_degree()\n    curr_memory_contents = set()\n    memory_contents_for_each_operation = []\n    for operation_id in schedule:\n        operation_name = self._operations[operation_id].name\n        for output_name in self.get_operation_output_names(operation_name):\n            curr_memory_contents.add(output_name)\n        memory_contents_for_each_operation.append(frozenset(curr_memory_contents))\n        for output_name in self.get_operation_output_names(operation_name):\n            if (out_degree[output_name] == 0):\n                curr_memory_contents.remove(output_name)\n        for input_name in self.get_operation_input_names(operation_name):\n            out_degree[input_name] -= 1\n            if (out_degree[input_name] == 0):\n                curr_memory_contents.remove(input_name)\n    return memory_contents_for_each_operation", "docstring": "The in-memory tensors present when executing each operation in schedule.\n\nSimulates running operations in the order given by a schedule. Keeps track\nof the tensors in memory at every point in time, and outputs a list (one\nentry for each point in time) of all sets of all memory contents (i.e. a\nfrozenset of strings) ever seen in this execution.\n\nIt is assumed (but not checked) that schedule is a valid topological sort of\nthe operations in this graph.\n\nArgs:\nschedule: A list of integer ids; the order to run operations in.\n\nReturns:\na list of frozenset of strings, where the ith entry describes the tensors\nin memory when executing operation i (where schedule[i] is an index into\nget_all_operation_names()).", "source": "codesearchnet"}
{"code": "def placeholder_value(self, placeholder_context):\n    if placeholder_context.unnest_only:\n        return self\n    component_placeholders = nest.map_structure(lambda x: x.placeholder_value(placeholder_context), self._component_specs)\n    return self._from_components(component_placeholders)", "docstring": "Value used for tracing a function signature with this TraceType.\n\nWARNING: Do not override.\n\nArgs:\nplaceholder_context: A class container for context information when\ncreating a placeholder value.\n\nReturns:\nA `CompositeTensor` placeholder whose components are recursively composed\nof placeholders themselves.", "source": "github-repos"}
{"code": "def roll(x, shift, axis=None):\n    if any_symbolic_tensors((x,)):\n        return Roll(shift, axis=axis).symbolic_call(x)\n    return backend.numpy.roll(x, shift, axis=axis)", "docstring": "Roll tensor elements along a given axis.\n\nElements that roll beyond the last position are re-introduced at the first.\n\nArgs:\nx: Input tensor.\nshift: The number of places by which elements are shifted.\naxis: The axis along which elements are shifted. By default, the\narray is flattened before shifting, after which the original\nshape is restored.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def get_dialect_name(mixed: Union[SQLCompiler, Engine, Dialect]) -> str:\n    \n    dialect = get_dialect(mixed)\n    \n    return dialect.name", "docstring": "Finds the name of the SQLAlchemy dialect in use.\n\nArgs:\nmixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or\n:class:`Dialect` object\n\nReturns: the SQLAlchemy dialect name being used", "source": "juraj-google-style"}
{"code": "def send_worker_queue_message(self, *, batch_id, job_name, entry_point, worker_args, retry_count=0):\n    try:\n        job_id = str(uuid4())\n        self.job_queue.send_message(MessageBody=json.dumps({'batch_id': batch_id, 'job_id': job_id, 'job_name': job_name, 'entry_point': entry_point, 'worker_args': worker_args}), MessageDeduplicationId=job_id, MessageGroupId=batch_id, MessageAttributes={'RetryCount': {'StringValue': str(retry_count), 'DataType': 'Number'}})\n        if (retry_count == 0):\n            job = SchedulerJob()\n            job.job_id = job_id\n            job.batch_id = batch_id\n            job.status = SchedulerStatus.PENDING\n            job.data = worker_args\n            db.session.add(job)\n            db.session.commit()\n    except:\n        self.log.exception('Error when processing worker task')", "docstring": "Send a message to the `worker_queue` for a worker to execute the requests job\n\nArgs:\nbatch_id (`str`): Unique ID of the batch the job belongs to\njob_name (`str`): Non-unique ID of the job. This is used to ensure that the same job is only scheduled\na single time per batch\nentry_point (`dict`): A dictionary providing the entry point information for the worker to load the class\nworker_args (`dict`): A dictionary with the arguments required by the worker class (if any, can be an\nempty dictionary)\nretry_count (`int`): The number of times this one job has been attempted to be executed. If a job fails to\nexecute after 3 retries it will be marked as failed\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def __init__(self, maximum_number_of_items=50000):\n    \n    super(_EventSourceHeap, self).__init__()\n    self._heap = []\n    self._maximum_number_of_items = maximum_number_of_items", "docstring": "Initializes an event source heap.\n\nArgs:\nmaximum_number_of_items (Optional[int]): maximum number of items\nin the heap.", "source": "juraj-google-style"}
{"code": "def _make_tensor_slice_spec(slice_spec, use_constant=True):\n\n    def make_piece_scalar(piece):\n        if isinstance(piece, int):\n            scalar = constant_op.constant(piece)\n            if use_constant:\n                return scalar\n            else:\n                return array_ops.placeholder_with_default(scalar, [])\n        elif isinstance(piece, slice):\n            return slice(make_piece_scalar(piece.start), make_piece_scalar(piece.stop), make_piece_scalar(piece.step))\n        else:\n            return piece\n    if isinstance(slice_spec, tuple):\n        return tuple((make_piece_scalar(piece) for piece in slice_spec))\n    else:\n        return make_piece_scalar(slice_spec)", "docstring": "Wraps all integers in an extended slice spec w/ a tensor.\n\nThis function is used to help test slicing when the slice spec contains\ntensors, rather than integers.\n\nArgs:\nslice_spec: The extended slice spec.\nuse_constant: If true, then wrap each integer with a tf.constant.  If false,\nthen wrap each integer with a tf.placeholder.\n\nReturns:\nA copy of slice_spec, but with each integer i replaced with tf.constant(i).", "source": "github-repos"}
{"code": "def kron(*matrices: np.ndarray) -> np.ndarray:\n    product = np.eye(1)\n    for m in matrices:\n        product = np.kron(product, m)\n    return np.array(product)", "docstring": "Computes the kronecker product of a sequence of matrices.\n\nA *args version of lambda args: functools.reduce(np.kron, args).\n\nArgs:\n*matrices: The matrices and controls to combine with the kronecker\nproduct.\n\nReturns:\nThe resulting matrix.", "source": "codesearchnet"}
{"code": "def _send_join_group_request(self):\n    if self.coordinator_unknown():\n        e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)\n        return Future().failure(e)\n    elif (not self._client.ready(self.coordinator_id, metadata_priority=False)):\n        e = Errors.NodeNotReadyError(self.coordinator_id)\n        return Future().failure(e)\n    log.info('(Re-)joining group %s', self.group_id)\n    member_metadata = [(protocol, (metadata if isinstance(metadata, bytes) else metadata.encode())) for (protocol, metadata) in self.group_protocols()]\n    if (self.config['api_version'] < (0, 9)):\n        raise Errors.KafkaError('JoinGroupRequest api requires 0.9+ brokers')\n    elif ((0, 9) <= self.config['api_version'] < (0, 10, 1)):\n        request = JoinGroupRequest[0](self.group_id, self.config['session_timeout_ms'], self._generation.member_id, self.protocol_type(), member_metadata)\n    elif ((0, 10, 1) <= self.config['api_version'] < (0, 11, 0)):\n        request = JoinGroupRequest[1](self.group_id, self.config['session_timeout_ms'], self.config['max_poll_interval_ms'], self._generation.member_id, self.protocol_type(), member_metadata)\n    else:\n        request = JoinGroupRequest[2](self.group_id, self.config['session_timeout_ms'], self.config['max_poll_interval_ms'], self._generation.member_id, self.protocol_type(), member_metadata)\n    log.debug('Sending JoinGroup (%s) to coordinator %s', request, self.coordinator_id)\n    future = Future()\n    _f = self._client.send(self.coordinator_id, request)\n    _f.add_callback(self._handle_join_group_response, future, time.time())\n    _f.add_errback(self._failed_request, self.coordinator_id, request, future)\n    return future", "docstring": "Join the group and return the assignment for the next generation.\n\nThis function handles both JoinGroup and SyncGroup, delegating to\n:meth:`._perform_assignment` if elected leader by the coordinator.\n\nReturns:\nFuture: resolves to the encoded-bytes assignment returned from the\ngroup leader", "source": "codesearchnet"}
{"code": "def HashFilePath(self, path, byte_count):\n    with open(path, 'rb') as fd:\n        self.HashFile(fd, byte_count)", "docstring": "Updates underlying hashers with file on a given path.\n\nArgs:\npath: A path to the file that is going to be fed to the hashers.\nbyte_count: A maximum numbers of bytes that are going to be processed.", "source": "codesearchnet"}
{"code": "def on_options(self, req, resp, **kwargs):\n    resp.set_header('Allow', ', '.join(self.allowed_methods()))\n    resp.body = json.dumps(self.describe(req, resp))\n    resp.content_type = 'application/json'", "docstring": "Respond with JSON formatted resource description on OPTIONS request.\n\nArgs:\nreq (falcon.Request): Optional request object. Defaults to None.\nresp (falcon.Response): Optional response object. Defaults to None.\nkwargs (dict): Dictionary of values created by falcon from\nresource uri template.\n\nReturns:\nNone\n\n\n.. versionchanged:: 0.2.0\nDefault ``OPTIONS`` responses include ``Allow`` header with list of\nallowed HTTP methods.", "source": "codesearchnet"}
{"code": "def generate_sigproc_header(f):\n    header_string = b''\n    header_string += to_sigproc_keyword(b'HEADER_START')\n    for keyword in f.header.keys():\n        if (keyword == b'src_raj'):\n            header_string += (to_sigproc_keyword(b'src_raj') + to_sigproc_angle(f.header[b'src_raj']))\n        elif (keyword == b'src_dej'):\n            header_string += (to_sigproc_keyword(b'src_dej') + to_sigproc_angle(f.header[b'src_dej']))\n        elif ((keyword == b'az_start') or (keyword == b'za_start')):\n            header_string += (to_sigproc_keyword(keyword) + np.float64(f.header[keyword]).tostring())\n        elif (keyword not in header_keyword_types.keys()):\n            pass\n        else:\n            header_string += to_sigproc_keyword(keyword, f.header[keyword])\n    header_string += to_sigproc_keyword(b'HEADER_END')\n    return header_string", "docstring": "Generate a serialzed sigproc header which can be written to disk.\n\nArgs:\nf (Filterbank object): Filterbank object for which to generate header\n\nReturns:\nheader_str (str): Serialized string corresponding to header", "source": "codesearchnet"}
{"code": "def get_unfrozen_copy(values):\n    \n    if isinstance(values, (frozendict, dict)):\n        return {key: get_unfrozen_copy(value) for key, value in values.items()}\n    elif isinstance(values, (list, tuple)):\n        return [get_unfrozen_copy(value) for value in values]\n\n    \n    return values", "docstring": "Recursively convert `value`'s tuple values into lists, and frozendicts into dicts.\n\nArgs:\nvalues (frozendict/tuple): the frozendict/tuple.\n\nReturns:\nvalues (dict/list): the unfrozen copy.", "source": "juraj-google-style"}
{"code": "def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):\n    import matplotlib.pyplot as plt\n    plt.figure(figsize=(9, 7))\n    seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[temp]\n    plt.plot(self._bz.mu_steps, seebeck, linewidth=3.0)\n    self._plot_bg_limits()\n    self._plot_doping(temp)\n    if (output == 'eig'):\n        plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])\n    if (xlim is None):\n        plt.xlim((- 0.5), (self._bz.gap + 0.5))\n    else:\n        plt.xlim(xlim[0], xlim[1])\n    plt.ylabel('Seebeck \\n coefficient  ($\\\\mu$V/K)', fontsize=30.0)\n    plt.xlabel('E-E$_f$ (eV)', fontsize=30)\n    plt.xticks(fontsize=25)\n    plt.yticks(fontsize=25)\n    plt.tight_layout()\n    return plt", "docstring": "Plot the seebeck coefficient in function of Fermi level\n\nArgs:\ntemp:\nthe temperature\nxlim:\na list of min and max fermi energy by default (0, and band gap)\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def _environment_variables(**kwargs):\n        \n        \n        user_agent = os.getenv('USER_AGENT')\n        if user_agent is not None:\n            kwargs['user_agent'] = user_agent\n        preprefix = os.getenv('PREPREFIX')\n        if preprefix is not None:\n            kwargs['preprefix'] = preprefix\n        return kwargs", "docstring": "Overwrite keyword arguments with environment variables\n\nArgs:\n**kwargs: See below\nuser_agent (str): User agent string.\n\nReturns:\nkwargs: Changed keyword arguments", "source": "juraj-google-style"}
{"code": "def mp_atan2(y, x):\n    \n    return 'if((x)>0, atan((y)/(x)), if(((x)<0) and ((y)>=0), atan((y)/(x))+pi, if(((x)<0) and ((y)<0), atan((y)/(x))-pi, if(((x)==0) and ((y)>0), pi/2, if(((x)==0) and ((y)<0), -pi/2, 0)))))'.replace(\n        'pi', str(math.pi)).replace('y', y).replace('x', x)", "docstring": "muparser atan2 function\n\nImplements an atan2(y,x) function for older muparser versions (<2.1.0);\natan2 was added as a built-in function in muparser 2.1.0\n\nArgs:\ny (str): y argument of the atan2(y,x) function\nx (str): x argument of the atan2(y,x) function\n\nReturns:\nA muparser string that calculates atan2(y,x)", "source": "juraj-google-style"}
{"code": "def _calculateEncodingKey(comparator):\n    encodingName = None\n    for (k, v) in list(_encodings.items()):\n        if (v == comparator):\n            encodingName = k\n            break\n    return encodingName", "docstring": "Gets the first key of all available encodings where the corresponding\nvalue matches the comparator.\n\nArgs:\ncomparator (string): A view name for an encoding.\n\nReturns:\nstr: A key for a specific encoding used by python.", "source": "codesearchnet"}
{"code": "def state_province_region(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `state_province_region`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `state_province_region`')\n    self._state_province_region = value", "docstring": "Corresponds to IDD Field `state_province_region`\n\nArgs:\nvalue (str): value for IDD Field `state_province_region`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def AddServiceDescriptor(self, service_desc):\n    if (not isinstance(service_desc, descriptor.ServiceDescriptor)):\n        raise TypeError('Expected instance of descriptor.ServiceDescriptor.')\n    self._service_descriptors[service_desc.full_name] = service_desc", "docstring": "Adds a ServiceDescriptor to the pool.\n\nArgs:\nservice_desc: A ServiceDescriptor.", "source": "codesearchnet"}
{"code": "def assign_selective_dynamics(self, slab):\n    sd_list = []\n    sd_list = [([False, False, False] if (site.properties['surface_properties'] == 'subsurface') else [True, True, True]) for site in slab.sites]\n    new_sp = slab.site_properties\n    new_sp['selective_dynamics'] = sd_list\n    return slab.copy(site_properties=new_sp)", "docstring": "Helper function to assign selective dynamics site_properties\nbased on surface, subsurface site properties\n\nArgs:\nslab (Slab): slab for which to assign selective dynamics", "source": "codesearchnet"}
{"code": "def validate_log_output(self, passed, db_data, user_data, oper):\n    truncate = self.args.truncate\n    if ((db_data is not None) and passed):\n        if (isinstance(db_data, string_types) and (len(db_data) > truncate)):\n            db_data = db_data[:truncate]\n        elif isinstance(db_data, list):\n            db_data_truncated = []\n            for d in db_data:\n                if ((d is not None) and isinstance(d, string_types) and (len(d) > truncate)):\n                    db_data_truncated.append('{} ...'.format(d[:self.args.truncate]))\n                else:\n                    db_data_truncated.append(d)\n            db_data = db_data_truncated\n    if ((user_data is not None) and passed):\n        if (isinstance(user_data, string_types) and (len(user_data) > truncate)):\n            user_data = user_data[:self.args.truncate]\n        elif isinstance(user_data, list):\n            user_data_truncated = []\n            for u in user_data:\n                if (isinstance(db_data, string_types) and (len(u) > truncate)):\n                    user_data_truncated.append('{} ...'.format(u[:self.args.truncate]))\n                else:\n                    user_data_truncated.append(u)\n            user_data = user_data_truncated\n    self.log.info('[validate] DB Data   : ({}), Type: [{}]'.format(db_data, type(db_data)))\n    self.log.info('[validate] Operator  : ({})'.format(oper))\n    self.log.info('[validate] User Data : ({}), Type: [{}]'.format(user_data, type(user_data)))\n    if passed:\n        self.log.info('[validate] Results   : Passed')\n    else:\n        self.log.error('[validate] Results  : Failed')\n        if ((db_data is not None) and (user_data is not None) and (oper in ['eq', 'ne'])):\n            try:\n                diff_count = 0\n                for (i, diff) in enumerate(difflib.ndiff(db_data, user_data)):\n                    if (diff[0] == ' '):\n                        continue\n                    elif (diff[0] == '-'):\n                        self.log.info('[validate] Diff      : Missing data at index {}'.format(i))\n                    elif (diff[0] == '+'):\n                        self.log.info('[validate] Diff      : Extra data at index {}'.format(i))\n                    if (diff_count > self.max_diff):\n                        self.log.info('Max number of differences reached.')\n                        break\n                    diff_count += 1\n            except TypeError:\n                pass\n            except KeyError:\n                pass\n        if self.args.halt_on_fail:\n            raise RuntimeError('Failed validating data.')", "docstring": "Format the validation log output to be easier to read.\n\nArgs:\npassed (bool): The results of the validation test.\ndb_data (str): The data store in Redis.\nuser_data (str): The user provided data.\noper (str): The comparison operator.\n\nRaises:\nRuntimeError: Raise error on validation failure if halt_on_fail is True.", "source": "codesearchnet"}
{"code": "def depth_september_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_september_average_ground_temperature`'.format(value))\n    self._depth_september_average_ground_temperature = value", "docstring": "Corresponds to IDD Field\n`depth_september_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_september_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def from_json(cls, json):\n    \n    mapreduce_spec = cls(json[\"name\"],\n                         json[\"mapreduce_id\"],\n                         json[\"mapper_spec\"],\n                         json.get(\"params\"),\n                         json.get(\"hooks_class_name\"))\n    return mapreduce_spec", "docstring": "Create new MapreduceSpec from the json, encoded by to_json.\n\nArgs:\njson: json representation of MapreduceSpec.\n\nReturns:\nan instance of MapreduceSpec with all data deserialized from json.", "source": "juraj-google-style"}
{"code": "def take(self, count=1):\n    if self.closed():\n        raise ValueError('Attempt to call take() on a closed Queryable.')\n    count = max(0, count)\n    return self._create(itertools.islice(self, count))", "docstring": "Returns a specified number of elements from the start of a sequence.\n\nIf the source sequence contains fewer elements than requested only the\navailable elements will be returned and no exception will be raised.\n\nNote: This method uses deferred execution.\n\nArgs:\ncount: An optional number of elements to take. The default is one.\n\nReturns:\nA Queryable over the first count elements of the source sequence,\nor the all elements of elements in the source, whichever is fewer.\n\nRaises:\nValueError: If the Queryable is closed()", "source": "codesearchnet"}
{"code": "def _compute_merkle_root(self, required_state_root):\n    state_hash = None\n    if (self._previous_valid_batch_c_id is not None):\n        publishing_or_genesis = (self._always_persist or (required_state_root is None))\n        state_hash = self._squash(state_root=self._previous_state_hash, context_ids=[self._previous_valid_batch_c_id], persist=self._always_persist, clean_up=publishing_or_genesis)\n        if (self._always_persist is True):\n            return state_hash\n        if (state_hash == required_state_root):\n            self._squash(state_root=self._previous_state_hash, context_ids=[self._previous_valid_batch_c_id], persist=True, clean_up=True)\n    return state_hash", "docstring": "Computes the merkle root of the state changes in the context\ncorresponding with _last_valid_batch_c_id as applied to\n_previous_state_hash.\n\nArgs:\nrequired_state_root (str): The merkle root that these txns\nshould equal.\n\nReturns:\nstate_hash (str): The merkle root calculated from the previous\nstate hash and the state changes from the context_id", "source": "codesearchnet"}
{"code": "def _AddCredentialConfiguration(\n      self, path_spec, credential_type, credential_data):\n    \n    credential_configuration = configurations.CredentialConfiguration(\n        credential_data=credential_data, credential_type=credential_type,\n        path_spec=path_spec)\n\n    self._credential_configurations.append(credential_configuration)", "docstring": "Adds a credential configuration.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification.\ncredential_type (str): credential type.\ncredential_data (bytes): credential data.", "source": "juraj-google-style"}
{"code": "def signCertAs(self, cert, signas):\n        \n        cakey = self.getCaKey(signas)\n        if cakey is None:\n            raise s_exc.NoCertKey('Missing .key for %s' % signas)\n        cacert = self.getCaCert(signas)\n        if cacert is None:\n            raise s_exc.NoCertKey('Missing .crt for %s' % signas)\n\n        cert.set_issuer(cacert.get_subject())\n        cert.sign(cakey, self.signing_digest)", "docstring": "Signs a certificate with a CA keypair.\n\nArgs:\ncert (OpenSSL.crypto.X509): The certificate to sign.\nsignas (str): The CA keypair name to sign the new keypair with.\n\nExamples:\nSign a certificate with the CA \"myca\":\n\ncdir.signCertAs(mycert, 'myca')\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def parse_json_path(self, jsonpath):\n\n        \n        if jsonpath not in self.parsed:\n            try:\n                self.parsed[jsonpath] = self.parser(jsonpath)\n            except Exception:\n                self.log(\"Invalid Json Path: \" + jsonpath, \"error\")\n                raise InvalidJsonPathError(\"Invalid Json Path\")\n\n        return self.parsed[jsonpath]", "docstring": "Parse a jsonpath\n\nArgs:\njsonpath: str\n\nReturns: a parsed json path", "source": "juraj-google-style"}
{"code": "class CategoricalHinge(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='categorical_hinge', dtype=None):\n        super().__init__(fn=categorical_hinge, name=name, dtype=dtype)\n        self._direction = 'down'\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the categorical hinge metric between `y_true` and `y_pred`.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExample:\n>>> m = keras.metrics.CategoricalHinge()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])\n>>> m.result().numpy()\n1.4000001\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],\n...                sample_weight=[1, 0])\n>>> m.result()\n1.2", "source": "github-repos"}
{"code": "async def complete_task(context, result):\n    args = [get_task_id(context.claim_task), get_run_id(context.claim_task)]\n    reversed_statuses = get_reversed_statuses(context)\n    try:\n        if (result == 0):\n            log.info('Reporting task complete...')\n            response = (await context.temp_queue.reportCompleted(*args))\n        elif ((result != 1) and (result in reversed_statuses)):\n            reason = reversed_statuses[result]\n            log.info('Reporting task exception {}...'.format(reason))\n            payload = {'reason': reason}\n            response = (await context.temp_queue.reportException(*args, payload))\n        else:\n            log.info('Reporting task failed...')\n            response = (await context.temp_queue.reportFailed(*args))\n        log.debug('Task status response:\\n{}'.format(pprint.pformat(response)))\n    except taskcluster.exceptions.TaskclusterRestFailure as exc:\n        if (exc.status_code == 409):\n            log.info('409: not reporting complete/failed.')\n        else:\n            raise", "docstring": "Mark the task as completed in the queue.\n\nDecide whether to call reportCompleted, reportFailed, or reportException\nbased on the exit status of the script.\n\nIf the task has expired or been cancelled, we'll get a 409 status.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nRaises:\ntaskcluster.exceptions.TaskclusterRestFailure: on non-409 error.", "source": "codesearchnet"}
{"code": "def _update_hasher(hasher, data, types=True):\n    \n    \n    if isinstance(data, (tuple, list, zip)):\n        needs_iteration = True\n    else:\n        needs_iteration = any(check(data) for check in\n                              _HASHABLE_EXTENSIONS.iterable_checks)\n\n    if needs_iteration:\n        \n        \n        SEP = b'_,_'\n        ITER_PREFIX = b'_[_'\n        ITER_SUFFIX = b'_]_'\n\n        iter_ = iter(data)\n        hasher.update(ITER_PREFIX)\n        \n        \n        try:\n            for item in iter_:\n                prefix, hashable = _convert_to_hashable(item, types)\n                binary_data = prefix + hashable + SEP\n                hasher.update(binary_data)\n        except TypeError:\n            \n            \n            _update_hasher(hasher, item, types)\n            for item in iter_:\n                \n                _update_hasher(hasher, item, types)\n                hasher.update(SEP)\n        hasher.update(ITER_SUFFIX)\n    else:\n        prefix, hashable = _convert_to_hashable(data, types)\n        binary_data = prefix + hashable\n        hasher.update(binary_data)", "docstring": "Converts `data` into a byte representation and calls update on the hasher\n`hashlib.HASH` algorithm.\n\nArgs:\nhasher (HASH): instance of a hashlib algorithm\ndata (object): ordered data with structure\ntypes (bool): include type prefixes in the hash\n\nExample:\n>>> hasher = hashlib.sha512()\n>>> data = [1, 2, ['a', 2, 'c']]\n>>> _update_hasher(hasher, data)\n>>> print(hasher.hexdigest()[0:8])\ne2c67675\n\n2ba8d82b", "source": "juraj-google-style"}
{"code": "def Columns(iterable):\n    columns = sorted(iterable)\n    return '({})'.format(', '.join(('`{}`'.format(col) for col in columns)))", "docstring": "Returns a string of column names for MySQL INSERTs.\n\nTo account for Iterables with undefined order (dicts before Python 3.6),\nthis function sorts column names.\n\nExamples:\n>>> Columns({\"password\": \"foo\", \"name\": \"bar\"})\nu'(`name`, `password`)'\n\nArgs:\niterable: The iterable of strings to be used as column names.\nReturns: A string containing a tuple of sorted comma-separated column names.", "source": "codesearchnet"}
{"code": "def cut_matrix(self, n):\n    return connectivity.relevant_connections(n, self.from_nodes, self.to_nodes)", "docstring": "Compute the cut matrix for this cut.\n\nThe cut matrix is a square matrix which represents connections severed\nby the cut.\n\nArgs:\nn (int): The size of the network.\n\nExample:\n>>> cut = Cut((1,), (2,))\n>>> cut.cut_matrix(3)\narray([[0., 0., 0.],\n[0., 0., 1.],\n[0., 0., 0.]])", "source": "codesearchnet"}
{"code": "def add_graph(self, run_key, device_name, graph_def, debug=False):\n    \n    graph_dict = (self._run_key_to_debug_graphs if debug else\n                  self._run_key_to_original_graphs)\n    if not run_key in graph_dict:\n      graph_dict[run_key] = dict()  \n    graph_dict[run_key][tf.compat.as_str(device_name)] = (\n        debug_graphs_helper.DebugGraphWrapper(graph_def))", "docstring": "Add a GraphDef.\n\nArgs:\nrun_key: A key for the run, containing information about the feeds,\nfetches, and targets.\ndevice_name: The name of the device that the `GraphDef` is for.\ngraph_def: An instance of the `GraphDef` proto.\ndebug: Whether `graph_def` consists of the debug ops.", "source": "juraj-google-style"}
{"code": "def _instantiate_data_type(self, data_type_class, data_type_args, loc):\n        \n        assert issubclass(data_type_class, DataType), \\\n            'Expected stone.data_type.DataType, got %r' % data_type_class\n\n        argspec = inspect.getargspec(data_type_class.__init__)  \n        argspec.args.remove('self')\n        num_args = len(argspec.args)\n        \n        num_defaults = len(argspec.defaults or ())\n\n        pos_args, kw_args = data_type_args\n\n        if (num_args - num_defaults) > len(pos_args):\n            \n            raise InvalidSpec(\n                'Missing positional argument %s for %s type' %\n                (quote(argspec.args[len(pos_args)]),\n                 quote(data_type_class.__name__)),\n                *loc)\n        elif (num_args - num_defaults) < len(pos_args):\n            \n            raise InvalidSpec(\n                'Too many positional arguments for %s type' %\n                quote(data_type_class.__name__),\n                *loc)\n\n        \n        args = {}\n        for i, key in enumerate(argspec.args):\n            args[key] = (i >= num_args - num_defaults)\n\n        for key in kw_args:\n            \n            if key not in args:\n                raise InvalidSpec('Unknown argument %s to %s type.' %\n                    (quote(key), quote(data_type_class.__name__)),\n                    *loc)\n            \n            if not args[key]:\n                raise InvalidSpec(\n                    'Positional argument %s cannot be specified as a '\n                    'keyword argument.' % quote(key),\n                    *loc)\n            del args[key]\n\n        try:\n            return data_type_class(*pos_args, **kw_args)\n        except ParameterError as e:\n            \n            \n            raise InvalidSpec('Bad argument to %s type: %s' %\n                (quote(data_type_class.__name__), e.args[0]),\n                *loc)", "docstring": "Responsible for instantiating a data type with additional attributes.\nThis method ensures that the specified attributes are valid.\n\nArgs:\ndata_type_class (DataType): The class to instantiate.\ndata_type_attrs (dict): A map from str -> values of attributes.\nThese will be passed into the constructor of data_type_class\nas keyword arguments.\n\nReturns:\nstone.data_type.DataType: A parameterized instance.", "source": "juraj-google-style"}
{"code": "def apply_indexed_slices_grad(self, grad, local_step=0, name=None):\n    return self.apply_grad(grad_indices=grad.indices, grad_values=grad.values, grad_shape=grad.dense_shape, local_step=local_step, name=name)", "docstring": "Attempts to apply a gradient to the accumulator.\n\nThe attempt is silently dropped if the gradient is stale, i.e., `local_step`\nis less than the accumulator's global time step.\n\nArgs:\ngrad: The gradient `IndexedSlices` to be applied.\nlocal_step: Time step at which the gradient was computed.\nname: Optional name for the operation.\n\nReturns:\nThe operation that (conditionally) applies a gradient to the accumulator.\n\nRaises:\nInvalidArgumentError: If grad is of the wrong shape", "source": "github-repos"}
{"code": "class TrOCRProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = 'AutoImageProcessor'\n    tokenizer_class = 'AutoTokenizer'\n\n    def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n        feature_extractor = None\n        if 'feature_extractor' in kwargs:\n            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)\n            feature_extractor = kwargs.pop('feature_extractor')\n        image_processor = image_processor if image_processor is not None else feature_extractor\n        if image_processor is None:\n            raise ValueError('You need to specify an `image_processor`.')\n        if tokenizer is None:\n            raise ValueError('You need to specify a `tokenizer`.')\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n        self._in_target_context_manager = False\n\n    def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[TrOCRProcessorKwargs]) -> BatchFeature:\n        \n        if self._in_target_context_manager:\n            return self.current_processor(images, **kwargs)\n        if images is None and text is None:\n            raise ValueError('You need to specify either an `images` or `text` input to process.')\n        output_kwargs = self._merge_kwargs(TrOCRProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n        if images is not None:\n            inputs = self.image_processor(images, **output_kwargs['images_kwargs'])\n        if text is not None:\n            encodings = self.tokenizer(text, **output_kwargs['text_kwargs'])\n        if text is None:\n            return inputs\n        elif images is None:\n            return encodings\n        else:\n            inputs['labels'] = encodings['input_ids']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @contextmanager\n    def as_target_processor(self):\n        \n        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your images inputs, or in a separate call.')\n        self._in_target_context_manager = True\n        self.current_processor = self.tokenizer\n        yield\n        self.current_processor = self.image_processor\n        self._in_target_context_manager = False\n\n    @property\n    def feature_extractor_class(self):\n        warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)\n        return self.image_processor_class\n\n    @property\n    def feature_extractor(self):\n        warnings.warn('`feature_extractor` is deprecated and will be removed in v5. 
Use `image_processor` instead.', FutureWarning)\n        return self.image_processor", "docstring": "Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.\n\n[`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and\n[`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for\nmore information.\n\nArgs:\nimage_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):\nAn instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.\ntokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):\nAn instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def add_showcase(self, showcase, showcases_to_check=None):\n    dataset_showcase = self._get_dataset_showcase_dict(showcase)\n    if (showcases_to_check is None):\n        showcases_to_check = self.get_showcases()\n    for showcase in showcases_to_check:\n        if (dataset_showcase['showcase_id'] == showcase['id']):\n            return False\n    showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)\n    showcase._write_to_hdx('associate', dataset_showcase, 'package_id')\n    return True", "docstring": "Add dataset to showcase\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary\nshowcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.\n\nReturns:\nbool: True if the showcase was added, False if already present", "source": "codesearchnet"}
{"code": "def _add_file_argument(parser, types, args, custom_kwargs=None):\n    custom_kwargs = custom_kwargs or {}\n    arg = args[-1]\n    dest = custom_kwargs.get('dest', arg.lstrip(_ARG_PREFIX).replace('-', '_'))\n    kwargs = {'type': types.get(dest), 'action': 'store', 'default': config.ITEMS[dest].default, 'help': config.ITEMS[dest].comment}\n    kwargs.update(custom_kwargs)\n    if kwargs['type'] is None:\n        del kwargs['type']\n    if arg.startswith(_ARG_PREFIX):\n        kwargs['dest'] = dest\n    elif 'type' in kwargs:\n        kwargs['default'] = kwargs['type'](kwargs['default'])\n    parser.add_argument(*args, **kwargs)", "docstring": "Add a file-configurable option to the parser.\n\nArgs:\nparser: The parser.\ntypes: A map from option destination to type.\nargs: The option's name(s). Either a 2-tuple of (short_arg, arg) or a\n1-tuple of (arg,).\ncustom_kwargs: The option's custom kwargs.", "source": "github-repos"}
{"code": "def is_parsable(url):\n        \n\n        try:\n            parsed = urlparse(url)\n            URLHelper.__cache[url] = parsed\n            return True\n        except:\n            return False", "docstring": "Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it.\n\nArgs:\nurl (str): The URL to check.\n\nReturns:\nbool: True if parsable, False otherwise.", "source": "juraj-google-style"}
{"code": "def _expand_dims(x, input_shape, output_shape):\n    verify_no_new_dims([output_shape], input_shape)\n    if ((input_shape == output_shape) or (input_shape.ndims == 0)):\n        return x\n    perm = [input_shape.dims.index(d) for d in output_shape.dims if (d in input_shape.dims)]\n    x = tf.transpose(x, perm)\n    for (i, d) in enumerate(output_shape.dims):\n        if (d not in input_shape.dims):\n            x = tf.expand_dims(x, i)\n    return x", "docstring": "Expand dimensions and transpose if necessary.\n\nArgs:\nx: a tf.Tensor\ninput_shape: a Shape\noutput_shape: a Shape whose dimensions are a superset of\nthose in input_shape\n\nReturns:\na tf.Tensor", "source": "codesearchnet"}
{"code": "def global_horizontal_radiation(self, value=9999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `global_horizontal_radiation`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `global_horizontal_radiation`')\n\n        self._global_horizontal_radiation = value", "docstring": "Corresponds to IDD Field `global_horizontal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `global_horizontal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, protocol, node, peers, ksize, alpha):\n        \n        self.protocol = protocol\n        self.ksize = ksize\n        self.alpha = alpha\n        self.node = node\n        self.nearest = NodeHeap(self.node, self.ksize)\n        self.last_ids_crawled = []\n        log.info(\"creating spider with peers: %s\", peers)\n        self.nearest.push(peers)", "docstring": "Create a new C{SpiderCrawl}er.\n\nArgs:\nprotocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.\nnode: A :class:`~kademlia.node.Node` representing the key we're\nlooking for\npeers: A list of :class:`~kademlia.node.Node` instances that\nprovide the entry point for the network\nksize: The value for k based on the paper\nalpha: The value for alpha based on the paper", "source": "juraj-google-style"}
{"code": "def discount_factor(self, date: Optional[types.DateTensor]=None, time: Optional[types.FloatTensor]=None, **kwargs) -> tf.Tensor:\n    pass", "docstring": "Returns the discount factor to a specified set of dates.\n\nArgs:\ndate: Optional input specifying the dates at which to evaluate the\ndiscount factors. The function expects either `date` or `time` to be\nspecified.\ntime: Optional input specifying the times at which to evaluate the\ndiscount factors. The function expects either `date` or `time` to be\nspecified.\n**kwargs: The context object, e.g., curve_type.\n\nReturns:\nA `Tensor` of the same shape as `dates` with the corresponding discount\nfactors.", "source": "github-repos"}
{"code": "def by_leb(blocks):\n     \n    slist_len = len(blocks)\n    slist = ['x'] * slist_len\n\n    for block in blocks:\n        if blocks[block].leb_num >= slist_len:\n            add_elements = blocks[block].leb_num - slist_len + 1\n            slist += (['x'] * add_elements)\n            slist_len = len(slist)\n\n        slist[blocks[block].leb_num] = block\n\n    return slist", "docstring": "Sort blocks by Logical Erase Block number.\n\nArguments:\nList:blocks -- List of block objects to sort.\n\nReturns:\nList              -- Indexes of blocks sorted by LEB.", "source": "juraj-google-style"}
{"code": "def _get_timestamp_ms(when):\n    if (when is None):\n        return None\n    ms_since_epoch = float((time.mktime(when.utctimetuple()) * 1000.0))\n    ms_since_epoch += (when.microsecond / 1000.0)\n    return int(ms_since_epoch)", "docstring": "Converts a datetime.datetime to integer milliseconds since the epoch.\n\nRequires special handling to preserve microseconds.\n\nArgs:\nwhen: A datetime.datetime instance.\n\nReturns:\nInteger time since the epoch in milliseconds. If the supplied 'when' is\nNone, the return value will be None.", "source": "codesearchnet"}
{"code": "def node_name(self, value):\n        \n        if value == self._defaults['ai.internal.nodeName'] and 'ai.internal.nodeName' in self._values:\n            del self._values['ai.internal.nodeName']\n        else:\n            self._values['ai.internal.nodeName'] = value", "docstring": "The node_name property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def dump(destination, ms, single=False, pretty_print=False, **kwargs):\n    \n    text = dumps(ms,\n                 single=single,\n                 pretty_print=pretty_print,\n                 **kwargs)\n\n    if hasattr(destination, 'write'):\n        print(text, file=destination)\n    else:\n        with open(destination, 'w') as fh:\n            print(text, file=fh)", "docstring": "Serialize Xmrs objects to the Prolog representation and write to a file.\n\nArgs:\ndestination: filename or file object where data will be written\nms: an iterator of Xmrs objects to serialize (unless the\n*single* option is `True`)\nsingle: if `True`, treat *ms* as a single Xmrs object\ninstead of as an iterator\npretty_print: if `True`, add newlines and indentation", "source": "juraj-google-style"}
{"code": "def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  if (class_info.last_line - class_info.starting_linenum <= 24 or\n      linenum <= class_info.starting_linenum):\n    return\n\n  matched = Match(r'\\s*(public|protected|private):', clean_lines.lines[linenum])\n  if matched:\n    \n    \n    \n    \n    \n    \n    \n    \n    prev_line = clean_lines.lines[linenum - 1]\n    if (not IsBlankLine(prev_line) and\n        not Search(r'\\b(class|struct)\\b', prev_line) and\n        not Search(r'\\\\$', prev_line)):\n      \n      \n      \n      \n      end_class_head = class_info.starting_linenum\n      for i in range(class_info.starting_linenum, linenum):\n        if Search(r'\\{\\s*$', clean_lines.lines[i]):\n          end_class_head = i\n          break\n      if end_class_head < linenum - 1:\n        error(filename, linenum, 'whitespace/blank_line', 3,\n              '\"%s:\" should be preceded by a blank line' % matched.group(1))", "docstring": "Checks for additional blank line issues related to sections.\n\nCurrently the only thing checked here is blank line before protected/private.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nclass_info: A _ClassInfo objects.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def _wait_for_response(self, requests):\n        \n        failed_requests = []\n        responses_for_requests = OrderedDict.fromkeys(requests)\n\n        for retry in range(self._max_retry):\n            try:\n                logging.debug('Try \n                self._availability_limiter.map_with_retries(requests, responses_for_requests)\n\n                failed_requests = []\n                for request, response in responses_for_requests.items():\n                    if self._drop_404s and response is not None and response.status_code == 404:\n                        logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url))\n                    elif not response:\n                        failed_requests.append((request, response))\n\n                if not failed_requests:\n                    break\n\n                logging.warning('Try \n                    retry + 1, len(requests), len(requests) - len(failed_requests),\n                ))\n\n                \n                requests = [fr[0] for fr in failed_requests]\n            except InvalidRequestError:\n                raise\n            except Exception as e:\n                \n                logging.exception('Try \n                pass\n\n        if failed_requests:\n            logging.warning('Still {0} failed request(s) after {1} retries:'.format(\n                len(failed_requests), self._max_retry,\n            ))\n            for failed_request, failed_response in failed_requests:\n                if failed_response is not None:\n                    \n                    failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace')\n                    logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(\n                        failed_request.url, failed_response.status_code, failed_response_text,\n                    ))\n                else:\n                    logging.warning('Request to {0} failed with None response.'.format(failed_request.url))\n\n        return list(responses_for_requests.values())", "docstring": "Issues a batch of requests and waits for the responses.\nIf some of the requests fail it will retry the failed ones up to `_max_retry` times.\n\nArgs:\nrequests - A list of requests\nReturns:\nA list of `requests.models.Response` objects\nRaises:\nInvalidRequestError - if any of the requests returns \"403 Forbidden\" response", "source": "juraj-google-style"}
{"code": "def parse(self, argument):\n    if (not self.enum_values):\n        return argument\n    elif self.case_sensitive:\n        if (argument not in self.enum_values):\n            raise ValueError(('value should be one of <%s>' % '|'.join(self.enum_values)))\n        else:\n            return argument\n    elif (argument.upper() not in [value.upper() for value in self.enum_values]):\n        raise ValueError(('value should be one of <%s>' % '|'.join(self.enum_values)))\n    else:\n        return [value for value in self.enum_values if (value.upper() == argument.upper())][0]", "docstring": "Determine validity of argument and return the correct element of enum.\n\nIf self.enum_values is empty, then all arguments are valid and argument\nwill be returned.\n\nOtherwise, if argument matches an element in enum, then the first\nmatching element will be returned.\n\nArgs:\nargument: The supplied flag value.\n\nReturns:\nThe matching element from enum_values, or argument if enum_values is\nempty.\n\nRaises:\nValueError: enum_values was non-empty, but argument didn't match\nanything in enum.", "source": "codesearchnet"}
{"code": "def get_covalent_bonds(self, tol=0.2):\n    bonds = []\n    for (site1, site2) in itertools.combinations(self._sites, 2):\n        if CovalentBond.is_bonded(site1, site2, tol):\n            bonds.append(CovalentBond(site1, site2))\n    return bonds", "docstring": "Determines the covalent bonds in a molecule.\n\nArgs:\ntol (float): The tol to determine bonds in a structure. See\nCovalentBond.is_bonded.\n\nReturns:\nList of bonds", "source": "codesearchnet"}
{"code": "def set_signal_type(self, sig_type):\n        \n        if isinstance(sig_type, str):\n            sig_type = [sig_type]\n        self.snr_input.signal_type = sig_type\n        return", "docstring": "Set the signal type of interest.\n\nSets the signal type for which the SNR is calculated.\nThis means inspiral, merger, and/or ringdown.\n\nArgs:\nsig_type (str or list of str): Signal type desired by user.\nChoices are `ins`, `mrg`, `rd`, `all` for circular waveforms created with PhenomD.\nIf eccentric waveforms are used, must be `all`.", "source": "juraj-google-style"}
{"code": "def parse_client_table(redis_client):\n    NIL_CLIENT_ID = ray.ObjectID.nil().binary()\n    message = redis_client.execute_command('RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.CLIENT, '', NIL_CLIENT_ID)\n    if (message is None):\n        return []\n    node_info = {}\n    gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)\n    ordered_client_ids = []\n    for i in range(gcs_entry.EntriesLength()):\n        client = ray.gcs_utils.ClientTableData.GetRootAsClientTableData(gcs_entry.Entries(i), 0)\n        resources = {decode(client.ResourcesTotalLabel(i)): client.ResourcesTotalCapacity(i) for i in range(client.ResourcesTotalLabelLength())}\n        client_id = ray.utils.binary_to_hex(client.ClientId())\n        if (not client.IsInsertion()):\n            assert (client_id in node_info), 'Client removed not found!'\n            assert node_info[client_id]['IsInsertion'], 'Unexpected duplicate removal of client.'\n        else:\n            ordered_client_ids.append(client_id)\n        node_info[client_id] = {'ClientID': client_id, 'IsInsertion': client.IsInsertion(), 'NodeManagerAddress': decode(client.NodeManagerAddress(), allow_none=True), 'NodeManagerPort': client.NodeManagerPort(), 'ObjectManagerPort': client.ObjectManagerPort(), 'ObjectStoreSocketName': decode(client.ObjectStoreSocketName(), allow_none=True), 'RayletSocketName': decode(client.RayletSocketName(), allow_none=True), 'Resources': resources}\n    return [node_info[client_id] for client_id in ordered_client_ids]", "docstring": "Read the client table.\n\nArgs:\nredis_client: A client to the primary Redis shard.\n\nReturns:\nA list of information about the nodes in the cluster.", "source": "codesearchnet"}
{"code": "def serialize_date(value):\n    \n    if not value:\n        return None\n    elif isinstance(value, datetime.datetime):\n        return value.date().isoformat()\n    elif isinstance(value, datetime.date):\n        return value.isoformat()\n    else:\n        return parse_date(value).isoformat()", "docstring": "Attempts to convert `value` into an ``xs:date`` string. If `value` is\n``None``, ``None`` will be returned.\n\nArgs:\nvalue: A date value. This can be a string, datetime.date, or\ndatetime.datetime object.\n\nReturns:\nAn ``xs:date`` formatted timestamp string.", "source": "juraj-google-style"}
{"code": "def InitializeDebuggeeLabels(self, flags):\n    self._debuggee_labels = {}\n    for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):\n        for name in var_names:\n            value = os.environ.get(name)\n            if value:\n                if ((label == labels.Debuggee.MODULE) and (value == 'default')):\n                    break\n                self._debuggee_labels[label] = value\n                break\n    if flags:\n        self._debuggee_labels.update({name: value for (name, value) in six.iteritems(flags) if (name in _DEBUGGEE_LABELS)})\n    self._debuggee_labels['projectid'] = self._project_id", "docstring": "Initialize debuggee labels from environment variables and flags.\n\nThe caller passes all the flags that the the debuglet got. This function\nwill only use the flags used to label the debuggee. Flags take precedence\nover environment variables.\n\nDebuggee description is formatted from available flags.\n\nArgs:\nflags: dictionary of debuglet command line flags.", "source": "codesearchnet"}
{"code": "def prepend_block(self, node, reverse=False):\n    \n    if not isinstance(node, grammar.STATEMENTS):\n      raise ValueError\n    if reverse:\n      self.to_prepend_block[-1].appendleft(node)\n    else:\n      self.to_prepend_block[-1].append(node)", "docstring": "Prepend a statement to the current block.\n\nArgs:\nnode: The statement to prepend.\nreverse: When called multiple times, this flag determines whether the\nstatement should be prepended or appended to the already inserted\nstatements.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "juraj-google-style"}
{"code": "def distance(self, method='haversine'):\n        \n        distances = []\n        for segment in self:\n            if len(segment) < 2:\n                distances.append([])\n            else:\n                distances.append(segment.distance(method))\n        return distances", "docstring": "Calculate distances between locations in segments.\n\nArgs:\nmethod (str): Method used to calculate distance\n\nReturns:\nlist of list of float: Groups of distance between points in\nsegments", "source": "juraj-google-style"}
{"code": "def serialize(self):\n    df = self.copy()\n    df['scored_calls'] = df['scored_calls'].apply((lambda x: json.dumps(x)))\n    df['channel_values'] = df['channel_values'].apply((lambda x: json.dumps(x)))\n    df['regions'] = df['regions'].apply((lambda x: json.dumps(x)))\n    df['phenotype_calls'] = df['phenotype_calls'].apply((lambda x: json.dumps(x)))\n    df['neighbors'] = df['neighbors'].apply((lambda x: json.dumps(x)))\n    df['frame_shape'] = df['frame_shape'].apply((lambda x: json.dumps(x)))\n    return df", "docstring": "Convert the data to one that can be saved in h5 structures\n\nReturns:\npandas.DataFrame: like a cell data frame but serialized. columns", "source": "codesearchnet"}
{"code": "def _add_case(self, case_obj):\n    if self.case(case_obj['_id']):\n        raise IntegrityError(('Case %s already exists in database' % case_obj['_id']))\n    return self.case_collection.insert_one(case_obj)", "docstring": "Add a case to the database\nIf the case already exists exception is raised\n\nArgs:\ncase_obj(Case)", "source": "codesearchnet"}
{"code": "def preprocess_bel_stmt(stmt: str) -> str:\n    stmt = stmt.strip()\n    stmt = re.sub(',+', ',', stmt)\n    stmt = re.sub(',', ', ', stmt)\n    stmt = re.sub(' +', ' ', stmt)\n    return stmt", "docstring": "Clean up basic formatting of BEL statement\n\nArgs:\nstmt: BEL statement as single string\n\nReturns:\ncleaned BEL statement", "source": "codesearchnet"}
{"code": "def _compute_direction_numbers(dim):\n    m = np.empty((dim, 32), dtype=np.int32)\n    m[0, :] = np.ones(32, dtype=np.int32)\n    for k in range(dim - 1):\n        a_k = _PRIMITIVE_POLYNOMIAL_COEFFICIENTS[k]\n        deg = np.int32(np.floor(np.log2(a_k)))\n        m[k + 1, :deg] = _INITIAL_DIRECTION_NUMBERS[:deg, k]\n        for j in range(deg, 32):\n            m[k + 1, j] = m[k + 1, j - deg]\n            for i in range(deg):\n                if a_k >> i & 1:\n                    m[k + 1, j] = np.bitwise_xor(m[k + 1, j], m[k + 1, j - deg + i] << deg - i)\n    return m", "docstring": "Returns array of direction numbers for dimension dim.\n\nThese are the m_kj values in the Joe & Kuo notes[1], not the v_kj values. So\nthese refer to the 'abuse of notation' mentioned in the notes -- it is a\nmatrix of integers, not floats. The variable names below are intended to match\nthe notation in the notes as closely as possible.\n\nArgs:\ndim: int, dimension.\n\nReturns:\n`numpy.array` of direction numbers with `shape` [dim, 32].", "source": "github-repos"}
{"code": "def constraint(self):\n    return self._constraint", "docstring": "Returns the constraint function associated with this variable.\n\nReturns:\nThe constraint function that was passed to the variable constructor.\nCan be `None` if no constraint was passed.", "source": "github-repos"}
{"code": "def delete_individual(self, ind_obj):\n        \n        logger.info(\"Deleting individual {0} from database\"\n                    .format(ind_obj.ind_id))\n        self.session.delete(ind_obj)\n        self.save()\n        return ind_obj", "docstring": "Delete a case from the database\n\nArgs:\nind_obj (puzzle.models.Individual): initialized individual model", "source": "juraj-google-style"}
{"code": "def CheckDirectory(self, path, extension='yaml'):\n    result = True\n    if extension:\n        glob_spec = os.path.join(path, '*.{0:s}'.format(extension))\n    else:\n        glob_spec = os.path.join(path, '*')\n    for definition_file in sorted(glob.glob(glob_spec)):\n        if (not self.CheckFile(definition_file)):\n            result = False\n    return result", "docstring": "Validates definition files in a directory.\n\nArgs:\npath (str): path of the definition file.\nextension (Optional[str]): extension of the filenames to read.\n\nReturns:\nbool: True if the directory contains valid definitions.", "source": "codesearchnet"}
{"code": "def pdf(self, resource_id):\n        \n        self.resource_id(str(resource_id))\n        self._request_uri = '{}/pdf'.format(self._request_uri)", "docstring": "Update the request URI to get the pdf for this resource.\n\nArgs:\nresource_id (integer): The group id.", "source": "juraj-google-style"}
{"code": "def check_config_file(msg):\n    \n    with jsonconfig.Config(\"messages\", indent=4) as cfg:\n        verify_profile_name(msg, cfg)\n\n        retrieve_data_from_config(msg, cfg)\n\n        if msg._auth is None:\n            retrieve_pwd_from_config(msg, cfg)\n\n        if msg.save:\n            update_config_data(msg, cfg)\n            update_config_pwd(msg, cfg)", "docstring": "Checks the config.json file for default settings and auth values.\n\nArgs:\n:msg: (Message class) an instance of a message class.", "source": "juraj-google-style"}
{"code": "def input_node_from_schema(schema: Schema, same_sampling_as: Optional[EventSetNode]=None, name: Optional[str]=None) -> EventSetNode:\n    return input_node(features=schema.features, indexes=schema.indexes, is_unix_timestamp=schema.is_unix_timestamp, same_sampling_as=same_sampling_as, name=name)", "docstring": "Creates an input [`EventSetNode`][temporian.EventSetNode] from a schema.\n\nUsage example:\n\n```python\n>>> # Create two nodes with the same schema.\n>>> a = tp.input_node(features=[(\"f1\", tp.float64), (\"f2\", tp.str_)])\n>>> b = tp.input_node_from_schema(a.schema)\n\n```\n\nArgs:\nschema: Schema of the node.\nsame_sampling_as: If set, the created EventSetNode is guaranteed to have the\nsame sampling as same_sampling_as`. In this case, `indexes` and\n`is_unix_timestamp` should not be provided. Some operators require\nfor input EventSetNodes to have the same sampling.\nname: Name for the EventSetNode.\n\nReturns:\nEventSetNode with the given specifications.", "source": "github-repos"}
{"code": "def remove(self, **kwargs):\n        \n        return self.client.api.remove_container(self.id, **kwargs)", "docstring": "Remove this container. Similar to the ``docker rm`` command.\n\nArgs:\nv (bool): Remove the volumes associated with the container\nlink (bool): Remove the specified link and not the underlying\ncontainer\nforce (bool): Force the removal of a running container (uses\n``SIGKILL``)\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "async def evaluate_trained_model(state):\n  \n\n  return await evaluate_model(\n      state.train_model_path, state.best_model_path,\n      os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)", "docstring": "Evaluate the most recently trained model against the current best model.\n\nArgs:\nstate: the RL loop State instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, state_callback, restore_callback):\n\n    def _state_callback_wrapper():\n        with ops.init_scope():\n            return state_callback()\n    self._state_callback = _state_callback_wrapper\n    self._restore_callback = restore_callback\n    with ops.device('/cpu:0'):\n        self._save_string = constant_op.constant('', dtype=dtypes.string)\n    spec = saveable_object.SaveSpec(self._save_string, '', name, dtype=dtypes.string)\n    super(_PythonStringStateSaveable, self).__init__(self._save_string, [spec], name)", "docstring": "Configure saving.\n\nArgs:\nname: The checkpoint key to write to.\nstate_callback: A function taking no arguments which returns a string.\nThis function is run every time a checkpoint is written.\nrestore_callback: A function taking a Python string, used to restore\nstate.", "source": "github-repos"}
{"code": "def _GetGenericBasesLookupMap(self, node):\n    mapping = collections.defaultdict(list)\n    seen_bases = set()\n    bases = list(reversed(node.bases))\n    while bases:\n        base = bases.pop()\n        if base in seen_bases:\n            continue\n        seen_bases.add(base)\n        if isinstance(base, pytd.GenericType) and isinstance(base.base_type, pytd.ClassType):\n            mapping[base.base_type].append(base)\n            bases.extend(reversed(base.base_type.cls.bases))\n        elif isinstance(base, pytd.ClassType):\n            bases.extend(reversed(base.cls.bases))\n    return mapping", "docstring": "Get a lookup map for the generic bases of a class.\n\nGets a map from a pytd.ClassType to the list of pytd.GenericType bases of\nthe node that have that class as their base. This method does depth-first\ntraversal of the bases, which ensures that the order of elements in each\nlist is consistent with the node's MRO.\n\nArgs:\nnode: A pytd.Class node.\n\nReturns:\nA pytd.ClassType -> List[pytd.GenericType] map.", "source": "github-repos"}
{"code": "def package_info(pkg_name):\n    indent = '  '\n    for (config, _) in _iter_packages():\n        if (pkg_name == config['name']):\n            print('Package:', pkg_name)\n            print(indent, 'Platform:', config['platform'])\n            print(indent, 'Version:', config['version'])\n            print(indent, 'Path:', config['path'])\n            print(indent, 'Worlds:')\n            for world in config['maps']:\n                world_info(world['name'], world_config=world, initial_indent='    ')", "docstring": "Prints the information of a package.\n\nArgs:\npkg_name (str): The name of the desired package to get information", "source": "codesearchnet"}
{"code": "def if_else(condition, when_true, otherwise):\n    if ((not isinstance(when_true, collections.Iterable)) or isinstance(when_true, str)):\n        when_true = np.repeat(when_true, len(condition))\n    if ((not isinstance(otherwise, collections.Iterable)) or isinstance(otherwise, str)):\n        otherwise = np.repeat(otherwise, len(condition))\n    assert ((len(condition) == len(when_true)) and (len(condition) == len(otherwise)))\n    if isinstance(when_true, pd.Series):\n        when_true = when_true.values\n    if isinstance(otherwise, pd.Series):\n        otherwise = otherwise.values\n    output = np.array([(when_true[i] if c else otherwise[i]) for (i, c) in enumerate(condition)])\n    return output", "docstring": "Wraps creation of a series based on if-else conditional logic into a function\ncall.\n\nProvide a boolean vector condition, value(s) when true, and value(s)\nwhen false, and a vector will be returned the same length as the conditional\nvector according to the logical statement.\n\nArgs:\ncondition: A boolean vector representing the condition. This is often\na logical statement with a symbolic series.\nwhen_true: A vector the same length as the condition vector or a single\nvalue to apply when the condition is `True`.\notherwise: A vector the same length as the condition vector or a single\nvalue to apply when the condition is `False`.\n\nExample:\ndf = pd.DataFrame", "source": "codesearchnet"}
{"code": "def get_source_inputs(tensor):\n    if not hasattr(tensor, '_keras_history'):\n        return tensor\n    operation, node_index, _ = tensor._keras_history\n    if not operation or not operation._inbound_nodes:\n        return [tensor]\n    else:\n        node = operation._inbound_nodes[node_index]\n        if node.is_input:\n            return tree.flatten(node.output_tensors)\n        else:\n            source_tensors = []\n            for tensor in node.input_tensors:\n                previous_sources = get_source_inputs(tensor)\n                for x in previous_sources:\n                    if all((x is not t for t in source_tensors)):\n                        source_tensors.append(x)\n            return source_tensors", "docstring": "Returns the list of input tensors necessary to compute `tensor`.\n\nOutput will always be a list of tensors\n(potentially with 1 element).\n\nArgs:\ntensor: The tensor to start from.\n\nReturns:\nList of input tensors.", "source": "github-repos"}
{"code": "def __init__(self, session, flush_limit=100):\n        \n        super(TcExLogHandler, self).__init__()\n        self.session = session\n        self.flush_limit = flush_limit\n        self.entries = []", "docstring": "Initialize Class properties.\n\nArgs:\nsession (Request.Session): The preconfigured instance of Session for ThreatConnect API.\nflush_limit (int): The limit to flush batch logs to the API.", "source": "juraj-google-style"}
{"code": "def _GetDelayImportTimestamps(self, pefile_object):\n    \n    delay_import_timestamps = []\n    if not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT'):\n      return delay_import_timestamps\n    for importdata in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT:\n      dll_name = importdata.dll\n      try:\n        dll_name = dll_name.decode('ascii')\n      except UnicodeDecodeError:\n        dll_name = dll_name.decode('ascii', errors='replace')\n\n      timestamp = getattr(importdata.struct, 'dwTimeStamp', 0)\n      delay_import_timestamps.append([dll_name, timestamp])\n    return delay_import_timestamps", "docstring": "Retrieves timestamps from delay import entries, if available.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\ntuple[str, int]: name of the DLL being imported and the second is\nthe timestamp of the entry.", "source": "juraj-google-style"}
{"code": "def copy_clean(node, preserve_annos=None):\n    return CleanCopier(preserve_annos).copy(node)", "docstring": "Creates a deep copy of an AST.\n\nThe copy will not include fields that are prefixed by '__', with the\nexception of user-specified annotations.\n\nArgs:\nnode: ast.AST\npreserve_annos: Optional[Set[Hashable]], annotation keys to include in the\ncopy\nReturns:\nast.AST", "source": "github-repos"}
{"code": "async def executor(func, *args, **kwargs):\n\n    def syncfunc():\n        return func(*args, **kwargs)\n    loop = asyncio.get_running_loop()\n    return (await loop.run_in_executor(None, syncfunc))", "docstring": "Execute a function in an executor thread.\n\nArgs:\ntodo ((func,args,kwargs)): A todo tuple.", "source": "codesearchnet"}
{"code": "def IsPipe(self):\n    if (self._stat_object is None):\n        self._stat_object = self._GetStat()\n    if (self._stat_object is not None):\n        self.entry_type = self._stat_object.type\n    return (self.entry_type == definitions.FILE_ENTRY_TYPE_PIPE)", "docstring": "Determines if the file entry is a pipe.\n\nReturns:\nbool: True if the file entry is a pipe.", "source": "codesearchnet"}
{"code": "def WriteEventBody(self, event):\n    for field_name in self._fields:\n        if (field_name == 'datetime'):\n            output_value = self._FormatDateTime(event)\n        else:\n            output_value = self._dynamic_fields_helper.GetFormattedField(event, field_name)\n        output_value = self._RemoveIllegalXMLCharacters(output_value)\n        column_index = self._fields.index(field_name)\n        self._column_widths.setdefault(column_index, 0)\n        if (field_name == 'datetime'):\n            column_width = min(self._MAX_COLUMN_WIDTH, (len(self._timestamp_format) + 2))\n        else:\n            column_width = min(self._MAX_COLUMN_WIDTH, (len(output_value) + 2))\n        self._column_widths[column_index] = max(self._MIN_COLUMN_WIDTH, self._column_widths[column_index], column_width)\n        self._sheet.set_column(column_index, column_index, self._column_widths[column_index])\n        if ((field_name == 'datetime') and isinstance(output_value, datetime.datetime)):\n            self._sheet.write_datetime(self._current_row, column_index, output_value)\n        else:\n            self._sheet.write(self._current_row, column_index, output_value)\n    self._current_row += 1", "docstring": "Writes the body of an event object to the spreadsheet.\n\nArgs:\nevent (EventObject): event.", "source": "codesearchnet"}
{"code": "def find_parents(root, path, names):\n    if (not root):\n        return []\n    if (not os.path.commonprefix((root, path))):\n        log.warning('Path %s not in %s', path, root)\n        return []\n    dirs = ([root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep))\n    while dirs:\n        search_dir = os.path.join(*dirs)\n        existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))\n        if existing:\n            return existing\n        dirs.pop()\n    return []", "docstring": "Find files matching the given names relative to the given path.\n\nArgs:\npath (str): The file path to start searching up from.\nnames (List[str]): The file/directory names to look for.\nroot (str): The directory at which to stop recursing upwards.\n\nNote:\nThe path MUST be within the root.", "source": "codesearchnet"}
{"code": "def normalize_name(name, overrides=None):\n    normalized_name = name.title()\n    if overrides:\n        override_map = dict([(name.title(), name) for name in overrides])\n        return override_map.get(normalized_name, normalized_name)\n    else:\n        return normalized_name", "docstring": "Normalize the key name to title case.\n\nFor example, ``normalize_name('content-id')`` will become ``Content-Id``\n\nArgs:\nname (str): The name to normalize.\noverrides (set, sequence): A set or sequence containing keys that\nshould be cased to themselves. For example, passing\n``set('WARC-Type')`` will normalize any key named \"warc-type\" to\n``WARC-Type`` instead of the default ``Warc-Type``.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _ProcessArchiveTypes(self, mediator, path_spec, type_indicators):\n    \n    number_of_type_indicators = len(type_indicators)\n    if number_of_type_indicators == 0:\n      return\n\n    self.processing_status = definitions.STATUS_INDICATOR_COLLECTING\n\n    if number_of_type_indicators > 1:\n      display_name = mediator.GetDisplayName()\n      logger.debug((\n          'Found multiple format type indicators: {0:s} for '\n          'archive file: {1:s}').format(type_indicators, display_name))\n\n    for type_indicator in type_indicators:\n      if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TAR:\n        archive_path_spec = path_spec_factory.Factory.NewPathSpec(\n            dfvfs_definitions.TYPE_INDICATOR_TAR, location='/',\n            parent=path_spec)\n\n      elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_ZIP:\n        archive_path_spec = path_spec_factory.Factory.NewPathSpec(\n            dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/',\n            parent=path_spec)\n\n      else:\n        archive_path_spec = None\n\n        warning_message = (\n            'unsupported archive format type indicator: {0:s}').format(\n                type_indicator)\n        mediator.ProduceExtractionWarning(\n            warning_message, path_spec=path_spec)\n\n      if archive_path_spec:\n        try:\n          path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(\n              [archive_path_spec], resolver_context=mediator.resolver_context)\n\n          for generated_path_spec in path_spec_generator:\n            if self._abort:\n              break\n\n            event_source = event_sources.FileEntryEventSource(\n                path_spec=generated_path_spec)\n            event_source.file_entry_type = (\n                dfvfs_definitions.FILE_ENTRY_TYPE_FILE)\n            mediator.ProduceEventSource(event_source)\n\n            self.last_activity_timestamp = time.time()\n\n        except (IOError, errors.MaximumRecursionDepth) as exception:\n          warning_message = (\n              'unable to process archive file with error: {0!s}').format(\n                  exception)\n          mediator.ProduceExtractionWarning(\n              warning_message, path_spec=generated_path_spec)", "docstring": "Processes a data stream containing archive types such as: TAR or ZIP.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\npath_spec (dfvfs.PathSpec): path specification.\ntype_indicators(list[str]): dfVFS archive type indicators found in\nthe data stream.", "source": "juraj-google-style"}
{"code": "def _determine_checkout_url(self, platform, action):\n        \n        api_version = settings.API_CHECKOUT_VERSION\n        if platform == \"test\":\n            base_uri = settings.ENDPOINT_CHECKOUT_TEST\n        elif self.live_endpoint_prefix is not None and platform == \"live\":\n            base_uri = settings.ENDPOINT_CHECKOUT_LIVE_SUFFIX.format(\n                self.live_endpoint_prefix)\n        elif self.live_endpoint_prefix is None and platform == \"live\":\n            errorstring = \n            raise AdyenEndpointInvalidFormat(errorstring)\n        if action == \"paymentsDetails\":\n            action = \"payments/details\"\n        if action == \"paymentsResult\":\n            action = \"payments/result\"\n        if action == \"originKeys\":\n            api_version = settings.API_CHECKOUT_UTILITY_VERSION\n\n        return '/'.join([base_uri, api_version, action])", "docstring": "This returns the Adyen API endpoint based on the provided platform,\nservice and action.\n\nArgs:\nplatform (str): Adyen platform, ie 'live' or 'test'.\naction (str): the API action to perform.", "source": "juraj-google-style"}
{"code": "def add_2d_positional_embeddings(self, grid, interpolate_pos_encoding: bool=False):\n    batch_size, height, width, hidden_dim = grid.shape\n    row_height = min(self.max_grid_row_position_embeddings, height)\n    row_position_ids = torch.arange(row_height, dtype=torch.long, device=grid.device)\n    row_position_embeddings = self.row_position_embeddings(row_position_ids)\n    row_shape = (1,) * (len(grid.shape) - 3) + (row_height, 1, hidden_dim)\n    row_position_embeddings = row_position_embeddings.view(*row_shape)\n    row_width = min(self.max_grid_col_position_embeddings, width)\n    col_position_ids = torch.arange(row_width, dtype=torch.long, device=grid.device)\n    col_position_embeddings = self.col_position_embeddings(col_position_ids)\n    col_shape = (batch_size, 1, row_width, hidden_dim)\n    col_position_embeddings = col_position_embeddings.view(*col_shape)\n    positional_embeddings = row_position_embeddings + col_position_embeddings\n    if interpolate_pos_encoding and (height > self.max_grid_row_position_embeddings or width > self.max_grid_col_position_embeddings):\n        grid = grid + self.interpolate_pos_encoding(positional_embeddings, height, width)\n    else:\n        grid = grid + positional_embeddings\n    return grid", "docstring": "Args:\ngrid: (batch_size, height, width, hidden_dim)\ninterpolate_pos_encoding: (`bool`, *optional*, defaults to `False`):\nWhether to interpolate the pre-trained position encodings.\nReturns:\ngrid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)", "source": "github-repos"}
{"code": "def get_targets(self):\n    if (not hasattr(self, '_targets')):\n        targets = []\n        for target_def in (self.config.targets or []):\n            target = Target(target_def)\n            targets.append(target)\n        self._targets = targets\n    return self._targets", "docstring": "Returns the named targets that are specified in the config.\n\nReturns:\nlist: a list of :class:`stacker.target.Target` objects", "source": "codesearchnet"}
{"code": "def parse_mmtf_header(infile):\n    infodict = {}\n    mmtf_decoder = mmtf.parse(infile)\n    infodict['date'] = mmtf_decoder.deposition_date\n    infodict['release_date'] = mmtf_decoder.release_date\n    try:\n        infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]\n    except AttributeError:\n        infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]\n    infodict['resolution'] = mmtf_decoder.resolution\n    infodict['description'] = mmtf_decoder.title\n    group_name_exclude = ['HOH']\n    chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']\n    chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if ((mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude) and (mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude))]))\n    infodict['chemicals'] = chemicals\n    return infodict", "docstring": "Parse an MMTF file and return basic header-like information.\n\nArgs:\ninfile (str): Path to MMTF file\n\nReturns:\ndict: Dictionary of parsed header\n\nTodo:\n- Can this be sped up by not parsing the 3D coordinate info somehow?\n- OR just store the sequences when this happens since it is already being parsed.", "source": "codesearchnet"}
{"code": "def translate_html_string(self, html: str) -> str:\n    text_content = get_text(html)\n    chunks = self.parse(text_content)\n    return resolve(chunks, html)", "docstring": "Translates the given HTML string with markups for semantic line breaks.\n\nArgs:\nhtml (str): An input html string.\n\nReturns:\nThe translated HTML string (str).", "source": "github-repos"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_handler = VertexAIModelHandlerJSON(endpoint_id=known_args.endpoint, project=known_args.project, location=known_args.location, experiment=known_args.experiment, network=known_args.vpc_network, private=known_args.private)\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    parameters = {'temperature': 0.2, 'maxOutputTokens': 256, 'topK': 40, 'topP': 0.95}\n    prompts = ['What is 5+2?', 'Who is the president?', 'Write me a business plan for a cookie shop.']\n    read_prompts = pipeline | 'Get prompt' >> beam.Create(prompts)\n    preprocess = read_prompts | 'Format prompt' >> beam.Map(lambda data: (data, {'prompt': data}))\n    predictions = preprocess | 'RunInference' >> RunInference(KeyedModelHandler(model_handler), inference_args=parameters)\n    _ = predictions | 'PrintOutput' >> beam.Map(print)\n    _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def partition_graphs(self):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs have been loaded.')\n    return [self._debug_graphs[key].debug_graph_def for key in self._debug_graphs]", "docstring": "Get the partition graphs.\n\nReturns:\nPartition graphs as a list of GraphDef.\n\nRaises:\nLookupError: If no partition graphs have been loaded.", "source": "github-repos"}
{"code": "def vectorize( self, docs ):\n        \n\n        if type(docs) == dict:\n            docs = docs.items()\n\n        if self.model == None:\n            self.train(docs)\n\n        asset_id2vector = {}\n\n        unfound = []\n        for item in docs:\n            \n            asset_id, _ = item\n            label = 'DOC_' + str(asset_id)\n            if label in self.model:\n                asset_id2vector.update({asset_id: self.model['DOC_' + str(asset_id)]})\n            else:\n                unfound.append(item)\n\n        if len(unfound) > 0:\n            \n            sentences = [self._gen_sentence(item) for item in unfound]\n            self.update_model(sentences, train=self.stream_train)\n            asset_id2vector.update({item[0]: self.model['DOC_' + str(item[0])] for item in unfound})\n\n        return asset_id2vector", "docstring": "Returns the feature vectors for a set of docs. If model is not already be trained,\nthen self.train() is called.\n\nArgs:\ndocs (dict or list of tuples): asset_id, body_text of documents\nyou wish to featurize.", "source": "juraj-google-style"}
{"code": "def get_residue_annotations(self, start_resnum, end_resnum=None):\n    if (not end_resnum):\n        end_resnum = start_resnum\n    f = SeqFeature(FeatureLocation((start_resnum - 1), end_resnum))\n    return f.extract(self).letter_annotations", "docstring": "Retrieve letter annotations for a residue or a range of residues\n\nArgs:\nstart_resnum (int): Residue number\nend_resnum (int): Optional residue number, specify if a range is desired\n\nReturns:\ndict: Letter annotations for this residue or residues", "source": "codesearchnet"}
{"code": "def _assertOpOutputMatchesExpected(self, params, solution, high_level=True, rtol=0.001, atol=1e-05):\n    diagonal = params['diagonal']\n    with self.session() as session:\n        for dtype in self.numeric_types - {np.int8, np.uint8}:\n            expected = solution.astype(dtype)\n            with self.test_scope():\n                params['diagonal'] = array_ops.placeholder(dtype, diagonal.shape, name='diagonal')\n                if high_level:\n                    output = array_ops.matrix_diag(**params)\n                else:\n                    output = gen_array_ops.matrix_diag(**params)\n            result = session.run(output, {params['diagonal']: diagonal.astype(dtype)})\n            self.assertEqual(output.dtype, expected.dtype)\n            self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)", "docstring": "Verifies that matrix_diag produces `solution` when fed `params`.\n\nArgs:\nparams: dictionary containing input parameters to matrix_diag.\nsolution: numpy array representing the expected output of matrix_diag.\nhigh_level: call high_level matrix_diag\nrtol: relative tolerance for equality test.\natol: absolute tolerance for equality test.", "source": "github-repos"}
{"code": "def dcc_connect(self, address, port, dcctype=\"chat\"):\n        \n        warnings.warn(\"Use self.dcc(type).connect()\", DeprecationWarning)\n        return self.dcc(dcctype).connect(address, port)", "docstring": "Connect to a DCC peer.\n\nArguments:\n\naddress -- IP address of the peer.\n\nport -- Port to connect to.\n\nReturns a DCCConnection instance.", "source": "juraj-google-style"}
{"code": "def verified_excel_file(store, institute_list, temp_excel_dir):\n    document_lines = []\n    written_files = 0\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\n    LOG.info('Creating verified variant document..')\n    for cust in institute_list:\n        verif_vars = store.verified(institute_id=cust)\n        LOG.info('Found {} verified variants for customer {}'.format(len(verif_vars), cust))\n        if (not verif_vars):\n            continue\n        unique_callers = set()\n        for (var_type, var_callers) in CALLERS.items():\n            for caller in var_callers:\n                unique_callers.add(caller.get('id'))\n        cust_verified = export_verified_variants(verif_vars, unique_callers)\n        document_name = ('.'.join([cust, '_verified_variants', today]) + '.xlsx')\n        workbook = Workbook(os.path.join(temp_excel_dir, document_name))\n        Report_Sheet = workbook.add_worksheet()\n        row = 0\n        for (col, field) in enumerate((VERIFIED_VARIANTS_HEADER + list(unique_callers))):\n            Report_Sheet.write(row, col, field)\n        for (row, line) in enumerate(cust_verified, 1):\n            for (col, field) in enumerate(line):\n                Report_Sheet.write(row, col, field)\n        workbook.close()\n        if os.path.exists(os.path.join(temp_excel_dir, document_name)):\n            written_files += 1\n    return written_files", "docstring": "Collect all verified variants in a list on institutes and save them to file\n\nArgs:\nstore(adapter.MongoAdapter)\ninstitute_list(list): a list of institute ids\ntemp_excel_dir(os.Path): folder where the temp excel files are written to\n\nReturns:\nwritten_files(int): the number of files written to temp_excel_dir", "source": "codesearchnet"}
{"code": "def query_string_to_dict(query):\n    query_params = {}\n    for key_value in query.split('&'):\n        key_value_pair = key_value.split('=', 1)\n        key = (key_value_pair[0] if (len(key_value_pair) >= 1) else '')\n        value = (key_value_pair[1] if (len(key_value_pair) == 2) else '')\n        query_params[key] = value\n    return query_params", "docstring": "Convert a string to a query dict.\n\nArgs:\nquery (str): The query string.\n\nReturns:\nobj: The key value object with query params.\n\nNote:\nThis method does the same as urllib.parse.parse_qsl except\nthat it doesn't actually decode the values.", "source": "codesearchnet"}
{"code": "def changed(self, path, md5):\n        \n        actual = self.update(path)\n\n        msg = \"File '{}', md5 '{}', actual '{}'\"\n        logger.debug(msg.format(path, md5, actual))\n\n        if not md5 or not actual:\n            return True\n\n        return actual.split(\".\")[0] != md5.split(\".\")[0]", "docstring": "Check if file/directory has the expected md5.\n\nArgs:\npath (str): path to the file/directory to check.\nmd5 (str): expected md5.\n\nReturns:\nbool: True if path has the expected md5, False otherwise.", "source": "juraj-google-style"}
{"code": "def events_from_logdir(logdir):\n    assert gfile.Exists(logdir)\n    files = gfile.ListDirectory(logdir)\n    assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files\n    return events_from_file(os.path.join(logdir, files[0]))", "docstring": "Returns all events in the single eventfile in logdir.\n\nArgs:\nlogdir: The directory in which the single event file is sought.\n\nReturns:\nA list of all tf.Event protos from the single event file.\n\nRaises:\nAssertionError: If logdir does not contain exactly one file.", "source": "github-repos"}
{"code": "def verify_branch(branch_name):\n    \n    \n    try:\n        shell.run(\n            'git rev-parse --verify {}'.format(branch_name),\n            never_pretend=True\n        )\n        return True\n    except IOError:\n        return False", "docstring": "Verify if the given branch exists.\n\nArgs:\nbranch_name (str):\nThe name of the branch to check.\n\nReturns:\nbool: **True** if a branch with name *branch_name* exits, **False**\notherwise.", "source": "juraj-google-style"}
{"code": "def _cleanup_keys_with_confirmation(self, keys_to_delete):\n    print('Round name: ', self.round_name)\n    print('Number of entities to be deleted: ', len(keys_to_delete))\n    if (not keys_to_delete):\n        return\n    if self.verbose:\n        print('Entities to delete:')\n        idx = 0\n        prev_key_prefix = None\n        dots_printed_after_same_prefix = False\n        for k in keys_to_delete:\n            if (idx >= 20):\n                print('   ...')\n                print('   ...')\n                break\n            key_prefix = (k.flat_path[0:1] if (k.flat_path[0] in [u'SubmissionType', u'WorkType']) else k.flat_path[0])\n            if (prev_key_prefix == key_prefix):\n                if (not dots_printed_after_same_prefix):\n                    print('   ...')\n                dots_printed_after_same_prefix = True\n            else:\n                print('  ', k)\n                dots_printed_after_same_prefix = False\n                idx += 1\n            prev_key_prefix = key_prefix\n    print()\n    inp = input_str('Are you sure? (type \"yes\" without quotes to confirm): ')\n    if (inp != 'yes'):\n        return\n    with self.datastore_client.no_transact_batch() as batch:\n        for k in keys_to_delete:\n            batch.delete(k)\n    print('Data deleted')", "docstring": "Asks confirmation and then deletes entries with keys.\n\nArgs:\nkeys_to_delete: list of datastore keys for which entries should be deleted", "source": "codesearchnet"}
{"code": "def error_log(self, msg='', level=20, traceback=False):\n    sys.stderr.write((msg + '\\n'))\n    sys.stderr.flush()\n    if traceback:\n        tblines = traceback_.format_exc()\n        sys.stderr.write(tblines)\n        sys.stderr.flush()", "docstring": "Write error message to log.\n\nArgs:\nmsg (str): error message\nlevel (int): logging level\ntraceback (bool): add traceback to output or not", "source": "codesearchnet"}
{"code": "def _GetPathSegmentIndexForValueWeights(self, value_weights):\n    \n    largest_weight = value_weights.GetLargestWeight()\n\n    if largest_weight > 0:\n      value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight)\n    else:\n      value_weight_indexes = []\n\n    if value_weight_indexes:\n      path_segment_index = value_weight_indexes[0]\n    else:\n      path_segment_index = value_weights.GetFirstAvailableIndex()\n\n    if path_segment_index is None:\n      raise RuntimeError('No path segment index found.')\n\n    return path_segment_index", "docstring": "Retrieves the index of the path segment based on value weights.\n\nArgs:\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.\n\nRaises:\nRuntimeError: is no path segment index can be found.", "source": "juraj-google-style"}
{"code": "def load(self, source, as_defaults=False):\n    if isinstance(source, six.string_types):\n        source = os.path.expanduser(source)\n        with open(source, encoding='utf-8') as f:\n            self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)\n    elif isinstance(source, (list, tuple)):\n        for s in source:\n            with open(s, encoding='utf-8') as f:\n                self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)\n    else:\n        self._rw.load_config_from_file(self._config, source, as_defaults=as_defaults)", "docstring": "Load configuration values from the specified source.\n\nArgs:\nsource:\nas_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.", "source": "codesearchnet"}
{"code": "def connect(address, authkey):\n  \n  TFManager.register('get_queue')\n  TFManager.register('get')\n  TFManager.register('set')\n  m = TFManager(address, authkey=authkey)\n  m.connect()\n  return m", "docstring": "Connect to a multiprocess.Manager.\n\nArgs:\n:address: unique address to the TFManager, either a unique connection string for 'local', or a (host, port) tuple for remote.\n:authkey: string authorization key\n\nReturns:\nA TFManager instance referencing the remote TFManager at the supplied address.", "source": "juraj-google-style"}
{"code": "def operation_at(self,\n                     qubit: ops.Qid,\n                     moment_index: int) -> Optional[ops.Operation]:\n        \n        if not 0 <= moment_index < len(self._moments):\n            return None\n        for op in self._moments[moment_index].operations:\n            if qubit in op.qubits:\n                return op\n        return None", "docstring": "Finds the operation on a qubit within a moment, if any.\n\nArgs:\nqubit: The qubit to check for an operation on.\nmoment_index: The index of the moment to check for an operation\nwithin. Allowed to be beyond the end of the circuit.\n\nReturns:\nNone if there is no operation on the qubit at the given moment, or\nelse the operation.", "source": "juraj-google-style"}
{"code": "def diff(self, **kwargs):\n    path = ('%s/%s/diff' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Generate the commit diff.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the diff could not be retrieved\n\nReturns:\nlist: The changes done in this commit", "source": "codesearchnet"}
{"code": "def is_remote_added(remote):\n    \n    out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' remotes')\n\n    lines = out.splitlines()\n    for item in lines:\n        i = re.split(r'\\t+', item.rstrip('\\t'))\n        if i[0] == remote:\n            return True\n    return False", "docstring": "Determines if a remote exists.\n\nArgs:\nremote (str): The remote's name.\n\nReturns:\nbool: True if the remote has already been added.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' flatpak.is_remote_added flathub", "source": "juraj-google-style"}
{"code": "def refill(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):\n    (path, from_address) = from_address\n    verb = Spoolverb()\n    inputs = self.select_inputs(from_address, (nfees + 1), ntokens, min_confirmations=min_confirmations)\n    outputs = ([{'address': to_address, 'value': self.token}] * ntokens)\n    outputs += ([{'address': to_address, 'value': self.fee}] * nfees)\n    outputs += [{'script': self._t._op_return_hex(verb.fuel), 'value': 0}]\n    unsigned_tx = self._t.build_transaction(inputs, outputs)\n    signed_tx = self._t.sign_transaction(unsigned_tx, password, path=path)\n    txid = self._t.push(signed_tx)\n    return txid", "docstring": "Refill wallets with the necessary fuel to perform spool transactions\n\nArgs:\nfrom_address (Tuple[str]): Federation wallet address. Fuels the wallets with tokens and fees. All transactions to wallets\nholding a particular piece should come from the Federation wallet\nto_address (str): Wallet address that needs to perform a spool transaction\nnfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions\nntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain\npassword (str): Password for the Federation wallet. Used to sign the transaction\nmin_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6\nsync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at\nleast on confirmation on the blockchain. Defaults to False\n\nReturns:\nstr: transaction id", "source": "codesearchnet"}
{"code": "def readline(self, size=None):\n        \n        if size is not None:\n            data = self.rfile.readline(size)\n            self.bytes_read += len(data)\n            self._check_length()\n            return data\n\n        \n        \n        res = []\n        while True:\n            data = self.rfile.readline(256)\n            self.bytes_read += len(data)\n            self._check_length()\n            res.append(data)\n            \n            if len(data) < 256 or data[-1:] == LF:\n                return EMPTY.join(res)", "docstring": "Read a single line from rfile buffer and return it.\n\nArgs:\nsize (int): minimum amount of data to read\n\nReturns:\nbytes: One line from rfile.", "source": "juraj-google-style"}
{"code": "def remove_son(self, son):\n    self._sons = [x for x in self._sons if (x.node_id != son.node_id)]", "docstring": "Remove the son node. Do nothing if the node is not a son\n\nArgs:\nfathers: list of fathers to add", "source": "codesearchnet"}
{"code": "def __init__(self, caption, height, width):\n        \n        self.caption = caption\n        self.height = height\n        self.width = width\n        self._window = None", "docstring": "Initialize a new image viewer.\n\nArgs:\ncaption (str): the caption/title for the window\nheight (int): the height of the window\nwidth (int): the width of the window\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_nn_info(self, structure, n):\n    nns = self.get_voronoi_polyhedra(structure, n)\n    return self._extract_nn_info(structure, nns)", "docstring": "Get all near-neighbor sites as well as the associated image locations\nand weights of the site with index n in structure\nusing Voronoi decomposition.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site for which to determine near-neighbor\nsites.\n\nReturns:\nsiw (list of tuples (Site, array, float)): tuples, each one\nof which represents a coordinated site, its image location,\nand its weight.", "source": "codesearchnet"}
{"code": "def stateless_random_shuffle(input_tensor, seed, name=None):\n    with tf.compat.v1.name_scope(name, default_name='stateless_random_shuffle', values=[input_tensor, seed]):\n        input_tensor = tf.convert_to_tensor(input_tensor, name='input_tensor')\n        seed = tf.convert_to_tensor(seed, name='random_seed')\n        uniforms = tf.random.stateless_uniform(shape=[tf.shape(input_tensor)[0]], seed=seed, dtype=tf.float64)\n    return tf.gather(input_tensor, tf.argsort(uniforms, stable=True, axis=0))", "docstring": "Produces stateless random shuffle of the 1st dimension of an input Tensor.\n\nThis is a stateless version of `tf.random_shuffle`. If run twice with the same\nseed, produces the same result.\n\nExample\n```python\nidentity_shuffle = tf.range(100)\nrandom_shuffle = stateless_random_shuffle(identity_shuffle, seed=(42, 2))\n```\n\nArgs:\ninput_tensor: float32, float64, int32 or int64 1-D Tensor.\nseed: int32 or int64 Tensor of shape [2].\nname: Python `str` name prefixed to ops created by this function.\n\nReturns:\nA Tensor of the same shape and dtype as `input_tensor`.", "source": "github-repos"}
{"code": "def from_authorized_user_info(cls, info, scopes=None):\n        \n        keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n        missing = keys_needed.difference(six.iterkeys(info))\n\n        if missing:\n            raise ValueError(\n                'Authorized user info was not in the expected format, missing '\n                'fields {}.'.format(', '.join(missing)))\n\n        return Credentials(\n            None,  \n            refresh_token=info['refresh_token'],\n            token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n            scopes=scopes,\n            client_id=info['client_id'],\n            client_secret=info['client_secret'])", "docstring": "Creates a Credentials instance from parsed authorized user info.\n\nArgs:\ninfo (Mapping[str, str]): The authorized user info in Google\nformat.\nscopes (Sequence[str]): Optional list of scopes to include in the\ncredentials.\n\nReturns:\ngoogle.oauth2.credentials.Credentials: The constructed\ncredentials.\n\nRaises:\nValueError: If the info is not in the expected format.", "source": "juraj-google-style"}
{"code": "def port(self, value):\n    self._port = value\n    if (value is None):\n        try:\n            del self._connectionXML.attrib['port']\n        except KeyError:\n            pass\n    else:\n        self._connectionXML.set('port', value)", "docstring": "Set the connection's port property.\n\nArgs:\nvalue:  New port value. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def rotoreflection(axis, angle, origin=(0, 0, 0)):\n        \n        rot = SymmOp.from_origin_axis_angle(origin, axis, angle)\n        refl = SymmOp.reflection(axis, origin)\n        m = np.dot(rot.affine_matrix, refl.affine_matrix)\n        return SymmOp(m)", "docstring": "Returns a roto-reflection symmetry operation\n\nArgs:\naxis (3x1 array): Axis of rotation / mirror normal\nangle (float): Angle in degrees\norigin (3x1 array): Point left invariant by roto-reflection.\nDefaults to (0, 0, 0).\n\nReturn:\nRoto-reflection operation", "source": "juraj-google-style"}
{"code": "def get_controller(self, path):\n        \n        path_info = path.lstrip('/').split('/', 2)\n        try:\n            return self._routes.get(path_info[0] + '/' + path_info[1])\n        except (IndexError, KeyError):\n            return self._routes.get(path_info[0] or 'index')", "docstring": "Return controller that handle given path.\n\nArgs:\n- path: requested path, like: /blog/post_view/15", "source": "juraj-google-style"}
{"code": "def get_session(self, username, password, remote='127.0.0.1', proxy=None):\n    params = {'username': username, 'password': password, 'validation-factors': {'validationFactors': [{'name': 'remote_address', 'value': remote}]}}\n    if proxy:\n        params['validation-factors']['validationFactors'].append({'name': 'X-Forwarded-For', 'value': proxy})\n    response = self._post((self.rest_url + '/session'), data=json.dumps(params), params={'expand': 'user'})\n    if (not response.ok):\n        return None\n    return response.json()", "docstring": "Create a session for a user.\n\nAttempts to create a user session on the Crowd server.\n\nArgs:\nusername: The account username.\n\npassword: The account password.\n\nremote:\nThe remote address of the user. This can be used\nto create multiple concurrent sessions for a user.\nThe host you run this program on may need to be configured\nin Crowd as a trusted proxy for this to work.\n\nproxy: Value of X-Forwarded-For server header.\n\nReturns:\ndict:\nA dict mapping of user attributes if the application\nauthentication was successful. See the Crowd\ndocumentation for the authoritative list of attributes.\n\nNone: If authentication failed.", "source": "codesearchnet"}
{"code": "def to_dict(self) -> Dict[str, Any]:\n    output = copy.deepcopy(self.__dict__)\n    if '_commit_hash' in output:\n        del output['_commit_hash']\n    if '_original_object_hash' in output:\n        del output['_original_object_hash']\n    if 'compile_config' in output:\n        del output['compile_config']\n    output['transformers_version'] = __version__\n    self.dict_torch_dtype_to_str(output)\n    return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.", "source": "github-repos"}
{"code": "def _initial_guess(self):\n    (a, b, c) = np.polyfit(self.volumes, self.energies, 2)\n    self.eos_params = [a, b, c]\n    v0 = ((- b) / (2 * a))\n    e0 = (((a * (v0 ** 2)) + (b * v0)) + c)\n    b0 = ((2 * a) * v0)\n    b1 = 4\n    (vmin, vmax) = (min(self.volumes), max(self.volumes))\n    if ((not (vmin < v0)) and (v0 < vmax)):\n        raise EOSError('The minimum volume of a fitted parabola is not in the input volumes\\n.')\n    return (e0, b0, b1, v0)", "docstring": "Quadratic fit to get an initial guess for the parameters.\n\nReturns:\ntuple: (e0, b0, b1, v0)", "source": "codesearchnet"}
{"code": "def _timesfm_masked_mean_std(inputs: torch.Tensor, padding: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n\n    def _get_patch_index(arr: torch.Tensor):\n        indices = torch.argmax((arr >= 3).to(torch.int32), dim=1)\n        row_sum = (arr >= 3).to(torch.int32).sum(dim=1)\n        return torch.where(row_sum == 0, arr.shape[1] - 1, indices)\n    pad_sum = torch.sum(1 - padding, dim=2)\n    patch_indices = _get_patch_index(pad_sum)\n    bidxs = torch.arange(inputs.shape[0])\n    arr = inputs[bidxs, patch_indices, :]\n    pad = padding[bidxs, patch_indices, :]\n    mask = 1 - pad\n    num_valid_elements = torch.sum(mask, dim=1)\n    num_valid_elements = torch.where(num_valid_elements == 0, torch.tensor(1, dtype=num_valid_elements.dtype, device=num_valid_elements.device), num_valid_elements)\n    masked_sum = torch.sum(arr * mask, dim=1)\n    masked_squared_sum = torch.sum((arr * mask) ** 2, dim=1)\n    masked_mean = masked_sum / num_valid_elements\n    masked_var = masked_squared_sum / num_valid_elements - masked_mean ** 2\n    masked_var = torch.where(masked_var < 0.0, torch.tensor(0.0, dtype=masked_var.dtype, device=masked_var.device), masked_var)\n    masked_std = torch.sqrt(masked_var)\n    return (masked_mean, masked_std)", "docstring": "Calculates mean and standard deviation of `inputs` across axis 1.\n\nIt excludes values where `padding` is 1.\n\nArgs:\ninputs: A PyTorch tensor of shape [b, n, p].\npadding: A PyTorch tensor of shape [b, n, p] with values 0 or 1.\n\nReturns:\nA tuple containing the mean and standard deviation.\nWe return the statistics of the first patch with more than three non-padded values.", "source": "github-repos"}
{"code": "def summary(self, line_length=None, positions=None, print_fn=None):\n    if not self.built:\n        raise ValueError('This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.')\n    layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn)", "docstring": "Prints a string summary of the network.\n\nArgs:\nline_length: Total length of printed lines\n(e.g. set this to adapt the display to different\nterminal window sizes).\npositions: Relative or absolute positions of log elements\nin each line. If not provided,\ndefaults to `[.33, .55, .67, 1.]`.\nprint_fn: Print function to use. Defaults to `print`.\nIt will be called on each line of the summary.\nYou can set it to a custom function\nin order to capture the string summary.\n\nRaises:\nValueError: if `summary()` is called before the model is built.", "source": "github-repos"}
{"code": "def BasenamePath(self, path):\n    \n    if path.endswith(self.PATH_SEPARATOR):\n      path = path[:-1]\n    _, _, basename = path.rpartition(self.PATH_SEPARATOR)\n    return basename", "docstring": "Determines the basename of the path.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: basename of the path.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, name, data_type_definition, aliases=None, data_type=None,\n      description=None, urls=None):\n    \n    super(ElementSequenceDataTypeDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.byte_order = getattr(\n        data_type_definition, 'byte_order', definitions.BYTE_ORDER_NATIVE)\n    self.elements_data_size = None\n    self.elements_data_size_expression = None\n    self.element_data_type = data_type\n    self.element_data_type_definition = data_type_definition\n    self.elements_terminator = None\n    self.number_of_elements = None\n    self.number_of_elements_expression = None", "docstring": "Initializes a sequence data type definition.\n\nArgs:\nname (str): name.\ndata_type_definition (DataTypeDefinition): sequence element data type\ndefinition.\naliases (Optional[list[str]]): aliases.\ndata_type (Optional[str]): name of the sequence element data type.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def cancel(self, workflow_id):\n        \n        self.logger.debug('Canceling workflow: ' + workflow_id)\n        url = '%(wf_url)s/%(wf_id)s/cancel' % {\n            'wf_url': self.workflows_url, 'wf_id': workflow_id\n        }\n        r = self.gbdx_connection.post(url, data='')\n        r.raise_for_status()", "docstring": "Cancels a running workflow.\n\nArgs:\nworkflow_id (str): Workflow id.\n\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def register_thread(self, thread):\n    with self._lock:\n        self._registered_threads.add(thread)", "docstring": "Register a thread to join.\n\nArgs:\nthread: A Python thread to join.", "source": "github-repos"}
{"code": "def list_changes(self):\n    if (not self.is_attached()):\n        raise ItsdbError('changes are not tracked for detached tables.')\n    return [(i, self[i]) for (i, row) in enumerate(self._records) if (row is not None)]", "docstring": "Return a list of modified records.\n\nThis is only applicable for attached tables.\n\nReturns:\nA list of `(row_index, record)` tuples of modified records\nRaises:\n:class:`delphin.exceptions.ItsdbError`: when called on a\ndetached table", "source": "codesearchnet"}
{"code": "def lsfiles(root='.', **kwargs):\n    paths = ls(root=root, **kwargs)\n    if isfile(root):\n        return paths\n    return [_path for _path in paths if isfile(path(root, _path))]", "docstring": "Return only files from a directory listing.\n\nArguments:\n\nroot (str): Path to directory. Can be relative or absolute.\n**kwargs: Any additional arguments to be passed to ls().\n\nReturns:\n\nlist of str: A list of file paths.\n\nRaises:\n\nOSError: If root directory does not exist.", "source": "codesearchnet"}
{"code": "def from_json_stat(datasets, naming='label', value='value'):\n    warnings.warn(\"Shouldn't use this function anymore! Now use read() methods ofDataset, Collection or Dimension.\", DeprecationWarning)\n    check_input(naming)\n    results = []\n    if (type(datasets) is list):\n        for (idx, element) in enumerate(datasets):\n            for dataset in element:\n                js_dict = datasets[idx][dataset]\n                results.append(generate_df(js_dict, naming, value))\n    elif (isinstance(datasets, OrderedDict) or (type(datasets) is dict) or isinstance(datasets, Dataset)):\n        if ('class' in datasets):\n            if (datasets['class'] == 'dataset'):\n                js_dict = datasets\n                results.append(generate_df(js_dict, naming, value))\n        else:\n            for dataset in datasets:\n                js_dict = datasets[dataset]\n                results.append(generate_df(js_dict, naming, value))\n    return results", "docstring": "Decode JSON-stat formatted data into pandas.DataFrame object.\n\nArgs:\ndatasets(OrderedDict, list): data in JSON-stat format, previously \\\ndeserialized to a python object by \\\njson.load() or json.loads(), for example.\\\nBoth List and OrderedDict are accepted \\\nas inputs.\nnaming(string, optional): dimension naming. Possible values: 'label'\nor 'id'.Defaults to 'label'.\nvalue (string, optional): name of the value column. Defaults to 'value'.\n\nReturns:\nresults(list): list of pandas.DataFrame with imported data.", "source": "codesearchnet"}
{"code": "def text_set_fields(text, variables):\n    text = RE_TEXT_FIELD.sub('{\\\\1}', text)\n    try:\n        return text.format_map(defaultdict(str, variables))\n    except ValueError:\n        return text", "docstring": "Replaces fields in text with values from recipe.\n\nFields in text are just are {field}, where field is a name of the variable.\nMissing fields default to blanks.\n\nArgs:\ntext (string) A paragraph possible containing {field} entries\nvariables: (dict) The keys mapping to field, and values to replace\n\nReturns:\nA string with all values replaced. Or if an error occurs, original text.", "source": "github-repos"}
{"code": "def _get_kind_name(param_type, is_list):\n    \n    if issubclass(param_type, bool):\n      \n      \n      typename = 'bool'\n    elif issubclass(param_type, six.integer_types):\n      \n      \n      typename = 'int64'\n    elif issubclass(param_type, (six.string_types, six.binary_type)):\n      \n      \n      typename = 'bytes'\n    elif issubclass(param_type, float):\n      typename = 'float'\n    else:\n      raise ValueError('Unsupported parameter type: %s' % str(param_type))\n\n    suffix = 'list' if is_list else 'value'\n    return '_'.join([typename, suffix])", "docstring": "Returns the field name given parameter type and is_list.\n\nArgs:\nparam_type: Data type of the hparam.\nis_list: Whether this is a list.\n\nReturns:\nA string representation of the field name.\n\nRaises:\nValueError: If parameter type is not recognized.", "source": "juraj-google-style"}
{"code": "def is_traceback_filtering_enabled():\n    return global_state.get_global_attribute('traceback_filtering', True)", "docstring": "Check if traceback filtering is enabled.\n\nRaw Keras tracebacks (also known as stack traces)\ninvolve many internal frames, which can be\nchallenging to read through, while not being actionable for end users.\nBy default, Keras filters internal frames in most exceptions that it\nraises, to keep traceback short, readable, and focused on what's\nactionable for you (your own code).\n\nSee also `keras.config.enable_traceback_filtering()` and\n`keras.config.disable_traceback_filtering()`.\n\nIf you have previously disabled traceback filtering via\n`keras.config.disable_traceback_filtering()`, you can re-enable it via\n`keras.config.enable_traceback_filtering()`.\n\nReturns:\nBoolean, `True` if traceback filtering is enabled,\nand `False` otherwise.", "source": "github-repos"}
{"code": "def record_data(self, content):\n    if 'timestamp' not in content:\n        content = content.copy()\n        content['timestamp'] = utils.get_current_epoch_time()\n    self.summary_writer.dump(content, records.TestSummaryEntryType.USER_DATA)", "docstring": "Record an entry in test summary file.\n\nSometimes additional data need to be recorded in summary file for\ndebugging or post-test analysis.\n\nEach call adds a new entry to the summary file, with no guarantee of\nits position among the summary file entries.\n\nThe content should be a dict. If absent, timestamp field is added for\nease of parsing later.\n\nArgs:\ncontent: dict, the data to add to summary file.", "source": "github-repos"}
{"code": "def get_assistants(cls, superassistants):\n        \n        _assistants = cls.load_all_assistants(superassistants)\n        result = []\n        for supa in superassistants:\n            result.extend(_assistants[supa.name])\n\n        return result", "docstring": "Returns list of assistants that are subassistants of given superassistants\n(I love this docstring).\n\nArgs:\nroles: list of names of roles, defaults to all roles\nReturns:\nlist of YamlAssistant instances with specified roles", "source": "juraj-google-style"}
{"code": "def _bns_task_id(job: str) -> Union[int, str]:\n    maybe_task_id = job.rsplit('/')[-1].rsplit(':')[0]\n    try:\n        return int(maybe_task_id)\n    except ValueError:\n        return job", "docstring": "Tries to extract an integer task ID from a job name.\n\nFor example, for `job` = '/.../tpu_worker/0:port_name', return 0.\n\nArgs:\njob: A job name to extract task ID from.\n\nReturns:\nThe task ID on success, or the original job name on failure.", "source": "github-repos"}
{"code": "def fn(x: str, y: Optional[list[Union[str, int]]], z: tuple[Union[str, int], str]=(42, 'hello')) -> tuple[int, str]:\n    pass", "docstring": "Test function with multiple args, and docstring args that we have to strip out.\n\nArgs:\nx: The first input. It's got a big multiline\ndescription and also contains\n(choices: [\"a\", \"b\", \"c\"])\n\ny: The second input. It's a big list with a single-line description.\n\nz: The third input. It's some kind of tuple with a default arg.\n\nReturns:\nThe output. The return description is also a big multiline\ndescription that spans multiple lines.", "source": "github-repos"}
{"code": "def return_resource_name(self, record, resource_type):\n        \n        try:\n            if resource_type == 's3':\n                regex = re.compile('.*(\\.(?:s3-|s3){1}(?:.*)?\\.amazonaws\\.com)')\n                bucket_name = record.replace(regex.match(record).group(1), '')\n                return bucket_name\n\n        except Exception as e:\n            self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))\n            return record", "docstring": "Removes the trailing AWS domain from a DNS record\nto return the resource name\n\ne.g bucketname.s3.amazonaws.com will return bucketname\n\nArgs:\nrecord (str): DNS record\nresource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)", "source": "juraj-google-style"}
{"code": "def constant(interval=1):\n    try:\n        itr = iter(interval)\n    except TypeError:\n        itr = itertools.repeat(interval)\n    for val in itr:\n        (yield val)", "docstring": "Generator for constant intervals.\n\nArgs:\ninterval: A constant value to yield or an iterable of such values.", "source": "codesearchnet"}
{"code": "def validate(self, message, schema_name):\n    err = None\n    try:\n        jsonschema.validate(message, self.schemas[schema_name])\n    except KeyError:\n        msg = f\n        err = {'msg': msg}\n    except jsonschema.ValidationError as e:\n        msg = f'Given message was not valid against the schema \"{schema_name}\": {e.message}'\n        err = {'msg': msg}\n    if err:\n        logging.error(**err)\n        raise exceptions.InvalidMessageError(err['msg'])", "docstring": "Validate a message given a schema.\n\nArgs:\nmessage (dict): Loaded JSON of pulled message from Google\nPubSub.\nschema_name (str): Name of schema to validate ``message``\nagainst. ``schema_name`` will be used to look up\nschema from :py:attr:`.MessageValidator.schemas` dict\nRaises:\nInvalidMessageError: if message is invalid against the\ngiven schema.\nInvalidMessageError: if given schema name can not be found.", "source": "codesearchnet"}
{"code": "def _save_env(env):\n    \n    env_path = os.path.join(env[\"resultdir\"], \"env\")\n\n    if os.path.isdir(env[\"resultdir\"]):\n        with open(env_path, \"w\") as f:\n            yaml.dump(env, f)", "docstring": "Saves one environment.\n\nArgs:\nenv (dict): the env dict to save.", "source": "juraj-google-style"}
{"code": "def __init__(self, data_type_definition):\n    \n    if (data_type_definition.false_value is None and\n        data_type_definition.true_value is None):\n      raise errors.FormatError(\n          'Boolean data type has no True or False values.')\n\n    super(BooleanMap, self).__init__(data_type_definition)", "docstring": "Initializes a boolean data type map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nFormatError: if the data type map cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def rename(self, source_file_names, destination_file_names):\n    if not len(source_file_names) == len(destination_file_names):\n        message = 'Unable to rename unequal number of sources and destinations'\n        raise BeamIOError(message)\n    src_dest_pairs = list(zip(source_file_names, destination_file_names))\n    results = s3io.S3IO(options=self._options).rename_files(src_dest_pairs)\n    exceptions = {(src, dest): error for src, dest, error in results if error is not None}\n    if exceptions:\n        raise BeamIOError('Rename operation failed', exceptions)", "docstring": "Rename the files at the source list to the destination list.\nSource and destination lists should be of the same size.\n\nArgs:\nsource_file_names: List of file paths that need to be moved\ndestination_file_names: List of destination_file_names for the files\n\nRaises:\n``BeamIOError``: if any of the rename operations fail", "source": "github-repos"}
{"code": "def get_plot(self, structure, two_theta_range=(0, 90), annotate_peaks=True, ax=None, with_labels=True, fontsize=16):\n    if (ax is None):\n        from pymatgen.util.plotting import pretty_plot\n        plt = pretty_plot(16, 10)\n        ax = plt.gca()\n    else:\n        import matplotlib.pyplot as plt\n    xrd = self.get_pattern(structure, two_theta_range=two_theta_range)\n    for (two_theta, i, hkls, d_hkl) in zip(xrd.x, xrd.y, xrd.hkls, xrd.d_hkls):\n        if (two_theta_range[0] <= two_theta <= two_theta_range[1]):\n            print(hkls)\n            label = ', '.join([str(hkl['hkl']) for hkl in hkls])\n            ax.plot([two_theta, two_theta], [0, i], color='k', linewidth=3, label=label)\n            if annotate_peaks:\n                ax.annotate(label, xy=[two_theta, i], xytext=[two_theta, i], fontsize=fontsize)\n    if with_labels:\n        ax.set_xlabel('$2\\\\theta$ ($^\\\\circ$)')\n        ax.set_ylabel('Intensities (scaled)')\n    if hasattr(ax, 'tight_layout'):\n        ax.tight_layout()\n    return plt", "docstring": "Returns the diffraction plot as a matplotlib.pyplot.\n\nArgs:\nstructure: Input structure\ntwo_theta_range ([float of length 2]): Tuple for range of\ntwo_thetas to calculate in degrees. Defaults to (0, 90). Set to\nNone if you want all diffracted beams within the limiting\nsphere of radius 2 / wavelength.\nannotate_peaks: Whether to annotate the peaks with plane\ninformation.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nwith_labels: True to add xlabels and ylabels to the plot.\nfontsize: (int) fontsize for peak labels.\n\nReturns:\n(matplotlib.pyplot)", "source": "codesearchnet"}
{"code": "class LlavaFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n    do_pad: Optional[bool]", "docstring": "Args:\ndo_pad (`bool`, *optional*):\nWhether to pad the image to a square based on the longest edge.", "source": "github-repos"}
{"code": "def infer_module(filename, pythonpath):\n    for path in filter(bool, pythonpath):\n        if not path.endswith(path_utils.sep):\n            path += path_utils.sep\n        if filename.startswith(path):\n            filename = filename[len(path):]\n            break\n    else:\n        path = ''\n    return Module(path, filename, path_to_module_name(filename))", "docstring": "Convert a filename to a module relative to pythonpath.\n\nThis method tries to deduce the module name from the pythonpath and the\nfilename. This will not always be possible. (It depends on the filename\nstarting with an entry in the pythonpath.)\n\nArgs:\nfilename: The filename of a Python file. E.g. \"foo/bar/baz.py\".\npythonpath: The path Python uses to search for modules.\n\nReturns:\nA Module object.", "source": "github-repos"}
{"code": "def checkpoint_exists(checkpoint_prefix):\n    return checkpoint_exists_internal(checkpoint_prefix)", "docstring": "Checks whether a V1 or V2 checkpoint exists with the specified prefix.\n\nThis is the recommended way to check if a checkpoint exists, since it takes\ninto account the naming difference between V1 and V2 formats.\n\nArgs:\ncheckpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking\npriority.  Typically the result of `Saver.save()` or that of\n`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\nV1/V2.\n\nReturns:\nA bool, true if a checkpoint referred to by `checkpoint_prefix` exists.", "source": "github-repos"}
{"code": "def _get_version(self, root):\n        \n        \n        \n        \n        version = self.get_version(root)\n        if version:\n            return StrictVersion(version)\n\n        raise UnknownVersionError(\n            \"Unable to determine the version of the input document. No \"\n            \"version information found on the root element.\"\n        )", "docstring": "Return the version of the root element passed in.\n\nArgs:\nroot (etree.Element)\n\nReturns:\ndistutils.StrictVersion\n\nRaises:\nUnknownVersionError", "source": "juraj-google-style"}
{"code": "def __init__(self, request, response):\n        \n\n        self.status = QueueItem.STATUS_QUEUED\n        self.decomposed = False\n        self.__response_soup = None\n        self.__index_hash = None\n\n        self.request = request\n        self.response = response", "docstring": "Constructs a QueueItem instance.\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The Request object.\nresponse (:class:`nyawc.http.Response`): The Response object (empty object when initialized).", "source": "juraj-google-style"}
{"code": "def validate(self, corpus):\n        \n        invalid_utterances = {}\n\n        for utterance in corpus.utterances.values():\n            if self.label_list_idx in utterance.label_lists.keys():\n                ll = utterance.label_lists[self.label_list_idx]\n\n                if len(ll) < self.min_number_of_labels:\n                    invalid_utterances[utterance.idx] = 'Only {} labels'.format(len(ll))\n            else:\n                invalid_utterances[utterance.idx] = 'No label-list {}'.format(self.label_list_idx)\n\n        passed = len(invalid_utterances) <= 0\n        info = {\n            'Min. number of labels': str(self.min_number_of_labels),\n            'Label-List ID': self.label_list_idx\n        }\n\n        return base.InvalidUtterancesResult(passed, invalid_utterances, name=self.name(), info=info)", "docstring": "Perform the validation on the given corpus.\n\nArgs:\ncorpus (Corpus): The corpus to test/validate.\n\nReturns:\nInvalidUtterancesResult: Validation result.", "source": "juraj-google-style"}
{"code": "def show_app(app, state, notebook_url, port=0, **kw):\n    logging.basicConfig()\n    from tornado.ioloop import IOLoop\n    from ..server.server import Server\n    loop = IOLoop.current()\n    if callable(notebook_url):\n        origin = notebook_url(None)\n    else:\n        origin = _origin_url(notebook_url)\n    server = Server({'/': app}, io_loop=loop, port=port, allow_websocket_origin=[origin], **kw)\n    server_id = uuid4().hex\n    curstate().uuid_to_server[server_id] = server\n    server.start()\n    if callable(notebook_url):\n        url = notebook_url(server.port)\n    else:\n        url = _server_url(notebook_url, server.port)\n    logging.debug(('Server URL is %s' % url))\n    logging.debug(('Origin URL is %s' % origin))\n    from ..embed import server_document\n    script = server_document(url, resources=None)\n    publish_display_data({HTML_MIME_TYPE: script, EXEC_MIME_TYPE: ''}, metadata={EXEC_MIME_TYPE: {'server_id': server_id}})", "docstring": "Embed a Bokeh server application in a Jupyter Notebook output cell.\n\nArgs:\napp (Application or callable) :\nA Bokeh Application to embed inline in a Jupyter notebook.\n\nstate (State) :\n** Unused **\n\nnotebook_url (str or callable) :\nThe URL of the notebook server that is running the embedded app.\n\nIf ``notebook_url`` is a string, the value string is parsed to\nconstruct the origin and full server URLs.\n\nIf notebook_url is a callable, it must accept one parameter,\nwhich will be the server port, or None. If passed a port,\nthe callable must generate the server URL, otherwise if passed\nNone, it must generate the origin URL for the server.\n\nport (int) :\nA port for the embedded server will listen on.\n\nBy default the port is 0, which results in the server listening\non a random dynamic port.\n\nAny additional keyword arguments are passed to :class:`~bokeh.server.Server` (added in version 1.1)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def serialize_sparse(sp_input, name=None, out_type=dtypes.string):\n    return serialize_sparse_v2(sp_input, out_type, name)", "docstring": "Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.\n\nArgs:\nsp_input: The input `SparseTensor`.\nname: A name prefix for the returned tensors (optional).\nout_type: The `dtype` to use for serialization.\n\nReturns:\nA 3-vector (1-D `Tensor`), with each column representing the serialized\n`SparseTensor`'s indices, values, and shape (respectively).\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"}
{"code": "def load(self, cfgstr=None):\n    from six.moves import cPickle as pickle\n    cfgstr = self._rectify_cfgstr(cfgstr)\n    dpath = self.dpath\n    fname = self.fname\n    verbose = self.verbose\n    if (not self.enabled):\n        if (verbose > 1):\n            self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))\n        raise IOError(3, 'Cache Loading Is Disabled')\n    fpath = self.get_fpath(cfgstr=cfgstr)\n    if (not exists(fpath)):\n        if (verbose > 2):\n            self.log('[cacher] ... cache does not exist: dpath={} fname={} cfgstr={}'.format(basename(dpath), fname, cfgstr))\n        raise IOError(2, ('No such file or directory: %r' % (fpath,)))\n    elif (verbose > 3):\n        self.log('[cacher] ... cache exists: dpath={} fname={} cfgstr={}'.format(basename(dpath), fname, cfgstr))\n    try:\n        with open(fpath, 'rb') as file_:\n            data = pickle.load(file_)\n    except Exception as ex:\n        if (verbose > 0):\n            self.log(('CORRUPTED? fpath = %s' % (fpath,)))\n        if (verbose > 1):\n            self.log('[cacher] ... CORRUPTED? dpath={} cfgstr={}'.format(basename(dpath), cfgstr))\n        if isinstance(ex, (EOFError, IOError, ImportError)):\n            raise IOError(str(ex))\n        else:\n            if (verbose > 1):\n                self.log('[cacher] ... unknown reason for exception')\n            raise\n    else:\n        if (self.verbose > 2):\n            self.log('[cacher] ... {} cache hit'.format(self.fname))\n        elif (verbose > 1):\n            self.log('[cacher] ... cache hit')\n    return data", "docstring": "Loads the data\n\nRaises:\nIOError - if the data is unable to be loaded. This could be due to\na cache miss or because the cache is disabled.\n\nExample:\n>>> from ubelt.util_cache import *  # NOQA\n>>> # Setting the cacher as enabled=False turns it off\n>>> cacher = Cacher('test_disabled_load', '', enabled=True)\n>>> cacher.save('data')\n>>> assert cacher.load() == 'data'\n>>> cacher.enabled = False\n>>> assert cacher.tryload() is None", "source": "codesearchnet"}
{"code": "def to_subquery(self) -> StandardSqlExpression:\n    return SubQuery(Select(select_part=self, from_part=None))", "docstring": "Renders the expression as a subquery.\n\nBuilds a SELECT statement for the expression and returns it as a subquery.\nExpressions which already render a SELECT (such as the Select and\nUnionExpression classes) should overide this to remove the extra SELECT.\n\nReturns:\nA SubQuery expression for this expression.", "source": "github-repos"}
{"code": "def remove_observer(self, callback):\n    if (callback not in self._observers):\n        raise ValueError('{} is not an observer of {}'.format(callback, self))\n    self._observers.remove(callback)", "docstring": "Remove an observer from this event.\n\nArgs:\ncallback: A function or coroutine callback to remove from this\nevent.\n\nRaises:\nValueError: If the callback is not an observer of this event.", "source": "codesearchnet"}
{"code": "def find_structure(self, filename_or_structure):\n    try:\n        if isinstance(filename_or_structure, str):\n            s = Structure.from_file(filename_or_structure)\n        elif isinstance(filename_or_structure, Structure):\n            s = filename_or_structure\n        else:\n            raise MPRestError('Provide filename or Structure object.')\n        payload = {'structure': json.dumps(s.as_dict(), cls=MontyEncoder)}\n        response = self.session.post('{}/find_structure'.format(self.preamble), data=payload)\n        if (response.status_code in [200, 400]):\n            resp = json.loads(response.text, cls=MontyDecoder)\n            if resp['valid_response']:\n                return resp['response']\n            else:\n                raise MPRestError(resp['error'])\n        raise MPRestError('REST error with status code {} and error {}'.format(response.status_code, response.text))\n    except Exception as ex:\n        raise MPRestError(str(ex))", "docstring": "Finds matching structures on the Materials Project site.\n\nArgs:\nfilename_or_structure: filename or Structure object\n\nReturns:\nA list of matching structures.\n\nRaises:\nMPRestError", "source": "codesearchnet"}
{"code": "def _try_run_local_init_op(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n    if self._local_init_op is not None:\n        is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)\n        if is_ready_for_local_init:\n            logging.info('Running local_init_op.')\n            sess.run(self._local_init_op, feed_dict=self._local_init_feed_dict, options=self._local_init_run_options)\n            logging.info('Done running local_init_op.')\n            return (True, None)\n        else:\n            return (False, msg)\n    return (True, None)", "docstring": "Tries to run _local_init_op, if not None, and is ready for local init.\n\nArgs:\nsess: A `Session`.\n\nReturns:\nA tuple (is_successful, msg), where is_successful is True if\n_local_init_op is None, or we ran _local_init_op, and False otherwise;\nand msg is a `String` with the reason why the model was not ready to run\nlocal init.", "source": "github-repos"}
{"code": "def load_data(path):\n        \n\n\n        \n        if not os.path.exists(path):\n            print(path)\n            raise AttributeError('Path given does not exist!')\n\n        \n        \n        \n\n\n        \n        \n        data = {}\n        \n        \n        \n        \n        \n        if 'raw_data' in os.listdir(path):  \n            data_files = os.listdir(os.path.join(path, 'raw_data' + '/'))\n            path = os.path.join(path, 'raw_data' + '/')\n\n        else:\n            data_files = glob.glob(os.path.join(path, '*.csv'))\n\n        \n        if not data_files:\n            raise AttributeError('Could not find data files in {:s}'.format(path))\n\n        \n        for data_file in data_files:\n            \n            data_name = data_file.split('-')[-1][0:-4] \n            imported_data_df = pd.read_csv(os.path.join(path, data_file))\n\n            \n            \n            column_headers = list(imported_data_df.columns.values)\n            if sum([int(x.isdigit()) for x in column_headers]) != len(column_headers):\n                data[data_name] = {h: imported_data_df[h].as_matrix() for h in column_headers}\n            else:\n                \n                data[data_name] = np.squeeze(imported_data_df.as_matrix())\n\n        return data", "docstring": "loads the data that has been save with Script.save.\nArgs:\npath: path to folder saved by Script.save or raw_data folder within\nReturns:\na dictionary with the data of form\ndata = {param_1_name: param_1_data, ...}", "source": "juraj-google-style"}
{"code": "def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: SiglipVisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`SiglipConfig`] (or a derived class) from siglip text model configuration and siglip vision\nmodel configuration.\n\nReturns:\n[`SiglipConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def oauth_access(self, *, client_id: str, client_secret: str, code: str, **kwargs) -> SlackResponse:\n    kwargs.update({'client_id': client_id, 'client_secret': client_secret, 'code': code})\n    return self.api_call('oauth.access', data=kwargs)", "docstring": "Exchanges a temporary OAuth verifier code for an access token.\n\nArgs:\nclient_id (str): Issued when you created your application. e.g. '4b39e9-752c4'\nclient_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'\ncode (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'", "source": "codesearchnet"}
{"code": "def duplicate_verts(script):\n    \n    if script.ml_version == '1.3.4BETA':\n        filter_xml = '  <filter name=\"Remove Duplicated Vertex\"/>\\n'\n    else:\n        filter_xml = '  <filter name=\"Remove Duplicate Vertices\"/>\\n'\n\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "\"Check for every vertex on the mesh: if there are two vertices with\nthe same coordinates they are merged into a single one.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def ParseLocalEntryRow(\n      self, parser_mediator, query, row, cache=None, database=None,\n      **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    inode_number = self._GetRowValue(query_hash, row, 'inode_number')\n    local_path = self.GetLocalPath(inode_number, cache, database)\n\n    event_data = GoogleDriveSnapshotLocalEntryEventData()\n    event_data.path = local_path\n    event_data.query = query\n    event_data.size = self._GetRowValue(query_hash, row, 'size')\n\n    timestamp = self._GetRowValue(query_hash, row, 'modified')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a local entry row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (Optional[SQLiteCache]): cache.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "juraj-google-style"}
{"code": "def _num_slices_in_dimension(self, axis):\n    if not isinstance(axis, int):\n        raise TypeError('axis must be an integer')\n    if axis < 0:\n        rank = self.rank\n        if rank is None:\n            raise ValueError(\"You can't use negative values if the rank is undefined\")\n        axis = axis + rank\n    if axis == 0:\n        return self._dimension(0)\n    if axis <= self.num_row_partitions:\n        return self.row_partitions[axis - 1].nvals()\n    remainder = axis - (self.num_row_partitions - 1)\n    return _reduce_prod_patch(self.inner_shape[:remainder])", "docstring": "The total size of a dimension (like nvals).\n\nEffectively, this is self[:axis+1]._num_elements()\n\nExample:\nshape = DynamicRaggedShape._from_inner_shape([2, 3, 4])\nshape._num_slices_in_dimension(0) = 2\nshape._num_slices_in_dimension(1) = 6\nshape._num_slices_in_dimension(2) = 24\nshape._num_slices_in_dimension(-1) = 24\nshape._num_slices_in_dimension(-2) = 6\nshape._num_slices_in_dimension(-2) = 2\n\nArgs:\naxis: the last axis to include in the number of elements. If negative,\nthen axis = axis + rank.\n\nReturns:\nThe number of elements in the shape.", "source": "github-repos"}
{"code": "def command_runner(shell_command, force_rerun_flag, outfile_checker, cwd=None, silent=False):\n    program_and_args = shlex.split(shell_command)\n    if (not program_exists(program_and_args[0])):\n        raise OSError('{}: program not installed'.format(program_and_args[0]))\n    if cwd:\n        outfile_checker = op.join(cwd, op.basename(outfile_checker))\n    if force_rerun(flag=force_rerun_flag, outfile=outfile_checker):\n        if silent:\n            command = subprocess.Popen(program_and_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)\n            (out, err) = command.communicate()\n            ret = command.returncode\n        else:\n            for path in execute(cmd=program_and_args, cwd=cwd):\n                print(path, end='')\n        log.debug('{}: Ran program, output to {}'.format(program_and_args[0], outfile_checker))\n    else:\n        log.debug('{}: Output already exists'.format(outfile_checker))", "docstring": "Run a shell command with subprocess, with additional options to check if output file exists and printing stdout.\n\nArgs:\nshell_command (str): Command as it would be formatted in the command-line (ie. \"program -i test.in -o test.out\").\nforce_rerun_flag: If the program should be rerun even if the output file exists.\noutfile_checker (str): Name out the output file which may have been generated. This does not specify what the outfile\nwill be, that should be done in the program's args or predetermined.\ncwd (str): Path to working directory where command will be executed.\nsilent (bool): If program STDOUT should be printed to the current shell.\n\nReturns:\nbool: If the program ran successfully.", "source": "codesearchnet"}
{"code": "def is_published(self):\n    citeable = (('publication_info' in self.record) and is_citeable(self.record['publication_info']))\n    submitted = (('dois' in self.record) and any((('journal_title' in el) for el in force_list(self.record.get('publication_info')))))\n    return (citeable or submitted)", "docstring": "Return True if a record is published.\n\nWe say that a record is published if it is citeable, which means that\nit has enough information in a ``publication_info``, or if we know its\nDOI and a ``journal_title``, which means it is in press.\n\nReturns:\nbool: whether the record is published.\n\nExamples:\n>>> record = {\n...     'dois': [\n...         {'value': '10.1016/0029-5582(61)90469-2'},\n...     ],\n...     'publication_info': [\n...         {'journal_title': 'Nucl.Phys.'},\n...     ],\n... }\n>>> LiteratureReader(record).is_published\nTrue", "source": "codesearchnet"}
{"code": "def restore_state(self, state):\n        \n\n        super(EmulatedPeripheralTile, self).restore_state(state)\n\n        self.debug_mode = state.get('debug_mode', False)\n        self.run_level = state.get('run_level', None)\n\n        if state.get('app_started', False):\n            self._hosted_app_running.set()", "docstring": "Restore the current state of this emulated object.\n\nArgs:\nstate (dict): A previously dumped state produced by dump_state.", "source": "juraj-google-style"}
{"code": "def _init_project_service(self, version):\n    project_cfg = self._load_config_section(CONFIG_PROJECT_SECTION)\n    self._token_project = project_cfg[CONFIG_TOKEN]\n    proto = project_cfg[CONFIG_PROTOCOL]\n    host = project_cfg[CONFIG_HOST]\n    self._project = ProjectService(host, version)\n    self._project.base_protocol = proto\n    self._project.set_auth(self._token_project)", "docstring": "Method to initialize the Project Service from the config data\n\nArgs:\nversion (string): Version of Boss API to use.\n\nReturns:\nNone\n\nRaises:\n(KeyError): if given invalid version.", "source": "codesearchnet"}
{"code": "def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None):\n    course_videos = CourseVideo.objects.select_related('video').prefetch_related('video__encoded_videos', 'video__encoded_videos__profile').filter(video__encoded_videos__profile__profile_name='youtube').order_by('id').distinct()\n    if course_ids:\n        course_videos = course_videos.filter(course_id__in=course_ids)\n    course_videos = course_videos.values_list('course_id', 'video__edx_video_id')\n    if ((limit is not None) and (offset is not None)):\n        course_videos = course_videos[offset:(offset + limit)]\n    course_videos_with_yt_profile = []\n    for (course_id, edx_video_id) in course_videos:\n        yt_profile = EncodedVideo.objects.filter(video__edx_video_id=edx_video_id, profile__profile_name='youtube').first()\n        if yt_profile:\n            course_videos_with_yt_profile.append((course_id, edx_video_id, yt_profile.url))\n    return course_videos_with_yt_profile", "docstring": "Returns a list that contains all the course ids and video ids with the youtube profile\n\nArgs:\ncourse_ids (list): valid course ids\nlimit (int): batch records limit\noffset (int): an offset for selecting a batch\nReturns:\n(list): Tuples of course_id, edx_video_id and youtube video url", "source": "codesearchnet"}
{"code": "def train(total_loss, global_step):\n    num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size)\n    decay_steps = int((num_batches_per_epoch * NUM_EPOCHS_PER_DECAY))\n    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)\n    tf.summary.scalar('learning_rate', lr)\n    loss_averages_op = _add_loss_summaries(total_loss)\n    with tf.control_dependencies([loss_averages_op]):\n        opt = tf.train.GradientDescentOptimizer(lr)\n        grads = opt.compute_gradients(total_loss)\n    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n    for var in tf.trainable_variables():\n        tf.summary.histogram(var.op.name, var)\n    for (grad, var) in grads:\n        if (grad is not None):\n            tf.summary.histogram((var.op.name + '/gradients'), grad)\n    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n        train_op = tf.no_op(name='train')\n    return train_op", "docstring": "Train CIFAR-10 model.\n\nCreate an optimizer and apply to all trainable variables. Add moving\naverage for all trainable variables.\n\nArgs:\ntotal_loss: Total loss from loss().\nglobal_step: Integer Variable counting the number of training steps\nprocessed.\nReturns:\ntrain_op: op for training.", "source": "codesearchnet"}
{"code": "def signatures(self, transaction):\n    if (not self.multi_wallet):\n        raise DecryptionError('This wallet must be unlocked with wallet.unlock(passphrase)')\n    return self.multi_wallet.signatures(transaction)", "docstring": "Sign a transaction.\n\nArgs:\ntransaction (coinop.Transaction)\n\nReturns:\nA list of signature dicts of the form\n[ {'primary': 'base58signaturestring'},\n... ]", "source": "codesearchnet"}
{"code": "def __setitem__(self, key, value):\n        \n        with self._condition:\n            if key not in self._processors:\n                proc_iterator = self._proc_iter_class()\n                proc_iterator.add_processor(value)\n                self._processors[key] = proc_iterator\n            else:\n                self._processors[key].add_processor(value)\n            if value.connection_id not in self._identities:\n                self._identities[value.connection_id] = [key]\n            else:\n                self._identities[value.connection_id].append(key)\n            self._condition.notify_all()", "docstring": "Either create a new ProcessorIterator, if none exists for a\nProcessorType, or add the Processor to the ProcessorIterator.\n\nArgs:\nkey (ProcessorType): The type of transactions this transaction\nprocessor can handle.\nvalue (Processor): Information about the transaction processor.", "source": "juraj-google-style"}
{"code": "def CreateSourceType(cls, type_indicator, attributes):\n    \n    if type_indicator not in cls._source_type_classes:\n      raise errors.FormatError(\n          'Unsupported type indicator: {0:s}.'.format(type_indicator))\n\n    return cls._source_type_classes[type_indicator](**attributes)", "docstring": "Creates a source type.\n\nArgs:\ntype_indicator (str): source type indicator.\nattributes (dict[str, object]): source type attributes.\n\nReturns:\nSourceType: a source type.\n\nRaises:\nFormatError: if the type indicator is not set or unsupported,\nor if required attributes are missing.", "source": "juraj-google-style"}
{"code": "def get_compute_usage(access_token, subscription_id, location):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.compute/locations/', location,\n                        '/usages?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List compute usage and limits for a location.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. JSON body of Compute usage and limits data.", "source": "juraj-google-style"}
{"code": "def get_data_xlsx(file_name, file_contents=None, on_demand=False):\n    \n    return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)", "docstring": "Loads the new excel format files. Old format files will automatically get loaded as well.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy.", "source": "juraj-google-style"}
{"code": "def count(self, files=False):\n    return (len(self.files) if files else len(self.unique()))", "docstring": "Returns a count of unique values or files.\n\nArgs:\nfiles (bool): When True, counts all files mapped to the Entity.\nWhen False, counts all unique values.\nReturns: an int.", "source": "codesearchnet"}
{"code": "def list_vdirs(site, app=_DEFAULT_APP):\n    ret = dict()\n    ps_cmd = ['Get-WebVirtualDirectory', '-Site', \"'{0}'\".format(site), '-Application', \"'{0}'\".format(app), '|', \"Select-Object PhysicalPath, @{ Name = 'name';\", \"Expression = { $_.path.Split('/')[-1] } }\"]\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n    for item in items:\n        ret[item['name']] = {'sourcepath': item['physicalPath']}\n    if (not ret):\n        log.warning('No vdirs found in output: %s', cmd_ret)\n    return ret", "docstring": "Get all configured IIS virtual directories for the specified site, or for\nthe combination of site and application.\n\nArgs:\nsite (str): The IIS site name.\napp (str): The IIS application.\n\nReturns:\ndict: A dictionary of the virtual directory names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_vdirs site", "source": "codesearchnet"}
{"code": "def by_name(name):\n    devices = discover(all_households=True)\n    for device in (devices or []):\n        if (device.player_name == name):\n            return device\n    return None", "docstring": "Return a device by name.\n\nArgs:\nname (str): The name of the device to return.\n\nReturns:\n:class:`~.SoCo`: The first device encountered among all zone with the\ngiven player name. If none are found `None` is returned.", "source": "codesearchnet"}
{"code": "def match_shortname(self, name, filled_args=None):\n        \n\n        filled_count = 0\n        if filled_args is not None:\n            filled_count = len(filled_args)\n\n        possible = [x for x in self.arg_names[filled_count:] if x.startswith(name)]\n        if len(possible) == 0:\n            raise ArgumentError(\"Could not convert short-name full parameter name, none could be found\", short_name=name, parameters=self.arg_names)\n        elif len(possible) > 1:\n            raise ArgumentError(\"Short-name is ambiguous, could match multiple keyword parameters\", short_name=name, possible_matches=possible)\n\n        return possible[0]", "docstring": "Try to convert a prefix into a parameter name.\n\nIf the result could be ambiguous or there is no matching\nparameter, throw an ArgumentError\n\nArgs:\nname (str): A prefix for a parameter name\nfilled_args (list): A list of filled positional arguments that will be\nremoved from consideration.\n\nReturns:\nstr: The full matching parameter name", "source": "juraj-google-style"}
{"code": "def record_ttft_metric(self, created_time: float, request_id: str) -> None:\n    if not _has_opentelemetry:\n        return\n    ttft_ms = (time.time() - created_time) * 1000.0\n    try:\n        self.ttft_histogram.record(ttft_ms)\n        logger.debug(f'Recorded TTFT for request {request_id}: {ttft_ms:.2f}ms')\n    except Exception as e:\n        logger.warning(f'Failed to record TTFT metric: {e}')", "docstring": "Record Time to First Token (TTFT).\n\nArgs:\ncreated_time: The time the request was created\nrequest_id: The ID of the request", "source": "github-repos"}
{"code": "def write(self, string):\n        \n        \n        \n        \n        x, y = self._normalizeCursor(*self._cursor)\n        width, height = self.get_size()\n        wrapper = _textwrap.TextWrapper(initial_indent=(' '*x), width=width)\n        writeLines = []\n        for line in string.split('\\n'):\n            if line:\n                writeLines += wrapper.wrap(line)\n                wrapper.initial_indent = ''\n            else:\n                writeLines.append([])\n\n        for line in writeLines:\n            x, y = self._normalizeCursor(x, y)\n            self.draw_str(x, y, line[x:], self._fg, self._bg)\n            y += 1\n            x = 0\n        y -= 1\n        self._cursor = (x, y)", "docstring": "This method mimics basic file-like behaviour.\n\nBecause of this method you can replace sys.stdout or sys.stderr with\na :any:`Console` or :any:`Window` instance.\n\nThis is a convoluted process and behaviour seen now can be excepted to\nchange on later versions.\n\nArgs:\nstring (Text): The text to write out.\n\n.. seealso:: :any:`set_colors`, :any:`set_mode`, :any:`Window`", "source": "juraj-google-style"}
{"code": "def meas_gate(self, circuit, qreg, op):\n    if (self.meas_fun is None):\n        pass\n    else:\n        self.meas_fun(circuit, qreg, op)", "docstring": "Add measurement gates to a circuit.\n\nArgs:\ncircuit (QuantumCircuit): circuit to add measurement to.\nqreg (tuple(QuantumRegister,int)): quantum register being measured.\nop (str): the basis label for the measurement.", "source": "codesearchnet"}
{"code": "def auth_user_id(self, value):\n        \n        if value == self._defaults['ai.user.authUserId'] and 'ai.user.authUserId' in self._values:\n            del self._values['ai.user.authUserId']\n        else:\n            self._values['ai.user.authUserId'] = value", "docstring": "The auth_user_id property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def github_belspec_files(spec_dir, force: bool = False):\n    \n\n    if not force:\n        dtnow = datetime.datetime.utcnow()\n        delta = datetime.timedelta(1)\n        yesterday = dtnow - delta\n\n        for fn in glob.glob(f\"{spec_dir}/bel*yaml\"):\n            if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:\n                log.info(\"Skipping BEL Specification update - specs less than 1 day old\")\n                return\n\n    repo_url = \"https:\n    params = {}\n    github_access_token = os.getenv(\"GITHUB_ACCESS_TOKEN\", \"\")\n    if github_access_token:\n        params = {\"access_token\": github_access_token}\n\n    r = requests.get(repo_url, params=params)\n    if r.status_code == 200:\n        results = r.json()\n        for f in results:\n            url = f[\"download_url\"]\n            fn = os.path.basename(url)\n\n            if \"yaml\" not in fn and \"yml\" in fn:\n                fn = fn.replace(\"yml\", \"yaml\")\n\n            r = requests.get(url, params=params, allow_redirects=True)\n            if r.status_code == 200:\n                open(f\"{spec_dir}/{fn}\", \"wb\").write(r.content)\n            else:\n                sys.exit(\n                    f\"Could not get BEL Spec file {url} from Github -- Status: {r.status_code}  Msg: {r.content}\"\n                )\n    else:\n        sys.exit(\n            f\"Could not get BEL Spec directory listing from Github -- Status: {r.status_code}  Msg: {r.content}\"\n        )", "docstring": "Get belspec files from Github repo\n\n\nArgs:\nspec_dir: directory to store the BEL Specification and derived files\nforce: force update of BEL Specifications from Github - skipped if local files less than 1 day old", "source": "juraj-google-style"}
{"code": "def DNN(input_shape,\n           dense_layers,\n           output_layer=[1, 'sigmoid'],\n           optimizer='adam',\n           loss='binary_crossentropy'):\n    \n\n    inputs = Input(shape=input_shape)\n    \n    dense = inputs\n\n    for i, d in enumerate(dense_layers):\n        dense = Dense(d, activation='relu')(dense)\n        dense = BatchNormalization()(dense)\n        dense = Dropout(0.3)(dense)\n    \n    output = Dense(output_layer[0], activation=output_layer[1])(dense)\n    \n    model = Model(inputs=inputs, outputs=output)\n    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\n    \n    return model", "docstring": "Summary\n\nArgs:\ninput_shape (list): The shape of the input layer\ntargets (int): Number of targets\ndense_layers (list): Dense layer descriptor [fully_connected]\noptimizer (str or object optional): Keras optimizer as string or keras optimizer\n\nReturns:\nTYPE: model, build_arguments", "source": "juraj-google-style"}
{"code": "def _PromptUserForAPFSVolumeIdentifiers(\n      self, volume_system, volume_identifiers):\n    \n    print_header = True\n    while True:\n      if print_header:\n        self._PrintAPFSVolumeIdentifiersOverview(\n            volume_system, volume_identifiers)\n\n        print_header = False\n\n      lines = self._textwrapper.wrap(self._USER_PROMPT_APFS)\n      self._output_writer.Write('\\n'.join(lines))\n      self._output_writer.Write('\\n\\nVolume identifiers: ')\n\n      try:\n        selected_volumes = self._ReadSelectedVolumes(\n            volume_system, prefix='apfs')\n        if (not selected_volumes or\n            not set(selected_volumes).difference(volume_identifiers)):\n          break\n      except ValueError:\n        pass\n\n      self._output_writer.Write('\\n')\n\n      lines = self._textwrapper.wrap(\n          'Unsupported volume identifier(s), please try again or abort with '\n          'Ctrl^C.')\n      self._output_writer.Write('\\n'.join(lines))\n      self._output_writer.Write('\\n\\n')\n\n    return selected_volumes", "docstring": "Prompts the user to provide APFS volume identifiers.\n\nArgs:\nvolume_system (dfvfs.APFSVolumeSystem): volume system.\nvolume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix or None.", "source": "juraj-google-style"}
{"code": "def combine_slices(self, slices, tensor_shape, device=None):\n    if (tensor_shape.ndims == 0):\n        return slices[0]\n    ret = slices[:]\n    tensor_layout = self.tensor_layout(tensor_shape)\n    for (mesh_dim, tensor_axis) in zip(self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)):\n        slice_size = (len(ret) \n        if (tensor_axis is None):\n            ret = ret[:slice_size]\n        else:\n            if device:\n                devices = ([device] * slice_size)\n            else:\n                devices = [ret[i].device for i in xrange(slice_size)]\n            concat_inputs = []\n            for i in xrange(slice_size):\n                concat_inputs.append([ret[(i + (slice_size * j))] for j in xrange(mesh_dim.size)])\n            ret = parallel(devices, tf.concat, concat_inputs, axis=([tensor_axis] * len(devices)))\n    assert (len(ret) == 1)\n    return ret[0]", "docstring": "Turns a set of slices into a single tensor.\n\nArgs:\nslices: list of tf.Tensor with length self.size.\ntensor_shape: Shape.\ndevice: optional str. If absent, we use the devices of the slices.\n\nReturns:\ntf.Tensor.", "source": "codesearchnet"}
{"code": "def chimera_anticluster(m, n=None, t=4, multiplier=3.0, cls=BinaryQuadraticModel, subgraph=None, seed=None):\n    if (seed is None):\n        seed = numpy.random.randint((2 ** 32), dtype=np.uint32)\n    r = numpy.random.RandomState(seed)\n    m = int(m)\n    if (n is None):\n        n = m\n    else:\n        n = int(n)\n    t = int(t)\n    ldata = np.zeros((((m * n) * t) * 2))\n    if (m and n and t):\n        (inrow, incol) = zip(*_iter_chimera_tile_edges(m, n, t))\n        if ((m > 1) or (n > 1)):\n            (outrow, outcol) = zip(*_iter_chimera_intertile_edges(m, n, t))\n        else:\n            outrow = outcol = tuple()\n        qdata = r.choice(((- 1.0), 1.0), size=(len(inrow) + len(outrow)))\n        qdata[len(inrow):] *= multiplier\n        irow = (inrow + outrow)\n        icol = (incol + outcol)\n    else:\n        irow = icol = qdata = tuple()\n    bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)\n    if (subgraph is not None):\n        (nodes, edges) = subgraph\n        subbqm = cls.empty(SPIN)\n        try:\n            subbqm.add_variables_from(((v, bqm.linear[v]) for v in nodes))\n        except KeyError:\n            msg = \"given 'subgraph' contains nodes not in Chimera({}, {}, {})\".format(m, n, t)\n            raise ValueError(msg)\n        try:\n            subbqm.add_interactions_from(((u, v, bqm.adj[u][v]) for (u, v) in edges))\n        except KeyError:\n            msg = \"given 'subgraph' contains edges not in Chimera({}, {}, {})\".format(m, n, t)\n            raise ValueError(msg)\n        bqm = subbqm\n    return bqm", "docstring": "Generate an anticluster problem on a Chimera lattice.\n\nAn anticluster problem has weak interactions within a tile and strong\ninteractions between tiles.\n\nArgs:\nm (int):\nNumber of rows in the Chimera lattice.\n\nn (int, optional, default=m):\nNumber of columns in the Chimera lattice.\n\nt (int, optional, default=t):\nSize of the shore within each Chimera tile.\n\nmultiplier (number, optional, default=3.0):\nStrength of the intertile edges.\n\ncls (class, optional, default=:class:`.BinaryQuadraticModel`):\nBinary quadratic model class to build from.\n\nsubgraph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):\nA subgraph of a Chimera(m, n, t) graph to build the anticluster\nproblem on.\n\nseed (int, optional, default=None):\nRandom seed.\n\nReturns:\n:obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.", "source": "codesearchnet"}
{"code": "def from_coffeescript(cls, code, args={}):\n    compiled = nodejs_compile(code, lang='coffeescript', file='???')\n    if ('error' in compiled):\n        raise CompilationError(compiled.error)\n    return cls(code=compiled.code, args=args)", "docstring": "Create a CustomJSHover instance from a CoffeeScript snippet. The\nfunction bodies are translated to JavaScript functions using node and\ntherefore require return statements.\n\nThe ``code`` snippet namespace will contain the variable ``value`` (the\nuntransformed value) at render time as well as ``special_vars`` and\n``format`` as described in the class description.\n\nExample:\n\n.. code-block:: coffeescript\n\nformatter = CustomJSHover.from_coffeescript(\"return value + \" total\")\n\nArgs:\ncode (str) :\nA coffeescript snippet to transform a single ``value`` value\n\nReturns:\nCustomJSHover", "source": "codesearchnet"}
{"code": "def _infer_device_name(self, device_name, node_name):\n    if device_name is None:\n        if node_name in self._node_devices:\n            if len(self._node_devices[node_name]) == 1:\n                return list(self._node_devices[node_name])[0]\n            else:\n                raise ValueError(\"There are multiple (%d) devices with nodes named '%s' but device_name is not specified.\" % (len(self._node_devices[node_name]), node_name))\n        else:\n            raise ValueError(\"None of the %d device(s) has a node named '%s'.\" % (len(self._device_names), node_name))\n    else:\n        return device_name", "docstring": "Infer the device name given node name.\n\nIf device_name is provided (i.e., not None), it'll be simply returned right\naway.\n\nArgs:\ndevice_name: (str or None) name of the device. If None, will try to infer\nthe device name by looking at the available nodes.\nnode_name: (str) name of the node.\n\nReturns:\n(str) Inferred name of the device, if available.\n\nRaises:\nValueError: If the node name does not exist on any of the available\ndevices or if there are multiple devices that contain the node with\nthe given name.", "source": "github-repos"}
{"code": "def find_mip(self, direction, mechanism, purview):\n        \n        if not purview:\n            return _null_ria(direction, mechanism, purview)\n\n        \n        \n        repertoire = self.repertoire(direction, mechanism, purview)\n\n        def _mip(phi, partition, partitioned_repertoire):\n            \n            \n            \n            return RepertoireIrreducibilityAnalysis(\n                phi=phi,\n                direction=direction,\n                mechanism=mechanism,\n                purview=purview,\n                partition=partition,\n                repertoire=repertoire,\n                partitioned_repertoire=partitioned_repertoire,\n                node_labels=self.node_labels\n            )\n\n        \n        if (direction == Direction.CAUSE and\n                np.all(repertoire == 0)):\n            return _mip(0, None, None)\n\n        mip = _null_ria(direction, mechanism, purview, phi=float('inf'))\n\n        for partition in mip_partitions(mechanism, purview, self.node_labels):\n            \n            \n            phi, partitioned_repertoire = self.evaluate_partition(\n                direction, mechanism, purview, partition,\n                repertoire=repertoire)\n\n            \n            if phi == 0:\n                return _mip(0.0, partition, partitioned_repertoire)\n\n            \n            if phi < mip.phi:\n                mip = _mip(phi, partition, partitioned_repertoire)\n\n        return mip", "docstring": "Return the minimum information partition for a mechanism over a\npurview.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The nodes in the mechanism.\npurview (tuple[int]): The nodes in the purview.\n\nReturns:\nRepertoireIrreducibilityAnalysis: The irreducibility analysis for\nthe mininum-information partition in one temporal direction.", "source": "juraj-google-style"}
{"code": "def _lookup_key_parse(table_keys):\n    regex_matcher = '\\\\[([^\\\\]]+)]'\n    valid_dynamodb_datatypes = ['M', 'S', 'N', 'L']\n    clean_table_keys = []\n    new_keys = []\n    for key in table_keys:\n        match = re.search(regex_matcher, key)\n        if match:\n            if (match.group(1) in valid_dynamodb_datatypes):\n                match_val = str(match.group(1))\n                key = key.replace(match.group(0), '')\n                new_keys.append({match_val: key})\n                clean_table_keys.append(key)\n            else:\n                raise ValueError('Stacker does not support looking up the datatype: {}'.format(str(match.group(1))))\n        else:\n            new_keys.append({'S': key})\n            clean_table_keys.append(key)\n    key_dict = {}\n    key_dict['new_keys'] = new_keys\n    key_dict['clean_table_keys'] = clean_table_keys\n    return key_dict", "docstring": "Return the order in which the stacks should be executed.\n\nArgs:\ndependencies (dict): a dictionary where each key should be the\nfully qualified name of a stack whose value is an array of\nfully qualified stack names that the stack depends on. This is\nused to generate the order in which the stacks should be\nexecuted.\n\nReturns:\ndict: includes a dict of lookup types with data types ('new_keys')\nand a list of the lookups with without ('clean_table_keys')", "source": "codesearchnet"}
{"code": "class EmbeddingTypeAdapter(Generic[EmbeddingTypeAdapterInputT, EmbeddingTypeAdapterOutputT]):\n    input_fn: Callable[[Sequence[EmbeddingTypeAdapterInputT]], List[str]]\n    output_fn: Callable[[Sequence[EmbeddingTypeAdapterInputT], Sequence[Any]], List[EmbeddingTypeAdapterOutputT]]\n\n    def __reduce__(self):\n        \n        return (self.__class__, (self.input_fn, self.output_fn))", "docstring": "Adapts input types to text for embedding and converts output embeddings.\n\nArgs:\ninput_fn: Function to extract text for embedding from input type\noutput_fn: Function to create output type from input and embeddings", "source": "github-repos"}
{"code": "def _get_pprof_proto(self, profile_datum_generator):\n    pprof_profile = profile_pb2.Profile()\n    samples = Samples(self._string_table)\n    for datum in profile_datum_generator:\n        if not datum.traceback:\n            continue\n        stack_frame = datum.traceback[-1]\n        after_apply_op = False\n        location_ids = []\n        for stack_frame_index in reversed(range(len(datum.traceback) - 1)):\n            prev_stack_frame = stack_frame\n            stack_frame = datum.traceback[stack_frame_index]\n            prev_file_path = prev_stack_frame[0]\n            prev_function = prev_stack_frame[2]\n            prev_function_start_line = -1\n            curr_file_path = stack_frame[0]\n            curr_line_number = stack_frame[1]\n            if not after_apply_op:\n                if prev_function == 'apply_op':\n                    after_apply_op = True\n                continue\n            location_index = self._locations.index_of(curr_file_path, curr_line_number, prev_function, prev_file_path, prev_function_start_line)\n            location_ids.append(location_index)\n        samples.add(datum, location_ids)\n    sample_type_description = 'count'\n    sample_type = pprof_profile.sample_type.add()\n    sample_type.type = self._string_table.index_of(sample_type_description)\n    sample_type.unit = self._string_table.index_of('count')\n    sample_type_description = 'all_time'\n    sample_type = pprof_profile.sample_type.add()\n    sample_type.type = self._string_table.index_of(sample_type_description)\n    sample_type.unit = self._string_table.index_of('nanoseconds')\n    sample_type_description = 'op_time'\n    sample_type = pprof_profile.sample_type.add()\n    sample_type.type = self._string_table.index_of(sample_type_description)\n    sample_type.unit = self._string_table.index_of('nanoseconds')\n    pprof_profile.string_table.extend(self._string_table.string_table())\n    pprof_profile.sample.extend(samples.get_sample_protos())\n    pprof_profile.function.extend(self._functions.function_protos())\n    pprof_profile.location.extend(self._locations.location_protos())\n    return pprof_profile", "docstring": "Returns profile data in pprof proto format.\n\nArgs:\nprofile_datum_generator: Generator outputting `ProfileDatum` objects.\n\nReturns:\nA proto in pprof format.", "source": "github-repos"}
{"code": "def _op_expand(n_bits, func=None, broadcastable=None):\n    \n    if func is None:\n        return functools.partial(_op_expand, n_bits, broadcastable=broadcastable)\n\n    @functools.wraps(func)\n    def wrapper(self, *args):\n        params = args[0:-n_bits] if len(args) > n_bits else tuple()\n        rargs = args[-n_bits:]\n\n        if broadcastable is None:\n            blist = [True] * len(rargs)\n        else:\n            blist = broadcastable\n\n        if not all([_is_bit(arg) for arg in rargs]):\n            rarg_size = [1] * n_bits\n            for iarg, arg in enumerate(rargs):\n                if isinstance(arg, Register):\n                    rarg_size[iarg] = len(arg)\n                elif isinstance(arg, list) and all([_is_bit(bit) for bit in arg]):\n                    rarg_size[iarg] = len(arg)\n                elif _is_bit(arg):\n                    rarg_size[iarg] = 1\n                else:\n                    raise QiskitError('operation arguments must be qubits/cbits')\n            broadcast_size = max(rarg_size)\n            expanded_rargs = []\n            for arg, broadcast in zip(rargs, blist):\n                if isinstance(arg, Register):\n                    arg = [(arg, i) for i in range(len(arg))]\n                elif isinstance(arg, tuple):\n                    arg = [arg]\n                \n                if isinstance(arg, list) and len(arg) == 1 and broadcast:\n                    arg = arg * broadcast_size\n                if len(arg) != broadcast_size:\n                    raise QiskitError('register size error')\n                expanded_rargs.append(arg)\n            rargs = expanded_rargs\n            if all([isinstance(arg, list) for arg in rargs]):\n                if all(rargs):\n                    instructions = InstructionSet()\n                    for irargs in zip(*rargs):\n                        instructions.add(func(self, *params, *irargs),\n                                         [i for i in irargs if isinstance(i[0], QuantumRegister)],\n                                         [i for i in irargs if isinstance(i[0], ClassicalRegister)])\n                    return instructions\n                else:\n                    raise QiskitError('empty control or target argument')\n        return func(self, *params, *rargs)\n\n    return wrapper", "docstring": "Decorator for expanding an operation across a whole register or register subset.\nArgs:\nn_bits (int): the number of register bit arguments the decorated function takes\nfunc (function): used for decorators with keyword args\nbroadcastable (list(bool)): list of bool for which register args can be\nbroadcast from 1 bit to the max size of the rest of the args. Defaults\nto all True if not specified.\n\nReturn:\ntype: partial function object", "source": "juraj-google-style"}
{"code": "def render_head_repr(expr: Any, sub_render=None, key_sub_render=None) -> str:\n    head_repr_fmt = '{head}({args}{kwargs})'\n    if (sub_render is None):\n        sub_render = render_head_repr\n    if (key_sub_render is None):\n        key_sub_render = sub_render\n    if isinstance(expr.__class__, Singleton):\n        return repr(expr)\n    if isinstance(expr, Expression):\n        args = expr.args\n        keys = expr.minimal_kwargs.keys()\n        kwargs = ''\n        if (len(keys) > 0):\n            kwargs = ', '.join([('%s=%s' % (key, key_sub_render(expr.kwargs[key]))) for key in keys])\n            if (len(args) > 0):\n                kwargs = (', ' + kwargs)\n        return head_repr_fmt.format(head=expr.__class__.__name__, args=', '.join([sub_render(arg) for arg in args]), kwargs=kwargs)\n    elif isinstance(expr, (tuple, list)):\n        delims = (('(', ')') if isinstance(expr, tuple) else ('[', ']'))\n        if (len(expr) == 1):\n            delims = (delims[0], (',' + delims[1]))\n        return ((delims[0] + ', '.join([render_head_repr(v, sub_render=sub_render, key_sub_render=key_sub_render) for v in expr])) + delims[1])\n    else:\n        return sympy_srepr(expr)", "docstring": "Render a textual representation of `expr` using\nPositional and keyword arguments are recursively\nrendered using `sub_render`, which defaults to `render_head_repr` by\ndefault.  If desired, a different renderer may be used for keyword\narguments by giving `key_sub_renderer`\n\nRaises:\nAttributeError: if `expr` is not an instance of\n:class:`Expression`, or more specifically, if `expr` does not\nhave `args` and `kwargs` (respectively `minimal_kwargs`)\nproperties", "source": "codesearchnet"}
{"code": "def getmtime(self, path=None, client_kwargs=None, header=None):\n        \n        return self._getmtime_from_header(\n            self.head(path, client_kwargs, header))", "docstring": "Return the time of last access of path.\n\nArgs:\npath (str): File path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\nfloat: The number of seconds since the epoch\n(see the time module).", "source": "juraj-google-style"}
{"code": "def clean_email_or_username(self):\n    email_or_username = self.cleaned_data[self.Fields.EMAIL_OR_USERNAME].strip()\n    if (not email_or_username):\n        return email_or_username\n    email = email_or_username__to__email(email_or_username)\n    bulk_entry = (len(split_usernames_and_emails(email)) > 1)\n    if bulk_entry:\n        for email in split_usernames_and_emails(email):\n            validate_email_to_link(email, None, ValidationMessages.INVALID_EMAIL_OR_USERNAME, ignore_existing=True)\n        email = email_or_username\n    else:\n        validate_email_to_link(email, email_or_username, ValidationMessages.INVALID_EMAIL_OR_USERNAME, ignore_existing=True)\n    return email", "docstring": "Clean email form field\n\nReturns:\nstr: the cleaned value, converted to an email address (or an empty string)", "source": "codesearchnet"}
{"code": "def run( self, for_time=None ):\n        \n        self.for_time = for_time\n        try:\n            self.is_initialised()\n        except AttributeError:\n            raise\n        if self.number_of_equilibration_jumps > 0:\n            for step in range( self.number_of_equilibration_jumps ):\n                self.lattice.jump()\n            self.reset()\n        if self.for_time:\n            self.number_of_jumps = 0\n            while self.lattice.time < self.for_time:\n                self.lattice.jump()\n                self.number_of_jumps += 1\n        else: \n            for step in range( self.number_of_jumps ):\n                self.lattice.jump()\n        self.has_run = True", "docstring": "Run the simulation.\n\nArgs:\nfor_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, training: bool=False) -> tf.Tensor:\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + token_type_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def claim(self, unclaimed_file_readers):\n        \n        claimed_vcf_readers = []\n        for caller in self._callers:\n            (unclaimed_file_readers,\n             translated_vcf_readers) = caller.claim(unclaimed_file_readers)\n            claimed_vcf_readers.extend(translated_vcf_readers)\n\n        return unclaimed_file_readers, claimed_vcf_readers", "docstring": "Allows each caller to claim incoming files as they are recognized.\n\nArgs:\nunclaimed_file_readers: Usually, all files in the input dir.\n\nReturns:\nA tuple of unclaimed file readers and claimed VcfReaders. The\npresence of any unclaimed file readers could indicate stray files\nin the input dir.", "source": "juraj-google-style"}
{"code": "def en(item):\n    \n    if pakr is None:  \n        return msgpack.packb(item, use_bin_type=True, unicode_errors='surrogatepass')\n    try:\n        return pakr.pack(item)\n    except Exception:\n        pakr.reset()\n        raise", "docstring": "Use msgpack to serialize a compatible python object.\n\nArgs:\nitem (obj): The object to serialize\n\nNotes:\nString objects are encoded using utf8 encoding.  In order to handle\npotentially malformed input, ``unicode_errors='surrogatepass'`` is set\nto allow encoding bad input strings.\n\nReturns:\nbytes: The serialized bytes in msgpack format.", "source": "juraj-google-style"}
{"code": "def GetSoapXMLForComplexType(self, type_name, value):\n    \n    element = self.schema.get_element(\n        '{%s}%s' % (self._namespace_override, type_name))\n    result_element = self._element_maker(element.qname.localname)\n    element_value = element(**value)\n    element.type.render(result_element, element_value)\n    data = lxml.etree.tostring(result_element).strip()\n    return data", "docstring": "Return an XML string representing a SOAP complex type.\n\nArgs:\ntype_name: The name of the type with namespace prefix if necessary.\nvalue: A python dictionary to hydrate the type instance with.\n\nReturns:\nA string containing the SOAP XML for the type.", "source": "juraj-google-style"}
{"code": "def screenshot(self):\n    b64data = self.http.get('/screenshot').value\n    raw_data = base64.b64decode(b64data)\n    from PIL import Image\n    buff = io.BytesIO(raw_data)\n    return Image.open(buff)", "docstring": "Take screenshot with session check\n\nReturns:\nPIL.Image", "source": "codesearchnet"}
{"code": "def do_post(self, uri, resource, timeout, custom_headers):\n    self.validate_resource_uri(uri)\n    (task, entity) = self._connection.post(uri, resource, custom_headers=custom_headers)\n    if (not task):\n        return entity\n    return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Helps to make post requests.\n\nArgs:\nuri: URI of  the resource.\nresource: Resource data to post.\ntimeout: Time out for the request in seconds.\ncutom_headers: Allows to add custom http headers.\n\nReturns:\nRetunrs Task object.", "source": "codesearchnet"}
{"code": "def get_ut_layer(x, hparams, ffn_unit, attention_unit, pad_remover=None):\n    if (hparams.recurrence_type == 'basic'):\n        ut_initializer = (x, x, x)\n        ut_function = functools.partial(universal_transformer_basic, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit)\n    elif (hparams.recurrence_type == 'highway'):\n        ut_initializer = (x, x, x)\n        ut_function = functools.partial(universal_transformer_highway, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)\n    elif (hparams.recurrence_type == 'skip'):\n        ut_initializer = (x, x, x)\n        ut_function = functools.partial(universal_transformer_skip, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)\n    elif (hparams.recurrence_type == 'dwa'):\n        memory_size = (hparams.num_rec_steps + 1)\n        memory_empty = tf.zeros(([memory_size] + common_layers.shape_list(x)))\n        memory = fill_memory_slot(memory_empty, x, 0)\n        ut_initializer = (x, x, memory)\n        ut_function = functools.partial(universal_transformer_depthwise_attention, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit)\n    elif (hparams.recurrence_type == 'gru'):\n        ut_initializer = (x, x, x)\n        ut_function = functools.partial(universal_transformer_with_gru_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)\n    elif (hparams.recurrence_type == 'lstm'):\n        memory = tf.zeros(common_layers.shape_list(x))\n        ut_initializer = (x, x, memory)\n        ut_function = functools.partial(universal_transformer_with_lstm_as_transition_function, hparams=hparams, ffn_unit=ffn_unit, attention_unit=attention_unit, pad_remover=pad_remover)\n    else:\n        raise ValueError(('Unknown recurrence type: %s' % hparams.recurrence_type))\n    return (ut_function, ut_initializer)", "docstring": "Provides the function that is used in universal transforemr steps.\n\nArgs:\nx: input\nhparams: model hyper-parameters\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\n\nReturns:\nut_function and the ut_initializer\n\nRaises:\nValueError: Unknown recurrence type", "source": "codesearchnet"}
{"code": "def patch_on_member(src: symbolic.Symbolic, cls: Union[Type[Any], Tuple[Type[Any], ...]], name: str, value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:\n    return _conditional_patch(src, lambda k, v, p: isinstance(p, cls) and k.key == name, value, value_fn, skip_notification)", "docstring": "Recursively patch values that are the requested member of classes.\n\nExample::\n\nd = pg.Dict(a=A(x=1), b=2)\nprint(pg.patching.patch_on_member(d, A, 'x', 2)\n# {a=A(x=2), b=4}\n\nArgs:\nsrc: symbolic value to patch.\ncls: In which class the member belongs to.\nname: Member name.\nvalue: New value for field that satisfy `condition`.\nvalue_fn: Callable object that produces new value based on old value.\nIf not None, `value` must be None.\nskip_notification: If True, `on_change` event will not be triggered for this\noperation. If None, the behavior is decided by `pg.notify_on_rebind`.\nPlease see `symbolic.Symbolic.rebind` for details.\n\nReturns:\n`src` after being patched.", "source": "github-repos"}
{"code": "def read_dimvalue(self, dimname, path=\"/\", default=NO_DEFAULT):\n        \n        try:\n            dim = self._read_dimensions(dimname, path=path)[0]\n            return len(dim)\n        except self.Error:\n            if default is NO_DEFAULT: raise\n            return default", "docstring": "Returns the value of a dimension.\n\nArgs:\ndimname: Name of the variable\npath: path to the group.\ndefault: return `default` if `dimname` is not present and\n`default` is not `NO_DEFAULT` else raise self.Error.", "source": "juraj-google-style"}
{"code": "def get_num_filters(layer):\n    \n    \n    if K.ndim(layer.output) == 2:\n        return K.int_shape(layer.output)[-1]\n\n    channel_idx = 1 if K.image_data_format() == 'channels_first' else -1\n    return K.int_shape(layer.output)[channel_idx]", "docstring": "Determines the number of filters within the given `layer`.\n\nArgs:\nlayer: The keras layer to use.\n\nReturns:\nTotal number of filters within `layer`.\nFor `keras.layers.Dense` layer, this is the total number of outputs.", "source": "juraj-google-style"}
{"code": "def get_element_dt(self, el_name, tz=None, el_idx=0):\n    return iso8601.parse_date(self.get_element_by_name(el_name, el_idx).text, tz)", "docstring": "Return the text of the selected element as a ``datetime.datetime`` object.\n\nThe element text must be a ISO8601 formatted datetime\n\nArgs:\nel_name : str\nName of element to use.\n\ntz : datetime.tzinfo\nTimezone in which to return the datetime.\n\n- Without a timezone, other contextual information is required in order to\ndetermine the exact represented time.\n- If dt has timezone: The ``tz`` parameter is ignored.\n- If dt is naive (without timezone): The timezone is set to ``tz``.\n- ``tz=None``: Prevent naive dt from being set to a timezone. Without a\ntimezone, other contextual information is required in order to determine\nthe exact represented time.\n- ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.\n\nel_idx : int\nIndex of element to use in the event that there are multiple sibling\nelements with the same name.\n\nReturns:\ndatetime.datetime", "source": "codesearchnet"}
{"code": "def read_nanopubs(fn: str) -> Iterable[Mapping[(str, Any)]]:\n    (jsonl_flag, json_flag, yaml_flag) = (False, False, False)\n    if ((fn == '-') or ('jsonl' in fn)):\n        jsonl_flag = True\n    elif ('json' in fn):\n        json_flag = True\n    elif re.search('ya?ml', fn):\n        yaml_flag = True\n    else:\n        log.error('Do not recognize nanopub file format - neither json nor jsonl format.')\n        return {}\n    try:\n        if re.search('gz$', fn):\n            f = gzip.open(fn, 'rt')\n        else:\n            try:\n                f = click.open_file(fn, mode='rt')\n            except Exception as e:\n                log.info(f'Can not open file {fn}  Error: {e}')\n                quit()\n        if jsonl_flag:\n            for line in f:\n                (yield json.loads(line))\n        elif json_flag:\n            nanopubs = json.load(f)\n            for nanopub in nanopubs:\n                (yield nanopub)\n        elif yaml_flag:\n            nanopubs = yaml.load(f, Loader=yaml.SafeLoader)\n            for nanopub in nanopubs:\n                (yield nanopub)\n    except Exception as e:\n        log.error(f'Could not open file: {fn}')", "docstring": "Read file and generate nanopubs\n\nIf filename has *.gz, will read as a gzip file\nIf filename has *.jsonl*, will parsed as a JSONLines file\nIF filename has *.json*, will be parsed as a JSON file\nIf filename has *.yaml* or *.yml*,  will be parsed as a YAML file\n\nArgs:\nfilename (str): filename to read nanopubs from\n\nReturns:\nGenerator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format", "source": "codesearchnet"}
{"code": "def updateGroup(self, group, vendorSpecific=None):\n        \n        response = self.updateGroupResponse(group, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: updateGroupResponse()\n\nArgs:\ngroup:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_showcases(self):\n    (assoc_result, showcases_dicts) = self._read_from_hdx('showcase', self.data['id'], fieldname='package_id', action=hdx.data.showcase.Showcase.actions()['list_showcases'])\n    showcases = list()\n    if assoc_result:\n        for showcase_dict in showcases_dicts:\n            showcase = hdx.data.showcase.Showcase(showcase_dict, configuration=self.configuration)\n            showcases.append(showcase)\n    return showcases", "docstring": "Get any showcases the dataset is in\n\nReturns:\nList[Showcase]: list of showcases", "source": "codesearchnet"}
{"code": "def has_file_with_suffix(self, suffixes):\n        \n        if not isinstance(suffixes, list):\n            suffixes = [suffixes]\n\n        if self.handle:\n            for member in self.handle.getmembers():\n                if os.path.splitext(member.name)[1] in suffixes:\n                    return True\n                else:\n                    \n                    \n                    for suffix in suffixes:\n                        if '{0}/'.format(suffix) in member.name:\n                            return True\n\n        return False", "docstring": "Finds out if there is a file with one of suffixes in the archive.\nArgs:\nsuffixes: list of suffixes or single suffix to look for\nReturns:\nTrue if there is at least one file with at least one given suffix\nin the archive, False otherwise (or archive can't be opened)", "source": "juraj-google-style"}
{"code": "def all(self, data={}, **kwargs):\n    return super(Plan, self).all(data, **kwargs)", "docstring": "Fetch all plan entities\n\nReturns:\nDictionary of plan data", "source": "codesearchnet"}
{"code": "def _ParseProcessingOptions(self, options):\n    \n    self._single_process_mode = getattr(options, 'single_process', False)\n\n    argument_helper_names = [\n        'process_resources', 'temporary_directory', 'workers', 'zeromq']\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=argument_helper_names)", "docstring": "Parses the processing options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def __init__(self, feature_set='spe+', feature_model='strict'):\n        \n        fm = {'strict': _panphon.FeatureTable,\n              'permissive': permissive.PermissiveFeatureTable}\n        self.fm = fm[feature_model](feature_set=feature_set)", "docstring": "Construct a Sonority object\n\nArgs:\nfeature_set (str): features set to be used by `FeatureTable`\nfeature_model (str): 'strict' or 'permissive' feature model", "source": "juraj-google-style"}
{"code": "def scale_out(self, blocks=1):\n    r = []\n    for i in range(blocks):\n        if self.provider:\n            external_block_id = str(len(self.blocks))\n            launch_cmd = self.launch_cmd.format(block_id=external_block_id)\n            internal_block = self.provider.submit(launch_cmd, 1, 1)\n            logger.debug('Launched block {}->{}'.format(external_block_id, internal_block))\n            if (not internal_block):\n                raise ScalingFailed(self.provider.label, 'Attempts to provision nodes via provider has failed')\n            r.extend([external_block_id])\n            self.blocks[external_block_id] = internal_block\n        else:\n            logger.error('No execution provider available')\n            r = None\n    return r", "docstring": "Scales out the number of blocks by \"blocks\"\n\nRaises:\nNotImplementedError", "source": "codesearchnet"}
{"code": "def __init__(self, script=None):\n        \n        param_list = bytearray(b'\\x07\\x10')\n        super(NEP5Token, self).__init__(script=script, param_list=param_list)", "docstring": "Create an instance.\n\nArgs:\nscript (bytes): (Optional)", "source": "juraj-google-style"}
{"code": "def set_vector_catch(self, flags):\n    res = self._dll.JLINKARM_WriteVectorCatch(flags)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Sets vector catch bits of the processor.\n\nThe CPU will jump to a vector if the given vector catch is active, and\nwill enter a debug state.  This has the effect of halting the CPU as\nwell, meaning the CPU must be explicitly restarted.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def get_time(self, force_uptime=False):\n        \n\n        if force_uptime:\n            return self.uptime\n\n        time = self.uptime + self.time_offset\n\n        if self.is_utc:\n            time |= (1 << 31)\n\n        return time", "docstring": "Get the current UTC time or uptime.\n\nBy default, this method will return UTC time if possible and fall back\nto uptime if not.  If you specify, force_uptime=True, it will always\nreturn uptime even if utc time is available.\n\nArgs:\nforce_uptime (bool): Always return uptime, defaults to False.\n\nReturns:\nint: The current uptime or encoded utc time.", "source": "juraj-google-style"}
{"code": "def Matches(self, file_entry):\n    \n    if not self._filters:\n      return True\n\n    results = []\n    for file_entry_filter in self._filters:\n      result = file_entry_filter.Matches(file_entry)\n      results.append(result)\n\n    return True in results or False not in results", "docstring": "Compares the file entry against the filter collection.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to compare.\n\nReturns:\nbool: True if the file entry matches one of the filters. If no filters\nare provided or applicable the result will be True.", "source": "juraj-google-style"}
{"code": "def write_file(self, filename, distance=6, velocity=8, charge=3):\n        \n        with open(filename, \"w\") as f:\n            f.write(self.get_string(distance=distance, velocity=velocity,\n                                    charge=charge))", "docstring": "Writes LammpsData to file.\n\nArgs:\nfilename (str): Filename.\ndistance (int): No. of significant figures to output for\nbox settings (bounds and tilt) and atomic coordinates.\nDefault to 6.\nvelocity (int): No. of significant figures to output for\nvelocities. Default to 8.\ncharge (int): No. of significant figures to output for\ncharges. Default to 3.", "source": "juraj-google-style"}
{"code": "def fork(self, command: Command) -> Tuple[('SelectedMailbox', Iterable[Response])]:\n    frozen = _Frozen(self)\n    cls = type(self)\n    copy = cls(self._guid, self._readonly, self._permanent_flags, self._session_flags, self._selected_set, self._lookup, _mod_sequence=self._mod_sequence, _prev=frozen, _messages=self._messages)\n    if (self._prev is not None):\n        with_uid: bool = getattr(command, 'uid', False)\n        untagged = self._compare(self._prev, frozen, with_uid)\n    else:\n        untagged = []\n    return (copy, untagged)", "docstring": "Compares the state of the current object to that of the last fork,\nreturning the untagged responses that reflect any changes. A new copy\nof the object is also returned, ready for the next command.\n\nArgs:\ncommand: The command that was finished.", "source": "codesearchnet"}
{"code": "def __init__(self, actions=None):\n        \n        super().__init__(InstructionType.OFPIT_CLEAR_ACTIONS)\n        self.actions = actions if actions else []", "docstring": "Create a InstructionClearAction with the optional parameters below.\n\nArgs:\nactions (:class:`~.actions.ListOfActions`):\nActions associated with OFPIT_CLEAR_ACTIONS.", "source": "juraj-google-style"}
{"code": "def __setKeySwitchGuardTime(self, iKeySwitchGuardTime):\n        \n        print '%s call setKeySwitchGuardTime' % self.port\n        print iKeySwitchGuardTime\n        try:\n            cmd = 'keysequence guardtime %s' % str(iKeySwitchGuardTime)\n            if self.__sendCommand(cmd)[0] == 'Done':\n                time.sleep(1)\n                return True\n            else:\n                return False\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"setKeySwitchGuardTime() Error; \" + str(e))", "docstring": "set the Key switch guard time\n\nArgs:\niKeySwitchGuardTime: key switch guard time\n\nReturns:\nTrue: successful to set key switch guard time\nFalse: fail to set key switch guard time", "source": "juraj-google-style"}
{"code": "def add_path(self, path, path_filter=None):\n    for (root, _, files) in os.walk(path):\n        for filename in files:\n            full_path_and_filename = os.path.join(root, filename)\n            if ((path_filter is None) or path_filter(full_path_and_filename)):\n                relative_path_and_filename = full_path_and_filename.replace((path + '/'), '')\n                with open(full_path_and_filename, 'rb') as handle:\n                    self.files[relative_path_and_filename] = b64encode(handle.read()).decode('utf-8')", "docstring": "Adding all files from given path to the object.\n\nArgs:\npath (str): valid, existing directory", "source": "codesearchnet"}
{"code": "def __init__(self, host, port, rhash):\n        \n        self.hash = rhash\n        self.r = redis.StrictRedis(host=host, port=port)", "docstring": "Initialize the Class properties.\n\nArgs:\nhost (string): The Redis host.\nport (string): The Redis port.\nrhash (string): The rhash value.", "source": "juraj-google-style"}
{"code": "def to_numpy_array(self):\n    return encode_resource_handle(self._get_resource_handle())", "docstring": "Convert a TensorHandle object to a feedable numpy value.\n\nReturns:\nA numpy array of a custom struct type that can be used as a feed value\nto run().", "source": "github-repos"}
{"code": "def load_file(self, path, objtype=None, encoding='utf-8'):\n    path = self.abspath(path)\n    debug(('file path is %s' % path))\n    if (path in self._cache):\n        return self._cache[path]\n    try:\n        debug(('cache miss, attempting to load file from disk: %s' % path))\n        contents = parsed_data = self.get_contents(path)\n        if encoding:\n            parsed_data = contents.encode(encoding)\n    except ConfigurationError as exc:\n        debug(exc)\n        raise\n    except UnicodeEncodeError:\n        raise ConfigurationError('unable to encode file contents')\n    if (objtype is not string_types):\n        for deserializer in (self._load_json, self._load_yaml):\n            parsed_data = deserializer(contents)\n            if parsed_data:\n                break\n        if (objtype and (not isinstance(parsed_data, objtype))):\n            debug(('specified file %s is not of type %s' % (path, objtype)))\n            raise ConfigurationError('invalid file serialization type for contents')\n    self._cache[path] = parsed_data\n    return parsed_data", "docstring": "Load the file specified by path\n\nThis method will first try to load the file contents from cache and\nif there is a cache miss, it will load the contents from disk\n\nArgs:\npath (string): The full or relative path to the file to be loaded\n\nencoding (string): The file contents text encoding\n\nobjtype (object): The object type of the file contents.  This\nis used to type check the deserialized content against the\ncontents loaded from disk.\nIgnore serializing if objtype is string_types\n\nReturns:\nobject: The deserialized file contents which could be either a\nstring object or a dict object\n\nRaises:\nConfigurationError:", "source": "codesearchnet"}
{"code": "def clinvar_export(store, institute_id, case_name, variant_id):\n    \n\n    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n    pinned = [store.variant(variant_id) or variant_id for variant_id in\n                  case_obj.get('suspects', [])]\n    variant_obj = store.variant(variant_id)\n    return dict(\n        today = str(date.today()),\n        institute=institute_obj,\n        case=case_obj,\n        variant=variant_obj,\n        pinned_vars=pinned\n    )", "docstring": "Gather the required data for creating the clinvar submission form\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str): Institute ID\ncase_name(str): case ID\nvariant_id(str): variant._id\n\nReturns:\na dictionary with all the required data (case and variant level) to pre-fill in fields in the clinvar submission form", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "List all repositories for a given `BitbucketServerConfig`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBitbucketServerConfigsReposListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListBitbucketServerRepositoriesResponse) The response message.", "source": "github-repos"}
{"code": "def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):\n\n    def checkNoNested(mod):\n        try:\n            all = mod.__all__\n        except AttributeError:\n            return False\n        mems = inspect.getmembers(mod, inspect.ismodule)\n        mems = [m for m in mems if (m[0] in mod.__all__)]\n        if (len(mems) > 0):\n            return False\n        return True\n    mods = inspect.getmembers(package, inspect.ismodule)\n    (nmods, pvt, npkgs) = ([], [], [])\n    for mod in mods:\n        if checkNoNested(mod[1]):\n            if (mod[0][0] == '_'):\n                pvt.append(mod)\n            else:\n                nmods.append(mod)\n        else:\n            npkgs.append(mod)\n    if showprivate:\n        nmods += pvt\n    files = []\n    ignore = []\n    for pkg in npkgs:\n        pt = ('%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[(- 1)]))\n        if os.path.exists(pt):\n            shutil.rmtree(pt)\n        os.makedirs(pt)\n        ignore += inspect.getmembers(pkg[1])\n        f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)\n        files.append(f.split((package.__name__.replace('.', '/') + '/'))[1])\n    if nested:\n        try:\n            name = package.__displayname__\n        except AttributeError:\n            name = package.__name__\n        index = ('\\n%s\\n%s\\n\\n.. toctree::\\n   :maxdepth: 5\\n\\n    ' % (name, ('*' * len(name))))\n        index += '\\n   '.join(files)\n        index += ('\\n   ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh))\n        findex = ('content/%s/index.rst' % package.__name__.replace('.', '/'))\n        with open(findex, 'w') as f:\n            if package.__doc__:\n                f.write(package.__doc__)\n            f.write(index)\n        return ('\\n   ' + findex)\n    names = ('\\n   %s/%s/' % (self.path, package.__name__.replace('.', '/')))\n    nmods = [m for m in nmods if (m not in ignore)]\n    return names.join((self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\\n   ') + files))", "docstring": "An internal helper to generate all of the pages for a given package\n\nArgs:\npackage (module): The top-level package to document\nshowprivate (bool): A flag for whether or not to display private members\nnested (bool): Foor internal use ONLY\n\nReturns:\nstr: The file names ready to be appended to a top-level toctree", "source": "codesearchnet"}
{"code": "def get_file_systems(self):\n    result = {}\n    if os.access('/proc/mounts', os.R_OK):\n        file = open('/proc/mounts')\n        for line in file:\n            try:\n                mount = line.split()\n                device = mount[0]\n                mount_point = mount[1]\n                fs_type = mount[2]\n            except (IndexError, ValueError):\n                continue\n            if (fs_type not in self.filesystems):\n                self.log.debug(('Ignoring %s since it is of type %s ' + ' which is not in the list of filesystems.'), mount_point, fs_type)\n                continue\n            if self.exclude_reg.search(mount_point):\n                self.log.debug(('Ignoring %s since it is in the ' + 'exclude_filter list.'), mount_point)\n                continue\n            if ((('/' in device) or (device == 'tmpfs')) and mount_point.startswith('/')):\n                try:\n                    stat = os.stat(mount_point)\n                except OSError:\n                    self.log.debug('Path %s is not mounted - skipping.', mount_point)\n                    continue\n                if (stat.st_dev in result):\n                    continue\n                result[stat.st_dev] = {'device': os.path.realpath(device), 'mount_point': mount_point, 'fs_type': fs_type}\n        file.close()\n    else:\n        if (not psutil):\n            self.log.error('Unable to import psutil')\n            return None\n        partitions = psutil.disk_partitions(False)\n        for partition in partitions:\n            result[len(result)] = {'device': os.path.realpath(partition.device), 'mount_point': partition.mountpoint, 'fs_type': partition.fstype}\n        pass\n    return result", "docstring": "Creates a map of mounted filesystems on the machine.\n\niostat(1): Each sector has size of 512 bytes.\n\nReturns:\nst_dev -> FileSystem(device, mount_point)", "source": "codesearchnet"}
{"code": "def OnCreateAccount(self, account):\n    pubkey = account.PublicKey.encode_point(False)\n    pubkeyunhex = binascii.unhexlify(pubkey)\n    pub = pubkeyunhex[1:65]\n    priv = bytearray(account.PrivateKey)\n    decrypted = (pub + priv)\n    encrypted_pk = self.EncryptPrivateKey(bytes(decrypted))\n    (db_account, created) = Account.get_or_create(PrivateKeyEncrypted=encrypted_pk, PublicKeyHash=account.PublicKeyHash.ToBytes())\n    db_account.save()\n    self.__dbaccount = db_account", "docstring": "Save a KeyPair in encrypted form into the database.\n\nArgs:\naccount (KeyPair):", "source": "codesearchnet"}
{"code": "def update_status(self, progress):\n        \n\n        \n        update_interval = 0.2\n\n        now = datetime.datetime.now()\n\n        if not self._last_progress_update is None and now-self._last_progress_update < datetime.timedelta(seconds=update_interval):\n            return\n\n        self._last_progress_update = now\n\n        self.progressBar.setValue(progress)\n\n        script = self.current_script\n\n        \n        if progress:\n            remaining_time = str(datetime.timedelta(seconds=script.remaining_time.seconds))\n            self.lbl_time_estimate.setText('time remaining: {:s}'.format(remaining_time))\n        if script is not str(self.tabWidget.tabText(self.tabWidget.currentIndex())).lower() in ['scripts', 'instruments']:\n            self.plot_script(script)", "docstring": "waits for a signal emitted from a thread and updates the gui\nArgs:\nprogress:\nReturns:", "source": "juraj-google-style"}
{"code": "def end_offsets(self, partitions):\n    offsets = self._fetcher.end_offsets(partitions, self.config['request_timeout_ms'])\n    return offsets", "docstring": "Get the last offset for the given partitions. The last offset of a\npartition is the offset of the upcoming message, i.e. the offset of the\nlast available message + 1.\n\nThis method does not change the current consumer position of the\npartitions.\n\nNote:\nThis method may block indefinitely if the partition does not exist.\n\nArguments:\npartitions (list): List of TopicPartition instances to fetch\noffsets for.\n\nReturns:\n``{TopicPartition: int}``: The end offsets for the given partitions.\n\nRaises:\nUnsupportedVersionError: If the broker does not support looking\nup the offsets by timestamp.\nKafkaTimeoutError: If fetch failed in request_timeout_ms", "source": "codesearchnet"}
{"code": "def unique_fn_name(scope, name):\n    return ('%s%s_%s' % (scope, name, ops.uid())).replace('/', '_')", "docstring": "Returns a unique name to use for a control flow function.\n\nArgs:\nscope: A name scope string.\nname: An identifier for this function (e.g. \"true\", \"body\").\n\nReturns:\nA string, the name to use for the function.", "source": "github-repos"}
{"code": "def get_history_by_tail_number(self, tail_number, page=1, limit=100):\n    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)\n    return self._fr24.get_data(url, True)", "docstring": "Fetch the history of a particular aircraft by its tail number.\n\nThis method can be used to get the history of a particular aircraft by its tail number.\nIt checks the user authentication and returns the data accordingly.\n\nArgs:\ntail_number (str): The tail number, e.g. VT-ANL\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA list of dicts with the data; one dict for each row of data from flightradar24\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_history_by_flight_number('VT-ANL')\nf.get_history_by_flight_number('VT-ANL',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def __rmul__(self, other):\n    return self * other", "docstring": "Returns the product of `self` and `other`.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is the product of `self` and `other`.", "source": "github-repos"}
{"code": "def parse_datetime(value):\n    if (not value):\n        return None\n    elif isinstance(value, datetime.datetime):\n        return value\n    return dateutil.parser.parse(value)", "docstring": "Attempts to parse `value` into an instance of ``datetime.datetime``. If\n`value` is ``None``, this function will return ``None``.\n\nArgs:\nvalue: A timestamp. This can be a string or datetime.datetime value.", "source": "codesearchnet"}
{"code": "def _WritesString(self, content):\n    \n    content_bytes = codecs.encode(content, 'utf-8')\n    self._sample_file.write(content_bytes)", "docstring": "Writes a string to the sample file.\n\nArgs:\ncontent (str): content to write to the sample file.", "source": "juraj-google-style"}
{"code": "def _ReadString(self, file_object, file_offset, data_type_map, description):\n    element_data_size = data_type_map._element_data_type_definition.GetByteSize()\n    elements_terminator = data_type_map._data_type_definition.elements_terminator\n    byte_stream = []\n    element_data = file_object.read(element_data_size)\n    byte_stream.append(element_data)\n    while (element_data and (element_data != elements_terminator)):\n        element_data = file_object.read(element_data_size)\n        byte_stream.append(element_data)\n    byte_stream = b''.join(byte_stream)\n    return self._ReadStructureFromByteStream(byte_stream, file_offset, data_type_map, description)", "docstring": "Reads a string.\n\nArgs:\nfile_object (FileIO): file-like object.\nfile_offset (int): offset of the data relative from the start of\nthe file-like object.\ndata_type_map (dtfabric.DataTypeMap): data type map of the string.\ndescription (str): description of the string.\n\nReturns:\nobject: structure values object.\n\nRaises:\nFileFormatError: if the string cannot be read.\nValueError: if file-like object or date type map are invalid.", "source": "codesearchnet"}
{"code": "def get_nested_group_users(self, groupname):\n        \n\n        response = self._get(self.rest_url + \"/group/user/nested\",\n                             params={\"groupname\": groupname,\n                                     \"start-index\": 0,\n                                     \"max-results\": 99999})\n\n        if not response.ok:\n            return None\n\n        return [u['name'] for u in response.json()['users']]", "docstring": "Retrieves a list of all users that directly or indirectly belong to the given groupname.\n\nArgs:\ngroupname: The group name.\n\n\nReturns:\nlist:\nA list of strings of user names.", "source": "juraj-google-style"}
{"code": "def __contains__(self, item):\n        \n        if isinstance(item, str):\n            return item in self._impl.namespace\n\n        elif isinstance(item, Cells):\n            return item._impl in self._impl.cells.values()\n\n        elif isinstance(item, StaticSpace):\n            return item._impl in self._impl.spaces.values()\n\n        else:\n            return False", "docstring": "Check if item is in the space.\n\nitem can be either a cells or space.\n\nArgs:\nitem: a cells or space to check.\n\nReturns:\nTrue if item is a direct child of the space, False otherwise.", "source": "juraj-google-style"}
{"code": "def make_predict_function(self):\n    if self.predict_function is not None:\n        return self.predict_function\n\n    def step_function(model, iterator):\n        \n\n        def run_step(data):\n            outputs = model.predict_step(data)\n            with ops.control_dependencies(_minimum_control_deps(outputs)):\n                model._predict_counter.assign_add(1)\n            return outputs\n        data = next(iterator)\n        outputs = model.distribute_strategy.run(run_step, args=(data,))\n        outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='concat')\n        return outputs\n    if self._steps_per_execution is None or self._steps_per_execution.numpy().item() == 1:\n\n        def predict_function(iterator):\n            \n            return step_function(self, iterator)\n    else:\n\n        def predict_function(iterator):\n            \n            outputs = step_function(self, iterator)\n            for _ in math_ops.range(self._steps_per_execution - 1):\n                directives.set_loop_options(shape_invariants=[(t, tf_utils.get_tensor_spec(t, dynamic_batch=True).shape) for t in nest.flatten(outputs)])\n                step_outputs = step_function(self, iterator)\n                outputs = nest.map_structure(lambda t1, t2: concat([t1, t2]), outputs, step_outputs)\n            return outputs\n    if not self.run_eagerly:\n        predict_function = def_function.function(predict_function, experimental_relax_shapes=True)\n    self.predict_function = predict_function\n    return self.predict_function", "docstring": "Creates a function that executes one step of inference.\n\nThis method can be overridden to support custom inference logic.\nThis method is called by `Model.predict` and `Model.predict_on_batch`.\n\nTypically, this method directly controls `tf.function` and\n`tf.distribute.Strategy` settings, and delegates the actual evaluation\nlogic to `Model.predict_step`.\n\nThis function is cached the first time `Model.predict` or\n`Model.predict_on_batch` is called. The cache is cleared whenever\n`Model.compile` is called.\n\nReturns:\nFunction. The function created by this method should accept a\n`tf.data.Iterator`, and return the outputs of the `Model`.", "source": "github-repos"}
{"code": "def ParseFromHumanReadable(self, string):\n    \n    if not string:\n      return None\n\n    match = self.REGEX.match(string.strip().lower())\n    if not match:\n      raise DecodeError(\"Unknown specification for ByteSize %s\" % string)\n\n    multiplier = self.DIVIDERS.get(match.group(2))\n    if not multiplier:\n      raise DecodeError(\"Invalid multiplier %s\" % match.group(2))\n\n    \n    value = match.group(1)\n    if \".\" in value:\n      value = float(value)\n    else:\n      value = int(value)\n\n    self._value = int(value * multiplier)", "docstring": "Parse a human readable string of a byte string.\n\nArgs:\nstring: The string to parse.\n\nRaises:\nDecodeError: If the string can not be parsed.", "source": "juraj-google-style"}
{"code": "def CreatePrecisionHelper(cls, precision):\n    \n    precision_helper_class = cls._PRECISION_CLASSES.get(precision, None)\n    if not precision_helper_class:\n      raise ValueError('Unsupported precision: {0!s}'.format(precision))\n\n    return precision_helper_class", "docstring": "Creates a precision helper.\n\nArgs:\nprecision (str): precision of the date and time value, which should\nbe one of the PRECISION_VALUES in definitions.\n\nReturns:\nclass: date time precision helper class.\n\nRaises:\nValueError: if the precision value is unsupported.", "source": "juraj-google-style"}
{"code": "def getOutlet(self):\n    outrow = (int(self.getCard(name='OUTROW').value) - 1)\n    outcol = (int(self.getCard(name='OUTCOL').value) - 1)\n    gssha_grid = self.getGrid()\n    return gssha_grid.pixel2lonlat(outcol, outrow)", "docstring": "Gets the outlet latitude and longitude.\n\nReturns:\nlatitude(float): Latitude of grid cell center.\nlongitude(float): Longitude of grid cell center.", "source": "codesearchnet"}
{"code": "def add_team_member(self, account_id=None, email_address=None):\n    return self._add_remove_team_member(self.TEAM_ADD_MEMBER_URL, email_address, account_id)", "docstring": "Add or invite a user to your Team\n\nArgs:\n\naccount_id (str):       The id of the account of the user to invite to your team.\n\nemail_address (str):    The email address of the account to invite to your team. The account id prevails if both account_id and email_address are provided.\n\nReturns:\nA Team object", "source": "codesearchnet"}
{"code": "def convert_alg_to_int(alg):\n    if isinstance(alg, int):\n        return alg\n    if isinstance(alg, Algorithm):\n        return alg.value\n    if isinstance(alg, tensor.Tensor):\n        return alg\n    if isinstance(alg, str):\n        canon_alg = alg.strip().lower().replace('-', '').replace('_', '')\n        if canon_alg == 'philox':\n            return Algorithm.PHILOX.value\n        elif canon_alg == 'threefry':\n            return Algorithm.THREEFRY.value\n        elif canon_alg == 'autoselect':\n            return Algorithm.AUTO_SELECT.value\n        else:\n            raise ValueError(unsupported_alg_error_msg(alg))\n    else:\n        raise TypeError(f\"Can't convert argument `alg` (of value {alg} and type {type(alg)}) to int.\")", "docstring": "Converts algorithm to an integer.\n\nArgs:\nalg: can be one of these types: integer, Algorithm, Tensor, string. Allowed\nstrings are \"philox\" and \"threefry\".\n\nReturns:\nAn integer, unless the input is a Tensor in which case a Tensor is returned.", "source": "github-repos"}
{"code": "def _add_qasm_measure(self, qubit, cmembit, cregbit=None):\n        \n        \n        outcome, probability = self._get_measure_outcome(qubit)\n        \n        membit = 1 << cmembit\n        self._classical_memory = (self._classical_memory & (~membit)) | (int(outcome) << cmembit)\n\n        if cregbit is not None:\n            regbit = 1 << cregbit\n            self._classical_register = \\\n                (self._classical_register & (~regbit)) | (int(outcome) << cregbit)\n\n        \n        if outcome == '0':\n            update_diag = [[1 / np.sqrt(probability), 0], [0, 0]]\n        else:\n            update_diag = [[0, 0], [0, 1 / np.sqrt(probability)]]\n        \n        self._add_unitary_single(update_diag, qubit)", "docstring": "Apply a measure instruction to a qubit.\n\nArgs:\nqubit (int): qubit is the qubit measured.\ncmembit (int): is the classical memory bit to store outcome in.\ncregbit (int, optional): is the classical register bit to store outcome in.", "source": "juraj-google-style"}
{"code": "def __get_object__(binding):\n    \n    if isinstance(binding, rdflib.term.Node):\n        return binding\n    elif isinstance(binding, collections.Iterable):\n        for key, row in binding.items():\n            if isinstance(row, (rdflib.URIRef, rdflib.Literal)):\n                return row\n            elif isinstance(row, dict):\n                if row.get('type').startswith('uri'):\n                    return rdflib.URIRef(row.get('value'))\n                return rdflib.Literal(row.get('value'))\n            elif isinstance(row, tuple):\n                print(row)\n            elif isinstance(row, str):\n                if row.startswith(\"literal\") or \"xml:lang\" in key:\n                    continue\n                return rdflib.Literal(row)", "docstring": "Method takes a binding extracts value and returns rdflib\nentity\n\nArgs:\nbinding: binding row", "source": "juraj-google-style"}
{"code": "def deserialize(config, custom_objects=None):\n    obj = serialization_lib.deserialize_keras_object(config, custom_objects=custom_objects)\n    if not isinstance(obj, Layer):\n        raise ValueError(f'`keras.layers.deserialize` was passed a `config` object that is not a `keras.layers.Layer`. Received: {config}')\n    return obj", "docstring": "Returns a Keras layer object via its configuration.\n\nArgs:\nconfig: A python dict containing a serialized layer configuration.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during\ndeserialization.\n\nReturns:\nA Keras layer instance.", "source": "github-repos"}
{"code": "def arctan(x):\n    if any_symbolic_tensors((x,)):\n        return Arctan().symbolic_call(x)\n    return backend.numpy.arctan(x)", "docstring": "Trigonometric inverse tangent, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nTensor of the inverse tangent of each element in `x`, in the interval\n`[-pi/2, pi/2]`.\n\nExample:\n>>> x = keras.ops.convert_to_tensor([0, 1])\n>>> keras.ops.arctan(x)\narray([0., 0.7853982], dtype=float32)", "source": "github-repos"}
{"code": "def solveAsync(self, callback):\n\n    def async_call():\n        self._lock.acquire()\n        try:\n            self._impl.solve()\n        except Exception:\n            self._lock.release()\n            raise\n        else:\n            self._lock.release()\n            callback.run()\n    Thread(target=async_call).start()", "docstring": "Solve the current model asynchronously.\n\nArgs:\ncallback: Callback to be executed when the solver is done.", "source": "codesearchnet"}
{"code": "def _copy_deploy_scripts_for_hosts(self, domains):\n    with LogTask('Copying any deploy scripts'):\n        for (host_name, host_spec) in domains.iteritems():\n            host_metadata = host_spec.get('metadata', {})\n            deploy_scripts = self._get_scripts(host_metadata)\n            new_scripts = self._copy_delpoy_scripts(deploy_scripts)\n            self._set_scripts(host_metadata=host_metadata, scripts=new_scripts)\n    return domains", "docstring": "Copy the deploy scripts for all the domains into the prefix scripts dir\n\nArgs:\ndomains(dict): spec with the domains info as when loaded from the\ninitfile\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Convert(self, metadata, grr_message, token=None):\n    return self.BatchConvert([(metadata, grr_message)], token=token)", "docstring": "Converts GrrMessage into a set of RDFValues.\n\nArgs:\nmetadata: ExportedMetadata to be used for conversion.\ngrr_message: GrrMessage to be converted.\ntoken: Security token.\n\nReturns:\nList or generator with resulting RDFValues.", "source": "codesearchnet"}
{"code": "def ParseRow(self, parser_mediator, row_offset, row):\n    \n    timestamp = self._ParseTimestamp(parser_mediator, row)\n    if timestamp is None:\n      return\n\n    event_data = TrendMicroUrlEventData()\n    event_data.offset = row_offset\n\n    \n    for field in (\n        'credibility_rating', 'credibility_score', 'policy_identifier',\n        'threshold', 'block_mode'):\n      try:\n        value = int(row[field], 10)\n      except (ValueError, TypeError):\n        value = None\n      setattr(event_data, field, value)\n\n    \n    for field in ('url', 'group_name', 'group_code', 'application_name', 'ip'):\n      setattr(event_data, field, row[field])\n\n    event = time_events.DateTimeValuesEvent(\n        timestamp, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a line of the log file and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow_offset (int): line number of the row.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.", "source": "juraj-google-style"}
{"code": "def console_set_char_foreground(\n    con: tcod.console.Console, x: int, y: int, col: Tuple[int, int, int]\n) -> None:\n    \n    lib.TCOD_console_set_char_foreground(_console(con), x, y, col)", "docstring": "Change the foreground color of x,y to col.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\ncol (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\n\n.. deprecated:: 8.4\nArray access performs significantly faster than using this function.\nSee :any:`Console.fg`.", "source": "juraj-google-style"}
{"code": "def is_function_or_method(obj):\n    return (inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj))", "docstring": "Check if an object is a function or method.\n\nArgs:\nobj: The Python object in question.\n\nReturns:\nTrue if the object is an function or method.", "source": "codesearchnet"}
{"code": "def position(x=None, y=None):\n    (posx, posy) = platformModule._position()\n    posx = int(posx)\n    posy = int(posy)\n    if (x is not None):\n        posx = int(x)\n    if (y is not None):\n        posy = int(y)\n    return Point(posx, posy)", "docstring": "Returns the current xy coordinates of the mouse cursor as a two-integer\ntuple.\n\nArgs:\nx (int, None, optional) - If not None, this argument overrides the x in\nthe return value.\ny (int, None, optional) - If not None, this argument overrides the y in\nthe return value.\n\nReturns:\n(x, y) tuple of the current xy coordinates of the mouse cursor.", "source": "codesearchnet"}
{"code": "def delete(self, request):\n    try:\n        self.client.delete_object(Bucket=request.bucket, Key=request.object)\n    except Exception as e:\n        raise messages.S3ClientError(str(e), get_http_error_code(e))", "docstring": "Deletes given object from bucket\nArgs:\nrequest: (DeleteRequest) input message\nReturns:\n(void) Void, otherwise will raise if an error occurs", "source": "github-repos"}
{"code": "def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):\n    if (file_entry.IsRoot() and (file_entry.type_indicator not in self._TYPES_WITH_ROOT_METADATA)):\n        return\n    if (data_stream and (not data_stream.IsDefault())):\n        return\n    display_name = mediator.GetDisplayName()\n    logger.debug('[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(display_name))\n    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n    if self._processing_profiler:\n        self._processing_profiler.StartTiming('extracting')\n    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)\n    if self._processing_profiler:\n        self._processing_profiler.StopTiming('extracting')\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING", "docstring": "Extracts metadata from a file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry to extract metadata from.\ndata_stream (dfvfs.DataStream): data stream or None if the file entry\nhas no data stream.", "source": "codesearchnet"}
{"code": "def _CreateLineString(self, parent, coordinate_list):\n    if (not coordinate_list):\n        return None\n    linestring = ET.SubElement(parent, 'LineString')\n    tessellate = ET.SubElement(linestring, 'tessellate')\n    tessellate.text = '1'\n    if (len(coordinate_list[0]) == 3):\n        altitude_mode = ET.SubElement(linestring, 'altitudeMode')\n        altitude_mode.text = 'absolute'\n    coordinates = ET.SubElement(linestring, 'coordinates')\n    if (len(coordinate_list[0]) == 3):\n        coordinate_str_list = [('%f,%f,%f' % t) for t in coordinate_list]\n    else:\n        coordinate_str_list = [('%f,%f' % t) for t in coordinate_list]\n    coordinates.text = ' '.join(coordinate_str_list)\n    return linestring", "docstring": "Create a KML LineString element.\n\nThe points of the string are given in coordinate_list. Every element of\ncoordinate_list should be one of a tuple (longitude, latitude) or a tuple\n(longitude, latitude, altitude).\n\nArgs:\nparent: The parent ElementTree.Element instance.\ncoordinate_list: The list of coordinates.\n\nReturns:\nThe LineString ElementTree.Element instance or None if coordinate_list is\nempty.", "source": "codesearchnet"}
{"code": "def repertoire(self, direction, mechanism, purview):\n        \n        system = self.system[direction]\n        node_labels = system.node_labels\n\n        if not set(purview).issubset(self.purview_indices(direction)):\n            raise ValueError('{} is not a {} purview in {}'.format(\n                fmt.fmt_mechanism(purview, node_labels), direction, self))\n\n        if not set(mechanism).issubset(self.mechanism_indices(direction)):\n            raise ValueError('{} is no a {} mechanism in {}'.format(\n                fmt.fmt_mechanism(mechanism, node_labels), direction, self))\n\n        return system.repertoire(direction, mechanism, purview)", "docstring": "Return the cause or effect repertoire function based on a direction.\n\nArgs:\ndirection (str): The temporal direction, specifiying the cause or\neffect repertoire.", "source": "juraj-google-style"}
{"code": "def _ScanFileSystemForWindowsDirectory(self, path_resolver):\n    \n    result = False\n    for windows_path in self._WINDOWS_DIRECTORIES:\n      windows_path_spec = path_resolver.ResolvePath(windows_path)\n\n      result = windows_path_spec is not None\n      if result:\n        self._windows_directory = windows_path\n        break\n\n    return result", "docstring": "Scans a file system for a known Windows directory.\n\nArgs:\npath_resolver (WindowsPathResolver): Windows path resolver.\n\nReturns:\nbool: True if a known Windows directory was found.", "source": "juraj-google-style"}
{"code": "def _overrides(subcls, supercls, attr):\n    if subcls and supercls and (supercls in subcls.mro):\n        subcls = _base(subcls)\n        supercls = _base(supercls)\n        for cls in subcls.mro:\n            if cls == supercls:\n                break\n            if isinstance(cls, mixin.LazyMembers):\n                cls.load_lazy_attribute(attr)\n            if isinstance(cls, abstract.SimpleValue) and attr in cls.members and cls.members[attr].bindings:\n                return True\n    return False", "docstring": "Check whether subcls_var overrides or newly defines the given attribute.\n\nArgs:\nsubcls: A potential subclass.\nsupercls: A potential superclass.\nattr: An attribute name.\n\nReturns:\nTrue if subcls_var is a subclass of supercls_var and overrides or newly\ndefines the attribute. False otherwise.", "source": "github-repos"}
{"code": "def delete_additional_charge(self, recurring_billing_id):\n        \n        fmt = 'recurringBillItems/{}'.format(recurring_billing_id)\n        return self.client._delete(self.url + fmt, headers=self.get_headers())", "docstring": "Remove an extra charge from an invoice.\n\nArgs:\nrecurring_billing_id: Identifier of the additional charge.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def FillDeviceCapabilities(device, descriptor):\n  \n  preparsed_data = PHIDP_PREPARSED_DATA(0)\n  ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))\n  if not ret:\n    raise ctypes.WinError()\n\n  try:\n    caps = HidCapabilities()\n    ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))\n\n    if ret != HIDP_STATUS_SUCCESS:\n      raise ctypes.WinError()\n\n    descriptor.usage = caps.Usage\n    descriptor.usage_page = caps.UsagePage\n    descriptor.internal_max_in_report_len = caps.InputReportByteLength\n    descriptor.internal_max_out_report_len = caps.OutputReportByteLength\n\n  finally:\n    hid.HidD_FreePreparsedData(preparsed_data)", "docstring": "Fill out device capabilities.\n\nFills the HidCapabilitites of the device into descriptor.\n\nArgs:\ndevice: A handle to the open device\ndescriptor: DeviceDescriptor to populate with the\ncapabilities\n\nReturns:\nnone\n\nRaises:\nWindowsError when unable to obtain capabilitites.", "source": "juraj-google-style"}
{"code": "def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.max_pooling1d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.MaxPooling1D` instead.')\n    layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)", "docstring": "Max Pooling layer for 1D inputs.\n\nArgs:\ninputs: The tensor over which to pool. Must have rank 3.\npool_size: An integer or tuple/list of a single integer,\nrepresenting the size of the pooling window.\nstrides: An integer or tuple/list of a single integer, specifying the\nstrides of the pooling operation.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string, one of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, length, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, length)`.\nname: A string, the name of the layer.\n\nReturns:\nThe output tensor, of rank 3.\n\nRaises:\nValueError: if eager execution is enabled.", "source": "github-repos"}
{"code": "def estCumPos(pos, chrom, offset=20000000):\n    chromvals = SP.unique(chrom)\n    chrom_pos = SP.zeros_like(chromvals)\n    cum_pos = SP.zeros_like(pos)\n    maxpos_cum = 0\n    for (i, mychrom) in enumerate(chromvals):\n        chrom_pos[i] = maxpos_cum\n        i_chr = (chrom == mychrom)\n        maxpos = (pos[i_chr].max() + offset)\n        maxpos_cum += maxpos\n        cum_pos[i_chr] = (chrom_pos[i] + pos[i_chr])\n    return (cum_pos, chrom_pos)", "docstring": "compute the cumulative position of each variant given the position and the chromosome\nAlso return the starting cumulativeposition of each chromosome\n\nArgs:\npos:        scipy.array of basepair positions (on the chromosome)\nchrom:      scipy.array of chromosomes\noffset:     offset between chromosomes for cumulative position (default 20000000 bp)\n\nReturns:\ncum_pos:    scipy.array of cumulative positions\nchrom_pos:  scipy.array of starting cumulative positions for each chromosme", "source": "codesearchnet"}
{"code": "def compute_ld(cur_geno, other_genotypes, r2=False):\n    \n    \n    norm_cur = normalize_genotypes(cur_geno)\n\n    \n    norm_others = np.stack(\n        tuple(normalize_genotypes(g) for g in other_genotypes),\n        axis=1,\n    )\n\n    \n    assert norm_cur.shape[0] == norm_others.shape[0]\n\n    \n    n = (\n        ~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1)) *\n        ~np.isnan(norm_others)\n    ).sum(axis=0)\n\n    \n    r = pd.Series(\n        np.dot(\n            np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n\n        ),\n        index=[g.variant.name for g in other_genotypes],\n        name=\"r2\" if r2 else \"r\",\n    )\n\n    \n    r.loc[r > 1] = 1\n    r.loc[r < -1] = -1\n\n    if r2:\n        return r ** 2\n    else:\n        return r", "docstring": "Compute LD between a marker and a list of markers.\n\nArgs:\ncur_geno (Genotypes): The genotypes of the marker.\nother_genotypes (list): A list of genotypes.\n\nReturns:\nnumpy.array: An array containing the r or r**2 values between cur_geno\nand other_genotypes.\n\nNote:\nThe genotypes will automatically be normalized using (x - mean) / std.", "source": "juraj-google-style"}
{"code": "def represent_as_string(iterable):\n    \n    keep = (\".\", \"[\", \"]\")\n    return \"\".join(tuple(int_to_str_digit(i) if i not in keep\n                   else i for i in iterable))", "docstring": "Represent a number in the form of a string.\n(8, 6, 8, '.', 0, 15) -> \"868.0F\"\n\nArgs:\niterable - Number represented as an iterable container of digits.\nReturns:\nNumber represented as a string of digits.\n\n>>> represent_as_string((8, 6, 8, '.', 0, 15))\n'868.0F'", "source": "juraj-google-style"}
{"code": "def load(self, context):\n    if (not ((context.flags.debugger_data_server_grpc_port > 0) or (context.flags.debugger_port > 0))):\n        return None\n    flags = context.flags\n    try:\n        import tensorflow\n    except ImportError:\n        raise ImportError('To use the debugger plugin, you need to have TensorFlow installed:\\n  pip install tensorflow')\n    try:\n        from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib\n        from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib\n    except ImportError as e:\n        (e_type, e_value, e_traceback) = sys.exc_info()\n        message = (e.msg if hasattr(e, 'msg') else e.message)\n        if ('grpc' in message):\n            e_value = ImportError((message + '\\n\\nTo use the debugger plugin, you need to have gRPC installed:\\n  pip install grpcio'))\n        six.reraise(e_type, e_value, e_traceback)\n    if (flags.debugger_port > 0):\n        interactive_plugin = interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context)\n        logger.info('Starting Interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port)\n        interactive_plugin.listen(flags.debugger_port)\n        return interactive_plugin\n    elif (flags.debugger_data_server_grpc_port > 0):\n        noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)\n        logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d', flags.debugger_data_server_grpc_port)\n        noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)\n        return noninteractive_plugin\n    raise AssertionError()", "docstring": "Returns the debugger plugin, if possible.\n\nArgs:\ncontext: The TBContext flags including `add_arguments`.\n\nReturns:\nA DebuggerPlugin instance or None if it couldn't be loaded.", "source": "codesearchnet"}
{"code": "def create_binary_descriptor(streamer):\n    \n\n    trigger = 0\n    if streamer.automatic:\n        trigger = 1\n    elif streamer.with_other is not None:\n        trigger = (1 << 7) | streamer.with_other\n\n    return struct.pack(\"<8sHBBBx\", streamer.dest.encode(), streamer.selector.encode(), trigger, streamer.KnownFormats[streamer.format], streamer.KnownTypes[streamer.report_type])", "docstring": "Create a packed binary descriptor of a DataStreamer object.\n\nArgs:\nstreamer (DataStreamer): The streamer to create a packed descriptor for\n\nReturns:\nbytes: A packed 14-byte streamer descriptor.", "source": "juraj-google-style"}
{"code": "def _should_elide_opcode(op_items: list[tuple[int, Opcode]], i: int, python_version: tuple[int, int]):\n    op = op_items[i][1]\n    if python_version == (3, 11):\n        return isinstance(op, JUMP_BACKWARD) and i + 1 < len(op_items) and isinstance(op_items[i + 1][1], END_ASYNC_FOR)\n    return False", "docstring": "Returns `True` if the opcode on index `i` should be elided.\n\nOpcodes should be elided if they don't contribute to type checking and cause\nissues in the block graph.\n\nArgs:\nop_items: List of (offset, opcode) tuples.\ni: Index of opcode to check for elision.\npython_version: Python version tuple.", "source": "github-repos"}
{"code": "def get_group(self, name, user_name=None):\n    return self.service.get_group(name, user_name, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Get owner of group and the resources it's attached to.\n\nArgs:\nname (string): Name of group to query.\nuser_name (optional[string]): Supply None if not interested in determining if user is a member of the given group.\n\nReturns:\n(dict): Keys include 'owner', 'name', 'resources'.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def identical_dataset_and_algorithm_tuner(self, additional_parents=None):\n    return self._create_warm_start_tuner(additional_parents=additional_parents, warm_start_type=WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM)", "docstring": "Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new\ninstance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as\n\"IdenticalDataAndAlgorithm\" and parents as the union of provided list of ``additional_parents`` and the ``self``\n\nArgs:\nadditional_parents (set{str}): Set of additional parents along with the self to be used in warm starting\nthe identical dataset and algorithm tuner.\n\nReturns:\nsagemaker.tuner.HyperparameterTuner: HyperparameterTuner instance which can be used to launch identical\ndataset and algorithm tuning job.\n\nExamples:\n>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name=\"parent-job-1\")\n>>> identical_dataset_algo_tuner = parent_tuner.identical_dataset_and_algorithm_tuner(\n>>>                                                             additional_parents={\"parent-job-2\"})\nLater On:\n>>> identical_dataset_algo_tuner.fit(inputs={})", "source": "codesearchnet"}
{"code": "def logloss(y, p):\n    \n\n    p[p < EPS] = EPS\n    p[p > 1 - EPS] = 1 - EPS\n    return log_loss(y, p)", "docstring": "Bounded log loss error.\n\nArgs:\ny (numpy.array): target\np (numpy.array): prediction\n\nReturns:\nbounded log loss error", "source": "juraj-google-style"}
{"code": "def _get_augmented_label_matrix(self, L, higher_order=False):\n        \n        \n        \n        \n        self.c_data = {}\n        for i in range(self.m):\n            self.c_data[i] = {\n                \"start_index\": i * self.k,\n                \"end_index\": (i + 1) * self.k,\n                \"max_cliques\": set(\n                    [\n                        j\n                        for j in self.c_tree.nodes()\n                        if i in self.c_tree.node[j][\"members\"]\n                    ]\n                ),\n            }\n\n        L_ind = self._create_L_ind(L)\n\n        \n        \n        \n        if higher_order:\n            L_aug = np.copy(L_ind)\n            for item in chain(self.c_tree.nodes(), self.c_tree.edges()):\n                if isinstance(item, int):\n                    C = self.c_tree.node[item]\n                    C_type = \"node\"\n                elif isinstance(item, tuple):\n                    C = self.c_tree[item[0]][item[1]]\n                    C_type = \"edge\"\n                else:\n                    raise ValueError(item)\n                members = list(C[\"members\"])\n                nc = len(members)\n\n                \n                if nc == 1:\n                    C[\"start_index\"] = members[0] * self.k\n                    C[\"end_index\"] = (members[0] + 1) * self.k\n\n                \n                else:\n                    L_C = np.ones((self.n, self.k ** nc))\n                    for i, vals in enumerate(product(range(self.k), repeat=nc)):\n                        for j, v in enumerate(vals):\n                            L_C[:, i] *= L_ind[:, members[j] * self.k + v]\n\n                    \n                    if L_aug is not None:\n                        C[\"start_index\"] = L_aug.shape[1]\n                        C[\"end_index\"] = L_aug.shape[1] + L_C.shape[1]\n                        L_aug = np.hstack([L_aug, L_C])\n                    else:\n                        C[\"start_index\"] = 0\n                        C[\"end_index\"] = L_C.shape[1]\n                        L_aug = L_C\n\n                    \n                    id = tuple(members) if len(members) > 1 else members[0]\n                    self.c_data[id] = {\n                        \"start_index\": C[\"start_index\"],\n                        \"end_index\": C[\"end_index\"],\n                        \"max_cliques\": set([item]) if C_type == \"node\" else set(item),\n                    }\n            return L_aug\n        else:\n            return L_ind", "docstring": "Returns an augmented version of L where each column is an indicator\nfor whether a certain source or clique of sources voted in a certain\npattern.\n\nArgs:\nL: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}", "source": "juraj-google-style"}
{"code": "def auto_model(layout, scan_length=None, one_vs_rest=False):\n    base_name = split(layout.root)[(- 1)]\n    tasks = layout.entities['task'].unique()\n    task_models = []\n    for task_name in tasks:\n        model = OrderedDict()\n        model['Name'] = '_'.join([base_name, task_name])\n        model['Description'] = ('Autogenerated model for the %s task from %s' % (task_name, base_name))\n        model['Input'] = {'Task': task_name}\n        steps = []\n        transformations = OrderedDict(Name='Factor', Input=['trial_type'])\n        run = OrderedDict(Level='Run', Name='Run', Transformations=[transformations])\n        run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length)\n        evs = []\n        for n in run_nodes.nodes:\n            evs.extend(n.variables['trial_type'].values.values)\n        trial_types = np.unique(evs)\n        trial_type_factors = [('trial_type.' + tt) for tt in trial_types]\n        run['Transformations'].append(OrderedDict(Name='Convolve', Input=trial_type_factors))\n        run_model = OrderedDict(X=trial_type_factors)\n        run['Model'] = run_model\n        if one_vs_rest:\n            contrasts = []\n            for (i, tt) in enumerate(trial_types):\n                cdict = OrderedDict()\n                if (len(trial_types) > 1):\n                    cdict['Name'] = (('run_' + tt) + '_vs_others')\n                else:\n                    cdict['Name'] = ('run_' + tt)\n                cdict['ConditionList'] = trial_type_factors\n                weights = np.ones(len(trial_types))\n                try:\n                    weights[(trial_types != tt)] = ((- 1.0) / (len(trial_types) - 1))\n                except ZeroDivisionError:\n                    pass\n                cdict['Weights'] = list(weights)\n                cdict['Type'] = 't'\n                contrasts.append(cdict)\n            run['Contrasts'] = contrasts\n        steps.append(run)\n        if one_vs_rest:\n            sessions = layout.get_sessions()\n            if (len(sessions) > 1):\n                contrast_names = [cc['Name'] for cc in steps[(- 1)]['Contrasts']]\n                steps.append(_make_passthrough_contrast('Session', contrast_names))\n            subjects = layout.get_subjects()\n            if (len(subjects) > 1):\n                contrast_names = [cc['Name'] for cc in steps[(- 1)]['Contrasts']]\n                steps.append(_make_passthrough_contrast('Subject', contrast_names))\n            contrast_names = [cc['Name'] for cc in steps[(- 1)]['Contrasts']]\n            steps.append(_make_passthrough_contrast('Dataset', contrast_names))\n        model['Steps'] = steps\n        task_models.append(model)\n    return task_models", "docstring": "Create a simple default model for each of the tasks in a BIDSLayout.\nContrasts each trial type against all other trial types and trial types\nat the run level and then uses t-tests at each other level present to\naggregate these results up.\n\nArgs:\nlayout (BIDSLayout) A BIDSLayout instance\nscan_length (Int) Scan length for loading event varibles in cases\nwhere the scan length can not be read from the nifti.\nPrimarily for testing.\none_vs_rest (Bool) Set to True if you would like to autogenerate\ncontrasts of each trial type against everyother trialtype.\n\nReturns:\nmodels (list) list of model dictionaries for each task", "source": "codesearchnet"}
{"code": "def remove(self, *dic):\n    dicList = list(flatten(dic))\n    for d in dicList:\n        di = []\n        for k in d:\n            di.append(Pair(k, IntegerSingle(d[k])))\n        dictSingle = DictSingle(di)\n        self._remove([dictSingle], self.l)", "docstring": "remove a calendar config.\n\nArgs:\n*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Avaliable keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionary.", "source": "codesearchnet"}
{"code": "def lazy_property(fn):\n    attr_name = ('_lazy_' + fn.__name__)\n\n    @property\n    @wraps(fn)\n    def _lazy_property(self):\n        if (not hasattr(self, attr_name)):\n            setattr(self, attr_name, fn(self))\n        return getattr(self, attr_name)\n    return _lazy_property", "docstring": "Decorator that makes a property lazy-evaluated whilst preserving\ndocstrings.\n\nArgs:\nfn (function): the property in question\n\nReturns:\nevaluated version of the property.", "source": "codesearchnet"}
{"code": "def __init__(self, fields):\n        \n        self.fields = {\n            ensure_unicode_string(key): value\n            for key, value in six.iteritems(fields)\n        }\n\n        \n        \n        super(ConstructResult, self).__init__(self.fields)\n        self.validate()", "docstring": "Construct a ConstructResult object that maps the given field names to their expressions.\n\nArgs:\nfields: dict, variable name string -> Expression\nsee rules for variable names in validate_safe_string().\n\nReturns:\nnew ConstructResult object", "source": "juraj-google-style"}
{"code": "def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):\n    total_model = None\n    sub_models = []\n    for (i, kT) in enumerate(kTs):\n        component = ui.xsapec((name_template % i))\n        component.kT = kT\n        ui.freeze(component.kT)\n        if (norm is not None):\n            component.norm = norm\n        sub_models.append(component)\n        if (total_model is None):\n            total_model = component\n        else:\n            total_model = (total_model + component)\n    return (total_model, sub_models)", "docstring": "Create a model summing multiple APEC components at fixed temperatures.\n\n*kTs*\nAn iterable of temperatures for the components, in keV.\n*name_template* = 'apec%d'\nA template to use for the names of each component; it is string-formatted\nwith the 0-based component number as an argument.\n*norm* = None\nAn initial normalization to be used for every component, or None to use\nthe Sherpa default.\nReturns:\nA tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa\nmodel representing the sum of the APEC components and *sub_models* is\na list of the individual models.\n\nThis function creates a vector of APEC model components and sums them.\nTheir *kT* parameters are set and then frozen (using\n:func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the\namplitude of each component is the only free parameter.", "source": "codesearchnet"}
{"code": "def total_seconds(td):\n    \n    a_milli = 1000000.0\n    td_ds = td.seconds + (td.days * 86400)  \n    td_micro = td.microseconds + (td_ds * a_milli)\n    return td_micro / a_milli", "docstring": "For those with older versions of Python, a pure-Python\nimplementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`.\n\nArgs:\ntd (datetime.timedelta): The timedelta to convert to seconds.\nReturns:\nfloat: total number of seconds\n\n>>> td = timedelta(days=4, seconds=33)\n>>> total_seconds(td)\n345633.0", "source": "juraj-google-style"}
{"code": "def is_workdir(cls, path):\n    try:\n        cls(path=path).load()\n    except MalformedWorkdir:\n        return False\n    return True", "docstring": "Check if the given path is a workdir\n\nArgs:\npath(str): Path to check\n\nReturn:\nbool: True if the given path is a workdir", "source": "codesearchnet"}
{"code": "def logical_xor(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return LogicalXor().symbolic_call(x1, x2)\n    return backend.numpy.logical_xor(x1, x2)", "docstring": "Compute the truth value of `x1 XOR x2`, element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput boolean tensor.", "source": "github-repos"}
{"code": "def ParseApplicationUsageRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    \n    \n    application_name = self._GetRowValue(query_hash, row, 'event')\n    usage = 'Application {0:s}'.format(application_name)\n\n    event_data = MacOSApplicationUsageEventData()\n    event_data.application = self._GetRowValue(query_hash, row, 'app_path')\n    event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')\n    event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')\n    event_data.count = self._GetRowValue(query_hash, row, 'number_times')\n    event_data.query = query\n\n    timestamp = self._GetRowValue(query_hash, row, 'last_time')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, usage)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an application usage row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def register_entry(self, navbar_kwargs):\n        \n        \n        path = navbar_kwargs.pop('path')\n        \n        \n        if not hasattr(path, '__iter__') or isinstance(path, basestring):\n            path = [path]\n\n        entry_group = self.navbar_entries\n        \n        \n        \n        \n        for name, is_last in iter_islast(path):\n            kwargs = deepcopy(navbar_kwargs)\n            kwargs['name'] = name\n            for existing_entry in entry_group:\n                \n                \n                \n                \n                \n                \n                if existing_entry.name == name:\n                    entry = existing_entry\n                    if is_last:\n                        entry.endpoint = kwargs['endpoint']\n                    break\n            else:\n                \n                \n                \n                \n                if not is_last:\n                    kwargs['endpoint'] = None\n                entry = NavbarEntry(**kwargs)\n                entry_group.add(entry)\n            entry_group = entry.children", "docstring": "Register a navbar entry with the copilot.\n\nArgs:\nnavbar_kwargs (dict): Arguments passed to the\n:class:`NavbarEntry` instance.", "source": "juraj-google-style"}
{"code": "def AddContract(self, contract):\n    super(UserWallet, self).AddContract(contract)\n    try:\n        db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())\n        db_contract.delete_instance()\n    except Exception as e:\n        logger.debug('contract does not exist yet')\n    sh = bytes(contract.ScriptHash.ToArray())\n    (address, created) = Address.get_or_create(ScriptHash=sh)\n    address.IsWatchOnly = False\n    address.save()\n    db_contract = Contract.create(RawData=contract.ToArray(), ScriptHash=contract.ScriptHash.ToBytes(), PublicKeyHash=contract.PublicKeyHash.ToBytes(), Address=address, Account=self.__dbaccount)\n    logger.debug(('Creating db contract %s ' % db_contract))\n    db_contract.save()", "docstring": "Add a contract to the database.\n\nArgs:\ncontract(neo.SmartContract.Contract): a Contract instance.", "source": "codesearchnet"}
{"code": "def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, current_tokens: torch.LongTensor, beam_group_idx: int) -> torch.FloatTensor:\n    batch_size = current_tokens.shape[0] \n    group_start_idx = beam_group_idx * self._num_sub_beams\n    group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)\n    group_size = group_end_idx - group_start_idx\n    vocab_size = scores.shape[-1]\n    if group_start_idx == 0:\n        return scores\n    scores_processed = scores.clone()\n    for batch_idx in range(batch_size):\n        previous_group_tokens = current_tokens[batch_idx * self._num_beams:batch_idx * self._num_beams + group_start_idx]\n        token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)\n        scores_processed[batch_idx * group_size:(batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency\n    return scores_processed", "docstring": "Args:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\nscores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\nPrediction scores of a language modeling head. These can be logits for each vocabulary when not using\nbeam search or log softmax for each vocabulary token when using beam search\ncurrent_tokens (`torch.LongTensor` of shape `(batch_size)`):\nIndices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other\nbeam groups in the current generation step.\nbeam_group_idx (`int`):\nThe index of the beam group currently being processed.\n\nReturn:\n`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:\nThe processed prediction scores.", "source": "github-repos"}
{"code": "def inspect_config(self, id):\n    url = self._url('/configs/{0}', id)\n    return self._result(self._get(url), True)", "docstring": "Retrieve config metadata\n\nArgs:\nid (string): Full ID of the config to inspect\n\nReturns (dict): A dictionary of metadata\n\nRaises:\n:py:class:`docker.errors.NotFound`\nif no config with that ID exists", "source": "codesearchnet"}
{"code": "def assert_no_garbage_created(f: _F) -> _F:\n\n    def decorator(self: 'TensorFlowTestCase', **kwargs):\n        \n        gc.disable()\n        previous_debug_flags = gc.get_debug()\n        gc.set_debug(gc.DEBUG_UNCOLLECTABLE)\n        gc.collect()\n        previous_garbage = len(gc.garbage)\n        result = f(self, **kwargs)\n        gc.collect()\n        new_garbage = len(gc.garbage)\n        if new_garbage > previous_garbage:\n            for i, obj in enumerate(gc.garbage[previous_garbage:]):\n                if getattr(obj, '__module__', '') == 'ast':\n                    new_garbage -= 3\n        if new_garbage > previous_garbage:\n            logging.error(\"The decorated test created work for Python's garbage collector, likely due to a reference cycle. New objects in cycle(s):\")\n            for i, obj in enumerate(gc.garbage[previous_garbage:]):\n                try:\n                    logging.error('Object %d of %d', i, len(gc.garbage) - previous_garbage)\n\n                    def _safe_object_str(obj) -> str:\n                        return '<%s %d>' % (obj.__class__.__name__, id(obj))\n                    logging.error('  Object type: %s', _safe_object_str(obj))\n                    logging.error('  Referrer types: %s', ', '.join([_safe_object_str(ref) for ref in gc.get_referrers(obj)]))\n                    logging.error('  Referent types: %s', ', '.join([_safe_object_str(ref) for ref in gc.get_referents(obj)]))\n                    logging.error('  Object attribute names: %s', dir(obj))\n                    logging.error('  Object __str__:')\n                    logging.error(obj)\n                    logging.error('  Object __repr__:')\n                    logging.error(repr(obj))\n                except Exception:\n                    logging.error('(Exception while printing object)')\n        if new_garbage > previous_garbage:\n            for i in range(previous_garbage, new_garbage):\n                if _find_reference_cycle(gc.garbage, i):\n                    break\n        self.assertEqual(previous_garbage, new_garbage)\n        gc.set_debug(previous_debug_flags)\n        gc.enable()\n        return result\n    return decorator", "docstring": "Test method decorator to assert that no garbage has been created.\n\nNote that this decorator sets DEBUG_SAVEALL, which in some Python interpreters\ncannot be un-set (i.e. will disable garbage collection for any other unit\ntests in the same file/shard).\n\nArgs:\nf: The function to decorate.\n\nReturns:\nThe decorated function.", "source": "github-repos"}
{"code": "def _get_full_name(self, node):\n    curr = node\n    items = []\n    while not isinstance(curr, ast.Name):\n        if not isinstance(curr, ast.Attribute):\n            return None\n        items.append(curr.attr)\n        curr = curr.value\n    items.append(curr.id)\n    return '.'.join(reversed(items))", "docstring": "Traverse an Attribute node to generate a full name, e.g., \"tf.foo.bar\".\n\nThis is the inverse of `full_name_node`.\n\nArgs:\nnode: A Node of type Attribute.\n\nReturns:\na '.'-delimited full-name or None if node was not Attribute or Name.\ni.e. `foo()+b).bar` returns None, while `a.b.c` would return \"a.b.c\".", "source": "github-repos"}
{"code": "def domain_tag(self, domains):\n    api_name = 'opendns-domain_tag'\n    fmt_url_path = u'domains/{0}/latest_tags'\n    return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Get the data range when a domain is part of OpenDNS block list.\n\nArgs:\ndomains: an enumerable of strings domain names\nReturns:\nAn enumerable of string with period, category, and url", "source": "codesearchnet"}
{"code": "def _TravelTimes(self,triplist,index=0):\n    \n\n    def DistanceInTravelTime(dep_secs, arr_secs):\n      t_dist = arr_secs-dep_secs\n      if t_dist<0:\n        t_dist = self._DUMMY_SEPARATOR \n      return t_dist\n\n    if not triplist:\n      return []\n\n    if 0 < index < len(triplist):\n      trip = triplist[index]\n    else:\n      trip = triplist[0]\n\n    t_dists2 = [DistanceInTravelTime(stop[3],tail[2]) for (stop,tail)\n                 in itertools.izip(trip.GetTimeStops(),trip.GetTimeStops()[1:])]\n    return t_dists2", "docstring": "Calculate distances and plot stops.\n\nUses a timetable to approximate distances\nbetween stations\n\nArgs:\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n# (Optional) Index of Triplist prefered for timetable Calculation\nindex: 3\n\nReturns:\n# One integer for each pair of stations\n# indicating the approximate distance\n[0,33,140, ... ,X]", "source": "juraj-google-style"}
{"code": "def get_function_name(function: Any) -> str:\n    return str(function.__qualname__.split('.')[0])", "docstring": "Get the original name of a function, removing module paths.\n\nArgs:\n* function: function instance\n\nReturns:\n* Actual name of function", "source": "github-repos"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_loader = KeyedModelHandler(TFModelHandlerNumpy(model_uri=known_args.model_path, model_type=ModelType.SAVED_WEIGHTS, create_model_fn=get_model))\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    label_pixel_tuple = pipeline | 'ReadFromInput' >> beam.io.ReadFromText(known_args.input) | 'PreProcessInputs' >> beam.Map(process_input)\n    predictions = label_pixel_tuple | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n    _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def get_numeric_value(event_tags, logger=None):\n    logger_message_debug = None\n    numeric_metric_value = None\n    if (event_tags is None):\n        logger_message_debug = 'Event tags is undefined.'\n    elif (not isinstance(event_tags, dict)):\n        logger_message_debug = 'Event tags is not a dictionary.'\n    elif (NUMERIC_METRIC_TYPE not in event_tags):\n        logger_message_debug = 'The numeric metric key is not in event tags.'\n    else:\n        numeric_metric_value = event_tags[NUMERIC_METRIC_TYPE]\n        try:\n            if isinstance(numeric_metric_value, (numbers.Integral, float, str)):\n                cast_numeric_metric_value = float(numeric_metric_value)\n                if ((not isinstance(cast_numeric_metric_value, float)) or math.isnan(cast_numeric_metric_value) or math.isinf(cast_numeric_metric_value)):\n                    logger_message_debug = 'Provided numeric value {} is in an invalid format.'.format(numeric_metric_value)\n                    numeric_metric_value = None\n                elif isinstance(numeric_metric_value, bool):\n                    logger_message_debug = 'Provided numeric value is a boolean, which is an invalid format.'\n                    numeric_metric_value = None\n                else:\n                    numeric_metric_value = cast_numeric_metric_value\n            else:\n                logger_message_debug = 'Numeric metric value is not in integer, float, or string form.'\n                numeric_metric_value = None\n        except ValueError:\n            logger_message_debug = 'Value error while casting numeric metric value to a float.'\n            numeric_metric_value = None\n    if (logger and logger_message_debug):\n        logger.log(enums.LogLevels.DEBUG, logger_message_debug)\n    if (numeric_metric_value is not None):\n        if logger:\n            logger.log(enums.LogLevels.INFO, 'The numeric metric value {} will be sent to results.'.format(numeric_metric_value))\n    elif logger:\n        logger.log(enums.LogLevels.WARNING, 'The provided numeric metric value {} is in an invalid format and will not be sent to results.'.format(numeric_metric_value))\n    return numeric_metric_value", "docstring": "A smart getter of the numeric value from the event tags.\n\nArgs:\nevent_tags: A dictionary of event tags.\nlogger: Optional logger.\n\nReturns:\nA float numeric metric value is returned when the provided numeric\nmetric value is in the following format:\n- A string (properly formatted, e.g., no commas)\n- An integer\n- A float or double\nNone is returned when the provided numeric metric values is in\nthe following format:\n- None\n- A boolean\n- inf, -inf, nan\n- A string not properly formatted (e.g., '1,234')\n- Any values that cannot be cast to a float (e.g., an array or dictionary)", "source": "codesearchnet"}
{"code": "def speech_speaker(self):\n    if self.speaker:\n        return self.speaker\n    elif self.parent:\n        return self.parent.speech_speaker()\n    else:\n        return None", "docstring": "Retrieves the speaker of the audio or video file associated with the element.\n\nThe source is inherited from ancestor elements if none is specified. For this reason, always use this method rather than access the ``src`` attribute directly.\n\nReturns:\nstr or None if not found", "source": "codesearchnet"}
{"code": "def hdg60(msg):\n    d = hex2bin(data(msg))\n    if (d[0] == '0'):\n        return None\n    sign = int(d[1])\n    value = bin2int(d[2:12])\n    if sign:\n        value = (value - 1024)\n    hdg = ((value * 90) / 512.0)\n    if (hdg < 0):\n        hdg = (360 + hdg)\n    return round(hdg, 3)", "docstring": "Megnetic heading of aircraft\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nfloat: heading in degrees to megnetic north (from 0 to 360)", "source": "codesearchnet"}
{"code": "def load(filename):\n    if (not os.path.exists(filename)):\n        LOG.error(\"load object - File '%s' does not exist.\", filename)\n        return None\n    obj = None\n    with open(filename, 'rb') as obj_file:\n        obj = dill.load(obj_file)\n    return obj", "docstring": "Load a pickled obj from the filesystem.\n\nYou better know what you expect from the given pickle, because we don't check it.\n\nArgs:\nfilename (str): The filename we load the object from.\n\nReturns:\nThe object we were able to unpickle, else None.", "source": "codesearchnet"}
{"code": "def mount_share(share_path):\n    \n    sh_url = CFURLCreateWithString(None, share_path, None)\n    \n    open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}\n    \n    mount_options = {NetFS.kNetFSAllowSubMountsKey: True}\n    \n    result, output = NetFS.NetFSMountURLSync(sh_url, None, None, None,\n                                             open_options, mount_options, None)\n    \n    if result != 0:\n        raise Exception('Error mounting url \"%s\": %s' % (share_path, output))\n    \n    return str(output[0])", "docstring": "Mounts a share at /Volumes\n\nArgs:\nshare_path: String URL with all auth info to connect to file share.\n\nReturns:\nThe mount point or raises an error.", "source": "juraj-google-style"}
{"code": "def atmospheric_station_pressure(self, value=999999):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `atmospheric_station_pressure`'.format(value))\n        if (value <= 31000):\n            raise ValueError('value need to be greater 31000 for field `atmospheric_station_pressure`')\n        if (value >= 120000):\n            raise ValueError('value need to be smaller 120000 for field `atmospheric_station_pressure`')\n    self._atmospheric_station_pressure = value", "docstring": "Corresponds to IDD Field `atmospheric_station_pressure`\n\nArgs:\nvalue (int): value for IDD Field `atmospheric_station_pressure`\nUnit: Pa\nvalue > 31000\nvalue < 120000\nMissing value: 999999\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _parse_cli_args(argv):\n    parser = argparse.ArgumentParser(description='Mobly Suite Executable.')\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('-c', '--config', type=str, metavar='<PATH>', help='Path to the test configuration file.')\n    group.add_argument('-l', '--list_tests', action='store_true', help='Print the names of the tests defined in a script without executing them.')\n    parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[ClassA[_test_suffix][.test_a] ClassB[_test_suffix][.test_b] ...]', help='A list of test classes and optional tests to execute. Note: test_suffix based names are only supported when running by suite class')\n    parser.add_argument('-tb', '--test_bed', nargs='+', type=str, metavar='[<TEST BED NAME1> <TEST BED NAME2> ...]', help='Specify which test beds to run tests on.')\n    parser.add_argument('-v', '--verbose', action='store_true', help='Set console logger level to DEBUG')\n    if not argv:\n        argv = sys.argv[1:]\n    return parser.parse_known_args(argv)[0]", "docstring": "Parses cli args that are consumed by Mobly.\n\nArgs:\nargv: A list that is then parsed as cli args. If None, defaults to cli\ninput.\n\nReturns:\nNamespace containing the parsed args.", "source": "github-repos"}
{"code": "def get_nonmonotonic_neurites(neuron, tol=1e-6):\n    \n    return [n for n in neuron.neurites if not is_monotonic(n, tol)]", "docstring": "Get neurites that are not monotonic\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): the tolerance or the ratio\n\nReturns:\nlist of neurites that do not satisfy monotonicity test", "source": "juraj-google-style"}
{"code": "def init_state(self, node):\n    raise NotImplementedError('Subclasses must implement this.')", "docstring": "State initialization function.\n\nOptional to overload.\n\nAn in/out state slot will be created for each node in the graph. Subclasses\nmust overload this to control what that is initialized to.\n\nArgs:\nnode: Node", "source": "github-repos"}
{"code": "def register_agent(self, host, sweep_id=None, project_name=None):\n    mutation = gql('\\n        mutation CreateAgent(\\n            $host: String!\\n            $projectName: String!,\\n            $entityName: String!,\\n            $sweep: String!\\n        ) {\\n            createAgent(input: {\\n                host: $host,\\n                projectName: $projectName,\\n                entityName: $entityName,\\n                sweep: $sweep,\\n            }) {\\n                agent {\\n                    id\\n                }\\n            }\\n        }\\n        ')\n    if (project_name is None):\n        project_name = self.settings('project')\n\n    def no_retry_400(e):\n        if (not isinstance(e, requests.HTTPError)):\n            return True\n        if (e.response.status_code != 400):\n            return True\n        body = json.loads(e.response.content)\n        raise UsageError(body['errors'][0]['message'])\n    response = self.gql(mutation, variable_values={'host': host, 'entityName': self.settings('entity'), 'projectName': project_name, 'sweep': sweep_id}, check_retry_fn=no_retry_400)\n    return response['createAgent']['agent']", "docstring": "Register a new agent\n\nArgs:\nhost (str): hostname\npersistent (bool): long running or oneoff\nsweep (str): sweep id\nproject_name: (str): model that contains sweep", "source": "codesearchnet"}
{"code": "def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:\n    bucket_length = (bucket_length or len(tags))\n    answer = np.zeros(shape=(bucket_length,), dtype=np.int32)\n    for (i, tag) in enumerate(tags):\n        answer[i] = self.tags.tok2idx(tag)\n    return answer", "docstring": "Transforms a sentence of tags to Numpy array, which will be the network target.\n\nArgs:\ntags: input sentence of tags\nbucket_length: the width of the bucket\n\nReturns:\nA 2d array, answer[i][j] contains the index of j-th tag in i-th input sentence.", "source": "codesearchnet"}
{"code": "def byte_adaptor(fbuffer):\n    if six.PY3:\n        strings = fbuffer.read().decode('latin-1')\n        fbuffer = six.StringIO(strings)\n        return fbuffer\n    else:\n        return fbuffer", "docstring": "provides py3 compatibility by converting byte based\nfile stream to string based file stream\n\nArguments:\nfbuffer: file like objects containing bytes\n\nReturns:\nstring buffer", "source": "codesearchnet"}
{"code": "def accepts_scalar_input(func):\n\n    @ignores_exc_tb(outer_wrapper=False)\n    def wrp_asi(self, input_, *args, **kwargs):\n        if util_iter.isiterable(input_):\n            return func(self, input_, *args, **kwargs)\n        else:\n            ret = func(self, [input_], *args, **kwargs)\n            if (ret is not None):\n                return ret[0]\n    wrp_asi = preserve_sig(wrp_asi, func)\n    return wrp_asi", "docstring": "DEPRICATE in favor of accepts_scalar_input2\nonly accepts one input as vector\n\naccepts_scalar_input is a decorator which expects to be used on class\nmethods.  It lets the user pass either a vector or a scalar to a function,\nas long as the function treats everything like a vector. Input and output\nis sanitized to the user expected format on return.\n\nArgs:\nfunc (func):\n\nReturns:\nfunc: wrp_asi\n\nCommandLine:\npython -m utool.util_decor --test-accepts_scalar_input\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_decor import *  # NOQA\n>>> @accepts_scalar_input\n... def foobar(self, list_):\n...     return [x + 1 for x in list_]\n>>> self = None  # dummy self because this decorator is for classes\n>>> assert 2 == foobar(self, 1)\n>>> assert [2, 3] == foobar(self, [1, 2])", "source": "codesearchnet"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    if self._session_end_timestamp is None:\n      self._session_end_timestamp = (\n          event.timestamp + self._maximum_pause_microseconds)\n      self._events_per_session.append(0)\n\n    if event.timestamp > self._session_end_timestamp:\n      self._session_counter += 1\n      self._events_per_session.append(0)\n\n    self._session_end_timestamp = (\n        event.timestamp + self._maximum_pause_microseconds)\n    \n    \n    self._events_per_session[-1] += 1\n\n    label = 'session_{0:d}'.format(self._session_counter)\n    event_tag = self._CreateEventTag(event, self._EVENT_TAG_COMMENT, [label])\n    mediator.ProduceEventTag(event_tag)\n    self._number_of_event_tags += 1", "docstring": "Analyzes an EventObject and tags it as part of a session.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"}
{"code": "def findSequenceOnDisk(cls, pattern, strictPadding=False):\n    seq = cls(pattern)\n    if ((seq.frameRange() == '') and (seq.padding() == '')):\n        if os.path.isfile(pattern):\n            return seq\n    patt = seq.format('{dirname}{basename}*{extension}')\n    ext = seq.extension()\n    basename = seq.basename()\n    pad = seq.padding()\n    globbed = iglob(patt)\n    if (pad and strictPadding):\n        globbed = cls._filterByPaddingNum(globbed, seq.zfill())\n        pad = cls.conformPadding(pad)\n    matches = cls.yield_sequences_in_list(globbed)\n    for match in matches:\n        if ((match.basename() == basename) and (match.extension() == ext)):\n            if (pad and strictPadding):\n                match.setPadding(pad)\n            return match\n    msg = 'no sequence found on disk matching {0}'\n    raise FileSeqException(msg.format(pattern))", "docstring": "Search for a specific sequence on disk.\n\nThe padding characters used in the `pattern` are used to filter the\nframe values of the files on disk (if `strictPadding` is True).\n\nExamples:\nFind sequence matching basename and extension, and a wildcard for\nany frame.\nreturns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive\n\n>>> findSequenceOnDisk(\"seq/bar@@@@.exr\")\n\nFind exactly 4-padded sequence, i.e. seq/bar1-100#.exr\nreturns only frames bar1000.exr through bar9999.exr\n\n>>> findSequenceOnDisk(\"seq/bar#.exr\", strictPadding=True)\n\nArgs:\npattern (str): the sequence pattern being searched for\nstrictPadding (bool): if True, ignore files with padding length different from `pattern`\n\nReturns:\nstr:\n\nRaises:\n:class:`.FileSeqException`: if no sequence is found on disk", "source": "codesearchnet"}
{"code": "def __init__(self,\n               queues=queues_config.WORKER_LIST,\n               threadpool_prefix=\"grr_threadpool\",\n               threadpool_size=None,\n               token=None):\n    \n    logging.info(\"started worker with queues: %s\", str(queues))\n    self.queues = queues\n\n    \n    \n    \n    self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)\n\n    if token is None:\n      raise RuntimeError(\"A valid ACLToken is required.\")\n\n    if threadpool_size is None:\n      threadpool_size = config.CONFIG[\"Threadpool.size\"]\n\n    self.thread_pool = threadpool.ThreadPool.Factory(\n        threadpool_prefix, min_threads=2, max_threads=threadpool_size)\n\n    self.thread_pool.Start()\n\n    self.token = token\n    self.last_active = 0\n    self.last_mh_lease_attempt = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)\n\n    \n    self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)", "docstring": "Constructor.\n\nArgs:\nqueues: The queues we use to fetch new messages from.\nthreadpool_prefix: A name for the thread pool used by this worker.\nthreadpool_size: The number of workers to start in this thread pool.\ntoken: The token to use for the worker.\n\nRaises:\nRuntimeError: If the token is not provided.", "source": "juraj-google-style"}
{"code": "def resize_file(fobj, diff, BUFFER_SIZE=(2 ** 16)):\n    fobj.seek(0, 2)\n    filesize = fobj.tell()\n    if (diff < 0):\n        if ((filesize + diff) < 0):\n            raise ValueError\n        fobj.truncate((filesize + diff))\n    elif (diff > 0):\n        try:\n            while diff:\n                addsize = min(BUFFER_SIZE, diff)\n                fobj.write((b'\\x00' * addsize))\n                diff -= addsize\n            fobj.flush()\n        except IOError as e:\n            if (e.errno == errno.ENOSPC):\n                fobj.truncate(filesize)\n            raise", "docstring": "Resize a file by `diff`.\n\nNew space will be filled with zeros.\n\nArgs:\nfobj (fileobj)\ndiff (int): amount of size to change\nRaises:\nIOError", "source": "codesearchnet"}
{"code": "def func_str(func, args=[], kwargs={}, type_aliases=[], packed=False, packkw=None, truncate=False):\n    import utool as ut\n    truncatekw = {}\n    argrepr_list = ([] if (args is None) else ut.get_itemstr_list(args, nl=False, truncate=truncate, truncatekw=truncatekw))\n    kwrepr_list = ([] if (kwargs is None) else ut.dict_itemstr_list(kwargs, explicit=True, nl=False, truncate=truncate, truncatekw=truncatekw))\n    repr_list = (argrepr_list + kwrepr_list)\n    argskwargs_str = ', '.join(repr_list)\n    _str = ('%s(%s)' % (meta_util_six.get_funcname(func), argskwargs_str))\n    if packed:\n        packkw_ = dict(textwidth=80, nlprefix='    ', break_words=False)\n        if (packkw is not None):\n            packkw_.update(packkw_)\n        _str = packstr(_str, **packkw_)\n    return _str", "docstring": "string representation of function definition\n\nReturns:\nstr: a representation of func with args, kwargs, and type_aliases\n\nArgs:\nfunc (function):\nargs (list): argument values (default = [])\nkwargs (dict): kwargs values (default = {})\ntype_aliases (list): (default = [])\npacked (bool): (default = False)\npackkw (None): (default = None)\n\nReturns:\nstr: func_str\n\nCommandLine:\npython -m utool.util_str --exec-func_str\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> func = byte_str\n>>> args = [1024, 'MB']\n>>> kwargs = dict(precision=2)\n>>> type_aliases = []\n>>> packed = False\n>>> packkw = None\n>>> _str = func_str(func, args, kwargs, type_aliases, packed, packkw)\n>>> result = _str\n>>> print(result)\nbyte_str(1024, 'MB', precision=2)", "source": "codesearchnet"}
{"code": "def business_days_in_period(self, date_tensor, period_tensor):\n    pass", "docstring": "Calculates number of business days in a period.\n\nIncludes the dates in `date_tensor`, but excludes final dates resulting from\naddition of `period_tensor`.\n\nArgs:\ndate_tensor: DateTensor of starting dates.\nperiod_tensor: PeriodTensor, should be broadcastable to `date_tensor`.\n\nReturns:\nAn int32 Tensor with the number of business days in given periods that\nstart at given dates.", "source": "github-repos"}
{"code": "def consume(generator):\n    if hasattr(generator, '__next__'):\n        return list(generator)\n    if (not PY_35):\n        raise RuntimeError('paco: asynchronous iterator protocol not supported')\n    buf = []\n    while True:\n        try:\n            buf.append((yield from generator.__anext__()))\n        except StopAsyncIteration:\n            break\n    return buf", "docstring": "Helper function to consume a synchronous or asynchronous generator.\n\nArguments:\ngenerator (generator|asyncgenerator): generator to consume.\n\nReturns:\nlist", "source": "codesearchnet"}
{"code": "def _detect_encoding(data=None):\n    import locale\n    enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2', 'utf-16', 'cp720']\n    code = locale.getpreferredencoding(False)\n    if (data is None):\n        return code\n    if (code.lower() not in enc_list):\n        enc_list.insert(0, code.lower())\n    for c in enc_list:\n        try:\n            for line in data:\n                line.decode(c)\n        except (UnicodeDecodeError, UnicodeError, AttributeError):\n            continue\n        return c\n    print('Encoding not detected. Please pass encoding value manually')", "docstring": "Return the default system encoding. If data is passed, try\nto decode the data with the default system encoding or from a short\nlist of encoding types to test.\n\nArgs:\ndata - list of lists\nReturns:\nenc - system encoding", "source": "codesearchnet"}
{"code": "def process_runway_configs(runway_dir=''):\n    LOG.info('Processing application.json files from local directory \"%s\".', runway_dir)\n    file_lookup = FileLookup(runway_dir=runway_dir)\n    app_configs = process_configs(file_lookup, 'application-master-{env}.json', 'pipeline.json')\n    return app_configs", "docstring": "Read the _application.json_ files.\n\nArgs:\nrunway_dir (str): Name of runway directory with app.json files.\n\nReturns:\ncollections.defaultdict: Configurations stored for each environment\nfound.", "source": "codesearchnet"}
{"code": "def has_service_by_name(self, name):\n    return name in self._service_objects", "docstring": "Checks if the manager has a service registered with a specific name.\n\nArgs:\nname: string, the name to look for.\n\nReturns:\nTrue if a service is registered with the specified name, False\notherwise.", "source": "github-repos"}
{"code": "def GetExecutionDetails(self, request, global_params=None):\n    config = self.GetMethodConfig('GetExecutionDetails')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Request detailed information about the execution status of a stage of the job. EXPERIMENTAL. This API is subject to change or removal without notice.\n\nArgs:\nrequest: (DataflowProjectsLocationsJobsStagesGetExecutionDetailsRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(StageExecutionDetails) The response message.", "source": "github-repos"}
{"code": "def __init__(self, count, string_length):\n    \n    self._count = count\n    self._string_length = string_length", "docstring": "Initialize input reader.\n\nArgs:\ncount: number of entries this shard should generate.\nstring_length: the length of generated random strings.", "source": "juraj-google-style"}
{"code": "def dummyctrl(self, r, ctrl):\n    dv = DummyVertex(r)\n    (dv.view.w, dv.view.h) = (self.dw, self.dh)\n    self.grx[dv] = dv\n    dv.ctrl = ctrl\n    ctrl[r] = dv\n    self.layers[r].append(dv)\n    return dv", "docstring": "creates a DummyVertex at rank r inserted in the ctrl dict\nof the associated edge and layer.\n\nArguments:\nr (int): rank value\nctrl (dict): the edge's control vertices\n\nReturns:\nDummyVertex : the created DummyVertex.", "source": "codesearchnet"}
{"code": "def UpdateNumberOfEventReports(\n      self, number_of_consumed_reports, number_of_produced_reports):\n    \n    consumed_reports_delta = 0\n    if number_of_consumed_reports is not None:\n      if number_of_consumed_reports < self.number_of_consumed_reports:\n        raise ValueError(\n            'Number of consumed reports smaller than previous update.')\n\n      consumed_reports_delta = (\n          number_of_consumed_reports - self.number_of_consumed_reports)\n\n      self.number_of_consumed_reports = number_of_consumed_reports\n      self.number_of_consumed_reports_delta = consumed_reports_delta\n\n    produced_reports_delta = 0\n    if number_of_produced_reports is not None:\n      if number_of_produced_reports < self.number_of_produced_reports:\n        raise ValueError(\n            'Number of produced reports smaller than previous update.')\n\n      produced_reports_delta = (\n          number_of_produced_reports - self.number_of_produced_reports)\n\n      self.number_of_produced_reports = number_of_produced_reports\n      self.number_of_produced_reports_delta = produced_reports_delta\n\n    return consumed_reports_delta > 0 or produced_reports_delta > 0", "docstring": "Updates the number of event reports.\n\nArgs:\nnumber_of_consumed_reports (int): total number of event reports consumed\nby the process.\nnumber_of_produced_reports (int): total number of event reports produced\nby the process.\n\nReturns:\nbool: True if either number of event reports has increased.\n\nRaises:\nValueError: if the consumed or produced number of event reports is\nsmaller than the value of the previous update.", "source": "juraj-google-style"}
{"code": "def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):\n    button_entry_comment = extract_element_internationalized_comment(button)\n    if (button_entry_comment is None):\n        return\n    for state in button.getElementsByTagName('state'):\n        state_name = state.attributes['key'].value\n        state_entry_comment = (((button_entry_comment + ' - ') + state_name) + ' state of button')\n        if (not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment)):\n            try:\n                button_entry_key = state.attributes['title'].value\n            except KeyError:\n                try:\n                    button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue\n                except Exception:\n                    continue\n            results.append((button_entry_key, state_entry_comment))\n    warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)", "docstring": "Adds strings pairs from a button xib element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\nbutton(element): The button element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)", "source": "codesearchnet"}
{"code": "def _build(self, inputs):\n    shape_inputs = inputs.get_shape().as_list()\n    rank = len(shape_inputs)\n    max_dim = (np.max(self._dims) + 1)\n    if (rank < max_dim):\n        raise ValueError('Rank of inputs must be at least {}.'.format(max_dim))\n    full_begin = ([0] * rank)\n    full_size = ([(- 1)] * rank)\n    for (dim, begin, size) in zip(self._dims, self._begin, self._size):\n        full_begin[dim] = begin\n        full_size[dim] = size\n    return tf.slice(inputs, begin=full_begin, size=full_size)", "docstring": "Connects the SliceByDim module into the graph.\n\nArgs:\ninputs: `Tensor` to slice. Its rank must be greater than the maximum\ndimension specified in `dims` (plus one as python is 0 indexed).\n\nReturns:\nThe sliced tensor.\n\nRaises:\nValueError: If `inputs` tensor has insufficient rank.", "source": "codesearchnet"}
{"code": "def _add_future(cls, future):\n    \n    if cls._local._activated:\n      cls._local._in_order_futures.add(future)", "docstring": "Adds a future to the list of in-order futures thus far.\n\nArgs:\nfuture: The future to add to the list.", "source": "juraj-google-style"}
{"code": "def _PrintStorageInformationAsJSON(self, storage_reader):\n    \n    serializer = json_serializer.JSONAttributeContainerSerializer\n    storage_counters = self._CalculateStorageCounters(storage_reader)\n    storage_counters_json = json.dumps(storage_counters)\n    self._output_writer.Write('{')\n    self._output_writer.Write('\"storage_counters\": {0:s}'.format(\n        storage_counters_json))\n    self._output_writer.Write(',\\n')\n    self._output_writer.Write(' \"sessions\": {')\n    for index, session in enumerate(storage_reader.GetSessions()):\n      json_string = serializer.WriteSerialized(session)\n      if index != 0:\n        self._output_writer.Write(',\\n')\n      self._output_writer.Write('\"session_{0:s}\": {1:s} '.format(\n          session.identifier, json_string))\n    self._output_writer.Write('}}')", "docstring": "Writes a summary of sessions as machine-readable JSON.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "juraj-google-style"}
{"code": "def _request(self, domain, type_name, search_command, db_method, body=None):\n        \n        headers = {'Content-Type': 'application/json', 'DB-Method': db_method}\n        search_command = self._clean_datastore_path(search_command)\n        url = '/v2/exchange/db/{}/{}/{}'.format(domain, type_name, search_command)\n        r = self.tcex.session.post(url, data=body, headers=headers, params=self._params)\n\n        data = []\n        status = 'Failed'\n        if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n            self.tcex.handle_error(350, [r.status_code, r.text])\n        data = r.json()\n        status = 'Success'\n\n        return {'data': data, 'response': r, 'status': status}", "docstring": "Make the API request for a Data Store CRUD operation\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\ndb_method (string): The DB method 'DELETE', 'GET', 'POST', or 'PUT'\nbody (dict): JSON body", "source": "juraj-google-style"}
{"code": "def apply_operation(self, symmop, fractional=False):\n    if (not fractional):\n        self._lattice = Lattice([symmop.apply_rotation_only(row) for row in self._lattice.matrix])\n\n        def operate_site(site):\n            new_cart = symmop.operate(site.coords)\n            new_frac = self._lattice.get_fractional_coords(new_cart)\n            return PeriodicSite(site.species, new_frac, self._lattice, properties=site.properties)\n    else:\n        new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)\n        self._lattice = Lattice(new_latt)\n\n        def operate_site(site):\n            return PeriodicSite(site.species, symmop.operate(site.frac_coords), self._lattice, properties=site.properties)\n    self._sites = [operate_site(s) for s in self._sites]", "docstring": "Apply a symmetry operation to the structure and return the new\nstructure. The lattice is operated by the rotation matrix only.\nCoords are operated in full and then transformed to the new lattice.\n\nArgs:\nsymmop (SymmOp): Symmetry operation to apply.\nfractional (bool): Whether the symmetry operation is applied in\nfractional space. Defaults to False, i.e., symmetry operation\nis applied in cartesian coordinates.", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    primary_url = event_values['primary_url']\n    secondary_url = event_values['secondary_url']\n\n    \n    \n    \n    \n    \n    \n    \n    \n\n    if primary_url == '':\n      subject = 'local file'\n\n    elif secondary_url in (primary_url, '*'):\n      subject = primary_url\n\n    elif secondary_url == '':\n      subject = '{0:s} embedded in local file'.format(primary_url)\n\n    else:\n      subject = '{0:s} embedded in {1:s}'.format(primary_url, secondary_url)\n\n    event_values['subject'] = subject\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def remove(self, key):\n    data = self._load_file()\n    del data[key]\n    self._save_file(data)", "docstring": "Remove a key from the data store\n\nArgs:\nkey (string): The key to remove\n\nRaises:\nKeyError: if the key was not found", "source": "codesearchnet"}
{"code": "def _from_tensor_list_helper(decode_fn, element_spec, tensor_list):\n    flat_specs = nest.flatten(element_spec)\n    flat_spec_lengths = [len(spec._flat_tensor_specs) for spec in flat_specs]\n    if sum(flat_spec_lengths) != len(tensor_list):\n        raise ValueError('Expected {} tensors but got {}.'.format(sum(flat_spec_lengths), len(tensor_list)))\n    i = 0\n    flat_ret = []\n    for component_spec, num_flat_values in zip(flat_specs, flat_spec_lengths):\n        value = tensor_list[i:i + num_flat_values]\n        flat_ret.append(decode_fn(component_spec, value))\n        i += num_flat_values\n    return nest.pack_sequence_as(element_spec, flat_ret)", "docstring": "Returns an element constructed from the given spec and tensor list.\n\nArgs:\ndecode_fn: Method that constructs an element component from the element spec\ncomponent and a tensor list.\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\ntensor_list: A list of tensors to use for constructing the value.\n\nReturns:\nAn element constructed from the given spec and tensor list.\n\nRaises:\nValueError: If the number of tensors needed to construct an element for\nthe given spec does not match the given number of tensors.", "source": "github-repos"}
{"code": "def decode_csv(csv_string, column_names):\n  \n  import csv\n  r = next(csv.reader([csv_string]))\n  if len(r) != len(column_names):\n    raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))\n  return {k: v for k, v in zip(column_names, r)}", "docstring": "Parse a csv line into a dict.\n\nArgs:\ncsv_string: a csv string. May contain missing values \"a,,c\"\ncolumn_names: list of column names\n\nReturns:\nDict of {column_name, value_from_csv}. If there are missing values,\nvalue_from_csv will be ''.", "source": "juraj-google-style"}
{"code": "def assertAllInRange(self, target, lower_bound, upper_bound, open_lower_bound=False, open_upper_bound=False):\n    target = self._GetNdArray(target)\n    if not (np.issubdtype(target.dtype, np.floating) or np.issubdtype(target.dtype, np.integer)):\n        raise AssertionError('The value of %s does not have an ordered numeric type, instead it has type: %s' % (target, target.dtype))\n    nan_subscripts = np_where(np.isnan(target))\n    if np.size(nan_subscripts):\n        raise AssertionError('%d of the %d element(s) are NaN. Subscripts(s) and value(s) of the NaN element(s):\\n' % (len(nan_subscripts[0]), np.size(target)) + '\\n'.join(self._format_subscripts(nan_subscripts, target)))\n    range_str = ('(' if open_lower_bound else '[') + str(lower_bound) + ', ' + str(upper_bound) + (')' if open_upper_bound else ']')\n    violations = np.less_equal(target, lower_bound) if open_lower_bound else np.less(target, lower_bound)\n    violations = np.logical_or(violations, np.greater_equal(target, upper_bound) if open_upper_bound else np.greater(target, upper_bound))\n    violation_subscripts = np_where(violations)\n    if np.size(violation_subscripts):\n        raise AssertionError('%d of the %d element(s) are outside the range %s. ' % (len(violation_subscripts[0]), np.size(target), range_str) + 'Subscript(s) and value(s) of the offending elements:\\n' + '\\n'.join(self._format_subscripts(violation_subscripts, target)))", "docstring": "Assert that elements in a Tensor are all in a given range.\n\nArgs:\ntarget: The numpy `ndarray`, or anything that can be converted into a\nnumpy `ndarray` (including Tensor).\nlower_bound: lower bound of the range\nupper_bound: upper bound of the range\nopen_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather\nthan the default >=)\nopen_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather\nthan the default <=)\n\nRaises:\nAssertionError:\nif the value tensor does not have an ordered numeric type (float* or\nint*), or\nif there are nan values, or\nif any of the elements do not fall in the specified range.", "source": "github-repos"}
{"code": "def get_structure_seqs(pdb_file, file_type):\n    \n\n    \n    \n\n    \n    my_structure = StructureIO(pdb_file)\n    model = my_structure.first_model\n\n    structure_seqs = {}\n\n    \n    for chain in model:\n        chain_seq = ''\n        tracker = 0\n\n        \n        for res in chain.get_residues():\n            \n            \n\n            \n            \n            \n            if Polypeptide.is_aa(res, standard=True):\n                full_id = res.get_full_id()\n                end_tracker = full_id[3][1]\n                i_code = full_id[3][2]\n                aa = Polypeptide.three_to_one(res.get_resname())\n\n                \n                if end_tracker != (tracker + 1):\n                    if i_code != ' ':\n                        chain_seq += aa\n                        tracker = end_tracker + 1\n                        continue\n                    else:\n                        chain_seq += 'X' * (end_tracker - tracker - 1)\n\n                chain_seq += aa\n                tracker = end_tracker\n\n            else:\n                continue\n\n        structure_seqs[chain.get_id()] = chain_seq\n\n    return structure_seqs", "docstring": "Get a dictionary of a PDB file's sequences.\n\nSpecial cases include:\n- Insertion codes. In the case of residue numbers like \"15A\", \"15B\", both residues are written out. Example: 9LPR\n- HETATMs. Currently written as an \"X\", or unknown amino acid.\n\nArgs:\npdb_file: Path to PDB file\n\nReturns:\ndict: Dictionary of:\n{chain_id: sequence}", "source": "juraj-google-style"}
{"code": "def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):\n    \n    return self._GetDateValues(\n        number_of_days, date_time_epoch.year, date_time_epoch.month,\n        date_time_epoch.day_of_month)", "docstring": "Determines date values.\n\nArgs:\nnumber_of_days (int): number of days since epoch.\ndate_time_epoch (DateTimeEpoch): date and time of the epoch.\n\nReturns:\ntuple[int, int, int]: year, month, day of month.", "source": "juraj-google-style"}
{"code": "def __init__(self, endpoint_name, sagemaker_session=None):\n        \n        super(PyTorchPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)", "docstring": "Initialize an ``PyTorchPredictor``.\n\nArgs:\nendpoint_name (str): The name of the endpoint to perform inference on.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one\nusing the default AWS configuration chain.", "source": "juraj-google-style"}
{"code": "def astimezone(self, tzinfo):\n    \n    \n    assert self.tzinfo is not None\n\n    tzinfo = _tzinfome(tzinfo)\n\n    d = self.asdatetime(naive=False).astimezone(tzinfo)\n    return type(self)(d)", "docstring": "Returns a version of this timestamp converted to the given timezone.\n\nArgs:\ntzinfo: Either a datetime.tzinfo object or a string (which will be looked\nup in pytz.\n\nReturns:\nA datetime_tz object in the given timezone.", "source": "juraj-google-style"}
{"code": "def noise_new(dim: int, h: float=NOISE_DEFAULT_HURST, l: float=NOISE_DEFAULT_LACUNARITY, random: Optional[tcod.random.Random]=None) -> tcod.noise.Noise:\n    return tcod.noise.Noise(dim, hurst=h, lacunarity=l, seed=random)", "docstring": "Return a new Noise instance.\n\nArgs:\ndim (int): Number of dimensions.  From 1 to 4.\nh (float): The hurst exponent.  Should be in the 0.0-1.0 range.\nl (float): The noise lacunarity.\nrandom (Optional[Random]): A Random instance, or None.\n\nReturns:\nNoise: The new Noise instance.", "source": "codesearchnet"}
{"code": "def RepackTemplate(self, template_path, output_dir, upload=False, token=None, sign=False, context=None, signed_template=False):\n    orig_config = config.CONFIG\n    repack_config = RepackConfig()\n    print(('Repacking template: %s' % template_path))\n    config.CONFIG = repack_config.GetConfigFromTemplate(template_path)\n    result_path = None\n    try:\n        repack_context = config.CONFIG['Template.build_context']\n        if context:\n            repack_context.extend(context)\n        output_path = os.path.join(output_dir, config.CONFIG.Get('ClientRepacker.output_filename', context=repack_context))\n        print(('Using context: %s and labels: %s' % (repack_context, config.CONFIG.Get('Client.labels', context=repack_context))))\n        try:\n            signer = None\n            if sign:\n                signer = self.GetSigner(repack_context)\n            builder_obj = self.GetRepacker(context=repack_context, signer=signer)\n            builder_obj.signed_template = signed_template\n            result_path = builder_obj.MakeDeployableBinary(template_path, output_path)\n        except Exception:\n            logging.exception('Repacking template %s failed:', template_path)\n        if result_path:\n            print(('Repacked into %s' % result_path))\n            if upload:\n                from grr_response_server import maintenance_utils\n                client_platform = config.CONFIG.Get('Client.platform', context=repack_context)\n                repack_basename = config.CONFIG.Get('ClientRepacker.output_basename', context=repack_context)\n                repack_extension = config.CONFIG.Get('ClientBuilder.output_extension', context=repack_context)\n                repack_filename = (repack_basename + repack_extension)\n                binary_urn = rdfvalue.RDFURN('aff4:/config/executables').Add(client_platform).Add('installers').Add(repack_filename)\n                maintenance_utils.UploadSignedConfigBlob(open(result_path, 'rb').read(((100 * 1024) * 1024)), binary_urn, client_context=repack_context, token=token)\n        else:\n            print(('Failed to repack %s.' % template_path))\n    finally:\n        config.CONFIG = orig_config\n    return result_path", "docstring": "Repack binaries based on the configuration.\n\nWe repack all templates in the templates directory. We expect to find only\nfunctioning templates, all other files should be removed. Each template\ncontains a build.yaml that specifies how it was built and how it should be\nrepacked.\n\nArgs:\ntemplate_path: template path string\noutput_dir: Output files will be put in this directory.\nupload: If specified we also upload the repacked binary into the\ntoken: Token to use when uploading to the datastore.\nsign: If true, we want to digitally sign the installer.\ncontext: Array of context strings\nsigned_template: If true, the libraries in the template are already\nsigned. This is only used for windows when repacking the template\nmultiple times.\n\nReturns:\nA list of output installers generated.", "source": "codesearchnet"}
{"code": "def upload(self, title, description='', keywords='', developer_tags=None, access_control=AccessControl.Public):\n    if (not self.authenticated):\n        raise ApiError(_('Authentication is required'))\n    my_media_group = gdata.media.Group(title=gdata.media.Title(text=title), description=gdata.media.Description(description_type='plain', text=description), keywords=gdata.media.Keywords(text=keywords), category=[gdata.media.Category(text='Autos', scheme='http:\n    extension = self._access_control(access_control, my_media_group)\n    video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group, extension_elements=extension)\n    if developer_tags:\n        video_entry.AddDeveloperTags(developer_tags)\n    response = Api.yt_service.GetFormUploadToken(video_entry)\n    post_url = response[0]\n    youtube_token = response[1]\n    return {'post_url': post_url, 'youtube_token': youtube_token}", "docstring": "Browser based upload\nCreates the video entry and meta data to initiate a browser upload\n\nAuthentication is needed\n\nParams:\ntitle: string\ndescription: string\nkeywords: comma seperated string\ndeveloper_tags: tuple\n\nReturn:\ndict contains post_url and youtube_token. i.e { 'post_url': post_url, 'youtube_token': youtube_token }\n\nRaises:\nApiError: on no authentication", "source": "codesearchnet"}
{"code": "def get_modname_from_modpath(module_fpath):\n    \n    modsubdir_list = get_module_subdir_list(module_fpath)\n    modname = '.'.join(modsubdir_list)\n    modname = modname.replace('.__init__', '').strip()\n    modname = modname.replace('.__main__', '').strip()\n    return modname", "docstring": "returns importable name from file path\n\nget_modname_from_modpath\n\nArgs:\nmodule_fpath (str): module filepath\n\nReturns:\nstr: modname\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> import utool as ut\n>>> module_fpath = ut.util_path.__file__\n>>> modname = ut.get_modname_from_modpath(module_fpath)\n>>> result = modname\n>>> print(result)\nutool.util_path", "source": "juraj-google-style"}
{"code": "def get_and_setattr(cls, id, **kwargs):\n        \n        model = cls.get(id)\n        for k, v in cls._preprocess_params(kwargs).items():\n            setattr(model, k, v)\n        return model", "docstring": "Returns an updated instance of the service's model class.\n\nArgs:\nmodel: the model to update\n**kwargs: update parameters", "source": "juraj-google-style"}
{"code": "def __add_action(self, relative_directory, action):\n    generator_action_container = self.__actions.retrieve_element_or_default(relative_directory, None)\n    if (generator_action_container is None):\n        generator_action_container = GeneratorActionContainer()\n        generator_action_container.add_generator_action(action)\n        self.__actions.add_element(location=relative_directory, element=generator_action_container)\n    else:\n        generator_action_container.add_generator_action(action)", "docstring": "Add action into the dictionary of actions.\n\nArgs:\nrelative_directory:\naction:", "source": "codesearchnet"}
{"code": "class OneFormerTransformerDecoderOutput(BaseModelOutput):\n    object_queries: Optional[torch.FloatTensor] = None\n    contrastive_logits: Optional[torch.FloatTensor] = None\n    prediction_masks: Optional[torch.FloatTensor] = None\n    prediction_class: Optional[torch.FloatTensor] = None\n    auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None", "docstring": "Base class for outputs of the Transformer decoder. This class adds attributes for class predictions, mask\npredictions and contrastive logits to BaseModelOutputWithCrossAttentions.\n\nArgs:\nobject_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):\nQueries representation for the region proposals.\ncontrastive_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):\nQueries representation for the contrastive loss.\nprediction_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):\nMask predictions from last layer of the transformer decoder.\nprediction_class (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):\nClass predictions from last layer of the transformer decoder.\nauxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):\nTuple of class and mask predictions from each layer of the transformer decoder.", "source": "github-repos"}
{"code": "def _get_clang_major_version(path_to_clang: str) -> int:\n    logging.info('Running echo __clang_major__ | %s -E -P -', path_to_clang)\n    clang_version_proc = subprocess.run([path_to_clang, '-E', '-P', '-'], input='__clang_major__', check=True, capture_output=True, text=True)\n    major_version = int(clang_version_proc.stdout)\n    logging.info('%s reports major version %s.', path_to_clang, major_version)\n    return major_version", "docstring": "Gets the major version of the clang at `path_to_clang`.\n\nArgs:\npath_to_clang: Path to a clang executable\n\nReturns:\nThe major version.", "source": "github-repos"}
{"code": "def create_transaction(self, to_account):\n        \n        from_account = self.statement_import.bank_account\n\n        transaction = Transaction.objects.create()\n        Leg.objects.create(\n            transaction=transaction, account=from_account, amount=+(self.amount * -1)\n        )\n        Leg.objects.create(transaction=transaction, account=to_account, amount=-(self.amount * -1))\n\n        transaction.date = self.date\n        transaction.save()\n\n        self.transaction = transaction\n        self.save()\n        return transaction", "docstring": "Create a transaction for this statement amount and account, into to_account\n\nThis will also set this StatementLine's ``transaction`` attribute to the newly\ncreated transaction.\n\nArgs:\nto_account (Account): The account the transaction is into / out of.\n\nReturns:\nTransaction: The newly created (and committed) transaction.", "source": "juraj-google-style"}
{"code": "def push_image(registry, image):\n    \n    \n    values = {\n        'registry': registry,\n        'image': image['name'],\n    }\n\n    log.info(\"Pushing <33>{registry}<35>/{image}\".format(**values))\n    shell.run('docker push {registry}/{image}'.format(**values))", "docstring": "Push the given image to selected repository.\n\nArgs:\nregistry (str):\nThe name of the registry we're pushing to. This is the address of\nthe repository without the protocol specification (no http(s)://)\nimage (dict[str, Any]):\nThe dict containing the information about the image. This is the\nsame dictionary as defined in DOCKER_IMAGES variable.", "source": "juraj-google-style"}
{"code": "def get_available_versions(self, project_name):\n    available_versions = self.pypi_client.package_releases(project_name)\n    if (not available_versions):\n        available_versions = self.pypi_client.package_releases(project_name.capitalize())\n    return dict(((self._parse_version(version), version) for version in available_versions))", "docstring": "Query PyPI to see if package has any available versions.\n\nArgs:\nproject_name (str): The name the project on PyPI.\n\nReturns:\ndict: Where keys are tuples of parsed versions and values are the\nversions returned by PyPI.", "source": "codesearchnet"}
{"code": "def CreateUnit(self, parent=None, value=None, bid_amount=None):\n    \n    unit = {\n        'xsi_type': 'ProductPartition',\n        'partitionType': 'UNIT'\n    }\n\n    \n    if parent is not None:\n      unit['parentCriterionId'] = parent['id']\n      unit['caseValue'] = value\n\n    if bid_amount is not None and bid_amount > 0:\n      \n      \n      bidding_strategy_configuration = {\n          'bids': [{\n              'xsi_type': 'CpcBid',\n              'bid': {\n                  'xsi_type': 'Money',\n                  'microAmount': str(bid_amount)\n              }\n          }]\n      }\n\n      adgroup_criterion = {\n          'xsi_type': 'BiddableAdGroupCriterion',\n          'biddingStrategyConfiguration': bidding_strategy_configuration\n      }\n    else:\n      adgroup_criterion = {\n          'xsi_type': 'NegativeAdGroupCriterion'\n      }\n\n    adgroup_criterion['adGroupId'] = self.adgroup_id\n    adgroup_criterion['criterion'] = unit\n\n    self.CreateAddOperation(adgroup_criterion)\n\n    return unit", "docstring": "Creates a unit node.\n\nArgs:\nparent: The node that should be this node's parent.\nvalue: The value being partitioned on.\nbid_amount: The amount to bid for matching products, in micros.\nReturns:\nA new unit node.", "source": "juraj-google-style"}
{"code": "def postings(self, quarter, stats_counter=None):\n    logging.info('Finding postings for %s', quarter)\n    for posting in self._iter_postings(quarter):\n        transformed = self._transform(posting)\n        transformed['id'] = '{}_{}'.format(self.partner_id, self._id(posting))\n        if stats_counter:\n            stats_counter.track(input_document=posting, output_document=transformed)\n        (yield transformed)", "docstring": "Yield job postings in common schema format\n\nArgs:\nquarter (str) The quarter, in format '2015Q1'\nstats_counter (object, optional) A counter that can track both\ninput and output documents using a 'track' method.", "source": "codesearchnet"}
{"code": "def getOrderedLinks(self, session):\n    streamLinks = session.query(StreamLink).filter((StreamLink.channelInputFile == self)).order_by(StreamLink.linkNumber).all()\n    return streamLinks", "docstring": "Retrieve the links in the order of the link number.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\n\nReturns:\nlist: A list of :class:`.StreamLink` objects.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    msiecf_file = pymsiecf.file()\n    msiecf_file.set_ascii_codepage(parser_mediator.codepage)\n\n    try:\n      msiecf_file.open_file_object(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open file with error: {0!s}'.format(exception))\n      return\n\n    try:\n      self._ParseItems(parser_mediator, msiecf_file)\n    finally:\n      msiecf_file.close()", "docstring": "Parses a MSIE Cache File (MSIECF) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def convert_constant(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting constant ...')\n\n    params_list = params['value'].numpy()\n\n    def target_layer(x, value=params_list):\n        return tf.constant(value.tolist(), shape=value.shape)\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name + '_np'] = params_list  \n    layers[scope_name] = lambda_layer(layers[list(layers.keys())[0]])", "docstring": "Convert constant layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:\n    store = crypto.X509Store()\n    for cert in certs_chain:\n        store.add_cert(cert)\n    default_verify_paths = ssl.get_default_verify_paths()\n    default_verify_file = default_verify_paths.cafile\n    default_verify_file = (Path(default_verify_file).resolve() if default_verify_file else None)\n    default_verify_path = default_verify_paths.capath\n    default_verify_path = (Path(default_verify_path).resolve() if default_verify_path else None)\n    ca_files = ([ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else [])\n    if default_verify_file:\n        ca_files.append(default_verify_file)\n    for ca_file in ca_files:\n        ca_file: Path\n        if ca_file.is_file():\n            with ca_file.open('r', encoding='ascii') as crt_f:\n                ca_certs_txt = crt_f.read()\n                ca_certs = extract_certs(ca_certs_txt)\n                for cert in ca_certs:\n                    store.add_cert(cert)\n    ssl_context = ssl.create_default_context()\n    der_certs = ssl_context.get_ca_certs(binary_form=True)\n    pem_certs = '\\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])\n    ca_certs = extract_certs(pem_certs)\n    for ca_cert in ca_certs:\n        store.add_cert(ca_cert)\n    store_context = crypto.X509StoreContext(store, amazon_cert)\n    try:\n        store_context.verify_certificate()\n        result = True\n    except crypto.X509StoreContextError:\n        result = False\n    return result", "docstring": "Verifies if Amazon and additional certificates creates chain of trust to a root CA.\n\nArgs:\ncerts_chain: List of pycrypto X509 intermediate certificates from signature chain URL.\namazon_cert: Pycrypto X509 Amazon certificate.\n\nReturns:\nresult: True if verification was successful, False if not.", "source": "codesearchnet"}
{"code": "def ffn_expert_fn(input_size, hidden_sizes, output_size, hidden_activation=tf.nn.relu):\n\n    def my_fn(x):\n        layer_sizes = (([input_size] + hidden_sizes) + [output_size])\n        for i in range((1 + len(hidden_sizes))):\n            w = tf.get_variable(('w_%d' % i), layer_sizes[i:(i + 2)], tf.float32)\n            x = tf.matmul(x, w)\n            if (i < len(hidden_sizes)):\n                x = hidden_activation(x)\n            if (layer_sizes[i] != input_size):\n                x *= ((layer_sizes[i] / float(input_size)) ** (- 0.5))\n        return x\n    return my_fn", "docstring": "Returns a function that creates a feed-forward network.\n\nUse this function to create the expert_fn argument to distributed_moe.\n\nArgs:\ninput_size: an integer\nhidden_sizes: a list of integers\noutput_size: an integer\nhidden_activation: a unary function.\n\nReturns:\na unary function", "source": "codesearchnet"}
{"code": "def Sample(self, profile_name, used_memory):\n    \n    sample_time = time.time()\n    sample = '{0:f}\\t{1:s}\\t{2:d}\\n'.format(\n        sample_time, profile_name, used_memory)\n    self._WritesString(sample)", "docstring": "Takes a sample for profiling.\n\nArgs:\nprofile_name (str): name of the profile to sample.\nused_memory (int): amount of used memory in bytes.", "source": "juraj-google-style"}
{"code": "def DeserializeFromBufer(buffer, offset=0):\n        \n        mstream = StreamManager.GetStream(buffer)\n        reader = BinaryReader(mstream)\n        tx = Transaction.DeserializeFrom(reader)\n\n        StreamManager.ReleaseStream(mstream)\n        return tx", "docstring": "Deserialize object instance from the specified buffer.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\noffset: UNUSED\n\nReturns:\nTransaction:", "source": "juraj-google-style"}
{"code": "class FastSpeech2ConformerEncoder(nn.Module):\n\n    def __init__(self, config: FastSpeech2ConformerConfig, module_config, use_encoder_input_layer=False):\n        super().__init__()\n        self.embed = None\n        if use_encoder_input_layer:\n            self.embed = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0)\n        self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config)\n        self.conformer_layers = nn.ModuleList([FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config['layers'])])\n\n    def forward(self, input_tensor: torch.LongTensor, attention_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=False, return_dict: Optional[bool]=None):\n        \n        feature_representation = input_tensor\n        if self.embed is not None:\n            feature_representation = self.embed(feature_representation)\n        hidden_states, pos_emb = self.pos_enc(feature_representation)\n        all_hidden_states = () if output_hidden_states else None\n        all_self_attentions = () if output_attentions else None\n        for conformer_layer in self.conformer_layers:\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n            layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_self_attentions = all_self_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)", "docstring": "FastSpeech2ConformerEncoder encoder module.\n\nArgs:\nconfig (`FastSpeech2ConformerConfig`):\nFastSpeech2ConformerConfig instance.\nmodule_config (`dict`):\nDictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.\nuse_encoder_input_layer (`bool`, *optional*, defaults to `False`):\nInput layer type.", "source": "github-repos"}
{"code": "async def leave(self):\n    is_group_conversation = (self._conversation.type == hangouts_pb2.CONVERSATION_TYPE_GROUP)\n    try:\n        if is_group_conversation:\n            (await self._client.remove_user(hangouts_pb2.RemoveUserRequest(request_header=self._client.get_request_header(), event_request_header=self._get_event_request_header())))\n        else:\n            (await self._client.delete_conversation(hangouts_pb2.DeleteConversationRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), delete_upper_bound_timestamp=parsers.to_timestamp(datetime.datetime.now(tz=datetime.timezone.utc)))))\n    except exceptions.NetworkError as e:\n        logger.warning('Failed to leave conversation: {}'.format(e))\n        raise", "docstring": "Leave this conversation.\n\nRaises:\n.NetworkError: If conversation cannot be left.", "source": "codesearchnet"}
{"code": "def _DrawTrips(self,triplist,colpar=\"\"):\n    \n\n    stations = []\n    if not self._stations and triplist:\n      self._stations = self._CalculateYLines(self._TravelTimes(triplist))\n      if not self._stations:\n        self._AddWarning(\"Failed to use traveltimes for graph\")\n        self._stations = self._CalculateYLines(self._Uniform(triplist))\n        if not self._stations:\n          self._AddWarning(\"Failed to calculate station distances\")\n          return\n\n    stations = self._stations\n    tmpstrs = []\n    servlist = []\n    for t in triplist:\n      if not colpar:\n        if t.service_id not in servlist:\n          servlist.append(t.service_id)\n        shade = int(servlist.index(t.service_id) * (200/len(servlist))+55)\n        color = \"\n      else:\n        color=colpar\n\n      start_offsets = [0]\n      first_stop = t.GetTimeStops()[0]\n\n      for j,freq_offset in enumerate(start_offsets):\n        if j>0 and not colpar:\n          color=\"purple\"\n        scriptcall = 'onmouseover=\"LineClick(\\'%s\\',\\'Trip %s starting %s\\')\"' % (t.trip_id,\n            t.trip_id, transitfeed.FormatSecondsSinceMidnight(t.GetStartTime()))\n        tmpstrhead = '<polyline class=\"T\" id=\"%s\" stroke=\"%s\" %s points=\"' % \\\n          (str(t.trip_id),color, scriptcall)\n        tmpstrs.append(tmpstrhead)\n\n        for i, s in enumerate(t.GetTimeStops()):\n          arr_t = s[0]\n          dep_t = s[1]\n          if arr_t is None or dep_t is None:\n            continue\n          arr_x = int(arr_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset\n          dep_x = int(dep_t/3600.0 * self._hour_grid) - self._hour_grid * self._offset\n          tmpstrs.append(\"%s,%s \" % (int(arr_x+20), int(stations[i]+20)))\n          tmpstrs.append(\"%s,%s \" % (int(dep_x+20), int(stations[i]+20)))\n        tmpstrs.append('\" />')\n    return \"\".join(tmpstrs)", "docstring": "Generates svg polylines for each transit trip.\n\nArgs:\n# Class Trip is defined in transitfeed.py\n[Trip, Trip, ...]\n\nReturns:\n# A string containing a polyline tag for each trip\n' <polyline class=\"T\" stroke=\"#336633\" points=\"433,0 ...'", "source": "juraj-google-style"}
{"code": "def _parse_frange_part(frange):\n        \n        match = FRANGE_RE.match(frange)\n        if not match:\n            msg = 'Could not parse \"{0}\": did not match {1}'\n            raise ParseException(msg.format(frange, FRANGE_RE.pattern))\n        start, end, modifier, chunk = match.groups()\n        start = int(start)\n        end = int(end) if end is not None else start\n\n        if end > start and chunk is not None and int(chunk) < 0:\n            msg = 'Could not parse \"{0}: chunk can not be negative'\n            raise ParseException(msg.format(frange))\n\n        chunk = abs(int(chunk)) if chunk is not None else 1\n        \n        if chunk == 0:\n            msg = 'Could not parse \"{0}\": chunk cannot be 0'\n            raise ParseException(msg.format(frange))\n        return start, end, modifier, chunk", "docstring": "Internal method: parse a discrete frame range part.\n\nArgs:\nfrange (str): single part of a frame range as a string\n(ie \"1-100x5\")\n\nReturns:\ntuple: (start, end, modifier, chunk)\n\nRaises:\n:class:`.ParseException`: if the frame range can\nnot be parsed", "source": "juraj-google-style"}
{"code": "def __init__(self):\n        \n        super(JLinkSWOSpeedInfo, self).__init__()\n        self.SizeofStruct = ctypes.sizeof(self)\n        self.Interface = enums.JLinkSWOInterfaces.UART", "docstring": "Initializes the J-Link SWO Speed Information instance.\n\nArgs:\nself (JLinkSWOSpeedInfo): the ``JLinkSWOSpeedInfo`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def __init__(self, save_path):\n    reader = py_checkpoint_reader.NewCheckpointReader(save_path)\n    try:\n        object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n    except errors_impl.NotFoundError as not_found_error:\n        raise ValueError(f'The specified checkpoint \"{save_path}\" does not appear to be object-based (saved with TF2) since it is missing the key \"{base.OBJECT_GRAPH_PROTO_KEY}\". Likely it was created with the TF1 name-based saver and does not contain an object dependency graph.') from not_found_error\n    object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n    object_graph_proto.ParseFromString(object_graph_string)\n    self._object_graph_proto = object_graph_proto", "docstring": "Configure the checkpoint view.\n\nArgs:\nsave_path: The path to the checkpoint.\n\nRaises:\nValueError: If the save_path does not lead to a TF2 checkpoint.", "source": "github-repos"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Update an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\nrequest: (CloudbuildProjectsGithubEnterpriseConfigsPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def explore_package(module_name):\n    \n\n    packages = []\n    loader = pkgutil.get_loader(module_name)\n    for sub_module in pkgutil.walk_packages([os.path.dirname(loader.get_filename())],\n                                            prefix=module_name + '.'):\n        _, sub_module_name, _ = sub_module\n        packages.append(sub_module_name)\n\n    return packages", "docstring": "returns all the packages in the module\n\nArgs:\nmodule_name: name of module\n\nReturns:", "source": "juraj-google-style"}
{"code": "def glyph_has_ink(font: TTFont, name: Text) -> bool:\n    if ('glyf' in font):\n        return ttf_glyph_has_ink(font, name)\n    elif (('CFF ' in font) or ('CFF2' in font)):\n        return cff_glyph_has_ink(font, name)\n    else:\n        raise Exception(\"Could not find 'glyf', 'CFF ', or 'CFF2' table.\")", "docstring": "Checks if specified glyph has any ink.\n\nThat is, that it has at least one defined contour associated.\nComposites are considered to have ink if any of their components have ink.\nArgs:\nfont:       the font\nglyph_name: The name of the glyph to check for ink.\nReturns:\nTrue if the font has at least one contour associated with it.", "source": "codesearchnet"}
{"code": "def code_challenge(verifier):\n    \n    digest = hashlib.sha256(verifier).digest()\n    return base64.urlsafe_b64encode(digest).rstrip(b'=')", "docstring": "Creates a 'code_challenge' as described in section 4.2 of RFC 7636\nby taking the sha256 hash of the verifier and then urlsafe\nbase64-encoding it.\n\nArgs:\nverifier: bytestring, representing a code_verifier as generated by\ncode_verifier().\n\nReturns:\nBytestring, representing a urlsafe base64-encoded sha256 hash digest,\nwithout '=' padding.", "source": "juraj-google-style"}
{"code": "def slice_list(in_list, lens):\n    \n    if not isinstance(lens, list):\n        raise TypeError('\"indices\" must be a list of integers')\n    elif sum(lens) != len(in_list):\n        raise ValueError(\n            'sum of lens and list length does not match: {} != {}'.format(\n                sum(lens), len(in_list)))\n    out_list = []\n    idx = 0\n    for i in range(len(lens)):\n        out_list.append(in_list[idx:idx + lens[i]])\n        idx += lens[i]\n    return out_list", "docstring": "Slice a list into several sub lists by a list of given length.\n\nArgs:\nin_list (list): The list to be sliced.\nlens(int or list): The expected length of each out list.\n\nReturns:\nlist: A list of sliced list.", "source": "juraj-google-style"}
{"code": "def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):\n    thrown_out_count = 0\n    source_target_output = []\n    if (not max_equal_to_diff_ratio):\n        return (source_target_input, thrown_out_count)\n    for src_tgt in source_target_input:\n        opcodes = fast_match_sequences(*src_tgt)\n        diff_char_count = 0\n        equal_char_count = 0\n        for (tag, i1, i2, j1, j2) in opcodes:\n            if (tag == 'diff'):\n                diff_char_count += max((i2 - i1), (j2 - j1))\n            else:\n                equal_char_count += (i2 - i1)\n        if (diff_char_count <= (max_equal_to_diff_ratio * equal_char_count)):\n            source_target_output.append(src_tgt)\n        else:\n            thrown_out_count += 1\n    return (source_target_output, thrown_out_count)", "docstring": "Filter out examples that exceed max_edit_ratio between source and target.\n\nArgs:\nsource_target_input:     a list of [source, target] pairs\nmax_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars\nbetween source and target\n\nReturns:\nsource_target_output:    filtered subset of [source, target] input pairs\nthrown_out_count:        number of examples filtered out", "source": "codesearchnet"}
{"code": "def get_config_multiline_option(parser: ConfigParser,\n                                section: str,\n                                option: str,\n                                default: List[str] = None) -> List[str]:\n    \n    default = default or []\n    if not parser.has_section(section):\n        raise ValueError(\"config missing section: \" + section)\n    try:\n        multiline = parser.get(section, option)\n        values = [x.strip() for x in multiline.splitlines() if x.strip()]\n        return values\n    except NoOptionError:\n        return default", "docstring": "Retrieves a multi-line string value from a parser as a list of strings\n(one per line, ignoring blank lines).\n\nArgs:\nparser: instance of :class:`ConfigParser`\nsection: section name within config file\noption: option (variable) name within that section\ndefault: value to return if option is absent (``None`` is mapped to\n``[]``)\n\nReturns:\nlist of strings\n\nRaises:\nValueError: if the section is absent", "source": "juraj-google-style"}
{"code": "def CreateProductPartition(client, adgroup_id):\n  \n  ad_group_criterion_service = client.GetService('AdGroupCriterionService',\n                                                 'v201809')\n  helper = ProductPartitionHelper(adgroup_id)\n  root = helper.CreateSubdivision()\n\n  new_product_canonical_condition = {\n      'xsi_type': 'ProductCanonicalCondition',\n      'condition': 'NEW'\n  }\n\n  used_product_canonical_condition = {\n      'xsi_type': 'ProductCanonicalCondition',\n      'condition': 'USED'\n  }\n\n  other_product_canonical_condition = {\n      'xsi_type': 'ProductCanonicalCondition',\n  }\n\n  helper.CreateUnit(root, new_product_canonical_condition)\n  helper.CreateUnit(root, used_product_canonical_condition)\n  helper.CreateUnit(root, other_product_canonical_condition)\n\n  result = ad_group_criterion_service.mutate(helper.operations)\n  return result['value']", "docstring": "Creates a ProductPartition tree for the given AdGroup ID.\n\nArgs:\nclient: an AdWordsClient instance.\nadgroup_id: a str AdGroup ID.\n\nReturns:\nThe ProductPartition tree as a sudsobject.", "source": "juraj-google-style"}
{"code": "def insert_flux_bias(cur, chain, system, flux_bias, chain_strength, encoded_data=None):\n    if (encoded_data is None):\n        encoded_data = {}\n    insert_chain(cur, chain, encoded_data)\n    insert_system(cur, system, encoded_data)\n    if ('flux_bias' not in encoded_data):\n        encoded_data['flux_bias'] = _encode_real(flux_bias)\n    if ('chain_strength' not in encoded_data):\n        encoded_data['chain_strength'] = _encode_real(chain_strength)\n    if ('insert_time' not in encoded_data):\n        encoded_data['insert_time'] = datetime.datetime.now()\n    insert = '\\n        INSERT OR REPLACE INTO flux_bias(chain_id, system_id, insert_time, flux_bias, chain_strength)\\n        SELECT\\n            chain.id,\\n            system.id,\\n            :insert_time,\\n            :flux_bias,\\n            :chain_strength\\n        FROM chain, system\\n        WHERE\\n            chain.chain_length = :chain_length AND\\n            chain.nodes = :nodes AND\\n            system.system_name = :system_name;\\n        '\n    cur.execute(insert, encoded_data)", "docstring": "Insert a flux bias offset into the cache.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\nchain (iterable):\nA collection of nodes. Chains in embedding act as one node.\n\nsystem (str):\nThe unique name of a system.\n\nflux_bias (float):\nThe flux bias offset associated with the given chain.\n\nchain_strength (float):\nThe magnitude of the negative quadratic bias that induces the given chain in an Ising\nproblem.\n\nencoded_data (dict, optional):\nIf a dictionary is provided, it will be populated with the serialized data. This is\nuseful for preventing encoding the same information many times.", "source": "codesearchnet"}
{"code": "def resolve_artifacts_by_builder_compat(\n            self, package_names, builder_name, dependencies=False):\n        \n\n        paths = self.compat_builders.get(builder_name)\n        if not paths:\n            \n            return\n\n        resolver = (\n            \n            find_packages_requirements_dists\n            if dependencies else\n            \n            pkg_names_to_dists\n        )\n        for distribution in resolver(package_names):\n            path = paths.get(distribution.project_name)\n            if path:\n                yield path", "docstring": "Yield the list of paths to the artifacts in the order of the\ndependency resolution\n\nArguments:\n\npackage_names\nThe names of the packages to probe the dependency graph, to\nbe provided as a list of strings.\nartifact_name\nThe exact name of the artifact.\ndependencies\nTrace dependencies.  Default is off.\n\nReturns the path of where the artifact should be if it has been\ndeclared, otherwise None.", "source": "juraj-google-style"}
{"code": "def __init__(self, **options):\n        \n        self.options = options\n        self.logging_level = logging.DEBUG\n        self.setup_logging()\n        self.logger = Logger.get_logger(__name__)\n        self.results = []", "docstring": "Initialize application with command line options.\n\nArgs:\noptions (ApplicationOptions): given command line options.", "source": "juraj-google-style"}
{"code": "def _IsWindowsDrivePathSegment(cls, path_segment):\n    \n    if (len(path_segment) == 2 and path_segment[1] == ':' and\n        path_segment[0].isalpha()):\n      return True\n\n    path_segment = path_segment.upper()\n    return path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%')", "docstring": "Determines if the path segment contains a Windows Drive indicator.\n\nA drive indicator can be a drive letter or %SystemDrive%.\n\nArgs:\npath_segment (str): path segment.\n\nReturns:\nbool: True if the path segment contains a Windows Drive indicator.", "source": "juraj-google-style"}
{"code": "def bulk_lookup(self, api_name, keys):\n    cached_data = {}\n    for key in keys:\n        value = self.lookup_value(api_name, key)\n        if (value is not None):\n            cached_data[key] = value\n    return cached_data", "docstring": "Perform lookup on an enumerable of keys.\n\nArgs:\napi_name: a string name of the API. Keys and values are segmented by api_name.\nkeys: an enumerable of string keys.", "source": "codesearchnet"}
{"code": "def __init__(self, states, internals, actions, include_next_states, capacity, scope='replay', summary_labels=None):\n        \n        super(Replay, self).__init__(\n            states=states,\n            internals=internals,\n            actions=actions,\n            include_next_states=include_next_states,\n            capacity=capacity,\n            scope=scope,\n            summary_labels=summary_labels\n        )", "docstring": "Replay memory.\n\nArgs:\nstates (dict): States specification.\ninternals (dict): Internal states specification.\nactions (dict): Actions specification.\ninclude_next_states (bool): Include subsequent state if true.\ncapacity (int): Memory capacity (number of state/internals/action/(next-state)? records).", "source": "juraj-google-style"}
{"code": "def index_by_id(cls, target_id, resources):\n        \n        for index in range(len(resources)):\n            if cls.id_by_index(index, resources) == target_id:\n                return index\n\n        raise AssertionError", "docstring": "Helper method to fetch the index of a resource by its id or address\n\nArgs:\nresources (list of objects): The resources to be paginated\ntarget_id (string): The address or header_signature of the resource\n\nReturns:\ninteger: The index of the target resource\n\nRaises:\nAssertionError: Raised if the target is not found", "source": "juraj-google-style"}
{"code": "def import_entities(self, entities):\n        \n        edata = Entity.create_payload(entities)\n        r = fapi.upload_entities(self.namespace, self.name,\n                                 edata, self.api_url)\n        fapi._check_response_code(r, 201)", "docstring": "Upload entity objects.\n\nArgs:\nentities: iterable of firecloud.Entity objects.", "source": "juraj-google-style"}
{"code": "def document(self, name, file_name, owner=None, **kwargs):\n    return Document(self.tcex, name, file_name, owner=owner, **kwargs)", "docstring": "Create the Document TI object.\n\nArgs:\nowner:\nname:\nfile_name:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def wait_for_fresh_games(self, poll_interval=15.0):\n    wait_until_game = self.read_wait_cell()\n    if (not wait_until_game):\n        return\n    latest_game = self.latest_game_number\n    last_latest = latest_game\n    while (latest_game < wait_until_game):\n        utils.dbg('Latest game {} not yet at required game {} (+{}, {:0.3f} games/sec)'.format(latest_game, wait_until_game, (latest_game - last_latest), ((latest_game - last_latest) / poll_interval)))\n        time.sleep(poll_interval)\n        last_latest = latest_game\n        latest_game = self.latest_game_number", "docstring": "Block caller until required new games have been played.\n\nArgs:\npoll_interval:  number of seconds to wait between checks\n\nIf the cell `table_state=metadata:wait_for_game_number` exists,\nthen block the caller, checking every `poll_interval` seconds,\nuntil `table_state=metadata:game_counter is at least the value\nin that cell.", "source": "codesearchnet"}
{"code": "def __init__(\n            self, symbol: str = '', exchange: str = '', currency: str = '',\n            **kwargs):\n        \n        Contract.__init__(\n            self, secType='STK', symbol=symbol,\n            exchange=exchange, currency=currency, **kwargs)", "docstring": "Stock contract.\n\nArgs:\nsymbol: Symbol name.\nexchange: Destination exchange.\ncurrency: Underlying currency.", "source": "juraj-google-style"}
{"code": "def verify_file_exists(file_name, file_location):\n    \n    return __os.path.isfile(__os.path.join(file_location, file_name))", "docstring": "Function to verify if a file exists\nArgs:\nfile_name: The name of file to check\nfile_location: The location of the file, derive from the os module\n\nReturns: returns boolean True or False", "source": "juraj-google-style"}
{"code": "def main(argv: Optional[Sequence[str]] = None) -> None:\n    \n    args = parse_arguments(argv=argv)\n\n    if args.logging:\n        logging.basicConfig(level=logging.DEBUG)\n\n    handle_skip()\n\n    action = args.action\n    request = parse_request()\n    LOGGER.debug('Received action %s with request:\\n%s',\n                 action, request)\n\n    try:\n        mapping = parse_mapping(args.mapping)\n    except Exception as error:\n        LOGGER.critical('Unable to parse mapping file', exc_info=True)\n        print(  \n            'Unable to parse mapping file: {error}'.format(\n                error=error),\n            file=sys.stderr)\n        sys.exit(1)\n\n    if action == 'get':\n        get_password(request, mapping)\n    else:\n        LOGGER.info('Action %s is currently not supported', action)\n        sys.exit(1)", "docstring": "Start the pass-git-helper script.\n\nArgs:\nargv:\nIf not ``None``, use the provided command line arguments for\nparsing. Otherwise, extract them automatically.", "source": "juraj-google-style"}
{"code": "def find_model_patch_tracks(self):\n    self.model_grid.load_data()\n    tracked_model_objects = []\n    model_objects = []\n    if (self.model_grid.data is None):\n        print('No model output found')\n        return tracked_model_objects\n    min_orig = self.model_ew.min_thresh\n    max_orig = self.model_ew.max_thresh\n    data_increment_orig = self.model_ew.data_increment\n    self.model_ew.min_thresh = 0\n    self.model_ew.data_increment = 1\n    self.model_ew.max_thresh = 100\n    for (h, hour) in enumerate(self.hours):\n        print('Finding {0} objects for run {1} Hour: {2:02d}'.format(self.ensemble_member, self.run_date.strftime('%Y%m%d%H'), hour))\n        if (self.mask is not None):\n            model_data = (self.model_grid.data[h] * self.mask)\n        else:\n            model_data = self.model_grid.data[h]\n        model_data[:self.patch_radius] = 0\n        model_data[(- self.patch_radius):] = 0\n        model_data[(:, :self.patch_radius)] = 0\n        model_data[(:, (- self.patch_radius):)] = 0\n        scaled_data = np.array(rescale_data(model_data, min_orig, max_orig))\n        hour_labels = label_storm_objects(scaled_data, 'ew', self.model_ew.min_thresh, self.model_ew.max_thresh, min_area=self.size_filter, max_area=self.model_ew.max_size, max_range=self.model_ew.delta, increment=self.model_ew.data_increment, gaussian_sd=self.gaussian_window)\n        model_objects.extend(extract_storm_patches(hour_labels, model_data, self.model_grid.x, self.model_grid.y, [hour], dx=self.model_grid.dx, patch_radius=self.patch_radius))\n        for model_obj in model_objects[(- 1)]:\n            dims = model_obj.timesteps[(- 1)].shape\n            if (h > 0):\n                model_obj.estimate_motion(hour, self.model_grid.data[(h - 1)], dims[1], dims[0])\n        del scaled_data\n        del model_data\n        del hour_labels\n    tracked_model_objects.extend(track_storms(model_objects, self.hours, self.object_matcher.cost_function_components, self.object_matcher.max_values, self.object_matcher.weights))\n    self.model_ew.min_thresh = min_orig\n    self.model_ew.max_thresh = max_orig\n    self.model_ew.data_increment = data_increment_orig\n    return tracked_model_objects", "docstring": "Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass.\n\nReturns:", "source": "codesearchnet"}
{"code": "def infer_channel_dimension_format(image: np.ndarray, num_channels: Optional[Union[int, tuple[int, ...]]]=None) -> ChannelDimension:\n    num_channels = num_channels if num_channels is not None else (1, 3)\n    num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n    if image.ndim == 3:\n        first_dim, last_dim = (0, 2)\n    elif image.ndim == 4:\n        first_dim, last_dim = (1, 3)\n    elif image.ndim == 5:\n        first_dim, last_dim = (2, 4)\n    else:\n        raise ValueError(f'Unsupported number of image dimensions: {image.ndim}')\n    if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n        logger.warning(f'The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension. Use the [input_data_format](https:\n        return ChannelDimension.FIRST\n    elif image.shape[first_dim] in num_channels:\n        return ChannelDimension.FIRST\n    elif image.shape[last_dim] in num_channels:\n        return ChannelDimension.LAST\n    raise ValueError('Unable to infer channel dimension format')", "docstring": "Infers the channel dimension format of `image`.\n\nArgs:\nimage (`np.ndarray`):\nThe image to infer the channel dimension of.\nnum_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\nThe number of channels of the image.\n\nReturns:\nThe channel dimension of the image.", "source": "github-repos"}
{"code": "def delete_storage_account(access_token, subscription_id, rgname, account_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts/', account_name, '?api-version=', STORAGE_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a storage account in the specified resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new storage account.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def set_reprompt_ssml(self, ssml):\n        \n        self.response.reprompt.outputSpeech.type = 'SSML'\n        self.response.reprompt.outputSpeech.ssml = ssml", "docstring": "Set response reprompt output speech as SSML type.\n\nArgs:\nssml: str. Response speech used when type is 'SSML', should be formatted\nwith Speech Synthesis Markup Language. Cannot exceed 8,000\ncharacters.", "source": "juraj-google-style"}
{"code": "def __init__(self, params, train):\n    \n    self.train = train\n    self.params = params\n\n    self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights(\n        params.vocab_size, params.hidden_size)\n    self.encoder_stack = EncoderStack(params, train)\n    self.decoder_stack = DecoderStack(params, train)", "docstring": "Initialize layers to build Transformer model.\n\nArgs:\nparams: hyperparameter object defining layer sizes, dropout values, etc.\ntrain: boolean indicating whether the model is in training mode. Used to\ndetermine if dropout layers should be added.", "source": "juraj-google-style"}
{"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None):\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        pay_cf = self._pay_leg.price(market)\n        receive_cf = self._receive_leg.price(market)\n        return receive_cf - pay_cf", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA `Tensor` of shape `batch_shape`  containing the modeled price of each\nIRS contract based on the input market data.", "source": "github-repos"}
{"code": "def get_variable_value_for_variation(self, variable, variation):\n    \n\n    if not variable or not variation:\n      return None\n\n    if variation.id not in self.variation_variable_usage_map:\n      self.logger.error('Variation with ID \"%s\" is not in the datafile.' % variation.id)\n      return None\n\n    \n    variable_usages = self.variation_variable_usage_map[variation.id]\n\n    \n    variable_usage = None\n    if variable_usages:\n      variable_usage = variable_usages.get(variable.id)\n\n    if variable_usage:\n      variable_value = variable_usage.value\n      self.logger.info('Value for variable \"%s\" for variation \"%s\" is \"%s\".' % (\n        variable.key,\n        variation.key,\n        variable_value\n      ))\n\n    else:\n      variable_value = variable.defaultValue\n      self.logger.info('Variable \"%s\" is not used in variation \"%s\". Assigning default value \"%s\".' % (\n        variable.key,\n        variation.key,\n        variable_value\n      ))\n\n    return variable_value", "docstring": "Get the variable value for the given variation.\n\nArgs:\nvariable: The Variable for which we are getting the value.\nvariation: The Variation for which we are getting the variable value.\n\nReturns:\nThe variable value or None if any of the inputs are invalid.", "source": "juraj-google-style"}
{"code": "def _GetPlistRootKey(self, file_entry):\n    \n    file_object = file_entry.GetFileObject()\n\n    try:\n      plist_file = plist.PlistFile()\n      plist_file.Read(file_object)\n\n    except IOError as exception:\n      location = getattr(file_entry.path_spec, 'location', '')\n      raise errors.PreProcessFail(\n          'Unable to read plist file: {0:s} with error: {1!s}'.format(\n              location, exception))\n\n    finally:\n      file_object.close()\n\n    return plist_file.root_key", "docstring": "Retrieves the root key of a plist file.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry of the plist.\n\nReturns:\ndict[str, object]: plist root key.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def load_variant(self, variant_obj):\n    try:\n        result = self.variant_collection.insert_one(variant_obj)\n    except DuplicateKeyError as err:\n        raise IntegrityError('Variant %s already exists in database', variant_obj['_id'])\n    return result", "docstring": "Load a variant object\n\nArgs:\nvariant_obj(dict)\n\nReturns:\ninserted_id", "source": "codesearchnet"}
{"code": "def decompose(P):\n    P = P.copy()\n    if (not P):\n        return P\n    out = [Poly({key: P.A[key]}) for key in P.keys]\n    return Poly(out, None, None, None)", "docstring": "Decompose a polynomial to component form.\n\nIn array missing values are padded with 0 to make decomposition compatible\nwith ``chaospy.sum(Q, 0)``.\n\nArgs:\nP (Poly) : Input data.\n\nReturns:\n(Poly) : Decomposed polynomial with `P.shape==(M,)+Q.shape` where\n`M` is the number of components in `P`.\n\nExamples:\n>>> q = cp.variable()\n>>> P = cp.Poly([q**2-1, 2])\n>>> print(P)\n[q0^2-1, 2]\n>>> print(cp.decompose(P))\n[[-1, 2], [q0^2, 0]]\n>>> print(cp.sum(cp.decompose(P), 0))\n[q0^2-1, 2]", "source": "codesearchnet"}
{"code": "def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):\n    residual = x\n    x, attn_weights = self.self_attn(query=x, key=x, key_padding_mask=encoder_padding_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n    x = nn.functional.dropout(x, p=self.dropout, training=self.training)\n    x = residual + x\n    x = self.self_attn_layer_norm(x)\n    residual = x\n    x = self.activation_fn(self.fc1(x))\n    x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)\n    x = self.fc2(x)\n    x = nn.functional.dropout(x, p=self.dropout, training=self.training)\n    x = residual + x\n    x = self.final_layer_norm(x)\n    return (x, attn_weights)", "docstring": "Args:\nx (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*\nencoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape\n*(batch, src_len)* where padding elements are indicated by `1`.\nfor t_tgt, t_src is excluded (or masked out), =0 means it is\nincluded in attention\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n*(config.encoder_attention_heads,)*.\n\nReturns:\nencoded output of shape *(seq_len, batch, embed_dim)*", "source": "github-repos"}
{"code": "def create_model() -> tf.keras.Model:\n    model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax')])\n    return model", "docstring": "Create model for training.\n\nCreate a simple tf.keras model for training.\n\nReturns:\nThe model to use for training.", "source": "github-repos"}
{"code": "def addUrlScheme(self, url):\n    if (not isinstance(url, str)):\n        raise TypeError('url must be a string value')\n    if (not (url in self._urlSchemes)):\n        self._urlSchemes[url] = OEmbedUrlScheme(url)", "docstring": "Add a url scheme to this endpoint. It takes a url string and create\nthe OEmbedUrlScheme object internally.\n\nArgs:\nurl: The url string that represents a url scheme to add.", "source": "codesearchnet"}
{"code": "def _get_ngrams(n, text):\n  \n  ngram_set = set()\n  text_length = len(text)\n  max_index_ngram_start = text_length - n\n  for i in range(max_index_ngram_start + 1):\n    ngram_set.add(tuple(text[i:i + n]))\n  return ngram_set", "docstring": "Calculates n-grams.\n\nArgs:\nn: which n-grams to calculate\ntext: An array of tokens\n\nReturns:\nA set of n-grams", "source": "juraj-google-style"}
{"code": "def post_headline(self, name, level, message):\n        \n\n        self.post_command(OPERATIONS.CMD_SET_HEADLINE,\n                          {'name': name, 'level': level, 'message': message})", "docstring": "Asynchronously update the sticky headline for a service.\n\nArgs:\nname (string): The name of the service\nlevel (int): A message level in states.*_LEVEL\nmessage (string): The user facing error message that will be stored\nfor the service and can be queried later.", "source": "juraj-google-style"}
{"code": "def get_layer_opt(self, lrs, wds):\n    return LayerOptimizer(self.opt_fn, self.get_layer_groups(), lrs, wds)", "docstring": "Method returns an instance of the LayerOptimizer class, which\nallows for setting differential learning rates for different\nparts of the model.\n\nAn example of how a model maybe differentiated into different parts\nfor application of differential learning rates and weight decays is\nseen in ../.../courses/dl1/fastai/conv_learner.py, using the dict\n'model_meta'. Currently, this seems supported only for convolutional\nnetworks such as VGG-19, ResNet-XX etc.\n\nArgs:\nlrs (float or list(float)): learning rate(s) for the model\n\nwds (float or list(float)): weight decay parameter(s).\n\nReturns:\nAn instance of a LayerOptimizer", "source": "codesearchnet"}
{"code": "def ExtractEvents(\n      self, parser_mediator, registry_key, codepage='cp1252', **kwargs):\n    \n    self._ParseMRUListExKey(parser_mediator, registry_key, codepage=codepage)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"}
{"code": "def list_types_poi(self, **kwargs):\n        \n        \n        url_args = {'language': util.language_code(kwargs.get('lang'))}\n\n        \n        result = self.make_request('list_poi_types', url_args)\n\n        if not util.check_result(result):\n            return False, result.get('message', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'Data')\n        return True, [emtype.ParkingPoiType(**a) for a in values]", "docstring": "Obtain a list of families, types and categories of POI.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[ParkingPoiType]), or message\nstring in case of error.", "source": "juraj-google-style"}
{"code": "def parse_ensembl_line(line, header):\n    \n    line = line.rstrip().split('\\t')\n    header = [head.lower() for head in header]\n    raw_info = dict(zip(header, line))\n\n    ensembl_info = {}\n\n    for word in raw_info:\n        value = raw_info[word]\n\n        if not value:\n            continue\n\n        if 'chromosome' in word:\n            ensembl_info['chrom'] = value\n\n        if 'gene' in word:\n            if 'id' in word:\n                ensembl_info['ensembl_gene_id'] = value\n            elif 'start' in word:\n                ensembl_info['gene_start'] = int(value)\n            elif 'end' in word:\n                ensembl_info['gene_end'] = int(value)\n\n        if 'hgnc symbol' in word:\n            ensembl_info['hgnc_symbol'] = value\n        if \"gene name\" in word:\n            ensembl_info['hgnc_symbol'] = value\n\n        if 'hgnc id' in word:\n            ensembl_info['hgnc_id'] = int(value.split(':')[-1])\n\n        if 'transcript' in word:\n            if 'id' in word:\n                ensembl_info['ensembl_transcript_id'] = value\n            elif 'start' in word:\n                ensembl_info['transcript_start'] = int(value)\n            elif 'end' in word:\n                ensembl_info['transcript_end'] = int(value)\n\n        if 'exon' in word:\n            if 'start' in word:\n                ensembl_info['exon_start'] = int(value)\n            elif 'end' in word:\n                ensembl_info['exon_end'] = int(value)\n            elif 'rank' in word:\n                ensembl_info['exon_rank'] = int(value)\n\n        if 'utr' in word:\n\n            if 'start' in word:\n                if '5' in word:\n                    ensembl_info['utr_5_start'] = int(value)\n                elif '3' in word:\n                    ensembl_info['utr_3_start'] = int(value)\n            elif 'end' in word:\n                if '5' in word:\n                    ensembl_info['utr_5_end'] = int(value)\n                elif '3' in word:\n                    ensembl_info['utr_3_end'] = int(value)\n\n        if 'strand' in word:\n            ensembl_info['strand'] = int(value)\n\n        if 'refseq' in word:\n            if 'mrna' in word:\n                if 'predicted' in word:\n                    ensembl_info['refseq_mrna_predicted'] = value\n                else:\n                    ensembl_info['refseq_mrna'] = value\n\n            if 'ncrna' in word:\n                ensembl_info['refseq_ncrna'] = value\n\n    return ensembl_info", "docstring": "Parse an ensembl formated line\n\nArgs:\nline(list): A list with ensembl gene info\nheader(list): A list with the header info\n\nReturns:\nensembl_info(dict): A dictionary with the relevant info", "source": "juraj-google-style"}
{"code": "def changed(self, path_info, checksum_info):\n    logger.debug(\"checking if '{}'('{}') has changed.\".format(path_info, checksum_info))\n    if (not self.exists(path_info)):\n        logger.debug(\"'{}' doesn't exist.\".format(path_info))\n        return True\n    checksum = checksum_info.get(self.PARAM_CHECKSUM)\n    if (checksum is None):\n        logger.debug(\"checksum for '{}' is missing.\".format(path_info))\n        return True\n    if self.changed_cache(checksum):\n        logger.debug(\"cache for '{}'('{}') has changed.\".format(path_info, checksum))\n        return True\n    actual = self.save_info(path_info)[self.PARAM_CHECKSUM]\n    if (checksum != actual):\n        logger.debug(\"checksum '{}'(actual '{}') for '{}' has changed.\".format(checksum, actual, path_info))\n        return True\n    logger.debug(\"'{}' hasn't changed.\".format(path_info))\n    return False", "docstring": "Checks if data has changed.\n\nA file is considered changed if:\n- It doesn't exist on the working directory (was unlinked)\n- Checksum is not computed (saving a new file)\n- The checkusm stored in the State is different from the given one\n- There's no file in the cache\n\nArgs:\npath_info: dict with path information.\nchecksum: expected checksum for this data.\n\nReturns:\nbool: True if data has changed, False otherwise.", "source": "codesearchnet"}
{"code": "def update_pipeline_field(self, pipeline_key, field):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline_key,\n\t\t\t\t\t\tself.fields_suffix\n\t\t\t\t\t\t])\n\t\treturn self._update_field(uri, field)", "docstring": "Upates pipeline field as specified\nArgs:\npipeline_key\t\tkey for pipeline where the fields lives\nfield \t\t\t\tStreakField object with fresh data\nreturns\t\t\t\t(status code, updated field dict)", "source": "juraj-google-style"}
{"code": "def validate_addr(self, address, id=None, endpoint=None):\n    return self._call_endpoint(VALIDATE_ADDR, params=[address], id=id, endpoint=endpoint)", "docstring": "returns whether or not addr string is valid\n\nArgs:\naddress: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "codesearchnet"}
{"code": "def _process_name_or_alias_filter_directive(filter_operation_info, location, context, parameters):\n    filtered_field_type = filter_operation_info.field_type\n    if isinstance(filtered_field_type, GraphQLUnionType):\n        raise GraphQLCompilationError(u'Cannot apply \"name_or_alias\" to union type {}'.format(filtered_field_type))\n    current_type_fields = filtered_field_type.fields\n    name_field = current_type_fields.get('name', None)\n    alias_field = current_type_fields.get('alias', None)\n    if ((not name_field) or (not alias_field)):\n        raise GraphQLCompilationError(u'Cannot apply \"name_or_alias\" to type {} because it lacks a \"name\" or \"alias\" field.'.format(filtered_field_type))\n    name_field_type = strip_non_null_from_type(name_field.type)\n    alias_field_type = strip_non_null_from_type(alias_field.type)\n    if (not isinstance(name_field_type, GraphQLScalarType)):\n        raise GraphQLCompilationError(u'Cannot apply \"name_or_alias\" to type {} because its \"name\" field is not a scalar.'.format(filtered_field_type))\n    if (not isinstance(alias_field_type, GraphQLList)):\n        raise GraphQLCompilationError(u'Cannot apply \"name_or_alias\" to type {} because its \"alias\" field is not a list.'.format(filtered_field_type))\n    alias_field_inner_type = strip_non_null_from_type(alias_field_type.of_type)\n    if (alias_field_inner_type != name_field_type):\n        raise GraphQLCompilationError(u'Cannot apply \"name_or_alias\" to type {} because the \"name\" field and the inner type of the \"alias\" field do not match: {} vs {}'.format(filtered_field_type, name_field_type, alias_field_inner_type))\n    argument_inferred_type = name_field_type\n    (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)\n    check_against_name = expressions.BinaryComposition(u'=', expressions.LocalField('name'), argument_expression)\n    check_against_alias = expressions.BinaryComposition(u'contains', expressions.LocalField('alias'), argument_expression)\n    filter_predicate = expressions.BinaryComposition(u'||', check_against_name, check_against_alias)\n    if (non_existence_expression is not None):\n        filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)\n    return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks for a match against an Entity's name or alias.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nparameters: list of 1 element, containing the value to check the name or alias against;\nif the parameter is optional and missing, the check will return True\n\nReturns:\na Filter basic block that performs the check against the name or alias", "source": "codesearchnet"}
{"code": "async def tag(self, name: str, repo: str, *, tag: str = None) -> bool:\n        \n        params = {\"repo\": repo}\n\n        if tag:\n            params[\"tag\"] = tag\n\n        await self.docker._query(\n            \"images/{name}/tag\".format(name=name),\n            \"POST\",\n            params=params,\n            headers={\"content-type\": \"application/json\"},\n        )\n        return True", "docstring": "Tag the given image so that it becomes part of a repository.\n\nArgs:\nrepo: the repository to tag in\ntag: the name for the new tag", "source": "juraj-google-style"}
{"code": "def get_meta_graph_def(saved_model_dir, tag_set):\n    saved_model = read_saved_model(saved_model_dir)\n    set_of_tags = set([tag for tag in tag_set.split(',') if tag])\n    valid_tags = []\n    for meta_graph_def in saved_model.meta_graphs:\n        meta_graph_tags = set(meta_graph_def.meta_info_def.tags)\n        if meta_graph_tags == set_of_tags:\n            return meta_graph_def\n        else:\n            valid_tags.append(','.join(meta_graph_tags))\n    raise RuntimeError(f'MetaGraphDef associated with tag-set {tag_set} could not be found in the SavedModel. Please use one of the following tag-sets: {valid_tags}')", "docstring": "Gets MetaGraphDef from SavedModel.\n\nReturns the MetaGraphDef for the given tag-set and SavedModel directory.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect.\ntag_set: Group of tag(s) of the MetaGraphDef to load, in string format,\nseparated by ','. The empty string tag is ignored so that passing ''\nmeans the empty tag set. For tag-set contains multiple tags, all tags\nmust be passed in.\n\nRaises:\nRuntimeError: An error when the given tag-set does not exist in the\nSavedModel.\n\nReturns:\nA MetaGraphDef corresponding to the tag-set.", "source": "github-repos"}
{"code": "def check_models_are_tested(module: types.ModuleType, test_file: str) -> List[str]:\n    defined_models = get_models(module)\n    tested_models = find_tested_models(test_file)\n    if tested_models is None:\n        if test_file.replace(os.path.sep, '/') in TEST_FILES_WITH_NO_COMMON_TESTS:\n            return\n        return [f'{test_file} should define `all_model_classes` to apply common tests to the models it tests. ' + 'If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file ' + '`utils/check_repo.py`.']\n    failures = []\n    for model_name, _ in defined_models:\n        if model_name not in tested_models and should_be_tested(model_name):\n            failures.append(f'{model_name} is defined in {module.__name__} but is not tested in ' + f'{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file.' + 'If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`' + 'in the file `utils/check_repo.py`.')\n    return failures", "docstring": "Check models defined in a module are all tested in a given file.\n\nArgs:\nmodule (`types.ModuleType`): The module in which we get the models.\ntest_file (`str`): The path to the file where the module is tested.\n\nReturns:\n`List[str]`: The list of error messages corresponding to models not tested.", "source": "github-repos"}
{"code": "def predict(self, version_name, data):\n    full_version_name = ('%s/versions/%s' % (self._full_model_name, version_name))\n    request = self._api.projects().predict(body={'instances': data}, name=full_version_name)\n    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'\n    result = request.execute()\n    if ('predictions' not in result):\n        raise Exception('Invalid response from service. Cannot find \"predictions\" in response.')\n    return result['predictions']", "docstring": "Get prediction results from features instances.\n\nArgs:\nversion_name: the name of the version used for prediction.\ndata: typically a list of instance to be submitted for prediction. The format of the\ninstance depends on the model. For example, structured data model may require\na csv line for each instance.\nNote that online prediction only works on models that take one placeholder value,\nsuch as a string encoding a csv line.\nReturns:\nA list of prediction results for given instances. Each element is a dictionary representing\noutput mapping from the graph.\nAn example:\n[{\"predictions\": 1, \"score\": [0.00078, 0.71406, 0.28515]},\n{\"predictions\": 1, \"score\": [0.00244, 0.99634, 0.00121]}]", "source": "codesearchnet"}
{"code": "def forward(self, g_values: torch.Tensor, mask: torch.Tensor, labels: Optional[torch.Tensor]=None, loss_batch_weight=1, return_dict=False) -> BayesianWatermarkDetectorModelOutput:\n    likelihoods_watermarked = self.likelihood_model_watermarked(g_values)\n    likelihoods_unwatermarked = 0.5 * torch.ones_like(g_values)\n    out = self._compute_posterior(likelihoods_watermarked=likelihoods_watermarked, likelihoods_unwatermarked=likelihoods_unwatermarked, mask=mask, prior=self.prior)\n    loss = None\n    if labels is not None:\n        loss_fct = BCELoss()\n        loss_unwweight = torch.sum(self.likelihood_model_watermarked.delta ** 2)\n        loss_weight = loss_unwweight * loss_batch_weight\n        loss = loss_fct(torch.clamp(out, 1e-05, 1 - 1e-05), labels) + loss_weight\n    if not return_dict:\n        return (out,) if loss is None else (out, loss)\n    return BayesianWatermarkDetectorModelOutput(loss=loss, posterior_probabilities=out)", "docstring": "Computes the watermarked posterior P(watermarked|g_values).\n\nArgs:\ng_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth, ...)`):\ng-values (with values 0 or 1)\nmask:\nA binary array shape [batch_size, seq_len] indicating which g-values should be used. g-values with mask\nvalue 0 are discarded.\n\nReturns:\np(watermarked | g_values), of shape [batch_size].", "source": "github-repos"}
{"code": "def _prepare_babi_data(tmp_dir, data_dir):\n  \n  if not tf.gfile.Exists(data_dir):\n    tf.gfile.MakeDirs(data_dir)\n\n  file_path = os.path.join(tmp_dir, _TAR)\n  headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) \"\n                           \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n                           \"Chrome/63.0.3239.132 Safari/537.36\"}\n  resp = requests.get(_URL, headers=headers)\n  with open(file_path, \"wb\") as f:\n    f.write(resp.content)\n\n  tar = tarfile.open(file_path)\n  tar.extractall(tmp_dir)\n  tar.close()\n\n  return tmp_dir", "docstring": "Downloads and extracts the dataset.\n\nArgs:\ntmp_dir: temp directory to download and extract the dataset\ndata_dir: The base directory where data and vocab files are stored.\n\nReturns:\ntmp_dir: temp directory containing the raw data.", "source": "juraj-google-style"}
{"code": "def extractUnits(self, inp):\n        \n        inp = self._preprocess(inp)\n\n        units = []\n        description = \"\"\n        for w in inp.split(' '):\n            if self.isValidUnit(w) or w == '/':\n                if description:\n                    description += \" \"\n                description += w\n            else:\n                if description:\n                    units.append(description)\n                description = \"\"\n\n        if description:\n            units.append(description)\n        return units", "docstring": "Collects all the valid units from an inp string. Works by\nappending consecutive words from the string and cross-referncing\nthem with a set of valid units.\n\nArgs:\ninp (str): Some text which hopefully contains descriptions\nof different units.\n\nReturns:\nA list of strings, each entry in which is a valid quantities\nunit.", "source": "juraj-google-style"}
{"code": "def get_servo_position(self):\n        \n        \n\n        data = []\n        data.append(0x09)\n        data.append(self.servoid)\n        data.append(RAM_READ_REQ)\n        data.append(CALIBRATED_POSITION_RAM)\n        data.append(BYTE2)\n        send_data(data)\n        rxdata = []\n        try:\n            rxdata = SERPORT.read(13)\n            if (self.servomodel==0x06) or (self.servomodel == 0x04):\n                return ((ord(rxdata[10])&0xff)<<8) | (ord(rxdata[9])&0xFF)\n            else:\n                \n                return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)\n\n        except HerkulexError:\n            print \"Could not read from the servos. Check connection\"", "docstring": "Gets the current position of Herkulex\n\nArgs:\nnone\n\nReturns:\nint: position of the servo- 0 to 1023\n\nRaises:\nSerialException: Error occured while opening serial port", "source": "juraj-google-style"}
{"code": "def broadcast_recv(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n    if group_size <= 1:\n        raise ValueError(f'Parameter `group_size` to broadcast_send must be at least 2. Received: {group_size}.')\n    return gen_collective_ops.collective_bcast_recv(shape=shape, T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout)", "docstring": "Receives a broadcasts tensor, across devices.\n\nArgs:\nshape: Shape of the tensor to be received.\ndtype: Type of the tensor to be received.\ngroup_size: one plus the number of receiving tensors, i.e. the total\nnumber of devices participating.  Each tensor must reside on a\ndifferent device.\ngroup_key: an integer identifying the group of devices.\ninstance_key: an integer identifying the participating group of Ops.\ncommunication_hint: preferred collective communication.  The implementation\nmay fall back to another mechanism.  Options include `auto`, `ring`, and\n`nccl`.\ntimeout: If set to a non zero, set a completion timeout to detect staleness.\nIf the timer goes off, a DeadlineExceededError is raised.\nThe timeout value in seconds. This feature is experimental.\n\nReturns:\nAn Op implementing the broadcast receive.\n\nRaises:\nValueError: if any of the input parameter constraints are not met.", "source": "github-repos"}
{"code": "def make_full_document(text, title=None, preamp_decl={}, preamb_extra=None):\n    import utool as ut\n    doc_preamb = ut.codeblock('\\n    %\\\\documentclass{article}\\n    \\\\documentclass[10pt,twocolumn,letterpaper]{article}\\n    % \\\\usepackage[utf8]{inputenc}\\n    \\\\usepackage[T1]{fontenc}\\n\\n    \\\\usepackage{times}\\n    \\\\usepackage{epsfig}\\n    \\\\usepackage{graphicx}\\n    \\\\usepackage{amsmath,amsthm,amssymb}\\n    \\\\usepackage[usenames,dvipsnames,svgnames,table]{xcolor}\\n    \\\\usepackage{multirow}\\n    \\\\usepackage{subcaption}\\n    \\\\usepackage{booktabs}\\n\\n    %\\\\pagenumbering{gobble}\\n    ')\n    if (preamb_extra is not None):\n        if isinstance(preamb_extra, (list, tuple)):\n            preamb_extra = '\\n'.join(preamb_extra)\n        doc_preamb += (('\\n' + preamb_extra) + '\\n')\n    if (title is not None):\n        preamp_decl['title'] = title\n    decl_lines = ['\\\\{key}{{{val}}}'.format(key=key, val=val) for (key, val) in preamp_decl.items()]\n    doc_decllines = '\\n'.join(decl_lines)\n    doc_header = ut.codeblock('\\n        \\\\begin{document}\\n        ')\n    if (preamp_decl.get('title') is not None):\n        doc_header += '\\\\maketitle'\n    doc_footer = ut.codeblock('\\n        \\\\end{document}\\n        ')\n    text_ = '\\n'.join((doc_preamb, doc_decllines, doc_header, text, doc_footer))\n    return text_", "docstring": "r\"\"\"\ndummy preamble and document to wrap around latex fragment\n\nArgs:\ntext (str):\ntitle (str):\n\nReturns:\nstr: text_\n\nCommandLine:\npython -m utool.util_latex --test-make_full_document\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_latex import *  # NOQA\n>>> text = 'foo'\n>>> title = 'title'\n>>> preamp_decl = {}\n>>> text_ = make_full_document(text, title)\n>>> result = str(text_)\n>>> print(result)", "source": "codesearchnet"}
{"code": "def define_batch_env(constructor, num_agents, env_processes):\n    with tf.variable_scope('environments'):\n        if env_processes:\n            envs = [tools.wrappers.ExternalProcess(constructor) for _ in range(num_agents)]\n        else:\n            envs = [constructor() for _ in range(num_agents)]\n        batch_env = tools.BatchEnv(envs, blocking=(not env_processes))\n        batch_env = tools.InGraphBatchEnv(batch_env)\n    return batch_env", "docstring": "Create environments and apply all desired wrappers.\n\nArgs:\nconstructor: Constructor of an OpenAI gym environment.\nnum_agents: Number of environments to combine in the batch.\nenv_processes: Whether to step environment in external processes.\n\nReturns:\nIn-graph environments object.", "source": "codesearchnet"}
{"code": "def _load_methods(package):\n    \n    global _methods\n    _methods[package] = None\n    \n    from acorn.config import settings\n    from acorn.logging.descriptors import _obj_getattr\n    spack = settings(package)\n    if spack is not None:\n        if spack.has_section(\"analysis.methods\"):\n            _methods[package] = {}\n            \n            from importlib import import_module\n            mappings = dict(spack.items(\"analysis.methods\"))\n            for fqdn, target in mappings.items():\n                rootname = target.split('.')[0]\n                root = import_module(rootname)\n                caller = _obj_getattr(root, target)\n                _methods[package][fqdn] = caller", "docstring": "Loads the mappings from method call result to analysis.\n\nArgs:\npackage (str): name of the package to load for.", "source": "juraj-google-style"}
{"code": "def _binary_2d_label_to_1d_sparse_value(labels):\n    indices = []\n    values = []\n    batch = 0\n    for row in labels:\n        label = 0\n        xi = 0\n        for x in row:\n            if x == 1:\n                indices.append([batch])\n                values.append(label)\n                xi += 1\n            else:\n                assert x == 0\n            label += 1\n        batch += 1\n    if indices != [[i] for i in range(len(labels))]:\n        raise ValueError('Expected 1 label/example, got %s.' % indices)\n    shape = [len(labels)]\n    return sparse_tensor.SparseTensorValue(np.array(indices, np.int64), np.array(values, np.int64), np.array(shape, np.int64))", "docstring": "Convert dense 2D binary indicator to sparse ID.\n\nOnly 1 values in `labels` are included in result.\n\nArgs:\nlabels: Dense 2D binary indicator, shape [batch_size, num_classes]. Each\nrow must contain exactly 1 `1` value.\n\nReturns:\n`SparseTensorValue` of shape [batch_size]. Values are indices of `1` values\nalong the last dimension of `labels`.\n\nRaises:\nValueError: if there is not exactly 1 `1` value per row of `labels`.", "source": "github-repos"}
{"code": "def allreduce_ring(xs, devices, reduction_fn_string=\"SUM\"):\n  \n  n = len(xs)\n  if len(devices) != n:\n    raise ValueError(\"devices must be a list of length len(xs)\")\n  if n == 1:\n    return xs\n  shape = xs[0].shape.as_list()\n  \n  size = None if None in shape else mtf.list_product(shape)\n  if size is None or size < 1024 or size % n != 0:\n    return allreduce_ring_single_shard(xs, devices, reduction_fn_string)\n\n  def _circular_shift(l, n):\n    n %= len(l)\n    return l[-n:] + l[:-n]\n  def _flatten_and_split(x):\n    \n    return tf.split(tf.reshape(x, [-1]), n)\n  def _concat_and_reshape(xs):\n    return tf.reshape(tf.concat(xs, 0), shape)\n\n  \n  x_split = mtf.parallel(devices, _flatten_and_split, xs)\n  x_split_t = mtf.transpose_list_of_lists(x_split)\n\n  y_split_t = []\n  for shard in xrange(n):\n    shard_xs = _circular_shift(x_split_t[shard], shard)\n    shard_devices = _circular_shift(devices, shard)\n    shard_ys = allreduce_ring_single_shard(\n        shard_xs, shard_devices, reduction_fn_string)\n    y_split_t.append(_circular_shift(shard_ys, -shard))\n  y_split = mtf.transpose_list_of_lists(y_split_t)\n  ys = mtf.parallel(devices, _concat_and_reshape, y_split)\n  return ys", "docstring": "Compute the reduction of all Tensors and put the result everywhere.\n\nPerformance-optimized for a ring of devices.\n\nArgs:\nxs: a list of n tf.Tensors\ndevices: a list of strings\nreduction_fn_string: \"SUM\" or \"MAX\"\n\nReturns:\na list of n Tensors\nRaises:\nValueError: if devices is not a list of n strings", "source": "juraj-google-style"}
{"code": "def to_view(self, view_name):\n    \n    \n    from . import _view\n    return _view.View(view_name, self._context).create(self._sql)", "docstring": "Create a View from this Query.\n\nArgs:\nview_name: the name of the View either as a string or a 3-part tuple\n(projectid, datasetid, name).\n\nReturns:\nA View for the Query.", "source": "juraj-google-style"}
{"code": "def __init__(self, type, data):\n        \n        \n        if not isinstance(type, int):\n            raise TypeError(\"ext type is not type integer\")\n        \n        elif sys.version_info[0] == 3 and not isinstance(data, bytes):\n            raise TypeError(\"ext data is not type \\'bytes\\'\")\n        elif sys.version_info[0] == 2 and not isinstance(data, str):\n            raise TypeError(\"ext data is not type \\'str\\'\")\n        self.type = type\n        self.data = data", "docstring": "Construct a new Ext object.\n\nArgs:\ntype: application-defined type integer\ndata: application-defined data byte array\n\nExample:\n>>> foo = umsgpack.Ext(0x05, b\"\\x01\\x02\\x03\")\n>>> umsgpack.packb({u\"special stuff\": foo, u\"awesome\": True})\n'\\x82\\xa7awesome\\xc3\\xadspecial stuff\\xc7\\x03\\x05\\x01\\x02\\x03'\n>>> bar = umsgpack.unpackb(_)\n>>> print(bar[\"special stuff\"])\nExt Object (Type: 0x05, Data: 01 02 03)\n>>>", "source": "juraj-google-style"}
{"code": "def __init__(\n            self, summary=AverageSummary, alpha=1,\n            credibility=WeightedCredibility, reviewer=Reviewer, product=Product):\n        \n        self.alpha = alpha\n        self.graph = nx.DiGraph()\n        self.reviewers = []\n        self.products = []\n\n        self._summary_cls = summary\n        self._review_cls = summary.review_class()\n\n        self.credibility = credibility(self)\n        self._reviewer_cls = reviewer\n        self._product_cls = product", "docstring": "Construct bipartite graph.\n\nArgs:\nsummary_type: specify summary type class, default value is AverageSummary.\nalpha: used to compute weight of anomalous scores, default value is 1.\ncredibility: credibility class to be used in this graph.\n(Default: WeightedCredibility)\nreviewer: Class of reviewers.\nproduct: Class of products.", "source": "juraj-google-style"}
{"code": "def take_grad(self, num_required, name=None):\n    return gen_data_flow_ops.sparse_accumulator_take_gradient(self._accumulator_ref, num_required, dtype=self._dtype, name=name)", "docstring": "Attempts to extract the average gradient from the accumulator.\n\nThe operation blocks until sufficient number of gradients have been\nsuccessfully applied to the accumulator.\n\nOnce successful, the following actions are also triggered:\n- Counter of accumulated gradients is reset to 0.\n- Aggregated gradient is reset to 0 tensor.\n- Accumulator's internal time step is incremented by 1.\n\nArgs:\nnum_required: Number of gradients that needs to have been aggregated\nname: Optional name for the operation\n\nReturns:\nA tuple of indices, values, and shape representing the average gradient.\n\nRaises:\nInvalidArgumentError: If `num_required` < 1", "source": "github-repos"}
{"code": "def set_keras_mask(x, mask):\n    set_tensor_attr(x, '_keras_mask', mask)", "docstring": "Sets the Keras mask attribute for the given tensor in-place.\n\nArgs:\nx: Input tensor.\nmask: The mask tensor to be set. If `None`, the `_keras_mask` attribute\nwill be cleared.", "source": "github-repos"}
{"code": "def _CreateCommentsFromPrefix(comment_prefix, comment_lineno, comment_column, standalone=False):\n    comments = []\n    lines = comment_prefix.split('\\n')\n    index = 0\n    while index < len(lines):\n        comment_block = []\n        while index < len(lines) and lines[index].lstrip().startswith('\n            comment_block.append(lines[index].strip())\n            index += 1\n        if comment_block:\n            new_lineno = comment_lineno + index - 1\n            comment_block[0] = comment_block[0].strip()\n            comment_block[-1] = comment_block[-1].strip()\n            comment_leaf = pytree.Leaf(type=token.COMMENT, value='\\n'.join(comment_block), context=('', (new_lineno, comment_column)))\n            comment_node = comment_leaf if not standalone else pytree.Node(pygram.python_symbols.simple_stmt, [comment_leaf])\n            comments.append(comment_node)\n        while index < len(lines) and (not lines[index].lstrip()):\n            index += 1\n    return comments", "docstring": "Create pytree nodes to represent the given comment prefix.\n\nArgs:\ncomment_prefix: (unicode) the text of the comment from the node's prefix.\ncomment_lineno: (int) the line number for the start of the comment.\ncomment_column: (int) the column for the start of the comment.\nstandalone: (bool) determines if the comment is standalone or not.\n\nReturns:\nThe simple_stmt nodes if this is a standalone comment, otherwise a list of\nnew COMMENT leafs. The prefix may consist of multiple comment blocks,\nseparated by blank lines. Each block gets its own leaf.", "source": "github-repos"}
{"code": "def set_symbols(self, symbols, functional=None, sym_potcar_map=None):\n    del self[:]\n    if sym_potcar_map:\n        for el in symbols:\n            self.append(PotcarSingle(sym_potcar_map[el]))\n    else:\n        for el in symbols:\n            p = PotcarSingle.from_symbol_and_functional(el, functional)\n            self.append(p)", "docstring": "Initialize the POTCAR from a set of symbols. Currently, the POTCARs can\nbe fetched from a location specified in .pmgrc.yaml. Use pmg config\nto add this setting.\n\nArgs:\nsymbols ([str]): A list of element symbols\nfunctional (str): The functional to use. If None, the setting\nPMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is\nnot set, it will default to PBE.\nsym_potcar_map (dict): A map of symbol:raw POTCAR string. If\nsym_potcar_map is specified, POTCARs will be generated from\nthe given map data rather than the config file location.", "source": "codesearchnet"}
{"code": "def from_schema(cls, schema: class_schema.Schema, module_name: str, name: str, qualname: Optional[str]=None, is_method: bool=True) -> 'Signature':\n    arg_names = list(schema.metadata.get('init_arg_list', []))\n    if arg_names and arg_names[-1].startswith('*'):\n        vararg_name = arg_names[-1][1:]\n        arg_names.pop(-1)\n    else:\n        vararg_name = None\n\n    def get_arg_spec(arg_name):\n        field = schema.get_field(arg_name)\n        if not field:\n            raise ValueError(f'Argument {arg_name!r} is not a symbolic field.')\n        return field.value\n    args = []\n    if is_method:\n        args.append(Argument.from_annotation('self', Argument.Kind.POSITIONAL_OR_KEYWORD))\n    args.extend([Argument(n, Argument.Kind.POSITIONAL_OR_KEYWORD, get_arg_spec(n)) for n in arg_names])\n    varargs = None\n    if vararg_name:\n        varargs = Argument(vararg_name, Argument.Kind.VAR_POSITIONAL, get_arg_spec(vararg_name))\n    existing_names = set(arg_names)\n    if vararg_name:\n        existing_names.add(vararg_name)\n    kwonlyargs = []\n    varkw = None\n    for key, field in schema.fields.items():\n        if key not in existing_names and (not field.frozen):\n            if key.is_const:\n                kwonlyargs.append(Argument(str(key), Argument.Kind.KEYWORD_ONLY, field.value))\n            else:\n                varkw = Argument(schema.metadata.get('varkw_name', None) or 'kwargs', Argument.Kind.VAR_KEYWORD, class_schema.ValueSpec.DictType(field.value))\n    return Signature(callable_type=CallableType.FUNCTION, name=name, module_name=module_name, qualname=qualname, description=schema.description, args=args, kwonlyargs=kwonlyargs, varargs=varargs, varkw=varkw, return_value=schema.metadata.get('returns', None))", "docstring": "Creates a signature from a schema object.\n\nArgs:\nschema: A `pg.typing.Schema` object associated with a `pg.Object`.\nmodule_name: Module name for the signature.\nname: Function or method name of the signature.\nqualname: Qualname of the signature.\nis_method: If True, `self` will be added in the signature as the first\nargument.\n\nReturns:\nA signature object from the schema.", "source": "github-repos"}
{"code": "def index(self, text, terms=None, **kwargs):\n\n        \n\n        self.clear()\n\n        \n        terms = terms or text.terms.keys()\n\n        pairs = combinations(terms, 2)\n        count = comb(len(terms), 2)\n\n        for t1, t2 in bar(pairs, expected_size=count, every=1000):\n\n            \n            score = text.score_braycurtis(t1, t2, **kwargs)\n            self.set_pair(t1, t2, score)", "docstring": "Index all term pair distances.\n\nArgs:\ntext (Text): The source text.\nterms (list): Terms to index.", "source": "juraj-google-style"}
{"code": "def assert_call(self, expected, left, right):\n    name_map = {left: 'left', right: 'right'}\n    node, result = self._is_instance.call(self._node, None, function.Args((left, right), self.new_dict(), None, None))\n    self.assertIn(node, self._node.outgoing)\n    result_map = {}\n    for b in result.bindings:\n        terms = set()\n        for o in b.origins:\n            self.assertEqual(node, o.where)\n            for sources in o.source_sets:\n                terms.add(' '.join(sorted(('%s:%d' % (name_map[b.variable], b.variable.bindings.index(b)) for b in sources))))\n        result_map[b.data] = terms\n    self.assertEqual(expected, result_map)", "docstring": "Check that call() returned the desired results.\n\nArgs:\nexpected: A dict from values to source sets, where a source set is\nrepresented by the sorted binding names separated by spaces, for example\n\"left:0 right:1\" would indicate binding #0 of variable \"left\" and\nbinding #1 of variable \"right\".\nleft: A Variable to use as the first arg to call().\nright: A Variable to use as the second arg to call().", "source": "github-repos"}
{"code": "def upper_bound(fm, nr_subs=None, scale_factor=1):\n    nr_subs_total = len(np.unique(fm.SUBJECTINDEX))\n    if (not nr_subs):\n        nr_subs = (nr_subs_total - 1)\n    assert (nr_subs < nr_subs_total)\n    intersub_scores = []\n    for measure in range(len(measures.scores)):\n        res_dict = {}\n        result_vectors = [(np.empty(nr_subs_total) + np.nan) for _ in np.unique(fm.category)]\n        res_dict.update(list(zip(np.unique(fm.category), result_vectors)))\n        intersub_scores.append(res_dict)\n    for fm_cat in fm.by_field('category'):\n        cat = fm_cat.category[0]\n        for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)):\n            image_scores = []\n            for fm_single in fm_cat.by_field('filenumber'):\n                predicting_subs = np.setdiff1d(np.unique(fm_single.SUBJECTINDEX), [sub])\n                np.random.shuffle(predicting_subs)\n                predicting_subs = predicting_subs[0:nr_subs]\n                predicting_fm = fm_single[ismember(fm_single.SUBJECTINDEX, predicting_subs)]\n                predicted_fm = fm_single[(fm_single.SUBJECTINDEX == sub)]\n                try:\n                    predicting_fdm = compute_fdm(predicting_fm, scale_factor=scale_factor)\n                except RuntimeError:\n                    predicting_fdm = None\n                image_scores.append(measures.prediction_scores(predicting_fdm, predicted_fm))\n            for (measure, score) in enumerate(nanmean(image_scores, 0)):\n                intersub_scores[measure][cat][sub_counter] = score\n    return intersub_scores", "docstring": "compute the inter-subject consistency upper bound for a fixmat.\n\nInput:\nfm : a fixmat instance\nnr_subs : the number of subjects used for the prediction. Defaults\nto the total number of subjects in the fixmat minus 1\nscale_factor : the scale factor of the FDMs. Default is 1.\nReturns:\nA list of scores; the list contains one dictionary for each measure.\nEach dictionary contains one key for each category and corresponding\nvalues is an array with scores for each subject.", "source": "codesearchnet"}
{"code": "def parse_line(self, line):\n    toks = shlex.split(line)\n    return (toks[0], ([] if (len(toks) == 1) else toks[1:]))", "docstring": "Parse a line of input.\n\nThe input line is tokenized using the same rules as the way bash shell\ntokenizes inputs. All quoting and escaping rules from the bash shell\napply here too.\n\nThe following cases are handled by __exec_line__():\n1.  Empty line.\n2.  The input line is completely made of whitespace characters.\n3.  The input line is the EOF character.\n4.  The first token, as tokenized by shlex.split(), is '!'.\n5.  Internal commands, i.e., commands registered with internal =\nTrue\n\nArguments:\nThe line to parse.\n\nReturns:\nA tuple (cmd, args). The first element cmd must be a python3 string.\nThe second element is, by default, a list of strings representing\nthe arguments, as tokenized by shlex.split().\n\nHow to overload parse_line():\n1.  The signature of the method must be the same.\n2.  The return value must be a tuple (cmd, args), where the cmd is\na string representing the first token, and args is a list of\nstrings.", "source": "codesearchnet"}
{"code": "def __get_favorites(self, favorite_type, start=0, max_items=100):\n        \n        if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):\n            favorite_type = SONOS_FAVORITES\n\n        response = self.contentDirectory.Browse([\n            ('ObjectID',\n             'FV:2' if favorite_type is SONOS_FAVORITES\n             else 'R:0/{0}'.format(favorite_type)),\n            ('BrowseFlag', 'BrowseDirectChildren'),\n            ('Filter', '*'),\n            ('StartingIndex', start),\n            ('RequestedCount', max_items),\n            ('SortCriteria', '')\n        ])\n        result = {}\n        favorites = []\n        results_xml = response['Result']\n\n        if results_xml != '':\n            \n            metadata = XML.fromstring(really_utf8(results_xml))\n\n            for item in metadata.findall(\n                    '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'\n                    if favorite_type == RADIO_SHOWS else\n                    '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):\n                favorite = {}\n                favorite['title'] = item.findtext(\n                    '{http:\n                favorite['uri'] = item.findtext(\n                    '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')\n                if favorite_type == SONOS_FAVORITES:\n                    favorite['meta'] = item.findtext(\n                        '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')\n                favorites.append(favorite)\n\n        result['total'] = response['TotalMatches']\n        result['returned'] = len(favorites)\n        result['favorites'] = favorites\n\n        return result", "docstring": "Helper method for `get_favorite_radio_*` methods.\n\nArgs:\nfavorite_type (str): Specify either `RADIO_STATIONS` or\n`RADIO_SHOWS`.\nstart (int): Which number to start the retrieval from. Used for\npaging.\nmax_items (int): The total number of results to return.", "source": "juraj-google-style"}
{"code": "def Contains(self, value):\n    self._awql = self._CreateSingleValueCondition(value, 'CONTAINS')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def ToDebugString(self, indentation_level=1):\n    \n    indentation = '  ' * indentation_level\n\n    text_parts = ['{0:s}path segment index: {1:d}\\n'.format(\n        indentation, self.path_segment_index)]\n\n    for path_segment, scan_object in self._path_segments.items():\n      text_parts.append('{0:s}path segment: {1:s}\\n'.format(\n          indentation, path_segment))\n\n      if isinstance(scan_object, PathFilterScanTreeNode):\n        text_parts.append('{0:s}scan tree node:\\n'.format(indentation))\n        text_parts.append(scan_object.ToDebugString(indentation_level + 1))\n\n      elif isinstance(scan_object, py2to3.STRING_TYPES):\n        text_parts.append('{0:s}path: {1:s}\\n'.format(\n            indentation, scan_object))\n\n    text_parts.append('{0:s}default value:\\n'.format(indentation))\n\n    if isinstance(self.default_value, PathFilterScanTreeNode):\n      text_parts.append('{0:s}scan tree node:\\n'.format(indentation))\n      text_parts.append(self.default_value.ToDebugString(indentation_level + 1))\n\n    elif isinstance(self.default_value, py2to3.STRING_TYPES):\n      text_parts.append('{0:s}pattern: {1:s}\\n'.format(\n          indentation, self.default_value))\n\n    text_parts.append('\\n')\n\n    return ''.join(text_parts)", "docstring": "Converts the path filter scan tree node into a debug string.\n\nArgs:\nindentation_level: an integer containing the text indentation level.\n\nReturns:\nA string containing a debug representation of the path filter scan\ntree node.", "source": "juraj-google-style"}
{"code": "def path_is_ignored(self, path):\n    try:\n        self.run('check-ignore', '--quiet', path)\n    except CommandError as e:\n        if (e.retcode == 1):\n            return False\n        else:\n            raise e\n    return True", "docstring": "Given a path, check if the path would be ignored.\n\nReturns:\nboolean", "source": "codesearchnet"}
{"code": "def file_content(self, file_content, update_if_exists=True):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        self._data['fileContent'] = file_content\n        return self.tc_requests.upload(\n            self.api_type,\n            self.api_sub_type,\n            self.unique_id,\n            file_content,\n            update_if_exists=update_if_exists,\n        )", "docstring": "Updates the file content.\n\nArgs:\nfile_content: The file_content to upload.\nupdate_if_exists:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def list(name, default=None, allow_none=False, fallback=None, separator=','):\n    \n    value = read(name, default, allow_none, fallback=fallback)\n    if isinstance(value, builtins.list):\n        return value\n    elif isinstance(value, builtins.str):\n        return _str_to_list(value, separator)\n    elif value is None and allow_none:\n        return None\n    else:\n        return [builtins.str(value)]", "docstring": "Get a list of strings or the default.\n\nThe individual list elements are whitespace-stripped.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)\nseparator: The list item separator character or pattern", "source": "juraj-google-style"}
{"code": "def get_metric_by_name(self, metric_name, **kwargs):\n        \n        return self._get_object_by_name(self._METRIC_ENDPOINT_SUFFIX,\n                                        metric_name,\n                                        **kwargs)", "docstring": "get a metric by name\n\nArgs:\nmetric_name (string): name of metric\n\nReturns:\ndictionary of response", "source": "juraj-google-style"}
{"code": "def format_level_0_memory(memory):\n    \n    formatted_memory = _list_to_complex_array(memory)\n    \n    if not 2 <= len(formatted_memory.shape) <= 3:\n        raise QiskitError('Level zero memory is not of correct shape.')\n    return formatted_memory", "docstring": "Format an experiment result memory object for measurement level 0.\n\nArgs:\nmemory (list): Memory from experiment with `meas_level==1`. `avg` or\n`single` will be inferred from shape of result memory.\n\nReturns:\nnp.ndarray: Measurement level 0 complex numpy array\n\nRaises:\nQiskitError: If the returned numpy array does not have 2 (avg) or 3 (single)\nindicies.", "source": "juraj-google-style"}
{"code": "def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val, penalty_val, learning_rate_val):\n    step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val, self.eig_num_iter_placeholder: eig_num_iter_val, self.smooth_placeholder: smooth_val, self.penalty_placeholder: penalty_val, self.learning_rate: learning_rate_val}\n    if (self.params['eig_type'] == 'SCIPY'):\n        (current_eig_vector, self.current_eig_val_estimate) = self.get_scipy_eig_vec()\n        step_feed_dict.update({self.eig_vec_estimate: current_eig_vector})\n    elif (self.params['eig_type'] == 'LZS'):\n        step_feed_dict.update({self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate})\n    self.sess.run(self.train_step, feed_dict=step_feed_dict)\n    [_, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate] = self.sess.run([self.proj_step, self.eig_vec_estimate, self.eig_val_estimate], feed_dict=step_feed_dict)\n    if ((self.current_step % self.params['print_stats_steps']) == 0):\n        [self.current_total_objective, self.current_unconstrained_objective, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate, self.current_nu] = self.sess.run([self.total_objective, self.dual_object.unconstrained_objective, self.eig_vec_estimate, self.eig_val_estimate, self.dual_object.nu], feed_dict=step_feed_dict)\n        stats = {'total_objective': float(self.current_total_objective), 'unconstrained_objective': float(self.current_unconstrained_objective), 'min_eig_val_estimate': float(self.current_eig_val_estimate)}\n        tf.logging.info('Current inner step: %d, optimization stats: %s', self.current_step, stats)\n        if (self.params['stats_folder'] is not None):\n            stats = json.dumps(stats)\n            filename = os.path.join(self.params['stats_folder'], (str(self.current_step) + '.json'))\n            with tf.gfile.Open(filename) as file_f:\n                file_f.write(stats)\n    if (((self.current_step % self.params['projection_steps']) == 0) and (self.current_unconstrained_objective < 0)):\n        nu = self.sess.run(self.dual_object.nu)\n        dual_feed_dict = {self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate}\n        (_, min_eig_val_h_lz) = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict)\n        projected_dual_feed_dict = {self.dual_object.projected_dual.nu: nu, self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz}\n        if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict):\n            return True\n    return False", "docstring": "Run one step of gradient descent for optimization.\n\nArgs:\neig_init_vec_val: Start value for eigen value computations\neig_num_iter_val: Number of iterations to run for eigen computations\nsmooth_val: Value of smoothness parameter\npenalty_val: Value of penalty for the current step\nlearning_rate_val: Value of learning rate\nReturns:\nfound_cert: True is negative certificate is found, False otherwise", "source": "codesearchnet"}
{"code": "def print_error_messages_raylet(task_error_queue, threads_stopped):\n    while True:\n        if threads_stopped.is_set():\n            return\n        try:\n            (error, t) = task_error_queue.get(block=False)\n        except queue.Empty:\n            threads_stopped.wait(timeout=0.01)\n            continue\n        while ((t + UNCAUGHT_ERROR_GRACE_PERIOD) > time.time()):\n            threads_stopped.wait(timeout=1)\n            if threads_stopped.is_set():\n                break\n        if (t < (last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD)):\n            logger.debug('Suppressing error from worker: {}'.format(error))\n        else:\n            logger.error('Possible unhandled error from worker: {}'.format(error))", "docstring": "Prints message received in the given output queue.\n\nThis checks periodically if any un-raised errors occured in the background.\n\nArgs:\ntask_error_queue (queue.Queue): A queue used to receive errors from the\nthread that listens to Redis.\nthreads_stopped (threading.Event): A threading event used to signal to\nthe thread that it should exit.", "source": "codesearchnet"}
{"code": "def to_hdf5(ramon, hdf5=None):\n    \n    if issubclass(type(ramon), RAMONBase) is False:\n        raise InvalidRAMONError(\"Invalid RAMON supplied to ramon.to_hdf5.\")\n\n    import h5py\n    import numpy\n\n    if hdf5 is None:\n        tmpfile = tempfile.NamedTemporaryFile(delete=False)\n    else:\n        tmpfile = hdf5\n\n    with h5py.File(tmpfile.name, \"a\") as hdf5:\n\n        \n        \n        grp = hdf5.create_group(str(ramon.id))\n\n        grp.create_dataset(\"ANNOTATION_TYPE\", (1,),\n                           numpy.uint32,\n                           data=AnnotationType.get_int(type(ramon)))\n\n        if hasattr(ramon, 'cutout'):\n            if ramon.cutout is not None:\n                grp.create_dataset('CUTOUT', ramon.cutout.shape,\n                                   ramon.cutout.dtype, data=ramon.cutout)\n                grp.create_dataset('RESOLUTION', (1,),\n                                   numpy.uint32, data=ramon.resolution)\n                grp.create_dataset('XYZOFFSET', (3,),\n                                   numpy.uint32, data=ramon.xyz_offset)\n\n        \n        metadata = grp.create_group('METADATA')\n\n        metadata.create_dataset('AUTHOR', (1,),\n                                dtype=h5py.special_dtype(vlen=str),\n                                data=ramon.author)\n\n        fstring = StringIO()\n        csvw = csv.writer(fstring, delimiter=',')\n        csvw.writerows([r for r in six.iteritems(ramon.kvpairs)])\n\n        metadata.create_dataset('KVPAIRS', (1,),\n                                dtype=h5py.special_dtype(vlen=str),\n                                data=fstring.getvalue())\n        metadata.create_dataset('CONFIDENCE', (1,), numpy.float,\n                                data=ramon.confidence)\n        metadata.create_dataset('STATUS', (1,), numpy.uint32,\n                                data=ramon.status)\n\n        \n\n        if hasattr(ramon, 'segments'):\n            metadata.create_dataset('SEGMENTS',\n                                    data=numpy.asarray(ramon.segments,\n                                                       dtype=numpy.uint32))\n\n        if hasattr(ramon, 'synapse_type'):\n            metadata.create_dataset('SYNAPSE_TYPE', (1,), numpy.uint32,\n                                    data=ramon.synapse_type)\n\n        if hasattr(ramon, 'weight'):\n            metadata.create_dataset('WEIGHT', (1,),\n                                    numpy.float, data=ramon.weight)\n\n        if hasattr(ramon, 'neuron'):\n            metadata.create_dataset('NEURON', (1,),\n                                    numpy.uint32, data=ramon.neuron)\n\n        if hasattr(ramon, 'segmentclass'):\n            metadata.create_dataset('SEGMENTCLASS', (1,), numpy.uint32,\n                                    data=ramon.segmentclass)\n\n        if hasattr(ramon, 'synapses'):\n            metadata.create_dataset('SYNAPSES', (len(ramon.synapses),),\n                                    numpy.uint32, data=ramon.synapses)\n\n        if hasattr(ramon, 'organelles'):\n            metadata.create_dataset('ORGANELLES',\n                                    (len(ramon.organelles),),\n                                    numpy.uint32,\n                                    data=ramon.organelles)\n\n        if hasattr(ramon, 'organelle_class'):\n            metadata.create_dataset('ORGANELLECLASS', (1,),\n                                    numpy.uint32,\n                                    data=ramon.organelle_class)\n        hdf5.flush()\n     
   tmpfile.seek(0)\n        return tmpfile\n    return False", "docstring": "Exports a RAMON object to an HDF5 file object.\n\nArguments:\nramon (RAMON): A subclass of RAMONBase\nhdf5 (str): Export filename\n\nReturns:\nhdf5.File\n\nRaises:\nInvalidRAMONError: if you pass a non-RAMON object", "source": "juraj-google-style"}
{"code": "def sync_entities(*model_objs):\n    \n\n    \n    if sync_entities.defer:\n        \n        if not model_objs:\n            sync_entities.buffer[None] = None\n        else:\n            \n            for model_obj in model_objs:\n                sync_entities.buffer[(model_obj.__class__, model_obj.pk)] = model_obj\n\n        \n        return False\n\n    \n    EntitySyncer(*model_objs).sync()", "docstring": "Syncs entities\n\nArgs:\nmodel_objs (List[Model]): The model objects to sync. If empty, all entities will be synced", "source": "juraj-google-style"}
{"code": "def __or__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.logical_or, tf.bool)", "docstring": "Returns a TensorFluent for the or logical operator.\n\nArgs:\nself: The first operand.\nother: The second operand.\n\nReturns:\nA TensorFluent wrapping the operator's output.", "source": "juraj-google-style"}
{"code": "def wiki_2x2_base():\n    hparams = mtf_transformer.mtf_transformer_base_lm()\n    hparams.shared_embedding_and_softmax_weights = False\n    hparams.attention_dropout = 0.0\n    hparams.relu_dropout = 0.0\n    hparams.layer_prepostprocess_dropout = 0.0\n    hparams.max_length = 1024\n    hparams.batch_size = 32\n    hparams.learning_rate_schedule = 'rsqrt_decay'\n    hparams.mesh_shape = 'all:8'\n    hparams.layout = 'batch:all;experts:all'\n    moe.set_default_moe_hparams(hparams)\n    hparams.moe_num_experts = 16\n    hparams.moe_hidden_size = 8192\n    hparams.decoder_layers = (['att', 'drd'] * 6)\n    hparams.d_model = 1024\n    hparams.d_ff = 2048\n    hparams.d_kv = 128\n    hparams.num_heads = 4\n    return hparams", "docstring": "Set of architectural experiments - language model on wikipedia on a 2x2.\n\n1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!\n\nReturns:\na hparams", "source": "codesearchnet"}
{"code": "class DetaHungarianMatcher(nn.Module):\n\n    def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1):\n        super().__init__()\n        requires_backends(self, ['scipy'])\n        self.class_cost = class_cost\n        self.bbox_cost = bbox_cost\n        self.giou_cost = giou_cost\n        if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0):\n            raise ValueError(\"All costs of the Matcher can't be 0\")\n\n    @torch.no_grad()\n    def forward(self, outputs, targets):\n        \n        batch_size, num_queries = outputs['logits'].shape[:2]\n        out_prob = outputs['logits'].flatten(0, 1).sigmoid()\n        out_bbox = outputs['pred_boxes'].flatten(0, 1)\n        target_ids = torch.cat([v['class_labels'] for v in targets])\n        target_bbox = torch.cat([v['boxes'] for v in targets])\n        alpha = 0.25\n        gamma = 2.0\n        neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log()\n        pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log()\n        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]\n        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)\n        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))\n        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost\n        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()\n        sizes = [len(v['boxes']) for v in targets]\n        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]\n        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]", "docstring": "This class computes an assignment between the targets and the predictions of the network.\n\nFor efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more\npredictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are\nun-matched (and thus treated as non-objects).\n\nArgs:\nclass_cost:\nThe relative weight of the classification error in the matching cost.\nbbox_cost:\nThe relative weight of the L1 error of the bounding box coordinates in the matching cost.\ngiou_cost:\nThe relative weight of the giou loss of the bounding box in the matching cost.", "source": "github-repos"}
{"code": "def __init__(self, variable_name, inferred_type):\n        \n        variable_name = ensure_unicode_string(variable_name)\n        super(Variable, self).__init__(variable_name, inferred_type)\n        self.variable_name = variable_name\n        self.inferred_type = inferred_type\n        self.validate()", "docstring": "Construct a new Variable object for the given variable name.\n\nArgs:\nvariable_name: string, should start with '$' and then obey variable naming rules\n(see validate_safe_string())\ninferred_type: GraphQL type object, specifying the inferred type of the variable\n\nReturns:\nnew Variable object", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):\n    if (kmip_version < enums.KMIPVersion.KMIP_1_3):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the ProfileInformation object.'.format(kmip_version.value))\n    super(ProfileInformation, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.PROFILE_NAME, local_buffer):\n        profile_name = primitives.Enumeration(enums.ProfileName, tag=enums.Tags.PROFILE_NAME)\n        profile_name.read(local_buffer, kmip_version=kmip_version)\n        self._profile_name = profile_name\n    else:\n        raise exceptions.InvalidKmipEncoding('The ProfileInformation encoding is missing the profile name.')\n    if self.is_tag_next(enums.Tags.SERVER_URI, local_buffer):\n        server_uri = primitives.TextString(tag=enums.Tags.SERVER_URI)\n        server_uri.read(local_buffer, kmip_version=kmip_version)\n        self._server_uri = server_uri\n    if self.is_tag_next(enums.Tags.SERVER_PORT, local_buffer):\n        server_port = primitives.Integer(tag=enums.Tags.SERVER_PORT)\n        server_port.read(local_buffer, kmip_version=kmip_version)\n        self._server_port = server_port\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the ProfileInformation structure and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the profile name is missing from\nthe encoding.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the ProfileInformation structure.", "source": "codesearchnet"}
{"code": "def ReadClientMetadata(self, client_id):\n    result = self.MultiReadClientMetadata([client_id])\n    try:\n        return result[client_id]\n    except KeyError:\n        raise UnknownClientError(client_id)", "docstring": "Reads the ClientMetadata record for a single client.\n\nArgs:\nclient_id: A GRR client id string, e.g. \"C.ea3b2b71840d6fa7\".\n\nReturns:\nAn rdfvalues.object.ClientMetadata object.\n\nRaises:\nUnknownClientError: if no client with corresponding id was found.", "source": "codesearchnet"}
{"code": "def _format_device(var):\n    if var.dtype.name.endswith('_ref'):\n        resource_var_annotation = '(legacy)'\n    else:\n        resource_var_annotation = '(resource)'\n    if var.device:\n        return '{} {}'.format(var.device, resource_var_annotation)\n    else:\n        return resource_var_annotation", "docstring": "Returns the device with an annotation specifying `ResourceVariable`.\n\n\"legacy\" means a normal tf.Variable while \"resource\" means a ResourceVariable.\n\nFor example:\n`(legacy)`\n`(resource)`\n`/job:learner/task:0/device:CPU:* (legacy)`\n`/job:learner/task:0/device:CPU:* (resource)`\n\nArgs:\nvar: The Tensorflow Variable to print.", "source": "codesearchnet"}
{"code": "def rename_dimension(x, old_name, new_name):\n    return reshape(x, x.shape.rename_dimension(old_name, new_name))", "docstring": "Reshape a Tensor, renaming one dimension.\n\nArgs:\nx: a Tensor\nold_name: a string\nnew_name: a string\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def absolute_name(self, depth=0):\n        \n        node, node_depth = self, self.depth\n        if depth < 1:\n            depth = node_depth\n        while node_depth > depth and node.package is not None:\n            node = node.package\n            node_depth -= 1\n        names = []\n        while node is not None:\n            names.append(node.name)\n            node = node.package\n        return '.'.join(reversed(names))", "docstring": "Return the absolute name of the node.\n\nConcatenate names from root to self within depth.\n\nArgs:\ndepth (int): maximum depth to go to.\n\nReturns:\nstr: absolute name of the node (until given depth is reached).", "source": "juraj-google-style"}
{"code": "def economic_qs(K, epsilon=sqrt(finfo(float).eps)):\n    (S, Q) = eigh(K)\n    nok = (abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon)\n    nok = (nok and (abs(max(K.min(), K.max(), key=abs)) >= epsilon))\n    if nok:\n        from scipy.linalg import eigh as sp_eigh\n        (S, Q) = sp_eigh(K)\n    ok = (S >= epsilon)\n    nok = logical_not(ok)\n    S0 = S[ok]\n    Q0 = Q[(:, ok)]\n    Q1 = Q[(:, nok)]\n    return ((Q0, Q1), S0)", "docstring": "r\"\"\"Economic eigen decomposition for symmetric matrices.\n\nA symmetric matrix ``K`` can be decomposed in\n:math:`\\mathrm Q_0 \\mathrm S_0 \\mathrm Q_0^\\intercal + \\mathrm Q_1\\\n\\mathrm S_1 \\mathrm Q_1^ \\intercal`, where :math:`\\mathrm S_1` is a zero\nmatrix with size determined by ``K``'s rank deficiency.\n\nArgs:\nK (array_like): Symmetric matrix.\nepsilon (float): Eigen value threshold. Default is\n``sqrt(finfo(float).eps)``.\n\nReturns:\ntuple: ``((Q0, Q1), S0)``.", "source": "codesearchnet"}
{"code": "def generate_sample_set(self, tags=None):\n    if isinstance(tags, str):\n        tags = [tags]\n    md5_list = self.data_store.tag_match(tags)\n    return self.store_sample_set(md5_list)", "docstring": "Generate a sample_set that maches the tags or all if tags are not specified.\n\nArgs:\ntags: Match samples against this tag list (or all if not specified)\n\nReturns:\nThe sample_set of those samples matching the tags", "source": "codesearchnet"}
{"code": "def get_rbounds(step):\n    \n    if step.geom is not None:\n        rcmb = step.geom.rcmb\n    else:\n        rcmb = step.sdat.par['geometry']['r_cmb']\n        if step.sdat.par['geometry']['shape'].lower() == 'cartesian':\n            rcmb = 0\n    rcmb = max(rcmb, 0)\n    return rcmb, rcmb + 1", "docstring": "Radial or vertical position of boundaries.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of floats: radial or vertical positions of boundaries of the\ndomain.", "source": "juraj-google-style"}
{"code": "def ws004c(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws004c`'.format(value))\n    self._ws004c = value", "docstring": "Corresponds to IDD Field `ws004c`\n\nArgs:\nvalue (float): value for IDD Field `ws004c`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def read_cs_g0_contribution(self):\n    header_pattern = '^\\\\s+G\\\\=0 CONTRIBUTION TO CHEMICAL SHIFT \\\\(field along BDIR\\\\)\\\\s+$\\\\n^\\\\s+-{50,}$\\\\n^\\\\s+BDIR\\\\s+X\\\\s+Y\\\\s+Z\\\\s*$\\\\n^\\\\s+-{50,}\\\\s*$\\\\n'\n    row_pattern = ('(?:\\\\d+)\\\\s+' + '\\\\s+'.join((['([-]?\\\\d+\\\\.\\\\d+)'] * 3)))\n    footer_pattern = '\\\\s+-{50,}\\\\s*$'\n    self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float, last_one_only=True, attribute_name='cs_g0_contribution')", "docstring": "Parse the  G0 contribution of NMR chemical shielding.\n\nReturns:\nG0 contribution matrix as list of list.", "source": "codesearchnet"}
{"code": "def get_atoms(structure, **kwargs):\n        \n        if not structure.is_ordered:\n            raise ValueError(\"ASE Atoms only supports ordered structures\")\n        symbols = [str(site.specie.symbol) for site in structure]\n        positions = [site.coords for site in structure]\n        cell = structure.lattice.matrix\n        return Atoms(symbols=symbols, positions=positions, pbc=True,\n                     cell=cell, **kwargs)", "docstring": "Returns ASE Atoms object from pymatgen structure.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\n**kwargs: other keyword args to pass into the ASE Atoms constructor\n\nReturns:\nASE Atoms object", "source": "juraj-google-style"}
{"code": "def get_redirect(paths):\n    \n\n    if isinstance(paths, str):\n        paths = [paths]\n\n    for path in paths:\n        url, permanent = get_alias(path)\n        if url:\n            return redirect(url, 301 if permanent else 302)\n\n        url, permanent = current_app.get_path_regex(path)\n        if url:\n            return redirect(url, 301 if permanent else 302)\n\n    return None", "docstring": "Get a redirect from a path or list of paths\n\nArguments:\n\npaths -- either a single path string, or a list of paths to check\n\nReturns: a flask.redirect() result", "source": "juraj-google-style"}
{"code": "def _recover_shape_information(self, inputs, outputs):\n    batch_size_value = inputs.get_shape()[0]\n    if self._data_format.startswith('NC'):\n        output_shape_value = ((batch_size_value, self.output_channels) + self.output_shape)\n    elif (self._data_format.startswith('N') and self._data_format.endswith('C')):\n        output_shape_value = (((batch_size_value,) + self.output_shape) + (self.output_channels,))\n    outputs.set_shape(output_shape_value)\n    return outputs", "docstring": "Recover output tensor shape value to enable shape inference.\n\nThe batch size of `inputs` isn't preserved by the convolution op. Calculate\nwhat the proper output shape will be for `outputs`.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\noutputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`. The output of `inputs` from a transpose\nconvolution op.\n\nReturns:\noutputs: The passed-in `outputs` with all shape information filled in.", "source": "codesearchnet"}
{"code": "def to_string(cls, error_code):\n        \n        if error_code == cls.ZONE_NOT_FOUND_ERROR:\n            return 'Zone not found'\n        return super(JLinkWriteErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\ncls (JLinkWriteErrors): the ``JLinkWriteErrors`` class\nerror_code (int): error code to convert\n\nReturns:\nAn error string corresponding to the error code.\n\nRaises:\nValueError: if the error code is invalid.", "source": "juraj-google-style"}
{"code": "def GetPreviousNonBlankLine(clean_lines, linenum):\n    prevlinenum = (linenum - 1)\n    while (prevlinenum >= 0):\n        prevline = clean_lines.elided[prevlinenum]\n        if (not IsBlankLine(prevline)):\n            return (prevline, prevlinenum)\n        prevlinenum -= 1\n    return ('', (- 1))", "docstring": "Return the most recent non-blank line and its line number.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file contents.\nlinenum: The number of the line to check.\n\nReturns:\nA tuple with two elements.  The first element is the contents of the last\nnon-blank line before the current line, or the empty string if this is the\nfirst non-blank line.  The second is the line number of that line, or -1\nif this is the first non-blank line.", "source": "codesearchnet"}
{"code": "def process(self, body, url=None, sig=None):\n    self.request = RequestBody()\n    self.response = ResponseBody()\n    self.request.parse(body)\n    app_id = self.request.session.application.application_id\n    stamp = self.request.request.timestamp\n    if (not self.valid.request(app_id, body, stamp, url, sig)):\n        return False\n    self.pass_session_attributes()\n    self.dispatch()\n    if (self.request.request.type == 'SessionEndedRequest'):\n        self.terminate()\n    return self.response.to_json()", "docstring": "Process request body given skill logic.\n\nTo validate a request, both, url and sig are required.\n\nAttributes received through body will be automatically added to the\nresponse.\n\nArgs:\nbody: str. HTTP request body.\nurl: str. SignatureCertChainUrl header value sent by request.\nPEM-encoded X.509 certificate chain that Alexa used to sign the\nmessage.\nsig: str. Signature header value sent by request. Base64-encoded\nsignature of the request body.\n\nReturn:\nstr or bool: HTTP response body or False if the request is invalid.", "source": "codesearchnet"}
{"code": "def segment_to_vector(self, seg):\n        \n        ft_dict = {ft: val for (val, ft) in self.fts(seg)}\n        return [ft_dict[name] for name in self.names]", "docstring": "Given a Unicode IPA segment, return a list of feature specificiations\nin cannonical order.\n\nArgs:\nseg (unicode): IPA consonant or vowel\n\nReturns:\nlist: feature specifications ('+'/'-'/'0') in the order from\n`FeatureTable.names`", "source": "juraj-google-style"}
{"code": "def from_maybe_serialized(source: Union[Any, str], value_type: Optional[Type[Any]]=None) -> Any:\n    if isinstance(source, str):\n        if source.endswith('.json'):\n            value = symbolic.load(source)\n        else:\n            value = symbolic.from_json_str(source)\n    else:\n        value = source\n    if value_type is not None and (not isinstance(value, value_type)):\n        raise TypeError(f'Loaded value {value!r} is not an instance of {value_type!r}.')\n    return value", "docstring": "Load value from maybe serialized form (e.g. JSON file or JSON string).\n\nArgs:\nsource: Source of value. It can be value (non-string type) itself, or a\nfilepath, or a JSON string from where the value will be loaded.\nvalue_type: An optional type to constrain the value.\n\nReturns:\nValue from source.", "source": "github-repos"}
{"code": "def _CalculateHashDataStream(self, file_entry, data_stream_name):\n    \n    hash_context = hashlib.sha256()\n\n    try:\n      file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n    except IOError as exception:\n      logging.warning((\n          'Unable to open path specification:\\n{0:s}'\n          'with error: {1!s}').format(\n              file_entry.path_spec.comparable, exception))\n      return None\n\n    if not file_object:\n      return None\n\n    try:\n      data = file_object.read(self._READ_BUFFER_SIZE)\n      while data:\n        hash_context.update(data)\n        data = file_object.read(self._READ_BUFFER_SIZE)\n    except IOError as exception:\n      logging.warning((\n          'Unable to read from path specification:\\n{0:s}'\n          'with error: {1!s}').format(\n              file_entry.path_spec.comparable, exception))\n      return None\n\n    finally:\n      file_object.close()\n\n    return hash_context.hexdigest()", "docstring": "Calculates a message digest hash of the data of the file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): name of the data stream.\n\nReturns:\nbytes: digest hash or None.", "source": "juraj-google-style"}
{"code": "def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):\n    parsed_date = self._parse_date(dataset_date, date_format)\n    if (dataset_end_date is None):\n        self.set_dataset_date_from_datetime(parsed_date)\n    else:\n        parsed_end_date = self._parse_date(dataset_end_date, date_format)\n        self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)", "docstring": "Set dataset date from string using specified format. If no format is supplied, the function will guess.\nFor unambiguous formats, this should be fine.\n\nArgs:\ndataset_date (str): Dataset date string\ndataset_end_date (Optional[str]): Dataset end date string\ndate_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: Optional[bool]=False) -> Tuple[tf.Tensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def past_stop_threshold(stop_threshold, eval_metric):\n  \n  if stop_threshold is None:\n    return False\n\n  if not isinstance(stop_threshold, numbers.Number):\n    raise ValueError(\"Threshold for checking stop conditions must be a number.\")\n  if not isinstance(eval_metric, numbers.Number):\n    raise ValueError(\"Eval metric being checked against stop conditions \"\n                     \"must be a number.\")\n\n  if eval_metric >= stop_threshold:\n    tf.logging.info(\n        \"Stop threshold of {} was passed with metric value {}.\".format(\n            stop_threshold, eval_metric))\n    return True\n\n  return False", "docstring": "Return a boolean representing whether a model should be stopped.\n\nArgs:\nstop_threshold: float, the threshold above which a model should stop\ntraining.\neval_metric: float, the current value of the relevant metric to check.\n\nReturns:\nTrue if training should stop, False otherwise.\n\nRaises:\nValueError: if either stop_threshold or eval_metric is not a number", "source": "juraj-google-style"}
{"code": "def __init__(self, value_set_codes_table: Optional[bigquery.TableReference]=None, value_set_codes_definitions: Optional[fhir_package.FhirPackageManager]=None) -> None:\n    self._value_set_codes_table = value_set_codes_table\n    self._value_set_codes_definitions = value_set_codes_definitions\n    self._use_resource_alias = None", "docstring": "Creates a BigQuerySqlInterpreter.\n\nArgs:\nvalue_set_codes_table: The name of the database table containing value set\ncode definitions. Used when building SQL for memberOf expressions. If\ngiven, value set definitions needed for memberOf expressions will be\nretrieved from this table if they can not be found in\n`value_set_codes_definitions`. If neither this nor\n`value_set_codes_definitions` is given, no memberOf SQL will be\ngenerated.\nvalue_set_codes_definitions: A package manager containing value set\ndefinitions which can be used to build SQL for memberOf expressions.\nThese value set definitions can be consulted in favor of using an\nexternal `value_set_codes_table`. If neither this nor\n`value_set_codes_definitions` is given, no memberOf SQL will be\ngenerated.", "source": "github-repos"}
{"code": "def get(self, blocking=True):\n    if self.closed:\n        raise PoolAlreadyClosedError('Connection pool is already closed.')\n    if (not self.limiter.acquire(blocking=blocking)):\n        return None\n    c = None\n    try:\n        c = self.idle_conns.pop()\n    except IndexError:\n        try:\n            c = self.connect_func()\n        except Exception:\n            self.limiter.release()\n            raise\n    return _ConnectionProxy(self, c)", "docstring": "Gets a connection.\n\nArgs:\nblocking: Whether to block when max_size connections are already in use.\nIf false, may return None.\n\nReturns:\nA connection to the database.\n\nRaises:\nPoolAlreadyClosedError: if close() method was already called on\nthis pool.", "source": "codesearchnet"}
{"code": "def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):\n    expectation = getattr(df, expectation_type)\n    results = list()\n    for column in columns:\n        results.append(expectation(column, *args, **kwargs))\n    return results", "docstring": "Creates an identical expectation for each of the given columns with the specified arguments, if any.\n\nArgs:\ndf (great_expectations.dataset): A great expectations dataset object.\ncolumns (list): A list of column names represented as strings.\nexpectation_type (string): The expectation type.\n\nRaises:\nKeyError if the provided column does not exist.\nAttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.\n\nReturns:\nA list of expectation results.", "source": "codesearchnet"}
{"code": "def load_model_from_hdf5(filepath, custom_objects=None, compile=True):\n    if h5py is None:\n        raise ImportError('`load_model` requires h5py.')\n    if not custom_objects:\n        custom_objects = {}\n    opened_new_file = not isinstance(filepath, h5py.File)\n    if opened_new_file:\n        f = h5py.File(filepath, mode='r')\n    else:\n        f = filepath\n    model = None\n    try:\n        model_config = f.attrs.get('model_config')\n        if model_config is None:\n            raise ValueError('No model found in config file.')\n        if hasattr(model_config, 'decode'):\n            model_config = model_config.decode('utf-8')\n        model_config = json_utils.decode(model_config)\n        model = model_config_lib.model_from_config(model_config, custom_objects=custom_objects)\n        load_weights_from_hdf5_group(f['model_weights'], model.layers)\n        if compile:\n            training_config = f.attrs.get('training_config')\n            if hasattr(training_config, 'decode'):\n                training_config = training_config.decode('utf-8')\n            if training_config is None:\n                logging.warning('No training configuration found in the save file, so the model was *not* compiled. Compile it manually.')\n                return model\n            training_config = json_utils.decode(training_config)\n            model.compile(**saving_utils.compile_args_from_training_config(training_config, custom_objects), from_serialized=True)\n            saving_utils.try_build_compiled_arguments(model)\n            if 'optimizer_weights' in f:\n                try:\n                    model.optimizer._create_all_weights(model.trainable_variables)\n                except (NotImplementedError, AttributeError):\n                    logging.warning('Error when creating the weights of optimizer {}, making it impossible to restore the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')\n                optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)\n                try:\n                    model.optimizer.set_weights(optimizer_weight_values)\n                except ValueError:\n                    logging.warning('Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')\n    finally:\n        if opened_new_file:\n            f.close()\n    return model", "docstring": "Loads a model saved via `save_model_to_hdf5`.\n\nArgs:\nfilepath: One of the following:\n- String, path to the saved model\n- `h5py.File` object from which to load the model\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\ncompile: Boolean, whether to compile the model\nafter loading.\n\nReturns:\nA Keras model instance. If an optimizer was found\nas part of the saved model, the model is already\ncompiled. Otherwise, the model is uncompiled and\na warning will be displayed. When `compile` is set\nto False, the compilation is omitted without any\nwarning.\n\nRaises:\nImportError: if h5py is not available.\nValueError: In case of an invalid savefile.", "source": "github-repos"}
{"code": "def _setup_transitions(tdef, states, prev=()):\n    trs = list(prev)\n    for transition in tdef:\n        if (len(transition) == 3):\n            (name, source, target) = transition\n            if (is_string(source) or isinstance(source, State)):\n                source = [source]\n            source = [states[src] for src in source]\n            target = states[target]\n            tr = Transition(name, source, target)\n        else:\n            raise TypeError((\"Elements of the 'transition' attribute of a workflow should be three-tuples; got %r instead.\" % (transition,)))\n        if any(((prev_tr.name == tr.name) for prev_tr in trs)):\n            trs = [(tr if (prev_tr.name == tr.name) else prev_tr) for prev_tr in trs]\n        else:\n            trs.append(tr)\n    return TransitionList(trs)", "docstring": "Create a TransitionList object from a 'transitions' Workflow attribute.\n\nArgs:\ntdef: list of transition definitions\nstates (StateList): already parsed state definitions.\nprev (TransitionList): transition definitions from a parent.\n\nReturns:\nTransitionList: the list of transitions defined in the 'tdef' argument.", "source": "codesearchnet"}
{"code": "def find_next_punctuation(text: str, start_idx=0):\n    for i in range(start_idx, len(text)):\n        if text[i] in ['.', '?', '!', '\\n']:\n            return i\n    return None", "docstring": "Find the index of the next punctuation mark.\n\nArgs:\ntext (`str`):\nString to examine\nstart_idx (`int`, *optional*)\nIndex where to start", "source": "github-repos"}
{"code": "def from_sub_model_configs(cls, semantic_config: BarkSemanticConfig, coarse_acoustics_config: BarkCoarseConfig, fine_acoustics_config: BarkFineConfig, codec_config: PretrainedConfig, **kwargs):\n    return cls(semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), codec_config=codec_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`BarkConfig`] (or a derived class) from bark sub-models configuration.\n\nReturns:\n[`BarkConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def _FormatHostname(self, event):\n    \n    hostname = self._output_mediator.GetHostname(event)\n    return self._SanitizeField(hostname)", "docstring": "Formats the hostname.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted hostname field.", "source": "juraj-google-style"}
{"code": "def _replace_deferred(self, arg, context):\n    \n    if isinstance(arg, UnboundVariable):\n      return context[arg]\n    elif isinstance(arg, _DeferredLayer):\n      \n      return arg._construct(context)\n    elif isinstance(arg, tuple):\n      return tuple((self._replace_deferred(x, context) for x in arg))\n    elif (isinstance(arg, collections.Sequence) and\n          not isinstance(arg, six.string_types)):\n      return [self._replace_deferred(x, context) for x in arg]\n    elif isinstance(arg, collections.Mapping):\n      return {k: self._replace_deferred(v, context)\n              for k, v in six.iteritems(arg)}\n    else:\n      return arg", "docstring": "This replaces all deferred nodes (UnboundVariables and _DeferredLayers).\n\nIf arg is a sequence or a dict, then it's deferred values are also replaced.\n\nArgs:\narg: The argument to replace. If a list or a dict, then all items are also\nreplaced.\ncontext: The context for this replacement.\nReturns:\nThe replaced values or arg if it is not a deferred node.", "source": "juraj-google-style"}
{"code": "def from_moy(cls, moy, leap_year=False):\n        \n        if not leap_year:\n            num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440,\n                                          260640, 305280, 349920, 393120, 437760,\n                                          480960, 525600)\n        else:\n            num_of_minutes_until_month = (0, 44640, 84960 + 1440, 129600 + 1440,\n                                          172800 + 1440, 217440 + 1440, 260640 + 1440,\n                                          305280 + 1440, 349920 + 1440, 393120 + 1440,\n                                          437760 + 1440, 480960 + 1440, 525600 + 1440)\n        \n        for monthCount in range(12):\n            if int(moy) < num_of_minutes_until_month[monthCount + 1]:\n                month = monthCount + 1\n                break\n        try:\n            day = int((moy - num_of_minutes_until_month[month - 1]) / (60 * 24)) + 1\n        except UnboundLocalError:\n            raise ValueError(\n                \"moy must be positive and smaller than 525600. Invalid input %d\" % (moy)\n            )\n        else:\n            hour = int((moy / 60) % 24)\n            minute = int(moy % 60)\n\n            return cls(month, day, hour, minute, leap_year)", "docstring": "Create Ladybug Datetime from a minute of the year.\n\nArgs:\nmoy: An integer value 0 <= and < 525600", "source": "juraj-google-style"}
{"code": "def _get_config_instance(group_or_term, session, **kwargs):\n    path = group_or_term._get_path()\n    cached = group_or_term._top._cached_configs.get(path)\n    if cached:\n        config = cached\n        created = False\n    else:\n        (config, created) = get_or_create(session, Config, **kwargs)\n    return (config, created)", "docstring": "Finds appropriate config instance and returns it.\n\nArgs:\ngroup_or_term (Group or Term):\nsession (Sqlalchemy session):\nkwargs (dict): kwargs to pass to get_or_create.\n\nReturns:\ntuple of (Config, bool):", "source": "codesearchnet"}
{"code": "def intersects(self, rect, edges=False):\n    if ((self.bottom > rect.top) or (self.top < rect.bottom) or (self.left > rect.right) or (self.right < rect.left)):\n        return False\n    if (not edges):\n        if ((self.bottom == rect.top) or (self.top == rect.bottom) or (self.left == rect.right) or (self.right == rect.left)):\n            return False\n    if (((self.left == rect.right) and (self.bottom == rect.top)) or ((self.left == rect.right) and (rect.bottom == self.top)) or ((rect.left == self.right) and (self.bottom == rect.top)) or ((rect.left == self.right) and (rect.bottom == self.top))):\n        return False\n    return True", "docstring": "Detect intersections between this rectangle and rect.\n\nArgs:\nrect (Rectangle): Rectangle to test for intersections.\nedges (bool): Accept edge touching rectangles as intersects or not\n\nReturns:\nbool: True if the rectangles intersect, False otherwise", "source": "codesearchnet"}
{"code": "def DeregisterMountPoint(cls, mount_point):\n    \n    if mount_point not in cls._mount_points:\n      raise KeyError('Mount point: {0:s} not set.'.format(mount_point))\n\n    del cls._mount_points[mount_point]", "docstring": "Deregisters a path specification mount point.\n\nArgs:\nmount_point (str): mount point identifier.\n\nRaises:\nKeyError: if the corresponding mount point is not set.", "source": "juraj-google-style"}
{"code": "def path_get_origin(p: tcod.path.AStar) -> Tuple[(int, int)]:\n    x = ffi.new('int *')\n    y = ffi.new('int *')\n    lib.TCOD_path_get_origin(p._path_c, x, y)\n    return (x[0], y[0])", "docstring": "Get the current origin position.\n\nThis point moves when :any:`path_walk` returns the next x,y step.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nTuple[int, int]: An (x, y) point.", "source": "codesearchnet"}
{"code": "def check_required_tags_compliance(self, resource):\n        \n\n        missing_tags = []\n        notes = []\n        resource_tags = {tag.key.lower(): tag.value for tag in resource.tags}\n\n        \n        if resource.resource_type in self.alert_schedule:\n            target_accounts = self.alert_schedule[resource.resource_type]['scope']\n        else:\n            target_accounts = self.alert_schedule['*']['scope']\n        if not (resource.account.account_name in target_accounts or '*' in target_accounts):\n            return missing_tags, notes\n\n        \n        if self.audit_ignore_tag.lower() in resource_tags:\n            return missing_tags, notes\n\n        required_tags = list(self.required_tags)\n\n        \n        if self.gdpr_enabled and resource.account.account_name in self.gdpr_accounts:\n            required_tags.append(self.gdpr_tag)\n\n        \n\n        \n        for key in [tag.lower() for tag in required_tags]:\n            if key not in resource_tags:\n                missing_tags.append(key)\n            elif not self.validate_tag(key, resource_tags[key]):\n                missing_tags.append(key)\n                notes.append('{} tag is not valid'.format(key))\n\n        return missing_tags, notes", "docstring": "Check whether a resource is compliance\n\nArgs:\nresource: A single resource\n\nReturns:\n`(list, list)`\nA tuple contains missing tags (if there were any) and notes", "source": "juraj-google-style"}
{"code": "def is_date(v) -> (bool, date):\n        \n        if isinstance(v, date):\n            return True, v\n        try:\n            reg = r'^([0-9]{4})(?:-(0[1-9]|1[0-2])(?:-(0[1-9]|[1-2][0-9]|3[0-1])(?:T' \\\n                  r'([0-5][0-9])(?::([0-5][0-9])(?::([0-5][0-9]))?)?)?)?)?$'\n            match = re.match(reg, v)\n            if match:\n                groups = match.groups()\n                patterns = ['%Y', '%m', '%d', '%H', '%M', '%S']\n                d = datetime.strptime('-'.join([x for x in groups if x]),\n                                      '-'.join([patterns[i] for i in range(len(patterns)) if groups[i]]))\n                return True, d\n        except:\n            pass\n        return False, v", "docstring": "Boolean function for checking if v is a date\n\nArgs:\nv:\nReturns: bool", "source": "juraj-google-style"}
{"code": "def clinvar_export(store, institute_id, case_name, variant_id):\n    (institute_obj, case_obj) = institute_and_case(store, institute_id, case_name)\n    pinned = [(store.variant(variant_id) or variant_id) for variant_id in case_obj.get('suspects', [])]\n    variant_obj = store.variant(variant_id)\n    return dict(today=str(date.today()), institute=institute_obj, case=case_obj, variant=variant_obj, pinned_vars=pinned)", "docstring": "Gather the required data for creating the clinvar submission form\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str): Institute ID\ncase_name(str): case ID\nvariant_id(str): variant._id\n\nReturns:\na dictionary with all the required data (case and variant level) to pre-fill in fields in the clinvar submission form", "source": "codesearchnet"}
{"code": "class ZoeDepthPreActResidualLayer(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.use_batch_norm = config.use_batch_norm_in_fusion_residual\n        use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm\n        self.activation1 = nn.ReLU()\n        self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n        self.activation2 = nn.ReLU()\n        self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n        if self.use_batch_norm:\n            self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)\n            self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)\n\n    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n        residual = hidden_state\n        hidden_state = self.activation1(hidden_state)\n        hidden_state = self.convolution1(hidden_state)\n        if self.use_batch_norm:\n            hidden_state = self.batch_norm1(hidden_state)\n        hidden_state = self.activation2(hidden_state)\n        hidden_state = self.convolution2(hidden_state)\n        if self.use_batch_norm:\n            hidden_state = self.batch_norm2(hidden_state)\n        return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\nconfig (`[ZoeDepthConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def send(url, data):\n    \n    validate(data)\n\n    return requests.post(url, json=data)", "docstring": "Sends an incoming message\n\nArgs:\nurl(str): the incoming hook url\ndata(dict): the sending data\n\nReturns:\nrequests.Response", "source": "juraj-google-style"}
{"code": "def _open_streaming_interface(self, connection_id, callback):\n        \n\n        try:\n            context = self.connections.get_context(connection_id)\n        except ArgumentError:\n            callback(connection_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self._logger.info(\"Attempting to enable streaming\")\n        self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))\n\n        try:\n            characteristic = context['services'][TileBusService][StreamingChar]\n        except KeyError:\n            self.connections.finish_operation(\n                connection_id,\n                False,\n                \"Can't find characteristic to open streaming interface\"\n            )\n            return\n\n        context['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)\n        context['parser'].context = connection_id\n\n        def on_report_chunk_received(report_chunk):\n            \n            context['parser'].add_data(report_chunk)\n\n        \n        self._register_notification_callback(\n            context['connection_handle'],\n            characteristic.value_handle,\n            on_report_chunk_received\n        )\n\n        self.bable.set_notification(\n            enabled=True,\n            connection_handle=context['connection_handle'],\n            characteristic=characteristic,\n            on_notification_set=[self._on_interface_opened, context],\n            on_notification_received=self._on_notification_received,\n            timeout=1.0,\n            sync=False\n        )", "docstring": "Enable streaming interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "async def set_typing(self, typing=hangouts_pb2.TYPING_TYPE_STARTED):\n        \n        \n        try:\n            await self._client.set_typing(\n                hangouts_pb2.SetTypingRequest(\n                    request_header=self._client.get_request_header(),\n                    conversation_id=hangouts_pb2.ConversationId(id=self.id_),\n                    type=typing,\n                )\n            )\n        except exceptions.NetworkError as e:\n            logger.warning('Failed to set typing status: {}'.format(e))\n            raise", "docstring": "Set your typing status in this conversation.\n\nArgs:\ntyping: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``,\nor ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing,\nrespectively. Defaults to ``TYPING_TYPE_STARTED``.\n\nRaises:\n.NetworkError: If typing status cannot be set.", "source": "juraj-google-style"}
{"code": "def universal_transformer_basic(layer_inputs, step, hparams, ffn_unit, attention_unit):\n    (state, inputs, memory) = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')\n    new_state = step_preprocess(state, step, hparams)\n    for i in range(hparams.num_inrecurrence_layers):\n        with tf.variable_scope(('rec_layer_%d' % i)):\n            new_state = ffn_unit(attention_unit(new_state))\n    return (new_state, inputs, memory)", "docstring": "Basic Universal Transformer.\n\nThis model is pretty similar to the vanilla transformer in which weights are\nshared between layers. For some tasks, this simple idea brings a\ngeneralization that is not achievable by playing with the size of the model\nor drop_out parameters in the vanilla transformer.\n\nArgs:\nlayer_inputs:\n- state: state\nstep: indicates number of steps taken so far\nhparams: model hyper-parameters\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\n\nReturns:\nlayer_output:\nnew_state: new state", "source": "codesearchnet"}
{"code": "def __call__(self, inputs, state, scope=None):\n    if scope is not None:\n        with vs.variable_scope(scope, custom_getter=self._rnn_get_variable) as scope:\n            return super(RNNCell, self).__call__(inputs, state, scope=scope)\n    else:\n        scope_attrname = 'rnncell_scope'\n        scope = getattr(self, scope_attrname, None)\n        if scope is None:\n            scope = vs.variable_scope(vs.get_variable_scope(), custom_getter=self._rnn_get_variable)\n            setattr(self, scope_attrname, scope)\n        with scope:\n            return super(RNNCell, self).__call__(inputs, state)", "docstring": "Run this RNN cell on inputs, starting from the given state.\n\nArgs:\ninputs: `2-D` tensor with shape `[batch_size, input_size]`.\nstate: if `self.state_size` is an integer, this should be a `2-D Tensor`\nwith shape `[batch_size, self.state_size]`.  Otherwise, if\n`self.state_size` is a tuple of integers, this should be a tuple with\nshapes `[batch_size, s] for s in self.state_size`.\nscope: VariableScope for the created subgraph; defaults to class name.\n\nReturns:\nA pair containing:\n\n- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.\n- New state: Either a single `2-D` tensor, or a tuple of tensors matching\nthe arity and shapes of `state`.", "source": "github-repos"}
{"code": "def _parse_header(self, data):\n        \n\n        (magic, word_size, byte_order, version, osabi, abi_version, _), data = \\\n            unpack('4sBBBBB7s', data[:16]), data[16:]\n\n        assert magic == self._ELF_MAGIC, 'Missing ELF magic'\n        assert word_size in (1, 2), 'Invalid word size'\n        assert byte_order in (1, 2), 'Invalid byte order'\n\n        assert version == 1, 'Invalid version'\n\n        self.osabi = self.OSABI(osabi)\n        self.abi_version = abi_version\n\n        endian = Target.Endian(byte_order - 1)\n        (type_, machine, version), data = unpack('HHI', data[:8], endian=endian), data[8:]\n\n        try:\n            self.type = self.Type(type_)\n        except ValueError:\n            self.type = self.Type.unknown\n\n        try:\n            self.machine = ELF.Machine(machine)\n        except ValueError:\n            self.machine = ELF.Machine.unknown\n\n        assert version == 1, 'Invalid version'\n\n        if self.machine is ELF.Machine.i386:\n            arch = Target.Arch.x86\n            assert word_size == 1, 'Unexpected ELF64 for machine type x86'\n            assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'\n        elif self.machine is ELF.Machine.x86_64:\n            arch = Target.Arch.x86\n            assert word_size == 2, 'Unexpected ELF32 for machine type x64_64'\n            assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'\n        elif self.machine is ELF.Machine.arm:\n            arch = Target.Arch.arm\n            assert word_size == 1, 'Unexpected ELF64 for machine type arm'\n        elif self.machine is ELF.Machine.aarch64:\n            arch = Target.Arch.arm\n            assert word_size == 2, 'Unexpected ELF32 for machine type aarch64'\n        else:\n            arch = Target.Arch.unknown\n\n        self.arch = arch\n        self.bits = 32 * word_size\n        self.endian = endian\n\n        if self.bits == 32:\n            fmt = 'IIIIHHHHHH'\n        else:\n            fmt = 'QQQIHHHHHH'\n\n        fmt_size = pack_size(fmt)\n        (self.entry, self.phoff, self.shoff, self.flags, self.hsize, self.phentsize,\n            self.phnum, self.shentsize, self.shnum, self.shstrndx) = \\\n            unpack(fmt, data[:fmt_size], target=self)", "docstring": "Parse the ELF header in ``data`` and populate the properties.\n\nArgs:\ndata(bytes): The ELF header.", "source": "juraj-google-style"}
{"code": "def _prepare_init_params_from_job_description(cls, job_details):\n    init_params = dict()\n    init_params['model_name'] = job_details['ModelName']\n    init_params['instance_count'] = job_details['TransformResources']['InstanceCount']\n    init_params['instance_type'] = job_details['TransformResources']['InstanceType']\n    init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId')\n    init_params['strategy'] = job_details.get('BatchStrategy')\n    init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith')\n    init_params['output_path'] = job_details['TransformOutput']['S3OutputPath']\n    init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId')\n    init_params['accept'] = job_details['TransformOutput'].get('Accept')\n    init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms')\n    init_params['max_payload'] = job_details.get('MaxPayloadInMB')\n    init_params['base_transform_job_name'] = job_details['TransformJobName']\n    return init_params", "docstring": "Convert the transform job description to init params that can be handled by the class constructor\n\nArgs:\njob_details (dict): the returned job details from a describe_transform_job API call.\n\nReturns:\ndict: The transformed init_params", "source": "codesearchnet"}
{"code": "def __init__(self, name=None, settings=None, instruments=None, scripts=None, log_function=None, data_path=None):\n        \n        QObject.__init__(self)\n\n        self._script_class = self.__class__.__name__\n\n        if name is None:\n            name = self.__class__.__name__\n        self.name = name\n\n\n        self._instruments = {}\n        if instruments is None:\n            instruments = {}\n        else:\n            assert isinstance(instruments, dict)\n            assert set(self._INSTRUMENTS.keys()) <= set(instruments.keys())\n\n        self.data_path = data_path\n\n        self.instruments = {key: instruments[key] for key in list(self._INSTRUMENTS.keys())}\n\n        self._scripts = {}\n        if scripts is None:\n            scripts = {}\n        self.scripts = scripts\n\n        \n        self.start_time = datetime.datetime.now()\n        self.end_time = self.start_time - datetime.timedelta(seconds=1)\n\n        self._settings = deepcopy(Parameter(self._DEFAULT_SETTINGS + Script._DEFAULT_SETTINGS))\n        self._settings.update({'tag':self.name.lower()})\n        if settings is not None:\n            self.update(settings)\n        self._abort = False\n        self.is_running = False\n\n        \n        \n        self.data = {}\n\n        \n        self.log_data = deque()\n        \n        self.log_function = log_function\n\n        \n        self._plot_refresh = True\n\n        self.progress = None\n\n\n        self._current_subscript_stage = {\n            'current_subscript': None,\n            'subscript_exec_count':{},\n            'subscript_exec_duration':{}\n        }", "docstring": "executes scripts and stores script parameters and settings\nArgs:\nname (optional):  name of script, if not provided take name of function\nsettings (optional): a Parameter object that contains all the information needed in the script\ninstruments (optional): instruments used in the script\nscripts (optional):  sub_scripts used in the script\nlog_function(optional): function reference that takes a string", "source": "juraj-google-style"}
{"code": "def stop_standing_subprocess(proc):\n    logging.debug('Stopping standing subprocess %d', proc.pid)\n    _kill_process_tree(proc)\n    if proc.stdout:\n        proc.stdout.close()\n    if proc.stderr:\n        proc.stderr.close()\n    proc.wait()\n    logging.debug('Stopped standing subprocess %d', proc.pid)", "docstring": "Stops a subprocess started by start_standing_subprocess.\n\nBefore killing the process, we check if the process is running, if it has\nterminated, Error is raised.\n\nCatches and ignores the PermissionError which only happens on Macs.\n\nArgs:\nproc: Subprocess to terminate.\n\nRaises:\nError: if the subprocess could not be stopped.", "source": "github-repos"}
{"code": "def __init__(self, name='', declarations=None):\n        \n        scopedef.scopedef_t.__init__(self, name)\n        if not declarations:\n            declarations = []\n        \n        self._declarations = declarations", "docstring": "Creates an object that describes a C++ namespace declaration.\n\nArgs:\nname (str): name of the namespace\ndeclarations (list[declaration_t]): list of declarations", "source": "juraj-google-style"}
{"code": "def type_decisioner(marc_xml, mono_callback, multimono_callback, periodical_callback):\n    marc_xml = _read_content_or_path(marc_xml)\n    record = MARCXMLRecord(marc_xml)\n    if (record.is_monographic or record.is_single_unit):\n        return mono_callback()\n    elif record.is_multi_mono:\n        return multimono_callback()\n    elif record.is_continuing:\n        return periodical_callback()\n    raise ValueError(\"Can't identify type of the `marc_xml`!\")", "docstring": "Detect type of the `marc_xml`. Call proper callback.\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nmono_callback (fn reference): Callback in case of monographic\npublications.\nmultimono_callback (fn reference): Callback used in case of\nmulti-monographic publications.\nperiodical_callback (fn reference): Callback used in case of periodical\npublications.\n\nReturns:\nobj: Content returned by the callback.\n\nRaises:\nValueError: In case that type couldn't be detected.", "source": "codesearchnet"}
{"code": "def _deserialize_audience(audience_map):\n    \n\n    for audience in audience_map.values():\n      condition_structure, condition_list = condition_helper.loads(audience.conditions)\n      audience.__dict__.update({\n        'conditionStructure': condition_structure,\n        'conditionList': condition_list\n      })\n\n    return audience_map", "docstring": "Helper method to de-serialize and populate audience map with the condition list and structure.\n\nArgs:\naudience_map: Dict mapping audience ID to audience object.\n\nReturns:\nDict additionally consisting of condition list and structure on every audience object.", "source": "juraj-google-style"}
{"code": "def call_servo(examples, serving_bundle):\n  \n  parsed_url = urlparse('http:\n  channel = implementations.insecure_channel(parsed_url.hostname,\n                                             parsed_url.port)\n  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)\n\n  if serving_bundle.use_predict:\n    request = predict_pb2.PredictRequest()\n  elif serving_bundle.model_type == 'classification':\n    request = classification_pb2.ClassificationRequest()\n  else:\n    request = regression_pb2.RegressionRequest()\n  request.model_spec.name = serving_bundle.model_name\n  if serving_bundle.model_version is not None:\n    request.model_spec.version.value = serving_bundle.model_version\n  if serving_bundle.signature is not None:\n    request.model_spec.signature_name = serving_bundle.signature\n\n  if serving_bundle.use_predict:\n    \n    \n    \n    request.inputs[serving_bundle.predict_input_tensor].CopyFrom(\n      tf.compat.v1.make_tensor_proto(\n        values=[ex.SerializeToString() for ex in examples],\n        dtype=types_pb2.DT_STRING))\n  else:\n    request.input.example_list.examples.extend(examples)\n\n  if serving_bundle.use_predict:\n    return common_utils.convert_predict_response(\n      stub.Predict(request, 30.0), serving_bundle) \n  elif serving_bundle.model_type == 'classification':\n    return stub.Classify(request, 30.0)  \n  else:\n    return stub.Regress(request, 30.0)", "docstring": "Send an RPC request to the Servomatic prediction service.\n\nArgs:\nexamples: A list of examples that matches the model spec.\nserving_bundle: A `ServingBundle` object that contains the information to\nmake the serving request.\n\nReturns:\nA ClassificationResponse or RegressionResponse proto.", "source": "juraj-google-style"}
{"code": "def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:\n        \n        return self._run(self.whatIfOrderAsync(contract, order))", "docstring": "Retrieve commission and margin impact without actually\nplacing the order. The given order will not be modified in any way.\n\nThis method is blocking.\n\nArgs:\ncontract: Contract to test.\norder: Order to test.", "source": "juraj-google-style"}
{"code": "def to_ast(self):\n    if self == STANDARD_OPTIONS:\n        return parser.parse_expression('ag__.STD')\n    template = '\\n      ag__.ConversionOptions(\\n          recursive=recursive_val,\\n          user_requested=user_requested_val,\\n          optional_features=optional_features_val,\\n          internal_convert_user_code=internal_convert_user_code_val)\\n    '\n\n    def list_of_features(values):\n        return parser.parse_expression('({})'.format(', '.join(('ag__.{}'.format(str(v)) for v in values))))\n    expr_ast = templates.replace(template, recursive_val=parser.parse_expression(str(self.recursive)), user_requested_val=parser.parse_expression(str(self.user_requested)), internal_convert_user_code_val=parser.parse_expression(str(self.internal_convert_user_code)), optional_features_val=list_of_features(self.optional_features))\n    return expr_ast[0].value", "docstring": "Returns a representation of this object as an AST node.\n\nThe AST node encodes a constructor that would create an object with the\nsame contents.\n\nReturns:\nast.Node", "source": "github-repos"}
{"code": "def GetFeatureService(self, itemId, returnURLOnly=False):\n    admin = None\n    item = None\n    try:\n        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n        if (self._securityHandler.valid == False):\n            self._valid = self._securityHandler.valid\n            self._message = self._securityHandler.message\n            return None\n        item = admin.content.getItem(itemId=itemId)\n        if (item.type == 'Feature Service'):\n            if returnURLOnly:\n                return item.url\n            else:\n                fs = arcrest.agol.FeatureService(url=item.url, securityHandler=self._securityHandler)\n                if ((fs.layers is None) or (len(fs.layers) == 0)):\n                    fs = arcrest.ags.FeatureService(url=item.url)\n                return fs\n        return None\n    except:\n        (line, filename, synerror) = trace()\n        raise common.ArcRestHelperError({'function': 'GetFeatureService', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        admin = None\n        item = None\n        del item\n        del admin\n        gc.collect()", "docstring": "Obtains a feature service by item ID.\n\nArgs:\nitemId (str): The feature service's item ID.\nreturnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``.\nReturns:\nWhen ``returnURLOnly`` is ``True``, the URL of the feature service is returned.\n\nWhen ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.", "source": "codesearchnet"}
{"code": "def limits(self, clip_negative=True):\n        \n        min, max = dtype_range[self.as_numpy_dtype]  \n        if clip_negative:\n            min = 0  \n        return min, max", "docstring": "Return intensity limits, i.e. (min, max) tuple, of the dtype.\nArgs:\nclip_negative : bool, optional\nIf True, clip the negative range (i.e. return 0 for min intensity)\neven if the image dtype allows negative values.\nReturns\nmin, max : tuple\nLower and upper intensity limits.", "source": "juraj-google-style"}
{"code": "def EnablePlugins(self, plugin_includes):\n    \n    super(SyslogParser, self).EnablePlugins(plugin_includes)\n\n    self._plugin_by_reporter = {}\n    for plugin in self._plugins:\n      self._plugin_by_reporter[plugin.REPORTER] = plugin", "docstring": "Enables parser plugins.\n\nArgs:\nplugin_includes (list[str]): names of the plugins to enable, where None\nor an empty list represents all plugins. Note that the default plugin\nis handled separately.", "source": "juraj-google-style"}
{"code": "def find_elements_by_name(self, name, update=False) -> Elements:\n    return self.find_elements(by=By.NAME, value=name, update=update)", "docstring": "Finds multiple elements by name.\n\nArgs:\nname: The name of the elements to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nA list with elements if any was found. An empty list if not.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelements = driver.find_elements_by_name('foo')", "source": "codesearchnet"}
{"code": "def _parse_description(self, config):\n    value = None\n    match = re.search('description (.+)$', config, re.M)\n    if match:\n        value = match.group(1)\n    return dict(description=value)", "docstring": "Scans the specified config block and returns the description value\n\nArgs:\nconfig (str): The interface config block to scan\n\nReturns:\ndict: Returns a dict object with the description value retrieved\nfrom the config block.  If the description value is not\nconfigured, None is returned as the value.  The returned dict\nis intended to be merged into the interface resource dict.", "source": "codesearchnet"}
{"code": "def __init__(self, num_buckets: int, lower: float, upper: float):\n    if num_buckets < 2:\n        raise ValueError(f'num_buckets is {num_buckets}, must be at least 2 for simulated quantization.')\n    self.num_buckets = num_buckets\n    self.lower = lower\n    self.upper = upper", "docstring": "Simulated quantizaiton configuration.\n\nArgs:\nnum_buckets: The number of quantization buckets, must be atleast 2.\nlower: The lower bound for the quantization range.\nupper: The upper bound for the quantization range.\n\nReturns:\n`QuantizationConfig`.\n\nRaises:\nValueError: if `num_buckets` is less than 2.", "source": "github-repos"}
{"code": "def set_config(self, key, value):\n        \n\n        keyname = \"config:\" + key\n\n        self.kvstore.set(keyname, value)", "docstring": "Set a persistent config key to a value, stored in the registry\n\nArgs:\nkey (string): The key name\nvalue (string): The key value", "source": "juraj-google-style"}
{"code": "def _dict_mapping_to_pb(mapping, proto_type):\n    converted_pb = getattr(trace_pb2, proto_type)()\n    ParseDict(mapping, converted_pb)\n    return converted_pb", "docstring": "Convert a dict to protobuf.\n\nArgs:\nmapping (dict): A dict that needs to be converted to protobuf.\nproto_type (str): The type of the Protobuf.\n\nReturns:\nAn instance of the specified protobuf.", "source": "codesearchnet"}
{"code": "def get_many(self, type: Type[T], query: Mapping[(str, Any)], context: PipelineContext=None) -> Iterable[T]:\n    pass", "docstring": "Gets a query from the data source, which contains a request for multiple objects.\n\nArgs:\nquery: The query being requested (contains a request for multiple objects).\ncontext: The context for the extraction (mutable).\n\nReturns:\nThe requested objects.", "source": "codesearchnet"}
{"code": "def _apply_colocation_attr_map(colocation_attr_map, absolute_import_scope):\n    graph = tf_v1.get_default_graph()\n    for op in graph.get_operations():\n        if (not op.name.startswith((absolute_import_scope + '/'))):\n            continue\n        try:\n            class_values = op.get_attr('_class')\n        except ValueError:\n            continue\n        new_attr_value = tf_v1.AttrValue()\n        new_coloc_groups = []\n        for class_value in class_values:\n            if class_value.startswith(tf.compat.as_bytes('loc:@')):\n                if (class_value not in colocation_attr_map):\n                    rewritten_class_value = [class_value]\n                else:\n                    rewritten_class_value = colocation_attr_map[class_value].GetConsistentValueOrRaise('Failed to rewrite colocation constraints while applying hub.Module:\\nThe module graph contains a node {op!r} that has a colocation constraint {class_value!r} with ambiguous rewriting {old_value!r} vs {new_value!r} because {old_reason} and {new_reason}, respectively.\\nTo fix, avoid publishing a module with inputs comprising multiple outputs of one op that is referenced in tf.colocate_with(...) constraints on other ops.', {'op': op.name, 'class_value': class_value})\n                new_coloc_groups.extend(rewritten_class_value)\n            else:\n                new_attr_value.list.s.append(class_value)\n        new_coloc_groups = sorted(set(new_coloc_groups))\n        new_attr_value.list.s.extend(new_coloc_groups)\n        op._set_attr('_class', new_attr_value)\n        if new_coloc_groups:\n            new_coloc_device = ''\n            for new_coloc_group in new_coloc_groups:\n                assert new_coloc_group.startswith(tf.compat.as_bytes('loc:@'))\n                new_coloc_target_op = graph.get_operation_by_name(tf.compat.as_str_any(new_coloc_group[5:]))\n                new_coloc_device = new_coloc_target_op.device\n                if new_coloc_device:\n                    break\n            op._set_device(new_coloc_device)", "docstring": "Rewrites colocation constraints in the current default graph.\n\nNodes in `absolute_import_scope` get their \"_class\" attr lists rewritten\naccording to `colocation_attr_map`: each entry that matches a key gets\nreplaced by the associated values (with deduplication). The node's device\nis updated accordingly.\n\nArgs:\ncolocation_attr_map: as returned by _build_colocation_attr_map.\nabsolute_import_scope: as for fix_colocation_after_import.\n\nRaises:\nValueError: if rewriting runs into an inconsistent value in\n`colocation_attr_map`.", "source": "codesearchnet"}
{"code": "def load(self, languages=[]):\n        \n        duckling_load = self.clojure.var(\"duckling.core\", \"load!\")\n        clojure_hashmap = self.clojure.var(\"clojure.core\", \"hash-map\")\n        clojure_list = self.clojure.var(\"clojure.core\", \"list\")\n\n        if languages:\n            \n            iso_languages = [Language.convert_to_iso(lang) for lang in languages]\n\n            duckling_load.invoke(\n                clojure_hashmap.invoke(\n                    self.clojure.read(':languages'),\n                    clojure_list.invoke(*iso_languages)\n                )\n            )\n        else:\n            duckling_load.invoke()\n\n        self._is_loaded = True", "docstring": "Loads the Duckling corpus.\n\nLanguages can be specified, defaults to all.\n\nArgs:\nlanguages: Optional parameter to specify languages,\ne.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. [\"en\", \"fr\"])", "source": "juraj-google-style"}
{"code": "def main(event_loop=None):\n    \n    context, credentials = get_context_from_cmdln(sys.argv[1:])\n    log.info(\"Scriptworker starting up at {} UTC\".format(arrow.utcnow().format()))\n    cleanup(context)\n    context.event_loop = event_loop or asyncio.get_event_loop()\n\n    done = False\n\n    async def _handle_sigterm():\n        log.info(\"SIGTERM received; shutting down\")\n        nonlocal done\n        done = True\n        if context.running_tasks is not None:\n            await context.running_tasks.cancel()\n\n    context.event_loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.ensure_future(_handle_sigterm()))\n\n    while not done:\n        try:\n            context.event_loop.run_until_complete(async_main(context, credentials))\n        except Exception:\n            log.critical(\"Fatal exception\", exc_info=1)\n            raise", "docstring": "Scriptworker entry point: get everything set up, then enter the main loop.\n\nArgs:\nevent_loop (asyncio.BaseEventLoop, optional): the event loop to use.\nIf None, use ``asyncio.get_event_loop()``. Defaults to None.", "source": "juraj-google-style"}
{"code": "def GetMessage(self, log_source, lcid, message_identifier):\n    \n    event_log_provider_key = self._GetEventLogProviderKey(log_source)\n    if not event_log_provider_key:\n      return None\n\n    generator = self._GetMessageFileKeys(event_log_provider_key)\n    if not generator:\n      return None\n\n    \n    message_string = None\n    for message_file_key in generator:\n      message_string = self._GetMessage(\n          message_file_key, lcid, message_identifier)\n\n      if message_string:\n        break\n\n    if self._string_format == 'wrc':\n      message_string = self._ReformatMessageString(message_string)\n\n    return message_string", "docstring": "Retrieves a specific message for a specific Event Log source.\n\nArgs:\nlog_source (str): Event Log source.\nlcid (int): language code identifier (LCID).\nmessage_identifier (int): message identifier.\n\nReturns:\nstr: message string or None if not available.", "source": "juraj-google-style"}
{"code": "def split(x, axis=0):\n    \n    from .function_bases import split as split_base\n    return split_base(x, axis, x.shape[axis])", "docstring": "Split arrays at the specified axis.\n\nIt returns a number corresponding the size of the given\naxis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.\n\nArgs:\nx(~nnabla.Variable): N-D array\naxis(int): Axis\n\nReturns: A :obj:`tuple` of :obj:`~nnabla.Variable` s\n\nSee Also:\n:func:`nnabla.function_bases.split`.", "source": "juraj-google-style"}
{"code": "def resolve_label_conflict(mapping, old_labels=None, new_labels=None):\n    if (old_labels is None):\n        old_labels = set(mapping)\n    if (new_labels is None):\n        new_labels = set(itervalues(mapping))\n    counter = itertools.count((2 * len(mapping)))\n    old_to_intermediate = {}\n    intermediate_to_new = {}\n    for (old, new) in iteritems(mapping):\n        if (old == new):\n            continue\n        if ((old in new_labels) or (new in old_labels)):\n            lbl = next(counter)\n            while ((lbl in new_labels) or (lbl in old_labels)):\n                lbl = next(counter)\n            old_to_intermediate[old] = lbl\n            intermediate_to_new[lbl] = new\n        else:\n            old_to_intermediate[old] = new\n    return (old_to_intermediate, intermediate_to_new)", "docstring": "Resolve a self-labeling conflict by creating an intermediate labeling.\n\nArgs:\nmapping (dict):\nA dict mapping the current variable labels to new ones.\n\nold_labels (set, optional, default=None):\nThe keys of mapping. Can be passed in for performance reasons. These are not checked.\n\nnew_labels (set, optional, default=None):\nThe values of mapping. Can be passed in for performance reasons. These are not checked.\n\nReturns:\ntuple: A 2-tuple containing:\n\ndict: A map from the keys of mapping to an intermediate labeling\n\ndict: A map from the intermediate labeling to the values of mapping.", "source": "codesearchnet"}
{"code": "def needkwargs(*argnames):\n    \n    required = set(argnames)\n\n    def decorator(func):\n        def inner(*args, **kwargs):\n            missing = required - set(kwargs)\n            if missing:\n                err = \"%s kwargs are missing.\" % list(missing)\n                raise ValueError(err)\n            return func(*args, **kwargs)\n        return inner\n    return decorator", "docstring": "Function decorator which checks that the decorated function is called\nwith a set of required kwargs.\n\nArgs:\n*argnames: String keyword argument names.\n\nRaises:\nValueError: If a required kwarg is missing in the decorated function\ncall.", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n        \n        \n        optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False}\n        assert 'wind_speed' in data, 'Required key \"wind_speed\" is missing!'\n        for key, val in optional_keys.items():\n            if key not in data:\n                data[key] = val\n\n        return cls(data['wind_speed'], data['wind_direction'], data['rain'],\n                   data['snow_on_ground'])", "docstring": "Create a Wind Condition from a dictionary.\n\nArgs:\ndata = {\n\"wind_speed\": float,\n\"wind_direction\": float,\n\"rain\": bool,\n\"snow_on_ground\": bool}", "source": "juraj-google-style"}
{"code": "def __init__(self, recognizer: IRecognizer, node: yaml.Node) -> None:\n        \n        self.__recognizer = recognizer\n        self.yaml_node = node", "docstring": "Create an UnknownNode for a particular mapping node.\n\nThe member functions will act on the contained node.\n\nArgs:\nnode: The node to operate on.", "source": "juraj-google-style"}
{"code": "def from_string(string):\n        \n        lines = [line.strip() for line in string.splitlines()]\n\n        comment = lines[0]\n        num_kpts = int(lines[1].split()[0].strip())\n        style = lines[2].lower()[0]\n\n        \n        if style == \"a\":\n            return Kpoints.automatic(int(lines[3]))\n\n        coord_pattern = re.compile(r'^\\s*([\\d+.\\-Ee]+)\\s+([\\d+.\\-Ee]+)\\s+'\n                                   r'([\\d+.\\-Ee]+)')\n\n        \n        if style == \"g\" or style == \"m\":\n            kpts = [int(i) for i in lines[3].split()]\n            kpts_shift = (0, 0, 0)\n            if len(lines) > 4 and coord_pattern.match(lines[4]):\n                try:\n                    kpts_shift = [float(i) for i in lines[4].split()]\n                except ValueError:\n                    pass\n            return Kpoints.gamma_automatic(kpts, kpts_shift) if style == \"g\" \\\n                else Kpoints.monkhorst_automatic(kpts, kpts_shift)\n\n        \n        if num_kpts <= 0:\n            style = Kpoints.supported_modes.Cartesian if style in \"ck\" \\\n                else Kpoints.supported_modes.Reciprocal\n            kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]\n            kpts_shift = [float(i) for i in lines[6].split()]\n            return Kpoints(comment=comment, num_kpts=num_kpts, style=style,\n                           kpts=kpts, kpts_shift=kpts_shift)\n\n        \n        if style == \"l\":\n            coord_type = \"Cartesian\" if lines[3].lower()[0] in \"ck\" \\\n                else \"Reciprocal\"\n            style = Kpoints.supported_modes.Line_mode\n            kpts = []\n            labels = []\n            patt = re.compile(r'([e0-9.\\-]+)\\s+([e0-9.\\-]+)\\s+([e0-9.\\-]+)'\n                              r'\\s*!*\\s*(.*)')\n            for i in range(4, len(lines)):\n                line = lines[i]\n                m = patt.match(line)\n                if m:\n                    kpts.append([float(m.group(1)), float(m.group(2)),\n                                 float(m.group(3))])\n                    labels.append(m.group(4).strip())\n            return Kpoints(comment=comment, num_kpts=num_kpts, style=style,\n                           kpts=kpts, coord_type=coord_type, labels=labels)\n\n        \n        style = Kpoints.supported_modes.Cartesian if style in \"ck\" \\\n            else Kpoints.supported_modes.Reciprocal\n        kpts = []\n        kpts_weights = []\n        labels = []\n        tet_number = 0\n        tet_weight = 0\n        tet_connections = None\n\n        for i in range(3, 3 + num_kpts):\n            toks = lines[i].split()\n            kpts.append([float(j) for j in toks[0:3]])\n            kpts_weights.append(float(toks[3]))\n            if len(toks) > 4:\n                labels.append(toks[4])\n            else:\n                labels.append(None)\n        try:\n            \n            if lines[3 + num_kpts].strip().lower()[0] == \"t\":\n                toks = lines[4 + num_kpts].split()\n                tet_number = int(toks[0])\n                tet_weight = float(toks[1])\n                tet_connections = []\n                for i in range(5 + num_kpts, 5 + num_kpts + tet_number):\n                    toks = lines[i].split()\n                    tet_connections.append((int(toks[0]),\n                                            [int(toks[j])\n                                             for j in range(1, 5)]))\n        except IndexError:\n            pass\n\n        return 
Kpoints(comment=comment, num_kpts=num_kpts,\n                       style=Kpoints.supported_modes[str(style)],\n                       kpts=kpts, kpts_weights=kpts_weights,\n                       tet_number=tet_number, tet_weight=tet_weight,\n                       tet_connections=tet_connections, labels=labels)", "docstring": "Reads a Kpoints object from a KPOINTS string.\n\nArgs:\nstring (str): KPOINTS string.\n\nReturns:\nKpoints object", "source": "juraj-google-style"}
{"code": "def testEmptyTensors(self, drop_remainder):\n    new_batch_size = 4\n    dataset = dataset_ops.Dataset.range(8)\n    dataset = dataset.map(lambda x: array_ops.reshape((), (5, 0)))\n    dataset = dataset.batch(2)\n    rebatched_dataset = dataset.rebatch(batch_size=new_batch_size, drop_remainder=drop_remainder)\n    expected_output = [array_ops.reshape((), (new_batch_size, 5, 0)) for _ in range(8 \n    self.assertDatasetProduces(rebatched_dataset, expected_output)", "docstring": "Tests empty tensors case.\n\nArgs:\ndrop_remainder: whether to drop the remainder.\n\nThe implementation of rebatch might move the input data.\nThis test ensures the empty buffer is handled correctly.", "source": "github-repos"}
{"code": "def splitdrive(path):\n    \n    relative = get_instance(path).relpath(path)\n    drive = path.rsplit(relative, 1)[0]\n    if drive and not drive[-2:] == '\n        \n        relative = '/' + relative\n        drive = drive.rstrip('/')\n    return drive, relative", "docstring": "Split the path into a pair (drive, tail) where drive is either a\nmount point or the empty string. On systems which do not use drive\nspecifications, drive will always be the empty string.\n\nIn all cases, drive + tail will be the same as path.\n\nEquivalent to \"os.path.splitdrive\".\n\nArgs:\npath (path-like object): Path or URL.\n\nReturns:\ntuple of str: drive, tail.", "source": "juraj-google-style"}
{"code": "def storage_systems(self):\n    if (not self.__storage_systems):\n        self.__storage_systems = StorageSystems(self.__connection)\n    return self.__storage_systems", "docstring": "Gets the StorageSystems API client.\n\nReturns:\nStorageSystems:", "source": "codesearchnet"}
{"code": "def get_file_link(self, file_key):\n\t\t\n\t\t\n\t\tself._raise_unimplemented_error()\n\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.files_suffix,\n\t\t\t\t\t\tfile_key,\n\t\t\t\t\t\tself.file_link_suffix,\n\t\t\t\t\t\t])\n\t\treturn self._req('get', uri)", "docstring": "Gets link to file\nArgs:\nfile_key\t\tkey for the file\nreturn\t\t\t(status code, ?)", "source": "juraj-google-style"}
{"code": "def from_json(cls, raw):\n        \n        bcls = None\n        if 'webLink' in raw:\n            bcls = WebLink\n        elif 'topicCategory' in raw:\n            bcls = Category\n        elif 'taskAssist' in raw:\n            bcls = TaskAssist\n        elif 'context' in raw:\n            bcls = Context\n\n        if bcls is None:\n            logger.warning('Unknown annotation type: %s', raw.keys())\n            return None\n        annotation = bcls()\n        annotation.load(raw)\n\n        return annotation", "docstring": "Helper to construct an annotation from a dict.\n\nArgs:\nraw (dict): Raw annotation representation.\n\nReturns:\nNode: An Annotation object or None.", "source": "juraj-google-style"}
{"code": "def _reduction_a_cell(ip, p, filters, block_id=None):\n    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1\n    with backend.name_scope(f'reduction_A_block_{block_id}'):\n        p = _adjust_block(p, ip, filters, block_id)\n        h = layers.Activation('relu')(ip)\n        h = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name=f'reduction_conv_1_{block_id}', use_bias=False, kernel_initializer='he_normal')(h)\n        h = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=0.001, name=f'reduction_bn_1_{block_id}')(h)\n        h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3), name=f'reduction_pad_1_{block_id}')(h)\n        with backend.name_scope('block_1'):\n            x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), block_id=f'reduction_left1_{block_id}')\n            x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f'reduction_right1_{block_id}')\n            x1 = layers.add([x1_1, x1_2], name=f'reduction_add_1_{block_id}')\n        with backend.name_scope('block_2'):\n            x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_left2_{block_id}')(h3)\n            x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f'reduction_right2_{block_id}')\n            x2 = layers.add([x2_1, x2_2], name=f'reduction_add_2_{block_id}')\n        with backend.name_scope('block_3'):\n            x3_1 = layers.AveragePooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_left3_{block_id}')(h3)\n            x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), block_id=f'reduction_right3_{block_id}')\n            x3 = layers.add([x3_1, x3_2], name=f'reduction_add3_{block_id}')\n        with backend.name_scope('block_4'):\n            x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same', name=f'reduction_left4_{block_id}')(x1)\n            x4 = layers.add([x2, x4])\n        with backend.name_scope('block_5'):\n            x5_1 = _separable_conv_block(x1, filters, (3, 3), block_id=f'reduction_left4_{block_id}')\n            x5_2 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name=f'reduction_right5_{block_id}')(h3)\n            x5 = layers.add([x5_1, x5_2], name=f'reduction_add4_{block_id}')\n        x = layers.concatenate([x2, x3, x4, x5], axis=channel_dim, name=f'reduction_concat_{block_id}')\n        return (x, ip)", "docstring": "Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).\n\nArgs:\nip: Input tensor `x`\np: Input tensor `p`\nfilters: Number of output filters\nblock_id: String block_id\n\nReturns:\nA Keras tensor", "source": "github-repos"}
{"code": "def ModuleHelp(self, module):\n    \n    helplist = []\n    self.__RenderOurModuleKeyFlags(module, helplist)\n    return '\\n'.join(helplist)", "docstring": "Describe the key flags of a module.\n\nArgs:\nmodule: A module object or a module name (a string).\n\nReturns:\nstring describing the key flags of a module.", "source": "juraj-google-style"}
{"code": "def get_learning_rate(self, iter):\n        \n        lr = self.scheduler.get_learning_rate(iter)\n        if iter < self.warmup_iter:\n            lr *= (iter + 1) * 1.0 / self.warmup_iter\n        return lr", "docstring": "Get learning rate with exponential decay based on current iteration.\n\nArgs:\niter (int): Current iteration (starting with 0).\n\nReturns:\nfloat: Learning rate", "source": "juraj-google-style"}
{"code": "def __init__(self, wrapped, exit_callback):\n        \n        Future.__init__(self)\n        wrapped.add_done_callback(self._done_callback)\n        self._exit_callback = exit_callback\n        self._wrapped = wrapped", "docstring": "Constructor.\n\nArgs:\nwrapped (Future): the original Future object (to wrap)\nexit_callback: the exit callback to call at the end of\nthe block", "source": "juraj-google-style"}
{"code": "def PatternMatch(regex):\n    pattern = re.compile(regex)\n    return (lambda text: ((- 1) if (pattern.search(text) is None) else 0))", "docstring": "Compute the score of a text by determing if a pattern matches.\n\nExample:\n>>> fitness = PatternMatch(\"flag{.*}\")\n>>> fitness(\"flag{example}\")\n0\n\n>>> fitness(\"junk\")\n-1\n\nArgs:\nregex (str): regular expression string to use as a pattern", "source": "codesearchnet"}
{"code": "def get_learning_rate(self, iter):\n    return (self.init_lr * ((1.0 - ((iter * 1.0) / self.max_iter)) ** self.power))", "docstring": "Get learning rate with polymomial decay based on current iteration.\n\nArgs:\niter (int): current iteration (starting with 0).\n\nReturns:\nfloat: Learning rate", "source": "codesearchnet"}
{"code": "def add_event(self, event):\n    self._warn_if_event_writer_is_closed()\n    self.event_writer.add_event(event)", "docstring": "Adds an event to the event file.\n\nArgs:\nevent: An `Event` protocol buffer.", "source": "github-repos"}
{"code": "def __init__(self, config_manager, backend):\n    \n    self._config_manager = config_manager\n    self._backend = backend", "docstring": "Initializes an instance of the DiscoveryService.\n\nArgs:\nconfig_manager: An instance of ApiConfigManager.\nbackend: An _ApiServer instance for API config generation.", "source": "juraj-google-style"}
{"code": "def MultiHeadedAttention(\n    feature_depth, num_heads=8, dropout=0.0, mode='train'):\n  \n  return combinators.Serial(\n      combinators.Parallel(\n          combinators.Branch(num_branches=3),  \n          combinators.Identity()  \n      ),\n      MultiHeadedAttentionQKV(  \n          feature_depth, num_heads=num_heads, dropout=dropout, mode=mode),\n  )", "docstring": "Transformer-style multi-headed attention.\n\nAccepts inputs of the form (x, mask) and constructs (q, k, v) from x.\n\nArgs:\nfeature_depth: int:  depth of embedding\nnum_heads: int: number of attention heads\ndropout: float: dropout rate\nmode: str: 'train' or 'eval'\n\nReturns:\nMulti-headed self-attention layer.", "source": "juraj-google-style"}
{"code": "def __init__(self, failfast=False, save_tests=False, report_template=None, report_dir=None,\n        log_level=\"INFO\", log_file=None):\n        \n        self.exception_stage = \"initialize HttpRunner()\"\n        kwargs = {\n            \"failfast\": failfast,\n            \"resultclass\": report.HtmlTestResult\n        }\n        self.unittest_runner = unittest.TextTestRunner(**kwargs)\n        self.test_loader = unittest.TestLoader()\n        self.save_tests = save_tests\n        self.report_template = report_template\n        self.report_dir = report_dir\n        self._summary = None\n        if log_file:\n            logger.setup_logger(log_level, log_file)", "docstring": "initialize HttpRunner.\n\nArgs:\nfailfast (bool): stop the test run on the first error or failure.\nsave_tests (bool): save loaded/parsed tests to JSON file.\nreport_template (str): report template file path, template should be in Jinja2 format.\nreport_dir (str): html report save directory.\nlog_level (str): logging level.\nlog_file (str): log file path.", "source": "juraj-google-style"}
{"code": "def set_all_curriculums_to_lesson_num(self, lesson_num):\n        \n        for _, curriculum in self.brains_to_curriculums.items():\n            curriculum.lesson_num = lesson_num", "docstring": "Sets all the curriculums in this meta curriculum to a specified\nlesson number.\n\nArgs:\nlesson_num (int): The lesson number which all the curriculums will\nbe set to.", "source": "juraj-google-style"}
{"code": "def getContextsForTerm(self, term, getFingerprint=None, startIndex=0, maxResults=5):\n        \n        return self._terms.getContextsForTerm(self._retina, term, getFingerprint, startIndex, maxResults)", "docstring": "Get the contexts for a given term\nArgs:\nterm, str: A term in the retina (required)\ngetFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)\nstartIndex, int: The start-index for pagination (optional)\nmaxResults, int: Max results per page (optional)\nReturns:\nlist of Context\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def add_column(self, column_name, column_values):\n    \n    \n    if isinstance(column_values, list) and isinstance(column_values[0], list):\n      raise ValueError('\"column_values\" must be a flat list, but we detected '\n                       'that its first entry is a list')\n\n    if isinstance(column_values, np.ndarray) and column_values.ndim != 1:\n      raise ValueError('\"column_values\" should be of rank 1, '\n                       'but is of rank %d' % column_values.ndim)\n    if len(column_values) != self.num_points:\n      raise ValueError('\"column_values\" should be of length %d, but is of '\n                       'length %d' % (self.num_points, len(column_values)))\n    if column_name in self.name_to_values:\n      raise ValueError('The column name \"%s\" is already used' % column_name)\n\n    self.column_names.append(column_name)\n    self.name_to_values[column_name] = column_values", "docstring": "Adds a named column of metadata values.\n\nArgs:\ncolumn_name: Name of the column.\ncolumn_values: 1D array/list/iterable holding the column values. Must be\nof length `num_points`. The i-th value corresponds to the i-th point.\n\nRaises:\nValueError: If `column_values` is not 1D array, or of length `num_points`,\nor the `name` is already used.", "source": "juraj-google-style"}
{"code": "def export(self, input_ids: Optional[torch.Tensor]=None, cache_position: Optional[torch.Tensor]=None, dynamic_shapes: Optional[dict]=None, strict: Optional[bool]=None) -> torch.export.ExportedProgram:\n    ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap)\n    ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa'])\n    self.model.model.config._attn_implementation = 'sdpa_without_vmap'\n    example_input_ids = input_ids if input_ids is not None else torch.tensor([[1]], dtype=torch.long)\n    example_cache_position = cache_position if cache_position is not None else torch.tensor([0], dtype=torch.long)\n    exported_program = torch.export.export(self.model, args=(example_input_ids, example_cache_position), kwargs={}, dynamic_shapes=dynamic_shapes, strict=strict if strict is not None else True)\n    return exported_program", "docstring": "Export the wrapped module using `torch.export`.\n\nArgs:\ninput_ids (`Optional[torch.Tensor]`):\nTensor representing current input token id to the module. If not provided, a default tensor will be used.\ncache_position (`Optional[torch.Tensor]`):\nTensor representing current input position in the cache. If not provided, a default tensor will be used.\ndynamic_shapes (`Optional[dict]`):\nDynamic shapes to use for export if specified.\nstrict(`Optional[bool]`):\nFlag to instruct `torch.export` to use `torchdynamo`.", "source": "github-repos"}
{"code": "def start_upsert(ini_data):\n    \n    stack_driver = CloudStackUtility(ini_data)\n    poll_stack = not ini_data.get('no_poll', False)\n    if stack_driver.upsert():\n        logging.info('stack create/update was started successfully.')\n\n        if poll_stack:\n            stack_tool = None\n            try:\n                profile = ini_data.get('environment', {}).get('profile')\n                if profile:\n                    boto3_session = boto3.session.Session(profile_name=profile)\n                else:\n                    boto3_session = boto3.session.Session()\n\n                region = ini_data['environment']['region']\n                stack_name = ini_data['environment']['stack_name']\n\n                cf_client = stack_driver.get_cloud_formation_client()\n\n                if not cf_client:\n                    cf_client = boto3_session.client('cloudformation', region_name=region)\n\n                stack_tool = stack_tool = StackTool(\n                    stack_name,\n                    region,\n                    cf_client\n                )\n            except Exception as wtf:\n                logging.warning('there was a problems creating stack tool: {}'.format(wtf))\n\n            if stack_driver.poll_stack():\n                try:\n                    logging.info('stack create/update was finished successfully.')\n                    stack_tool.print_stack_info()\n                except Exception as wtf:\n                    logging.warning('there was a problems printing stack info: {}'.format(wtf))\n\n                sys.exit(0)\n            else:\n                try:\n                    logging.error('stack create/update was did not go well.')\n                    stack_tool.print_stack_events()\n                except Exception as wtf:\n                    logging.warning('there was a problems printing stack events: {}'.format(wtf))\n                sys.exit(1)\n    else:\n        logging.error('start of stack create/update did not go well.')\n        sys.exit(1)", "docstring": "Helper function to facilitate upsert.\n\nArgs:\nini_date - the dictionary of info to run upsert\n\nExit:\n0 - good\n1 - bad", "source": "juraj-google-style"}
{"code": "def HasBalance(self, assetId):\n        \n        for key, fixed8 in self.Balances.items():\n            if key == assetId:\n                return True\n        return False", "docstring": "Flag indicating if the asset has a balance.\n\nArgs:\nassetId (UInt256):\n\nReturns:\nbool: True if a balance is present. False otherwise.", "source": "juraj-google-style"}
{"code": "def show(self, *args, **kwargs):\n        \n        plt = self.get_pourbaix_plot(*args, **kwargs)\n        plt.show()", "docstring": "Shows the pourbaix plot\n\nArgs:\n*args: args to get_pourbaix_plot\n**kwargs: kwargs to get_pourbaix_plot\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _generate_bucket_value(self, bucketing_id):\n    ratio = (float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE)\n    return math.floor((ratio * MAX_TRAFFIC_VALUE))", "docstring": "Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).\n\nArgs:\nbucketing_id: ID for bucketing.\n\nReturns:\nBucket value corresponding to the provided bucketing ID.", "source": "codesearchnet"}
{"code": "def define_lattice_from_file( self, filename, cell_lengths ):\n        \n        self.lattice = init_lattice.lattice_from_sites_file( filename, cell_lengths = cell_lengths )", "docstring": "Set up the simulation lattice from a file containing site data.\nUses `init_lattice.lattice_from_sites_file`, which defines the site file spec.\n\nArgs:\nfilename (Str): sites file filename.\ncell_lengths (List(x,y,z)): cell lengths for the simulation cell.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def settings_view_for_block(block_wrapper, settings_view_factory):\n        \n        state_root_hash = \\\n            block_wrapper.state_root_hash \\\n            if block_wrapper is not None else None\n\n        return settings_view_factory.create_settings_view(state_root_hash)", "docstring": "Returns the settings view for an arbitrary block.\n\nArgs:\nblock_wrapper (BlockWrapper): The block for which a settings\nview is to be returned\nsettings_view_factory (SettingsViewFactory): The settings\nview factory used to create the SettingsView object\n\nReturns:\nSettingsView object associated with the block", "source": "juraj-google-style"}
{"code": "def contains(self, name):\n    \n    try:\n      self._api.buckets_get(name)\n    except google.datalab.utils.RequestException as e:\n      if e.status == 404:\n        return False\n      raise e\n    except Exception as e:\n      raise e\n    return True", "docstring": "Checks if the specified bucket exists.\n\nArgs:\nname: the name of the bucket to lookup.\nReturns:\nTrue if the bucket exists; False otherwise.\nRaises:\nException if there was an error requesting information about the bucket.", "source": "juraj-google-style"}
{"code": "def minimum_image_dr(self, r1, r2, cutoff=None):\n    delta_r_vector = self.minimum_image(r1, r2)\n    return self.dr(np.zeros(3), delta_r_vector, cutoff)", "docstring": "Calculate the shortest distance between two points in the cell,\naccounting for periodic boundary conditions.\n\nArgs:\nr1 (np.array): fractional coordinates of point r1.\nr2 (np.array): fractional coordinates of point r2.\ncutoff (:obj: `float`, optional): if set, return zero if the minimum distance is greater than `cutoff`. Defaults to None.\n\nReturns:\n(float): The distance between r1 and r2.", "source": "codesearchnet"}
{"code": "def __init__(self, ps_tasks, ps_device, worker_device, merge_devices, ps_ops, ps_strategy):\n    self._ps_tasks = ps_tasks\n    self._ps_device = ps_device\n    self._worker_device = worker_device\n    self._merge_devices = merge_devices\n    self._ps_ops = ps_ops\n    self._ps_strategy = ps_strategy", "docstring": "Create a new `_ReplicaDeviceChooser`.\n\nArgs:\nps_tasks: Number of tasks in the `ps` job.\nps_device: String.  Name of the `ps` job.\nworker_device: String.  Name of the `worker` job.\nmerge_devices: Boolean. Set to True to allow merging of device specs.\nps_ops: List of strings representing `Operation` types that need to be\nplaced on `ps` devices.\nps_strategy: A callable invoked for every ps `Operation` (i.e. matched by\n`ps_ops`), that takes the `Operation` and returns the ps task index to\nuse.", "source": "github-repos"}
{"code": "def __init__(self, chunks: typing.List[str], separator: str):\n    HTMLParser.__init__(self)\n    self.chunks_joined = SEP.join(chunks)\n    self.separator = separator\n    self.to_skip = False\n    self.scan_index = 0\n    self.element_stack: queue.LifoQueue[ElementState] = queue.LifoQueue()", "docstring": "Initializes the parser.\n\nArgs:\nchunks (List[str]): The chunks to resolve.\nseparator (str): The separator string.", "source": "github-repos"}
{"code": "def inspect_secret(self, id):\n    url = self._url('/secrets/{0}', id)\n    return self._result(self._get(url), True)", "docstring": "Retrieve secret metadata\n\nArgs:\nid (string): Full ID of the secret to remove\n\nReturns (dict): A dictionary of metadata\n\nRaises:\n:py:class:`docker.errors.NotFound`\nif no secret with that ID exists", "source": "codesearchnet"}
{"code": "def add_minute(self, minute):\n        \n        _moy = self.moy + int(minute)\n        return self.__class__.from_moy(_moy)", "docstring": "Create a new DateTime after the minutes are added.\n\nArgs:\nminute: An integer value for minutes.", "source": "juraj-google-style"}
{"code": "def set_agent(self, agent):\n    self.agent = agent\n    self.queue = asyncio.Queue(loop=self.agent.loop)\n    self.presence = agent.presence\n    self.web = agent.web", "docstring": "Links behaviour with its owner agent\n\nArgs:\nagent (spade.agent.Agent): the agent who owns the behaviour", "source": "codesearchnet"}
{"code": "def _build(self, ids):\n    \n    \n    if self._existing_vocab is None:\n      if self.EMBEDDINGS not in self._initializers:\n        self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal()\n      self._embeddings = tf.get_variable(\n          \"embeddings\",\n          shape=[self._vocab_size, self._embed_dim],\n          dtype=tf.float32,\n          initializer=self._initializers[self.EMBEDDINGS],\n          partitioner=self._partitioners.get(self.EMBEDDINGS, None),\n          regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n          trainable=self._trainable)\n    else:\n      self._embeddings = tf.get_variable(\n          \"embeddings\",\n          dtype=tf.float32,\n          initializer=self._existing_vocab,\n          regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n          trainable=self._trainable)\n\n    if self._densify_gradients:\n      \n      \n      \n      \n      \n      embeddings = util.convert_gradient_to_tensor(self._embeddings)\n    else:\n      embeddings = self._embeddings\n\n    \n    return tf.nn.embedding_lookup(embeddings, ids, name=\"embedding_lookup\")", "docstring": "Lookup embeddings.\n\nLooks up an embedding vector for each value in `ids`. All ids must be within\n[0, vocab_size), else an `InvalidArgumentError` is raised at runtime.\n\nArgs:\nids: Tensor of dtype int64.\n\nReturns:\nTensor of tf.shape(ids) + [embedding_dim] and dtype float32.", "source": "juraj-google-style"}
{"code": "def console_wait_for_keypress(flush: bool) -> Key:\n    \n    key = Key()\n    lib.TCOD_console_wait_for_keypress_wrapper(key.key_p, flush)\n    return key", "docstring": "Block until the user presses a key, then returns a new Key.\n\nArgs:\nflush bool: If True then the event queue is cleared before waiting\nfor the next event.\n\nReturns:\nKey: A new Key instance.\n\n.. deprecated:: 9.3\nUse the :any:`tcod.event.wait` function to wait for events.", "source": "juraj-google-style"}
{"code": "def attribute(\n        self, main_type, sub_type, unique_id, attribute_id, action='GET', owner=None, params=None\n    ):\n        \n        params = params or {}\n        if owner:\n            params['owner'] = owner\n        action = action.upper()\n        if not sub_type:\n            url = '/v2/{}/{}/attributes/{}'.format(main_type, unique_id, attribute_id)\n        else:\n            url = '/v2/{}/{}/{}/attributes/{}'.format(main_type, sub_type, unique_id, attribute_id)\n\n        if action == 'GET':\n            return self.tcex.session.get(url, params=params)\n\n        if action == 'DELETE':\n            return self.tcex.session.delete(url, params=params)\n\n        return None", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nattribute_id:\naction:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get(self, **params):\n        \n        if self._use_cache:\n            r = requests.get(self.url, params=params)\n        else:\n            with requests_cache.disabled():\n                r = requests.get(self.url, params=params)\n        r.raise_for_status()\n        return r", "docstring": "Performs get request to the biomart service.\n\nArgs:\n**params (dict of str: any): Arbitrary keyword arguments, which\nare added as parameters to the get request to biomart.\n\nReturns:\nrequests.models.Response: Response from biomart for the request.", "source": "juraj-google-style"}
{"code": "def get_source(label, source_type, **kwargs):\n    if (source_type not in yapconf.ALL_SUPPORTED_SOURCES):\n        raise YapconfSourceError(('Invalid source type %s. Supported types are %s.' % (source_type, yapconf.ALL_SUPPORTED_SOURCES)))\n    if (source_type not in yapconf.SUPPORTED_SOURCES):\n        raise YapconfSourceError(('Unsupported source type \"%s\". If you want to use this type, you will need to install the correct client for it (try `pip install yapconf[%s]. Currently supported types are %s. All supported types are %s' % (source_type, source_type, yapconf.SUPPORTED_SOURCES, yapconf.ALL_SUPPORTED_SOURCES)))\n    if (source_type == 'dict'):\n        return DictConfigSource(label, data=kwargs.get('data'))\n    elif (source_type == 'json'):\n        return JsonConfigSource(label, **kwargs)\n    elif (source_type == 'yaml'):\n        filename = kwargs.get('filename')\n        if ('filename' in kwargs):\n            kwargs.pop('filename')\n        return YamlConfigSource(label, filename, **kwargs)\n    elif (source_type == 'environment'):\n        return EnvironmentConfigSource(label)\n    elif (source_type == 'etcd'):\n        return EtcdConfigSource(label, kwargs.get('client'), kwargs.get('key', '/'))\n    elif (source_type == 'kubernetes'):\n        name = kwargs.get('name')\n        if ('name' in kwargs):\n            kwargs.pop('name')\n        client = kwargs.get('client')\n        if ('client' in kwargs):\n            kwargs.pop('client')\n        return KubernetesConfigSource(label, client, name, **kwargs)\n    else:\n        raise NotImplementedError(('No implementation for source type %s' % source_type))", "docstring": "Get a config source based on type and keyword args.\n\nThis is meant to be used internally by the spec via ``add_source``.\n\nArgs:\nlabel (str): The label for this source.\nsource_type: The type of source. See ``yapconf.SUPPORTED_SOURCES``\n\nKeyword Args:\nThe keyword arguments are based on the source_type. Please see the\ndocumentation of the individual sources for a detailed list of all\npossible arguments.\n\nReturns (yapconf.sources.ConfigSource):\nA valid config source which can be used for generating an override.\n\nRaises:\nYapconfSourceError: If there is some kind of error with this source\ndefinition.", "source": "codesearchnet"}
{"code": "def forward(self, logits, labels):\n    duration, start_time, end_time = labels\n    candidates = torch.mul(logits, duration)\n    candidates_start_time, candidates_end_time = (candidates[:, 0].float(), candidates[:, 1].float())\n    losses_dict = {}\n    for loss in self.losses:\n        losses_dict.update({loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)})\n    return losses_dict", "docstring": "This performs the loss computation.\n\nArgs:\nlogits (`torch.FloatTensor`):\nThe output logits of head module.\nlabels (`List[torch.FloatTensor]`):\nList of tensors ([start, end, duration]), which contains start time, end time of the video corresponding to the text, and also the duration.", "source": "github-repos"}
{"code": "def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:\n    if isinstance(variables, dict):\n        if variables.get('times'):\n            times = int(variables['times'])\n            del variables['times']\n            (yield (list(variable_matrix(variables, parent, 'product')) * times))\n        else:\n            raise ValueError(f'times is a required keyword for the repeat iterator.')\n    else:\n        raise ValueError(f'The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}')", "docstring": "Cycle through a list of values a specified number of times\n\nArgs:\nvariables: The input variables for the creation of the range\nparent: The variable for which the values are being generated.\n\nReturns: A list of dictionaries mapping the parent to each value.", "source": "codesearchnet"}
{"code": "def _add_tag(self, tag):\n        \n        \n        tags = self.data.get('tags', None)\n        if tags:\n            if tag in [x['name'] for x in tags]:\n                return False\n        else:\n            tags = list()\n        tags.append({'name': tag})\n        self.data['tags'] = tags\n        return True", "docstring": "Add a tag\n\nArgs:\ntag (str): Tag to add\n\nReturns:\nbool: True if tag added or False if tag already present", "source": "juraj-google-style"}
{"code": "def check_tweet(tweet, validation_checking=False):\n    \n\n    if \"id\" not in tweet:\n        raise NotATweetError(\"This text has no 'id' key\")\n\n    original_format = is_original_format(tweet)\n\n    if original_format:\n        _check_original_format_tweet(tweet, validation_checking=validation_checking)\n    else:\n        _check_activity_streams_tweet(tweet, validation_checking=validation_checking)\n\n    return original_format", "docstring": "Ensures a tweet is valid and determines the type of format for the tweet.\n\nArgs:\ntweet (dict/Tweet): the tweet payload\nvalidation_checking (bool): check for valid key structure in a tweet.", "source": "juraj-google-style"}
{"code": "def bulk_get_or_create(self, data_list):\n        \n        items_to_create = dict()\n        for record_key, record_config in data_list.items():\n            if record_key not in items_to_create:\n                record = self.get_instance(record_key)\n                if not record:\n                    items_to_create[record_key] = self.model_cls(**record_config)\n        if items_to_create:\n            \n\n            self.model_cls.objects.bulk_create(items_to_create.values())\n            self.set_record_lookup(True)\n        return self.record_lookup", "docstring": "data_list is the data to get or create\nWe generate the query and set all the record keys based on passed in queryset\nThen we loop over each item in the data_list, which has the keys already! No need to generate them.  Should save a lot of time\nUse values instead of the whole object, much faster\nArgs:\ndata_list:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def eq(self, other, axis=\"columns\", level=None):\n        \n        return self._binary_op(\"eq\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is equal to other.\n\nArgs:\nother: A DataFrame or Series or scalar to compare to.\naxis: The axis to perform the eq over.\nlevel: The Multilevel index level to apply eq over.\n\nReturns:\nA new DataFrame filled with Booleans.", "source": "juraj-google-style"}
{"code": "def peek_with_kwargs(init, args=[]):\n    \n    def peek(store, container, _stack=None):\n        return init(\\\n            *[ store.peek(attr, container, _stack=_stack) for attr in args ], \\\n            **dict([ (attr, store.peek(attr, container, _stack=_stack)) \\\n                for attr in container if attr not in args ]))\n    return peek", "docstring": "Make datatypes passing keyworded arguments to the constructor.\n\nThis is a factory function; returns the actual `peek` routine.\n\nArguments:\n\ninit (callable): type constructor.\n\nargs (iterable): arguments NOT to be keyworded; order does matter.\n\nReturns:\n\ncallable: deserializer (`peek` routine).\n\nAll the peeked attributes that are not referenced in `args` are passed to `init` as\nkeyworded arguments.", "source": "juraj-google-style"}
{"code": "def get_catalog_courses(self, catalog_id):\n    return self._load_data(self.CATALOGS_COURSES_ENDPOINT.format(catalog_id), default=[])", "docstring": "Return the courses included in a single course catalog by ID.\n\nArgs:\ncatalog_id (int): The catalog ID we want to retrieve.\n\nReturns:\nlist: Courses of the catalog in question", "source": "codesearchnet"}
{"code": "def delete_variants(adapter, vcf_obj, case_obj, case_id=None):\n    \n    case_id = case_id or case_obj['case_id']\n    nr_deleted = 0\n    start_deleting = datetime.now()\n    chrom_time = datetime.now()\n    current_chrom = None\n    new_chrom = None\n    \n    for variant in vcf_obj:\n        formated_variant = build_variant(\n            variant=variant,\n            case_obj=case_obj,\n            case_id=case_id,\n        )\n        \n        if not formated_variant:\n            continue\n        \n        new_chrom = formated_variant.get('chrom')\n        adapter.delete_variant(formated_variant)\n        nr_deleted += 1\n        \n        if not current_chrom:\n            LOG.info(\"Start deleting chromosome {}\".format(new_chrom))\n            current_chrom = new_chrom\n            chrom_time = datetime.now()\n            continue\n        \n        if new_chrom != current_chrom:\n            LOG.info(\"Chromosome {0} done\".format(current_chrom))\n            LOG.info(\"Time to delete chromosome {0}: {1}\".format(\n                current_chrom, datetime.now()-chrom_time))\n            LOG.info(\"Start deleting chromosome {0}\".format(new_chrom))\n            current_chrom = new_chrom\n\n\n    return nr_deleted", "docstring": "Delete variants for a case in the database\n\nArgs:\nadapter(loqusdb.plugins.Adapter)\nvcf_obj(iterable(dict))\nind_positions(dict)\ncase_id(str)\n\nReturns:\nnr_deleted (int): Number of deleted variants", "source": "juraj-google-style"}
{"code": "def _parse_getprop_output(self, output):\n    output = output.decode('utf-8', errors='ignore').replace('\\r\\n', '\\n')\n    results = {}\n    for line in output.split(']\\n'):\n        if not line:\n            continue\n        try:\n            name, value = line.split(': ', 1)\n        except ValueError:\n            logging.debug('Failed to parse adb getprop line %s', line)\n            continue\n        name = name.strip()[1:-1]\n        if value and value[0] == '[':\n            value = value[1:]\n        results[name] = value\n    return results", "docstring": "Parses the raw output of `adb shell getprop` into a dictionary.\n\nArgs:\noutput: byte str, the raw output of the `adb shell getprop` call.\n\nReturns:\ndict, name-value pairs of the properties.", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        if type(self) is type(other) and \\\n                self._index == other._index:\n            return True\n        return False", "docstring": "Two channels are the same if they are of the same type, and have the same index.\n\nArgs:\nother (Channel): other Channel\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def register(self, name, namespace):\n        \n        if name in self._NAMESPACES:\n\n            raise ValueError(\"Namespace {0} already exists.\".format(name))\n\n        if not isinstance(namespace, ns.Namespace):\n\n            raise TypeError(\"Namespaces must be of type Namespace.\")\n\n        self._NAMESPACES[name] = namespace", "docstring": "Register a new namespace with the Configuration object.\n\nArgs:\nname (str): The name of the section/namespace.\nnamespace (namespace.Namespace): The Namespace object to store.\n\nRaises:\nTypeError: If the namespace is not a Namespace object.\nValueError: If the namespace is already registered.", "source": "juraj-google-style"}
{"code": "def register_for_auto_class(cls, auto_class='FlaxAutoModel'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom models as the ones in the\nlibrary are already mapped with an auto class.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"FlaxAutoModel\"`):\nThe auto class to register this new model with.", "source": "github-repos"}
{"code": "def kmip_version(self, value):\n        \n        if isinstance(value, enums.KMIPVersion):\n            self._kmip_version = value\n        else:\n            raise ValueError(\"KMIP version must be a KMIPVersion enumeration\")", "docstring": "Set the KMIP version for the client.\n\nArgs:\nvalue (KMIPVersion): A KMIPVersion enumeration\n\nReturn:\nNone\n\nRaises:\nValueError: if value is not a KMIPVersion enumeration\n\nExample:\n>>> client.kmip_version = enums.KMIPVersion.KMIP_1_1\n>>>", "source": "juraj-google-style"}
{"code": "def __init__(self, amount):\n        \n        super(CapitalFlow, self).__init__()\n        self.amount = float(amount)", "docstring": "CapitalFlow constructor.\n\nArgs:\n* amount (float): Amount to adjust by", "source": "juraj-google-style"}
{"code": "def _remove_boring_lines(text):\n  \n  lines = text.split(\"\\n\")\n  filtered = [line for line in lines if re.match(\"[a-zA-z\\\"\\']\", line)]\n  return \"\\n\".join(filtered)", "docstring": "Remove lines that do not start with a letter or a quote.\n\nFrom inspecting the data, this seems to leave in most prose and remove\nmost weird stuff.\n\nArgs:\ntext: a string\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def payments(self, virtual_account_id, data={}, **kwargs):\n    url = '{}/{}/payments'.format(self.base_url, virtual_account_id)\n    return self.get_url(url, data, **kwargs)", "docstring": "Fetch Payment for Virtual Account Id\n\nArgs:\nvirtual_account_id :\nId for which Virtual Account objects has to be retrieved\n\nReturns:\nPayment dict for given Virtual Account Id", "source": "codesearchnet"}
{"code": "def _get_flat_core_sizes(cores):\n    core_sizes_lists = []\n    for core in cores:\n        flat_output_size = nest.flatten(core.output_size)\n        core_sizes_lists.append([tf.TensorShape(size).as_list() for size in flat_output_size])\n    return core_sizes_lists", "docstring": "Obtains the list flattened output sizes of a list of cores.\n\nArgs:\ncores: list of cores to get the shapes from.\n\nReturns:\nList of lists that, for each core, contains the list of its output\ndimensions.", "source": "codesearchnet"}
{"code": "def get_2d_local_memory(x, query_shape, memory_flange):\n    (_, height, width, depth_x) = common_layers.shape_list(x)\n    x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1])\n    paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]]\n    padded_x = tf.pad(x, paddings)\n    padded_x.set_shape([None, (height + (2 * memory_flange[0])), (width + (2 * memory_flange[1])), depth_x])\n    x_outer_memory_blocks = _extract_blocks(padded_x, memory_flange[0], memory_flange[1])\n    (x_left_blocks, x_right_blocks) = _get_left_right_blocks(x_outer_memory_blocks)\n    t_hw_block = (lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5]))\n    (x_top_center_blocks, x_bottom_center_blocks) = map(t_hw_block, _get_left_right_blocks(t_hw_block(x_outer_memory_blocks)))\n    (x_left_corner_blocks, x_right_corner_blocks) = _split_along_width(x_outer_memory_blocks)\n    t_hw = (lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5]))\n    (x_top_left_corner_blocks, x_bottom_left_corner_blocks) = map(t_hw, _split_along_width(t_hw(x_left_corner_blocks)))\n    (x_top_right_corner_blocks, x_bottom_right_corner_blocks) = map(t_hw, _split_along_width(t_hw(x_right_corner_blocks)))\n    x_top_memory = tf.concat([x_top_left_corner_blocks, x_top_center_blocks, x_top_right_corner_blocks], axis=4)\n    x_middle_memory = tf.concat([x_left_blocks, x_center_blocks, x_right_blocks], axis=4)\n    x_bottom_memory = tf.concat([x_bottom_left_corner_blocks, x_bottom_center_blocks, x_bottom_right_corner_blocks], axis=4)\n    x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3)\n    return x", "docstring": "Stitches together the local 2d memory blocks.\n\nArgs:\nx: a [batch, height, width, depth tensor]\nquery_shape: 2-d integer list of query shape\nmemory_flange: 2-d integer list of memory flanges\n\nReturns:\nx: A [batch, num_h_blocks, num_w_blocks,\nquery_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]\ntensor.", "source": "codesearchnet"}
{"code": "def _CreateLineReader(self, file_object):\n    if py2to3.PY_3:\n        line_reader = text_file.TextFile(file_object, encoding=self._encoding, end_of_line=self._end_of_line)\n        maximum_read_buffer_size = line_reader._MAXIMUM_READ_BUFFER_SIZE\n    else:\n        line_reader = line_reader_file.BinaryLineReader(file_object, end_of_line=self._end_of_line)\n        maximum_read_buffer_size = line_reader.MAXIMUM_READ_BUFFER_SIZE\n    if (self._maximum_line_length > maximum_read_buffer_size):\n        self._maximum_line_length = (maximum_read_buffer_size - 1)\n    for _ in range(0, self.NUMBER_OF_HEADER_LINES):\n        line_reader.readline(self._maximum_line_length)\n    return line_reader", "docstring": "Creates an object that reads lines from a text file.\n\nThe line reader is advanced to the beginning of the DSV content, skipping\nany header lines.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nTextFile|BinaryLineReader: an object that implements an iterator\nover lines in a text file.\n\nRaises:\nUnicodeDecodeError: if the file cannot be read with the specified\nencoding.", "source": "codesearchnet"}
{"code": "def walk(self, walk_func):\n    nodes = self.topological_sort()\n    nodes.reverse()\n    for n in nodes:\n        walk_func(n)", "docstring": "Walks each node of the graph in reverse topological order.\nThis can be used to perform a set of operations, where the next\noperation depends on the previous operation. It's important to note\nthat walking happens serially, and is not paralellized.\n\nArgs:\nwalk_func (:class:`types.FunctionType`): The function to be called\non each node of the graph.", "source": "codesearchnet"}
{"code": "def intent(method):\n\n    def wrapper(self, *args, **kwargs):\n        try:\n            return method(self, *args, **kwargs)\n        except exceptions.MatrixError as e:\n            if isinstance(e.original_exception, matrix_client.errors.MatrixRequestError):\n                self._handle_request_exception(e)\n                return method(self, *args, **kwargs)\n            else:\n                raise e\n    return wrapper", "docstring": "Helps object methods handle MatrixRequestError.\n\nArgs:\nmethod(function): Object method to be wrapped\n\nMethod's object must have _handle_request_exception method that deals with\nspecific status codes and errcodes.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An ALBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def open(self, file_path, flags, mode=None, dir_fd=None):\n    file_path = self._path_with_dir_fd(file_path, self.open, dir_fd)\n    if (mode is None):\n        if self.filesystem.is_windows_fs:\n            mode = 438\n        else:\n            mode = (511 & (~ self._umask()))\n    open_modes = _OpenModes(must_exist=(not (flags & os.O_CREAT)), can_read=(not (flags & os.O_WRONLY)), can_write=(flags & (os.O_RDWR | os.O_WRONLY)), truncate=(flags & os.O_TRUNC), append=(flags & os.O_APPEND), must_not_exist=(flags & os.O_EXCL))\n    if (open_modes.must_not_exist and open_modes.must_exist):\n        raise NotImplementedError('O_EXCL without O_CREAT mode is not supported')\n    if ((not self.filesystem.is_windows_fs) and self.filesystem.exists(file_path)):\n        obj = self.filesystem.resolve(file_path)\n        if isinstance(obj, FakeDirectory):\n            if (((not open_modes.must_exist) and (not self.filesystem.is_macos)) or open_modes.can_write):\n                self.filesystem.raise_os_error(errno.EISDIR, file_path)\n            dir_wrapper = FakeDirWrapper(obj, file_path, self.filesystem)\n            file_des = self.filesystem._add_open_file(dir_wrapper)\n            dir_wrapper.filedes = file_des\n            return file_des\n    str_flags = 'b'\n    delete_on_close = False\n    if hasattr(os, 'O_TEMPORARY'):\n        delete_on_close = ((flags & os.O_TEMPORARY) == os.O_TEMPORARY)\n    fake_file = FakeFileOpen(self.filesystem, delete_on_close=delete_on_close, raw_io=True)(file_path, str_flags, open_modes=open_modes)\n    if (fake_file.file_object != self.filesystem.dev_null):\n        self.chmod(file_path, mode)\n    return fake_file.fileno()", "docstring": "Return the file descriptor for a FakeFile.\n\nArgs:\nfile_path: the path to the file\nflags: low-level bits to indicate io operation\nmode: bits to define default permissions\nNote: only basic modes are supported, OS-specific modes are\nignored\ndir_fd: If not `None`, the file descriptor of a directory,\nwith `file_path` being relative to this directory.\nNew in Python 3.3.\n\nReturns:\nA file descriptor.\n\nRaises:\nIOError: if the path cannot be found\nValueError: if invalid mode is given\nNotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`", "source": "codesearchnet"}
{"code": "def get_agent_settings():\n    ret = dict()\n    sorted_types = sorted(_SERVICE_TYPES.items(), key=(lambda x: ((- x[1]), x[0])))\n    ret['services'] = list()\n    ret['contact'] = __utils__['reg.read_value'](_HKEY, _AGENT_KEY, 'sysContact')['vdata']\n    ret['location'] = __utils__['reg.read_value'](_HKEY, _AGENT_KEY, 'sysLocation')['vdata']\n    current_bitmask = __utils__['reg.read_value'](_HKEY, _AGENT_KEY, 'sysServices')['vdata']\n    if (current_bitmask == 0):\n        ret['services'].append(sorted_types[(- 1)][0])\n    else:\n        for (service, bitmask) in sorted_types:\n            if ((current_bitmask is not None) and (current_bitmask > 0)):\n                remaining_bitmask = (current_bitmask - bitmask)\n                if (remaining_bitmask >= 0):\n                    current_bitmask = remaining_bitmask\n                    ret['services'].append(service)\n            else:\n                break\n    ret['services'] = sorted(ret['services'])\n    return ret", "docstring": "Determine the value of the SNMP sysContact, sysLocation, and sysServices\nsettings.\n\nReturns:\ndict: A dictionary of the agent settings.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_snmp.get_agent_settings", "source": "codesearchnet"}
{"code": "def get_screenshot_as_png(obj, driver=None, timeout=5, **kwargs):\n    Image = import_required('PIL.Image', ('To use bokeh.io.export_png you need pillow ' + '(\"conda install pillow\" or \"pip install pillow\")'))\n    with _tmp_html() as tmp:\n        html = get_layout_html(obj, **kwargs)\n        with io.open(tmp.path, mode='w', encoding='utf-8') as file:\n            file.write(decode_utf8(html))\n        web_driver = (driver if (driver is not None) else webdriver_control.get())\n        web_driver.get(('file:\n        web_driver.maximize_window()\n        web_driver.execute_script(\"document.body.style.width = '100%';\")\n        wait_until_render_complete(web_driver, timeout)\n        png = web_driver.get_screenshot_as_png()\n        b_rect = web_driver.execute_script(_BOUNDING_RECT_SCRIPT)\n    image = Image.open(io.BytesIO(png))\n    cropped_image = _crop_image(image, **b_rect)\n    return cropped_image", "docstring": "Get a screenshot of a ``LayoutDOM`` object.\n\nArgs:\nobj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget\nobject or Document to export.\n\ndriver (selenium.webdriver) : a selenium webdriver instance to use\nto export the image.\n\ntimeout (int) : the maximum amount of time to wait for initialization.\nIt will be used as a timeout for loading Bokeh, then when waiting for\nthe layout to be rendered.\n\nReturns:\ncropped_image (PIL.Image.Image) : a pillow image loaded from PNG.\n\n.. warning::\nResponsive sizing_modes may generate layouts with unexpected size and\naspect ratios. It is recommended to use the default ``fixed`` sizing mode.", "source": "codesearchnet"}
{"code": "def cardinal(self, to):\n        \n        return sum(m.cardinal(to) for m in self.submodules)", "docstring": "Return the number of dependencies of this package to the given node.\n\nArgs:\nto (Package/Module): target node.\n\nReturns:\nint: number of dependencies.", "source": "juraj-google-style"}
{"code": "def __init__(self, identifier=None, session_identifier=None):\n    \n    super(TaskCompletion, self).__init__()\n    self.aborted = False\n    self.identifier = identifier\n    self.session_identifier = session_identifier\n    self.timestamp = None", "docstring": "Initializes a task completion attribute container.\n\nArgs:\nidentifier (Optional[str]): unique identifier of the task.\nThe identifier should match that of the corresponding\ntask start information.\nsession_identifier (Optional[str]): identifier of the session the task\nis part of.", "source": "juraj-google-style"}
{"code": "def __init__(self, gl, parent=None):\n        \n        self.gitlab = gl\n        self._parent = parent  \n        self._computed_path = self._compute_path()", "docstring": "REST manager constructor.\n\nArgs:\ngl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make\nrequests.\nparent: REST object to which the manager is attached.", "source": "juraj-google-style"}
{"code": "def _read_mode_acopt(self, size, kind):\n    temp = self._read_unpack(size)\n    algo = chksum_opt.get(temp)\n    data = dict(kind=kind, length=size, ac=algo)\n    return data", "docstring": "Read Alternate Checksum Request option.\n\nPositional arguments:\nsize - int, length of option\nkind - int, 14 (Alt-Chksum Request)\n\nReturns:\n* dict -- extracted Alternate Checksum Request (CHKSUM-REQ) option\n\nStructure of TCP CHKSUM-REQ [RFC 1146][RFC 6247]:\n+----------+----------+----------+\n|  Kind=14 | Length=3 |  chksum  |\n+----------+----------+----------+\n\nOctets      Bits        Name                    Description\n0           0     tcp.chksumreq.kind      Kind (14)\n1           8     tcp.chksumreq.length    Length (3)\n2          16     tcp.chksumreq.ac        Checksum Algorithm", "source": "codesearchnet"}
{"code": "def _WriteData(self, target, entry):\n    password_entry = '%s:%s:%d:%d:%s:%s:%s' % (entry.name, entry.passwd, entry.uid, entry.gid, entry.gecos, entry.dir, entry.shell)\n    target.write(password_entry.encode() + b'\\n')\n    return len(password_entry) + 1", "docstring": "Write a PasswdMapEntry to the target cache.\n\nArgs:\ntarget: A file-like object.\nentry: A PasswdMapEntry.\n\nReturns:\nNumber of bytes written to the target.", "source": "github-repos"}
{"code": "def add_cohp(self, label, cohp):\n    energies = ((cohp.energies - cohp.efermi) if self.zero_at_efermi else cohp.energies)\n    populations = cohp.get_cohp()\n    int_populations = cohp.get_icohp()\n    self._cohps[label] = {'energies': energies, 'COHP': populations, 'ICOHP': int_populations, 'efermi': cohp.efermi}", "docstring": "Adds a COHP for plotting.\n\nArgs:\nlabel: Label for the COHP. Must be unique.\n\ncohp: COHP object.", "source": "codesearchnet"}
{"code": "def _use_prototype(self, spec, prototypes):\n    prototype = spec['based-on']\n    del spec['based-on']\n    for attr in prototype:\n        if (attr not in spec):\n            spec[attr] = copy.deepcopy(prototype[attr])\n    return spec", "docstring": "Populates the given spec with the values of it's declared prototype\n\nArgs:\nspec (dict): spec to update\nprototypes (dict): Configuration spec containing the prototypes\n\nReturns:\ndict: updated spec", "source": "codesearchnet"}
{"code": "def inverse_guass(self, mu: float, sigma: float) -> float:\n    return float(lib.TCOD_random_get_gaussian_double_inv(self.random_c, mu, sigma))", "docstring": "Return a random Gaussian number using the Box-Muller transform.\n\nArgs:\nmu (float): The median returned value.\nsigma (float): The standard deviation.\n\nReturns:\nfloat: A random float.", "source": "codesearchnet"}
{"code": "def set_who(voevent, date=None, author_ivorn=None):\n    \n    if author_ivorn is not None:\n        voevent.Who.AuthorIVORN = ''.join(('ivo:\n    if date is not None:\n        voevent.Who.Date = date.replace(microsecond=0).isoformat()", "docstring": "Sets the minimal 'Who' attributes:  date of authoring, AuthorIVORN.\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\ndate(datetime.datetime): Date of authoring.\nNB Microseconds are ignored, as per the VOEvent spec.\nauthor_ivorn(str): Short author identifier,\ne.g. ``voevent.4pisky.org/ALARRM``.\nNote that the prefix ``ivo://`` will be prepended internally.", "source": "juraj-google-style"}
{"code": "def plot(self, pts_per_edge, color=None, ax=None, with_nodes=False):\n    if (self._dimension != 2):\n        raise NotImplementedError('2D is the only supported dimension', 'Current dimension', self._dimension)\n    if (ax is None):\n        ax = _plot_helpers.new_axis()\n    _plot_helpers.add_patch(ax, color, pts_per_edge, *self._get_edges())\n    if with_nodes:\n        ax.plot(self._nodes[(0, :)], self._nodes[(1, :)], color='black', marker='o', linestyle='None')\n    return ax", "docstring": "Plot the current surface.\n\nArgs:\npts_per_edge (int): Number of points to plot per edge.\ncolor (Optional[Tuple[float, float, float]]): Color as RGB profile.\nax (Optional[matplotlib.artist.Artist]): matplotlib axis object\nto add plot to.\nwith_nodes (Optional[bool]): Determines if the control points\nshould be added to the plot. Off by default.\n\nReturns:\nmatplotlib.artist.Artist: The axis containing the plot. This\nmay be a newly created axis.\n\nRaises:\nNotImplementedError: If the surface's dimension is not ``2``.", "source": "codesearchnet"}
{"code": "def rgbline(x, y, red, green, blue, alpha=1, linestyles='solid', linewidth=2.5):\n    y = np.array(y)\n    if (len(y.shape) == 1):\n        y = np.array([y])\n        red = np.array([red])\n        green = np.array([green])\n        blue = np.array([blue])\n        alpha = np.array([alpha])\n    elif isinstance(alpha, int):\n        alpha = ([alpha] * len(y))\n    seg = []\n    colours = []\n    for (yy, rr, gg, bb, aa) in zip(y, red, green, blue, alpha):\n        pts = np.array([x, yy]).T.reshape((- 1), 1, 2)\n        seg.extend(np.concatenate([pts[:(- 1)], pts[1:]], axis=1))\n        nseg = (len(x) - 1)\n        r = [(0.5 * (rr[i] + rr[(i + 1)])) for i in range(nseg)]\n        g = [(0.5 * (gg[i] + gg[(i + 1)])) for i in range(nseg)]\n        b = [(0.5 * (bb[i] + bb[(i + 1)])) for i in range(nseg)]\n        a = (np.ones(nseg, np.float) * aa)\n        colours.extend(list(zip(r, g, b, a)))\n    lc = LineCollection(seg, colors=colours, rasterized=True, linewidth=linewidth, linestyles=linestyles)\n    return lc", "docstring": "Get a RGB coloured line for plotting.\n\nArgs:\nx (list): x-axis data.\ny (list): y-axis data (can be multidimensional array).\nred (list): Red data (must have same shape as ``y``).\ngreen (list): Green data (must have same shape as ``y``).\nblue (list): blue data (must have same shape as ``y``).\nalpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency)\ndata (must have same shape as ``y`` or be an :obj:`int`).\nlinestyles (:obj:`str`, optional): Linestyle for plot. Options are\n``\"solid\"`` or ``\"dotted\"``.", "source": "codesearchnet"}
{"code": "def remove(self, annotation):\n        \n        if annotation.id in self._annotations:\n            del self._annotations[annotation.id]\n        self._dirty = True", "docstring": "Removes an annotation.\n\nArgs:\nannotation (gkeepapi.node.Annotation): An Annotation object.\n\nReturns:\ngkeepapi.node.Annotation: The Annotation.", "source": "juraj-google-style"}
{"code": "def get_path(self, key, rel_to_cwd=False, rel_to_conf=False):\n    if (key in self.__cli):\n        path = self.__cli[key]\n        from_conf = False\n    else:\n        path = self.__config.get(key)\n        from_conf = True\n    if (not isinstance(path, str)):\n        return None\n    res = self.__abspath(path, from_conf)\n    if rel_to_cwd:\n        return os.path.relpath(res, self.__invoke_dir)\n    if rel_to_conf:\n        return os.path.relpath(res, self.__conf_dir)\n    return self.__abspath(path, from_conf)", "docstring": "Retrieve a path from the config, resolving it against\nthe invokation directory or the configuration file directory,\ndepending on whether it was passed through the command-line\nor the configuration file.\n\nArgs:\nkey: str, the key to lookup the path with\n\nReturns:\nstr: The path, or `None`", "source": "codesearchnet"}
{"code": "def ProcessListDirectory(self, responses):\n    if (not responses.success):\n        raise flow.FlowError('Unable to list directory.')\n    with data_store.DB.GetMutationPool() as pool:\n        for response in responses:\n            stat_entry = rdf_client_fs.StatEntry(response)\n            filesystem.CreateAFF4Object(stat_entry, self.client_urn, pool, token=self.token)\n            self.SendReply(stat_entry)", "docstring": "Processes the results of the ListDirectory client action.\n\nArgs:\nresponses: a flow Responses object.", "source": "codesearchnet"}
{"code": "def chmod(target):\n    assert isinstance(target, str)\n    assert os.path.exists(target)\n    file_mode = (stat.S_IRUSR | stat.S_IWUSR)\n    folder_mode = ((stat.S_IRUSR | stat.S_IWUSR) | stat.S_IXUSR)\n    remove_immutable_attribute(target)\n    if os.path.isfile(target):\n        os.chmod(target, file_mode)\n    elif os.path.isdir(target):\n        os.chmod(target, folder_mode)\n        for (root, dirs, files) in os.walk(target):\n            for cur_dir in dirs:\n                os.chmod(os.path.join(root, cur_dir), folder_mode)\n            for cur_file in files:\n                os.chmod(os.path.join(root, cur_file), file_mode)\n    else:\n        raise ValueError('Unsupported file type: {}'.format(target))", "docstring": "Recursively set the chmod for files to 0600 and 0700 for folders.\n\nIt's ok unless we need something more specific.\n\nArgs:\ntarget (str): Root file or folder", "source": "codesearchnet"}
{"code": "class Identity(Initializer):\n\n    def __init__(self, gain=1.0):\n        self.gain = gain\n\n    def __call__(self, shape, dtype=None):\n        \n        if len(shape) != 2:\n            raise ValueError(f'Identity matrix initializer can only be used for 2D matrices. Received: shape={shape} of rank {len(shape)}.')\n        dtype = standardize_dtype(dtype)\n        return self.gain * ops.eye(*shape, dtype=dtype)", "docstring": "Initializer that generates the identity matrix.\n\nOnly usable for generating 2D matrices.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = Identity()\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = Identity()\n>>> layer = Dense(3, kernel_initializer=initializer)\n\nArgs:\ngain: Multiplicative factor to apply to the identity matrix.", "source": "github-repos"}
{"code": "def _SetAllFieldTypes(self, package, desc_proto, scope):\n    package = _PrefixWithDot(package)\n    main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)\n    if (package == '.'):\n        nested_package = _PrefixWithDot(desc_proto.name)\n    else:\n        nested_package = '.'.join([package, desc_proto.name])\n    for (field_proto, field_desc) in zip(desc_proto.field, main_desc.fields):\n        self._SetFieldType(field_proto, field_desc, nested_package, scope)\n    for (extension_proto, extension_desc) in zip(desc_proto.extension, main_desc.extensions):\n        extension_desc.containing_type = self._GetTypeFromScope(nested_package, extension_proto.extendee, scope)\n        self._SetFieldType(extension_proto, extension_desc, nested_package, scope)\n    for nested_type in desc_proto.nested_type:\n        self._SetAllFieldTypes(nested_package, nested_type, scope)", "docstring": "Sets all the descriptor's fields's types.\n\nThis method also sets the containing types on any extensions.\n\nArgs:\npackage: The current package of desc_proto.\ndesc_proto: The message descriptor to update.\nscope: Enclosing scope of available types.", "source": "codesearchnet"}
{"code": "def get_block_details(self, block_ids):\n        \n        \n        if not hasattr(block_ids, \"__iter__\"):\n            block_ids = [block_ids]\n\n        for _id in block_ids:\n            block_key = self._db.get_block(_id)[0]\n            block_data = self._db.get_all_field_value(block_key)\n            \n            \n            \n            for key in block_data:\n                for char in ['[', '{']:\n                    if char in block_data[key]:\n                        block_data[key] = ast.literal_eval(\n                            str(block_data[key]))\n            yield block_data", "docstring": "Get details of scheduling or processing block\n\nArgs:\nblock_ids (list): List of block IDs", "source": "juraj-google-style"}
{"code": "def _compute_args(self, data=dict(), **kwargs):\n    for (name, remote_attribute) in self._attributes.items():\n        default_value = BambouConfig.get_default_attribute_value(self.__class__, name, remote_attribute.attribute_type)\n        setattr(self, name, default_value)\n    if (len(data) > 0):\n        self.from_dict(data)\n    for (key, value) in kwargs.items():\n        if hasattr(self, key):\n            setattr(self, key, value)", "docstring": "Compute the arguments\n\nTry to import attributes from data.\nOtherwise compute kwargs arguments.\n\nArgs:\ndata: a dict()\nkwargs: a list of arguments", "source": "codesearchnet"}
{"code": "def SplitIntoNormalAndControl(self, buf):\n    if not self._csi or not buf:\n        return [(buf, '')]\n    seq = []\n    i = 0\n    while i < len(buf):\n        c = buf.find(self._csi, i)\n        if c < 0:\n            seq.append((buf[i:], ''))\n            break\n        normal = buf[i:c]\n        i = c + self.GetControlSequenceLen(buf[c:])\n        seq.append((normal, buf[c:i]))\n    return seq", "docstring": "Returns a list of (normal_string, control_sequence) tuples from buf.\n\nArgs:\nbuf: The input string containing one or more control sequences\ninterspersed with normal strings.\n\nReturns:\nA list of (normal_string, control_sequence) tuples.", "source": "github-repos"}
{"code": "def sort_request(request: Dict[str, Any]) -> OrderedDict:\n    \n    sort_order = [\"jsonrpc\", \"method\", \"params\", \"id\"]\n    return OrderedDict(sorted(request.items(), key=lambda k: sort_order.index(k[0])))", "docstring": "Sort a JSON-RPC request dict.\n\nThis has no effect other than making the request nicer to read.\n\n>>> json.dumps(sort_request(\n...     {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))\n'{\"jsonrpc\": \"2.0\", \"method\": \"add\", \"params\": [2, 3], \"id\": 2}'\n\nArgs:\nrequest: JSON-RPC request in dict format.", "source": "juraj-google-style"}
{"code": "def DeserializeFromDB(buffer):\n        \n        m = StreamManager.GetStream(buffer)\n        reader = BinaryReader(m)\n        spentcoin = SpentCoinState()\n        spentcoin.Deserialize(reader)\n\n        StreamManager.ReleaseStream(m)\n\n        return spentcoin", "docstring": "Deserialize full object.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nSpentCoinState:", "source": "juraj-google-style"}
{"code": "def __init__(self, timestamp, rank=0):\n        \n        self.timestamp = timestamp\n        self.rank = rank", "docstring": "Create a reorderer.\n\nArgs:\ntimestamp (int): Epoch time of timestamp. Packages before this time\nare preferred.\nrank (int): If non-zero, allow version changes at this rank or above\npast the timestamp.", "source": "juraj-google-style"}
{"code": "def JoinPath(stem=\"\", *parts):\n  \n  \n  parts = [SmartUnicode(path) for path in parts]\n\n  result = (stem + NormalizePath(u\"/\".join(parts))).replace(\"\n  result = result.rstrip(\"/\")\n\n  return result or \"/\"", "docstring": "A sane version of os.path.join.\n\nThe intention here is to append the stem to the path. The standard module\nremoves the path if the stem begins with a /.\n\nArgs:\nstem: The stem to join to.\n*parts: parts of the path to join. The first arg is always the root and\ndirectory traversal is not allowed.\n\nReturns:\na normalized path.", "source": "juraj-google-style"}
{"code": "def __init__(self, path):\n    \n    super(StorageFileReader, self).__init__()\n    self._path = path\n    self._storage_file = None", "docstring": "Initializes a storage reader.\n\nArgs:\npath (str): path to the input file.", "source": "juraj-google-style"}
{"code": "def to_FIB(self, other):\n        \n\n        if not isinstance(other, GroundedFunctionNetwork):\n            raise TypeError(\n                f\"Expected GroundedFunctionNetwork, but got {type(other)}\"\n            )\n\n        def shortname(var):\n            return var[var.find(\"::\") + 2 : var.rfind(\"_\")]\n\n        def shortname_vars(graph, shortname):\n            return [v for v in graph.nodes() if shortname in v]\n\n        this_var_nodes = [\n            shortname(n)\n            for (n, d) in self.nodes(data=True)\n            if d[\"type\"] == \"variable\"\n        ]\n        other_var_nodes = [\n            shortname(n)\n            for (n, d) in other.nodes(data=True)\n            if d[\"type\"] == \"variable\"\n        ]\n\n        shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))\n        full_shared_vars = {\n            full_var\n            for shared_var in shared_vars\n            for full_var in shortname_vars(self, shared_var)\n        }\n\n        return ForwardInfluenceBlanket(self, full_shared_vars)", "docstring": "Creates a ForwardInfluenceBlanket object representing the\nintersection of this model with the other input model.\n\nArgs:\nother: The GroundedFunctionNetwork object to compare this model to.\n\nReturns:\nA ForwardInfluenceBlanket object to use for model comparison.", "source": "juraj-google-style"}
{"code": "def compute_distance(a, b):\n    if (not a):\n        return len(b)\n    if (not b):\n        return len(a)\n    if ((a == b) or (str.lower(a) == str.lower(b))):\n        return 0\n    a = str.lower(a)\n    b = str.lower(b)\n    vector_1 = ([(- 1)] * (len(b) + 1))\n    vector_2 = ([(- 1)] * (len(b) + 1))\n    for i in range(len(vector_1)):\n        vector_1[i] = i\n    for i in range(len(a)):\n        vector_2[0] = (i + 1)\n        for j in range(len(b)):\n            penalty = (0 if (a[i] == b[j]) else compute_qwerty_distance(a[i], b[j]))\n            vector_2[(j + 1)] = min((vector_2[j] + 1), (vector_1[(j + 1)] + 1), (vector_1[j] + penalty))\n        for j in range(len(vector_1)):\n            vector_1[j] = vector_2[j]\n    return vector_2[len(b)]", "docstring": "Computes a modified Levenshtein distance between two strings, comparing the\nlowercase versions of each string and accounting for QWERTY distance.\n\nArguments:\n- a (str) String to compare to 'b'\n- b (str) String to compare to 'a'\n\nReturns:\n- (int) Number representing closeness of 'a' and 'b' (lower is better)", "source": "codesearchnet"}
{"code": "def learning_phase():\n    graph = ops.get_default_graph()\n    if graph is getattr(_GRAPH, 'graph', None):\n        learning_phase = symbolic_learning_phase()\n    else:\n        with ops.init_scope():\n            learning_phase = _GRAPH_LEARNING_PHASES[None]\n    _mark_func_graph_as_unsaveable(graph, learning_phase)\n    return learning_phase", "docstring": "Returns the learning phase flag.\n\nThe learning phase flag is a bool tensor (0 = test, 1 = train)\nto be passed as input to any Keras function\nthat uses a different behavior at train time and test time.\n\nReturns:\nLearning phase (scalar integer tensor or Python integer).", "source": "github-repos"}
{"code": "def __init__(self, input_bytes):\n    self.fdp = atheris.FuzzedDataProvider(input_bytes)", "docstring": "FuzzingHelper initializer.\n\nArgs:\ninput_bytes: Input randomized bytes used to create a FuzzedDataProvider.", "source": "github-repos"}
{"code": "def get_apod(cls, date=None, hd=False):\n        \n        instance = cls('planetary/apod')\n        filters = {\n            'date': date,\n            'hd': hd\n        }\n\n        return instance.get_resource(**filters)", "docstring": "Returns Astronomy Picture of the Day\n\nArgs:\ndate: date instance (default = today)\n\nhd: bool if high resolution should be included\n\nReturns:\njson", "source": "juraj-google-style"}
{"code": "def verify_permitted_to_read(gs_path):\n    \n    \n    \n    \n    from . import _bucket\n    bucket, prefix = _bucket.parse_name(gs_path)\n    credentials = None\n    if datalab.context.Context.is_signed_in():\n      credentials = datalab.context._utils.get_credentials()\n    args = {\n        'maxResults': Api._MAX_RESULTS,\n        'projection': 'noAcl'\n    }\n    if prefix is not None:\n      args['prefix'] = prefix\n    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))\n    try:\n      datalab.utils.Http.request(url, args=args, credentials=credentials)\n    except datalab.utils.RequestException as e:\n      if e.status == 401:\n        raise Exception('Not permitted to read from specified path. '\n                        'Please sign in and make sure you have read access.')\n      raise e", "docstring": "Check if the user has permissions to read from the given path.\n\nArgs:\ngs_path: the GCS path to check if user is permitted to read.\nRaises:\nException if user has no permissions to read.", "source": "juraj-google-style"}
{"code": "def unescape(inp, quote='\"'):\n    \n    if len(inp) < 2:\n        return inp\n\n    output = \"\"\n    unesc = False\n    for act in inp:\n        if act == quote and unesc:\n            output = output[:-1]\n\n        output += act\n\n        if act == \"\\\\\":\n            unesc = not unesc\n        else:\n            unesc = False\n\n    return output", "docstring": "Unescape `quote` in string `inp`.\n\nExample usage::\n\n>> unescape('hello \\\\\"')\n'hello \"'\n\nArgs:\ninp (str): String in which `quote` will be unescaped.\nquote (char, default \"): Specify which character will be unescaped.\n\nReturns:\nstr: Unescaped string.", "source": "juraj-google-style"}
{"code": "def transform_module(self, mod, user_context):\n    result = []\n    for member in mod.__dict__.values():\n        if inspect.ismodule(member):\n            continue\n        try:\n            result.append(self.transform(member, user_context))\n        except NotImplementedError:\n            pass\n    return result", "docstring": "Transforms a module.\n\nSubclasses may override this method. The return value is opaque.\n\nThe method receives the original AST. The result is passed as-is to the\noutput of `transform`.\n\nArgs:\nmod: A Python module.\nuser_context: An opaque object (may be None) that is forwarded to\ntransform_ast, through the ctx.user attribute.\nReturns:\nList[Tuple[Any, Any]]. By default it returns the output of transform_ast,\nevaluated on each supported member, other than modules, together with a\n`transformer.Context` containing information about the transformation\nprocess.", "source": "github-repos"}
{"code": "def __init__(self, engine: trt.ICudaEngine):\n    from cuda import cuda\n    import tensorrt as trt\n    self.engine = engine\n    self.context = engine.create_execution_context()\n    self.context_lock = threading.RLock()\n    self.inputs = []\n    self.outputs = []\n    self.gpu_allocations = []\n    self.cpu_allocations = []\n    try:\n        _ = np.bool\n    except AttributeError:\n        np.bool = np.bool_\n    for i in range(self.engine.num_bindings):\n        name = self.engine.get_binding_name(i)\n        dtype = self.engine.get_binding_dtype(i)\n        shape = self.engine.get_binding_shape(i)\n        size = trt.volume(shape) * dtype.itemsize\n        allocation = _assign_or_fail(cuda.cuMemAlloc(size))\n        binding = {'index': i, 'name': name, 'dtype': np.dtype(trt.nptype(dtype)), 'shape': list(shape), 'allocation': allocation, 'size': size}\n        self.gpu_allocations.append(allocation)\n        if self.engine.binding_is_input(i):\n            self.inputs.append(binding)\n        else:\n            self.outputs.append(binding)\n    assert self.context\n    assert len(self.inputs) > 0\n    assert len(self.outputs) > 0\n    assert len(self.gpu_allocations) > 0\n    for output in self.outputs:\n        self.cpu_allocations.append(np.zeros(output['shape'], output['dtype']))\n    self.stream = _assign_or_fail(cuda.cuStreamCreate(0))", "docstring": "Implementation of the TensorRTEngine class which handles\nallocations associated with TensorRT engine.\n\nExample Usage::\n\nTensorRTEngine(engine)\n\nArgs:\nengine: trt.ICudaEngine object that contains TensorRT engine", "source": "github-repos"}
{"code": "def ProtoEq(a, b):\n\n    def Format(pb):\n        \n        if isinstance(pb, message.Message):\n            return dict(((desc.number, value) for desc, value in pb.ListFields()))\n        elif _IsMap(pb):\n            return dict(pb.items())\n        elif _IsRepeatedContainer(pb):\n            return dict(enumerate(list(pb)))\n        else:\n            return pb\n    a, b = (Format(a), Format(b))\n    if not isinstance(a, dict) or not isinstance(b, dict):\n        return a == b\n    for tag in sorted(set(a.keys()) | set(b.keys())):\n        if tag not in a or tag not in b:\n            return False\n        elif not ProtoEq(a[tag], b[tag]):\n            return False\n    return True", "docstring": "Compares two proto2 objects for equality.\n\nRecurses into nested messages. Uses list (not set) semantics for comparing\nrepeated fields, ie duplicates and order matter.\n\nArgs:\na: A proto2 message or a primitive.\nb: A proto2 message or a primitive.\n\nReturns:\n`True` if the messages are equal.", "source": "github-repos"}
{"code": "def mt_report(context, case_id, test, outpath=None):\n    \n    LOG.info('exporting mitochondrial variants for case \"{}\"'.format(case_id))\n\n    adapter = context.obj['adapter']\n    query = {'chrom':'MT'}\n\n    case_obj = adapter.case(case_id=case_id)\n\n    if not case_obj:\n        LOG.warning('Could not find a scout case with id \"{}\". No report was created.'.format(case_id))\n        context.abort()\n\n    samples = case_obj.get('individuals')\n    mt_variants = list(adapter.variants(case_id=case_id, query=query, nr_of_variants= -1, sort_key='position'))\n    if not mt_variants:\n        LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))\n        context.abort()\n\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\n\n    \n    if not outpath:\n        outpath = str(os.getcwd())\n\n    \n    \n    written_files = 0\n\n    for sample in samples:\n        sample_id = sample['individual_id']\n        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)\n\n        \n        document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'\n        workbook = Workbook(os.path.join(outpath,document_name))\n        Report_Sheet = workbook.add_worksheet()\n\n        if test and sample_lines and workbook:\n            written_files +=1\n            continue\n\n        \n        row = 0\n        for col,field in enumerate(MT_EXPORT_HEADER):\n            Report_Sheet.write(row,col,field)\n\n        \n        for row, line in enumerate(sample_lines,1): \n            for col, field in enumerate(line): \n                Report_Sheet.write(row,col,field)\n        workbook.close()\n\n        if os.path.exists(os.path.join(outpath,document_name)):\n            written_files += 1\n\n    if test:\n        LOG.info(\"Number of excel files that can be written to folder {0}: {1}\".format(outpath, written_files))\n    else:\n        LOG.info(\"Number of excel files written to folder {0}: {1}\".format(outpath, written_files))\n    return written_files", "docstring": "Export all mitochondrial variants for each sample of a case\nand write them to an excel file\n\nArgs:\nadapter(MongoAdapter)\ncase_id(str)\ntest(bool): True if the function is called for testing purposes\noutpath(str): path to output file\n\nReturns:\nwritten_files(int): number of written or simulated files", "source": "juraj-google-style"}
{"code": "def is_symbolic_tensor(tensor):\n    if isinstance(tensor, tensor_lib.Tensor):\n        return hasattr(tensor, 'graph')\n    elif is_extension_type(tensor):\n        component_tensors = nest.flatten(tensor, expand_composites=True)\n        return any((hasattr(t, 'graph') for t in component_tensors))\n    elif isinstance(tensor, variables.Variable):\n        return getattr(tensor, '_keras_history', False) or not context.executing_eagerly()\n    elif isinstance(tensor, tuple(_user_convertible_tensor_types)):\n        tensor = ops.convert_to_tensor_or_composite(tensor)\n        return is_symbolic_tensor(tensor)\n    else:\n        return False", "docstring": "Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.\n\nA Variable can be seen as either: it is considered symbolic\nwhen we are in a graph scope, and eager when we are in an eager scope.\n\nArgs:\ntensor: A tensor instance to test.\n\nReturns:\nTrue for symbolic tensors, False for eager tensors.", "source": "github-repos"}
{"code": "def Print(self, output_writer):\n    \n    if self._filters:\n      output_writer.Write('Filters:\\n')\n      for file_entry_filter in self._filters:\n        file_entry_filter.Print(output_writer)", "docstring": "Prints a human readable version of the filter.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def has_axis(self, axis):\n    if (self.type != EventType.POINTER_AXIS):\n        raise AttributeError(_wrong_meth.format(self.type))\n    return self._libinput.libinput_event_pointer_has_axis(self._handle, axis)", "docstring": "Check if the event has a valid value for the given axis.\n\nIf this method returns True for an axis and :meth:`get_axis_value`\nreturns a value of 0, the event is a scroll stop event.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises\n:exc:`AttributeError`.\n\nArgs:\naxis (~libinput.constant.PointerAxis): The axis to check.\nReturns:\nbool: True if this event contains a value for this axis.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def stderr(self):\n    if (not self.id):\n        raise WorkflowError('Workflow is not running.  Cannot get stderr.')\n    if self.batch_values:\n        raise NotImplementedError('Query Each Workflow Id within the Batch Workflow for stderr.')\n    wf = self.workflow.get(self.id)\n    stderr_list = []\n    for task in wf['tasks']:\n        stderr_list.append({'id': task['id'], 'taskType': task['taskType'], 'name': task['name'], 'stderr': self.workflow.get_stderr(self.id, task['id'])})\n    return stderr_list", "docstring": "Get stderr from all the tasks of a workflow.\n\nReturns:\n(list): tasks with their stderr\n\nExample:\n>>> workflow.stderr\n[\n{\n\"id\": \"4488895771403082552\",\n\"taskType\": \"AOP_Strip_Processor\",\n\"name\": \"Task1\",\n\"stderr\": \"............\"\n}\n]", "source": "codesearchnet"}
{"code": "def prepare(self):\n    msg = aioxmpp.stanza.Message(to=self.to, from_=self.sender, type_=aioxmpp.MessageType.CHAT)\n    msg.body[None] = self.body\n    if len(self.metadata):\n        data = forms_xso.Data(type_=forms_xso.DataType.FORM)\n        for (name, value) in self.metadata.items():\n            data.fields.append(forms_xso.Field(var=name, type_=forms_xso.FieldType.TEXT_SINGLE, values=[value]))\n        if self.thread:\n            data.fields.append(forms_xso.Field(var='_thread_node', type_=forms_xso.FieldType.TEXT_SINGLE, values=[self.thread]))\n        data.title = SPADE_X_METADATA\n        msg.xep0004_data = [data]\n    return msg", "docstring": "Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.\n\nReturns:\naioxmpp.stanza.Message: the message prepared to be sent", "source": "codesearchnet"}
{"code": "def Parse(self, conditions, host_data):\n    processed = []\n    probes = self.triggers.Calls(conditions)\n    for p in probes:\n        artifact_data = host_data.get(p.artifact)\n        if (not p.result_context):\n            rdf_data = artifact_data['PARSER']\n        else:\n            rdf_data = artifact_data.get(str(p.result_context))\n        try:\n            result = p.Parse(rdf_data)\n        except ProcessingError as e:\n            raise ProcessingError(('Bad artifact %s: %s' % (p.artifact, e)))\n        if result:\n            processed.append(result)\n    return self.matcher.Detect(probes, processed)", "docstring": "Runs probes that evaluate whether collected data has an issue.\n\nArgs:\nconditions: The trigger conditions.\nhost_data: A map of artifacts and rdf data.\n\nReturns:\nAnomalies if an issue exists.", "source": "codesearchnet"}
{"code": "def entropy(state):\n    \n\n    rho = np.array(state)\n    if rho.ndim == 1:\n        return 0\n    evals = np.maximum(np.linalg.eigvalsh(state), 0.)\n    return shannon_entropy(evals, base=np.e)", "docstring": "Compute the von-Neumann entropy of a quantum state.\n\nArgs:\nstate (array_like): a density matrix or state vector.\n\nReturns:\nfloat: The von-Neumann entropy S(rho).", "source": "juraj-google-style"}
{"code": "def set_timezone(tz=None, deploy=False):\n    if (not tz):\n        raise CommandExecutionError('Timezone name option must not be none.')\n    ret = {}\n    query = {'type': 'config', 'action': 'set', 'xpath': \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/timezone\", 'element': '<timezone>{0}</timezone>'.format(tz)}\n    ret.update(__proxy__['panos.call'](query))\n    if (deploy is True):\n        ret.update(commit())\n    return ret", "docstring": "Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.\n\nCLI Example:\n\nArgs:\ntz (str): The name of the timezone to set.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_timezone UTC\nsalt '*' panos.set_timezone UTC deploy=True", "source": "codesearchnet"}
{"code": "def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_transpiler):\n    \n    pass_manager = PassManager()\n    pass_manager.property_set['layout'] = initial_layout\n\n    pass_manager.append(Unroller(basis_gates))\n\n    \n    pass_manager.append(TrivialLayout(coupling_map),\n                        condition=lambda property_set: not property_set['layout'])\n\n    \n    \n    pass_manager.append(CheckMap(coupling_map))\n    pass_manager.append(DenseLayout(coupling_map),\n                        condition=lambda property_set: not property_set['is_swap_mapped'])\n\n    \n    pass_manager.append(FullAncillaAllocation(coupling_map))\n    pass_manager.append(EnlargeWithAncilla())\n\n    \n    pass_manager.append(Unroll3qOrMore())\n\n    \n    pass_manager.append(LegacySwap(coupling_map, trials=20, seed=seed_transpiler))\n\n    \n    pass_manager.append(Decompose(SwapGate))\n\n    \n    pass_manager.append(CXDirection(coupling_map))\n\n    \n    pass_manager.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx']))\n\n    \n    simplification_passes = [Optimize1qGates(), CXCancellation(), RemoveResetInZeroState()]\n\n    pass_manager.append(simplification_passes + [Depth(), FixedPoint('depth')],\n                        do_while=lambda property_set: not property_set['depth_fixed_point'])\n\n    return pass_manager", "docstring": "The default pass manager that maps to the coupling map.\n\nArgs:\nbasis_gates (list[str]): list of basis gate names supported by the target.\ncoupling_map (CouplingMap): coupling map to target in mapping.\ninitial_layout (Layout or None): initial layout of virtual qubits on physical qubits\nseed_transpiler (int or None): random seed for stochastic passes.\n\nReturns:\nPassManager: A pass manager to map and optimize.", "source": "juraj-google-style"}
{"code": "def append(self, transitions, rows=None):\n    rows = (tf.range(self._capacity) if (rows is None) else rows)\n    assert (rows.shape.ndims == 1)\n    assert_capacity = tf.assert_less(rows, self._capacity, message='capacity exceeded')\n    with tf.control_dependencies([assert_capacity]):\n        assert_max_length = tf.assert_less(tf.gather(self._length, rows), self._max_length, message='max length exceeded')\n    with tf.control_dependencies([assert_max_length]):\n        timestep = tf.gather(self._length, rows)\n        indices = tf.stack([rows, timestep], 1)\n        append_ops = tools.nested.map((lambda var, val: tf.scatter_nd_update(var, indices, val)), self._buffers, transitions, flatten=True)\n    with tf.control_dependencies(append_ops):\n        episode_mask = tf.reduce_sum(tf.one_hot(rows, self._capacity, dtype=tf.int32), 0)\n        return self._length.assign_add(episode_mask)", "docstring": "Append a batch of transitions to rows of the memory.\n\nArgs:\ntransitions: Tuple of transition quantities with batch dimension.\nrows: Episodes to append to, defaults to all.\n\nReturns:\nOperation.", "source": "codesearchnet"}
{"code": "def get_metric_function(metric, output_shape=None, loss_fn=None):\n    if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n        return metrics_module.get(metric)\n    is_sparse_categorical_crossentropy = isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.sparse_categorical_crossentropy)\n    is_binary_crossentropy = isinstance(loss_fn, losses.BinaryCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.binary_crossentropy)\n    if metric in ['accuracy', 'acc']:\n        if output_shape[-1] == 1 or is_binary_crossentropy:\n            return metrics_module.binary_accuracy\n        elif is_sparse_categorical_crossentropy:\n            return metrics_module.sparse_categorical_accuracy\n        return metrics_module.categorical_accuracy\n    else:\n        if output_shape[-1] == 1 or is_binary_crossentropy:\n            return metrics_module.binary_crossentropy\n        elif is_sparse_categorical_crossentropy:\n            return metrics_module.sparse_categorical_crossentropy\n        return metrics_module.categorical_crossentropy", "docstring": "Returns the metric function corresponding to the given metric input.\n\nArgs:\nmetric: Metric function name or reference.\noutput_shape: The shape of the output that this metric will be calculated\nfor.\nloss_fn: The loss function used.\n\nReturns:\nThe metric function.", "source": "github-repos"}
{"code": "def render_asset_html(self, path, tag_template):\n    url = os.path.join(settings.STATIC_URL, path)\n    return tag_template.format(url=url)", "docstring": "Render HTML tag for a given path.\n\nArguments:\npath (string): Relative path from static directory.\ntag_template (string): Template string for HTML tag.\n\nReturns:\nstring: HTML tag with url from given path.", "source": "codesearchnet"}
{"code": "def reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):\n    squared_phase = np.dot(reflection_matrix[(:, 0)], reflection_matrix[(0, :)])\n    phase = complex(np.sqrt(squared_phase))\n    i = (np.eye(reflection_matrix.shape[0]) * phase)\n    pos_part = ((i + reflection_matrix) * 0.5)\n    neg_part = ((i - reflection_matrix) * 0.5)\n    pos_factor = (phase ** (exponent - 1))\n    neg_factor = (pos_factor * (complex((- 1)) ** exponent))\n    pos_part_raised = (pos_factor * pos_part)\n    neg_part_raised = (neg_part * neg_factor)\n    return (pos_part_raised + neg_part_raised)", "docstring": "Raises a matrix with two opposing eigenvalues to a power.\n\nArgs:\nreflection_matrix: The matrix to raise to a power.\nexponent: The power to raise the matrix to.\n\nReturns:\nThe given matrix raised to the given power.", "source": "codesearchnet"}
{"code": "def __init__(self, element=None):\n        \n        super(RootTreeMapNode, self).__init__(element)\n\n        self._depth = 0", "docstring": "Constructor.\n\nArgs:\nelement: object to attach to this root.", "source": "juraj-google-style"}
{"code": "def merge_single_qubit_gates_into_phased_x_z(circuit: circuits.Circuit, atol: float=1e-08) -> None:\n\n    def synth(qubit: ops.Qid, matrix: np.ndarray) -> List[ops.Operation]:\n        out_gates = decompositions.single_qubit_matrix_to_phased_x_z(matrix, atol)\n        return [gate(qubit) for gate in out_gates]\n    MergeSingleQubitGates(synthesizer=synth).optimize_circuit(circuit)", "docstring": "Canonicalizes runs of single-qubit rotations in a circuit.\n\nSpecifically, any run of non-parameterized circuits will be replaced by an\noptional PhasedX operation followed by an optional Z operation.\n\nArgs:\ncircuit: The circuit to rewrite. This value is mutated in-place.\natol: Absolute tolerance to angle error. Larger values allow more\nnegligible gates to be dropped, smaller values increase accuracy.", "source": "codesearchnet"}
{"code": "def _process(op_queue, seen_ops):\n    reads = []\n    writes = []\n    op = op_queue.pop()\n    if op in seen_ops:\n        return (reads, writes)\n    seen_ops.add(op)\n    reads, writes = acd_utils.get_read_write_resource_inputs(op)\n    op_queue.extend((t.op for t in op.inputs if t.dtype == dtypes.variant))\n    return (reads, writes)", "docstring": "Processes the next element of the op queue.\n\nArgs:\nop_queue: Queue of Dataset operations to process.\nseen_ops: Already processed set of Operations.\n\nReturns:\nA 2-tuple containing sets of resource handles. The first tuple entry\ncontains read-only handles and the second entry contains read-write\nhandles.", "source": "github-repos"}
{"code": "def __init__(self, port, observer):\n    self._web_server = gui.websocket_server.WebSocketServer()\n    self._port = port\n    self._observer = observer\n    self._clients = set()", "docstring": "Instantiates a GUI server.\n\nArgs:\nport: tcp/ssl port for internal web server.\nobserver: GuiObserver called on requests.", "source": "github-repos"}
{"code": "def _merge_bee(self, bee):\n    random_dimension = randint(0, (len(self._value_ranges) - 1))\n    second_bee = randint(0, (self._num_employers - 1))\n    while (bee.id == self._employers[second_bee].id):\n        second_bee = randint(0, (self._num_employers - 1))\n    new_bee = deepcopy(bee)\n    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(new_bee.values[random_dimension], self._employers[second_bee].values[random_dimension], self._value_ranges[random_dimension])\n    fitness_score = new_bee.get_score(self._fitness_fxn(new_bee.values, **self._args))\n    return (fitness_score, new_bee.values, new_bee.error)", "docstring": "Shifts a random value for a supplied bee with in accordance with\nanother random bee's value\n\nArgs:\nbee (EmployerBee): supplied bee to merge\n\nReturns:\ntuple: (score of new position, values of new position, fitness\nfunction return value of new position)", "source": "codesearchnet"}
{"code": "def Add(self, entry):\n    if not isinstance(entry, MapEntry):\n        raise TypeError('Not instance of MapEntry')\n    if not entry.Verify():\n        self.log.info('refusing to add entry, verify failed')\n        return False\n    if entry.Key() not in self._data:\n        self._index.append(entry.Key())\n    else:\n        self.log.warning('duplicate key detected when adding to map: %r, overwritten', entry.Key())\n    self._data[entry.Key()] = entry\n    return True", "docstring": "Add a MapEntry object to the Map and verify it (overwrites).\n\nArgs:\nentry: A maps.MapEntry instance.\n\nReturns:\nA boolean indicating the add is successful when True.\n\nRaises:\nTypeError: The object passed is not the right type.", "source": "github-repos"}
{"code": "def new_from_list(cls, items, **kwargs):\n    obj = cls(**kwargs)\n    for item in items:\n        obj.append(ListItem(item))\n    return obj", "docstring": "Populates the ListView with a string list.\n\nArgs:\nitems (list): list of strings to fill the widget with.", "source": "codesearchnet"}
{"code": "def entropy(state):\n    rho = np.array(state)\n    if (rho.ndim == 1):\n        return 0\n    evals = np.maximum(np.linalg.eigvalsh(state), 0.0)\n    return shannon_entropy(evals, base=np.e)", "docstring": "Compute the von-Neumann entropy of a quantum state.\n\nArgs:\nstate (array_like): a density matrix or state vector.\n\nReturns:\nfloat: The von-Neumann entropy S(rho).", "source": "codesearchnet"}
{"code": "def train(self, input_data_config, output_data_config, hyperparameters, job_name):\n        \n        self.container_root = self._create_tmp_folder()\n        os.mkdir(os.path.join(self.container_root, 'output'))\n        \n        os.mkdir(os.path.join(self.container_root, 'output', 'data'))\n        \n        \n        shared_dir = os.path.join(self.container_root, 'shared')\n        os.mkdir(shared_dir)\n\n        data_dir = self._create_tmp_folder()\n        volumes = self._prepare_training_volumes(data_dir, input_data_config, output_data_config,\n                                                 hyperparameters)\n        \n        hyperparameters = self._update_local_src_path(hyperparameters, key=sagemaker.estimator.DIR_PARAM_NAME)\n\n        \n        \n        for host in self.hosts:\n            _create_config_file_directories(self.container_root, host)\n            self.write_config_files(host, hyperparameters, input_data_config)\n            shutil.copytree(data_dir, os.path.join(self.container_root, host, 'input', 'data'))\n\n        training_env_vars = {\n            REGION_ENV_NAME: self.sagemaker_session.boto_region_name,\n            TRAINING_JOB_NAME_ENV_NAME: job_name,\n        }\n        compose_data = self._generate_compose_file('train', additional_volumes=volumes,\n                                                   additional_env_vars=training_env_vars)\n        compose_command = self._compose()\n\n        if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image):\n            _pull_image(self.image)\n\n        process = subprocess.Popen(compose_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n        try:\n            _stream_output(process)\n        except RuntimeError as e:\n            \n            \n            msg = \"Failed to run: %s, %s\" % (compose_command, str(e))\n            raise RuntimeError(msg)\n        finally:\n            artifacts = self.retrieve_artifacts(compose_data, output_data_config, job_name)\n\n            \n            \n            \n            dirs_to_delete = [data_dir, shared_dir]\n            self._cleanup(dirs_to_delete)\n\n        \n        \n        print('===== Job Complete =====')\n        return artifacts", "docstring": "Run a training job locally using docker-compose.\nArgs:\ninput_data_config (dict): The Input Data Configuration, this contains data such as the\nchannels to be used for training.\nhyperparameters (dict): The HyperParameters for the training job.\njob_name (str): Name of the local training job being run.\n\nReturns (str): Location of the trained model.", "source": "juraj-google-style"}
{"code": "def get_lambda_alias_arn(app, account, region):\n    session = boto3.Session(profile_name=account, region_name=region)\n    lambda_client = session.client('lambda')\n    lambda_aliases = lambda_client.list_aliases(FunctionName=app)\n    matched_alias = None\n    for alias in lambda_aliases['Aliases']:\n        if (alias['Name'] == account):\n            lambda_alias_arn = alias['AliasArn']\n            LOG.info('Found ARN for alias %s for function %s', account, app)\n            matched_alias = lambda_alias_arn\n            break\n    else:\n        fatal_message = 'Lambda alias {0} of function {1} not found'.format(account, app)\n        LOG.fatal(fatal_message)\n        raise LambdaAliasDoesNotExist(fatal_message)\n    return matched_alias", "docstring": "Get lambda alias ARN. Assumes that account name is equal to alias name.\n\nArgs:\naccount (str): AWS account name.\nregion (str): Region name, e.g. us-east-1\napp (str): Lambda function name\n\nReturns:\nstr: ARN for requested lambda alias", "source": "codesearchnet"}
{"code": "def request(self, method: str, path: str, content: Optional[Union[(dict, bytes, str)]]=None, timestamp: Optional[int]=None, external_url: Optional[str]=None, headers: Optional[Dict[(str, str)]]=None, query_params: Optional[Dict[(str, Any)]]=None, api_path: str='/_matrix/client/r0') -> Awaitable[dict]:\n    content = (content or {})\n    headers = (headers or {})\n    query_params = (query_params or {})\n    query_params['access_token'] = self.token\n    if (timestamp is not None):\n        if isinstance(timestamp, datetime):\n            timestamp = int((timestamp.replace(tzinfo=timezone.utc).timestamp() * 1000))\n        query_params['ts'] = timestamp\n    if (isinstance(content, dict) and (external_url is not None)):\n        content['external_url'] = external_url\n    method = method.upper()\n    if (method not in ['GET', 'PUT', 'DELETE', 'POST']):\n        raise MatrixError(('Unsupported HTTP method: %s' % method))\n    if ('Content-Type' not in headers):\n        headers['Content-Type'] = 'application/json'\n    if (headers.get('Content-Type', None) == 'application/json'):\n        content = json.dumps(content)\n    if (self.identity and (not self.is_real_user)):\n        query_params['user_id'] = self.identity\n    self._log_request(method, path, content, query_params)\n    endpoint = ((self.base_url + api_path) + path)\n    return self._send(method, endpoint, content, query_params, (headers or {}))", "docstring": "Make a raw HTTP request.\n\nArgs:\nmethod: The HTTP method to use.\npath: The API endpoint to call. Does not include the base path (e.g. /_matrix/client/r0).\ncontent: The content to post as a dict (json) or bytes/str (raw).\ntimestamp: The timestamp query param used for timestamp massaging.\nexternal_url: The external_url field to send in the content\n(only applicable if content is dict).\nheaders: The dict of HTTP headers to send.\nquery_params: The dict of query parameters to send.\napi_path: The base API path.\n\nReturns:\nThe response as a dict.", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the AttributeReference object.'.format(kmip_version.value))\n    local_buffer = BytearrayStream()\n    if self._vendor_identification:\n        self._vendor_identification.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The AttributeReference is missing the vendor identification field.')\n    if self._attribute_name:\n        self._attribute_name.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The AttributeReference is missing the attribute name field.')\n    self.length = local_buffer.length()\n    super(AttributeReference, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the AttributeReference structure encoding to the data stream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode\nAttributes structure data, supporting a write method.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidField: Raised if the vendor identification or attribute name\nfields are not defined.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the AttributeReference structure.", "source": "codesearchnet"}
{"code": "def validate_policy(topic, signer, routing_policy, nitpicky=False):\n    \n    if topic in routing_policy:\n        \n        if signer in routing_policy[topic]:\n            \n            \n            return True\n        else:\n            \n            _log.error(\"Authorization/routing_policy error.  \"\n                       \"Topic %r.  Signer %r.\" % (topic, signer))\n            return False\n    else:\n        \n        \n\n        \n        \n        \n        if nitpicky:\n            \n            \n            \n            _log.error(\"Authorization/routing_policy underspecified.\")\n            return False\n        else:\n            \n            \n            \n            _log.warning('No routing policy defined for \"{t}\" but routing_nitpicky is '\n                         'False so the message is being treated as authorized.'.format(t=topic))\n            return True", "docstring": "Checks that the sender is allowed to emit messages for the given topic.\n\nArgs:\ntopic (str): The message topic the ``signer`` used when sending the message.\nsigner (str): The Common Name of the certificate used to sign the message.\n\nReturns:\nbool: True if the policy defined in the settings allows the signer to send\nmessages on ``topic``.", "source": "juraj-google-style"}
{"code": "def ed25519_private_key_from_string(string):\n    \n    try:\n        return Ed25519PrivateKey.from_private_bytes(\n            base64.b64decode(string)\n        )\n    except (UnsupportedAlgorithm, Base64Error) as exc:\n        raise ScriptWorkerEd25519Error(\"Can't create Ed25519PrivateKey: {}!\".format(str(exc)))", "docstring": "Create an ed25519 private key from ``string``, which is a seed.\n\nArgs:\nstring (str): the string to use as a seed.\n\nReturns:\nEd25519PrivateKey: the private key", "source": "juraj-google-style"}
{"code": "def get_existing_path(path, topmost_path=None):\n    \n    prev_path = None\n\n    if topmost_path:\n        topmost_path = os.path.normpath(topmost_path)\n\n    while True:\n        if os.path.exists(path):\n            return path\n\n        path = os.path.dirname(path)\n        if path == prev_path:\n            return None\n\n        if topmost_path and os.path.normpath(path) == topmost_path:\n            return None\n\n        prev_path = path", "docstring": "Get the longest parent path in `path` that exists.\n\nIf `path` exists, it is returned.\n\nArgs:\npath (str): Path to test\ntopmost_path (str): Do not test this path or above\n\nReturns:\nstr: Existing path, or None if no path was found.", "source": "juraj-google-style"}
{"code": "def _ParseSystemTime(self, byte_stream):\n    systemtime_map = self._GetDataTypeMap('systemtime')\n    try:\n        systemtime = self._ReadStructureFromByteStream(byte_stream, 0, systemtime_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse SYSTEMTIME value with error: {0!s}'.format(exception))\n    system_time_tuple = (systemtime.year, systemtime.month, systemtime.weekday, systemtime.day_of_month, systemtime.hours, systemtime.minutes, systemtime.seconds, systemtime.milliseconds)\n    if (system_time_tuple == self._EMPTY_SYSTEM_TIME_TUPLE):\n        return None\n    try:\n        return dfdatetime_systemtime.Systemtime(system_time_tuple=system_time_tuple)\n    except ValueError:\n        raise errors.ParseError('Invalid SYSTEMTIME value: {0!s}'.format(system_time_tuple))", "docstring": "Parses a SYSTEMTIME date and time value from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\ndfdatetime.Systemtime: SYSTEMTIME date and time value or None if no\nvalue is set.\n\nRaises:\nParseError: if the SYSTEMTIME could not be parsed.", "source": "codesearchnet"}
{"code": "def enable_logging(log_level):\n    \n    \n    root_logger = logging.getLogger()\n    root_logger.setLevel(logging.DEBUG)\n    logfile_handler = logging.StreamHandler(_LOGFILE_STREAM)\n    logfile_handler.setLevel(logging.DEBUG)\n    logfile_handler.setFormatter(logging.Formatter(\n        '%(levelname)s [%(asctime)s][%(name)s] %(message)s'))\n    root_logger.addHandler(logfile_handler)\n    if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL:\n        signal.signal(signal.SIGTERM, _logfile_sigterm_handler)\n    if log_level:\n        handler = logging.StreamHandler()\n        handler.setFormatter(_LogColorFormatter())\n        root_logger.setLevel(log_level)\n        root_logger.addHandler(handler)", "docstring": "Configure the root logger and a logfile handler.\n\nArgs:\nlog_level: The logging level to set the logger handler.", "source": "juraj-google-style"}
{"code": "def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:\n    return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))", "docstring": "Returns a partial scope with current next state-fluents.\n\nArgs:\nnext_state_fluents (Sequence[tf.Tensor]): The next state fluents.\n\nReturns:\nA mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def stop(self, **kwargs):\n    return self.client.api.stop(self.id, **kwargs)", "docstring": "Stops a container. Similar to the ``docker stop`` command.\n\nArgs:\ntimeout (int): Timeout in seconds to wait for the container to\nstop before sending a ``SIGKILL``. Default: 10\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def crc_update(crc, data):\n    \n    if type(data) != array.array or data.itemsize != 1:\n        buf = array.array(\"B\", data)\n    else:\n        buf = data\n    crc = crc ^ _MASK\n    for b in buf:\n        table_index = (crc ^ b) & 0xff\n        crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK\n    return crc ^ _MASK", "docstring": "Update CRC-32C checksum with data.\nArgs:\ncrc: 32-bit checksum to update as long.\ndata: byte array, string or iterable over bytes.\nReturns:\n32-bit updated CRC-32C as long.", "source": "juraj-google-style"}
{"code": "def get(self, key, default=None):\n        \n        return self._fetch_cmd(b'get', [key], False).get(key, default)", "docstring": "The memcached \"get\" command, but only for one key, as a convenience.\n\nArgs:\nkey: str, see class docs for details.\ndefault: value that will be returned if the key was not found.\n\nReturns:\nThe value for the key, or default if the key wasn't found.", "source": "juraj-google-style"}
{"code": "def FindUnspentCoins(self, from_addr=None, use_standard=False, watch_only_val=0):\n        \n        ret = []\n        for coin in self.GetCoins():\n            if coin.State & CoinState.Confirmed > 0 and \\\n                    coin.State & CoinState.Spent == 0 and \\\n                    coin.State & CoinState.Locked == 0 and \\\n                    coin.State & CoinState.Frozen == 0 and \\\n                    coin.State & CoinState.WatchOnly == watch_only_val:\n\n                do_exclude = False\n                if self._vin_exclude:\n                    for to_exclude in self._vin_exclude:\n\n                        if coin.Reference.PrevIndex == to_exclude.PrevIndex and \\\n                                coin.Reference.PrevHash == to_exclude.PrevHash:\n                            do_exclude = True\n\n                if do_exclude:\n                    continue\n\n                if from_addr is not None:\n                    if coin.Output.ScriptHash == from_addr:\n                        ret.append(coin)\n                elif use_standard:\n\n                    contract = self._contracts[coin.Output.ScriptHash.ToBytes()]\n                    if contract.IsStandard:\n                        ret.append(coin)\n                else:\n                    ret.append(coin)\n\n        return ret", "docstring": "Finds unspent coin objects in the wallet.\n\nArgs:\nfrom_addr (UInt160): a bytearray (len 20) representing an address.\nuse_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ).\nwatch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.\n\nReturns:\nlist: a list of ``neo.Wallet.Coins`` in the wallet that are not spent.", "source": "juraj-google-style"}
{"code": "def get_blocks(self, block_ids):\n    return list(filter((lambda b: (b is not None)), map(self._get_block_by_id_or_none, block_ids)))", "docstring": "Returns all blocks with the given set of block_ids.\nIf a block id in the provided iterable does not exist in the block\nstore, it is ignored.\n\nArgs:\nblock_ids (:iterable:str): an iterable of block ids\n\nReturns\nlist of block wrappers found for the given block ids", "source": "codesearchnet"}
{"code": "def cancelHistoricalData(self, bars: BarDataList):\n    self.client.cancelHistoricalData(bars.reqId)\n    self.wrapper.endSubscription(bars)", "docstring": "Cancel the update subscription for the historical bars.\n\nArgs:\nbars: The bar list that was obtained from ``reqHistoricalData``\nwith a keepUpToDate subscription.", "source": "codesearchnet"}
{"code": "def merge( self, other_cluster ):\n        \n        new_cluster = Cluster( self.sites | other_cluster.sites )\n        new_cluster.neighbours = ( self.neighbours | other_cluster.neighbours ).difference( new_cluster.sites )\n        return new_cluster", "docstring": "Combine two clusters into a single cluster.\n\nArgs:\nother_cluster (Cluster): The second cluster to combine.\n\nReturns:\n(Cluster):   The combination of both clusters.", "source": "juraj-google-style"}
{"code": "def sam_verifier(entries, line=None):\n    \n\n    regex = r'^[!-?A-~]{1,255}\\t' \\\n            + r'([0-9]{1,4}|[0-5][0-9]{4}|' \\\n            + r'[0-9]{1,4}|[1-5][0-9]{4}|' \\\n            + r'6[0-4][0-9]{3}|65[0-4][0-9]{2}|' \\\n            + r'655[0-2][0-9]|6553[0-7])\\t' \\\n            + r'\\*|[!-()+-<>-~][!-~]*\\t' \\\n            + r'([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \\\n            + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \\\n            + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \\\n            + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \\\n            + r'6([0-3][0-9]|4[0-7])))))))))\\t' \\\n            + r'([0-9]{1,2}|1[0-9]{2}|' \\\n            + r'2[0-4][0-9]|25[0-5])\\t' \\\n            + r'\\*|([0-9]+[MIDNSHPX=])+\\t' \\\n            + r'\\*|=|[!-()+-<>-~][!-~]*\\t' \\\n            + r'([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \\\n            + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \\\n            + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \\\n            + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \\\n            + r'6([0-3][0-9]|4[0-7])))))))))\\t' \\\n            + r'-?([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \\\n            + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \\\n            + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \\\n            + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \\\n            + r'6([0-3][0-9]|4[0-7])))))))))\\t' \\\n            + r'\\*|[A-Za-z=.]+\\t' \\\n            + r'[!-~]+{0}$'.format(os.linesep)\n    delimiter = r'\\t'\n\n    for entry in entries:\n        try:\n            entry_verifier([entry.write()], regex, delimiter)\n        except FormatError as error:\n            \n            if line:\n                intro = 'Line {0}'.format(str(line))\n            elif error.part == 0:\n                intro = 'An entry with reference {0}'.format(entry.rname)\n            else:\n                intro = 'An entry with query {0}'.format(entry.qname)\n\n            \n            if error.part == 0:\n                if len(entry.qname) == 0:\n                    msg = '{0} has no query name'.format(intro)\n                elif len(entry.qname) > 255:\n                    msg = '{0} query name must be less than 255 ' \\\n                          'characters'.format(intro)\n                else:\n                    msg = '{0} query name contains characters not in ' \\\n                          '[!-?A-~]'.format(intro)\n            elif error.part == 1:\n                msg = '{0} flag not in range [0-(2^31-1)]'.format(intro)\n            elif error.part == 2:\n                if len(entry.rname) == 0:\n                    msg = '{0} has no reference name'.format(intro)\n                else:\n                    msg = '{0} reference name has characters not in ' \\\n                          '[!-()+-<>-~][!-~]'.format(intro)\n            elif error.part == 3:\n                msg = '{0} leftmost position not in range ' \\\n                      '[0-(2^31-1)]'.format(intro)\n            elif error.part == 4:\n                msg = '{0} mapping quality not in range ' \\\n                      '[0-(2^8-1)]'.format(intro)\n            elif error.part == 5:\n                msg = '{0} CIGAR string has characters not in ' \\\n                      '[0-9MIDNSHPX=]'.format(intro)\n            elif error.part == 6:\n                msg = '{0} mate read name has characters not in ' \\\n                      '[!-()+-<>-~][!-~]'.format(intro)\n            elif error.part == 7:\n                msg = '{0} mate read position not in range ' \\\n                      '[0-(2^31-1)]'.format(intro)\n      
      elif error.part == 8:\n                msg = '{0} template length not in range ' \\\n                      '[(-2^31+1)-(2^31-1)]'.format(intro)\n            elif error.part == 9:\n                msg = '{0} sequence has characters not in ' \\\n                      '[A-Za-z=.]'.format(intro)\n            elif error.part == 10:\n                msg = '{0} quality scores has characters not in ' \\\n                      '[!-~]'.format(intro)\n            else:\n                msg = '{0}: Unknown Error: Likely a Bug'.format(intro)\n            raise FormatError(message=msg)\n\n        if line:\n            line += 1", "docstring": "Raises error if invalid SAM format detected\n\nArgs:\nentries (list): A list of SamEntry instances\n\nline (int): Line number of first entry\n\nRaises:\nFormatError: Error when SAM format incorrect with descriptive message", "source": "juraj-google-style"}
{"code": "def list(self):\n    raw_reports = self._swimlane.request('get', 'reports?appId={}'.format(self._app.id)).json()\n    return [Report(self._app, raw_report) for raw_report in raw_reports if (raw_report['$type'] == Report._type)]", "docstring": "Retrieve all reports for parent app\n\nReturns:\n:class:`list` of :class:`~swimlane.core.resources.report.Report`: List of all returned reports", "source": "codesearchnet"}
{"code": "def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False):\n    for (name, slot_key) in already_defined.iteritems():\n        if (not isinstance(slot_key, db.Key)):\n            slot_key = db.Key(slot_key)\n        slot = self._output_dict.get(name)\n        if (slot is None):\n            if self._strict:\n                raise UnexpectedPipelineError(('Inherited output named \"%s\" must be filled but not declared for pipeline class \"%s\"' % (name, pipeline_name)))\n            else:\n                self._output_dict[name] = Slot(name=name, slot_key=slot_key)\n        else:\n            slot.key = slot_key\n            slot._exists = True\n    if resolve_outputs:\n        slot_key_dict = dict(((s.key, s) for s in self._output_dict.itervalues()))\n        all_slots = db.get(slot_key_dict.keys())\n        for (slot, slot_record) in zip(slot_key_dict.itervalues(), all_slots):\n            if (slot_record is None):\n                raise UnexpectedPipelineError(('Inherited output named \"%s\" for pipeline class \"%s\" is missing its Slot in the datastore: \"%s\"' % (slot.name, pipeline_name, slot.key)))\n            slot = slot_key_dict[slot_record.key()]\n            slot._set_value(slot_record)", "docstring": "Inherits outputs from a calling Pipeline.\n\nArgs:\npipeline_name: The Pipeline class name (used for debugging).\nalready_defined: Maps output name to stringified db.Key (of _SlotRecords)\nof any exiting output slots to be inherited by this future.\nresolve_outputs: When True, this method will dereference all output slots\nbefore returning back to the caller, making those output slots' values\navailable.\n\nRaises:\nUnexpectedPipelineError when resolve_outputs is True and any of the output\nslots could not be retrived from the Datastore.", "source": "codesearchnet"}
{"code": "def get_results(self, params=None, result_id=None):\n    if (result_id is not None):\n        return [dict(i) for i in self.db.table('results').all() if (i['meta']['id'] == result_id)]\n    if (params is None):\n        return [dict(i) for i in self.db.table('results').all()]\n    all_params = set((['RngRun'] + self.get_params()))\n    param_subset = set(params.keys())\n    if (not all_params.issuperset(param_subset)):\n        raise ValueError(('%s:\\nParameters: %s\\nQuery: %s' % ('Specified parameter keys do not match database format', all_params, param_subset)))\n    query_params = {}\n    for key in params:\n        if (not isinstance(params[key], list)):\n            query_params[key] = [params[key]]\n        else:\n            query_params[key] = params[key]\n    if (not query_params.keys()):\n        return [dict(i) for i in self.db.table('results').all()]\n    query = reduce(and_, [reduce(or_, [(where('params')[key] == v) for v in value]) for (key, value) in query_params.items()])\n    return [dict(i) for i in self.db.table('results').search(query)]", "docstring": "Return all the results available from the database that fulfill some\nparameter combinations.\n\nIf params is None (or not specified), return all results.\n\nIf params is specified, it must be a dictionary specifying the result\nvalues we are interested in, with multiple values specified as lists.\n\nFor example, if the following params value is used::\n\nparams = {\n'param1': 'value1',\n'param2': ['value2', 'value3']\n}\n\nthe database will be queried for results having param1 equal to value1,\nand param2 equal to value2 or value3.\n\nNot specifying a value for all the available parameters is allowed:\nunspecified parameters are assumed to be 'free', and can take any\nvalue.\n\nReturns:\nA list of results matching the query. Returned results have the\nsame structure as results inserted with the insert_result method.", "source": "codesearchnet"}
{"code": "def _use_gl(objs):\n    from ..models.plots import Plot\n    return _any(objs, (lambda obj: (isinstance(obj, Plot) and (obj.output_backend == 'webgl'))))", "docstring": "Whether a collection of Bokeh objects contains a plot requesting WebGL\n\nArgs:\nobjs (seq[Model or Document]) :\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def import_submodules(package: Union[(str, ModuleType)], base_package_for_relative_import: str=None, recursive: bool=True) -> Dict[(str, ModuleType)]:\n    if isinstance(package, str):\n        package = importlib.import_module(package, base_package_for_relative_import)\n    results = {}\n    for (loader, name, is_pkg) in pkgutil.walk_packages(package.__path__):\n        full_name = ((package.__name__ + '.') + name)\n        log.debug('importing: {}', full_name)\n        results[full_name] = importlib.import_module(full_name)\n        if (recursive and is_pkg):\n            results.update(import_submodules(full_name))\n    return results", "docstring": "Import all submodules of a module, recursively, including subpackages.\n\nArgs:\npackage: package (name or actual module)\nbase_package_for_relative_import: path to prepend?\nrecursive: import submodules too?\n\nReturns:\ndict: mapping from full module name to module", "source": "codesearchnet"}
{"code": "def __get_form_data(self, soup):\n        \n\n        elements = self.__get_valid_form_data_elements(soup)\n        form_data = self.__get_default_form_data_input(elements)\n        callback = self.options.callbacks.form_before_autofill\n        action = callback(self.queue_item, elements, form_data)\n\n        if action == CrawlerActions.DO_AUTOFILL_FORM:\n            self.__autofill_form_data(form_data, elements)\n\n        return form_data", "docstring": "Build a form data dict from the given form.\n\nArgs:\nsoup (obj): The BeautifulSoup form.\n\nReturns:\nobj: The form data (key/value).", "source": "juraj-google-style"}
{"code": "def expand_abbreviations(txt, fields):\n    \n    def _expand(matchobj):\n        s = matchobj.group(\"var\")\n        if s not in fields:\n            matches = [x for x in fields if x.startswith(s)]\n            if len(matches) == 1:\n                s = matches[0]\n        return \"{%s}\" % s\n    return re.sub(FORMAT_VAR_REGEX, _expand, txt)", "docstring": "Expand abbreviations in a format string.\n\nIf an abbreviation does not match a field, or matches multiple fields, it\nis left unchanged.\n\nExample:\n\n>>> fields = (\"hey\", \"there\", \"dude\")\n>>> expand_abbreviations(\"hello {d}\", fields)\n'hello dude'\n\nArgs:\ntxt (str): Format string.\nfields (list of str): Fields to expand to.\n\nReturns:\nExpanded string.", "source": "juraj-google-style"}
{"code": "def depth_soil_specific_heat(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_soil_specific_heat`'.format(value))\n    self._depth_soil_specific_heat = value", "docstring": "Corresponds to IDD Field `depth_soil_specific_heat`\n\nArgs:\nvalue (float): value for IDD Field `depth_soil_specific_heat`\nUnit: J/kg-K,\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def __init__(self, filePath=None, loadData=False):\n        \n        self.data = {}\n        \n        self.filePath = filePath\n        if loadData:\n            self.fileLoad(updatePath=False)", "docstring": "Initialize the DataManager object.\nArgs:\nfilePath (Optional[str]): Relative or absolute path to a JSON\ndata file. Defaults to None.\nloadData (Optional[bool]): Loads data from the given file path\nif True. Defaults to False.", "source": "juraj-google-style"}
{"code": "def add_done_callback(self, fn):\n    with self._condition:\n        if (self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]):\n            self._done_callbacks.append(fn)\n            return\n    fn(self)", "docstring": "Attaches a callable that will be called when the future finishes.\n\nArgs:\nfn: A callable that will be called with this future as its only\nargument when the future completes or is cancelled. The callable\nwill always be called by a thread in the same process in which\nit was added. If the future has already completed or been\ncancelled then the callable will be called immediately. These\ncallables are called in the order that they were added.", "source": "codesearchnet"}
{"code": "def foldl(fn, elems, initializer=None, name=None):\n    return functional_ops.foldl(fn, elems, initializer=initializer, name=name)", "docstring": "Reduce elems using fn to combine them from left to right.\n\nArgs:\nfn: Callable that will be called upon each element in elems and an\naccumulator, for instance `lambda acc, x: acc + x`\nelems: tensor\ninitializer: The first value used (`elems[0]` in case of None)\nname: A string name for the foldl node in the graph\n\nReturns:\nTensor with same type and shape as `initializer`.", "source": "github-repos"}
{"code": "def validate_seeded_answers_simple(answers, options, algo):\n    seen_options = {}\n    for answer in answers:\n        if answer:\n            key = options[answer['answer']].get('text')\n            if options[answer['answer']].get('image_url'):\n                key += options[answer['answer']].get('image_url')\n            seen_options.setdefault(key, 0)\n            seen_options[key] += 1\n    missing_options = []\n    index = 1\n    for option in options:\n        key = ((option.get('text') + option.get('image_url')) if option.get('image_url') else option.get('text'))\n        if (option.get('text') != 'n/a'):\n            if (seen_options.get(key, 0) == 0):\n                missing_options.append((_('Option ') + str(index)))\n            index += 1\n    if missing_options:\n        return {'seed_error': (_('Missing option seed(s): ') + ', '.join(missing_options))}\n    return None", "docstring": "This validator checks if the answers includes all possible options\n\nArgs:\nanswers (str): the answers to be checked\noptions (dict): all options that should exist in the answers\nalgo (str): selection algorithm\n\nReturns:\nNone if everything is good. Otherwise, the missing option error message.", "source": "codesearchnet"}
{"code": "def export(rv, code=None, headers=None):\n    \n    if isinstance(rv, ResponseBase):\n        return make_response(rv, code, headers)\n    else:\n        if code is None:\n            code = 200\n        mediatype = request.accept_mimetypes.best_match(\n            exporters.keys(), default='application/json')\n        return exporters[mediatype](rv, code, headers)", "docstring": "Create a suitable response\n\nArgs:\nrv: return value of action\ncode: status code\nheaders: response headers\nReturns:\nflask.Response", "source": "juraj-google-style"}
{"code": "def aws_client(self, client_id=None):\n    \n    if client_id is None:\n      return self._aws_clients\n    elif self._aws_clients is not None and self._aws_clients.has_key(client_id):\n      return self._aws_clients[client_id]\n    else:\n      return None", "docstring": "Get AWS client if it exists (must have been formerly stored with set_aws_clients)\nIf client_id is not provided, returns the dictionary of all clients\nArgs:\nclient_id: label for the client, e.g. 'ec2'; omit to get a dictionary of all clients\nReturns:\naws client if found, or None if not", "source": "juraj-google-style"}
{"code": "def get_connection_id_by_endpoint(self, endpoint):\n        \n        with self._connections_lock:\n            for connection_id in self._connections:\n                connection_info = self._connections[connection_id]\n                if connection_info.uri == endpoint:\n                    return connection_id\n            raise KeyError()", "docstring": "Returns the connection id associated with a publically\nreachable endpoint or raises KeyError if the endpoint is not\nfound.\n\nArgs:\nendpoint (str): A zmq-style uri which identifies a publically\nreachable endpoint.", "source": "juraj-google-style"}
{"code": "def count_matching(self, selector, offset=0):\n        \n\n        if selector.output:\n            data = self.streaming_data\n        elif selector.buffered:\n            data = self.storage_data\n        else:\n            raise ArgumentError(\"You can only pass a buffered selector to count_matching\", selector=selector)\n\n        count = 0\n        for i in range(offset, len(data)):\n            reading = data[i]\n\n            stream = DataStream.FromEncoded(reading.stream)\n            if selector.matches(stream):\n                count += 1\n\n        return count", "docstring": "Count the number of readings matching selector.\n\nArgs:\nselector (DataStreamSelector): The selector that we want to\ncount matching readings for.\noffset (int): The starting offset that we should begin counting at.\n\nReturns:\nint: The number of matching readings.", "source": "juraj-google-style"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    tsk_vs_part, _ = tsk_partition.GetTSKVsPartByPathSpec(\n        self._tsk_volume, path_spec)\n\n    \n    \n    if tsk_vs_part is None:\n      location = getattr(path_spec, 'location', None)\n      return location is not None and location == self.LOCATION_ROOT\n\n    return True", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nbool: True if the file entry exists or false otherwise.", "source": "juraj-google-style"}
{"code": "def add_file_recursive(self, filename, trim=False):\n        \n\n        assert not self.final, 'Trying to mutate a final graph.'\n        self.add_source_file(filename)\n        queue = collections.deque([filename])\n        seen = set()\n        while queue:\n            filename = queue.popleft()\n            self.graph.add_node(filename)\n            try:\n                deps, broken = self.get_file_deps(filename)\n            except parsepy.ParseError:\n                \n                \n                \n                \n                if filename.endswith('.py'):\n                    self.unreadable_files.add(filename)\n                else:\n                    self.graph.remove_node(filename)\n                continue\n            for f in broken:\n                self.broken_deps[filename].add(f)\n            for f in deps:\n                if self.follow_file(f, seen, trim):\n                    queue.append(f)\n                    seen.add(f)\n                self.graph.add_node(f)\n                self.graph.add_edge(filename, f)", "docstring": "Add a file and all its recursive dependencies to the graph.\n\nArgs:\nfilename: The name of the file.\ntrim: Whether to trim the dependencies of builtin and system files.", "source": "juraj-google-style"}
{"code": "def expect_false(condition, msg, extras=None):\n    \n    try:\n        asserts.assert_false(condition, msg, extras)\n    except signals.TestSignal as e:\n        logging.exception('Expected a `False` value, got `True`.')\n        recorder.add_error(e)", "docstring": "Expects an expression evaluates to False.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nArgs:\nexpr: The expression that is evaluated.\nmsg: A string explaining the details in case of failure.\nextras: An optional field for extra information to be included in test\nresult.", "source": "juraj-google-style"}
{"code": "def info(self, **kwargs):\n        \n        path = self._get_series_id_season_number_path('info')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the primary information about a TV season by its season number.\n\nArgs:\nlanguage: (optional) ISO 639 code.\nappend_to_response: (optional) Comma separated, any TV series\nmethod.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def expand_repertoire(self, direction, repertoire, new_purview=None):\n    if (repertoire is None):\n        return None\n    purview = distribution.purview(repertoire)\n    if (new_purview is None):\n        new_purview = self.node_indices\n    if (not set(purview).issubset(new_purview)):\n        raise ValueError('Expanded purview must contain original purview.')\n    non_purview_indices = tuple((set(new_purview) - set(purview)))\n    uc = self.unconstrained_repertoire(direction, non_purview_indices)\n    expanded_repertoire = (repertoire * uc)\n    return distribution.normalize(expanded_repertoire)", "docstring": "Distribute an effect repertoire over a larger purview.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nrepertoire (np.ndarray): The repertoire to expand.\n\nKeyword Args:\nnew_purview (tuple[int]): The new purview to expand the repertoire\nover. If ``None`` (the default), the new purview is the entire\nnetwork.\n\nReturns:\nnp.ndarray: A distribution over the new purview, where probability\nis spread out over the new nodes.\n\nRaises:\nValueError: If the expanded purview doesn't contain the original\npurview.", "source": "codesearchnet"}
{"code": "def broadcast_dynamic_shape(shape_x: DynamicRaggedShape, shape_y: DynamicRaggedShape) -> DynamicRaggedShape:\n    if not isinstance(shape_x, DynamicRaggedShape):\n        raise TypeError('shape_x must be a DynamicRaggedShape')\n    if not isinstance(shape_y, DynamicRaggedShape):\n        raise TypeError('shape_y must be a DynamicRaggedShape')\n    return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]", "docstring": "Returns the shape formed by broadcasting two shapes to be compatible.\n\n1. If shape_x and shape_y both have row_partitions, then fail if their dtypes\ndon't match.\n2. If neither has row_partitions and they have different dtypes,\ngo with int64.\n3. If one has row_partitions, go with that dtype.\n\nArgs:\nshape_x: A `DynamicRaggedShape`\nshape_y: A `DynamicRaggedShape`\n\nReturns:\nA `DynamicRaggedShape`.\nRaises:\nValueError: If `shape_x` and `shape_y` are not broadcast-compatible.", "source": "github-repos"}
{"code": "def download_folder(bucket_name, prefix, target, sagemaker_session):\n    boto_session = sagemaker_session.boto_session\n    s3 = boto_session.resource('s3')\n    bucket = s3.Bucket(bucket_name)\n    prefix = prefix.lstrip('/')\n    objects = list(bucket.objects.filter(Prefix=prefix))\n    if ((len(objects) > 0) and (objects[0].key == prefix) and (prefix[(- 1)] != '/')):\n        s3.Object(bucket_name, prefix).download_file(os.path.join(target, os.path.basename(prefix)))\n        return\n    for obj_sum in bucket.objects.filter(Prefix=prefix):\n        if ((obj_sum.key != '') and (obj_sum.key[(- 1)] == '/')):\n            continue\n        obj = s3.Object(obj_sum.bucket_name, obj_sum.key)\n        s3_relative_path = obj_sum.key[len(prefix):].lstrip('/')\n        file_path = os.path.join(target, s3_relative_path)\n        try:\n            os.makedirs(os.path.dirname(file_path))\n        except OSError as exc:\n            if (exc.errno != errno.EEXIST):\n                raise\n        obj.download_file(file_path)", "docstring": "Download a folder from S3 to a local path\n\nArgs:\nbucket_name (str): S3 bucket name\nprefix (str): S3 prefix within the bucket that will be downloaded. Can be a single file.\ntarget (str): destination path where the downloaded items will be placed\nsagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.", "source": "codesearchnet"}
{"code": "def zero_or_more(e, delimiter=None):\n    if (delimiter is None):\n        delimiter = (lambda s, grm, pos: (s, Ignore, (pos, pos)))\n\n    def match_zero_or_more(s, grm=None, pos=0):\n        start = pos\n        try:\n            (s, obj, span) = e(s, grm, pos)\n            pos = span[1]\n            data = ([] if (obj is Ignore) else [obj])\n        except PegreError:\n            return PegreResult(s, [], (pos, pos))\n        try:\n            while True:\n                (s, obj, span) = delimiter(s, grm, pos)\n                pos = span[1]\n                if (obj is not Ignore):\n                    data.append(obj)\n                (s, obj, span) = e(s, grm, pos)\n                pos = span[1]\n                if (obj is not Ignore):\n                    data.append(obj)\n        except PegreError:\n            pass\n        return PegreResult(s, data, (start, pos))\n    return match_zero_or_more", "docstring": "Create a PEG function to match zero or more expressions.\n\nArgs:\ne: the expression to match\ndelimiter: an optional expression to match between the\nprimary *e* matches.", "source": "codesearchnet"}
{"code": "def run_cm(cm, time_scale):\n    \n    cm = np.linalg.matrix_power(cm, time_scale)\n    \n    cm[cm > 1] = 1\n    return cm", "docstring": "Iterate a connectivity matrix the specified number of steps.\n\nArgs:\ncm (np.ndarray): A connectivity matrix.\ntime_scale (int): The number of steps to run.\n\nReturns:\nnp.ndarray: The connectivity matrix at the new timescale.", "source": "juraj-google-style"}
{"code": "def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding, data_format, expected, use_gpu, v2, use_negative_input=False):\n    if data_format == 'NCHW_VECT_C':\n        avg_pool_func = nn_ops.avg_pool\n        tf_logging.info('pool_func=%s', pool_func)\n        if pool_func == avg_pool_func:\n            tf_logging.info('NCHW_VECT_C not yet implemented for avg_pool')\n            return\n        if self._isMaxPool(pool_func) and isinstance(padding, list):\n            tf_logging.info('NCHW_VECT_C not yet implemented for max pool' + ' with explicit padding')\n            return\n    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding, data_format, dtypes.float32, expected, use_gpu, v2, use_negative_input)\n    if not test.is_built_with_rocm():\n        self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding, data_format, dtypes.float64, expected, use_gpu, v2, use_negative_input)\n    if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():\n        self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding, data_format, dtypes.float16, expected, use_gpu, v2, use_negative_input)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Function to be called, co.MaxPool, co.AvgPool,\nor the Lua version.\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\ndata_format: The data format we use to run the pooling operation.\nexpected: An array containing the expected operation outputs.\nuse_gpu: Whether we are running on GPU.\nv2: Whether to use v2 version.\nuse_negative_input: If the input values should be negative.\"", "source": "github-repos"}
{"code": "def get_access_token(self, http=None, additional_claims=None):\n        \n        if additional_claims is None:\n            if self.access_token is None or self.access_token_expired:\n                self.refresh(None)\n            return client.AccessTokenInfo(\n              access_token=self.access_token, expires_in=self._expires_in())\n        else:\n            \n            token, unused_expiry = self._create_token(additional_claims)\n            return client.AccessTokenInfo(\n              access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)", "docstring": "Create a signed jwt.\n\nArgs:\nhttp: unused\nadditional_claims: dict, additional claims to add to\nthe payload of the JWT.\nReturns:\nAn AccessTokenInfo with the signed jwt", "source": "juraj-google-style"}
{"code": "def operation_at(self, qubit: ops.Qid, moment_index: int) -> Optional[ops.Operation]:\n    if (not (0 <= moment_index < len(self._moments))):\n        return None\n    for op in self._moments[moment_index].operations:\n        if (qubit in op.qubits):\n            return op\n    return None", "docstring": "Finds the operation on a qubit within a moment, if any.\n\nArgs:\nqubit: The qubit to check for an operation on.\nmoment_index: The index of the moment to check for an operation\nwithin. Allowed to be beyond the end of the circuit.\n\nReturns:\nNone if there is no operation on the qubit at the given moment, or\nelse the operation.", "source": "codesearchnet"}
{"code": "def repl(optimize=True, persist=True):\n    \n    print(\"Extra commands for the REPL:\")\n    print(\".code    - print code\")\n    print(\".raw     - print raw code\")\n    print(\".quit    - exit immediately\")\n    print(\".reset   - reset machine (IP and stacks)\")\n    print(\".restart - create a clean, new machine\")\n    print(\".clear   - same as .restart\")\n    print(\".stack   - print data stack\")\n    print(\"\")\n\n    machine = Machine([])\n\n    def match(s, *args):\n        return any(map(lambda arg: s.strip()==arg, args))\n\n    while True:\n        try:\n            source = raw_input(\"> \").strip()\n\n            if source[0] == \".\" and len(source) > 1:\n                if match(source, \".quit\"):\n                    return\n                elif match(source, \".code\"):\n                    print_code(machine)\n                elif match(source, \".raw\"):\n                    print(machine.code)\n                elif match(source, \".reset\"):\n                    machine.reset()\n                elif match(source, \".restart\", \".clear\"):\n                    machine = Machine([])\n                elif match(source, \".stack\"):\n                    print(machine.stack)\n                else:\n                    raise ParseError(\"Unknown command: %s\" % source)\n                continue\n\n            code = compile(parse(source), silent=False, optimize=optimize)\n\n            if not persist:\n                machine.reset()\n\n            machine.code += code\n            machine.run()\n        except EOFError:\n            return\n        except KeyboardInterrupt:\n            return\n        except ParseError as e:\n            print(\"Parse error: %s\" % e)\n        except MachineError as e:\n            print(\"Machine error: %s\" % e)\n        except CompileError as e:\n            print(\"Compile error: %s\" % e)", "docstring": "Starts a simple REPL for this machine.\n\nArgs:\noptimize: Controls whether to run inputted code through the\noptimizer.\n\npersist: If True, the machine is not deleted after each line.", "source": "juraj-google-style"}
{"code": "def __init__(self, shard_context, shard_state, tstate):\n    \n    self._tstate = tstate\n    self.job_context = shard_context.job_context\n    self.shard_context = shard_context\n    self.number = shard_state.slice_id\n    self.attempt = shard_state.slice_retries + 1", "docstring": "Init.\n\nThe signature of __init__ is subject to change.\n\nRead only properties:\njob_context: JobContext object.\nshard_context: ShardContext object.\nnumber: int. slice number. 0 indexed.\nattempt: int. The current attempt at executing this slice.\nstarting at 1.\n\nArgs:\nshard_context: map_job.JobConfig.\nshard_state: model.ShardState.\ntstate: model.TransientShardstate.", "source": "juraj-google-style"}
{"code": "def NewEvent(\n    type: str, id: UUID = None, data: JsonDict = None, metadata: JsonDict = None\n) -> NewEventData:\n    \n\n    return NewEventData(id or uuid4(), type, data, metadata)", "docstring": "Build the data structure for a new event.\n\nArgs:\ntype: An event type.\nid: The uuid identifier for the event.\ndata: A dict containing data for the event. These data\nmust be json serializable.\nmetadata: A dict containing metadata about the event.\nThese must be json serializable.", "source": "juraj-google-style"}
{"code": "def single_qubit_matrix_to_phased_x_z(\n        mat: np.ndarray,\n        atol: float = 0\n) -> List[ops.SingleQubitGate]:\n    \n\n    xy_turn, xy_phase_turn, total_z_turn = (\n        _deconstruct_single_qubit_matrix_into_gate_turns(mat))\n\n    \n    result = [\n        ops.PhasedXPowGate(exponent=2 * xy_turn,\n                           phase_exponent=2 * xy_phase_turn),\n        ops.Z**(2 * total_z_turn)\n    ]\n    result = [\n        g for g in result\n        if protocols.trace_distance_bound(g) > atol\n    ]\n\n    \n    if len(result) == 2 and abs(xy_turn) >= 0.5 - atol:\n        return [\n            ops.PhasedXPowGate(phase_exponent=2 * xy_phase_turn + total_z_turn)\n        ]\n\n    return result", "docstring": "Implements a single-qubit operation with a PhasedX and Z gate.\n\nIf one of the gates isn't needed, it will be omitted.\n\nArgs:\nmat: The 2x2 unitary matrix of the operation to implement.\natol: A limit on the amount of error introduced by the\nconstruction.\n\nReturns:\nA list of gates that, when applied in order, perform the desired\noperation.", "source": "juraj-google-style"}
{"code": "def SummaryMetadata(self, run, tag):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.SummaryMetadata(tag)", "docstring": "Return the summary metadata for the given tag on the given run.\n\nArgs:\nrun: A string name of the run for which summary metadata is to be\nretrieved.\ntag: A string name of the tag whose summary metadata is to be\nretrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nA `SummaryMetadata` protobuf.", "source": "juraj-google-style"}
{"code": "def get_params(img, output_size):\n    (w, h) = img.size\n    (th, tw) = output_size\n    if ((w == tw) and (h == th)):\n        return (0, 0, h, w)\n    i = random.randint(0, (h - th))\n    j = random.randint(0, (w - tw))\n    return (i, j, th, tw)", "docstring": "Get parameters for ``crop`` for a random crop.\n\nArgs:\nimg (PIL Image): Image to be cropped.\noutput_size (tuple): Expected output size of the crop.\n\nReturns:\ntuple: params (i, j, h, w) to be passed to ``crop`` for random crop.", "source": "codesearchnet"}
{"code": "def _flat_shapes(self):\n    return structure.get_flat_tensor_shapes(self.element_spec)", "docstring": "Returns a list `tf.TensorShapes`s for the element tensor representation.\n\nReturns:\nA list `tf.TensorShapes`s for the element tensor representation.", "source": "github-repos"}
{"code": "def variant_case(store, case_obj, variant_obj):\n    \n    case_obj['bam_files'] = []\n    case_obj['mt_bams'] = []\n    case_obj['bai_files'] = []\n    case_obj['mt_bais'] = []\n    case_obj['sample_names'] = []\n    for individual in case_obj['individuals']:\n        bam_path = individual.get('bam_file')\n        mt_bam = individual.get('mt_bam')\n        case_obj['sample_names'].append(individual.get('display_name'))\n        if bam_path and os.path.exists(bam_path):\n            case_obj['bam_files'].append(individual['bam_file'])\n            case_obj['bai_files'].append(find_bai_file(individual['bam_file']))\n        if mt_bam and os.path.exists(mt_bam):\n            case_obj['mt_bams'].append(individual['mt_bam'])\n            case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))\n\n        else:\n            LOG.debug(\"%s: no bam file found\", individual['individual_id'])\n\n    try:\n        genes = variant_obj.get('genes', [])\n        if len(genes) == 1:\n            hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])\n            if hgnc_gene_obj:\n                vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)\n                case_obj['region_vcf_file'] = vcf_path\n            else:\n                case_obj['region_vcf_file'] = None\n        elif len(genes) > 1:\n            chrom = variant_obj['genes'][0]['common']['chromosome']\n            start = min(gene['common']['start'] for gene in variant_obj['genes'])\n            end = max(gene['common']['end'] for gene in variant_obj['genes'])\n            \n            vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)\n            case_obj['region_vcf_file'] = vcf_path\n    except (SyntaxError, Exception):\n        LOG.warning(\"skip VCF region for alignment view\")", "docstring": "Pre-process case for the variant view.\n\nAdds information about files from case obj to variant\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ncase_obj(scout.models.Case)\nvariant_obj(scout.models.Variant)", "source": "juraj-google-style"}
{"code": "def rotate(self, vector):\n    if isinstance(vector, Quaternion):\n        return self._rotate_quaternion(vector)\n    q = Quaternion(vector=vector)\n    a = self._rotate_quaternion(q).vector\n    if isinstance(vector, list):\n        l = [x for x in a]\n        return l\n    elif isinstance(vector, tuple):\n        l = [x for x in a]\n        return tuple(l)\n    else:\n        return a", "docstring": "Rotate a 3D vector by the rotation stored in the Quaternion object.\n\nParams:\nvector: A 3-vector specified as any ordered sequence of 3 real numbers corresponding to x, y, and z values.\nSome types that are recognised are: numpy arrays, lists and tuples.\nA 3-vector can also be represented by a Quaternion object who's scalar part is 0 and vector part is the required 3-vector.\nThus it is possible to call `Quaternion.rotate(q)` with another quaternion object as an input.\n\nReturns:\nThe rotated vector returned as the same type it was specified at input.\n\nRaises:\nTypeError: if any of the vector elements cannot be converted to a real number.\nValueError: if `vector` cannot be interpreted as a 3-vector or a Quaternion object.", "source": "codesearchnet"}
{"code": "def groups_from_tag(self, group, tag_name, filters=None, params=None):\n        \n        for t in self.pivot_from_tag(group, tag_name, filters=filters, params=params):\n            yield t", "docstring": "Args:\ngroup:\ntag_name:\nfilters:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_by_name(self, name):\n        \n        san_managers = self._client.get_all()\n        result = [x for x in san_managers if x['name'] == name]\n        return result[0] if result else None", "docstring": "Gets a SAN Manager by name.\n\nArgs:\nname: Name of the SAN Manager\n\nReturns:\ndict: SAN Manager.", "source": "juraj-google-style"}
{"code": "def get_adif_id(self, callsign, timestamp=timestamp_now):\n    return self.get_all(callsign, timestamp)[const.ADIF]", "docstring": "Returns ADIF id of a callsign's country\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: containing the country ADIF id\n\nRaises:\nKeyError: No Country found for callsign", "source": "codesearchnet"}
{"code": "def to_json_string(self, indent=None):\n    root_ids = []\n    for r in self._roots:\n        root_ids.append(r.id)\n    root_references = self._all_models.values()\n    json = {'title': self.title, 'roots': {'root_ids': root_ids, 'references': references_json(root_references)}, 'version': __version__}\n    return serialize_json(json, indent=indent)", "docstring": "Convert the document to a JSON string.\n\nArgs:\nindent (int or None, optional) : number of spaces to indent, or\nNone to suppress all newlines and indentation (default: None)\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def initialize_means(data, clusters, k):\n    \n    init_w = np.zeros((data.shape[0], k))\n    if sparse.issparse(data):\n        for i in range(k):\n            if data[:,clusters==i].shape[1]==0:\n                point = np.random.randint(0, data.shape[1])\n                init_w[:,i] = data[:,point].toarray().flatten()\n            else:\n                \n                init_w[:,i] = np.array(data[:,clusters==i].mean(1)).flatten() + eps\n    else:\n        for i in range(k):\n            if data[:,clusters==i].shape[1]==0:\n                point = np.random.randint(0, data.shape[1])\n                init_w[:,i] = data[:,point].flatten()\n            else:\n                init_w[:,i] = data[:,clusters==i].mean(1) + eps\n    return init_w", "docstring": "Initializes the M matrix given the data and a set of cluster labels.\nCluster centers are set to the mean of each cluster.\n\nArgs:\ndata (array): genes x cells\nclusters (array): 1d array of ints (0...k-1)\nk (int): number of clusters", "source": "juraj-google-style"}
{"code": "def keys(self):\n    all_keys = [k.decode('utf-8') for (k, v) in self.rdb.hgetall(self.session_hash).items()]\n    return all_keys", "docstring": "Return a list of all keys in the dictionary.\n\nReturns:\nlist of str: [key1,key2,...,keyN]", "source": "codesearchnet"}
{"code": "def _IsBase64(cls, s):\n    try:\n        if (base64.b64encode(base64.b64decode(s)).decode('utf-8') == s):\n            return True\n    except (TypeError, binascii.Error):\n        pass\n    return False", "docstring": "An imperfect but decent method for determining if a string is base64.\n\nArgs:\ns: A string with the data to test.\n\nReturns:\nTrue if s is base64, else False.", "source": "codesearchnet"}
{"code": "def save_chkpt_vars(dic, path):\n    logger.info('Variables to save to {}:'.format(path))\n    keys = sorted(list(dic.keys()))\n    logger.info(pprint.pformat(keys))\n    assert (not path.endswith('.npy'))\n    if path.endswith('.npz'):\n        np.savez_compressed(path, **dic)\n    else:\n        with tf.Graph().as_default(), tf.Session() as sess:\n            for (k, v) in six.iteritems(dic):\n                k = get_op_tensor_name(k)[0]\n                _ = tf.Variable(name=k, initial_value=v)\n            sess.run(tf.global_variables_initializer())\n            saver = tf.train.Saver()\n            saver.save(sess, path, write_meta_graph=False)", "docstring": "Save variables in dic to path.\n\nArgs:\ndic: {name: value}\npath: save as npz if the name ends with '.npz', otherwise save as a checkpoint.", "source": "codesearchnet"}
{"code": "def read(self, *, level=0, alignment=1) -> bytes:\n    return self.mglo.read(level, alignment)", "docstring": "Read the content of the texture into a buffer.\n\nKeyword Args:\nlevel (int): The mipmap level.\nalignment (int): The byte alignment of the pixels.\n\nReturns:\nbytes", "source": "codesearchnet"}
{"code": "def guess_base_branch():\n    my_branch = current_branch(refresh=True).name\n    curr = latest_commit()\n    if (len(curr.branches) > 1):\n        other = [x for x in curr.branches if (x != my_branch)]\n        if (len(other) == 1):\n            return other[0]\n        return None\n    else:\n        parent = curr\n        while (parent and (my_branch in parent.branches)):\n            curr = parent\n            if (len(curr.branches) > 1):\n                other = [x for x in curr.branches if (x != my_branch)]\n                if (len(other) == 1):\n                    return other[0]\n                return None\n            parents = [p for p in curr.parents if (my_branch in p.branches)]\n            num_parents = len(parents)\n            if (num_parents > 2):\n                return None\n            if (num_parents == 2):\n                for p in parents:\n                    if (p.branches == [my_branch]):\n                        parent = p\n                        break\n            elif (num_parents == 1):\n                parent = parents[0]\n            elif (num_parents == 0):\n                parent = None\n        return None", "docstring": "Try to guess the base branch for the current branch.\n\nDo not trust this guess. git makes it pretty much impossible to guess\nthe base branch reliably so this function implements few heuristics that\nwill work on most common use cases but anything a bit crazy will probably\ntrip this function.\n\nReturns:\nOptional[str]: The name of the base branch for the current branch if\nguessable or **None** if can't guess.", "source": "codesearchnet"}
{"code": "def set_zone(timezone):\n    if (timezone.lower() in mapper.win_to_unix):\n        win_zone = timezone\n    elif (timezone.lower() in mapper.unix_to_win):\n        win_zone = mapper.get_win(timezone)\n    else:\n        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))\n    cmd = ['tzutil', '/s', win_zone]\n    res = __salt__['cmd.run_all'](cmd, python_shell=False)\n    if res['retcode']:\n        raise CommandExecutionError('tzutil encountered an error setting timezone: {0}'.format(timezone), info=res)\n    return zone_compare(timezone)", "docstring": "Sets the timezone using the tzutil.\n\nArgs:\ntimezone (str): A valid timezone\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nRaises:\nCommandExecutionError: If invalid timezone is passed\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' timezone.set_zone 'America/Denver'", "source": "codesearchnet"}
{"code": "def deregister(cls, name: str) -> None:\n        \n        if name not in cls.available:\n            raise ConnectionPluginNotRegistered(\n                f\"Connection {name!r} is not registered\"\n            )\n        cls.available.pop(name)", "docstring": "Deregisters a registered connection plugin by its name\n\nArgs:\nname: name of the connection plugin to deregister\n\nRaises:\n:obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`", "source": "juraj-google-style"}
{"code": "def remove(self, repl_id):\n        \n        repl = self._storage.pop(repl_id)\n        repl.cleanup()\n        del(repl)", "docstring": "remove replica set with kill members\nArgs:\nrepl_id - replica set identity\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "async def find_movie(self, query):\n    params = OrderedDict([('query', query), ('include_adult', False)])\n    url = self.url_builder('search/movie', {}, params)\n    data = (await self.get_data(url))\n    if (data is None):\n        return\n    return [Movie.from_json(item, self.config['data'].get('images')) for item in data.get('results', [])]", "docstring": "Retrieve movie data by search query.\n\nArguments:\nquery (:py:class:`str`): Query to search for.\n\nReturns:\n:py:class:`list`: Possible matches.", "source": "codesearchnet"}
{"code": "def _AnalyzeDataStream(self, mediator, file_entry, data_stream_name):\n    display_name = mediator.GetDisplayName()\n    logger.debug('[AnalyzeDataStream] analyzing file: {0:s}'.format(display_name))\n    if self._processing_profiler:\n        self._processing_profiler.StartTiming('analyzing')\n    try:\n        file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n        if (not file_object):\n            raise RuntimeError('Unable to retrieve file-like object for file entry: {0:s}.'.format(display_name))\n        try:\n            self._AnalyzeFileObject(mediator, file_object)\n        finally:\n            file_object.close()\n    finally:\n        if self._processing_profiler:\n            self._processing_profiler.StopTiming('analyzing')\n    logger.debug('[AnalyzeDataStream] completed analyzing file: {0:s}'.format(display_name))", "docstring": "Analyzes the contents of a specific data stream of a file entry.\n\nThe results of the analyzers are set in the parser mediator as attributes\nthat are added to produced event objects. Note that some file systems\nallow directories to have data streams, e.g. NTFS.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry whose data stream is to be\nanalyzed.\ndata_stream_name (str): name of the data stream.\n\nRaises:\nRuntimeError: if the file-like object cannot be retrieved from\nthe file entry.", "source": "codesearchnet"}
{"code": "class AnyVote(LabelAggregation):\n\n    def __init__(self, **kwargs):\n\n        def inner(predictions: Iterable[int]) -> int:\n            return self._outlier_label if any(map(lambda p: p == self._outlier_label, predictions)) else self._normal_label\n        super().__init__(agg_func=inner, **kwargs)", "docstring": "Aggregates anomaly labels using an \"any vote\" (OR) scheme.\n\nThis `AggregationFn` implements an \"any vote\" strategy. It aggregates\nanomaly labels such that the result is considered an outlier if at least\none of the input `AnomalyPrediction` objects is labeled as an outlier.\n\nExample:\nIf input labels are [normal, normal, outlier], and outlier_label=1,\nthen the aggregated label will be outlier (1).\nIf input labels are [normal, normal, normal], and outlier_label=1,\nthen the aggregated label will be normal (0).\n\nArgs:\nnormal_label (int): The integer label for normal predictions. Defaults to 0.\noutlier_label (int): The integer label for outlier predictions. Defaults to\n1.\n**kwargs: Additional keyword arguments to pass to the base\n`LabelAggregation` class.", "source": "github-repos"}
{"code": "def all(self):\n    return [email for (email, action) in self._collaborators.items() if (action in [RoleValue.Owner, RoleValue.User, ShareRequestValue.Add])]", "docstring": "Get all collaborators.\n\nReturns:\nList[str]: Collaborators.", "source": "codesearchnet"}
{"code": "def retrieve_reviewers(self, product):\n    if (not isinstance(product, self._product_cls)):\n        raise TypeError(\"Type of given product isn't acceptable:\", product, ', expected:', self._product_cls)\n    return list(self.graph.predecessors(product))", "docstring": "Retrieve reviewers who reviewed a given product.\n\nArgs:\nproduct: A product specifying reviewers.\n\nReturns:\nA list of reviewers who review the product.\n\nRaises:\nTypeError: when given product isn't instance of specified product\nclass when this graph is constructed.", "source": "codesearchnet"}
{"code": "def ParseSearchRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = TwitterAndroidSearchEventData()\n    event_data.query = query\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.search_query = self._GetRowValue(query_hash, row, 'query')\n    timestamp = self._GetRowValue(query_hash, row, 'time')\n    if timestamp:\n        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a search row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "codesearchnet"}
{"code": "def sia_bipartitions(nodes, node_labels=None):\n    \n    if config.CUT_ONE_APPROXIMATION:\n        bipartitions = directed_bipartition_of_one(nodes)\n    else:\n        \n        bipartitions = directed_bipartition(nodes, nontrivial=True)\n\n    return [Cut(bipartition[0], bipartition[1], node_labels)\n            for bipartition in bipartitions]", "docstring": "Return all |big_phi| cuts for the given nodes.\n\nThis value changes based on :const:`config.CUT_ONE_APPROXIMATION`.\n\nArgs:\nnodes (tuple[int]): The node indices to partition.\nReturns:\nlist[Cut]: All unidirectional partitions.", "source": "juraj-google-style"}
{"code": "def is_seq_of(seq, expected_type, seq_type=None):\n    \n    if seq_type is None:\n        exp_seq_type = collections_abc.Sequence\n    else:\n        assert isinstance(seq_type, type)\n        exp_seq_type = seq_type\n    if not isinstance(seq, exp_seq_type):\n        return False\n    for item in seq:\n        if not isinstance(item, expected_type):\n            return False\n    return True", "docstring": "Check whether it is a sequence of some type.\n\nArgs:\nseq (Sequence): The sequence to be checked.\nexpected_type (type): Expected type of sequence items.\nseq_type (type, optional): Expected sequence type.\n\nReturns:\nbool: Whether the sequence is valid.", "source": "juraj-google-style"}
{"code": "def create(self, title, teamId=None, **request_parameters):\n    check_type(title, basestring)\n    check_type(teamId, basestring)\n    post_data = dict_from_items_with_values(request_parameters, title=title, teamId=teamId)\n    json_data = self._session.post(API_ENDPOINT, json=post_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Create a room.\n\nThe authenticated user is automatically added as a member of the room.\n\nArgs:\ntitle(basestring): A user-friendly name for the room.\nteamId(basestring): The team ID with which this room is\nassociated.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nRoom: A Room with the details of the created room.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def create(self, resource):\n    uri = (self.URI + self.RESOURCES_PATH)\n    return self._client.create(resource=resource, uri=uri)", "docstring": "Set all the labels for a resource.\n\nArgs:\nresource: The object containing the resource URI and a list of labels\n\nReturns:\ndict: Resource Labels", "source": "codesearchnet"}
{"code": "def unlock(self, password: str):\n    if self.locked:\n        self._privkey = decode_keyfile_json(self.keystore, password.encode('UTF-8'))\n        self.locked = False\n        self._fill_address()", "docstring": "Unlock the account with a password.\n\nIf the account is already unlocked, nothing happens, even if the password is wrong.\n\nRaises:\nValueError: (originating in ethereum.keys) if the password is wrong\n(and the account is locked)", "source": "codesearchnet"}
{"code": "def set_buffer_options(self, options, bufnr=None):\n    buf = (self._vim.buffers[bufnr] if bufnr else self._vim.current.buffer)\n    filetype = options.pop('filetype', None)\n    if filetype:\n        self.set_filetype(filetype)\n    for (opt, value) in options.items():\n        buf.options[opt] = value", "docstring": "Set buffer-local options for a buffer, defaulting to current.\n\nArgs:\noptions (dict):\nOptions to set, with keys being Vim option names. For Boolean\noptions, use a :class:`bool` value as expected, e.g.\n``{'buflisted': False}`` for ``setlocal nobuflisted``.\nbufnr (Optional[int]):\nA Vim buffer number, as you might get from VimL ``bufnr('%')``\nor Python ``vim.current.buffer.number``. If ``None``, options\nare set on the current buffer.", "source": "codesearchnet"}
{"code": "def to(self, *args, **kwargs) -> 'BatchFeature':\n    requires_backends(self, ['torch'])\n    import torch\n    new_data = {}\n    device = kwargs.get('device')\n    if device is None and len(args) > 0:\n        arg = args[0]\n        if is_torch_dtype(arg):\n            pass\n        elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):\n            device = arg\n        else:\n            raise ValueError(f'Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.')\n\n    def _to(elem):\n        if torch.is_floating_point(elem):\n            return elem.to(*args, **kwargs)\n        if device is not None:\n            return elem.to(device=device)\n        return elem\n    for k, v in self.items():\n        if isinstance(v, list) and isinstance(v[0], list):\n            new_v = []\n            for elems in v:\n                new_v.append([_to(elem) for elem in elems])\n            new_data[k] = new_v\n        elif isinstance(v, list):\n            new_data[k] = [_to(elem) for elem in v]\n        else:\n            new_data[k] = _to(v)\n    self.data = new_data\n    return self", "docstring": "Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in\ndifferent `dtypes` and sending the `BatchFeature` to a different `device`.\n\nArgs:\nargs (`Tuple`):\nWill be passed to the `to(...)` function of the tensors.\nkwargs (`Dict`, *optional*):\nWill be passed to the `to(...)` function of the tensors.\n\nReturns:\n[`BatchFeature`]: The same instance after modification.", "source": "github-repos"}
{"code": "def _parse_stop_words_file(self, path):\n        \n        language = None\n        loaded = False\n\n        if os.path.isfile(path):\n            self._logger.debug('Loading stop words in %s', path)\n\n            language = path.split('-')[-1]\n\n            if not language in self.__stop_words:\n                self.__stop_words[language] = set()\n\n            with codecs.open(path, 'r', 'UTF-8') as file:\n                loaded = True\n                for word in file:\n                    self.__stop_words[language].add(word.strip())\n\n        return loaded", "docstring": "Load stop words from the given path.\n\nParse the stop words file, saving each word found in it in a set\nfor the language of the file. This language is obtained from\nthe file name. If the file doesn't exist, the method will have\nno effect.\n\nArgs:\npath: Path to the stop words file.\n\nReturns:\nA boolean indicating whether the file was loaded.", "source": "juraj-google-style"}
{"code": "def GetCPIOArchiveFileEntryByPathSpec(self, path_spec):\n    location = getattr(path_spec, 'location', None)\n    if (location is None):\n        raise errors.PathSpecError('Path specification missing location.')\n    if (not location.startswith(self.LOCATION_ROOT)):\n        raise errors.PathSpecError('Invalid location in path specification.')\n    if (len(location) == 1):\n        return None\n    return self._cpio_archive_file.GetFileEntryByPath(location[1:])", "docstring": "Retrieves the CPIO archive file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nCPIOArchiveFileEntry: CPIO archive file entry or None if not available.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def __verify_ready(self, creating=False):\n    if (len(self._value_ranges) == 0):\n        self._logger.log('crit', 'Attribute value_ranges must have at least one value')\n        raise RuntimeWarning('Attribute value_ranges must have at least one value')\n    if ((len(self._employers) == 0) and (creating is False)):\n        self._logger.log('crit', 'Need to create employers')\n        raise RuntimeWarning('Need to create employers')", "docstring": "Some cleanup, ensures that everything is set up properly to avoid\nrandom errors during execution\n\nArgs:\ncreating (bool): True if currently creating employer bees, False\nfor checking all other operations", "source": "codesearchnet"}
{"code": "def Add(self, entry):\n    if not isinstance(entry, SshkeyMapEntry):\n        raise TypeError\n    return super(SshkeyMap, self).Add(entry)", "docstring": "Add a new object, verify it is a SshkeyMapEntry instance.\n\nArgs:\nentry: A SshkeyMapEntry instance.\n\nReturns:\nTrue if added successfully, False otherwise.\n\nRaises:\nTypeError: The argument is of the wrong type.", "source": "github-repos"}
{"code": "def Execute(self, message):\n    self.message = message\n    if message:\n        self.require_fastpoll = message.require_fastpoll\n    args = None\n    try:\n        if self.message.args_rdf_name:\n            if (not self.in_rdfvalue):\n                raise RuntimeError(('Did not expect arguments, got %s.' % self.message.args_rdf_name))\n            if (self.in_rdfvalue.__name__ != self.message.args_rdf_name):\n                raise RuntimeError(('Unexpected arg type %s != %s.' % (self.message.args_rdf_name, self.in_rdfvalue.__name__)))\n            args = self.message.payload\n        if (self._authentication_required and (self.message.auth_state != rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)):\n            raise RuntimeError(('Message for %s was not Authenticated.' % self.message.name))\n        self.cpu_start = self.proc.cpu_times()\n        self.cpu_limit = self.message.cpu_limit\n        if getattr(flags.FLAGS, 'debug_client_actions', False):\n            pdb.set_trace()\n        try:\n            self.Run(args)\n        finally:\n            used = self.proc.cpu_times()\n            self.cpu_used = ((used.user - self.cpu_start.user), (used.system - self.cpu_start.system))\n    except NetworkBytesExceededError as e:\n        self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED, ('%r: %s' % (e, e)), traceback.format_exc())\n    except Exception as e:\n        self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, ('%r: %s' % (e, e)), traceback.format_exc())\n        if flags.FLAGS.pdb_post_mortem:\n            self.DisableNanny()\n            pdb.post_mortem()\n    if (self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK):\n        logging.info('Job Error (%s): %s', self.__class__.__name__, self.status.error_message)\n        if self.status.backtrace:\n            logging.debug(self.status.backtrace)\n    if self.cpu_used:\n        self.status.cpu_time_used.user_cpu_time = self.cpu_used[0]\n        self.status.cpu_time_used.system_cpu_time = self.cpu_used[1]\n    self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS)\n    self._RunGC()", "docstring": "This function parses the RDFValue from the server.\n\nThe Run method will be called with the specified RDFValue.\n\nArgs:\nmessage:     The GrrMessage that we are called to process.\n\nReturns:\nUpon return a callback will be called on the server to register\nthe end of the function and pass back exceptions.\nRaises:\nRuntimeError: The arguments from the server do not match the expected\nrdf type.", "source": "codesearchnet"}
{"code": "def ms_bot_framework(self) -> list:\n    ms_bf_controls = [control.ms_bot_framework() for control in self.controls]\n    return ms_bf_controls", "docstring": "Returns list of MS Bot Framework compatible states of the\nRichMessage instance nested controls.\n\nReturns:\nms_bf_controls: MS Bot Framework representation of RichMessage instance\nnested controls.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent):\n        \n        CodeControlFlow.__init__(self, scope, parent, \"switch\")\n        self.cases = []\n        self.default_case = None", "docstring": "Constructor for switches.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.", "source": "juraj-google-style"}
{"code": "def compute_output(self, o, output_shape=None):\n    if self.combine_dims:\n        o = mtf.transpose(o, ((o.shape - self.o_dims) + self.o_dims))\n        o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[0])\n        reduced_dims = [self.wo.shape.dims[0]]\n    else:\n        reduced_dims = self.o_dims\n    return mtf.einsum([o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims)", "docstring": "Compute output of multihead attention.\n\nArgs:\no: a Tensor with dimensions\nquery_heads_dims + {value_dim} + other_dims\noutput_shape: an optional Shape\nReturns:\na Tensor with shape:\n{output_dim} + other_dims", "source": "codesearchnet"}
{"code": "def write8(self, offset, value):\n        \n        if not isinstance(offset, (int, long)):\n            raise TypeError(\"Invalid offset type, should be integer.\")\n        if not isinstance(value, (int, long)):\n            raise TypeError(\"Invalid value type, should be integer.\")\n        if value < 0 or value > 0xff:\n            raise ValueError(\"Value out of bounds.\")\n\n        offset = self._adjust_offset(offset)\n        self._validate_offset(offset, 1)\n        self.mapping[offset:offset + 1] = struct.pack(\"B\", value)", "docstring": "Write 8-bits to the specified `offset` in bytes, relative to the\nbase physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\nvalue (int, long): 8-bit value to write.\n\nRaises:\nTypeError: if `offset` or `value` type are invalid.\nValueError: if `offset` or `value` are out of bounds.", "source": "juraj-google-style"}
{"code": "def dict_filter_nones(dict_):\n    dict2_ = {key: val for (key, val) in six.iteritems(dict_) if (val is not None)}\n    return dict2_", "docstring": "r\"\"\"\nRemoves None values\n\nArgs:\ndict_ (dict):  a dictionary\n\nReturns:\ndict:\n\nCommandLine:\npython -m utool.util_dict --exec-dict_filter_nones\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> # UNSTABLE_DOCTEST\n>>> # fails on python 3 because of dict None order\n>>> from utool.util_dict import *  # NOQA\n>>> import utool as ut\n>>> dict_ = {1: None, 2: 'blue', 3: 'four', None: 'fun'}\n>>> dict2_ = dict_filter_nones(dict_)\n>>> result = ut.repr4(dict2_, nl=False)\n>>> print(result)\n{None: 'fun', 2: 'blue', 3: 'four'}", "source": "codesearchnet"}
{"code": "def locked_put(self, credentials):\n        \n        filters = {self.key_name: self.key_value}\n        query = self.session.query(self.model_class).filter_by(**filters)\n        entity = query.first()\n\n        if not entity:\n            entity = self.model_class(**filters)\n\n        setattr(entity, self.property_name, credentials)\n        self.session.add(entity)", "docstring": "Write a credentials to the SQLAlchemy datastore.\n\nArgs:\ncredentials: :class:`oauth2client.Credentials`", "source": "juraj-google-style"}
{"code": "def get_service_list(self) -> list:\n    services = []\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve all the services.')\n    service_list = self._client.services.list()\n    for s_list in service_list:\n        services.append(s_list.short_id)\n    return services", "docstring": "Get a list of docker services.\n\nOnly the manager nodes can retrieve all the services\n\nReturns:\nlist, all the ids of the services in swarm", "source": "codesearchnet"}
{"code": "def __init__(self, sess_creator):\n    self._sess_creator = sess_creator\n    _WrappedSession.__init__(self, self._create_session())", "docstring": "Create a new `_RecoverableSession`.\n\nThe value returned by calling `sess_creator.create_session()` will be the\nsession wrapped by this recoverable session.\n\nArgs:\nsess_creator: A 'SessionCreator' to be wrapped by recoverable.", "source": "github-repos"}
{"code": "def scalars_impl(self, run, tag_regex_string):\n    \n    if not tag_regex_string:\n      \n      return {\n          _REGEX_VALID_PROPERTY: False,\n          _TAG_TO_EVENTS_PROPERTY: {},\n      }\n\n    \n    try:\n      regex = re.compile(tag_regex_string)\n    except re.error:\n      return {\n          _REGEX_VALID_PROPERTY: False,\n          _TAG_TO_EVENTS_PROPERTY: {},\n      }\n\n    \n    run_to_data = self._multiplexer.PluginRunToTagToContent(\n        scalars_metadata.PLUGIN_NAME)\n\n    tag_to_data = None\n    try:\n      tag_to_data = run_to_data[run]\n    except KeyError:\n      \n      \n      payload = {}\n\n    if tag_to_data:\n      scalars_plugin_instance = self._get_scalars_plugin()\n      if not scalars_plugin_instance:\n        raise ValueError(('Failed to respond to request for /scalars. '\n                          'The scalars plugin is oddly not registered.'))\n\n      form = scalars_plugin.OutputFormat.JSON\n      payload = {\n        tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0]\n            for tag in tag_to_data.keys()\n            if regex.match(tag)\n      }\n\n    return {\n        _REGEX_VALID_PROPERTY: True,\n        _TAG_TO_EVENTS_PROPERTY: payload,\n    }", "docstring": "Given a tag regex and single run, return ScalarEvents.\n\nArgs:\nrun: A run string.\ntag_regex_string: A regular expression that captures portions of tags.\n\nRaises:\nValueError: if the scalars plugin is not registered.\n\nReturns:\nA dictionary that is the JSON-able response.", "source": "juraj-google-style"}
{"code": "def testHeatEquationWithVariousSchemes(self, one_step_fn, time_step):\n\n    def final_cond_fn(x):\n        return math.e * math.sin(x)\n\n    def expected_result_fn(x):\n        return tf.sin(x)\n\n    @dirichlet\n    def lower_boundary_fn(t, x):\n        del x\n        return -tf.math.exp(t)\n\n    @dirichlet\n    def upper_boundary_fn(t, x):\n        del x\n        return tf.math.exp(t)\n    grid = grids.uniform_grid(minimums=[-10.5 * math.pi], maximums=[10.5 * math.pi], sizes=[1000], dtype=np.float32)\n    self._testHeatEquation(grid=grid, final_t=1, time_step=time_step, final_cond_fn=final_cond_fn, expected_result_fn=expected_result_fn, one_step_fn=one_step_fn, lower_boundary_fn=lower_boundary_fn, upper_boundary_fn=upper_boundary_fn, error_tolerance=0.001)", "docstring": "Test solving heat equation with various time marching schemes.\n\nTests solving heat equation with the boundary conditions\n`u(x, t=1) = e * sin(x)`, `u(-2 pi n - pi / 2, t) = -e^t`, and\n`u(2 pi n + pi / 2, t) = -e^t` with some integer `n` for `u(x, t=0)`.\n\nThe exact solution is `u(x, t=0) = sin(x)`.\n\nAll time marching schemes should yield reasonable results given small enough\ntime steps. First-order accurate schemes (explicit, implicit, weighted with\ntheta != 0.5) require smaller time step than second-order accurate ones\n(Crank-Nicolson, Extrapolation).\n\nArgs:\none_step_fn: one_step_fn representing a time marching scheme to use.\ntime_step: time step for given scheme.", "source": "github-repos"}
{"code": "def build_tensor_serving_input_receiver_fn(shape, dtype=tf.float32, batch_size=1):\n\n    def serving_input_receiver_fn():\n        features = tf.placeholder(dtype=dtype, shape=([batch_size] + shape), name='input_tensor')\n        return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features)\n    return serving_input_receiver_fn", "docstring": "Returns a input_receiver_fn that can be used during serving.\n\nThis expects examples to come through as float tensors, and simply\nwraps them as TensorServingInputReceivers.\n\nArguably, this should live in tf.estimator.export. Testing here first.\n\nArgs:\nshape: list representing target size of a single example.\ndtype: the expected datatype for the input example\nbatch_size: number of input tensors that will be passed for prediction\n\nReturns:\nA function that itself returns a TensorServingInputReceiver.", "source": "codesearchnet"}
{"code": "def _run_calibration(saved_model_path: str, signature_keys: Sequence[str], tags: Collection[str], force_graph_mode_calibration: bool, representative_dataset_file_map: Mapping[str, quantization_options_pb2.RepresentativeDatasetFile]) -> bool:\n    repr_dataset_map = rd.TfRecordRepresentativeDatasetLoader(representative_dataset_file_map).load()\n    _run_graph_for_calibration(saved_model_path, signature_keys, tags, repr_dataset_map, force_graph_mode_calibration)\n    return True", "docstring": "Runs calibration and adds calibration statistics to exported model.\n\nArgs:\nsaved_model_path: Path to the SavedModel to run calibration.\nsignature_keys: List of signature keys corresponding to SignatureDefs to run\ncalibration on.\ntags: A set of tags that identify the MetaGraphDef.\nforce_graph_mode_calibration: If True, runs the calibration in graph mode.\nrepresentative_dataset_file_map: Signature key ->\n`RepresentativeDatasetFile` mapping for running the calibration step. Each\ndataset file stores the representative dataset for the function matching\nthe signature key.\n\nReturns:\n`True` upon successfully running calibration.", "source": "github-repos"}
{"code": "def _least_upper_bound(*nodes):\n    N = set(nodes)\n    UB = LATTICE_UPPER_BOUNDS\n    try:\n        bounds = [UB[n] for n in N]\n    except KeyError:\n        dtype = next((n for n in N if n not in UB))\n        raise ValueError(f'dtype={dtype!r} is not a valid dtype for Keras type promotion.')\n    CUB = set.intersection(*bounds)\n    LUB = CUB & N or {c for c in CUB if CUB.issubset(UB[c])}\n    if len(LUB) == 1:\n        return LUB.pop()\n    elif len(LUB) == 0:\n        msg = f'Input dtypes {tuple((str(n) for n in nodes))} have no available implicit dtype promotion path. Try explicitly casting inputs to the desired output type.'\n        raise ValueError(msg)\n    else:\n        raise ValueError(f\"Internal Type Promotion error: {nodes} do not have a unique least upper bound on the specified lattice; options are {LUB}. This is an unexpected error in Keras's internal logic; please report it to the maintainers.\")", "docstring": "Compute the least upper bound of a set of nodes.\n\nArgs:\nnodes: sequence of entries from dtypes + weak_types\n\nReturns:\nThe type representing the least upper bound of the input nodes on the\npromotion lattice.", "source": "github-repos"}
{"code": "async def call(self, methname, *args, **kwargs):\n        \n        todo = (methname, args, kwargs)\n        return await self.task(todo)", "docstring": "Call a remote method by name.\n\nArgs:\nmethname (str): The name of the remote method.\n*args: Arguments to the method call.\n**kwargs: Keyword arguments to the method call.\n\nMost use cases will likely use the proxy methods directly:\n\nThe following two are effectively the same:\n\nvalu = proxy.getFooBar(x, y)\nvalu = proxy.call('getFooBar', x, y)", "source": "juraj-google-style"}
{"code": "def estimate_mutual_information(x, y):\n    xy = np.concatenate((x, y), axis=1)\n    epsilon = _calculate_epsilon(xy)\n    h_x = estimate_entropy(x, epsilon)\n    h_y = estimate_entropy(y, epsilon)\n    h_xy = estimate_entropy(xy, epsilon)\n    return max(0, ((h_x + h_y) - h_xy))", "docstring": "Estimate the mutual information of two datasets.\n\nMutual information is a measure of dependence between\ntwo datasets and is calculated as:\n\n$I(x;y) = H(x) + H(y) - H(x,y)$\n\nWhere H(x) is the Shannon entropy of x. For continuous datasets,\nadapts the Kraskov Estimator [1] for mutual information.\n\nArgs:\nx (array-like): An array with shape (n_samples, n_features_x)\ny (array-like): An array with shape (n_samples, n_features_y)\n\nReturns:\nfloat: A floating point number representing the mutual\ninformation of x and y. This calculation is *exact*\nfor entirely discrete datasets and *approximate* if\nthere are continuous columns present.\n\nReferences:\n\n.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\ninformation\". Phys. Rev. E 69, 2004.", "source": "codesearchnet"}
{"code": "def disable_switchport(self, inter_type, inter):\n    config = ET.Element('config')\n    interface = ET.SubElement(config, 'interface', xmlns='urn:brocade.com:mgmt:brocade-interface')\n    int_type = ET.SubElement(interface, inter_type)\n    name = ET.SubElement(int_type, 'name')\n    name.text = inter\n    ET.SubElement(int_type, 'switchport-basic', operation='delete')\n    try:\n        self._callback(config)\n        return True\n    except Exception as error:\n        logging.error(error)\n        return False", "docstring": "Change an interface's operation to L3.\n\nArgs:\ninter_type: The type of interface you want to configure. Ex.\ntengigabitethernet, gigabitethernet, fortygigabitethernet.\ninter: The ID for the interface you want to configure. Ex. 1/0/1\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def recv(self, socket_, encoding=None):\n        \n        unpacker = msgpack.Unpacker(encoding=encoding)\n\n        \n        response = socket_.recv(8)  \n        if response == b\"\":\n            raise TensorForceError(\"No data received by socket.recv in call to method `recv` \" +\n                                   \"(listener possibly closed)!\")\n        orig_len = int(response)\n        received_len = 0\n        while True:\n            data = socket_.recv(min(orig_len - received_len, self.max_msg_len))\n            \n            if not data:\n                raise TensorForceError(\"No data of len {} received by socket.recv in call to method `recv`!\".\n                                       format(orig_len - received_len))\n            data_len = len(data)\n            received_len += data_len\n            unpacker.feed(data)\n\n            if received_len == orig_len:\n                break\n\n        \n        for message in unpacker:\n            sts = message.get(\"status\", message.get(b\"status\"))\n            if sts:\n                if sts == \"ok\" or sts == b\"ok\":\n                    return message\n                else:\n                    raise TensorForceError(\"RemoteEnvironment server error: {}\".\n                                           format(message.get(\"message\", \"not specified\")))\n            else:\n                raise TensorForceError(\"Message without field 'status' received!\")\n        raise TensorForceError(\"No message encoded in data stream (data stream had len={})\".\n                               format(orig_len))", "docstring": "Receives a message as msgpack-numpy encoded byte-string from the given socket object.\nBlocks until something was received.\n\nArgs:\nsocket_: The python socket object to use.\nencoding (str): The encoding to use for unpacking messages from the socket.\nReturns: The decoded (as dict) message received.", "source": "juraj-google-style"}
{"code": "def safe_join(directory: FilePath, *paths: FilePath) -> Path:\n    try:\n        safe_path = file_path_to_path(directory).resolve(strict=True)\n        full_path = file_path_to_path(directory, *paths).resolve(strict=True)\n    except FileNotFoundError:\n        raise NotFound()\n    try:\n        full_path.relative_to(safe_path)\n    except ValueError:\n        raise NotFound()\n    return full_path", "docstring": "Safely join the paths to the known directory to return a full path.\n\nRaises:\nNotFound: if the full path does not share a commonprefix with\nthe directory.", "source": "codesearchnet"}
{"code": "def list_projects(self, dataset_name):\n        \n        url = self.url() + \"/nd/resource/dataset/{}\".format(dataset_name)\\\n            + \"/project/\"\n\n        req = self.remote_utils.get_url(url)\n\n        if req.status_code is not 200:\n            raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n        else:\n            return req.json()", "docstring": "Lists a set of projects related to a dataset.\n\nArguments:\ndataset_name (str): Dataset name to search projects for\n\nReturns:\ndict: Projects found based on dataset query", "source": "juraj-google-style"}
{"code": "def slope(self, other):\n    (X1, Y1, X2, Y2) = (self.X, self.Y, other.X, other.Y)\n    Y3 = (Y1 - Y2)\n    X3 = (X1 - X2)\n    return ((Y3 * self.inverse(X3)) % self.P)", "docstring": "Determines the slope between this point and another point.\n\nArgs:\nother (AffinePoint): The second point.\n\nReturns:\nint: Slope between self and other.", "source": "codesearchnet"}
{"code": "def MergeDataSets(self):\n    rules = set()\n    for (schedule, merge_map, zone_map) in ([self.feed_merger.a_schedule, self.feed_merger.a_merge_map, self.feed_merger.a_zone_map], [self.feed_merger.b_schedule, self.feed_merger.b_merge_map, self.feed_merger.b_zone_map]):\n        for fare in schedule.GetFareAttributeList():\n            for fare_rule in fare.GetFareRuleList():\n                fare_id = merge_map[schedule.GetFareAttribute(fare_rule.fare_id)].fare_id\n                route_id = (fare_rule.route_id and merge_map[schedule.GetRoute(fare_rule.route_id)].route_id)\n                origin_id = (fare_rule.origin_id and zone_map[fare_rule.origin_id])\n                destination_id = (fare_rule.destination_id and zone_map[fare_rule.destination_id])\n                contains_id = (fare_rule.contains_id and zone_map[fare_rule.contains_id])\n                rules.add((fare_id, route_id, origin_id, destination_id, contains_id))\n    for fare_rule_tuple in rules:\n        migrated_fare_rule = transitfeed.FareRule(*fare_rule_tuple)\n        self.feed_merger.merged_schedule.AddFareRuleObject(migrated_fare_rule)\n    if rules:\n        self.feed_merger.problem_reporter.FareRulesBroken(self)\n    print(('Fare Rules: union has %d fare rules' % len(rules)))\n    return True", "docstring": "Merge the fare rule datasets.\n\nThe fare rules are first migrated. Merging is done by removing any\nduplicate rules.\n\nReturns:\nTrue since fare rules can always be merged.", "source": "codesearchnet"}
{"code": "def __init__(self, weight_shape: Sequence[int], bias_size: Optional[int]=None, activation_fn: Optional[ops.Operation]=None, use_biasadd: bool=True) -> None:\n    self.bias_size = bias_size\n    self.activation_fn = activation_fn\n    self.use_biasadd = use_biasadd\n    self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)\n    if bias_size is not None:\n        self.bias = np.random.uniform(low=-1.0, high=1.0, size=bias_size)", "docstring": "Initializes a MatmulModel.\n\nArgs:\nweight_shape: Shape of the weight tensor.\nbias_size: If None, do not use bias. Else, use given size as bias.\nactivation_fn: The activation function to be used. No activation\nfunction if None.\nuse_biasadd: If True, use BiasAdd for adding bias, else use AddV2.", "source": "github-repos"}
{"code": "def send_request(self, request, correlation_id=None):\n        \n        log.debug('Sending request %s', request)\n        if correlation_id is None:\n            correlation_id = self._next_correlation_id()\n        header = RequestHeader(request,\n                               correlation_id=correlation_id,\n                               client_id=self._client_id)\n        message = b''.join([header.encode(), request.encode()])\n        size = Int32.encode(len(message))\n        data = size + message\n        self.bytes_to_send.append(data)\n        if request.expect_response():\n            ifr = (correlation_id, request)\n            self.in_flight_requests.append(ifr)\n        return correlation_id", "docstring": "Encode and queue a kafka api request for sending.\n\nArguments:\nrequest (object): An un-encoded kafka request.\ncorrelation_id (int, optional): Optionally specify an ID to\ncorrelate requests with responses. If not provided, an ID will\nbe generated automatically.\n\nReturns:\ncorrelation_id", "source": "juraj-google-style"}
{"code": "def transformer_text_encoder(inputs, target_space, hparams, name=None):\n    with tf.variable_scope(name, default_name='transformer_text_encoder'):\n        inputs = common_layers.flatten4d3d(inputs)\n        [encoder_input, encoder_self_attention_bias, ed] = transformer_layers.transformer_prepare_encoder(inputs, target_space=target_space, hparams=hparams)\n        encoder_input = tf.nn.dropout(encoder_input, (1.0 - hparams.dropout))\n        encoder_output = transformer_layers.transformer_encoder(encoder_input, encoder_self_attention_bias, hparams)\n        return (encoder_output, ed)", "docstring": "Transformer text encoder over inputs with unmasked full attention.\n\nArgs:\ninputs: Tensor of shape [batch, length, 1, hparams.hidden_size].\ntarget_space: int. Used for encoding inputs under a target space id.\nhparams: HParams.\nname: string, variable scope.\n\nReturns:\nencoder_output: Tensor of shape [batch, length, hparams.hidden_size].\ned: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias\nfor any padded tokens.", "source": "codesearchnet"}
{"code": "def resample(self, seed=None):\n    if (seed is not None):\n        gen = torch.manual_seed(seed)\n    else:\n        gen = torch.default_generator\n    if self.replacement:\n        self.perm = torch.LongTensor(len(self)).random_(len(self.dataset), generator=gen)\n    else:\n        self.perm = torch.randperm(len(self.dataset), generator=gen).narrow(0, 0, len(self))", "docstring": "Resample the dataset.\n\nArgs:\nseed (int, optional): Seed for resampling. By default no seed is\nused.", "source": "codesearchnet"}
{"code": "def UpdateMapping(self, filename, mapping_update):\n    if (filename not in self._file_mapping):\n        raise problems.NonexistentMapping(filename)\n    mapping = self._file_mapping[filename]\n    mapping.update(mapping_update)", "docstring": "Updates an entry in the list of known filenames.\nAn entry is identified by its filename.\n\nArgs:\nfilename: The filename whose mapping is to be updated\nmapping_update: A dictionary containing the fields to update and their\nnew values.\nRaises:\nInexistentMapping if the filename does not exist in the mapping", "source": "codesearchnet"}
{"code": "def pop_stack(stack, op_id):\n  \n  if __debug__:\n    pushed_stack, pushed_op_id = stack.pop()\n    assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)\n  else:\n    pushed_stack = stack.pop()\n  return pushed_stack", "docstring": "Proxy of pop, where we know we're popping a stack off of a stack.\n\nWe know that we don't need to differentiate through this.\nSee pop() for more.\n\nArgs:\nstack: The stack to pop from.\nop_id: A unique variable that is also passed into the matching push.\nAllows optimization passes to track pairs of pushes and pops.\n\nReturns:\nThe last value.", "source": "juraj-google-style"}
{"code": "def update_datastore(self, schema=None, primary_key=None, path=None):\n    self.create_datastore(schema, primary_key, 2, path=path)", "docstring": "For tabular data, update a resource in the HDX datastore which enables data preview in HDX. If no schema is provided\nall fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.\n\nArgs:\nschema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.\nprimary_key (Optional[str]): Primary key of schema. Defaults to None.\npath (Optional[str]): Local path to file that was uploaded. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def sub_map(self, counters_map):\n    \n    for counter_name in counters_map.counters:\n      self.increment(counter_name, -counters_map.counters[counter_name])", "docstring": "Subtracts all counters from the map.\n\nFor each counter in the passed map, subtracts its value to the counter in\nthis map.\n\nArgs:\ncounters_map: CounterMap instance to subtract.", "source": "juraj-google-style"}
{"code": "def _DecodeURL(self, url):\n    if (not url):\n        return ''\n    decoded_url = urlparse.unquote(url)\n    if isinstance(decoded_url, py2to3.BYTES_TYPE):\n        try:\n            decoded_url = decoded_url.decode('utf-8')\n        except UnicodeDecodeError as exception:\n            decoded_url = decoded_url.decode('utf-8', errors='replace')\n            logger.warning('Unable to decode URL: {0:s} with error: {1!s}'.format(url, exception))\n    return decoded_url", "docstring": "Decodes the URL, replaces %XX to their corresponding characters.\n\nArgs:\nurl (str): encoded URL.\n\nReturns:\nstr: decoded URL.", "source": "codesearchnet"}
{"code": "def children_rest_names(self):\n    names = []\n    for fetcher in self.fetchers:\n        names.append(fetcher.__class__.managed_object_rest_name())\n    return names", "docstring": "Gets the list of all possible children ReST names.\n\nReturns:\nlist: list containing all possible rest names as string\n\nExample:\n>>> entity = NUEntity()\n>>> entity.children_rest_names\n[\"foo\", \"bar\"]", "source": "codesearchnet"}
{"code": "def diff(self, sym: Symbol, n: int=1, expand_simplify: bool=True):\n    if (not isinstance(sym, sympy.Basic)):\n        raise TypeError(('%s needs to be a Sympy symbol' % sym))\n    if sym.free_symbols.issubset(self.free_symbols):\n        deriv = QuantumDerivative.create(self, derivs={sym: n}, vals=None)\n        if ((not deriv.is_zero) and expand_simplify):\n            deriv = deriv.expand().simplify_scalar()\n        return deriv\n    else:\n        return self.__class__._zero", "docstring": "Differentiate by scalar parameter `sym`.\n\nArgs:\nsym: What to differentiate by.\nn: How often to differentiate\nexpand_simplify: Whether to simplify the result.\n\nReturns:\nThe n-th derivative.", "source": "codesearchnet"}
{"code": "def anonymous_login(services):\n    \n    if isinstance(services, str):\n        services = [services]\n\n    clients = {}\n    \n    for serv in services:\n        try:\n            clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)\n        except KeyError:  \n            print(\"Error: No known client for '{}' service.\".format(serv))\n        except Exception:  \n            print(\"Error: Unable to create client for '{}' service.\\n\"\n                  \"Anonymous access may not be allowed.\".format(serv))\n\n    return clients", "docstring": "Initialize services without authenticating to Globus Auth.\n\nNote:\nClients may have reduced functionality without authentication.\n\nArguments:\nservices (str or list of str): The services to initialize clients for.\n\nReturns:\ndict: The clients requested, indexed by service name.", "source": "juraj-google-style"}
{"code": "def __best_intent(self, parse_result, context=[]):\n        \n        best_intent = None\n        best_tags = None\n        context_as_entities = [{'entities': [c]} for c in context]\n        for intent in self.intent_parsers:\n            i, tags = intent.validate_with_tags(parse_result.get('tags') + context_as_entities, parse_result.get('confidence'))\n            if not best_intent or (i and i.get('confidence') > best_intent.get('confidence')):\n                best_intent = i\n                best_tags = tags\n\n        return best_intent, best_tags", "docstring": "Decide the best intent\n\nArgs:\nparse_result(list): results used to match the best intent.\ncontext(list): ?\n\nReturns:\nbest_intent, best_tags:\nbest_intent : The best intent for given results\nbest_tags : The Tags for result", "source": "juraj-google-style"}
{"code": "def get_rollout_from_id(self, rollout_id):\n    \n    layer = self.rollout_id_map.get(rollout_id)\n\n    if layer:\n      return layer\n\n    self.logger.error('Rollout with ID \"%s\" is not in datafile.' % rollout_id)\n    return None", "docstring": "Get rollout for the provided ID.\n\nArgs:\nrollout_id: ID of the rollout to be fetched.\n\nReturns:\nRollout corresponding to the provided ID.", "source": "juraj-google-style"}
{"code": "def _get_function_id(self):\n    if self.is_for_driver_task:\n        return ray.FunctionID.nil()\n    function_id_hash = hashlib.sha1()\n    function_id_hash.update(self.module_name.encode('ascii'))\n    function_id_hash.update(self.function_name.encode('ascii'))\n    function_id_hash.update(self.class_name.encode('ascii'))\n    function_id_hash.update(self._function_source_hash)\n    function_id = function_id_hash.digest()\n    return ray.FunctionID(function_id)", "docstring": "Calculate the function id of current function descriptor.\n\nThis function id is calculated from all the fields of function\ndescriptor.\n\nReturns:\nray.ObjectID to represent the function descriptor.", "source": "codesearchnet"}
{"code": "def get_de_novos_in_transcript(transcript, de_novos):\n    \n    \n    in_transcript = []\n    for de_novo in de_novos:\n        \n        \n        \n        \n        \n        site = transcript.get_coding_distance(de_novo)\n        cds_length = transcript.get_coding_distance(transcript.get_cds_end())\n        within_cds = site['pos'] >= 0 and site['pos'] < cds_length['pos']\n        if within_cds and (transcript.in_coding_region(de_novo) or abs(site['offset']) < 9):\n            in_transcript.append(de_novo)\n    \n    return in_transcript", "docstring": "get the de novos within the coding sequence of a transcript\n\nArgs:\ntranscript: Transcript object, which defines the transcript coordinates\nde_novos: list of chromosome sequence positions for de novo events\n\nReturns:\nlist of de novo positions found within the transcript", "source": "juraj-google-style"}
{"code": "def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):\n    if cloud:\n        if ((not model_version) or (not model_name)):\n            raise ValueError('model_version or model_name is not set')\n        if training_dir:\n            raise ValueError('training_dir not needed when cloud is True')\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore')\n            return cloud_predict(model_name, model_version, data)\n    else:\n        if (not training_dir):\n            raise ValueError('training_dir is not set')\n        if (model_version or model_name):\n            raise ValueError('model_name and model_version not needed when cloud is False.')\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore')\n            return local_predict(training_dir, data)", "docstring": "Runs prediction locally or on the cloud.\n\nArgs:\ndata: List of csv strings or a Pandas DataFrame that match the model schema.\ntraining_dir: local path to the trained output folder.\nmodel_name: deployed model name\nmodel_version: depoyed model version\ncloud: bool. If False, does local prediction and data and training_dir\nmust be set. If True, does cloud prediction and data, model_name,\nand model_version must be set.\n\n\nFor cloud prediction, the model must be created. This can be done by running\ntwo gcloud commands::\n1) gcloud beta ml models create NAME\n2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model\nor these datalab commands:\n1) import google.datalab as datalab\nmodel = datalab.ml.ModelVersions(MODEL_NAME)\nmodel.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')\nNote that the model must be on GCS.\n\nReturns:\nPandas DataFrame.", "source": "codesearchnet"}
{"code": "def json_to_data(fn=None, return_json=True):\n    \n    def json_to_data_decorator(fn):\n        @handle_type_error\n        @wraps(fn)\n        def get_data_wrapper(*args, **kwargs):\n            kwargs[\"data\"] = decode_json_body()\n\n            if not return_json:\n                return fn(*args, **kwargs)\n\n            return encode_json_body(\n                fn(*args, **kwargs)\n            )\n\n        return get_data_wrapper\n\n    if fn:  \n        return json_to_data_decorator(fn)\n\n    return json_to_data_decorator", "docstring": "Decode JSON from the request and add it as ``data`` parameter for wrapped\nfunction.\n\nArgs:\nreturn_json (bool, default True): Should the decorator automatically\nconvert returned value to JSON?", "source": "juraj-google-style"}
{"code": "def __init__(self, decode_module, encode_module, methodName='runTest'):\n    super(DescriptorSourceTestBase, self).__init__(methodName)\n    self._decode_module = decode_module\n    self._encode_module = encode_module", "docstring": "DescriptorSourceTestBase initializer.\n\nArgs:\ndecode_module: a module containing the `decode_proto_op` method\nencode_module: a module containing the `encode_proto_op` method\nmethodName: the name of the test method (same as for test.TestCase)", "source": "github-repos"}
{"code": "def load(archive_file: fhir_package.PackageSource) -> fhir_package.FhirPackage[structure_definition_pb2.StructureDefinition, search_parameter_pb2.SearchParameter, code_system_pb2.CodeSystem, value_set_pb2.ValueSet]:\n    return fhir_package.FhirPackage.load(archive_file, _PRIMITIVE_HANDLER, structure_definition_pb2.StructureDefinition, search_parameter_pb2.SearchParameter, code_system_pb2.CodeSystem, value_set_pb2.ValueSet)", "docstring": "Instantiates and returns a new `FhirPackage` for FHIR R4.\n\nArgs:\narchive_file: The zip or tar file path or a function returning a file-like\ncontaining resources represented by this collection.\n\nReturns:\nAn instance of `FhirPackage`.\n\nRaises:\nValueError: In the event that the file or contents are invalid.", "source": "github-repos"}
{"code": "def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):\n\n    def inner(T):\n        arr = T('input')\n        if (mask is None):\n            mask_ = np.ones(shp)\n            mask_[(:, w:(- w), w:(- w))] = 0\n        else:\n            mask_ = mask\n        blur = _tf_blur(arr, w=5)\n        diffs = ((blur - arr) ** 2)\n        diffs += (0.8 * ((arr - C) ** 2))\n        return (- tf.reduce_sum((diffs * mask_)))\n    return inner", "docstring": "Encourage the boundaries of an image to have less variation and of color C.\n\nArgs:\nshp: shape of T(\"input\") because this may not be known.\nw: width of boundary to penalize. Ignored if mask is set.\nmask: mask describing what area should be penalized.\n\nReturns:\nObjective.", "source": "codesearchnet"}
{"code": "def get_i_name(self, num, is_oai=None):\n        \n        if num not in (1, 2):\n            raise ValueError(\"`num` parameter have to be 1 or 2!\")\n\n        if is_oai is None:\n            is_oai = self.oai_marc\n\n        i_name = \"ind\" if not is_oai else \"i\"\n\n        return i_name + str(num)", "docstring": "This method is used mainly internally, but it can be handy if you work\nwith with raw MARC XML object and not using getters.\n\nArgs:\nnum (int): Which indicator you need (1/2).\nis_oai (bool/None): If None, :attr:`.oai_marc` is\nused.\n\nReturns:\nstr: current name of ``i1``/``ind1`` parameter based on \\\n:attr:`oai_marc` property.", "source": "juraj-google-style"}
{"code": "def _CreateComplexTypeFromData(self, elem_type, type_is_override, data, set_type_attrs):\n    elem_arguments = dict(elem_type.elements)\n    instantiated_arguments = {k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs) for (k, v) in data if (k != 'xsi_type')}\n    if set_type_attrs:\n        found_type_attr = next((e_name for (e_name, _) in elem_type.elements if e_name.endswith('.Type')), None)\n        if (found_type_attr and type_is_override):\n            instantiated_arguments[found_type_attr] = elem_type.qname.localname\n    return elem_type(**instantiated_arguments)", "docstring": "Initialize a SOAP element with specific data.\n\nArgs:\nelem_type: The type of the element to create.\ntype_is_override: A boolean specifying if the type is being overridden.\ndata: The data to hydrate the type with.\nset_type_attrs: A boolean indicating whether or not attributes that end\nin .Type should be set. This is only necessary for batch job service.\n\nReturns:\nAn fully initialized SOAP element.", "source": "codesearchnet"}
{"code": "def get_all_for_resource(identifier, configuration=None):\n        \n        \n\n        resourceview = ResourceView(configuration=configuration)\n        success, result = resourceview._read_from_hdx('resource view', identifier, 'id', ResourceView.actions()['list'])\n        resourceviews = list()\n        if success:\n            for resourceviewdict in result:\n                resourceview = ResourceView(resourceviewdict, configuration=configuration)\n                resourceviews.append(resourceview)\n        return resourceviews", "docstring": "Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects\n\nArgs:\nidentifier (str): Identifier of resource\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nList[ResourceView]: List of ResourceView objects", "source": "juraj-google-style"}
{"code": "def WriteScanNode(self, scan_context, scan_node, indentation=''):\n    if (not scan_node):\n        return\n    values = []\n    part_index = getattr(scan_node.path_spec, 'part_index', None)\n    if (part_index is not None):\n        values.append('{0:d}'.format(part_index))\n    store_index = getattr(scan_node.path_spec, 'store_index', None)\n    if (store_index is not None):\n        values.append('{0:d}'.format(store_index))\n    start_offset = getattr(scan_node.path_spec, 'start_offset', None)\n    if (start_offset is not None):\n        values.append('start offset: {0:d} (0x{0:08x})'.format(start_offset))\n    location = getattr(scan_node.path_spec, 'location', None)\n    if (location is not None):\n        values.append('location: {0:s}'.format(location))\n    values = ', '.join(values)\n    flags = ''\n    if (scan_node in scan_context.locked_scan_nodes):\n        flags = ' [LOCKED]'\n    print('{0:s}{1:s}: {2:s}{3:s}'.format(indentation, scan_node.path_spec.type_indicator, values, flags))\n    indentation = '  {0:s}'.format(indentation)\n    for sub_scan_node in scan_node.sub_nodes:\n        self.WriteScanNode(scan_context, sub_scan_node, indentation=indentation)", "docstring": "Writes the source scanner node to stdout.\n\nArgs:\nscan_context (SourceScannerContext): the source scanner context.\nscan_node (SourceScanNode): the scan node.\nindentation (Optional[str]): indentation.", "source": "codesearchnet"}
{"code": "def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor:\n    with self.graph.as_default():\n        with tf.name_scope('action_preconditions_checking'):\n            preconds = self.compile_action_preconditions(state, action)\n            all_preconds = tf.stack([p.tensor for p in preconds], axis=1)\n            checking = tf.reduce_all(all_preconds, axis=1)\n            return checking", "docstring": "Combines the action preconditions into an applicability checking op.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA boolean tensor for checking if `action` is application in `state`.", "source": "codesearchnet"}
{"code": "def get_connection(self, name):\n    name = 'connection:{}'.format(name)\n    if (not self.has_section(name)):\n        return None\n    return dict(self.items(name))", "docstring": "Returns the properties for a connection name\n\nThis method will return the settings for the configuration specified\nby name.  Note that the name argument should only be the name.\n\nFor instance, give the following eapi.conf file\n\n.. code-block:: ini\n\n[connection:veos01]\ntransport: http\n\nThe name to use to retrieve the configuration would be veos01\n\n>>> pyeapi.client.config.get_connection('veos01')\n\nArgs:\nname (str): The name of the connection to return\n\nReturns:\nA Python dictionary object of key/value pairs that represent\nthe node configuration.  If the name provided in the argument\nis not found, then None is returned.", "source": "codesearchnet"}
{"code": "def transform(self, obj, user_context):\n    if inspect.isfunction(obj) or inspect.ismethod(obj):\n        return self.transform_function(obj, user_context)\n    raise NotImplementedError('Non-function: {}'.format(type(obj)))", "docstring": "Transforms a Python object.\n\nUsers typically call this method.\n\nArgs:\nobj: A Python object, function, type, etc.\nuser_context: An opaque object (may be None) that is forwarded to\ntransform_ast, through the ctx.user attribute.\nReturns:\nThe result of calling transform_function.\n\nRaises:\nNotImplementedError: if the type of obj is not handled.", "source": "github-repos"}
{"code": "def __init__(self, model_name: str, *, title: Optional[str]=None, task_type: str=DEFAULT_TASK_TYPE, project: Optional[str]=None, location: Optional[str]=None, credentials: Optional[Credentials]=None, **kwargs):\n    if not vertexai:\n        raise ImportError('vertexai is required to use VertexAITextEmbeddings. Please install it with `pip install google-cloud-aiplatform`')\n    super().__init__(type_adapter=create_rag_adapter(), **kwargs)\n    self.model_name = model_name\n    self.title = title\n    self.task_type = task_type\n    self.project = project\n    self.location = location\n    self.credentials = credentials", "docstring": "Utilizes Vertex AI text embeddings for semantic search and RAG\npipelines.\n\nArgs:\nmodel_name: Name of the Vertex AI text embedding model\ntitle: Optional title for the text content\ntask_type: Task type for embeddings (default: RETRIEVAL_DOCUMENT)\nproject: GCP project ID\nlocation: GCP location\ncredentials: Optional GCP credentials\n**kwargs: Additional arguments passed to EmbeddingsManager including\nModelHandler inference_args.", "source": "github-repos"}
{"code": "def get_dssp_annotations(self, outdir, force_rerun=False):\n    if self.structure:\n        parsed = self.structure\n    else:\n        parsed = self.parse_structure()\n    if (not parsed):\n        log.error('{}: unable to open structure to run DSSP'.format(self.id))\n        return\n    log.debug('{}: running DSSP'.format(self.id))\n    dssp_results = ssbio.protein.structure.properties.dssp.get_dssp_df(model=parsed.first_model, pdb_file=self.structure_path, outdir=outdir, force_rerun=force_rerun)\n    if dssp_results.empty:\n        log.error('{}: unable to run DSSP'.format(self.id))\n        return\n    chains = dssp_results.chain.unique()\n    dssp_summary = ssbio.protein.structure.properties.dssp.secondary_structure_summary(dssp_results)\n    for chain in chains:\n        ss = dssp_results[(dssp_results.chain == chain)].ss.tolist()\n        exposure_rsa = dssp_results[(dssp_results.chain == chain)].exposure_rsa.tolist()\n        exposure_asa = dssp_results[(dssp_results.chain == chain)].exposure_asa.tolist()\n        phi = dssp_results[(dssp_results.chain == chain)].phi.tolist()\n        psi = dssp_results[(dssp_results.chain == chain)].psi.tolist()\n        chain_prop = self.chains.get_by_id(chain)\n        chain_seq = chain_prop.seq_record\n        ss = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq, new_seq=ss, fill_with='-')\n        exposure_rsa = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq, new_seq=exposure_rsa, fill_with=float('Inf'))\n        exposure_asa = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq, new_seq=exposure_asa, fill_with=float('Inf'))\n        phi = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq, new_seq=phi, fill_with=float('Inf'))\n        psi = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq, new_seq=psi, fill_with=float('Inf'))\n        chain_prop.seq_record.annotations.update(dssp_summary[chain])\n        chain_prop.seq_record.letter_annotations['SS-dssp'] = ss\n        chain_prop.seq_record.letter_annotations['RSA-dssp'] = exposure_rsa\n        chain_prop.seq_record.letter_annotations['ASA-dssp'] = exposure_asa\n        chain_prop.seq_record.letter_annotations['PHI-dssp'] = phi\n        chain_prop.seq_record.letter_annotations['PSI-dssp'] = psi\n        log.debug('{}: stored DSSP annotations in chain seq_record letter_annotations'.format(chain))", "docstring": "Run DSSP on this structure and store the DSSP annotations in the corresponding ChainProp SeqRecords\n\nCalculations are stored in the ChainProp's ``letter_annotations`` at the following keys:\n\n* ``SS-dssp``\n* ``RSA-dssp``\n* ``ASA-dssp``\n* ``PHI-dssp``\n* ``PSI-dssp``\n\nArgs:\noutdir (str): Path to where DSSP dataframe will be stored.\nforce_rerun (bool): If DSSP results should be recalculated\n\nTODO:\n* Also parse global properties, like total accessible surface area. Don't think Biopython parses those?", "source": "codesearchnet"}
{"code": "def running(processid):\n    \n    try:\n        \n        \n        \n        \n        os.kill(processid, 0)\n    except OverflowError as exc:\n        print(\"checking validity of pid ({p}) failed with: {e}\"\n              .format(p=processid, e=exc))\n        sys.exit(1)\n    except OSError:\n        return False\n    else:\n        return True", "docstring": "Check the validity of a process ID.\n\nArguments:\nprocessid (int): Process ID number.\n\nReturns:\nTrue if process ID is found otherwise False.", "source": "juraj-google-style"}
{"code": "def wrap_query_in_nested_if_field_is_nested(query, field, nested_fields):\n    for element in nested_fields:\n        match_pattern = '^{}.'.format(element)\n        if re.match(match_pattern, field):\n            return generate_nested_query(element, query)\n    return query", "docstring": "Helper for wrapping a query into a nested if the fields within the query are nested\n\nArgs:\nquery : The query to be wrapped.\nfield : The field that is being queried.\nnested_fields : List of fields which are nested.\nReturns:\n(dict): The nested query", "source": "codesearchnet"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    binary = '{0:b}'.format(abs(self.value))\n    binary = (('0' * (64 - (len(binary) % 64))) + binary)\n    if (self.value < 0):\n        binary = binary.replace('1', 'i')\n        binary = binary.replace('0', '1')\n        binary = binary.replace('i', '0')\n        pivot = binary.rfind('0')\n        binary = ((binary[0:pivot] + '1') + ('0' * len(binary[(pivot + 1):])))\n    hexadecimal = b''\n    for i in range(0, len(binary), 8):\n        byte = binary[i:(i + 8)]\n        byte = int(byte, 2)\n        hexadecimal += struct.pack('!B', byte)\n    self.length = len(hexadecimal)\n    super(BigInteger, self).write(ostream, kmip_version=kmip_version)\n    ostream.write(hexadecimal)", "docstring": "Write the encoding of the BigInteger to the output stream.\n\nArgs:\nostream (Stream): A buffer to contain the encoded bytes of a\nBigInteger object. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def verify(self, flag_values):\n    param = self._get_input_to_checker_function(flag_values)\n    if (not self.checker(param)):\n        raise _exceptions.ValidationError(self.message)", "docstring": "Verifies that constraint is satisfied.\n\nflags library calls this method to verify Validator's constraint.\n\nArgs:\nflag_values: flags.FlagValues, the FlagValues instance to get flags from.\nRaises:\nError: Raised if constraint is not satisfied.", "source": "codesearchnet"}
{"code": "def rollback(self, label=None, plane='sdr'):\n        \n        begin = time.time()\n        rb_label = self._chain.target_device.rollback(label=label, plane=plane)\n        elapsed = time.time() - begin\n        if label:\n            self.emit_message(\"Configuration rollback last {:.0f}s. Label: {}\".format(elapsed, rb_label),\n                              log_level=logging.INFO)\n        else:\n            self.emit_message(\"Configuration failed.\", log_level=logging.WARNING)\n\n        return rb_label", "docstring": "Rollback the configuration.\n\nThis method rolls back the configuration on the device.\n\nArgs:\nlabel (text): The configuration label ID\nplane: (text): sdr or admin\n\nReturns:\nA string with commit label or None", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    if (key != 'logline'):\n        logger.warning('Unable to parse record, unknown structure: {0:s}'.format(key))\n        return\n    try:\n        timestamp = int(structure.timestamp)\n    except ValueError:\n        logger.debug('Invalid timestamp string {0:s}, skipping record'.format(structure.timestamp))\n        return\n    try:\n        (nickname, text) = self._StripThenGetNicknameAndText(structure.text)\n    except pyparsing.ParseException:\n        logger.debug('Error parsing entry at offset {0:d}'.format(self._offset))\n        return\n    event_data = XChatScrollbackEventData()\n    event_data.nickname = nickname\n    event_data.offset = self._offset\n    event_data.text = text\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log record structure.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): structure parsed from the log file.", "source": "codesearchnet"}
{"code": "def __call__(self, inputs, state, scope=None, *args, **kwargs):\n    return base_layer.Layer.__call__(self, inputs, state, *args, scope=scope, **kwargs)", "docstring": "Run this RNN cell on inputs, starting from the given state.\n\nArgs:\ninputs: `2-D` tensor with shape `[batch_size, input_size]`.\nstate: if `self.state_size` is an integer, this should be a `2-D Tensor`\nwith shape `[batch_size, self.state_size]`.  Otherwise, if\n`self.state_size` is a tuple of integers, this should be a tuple with\nshapes `[batch_size, s] for s in self.state_size`.\nscope: optional cell scope.\n*args: Additional positional arguments.\n**kwargs: Additional keyword arguments.\n\nReturns:\nA pair containing:\n\n- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.\n- New state: Either a single `2-D` tensor, or a tuple of tensors matching\nthe arity and shapes of `state`.", "source": "github-repos"}
{"code": "def split_pair(pair_string, separator, nullable_idx=1):\n    pair = pair_string.split(separator, 1)\n    if (len(pair) == 1):\n        if (nullable_idx == 0):\n            return [None, pair[0]]\n        elif (nullable_idx == 1):\n            return [pair[0], None]\n        else:\n            raise IndexError('nullable_idx should be either 0 or 1.')\n    else:\n        return pair", "docstring": "Split a string into a pair, which can have one empty value.\n\nArgs:\npair_string: The string to be split.\nseparator: The separator to be used for splitting.\nnullable_idx: The location to be set to null if the separator is not in the\ninput string. Should be either 0 or 1.\n\nReturns:\nA list containing the pair.\n\nRaises:\nIndexError: If nullable_idx is not 0 or 1.", "source": "codesearchnet"}
{"code": "def switch_to_frame(self, frame):\n        \n\n        if isinstance(frame, Element):\n            self.driver.switch_to_frame(frame)\n            self._scopes.append(\"frame\")\n        elif frame == \"parent\":\n            if self._scopes[-1] != \"frame\":\n                raise ScopeError(\"`switch_to_frame(\\\"parent\\\")` cannot be called \"\n                                 \"from inside a descendant frame's `scope` context.\")\n            self._scopes.pop()\n            self.driver.switch_to_frame(\"parent\")\n        elif frame == \"top\":\n            if \"frame\" in self._scopes:\n                idx = self._scopes.index(\"frame\")\n                if any([scope not in [\"frame\", None] for scope in self._scopes[idx:]]):\n                    raise ScopeError(\"`switch_to_frame(\\\"top\\\")` cannot be called \"\n                                     \"from inside a descendant frame's `scope` context.\")\n                self._scopes = self._scopes[:idx]\n                self.driver.switch_to_frame(\"top\")\n        else:\n            raise ValueError(\n                \"You must provide a frame element, \\\"parent\\\", or \\\"top\\\" \"\n                \"when calling switch_to_frame\")", "docstring": "Switch to the given frame.\n\nIf you use this method you are responsible for making sure you switch back to the parent\nframe when done in the frame changed to. :meth:`frame` is preferred over this method and\nshould be used when possible. May not be supported by all drivers.\n\nArgs:\nframe (Element | str): The iframe/frame element to switch to.", "source": "juraj-google-style"}
{"code": "async def _handle_conversation_delta(self, conversation):\n        \n        conv_id = conversation.conversation_id.id\n        conv = self._conv_dict.get(conv_id, None)\n        if conv is None:\n            \n            await self._get_or_fetch_conversation(conv_id)\n        else:\n            \n            conv.update_conversation(conversation)", "docstring": "Receive Conversation delta and create or update the conversation.\n\nArgs:\nconversation: hangouts_pb2.Conversation instance\n\nRaises:\nNetworkError: A request to fetch the complete conversation failed.", "source": "juraj-google-style"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    \n    \n    \n    try:\n      file_object = resolver.Resolver.OpenFileObject(\n          path_spec, resolver_context=self._resolver_context)\n    except (IOError, ValueError, errors.AccessError, errors.PathSpecError):\n      return False\n\n    file_object.close()\n    return True", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"}
{"code": "def update(self, *args):\n    for token in tuple(*args):\n        self.add(token)", "docstring": "Updates the Trie with new tokens provided as arguments.\n\nArgs:\n*args: Variable number of words to be added to the Trie.", "source": "github-repos"}
{"code": "def send_message(self, room_id, text_content, msgtype=\"m.text\", timestamp=None):\n        \n        return self.send_message_event(\n            room_id, \"m.room.message\",\n            self.get_text_body(text_content, msgtype),\n            timestamp=timestamp\n        )", "docstring": "Perform PUT /rooms/$room_id/send/m.room.message\n\nArgs:\nroom_id (str): The room ID to send the event in.\ntext_content (str): The m.text body to send.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def _AnsiCmd(command_list):\n    if (not isinstance(command_list, list)):\n        raise ValueError(('Invalid list: %s' % command_list))\n    for sgr in command_list:\n        if (sgr.lower() not in SGR):\n            raise ValueError(('Invalid or unsupported SGR name: %s' % sgr))\n    command_str = [str(SGR[x.lower()]) for x in command_list]\n    return ('\\x1b[%sm' % ';'.join(command_str))", "docstring": "Takes a list of SGR values and formats them as an ANSI escape sequence.\n\nArgs:\ncommand_list: List of strings, each string represents an SGR value.\ne.g. 'fg_blue', 'bg_yellow'\n\nReturns:\nThe ANSI escape sequence.\n\nRaises:\nValueError: if a member of command_list does not map to a valid SGR value.", "source": "codesearchnet"}
{"code": "def generate_dequeue_op(self, tpu_device=0):\n    self.freeze()\n    if self._generated_dequeue_op and (not ops.inside_function()):\n        raise ValueError(\"Can't generate two dequeue Ops from the same queue\")\n    self._generated_dequeue_op = True\n    full_name = '%s/dequeue' % self._name\n    sharded_shapes = [policy.get_unpartitioned_shape(policy.get_sharded_shape(shape)) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n    if tpu_device is not None:\n        with ops.device(tpu_name_util.core(tpu_device)):\n            dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n    else:\n        dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n    if self._number_of_partitions <= 1:\n        return dequeue_op\n    partitions = [policy.get_unpartitioned_shape([1] * shape.ndims).as_list() for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n    return tag_sharding_attribute_for_dequeued_tensors(dequeue_op, partitions)", "docstring": "Generates the device-side Op to dequeue a tuple from the queue.\n\nImplicitly freezes the queue configuration if it is not already\nfrozen, which will raise errors if the shapes and types have not\nbeen fully specified.\n\nArgs:\ntpu_device: The TPU device ordinal where the infeed instruction should be\nplaced. If None, no explicit placement will be performed, and it is up\nto the user to call this API from within a proper TPU device scope.\nThe XLA code will fail if the TPU dequeue instruction is not bound to\nany device.\n\nReturns:\nA list of Outputs corresponding to a shard of infeed dequeued\ninto XLA, suitable for use within a replicated block.\n\nRaises:\nValueError: if the types or shapes of the tuple elements have not been\nset; or if a dequeue op has already been generated.", "source": "github-repos"}
{"code": "def extract_archive(archive_path, dest):\n    if (not os.path.isdir(dest)):\n        os.makedirs(dest)\n    try:\n        tmpfolder = None\n        if ((not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path)):\n            raise ValueError(('archive path %s is not a file' % archive_path))\n        if archive_path.startswith('gs:\n            tmpfolder = tempfile.mkdtemp()\n            cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]\n            _shell_process.run_and_monitor(cmd_args, os.getpid())\n            archive_path = os.path.join(tmpfolder, os.path.name(archive_path))\n        if archive_path.lower().endswith('.tar.gz'):\n            flags = '-xzf'\n        elif archive_path.lower().endswith('.tar'):\n            flags = '-xf'\n        else:\n            raise ValueError('Only tar.gz or tar.Z files are supported.')\n        cmd_args = ['tar', flags, archive_path, '-C', dest]\n        _shell_process.run_and_monitor(cmd_args, os.getpid())\n    finally:\n        if tmpfolder:\n            shutil.rmtree(tmpfolder)", "docstring": "Extract a local or GCS archive file to a folder.\n\nArgs:\narchive_path: local or gcs path to a *.tar.gz or *.tar file\ndest: local folder the archive will be extracted to", "source": "codesearchnet"}
{"code": "def _get_api_call(self, function_name, *args):\n    api_call = (dedent('\\n        var done = arguments[0];\\n        KindleAPI.%(api_call)s(%(args)s).always(function(a) {\\n            done(a);\\n        });\\n    ') % {'api_call': function_name, 'args': ', '.join(args)})\n    script = '\\n'.join((api.API_SCRIPT, api_call))\n    try:\n        return self._browser.execute_async_script(script)\n    except TimeoutException:\n        raise APIError", "docstring": "Runs an api call with javascript-formatted arguments.\n\nArgs:\nfunction_name: The name of the KindleAPI call to run.\n*args: Javascript-formatted arguments to pass to the API call.\n\nReturns:\nThe result of the API call.\n\nRaises:\nAPIError: If the API call fails or times out.", "source": "codesearchnet"}
{"code": "def _build(self):\n    flat_initial_state = nest.flatten(self._initial_state)\n    if (self._mask is not None):\n        flat_mask = nest.flatten(self._mask)\n        flat_learnable_state = [_single_learnable_state(state, state_id=i, learnable=mask) for (i, (state, mask)) in enumerate(zip(flat_initial_state, flat_mask))]\n    else:\n        flat_learnable_state = [_single_learnable_state(state, state_id=i) for (i, state) in enumerate(flat_initial_state)]\n    return nest.pack_sequence_as(structure=self._initial_state, flat_sequence=flat_learnable_state)", "docstring": "Connects the module to the graph.\n\nReturns:\nThe learnable state, which has the same type, structure and shape as\nthe `initial_state` passed to the constructor.", "source": "codesearchnet"}
{"code": "def setup_remoteckan(self, remoteckan=None, **kwargs):\n        \n        \n        if remoteckan is None:\n            self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(), full_agent=self.get_user_agent(),\n                                                      **kwargs)\n        else:\n            self._remoteckan = remoteckan", "docstring": "Set up remote CKAN from provided CKAN or by creating from configuration\n\nArgs:\nremoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def set_hostname(hostname=None):\n    \n    if not hostname:\n        raise salt.exceptions.CommandExecutionError(\"Hostname option must be provided.\")\n\n    dn = \"sys/rack-unit-1/mgmt/if-1\"\n    inconfig = .format(hostname)\n\n    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)\n\n    try:\n        if ret['outConfig']['mgmtIf'][0]['status'] == 'modified':\n            return True\n        else:\n            return False\n    except Exception as err:\n        return False", "docstring": "Sets the hostname on the server.\n\n.. versionadded:: 2019.2.0\n\nArgs:\nhostname(str): The new hostname to set.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' cimc.set_hostname foobar", "source": "juraj-google-style"}
{"code": "def power(self, n):\n        \n        if not isinstance(n, (int, np.integer)):\n            raise QiskitError(\"Can only power with integer powers.\")\n        if self._input_dim != self._output_dim:\n            raise QiskitError(\"Can only power with input_dim = output_dim.\")\n        \n        \n        return SuperOp(\n            np.linalg.matrix_power(self._data, n), self.input_dims(),\n            self.output_dims())", "docstring": "Return the compose of a QuantumChannel with itself n times.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nSuperOp: the n-times composition channel as a SuperOp object.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "juraj-google-style"}
{"code": "def _generate_security_groups(config_key):\n    \n    raw_default_groups = validate_key_values(CONFIG, 'base', config_key, default='')\n    default_groups = _convert_string_to_native(raw_default_groups)\n    LOG.debug('Default security group for %s is %s', config_key, default_groups)\n\n    entries = {}\n    for env in ENVS:\n        entries[env] = []\n\n    if isinstance(default_groups, (list)):\n        groups = _remove_empty_entries(default_groups)\n        for env in entries:\n            entries[env] = groups\n    elif isinstance(default_groups, (dict)):\n        entries.update(default_groups)\n\n    LOG.debug('Generated security group: %s', entries)\n    return entries", "docstring": "Read config file and generate security group dict by environment.\n\nArgs:\nconfig_key (str): Configuration file key\n\nReturns:\ndict: of environments in {'env1': ['group1', 'group2']} format", "source": "juraj-google-style"}
{"code": "def export(self, top=True):\n        \n        out = []\n        if top:\n            out.append(self._internal_name)\n        out.append(self._to_str(self.ground_temperature_depth))\n        out.append(self._to_str(self.depth_soil_conductivity))\n        out.append(self._to_str(self.depth_soil_density))\n        out.append(self._to_str(self.depth_soil_specific_heat))\n        out.append(self._to_str(self.depth_january_average_ground_temperature))\n        out.append(\n            self._to_str(\n                self.depth_february_average_ground_temperature))\n        out.append(self._to_str(self.depth_march_average_ground_temperature))\n        out.append(self._to_str(self.depth_april_average_ground_temperature))\n        out.append(self._to_str(self.depth_may_average_ground_temperature))\n        out.append(self._to_str(self.depth_june_average_ground_temperature))\n        out.append(self._to_str(self.depth_july_average_ground_temperature))\n        out.append(self._to_str(self.depth_august_average_ground_temperature))\n        out.append(\n            self._to_str(\n                self.depth_september_average_ground_temperature))\n        out.append(self._to_str(self.depth_october_average_ground_temperature))\n        out.append(\n            self._to_str(\n                self.depth_november_average_ground_temperature))\n        out.append(\n            self._to_str(\n                self.depth_december_average_ground_temperature))\n        return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\ntop (bool):  if True appends `internal_name` before values.\nAll non list objects should be exported with value top=True,\nall list objects, that are embedded in as fields inlist objects\nshould be exported with `top`=False\n\nReturns:\nstr: The objects string representation", "source": "juraj-google-style"}
{"code": "def add(self, watch_key, tensor_value):\n    \n    if watch_key not in self._tensor_data:\n      self._tensor_data[watch_key] = _WatchStore(\n          watch_key,\n          mem_bytes_limit=self._watch_mem_bytes_limit)\n    self._tensor_data[watch_key].add(tensor_value)", "docstring": "Add a tensor value.\n\nArgs:\nwatch_key: A string representing the debugger tensor watch, e.g.,\n'Dense_1/BiasAdd:0:DebugIdentity'.\ntensor_value: The value of the tensor as a numpy.ndarray.", "source": "juraj-google-style"}
{"code": "def genCaCert(self, name, signas=None, outp=None, save=True):\n        \n        pkey, cert = self._genBasePkeyCert(name)\n        ext0 = crypto.X509Extension(b'basicConstraints', False, b'CA:TRUE')\n        cert.add_extensions([ext0])\n\n        if signas is not None:\n            self.signCertAs(cert, signas)\n        else:\n            self.selfSignCert(cert, pkey)\n\n        if save:\n\n            keypath = self._savePkeyTo(pkey, 'cas', '%s.key' % name)\n            if outp is not None:\n                outp.printf('key saved: %s' % (keypath,))\n\n            crtpath = self._saveCertTo(cert, 'cas', '%s.crt' % name)\n            if outp is not None:\n                outp.printf('cert saved: %s' % (crtpath,))\n\n        return pkey, cert", "docstring": "Generates a CA keypair.\n\nArgs:\nname (str): The name of the CA keypair.\nsignas (str): The CA keypair to sign the new CA with.\noutp (synapse.lib.output.Output): The output buffer.\n\nExamples:\nMake a CA named \"myca\":\n\nmycakey, mycacert = cdir.genCaCert('myca')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the private key and certificate objects.", "source": "juraj-google-style"}
{"code": "def get_dependency_definitions(self, url: str) -> List[_StructDefT]:\n    dependencies: Dict[str, _StructDefT] = {}\n    urls_to_load: List[str] = [url]\n    while urls_to_load:\n        url_to_load = urls_to_load.pop()\n        base_definition = self.get_structure_definition(url_to_load)\n        for elem in base_definition.snapshot.element:\n            for elem_type in elem.type:\n                type_name = elem_type.code.value\n                if _fhir_path_data_types.primitive_type_from_type_code(type_name) is None and type_name not in dependencies:\n                    child_struct = self.get_structure_definition(type_name)\n                    dependencies[type_name] = child_struct\n                    urls_to_load.append(child_struct.url.value)\n    return list(dependencies.values())", "docstring": "Returns all dependencies for the structure identified by the given URL.\n\nArgs:\nurl: The URL identifying the FHIR StructureDefinition to load dependencies\nfor.\n\nReturns:\nThe structure definitions depended on by the above URL.\n\nRaises:\nUnableToLoadResourceError if the resource cannot be loaded.", "source": "github-repos"}
{"code": "def load_text_file(self, filename, encoding=\"utf-8\", tokenizer=None):\n        \n        with load_file(filename, encoding=encoding) as data:\n            self.load_text(data, tokenizer)", "docstring": "Load in a text file from which to generate a word frequency list\n\nArgs:\nfilename (str): The filepath to the text file to be loaded\nencoding (str): The encoding of the text file\ntokenizer (function): The function to use to tokenize a string", "source": "juraj-google-style"}
{"code": "def EnforceLimits(self, client_id, user, flow_name, flow_args, token=None):\n    if ((not self.dup_interval) and (not self.daily_req_limit)):\n        return\n    now = rdfvalue.RDFDatetime.Now()\n    yesterday = (now - rdfvalue.Duration('1d'))\n    dup_boundary = (now - self.dup_interval)\n    min_create_time = min(yesterday, dup_boundary)\n    flow_count = 0\n    flows = self._LoadFlows(client_id, min_create_time, token=token)\n    if (flow_args is None):\n        flow_args = flow.EmptyFlowArgs()\n    for flow_obj in flows:\n        if ((flow_obj.create_time > dup_boundary) and (flow_obj.flow_class_name == flow_name) and (flow_obj.args == flow_args)):\n            raise DuplicateFlowError(('Identical %s already run on %s at %s' % (flow_name, client_id, flow_obj.create_time)), flow_id=flow_obj.flow_id)\n        if ((flow_obj.creator == user) and (flow_obj.create_time > yesterday)):\n            flow_count += 1\n    if (self.daily_req_limit and (flow_count >= self.daily_req_limit)):\n        raise DailyFlowRequestLimitExceededError(('%s flows run since %s, limit: %s' % (flow_count, yesterday, self.daily_req_limit)))", "docstring": "Enforce DailyFlowRequestLimit and FlowDuplicateInterval.\n\nLook at the flows that have run on this client recently and check\nwe aren't exceeding our limits. Raises if limits will be exceeded by running\nthe specified flow.\n\nArgs:\nclient_id: client URN\nuser: username string\nflow_name: flow name string\nflow_args: flow args rdfvalue for the flow being launched\ntoken: acl token\n\nRaises:\nDailyFlowRequestLimitExceededError: if the user has already run\nAPI.DailyFlowRequestLimit on this client in the previous 24h.\nDuplicateFlowError: an identical flow was run on this machine by a user\nwithin the API.FlowDuplicateInterval", "source": "codesearchnet"}
{"code": "def extra(name: str, desc: str) -> Callable:\n\n    def attr_dec(f):\n        f.__setattr__('extra_fn', True)\n        f.__setattr__('name', name)\n        f.__setattr__('desc', desc)\n        return f\n    return attr_dec", "docstring": "Decorator for slave channel's \"additional features\" interface.\n\nArgs:\nname (str): A human readable name for the function.\ndesc (str): A short description and usage of it. Use\n``{function_name}`` in place of the function name\nin the description.\n\nReturns:\nThe decorated method.", "source": "codesearchnet"}
{"code": "def index_buffer(self, buffer, index_element_size=4):\n        \n        if not type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes]:\n            raise VAOError(\"buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance\")\n\n        if isinstance(buffer, numpy.ndarray):\n            buffer = self.ctx.buffer(buffer.tobytes())\n\n        if isinstance(buffer, bytes):\n            buffer = self.ctx.buffer(data=buffer)\n\n        self._index_buffer = buffer\n        self._index_element_size = index_element_size", "docstring": "Set the index buffer for this VAO\n\nArgs:\nbuffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``\n\nKeyword Args:\nindex_element_size (int): Byte size of each element. 1, 2 or 4", "source": "juraj-google-style"}
{"code": "def get_message(self, message_id):\n    for message in self.messages:\n        if (message.id == message_id):\n            return message\n    raise ArgumentError('Message ID not found', message_id=message_id)", "docstring": "Get a message by its persistent id.\n\nArgs:\nmessage_id (int): The id of the message that we're looking for", "source": "codesearchnet"}
{"code": "def _MergeEntities(self, a, b):\n\n    def _MergeAgencyId(a_agency_id, b_agency_id):\n        \"Merge two agency ids.\\n\\n      The only difference between this and _MergeIdentical() is that the values\\n      None and '' are regarded as being the same.\\n\\n      Args:\\n        a_agency_id: The first agency id.\\n        b_agency_id: The second agency id.\\n\\n      Returns:\\n        The merged agency id.\\n\\n      Raises:\\n        MergeError: The agency ids could not be merged.\\n      \"\n        a_agency_id = (a_agency_id or None)\n        b_agency_id = (b_agency_id or None)\n        return self._MergeIdentical(a_agency_id, b_agency_id)\n    scheme = {'agency_id': _MergeAgencyId, 'agency_name': self._MergeIdentical, 'agency_url': self._MergeIdentical, 'agency_timezone': self._MergeIdentical}\n    return self._SchemedMerge(scheme, a, b)", "docstring": "Merges two agencies.\n\nTo be merged, they are required to have the same id, name, url and\ntimezone. The remaining language attribute is taken from the new agency.\n\nArgs:\na: The first agency.\nb: The second agency.\n\nReturns:\nThe merged agency.\n\nRaises:\nMergeError: The agencies could not be merged.", "source": "codesearchnet"}
{"code": "def total_cost_function(self, item_a, item_b, time_a, time_b):\n        \n        distances = np.zeros(len(self.weights))\n        for c, component in enumerate(self.cost_function_components):\n            distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])\n        total_distance = np.sum(self.weights * distances)\n        return total_distance", "docstring": "Calculate total cost function between two items.\n\nArgs:\nitem_a: STObject\nitem_b: STObject\ntime_a: Timestep in item_a at which cost function is evaluated\ntime_b: Timestep in item_b at which cost function is evaluated\n\nReturns:\nThe total weighted distance between item_a and item_b", "source": "juraj-google-style"}
{"code": "def object_hook(self, object_dict):\n    instance = self.decoder(object_dict)\n    self.condition_list.append(instance)\n    self.index += 1\n    return self.index", "docstring": "Hook which when passed into a json.JSONDecoder will replace each dict\nin a json string with its index and convert the dict to an object as defined\nby the passed in condition_decoder. The newly created condition object is\nappended to the conditions_list.\n\nArgs:\nobject_dict: Dict representing an object.\n\nReturns:\nAn index which will be used as the placeholder in the condition_structure", "source": "codesearchnet"}
{"code": "def _get_augmented_label_matrix(self, L, higher_order=False):\n    self.c_data = {}\n    for i in range(self.m):\n        self.c_data[i] = {'start_index': (i * self.k), 'end_index': ((i + 1) * self.k), 'max_cliques': set([j for j in self.c_tree.nodes() if (i in self.c_tree.node[j]['members'])])}\n    L_ind = self._create_L_ind(L)\n    if higher_order:\n        L_aug = np.copy(L_ind)\n        for item in chain(self.c_tree.nodes(), self.c_tree.edges()):\n            if isinstance(item, int):\n                C = self.c_tree.node[item]\n                C_type = 'node'\n            elif isinstance(item, tuple):\n                C = self.c_tree[item[0]][item[1]]\n                C_type = 'edge'\n            else:\n                raise ValueError(item)\n            members = list(C['members'])\n            nc = len(members)\n            if (nc == 1):\n                C['start_index'] = (members[0] * self.k)\n                C['end_index'] = ((members[0] + 1) * self.k)\n            else:\n                L_C = np.ones((self.n, (self.k ** nc)))\n                for (i, vals) in enumerate(product(range(self.k), repeat=nc)):\n                    for (j, v) in enumerate(vals):\n                        L_C[(:, i)] *= L_ind[(:, ((members[j] * self.k) + v))]\n                if (L_aug is not None):\n                    C['start_index'] = L_aug.shape[1]\n                    C['end_index'] = (L_aug.shape[1] + L_C.shape[1])\n                    L_aug = np.hstack([L_aug, L_C])\n                else:\n                    C['start_index'] = 0\n                    C['end_index'] = L_C.shape[1]\n                    L_aug = L_C\n                id = (tuple(members) if (len(members) > 1) else members[0])\n                self.c_data[id] = {'start_index': C['start_index'], 'end_index': C['end_index'], 'max_cliques': (set([item]) if (C_type == 'node') else set(item))}\n        return L_aug\n    else:\n        return L_ind", "docstring": "Returns an augmented version of L where each column is an indicator\nfor whether a certain source or clique of sources voted in a certain\npattern.\n\nArgs:\nL: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}", "source": "codesearchnet"}
{"code": "def check_file(self, fs, info):\n        \n        \n\n        if self.exclude is not None and fs.match(self.exclude, info.name):\n            return False\n        return fs.match(self.filter, info.name)", "docstring": "Check if a filename should be included.\n\nOverride to exclude files from the walk.\n\nArguments:\nfs (FS): A filesystem instance.\ninfo (Info): A resource info object.\n\nReturns:\nbool: `True` if the file should be included.", "source": "juraj-google-style"}
{"code": "def shrink(script, iterations=1):\n    \n    filter_xml = '  <filter name=\"Erode Selection\"/>\\n'\n    for _ in range(iterations):\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Shrink (erode, reduce) the current set of selected faces\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\niterations (int): the number of times to shrink the selection.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def strace_clear(self, handle):\n    data = ctypes.c_int(handle)\n    res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.TRACE_EVENT_CLR, ctypes.byref(data))\n    if (res < 0):\n        raise errors.JLinkException('Failed to clear STRACE event.')\n    return None", "docstring": "Clears the trace event specified by the given handle.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nhandle (int): handle of the trace event.\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def create_redis_client(redis_address, password=None):\n    (redis_ip_address, redis_port) = redis_address.split(':')\n    return redis.StrictRedis(host=redis_ip_address, port=int(redis_port), password=password)", "docstring": "Create a Redis client.\n\nArgs:\nThe IP address, port, and password of the Redis server.\n\nReturns:\nA Redis client.", "source": "codesearchnet"}
{"code": "def _GetDayOfYear(self, year, month, day_of_month):\n    if (month not in range(1, 13)):\n        raise ValueError('Month value out of bounds.')\n    days_per_month = self._GetDaysPerMonth(year, month)\n    if ((day_of_month < 1) or (day_of_month > days_per_month)):\n        raise ValueError('Day of month value out of bounds.')\n    day_of_year = day_of_month\n    for past_month in range(1, month):\n        day_of_year += self._GetDaysPerMonth(year, past_month)\n    return day_of_year", "docstring": "Retrieves the day of the year for a specific day of a month in a year.\n\nArgs:\nyear (int): year e.g. 1970.\nmonth (int): month, where 1 represents January.\nday_of_month (int): day of the month, where 1 represents the first day.\n\nReturns:\nint: day of year.\n\nRaises:\nValueError: if the month or day of month value is out of bounds.", "source": "codesearchnet"}
{"code": "def _broadcast_arg(U, arg, argtype, name):\n    if ((arg is None) or isinstance(arg, argtype)):\n        return [arg for _ in range(U.ndim)]\n    elif np.iterable(arg):\n        if (len(arg) != U.ndim):\n            raise ValueError('Parameter {} was specified as a sequence of incorrect length. The length must match the number of tensor dimensions (U.ndim={})'.format(name, U.ndim))\n        elif (not all([isinstance(a, argtype) for a in arg])):\n            raise TypeError('Parameter {} specified as a sequence of incorrect type. Expected {}.'.format(name, argtype))\n        else:\n            return arg\n    else:\n        raise TypeError('Parameter {} specified as a {}. Expected {}.'.format(name, type(arg), argtype))", "docstring": "Broadcasts plotting option `arg` to all factors.\n\nArgs:\nU : KTensor\narg : argument provided by the user\nargtype : expected type for arg\nname : name of the variable, used for error handling\n\nReturns:\niterable version of arg of length U.ndim", "source": "codesearchnet"}
{"code": "def sqrt(x):\n    zero = _constant_to_tensor(0.0, x.dtype.base_dtype)\n    x = math_ops.maximum(x, zero)\n    return math_ops.sqrt(x)", "docstring": "Element-wise square root.\n\nThis function clips negative tensor values to 0 before computing the\nsquare root.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def FindFileContainingSymbol(self, symbol):\n    \n\n    symbol = _NormalizeFullyQualifiedName(symbol)\n    try:\n      return self._descriptors[symbol].file\n    except KeyError:\n      pass\n\n    try:\n      return self._enum_descriptors[symbol].file\n    except KeyError:\n      pass\n\n    try:\n      file_proto = self._internal_db.FindFileContainingSymbol(symbol)\n    except KeyError as error:\n      if self._descriptor_db:\n        file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)\n      else:\n        raise error\n    if not file_proto:\n      raise KeyError('Cannot find a file containing %s' % symbol)\n    return self._ConvertFileProtoToFileDescriptor(file_proto)", "docstring": "Gets the FileDescriptor for the file containing the specified symbol.\n\nArgs:\nsymbol: The name of the symbol to search for.\n\nReturns:\nA FileDescriptor that contains the specified symbol.\n\nRaises:\nKeyError: if the file can not be found in the pool.", "source": "juraj-google-style"}
{"code": "def combs(a, r):\n    \n    \n    if r == 0:\n        return np.asarray([])\n\n    a = np.asarray(a)\n    data_type = a.dtype if r == 0 else np.dtype([('', a.dtype)] * r)\n    b = np.fromiter(combinations(a, r), data_type)\n    return b.view(a.dtype).reshape(-1, r)", "docstring": "NumPy implementation of ``itertools.combinations``.\n\nReturn successive ``r``-length combinations of elements in the array ``a``.\n\nArgs:\na (np.ndarray): The array from which to get combinations.\nr (int): The length of the combinations.\n\nReturns:\nnp.ndarray: An array of combinations.", "source": "juraj-google-style"}
{"code": "def ms_bot_framework(self) -> dict:\n    rich_card = {}\n    buttons = [button.ms_bot_framework() for button in self.content]\n    rich_card['buttons'] = buttons\n    if self.text:\n        rich_card['title'] = self.text\n    attachments = [{'contentType': 'application/vnd.microsoft.card.thumbnail', 'content': rich_card}]\n    out_activity = {}\n    out_activity['type'] = 'message'\n    out_activity['attachments'] = attachments\n    return out_activity", "docstring": "Returns MS Bot Framework compatible state of the ButtonsFrame instance.\n\nCreating MS Bot Framework activity blank with RichCard in \"attachments\". RichCard\nis populated with CardActions corresponding buttons embedded in ButtonsFrame.\n\nReturns:\ncontrol_json: MS Bot Framework representation of ButtonsFrame state.", "source": "codesearchnet"}
{"code": "def filter(self, scored_list):\n    if (len(scored_list) > 0):\n        avg = np.mean([s[1] for s in scored_list])\n        std = np.std([s[1] for s in scored_list])\n    else:\n        avg = 0\n        std = 0\n    limiter = (avg + (0.5 * std))\n    mean_scored = [(sent_idx, score) for (sent_idx, score) in scored_list if (score > limiter)]\n    return mean_scored", "docstring": "Filtering with std.\n\nArgs:\nscored_list:    The list of scoring.\n\nRetruns:\nThe list of filtered result.", "source": "codesearchnet"}
{"code": "def compress_artifact_if_supported(artifact_path):\n    \n    content_type, encoding = guess_content_type_and_encoding(artifact_path)\n    log.debug('\"{}\" is encoded with \"{}\" and has mime/type \"{}\"'.format(artifact_path, encoding, content_type))\n\n    if encoding is None and content_type in _GZIP_SUPPORTED_CONTENT_TYPE:\n        log.info('\"{}\" can be gzip\\'d. Compressing...'.format(artifact_path))\n        with open(artifact_path, 'rb') as f_in:\n            text_content = f_in.read()\n\n        with gzip.open(artifact_path, 'wb') as f_out:\n            f_out.write(text_content)\n\n        encoding = 'gzip'\n        log.info('\"{}\" compressed'.format(artifact_path))\n    else:\n        log.debug('\"{}\" is not supported for compression.'.format(artifact_path))\n\n    return content_type, encoding", "docstring": "Compress artifacts with GZip if they're known to be supported.\n\nThis replaces the artifact given by a gzip binary.\n\nArgs:\nartifact_path (str): the path to compress\n\nReturns:\ncontent_type, content_encoding (tuple):  Type and encoding of the file. Encoding equals 'gzip' if compressed.", "source": "juraj-google-style"}
{"code": "def from_service_account_file(cls, filename, **kwargs):\n    (info, signer) = _service_account_info.from_filename(filename, require=['client_email', 'token_uri'])\n    return cls._from_signer_and_info(signer, info, **kwargs)", "docstring": "Creates a Credentials instance from a service account json file.\n\nArgs:\nfilename (str): The path to the service account json file.\nkwargs: Additional arguments to pass to the constructor.\n\nReturns:\ngoogle.auth.service_account.Credentials: The constructed\ncredentials.", "source": "codesearchnet"}
{"code": "def set_parameter(self, name, value):\n    i = self.get_parameter_names(include_frozen=True).index(name)\n    v = self.get_parameter_vector(include_frozen=True)\n    v[i] = value\n    self.set_parameter_vector(v, include_frozen=True)", "docstring": "Set a parameter value by name\n\nArgs:\nname: The name of the parameter\nvalue (float): The new value for the parameter", "source": "codesearchnet"}
{"code": "def get_attribute(self, attribute: str) -> 'Node':\n        \n        matches = [\n            value_node for key_node, value_node in self.yaml_node.value\n            if key_node.value == attribute\n        ]\n        if len(matches) != 1:\n            raise SeasoningError(\n                'Attribute not found, or found multiple times: {}'.format(\n                    matches))\n        return Node(matches[0])", "docstring": "Returns the node representing the given attribute's value.\n\nUse only if is_mapping() returns true.\n\nArgs:\nattribute: The name of the attribute to retrieve.\n\nRaises:\nKeyError: If the attribute does not exist.\n\nReturns:\nA node representing the value.", "source": "juraj-google-style"}
{"code": "def add_observers(self, count, date_observed):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    data = {'count': count, 'dataObserved': self._utils.format_datetime(date_observed, date_format='%Y-%m-%dT%H:%M:%SZ')}\n    return self.tc_requests.add_observations(self.api_type, self.api_sub_type, self.unique_id, data, owner=self.owner)", "docstring": "Adds a Indicator Observation\n\nArgs:\ncount:\ndate_observed:", "source": "codesearchnet"}
{"code": "def assert_non_singular(self, name='assert_non_singular'):\n    with self._name_scope(name):\n        return self._assert_non_singular()", "docstring": "Returns an `Op` that asserts this operator is non singular.\n\nThis operator is considered non-singular if\n\n```\nConditionNumber < max{100, range_dimension, domain_dimension} * eps,\neps := np.finfo(self.dtype.as_numpy_dtype).eps\n```\n\nArgs:\nname:  A string name to prepend to created ops.\n\nReturns:\nAn `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\nthe operator is singular.", "source": "github-repos"}
{"code": "def get_keys(self, alias_name, key_format):\n    uri = ((((self.URI + '/keys/') + alias_name) + '?format=') + key_format)\n    return self._client.get(uri)", "docstring": "Retrieves the contents of PKCS12 file in the format specified.\nThis PKCS12 formatted file contains both the certificate as well as the key file data.\nValid key formats are Base64 and PKCS12.\n\nArgs:\nalias_name: Key pair associated with the RabbitMQ\nkey_format: Valid key formats are Base64 and PKCS12.\nReturns:\ndict: RabbitMQ certificate", "source": "codesearchnet"}
{"code": "def from_poscar_string(poscar_string, transformations=None):\n        \n        p = Poscar.from_string(poscar_string)\n        if not p.true_names:\n            raise ValueError(\"Transformation can be craeted only from POSCAR \"\n                             \"strings with proper VASP5 element symbols.\")\n        raw_string = re.sub(r\"'\", \"\\\"\", poscar_string)\n        s = p.structure\n        source_info = {\"source\": \"POSCAR\",\n                       \"datetime\": str(datetime.datetime.now()),\n                       \"original_file\": raw_string}\n        return TransformedStructure(s, transformations, history=[source_info])", "docstring": "Generates TransformedStructure from a poscar string.\n\nArgs:\nposcar_string (str): Input POSCAR string.\ntransformations ([Transformations]): Sequence of transformations\nto be applied to the input structure.", "source": "juraj-google-style"}
{"code": "def header_string_from_file(filename='feff.inp'):\n    with zopen(filename, 'r') as fobject:\n        f = fobject.readlines()\n        feff_header_str = []\n        ln = 0\n        try:\n            feffpmg = f[0].find('pymatgen')\n        except IndexError:\n            feffpmg = False\n        if feffpmg:\n            nsites = int(f[8].split()[2])\n            for line in f:\n                ln += 1\n                if (ln <= (nsites + 9)):\n                    feff_header_str.append(line)\n        else:\n            end = 0\n            for line in f:\n                if (((line[0] == '*') or (line[0] == 'T')) and (end == 0)):\n                    feff_header_str.append(line.replace('\\r', ''))\n                else:\n                    end = 1\n    return ''.join(feff_header_str)", "docstring": "Reads Header string from either a HEADER file or feff.inp file\nWill also read a header from a non-pymatgen generated feff.inp file\n\nArgs:\nfilename: File name containing the Header data.\n\nReturns:\nReads header string.", "source": "codesearchnet"}
{"code": "def _load_hdf5(self, filename, parent_level=\"CellpyData\"):\n        \n\n        if not os.path.isfile(filename):\n            self.logger.info(f\"file does not exist: {filename}\")\n            raise IOError\n        store = pd.HDFStore(filename)\n\n        \n        required_keys = ['dfdata', 'dfsummary', 'info']\n        required_keys = [\"/\" + parent_level + \"/\" + _ for _ in required_keys]\n\n        for key in required_keys:\n            if key not in store.keys():\n                self.logger.info(f\"This hdf-file is not good enough - \"\n                                 f\"at least one key is missing: {key}\")\n                raise Exception(f\"OH MY GOD! At least one crucial key\"\n                                f\"is missing {key}!\")\n\n        self.logger.debug(f\"Keys in current hdf5-file: {store.keys()}\")\n        data = DataSet()\n\n        if parent_level != \"CellpyData\":\n            self.logger.debug(\"Using non-default parent label for the \"\n                              \"hdf-store: {}\".format(parent_level))\n\n        \n        infotable = store.select(parent_level + \"/info\")\n        try:\n            data.cellpy_file_version = \\\n                self._extract_from_dict(infotable, \"cellpy_file_version\")\n        except Exception as e:\n            data.cellpy_file_version = 0\n            warnings.warn(f\"Unhandled exception raised: {e}\")\n\n        if data.cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:\n            raise WrongFileVersion\n\n        if data.cellpy_file_version > CELLPY_FILE_VERSION:\n            raise WrongFileVersion\n\n        data.dfsummary = store.select(parent_level + \"/dfsummary\")\n        data.dfdata = store.select(parent_level + \"/dfdata\")\n\n        try:\n            data.step_table = store.select(parent_level + \"/step_table\")\n        except Exception as e:\n            self.logging.debug(\"could not get step_table from cellpy-file\")\n            data.step_table = pd.DataFrame()\n            warnings.warn(f\"Unhandled exception raised: {e}\")\n\n        try:\n            fidtable = store.select(\n                parent_level + \"/fidtable\")  \n            \n            fidtable_selected = True\n        except Exception as e:\n            self.logging.debug(\"could not get fid-table from cellpy-file\")\n            fidtable = []\n\n            warnings.warn(\"no fidtable - you should update your hdf5-file\")\n            fidtable_selected = False\n        self.logger.debug(\"  h5\")\n        \n\n        newtests = []  \n\n        \n        \n        \n\n        data = self._load_infotable(data, infotable, filename)\n\n        if fidtable_selected:\n            data.raw_data_files, data.raw_data_files_length = \\\n                self._convert2fid_list(fidtable)\n        else:\n            data.raw_data_files = None\n            data.raw_data_files_length = None\n        newtests.append(data)\n        store.close()\n        \n        return newtests", "docstring": "Load a cellpy-file.\n\nArgs:\nfilename (str): Name of the cellpy file.\nparent_level (str) (optional): name of the parent level\n(defaults to \"CellpyData\")\n\nReturns:\nloaded datasets (DataSet-object)", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListAlertPolicies = channel.unary_unary(\n            \"/google.monitoring.v3.AlertPolicyService/ListAlertPolicies\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.ListAlertPoliciesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.ListAlertPoliciesResponse.FromString,\n        )\n        self.GetAlertPolicy = channel.unary_unary(\n            \"/google.monitoring.v3.AlertPolicyService/GetAlertPolicy\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.GetAlertPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.FromString,\n        )\n        self.CreateAlertPolicy = channel.unary_unary(\n            \"/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.CreateAlertPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.FromString,\n        )\n        self.DeleteAlertPolicy = channel.unary_unary(\n            \"/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.DeleteAlertPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.UpdateAlertPolicy = channel.unary_unary(\n            \"/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__service__pb2.UpdateAlertPolicyRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_alert__pb2.AlertPolicy.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def projection_error(nodes, projected):\n    relative_err = np.linalg.norm((nodes - projected), ord='fro')\n    if (relative_err != 0.0):\n        relative_err /= np.linalg.norm(nodes, ord='fro')\n    return relative_err", "docstring": "Compute the error between ``nodes`` and the projected nodes.\n\n.. note::\n\nThis is a helper for :func:`maybe_reduce`, which is in turn a helper\nfor :func:`_full_reduce`. Hence there is no corresponding Fortran\nspeedup.\n\nFor now, just compute the relative error in the Frobenius norm. But,\nwe may wish to consider the error per row / point instead.\n\nArgs:\nnodes (numpy.ndarray): Nodes in a curve.\nprojected (numpy.ndarray): The ``nodes`` projected into the\nspace of degree-elevated nodes.\n\nReturns:\nfloat: The relative error.", "source": "codesearchnet"}
{"code": "def expert_dot_product(q, k, v, info_q, info_k):\n    length_q = common_layers.shape_list(q)[0]\n    length_k = common_layers.shape_list(k)[0]\n    depth_v = v.get_shape().as_list()[(- 1)]\n    bias = attention_bias_coordinates(info_q.coordinates, info_k.coordinates)\n    if (info_k.order is not None):\n        bias += attention_bias_future(info_q.order, info_k.order)\n    (q, k, v) = [tf.expand_dims(tf.expand_dims(t, 0), 0) for t in (q, k, v)]\n\n    def is_zero():\n        zeros = tf.zeros(shape=[1, 1, length_q, depth_v], dtype=tf.float32)\n        zeros = tf.Print(zeros, [length_k, length_q], 'length_k/length_q: ')\n        return zeros\n\n    def is_not_zero():\n        return dot_product_attention(q, k, v, bias=bias, make_image_summary=False)\n    v_out = tf.cond(tf.logical_or(tf.equal(length_q, 0), tf.equal(length_k, 0)), is_zero, is_not_zero)\n    v_out = tf.squeeze(v_out, axis=0)\n    v_out = tf.squeeze(v_out, axis=0)\n    return v_out", "docstring": "Perform dot product on a subset of the sequence.\n\nCan add a mask to the attention to prevent sequences to attend to each other\nand to prevent attention to the future.\n\nArgs:\nq (tf.Tensor): Queries of shape [length_expert_q, depth_k]\nk (tf.Tensor): Keys of shape [length_expert_k, depth_k]\nv (tf.Tensor): Values of shape [length_expert_k, depth_v]\ninfo_q (BatchInfo): Batch info for queries. If None, no mask is added\ninfo_k (BatchInfo): Batch info for keys\n\nReturns:\ntf.Tensor: dot product attention output ([length_expert_q, depth_v])", "source": "codesearchnet"}
{"code": "def _FusedBatchNormGradGrad(op: ops.Operation, *grad):\n    data_format = op.get_attr('data_format')\n    epsilon = op.get_attr('epsilon')\n    is_training = op.get_attr('is_training')\n    grad_y = op.inputs[0]\n    x = op.inputs[1]\n    scale = op.inputs[2]\n    pop_mean = op.inputs[3]\n    pop_var = op.inputs[4]\n    grad_grad_x = grad[0]\n    grad_grad_scale = grad[1]\n    grad_grad_offset = grad[2]\n    with backprop.GradientTape() as tape:\n        tape.watch(grad_y)\n        tape.watch(x)\n        tape.watch(scale)\n        grad_x, grad_scale, grad_offset = _BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)\n        grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]\n    grad_grad_y, grad_x, grad_scale = tape.gradient([grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)\n    return (grad_grad_y, grad_x, grad_scale, None, None)", "docstring": "Returns the gradients for the 3 inputs of FusedBatchNormGrad.\n\nArgs:\nop: The FusedBatchNormGradOp for which we need to compute gradients.\n*grad: An argument list for tensors of gradients wrt the outputs with\ngrad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as\ngrad_grad_offset.\n\nReturns:\nA tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y\nis the gradient for grad_y, grad_x the gradient for x, grad_scale the\ngradient for scale.", "source": "github-repos"}
{"code": "def _add_write_pbs(self, write_pbs):\n    if self._read_only:\n        raise ValueError(_WRITE_READ_ONLY)\n    super(Transaction, self)._add_write_pbs(write_pbs)", "docstring": "Add `Write`` protobufs to this transaction.\n\nArgs:\nwrite_pbs (List[google.cloud.proto.firestore.v1beta1.\\\nwrite_pb2.Write]): A list of write protobufs to be added.\n\nRaises:\nValueError: If this transaction is read-only.", "source": "codesearchnet"}
{"code": "def recipe_bucket(config, auth_write, bucket_bucket, bucket_emails, bucket_groups):\n    bucket(config, {'auth': auth_write, 'bucket': bucket_bucket, 'emails': bucket_emails, 'groups': bucket_groups})", "docstring": "Create and permission a bucket in Storage.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nbucket_bucket (string) - Name of Google Cloud Bucket to create.\nbucket_emails (string_list) - Comma separated emails.\nbucket_groups (string_list) - Comma separated groups.", "source": "github-repos"}
{"code": "def plot_normal_cdf(rbound=None, lbound=None, mean=0, sd=1):\n    shade = ((rbound is not None) or (lbound is not None))\n    shade_left = ((rbound is not None) and (lbound is not None))\n    inf = (3.5 * sd)\n    step = 0.1\n    rlabel = rbound\n    llabel = lbound\n    if (rbound is None):\n        rbound = (inf + mean)\n        rlabel = '$\\\\infty$'\n    if (lbound is None):\n        lbound = ((- inf) + mean)\n        llabel = '-$\\\\infty$'\n    pdf_range = np.arange(((- inf) + mean), (inf + mean), step)\n    plt.plot(pdf_range, stats.norm.pdf(pdf_range, loc=mean, scale=sd), color='k', lw=1)\n    cdf_range = np.arange(lbound, (rbound + step), step)\n    if shade:\n        plt.fill_between(cdf_range, stats.norm.pdf(cdf_range, loc=mean, scale=sd), color='gold')\n    if shade_left:\n        cdf_range = np.arange(((- inf) + mean), (lbound + step), step)\n        plt.fill_between(cdf_range, stats.norm.pdf(cdf_range, loc=mean, scale=sd), color='darkblue')\n    plt.ylim(0, (stats.norm.pdf(0, loc=0, scale=sd) * 1.25))\n    plt.xlabel('z')\n    plt.ylabel('$\\\\phi$(z)', rotation=90)\n    plt.title('Normal Curve ~ ($\\\\mu$ = {0}, $\\\\sigma$ = {1}) {2} < z < {3}'.format(mean, sd, llabel, rlabel), fontsize=16)\n    plt.show()", "docstring": "Plots a normal curve with specified parameters and area below curve shaded\nbetween ``lbound`` and ``rbound``.\n\nArgs:\n``rbound`` (numeric): right boundary of shaded region\n\n``lbound`` (numeric): left boundary of shaded region; by default is negative infinity\n\n``mean`` (numeric): mean/expectation of normal distribution\n\n``sd`` (numeric): standard deviation of normal distribution", "source": "codesearchnet"}
{"code": "def import_gssapi_extension(name):\n    try:\n        path = 'gssapi.raw.ext_{0}'.format(name)\n        __import__(path)\n        return sys.modules[path]\n    except ImportError:\n        return None", "docstring": "Import a GSSAPI extension module\n\nThis method imports a GSSAPI extension module based\non the name of the extension (not including the\n'ext_' prefix).  If the extension is not available,\nthe method retuns None.\n\nArgs:\nname (str): the name of the extension\n\nReturns:\nmodule: Either the extension module or None", "source": "codesearchnet"}
{"code": "def start_of_chunk(prev_tag, tag, prev_type, type_):\n    \n    chunk_start = False\n\n    if tag == 'B': chunk_start = True\n    if tag == 'S': chunk_start = True\n\n    if prev_tag == 'E' and tag == 'E': chunk_start = True\n    if prev_tag == 'E' and tag == 'I': chunk_start = True\n    if prev_tag == 'S' and tag == 'E': chunk_start = True\n    if prev_tag == 'S' and tag == 'I': chunk_start = True\n    if prev_tag == 'O' and tag == 'E': chunk_start = True\n    if prev_tag == 'O' and tag == 'I': chunk_start = True\n\n    if tag != 'O' and tag != '.' and prev_type != type_:\n        chunk_start = True\n\n    return chunk_start", "docstring": "Checks if a chunk started between the previous and current word.\n\nArgs:\nprev_tag: previous chunk tag.\ntag: current chunk tag.\nprev_type: previous type.\ntype_: current type.\n\nReturns:\nchunk_start: boolean.", "source": "juraj-google-style"}
{"code": "def start_day_cycle(self, day_length):\n    if (day_length <= 0):\n        raise HolodeckException('The given day length should be between above 0!')\n    self._should_write_to_command_buffer = True\n    command_to_send = DayCycleCommand(True)\n    command_to_send.set_day_length(day_length)\n    self._commands.add_command(command_to_send)", "docstring": "Queue up a day cycle command to start the day cycle. It will be applied when `tick` or `step` is called next.\nThe sky sphere will now update each tick with an updated sun angle as it moves about the sky. The length of a\nday will be roughly equivalent to the number of minutes given.\n\nArgs:\nday_length (int): The number of minutes each day will be.", "source": "codesearchnet"}
{"code": "def indicator_associations_types(\n        self,\n        main_type,\n        sub_type,\n        unique_id,\n        association_type,\n        api_branch=None,\n        api_entity=None,\n        owner=None,\n        params=None,\n    ):\n        \n        params = params or {}\n        if owner:\n            params['owner'] = owner\n\n        api_branch = api_branch or association_type.api_sub_type\n        api_entity = api_entity or association_type.api_entity\n        if not sub_type:\n            url = '/v2/{}/{}/indicators/{}'.format(main_type, unique_id, api_branch)\n        else:\n            url = '/v2/{}/{}/{}/indicators/{}'.format(main_type, sub_type, unique_id, api_branch)\n\n        for iat in self._iterate(url, params, api_entity):\n            yield iat", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nassociation_type:\napi_branch:\napi_entity:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def avg(self, vars_list: List[str]) -> 'TensorFluent':\n    operand = self\n    if (operand.dtype == tf.bool):\n        operand = operand.cast(tf.float32)\n    return self._aggregation_op(tf.reduce_mean, operand, vars_list)", "docstring": "Returns the TensorFluent for the avg aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the avg aggregation function.", "source": "codesearchnet"}
{"code": "def batch_flatten(x):\n    x = array_ops.reshape(x, array_ops_stack.stack([-1, prod(shape(x)[1:])]))\n    return x", "docstring": "Turn a nD tensor into a 2D tensor with same 0th dimension.\n\nIn other words, it flattens each data samples of a batch.\n\nArgs:\nx: A tensor or variable.\n\nReturns:\nA tensor.\n\nExamples:\nFlattening a 3D tensor to 2D by collapsing the last dimension.\n\n>>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))\n>>> x_batch_flatten = batch_flatten(x_batch)\n>>> tf.keras.backend.int_shape(x_batch_flatten)\n(2, 60)", "source": "github-repos"}
{"code": "def __init__(self, model_name: str, columns: list[str], api_key: Optional[str]=None, organization: Optional[str]=None, dimensions: Optional[int]=None, user: Optional[str]=None, max_batch_size: Optional[int]=None, **kwargs):\n    self.model_name = model_name\n    self.api_key = api_key\n    self.organization = organization\n    self.dimensions = dimensions\n    self.user = user\n    self.max_batch_size = max_batch_size\n    super().__init__(columns=columns, **kwargs)", "docstring": "Embedding Config for OpenAI Text Embedding models.\nText Embeddings are generated for a batch of text using the OpenAI API.\n\nArgs:\nmodel_name: Name of the OpenAI embedding model\ncolumns: The columns where the embeddings will be stored in the output\napi_key: OpenAI API key\norganization: OpenAI organization ID\ndimensions: Specific embedding dimensions to use (if model supports it)\nuser: End-user identifier for tracking and rate limit calculations\nmax_batch_size: Maximum batch size for requests to OpenAI API", "source": "github-repos"}
{"code": "def make_grid(tensor, nrow=8, padding=2, pad_value=0):\n    \n    if not (isinstance(tensor, np.ndarray) or\n            (isinstance(tensor, list) and all(isinstance(t, np.ndarray) for t in tensor))):\n        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n    \n    if isinstance(tensor, list):\n        tensor = np.stack(tensor, 0)\n\n    if tensor.ndim == 2:  \n        tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1]))\n\n    if tensor.ndim == 3:\n        if tensor.shape[0] == 1:  \n            tensor = np.concatenate((tensor, tensor, tensor), 0)\n        tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1], tensor.shape[2]))\n\n    if tensor.ndim == 4 and tensor.shape[1] == 1:  \n        tensor = np.concatenate((tensor, tensor, tensor), 1)\n\n    if tensor.shape[0] == 1:\n        return np.squeeze(tensor)\n\n    \n    nmaps = tensor.shape[0]\n    xmaps = min(nrow, nmaps)\n    ymaps = int(math.ceil(float(nmaps) / xmaps))\n    height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding)\n    grid = np.ones((3, height * ymaps + padding, width * xmaps + padding)) * pad_value\n    k = 0\n    for y in range(ymaps):\n        for x in range(xmaps):\n            if k >= nmaps:\n                break\n            grid[:, y * height + padding:(y+1) * height,\\\n                 x * width + padding:(x+1) * width] = tensor[k]\n            k = k + 1\n    return grid", "docstring": "Make a grid of images, via numpy.\n\nArgs:\ntensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\nor a list of images all of the same size.\nnrow (int, optional): Number of images displayed in each row of the grid.\nThe Final grid size is (B / nrow, nrow). Default is 8.\npadding (int, optional): amount of padding. Default is 2.\npad_value (float, optional): Value for the padded pixels.", "source": "juraj-google-style"}
{"code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    task_type = task_type if task_type is not None else self.task_type\n    task_id = task_id if task_id is not None else self.task_id\n    if task_type is not None and task_id is not None:\n        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)\n    return ''", "docstring": "Returns the master string for connecting to a TensorFlow master.\n\nArgs:\ntask_type: (Optional) Overrides the default auto-selected task type.\ntask_id: (Optional) Overrides the default auto-selected task index.\nrpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses\nto communicate across nodes.\n\nReturns:\nA connection string for connecting to a TensorFlow master.", "source": "github-repos"}
{"code": "def create_toolbutton(entries, parent=None):\n    \n    btn = QtGui.QToolButton(parent)\n    menu = QtGui.QMenu()\n    actions = []\n\n    for label, slot in entries:\n        action = add_menu_action(menu, label, slot)\n        actions.append(action)\n\n    btn.setPopupMode(QtGui.QToolButton.MenuButtonPopup)\n    btn.setDefaultAction(actions[0])\n    btn.setMenu(menu)\n    return btn, actions", "docstring": "Create a toolbutton.\n\nArgs:\nentries: List of (label, slot) tuples.\n\nReturns:\n`QtGui.QToolBar`.", "source": "juraj-google-style"}
{"code": "def _fit(self, col):\n    column = col[self.col_name].replace({np.nan: np.inf})\n    frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict()\n    start = 0\n    end = 0\n    num_vals = len(col)\n    for val in frequencies:\n        prob = (frequencies[val] / num_vals)\n        end = (start + prob)\n        interval = (start, end)\n        mean = np.mean(interval)\n        std = (prob / 6)\n        self.probability_map[val] = (interval, mean, std)\n        start = end", "docstring": "Create a map of the empirical probability for each category.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.", "source": "codesearchnet"}
{"code": "def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):\n        \n        self.engine_file = os.path.expanduser(filepath)\n        uid = str(uuid.uuid4())\n        engine_json = None\n        try:\n            with open(self.engine_file, 'r') as f:\n                engine_json = f.read()\n\n        except OSError as e:\n            logger.error(\"Could not open engine_json : \", self.engine_file)\n            raise e\n\n        return .format(engine_dir, engine_json, container_image, debug_option=self.debug_option, uid=uid)", "docstring": "Reads the json contents from filepath and uses that to compose the engine launch command.\n\nNotes: Add this to the ipengine launch for debug logs :\n--log-to-file --debug\nArgs:\nfilepath (str): Path to the engine file\nengine_dir (str): CWD for the engines .\ncontainer_image (str): The container to be used to launch workers", "source": "juraj-google-style"}
{"code": "def _find_elements(self, result, elements):\n    element_mapping = {}\n    result = StringIO.StringIO(result)\n    for (_, e) in ET.iterparse(result, events=('end',)):\n        if (not elements):\n            break\n        if (e.tag in elements):\n            element_mapping[e.tag] = e.text\n            elements.remove(e.tag)\n    return element_mapping", "docstring": "Find interesting elements from XML.\n\nThis function tries to only look for specified elements\nwithout parsing the entire XML. The specified elements is better\nlocated near the beginning.\n\nArgs:\nresult: response XML.\nelements: a set of interesting element tags.\n\nReturns:\nA dict from element tag to element value.", "source": "codesearchnet"}
{"code": "def validate(self, *args, **kwargs): \n        \n        return super(ParameterValidator, self)._validate(*args, **kwargs)", "docstring": "Validate a parameter dict against a parameter schema from an ocrd-tool.json\n\nArgs:\nobj (dict):\nschema (dict):", "source": "juraj-google-style"}
{"code": "def fib(n):\n    assert (n > 0)\n    (a, b) = (1, 1)\n    for i in range((n - 1)):\n        (a, b) = (b, (a + b))\n    return a", "docstring": "Fibonacci example function\n\nArgs:\nn (int): integer\n\nReturns:\nint: n-th Fibonacci number", "source": "codesearchnet"}
{"code": "def recipe_trends_places_to_sheets_via_value(config, auth_write, secret, key, places_dataset, places_query, places_legacy, destination_sheet, destination_tab):\n    twitter(config, {'auth': auth_write, 'secret': secret, 'key': key, 'trends': {'places': {'single_cell': True, 'bigquery': {'dataset': places_dataset, 'query': places_query, 'legacy': places_legacy}}}, 'out': {'sheets': {'sheet': destination_sheet, 'tab': destination_tab, 'range': 'A1'}}})", "docstring": "Move using hard coded WOEID values.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nsecret (string) - NA\nkey (string) - NA\nplaces_dataset (string) - NA\nplaces_query (string) - NA\nplaces_legacy (boolean) - NA\ndestination_sheet (string) - NA\ndestination_tab (string) - NA", "source": "github-repos"}
{"code": "def share(self, group_id, group_access, expires_at=None, **kwargs):\n        \n        path = '/projects/%s/share' % self.get_id()\n        data = {'group_id': group_id,\n                'group_access': group_access,\n                'expires_at': expires_at}\n        self.manager.gitlab.http_post(path, post_data=data, **kwargs)", "docstring": "Share the project with a group.\n\nArgs:\ngroup_id (int): ID of the group.\ngroup_access (int): Access level for the group.\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server failed to perform the request", "source": "juraj-google-style"}
{"code": "def rep1sep(parser: Union[(Parser, Sequence[Input])], separator: Union[(Parser, Sequence[Input])]) -> RepeatedOnceSeparatedParser:\n    if isinstance(parser, str):\n        parser = lit(parser)\n    if isinstance(separator, str):\n        separator = lit(separator)\n    return RepeatedOnceSeparatedParser(parser, separator)", "docstring": "Match a parser one or more times separated by another parser.\n\nThis matches repeated sequences of ``parser`` separated by ``separator``.\nIf there is at least one match, a list containing the values of the\n``parser`` matches is returned. The values from ``separator`` are discarded.\nIf it does not match ``parser`` at all, it fails.\n\nArgs:\nparser: Parser or literal\nseparator: Parser or literal", "source": "codesearchnet"}
{"code": "def _add_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    return gen_sparse_ops.add_sparse_to_tensors_map(sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name)", "docstring": "Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.\n\nArgs:\nsp_input: The input `SparseTensor`.\ncontainer: The container for the underlying `SparseTensorsMap` (optional).\nshared_name: The shared name for the underlying `SparseTensorsMap`\n(optional, defaults to the name of the newly created op).\nname: A name prefix for the returned tensors (optional).\n\nReturns:\nA string 1-vector (1D `Tensor`), with the single element representing the\na unique handle to a `SparseTensor` stored by the `SparseTensorMap`\nunderlying this op.\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"}
{"code": "def to_event(self, event_type, field_name=None, depth=None):\n    if (self.ion_event is None):\n        value = self\n        if isinstance(self, IonPyNull):\n            value = None\n        self.ion_event = IonEvent(event_type, ion_type=self.ion_type, value=value, field_name=field_name, annotations=self.ion_annotations, depth=depth)\n    return self.ion_event", "docstring": "Constructs an IonEvent from this _IonNature value.\n\nArgs:\nevent_type (IonEventType): The type of the resulting event.\nfield_name (Optional[text]): The field name associated with this value, if any.\ndepth (Optional[int]): The depth of this value.\n\nReturns:\nAn IonEvent with the properties from this value.", "source": "codesearchnet"}
{"code": "def try_get_column(column_name, node, context):\n    selectable = get_node_selectable(node, context)\n    if (not hasattr(selectable, 'c')):\n        raise AssertionError(u'Selectable \"{}\" does not have a column collection. Context is {}.'.format(selectable, context))\n    return selectable.c.get(column_name, None)", "docstring": "Attempt to get a column by name from the selectable.\n\nArgs:\ncolumn_name: str, name of the column to retrieve.\nnode: SqlNode, the node the column is being retrieved for.\ncontext: CompilationContext, compilation specific metadata.\n\nReturns:\nOptional[column], the SQLAlchemy column if found, None otherwise.", "source": "codesearchnet"}
{"code": "def flash_progress_callback(action, progress_string, percentage):\n    \n    if action.lower() != 'compare':\n        return progress_bar(min(100, percentage), 100, prefix=action)\n\n    return None", "docstring": "Callback that can be used with ``JLink.flash()``.\n\nThis callback generates a progress bar in the console to show the progress\nof each of the steps of the flash.\n\nArgs:\naction (str): the current action being invoked\nprogress_string (str): the current step in the progress\npercentage (int): the percent to which the current step has been done\n\nReturns:\n``None``\n\nNote:\nThis function ignores the compare action.", "source": "juraj-google-style"}
{"code": "def from_json(cls, json):\n    \n    return cls(json[cls.BLOB_KEY_PARAM],\n               json[cls.START_INDEX_PARAM],\n               json[cls.END_INDEX_PARAM])", "docstring": "Creates an instance of the InputReader for the given input shard state.\n\nArgs:\njson: The InputReader state as a dict-like object.\n\nReturns:\nAn instance of the InputReader configured using the values of json.", "source": "juraj-google-style"}
{"code": "def _InitializeParserObjects(self, parser_filter_expression=None):\n    (self._formats_with_signatures, non_sigscan_parser_names) = parsers_manager.ParsersManager.GetFormatsWithSignatures(parser_filter_expression=parser_filter_expression)\n    self._non_sigscan_parser_names = []\n    for parser_name in non_sigscan_parser_names:\n        if (parser_name not in ('filestat', 'usnjrnl')):\n            self._non_sigscan_parser_names.append(parser_name)\n    self._file_scanner = parsers_manager.ParsersManager.CreateSignatureScanner(self._formats_with_signatures)\n    self._parsers = parsers_manager.ParsersManager.GetParserObjects(parser_filter_expression=parser_filter_expression)\n    active_parser_names = ', '.join(sorted(self._parsers.keys()))\n    logger.debug('Active parsers: {0:s}'.format(active_parser_names))\n    self._filestat_parser = self._parsers.get('filestat', None)\n    if ('filestat' in self._parsers):\n        del self._parsers['filestat']\n    self._mft_parser = self._parsers.get('mft', None)\n    self._usnjrnl_parser = self._parsers.get('usnjrnl', None)\n    if ('usnjrnl' in self._parsers):\n        del self._parsers['usnjrnl']", "docstring": "Initializes the parser objects.\n\nArgs:\nparser_filter_expression (Optional[str]): the parser filter expression,\nNone represents all parsers and plugins.\n\nThe parser filter expression is a comma separated value string that\ndenotes a list of parser names to include and/or exclude. Each entry\ncan have the value of:\n\n* An exact match of a list of parsers, or a preset (see\ndata/presets.yaml for the list of predefined presets).\n* A name of a single parser (case insensitive), e.g. msiecf.\n* A glob name for a single parser, e.g. '*msie*' (case insensitive).", "source": "codesearchnet"}
{"code": "def processMailList(platformNames=[], emails=[]):\n    \n    \n    platforms = platform_selection.getPlatformsByName(platformNames, mode=\"mailfy\")\n\n    results = []\n    for e in emails:\n        for pla in platforms:\n            \n            entities = pla.getInfo(query=e, mode=\"mailfy\")\n            if entities != {}:\n                results += json.loads(entities)\n    return results", "docstring": "Method to perform the email search.\n\nArgs:\n-----\nplatformNames: List of names of the platforms.\nemails: List of numbers to be queried.\n\nReturn:\n-------\nA list of verified emails.", "source": "juraj-google-style"}
{"code": "def _check_state_for_finalize_write(self, writer_results, num_shards):\n    if not writer_results:\n        return ([], [], [], 0)\n    src_glob = FileSystems.join(FileSystems.split(writer_results[0])[0], '*')\n    dst_glob = self._get_final_name_glob(num_shards)\n    src_glob_files = set((file_metadata.path for mr in FileSystems.match([src_glob]) for file_metadata in mr.metadata_list))\n    dst_glob_files = set((file_metadata.path for mr in FileSystems.match([dst_glob]) for file_metadata in mr.metadata_list))\n    src_files = []\n    dst_files = []\n    delete_files = []\n    num_skipped = 0\n    for shard_num, src in enumerate(writer_results):\n        final_name = self._get_final_name(shard_num, num_shards)\n        dst = final_name\n        src_exists = src in src_glob_files\n        dst_exists = dst in dst_glob_files\n        if not src_exists and (not dst_exists):\n            raise BeamIOError('src and dst files do not exist. src: %s, dst: %s' % (src, dst))\n        if not src_exists and dst_exists:\n            _LOGGER.debug('src: %s -> dst: %s already renamed, skipping', src, dst)\n            num_skipped += 1\n            continue\n        if src_exists and dst_exists and (FileSystems.checksum(src) == FileSystems.checksum(dst)):\n            _LOGGER.debug('src: %s == dst: %s, deleting src', src, dst)\n            delete_files.append(src)\n            continue\n        src_files.append(src)\n        dst_files.append(dst)\n    self._report_sink_lineage(dst_glob, dst_files)\n    return (src_files, dst_files, delete_files, num_skipped)", "docstring": "Checks writer output files' states.\n\nReturns:\nsrc_files, dst_files: Lists of files to rename. For each i, finalize_write\nshould rename(src_files[i], dst_files[i]).\ndelete_files: Src files to delete. These could be leftovers from an\nincomplete (non-atomic) rename operation.\nnum_skipped: Tally of writer results files already renamed, such as from\na previous run of finalize_write().", "source": "github-repos"}
{"code": "def sub_index(self, sub, start=0, end=None):\n    start_index = self.index(sub[0], start, end)\n    end = self._fix_end_index(end)\n    if ((start_index + len(sub)) > end):\n        raise ValueError\n    for i in range(1, len(sub)):\n        if (sub[i] != self[(start_index + i)]):\n            raise ValueError\n    return start_index", "docstring": "Return the index of a subsequence.\n\nThis runs in O(len(sub))\n\nArgs:\nsub (Sequence): An Iterable to search for\nReturns:\nint: The index of the first element of sub\nRaises:\nValueError: If sub isn't a subsequence\nTypeError: If sub isn't iterable\nIndexError: If start or end are out of range", "source": "codesearchnet"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    if position_ids is None:\n        batch_size, sequence_length = input_ids.shape\n        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n    outputs = self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)\n    if return_dict:\n        outputs = FlaxBaseModelOutput(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import FlaxEncoderDecoderModel, BertTokenizer\n\n>>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized\n>>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained(\"google-bert/bert-base-cased\", \"openai-community/gpt2\")\n\n>>> tokenizer = BertTokenizer.from_pretrained(\"google-bert/bert-base-cased\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> input_ids = tokenizer.encode(text, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(input_ids)\n```", "source": "github-repos"}
{"code": "def _process_scopes(scopes):\n    all_scopes = set()\n    sufficient_scopes = set()\n    for scope_set in scopes:\n        scope_set_scopes = frozenset(scope_set.split())\n        all_scopes.update(scope_set_scopes)\n        sufficient_scopes.add(scope_set_scopes)\n    return (all_scopes, sufficient_scopes)", "docstring": "Parse a scopes list into a set of all scopes and a set of sufficient scope sets.\n\nscopes: A list of strings, each of which is a space-separated list of scopes.\nExamples: ['scope1']\n['scope1', 'scope2']\n['scope1', 'scope2 scope3']\n\nReturns:\nall_scopes: a set of strings, each of which is one scope to check for\nsufficient_scopes: a set of sets of strings; each inner set is\na set of scopes which are sufficient for access.\nExample: {{'scope1'}, {'scope2', 'scope3'}}", "source": "codesearchnet"}
{"code": "def dot(inputs, axes=-1, **kwargs):\n    return Dot(axes=axes, **kwargs)(inputs)", "docstring": "Functional interface to the `Dot` layer.\n\nArgs:\ninputs: A list of input tensors (at least 2).\naxes: Integer or tuple of integers,\naxis or axes along which to take the dot product.\nnormalize: Whether to L2-normalize samples along the\ndot product axis before taking the dot product.\nIf set to `True`, then the output of the dot product\nis the cosine proximity between the two samples.\n**kwargs: Standard layer keyword arguments.\n\nReturns:\nA tensor, the dot product of the samples from the inputs.", "source": "github-repos"}
{"code": "def permute(self, ordering: np.ndarray, *, axis: int) -> None:\n    if (axis not in (0, 1)):\n        raise ValueError('Axis must be 0 (rows) or 1 (columns)')\n    for layer in self.layers.values():\n        layer._permute(ordering, axis=axis)\n    if (axis == 0):\n        if (self.row_graphs is not None):\n            for g in self.row_graphs.values():\n                g._permute(ordering)\n        for a in self.row_attrs.values():\n            a._permute(ordering)\n    elif (axis == 1):\n        if (self.col_graphs is not None):\n            for g in self.col_graphs.values():\n                g._permute(ordering)\n        for a in self.col_attrs.values():\n            a._permute(ordering)", "docstring": "Permute the view, by permuting its layers, attributes and graphs\n\nArgs:\nordering (np.ndarray):\tThe desired ordering along the axis\naxis (int):\t\t\t\t0, permute rows; 1, permute columns", "source": "codesearchnet"}
{"code": "def _process_worker(call_queue, result_queue):\n    \n    while True:\n        call_item = call_queue.get(block=True)\n        if call_item is None:\n            \n            result_queue.put(None)\n            return\n        try:\n            r = call_item.fn(*call_item.args, **call_item.kwargs)\n        except BaseException:\n            e = sys.exc_info()[1]\n            result_queue.put(_ResultItem(call_item.work_id,\n                                         exception=e))\n        else:\n            result_queue.put(_ResultItem(call_item.work_id,\n                                         result=r))", "docstring": "Evaluates calls from call_queue and places the results in result_queue.\n\nThis worker is run in a separate process.\n\nArgs:\ncall_queue: A multiprocessing.Queue of _CallItems that will be read and\nevaluated by the worker.\nresult_queue: A multiprocessing.Queue of _ResultItems that will written\nto by the worker.\nshutdown: A multiprocessing.Event that will be set as a signal to the\nworker that it should exit when call_queue is empty.", "source": "juraj-google-style"}
{"code": "def parse_location(location):\n    \n    def split_dms(text, hemisphere):\n        \n        out = []\n        sect = []\n        for i in text:\n            if i.isdigit():\n                sect.append(i)\n            else:\n                out.append(sect)\n                sect = []\n        d, m, s = [float(''.join(i)) for i in out]\n        if hemisphere in 'SW':\n            d, m, s = [-1 * x for x in (d, m, s)]\n        return to_dd(d, m, s)\n\n    for sep in ';, ':\n        chunks = location.split(sep)\n        if len(chunks) == 2:\n            if chunks[0].endswith('N'):\n                latitude = float(chunks[0][:-1])\n            elif chunks[0].endswith('S'):\n                latitude = -1 * float(chunks[0][:-1])\n            else:\n                latitude = float(chunks[0])\n            if chunks[1].endswith('E'):\n                longitude = float(chunks[1][:-1])\n            elif chunks[1].endswith('W'):\n                longitude = -1 * float(chunks[1][:-1])\n            else:\n                longitude = float(chunks[1])\n            return latitude, longitude\n        elif len(chunks) == 4:\n            if chunks[0].endswith(('s', '\"')):\n                latitude = split_dms(chunks[0], chunks[1])\n            else:\n                latitude = float(chunks[0])\n                if chunks[1] == 'S':\n                    latitude = -1 * latitude\n            if chunks[2].endswith(('s', '\"')):\n                longitude = split_dms(chunks[2], chunks[3])\n            else:\n                longitude = float(chunks[2])\n                if chunks[3] == 'W':\n                    longitude = -1 * longitude\n            return latitude, longitude", "docstring": "Parse latitude and longitude from string location.\n\nArgs:\nlocation (str): String to parse\n\nReturns:\ntuple of float: Latitude and longitude of location", "source": "juraj-google-style"}
{"code": "def to_unicode(self, s):\n    if isinstance(s, unicode):\n        return s\n    if isinstance(s, str):\n        return unicode(s, errors='ignore')\n    return s", "docstring": "Convert an elementary datatype to unicode.\n\nArgs:\ns: the datatype to be unicoded.\n\nReturns:\nUnicoded data.", "source": "codesearchnet"}
{"code": "def _CheckAtLeast3DImage(image, require_static=True):\n    try:\n        if image.get_shape().ndims is None:\n            image_shape = image.get_shape().with_rank(3)\n        else:\n            image_shape = image.get_shape().with_rank_at_least(3)\n    except ValueError:\n        raise ValueError(\"'image' (shape %s) must be at least three-dimensional.\" % image.shape)\n    if require_static and (not image_shape.is_fully_defined()):\n        raise ValueError(\"'image' must be fully defined.\")\n    if any((x == 0 for x in image_shape[-3:])):\n        raise ValueError(\"inner 3 dims of 'image.shape' must be > 0: %s\" % image_shape)\n    if not image_shape[-3:].is_fully_defined():\n        return [check_ops.assert_positive(array_ops.shape(image)[-3:], [\"inner 3 dims of 'image.shape' must be > 0.\"]), check_ops.assert_greater_equal(array_ops.rank(image), 3, message=\"'image' must be at least three-dimensional.\")]\n    else:\n        return []", "docstring": "Assert that we are working with a properly shaped image.\n\nArgs:\nimage: >= 3-D Tensor of size [*, height, width, depth]\nrequire_static: If `True`, requires that all dimensions of `image` are known\nand non-zero.\n\nRaises:\nValueError: if image.shape is not a [>= 3] vector.\n\nReturns:\nAn empty list, if `image` has fully defined dimensions. Otherwise, a list\ncontaining an assert op is returned.", "source": "github-repos"}
{"code": "def zeros_like(x, dtype=None):\n    if any_symbolic_tensors((x,)):\n        return ZerosLike(dtype=dtype).symbolic_call(x)\n    return backend.numpy.zeros_like(x, dtype=dtype)", "docstring": "Return a tensor of zeros with the same shape and type as `x`.\n\nArgs:\nx: Input tensor.\ndtype: Overrides the data type of the result.\n\nReturns:\nA tensor of zeros with the same shape and type as `x`.", "source": "github-repos"}
{"code": "def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):\n  \n  with tf.variable_scope(\"softmax_cross_entropy_one_hot\",\n                         values=[logits, labels]):\n    del weights_fn\n    cross_entropy = tf.losses.softmax_cross_entropy(\n        onehot_labels=labels, logits=logits)\n    return cross_entropy, tf.constant(1.0)", "docstring": "Calculate softmax cross entropy given one-hot labels and logits.\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\ncross-entropy (scalar), weights", "source": "juraj-google-style"}
{"code": "def _get_associated_classnames(self, classname, namespace, assoc_class, result_class, result_role, role):\n    class_repo = self._get_class_repo(namespace)\n    result_classes = self._classnamedict(result_class, namespace)\n    assoc_classes = self._classnamedict(assoc_class, namespace)\n    rtn_classnames_set = set()\n    role = (role.lower() if role else role)\n    result_role = (result_role.lower() if result_role else result_role)\n    ref_clns = self._get_reference_classnames(classname, namespace, assoc_class, role)\n    cls = [class_repo[cln] for cln in ref_clns]\n    for cl in cls:\n        for prop in six.itervalues(cl.properties):\n            if (prop.type == 'reference'):\n                if self._assoc_prop_matches(prop, cl.classname, assoc_classes, result_classes, result_role):\n                    rtn_classnames_set.add(prop.reference_class)\n    return list(rtn_classnames_set)", "docstring": "Get list of classnames that are associated classes for which this\nclassname is a target filtered by the assoc_class, role, result_class,\nand result_role parameters if they are none.\n\nThis is a common method used by all of the other reference and\nassociator methods to create a list of reference classnames\n\nReturns:\nlist of classnames that satisfy the criteria.", "source": "codesearchnet"}
{"code": "def deserialize_subject_info(subject_info_xml):\n    \n    try:\n        return d1_common.xml.deserialize(subject_info_xml)\n    except ValueError as e:\n        raise d1_common.types.exceptions.InvalidToken(\n            0,\n            'Could not deserialize SubjectInfo. subject_info=\"{}\", error=\"{}\"'.format(\n                subject_info_xml, str(e)\n            ),\n        )", "docstring": "Deserialize SubjectInfo XML doc to native object.\n\nArgs:\nsubject_info_xml: str\nSubjectInfo XML doc\n\nReturns:\nSubjectInfo PyXB object", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor:\n    hidden_states = self.patch_embed(hidden_states)\n    rotary_pos_emb = self.rot_pos_emb(grid_thw)\n    window_index, cu_window_seqlens = self.get_window_index(grid_thw)\n    cu_window_seqlens = torch.tensor(cu_window_seqlens, device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)\n    cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)\n    seq_len, _ = hidden_states.size()\n    hidden_states = hidden_states.reshape(seq_len \n    hidden_states = hidden_states[window_index, :, :]\n    hidden_states = hidden_states.reshape(seq_len, -1)\n    rotary_pos_emb = rotary_pos_emb.reshape(seq_len \n    rotary_pos_emb = rotary_pos_emb[window_index, :, :]\n    rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)\n    cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)\n    cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)\n    for layer_num, blk in enumerate(self.blocks):\n        if layer_num in self.fullatt_block_indexes:\n            cu_seqlens_now = cu_seqlens\n        else:\n            cu_seqlens_now = cu_window_seqlens\n        if self.gradient_checkpointing and self.training:\n            hidden_states = self._gradient_checkpointing_func(blk.__call__, hidden_states, cu_seqlens_now, rotary_pos_emb)\n        else:\n            hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, rotary_pos_emb=rotary_pos_emb)\n    hidden_states = self.merger(hidden_states)\n    reverse_indices = torch.argsort(window_index)\n    hidden_states = hidden_states[reverse_indices, :]\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):\nThe final hidden states of the model.\ngrid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):\nThe temporal, height and width of feature shape of each image in LLM.\n\nReturns:\n`torch.Tensor`: hidden_states.", "source": "github-repos"}
{"code": "def query(self, src: Any, use_inferred: bool=False) -> Any:\n    return self._query(0, src, use_inferred)", "docstring": "Query the value from the source object based on current path.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Int()),\n('y', pg.typing.Str())\n])\nclass A(pg.Object):\npass\n\n@pg.members([\n('z', pg.typing.Object(A))\n])\nclass B(pg.Object):\npass\n\nb = B(z=A(x=1, y='foo'))\nassert pg.KeyPath.parse('z.x').query(b) == 1\n\nArgs:\nsrc: Source value to query.\nuse_inferred: If True, infer `pg.Inferential` values. Otherwise returns\ntheir symbolic form. Applicable only for symbolic values.\n\nReturns:\nValue from src if path exists.\n\nRaises:\nKeyError: Path doesn't exist in src.\nRuntimeError: Called on a KeyPath that is considered as removed.", "source": "github-repos"}
{"code": "def _is_valid(self, value):\n        \n\n        \n        \n        if hasattr(self._type, \"istypeof\"):\n            return self._type.istypeof(value)\n        else:\n            return isinstance(value, self._type)", "docstring": "Return True if the input value is valid for insertion into the\ninner list.\n\nArgs:\nvalue: An object about to be inserted.", "source": "juraj-google-style"}
{"code": "def set_shape(self, shape):\n    self._ref().set_shape(shape)\n    self.value().set_shape(shape)", "docstring": "Overrides the shape for this variable.\n\nArgs:\nshape: the `TensorShape` representing the overridden shape.", "source": "github-repos"}
{"code": "def sort_ordered_objects(items, getter=lambda x: x):\n    \n    return sorted(items, key=lambda x: getattr(getter(x), OrderedBase.CREATION_COUNTER_FIELD, -1))", "docstring": "Sort an iterable of OrderedBase instances.\n\nArgs:\nitems (iterable): the objects to sort\ngetter (callable or None): a function to extract the OrderedBase instance from an object.\n\nExamples:\n>>> sort_ordered_objects([x, y, z])\n>>> sort_ordered_objects(v.items(), getter=lambda e: e[1])", "source": "juraj-google-style"}
{"code": "def rename_nodes(self, renaming_map):\n        \n        if not isinstance(renaming_map, dict):\n            raise TypeError(\"renaming_map must be a dict\")\n        for node in self.traverse_preorder():\n            if node.label in renaming_map:\n                node.label = renaming_map[node.label]", "docstring": "Rename nodes in this ``Tree``\n\nArgs:\n``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)", "source": "juraj-google-style"}
{"code": "def is_done(self, transform: Optional[AppliedPTransform]=None) -> bool:\n    if transform:\n        return self._is_transform_done(transform)\n    for applied_ptransform in self._step_names:\n        if not self._is_transform_done(applied_ptransform):\n            return False\n    return True", "docstring": "Checks completion of a step or the pipeline.\n\nArgs:\ntransform: AppliedPTransform to check for completion.\n\nReturns:\nTrue if the step will not produce additional output. If transform is None\nreturns true if all steps are done.", "source": "github-repos"}
{"code": "def queryString_required(strList):\n\t\n\tdef _dec(function):\n\t\t@wraps(function)\n\t\tdef _wrap(request, *args, **kwargs):\n\t\t\tfor i in strList:\n\t\t\t\tif i not in request.GET:\n\t\t\t\t\traise Http404(\"api does not exist\")\n\t\t\treturn function(request, *args, **kwargs)\n\t\treturn _wrap\n\treturn _dec", "docstring": "A decorator checking whether the required queryString keys are present.\nArgs:\nstrList: required queryString keys\n\nReturns:\nthe wrapped view; if a required queryString key is missing, it raises an Http404 exception.", "source": "juraj-google-style"}
{"code": "def modutf7_decode(data: bytes) -> str:\n    \n    parts = []\n    is_usascii = True\n    buf = memoryview(data)\n    while buf:\n        byte = buf[0]\n        if is_usascii:\n            if buf[0:2] == b'&-':\n                parts.append('&')\n                buf = buf[2:]\n            elif byte == 0x26:\n                is_usascii = False\n                buf = buf[1:]\n            else:\n                parts.append(chr(byte))\n                buf = buf[1:]\n        else:\n            for i, byte in enumerate(buf):\n                if byte == 0x2d:\n                    to_decode = buf[:i].tobytes()\n                    decoded = _modified_b64decode(to_decode)\n                    parts.append(decoded)\n                    buf = buf[i + 1:]\n                    is_usascii = True\n                    break\n    if not is_usascii:\n        to_decode = buf.tobytes()\n        decoded = _modified_b64decode(to_decode)\n        parts.append(decoded)\n    return ''.join(parts)", "docstring": "Decode the bytestring using modified UTF-7.\n\nArgs:\ndata: The encoded bytestring to decode.", "source": "juraj-google-style"}
{"code": "def reset( self ):\n        \n        self.lattice.reset()\n        for atom in self.atoms.atoms:\n            atom.reset()", "docstring": "Reset all counters for this simulation.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def append(self, node):\n    \n    if not isinstance(node, grammar.STATEMENTS):\n      raise ValueError\n    self.to_append[-1].append(node)", "docstring": "Append a statement to the current statement.\n\nNote that multiple calls to append will result in the last statement to be\nappended to end up at the bottom.\n\nArgs:\nnode: The statement to append.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "juraj-google-style"}
{"code": "def clone(self, opts):\n        \n        topt = self.opts.copy()\n        topt.update(opts)\n        return self.__class__(self.modl, self.name, self.info, topt)", "docstring": "Create a new instance of this type with the specified options.\n\nArgs:\nopts (dict): The type specific options for the new instance.", "source": "juraj-google-style"}
{"code": "def __add__(self, other):\n    ret = RichLine()\n    if isinstance(other, str):\n        ret.text = self.text + other\n        ret.font_attr_segs = self.font_attr_segs[:]\n        return ret\n    elif isinstance(other, RichLine):\n        ret.text = self.text + other.text\n        ret.font_attr_segs = self.font_attr_segs[:]\n        old_len = len(self.text)\n        for start, end, font_attr in other.font_attr_segs:\n            ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))\n        return ret\n    else:\n        raise TypeError('%r cannot be concatenated with a RichLine' % other)", "docstring": "Concatenate two chunks of maybe rich text to make a longer rich line.\n\nDoes not modify self.\n\nArgs:\nother: Another piece of text to concatenate with this one.\nIf it is a plain str, it will be appended to this string with no\nattributes.  If it is a RichLine, it will be appended to this string\nwith its attributes preserved.\n\nReturns:\nA new RichLine comprising both chunks of text, with appropriate\nattributes applied to the corresponding substrings.", "source": "github-repos"}
{"code": "def _reciprocal_condition_number(lu_mat, one_norm):\n    if (_scipy_lapack is None):\n        raise OSError('This function requires SciPy for calling into LAPACK.')\n    (rcond, info) = _scipy_lapack.dgecon(lu_mat, one_norm)\n    if (info != 0):\n        raise RuntimeError('The reciprocal 1-norm condition number could not be computed.')\n    return rcond", "docstring": "Compute reciprocal condition number of a matrix.\n\nArgs:\nlu_mat (numpy.ndarray): A 2D array of a matrix :math:`A` that has been\nLU-factored, with the non-diagonal part of :math:`L` stored in the\nstrictly lower triangle and :math:`U` stored in the upper triangle.\none_norm (float): The 1-norm of the original matrix :math:`A`.\n\nReturns:\nfloat: The reciprocal condition number of :math:`A`.\n\nRaises:\nOSError: If SciPy is not installed.\nRuntimeError: If the reciprocal 1-norm condition number could not\nbe computed.", "source": "codesearchnet"}
{"code": "def get(self, key, default='', stringify=True):\n    \n    obj = self.__getitem__(key)\n    if obj is None:\n      obj = default\n    elif stringify:\n      obj = str(obj)\n    return obj", "docstring": "Returns dictionary values or default.\n\nArgs:\nkey: string. Dictionary key to look up.\ndefault: string. Return this value if key not found.\nstringify: bool. Force all return values to string for compatibility\nreasons.\nReturns:\npython-wrapped CF object or default if not found.", "source": "juraj-google-style"}
{"code": "def _ParseCommon2003CachedEntry(self, value_data, cached_entry_offset):\n    \n    data_type_map = self._GetDataTypeMap(\n        'appcompatcache_cached_entry_2003_common')\n\n    try:\n      cached_entry = self._ReadStructureFromByteStream(\n          value_data[cached_entry_offset:], cached_entry_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse cached entry value with error: {0!s}'.format(\n              exception))\n\n    if cached_entry.path_size > cached_entry.maximum_path_size:\n      raise errors.ParseError('Path size value out of bounds.')\n\n    path_end_of_string_size = (\n        cached_entry.maximum_path_size - cached_entry.path_size)\n    if cached_entry.path_size == 0 or path_end_of_string_size != 2:\n      raise errors.ParseError('Unsupported path size values.')\n\n    return cached_entry", "docstring": "Parses the cached entry structure common for Windows 2003, Vista and 7.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nappcompatcache_cached_entry_2003_common: cached entry structure common\nfor Windows 2003, Windows Vista and Windows 7.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def update(self, resource, id_or_uri=None, timeout=(- 1)):\n    uri = resource.pop('uri', None)\n    if (not uri):\n        if (not id_or_uri):\n            raise ValueError('URI was not provided')\n        uri = self._client.build_uri(id_or_uri)\n    return self._client.update(resource=resource, uri=uri, timeout=timeout)", "docstring": "Updates the specified alert resource.\n\nArgs:\nresource (dict): Object to update.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated alert.", "source": "codesearchnet"}
{"code": "def to(self, *args, **kwargs) -> 'BatchFeature':\n    requires_backends(self, ['torch'])\n    import torch\n    device = kwargs.get('device')\n    non_blocking = kwargs.get('non_blocking', False)\n    if device is None and len(args) > 0:\n        arg = args[0]\n        if is_torch_dtype(arg):\n            pass\n        elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):\n            device = arg\n        else:\n            raise ValueError(f'Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.')\n\n    def maybe_to(v):\n        if isinstance(v, torch.Tensor) and torch.is_floating_point(v):\n            return v.to(*args, **kwargs)\n        elif isinstance(v, torch.Tensor) and device is not None:\n            return v.to(device=device, non_blocking=non_blocking)\n        else:\n            return v\n    self.data = {k: maybe_to(v) for k, v in self.items()}\n    return self", "docstring": "Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in\ndifferent `dtypes` and sending the `BatchFeature` to a different `device`.\n\nArgs:\nargs (`Tuple`):\nWill be passed to the `to(...)` function of the tensors.\nkwargs (`Dict`, *optional*):\nWill be passed to the `to(...)` function of the tensors.\nTo enable asynchronous data transfer, set the `non_blocking` flag in `kwargs` (defaults to `False`).\n\nReturns:\n[`BatchFeature`]: The same instance after modification.", "source": "github-repos"}
{"code": "def check_lines(first, second):\n    if (not ((first.__class__ is Linearization) and (second.__class__ is Linearization) and (first.error == 0.0) and (second.error == 0.0))):\n        return (False, None)\n    (s, t, success) = segment_intersection(first.start_node, first.end_node, second.start_node, second.end_node)\n    if success:\n        if (_helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(t, 0.0, 1.0)):\n            intersections = np.asfortranarray([[s], [t]])\n            result = (intersections, False)\n        else:\n            result = (np.empty((2, 0), order='F'), False)\n    else:\n        (disjoint, params) = parallel_lines_parameters(first.start_node, first.end_node, second.start_node, second.end_node)\n        if disjoint:\n            result = (np.empty((2, 0), order='F'), False)\n        else:\n            result = (params, True)\n    return (True, result)", "docstring": "Checks if two curves are lines and tries to intersect them.\n\n.. note::\n\nThis is a helper for :func:`._all_intersections`.\n\nIf they are not lines / not linearized, immediately returns :data:`False`\nwith no \"return value\".\n\nIf they are lines, attempts to intersect them (even if they are parallel\nand share a coincident segment).\n\nArgs:\nfirst (Union[SubdividedCurve, Linearization]): First curve being\nintersected.\nsecond (Union[SubdividedCurve, Linearization]): Second curve being\nintersected.\n\nReturns:\nTuple[bool, Optional[Tuple[numpy.ndarray, bool]]]: A pair of\n\n* Flag indicating if both candidates in the pair are lines.\n* Optional \"result\" populated only if both candidates are lines.\nWhen this result is populated, it will be a pair of\n\n* array of parameters of intersection\n* flag indicating if the two candidates share a coincident segment", "source": "codesearchnet"}
{"code": "def add_authorization_policy(access_token, ck_id, oid):\n    \n    path = '/ContentKeys'\n    body = '{\"AuthorizationPolicyId\":\"' + oid + '\"}'\n    return helper_add(access_token, ck_id, path, body)", "docstring": "Add Media Service Authorization Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nck_id (str): A Media Service Asset Content Key ID.\noid (str): A Media Service OID.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def encoding_specs(self, spec):\n    raise NotImplementedError(f'{type(self).__name__}.encoding_specs')", "docstring": "Returns a nest of `TypeSpec`(s) describing the encoding for `spec`.\n\nArgs:\nspec: The TypeSpec whose encoding should be described.\n\nReturns:\nA nest (as defined by `tf.nest) of `tf.TypeSpec`, describing the values\nthat are returned by `self.encode(spec, ...)`.  All TypeSpecs in this\nnest must be batchable.", "source": "github-repos"}
{"code": "def get_task_info(self):\n    return (self.task_type, self.task_id)", "docstring": "Returns job name and task_id for the process which calls this.\n\nThis returns the job name and task index for the process which calls this\nfunction according to its rank and cluster specification. The job name and\ntask index are set after a cluster is constructed by cluster_spec otherwise\ndefaults to None.\n\nReturns:\nA string specifying job name the process belongs to and an integer\nspecifying the task index the process belongs to in that job.", "source": "github-repos"}
{"code": "def dump(self, conf_file=None):\n    if conf_file:\n        conf_dir = os.path.dirname(conf_file)\n        if (not conf_dir):\n            conf_dir = self.__invoke_dir\n        elif (not os.path.exists(conf_dir)):\n            os.makedirs(conf_dir)\n    else:\n        conf_dir = self.__conf_dir\n    final_conf = {}\n    for (key, value) in list(self.__config.items()):\n        if (key in self.__cli):\n            continue\n        final_conf[key] = value\n    for (key, value) in list(self.__cli.items()):\n        if (key.endswith('index') or (key in ['sitemap', 'output'])):\n            path = self.__abspath(value, from_conf=False)\n            if path:\n                relpath = os.path.relpath(path, conf_dir)\n                final_conf[key] = relpath\n        elif (key.endswith('sources') or key.endswith('source_filters')):\n            new_list = []\n            for path in value:\n                path = self.__abspath(path, from_conf=False)\n                if path:\n                    relpath = os.path.relpath(path, conf_dir)\n                    new_list.append(relpath)\n            final_conf[key] = new_list\n        elif (key not in ['command', 'output_conf_file']):\n            final_conf[key] = value\n    with open((conf_file or self.conf_file or 'hotdoc.json'), 'w') as _:\n        _.write(json.dumps(final_conf, sort_keys=True, indent=4))", "docstring": "Dump the possibly updated config to a file.\n\nArgs:\nconf_file: str, the destination, or None to overwrite the\nexisting configuration.", "source": "codesearchnet"}
{"code": "def content(self):\n    if (self._content is None):\n        self._content = self.parse_files()\n    return self._content", "docstring": "Return parsed data. Parse it if not already parsed.\n\nReturns:\nlist: list of dictionaries (one for each parsed line).", "source": "codesearchnet"}
{"code": "def get_lacp_mode(self, name):\n        \n        members = self.get_members(name)\n        if not members:\n            return DEFAULT_LACP_MODE\n\n        for member in self.get_members(name):\n            match = re.search(r'channel-group\\s\\d+\\smode\\s(?P<value>.+)',\n                              self.get_block('^interface %s' % member))\n            return match.group('value')", "docstring": "Returns the LACP mode for the specified Port-Channel interface\n\nArgs:\nname(str): The Port-Channel interface name to return the LACP\nmode for from the configuration\n\nReturns:\nThe configured LACP mode for the interface.  Valid mode values\nare 'on', 'passive', 'active'", "source": "juraj-google-style"}
{"code": "def send_messages(self, email_messages):\n        \n        if not email_messages:\n            return\n\n        sent_message_count = 0\n\n        for email_message in email_messages:\n            if self._send(email_message):\n                sent_message_count += 1\n        return sent_message_count", "docstring": "Sends one or more EmailMessage objects and returns the\nnumber of email messages sent.\n\nArgs:\nemail_messages: A list of Django EmailMessage objects.\nReturns:\nAn integer count of the messages sent.\nRaises:\nClientError: An interaction with the Amazon SES HTTP API\nfailed.", "source": "juraj-google-style"}
{"code": "def float_value_convert(dictin, dropfailedvalues=False):\n    return key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)", "docstring": "Convert values of dictionary to floats\n\nArgs:\ndictin (DictUpperBound): Input dictionary\ndropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.\n\nReturns:\nDict: Dictionary with values converted to floats", "source": "codesearchnet"}
{"code": "def tokenize(self, string):\n    s = string\n    s = re.sub('\\t', ' ', s)\n    s = re.sub((('(' + regex_separator) + ')'), ' \\\\g<1> ', s)\n    s = re.sub('([^0-9]),', '\\\\g<1> , ', s)\n    s = re.sub(',([^0-9])', ' , \\\\g<1>', s)\n    s = re.sub(\"^(')\", '\\\\g<1> ', s)\n    s = re.sub((('(' + regex_not_letter_number) + \")'\"), \"\\\\g<1> '\", s)\n    s = re.sub((('(' + regex_clitics) + ')$'), ' \\\\g<1>', s)\n    s = re.sub((((('(' + regex_clitics) + ')(') + regex_not_letter_number) + ')'), ' \\\\g<1> \\\\g<2>', s)\n    words = s.strip().split()\n    p1 = re.compile((('.*' + regex_letter_number) + '\\\\.'))\n    p2 = re.compile('^([A-Za-z]\\\\.([A-Za-z]\\\\.)+|[A-Z][bcdfghj-nptvxz]+\\\\.)$')\n    token_list = []\n    for word in words:\n        m1 = p1.match(word)\n        m2 = p2.match(word)\n        if (m1 and (word not in abbreviations_list) and (not m2)):\n            token_list.append(word[0:word.find('.')])\n            token_list.append(word[word.find('.')])\n        else:\n            token_list.append(word)\n    return token_list", "docstring": "Used to parse a string into tokens\n\nThis function takes in a string and returns a list of tokens\n\nArgs:\nstring(str): This is a string of words or a sentence to be parsed into tokens\n\nReturns:\nlist: a list of tokens from the string passed in.\n\nNotes:\nDoesn't seem to parse contractions correctly for example don't\nwould parse as two tokens 'do' and \"n't\" and this seems to be not\nwhat we would want.  Maybe should be \"don't\" or maybe contractions\nshould be expanded into \"do not\" or \"do\",\"not\".  This could be\ndone with a contraction dictionary and some preprocessing.", "source": "codesearchnet"}
{"code": "def _actor_property(self, event, cameo_code, actor_regex):\n        \n        if cameo_code not in self.mapping:\n            return None\n\n        arguments = self.mapping[cameo_code][event + \"-arguments\"]\n        if not isinstance(arguments, list):\n            arguments = [arguments]\n\n        result = list()\n        for a in arguments:\n            match = re.search(actor_regex, a)\n            if match:\n                result.append(match.group(1))\n        return result[0] if len(result) > 0 else None", "docstring": "Determine the property to use for modeling an actor\nArgs:\nevent: one of \"event1\", \"event2\" or \"event3\"\ncameo_code: one of the cameo codes\nactor_regex: one of the regexes above\n\nReturns:", "source": "juraj-google-style"}
{"code": "def end_episode(self, agent_indices):\n    \n    with tf.name_scope('end_episode/'):\n      return tf.cond(\n          self._is_training,\n          lambda: self._define_end_episode(agent_indices), str)", "docstring": "Add episodes to the memory and perform update steps if memory is full.\n\nDuring training, add the collected episodes of the batch indices that\nfinished their episode to the memory. If the memory is full, train on it,\nand then clear the memory. A summary string is returned if requested at\nthis step.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\n\nReturns:\nSummary tensor.", "source": "juraj-google-style"}
{"code": "def check_session_id_signature(session_id, secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):\n    secret_key = _ensure_bytes(secret_key)\n    if signed:\n        pieces = session_id.split('-', 1)\n        if (len(pieces) != 2):\n            return False\n        base_id = pieces[0]\n        provided_signature = pieces[1]\n        expected_signature = _signature(base_id, secret_key)\n        return hmac.compare_digest(encode_utf8(expected_signature), encode_utf8(provided_signature))\n    else:\n        return True", "docstring": "Check the signature of a session ID, returning True if it's valid.\n\nThe server uses this function to check whether a session ID\nwas generated with the correct secret key. If signed sessions are disabled,\nthis function always returns True.\n\nArgs:\nsession_id (str) : The session ID to check\nsecret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)\nsigned (bool, optional) : Whether to check anything (default: value of\n'BOKEH_SIGN_SESSIONS' env var)", "source": "codesearchnet"}
{"code": "def _normalize_mlengine_job_id(job_id):\n    \n\n    \n    match = re.search(r'\\d|\\{{2}', job_id)\n    if match and match.start() == 0:\n        job = 'z_{}'.format(job_id)\n    else:\n        job = job_id\n\n    \n    tracker = 0\n    cleansed_job_id = ''\n    for m in re.finditer(r'\\{{2}.+?\\}{2}', job):\n        cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',\n                                  job[tracker:m.start()])\n        cleansed_job_id += job[m.start():m.end()]\n        tracker = m.end()\n\n    \n    cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])\n\n    return cleansed_job_id", "docstring": "Replaces invalid MLEngine job_id characters with '_'.\n\nThis also adds a leading 'z' in case job_id starts with an invalid\ncharacter.\n\nArgs:\njob_id: A job_id str that may have invalid characters.\n\nReturns:\nA valid job_id representation.", "source": "juraj-google-style"}
{"code": "def process_resource(self, req, resp, resource, uri_kwargs=None):\n    if ('user' in req.context):\n        return\n    identifier = self.identify(req, resp, resource, uri_kwargs)\n    user = self.try_storage(identifier, req, resp, resource, uri_kwargs)\n    if (user is not None):\n        req.context['user'] = user\n    elif (self.challenge is not None):\n        req.context.setdefault('challenges', list()).append(self.challenge)", "docstring": "Process resource after routing to it.\n\nThis is basic falcon middleware handler.\n\nArgs:\nreq (falcon.Request): request object\nresp (falcon.Response): response object\nresource (object): resource object matched by falcon router\nuri_kwargs (dict): additional keyword argument from uri template.\nFor ``falcon<1.0.0`` this is always ``None``", "source": "codesearchnet"}
{"code": "def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):\n    if scaffold is None and summary_op is None or (scaffold is not None and summary_op is not None):\n        raise ValueError('Exactly one of scaffold or summary_op must be provided.')\n    self._summary_op = summary_op\n    self._summary_writer = summary_writer\n    self._output_dir = output_dir\n    self._scaffold = scaffold\n    self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)", "docstring": "Initializes a `SummarySaverHook`.\n\nArgs:\nsave_steps: `int`, save summaries every N steps. Exactly one of\n`save_secs` and `save_steps` should be set.\nsave_secs: `int`, save summaries every N seconds.\noutput_dir: `string`, the directory to save the summaries to. Only used if\nno `summary_writer` is supplied.\nsummary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,\none will be created accordingly.\nscaffold: `Scaffold` to get summary_op if it's not provided.\nsummary_op: `Tensor` of type `string` containing the serialized `Summary`\nprotocol buffer or a list of `Tensor`. They are most likely an output by\nTF summary methods like `tf.compat.v1.summary.scalar` or\n`tf.compat.v1.summary.merge_all`. It can be passed in as one tensor; if\nmore than one, they must be passed in as a list.\n\nRaises:\nValueError: Exactly one of scaffold or summary_op should be set.", "source": "github-repos"}
{"code": "def _compute_sequence_length_from_mask(mask, batch_first):\n    timestep_index = 0 if not batch_first else 1\n    return torch.sum(mask.int(), dim=timestep_index)", "docstring": "Calculate the sequence length tensor (1-D) based on the masking tensor.\n\nThe masking tensor is a 2D boolean tensor with shape [batch, timestep]. For\nany timestep that should be masked, the corresponding field will be False.\nConsider the following example:\na = [[True, True, False, False]\n[True, True, True, False]]\nIt is a (2, 4) tensor, and the corresponding sequence length result should\nbe 1D tensor with value [2, 3]. Note that the masking tensor must be right\npadded that could be checked by, e.g., `is_sequence_right_padded()`.\n\nArgs:\nmask: Boolean tensor with shape [batch, timestep] or [timestep, batch]\nif batch_first=False.\nbatch_first: Boolean, which indicates whether the mask is batch major or\ntime major.\n\nReturns:\nsequence_length: 1D int32 tensor.", "source": "github-repos"}
{"code": "class UnbatchPandas(beam.PTransform):\n\n    def __init__(self, proxy, include_indexes=False):\n        self._proxy = proxy\n        self._include_indexes = include_indexes\n\n    def expand(self, pcoll):\n        return pcoll | _unbatch_transform(self._proxy, self._include_indexes)", "docstring": "A transform that explodes a PCollection of DataFrame or Series. DataFrame\nis converted to a schema-aware PCollection, while Series is converted to its\nunderlying type.\n\nArgs:\ninclude_indexes: (optional, default: False) When unbatching a DataFrame\nif include_indexes=True, attempt to include index columns in the output\nschema for expanded DataFrames. Raises an error if any of the index\nlevels are unnamed (name=None), or if any of the names are not unique\namong all column and index names.", "source": "github-repos"}
{"code": "def flux_down(self, fluxDownTop, emission=None):\n    if (emission is None):\n        emission = np.zeros_like(self.absorptivity)\n    E = np.concatenate((np.atleast_1d(fluxDownTop), emission), axis=(- 1))\n    return np.squeeze(matrix_multiply(self.Tdown, E[(..., np.newaxis)]))", "docstring": "Compute downwelling radiative flux at interfaces between layers.\n\nInputs:\n\n* fluxDownTop: flux down at the top\n* emission: emission from atmospheric levels (N)\ndefaults to zero if not given\n\nReturns:\n* vector of downwelling radiative flux between levels (N+1)\nelement 0 is the flux down at the surface.", "source": "codesearchnet"}
{"code": "def add_polyhedron(self, neighbors, center, color, opacity=1.0, draw_edges=False, edges_color=[0.0, 0.0, 0.0], edges_linewidth=2):\n    points = vtk.vtkPoints()\n    conv = vtk.vtkConvexPointSet()\n    for i in range(len(neighbors)):\n        (x, y, z) = neighbors[i].coords\n        points.InsertPoint(i, x, y, z)\n        conv.GetPointIds().InsertId(i, i)\n    grid = vtk.vtkUnstructuredGrid()\n    grid.Allocate(1, 1)\n    grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())\n    grid.SetPoints(points)\n    dsm = vtk.vtkDataSetMapper()\n    polysites = [center]\n    polysites.extend(neighbors)\n    self.mapper_map[dsm] = polysites\n    if (vtk.VTK_MAJOR_VERSION <= 5):\n        dsm.SetInputConnection(grid.GetProducerPort())\n    else:\n        dsm.SetInputData(grid)\n    ac = vtk.vtkActor()\n    ac.SetMapper(dsm)\n    ac.GetProperty().SetOpacity(opacity)\n    if (color == 'element'):\n        myoccu = 0.0\n        for (specie, occu) in center.species.items():\n            if (occu > myoccu):\n                myspecie = specie\n                myoccu = occu\n        color = [(i / 255) for i in self.el_color_mapping[myspecie.symbol]]\n        ac.GetProperty().SetColor(color)\n    else:\n        ac.GetProperty().SetColor(color)\n    if draw_edges:\n        ac.GetProperty().SetEdgeColor(edges_color)\n        ac.GetProperty().SetLineWidth(edges_linewidth)\n        ac.GetProperty().EdgeVisibilityOn()\n    self.ren.AddActor(ac)", "docstring": "Adds a polyhedron.\n\nArgs:\nneighbors: Neighbors of the polyhedron (the vertices).\ncenter: The atom in the center of the polyhedron.\ncolor: Color for text as RGB.\nopacity: Opacity of the polyhedron\ndraw_edges: If set to True, a line will be drawn at each edge\nedges_color: Color of the line for the edges\nedges_linewidth: Width of the line drawn for the edges", "source": "codesearchnet"}
{"code": "def _FormatInAddrExToken(self, token_data):\n    protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.net_type, 'UNKNOWN')\n    if (token_data.net_type == 4):\n        ip_address = self._FormatPackedIPv6Address(token_data.ip_address[:4])\n    elif (token_data.net_type == 16):\n        ip_address = self._FormatPackedIPv6Address(token_data.ip_address)\n    return {'protocols': protocol, 'net_type': token_data.net_type, 'address': ip_address}", "docstring": "Formats an extended IPv4 address token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_in_addr_ex): AUT_IN_ADDR_EX token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def iter_compress(item_iter, flag_iter):\n    \n    \n    true_items = (item for (item, flag) in zip(item_iter, flag_iter) if flag)\n    return true_items", "docstring": "iter_compress - like numpy compress\n\nArgs:\nitem_iter (list):\nflag_iter (list): of bools\n\nReturns:\nlist: true_items\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_iter import *  # NOQA\n>>> item_iter = [1, 2, 3, 4, 5]\n>>> flag_iter = [False, True, True, False, True]\n>>> true_items = iter_compress(item_iter, flag_iter)\n>>> result = list(true_items)\n>>> print(result)\n[2, 3, 5]", "source": "juraj-google-style"}
{"code": "def bind(port, socket_type, socket_proto):\n    got_socket = False\n    for family in (socket.AF_INET6, socket.AF_INET):\n        try:\n            sock = socket.socket(family, socket_type, socket_proto)\n            got_socket = True\n        except socket.error:\n            continue\n        try:\n            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n            sock.bind(('', port))\n            if (socket_type == socket.SOCK_STREAM):\n                sock.listen(1)\n            port = sock.getsockname()[1]\n        except socket.error:\n            return None\n        finally:\n            sock.close()\n    return (port if got_socket else None)", "docstring": "Try to bind to a socket of the specified type, protocol, and port.\n\nThis is primarily a helper function for PickUnusedPort, used to see\nif a particular port number is available.\n\nFor the port to be considered available, the kernel must support at least\none of (IPv6, IPv4), and the port must be available on each supported\nfamily.\n\nArgs:\nport: The port number to bind to, or 0 to have the OS pick a free port.\nsocket_type: The type of the socket (ex: socket.SOCK_STREAM).\nsocket_proto: The protocol of the socket (ex: socket.IPPROTO_TCP).\n\nReturns:\nThe port number on success or None on failure.", "source": "codesearchnet"}
{"code": "def modify_model_interface(input_file, output_file, input_type, output_type):\n    input_type_int = _parse_type_to_int(input_type, 'input_type')\n    output_type_int = _parse_type_to_int(output_type, 'output_type')\n    status = _pywrap_modify_model_interface.modify_model_interface(input_file, output_file, input_type_int, output_type_int)\n    if status != 0:\n        raise RuntimeError('Error occurred when trying to modify the model input type from float to {input_type} and output type from float to {output_type}.'.format(input_type=input_type, output_type=output_type))", "docstring": "Modify a quantized model's interface (input/output) from float to integer.\n\nArgs:\ninput_file: Full path name to the input tflite file.\noutput_file: Full path name to the output tflite file.\ninput_type: Final input interface type.\noutput_type: Final output interface type.\n\nRaises:\nRuntimeError: If the modification of the model interface was unsuccessful.\nValueError: If the input_type or output_type is unsupported.", "source": "github-repos"}
{"code": "def select_and_insert(self, name, data):\n        \n        self.select_obj(name)\n        self.insert_into_obj(data)", "docstring": "Combines selection and data insertion into one function\n\nArgs:\nname: the name of the object you want to insert into\ndata: the data you want to insert\nReturns:\nNone\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):\n    with tf.variable_scope('van_dec'):\n        dec = tf.layers.conv2d_transpose(x, (first_depth * 4), 3, padding='same', activation=tf.nn.relu, strides=2)\n        dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n        dec = tf.contrib.layers.layer_norm(dec)\n        dec = tf.layers.conv2d_transpose(dec, (first_depth * 4), 3, padding='same', activation=tf.nn.relu, strides=1)\n        dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n        dec = tf.layers.conv2d_transpose(dec, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=1)\n        dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n        dec = tf.contrib.layers.layer_norm(dec)\n        dec = tf.layers.conv2d_transpose(dec, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=2)\n        dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n        dec = tf.layers.conv2d_transpose(dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)\n        dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n        dec = tf.contrib.layers.layer_norm(dec)\n        dec = tf.layers.conv2d_transpose(dec, (output_shape[3] + 1), 3, padding='same', activation=tf.nn.relu, strides=2)\n        dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n        out_mask = tf.layers.conv2d_transpose(dec, (output_shape[3] + 1), 3, strides=1, padding='same', activation=None)\n        mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])\n        out = out_mask[:, :, :, :3]\n        return ((out * mask) + (skip_connections[0] * (1 - mask)))", "docstring": "The VAN decoder.\n\nArgs:\nx: The analogy information to decode.\nskip_connections: The encoder layers which can be used as skip connections.\noutput_shape: The shape of the desired output image.\nfirst_depth: The depth of the first layer of the van image encoder.\nhparams: The python hparams.\n\nReturns:\nThe decoded image prediction.", "source": "codesearchnet"}
{"code": "def merge(self, merge_commit_message=None, should_remove_source_branch=False, merge_when_pipeline_succeeds=False, **kwargs):\n    path = ('%s/%s/merge' % (self.manager.path, self.get_id()))\n    data = {}\n    if merge_commit_message:\n        data['merge_commit_message'] = merge_commit_message\n    if should_remove_source_branch:\n        data['should_remove_source_branch'] = True\n    if merge_when_pipeline_succeeds:\n        data['merge_when_pipeline_succeeds'] = True\n    server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)\n    self._update_attrs(server_data)", "docstring": "Accept the merge request.\n\nArgs:\nmerge_commit_message (bool): Commit message\nshould_remove_source_branch (bool): If True, removes the source\nbranch\nmerge_when_pipeline_succeeds (bool): Wait for the build to succeed,\nthen merge\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMRClosedError: If the merge failed", "source": "codesearchnet"}
{"code": "def LVMPathSpecGetVolumeIndex(path_spec):\n  \n  volume_index = getattr(path_spec, 'volume_index', None)\n\n  if volume_index is None:\n    location = getattr(path_spec, 'location', None)\n\n    if location is None or not location.startswith('/lvm'):\n      return None\n\n    volume_index = None\n    try:\n      volume_index = int(location[4:], 10) - 1\n    except ValueError:\n      pass\n\n    if volume_index is None or volume_index < 0:\n      return None\n\n  return volume_index", "docstring": "Retrieves the volume index from the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: volume index or None if not available.", "source": "juraj-google-style"}
{"code": "def __init__(self, connection):\n        \n        self.connection = connection\n        response = connection.response\n\n        super(BambouHTTPError, self).__init__(\"[HTTP %s(%s)] %s\" % (response.status_code, response.reason, response.errors))", "docstring": "Initializes a BambouHTTPError\n\nArgs:\nconnection: the Connection object", "source": "juraj-google-style"}
{"code": "def access_vlan(self, inter_type, inter, vlan_id):\n        \n        config = ET.Element('config')\n        interface = ET.SubElement(config, 'interface',\n                                  xmlns=(\"urn:brocade.com:mgmt:\"\n                                         \"brocade-interface\"))\n        int_type = ET.SubElement(interface, inter_type)\n        name = ET.SubElement(int_type, 'name')\n        name.text = inter\n        switchport = ET.SubElement(int_type, 'switchport')\n        access = ET.SubElement(switchport, 'access')\n        accessvlan = ET.SubElement(access, 'accessvlan')\n        accessvlan.text = vlan_id\n        try:\n            self._callback(config)\n            return True\n        \n        except Exception as error:\n            logging.error(error)\n            return False", "docstring": "Add a L2 Interface to a specific VLAN.\n\nArgs:\ninter_type: The type of interface you want to configure. Ex.\ntengigabitethernet, gigabitethernet, fortygigabitethernet.\ninter: The ID for the interface you want to configure. Ex. 1/0/1\nvlan_id: ID for the VLAN interface being modified. Value of 2-4096.\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def read_log(self, logfile):\n        \n\n        \n        logfile.seek(0)\n\n        \n        field_names, _ = self._parse_bro_header(logfile)\n\n        \n        \n        \n        \n        while 1:\n            _line = next(logfile).strip()\n            if not _line.startswith('#'):\n                yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter))))\n            else:\n                time.sleep(.1) \n                break", "docstring": "The read_log method returns a memory efficient generator for rows in a Bro log.\n\nUsage:\nrows = my_bro_reader.read_log(logfile)\nfor row in rows:\ndo something with row\n\nArgs:\nlogfile: The Bro Log file.", "source": "juraj-google-style"}
{"code": "def export(self, filepath, encoding='utf-8', gzipped=True):\n    data = json.dumps(self.word_frequency.dictionary, sort_keys=True)\n    write_file(filepath, encoding, gzipped, data)", "docstring": "Export the word frequency list for import in the future\n\nArgs:\nfilepath (str): The filepath to the exported dictionary\nencoding (str): The encoding of the resulting output\ngzipped (bool): Whether to gzip the dictionary or not", "source": "codesearchnet"}
{"code": "def _wrap_decorator(wrapped_function, decorator_name):\n\n    def wrapper(wrapper_func):\n        return tf_decorator.make_decorator(wrapped_function, wrapper_func, decorator_name)\n    return wrapper", "docstring": "Indicate that one function wraps another.\n\nThis decorator wraps a function using `tf_decorator.make_decorator`\nso that doc generation scripts can pick up original function\nsignature.\nIt would be better to use @functools.wrap decorator, but it would\nnot update function signature to match wrapped function in Python 2.\n\nArgs:\nwrapped_function: The function that decorated function wraps.\ndecorator_name: The name of the decorator.\n\nReturns:\nFunction that accepts wrapper function as an argument and returns\n`TFDecorator` instance.", "source": "github-repos"}
{"code": "def method_schema(self, method: str, iterate: bool=False) -> dict:\n    endpoint, method = method.rsplit('.', 1)\n    resource = self.api_document\n    for e in endpoint.split('.'):\n        resource = resource['resources'][e]\n    resource = resource['methods'][method]['response']['$ref']\n    properties = self.api_document['schemas'][resource]['properties']\n    schema = self.to_schema(properties)\n    if iterate or ('List' in resource and resource.endswith('Response')):\n        for entry in schema:\n            if entry['type'] == 'RECORD':\n                return entry['fields']\n            elif entry['mode'] == 'REPEATED':\n                entry['mode'] = 'NULLABLE'\n                return [entry]\n        raise ValueError('Unhandled discovery schema.')\n    else:\n        return schema", "docstring": "Return BigQuery schema for a Discovery API function.\n\nUse the full dot notation of the rest API function.\n\nArgs:\nmethod: the dot notation name of the Google API function\niterate: if true, return only iterable schema\n\nReturns:\nA dictionary representation of the resource.", "source": "github-repos"}
{"code": "def from_rfc3339(value):\n    \n    return datetime.datetime.strptime(value, _RFC3339_MICROS).replace(tzinfo=pytz.utc)", "docstring": "Convert a microsecond-precision timestamp to datetime.\n\nArgs:\nvalue (str): The RFC3339 string to convert.\n\nReturns:\ndatetime.datetime: The datetime object equivalent to the timestamp in\nUTC.", "source": "juraj-google-style"}
{"code": "def get_numeric_features_to_observed_range(examples):\n  \n  observed_features = collections.defaultdict(list)  \n  for example in examples:\n    for feature_name in get_numeric_feature_names(example):\n      original_feature = parse_original_feature_from_example(\n          example, feature_name)\n      observed_features[feature_name].extend(original_feature.original_value)\n  return {\n      feature_name: {\n          'observedMin': min(feature_values),\n          'observedMax': max(feature_values),\n      }\n      for feature_name, feature_values in iteritems(observed_features)\n  }", "docstring": "Returns numerical features and their observed ranges.\n\nArgs:\nexamples: Examples to read to get ranges.\n\nReturns:\nA dict mapping feature_name -> {'observedMin': 'observedMax': } dicts,\nwith a key for each numerical feature.", "source": "juraj-google-style"}
{"code": "def FlagCxx14Features(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    include = Match('\\s*#\\s*include\\s+[<\"]([^<\"]+)[\">]', line)\n    if (include and (include.group(1) in ('scoped_allocator', 'shared_mutex'))):\n        error(filename, linenum, 'build/c++14', 5, ('<%s> is an unapproved C++14 header.' % include.group(1)))", "docstring": "Flag those C++14 features that we restrict.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def _get_permutation(tensor, n_dims, active_dim):\n    if not tensor.shape:\n        raise ValueError(\"Tensor's rank should be static\")\n    rank = len(tensor.shape)\n    batch_rank = rank - n_dims\n    if active_dim == n_dims - 1:\n        return None\n    perm = np.arange(rank)\n    perm[rank - 1] = batch_rank + active_dim\n    perm[batch_rank + active_dim] = rank - 1\n    return perm", "docstring": "Returns the permutation that swaps the active and the last dimensions.\n\nArgs:\ntensor: `Tensor` having a statically known rank.\nn_dims: Number of spatial dimensions.\nactive_dim: The active spatial dimension.\n\nReturns:\nA list representing the permutation, or `None` if no permutation needed.\n\nFor example, with 'tensor` having rank 5, `n_dims = 3` and `active_dim = 1`\nyields [0, 1, 2, 4, 3]. Explanation: we start with [0, 1, 2, 3, 4], where the\nlast n_dims=3 dimensions are spatial dimensions, and the first two are batch\ndimensions. Among the spatial dimensions, we take the one at index 1, which\nis \"3\", and swap it with the last dimension \"4\".", "source": "github-repos"}
{"code": "def get_auditwheel_output(wheel_path: str) -> None:\n    stringio = io.StringIO()\n    previous_stdout = sys.stdout\n    sys.stdout = stringio\n    auditwheel_parser = argparse.ArgumentParser(description='Cross-distro Python wheels.')\n    sub_parsers = auditwheel_parser.add_subparsers(metavar='command', dest='cmd')\n    main_show.configure_parser(sub_parsers)\n    auditwheel_args = argparse.Namespace(WHEEL_FILE=wheel_path, verbose=1)\n    main_show.execute(args=auditwheel_args, p=auditwheel_parser)\n    sys.stdout = previous_stdout\n    return stringio.getvalue()", "docstring": "Run \"auditwheel show\" on the wheel and return the output.\n\nArgs:\nwheel_path: path of the wheel file\n\nReturns:\n\"auditwheel show\" output", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(Authentication, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    credentials = []\n    while self.is_tag_next(enums.Tags.CREDENTIAL, local_stream):\n        credential = objects.Credential()\n        credential.read(local_stream, kmip_version=kmip_version)\n        credentials.append(credential)\n    if (len(credentials) == 0):\n        raise ValueError('Authentication encoding missing credentials.')\n    self._credentials = credentials\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Authentication struct and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def powerset(iterable, nonempty=False, reverse=False):\n    iterable = list(iterable)\n    if nonempty:\n        start = 1\n    else:\n        start = 0\n    seq_sizes = range(start, (len(iterable) + 1))\n    if reverse:\n        seq_sizes = reversed(seq_sizes)\n        iterable.reverse()\n    return chain.from_iterable((combinations(iterable, r) for r in seq_sizes))", "docstring": "Generate the power set of an iterable.\n\nArgs:\niterable (Iterable): The iterable from which to generate the power set.\n\nKeyword Args:\nnonempty (boolean): If True, don't include the empty set.\nreverse (boolean): If True, reverse the order of the powerset.\n\nReturns:\nIterable: An iterator over the power set.\n\nExample:\n>>> ps = powerset(np.arange(2))\n>>> list(ps)\n[(), (0,), (1,), (0, 1)]\n>>> ps = powerset(np.arange(2), nonempty=True)\n>>> list(ps)\n[(0,), (1,), (0, 1)]\n>>> ps = powerset(np.arange(2), nonempty=True, reverse=True)\n>>> list(ps)\n[(1, 0), (1,), (0,)]", "source": "codesearchnet"}
{"code": "def get_tri_area(pts):\n    \n    a, b, c = pts[0], pts[1], pts[2]\n    v1 = np.array(b) - np.array(a)\n    v2 = np.array(c) - np.array(a)\n    area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)\n    return area_tri", "docstring": "Given a list of coords for 3 points,\nCompute the area of this triangle.\n\nArgs:\npts: [a, b, c] three points", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    for subkey in registry_key.GetSubkeys():\n      values_dict = {}\n      values_dict['subkey_name'] = subkey.name\n\n      name_values = subkey.name.split('&')\n      number_of_name_values = len(name_values)\n\n      \n      if number_of_name_values != 4:\n        logger.warning(\n            'Expected 4 &-separated values in: {0:s}'.format(subkey.name))\n\n      if number_of_name_values >= 1:\n        values_dict['device_type'] = name_values[0]\n      if number_of_name_values >= 2:\n        values_dict['vendor'] = name_values[1]\n      if number_of_name_values >= 3:\n        values_dict['product'] = name_values[2]\n      if number_of_name_values >= 4:\n        values_dict['revision'] = name_values[3]\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = registry_key.offset\n      event_data.regvalue = values_dict\n      event_data.source_append = self._SOURCE_APPEND\n\n      if subkey.number_of_subkeys == 0:\n        \n        event = time_events.DateTimeValuesEvent(\n            subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        continue\n\n      for device_key in subkey.GetSubkeys():\n        values_dict['serial'] = device_key.name\n\n        friendly_name_value = device_key.GetValueByName('FriendlyName')\n        if friendly_name_value:\n          values_dict['friendly_name'] = friendly_name_value.GetDataAsObject()\n        else:\n          values_dict.pop('friendly_name', None)\n\n        \n        parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')\n        if parent_id_prefix_value:\n          values_dict['parent_id_prefix'] = (\n              parent_id_prefix_value.GetDataAsObject())\n        else:\n          values_dict.pop('parent_id_prefix', None)\n\n        \n        event = time_events.DateTimeValuesEvent(\n            subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        \n        \n        event = time_events.DateTimeValuesEvent(\n            device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        device_parameter_key = device_key.GetSubkeyByName('Device Parameters')\n        if device_parameter_key:\n          event = time_events.DateTimeValuesEvent(\n              device_parameter_key.last_written_time,\n              definitions.TIME_DESCRIPTION_WRITTEN)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        log_configuration_key = device_key.GetSubkeyByName('LogConf')\n        if log_configuration_key:\n          event = time_events.DateTimeValuesEvent(\n              log_configuration_key.last_written_time,\n              definitions.TIME_DESCRIPTION_WRITTEN)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        properties_key = device_key.GetSubkeyByName('Properties')\n        if properties_key:\n          event = time_events.DateTimeValuesEvent(\n              properties_key.last_written_time,\n              definitions.TIME_DESCRIPTION_WRITTEN)\n          parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def CompressedHistograms(self, run, tag):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.CompressedHistograms(tag)", "docstring": "Retrieve the compressed histogram events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.CompressedHistogramEvents`.", "source": "juraj-google-style"}
{"code": "def get_stored_version(connection):\n    if (connection.engine.name == 'sqlite'):\n        version = connection.execute('PRAGMA user_version').fetchone()[0]\n        if (version == 0):\n            raise VersionIsNotStored\n        return version\n    elif (connection.engine.name == 'postgresql'):\n        try:\n            r = connection.execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone()\n            if (not r):\n                raise VersionIsNotStored\n            version = r[0]\n        except ProgrammingError:\n            raise VersionIsNotStored\n        return version\n    else:\n        raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name))", "docstring": "Returns database version.\n\nArgs:\nconnection (sqlalchemy connection):\n\nRaises: Assumes user_version pragma (sqlite case) and user_version table (postgresql case)\nexist because they were created at database creation.\n\nReturns:\nint: version of the database.", "source": "codesearchnet"}
{"code": "def assert_corofunction(**kw):\n    for (name, value) in kw.items():\n        if (not asyncio.iscoroutinefunction(value)):\n            raise TypeError('paco: {} must be a coroutine function'.format(name))", "docstring": "Asserts that the given values are coroutine functions.\n\nArguments:\n**kw (mixed): values to check if they are coroutine functions.\n\nRaises:\nTypeError: if assertion fails.", "source": "codesearchnet"}
{"code": "def operate_magmom(self, magmom):\n        \n\n        magmom = Magmom(magmom)  \n\n        transformed_moment = self.apply_rotation_only(magmom.global_moment) * \\\n            np.linalg.det(self.rotation_matrix) * self.time_reversal\n\n        \n        return Magmom.from_global_moment_and_saxis(transformed_moment, magmom.saxis)", "docstring": "Apply time reversal operator on the magnetic moment. Note that\nmagnetic moments transform as axial vectors, not polar vectors.\n\nSee 'Symmetry and magnetic structures', Rodríguez-Carvajal and\nBourée for a good discussion. DOI: 10.1051/epjconf/20122200010\n\nArgs:\nmagmom: Magnetic moment as electronic_structure.core.Magmom\nclass or as list or np array-like\n\nReturns:\nMagnetic moment after operator applied as Magmom class", "source": "juraj-google-style"}
{"code": "def gui(discord_token, discord_client_id):\n    logger.info('Starting Modis in GUI')\n    import tkinter as tk\n    logger.debug('Loading packages')\n    from modis.discord_modis import gui as discord_modis_gui\n    from modis.reddit_modis import gui as reddit_modis_gui\n    from modis.facebook_modis import gui as facebook_modis_gui\n    logger.debug('Initialising window')\n    root = tk.Tk()\n    root.minsize(width=800, height=400)\n    root.geometry('800x600')\n    root.title('Modis Control Panel')\n    root.iconbitmap('{}/assets/modis.ico'.format(file_dir))\n    # notebook = ttk.Notebook(root)\n    # notebook.grid(column=0, row=0, padx=0, pady=0, sticky=\"W E N S\")\n    discord = discord_modis_gui.Frame(root, discord_token, discord_client_id)\n    discord.grid(column=0, row=0, padx=0, pady=0, sticky='W E N S')\n    root.columnconfigure(0, weight=1)\n    root.rowconfigure(0, weight=1)\n    discord.columnconfigure(0, weight=1)\n    discord.rowconfigure(0, weight=1)\n    logger.debug('GUI initialised')\n    root.mainloop()", "docstring": "Start Modis in gui format.\n\nArgs:\ndiscord_token (str): The bot token for your Discord application\ndiscord_client_id: The bot's client ID", "source": "codesearchnet"}
{"code": "def depth(self, local: bool = True) -> int:\n        \n        G = self.graph\n        if not local:\n            def remove_local(dagc: DAGCircuit) \\\n                    -> Generator[Operation, None, None]:\n                for elem in dagc:\n                    if dagc.graph.degree[elem] > 2:\n                        yield elem\n            G = DAGCircuit(remove_local(self)).graph\n\n        return nx.dag_longest_path_length(G) - 1", "docstring": "Return the circuit depth.\n\nArgs:\nlocal:  If True include local one-qubit gates in depth\ncalculation. Else return the multi-qubit gate depth.", "source": "juraj-google-style"}
{"code": "def get_template_path(filename):\n    if os.path.isfile(filename):\n        return os.path.abspath(filename)\n    for i in sys.path:\n        if os.path.isfile(os.path.join(i, filename)):\n            return os.path.abspath(os.path.join(i, filename))\n    return None", "docstring": "Find raw template in working directory or in sys.path.\n\ntemplate_path from config may refer to templates colocated with the Stacker\nconfig, or files in remote package_sources. Here, we emulate python module\nloading to find the path to the template.\n\nArgs:\nfilename (str): Template filename.\n\nReturns:\nOptional[str]: Path to file, or None if no file found", "source": "codesearchnet"}
{"code": "def report_to_rows(report):\n    if type(report) is GeneratorType:\n        leftovers = ''\n        for chunk in report:\n            data, extra = chunk.rsplit('\\n', 1)\n            for row in csv_to_rows(leftovers + data):\n                yield row\n            leftovers = extra\n    else:\n        for row in csv_to_rows(report):\n            yield row", "docstring": "Helper to convert DBM files into iterator of rows, memory efficient.\n\nUsage example:\n\n```\nfilename, report = report_file(...)\nrows = report_to_rows(report)\n```\n\nArgs:\n* report: (iterator or file) Either an iterator or file that will be\nconverted to rows.\n\nReturns:\n* Iterator of lists representing each row.", "source": "github-repos"}
{"code": "def addFixedEffect(self,F=None,A=None):\n        \n        if A==None:\n            A = SP.eye(self.P)\n        if F==None:\n            F = SP.ones((self.N,1))\n        \n        assert A.shape[1]==self.P, 'Incompatible shape'\n        assert F.shape[0]==self.N, 'Incompatible shape'\n       \n        if F.shape[1]>1:\n            for m in range(F.shape[1]):\n                self.vd.addFixedEffTerm(A,F[:,m:m+1])\n        else:\n            self.vd.addFixedEffTerm(A,F)\n\n        \n        self.gp      = None\n        self.init    = False\n        self.fast    = False\n        self.optimum = None\n\n        self.cache['Sigma']   = None\n        self.cache['Hessian'] = None\n        self.cache['Lparams'] = None\n        self.cache['paramsST']= None", "docstring": "add fixed effect to the model\n\nArgs:\nF: fixed effect matrix [N,1]\nA: design matrix [K,P] (e.g. SP.ones((1,P)) common effect; SP.eye(P) any effect)", "source": "juraj-google-style"}
{"code": "async def start_server_in_loop(runner, hostname, port, agent):\n    \n    await runner.setup()\n    agent.web.server = aioweb.TCPSite(runner, hostname, port)\n    await agent.web.server.start()\n    logger.info(f\"Serving on http:", "docstring": "Listens to http requests and sends them to the webapp.\n\nArgs:\nrunner: AppRunner to process the http requests\nhostname: host name to listen from.\nport: port to listen from.\nagent: agent that owns the web app.", "source": "juraj-google-style"}
{"code": "def require_representation(self, req):\n    try:\n        (type_, subtype, _) = parse_mime_type(req.content_type)\n        content_type = '/'.join((type_, subtype))\n    except:\n        raise falcon.HTTPUnsupportedMediaType(description='Invalid Content-Type header: {}'.format(req.content_type))\n    if (content_type == 'application/json'):\n        body = req.stream.read()\n        return json.loads(body.decode('utf-8'))\n    else:\n        raise falcon.HTTPUnsupportedMediaType(description='only JSON supported, got: {}'.format(content_type))", "docstring": "Require raw representation dictionary from falcon request object.\n\nThis does not perform any field parsing or validation but only uses\nallowed content-encoding handler to decode content body.\n\nNote:\nCurrently only JSON is allowed as content type.\n\nArgs:\nreq (falcon.Request): request object\n\nReturns:\ndict: raw dictionary of representation supplied in request body", "source": "codesearchnet"}
{"code": "def file_name(self, value):\n        \n        if value == self._defaults['fileName'] and 'fileName' in self._values:\n            del self._values['fileName']\n        else:\n            self._values['fileName'] = value", "docstring": "The file_name property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def append(self, other, ignore_index=False):\n    if (not isinstance(other, self.__class__)):\n        raise ValueError('May only append instances of same type.')\n    if (type(ignore_index) is bool):\n        new_frame = self._frame.append(other._frame, ignore_index=ignore_index, verify_integrity=True)\n    else:\n        new_frame = self._frame.append(other._frame, ignore_index=True, verify_integrity=True)\n        if (type(ignore_index) is int):\n            new_frame.index = range(ignore_index, (ignore_index + len(new_frame)))\n        else:\n            new_frame.index = ignore_index\n    return self.__class__(new_frame)", "docstring": "Append rows of `other` to the end of this frame, returning a new object.\n\nWrapper around the :meth:`pandas.DataFrame.append` method.\n\nArgs:\nother (Cartesian):\nignore_index (sequence, bool, int): If it is a boolean, it\nbehaves like in the description of\n:meth:`pandas.DataFrame.append`.\nIf it is a sequence, it becomes the new index.\nIf it is an integer,\n``range(ignore_index, ignore_index + len(new))``\nbecomes the new index.\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def _get_object_type(filename, filepath):\n    filename_no_ext = os.path.splitext(filename)[0].lower()\n    if filename_no_ext.endswith(PrecompiledExampleType.test_ends):\n        object_type = PRECOMPILED_OBJECT_TYPE_UNIT_TEST\n    elif PrecompiledExampleType.katas in filepath.split(os.sep):\n        object_type = PRECOMPILED_OBJECT_TYPE_KATA\n    elif PrecompiledExampleType.examples in filepath.split(os.sep):\n        object_type = PRECOMPILED_OBJECT_TYPE_EXAMPLE\n    else:\n        object_type = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED\n    return object_type", "docstring": "Get type of an object based on it filename/filepath\n\nArgs:\nfilename: object's filename\nfilepath: object's filepath\n\nReturns: type of the object (example, kata, unit-test)", "source": "github-repos"}
{"code": "def index_max(x, idx, y):\n    return _index_update_helper(tf_np.ndarray._with_index_max, x, idx, y)", "docstring": "Pure equivalent of `x[idx] = maximum(x[idx], y)`.\n\nReturns the value of x that would result from the NumPy-style indexed\nassignment `x[idx] = maximum(x[idx], y)`. Because it's a pure function, `x`\nitself won't be changed.\n\nArgs:\nx: an array with the values to be updated.\nidx: a Numpy-style index, consisting of `None`, integers, slice objects,\nellipses, ndarrays with integer dtypes, or a tuple of the above.\ny: the array of updates. `y` must be broadcastable to the shape of the array\nthat would be returned by `x[idx]`.\n\nReturns:\nThe updated version of `x`.", "source": "github-repos"}
{"code": "def twopercent(station_code):\n    \n    \n    temp = None\n    try:\n        fin = open('%s/%s' % (env.WEATHER_DATA_PATH,\n                              _basename(station_code, 'ddy')))\n        for line in fin:\n            value = re.search(, line)\n            if value:\n                temp = float(value.groups()[0])\n    except IOError:\n        pass\n\n    if not temp:\n        \n        try:\n            fin = open('%s/%s' % (env.WEATHER_DATA_PATH,\n                                  _basename(station_code, 'stat')))\n            flag = 0\n            tdata = []\n            for line in fin:\n                if line.find('2%') is not -1:\n                    flag = 3\n                if flag > 0:\n                    tdata.append(line.split('\\t'))\n                    flag -= 1\n            temp = float(tdata[2][5].strip())\n        except IOError:\n            pass\n    if temp:\n        return temp\n    else:\n        raise Exception(\"Error: 2% High Temperature not found\")", "docstring": "Two percent high design temperature for a location.\n\nDegrees in Celcius\n\nArgs:\nstation_code (str): Weather Station Code\n\nReturns:\nfloat degrees Celcius", "source": "juraj-google-style"}
{"code": "def simulate_measurement(self, index: int) -> bool:\n        \n        args = self._shard_num_args({'index': index})\n        prob_one = np.sum(self._pool.map(_one_prob_per_shard, args))\n        result = bool(np.random.random() <= prob_one)\n\n        args = self._shard_num_args({\n            'index': index,\n            'result': result,\n            'prob_one': prob_one\n        })\n        self._pool.map(_collapse_state, args)\n        return result", "docstring": "Simulates a single qubit measurement in the computational basis.\n\nArgs:\nindex: Which qubit is measured.\n\nReturns:\nTrue iff the measurement result corresponds to the |1> state.", "source": "juraj-google-style"}
{"code": "def __init__(self, ignore):\n    super().__init__()\n    self._ignore = ignore", "docstring": "Initialize the visitor.\n\nArgs:\nignore: A list of prefixes to ignore. Typically, this list includes things\nsomething like like \"builtins.\", since we don't want to convert builtin\ntypes to late types. (And, more generally, types of modules that are\nalways loaded by pytype don't need to be late types)", "source": "github-repos"}
{"code": "def _default_ising_beta_range(h, J):\n    abs_h = [abs(hh) for hh in h.values() if (hh != 0)]\n    abs_J = [abs(jj) for jj in J.values() if (jj != 0)]\n    abs_biases = (abs_h + abs_J)\n    if (not abs_biases):\n        return [0.1, 1.0]\n    min_delta_energy = min(abs_biases)\n    abs_bias_dict = {k: abs(v) for (k, v) in h.items()}\n    for ((k1, k2), v) in J.items():\n        abs_bias_dict[k1] += abs(v)\n        abs_bias_dict[k2] += abs(v)\n    max_delta_energy = max(abs_bias_dict.values())\n    hot_beta = (np.log(2) / max_delta_energy)\n    cold_beta = (np.log(100) / min_delta_energy)\n    return [hot_beta, cold_beta]", "docstring": "Determine the starting and ending beta from h J\n\nArgs:\nh (dict)\n\nJ (dict)\n\nAssume each variable in J is also in h.\n\nWe use the minimum bias to give a lower bound on the minimum energy gap, such at the\nfinal sweeps we are highly likely to settle into the current valley.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor, output_attentions: Optional[bool]=None):\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, source_len)` where padding elements are indicated by very large negative\nvalues.\nobject_queries (`torch.FloatTensor`, *optional*):\nObject queries (also called content embeddings), to be added to the hidden states.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def check_import_stdlib(module):\n    if ((module in stdlib_list('2.7')) or (module in stdlib_list('3.4')) or (module in stdlib_list('3.5')) or (module in stdlib_list('3.6')) or (module in stdlib_list('3.7')) or (module in ['app', 'args', 'playbook_app'])):\n        return True\n    return False", "docstring": "Check if module is in Python stdlib.\n\nArgs:\nmodule (str): The name of the module to check.\n\nReturns:\nbool: Returns True if the module is in the stdlib or template.", "source": "codesearchnet"}
{"code": "def get_allocated_fragments(self, id_or_uri, count=(- 1), start=0):\n    uri = (self._client.build_uri(id_or_uri) + '/allocated-fragments?start={0}&count={1}'.format(start, count))\n    return self._client.get_collection(uri)", "docstring": "Gets all fragments that have been allocated in range.\n\nArgs:\nid_or_uri:\nID or URI of range.\ncount:\nThe number of resources to return. A count of -1 requests all items. The actual number of items in\nthe response may differ from the requested count if the sum of start and count exceed the total number\nof items.\nstart:\nThe first item to return, using 0-based indexing. If not specified, the default is 0 - start with the\nfirst available item.\n\nReturns:\nlist: A list with the allocated fragements.", "source": "codesearchnet"}
{"code": "def get_rots(self) -> Rotation:\n    return self._rots", "docstring": "Getter for the rotation.\n\nReturns:\nThe rotation object", "source": "github-repos"}
{"code": "def create_store(reducer, initial_state=None, enhancer=None):\n    if (enhancer is not None):\n        if (not hasattr(enhancer, '__call__')):\n            raise TypeError('Expected the enhancer to be a function.')\n        return enhancer(create_store)(reducer, initial_state)\n    if (not hasattr(reducer, '__call__')):\n        raise TypeError('Expected the reducer to be a function.')\n    current_reducer = [reducer]\n    current_state = [initial_state]\n    current_listeners = [[]]\n    next_listeners = [current_listeners[0]]\n    is_dispatching = [False]\n\n    def ensure_can_mutate_next_listeners():\n        if (next_listeners[0] == current_listeners[0]):\n            next_listeners[0] = current_listeners[0][:]\n\n    def get_state():\n        return current_state[0]\n\n    def subscribe(listener):\n        if (not hasattr(listener, '__call__')):\n            raise TypeError('Expected listener to be a function.')\n        is_subscribed = [True]\n        ensure_can_mutate_next_listeners()\n        next_listeners[0].append(listener)\n\n        def unsubcribe():\n            if (not is_subscribed[0]):\n                return\n            is_subscribed[0] = False\n            ensure_can_mutate_next_listeners()\n            index = next_listeners[0].index(listener)\n            next_listeners[0].pop(index)\n        return unsubcribe\n\n    def dispatch(action):\n        if (not isinstance(action, dict)):\n            raise TypeError('Actions must be a dict. Use custom middleware for async actions.')\n        if (action.get('type') is None):\n            raise ValueError('Actions must have a non-None \"type\" property. Have you misspelled a constant?')\n        if is_dispatching[0]:\n            raise Exception('Reducers may not dispatch actions.')\n        try:\n            is_dispatching[0] = True\n            current_state[0] = current_reducer[0](current_state[0], action)\n        finally:\n            is_dispatching[0] = False\n        listeners = current_listeners[0] = next_listeners[0]\n        for listener in listeners:\n            listener()\n        return action\n\n    def replace_reducer(next_reducer):\n        if (not hasattr(next_reducer, '__call__')):\n            raise TypeError('Expected next_reducer to be a function')\n        current_reducer[0] = next_reducer\n        dispatch({'type': ActionTypes.INIT})\n    dispatch({'type': ActionTypes.INIT})\n    return StoreDict(dispatch=dispatch, subscribe=subscribe, get_state=get_state, replace_reducer=replace_reducer)", "docstring": "redux in a nutshell.\n\nobservable has been omitted.\n\nArgs:\nreducer: root reducer function for the state tree\ninitial_state: optional initial state data\nenhancer: optional enhancer function for middleware etc.\n\nReturns:\na Pydux store", "source": "codesearchnet"}
{"code": "def reset(self):\n    if (self._status is not TaskStatus.STOPPED):\n        raise RuntimeError(('Cannot reset %s in state %s' % (self, self._status)))\n    self._reset()\n    self.return_values = {}\n    self._status = TaskStatus.IDLE", "docstring": "Reset a task.\n\nAllows a task to be started again, clears the ``return_values``.\n\nRaises:\nRuntimeError: If the task has not been stopped.", "source": "codesearchnet"}
{"code": "def from_json(cls, json_value: Dict[str, Any], *, allow_partial: bool=False, root_path: Optional[utils.KeyPath]=None) -> 'DNA':\n    cloneable_metadata_keys = json_value.pop('_cloneable_metadata_keys', None)\n    if json_value.get('format', None) == 'compact':\n        with symbolic.enable_type_check(False):\n            dna = DNA.parse(symbolic.from_json(json_value.get('value')))\n            if 'metadata' in json_value:\n                dna.rebind(metadata=symbolic.from_json(json_value.get('metadata')), raise_on_no_change=False, skip_notification=True)\n    else:\n        dna = super(DNA, cls).from_json(json_value, allow_partial=allow_partial, root_path=root_path)\n        assert isinstance(dna, DNA)\n    if cloneable_metadata_keys:\n        dna._cloneable_metadata_keys = set(cloneable_metadata_keys)\n    return dna", "docstring": "Class method that load a DNA from a JSON value.\n\nArgs:\njson_value: Input JSON value, only JSON dict is acceptable.\nallow_partial: Whether to allow elements of the list to be partial.\nroot_path: KeyPath of loaded object in its object tree.\n\nReturns:\nA DNA object.", "source": "github-repos"}
{"code": "def shape(self) -> torch.Size:\n    if self._rot_mats is not None:\n        return self._rot_mats.shape[:-2]\n    elif self._quats is not None:\n        return self._quats.shape[:-1]\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the\nunderlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix\ntensor, for example, the resulting shape would be [10].\n\nReturns:\nThe virtual shape of the rotation object", "source": "github-repos"}
{"code": "def fill_rects(self, *rects):\n        \n        rect_array = ffi.new('SDL_Rect[]', len(rects))\n        for i, r in enumerate(rects):\n            rect_array[i] = r._ptr[0]\n        check_int_err(lib.SDL_RenderFillRects(self._ptr, rect_array, len(rects)))", "docstring": "Fill some number of rectangles on the current rendering target with the drawing color.\n\nArgs:\n*rects (Rect): The destination rectangles.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def PushAttributeContainer(self, serialized_data):\n    self._list.append(serialized_data)\n    self.data_size += len(serialized_data)\n    self.next_sequence_number += 1", "docstring": "Pushes a serialized attribute container onto the list.\n\nArgs:\nserialized_data (bytes): serialized attribute container data.", "source": "codesearchnet"}
{"code": "def heightmap_add_hm(\n    hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray\n) -> None:\n    \n    hm3[:] = hm1[:] + hm2[:]", "docstring": "Add two heightmaps together and stores the result in ``hm3``.\n\nArgs:\nhm1 (numpy.ndarray): The first heightmap.\nhm2 (numpy.ndarray): The second heightmap to add to the first.\nhm3 (numpy.ndarray): A destination heightmap to store the result.\n\n.. deprecated:: 2.0\nDo ``hm3[:] = hm1[:] + hm2[:]`` instead.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_entry = parser_mediator.GetFileEntry()\n    display_name = parser_mediator.GetDisplayName()\n\n    file_header_map = self._GetDataTypeMap('custom_file_header')\n\n    try:\n      file_header, file_offset = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Invalid Custom Destination: {0:s} - unable to parse file header '\n          'with error: {1!s}').format(display_name, exception))\n\n    if file_header.unknown1 != 2:\n      raise errors.UnableToParseFile((\n          'Unsupported Custom Destination file: {0:s} - invalid unknown1: '\n          '{1:d}.').format(display_name, file_header.unknown1))\n\n    if file_header.header_values_type > 2:\n      raise errors.UnableToParseFile((\n          'Unsupported Custom Destination file: {0:s} - invalid header value '\n          'type: {1:d}.').format(display_name, file_header.header_values_type))\n\n    if file_header.header_values_type == 0:\n      data_map_name = 'custom_file_header_value_type_0'\n    else:\n      data_map_name = 'custom_file_header_value_type_1_or_2'\n\n    file_header_value_map = self._GetDataTypeMap(data_map_name)\n\n    try:\n      _, value_data_size = self._ReadStructureFromFileObject(\n          file_object, file_offset, file_header_value_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Invalid Custom Destination: {0:s} - unable to parse file header '\n          'value with error: {1!s}').format(display_name, exception))\n\n    file_offset += value_data_size\n    file_size = file_object.get_size()\n    remaining_file_size = file_size - file_offset\n\n    entry_header_map = self._GetDataTypeMap('custom_entry_header')\n    file_footer_map = self._GetDataTypeMap('custom_file_footer')\n\n    \n    \n    \n    first_guid_checked = False\n    while remaining_file_size > 4:\n      try:\n        entry_header, entry_data_size = self._ReadStructureFromFileObject(\n            file_object, file_offset, entry_header_map)\n\n      except (ValueError, errors.ParseError) as exception:\n        if not first_guid_checked:\n          raise errors.UnableToParseFile((\n              'Invalid Custom Destination file: {0:s} - unable to parse '\n              'entry header with error: {1!s}').format(\n                  display_name, exception))\n\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse entry header with error: {0!s}'.format(\n                exception))\n        break\n\n      if entry_header.guid != self._LNK_GUID:\n        if not first_guid_checked:\n          raise errors.UnableToParseFile((\n              'Unsupported Custom Destination file: {0:s} - invalid entry '\n              'header signature offset: 0x{1:08x}.').format(\n                  display_name, file_offset))\n\n        try:\n          \n          file_footer, _ = self._ReadStructureFromFileObject(\n              file_object, file_offset, file_footer_map)\n\n          if file_footer.signature != self._FILE_FOOTER_SIGNATURE:\n            parser_mediator.ProduceExtractionWarning(\n                'invalid entry header signature at offset: 0x{0:08x}'.format(\n                    file_offset))\n\n        except (ValueError, errors.ParseError) as exception:\n          parser_mediator.ProduceExtractionWarning((\n              'unable to parse footer at 
offset: 0x{0:08x} with error: '\n              '{1!s}').format(file_offset, exception))\n          break\n\n        \n        break\n\n      first_guid_checked = True\n      file_offset += entry_data_size\n      remaining_file_size -= entry_data_size\n\n      lnk_file_size = self._ParseLNKFile(\n          parser_mediator, file_entry, file_offset, remaining_file_size)\n\n      file_offset += lnk_file_size\n      remaining_file_size -= lnk_file_size\n\n    try:\n      file_footer, _ = self._ReadStructureFromFileObject(\n          file_object, file_offset, file_footer_map)\n\n      if file_footer.signature != self._FILE_FOOTER_SIGNATURE:\n        parser_mediator.ProduceExtractionWarning(\n            'invalid footer signature at offset: 0x{0:08x}'.format(file_offset))\n\n    except (ValueError, errors.ParseError) as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to parse footer at offset: 0x{0:08x} with error: '\n          '{1!s}').format(file_offset, exception))", "docstring": "Parses a .customDestinations-ms file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def detect_gaps(dataframe, timestep, print_all=False, print_max=5, verbose=True):\n    gcount = 0\n    msg_counter = 0\n    warning_printed = False\n    try:\n        n = len(dataframe.index)\n    except:\n        print('Error: Invalid dataframe.')\n        return (- 1)\n    for i in range(0, n):\n        if (i > 0):\n            time_diff = (dataframe.index[i] - dataframe.index[(i - 1)])\n            if ((time_diff.delta / 1000000000.0) != timestep):\n                gcount += 1\n                if (print_all or (msg_counter <= (print_max - 1))):\n                    if verbose:\n                        print(('Warning: Gap in time series found between %s and %s' % (dataframe.index[(i - 1)], dataframe.index[i])))\n                    msg_counter += 1\n                if ((msg_counter == print_max) and verbose and (not warning_printed)):\n                    print(('Waring: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.' % msg_counter))\n                    warning_printed = True\n    if verbose:\n        print(('%i gaps found in total.' % gcount))\n    return gcount", "docstring": "checks if a given dataframe contains gaps and returns the number of gaps\n\nThis funtion checks if a dataframe contains any gaps for a given temporal\nresolution that needs to be specified in seconds. The number of gaps\ndetected in the dataframe is returned.\n\nArgs:\ndataframe: A pandas dataframe object with index defined as datetime\ntimestep (int): The temporal resolution of the time series in seconds\n(e.g., 86400 for daily values)\nprint_all (bool, opt): Lists every gap on the screen\nprint_mx (int, opt): The maximum number of gaps listed on the screen in\norder to avoid a decrease in performance if numerous gaps occur\nverbose (bool, opt): Enables/disables output to the screen\n\nReturns:\nThe number of gaps as integer. Negative values indicate errors.", "source": "codesearchnet"}
{"code": "def _parse_peer_address(self, config):\n    match = re.search('peer-address ([^\\\\s]+)', config)\n    value = (match.group(1) if match else None)\n    return dict(peer_address=value)", "docstring": "Scans the config block and parses the peer-address value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "codesearchnet"}
{"code": "def find_coord(targ_length,xyz,rcum,theta,phi):\n    \n    \n    \n    \n    i = np.nonzero(rcum <= targ_length)[0][-1]\n    if i == len(theta):\n        return xyz[-1,:]\n    else:\n        r_lcl = targ_length-rcum[i] \n        (dx,dy,dz) = spherical_to_cartesian(r_lcl,theta[i],phi[i])\n        return xyz[i,:] + [dx,dy,dz]", "docstring": "Find (x,y,z) ending coordinate of segment path along section\npath.\n\nArgs:\ntarg_length = scalar specifying length of segment path, starting\nfrom the begining of the section path\nxyz = coordinates specifying the section path\nrcum = cumulative sum of section path length at each node in xyz\ntheta, phi = angles between each coordinate in xyz", "source": "juraj-google-style"}
{"code": "def get_associated(self, retrieve=False):\n\n\t\t\n\n\t\tif self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasRelatedFile'):\n\t\t\tfiles = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasRelatedFile ]\n\n\t\t\t\n\t\t\treturn files\n\n\t\telse:\n\t\t\treturn []", "docstring": "get pcdm:hasRelatedFile for this resource\n\nArgs:\nretrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload", "source": "juraj-google-style"}
{"code": "def should_stop(self):\n    return self._stop_event.is_set()", "docstring": "Check if stop was requested.\n\nReturns:\nTrue if a stop was requested.", "source": "github-repos"}
{"code": "def get_component(self, component_name):\n    mapping = self.get_components()\n    return (mapping[component_name] if (component_name in mapping) else None)", "docstring": "Looks up a component by its name.\n\nArgs:\ncomponent_name: The name of the component to look up.\nReturns:\nThe component for the provided name or None if there is no such component.", "source": "codesearchnet"}
{"code": "def save_to_text_file(monsoon_data, file_path):\n        \n        if not monsoon_data:\n            raise MonsoonError(\"Attempting to write empty Monsoon data to \"\n                               \"file, abort\")\n        utils.create_dir(os.path.dirname(file_path))\n        with io.open(file_path, 'w', encoding='utf-8') as f:\n            for md in monsoon_data:\n                f.write(str(md))\n                f.write(MonsoonData.delimiter)", "docstring": "Save multiple MonsoonData objects to a text file.\n\nArgs:\nmonsoon_data: A list of MonsoonData objects to write to a text\nfile.\nfile_path: The full path of the file to save to, including the file\nname.", "source": "juraj-google-style"}
{"code": "async def init(self, name, conf=None):\n        \n        tank = self.tanks.get(name)\n        if tank is not None:\n            return tank\n\n        iden = s_common.guid()\n\n        logger.info('Creating new tank: %s', name)\n\n        path = s_common.genpath(self.dirn, 'tanks', iden)\n\n        tank = await CryoTank.anit(path, conf)\n\n        node = await self.names.open((name,))\n        await node.set((iden, conf))\n\n        self.tanks.put(name, tank)\n\n        return tank", "docstring": "Generate a new CryoTank with a given name or get an reference to an existing CryoTank.\n\nArgs:\nname (str): Name of the CryoTank.\n\nReturns:\nCryoTank: A CryoTank instance.", "source": "juraj-google-style"}
{"code": "def MoveStateToNextToken(self):\n    current = self.next_token\n    if not current.OpensScope() and (not current.ClosesScope()):\n        self.lowest_level_on_line = min(self.lowest_level_on_line, self.paren_level)\n    if current.OpensScope():\n        last = self.stack[-1]\n        new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space\n        self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))\n        self.paren_level += 1\n    if len(self.stack) > 1 and current.ClosesScope():\n        if subtypes.DICTIONARY_KEY_PART in current.subtypes:\n            self.stack[-2].last_space = self.stack[-2].indent\n        else:\n            self.stack[-2].last_space = self.stack[-1].last_space\n        self.stack.pop()\n        self.paren_level -= 1\n    is_multiline_string = current.is_string and '\\n' in current.value\n    if is_multiline_string:\n        self.column += len(current.value.split('\\n')[0])\n    elif not current.is_pseudo:\n        self.column += len(current.value)\n    self.next_token = self.next_token.next_token\n    penalty = 0\n    if not current.is_pylint_comment and (not current.is_pytype_comment) and (not current.is_copybara_comment) and (self.column > self.column_limit):\n        excess_characters = self.column - self.column_limit\n        penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters\n    if is_multiline_string:\n        self.column = len(current.value.split('\\n')[-1])\n    return penalty", "docstring": "Calculate format decision state information and move onto the next token.\n\nBefore moving onto the next token, we first calculate the format decision\nstate given the current token and its formatting decisions. Then the format\ndecision state is set up so that the next token can be added.\n\nReturns:\nThe penalty for the number of characters over the column limit.", "source": "github-repos"}
{"code": "def _binary_product(variables):\n    \n    multiplier, multiplicand, product = variables\n\n    return BinaryQuadraticModel({multiplier: 0.0,\n                                 multiplicand: 0.0,\n                                 product: 3.0},\n                                {(multiplier, multiplicand): 1.0,\n                                 (multiplier, product): -2.0,\n                                 (multiplicand, product): -2.0},\n                                0.0,\n                                Vartype.BINARY)", "docstring": "Create a bqm with a gap of 2 that represents the product of two variables.\n\nArgs:\nvariables (list):\nmultiplier, multiplicand, product\n\nReturns:\n:obj:`.BinaryQuadraticModel`", "source": "juraj-google-style"}
{"code": "def LessThanOrEqualTo(self, value):\n    self._awql = self._CreateSingleValueCondition(value, '<=')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"less than or equal to.\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def config_file(self, filename):\n    if os.path.isfile(filename):\n        with open(filename, 'r') as fh:\n            self._config_data = json.load(fh)\n    else:\n        self.tcex.log.error('Could not load configuration file \"{}\".'.format(filename))", "docstring": "Load configuration data from provided file and inject values into sys.argv.\n\nArgs:\nconfig (str): The configuration file name.", "source": "codesearchnet"}
{"code": "def recipe_email_dv360_to_bigquery(config, auth_read, email, subject, dataset, table, dbm_schema, is_incremental_load):\n    email(config, {'auth': auth_read, 'read': {'from': 'noreply-dv360@google.com', 'to': email, 'subject': subject, 'link': 'https:", "docstring": "Pulls a DV360 Report from a gMail email into BigQuery.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nemail (string) - Email address report was sent to.\nsubject (string) - Regular expression to match subject. Double escape backslashes.\ndataset (string) - Existing dataset in BigQuery.\ntable (string) - Name of table to be written to.\ndbm_schema (json) - Schema provided in JSON list format or empty list.\nis_incremental_load (boolean) - Append report data to table based on date column, de-duplicates.", "source": "github-repos"}
{"code": "def wrap_cached_variables(concrete_function):\n    outer_graph = func_graph_module.FuncGraph('{}_no_cache'.format(concrete_function.graph.name))\n    mapped_captures = None\n    remapped_captures = {}\n    with outer_graph.as_default():\n        for capture, placeholder in concrete_function.graph.captures:\n            cached_variable = getattr(capture, '_cached_variable', None)\n            if cached_variable is None:\n                continue\n            cached_variable = cached_variable()\n            new_cached_value = cached_variable.read_value()\n            key = id(capture)\n            external = concrete_function.graph.function_captures.by_val_external[key]\n            internal = concrete_function.graph.function_captures.by_val_internal[key]\n            remapped_captures[key] = [external, internal]\n            concrete_function.graph.function_captures.add_or_replace(key=key, external=new_cached_value, internal=placeholder, is_by_ref=False)\n            mapped_captures = True\n    if not mapped_captures:\n        return concrete_function\n    inner_concrete = defun.ConcreteFunction.from_func_graph(concrete_function.graph, concrete_function.function_type, {})\n\n    def wrap_function(*args):\n        return inner_concrete._call_flat(list(args), inner_concrete.captured_inputs)\n    args = nest.flatten(concrete_function.structured_input_signature, expand_composites=True)\n    func_graph_module.func_graph_from_py_func(None, wrap_function, args=tuple(args), kwargs={}, func_graph=outer_graph)\n    fn = defun.ConcreteFunction.from_func_graph(outer_graph, concrete_function.function_type, {})\n    fn._arg_keywords = concrete_function._arg_keywords\n    fn._num_positional_args = concrete_function._num_positional_args\n    for key, capture in remapped_captures.items():\n        external, internal = capture\n        concrete_function.graph._function_captures.add_or_replace(key=key, external=external, internal=internal, is_by_ref=False)\n    return fn", "docstring": "Wraps the concrete function if it uses cached read tensors.\n\nThis function creates a new concrete function that captures variables\ninstead of the cached read tensors.\n\nArgs:\nconcrete_function: A Concrete function that maybe captures cached read\ntensors.\n\nReturns:\nA concrete function that wraps the original concrete function, which\ncaptures variables instead. If the original function did not capture any\ncached values, then the function is not wrapped and the original object is\nreturned.", "source": "github-repos"}
{"code": "def print_table(self, stream=sys.stdout, filter_function=None):\n        \n        print(self.to_table(filter_function=filter_function), file=stream)", "docstring": "A pretty ASCII printer for the periodic table, based on some filter_function.\n\nArgs:\nstream: file-like object\nfilter_function:\nA filtering function that take a Pseudo as input and returns a boolean.\nFor example, setting filter_function = lambda p: p.Z_val > 2 will print\na periodic table containing only pseudos with Z_val > 2.", "source": "juraj-google-style"}
{"code": "def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:\n        \n        with self.graph.as_default():\n            with tf.name_scope('default_action'):\n                self._initialize_default_action_fluents()\n                if batch_size is None:\n                    return self.default_action_fluents\n                return self._compile_batch_fluents(self.default_action_fluents, batch_size)", "docstring": "Returns a tuple of tensors representing the default action fluents.\n\nArgs:\nbatch_size (int): The batch size.\n\nReturns:\nSequence[tf.Tensor]: A tuple of tensors.", "source": "juraj-google-style"}
{"code": "def num_tasks(self, job_name):\n    try:\n        job = self._cluster_spec[job_name]\n    except KeyError:\n        raise ValueError('No such job in cluster: %r' % job_name)\n    return len(job)", "docstring": "Returns the number of tasks defined in the given job.\n\nArgs:\njob_name: The string name of a job in this cluster.\n\nReturns:\nThe number of tasks defined in the given job.\n\nRaises:\nValueError: If `job_name` does not name a job in this cluster.", "source": "github-repos"}
{"code": "def from_pandas(pandas_df, dataset_class=dataset.pandas_dataset.PandasDataset, expectations_config=None, autoinspect_func=None):\n    return _convert_to_dataset_class(pandas_df, dataset_class, expectations_config, autoinspect_func)", "docstring": "Read a Pandas data frame and return a great_expectations dataset.\n\nArgs:\npandas_df (Pandas df): Pandas data frame\ndataset_class (Dataset class) = dataset.pandas_dataset.PandasDataset:\nclass to which to convert resulting Pandas df\nexpectations_config (string) = None: path to great_expectations config file\nautoinspect_func (function) = None: The autoinspection function that should\nbe run on the dataset to establish baseline expectations.\n\nReturns:\ngreat_expectations dataset", "source": "codesearchnet"}
{"code": "def deserialize(config, custom_objects=None):\n    if config['class_name'].lower() in ALL_OBJECTS_DICT:\n        config['class_name'] = config['class_name'].lower()\n    return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)", "docstring": "Returns a Keras optimizer object via its configuration.\n\nArgs:\nconfig: Optimizer configuration dictionary.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during\ndeserialization.\n\nReturns:\nA Keras Optimizer instance.", "source": "github-repos"}
{"code": "def get_cytoband_coordinates(chrom, pos):\n    \n    coordinate = \"\"\n\n    if chrom in CYTOBANDS:\n        for interval in CYTOBANDS[chrom][pos]:\n            coordinate = interval.data\n\n    return coordinate", "docstring": "Get the cytoband coordinate for a position\n\nArgs:\nchrom(str)\npos(int)\n\nReturns:\ncoordinate(str)", "source": "juraj-google-style"}
{"code": "def _is_default_hook(default_hook, hook):\n    \n    if not hasattr(default_hook, '__call__'):\n      raise TypeError('Default hooks for ndb.model.Model must be callable')\n    if not hasattr(hook, '__call__'):\n      raise TypeError('Hooks must be callable')\n    return default_hook.im_func is hook.im_func", "docstring": "Checks whether a specific hook is in its default state.\n\nArgs:\ncls: A ndb.model.Model class.\ndefault_hook: Callable specified by ndb internally (do not override).\nhook: The hook defined by a model class using _post_*_hook.\n\nRaises:\nTypeError if either the default hook or the tested hook are not callable.", "source": "juraj-google-style"}
{"code": "def _getGraphOpTypes(self, graphdef, output_nodes):\n    name_to_input_name, name_to_node, _ = _extract_graph_summary(graphdef)\n    used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)\n    return set([name_to_node[node_name].op for node_name in used_node_names])", "docstring": "Returns used op types in `graphdef` reachable from `output_nodes`.\n\nThis is used to check that after the stub transformation the expected\nnodes are there.\n\nNOTE: this is not a exact test that the graph is the correct output, but\nit balances compact expressibility of test with sanity checking.\n\nArgs:\ngraphdef: TensorFlow proto graphdef.\noutput_nodes: A list of output node names that we need to reach.\n\nReturns:\nA set of node types reachable from `output_nodes`.", "source": "github-repos"}
{"code": "def update_metadata(token: str, commit_sha: str):\n    frameworks_table = get_frameworks_table()\n    frameworks_dataset = Dataset.from_pandas(frameworks_table)\n    resolved_tags_file = hf_hub_download('huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token)\n    tags_dataset = Dataset.from_json(resolved_tags_file)\n    table = {tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class']) for i in range(len(tags_dataset))}\n    table = update_pipeline_and_auto_class_table(table)\n    model_classes = sorted(table.keys())\n    tags_table = pd.DataFrame({'model_class': model_classes, 'pipeline_tag': [table[m][0] for m in model_classes], 'auto_class': [table[m][1] for m in model_classes]})\n    tags_dataset = Dataset.from_pandas(tags_table)\n    hub_frameworks_json = hf_hub_download(repo_id='huggingface/transformers-metadata', filename='frameworks.json', repo_type='dataset', token=token)\n    with open(hub_frameworks_json) as f:\n        hub_frameworks_json = f.read()\n    hub_pipeline_tags_json = hf_hub_download(repo_id='huggingface/transformers-metadata', filename='pipeline_tags.json', repo_type='dataset', token=token)\n    with open(hub_pipeline_tags_json) as f:\n        hub_pipeline_tags_json = f.read()\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))\n        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))\n        with open(os.path.join(tmp_dir, 'frameworks.json')) as f:\n            frameworks_json = f.read()\n        with open(os.path.join(tmp_dir, 'pipeline_tags.json')) as f:\n            pipeline_tags_json = f.read()\n        frameworks_equal = hub_frameworks_json == frameworks_json\n        hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json\n        if frameworks_equal and hub_pipeline_tags_equal:\n            print('No updates on the Hub, not pushing the metadata files.')\n            return\n        if commit_sha is not None:\n            commit_message = f'Update with commit {commit_sha}\\n\\nSee: https:\n        else:\n            commit_message = 'Update'\n        upload_folder(repo_id='huggingface/transformers-metadata', folder_path=tmp_dir, repo_type='dataset', token=token, commit_message=commit_message)", "docstring": "Update the metadata for the Transformers repo in `huggingface/transformers-metadata`.\n\nArgs:\ntoken (`str`): A valid token giving write access to `huggingface/transformers-metadata`.\ncommit_sha (`str`): The commit SHA on Transformers corresponding to this update.", "source": "github-repos"}
{"code": "def line_intersection(self, point1, point2, tolerance=1e-8):\n        \n        b1 = self.bary_coords(point1)\n        b2 = self.bary_coords(point2)\n        l = b1 - b2\n        \n        valid = np.abs(l) > 1e-10\n        \n        \n        possible = b1 - (b1[valid] / l[valid])[:, None] * l\n        barys = []\n        for p in possible:\n            \n            if (p >= -tolerance).all():\n                found = False\n                \n                for b in barys:\n                    if np.all(np.abs(b - p) < tolerance):\n                        found = True\n                        break\n                if not found:\n                    barys.append(p)\n        assert len(barys) < 3\n        return [self.point_from_bary_coords(b) for b in barys]", "docstring": "Computes the intersection points of a line with a simplex\nArgs:\npoint1, point2 ([float]): Points that determine the line\nReturns:\npoints where the line intersects the simplex (0, 1, or 2)", "source": "juraj-google-style"}
{"code": "def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'):\n        \n\n        query = urlencode(query)\n\n        hmac_str = (method + '\\n' + nonce + '\\n' + date + '\\n' + ctype + '\\n' + path +\n                    '\\n' + query + '\\n').lower().encode('utf-8')\n\n        signature = base64.b64encode(hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest())\n        auth = 'On ' + self._access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8')\n\n        if self._logging:\n            utils.log({\n                'query': query,\n                'hmac_str': hmac_str,\n                'signature': signature,\n                'auth': auth\n            })\n\n        return auth", "docstring": "Create the request signature to authenticate\n\nArgs:\n- method (str): HTTP method\n- date (str): HTTP date header string\n- nonce (str): Cryptographic nonce\n- path (str): URL pathname\n- query (dict, default={}): URL query string in key-value pairs\n- ctype (str, default='application/json'): HTTP Content-Type", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)", "docstring": "Returns a tensor object initialized to random normal values.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point types are\nsupported. If not specified, `tf.keras.backend.floatx()` is used, which\ndefault to `float32` unless you configured it otherwise (via\n`tf.keras.backend.set_floatx(float_dtype)`)\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def get_folders(cls, session, mailbox_or_id):\n        \n        if isinstance(mailbox_or_id, Mailbox):\n            mailbox_or_id = mailbox_or_id.id\n        return cls(\n            '/mailboxes/%d/folders.json' % mailbox_or_id,\n            session=session,\n            out_type=Folder,\n        )", "docstring": "List the folders for the mailbox.\n\nArgs:\nmailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID\nof the mailbox to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Folder): Folders\niterator.", "source": "juraj-google-style"}
{"code": "class _IndicatorColumn(_DenseColumn, _SequenceDenseColumn, collections.namedtuple('_IndicatorColumn', ['categorical_column'])):\n\n    @property\n    def name(self):\n        return '{}_indicator'.format(self.categorical_column.name)\n\n    def _transform_feature(self, inputs):\n        \n        id_weight_pair = self.categorical_column._get_sparse_tensors(inputs)\n        id_tensor = id_weight_pair.id_tensor\n        weight_tensor = id_weight_pair.weight_tensor\n        if weight_tensor is not None:\n            weighted_column = sparse_ops.sparse_merge(sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(self._variable_shape[-1]))\n            weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0], weighted_column.dense_shape)\n            return array_ops.scatter_nd(weighted_column.indices, weighted_column.values, weighted_column.dense_shape)\n        dense_id_tensor = sparse_ops.sparse_tensor_to_dense(id_tensor, default_value=-1)\n        one_hot_id_tensor = array_ops.one_hot(dense_id_tensor, depth=self._variable_shape[-1], on_value=1.0, off_value=0.0)\n        return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])\n\n    @property\n    def _parse_example_spec(self):\n        return self.categorical_column._parse_example_spec\n\n    @property\n    def _variable_shape(self):\n        \n        return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])\n\n    def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n        \n        del weight_collections\n        del trainable\n        if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n            raise ValueError('In indicator_column: {}. categorical_column must not be of type _SequenceCategoricalColumn. Suggested fix A: If you wish to use input_layer, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use sequence_input_layer instead of input_layer. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n        return inputs.get(self)\n\n    def _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n        del weight_collections\n        del trainable\n        if not isinstance(self.categorical_column, _SequenceCategoricalColumn):\n            raise ValueError('In indicator_column: {}. categorical_column must be of type _SequenceCategoricalColumn to use sequence_input_layer. Suggested fix: Use one of sequence_categorical_column_with_*. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n        dense_tensor = inputs.get(self)\n        sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)\n        sequence_length = fc_utils.sequence_length_from_sparse_tensor(sparse_tensors.id_tensor)\n        return _SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)", "docstring": "Represents a one-hot column for use in deep networks.\n\nArgs:\ncategorical_column: A `_CategoricalColumn` which is created by\n`categorical_column_with_*` function.", "source": "github-repos"}
{"code": "def bin(self, bins, labels=None):\n    return dim(self, bin, bins, labels=labels)", "docstring": "Bins continuous values.\n\nBins continuous using the provided bins and assigns labels\neither computed from each bins center point or from the\nsupplied labels.\n\nArgs:\nbins: List or array containing the bin boundaries\nlabels: List of labels to assign to each bin\nIf the bins are length N the labels should be length N-1", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(StateTransaction, self).__init__(*args, **kwargs)\n\n        self.Type = TransactionType.StateTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def authenticate(self, request):\n        \n        \n        request = request._request  \n        user = getattr(request, 'user', None)\n\n        \n        \n        \n        \n        if not user or user.is_anonymous:\n            return None\n\n        self.enforce_csrf(request)\n\n        \n        return (user, None)", "docstring": "Authenticate the user, requiring a logged-in account and CSRF.\n\nThis is exactly the same as the `SessionAuthentication` implementation,\nwith the `user.is_active` check removed.\n\nArgs:\nrequest (HttpRequest)\n\nReturns:\nTuple of `(user, token)`\n\nRaises:\nPermissionDenied: The CSRF token check failed.", "source": "juraj-google-style"}
{"code": "def get(self, key, index=None):\n    records = self.get_multi([key], index=index)\n    try:\n        return records[0][1]\n    except IndexError:\n        return None", "docstring": "Retrieves a value associated with a key from the database\n\nArgs:\nkey (str): The key to retrieve", "source": "codesearchnet"}
{"code": "def concurrence(state):\n    \n    rho = np.array(state)\n    if rho.ndim == 1:\n        rho = outer(state)\n    if len(state) != 4:\n        raise Exception(\"Concurrence is only defined for more than two qubits\")\n\n    YY = np.fliplr(np.diag([-1, 1, 1, -1]))\n    A = rho.dot(YY).dot(rho.conj()).dot(YY)\n    w = la.eigh(A, eigvals_only=True)\n    w = np.sqrt(np.maximum(w, 0))\n    return max(0.0, w[-1] - np.sum(w[0:-1]))", "docstring": "Calculate the concurrence.\n\nArgs:\nstate (np.array): a quantum state (1x4 array) or a density matrix (4x4\narray)\nReturns:\nfloat: concurrence.\nRaises:\nException: if attempted on more than two qubits.", "source": "juraj-google-style"}
{"code": "def __init__(self, command=None, payload=None, print_payload=False):\n        \n        self.Command = command\n        self.Magic = settings.MAGIC\n\n        if payload is None:\n            payload = bytearray()\n        else:\n            payload = binascii.unhexlify(Helper.ToArray(payload))\n\n        self.Checksum = Message.GetChecksum(payload)\n        self.Payload = payload\n\n        if print_payload:\n            logger.info(\"PAYLOAD: %s \" % self.Payload)", "docstring": "Create an instance.\n\nArgs:\ncommand (str): payload command e.g. \"inv\", \"getdata\". See NeoNode.MessageReceived() for more commands.\npayload (bytes): raw bytes of the payload.\nprint_payload: UNUSED", "source": "juraj-google-style"}
{"code": "def List(self, device_path):\n    connection = self.protocol_handler.Open(self._handle, destination=b'sync:')\n    listing = self.filesync_handler.List(connection, device_path)\n    connection.Close()\n    return listing", "docstring": "Return a directory listing of the given path.\n\nArgs:\ndevice_path: Directory to list.", "source": "codesearchnet"}
{"code": "def update_args(self, args):\n    for arg in vars(args):\n        if (self.get(arg) and (getattr(args, arg) is not None)):\n            self._config[self.root_section][arg] = getattr(args, arg)", "docstring": "Update config dictionary with parsed args, as resolved by argparse.\nOnly root positional arguments that already exist will overridden.\n\nArgs:\nargs (namespace): args parsed by argparse", "source": "codesearchnet"}
{"code": "def make_request(url, data, on_complete):\n    req = ajax.ajax()\n    req.bind('complete', on_complete)\n    req.open('POST', url, True)\n    req.set_header('content-type', 'application/x-www-form-urlencoded')\n    req.send(data)", "docstring": "Make AJAX request to `url` with given POST `data`. Call `on_complete`\ncallback when complete.\n\nArgs:\nurl (str): URL.\ndata (dict): Dictionary with POST data.\non_complete (ref): Reference to function / method which will be called\nwhen the request is done.", "source": "codesearchnet"}
{"code": "def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):\n    _get_fashion_mnist(tmp_dir)\n    d = (_FASHION_MNIST_LOCAL_FILE_PREFIX + (_MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME))\n    l = (_FASHION_MNIST_LOCAL_FILE_PREFIX + (_MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME))\n    return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)", "docstring": "Image generator for FashionMNIST.\n\nArgs:\ntmp_dir: path to temporary storage directory.\ntraining: a Boolean; if true, we use the train set, otherwise the test set.\nhow_many: how many images and labels to generate.\nstart_from: from which image to start.\n\nReturns:\nAn instance of image_generator that produces MNIST images.", "source": "codesearchnet"}
{"code": "def cost(self, t_node, branch_length, multiplicity=2.0):\n        \n        merger_time = t_node+branch_length\n        return self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)\\\n                 - np.log(self.total_merger_rate(merger_time))*(multiplicity-1.0)/multiplicity", "docstring": "returns the cost associated with a branch starting at t_node\nt_node is time before present, the branch goes back in time\n\nArgs:\n- t_node:           time of the node\n- branch_length:    branch length, determines when this branch merges with sister\n- multiplicity:     2 if merger is binary, higher if this is a polytomy", "source": "juraj-google-style"}
{"code": "def _get_max_page(dom):\n    \n    div = dom.find(\"div\", {\"class\": \"razeniKnihListovani\"})\n\n    if not div:\n        return 1\n\n    \n    links = div[0].find(\"a\")\n    max_page = filter(\n        lambda x: \"href\" in x.params and \"pageindex=\" in x.params[\"href\"],\n        links\n    )\n    max_page = map(\n        lambda x: x.params[\"href\"].split(\"pageindex=\")[-1],\n        max_page\n    )\n    max_page = filter(lambda x: x.isdigit(), max_page)\n    max_page = map(lambda x: int(x), max_page)\n\n    if not max_page:\n        return 1\n\n    return max(max_page)", "docstring": "Try to guess how much pages are in book listing.\n\nArgs:\ndom (obj): HTMLElement container of the page with book list.\n\nReturns:\nint: Number of pages for given category.", "source": "juraj-google-style"}
{"code": "def get_gates(self, x):\n    x = tf.stop_gradient(x)\n    x = tf.matmul(x, self.t_vectors)\n    x = tf.sign(x)\n    x = (tf.matmul(x, self.t_group, transpose_b=True) / self.nb_hyperplanes)\n    x = tf.argmax(x, axis=(- 1))\n    x = tf.one_hot(x, self.nb_buckets)\n    return x", "docstring": "Return the bucket id of the given tensor.\n\nArgs:\nx (tf.Tensor): float32 of shape [length, depth]\n\nReturns:\ntf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]\ncontaining the id of the bucket", "source": "codesearchnet"}
{"code": "def check_channel(fcn):\n    \n\n    def wrapper(*args, **kwargs):\n        if not isinstance(args[1], ChannelResource):\n            raise RuntimeError('resource must be an instance of intern.resource.boss.ChannelResource.')\n\n        if not args[1].cutout_ready:\n            raise PartialChannelResourceError(\n                    'ChannelResource not fully initialized.  Use intern.remote.BossRemote.get_channel({}, {}, {})'.format(\n                        args[1].name, args[1].coll_name, args[1].exp_name))\n        return fcn(*args, **kwargs)\n\n    return wrapper", "docstring": "Decorator that ensures a valid channel passed in.\n\nArgs:\nfcn (function): Function that has a ChannelResource as its second argument.\n\nReturns:\n(function): Wraps given function with one that checks for a valid channel.", "source": "juraj-google-style"}
{"code": "def __init__(self, cell):\n    self._cell = cell", "docstring": "Creates a new SamplerCell.\n\nArgs:\ncell: A c pointer of TFE_MonitoringSamplerCell.", "source": "github-repos"}
{"code": "def nic_v1(msg, NICs):\n    if ((typecode(msg) < 5) or (typecode(msg) > 22)):\n        raise RuntimeError(('%s: Not a surface position message (5<TC<8),             airborne position message (8<TC<19),             or airborne position with GNSS height (20<TC<22)' % msg))\n    tc = typecode(msg)\n    NIC = uncertainty.TC_NICv1_lookup[tc]\n    if isinstance(NIC, dict):\n        NIC = NIC[NICs]\n    try:\n        Rc = uncertainty.NICv1[NIC][NICs]['Rc']\n        VPL = uncertainty.NICv1[NIC][NICs]['VPL']\n    except KeyError:\n        (Rc, VPL) = (uncertainty.NA, uncertainty.NA)\n    return (Rc, VPL)", "docstring": "Calculate NIC, navigation integrity category, for ADS-B version 1\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\nNICs (int or string): NIC supplement\n\nReturns:\nint or string: Horizontal Radius of Containment\nint or string: Vertical Protection Limit", "source": "codesearchnet"}
{"code": "def create_impression_event(self, experiment, variation_id, user_id, attributes):\n    params = self._get_common_params(user_id, attributes)\n    impression_params = self._get_required_params_for_impression(experiment, variation_id)\n    params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(impression_params)\n    return Event(self.EVENTS_URL, params, http_verb=self.HTTP_VERB, headers=self.HTTP_HEADERS)", "docstring": "Create impression Event to be sent to the logging endpoint.\n\nArgs:\nexperiment: Experiment for which impression needs to be recorded.\nvariation_id: ID for variation which would be presented to user.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nEvent object encapsulating the impression event.", "source": "codesearchnet"}
{"code": "def is_ordered(cat_id):\n    url = 'https:\n    auth = Auth()\n    r = _req_with_retries(auth.gbdx_connection, url)\n    if (r is not None):\n        return (r.status_code == 200)\n    return False", "docstring": "Checks to see if a CatalogID has been ordered or not.\n\nArgs:\ncatalogID (str): The catalog ID from the platform catalog.\nReturns:\nordered (bool): Whether or not the image has been ordered", "source": "codesearchnet"}
{"code": "def _respond(self, channel, text):\n    result = self._format_message(channel, text)\n    if (result is not None):\n        logger.info('Sending message: %r', truncate(result, max_len=50))\n    self.socket.send_str(result)", "docstring": "Respond to a message on the current socket.\n\nArgs:\nchannel (:py:class:`str`): The channel to send to.\ntext (:py:class:`str`): The message text to send.", "source": "codesearchnet"}
{"code": "def if_callable_call_with_formatted_string(callback, formattable_string, *args):\n    \n    try:\n        formatted_string = formattable_string.format(*args)\n    except IndexError:\n        raise ValueError(\"Mismatch metween amount of insertion points in the formattable string\\n\"\n                         \"and the amount of args given.\")\n    if callable(callback):\n        callback(formatted_string)", "docstring": "If the callback is callable, format the string with the args and make a call.\nOtherwise, do nothing.\n\nArgs:\ncallback (function): May or may not be callable.\nformattable_string (str): A string with '{}'s inserted.\n*args: A variable amount of arguments for the string formatting. Must correspond to the\namount of '{}'s in 'formattable_string'.\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context):\n    \n    super(TSKFile, self).__init__(resolver_context)\n    self._current_offset = 0\n    self._file_system = None\n    self._size = 0\n    self._tsk_attribute = None\n    self._tsk_file = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "async def cancel(self, task: asyncio.Task, wait_for: bool=True) -> Any:\n    if (task is None):\n        return\n    task.cancel()\n    with suppress(KeyError):\n        self._tasks.remove(task)\n    with suppress(Exception):\n        return ((await task) if wait_for else None)", "docstring": "Cancels and waits for an `asyncio.Task` to finish.\nRemoves it from the collection of managed tasks.\n\nArgs:\ntask (asyncio.Task):\nThe to be cancelled task.\nIt is not required that the task was was created with `TaskScheduler.create_task()`.\n\nwait_for (bool, optional):\nWhether to wait for the task to finish execution.\nIf falsey, this function returns immediately after cancelling the task.\n\nReturns:\nAny: The return value of `task`. None if `wait_for` is falsey.", "source": "codesearchnet"}
{"code": "def on(self, day, strict=False):\n        \n        day_start, day_stop = day.floor('day').span('day')\n        if strict:\n            return self.included(day_start, day_stop)\n        else:\n            return self.overlapping(day_start, day_stop)", "docstring": "Iterates (in chronological order) over all events that occurs on `day`\n\nArgs:\nday (Arrow object)\nstrict (bool): if True events will be returned only if they are\\\nstrictly *included* in `day`.", "source": "juraj-google-style"}
{"code": "def execute_forever(method, interval_s):  \n  \n  interval = Interval(method)\n  interval.start(interval_s)\n  return interval", "docstring": "Executes a method forever at the specified interval.\n\nArgs:\nmethod: The callable to execute.\ninterval_s: The number of seconds to start the execution after each method\nfinishes.\nReturns:\nAn Interval object.", "source": "juraj-google-style"}
{"code": "def get_experiment_from_id(self, experiment_id):\n    experiment = self.experiment_id_map.get(experiment_id)\n    if experiment:\n        return experiment\n    self.logger.error(('Experiment ID \"%s\" is not in datafile.' % experiment_id))\n    self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n    return None", "docstring": "Get experiment for the provided experiment ID.\n\nArgs:\nexperiment_id: Experiment ID for which experiment is to be determined.\n\nReturns:\nExperiment corresponding to the provided experiment ID.", "source": "codesearchnet"}
{"code": "def create_elb_dns(self, regionspecific=False):\n        \n        if regionspecific:\n            dns_elb = self.generated.dns()['elb_region']\n        else:\n            dns_elb = self.generated.dns()['elb']\n\n        dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)\n\n        zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)\n\n        self.log.info('Updating Application URL: %s', dns_elb)\n\n        dns_kwargs = {\n            'dns_name': dns_elb,\n            'dns_name_aws': dns_elb_aws,\n            'dns_ttl': self.dns_ttl,\n        }\n\n        for zone_id in zone_ids:\n            self.log.debug('zone_id: %s', zone_id)\n            update_dns_zone_record(self.env, zone_id, **dns_kwargs)\n\n        return dns_elb", "docstring": "Create dns entries in route53.\n\nArgs:\nregionspecific (bool): The DNS entry should have region on it\nReturns:\nstr: Auto-generated DNS name for the Elastic Load Balancer.", "source": "juraj-google-style"}
{"code": "def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):\n\n    def _callback(downloaded, total):\n        '\\n        Call function for upload.\\n\\n        `downloaded`: File size already downloaded (int)\\n\\n        `total`: Total file size to be downloaded (int)\\n        '\n        if ((total is 0) or (downloaded == total)):\n            return\n        progress = ((downloaded * 100) / total)\n        sys.stderr.write('\\r[{0}] {1}%'.format(('\n        sys.stderr.flush()\n    m = _URI_RE.match(s3_path)\n    bucket_name = m.group(1)\n    bucket = boto_conn.get_bucket(bucket_name)\n    retries = 6\n    if (s3_path.endswith('/') is False):\n        key_name = m.group(2)\n        key_instance = bucket.get_key(key_name)\n        while ((key_instance is None) and (retries > 0)):\n            retries = (retries - 1)\n            log.info(('Results file is not available on s3. Retry: ' + str((6 - retries))))\n            time.sleep(10)\n            key_instance = bucket.get_key(key_name)\n        if (key_instance is None):\n            raise Exception('Results file not available on s3 yet. This can be because of s3 eventual consistency issues.')\n        log.info(('Downloading file from %s' % s3_path))\n        if (delim is None):\n            try:\n                key_instance.get_contents_to_file(fp)\n            except boto.exception.S3ResponseError as e:\n                if (e.status == 403):\n                    log.warn('Access denied while fetching the s3 object. Retrying without specifying the version....')\n                    key_instance.open()\n                    fp.write(key_instance.read())\n                    key_instance.close()\n                else:\n                    raise\n        else:\n            _read_iteratively(key_instance, fp, delim=delim)\n    else:\n        key_prefix = m.group(2)\n        bucket_paths = bucket.list(key_prefix)\n        for one_path in bucket_paths:\n            name = one_path.name\n            if name.endswith('$folder$'):\n                continue\n            log.info(('Downloading file from %s' % name))\n            if (delim is None):\n                one_path.get_contents_to_file(fp)\n            else:\n                _read_iteratively(one_path, fp, delim=delim)", "docstring": "Downloads the contents of all objects in s3_path into fp\n\nArgs:\n`boto_conn`: S3 connection object\n\n`s3_path`: S3 path to be downloaded\n\n`fp`: The file object where data is to be downloaded", "source": "codesearchnet"}
{"code": "def CallNtpdate(logger):\n    ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])\n    try:\n        if (not ntpd_inactive):\n            subprocess.check_call(['service', 'ntpd', 'stop'])\n        subprocess.check_call('ntpdate `awk \\'$1==\"server\" {print $2}\\' /etc/ntp.conf`', shell=True)\n        if (not ntpd_inactive):\n            subprocess.check_call(['service', 'ntpd', 'start'])\n    except subprocess.CalledProcessError:\n        logger.warning('Failed to sync system time with ntp server.')\n    else:\n        logger.info('Synced system time with ntp server.')", "docstring": "Sync clock using ntpdate.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"}
{"code": "def ion_equals(a, b, timestamps_instants_only=False):\n    if timestamps_instants_only:\n        return _ion_equals_timestamps_instants(a, b)\n    return _ion_equals_timestamps_data_model(a, b)", "docstring": "Tests two objects for equivalence under the Ion data model.\n\nThere are three important cases:\n* When neither operand specifies its `ion_type` or `annotations`, this method will only return True when the\nvalues of both operands are equivalent under the Ion data model.\n* When only one of the operands specifies its `ion_type` and `annotations`, this method will only return True\nwhen that operand has no annotations and has a value equivalent to the other operand under the Ion data model.\n* When both operands specify `ion_type` and `annotations`, this method will only return True when the ion_type\nand annotations of both are the same and their values are equivalent under the Ion data model.\n\nNote that the order of the operands does not matter.\n\nArgs:\na (object): The first operand.\nb (object): The second operand.\ntimestamps_instants_only (Optional[bool]): False if timestamp objects (datetime and its subclasses) should be\ncompared according to the Ion data model (where the instant, precision, and offset must be equal); True\nif these objects should be considered equivalent if they simply represent the same instant.", "source": "codesearchnet"}
{"code": "def generate_full_symmops(symmops, tol):\n    UNIT = np.eye(4)\n    generators = [op.affine_matrix for op in symmops if (not np.allclose(op.affine_matrix, UNIT))]\n    if (not generators):\n        return symmops\n    else:\n        full = list(generators)\n        for g in full:\n            for s in generators:\n                op = np.dot(g, s)\n                d = (np.abs((full - op)) < tol)\n                if (not np.any(np.all(np.all(d, axis=2), axis=1))):\n                    full.append(op)\n        d = (np.abs((full - UNIT)) < tol)\n        if (not np.any(np.all(np.all(d, axis=2), axis=1))):\n            full.append(UNIT)\n        return [SymmOp(op) for op in full]", "docstring": "Recursive algorithm to permute through all possible combinations of the\ninitially supplied symmetry operations to arrive at a complete set of\noperations mapping a single atom to all other equivalent atoms in the\npoint group.  This assumes that the initial number already uniquely\nidentifies all operations.\n\nArgs:\nsymmops ([SymmOp]): Initial set of symmetry operations.\n\nReturns:\nFull set of symmetry operations.", "source": "codesearchnet"}
{"code": "def get_http_json(self, url=None, retry_count=3, rate_limit_timeout=120, headers=None):\n    if (headers is None):\n        headers = {'Accept': 'application/rdap+json'}\n    try:\n        log.debug('HTTP query for {0} at {1}'.format(self.address_str, url))\n        conn = Request(url, headers=headers)\n        data = self.opener.open(conn, timeout=self.timeout)\n        try:\n            d = json.loads(data.readall().decode('utf-8', 'ignore'))\n        except AttributeError:\n            d = json.loads(data.read().decode('utf-8', 'ignore'))\n        try:\n            for tmp in d['notices']:\n                if (tmp['title'] == 'Rate Limit Notice'):\n                    log.debug('RDAP query rate limit exceeded.')\n                    if (retry_count > 0):\n                        log.debug('Waiting {0} seconds...'.format(str(rate_limit_timeout)))\n                        sleep(rate_limit_timeout)\n                        return self.get_http_json(url=url, retry_count=(retry_count - 1), rate_limit_timeout=rate_limit_timeout, headers=headers)\n                    else:\n                        raise HTTPRateLimitError('HTTP lookup failed for {0}. Rate limit exceeded, wait and try again (possibly a temporary block).'.format(url))\n        except (KeyError, IndexError):\n            pass\n        return d\n    except HTTPError as e:\n        if (e.code == 429):\n            log.debug('HTTP query rate limit exceeded.')\n            if (retry_count > 0):\n                log.debug('Waiting {0} seconds...'.format(str(rate_limit_timeout)))\n                sleep(rate_limit_timeout)\n                return self.get_http_json(url=url, retry_count=(retry_count - 1), rate_limit_timeout=rate_limit_timeout, headers=headers)\n            else:\n                raise HTTPRateLimitError('HTTP lookup failed for {0}. Rate limit exceeded, wait and try again (possibly a temporary block).'.format(url))\n        else:\n            raise HTTPLookupError('HTTP lookup failed for {0} with error code {1}.'.format(url, str(e.code)))\n    except (URLError, socket.timeout, socket.error) as e:\n        log.debug('HTTP query socket error: {0}'.format(e))\n        if (retry_count > 0):\n            log.debug('HTTP query retrying (count: {0})'.format(str(retry_count)))\n            return self.get_http_json(url=url, retry_count=(retry_count - 1), rate_limit_timeout=rate_limit_timeout, headers=headers)\n        else:\n            raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))\n    except (HTTPLookupError, HTTPRateLimitError) as e:\n        raise e\n    except:\n        raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))", "docstring": "The function for retrieving a json result via HTTP.\n\nArgs:\nurl (:obj:`str`): The URL to retrieve (required).\nretry_count (:obj:`int`): The number of times to retry in case\nsocket errors, timeouts, connection resets, etc. are\nencountered. Defaults to 3.\nrate_limit_timeout (:obj:`int`): The number of seconds to wait\nbefore retrying when a rate limit notice is returned via\nrdap+json or HTTP error 429. Defaults to 60.\nheaders (:obj:`dict`): The HTTP headers. The Accept header\ndefaults to 'application/rdap+json'.\n\nReturns:\ndict: The data in json format.\n\nRaises:\nHTTPLookupError: The HTTP lookup failed.\nHTTPRateLimitError: The HTTP request rate limited and retries\nwere exhausted.", "source": "codesearchnet"}
{"code": "def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if len(strides) != 2:\n        raise ValueError('`strides` must be a tuple of 2 integers.')\n    x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    if not isinstance(strides, tuple):\n        strides = tuple(strides)\n    if tf_data_format == 'NHWC':\n        strides = (1,) + strides + (1,)\n    else:\n        strides = (1, 1) + strides\n    x = nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NHWC':\n        x = array_ops.transpose(x, (0, 3, 1, 2))\n    return x", "docstring": "2D convolution with separable filters.\n\nArgs:\nx: input tensor\ndepthwise_kernel: convolution kernel for the depthwise convolution.\npointwise_kernel: kernel for the 1x1 convolution.\nstrides: strides tuple (length 2).\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\ndilation_rate: tuple of integers,\ndilation rates for the separable convolution.\n\nReturns:\nOutput tensor.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.\nValueError: if `strides` is not a tuple of 2 integers.", "source": "github-repos"}
{"code": "def __init__(self, *compressed_files, **kwargs):\n        \n        self._files = []\n        self._prefixes = defaultdict(lambda: set(['']))\n        self._extract = kwargs.get('extract', False)\n        self._supersede = kwargs.get('supersede', False)\n        self._match_version = kwargs.get('_match_version', True)\n        self._local_warned = False\n\n        for f in compressed_files:\n            if isinstance(f, zipfile.ZipFile):\n                bin_package = any(n.endswith('.so') or n.endswith('.pxd') or n.endswith('.dylib')\n                                  for n in f.namelist())\n                need_extract = True\n            elif isinstance(f, tarfile.TarFile):\n                bin_package = any(m.name.endswith('.so') or m.name.endswith('.pxd') or m.name.endswith('.dylib')\n                                  for m in f.getmembers())\n                need_extract = True\n            elif isinstance(f, dict):\n                bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib')\n                                  for name in iterkeys(f))\n                need_extract = False\n            elif isinstance(f, list):\n                bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib')\n                                  for name in f)\n                need_extract = False\n            else:\n                raise TypeError('Compressed file can only be zipfile.ZipFile or tarfile.TarFile')\n\n            if bin_package:\n                if not ALLOW_BINARY:\n                    raise SystemError('Cannot load binary package. It is quite possible that you are using an old '\n                                      'MaxCompute service which does not support binary packages. 
If this is '\n                                      'not true, please set `odps.isolation.session.enable` to True or ask your '\n                                      'project owner to change project-level configuration.')\n                if need_extract:\n                    f = self._extract_archive(f)\n\n            prefixes = set([''])\n            dir_prefixes = set()\n            if isinstance(f, zipfile.ZipFile):\n                for name in f.namelist():\n                    name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/')\n                    if name in prefixes:\n                        continue\n                    try:\n                        f.getinfo(name + '__init__.py')\n                    except KeyError:\n                        prefixes.add(name)\n            elif isinstance(f, tarfile.TarFile):\n                for member in f.getmembers():\n                    name = member.name if member.isdir() else member.name.rsplit('/', 1)[0]\n                    if name in prefixes:\n                        continue\n                    try:\n                        f.getmember(name + '/__init__.py')\n                    except KeyError:\n                        prefixes.add(name + '/')\n            elif isinstance(f, (list, dict)):\n                \n                \n                if ALLOW_BINARY:\n                    bin_package = True\n\n                rendered_names = set()\n                for name in f:\n                    name = name.replace(os.sep, '/')\n                    rendered_names.add(name)\n\n                for name in rendered_names:\n                    name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/')\n                    if name in prefixes or '/tests/' in name:\n                        continue\n                    if name + '__init__.py' not in rendered_names:\n                        prefixes.add(name)\n                        dir_prefixes.add(name)\n                    else:\n                        if '/' in name.rstrip('/'):\n                            ppath = name.rstrip('/').rsplit('/', 1)[0]\n                        else:\n                            ppath = ''\n                        prefixes.add(ppath)\n                        dir_prefixes.add(ppath)\n\n            if bin_package:\n                path_patch = []\n                for p in sorted(dir_prefixes):\n                    if p in sys.path:\n                        continue\n                    parent_exist = False\n                    for pp in path_patch:\n                        if p[:len(pp)] == pp:\n                            parent_exist = True\n                            break\n                    if parent_exist:\n                        continue\n                    path_patch.append(p)\n                if self._supersede:\n                    sys.path = path_patch + sys.path\n                else:\n                    sys.path = sys.path + path_patch\n            else:\n                self._files.append(f)\n                if prefixes:\n                    self._prefixes[id(f)] = sorted(prefixes)", "docstring": "Constructor.\n\nArgs:\ncompressed_files zipfile.ZipFile or tarfile.TarFile", "source": "juraj-google-style"}
{"code": "def all_function(function: _evaluation.AllFunction, operand_result: Optional[_sql_data_types.IdentifierSelect], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    sql_alias = 'all_'\n    sql_data_type = _sql_data_types.Boolean\n    if not operand_result or not params_result:\n        return _sql_data_types.Select(select_part=_sql_data_types.RawExpression('TRUE', _sql_alias=sql_alias, _sql_data_type=_sql_data_types.Boolean), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)\n    else:\n        criteria = list(params_result)[0]\n        context_sql = None\n        where_part = None\n        if _fhir_path_data_types.is_collection(function.parent_node.return_type):\n            context_sql = operand_result.from_part\n            where_part = operand_result.where_part\n        else:\n            context_sql = str(operand_result.to_subquery())\n        criteria_sql = _sql_data_types.RawExpression(criteria.as_operand(), _sql_alias=sql_alias, _sql_data_type=sql_data_type).to_subquery()\n        internal_if_null_call = _sql_data_types.FunctionCall('IFNULL', [criteria_sql, 'FALSE'], _sql_alias=sql_alias, _sql_data_type=sql_data_type)\n        logical_and_call = _sql_data_types.FunctionCall('BOOL_AND', (internal_if_null_call,), _sql_alias=sql_alias, _sql_data_type=sql_data_type)\n        return _sql_data_types.Select(select_part=_sql_data_types.FunctionCall('IFNULL', [logical_and_call, 'TRUE'], _sql_alias=sql_alias, _sql_data_type=sql_data_type), from_part=context_sql, where_part=where_part, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Generates Spark SQL representing the FHIRPath all() function.\n\nReturns true if criteria evaluates to true for every item in its operand.\n\nThis function takes one param (`criteria`) in addition to its operand. If\noperand is not provided, it returns True.\n\nArgs:\nfunction: The FHIRPath AST `AllFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def __init__(self, settings, room_id, pause=1):\n        \n        Process.__init__(self)\n        self._pause = pause\n        self._room_id = room_id\n        self._callback = None\n        self._queue = None\n        self._connection = Connection.create_from_settings(settings)\n        self._last_message_id = None", "docstring": "Initialize.\n\nArgs:\nsettings (dict): Settings used to create a :class:`Connection` instance\nroom_id (int): Room ID\n\nKwargs:\npause (int): Pause in seconds between requests", "source": "juraj-google-style"}
{"code": "def WriteBytes(self, value, unhex=True):\n        \n        if unhex:\n            try:\n                value = binascii.unhexlify(value)\n            except binascii.Error:\n                pass\n        return self.stream.write(value)", "docstring": "Write a `bytes` type to the stream.\n\nArgs:\nvalue (bytes): array of bytes to write to the stream.\nunhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "class _Validate(beam.PTransform):\n\n    def __init__(self, schema: dict[str, Any], error_handling: Optional[Mapping[str, Any]]=None):\n        self._schema = schema\n        self._exception_handling_args = exception_handling_args(error_handling)\n\n    @maybe_with_exception_handling\n    def expand(self, pcoll):\n        validator = json_utils.row_validator(schema_from_element_type(pcoll.element_type), self._schema)\n\n        def invoke_validator(x):\n            validator(x)\n            return x\n        return pcoll | beam.Map(invoke_validator)\n\n    def with_exception_handling(self, **kwargs):\n        self._exception_handling_args = kwargs\n        return self", "docstring": "Validates each element of a PCollection against a json schema.\n\nArgs:\nschema: A json schema against which to validate each element.\nerror_handling: Whether and how to handle errors during iteration.\nIf this is not set, invalid elements will fail the pipeline, otherwise\ninvalid elements will be passed to the specified error output along\nwith information about how the schema was invalidated.", "source": "github-repos"}
{"code": "def __init__(self, indent=True, relation_sort=original_order):\n        \n        self.indent = indent\n        self.relation_sort = relation_sort", "docstring": "Initialize a new codec.\n\nArgs:\nindent: if True, adaptively indent; if False or None, don't\nindent; if a non-negative integer, indent that many\nspaces per nesting level\nrelation_sort: when encoding, sort the relations on each\nnode according to this function; by default, the\noriginal order is maintained", "source": "juraj-google-style"}
{"code": "def get_cuda_version_all():\n    key = 'cuda_ver_all'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    ret_val = out.split(b'\\n')\n    filtered = []\n    for item in ret_val:\n        if item not in ['\\n', '']:\n            filtered.append(item)\n    all_vers = []\n    for item in filtered:\n        ver_re = re.search('.*/cuda(\\\\-[\\\\d]+\\\\.[\\\\d]+)?', item.decode('utf-8'))\n        if ver_re.group(1):\n            all_vers.append(ver_re.group(1).strip('-'))\n    if err and FLAGS.debug:\n        print('Error in detecting CUDA version:\\n %s' % str(err))\n    return all_vers", "docstring": "Retrieves all additional CUDA versions available (other than default).\n\nFor retrieving default CUDA version, use `get_cuda_version` function.\n\nstderr is silenced by default. Setting FLAGS.debug mode will not enable it.\nRemove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable\nstderr.\n\nReturns:\nList of all CUDA versions found (except default version).\ne.g. ['10.1', '10.2']", "source": "github-repos"}
{"code": "def disambiguate_query(self, query, language=None, entities=None):\n    body = {'shortText': query, 'entities': [], 'onlyNER': 'false', 'customisation': 'generic'}\n    if language:\n        body['language'] = {'lang': language}\n    if entities:\n        body['entities'] = entities\n    files = {'query': str(body)}\n    logger.debug('About to submit the following query {}'.format(body))\n    (res, status) = self.post(self.disambiguate_service, files=files, headers={'Accept': 'application/json'})\n    if (status == 200):\n        return (self.decode(res), status)\n    else:\n        logger.debug('Disambiguation failed.')\n        return (None, status)", "docstring": "Call the disambiguation service in order to disambiguate a search query.\n\nArgs:\ntext (str): Query to be disambiguated.\nlanguage (str): language of text (if known)\nentities (list): list of entities or mentions to be supplied by\nthe user.\n\nReturns:\ndict, int: API response and API status.", "source": "codesearchnet"}
{"code": "def set_mlag_id(self, name, value=None, default=False, disable=False):\n    cmd = self.command_builder('mlag', value=value, default=default, disable=disable)\n    return self.configure_interface(name, cmd)", "docstring": "Configures the interface mlag value for the specified interface\n\nArgs:\nname (str): The interface to configure.  Valid values for the\nname arg include Port-Channel*\nvalue (str): The mlag identifier to cofigure on the interface\ndefault (bool): Configures the interface mlag value using the\ndefault keyword\ndisable (bool): Negates the interface mlag value using the\nno keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "codesearchnet"}
{"code": "def sort_segment_points(Aps, Bps):\n    \n    mid = []\n    j = 0\n    mid.append(Aps[0])\n    for i in range(len(Aps)-1):\n        dist = distance_tt_point(Aps[i], Aps[i+1])\n        for m in range(j, len(Bps)):\n            distm = distance_tt_point(Aps[i], Bps[m])\n            if dist > distm:\n                direction = dot(normalize(line(Aps[i].gen2arr(), Aps[i+1].gen2arr())), normalize(Bps[m].gen2arr()))\n                if direction > 0:\n                    j = m + 1\n                    mid.append(Bps[m])\n                    break\n\n        mid.append(Aps[i+1])\n    for m in range(j, len(Bps)):\n        mid.append(Bps[m])\n    return mid", "docstring": "Takes two line segments and sorts all their points,\nso that they form a continuous path\n\nArgs:\nAps: Array of tracktotrip.Point\nBps: Array of tracktotrip.Point\nReturns:\nArray with points ordered", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key):\n    if key is None:\n        key = self._key()\n    value = self._get_recursive(key)\n    if value is None:\n        value = self[key] = self.default_factory()\n    return value", "docstring": "Gets the value at key (or current context), or sets default value.\n\nArgs:\nkey: May be `None` or `Graph`object. When `None`, the key is set to the\ncurrent context.\n\nReturns:\nEither the cached or default value.", "source": "github-repos"}
{"code": "def delete_folder(self, folder_id, recursive=True):\n    return self.__request('DELETE', ('folders/%s' % (folder_id,)), querystring={'recursive': unicode(recursive).lower()})", "docstring": "Delete an existing folder\n\nArgs:\nfolder_id (int): ID of the folder to delete.\nrecursive (bool): Delete all subfolder if True.\n\nReturns:\ndict. Response from Box.\n\nRaises:\nBoxError: An error response is returned from Box (status_code >= 400).\n\nBoxHttpResponseError: Response from Box is malformed.\n\nrequests.exceptions.*: Any connection related problem.", "source": "codesearchnet"}
{"code": "def create(self, name, network):\n        \n        if not network in SUPPORTED_NETWORKS:\n            raise ValueError('Network not valid!')\n        account = self.wrap(self.resource.create(dict(name=name,\n                                                      network=network)))\n        self.add(account)\n        return account", "docstring": "Create a new Account object and add it to this Accounts collection.\n\nArgs:\nname (str): Account name\nnetwork (str): Type of cryptocurrency.  Can be one of, 'bitcoin', '\nbitcoin_testnet', 'litecoin', 'dogecoin'.\n\nReturns: The new round.Account", "source": "juraj-google-style"}
{"code": "def recipe_email_to_bigquery(config, auth_read, email_from, email_to, subject, link, attachment, dataset, table, schema, header, is_incremental_load):\n    email(config, {'auth': auth_read, 'read': {'from': email_from, 'to': email_to, 'subject': subject, 'link': link, 'attachment': attachment}, 'write': {'bigquery': {'dataset': dataset, 'table': table, 'schema': schema, 'header': header, 'is_incremental_load': is_incremental_load}}})", "docstring": "Import emailed CM report, Dv360 report, csv, or excel into a BigQuery table.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nemail_from (string) - Must match from field.\nemail_to (string) - Must match to field.\nsubject (string) - Regular expression to match subject.\nlink (string) - Regular expression to match email.\nattachment (string) - Regular expression to match atttachment.\ndataset (string) - Existing dataset in BigQuery.\ntable (string) - Name of table to be written to.\nschema (json) - Schema provided in JSON list format or empty list.\nheader (boolean) - Does the csv contain a header row.\nis_incremental_load (boolean) - Append report data to table based on date column, de-duplicates.", "source": "github-repos"}
{"code": "def bessel_j0(x, name=None):\n    with ops.name_scope(name, 'bessel_j0', [x]):\n        return gen_special_math_ops.bessel_j0(x)", "docstring": "Computes the Bessel j0 function of `x` element-wise.\n\nModified Bessel function of order 0.\n\n>>> tf.math.special.bessel_j0([0.5, 1., 2., 4.]).numpy()\narray([ 0.93846981,  0.76519769,  0.22389078, -0.39714981], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.j0\n@end_compatibility", "source": "github-repos"}
{"code": "def __auth_descriptor(self, api_info):\n    \n    if api_info.auth is None:\n      return None\n\n    auth_descriptor = {}\n    if api_info.auth.allow_cookie_auth is not None:\n      auth_descriptor['allowCookieAuth'] = api_info.auth.allow_cookie_auth\n    if api_info.auth.blocked_regions:\n      auth_descriptor['blockedRegions'] = api_info.auth.blocked_regions\n\n    return auth_descriptor", "docstring": "Builds an auth descriptor from API info.\n\nArgs:\napi_info: An _ApiInfo object.\n\nReturns:\nA dictionary with 'allowCookieAuth' and/or 'blockedRegions' keys.", "source": "juraj-google-style"}
{"code": "def parse(self, ping_message):\n        \n\n        try:\n            \n            if typepy.is_not_null_string(ping_message.stdout):\n                ping_message = ping_message.stdout\n        except AttributeError:\n            pass\n\n        logger.debug(\"parsing ping result: {}\".format(ping_message))\n\n        self.__parser = NullPingParser()\n\n        if typepy.is_null_string(ping_message):\n            logger.debug(\"ping_message is empty\")\n            self.__stats = PingStats()\n\n            return self.__stats\n\n        ping_lines = _to_unicode(ping_message).splitlines()\n        parser_class_list = (\n            LinuxPingParser,\n            WindowsPingParser,\n            MacOsPingParser,\n            AlpineLinuxPingParser,\n        )\n\n        for parser_class in parser_class_list:\n            self.__parser = parser_class()\n            try:\n                self.__stats = self.__parser.parse(ping_lines)\n                return self.__stats\n            except ParseError as e:\n                if e.reason != ParseErrorReason.HEADER_NOT_FOUND:\n                    raise e\n            except pp.ParseException:\n                pass\n\n        self.__parser = NullPingParser()\n\n        return self.__stats", "docstring": "Parse ping command output.\n\nArgs:\nping_message (str or :py:class:`~pingparsing.PingResult`):\n``ping`` command output.\n\nReturns:\n:py:class:`~pingparsing.PingStats`: Parsed result.", "source": "juraj-google-style"}
{"code": "def getSlicesForText(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=10):\n        \n\n        resourcePath = '/text/slices'\n        method = 'POST'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        queryParams['retina_name'] = retina_name\n        queryParams['start_index'] = start_index\n        queryParams['max_results'] = max_results\n        queryParams['get_fingerprint'] = get_fingerprint\n        postData = body\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n        return [text.Text(**r) for r in response.json()]", "docstring": "Get a list of slices of the text\nArgs:\nretina_name, str: The retina name (required)\nbody, str: The text to be evaluated (required)\nget_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)\nstart_index, int: The start-index for pagination (optional) (optional)\nmax_results, int: Max results per page (optional) (optional)\nReturns: Array[Text]", "source": "juraj-google-style"}
{"code": "def get_policy(observations, hparams, action_space):\n  \n  if not isinstance(action_space, gym.spaces.Discrete):\n    raise ValueError(\"Expecting discrete action space.\")\n\n  obs_shape = common_layers.shape_list(observations)\n  (frame_height, frame_width) = obs_shape[2:4]\n\n  \n  \n  if hparams.policy_problem_name == \"dummy_policy_problem_ttt\":\n    tf.logging.info(\"Using DummyPolicyProblemTTT for the policy.\")\n    policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()\n  else:\n    tf.logging.info(\"Using DummyPolicyProblem for the policy.\")\n    policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)\n\n  trainer_lib.add_problem_hparams(hparams, policy_problem)\n  hparams.force_full_predict = True\n  model = registry.model(hparams.policy_network)(\n      hparams, tf.estimator.ModeKeys.TRAIN\n  )\n  try:\n    num_target_frames = hparams.video_num_target_frames\n  except AttributeError:\n    num_target_frames = 1\n  features = {\n      \"inputs\": observations,\n      \"input_action\": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),\n      \"input_reward\": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),\n      \"targets\": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),\n      \"target_action\": tf.zeros(\n          obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),\n      \"target_reward\": tf.zeros(\n          obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),\n      \"target_policy\": tf.zeros(\n          obs_shape[:1] + [num_target_frames] + [action_space.n]),\n      \"target_value\": tf.zeros(\n          obs_shape[:1] + [num_target_frames])\n  }\n  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n    t2t_model.create_dummy_vars()\n    (targets, _) = model(features)\n  return (targets[\"target_policy\"][:, 0, :], targets[\"target_value\"][:, 0])", "docstring": "Get a policy network.\n\nArgs:\nobservations: observations\nhparams: parameters\naction_space: action space\n\nReturns:\nTuple (action logits, value).", "source": "juraj-google-style"}
{"code": "def dumps(o, preserve=False):\n    \n\n    retval = \"\"\n    addtoretval, sections = _dump_sections(o, \"\")\n    retval += addtoretval\n    while sections != {}:\n        newsections = {}\n        for section in sections:\n            addtoretval, addtosections = _dump_sections(sections[section],\n                                                        section, preserve)\n            if addtoretval or (not addtoretval and not addtosections):\n                if retval and retval[-2:] != \"\\n\\n\":\n                    retval += \"\\n\"\n                retval += \"[\" + section + \"]\\n\"\n                if addtoretval:\n                    retval += addtoretval\n            for s in addtosections:\n                newsections[section + \".\" + s] = addtosections[s]\n        sections = newsections\n    return retval", "docstring": "Stringifies input dict as toml\n\nArgs:\no: Object to dump into toml\n\npreserve: Boolean parameter. If true, preserve inline tables.\n\nReturns:\nString containing the toml corresponding to dict", "source": "juraj-google-style"}
{"code": "def run(self, data_loaders, workflow, max_epochs, **kwargs):\n    assert isinstance(data_loaders, list)\n    assert mmcv.is_list_of(workflow, tuple)\n    assert (len(data_loaders) == len(workflow))\n    self._max_epochs = max_epochs\n    work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')\n    self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)\n    self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)\n    self.call_hook('before_run')\n    while (self.epoch < max_epochs):\n        for (i, flow) in enumerate(workflow):\n            (mode, epochs) = flow\n            if isinstance(mode, str):\n                if (not hasattr(self, mode)):\n                    raise ValueError('runner has no method named \"{}\" to run an epoch'.format(mode))\n                epoch_runner = getattr(self, mode)\n            elif callable(mode):\n                epoch_runner = mode\n            else:\n                raise TypeError('mode in workflow must be a str or callable function, not {}'.format(type(mode)))\n            for _ in range(epochs):\n                if ((mode == 'train') and (self.epoch >= max_epochs)):\n                    return\n                epoch_runner(data_loaders[i], **kwargs)\n    time.sleep(1)\n    self.call_hook('after_run')", "docstring": "Start running.\n\nArgs:\ndata_loaders (list[:obj:`DataLoader`]): Dataloaders for training\nand validation.\nworkflow (list[tuple]): A list of (phase, epochs) to specify the\nrunning order and epochs. E.g, [('train', 2), ('val', 1)] means\nrunning 2 epochs for training and 1 epoch for validation,\niteratively.\nmax_epochs (int): Total training epochs.", "source": "codesearchnet"}
{"code": "def FromFile(cls, inpath):\n        \n\n        with open(inpath, \"r\") as infile:\n            indata = infile.read()\n\n        return cls.FromString(indata)", "docstring": "Load a CommandFile from a path.\n\nArgs:\ninpath (str): The path to the file to load\n\nReturns:\nCommandFile: The decoded CommandFile object.", "source": "juraj-google-style"}
{"code": "class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__(config)\n        self.model = PatchTSMixerModel(config, mask_input=True)\n        self.head = PatchTSMixerPretrainHead(config=config)\n        self.masked_loss = config.masked_loss\n        self.use_return_dict = config.use_return_dict\n        if config.post_init:\n            self.post_init()\n\n    @auto_docstring\n    def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPreTrainingOutput:\n        \n        return_dict = return_dict if return_dict is not None else self.use_return_dict\n        if self.masked_loss is True:\n            loss = torch.nn.MSELoss(reduction='none')\n        else:\n            loss = torch.nn.MSELoss(reduction='mean')\n        model_output = self.model(past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict)\n        if isinstance(model_output, tuple):\n            model_output = PatchTSMixerModelOutput(*model_output)\n        x_hat = self.head(model_output.last_hidden_state)\n        if return_loss is True:\n            loss_val = loss(x_hat, model_output.patch_input)\n        else:\n            loss_val = None\n        if self.masked_loss is True and loss_val is not None:\n            loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10)\n        if not return_dict:\n            return tuple((v for v in [loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states]))\n        return PatchTSMixerForPreTrainingOutput(loss=loss_val, prediction_outputs=x_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)", "docstring": "`PatchTSMixer` for mask pretraining.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.\n\nReturns:\n`None`.", "source": "github-repos"}
{"code": "def ExportNEP2(self, passphrase):\n    if (len(passphrase) < 2):\n        raise ValueError('Passphrase must have a minimum of 2 characters')\n    address_hash_tmp = hashlib.sha256(self.GetAddress().encode('utf-8')).digest()\n    address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest()\n    address_hash = address_hash_tmp2[:4]\n    pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')\n    derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES)\n    derived1 = derived[:32]\n    derived2 = derived[32:]\n    xor_ed = xor_bytes(bytes(self.PrivateKey), derived1)\n    cipher = AES.new(derived2, AES.MODE_ECB)\n    encrypted = cipher.encrypt(xor_ed)\n    assembled = bytearray()\n    assembled.extend(NEP_HEADER)\n    assembled.extend(NEP_FLAG)\n    assembled.extend(address_hash)\n    assembled.extend(encrypted)\n    encrypted_key_nep2 = base58.b58encode_check(bytes(assembled))\n    return encrypted_key_nep2.decode('utf-8')", "docstring": "Export the encrypted private key in NEP-2 format.\n\nArgs:\npassphrase (str): The password to encrypt the private key with, as unicode string\n\nReturns:\nstr: The NEP-2 encrypted private key", "source": "codesearchnet"}
{"code": "def open(self, **params):\n        \n        logger.info('opening telnet')\n        self.port = params['port']\n        self.ip = params['ip']\n        self.tn = None\n        self._init()", "docstring": "Open telnet connection\n\nArgs:\nparams (dict), must contain two parameters \"ip\" - ip address or hostname and \"port\" - port number\n\nExample:\nparams = {'port': 23, 'ip': 'localhost'}", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.StreamingAnnotateVideo = channel.stream_stream(\n            \"/google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService/StreamingAnnotateVideo\",\n            request_serializer=google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def get_site_dos(self, site):\n        \n        site_dos = functools.reduce(add_densities, self.pdos[site].values())\n        return Dos(self.efermi, self.energies, site_dos)", "docstring": "Get the total Dos for a site (all orbitals).\n\nArgs:\nsite: Site in Structure associated with CompleteDos.\n\nReturns:\nDos containing summed orbital densities for site.", "source": "juraj-google-style"}
{"code": "def _serialize_normalized_array(array, fmt='png', quality=70):\n    dtype = array.dtype\n    assert np.issubdtype(dtype, np.unsignedinteger)\n    assert (np.max(array) <= np.iinfo(dtype).max)\n    assert (array.shape[(- 1)] > 1)\n    image = PIL.Image.fromarray(array)\n    image_bytes = BytesIO()\n    image.save(image_bytes, fmt, quality=quality)\n    image_data = image_bytes.getvalue()\n    return image_data", "docstring": "Given a normalized array, returns byte representation of image encoding.\n\nArgs:\narray: NumPy array of dtype uint8 and range 0 to 255\nfmt: string describing desired file format, defaults to 'png'\nquality: specifies compression quality from 0 to 100 for lossy formats\n\nReturns:\nimage data as BytesIO buffer", "source": "codesearchnet"}
{"code": "def _Load(dec: '_Dec[_DecT]', filename: Path, compress: bool=False, open_function=open) -> _DecT:\n    try:\n        with open_function(filename, 'rb') as fi:\n            if compress:\n                with gzip.GzipFile(fileobj=fi) as zfi:\n                    data = zfi.read()\n            else:\n                data = fi.read()\n        return dec.decode(data)\n    except (OSError, gzip.BadGzipFile, msgspec.DecodeError, msgspec.ValidationError) as e:\n        raise LoadPickleError(filename) from e", "docstring": "Loads a serialized file.\n\nArgs:\ndec: The msgspec.Decoder to use.\nfilename: The file to read.\ncompress: if True, the file will be opened using gzip.\nopen_function: The function to open the file with.\n\nReturns:\nThe decoded object.\n\nRaises:\nLoadPickleError, if there is an OSError, gzip error, or msgspec error.", "source": "github-repos"}
{"code": "def __setattr__(self, key, value):\n        \n        if key in self.__dict__ or '_' + key in self.__dict__:\n            object.__setattr__(self, key, value)\n        else:\n            self.set(key, value)", "docstring": "A shortcut for the 'set' method.\n\nArgs:\nkey (str): The name of the attribute to set.\nvalue (str): The value to assign to 'key'.", "source": "juraj-google-style"}
{"code": "def mangle_scope_tree(root, toplevel):\n    \n    def mangle(scope):\n        \n        if scope.get_enclosing_scope() is None and not toplevel:\n            return\n        for name in scope.symbols:\n            mangled_name = scope.get_next_mangled_name()\n            scope.mangled[name] = mangled_name\n            scope.rev_mangled[mangled_name] = name\n\n    def visit(node):\n        mangle(node)\n        for child in node.children:\n            visit(child)\n\n    visit(root)", "docstring": "Walk over a scope tree and mangle symbol names.\n\nArgs:\ntoplevel: Defines if global scope should be mangled or not.", "source": "juraj-google-style"}
{"code": "def f2format(filename):\n    \n    print('Now converting %r...' % filename)\n\n    \n    encoding = os.getenv('F2FORMAT_ENCODING', LOCALE_ENCODING)\n\n    lineno = dict()     \n    content = list()    \n    with open(filename, 'r', encoding=encoding) as file:\n        lineno[1] = 0\n        for lnum, line in enumerate(file, start=1):\n            content.append(line)\n            lineno[lnum+1] = lineno[lnum] + len(line)\n\n    \n    string = ''.join(content)\n    text = convert(string, lineno)\n\n    \n    with open(filename, 'w', encoding=encoding) as file:\n        file.write(text)", "docstring": "Wrapper works for conversion.\n\nArgs:\n- filename -- str, file to be converted", "source": "juraj-google-style"}
{"code": "def matches_filters(self, node):\n        \n\n        visible = self.visible\n\n        if self.options[\"text\"]:\n            if isregex(self.options[\"text\"]):\n                regex = self.options[\"text\"]\n            elif self.exact_text is True:\n                regex = re.compile(r\"\\A{}\\Z\".format(re.escape(self.options[\"text\"])))\n            else:\n                regex = toregex(self.options[\"text\"])\n\n            text = normalize_text(\n                node.all_text if visible == \"all\" else node.visible_text)\n\n            if not regex.search(text):\n                return False\n\n        if isinstance(self.exact_text, (bytes_, str_)):\n            regex = re.compile(r\"\\A{}\\Z\".format(re.escape(self.exact_text)))\n\n            text = normalize_text(\n                node.all_text if visible == \"all\" else node.visible_text)\n\n            if not regex.search(text):\n                return False\n\n        if visible == \"visible\":\n            if not node.visible:\n                return False\n        elif visible == \"hidden\":\n            if node.visible:\n                return False\n\n        for name, node_filter in iter(self._node_filters.items()):\n            if name in self.filter_options:\n                if not node_filter.matches(node, self.filter_options[name]):\n                    return False\n            elif node_filter.has_default:\n                if not node_filter.matches(node, node_filter.default):\n                    return False\n\n        if self.options[\"filter\"] and not self.options[\"filter\"](node):\n            return False\n\n        return True", "docstring": "Returns whether the given node matches all filters.\n\nArgs:\nnode (Element): The node to evaluate.\n\nReturns:\nbool: Whether the given node matches.", "source": "juraj-google-style"}
{"code": "def find_clusters(struct, connected_matrix):\n    n_atoms = len(struct.species)\n    if (n_atoms == 0):\n        return [0, 0, 0]\n    if (0 in np.sum(connected_matrix, axis=0)):\n        return [0, 1, 0]\n    cluster_sizes = []\n    clusters = []\n    visited = [False for item in range(n_atoms)]\n    connected_matrix += np.eye(len(connected_matrix))\n\n    def visit(atom, atom_cluster):\n        visited[atom] = True\n        new_cluster = set(np.where((connected_matrix[atom] != 0))[0]).union(atom_cluster)\n        atom_cluster = new_cluster\n        for new_atom in atom_cluster:\n            if (not visited[new_atom]):\n                visited[new_atom] = True\n                atom_cluster = visit(new_atom, atom_cluster)\n        return atom_cluster\n    for i in range(n_atoms):\n        if (not visited[i]):\n            atom_cluster = set()\n            cluster = visit(i, atom_cluster)\n            clusters.append(cluster)\n            cluster_sizes.append(len(cluster))\n    max_cluster = max(cluster_sizes)\n    min_cluster = min(cluster_sizes)\n    return [max_cluster, min_cluster, clusters]", "docstring": "Finds bonded clusters of atoms in the structure with periodic boundary\nconditions.\n\nIf there are atoms that are not bonded to anything, returns [0,1,0]. (For\nfaster computation time)\n\nAuthor: \"Gowoon Cheon\"\nEmail: \"gcheon@stanford.edu\"\n\nArgs:\nstruct (Structure): Input structure\nconnected_matrix: Must be made from the same structure with\nfind_connected_atoms() function.\n\nReturns:\nmax_cluster: the size of the largest cluster in the crystal structure\nmin_cluster: the size of the smallest cluster in the crystal structure\nclusters: list of bonded clusters found here, clusters are formatted as\nsets of indices of atoms", "source": "codesearchnet"}
{"code": "def parse_qcmetrics(metrics: dict) -> dict:\n    \n    data = {\n        'versions': {\n            'freebayes': metrics['program']['freebayes']['version'],\n            'gatk': metrics['program']['gatk']['version'],\n            'manta': metrics['program'].get('manta', {}).get('version'),\n            'bcftools': metrics['program']['bcftools']['version'],\n            'vep': metrics['program']['varianteffectpredictor']['version'],\n        },\n        'samples': [],\n    }\n\n    plink_samples = {}\n    plink_sexcheck = metrics['program'].get('plink_sexcheck', {}).get('sample_sexcheck')\n    if isinstance(plink_sexcheck, str):\n        sample_id, sex_number = plink_sexcheck.strip().split(':', 1)\n        plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))\n    elif isinstance(plink_sexcheck, list):\n        for sample_raw in plink_sexcheck:\n            sample_id, sex_number = sample_raw.split(':', 1)\n            plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))\n\n    for sample_id, sample_metrics in metrics['sample'].items():\n\n        \n        bam_stats = [values['bamstats'] for key, values in sample_metrics.items()\n                     if key[:-1].endswith('.lane')]\n        total_reads = sum(int(bam_stat['raw_total_sequences']) for bam_stat in bam_stats)\n        total_mapped = sum(int(bam_stat['reads_mapped']) for bam_stat in bam_stats)\n\n        \n        main_key = [key for key in sample_metrics.keys() if '_lanes_' in key][0]\n\n        hs_metrics = sample_metrics[main_key]['collecthsmetrics']['header']['data']\n        multiple_inst_metrics = sample_metrics[main_key]['collectmultiplemetricsinsertsize']['header']['data']\n        multiple_metrics = sample_metrics[main_key]['collectmultiplemetrics']['header']['pair']\n\n        sample_data = {\n            'at_dropout': hs_metrics['AT_DROPOUT'],\n            'completeness_target': {\n                10: hs_metrics['PCT_TARGET_BASES_10X'],\n                20: hs_metrics['PCT_TARGET_BASES_20X'],\n                50: hs_metrics['PCT_TARGET_BASES_50X'],\n                100: hs_metrics['PCT_TARGET_BASES_100X'],\n            },\n            'duplicates': float(sample_metrics[main_key]['markduplicates']['fraction_duplicates']),\n            'gc_dropout': hs_metrics['GC_DROPOUT'],\n            'id': sample_id,\n            'median_insert_size':  multiple_inst_metrics['MEDIAN_INSERT_SIZE'],\n            'mapped': total_mapped / total_reads,\n            'plink_sex': plink_samples.get(sample_id),\n            'predicted_sex': sample_metrics[main_key]['chanjo_sexcheck']['gender'],\n            'reads': total_reads,\n            'insert_size_standard_deviation': float(multiple_inst_metrics['STANDARD_DEVIATION']),\n            'strand_balance': float(multiple_metrics['STRAND_BALANCE']),\n            'target_coverage': float(hs_metrics['MEAN_TARGET_COVERAGE']),\n        }\n        data['samples'].append(sample_data)\n    return data", "docstring": "Parse MIP qc metrics file.\nArgs:\nmetrics (dict): raw YAML input from MIP qc metrics file\n\nReturns:\ndict: parsed data", "source": "juraj-google-style"}
{"code": "def _ParsePathSpecification(self, knowledge_base, searcher, file_system, path_specification, path_separator):\n    try:\n        file_entry = searcher.GetFileEntryByPathSpec(path_specification)\n    except IOError as exception:\n        relative_path = searcher.GetRelativePath(path_specification)\n        if (path_separator != file_system.PATH_SEPARATOR):\n            relative_path_segments = file_system.SplitPath(relative_path)\n            relative_path = '{0:s}{1:s}'.format(path_separator, path_separator.join(relative_path_segments))\n        raise errors.PreProcessFail('Unable to retrieve file entry: {0:s} with error: {1!s}'.format(relative_path, exception))\n    if file_entry:\n        self._ParseFileEntry(knowledge_base, file_entry)", "docstring": "Parses a file system for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nsearcher (dfvfs.FileSystemSearcher): file system searcher to preprocess\nthe file system.\nfile_system (dfvfs.FileSystem): file system to be preprocessed.\npath_specification (dfvfs.PathSpec): path specification that contains\nthe artifact value data.\npath_separator (str): path segment separator.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "codesearchnet"}
{"code": "def remove_bucket_list_item(self, id, collection, item):\n        \n        if type(id) is not ObjectId:\n            id = ObjectId(id)\n        obj = getattr(self.db, collection)\n        result = obj.update(\n            {'_id': id},\n            {'$pull': {'bucket_list': item}}\n        )\n        return result", "docstring": "Removes an item from the bucket list\n\nArgs:\nid: the CRITs object id of the TLO\ncollection: The db collection. See main class documentation.\nitem: the bucket list item to remove\nReturns:\nThe mongodb result", "source": "juraj-google-style"}
{"code": "def _GetFileSystemCacheIdentifier(self, path_spec):\n    string_parts = []\n    string_parts.append(getattr(path_spec.parent, 'comparable', ''))\n    string_parts.append('type: {0:s}'.format(path_spec.type_indicator))\n    return ''.join(string_parts)", "docstring": "Determines the file system cache identifier for the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nstr: identifier of the VFS object.", "source": "codesearchnet"}
{"code": "def regularize_cost_from_collection(name='regularize_cost'):\n    \n    ctx = get_current_tower_context()\n    if not ctx.is_training:\n        \n        \n        \n        return tf.constant(0, dtype=tf.float32, name='empty_' + name)\n\n    \n    \n    if ctx.has_own_variables:   \n        losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES)\n    else:\n        losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)\n    if len(losses) > 0:\n        logger.info(\"regularize_cost_from_collection() found {} regularizers \"\n                    \"in REGULARIZATION_LOSSES collection.\".format(len(losses)))\n\n        def maploss(l):\n            assert l.dtype.is_floating, l\n            if l.dtype != tf.float32:\n                l = tf.cast(l, tf.float32)\n            return l\n\n        losses = [maploss(l) for l in losses]\n        reg_loss = tf.add_n(losses, name=name)\n        return reg_loss\n    else:\n        return tf.constant(0, dtype=tf.float32, name='empty_' + name)", "docstring": "Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.\nIf in replicated mode, will only regularize variables created within the current tower.\n\nArgs:\nname (str): the name of the returned tensor\n\nReturns:\ntf.Tensor: a scalar, the total regularization cost.", "source": "juraj-google-style"}
{"code": "def _BuildMessageFromTypeName(type_name, descriptor_pool):\n    from google.protobuf import symbol_database\n    database = symbol_database.Default()\n    try:\n        message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)\n    except KeyError:\n        return None\n    message_type = database.GetPrototype(message_descriptor)\n    return message_type()", "docstring": "Returns a protobuf message instance.\n\nArgs:\ntype_name: Fully-qualified protobuf message type name string.\ndescriptor_pool: DescriptorPool instance.\n\nReturns:\nA Message instance of type matching type_name, or None if a Descriptor\nwasn't found matching type_name.", "source": "codesearchnet"}
{"code": "def _select_mgmt_networks(self, conf):\n        \n\n        nets = conf['nets']\n        mgmts = sorted(\n            [\n                name for name, net in nets.iteritems()\n                if net.get('management') is True\n            ]\n        )\n\n        if len(mgmts) == 0:\n            mgmt_name = sorted((nets.keys()))[0]\n            LOGGER.debug(\n                'No management network configured, selecting network %s',\n                mgmt_name\n            )\n            nets[mgmt_name]['management'] = True\n            mgmts.append(mgmt_name)\n\n        for mgmt_name in mgmts:\n            if nets[mgmt_name].get('dns_domain_name', None) is None:\n                nets[mgmt_name]['dns_domain_name'] = 'lago.local'\n\n        return mgmts", "docstring": "Select management networks. If no management network is found, it will\nmark the first network found by sorted the network lists. Also adding\ndefault DNS domain, if none is set.\n\nArgs:\nconf(spec): spec", "source": "juraj-google-style"}
{"code": "def _as_document(self, partition):\n        \n\n        schema = ' '.join(\n            u'{} {} {} {} {}'.format(\n                c.id,\n                c.vid,\n                c.name,\n                c.altname,\n                c.description) for c in partition.table.columns)\n\n        values = ''\n\n        for stat in partition.stats:\n            if stat.uvalues :\n                \n                \n                values += ' '.join(e[:200] for e in stat.uvalues) + '\\n'\n\n        \n        \n        def resum(g):\n            try:\n                return str(GVid.parse(g).summarize())\n            except KeyError:\n                return g\n            except ValueError:\n                logger.debug(\"Failed to parse gvid '{}' from partition '{}' grain coverage\"\n                             .format(g, partition.identity.vname))\n                return g\n\n        keywords = (\n            ' '.join(partition.space_coverage) + ' ' +\n            ' '.join([resum(g) for g in partition.grain_coverage if resum(g)]) + ' ' +\n            ' '.join(str(x) for x in partition.time_coverage)\n        )\n\n        doc_field = u('{} {} {} {} {} {}').format(\n            values,\n            schema,\n            ' '.join([\n                u('{}').format(partition.identity.vid),\n                u('{}').format(partition.identity.id_),\n                u('{}').format(partition.identity.name),\n                u('{}').format(partition.identity.vname)]),\n            partition.display.title,\n            partition.display.description,\n            partition.display.sub_description,\n            partition.display.time_description,\n            partition.display.geo_description\n        )\n\n        document = dict(\n            vid=u('{}').format(partition.identity.vid),\n            dataset_vid=u('{}').format(partition.identity.as_dataset().vid),\n            title=u('{}').format(partition.table.description),\n            keywords=u('{}').format(keywords),\n            doc=doc_field)\n\n        return document", "docstring": "Converts given partition to the document indexed by FTS backend.\n\nArgs:\npartition (orm.Partition): partition to convert.\n\nReturns:\ndict with structure matches to BasePartitionIndex._schema.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs):\n    \n\n    if self._axis is None:\n      axis = list(range(1, inputs.shape.ndims))\n    else:\n      axis = self._axis\n\n    original_dtype = inputs.dtype\n    if original_dtype in [tf.float16, tf.bfloat16]:\n      inputs = tf.cast(inputs, tf.float32)\n\n    if inputs.get_shape().ndims < 2:\n      raise base.NotSupportedError(\n          \"Layer normalization expects inputs of at least rank 2.\"\n          \" Got inputs of rank {}.\".format(inputs.get_shape().ndims))\n\n    \n    \n    params_shape = inputs.get_shape()[-1:]\n\n    if self._scale:\n      if self.GAMMA not in self._initializers:\n        self._initializers[self.GAMMA] = create_gamma_initializer()\n      self._gamma = tf.get_variable(\n          self.GAMMA,\n          shape=params_shape,\n          dtype=inputs.dtype,\n          initializer=self._initializers[self.GAMMA],\n          partitioner=self._partitioners.get(self.GAMMA),\n          regularizer=self._regularizers.get(self.GAMMA))\n    else:\n      self._gamma = None\n\n    if self._offset:\n      if self.BETA not in self._initializers:\n        self._initializers[self.BETA] = create_beta_initializer()\n      self._beta = tf.get_variable(\n          self.BETA,\n          shape=params_shape,\n          dtype=inputs.dtype,\n          initializer=self._initializers[self.BETA],\n          partitioner=self._partitioners.get(self.BETA),\n          regularizer=self._regularizers.get(self.BETA))\n    else:\n      self._beta = None\n\n    mean, var = tf.nn.moments(inputs, axis, keep_dims=True)\n\n    normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,\n                                           self._gamma, self._eps)\n\n    if original_dtype in [tf.float16, tf.bfloat16]:\n      normalized = tf.cast(normalized, dtype=original_dtype)\n    return normalized", "docstring": "Connects the LayerNorm module into the graph.\n\nArgs:\ninputs: a Tensor of dimensionality >= 2.\n\nReturns:\nnormalized: layer normalized outputs with same shape as inputs.\n\nRaises:\nbase.NotSupportedError: If `inputs` has less than 2 dimensions.", "source": "juraj-google-style"}
{"code": "def _variable_on_cpu(name, shape, initializer):\n  \n  with tf.device('/cpu:0'):\n    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n  return var", "docstring": "Helper to create a Variable stored on CPU memory.\n\nArgs:\nname: name of the variable\nshape: list of ints\ninitializer: initializer for Variable\n\nReturns:\nVariable Tensor", "source": "juraj-google-style"}
{"code": "def exists_evaluator(self, index):\n    \n    attr_name = self.condition_data[index][0]\n    return self.attributes.get(attr_name) is not None", "docstring": "Evaluate the given exists match condition for the user attributes.\n\nArgs:\nindex: Index of the condition to be evaluated.\n\nReturns:\nBoolean: True if the user attributes have a non-null value for the given condition,\notherwise False.", "source": "juraj-google-style"}
{"code": "def _schema_from_json_file_object(self, file_obj):\n    json_data = json.load(file_obj)\n    return [SchemaField.from_api_repr(field) for field in json_data]", "docstring": "Helper function for schema_from_json that takes a\nfile object that describes a table schema.\n\nReturns:\nList of schema field objects.", "source": "codesearchnet"}
{"code": "def _check_wiremap_validity(self, wire_map, keymap, valmap):\n    for (k, v) in wire_map.items():\n        kname = ('%s[%d]' % (k[0].name, k[1]))\n        vname = ('%s[%d]' % (v[0].name, v[1]))\n        if (k not in keymap):\n            raise DAGCircuitError(('invalid wire mapping key %s' % kname))\n        if (v not in valmap):\n            raise DAGCircuitError(('invalid wire mapping value %s' % vname))\n        if (type(k) is not type(v)):\n            raise DAGCircuitError(('inconsistent wire_map at (%s,%s)' % (kname, vname)))", "docstring": "Check that the wiremap is consistent.\n\nCheck that the wiremap refers to valid wires and that\nthose wires have consistent types.\n\nArgs:\nwire_map (dict): map from (register,idx) in keymap to\n(register,idx) in valmap\nkeymap (dict): a map whose keys are wire_map keys\nvalmap (dict): a map whose keys are wire_map values\n\nRaises:\nDAGCircuitError: if wire_map not valid", "source": "codesearchnet"}
{"code": "def _random_flip(image, flip_index, random_func, scope_name):\n    with ops.name_scope(None, scope_name, [image]) as scope:\n        image = ops.convert_to_tensor(image, name='image')\n        image = _AssertAtLeast3DImage(image)\n        shape = image.get_shape()\n\n        def f_rank3():\n            uniform_random = random_func(shape=[], minval=0, maxval=1.0)\n            mirror_cond = math_ops.less(uniform_random, 0.5)\n            result = tf_cond.cond(mirror_cond, lambda: array_ops.reverse(image, [flip_index]), lambda: image, name=scope)\n            return fix_image_flip_shape(image, result)\n\n        def f_rank4():\n            batch_size = array_ops.shape(image)[0]\n            uniform_random = random_func(shape=[batch_size], minval=0, maxval=1.0)\n            flips = math_ops.round(array_ops.reshape(uniform_random, [batch_size, 1, 1, 1]))\n            flips = math_ops.cast(flips, image.dtype)\n            flipped_input = array_ops.reverse(image, [flip_index + 1])\n            return flips * flipped_input + (1 - flips) * image\n        if shape.ndims is None:\n            rank = array_ops.rank(image)\n            return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n        if shape.ndims == 3:\n            return f_rank3()\n        elif shape.ndims == 4:\n            return f_rank4()\n        else:\n            raise ValueError(\"'image' (shape %s) must have either 3 or 4 dimensions.\" % shape)", "docstring": "Randomly (50% chance) flip an image along axis `flip_index`.\n\nArgs:\nimage: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\nof shape `[height, width, channels]`.\nflip_index: Dimension along which to flip the image.\nVertical is 0, Horizontal is 1.\nrandom_func: partial function for calling either stateful or stateless\nrandom ops with `seed` parameter specified.\nscope_name: Name of the scope in which the ops are added.\n\nReturns:\nA tensor of the same type and shape as `image`.\n\nRaises:\nValueError: if the shape of `image` not supported.", "source": "github-repos"}
{"code": "def refine_rotation(self):\n    (new_x, y) = (get_uvec(self[0]), get_uvec(self[1]))\n    new_y = (y - (np.dot(new_x, y) * new_x))\n    new_z = np.cross(new_x, new_y)\n    return SquareTensor([new_x, new_y, new_z])", "docstring": "Helper method for refining rotation matrix by ensuring\nthat second and third rows are perpendicular to the first.\nGets new y vector from an orthogonal projection of x onto y\nand the new z vector from a cross product of the new x and y\n\nArgs:\ntol to test for rotation\n\nReturns:\nnew rotation matrix", "source": "codesearchnet"}
{"code": "def read_neb(self, reverse=True, terminate_on_match=True):\n    patterns = {'energy': 'energy\\\\(sigma->0\\\\)\\\\s+=\\\\s+([\\\\d\\\\-\\\\.]+)', 'tangent_force': '(NEB: projections on to tangent \\\\(spring, REAL\\\\)\\\\s+\\\\S+|tangential force \\\\(eV/A\\\\))\\\\s+([\\\\d\\\\-\\\\.]+)'}\n    self.read_pattern(patterns, reverse=reverse, terminate_on_match=terminate_on_match, postprocess=str)\n    self.data['energy'] = float(self.data['energy'][0][0])\n    if self.data.get('tangent_force'):\n        self.data['tangent_force'] = float(self.data['tangent_force'][0][1])", "docstring": "Reads NEB data. This only works with OUTCARs from both normal\nVASP NEB calculations or from the CI NEB method implemented by\nHenkelman et al.\n\nArgs:\nreverse (bool): Read files in reverse. Defaults to false. Useful for\nlarge files, esp OUTCARs, especially when used with\nterminate_on_match. Defaults to True here since we usually\nwant only the final value.\nterminate_on_match (bool): Whether to terminate when there is at\nleast one match in each key in pattern. Defaults to True here\nsince we usually want only the final value.\n\nRenders accessible:\ntangent_force - Final tangent force.\nenergy - Final energy.\nThese can be accessed under Outcar.data[key]", "source": "codesearchnet"}
{"code": "def __predicate_object_map__(self, map_iri):\n        \n        pred_obj_maps = []\n        for pred_obj_map_bnode in self.rml.objects(\n                subject=map_iri,\n                predicate=NS_MGR.rr.predicateObjectMap.rdflib):\n            pred_obj_map = SimpleNamespace()\n            pred_obj_map.predicate = self.rml.value(\n                subject=pred_obj_map_bnode,\n                predicate=NS_MGR.rr.predicate.rdflib)\n            obj_map_bnode = self.rml.value(\n                subject=pred_obj_map_bnode,\n                predicate=NS_MGR.rr.objectMap.rdflib)\n            if obj_map_bnode is None:\n                continue\n            pred_obj_map.constant = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rr.constant.rdflib)\n            pred_obj_map.template = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rr.template.rdflib)\n            pred_obj_map.parentTriplesMap = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rr.parentTriplesMap.rdflib)\n            if pred_obj_map.parentTriplesMap is not None:\n                self.parents.add(str(pred_obj_map.parentTriplesMap))\n            pred_obj_map.reference = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rr.reference.rdflib)\n            pred_obj_map.datatype = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rr.datatype.rdflib)\n            pred_obj_map.query = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rml.query.rdflib)\n            pred_obj_map.json_query = self.rml.value(\n                subject=obj_map_bnode,\n                predicate=NS_MGR.rml.reference.rdflib)\n            json_key = None\n            if hasattr(self.triple_maps[str(map_iri)].logicalSource,\n                       'json_key'):\n                json_key = self.triple_maps[str(map_iri)].logicalSource.json_key\n            pred_obj_map.json_key = pick(self.rml.value(\n                    subject=obj_map_bnode,\n                    predicate=NS_MGR.rml.key.rdflib),\n                    json_key)\n            \n            pred_obj_map.delimiters = []\n            if pred_obj_map.json_query:\n                self.use_json_qry = True\n            for obj in self.rml.objects(subject=obj_map_bnode,\n                                        predicate=NS_MGR.kds.delimiter.rdflib):\n                pred_obj_map.delimiters.append(obj)\n            pred_obj_maps.append(pred_obj_map)\n        return pred_obj_maps", "docstring": "Iterates through rr:predicateObjectMaps for this TripleMap\ncreating a SimpleNamespace for each triple map and assigning the\nconstant, template, parentTripleMap, reference as properties.\n\nArgs:\n\n-----\nmap_iri:  rdflib.URIRef, TripleMap IRI\n\nReturns:\n\n--------\nlist:  List of predicate_object Namespace objects", "source": "juraj-google-style"}
{"code": "def copy_file_content(self, file_id, source_file):\n        \n        if not is_valid_uuid(file_id):\n            raise StorageArgumentException(\n                'Invalid UUID for file_id: {0}'.format(file_id))\n\n        if not is_valid_uuid(source_file):\n            raise StorageArgumentException(\n                'Invalid UUID for source_file: {0}'.format(source_file))\n\n        self._authenticated_request \\\n            .to_endpoint('file/{}/content/'.format(file_id)) \\\n            .with_headers({'X-Copy-From': source_file}) \\\n            .put()", "docstring": "Copy file content from source file to target file.\n\nArgs:\nfile_id (str): The UUID of the file whose content is written.\nsource_file (str): The UUID of the file whose content is copied.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "juraj-google-style"}
{"code": "def cluster_spec(self):\n    merged_cluster = {}\n    for cluster_resolver in self._cluster_resolvers:\n        cluster_spec = cluster_resolver.cluster_spec()\n        cluster_dict = cluster_spec.as_dict()\n        for job_name, tasks in cluster_dict.items():\n            if job_name in merged_cluster:\n                if isinstance(tasks, dict):\n                    merged_cluster[job_name] = {}\n            elif isinstance(tasks, list):\n                merged_cluster[job_name] = []\n            else:\n                merged_cluster[job_name] = {}\n    for cluster_resolver in self._cluster_resolvers:\n        cluster_spec = cluster_resolver.cluster_spec()\n        cluster_dict = cluster_spec.as_dict()\n        for job_name, tasks in cluster_dict.items():\n            if isinstance(merged_cluster[job_name], list):\n                merged_cluster[job_name].extend(tasks)\n            else:\n                if isinstance(tasks, list):\n                    task_dict = dict(zip(range(0, len(tasks)), tasks))\n                else:\n                    task_dict = tasks.copy()\n                task_keys = set(task_dict)\n                merged_keys = set(merged_cluster[job_name].keys())\n                intersected_keys = task_keys.intersection(merged_keys)\n                if intersected_keys:\n                    raise KeyError('Duplicate keys detected when merging two ClusterSpecs: %s' % repr(intersected_keys))\n                merged_cluster[job_name].update(task_dict)\n    return ClusterSpec(merged_cluster)", "docstring": "Returns a union of all the ClusterSpecs from the ClusterResolvers.\n\nReturns:\nA ClusterSpec containing host information merged from all the underlying\nClusterResolvers.\n\nRaises:\nKeyError: If there are conflicting keys detected when merging two or\nmore dictionaries, this exception is raised.\n\nNote: If there are multiple ClusterResolvers exposing ClusterSpecs with the\nsame job name, we will merge the list/dict of workers.\n\nIf *all* underlying ClusterSpecs expose the set of workers as lists, we will\nconcatenate the lists of workers, starting with the list of workers from\nthe first ClusterResolver passed into the constructor.\n\nIf *any* of the ClusterSpecs expose the set of workers as a dict, we will\ntreat all the sets of workers as dicts (even if they are returned as lists)\nand will only merge them into a dict if there is no conflicting keys. If\nthere is a conflicting key, we will raise a `KeyError`.", "source": "github-repos"}
{"code": "def ported_string(raw_data, encoding='utf-8', errors='ignore'):\n    \n\n    if not raw_data:\n        return six.text_type()\n\n    if isinstance(raw_data, six.text_type):\n        return raw_data.strip()\n\n    if six.PY2:\n        try:\n            return six.text_type(raw_data, encoding, errors).strip()\n        except LookupError:\n            return six.text_type(raw_data, \"utf-8\", errors).strip()\n\n    if six.PY3:\n        try:\n            return six.text_type(raw_data, encoding).strip()\n        except (LookupError, UnicodeDecodeError):\n            return six.text_type(raw_data, \"utf-8\", errors).strip()", "docstring": "Give as input raw data and output a str in Python 3\nand unicode in Python 2.\n\nArgs:\nraw_data: Python 2 str, Python 3 bytes or str to port\nencoding: string giving the name of an encoding\nerrors: this specifies the treatment of characters\nwhich are invalid in the input encoding\n\nReturns:\nstr (Python 3) or unicode (Python 2)", "source": "juraj-google-style"}
{"code": "def variables(self):\n    return tuple(self._flatten(predicate=_is_variable, expand_composites=True))", "docstring": "Sequence of variables owned by this module and its submodules.\n\nNote: this method uses reflection to find variables on the current instance\nand submodules. For performance reasons you may wish to cache the result\nof calling this method if you don't expect the return value to change.\n\nReturns:\nA sequence of variables for the current module (sorted by attribute\nname) followed by variables from all submodules recursively (breadth\nfirst).", "source": "github-repos"}
{"code": "def pkg_version_list(self, pkg_id):\n    pkg_data = self.__reg_software.get(pkg_id, None)\n    if (not pkg_data):\n        return []\n    if isinstance(pkg_data, list):\n        return pkg_data\n    installed_versions = list(pkg_data.get('version').keys())\n    return sorted(installed_versions, key=cmp_to_key(self.__oldest_to_latest_version))", "docstring": "Returns information on a package.\n\nArgs:\npkg_id (str): Package Id of the software/component.\n\nReturns:\nlist: List of version numbers installed.", "source": "codesearchnet"}
{"code": "def _PrintDictAsTable(self, src_dict):\n    key_list = list(src_dict.keys())\n    key_list.sort()\n    print('|', end='')\n    for key in key_list:\n        print(' {0:s} |'.format(key), end='')\n    print('')\n    print('|', end='')\n    for key in key_list:\n        print(' :---: |', end='')\n    print('')\n    print('|', end='')\n    for key in key_list:\n        print(' {0!s} |'.format(src_dict[key]), end='')\n    print('\\n')", "docstring": "Prints a table of artifact definitions.\n\nArgs:\nsrc_dict (dict[str, ArtifactDefinition]): artifact definitions by name.", "source": "codesearchnet"}
{"code": "def compare(self, reference_model):\n    self.console.print('Running comparison')\n    ref_spec = {}\n    get_weight_spec_of_saveable(reference_model, ref_spec)\n\n    def _compare(target, ref_spec, inner_path, target_name, ref_name, error_count, match_count, checked_paths):\n        base_inner_path = inner_path\n        for ref_key, ref_val in ref_spec.items():\n            inner_path = base_inner_path + '/' + ref_key\n            if inner_path in checked_paths:\n                continue\n            if ref_key not in target:\n                error_count += 1\n                checked_paths.add(inner_path)\n                if isinstance(ref_val, dict):\n                    self.console.print(f'[color(160)]...Object [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')\n                    self.console.print(f'    In {ref_name}, {inner_path} contains the following keys: {list(ref_val.keys())}')\n                else:\n                    self.console.print(f'[color(160)]...Weight [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')\n            elif isinstance(ref_val, dict):\n                _error_count, _match_count = _compare(target[ref_key], ref_spec[ref_key], inner_path, target_name, ref_name, error_count=error_count, match_count=match_count, checked_paths=checked_paths)\n                error_count += _error_count\n                match_count += _match_count\n            elif target[ref_key].shape != ref_val.shape:\n                error_count += 1\n                checked_paths.add(inner_path)\n                self.console.print(f'[color(160)]...Weight shape mismatch for [bold]{inner_path}[/][/]\\n    In {ref_name}: shape={ref_val.shape}\\n    In {target_name}: shape={target[ref_key].shape}')\n            else:\n                match_count += 1\n        return (error_count, match_count)\n    checked_paths = set()\n    error_count, match_count = _compare(self.weights_dict, ref_spec, inner_path='', target_name='saved file', ref_name='reference model', error_count=0, match_count=0, checked_paths=checked_paths)\n    _error_count, _ = _compare(ref_spec, self.weights_dict, inner_path='', target_name='reference model', ref_name='saved file', error_count=0, match_count=0, checked_paths=checked_paths)\n    error_count += _error_count\n    self.console.print('─────────────────────')\n    if error_count == 0:\n        status = 'success'\n        self.console.print('[color(28)][bold]Comparison successful:[/] saved file is compatible with the reference model[/]')\n        if match_count == 1:\n            plural = ''\n        else:\n            plural = 's'\n        self.console.print(f'    Found {match_count} matching weight{plural}')\n    else:\n        status = 'error'\n        if error_count == 1:\n            plural = ''\n        else:\n            plural = 's'\n        self.console.print(f'[color(160)][bold]Found {error_count} error{plural}:[/] saved file is not compatible with the reference model[/]')\n    return {'status': status, 'error_count': error_count, 'match_count': match_count}", "docstring": "Compares the opened file to a reference model.\n\nThis method will list all mismatches between the\ncurrently opened file and the provided reference model.\n\nArgs:\nreference_model: Model instance to compare to.\n\nReturns:\nDict with the following keys:\n`'status'`, `'error_count'`, `'match_count'`.\nStatus can be `'success'` or `'error'`.\n`'error_count'` is the number of mismatches found.\n`'match_count'` is the number of matching weights found.", "source": "github-repos"}
{"code": "def sg_sugar_func(func):\n\n    @wraps(func)\n    def wrapper(tensor, **kwargs):\n        out = func(tensor, tf.sg_opt(kwargs))\n        out._sugar = tf.sg_opt(func=func, arg=(tf.sg_opt(kwargs) + sg_get_context()), prev=tensor)\n        out.sg_reuse = types.MethodType(sg_reuse, out)\n        return out\n    return wrapper", "docstring": "Decorates a function `func` so that it can be a sugar function.\nSugar function can be used in a chainable manner.\n\nArgs:\nfunc: function to decorate\n\nReturns:\nA sugar function.", "source": "codesearchnet"}
{"code": "def list_group_members(self, name):\n    self.project_service.set_auth(self._token_project)\n    return self.project_service.list_group_members(name)", "docstring": "Get the members of a group.\n\nArgs:\nname (string): Name of group to query.\n\nReturns:\n(list[string]): List of member names.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def map_exp_ids(self, exp, positions=False):\n    if positions:\n        exp = [(('%s_%s' % (self.indexed_string.word(x[0]), '-'.join(map(str, self.indexed_string.string_position(x[0]))))), x[1]) for x in exp]\n    else:\n        exp = [(self.indexed_string.word(x[0]), x[1]) for x in exp]\n    return exp", "docstring": "Maps ids to words or word-position strings.\n\nArgs:\nexp: list of tuples [(id, weight), (id,weight)]\npositions: if True, also return word positions\n\nReturns:\nlist of tuples (word, weight), or (word_positions, weight) if\nexamples: ('bad', 1) or ('bad_3-6-12', 1)", "source": "codesearchnet"}
{"code": "def __init__(self, all_models=None):\n        \n        self.local_models = ModelRepository()  \n        if all_models:\n            self.all_models = all_models  \n        else:\n            self.all_models = ModelRepository()", "docstring": "create a new repo for a model\nArgs:\nall_models: models to be added to this new repository.", "source": "juraj-google-style"}
{"code": "def decode_base64_dict(data):\n    b64 = base64.b64decode(data['__ndarray__'])\n    array = np.copy(np.frombuffer(b64, dtype=data['dtype']))\n    if (len(data['shape']) > 1):\n        array = array.reshape(data['shape'])\n    return array", "docstring": "Decode a base64 encoded array into a NumPy array.\n\nArgs:\ndata (dict) : encoded array data to decode\n\nData should have the format encoded by :func:`encode_base64_dict`.\n\nReturns:\nnp.ndarray", "source": "codesearchnet"}
{"code": "def abs_vert_pos(self, amount):\n        \n        mL = amount%256\n        mH = amount/256\n        if amount < 32767 and amount > 0:\n            self.send(chr(27)+'('+'V'+chr(2)+chr(0)+chr(mL)+chr(mH))\n        else:\n            raise RuntimeError('Invalid vertical position in function absVertPos')", "docstring": "Specify vertical print position from the top margin position.\n\nArgs:\namount: The distance from the top margin you'd like, from 0 to 32767\nReturns:\nNone\nRaises:\nRuntimeError: Invalid vertical position.", "source": "juraj-google-style"}
{"code": "def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):\n    \n    version = 2\n    if aggregate_indexes is None:\n        aggregate_indexes = []\n    if forensic_indexes is None:\n        forensic_indexes = []\n    for aggregate_index_name in aggregate_indexes:\n        if not Index(aggregate_index_name).exists():\n            continue\n        aggregate_index = Index(aggregate_index_name)\n        doc = \"doc\"\n        fo_field = \"published_policy.fo\"\n        fo = \"fo\"\n        fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field])\n        fo_mapping = fo_mapping[list(fo_mapping.keys())[0]][\"mappings\"]\n        if doc not in fo_mapping:\n            continue\n\n        fo_mapping = fo_mapping[doc][fo_field][\"mapping\"][fo]\n        fo_type = fo_mapping[\"type\"]\n        if fo_type == \"long\":\n            new_index_name = \"{0}-v{1}\".format(aggregate_index_name, version)\n            body = {\"properties\": {\"published_policy.fo\": {\n                \"type\": \"text\",\n                \"fields\": {\n                    \"keyword\": {\n                        \"type\": \"keyword\",\n                        \"ignore_above\": 256\n                    }\n                }\n            }\n            }\n            }\n            Index(new_index_name).create()\n            Index(new_index_name).put_mapping(doc_type=doc, body=body)\n            reindex(connections.get_connection(), aggregate_index_name,\n                    new_index_name)\n            Index(aggregate_index_name).delete()\n\n    for forensic_index in forensic_indexes:\n        pass", "docstring": "Updates index mappings\n\nArgs:\naggregate_indexes (list): A list of aggregate index names\nforensic_indexes (list): A list of forensic index names", "source": "juraj-google-style"}
{"code": "def _update(self, baseNumber, magnification):\n        \n        interval = int(baseNumber * magnification)\n        self.value = [IntegerSingle(interval)]", "docstring": "update self.value with basenumber and time interval\n\nArgs:\nbaseNumber (str): self.baseNumber\nmagnification (str): self.magnification", "source": "juraj-google-style"}
{"code": "def is_datafile_valid(datafile):\n    try:\n        datafile_json = json.loads(datafile)\n    except:\n        return False\n    try:\n        jsonschema.Draft4Validator(constants.JSON_SCHEMA).validate(datafile_json)\n    except:\n        return False\n    return True", "docstring": "Given a datafile determine if it is valid or not.\n\nArgs:\ndatafile: JSON string representing the project.\n\nReturns:\nBoolean depending upon whether datafile is valid or not.", "source": "codesearchnet"}
{"code": "class QuantoConfig(QuantizationConfigMixin):\n\n    def __init__(self, weights='int8', activations=None, modules_to_not_convert: Optional[List]=None, **kwargs):\n        self.quant_method = QuantizationMethod.QUANTO\n        self.weights = weights\n        self.activations = activations\n        self.modules_to_not_convert = modules_to_not_convert\n        self.post_init()\n\n    def post_init(self):\n        \n        accepted_weights = ['float8', 'int8', 'int4', 'int2']\n        accepted_activations = [None, 'int8', 'float8']\n        if self.weights not in accepted_weights:\n            raise ValueError(f'Only support weights in {accepted_weights} but found {self.weights}')\n        if self.activations not in accepted_activations:\n            raise ValueError(f'Only support weights in {accepted_activations} but found {self.activations}')", "docstring": "This is a wrapper class about all possible attributes and features that you can play with a model that has been\nloaded using `quanto`.\n\nArgs:\nweights (`str`, *optional*, defaults to `\"int8\"`):\nThe target dtype for the weights after quantization. Supported values are (\"float8\",\"int8\",\"int4\",\"int2\")\nactivations (`str`, *optional*):\nThe target dtype for the activations after quantization. Supported values are (None,\"int8\",\"float8\")\nmodules_to_not_convert (`list`, *optional*, default to `None`):\nThe list of modules to not quantize, useful for quantizing models that explicitly require to have\nsome modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).", "source": "github-repos"}
{"code": "def update(self, data, offset, is_last, buffer_index=0):\n    if (buffer_index >= self.num_buffers):\n        raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index))\n    if ((self.buffers[buffer_index] is not None) and (self.buffers[buffer_index].shape[0] > 0)):\n        expected_next_frame = (self.current_frame + self.buffers[buffer_index].shape[0])\n        if (expected_next_frame != offset):\n            raise ValueError('There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format(expected_next_frame, offset))\n        self.buffers[buffer_index] = np.vstack([self.buffers[buffer_index], data])\n    else:\n        self.buffers[buffer_index] = data\n    self.buffers_full[buffer_index] = is_last", "docstring": "Update the buffer at the given index.\n\nArgs:\ndata (np.ndarray): The frames.\noffset (int): The index of the first frame in `data` within the sequence.\nis_last (bool): Whether this is the last block of frames in the sequence.\nbuffer_index (int): The index of the buffer to update (< self.num_buffers).", "source": "codesearchnet"}
{"code": "def load_file(filename, file_type='json', klazz=YapconfError, open_kwargs=None, load_kwargs=None):\n    _check_file_type(file_type, klazz)\n    open_kwargs = (open_kwargs or {'encoding': 'utf-8'})\n    load_kwargs = (load_kwargs or {})\n    data = None\n    with open(filename, **open_kwargs) as conf_file:\n        if (str(file_type).lower() == 'json'):\n            data = json.load(conf_file, **load_kwargs)\n        elif (str(file_type).lower() == 'yaml'):\n            data = yaml.safe_load(conf_file.read())\n        else:\n            raise NotImplementedError(('Someone forgot to implement how to load a %s file_type.' % file_type))\n    if (not isinstance(data, dict)):\n        raise klazz(('Successfully loaded %s, but the result was not a dictionary.' % filename))\n    return data", "docstring": "Load a file with the given file type.\n\nArgs:\nfilename (str): The filename to load.\nfile_type (str, optional): Defaults to 'json'. The file type for the\ngiven filename. Supported types are ``yapconf.FILE_TYPES```\nklazz (optional): The custom exception to raise if something goes\nwrong.\nopen_kwargs (dict, optional): Keyword arguments for the open call.\nload_kwargs (dict, optional): Keyword arguments for the load call.\n\nRaises:\nklazz: If no klazz was passed in, this will be the ``YapconfError``\n\nReturns:\ndict: The dictionary from the file.", "source": "codesearchnet"}
{"code": "def MakePmfFromItems(t, name=''):\n    pmf = Pmf(dict(t), name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a PMF from a sequence of value-probability pairs\n\nArgs:\nt: sequence of value-probability pairs\nname: string name for this PMF\n\nReturns:\nPmf object", "source": "codesearchnet"}
{"code": "def _get_section(name, source):\n    \n    \n    pattern = re.compile(\n        '^([^\\n]*{name}[^\\n]*\\n?(?:[ \\t].*?(?:\\n|$))*)'.format(name=name),\n        re.IGNORECASE | re.MULTILINE)\n    usage = None\n    for section in pattern.findall(source):\n        usage = _merge_section(usage, section.strip())\n    return usage", "docstring": "Extract the named section from the source.\n\nArgs:\nname: The name of the section to extract (e.g. \"Usage\").\nsource: The usage string to parse.\n\nReturns:\nA string containing only the requested section. If the section appears\nmultiple times, each instance will be merged into a single section.", "source": "juraj-google-style"}
{"code": "def sum(self, vars_list: List[str]) -> 'TensorFluent':\n        \n        operand = self\n        if operand.dtype == tf.bool:\n            operand = operand.cast(tf.float32)\n        return self._aggregation_op(tf.reduce_sum, operand, vars_list)", "docstring": "Returns the TensorFluent for the sum aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the sum aggregation function.", "source": "juraj-google-style"}
{"code": "def find_element_by_class(self, class_, update=False) -> Elements:\n        \n        return self.find_element(by=By.CLASS, value=class_, update=update)", "docstring": "Finds an element by class.\n\nArgs:\nclass_: The class of the element to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nThe element if it was found.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelement = driver.find_element_by_class('foo')", "source": "juraj-google-style"}
{"code": "def maps_json():\n    map_sources = {id: {'id': map_source.id, 'name': map_source.name, 'folder': map_source.folder, 'min_zoom': map_source.min_zoom, 'max_zoom': map_source.max_zoom, 'layers': [{'min_zoom': layer.min_zoom, 'max_zoom': layer.max_zoom, 'tile_url': layer.tile_url.replace('$', '')} for layer in map_source.layers]} for (id, map_source) in app.config['mapsources'].items()}\n    return jsonify(map_sources)", "docstring": "Generates a json object which serves as bridge between\nthe web interface and the map source collection.\n\nAll attributes relevant for openlayers are converted into\nJSON and served through this route.\n\nReturns:\nResponse: All map sources as JSON object.", "source": "codesearchnet"}
{"code": "def add_edge(self, a, b):\n    neighbors_of_a = self.adjacency_lists.get(a)\n    if (not neighbors_of_a):\n        neighbors_of_a = set()\n        self.adjacency_lists[a] = neighbors_of_a\n    neighbors_of_a.add(b)\n    neighbors_of_b = self.adjacency_lists.get(b)\n    if (not neighbors_of_b):\n        neighbors_of_b = set()\n        self.adjacency_lists[b] = neighbors_of_b\n    neighbors_of_b.add(a)", "docstring": "Used to add edges to the graph. 'a' and 'b' are vertexes and\nif 'a' or 'b' doesn't exist then the vertex is created\n\nArgs:\na (hash): is one vertex of the edge\nb (hash): is another vertex of the edge", "source": "codesearchnet"}
{"code": "def parse_compounds(compound_info, case_id, variant_type):\n    \n    \n    compounds = []\n    if compound_info:\n        for family_info in compound_info.split(','):\n            splitted_entry = family_info.split(':')\n            \n            if splitted_entry[0] == case_id:\n                for compound in splitted_entry[1].split('|'):\n                    splitted_compound = compound.split('>')\n                    compound_obj = {}\n                    compound_name = splitted_compound[0]\n                    compound_obj['variant'] = generate_md5_key(compound_name.split('_') +\n                                                               [variant_type, case_id])\n\n                    try:\n                        compound_score = float(splitted_compound[1])\n                    except (TypeError, IndexError):\n                        compound_score = 0.0\n\n                    compound_obj['score'] = compound_score\n                    compound_obj['display_name'] = compound_name\n\n                    compounds.append(compound_obj)\n\n    return compounds", "docstring": "Get a list with compounds objects for this variant.\n\nArguments:\ncompound_info(str): A Variant dictionary\ncase_id (str): unique family id\nvariant_type(str): 'research' or 'clinical'\n\nReturns:\ncompounds(list(dict)): A list of compounds", "source": "juraj-google-style"}
{"code": "def load_schema(schema_path):\n    \n    try:\n        with open(schema_path) as schema_file:\n            schema = json.load(schema_file)\n    except ValueError as e:\n        raise SchemaInvalidError('Invalid JSON in schema or included schema: '\n                                 '%s\\n%s' % (schema_file.name, str(e)))\n\n    return schema", "docstring": "Load the JSON schema at the given path as a Python object.\n\nArgs:\nschema_path: A filename for a JSON schema.\n\nReturns:\nA Python object representation of the schema.", "source": "juraj-google-style"}
{"code": "def _PrintTasksInformation(self, storage_reader):\n    \n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type, title='Tasks')\n\n    for task_start, _ in storage_reader.GetSessions():\n      start_time = timelib.Timestamp.CopyToIsoFormat(\n          task_start.timestamp)\n      task_identifier = uuid.UUID(hex=task_start.identifier)\n      task_identifier = '{0!s}'.format(task_identifier)\n      table_view.AddRow([task_identifier, start_time])\n\n    table_view.Write(self._output_writer)", "docstring": "Prints information about the tasks.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "juraj-google-style"}
{"code": "def remove_deps(self, deps):\n        \n        if not isinstance(deps, (list, tuple)):\n            deps = [deps]\n\n        assert all(isinstance(d, Dependency) for d in deps)\n\n        self._deps = [d for d in self._deps if d not in deps]\n\n        if self.is_work:\n            \n            for task in self:\n                task.remove_deps(deps)", "docstring": "Remove a list of dependencies from the :class:`Node`.\n\nArgs:\ndeps: List of :class:`Dependency` objects specifying the  dependencies of the node.", "source": "juraj-google-style"}
{"code": "def create_contentkey_authorization_policy(access_token, content):\n    \n    path = '/ContentKeyAuthorizationPolicies'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = content\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Content Key Authorization Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\ncontent (str): Content Payload.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _create_L_ind(self, L):\n        \n        \n        \n        if issparse(L[0]):\n            L = [L_t.todense() for L_t in L]\n\n        \n        L = self._to_numpy(L)\n\n        L_ind = np.ones((self.n, self.m * self.k))\n        for yi, y in enumerate(self.task_graph.feasible_set()):\n            for t in range(self.t):\n                \n                \n                L_ind[:, yi :: self.k] *= np.where(\n                    np.logical_or(L[t] == y[t], L[t] == 0), 1, 0\n                )\n\n            \n            L_ind[:, yi :: self.k] *= np.where(sum(L) != 0, 1, 0)\n\n        return L_ind", "docstring": "Convert T label matrices with labels in 0...K_t to a one-hot format\n\nHere we can view e.g. the $(i,j)$ entries of the $T$ label matrices as\na _label vector_ emitted by LF j for data point i.\n\nArgs:\nL: a T-length list of [n,m] scipy.sparse label matrices with values\nin {0,1,...,k}\n\nReturns:\nL_ind: An [n,m*k] dense np.ndarray with values in {0,1}\n\nNote that no column is required for 0 (abstain) labels.", "source": "juraj-google-style"}
{"code": "def default_multivariate_normal_fn(dtype, shape, name, trainable, add_variable_fn):\n    del name, trainable, add_variable_fn\n    dist = tfd.Normal(loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype(1))\n    batch_ndims = tf.size(input=dist.batch_shape_tensor())\n    return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)", "docstring": "Creates multivariate standard `Normal` distribution.\n\nArgs:\ndtype: Type of parameter's event.\nshape: Python `list`-like representing the parameter's event shape.\nname: Python `str` name prepended to any created (or existing)\n`tf.Variable`s.\ntrainable: Python `bool` indicating all created `tf.Variable`s should be\nadded to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.\nadd_variable_fn: `tf.get_variable`-like `callable` used to create (or\naccess existing) `tf.Variable`s.\n\nReturns:\nMultivariate standard `Normal` distribution.", "source": "codesearchnet"}
{"code": "def _ReadAppJsonFile(self, relative_path):\n    try:\n        with open(os.path.join(sys.path[0], relative_path), 'r') as f:\n            return json.load(f)\n    except (IOError, ValueError):\n        return None", "docstring": "Reads JSON file from an application directory.\n\nArgs:\nrelative_path: file name relative to application root directory.\n\nReturns:\nParsed JSON data or None if the file does not exist, can't be read or\nnot a valid JSON file.", "source": "codesearchnet"}
{"code": "def __init__(self, columns: list[str]) -> None:\n    self.columns = columns", "docstring": "Base Operation class for data processing transformations.\nArgs:\ncolumns: List of column names to apply the transformation.", "source": "github-repos"}
{"code": "def FromJson(json):\n        \n        type = ContractParameterType.FromString(json['type'])\n\n        value = json['value']\n        param = ContractParameter(type=type, value=None)\n\n        if type == ContractParameterType.Signature or type == ContractParameterType.ByteArray:\n            param.Value = bytearray.fromhex(value)\n\n        elif type == ContractParameterType.Boolean:\n            param.Value = bool(value)\n\n        elif type == ContractParameterType.Integer:\n            param.Value = int(value)\n\n        elif type == ContractParameterType.Hash160:\n            param.Value = UInt160.ParseString(value)\n\n        elif type == ContractParameterType.Hash256:\n            param.Value = UInt256.ParseString(value)\n\n        \n        elif type == ContractParameterType.PublicKey:\n            param.Value = ECDSA.decode_secp256r1(value).G\n\n        elif type == ContractParameterType.String:\n            param.Value = str(value)\n\n        elif type == ContractParameterType.Array:\n            val = [ContractParameter.FromJson(item) for item in value]\n            param.Value = val\n\n        return param", "docstring": "Convert a json object to a ContractParameter object\n\nArgs:\nitem (dict): The item to convert to a ContractParameter object\n\nReturns:\nContractParameter", "source": "juraj-google-style"}
{"code": "def __init__(self, file_name, timeout=10, delay=.05):\n        \n        self.file_name = os.path.abspath(file_name)\n        self.lockfile = os.path.abspath(file_name) + \".lock\"\n        self.timeout = float(timeout)\n        self.delay = float(delay)\n        self.is_locked = False\n\n        if self.delay > self.timeout or self.delay <= 0 or self.timeout <= 0:\n            raise ValueError(\"delay and timeout must be positive with delay \"\n                             \"<= timeout\")", "docstring": "Prepare the file locker. Specify the file to lock and optionally\nthe maximum timeout and the delay between each attempt to lock.\n\nArgs:\nfile_name: Name of file to lock.\ntimeout: Maximum timeout for locking. Defaults to 10.\ndelay: Delay between each attempt to lock. Defaults to 0.05.", "source": "juraj-google-style"}
{"code": "def write_message(self, msg, timeout=None):\n    \n    replace_dict = {'command': self.CMD_TO_WIRE[msg.command]}\n    if msg.has_data:\n      \n      data = msg[-1]\n      replace_dict[msg._fields[-1]] = len(data)\n\n    self.stream.write(struct.pack(msg.struct_format,\n                                  *msg._replace(**replace_dict)), timeout)\n    if msg.has_data:\n      self.stream.write(data, timeout)", "docstring": "Write an arbitrary message (of one of the types above).\n\nFor the host side implementation, this will only ever be a DataMessage, but\nit's implemented generically enough here that you could use\nFilesyncTransport to implement the device side if you wanted.\n\nArgs:\nmsg:  The message to send, must be one of the types above.\ntimeout: timeouts.PolledTimeout to use for the operation.", "source": "juraj-google-style"}
{"code": "def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None):\n    if ((password is None) and (bearer_token is None)):\n        logger.error('No authentication information provided; please check your object')\n        raise KeyError\n    session = requests.Session()\n    session.trust_env = False\n    headers = {'Accept-encoding': 'gzip', 'User-Agent': ('twitterdev-search-tweets-python/' + VERSION)}\n    if bearer_token:\n        logger.info('using bearer token for authentication')\n        headers['Authorization'] = 'Bearer {}'.format(bearer_token)\n        session.headers = headers\n    else:\n        logger.info('using username and password for authentication')\n        session.auth = (username, password)\n        session.headers = headers\n    if extra_headers_dict:\n        headers.update(extra_headers_dict)\n    return session", "docstring": "Creates a Requests Session for use. Accepts a bearer token\nfor premiums users and will override username and password information if\npresent.\n\nArgs:\nusername (str): username for the session\npassword (str): password for the user\nbearer_token (str): token for a premium API user.", "source": "codesearchnet"}
{"code": "def new(arg_name, annotated_with=None):\n    if (annotated_with is not None):\n        annotation = annotations.Annotation(annotated_with)\n    else:\n        annotation = annotations.NO_ANNOTATION\n    return BindingKey(arg_name, annotation)", "docstring": "Creates a BindingKey.\n\nArgs:\narg_name: the name of the bound arg\nannotation: an Annotation, or None to create an unannotated binding key\nReturns:\na new BindingKey", "source": "codesearchnet"}
{"code": "def install(self, connection, partition, table_name=None, index_columns=None, materialize=False, logger=None):\n    raise NotImplementedError", "docstring": "Installs partition's mpr to the database to allow to execute sql queries over mpr.\n\nArgs:\nconnection:\npartition (orm.Partition):\nmaterialize (boolean): if True, create generic table. If False create MED over mpr.\n\nReturns:\nstr: name of the created table.", "source": "codesearchnet"}
{"code": "def delete_variant(self, variant):\n    mongo_variant = self.get_variant(variant)\n    if mongo_variant:\n        if (mongo_variant['observations'] == 1):\n            LOG.debug('Removing variant {0}'.format(mongo_variant.get('_id')))\n            message = self.db.variant.delete_one({'_id': variant['_id']})\n        else:\n            LOG.debug('Decreasing observations for {0}'.format(mongo_variant.get('_id')))\n            message = self.db.variant.update_one({'_id': mongo_variant['_id']}, {'$inc': {'observations': (- 1), 'homozygote': (- variant.get('homozygote', 0)), 'hemizygote': (- variant.get('hemizygote', 0))}, '$pull': {'families': variant.get('case_id')}}, upsert=False)\n    return", "docstring": "Delete an observation in the database\n\nThis means that we decrease the 'observations' count by one.\nIf 'observations' == 1 we remove the variant. If the variant was homozygote\nwe decrease 'homozygote' by one.\nAlso remove the family from the 'families' array.\n\nArgs:\nvariant (dict): A variant dictionary", "source": "codesearchnet"}
{"code": "def _export_work_errors(self, work, output_file):\n    \n    errors = set()\n    for v in itervalues(work.work):\n      if v['is_completed'] and v['error'] is not None:\n        errors.add(v['error'])\n    with open(output_file, 'w') as f:\n      for e in sorted(errors):\n        f.write(e)\n        f.write('\\n')", "docstring": "Saves errors for given work pieces into file.\n\nArgs:\nwork: instance of either AttackWorkPieces or DefenseWorkPieces\noutput_file: name of the output file", "source": "juraj-google-style"}
{"code": "def get_uri(dir_name):\n    fullpath = os.path.abspath(dir_name)\n    try:\n        hostname = socket.gethostbyaddr(socket.gethostname())[0]\n    except:\n        hostname = socket.gethostname()\n    return '{}:{}'.format(hostname, fullpath)", "docstring": "Returns the URI path for a directory. This allows files hosted on\ndifferent file servers to have distinct locations.\n\nArgs:\ndir_name:\nA directory name.\n\nReturns:\nFull URI path, e.g., fileserver.host.com:/full/path/of/dir_name.", "source": "codesearchnet"}
{"code": "def report(\n    vulnerabilities,\n    fileobj,\n    print_sanitised,\n):\n    \n    n_vulnerabilities = len(vulnerabilities)\n    unsanitised_vulnerabilities = [v for v in vulnerabilities if not isinstance(v, SanitisedVulnerability)]\n    n_unsanitised = len(unsanitised_vulnerabilities)\n    n_sanitised = n_vulnerabilities - n_unsanitised\n    heading = \"{} vulnerabilit{} found{}.\\n\".format(\n        'No' if n_unsanitised == 0 else n_unsanitised,\n        'y' if n_unsanitised == 1 else 'ies',\n        \" (plus {} sanitised)\".format(n_sanitised) if n_sanitised else \"\",\n    )\n    vulnerabilities_to_print = vulnerabilities if print_sanitised else unsanitised_vulnerabilities\n    with fileobj:\n        for i, vulnerability in enumerate(vulnerabilities_to_print, start=1):\n            fileobj.write(vulnerability_to_str(i, vulnerability))\n\n        if n_unsanitised == 0:\n            fileobj.write(color(heading, GOOD))\n        else:\n            fileobj.write(color(heading, DANGER))", "docstring": "Prints issues in color-coded text format.\n\nArgs:\nvulnerabilities: list of vulnerabilities to report\nfileobj: The output file object, which may be sys.stdout\nprint_sanitised: whether sanitised vulnerabilities should also be printed", "source": "juraj-google-style"}
{"code": "def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain):\n    password = key_chain.GetCredential(path_spec, 'password')\n    if password:\n        bde_volume.set_password(password)\n    recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n    if recovery_password:\n        bde_volume.set_recovery_password(recovery_password)\n    startup_key = key_chain.GetCredential(path_spec, 'startup_key')\n    if startup_key:\n        bde_volume.read_startup_key(startup_key)\n    bde_volume.open_file_object(file_object)", "docstring": "Opens the BDE volume using the path specification.\n\nArgs:\nbde_volume (pybde.volume): BDE volume.\npath_spec (PathSpec): path specification.\nfile_object (FileIO): file-like object.\nkey_chain (KeyChain): key chain.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, full_name=None, group_identifier=None, identifier=None,\n      path_separator='/', user_directory=None, username=None):\n    \n    super(UserAccountArtifact, self).__init__()\n    self._path_separator = path_separator\n    self.full_name = full_name\n    self.group_identifier = group_identifier\n    self.identifier = identifier\n    \n    self.user_directory = user_directory\n    self.username = username", "docstring": "Initializes an user artifact.\n\nArgs:\nfull_name (Optional[str]): name describing the user e.g. full name.\ngroup_identifier (Optional[str]): identifier of the primary group\nthe user is part of.\nidentifier (Optional[str]): user identifier.\npath_separator (Optional[str]): path segment separator.\nuser_directory (Optional[str]): path of the user (or home or profile)\ndirectory.\nusername (Optional[str]): name uniquely identifying the user.", "source": "juraj-google-style"}
{"code": "def Dict(fields):\n    \n    check_user_facing_fields_dict(fields, 'Dict')\n\n    class _Dict(_ConfigComposite):\n        def __init__(self):\n            key = 'Dict.' + str(DictCounter.get_next_count())\n            super(_Dict, self).__init__(\n                name=None,\n                key=key,\n                fields=fields,\n                description='A configuration dictionary with typed fields',\n                type_attributes=ConfigTypeAttributes(is_builtin=True),\n            )\n\n    return _Dict", "docstring": "Schema for configuration data with string keys and typed values via :py:class:`Field` .\n\nArgs:\nfields (Dict[str, Field])", "source": "juraj-google-style"}
{"code": "def generateRandomInput(numRecords, elemSize = 400, numSet = 42):\n  \n\n  inputs = []\n\n  for _ in xrange(numRecords):\n\n    input = np.zeros(elemSize, dtype=realDType)\n    for _ in range(0,numSet):\n      ind = np.random.random_integers(0, elemSize-1, 1)[0]\n      input[ind] = 1\n    while abs(input.sum() - numSet) > 0.1:\n      ind = np.random.random_integers(0, elemSize-1, 1)[0]\n      input[ind] = 1\n\n    inputs.append(input)\n\n  return inputs", "docstring": "Generates a set of input record\n\nParams:\nnumRecords - how many records to generate\nelemSize - the size of each record (num 0s or 1s)\nnumSet - how many 1s in each record\n\nReturns: a list of inputs", "source": "juraj-google-style"}
{"code": "def place_market_order(self, product_id, side, size=None, funds=None, client_oid=None, stp=None, overdraft_enabled=None, funding_amount=None):\n    params = {'product_id': product_id, 'side': side, 'order_type': 'market', 'size': size, 'funds': funds, 'client_oid': client_oid, 'stp': stp, 'overdraft_enabled': overdraft_enabled, 'funding_amount': funding_amount}\n    params = dict(((k, v) for (k, v) in params.items() if (v is not None)))\n    return self.place_order(**params)", "docstring": "Place market order.\n\nArgs:\nproduct_id (str): Product to order (eg. 'BTC-USD')\nside (str): Order side ('buy' or 'sell)\nsize (Optional[Decimal]): Desired amount in crypto. Specify this or\n`funds`.\nfunds (Optional[Decimal]): Desired amount of quote currency to use.\nSpecify this or `size`.\nclient_oid (Optional[str]): User-specified Order ID\nstp (Optional[str]): Self-trade prevention flag. See `place_order`\nfor details.\noverdraft_enabled (Optional[bool]): If true funding above and\nbeyond the account balance will be provided by margin, as\nnecessary.\nfunding_amount (Optional[Decimal]): Amount of margin funding to be\nprovided for the order. Mutually exclusive with\n`overdraft_enabled`.\n\nReturns:\ndict: Order details. See `place_order` for example.", "source": "codesearchnet"}
{"code": "def conv_block_internal(conv_fn,\n                        inputs,\n                        filters,\n                        dilation_rates_and_kernel_sizes,\n                        first_relu=True,\n                        use_elu=False,\n                        separabilities=None,\n                        **kwargs):\n  \n\n  name = kwargs.pop(\"name\") if \"name\" in kwargs else None\n  mask = kwargs.pop(\"mask\") if \"mask\" in kwargs else None\n\n  \n  \n  \n  \n\n  use_layer_norm = \"normalizer_fn\" not in kwargs\n  norm = kwargs.pop(\"normalizer_fn\", None)\n  use_normalizer_fn = use_layer_norm or norm\n\n  if use_layer_norm:\n    norm = lambda x, name: layer_norm(x, filters, name=name)\n\n  with tf.variable_scope(name, \"conv_block\", [inputs]):\n    cur, counter = inputs, -1\n    for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:\n      counter += 1\n      if first_relu or counter > 0:\n        cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)\n      if mask is not None:\n        cur *= mask\n      if separabilities:\n        cur = conv_fn(\n            cur,\n            filters,\n            kernel_size,\n            dilation_rate=dilation_rate,\n            name=\"conv_block_%d\" % counter,\n            use_bias=norm is None,\n            separability=separabilities[counter],\n            **kwargs)\n      else:\n        cur = conv_fn(\n            cur,\n            filters,\n            kernel_size,\n            dilation_rate=dilation_rate,\n            name=\"conv_block_%d\" % counter,\n            use_bias=norm is None,\n            **kwargs)\n      if use_normalizer_fn:\n        cur = norm(cur, name=\"conv_block_norm_%d\" % counter)\n    return cur", "docstring": "A block of convolutions.\n\nArgs:\nconv_fn: convolution function, e.g. conv or separable_conv.\ninputs: a Tensor\nfilters: an Integer\ndilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))\nfirst_relu: whether to do a relu at start (defaults to True)\nuse_elu: whether to use ELUs instead of ReLUs (defaults to False)\nseparabilities: list of separability factors (per-layer).\n**kwargs: additional arguments (e.g., pooling)\n\nReturns:\na Tensor.", "source": "juraj-google-style"}
{"code": "def create_software_renderer(self, surface):\n        \n        renderer = object.__new__(Renderer)\n        renderer._ptr = check_ptr_err(lib.SDL_CreateSoftwareRenderer(surface._ptr))\n        return renderer", "docstring": "Create a 2D software rendering context for a surface.\n\nArgs:\nsurface (Surface): The surface where rendering is done.\n\nReturns:\nRenderer: A 2D software rendering context.\n\nRaises:\nSDLError: If there was an error creating the renderer.", "source": "juraj-google-style"}
{"code": "def _pack_with_custom_ops(dataset, keys, length):\n  \n  from tensor2tensor.data_generators.ops import pack_sequences_ops  \n  \n  k1, k2 = keys\n  def map_fn_custom(x):\n    \n    (k1_packed, k1_segmengation, k1_position,\n     k2_packed, k2_segmentation, k2_position) = (\n         pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))\n    packed = {\n        k1: k1_packed,\n        k1 + \"_segmentation\": k1_segmengation,\n        k1 + \"_position\": k1_position,\n        k2: k2_packed,\n        k2 + \"_segmentation\": k2_segmentation,\n        k2 + \"_position\": k2_position,\n    }\n    return tf.data.Dataset.from_tensor_slices(packed)\n  dataset = dataset.flat_map(map_fn_custom)\n  return dataset", "docstring": "Helper-function for packing a dataset which has already been batched.\n\nSee pack_dataset()\n\nRelies on custom ops which require a custom compiled binary.\nFaster than _pack_with_tf_ops(), and denser packing.\n\nArgs:\ndataset: a dataset containing padded batches of examples.\nkeys: a list of strings (must have length 2)\nlength: an integer\n\nReturns:\na dataset.", "source": "juraj-google-style"}
{"code": "def load_recipe(self, recipe):\n    \n    self.recipe = recipe\n    for module_description in recipe['modules']:\n      \n      module_name = module_description['name']\n      module = self.config.get_module(module_name)(self)\n      self._module_pool[module_name] = module", "docstring": "Populates the internal module pool with modules declared in a recipe.\n\nArgs:\nrecipe: Dict, recipe declaring modules to load.", "source": "juraj-google-style"}
{"code": "def get_structure_by_id(self, cod_id, **kwargs):\n        \n        r = requests.get(\"http://www.crystallography.net/cod/%s.cif\" % cod_id)\n        return Structure.from_str(r.text, fmt=\"cif\", **kwargs)", "docstring": "Queries the COD for a structure by id.\n\nArgs:\ncod_id (int): COD id.\nkwargs: All kwargs supported by\n:func:`pymatgen.core.structure.Structure.from_str`.\n\nReturns:\nA Structure.", "source": "juraj-google-style"}
{"code": "def run_foreach_or_conditional(self, context):\n    logger.debug('starting')\n    if self.foreach_items:\n        self.foreach_loop(context)\n    else:\n        self.run_conditional_decorators(context)\n    logger.debug('done')", "docstring": "Run the foreach sequence or the conditional evaluation.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "codesearchnet"}
{"code": "def remove_model_references_from_file(filename, models, condition):\n    filename = REPO_PATH / filename\n    with open(filename, 'r') as f:\n        init_file = f.read()\n    new_file_lines = []\n    for i, line in enumerate(init_file.split('\\n')):\n        if any((condition(line, model) for model in models)):\n            continue\n        new_file_lines.append(line)\n    with open(filename, 'w') as f:\n        f.write('\\n'.join(new_file_lines))", "docstring": "Remove all references to the given models from the given file\n\nArgs:\nfilename (str): The file to remove the references from\nmodels (List[str]): The models to remove\ncondition (Callable): A function that takes the line and model and returns True if the line should be removed", "source": "github-repos"}
{"code": "def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None, domains=EMAIL_DOMAINS, excludeDomains=[]):\n    email_candidates = []\n    if (emails != None):\n        email_candidates = emails\n    elif (emailsFile != None):\n        with open(emailsFile, 'r') as iF:\n            email_candidates = iF.read().splitlines()\n    elif (nicks != None):\n        for n in nicks:\n            for d in domains:\n                if (d not in excludeDomains):\n                    email_candidates.append(((n + '@') + d))\n    elif (nicksFile != None):\n        with open(nicksFile, 'r') as iF:\n            nicks = iF.read().splitlines()\n            for n in nicks:\n                for d in domains:\n                    if (d not in excludeDomains):\n                        email_candidates.append(((n + '@') + d))\n    return email_candidates", "docstring": "Method that generates a list of emails.\n\nArgs:\n-----\nemails: Any premade list of emails.\nemailsFile: Filepath to the emails file (one per line).\nnicks: A list of aliases.\nnicksFile: Filepath to the aliases file (one per line).\ndomains: Domains where the aliases will be tested.\nexcludeDomains: Domains to be excluded from the created list.\n\nReturns:\n--------\nlist: the list of emails that will be verified.", "source": "codesearchnet"}
{"code": "async def movehere(self, channel):\n    self.logger.debug('movehere command')\n    (await self.embed.delete())\n    self.embed.channel = channel\n    (await self.embed.send())\n    (await self.add_reactions())\n    self.statuslog.info('Moved to front')", "docstring": "Moves the embed message to a new channel; can also be used to move the musicplayer to the front\n\nArgs:\nchannel (discord.Channel): The channel to move to", "source": "codesearchnet"}
{"code": "def set_rgb_dim_level(self, channelIndex: int, rgb: RGBColorState, dimLevel: float):\n        \n        data = {\n            \"channelIndex\": channelIndex,\n            \"deviceId\": self.id,\n            \"simpleRGBColorState\": rgb,\n            \"dimLevel\": dimLevel,\n        }\n        return self._restCall(\n            \"device/control/setSimpleRGBColorDimLevel\", body=json.dumps(data)\n        )", "docstring": "sets the color and dimlevel of the lamp\n\nArgs:\nchannelIndex(int): the channelIndex of the lamp. Use self.topLightChannelIndex or self.bottomLightChannelIndex\nrgb(RGBColorState): the color of the lamp\ndimLevel(float): the dimLevel of the lamp. 0.0 = off, 1.0 = MAX\n\nReturns:\nthe result of the _restCall", "source": "juraj-google-style"}
{"code": "def dedent(self, node, dirty=True):\n        \n        if node.id not in self._subitems:\n            return\n\n        del self._subitems[node.id]\n        node.super_list_item_id = None\n        node.parent_item = None\n        if dirty:\n            node.touch(True)", "docstring": "Dedent an item. Does nothing if the target is not indented under this item.\n\nArgs:\nnode (gkeepapi.node.ListItem): Item to dedent.\ndirty (bool): Whether this node should be marked dirty.", "source": "juraj-google-style"}
{"code": "def _pare_down_model(self, strain_gempro, genes_to_remove):\n        \n        \n        strain_genes = [x.id for x in strain_gempro.genes]\n        genes_to_remove.extend(self.missing_in_orthology_matrix)\n        genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes)))\n\n        if len(genes_to_remove) == 0:\n            log.info('{}: no genes marked non-functional'.format(strain_gempro.id))\n            return\n        else:\n            log.debug('{}: {} genes to be marked non-functional'.format(strain_gempro.id, len(genes_to_remove)))\n\n        \n        if strain_gempro.model:\n            strain_gempro.model._trimmed = False\n            strain_gempro.model._trimmed_genes = []\n            strain_gempro.model._trimmed_reactions = {}\n\n            \n            cobra.manipulation.delete_model_genes(strain_gempro.model, genes_to_remove)\n\n            if strain_gempro.model._trimmed:\n                log.info('{}: marked {} genes as non-functional, '\n                         'deactivating {} reactions'.format(strain_gempro.id, len(strain_gempro.model._trimmed_genes),\n                                                            len(strain_gempro.model._trimmed_reactions)))\n        \n        else:\n            for g in genes_to_remove:\n                strain_gempro.genes.get_by_id(g).functional = False\n            log.info('{}: marked {} genes as non-functional'.format(strain_gempro.id, len(genes_to_remove)))", "docstring": "Mark genes as non-functional in a GEM-PRO. If there is a COBRApy model associated with it, the\nCOBRApy method delete_model_genes is utilized to delete genes.\n\nArgs:\nstrain_gempro (GEMPRO): GEMPRO object\ngenes_to_remove (list): List of gene IDs to remove from the model", "source": "juraj-google-style"}
{"code": "def create_audit_student_enrollment(self, course_id):\n    audit_enrollment = {'mode': 'audit', 'course_details': {'course_id': course_id}}\n    resp = self.requester.post(urljoin(self.base_url, self.enrollment_url), json=audit_enrollment)\n    resp.raise_for_status()\n    return Enrollment(resp.json())", "docstring": "Creates an audit enrollment for the user in a given course\n\nArgs:\ncourse_id (str): an edX course id\n\nReturns:\nEnrollment: object representing the student enrollment in the provided course", "source": "codesearchnet"}
{"code": "def _print_drift_report(self):\n    try:\n        response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)\n        rows = []\n        for resource in response.get('StackResources', []):\n            row = []\n            row.append(resource.get('LogicalResourceId', 'unknown'))\n            row.append(resource.get('PhysicalResourceId', 'unknown'))\n            row.append(resource.get('ResourceStatus', 'unknown'))\n            row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))\n            rows.append(row)\n        print('Drift Report:')\n        print(tabulate(rows, headers=['Logical ID', 'Physical ID', 'Resource Status', 'Drift Info']))\n    except Exception as wtf:\n        logging.error(wtf, exc_info=True)\n        return False\n    return True", "docstring": "Report the drift of the stack.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False\n\nNote: not yet implemented", "source": "codesearchnet"}
{"code": "def parse(self, s, term_join=None):\n    if (not term_join):\n        term_join = (lambda x: (('(' + ' OR '.join(x)) + ')'))\n    toks = self.scan(s)\n    if (toks and toks[0] and ((toks[0][0] == self.TERM) or (toks[0][0] == self.QUOTEDTERM))):\n        toks = ([(self.MARKER, 'about')] + toks)\n    bymarker = []\n    for t in toks:\n        if (t[0] == self.MARKER):\n            bymarker.append((t[1], []))\n        else:\n            bymarker[(- 1)][1].append(t)\n    comps = []\n    for t in bymarker:\n        t = list(t)\n        if ((t[0] == 'in') and (len(t[1]) == 1) and isinstance(t[1][0][1], string_types) and (self.stem(t[1][0][1]) in self.geograins.keys())):\n            t[0] = 'by'\n        if ((t[0] == 'from') and (len(t[1]) == 1) and (t[1][0][0] != self.YEAR)):\n            t[0] = 'source'\n        comps.append(t)\n    groups = {marker: [] for (marker, _) in comps}\n    for (marker, terms) in comps:\n        groups[marker] += [term for (marker, term) in terms]\n    for (marker, group) in groups.items():\n        if (marker == 'about'):\n            continue\n        if ((len(group) > 1) and (marker not in self.multiterms)):\n            (groups[marker], extras) = ([group[0]], group[1:])\n            if (not ('about' in groups)):\n                groups['about'] = extras\n            else:\n                groups['about'] += extras\n        if (marker == 'by'):\n            groups['by'] = [self.geograins.get(self.stem(e)) for e in group]\n    for (marker, terms) in iteritems(groups):\n        if (len(terms) > 1):\n            if (marker in 'in'):\n                groups[marker] = ' '.join(terms)\n            else:\n                groups[marker] = term_join(terms)\n        elif (len(terms) == 1):\n            groups[marker] = terms[0]\n        else:\n            pass\n    return groups", "docstring": "Parses search term to\n\nArgs:\ns (str): string with search term.\nor_join (callable): function to join 'OR' terms.\n\nReturns:\ndict: all of the terms grouped by marker. Key is a marker, value is a term.\n\nExample:\n>>> SearchTermParser().parse('table2 from 1978 to 1979 in california')\n{'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}", "source": "codesearchnet"}
{"code": "def linear_interpolate(tensor1, tensor2, coeffs):\n    interp_tensors = []\n    for coeff in coeffs:\n        interp_tensor = (tensor1 + (coeff * (tensor2 - tensor1)))\n        interp_tensors.append(interp_tensor)\n    return tf.concat(interp_tensors, axis=0)", "docstring": "Linearly interpolate between two tensors at coeff.\n\nArgs:\ntensor1: 4-D Tensor, shape=(NHWC)\ntensor2: 4-D Tensor, shape=(NHWC)\ncoeffs: list of floats.\nReturns:\ninterp_latents: 5-D Tensor, with interp_latents[i] representing\ninterpolations at coeffs[i].\nshape=(len(coeffs), NHWC)", "source": "codesearchnet"}
{"code": "def get_ctl_field(self, controlfield, alt=None):\n        \n        if not alt:\n            return self.controlfields[controlfield]\n\n        return self.controlfields.get(controlfield, alt)", "docstring": "Method wrapper over :attr:`.controlfields` dictionary.\n\nArgs:\ncontrolfield (str): Name of the controlfield.\nalt (object, default None): Alternative value of the `controlfield`\nwhen `controlfield` couldn't be found.\n\nReturns:\nstr: record from given `controlfield`", "source": "juraj-google-style"}
{"code": "def get_data_path(self, filename, env_prefix=None):\n    if (env_prefix == None):\n        target_file = filename\n    else:\n        target_file = os.path.join(env_prefix, filename)\n    if os.path.exists(os.path.join(self._data_path, target_file)):\n        return os.path.join(self._data_path, target_file)\n    else:\n        raise DataNotFoundError(u('Cannot find data file: {0}').format(target_file))", "docstring": "Get data path.\n\nArgs:\nfilename (string) : Name of file inside of /data folder to retrieve.\n\nKwargs:\nenv_prefix (string) : Name of subfolder, ex: 'qa' will find files in /data/qa\n\nReturns:\nString - path to file.\n\nUsage::\n\nopen(WTF_DATA_MANAGER.get_data_path('testdata.csv')\n\nNote: WTF_DATA_MANAGER is a provided global instance of DataManager", "source": "codesearchnet"}
{"code": "def handle_backend_response(self, orig_request, backend_request, response_status, response_headers, response_body, method_config, start_response):\n    for (header, value) in response_headers:\n        if ((header.lower() == 'content-type') and (not value.lower().startswith('application/json'))):\n            return self.fail_request(orig_request, ('Non-JSON reply: %s' % response_body), start_response)\n    self.check_error_response(response_body, response_status)\n    empty_response = self.check_empty_response(orig_request, method_config, start_response)\n    if (empty_response is not None):\n        return empty_response\n    body = self.transform_rest_response(response_body)\n    cors_handler = self._create_cors_handler(orig_request)\n    return util.send_wsgi_response(response_status, response_headers, body, start_response, cors_handler=cors_handler)", "docstring": "Handle backend response, transforming output as needed.\n\nThis calls start_response and returns the response body.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nbackend_request: An ApiRequest, the transformed request that was\nsent to the backend handler.\nresponse_status: A string, the status from the response.\nresponse_headers: A dict, the headers from the response.\nresponse_body: A string, the body of the response.\nmethod_config: A dict, the API config of the method to be called.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body.", "source": "codesearchnet"}
{"code": "def multi_replace(str_, search_list, repl_list):\n    if isinstance(repl_list, six.string_types):\n        repl_list_ = ([repl_list] * len(search_list))\n    else:\n        repl_list_ = repl_list\n    newstr = str_\n    assert (len(search_list) == len(repl_list_)), 'bad lens'\n    for (search, repl) in zip(search_list, repl_list_):\n        newstr = newstr.replace(search, repl)\n    return newstr", "docstring": "Performs multiple replace functions for each item in search_list and\nrepl_list.\n\nArgs:\nstr_ (str): string to search\nsearch_list (list): list of search strings\nrepl_list (list or str): one or multiple replace strings\n\nReturns:\nstr: str_\n\nCommandLine:\npython -m utool.util_str --exec-multi_replace\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'\n>>> search_list = ['.', ':', '---']\n>>> repl_list = '@'\n>>> str_ = multi_replace(str_, search_list, repl_list)\n>>> result = ('str_ = %s' % (str(str_),))\n>>> print(result)\nstr_ = foo@ bar@ baz; spam-eggs @ eggs+spam", "source": "codesearchnet"}
{"code": "def RegisterPathSpec(cls, path_spec_type):\n    type_indicator = path_spec_type.TYPE_INDICATOR\n    if (type_indicator in cls._path_spec_types):\n        raise KeyError('Path specification type: {0:s} already set.'.format(type_indicator))\n    cls._path_spec_types[type_indicator] = path_spec_type\n    if getattr(path_spec_type, '_IS_SYSTEM_LEVEL', False):\n        cls._system_level_type_indicators[type_indicator] = path_spec_type", "docstring": "Registers a path specification type.\n\nArgs:\npath_spec_type (type): path specification type.\n\nRaises:\nKeyError: if path specification is already registered.", "source": "codesearchnet"}
{"code": "def _begin_disconnection_action(self, action):\n        \n\n        conn_key = action.data['id']\n        callback = action.data['callback']\n\n        if self._get_connection_state(conn_key) != self.Idle:\n            callback(conn_key, self.id, False, 'Cannot start disconnection, connection is not idle')\n            return\n\n        \n        data = self._get_connection(conn_key)\n        data['state'] = self.Disconnecting\n        data['microstate'] = None\n        data['callback'] = callback\n        data['timeout'] = action.timeout", "docstring": "Begin a disconnection attempt\n\nArgs:\naction (ConnectionAction): the action object describing what we are\nconnecting to and what the result of the operation was", "source": "juraj-google-style"}
{"code": "class PromptDepthAnythingNeck(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.reassemble_stage = PromptDepthAnythingReassembleStage(config)\n        self.convs = nn.ModuleList()\n        for channel in config.neck_hidden_sizes:\n            self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))\n        self.fusion_stage = PromptDepthAnythingFeatureFusionStage(config)\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None, prompt_depth: Optional[torch.Tensor]=None) -> List[torch.Tensor]:\n        \n        if not isinstance(hidden_states, (tuple, list)):\n            raise TypeError('hidden_states should be a tuple or list of tensors')\n        if len(hidden_states) != len(self.config.neck_hidden_sizes):\n            raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n        hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n        features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n        output = self.fusion_stage(features, prompt_depth=prompt_depth)\n        return output", "docstring": "PromptDepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as\ninput and produces another list of tensors as output. For PromptDepthAnything, it includes 2 stages:\n\n* PromptDepthAnythingReassembleStage\n* PromptDepthAnythingFeatureFusionStage.\n\nArgs:\nconfig (dict): config dict.", "source": "github-repos"}
{"code": "def build_defaults(self):\n    defaults = {}\n    for arg in self.args:\n        if (not isinstance(arg, _BaseOpt)):\n            raise errors.InvalidSchemeError('Unable to build default for non-Option type')\n        if (not isinstance(arg.default, NoDefault)):\n            defaults[arg.name] = arg.default\n        if isinstance(arg, DictOption):\n            if arg.scheme:\n                b = arg.scheme.build_defaults()\n                if b:\n                    defaults[arg.name] = b\n    return defaults", "docstring": "Build a dictionary of default values from the `Scheme`.\n\nReturns:\ndict: The default configurations as set by the `Scheme`.\n\nRaises:\nerrors.InvalidSchemeError: The `Scheme` does not contain\nvalid options.", "source": "codesearchnet"}
{"code": "def format_counts(counts, header=None):\n    \n    counts_dict = {}\n    for key, val in counts.items():\n        key = format_counts_memory(key, header)\n        counts_dict[key] = val\n    return counts_dict", "docstring": "Format a single experiment result coming from backend to present\nto the Qiskit user.\n\nArgs:\ncounts (dict): counts histogram of multiple shots\nheader (dict): the experiment header dictionary containing\nuseful information for postprocessing.\n\nReturns:\ndict: a formatted counts", "source": "juraj-google-style"}
{"code": "def sparse_eye(num_rows, num_columns=None, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, default_name='eye', values=[num_rows, num_columns]):\n        num_rows = _make_int64_tensor(num_rows, 'num_rows')\n        num_columns = num_rows if num_columns is None else _make_int64_tensor(num_columns, 'num_columns')\n        diag_size = math_ops.minimum(num_rows, num_columns)\n        diag_range = math_ops.range(diag_size, dtype=dtypes.int64)\n        return sparse_tensor.SparseTensor(indices=array_ops_stack.stack([diag_range, diag_range], axis=1), values=array_ops.ones(diag_size, dtype=dtype), dense_shape=[num_rows, num_columns])", "docstring": "Creates a two-dimensional sparse tensor with ones along the diagonal.\n\nArgs:\nnum_rows: Non-negative integer or `int32` scalar `tensor` giving the number\nof rows in the resulting matrix.\nnum_columns: Optional non-negative integer or `int32` scalar `tensor` giving\nthe number of columns in the resulting matrix. Defaults to `num_rows`.\ndtype: The type of element in the resulting `Tensor`.\nname: A name for this `Op`. Defaults to \"eye\".\n\nReturns:\nA `SparseTensor` of shape [num_rows, num_columns] with ones along the\ndiagonal.", "source": "github-repos"}
{"code": "def Decompress(self, compressed_data):\n    \n    try:\n      uncompressed_data = self._bz2_decompressor.decompress(compressed_data)\n      remaining_compressed_data = getattr(\n          self._bz2_decompressor, 'unused_data', b'')\n\n    except (EOFError, IOError) as exception:\n      raise errors.BackEndError((\n          'Unable to decompress BZIP2 compressed stream with error: '\n          '{0!s}.').format(exception))\n\n    return uncompressed_data, remaining_compressed_data", "docstring": "Decompresses the compressed data.\n\nArgs:\ncompressed_data (bytes): compressed data.\n\nReturns:\ntuple(bytes, bytes): uncompressed data and remaining compressed data.\n\nRaises:\nBackEndError: if the BZIP2 compressed stream cannot be decompressed.", "source": "juraj-google-style"}
{"code": "def Close(self, abort=False):\n    \n    if not self._closed_event or not self._terminate_event:\n      raise RuntimeError('Missing closed or terminate event.')\n\n    if not abort and self._closed_event.is_set():\n      raise errors.QueueAlreadyClosed()\n\n    self._closed_event.set()\n\n    if abort:\n      if not self._closed_event.is_set():\n        logger.warning(\n            '{0:s} queue aborting. Contents may be lost.'.format(self.name))\n\n      \n      \n      \n      self._terminate_event.set()\n\n      self._linger_seconds = 0\n\n      if self._zmq_thread:\n        logger.debug('[{0:s}] Waiting for thread to exit.'.format(self.name))\n        self._zmq_thread.join(timeout=self.timeout_seconds)\n        if self._zmq_thread.isAlive():\n          logger.error((\n              '{0:s} ZMQ responder thread did not exit within timeout').format(\n                  self.name))\n    else:\n      logger.debug(\n          '{0:s} queue closing, will linger for up to {1:d} seconds'.format(\n              self.name, self._linger_seconds))", "docstring": "Closes the queue.\n\nArgs:\nabort (Optional[bool]): whether the Close is the result of an abort\ncondition. If True, queue contents may be lost.\n\nRaises:\nQueueAlreadyClosed: if the queue is not started, or has already been\nclosed.\nRuntimeError: if closed or terminate event is missing.", "source": "juraj-google-style"}
{"code": "def subscribe(self, topic, callback, ordered=True):\n    if (('+' in topic) or ('#' in topic)):\n        regex = re.compile(topic.replace('+', '[^/]+').replace('#', '.*'))\n        self.wildcard_queues.append((topic, regex, callback, ordered))\n    else:\n        self.queues[topic] = PacketQueue(0, callback, ordered)\n    try:\n        self.client.subscribe(topic, 1, self._on_receive)\n    except operationError as exc:\n        raise InternalError('Could not subscribe to topic', topic=topic, message=exc.message)", "docstring": "Subscribe to future messages in the given topic\n\nThe contents of topic should be in the format created by self.publish with a\nsequence number of message type encoded as a json string.\n\nWildcard topics containing + and # are allowed.\n\nArgs:\ntopic (string): The MQTT topic to subscribe to\ncallback (callable): The callback to call when a new message is received\nThe signature of callback should be callback(sequence, topic, type, message)\nordered (bool): Whether messages on this topic have a sequence number that must\nbe checked and queued to ensure that packets are received in order", "source": "codesearchnet"}
{"code": "def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):\n  \n  with tf.variable_scope('van_dec'):\n    dec = tf.layers.conv2d_transpose(\n        x, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=2)\n    dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n    dec = tf.contrib.layers.layer_norm(dec)\n    dec = tf.layers.conv2d_transpose(\n        dec,\n        first_depth * 4,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=1)\n    dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n    dec = tf.layers.conv2d_transpose(\n        dec,\n        first_depth * 2,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=1)\n    dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n    dec = tf.contrib.layers.layer_norm(dec)\n\n    dec = tf.layers.conv2d_transpose(\n        dec,\n        first_depth * 2,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=2)\n    dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n    dec = tf.layers.conv2d_transpose(\n        dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)\n    dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n    dec = tf.contrib.layers.layer_norm(dec)\n\n    dec = tf.layers.conv2d_transpose(\n        dec,\n        output_shape[3] + 1,\n        3,\n        padding='same',\n        activation=tf.nn.relu,\n        strides=2)\n    dec = tf.nn.dropout(dec, hparams.van_keep_prob)\n\n    out_mask = tf.layers.conv2d_transpose(\n        dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None)\n\n    mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])\n    out = out_mask[:, :, :, :3]\n\n    return out * mask + skip_connections[0] * (1 - mask)", "docstring": "The VAN decoder.\n\nArgs:\nx: The analogy information to decode.\nskip_connections: The encoder layers which can be used as skip connections.\noutput_shape: The shape of the desired output image.\nfirst_depth: The depth of the first layer of the van image encoder.\nhparams: The python hparams.\n\nReturns:\nThe decoded image prediction.", "source": "juraj-google-style"}
{"code": "def FromEncoded(cls, encoded):\n    match_spec = (encoded & ((1 << 11) | (1 << 15)))\n    match_type = ((encoded & (7 << 12)) >> 12)\n    match_id = (encoded & ((1 << 11) - 1))\n    if (match_spec not in cls.SpecifierEncodingMap):\n        raise ArgumentError('Unknown encoded match specifier', match_spec=match_spec, known_specifiers=cls.SpecifierEncodingMap.keys())\n    spec_name = cls.SpecifierEncodingMap[match_spec]\n    if (match_id == cls.MatchAllCode):\n        match_id = None\n    return DataStreamSelector(match_type, match_id, spec_name)", "docstring": "Create a DataStreamSelector from an encoded 16-bit value.\n\nThe binary value must be equivalent to what is produced by\na call to self.encode() and will turn that value back into\na a DataStreamSelector.\n\nNote that the following operation is a no-op:\n\nDataStreamSelector.FromEncode(value).encode()\n\nArgs:\nencoded (int): The encoded binary representation of a\nDataStreamSelector.\n\nReturns:\nDataStreamSelector: The decoded selector.", "source": "codesearchnet"}
{"code": "def guess_leb_size(path):\n    \n\n    f = open(path, 'rb')\n    f.seek(0,2)\n    file_size = f.tell()+1\n    f.seek(0)\n    block_size = None\n\n    for _ in range(0, file_size, FILE_CHUNK_SZ):\n        buf = f.read(FILE_CHUNK_SZ)\n\n        for m in re.finditer(UBIFS_NODE_MAGIC, buf):\n            start = m.start()\n            chdr = nodes.common_hdr(buf[start:start+UBIFS_COMMON_HDR_SZ])\n\n            if chdr and chdr.node_type == UBIFS_SB_NODE:\n                sb_start = start + UBIFS_COMMON_HDR_SZ\n                sb_end = sb_start + UBIFS_SB_NODE_SZ\n\n                if chdr.len != len(buf[sb_start:sb_end]):\n                    f.seek(sb_start)\n                    buf = f.read(UBIFS_SB_NODE_SZ)\n                else:\n                    buf = buf[sb_start:sb_end]\n\n                sbn = nodes.sb_node(buf)\n                block_size = sbn.leb_size\n                f.close()\n                return block_size\n\n    f.close()\n    return block_size", "docstring": "Get LEB size from superblock\n\nArguments:\nStr:path    -- Path to file.\n\nReturns:\nInt         -- LEB size.\n\nSearches file for superblock and retrieves leb size.", "source": "juraj-google-style"}
{"code": "def tables_get(self, table_name):\n    url = (Api._ENDPOINT + (Api._TABLES_PATH % table_name))\n    return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a table.\n\nArgs:\ntable_name: a tuple representing the full name of the table.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def remove_alias(alias_names):\n    \n    alias_table = get_alias_table()\n    for alias_name in alias_names:\n        if alias_name not in alias_table.sections():\n            raise CLIError(ALIAS_NOT_FOUND_ERROR.format(alias_name))\n        alias_table.remove_section(alias_name)\n    _commit_change(alias_table)", "docstring": "Remove an alias.\n\nArgs:\nalias_name: The name of the alias to be removed.", "source": "juraj-google-style"}
{"code": "def check_addresses(address_list, is_remote=False):\n    assert all((isinstance(x, (tuple, string_types)) for x in address_list))\n    if (is_remote and any((isinstance(x, string_types) for x in address_list))):\n        raise AssertionError('UNIX domain sockets not allowed for remote addresses')\n    for address in address_list:\n        check_address(address)", "docstring": "Check if the format of the addresses is correct\n\nArguments:\naddress_list (list[tuple]):\nSequence of (``str``, ``int``) pairs, each representing an IP\naddress and port respectively\n\n.. note::\nwhen supported by the platform, one or more of the elements in\nthe list can be of type ``str``, representing a valid UNIX\ndomain socket\n\nis_remote (boolean):\nWhether or not the addresses are remote; UNIX domain sockets\nare not allowed for remote addresses\n\nRaises:\nAssertionError:\nraised when ``address_list`` contains an invalid element\nValueError:\nraised when any address in the list has an incorrect format\n\nExample:\n\n>>> check_addresses([('127.0.0.1', 22), ('127.0.0.1', 2222)])", "source": "codesearchnet"}
{"code": "def _add_sphere(ax):\n    \n    (u, v) = np.mgrid[0:2 * np.pi:20j, 0:np.pi:10j]\n    x = np.cos(u) * np.sin(v)\n    y = np.sin(u) * np.sin(v)\n    z = np.cos(v)\n    ax.plot_wireframe(x, y, z, color='grey', linewidth=0.2)\n    return ax", "docstring": "_add_sphere(ax)\n\nAdd a wireframe unit sphere onto matplotlib 3D axes\n\nArgs:\nax - matplotlib 3D axes object\n\nReturns:\nupdated matplotlib 3D axes", "source": "juraj-google-style"}
{"code": "def determine_opening_indent(indent_texts):\n    num_lines = len(indent_texts)\n    if (num_lines < 1):\n        return 0\n    assert (num_lines >= 1)\n    first_line_indent = indent_texts[0][0]\n    if (num_lines == 1):\n        return first_line_indent\n    assert (num_lines >= 2)\n    second_line_indent = indent_texts[1][0]\n    second_line_text = indent_texts[1][1]\n    if (len(second_line_text) == 0):\n        return first_line_indent\n    return second_line_indent", "docstring": "Determine the opening indent level for a docstring.\n\nThe opening indent level is the indent level is the first non-zero indent\nlevel of a non-empty line in the docstring.\n\nArgs:\nindent_texts: The lines of the docstring as an iterable over 2-tuples\neach containing an integer indent level as the first element and\nthe text as the second element.\n\nReturns:\nThe opening indent level as an integer.", "source": "codesearchnet"}
{"code": "def parse_config(args=sys.argv):\n    \n    parser = argparse.ArgumentParser(\n        description='Read in the config file')\n    parser.add_argument(\n        'config_file',\n        help='Configuration file.',\n        metavar='FILE', type=extant_file)\n    return parser.parse_args(args[1:])", "docstring": "Parse the args using the config_file pattern\n\nArgs:\nargs: sys.argv\n\nReturns:\nThe populated namespace object from parser.parse_args().\n\nRaises:\nTBD", "source": "juraj-google-style"}
{"code": "def decode(self, targets, encoder_outputs, attention_bias):\n    \n    with tf.name_scope(\"decode\"):\n      \n      \n      decoder_inputs = self.embedding_softmax_layer(targets)\n      with tf.name_scope(\"shift_targets\"):\n        \n        decoder_inputs = tf.pad(\n            decoder_inputs, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n      with tf.name_scope(\"add_pos_encoding\"):\n        length = tf.shape(decoder_inputs)[1]\n        decoder_inputs += model_utils.get_position_encoding(\n            length, self.params.hidden_size)\n      if self.train:\n        mlperf_log.transformer_print(\n            key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,\n            value=self.params.layer_postprocess_dropout)\n        decoder_inputs = tf.nn.dropout(\n            decoder_inputs, 1 - self.params.layer_postprocess_dropout)\n\n      \n      decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(\n          length)\n      outputs = self.decoder_stack(\n          decoder_inputs, encoder_outputs, decoder_self_attention_bias,\n          attention_bias)\n      logits = self.embedding_softmax_layer.linear(outputs)\n      return logits", "docstring": "Generate logits for each value in the target sequence.\n\nArgs:\ntargets: target values for the output sequence.\nint tensor with shape [batch_size, target_length]\nencoder_outputs: continuous representation of input sequence.\nfloat tensor with shape [batch_size, input_length, hidden_size]\nattention_bias: float tensor with shape [batch_size, 1, 1, input_length]\n\nReturns:\nfloat32 tensor with shape [batch_size, target_length, vocab_size]", "source": "juraj-google-style"}
{"code": "def _kill_process_type(self, process_type, allow_graceful=False, check_alive=True, wait=False):\n    process_infos = self.all_processes[process_type]\n    if (process_type != ray_constants.PROCESS_TYPE_REDIS_SERVER):\n        assert (len(process_infos) == 1)\n    for process_info in process_infos:\n        process = process_info.process\n        if (process.poll() is not None):\n            if check_alive:\n                raise Exception(\"Attempting to kill a process of type '{}', but this process is already dead.\".format(process_type))\n            else:\n                continue\n        if process_info.use_valgrind:\n            process.terminate()\n            process.wait()\n            if (process.returncode != 0):\n                message = 'Valgrind detected some errors in process of type {}. Error code {}.'.format(process_type, process.returncode)\n                if (process_info.stdout_file is not None):\n                    with open(process_info.stdout_file, 'r') as f:\n                        message += ('\\nPROCESS STDOUT:\\n' + f.read())\n                if (process_info.stderr_file is not None):\n                    with open(process_info.stderr_file, 'r') as f:\n                        message += ('\\nPROCESS STDERR:\\n' + f.read())\n                raise Exception(message)\n            continue\n        if process_info.use_valgrind_profiler:\n            os.kill(process.pid, signal.SIGINT)\n            time.sleep(0.1)\n        if allow_graceful:\n            process.terminate()\n            timer = threading.Timer(1, (lambda process: process.kill()), [process])\n            try:\n                timer.start()\n                process.wait()\n            finally:\n                timer.cancel()\n            if (process.poll() is not None):\n                continue\n        process.kill()\n        if wait:\n            process.wait()\n    del self.all_processes[process_type]", "docstring": "Kill a process of a given type.\n\nIf the process type is PROCESS_TYPE_REDIS_SERVER, then we will kill all\nof the Redis servers.\n\nIf the process was started in valgrind, then we will raise an exception\nif the process has a non-zero exit code.\n\nArgs:\nprocess_type: The type of the process to kill.\nallow_graceful (bool): Send a SIGTERM first and give the process\ntime to exit gracefully. If that doesn't work, then use\nSIGKILL. We usually want to do this outside of tests.\ncheck_alive (bool): If true, then we expect the process to be alive\nand will raise an exception if the process is already dead.\nwait (bool): If true, then this method will not return until the\nprocess in question has exited.\n\nRaises:\nThis process raises an exception in the following cases:\n1. The process had already died and check_alive is true.\n2. The process had been started in valgrind and had a non-zero\nexit code.", "source": "codesearchnet"}
{"code": "def create(self, vid):\n        \n        command = 'vlan %s' % vid\n        return self.configure(command) if isvlan(vid) else False", "docstring": "Creates a new VLAN resource\n\nArgs:\nvid (str): The VLAN ID to create\n\nReturns:\nTrue if create was successful otherwise False", "source": "juraj-google-style"}
{"code": "def display(self, updating_pv=None):\n    data = self._to_dataframe()\n    data.columns = [self._pcoll_var + '.' + str(column) if isinstance(column, int) else column for column in data.columns]\n    data = data.map(lambda x: str(x) if isinstance(x, dict) else x)\n    if updating_pv:\n        if data.empty:\n            _LOGGER.debug('Skip a visualization update due to empty data.')\n        else:\n            self._display_dataframe(data.copy(deep=True), updating_pv)\n            if self._display_facets:\n                self._display_dive(data.copy(deep=True), updating_pv)\n                self._display_overview(data.copy(deep=True), updating_pv)\n    else:\n        self._display_dataframe(data.copy(deep=True))\n        if self._display_facets:\n            self._display_dive(data.copy(deep=True))\n            self._display_overview(data.copy(deep=True))", "docstring": "Displays the visualization through IPython.\n\nArgs:\nupdating_pv: A PCollectionVisualization object. When provided, the\ndisplay_id of each visualization part will inherit from the initial\ndisplay of updating_pv and only update that visualization web element\ninstead of creating new ones.\n\nThe visualization has 3 parts: facets-dive, facets-overview and paginated\ndata table. Each part is assigned an auto-generated unique display id\n(the uniqueness is guaranteed throughout the lifespan of the PCollection\nvariable).", "source": "github-repos"}
{"code": "def _QueryHashes(self, digests):\n    url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}\n    try:\n        json_response = self.MakeRequestAndDecodeJSON(self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)\n    except errors.ConnectionError as exception:\n        json_response = None\n        logger.error('Unable to query VirusTotal with error: {0!s}.'.format(exception))\n    return json_response", "docstring": "Queries VirusTotal for specific hashes.\n\nArgs:\ndigests (list[str]): hashes to look up.\n\nReturns:\ndict[str, object]: JSON response or None on error.", "source": "codesearchnet"}
{"code": "def function_table(self, function_id=None):\n    self._check_connected()\n    function_table_keys = self.redis_client.keys((ray.gcs_utils.FUNCTION_PREFIX + '*'))\n    results = {}\n    for key in function_table_keys:\n        info = self.redis_client.hgetall(key)\n        function_info_parsed = {'DriverID': binary_to_hex(info[b'driver_id']), 'Module': decode(info[b'module']), 'Name': decode(info[b'name'])}\n        results[binary_to_hex(info[b'function_id'])] = function_info_parsed\n    return results", "docstring": "Fetch and parse the function table.\n\nReturns:\nA dictionary that maps function IDs to information about the\nfunction.", "source": "codesearchnet"}
{"code": "def google_api_build_results(config, auth, api_call, results):\n    if 'bigquery' in results:\n        if 'schema' not in results['bigquery']:\n            results['bigquery']['schema'] = Discovery_To_BigQuery(api_call['api'], api_call['version'], api_call.get('key', None), api_call.get('labels', None)).method_schema(api_call['function'], api_call.get('iterate', False))\n        if 'format' not in results['bigquery']:\n            results['bigquery']['format'] = 'JSON'\n        results['bigquery']['skip_rows'] = 0\n        table_create(config, results['bigquery'].get('auth', auth), config.project, results['bigquery']['dataset'], results['bigquery']['table'], results['bigquery']['schema'], overwrite=False)\n    return results", "docstring": "Builds the BigQuery table to house the Google API call results.\n\nOptional piece of the recipe, will create a BigQuery table for results.\nTakes results, which defines a bigquery endpoint, and adds fields.\n\nArgs:\nauth (string): either \"user\" or \"service\" to make the BigQuery call.\napi_call (dict): the JSON for the API call as defined in recipe.\nresults (dict): defines where the data will be written\n\nReturns (dict):\nA modified results JSON with additional API values added.\n\nRaises:\nValueError: If a required key in the recipe is missing.", "source": "github-repos"}
{"code": "def GetHasher(cls, hasher_name):\n    \n    hasher_name = hasher_name.lower()\n    if hasher_name not in cls._hasher_classes:\n      raise KeyError(\n          'hasher class not set for name: {0:s}.'.format(hasher_name))\n\n    hasher_class = cls._hasher_classes[hasher_name]\n    return hasher_class()", "docstring": "Retrieves an instance of a specific hasher.\n\nArgs:\nhasher_name (str): the name of the hasher to retrieve.\n\nReturns:\nBaseHasher: hasher.\n\nRaises:\nKeyError: if hasher class is not set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None, as_ref=False):\n    if isinstance(value, ops.EagerTensor) and (not context.executing_eagerly()):\n        return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)\n    elif isinstance(value, internal.NativeObject):\n        if dtype and (not dtypes.as_dtype(dtype).is_compatible_with(value.dtype)):\n            raise ValueError(f'Incompatible tensor conversion requested to `dtype` {dtypes.as_dtype(dtype).name} for `value` ({value}) with dtype {value.dtype.name}.')\n        return value\n    else:\n        return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)", "docstring": "Converts the given object to a `Tensor` or an `IndexedSlices`.\n\nIf `value` is an `IndexedSlices` or `SparseTensor` it is returned\nunmodified. Otherwise, it is converted to a `Tensor` using\n`convert_to_tensor()`.\n\nArgs:\nvalue: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed\nby `convert_to_tensor()`.\ndtype: (Optional.) The required `DType` of the returned `Tensor` or\n`IndexedSlices`.\nname: (Optional.) A name to use if a new `Tensor` is created.\nas_ref: True if the caller wants the results as ref tensors.\n\nReturns:\nA `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.\n\nRaises:\nValueError: If `dtype` does not match the element type of `value`.", "source": "github-repos"}
{"code": "def helper(*commands):\n    \n    def decorated_func(f):\n        f.__help_targets__ = list(commands)\n        return f\n    return decorated_func", "docstring": "Decorate a function to be the helper function of commands.\n\nArguments:\ncommands: Names of command that should trigger this function object.\n\n---------------------------\nInterface of helper methods:\n\n@helper('some-command')\ndef help_foo(self, args):\n'''\nArguments:\nargs: A list of arguments.\n\nReturns:\nA string that is the help message.\n'''\npass", "source": "juraj-google-style"}
{"code": "def _get_val_list(obj, path_list, reverse=False):\n    \n    try:\n        y = getattr(obj, path_list[0])\n    except AttributeError:\n        return []\n    if len(path_list) == 1:\n        return [y]\n    else:\n        val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)]\n        if reverse:\n            val_list.reverse()\n        return val_list", "docstring": "Extract values from nested objects by attribute names.\n\nObjects contain attributes which are named references to objects. This will descend\ndown a tree of nested objects, starting at the given object, following the given\npath.\n\nArgs:\nobj: object\nAny type of object\n\npath_list: list\nAttribute names\n\nreverse: bool\nReverse the list of values before concatenation.\n\nReturns:\nlist of objects", "source": "juraj-google-style"}
{"code": "def push(self, stream, reading):\n        \n\n        \n        reading = copy.copy(reading)\n        reading.stream = stream.encode()\n\n        if stream.buffered:\n            output_buffer = stream.output\n\n            if self.id_assigner is not None:\n                reading.reading_id = self.id_assigner(stream, reading)\n\n            try:\n                self._engine.push(reading)\n            except StorageFullError:\n                \n                if (stream.output and not self._rollover_streaming) or (not stream.output and not self._rollover_storage):\n                    raise\n\n                self._erase_buffer(stream.output)\n                self._engine.push(reading)\n\n            for walker in self._queue_walkers:\n                \n                if walker.selector.output == output_buffer:\n                    walker.notify_added(stream)\n\n        \n        for selector in self._monitors:\n            if selector is None or selector.matches(stream):\n                for callback in self._monitors[selector]:\n                    callback(stream, reading)\n\n        \n        \n        for walker in self._virtual_walkers:\n            if walker.matches(stream):\n                walker.push(stream, reading)\n\n        self._last_values[stream] = reading", "docstring": "Push a reading into a stream, updating any associated stream walkers.\n\nArgs:\nstream (DataStream): the stream to push the reading into\nreading (IOTileReading): the reading to push", "source": "juraj-google-style"}
{"code": "def reply(self, status=200, new_response=False, **kw):\n        \n        \n        res = Response(**kw) if new_response else self._response\n        \n        res.status(status or res._status)\n        \n        res.mock = self\n        \n        self._response = res\n        \n        return res", "docstring": "Defines the mock response.\n\nArguments:\nstatus (int, optional): response status code. Defaults to ``200``.\n**kw (dict): optional keyword arguments passed to ``pook.Response``\nconstructor.\n\nReturns:\npook.Response: mock response definition instance.", "source": "juraj-google-style"}
{"code": "def anonymous_login(services):\n    if isinstance(services, str):\n        services = [services]\n    clients = {}\n    for serv in services:\n        try:\n            clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)\n        except KeyError:\n            print(\"Error: No known client for '{}' service.\".format(serv))\n        except Exception:\n            print(\"Error: Unable to create client for '{}' service.\\nAnonymous access may not be allowed.\".format(serv))\n    return clients", "docstring": "Initialize services without authenticating to Globus Auth.\n\nNote:\nClients may have reduced functionality without authentication.\n\nArguments:\nservices (str or list of str): The services to initialize clients for.\n\nReturns:\ndict: The clients requested, indexed by service name.", "source": "codesearchnet"}
{"code": "def zero_fill_missing_phenotypes(self):\n    if self.is_uniform(verbose=False):\n        return self.copy()\n    output = self.copy()\n\n    def _do_fill(d, names):\n        old_names = list(d.keys())\n        old_values = list(d.values())\n        missing = (set(names) - set(old_names))\n        return dict(zip((old_names + list(missing)), (old_values + ([0] * len(missing)))))\n    pnames = self.phenotypes\n    output['phenotype_calls'] = output.apply((lambda x: _do_fill(x['phenotype_calls'], pnames)), 1)\n    return output", "docstring": "Fill in missing phenotypes and scored types by listing any missing data as negative\n\nReturns:\nCellDataFrame: The CellDataFrame modified.", "source": "codesearchnet"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(StructureDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.family_definition = None", "docstring": "Initializes a data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def trace_stop(self):\n    cmd = enums.JLinkTraceCommand.STOP\n    res = self._dll.JLINKARM_TRACE_Control(cmd, 0)\n    if (res == 1):\n        raise errors.JLinkException('Failed to stop trace.')\n    return None", "docstring": "Stops collecting trace data.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def get_property(self, prop):\n        \n        prop = prop.split('.')\n        root = self\n        for p in prop:\n            if p in root:\n                root = root[p]\n            else:\n                return None\n        return root", "docstring": "Access nested value using dot separated keys\n\nArgs:\nprop (:obj:`str`): Property in the form of dot separated keys\n\nReturns:\nProperty value if exists, else `None`", "source": "juraj-google-style"}
{"code": "def _StructPackEncoder(wire_type, format):\n  \n\n  value_size = struct.calcsize(format)\n\n  def SpecificEncoder(field_number, is_repeated, is_packed):\n    local_struct_pack = struct.pack\n    if is_packed:\n      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)\n      local_EncodeVarint = _EncodeVarint\n      def EncodePackedField(write, value):\n        write(tag_bytes)\n        local_EncodeVarint(write, len(value) * value_size)\n        for element in value:\n          write(local_struct_pack(format, element))\n      return EncodePackedField\n    elif is_repeated:\n      tag_bytes = TagBytes(field_number, wire_type)\n      def EncodeRepeatedField(write, value):\n        for element in value:\n          write(tag_bytes)\n          write(local_struct_pack(format, element))\n      return EncodeRepeatedField\n    else:\n      tag_bytes = TagBytes(field_number, wire_type)\n      def EncodeField(write, value):\n        write(tag_bytes)\n        return write(local_struct_pack(format, value))\n      return EncodeField\n\n  return SpecificEncoder", "docstring": "Return a constructor for an encoder for a fixed-width field.\n\nArgs:\nwire_type:  The field's wire type, for encoding tags.\nformat:  The format string to pass to struct.pack().", "source": "juraj-google-style"}
{"code": "def is_parameterized(val: Any) -> bool:\n    if isinstance(val, sympy.Basic):\n        return True\n    getter = getattr(val, '_is_parameterized_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if (result is not NotImplemented):\n        return result\n    else:\n        return False", "docstring": "Returns whether the object is parameterized with any Symbols.\n\nA value is parameterized when it has an `_is_parameterized_` method and\nthat method returns a truthy value, or if the value is an instance of\nsympy.Basic.\n\nReturns:\nTrue if the gate has any unresolved Symbols\nand False otherwise. If no implementation of the magic\nmethod above exists or if that method returns NotImplemented,\nthis will default to False.", "source": "codesearchnet"}
{"code": "def write_entry_to_file(file_descriptor, entry_comment, entry_key):\n    \n    escaped_key = re.sub(r'([^\\\\])\"', '\\\\1\\\\\"', entry_key)\n    file_descriptor.write(u'\\n' % entry_comment)\n    file_descriptor.write(u'\"%s\" = \"%s\";\\n' % (escaped_key, escaped_key))", "docstring": "Writes a localization entry to the file\n\nArgs:\nfile_descriptor (file, instance): The file to write the entry to.\nentry_comment (str): The entry's comment.\nentry_key (str): The entry's key.", "source": "juraj-google-style"}
{"code": "def create_model(text_in, timesteps, phase):\n  \n  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):\n    \n    with tf.device('/cpu:0'):\n      embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])\n    \n    \n    \n    \n    lstm = (embedded\n            .cleave_sequence(timesteps)\n            .sequence_lstm(LOWER)\n            .sequence_lstm(UPPER))\n\n    \n    \n    \n    return (lstm.squash_sequence()\n            .dropout(keep_prob=0.8, phase=phase)\n            .fully_connected(CHARS, activation_fn=None))", "docstring": "Creates a 2 layer LSTM model with dropout.\n\nArgs:\ntext_in: The input text as ASCII ordinals in a Tensor.\ntimesteps: The number of timesteps in the sequence.\nphase: Phase controls whether or not dropout is active.  In training mode\nwe want to perform dropout, but in test we want to disable it.\nReturns:\nThe logits.", "source": "juraj-google-style"}
{"code": "def _parse_symbol(self, sym):\n    special = {'Hw': 'H', 'Ow': 'O', 'Wat': 'O', 'wat': 'O', 'OH': '', 'OH2': '', 'NO3': 'N'}\n    parsed_sym = None\n    m_sp = re.match('|'.join(special.keys()), sym)\n    if m_sp:\n        parsed_sym = special[m_sp.group()]\n    elif Element.is_valid_symbol(sym[:2].title()):\n        parsed_sym = sym[:2].title()\n    elif Element.is_valid_symbol(sym[0].upper()):\n        parsed_sym = sym[0].upper()\n    else:\n        m = re.match('w?[A-Z][a-z]*', sym)\n        if m:\n            parsed_sym = m.group()\n    if ((parsed_sym is not None) and (m_sp or (not re.match('{}\\\\d*'.format(parsed_sym), sym)))):\n        msg = '{} parsed as {}'.format(sym, parsed_sym)\n        warnings.warn(msg)\n        self.errors.append(msg)\n    return parsed_sym", "docstring": "Parse a string with a symbol to extract a string representing an element.\n\nArgs:\nsym (str): A symbol to be parsed.\n\nReturns:\nA string with the parsed symbol. None if no parsing was possible.", "source": "codesearchnet"}
{"code": "def token_of_request(self, url, body=None, content_type=None):\n        \n        parsed_url = urlparse(url)\n        query = parsed_url.query\n        path = parsed_url.path\n        data = path\n        if query != '':\n            data = ''.join([data, '?', query])\n        data = ''.join([data, \"\\n\"])\n\n        if body:\n            mimes = [\n                'application/x-www-form-urlencoded'\n            ]\n            if content_type in mimes:\n                data += body\n\n        return '{0}:{1}'.format(self.__access_key, self.__token(data))", "docstring": "带请求体的签名（本质上是管理凭证的签名）\n\nArgs:\nurl:          待签名请求的url\nbody:         待签名请求的body\ncontent_type: 待签名请求的body的Content-Type\n\nReturns:\n管理凭证", "source": "juraj-google-style"}
{"code": "def fpn_map_rois_to_levels(boxes):\n    \n    sqrtarea = tf.sqrt(tf_area(boxes))\n    level = tf.cast(tf.floor(\n        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)\n\n    \n    level_ids = [\n        tf.where(level <= 2),\n        tf.where(tf.equal(level, 3)),   \n        tf.where(tf.equal(level, 4)),\n        tf.where(level >= 5)]\n    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))\n                 for i, x in enumerate(level_ids)]\n    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))\n                     for i, x in enumerate(level_ids)]\n    add_moving_summary(*num_in_levels)\n\n    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]\n    return level_ids, level_boxes", "docstring": "Assign boxes to level 2~5.\n\nArgs:\nboxes (nx4):\n\nReturns:\n[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.\n[tf.Tensor]: 4 tensors, the gathered boxes in each level.\n\nBe careful that the returned tensor could be empty.", "source": "juraj-google-style"}
{"code": "def get_enterprise_customer_for_user(auth_user):\n    \n    EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser')  \n    try:\n        return EnterpriseCustomerUser.objects.get(user_id=auth_user.id).enterprise_customer  \n    except EnterpriseCustomerUser.DoesNotExist:\n        return None", "docstring": "Return enterprise customer instance for given user.\n\nSome users are associated with an enterprise customer via `EnterpriseCustomerUser` model,\n1. if given user is associated with any enterprise customer, return enterprise customer.\n2. otherwise return `None`.\n\nArguments:\nauth_user (contrib.auth.User): Django User\n\nReturns:\n(EnterpriseCustomer): enterprise customer associated with the current user.", "source": "juraj-google-style"}
{"code": "def on_get(self, req, resp, handler=None, **kwargs):\n    self.handle((handler or self.list), req, resp, **kwargs)", "docstring": "Respond on GET HTTP request assuming resource list retrieval flow.\n\nThis request handler assumes that GET requests are associated with\nresource list retrieval. Thus default flow for such requests is:\n\n* Retrieve list of existing resource instances and prepare their\nrepresentations by calling list retrieval method handler.\n\nArgs:\nreq (falcon.Request): request object instance.\nresp (falcon.Response): response object instance to be modified\nhandler (method): list method handler to be called. Defaults\nto ``self.list``.\n**kwargs: additional keyword arguments retrieved from url template.", "source": "codesearchnet"}
{"code": "def ends_with(self, suffix):\n        \n        suffix = suffix.lower()\n        found_words = []\n\n        res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding=\"ascii\"))\n        tmp = res\n\n        while tmp:\n            word = tmp.contents.str.decode(\"ascii\")\n            found_words.append(word)\n            tmp = tmp.contents.next\n\n        cgaddag.gdg_destroy_result(res)\n        return found_words", "docstring": "Find all words ending with a suffix.\n\nArgs:\nsuffix: A suffix to be searched for.\n\nReturns:\nA list of all words found.", "source": "juraj-google-style"}
{"code": "def setUserPwd(self, user, pwd):\n        \n        def getSkypeToken(self):\n            self.liveLogin(user, pwd)\n        self.getSkypeToken = MethodType(getSkypeToken, self)", "docstring": "Replace the stub :meth:`getSkypeToken` method with one that connects via the Microsoft account flow using the\ngiven credentials.  Avoids storing the account password in an accessible way.\n\nArgs:\nuser (str): username or email address of the connecting account\npwd (str): password of the connecting account", "source": "juraj-google-style"}
{"code": "def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):\n    \n    if isinstance(flow_or_path, np.ndarray):\n        if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2):\n            raise ValueError('Invalid flow with shape {}'.format(\n                flow_or_path.shape))\n        return flow_or_path\n    elif not is_str(flow_or_path):\n        raise TypeError(\n            '\"flow_or_path\" must be a filename or numpy array, not {}'.format(\n                type(flow_or_path)))\n\n    if not quantize:\n        with open(flow_or_path, 'rb') as f:\n            try:\n                header = f.read(4).decode('utf-8')\n            except Exception:\n                raise IOError('Invalid flow file: {}'.format(flow_or_path))\n            else:\n                if header != 'PIEH':\n                    raise IOError(\n                        'Invalid flow file: {}, header does not contain PIEH'.\n                        format(flow_or_path))\n\n            w = np.fromfile(f, np.int32, 1).squeeze()\n            h = np.fromfile(f, np.int32, 1).squeeze()\n            flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))\n    else:\n        assert concat_axis in [0, 1]\n        cat_flow = imread(flow_or_path, flag='unchanged')\n        if cat_flow.ndim != 2:\n            raise IOError(\n                '{} is not a valid quantized flow file, its dimension is {}.'.\n                format(flow_or_path, cat_flow.ndim))\n        assert cat_flow.shape[concat_axis] % 2 == 0\n        dx, dy = np.split(cat_flow, 2, axis=concat_axis)\n        flow = dequantize_flow(dx, dy, *args, **kwargs)\n\n    return flow.astype(np.float32)", "docstring": "Read an optical flow map.\n\nArgs:\nflow_or_path (ndarray or str): A flow map or filepath.\nquantize (bool): whether to read quantized pair, if set to True,\nremaining args will be passed to :func:`dequantize_flow`.\nconcat_axis (int): The axis that dx and dy are concatenated,\ncan be either 0 or 1. Ignored if quantize is False.\n\nReturns:\nndarray: Optical flow represented as a (h, w, 2) numpy array", "source": "juraj-google-style"}
{"code": "def export_default_scripts(target_folder, source_folder = None, raise_errors = False, verbose=False):\n    \n\n    scripts_to_load = get_classes_in_folder(source_folder, Script)\n\n    if verbose:\n        print(('attempt to load {:d} scripts: '.format(len(scripts_to_load))))\n\n    loaded_scripts, failed, loaded_instruments = Script.load_and_append(scripts_to_load, raise_errors=raise_errors)\n\n    for name, value in loaded_scripts.items():\n        filename = os.path.join(target_folder, '{:s}.b26'.format(name))\n        value.save_b26(filename)\n\n    if verbose:\n        print('\\n================================================')\n        print('================================================')\n        print(('saved {:d} scripts, {:d} failed'.format(len(loaded_scripts), len(failed))))\n        if failed != {}:\n            for error_name, error in failed.items():\n                print(('failed to create script: ', error_name, error))", "docstring": "tries to instantiate all the scripts that are imported in /scripts/__init__.py\nsaves each script that could be instantiated into a .b26 file in the folder path\nArgs:\ntarget_folder: target path for .b26 files\nsource_folder: location of python script files", "source": "juraj-google-style"}
{"code": "def get_wulff_shape(self, material_id):\n    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n    from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str\n    structure = self.get_structure_by_material_id(material_id)\n    surfaces = self.get_surface_data(material_id)['surfaces']\n    lattice = SpacegroupAnalyzer(structure).get_conventional_standard_structure().lattice\n    miller_energy_map = {}\n    for surf in surfaces:\n        miller = tuple(surf['miller_index'])\n        if ((miller not in miller_energy_map) or surf['is_reconstructed']):\n            miller_energy_map[miller] = surf['surface_energy']\n    (millers, energies) = zip(*miller_energy_map.items())\n    return WulffShape(lattice, millers, energies)", "docstring": "Constructs a Wulff shape for a material.\n\nArgs:\nmaterial_id (str): Materials Project material_id, e.g. 'mp-123'.\nReturns:\npymatgen.analysis.wulff.WulffShape", "source": "codesearchnet"}
{"code": "def handle_http_error(error: HTTPException) -> ResponseReturnValue:\n    code = error.code or 500\n    return (DQMResponse(name=error.name, description=error.description, code=code), code)", "docstring": "DQM HTTP Error Response.\n\nArgs:\n* error: HTTP error\n\nReturns:\n* DQMResponse for the error with the relevant status code", "source": "github-repos"}
{"code": "def _variable_with_weight_decay(name, shape, stddev, wd):\n    dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32)\n    var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n    if (wd is not None):\n        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n        tf.add_to_collection('losses', weight_decay)\n    return var", "docstring": "Helper to create an initialized Variable with weight decay.\n\nNote that the Variable is initialized with a truncated normal distribution.\nA weight decay is added only if one is specified.\n\nArgs:\nname: name of the variable\nshape: list of ints\nstddev: standard deviation of a truncated Gaussian\nwd: add L2Loss weight decay multiplied by this float. If None, weight\ndecay is not added for this Variable.\n\nReturns:\nVariable Tensor", "source": "codesearchnet"}
{"code": "def prune(t):\n    if isinstance(t, TypeVariable):\n        if (t.instance is not None):\n            t.instance = prune(t.instance)\n            return t.instance\n    return t", "docstring": "Returns the currently defining instance of t.\n\nAs a side effect, collapses the list of type instances. The function Prune\nis used whenever a type expression has to be inspected: it will always\nreturn a type expression which is either an uninstantiated type variable or\na type operator; i.e. it will skip instantiated variables, and will\nactually prune them from expressions to remove long chains of instantiated\nvariables.\n\nArgs:\nt: The type to be pruned\n\nReturns:\nAn uninstantiated TypeVariable or a TypeOperator", "source": "codesearchnet"}
{"code": "def metta_config(quarter, num_dimensions):\n    \n    first_day, last_day = quarter_boundaries(quarter)\n    return {\n        'start_time': first_day,\n        'end_time': last_day,\n        'prediction_window': 3, \n        'label_name': 'onet_soc_code',\n        'label_type': 'categorical',\n        'matrix_id': 'job_postings_{}'.format(quarter),\n        'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)],\n    }", "docstring": "Returns metta metadata for a quarter's SOC code classifier matrix\n\nArgs:\nquarter (str) quarter, in format '2015Q1'\nnum_dimensions (int) Number of features in matrix\n\nReturns: (dict) metadata suitable for metta.archive_train_test", "source": "juraj-google-style"}
{"code": "def _create_table_and_update_context(node, context):\n    \n    schema_type_name = sql_context_helpers.get_schema_type_name(node, context)\n    table = context.compiler_metadata.get_table(schema_type_name).alias()\n    context.query_path_to_selectable[node.query_path] = table\n    return table", "docstring": "Create an aliased table for a SqlNode.\n\nUpdates the relevant Selectable global context.\n\nArgs:\nnode: SqlNode, the current node.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nTable, the newly aliased SQLAlchemy table.", "source": "juraj-google-style"}
{"code": "def _read_csv_with_offset_pandas_on_ray(fname, num_splits, start, end, kwargs, header):\n    index_col = kwargs.get('index_col', None)\n    bio = file_open(fname, 'rb')\n    bio.seek(start)\n    to_read = (header + bio.read((end - start)))\n    bio.close()\n    pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)\n    pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns))\n    if (index_col is not None):\n        index = pandas_df.index\n        pandas_df.index = pandas.RangeIndex(0, len(pandas_df))\n    else:\n        index = len(pandas_df)\n    return (_split_result_for_readers(1, num_splits, pandas_df) + [index])", "docstring": "Use a Ray task to read a chunk of a CSV into a Pandas DataFrame.\n\nNote: Ray functions are not detected by codecov (thus pragma: no cover)\n\nArgs:\nfname: The filename of the file to open.\nnum_splits: The number of splits (partitions) to separate the DataFrame into.\nstart: The start byte offset.\nend: The end byte offset.\nkwargs: The kwargs for the Pandas `read_csv` function.\nheader: The header of the file.\n\nReturns:\nA list containing the split Pandas DataFrames and the Index as the last\nelement. If there is not `index_col` set, then we just return the length.\nThis is used to determine the total length of the DataFrame to build a\ndefault Index.", "source": "codesearchnet"}
{"code": "def GetFileSystemTypeIndicators(cls, path_spec, resolver_context=None):\n    \n    if (cls._file_system_remainder_list is None or\n        cls._file_system_store is None):\n      specification_store, remainder_list = cls._GetSpecificationStore(\n          definitions.FORMAT_CATEGORY_FILE_SYSTEM)\n      cls._file_system_remainder_list = remainder_list\n      cls._file_system_store = specification_store\n\n    if cls._file_system_scanner is None:\n      cls._file_system_scanner = cls._GetSignatureScanner(\n          cls._file_system_store)\n\n    return cls._GetTypeIndicators(\n        cls._file_system_scanner, cls._file_system_store,\n        cls._file_system_remainder_list, path_spec,\n        resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported file system types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "juraj-google-style"}
{"code": "def dump(self, conf_file=None):\n        \n\n        if conf_file:\n            conf_dir = os.path.dirname(conf_file)\n            if not conf_dir:\n                conf_dir = self.__invoke_dir\n            elif not os.path.exists(conf_dir):\n                os.makedirs(conf_dir)\n        else:\n            conf_dir = self.__conf_dir\n\n        final_conf = {}\n        for key, value in list(self.__config.items()):\n            if key in self.__cli:\n                continue\n            final_conf[key] = value\n\n        for key, value in list(self.__cli.items()):\n            if key.endswith('index') or key in ['sitemap', 'output']:\n                path = self.__abspath(value, from_conf=False)\n                if path:\n                    relpath = os.path.relpath(path, conf_dir)\n                    final_conf[key] = relpath\n            elif key.endswith('sources') or key.endswith('source_filters'):\n                new_list = []\n                for path in value:\n                    path = self.__abspath(path, from_conf=False)\n                    if path:\n                        relpath = os.path.relpath(path, conf_dir)\n                        new_list.append(relpath)\n                final_conf[key] = new_list\n            elif key not in ['command', 'output_conf_file']:\n                final_conf[key] = value\n\n        with open(conf_file or self.conf_file or 'hotdoc.json', 'w') as _:\n            _.write(json.dumps(final_conf, sort_keys=True, indent=4))", "docstring": "Dump the possibly updated config to a file.\n\nArgs:\nconf_file: str, the destination, or None to overwrite the\nexisting configuration.", "source": "juraj-google-style"}
{"code": "def update_version_in_examples(version: str):\n    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):\n        if 'legacy' in directories:\n            directories.remove('legacy')\n        for fname in fnames:\n            if fname.endswith('.py'):\n                update_version_in_file(os.path.join(folder, fname), version, file_type='examples')", "docstring": "Update the version in all examples files.\n\nArgs:\nversion (`str`): The new version to set in the examples.", "source": "github-repos"}
{"code": "def scope(self):\n    return super(OneDeviceStrategy, self).scope()", "docstring": "Returns a context manager selecting this Strategy as current.\n\nInside a `with strategy.scope():` code block, this thread\nwill use a variable creator set by `strategy`, and will\nenter its \"cross-replica context\".\n\nIn `OneDeviceStrategy`, all variables created inside `strategy.scope()`\nwill be on `device` specified at strategy construction time.\nSee example in the docs for this class.\n\nReturns:\nA context manager to use for creating variables with this strategy.", "source": "github-repos"}
{"code": "def to_binary(self, copy=False):\n        \n        if self.vartype is Vartype.BINARY:\n            if copy:\n                return self.copy()\n            else:\n                return self\n\n        new = BinaryPolynomial({}, Vartype.BINARY)\n\n        \n        for term, bias in self.items():\n            for t in map(frozenset, powerset(term)):\n                newbias = bias * 2**len(t) * (-1)**(len(term) - len(t))\n\n                if t in new:\n                    new[t] += newbias\n                else:\n                    new[t] = newbias\n\n        return new", "docstring": "Return a binary polynomial over `{0, 1}` variables.\n\nArgs:\ncopy (optional, default=False):\nIf True, the returned polynomial is always a copy. Otherwise,\nif the polynomial is binary-valued already it returns itself.\n\nReturns:\n:obj:`.BinaryPolynomial`", "source": "juraj-google-style"}
{"code": "def deserialize(name, custom_objects=None):\n    return deserialize_keras_object(name, module_objects=globals(), custom_objects=custom_objects, printable_module_name='loss function')", "docstring": "Deserializes a serialized loss class/function instance.\n\nArgs:\nname: Loss configuration.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during deserialization.\n\nReturns:\nA Keras `Loss` instance or a loss function.", "source": "github-repos"}
{"code": "def connections(self, origin, destination, dt=datetime.now(), only_direct=False):\n        \n        query = {\n            'S': origin,\n            'Z': destination,\n            'date': dt.strftime(\"%d.%m.%y\"),\n            'time': dt.strftime(\"%H:%M\"),\n            'start': 1,\n            'REQ0JourneyProduct_opt0': 1 if only_direct else 0\n        }\n        rsp = requests.get('http:\n        return parse_connections(rsp.text)", "docstring": "Find connections between two stations\n\nArgs:\norigin (str): origin station\ndestination (str): destination station\ndt (datetime): date and time for query\nonly_direct (bool): only direct connections", "source": "juraj-google-style"}
{"code": "def join(input_layer, others, include_self=True, join_function=None):\n  \n  if include_self:\n    list_of_tensors = [input_layer]\n    list_of_tensors.extend(others)\n  else:\n    list_of_tensors = others\n  return prettytensor.join_pretty_tensors(list_of_tensors, input_layer,\n                                          join_function)", "docstring": "Joins the provided PrettyTensors with this using the join function.\n\nArgs:\ninput_layer: The input layer for this op.\nothers: Sequence of PrettyTensor objects.\ninclude_self: Whether or not this includes itself or if the value is only\nderived from others.\njoin_function: The function to use for joining, must accept a list of\ntensors. Use None for concat on the final dimension.\nReturns:\nself.", "source": "juraj-google-style"}
{"code": "def to_representation(self, instance):\n    request = self.context['request']\n    enterprise_customer = instance.enterprise_customer\n    representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance)\n    paginated_content = instance.get_paginated_content(request.GET)\n    count = paginated_content['count']\n    search_results = paginated_content['results']\n    for item in search_results:\n        content_type = item['content_type']\n        marketing_url = item.get('marketing_url')\n        if marketing_url:\n            item['marketing_url'] = utils.update_query_parameters(marketing_url, utils.get_enterprise_utm_context(enterprise_customer))\n        if (content_type == 'course'):\n            item['enrollment_url'] = instance.get_course_enrollment_url(item['key'])\n        if (content_type == 'courserun'):\n            item['enrollment_url'] = instance.get_course_run_enrollment_url(item['key'])\n        if (content_type == 'program'):\n            item['enrollment_url'] = instance.get_program_enrollment_url(item['uuid'])\n    previous_url = None\n    next_url = None\n    page = int(request.GET.get('page', '1'))\n    request_uri = request.build_absolute_uri()\n    if paginated_content['previous']:\n        previous_url = utils.update_query_parameters(request_uri, {'page': (page - 1)})\n    if paginated_content['next']:\n        next_url = utils.update_query_parameters(request_uri, {'page': (page + 1)})\n    representation['count'] = count\n    representation['previous'] = previous_url\n    representation['next'] = next_url\n    representation['results'] = search_results\n    return representation", "docstring": "Serialize the EnterpriseCustomerCatalog object.\n\nArguments:\ninstance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.\n\nReturns:\ndict: The EnterpriseCustomerCatalog converted to a dict.", "source": "codesearchnet"}
{"code": "def __init__(self, executable):\n    \n    self.long_name = executable\n    self.name = os.path.basename(executable)  \n    \n    (self.short_name, self.ext) = os.path.splitext(self.name)\n    self.executable = GetRealPath(executable)  \n    self.output = []          \n    self.desc = []            \n    self.modules = {}         \n    self.module_list = []     \n    self.date = time.localtime(time.time())", "docstring": "Create object with executable.\nArgs:\nexecutable  Program to execute (string)", "source": "juraj-google-style"}
{"code": "def IsCppString(line):\n    line = line.replace('\\\\\\\\', 'XX')\n    return ((((line.count('\"') - line.count('\\\\\"')) - line.count('\\'\"\\'')) & 1) == 1)", "docstring": "Does line terminate so, that the next symbol is in string constant.\n\nThis function does not consider single-line nor multi-line comments.\n\nArgs:\nline: is a partial line of code starting from the 0..n.\n\nReturns:\nTrue, if next character appended to 'line' is inside a\nstring constant.", "source": "codesearchnet"}
{"code": "def set_reboot_required_witnessed():\n    errcode = (- 1)\n    dir_path = os.path.dirname(NILRT_REBOOT_WITNESS_PATH)\n    if (not os.path.exists(dir_path)):\n        try:\n            os.makedirs(dir_path)\n        except OSError as ex:\n            raise SaltInvocationError('Error creating {0} (-{1}): {2}'.format(dir_path, ex.errno, ex.strerror))\n        rdict = __salt__['cmd.run_all']('touch {0}'.format(NILRT_REBOOT_WITNESS_PATH))\n        errcode = rdict['retcode']\n    return (errcode == 0)", "docstring": "This function is used to remember that an event indicating that a reboot is\nrequired was witnessed. This function writes to a temporary filesystem so\nthe event gets cleared upon reboot.\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\n.. code-block:: bash\n\nsalt '*' system.set_reboot_required_witnessed", "source": "codesearchnet"}
{"code": "def _parse_signed_int_components(buf):\n    sign_bit = 0\n    value = 0\n    first = True\n    while True:\n        ch = buf.read(1)\n        if (ch == b''):\n            break\n        octet = ord(ch)\n        if first:\n            if (octet & _SIGNED_INT_SIGN_MASK):\n                sign_bit = 1\n            value = (octet & _SIGNED_INT_SIGN_VALUE_MASK)\n            first = False\n        else:\n            value <<= 8\n            value |= octet\n    return (sign_bit, value)", "docstring": "Parses the remainder of a file-like object as a signed magnitude value.\n\nReturns:\nReturns a pair of the sign bit and the unsigned magnitude.", "source": "codesearchnet"}
{"code": "def app(environ, start_response):\n    \n    from wsgi import container\n\n    \n\n    container.bind('Environ', environ)\n\n    \n\n    try:\n        for provider in container.make('WSGIProviders'):\n            container.resolve(provider.boot)\n    except Exception as e:\n        container.make('ExceptionHandler').load_exception(e)\n\n    \n\n    start_response(container.make('Request').get_status_code(),\n                   container.make('Request').get_and_reset_headers())\n\n    \n\n    return iter([bytes(container.make('Response'), 'utf-8')])", "docstring": "The WSGI Application Server.\n\nArguments:\nenviron {dict} -- The WSGI environ dictionary\nstart_response {WSGI callable}\n\nReturns:\nWSGI Response", "source": "juraj-google-style"}
{"code": "def apply_channel_shift(x, intensity, channel_axis=0):\n    x = np.rollaxis(x, channel_axis, 0)\n    min_x, max_x = (np.min(x), np.max(x))\n    channel_images = [np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]\n    x = np.stack(channel_images, axis=0)\n    x = np.rollaxis(x, 0, channel_axis + 1)\n    return x", "docstring": "Performs a channel shift.\n\nDEPRECATED.\n\nArgs:\nx: Input tensor. Must be 3D.\nintensity: Transformation intensity.\nchannel_axis: Index of axis for channels in the input tensor.\n\nReturns:\nNumpy image tensor.", "source": "github-repos"}
{"code": "async def addNodes(self, nodedefs):\n        \n        async with await self.snap() as snap:\n            snap.strict = False\n            async for node in snap.addNodes(nodedefs):\n                yield node", "docstring": "Quickly add/modify a list of nodes from node definition tuples.\nThis API is the simplest/fastest way to add nodes, set node props,\nand add tags to nodes remotely.\n\nArgs:\n\nnodedefs (list): A list of node definition tuples. See below.\n\nA node definition tuple is defined as:\n\n( (form, valu), {'props':{}, 'tags':{})\n\nThe \"props\" or \"tags\" keys may be omitted.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key: Union[Tuple[int, int],\n                                     Tuple[str, str],\n                                     Tuple[Node, Node]]) -> Optional[Edge]:\n        \n        if isinstance(key[0], Node) and isinstance(key[1], Node):\n            return self.get_edge(key[0], key[1])\n        elif isinstance(key[0], int) and isinstance(key[1], int):\n            return self.get_edge_by_index(key[0], key[1])\n        elif isinstance(key[0], str) and isinstance(key[1], str):\n            return self.get_edge_by_name(key[0], key[1])\n        raise ValueError(\"Invalid edge key: {}\".format(key))", "docstring": "Returns the edge corresponding to the given key.\n\nIf the given key is a tuple of nodes or node indexes, then the edge connecting the two\nnodes will be returned if such an edge exists.\n\nIf the given key is a tuple of node names, then the edge connecting the corresponding\nnodes will be returned if such an edge exists.\n\nArguments:\nkey (Union[Tuple[int, int], Tuple[str, str], Tuple[Node, Node]]): The key identifying the edge to return.", "source": "juraj-google-style"}
{"code": "def epoch_to_human_time(epoch_time):\n    if isinstance(epoch_time, int):\n        try:\n            d = datetime.datetime.fromtimestamp((epoch_time / 1000))\n            return d.strftime('%m-%d-%Y %H:%M:%S ')\n        except ValueError:\n            return None", "docstring": "Converts an epoch timestamp to human readable time.\n\nThis essentially converts an output of get_current_epoch_time to an output\nof get_current_human_time\n\nArgs:\nepoch_time: An integer representing an epoch timestamp in milliseconds.\n\nReturns:\nA time string representing the input time.\nNone if input param is invalid.", "source": "codesearchnet"}
{"code": "def pad(boxes, top, left, height=None, width=None, bounding_box_format='xyxy'):\n    if bounding_box_format != 'xyxy':\n        raise NotImplementedError\n    box_utils = BoundingBox()\n    if backend_utils.in_tf_graph():\n        box_utils.backend.set_backend('tensorflow')\n    outputs = box_utils.pad(boxes, top, left)\n    box_utils.backend.reset()\n    return outputs", "docstring": "Pads bounding boxes by adding top and left offsets.\n\nThis function adds padding to the bounding boxes by increasing the 'top'\nand 'left' coordinates by the specified amounts. The method assume the\ninput bounding_box_format is `xyxy`.\n\nArgs:\nboxes: Bounding boxes to pad. Shape `(N, 4)` or `(batch, N, 4)`.\ntop: Vertical padding to add.\nleft: Horizontal padding to add.\nheight: Image height. Defaults to None.\nwidth: Image width. Defaults to None.\nbounding_box_format: The format of the input bounding boxes. Defaults to\n`\"xyxy\"`.\n\nReturns:\nPadded bounding boxes in the original format.", "source": "github-repos"}
{"code": "def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):\n    if (mkdir and (not os.path.exists(path))):\n        os.mkdir(path, (mode or 511))\n    elif mode:\n        os.chmod(path, mode)\n    if (uid and gid):\n        os.chown(path, uid, gid)\n    _SetSELinuxContext(path)", "docstring": "Set the permissions and ownership of a path.\n\nArgs:\npath: string, the path for which owner ID and group ID needs to be setup.\nmode: octal string, the permissions to set on the path.\nuid: int, the owner ID to be set for the path.\ngid: int, the group ID to be set for the path.\nmkdir: bool, True if the directory needs to be created.", "source": "codesearchnet"}
{"code": "def __closely_associated_score(self, normalized_sentences, top_n_words):\n    scores_list = []\n    sentence_idx = (- 1)\n    for sentence in normalized_sentences:\n        self.tokenize(sentence)\n        sentence = self.token\n        sentence_idx += 1\n        word_idx = []\n        for w in top_n_words:\n            try:\n                word_idx.append(sentence.index(w))\n            except ValueError:\n                pass\n        word_idx.sort()\n        if (len(word_idx) == 0):\n            continue\n        clusters = []\n        cluster = [word_idx[0]]\n        i = 1\n        while (i < len(word_idx)):\n            if ((word_idx[i] - word_idx[(i - 1)]) < self.cluster_threshold):\n                cluster.append(word_idx[i])\n            else:\n                clusters.append(cluster[:])\n                cluster = [word_idx[i]]\n            i += 1\n        clusters.append(cluster)\n        max_cluster_score = 0\n        for c in clusters:\n            significant_words_in_cluster = len(c)\n            total_words_in_cluster = ((c[(- 1)] - c[0]) + 1)\n            score = (((1.0 * significant_words_in_cluster) * significant_words_in_cluster) / total_words_in_cluster)\n            if (score > max_cluster_score):\n                max_cluster_score = score\n        scores_list.append((sentence_idx, score))\n    return scores_list", "docstring": "Scoring the sentence with closely associations.\n\nArgs:\nnormalized_sentences:   The list of sentences.\ntop_n_words:            Important sentences.\n\nReturns:\nThe list of scores.", "source": "codesearchnet"}
{"code": "def quality(self, tests, alias=None):\n        \n        \n\n        \n        alias = alias or {}\n        alias = alias.get('striplog', alias.get('Striplog', []))\n\n        \n        \n        \n        \n        \n        this_tests =\\\n            tests.get('all', [])+tests.get('All', [])+tests.get('ALL', [])\\\n            + tests.get('striplog', tests.get('Striplog', []))\\\n            + utils.flatten_list([tests.get(a) for a in alias])\n        this_tests = filter(None, this_tests)\n\n        \n        \n        if not tests.get('striplog', tests.get('Striplog', 1)):\n            this_tests = []\n\n        return {test.__name__: test(self) for test in this_tests}", "docstring": "Run a series of tests and return the corresponding results.\n\nBased on curve testing for ``welly``.\n\nArgs:\ntests (list): a list of functions.\n\nReturns:\nlist. The results. Stick to booleans (True = pass) or ints.", "source": "juraj-google-style"}
{"code": "def pre_finalize(self, init_result, writer_results):\n    raise NotImplementedError", "docstring": "Pre-finalization stage for sink.\n\nCalled after all bundle writes are complete and before finalize_write.\nUsed to setup and verify filesystem and sink states.\n\nArgs:\ninit_result: the result of ``initialize_write()`` invocation.\nwriter_results: an iterable containing results of ``Writer.close()``\ninvocations. This will only contain results of successful writes, and\nwill only contain the result of a single successful write for a given\nbundle.\n\nReturns:\nAn object that contains any sink specific state generated.\nThis object will be passed to finalize_write().", "source": "github-repos"}
{"code": "def _GetInstanceAndProjectAttributes(self, metadata_dict):\n    \n    metadata_dict = metadata_dict or {}\n\n    try:\n      instance_data = metadata_dict['instance']['attributes']\n    except KeyError:\n      instance_data = {}\n      self.logger.warning('Instance attributes were not found.')\n\n    try:\n      project_data = metadata_dict['project']['attributes']\n    except KeyError:\n      project_data = {}\n      self.logger.warning('Project attributes were not found.')\n\n    return instance_data, project_data", "docstring": "Get dictionaries for instance and project attributes.\n\nArgs:\nmetadata_dict: json, the deserialized contents of the metadata server.\n\nReturns:\ntuple, two dictionaries for instance and project attributes.", "source": "juraj-google-style"}
{"code": "def _process_has_substring_filter_directive(filter_operation_info, location, context, parameters):\n    filtered_field_type = filter_operation_info.field_type\n    filtered_field_name = filter_operation_info.field_name\n    if (not strip_non_null_from_type(filtered_field_type).is_same_type(GraphQLString)):\n        raise GraphQLCompilationError(u'Cannot apply \"has_substring\" to non-string type {}'.format(filtered_field_type))\n    argument_inferred_type = GraphQLString\n    (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)\n    filter_predicate = expressions.BinaryComposition(u'has_substring', expressions.LocalField(filtered_field_name), argument_expression)\n    if (non_existence_expression is not None):\n        filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)\n    return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks if the directive arg is a substring of the field.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nparameters: list of 1 element, specifying the collection in which the value must exist;\nif the collection is optional and missing, the check will return True\n\nReturns:\na Filter basic block that performs the substring check", "source": "codesearchnet"}
{"code": "def load_ipython_extension(shell):\n  \n\n  \n\n  def _request(self, uri, method=\"GET\", body=None, headers=None,\n               redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):\n    if headers is None:\n      headers = {}\n    headers['user-agent'] = 'GoogleCloudDataLab/1.0'\n    return _orig_request(self, uri, method=method, body=body, headers=headers,\n                         redirections=redirections, connection_type=connection_type)\n\n  _httplib2.Http.request = _request\n\n  \n\n  def _init_session(self):\n    _orig_init(self)\n    self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'\n\n  _requests.Session.__init__ = _init_session\n\n  \n  \n  \n  \n\n  def _run_line_magic(self, magic_name, line):\n    fn = self.find_line_magic(magic_name)\n    if fn is None:\n      cm = self.find_cell_magic(magic_name)\n      if cm:\n        return _run_cell_magic(self, magic_name, line, None)\n    return _orig_run_line_magic(self, magic_name, line)\n\n  def _run_cell_magic(self, magic_name, line, cell):\n    if cell is None or len(cell) == 0 or cell.isspace():\n      fn = self.find_line_magic(magic_name)\n      if fn:\n        return _orig_run_line_magic(self, magic_name, line)\n      \n      cell = None\n    return _orig_run_cell_magic(self, magic_name, line, cell)\n\n  _shell.InteractiveShell.run_cell_magic = _run_cell_magic\n  _shell.InteractiveShell.run_line_magic = _run_line_magic\n\n  \n  \n  \n\n  def _get_project_id():\n    try:\n      return google.datalab.Context.default().project_id\n    except Exception:\n      return None\n\n  def _set_project_id(project_id):\n    context = google.datalab.Context.default()\n    context.set_project_id(project_id)\n    try:\n      from datalab.context import Context as _old_context\n      _old_context.default().set_project_id(project_id)\n    except ImportError:\n      \n      pass\n\n  try:\n    if 'datalab_project_id' not in _IPython.get_ipython().user_ns:\n      _IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id\n      _IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id\n  except TypeError:\n    pass", "docstring": "Called when the extension is loaded.\n\nArgs:\nshell - (NotebookWebApplication): handle to the Notebook interactive shell instance.", "source": "juraj-google-style"}
{"code": "def copy_numbered_block(self):\n    raw_block = self.copy_raw_block()\n    raw_block.insert(0, range(self.start[1], self.end[1]))\n    return raw_block", "docstring": "Copies the block as it was originally specified by start and end into a new table.\nAdditionally inserts the original table indices in the first row of the block.\n\nReturns:\nA copy of the block with no block transformations.", "source": "codesearchnet"}
{"code": "def run_step(context):\n    logger.debug('started')\n    context.assert_child_key_has_value('fileWriteJson', 'path', __name__)\n    out_path = context.get_formatted_string(context['fileWriteJson']['path'])\n    is_payload_specified = ('payload' in context['fileWriteJson'])\n    logger.debug(f'opening destination file for writing: {out_path}')\n    os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)\n    with open(out_path, 'w') as outfile:\n        if is_payload_specified:\n            payload = context['fileWriteJson']['payload']\n            formatted_iterable = context.get_formatted_iterable(payload)\n        else:\n            formatted_iterable = context.get_formatted_iterable(context)\n        json.dump(formatted_iterable, outfile, indent=2, ensure_ascii=False)\n    logger.info(f'formatted context content and wrote to {out_path}')\n    logger.debug('done')", "docstring": "Write payload out to json file.\n\nArgs:\ncontext: pypyr.context.Context. Mandatory.\nThe following context keys expected:\n- fileWriteJson\n- path. mandatory. path-like. Write output file to\nhere. Will create directories in path for you.\n- payload. optional. Write this key to output file. If not\nspecified, output entire context.\n\nReturns:\nNone.\n\nRaises:\npypyr.errors.KeyNotInContextError: fileWriteJson or\nfileWriteJson['path'] missing in context.\npypyr.errors.KeyInContextHasNoValueError: fileWriteJson or\nfileWriteJson['path'] exists but is None.", "source": "codesearchnet"}
{"code": "def convert_predict_response(pred, serving_bundle):\n    output = pred.outputs[serving_bundle.predict_output_tensor]\n    raw_output = output.float_val\n    if (serving_bundle.model_type == 'classification'):\n        values = []\n        for example_index in range(output.tensor_shape.dim[0].size):\n            start = (example_index * output.tensor_shape.dim[1].size)\n            values.append(raw_output[start:(start + output.tensor_shape.dim[1].size)])\n    else:\n        values = raw_output\n    return convert_prediction_values(values, serving_bundle, pred.model_spec)", "docstring": "Converts a PredictResponse to ClassificationResponse or RegressionResponse.\n\nArgs:\npred: PredictResponse to convert.\nserving_bundle: A `ServingBundle` object that contains the information about\nthe serving request that the response was generated by.\n\nReturns:\nA ClassificationResponse or RegressionResponse.", "source": "codesearchnet"}
{"code": "def update(self,identity,params=None, headers=None):\n        \n        path = self._sub_url_params('/payments/:identity', {\n          \n            'identity': identity,\n          })\n        \n        if params is not None:\n            params = {self._envelope_key(): params}\n\n        response = self._perform_request('PUT', path, params, headers,\n                                         retry_failures=True)\n        return self._resource_for(response)", "docstring": "Update a payment.\n\nUpdates a payment object. This accepts only the metadata parameter.\n\nArgs:\nidentity (string): Unique identifier, beginning with \"PM\".\nparams (dict, optional): Request body.\n\nReturns:\nListResponse of Payment instances", "source": "juraj-google-style"}
{"code": "def as_text(content: ProcessorContentTypes, *, strict: bool=False, substream_name: str | None=None) -> str:\n    text_parts = []\n    for mime, part in ProcessorContent(content).items():\n        if substream_name is not None and part.substream_name != substream_name:\n            continue\n        if is_text(mime):\n            text_parts.append(part.text)\n        elif strict:\n            raise ValueError(f'Unsupported content type {mime}.')\n    return ''.join(text_parts)", "docstring": "Returns a text representation of the content.\n\nThe returned text is a concatenation of all text parts in the content.\n\nArgs:\ncontent: The content to process. This can be of various types as defined by\n`ProcessorContentTypes`.\nstrict: If True, unsupported content types will raise a ValueError.\nOtherwise, they will be ignored.\nsubstream_name: If set, only text parts with the given substream name will\nbe returned.", "source": "github-repos"}
{"code": "def check_num(self, checks, radl):\n    prefixes = {}\n    for f in self.features:\n        if (not isinstance(f, Feature)):\n            continue\n        (prefix, sep, tail) = f.prop.partition('.')\n        if ((not sep) or (prefix not in checks)):\n            continue\n        checks0 = checks[prefix]\n        (num, sep, suffix) = tail.partition('.')\n        try:\n            num = int(num)\n        except:\n            raise RADLParseException('Invalid property name; expected an index.', line=f.line)\n        if ((not sep) or (suffix not in checks0)):\n            continue\n        f._check(checks0[suffix], radl)\n        if (prefix not in prefixes):\n            prefixes[prefix] = set()\n        prefixes[prefix].add(num)\n    for (prefix, nums) in prefixes.items():\n        if ((min(nums) != 0) or (max(nums) != (len(nums) - 1))):\n            raise RADLParseException((\"Invalid indices values in properties '%s'\" % prefix))\n    return prefixes", "docstring": "Check types, operators and units in features with numbers.\n\nArgs:\n\n- checks(dict of dict of str:tuples): keys are property name prefixes, and the\nvalues are dict with keys are property name suffixes and values are iterable\nas in ``_check_feature``.\n- radl: passed to ``_check_feature``.", "source": "codesearchnet"}
{"code": "def _to_backend_mesh(device_mesh):\n    shape = device_mesh.devices.shape\n    devices = [_to_backend_device(d) for d in device_mesh.devices.flatten()]\n    devices = np.array(devices).reshape(shape)\n    return jax.sharding.Mesh(devices, device_mesh.axis_names)", "docstring": "Convert the DeviceMesh to JAX backend specific Mesh.\n\nArgs:\ndevice_mesh: DeviceMesh instance to convert.\n\nReturns:\nA `jax.sharding.Mesh` instance.", "source": "github-repos"}
{"code": "def convert_predict_response(pred, serving_bundle):\n  \n  output = pred.outputs[serving_bundle.predict_output_tensor]\n  raw_output = output.float_val\n  if serving_bundle.model_type == 'classification':\n    values = []\n    for example_index in range(output.tensor_shape.dim[0].size):\n      start = example_index * output.tensor_shape.dim[1].size\n      values.append(raw_output[start:start + output.tensor_shape.dim[1].size])\n  else:\n    values = raw_output\n  return convert_prediction_values(values, serving_bundle, pred.model_spec)", "docstring": "Converts a PredictResponse to ClassificationResponse or RegressionResponse.\n\nArgs:\npred: PredictResponse to convert.\nserving_bundle: A `ServingBundle` object that contains the information about\nthe serving request that the response was generated by.\n\nReturns:\nA ClassificationResponse or RegressionResponse.", "source": "juraj-google-style"}
{"code": "def append(parent: ScheduleComponent, child: ScheduleComponent,\n           name: str = None) -> Schedule:\n    r\n    common_channels = set(parent.channels) & set(child.channels)\n    insertion_time = parent.ch_stop_time(*common_channels)\n    return insert(parent, insertion_time, child, name=name)", "docstring": "r\"\"\"Return a new schedule with by appending `child` to `parent` at\nthe last time of the `parent` schedule's channels\nover the intersection of the parent and child schedule's channels.\n\n$t = \\textrm{max}({x.stop\\_time |x \\in parent.channels \\cap child.channels})$\n\nArgs:\nparent: The schedule to be inserted into\nchild: The schedule to insert\nname: Name of the new schedule. Defaults to name of parent", "source": "juraj-google-style"}
{"code": "def __init__(self, filename, ionicstep_start=1,\n                 ionicstep_end=None, comment=None):\n        \n        preamble = None\n        coords_str = []\n        structures = []\n        preamble_done = False\n        if (ionicstep_start < 1):\n            raise Exception('Start ionic step cannot be less than 1')\n        if (ionicstep_end is not None and\n                ionicstep_start < 1):\n            raise Exception('End ionic step cannot be less than 1')\n\n        ionicstep_cnt = 1\n        with zopen(filename, \"rt\") as f:\n            for l in f:\n                l = l.strip()\n                if preamble is None:\n                    preamble = [l]\n                elif not preamble_done:\n                    if l == \"\" or \"Direct configuration=\" in l:\n                        preamble_done = True\n                        tmp_preamble = [preamble[0]]\n                        for i in range(1, len(preamble)):\n                            if preamble[0] != preamble[i]:\n                                tmp_preamble.append(preamble[i])\n                            else:\n                                break\n                        preamble = tmp_preamble\n                    else:\n                        preamble.append(l)\n                elif l == \"\" or \"Direct configuration=\" in l:\n                    p = Poscar.from_string(\"\\n\".join(preamble +\n                                                     [\"Direct\"] + coords_str))\n                    if ionicstep_end is None:\n                        if (ionicstep_cnt >= ionicstep_start):\n                            structures.append(p.structure)\n                    else:\n                        if ionicstep_start <= ionicstep_cnt < ionicstep_end:\n                            structures.append(p.structure)\n                    ionicstep_cnt += 1\n                    coords_str = []\n                else:\n                    coords_str.append(l)\n            p = Poscar.from_string(\"\\n\".join(preamble +\n                                             [\"Direct\"] + coords_str))\n            if ionicstep_end is None:\n                if ionicstep_cnt >= ionicstep_start:\n                    structures.append(p.structure)\n            else:\n                if ionicstep_start <= ionicstep_cnt < ionicstep_end:\n                    structures.append(p.structure)\n        self.structures = structures\n        self.comment = comment or self.structures[0].formula", "docstring": "Init a Xdatcar.\n\nArgs:\nfilename (str): Filename of input XDATCAR file.\nionicstep_start (int): Starting number of ionic step.\nionicstep_end (int): Ending number of ionic step.", "source": "juraj-google-style"}
{"code": "def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True,\n                  use_time_reversal=True, comment=None):\n        \n        return cls(\n            kpts=[ngkpt], kpt_shifts=shiftk,\n            use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,\n            comment=comment if comment else \"Monkhorst-Pack scheme with user-specified shiftk\")", "docstring": "Convenient static constructor for a Monkhorst-Pack mesh.\n\nArgs:\nngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.\nshiftk: Shift to be applied to the kpoints.\nuse_symmetries: Use spatial symmetries to reduce the number of k-points.\nuse_time_reversal: Use time-reversal symmetry to reduce the number of k-points.\n\nReturns:\n:class:`KSampling` object.", "source": "juraj-google-style"}
{"code": "def backup(filenames, prefix=\"error\"):\n    \n    num = max([0] + [int(f.split(\".\")[1])\n                     for f in glob(\"{}.*.tar.gz\".format(prefix))])\n    filename = \"{}.{}.tar.gz\".format(prefix, num + 1)\n    logging.info(\"Backing up run to {}.\".format(filename))\n    with tarfile.open(filename, \"w:gz\") as tar:\n        for fname in filenames:\n            for f in glob(fname):\n                tar.add(f)", "docstring": "Backup files to a tar.gz file. Used, for example, in backing up the\nfiles of an errored run before performing corrections.\n\nArgs:\nfilenames ([str]): List of files to backup. Supports wildcards, e.g.,\n*.*.\nprefix (str): prefix to the files. Defaults to error, which means a\nseries of error.1.tar.gz, error.2.tar.gz, ... will be generated.", "source": "juraj-google-style"}
{"code": "def output_refs(self, transitive: bool=True) -> List['SymbolReference']:\n    parent_func = self.parent_func()\n    references: List[SymbolReference] = []\n    if parent_func is not None:\n        output_vars = self.output_vars()\n\n        def find_references(code: Code):\n            refs = []\n\n            def identify_reference(k, v, p):\n                del k, p\n                if isinstance(v, SymbolReference):\n                    if v.name in output_vars:\n                        refs.append(v)\n            pg.traverse(code, identify_reference)\n            return refs\n        for line in self.succeeding_lines():\n            ins_refs = find_references(line)\n            references.extend(ins_refs)\n            new_assigned = line.output_vars()\n            if ins_refs and transitive:\n                output_vars.update(new_assigned)\n            else:\n                output_vars -= new_assigned\n    return references", "docstring": "Returns the references to the symbols that this code outputs.\n\nArgs:\ntransitive: If True, transitive symbol references will be included.\nOtherwise, only the direct dependencies will be included.\n\nReturns:\nA list of ``Var` or ``FunctionCall`` in their definition order that\nconsume the outputs of current instruction. Users can use\n:meth:`parent_instruction` or :meth:`line` to get their context.", "source": "github-repos"}
{"code": "def parameterize(self, country: Optional[str]='South Sudan', state: Optional[str]=None, year: Optional[int]=None, month: Optional[int]=None, unit: Optional[str]=None, fallback_aggaxes: List[str]=['year', 'month'], aggfunc: Callable=np.mean):\n    valid_axes = ('country', 'state', 'year', 'month')\n    if any(map((lambda axis: (axis not in valid_axes)), fallback_aggaxes)):\n        raise ValueError(f'All elements of the fallback_aggaxes set must be one of the following: {valid_axes}')\n    for n in self.nodes(data=True):\n        for indicator in n[1]['indicators'].values():\n            (indicator.mean, indicator.unit) = get_indicator_value(indicator, country, state, year, month, unit, fallback_aggaxes, aggfunc)\n            indicator.stdev = (0.1 * abs(indicator.mean))", "docstring": "Parameterize the analysis graph.\n\nArgs:\ncountry\nyear\nmonth\nfallback_aggaxes:\nAn iterable of strings denoting the axes upon which to perform\nfallback aggregation if the desired constraints cannot be met.\naggfunc: The function that will be called to perform the\naggregation if there are multiple matches.", "source": "codesearchnet"}
{"code": "def set_available(self, show=None):\n    show = (self.state.show if (show is None) else show)\n    self.set_presence(PresenceState(available=True, show=show))", "docstring": "Sets the agent availability to True.\n\nArgs:\nshow (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None)", "source": "codesearchnet"}
{"code": "def block_reducible(cm, nodes1, nodes2):\n    \n    \n    if not nodes1 or not nodes2:\n        return True\n\n    cm = cm[np.ix_(nodes1, nodes2)]\n\n    \n    if not cm.sum(0).all() or not cm.sum(1).all():\n        return True\n    if len(nodes1) > 1 and len(nodes2) > 1:\n        return block_cm(cm)\n    return False", "docstring": "Return whether connections from ``nodes1`` to ``nodes2`` are reducible.\n\nArgs:\ncm (np.ndarray): The network's connectivity matrix.\nnodes1 (tuple[int]): Source nodes\nnodes2 (tuple[int]): Sink nodes", "source": "juraj-google-style"}
{"code": "def write(self, __text: str) -> None:\n    if (__text == os.linesep):\n        self.handle.write(__text)\n    else:\n        frame = inspect.currentframe()\n        if (frame is None):\n            filename = 'unknown'\n            lineno = 0\n        else:\n            outer = frame.f_back\n            filename = outer.f_code.co_filename.split(os.sep)[(- 1)]\n            lineno = outer.f_lineno\n        self.handle.write('[{:>15s}:{:03d}] {}'.format(filename[(- 15):], lineno, __text))", "docstring": "Write text to the debug stream.\n\nArgs:\n__text: Text to write", "source": "codesearchnet"}
{"code": "def exclude(self, scheduled_operation: ScheduledOperation) -> bool:\n        \n        try:\n            self.scheduled_operations.remove(scheduled_operation)\n            return True\n        except ValueError:\n            return False", "docstring": "Omits a scheduled operation from the schedule, if present.\n\nArgs:\nscheduled_operation: The operation to try to remove.\n\nReturns:\nTrue if the operation was present and is now removed, False if it\nwas already not present.", "source": "juraj-google-style"}
{"code": "def filter_segs(self, segs):\n\n    def whole_seg(seg):\n        m = self.seg_regex.match(seg)\n        if (m and (m.group(0) == seg)):\n            return True\n        else:\n            return False\n    return list(filter(whole_seg, segs))", "docstring": "Given list of strings, return only those which are valid segments.\n\nArgs:\nsegs (list): list of unicode values\n\nReturns:\nlist: values in `segs` that are valid segments (according to the\ndefinititions of bases and diacritics/modifiers known to the\nobject", "source": "codesearchnet"}
{"code": "def on_message(self, fragment):\n    try:\n        message = (yield self._receive(fragment))\n    except Exception as e:\n        log.error('Unhandled exception receiving a message: %r: %r', e, fragment, exc_info=True)\n        self._internal_error('server failed to parse a message')\n    try:\n        if message:\n            if (_message_test_port is not None):\n                _message_test_port.received.append(message)\n            work = (yield self._handle(message))\n            if work:\n                (yield self._schedule(work))\n    except Exception as e:\n        log.error('Handler or its work threw an exception: %r: %r', e, message, exc_info=True)\n        self._internal_error('server failed to handle a message')\n    raise gen.Return(None)", "docstring": "Process an individual wire protocol fragment.\n\nThe websocket RFC specifies opcodes for distinguishing text frames\nfrom binary frames. Tornado passes us either a text or binary string\ndepending on that opcode, we have to look at the type of the fragment\nto see what we got.\n\nArgs:\nfragment (unicode or bytes) : wire fragment to process", "source": "codesearchnet"}
{"code": "def _get_string_match(self, key):\n        \n\n        expression = r'(?:\\s*)'.join([\n            '^',\n            'define',\n            r'\\(',\n            '\\'{}\\''.format(key),\n            ',',\n            r'\\'(.*)\\'',\n            r'\\)',\n            ';'\n        ])\n\n        pattern = re.compile(expression, re.MULTILINE)\n        return pattern.search(self._content)", "docstring": "Gets a MatchObject for the given key, assuming a string value.\n\nArgs:\nkey (str): Key of the property to look-up.\n\nReturn:\nMatchObject: The discovered match.", "source": "juraj-google-style"}
{"code": "def _verify_request(self, signature_chain_url: str, signature: str, request_body: bytes) -> bool:\n    if (signature_chain_url not in self.valid_certificates.keys()):\n        amazon_cert: X509 = verify_cert(signature_chain_url)\n        if amazon_cert:\n            amazon_cert_lifetime: timedelta = self.config['amazon_cert_lifetime']\n            expiration_timestamp = (datetime.utcnow() + amazon_cert_lifetime)\n            validated_cert = ValidatedCert(cert=amazon_cert, expiration_timestamp=expiration_timestamp)\n            self.valid_certificates[signature_chain_url] = validated_cert\n            log.info(f'Certificate {signature_chain_url} validated')\n        else:\n            log.error(f'Certificate {signature_chain_url} validation failed')\n            return False\n    else:\n        validated_cert: ValidatedCert = self.valid_certificates[signature_chain_url]\n        amazon_cert: X509 = validated_cert.cert\n    if verify_signature(amazon_cert, signature, request_body):\n        result = True\n    else:\n        log.error(f\"Failed signature verification for request: {request_body.decode('utf-8', 'replace')}\")\n        result = False\n    return result", "docstring": "Conducts series of Alexa request verifications against Amazon Alexa requirements.\n\nArgs:\nsignature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.\nsignature: Base64 decoded Alexa request signature from Signature HTTP header.\nrequest_body: full HTTPS request body\nReturns:\nresult: True if verification was successful, False if not.", "source": "codesearchnet"}
{"code": "def __init__(self, credentials=None):\n        \n        super(Authentication, self).__init__(enums.Tags.AUTHENTICATION)\n\n        self._credentials = []\n        self.credentials = credentials", "docstring": "Construct an Authentication struct.\n\nArgs:\ncredentials (list): A list of Credential structs to be used for\nauthentication. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def view_packgets_list(self, option: str='-e', keyword: str='') -> list:\n    if (option not in ['-f', '-d', '-e', '-s', '-3', '-i', '-u']):\n        raise ValueError(f'There is no option called {option!r}.')\n    (output, _) = self._execute('-s', self.device_sn, 'shell', 'pm', 'list', 'packages', option, keyword)\n    return list(map((lambda x: x[8:]), output.splitlines()))", "docstring": "Show all packages.\n\nArgs:\noption:\n-f see their associated file\n-d filter to only show disabled packages\n-e filter to only show enabled packages\n-s filter to only show system packages\n-3 filter to only show third party packages\n-i see the installer for the packages\n-u also include uninstalled packages\n-keyword: optionally only those whose name contains the text in keyword", "source": "codesearchnet"}
{"code": "def _generate_flush_cache_op(self, num_replicas, on_tpu, tensor_trace_order, graph):\n\n    def _flush_fun(cache, replica_id, step_num):\n        \n\n        def _f(file_index):\n            \n\n            def _print_cache():\n                \n                replica_str = '%d' % file_index\n                if self._parameters.trace_dir:\n                    output_path = os.path.join(self._parameters.trace_dir, _COMPACT_TRACE_FILE_PREFIX) + replica_str + self._get_outfile_suffix()\n                    output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n                else:\n                    output_stream = sys.stderr\n                new_step_line = _REPLICA_ID_TAG + replica_str\n                print_ops = []\n                if self._parameters.inspect_trace:\n                    if self._num_signature_dimensions() > 1:\n                        raise ValueError('Inspecting multi signatures are not supported.')\n                    if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY:\n                        print_ops.append(self._inspect_history_cache(cache=cache, replica_id=replica_id, step_num=step_num, tensor_trace_order=tensor_trace_order))\n                    else:\n                        print_ops.append(self._inspect_summary_cache(cache=cache, replica_id=replica_id, step_num=step_num, output_stream=output_stream, tensor_trace_order=tensor_trace_order))\n                else:\n                    for i in range(self._num_signature_dimensions()):\n                        print_ops.append(logging_ops.print_v2(new_step_line, '\\n', cache[:, i], '\\n', summarize=-1, output_stream=output_stream))\n                with ops.control_dependencies(print_ops):\n                    return constant_op.constant(0).op\n            return _print_cache\n\n        def _eq(file_index):\n            return math_ops.equal(replica_id, file_index)\n        flush_op_cases = {}\n        flush_op_cases[_eq(0)] = _f(0)\n        for i in range(1, num_replicas):\n            if on_tpu and (not self._parameters.collect_summary_per_core):\n                flush_op_cases[_eq(i)] = control_flow_ops.no_op\n            else:\n                flush_op_cases[_eq(i)] = _f(i)\n        return control_flow_case.case(flush_op_cases, exclusive=True)\n    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)\n    if self._use_temp_cache():\n        cache_val = cache\n    else:\n        cache_val = cache.value()\n    if on_tpu:\n        if not self._parameters.collect_summary_per_core:\n            cache_val = self.merge_caches_on_tpu(cache_val)\n            cache_val = self.aggregate_global_cache(cache_val)[0]\n        flush_op = tpu_replication.outside_compilation(_flush_fun, cache_val, self._replica_id, array_ops.identity(training_util.get_or_create_global_step()))\n    else:\n        global_step = training_util.get_or_create_global_step()\n        flush_op = _flush_fun(cache_val, self._replica_id, global_step)\n    if self._use_temp_cache():\n        with ops.control_dependencies([flush_op]):\n            return constant_op.constant(0).op\n    else:\n        with ops.control_dependencies([flush_op]):\n            reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE, dtype=cache.dtype, shape=cache.shape)\n            assign_op = state_ops.assign(cache, reset_value).op\n            with ops.control_dependencies([assign_op]):\n                return constant_op.constant(0).op", "docstring": "Generates an Op that will flush the cache to 
file.\n\nArgs:\nnum_replicas: total number of replicas.\non_tpu: if the graph is executed on TPU.\ntensor_trace_order: TensorTraceOrder object holding tensorname to id map.\ngraph: TensorFlow graph.\n\nReturns:\nThe Op to flush the cache to file.", "source": "github-repos"}
{"code": "def send_course_refund_email(self, email, refund_id, amount, course_name, order_number, order_url, site_code=None):\n    config = get_sailthru_configuration(site_code)\n    try:\n        sailthru_client = get_sailthru_client(site_code)\n    except SailthruError:\n        return\n    email_vars = {'amount': amount, 'course_name': course_name, 'order_number': order_number, 'order_url': order_url}\n    try:\n        response = sailthru_client.send(template=config['templates']['course_refund'], email=email, _vars=email_vars)\n    except SailthruClientError:\n        logger.exception('A client error occurred while attempting to send a course refund notification for refund [%d].', refund_id)\n        return\n    if response.is_ok():\n        logger.info('Course refund notification sent for refund %d.', refund_id)\n    else:\n        error = response.get_error()\n        logger.error('An error occurred while attempting to send a course refund notification for refund [%d]: %d - %s', refund_id, error.get_error_code(), error.get_message())\n        if can_retry_sailthru_request(error):\n            logger.info('An attempt will be made again to send a course refund notification for refund [%d].', refund_id)\n            schedule_retry(self, config)\n        else:\n            logger.warning('No further attempts will be made to send a course refund notification for refund [%d].', refund_id)", "docstring": "Sends the course refund email.\n\nArgs:\nself: Ignore.\nemail (str): Recipient's email address.\nrefund_id (int): ID of the refund that initiated this task.\namount (str): Formatted amount of the refund.\ncourse_name (str): Name of the course for which payment was refunded.\norder_number (str): Order number of the order that was refunded.\norder_url (str): Receipt URL of the refunded order.\nsite_code (str): Identifier of the site sending the email.", "source": "codesearchnet"}
{"code": "def is_compatible_with(self, other):\n    other = as_dimension(other)\n    return self._value is None or other.value is None or self._value == other.value", "docstring": "Returns true if `other` is compatible with this Dimension.\n\nTwo known Dimensions are compatible if they have the same value.\nAn unknown Dimension is compatible with all other Dimensions.\n\nArgs:\nother: Another Dimension.\n\nReturns:\nTrue if this Dimension and `other` are compatible.", "source": "github-repos"}
{"code": "def get_tensor_shape(self, tensor_name):\n    tensor = self._name_to_tensor(tensor_name)\n    if isinstance(tensor, mtf.Tensor):\n        return tf.TensorShape(tensor.shape.to_integer_list)\n    else:\n        return tensor.shape", "docstring": "The tf.TensorShape of a tensor.\n\nArgs:\ntensor_name: string, the name of a tensor in the graph.\n\nReturns:\na tf.TensorShape", "source": "codesearchnet"}
{"code": "def set_tensor(self, tensor_index, value):\n    self._interpreter.SetTensor(tensor_index, value)", "docstring": "Sets the value of the input tensor.\n\nNote this copies data in `value`.\n\nIf you want to avoid copying, you can use the `tensor()` function to get a\nnumpy buffer pointing to the input buffer in the tflite interpreter.\n\nArgs:\ntensor_index: Tensor index of tensor to set. This value can be gotten from\nthe 'index' field in get_input_details.\nvalue: Value of tensor to set.\n\nRaises:\nValueError: If the interpreter could not set the tensor.", "source": "github-repos"}
{"code": "def Check(self, error, filename, linenum):\n    \n    if Match(r'T(EST|est)', self.current_function):\n      base_trigger = self._TEST_TRIGGER\n    else:\n      base_trigger = self._NORMAL_TRIGGER\n    trigger = base_trigger * 2**_VerboseLevel()\n\n    if self.lines_in_function > trigger:\n      error_level = int(math.log(self.lines_in_function / base_trigger, 2))\n      \n      if error_level > 5:\n        error_level = 5\n      error(filename, linenum, 'readability/fn_size', error_level,\n            'Small and focused functions are preferred:'\n            ' %s has %d non-comment lines'\n            ' (error triggered by exceeding %d lines).'  % (\n                self.current_function, self.lines_in_function, trigger))", "docstring": "Report if too many lines in function body.\n\nArgs:\nerror: The function to call with any errors found.\nfilename: The name of the current file.\nlinenum: The number of the line to check.", "source": "juraj-google-style"}
{"code": "def __is_bound_method(method):\n    \n    if not(hasattr(method, \"__func__\") and hasattr(method, \"__self__\")):\n        return False\n\n    \n    return six.get_method_self(method) is not None", "docstring": "Return ``True`` if the `method` is a bound method (attached to an class\ninstance.\n\nArgs:\nmethod: A method or function type object.", "source": "juraj-google-style"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(StorageMediaTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._custom_artifacts_path = None\n    self._artifact_definitions_path = None\n    self._artifact_filters = None\n    self._credentials = []\n    self._credential_configurations = []\n    self._filter_file = None\n    self._partitions = None\n    self._process_vss = False\n    self._source_scanner = source_scanner.SourceScanner()\n    self._source_path = None\n    self._source_path_specs = []\n    self._textwrapper = textwrap.TextWrapper()\n    self._user_selected_vss_stores = False\n    self._volumes = None\n    self._vss_only = False\n    self._vss_stores = None", "docstring": "Initializes the CLI tool object.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def get_shifted_center_blocks(x, indices):\n    center_x = gather_blocks_2d(x, indices)\n\n    def shift_right_2d_blocks(x):\n        'Shift the second to last dimension of x right by one.'\n        shifted_targets = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[(:, :, :, :(- 1), :)]\n        return shifted_targets\n    x_shifted = shift_right_2d_blocks(center_x)\n    return x_shifted", "docstring": "Get right shifted blocks for masked local attention 2d.\n\nArgs:\nx: A tensor with shape [batch, heads, height, width, depth]\nindices: The indices to gather blocks\n\nReturns:\nx_shifted: a tensor of extracted blocks, each block right shifted along\nlength.", "source": "codesearchnet"}
{"code": "def __init__(self, callback):\n        \n        self._callback = callback\n        self._vcs = brocade_vcs(\n            callback=pynos.utilities.return_xml\n        )", "docstring": "VCS init method.\nArgs:\ncallback: Callback function that will be called for each action.\nReturns:\nVCS Object\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def get_from(input_file, property_names):\n    with open(input_file) as f:\n        feature_collection = geojson.load(f)\n    features = feature_collection['features']\n    values = [tuple([feat['properties'].get(x) for x in property_names]) for feat in features]\n    return values", "docstring": "Reads a geojson and returns a list of value tuples, each value corresponding to a\nproperty in property_names.\n\nArgs:\ninput_file (str): File name.\nproperty_names: List of strings; each string is a property name.\n\nReturns:\nList of value tuples.", "source": "codesearchnet"}
{"code": "def ekm_log(logstr, priority=3):\n    \n    if priority <= ekmmeters_log_level:\n        dt = datetime.datetime\n        stamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M.%f\")\n        ekmmeters_log_func(\"[EKM Meter Debug Message: \" + stamp + \"] -> \" + logstr)\n    pass", "docstring": "Send string to module level log\n\nArgs:\nlogstr (str): string to print.\npriority (int): priority, supports 3 (default) and 4 (special).", "source": "juraj-google-style"}
{"code": "def get_image_size_fit_to_canvas(image_height: int, image_width: int, canvas_height: int, canvas_width: int, tile_size: int) -> Tuple[int, int]:\n    target_width = np.clip(image_width, tile_size, canvas_width)\n    target_height = np.clip(image_height, tile_size, canvas_height)\n    scale_h = target_height / image_height\n    scale_w = target_width / image_width\n    if scale_w < scale_h:\n        new_width = target_width\n        new_height = min(math.floor(image_height * scale_w) or 1, target_height)\n    else:\n        new_height = target_height\n        new_width = min(math.floor(image_width * scale_h) or 1, target_width)\n    return (new_height, new_width)", "docstring": "Calculates the new size of an image to fit within a canvas while maintaining aspect ratio.\n\nThis function calculates the optimal size for an image to fit within a canvas defined by\ncanvas_height and canvas_width, while ensuring that the image dimensions are not smaller than\ntile_size. If the image is larger than the canvas, the returned size will fit within the canvas.\nIf the image already fits within the canvas, the size remains unchanged.\nThe aspect ratio of the original image is preserved as much as possible.\n\nArgs:\nimage_height (`int`):\nThe height of the original image.\nimage_width (`int`):\nThe width of the original image.\ncanvas_height (`int`):\nThe height of the canvas.\ncanvas_width (`int`):\nThe width of the canvas.\ntile_size (`int`):\nThe tile size.\n\nReturns:\n`Tuple[int, int]`: A tuple containing the new height and width of the image.", "source": "github-repos"}
{"code": "def _workflow_complete(workflow_stage_dict: dict):\n    \n    \n    \n    complete_stages = []\n    for _, stage_config in workflow_stage_dict.items():\n        complete_stages.append((stage_config['status'] == 'complete'))\n    if all(complete_stages):\n        LOG.info('PB workflow complete!')\n        return True\n    return False", "docstring": "Check if the workflow is complete.\n\nThis function checks if the entire workflow is complete.\n\nThis function is used by `execute_processing_block`.\n\nArgs:\nworkflow_stage_dict (dict): Workflow metadata dictionary.\n\nReturns:\nbool, True if the workflow is complete, otherwise False.", "source": "juraj-google-style"}
{"code": "def get(self):\n    with warnings.catch_warnings(record=False):\n        warnings.simplefilter('ignore')\n        return math.sqrt(np.nanvar(self._queue, ddof=1))", "docstring": "Calculates and returns the stdev of the current sliding window.\n\nReturns:\nfloat: The standard deviation of the values in the current sliding window.\nReturns NaN if the window contains fewer than 2 elements.", "source": "github-repos"}
{"code": "def get_model_files(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[str, Union[Path, List[Path]]]:\n    module_name = model_type_to_module_name(model_type)\n    model_module = TRANSFORMERS_PATH / 'models' / module_name\n    model_files = list(model_module.glob('*.py'))\n    model_files = filter_framework_files(model_files, frameworks=frameworks)\n    doc_file = REPO_PATH / 'docs' / 'source' / 'en' / 'model_doc' / f'{model_type}.md'\n    test_files = [f'test_modeling_{module_name}.py', f'test_modeling_tf_{module_name}.py', f'test_modeling_flax_{module_name}.py', f'test_tokenization_{module_name}.py', f'test_image_processing_{module_name}.py', f'test_feature_extraction_{module_name}.py', f'test_processor_{module_name}.py']\n    test_files = filter_framework_files(test_files, frameworks=frameworks)\n    test_files = [REPO_PATH / 'tests' / 'models' / module_name / f for f in test_files]\n    test_files = [f for f in test_files if f.exists()]\n    return {'doc_file': doc_file, 'model_files': model_files, 'module_name': module_name, 'test_files': test_files}", "docstring": "Retrieves all the files associated to a model.\n\nArgs:\nmodel_type (`str`): A valid model type (like \"bert\" or \"gpt2\")\nframeworks (`List[str]`, *optional*):\nIf passed, will only keep the model files corresponding to the passed frameworks.\n\nReturns:\n`Dict[str, Union[Path, List[Path]]]`: A dictionary with the following keys:\n- **doc_file** -- The documentation file for the model.\n- **model_files** -- All the files in the model module.\n- **test_files** -- The test files for the model.", "source": "github-repos"}
{"code": "def set(self, name, value):\n    name = str(name)\n    if (name not in self._properties):\n        raise ArgumentError('Unknown property in DeviceModel', name=name)\n    self._properties[name] = value", "docstring": "Set a device model property.\n\nArgs:\nname (str): The name of the property to set\nvalue (int, bool): The value of the property to set", "source": "codesearchnet"}
{"code": "def CallNtpdate(logger):\n  \n  ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])\n  try:\n    if not ntpd_inactive:\n      subprocess.check_call(['service', 'ntpd', 'stop'])\n    subprocess.check_call(\n        'ntpdate `awk \\'$1==\"server\" {print $2}\\' /etc/ntp.conf`', shell=True)\n    if not ntpd_inactive:\n      subprocess.check_call(['service', 'ntpd', 'start'])\n  except subprocess.CalledProcessError:\n    logger.warning('Failed to sync system time with ntp server.')\n  else:\n    logger.info('Synced system time with ntp server.')", "docstring": "Sync clock using ntpdate.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"}
{"code": "def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):\n    if (min_key >= max_key):\n        raise ValueError('min_key cannot exceed max_key')\n    decryptions = []\n    for key in range(min_key, max_key):\n        plaintext = decrypt(key, ciphertext, shift_function=shift_function)\n        decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))\n    return sorted(decryptions, reverse=True)", "docstring": "Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.\n\nExample:\n>>> decryptions = crack(\"KHOOR\", fitness.english.quadgrams)\n>>> print(''.join(decryptions[0].plaintext))\nHELLO\n\nArgs:\nciphertext (iterable): The symbols to decrypt\n*fitness_functions (variable length argument list): Functions to score decryption with\n\nKeyword Args:\nmin_key (int): Key to start with\nmax_key (int): Key to stop at (exclusive)\nshift_function (function(shift, symbol)): Shift function to use\n\nReturns:\nSorted list of decryptions\n\nRaises:\nValueError: If min_key exceeds max_key\nValueError: If no fitness_functions are given", "source": "codesearchnet"}
{"code": "def fit_arrhenius(temps, diffusivities):\n    \n    t_1 = 1 / np.array(temps)\n    logd = np.log(diffusivities)\n    \n    a = np.array([t_1, np.ones(len(temps))]).T\n    w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)\n    w = np.array(w)\n    n = len(temps)\n    if n > 2:\n        std_Ea = (res[0] / (n - 2) / (\n        n * np.var(t_1))) ** 0.5 * const.k / const.e\n    else:\n        std_Ea = None\n    return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea", "docstring": "Returns Ea, c, standard error of Ea from the Arrhenius fit:\nD = c * exp(-Ea/kT)\n\nArgs:\ntemps ([float]): A sequence of temperatures. units: K\ndiffusivities ([float]): A sequence of diffusivities (e.g.,\nfrom DiffusionAnalyzer.diffusivity). units: cm^2/s", "source": "juraj-google-style"}
{"code": "def get_modules_to_fuse(model, quantization_config):\n    if not isinstance(model, PreTrainedModel):\n        raise TypeError(f'The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}')\n    if quantization_config.modules_to_fuse is not None:\n        current_fused_mapping = quantization_config.modules_to_fuse\n        current_fused_mapping['max_seq_len'] = quantization_config.fuse_max_seq_len\n    elif model.config.model_type in AWQ_FUSED_MAPPINGS:\n        current_fused_mapping = AWQ_FUSED_MAPPINGS[model.config.model_type]\n        config = model.config.get_text_config(decoder=True)\n        hidden_size = config.hidden_size\n        num_attention_heads = config.num_attention_heads\n        num_key_value_heads = getattr(config, 'num_key_value_heads', num_attention_heads)\n        current_fused_mapping['hidden_size'] = hidden_size\n        current_fused_mapping['num_attention_heads'] = num_attention_heads\n        current_fused_mapping['num_key_value_heads'] = num_key_value_heads\n        current_fused_mapping['max_seq_len'] = quantization_config.fuse_max_seq_len\n    else:\n        raise ValueError('Fusing mapping not found either on the quantization config or the supported `AWQ_FUSED_MAPPINGS`. Please pass a `fused_mapping` argument in the `quantization_config` or raise an issue on transformers https:\n    return current_fused_mapping", "docstring": "Returns the fusing mapping given the quantization config and the model\n\nArgs:\nmodel (`~PreTrainedModel`):\nThe model to fuse - note this model should have been converted into AWQ format beforehand.\nquantization_config (`~transformers.quantization_config.AWQConfig`):\nThe quantization configuration to use.", "source": "github-repos"}
{"code": "def assemble_buffer(self, buf_header, buf_payload):\n        \n        if self.header.get('num_buffers', 0) <= len(self._buffers):\n            raise ProtocolError(\"too many buffers received expecting \" + str(self.header['num_buffers']))\n        self._buffers.append((buf_header, buf_payload))", "docstring": "Add a buffer header and payload that we read from the socket.\n\nThis differs from add_buffer() because we're validating vs.\nthe header's num_buffers, instead of filling in the header.\n\nArgs:\nbuf_header (``JSON``) : a buffer header\nbuf_payload (``JSON`` or bytes) : a buffer payload\n\nReturns:\nNone\n\nRaises:\nProtocolError", "source": "juraj-google-style"}
{"code": "def __init__(self, context):\n    \n    self._multiplexer = context.multiplexer\n\n    \n    \n    \n    self._index_cached = None\n\n    \n    \n    self._index_impl_lock = threading.Lock()\n\n    \n    \n    self._index_impl_thread = None", "docstring": "Instantiates TextPlugin via TensorBoard core.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False):\n    \n    oslogin_configured = self._GetStatus(two_factor=False)\n    if oslogin_configured is None:\n      return None\n    two_factor_configured = self._GetStatus(two_factor=True)\n    \n    two_factor_desired = two_factor_desired and oslogin_desired\n\n    if oslogin_desired:\n      params = ['activate']\n      if two_factor_desired:\n        params += ['--twofactor']\n      \n      if not oslogin_configured:\n        self.logger.info('Activating OS Login.')\n        return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()\n      \n      if two_factor_desired and not two_factor_configured:\n        self.logger.info('Activating OS Login two factor authentication.')\n        return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()\n      \n      if two_factor_configured and not two_factor_desired:\n        self.logger.info('Reactivating OS Login with two factor disabled.')\n        return (self._RunOsLoginControl(['deactivate'])\n                or self._RunOsLoginControl(params))\n      \n      current_time = time.time()\n      if current_time - self.update_time > NSS_CACHE_DURATION_SEC:\n        self.update_time = current_time\n        return self._RunOsLoginNssCache()\n\n    elif oslogin_configured:\n      self.logger.info('Deactivating OS Login.')\n      return (self._RunOsLoginControl(['deactivate'])\n              or self._RemoveOsLoginNssCache())\n\n    \n    return 0", "docstring": "Update whether OS Login is enabled and update NSS cache if necessary.\n\nArgs:\noslogin_desired: bool, enable OS Login if True, disable if False.\ntwo_factor_desired: bool, enable two factor if True, disable if False.\n\nReturns:\nint, the return code from updating OS Login, or None if not present.", "source": "juraj-google-style"}
{"code": "def remove(self, key):\n        \n        self.raise_error_if_not_open()\n\n        if key in self._file:\n            del self._file[key]", "docstring": "Remove the data stored for the given key.\n\nArgs:\nkey (str): Key of the data to remove.\n\nNote:\nThe container has to be opened in advance.", "source": "juraj-google-style"}
{"code": "def is_flat(neurite, tol, method='tolerance'):\n    \n    ext = principal_direction_extent(neurite.points[:, COLS.XYZ])\n\n    assert method in ('tolerance', 'ratio'), \"Method must be one of 'tolerance', 'ratio'\"\n    if method == 'ratio':\n        sorted_ext = np.sort(ext)\n        return sorted_ext[0] / sorted_ext[1] < float(tol)\n    return any(ext < float(tol))", "docstring": "Check if neurite is flat using the given method\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): tolerance\nmethod(string): the method of flatness estimation:\n'tolerance' returns true if any extent of the tree is smaller\nthan the given tolerance\n'ratio' returns true if the ratio of the smallest directions\nis smaller than tol. e.g. [1,2,3] -> 1/2 < tol\n\nReturns:\nTrue if neurite is flat", "source": "juraj-google-style"}
{"code": "def _enum_from_direction(direction):\n    \n    if isinstance(direction, int):\n        return direction\n\n    if direction == Query.ASCENDING:\n        return enums.StructuredQuery.Direction.ASCENDING\n    elif direction == Query.DESCENDING:\n        return enums.StructuredQuery.Direction.DESCENDING\n    else:\n        msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING)\n        raise ValueError(msg)", "docstring": "Convert a string representation of a direction to an enum.\n\nArgs:\ndirection (str): A direction to order by. Must be one of\n:attr:`~.firestore.Query.ASCENDING` or\n:attr:`~.firestore.Query.DESCENDING`.\n\nReturns:\nint: The enum corresponding to ``direction``.\n\nRaises:\nValueError: If ``direction`` is not a valid direction.", "source": "juraj-google-style"}
{"code": "def is_partial(self, filepath):\n        \n        path, filename = os.path.split(filepath)\n        return filename.startswith('_')", "docstring": "Check if file is a Sass partial source (see\n`Sass partials Reference`_).\n\nArgs:\nfilepath (str): A file path. Can be absolute, relative or just a\nfilename.\n\nReturns:\nbool: True if file is a partial source, else False.", "source": "juraj-google-style"}
{"code": "def draw_lines(self, *points):\n    point_array = ffi.new('SDL_Point[]', len(points))\n    for (i, p) in enumerate(points):\n        point_array[i] = p._ptr[0]\n    check_int_err(lib.SDL_RenderDrawLines(self._ptr, point_array, len(points)))", "docstring": "Draw a series of connected lines on the current rendering target.\n\nArgs:\n*points (Point): The points along the lines.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def _parse_interfaces(self):\n    interfaces = dict()\n    names = re.findall('^interface (Po.+)$', self.config, re.M)\n    for name in names:\n        config = self.get_block(('interface %s' % name))\n        match = re.search('mlag (\\\\d+)', config)\n        if match:\n            interfaces[name] = dict(mlag_id=match.group(1))\n    return dict(interfaces=interfaces)", "docstring": "Scans the global config and returns the configured interfaces\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict.", "source": "codesearchnet"}
{"code": "def create(window, root):\n    notifications = {}\n    _id = root.get_property('id')\n    from foxpuppet.windows.browser.notifications import addons\n    notifications.update(addons.NOTIFICATIONS)\n    return notifications.get(_id, BaseNotification)(window, root)", "docstring": "Create a notification object.\n\nArgs:\nwindow (:py:class:`BrowserWindow`): Window object this region\nappears in.\nroot\n(:py:class:`~selenium.webdriver.remote.webelement.WebElement`):\nWebDriver element object that serves as the root for the\nnotification.\n\nReturns:\n:py:class:`BaseNotification`: Firefox notification.", "source": "codesearchnet"}
{"code": "def cumsum(x, dim, exclusive=False):\n  \n  with tf.variable_scope(\"cumsum\"):\n    new_name = \"tmp_dim_cumsum\"\n    new_dim = Dimension(new_name, dim.size)\n    new_shape = x.shape.rename_dimension(dim.name, new_name)\n    comparator = less if exclusive else less_equal\n    m = cast(\n        comparator(mtf_range(x.mesh, dim, dtype=tf.float32),\n                   mtf_range(x.mesh, new_dim, dtype=tf.float32)), x.dtype)\n    ret = einsum([x, m], output_shape=new_shape)\n    return reshape(ret, x.shape)", "docstring": "Cumulative sum.\n\nArgs:\nx: a Tensor\ndim: a Dimension\nexclusive: a boolean\n\nReturns:\na Tensor with the same shape as x.", "source": "juraj-google-style"}
{"code": "def clone(self, uuid):\n        \n        \n        request_url = self._client.base_api_url + self.clone_url.format(\n            id=uuid\n        )\n\n        response = self._client.session.post(request_url)\n\n        \n        self.validate_request_success(\n            response_text=response.text,\n            request_url=request_url,\n            status_code=response.status_code,\n            expected_status_code=HTTP_201_CREATED,\n        )\n\n        \n        return self.response_data_to_model_instance(response.json())", "docstring": "Clone the task instance with given UUID.\n\nArgs:\nuuid (str): The UUID of the task instance to clone.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nA task instance model instance representing the task\ninstance created due to the clone.", "source": "juraj-google-style"}
{"code": "def begin(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.begin import begin\n    return begin(self)", "docstring": "Generates a single timestamp at the beginning of the\n[`EventSet`][temporian.EventSet], per index group.\n\nUsage example:\n```python\n>>> a = tp.event_set(\n...     timestamps=[5, 6, 7, -1],\n...     features={\"f\": [50, 60, 70, -10], \"idx\": [1, 1, 1, 2]},\n...     indexes=[\"idx\"]\n... )\n\n>>> a_ini = a.begin()\n>>> a_ini\nindexes: [('idx', int64)]\nfeatures: []\nevents:\nidx=1 (1 events):\ntimestamps: [5.]\nidx=2 (1 events):\ntimestamps: [-1.]\n...\n\n```\n\nReturns:\nA feature-less EventSet with a single timestamp per index group.", "source": "github-repos"}
{"code": "def process(self, element, *args, **kwargs):\n    (text, uid), prediction = element\n    embedding = prediction.inference\n    l2_norm = np.linalg.norm(embedding)\n    yield {'text': text, 'id': uid, 'embedding': embedding / l2_norm}", "docstring": "For each element in the input PCollection, normalize the embedding vector, and\nyield a new element with the normalized embedding added\nArgs:\nelement: The element to be processed.", "source": "github-repos"}
{"code": "def error_log(self, msg='', level=20, traceback=False):\n        \n        \n        sys.stderr.write(msg + '\\n')\n        sys.stderr.flush()\n        if traceback:\n            tblines = traceback_.format_exc()\n            sys.stderr.write(tblines)\n            sys.stderr.flush()", "docstring": "Write error message to log.\n\nArgs:\nmsg (str): error message\nlevel (int): logging level\ntraceback (bool): add traceback to output or not", "source": "juraj-google-style"}
{"code": "def dump_property(self, name):\n    if (not hasattr(self, name)):\n        raise ArgumentError(('Unknown property %s' % name))\n    value = getattr(self, name)\n    if (name in self._complex_properties):\n        value = self._complex_properties[name][0](value)\n    return value", "docstring": "Serialize a property of this class by name.\n\nArgs:\nname (str): The name of the property to dump.\n\nReturns:\nobject: The serialized value of the property.", "source": "codesearchnet"}
{"code": "def _generate_response(self, response: dict, request: dict) -> dict:\n    response_template = deepcopy(self.response_template)\n    response_template['sessionAttributes']['sessionId'] = request['session']['sessionId']\n    for (key, value) in response_template.items():\n        if (key not in response.keys()):\n            response[key] = value\n    return response", "docstring": "Populates generated response with additional data conforming Alexa response specification.\n\nArgs:\nresponse: Raw user input extracted from Alexa request.\nrequest: Alexa request.\nReturns:\nresponse: Response conforming Alexa response specification.", "source": "codesearchnet"}
{"code": "def peek_step(self, val: ArrayValue,\n                  sn: \"DataNode\") -> Tuple[Optional[Value], \"DataNode\"]:\n        \n        try:\n            return val[self.index], sn\n        except (IndexError, KeyError, TypeError):\n            return None, sn", "docstring": "Return entry value addressed by the receiver + its schema node.\n\nArgs:\nval: Current value (array).\nsn:  Current schema node.", "source": "juraj-google-style"}
{"code": "def is_packet_trace(path):\n    \n    path = os.path.abspath(path)\n    if not os.path.isfile(path):\n        return False\n\n    try:\n        f = open(path, 'rb')\n    except:\n        return False\n\n    magic = f.read(4)\n    f.close()\n\n    return magic in FILE_TYPE_HANDLER", "docstring": "Determine if a file is a packet trace that is supported by this module.\n\nArgs:\npath (str): path to the trace file.\n\nReturns:\nbool: True if the file is a valid packet trace.", "source": "juraj-google-style"}
{"code": "def _ExpandUsersHomeDirectoryPathSegments(cls, path_segments, path_separator, user_accounts):\n    if (not path_segments):\n        return []\n    user_paths = []\n    first_path_segment = path_segments[0].lower()\n    if (first_path_segment not in ('%%users.homedir%%', '%%users.userprofile%%')):\n        if cls._IsWindowsDrivePathSegment(path_segments[0]):\n            path_segments[0] = ''\n        user_path = path_separator.join(path_segments)\n        user_paths.append(user_path)\n    else:\n        for user_account in user_accounts:\n            user_path_segments = user_account.GetUserDirectoryPathSegments()\n            if (not user_path_segments):\n                continue\n            if cls._IsWindowsDrivePathSegment(user_path_segments[0]):\n                user_path_segments[0] = ''\n            if (not user_path_segments[(- 1)]):\n                user_path_segments.pop()\n            user_path_segments.extend(path_segments[1:])\n            user_path = path_separator.join(user_path_segments)\n            user_paths.append(user_path)\n    return user_paths", "docstring": "Expands a path to contain all users home or profile directories.\n\nExpands the artifacts path variable \"%%users.homedir%%\" or\n\"%%users.userprofile%%\".\n\nArgs:\npath_segments (list[str]): path segments.\npath_separator (str): path segment separator.\nuser_accounts (list[UserAccountArtifact]): user accounts.\n\nReturns:\nlist[str]: paths returned for user accounts without a drive indicator.", "source": "codesearchnet"}
{"code": "def add_asset(self, asset, asset_name, asset_type):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    if (asset == 'PHONE'):\n        return self.tc_requests.add_victim_phone_asset(self.unique_id, asset_name)\n    if (asset == 'EMAIL'):\n        return self.tc_requests.add_victim_email_asset(self.unique_id, asset_name, asset_type)\n    if (asset == 'NETWORK'):\n        return self.tc_requests.add_victim_network_asset(self.unique_id, asset_name, asset_type)\n    if (asset == 'SOCIAL'):\n        return self.tc_requests.add_victim_social_asset(self.unique_id, asset_name, asset_type)\n    if (asset == 'WEB'):\n        return self.tc_requests.add_victim_web_asset(self.unique_id, asset_name)\n    self._tcex.handle_error(925, ['asset_type', 'add_asset', 'asset_type', 'asset_type', asset_type])\n    return None", "docstring": "Adds a asset to the Victim\n\nValid asset_type:\n+ PHONE\n+ EMAIL\n+ NETWORK\n+ SOCIAL\n+ WEB\n\nArgs:\nasset:\nasset_name:\nasset_type: PHONE, EMAIL, NETWORK, SOCIAL, or WEB\n\nReturns:", "source": "codesearchnet"}
{"code": "def get_consensus_module(module_name):\n    module_package = module_name\n    if (module_name == 'genesis'):\n        module_package = 'sawtooth_validator.journal.consensus.genesis.genesis_consensus'\n    elif (module_name == 'devmode'):\n        module_package = 'sawtooth_validator.journal.consensus.dev_mode.dev_mode_consensus'\n    try:\n        return importlib.import_module(module_package)\n    except ImportError:\n        raise UnknownConsensusModuleError('Consensus module \"{}\" does not exist.'.format(module_name))", "docstring": "Returns a consensus module by name.\n\nArgs:\nmodule_name (str): The name of the module to load.\n\nReturns:\nmodule: The consensus module.\n\nRaises:\nUnknownConsensusModuleError: Raised if the given module_name does\nnot correspond to a consensus implementation.", "source": "codesearchnet"}
{"code": "def export_pytorch(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin'], model: 'PreTrainedModel', config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None, device: str='cpu') -> Tuple[List[str], List[str]]:\n    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:\n        raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.')\n    if tokenizer is not None:\n        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)\n        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')\n        preprocessor = tokenizer\n    if issubclass(type(model), PreTrainedModel):\n        import torch\n        from torch.onnx import export as onnx_export\n        logger.info(f'Using framework PyTorch: {torch.__version__}')\n        with torch.no_grad():\n            model.config.return_dict = True\n            model.eval()\n            if config.values_override is not None:\n                logger.info(f'Overriding {len(config.values_override)} configuration item(s)')\n                for override_config_key, override_config_value in config.values_override.items():\n                    logger.info(f'\\t- {override_config_key} -> {override_config_value}')\n                    setattr(model.config, override_config_key, override_config_value)\n            model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH)\n            device = torch.device(device)\n            if device.type == 'cuda' and torch.cuda.is_available():\n                model.to(device)\n                model_inputs_device = {}\n                for k, v in model_inputs.items():\n                    if isinstance(v, Tuple):\n                        model_inputs_device[k] = tuple((x.to(device) if isinstance(x, torch.Tensor) else None for x in v))\n                    elif isinstance(v, List):\n                        model_inputs_device[k] = [tuple((x.to(device) if isinstance(x, torch.Tensor) else None for x in t)) for t in v]\n                    else:\n                        model_inputs_device[k] = v.to(device)\n                model_inputs = model_inputs_device\n            inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())\n            onnx_outputs = list(config.outputs.keys())\n            if not inputs_match:\n                raise ValueError(\"Model and config inputs doesn't match\")\n            config.patch_ops()\n            onnx_export(model, (model_inputs,), f=output.as_posix(), input_names=list(config.inputs.keys()), output_names=onnx_outputs, dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), do_constant_folding=True, opset_version=opset)\n            config.restore_ops()\n    return (matched_inputs, onnx_outputs)", "docstring": "Export a PyTorch model to an ONNX Intermediate Representation (IR)\n\nArgs:\npreprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):\nThe preprocessor used for encoding the data.\nmodel ([`PreTrainedModel`]):\nThe model to export.\nconfig ([`~onnx.config.OnnxConfig`]):\nThe ONNX configuration associated with the exported model.\nopset (`int`):\nThe version of the ONNX operator set to use.\noutput (`Path`):\nDirectory to store the exported ONNX model.\ndevice (`str`, *optional*, defaults 
to `cpu`):\nThe device on which the ONNX model will be exported. Either `cpu` or `cuda`.\n\nReturns:\n`Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from\nthe ONNX configuration.", "source": "github-repos"}
{"code": "def _get_container_environment(self, **kwargs):\n    environment = {}\n    environment.update(self.primary_container['Environment'])\n    environment['SAGEMAKER_BATCH'] = 'True'\n    if ('MaxPayloadInMB' in kwargs):\n        environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB'])\n    if ('BatchStrategy' in kwargs):\n        if (kwargs['BatchStrategy'] == 'SingleRecord'):\n            strategy_env_value = 'SINGLE_RECORD'\n        elif (kwargs['BatchStrategy'] == 'MultiRecord'):\n            strategy_env_value = 'MULTI_RECORD'\n        else:\n            raise ValueError(\"Invalid BatchStrategy, must be 'SingleRecord' or 'MultiRecord'\")\n        environment['SAGEMAKER_BATCH_STRATEGY'] = strategy_env_value\n    if (('MaxConcurrentTransforms' in kwargs) and (int(kwargs['MaxConcurrentTransforms']) > 1)):\n        logger.warning('Local Mode only supports 1 ConcurrentTransform. Setting MaxConcurrentTransforms to 1')\n    environment['SAGEMAKER_MAX_CONCURRENT_TRANSFORMS'] = '1'\n    if ('Environment' in kwargs):\n        environment.update(kwargs['Environment'])\n    return environment", "docstring": "Get all the Environment variables that will be passed to the container\n\nCertain input fields such as BatchStrategy have different values for the API vs the Environment\nvariables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion.\n\nArgs:\n**kwargs: existing transform arguments\n\nReturns:\ndict: All the environment variables that should be set in the container", "source": "codesearchnet"}
{"code": "def _project_THn(self, hist: Hist) -> Any:\n        \n        \n        projection_axes = [axis.axis_type.value for axis in self.projection_axes]\n\n        \n        \n        \n        if len(projection_axes) == 2:\n            \n            projection_axes.reverse()\n\n        \n        \n        args = projection_axes + [\"E\"]\n        \n        logger.debug(f\"hist: {hist.GetName()} args: {args}\")\n\n        if len(projection_axes) > 3:\n            \n            projected_hist = hist.ProjectionND(*args)\n        else:\n            \n            projected_hist = hist.Projection(*args)\n\n        return projected_hist", "docstring": "Perform the actual THn -> THn or TH1 projection.\n\nThis projection could be to 1D, 2D, 3D, or ND.\n\nArgs:\nhist (ROOT.THnBase): Histogram from which the projections should be performed.\nReturns:\nROOT.THnBase or ROOT.TH1: The projected histogram.", "source": "juraj-google-style"}
{"code": "def intersect(self, range_):\n    new_slice = None\n    if self.package_request.conflict:\n        if (self.package_request.range is None):\n            new_slice = self.solver._get_variant_slice(self.package_name, range_)\n        else:\n            new_range = (range_ - self.package_request.range)\n            if (new_range is not None):\n                new_slice = self.solver._get_variant_slice(self.package_name, new_range)\n    else:\n        new_slice = self.variant_slice.intersect(range_)\n    if (new_slice is None):\n        if self.pr:\n            self.pr(\"%s intersected with range '%s' resulted in no packages\", self, range_)\n        return None\n    if (new_slice is not self.variant_slice):\n        scope = self._copy(new_slice)\n        if self.pr:\n            self.pr(\"%s was intersected to %s by range '%s'\", self, scope, range_)\n        return scope\n    return self", "docstring": "Intersect this scope with a package range.\n\nReturns:\nA new copy of this scope, with variants whos version fall outside\nof the given range removed. If there were no removals, self is\nreturned. If all variants were removed, None is returned.", "source": "codesearchnet"}
{"code": "def _finish_connection_action(self, action):\n        \n\n        success = action.data['success']\n        conn_key = action.data['id']\n\n        if self._get_connection_state(conn_key) != self.Connecting:\n            print(\"Invalid finish_connection action on a connection whose state is not Connecting, conn_key=%s\" % str(conn_key))\n            return\n\n        \n        data = self._get_connection(conn_key)\n        callback = data['callback']\n\n        conn_id = data['conn_id']\n        int_id = data['int_id']\n\n        if success is False:\n            reason = action.data['reason']\n            if reason is None:\n                reason = \"No reason was given\"\n\n            del self._connections[conn_id]\n            del self._int_connections[int_id]\n            callback(conn_id, self.id, False, reason)\n        else:\n            data['state'] = self.Idle\n            data['microstate'] = None\n            data['callback'] = None\n            callback(conn_id, self.id, True, None)", "docstring": "Finish a connection attempt\n\nArgs:\naction (ConnectionAction): the action object describing what we are\nconnecting to and what the result of the operation was", "source": "juraj-google-style"}
{"code": "def GetEventTypeString(self, event_type):\n    if (0 <= event_type < len(self._EVENT_TYPES)):\n        return self._EVENT_TYPES[event_type]\n    return 'Unknown {0:d}'.format(event_type)", "docstring": "Retrieves a string representation of the event type.\n\nArgs:\nevent_type (int): event type.\n\nReturns:\nstr: description of the event type.", "source": "codesearchnet"}
{"code": "def get_instances_with_configs(configs):\n    serials = []\n    for c in configs:\n        try:\n            serials.append(c['serial'])\n        except KeyError:\n            raise Error('Required value \"serial\" is missing in AndroidDevice config %s.' % c)\n    _validate_device_existence(serials)\n    results = []\n    for c in configs:\n        serial = c.pop('serial')\n        is_required = c.get(KEY_DEVICE_REQUIRED, True)\n        try:\n            ad = AndroidDevice(serial)\n            ad.load_config(c)\n        except Exception:\n            if is_required:\n                raise\n            ad.log.exception('Skipping this optional device due to error.')\n            continue\n        results.append(ad)\n    return results", "docstring": "Create AndroidDevice instances from a list of dict configs.\n\nEach config should have the required key-value pair 'serial'.\n\nArgs:\nconfigs: A list of dicts each representing the configuration of one\nandroid device.\n\nReturns:\nA list of AndroidDevice objects.", "source": "github-repos"}
{"code": "def get_variation_from_id(self, experiment_key, variation_id):\n    variation_map = self.variation_id_map.get(experiment_key)\n    if variation_map:\n        variation = variation_map.get(variation_id)\n        if variation:\n            return variation\n        else:\n            self.logger.error(('Variation ID \"%s\" is not in datafile.' % variation_id))\n            self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))\n            return None\n    self.logger.error(('Experiment key \"%s\" is not in datafile.' % experiment_key))\n    self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n    return None", "docstring": "Get variation given experiment and variation ID.\n\nArgs:\nexperiment: Key representing parent experiment of variation.\nvariation_id: ID representing the variation.\n\nReturns\nObject representing the variation.", "source": "codesearchnet"}
{"code": "def forward(self, hidden: torch.Tensor):\n    residual = hidden\n    hidden = self.norm(hidden)\n    hidden = self.mlp(hidden)\n    if self.gated_attn:\n        hidden = self.gating_block(hidden)\n    out = hidden + residual\n    return out", "docstring": "Args:\nhidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):\nInput tensor to the layer.\n\nReturns:\n`torch.Tensor`: Transformed tensor.", "source": "github-repos"}
{"code": "def loop(self, xy, yx):\n        \n        if len(xy) > 0:\n            self.iter += self.runs_per_iter\n        if self.iter < 2:\n            return True\n        t_test, self.p_value = ttest_ind(xy, yx, equal_var=False)\n        if self.p_value > self.threshold and self.iter < self.max_iter:\n            return True\n        else:\n            return False", "docstring": "Tests the loop condition based on the new results and the\nparameters.\n\nArgs:\nxy (list): list containing all the results for one set of samples\nyx (list): list containing all the results for the other set.\n\nReturns:\nbool: True if the loop has to continue, False otherwise.", "source": "juraj-google-style"}
{"code": "def visit_boolean_op(self, boolean_logic: _evaluation.BooleanOperatorNode) -> _sql_data_types.Select:\n    lhs_result = self.visit(boolean_logic.left)\n    rhs_result = self.visit(boolean_logic.right)\n    if lhs_result.sql_data_type != _sql_data_types.Boolean:\n        lhs_result = lhs_result.is_not_null()\n    if rhs_result.sql_data_type != _sql_data_types.Boolean:\n        rhs_result = rhs_result.is_not_null()\n    lhs_subquery = lhs_result.as_operand()\n    rhs_subquery = rhs_result.as_operand()\n    if boolean_logic.op == _ast.BooleanLogic.Op.IMPLIES:\n        sql_value = f'NOT {lhs_subquery} OR {rhs_subquery}'\n    elif boolean_logic.op == _ast.BooleanLogic.Op.XOR:\n        sql_value = f'{lhs_subquery} <> {rhs_subquery}'\n    else:\n        sql_value = f'{lhs_subquery} {boolean_logic.op.upper()} {rhs_subquery}'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias='logic_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath Boolean logic operation to Spark SQL.\n\nNote that evaluation for Boolean logic is only supported for Boolean\noperands of scalar cardinality.\n\nArgs:\nboolean_logic: The FHIRPath AST `BooleanLogic` node.\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def swd_read32(self, offset):\n        \n        value = self._dll.JLINK_SWD_GetU32(offset)\n        return ctypes.c_uint32(value).value", "docstring": "Gets a unit of ``32`` bits from the input buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): the offset (in bits) from which to start reading\n\nReturns:\nThe integer read from the input buffer.", "source": "juraj-google-style"}
{"code": "def restore_state(self, state, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_restore_state_v2(self._reader_ref, state, name=name)\n    else:\n        return gen_io_ops.reader_restore_state(self._reader_ref, state, name=name)", "docstring": "Restore a reader to a previously saved state.\n\nNot all Readers support being restored, so this can produce an\nUnimplemented error.\n\nArgs:\nstate: A string Tensor.\nResult of a SerializeState of a Reader with matching type.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.", "source": "github-repos"}
{"code": "def getsource(classorfunc):\n    if _isbuiltin(classorfunc):\n        return ''\n    try:\n        source = inspect.getsource(classorfunc)\n    except TypeError:\n        source = getsourcefallback(classorfunc)\n    declaration = []\n    lines = source.splitlines()\n    if (PY2 and (not isinstance(source, unicode))):\n        encoding = detect_encoding(iter(lines).next)[0]\n        sourcelines = (s.decode(encoding) for s in lines)\n    else:\n        sourcelines = iter(lines)\n    found_keyword = False\n    for line in sourcelines:\n        words = line.split()\n        if (not words):\n            continue\n        if (words[0] in ('def', 'class')):\n            found_keyword = True\n        if found_keyword:\n            cind = line.find(':')\n            if (cind > 0):\n                declaration.append(line[:(cind + 1)])\n                after_decl = line[(cind + 1):].strip()\n                break\n            else:\n                declaration.append(line)\n    bodylines = list(sourcelines)\n    if (type(classorfunc) == type):\n        cls = classorfunc\n        base_imports = {}\n        for base in cls.__bases__:\n            if ((base.__name__ == 'object') and (base.__module__ == 'builtins')):\n                continue\n            if (base in base_imports):\n                continue\n            if (base.__module__ == '__main__'):\n                continue\n            base_imports[base] = ('from %s import %s' % (base.__module__, base.__name__))\n        cind = declaration[0].index('class ')\n        declstring = (declaration[0][:cind] + ('class %s(%s):%s' % (cls.__name__, ','.join([base.__name__ for base in cls.__bases__]), after_decl)))\n        declaration = [impstring for (c, impstring) in base_imports.items() if (c.__module__ != '__builtin__')]\n        declaration.append(declstring)\n    else:\n        declaration[(- 1)] += after_decl\n    return '\\n'.join((declaration + bodylines))", "docstring": "Return the source code for a class or function.\n\nNotes:\nReturned source will not include any decorators for the object.\nThis will only return the explicit declaration of the object, not any dependencies\n\nArgs:\nclassorfunc (type or function): the object to get the source code for\n\nReturns:\nstr: text of source code (without any decorators). Note: in python 2, this returns unicode", "source": "codesearchnet"}
{"code": "def __init__(self, sdat):\n        \n        self._isteps = {}\n        self._all_isteps_known = False\n        super().__init__(sdat)", "docstring": "Initialization of instances:\n\nArgs:\nsdat (:class:`StagyyData`): the StagyyData instance owning the\n:class:`_Snaps` instance.\nAttributes:\nsdat (:class:`StagyyData`): the StagyyData instance owning the\n:class:`_Snaps` instance.", "source": "juraj-google-style"}
{"code": "def get_path(self, origX, origY, destX, destY):\n    return super(AStar, self).get_path(origX, origY, destX, destY)", "docstring": "Get the shortest path from origXY to destXY.\n\nReturns:\nList[Tuple[int, int]]: Returns a list walking the path from orig\nto dest.\n\nThis excludes the starting point and includes the destination.\n\nIf no path is found then an empty list is returned.", "source": "codesearchnet"}
{"code": "def train_model(self, train_op, cost_to_log, num_steps, feed_vars=(), feed_data=None, print_every=100):\n    costs = [train_op]\n    if (isinstance(cost_to_log, collections.Sequence) and (not isinstance(cost_to_log, six.string_types))):\n        costs.extend(cost_to_log)\n    else:\n        costs.append(cost_to_log)\n    return self.run_model(costs, num_steps, feed_vars=feed_vars, feed_data=feed_data, print_every=print_every)[2:]", "docstring": "Trains the given model.\n\nArgs:\ntrain_op: The training operation.\ncost_to_log: A cost to log.\nnum_steps: Number of batches to run.\nfeed_vars: A list or tuple of the variables that will be fed.\nfeed_data: A generator that produces tuples of the same length as\nfeed_vars.\nprint_every: Print and save every so many steps.\nReturns:\n`cost_to_log` from the final step.", "source": "codesearchnet"}
{"code": "def _on_status_message(self, sequence, topic, message):\n        \n\n        self._logger.debug(\"Received message on (topic=%s): %s\" % (topic, message))\n\n        try:\n            conn_key = self._find_connection(topic)\n        except ArgumentError:\n            self._logger.warn(\"Dropping message that does not correspond with a known connection, message=%s\", message)\n            return\n\n        if messages.ConnectionResponse.matches(message):\n            if self.name != message['client']:\n                self._logger.debug(\"Connection response received for a different client, client=%s, name=%s\", message['client'], self.name)\n                return\n\n            self.conns.finish_connection(conn_key, message['success'], message.get('failure_reason', None))\n        else:\n            self._logger.warn(\"Dropping message that did not correspond with a known schema, message=%s\", message)", "docstring": "Process a status message received\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "juraj-google-style"}
{"code": "def get_variables_in_scope(scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n    scope_name = get_variable_scope_name(scope)\n    if scope_name:\n        scope_name = (re.escape(scope_name) + '/')\n    return tuple(tf.get_collection(collection, scope_name))", "docstring": "Returns a tuple `tf.Variable`s in a scope for a given collection.\n\nArgs:\nscope: `tf.VariableScope` or string to retrieve variables from.\ncollection: Collection to restrict query to. By default this is\n`tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable\nvariables such as moving averages.\n\nReturns:\nA tuple of `tf.Variable` objects.", "source": "codesearchnet"}
{"code": "def decode_body(headers: MutableMapping, body: bytes) -> dict:\n    (type_, encoding) = parse_content_type(headers)\n    decoded_body = body.decode(encoding)\n    if (type_ == 'application/json'):\n        payload = json.loads(decoded_body)\n    elif (decoded_body == 'ok'):\n        payload = {'ok': True}\n    else:\n        payload = {'ok': False, 'data': decoded_body}\n    return payload", "docstring": "Decode the response body\n\nFor 'application/json' content-type load the body as a dictionary\n\nArgs:\nheaders: Response headers\nbody: Response body\n\nReturns:\ndecoded body", "source": "codesearchnet"}
{"code": "def get_model(self, model, model_id):\n        \n        return self._store.find_record(self._get_model_class(model), int(model_id))", "docstring": "Get a single model from the server.\n\nArgs:\nmodel (string): The class as a string.\nmodel_id (string): The integer ID as a string.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: A instance of the model.", "source": "juraj-google-style"}
{"code": "def replace_all(self, replacements):\n    for override in replacements:\n        assert isinstance(override, PTransformOverride)\n        self._replace(override)\n    for override in replacements:\n        self._check_replacement(override)", "docstring": "Dynamically replaces PTransforms in the currently populated hierarchy.\n\nCurrently this only works for replacements where input and output types\nare exactly the same.\n\nTODO: Update this to also work for transform overrides where input and\noutput types are different.\n\nArgs:\nreplacements (List[~apache_beam.pipeline.PTransformOverride]): a list of\n:class:`~apache_beam.pipeline.PTransformOverride` objects.", "source": "github-repos"}
{"code": "def make_fout(fout='./tmp', fmt='pcap'):\n    if (fmt == 'pcap'):\n        from pcapkit.dumpkit import PCAP as output\n    elif (fmt == 'plist'):\n        from dictdumper import PLIST as output\n    elif (fmt == 'json'):\n        from dictdumper import JSON as output\n    elif (fmt == 'tree'):\n        from dictdumper import Tree as output\n        fmt = 'txt'\n    elif (fmt == 'html'):\n        from dictdumper import JavaScript as output\n        fmt = 'js'\n    elif (fmt == 'xml'):\n        from dictdumper import XML as output\n    else:\n        from pcapkit.dumpkit import NotImplementedIO as output\n        if (fmt is not None):\n            warnings.warn(f'Unsupported output format: {fmt}; disabled file output feature', FormatWarning, stacklevel=stacklevel())\n        return (output, '')\n    try:\n        pathlib.Path(fout).mkdir(parents=True, exist_ok=True)\n    except FileExistsError as error:\n        if (fmt is None):\n            warnings.warn(error.strerror, FileWarning, stacklevel=stacklevel())\n        else:\n            raise FileExists(*error.args) from None\n    return (output, fmt)", "docstring": "Make root path for output.\n\nPositional arguments:\n* fout -- str, root path for output\n* fmt -- str, output format\n\nReturns:\n* output -- dumper of specified format", "source": "codesearchnet"}
{"code": "def find_template_filename(self, template_name):\n\n    def next_file():\n        filename = (self.path / template_name)\n        (yield filename)\n        try:\n            exts = self.default_file_extensions\n        except AttributeError:\n            return\n        strfilename = str(filename)\n        for ext in exts:\n            (yield Path((strfilename + ext)))\n    for filename in next_file():\n        if filename.is_file():\n            return filename", "docstring": "Searches for a file matching the given template name.\n\nIf found, this method returns the pathlib.Path object of the found\ntemplate file.\n\nArgs:\ntemplate_name (str): Name of the template, with or without a file\nextension.\n\nReturns:\npathlib.Path: Path to the matching filename.", "source": "codesearchnet"}
{"code": "def swd_sync(self, pad=False):\n    if pad:\n        self._dll.JLINK_SWD_SyncBytes()\n    else:\n        self._dll.JLINK_SWD_SyncBits()\n    return None", "docstring": "Causes a flush to write all data remaining in output buffers to SWD\ndevice.\n\nArgs:\nself (JLink): the ``JLink`` instance\npad (bool): ``True`` if should pad the data to full byte size\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def set_viewbox(self, x, y, w, h):\n        \n        self.attributes['viewBox'] = \"%s %s %s %s\" % (x, y, w, h)\n        self.attributes['preserveAspectRatio'] = 'none'", "docstring": "Sets the origin and size of the viewbox, describing a virtual view area.\n\nArgs:\nx (int): x coordinate of the viewbox origin\ny (int): y coordinate of the viewbox origin\nw (int): width of the viewbox\nh (int): height of the viewbox", "source": "juraj-google-style"}
{"code": "def dropout(x, keep_prob, noise_shape=None, name=None):\n  \n  noise_shape = convert_to_shape(noise_shape)\n  if noise_shape is None:\n    noise_shape = x.shape\n  with tf.variable_scope(name, default_name=\"dropout\"):\n    if keep_prob == 1.0:\n      return x\n    noise = cast(less(random_uniform(\n        x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype)\n    noise /= keep_prob\n    return x * noise", "docstring": "Dropout layer.\n\nArgs:\nx: a Tensor\nkeep_prob: a float between 0.0 and 1.0\nnoise_shape: an optional Shape (a subset of x.shape)\nname: an optional string\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def create_chapter_from_url(self, url, title=None):\n    try:\n        request_object = requests.get(url, headers=self.request_headers, allow_redirects=False)\n    except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):\n        raise ValueError(('%s is an invalid url or no network connection' % url))\n    except requests.exceptions.SSLError:\n        raise ValueError((\"Url %s doesn't have valid SSL certificate\" % url))\n    unicode_string = request_object.text\n    return self.create_chapter_from_string(unicode_string, url, title)", "docstring": "Creates a Chapter object from a url. Pulls the webpage from the\ngiven url, sanitizes it using the clean_function method, and saves\nit as the content of the created chapter. Basic webpage loaded\nbefore any javascript executed.\n\nArgs:\nurl (string): The url to pull the content of the created Chapter\nfrom\ntitle (Option[string]): The title of the created Chapter. By\ndefault, this is None, in which case the title will try to be\ninferred from the webpage at the url.\n\nReturns:\nChapter: A chapter object whose content is the webpage at the given\nurl and whose title is that provided or inferred from the url\n\nRaises:\nValueError: Raised if unable to connect to url supplied", "source": "codesearchnet"}
{"code": "def _verify_and_get_subgroup_size(self, group_assignment, num_shards):\n    if not group_assignment:\n        return None\n    if not (isinstance(group_assignment, list) and all((isinstance(i, list) for i in group_assignment))):\n        raise ValueError(f'Argument `group_assignment` must be a list of lists. Received: {group_assignment}')\n    replica_ids = set()\n    for g in group_assignment:\n        for i in g:\n            replica_ids.add(i)\n    if set(range(num_shards)) != replica_ids:\n        raise ValueError(f'Argument `group_assignment` must be a permutation of range({num_shards}). Received: {group_assignment}')\n    subgroup_size_list = [len(group) for group in group_assignment]\n    if all((subgroup_size_list[0] == size for size in subgroup_size_list)):\n        return subgroup_size_list[0]\n    else:\n        raise ValueError(f'The size of each subgroup in `group_assignment` must be equal. Received: {group_assignment}')", "docstring": "Verify group_assignment and get the subgroup size\".\n\nArgs:\ngroup_assignment: list of group ids for applying the optimizer\nto subgroups.\nnum_shards: The number of TPU shards.\n\nReturns:\nThe size of one subgroup in group_assignment.\n\nRaises:\nValueError: If group_assignment is invalid.", "source": "github-repos"}
{"code": "def mtr_tr_dense(sz):\n  \n  n = 2 ** sz\n  hparams = mtf_bitransformer_base()\n  hparams.d_model = 1024\n  hparams.max_length = 256\n  hparams.batch_size = 128\n  hparams.d_ff = int(4096 * n)\n  hparams.d_kv = 128\n  hparams.encoder_num_heads = int(8 * n)\n  hparams.decoder_num_heads = int(8 * n)\n  \n  hparams.learning_rate_decay_steps = 51400\n  hparams.layout = \"batch:batch;vocab:model;d_ff:model;heads:model\"\n  hparams.mesh_shape = \"batch:32\"\n  hparams.label_smoothing = 0.1\n  hparams.layer_prepostprocess_dropout = 0.1\n  hparams.attention_dropout = 0.1\n  hparams.relu_dropout = 0.1\n  return hparams", "docstring": "Series of machine translation models.\n\nAll models are trained on sequences of 256 tokens.\n\nYou can use the dataset translate_enfr_wmt32k_packed.\n154000 steps = 3 epochs.\n\nArgs:\nsz: an integer\n\nReturns:\na hparams", "source": "juraj-google-style"}
{"code": "def sample_id(self, lon):\n        \n        if self.grid == 'WAC':\n            sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + 1.0 +\n                             (lon * np.pi / 180.0 - float(self.CENTER_LONGITUDE)) *\n                             self.A_AXIS_RADIUS *\n                             np.cos(self.CENTER_LATITUDE * np.pi / 180.0)\n                             / (self.MAP_SCALE * 1e-3))\n        else:\n            sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + float(self.MAP_RESOLUTION)\n                             * (lon - float(self.CENTER_LONGITUDE))) + 1\n        return self._control_sample(sample)", "docstring": "Return the corresponding sample\n\nArgs:\nlon (int): longidute in degree\n\nReturns:\nCorreponding sample", "source": "juraj-google-style"}
{"code": "def shutdown(cluster_info, queues=['input']):\n  \n  def _shutdown(iter):\n    host = util.get_ip_address()\n    executor_id = util.read_executor_id()\n\n    \n    mgr = _get_manager(cluster_info, host, executor_id)\n\n    \n    for node in cluster_info:\n      if node['host'] == host and node['executor_id'] == executor_id:\n        tb_pid = node['tb_pid']\n        if tb_pid != 0:\n          logging.info(\"Stopping tensorboard (pid={0})\".format(tb_pid))\n          subprocess.Popen([\"kill\", str(tb_pid)])\n\n    \n    logging.info(\"Stopping all queues\")\n    for q in queues:\n      try:\n        queue = mgr.get_queue(q)\n        logging.info(\"Feeding None into {0} queue\".format(q))\n        queue.put(None, block=True)\n      except (AttributeError, KeyError):\n        msg = \"Queue '{}' not found on this node, check for exceptions on other nodes.\".format(q)\n        raise Exception(msg)\n\n    logging.info(\"Setting mgr.state to 'stopped'\")\n    mgr.set('state', 'stopped')\n    return [True]\n\n  return _shutdown", "docstring": "Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues.\n\nArgs:\n:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc).\n:queues: *INTERNAL_USE*\n\nReturns:\nA nodeRDD.mapPartitions() function", "source": "juraj-google-style"}
{"code": "def from_string(cls, key, key_id=None):\n    key = _helpers.from_bytes(key)\n    (marker_id, key_bytes) = pem.readPemBlocksFromFile(six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)\n    if (marker_id == 0):\n        private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER')\n    elif (marker_id == 1):\n        (key_info, remaining) = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)\n        if (remaining != b''):\n            raise ValueError('Unused bytes', remaining)\n        private_key_info = key_info.getComponentByName('privateKey')\n        private_key = rsa.key.PrivateKey.load_pkcs1(private_key_info.asOctets(), format='DER')\n    else:\n        raise ValueError('No key could be detected.')\n    return cls(private_key, key_id=key_id)", "docstring": "Construct an Signer instance from a private key in PEM format.\n\nArgs:\nkey (str): Private key in PEM format.\nkey_id (str): An optional key id used to identify the private key.\n\nReturns:\ngoogle.auth.crypt.Signer: The constructed signer.\n\nRaises:\nValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in\nPEM format.", "source": "codesearchnet"}
{"code": "def AsCode(self, indent_per_depth=2):\n    indent = ' ' * indent_per_depth * self.depth\n    tokens_str = ' '.join((tok.value for tok in self._tokens))\n    return indent + tokens_str", "docstring": "Return a \"code\" representation of this line.\n\nThe code representation shows how the line would be printed out as code.\n\nTODO(eliben): for now this is rudimentary for debugging - once we add\nformatting capabilities, this method will have other uses (not all tokens\nhave spaces around them, for example).\n\nArguments:\nindent_per_depth: how much spaces to indent per depth level.\n\nReturns:\nA string representing the line as code.", "source": "github-repos"}
{"code": "def real(input, name=None):\n    with ops.name_scope(name, 'Real', [input]) as name:\n        input = ops.convert_to_tensor(input, name='input')\n        if input.dtype.is_complex:\n            real_dtype = input.dtype.real_dtype\n            return gen_math_ops.real(input, Tout=real_dtype, name=name)\n        elif input.dtype.is_numeric:\n            return input\n        else:\n            raise TypeError('input must be a numeric tensor, but got tensor with dtype {}'.format(input.dtype))", "docstring": "Returns the real part of a complex (or real) tensor.\n\nGiven a tensor `input`, this operation returns a tensor of type `float` that\nis the real part of each element in `input` considered as a complex number.\n\nFor example:\n\n```python\nx = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\ntf.math.real(x)  # [-2.25, 3.25]\n```\n\nIf `input` is already real, it is returned unchanged.\n\nArgs:\ninput: A `Tensor`. Must have numeric type.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `float32` or `float64`.", "source": "github-repos"}
{"code": "def _gen_condition(cls, initial, new_public_keys):\n    try:\n        threshold = len(new_public_keys)\n    except TypeError:\n        threshold = None\n    if (isinstance(new_public_keys, list) and (len(new_public_keys) > 1)):\n        ffill = ThresholdSha256(threshold=threshold)\n        reduce(cls._gen_condition, new_public_keys, ffill)\n    elif (isinstance(new_public_keys, list) and (len(new_public_keys) <= 1)):\n        raise ValueError('Sublist cannot contain single owner')\n    else:\n        try:\n            new_public_keys = new_public_keys.pop()\n        except AttributeError:\n            pass\n        if isinstance(new_public_keys, Fulfillment):\n            ffill = new_public_keys\n        else:\n            ffill = Ed25519Sha256(public_key=base58.b58decode(new_public_keys))\n    initial.add_subfulfillment(ffill)\n    return initial", "docstring": "Generates ThresholdSha256 conditions from a list of new owners.\n\nNote:\nThis method is intended only to be used with a reduce function.\nFor a description on how to use this method, see\n:meth:`~.Output.generate`.\n\nArgs:\ninitial (:class:`cryptoconditions.ThresholdSha256`):\nA Condition representing the overall root.\nnew_public_keys (:obj:`list` of :obj:`str`|str): A list of new\nowners or a single new owner.\n\nReturns:\n:class:`cryptoconditions.ThresholdSha256`:", "source": "codesearchnet"}
{"code": "def ParseText(self, text, eof=True):\n    lines = []\n    if text:\n        lines = text.splitlines()\n    for line in lines:\n        self._CheckLine(line)\n        if (self._cur_state_name in ('End', 'EOF')):\n            break\n    if ((self._cur_state_name != 'End') and ('EOF' not in self.states) and eof):\n        self._AppendRecord()\n    return self._result", "docstring": "Passes CLI output through FSM and returns list of tuples.\n\nFirst tuple is the header, every subsequent tuple is a row.\n\nArgs:\ntext: (str), Text to parse with embedded newlines.\neof: (boolean), Set to False if we are parsing only part of the file.\nSuppresses triggering EOF state.\n\nRaises:\nTextFSMError: An error occurred within the FSM.\n\nReturns:\nList of Lists.", "source": "codesearchnet"}
{"code": "def outer(vector1, vector2=None):\n    if (vector2 is None):\n        vector2 = np.array(vector1).conj()\n    else:\n        vector2 = np.array(vector2).conj()\n    return np.outer(vector1, vector2)", "docstring": "Construct the outer product of two vectors.\n\nThe second vector argument is optional, if absent the projector\nof the first vector will be returned.\n\nArgs:\nvector1 (ndarray): the first vector.\nvector2 (ndarray): the (optional) second vector.\n\nReturns:\nnp.array: The matrix |v1><v2|.", "source": "codesearchnet"}
{"code": "def truncate(text, length=255):\n        \n\n        lines = []\n        i = 0\n        while i < len(text) - 1:\n            try:\n                lines.append(text[i:i+length])\n                i += length\n\n            except IndexError as e:\n                lines.append(text[i:])\n        return lines", "docstring": "Splits the message into a list of strings of of length `length`\n\nArgs:\ntext (str): The text to be divided\nlength (int, optional): The length of the chunks of text. \\\nDefaults to 255.\n\nReturns:\nlist: Text divided into chunks of length `length`", "source": "juraj-google-style"}
{"code": "def close(self, suppress_warning: bool = False) -> None:\n\t\t\n\t\tif self._file is None:\n\t\t\tif not suppress_warning:\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tlogging.warn(\"Connection to %s is already closed\", self.filename)\n\t\telse:\n\t\t\tself._file.close()\n\t\t\tself._file = None\n\t\tself.layers = None  \n\t\tself.ra = None  \n\t\tself.row_attrs = None  \n\t\tself.ca = None  \n\t\tself.col_attrs = None  \n\t\tself.row_graphs = None  \n\t\tself.col_graphs = None  \n\t\tself.shape = (0, 0)\n\t\tself._closed = True", "docstring": "Close the connection. After this, the connection object becomes invalid. Warns user if called after closing.\n\nArgs:\nsuppress_warning:\t\tSuppresses warning message if True (defaults to false)", "source": "juraj-google-style"}
{"code": "def ProduceExtractionWarning(self, message, path_spec=None):\n    if (not self._storage_writer):\n        raise RuntimeError('Storage writer not set.')\n    if ((not path_spec) and self._file_entry):\n        path_spec = self._file_entry.path_spec\n    parser_chain = self.GetParserChain()\n    warning = warnings.ExtractionWarning(message=message, parser_chain=parser_chain, path_spec=path_spec)\n    self._storage_writer.AddWarning(warning)\n    self._number_of_warnings += 1\n    self.last_activity_timestamp = time.time()", "docstring": "Produces an extraction warning.\n\nArgs:\nmessage (str): message of the warning.\npath_spec (Optional[dfvfs.PathSpec]): path specification, where None\nwill use the path specification of current file entry set in\nthe mediator.\n\nRaises:\nRuntimeError: when storage writer is not set.", "source": "codesearchnet"}
{"code": "def from_timestamp_pb(cls, stamp):\n        \n        microseconds = int(stamp.seconds * 1e6)\n        bare = from_microseconds(microseconds)\n        return cls(\n            bare.year,\n            bare.month,\n            bare.day,\n            bare.hour,\n            bare.minute,\n            bare.second,\n            nanosecond=stamp.nanos,\n            tzinfo=pytz.UTC,\n        )", "docstring": "Parse RFC 3339-compliant timestamp, preserving nanoseconds.\n\nArgs:\nstamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message\n\nReturns:\n:class:`DatetimeWithNanoseconds`:\nan instance matching the timestamp message", "source": "juraj-google-style"}
{"code": "def exists(self, path=None, client_kwargs=None, assume_exists=None):\n    try:\n        self.head(path, client_kwargs)\n    except ObjectNotFoundError:\n        return False\n    except ObjectPermissionError:\n        if (assume_exists is None):\n            raise\n        return assume_exists\n    return True", "docstring": "Return True if path refers to an existing path.\n\nArgs:\npath (str): Path or URL.\nclient_kwargs (dict): Client arguments.\nassume_exists (bool or None): This value define the value to return\nin the case there is no enough permission to determinate the\nexisting status of the file. If set to None, the permission\nexception is reraised (Default behavior). if set to True or\nFalse, return this value.\n\nReturns:\nbool: True if exists.", "source": "codesearchnet"}
{"code": "def _maybe_init_tags(self, run_id, tag_to_metadata):\n    \n    cursor = self._db.cursor()\n    \n    \n    cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?',\n                   (run_id,))\n    tag_to_id = {row[0]: row[1] for row in cursor.fetchall()\n                 if row[0] in tag_to_metadata}\n    new_tag_data = []\n    for tag, metadata in six.iteritems(tag_to_metadata):\n      if tag not in tag_to_id:\n        tag_id = self._create_id()\n        tag_to_id[tag] = tag_id\n        new_tag_data.append((run_id, tag_id, tag, time.time(),\n                             metadata.display_name,\n                             metadata.plugin_data.plugin_name,\n                             self._make_blob(metadata.plugin_data.content)))\n    cursor.executemany(\n        ,\n        new_tag_data)\n    return tag_to_id", "docstring": "Returns a tag-to-ID map for the given tags, creating rows if needed.\n\nArgs:\nrun_id: the ID of the run to which these tags belong.\ntag_to_metadata: map of tag name to SummaryMetadata for the tag.", "source": "juraj-google-style"}
{"code": "def run_in_v1_v2(device_to_use: Optional[str]=None, assert_no_eager_garbage: bool=False) -> Callable[[Callable[..., Any]], Callable[..., None]]:\n\n    def decorator(f: Callable[..., Any]) -> Callable[..., None]:\n        decorator_tag = 'wrapped_with_v1_v2_decorator'\n        if hasattr(f, decorator_tag):\n            return f\n\n        def decorated(self: 'TensorFlowTestCase', *args, **kwargs) -> None:\n            logging.info('Running %s in V1 mode.', f.__name__)\n            try:\n                with self.subTest('V1_mode'):\n                    v2_compat.disable_v2_behavior()\n                    f(self, *args, **kwargs)\n            except unittest.case.SkipTest:\n                pass\n\n            def run_v2(self: 'TensorFlowTestCase', **kwargs) -> None:\n                logging.info('Running %s in V2 mode.', f.__name__)\n                if device_to_use:\n                    with ops.device(device_to_use):\n                        f(self, *args, **kwargs)\n                else:\n                    f(self, *args, **kwargs)\n            if assert_no_eager_garbage:\n                ops.reset_default_graph()\n                run_v2 = assert_no_new_tensors(assert_no_garbage_created(run_v2))\n            self.tearDown()\n            self._tempdir = None\n            ops.reset_default_graph()\n            v2_compat.enable_v2_behavior()\n            with self.subTest('V2_mode'):\n                self.setUp()\n                run_v2(self, **kwargs)\n        tf_decorated = tf_decorator.make_decorator(f, decorated)\n        tf_decorated.__dict__[decorator_tag] = True\n        return tf_decorated\n    return decorator", "docstring": "Execute the decorated test in v1 and v2 modes.\n\nThe overall execution is similar to that of `run_in_graph_and_eager_mode`.\n\nArgs:\ndevice_to_use: A string in the following format: \"/device:CPU:0\".\nassert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage\ncollector and asserts that no extra garbage has been created when running\nthe test with eager execution enabled. This will fail if there are\nreference cycles (e.g. a = []; a.append(a)). Off by default because some\ntests may create garbage for legitimate reasons (e.g. they define a class\nwhich inherits from `object`), and because DEBUG_SAVEALL is sticky in some\nPython interpreters (meaning that tests which rely on objects being\ncollected elsewhere in the unit test file will not work). Additionally,\nchecks that nothing still has a reference to Tensors that the test\nallocated.\n\nReturns:\nA decorator that runs a given test in v1 and v2 modes.", "source": "github-repos"}
{"code": "def symlink(self, link_target, path, dir_fd=None):\n    link_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)\n    self.filesystem.create_symlink(path, link_target, create_missing_dirs=False)", "docstring": "Creates the specified symlink, pointed at the specified link target.\n\nArgs:\nlink_target: The target of the symlink.\npath: Path to the symlink to create.\ndir_fd: If not `None`, the file descriptor of a directory,\nwith `link_target` being relative to this directory.\nNew in Python 3.3.\n\nRaises:\nOSError:  if the file already exists.", "source": "codesearchnet"}
{"code": "def convert_saved_model_v1(saved_model_path, exported_names, tags, lift_variables, include_variables_in_initializers, upgrade_legacy=True, show_debug_info=False):\n    return pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(saved_model_path, exported_names, tags, lift_variables, include_variables_in_initializers, upgrade_legacy, show_debug_info)", "docstring": "Converts a v1 SavedModel to MLIR module.\n\nArgs:\nsaved_model_path: Path to SavedModel.\nexported_names: Names to export.\ntags: MetaGraphDef to be loaded is identified by the supplied tags.\nlift_variables: Whether to promote tf.VarHandleOp to resource arguments.\ninclude_variables_in_initializers: Keeps the variables in initializers\nbefore lifting variables.\nupgrade_legacy: Functionalize the input graph before importing.\nshow_debug_info: Whether to include locations in the emitted textual form.\n\nReturns:\nA textual representation of the MLIR module corresponding to the\nSavedModule.", "source": "github-repos"}
{"code": "def _find_extraneous_saver_nodes(graph_def, saver_def):\n    nodes = {node_def.name: (set((tensor.get_op_name(x) for x in node_def.input)), node_def.op) for node_def in graph_def.node}\n    retain_scope_save = None\n    retain_scope_restore = None\n    if saver_def is not None:\n        save_op_name = tensor.get_op_name(saver_def.save_tensor_name)\n        restore_op_name = tensor.get_op_name(saver_def.restore_op_name)\n        retain_scope_restore = _get_scope(restore_op_name) + '/'\n        retain_scope_save = _get_scope(save_op_name) + '/'\n    all_saver_node_names = set((name for name, (_, op) in nodes.items() if op in SAVE_AND_RESTORE_OPS))\n    all_saver_scopes = set((_get_scope(x) for x in all_saver_node_names)) - all_saver_node_names\n    all_saver_scopes = set((x + '/' for x in all_saver_scopes))\n    extraneous_scopes = all_saver_scopes - set([retain_scope_save, retain_scope_restore])\n    extraneous_node_names = set()\n    for name, _ in nodes.items():\n        for extraneous_scope in extraneous_scopes:\n            if name.startswith(extraneous_scope):\n                extraneous_node_names.add(name)\n                break\n    return extraneous_node_names", "docstring": "Identifies any nodes in the graph_def related to unused Savers.\n\nThis approach assumes that each Saver is cleanly isolated in its own name\nscope, so we need only identify the scopes associated with extraneous Savers\nand return all the nodes in those scopes.\n\nArgs:\ngraph_def: a GraphDef proto to evaluate.\nsaver_def: a SaverDef proto referencing Save/Restore ops to be retained.\nReturns:\nAn iterable of node names that may be safely omitted.", "source": "github-repos"}
{"code": "def get_data_for_sensors(macs=[], search_duratio_sec=5, bt_device=''):\n        \n\n        log.info('Get latest data for sensors. Stop with Ctrl+C.')\n        log.info('Stops automatically in %ss', search_duratio_sec)\n        log.info('MACs: %s', macs)\n\n        datas = dict()\n\n        for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec, bt_device=bt_device):\n            datas[new_data[0]] = new_data[1]\n\n        return datas", "docstring": "Get lates data for sensors in the MAC's list.\n\nArgs:\nmacs (array): MAC addresses\nsearch_duratio_sec (int): Search duration in seconds. Default 5\nbt_device (string): Bluetooth device id\nReturns:\ndict: MAC and state of found sensors", "source": "juraj-google-style"}
{"code": "def discover(names=None, pattern=['*.py'], skip='efp', dry_run=False, blacklist=None, name_greps=None,\n             manual_reset=False, delete_history=False, max_devices=0,\n             continue_from=None, result_file='./result.json', auto_reboot=False, keep_explorer=False,\n             add_all_devices=False):\n    \n    if not os.path.exists(settings.OUTPUT_PATH):\n        os.mkdir(settings.OUTPUT_PATH)\n\n    if delete_history:\n        os.system('del history.json')\n\n    if blacklist:\n        try:\n            excludes = [line.strip('\\n') for line in open(blacklist, 'r').readlines()\n                        if not line.startswith('\n        except:\n            logger.exception('Failed to open test case black list file')\n            raise\n    else:\n        excludes = []\n\n    log = None\n    if os.path.isfile(result_file):\n        try:\n            log = json.load(open(result_file, 'r'))\n        except:\n            logger.exception('Failed to open result file')\n\n    if not log:\n        log = {}\n        json.dump(log, open(result_file, 'w'), indent=2)\n\n    suite = unittest.TestSuite()\n    discovered = unittest.defaultTestLoader.discover('cases', pattern)\n\n    if names and continue_from:\n        names = names[names.index(continue_from):]\n\n    for s1 in discovered:\n        for s2 in s1:\n            for case in s2:\n                if case.__class__ is HarnessCase:\n                    continue\n                case_name = unicode(case.__class__.__name__)\n\n                \n                if name_greps and not any(fnmatch.fnmatch(case_name, name_grep) for name_grep in name_greps):\n                    logger.info('case[%s] skipped by name greps', case_name)\n                    continue\n\n                \n                if len(names) and case_name not in names:\n                    logger.info('case[%s] skipped', case_name)\n                    continue\n\n                \n                if case_name in log.keys():\n                    if (log[case_name]['passed'] and ('p' in skip)) \\\n                        or (log[case_name]['passed'] is False and ('f' in skip)) \\\n                        or (log[case_name]['passed'] is None and ('e' in skip)):\n                        logger.warning('case[%s] skipped for its status[%s]', case_name, log[case_name]['passed'])\n                        continue\n\n                \n                if continue_from:\n                    if continue_from != case_name:\n                        logger.warning('case[%s] skipped for continue from[%s]', case_name, continue_from)\n                        continue\n                    else:\n                        continue_from = None\n\n                \n                if case_name in excludes:\n                    logger.warning('case[%s] skipped for blacklist', case_name)\n                    continue\n\n                \n                if max_devices and case.golden_devices_required > max_devices:\n                    logger.warning('case[%s] skipped for exceeding max golden devices allowed[%d]', case_name, max_devices)\n                    continue\n\n                suite.addTest(case)\n                logger.info('case[%s] added', case_name)\n\n    if auto_reboot:\n        argv = []\n        argv.append('\"%s\"' % os.sep.join([os.getcwd(), 'start.bat']))\n        argv.extend(['-p', pattern])\n        argv.extend(['-k', skip])\n        argv.extend(['-o', result_file])\n        argv.append('-a')\n\n        if manual_reset:\n            argv.append('-m')\n\n      
  if delete_history:\n            argv.append('-d')\n\n        auto_reboot_args = argv + names\n    else:\n        auto_reboot_args = None\n        os.system('del \"%s\"' % RESUME_SCRIPT_PATH)\n\n    \n    if manual_reset:\n        settings.PDU_CONTROLLER_TYPE = 'MANUAL_PDU_CONTROLLER'\n        settings.PDU_CONTROLLER_OPEN_PARAMS = {}\n        settings.PDU_CONTROLLER_REBOOT_PARAMS = {}\n\n    result = SimpleTestResult(result_file, auto_reboot_args, keep_explorer, add_all_devices)\n    for case in suite:\n        logger.info(case.__class__.__name__)\n\n    if dry_run:\n        return\n\n    suite.run(result)\n    return result", "docstring": "Discover all test cases and skip those passed\n\nArgs:\npattern (str): Pattern to match case modules, refer python's unittest\ndocumentation for more details\nskip (str): types cases to skip", "source": "juraj-google-style"}
{"code": "def wait_at_barrier(self, barrier_id, timeout_in_ms):\n    ensure_initialized()\n    pywrap_tfe.TFE_WaitAtBarrier(self._context_handle, barrier_id, timeout_in_ms)", "docstring": "Blocks until all coordinated tasks are at the barrier.\n\nThe barrier may fail if it times out or if one of the tasks is unhealthy.\n\nArgs:\nbarrier_id: Unique string identifying the barrier.\ntimeout_in_ms: Duration before the barrier times out and fails.", "source": "github-repos"}
{"code": "def enableSync(self, url, definition=None):\n    adminFS = AdminFeatureService(url=url, securityHandler=self._securityHandler)\n    cap = str(adminFS.capabilities)\n    existingDef = {}\n    enableResults = 'skipped'\n    if ('Sync' in cap):\n        return 'Sync is already enabled'\n    else:\n        capItems = cap.split(',')\n        capItems.append('Sync')\n        existingDef['capabilities'] = ','.join(capItems)\n        enableResults = adminFS.updateDefinition(json_dict=existingDef)\n        if ('error' in enableResults):\n            return enableResults['error']\n    adminFS = None\n    del adminFS\n    return enableResults", "docstring": "Enables Sync capability for an AGOL feature service.\n\nArgs:\nurl (str): The URL of the feature service.\ndefinition (dict): A dictionary containing valid definition values. Defaults to ``None``.\nReturns:\ndict: The result from :py:func:`arcrest.hostedservice.service.AdminFeatureService.updateDefinition`.", "source": "codesearchnet"}
{"code": "def _get(self, url, params=None):\n        \n        if not params:\n            params = {}\n\n        params.update({'login': self.login, 'key': self.key})\n\n        response_json = requests.get(self.api_url + url, params).json()\n\n        return self._process_response(response_json)", "docstring": "Used by every other method, it makes a GET request with the given params.\n\nArgs:\nurl (str): relative path of a specific service (account_info, ...).\nparams (:obj:`dict`, optional): contains parameters to be sent in the GET request.\n\nReturns:\ndict: results of the response of the GET request.", "source": "juraj-google-style"}
{"code": "def __init__(self, tcex):\n        \n        self._tcex = tcex\n        self._is_organization = False\n        self._notification_type = None\n        self._recipients = None\n        self._priority = 'Low'", "docstring": "Initialize the Class properties.\n\nArgs:\ntcex (obj): An instance of TcEx object.", "source": "juraj-google-style"}
{"code": "def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'):\n    \n    triplet = []\n    try:\n        if len(hexdigits) == 3:\n            for digit in hexdigits:\n                digit = int(digit, 16)\n                triplet.append((digit * 16) + digit)\n        elif len(hexdigits) == 6:\n            triplet.extend(int(hexdigits[i:i+2], 16) for i in (0, 2, 4))\n        else:\n            raise ValueError('wrong length: %r' % hexdigits)\n    except ValueError:\n        return None\n\n    return find_nearest_color_index(*triplet,\n                                    color_table=color_table,\n                                    method=method)", "docstring": "Given a three or six-character hex digit string, return the nearest\ncolor index.\n\nArguments:\nhexdigits:  a three/6 digit hex string, e.g. 'b0b', '123456'\n\nReturns:\nint, None: index, or None on error.", "source": "juraj-google-style"}
{"code": "def from_values_indices(cls, values, indices, populate=False, structure=None, voigt_rank=None, vsym=True, verbose=False):\n    indices = np.array(indices)\n    if voigt_rank:\n        shape = (([3] * (voigt_rank % 2)) + ([6] * (voigt_rank \n    else:\n        shape = (np.ceil((np.max((indices + 1), axis=0) / 3.0)) * 3)\n    base = np.zeros(shape.astype(int))\n    for (v, idx) in zip(values, indices):\n        base[tuple(idx)] = v\n    if (6 in shape):\n        obj = cls.from_voigt(base)\n    else:\n        obj = cls(base)\n    if populate:\n        assert structure, 'Populate option must include structure input'\n        obj = obj.populate(structure, vsym=vsym, verbose=verbose)\n    elif structure:\n        obj = obj.fit_to_structure(structure)\n    return obj", "docstring": "Creates a tensor from values and indices, with options\nfor populating the remainder of the tensor.\n\nArgs:\nvalues (floats): numbers to place at indices\nindices (array-likes): indices to place values at\npopulate (bool): whether to populate the tensor\nstructure (Structure): structure to base population\nor fit_to_structure on\nvoigt_rank (int): full tensor rank to indicate the\nshape of the resulting tensor.  This is necessary\nif one provides a set of indices more minimal than\nthe shape of the tensor they want, e.g.\nTensor.from_values_indices((0, 0), 100)\nvsym (bool): whether to voigt symmetrize during the\noptimization procedure\nverbose (bool): whether to populate verbosely", "source": "codesearchnet"}
{"code": "def env_valid(env):\n  \n  if env not in EFConfig.ENV_LIST:\n    raise ValueError(\"unknown env: {}; env must be one of: \".format(env) + \", \".join(EFConfig.ENV_LIST))\n  return True", "docstring": "Given an env, determine if it's valid\nArgs:\nenv: the env to check\nReturns:\nTrue if the env is valid\nRaises:\nValueError with message if the env is not valid", "source": "juraj-google-style"}
{"code": "def proj_path(*path_parts):\n    \n    \n    path_parts = path_parts or ['.']\n\n    \n    if not os.path.isabs(path_parts[0]):\n        proj_path = _find_proj_root()\n\n        if proj_path is not None:\n            path_parts = [proj_path] + list(path_parts)\n\n    return os.path.normpath(os.path.join(*path_parts))", "docstring": "Return absolute path to the repo dir (root project directory).\n\nArgs:\npath (str):\nThe path relative to the project root (pelconf.yaml).\n\nReturns:\nstr: The given path converted to an absolute path.", "source": "juraj-google-style"}
{"code": "def fix_report(self, report, errors='drop', prefer='before'):\n    if (not isinstance(report, SignedListReport)):\n        raise ArgumentError('Report must be a SignedListReport', report=report)\n    if (errors not in ('drop',)):\n        raise ArgumentError(\"Unknown errors handler: {}, supported=['drop']\".format(errors))\n    self.ensure_prepared()\n    fixed_readings = []\n    dropped_readings = 0\n    for reading in report.visible_readings:\n        assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)\n        if (assignment is None):\n            dropped_readings += 1\n            continue\n        fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value, reading_time=assignment.utc, reading_id=reading.reading_id)\n        fixed_readings.append(fixed_reading)\n    fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id, selector=report.streamer_selector, streamer=report.origin_streamer, sent_timestamp=report.sent_timestamp)\n    fixed_report.received_time = report.received_time\n    if (dropped_readings > 0):\n        self._logger.warning('Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X', dropped_readings, len(report.visible_readings), report.report_id, report.origin)\n    return fixed_report", "docstring": "Perform utc assignment on all readings in a report.\n\nThe returned report will have all reading timestamps in UTC. This only\nworks on SignedListReport objects.  Note that the report should\ntypically have previously been added to the UTC assigner using\nadd_report or no reference points from the report will be used.\n\nArgs:\nreport (SignedListReport): The report that we should fix.\nerrors (str): The behavior that we should have when we can't\nfix a given reading.  The only currently support behavior is\ndrop, which means that the reading will be dropped and not\nincluded in the new report.\nprefer (str): Whether to prefer fixing readings by looking for\nreference points after the reading or before, all other things\nbeing equal.  See the description of ``assign_utc``.\n\nReturns:\nSignedListReport: The report with UTC timestamps.", "source": "codesearchnet"}
{"code": "def render_html(root, options=0, extensions=None):\n    \n    if extensions is None:\n        extensions = _cmark.ffi.NULL\n\n    raw_result = _cmark.lib.cmark_render_html(\n        root, options, extensions)\n\n    return _cmark.ffi.string(raw_result).decode('utf-8')", "docstring": "Render a given syntax tree as HTML.\n\nArgs:\nroot (Any): The reference to the root node of the syntax tree.\noptions (int): The cmark options.\nextensions (Any): The reference to the syntax extensions, generally\nfrom :func:`parser_get_syntax_extensions`\n\nReturns:\nstr: The rendered HTML.", "source": "juraj-google-style"}
{"code": "def _find_metric_value(session_or_group, metric_name):\n  \n  \n  \n  \n  \n  for metric_value in session_or_group.metric_values:\n    if (metric_value.name.tag == metric_name.tag and\n        metric_value.name.group == metric_name.group):\n      return metric_value", "docstring": "Returns the metric_value for a given metric in a session or session group.\n\nArgs:\nsession_or_group: A Session protobuffer or SessionGroup protobuffer.\nmetric_name: A MetricName protobuffer. The metric to search for.\nReturns:\nA MetricValue protobuffer representing the value of the given metric or\nNone if no such metric was found in session_or_group.", "source": "juraj-google-style"}
{"code": "def retry(self, **kwargs):\n        \n        path = '%s/%s/retry' % (self.manager.path, self.get_id())\n        self.manager.gitlab.http_post(path)", "docstring": "Retry the job.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabJobRetryError: If the job could not be retried", "source": "juraj-google-style"}
{"code": "def __init__(self, agent, environment, repeat_actions=1, history=None, id_=0):\n        \n        super(ParallelRunner, self).__init__(agent, environment, repeat_actions, history)\n\n        self.id = id_  \n        self.current_timestep = None  \n        self.episode_actions = []\n        self.num_parallel = self.agent.execution['num_parallel']\n        print('ParallelRunner with {} parallel buffers.'.format(self.num_parallel))", "docstring": "Initialize a single Runner object (one Agent/one Environment).\n\nArgs:\nid_ (int): The ID of this Runner (for distributed TF runs).", "source": "juraj-google-style"}
{"code": "def setDirname(self, dirname):\n        \n        \n        \n        sep = utils._getPathSep(dirname)\n        if not dirname.endswith(sep):\n            dirname += sep\n\n        self._dir = utils.asString(dirname)", "docstring": "Set a new directory name for the sequence.\n\nArgs:\ndirname (str): the new directory name", "source": "juraj-google-style"}
{"code": "def slice_batch_indices(indices):\n    num_in_full_batch = num_full_batches * batch_size\n    first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])\n    first_k_indices = array_ops.reshape(first_k_indices, [num_full_batches, batch_size])\n    flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)\n    if self._partial_batch_size:\n        index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(indices, [num_in_full_batch], [self._partial_batch_size]))\n        flat_dataset = flat_dataset.concatenate(index_remainder)\n    if shuffle == 'batch':\n        flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)\n    return flat_dataset", "docstring": "Convert a Tensor of indices into a dataset of batched indices.\n\nThis step can be accomplished in several ways. The most natural is to\nslice the Tensor in a Dataset map. (With a condition on the upper index to\nhandle the partial batch.) However it turns out that coercing the Tensor\ninto a shape which is divisible by the batch size (and handling the last\npartial batch separately) allows for a much more favorable memory access\npattern and improved performance.\n\nArgs:\nindices: Tensor which determines the data order for an entire epoch.\n\nReturns:\nA Dataset of batched indices.", "source": "github-repos"}
{"code": "def WriteEventBody(self, event):\n    \n    inode = getattr(event, 'inode', None)\n    if inode is None:\n      event.inode = 0\n\n    try:\n      message, _ = self._output_mediator.GetFormattedMessages(event)\n    except errors.WrongFormatter:\n      message = None\n\n    if message:\n      event.message = message\n\n    json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event)\n    json_string = json.dumps(json_dict, sort_keys=True)\n    \n    if py2to3.PY_2:\n      json_string = codecs.decode(json_string, 'ascii')\n    self._output_writer.Write(json_string)\n    self._output_writer.Write('\\n')", "docstring": "Writes the body of an event object to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def getVariable(self, name):\n    return lock_and_call((lambda : Variable(self._impl.getVariable(name))), self._lock)", "docstring": "Get the variable with the corresponding name.\n\nArgs:\nname: Name of the variable to be found.\n\nRaises:\nTypeError: if the specified variable does not exist.", "source": "codesearchnet"}
{"code": "def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting Linear ...')\n    if (names == 'short'):\n        tf_name = ('FC' + random_string(6))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    bias_name = '{0}.bias'.format(w_name)\n    weights_name = '{0}.weight'.format(w_name)\n    W = weights[weights_name].numpy().transpose()\n    (input_channels, output_channels) = W.shape\n    keras_weights = [W]\n    has_bias = False\n    if (bias_name in weights):\n        bias = weights[bias_name].numpy()\n        keras_weights = [W, bias]\n        has_bias = True\n    dense = keras.layers.Dense(output_channels, weights=keras_weights, use_bias=has_bias, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros')\n    layers[scope_name] = dense(layers[inputs[0]])", "docstring": "Convert Linear.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def getUserSid(username):\n    \n    if six.PY2:\n        username = _to_unicode(username)\n\n    domain = win32api.GetComputerName()\n    if username.find('\\\\') != -1:\n        domain = username.split('\\\\')[0]\n        username = username.split('\\\\')[-1]\n    domain = domain.upper()\n    return win32security.ConvertSidToStringSid(\n        win32security.LookupAccountName(None, domain + '\\\\' + username)[0])", "docstring": "Get the Security ID for the user\n\nArgs:\nusername (str): The user name for which to look up the SID\n\nReturns:\nstr: The user SID\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.getUserSid jsnuffy", "source": "juraj-google-style"}
{"code": "def imdb(limit=None, shuffle=True):\n    \n\n    movie_review_url = 'http:\n\n    \n    path = keras.utils.get_file(\n        'aclImdb.tar.gz', movie_review_url, extract=True)[:-7]\n\n    X_train, y_train = read_pos_neg_data(path, 'train', limit)\n    X_test, y_test = read_pos_neg_data(path, 'test', limit)\n\n    if shuffle:\n        X_train, y_train = sklearn.utils.shuffle(X_train, y_train)\n        X_test, y_test = sklearn.utils.shuffle(X_test, y_test)\n\n    return X_train, X_test, y_train, y_test", "docstring": "Downloads (and caches) IMDB Moview Reviews. 25k training data, 25k test data\n\nArgs:\nlimit: get only first N items for each class\n\nReturns:\n[X_train, y_train, X_test, y_test]", "source": "juraj-google-style"}
{"code": "def _run(self, data, store, signal, context, *, success_callback=None, stop_callback=None, abort_callback=None):\n    if (data is None):\n        data = MultiTaskData()\n        data.add_dataset(self._name)\n    try:\n        if (self._callback_init is not None):\n            self._callback_init(data, store, signal, context)\n        result = self.run(data, store, signal, context)\n        if (self._callback_finally is not None):\n            self._callback_finally(TaskStatus.Success, data, store, signal, context)\n        if (success_callback is not None):\n            success_callback()\n    except StopTask as err:\n        if (self._callback_finally is not None):\n            self._callback_finally(TaskStatus.Stopped, data, store, signal, context)\n        if (stop_callback is not None):\n            stop_callback(exc=err)\n        result = (Action(data, limit=[]) if err.skip_successors else None)\n    except AbortWorkflow as err:\n        if (self._callback_finally is not None):\n            self._callback_finally(TaskStatus.Aborted, data, store, signal, context)\n        if (abort_callback is not None):\n            abort_callback(exc=err)\n        result = None\n        signal.stop_workflow()\n    except:\n        if (self._callback_finally is not None):\n            self._callback_finally(TaskStatus.Error, data, store, signal, context)\n        signal.stop_workflow()\n        raise\n    if (result is None):\n        data.flatten(in_place=True)\n        data.add_task_history(self.name)\n        return Action(data)\n    else:\n        if (not isinstance(result, Action)):\n            raise TaskReturnActionInvalid()\n        result.data.flatten(in_place=True)\n        result.data.add_task_history(self.name)\n        return result", "docstring": "The internal run method that decorates the public run method.\n\nThis method makes sure data is being passed to and from the task.\n\nArgs:\ndata (MultiTaskData): The data object that has been passed from the\npredecessor task.\nstore (DataStoreDocument): The persistent data store object that allows the\ntask to store data for access across the current\nworkflow run.\nsignal (TaskSignal): The signal object for tasks. It wraps the construction\nand sending of signals into easy to use methods.\ncontext (TaskContext): The context in which the tasks runs.\nsuccess_callback: This function is called when the task completed successfully\nstop_callback: This function is called when a StopTask exception was raised.\nabort_callback: This function is called when an AbortWorkflow exception\nwas raised.\n\nRaises:\nTaskReturnActionInvalid: If the return value of the task is not\nan Action object.\n\nReturns:\nAction: An Action object containing the data that should be passed on\nto the next task and optionally a list of successor tasks that\nshould be executed.", "source": "codesearchnet"}
{"code": "def __readCommissioningLogs(self, durationInSeconds):\n        \n        self.logThreadStatus = self.logStatus['running']\n        logs = Queue()\n        t_end = time.time() + durationInSeconds\n        while time.time() < t_end:\n            time.sleep(0.3)\n\n            if self.logThreadStatus == self.logStatus['pauseReq']:\n                self.logThreadStatus = self.logStatus['paused']\n\n            if self.logThreadStatus != self.logStatus['running']:\n                continue\n\n            try:\n                line = self._readline()\n                if line:\n                    print line\n                    logs.put(line)\n\n                    if \"Join success\" in line:\n                        self.joinCommissionedStatus = self.joinStatus['succeed']\n                        break\n                    elif \"Join failed\" in line:\n                        self.joinCommissionedStatus = self.joinStatus['failed']\n                        break\n\n            except Exception:\n                pass\n\n        self.logThreadStatus = self.logStatus['stop']\n        return logs", "docstring": "read logs during the commissioning process\n\nArgs:\ndurationInSeconds: time duration for reading commissioning logs\n\nReturns:\nCommissioning logs", "source": "juraj-google-style"}
{"code": "def random(length: int = 8, chars: str = digits + ascii_lowercase) -> Iterator[str]:\n    \n    while True:\n        yield \"\".join([choice(chars) for _ in range(length)])", "docstring": "A random string.\n\nNot unique, but has around 1 in a million chance of collision (with the default 8\ncharacter length). e.g. 'fubui5e6'\n\nArgs:\nlength: Length of the random string.\nchars: The characters to randomly choose from.", "source": "juraj-google-style"}
{"code": "def incoming_args(self, nodeid):\n        \n        _vars = self._vars\n        ep = self._eps[nodeid]\n        lbl = ep[2]\n        iv = ep[3].get(IVARG_ROLE)\n        in_args_list = []\n        \n        if iv in _vars:\n            for role, nids in _vars[iv]['refs'].items():\n                \n                if role != IVARG_ROLE:\n                    in_args_list.append((nids, role, iv))\n        if lbl in _vars:\n            for role, nids in _vars[lbl]['refs'].items():\n                \n                if role != 'LBL':\n                    in_args_list.append((nids, role, lbl))\n            for nid, role, hi in _vars[lbl].get('hcrefs', []):\n                in_args_list.append(([nid], role, hi))\n        in_args = {}\n        for nids, role, tgt in in_args_list:\n            for nid in nids:\n                if nid not in in_args:\n                    in_args[nid] = {}\n                in_args[nid][role] = tgt\n        return in_args", "docstring": "Return the arguments that target *nodeid*.\n\nValid arguments include regular variable arguments and scopal\n(label-selecting or HCONS) arguments. MOD/EQ\nlinks and intrinsic arguments are not included.\n\nArgs:\nnodeid: the nodeid of the EP that is the arguments' target\nReturns:\ndict: `{source_nodeid: {rargname: value}}`", "source": "juraj-google-style"}
{"code": "def setSingleStep(self, singleStep):\n    if (not isinstance(singleStep, int)):\n        raise TypeError('Argument is not of type int')\n    self._singleStep = abs(singleStep)\n    return self._singleStep", "docstring": "setter to _singleStep. converts negativ values to positiv ones.\n\nArgs:\nsingleStep (int): new _singleStep value. converts negativ values to positiv ones.\n\nRaises:\nTypeError: If the given argument is not an integer.\n\nReturns:\nint or long: the absolute value of the given argument.", "source": "codesearchnet"}
{"code": "def __init__(self, parent=None, iconSize=QtCore.QSize(36, 36)):\n        \n        super(DataTableWidget, self).__init__(parent)\n        self._iconSize = iconSize\n        self.initUi()", "docstring": "Constructs the object with the given parent.\n\nArgs:\nparent (QObject, optional): Causes the objected to be owned\nby `parent` instead of Qt. Defaults to `None`.\niconSize (QSize, optional): Size of edit buttons. Defaults to QSize(36, 36).", "source": "juraj-google-style"}
{"code": "def PushEvent(self, timestamp, event_data):\n    \n    heap_values = (timestamp, event_data)\n    heapq.heappush(self._heap, heap_values)\n    self.data_size += len(event_data)", "docstring": "Pushes a serialized event onto the heap.\n\nArgs:\ntimestamp (int): event timestamp, which contains the number of\nmicro seconds since January 1, 1970, 00:00:00 UTC.\nevent_data (bytes): serialized event.", "source": "juraj-google-style"}
{"code": "def connected_client(self):\n    future = self.get_connected_client()\n    cb = functools.partial(self._connected_client_release_cb, future)\n    return ContextManagerFuture(future, cb)", "docstring": "Returns a ContextManagerFuture to be yielded in a with statement.\n\nReturns:\nA ContextManagerFuture object.\n\nExamples:\n>>> with (yield pool.connected_client()) as client:\n# client is a connected tornadis.Client instance\n# it will be automatically released to the pool thanks to\n# the \"with\" keyword\nreply = yield client.call(\"PING\")", "source": "codesearchnet"}
{"code": "def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):\n    \n    try:\n        sailthru_response = sailthru_client.purchase(email, [item],\n                                                     incomplete=purchase_incomplete, message_id=message_id,\n                                                     options=options)\n\n        if not sailthru_response.is_ok():\n            error = sailthru_response.get_error()\n            logger.error(\"Error attempting to record purchase in Sailthru: %s\", error.get_message())\n            return not can_retry_sailthru_request(error)\n\n    except SailthruClientError as exc:\n        logger.exception(\"Exception attempting to record purchase for %s in Sailthru - %s\", email, text_type(exc))\n        return False\n\n    return True", "docstring": "Record a purchase in Sailthru\n\nArguments:\nsailthru_client (object): SailthruClient\nemail (str): user's email address\nitem (dict): Sailthru required information about the course\npurchase_incomplete (boolean): True if adding item to shopping cart\nmessage_id (str): Cookie used to identify marketing campaign\noptions (dict): Sailthru purchase API options (e.g. template name)\n\nReturns:\nFalse if retryable error, else True", "source": "juraj-google-style"}
{"code": "def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):\n    print(make_subsection((name_prefix + enum_descriptor.name)))\n    location = locations[path]\n    if location.HasField('leading_comments'):\n        print(textwrap.dedent(location.leading_comments))\n    row_tuples = []\n    for (value_index, value) in enumerate(enum_descriptor.value):\n        field_location = locations[(path + (2, value_index))]\n        row_tuples.append((make_code(value.name), value.number, textwrap.fill(get_comment_from_location(field_location), INFINITY)))\n    print_table(('Name', 'Number', 'Description'), row_tuples)", "docstring": "Generate doc for an enum.\n\nArgs:\nenum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum\nto generate docs for.\nlocations: Dictionary of location paths tuples to\ndescriptor_pb2.SourceCodeInfo.Location instances.\npath: Path tuple to the enum definition.\nname_prefix: Optional prefix for this enum's name.", "source": "codesearchnet"}
{"code": "def remove_variable(self, v):\n    if (v not in self):\n        return\n    adj = self.adj\n    while adj[v]:\n        self.remove_interaction(v, next(iter(adj[v])))\n    del self.linear[v]\n    try:\n        del self._counterpart\n        if ((self.vartype is not Vartype.BINARY) and hasattr(self, '_binary')):\n            del self._binary\n        elif ((self.vartype is not Vartype.SPIN) and hasattr(self, '_spin')):\n            del self._spin\n    except AttributeError:\n        pass", "docstring": "Remove variable v and all its interactions from a binary quadratic model.\n\nArgs:\nv (variable):\nThe variable to be removed from the binary quadratic model.\n\nNotes:\nIf the specified variable is not in the binary quadratic model, this function does nothing.\n\nExamples:\nThis example creates an Ising model and then removes one variable.\n\n>>> import dimod\n...\n>>> bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': 1.0, 'c': 2.0},\n...                            {('a', 'b'): 0.25, ('a','c'): 0.5, ('b','c'): 0.75},\n...                            -0.5, dimod.SPIN)\n>>> bqm.remove_variable('a')\n>>> 'a' in bqm.linear\nFalse\n>>> ('b','c') in bqm.quadratic\nTrue", "source": "codesearchnet"}
{"code": "def _events(self, using_url, filters=None, limit=None):\n    if (not isinstance(limit, (int, NoneType))):\n        limit = None\n    if (filters is None):\n        filters = []\n    if isinstance(filters, string_types):\n        filters = filters.split(',')\n    if (not self.blocking):\n        self.blocking = True\n    while self.blocking:\n        params = {'since': self._last_seen_id, 'limit': limit}\n        if filters:\n            params['events'] = ','.join(map(str, filters))\n        try:\n            data = self.get(using_url, params=params, raw_exceptions=True)\n        except (ConnectTimeout, ConnectionError) as e:\n            data = None\n        except Exception as e:\n            reraise('', e)\n        if data:\n            self._last_seen_id = data[(- 1)]['id']\n            for event in data:\n                self._count += 1\n                (yield event)", "docstring": "A long-polling method that queries Syncthing for events..\n\nArgs:\nusing_url (str): REST HTTP endpoint\nfilters (List[str]): Creates an \"event group\" in Syncthing to\nonly receive events that have been subscribed to.\nlimit (int): The number of events to query in the history\nto catch up to the current state.\n\nReturns:\ngenerator[dict]", "source": "codesearchnet"}
{"code": "def build_or_reuse_placeholder(tensor_spec):\n    g = tfv1.get_default_graph()\n    name = tensor_spec.name\n    try:\n        tensor = g.get_tensor_by_name((name + ':0'))\n        assert ('Placeholder' in tensor.op.type), 'Tensor {} exists but is not a placeholder!'.format(name)\n        assert tensor_spec.is_compatible_with(tensor), 'Tensor {} exists but is not compatible with the signature!'.format(tensor)\n        return tensor\n    except KeyError:\n        with tfv1.name_scope(None):\n            ret = tfv1.placeholder(tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)\n        return ret", "docstring": "Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one.\n\nArgs:\ntensor_spec (tf.TensorSpec):\n\nReturns:\ntf.Tensor:", "source": "codesearchnet"}
{"code": "def eval_rs(gains, losses):\n        \n        \n        count = len(gains) + len(losses)\n\n        avg_gains = stats.avg(gains, count=count) if gains else 1\n        avg_losses = stats.avg(losses,count=count) if losses else 1\n        if avg_losses == 0:\n            return avg_gains\n        else:\n            return avg_gains / avg_losses", "docstring": "Evaluates the RS variable in RSI algorithm\n\nArgs:\ngains: List of price gains.\nlosses: List of prices losses.\n\nReturns:\nFloat of average gains over average losses.", "source": "juraj-google-style"}
{"code": "def Parse(self, conditions, host_data):\n    result = CheckResult(check_id=self.check_id)\n    methods = self.SelectChecks(conditions)\n    result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])\n    return result", "docstring": "Runs methods that evaluate whether collected host_data has an issue.\n\nArgs:\nconditions: A list of conditions to determine which Methods to trigger.\nhost_data: A map of artifacts and rdf data.\n\nReturns:\nA CheckResult populated with Anomalies if an issue exists.", "source": "codesearchnet"}
{"code": "def _get_degree(num_nodes):\n    d_float = (0.5 * (np.sqrt(((8.0 * num_nodes) + 1.0)) - 3.0))\n    d_int = int(np.round(d_float))\n    if (((d_int + 1) * (d_int + 2)) == (2 * num_nodes)):\n        return d_int\n    else:\n        raise ValueError(num_nodes, 'not a triangular number')", "docstring": "Get the degree of the current surface.\n\nArgs:\nnum_nodes (int): The number of control points for a\nB |eacute| zier surface.\n\nReturns:\nint: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2`\nequals ``num_nodes``.\n\nRaises:\nValueError: If ``num_nodes`` isn't a triangular number.", "source": "codesearchnet"}
{"code": "def build(cls, name: str, param_names: tuple[str, ...], posonly_count: int, varargs_name: str | None, kwonly_params: tuple[str, ...], kwargs_name: str | None, defaults: 'dict[str, cfg.Variable]', annotations: dict[str, Any], ctx: 'context.Context') -> 'SimpleFunction':\n    annotations = dict(annotations)\n    for n in itertools.chain(param_names, [varargs_name, kwargs_name], kwonly_params):\n        if n and n not in annotations:\n            annotations[n] = ctx.convert.unsolvable\n    if not isinstance(defaults, dict):\n        defaults = dict(zip(param_names[-len(defaults):], defaults))\n    signature = function.Signature(name, param_names, posonly_count, varargs_name, kwonly_params, kwargs_name, defaults, annotations)\n    return cls(signature, ctx)", "docstring": "Returns a SimpleFunction.\n\nArgs:\nname: Name of the function as a string\nparam_names: Tuple of parameter names as strings. This DOES include\npositional-only parameters and does NOT include keyword-only parameters.\nposonly_count: Number of positional-only parameters.\nvarargs_name: The \"args\" in \"*args\". String or None.\nkwonly_params: Tuple of keyword-only parameters as strings.\nkwargs_name: The \"kwargs\" in \"**kwargs\". String or None.\ndefaults: Dictionary of string names to values of default arguments.\nannotations: Dictionary of string names to annotations (strings or types).\nctx: The abstract context for this function.", "source": "github-repos"}
{"code": "def _compile_internal(computation, inputs=None):\n    if inputs is None:\n        inputs = []\n    if not isinstance(inputs, collections_abc.Sequence):\n        raise TypeError('inputs must be a list')\n    flat_inputs = nest.flatten(inputs)\n    flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs]\n    cluster_name = ops.get_default_graph().unique_name('cluster')\n    pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')\n    context = XLACompileContext(name=cluster_name, pivot=pivot)\n    try:\n        context.Enter()\n        flat_inputs = [array_ops.identity(x, name='input_{}'.format(i)) for i, x in enumerate(flat_inputs)]\n        computation_inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_inputs)\n        vscope = variable_scope.get_variable_scope()\n        saved_use_resource = vscope.use_resource\n        vscope.set_use_resource(True)\n        with _disable_summary_context():\n            outputs = computation(*computation_inputs)\n        vscope.set_use_resource(saved_use_resource)\n        outputs_is_flat = is_flat(outputs)\n        if outputs_is_flat:\n            output_tensors, control_deps = _postprocess_flat_outputs(outputs)\n        else:\n            output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)\n        context.ExitResult(output_tensors)\n    finally:\n        context.report_unsupported_operations()\n        context.Exit()\n    if not output_tensors:\n        return control_flow_ops.group(control_deps, name='output_0')\n    output_tensors = [xla_ops.xla_cluster_output(o, name='output{}'.format(i)) for i, o in enumerate(output_tensors)]\n    with ops.control_dependencies(control_deps):\n        output_tensors = [array_ops.identity(o, name='output_%d' % i) for i, o in enumerate(output_tensors)]\n    if not outputs_is_flat:\n        output_tensors = nest.pack_sequence_as(structure=outputs, flat_sequence=output_tensors)\n    return output_tensors", "docstring": "Builds graph operators that compiles and symbolically executes computation.\n\nArgs:\ncomputation: A Python function that builds the computation to compile and\nexecute.\ninputs: A list of inputs or `None` (equivalent to an empty list). Each input\ncan be a nested structure containing values that are convertible to\ntensors. Note that passing an N-dimension list of compatible values will\nresult in a N-dimension list of scalar tensors rather than a single Rank-N\ntensors. If you need different behavior, convert part of inputs to tensors\nwith `tf.convert_to_tensor`.\n\nReturns:\nSame data structure as if computation(*inputs) is called directly with some\nexceptions for correctness. Exceptions include: 1) None output 2) Single\nvalue output 3) Operation-only outputs\nRaises:\nValueError: If any element in computation outputs is neither an operations\nor a value that can be converted to tensor.\nValueError: If computation outputs is non-flat and contains any Operations.\nTypeError: If `inputs` is not a list or tuple.", "source": "github-repos"}
{"code": "def delete_file(self, file_id):\n        \n        if not is_valid_uuid(file_id):\n            raise StorageArgumentException(\n                'Invalid UUID for file_id: {0}'.format(file_id))\n\n        self._authenticated_request \\\n            .to_endpoint('file/{}/'.format(file_id)) \\\n            .delete()", "docstring": "Delete a file.\n\nArgs:\nfile_id (str): The UUID of the file to delete.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "juraj-google-style"}
{"code": "async def getTempCoreProx(mods=None):\n    acm = genTempCoreProxy(mods)\n    prox = (await acm.__aenter__())\n    object.__setattr__(prox, '_acm', acm)\n\n    async def onfini():\n        (await prox._acm.__aexit__(None, None, None))\n    prox.onfini(onfini)\n    return prox", "docstring": "Get a Telepath Proxt to a Cortex instance which is backed by a temporary Cortex.\n\nArgs:\nmods (list): A list of additional CoreModules to load in the Cortex.\n\nNotes:\nThe Proxy returned by this should be fini()'d to tear down the temporary Cortex.\n\nReturns:\ns_telepath.Proxy", "source": "codesearchnet"}
{"code": "def unpack_rpc_payload(resp_format, payload):\n    \n\n    code = _create_argcode(resp_format, payload)\n    return struct.unpack(code, payload)", "docstring": "Unpack an RPC payload according to resp_format.\n\nArgs:\nresp_format (str): a struct format code (without the <) for the\nparameter format for this RPC.  This format code may include the final\ncharacter V, which means that it expects a variable length bytearray.\npayload (bytes): The binary payload that should be unpacked.\n\nReturns:\nlist: A list of the unpacked payload items.", "source": "juraj-google-style"}
{"code": "def output_shapes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self.element_spec)", "docstring": "Returns the shape of each component of an element of this dataset.\n\nReturns:\nA (nested) structure of `tf.TensorShape` objects corresponding to each\ncomponent of an element of this dataset.", "source": "github-repos"}
{"code": "def GetFilter(cls, filter_name):\n    \n    \n    try:\n      filt_cls = cls.GetPlugin(filter_name)\n    except KeyError:\n      raise DefinitionError(\"Filter %s does not exist.\" % filter_name)\n\n    return filt_cls()", "docstring": "Return an initialized filter. Only initialize filters once.\n\nArgs:\nfilter_name: The name of the filter, as a string.\n\nReturns:\nan initialized instance of the filter.\n\nRaises:\nDefinitionError if the type of filter has not been defined.", "source": "juraj-google-style"}
{"code": "def fts_match_any(self, fts, inv):\n    return any([self.fts_match(fts, s) for s in inv])", "docstring": "Return `True` if any segment in `inv` matches the features in `fts`\n\nArgs:\nfts (list): a collection of (value, feature) tuples\ninv (list): a collection of IPA segments represented as Unicode\nstrings\n\nReturns:\nbool: `True` if any segment in `inv` matches the features in `fts`", "source": "codesearchnet"}
{"code": "async def verify_scriptworker_task(chain, obj):\n    \n    errors = []\n    if obj.worker_impl != \"scriptworker\":\n        errors.append(\"{} {} must be run from scriptworker!\".format(obj.name, obj.task_id))\n    raise_on_errors(errors)", "docstring": "Verify the signing trust object.\n\nCurrently the only check is to make sure it was run on a scriptworker.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\nobj (ChainOfTrust or LinkOfTrust): the trust object for the signing task.", "source": "juraj-google-style"}
{"code": "def _process_tensor_event_in_chunks(self, event, tensor_chunks):\n    value = event.summary.value[0]\n    debugger_plugin_metadata = json.loads(compat.as_text(value.metadata.plugin_data.content))\n    device_name = debugger_plugin_metadata['device']\n    num_chunks = debugger_plugin_metadata['numChunks']\n    chunk_index = debugger_plugin_metadata['chunkIndex']\n    if num_chunks <= 1:\n        return event\n    debug_node_name = value.node_name\n    timestamp = int(event.wall_time)\n    tensor_key = '%s_%s_%d' % (device_name, debug_node_name, timestamp)\n    if tensor_key not in tensor_chunks:\n        tensor_chunks[tensor_key] = [None] * num_chunks\n    chunks = tensor_chunks[tensor_key]\n    if value.tensor.tensor_content:\n        chunks[chunk_index] = value.tensor\n    elif value.tensor.string_val:\n        chunks[chunk_index] = event\n    if None not in chunks:\n        if value.tensor.tensor_content:\n            event.summary.value[0].tensor.tensor_content = b''.join((chunk.tensor_content for chunk in chunks))\n            del tensor_chunks[tensor_key]\n            return event\n        elif value.tensor.string_val:\n            merged_event = chunks[0]\n            for chunk in chunks[1:]:\n                merged_event.summary.value[0].tensor.string_val.extend(list(chunk.summary.value[0].tensor.string_val))\n            return merged_event", "docstring": "Possibly reassemble event chunks.\n\nDue to gRPC's message size limit, a large tensor can be encapsulated in\nmultiple Event proto chunks to be sent through the debugger stream. This\nmethod keeps track of the chunks that have arrived, reassemble all chunks\ncorresponding to a tensor when they have arrived and return the reassembled\nEvent proto.\n\nArgs:\nevent: The single Event proto that has arrived.\ntensor_chunks: A dict used to keep track of the Event protos that have\narrived but haven't been reassembled.\n\nReturns:\nIf all Event protos corresponding to a tensor have arrived, returns the\nreassembled Event proto. Otherwise, return None.", "source": "github-repos"}
{"code": "def _ParseDataStreamWithParser(self, parser_mediator, parser, file_entry, data_stream_name):\n    file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n    if (not file_object):\n        raise RuntimeError('Unable to retrieve file-like object from file entry.')\n    try:\n        self._ParseFileEntryWithParser(parser_mediator, parser, file_entry, file_object=file_object)\n    finally:\n        file_object.close()", "docstring": "Parses a data stream of a file entry with a specific parser.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nparser (BaseParser): parser.\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): data stream name.\n\nRaises:\nRuntimeError: if the file-like object is missing.", "source": "codesearchnet"}
{"code": "def options(self, section):\n    if (not self.has_section(section)):\n        raise NoSectionError(section) from None\n    return self.__getitem__(section).options()", "docstring": "Returns list of configuration options for the named section.\n\nArgs:\nsection (str): name of section\n\nReturns:\nlist: list of option names", "source": "codesearchnet"}
{"code": "def pipelines(self):\n    if (not self.response):\n        return set()\n    elif ((self._pipelines is None) and self.response):\n        self._pipelines = set()\n        for group in self.response.payload:\n            for pipeline in group['pipelines']:\n                self._pipelines.add(pipeline['name'])\n    return self._pipelines", "docstring": "Returns a set of all pipelines from the last response\n\nReturns:\nset: Response success: all the pipelines available in the response\nResponse failure: an empty set", "source": "codesearchnet"}
{"code": "def commutes(m1: np.ndarray, m2: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:\n    return ((m1.shape[0] == m1.shape[1]) and (m1.shape == m2.shape) and np.allclose(m1.dot(m2), m2.dot(m1), rtol=rtol, atol=atol))", "docstring": "Determines if two matrices approximately commute.\n\nTwo matrices A and B commute if they are square and have the same size and\nAB = BA.\n\nArgs:\nm1: One of the matrices.\nm2: The other matrix.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the two matrices have compatible sizes and a commutator equal\nto zero within tolerance.", "source": "codesearchnet"}
{"code": "def read_label_file(path):\n    \n    labels = []\n\n    for record in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=3):\n        value = ''\n\n        if len(record) > 2:\n            value = str(record[2])\n\n        labels.append([float(_clean_time(record[0])), float(_clean_time(record[1])), value])\n\n    return labels", "docstring": "Read the labels from an audacity label file.\n\nArgs:\npath (str): Path to the label file.\n\nReturns:\nlist: List of labels (start [sec], end [sec], label)\n\nExample::\n\n>>> read_label_file('/path/to/label/file.txt')\n[\n[0.0, 0.2, 'sie'],\n[0.2, 2.2, 'hallo']\n]", "source": "juraj-google-style"}
{"code": "def dumps(self, with_defaults=False):\n    return self._rw.dump_config_to_string(self._config, with_defaults=with_defaults)", "docstring": "Generate a string representing all the configuration values.\n\nArgs:\nwith_defaults (bool): if ``True``, values of items with no custom values will be included in the output\nif they have a default value set.", "source": "codesearchnet"}
{"code": "def join(self, other):\n    if self.contains(other):\n        return True\n    if other.contains(self):\n        self.x = other.x\n        self.y = other.y\n        self.width = other.width\n        self.height = other.height\n        return True\n    if (not self.intersects(other, edges=True)):\n        return False\n    if ((self.left == other.left) and (self.width == other.width)):\n        y_min = min(self.bottom, other.bottom)\n        y_max = max(self.top, other.top)\n        self.y = y_min\n        self.height = (y_max - y_min)\n        return True\n    if ((self.bottom == other.bottom) and (self.height == other.height)):\n        x_min = min(self.left, other.left)\n        x_max = max(self.right, other.right)\n        self.x = x_min\n        self.width = (x_max - x_min)\n        return True\n    return False", "docstring": "Try to join a rectangle to this one, if the result is also a rectangle\nand the operation is successful and this rectangle is modified to the union.\n\nArguments:\nother (Rectangle): Rectangle to join\n\nReturns:\nbool: True when successfully joined, False otherwise", "source": "codesearchnet"}
{"code": "def find_elements_by_id(self, id_, update=False) -> Elements:\n        \n        return self.find_elements(by=By.ID, value=id_, update=update)", "docstring": "Finds multiple elements by id.\n\nArgs:\nid_: The id of the elements to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nA list with elements if any was found. An empty list if not.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelements = driver.find_elements_by_id('foo')", "source": "juraj-google-style"}
{"code": "def Record(self, obj):\n    if (len(self._visit_recorder_objects) >= _MAX_VISIT_OBJECTS):\n        return False\n    obj_id = id(obj)\n    if (obj_id in self._visit_recorder_objects):\n        return False\n    self._visit_recorder_objects[obj_id] = obj\n    return True", "docstring": "Records the object as visited.\n\nArgs:\nobj: visited object.\n\nReturns:\nTrue if the object hasn't been previously visited or False if it has\nalready been recorded or the quota has been exhausted.", "source": "codesearchnet"}
{"code": "class GroupViTTextEncoder(nn.Module):\n\n    def __init__(self, config: GroupViTTextConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a\n[`GroupViTEncoderLayer`].\n\nArgs:\nconfig: GroupViTTextConfig", "source": "github-repos"}
{"code": "def index_in_block(self, channel_index: int) -> int:\n        \n        if channel_index < 0 or channel_index >= self.cdim:\n            raise ValueError()\n\n        struct = self.block_structure\n\n        if len(struct) == 1:\n            return channel_index, 0\n        i = 1\n        while sum(struct[:i]) <= channel_index and i < self.cdim:\n            i += 1\n        block_index = i - 1\n        index_in_block = channel_index - sum(struct[:block_index])\n\n        return index_in_block, block_index", "docstring": "Return the index a channel has within the subblock it belongs to\n\nI.e., only for reducible circuits, this gives a result different from\nthe argument itself.\n\nArgs:\nchannel_index (int): The index of the external channel\n\nRaises:\nValueError: for an invalid `channel_index`", "source": "juraj-google-style"}
{"code": "def get_weights_of_nn_sites(self, structure, n):\n    return [e['weight'] for e in self.get_nn_info(structure, n)]", "docstring": "Get weight associated with each near neighbor of site with\nindex n in structure.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site for which to determine the weights.\nReturns:\nweights (list of floats): near-neighbor weights.", "source": "codesearchnet"}
{"code": "def _str_dotted_getattr(obj, name):\n    \n    for part in name.split('.'):\n        obj = getattr(obj, part)\n    return str(obj) if obj else None", "docstring": "Expands extends getattr to allow dots in x to indicate nested objects.\n\nArgs:\nobj (object): an object.\nname (str): a name for a field in the object.\n\nReturns:\nAny: the value of named attribute.\n\nRaises:\nAttributeError: if the named attribute does not exist.", "source": "juraj-google-style"}
{"code": "def fit_cosine_function(wind):\n    \n    wind_daily = wind.groupby(wind.index.date).mean()\n    wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values)  \n\n    df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')\n    x = np.array([df.daily, df.index.hour])\n    popt, pcov = scipy.optimize.curve_fit(_cosine_function, x, df.hourly)\n\n    return popt", "docstring": "fits a cosine function to observed hourly windspeed data\n\nArgs:\nwind: observed hourly windspeed data\n\nReturns:\nparameters needed to generate diurnal features of windspeed using a cosine function", "source": "juraj-google-style"}
{"code": "def tuple_shapes(self):\n    if not self.is_tuple():\n        raise ValueError('tuple_shapes() called on a non-tuple shape')\n    return self._tuple_shapes", "docstring": "If this is a tuple, returns its sequence of constituent Shape objects.\n\nReturns:\nTuple sub-shapes.\n\nRaises:\nValueError: if this is not a tuple.", "source": "github-repos"}
{"code": "def build_kalman_filter_step(get_transition_matrix_for_timestep, get_transition_noise_for_timestep, get_observation_matrix_for_timestep, get_observation_noise_for_timestep):\n\n    def kalman_filter_step(state, elems_t):\n        'Run a single step of Kalman filtering.\\n\\n    Args:\\n      state: A `KalmanFilterState` object representing the previous\\n        filter state at time `t-1`.\\n      elems_t: A tuple of Tensors `(x_t, mask_t)`, or a `Tensor` `x_t`.\\n        `x_t` is a `Tensor` with rightmost shape dimensions\\n        `[observation_size, 1]` representing the vector observed at time `t`,\\n        and `mask_t` is a `Tensor` with rightmost dimensions`[1, 1]`\\n        representing the observation mask at time `t`. Both `x_t` and `mask_t`\\n        may have batch dimensions, which must be compatible with the batch\\n        dimensions of `state.predicted_mean` and `state.predictived_cov`\\n        respectively. If `mask_t` is not provided, it is assumed to be `None`.\\n\\n    Returns:\\n      new_state: A `KalmanFilterState` object representing the new\\n        filter state at time `t`.\\n    '\n        if isinstance(elems_t, tuple):\n            (x_t, mask_t) = elems_t\n        else:\n            x_t = elems_t\n            mask_t = None\n        observation_matrix = get_observation_matrix_for_timestep(state.timestep)\n        observation_noise = get_observation_noise_for_timestep(state.timestep)\n        if (mask_t is not None):\n            x_expected = (_propagate_mean(state.predicted_mean, observation_matrix, observation_noise) * tf.ones_like(x_t))\n            x_t = tf.where(tf.broadcast_to(mask_t, tf.shape(input=x_expected)), x_expected, tf.broadcast_to(x_t, tf.shape(input=x_expected)))\n        (filtered_mean, filtered_cov, observation_dist) = linear_gaussian_update(state.predicted_mean, state.predicted_cov, observation_matrix, observation_noise, x_t)\n        log_marginal_likelihood = observation_dist.log_prob(x_t[(..., 0)])\n        if (mask_t is not None):\n            filtered_mean = tf.where(tf.broadcast_to(mask_t, tf.shape(input=filtered_mean)), state.predicted_mean, filtered_mean)\n            filtered_cov = tf.where(tf.broadcast_to(mask_t, tf.shape(input=filtered_cov)), state.predicted_cov, filtered_cov)\n            log_marginal_likelihood = tf.where(tf.broadcast_to(mask_t[(..., 0, 0)], tf.shape(input=log_marginal_likelihood)), tf.zeros_like(log_marginal_likelihood), log_marginal_likelihood)\n        (predicted_mean, predicted_cov) = kalman_transition(filtered_mean, filtered_cov, get_transition_matrix_for_timestep(state.timestep), get_transition_noise_for_timestep(state.timestep))\n        return KalmanFilterState(filtered_mean, filtered_cov, predicted_mean, predicted_cov, observation_dist.mean()[(..., tf.newaxis)], observation_dist.covariance(), log_marginal_likelihood, (state.timestep + 1))\n    return kalman_filter_step", "docstring": "Build a callable that performs one step of Kalman filtering.\n\nArgs:\nget_transition_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[latent_size, latent_size]`.\nget_transition_noise_for_timestep: callable taking a timestep as\nan integer `Tensor` argument, and returning a\n`MultivariateNormalLinearOperator` of event shape\n`[latent_size]`.\nget_observation_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[observation_size, 
observation_size]`.\nget_observation_noise_for_timestep: callable taking a timestep as\nan integer `Tensor` argument, and returning a\n`MultivariateNormalLinearOperator` of event shape\n`[observation_size]`.\n\nReturns:\nkalman_filter_step: a callable that updates a KalmanFilterState\nfrom timestep `t-1` to `t`.", "source": "codesearchnet"}
{"code": "class DinatDownsampler(nn.Module):\n\n    def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:\n        super().__init__()\n        self.dim = dim\n        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n        self.norm = norm_layer(2 * dim)\n\n    def forward(self, input_feature: torch.Tensor) -> torch.Tensor:\n        input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)\n        input_feature = self.norm(input_feature)\n        return input_feature", "docstring": "Convolutional Downsampling Layer.\n\nArgs:\ndim (`int`):\nNumber of input channels.\nnorm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def get_random_voxels(dataset, n_voxels):\n    voxels = np.arange(dataset.masker.n_vox_in_vol)\n    np.random.shuffle(voxels)\n    selected = voxels[0:n_voxels]\n    return dataset.get_image_data(voxels=selected)", "docstring": "Returns mappable data for a random subset of voxels.\n\nMay be useful as a baseline in predictive analyses--e.g., to compare\nperformance of a more principled feature selection method with simple\nrandom selection.\n\nArgs:\ndataset: A Dataset instance\nn_voxels: An integer specifying the number of random voxels to select.\n\nReturns:\nA 2D numpy array with (randomly-selected) voxels in rows and mappables\nin columns.", "source": "codesearchnet"}
{"code": "def _validate_recurse_directive_types(current_schema_type, field_schema_type, context):\n    \n    \n    type_hints = context['type_equivalence_hints'].get(field_schema_type)\n    type_hints_inverse = context['type_equivalence_hints_inverse'].get(field_schema_type)\n    allowed_current_types = {field_schema_type}\n\n    if type_hints and isinstance(type_hints, GraphQLUnionType):\n        allowed_current_types.update(type_hints.types)\n\n    if type_hints_inverse and isinstance(type_hints_inverse, GraphQLUnionType):\n        allowed_current_types.update(type_hints_inverse.types)\n\n    \n    current_scope_is_allowed = current_schema_type in allowed_current_types\n\n    is_implemented_interface = (\n        isinstance(field_schema_type, GraphQLInterfaceType) and\n        isinstance(current_schema_type, GraphQLObjectType) and\n        field_schema_type in current_schema_type.interfaces\n    )\n\n    if not any((current_scope_is_allowed, is_implemented_interface)):\n        raise GraphQLCompilationError(u'Edges expanded with a @recurse directive must either '\n                                      u'be of the same type as their enclosing scope, a supertype '\n                                      u'of the enclosing scope, or be of an interface type that is '\n                                      u'implemented by the type of their enclosing scope. '\n                                      u'Enclosing scope type: {}, edge type: '\n                                      u'{}'.format(current_schema_type, field_schema_type))", "docstring": "Perform type checks on the enclosing type and the recursed type for a recurse directive.\n\nArgs:\ncurrent_schema_type: GraphQLType, the schema type at the current location\nfield_schema_type: GraphQLType, the schema type at the inner scope\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!", "source": "juraj-google-style"}
{"code": "def table(cls, table, columns, index='', keyset=None):\n    keyset = keyset or KeySet(all_=True)\n    if not isinstance(keyset, KeySet):\n        raise ValueError('keyset must be an instance of class google.cloud.spanner.KeySet')\n    return cls(is_sql=False, is_table=True, read_operation='process_read_batch', kwargs={'table': table, 'columns': columns, 'index': index, 'keyset': keyset})", "docstring": "A convenient method to construct ReadOperation from table.\n\nArgs:\ntable: name of the table from which to fetch data.\ncolumns: names of columns to be retrieved.\nindex: (optional) name of index to use, rather than the table's primary\nkey.\nkeyset: (optional) `KeySet` keys / ranges identifying rows to be\nretrieved.", "source": "github-repos"}
{"code": "def _ass_refresh_attrs(self, cached_ass, file_ass):\n        \n        \n        loaded_ass = yaml_loader.YamlLoader.load_yaml_by_path(file_ass['source'], log_debug=True)\n        attrs = loaded_ass\n        yaml_checker.check(file_ass['source'], attrs)\n        cached_ass['source'] = file_ass['source']\n        cached_ass['ctime'] = os.path.getctime(file_ass['source'])\n        cached_ass['attrs'] = {}\n        cached_ass['snippets'] = {}\n        \n        \n        \n        for a in ['fullname', 'description', 'icon_path']:\n            if a in attrs:\n                cached_ass['attrs'][a] = attrs.get(a)\n        \n        if 'args' in attrs:\n            cached_ass['attrs']['args'] = {}\n        for argname, argparams in attrs.get('args', {}).items():\n            if 'use' in argparams or 'snippet' in argparams:\n                snippet_name = argparams.pop('use', None) or argparams.pop('snippet')\n                snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snippet_name)\n                cached_ass['attrs']['args'][argname] = snippet.get_arg_by_name(argname)\n                cached_ass['attrs']['args'][argname].update(argparams)\n                cached_ass['snippets'][snippet.name] = self._get_snippet_ctime(snippet.name)\n            else:\n                cached_ass['attrs']['args'][argname] = argparams", "docstring": "Completely refreshes cached assistant from file.\n\nArgs:\ncached_ass: an assistant from cache hierarchy\n(for format see Cache class docstring)\nfile_ass: the respective assistant from filesystem hierarchy\n(for format see what refresh_role accepts)", "source": "juraj-google-style"}
{"code": "def __init__(self, glob, opts = None):\n    \n    super(GlobComponent, self).__init__()\n    self._glob = glob\n    self.regex = re.compile(fnmatch.translate(glob), re.I)\n    self.opts = opts or PathOpts()", "docstring": "Instantiates a new GlobComponent from a given path glob.\n\nArgs:\nglob: A string with potential glob elements (e.g. `foo*`).\nopts: An optional PathOpts instance.", "source": "juraj-google-style"}
{"code": "def propose(self, n=1):\n        \n        proposed_params = []\n\n        for i in range(n):\n            \n            \n            candidate_params = self._create_candidates()\n\n            \n            \n            if candidate_params is None:\n                return None\n\n            \n            predictions = self.predict(candidate_params)\n\n            \n            \n            idx = self._acquire(predictions)\n\n            \n            \n            params = {}\n            for i in range(candidate_params[idx, :].shape[0]):\n                inverse_transformed = self.tunables[i][1].inverse_transform(\n                    candidate_params[idx, i]\n                )\n                params[self.tunables[i][0]] = inverse_transformed\n            proposed_params.append(params)\n\n        return params if n == 1 else proposed_params", "docstring": "Use the trained model to propose a new set of parameters.\n\nArgs:\nn (int, optional): number of candidates to propose\n\nReturns:\nMapping of tunable name to proposed value. If called with n>1 then proposal is a list\nof dictionaries.", "source": "juraj-google-style"}
{"code": "def Print(x, data, message, **kwargs):\n    return PrintOperation(x, data, message, **kwargs).outputs[0]", "docstring": "Call tf.Print.\n\nArgs:\nx: a Tensor.\ndata: a list of Tensor\nmessage: a string\n**kwargs: keyword arguments to tf.Print\nReturns:\na Tensor which is identical in value to x", "source": "codesearchnet"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).\n\nArgs:\nrequest: (CloudbuildProjectsBuildsCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def enclose_points(points, clip_rect):\n    point_array = ffi.new('SDL_Point[]', len(points))\n    for (i, p) in enumerate(points):\n        point_array[i] = p._ptr\n    enclosing_rect = Rect()\n    if lib.SDL_EnclosePoints(point_array, len(points), clip_rect._ptr, enclosing_rect._ptr):\n        return enclosing_rect\n    else:\n        return None", "docstring": "Return the minimal rectangle enclosing the given set of points\n\nArgs:\npoints (List[Point]): The set of points that the new Rect must enclose.\nclip_rect (Rect): A clipping Rect.\n\nReturns:\nRect: A new Rect enclosing the given points.", "source": "codesearchnet"}
{"code": "def _parse_string_to_list_of_pairs(s, seconds_to_int=False):\n  r\n  ret = []\n  for p in [s.split(\":\") for s in re.sub(\"[,.;]\", \" \", s).split()]:\n    if len(p) != 2:\n      raise ValueError(\"bad input to _parse_string_to_list_of_pairs %s\" % s)\n    if seconds_to_int:\n      ret.append((p[0], int(p[1])))\n    else:\n      ret.append(tuple(p))\n  return ret", "docstring": "r\"\"\"Parses a string into a list of pairs.\n\nIn the input string, each pair is separated by a colon, and the delimiters\nbetween pairs are any of \" ,.;\".\n\ne.g. \"rows:32,cols:32\"\n\nArgs:\ns: str to parse.\nseconds_to_int: Boolean. If True, then the second elements are returned\nas integers;  otherwise they are strings.\n\nReturns:\nList of tuple pairs.\n\nRaises:\nValueError: Badly formatted string.", "source": "juraj-google-style"}
{"code": "def triangle_area(point1, point2, point3):\n    'Lengths of the three sides of the triangle'\n    a = point_distance(point1, point2)\n    b = point_distance(point1, point3)\n    c = point_distance(point2, point3)\n    'Where s is the semiperimeter'\n    s = (((a + b) + c) / 2.0)\n    \"Return the area of the triangle (using Heron's formula)\"\n    return math.sqrt((((s * (s - a)) * (s - b)) * (s - c)))", "docstring": "Uses Heron's formula to find the area of a triangle\nbased on the coordinates of three points.\n\nArgs:\npoint1: list or tuple, the x y coordinate of point one.\n\npoint2: list or tuple, the x y coordinate of point two.\n\npoint3: list or tuple, the x y coordinate of point three.\n\nReturns:\nThe area of a triangle as a floating point number.\n\nRequires:\nThe math module, point_distance().", "source": "codesearchnet"}
{"code": "def from_datetimes(datetimes):\n    if isinstance(datetimes, (datetime.date, datetime.datetime)):\n        return from_year_month_day(datetimes.year, datetimes.month, datetimes.day, validate=False)\n    years = tf.constant([dt.year for dt in datetimes], dtype=tf.int32)\n    months = tf.constant([dt.month for dt in datetimes], dtype=tf.int32)\n    days = tf.constant([dt.day for dt in datetimes], dtype=tf.int32)\n    return from_year_month_day(years, months, days, validate=False)", "docstring": "Creates DateTensor from a sequence of Python datetime objects.\n\nArgs:\ndatetimes: Sequence of Python datetime objects.\n\nReturns:\nDateTensor object.\n\n#### Example\n\n```python\nimport datetime\n\ndates = [datetime.date(2015, 4, 15), datetime.date(2017, 12, 30)]\ndate_tensor = tff.datetime.dates_from_datetimes(dates)\n```", "source": "github-repos"}
{"code": "class DataParallel(Distribution):\n\n    def __init__(self, device_mesh=None, devices=None, auto_shard_dataset=True):\n        if device_mesh:\n            self._initialize_with_device_mesh(device_mesh)\n        elif devices:\n            self._initialize_mesh_from_devices(devices)\n        else:\n            self._initialize_mesh_from_list_devices()\n        self._num_process = distribution_lib.num_processes()\n        self._process_id = distribution_lib.process_id()\n        self._is_multi_process = self._num_process > 1\n        self._auto_shard_dataset = auto_shard_dataset\n\n    def _initialize_with_device_mesh(self, device_mesh):\n        if not isinstance(device_mesh, DeviceMesh):\n            raise ValueError(f'Expect `mesh` to be an instance of `DeviceMesh`. Received: mesh={device_mesh} (of type {type(device_mesh)})')\n        super().__init__(device_mesh, device_mesh.axis_names[0])\n        if self.device_mesh.devices.ndim != 1:\n            warnings.warn('Expect the input mesh to be 1D, but received mesh.devices.ndim=%d. The first axis will be used for data-parallel sharding.', device_mesh.devices.ndim)\n\n    def _initialize_mesh_from_devices(self, devices):\n        devices = np.array(devices)\n        device_mesh = DeviceMesh(shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices)\n        super().__init__(device_mesh, DEFAULT_BATCH_DIM_NAME)\n\n    def _initialize_mesh_from_list_devices(self):\n        devices = np.array(list_devices())\n        device_mesh = DeviceMesh(shape=devices.shape, axis_names=[DEFAULT_BATCH_DIM_NAME], devices=devices)\n        super().__init__(device_mesh, DEFAULT_BATCH_DIM_NAME)\n\n    def get_data_layout(self, data_shape):\n        data_shard_spec = [None] * len(data_shape)\n        data_shard_spec[0] = self.batch_dim_name\n        return TensorLayout(data_shard_spec, self.device_mesh)\n\n    def get_variable_layout(self, variable):\n        if getattr(variable, '_layout', None) is not None:\n            return variable._layout\n        variable_shard_spec = [None] * len(variable.shape)\n        return TensorLayout(variable_shard_spec, self.device_mesh)\n\n    def get_tensor_layout(self, path):\n        return None\n\n    def distribute_dataset(self, dataset):\n        from tensorflow.python.data.experimental.ops import distribute as tf_data_distribute\n        from keras.src.utils.module_utils import tensorflow as tf\n        if not isinstance(dataset, tf.data.Dataset):\n            raise ValueError(f'Only `tf.data.Dataset` is supported for sharding, got {type(dataset)}')\n        if not self._is_multi_process or not self._auto_shard_dataset:\n            return dataset\n        batch_size = tf_data_distribute.compute_batch_size(dataset)\n        if batch_size.numpy() < 0:\n            raise ValueError('The batch size of the input dataset is unknown. 
Please config the batch size for the input dataset, e.g via `dataset.batch(batch_size)`')\n        per_worker_batch_size = tf_data_distribute.batch_sizes_for_worker(global_batch_size=batch_size, num_workers=self._num_process, num_replicas_per_worker=1, worker_index=self._process_id)\n        distributed_dataset = dataset.rebatch(per_worker_batch_size)\n        distributed_dataset = tf_data_distribute._AutoShardDataset(distributed_dataset, num_workers=self._num_process, index=self._process_id, num_replicas=self._num_process)\n        return distributed_dataset.prefetch(tf.data.AUTOTUNE)", "docstring": "Distribution for data parallelism.\n\nYou can choose to create this instance by either specifying\nthe `device_mesh` or `devices` arguments (but not both).\n\nThe `device_mesh` argument is expected to be a `DeviceMesh` instance,\nand is expected to be 1D only. In case that the mesh has multiple axes,\nthen the first axis will be treated as the data parallel dimension\n(and a warning will be raised).\n\nWhen a list of `devices` are provided, they will be used to construct a\n1D mesh.\n\nWhen both `mesh` and `devices` are absent, then `list_devices()`\nwill be used to detect any available devices and create a 1D mesh from\nthem.\n\nArgs:\ndevice_mesh: Optional `DeviceMesh` instance.\ndevices: Optional list of devices.\nauto_shard_dataset: Automatically shard the dataset amongst processes.\nDefaults to true.", "source": "github-repos"}
{"code": "def inverse(self):\n    inverses = []\n    for segment in self:\n        if (len(segment) < 2):\n            inverses.append([])\n        else:\n            inverses.append(segment.inverse())\n    return inverses", "docstring": "Calculate the inverse geodesic between locations in segments.\n\nReturns:\nlist of 2-tuple of float: Groups in bearing and distance between\npoints in segments", "source": "codesearchnet"}
{"code": "def bias_dropout_add(x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool) -> Tensor:\n    if bias is not None:\n        x = x + bias\n    out = torch.nn.functional.dropout(x, p=prob, training=training)\n    if residual is not None:\n        out = residual + out\n    return out", "docstring": "add bias to x, apply dropout and residual connection\n\nArgs:\nx (Tensor): main path of output\nbias (Tensor): None or attn_bias of the last attention layer\nresidual (Optional[Tensor]): residual value\nprob (float): dropout probability\ntraining (bool): whether in training mode or not\n\nReturns:\nTensor: dropout(x + bias) + residual", "source": "github-repos"}
{"code": "def make_session(username=None, password=None, bearer_token=None, extra_headers_dict=None): \n    \n\n    if password is None and bearer_token is None:\n        logger.error(\"No authentication information provided; \"\n                     \"please check your object\")\n        raise KeyError\n\n    session = requests.Session()\n    session.trust_env = False\n    headers = {'Accept-encoding': 'gzip',\n               'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}\n    if bearer_token:\n        logger.info(\"using bearer token for authentication\")\n        headers['Authorization'] = \"Bearer {}\".format(bearer_token)\n        session.headers = headers\n    else:\n        logger.info(\"using username and password for authentication\")\n        session.auth = username, password\n        session.headers = headers\n    if extra_headers_dict:\n        headers.update(extra_headers_dict) \n    return session", "docstring": "Creates a Requests Session for use. Accepts a bearer token\nfor premiums users and will override username and password information if\npresent.\n\nArgs:\nusername (str): username for the session\npassword (str): password for the user\nbearer_token (str): token for a premium API user.", "source": "juraj-google-style"}
{"code": "def add_oxidation_state_by_site(self, oxidation_states):\n        \n        if len(oxidation_states) != len(self.sites):\n            raise ValueError(\"Oxidation states of all sites must be \"\n                             \"specified.\")\n        for site, ox in zip(self.sites, oxidation_states):\n            new_sp = {}\n            for el, occu in site.species.items():\n                sym = el.symbol\n                new_sp[Specie(sym, ox)] = occu\n            site.species = new_sp", "docstring": "Add oxidation states to a structure by site.\n\nArgs:\noxidation_states (list): List of oxidation states.\nE.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]", "source": "juraj-google-style"}
{"code": "def __new__(cls, *args) -> 'InvokeExpressionNode':\n    if not args:\n        return super().__new__(cls)\n    _, identifier, parent_node = args\n    if identifier == 'reference' and isinstance(parent_node.return_type, _fhir_path_data_types.ReferenceStructureDataType):\n        return super().__new__(InvokeReferenceNode)\n    return super().__new__(InvokeExpressionNode)", "docstring": "Creates a new InvokeExpressionNode node or one of its subclasses.\n\nCreates either an InvokeExpressionNode or InvokeReferenceNode, a subclass of\nInvokeExpressionNode. The InvokeReferenceNode is returned when a field named\n'reference' is invoked against a FHIR Reference resource. Database backends\nhave special behavior for reference nodes. This reference-specific node type\nallows them to define visitors to implement their reference-specific logic.\n\nArgs:\n*args: The args passed to `__init__`.\n\nReturns:\nA new InvokeExpressionNode of the appropriate type.", "source": "github-repos"}
{"code": "def __init__(self, config, in_features, condition_dim, n_classes=256, bottleneck_factor=2):\n    super().__init__()\n    bottleneck = (in_features + condition_dim) \n    self.mlp = nn.Sequential(nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0), nn.GELU(), nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0), nn.Softplus())\n    self.p_eps = 0.0001\n    self.max_temp = config.max_temp\n    self.min_temp = config.min_temp\n    self.log_binomial_transform = LogBinomialSoftmax(n_classes, act=torch.softmax)", "docstring": "Per-pixel MLP followed by a Conditional Log Binomial softmax.\n\nArgs:\nin_features (`int`):\nNumber of input channels in the main feature.\ncondition_dim (`int`):\nNumber of input channels in the condition feature.\nn_classes (`int`, *optional*, defaults to 256):\nNumber of classes.\nbottleneck_factor (`int`, *optional*, defaults to 2):\nHidden dim factor.", "source": "github-repos"}
{"code": "def extract_sequences(x, sequence_length, sequence_stride):\n    if any_symbolic_tensors((x,)):\n        return ExtractSequences(sequence_length, sequence_stride).symbolic_call(x)\n    return backend.math.extract_sequences(x, sequence_length, sequence_stride)", "docstring": "Expands the dimension of last axis into sequences of `sequence_length`.\n\nSlides a window of size `sequence_length` over the last axis of the input\nwith a stride of `sequence_stride`, replacing the last axis with\n`[num_sequences, sequence_length]` sequences.\n\nIf the dimension along the last axis is N, the number of sequences can be\ncomputed by:\n\n`num_sequences = 1 + (N - sequence_length) // sequence_stride`\n\nArgs:\nx: Input tensor.\nsequence_length: An integer representing the sequences length.\nsequence_stride: An integer representing the sequences hop size.\n\nReturns:\nA tensor of sequences with shape [..., num_sequences, sequence_length].\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([1, 2, 3, 4, 5, 6])\n>>> extract_sequences(x, 3, 2)\narray([[1, 2, 3],\n[3, 4, 5]])", "source": "github-repos"}
{"code": "def invoke_script(self, script, id=None, endpoint=None):\n        \n        return self._call_endpoint(INVOKE_SCRIPT, params=[script], id=id, endpoint=endpoint)", "docstring": "Invokes a script that has been assembled\nArgs:\nscript: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def serialize_keras_object(instance):\n    instance = inspect.unwrap(instance)\n    if instance is None:\n        return None\n    if hasattr(instance, 'get_config'):\n        name = object_registration.get_registered_name(instance.__class__)\n        try:\n            config = instance.get_config()\n        except NotImplementedError as e:\n            if _SKIP_FAILED_SERIALIZATION:\n                return serialize_keras_class_and_config(name, {_LAYER_UNDEFINED_CONFIG_KEY: True})\n            raise e\n        serialization_config = {}\n        for key, item in config.items():\n            if isinstance(item, str):\n                serialization_config[key] = item\n                continue\n            try:\n                serialized_item = serialize_keras_object(item)\n                if isinstance(serialized_item, dict) and (not isinstance(item, dict)):\n                    serialized_item['__passive_serialization__'] = True\n                serialization_config[key] = serialized_item\n            except ValueError:\n                serialization_config[key] = item\n        name = object_registration.get_registered_name(instance.__class__)\n        return serialize_keras_class_and_config(name, serialization_config, instance)\n    if hasattr(instance, '__name__'):\n        return object_registration.get_registered_name(instance)\n    raise ValueError(f\"Cannot serialize {instance} because it doesn't implement `get_config()`.\")", "docstring": "Serialize a Keras object into a JSON-compatible representation.\n\nCalls to `serialize_keras_object` while underneath the\n`SharedObjectSavingScope` context manager will cause any objects re-used\nacross multiple layers to be saved with a special shared object ID. This\nallows the network to be re-created properly during deserialization.\n\nArgs:\ninstance: The object to serialize.\n\nReturns:\nA dict-like, JSON-compatible representation of the object's config.", "source": "github-repos"}
{"code": "def pack_results(measurements: Sequence[Tuple[(str, np.ndarray)]]) -> bytes:\n    if (not measurements):\n        return b''\n    shapes = [(key, np.shape(data)) for (key, data) in measurements]\n    if (not all(((len(shape) == 2) for (_, shape) in shapes))):\n        raise ValueError('Expected 2-D data: shapes={}'.format(shapes))\n    reps = shapes[0][1][0]\n    if (not all(((shape[0] == reps) for (_, shape) in shapes))):\n        raise ValueError('Expected same reps for all keys: shapes={}'.format(shapes))\n    bits = np.hstack([np.asarray(data, dtype=bool) for (_, data) in measurements])\n    bits = bits.reshape((- 1))\n    remainder = (len(bits) % 8)\n    if remainder:\n        bits = np.pad(bits, (0, (8 - remainder)), 'constant')\n    bits = bits.reshape(((- 1), 8))[(:, ::(- 1))]\n    byte_arr = np.packbits(bits, axis=1).reshape((- 1))\n    return byte_arr.tobytes()", "docstring": "Pack measurement results into a byte string.\n\nArgs:\nmeasurements: A sequence of tuples, one for each measurement, consisting\nof a string key and an array of boolean data. The data should be\na 2-D array indexed by (repetition, qubit_index). All data for all\nmeasurements must have the same number of repetitions.\n\nReturns:\nPacked bytes, as described in the unpack_results docstring below.\n\nRaises:\nValueError if the measurement data do not have the compatible shapes.", "source": "codesearchnet"}
{"code": "def _convert_reward(self, reward):\n    \n    if not np.isfinite(reward).all():\n      raise ValueError('Infinite reward encountered.')\n    return np.array(reward, dtype=np.float32)", "docstring": "Convert the reward to 32 bits.\n\nArgs:\nreward: Numpy reward.\n\nRaises:\nValueError: Rewards contain infinite values.\n\nReturns:\nNumpy reward with 32-bit data type.", "source": "juraj-google-style"}
{"code": "def reparameterization_type(self):\n    return self._reparameterization_type", "docstring": "Describes how samples from the distribution are reparameterized.\n\nCurrently this is one of the static instances\n`distributions.FULLY_REPARAMETERIZED`\nor `distributions.NOT_REPARAMETERIZED`.\n\nReturns:\nAn instance of `ReparameterizationType`.", "source": "github-repos"}
{"code": "def fillNoneValues(column):\n    \n    if column.dtype == object:\n        column.fillna('', inplace=True)\n    return column", "docstring": "Fill all NaN/NaT values of a column with an empty string\n\nArgs:\ncolumn (pandas.Series): A Series object with all rows.\n\nReturns:\ncolumn: Series with filled NaN values.", "source": "juraj-google-style"}
{"code": "def value(self):\n    with c_api_util.tf_buffer() as buffer_:\n        pywrap_tfe.TFE_MonitoringSamplerCellValue(self._cell, buffer_)\n        proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n    histogram_proto = summary_pb2.HistogramProto()\n    histogram_proto.ParseFromString(compat.as_bytes(proto_data))\n    return histogram_proto", "docstring": "Retrieves the current distribution of samples.\n\nReturns:\nA HistogramProto describing the distribution of samples.", "source": "github-repos"}
{"code": "def check_onfail_requisites(state_id, state_result, running, highstate):\n    nret = None\n    if (state_id and state_result and highstate and isinstance(highstate, dict)):\n        onfails = search_onfail_requisites(state_id, highstate)\n        if onfails:\n            for handler in onfails:\n                (fstate, mod_, fchunk) = handler\n                for (rstateid, rstate) in six.iteritems(running):\n                    if ('_|-' in rstateid):\n                        st = salt.state.split_low_tag(rstateid)\n                    else:\n                        id_ = rstate.get('__id__', rstateid)\n                        if (not id_):\n                            raise ValueError('no state id')\n                        st = {'__id__': id_, 'state': mod_}\n                    if ((mod_ == st['state']) and (fstate == st['__id__'])):\n                        ofresult = rstate.get('result', _empty)\n                        if (ofresult in [False, True]):\n                            nret = ofresult\n                        if (ofresult is False):\n                            break\n                if (nret is None):\n                    nret = False\n    return nret", "docstring": "When a state fail and is part of a highstate, check\nif there is onfail requisites.\nWhen we find onfail requisites, we will consider the state failed\nonly if at least one of those onfail requisites also failed\n\nReturns:\n\nTrue: if onfail handlers suceeded\nFalse: if one on those handler failed\nNone: if the state does not have onfail requisites", "source": "codesearchnet"}
{"code": "def simulate_w(self, index: int, half_turns: float, axis_half_turns: float):\n    args = self._shard_num_args({'index': index, 'half_turns': half_turns, 'axis_half_turns': axis_half_turns})\n    if (index >= self._num_shard_qubits):\n        self._pool.map(_clear_scratch, args)\n        self._pool.map(_w_between_shards, args)\n        self._pool.map(_copy_scratch_to_state, args)\n    else:\n        self._pool.map(_w_within_shard, args)\n    norm_squared = np.sum(self._pool.map(_norm_squared, args))\n    args = self._shard_num_args({'norm_squared': norm_squared})\n    self._pool.map(_renorm, args)", "docstring": "Simulate a single qubit rotation gate about a X + b Y.\n\nThe gate simulated is U = exp(-i pi/2 W half_turns)\nwhere W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y\n\nArgs:\nindex: The qubit to act on.\nhalf_turns: The amount of the overall rotation, see the formula\nabove.\naxis_half_turns: The angle between the pauli X and Y operators,\nsee the formula above.", "source": "codesearchnet"}
{"code": "def round_accuracy(y_true, y_predicted):\n    \n    predictions = [round(x) for x in y_predicted]\n    examples_len = len(y_true)\n    correct = sum([y1 == y2 for y1, y2 in zip(y_true, predictions)])\n    return correct / examples_len if examples_len else 0", "docstring": "Rounds predictions and calculates accuracy in terms of absolute coincidence.\n\nArgs:\ny_true: list of true values\ny_predicted: list of predicted values\n\nReturns:\nportion of absolutely coincidental samples", "source": "juraj-google-style"}
{"code": "def addRow(self, triggered):\n    if triggered:\n        model = self.tableView.model()\n        model.addDataFrameRows()\n        self.sender().setChecked(False)", "docstring": "Adds a row to the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the row will be appended to the end.", "source": "codesearchnet"}
{"code": "def imfrombytes(content, flag='color'):\n    \n    img_np = np.frombuffer(content, np.uint8)\n    flag = imread_flags[flag] if is_str(flag) else flag\n    img = cv2.imdecode(img_np, flag)\n    return img", "docstring": "Read an image from bytes.\n\nArgs:\ncontent (bytes): Image bytes got from files or other streams.\nflag (str): Same as :func:`imread`.\n\nReturns:\nndarray: Loaded image array.", "source": "juraj-google-style"}
{"code": "def namespace(self, mid: ModuleId) -> YangIdentifier:\n    try:\n        mdata = self.modules[mid]\n    except KeyError:\n        raise ModuleNotRegistered(*mid) from None\n    return mdata.main_module[0]", "docstring": "Return the namespace corresponding to a module or submodule.\n\nArgs:\nmid: Module identifier.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.", "source": "codesearchnet"}
{"code": "def SpinTimes(spin, bias):\n    if (not isinstance(spin, int)):\n        raise TypeError('spin must be an int')\n    if (spin == (- 1)):\n        return Times(Real(((- 1), 1)), bias)\n    elif (spin == 1):\n        return bias\n    else:\n        raise ValueError('expected spins to be -1., or 1.')", "docstring": "Define our own multiplication for bias times spins. This allows for\ncleaner log code as well as value checking.\n\nArgs:\nspin (int): -1 or 1\nbias (:class:`pysmt.shortcuts.Symbol`): The bias\n\nReturns:\nspins * bias", "source": "codesearchnet"}
{"code": "def __init__(self, scale, growth_factor, bucket_count):\n    super(ExponentialBuckets, self).__init__(pywrap_tfe.TFE_MonitoringNewExponentialBuckets(scale, growth_factor, bucket_count))", "docstring": "Creates a new exponential Buckets.\n\nArgs:\nscale: float\ngrowth_factor: float\nbucket_count: integer", "source": "github-repos"}
{"code": "def put(self, credentials):\n    self.acquire_lock()\n    try:\n        self.locked_put(credentials)\n    finally:\n        self.release_lock()", "docstring": "Write a credential.\n\nThe Storage lock must be held when this is called.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "codesearchnet"}
{"code": "def upload_to_s3(context, file_obj):\n    bucket = context.solid_config['bucket']\n    key = context.solid_config['key']\n    context.resources.s3.put_object(Bucket=bucket, Body=file_obj.read(), Key=key, **(context.solid_config.get('kwargs') or {}))\n    (yield Result(bucket, 'bucket'))\n    (yield Result(key, 'key'))", "docstring": "Upload a file to s3.\n\nArgs:\ninfo (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.\n\nReturns:\n(str, str):\nThe bucket and key to which the file was uploaded.", "source": "codesearchnet"}
{"code": "def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):\n    source = vtk.vtkLineSource()\n    source.SetPoint1(start)\n    source.SetPoint2(end)\n    vertexIDs = vtk.vtkStringArray()\n    vertexIDs.SetNumberOfComponents(1)\n    vertexIDs.SetName('VertexIDs')\n    vertexIDs.InsertNextValue('a')\n    vertexIDs.InsertNextValue('b')\n    source.GetOutput().GetPointData().AddArray(vertexIDs)\n    mapper = vtk.vtkPolyDataMapper()\n    mapper.SetInputConnection(source.GetOutputPort())\n    actor = vtk.vtkActor()\n    actor.SetMapper(mapper)\n    actor.GetProperty().SetColor(color)\n    actor.GetProperty().SetLineWidth(width)\n    self.ren.AddActor(actor)", "docstring": "Adds a line.\n\nArgs:\nstart: Starting coordinates for line.\nend: Ending coordinates for line.\ncolor: Color for text as RGB. Defaults to grey.\nwidth: Width of line. Defaults to 1.", "source": "codesearchnet"}
{"code": "def find(self, title):\n    if (title not in self._titles):\n        raise KeyError(title)\n    return self._titles[title][0]", "docstring": "Return the first worksheet with the given title.\n\nArgs:\ntitle(str): title/name of the worksheet to return\nReturns:\nWorkSheet: contained worksheet object\nRaises:\nKeyError: if the spreadsheet has no no worksheet with the given ``title``", "source": "codesearchnet"}
{"code": "def VShadowPathSpecGetStoreIndex(path_spec):\n  \n  store_index = getattr(path_spec, 'store_index', None)\n\n  if store_index is None:\n    location = getattr(path_spec, 'location', None)\n\n    if location is None or not location.startswith('/vss'):\n      return None\n\n    store_index = None\n    try:\n      store_index = int(location[4:], 10) - 1\n    except (TypeError, ValueError):\n      pass\n\n    if store_index is None or store_index < 0:\n      return None\n\n  return store_index", "docstring": "Retrieves the store index from the path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nint: store index or None if not available.", "source": "juraj-google-style"}
{"code": "def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):\n\n    def _linspace(n_xels_per_dim):\n        return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32)\n    dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]\n    array_index_grid = meshgrid(*dim_ranges, indexing='ij')\n    return torch.stack(array_index_grid, dim=-1)", "docstring": "Generate an array of position indices for an N-D input array.\n\nArgs:\nindex_dims (`List[int]`):\nThe shape of the index dimensions of the input array.\noutput_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):\nThe min and max values taken by each input index dimension.\n\nReturns:\n`torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`.", "source": "github-repos"}
{"code": "def by_location(self, location, cc=None):\n    (header, content) = self._http_request(self.BASE_URL, location=location, cc=cc)\n    return json.loads(content)", "docstring": "Perform a Yelp Neighborhood API Search based on a location specifier.\n\nArgs:\nlocation - textual location specifier of form: \"address, city, state or zip, optional country\"\ncc       - ISO 3166-1 alpha-2 country code. (Optional)", "source": "codesearchnet"}
{"code": "def __init__(self, in_features: int, lateral_features: int):\n    super().__init__()\n    self.proj = nn.Sequential(nn.Conv2d(lateral_features, in_features, kernel_size=1, padding=0, bias=False), nn.GroupNorm(32, in_features))\n    self.block = MaskFormerFPNConvLayer(in_features, in_features)", "docstring": "A Feature Pyramid Network Layer (FPN) layer. It creates a feature map by aggregating features from the previous\nand backbone layer. Due to the spatial mismatch, the tensor coming from the previous layer is upsampled.\n\nArgs:\nin_features (`int`):\nThe number of input features (channels).\nlateral_features (`int`):\nThe number of lateral features (channels).", "source": "github-repos"}
{"code": "def __load_partition_entries(self, fd, bs):\n        \n\n        fd.seek(self.header.part_lba * bs)\n        for p in range(0, self.header.num_partitions):\n            data = fd.read(self.header.part_size)\n            entry = GptPartitionEntry(data)\n            if entry.type_guid != uuid.UUID(\n                '{00000000-0000-0000-0000-000000000000}'\n            ):\n                self.__partition_entries.append(entry)\n            else:\n                \n                break", "docstring": "Loads the list of :class:`GptPartition` partition entries\n\nArgs:\nbs (uint): Block size of the volume", "source": "juraj-google-style"}
{"code": "def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[(str, Bounds)]:\n    scope = self.action_precondition_scope(state)\n    lower_bounds = self.rddl.domain.action_lower_bound_constraints\n    upper_bounds = self.rddl.domain.action_upper_bound_constraints\n    with self.graph.as_default():\n        with tf.name_scope('action_bound_constraints'):\n            bounds = {}\n            for name in self.rddl.domain.action_fluent_ordering:\n                lower_expr = lower_bounds.get(name)\n                lower = None\n                if (lower_expr is not None):\n                    with tf.name_scope('lower_bound'):\n                        lower = self._compile_expression(lower_expr, scope)\n                upper_expr = upper_bounds.get(name)\n                upper = None\n                if (upper_expr is not None):\n                    with tf.name_scope('upper_bound'):\n                        upper = self._compile_expression(upper_expr, scope)\n                bounds[name] = (lower, upper)\n            return bounds", "docstring": "Compiles all actions bounds for the given `state`.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\n\nReturns:\nA mapping from action names to a pair of\n:obj:`rddl2tf.fluent.TensorFluent` representing\nits lower and upper bounds.", "source": "codesearchnet"}
{"code": "def _fromTwosComplement(x, bits=16):\n    \n    _checkInt(bits, minvalue=0, description='number of bits')\n\n    _checkInt(x, description='input')\n    upperlimit = 2 ** (bits) - 1\n    lowerlimit = 0\n    if x > upperlimit or x < lowerlimit:\n        raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n            .format(x, lowerlimit, upperlimit, bits))\n\n    \n    limit = 2 ** (bits - 1) - 1\n    if x <= limit:\n        return x\n    return x - 2 ** bits", "docstring": "Calculate the inverse(?) of a two's complement of an integer.\n\nArgs:\n* x (int): input integer.\n* bits (int): number of bits, must be > 0.\n\nReturns:\nAn int, that represents the inverse(?) of two's complement of the input.\n\nExample for bits=8:\n\n=== =======\nx   returns\n=== =======\n0   0\n1   1\n127 127\n128 -128\n129 -127\n255 -1\n=== =======", "source": "juraj-google-style"}
{"code": "def apply_cut(self, cut):\n        \n        \n        return MacroSubsystem(\n            self.network,\n            self.network_state,\n            self.micro_node_indices,\n            cut=cut,\n            time_scale=self.time_scale,\n            blackbox=self.blackbox,\n            coarse_grain=self.coarse_grain)", "docstring": "Return a cut version of this |MacroSubsystem|.\n\nArgs:\ncut (Cut): The cut to apply to this |MacroSubsystem|.\n\nReturns:\nMacroSubsystem: The cut version of this |MacroSubsystem|.", "source": "juraj-google-style"}
{"code": "def WakeStuckFlow(session_id):\n    session_id = rdfvalue.SessionID(session_id)\n    woken = 0\n    checked_pending = False\n    with queue_manager.QueueManager() as manager:\n        for (request, responses) in manager.FetchRequestsAndResponses(session_id):\n            if (not checked_pending):\n                task = manager.Query(request.client_id, task_id=('task:%s' % request.request.task_id))\n                if task:\n                    return\n                checked_pending = True\n            if ((not responses) or (responses[(- 1)].type != rdf_flows.GrrMessage.Type.STATUS)):\n                manager.QueueClientMessage(request.request)\n                woken += 1\n            if (responses and (responses[(- 1)].type == rdf_flows.GrrMessage.Type.STATUS)):\n                manager.QueueNotification(session_id)\n    return woken", "docstring": "Wake up stuck flows.\n\nA stuck flow is one which is waiting for the client to do something, but the\nclient requests have been removed from the client queue. This can happen if\nthe system is too loaded and the client messages have TTLed out. In this case\nwe reschedule the client requests for this session.\n\nArgs:\nsession_id: The session for the flow to wake.\n\nReturns:\nThe total number of client messages re-queued.", "source": "codesearchnet"}
{"code": "def _on_connection_finished(self, result):\n    (success, retval, context) = self._parse_return(result)\n    conn_id = context['connection_id']\n    callback = context['callback']\n    if (success is False):\n        callback(conn_id, self.id, False, 'Timeout opening connection')\n        with self.count_lock:\n            self.connecting_count -= 1\n        return\n    handle = retval['handle']\n    context['disconnect_handler'] = self._on_connection_failed\n    context['connect_time'] = time.time()\n    context['state'] = 'preparing'\n    self._connections[handle] = context\n    self.probe_services(handle, conn_id, self._probe_services_finished)", "docstring": "Callback when the connection attempt to a BLE device has finished\n\nThis function if called when a new connection is successfully completed\n\nArgs:\nevent (BGAPIPacket): Connection event", "source": "codesearchnet"}
{"code": "def are_all_matches_terminal(self, predicate: Callable[([ops.Operation], bool)]):\n    return all(((self.next_moment_operating_on(op.qubits, (i + 1)) is None) for (i, op) in self.findall_operations(predicate)))", "docstring": "Check whether all of the ops that satisfy a predicate are terminal.\n\nArgs:\npredicate: A predicate on ops.Operations which is being checked.\n\nReturns:\nWhether or not all `Operation` s in a circuit that satisfy the\ngiven predicate are terminal.", "source": "codesearchnet"}
{"code": "def rfc3339(self):\n    if (self._nanosecond == 0):\n        return to_rfc3339(self)\n    nanos = str(self._nanosecond).rjust(9, '0').rstrip('0')\n    return '{}.{}Z'.format(self.strftime(_RFC3339_NO_FRACTION), nanos)", "docstring": "Return an RFC 3339-compliant timestamp.\n\nReturns:\n(str): Timestamp string according to RFC 3339 spec.", "source": "codesearchnet"}
{"code": "def get_nn_info(self, structure, n):\n    site = structure[n]\n    neighs_dists = structure.get_neighbors(site, self.cutoff)\n    try:\n        eln = site.specie.element\n    except:\n        eln = site.species_string\n    reldists_neighs = []\n    for (neigh, dist) in neighs_dists:\n        try:\n            el2 = neigh.specie.element\n        except:\n            el2 = neigh.species_string\n        reldists_neighs.append([(dist / get_okeeffe_distance_prediction(eln, el2)), neigh])\n    siw = []\n    min_reldist = min([reldist for (reldist, neigh) in reldists_neighs])\n    for (reldist, s) in reldists_neighs:\n        if (reldist < ((1.0 + self.tol) * min_reldist)):\n            w = (min_reldist / reldist)\n            siw.append({'site': s, 'image': self._get_image(structure, s), 'weight': w, 'site_index': self._get_original_site(structure, s)})\n    return siw", "docstring": "Get all near-neighbor sites as well as the associated image locations\nand weights of the site with index n using the closest relative\nneighbor distance-based method with O'Keeffe parameters.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site for which to determine near\nneighbors.\n\nReturns:\nsiw (list of tuples (Site, array, float)): tuples, each one\nof which represents a neighbor site, its image location,\nand its weight.", "source": "codesearchnet"}
{"code": "def unpack(self, dtensor: Any) -> Sequence[Any]:\n    if not context.executing_eagerly():\n        raise RuntimeError('`unpack` must be called eagerly.')\n    try:\n        tensors = _pywrap_dtensor_device.Unpack(context.context()._handle, dtensor, self._device_info)\n    except core._NotOkStatusException as e:\n        raise core._status_to_exception(e) from None\n    is_sparse = _pywrap_dtensor_device.IsSparseDTensor(context.context()._handle, dtensor, self._device_info)\n    if is_sparse:\n        result = []\n        for i in range(len(tensors) \n            result.append(sparse_tensor.SparseTensor(tensors[i], tensors[i + len(tensors) \n        return result\n    else:\n        return tensors", "docstring": "Unpacks a DTensor handle on this DTensor device.\n\nPacking and unpacking are inverse operations:\n\n```\n* unpack(pack(tensors)) == tensors\n* pack(unpack(dtensor)) == dtensor\n```\n\nRefer to `dtensor.unpack` for more information.\n\nArgs:\ndtensor: The DTensor to unpack.\n\nReturns:\nThe raw underlying tensor components of the DTensor.\n\nRaises:\nRuntimeError: When not called eagerly.", "source": "github-repos"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    if 'shortest_edge' in size and 'longest_edge' in size:\n        size = get_resize_output_image_size(image, size, input_data_format)\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n    else:\n        raise ValueError(\"size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'.\")\n    return resize(image, size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _get_expiration(self, headers: dict) -> int:\n        \n        expiration_str = headers.get('expires')\n        if not expiration_str:\n            return 0\n        expiration = datetime.strptime(expiration_str, '%a, %d %b %Y %H:%M:%S %Z')\n        delta = (expiration - datetime.utcnow()).total_seconds()\n        return math.ceil(abs(delta))", "docstring": "Gets the expiration time of the data from the response headers.\n\nArgs:\nheaders: dictionary of headers from ESI\n\nReturns:\nvalue of seconds from now the data expires", "source": "juraj-google-style"}
{"code": "def get_url_preview(self, url, ts=None):\n    params = {'url': url}\n    if ts:\n        params['ts'] = ts\n    return self._send('GET', '', query_params=params, api_path='/_matrix/media/r0/preview_url')", "docstring": "Get preview for URL.\n\nArgs:\nurl (str): URL to get a preview\nts (double): The preferred point in time to return\na preview for. The server may return a newer\nversion if it does not have the requested\nversion available.", "source": "codesearchnet"}
{"code": "def get_callable(subcommand):\n    _LOGGER.debug('Creating callable from subcommand \"%s\".', subcommand.__name__)\n    if isinstance(subcommand, ModuleType):\n        _LOGGER.debug('Subcommand is a module.')\n        assert hasattr(subcommand, 'Command'), 'Module subcommand must have callable \"Command\" class definition.'\n        callable_ = subcommand.Command\n    else:\n        callable_ = subcommand\n    if any((isinstance(callable_, t) for t in six.class_types)):\n        return callable_()\n    return callable_", "docstring": "Return a callable object from the subcommand.\n\nArgs:\nsubcommand: A object loaded from an entry point. May be a module,\nclass, or function.\n\nReturns:\nThe callable entry point for the subcommand. If the subcommand is a\nfunction, it will be returned unchanged. If the subcommand is a module\nor a class, an instance of the command class will be returned.\n\nRaises:\nAssertionError: Raised when a module entry point does not have a\ncallable class named Command.", "source": "codesearchnet"}
{"code": "def swo_stop(self):\n    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.STOP, 0)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Stops collecting SWO data.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def GetPluginObjectByName(cls, plugin_name):\n    \n    plugin_class = cls._plugin_classes.get(plugin_name, None)\n    if plugin_class:\n      return plugin_class()\n\n    return None", "docstring": "Retrieves a specific plugin object by its name.\n\nArgs:\nplugin_name (str): name of the plugin.\n\nReturns:\nBasePlugin: a plugin object or None if not available.", "source": "juraj-google-style"}
{"code": "def get_characteristic_handle_from_uuid(self, uuid):\n        \n        ch = self.get_characteristic_from_uuid(uuid)\n        return None if ch is None else ch.char_handle", "docstring": "Given a characteristic UUID, return its handle.\n\nArgs:\nuuid (str): a string containing the hex-encoded UUID\n\nReturns:\nNone if an error occurs, otherwise an integer handle.", "source": "juraj-google-style"}
{"code": "def max(self):\n    if (len(self._data) == 0):\n        return 600\n    return next(iter(reversed(sorted(self._data.keys()))))", "docstring": "Return the maximum value in this histogram.\n\nIf there are no values in the histogram at all, return 600.\n\nReturns:\nint: The maximum value in the histogram.", "source": "codesearchnet"}
{"code": "def get_current_round(self, tournament=1):\n        \n        \n        query = \n        arguments = {'tournament': tournament}\n        data = self.raw_query(query, arguments)['data']['rounds'][0]\n        if data is None:\n            return None\n        round_num = data[\"number\"]\n        return round_num", "docstring": "Get number of the current active round.\n\nArgs:\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\nint: number of the current active round\n\nExample:\n>>> NumerAPI().get_current_round()\n104", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    accounts = match.get('Accounts', {})\n    for name_account, account in iter(accounts.items()):\n      first_name = account.get('FirstName', '<FirstName>')\n      last_name = account.get('LastName', '<LastName>')\n      general_description = '{0:s} ({1:s} {2:s})'.format(\n          name_account, first_name, last_name)\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.key = name_account\n      event_data.root = '/Accounts'\n\n      datetime_value = account.get('CreationDate', None)\n      if datetime_value:\n        event_data.desc = 'Configured Apple account {0:s}'.format(\n            general_description)\n\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      datetime_value = account.get('LastSuccessfulConnect', None)\n      if datetime_value:\n        event_data.desc = 'Connected Apple account {0:s}'.format(\n            general_description)\n\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      datetime_value = account.get('ValidationDate', None)\n      if datetime_value:\n        event_data.desc = 'Last validation Apple account {0:s}'.format(\n            general_description)\n\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Apple Account entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def __get_default_form_data_input(self, elements):\n        \n\n        form_data = OrderedDict()\n\n        for element in elements:\n            default_value = self.__get_default_value_from_element(element)\n\n            if default_value is False:\n                continue\n\n            form_data[element[\"name\"]] = default_value\n\n        return form_data", "docstring": "Get the default form data {key: value} for the given elements.\n\nArgs:\nelements list(obj): Soup elements.\n\nReturns:\nobj: The {key: value} form data", "source": "juraj-google-style"}
{"code": "def cumulative_distribution(self, X):\n        \n        self.check_fit()\n\n        U, V = self.split_matrix(X)\n\n        if (V == 0).all() or (U == 0).all():\n            return np.zeros(V.shape[0])\n\n        else:\n            cdfs = [\n                np.power(\n                    np.power(U[i], -self.theta) + np.power(V[i], -self.theta) - 1,\n                    -1.0 / self.theta\n                )\n                if (U[i] > 0 and V[i] > 0) else 0\n                for i in range(len(U))\n            ]\n\n            return np.array([max(x, 0) for x in cdfs])", "docstring": "Computes the cumulative distribution function for the copula, :math:`C(u, v)`\n\nArgs:\nX: `np.ndarray`\n\nReturns:\nnp.array: cumulative probability", "source": "juraj-google-style"}
{"code": "def load_from_tarfile(session, tarfile_path, check_for_duplicates, pkts_per_commit=1000):\n    tf_stream = tarfile_xml_generator(tarfile_path)\n    logger.info(('Loading: ' + tarfile_path))\n    n_parsed = 0\n    n_loaded = 0\n    for tarinf in tf_stream:\n        try:\n            v = vp.loads(tarinf.xml, check_version=False)\n            if (v.attrib['version'] != '2.0'):\n                logger.debug('Packet: {} is not VO-schema version 2.0.'.format(tarinf.name))\n            n_parsed += 1\n        except:\n            logger.exception('Error loading file {}, skipping'.format(tarinf.name))\n            continue\n        try:\n            new_row = Voevent.from_etree(v)\n            if check_for_duplicates:\n                if ivorn_present(session, new_row.ivorn):\n                    logger.debug('Ignoring duplicate ivorn: {} in file {}'.format(new_row.ivorn, tarinf.name))\n                    continue\n            session.add(new_row)\n            n_loaded += 1\n        except:\n            logger.exception('Error converting file {} to database row, skipping'.format(tarinf.name))\n            continue\n        if ((n_loaded % pkts_per_commit) == 0):\n            session.commit()\n    session.commit()\n    logger.info('Successfully parsed {} packets, of which loaded {}.'.format(n_parsed, n_loaded))\n    return (n_parsed, n_loaded)", "docstring": "Iterate through xml files in a tarball and attempt to load into database.\n\n.. warning::\nVery slow with duplicate checking enabled.\n\nReturns:\ntuple: (n_parsed, n_loaded) - Total number of packets parsed from\ntarbar, and number successfully loaded.", "source": "codesearchnet"}
{"code": "def fit_transform(self, *args, **kwargs):\n        \n        self.fit(*args, **kwargs)\n        return self.transform(*args, **kwargs)", "docstring": "Performs fit followed by transform.\n\nThis method simply combines fit and transform.\n\nArgs:\nargs: positional arguments (can be anything)\nkwargs: keyword arguments (can be anything)\n\nReturns:\ndict: output", "source": "juraj-google-style"}
{"code": "def call_rpc_external(self, address, rpc_id, arg_payload, timeout=10.0):\n    self.verify_calling_thread(False, 'call_rpc_external is for use **outside** of the event loop')\n    response = CrossThreadResponse()\n    self._loop.call_soon_threadsafe(self._rpc_queue.put_rpc, address, rpc_id, arg_payload, response)\n    try:\n        return response.wait(timeout)\n    except RPCRuntimeError as err:\n        return err.binary_error", "docstring": "Call an RPC from outside of the event loop and block until it finishes.\n\nThis is the main method by which a caller outside of the EmulationLoop\ncan inject an RPC into the EmulationLoop and wait for it to complete.\nThis method is synchronous so it blocks until the RPC completes or the\ntimeout expires.\n\nArgs:\naddress (int): The address of the mock tile this RPC is for\nrpc_id (int): The number of the RPC\npayload (bytes): A byte string of payload parameters up to 20 bytes\ntimeout (float): The maximum time to wait for the RPC to finish.\n\nReturns:\nbytes: The response payload from the RPC", "source": "codesearchnet"}
{"code": "def GenApiConfig(service_class_names, config_string_generator=None, hostname=None, application_path=None, **additional_kwargs):\n    api_service_map = collections.OrderedDict()\n    resolved_services = []\n    for service_class_name in service_class_names:\n        (module_name, base_service_class_name) = service_class_name.rsplit('.', 1)\n        module = __import__(module_name, fromlist=base_service_class_name)\n        service = getattr(module, base_service_class_name)\n        if hasattr(service, 'get_api_classes'):\n            resolved_services.extend(service.get_api_classes())\n        elif ((not isinstance(service, type)) or (not issubclass(service, remote.Service))):\n            raise TypeError(('%s is not a ProtoRPC service' % service_class_name))\n        else:\n            resolved_services.append(service)\n    for resolved_service in resolved_services:\n        services = api_service_map.setdefault((resolved_service.api_info.name, resolved_service.api_info.api_version), [])\n        services.append(resolved_service)\n    app_yaml_hostname = _GetAppYamlHostname(application_path)\n    service_map = collections.OrderedDict()\n    config_string_generator = (config_string_generator or api_config.ApiConfigGenerator())\n    for (api_info, services) in api_service_map.iteritems():\n        assert services, 'An API must have at least one ProtoRPC service'\n        hostname = (services[0].api_info.hostname or hostname or app_yaml_hostname)\n        service_map[('%s-%s' % api_info)] = config_string_generator.pretty_print_config_to_json(services, hostname=hostname, **additional_kwargs)\n    return service_map", "docstring": "Write an API configuration for endpoints annotated ProtoRPC services.\n\nArgs:\nservice_class_names: A list of fully qualified ProtoRPC service classes.\nconfig_string_generator: A generator object that produces API config strings\nusing its pretty_print_config_to_json method.\nhostname: A string hostname which will be used as the default version\nhostname. If no hostname is specificied in the @endpoints.api decorator,\nthis value is the fallback.\napplication_path: A string with the path to the AppEngine application.\n\nRaises:\nTypeError: If any service classes don't inherit from remote.Service.\nmessages.DefinitionNotFoundError: If a service can't be found.\n\nReturns:\nA map from service names to a string containing the API configuration of the\nservice in JSON format.", "source": "codesearchnet"}
{"code": "def quantize(self, mode, **kwargs):\n    from keras.src.dtype_policies import QUANTIZATION_MODES\n    type_check = kwargs.pop('type_check', True)\n    if kwargs:\n        raise ValueError(f'Unrecognized keyword arguments passed to {self.__class__.__name__}: {kwargs}')\n    if mode not in QUANTIZATION_MODES:\n        raise ValueError(f'Invalid quantization mode. Expected one of {QUANTIZATION_MODES}. Received: mode={mode}')\n    mode_changed = False\n    for layer in self._flatten_layers():\n        list_of_sublayers = list(layer._flatten_layers())\n        if len(list_of_sublayers) == 1:\n            try:\n                layer.quantize(mode, type_check=type_check)\n                mode_changed = True\n            except NotImplementedError as e:\n                warnings.warn(str(e))\n    if mode_changed:\n        self.train_function = None\n        self.test_function = None\n        self.predict_function = None", "docstring": "Quantize the weights of the model.\n\nNote that the model must be built first before calling this method.\n`quantize` will recursively call `quantize(mode)` in all layers and\nwill be skipped if the layer doesn't implement the function.\n\nArgs:\nmode: The mode of the quantization. Only 'int8' is supported at this\ntime.", "source": "github-repos"}
{"code": "def __init__(self, data=None, top=None):\n        \n        self._triples = []\n        self._top = None\n\n        if data is None:\n            data = []\n        else:\n            data = list(data)  \n\n        if data:\n            self._triples.extend(\n                Triple(*t, inverted=getattr(t, 'inverted', None))\n                for t in data\n            )\n            \n            if top is None:\n                top = data[0][0]\n            self.top = top", "docstring": "Create a Graph from an iterable of triples.\n\nArgs:\ndata: an iterable of triples (Triple objects or 3-tuples)\ntop: the node identifier of the top node; if unspecified,\nthe source of the first triple is used\nExample:\n\n>>> Graph([\n...     ('b', 'instance', 'bark'),\n...     ('d', 'instance', 'dog'),\n...     ('b', 'ARG1', 'd')\n... ])", "source": "juraj-google-style"}
{"code": "def __init__(self, fields: List[Field], name: Optional[str]=None, base_schema_list: Optional[List['Schema']]=None, description: Optional[str]=None, *, allow_nonconst_keys: bool=False, metadata: Optional[Dict[str, Any]]=None, for_cls: Optional[Type[Any]]=None):\n    if not isinstance(fields, list):\n        raise TypeError(f\"Argument 'fields' must be a list. Encountered: {fields}.\")\n    self._name = name\n    self._allow_nonconst_keys = allow_nonconst_keys\n    self._fields = {f.key: f for f in fields}\n    self._description = description\n    self._metadata = metadata or {}\n    if for_cls is not None:\n        for f in fields:\n            if f.origin is None:\n                f.set_origin(for_cls)\n    self._dynamic_field = None\n    for f in fields:\n        if not f.key.is_const:\n            self._dynamic_field = f\n            break\n    if base_schema_list:\n        base = Schema.merge(base_schema_list)\n        self.extend(base)\n    if not allow_nonconst_keys and self._dynamic_field is not None:\n        raise ValueError(f\"NonConstKey is not allowed in schema. Encountered '{self._dynamic_field.key}'.\")", "docstring": "Constructor.\n\nArgs:\nfields: A list of Field as the definition of the schema. The order of the\nfields will be preserved.\nname: Optional name of this schema. Useful for debugging.\nbase_schema_list: List of schema used as base. When present, fields\nfrom these schema will be copied to this schema. Fields from the\nlatter schema will override those from the former ones.\ndescription: Optional str as the description for the schema.\nallow_nonconst_keys: Whether immediate fields can use non-const keys.\nmetadata: Optional dict of user objects as schema-level metadata.\nfor_cls: Optional class that this schema applies to.\n\nRaises:\nTypeError: Argument `fields` is not a list.\nKeyError: If a field name contains characters ('.') which is not\nallowed, or a field name from `fields` already exists in parent\nschema.\nValueError: When failed to create ValueSpec from `fields`.\nIt could be an unsupported value type, default value doesn't conform\nwith value specification, etc.", "source": "github-repos"}
{"code": "def get_ignition_type(root):\n    \n    properties = {}\n    elem = root.find('ignitionType')\n\n    if elem is None:\n        raise MissingElementError('ignitionType')\n    elem = elem.attrib\n\n    if 'target' in elem:\n        ign_target = elem['target'].rstrip(';').upper()\n    else:\n        raise MissingAttributeError('target', 'ignitionType')\n\n    if 'type' in elem:\n        ign_type = elem['type']\n        if ign_type == 'baseline max intercept from d/dt':\n            ign_type = 'd/dt max extrapolated'\n    else:\n        raise MissingAttributeError('type', 'ignitionType')\n\n    \n    if len(ign_target.split(';')) > 1:\n        raise NotImplementedError('Multiple ignition targets not supported.')\n\n    \n    \n    if ign_target == 'OHEX':\n        ign_target = 'OH*'\n    elif ign_target == 'CHEX':\n        ign_target = 'CH*'\n    elif ign_target == 'P':\n        ign_target = 'pressure'\n    elif ign_target == 'T':\n        ign_target = 'temperature'\n\n    if ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']:\n        raise KeywordError(ign_target + ' not valid ignition target')\n\n    if ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']:\n        raise KeywordError(ign_type + ' not valid ignition type')\n\n    properties['type'] = ign_type\n    properties['target'] = ign_target\n\n    return properties", "docstring": "Gets ignition type and target.\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with ignition type/target information", "source": "juraj-google-style"}
{"code": "def wait_for_transform_job(self, job, poll=5):\n        \n        desc = _wait_until(lambda: _transform_job_status(self.sagemaker_client, job), poll)\n        self._check_job_status(job, desc, 'TransformJobStatus')\n        return desc", "docstring": "Wait for an Amazon SageMaker transform job to complete.\n\nArgs:\njob (str): Name of the transform job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeTransformJob`` API.\n\nRaises:\nValueError: If the transform job fails.", "source": "juraj-google-style"}
{"code": "def add_text(self, coords, text, color=(0, 0, 0)):\n    source = vtk.vtkVectorText()\n    source.SetText(text)\n    mapper = vtk.vtkPolyDataMapper()\n    mapper.SetInputConnection(source.GetOutputPort())\n    follower = vtk.vtkFollower()\n    follower.SetMapper(mapper)\n    follower.GetProperty().SetColor(color)\n    follower.SetPosition(coords)\n    follower.SetScale(0.5)\n    self.ren.AddActor(follower)\n    follower.SetCamera(self.ren.GetActiveCamera())", "docstring": "Add text at a coordinate.\n\nArgs:\ncoords: Coordinates to add text at.\ntext: Text to place.\ncolor: Color for text as RGB. Defaults to black.", "source": "codesearchnet"}
{"code": "def update_unexpected_keys(self, model, unexpected_keys: List[str], prefix: str) -> List[str]:\n    if self.run_compressed:\n        return unexpected_keys\n    keys_to_ignore = self.compressor.get_unexpected_file_keys(model)\n    return [key for key in unexpected_keys if not any((re.match(f'.*{pattern}', key) for pattern in keys_to_ignore))]", "docstring": "Override this method if you want to adjust the `unexpected_keys`.\n\nArgs:\nunexpected_keys (`List[str]`, *optional*):\nThe list of unexpected keys in the checkpoint compared to the state dict of the model", "source": "github-repos"}
{"code": "def save_json(dictionary, path, pretty=False, sortkeys=False):\n    with open(path, 'w') as f:\n        if pretty:\n            indent = 2\n            separators = (',', ': ')\n        else:\n            indent = None\n            separators = (', ', ': ')\n        json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)", "docstring": "Save dictionary to JSON file preserving order if it is an OrderedDict\n\nArgs:\ndictionary (Dict): Python dictionary to save\npath (str): Path to JSON file\npretty (bool): Whether to pretty print. Defaults to False.\nsortkeys (bool): Whether to sort dictionary keys. Defaults to False.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def nr_profiles(arr, genomes):\n    gs_collapse = []\n    genome_idx_dict = {}\n    indices = []\n    patt_dict = {}\n    for (i, g) in enumerate(genomes):\n        p = arr[(i, :)].tostring()\n        if (p in patt_dict):\n            parent = patt_dict[p]\n            idx = genome_idx_dict[parent]\n            gs_collapse[idx].append(g)\n        else:\n            indices.append(i)\n            patt_dict[p] = g\n            genome_idx_dict[g] = len(gs_collapse)\n            gs_collapse.append([g])\n    return (arr[(indices, :)], gs_collapse)", "docstring": "Get a condensed cgMLST pairwise distance matrix for specified Genomes_\nwhere condensed means redundant cgMLST profiles are only represented once in the distance matrix.\n\nArgs:\nuser_name (list): List of Genome_ names to retrieve condensed distance matrix for\n\nReturns:\n(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_", "source": "codesearchnet"}
{"code": "def to_html(value: Any, *, name: Optional[str]=None, root_path: Optional[utils.KeyPath]=None, view_id: str='html-tree-view', **kwargs) -> Html:\n    content = base.view(value, name=name, root_path=root_path, view_id=view_id, **kwargs)\n    assert isinstance(content, Html), content\n    return content", "docstring": "Returns the HTML representation of a value.\n\nArgs:\nvalue: The value to render.\nname: The name of the value.\nroot_path: The root path of the value.\nview_id: The ID of the view to render the value.\nSee `pg.views.HtmlView.dir()` for all available HTML view IDs.\n**kwargs: Additional keyword arguments passed from `pg.to_html`, wich\nwill be passed to the `HtmlView.render_xxx()` (thus\n`Extension._html_xxx()`) methods.\n\nReturns:\nThe rendered HTML.", "source": "github-repos"}
{"code": "def execute_processing_block(pb_id: str, log_level='DEBUG'):\n    init_logger('sip', show_log_origin=True, propagate=False, log_level=log_level)\n    LOG.info(('+' * 40))\n    LOG.info('+ Executing Processing block: %s!', pb_id)\n    LOG.info(('+' * 40))\n    LOG.info('Processing Block Controller version: %s', __version__)\n    LOG.info('Docker Swarm API version: %s', sip_swarm_api_version)\n    LOG.info('Configuration database API version: %s', config_db_version)\n    pb = ProcessingBlock(pb_id)\n    LOG.info('Starting workflow %s %s', pb.workflow_id, pb.workflow_version)\n    pb.set_status('running')\n    docker = DockerSwarmClient()\n    workflow_stage_dict = {}\n    for stage in pb.workflow_stages:\n        workflow_stage_dict[stage.id] = deepcopy(stage.config)\n        workflow_stage_dict[stage.id]['services'] = dict()\n    while True:\n        time.sleep(0.1)\n        for workflow_stage in pb.workflow_stages:\n            _start_workflow_stages(pb, pb_id, workflow_stage_dict, workflow_stage, docker)\n            _update_workflow_stages(workflow_stage_dict[workflow_stage.id], workflow_stage, docker)\n        if _abort_workflow(pb, workflow_stage_dict, docker):\n            break\n        if _workflow_complete(workflow_stage_dict):\n            break\n    pb_list = ProcessingBlockList()\n    pb_list.set_complete(pb_id)\n    pb.set_status('completed')\n    LOG.info(('-' * 40))\n    LOG.info('- Destroying PBC for %s', pb_id)\n    LOG.info(('-' * 40))\n    return pb.status", "docstring": "Execute a processing block.\n\nCelery tasks that executes a workflow defined in a Configuration database\nProcessing Block data object.\n\nArgs:\npb_id (str): The PB id for the PBC\nlog_level (str): Python logging level.", "source": "codesearchnet"}
{"code": "def setData(self, data):\n        \n        try:\n            bytestream = pickle.dumps(data)\n            super(MimeData, self).setData(self._mimeType, bytestream)\n        except TypeError:\n            raise TypeError(self.tr(\"can not pickle added data\"))\n        except:\n            raise", "docstring": "Add some data.\n\nArgs:\ndata (object): Object to add as data. This object has to be pickable.\nQt objects don't work!\n\nRaises:\nTypeError if data is not pickable", "source": "juraj-google-style"}
{"code": "def update_account_info(self):\n    request = self._get_request()\n    return request.post(self.ACCOUNT_UPDATE_URL, {'callback_url': self.account.callback_url})", "docstring": "Update current account information\n\nAt the moment you can only update your callback_url.\n\nReturns:\nAn Account object", "source": "codesearchnet"}
{"code": "def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):\n    shortest_distance = ((257 * 257) * 3)\n    index = 0\n    if (not color_table):\n        if (not color_table8):\n            build_color_tables()\n        color_table = color_table8\n    for (i, values) in enumerate(color_table):\n        rd = (r - values[0])\n        gd = (g - values[1])\n        bd = (b - values[2])\n        this_distance = (((rd * rd) + (gd * gd)) + (bd * bd))\n        if (this_distance < shortest_distance):\n            index = i\n            shortest_distance = this_distance\n    return index", "docstring": "Given three integers representing R, G, and B,\nreturn the nearest color index.\n\nArguments:\nr:    int - of range 0…255\ng:    int - of range 0…255\nb:    int - of range 0…255\n\nReturns:\nint, None: index, or None on error.", "source": "codesearchnet"}
{"code": "def generate_cot(context, parent_path=None):\n    \n    body = generate_cot_body(context)\n    schema = load_json_or_yaml(\n        context.config['cot_schema_path'], is_path=True,\n        exception=ScriptWorkerException,\n        message=\"Can't read schema file {}: %(exc)s\".format(context.config['cot_schema_path'])\n    )\n    validate_json_schema(body, schema, name=\"chain of trust\")\n    body = format_json(body)\n    parent_path = parent_path or os.path.join(context.config['artifact_dir'], 'public')\n    unsigned_path = os.path.join(parent_path, 'chain-of-trust.json')\n    write_to_file(unsigned_path, body)\n    if context.config['sign_chain_of_trust']:\n        ed25519_signature_path = '{}.sig'.format(unsigned_path)\n        ed25519_private_key = ed25519_private_key_from_file(context.config['ed25519_private_key_path'])\n        ed25519_signature = ed25519_private_key.sign(body.encode('utf-8'))\n        write_to_file(ed25519_signature_path, ed25519_signature, file_type='binary')\n    return body", "docstring": "Format and sign the cot body, and write to disk.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nparent_path (str, optional): The directory to write the chain of trust\nartifacts to.  If None, this is ``artifact_dir/public/``.\nDefaults to None.\n\nReturns:\nstr: the contents of the chain of trust artifact.\n\nRaises:\nScriptWorkerException: on schema error.", "source": "juraj-google-style"}
{"code": "def shift_by_n_processors(self, x, mesh_axis, offset, wrap):\n    \n    n = self.shape[mesh_axis].size\n    source_pcoord = []\n    for i in xrange(n):\n      c = i - offset\n      if c != c % n:\n        if wrap:\n          c = c % n\n        else:\n          c = None\n      source_pcoord.append(c)\n    return self.receive(x, mesh_axis, source_pcoord)", "docstring": "Receive the slice from processor pcoord - offset.\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer\noffset: an integer\nwrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.", "source": "juraj-google-style"}
{"code": "def with_input_types(self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):\n    super().with_input_types(input_type_hint)\n    side_inputs_arg_hints = native_type_compatibility.convert_to_beam_types(side_inputs_arg_hints)\n    side_input_kwarg_hints = native_type_compatibility.convert_to_beam_types(side_input_kwarg_hints)\n    for si in side_inputs_arg_hints:\n        validate_composite_type_param(si, 'Type hints for a PTransform')\n    for si in side_input_kwarg_hints.values():\n        validate_composite_type_param(si, 'Type hints for a PTransform')\n    self.side_inputs_types = side_inputs_arg_hints\n    return WithTypeHints.with_input_types(self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)", "docstring": "Annotates the types of main inputs and side inputs for the PTransform.\n\nArgs:\ninput_type_hint: An instance of an allowed built-in type, a custom class,\nor an instance of a typehints.TypeConstraint.\n*side_inputs_arg_hints: A variable length argument composed of\nof an allowed built-in type, a custom class, or a\ntypehints.TypeConstraint.\n**side_input_kwarg_hints: A dictionary argument composed of\nof an allowed built-in type, a custom class, or a\ntypehints.TypeConstraint.\n\nExample of annotating the types of side-inputs::\n\nFlatMap().with_input_types(int, int, bool)\n\nRaises:\n:class:`TypeError`: If **type_hint** is not a valid type-hint.\nSee\n:func:`~apache_beam.typehints.typehints.validate_composite_type_param`\nfor further details.\n\nReturns:\n:class:`PTransform`: A reference to the instance of this particular\n:class:`PTransform` object. This allows chaining type-hinting related\nmethods.", "source": "github-repos"}
{"code": "def __getattr__(self, key):\n        \n        if key == 'str' and self.weld_type == WeldVec(WeldChar()):\n            return StringSeriesWeld(\n                self.expr,\n                self.weld_type,\n                self.df,\n                self.column_name\n            )\n        raise AttributeError(\"Attr %s does not exist\" % key)", "docstring": "Summary\n\nArgs:\nkey (TYPE): Description\n\nReturns:\nTYPE: Description\n\nRaises:\nException: Description", "source": "juraj-google-style"}
{"code": "def update_resource_assignments(self, id_or_uri, resource_assignments, timeout=(- 1)):\n    uri = (self._client.build_uri(id_or_uri) + '/resource-assignments')\n    headers = {'Content-Type': 'application/json'}\n    return self._client.patch_request(uri, resource_assignments, timeout=timeout, custom_headers=headers)", "docstring": "Modifies scope membership by adding or removing resource assignments.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\nresource_assignments (dict):\nA dict object with a list of resource URIs to be added and a list of resource URIs to be removed.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "codesearchnet"}
{"code": "def truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'truncated_normal', [shape, mean, stddev]) as name:\n        shape_tensor = _shape_tensor(shape)\n        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        rnd = self._truncated_normal(shape_tensor, dtype=dtype)\n        mul = rnd * stddev_tensor\n        return math_ops.add(mul, mean_tensor, name=name)", "docstring": "Outputs random values from a truncated normal distribution.\n\nThe generated values follow a normal distribution with specified mean and\nstandard deviation, except that values whose magnitude is more than\n2 standard deviations from the mean are dropped and re-picked.\n\nArgs:\nshape: A 1-D integer Tensor or Python array. The shape of the output\ntensor.\nmean: A 0-D Tensor or Python value of type `dtype`. The mean of the\ntruncated normal distribution.\nstddev: A 0-D Tensor or Python value of type `dtype`. The standard\ndeviation of the normal distribution, before truncation.\ndtype: The type of the output.\nname: A name for the operation (optional).\n\nReturns:\nA tensor of the specified shape filled with random truncated normal\nvalues.", "source": "github-repos"}
{"code": "def ingest_data(ingested_dataset_path: str, base_artifact_path: str):\n    timestamp = int(time.time())\n    target_path = f'{base_artifact_path}/ingestion/ingested_dataset_{timestamp}.jsonl'\n    target_path_gcsfuse = target_path.replace('gs:\n    Path(target_path_gcsfuse).parent.mkdir(parents=True, exist_ok=True)\n    with open(target_path_gcsfuse, 'w') as f:\n        f.writelines(['{\"image_id\": 318556, \"id\": 255, \"caption\": \"An angled view of a beautifully decorated bathroom.\", \"image_url\": \"http:\n    Path(ingested_dataset_path).parent.mkdir(parents=True, exist_ok=True)\n    with open(ingested_dataset_path, 'w') as f:\n        f.write(target_path)", "docstring": "Data ingestion step that returns an uri\nto the data it has 'ingested' as jsonlines.\n\nArgs:\ndata_ingestion_target (str): uri to the data that was scraped and\ningested by the component", "source": "github-repos"}
{"code": "def deserialize(self, encoded_accumulator):\n    pass", "docstring": "Deserialize an accumulator received from 'serialize()'.\n\nThis function deserializes an accumulator serialized by 'serialize()'.\n\nArgs:\nencoded_accumulator: A byte string representing an accumulator.\n\nReturns:\nThe accumulator represented by the passed byte_string.", "source": "github-repos"}
{"code": "def match(self, path):\n        \n        match = self._re.search(path)\n        if match is None:\n            return None\n        args = []\n        kwargs = {}\n        for i, wildcard in enumerate(self._wildcards):\n            if wildcard.name == '!':\n                continue\n            value = wildcard.value(match.groups()[i])\n            if not wildcard.name:\n                args.append(value)\n            else:\n                kwargs[wildcard.name] = value\n        return self._callback, args, kwargs", "docstring": "Return route handler with arguments if path matches this route.\n\nArguments:\npath (str): Request path\n\nReturns:\ntuple or None: A tuple of three items:\n\n1. Route handler (callable)\n2. Positional arguments (list)\n3. Keyword arguments (dict)\n\n``None`` if the route does not match the path.", "source": "juraj-google-style"}
{"code": "def align_up(offset, align):\n    remain = (offset % align)\n    if (remain == 0):\n        return offset\n    else:\n        return (offset + (align - remain))", "docstring": "Align ``offset`` up to ``align`` boundary.\n\nArgs:\noffset (int): value to be aligned.\nalign (int): alignment boundary.\n\nReturns:\nint: aligned offset.\n\n>>> align_up(3, 2)\n4\n\n>>> align_up(3, 1)\n3", "source": "codesearchnet"}
{"code": "def compress_encoder(inputs,\n                     hparams,\n                     strides=(2, 2),\n                     kernel_size=(3, 3),\n                     name=None):\n  \n  with tf.variable_scope(name, default_name=\"compress\"):\n    x = inputs\n    for i in range(hparams.num_compress_steps \n      with tf.variable_scope(\"compress_conv_%d\" % i):\n        y = common_layers.conv_block(\n            common_layers.layer_norm(\n                x, hparams.hidden_size, name=\"lnorm\"),\n            hparams.hidden_size,\n            dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)],\n            strides=strides,\n            padding=\"SAME\",\n            name=\"compress_conv_%d\" % i)\n        y = tf.nn.dropout(y, 1.0 - hparams.dropout)\n        if hparams.do_compress_attend:\n          y = compress_self_attention_layer(\n              x, hparams, name=\"compress_selfatt_%d\" % i)\n          y += x\n        x = y\n\n    x = residual_block_layer(x, hparams)\n\n    \n    \n    shape_x = common_layers.shape_list(x)\n    x = tf.layers.dense(x,\n                        hparams.num_latents * hparams.hidden_size,\n                        name=name + \"_dense\")\n    return tf.reshape(x, [shape_x[0],\n                          shape_x[1] * shape_x[2] * hparams.num_latents,\n                          hparams.hidden_size])", "docstring": "Encoder that compresses 2-D inputs by 2**num_compress_steps.\n\nArgs:\ninputs: Tensor of shape [batch, height, width, channels].\nhparams: HParams.\nstrides: Tuple, strides for conv block.\nkernel_size: Tuple, kernel window size for conv block.\nname: string, variable scope.\n\nReturns:\nTensor of shape [batch, latent_length, hparams.hidden_size], where\nlatent_length is\nhparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).", "source": "juraj-google-style"}
{"code": "def get_gcps(self):\n    gcps = self.filehandle.gcps\n    gcp_array = np.array([(p.row, p.col, p.x, p.y, p.z) for p in gcps[0]])\n    ypoints = np.unique(gcp_array[(:, 0)])\n    xpoints = np.unique(gcp_array[(:, 1)])\n    gcp_lons = gcp_array[(:, 2)].reshape(ypoints.shape[0], xpoints.shape[0])\n    gcp_lats = gcp_array[(:, 3)].reshape(ypoints.shape[0], xpoints.shape[0])\n    gcp_alts = gcp_array[(:, 4)].reshape(ypoints.shape[0], xpoints.shape[0])\n    return ((xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), gcps)", "docstring": "Read GCP from the GDAL band.\n\nArgs:\nband (gdal band): Measurement band which comes with GCP's\ncoordinates (tuple): A tuple with longitude and latitude arrays\n\nReturns:\npoints (tuple): Pixel and Line indices 1d arrays\ngcp_coords (tuple): longitude and latitude 1d arrays", "source": "codesearchnet"}
{"code": "def create_resource(self, parent_id=''):\n    resource_name = self.trigger_settings.get('resource', '')\n    resource_name = resource_name.replace('/', '')\n    if (not self.resource_id):\n        created_resource = self.client.create_resource(restApiId=self.api_id, parentId=parent_id, pathPart=resource_name)\n        self.resource_id = created_resource['id']\n        self.log.info('Successfully created resource')\n    else:\n        self.log.info('Resource already exists. To update resource please delete existing resource: %s', resource_name)", "docstring": "Create the specified resource.\n\nArgs:\nparent_id (str): The resource ID of the parent resource in API Gateway", "source": "codesearchnet"}
{"code": "def delete(filepath):\n    remove_acl(filepath)\n    remove_immutable_attribute(filepath)\n    if (os.path.isfile(filepath) or os.path.islink(filepath)):\n        os.remove(filepath)\n    elif os.path.isdir(filepath):\n        shutil.rmtree(filepath)", "docstring": "Delete the given file, directory or link.\n\nIt Should support undelete later on.\n\nArgs:\nfilepath (str): Absolute full path to a file. e.g. /path/to/file", "source": "codesearchnet"}
{"code": "def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):\n    if self._training_state is not None:\n        return self._training_state.maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)\n    return initial_epoch", "docstring": "Maybe load initial epoch from ckpt considering possible worker recovery.\n\nRefer to tensorflow/python/keras/distribute/worker_training_state.py\nfor more information.\n\nArgs:\ninitial_epoch: The original initial_epoch user passes in in `fit()`.\nmode: The mode for running `model.fit()`.\n\nReturns:\nIf the training is recovering from previous failure under multi-worker\ntraining setting, return the epoch the training is supposed to continue\nat. Otherwise, return the `initial_epoch` the user passes in.", "source": "github-repos"}
{"code": "def parse_application_name(setup_filename):\n    with open(setup_filename, 'rt') as setup_file:\n        fst = RedBaron(setup_file.read())\n        for node in fst:\n            if ((node.type == 'atomtrailers') and (str(node.name) == 'setup')):\n                for call in node.call:\n                    if (str(call.name) == 'name'):\n                        value = call.value\n                        if hasattr(value, 'to_python'):\n                            value = value.to_python()\n                        name = str(value)\n                        break\n                if name:\n                    break\n    return name", "docstring": "Parse a setup.py file for the name.\n\nReturns:\nname, or None", "source": "codesearchnet"}
{"code": "def get_metadata(changeset):\n    url = 'https:\n    return ET.fromstring(requests.get(url).content).getchildren()[0]", "docstring": "Get the metadata of a changeset using the OSM API and return it as a XML\nElementTree.\n\nArgs:\nchangeset: the id of the changeset.", "source": "codesearchnet"}
{"code": "def minimal_selector(self, complete_selector):\n    \n    if complete_selector not in self._selector_map:\n      raise KeyError(\"No value with selector '{}'.\".format(complete_selector))\n\n    selector_components = complete_selector.split('.')\n    node = self._selector_tree\n\n    start = None\n    for i, component in enumerate(reversed(selector_components)):\n      if len(node) == 1:\n        if start is None:\n          start = -i  \n      else:\n        start = None\n      node = node[component]\n\n    if len(node) > 1:  \n      return complete_selector\n    return '.'.join(selector_components[start:])", "docstring": "Returns the minimal selector that uniquely matches `complete_selector`.\n\nArgs:\ncomplete_selector: A complete selector stored in the map.\n\nReturns:\nA partial selector that unambiguously matches `complete_selector`.\n\nRaises:\nKeyError: If `complete_selector` is not in the map.", "source": "juraj-google-style"}
{"code": "def _string_to_components(spec=None):\n    cached_result = _STRING_TO_COMPONENTS_CACHE.get(spec)\n    if cached_result is not None:\n        return cached_result\n    raw_spec = spec\n    job, replica, task, device_type, device_index = (None, None, None, None, None)\n    spec = spec or ''\n    splits = [x.split(':') for x in spec.split('/')]\n    valid_device_types = DeviceSpecV2._get_valid_device_types()\n    for y in splits:\n        ly = len(y)\n        if y:\n            if ly == 2 and y[0] == 'job':\n                job = y[1]\n            elif ly == 2 and y[0] == 'replica':\n                replica = y[1]\n            elif ly == 2 and y[0] == 'task':\n                task = y[1]\n            elif (ly == 1 or ly == 2) and y[0].upper() in valid_device_types:\n                if device_type is not None:\n                    raise ValueError(f'Multiple device types are not allowed while parsing the device spec: {spec}.')\n                device_type = y[0].upper()\n                if ly == 2 and y[1] != '*':\n                    device_index = int(y[1])\n            elif ly == 3 and y[0] == 'device':\n                if device_type is not None:\n                    raise ValueError(f'Multiple device types are not allowed while parsing the device spec: {spec}.')\n                device_type = y[1]\n                if y[2] != '*':\n                    device_index = int(y[2])\n            elif ly and y[0] != '':\n                raise ValueError(f\"Unknown attribute '{y[0]}' is encountered while parsing the device spec: '{spec}'.\")\n    output = (job, replica, task, device_type, device_index)\n    _STRING_TO_COMPONENTS_CACHE[raw_spec] = output\n    return output", "docstring": "Stateless portion of device spec string parsing.\n\nArgs:\nspec: An optional string specifying a device specification.\n\nReturns:\nThe parsed components of `spec`. Note that the result of this function\nmust go through attribute setters of DeviceSpec, and should therefore NOT\nbe used directly.", "source": "github-repos"}
{"code": "def Print(self, x, data, message, **kwargs):  \n    \n    del data, message, kwargs\n    tf.logging.warning(\"Warning - mtf.Print not implemented for this mesh type\")\n    return x", "docstring": "Calls tf.Print.\n\nArgs:\nx: LaidOutTensor.\ndata: list of LaidOutTensor.\nmessage: str.\n**kwargs: keyword arguments to tf.print.\n\nReturns:\nLaidOutTensor.", "source": "juraj-google-style"}
{"code": "def reindex(self, kdims=[], force=False):\n    if (not isinstance(kdims, list)):\n        kdims = [kdims]\n    kdims = [self.get_dimension(kd, strict=True) for kd in kdims]\n    dropped = [kd for kd in self.kdims if (kd not in kdims)]\n    if dropped:\n        raise ValueError('DynamicMap does not allow dropping dimensions, reindex may only be used to reorder dimensions.')\n    return super(DynamicMap, self).reindex(kdims, force)", "docstring": "Reorders key dimensions on DynamicMap\n\nCreate a new object with a reordered set of key dimensions.\nDropping dimensions is not allowed on a DynamicMap.\n\nArgs:\nkdims: List of dimensions to reindex the mapping with\nforce: Not applicable to a DynamicMap\n\nReturns:\nReindexed DynamicMap", "source": "codesearchnet"}
{"code": "def submit(self, command='', blocksize=1, job_name='parsl.auto'):\n    (instance, name) = self.create_instance(command=command)\n    self.provisioned_blocks += 1\n    self.resources[name] = {'job_id': name, 'status': translate_table[instance['status']]}\n    return name", "docstring": "The submit method takes the command string to be executed upon\ninstantiation of a resource most often to start a pilot.\n\nArgs :\n- command (str) : The bash command string to be executed.\n- blocksize (int) : Blocksize to be requested\n\nKWargs:\n- job_name (str) : Human friendly name to be assigned to the job request\n\nReturns:\n- A job identifier, this could be an integer, string etc\n\nRaises:\n- ExecutionProviderException or its subclasses", "source": "codesearchnet"}
{"code": "def get(self, key):\n        \n\n        match = self._get_match(key=key)\n\n        if not match:\n            return None\n\n        return self._get_value_from_match(key=key, match=match)", "docstring": "Gets the value of the property of the given key.\n\nArgs:\nkey (str): Key of the property to look-up.", "source": "juraj-google-style"}
{"code": "def get_token(self, text, start=0):\n    best_class = best_match = None\n    for (token_class, match) in self.matching_tokens(text):\n        if (best_match and (best_match.end() >= match.end())):\n            continue\n        best_match = match\n        best_class = token_class\n    return (best_class, best_match)", "docstring": "Retrieve the next token from some text.\n\nArgs:\ntext (str): the text from which tokens should be extracted\n\nReturns:\n(token_kind, token_text): the token kind and its content.", "source": "codesearchnet"}
{"code": "async def reopen(self):\n    res = (await self.connection('POST', 'tournaments/{}/matches/{}/reopen'.format(self._tournament_id, self._id)))\n    self._refresh_from_json(res)", "docstring": "Reopens a match that was marked completed, automatically resetting matches that follow it\n\n|methcoro|\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def get_stored_hash(self, temp_ver):\n        \n        with open(self._prefixed('%s.hash' % temp_ver.name)) as f:\n            return f.read().strip()", "docstring": "Retrieves the hash for the given template version from the store\n\nArgs:\ntemp_ver (TemplateVersion): template version to retrieve the hash\nfor\n\nReturns:\nstr: hash of the given template version", "source": "juraj-google-style"}
{"code": "def compute_kv(self, memory_antecedent):\n    \n    if not self.shared_kv:\n      raise ValueError(\"compute_kv can only be called with shared_kv\")\n    ret = mtf.einsum(\n        [memory_antecedent, self.wkv], reduced_dims=[self.memory_input_dim])\n    if self.combine_dims:\n      ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)\n    return ret", "docstring": "Compute key/value Tensor kv.\n\nArgs:\nmemory_antecedent: a Tensor with dimensions\n{memory_input_dim} + other_dims\nReturns:\na Tensor with dimensions\nmemory_heads_dims + {key_dim} + other_dims", "source": "juraj-google-style"}
{"code": "def RetrievePluginAsset(self, run, plugin_name, asset_name):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.RetrievePluginAsset(plugin_name, asset_name)", "docstring": "Return the contents for a specific plugin asset from a run.\n\nArgs:\nrun: The string name of the run.\nplugin_name: The string name of a plugin.\nasset_name: The string name of an asset.\n\nReturns:\nThe string contents of the plugin asset.\n\nRaises:\nKeyError: If the asset is not available.", "source": "juraj-google-style"}
{"code": "def _wrap_method(name):\n    method = getattr(datetime.datetime, name)\n\n    @functools.wraps(method, ('__name__', '__doc__'), ())\n    def wrapper(self, *args, **kw):\n        r = method(self, *args, **kw)\n        if (isinstance(r, datetime.datetime) and (not isinstance(r, type(self)))):\n            r = type(self)(r)\n        return r\n    setattr(datetime_tz, name, wrapper)", "docstring": "Wrap a method.\n\nPatch a method which might return a datetime.datetime to return a\ndatetime_tz.datetime_tz instead.\n\nArgs:\nname: The name of the method to patch", "source": "codesearchnet"}
{"code": "def parse_split(cls, header: bytes, body: bytes) -> 'MessageContent':\n        \n        header_lines = cls._find_lines(header)\n        body_lines = cls._find_lines(body)\n        header_view = memoryview(header)\n        body_view = memoryview(body)\n        return cls._parse_split([header_view, body_view], header, body,\n                                header_view, body_view,\n                                header_lines, body_lines)", "docstring": "Parse the header and body bytestrings into message content.\n\nArgs:\nheader: The header bytestring to parse.\nbody: The body bytestring to parse.", "source": "juraj-google-style"}
{"code": "def unregister(self, alias):\n    if (alias not in self._service_objects):\n        raise Error(self._device, ('No service is registered with alias \"%s\".' % alias))\n    service_obj = self._service_objects.pop(alias)\n    if service_obj.is_alive:\n        with expects.expect_no_raises(('Failed to stop service instance \"%s\".' % alias)):\n            service_obj.stop()", "docstring": "Unregisters a service instance.\n\nStops a service and removes it from the manager.\n\nArgs:\nalias: string, the alias of the service instance to unregister.", "source": "codesearchnet"}
{"code": "def _parse_plugin_data_as(content, data_oneof_field):\n    plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)\n    if (plugin_data.version != PLUGIN_DATA_VERSION):\n        raise error.HParamsError(('Only supports plugin_data version: %s; found: %s in: %s' % (PLUGIN_DATA_VERSION, plugin_data.version, plugin_data)))\n    if (not plugin_data.HasField(data_oneof_field)):\n        raise error.HParamsError(('Expected plugin_data.%s to be set. Got: %s' % (data_oneof_field, plugin_data)))\n    return getattr(plugin_data, data_oneof_field)", "docstring": "Returns a data oneof's field from plugin_data.content.\n\nRaises HParamsError if the content doesn't have 'data_oneof_field' set or\nthis file is incompatible with the version of the metadata stored.\n\nArgs:\ncontent: The SummaryMetadata.plugin_data.content to use.\ndata_oneof_field: string. The name of the data oneof field to return.", "source": "codesearchnet"}
{"code": "def add_result(self, test, passed, error=None):\n        \n        self.result[unicode(test.__class__.__name__)] = {\n            'started': self.started,\n            'stopped': time.strftime('%Y-%m-%dT%H:%M:%S'),\n            'passed': passed,\n            'error': error,\n            'executions': SimpleTestResult.executions\n        }\n        if self.auto_reboot_args:\n            os.system('del \"%s\"' % RESUME_SCRIPT_PATH)\n\n        json.dump(OrderedDict(sorted(self.result.items(), key=lambda t: t[0])),\n                  open(self.path, 'w'), indent=2)\n\n        \n        logger.removeHandler(self.log_handler)\n        self.log_handler.close()\n        self.log_handler = None\n        time.sleep(2)\n\n        \n        if not self.keep_explorer:\n            os.system('taskkill /f /im explorer.exe && start explorer.exe')", "docstring": "Record test result into json file\n\nArgs:\ntest (TestCase): The test just run\npassed (bool): Whether the case is passed", "source": "juraj-google-style"}
{"code": "def ClientCertFromCSR(cls, csr):\n    builder = x509.CertificateBuilder()\n    common_name = csr.GetCN()\n    serial = int(common_name.split('.')[1], 16)\n    builder = builder.serial_number(serial)\n    builder = builder.subject_name(x509.Name([x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))\n    now = rdfvalue.RDFDatetime.Now()\n    now_plus_year = (now + rdfvalue.Duration('52w'))\n    builder = builder.not_valid_after(now_plus_year.AsDatetime())\n    now_minus_ten = (now - rdfvalue.Duration('10s'))\n    builder = builder.not_valid_before(now_minus_ten.AsDatetime())\n    ca_cert = config_lib._CONFIG['CA.certificate']\n    builder = builder.issuer_name(ca_cert.GetIssuer())\n    builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())\n    ca_key = config_lib._CONFIG['PrivateKeys.ca_key']\n    return RDFX509Cert(builder.sign(private_key=ca_key.GetRawPrivateKey(), algorithm=hashes.SHA256(), backend=openssl.backend))", "docstring": "Creates a new cert for the given common name.\n\nArgs:\ncsr: A CertificateSigningRequest.\n\nReturns:\nThe signed cert.", "source": "codesearchnet"}
{"code": "def get_json_type(obj):\n    if hasattr(obj, 'get_config'):\n        return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}\n    if type(obj).__module__ == np.__name__:\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        else:\n            return obj.item()\n    if callable(obj):\n        return obj.__name__\n    if type(obj).__name__ == type.__name__:\n        return obj.__name__\n    if isinstance(obj, tensor_shape.Dimension):\n        return obj.value\n    if isinstance(obj, tensor_shape.TensorShape):\n        return obj.as_list()\n    if isinstance(obj, dtypes.DType):\n        return obj.name\n    if isinstance(obj, collections.abc.Mapping):\n        return dict(obj)\n    if obj is Ellipsis:\n        return {'class_name': '__ellipsis__'}\n    if isinstance(obj, wrapt.ObjectProxy):\n        return obj.__wrapped__\n    if isinstance(obj, internal.TypeSpec):\n        try:\n            type_spec_name = type_spec_registry.get_name(type(obj))\n            return {'class_name': 'TypeSpec', 'type_spec': type_spec_name, 'serialized': obj._serialize()}\n        except ValueError:\n            raise ValueError('Unable to serialize {} to JSON, because the TypeSpec class {} has not been registered.'.format(obj, type(obj)))\n    if isinstance(obj, enum.Enum):\n        return obj.value\n    raise TypeError('Not JSON Serializable:', obj)", "docstring": "Serializes any object to a JSON-serializable structure.\n\nArgs:\nobj: the object to serialize\n\nReturns:\nJSON-serializable structure representing `obj`.\n\nRaises:\nTypeError: if `obj` cannot be serialized.", "source": "github-repos"}
{"code": "def etm_register_write(self, register_index, value, delay=False):\n    self._dll.JLINKARM_ETM_WriteReg(int(register_index), int(value), int(delay))\n    return None", "docstring": "Writes a value to an ETM register.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nregister_index (int): the register to write to.\nvalue (int): the value to write to the register.\ndelay (bool): boolean specifying if the write should be buffered.\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def unitary(input_circuit: circuit.QuantumCircuit):\n    return tfq.layers.Unitary()(input_circuit.pqc, symbol_names=input_circuit.symbol_names, symbol_values=tf.expand_dims(input_circuit.symbol_values, 0)).to_tensor()[0]", "docstring": "Returns the unitary matrix corresponding to the given circuit.\n\nArgs:\ninput_circuit: Quantum circuit whose unitary matrix is to be calculated.", "source": "github-repos"}
{"code": "def sparse_subtract(x1, x2):\n    if isinstance(x2, tf.SparseTensor):\n        return tf.sparse.add(x1, tf.sparse.map_values(tf.negative, x2))\n    else:\n        return tf.sparse.add(x1, tf.negative(x2))", "docstring": "Subtraction for `tf.SparseTensor`s.\n\nEither `x1` or `x2` or both can be `tf.SparseTensor`s.\n\nArgs:\nx1: fist tensor to add.\nx2: second tensor to add.\nReturns:\nThe sum of `x1` and `x2`, which is a `tf.SparseTensor` if and only if\nboth `x1` or `x2` are `tf.SparseTensor`s.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = self.attn_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.ffn_layer_norm(hidden_states)\n    hidden_states = self.ffn(hidden_states)\n    hidden_states = self.ffn_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\ninput to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`):\nattention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very\nlarge negative values.", "source": "github-repos"}
{"code": "def remove_file_from_tree(tree, file_path):\n    \n    match = None\n    for item in tree:\n        if item.get(\"path\") == file_path:\n            match = item\n            break\n    if match:\n        tree.remove(match)\n    return tree", "docstring": "Remove a file from a tree.\n\nArgs:\n\ntree\nA list of dicts containing info about each blob in a tree.\n\nfile_path\nThe path of a file to remove from a tree.\n\nReturns:\nThe provided tree, but with the item matching the specified\nfile_path removed.", "source": "juraj-google-style"}
{"code": "def __init__(self, idx):\n    \n    self.idx = idx\n    self.source = -1\n    self.target = -1\n    self.data = {}", "docstring": "Initialize the Edge.\n\nArgs:\nidx: The index of the Edge.", "source": "juraj-google-style"}
{"code": "def GetHandlers(self):\n    handlers = []\n    if self.ssl_context:\n        handlers.append(urllib2.HTTPSHandler(context=self.ssl_context))\n    if self.proxies:\n        handlers.append(urllib2.ProxyHandler(self.proxies))\n    return handlers", "docstring": "Retrieve the appropriate urllib2 handlers for the given configuration.\n\nReturns:\nA list of urllib2.BaseHandler subclasses to be used when making calls\nwith proxy.", "source": "codesearchnet"}
{"code": "def get_settings(section='gocd', settings_paths=('~/.gocd/gocd-cli.cfg', '/etc/go/gocd-cli.cfg')):\n    if isinstance(settings_paths, basestring):\n        settings_paths = (settings_paths,)\n    config_file = next((path for path in settings_paths if is_file_readable(path)), None)\n    if config_file:\n        config_file = expand_user(config_file)\n    return Settings(prefix=section, section=section, filename=config_file)", "docstring": "Returns a `gocd_cli.settings.Settings` configured for settings file\n\nThe settings will be read from environment variables first, then\nit'll be read from the first config file found (if any).\n\nEnvironment variables are expected to be in UPPERCASE and to be prefixed\nwith `GOCD_`.\n\nArgs:\nsection: The prefix to use for reading environment variables and the\nname of the section in the config file. Default: gocd\nsettings_path: Possible paths for the configuration file.\nDefault: `('~/.gocd/gocd-cli.cfg', '/etc/go/gocd-cli.cfg')`\n\nReturns:\n`gocd_cli.settings.Settings` instance", "source": "codesearchnet"}
{"code": "def is_user_profile_valid(user_profile):\n    if (not user_profile):\n        return False\n    if (not (type(user_profile) is dict)):\n        return False\n    if (UserProfile.USER_ID_KEY not in user_profile):\n        return False\n    if (UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile):\n        return False\n    experiment_bucket_map = user_profile.get(UserProfile.EXPERIMENT_BUCKET_MAP_KEY)\n    if (not (type(experiment_bucket_map) is dict)):\n        return False\n    for decision in experiment_bucket_map.values():\n        if ((type(decision) is not dict) or (UserProfile.VARIATION_ID_KEY not in decision)):\n            return False\n    return True", "docstring": "Determine if provided user profile is valid or not.\n\nArgs:\nuser_profile: User's profile which needs to be validated.\n\nReturns:\nBoolean depending upon whether profile is valid or not.", "source": "codesearchnet"}
{"code": "def listTemplates(data={}):\n        \n        conn = Qubole.agent()\n        url_path = Template.rest_entity_path\n        page_attr = []\n        if \"page\" in data and data[\"page\"] is not None:\n            page_attr.append(\"page=%s\" % data[\"page\"])\n        if \"per_page\" in data and data[\"per_page\"] is not None:\n            page_attr.append(\"per_page=%s\" % data[\"per_page\"])\n        if page_attr:\n            url_path = \"%s?%s\" % (url_path, \"&\".join(page_attr))\n\n        return conn.get(url_path)", "docstring": "Fetch existing Templates details.\n\nArgs:\n`data`: dictionary containing the value of page number and per-page value\nReturns:\nDictionary containing paging_info and command_templates details", "source": "juraj-google-style"}
{"code": "def get_strip_metadata(self, catID):\n        \n\n        self.logger.debug('Retrieving strip catalog metadata')\n        url = '%(base_url)s/record/%(catID)s?includeRelationships=false' % {\n            'base_url': self.base_url, 'catID': catID\n        }\n        r = self.gbdx_connection.get(url)\n        if r.status_code == 200:\n            return r.json()['properties']\n        elif r.status_code == 404:\n            self.logger.debug('Strip not found: %s' % catID)\n            r.raise_for_status()\n        else:\n            self.logger.debug('There was a problem retrieving catid: %s' % catID)\n            r.raise_for_status()", "docstring": "Retrieves the strip catalog metadata given a cat ID.\n\nArgs:\ncatID (str): The source catalog ID from the platform catalog.\n\nReturns:\nmetadata (dict): A metadata dictionary .\n\nTODO: have this return a class object with interesting information exposed.", "source": "juraj-google-style"}
{"code": "def Save(obj: _Serializable, filename: Path, compress: bool=False, open_function=open) -> None:\n    with open_function(filename, 'wb') as fi:\n        if compress:\n            with gzip.GzipFile(filename='', mode='wb', fileobj=fi, mtime=1.0) as zfi:\n                zfi.write(Encode(obj))\n        else:\n            fi.write(Encode(obj))", "docstring": "Saves a serializable object to a file.\n\nArgs:\nobj: The object to serialize.\nfilename: filename to write to.\ncompress: if True, the data will be compressed using gzip. The given\nfilename will be used, unaltered.\nopen_function: The function to use to open files. Defaults to the builtin\nopen() function.", "source": "github-repos"}
{"code": "def _write_init_fetchers(self, filenames):\n    destination = ('%s%s' % (self.output_directory, self.fetchers_path))\n    self.write(destination=destination, filename='__init__.py', template_name='__init_fetcher__.py.tpl', filenames=self._prepare_filenames(filenames, suffix='Fetcher'), class_prefix=self._class_prefix, product_accronym=self._product_accronym, header=self.header_content)", "docstring": "Write fetcher init file\n\nArgs:\nfilenames (dict): dict of filename and classes", "source": "codesearchnet"}
{"code": "def filter_by_analysis_period(self, analysis_period):\n        \n        _filtered_data = self.filter_by_months_per_hour(\n            analysis_period.months_per_hour)\n        _filtered_data.header._analysis_period = analysis_period\n        return _filtered_data", "docstring": "Filter the Data Collection based on an analysis period.\n\nArgs:\nanalysis period: A Ladybug analysis period\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def annotations_from_file(filename):\n    \n    import edflib\n    e = edflib.EdfReader(filename, annotations_mode='all')\n    return e.read_annotations()", "docstring": "Get a list of event annotations from an EDF (European Data Format file\nor EDF+ file, using edflib.\n\nArgs:\nfilename: EDF+ file\n\nReturns:\nlist: annotation events, each in the form [start_time, duration, text]", "source": "juraj-google-style"}
{"code": "def __init__(self, rdfclass=None, **kwargs):\n    \n    super(RDFStructDictType, self).__init__(**kwargs)\n    self._type = self.rdfclass = rdfclass", "docstring": "An arg which must be an RDFStruct.\n\nArgs:\nrdfclass: The RDFStruct subclass that this arg must be.\n**kwargs: Passthrough to base class.", "source": "juraj-google-style"}
{"code": "def asym(scatterer, h_pol=True):\n    if (scatterer.psd_integrator is not None):\n        return scatterer.psd_integrator.get_angular_integrated(scatterer.psd, scatterer.get_geometry(), 'asym')\n    old_geom = scatterer.get_geometry()\n    cos_t0 = np.cos((scatterer.thet0 * deg_to_rad))\n    sin_t0 = np.sin((scatterer.thet0 * deg_to_rad))\n    p0 = (scatterer.phi0 * deg_to_rad)\n\n    def integrand(thet, phi):\n        (scatterer.phi, scatterer.thet) = ((phi * rad_to_deg), (thet * rad_to_deg))\n        cos_T_sin_t = (0.5 * ((np.sin((2 * thet)) * cos_t0) + (((1 - np.cos((2 * thet))) * sin_t0) * np.cos((p0 - phi)))))\n        I = sca_intensity(scatterer, h_pol)\n        return (I * cos_T_sin_t)\n    try:\n        cos_int = dblquad(integrand, 0.0, (2 * np.pi), (lambda x: 0.0), (lambda x: np.pi))[0]\n    finally:\n        scatterer.set_geometry(old_geom)\n    return (cos_int / sca_xsect(scatterer, h_pol))", "docstring": "Asymmetry parameter for the current setup, with polarization.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe asymmetry parameter.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(NTFSFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._fsntfs_volume = None", "docstring": "Initializes a file system object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def update_port(self, port_information, id_or_uri, timeout=(- 1)):\n    uri = (self._client.build_uri(id_or_uri) + '/ports')\n    return self._client.update(port_information, uri, timeout)", "docstring": "Updates an interconnect port.\n\nArgs:\nid_or_uri: Can be either the interconnect id or the interconnect uri.\nport_information (dict): object to update\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: The interconnect.", "source": "codesearchnet"}
{"code": "def variables(self, name):\n        \n        if isinstance(name, tuple):\n            name = name[0]\n        if name.startswith('@{'):\n            name = '@' + name[2:-1]\n        i = len(self)\n        while i >= 0:\n            i -= 1\n            if name in self[i]['__variables__']:\n                return self[i]['__variables__'][name]\n        return False", "docstring": "Search for variable by name. Searches scope top down\nArgs:\nname (string): Search term\nReturns:\nVariable object OR False", "source": "juraj-google-style"}
{"code": "def _load_sentence_list(self, path):\n        \n\n        result = {}\n\n        for entry in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=3):\n            if self.include_languages is None or entry[1] in self.include_languages:\n                result[entry[0]] = entry[1:]\n\n        return result", "docstring": "Load and filter the sentence list.\n\nArgs:\npath (str): Path to the sentence list.\n\nReturns:\ndict: Dictionary of sentences (id : language, transcription)", "source": "juraj-google-style"}
{"code": "def __str__(self):\n        \n        d = enums.JLinkHaltReasons.__dict__\n        s = next(k for k, v in d.items() if v == self.HaltReason)\n        if self.dbgrq():\n            return s\n        return s.replace('_', ' ').title()", "docstring": "Returns a string representation of the instance.\n\nArgs:\nself (JLinkMOEInfo): the ``JLinkMOEInfo`` instance\n\nReturns:\nA string representation of the instance.", "source": "juraj-google-style"}
{"code": "def get_template_files(self, template_id, filename):\n        \n\n        url = self.TEMPLATE_GET_FILES_URL + template_id\n        request = self._get_request()\n\n        return request.get_file(url, filename)", "docstring": "Download a PDF copy of a template's original files\n\nArgs:\n\ntemplate_id (str):  The id of the template to retrieve.\n\nfilename (str):     Filename to save the PDF file to. This should be a full path.\n\nReturns:\nReturns a PDF file", "source": "juraj-google-style"}
{"code": "def _build(self, inputs_list):\n    outputs = []\n    for (idx, tensor) in enumerate(inputs_list):\n        outputs.append(Linear(self._output_size, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, use_bias=((idx == 0) and self._use_bias))(tensor))\n    return tf.add_n(outputs)", "docstring": "Connects the module into the graph.\n\nIf this is not the first time the module has been connected to the graph,\nthe Tensors provided here must have the same final dimensions as when called\nthe first time, in order for the existing variables to be the correct size\nfor the multiplication. The batch size may differ for each connection.\n\nArgs:\ninputs_list: A list of 2D Tensors of rank 2, with leading batch dimension.\n\nReturns:\nA 2D Tensor of size [batch_size, output_size].", "source": "codesearchnet"}
{"code": "def count_up_to(self, limit):\n    return gen_state_ops.resource_count_up_to(self.handle, limit=limit, T=self.dtype)", "docstring": "Increments this variable until it reaches `limit`.\n\nWhen that Op is run it tries to increment the variable by `1`. If\nincrementing the variable would bring it above `limit` then the Op raises\nthe exception `OutOfRangeError`.\n\nIf no error is raised, the Op outputs the value of the variable before\nthe increment.\n\nThis is essentially a shortcut for `count_up_to(self, limit)`.\n\nArgs:\nlimit: value at which incrementing the variable raises an error.\n\nReturns:\nA `Tensor` that will hold the variable value before the increment. If no\nother Op modifies this variable, the values produced will all be\ndistinct.", "source": "github-repos"}
{"code": "def transform(self, value):\n    with tf.name_scope((self._name + '/transform')):\n        no_batch_dim = (value.shape.ndims == self._mean.shape.ndims)\n        if no_batch_dim:\n            value = value[(None, ...)]\n        if self._center:\n            value -= self._mean[(None, ...)]\n        if self._scale:\n            value /= tf.cond((self._count > 1), (lambda : (self._std() + 1e-08)), (lambda : tf.ones_like(self._var_sum)))[None]\n        if self._clip:\n            value = tf.clip_by_value(value, (- self._clip), self._clip)\n        if no_batch_dim:\n            value = value[0]\n        return tf.check_numerics(value, 'value')", "docstring": "Normalize a single or batch tensor.\n\nApplies the activated transformations in the constructor using current\nestimates of mean and variance.\n\nArgs:\nvalue: Batch or single value tensor.\n\nReturns:\nNormalized batch or single value tensor.", "source": "codesearchnet"}
{"code": "def line_starts_subpgm(line: str) -> Tuple[(bool, Optional[str])]:\n    match = RE_SUB_START.match(line)\n    if (match != None):\n        f_name = match.group(1)\n        return (True, f_name)\n    match = RE_FN_START.match(line)\n    if (match != None):\n        f_name = match.group(1)\n        return (True, f_name)\n    return (False, None)", "docstring": "Indicates whether a line in the program is the first line of a subprogram\ndefinition.\n\nArgs:\nline\nReturns:\n(True, f_name) if line begins a definition for subprogram f_name;\n(False, None) if line does not begin a subprogram definition.", "source": "codesearchnet"}
{"code": "def clean(self, value):\n\t\t\n\n\t\t\n\t\tif value is None and self._optional:\n\t\t\treturn None\n\n\t\t\n\t\tfor i in range(len(self._nodes)):\n\n\t\t\t\n\t\t\tif self._nodes[i].valid(value):\n\n\t\t\t\t\n\t\t\t\treturn self._nodes[i].clean(value)\n\n\t\t\n\t\traise ValueError('value', value)", "docstring": "Clean\n\nUses the valid method to check which type the value is, and then calls\nthe correct version of clean on that node\n\nArguments:\nvalue {mixed} -- The value to clean\n\nReturns:\nmixed", "source": "juraj-google-style"}
{"code": "def full_name_node(name, ctx=ast.Load()):\n    names = name.split('.')\n    names.reverse()\n    node = ast.Name(id=names.pop(), ctx=ast.Load())\n    while names:\n        node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())\n    node.ctx = ctx\n    return node", "docstring": "Make an Attribute or Name node for name.\n\nTranslate a qualified name into nested Attribute nodes (and a Name node).\n\nArgs:\nname: The name to translate to a node.\nctx: What context this name is used in. Defaults to Load()\n\nReturns:\nA Name or Attribute node.", "source": "github-repos"}
{"code": "def sequence_path(self, fasta_path):\n    if (not fasta_path):\n        self.sequence_dir = None\n        self.sequence_file = None\n    else:\n        if (not op.exists(fasta_path)):\n            raise OSError('{}: file does not exist'.format(fasta_path))\n        if (not op.dirname(fasta_path)):\n            self.sequence_dir = '.'\n        else:\n            self.sequence_dir = op.dirname(fasta_path)\n        self.sequence_file = op.basename(fasta_path)\n        tmp_sr = SeqIO.read(fasta_path, 'fasta')\n        if (self.name == '<unknown name>'):\n            self.name = tmp_sr.name\n        if (self.description == '<unknown description>'):\n            self.description = tmp_sr.description\n        if (not self.dbxrefs):\n            self.dbxrefs = tmp_sr.dbxrefs\n        if (not self.features):\n            self.features = tmp_sr.features\n        if (not self.annotations):\n            self.annotations = tmp_sr.annotations\n        if (not self.letter_annotations):\n            self.letter_annotations = tmp_sr.letter_annotations", "docstring": "Provide pointers to the paths of the FASTA file\n\nArgs:\nfasta_path: Path to FASTA file", "source": "codesearchnet"}
{"code": "def get_source_var_declaration(self, var):\n    return next((x.source_mapping for x in self.variables if (x.name == var)))", "docstring": "Return the source mapping where the variable is declared\n\nArgs:\nvar (str): variable name\nReturns:\n(dict): sourceMapping", "source": "codesearchnet"}
{"code": "def __init__(self, device, configs=None):\n    self._device = device\n    self._configs = configs", "docstring": "Constructor of the class.\n\nThe constructor is the only place to pass in a config. If you need to\nchange the config later, you should unregister the service instance\nfrom `ServiceManager` and register again with the new config.\n\nArgs:\ndevice: the device object this service is associated with.\nconfig: optional configuration defined by the author of the service\nclass.", "source": "github-repos"}
{"code": "def join_dags(self, names=None):\n        \n        return self._client.send(\n            Request(\n                action='join_dags',\n                payload={'names': names}\n            )\n        ).success", "docstring": "Wait for the specified dags to terminate.\n\nThis function blocks until the specified dags terminate. If no dags are specified\nwait for all dags of the workflow, except the dag of the task calling this signal,\nto terminate.\n\nArgs:\nnames (list): The names of the dags that have to terminate.\n\nReturns:\nbool: True if all the signal was sent successfully.", "source": "juraj-google-style"}
{"code": "def to_string(cls, error_code):\n        \n        if error_code == cls.ERROR_UNKNOWN:\n            return 'Unknown error.'\n        elif error_code == cls.ERROR_NO_MORE_EVENTS:\n            return 'There are no more available watchpoint units.'\n        elif error_code == cls.ERROR_NO_MORE_ADDR_COMP:\n            return 'No more address comparisons can be set.'\n        elif error_code == cls.ERROR_NO_MORE_DATA_COMP:\n            return 'No more data comparisons can be set.'\n        elif error_code == cls.ERROR_INVALID_ADDR_MASK:\n            return 'Invalid flags passed for the address mask.'\n        elif error_code == cls.ERROR_INVALID_DATA_MASK:\n            return 'Invalid flags passed for the data mask.'\n        elif error_code == cls.ERROR_INVALID_ACCESS_MASK:\n            return 'Invalid flags passed for the access mask.'\n        return super(JLinkDataErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given error code.\n\nArgs:\ncls (JLinkDataErrors): the ``JLinkDataErrors`` class\nerror_code (int): error code to convert\n\nReturns:\nAn error string corresponding to the error code.\n\nRaises:\nValueError: if the error code is invalid.", "source": "juraj-google-style"}
{"code": "def create_attention_mask_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:\n    return [1] * (1 + len(query_ids) + 1 + len(table_values))", "docstring": "Creates the attention mask according to the query token IDs and a list of table values.\n\nArgs:\nquery_ids (`List[int]`): list of token IDs corresponding to the ID.\ntable_values (`List[TableValue]`): lift of table values, which are named tuples containing the\ntoken value, the column ID and the row ID of said token.\n\nReturns:\n`List[int]`: List of ints containing the attention mask values.", "source": "github-repos"}
{"code": "def _task_table(self, task_id):\n    assert isinstance(task_id, ray.TaskID)\n    message = self._execute_command(task_id, 'RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.RAYLET_TASK, '', task_id.binary())\n    if (message is None):\n        return {}\n    gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)\n    assert (gcs_entries.EntriesLength() == 1)\n    task_table_message = ray.gcs_utils.Task.GetRootAsTask(gcs_entries.Entries(0), 0)\n    execution_spec = task_table_message.TaskExecutionSpec()\n    task_spec = task_table_message.TaskSpecification()\n    task = ray._raylet.Task.from_string(task_spec)\n    function_descriptor_list = task.function_descriptor_list()\n    function_descriptor = FunctionDescriptor.from_bytes_list(function_descriptor_list)\n    task_spec_info = {'DriverID': task.driver_id().hex(), 'TaskID': task.task_id().hex(), 'ParentTaskID': task.parent_task_id().hex(), 'ParentCounter': task.parent_counter(), 'ActorID': task.actor_id().hex(), 'ActorCreationID': task.actor_creation_id().hex(), 'ActorCreationDummyObjectID': task.actor_creation_dummy_object_id().hex(), 'ActorCounter': task.actor_counter(), 'Args': task.arguments(), 'ReturnObjectIDs': task.returns(), 'RequiredResources': task.required_resources(), 'FunctionID': function_descriptor.function_id.hex(), 'FunctionHash': binary_to_hex(function_descriptor.function_hash), 'ModuleName': function_descriptor.module_name, 'ClassName': function_descriptor.class_name, 'FunctionName': function_descriptor.function_name}\n    return {'ExecutionSpec': {'Dependencies': [execution_spec.Dependencies(i) for i in range(execution_spec.DependenciesLength())], 'LastTimestamp': execution_spec.LastTimestamp(), 'NumForwards': execution_spec.NumForwards()}, 'TaskSpec': task_spec_info}", "docstring": "Fetch and parse the task table information for a single task ID.\n\nArgs:\ntask_id: A task ID to get information about.\n\nReturns:\nA dictionary with information about the task ID in question.", "source": "codesearchnet"}
{"code": "def uniprot_reviewed_checker_batch(uniprot_ids):\n    uniprot_ids = ssbio.utils.force_list(uniprot_ids)\n    invalid_ids = [i for i in uniprot_ids if (not is_valid_uniprot_id(i))]\n    uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)]\n    if invalid_ids:\n        warnings.warn('Invalid UniProt IDs {} will be ignored'.format(invalid_ids))\n    Nmax = 200\n    (N, rest) = divmod(len(uniprot_ids), Nmax)\n    uni_rev_dict = {}\n    if (rest > 0):\n        N += 1\n    for i in range(0, N):\n        i1 = (i * Nmax)\n        i2 = ((i + 1) * Nmax)\n        if (i2 > len(uniprot_ids)):\n            i2 = len(uniprot_ids)\n        query = uniprot_ids[i1:i2]\n        query_string = ''\n        for x in query:\n            query_string += (('id:' + x) + '+OR+')\n        query_string = query_string.strip('+OR+')\n        uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))\n        uni_rev_df = pd.read_table(uni_rev_raw, sep='\\t', index_col=0)\n        uni_rev_df = uni_rev_df.fillna(False)\n        uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]\n        uni_rev_df = uni_rev_df.replace(to_replace='reviewed', value=True)\n        uni_rev_df = uni_rev_df.replace(to_replace='unreviewed', value=False)\n        uni_rev_dict_adder = uni_rev_df.to_dict()['Status']\n        uni_rev_dict.update(uni_rev_dict_adder)\n    return uni_rev_dict", "docstring": "Batch check if uniprot IDs are reviewed or not\n\nArgs:\nuniprot_ids: UniProt ID or list of UniProt IDs\n\nReturns:\nA dictionary of {UniProtID: Boolean}", "source": "codesearchnet"}
{"code": "def netflix(es, ps, e0, l=.0001):\n    \n    m = len(es)\n    n = len(ps[0])\n\n    X = np.stack(ps).T\n    pTy = .5 * (n * e0**2 + (X**2).sum(axis=0) - n * np.array(es)**2)\n\n    w = np.linalg.pinv(X.T.dot(X) + l * n * np.eye(m)).dot(pTy)\n\n    return X.dot(w), w", "docstring": "Combine predictions with the optimal weights to minimize RMSE.\n\nArgs:\nes (list of float): RMSEs of predictions\nps (list of np.array): predictions\ne0 (float): RMSE of all zero prediction\nl (float): lambda as in the ridge regression\n\nReturns:\nEnsemble prediction (np.array) and weights (np.array) for input predictions", "source": "juraj-google-style"}
{"code": "def convert_compartment_entry(self, compartment, adjacencies):\n        \n        d = OrderedDict()\n        d['id'] = compartment.id\n        if adjacencies is not None:\n            d['adjacent_to'] = adjacencies\n\n        order = {key: i for i, key in enumerate(['name'])}\n        prop_keys = set(compartment.properties)\n        for prop in sorted(prop_keys,\n                           key=lambda x: (order.get(x, 1000), x)):\n            if compartment.properties[prop] is not None:\n                d[prop] = compartment.properties[prop]\n\n        return d", "docstring": "Convert compartment entry to YAML dict.\n\nArgs:\ncompartment: :class:`psamm.datasource.entry.CompartmentEntry`.\nadjacencies: Sequence of IDs or a single ID of adjacent\ncompartments (or None).", "source": "juraj-google-style"}
{"code": "def node_from_map(node_map: Mapping[str, node_def_pb2.NodeDef], name: str) -> node_def_pb2.NodeDef:\n    stripped_name = node_name_from_input(name)\n    if stripped_name not in node_map:\n        raise ValueError(\"No node named '%s' found in map.\" % name)\n    return node_map[stripped_name]", "docstring": "Pulls a node def from a dictionary for a given name.\n\nArgs:\nnode_map: Dictionary containing an entry indexed by name for every node.\nname: Identifies the node we want to find.\n\nReturns:\nNodeDef of the node with the given name.\n\nRaises:\nValueError: If the node isn't present in the dictionary.", "source": "github-repos"}
{"code": "def _bdtr(k, n, p):\n    ones = tf.ones_like((n - k))\n    k_eq_n = tf.equal(k, n)\n    safe_dn = tf.where(k_eq_n, ones, (n - k))\n    dk = tf.math.betainc(a=safe_dn, b=(k + 1), x=(1 - p))\n    return tf.where(k_eq_n, ones, dk)", "docstring": "The binomial cumulative distribution function.\n\nArgs:\nk: floating point `Tensor`.\nn: floating point `Tensor`.\np: floating point `Tensor`.\n\nReturns:\n`sum_{j=0}^k p^j (1 - p)^(n - j)`.", "source": "codesearchnet"}
{"code": "def get_next_of_type(self, processor_type):\n        \n        with self._condition:\n            if processor_type not in self:\n                self.wait_for_registration(processor_type)\n            try:\n                processor = self[processor_type].next_processor()\n            except NoProcessorVacancyError:\n                processor = self.wait_for_vacancy(processor_type)\n            processor.inc_occupancy()\n            return processor", "docstring": "Get the next available processor of a particular type and increment\nits occupancy counter.\n\nArgs:\nprocessor_type (ProcessorType): The processor type associated with\na zmq identity.\n\nReturns:\n(Processor): Information about the transaction processor", "source": "juraj-google-style"}
{"code": "def getMAC(self, bType=MacType.RandomMac):\n        \n        print '%s call getMAC' % self.port\n        print bType\n        \n        if self.isPowerDown:\n            macAddr64 = self.mac\n        else:\n            if bType == MacType.FactoryMac:\n                macAddr64 = self.__sendCommand('eui64')[0]\n            elif bType == MacType.HashMac:\n                macAddr64 = self.__sendCommand('joinerid')[0]\n            else:\n                macAddr64 = self.__sendCommand('extaddr')[0]\n        print macAddr64\n\n        return int(macAddr64, 16)", "docstring": "get one specific type of MAC address\ncurrently OpenThread only supports Random MAC address\n\nArgs:\nbType: indicate which kind of MAC address is required\n\nReturns:\nspecific type of MAC address", "source": "juraj-google-style"}
{"code": "def has_ncols(state, incorrect_msg=\"Your query returned a table with {{n_stu}} column{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} column{{'s' if n_sol > 1 else ''}}.\"):\n    has_result(state)\n    n_stu = len(state.student_result)\n    n_sol = len(state.solution_result)\n    if (n_stu != n_sol):\n        _msg = state.build_message(incorrect_msg, fmt_kwargs={'n_stu': n_stu, 'n_sol': n_sol})\n        state.do_test(_msg)\n    return state", "docstring": "Test whether the student and solution query results have equal numbers of columns.\n\nArgs:\nincorrect_msg: If specified, this overrides the automatically generated feedback message\nin case the number of columns in the student and solution query don't match.\n\n:Example:\n\nConsider the following solution and SCT: ::\n\n# solution\nSELECT artist_id as id, name FROM artists\n\n# sct\nEx().has_ncols()\n\n# passing submission\nSELECT artist_id as id, name FROM artists\n\n# failing submission (too little columns)\nSELECT artist_id as id FROM artists\n\n# passing submission (two columns, even though not correct ones)\nSELECT artist_id, label FROM artists", "source": "codesearchnet"}
{"code": "def _ReadStructureFromFileObject(self, file_object, file_offset, data_type_map):\n    context = None\n    data = b''\n    last_data_size = 0\n    data_size = data_type_map.GetByteSize()\n    if (not data_size):\n        data_size = data_type_map.GetSizeHint()\n    while (data_size != last_data_size):\n        read_offset = (file_offset + last_data_size)\n        read_size = (data_size - last_data_size)\n        data_segment = self._ReadData(file_object, read_offset, read_size)\n        data = b''.join([data, data_segment])\n        try:\n            context = dtfabric_data_maps.DataTypeMapContext()\n            structure_values_object = data_type_map.MapByteStream(data, context=context)\n            return (structure_values_object, data_size)\n        except dtfabric_errors.ByteStreamTooSmallError:\n            pass\n        except dtfabric_errors.MappingError as exception:\n            raise errors.ParseError('Unable to map {0:s} data at offset: 0x{1:08x} with error: {2!s}'.format(data_type_map.name, file_offset, exception))\n        last_data_size = data_size\n        data_size = data_type_map.GetSizeHint(context=context)\n    raise errors.ParseError('Unable to read {0:s} at offset: 0x{1:08x}'.format(data_type_map.name, file_offset))", "docstring": "Reads a structure from a file-like object.\n\nIf the data type map has a fixed size this method will read the predefined\nnumber of bytes from the file-like object. If the data type map has a\nvariable size, depending on values in the byte stream, this method will\ncontinue to read from the file-like object until the data type map can be\nsuccessfully mapped onto the byte stream or until an error occurs.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\nfile_offset (int): offset of the structure data relative to the start\nof the file-like object.\ndata_type_map (dtfabric.DataTypeMap): data type map of the structure.\n\nReturns:\ntuple[object, int]: structure values object and data size of\nthe structure.\n\nRaises:\nParseError: if the structure cannot be read.\nValueError: if file-like object or data type map is missing.", "source": "codesearchnet"}
{"code": "def CsvToTable(self, buf, header=True, separator=\",\"):\n        \n        self.Reset()\n\n        header_row = self.row_class()\n        if header:\n            line = buf.readline()\n            header_str = \"\"\n            while not header_str:\n                \n                header_str = line.split(\"\n                if not header_str:\n                    line = buf.readline()\n\n            header_list = header_str.split(separator)\n            header_length = len(header_list)\n\n            for entry in header_list:\n                entry = entry.strip()\n                if entry in header_row:\n                    raise TableError(\"Duplicate header entry %r.\" % entry)\n\n                header_row[entry] = entry\n            header_row.row = 0\n            self._table[0] = header_row\n\n        \n        for line in buf:\n            \n            if line.startswith(\"\n                continue\n\n            lst = line.split(separator)\n            lst = [l.strip() for l in lst]\n            if header and len(lst) != header_length:\n                \n                continue\n            if not header:\n                header_row = self.row_class()\n                header_length = len(lst)\n                header_row.values = dict(\n                    zip(range(header_length), range(header_length))\n                )\n                self._table[0] = header_row\n                header = True\n                continue\n\n            new_row = self.NewRow()\n            new_row.values = lst\n            header_row.row = self.size + 1\n            self._table.append(new_row)\n\n        return self.size", "docstring": "Parses buffer into tabular format.\n\nStrips off comments (preceded by '#').\nOptionally parses and indexes by first line (header).\n\nArgs:\nbuf: String file buffer containing CSV data.\nheader: Is the first line of buffer a header.\nseparator: String that CSV is separated by.\n\nReturns:\nint, the size of the table created.\n\nRaises:\nTableError: A parsing error occurred.", "source": "juraj-google-style"}
{"code": "def get_converter_to_specific(self, dataset=None, mass=None, to_unit=None, from_unit=None):\n    if (not dataset):\n        dataset_number = self._validate_dataset_number(None)\n        if (dataset_number is None):\n            self._report_empty_dataset()\n            return\n        dataset = self.datasets[dataset_number]\n    if (not mass):\n        mass = dataset.mass\n    if (not to_unit):\n        to_unit_cap = self.cellpy_units['charge']\n        to_unit_mass = self.cellpy_units['specific']\n        to_unit = (to_unit_cap / to_unit_mass)\n    if (not from_unit):\n        from_unit_cap = self.raw_units['charge']\n        from_unit_mass = self.raw_units['mass']\n        from_unit = (from_unit_cap / from_unit_mass)\n    return ((from_unit / to_unit) / mass)", "docstring": "get the convertion values\n\nArgs:\ndataset: DataSet object\nmass: mass of electrode (for example active material in mg)\nto_unit: (float) unit of input, f.ex. if unit of charge\nis mAh and unit of mass is g, then to_unit for charge/mass\nwill be 0.001 / 1.0 = 0.001\nfrom_unit: float) unit of output, f.ex. if unit of charge\nis mAh and unit of mass is g, then to_unit for charge/mass\nwill be 1.0 / 0.001 = 1000.0\n\nReturns:\nmultiplier (float) from_unit/to_unit * mass", "source": "codesearchnet"}
{"code": "def GetMetadataAttribute(self, attribute_name):\n    \n    table_name = 'metadata'\n\n    has_table = self._database_file.HasTable(table_name)\n    if not has_table:\n      return None\n\n    column_names = ['value']\n    condition = 'name == \"{0:s}\"'.format(attribute_name)\n\n    values = list(self._database_file.GetValues(\n        [table_name], column_names, condition))\n\n    number_of_values = len(values)\n    if number_of_values == 0:\n      return None\n\n    if number_of_values == 1:\n      return values[0]['value']\n\n    raise RuntimeError('More than one value found in database.')", "docstring": "Retrieves the metadata attribute.\n\nArgs:\nattribute_name (str): name of the metadata attribute.\n\nReturns:\nstr: the metadata attribute or None.\n\nRaises:\nRuntimeError: if more than one value is found in the database.", "source": "juraj-google-style"}
{"code": "def __eq__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.equal, tf.float32)", "docstring": "Returns a TensorFluent for the equal relational operator.\n\nArgs:\nself: The first operand.\nother: The second operand.", "source": "juraj-google-style"}
{"code": "def describe_file(module):\n    \n    descriptor = FileDescriptor()\n    descriptor.package = util.get_package_for_module(module)\n\n    if not descriptor.package:\n        descriptor.package = None\n\n    message_descriptors = []\n    enum_descriptors = []\n\n    \n    \n    for name in sorted(dir(module)):\n        value = getattr(module, name)\n\n        if isinstance(value, type):\n            if issubclass(value, messages.Message):\n                message_descriptors.append(describe_message(value))\n\n            elif issubclass(value, messages.Enum):\n                enum_descriptors.append(describe_enum(value))\n\n    if message_descriptors:\n        descriptor.message_types = message_descriptors\n\n    if enum_descriptors:\n        descriptor.enum_types = enum_descriptors\n\n    return descriptor", "docstring": "Build a file from a specified Python module.\n\nArgs:\nmodule: Python module to describe.\n\nReturns:\nInitialized FileDescriptor instance describing the module.", "source": "juraj-google-style"}
{"code": "def categorical_partition_data(data):\n    \n\n    \n    series = pd.Series(data)\n    value_counts = series.value_counts(dropna=True)\n\n    \n    null_indexes = series.isnull()\n    nonnull_count = (null_indexes == False).sum()\n\n    weights = value_counts.values / nonnull_count\n    return {\n        \"values\": value_counts.index.tolist(),\n        \"weights\": weights\n    }", "docstring": "Convenience method for creating weights from categorical data.\n\nArgs:\ndata (list-like): The data from which to construct the estimate.\n\nReturns:\nA new partition object::\n\n{\n\"partition\": (list) The categorical values present in the data\n\"weights\": (list) The weights of the values in the partition.\n}", "source": "juraj-google-style"}
{"code": "def mix(self, ca, cb, xb):\n    r = (((1 - xb) * ca.red) + (xb * cb.red))\n    g = (((1 - xb) * ca.green) + (xb * cb.green))\n    b = (((1 - xb) * ca.blue) + (xb * cb.blue))\n    a = (((1 - xb) * ca.alpha) + (xb * cb.alpha))\n    return gdk.RGBA(red=r, green=g, blue=b, alpha=a)", "docstring": "Mix colors.\n\nArgs:\nca (gdk.RGBA): first color\ncb (gdk.RGBA): second color\nxb (float): between 0.0 and 1.0\n\nReturn:\ngdk.RGBA: linear interpolation between ca and cb,\n0 or 1 return the unaltered 1st or 2nd color respectively,\nas in CSS.", "source": "codesearchnet"}
{"code": "def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):\n    \n    return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)", "docstring": "Check that a neuron has no flat neurites\n\nArguments:\nneuron(Neuron): The neuron object to test\ntol(float): tolerance\nmethod(string): way of determining flatness, 'tolerance', 'ratio' \\\nas described in :meth:`neurom.check.morphtree.get_flat_neurites`\n\nReturns:\nCheckResult with result", "source": "juraj-google-style"}
{"code": "def construct(cls, name, range=None):\n    other = Requirement(None)\n    other.name_ = name\n    other.range_ = (VersionRange() if (range is None) else range)\n    return other", "docstring": "Create a requirement directly from an object name and VersionRange.\n\nArgs:\nname: Object name string.\nrange: VersionRange object. If None, an unversioned requirement is\ncreated.", "source": "codesearchnet"}
{"code": "def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):\n    super(ScatterAddSubTest, self).setUp()\n    with self.cached_session(use_gpu=False):\n        p_init = np.random.rand(*shape).astype('f')\n        vals_shape = [len(indices)] + shape[1:]\n        vals_init = np.random.rand(*vals_shape).astype('f')\n        v_i = [float(x) for x in vals_init.ravel()]\n        p = variables.Variable(p_init)\n        vals = constant_op.constant(v_i, shape=vals_shape, name='vals')\n        ind = constant_op.constant(indices, dtype=dtypes.int32)\n        p2 = scatter_op(p, ind, vals, name='updated_p')\n        self.evaluate(variables.global_variables_initializer())\n        result = self.evaluate(p2)\n    for i, ind in enumerate(indices):\n        if scatter_op == state_ops.scatter_add:\n            p_init.reshape(shape[0], -1)[ind, :] += vals_init.reshape(vals_shape[0], -1)[i, :]\n        else:\n            p_init.reshape(shape[0], -1)[ind, :] -= vals_init.reshape(vals_shape[0], -1)[i, :]\n    self.assertTrue(all((p_init == result).ravel()))", "docstring": "Run a random test case with the given shape and indices.\n\nArgs:\nshape: Shape of the parameters array.\nindices: One-dimensional array of ints, the indices of the last dimension\nof the parameters to update.\nscatter_op: ScatterAdd or ScatterSub.", "source": "github-repos"}
{"code": "def _generic_fit(fqdn, result, scorer, yP=None, *argl, **argd):\n    out = None\n    if (len(argl) > 0):\n        machine = argl[0]\n        out = {}\n        if hasattr(machine, 'best_score_'):\n            out['score'] = machine.best_score_\n        yL = _do_auto_predict(*argl[0:2])\n        yscore = scorer(fqdn, yL, yP, *argl, **argd)\n        if (yscore is not None):\n            out.update(yscore)\n    return out", "docstring": "Performs the generic fit tests that are common to both classifier and\nregressor; uses `scorer` to score the predicted values given by the machine\nwhen tested against its training set.\n\nArgs:\nscorer (function): called on the result of `machine.predict(Xtrain,\nytrain)`.", "source": "codesearchnet"}
{"code": "def GetTransactionResults(self):\n    if (self.References is None):\n        return None\n    results = []\n    realresults = []\n    for ref_output in self.References.values():\n        results.append(TransactionResult(ref_output.AssetId, ref_output.Value))\n    for output in self.outputs:\n        results.append(TransactionResult(output.AssetId, (output.Value * Fixed8((- 1)))))\n    for (key, group) in groupby(results, (lambda x: x.AssetId)):\n        sum = Fixed8(0)\n        for item in group:\n            sum = (sum + item.Amount)\n        if (sum != Fixed8.Zero()):\n            realresults.append(TransactionResult(key, sum))\n    return realresults", "docstring": "Get the execution results of the transaction.\n\nReturns:\nNone: if the transaction has no references.\nlist: of TransactionResult objects.", "source": "codesearchnet"}
{"code": "def predict(self, data, alpha=0.01, max_iter=2000, **kwargs):\n    edge_model = GraphLasso(alpha=alpha, max_iter=max_iter)\n    edge_model.fit(data.values)\n    return nx.relabel_nodes(nx.DiGraph(edge_model.get_precision()), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Predict the graph skeleton.\n\nArgs:\ndata (pandas.DataFrame): observational data\nalpha (float): regularization parameter\nmax_iter (int): maximum number of iterations\n\nReturns:\nnetworkx.Graph: Graph skeleton", "source": "codesearchnet"}
{"code": "def list_insights_components(access_token, subscription_id, resource_group):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/microsoft.insights/',\n                        '/components?api-version=', INSIGHTS_COMPONENTS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the Microsoft Insights components in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body of components.", "source": "juraj-google-style"}
{"code": "def add_middleware(middleware: EFBMiddleware):\n    \n    global middlewares\n    if isinstance(middleware, EFBMiddleware):\n        middlewares.append(middleware)\n    else:\n        raise TypeError(\"Middleware instance is expected\")", "docstring": "Register a middleware with the coordinator.\n\nArgs:\nmiddleware (EFBMiddleware): Middleware to register", "source": "juraj-google-style"}
{"code": "def add_attribute(self, attribute_type, attribute_value):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        return self.tc_requests.add_attribute(\n            self.api_type,\n            self.api_sub_type,\n            self.unique_id,\n            attribute_type,\n            attribute_value,\n            owner=self.owner,\n        )", "docstring": "Adds a attribute to a Group/Indicator or Victim\n\n\nArgs:\nattribute_type:\nattribute_value:\n\nReturns: attribute json", "source": "juraj-google-style"}
{"code": "def write_byte(self, value):\n        \n        if isinstance(value, bytes):\n            self.stream.write(value)\n        elif isinstance(value, str):\n            self.stream.write(value.encode('utf-8'))\n        elif isinstance(value, int):\n            self.stream.write(bytes([value]))", "docstring": "Write a single byte to the stream.\n\nArgs:\nvalue (bytes, str or int): value to write to the stream.", "source": "juraj-google-style"}
{"code": "def CopyTextToLabel(cls, text, prefix=''):\n    \n    text = '{0:s}{1:s}'.format(prefix, text)\n    return cls._INVALID_LABEL_CHARACTERS_REGEX.sub('_', text)", "docstring": "Copies a string to a label.\n\nA label only supports a limited set of characters therefore\nunsupported characters are replaced with an underscore.\n\nArgs:\ntext (str): label text.\nprefix (Optional[str]): label prefix.\n\nReturns:\nstr: label.", "source": "juraj-google-style"}
{"code": "def new(self, val):\n    if (len(self.things) >= self.max_things):\n        raise LimitationError('too many things')\n    self.things.add(val)\n    return val", "docstring": "Add a new value to me.\n\nArgs:\nval (LispVal): The value to be added.\n\nReturns:\nLispVal: The added value.\n\nRaises:\n~parthial.errs.LimitationError: If I already contain the maximum\nnumber of elements.", "source": "codesearchnet"}
{"code": "def create_api_call(func, settings):\n\n    def base_caller(api_call, _, *args):\n        'Simply call api_call and ignore settings.'\n        return api_call(*args)\n\n    def inner(request, options=None):\n        'Invoke with the actual settings.'\n        this_options = _merge_options_metadata(options, settings)\n        this_settings = settings.merge(this_options)\n        if (this_settings.retry and this_settings.retry.retry_codes):\n            api_call = gax.retry.retryable(func, this_settings.retry, **this_settings.kwargs)\n        else:\n            api_call = gax.retry.add_timeout_arg(func, this_settings.timeout, **this_settings.kwargs)\n        api_call = _catch_errors(api_call, gax.config.API_ERRORS)\n        return api_caller(api_call, this_settings, request)\n    if settings.page_descriptor:\n        if (settings.bundler and settings.bundle_descriptor):\n            raise ValueError('The API call has incompatible settings: bundling and page streaming')\n        api_caller = _page_streamable(settings.page_descriptor)\n    elif (settings.bundler and settings.bundle_descriptor):\n        api_caller = _bundleable(settings.bundle_descriptor)\n    else:\n        api_caller = base_caller\n    return inner", "docstring": "Converts an rpc call into an API call governed by the settings.\n\nIn typical usage, ``func`` will be a callable used to make an rpc request.\nThis will mostly likely be a bound method from a request stub used to make\nan rpc call.\n\nThe result is created by applying a series of function decorators defined\nin this module to ``func``.  ``settings`` is used to determine which\nfunction decorators to apply.\n\nThe result is another callable which for most values of ``settings`` has\nhas the same signature as the original. Only when ``settings`` configures\nbundling does the signature change.\n\nArgs:\nfunc (Callable[Sequence[object], object]): is used to make a bare rpc\ncall.\nsettings (_CallSettings): provides the settings for this call\n\nReturns:\nCallable[Sequence[object], object]: a bound method on a request stub used\nto make an rpc call\n\nRaises:\nValueError: if ``settings`` has incompatible values, e.g, if bundling\nand page_streaming are both configured", "source": "codesearchnet"}
{"code": "def _BuildOobLink(self, param, mode):\n    code = self.rpc_helper.GetOobCode(param)\n    if code:\n        parsed = list(parse.urlparse(self.widget_url))\n        query = dict(parse.parse_qsl(parsed[4]))\n        query.update({'mode': mode, 'oobCode': code})\n        try:\n            parsed[4] = parse.urlencode(query)\n        except AttributeError:\n            parsed[4] = urllib.urlencode(query)\n        return (code, parse.urlunparse(parsed))\n    raise errors.GitkitClientError('invalid request')", "docstring": "Builds out-of-band URL.\n\nGitkit API GetOobCode() is called and the returning code is combined\nwith Gitkit widget URL to building the out-of-band url.\n\nArgs:\nparam: dict of request.\nmode: string, Gitkit widget mode to handle the oob action after user\nclicks the oob url in the email.\n\nRaises:\nGitkitClientError: if oob code is not returned.\n\nReturns:\nA string of oob url.", "source": "codesearchnet"}
{"code": "def set_parameter(self, key, value):\n        \n        for x in self.transformed_structures:\n            x.other_parameters[key] = value", "docstring": "Add parameters to the transmuter. Additional parameters are stored in\nthe as_dict() output.\n\nArgs:\nkey: The key for the parameter.\nvalue: The value for the parameter.", "source": "juraj-google-style"}
{"code": "def compile_initial_state(self, batch_size: Optional[int]=None) -> Sequence[tf.Tensor]:\n    with self.graph.as_default():\n        with tf.name_scope('initial_state'):\n            self._initialize_initial_state_fluents()\n            if (batch_size is None):\n                return self.initial_state_fluents\n            return self._compile_batch_fluents(self.initial_state_fluents, batch_size)", "docstring": "Returns a tuple of tensors representing the initial state fluents.\n\nArgs:\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nSequence[tf.Tensor]: A tuple of tensors.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False):\n    \n    super(SQLiteBlobFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._number_of_entries = None\n\n    if is_virtual:\n      self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY\n    else:\n      self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\nentry emulated by the corresponding file system.", "source": "juraj-google-style"}
{"code": "def replace_with_json(self, json):\n    replacement = self.from_json(json)\n    replacement._destructively_move(self)", "docstring": "Overwrite everything in this document with the JSON-encoded\ndocument.\n\njson (JSON-data) :\nA JSON-encoded document to overwrite this one.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def with_redis_cache(self, host: str, port: int, time_to_live: Union[int, timedelta]=DEFAULT_CACHE_ENTRY_TTL_SEC, *, request_coder: Optional[coders.Coder]=None, response_coder: Optional[coders.Coder]=None, **kwargs):\n    if has_valid_redis_address(host, port):\n        self._cache = RedisCache(host=host, port=port, time_to_live=time_to_live, request_coder=request_coder, response_coder=response_coder, **kwargs)\n    return self", "docstring": "Configure the Redis cache to use with enrichment transform.\n\nArgs:\nhost (str): The hostname or IP address of the Redis server.\nport (int): The port number of the Redis server.\ntime_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for\nrecords stored in Redis. Provide an integer (in seconds) or a\n`datetime.timedelta` object.\nrequest_coder: (Optional[`coders.Coder`]) coder for requests stored\nin Redis.\nresponse_coder: (Optional[`coders.Coder`]) coder for decoding responses\nreceived from Redis.\nkwargs: Optional additional keyword arguments that\nare required to connect to your redis server. Same as `redis.Redis()`.", "source": "github-repos"}
{"code": "def serve(name: str='', port: int=5000) -> None:\n    logging.info(' * Listening on port %s', port)\n    httpd = HTTPServer((name, port), RequestHandler)\n    httpd.serve_forever()", "docstring": "A basic way to serve the methods.\n\nArgs:\nname: Server address.\nport: Server port.", "source": "codesearchnet"}
{"code": "def get_players(self, team):\n    team_id = self.__get_team_id(team)\n    self.logger.debug(f'Getting players of team {team_id}.')\n    return self._request('teams', team_id, 'players')", "docstring": "Loads the players of a team.\n\nArgs:\n* team (:obj: json): a team in json format obtained from the service.\n\nReturns:\n* :obj: json: the players of the team", "source": "codesearchnet"}
{"code": "def derivative(self, rate):\n        \n        rate = self._validate_number_sequence(rate, 3)\n        return 0.5 * self * Quaternion(vector=rate)", "docstring": "Get the instantaneous quaternion derivative representing a quaternion rotating at a 3D rate vector `rate`\n\nParams:\nrate: numpy 3-array (or array-like) describing rotation rates about the global x, y and z axes respectively.\n\nReturns:\nA unit quaternion describing the rotation rate", "source": "juraj-google-style"}
{"code": "def _profile_table(self, batch_id):\n        \n        \n        \n        message = self._execute_command(batch_id, \"RAY.TABLE_LOOKUP\",\n                                        ray.gcs_utils.TablePrefix.PROFILE, \"\",\n                                        batch_id.binary())\n\n        if message is None:\n            return []\n\n        gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(\n            message, 0)\n\n        profile_events = []\n        for i in range(gcs_entries.EntriesLength()):\n            profile_table_message = (\n                ray.gcs_utils.ProfileTableData.GetRootAsProfileTableData(\n                    gcs_entries.Entries(i), 0))\n\n            component_type = decode(profile_table_message.ComponentType())\n            component_id = binary_to_hex(profile_table_message.ComponentId())\n            node_ip_address = decode(\n                profile_table_message.NodeIpAddress(), allow_none=True)\n\n            for j in range(profile_table_message.ProfileEventsLength()):\n                profile_event_message = profile_table_message.ProfileEvents(j)\n\n                profile_event = {\n                    \"event_type\": decode(profile_event_message.EventType()),\n                    \"component_id\": component_id,\n                    \"node_ip_address\": node_ip_address,\n                    \"component_type\": component_type,\n                    \"start_time\": profile_event_message.StartTime(),\n                    \"end_time\": profile_event_message.EndTime(),\n                    \"extra_data\": json.loads(\n                        decode(profile_event_message.ExtraData())),\n                }\n\n                profile_events.append(profile_event)\n\n        return profile_events", "docstring": "Get the profile events for a given batch of profile events.\n\nArgs:\nbatch_id: An identifier for a batch of profile events.\n\nReturns:\nA list of the profile events for the specified batch.", "source": "juraj-google-style"}
{"code": "def build(self, var_list):\n    if self.built:\n        return\n    super().build(var_list)\n    self._momentums = self.add_optimizer_variables(var_list, 'momentum')", "docstring": "Initialize optimizer variables.\n\nLion optimizer has one variable `momentums`.\n\nArgs:\nvar_list: list of model variables to build Lion variables on.", "source": "github-repos"}
{"code": "def _RegisterDebuggee(self, service):\n    try:\n        request = {'debuggee': self._GetDebuggee()}\n        try:\n            response = service.debuggees().register(body=request).execute()\n            project_number = response['debuggee'].get('project')\n            self._project_number = (project_number or self._project_number)\n            self._debuggee_id = response['debuggee']['id']\n            native.LogInfo(('Debuggee registered successfully, ID: %s' % self._debuggee_id))\n            self.register_backoff.Succeeded()\n            return (False, 0)\n        except BaseException:\n            native.LogInfo(('Failed to register debuggee: %s, %s' % (request, traceback.format_exc())))\n    except BaseException:\n        native.LogWarning(('Debuggee information not available: ' + traceback.format_exc()))\n    return (True, self.register_backoff.Failed())", "docstring": "Single attempt to register the debuggee.\n\nIf the registration succeeds, sets self._debuggee_id to the registered\ndebuggee ID.\n\nArgs:\nservice: client to use for API calls\n\nReturns:\n(registration_required, delay) tuple", "source": "codesearchnet"}
{"code": "def clone(self, uuid):\n    request_url = (self._client.base_api_url + self.clone_url.format(id=uuid))\n    response = self._client.session.post(request_url)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Clone the task instance with given UUID.\n\nArgs:\nuuid (str): The UUID of the task instance to clone.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nA task instance model instance representing the task\ninstance created due to the clone.", "source": "codesearchnet"}
{"code": "def close_children_tasks(self, parent_task_name):\n        \n        if parent_task_name not in self.tasks:\n            return\n\n        while self.tasks:\n            next_task = reversed(self.tasks.keys()).next()\n            if next_task == parent_task_name:\n                break\n            del self.tasks[next_task]", "docstring": "Closes all the children tasks that were open\n\nArgs:\nparent_task_name (str): Name of the parent task\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_config_path(module_id: str = None, ext: str = 'yaml') -> Path:\n    \n    if module_id:\n        config_path = get_data_path(module_id)\n    else:\n        profile = coordinator.profile\n        config_path = get_base_path() / 'profiles' / profile\n    if not config_path.exists():\n        config_path.mkdir(parents=True)\n    return config_path / \"config.{}\".format(ext)", "docstring": "Get path for configuration file. Defaulted to\n``~/.ehforwarderbot/profiles/profile_name/channel_id/config.yaml``.\n\nThis method creates the queried path if not existing. The config file will\nnot be created, however.\n\nArgs:\nmodule_id (str): Module ID.\next (Optional[Str]): Extension name of the config file.\nDefaulted to ``\"yaml\"``.\n\nReturns:\nThe path to the configuration file.", "source": "juraj-google-style"}
{"code": "def tsqr(a):\n    if (len(a.shape) != 2):\n        raise Exception('tsqr requires len(a.shape) == 2, but a.shape is {}'.format(a.shape))\n    if (a.num_blocks[1] != 1):\n        raise Exception('tsqr requires a.num_blocks[1] == 1, but a.num_blocks is {}'.format(a.num_blocks))\n    num_blocks = a.num_blocks[0]\n    K = (int(np.ceil(np.log2(num_blocks))) + 1)\n    q_tree = np.empty((num_blocks, K), dtype=object)\n    current_rs = []\n    for i in range(num_blocks):\n        block = a.objectids[(i, 0)]\n        (q, r) = ra.linalg.qr.remote(block)\n        q_tree[(i, 0)] = q\n        current_rs.append(r)\n    for j in range(1, K):\n        new_rs = []\n        for i in range(int(np.ceil(((1.0 * len(current_rs)) / 2)))):\n            stacked_rs = ra.vstack.remote(*current_rs[(2 * i):((2 * i) + 2)])\n            (q, r) = ra.linalg.qr.remote(stacked_rs)\n            q_tree[(i, j)] = q\n            new_rs.append(r)\n        current_rs = new_rs\n    assert (len(current_rs) == 1), ('len(current_rs) = ' + str(len(current_rs)))\n    if (a.shape[0] >= a.shape[1]):\n        q_shape = a.shape\n    else:\n        q_shape = [a.shape[0], a.shape[0]]\n    q_num_blocks = core.DistArray.compute_num_blocks(q_shape)\n    q_objectids = np.empty(q_num_blocks, dtype=object)\n    q_result = core.DistArray(q_shape, q_objectids)\n    for i in range(num_blocks):\n        q_block_current = q_tree[(i, 0)]\n        ith_index = i\n        for j in range(1, K):\n            if (np.mod(ith_index, 2) == 0):\n                lower = [0, 0]\n                upper = [a.shape[1], core.BLOCK_SIZE]\n            else:\n                lower = [a.shape[1], 0]\n                upper = [(2 * a.shape[1]), core.BLOCK_SIZE]\n            ith_index \n            q_block_current = ra.dot.remote(q_block_current, ra.subarray.remote(q_tree[(ith_index, j)], lower, upper))\n        q_result.objectids[i] = q_block_current\n    r = current_rs[0]\n    return (q_result, ray.get(r))", "docstring": "Perform a QR decomposition of a tall-skinny matrix.\n\nArgs:\na: A distributed matrix with shape MxN (suppose K = min(M, N)).\n\nReturns:\nA tuple of q (a DistArray) and r (a numpy array) satisfying the\nfollowing.\n- If q_full = ray.get(DistArray, q).assemble(), then\nq_full.shape == (M, K).\n- np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.\n- If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).\n- np.allclose(r, np.triu(r)) == True.", "source": "codesearchnet"}
{"code": "def __init__(self, band_type=None, rate=None, burst_size=None):\n        \n        super().__init__()\n        self.band_type = band_type\n        self.rate = rate\n        self.burst_size = burst_size\n        self.update_length()", "docstring": "Create a MeterBandHeader with the optional parameters below.\n\nArgs:\nband_type (MeterBandType): One of OFPMBT_*.\nrate (int): Rate for this band.\nburst_size (int): Size of bursts.", "source": "juraj-google-style"}
{"code": "def step(self, action):\n    (observ, reward, done, info) = self._env.step(action)\n    observ = self._convert_observ(observ)\n    reward = self._convert_reward(reward)\n    return (observ, reward, done, info)", "docstring": "Forward action to the wrapped environment.\n\nArgs:\naction: Action to apply to the environment.\n\nRaises:\nValueError: Invalid action.\n\nReturns:\nConverted observation, converted reward, done flag, and info object.", "source": "codesearchnet"}
{"code": "def add_outputs(self, **kwargs):\n    self._closed()\n    for (name, source_name) in kwargs.items():\n        obj = {}\n        obj['outputSource'] = source_name\n        obj['type'] = self.step_output_types[source_name]\n        self.wf_outputs[name] = obj", "docstring": "Add workflow outputs.\n\nThe output type is added automatically, based on the steps in the steps\nlibrary.\n\nArgs:\nkwargs (dict): A dict containing ``name=source name`` pairs.\n``name`` is the name of the workflow output (e.g.,\n``txt_files``) and source name is the name of the step that\nproduced this output plus the output name (e.g.,\n``saf-to-txt/out_files``).", "source": "codesearchnet"}
{"code": "def build_circle_dict(self, center_lat, center_lng, radius, stroke_color='\n    circle = {'stroke_color': stroke_color, 'stroke_opacity': stroke_opacity, 'stroke_weight': stroke_weight, 'fill_color': fill_color, 'fill_opacity': fill_opacity, 'center': {'lat': center_lat, 'lng': center_lng}, 'radius': radius}\n    return circle", "docstring": "Set a dictionary with the javascript class Circle parameters\n\nThis function sets a default drawing configuration if the user just\npass the rectangle bounds, but also allows to set each parameter\nindividually if the user wish so.\n\nArgs:\ncenter_lat (float): The circle center latitude\ncenter_lng (float): The circle center longitude\nradius  (float): The circle radius, in meters\nstroke_color (str): Sets the color of the rectangle border using\nhexadecimal color notation\nstroke_opacity (float): Sets the opacity of the rectangle border\nin percentage. If stroke_opacity = 0, the border is transparent\nstroke_weight (int): Sets the stroke girth in pixels.\nfill_color (str): Sets the color of the circle fill using\nhexadecimal color notation\nfill_opacity (float): Sets the opacity of the circle fill", "source": "codesearchnet"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(Digest, self).read(istream, kmip_version=kmip_version)\n    tstream = BytearrayStream(istream.read(self.length))\n    self.hashing_algorithm.read(tstream, kmip_version=kmip_version)\n    self.digest_value.read(tstream, kmip_version=kmip_version)\n    self.key_format_type.read(tstream, kmip_version=kmip_version)\n    self.is_oversized(tstream)\n    self.validate()", "docstring": "Read the data encoding the Digest object and decode it into its\nconstituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def set_schema_location(self, ns_uri, schema_location, replace=False):\n    ni = self.__lookup_uri(ns_uri)\n    if (ni.schema_location == schema_location):\n        return\n    elif (replace or (ni.schema_location is None)):\n        ni.schema_location = schema_location\n    elif (schema_location is None):\n        ni.schema_location = None\n    else:\n        raise ConflictingSchemaLocationError(ns_uri, ni.schema_location, schema_location)", "docstring": "Sets the schema location of the given namespace.\n\nIf ``replace`` is ``True``, then any existing schema location is\nreplaced.  Otherwise, if the schema location is already set to a\ndifferent value, an exception is raised.  If the schema location is set\nto None, it is effectively erased from this set (this is not considered\n\"replacement\".)\n\nArgs:\nns_uri (str): The namespace whose schema location is to be set\nschema_location (str): The schema location URI to set, or None\nreplace (bool): Whether to replace any existing schema location\n\nRaises:\nNamespaceNotFoundError: If the given namespace isn't in this set.\nConflictingSchemaLocationError: If replace is False,\nschema_location is not None, and the namespace already has a\ndifferent schema location in this set.", "source": "codesearchnet"}
{"code": "def is_valid(self, value):\n        \n        if not self.is_array:\n            return self._valid(value)\n\n        if isinstance(value, (list, set, tuple)):\n            return all([self._valid(item) for item in value])\n\n        return self._valid(value)", "docstring": "Validate value before actual instance setting based on type.\n\nArgs:\nvalue (object): The value object for validation.\n\nReturns:\nTrue if value validation succeeds else False.", "source": "juraj-google-style"}
{"code": "def CompileReport(self, mediator):\n    \n    lines_of_text = ['Listing file paths and hashes']\n    for pathspec, hashes in sorted(\n        self._paths_with_hashes.items(),\n        key=lambda tuple: tuple[0].comparable):\n\n      path_string = self._GeneratePathString(mediator, pathspec, hashes)\n      lines_of_text.append(path_string)\n\n    lines_of_text.append('')\n    report_text = '\\n'.join(lines_of_text)\n    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: report.", "source": "juraj-google-style"}
{"code": "def get_summed_cohp_by_label_and_orbital_list(self, label_list, orbital_list, divisor=1):\n        \n        \n        first_cohpobject = self.get_orbital_resolved_cohp(label_list[0], orbital_list[0])\n        summed_cohp = first_cohpobject.cohp.copy()\n        summed_icohp = first_cohpobject.icohp.copy()\n        for ilabel, label in enumerate(label_list[1:], 1):\n            cohp_here = self.get_orbital_resolved_cohp(label, orbital_list[ilabel])\n            summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up], cohp_here.cohp.copy()[Spin.up]], axis=0)\n            if Spin.down in summed_cohp:\n                summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down], cohp_here.cohp.copy()[Spin.down]], axis=0)\n            summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up], cohp_here.icohp.copy()[Spin.up]], axis=0)\n            if Spin.down in summed_icohp:\n                summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down], cohp_here.icohp.copy()[Spin.down]], axis=0)\n\n        divided_cohp = {}\n        divided_icohp = {}\n        divided_cohp[Spin.up] = np.divide(summed_cohp[Spin.up], divisor)\n        divided_icohp[Spin.up] = np.divide(summed_icohp[Spin.up], divisor)\n        if Spin.down in summed_cohp:\n            divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)\n            divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)\n\n        return Cohp(efermi=first_cohpobject.efermi, energies=first_cohpobject.energies, cohp=divided_cohp,\n                    are_coops=first_cohpobject.are_coops,\n                    icohp=divided_icohp)", "docstring": "Returns a COHP object that includes a summed COHP divided by divisor\n\nArgs:\nlabel_list: list of labels for the COHP that should be included in the summed cohp\norbital_list: list of orbitals for the COHPs that should be included in the summed cohp (same order as label_list)\ndivisor: float/int, the summed cohp will be divided by this divisor\nReturns:\nReturns a COHP object including a summed COHP", "source": "juraj-google-style"}
{"code": "def _extract_response_chunks(self, all_responses, response_chunks, api_name):\n    for response_chunk in response_chunks:\n        if (not isinstance(response_chunk, list)):\n            response_chunk = [response_chunk]\n        for response in response_chunk:\n            if (not response):\n                continue\n            if self._cache:\n                self._cache.cache_value(api_name, response['resource'], response)\n            all_responses[response['resource']] = response", "docstring": "Extracts and caches the responses from the response chunks in case\nof the responses for the requests containing multiple concatenated\nresources. Extracted responses are added to the already cached\nresponses passed in the all_responses parameter.\n\nArgs:\nall_responses: a list containing already cached responses.\nresponse_chunks: a list with response chunks.\napi_name: a string name of the API.", "source": "codesearchnet"}
{"code": "def _lift_unlifted_variables(graph, variable_holder):\n    with graph.as_default():\n        global_collection_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)\n        local_collection_variables = ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)\n        existing_captures = {id(c) for c in graph.internal_captures}\n        lifted_variables = {}\n\n        def _should_lift_variable(v):\n            return (v._in_graph_mode and v.graph.building_function) and isinstance(v, resource_variable_ops.BaseResourceVariable) and (id(v.handle) not in existing_captures)\n        for old_variable in global_collection_variables:\n            if _should_lift_variable(old_variable):\n                new_variable = _lift_single_variable(old_variable, graph, variable_holder)\n                lifted_variables[id(old_variable)] = new_variable\n                existing_captures.add(id(old_variable.handle))\n        for old_variable in local_collection_variables:\n            if _should_lift_variable(old_variable):\n                new_variable = _lift_single_variable(old_variable, graph, variable_holder)\n                lifted_variables[id(old_variable)] = new_variable\n                existing_captures.add(id(old_variable.handle))\n                if new_variable._in_graph_mode:\n                    outer_graph = new_variable.graph\n                    global_collection = outer_graph.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)\n                    global_collection.remove(new_variable)\n                    outer_graph.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, new_variable)\n        for collection_name in [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES]:\n            mutable_collection = ops.get_collection_ref(collection_name)\n            for index, current in enumerate(mutable_collection):\n                mutable_collection[index] = lifted_variables.get(id(current), current)\n                if not resource_variable_ops.is_resource_variable(mutable_collection[index]):\n                    logging.log_first_n(logging.WARN, 'Unable to create a python object for variable {} because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().'.format(mutable_collection[index]), 5)", "docstring": "Finds resource variables and lifts them into the outer context.\n\nWhen we import a GraphDef inside a wrap_function, no Python graph building\ncode runs. This means we get VarHandleOps which create variable resources,\nbut no corresponding Python objects. Leaving them like this works but gives\nthe user no way to interact with or modify the variables outside the graph.\n\nThis method searches for variables and lifts them out as regular variable\nobjects when possible, indicating to the FuncGraph that they are captures.\n\nArgs:\ngraph: The FuncGraph to lift variables from.\nvariable_holder: A VariableHolder to record the lifted variables in.", "source": "github-repos"}
{"code": "def list(cls, session, endpoint_override=None, data=None):\n    cls._check_implements('list')\n    return cls((endpoint_override or ('/%s.json' % cls.__endpoint__)), data=data, session=session)", "docstring": "Return records in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nendpoint_override (str, optional): Override the default\nendpoint using this.\ndata (dict, optional): Data to provide as request parameters.\n\nReturns:\nRequestPaginator(output_type=helpscout.BaseModel): Results\niterator.", "source": "codesearchnet"}
{"code": "def van(first_enc,\n        first_frame,\n        current_enc,\n        gt_image,\n        reuse=False,\n        scope_prefix='',\n        hparams=None):\n  \n  with tf.variable_scope(scope_prefix + 'van', reuse=reuse):\n    output_shape = first_frame.get_shape().as_list()\n    output_shape[0] = -1\n\n    first_depth = 64\n\n    f_first_enc, _ = van_enc_2d(first_enc, first_depth)\n    f_first_frame, image_enc_history = van_image_enc_2d(\n        first_frame, first_depth, hparams=hparams)\n    f_current_enc, van_higher_level = van_enc_2d(\n        current_enc, first_depth, reuse=True)\n    f_gt_image, _ = van_image_enc_2d(gt_image, first_depth, True,\n                                     hparams=hparams)\n\n    analogy_t = analogy_computation_2d(\n        f_first_enc, f_first_frame, f_current_enc, first_depth)\n    enc_img = f_current_enc + analogy_t\n\n    img = van_dec_2d(\n        enc_img, image_enc_history, output_shape, first_depth, hparams=hparams)\n\n    batch_size = tf.to_float(tf.shape(first_enc)[0])\n    r_loss = tf.nn.l2_loss(f_gt_image - f_current_enc - analogy_t) / batch_size\n\n    return img, r_loss, van_higher_level", "docstring": "Implements a VAN.\n\nArgs:\nfirst_enc: The first encoding.\nfirst_frame: The first ground truth frame.\ncurrent_enc: The encoding of the frame to generate.\ngt_image: The ground truth image, only used for regularization.\nreuse: To reuse in variable scope or not.\nscope_prefix: The prefix before the scope name.\nhparams: The python hparams.\n\nReturns:\nThe generated image.", "source": "juraj-google-style"}
{"code": "def _einsum_equation(input_shapes, output_shape):\n    ret = []\n    next_letter = ord('a')\n    dim_to_letter = {}\n    for (shape_num, shape) in enumerate((input_shapes + [output_shape])):\n        if (shape_num == len(input_shapes)):\n            ret.append('->')\n        elif (shape_num > 0):\n            ret.append(',')\n        for d in shape.dims:\n            if (d not in dim_to_letter):\n                dim_to_letter[d] = chr(next_letter)\n                next_letter += 1\n            ret.append(dim_to_letter[d])\n    return ''.join(ret)", "docstring": "Turn shapes into an einsum equation.\n\ne.g. \"ij,jk->ik\"\n\nArgs:\ninput_shapes: a list of Shapes\noutput_shape: a Shape\nReturns:\na string", "source": "codesearchnet"}
{"code": "def parse(cls, args):\n        \n\n        try:\n            (options, args) = cls.optparser.parse_args(args)\n            if options.mode not in [\"1\", \"2\"]:\n                raise ParseError(\"mode must be either '1' or '2'\",\n                                 cls.optparser.format_help())\n\n            if (options.dbtap_id is None) or (options.db_table is None):\n                raise ParseError(\"dbtap_id and db_table are required\",\n                                 cls.optparser.format_help())\n\n            \n\n        except OptionParsingError as e:\n            raise ParseError(e.msg, cls.optparser.format_help())\n        except OptionParsingExit as e:\n            return None\n\n        v = vars(options)\n        v[\"command_type\"] = \"DbImportCommand\"\n        return v", "docstring": "Parse command line arguments to construct a dictionary of command\nparameters that can be used to create a command\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used in create method\n\nRaises:\nParseError: when the arguments are not correct", "source": "juraj-google-style"}
{"code": "def bazel_command(self, subcommand: str='test', extra_options: Tuple[str, ...]=()) -> List[str]:\n    options = _dict_to_cli_options(self.options)\n    configs = [f'--config={config}' for config in self.configs]\n    build_tag_filters = f'--build_tag_filters={','.join(self.build_tag_filters)}'\n    test_tag_filters = f'--test_tag_filters={','.join(self.test_tag_filters)}'\n    action_env = [f'--action_env={k}={v}' for k, v in self.action_env.items()]\n    test_env = [f'--test_env={k}={v}' for k, v in self.test_env.items()]\n    repo_env = [f'--repo_env={k}={v}' for k, v in self.repo_env.items()]\n    override_repository = [f'--override_repository={k}={v}' for k, v in self.override_repository.items()]\n    tag_filters = [build_tag_filters, test_tag_filters]\n    all_options = tag_filters + configs + action_env + test_env + repo_env + override_repository + options + list(extra_options)\n    return ['bazel', subcommand, *all_options, '--', *self.target_patterns]", "docstring": "Returns a bazel test command for this build.\n\nArgs:\nsubcommand: The subcommand to give to bazel. `test` by default.\nextra_options: Extra options. For now just used to pass in `--nobuild`.\n\nReturns: List of command line arguments", "source": "github-repos"}
{"code": "def _get_kind_name(item):\n    if isinstance(item, (str, bytes)):\n        kind = 'bytes_list'\n    elif isinstance(item, int):\n        kind = 'int64_list'\n    elif isinstance(item, float):\n        kind = 'float_list'\n    elif isinstance(item, Any):\n        kind = 'any_list'\n    else:\n        kind = 'node_list'\n    return kind", "docstring": "Returns the kind name in CollectionDef.\n\nArgs:\nitem: A data item.\n\nReturns:\nThe string representation of the kind in CollectionDef.", "source": "github-repos"}
{"code": "def data_period_end_day(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `data_period_end_day`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `data_period_end_day`')\n    self._data_period_end_day = value", "docstring": "Corresponds to IDD Field `data_period_end_day`\n\nArgs:\nvalue (str): value for IDD Field `data_period_end_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _get_encoding(dom, default=\"utf-8\"):\n    \n    encoding = dom.find(\"meta\", {\"http-equiv\": \"Content-Type\"})\n\n    if not encoding:\n        return default\n\n    encoding = encoding[0].params.get(\"content\", None)\n\n    if not encoding:\n        return default\n\n    return encoding.lower().split(\"=\")[-1]", "docstring": "Try to look for meta tag in given `dom`.\n\nArgs:\ndom (obj): pyDHTMLParser dom of HTML elements.\ndefault (default \"utr-8\"): What to use if encoding is not found in\n`dom`.\n\nReturns:\nstr/default: Given encoding or `default` parameter if not found.", "source": "juraj-google-style"}
{"code": "def _with_num_row_partitions(self, num_row_partitions):\n    rank = self.rank\n    if rank is None:\n        raise ValueError('Rank must be known to adjust num_row_partitions')\n    if not isinstance(num_row_partitions, int):\n        raise ValueError('num_row_partitions must be an int')\n    if num_row_partitions < 0:\n        raise ValueError('num_row_partitions must be nonnegative')\n    if num_row_partitions == self.num_row_partitions:\n        return self\n    if num_row_partitions >= rank:\n        raise ValueError('num_row_partitions must be less than rank')\n    if num_row_partitions > self.num_row_partitions:\n        num_row_partitions_diff = num_row_partitions - self.num_row_partitions\n        new_inner_rank = self.rank - num_row_partitions\n        nvals = self._inner_shape_dim(0)\n        more_rp = []\n        for i in range(num_row_partitions_diff):\n            nrows = nvals\n            row_length = self._inner_shape_dim(i + 1)\n            nvals = nrows * row_length\n            rp = RowPartition.from_uniform_row_length(row_length, nrows=nrows, dtype=self.dtype)\n            more_rp.append(rp)\n        alt_inner = self._alt_inner_shape(new_inner_rank)\n        return DynamicRaggedShape(list(self.row_partitions) + more_rp, alt_inner)\n    else:\n        assert num_row_partitions < self.num_row_partitions\n        return DynamicRaggedShape(self.row_partitions[:num_row_partitions], self._alt_inner_shape(self.rank - num_row_partitions))", "docstring": "Creates an identical shape with the given num_row_partitions.\n\nNote that the shape must be statically refactorable to this rank.\nIn particular:\n* rank must be known.\n* num_row_partitions must be a nonnegative int.\n* num_row_partitions must be less than the rank of the shape\n* num_row_partitions must be greater or equal to the index of any ragged\ndimension.\n\nNote that if the num_row_partitions is the same, self is returned.\n\nArgs:\nnum_row_partitions: the target num_row_partitions (must be a nonnegative\nint).\n\nReturns:\na shape with a (possibly) different num_row_partitions.\n\nRaises:\nValueError: if the rank is unknown, the argument is not a nonnegative int,\nor there is a dimension that is nonuniform.", "source": "github-repos"}
{"code": "def RegisterDefinition(self, data_type_definition):\n    name_lower = data_type_definition.name.lower()\n    if (name_lower in self._definitions):\n        raise KeyError('Definition already set for name: {0:s}.'.format(data_type_definition.name))\n    if (data_type_definition.name in self._aliases):\n        raise KeyError('Alias already set for name: {0:s}.'.format(data_type_definition.name))\n    for alias in data_type_definition.aliases:\n        if (alias in self._aliases):\n            raise KeyError('Alias already set for name: {0:s}.'.format(alias))\n    self._definitions[name_lower] = data_type_definition\n    for alias in data_type_definition.aliases:\n        self._aliases[alias] = name_lower\n    if (data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT):\n        self._format_definitions.append(name_lower)", "docstring": "Registers a data type definition.\n\nThe data type definitions are identified based on their lower case name.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definitions.\n\nRaises:\nKeyError: if data type definition is already set for the corresponding\nname.", "source": "codesearchnet"}
{"code": "def pluralize(singular):\n    \n    if singular in UNCOUNTABLES:\n        return singular\n    for i in IRREGULAR:\n        if i[0] == singular:\n            return i[1]\n    for i in PLURALIZE_PATTERNS:\n        if re.search(i[0], singular):\n            return re.sub(i[0], i[1], singular)", "docstring": "Convert singular word to its plural form.\n\nArgs:\nsingular: A word in its singular form.\n\nReturns:\nThe word in its plural form.", "source": "juraj-google-style"}
{"code": "def create_services(self, compose_str: str) -> list:\n    if (not self._manager):\n        raise RuntimeError('Services can only be run on swarm manager nodes')\n    services_ids = []\n    try:\n        service_config = yaml.load(compose_str)\n        service_list = copy.deepcopy(service_config)\n        service_config.pop('version')\n        service_config.pop('services')\n        for service_name in service_list['services']:\n            service_exist = self._client.services.list(filters={'name': service_name})\n            if (not service_exist):\n                service_config['name'] = service_name\n                service_spec = self._parse_services(service_config, service_name, service_list)\n                created_service = self._client.services.create(**service_spec)\n                service_id = created_service.short_id\n                LOG.debug('Service created: %s', service_id)\n                services_ids.append(service_id)\n            else:\n                LOG.debug('Services already exists')\n    except yaml.YAMLError as exc:\n        print(exc)\n    return services_ids", "docstring": "Create new docker services.\n\nArgs:\ncompose_str (string): Docker compose 'file' string\n\nReturn:\nservice_names, list", "source": "codesearchnet"}
{"code": "def text_filepaths_for_task(self, tmp_dir, task_id):\n    \n    assert task_id >= 0\n    assert task_id < self.num_train_shards + self.num_dev_shards\n    if task_id < self.num_train_shards:\n      return [\n          f for i, f in enumerate(self.train_text_filepaths(tmp_dir))\n          if i % self.num_train_shards == task_id\n      ]\n    else:\n      return [\n          f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))\n          if i % self.num_dev_shards == task_id - self.num_train_shards\n      ]", "docstring": "List of input filepaths for a particular training or dev shard.\n\nArgs:\ntmp_dir: a string\ntask_id: an integer less than self.num_shards\nReturns:\na list of tuples (filepath, start_pos, num_bytes)", "source": "juraj-google-style"}
{"code": "def disqualified(self, num, natural=True, **kwargs):\n        \n        search_type = 'natural' if natural else 'corporate'\n        baseuri = (self._BASE_URI +\n                   'disqualified-officers/{}/{}'.format(search_type, num))\n        res = self.session.get(baseuri, params=kwargs)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for disqualified officers by officer ID.\n\nSearches for natural disqualifications by default. Specify\nnatural=False to search for corporate disqualifications.\n\nArgs:\nnum (str): Company number to search on.\nnatural (Optional[bool]): Natural or corporate search\nkwargs (dict): additional keywords passed into\nrequests.session.get *params* keyword.", "source": "juraj-google-style"}
{"code": "def global_idx_to_numeric_idx(self, axis, indices):\n        \n        assert axis in [\"row\", \"col\", \"columns\"]\n        if axis == \"row\":\n            return pandas.Index(\n                pandas.Series(np.arange(len(self.index)), index=self.index)\n                .loc[indices]\n                .values\n            )\n        elif axis in [\"col\", \"columns\"]:\n            return pandas.Index(\n                pandas.Series(np.arange(len(self.columns)), index=self.columns)\n                .loc[indices]\n                .values\n            )", "docstring": "Note: this function involves making copies of the index in memory.\n\nArgs:\naxis: Axis to extract indices.\nindices: Indices to convert to numerical.\n\nReturns:\nAn Index object.", "source": "juraj-google-style"}
{"code": "def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):\n    arg_space = arg_spec.args + arg_spec.kwonlyargs\n    arg_name_to_pos = {name: pos for pos, name in enumerate(arg_space)}\n    deprecated_positional_args = {}\n    for arg_name, spec in iter(names_to_ok_vals.items()):\n        if arg_name in arg_name_to_pos:\n            pos = arg_name_to_pos[arg_name]\n            deprecated_positional_args[arg_name] = DeprecatedArgSpec(pos, spec.has_ok_value, spec.ok_value)\n    return deprecated_positional_args", "docstring": "Builds a dictionary from deprecated arguments to their spec.\n\nReturned dict is keyed by argument name.\nEach value is a DeprecatedArgSpec with the following fields:\nposition: The zero-based argument position of the argument\nwithin the signature.  None if the argument isn't found in\nthe signature.\nok_values:  Values of this argument for which warning will be\nsuppressed.\n\nArgs:\nnames_to_ok_vals: dict from string arg_name to a list of values, possibly\nempty, which should not elicit a warning.\narg_spec: Output from tf_inspect.getfullargspec on the called function.\n\nReturns:\nDictionary from arg_name to DeprecatedArgSpec.", "source": "github-repos"}
{"code": "def ExpectingFunctionArgs(clean_lines, linenum):\n  \n  line = clean_lines.elided[linenum]\n  return (Match(r'^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\(', line) or\n          (linenum >= 2 and\n           (Match(r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$',\n                  clean_lines.elided[linenum - 1]) or\n            Match(r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$',\n                  clean_lines.elided[linenum - 2]) or\n            Search(r'\\bstd::m?function\\s*\\<\\s*$',\n                   clean_lines.elided[linenum - 1]))))", "docstring": "Checks whether where function type arguments are expected.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\n\nReturns:\nTrue if the line at 'linenum' is inside something that expects arguments\nof function types.", "source": "juraj-google-style"}
{"code": "def parse_variables_mapping(variables_mapping, ignore=False):\n    run_times = 0\n    parsed_variables_mapping = {}\n    while (len(parsed_variables_mapping) != len(variables_mapping)):\n        for var_name in variables_mapping:\n            run_times += 1\n            if (run_times > (len(variables_mapping) * 4)):\n                not_found_variables = {key: variables_mapping[key] for key in variables_mapping if (key not in parsed_variables_mapping)}\n                raise exceptions.VariableNotFound(not_found_variables)\n            if (var_name in parsed_variables_mapping):\n                continue\n            value = variables_mapping[var_name]\n            variables = extract_variables(value)\n            if (var_name in variables):\n                if ignore:\n                    parsed_variables_mapping[var_name] = value\n                    continue\n                raise exceptions.VariableNotFound(var_name)\n            if variables:\n                if any([(_var_name not in parsed_variables_mapping) for _var_name in variables]):\n                    continue\n            parsed_value = parse_lazy_data(value, parsed_variables_mapping)\n            parsed_variables_mapping[var_name] = parsed_value\n    return parsed_variables_mapping", "docstring": "eval each prepared variable and function in variables_mapping.\n\nArgs:\nvariables_mapping (dict):\n{\n\"varA\": LazyString(123$varB),\n\"varB\": LazyString(456$varC),\n\"varC\": LazyString(${sum_two($a, $b)}),\n\"a\": 1,\n\"b\": 2,\n\"c\": {\"key\": LazyString($b)},\n\"d\": [LazyString($a), 3]\n}\nignore (bool): If set True, VariableNotFound will be ignored.\nThis is used when initializing tests.\n\nReturns:\ndict: parsed variables_mapping should not contain any variable or function.\n{\n\"varA\": \"1234563\",\n\"varB\": \"4563\",\n\"varC\": \"3\",\n\"a\": 1,\n\"b\": 2,\n\"c\": {\"key\": 2},\n\"d\": [1, 3]\n}", "source": "codesearchnet"}
{"code": "def read_config_info(ini_file):\n    try:\n        config = RawConfigParser()\n        config.optionxform = (lambda option: option)\n        config.read(ini_file)\n        the_stuff = {}\n        for section in config.sections():\n            the_stuff[section] = {}\n            for option in config.options(section):\n                the_stuff[section][option] = config.get(section, option)\n        return the_stuff\n    except Exception as wtf:\n        logging.error('Exception caught in read_config_info(): {}'.format(wtf))\n        traceback.print_exc(file=sys.stdout)\n        return sys.exit(1)", "docstring": "Read the INI file\n\nArgs:\nini_file - path to the file\n\nReturns:\nA dictionary of stuff from the INI file\n\nExits:\n1 - if problems are encountered", "source": "codesearchnet"}
{"code": "def events(self):\n    if (not self.__events):\n        self.__events = Events(self.__connection)\n    return self.__events", "docstring": "Gets the Events API client.\n\nReturns:\nEvents:", "source": "codesearchnet"}
{"code": "def __call__(self, fn):\n        \n        \n        if isinstance(fn, Response):\n            return fn.mock\n        if isinstance(fn, Mock):\n            return fn\n\n        \n        if not isfunction(fn) and not ismethod(fn):\n            raise TypeError('first argument must be a method or function')\n\n        \n        self._engine.remove_mock(self)\n\n        @functools.wraps(fn)\n        def decorator(*args, **kw):\n            \n            self._engine.add_mock(self)\n\n            \n            \n            \n            engine_active = self._engine.active\n            if not engine_active:\n                self._engine.activate()\n\n            \n            try:\n                return fn(*args, **kw)\n            finally:\n                \n                \n                self._engine.remove_mock(self)\n\n                \n                if not engine_active:\n                    self._engine.disable()\n\n        return decorator", "docstring": "Overload Mock instance as callable object in order to be used\nas decorator definition syntax.\n\nArguments:\nfn (function): function to decorate.\n\nReturns:\nfunction or pook.Mock", "source": "juraj-google-style"}
{"code": "def as_fn(self, *binding_order):\n    if (len(binding_order) != len(self.unbound_vars)):\n        raise ValueError('All vars must be specified.')\n    for arg in binding_order:\n        if (arg not in self.unbound_vars):\n            raise ValueError(('Unknown binding: %s' % arg))\n\n    def func(*args, **kwargs):\n        'Constructs a template.'\n        if (len(binding_order) != len(args)):\n            raise ValueError(('Missing values, expects: %s' % binding_order))\n        values = dict(zip(binding_order, args))\n        values.update(kwargs)\n        return self.construct(**values)\n    func.__doc__ = _gen_ipython_string(func, binding_order, [], func.__doc__)\n    return func", "docstring": "Creates a function by binding the arguments in the given order.\n\nArgs:\n*binding_order: The unbound variables. This must include all values.\nReturns:\nA function that takes the arguments of binding_order.\nRaises:\nValueError: If the bindings are missing values or include unknown values.", "source": "codesearchnet"}
{"code": "def noisy_moments(self, moments: 'Iterable[cirq.Moment]',\n                      system_qubits: Sequence['cirq.Qid']\n                     ) -> Sequence['cirq.OP_TREE']:\n        \n        if not hasattr(self.noisy_moment, '_not_overridden'):\n            result = []\n            for moment in moments:\n                result.append(self.noisy_moment(moment, system_qubits))\n            return result\n\n        if not hasattr(self.noisy_operation, '_not_overridden'):\n            result = []\n            for moment in moments:\n                result.append([self.noisy_operation(op) for op in moment])\n            return result\n\n        assert False, 'Should be unreachable.'", "docstring": "Adds possibly stateful noise to a series of moments.\n\nArgs:\nmoments: The moments to add noise to.\nsystem_qubits: A list of all qubits in the system.\n\nReturns:\nA sequence of OP_TREEs, with the k'th tree corresponding to the\nnoisy operations for the k'th moment.", "source": "juraj-google-style"}
{"code": "def parseString(inString, silence=False):\n    \n    parser = None\n    rootNode= parsexmlstring_(inString, parser)\n    rootTag, rootClass = get_root_tag(rootNode)\n    if rootClass is None:\n        rootTag = 'PcGts'\n        rootClass = PcGts\n    rootObj = rootClass.factory()\n    rootObj.build(rootNode)\n    \n    if not silence:\n        sys.stdout.write('<?xml version=\"1.0\" ?>\\n')\n        rootObj.export(\n            sys.stdout, 0, name_=rootTag,\n            namespacedef_='xmlns:pc=\"http:\n    return rootObj", "docstring": "Parse a string, create the object tree, and export it.\n\nArguments:\n- inString -- A string.  This XML fragment should not start\nwith an XML declaration containing an encoding.\n- silence -- A boolean.  If False, export the object.\nReturns -- The root object in the tree.", "source": "juraj-google-style"}
{"code": "def strace_clear_all(self):\n        \n        data = 0\n        res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.TRACE_EVENT_CLR_ALL, data)\n        if res < 0:\n            raise errors.JLinkException('Failed to clear all STRACE events.')\n\n        return None", "docstring": "Clears all STRACE events.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"}
{"code": "def _properties_model_to_dict(properties):\n    result = {}\n    for attr in properties.__dict__:\n        value = getattr(properties, attr)\n        if (hasattr(value, '__module__') and ('models' in value.__module__)):\n            value = _properties_model_to_dict(value)\n        if (not ((value is None) or (isinstance(value, dict) and (not value)))):\n            result[attr] = value\n    return result", "docstring": "Convert properties model to dict.\n\nArgs:\nproperties: Properties model.\n\nReturns:\ndict: Converted model.", "source": "codesearchnet"}
{"code": "def optimize(onnx_model_path: Path) -> Path:\n    from onnxruntime import InferenceSession, SessionOptions\n    opt_model_path = generate_identified_filename(onnx_model_path, '-optimized')\n    sess_option = SessionOptions()\n    sess_option.optimized_model_filepath = opt_model_path.as_posix()\n    _ = InferenceSession(onnx_model_path.as_posix(), sess_option)\n    print(f'Optimized model has been written at {opt_model_path}: ✔')\n    print('/!\\\\ Optimized model contains hardware specific operators which might not be portable. /!\\\\')\n    return opt_model_path", "docstring": "Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the\noptimizations possible\n\nArgs:\nonnx_model_path: filepath where the model binary description is stored\n\nReturns: Path where the optimized model binary description has been saved", "source": "github-repos"}
{"code": "class Conv1D(nn.Module):\n\n    def __init__(self, nf, nx):\n        super().__init__()\n        self.nf = nf\n        self.nx = nx\n        self.weight = nn.Parameter(torch.empty(nx, nf))\n        self.bias = nn.Parameter(torch.zeros(nf))\n        nn.init.normal_(self.weight, std=0.02)\n\n    def __repr__(self) -> str:\n        return 'Conv1D(nf={nf}, nx={nx})'.format(**self.__dict__)\n\n    def forward(self, x):\n        size_out = x.size()[:-1] + (self.nf,)\n        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)\n        x = x.view(size_out)\n        return x", "docstring": "1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).\n\nBasically works like a linear layer but the weights are transposed.\n\nArgs:\nnf (`int`): The number of output features.\nnx (`int`): The number of input features.", "source": "github-repos"}
{"code": "def _get_or_create_global_step_read(graph=None):\n    graph = graph or ops.get_default_graph()\n    global_step_read_tensor = _get_global_step_read(graph)\n    if global_step_read_tensor is not None:\n        return global_step_read_tensor\n    global_step_tensor = get_global_step(graph)\n    if global_step_tensor is None:\n        return None\n    with graph.as_default() as g, g.name_scope(None):\n        with g.name_scope(global_step_tensor.op.name + '/'):\n            if isinstance(global_step_tensor, variables.Variable):\n                global_step_value = cond.cond(variable_v1.is_variable_initialized(global_step_tensor), global_step_tensor.read_value, lambda: global_step_tensor.initial_value)\n            else:\n                global_step_value = global_step_tensor\n            global_step_read_tensor = global_step_value + 0\n            ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)\n    return _get_global_step_read(graph)", "docstring": "Gets or creates global step read tensor in graph.\n\nArgs:\ngraph: The graph in which to create the global step read tensor. If missing,\nuse default graph.\n\nReturns:\nGlobal step read tensor if there is global_step_tensor else return None.", "source": "github-repos"}
{"code": "def rho_rec(self, g2):\n        \n        return (self.expnorm / np.sqrt(1 + self.gamma2 * g2) + (\n            1 - self.expnorm) * np.exp(-0.25 * self.beta2 * g2))", "docstring": "Reciprocal space model charge value\nfor input squared reciprocal vector.\nArgs:\ng2: Square of reciprocal vector\n\nReturns:\nCharge density at the reciprocal vector magnitude", "source": "juraj-google-style"}
{"code": "def _ensure_unicode(text):\n    \n    if isinstance(text, six.binary_type):\n        return text.decode(sys.getfilesystemencoding(), 'replace')\n    else:\n        return text", "docstring": "Ensures the text passed in becomes unicode\nArgs:\ntext (str|unicode)\nReturns:\nunicode", "source": "juraj-google-style"}
{"code": "def read_accpro20(infile):\n    \n    with open(infile) as f:\n        records = f.read().splitlines()\n\n    accpro20_dict = {}\n    for i, r in enumerate(records):\n        if i % 2 == 0:\n            \n            \n            accpro20_dict[records[i].split(' ')[0][1:]] = [int(x) for x in records[i + 1].split(' ')]\n\n    return accpro20_dict", "docstring": "Read the accpro20 output (.acc20) and return the parsed FASTA records.\n\nKeeps the spaces between the accessibility numbers.\n\nArgs:\ninfile: Path to .acc20 file\n\nReturns:\ndict: Dictionary of accessibilities with keys as the ID", "source": "juraj-google-style"}
{"code": "def FoldByteStream(self, mapped_value, **kwargs):\n    \n    try:\n      byte_stream = mapped_value.encode(self._data_type_definition.encoding)\n\n    except Exception as exception:\n      error_string = (\n          'Unable to write: {0:s} to byte stream with error: {1!s}').format(\n              self._data_type_definition.name, exception)\n      raise errors.MappingError(error_string)\n\n    return super(StringMap, self).FoldByteStream(byte_stream, **kwargs)", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def invoke(self):\n    self._ensure_safe()\n    self._interpreter.Invoke()", "docstring": "Invoke the interpreter.\n\nBe sure to set the input sizes, allocate tensors and fill values before\ncalling this. Also, note that this function releases the GIL so heavy\ncomputation can be done in the background while the Python interpreter\ncontinues. No other function on this object should be called while the\ninvoke() call has not finished.\n\nRaises:\nValueError: When the underlying interpreter fails raise ValueError.", "source": "github-repos"}
{"code": "def backward_transfer_pair(backward_channel: NettingChannelState, payer_transfer: LockedTransferSignedState, pseudo_random_generator: random.Random, block_number: BlockNumber) -> Tuple[(Optional[MediationPairState], List[Event])]:\n    transfer_pair = None\n    events: List[Event] = list()\n    lock = payer_transfer.lock\n    lock_timeout = BlockTimeout((lock.expiration - block_number))\n    if is_channel_usable(backward_channel, lock.amount, lock_timeout):\n        message_identifier = message_identifier_from_prng(pseudo_random_generator)\n        refund_transfer = channel.send_refundtransfer(channel_state=backward_channel, initiator=payer_transfer.initiator, target=payer_transfer.target, amount=get_lock_amount_after_fees(lock, backward_channel), message_identifier=message_identifier, payment_identifier=payer_transfer.payment_identifier, expiration=lock.expiration, secrethash=lock.secrethash)\n        transfer_pair = MediationPairState(payer_transfer, backward_channel.partner_state.address, refund_transfer.transfer)\n        events.append(refund_transfer)\n    return (transfer_pair, events)", "docstring": "Sends a transfer backwards, allowing the previous hop to try a new\nroute.\n\nWhen all the routes available for this node failed, send a transfer\nbackwards with the same amount and secrethash, allowing the previous hop to\ndo a retry.\n\nArgs:\nbackward_channel: The original channel which sent the mediated transfer\nto this node.\npayer_transfer: The *latest* payer transfer which is backing the\nmediation.\nblock_number: The current block number.\n\nReturns:\nThe mediator pair and the correspoding refund event.", "source": "codesearchnet"}
{"code": "def run_tag_from_session_and_metric(session_name, metric_name):\n  \n  assert isinstance(session_name, six.string_types)\n  assert isinstance(metric_name, api_pb2.MetricName)\n  \n  \n  \n  \n  run = os.path.normpath(os.path.join(session_name, metric_name.group))\n  tag = metric_name.tag\n  return run, tag", "docstring": "Returns a (run,tag) tuple storing the evaluations of the specified metric.\n\nArgs:\nsession_name: str.\nmetric_name: MetricName protobuffer.\nReturns: (run, tag) tuple.", "source": "juraj-google-style"}
{"code": "def GetRootKey(self):\n    regf_key = self._regf_file.get_root_key()\n    if (not regf_key):\n        return None\n    return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix)", "docstring": "Retrieves the root key.\n\nReturns:\nWinRegistryKey: Windows Registry root key or None if not available.", "source": "codesearchnet"}
{"code": "def get_att_mats(translate_model):\n    enc_atts = []\n    dec_atts = []\n    encdec_atts = []\n    prefix = 'transformer/body/'\n    postfix_self_attention = '/multihead_attention/dot_product_attention'\n    if (translate_model.hparams.self_attention_type == 'dot_product_relative'):\n        postfix_self_attention = '/multihead_attention/dot_product_attention_relative'\n    postfix_encdec = '/multihead_attention/dot_product_attention'\n    for i in range(translate_model.hparams.num_hidden_layers):\n        enc_att = translate_model.attention_weights[('%sencoder/layer_%i/self_attention%s' % (prefix, i, postfix_self_attention))]\n        dec_att = translate_model.attention_weights[('%sdecoder/layer_%i/self_attention%s' % (prefix, i, postfix_self_attention))]\n        encdec_att = translate_model.attention_weights[('%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix_encdec))]\n        enc_atts.append(enc_att)\n        dec_atts.append(dec_att)\n        encdec_atts.append(encdec_att)\n    return (enc_atts, dec_atts, encdec_atts)", "docstring": "Get's the tensors representing the attentions from a build model.\n\nThe attentions are stored in a dict on the Transformer object while building\nthe graph.\n\nArgs:\ntranslate_model: Transformer object to fetch the attention weights from.\n\nReturns:\nTuple of attention matrices; (\nenc_atts: Encoder self attention weights.\nA list of `num_layers` numpy arrays of size\n(batch_size, num_heads, inp_len, inp_len)\ndec_atts: Decoder self attetnion weights.\nA list of `num_layers` numpy arrays of size\n(batch_size, num_heads, out_len, out_len)\nencdec_atts: Encoder-Decoder attention weights.\nA list of `num_layers` numpy arrays of size\n(batch_size, num_heads, out_len, inp_len)\n)", "source": "codesearchnet"}
{"code": "def _ParseContainerTable(self, parser_mediator, table, container_name):\n    \n    if table is None:\n      raise ValueError('Missing table value.')\n\n    for record_index, esedb_record in enumerate(table.records):\n      if parser_mediator.abort:\n        break\n\n      \n      \n      if container_name == 'Content':\n        value_mappings = self._CONTAINER_TABLE_VALUE_MAPPINGS\n      else:\n        value_mappings = None\n\n      try:\n        record_values = self._GetRecordValues(\n            parser_mediator, table.name, esedb_record,\n            value_mappings=value_mappings)\n\n      except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning((\n            'Unable to retrieve record values from record: {0:d} '\n            'in table: {1:s}').format(record_index, table.name))\n        continue\n\n      if (container_name in self._SUPPORTED_CONTAINER_NAMES or\n          container_name.startswith('MSHist')):\n        access_count = record_values.get('AccessCount', None)\n        cached_filename = record_values.get('Filename', None)\n        cached_file_size = record_values.get('FileSize', None)\n        cache_identifier = record_values.get('CacheId', None)\n        container_identifier = record_values.get('ContainerId', None)\n        entry_identifier = record_values.get('EntryId', None)\n        file_extension = record_values.get('FileExtension', None)\n        redirect_url = record_values.get('RedirectUrl', None)\n        sync_count = record_values.get('SyncCount', None)\n\n        url = record_values.get('Url', '')\n        \n        if ord(url[0]) < 0x20 or ord(url[0]) == 0x7f:\n          url = None\n\n        request_headers = record_values.get('RequestHeaders', None)\n        \n        if not isinstance(request_headers, py2to3.UNICODE_TYPE):\n          request_headers = None\n\n        response_headers = record_values.get('ResponseHeaders', None)\n        \n        if not isinstance(response_headers, py2to3.UNICODE_TYPE):\n          response_headers = None\n\n        event_data = MsieWebCacheContainerEventData()\n        event_data.access_count = access_count\n        event_data.cached_filename = cached_filename\n        event_data.cached_file_size = cached_file_size\n        event_data.cache_identifier = cache_identifier\n        event_data.container_identifier = container_identifier\n        event_data.entry_identifier = entry_identifier\n        event_data.file_extension = file_extension\n        event_data.redirect_url = redirect_url\n        event_data.request_headers = request_headers\n        event_data.response_headers = response_headers\n        event_data.sync_count = sync_count\n        event_data.url = url\n\n        timestamp = record_values.get('SyncTime', None)\n        if timestamp:\n          date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n          event = time_events.DateTimeValuesEvent(\n              date_time, 'Synchronization time')\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        timestamp = record_values.get('CreationTime', None)\n        if timestamp:\n          date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n          event = time_events.DateTimeValuesEvent(\n              date_time, definitions.TIME_DESCRIPTION_CREATION)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        timestamp = record_values.get('ExpiryTime', None)\n        if timestamp:\n          date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n          
event = time_events.DateTimeValuesEvent(\n              date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        timestamp = record_values.get('ModifiedTime', None)\n        if timestamp:\n          date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n          event = time_events.DateTimeValuesEvent(\n              date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        timestamp = record_values.get('AccessedTime', None)\n        if timestamp:\n          date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n          event = time_events.DateTimeValuesEvent(\n              date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n          parser_mediator.ProduceEventWithEventData(event, event_data)\n\n        timestamp = record_values.get('PostCheckTime', None)\n        if timestamp:\n          date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n          event = time_events.DateTimeValuesEvent(\n              date_time, 'Post check time')\n          parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Container_# table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntable (pyesedb.table): table.\ncontainer_name (str): container name, which indicates the table type.\n\nRaises:\nValueError: if the table value is missing.", "source": "juraj-google-style"}
{"code": "def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TwitterAndroidStatusEventData()\n    event_data.query = query\n    event_data.identifier = self._GetRowValue(query_hash, row, '_id')\n    event_data.author_identifier = self._GetRowValue(\n        query_hash, row, 'author_id')\n    event_data.username = self._GetRowValue(query_hash, row, 'username')\n    event_data.content = self._GetRowValue(query_hash, row, 'content')\n    event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')\n    event_data.retweeted = self._GetRowValue(query_hash, row, 'retweeted')\n\n    timestamp = self._GetRowValue(query_hash, row, 'time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a status row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def merge_config(config: Mapping[(str, Any)], override_config: Mapping[(str, Any)]=None, override_config_fn: str=None) -> Mapping[(str, Any)]:\n    if override_config_fn:\n        with open(override_config_fn, 'r') as f:\n            override_config = yaml.load(f, Loader=yaml.SafeLoader)\n    if (not override_config):\n        log.info('Missing override_config')\n    return functools.reduce(rec_merge, (config, override_config))", "docstring": "Override config with additional configuration in override_config or override_config_fn\n\nUsed in script to merge CLI options with Config\n\nArgs:\nconfig: original configuration\noverride_config: new configuration to override/extend current config\noverride_config_fn: new configuration filename as YAML file", "source": "codesearchnet"}
{"code": "def _cast_to_known_type(name):\n    if name is None:\n        return None\n    return name.rstrip('.')", "docstring": "Canonicalizes a string representing a type if possible.\n\n# TODO(dbieber): Support additional canonicalization, such as string/str, and\n# boolean/bool.\n\nExample:\n_cast_to_known_type(\"str.\") == \"str\"\n\nArgs:\nname: A string representing a type, or None.\nReturns:\nA canonicalized version of the type string.", "source": "github-repos"}
{"code": "def __init__(self, text_encoder_config=None, **kwargs):\n    \n    super(MultiNLIConfig, self).__init__(**kwargs)\n    self.text_encoder_config = (\n        text_encoder_config or tfds.features.text.TextEncoderConfig())", "docstring": "BuilderConfig for MultiNLI.\n\nArgs:\ntext_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration\nfor the `tfds.features.text.TextEncoder` used for the features feature.\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=None):\n        \n        super(ExtensionType, self).__init__(value, Tags.EXTENSION_TYPE)", "docstring": "Construct an ExtensionType object.\n\nArgs:\nvalue (Types): A number representing a Types enumeration value,\nindicating the type of the extended Object. Optional, defaults\nto None.", "source": "juraj-google-style"}
{"code": "def add_data(self, data):\n    if ((self.data_size - self.data_index) < len(data)):\n        return Error.DESTINATION_BUFFER_TOO_SMALL\n    if (self.in_progress is not None):\n        self.in_progress.data += data\n    return Error.NO_ERROR", "docstring": "Add data to the currently in progress entry.\n\nArgs:\ndata (bytes): The data that we want to add.\n\nReturns:\nint: An error code", "source": "codesearchnet"}
{"code": "def get_xnp(self, x: Array, *, strict: bool=True):\n    if self.is_jax(x):\n        return self.jnp\n    elif self.is_tf(x):\n        return self.tnp\n    elif self.is_np(x):\n        return np\n    elif self.is_torch(x):\n        return self.torch\n    elif not strict and isinstance(x, _ARRAY_LIKE_TYPES):\n        return np\n    else:\n        raise TypeError(f'Cannot infer the numpy module from array: {type(x).__name__}')", "docstring": "Returns the numpy module associated with the given array.\n\nArgs:\nx: Either tf, jax or numpy array.\nstrict: If `False`, default to `np.array` if the array can't be infered (\nto support array-like: list, tuple,...)\n\nReturns:\nThe numpy module.", "source": "github-repos"}
{"code": "async def attach_file(self, file_path: str, description: str = None) -> Attachment:\n        \n        with open(file_path, 'rb') as f:\n            return await self._attach(f.read(), description)", "docstring": "add a file as an attachment\n\n|methcoro|\n\nWarning:\n|unstable|\n\nArgs:\nfile_path: path to the file you want to add\ndescription: *optional* description for your attachment\n\nReturns:\nAttachment:\n\nRaises:\nValueError: file_path must not be None\nAPIException", "source": "juraj-google-style"}
{"code": "def future(self, request, timeout=None, metadata=None, credentials=None):\n    return _utils.wrap_future_call(self._inner.future(request, timeout, metadata, credentials), self._loop, self._executor)", "docstring": "Asynchronously invokes the underlying RPC.\n\nArgs:\nrequest: The request value for the RPC.\ntimeout: An optional duration of time in seconds to allow for the RPC.\nmetadata: Optional :term:`metadata` to be transmitted to the\nservice-side of the RPC.\ncredentials: An optional CallCredentials for the RPC.\n\nReturns:\nAn object that is both a Call for the RPC and a Future. In the event of\nRPC completion, the return Call-Future's result value will be the\nresponse message of the RPC. Should the event terminate with non-OK\nstatus, the returned Call-Future's exception value will be an RpcError.", "source": "codesearchnet"}
{"code": "def is60(msg):\n    \n\n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    \n\n    if wrongstatus(d, 1, 2, 12):\n        return False\n\n    if wrongstatus(d, 13, 14, 23):\n        return False\n\n    if wrongstatus(d, 24, 25, 34):\n        return False\n\n    if wrongstatus(d, 35, 36, 45):\n        return False\n\n    if wrongstatus(d, 46, 47, 56):\n        return False\n\n    ias = ias60(msg)\n    if ias is not None and ias > 500:\n        return False\n\n    mach = mach60(msg)\n    if mach is not None and mach > 1:\n        return False\n\n    vr_baro = vr60baro(msg)\n    if vr_baro is not None and abs(vr_baro) > 6000:\n        return False\n\n    vr_ins = vr60ins(msg)\n    if vr_ins is not None and abs(vr_ins) > 6000:\n        return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 6,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def comment(self, text, comment_prefix='\n        \n        comment = Comment(self._container)\n        if not text.startswith(comment_prefix):\n            text = \"{} {}\".format(comment_prefix, text)\n        if not text.endswith('\\n'):\n            text = \"{}{}\".format(text, '\\n')\n        comment.add_line(text)\n        self._container.structure.insert(self._idx, comment)\n        self._idx += 1\n        return self", "docstring": "Creates a comment block\n\nArgs:\ntext (str): content of comment without #\ncomment_prefix (str): character indicating start of comment\n\nReturns:\nself for chaining", "source": "juraj-google-style"}
{"code": "class TFConvNextLayer(keras.layers.Layer):\n\n    def __init__(self, config, dim, drop_path=0.0, **kwargs):\n        super().__init__(**kwargs)\n        self.dim = dim\n        self.config = config\n        self.dwconv = keras.layers.Conv2D(filters=dim, kernel_size=7, padding='same', groups=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='dwconv')\n        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')\n        self.pwconv1 = keras.layers.Dense(units=4 * dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='pwconv1')\n        self.act = get_tf_activation(config.hidden_act)\n        self.pwconv2 = keras.layers.Dense(units=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='pwconv2')\n        self.drop_path = TFConvNextDropPath(drop_path, name='drop_path') if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')\n\n    def build(self, input_shape: tf.TensorShape=None):\n        self.layer_scale_parameter = self.add_weight(shape=(self.dim,), initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_parameter') if self.config.layer_scale_init_value > 0 else None\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'dwconv', None) is not None:\n            with tf.name_scope(self.dwconv.name):\n                self.dwconv.build([None, None, None, self.dim])\n        if getattr(self, 'layernorm', None) is not None:\n            with tf.name_scope(self.layernorm.name):\n                self.layernorm.build([None, None, None, self.dim])\n        if getattr(self, 'pwconv1', None) is not None:\n            with tf.name_scope(self.pwconv1.name):\n                self.pwconv1.build([None, None, self.dim])\n        if getattr(self, 'pwconv2', None) is not None:\n            with tf.name_scope(self.pwconv2.name):\n                self.pwconv2.build([None, None, 4 * self.dim])\n        if getattr(self, 'drop_path', None) is not None:\n            with tf.name_scope(self.drop_path.name):\n                self.drop_path.build(None)\n\n    def call(self, hidden_states, training=False):\n        input = hidden_states\n        x = self.dwconv(hidden_states)\n        x = self.layernorm(x)\n        x = self.pwconv1(x)\n        x = self.act(x)\n        x = self.pwconv2(x)\n        if self.layer_scale_parameter is not None:\n            x = self.layer_scale_parameter * x\n        x = input + self.drop_path(x, training=training)\n        return x", "docstring": "This corresponds to the `Block` class in the original implementation.\n\nThere are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,\nH, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back\n\nThe authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow\nNHWC ordering, we can just apply the operations straight-away without the permutation.\n\nArgs:\nconfig ([`ConvNextConfig`]): Model configuration class.\ndim (`int`): Number of input channels.\ndrop_path (`float`): Stochastic depth rate. Default: 0.0.", "source": "github-repos"}
{"code": "def __str__(self):\n    duplicates = []\n\n    def AppendDuplicateItem(item, count):\n        if count == 1:\n            duplicates.append('{0!r}'.format(item))\n        else:\n            duplicates.append('{0!r} [{1} copies]'.format(item, count))\n    with self._lock:\n        for item, count in six.iteritems(self._d):\n            AppendDuplicateItem(item, count)\n        for item, count in zip(self._unhashable_items, self._unhashable_counts):\n            AppendDuplicateItem(item, count)\n    return '[{0}]'.format(', '.join(duplicates))", "docstring": "Returns the string representation of the duplicate counts.\n\nItems occurring more than once are accompanied by their count.\nOtherwise the count is implied to be 1.\n\nFor example, if the internal dict is {2: 1, 3: 4, 'abc': 1}, this returns\nthe string \"[{2, 3 [4 copies], 'abc'}]\".\n\nReturns:\nString, the counts of duplicate items.", "source": "github-repos"}
{"code": "def WriteEventMACBGroup(self, event_macb_group):\n    output_values = self._GetOutputValues(event_macb_group[0])\n    timestamp_descriptions = [event.timestamp_desc for event in event_macb_group]\n    output_values[3] = self._output_mediator.GetMACBRepresentationFromDescriptions(timestamp_descriptions)\n    output_values[6] = '; '.join(timestamp_descriptions)\n    self._WriteOutputValues(output_values)", "docstring": "Writes an event MACB group to the output.\n\nArgs:\nevent_macb_group (list[EventObject]): event MACB group.", "source": "codesearchnet"}
{"code": "def __content_type_matches(self, content_type, available_content_types):\n    if (content_type is None):\n        return False\n    if (content_type in available_content_types):\n        return True\n    for available_content_type in available_content_types:\n        if (available_content_type in content_type):\n            return True\n    return False", "docstring": "Check if the given content type matches one of the available content types.\n\nArgs:\ncontent_type (str): The given content type.\navailable_content_types list(str): All the available content types.\n\nReturns:\nbool: True if a match was found, False otherwise.", "source": "codesearchnet"}
{"code": "def search(self, search_phrase, limit=None):\n        \n        \n        \n        \n        search_phrase = search_phrase.replace('-', '_')\n        query, query_params = self._make_query_from_terms(search_phrase)\n\n        self._parsed_query = (query, query_params)\n\n        connection = self.backend.library.database.connection\n        \n        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))\n\n        logger.debug('Searching datasets using `{}` query.'.format(query))\n        results = connection.execute(query,\n                                     **query_params).fetchall()  \n\n        datasets = defaultdict(DatasetSearchResult)\n        for result in results:\n            vid, score = result\n            datasets[vid] = DatasetSearchResult()\n            datasets[vid].vid = vid\n            datasets[vid].b_score = score\n\n        logger.debug('Extending datasets with partitions.')\n        for partition in self.backend.partition_index.search(search_phrase):\n            datasets[partition.dataset_vid].p_score += partition.score\n            datasets[partition.dataset_vid].partitions.add(partition)\n        return list(datasets.values())", "docstring": "Finds datasets by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to return. None means without limit.\n\nReturns:\nlist of DatasetSearchResult instances.", "source": "juraj-google-style"}
{"code": "def set_input_embeddings(self, value):\n    main_layer = getattr(self, self.base_model_prefix)\n    if main_layer is None:\n        raise NotImplementedError('The model does not implements the base_model_prefix attribute.')\n    try:\n        main_layer.set_input_embeddings(value)\n    except AttributeError:\n        logger.info('Building the model')\n        self.build_in_name_scope()\n        main_layer.set_input_embeddings(value)", "docstring": "Set model's input embeddings\n\nArgs:\nvalue (`tf.Variable`):\nThe new weights mapping hidden states to vocabulary.", "source": "github-repos"}
{"code": "def _pad_input(self, inputs):\n    if all(((p == self._conv_op_padding) for p in self._padding)):\n        return inputs\n    assert (self._conv_op_padding == VALID)\n\n    def pad_amount(kernel_size, rate, padding):\n        'Pre- and post-padding required for a particular axis before conv op.'\n        effective_kernel_size = int((((kernel_size - 1) * rate) + 1))\n        if (padding == FULL):\n            return [(effective_kernel_size - 1), (effective_kernel_size - 1)]\n        if (padding == CAUSAL):\n            return [(effective_kernel_size - 1), 0]\n        if (padding == REVERSE_CAUSAL):\n            return [0, (effective_kernel_size - 1)]\n        if (padding == SAME):\n            return [((effective_kernel_size - 1) \n        return [0, 0]\n    paddings = map(pad_amount, self._kernel_shape, self._rate, self._padding)\n    if self._data_format.startswith('NC'):\n        paddings = ([[0, 0], [0, 0]] + list(paddings))\n    else:\n        paddings = (([[0, 0]] + list(paddings)) + [[0, 0]])\n    return tf.pad(inputs, paddings)", "docstring": "Pad input in case the desired padding type requires it.\n\nVALID and SAME padding types are directly supported by tensorflow\nconvolution ops, so don't require us to pad input ourselves, at least\nin cases where the same method is used for all dimensions.\n\nOther padding types (FULL, CAUSAL, REVERSE_CAUSAL) aren't directly supported\nby conv ops but can be implemented by using VALID and padding the input\nappropriately ourselves.\n\nIf different padding types are used for different dimensions, we use VALID\nbut pad the input ourselves along any dimensions that require other padding\ntypes.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\n\nReturns:\ninputs: The `inputs` argument that has had any required padding added.", "source": "codesearchnet"}
{"code": "def convert_new_publication_info_to_old(publication_infos):\n    \n    def _needs_a_hidden_pubnote(journal_title, journal_volume):\n        return (\n            journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and\n            journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]\n        )\n\n    result = []\n\n    for publication_info in publication_infos:\n        _publication_info = copy.deepcopy(publication_info)\n        journal_title = _publication_info.get('journal_title')\n\n        try:\n            journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]\n            _publication_info['journal_title'] = journal_title\n            result.append(_publication_info)\n            continue\n        except KeyError:\n            pass\n\n        journal_volume = _publication_info.get('journal_volume')\n        year = _publication_info.get('year')\n\n        if (journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and\n                journal_volume and len(journal_volume) == 2):\n            two_digit_year = str(year)[2:]\n            _publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])\n            result.append(_publication_info)\n            continue\n\n        if journal_title and journal_volume:\n            match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)\n            if match and _needs_a_hidden_pubnote(journal_title, journal_volume):\n                _publication_info['journal_title'] = match.group('title')\n                _publication_info['journal_volume'] = journal_volume + match.group('letter')\n                result.append(_publication_info)\n                _publication_info = copy.deepcopy(publication_info)\n                _publication_info['hidden'] = True\n                _publication_info['journal_title'] = match.group('title')\n                _publication_info['journal_volume'] = match.group('letter') + journal_volume\n            elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:\n                _publication_info['journal_title'] = match.group('title')\n                _publication_info['journal_volume'] = match.group('letter') + journal_volume\n\n        result.append(_publication_info)\n\n    return result", "docstring": "Convert back a ``publication_info`` value from the new format to the old.\n\nDoes the inverse transformation of :func:`convert_old_publication_info_to_new`,\nto be used whenever we are sending back records from Labs to Legacy.\n\nArgs:\npublication_infos: a ``publication_info`` in the new format.\n\nReturns:\nlist(dict): a ``publication_info`` in the old format.", "source": "juraj-google-style"}
{"code": "def _ParseKeysFromFindSpecs(self, parser_mediator, win_registry, find_specs):\n    \n    searcher = dfwinreg_registry_searcher.WinRegistrySearcher(win_registry)\n    for registry_key_path in iter(searcher.Find(find_specs=find_specs)):\n      if parser_mediator.abort:\n        break\n\n      registry_key = searcher.GetKeyByPath(registry_key_path)\n      self._ParseKey(parser_mediator, registry_key)", "docstring": "Parses the Registry keys from FindSpecs.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nwin_registry (dfwinreg.WinRegistryKey): root Windows Registry key.\nfind_specs (dfwinreg.FindSpecs): Keys to search for.", "source": "juraj-google-style"}
{"code": "def init(deb1, deb2=False):\n    \n    global DEBUG        \n    global DEBUGALL     \n    DEBUG = deb1\n    DEBUGALL = deb2", "docstring": "Initialize DEBUG and DEBUGALL.\n\nAllows other modules to set DEBUG and DEBUGALL, so their\ncall to dprint or dprintx generate output.\n\nArgs:\ndeb1 (bool): value of DEBUG to set\ndeb2 (bool): optional - value of DEBUGALL to set,\ndefaults to False.", "source": "juraj-google-style"}
{"code": "def _instance_transform(fqdn, o, *args, **kwargs):\n    \n    return _package_transform(o, fqdn, start=0, *args, **kwargs)", "docstring": "Applies an instance method with name `fqdn` to `o`.\n\nArgs:\nfqdn (str): fully-qualified domain name of the object.\no: object to apply instance method to.", "source": "juraj-google-style"}
{"code": "def _forward_log_det_jacobian(self, x):\n    raise NotImplementedError('forward_log_det_jacobian not implemented.')", "docstring": "Subclass implementation of `forward_log_det_jacobian` public function.\n\nIn particular, this method differs from the public function, in that it\ndoes not take `event_ndims`. Thus, this implements the minimal Jacobian\ndeterminant calculation (i.e. over `forward_min_event_ndims`).\n\nArgs:\nx: `Tensor`. The input to the \"forward_log_det_jacobian\" evaluation.\n\nReturns:\nforward_log_det_jacobian: `Tensor`, if this bijector is injective.\nIf not injective, returns the k-tuple containing jacobians for the\nunique `k` points `(x1, ..., xk)` such that `g(xi) = y`.", "source": "github-repos"}
{"code": "def get_url_distribution(self, params=None):\n    params = (params or {})\n    all_responses = {}\n    api_name = 'virustotal-url-distribution'\n    response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution')\n    self._extract_response_chunks(all_responses, response_chunks, api_name)\n    return all_responses", "docstring": "Retrieves a live feed with the latest URLs submitted to VT.\n\nArgs:\nresources: a dictionary with name and value for optional arguments\nReturns:\nA dict with the VT report.", "source": "codesearchnet"}
{"code": "def _generate_fix_length_rpc_response(response_length, template='{\"id\": 0, \"result\": \"%s\", \"error\": null, \"callback\": null}'):\n    result_length = response_length - (len(template) - 2)\n    if result_length < 0:\n        raise ValueError(f'The response_length should be no smaller than template_length + 2. Got response_length {response_length}, template_length {len(template)}.')\n    chars = string.ascii_letters + string.digits\n    return template % ''.join((random.choice(chars) for _ in range(result_length)))", "docstring": "Generates an RPC response string with specified length.\n\nThis function generates a random string and formats the template with the\ngenerated random string to get the response string. This function formats\nthe template with printf style string formatting.\n\nArgs:\nresponse_length: int, the length of the response string to generate.\ntemplate: str, the template used for generating the response string.\n\nReturns:\nThe generated response string.\n\nRaises:\nValueError: if the specified length is too small to generate a response.", "source": "github-repos"}
{"code": "def switch_opt(default, shortname, help_msg):\n    return ConfOpt(bool(default), True, shortname, dict(action=internal.Switch), True, help_msg, None)", "docstring": "Define a switchable ConfOpt.\n\nThis creates a boolean option. If you use it in your CLI, it can be\nswitched on and off by prepending + or - to its name: +opt / -opt.\n\nArgs:\ndefault (bool): the default value of the swith option.\nshortname (str): short name of the option, no shortname will be used if\nit is set to None.\nhelp_msg (str): short description of the option.\n\nReturns:\n:class:`~loam.manager.ConfOpt`: a configuration option with the given\nproperties.", "source": "codesearchnet"}
{"code": "def _Aff4Size(aff4_obj):\n  \n  if not isinstance(aff4_obj, aff4.AFF4Stream):\n    message = \"Expected an instance of `%s` but received `%s`\"\n    raise TypeError(message % (aff4.AFF4Stream, type(aff4_obj)))\n\n  return int(aff4_obj.Get(aff4_obj.Schema.SIZE))", "docstring": "Retrieves the total size in bytes of an AFF4 object.\n\nArgs:\naff4_obj: An AFF4 stream instance to retrieve size for.\n\nReturns:\nAn integer representing number of bytes.\n\nRaises:\nTypeError: If `aff4_obj` is not an instance of AFF4 stream.", "source": "juraj-google-style"}
{"code": "def plot_spectra_pieces_pdf(ss, aint=10, pdf_filename='pieces.pdf', setup=_default_setup):\n    \n\n    import f311.explorer as ex\n\n    xmin, xmax, ymin_, ymax, _, yspan = calc_max_min(ss)\n    ymin = ymin_ if setup.ymin is None else setup.ymin\n\n    num_pages = int(math.ceil((xmax-xmin)/aint)) \n    \n\n    a99.format_BLB()\n    \n    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)\n    logger = a99.get_python_logger()\n\n    for h in range(num_pages):\n        fig = plt.figure()\n        lambda0 = xmin+h*aint\n        lambda1 = lambda0+aint\n        logger.info(\"Printing page {0:d}/{1:d} ([{2:g}, {3:g}])\".format(h+1, num_pages, lambda0, lambda1))\n        for i, s in enumerate(ss):\n            s_cut = ex.cut_spectrum(s, lambda0, lambda1)\n            ax = plt.gca()\n            ax.plot(s_cut.x, s_cut.y, label=s.title)\n        if setup.flag_xlabel and setup.fmt_xlabel:\n            plt.xlabel('Wavelength (interval: [{0:g}, {1:g}])'.format(lambda0, lambda1))\n        xspan = lambda1-lambda0\n        ax.set_xlim([lambda0 - xspan * _T, lambda1 + xspan * _T])\n        ax.set_ylim([ymin - yspan * _T, ymax + yspan * _T])\n        if setup.flag_legend:\n            leg = plt.legend(loc=0)\n            a99.format_legend(leg)\n        plt.tight_layout()\n        pdf.savefig(fig)\n        plt.close()\n\n    \n    \n    pdf.close()\n    logger.info(\"File {0!s} successfully created.\".format(pdf_filename))", "docstring": "Plots spectra, overlapped, in small wavelength intervals into a PDF file,\none interval per page of the PDF file.\n\nArgs:\nss: list of Spectrum objects\naint: wavelength interval for each plot\npdf_filename: name of output file\nsetup: PlotSpectrumSetup object\n\n**Note** overrides setup.fmt_xlabel; leaves y-labell and title blank", "source": "juraj-google-style"}
{"code": "def fcoe_networks(self):\n    if (not self.__fcoe_networks):\n        self.__fcoe_networks = FcoeNetworks(self.__connection)\n    return self.__fcoe_networks", "docstring": "Gets the FcoeNetworks API client.\n\nReturns:\nFcoeNetworks:", "source": "codesearchnet"}
{"code": "def _should_record_summaries_internal(default_state):\n    if _summary_state.writer is None:\n        return constant_op.constant(False)\n    if not callable(_summary_state.is_recording):\n        static_cond = tensor_util.constant_value(_summary_state.is_recording)\n        if static_cond is not None and (not static_cond):\n            return constant_op.constant(False)\n    resolve = lambda x: x() if callable(x) else x\n    cond_distributed = resolve(_summary_state.is_recording_distribution_strategy)\n    cond = resolve(_summary_state.is_recording)\n    if cond is None:\n        cond = default_state\n    return math_ops.logical_and(cond_distributed, cond)", "docstring": "Returns boolean Tensor if summaries should/shouldn't be recorded.\n\nNow the summary condition is decided by logical \"and\" of below conditions:\nFirst, summary writer must be set. Given this constraint is met,\nctx.summary_recording and ctx.summary_recording_distribution_strategy.\nThe former one is usually set by user, and the latter one is controlled\nby DistributionStrategy (tf.distribute.ReplicaContext).\n\nArgs:\ndefault_state: can be True or False. The default summary behavior when\nsummary writer is set and the user does not specify\nctx.summary_recording and ctx.summary_recording_distribution_strategy\nis True.", "source": "github-repos"}
{"code": "def parse(self, filepath, content):\n    try:\n        parsed = json.loads(content)\n    except ValueError:\n        msg = 'No JSON object could be decoded from file: {}'\n        raise SettingsBackendError(msg.format(filepath))\n    return parsed", "docstring": "Parse opened settings content using JSON parser.\n\nArgs:\nfilepath (str): Settings object, depends from backend\ncontent (str): Settings content from opened file, depends from\nbackend.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If parser can not decode\na valid JSON object.\n\nReturns:\ndict: Dictionnary containing parsed setting elements.", "source": "codesearchnet"}
{"code": "def random_indexes(max_index, subset_size=None, seed=None, rng=None):\n    subst_ = np.arange(0, max_index)\n    rng = ensure_rng((seed if (rng is None) else rng))\n    rng.shuffle(subst_)\n    if (subset_size is None):\n        subst = subst_\n    else:\n        subst = subst_[0:min(subset_size, max_index)]\n    return subst", "docstring": "random unrepeated indicies\n\nArgs:\nmax_index (?):\nsubset_size (None): (default = None)\nseed (None): (default = None)\nrng (RandomState):  random number generator(default = None)\n\nReturns:\n?: subst\n\nCommandLine:\npython -m utool.util_numpy --exec-random_indexes\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_numpy import *  # NOQA\n>>> max_index = 10\n>>> subset_size = None\n>>> seed = None\n>>> rng = np.random.RandomState(0)\n>>> subst = random_indexes(max_index, subset_size, seed, rng)\n>>> result = ('subst = %s' % (str(subst),))\n>>> print(result)", "source": "codesearchnet"}
{"code": "def build_rectangle_dict(self, north, west, south, east, stroke_color='\n    rectangle = {'stroke_color': stroke_color, 'stroke_opacity': stroke_opacity, 'stroke_weight': stroke_weight, 'fill_color': fill_color, 'fill_opacity': fill_opacity, 'bounds': {'north': north, 'west': west, 'south': south, 'east': east}}\n    return rectangle", "docstring": "Set a dictionary with the javascript class Rectangle parameters\n\nThis function sets a default drawing configuration if the user just\npass the rectangle bounds, but also allows to set each parameter\nindividually if the user wish so.\n\nArgs:\nnorth (float): The north latitude bound\nwest (float): The west longitude bound\nsouth (float): The south latitude bound\neast (float): The east longitude bound\nstroke_color (str): Sets the color of the rectangle border using\nhexadecimal color notation\nstroke_opacity (float): Sets the opacity of the rectangle border\nin percentage. If stroke_opacity = 0, the border is transparent\nstroke_weight (int): Sets the stroke girth in pixels.\nfill_color (str): Sets the color of the rectangle fill using\nhexadecimal color notation\nfill_opacity (float): Sets the opacity of the rectangle fill", "source": "codesearchnet"}
{"code": "def CheckFile(self, path):\n    \n    print('Checking: {0:s}'.format(path))\n\n    definitions_registry = registry.DataTypeDefinitionsRegistry()\n    definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n    result = False\n\n    try:\n      definitions_reader.ReadFile(definitions_registry, path)\n      result = True\n\n    except KeyError as exception:\n      logging.warning((\n          'Unable to register data type definition in file: {0:s} with '\n          'error: {1:s}').format(path, exception))\n\n    except errors.FormatError as exception:\n      logging.warning(\n          'Unable to validate file: {0:s} with error: {1:s}'.format(\n              path, exception))\n\n    return result", "docstring": "Validates the definition in a file.\n\nArgs:\npath (str): path of the definition file.\n\nReturns:\nbool: True if the file contains valid definitions.", "source": "juraj-google-style"}
{"code": "def CreatePriceTableRow(header, description, final_url, price_in_micros, currency_code, price_unit, final_mobile_url=None):\n    table_row = {'header': header, 'description': description, 'finalUrls': {'urls': [final_url]}, 'price': {'money': {'microAmount': price_in_micros}, 'currencyCode': currency_code}, 'priceUnit': price_unit, 'xsi_type': 'PriceTableRow'}\n    if final_mobile_url:\n        table_row['finalMobileUrls'] = {'urls': [final_mobile_url]}\n    return table_row", "docstring": "Helper function to generate a single row of a price table.\n\nArgs:\nheader: A str containing the header text of this row.\ndescription: A str description of this row in the price table.\nfinal_url: A str containing the final URL after all cross domain redirects.\nprice_in_micros: An int indicating the price of the given currency in\nmicros.\ncurrency_code: A str indicating the currency code being used.\nprice_unit: A str enum indicating the price unit for this row.\nfinal_mobile_url: A str containing the final mobile URL after all cross\ndomain redirects.\n\nReturns:\nA dictionary containing the contents of the generated price table row.", "source": "codesearchnet"}
{"code": "def is_method_call(func, types=(), methods=()):\n    \n    return (\n        isinstance(func, astroid.BoundMethod)\n        and isinstance(func.bound, astroid.Instance)\n        and (func.bound.name in types if types else True)\n        and (func.name in methods if methods else True)\n    )", "docstring": "Determines if a BoundMethod node represents a method call.\n\nArgs:\nfunc (astroid.BoundMethod): The BoundMethod AST node to check.\ntypes (Optional[String]): Optional sequence of caller type names to restrict check.\nmethods (Optional[String]): Optional sequence of method names to restrict check.\n\nReturns:\nbool: true if the node represents a method call for the given type and\nmethod names, False otherwise.", "source": "juraj-google-style"}
{"code": "def next_in_buffer(self, target_buffer: collections.deque[_T]) -> _T:\n    if bool(target_buffer):\n        return target_buffer.popleft()\n    for item in self._input_stream:\n        which_buffer = self._select_buffer(item)\n        if which_buffer is None:\n            continue\n        if which_buffer is target_buffer:\n            return item\n        if isinstance(which_buffer, collections.deque):\n            which_buffer.append(item)\n            continue\n        if isinstance(which_buffer, tuple):\n            return_item: bool = False\n            for buffer in which_buffer:\n                if buffer is target_buffer:\n                    return_item = True\n                else:\n                    buffer.append(item)\n            if return_item:\n                return item\n            continue\n        T = TypeVar('T', bound=_T)\n        ExpectedTypes = Union[collections.deque[T], tuple[collections.deque[T], ...], None]\n        raise TypeError(f'`{self._select_buffer}` returned a value of type `{type(which_buffer).__name__}`; expected one of `{ExpectedTypes}`.')\n    raise StopIteration()", "docstring": "Returns the next item in the sub-stream corresponding to `target_buffer`.\n\nArgs:\ntarget_buffer: The queue backing the sub-stream whose next element should\nbe returned.\n\nReturns:\nIf `target_buffer` is nonempty, the next element of `target_buffer`.\nOtherwise, the next element of `self._input_stream` that would have been\nadded to `target_buffer`.\n\nRaises:\nStopIteration: If `target_buffer` and `self._input_stream` are both empty.", "source": "github-repos"}
{"code": "def get_inner_template(self, language, template_type, indentation, key, val):\n    inner_templates = {'php': {'iterable': ('%s%s => array \\n%s( \\n%s%s),\\n' % (indentation, key, indentation, val, indentation)), 'singular': ('%s%s => %s, \\n' % (indentation, key, val))}, 'javascript': {'iterable': ('%s%s : {\\n%s\\n%s},\\n' % (indentation, key, val, indentation)), 'singular': ('%s%s: %s,\\n' % (indentation, key, val))}, 'ocaml': {'iterable': ('%s[| (%s, (\\n%s\\n%s))|] ;;\\n' % (indentation, key, val, indentation)), 'singular': ('%s(%s, %s);\\n' % (indentation, key, val))}}\n    return inner_templates[language][template_type]", "docstring": "Gets the requested template for the given language.\n\nArgs:\nlanguage: string, the language of the template to look for.\n\ntemplate_type: string, 'iterable' or 'singular'.\nAn iterable template is needed when the value is an iterable\nand needs more unpacking, e.g. list, tuple. A singular template\nis needed when unpacking is complete and the value is singular,\ne.g. string, int, float.\n\nindentation: int, the indentation level.\n\nkey: multiple types, the array key.\n\nval: multiple types, the array values\n\nReturns:\nstring, template formatting for arrays by language.", "source": "codesearchnet"}
{"code": "def multisorted(items, *keys):\n    if (len(keys) == 0):\n        keys = [asc()]\n    for key in reversed(keys):\n        items = sorted(items, key=key.func, reverse=key.reverse)\n    return items", "docstring": "Sort by multiple attributes.\n\nArgs:\nitems: An iterable series to be sorted.\n*keys: Key objects which extract key values from the items.\nThe first key will be the most significant, and the\nlast key the least significant. If no key functions\nare provided, the items will be sorted in ascending\nnatural order.\nReturns:\nA list of items sorted according to keys.", "source": "codesearchnet"}
{"code": "def move(self, fromaccount, toaccount, amount, minconf=1):\n    amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)\n    return self.rpc.call('move', fromaccount, toaccount, float(str(amount)), minconf)", "docstring": "Send coins between accounts in the same wallet.\n\nIf the receiving account does not exist, it is automatically\ncreated (but not automatically assigned an address).\n\nArgs:\nfromaccount (str): origin account\ntoaccount (str): destination account\namount (str or Decimal): amount to send (8 decimal points)\nminconf (int): ensure the account has a valid balance using this\nmany confirmations (default=1)\n\nReturns:\nbool: True if the coins are moved successfully, False otherwise", "source": "codesearchnet"}
{"code": "def _serialization_helper(self, ray_forking):\n        \n        if ray_forking:\n            actor_handle_id = compute_actor_handle_id(\n                self._ray_actor_handle_id, self._ray_actor_forks)\n        else:\n            actor_handle_id = self._ray_actor_handle_id\n\n        \n        \n        state = {\n            \"actor_id\": self._ray_actor_id,\n            \"actor_handle_id\": actor_handle_id,\n            \"module_name\": self._ray_module_name,\n            \"class_name\": self._ray_class_name,\n            \"actor_cursor\": self._ray_actor_cursor,\n            \"actor_method_names\": self._ray_actor_method_names,\n            \"method_signatures\": self._ray_method_signatures,\n            \"method_num_return_vals\": self._ray_method_num_return_vals,\n            \n            \"actor_creation_dummy_object_id\": self.\n            _ray_actor_creation_dummy_object_id,\n            \"actor_method_cpus\": self._ray_actor_method_cpus,\n            \"actor_driver_id\": self._ray_actor_driver_id,\n            \"ray_forking\": ray_forking\n        }\n\n        if ray_forking:\n            self._ray_actor_forks += 1\n            new_actor_handle_id = actor_handle_id\n        else:\n            \n            \n            \n            \n            new_actor_handle_id = ActorHandleID(_random_string())\n        \n        \n        \n        \n        \n        self._ray_new_actor_handles.append(new_actor_handle_id)\n\n        return state", "docstring": "This is defined in order to make pickling work.\n\nArgs:\nray_forking: True if this is being called because Ray is forking\nthe actor handle and false if it is being called by pickling.\n\nReturns:\nA dictionary of the information needed to reconstruct the object.", "source": "juraj-google-style"}
{"code": "def _format_value(value):\n    literal = repr(value)\n    try:\n        if (parse_value(literal) == value):\n            return literal\n    except SyntaxError:\n        pass\n    return None", "docstring": "Returns `value` in a format parseable by `parse_value`, or `None`.\n\nSimply put, This function ensures that when it returns a string value, the\nfollowing will hold:\n\nparse_value(_format_value(value)) == value\n\nArgs:\nvalue: The value to format.\n\nReturns:\nA string representation of `value` when `value` is literally representable,\nor `None`.", "source": "codesearchnet"}
{"code": "def _CleanupUnregisteredFlagFromModuleDicts(self, flag_obj):\n    \n    if self._FlagIsRegistered(flag_obj):\n      return\n    for flags_by_module_dict in (self.FlagsByModuleDict(),\n                                 self.FlagsByModuleIdDict(),\n                                 self.KeyFlagsByModuleDict()):\n      for flags_in_module in six.itervalues(flags_by_module_dict):\n        \n        \n        while flag_obj in flags_in_module:\n          flags_in_module.remove(flag_obj)", "docstring": "Cleanup unregistered flags from all module -> [flags] dictionaries.\n\nIf flag_obj is registered under either its long name or short name, it\nwon't be removed from the dictionaries.\n\nArgs:\nflag_obj: A flag object.", "source": "juraj-google-style"}
{"code": "def metar_to_speech(metar: str) -> str:\n        \n        LOGGER.info('getting speech text from METAR: %s', metar)\n        metar_data, metar_units = emiz.avwx.metar.parse_in(metar)\n        speech = emiz.avwx.speech.metar(metar_data, metar_units)\n        speech = str(speech).replace('Altimeter', 'Q N H')\n        LOGGER.debug('resulting speech: %s', speech)\n        return speech", "docstring": "Creates a speakable text from a METAR\n\nArgs:\nmetar: METAR string to use\n\nReturns: speakable METAR for TTS", "source": "juraj-google-style"}
{"code": "def _parse_octet(self, octet_str):\n        \n        \n        if not self._DECIMAL_DIGITS.issuperset(octet_str):\n            raise ValueError\n        octet_int = int(octet_str, 10)\n        \n        \n        if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):\n            raise ValueError\n        return octet_int", "docstring": "Convert a decimal octet into an integer.\n\nArgs:\noctet_str: A string, the number to parse.\n\nReturns:\nThe octet as an integer.\n\nRaises:\nValueError: if the octet isn't strictly a decimal from [0..255].", "source": "juraj-google-style"}
{"code": "def ashrae_clear_sky(altitudes, month, sky_clearness=1):\n    MONTHLY_A = [1202, 1187, 1164, 1130, 1106, 1092, 1093, 1107, 1136, 1166, 1190, 1204]\n    MONTHLY_B = [0.141, 0.142, 0.149, 0.164, 0.177, 0.185, 0.186, 0.182, 0.165, 0.152, 0.144, 0.141]\n    dir_norm_rad = []\n    dif_horiz_rad = []\n    for (i, alt) in enumerate(altitudes):\n        if (alt > 0):\n            try:\n                dir_norm = (MONTHLY_A[(month - 1)] / math.exp((MONTHLY_B[(month - 1)] / math.sin(math.radians(alt)))))\n                diff_horiz = ((0.17 * dir_norm) * math.sin(math.radians(alt)))\n                dir_norm_rad.append((dir_norm * sky_clearness))\n                dif_horiz_rad.append((diff_horiz * sky_clearness))\n            except OverflowError:\n                dir_norm_rad.append(0)\n                dif_horiz_rad.append(0)\n        else:\n            dir_norm_rad.append(0)\n            dif_horiz_rad.append(0)\n    return (dir_norm_rad, dif_horiz_rad)", "docstring": "Calculate solar flux for an original ASHRAE Clear Sky\n\nArgs:\naltitudes: A list of solar altitudes in degrees\nmonth: An integer (1-12) indicating the month the altitudes belong to\nsky_clearness: A factor that will be multiplied by the output of\nthe model. This is to help account for locations where clear,\ndry skies predominate (e.g., at high elevations) or,\nconversely, where hazy and humid conditions are frequent. See\nThrelkeld and Jordan (1958) for recommended values. Typical\nvalues range from 0.95 to 1.05 and are usually never more\nthan 1.2. Default is set to 1.0.\n\nReturns:\ndir_norm_rad: A list of direct normal radiation values for each\nof the connected altitudes in W/m2.\ndif_horiz_rad: A list of diffuse horizontall radiation values for each\nof the connected altitudes in W/m2.", "source": "codesearchnet"}
{"code": "async def delete(self, service_id: str) -> bool:\n        \n\n        await self.docker._query(\n            \"services/{service_id}\".format(service_id=service_id), method=\"DELETE\"\n        )\n        return True", "docstring": "Remove a service\n\nArgs:\nservice_id: ID or name of the service\n\nReturns:\nTrue if successful", "source": "juraj-google-style"}
{"code": "def connect_output(self, node):\n    if (len(self.outputs) == self.max_outputs):\n        raise TooManyOutputsError('Attempted to connect too many nodes to the output of a node', max_outputs=self.max_outputs, stream=self.stream)\n    self.outputs.append(node)", "docstring": "Connect another node to our output.\n\nThis downstream node will automatically be triggered when we update\nour output.\n\nArgs:\nnode (SGNode): The node that should receive our output", "source": "codesearchnet"}
{"code": "def validate(self):\n    if (not isinstance(self.enum, enumeration.EnumMeta)):\n        raise TypeError('enumeration type {0} must be of type EnumMeta'.format(self.enum))\n    if (self.value is not None):\n        if (not isinstance(self.value, self.enum)):\n            raise TypeError('enumeration {0} must be of type {1}'.format(self.value, self.enum))\n        if (type(self.value.value) not in six.integer_types):\n            raise TypeError('enumeration value must be an int')\n        elif (self.value.value > Enumeration.MAX):\n            raise ValueError('enumeration value greater than accepted max')\n        elif (self.value.value < Enumeration.MIN):\n            raise ValueError('enumeration value less than accepted min')", "docstring": "Verify that the value of the Enumeration is valid.\n\nRaises:\nTypeError: if the enum is not of type Enum\nValueError: if the value is not of the expected Enum subtype or if\nthe value cannot be represented by an unsigned 32-bit integer", "source": "codesearchnet"}
{"code": "def delete(self, key):\n    \n    self._cur_batch.delete(key)\n    self._num_mutations += 1\n    if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:\n      self.commit()\n      self.begin()", "docstring": "Adds deletion of the entity with given key to the mutation buffer.\n\nIf mutation buffer reaches its capacity then this method commit all pending\nmutations from the buffer and emties it.\n\nArgs:\nkey: key of the entity which should be deleted", "source": "juraj-google-style"}
{"code": "def get_step_by_name(self, name):\n    self._validate_step_name(name)\n    name = str(name)\n    try:\n        return self.all_upstream_steps[name]\n    except KeyError as e:\n        msg = 'No Step with name \"{}\" found. You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys()))\n        raise StepError(msg) from e", "docstring": "Extracts step by name from the pipeline.\n\nExtracted Step is a fully functional pipeline as well.\nAll upstream Steps are already defined.\n\nArgs:\nname (str): name of the step to be fetched\nReturns:\nStep (obj): extracted step", "source": "codesearchnet"}
{"code": "def fromRaw(cls, skype=None, raw={}):\n    return cls(skype, raw, **cls.rawToFields(raw))", "docstring": "Create a new instance based on the raw properties of an API response.\n\nThis can be overridden to automatically create subclass instances based on the raw content.\n\nArgs:\nskype (Skype): parent Skype instance\nraw (dict): raw object, as provided by the API\n\nReturns:\nSkypeObj: the new class instance", "source": "codesearchnet"}
{"code": "def if_callable_call_with_formatted_string(callback, formattable_string, *args):\n    try:\n        formatted_string = formattable_string.format(*args)\n    except IndexError:\n        raise ValueError('Mismatch metween amount of insertion points in the formattable string\\nand the amount of args given.')\n    if callable(callback):\n        callback(formatted_string)", "docstring": "If the callback is callable, format the string with the args and make a call.\nOtherwise, do nothing.\n\nArgs:\ncallback (function): May or may not be callable.\nformattable_string (str): A string with '{}'s inserted.\n*args: A variable amount of arguments for the string formatting. Must correspond to the\namount of '{}'s in 'formattable_string'.\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def np2str(value):\n    if (hasattr(value, 'dtype') and issubclass(value.dtype.type, (np.string_, np.object_)) and (value.size == 1)):\n        value = np.asscalar(value)\n        if (not isinstance(value, str)):\n            value = value.decode()\n        return value\n    else:\n        raise ValueError('Array is not a string type or is larger than 1')", "docstring": "Convert an `numpy.string_` to str.\n\nArgs:\nvalue (ndarray): scalar or 1-element numpy array to convert\n\nRaises:\nValueError: if value is array larger than 1-element or it is not of\ntype `numpy.string_` or it is not a numpy array", "source": "codesearchnet"}
{"code": "def shape_internal(input, name=None, optimize=True, out_type=None):\n    with ops.name_scope(name, 'Shape', [input]) as name:\n        if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n            if not out_type:\n                out_type = dtypes.int32\n            return gen_math_ops.cast(input.dense_shape, out_type)\n        else:\n            if not context.executing_eagerly():\n                input = ops.convert_to_tensor(input)\n                input_shape = input.get_shape()\n                if optimize and input_shape.is_fully_defined():\n                    if not out_type:\n                        return constant_op._tensor_shape_tensor_conversion_function(input_shape)\n                    return constant(input_shape.as_list(), out_type, name=name)\n            if not out_type:\n                out_type = dtypes.int32\n            return gen_array_ops.shape(input, name=name, out_type=out_type)", "docstring": "Returns the shape of a tensor.\n\nIf `out_type` is not specified and the shape is fully known, then we look at\nthe dimension values to determine whether to return an int32 or int64 tensor.\nIf the shape is not fully known, we default to int32.\n\nArgs:\ninput: A `Tensor` or `SparseTensor`.\nname: A name for the operation (optional).\noptimize: if true, encode the shape as a constant when possible.\nout_type: (Optional) The specified output type of the operation (`int32` or\n`int64`). Defaults to tf.int32.\n\nReturns:\nA `Tensor` of type `out_type`.", "source": "github-repos"}
{"code": "def update(self, scope, at=0):\n        \n        if hasattr(scope, '_mixins') and not at:\n            self._mixins.update(scope._mixins)\n        self[at]['__variables__'].update(scope[at]['__variables__'])\n        self[at]['__blocks__'].extend(scope[at]['__blocks__'])\n        self[at]['__names__'].extend(scope[at]['__names__'])", "docstring": "Update scope. Add another scope to this one.\nArgs:\nscope (Scope): Scope object\nKwargs:\nat (int): Level to update", "source": "juraj-google-style"}
{"code": "def click_nowait(self, pattern, action='click', desc=None, **match_kwargs):\n        \n        point = self.match(pattern, **match_kwargs)\n        if not point or not point.matched:\n            return None\n\n        func = getattr(self, action)\n        func(*point.pos)\n        return point", "docstring": "Return immediately if no image found\n\nArgs:\n- pattern (str or Pattern): filename or an opencv image object.\n- action (str): click or long_click\n\nReturns:\nClick point or None", "source": "juraj-google-style"}
{"code": "def guess_peb_size(path):\n    file_offset = 0\n    offsets = []\n    f = open(path, 'rb')\n    f.seek(0, 2)\n    file_size = (f.tell() + 1)\n    f.seek(0)\n    for _ in range(0, file_size, FILE_CHUNK_SZ):\n        buf = f.read(FILE_CHUNK_SZ)\n        for m in re.finditer(UBI_EC_HDR_MAGIC, buf):\n            start = m.start()\n            if (not file_offset):\n                file_offset = start\n                idx = start\n            else:\n                idx = (start + file_offset)\n            offsets.append(idx)\n        file_offset += FILE_CHUNK_SZ\n    f.close()\n    occurances = {}\n    for i in range(0, len(offsets)):\n        try:\n            diff = (offsets[i] - offsets[(i - 1)])\n        except:\n            diff = offsets[i]\n        if (diff not in occurances):\n            occurances[diff] = 0\n        occurances[diff] += 1\n    most_frequent = 0\n    block_size = None\n    for offset in occurances:\n        if (occurances[offset] > most_frequent):\n            most_frequent = occurances[offset]\n            block_size = offset\n    return block_size", "docstring": "Determine the most likely block size\n\nArguments:\nStr:path    -- Path to file.\n\nReturns:\nInt         -- PEB size.\n\nSearches file for Magic Number, picks most\ncommon length between them.", "source": "codesearchnet"}
{"code": "def set_raw_datadir(self, directory=None):\n        \n\n        if directory is None:\n            self.logger.info(\"no directory name given\")\n            return\n        if not os.path.isdir(directory):\n            self.logger.info(directory)\n            self.logger.info(\"directory does not exist\")\n            return\n        self.raw_datadir = directory", "docstring": "Set the directory containing .res-files.\n\nUsed for setting directory for looking for res-files.@\nA valid directory name is required.\n\nArgs:\ndirectory (str): path to res-directory\n\nExample:\n>>> d = CellpyData()\n>>> directory = \"MyData/Arbindata\"\n>>> d.set_raw_datadir(directory)", "source": "juraj-google-style"}
{"code": "def AddSymbolicLink(self, path, linked_path):\n    \n    if self.file_system.FileEntryExistsByPath(path):\n      raise ValueError('Path: {0:s} already set.'.format(path))\n\n    self._AddParentDirectories(path)\n    self.file_system.AddFileEntry(\n        path, file_entry_type=definitions.FILE_ENTRY_TYPE_LINK,\n        link_data=linked_path)", "docstring": "Adds a symbolic link to the fake file system.\n\nArgs:\npath (str): path of the symbolic link within the fake file system.\nlinked_path (str): path that is linked.\n\nRaises:\nValueError: if the path is already set.", "source": "juraj-google-style"}
{"code": "def equals(self, actual_seq):\n    try:\n        expected = dict([(element, None) for element in self._expected_seq])\n        actual = dict([(element, None) for element in actual_seq])\n    except TypeError:\n        expected = list(self._expected_seq)\n        actual = list(actual_seq)\n        expected.sort()\n        actual.sort()\n    return (expected == actual)", "docstring": "Check to see whether actual_seq has same elements as expected_seq.\n\nArgs:\nactual_seq: sequence\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def add_mount_point(self, path, total_size=None):\n        \n        path = self.absnormpath(path)\n        if path in self.mount_points:\n            self.raise_os_error(errno.EEXIST, path)\n        self._last_dev += 1\n        self.mount_points[path] = {\n            'idev': self._last_dev, 'total_size': total_size, 'used_size': 0\n        }\n        \n        root_dir = (self.root if path == self.root.name\n                    else self.create_dir(path))\n        root_dir.st_dev = self._last_dev\n        return self.mount_points[path]", "docstring": "Add a new mount point for a filesystem device.\nThe mount point gets a new unique device number.\n\nArgs:\npath: The root path for the new mount path.\n\ntotal_size: The new total size of the added filesystem device\nin bytes. Defaults to infinite size.\n\nReturns:\nThe newly created mount point dict.\n\nRaises:\nOSError: if trying to mount an existing mount point again.", "source": "juraj-google-style"}
{"code": "def _process_path_prefix(path_prefix):\n    _validate_path(path_prefix)\n    if (not _GCS_PATH_PREFIX_REGEX.match(path_prefix)):\n        raise ValueError(('Path prefix should have format /bucket, /bucket/, or /bucket/prefix but got %s.' % path_prefix))\n    bucket_name_end = path_prefix.find('/', 1)\n    bucket = path_prefix\n    prefix = None\n    if (bucket_name_end != (- 1)):\n        bucket = path_prefix[:bucket_name_end]\n        prefix = (path_prefix[(bucket_name_end + 1):] or None)\n    return (bucket, prefix)", "docstring": "Validate and process a Google Cloud Stoarge path prefix.\n\nArgs:\npath_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix'\nor '/bucket/' or '/bucket'.\n\nRaises:\nValueError: if path is invalid.\n\nReturns:\na tuple of /bucket and prefix. prefix can be None.", "source": "codesearchnet"}
{"code": "def automatic_gamma_density(structure, kppa):\n        \n\n        latt = structure.lattice\n        lengths = latt.abc\n        ngrid = kppa / structure.num_sites\n\n        mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)\n        num_div = [int(round(mult / l)) for l in lengths]\n\n        \n        num_div = [i if i > 0 else 1 for i in num_div]\n\n        \n        \n        num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]\n\n        style = Kpoints.supported_modes.Gamma\n\n        comment = \"pymatgen 4.7.6+ generated KPOINTS with grid density = \" + \\\n                  \"{} / atom\".format(kppa)\n        num_kpts = 0\n        return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])", "docstring": "Returns an automatic Kpoint object based on a structure and a kpoint\ndensity. Uses Gamma centered meshes always. For GW.\n\nAlgorithm:\nUses a simple approach scaling the number of divisions along each\nreciprocal lattice vector proportional to its length.\n\nArgs:\nstructure:\nInput structure\nkppa:\nGrid density", "source": "juraj-google-style"}
{"code": "def parse_bucket_info(domain):\n    \n    match = RGX_BUCKET.match(domain)\n    if match:\n        data = match.groupdict()\n        return data['bucket'], data['region'] or 'us-east-1'", "docstring": "Parse a domain name to gather the bucket name and region for an S3 bucket. Returns a tuple\n(bucket_name, bucket_region) if a valid domain name, else `None`\n\n>>> parse_bucket_info('www.riotgames.com.br.s3-website-us-west-2.amazonaws.com')\n('www.riotgames.com.br', 'us-west-2')\n\nArgs:\ndomain (`str`): Domain name to parse\n\nReturns:\n:obj:`list` of `str`: `str`,`None`", "source": "juraj-google-style"}
{"code": "def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):\n    if slow_tokenizer_class is None and fast_tokenizer_class is None:\n        raise ValueError('You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class')\n    if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):\n        raise ValueError('You passed a fast tokenizer in the `slow_tokenizer_class`.')\n    if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):\n        raise ValueError('You passed a slow tokenizer in the `fast_tokenizer_class`.')\n    if slow_tokenizer_class is not None and fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) and (fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class):\n        raise ValueError(f'The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not consistent with the slow tokenizer class you passed (fast tokenizer has {fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those so they match!')\n    if config_class in TOKENIZER_MAPPING._extra_content:\n        existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]\n        if slow_tokenizer_class is None:\n            slow_tokenizer_class = existing_slow\n        if fast_tokenizer_class is None:\n            fast_tokenizer_class = existing_fast\n    TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok)", "docstring": "Register a new tokenizer in this mapping.\n\n\nArgs:\nconfig_class ([`PretrainedConfig`]):\nThe configuration corresponding to the model to register.\nslow_tokenizer_class ([`PretrainedTokenizer`], *optional*):\nThe slow tokenizer to register.\nfast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):\nThe fast tokenizer to register.", "source": "github-repos"}
{"code": "def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[InstructBlipProcessorKwargs]) -> BatchFeature:\n    if images is None and text is None:\n        raise ValueError('You have to specify at least images or text.')\n    output_kwargs = self._merge_kwargs(InstructBlipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n    encoding = BatchFeature()\n    if text is not None:\n        if isinstance(text, str):\n            text = [text]\n        elif not isinstance(text, list) and (not isinstance(text[0], str)):\n            raise ValueError('Invalid input text. Please provide a string, or a list of strings')\n        return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)\n        _text_encoding = self.tokenizer(text, **output_kwargs['text_kwargs'], return_tensors=None)\n        output_kwargs['text_kwargs']['return_tensors'] = return_tensors\n        if self.num_query_tokens is not None and images is not None:\n            text_encoding = {}\n            image_tokens = self.image_token.content * self.num_query_tokens\n            image_token_encoding = self.tokenizer([image_tokens] * len(text), add_special_tokens=False, return_tensors=None)\n            for k in _text_encoding:\n                text_encoding[k] = [img_encoding + txt_encoding for img_encoding, txt_encoding in zip(image_token_encoding[k], _text_encoding[k])]\n        else:\n            text_encoding = _text_encoding\n            if images is not None:\n                logger.warning_once('Expanding inputs for image tokens in InstructBLIP should be done in processing. Please follow instruction here (https:\n        text_encoding = BatchEncoding(text_encoding, tensor_type=return_tensors)\n        encoding.update(text_encoding)\n        qformer_text_encoding = self.qformer_tokenizer(text, **output_kwargs['text_kwargs'])\n        encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids')\n        encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask')\n    if images is not None:\n        image_encoding = self.image_processor(images, **output_kwargs['images_kwargs'])\n        encoding.update(image_encoding)\n    return encoding", "docstring": "This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and\n[`BertTokenizerFast.__call__`] to prepare text for the model.\n\nPlease refer to the docstring of the above two methods for more information.\nArgs:\nimages (`ImageInput`):\nThe image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\ntensor. Both channels-first and channels-last formats are supported.\ntext (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):\nThe sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).", "source": "github-repos"}
{"code": "def StopTaskStorage(self, abort=False):\n    \n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Unsupported storage type.')\n\n    if os.path.isdir(self._merge_task_storage_path):\n      if abort:\n        shutil.rmtree(self._merge_task_storage_path)\n      else:\n        os.rmdir(self._merge_task_storage_path)\n\n    if os.path.isdir(self._processed_task_storage_path):\n      if abort:\n        shutil.rmtree(self._processed_task_storage_path)\n      else:\n        os.rmdir(self._processed_task_storage_path)\n\n    if os.path.isdir(self._task_storage_path):\n      if abort:\n        shutil.rmtree(self._task_storage_path)\n      else:\n        os.rmdir(self._task_storage_path)\n\n    self._merge_task_storage_path = None\n    self._processed_task_storage_path = None\n    self._task_storage_path = None", "docstring": "Removes the temporary path for the task storage.\n\nThe results of tasks will be lost on abort.\n\nArgs:\nabort (bool): True to indicate the stop is issued on abort.\n\nRaises:\nIOError: if the storage type is not supported.\nOSError: if the storage type is not supported.", "source": "juraj-google-style"}
{"code": "def extract_version(exepath, version_arg, word_index=(- 1), version_rank=3):\n    if isinstance(version_arg, basestring):\n        version_arg = [version_arg]\n    args = ([exepath] + version_arg)\n    (stdout, stderr, returncode) = _run_command(args)\n    if returncode:\n        raise RezBindError(('failed to execute %s: %s\\n(error code %d)' % (exepath, stderr, returncode)))\n    stdout = stdout.strip().split('\\n')[0].strip()\n    log((\"extracting version from output: '%s'\" % stdout))\n    try:\n        strver = stdout.split()[word_index]\n        toks = strver.replace('.', ' ').replace('-', ' ').split()\n        strver = '.'.join(toks[:version_rank])\n        version = Version(strver)\n    except Exception as e:\n        raise RezBindError((\"failed to parse version from output '%s': %s\" % (stdout, str(e))))\n    log((\"extracted version: '%s'\" % str(version)))\n    return version", "docstring": "Run an executable and get the program version.\n\nArgs:\nexepath: Filepath to executable.\nversion_arg: Arg to pass to program, eg \"-V\". Can also be a list.\nword_index: Expect the Nth word of output to be the version.\nversion_rank: Cap the version to this many tokens.\n\nReturns:\n`Version` object.", "source": "codesearchnet"}
{"code": "def change_wavelength(self, wavelength):\n        \n        for name, slab in self.slabs.items():\n            const_args = slab._const_args\n            mat_args = slab._mat_params\n\n            const_args[8] = wavelength\n\n            s = Slab(*const_args)\n            for mat_arg in mat_args:\n                s.add_material(*mat_arg)\n\n            self.slabs[name] = s\n\n        self._wl = wavelength", "docstring": "Changes the wavelength of the structure.\n\nThis will affect the mode solver and potentially\nthe refractive indices used (provided functions\nwere provided as refractive indices).\n\nArgs:\nwavelength (float): The new wavelength.", "source": "juraj-google-style"}
{"code": "def remove_file_from_tree(tree, file_path):\n    match = None\n    for item in tree:\n        if (item.get('path') == file_path):\n            match = item\n            break\n    if match:\n        tree.remove(match)\n    return tree", "docstring": "Remove a file from a tree.\n\nArgs:\n\ntree\nA list of dicts containing info about each blob in a tree.\n\nfile_path\nThe path of a file to remove from a tree.\n\nReturns:\nThe provided tree, but with the item matching the specified\nfile_path removed.", "source": "codesearchnet"}
{"code": "def get_folder_details(self, folder):\n    if (not is_valid_uuid(folder)):\n        raise StorageArgumentException('Invalid UUID for folder: {0}'.format(folder))\n    return self._authenticated_request.to_endpoint('folder/{}/'.format(folder)).return_body().get()", "docstring": "Get information on a given folder.\n\nArgs:\nfolder (str): The UUID of the requested folder.\n\nReturns:\nA dictionary of the folder details if found::\n\n{\nu'created_by': u'303447',\nu'created_on': u'2017-03-21T14:06:32.293902Z',\nu'description': u'',\nu'entity_type': u'folder',\nu'modified_by': u'303447',\nu'modified_on': u'2017-03-21T14:06:32.293967Z',\nu'name': u'myfolder',\nu'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',\nu'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'\n}\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def intent(method):\n    \n\n    def wrapper(self, *args, **kwargs):\n        try:\n            return method(self, *args, **kwargs)\n        except exceptions.MatrixError as e:\n            if isinstance(e.original_exception,\n                          matrix_client.errors.MatrixRequestError):\n                self._handle_request_exception(e)\n                \n                return method(self, *args, **kwargs)\n            else:\n                raise e\n\n    return wrapper", "docstring": "Helps object methods handle MatrixRequestError.\n\nArgs:\nmethod(function): Object method to be wrapped\n\nMethod's object must have _handle_request_exception method that deals with\nspecific status codes and errcodes.", "source": "juraj-google-style"}
{"code": "def cluster_from_file(filename):\n    atoms_string = Atoms.atoms_string_from_file(filename)\n    line_list = [l.split() for l in atoms_string.splitlines()[3:]]\n    coords = []\n    symbols = []\n    for l in line_list:\n        if l:\n            coords.append([float(i) for i in l[:3]])\n            symbols.append(l[4])\n    return Molecule(symbols, coords)", "docstring": "Parse the feff input file and return the atomic cluster as a Molecule\nobject.\n\nArgs:\nfilename (str): path the feff input file\n\nReturns:\nMolecule: the atomic cluster as Molecule object. The absorbing atom\nis the one at the origin.", "source": "codesearchnet"}
{"code": "def Decrypt(self, encrypted_data):\n    \n    index_split = -(len(encrypted_data) % DES3.block_size)\n    if index_split:\n      remaining_encrypted_data = encrypted_data[index_split:]\n      encrypted_data = encrypted_data[:index_split]\n    else:\n      remaining_encrypted_data = b''\n\n    decrypted_data = self._des3_cipher.decrypt(encrypted_data)\n\n    return decrypted_data, remaining_encrypted_data", "docstring": "Decrypts the encrypted data.\n\nArgs:\nencrypted_data (bytes): encrypted data.\n\nReturns:\ntuple[bytes, bytes]: decrypted data and remaining encrypted data.", "source": "juraj-google-style"}
{"code": "def append_memory_pdf_to_writer(input_pdf: bytes,\n                                writer: PdfFileWriter,\n                                start_recto: bool = True) -> None:\n    \n    if not input_pdf:\n        return\n    if start_recto and writer.getNumPages() % 2 != 0:\n        writer.addBlankPage()\n        \n    infile = io.BytesIO(input_pdf)\n    reader = PdfFileReader(infile)\n    for page_num in range(reader.numPages):\n        writer.addPage(reader.getPage(page_num))", "docstring": "Appends a PDF (as bytes in memory) to a PyPDF2 writer.\n\nArgs:\ninput_pdf: the PDF, as ``bytes``\nwriter: the writer\nstart_recto: start a new right-hand page?", "source": "juraj-google-style"}
{"code": "def _add_ragged_partition(values, partition, tensor_dict, row_splits_dtype, validate):\n    if isinstance(partition, RaggedFeature.UniformRowLength):\n        if isinstance(values, ragged_tensor.RaggedTensor):\n            length = ops.convert_to_tensor(partition.length, dtype=row_splits_dtype)\n            return ragged_tensor.RaggedTensor.from_uniform_row_length(values, length, validate=validate)\n        else:\n            return array_ops.reshape(values, array_ops.concat([[-1, partition.length], array_ops.shape(values)[1:]], axis=0))\n    else:\n        partition_t = math_ops.cast(tensor_dict[partition.key], row_splits_dtype)\n        if isinstance(partition, RaggedFeature.RowSplits):\n            return ragged_tensor.RaggedTensor.from_row_splits(values, partition_t, validate=validate)\n        elif isinstance(partition, RaggedFeature.RowLengths):\n            return ragged_tensor.RaggedTensor.from_row_lengths(values, partition_t, validate=validate)\n        elif isinstance(partition, RaggedFeature.RowStarts):\n            return ragged_tensor.RaggedTensor.from_row_starts(values, partition_t, validate=validate)\n        elif isinstance(partition, RaggedFeature.RowLimits):\n            return ragged_tensor.RaggedTensor.from_row_limits(values, partition_t, validate=validate)\n        elif isinstance(partition, RaggedFeature.ValueRowIds):\n            return ragged_tensor.RaggedTensor.from_value_rowids(values, partition_t, validate=validate)\n        raise ValueError(f'Unhandled partition type {partition!r}')", "docstring": "Creates a RaggedTensor from a values tensor and a partition tensor.\n\nArgs:\nvalues: The values tensor for the new RaggedTensor.\npartition: The partition configuration object.  Specifies the key that\nshould be used to look up the partition tensor (unless partition is a\nRaggedFeature.UniformRowLength, in which case there is no partition\ntensor).\ntensor_dict: The dictionary mapping keys to tensors.\nrow_splits_dtype: The dtype for the partition tensor.\nvalidate: Whether to validate that the values form a valid RaggedTensor.\n\nReturns:\nA new RaggedTensor formed from the values and partition tensors.", "source": "github-repos"}
{"code": "def constraint(self):\n    raise NotImplementedError", "docstring": "Returns the constraint function associated with this variable.\n\nReturns:\nThe constraint function that was passed to the variable constructor.\nCan be `None` if no constraint was passed.", "source": "github-repos"}
{"code": "def parse_docs(docs, marks):\n    if (docs is None):\n        return {}\n    indexs = []\n    for mark in marks:\n        i = docs.find(mark)\n        if (i >= 0):\n            indexs.append(i)\n    if (not indexs):\n        return {'$desc': textwrap.dedent(docs).strip()}\n    start = min(indexs)\n    start = docs.rfind('\\n', 0, start)\n    yamltext = textwrap.dedent(docs[(start + 1):])\n    meta = yaml.load(yamltext)\n    meta['$desc'] = textwrap.dedent(docs[:start]).strip()\n    return meta", "docstring": "Parse YAML syntax content from docs\n\nIf docs is None, return {}\nIf docs has no YAML content, return {\"$desc\": docs}\nElse, parse YAML content, return {\"$desc\": docs, YAML}\n\nArgs:\ndocs (str): docs to be parsed\nmarks (list): list of which indicate YAML content starts\nReturns:\nA dict contains information of docs", "source": "codesearchnet"}
{"code": "def state(self):\n    try:\n        return libvirt_utils.Domain.resolve_state(self.raw_state())\n    except vm_plugin.LagoVMDoesNotExistError:\n        return 'down'\n    except vm_plugin.LagoFailedToGetVMStateError:\n        return 'failed to get state'\n    except KeyError:\n        return 'unknown state'", "docstring": "Return a small description of the current status of the domain\n\nReturns:\nstr: small description of the domain status, 'down' if it's not\nfound at all.", "source": "codesearchnet"}
{"code": "def histogram(self, tag, values, bins, step=None):\n    \n    if step is None:\n      step = self._step\n    else:\n      self._step = step\n    values = onp.array(values)\n    bins = onp.array(bins)\n    values = onp.reshape(values, -1)\n    counts, limits = onp.histogram(values, bins=bins)\n    \n    cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))\n    start, end = onp.searchsorted(\n        cum_counts, [0, cum_counts[-1] - 1], side='right')\n    start, end = int(start), int(end) + 1\n    counts = (\n        counts[start -\n               1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))\n    limits = limits[start:end + 1]\n    sum_sq = values.dot(values)\n    histo = HistogramProto(\n        min=values.min(),\n        max=values.max(),\n        num=len(values),\n        sum=values.sum(),\n        sum_squares=sum_sq,\n        bucket_limit=limits.tolist(),\n        bucket=counts.tolist())\n    summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])\n    self.add_summary(summary, step)", "docstring": "Saves histogram of values.\n\nArgs:\ntag: str: label for this data\nvalues: ndarray: will be flattened by this routine\nbins: number of bins in histogram, or array of bins for onp.histogram\nstep: int: training step", "source": "juraj-google-style"}
{"code": "def color_palette_dict(self, alpha=0.35):\n        \n\n        color_dict = {}\n        for hkl in self.all_slab_entries.keys():\n            rgb_indices = [0, 1, 2]\n            color = [0, 0, 0, 1]\n            random.shuffle(rgb_indices)\n            for i, ind in enumerate(rgb_indices):\n                if i == 2:\n                    break\n                color[ind] = np.random.uniform(0, 1)\n\n            \n            clean_list = np.linspace(0, 1, len(self.all_slab_entries[hkl]))\n            for i, clean in enumerate(self.all_slab_entries[hkl].keys()):\n                c = copy.copy(color)\n                c[rgb_indices[2]] = clean_list[i]\n                color_dict[clean] = c\n\n                \n                for ads_entry in self.all_slab_entries[hkl][clean]:\n                    c_ads = copy.copy(c)\n                    c_ads[3] = alpha\n                    color_dict[ads_entry] = c_ads\n\n        return color_dict", "docstring": "Helper function to assign each facet a unique color using a dictionary.\n\nArgs:\nalpha (float): Degree of transparency\n\nreturn (dict): Dictionary of colors (r,g,b,a) when plotting surface\nenergy stability. The keys are individual surface entries where\nclean surfaces have a solid color while the corresponding adsorbed\nsurface will be transparent.", "source": "juraj-google-style"}
{"code": "def load_spacy_rule(file_path: str) -> Dict:\n        \n        with open(file_path) as fp:\n            return json.load(fp)", "docstring": "A spacy rule file is a json file.\n\nArgs:\nfile_path (str): path to a text file containing a spacy rule sets.\n\nReturns: Dict as the representation of spacy rules", "source": "juraj-google-style"}
{"code": "def _validate_tensor_info(self, tensor_info):\n    if tensor_info is None:\n        raise AssertionError('All TensorInfo protos used in the SignatureDefs must have the name and dtype fields set.')\n    if tensor_info.WhichOneof('encoding') is None:\n        raise AssertionError(f\"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in the SignatureDefs must have one of the 'encoding' fields (e.g., name or coo_sparse) set.\")\n    if tensor_info.WhichOneof('encoding') == 'composite_tensor':\n        for component in tensor_info.composite_tensor.components:\n            self._validate_tensor_info(component)\n    elif tensor_info.dtype == types_pb2.DT_INVALID:\n        raise AssertionError(f'Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in the SignatureDefs must have the dtype field set.')", "docstring": "Validates the `TensorInfo` proto.\n\nChecks if the `encoding` (`name` or `coo_sparse` or `type_spec`) and\n`dtype` fields exist and are non-empty.\n\nArgs:\ntensor_info: `TensorInfo` protocol buffer to validate.\n\nRaises:\nAssertionError: If the `encoding` or `dtype` fields of the supplied\n`TensorInfo` proto are not populated.", "source": "github-repos"}
{"code": "def list(self):\n    import IPython\n    data = [{'name': version['name'].split()[(- 1)], 'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']} for version in self.get_iterator()]\n    IPython.display.display(datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime']))", "docstring": "List versions under the current model in a table view.\n\nRaises:\nException if it is called in a non-IPython environment.", "source": "codesearchnet"}
{"code": "def Audio(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.Audio(tag)", "docstring": "Retrieve the audio events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.AudioEvents`.", "source": "codesearchnet"}
{"code": "def __init__(self, name: Union[str, bytes], bound_context: context.Context, function_type: function_type_lib.FunctionType, children: Optional[List['AtomicFunction']]=None, call_options: CallOptions=CallOptions(), cached_graph: Optional[func_graph_module.FuncGraph]=None):\n    self._name = compat.as_bytes(name)\n    self._bound_context = bound_context\n    self._function_type = function_type\n    self._children = children if children else []\n    self._call_options = call_options\n    self._cached_definition = None\n    self._cached_graph = cached_graph\n    self._generated_graph = None\n    ref_key = (self._bound_context.function_scope_id, self.name)\n    if ref_key not in RUNTIME_FUNCTION_REFS:\n        RUNTIME_FUNCTION_REFS[ref_key] = 1\n    else:\n        RUNTIME_FUNCTION_REFS[ref_key] += 1", "docstring": "Construct a new AtomicFunction.\n\nArgs:\nname: str/bytes name of the runtime function in the bound context.\nbound_context: interface to the runtime for the AtomicFunction.\nfunction_type: input/output contract for the AtomicFunction\nchildren: list of AtomicFunctions that are needed to call this one.\ncall_options: extra configuration options for the call.\ncached_graph: FuncGraph that this AtomicFunction was generated from (if\nknown). Otherwise it will lazily construct a new corresponding FuncGraph\nif ever needed.", "source": "github-repos"}
{"code": "def persist_as_png(structure_dict, filepath):\n    graph = _create_graph(structure_dict)\n    graph.write(filepath, format='png')", "docstring": "Saves pipeline diagram to disk as png file.\n\nArgs:\nstructure_dict (dict): dict returned by\n:func:`~steppy.base.Step.upstream_structure`\nfilepath (str): filepath to which the png with pipeline visualization should be persisted", "source": "codesearchnet"}
{"code": "def close_position(self, repay_only):\n    params = {'repay_only': repay_only}\n    return self._send_message('post', '/position/close', data=json.dumps(params))", "docstring": "Close position.\n\nArgs:\nrepay_only (bool): Undocumented by cbpro.\n\nReturns:\nUndocumented", "source": "codesearchnet"}
{"code": "def load_b26_file(file_name):\n    assert os.path.exists(file_name)\n    with open(file_name, 'r') as infile:\n        data = yaml.safe_load(infile)\n    return data", "docstring": "loads a .b26 file into a dictionary\n\nArgs:\nfile_name:\n\nReturns: dictionary with keys instrument, scripts, probes", "source": "codesearchnet"}
{"code": "def assign(self, value, use_locking=False, name=None, read_value=True):\n    raise NotImplementedError", "docstring": "Assigns a new value to the variable.\n\nThis is essentially a shortcut for `assign(self, value)`.\n\nArgs:\nvalue: A `Tensor`. The new value for this variable.\nuse_locking: If `True`, use locking during the assignment.\nname: The name of the operation to be created\nread_value: if True, will return something which evaluates to the new\nvalue of the variable; if False will return the assign op.\n\nReturns:\nThe updated variable. If `read_value` is false, instead returns None in\nEager mode and the assign op in graph mode.", "source": "github-repos"}
{"code": "def reset_dtensor_device(is_async: bool) -> None:\n    global _dtensor_singleton\n    device = dtensor_device.DTensorDevice(meshes=[], is_async=is_async)\n    _dtensor_singleton = device", "docstring": "Resets the Eager execution device for DTensor.\n\nThis function is only intended for testing and diagnostics.\n\nArgs:\nis_async: If True, the device uses async execution.", "source": "github-repos"}
{"code": "def ping(self, timeout=12):\n    self.conn('POST', '{0}/users/ME/endpoints/{1}/active'.format(self.conn.msgsHost, self.id), auth=SkypeConnection.Auth.RegToken, json={'timeout': timeout})", "docstring": "Send a keep-alive request for the endpoint.\n\nArgs:\ntimeout (int): maximum amount of time for the endpoint to stay active", "source": "codesearchnet"}
{"code": "def list_container_services_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.ContainerService/ContainerServices',\n                        '?api-version=', ACS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container services in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON model.", "source": "juraj-google-style"}
{"code": "def save_scatter_table(self, fn, description=''):\n    data = {'description': description, 'time': datetime.now(), 'psd_scatter': (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table, self._angular_table, self._m_table, self.geometries), 'version': tmatrix_aux.VERSION}\n    pickle.dump(data, file(fn, 'w'), pickle.HIGHEST_PROTOCOL)", "docstring": "Save the scattering lookup tables.\n\nSave the state of the scattering lookup tables to a file.\nThis can be loaded later with load_scatter_table.\n\nOther variables will not be saved, but this does not matter because\nthe results of the computations are based only on the contents\nof the table.\n\nArgs:\nfn: The name of the scattering table file.\ndescription (optional): A description of the table.", "source": "codesearchnet"}
{"code": "def filter_by_doys(self, doys):\n        \n        _filt_values = []\n        _filt_datetimes = []\n        for i, d in enumerate(self.datetimes):\n            if d in doys:\n                _filt_datetimes.append(d)\n                _filt_values.append(self._values[i])\n        _filt_header = self.header.duplicate()\n        return DailyCollection(_filt_header, _filt_values, _filt_datetimes)", "docstring": "Filter the Data Collection based on a list of days of the year (as integers).\n\nArgs:\ndoys: A List of days of the year [1..365]\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def set_sleep_timer(self, sleep_time_seconds):\n    try:\n        if (sleep_time_seconds is None):\n            sleep_time = ''\n        else:\n            sleep_time = format(datetime.timedelta(seconds=int(sleep_time_seconds)))\n        self.avTransport.ConfigureSleepTimer([('InstanceID', 0), ('NewSleepTimerDuration', sleep_time)])\n    except SoCoUPnPException as err:\n        if ('Error 402 received' in str(err)):\n            raise ValueError('invalid sleep_time_seconds, must be integer                     value between 0 and 86399 inclusive or None')\n        raise\n    except ValueError:\n        raise ValueError('invalid sleep_time_seconds, must be integer                 value between 0 and 86399 inclusive or None')", "docstring": "Sets the sleep timer.\n\nArgs:\nsleep_time_seconds (int or NoneType): How long to wait before\nturning off speaker in seconds, None to cancel a sleep timer.\nMaximum value of 86399\n\nRaises:\nSoCoException: Upon errors interacting with Sonos controller\nValueError: Argument/Syntax errors", "source": "codesearchnet"}
{"code": "def is_remote_added(remote):\n    out = __salt__['cmd.run_all']((FLATPAK_BINARY_NAME + ' remotes'))\n    lines = out.splitlines()\n    for item in lines:\n        i = re.split('\\\\t+', item.rstrip('\\t'))\n        if (i[0] == remote):\n            return True\n    return False", "docstring": "Determines if a remote exists.\n\nArgs:\nremote (str): The remote's name.\n\nReturns:\nbool: True if the remote has already been added.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' flatpak.is_remote_added flathub", "source": "codesearchnet"}
{"code": "def get_output_mask_at(self, node_index):\n    output = self.get_output_at(node_index)\n    if isinstance(output, list):\n        return [getattr(x, '_keras_mask', None) for x in output]\n    else:\n        return getattr(output, '_keras_mask', None)", "docstring": "Retrieves the output mask tensor(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom which to retrieve the attribute.\nE.g. `node_index=0` will correspond to the\nfirst time the layer was called.\n\nReturns:\nA mask tensor\n(or list of tensors if the layer has multiple outputs).", "source": "github-repos"}
{"code": "def register_access_db(fullfilename: str, dsn: str, description: str) -> bool:\n    \n    directory = os.path.dirname(fullfilename)\n    return create_sys_dsn(\n        access_driver,\n        SERVER=\"\",\n        DESCRIPTION=description,\n        DSN=dsn,\n        DBQ=fullfilename,\n        DefaultDir=directory\n    )", "docstring": "(Windows only.)\nRegisters a Microsoft Access database with ODBC.\n\nArgs:\nfullfilename: filename of the existing database\ndsn: ODBC data source name to create\ndescription: description of the database\n\nReturns:\nbool: was the DSN created?", "source": "juraj-google-style"}
{"code": "def get_data_files_path():\n    return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))", "docstring": "Get a direct path to the data files colocated with the script.\n\nReturns:\nThe directory where files specified in data attribute of py_test\nand py_binary are stored.", "source": "github-repos"}
{"code": "def FixValue(value):\n    if value.startswith('\"') and value.endswith('\"') or (value.startswith(\"'\") and value.endswith(\"'\")):\n        value = value[1:-1]\n    try:\n        value = int(value)\n    except ValueError:\n        try:\n            value = float(value)\n        except ValueError:\n            return value\n    return value", "docstring": "Helper function to fix values loaded from a config file.\n\nCurrently we strip bracketed quotes as well as convert numbers to\nfloats for configuration parameters expecting numerical data types.\n\nArgs:\nvalue: value to be converted\n\nReturns:\nfixed value", "source": "github-repos"}
{"code": "def run_step(self, context):\n    logger.debug('starting')\n    self.set_step_input_context(context)\n    if self.while_decorator:\n        self.while_decorator.while_loop(context, self.run_foreach_or_conditional)\n    else:\n        self.run_foreach_or_conditional(context)\n    logger.debug('done')", "docstring": "Run a single pipeline step.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "codesearchnet"}
{"code": "def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:\n    \n    store = crypto.X509Store()\n\n    \n    for cert in certs_chain:\n        store.add_cert(cert)\n\n    \n    default_verify_paths = ssl.get_default_verify_paths()\n\n    default_verify_file = default_verify_paths.cafile\n    default_verify_file = Path(default_verify_file).resolve() if default_verify_file else None\n\n    default_verify_path = default_verify_paths.capath\n    default_verify_path = Path(default_verify_path).resolve() if default_verify_path else None\n\n    ca_files = [ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else []\n    if default_verify_file:\n        ca_files.append(default_verify_file)\n\n    for ca_file in ca_files:\n        ca_file: Path\n        if ca_file.is_file():\n            with ca_file.open('r', encoding='ascii') as crt_f:\n                ca_certs_txt = crt_f.read()\n                ca_certs = extract_certs(ca_certs_txt)\n                for cert in ca_certs:\n                    store.add_cert(cert)\n\n    \n    ssl_context = ssl.create_default_context()\n    der_certs = ssl_context.get_ca_certs(binary_form=True)\n    pem_certs = '\\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])\n    ca_certs = extract_certs(pem_certs)\n    for ca_cert in ca_certs:\n        store.add_cert(ca_cert)\n\n    store_context = crypto.X509StoreContext(store, amazon_cert)\n\n    try:\n        store_context.verify_certificate()\n        result = True\n    except crypto.X509StoreContextError:\n        result = False\n\n    return result", "docstring": "Verifies if Amazon and additional certificates creates chain of trust to a root CA.\n\nArgs:\ncerts_chain: List of pycrypto X509 intermediate certificates from signature chain URL.\namazon_cert: Pycrypto X509 Amazon certificate.\n\nReturns:\nresult: True if verification was successful, False if not.", "source": "juraj-google-style"}
{"code": "def solution(swarm):\n    \n    best = swarm[0]\n    cmp = comparator(best.best_fitness)\n    for particle in swarm:\n        if cmp(particle.best_fitness, best.best_fitness):\n            best = particle\n    return best", "docstring": "Determines the global best particle in the swarm.\n\nArgs:\nswarm: iterable: an iterable that yields all particles in the swarm.\n\nReturns:\ncipy.algorithms.pso.Particle: The best particle in the swarm when\ncomparing the best_fitness values of the particles.", "source": "juraj-google-style"}
{"code": "def is_native_xmon_op(op: ops.Operation) -> bool:\n    return (isinstance(op, ops.GateOperation) and is_native_xmon_gate(op.gate))", "docstring": "Check if the gate corresponding to an operation is a native xmon gate.\n\nArgs:\nop: Input operation.\n\nReturns:\nTrue if the operation is native to the xmon, false otherwise.", "source": "codesearchnet"}
{"code": "def get_img_shape(img):\n    \n    if isinstance(img, np.ndarray):\n        shape = img.shape\n    else:\n        shape = K.int_shape(img)\n\n    if K.image_data_format() == 'channels_last':\n        shape = list(shape)\n        shape.insert(1, shape[-1])\n        shape = tuple(shape[:-1])\n    return shape", "docstring": "Returns image shape in a backend agnostic manner.\n\nArgs:\nimg: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or\n`(image_dims..., channels)` if data_format='channels_last'.\n\nReturns:\nTuple containing image shape information in `(samples, channels, image_dims...)` order.", "source": "juraj-google-style"}
{"code": "def verify_sans(amazon_cert: crypto.X509) -> bool:\n    \n    cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())]\n    subject_alt_names = ''\n\n    for extention in cert_extentions:\n        if 'subjectAltName' in str(extention.get_short_name()):\n            subject_alt_names = extention.__str__()\n            break\n\n    result = 'echo-api.amazon.com' in subject_alt_names\n\n    return result", "docstring": "Verifies Subject Alternative Names (SANs) for Amazon certificate.\n\nArgs:\namazon_cert: Pycrypto X509 Amazon certificate.\n\nReturns:\nresult: True if verification was successful, False if not.", "source": "juraj-google-style"}
{"code": "def swo_disable(self, port_mask):\n    res = self._dll.JLINKARM_SWO_DisableTarget(port_mask)\n    if (res != 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Disables ITM & Stimulus ports.\n\nArgs:\nself (JLink): the ``JLink`` instance\nport_mask (int): mask specifying which ports to disable\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def _validate_testbed_name(name):\n    \n    if not name:\n        raise MoblyConfigError(\"Test bed names can't be empty.\")\n    name = str(name)\n    for char in name:\n        if char not in utils.valid_filename_chars:\n            raise MoblyConfigError(\n                'Char \"%s\" is not allowed in test bed names.' % char)", "docstring": "Validates the name of a test bed.\n\nSince test bed names are used as part of the test run id, it needs to meet\ncertain requirements.\n\nArgs:\nname: The test bed's name specified in config file.\n\nRaises:\nMoblyConfigError: The name does not meet any criteria.", "source": "juraj-google-style"}
{"code": "def _replace_variable_with_pattern(match):\n    positional = match.group('positional')\n    name = match.group('name')\n    template = match.group('template')\n    if (name is not None):\n        if (not template):\n            return _SINGLE_SEGMENT_PATTERN.format(name)\n        elif (template == '**'):\n            return _MULTI_SEGMENT_PATTERN.format(name)\n        else:\n            return _generate_pattern_for_template(template)\n    elif (positional == '*'):\n        return _SINGLE_SEGMENT_PATTERN\n    elif (positional == '**'):\n        return _MULTI_SEGMENT_PATTERN\n    else:\n        raise ValueError('Unknown template expression {}'.format(match.group(0)))", "docstring": "Replace a variable match with a pattern that can be used to validate it.\n\nArgs:\nmatch (re.Match): A regular expression match\n\nReturns:\nstr: A regular expression pattern that can be used to validate the\nvariable in an expanded path.\n\nRaises:\nValueError: If an unexpected template expression is encountered.", "source": "codesearchnet"}
{"code": "def _get_app_path(url):\n    app_path = urlparse(url).path.rstrip('/')\n    if (not app_path.startswith('/')):\n        app_path = ('/' + app_path)\n    return app_path", "docstring": "Extract the app path from a Bokeh server URL\n\nArgs:\nurl (str) :\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def upsert_variant(self, variant_obj):\n        \n        LOG.debug(\"Upserting variant %s\", variant_obj['_id'])\n        try:\n            result = self.variant_collection.insert_one(variant_obj)\n        except DuplicateKeyError as err:\n            LOG.debug(\"Variant %s already exists in database\", variant_obj['_id'])\n            result = self.variant_collection.find_one_and_update(\n                {'_id': variant_obj['_id']},\n                {\n                    '$set': {\n                        'compounds': variant_obj.get('compounds',[])\n                    }\n                }\n            )\n            variant = self.variant_collection.find_one({'_id': variant_obj['_id']})\n        return result", "docstring": "Load a variant object, if the object already exists update compounds.\n\nArgs:\nvariant_obj(dict)\n\nReturns:\nresult", "source": "juraj-google-style"}
{"code": "def _conditional_patch(src: symbolic.Symbolic, condition: Callable[[utils.KeyPath, Any, symbolic.Symbolic], bool], value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:\n    if value_fn is not None and value is not None:\n        raise ValueError('Either `value` or `value_fn` should be specified.')\n\n    def _fn(k, v, p):\n        if condition(k, v, p):\n            return value_fn(v) if value_fn else value\n        return v\n    return src.rebind(_fn, raise_on_no_change=False, skip_notification=skip_notification)", "docstring": "Recursive patch values on condition.\n\nArgs:\nsrc: symbolic value to patch.\ncondition: Callable object with signature (key_path, value, parent) which\nreturns whether a field should be patched.\nvalue: New value for field that satisfy `condition`.\nvalue_fn: Callable object that produces new value based on old value.\nIf not None, `value` must be None.\nskip_notification: If True, `on_change` event will not be triggered for this\noperation. If None, the behavior is decided by `pg.notify_on_rebind`.\nPlease see `symbolic.Symbolic.rebind` for details.\n\nReturns:\n`src` after being patched.", "source": "github-repos"}
{"code": "def find_ruuvitags(bt_device=''):\n    log.info('Finding RuuviTags. Stop with Ctrl+C.')\n    datas = dict()\n    for new_data in RuuviTagSensor._get_ruuvitag_datas(bt_device=bt_device):\n        if (new_data[0] in datas):\n            continue\n        datas[new_data[0]] = new_data[1]\n        log.info(new_data[0])\n        log.info(new_data[1])\n    return datas", "docstring": "Find all RuuviTags. Function will print the mac and the state of the sensors when found.\nFunction will execute as long as it is stopped. Stop ecexution with Crtl+C.\n\nReturns:\ndict: MAC and state of found sensors", "source": "codesearchnet"}
{"code": "def on_core_metadata_event(self, event):\n    core_metadata = json.loads(event.log_message.message)\n    input_names = ','.join(core_metadata['input_names'])\n    output_names = ','.join(core_metadata['output_names'])\n    target_nodes = ','.join(core_metadata['target_nodes'])\n    self._run_key = RunKey(input_names, output_names, target_nodes)\n    if (not self._graph_defs):\n        self._graph_defs_arrive_first = False\n    else:\n        for device_name in self._graph_defs:\n            self._add_graph_def(device_name, self._graph_defs[device_name])\n    self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))\n    logger.info('on_core_metadata_event() waiting for client ack (meta)...')\n    self._incoming_channel.get()\n    logger.info('on_core_metadata_event() client ack received (meta).')", "docstring": "Implementation of the core metadata-carrying Event proto callback.\n\nArgs:\nevent: An Event proto that contains core metadata about the debugged\nSession::Run() in its log_message.message field, as a JSON string.\nSee the doc string of debug_data.DebugDumpDir.core_metadata for details.", "source": "codesearchnet"}
{"code": "def n_feature_hash(feature, dims, seeds):\n    vec = np.zeros(sum(dims))\n    offset = 0\n    for (seed, dim) in zip(seeds, dims):\n        vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)\n        offset += dim\n    return vec", "docstring": "N-hot-encoded feature hashing.\n\nArgs:\nfeature (str): Target feature represented as string.\ndims (list of int): Number of dimensions for each hash value.\nseeds (list of float): Seed of each hash function (mmh3).\n\nReturns:\nnumpy 1d array: n-hot-encoded feature vector for `s`.", "source": "codesearchnet"}
{"code": "def listen(self):\n        \n\n        self.listening = True\n        if self.threading:\n            from threading import Thread\n            self.listen_thread = Thread(target=self.listen_loop)\n            self.listen_thread.daemon = True\n            self.listen_thread.start()\n\n            self.scheduler_thread = Thread(target=self.scheduler)\n            self.scheduler_thread.daemon = True\n            self.scheduler_thread.start()\n\n        else:\n            self.listen_loop()", "docstring": "Starts the listen loop. If threading is enabled, then the loop will\nbe started in its own thread.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def assemble_transition_model_from_gradable_adjectives(self):\n        \n\n        df = pd.read_sql_table(\"gradableAdjectiveData\", con=engine)\n        gb = df.groupby(\"adjective\")\n\n        rs = gaussian_kde(\n            flatMap(\n                lambda g: gaussian_kde(get_respdevs(g[1]))\n                .resample(self.res)[0]\n                .tolist(),\n                gb,\n            )\n        ).resample(self.res)[0]\n\n        for edge in self.edges(data=True):\n            edge[2][\"ConditionalProbability\"] = constructConditionalPDF(\n                gb, rs, edge\n            )\n            edge[2][\"βs\"] = np.tan(\n                edge[2][\"ConditionalProbability\"].resample(self.res)[0]\n            )", "docstring": "Add probability distribution functions constructed from gradable\nadjective data to the edges of the analysis graph data structure.\n\nArgs:\nadjective_data\nres", "source": "juraj-google-style"}
{"code": "def scan_and_connect(self, devnames, timeout=DEF_TIMEOUT, calibration=True):\n    responses = self.scan_devices(devnames, timeout)\n    for dev in devnames:\n        if (dev not in responses):\n            logger.error('Failed to find device {} during scan'.format(dev))\n            return (False, [])\n    return self.connect([responses.get_device(dev) for dev in devnames], calibration)", "docstring": "Scan for and then connect to a set of one or more SK8s.\n\nThis method is intended to be a simple way to combine the steps of\nrunning a BLE scan, checking the results and connecting to one or more\ndevices. When called, a scan is started for a period equal to `timeout`,\nand a list of devices is collected. If at any point during the scan all of\nthe supplied devices are detected, the scan will be ended immediately.\n\nAfter the scan has completed, the method will only proceed to creating\nconnections if the scan results contain all the specified devices.\n\nArgs:\ndevnames (list): a list of device names (1 or more)\ntimeout (float): a time period in seconds to run the scanning process\n(will be terminated early if all devices in `devnames` are discovered)\n\nReturns:\nReturns the same results as :meth:`connect`.", "source": "codesearchnet"}
{"code": "def params(self, params):\n    url = furl(self._request.rawurl)\n    url = url.add(params)\n    self._request.url = url.url\n    self.add_matcher(matcher('QueryMatcher', params))", "docstring": "Defines a set of URL query params to match.\n\nArguments:\nparams (dict): set of params to match.\n\nReturns:\nself: current Mock instance.", "source": "codesearchnet"}
{"code": "def occupations(self, site_label):\n    return sum(((atom.site.label == site_label) for atom in self.atoms))", "docstring": "Number of these atoms occupying a specific site type.\n\nArgs:\nsite_label (Str): Label for the site type being considered.\n\nReturns:\n(Int): Number of atoms occupying sites of type `site_label`.", "source": "codesearchnet"}
{"code": "def __init__(self, memspace='private', memtype='mot_float_type'):\n        \n        super().__init__(\n            memtype,\n            self.__class__.__name__ + '_' + memspace + '_' + memtype,\n            [],\n            resource_filename('mot', 'data/opencl/euclidian_norm.cl'),\n            var_replace_dict={'MEMSPACE': memspace, 'MEMTYPE': memtype})", "docstring": "A CL functions for calculating the Euclidian distance between n values.\n\nArgs:\nmemspace (str): The memory space of the memtyped array (private, constant, global).\nmemtype (str): the memory type to use, double, float, mot_float_type, ...", "source": "juraj-google-style"}
{"code": "def goto_step(self, inst: InstanceNode) -> InstanceNode:\n        \n        try:\n            return inst._entry(\n                inst.value.index(self.parse_value(inst.schema_node)))\n        except ValueError:\n            raise NonexistentInstance(inst.json_pointer(),\n                                      f\"entry '{self.value!s}'\") from None", "docstring": "Return member instance of `inst` addressed by the receiver.\n\nArgs:\ninst: Current instance.", "source": "juraj-google-style"}
{"code": "def cc(project, detect_project=False):\n    from benchbuild.utils import cmd\n    cc_name = str(CFG['compiler']['c'])\n    wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project)\n    return cmd['./{}'.format(cc_name)]", "docstring": "Return a clang that hides CFLAGS and LDFLAGS.\n\nThis will generate a wrapper script in the current directory\nand return a complete plumbum command to it.\n\nArgs:\ncflags: The CFLAGS we want to hide.\nldflags: The LDFLAGS we want to hide.\nfunc (optional): A function that will be pickled alongside the compiler.\nIt will be called before the actual compilation took place. This\nway you can intercept the compilation process with arbitrary python\ncode.\n\nReturns (benchbuild.utils.cmd):\nPath to the new clang command.", "source": "codesearchnet"}
{"code": "def _eig_complex_symmetric(M: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]:\n    if (not np.allclose(M, M.transpose())):\n        raise np.linalg.LinAlgError('Not a symmetric matrix')\n    max_attempts = 16\n    for _ in range(max_attempts):\n        c = np.random.uniform(0, 1)\n        matrix = ((c * M.real) + ((1 - c) * M.imag))\n        (_, eigvecs) = np.linalg.eigh(matrix)\n        eigvecs = np.array(eigvecs, dtype=complex)\n        eigvals = np.diag(((eigvecs.transpose() @ M) @ eigvecs))\n        reconstructed = ((eigvecs @ np.diag(eigvals)) @ eigvecs.transpose())\n        if np.allclose(M, reconstructed):\n            return (eigvals, eigvecs)\n    raise np.linalg.LinAlgError('Cannot diagonalize complex symmetric matrix.')", "docstring": "Diagonalize a complex symmetric  matrix. The eigenvalues are\ncomplex, and the eigenvectors form an orthogonal matrix.\n\nReturns:\neigenvalues, eigenvectors", "source": "codesearchnet"}
{"code": "def construct(cls, name, version=None):\n    other = VersionedObject(None)\n    other.name_ = name\n    other.version_ = (Version() if (version is None) else version)\n    return other", "docstring": "Create a VersionedObject directly from an object name and version.\n\nArgs:\nname: Object name string.\nversion: Version object.", "source": "codesearchnet"}
{"code": "def GetUpdateTimestamp(self):\n    return self._last_update_timestamp", "docstring": "Return last update timestamp of this map.\n\nReturns:\nAn int containing seconds since epoch, or None.", "source": "github-repos"}
{"code": "def properties(lines):\n    results = {}\n    for (i, line) in enumerate(lines):\n        type_ = line[3:6]\n        if (type_ not in ['CHG', 'RAD', 'ISO']):\n            continue\n        count = int(line[6:9])\n        results[type_] = []\n        for j in range(count):\n            idx = int(line[(10 + (j * 8)):(13 + (j * 8))])\n            val = int(line[(14 + (j * 8)):(17 + (j * 8))])\n            results[type_].append((idx, val))\n    return results", "docstring": "Parse properties block\n\nReturns:\ndict: {property_type: (atom_index, value)}", "source": "codesearchnet"}
{"code": "def collect_changes(self):\n    file_diffs = self._collect_file_diffs()\n    (candidate_feature_diffs, valid_init_diffs, inadmissible_diffs) = self._categorize_file_diffs(file_diffs)\n    new_feature_info = self._collect_feature_info(candidate_feature_diffs)\n    return CollectedChanges(file_diffs, candidate_feature_diffs, valid_init_diffs, inadmissible_diffs, new_feature_info)", "docstring": "Collect file and feature changes\n\nSteps\n1. Collects the files that have changed in this pull request as\ncompared to a comparison branch.\n2. Categorize these file changes into admissible or inadmissible file\nchanges. Admissible file changes solely contribute python files to\nthe contrib subdirectory.\n3. Collect features from admissible new files.\n\nReturns:\nCollectedChanges", "source": "codesearchnet"}
{"code": "def _IsTestFilename(filename):\n  \n  if (filename.endswith('_test.cc') or\n      filename.endswith('_unittest.cc') or\n      filename.endswith('_regtest.cc')):\n    return True\n  else:\n    return False", "docstring": "Determines if the given filename has a suffix that identifies it as a test.\n\nArgs:\nfilename: The input filename.\n\nReturns:\nTrue if 'filename' looks like a test, False otherwise.", "source": "juraj-google-style"}
{"code": "def send_rpc_response(self, rpc_tag, result, response):\n    if (rpc_tag not in self.in_flight_rpcs):\n        raise ArgumentError('In flight RPC could not be found, it may have timed out', rpc_tag=rpc_tag)\n    del self.in_flight_rpcs[rpc_tag]\n    response_message = {'response': response, 'result': result}\n    try:\n        self.rpc_results.set(rpc_tag, response_message)\n    except KeyError:\n        self._logger.warning('RPC response came but no one was waiting: response=%s', response)", "docstring": "Send a response to an RPC.\n\nArgs:\nrpc_tag (str): The exact string given in a previous call to send_rpc_command\nresult (str): The result of the operation.  The possible values of response are:\nservice_not_found, rpc_not_found, timeout, success, invalid_response,\ninvalid_arguments, execution_exception\nresponse (bytes): The raw bytes that we should send back as a response.", "source": "codesearchnet"}
{"code": "def _download_files(self, client, flow_id):\n    \n    output_file_path = os.path.join(\n        self.output_path, '.'.join((flow_id, 'zip')))\n\n    if os.path.exists(output_file_path):\n      print('{0:s} already exists: Skipping'.format(output_file_path))\n      return None\n\n    flow = client.Flow(flow_id)\n    file_archive = flow.GetFilesArchive()\n    file_archive.WriteToFile(output_file_path)\n\n    \n    fqdn = client.data.os_info.fqdn.lower()\n    client_output_file = os.path.join(self.output_path, fqdn)\n    if not os.path.isdir(client_output_file):\n      os.makedirs(client_output_file)\n\n    with zipfile.ZipFile(output_file_path) as archive:\n      archive.extractall(path=client_output_file)\n    os.remove(output_file_path)\n\n    return client_output_file", "docstring": "Download files from the specified flow.\n\nArgs:\nclient: GRR Client object to which to download flow data from.\nflow_id: GRR flow ID.\n\nReturns:\nstr: path of downloaded files.", "source": "juraj-google-style"}
{"code": "def trigger_chain(self):\n    trigger_stream = self.allocator.attach_stream(self.trigger_stream)\n    return (trigger_stream, self.trigger_cond)", "docstring": "Return a NodeInput tuple for creating a node.\n\nReturns:\n(StreamIdentifier, InputTrigger)", "source": "codesearchnet"}
{"code": "def __autofill_form_data(self, form_data, elements):\n        \n\n        for element in elements:\n            if not element[\"name\"] in form_data:\n                continue\n\n            if not len(form_data[element[\"name\"]]) is 0:\n                continue\n\n            if element.name == \"textarea\":\n                form_data[element[\"name\"]] = RandomInputHelper.get_for_type(\"textarea\")\n                continue\n\n            if element.has_attr(\"type\"):\n                form_data[element[\"name\"]] = RandomInputHelper.get_for_type(element[\"type\"])", "docstring": "Autofill empty form data with random data.\n\nArgs:\nform_data (obj): The {key: value} form data\nelements list(obj): Soup elements.\n\nReturns:\nobj: The {key: value}", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    batch_time, seq_length, hidden_size = hidden_states.size()\n    batch_size = batch_time \n    msg_token = self.message_fc(hidden_states[:, 0, :])\n    msg_token = msg_token.view(batch_size, self.num_frames, hidden_size)\n    msg_token = msg_token + self.drop_path(self.message_attn(self.message_ln(msg_token))[0])\n    msg_token = msg_token.view(-1, 1, hidden_size)\n    hidden_states = torch.cat([hidden_states, msg_token], dim=1)\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    hidden_states = hidden_states[:, :seq_length, :]\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\ncausal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nCausal mask for the text model. Mask values selected in `[0, 1]`:\n- 1 for tokens that are **not masked**,\n- 0 for tokens that are **masked**.\n[What are attention masks?](../glossary#attention-mask)\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def gates_to_idx(gates, qregs):\n    \n    sizes = [qr.size for qr in qregs.values()]\n    reg_idx = np.cumsum([0]+sizes)\n    regint = {}\n    for ind, qreg in enumerate(qregs.values()):\n        regint[qreg] = ind\n    out = np.zeros(2*len(gates), dtype=np.int32)\n    for idx, gate in enumerate(gates):\n        out[2*idx] = reg_idx[regint[gate[0][0]]]+gate[0][1]\n        out[2*idx+1] = reg_idx[regint[gate[1][0]]]+gate[1][1]\n    return out", "docstring": "Converts gate tuples into a nested list of integers.\n\nArgs:\ngates (list): List of (QuantumRegister, int) pairs\nrepresenting gates.\nqregs (dict): List of )QuantumRegister, int) tuples.\n\nReturns:\nlist: Nested list of integers for gates.", "source": "juraj-google-style"}
{"code": "def _test_dir(temp_dir, test_name):\n    test_dir = os.path.join(temp_dir, test_name)\n    if os.path.isdir(test_dir):\n        for f in glob.glob('%s/*' % test_dir):\n            os.remove(f)\n    else:\n        os.makedirs(test_dir)\n    return test_dir", "docstring": "Create an empty dir to use for tests.\n\nArgs:\ntemp_dir: Tmp directory path.\ntest_name: Name of the test.\n\nReturns:\nAbsolute path to the test directory.", "source": "github-repos"}
{"code": "def __init__(self, *content: WritableTypes, style_files: Optional[Iterable[str]]=None, styles: Optional[Iterable[str]]=None, script_files: Optional[Iterable[str]]=None, scripts: Optional[Iterable[str]]=None) -> None:\n    super().__init__(*content, style_files=Html.StyleFiles(*(style_files or [])), styles=Html.Styles(*(styles or [])), script_files=Html.ScriptFiles(*(script_files or [])), scripts=Html.Scripts(*(scripts or [])))", "docstring": "Constructor.\n\nArgs:\n*content: One or multiple body part (str, Html, lambda, None) of the HTML.\nstyle_files: URLs for external styles to include.\nstyles: CSS styles to include.\nscript_files: URLs for external scripts to include.\nscripts: JavaScript scripts to include.", "source": "github-repos"}
{"code": "def dismantle_graph(graph) -> None:\n    graph._functions.clear()\n    graph.Dismantle()", "docstring": "Cleans up reference cycles from a `Graph`.\n\nHelpful for making sure the garbage collector doesn't need to run after a\ntemporary `Graph` is no longer needed.\n\nArgs:\ngraph: A `Graph` object to destroy. Neither it nor any of its ops are usable\nafter this function runs.", "source": "github-repos"}
{"code": "def inception_resnet_v2(inputs, nb_classes=1001, is_training=True, dropout_keep_prob=0.8, reuse=None, scope='InceptionResnetV2', create_aux_logits=True, num_classes=None):\n    if (num_classes is not None):\n        warnings.warn('`num_classes` is deprecated. Switch to `nb_classes`. `num_classes` may be removed on or after 2019-04-23.')\n        nb_classes = num_classes\n        del num_classes\n    end_points = {}\n    with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, nb_classes], reuse=reuse) as var_scope:\n        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):\n            (net, end_points) = inception_resnet_v2_base(inputs, scope=var_scope)\n            if create_aux_logits:\n                with tf.variable_scope('AuxLogits'):\n                    aux = end_points['PreAuxLogits']\n                    aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID', scope='Conv2d_1a_3x3')\n                    aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')\n                    aux = slim.conv2d(aux, 768, aux.get_shape()[1:3], padding='VALID', scope='Conv2d_2a_5x5')\n                    aux = slim.flatten(aux)\n                    aux = slim.fully_connected(aux, nb_classes, activation_fn=None, scope='Logits')\n                    end_points['AuxLogits'] = aux\n            with tf.variable_scope('Logits'):\n                net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID', scope='AvgPool_1a_8x8')\n                net = slim.flatten(net)\n                net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='Dropout')\n                end_points['PreLogitsFlatten'] = net\n                logits = slim.fully_connected(net, nb_classes, activation_fn=None, scope='Logits')\n                end_points['Logits'] = logits\n                end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')\n        return (logits, end_points)", "docstring": "Creates the Inception Resnet V2 model.\n\nArgs:\ninputs: a 4-D tensor of size [batch_size, height, width, 3].\nnb_classes: number of predicted classes.\nis_training: whether is training or not.\ndropout_keep_prob: float, the fraction to keep before final layer.\nreuse: whether or not the network and its variables should be reused. To be\nable to reuse 'scope' must be given.\nscope: Optional variable_scope.\ncreate_aux_logits: Whether to include the auxilliary logits.\nnum_classes: depricated alias for nb_classes\n\nReturns:\nlogits: the logits outputs of the model.\nend_points: the set of end_points from the inception model.", "source": "codesearchnet"}
{"code": "def add_evolved_transformer_hparams(hparams):\n  \n  \n  \n  \n  hparams.num_encoder_layers = 3\n  hparams.num_decoder_layers = 4\n\n  \n  \n  hparams.learning_rate_constant /= hparams.learning_rate_warmup_steps ** 0.5\n  hparams.learning_rate_schedule = (\n      \"constant*linear_warmup*single_cycle_cos_decay*rsqrt_hidden_size\")\n  \n  \n  \n  \n  \n  hparams.learning_rate_decay_steps = 250000\n  return hparams", "docstring": "Add Evolved Transformer hparams.\n\nNote: These are for the Adam optimizer, not the Adafactor optimizer used in\nthe paper.\n\nArgs:\nhparams: Current hparams.\n\nReturns:\nhparams updated with Evolved Transformer values.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    mru_values_dict = {}\n    for subkey in registry_key.GetSubkeys():\n        username_value = subkey.GetValueByName('UsernameHint')\n        if (username_value and username_value.data and username_value.DataIsString()):\n            username = username_value.GetDataAsObject()\n        else:\n            username = 'N/A'\n        mru_values_dict[subkey.name] = username\n        event_data = windows_events.WindowsRegistryEventData()\n        event_data.key_path = subkey.path\n        event_data.offset = subkey.offset\n        event_data.regvalue = {'Username hint': username}\n        event_data.source_append = self._SOURCE_APPEND\n        event = time_events.DateTimeValuesEvent(subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = mru_values_dict\n    event_data.source_append = self._SOURCE_APPEND\n    event = time_events.DateTimeValuesEvent(registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Terminal Server Client Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "codesearchnet"}
{"code": "def add_arguments(cls, parser):\n        \n\n        parser.add_argument(\n            '-as-api', '--asana-api',\n            action='store',\n            nargs='?',\n            const='',\n            dest='asana_api',\n            help=\"[setting] asana api key.\",\n            )\n\n        parser.add_argument(\n            '-gh-api', '--github-api',\n            action='store',\n            nargs='?',\n            const='',\n            dest='github_api',\n            help=\"[setting] github api token.\",\n            )\n\n        parser.add_argument(\n            '--first-issue',\n            type=int,\n            action='store',\n            nargs='?',\n            const='',\n            help=\"[setting] only sync issues [FIRST_ISSUE] and above\"\n            )", "docstring": "Add arguments to the parser for collection in app.args.\n\nArgs:\nparser:\n`argparse.ArgumentParser`. Parser.\nArguments added here are server on\nself.args.", "source": "juraj-google-style"}
{"code": "def get_workers_list(cluster_resolver):\n    worker_job_name = 'worker'\n    cluster_spec = cluster_resolver.cluster_spec()\n    if not cluster_spec:\n        raise errors.UnavailableError('None', 'None', 'Cluster spec not found, your client must run in GCE environment.')\n    task_indices = cluster_spec.task_indices(worker_job_name)\n    workers_list = [cluster_spec.task_address(worker_job_name, i).replace(':8470', ':8466') for i in task_indices]\n    return ','.join(workers_list)", "docstring": "Returns a comma separated list of TPU worker host:port pairs.\n\nGets cluster_spec from cluster_resolver. Use the worker's task indices to\nobtain and return a list of host:port pairs.\n\nArgs:\ncluster_resolver: TensorFlow TPUClusterResolver instance.\n\nReturns:\nA string of comma separated list of host:port pairs. For example:\n'10.2.0.1:8466,10.2.0.2:8466,10.2.0.3:8466,10.2.0.4:8466'\n\nRaises:\nUnavailableError: cluster_resolver doesn't contain a valid cluster_spec.", "source": "github-repos"}
{"code": "def _save_tensor_value_to_tmp_cache(self, cache_idx, updates, graph):\n    updates = self._merge_tensor_signatures(updates)\n    updates = array_ops.reshape(updates, [self._num_signature_dimensions()])\n    if graph not in self._temp_cache_var:\n        raise RuntimeError('graph is not in self._temp_cache_var')\n    if cache_idx >= len(self._temp_cache_var[graph]):\n        raise RuntimeError('cache_idx (%d) is out of range (%d)' % (cache_idx, len(self._temp_cache_var[graph])))\n    self._temp_cache_var[graph][cache_idx] = updates", "docstring": "Returns an op that will save the given updates to an entry in the cache.\n\nArgs:\ncache_idx: The cache index of the tensor within the cache.\nupdates: A dictionary of the signature updates from signature name to\na tensor of dimension [1].\ngraph: A TensorFlow graph.\nRaises:\nRuntimeError:\n(1) graph is not already in self._temp_cache_var, or\n(2) cache_idx is out of range.", "source": "github-repos"}
{"code": "def activate_vcenter(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        activate = kwargs.pop('activate', True)\n        vcenter_args = dict(id=name)\n        method_class = self._brocade_vswitch\n        if activate:\n            method_name = 'vcenter_activate'\n            vcenter_attr = getattr(method_class, method_name)\n            config = vcenter_attr(**vcenter_args)\n            output = self._callback(config)\n            print output\n            return output\n        else:\n            pass", "docstring": "Activate vCenter on the switch\n\nArgs:\nname: (str) : Name of an established vCenter\nactivate (bool) : Activates the vCenter if activate=True\nelse deactivates it\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def _split_result_for_readers(axis, num_splits, df):  \n    \n    splits = split_result_of_axis_func_pandas(axis, num_splits, df)\n    if not isinstance(splits, list):\n        splits = [splits]\n    return splits", "docstring": "Splits the DataFrame read into smaller DataFrames and handles all edge cases.\n\nArgs:\naxis: Which axis to split over.\nnum_splits: The number of splits to create.\ndf: The DataFrame after it has been read.\n\nReturns:\nA list of pandas DataFrames.", "source": "juraj-google-style"}
{"code": "def add_arguments(self, parser):\n        \n        parser.add_argument('-p', '--product', action='store_true',\n                            help='print the production information')\n        parser.add_argument('-j', '--jtag', action='store_true',\n                            help='print the JTAG pin status')\n        return self.add_common_arguments(parser, False)", "docstring": "Adds the information commands to the parser.\n\nArgs:\nself (InfoCommand): the ``InfoCommand`` instance\nparser (argparse.ArgumentParser): the parser to add the arguments to\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def parse(self, filename):\n        \n        with io.open(filename, 'r', encoding='utf-8') as _:\n            lines = _.readlines()\n\n        all_source_files = set()\n        source_map = {}\n\n        lineno = 0\n        root = None\n        index = None\n        cur_level = -1\n        parent_queue = []\n\n        for line in lines:\n            try:\n                level, line = dedent(line)\n                if line.startswith('\n                    lineno += 1\n                    continue\n                elif line.startswith('\\\\\n                    line = line[1:]\n            except IndentError as exc:\n                error('bad-indent', 'Invalid indentation', filename=filename,\n                      lineno=lineno, column=exc.column)\n\n            if not line:\n                lineno += 1\n                continue\n\n            source_file = dequote(line)\n\n            if not source_file:\n                lineno += 1\n                continue\n\n            if source_file in all_source_files:\n                error('sitemap-duplicate', 'Filename listed twice',\n                      filename=filename, lineno=lineno, column=level * 8 + 1)\n\n            all_source_files.add(source_file)\n            source_map[source_file] = (lineno, level * 8 + 1)\n\n            page = OrderedDict()\n\n            if root is not None and level == 0:\n                error('sitemap-error', 'Sitemaps only support one root',\n                      filename=filename, lineno=lineno, column=0)\n\n            if root is None:\n                root = page\n                index = source_file\n            else:\n                lvl_diff = cur_level - level\n                while lvl_diff >= 0:\n                    parent_queue.pop()\n                    lvl_diff -= 1\n\n                parent_queue[-1][source_file] = page\n\n            parent_queue.append(page)\n\n            cur_level = level\n\n            lineno += 1\n\n        return Sitemap(root, filename, index, source_map)", "docstring": "Parse a sitemap file.\n\nArgs:\nfilename: str, the path to the sitemap file.\n\nReturns:\nSitemap: the generated sitemap.", "source": "juraj-google-style"}
{"code": "def color_set_hsv(c: Color, h: float, s: float, v: float) -> None:\n    \n    new_color = ffi.new(\"TCOD_color_t*\")\n    lib.TCOD_color_set_HSV(new_color, h, s, v)\n    c[:] = new_color.r, new_color.g, new_color.b", "docstring": "Set a color using: hue, saturation, and value parameters.\n\nDoes not return a new Color.  ``c`` is modified inplace.\n\nArgs:\nc (Union[Color, List[Any]]): A Color instance, or a list of any kind.\nh (float): Hue, from 0 to 360.\ns (float): Saturation, from 0 to 1.\nv (float): Value, from 0 to 1.", "source": "juraj-google-style"}
{"code": "def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False):\n    if tensor_type is None:\n        return self\n    if not isinstance(tensor_type, TensorType):\n        tensor_type = TensorType(tensor_type)\n    if tensor_type == TensorType.TENSORFLOW:\n        if not is_tf_available():\n            raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')\n        import tensorflow as tf\n        as_tensor = tf.constant\n        is_tensor = tf.is_tensor\n    elif tensor_type == TensorType.PYTORCH:\n        if not is_torch_available():\n            raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')\n        import torch\n        is_tensor = torch.is_tensor\n\n        def as_tensor(value, dtype=None):\n            if isinstance(value, list) and isinstance(value[0], np.ndarray):\n                return torch.from_numpy(np.array(value))\n            return torch.tensor(value)\n    elif tensor_type == TensorType.JAX:\n        if not is_flax_available():\n            raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')\n        import jax.numpy as jnp\n        as_tensor = jnp.array\n        is_tensor = is_jax_tensor\n    elif tensor_type == TensorType.MLX:\n        if not is_mlx_available():\n            raise ImportError('Unable to convert output to MLX tensors format, MLX is not installed.')\n        import mlx.core as mx\n        as_tensor = mx.array\n\n        def is_tensor(obj):\n            return isinstance(obj, mx.array)\n    else:\n\n        def as_tensor(value, dtype=None):\n            if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):\n                value_lens = [len(val) for val in value]\n                if len(set(value_lens)) > 1 and dtype is None:\n                    value = as_tensor([np.asarray(val) for val in value], dtype=object)\n            return np.asarray(value, dtype=dtype)\n        is_tensor = is_numpy_array\n    for key, value in self.items():\n        try:\n            if prepend_batch_axis:\n                value = [value]\n            if not is_tensor(value):\n                tensor = as_tensor(value)\n                self[key] = tensor\n        except Exception as e:\n            if key == 'overflowing_tokens':\n                raise ValueError('Unable to create tensor returning overflowing tokens of different lengths. Please see if a fast version of this tokenizer is available to have this feature available.') from e\n            raise ValueError(f\"Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is expected).\") from e\n    return self", "docstring": "Convert the inner content to tensors.\n\nArgs:\ntensor_type (`str` or [`~utils.TensorType`], *optional*):\nThe type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If\n`None`, no modification is done.\nprepend_batch_axis (`int`, *optional*, defaults to `False`):\nWhether or not to add the batch dimension during the conversion.", "source": "github-repos"}
{"code": "def getStreamNetworkAsGeoJson(self, session, withNodes=True):\n    features_list = []\n    for link in self.streamLinks:\n        link_geoJson = link.getAsGeoJson(session)\n        if link_geoJson:\n            link_geometry = json.loads(link.getAsGeoJson(session))\n            link_properties = {'link_number': link.linkNumber, 'type': link.type, 'num_elements': link.numElements, 'dx': link.dx, 'erode': link.erode, 'subsurface': link.subsurface}\n            link_feature = {'type': 'Feature', 'geometry': link_geometry, 'properties': link_properties, 'id': link.id}\n            features_list.append(link_feature)\n        if withNodes:\n            for node in link.nodes:\n                node_geoJson = node.getAsGeoJson(session)\n                if node_geoJson:\n                    node_geometry = json.loads(node_geoJson)\n                node_properties = {'link_number': link.linkNumber, 'node_number': node.nodeNumber, 'elevation': node.elevation}\n                node_feature = {'type': 'Feature', 'geometry': node_geometry, 'properties': node_properties, 'id': node.id}\n                features_list.append(node_feature)\n    feature_collection = {'type': 'FeatureCollection', 'features': features_list}\n    return json.dumps(feature_collection)", "docstring": "Retrieve the stream network geometry in GeoJSON format.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nwithNodes (bool, optional): Include nodes. Defaults to False.\n\nReturns:\nstr: GeoJSON string.", "source": "codesearchnet"}
{"code": "def parse_mapreduce_yaml(contents):\n  \n  try:\n    builder = yaml_object.ObjectBuilder(MapReduceYaml)\n    handler = yaml_builder.BuilderHandler(builder)\n    listener = yaml_listener.EventListener(handler)\n    listener.Parse(contents)\n\n    mr_info = handler.GetResults()\n  except (ValueError, yaml_errors.EventError), e:\n    raise errors.BadYamlError(e)\n\n  if len(mr_info) < 1:\n    raise errors.BadYamlError(\"No configs found in mapreduce.yaml\")\n  if len(mr_info) > 1:\n    raise errors.MultipleDocumentsInMrYaml(\"Found %d YAML documents\" %\n                                           len(mr_info))\n\n  jobs = mr_info[0]\n  job_names = set(j.name for j in jobs.mapreduce)\n  if len(jobs.mapreduce) != len(job_names):\n    raise errors.BadYamlError(\n        \"Overlapping mapreduce names; names must be unique\")\n\n  return jobs", "docstring": "Parses mapreduce.yaml file contents.\n\nArgs:\ncontents: mapreduce.yaml file contents.\n\nReturns:\nMapReduceYaml object with all the data from original file.\n\nRaises:\nerrors.BadYamlError: when contents is not a valid mapreduce.yaml file.", "source": "juraj-google-style"}
{"code": "def approve(self, peer_jid):\n        \n        self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare())", "docstring": "Approve a subscription request from jid\n\nArgs:\npeer_jid (str): the JID to approve", "source": "juraj-google-style"}
{"code": "def memory_read16(self, addr, num_halfwords, zone=None):\n    return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)", "docstring": "Reads memory from the target system in units of 16-bits.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_halfwords (int): number of half words to read\nzone (str): memory zone to read from\n\nReturns:\nList of halfwords read from the target system.\n\nRaises:\nJLinkException: if memory could not be read", "source": "codesearchnet"}
{"code": "def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):\n  \n  \n  \n  \n  line = clean_lines.elided[linenum]\n  declarator_end = line.rfind(')')\n  if declarator_end >= 0:\n    fragment = line[declarator_end:]\n  else:\n    if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:\n      fragment = line\n    else:\n      return\n\n  \n  if Search(r'\\boverride\\b', fragment) and Search(r'\\bfinal\\b', fragment):\n    error(filename, linenum, 'readability/inheritance', 4,\n          ('\"override\" is redundant since function is '\n           'already declared as \"final\"'))", "docstring": "Check if line contains a redundant \"override\" or \"final\" virt-specifier.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def resorted(values):\n    if (not values):\n        return values\n    values = sorted(values)\n    first_word = next((cnt for (cnt, val) in enumerate(values) if (val and (not val[0].isdigit()))), None)\n    if (first_word is None):\n        return values\n    words = values[first_word:]\n    numbers = values[:first_word]\n    return (words + numbers)", "docstring": "Sort values, but put numbers after alphabetically sorted words.\n\nThis function is here to make outputs diff-compatible with Aleph.\n\nExample::\n>>> sorted([\"b\", \"1\", \"a\"])\n['1', 'a', 'b']\n>>> resorted([\"b\", \"1\", \"a\"])\n['a', 'b', '1']\n\nArgs:\nvalues (iterable): any iterable object/list/tuple/whatever.\n\nReturns:\nlist of sorted values, but with numbers after words", "source": "codesearchnet"}
{"code": "def from_index_amount(cls, idx, amount):\n        \n        if np.array(idx).ndim == 0:\n            v = np.zeros(6)\n            v[idx] = amount\n            return cls.from_voigt(v)\n        elif np.array(idx).ndim == 1:\n            v = np.zeros((3, 3))\n            for i in itertools.permutations(idx):\n                v[i] = amount\n            return cls(v)\n        else:\n            raise ValueError(\"Index must either be 2-tuple or integer \"\n                             \"corresponding to full-tensor or voigt index\")", "docstring": "Like Deformation.from_index_amount, except generates\na strain from the zero 3x3 tensor or voigt vector with\nthe amount specified in the index location.  Ensures\nsymmetric strain.\n\nArgs:\nidx (tuple or integer): index to be perturbed, can be voigt or\nfull-tensor notation\namount (float): amount to perturb selected index", "source": "juraj-google-style"}
{"code": "def _rows_event_to_dict(e, stream):\n    pk_cols = (e.primary_key if isinstance(e.primary_key, (list, tuple)) else (e.primary_key,))\n    if isinstance(e, row_event.UpdateRowsEvent):\n        sig = signals.rows_updated\n        action = 'update'\n        row_converter = _convert_update_row\n    elif isinstance(e, row_event.WriteRowsEvent):\n        sig = signals.rows_inserted\n        action = 'insert'\n        row_converter = _convert_write_row\n    elif isinstance(e, row_event.DeleteRowsEvent):\n        sig = signals.rows_deleted\n        action = 'delete'\n        row_converter = _convert_write_row\n    else:\n        assert False, 'Invalid binlog event'\n    meta = {'time': e.timestamp, 'log_pos': stream.log_pos, 'log_file': stream.log_file, 'schema': e.schema, 'table': e.table, 'action': action}\n    rows = list(map(row_converter, e.rows))\n    for row in rows:\n        row['keys'] = {k: row['values'][k] for k in pk_cols}\n    return (rows, meta)", "docstring": "Convert RowsEvent to a dict\n\nArgs:\ne (pymysqlreplication.row_event.RowsEvent): the event\nstream (pymysqlreplication.BinLogStreamReader):\nthe stream that yields event\n\nReturns:\ndict: event's data as a dict", "source": "codesearchnet"}
{"code": "def get_item(env, name, default=None):\n    for key in name.split('.'):\n        if (isinstance(env, dict) and (key in env)):\n            env = env[key]\n        elif (isinstance(env, types.ModuleType) and (key in env.__dict__)):\n            env = env.__dict__[key]\n        else:\n            return default\n    return env", "docstring": "Get an item from a dictionary, handling nested lookups with dotted notation.\n\nArgs:\nenv: the environment (dictionary) to use to look up the name.\nname: the name to look up, in dotted notation.\ndefault: the value to return if the name if not found.\n\nReturns:\nThe result of looking up the name, if found; else the default.", "source": "codesearchnet"}
{"code": "def export_as_file(self, filepath, hyperparameters):\n    if (not filepath.endswith('.py')):\n        filepath += '.py'\n    file_contents = ''\n    file_contents += self.source\n    file_contents += '\\n\\nbase_learner.set_params(**{})\\n'.format(hyperparameters)\n    file_contents += '\\nmeta_feature_generator = \"{}\"\\n'.format(self.meta_feature_generator)\n    with open(filepath, 'wb') as f:\n        f.write(file_contents.encode('utf8'))", "docstring": "Generates a Python file with the importable base learner set to ``hyperparameters``\n\nThis function generates a Python file in the specified file path that contains\nthe base learner as an importable variable stored in ``base_learner``. The base\nlearner will be set to the appropriate  hyperparameters through ``set_params``.\n\nArgs:\nfilepath (str, unicode): File path to save file in\n\nhyperparameters (dict): Dictionary to use for ``set_params``", "source": "codesearchnet"}
{"code": "def attention_bias_same_segment(query_segment_id, memory_segment_id):\n  \n  ret = (tf.to_float(\n      tf.not_equal(\n          tf.expand_dims(query_segment_id, 2),\n          tf.expand_dims(memory_segment_id, 1))) *\n         large_compatible_negative(memory_segment_id.dtype))\n  return tf.expand_dims(ret, axis=1)", "docstring": "Create an bias tensor to be added to attention logits.\n\nPositions with the same segment_ids can see each other.\n\nArgs:\nquery_segment_id: a float `Tensor` with shape [batch, query_length].\nmemory_segment_id: a float `Tensor` with shape [batch, memory_length].\n\nReturns:\na `Tensor` with shape [batch, 1, query_length, memory_length].", "source": "juraj-google-style"}
{"code": "def goto(self, rules, symbol):\n    return self.closure({rule.move_dot() for rule in rules if ((not rule.at_end) and (rule.rhs[rule.pos] == symbol))})", "docstring": "Computes the next closure for rules based on the symbol we got.\n\nArgs:\nrules - an iterable of DottedRules\nsymbol - a string denoting the symbol we've just seen\n\nReturns: frozenset of DottedRules", "source": "codesearchnet"}
{"code": "def wrap_cc(filepath, compiler, project, python=sys.executable, detect_project=False):\n    env = __create_jinja_env()\n    template = env.get_template('run_compiler.py.inc')\n    cc_fname = local.path(filepath).with_suffix('.benchbuild.cc', depth=0)\n    cc_f = persist(compiler, filename=cc_fname)\n    project_file = persist(project, suffix='.project')\n    with open(filepath, 'w') as wrapper:\n        wrapper.write(template.render(cc_f=cc_f, project_file=project_file, python=python, detect_project=detect_project))\n    chmod('+x', filepath)\n    LOG.debug('Placed wrapper in: %s for compiler %s', local.path(filepath), str(compiler))\n    LOG.debug('Placed project in: %s', local.path(project_file))\n    LOG.debug('Placed compiler command in: %s', local.path(cc_f))\n    return local[filepath]", "docstring": "Substitute a compiler with a script that hides CFLAGS & LDFLAGS.\n\nThis will generate a wrapper script in the current directory\nand return a complete plumbum command to it.\n\nArgs:\nfilepath (str): Path to the wrapper script.\ncompiler (benchbuild.utils.cmd):\nReal compiler command we should call in the script.\nproject (benchbuild.project.Project):\nThe project this compiler will be for.\npython (str): Path to the python interpreter we should use.\ndetect_project: Should we enable project detection or not.\n\nReturns (benchbuild.utils.cmd):\nCommand of the new compiler we can call.", "source": "codesearchnet"}
{"code": "def dump(self, out_path, header=True):\n    if (sys.version_info[0] < 3):\n        mode = 'wb'\n    else:\n        mode = 'w'\n    with open(out_path, mode) as outfile:\n        writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)\n        if header:\n            writer.writerow(['Timestamp', 'Tile Address', 'Property Name', 'Value'])\n        for entry in self.changes:\n            writer.writerow([entry.time, entry.tile, entry.property, entry.string_value])", "docstring": "Save this list of changes as a csv file at out_path.\n\nThe format of the output file will be a CSV with 4 columns:\ntimestamp, tile address, property, string_value\n\nThere will be a single header row starting the CSV output unless\nheader=False is passed.\n\nArgs:\nout_path (str): The path where we should save our current list of\nchanges.\nheader (bool): Whether we should include a header row in the csv\nfile.  Defaults to True.", "source": "codesearchnet"}
{"code": "def _follow_leafref(\n            self, xpath: \"Expr\", init: \"TerminalNode\") -> Optional[\"DataNode\"]:\n        \n        if isinstance(xpath, LocationPath):\n            lft = self._follow_leafref(xpath.left, init)\n            if lft is None:\n                return None\n            return lft._follow_leafref(xpath.right, init)\n        elif isinstance(xpath, Step):\n            if xpath.axis == Axis.parent:\n                return self.data_parent()\n            elif xpath.axis == Axis.child:\n                if isinstance(self, InternalNode) and xpath.qname:\n                    qname = (xpath.qname if xpath.qname[1]\n                             else (xpath.qname[0], init.ns))\n                    return self.get_data_child(*qname)\n        elif isinstance(xpath, Root):\n            return self.schema_root()\n        return None", "docstring": "Return the data node referred to by a leafref path.\n\nArgs:\nxpath: XPath expression compiled from a leafref path.\ninit: initial context node", "source": "juraj-google-style"}
{"code": "def start_task(self, task_type, task_id):\n    assert self._mpr\n    if not self._start_events[task_type][task_id].is_set() or not self._finish_events[task_type][task_id].is_set():\n        raise ValueError('The task %s:%d is still alive. You cannot start another one.' % (task_type, task_id))\n    self._start_events[task_type][task_id] = self._mpr_manager.Event()\n    self._finish_events[task_type][task_id] = self._mpr_manager.Event()\n    self._mpr.start_single_process(task_type=task_type, task_id=task_id)\n    self._start_events[task_type][task_id].wait()", "docstring": "Starts a server given task_type and task_id.\n\nArgs:\ntask_type: the type of the task such as \"worker\".\ntask_id: the id the task such as 1.\n\nRaises:\nValueError: if the server already exists.", "source": "github-repos"}
{"code": "def serialize(self) -> dict:\n    data = {**self}\n    if ('attachments' in self):\n        data['attachments'] = json.dumps(self['attachments'])\n    return data", "docstring": "Serialize the message for sending to slack API\n\nReturns:\nserialized message", "source": "codesearchnet"}
{"code": "def _get_or_load_domain(self, domain):\n        \n        if isinstance(domain, six.string_types):\n            if domain in self.domains:\n                return self.domains[domain]\n            elif exists(domain):\n                with open(domain, 'r') as fobj:\n                    domain = json.load(fobj)\n            else:\n                raise ValueError(\"No domain could be found/loaded from input \"\n                                 \"'{}'; value must be either the name of an \"\n                                 \"existing Domain, or a valid path to a \"\n                                 \"configuration file.\".format(domain))\n\n        \n        name = domain['name']\n        if name in self.domains:\n            msg = (\"Domain with name '{}' already exists; returning existing \"\n                   \"Domain configuration.\".format(name))\n            warnings.warn(msg)\n            return self.domains[name]\n\n        entities = domain.get('entities', [])\n        domain = Domain(domain)\n        for e in entities:\n            self.add_entity(domain=domain, **e)\n        self.domains[name] = domain\n        return self.domains[name]", "docstring": "Return a domain if one already exists, or create a new one if not.\n\nArgs:\ndomain (str, dict): Can be one of:\n- The name of the Domain to return (fails if none exists)\n- A path to the Domain configuration file\n- A dictionary containing configuration information", "source": "juraj-google-style"}
{"code": "def add_properties(props, mol):\n    if (not props):\n        return\n    for (_, atom) in mol.atoms_iter():\n        atom.charge = 0\n        atom.multi = 1\n        atom.mass = None\n    for prop in props.get('CHG', []):\n        mol.atom(prop[0]).charge = prop[1]\n    for prop in props.get('RAD', []):\n        mol.atom(prop[0]).multi = prop[1]\n    for prop in props.get('ISO', []):\n        mol.atom(prop[0]).mass = prop[1]", "docstring": "apply properties to the molecule object\n\nReturns:\nNone (alter molecule object directly)", "source": "codesearchnet"}
{"code": "def result(self):\n    raise NotImplementedError", "docstring": "Compute the current metric value.\n\nReturns:\nA scalar tensor, or a dictionary of scalar tensors.", "source": "github-repos"}
{"code": "def validate_with_tags(self, tags, confidence):\n        \n        result = {'intent_type': self.name}\n        intent_confidence = 0.0\n        local_tags = tags[:]\n        used_tags = []\n\n        for require_type, attribute_name in self.requires:\n            required_tag, canonical_form, confidence = find_first_tag(local_tags, require_type)\n            if not required_tag:\n                result['confidence'] = 0.0\n                return result, []\n\n            result[attribute_name] = canonical_form\n            if required_tag in local_tags:\n                local_tags.remove(required_tag)\n            used_tags.append(required_tag)\n            \n            intent_confidence += confidence\n\n        if len(self.at_least_one) > 0:\n            best_resolution = resolve_one_of(tags, self.at_least_one)\n            if not best_resolution:\n                result['confidence'] = 0.0\n                return result, []\n            else:\n                for key in best_resolution:\n                    result[key] = best_resolution[key][0].get('key') \n                    intent_confidence += 1.0\n                used_tags.append(best_resolution)\n                if best_resolution in local_tags:\n                    local_tags.remove(best_resolution)\n\n        for optional_type, attribute_name in self.optional:\n            optional_tag, canonical_form, conf = find_first_tag(local_tags, optional_type)\n            if not optional_tag or attribute_name in result:\n                continue\n            result[attribute_name] = canonical_form\n            if optional_tag in local_tags:\n                local_tags.remove(optional_tag)\n            used_tags.append(optional_tag)\n            intent_confidence += 1.0\n\n        total_confidence = intent_confidence / len(tags) * confidence\n\n        target_client, canonical_form, confidence = find_first_tag(local_tags, CLIENT_ENTITY_NAME)\n\n        result['target'] = target_client.get('key') if target_client else None\n        result['confidence'] = total_confidence\n\n        return result, used_tags", "docstring": "Validate weather tags has required entites for this intent to fire\n\nArgs:\ntags(list): Tags and Entities used for validation\nconfidence(float): ?\n\nReturns:\nintent, tags: Returns intent and tags used by the intent on\nfalure to meat required entities then returns intent with confidence\nof 0.0 and an empty list for tags.", "source": "juraj-google-style"}
{"code": "def is_same_file(path1, path2):\n    return (path1 and path2 and os.path.isfile(path1) and os.path.isfile(path2) and os.path.samefile(path1, path2))", "docstring": "Return True if path1 is the same file as path2.\n\nThe reason for this dance is that samefile throws if either file doesn't\nexist.\n\nArgs:\npath1: str or path-like.\npath2: str or path-like.\n\nReturns:\nbool. True if the same file, False if not.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.DeleteLog = channel.unary_unary(\n            \"/google.logging.v2.LoggingServiceV2/DeleteLog\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.DeleteLogRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.WriteLogEntries = channel.unary_unary(\n            \"/google.logging.v2.LoggingServiceV2/WriteLogEntries\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.WriteLogEntriesResponse.FromString,\n        )\n        self.ListLogEntries = channel.unary_unary(\n            \"/google.logging.v2.LoggingServiceV2/ListLogEntries\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogEntriesResponse.FromString,\n        )\n        self.ListMonitoredResourceDescriptors = channel.unary_unary(\n            \"/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListMonitoredResourceDescriptorsResponse.FromString,\n        )\n        self.ListLogs = channel.unary_unary(\n            \"/google.logging.v2.LoggingServiceV2/ListLogs\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__pb2.ListLogsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def reset(self, entries_to_reset):\n    \n    num_updates = tf.size(entries_to_reset)\n    update_vals = tf.scatter_update(\n        self.mem_vals, entries_to_reset,\n        tf.tile(tf.expand_dims(\n            tf.fill([self.memory_size, self.val_depth], .0), 0),\n                [num_updates, 1, 1]))\n    update_logits = tf.scatter_update(\n        self.mean_logits, entries_to_reset,\n        tf.tile(tf.expand_dims(\n            tf.fill([self.memory_size], .0), 0),\n                [num_updates, 1]))\n    reset_op = tf.group([update_vals, update_logits])\n    return reset_op", "docstring": "Reset the entries in the memory.\n\nArgs:\nentries_to_reset: a 1D tensor.\nReturns:\nthe reset op.", "source": "juraj-google-style"}
{"code": "def _PopulateQuantilesHistogram(self, hist, nums):\n    \n    if not nums:\n      return\n    num_quantile_buckets = 10\n    quantiles_to_get = [\n        x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)\n    ]\n    quantiles = np.percentile(nums, quantiles_to_get)\n    hist.type = self.histogram_proto.QUANTILES\n    quantiles_sample_count = float(len(nums)) / num_quantile_buckets\n    for low, high in zip(quantiles, quantiles[1:]):\n      hist.buckets.add(\n          low_value=low, high_value=high, sample_count=quantiles_sample_count)", "docstring": "Fills in the histogram with quantile information from the provided array.\nArgs:\nhist: A Histogram proto message to fill in.\nnums: A list of numbers to create a quantiles histogram from.", "source": "juraj-google-style"}
{"code": "def device_configuration(self, pending=False, use_included=False):\n    device_configs = self.device_configurations(use_included=use_included)\n    for device_config in device_configs:\n        if (device_config.is_loaded() is not pending):\n            return device_config\n    return None", "docstring": "Get a specific device configuration.\n\nA device can have at most one loaded and one pending device\nconfiguration. This returns that device_configuration based on\na given flag.\n\nKeyword Args:\n\npending(bool): Fetch the pending configuration or return\nthe loaded one.\n\nuse_included(bool): Use included resources in this device\nconfiguration.\n\nReturns:\n\nThe requested loaded or pending configuration or None if\nno device configuration is found.", "source": "codesearchnet"}
{"code": "def readline(self, size=(- 1)):\n    self._check_open()\n    if ((size == 0) or (not self._remaining())):\n        return ''\n    data_list = []\n    newline_offset = self._buffer.find_newline(size)\n    while (newline_offset < 0):\n        data = self._buffer.read(size)\n        size -= len(data)\n        self._offset += len(data)\n        data_list.append(data)\n        if ((size == 0) or (not self._remaining())):\n            return ''.join(data_list)\n        self._buffer.reset(self._buffer_future.get_result())\n        self._request_next_buffer()\n        newline_offset = self._buffer.find_newline(size)\n    data = self._buffer.read_to_offset((newline_offset + 1))\n    self._offset += len(data)\n    data_list.append(data)\n    return ''.join(data_list)", "docstring": "Read one line delimited by '\\n' from the file.\n\nA trailing newline character is kept in the string. It may be absent when a\nfile ends with an incomplete line. If the size argument is non-negative,\nit specifies the maximum string size (counting the newline) to return.\nA negative size is the same as unspecified. Empty string is returned\nonly when EOF is encountered immediately.\n\nArgs:\nsize: Maximum number of bytes to read. If not specified, readline stops\nonly on '\\n' or EOF.\n\nReturns:\nThe data read as a string.\n\nRaises:\nIOError: When this buffer is closed.", "source": "codesearchnet"}
{"code": "def parse_args(test: ArgList=None) -> argparse.Namespace:\n    parser = argparse.ArgumentParser(prog='budoux', formatter_class=lambda prog: BudouxHelpFormatter(prog, **{'width': shutil.get_terminal_size(fallback=(120, 50)).columns, 'max_help_position': 30}), description=textwrap.dedent('        BudouX is the successor to Budou,\\n        the machine learning powered line break organizer tool.'), epilog='\\n- '.join(['supported languages of `-l`, `--lang`:', *langs.keys()]))\n    parser.add_argument('text', metavar='TXT', nargs='?', type=str, help='text')\n    parser.add_argument('-H', '--html', action='store_true', help='HTML mode')\n    model_select_group = parser.add_mutually_exclusive_group()\n    model_select_group.add_argument('-m', '--model', metavar='JSON', type=check_file, default=check_lang('ja'), help='custom model file path')\n    model_select_group.add_argument('-l', '--lang', metavar='LANG', type=check_lang, help='language of custom model')\n    parser.add_argument('-s', '--sep', metavar='STR', type=str, default='\\n', help='output phrase separator in TEXT mode')\n    parser.add_argument('-d', '--delim', metavar='STR', type=str, default='---', help='output sentence delimiter in TEXT mode')\n    parser.add_argument('-V', '--version', action='version', version='%(prog)s {}'.format(budoux.__version__))\n    if test is not None:\n        return parser.parse_args(test)\n    else:\n        return parser.parse_args()", "docstring": "Parse commandline arguments.\n\nArgs:\ntest (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None.\n\nReturns:\nargparse.Namespace: Parsed data of args.", "source": "github-repos"}
{"code": "def VisitTypeDeclUnit(self, node):\n    if not self._star_imports:\n        return node\n    star_import_names = set()\n    p = self._ModulePrefix()\n    for x in self._star_imports:\n        if x.startswith(p):\n            star_import_names.add(x + '.*')\n        star_import_names.add(p + x + '.*')\n    new_aliases = []\n    new_getattrs = set()\n    for module in self._star_imports:\n        aliases, getattrs = self._ImportAll(module)\n        new_aliases.extend(aliases)\n        new_getattrs.update(getattrs)\n    new_aliases = self._DiscardExistingNames(node, new_aliases)\n    new_getattrs = self._DiscardExistingNames(node, new_getattrs)\n    new_aliases = self._HandleDuplicates(new_aliases)\n    if len(new_getattrs) > 1:\n        raise KeyError('Multiple __getattr__ definitions')\n    return node.Replace(functions=node.functions + tuple(new_getattrs), aliases=tuple((a for a in node.aliases if a.name not in star_import_names)) + tuple(new_aliases))", "docstring": "Add star imports to the ast.\n\nArgs:\nnode: A pytd.TypeDeclUnit instance.\n\nReturns:\nThe pytd.TypeDeclUnit instance, with star imports added.\n\nRaises:\nKeyError: If a duplicate member is found during import.", "source": "github-repos"}
{"code": "def threshold(self) -> float:\n    return self._cutoff", "docstring": "Returns the fixed cutoff threshold value.\n\nReturns:\nfloat: The fixed threshold value.", "source": "github-repos"}
{"code": "def check_lang(lang: str) -> Path:\n    if lang in langs:\n        return langs[lang]\n    else:\n        raise argparse.ArgumentTypeError(f\"'{lang}' does not exist in builtin models. (supported languages: {list(langs.keys())})\")", "docstring": "Check if given language exists or not.\n\nArgs:\nlang (str): language code (e.g.: 'ja')\n\nRaises:\nargparse.ArgumentTypeError: Raise if no model for given language exists.\n\nReturns:\nThe model path.", "source": "github-repos"}
{"code": "def inference_q(self, next_action_arr):\n        \n        q_arr = next_action_arr.reshape((next_action_arr.shape[0], -1))\n        self.__q_arr_list.append(q_arr)\n        while len(self.__q_arr_list) > self.__seq_len:\n            self.__q_arr_list = self.__q_arr_list[1:]\n        while len(self.__q_arr_list) < self.__seq_len:\n            self.__q_arr_list.append(self.__q_arr_list[-1])\n\n        q_arr = np.array(self.__q_arr_list)\n        q_arr = q_arr.transpose((1, 0, 2))\n        q_arr = self.__lstm_model.inference(q_arr)\n        return q_arr[:, -1].reshape((q_arr.shape[0], 1))", "docstring": "Infernce Q-Value.\n\nArgs:\nnext_action_arr:     `np.ndarray` of action.\n\nReturns:\n`np.ndarray` of Q-Values.", "source": "juraj-google-style"}
{"code": "def _dataset_load_from_hdx(self, id_or_name):\n    if (not self._load_from_hdx('dataset', id_or_name)):\n        return False\n    self._dataset_create_resources()\n    return True", "docstring": "Loads the dataset given by either id or name from HDX\n\nArgs:\nid_or_name (str): Either id or name of dataset\n\nReturns:\nbool: True if loaded, False if not", "source": "codesearchnet"}
{"code": "def set_axis(self, labels, axis=0, inplace=None):\n        \n        if is_scalar(labels):\n            warnings.warn(\n                'set_axis now takes \"labels\" as first argument, and '\n                '\"axis\" as named parameter. The old form, with \"axis\" as '\n                'first parameter and \"labels\" as second, is still supported '\n                \"but will be deprecated in a future version of pandas.\",\n                FutureWarning,\n                stacklevel=2,\n            )\n            labels, axis = axis, labels\n        if inplace is None:\n            warnings.warn(\n                \"set_axis currently defaults to operating inplace.\\nThis \"\n                \"will change in a future version of pandas, use \"\n                \"inplace=True to avoid this warning.\",\n                FutureWarning,\n                stacklevel=2,\n            )\n            inplace = True\n        if inplace:\n            setattr(self, pandas.DataFrame()._get_axis_name(axis), labels)\n        else:\n            obj = self.copy()\n            obj.set_axis(labels, axis=axis, inplace=True)\n            return obj", "docstring": "Assign desired index to given axis.\n\nArgs:\nlabels (pandas.Index or list-like): The Index to assign.\naxis (string or int): The axis to reassign.\ninplace (bool): Whether to make these modifications inplace.\n\nReturns:\nIf inplace is False, returns a new DataFrame, otherwise None.", "source": "juraj-google-style"}
{"code": "def from_config(cls, config, custom_objects=None):\n    if 'learning_rate' in config:\n        if isinstance(config['learning_rate'], dict):\n            config['learning_rate'] = serialization_lib.deserialize_keras_object(config['learning_rate'], custom_objects=custom_objects)\n    return cls(**config)", "docstring": "Creates an optimizer from its config.\n\nThis method is the reverse of `get_config`, capable of instantiating the\nsame optimizer from the config dictionary.\n\nArgs:\nconfig: A Python dictionary, typically the output of get_config.\ncustom_objects: A Python dictionary mapping names to additional\nuser-defined Python objects needed to recreate this optimizer.\n\nReturns:\nAn optimizer instance.", "source": "github-repos"}
{"code": "def _is_src_field_auto_convertible(src_field, dest_proto_fields_by_name) -> bool:\n    if src_field.name not in dest_proto_fields_by_name:\n        return False\n    dest_field = dest_proto_fields_by_name[src_field.name]\n    if dest_field.label != src_field.label or src_field.type != dest_field.type:\n        return False\n    if _is_map_field(src_field):\n        src_fields_by_name = src_field.message_type.fields_by_name\n        dest_fields_by_name = dest_field.message_type.fields_by_name\n        if not _is_src_field_auto_convertible(src_fields_by_name['key'], dest_fields_by_name) or not _is_src_field_auto_convertible(src_fields_by_name['value'], dest_fields_by_name):\n            return False\n    elif src_field.type == descriptor.FieldDescriptor.TYPE_MESSAGE:\n        if _is_any_field(src_field) and _is_any_field(dest_field):\n            return True\n        if _is_any_field(src_field):\n            return False\n        if _is_any_field(dest_field):\n            return True\n        if src_field.message_type != dest_field.message_type:\n            return False\n    return True", "docstring": "Checks if the src_field can be auto-converted.\n\nThere must be a field in dest_proto with same name and type as the src_field\nto auto convert src_field.\n\nArgs:\nsrc_field: the field to check if it's auto-convertible.\ndest_proto_fields_by_name: field name to field dict for dest_proto.\n\nReturns:\nbool: True if the src_field is auto-convertible.", "source": "github-repos"}
{"code": "def __init__(self, config):\n        \n\n        self.name = config['name']\n        self.config = config\n        self.entities = {}\n        self.files = []\n\n        self.include = listify(self.config.get('include', []))\n        self.exclude = listify(self.config.get('exclude', []))\n\n        if self.include and self.exclude:\n            raise ValueError(\"The 'include' and 'exclude' arguments cannot \"\n                             \"both be set. Please pass at most one of these \"\n                             \"for domain '%s'.\" % self.name)\n\n        self.path_patterns = listify(config.get('default_path_patterns', []))", "docstring": "A set of rules that applies to one or more directories\nwithin a Layout.\n\nArgs:\nname (str): The name of the Domain.\nconfig (dict): The configuration dictionary that defines the\nentities and paths for the current domain.", "source": "juraj-google-style"}
{"code": "def print_probabilities(state: State, ndigits: int=4, file: TextIO=None) -> None:\n    prob = bk.evaluate(state.probabilities())\n    for (index, prob) in np.ndenumerate(prob):\n        prob = round(prob, ndigits)\n        if (prob == 0.0):\n            continue\n        ket = ''.join([str(n) for n in index])\n        print(ket, ':', prob, file=file)", "docstring": "Pretty print state probabilities.\n\nArgs:\nstate:\nndigits: Number of digits of accuracy\nfile: Output stream (Defaults to stdout)", "source": "codesearchnet"}
{"code": "class PerceiverEmbeddingDecoder(nn.Module):\n\n    def __init__(self, config: PerceiverConfig) -> None:\n        super().__init__()\n        self.config = config\n        self.vocab_size = config.vocab_size\n        self.bias = nn.Parameter(torch.zeros(self.vocab_size))\n\n    def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:\n        batch_size, seq_len, d_model = hidden_states.shape\n        output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1))\n        output = output + self.bias\n        return output.reshape([batch_size, seq_len, self.vocab_size])", "docstring": "Module to decode embeddings (for masked language modeling).\n\nArgs:\nconfig ([`PerceiverConfig`]):\nModel configuration.", "source": "github-repos"}
{"code": "def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):\n        \n        \n        if not rename_model_to:\n            rename_model_to = self.model_to_use\n\n        new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))\n\n        if self.structure_path:\n            if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):\n                \n                custom_clean = CleanPDB()\n                my_pdb = StructureIO(self.structure_path)\n                new_model_path = my_pdb.write_pdb(custom_selection=custom_clean,\n                                                  custom_name=rename_model_to,\n                                                  out_dir=copy_to_dir,\n                                                  force_rerun=force_rerun)\n\n            \n            self.load_structure_path(structure_path=new_model_path, file_type='pdb')\n\n            \n            dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))\n            if not op.exists(dest_itasser_dir):\n                os.mkdir(dest_itasser_dir)\n\n            for attr in self._attrs_to_copy:\n                old_file_path = getattr(self, attr)\n                new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))\n                if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):\n                    shutil.copy2(old_file_path, new_file_path)\n                    log.debug('{}: copied from {}'.format(new_file_path, old_file_path))\n                else:\n                    log.debug('{}: file already exists'.format(new_file_path))\n                setattr(self, attr, new_file_path)", "docstring": "Copy the raw information from I-TASSER modeling to a new folder.\n\nCopies all files in the list _attrs_to_copy.\n\nArgs:\ncopy_to_dir (str): Directory to copy the minimal set of results per sequence.\nrename_model_to (str): New file name (without extension)\nforce_rerun (bool): If existing models and results should be overwritten.", "source": "juraj-google-style"}
{"code": "def get_access_token(tenant_id, application_id, application_secret):\n    context = adal.AuthenticationContext((get_auth_endpoint() + tenant_id), api_version=None)\n    token_response = context.acquire_token_with_client_credentials(get_resource_endpoint(), application_id, application_secret)\n    return token_response.get('accessToken')", "docstring": "get an Azure access token using the adal library.\n\nArgs:\ntenant_id (str): Tenant id of the user's account.\napplication_id (str): Application id of a Service Principal account.\napplication_secret (str): Application secret (password) of the Service Principal account.\n\nReturns:\nAn Azure authentication token string.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n        \n        try:\n            self.nap_time = int(os.environ.get('CSU_POLL_INTERVAL', 30))\n        except Exception:\n            self.nap_time = 15\n        self._stack_name = kwargs.get('Stack')\n        self._verbose = kwargs.get('Verbose', False)\n        if not self._stack_name:\n            logging.error('no stack name given, exiting')\n            raise SystemError\n\n        if not self._init_boto3_clients(kwargs.get('Profile'), kwargs.get('Region')):\n            logging.error('client initialization failed, exiting')\n            raise SystemError", "docstring": "The initializer sets up stuff to do the work\n\nArgs:\ndict of args\n\nReturns:\nkwarg[Profile]: asdasdf\n\nRaises:\nSystemError if thing are not all good", "source": "juraj-google-style"}
{"code": "def GetMetadataAttribute(self, attribute_name):\n    table_name = 'metadata'\n    has_table = self._database_file.HasTable(table_name)\n    if (not has_table):\n        return None\n    column_names = ['value']\n    condition = 'name == \"{0:s}\"'.format(attribute_name)\n    values = list(self._database_file.GetValues([table_name], column_names, condition))\n    number_of_values = len(values)\n    if (number_of_values == 0):\n        return None\n    if (number_of_values == 1):\n        return values[0]['value']\n    raise RuntimeError('More than one value found in database.')", "docstring": "Retrieves the metadata attribute.\n\nArgs:\nattribute_name (str): name of the metadata attribute.\n\nReturns:\nstr: the metadata attribute or None.\n\nRaises:\nRuntimeError: if more than one value is found in the database.", "source": "codesearchnet"}
{"code": "def _FormatInode(self, event):\n    \n    inode = event.inode\n    if inode is None:\n      if hasattr(event, 'pathspec') and hasattr(event.pathspec, 'image_inode'):\n        inode = event.pathspec.image_inode\n    if inode is None:\n      inode = '-'\n\n    return inode", "docstring": "Formats the inode.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: inode field.", "source": "juraj-google-style"}
{"code": "def __init__(self, checkpointer_impl, root=None, **kwargs):\n    if root:\n        trackable_root = root() if isinstance(root, weakref.ref) else root\n        kwargs['root'] = trackable_root\n        trackable_root._maybe_initialize_trackable()\n    if checkpointer_impl is None:\n        raise AttributeError('checkpointer_impl cannot be None for AsyncCheckpointHelper.')\n    self._checkpointer_impl = checkpointer_impl\n    self._checkpoint_items = kwargs\n    self._checkpoint = None\n    self.checkpointer()\n    self._checkpoint_options = None\n    self._initialized = False\n    self._original_nodes = None\n    self._object_map = None\n    self._tpu_embedding_objects = None\n    self._saveable_trackables = None\n    self._default_device = device_util.current() or 'CPU:0'\n    self._default_device = device_util.canonicalize(self._default_device)\n    self._save_file_prefix = None\n    self._use_checkpoint_save = False\n    self._async_save_thread = None\n    self._queue = queue.Queue(maxsize=1)\n    atexit.register(self._join_async_save_thread)\n    self._async_error = None\n    global _END_TIME_OF_LAST_ASYNC_WRITE\n    with _END_TIME_OF_LAST_ASYNC_WRITE_LOCK:\n        if _END_TIME_OF_LAST_ASYNC_WRITE is None:\n            _END_TIME_OF_LAST_ASYNC_WRITE = time.time()", "docstring": "Initialize AsyncCheckpoint.\n\nArgs:\ncheckpointer_impl: The Checkpoint class to power the AsyncCheckpoint.\nroot: The root object to checkpoint. `root` may be a trackable object or\n`WeakRef` of a trackable object.\n**kwargs: The keyword arguments representing the checkpointed variables.\n\nRaises:\nAttributeError: when checkpointer_impl is None.", "source": "github-repos"}
{"code": "def has_all_nonzero_section_lengths(neuron, threshold=0.0):\n    bad_ids = [s.id for s in _nf.iter_sections(neuron.neurites) if (section_length(s.points) <= threshold)]\n    return CheckResult((len(bad_ids) == 0), bad_ids)", "docstring": "Check presence of neuron sections with length not above threshold\n\nArguments:\nneuron(Neuron): The neuron object to test\nthreshold(float): value above which a section length is considered\nto be non-zero\n\nReturns:\nCheckResult with result including list of ids of bad sections", "source": "codesearchnet"}
{"code": "def wait_key(keys=None):\n    \n    if is_a_tty():\n        if keys:\n            if not isinstance(keys, tuple):\n                keys = (keys,)\n            while True:\n                key = _getch()\n                if key in keys:\n                    return key\n        else:\n            return _getch()", "docstring": "Waits for a keypress at the console and returns it.\n\"Where's the any key?\"\n\nArguments:\nkeys - if passed, wait for this specific key, e.g. ESC.\nmay be a tuple.\nReturns:\nchar or ESC - depending on key hit.\nNone - immediately under i/o redirection, not an interactive tty.", "source": "juraj-google-style"}
{"code": "def is_in_path(program):\n\t\n\n\tif sys.version_info.major == 2:\n\t\tpath = os.getenv('PATH')\n\t\tif os.name == 'nt':\n\t\t\tpath = path.split(';')\n\t\telse:\n\t\t\tpath = path.split(':')\n\telse:\n\t\tpath = os.get_exec_path()\n\n\tfor i in path:\n\t\tif os.path.isdir(i):\n\t\t\tif program in os.listdir(i):\n\t\t\t\treturn True", "docstring": "Check if a program is in the system ``PATH``.\n\nChecks if a given program is in the user's ``PATH`` or not.\n\nArgs:\nprogram (str): The program to try to find in ``PATH``.\n\nReturns:\nbool: Is the program in ``PATH``?", "source": "juraj-google-style"}
{"code": "def generate_parsers(config, paths):\n    \n    output = \n    \n    output += inspect.getsource(conf_reader._get_source) + \"\\n\\n\"\n    output += inspect.getsource(utils._get_encoding) + \"\\n\\n\"\n    output += inspect.getsource(utils.handle_encodnig) + \"\\n\\n\"\n    output += inspect.getsource(utils.is_equal_tag) + \"\\n\\n\"\n    output += inspect.getsource(utils.has_neigh) + \"\\n\\n\"\n    output += \"\n\n    for name, path in paths.items():\n        path = path[0]  \n\n        required = config[0][\"vars\"][name].get(\"required\", False)\n        notfoundmsg = config[0][\"vars\"][name].get(\"notfoundmsg\", \"\")\n\n        output += _generate_parser(name, path, required, notfoundmsg)\n\n    output += \"\n    output += _unittest_template(config)\n\n    output += \"\n    output += \"if __name__ == '__main__':\\n\"\n    output += IND + \"test_parsers()\"\n\n    return output", "docstring": "Generate parser for all `paths`.\n\nArgs:\nconfig (dict): Original configuration dictionary used to get matches\nfor unittests. See\n:mod:`~harvester.autoparser.conf_reader` for details.\npaths (dict): Output from :func:`.select_best_paths`.\n\nReturns:\nstr: Python code containing all parsers for `paths`.", "source": "juraj-google-style"}
{"code": "def send_cmd(cmd, args, ret):\n    from dvc.daemon import daemon\n    if (not Analytics._is_enabled(cmd)):\n        return\n    analytics = Analytics()\n    analytics.collect_cmd(args, ret)\n    daemon(['analytics', analytics.dump()])", "docstring": "Collect and send analytics for CLI command.\n\nArgs:\nargs (list): parsed args for the CLI command.\nret (int): return value of the CLI command.", "source": "codesearchnet"}
{"code": "def copy(self, destination):\n    destination_uri = self.repo.parse_uri(destination)\n    response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination': destination_uri.toPython()})\n    if (response.status_code == 201):\n        return destination_uri\n    else:\n        raise Exception(('HTTP %s, could not move resource %s to %s' % (response.status_code, self.uri, destination_uri)))", "docstring": "Method to copy resource to another location\n\nArgs:\ndestination (rdflib.term.URIRef, str): URI location to move resource\n\nReturns:\n(Resource) new, moved instance of resource", "source": "codesearchnet"}
{"code": "def display(self, *amplExpressions):\n    exprs = list(map(str, amplExpressions))\n    lock_and_call((lambda : self._impl.displayLst(exprs, len(exprs))), self._lock)", "docstring": "Writes on the current OutputHandler the outcome of the AMPL statement.\n\n.. code-block:: ampl\n\ndisplay e1, e2, .., en;\n\nwhere e1, ..., en are the strings passed to the procedure.\n\nArgs:\namplExpressions: Expressions to be evaluated.", "source": "codesearchnet"}
{"code": "def execute_script(self, script, *args):\n    return self._execute(Command.EXECUTE_SCRIPT, {'script': script, 'args': list(args)})", "docstring": "Execute JavaScript Synchronously in current context.\n\nSupport:\nWeb(WebView)\n\nArgs:\nscript: The JavaScript to execute.\n*args: Arguments for your JavaScript.\n\nReturns:\nReturns the return value of the function.", "source": "codesearchnet"}
{"code": "def get_field(self, field, default=None):\n    \n\n    metadata = self._op.get('metadata')\n\n    value = None\n    if field == 'internal-id':\n      value = self._op['name']\n    elif field == 'job-id':\n      value = metadata['labels'].get('job-id')\n    elif field == 'job-name':\n      value = metadata['labels'].get('job-name')\n    elif field == 'task-id':\n      value = metadata['labels'].get('task-id')\n    elif field == 'task-attempt':\n      value = metadata['labels'].get('task-attempt')\n    elif field == 'user-id':\n      value = metadata['labels'].get('user-id')\n    elif field == 'dsub-version':\n      value = metadata['labels'].get('dsub-version')\n    elif field == 'task-status':\n      value = self._operation_status()\n    elif field == 'logging':\n      value = metadata['request']['pipelineArgs']['logging']['gcsPath']\n    elif field == 'envs':\n      value = self._get_operation_input_field_values(metadata, False)\n    elif field == 'labels':\n      \n      value = {\n          k: v\n          for k, v in metadata['labels'].items()\n          if k not in job_model.RESERVED_LABELS\n      }\n    elif field == 'inputs':\n      value = self._get_operation_input_field_values(metadata, True)\n    elif field == 'outputs':\n      value = self._get_operation_output_field_values(metadata)\n    elif field == 'mounts':\n      value = None\n    elif field == 'create-time':\n      value = google_base.parse_rfc3339_utc_string(metadata['createTime'])\n    elif field == 'start-time':\n      \n      start_events = [\n          e for e in metadata.get('events', []) if e['description'] == 'start'\n      ]\n      \n      if start_events:\n        value = google_base.parse_rfc3339_utc_string(\n            start_events[-1]['startTime'])\n    elif field == 'end-time':\n      if 'endTime' in metadata:\n        value = google_base.parse_rfc3339_utc_string(metadata['endTime'])\n    elif field == 'status':\n      value = self._operation_status()\n    elif field in ['status-message', 'status-detail']:\n      status, last_update = self._operation_status_message()\n      value = status\n    elif field == 'last-update':\n      status, last_update = self._operation_status_message()\n      value = last_update\n    elif field == 'provider':\n      return _PROVIDER_NAME\n    elif field == 'provider-attributes':\n      \n      \n      gce_data = metadata.get('runtimeMetadata', {}).get('computeEngine', {})\n      if 'machineType' in gce_data:\n        machine_type = gce_data.get('machineType').rpartition('/')[2]\n      else:\n        machine_type = None\n      instance_name = gce_data.get('instanceName')\n      instance_zone = gce_data.get('zone')\n      value = {\n          'machine-type': machine_type,\n          'instance-name': instance_name,\n          'zone': instance_zone,\n      }\n    elif field == 'events':\n      events = metadata.get('events', [])\n      value = []\n      for event in events:\n        event_value = {\n            'name':\n                event.get('description', ''),\n            'start-time':\n                google_base.parse_rfc3339_utc_string(event['startTime'])\n        }\n        if 'endTime' in event:\n          event_value['end-time'] = google_base.parse_rfc3339_utc_string(\n              event['endTime'])\n\n        value.append(event_value)\n    elif field in [\n        'user-project', 'script-name', 'script', 'input-recursives',\n        'output-recursives'\n    ]:\n      \n      value = None\n\n    else:\n      raise ValueError('Unsupported field: \"%s\"' % 
field)\n\n    return value if value else default", "docstring": "Returns a value from the operation for a specific set of field names.\n\nArgs:\nfield: a dsub-specific job metadata key\ndefault: default value to return if field does not exist or is empty.\n\nReturns:\nA text string for the field or a list for 'inputs'.\n\nRaises:\nValueError: if the field label is not supported by the operation", "source": "juraj-google-style"}
{"code": "def get_graph(self, item_ids, language=None):\n\n    def _related(item_ids):\n        if (item_ids is None):\n            items = Item.objects.filter(active=True).prefetch_related('parents', 'children')\n        else:\n            item_ids = [ii for iis in item_ids.values() for ii in iis]\n            items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children')\n        return {item.id: sorted([_item.id for rel in [item.parents.all(), item.children.all()] for _item in rel]) for item in items}\n    if (item_ids is None):\n        return self._reachable_graph(None, _related, language=language)\n    else:\n        graph = self.get_graph(None, language)\n        return self._subset_graph(graph, item_ids)", "docstring": "Get a subgraph of items reachable from the given set of items through\nany relation.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\ndict: item id -> list of items (parent items), root items are\nreferenced by None key", "source": "codesearchnet"}
{"code": "def iter_predict_proba(self, X, include_init=False):\n    utils.validation.check_is_fitted(self, 'init_estimator_')\n    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)\n    probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)\n    for y_pred in super().iter_predict(X, include_init=include_init):\n        if (len(self.classes_) == 2):\n            probas[(:, 1)] = sigmoid(y_pred[(:, 0)])\n            probas[(:, 0)] = (1.0 - probas[(:, 1)])\n        else:\n            probas[:] = softmax(y_pred)\n        (yield probas)", "docstring": "Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.\n\nArguments:\nX (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.\nSparse matrices are accepted only if they are supported by the weak model.\ninclude_init (bool, default=False): If ``True`` then the prediction from\n``init_estimator`` will also be returned.\n\nReturns:\niterator of arrays of shape (n_samples, n_classes) containing the predicted\nprobabilities at each stage", "source": "codesearchnet"}
{"code": "def _parse_doc(doc):\n    \n    lines = doc.split(\"\\n\")\n    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))\n\n    if len(descriptions) < 3:\n        description = lines[0]\n    else:\n        description = \"{0}\\n\\n{1}\".format(\n            lines[0], textwrap.dedent(\"\\n\".join(descriptions[2:])))\n\n    args = list(itertools.takewhile(\n        _checker(_KEYWORDS_OTHERS),\n        itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))\n    argmap = {}\n    if len(args) > 1:\n        for pair in args[1:]:\n            kv = [v.strip() for v in pair.split(\":\")]\n            if len(kv) >= 2:\n                argmap[kv[0]] = \":\".join(kv[1:])\n\n    return dict(headline=descriptions[0], description=description, args=argmap)", "docstring": "Parse a docstring.\n\nParse a docstring and extract three components; headline, description,\nand map of arguments to help texts.\n\nArgs:\ndoc: docstring.\n\nReturns:\na dictionary.", "source": "juraj-google-style"}
{"code": "def __init__(self, filter_string=None, context=None):\n    \n    self._client = _utils.make_client(context)\n    self._filter_string = filter_string\n    self._descriptors = None", "docstring": "Initializes the ResourceDescriptors based on the specified filters.\n\nArgs:\nfilter_string: An optional filter expression describing the resource\ndescriptors to be returned.\ncontext: An optional Context object to use instead of the global default.", "source": "juraj-google-style"}
{"code": "def add_observer(self, callback):\n    if (callback in self._observers):\n        raise ValueError('{} is already an observer of {}'.format(callback, self))\n    self._observers.append(callback)", "docstring": "Add an observer to this event.\n\nArgs:\ncallback: A function or coroutine callback to call when the event\nis fired.\n\nRaises:\nValueError: If the callback has already been added.", "source": "codesearchnet"}
{"code": "def calc_intent(self, query):\n    matches = self.calc_intents(query)\n    if (len(matches) == 0):\n        return MatchData('', '')\n    best_match = max(matches, key=(lambda x: x.conf))\n    best_matches = (match for match in matches if (match.conf == best_match.conf))\n    return min(best_matches, key=(lambda x: sum(map(len, x.matches.values()))))", "docstring": "Tests all the intents against the query and returns\nmatch data of the best intent\n\nArgs:\nquery (str): Input sentence to test against intents\nReturns:\nMatchData: Best intent match", "source": "codesearchnet"}
{"code": "def _DeserializeResponse(self, payload):\n    (status_line, payload) = payload.split('\\n', 1)\n    (_, status, _) = status_line.split(' ', 2)\n    parser = email_parser.Parser()\n    msg = parser.parsestr(payload)\n    info = dict(msg)\n    info['status'] = status\n    content = msg.get_payload()\n    return http_wrapper.Response(info, content, self.__batch_url)", "docstring": "Convert string into Response and content.\n\nArgs:\npayload: Header and body string to be deserialized.\n\nReturns:\nA Response object", "source": "codesearchnet"}
{"code": "def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):\n    if device_name is not None:\n        if platform_name is not None:\n            raise ValueError('device_name and platform_name cannot be provided at the same time.')\n        warnings.warn('device_name is being deprecated. Use platform_name.')\n    device_name = compiler_ir.maybe_get_device_name(device_name)\n    res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=list(filtered_flat_args), captured_inputs=concrete_fn.captured_inputs, stage=stage)\n    if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):\n        return res_bytes\n    else:\n        return res_bytes.decode('utf-8')", "docstring": "Gets the compiler IR bytes.\n\nArgs:\nstage: The exported stage for the given function.\ndevice_name: The name of the device with the form as\n\"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc.\nWhen this is used, actual device is used for getting the compiler IR.\nplatform_name: The name of the platform, e.g. \"TPU\". See the comment in\n`get_compiler_ir` in `context.py`.\n\nReturns:\nThe compiler IR bytes.", "source": "github-repos"}
{"code": "def _get_bounding_box(self, box: 'torch.Tensor') -> Dict[str, int]:\n    if self.framework != 'pt':\n        raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')\n    xmin, ymin, xmax, ymax = box.int().tolist()\n    bbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}\n    return bbox", "docstring": "Turns list [xmin, xmax, ymin, ymax] into dict { \"xmin\": xmin, ... }\n\nArgs:\nbox (`torch.Tensor`): Tensor containing the coordinates in corners format.\n\nReturns:\nbbox (`Dict[str, int]`): Dict containing the coordinates in corners format.", "source": "github-repos"}
{"code": "def dot(\n        self, coords_a: Vector3Like, coords_b: Vector3Like, frac_coords: bool = False\n    ) -> np.ndarray:\n        \n        coords_a, coords_b = (\n            np.reshape(coords_a, (-1, 3)),\n            np.reshape(coords_b, (-1, 3)),\n        )\n\n        if len(coords_a) != len(coords_b):\n            raise ValueError(\"\")\n\n        if np.iscomplexobj(coords_a) or np.iscomplexobj(coords_b):\n            raise TypeError(\"Complex array!\")\n\n        if not frac_coords:\n            cart_a, cart_b = coords_a, coords_b\n        else:\n            cart_a = np.reshape(\n                [self.get_cartesian_coords(vec) for vec in coords_a], (-1, 3)\n            )\n            cart_b = np.reshape(\n                [self.get_cartesian_coords(vec) for vec in coords_b], (-1, 3)\n            )\n\n        return np.array([dot(a, b) for a, b in zip(cart_a, cart_b)])", "docstring": "Compute the scalar product of vector(s).\n\nArgs:\ncoords_a, coords_b: Array-like objects with the coordinates.\nfrac_coords (bool): Boolean stating whether the vector\ncorresponds to fractional or cartesian coordinates.\n\nReturns:\none-dimensional `numpy` array.", "source": "juraj-google-style"}
{"code": "def reset(self):\n    fetches = []\n    for processor in self.preprocessors:\n        fetches.extend((processor.reset() or []))\n    return fetches", "docstring": "Calls `reset` on all our Preprocessor objects.\n\nReturns:\nA list of tensors to be fetched.", "source": "codesearchnet"}
{"code": "def run(self, dag):\n        \n        num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])\n        if num_dag_qubits > self.coupling_map.size():\n            raise TranspilerError('Number of qubits greater than device.')\n        self.property_set['layout'] = Layout.generate_trivial_layout(*dag.qregs.values())", "docstring": "Pick a layout by assigning n circuit qubits to device qubits 0, .., n-1.\n\nArgs:\ndag (DAGCircuit): DAG to find layout for.\n\nRaises:\nTranspilerError: if dag wider than self.coupling_map", "source": "juraj-google-style"}
{"code": "def __init__(self, enum_class):\n    \n    \n    \n    import enum\n\n    if not issubclass(enum_class, enum.Enum):\n      raise TypeError('{} is not a subclass of Enum.'.format(enum_class))\n    if not enum_class.__members__:\n      raise ValueError('enum_class cannot be empty, but \"{}\" is empty.'\n                       .format(enum_class))\n\n    super(EnumClassParser, self).__init__()\n    self.enum_class = enum_class", "docstring": "Initializes EnumParser.\n\nArgs:\nenum_class: class, the Enum class with all possible flag values.\n\nRaises:\nTypeError: When enum_class is not a subclass of Enum.\nValueError: When enum_class is empty.", "source": "juraj-google-style"}
{"code": "def http_exception(channel, title):\n    gui = ui_embed.UI(channel, 'Too much help', '{} is too helpful! Try trimming some of the help messages.'.format(title), modulename=modulename)\n    return gui", "docstring": "Creates an embed UI containing the 'too long' error message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\ntitle (str): The title of the embed\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "codesearchnet"}
{"code": "def delete_attachment(cls, session, attachment):\n        \n        return super(Conversations, cls).delete(\n            session,\n            attachment,\n            endpoint_override='/attachments/%s.json' % attachment.id,\n            out_type=Attachment,\n        )", "docstring": "Delete an attachment.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nattachment (helpscout.models.Attachment): The attachment to\nbe deleted.\n\nReturns:\nNoneType: Nothing.", "source": "juraj-google-style"}
{"code": "def convert_ini(config_dict):\n    config_lines = []\n    for (env, configs) in sorted(config_dict.items()):\n        for (resource, app_properties) in sorted(configs.items()):\n            try:\n                for (app_property, value) in sorted(app_properties.items()):\n                    variable = '{env}_{resource}_{app_property}'.format(env=env, resource=resource, app_property=app_property).upper()\n                    if isinstance(value, (dict, DeepChainMap)):\n                        safe_value = \"'{0}'\".format(json.dumps(dict(value)))\n                    else:\n                        safe_value = json.dumps(value)\n                    line = '{variable}={value}'.format(variable=variable, value=safe_value)\n                    LOG.debug('INI line: %s', line)\n                    config_lines.append(line)\n            except AttributeError:\n                resource = resource.upper()\n                app_properties = \"'{}'\".format(json.dumps(app_properties))\n                line = '{0}={1}'.format(resource, app_properties)\n                LOG.debug('INI line: %s', line)\n                config_lines.append(line)\n    return config_lines", "docstring": "Convert _config_dict_ into a list of INI formatted strings.\n\nArgs:\nconfig_dict (dict): Configuration dictionary to be flattened.\n\nReturns:\n(list) Lines to be written to a file in the format of KEY1_KEY2=value.", "source": "codesearchnet"}
{"code": "def _det_large_enough_mask(x, det_bounds):\n    return tf.cast((tf.linalg.det(x) > det_bounds), dtype=x.dtype)", "docstring": "Returns whether the input matches the given determinant limit.\n\nArgs:\nx: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.\ndet_bounds: A floating-point `Tensor` that must broadcast to shape\n`[B1, ..., Bn]`, giving the desired lower bound on the\ndeterminants in `x`.\n\nReturns:\nmask: A floating-point `Tensor` of shape [B1, ..., Bn].  Each\nscalar is 1 if the corresponding matrix had determinant above\nthe corresponding bound, otherwise 0.", "source": "codesearchnet"}
{"code": "def delete(self, uri):\n    try:\n        self.connect(uri, method='DELETE')\n        return True\n    except urllib.error.HTTPError:\n        return False", "docstring": "Method deletes a Fedora Object in the repository\n\nArgs:\nuri(str): URI of Fedora Object", "source": "codesearchnet"}
{"code": "def get_latest_score_for_submission(submission_uuid, read_replica=False):\n    \n    try:\n        \n        submission_model = _get_submission_model(submission_uuid, read_replica)\n        score_qs = Score.objects.filter(\n            submission__uuid=submission_model.uuid\n        ).order_by(\"-id\").select_related(\"submission\")\n\n        if read_replica:\n            score_qs = _use_read_replica(score_qs)\n\n        score = score_qs[0]\n        if score.is_hidden():\n            return None\n    except (IndexError, Submission.DoesNotExist):\n        return None\n\n    return ScoreSerializer(score).data", "docstring": "Retrieve the latest score for a particular submission.\n\nArgs:\nsubmission_uuid (str): The UUID of the submission to retrieve.\n\nKwargs:\nread_replica (bool): If true, attempt to use the read replica database.\nIf no read replica is available, use the default database.\n\nReturns:\ndict: The serialized score model, or None if no score is available.", "source": "juraj-google-style"}
{"code": "def add_embedded_campaign(self, id, collection, campaign, confidence, analyst, date, description):\n    if (type(id) is not ObjectId):\n        id = ObjectId(id)\n    obj = getattr(self.db, collection)\n    result = obj.find({'_id': id, 'campaign.name': campaign})\n    if (result.count() > 0):\n        return\n    else:\n        log.debug('Adding campaign to set: {}'.format(campaign))\n        campaign_obj = {'analyst': analyst, 'confidence': confidence, 'date': date, 'description': description, 'name': campaign}\n        result = obj.update({'_id': id}, {'$push': {'campaign': campaign_obj}})\n        return result", "docstring": "Adds an embedded campaign to the TLO.\n\nArgs:\nid: the CRITs object id of the TLO\ncollection: The db collection. See main class documentation.\ncampaign: The campaign to assign.\nconfidence: The campaign confidence\nanalyst: The analyst making the assignment\ndate: The date of the assignment\ndescription: A description\nReturns:\nThe resulting mongo object", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    result = [1] + [0] * len(token_ids_0) + [1]\n    if token_ids_1 is not None:\n        result += [0] * len(token_ids_1) + [1]\n    return result", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def allele_clusters(dists, t=0.025):\n    clusters = fcluster(linkage(dists), 0.025, criterion='distance')\n    cluster_idx = defaultdict(list)\n    for (idx, cl) in enumerate(clusters):\n        cluster_idx[cl].append(idx)\n    return cluster_idx", "docstring": "Flat clusters from distance matrix\n\nArgs:\ndists (numpy.array): pdist distance matrix\nt (float): fcluster (tree cutting) distance threshold\n\nReturns:\ndict of lists: cluster number to list of indices of distances in cluster", "source": "codesearchnet"}
{"code": "def count_params(self):\n    if not self.built:\n        if getattr(self, '_is_graph_network', False):\n            with tf_utils.maybe_init_scope(self):\n                self._maybe_build(self.inputs)\n        else:\n            raise ValueError('You tried to call `count_params` on ' + self.name + \", but the layer isn't built. You can build it manually via: `\" + self.name + '.build(batch_input_shape)`.')\n    return layer_utils.count_params(self.weights)", "docstring": "Count the total number of scalars composing the weights.\n\nReturns:\nAn integer count.\n\nRaises:\nValueError: if the layer isn't yet built\n(in which case its weights aren't yet defined).", "source": "github-repos"}
{"code": "def set_target(self, target: EventDispatcherBase) -> None:\n        \n        if self._target is not None:\n            raise PermissionError(\"The target property already has a valid value.\")\n\n        if not isinstance(target, EventDispatcherBase):\n            raise TypeError(\"Invalid target type: {}\".format(target))\n\n        self._target = target", "docstring": "This method should be called by the event dispatcher that dispatches this event\nto set its target property.\n\nArgs:\ntarget (EventDispatcherBase): The event dispatcher that will dispatch this event.\n\nRaises:\nPermissionError: If the target property of the event has already been set.\nTypeError: If `target` is not an `EventDispatcherBase` instance.", "source": "juraj-google-style"}
{"code": "def shapeplot(h, ax, sections=None, order='pre', cvals=None, clim=None, cmap=cm.YlOrBr_r, legend=True, **kwargs):\n    if (sections is None):\n        if (order == 'pre'):\n            sections = allsec_preorder(h)\n        else:\n            sections = list(h.allsec())\n    if ((cvals is not None) and (clim is None)):\n        clim = [np.nanmin(cvals), np.nanmax(cvals)]\n    lines = []\n    i = 0\n    allDiams = []\n    for sec in sections:\n        allDiams.append(get_section_diams(h, sec))\n    for (isec, sec) in enumerate(sections):\n        xyz = get_section_path(h, sec)\n        seg_paths = interpolate_jagged(xyz, sec.nseg)\n        diams = allDiams[isec]\n        linewidths = diams\n        for (j, path) in enumerate(seg_paths):\n            (line,) = plt.plot(path[(:, 0)], path[(:, 1)], path[(:, 2)], '-k', **kwargs)\n            try:\n                line.set_linewidth(linewidths[j])\n            except:\n                pass\n            if (cvals is not None):\n                if isinstance(cvals[i], numbers.Number):\n                    try:\n                        col = cmap(int((((cvals[i] - clim[0]) * 255) / (clim[1] - clim[0]))))\n                    except:\n                        col = cmap(0)\n                else:\n                    col = cvals[i]\n                line.set_color(col)\n            lines.append(line)\n            i += 1\n    return lines", "docstring": "Plots a 3D shapeplot\n\nArgs:\nh = hocObject to interface with neuron\nax = matplotlib axis for plotting\nsections = list of h.Section() objects to be plotted\norder = { None= use h.allsec() to get sections\n'pre'= pre-order traversal of morphology }\ncvals = list/array with values mapped to color by cmap; useful\nfor displaying voltage, calcium or some other state\nvariable across the shapeplot.\n**kwargs passes on to matplotlib (e.g. color='r' for red lines)\n\nReturns:\nlines = list of line objects making up shapeplot", "source": "codesearchnet"}
{"code": "def setDocumentedBy(self, documented_pid, documenting_pid):\n        \n        self._check_initialized()\n        documented_id = self.getObjectByPid(documented_pid)\n        documenting_id = self.getObjectByPid(documenting_pid)\n        self.add((documented_id, CITO.isDocumentedBy, documenting_id))", "docstring": "Add a CiTO, the Citation Typing Ontology, triple asserting that\n``documented_pid`` isDocumentedBy ``documenting_pid``.\n\nAdds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``\n\nArgs:\ndocumented_pid: str\nPID of a Science Object that is documented by ``documenting_pid``.\n\ndocumenting_pid: str\nPID of a Science Object that documents ``documented_pid``.", "source": "juraj-google-style"}
{"code": "def screenshot(self, filename=None, scale=1.0, method=None):\n    image = None\n    method = (method or self._screenshot_method)\n    if (method == 'minicap'):\n        try:\n            image = self._adb_minicap(scale)\n        except Exception as e:\n            logger.warn('use minicap failed, fallback to screencap. error detail: %s', e)\n            self._screenshot_method = 'screencap'\n            return self.screenshot(filename=filename, scale=scale)\n    elif (method == 'screencap'):\n        image = self._adb_screencap(scale)\n    else:\n        raise RuntimeError(('No such method(%s)' % method))\n    if filename:\n        image.save(filename)\n    return image", "docstring": "Take device screenshot\n\nArgs:\n- filename(string): optional, save int filename\n- scale(float): scale size\n- method(string): one of minicap,screencap\n\nReturn:\nPIL.Image", "source": "codesearchnet"}
{"code": "def validate_word(self, word):\n        \n        while word:\n            match = self.seg_regex.match(word)\n            if match:\n                word = word[len(match.group(0)):]\n            else:\n                \n                return False\n        return True", "docstring": "Returns True if `word` consists exhaustively of valid IPA segments\n\nArgs:\nword (unicode): input word as Unicode IPA string\n\nReturns:\nbool: True if `word` can be divided exhaustively into IPA segments\nthat exist in the database", "source": "juraj-google-style"}
{"code": "def to_query(self, fields=None):\n    from . import _query\n    if (fields is None):\n        fields = '*'\n    elif isinstance(fields, list):\n        fields = ','.join(fields)\n    return _query.Query(('SELECT %s FROM %s' % (fields, self._repr_sql_())), context=self._context)", "docstring": "Return a Query for this Table.\n\nArgs:\nfields: the fields to return. If None, all fields will be returned. This can be a string\nwhich will be injected into the Query after SELECT, or a list of field names.\n\nReturns:\nA Query object that will return the specified fields from the records in the Table.", "source": "codesearchnet"}
{"code": "def make_val_and_grad_fn(value_fn):\n\n    @functools.wraps(value_fn)\n    def val_and_grad(x):\n        return value_and_gradient(value_fn, x)\n    return val_and_grad", "docstring": "Function decorator to compute both function value and gradient.\n\nFor example:\n\n```\n@tff.math.make_val_and_grad_fn\ndef quadratic(x):\nreturn tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1)\n```\n\nTurns `quadratic` into a function that accepts a point as a `Tensor` as input\nand returns a tuple of two `Tensor`s with the value and the gradient of the\ndefined quadratic function evaluated at the input point.\n\nThis is useful for constructing functions to optimize with tff.math.optimizer\nmethods.\n\nArgs:\nvalue_fn: A python function to decorate.\n\nReturns:\nThe decorated function.", "source": "github-repos"}
{"code": "def _get_client(self):\n    return (_oss.StsAuth if ('security_token' in self._storage_parameters) else (_oss.Auth if self._storage_parameters else _oss.AnonymousAuth))(**self._storage_parameters)", "docstring": "OSS2 Auth client\n\nReturns:\noss2.Auth or oss2.StsAuth: client", "source": "codesearchnet"}
{"code": "def symbolic_tensor_id(self, graph_id, op_name, output_slot):\n    return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot)", "docstring": "Get the ID of a symbolic tensor.\n\nArgs:\ngraph_id: The ID of the immediately-enclosing graph.\nop_name: Name of the op.\noutput_slot: Output slot as an int.\n\nReturns:\nThe ID of the symbolic tensor as an int.", "source": "github-repos"}
{"code": "def get_num_days_required(offset, period='d', perc_required=0.90):\n    \n    x = pd.to_datetime('2010-01-01')\n    delta = x - (x - offset)\n    \n    days = delta.days * 0.69\n\n    if period == 'd':\n        req = days * perc_required\n    elif period == 'm':\n        req = (days / 20) * perc_required\n    elif period == 'y':\n        req = (days / 252) * perc_required\n    else:\n        raise NotImplementedError(\n            'period not supported. Supported periods are d, m, y')\n\n    return req", "docstring": "Estimates the number of days required to assume that data is OK.\n\nHelper function used to determine if there are enough \"good\" data\ndays over a given period.\n\nArgs:\n* offset (DateOffset): Offset (lookback) period.\n* period (str): Period string.\n* perc_required (float): percentage of number of days\nexpected required.", "source": "juraj-google-style"}
{"code": "def write_fasta_file(self, outfile, force_rerun=False):\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        SeqIO.write(self, outfile, 'fasta')\n    self.sequence_path = outfile", "docstring": "Write a FASTA file for the protein sequence, ``seq`` will now load directly from this file.\n\nArgs:\noutfile (str): Path to new FASTA file to be written to\nforce_rerun (bool): If an existing file should be overwritten", "source": "codesearchnet"}
{"code": "def get_telex_definition(w_shorthand=True, brackets_shorthand=True):\n    \n    telex = {\n        \"a\": \"a^\",\n        \"o\": \"o^\",\n        \"e\": \"e^\",\n        \"w\": [\"u*\", \"o*\", \"a+\"],\n        \"d\": \"d-\",\n        \"f\": \"\\\\\",\n        \"s\": \"/\",\n        \"r\": \"?\",\n        \"x\": \"~\",\n        \"j\": \".\",\n    }\n\n    if w_shorthand:\n        telex[\"w\"].append('<ư')\n\n    if brackets_shorthand:\n        telex.update({\n            \"]\": \"<ư\",\n            \"[\": \"<ơ\",\n            \"}\": \"<Ư\",\n            \"{\": \"<Ơ\"\n        })\n\n    return telex", "docstring": "Create a definition dictionary for the TELEX input method\n\nArgs:\nw_shorthand (optional): allow a stand-alone w to be\ninterpreted as an ư. Default to True.\nbrackets_shorthand (optional, True): allow typing ][ as\nshorthand for ươ. Default to True.\n\nReturns a dictionary to be passed into process_key().", "source": "juraj-google-style"}
{"code": "def supported_device(self, index=0):\n        \n        if not util.is_natural(index) or index >= self.num_supported_devices():\n            raise ValueError('Invalid index.')\n\n        info = structs.JLinkDeviceInfo()\n\n        result = self._dll.JLINKARM_DEVICE_GetInfo(index, ctypes.byref(info))\n        return info", "docstring": "Gets the device at the given ``index``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nindex (int): the index of the device whose information to get\n\nReturns:\nA ``JLinkDeviceInfo`` describing the requested device.\n\nRaises:\nValueError: if index is less than 0 or >= supported device count.", "source": "juraj-google-style"}
{"code": "def parse_example_tensor(examples, train_config, keep_target):\n    csv_header = []\n    if keep_target:\n        csv_header = train_config['csv_header']\n    else:\n        csv_header = [name for name in train_config['csv_header'] if (name != train_config['target_column'])]\n    record_defaults = [[train_config['csv_defaults'][name]] for name in csv_header]\n    tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')\n    tensors = [tf.expand_dims(x, axis=1) for x in tensors]\n    tensor_dict = dict(zip(csv_header, tensors))\n    return tensor_dict", "docstring": "Read the csv files.\n\nArgs:\nexamples: string tensor\ntrain_config: training config\nkeep_target: if true, the target column is expected to exist and it is\nreturned in the features dict.\n\nReturns:\nDict of feature_name to tensor. Target feature is in the dict.", "source": "codesearchnet"}
{"code": "def simplify_countryname(cls, country):\n        \n        \n        countryupper = country.upper()\n        words = get_words_in_sentence(countryupper)\n        index = countryupper.find(',')\n        if index != -1:\n            countryupper = countryupper[:index]\n        index = countryupper.find(':')\n        if index != -1:\n            countryupper = countryupper[:index]\n        regex = re.compile('\\(.+?\\)')\n        countryupper = regex.sub('', countryupper)\n        remove = copy.deepcopy(cls.simplifications)\n        for simplification1, simplification2 in cls.abbreviations.items():\n            countryupper = countryupper.replace(simplification1, '')\n            remove.append(simplification2)\n        for simplification1, simplifications in cls.multiple_abbreviations.items():\n            countryupper = countryupper.replace(simplification1, '')\n            for simplification2 in simplifications:\n                remove.append(simplification2)\n        remove = '|'.join(remove)\n        regex = re.compile(r'\\b(' + remove + r')\\b', flags=re.IGNORECASE)\n        countryupper = regex.sub('', countryupper)\n        countryupper = countryupper.strip()\n        countryupper_words = get_words_in_sentence(countryupper)\n        if len(countryupper_words) > 1:\n            countryupper = countryupper_words[0]\n        if countryupper:\n            words.remove(countryupper)\n        return countryupper, words", "docstring": "Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.\n\nArgs:\ncountry (str): Country name to simplify\n\nReturns:\nTuple[str, List[str]]: Uppercase simplified country name and list of removed words", "source": "juraj-google-style"}
{"code": "def assign_add(self, variable, value):\n    variable.assign_add(value)", "docstring": "Add a value to a variable.\n\nThis should be used in optimizers instead of\n`variable.assign_add(value)` to support backend specific optimizations.\nNote that the variable can be a model variable or an optimizer variable;\nit can be a backend native variable or a Keras variable.\n\nArgs:\nvariable: The variable to update.\nvalue: The value to add to the variable.", "source": "github-repos"}
{"code": "def specific_file_rst_filename(self, source_filename: str) -> str:\n    highest_code_to_target = relative_filename_within_dir(source_filename, self.highest_code_dir)\n    bname = basename(source_filename)\n    result = join(self.autodoc_rst_root_dir, dirname(highest_code_to_target), (bname + EXT_RST))\n    log.debug('Source {!r} -> RST {!r}', source_filename, result)\n    return result", "docstring": "Gets the RST filename corresponding to a source filename.\nSee the help for the constructor for more details.\n\nArgs:\nsource_filename: source filename within current project\n\nReturns:\nRST filename\n\nNote in particular: the way we structure the directories means that we\nwon't get clashes between files with idential names in two different\ndirectories. However, we must also incorporate the original source\nfilename, in particular for C++ where ``thing.h`` and ``thing.cpp``\nmust not generate the same RST filename. So we just add ``.rst``.", "source": "codesearchnet"}
{"code": "def __init__(self, value: 'ProcessorPartTypes', *, role: str='', substream_name: str='', mimetype: str | None=None, metadata: dict[str, Any] | None=None) -> None:\n    super().__init__()\n    match value:\n        case genai_types.Part():\n            self._part = value\n        case ProcessorPart():\n            self._part = value.part\n            role = role or value.role\n            substream_name = substream_name or value.substream_name\n            mimetype = mimetype or value.mimetype\n            metadata = metadata or value.metadata\n        case str():\n            self._part = genai_types.Part(text=value)\n        case bytes():\n            if not mimetype:\n                raise ValueError('MIME type must be specified when constructing a ProcessorPart from bytes.')\n            self._part = genai_types.Part.from_bytes(data=value, mime_type=mimetype)\n        case PIL.Image.Image():\n            if mimetype:\n                if not mimetype.startswith('image/'):\n                    raise ValueError(f\"Can't convert image of mimetype {mimetype}.\")\n                suffix = mimetype[len('image/'):]\n                if value.format:\n                    if suffix != value.format.lower():\n                        raise ValueError(f'The image format {value.format} and does not match the mimetype {suffix}.')\n            else:\n                suffix = value.format.lower() if value.format else 'webp'\n                mimetype = f'image/{suffix}'\n            bytes_io = io.BytesIO()\n            value.save(bytes_io, suffix.upper())\n            self._part = genai_types.Part.from_bytes(data=bytes_io.getvalue(), mime_type=mimetype)\n        case _:\n            raise ValueError(f\"Can't construct ProcessorPart from {type(value)}.\")\n    self._role = role\n    self._substream_name = substream_name\n    self._metadata = metadata\n    if mimetype:\n        self._mimetype = mimetype\n    elif self._part.inline_data and self._part.inline_data.mime_type:\n        self._mimetype = self._part.inline_data.mime_type\n    elif self._part.text:\n        self._mimetype = 'text/plain'\n    else:\n        self._mimetype = ''", "docstring": "Constructs a ProcessorPart using a `Part` or `ProcessorPart`.\n\nArgs:\nvalue: The content to use to construct the ProcessorPart.\nrole: Optional. The producer of the content. In Genai models, must be\neither 'user' or 'model', but the user can set their own semantics.\nUseful to set for multi-turn conversations, otherwise can be empty.\nsubstream_name: (Optional) ProcessorPart stream can be split into multiple\nindependent streams. They may have specific semantics, e.g. a song and\nits lyrics, or can be just alternative responses. Prefer using a default\nsubstream with an empty name. If the `ProcessorPart` is created using\nanother `ProcessorPart`, this ProcessorPart inherits the existing\nsubstream_name, unless it is overridden in this argument.\nmimetype: Mime type of the data.\nmetadata: (Optional) Auxiliary information about the part. If the\n`ProcessorPart` is created using another `ProcessorPart`, this\nProcessorPart inherits the existing metadata, unless it is overridden in\nthis argument.", "source": "github-repos"}
{"code": "def set_unit_desired_state(self, unit, desired_state):\n    if (desired_state not in self._STATES):\n        raise ValueError('state must be one of: {0}'.format(self._STATES))\n    if isinstance(unit, Unit):\n        unit = unit.name\n    else:\n        unit = str(unit)\n    self._single_request('Units.Set', unitName=unit, body={'desiredState': desired_state})\n    return self.get_unit(unit)", "docstring": "Update the desired state of a unit running in the cluster\n\nArgs:\nunit (str, Unit): The Unit, or name of the unit to update\n\ndesired_state: State the user wishes the Unit to be in\n(\"inactive\", \"loaded\", or \"launched\")\nReturns:\nUnit: The unit that was updated\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400\nValueError: An invalid value was provided for ``desired_state``", "source": "codesearchnet"}
{"code": "def get_variant_genotypes(self, variant):\n        \n        \n        \n        chrom = variant.chrom.name\n        if self.chrom is not None and chrom == self.chrom:\n            chrom = \"NA\"\n\n        \n        results = []\n        iterator = self._bgen.iter_variants_in_region(\n            CHROM_STR_DECODE.get(chrom, chrom), variant.pos, variant.pos,\n        )\n        for info, dosage in iterator:\n            if (variant.alleles is None or\n                    variant.iterable_alleles_eq([info.a1, info.a2])):\n                results.append(Genotypes(\n                    Variant(\n                        info.name,\n                        CHROM_STR_ENCODE.get(info.chrom, info.chrom),\n                        info.pos, [info.a1, info.a2],\n                    ),\n                    dosage,\n                    reference=info.a1,\n                    coded=info.a2,\n                    multiallelic=True,\n                ))\n\n        \n        if not results:\n            logging.variant_name_not_found(variant)\n\n        return results", "docstring": "Get the genotypes from a well formed variant instance.\n\nArgs:\nmarker (Variant): A Variant instance.\n\nReturns:\nA list of Genotypes instance containing a pointer to the variant as\nwell as a vector of encoded genotypes.", "source": "juraj-google-style"}
{"code": "def get_module_docstring(module_name, package, api_name):\n    for version in _API_VERSIONS:\n        compat_prefix = _COMPAT_MODULE_TEMPLATE % version\n        if module_name.startswith(compat_prefix):\n            module_name = module_name[len(compat_prefix):].strip('.')\n    docstring_module_name = module_name\n    doc_sources = doc_srcs.get_doc_sources(api_name)\n    if module_name in doc_sources:\n        docsrc = doc_sources[module_name]\n        if docsrc.docstring:\n            return docsrc.docstring\n        if docsrc.docstring_module_name:\n            docstring_module_name = docsrc.docstring_module_name\n    if package != 'tf_keras':\n        docstring_module_name = package + '.' + docstring_module_name\n    if docstring_module_name in sys.modules and sys.modules[docstring_module_name].__doc__:\n        return sys.modules[docstring_module_name].__doc__\n    return 'Public API for tf.%s namespace.' % module_name", "docstring": "Get docstring for the given module.\n\nThis method looks for docstring in the following order:\n1. Checks if module has a docstring specified in doc_srcs.\n2. Checks if module has a docstring source module specified\nin doc_srcs. If it does, gets docstring from that module.\n3. Checks if module with module_name exists under base package.\nIf it does, gets docstring from that module.\n4. Returns a default docstring.\n\nArgs:\nmodule_name: module name relative to tensorflow (excluding 'tensorflow.'\nprefix) to get a docstring for.\npackage: Base python package containing python with target tf_export\ndecorators.\napi_name: API you want to generate Currently, only `tensorflow`.\n\nReturns:\nOne-line docstring to describe the module.", "source": "github-repos"}
{"code": "def _prewarm_versatileimagefield(size_key, versatileimagefieldfile):\n        \n        versatileimagefieldfile.create_on_demand = True\n        try:\n            url = get_url_from_image_key(versatileimagefieldfile, size_key)\n        except Exception:\n            success = False\n            url_or_filepath = versatileimagefieldfile.name\n            logger.exception('Thumbnail generation failed',\n                             extra={'path': url_or_filepath})\n        else:\n            success = True\n            url_or_filepath = url\n        return (success, url_or_filepath)", "docstring": "Returns a 2-tuple:\n0: bool signifying whether the image was successfully pre-warmed\n1: The url of the successfully created image OR the path on storage of\nthe image that was not able to be successfully created.\n\nArguments:\n`size_key_list`: A list of VersatileImageField size keys. Examples:\n* 'crop__800x450'\n* 'thumbnail__800x800'\n`versatileimagefieldfile`: A VersatileImageFieldFile instance", "source": "juraj-google-style"}
{"code": "def StatResultFromStatEntry(stat_entry):\n    values = []\n    for attr in _STAT_ATTRS[:10]:\n        values.append(stat_entry.Get(attr))\n    return os.stat_result(values)", "docstring": "Returns a `os.stat_result` with most information from `StatEntry`.\n\nThis is a lossy conversion, only the 10 first stat_result fields are\npopulated, because the os.stat_result constructor is inflexible.\n\nArgs:\nstat_entry: An instance of rdf_client_fs.StatEntry.\n\nReturns:\nAn instance of `os.stat_result` with basic fields populated.", "source": "codesearchnet"}
{"code": "def _Assert3DImage(image):\n    return control_flow_ops.with_dependencies(_Check3DImage(image, require_static=False), image)", "docstring": "Assert that we are working with a properly shaped image.\n\nPerforms the check statically if possible (i.e. if the shape\nis statically known). Otherwise adds a control dependency\nto an assert op that checks the dynamic shape.\n\nArgs:\nimage: 3-D Tensor of shape [height, width, channels]\n\nRaises:\nValueError: if `image.shape` is not a 3-vector.\n\nReturns:\nIf the shape of `image` could be verified statically, `image` is\nreturned unchanged, otherwise there will be a control dependency\nadded that asserts the correct dynamic shape.", "source": "github-repos"}
{"code": "def RelayDirectly(self, inventory):\n        \n        relayed = False\n\n        self.RelayCache[inventory.Hash.ToBytes()] = inventory\n\n        for peer in self.Peers:\n            relayed |= peer.Relay(inventory)\n\n        if len(self.Peers) == 0:\n            if type(BC.Default()) is TestLevelDBBlockchain:\n                \n                return True\n\n            logger.info(\"no connected peers\")\n\n        return relayed", "docstring": "Relay the inventory to the remote client.\n\nArgs:\ninventory (neo.Network.Inventory):\n\nReturns:\nbool: True if relayed successfully. False otherwise.", "source": "juraj-google-style"}
{"code": "def __init__(self, row_class=Row):\n    \n    self.row_class = row_class\n    self.separator = ', '\n    self.Reset()", "docstring": "Initialises a new table.\n\nArgs:\nrow_class: A class to use as the row object. This should be a\nsubclass of this module's Row() class.", "source": "juraj-google-style"}
{"code": "def get_soa_record(client, zone_id, zone_name):\n    response = client.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=zone_name, StartRecordType='SOA', MaxItems='1')\n    return SOARecord(response['ResourceRecordSets'][0])", "docstring": "Gets the SOA record for zone_name from zone_id.\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_id (string): The AWS Route53 zone id of the hosted zone to query.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\n:class:`stacker.util.SOARecord`: An object representing the parsed SOA\nrecord returned from AWS Route53.", "source": "codesearchnet"}
{"code": "def __init__(\n            self, dir=None, options=None, upstream=None, prefix='', **kwargs):\n        \n\n        from ambry.dbexceptions import ConfigurationError\n\n        super(FsCache, self).__init__(upstream, **kwargs)\n\n        self._cache_dir = dir\n\n        if not os.path.isabs(self._cache_dir):\n            raise ConfigurationError(\n                \"Filesystem cache must have an absolute path. Got: '{}' \".format(\n                    self._cache_dir))\n\n        self.prefix = prefix", "docstring": "Init a new FileSystem Cache\n\nArgs:\ncache_dir\nmaxsize. Maximum size of the cache, in GB", "source": "juraj-google-style"}
{"code": "def __call__(self, fn):\n        \n\n        def benchmark(app, *args, **kwargs):\n            \n\n            \n            before = datetime.datetime.now()\n            data = fn(app, *args, **kwargs)\n            \n            after = datetime.datetime.now()\n            app.tcex.log.debug(\n                'function: \"{}\", benchmark_time: \"{}\"'.format(\n                    self.__class__.__name__, after - before\n                )\n            )\n            return data\n\n        return benchmark", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def upsample_filters(filters, rate):\n    num_spatial_dims = len(rate)\n    spatial_shape = np.array(filters.shape[:num_spatial_dims])\n    output_spatial_shape = (spatial_shape - 1) * rate + 1\n    output = np.zeros(tuple(output_spatial_shape) + tuple(filters.shape[-2:]), filters.dtype)\n    output[tuple((np.s_[::rate[i]] for i in range(num_spatial_dims)))] = filters\n    return output", "docstring": "Upsamples the filters by a factor of rate along the spatial dimensions.\n\nArgs:\nfilters: spatial_shape + [in_channels, out_channels]\nOriginal filters.\nrate: A list of len(spatial_shape) positive ints, specifying the\nupsampling rate.\n\nReturns:\nfilters_up: output_spatial_shape + [in_channels, out_channels].\nUpsampled filters with\noutput_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1\ncontaining (rate[i] - 1) zeros between consecutive filter values along\nspatial dimension i.", "source": "github-repos"}
{"code": "def __init__(self, optimizer, fraction=0.1, scope='subsampling-step', summary_labels=()):\n        \n        assert isinstance(fraction, float) and fraction > 0.0\n        self.fraction = fraction\n\n        super(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new subsampling-step meta optimizer instance.\n\nArgs:\noptimizer: The optimizer which is modified by this meta optimizer.\nfraction: The fraction of instances of the batch to subsample.", "source": "juraj-google-style"}
{"code": "def _get_scope(node_name):\n    if not node_name:\n        raise ValueError(f'Node name cannot be empty or None. Received: {node_name}.')\n    if node_name.startswith('^'):\n        node_name = node_name[1:]\n    if '/' in node_name:\n        scope, _ = node_name.rsplit('/', 1)\n        return scope\n    return ''", "docstring": "Extract the scope name from a node name.\n\nThe scope name is everything before the final slash,\nnot including any ^ prefix denoting a control dependency.\n\nArgs:\nnode_name: the full name of an Op or a Tensor in the graph.\nReturns:\nThe deepest named scope containing the node.\nRaises:\nValueError: if tensor_name is None or empty", "source": "github-repos"}
{"code": "def __request_message_descriptor(self, request_kind, message_type, method_id,\n                                   path):\n    \n    if isinstance(message_type, resource_container.ResourceContainer):\n      base_message_type = message_type.body_message_class()\n      if (request_kind == self.__NO_BODY and\n          base_message_type != message_types.VoidMessage()):\n        msg = ('Method %s specifies a body message in its ResourceContainer, but '\n               'is a HTTP method type that cannot accept a body.') % method_id\n        raise api_exceptions.ApiConfigurationError(msg)\n    else:\n      base_message_type = message_type\n\n    if (request_kind != self.__NO_BODY and\n        base_message_type != message_types.VoidMessage()):\n      self.__request_schema[method_id] = self.__parser.add_message(\n          base_message_type.__class__)\n\n    params = self.__params_descriptor(message_type, request_kind, path,\n                                      method_id)\n\n    return params", "docstring": "Describes the parameters and body of the request.\n\nArgs:\nrequest_kind: The type of request being made.\nmessage_type: messages.Message or ResourceContainer class. The message to\ndescribe.\nmethod_id: string, Unique method identifier (e.g. 'myapi.items.method')\npath: string, HTTP path to method.\n\nReturns:\nDictionary describing the request.\n\nRaises:\nValueError: if the method path and request required fields do not match", "source": "juraj-google-style"}
{"code": "def write(self, vendor_id=None, log_type=None, json=None, **kwargs):\n    path = '/logging-service/v1/logs/{}/{}'.format(vendor_id, log_type)\n    r = self._httpclient.request(method='POST', url=self.url, json=json, path=path, **kwargs)\n    return r", "docstring": "Write log records to the Logging Service.\n\nThis API requires a JSON array in its request body, each element\nof which represents a single log record. Log records are\nprovided as JSON objects. Every log record must include the\nprimary timestamp field that you identified when you registered\nyour app. Every log record must also identify the log type.\n\nArgs:\nvendor_id (str): Vendor ID.\nlog_type (str): Log type.\njson (list): Payload/request body.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nRefer to ``logging_write.py`` example.", "source": "codesearchnet"}
{"code": "def _set_subject(self, subject):\n        \n        \n        \n        def test_uri(value):\n            \n            \n            if not isinstance(value, (Uri, BlankNode)):\n                try:\n                    if value.startswith(\"_:\"):\n                        return BlankNode(value)\n                    else:\n                        return Uri(value)\n                except:\n                    return BlankNode()\n            else:\n                return value\n\n        if isinstance(subject, dict):\n            self.subject = test_uri(subject['s'])\n            if isinstance(subject['o'], list):\n                for item in subject['o']:\n                    self.add_property(subject['p'],\n                                      item)\n            else:\n                self.add_property(subject['p'],\n                                  subject['o'])\n        else:\n            self.subject = test_uri(subject)", "docstring": "sets the subject value for the class instance\n\nArgs:\nsubject(dict, Uri, str): the subject for the class instance", "source": "juraj-google-style"}
{"code": "def join_tokens_to_sentences(tokens):\n    \n    text = \"\"\n    for (entry, next_entry) in zip(tokens, tokens[1:]):\n        text += entry\n        if next_entry not in SENTENCE_STOPS:\n            text += \" \"\n\n    text += tokens[-1]\n    return text", "docstring": "Correctly joins tokens to multiple sentences\n\nInstead of always placing white-space between the tokens, it will distinguish\nbetween the next symbol and *not* insert whitespace if it is a sentence\nsymbol (e.g. '.', or '?')\n\nArgs:\ntokens: array of string tokens\nReturns:\nJoint sentences as one string", "source": "juraj-google-style"}
{"code": "def path_to_zip(path):\n    if (not os.path.exists(path)):\n        raise IOError((\"%s doesn't exists!\" % path))\n    with tempfile.NamedTemporaryFile(delete=False) as ntf:\n        zip_fn = ntf.name\n    with zipfile.ZipFile(zip_fn, mode='w') as zip_file:\n        for (root, dirs, files) in os.walk(path):\n            for fn in files:\n                zip_file.write(os.path.join(root, fn))\n    return zip_fn", "docstring": "Compress `path` to the ZIP.\n\nArgs:\npath (str): Path to the directory.\n\nReturns:\nstr: Path to the zipped file (in /tmp).", "source": "codesearchnet"}
{"code": "def read(self, auth, resource, options, defer=False):\n        \n        return self._call('read', auth, [resource, options], defer)", "docstring": "Read value(s) from a dataport.\n\nCalls a function that builds a request to read the dataport specified by an alias or rid\nand returns timeseries data as defined by the options.\n\nArgs:\nauth: Takes the device cik\nresource: Takes the dataport alias or rid.\noptions: Takes a list of options for what to return.", "source": "juraj-google-style"}
{"code": "def log_run_info(self, model_name):\n    run_info = {'model_name': model_name, 'machine_config': {}, 'run_date': datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)}\n    _collect_tensorflow_info(run_info)\n    _collect_tensorflow_environment_variables(run_info)\n    _collect_cpu_info(run_info)\n    _collect_gpu_info(run_info)\n    _collect_memory_info(run_info)\n    with tf.gfile.GFile(os.path.join(self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), 'w') as f:\n        try:\n            json.dump(run_info, f)\n            f.write('\\n')\n        except (TypeError, ValueError) as e:\n            tf.logging.warning('Failed to dump benchmark run info to log file: %s', e)", "docstring": "Collect most of the TF runtime information for the local env.\n\nThe schema of the run info follows official/benchmark/datastore/schema.\n\nArgs:\nmodel_name: string, the name of the model.", "source": "codesearchnet"}
{"code": "def _populate_calibration_options(quantization_options: quant_opts_pb2.QuantizationOptions):\n    calib_opts = quantization_options.calibration_options\n    if calib_opts.calibration_method == _CalibrationMethod.CALIBRATION_METHOD_UNSPECIFIED:\n        calib_opts.calibration_method = _CalibrationMethod.CALIBRATION_METHOD_MIN_MAX\n    elif calib_opts.calibration_method == _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE:\n        if not calib_opts.calibration_parameters.num_bins:\n            calib_opts.calibration_parameters.num_bins = 512\n        if not calib_opts.calibration_parameters.min_percentile:\n            calib_opts.calibration_parameters.min_percentile = 0.001\n        if not calib_opts.calibration_parameters.max_percentile:\n            calib_opts.calibration_parameters.max_percentile = 99.999\n    elif calib_opts.calibration_method in [_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE, _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY, _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC]:\n        activation_tensor_type = quantization_options.quantization_method.quantization_component_specs[_QuantizationComponent.COMPONENT_ACTIVATION].tensor_type\n        if activation_tensor_type != _TensorType.TENSORTYPE_INT_8:\n            raise ValueError(f'Only TENSORTYPE_INT_8 is supported for HISTOGRAM_MSE calibration methods. calibration_method={calib_opts.calibration_method}')\n        if not calib_opts.calibration_parameters.num_bins:\n            calib_opts.calibration_parameters.num_bins = 512\n    if calib_opts.calibration_data_dir:\n        save_model.create_empty_output_dir(calib_opts.calibration_data_dir, overwrite=calib_opts.force_regenerate_calibration_data)", "docstring": "Populates default values for CalibrationOptions.\n\nArgs:\nquantization_options: An instance of QuantizationOptions with a field\nspecifying CalibrationOptions", "source": "github-repos"}
{"code": "def __init__(self, structure, transformations=None, history=None,\n                 other_parameters=None):\n        \n        self.final_structure = structure\n        self.history = history or []\n        self.other_parameters = other_parameters or {}\n        self._undone = []\n\n        transformations = transformations or []\n        for t in transformations:\n            self.append_transformation(t)", "docstring": "Initializes a transformed structure from a structure.\n\nArgs:\nstructure (Structure): Input structure\ntransformations ([Transformations]): List of transformations to\napply.\nhistory (list): Previous history.\nother_parameters (dict): Additional parameters to be added.", "source": "juraj-google-style"}
{"code": "def set_parent(self, node):\n    self._parent = node\n    if (node is None):\n        self._depth = 0\n    else:\n        self._depth = (node.get_depth() + 1)", "docstring": "Attach node to its parent.\n\nArgs:\nnode: Parent node.\n\nNote:\n``node`` can be ``None``. In that case, the node is detached from its previous parent.", "source": "codesearchnet"}
{"code": "def create_source_map(nodes, code, filepath):\n    reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False)\n    for node in reparsed_nodes:\n        resolve(node, code, filepath, node.lineno, node.col_offset)\n    source_map = {}\n    try:\n        for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):\n            origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)\n            final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None)\n            if origin_info is None or final_info is None:\n                continue\n            line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)\n            existing_origin = source_map.get(line_loc)\n            if existing_origin is not None:\n                if existing_origin.loc.line_loc == origin_info.loc.line_loc:\n                    if existing_origin.loc.lineno >= origin_info.loc.lineno:\n                        continue\n                if existing_origin.loc.col_offset <= origin_info.loc.col_offset:\n                    continue\n            source_map[line_loc] = origin_info\n    except ValueError as err:\n        new_msg = 'Inconsistent ASTs detected. This is a bug. Cause: \\n'\n        new_msg += str(err)\n        new_msg += 'Diff:\\n'\n        for n, rn in zip(nodes, reparsed_nodes):\n            nodes_str = pretty_printer.fmt(n, color=False, noanno=True)\n            reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)\n            diff = difflib.context_diff(nodes_str.split('\\n'), reparsed_nodes_str.split('\\n'), fromfile='Original nodes', tofile='Reparsed nodes', n=7)\n            diff = '\\n'.join(diff)\n            new_msg += diff + '\\n'\n        raise ValueError(new_msg)\n    return source_map", "docstring": "Creates a source map between an annotated AST and the code it compiles to.\n\nNote: this function assumes nodes nodes, code and filepath correspond to the\nsame code.\n\nArgs:\nnodes: Iterable[ast.AST, ...], one or more AST modes.\ncode: Text, the source code in which nodes are found.\nfilepath: Text\n\nReturns:\nDict[LineLocation, OriginInfo], mapping locations in code to locations\nindicated by origin annotations in node.", "source": "github-repos"}
{"code": "def swd_read16(self, offset):\n    value = self._dll.JLINK_SWD_GetU16(offset)\n    return ctypes.c_uint16(value).value", "docstring": "Gets a unit of ``16`` bits from the input buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): the offset (in bits) from which to start reading\n\nReturns:\nThe integer read from the input buffer.", "source": "codesearchnet"}
{"code": "def __init__(self, _args):\n        \n        self.args = _args\n\n        \n        self._db_conn = None\n        self._install_json = None\n        self._install_json_params = None\n        self._install_json_output_variables = None\n        self._layout_json = None\n        self._layout_json_names = None\n        self._layout_json_params = None\n        self._layout_json_outputs = None\n        self._redis = None\n        self._tcex_json = None\n        self.app_path = os.getcwd()\n        self.exit_code = 0\n        self.input_table = 'inputs'\n        self.output = []\n\n        \n        c.init(autoreset=True, strip=False)", "docstring": "Initialize Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def __init__(self, states, internals, actions, include_next_states, capacity, scope='queue', summary_labels=None):\n        \n        self.capacity = capacity\n        self.scope = scope\n\n        \n        self.states_memory = dict()  \n        self.internals_memory = dict()  \n        self.actions_memory = dict()  \n        self.terminal_memory = None  \n        self.reward_memory = None  \n        self.memory_index = None  \n        self.episode_indices = None  \n        self.episode_count = None  \n\n        self.retrieve_indices = None\n\n        super(Queue, self).__init__(\n            states=states,\n            internals=internals,\n            actions=actions,\n            include_next_states=include_next_states,\n            scope=scope,\n            summary_labels=summary_labels\n        )", "docstring": "Queue memory.\n\nArgs:\ncapacity: Memory capacity.", "source": "juraj-google-style"}
{"code": "def write_genotypes(self, genotypes):\n    if (self._mode != 'w'):\n        raise UnsupportedOperation(\"not available in 'r' mode\")\n    if (self._nb_values is None):\n        self._nb_values = len(genotypes)\n    if (self._nb_values != len(genotypes)):\n        raise ValueError('{:,d} samples expected, got {:,d}'.format(self._nb_values, len(genotypes)))\n    byte_array = [(((g[0] | (g[1] << 2)) | (g[2] << 4)) | (g[3] << 6)) for g in self._grouper((_byte_recode[geno] for geno in genotypes), 4)]\n    self._bed.write(bytearray(byte_array))", "docstring": "Write genotypes to binary file.\n\nArgs:\ngenotypes (numpy.ndarray): The genotypes to write in the BED file.", "source": "codesearchnet"}
{"code": "def node_exists(self, node_name, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Nodes have not been loaded from partition graphs yet.')\n    if device_name is not None and device_name not in self._debug_graphs:\n        raise ValueError(\"The specified device_name '%s' cannot be found.\" % device_name)\n    for _, debug_graph in self._debug_graphs.items():\n        if node_name in debug_graph.node_inputs:\n            return True\n    return False", "docstring": "Test if a node exists in the partition graphs.\n\nArgs:\nnode_name: (`str`) name of the node to be checked.\ndevice_name: optional device name. If None, will search for the node\non all available devices. Otherwise, search for the node only on\nthe given device.\n\nReturns:\nA boolean indicating whether the node exists.\n\nRaises:\nLookupError: If no partition graphs have been loaded yet.\nValueError: If device_name is specified but cannot be found.", "source": "github-repos"}
{"code": "def body(self, body: str):\n        \n        if body is not None and not isinstance(body, str):\n            raise TypeError(\"'body' MUST be a string\")\n        self._body = body", "docstring": "Set body of the message\nArgs:\nbody (str): The body of the message", "source": "juraj-google-style"}
{"code": "def _parse_target(target):\n    \n\n    if len(target) != 8:\n        raise ArgumentError(\"Invalid targeting data length\", expected=8, length=len(target))\n    slot, match_op = struct.unpack(\"<B6xB\", target)\n\n    if match_op == _MATCH_CONTROLLER:\n        return {'controller': True, 'slot': 0}\n    elif match_op == _MATCH_SLOT:\n        return {'controller': False, 'slot': slot}\n\n    raise ArgumentError(\"Unsupported complex targeting specified\", match_op=match_op)", "docstring": "Parse a binary targeting information structure.\n\nThis function only supports extracting the slot number or controller from\nthe target and will raise an ArgumentError if more complicated targeting\nis desired.\n\nArgs:\ntarget (bytes): The binary targeting data blob.\n\nReturns:\ndict: The parsed targeting data", "source": "juraj-google-style"}
{"code": "def _FetchMostRecentGraphSeriesFromTheLegacyDB(label, report_type, token=None):\n    try:\n        stats_for_label = aff4.FACTORY.Open(GetAFF4ClientReportsURN().Add(label), aff4_type=aff4_stats.ClientFleetStats, mode='r', token=token)\n    except aff4.InstantiationError:\n        return None\n    aff4_attr = _GetAFF4AttributeForReportType(report_type)\n    graph_series = rdf_stats.ClientGraphSeries(report_type=report_type)\n    if (aff4_attr.attribute_type == rdf_stats.GraphSeries):\n        graphs = stats_for_label.Get(aff4_attr)\n        if (graphs is None):\n            return None\n        for graph in graphs:\n            graph_series.graphs.Append(graph)\n    elif (aff4_attr.attribute_type == rdf_stats.Graph):\n        graph = stats_for_label.Get(aff4_attr)\n        if (graph is None):\n            return None\n        graph_series.graphs.Append(graph)\n    else:\n        raise AFF4AttributeTypeError(aff4_attr.attribute_type)\n    return graph_series", "docstring": "Fetches the latest graph-series for a client label from the legacy DB.\n\nArgs:\nlabel: Client label to fetch data for.\nreport_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.\ntoken: ACL token to use for reading from the DB.\n\nRaises:\nAFF4AttributeTypeError: If an unexpected report-data type is encountered.\n\nReturns:\nThe graph series for the given label and report type that was last\nwritten to the DB, or None if no series for that label and report-type\nexist.", "source": "codesearchnet"}
{"code": "def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:\n        \n        raise NotImplementedError(\"This method must be defined for each subclass.\")", "docstring": "Pass the appropriate columns through each recoder function sequentially and return the final result.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply recoding logic.\nvalidate (bool): If ``True``, recoded table must pass validation tests.", "source": "juraj-google-style"}
{"code": "def cause_repertoire(self, mechanism, purview):\n    if (not purview):\n        return np.array([1.0])\n    if (not mechanism):\n        return max_entropy_distribution(purview, self.tpm_size)\n    purview = frozenset(purview)\n    joint = np.ones(repertoire_shape(purview, self.tpm_size))\n    joint *= functools.reduce(np.multiply, [self._single_node_cause_repertoire(m, purview) for m in mechanism])\n    return distribution.normalize(joint)", "docstring": "Return the cause repertoire of a mechanism over a purview.\n\nArgs:\nmechanism (tuple[int]): The mechanism for which to calculate the\ncause repertoire.\npurview (tuple[int]): The purview over which to calculate the\ncause repertoire.\n\nReturns:\nnp.ndarray: The cause repertoire of the mechanism over the purview.\n\n.. note::\nThe returned repertoire is a distribution over purview node states,\nnot the states of the whole network.", "source": "codesearchnet"}
{"code": "def ResourcePath(package_name, filepath):\n  \n  \n  \n  \n  if not getattr(sys, \"frozen\", None):\n    target = _GetPkgResources(package_name, filepath)\n    if target and os.access(target, os.R_OK):\n      return target\n\n  \n  \n  \n  \n  target = os.path.join(sys.prefix, filepath)\n  if target and os.access(target, os.R_OK):\n    return target\n\n  return None", "docstring": "Computes a path to the specified package resource.\n\nArgs:\npackage_name: A name of the package where the resource is located.\nfilepath: A path to the resource relative to the package location.\n\nReturns:\nA path to the resource or `None` if the resource cannot be found.", "source": "juraj-google-style"}
{"code": "def AddFilesWithUnknownHashes(\n    client_path_blob_refs,\n    use_external_stores = True\n):\n  \n  hash_id_blob_refs = dict()\n  client_path_hash_id = dict()\n  metadatas = dict()\n\n  all_client_path_blob_refs = list()\n  for client_path, blob_refs in iteritems(client_path_blob_refs):\n    \n    \n    \n    \n    if len(blob_refs) <= 1:\n      if blob_refs:\n        hash_id = rdf_objects.SHA256HashID.FromBytes(\n            blob_refs[0].blob_id.AsBytes())\n      else:\n        hash_id = rdf_objects.SHA256HashID.FromData(b\"\")\n\n      client_path_hash_id[client_path] = hash_id\n      hash_id_blob_refs[hash_id] = blob_refs\n      metadatas[hash_id] = FileMetadata(\n          client_path=client_path, blob_refs=blob_refs)\n    else:\n      for blob_ref in blob_refs:\n        all_client_path_blob_refs.append((client_path, blob_ref))\n\n  client_path_offset = collections.defaultdict(lambda: 0)\n  client_path_sha256 = collections.defaultdict(hashlib.sha256)\n  verified_client_path_blob_refs = collections.defaultdict(list)\n\n  client_path_blob_ref_batches = collection.Batch(\n      items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)\n\n  for client_path_blob_ref_batch in client_path_blob_ref_batches:\n    blob_id_batch = set(\n        blob_ref.blob_id for _, blob_ref in client_path_blob_ref_batch)\n    blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)\n\n    for client_path, blob_ref in client_path_blob_ref_batch:\n      blob = blobs[blob_ref.blob_id]\n      if blob is None:\n        message = \"Could not find one of referenced blobs: {}\".format(\n            blob_ref.blob_id)\n        raise BlobNotFoundError(message)\n\n      offset = client_path_offset[client_path]\n      if blob_ref.size != len(blob):\n        raise ValueError(\n            \"Got conflicting size information for blob %s: %d vs %d.\" %\n            (blob_ref.blob_id, blob_ref.size, len(blob)))\n      if blob_ref.offset != offset:\n        raise ValueError(\n            \"Got conflicting offset information for blob %s: %d vs %d.\" %\n            (blob_ref.blob_id, blob_ref.offset, offset))\n\n      verified_client_path_blob_refs[client_path].append(blob_ref)\n      client_path_offset[client_path] = offset + len(blob)\n      client_path_sha256[client_path].update(blob)\n\n  for client_path in iterkeys(client_path_sha256):\n    sha256 = client_path_sha256[client_path].digest()\n    hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)\n\n    client_path_hash_id[client_path] = hash_id\n    hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]\n\n  data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)\n\n  if use_external_stores:\n    for client_path in iterkeys(verified_client_path_blob_refs):\n      metadatas[client_path_hash_id[client_path]] = FileMetadata(\n          client_path=client_path,\n          blob_refs=verified_client_path_blob_refs[client_path])\n\n    EXTERNAL_FILE_STORE.AddFiles(metadatas)\n\n  return client_path_hash_id", "docstring": "Adds new files consisting of given blob references.\n\nArgs:\nclient_path_blob_refs: A dictionary mapping `db.ClientPath` instances to\nlists of blob references.\nuse_external_stores: A flag indicating if the files should also be added to\nexternal file stores.\n\nReturns:\nA dictionary mapping `db.ClientPath` to hash ids of the file.\n\nRaises:\nBlobNotFoundError: If one of the referenced blobs cannot be found.", "source": "juraj-google-style"}
{"code": "def dvds_current_releases(self, **kwargs):\n        \n        path = self._get_path('dvds_current_releases')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the upcoming movies from the API.\n\nArgs:\npage_limit (optional): number of movies to show per page, default=16\npage (optional): results page number, default=1\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def from_bytes(cls, bt):\n    log.debug('Parsing email from bytes')\n    if six.PY2:\n        raise MailParserEnvironmentError('Parsing from bytes is valid only for Python 3.x version')\n    message = email.message_from_bytes(bt)\n    return cls(message)", "docstring": "Init a new object from bytes.\n\nArgs:\nbt (bytes-like object): raw email as bytes-like object\n\nReturns:\nInstance of MailParser", "source": "codesearchnet"}
{"code": "def _normalize_array(array, domain=(0, 1)):\n    array = np.array(array)\n    array = np.squeeze(array)\n    assert (len(array.shape) <= 3)\n    assert np.issubdtype(array.dtype, np.number)\n    assert (not np.isnan(array).any())\n    (low, high) = (np.min(array), np.max(array))\n    if (domain is None):\n        message = 'No domain specified, normalizing from measured (~%.2f, ~%.2f)'\n        log.debug(message, low, high)\n        domain = (low, high)\n    if ((low < domain[0]) or (high > domain[1])):\n        message = 'Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f}).'\n        log.info(message.format(low, high, domain[0], domain[1]))\n        array = array.clip(*domain)\n    (min_value, max_value) = (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)\n    if np.issubdtype(array.dtype, np.inexact):\n        offset = domain[0]\n        if (offset != 0):\n            array -= offset\n            log.debug('Converting inexact array by subtracting -%.2f.', offset)\n        scalar = (max_value / (domain[1] - domain[0]))\n        if (scalar != 1):\n            array *= scalar\n            log.debug('Converting inexact array by scaling by %.2f.', scalar)\n    return array.clip(min_value, max_value).astype(np.uint8)", "docstring": "Given an arbitrary rank-3 NumPy array, produce one representing an image.\n\nThis ensures the resulting array has a dtype of uint8 and a domain of 0-255.\n\nArgs:\narray: NumPy array representing the image\ndomain: expected range of values in array,\ndefaults to (0, 1), if explicitly set to None will use the array's\nown range of values and normalize them.\n\nReturns:\nnormalized PIL.Image", "source": "codesearchnet"}
{"code": "def normalize(image: np.ndarray, mean: Union[float, Collection[float]], std: Union[float, Collection[float]], data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    if not isinstance(image, np.ndarray):\n        raise ValueError('image must be a numpy array')\n    if input_data_format is None:\n        input_data_format = infer_channel_dimension_format(image)\n    channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format)\n    num_channels = image.shape[channel_axis]\n    if not np.issubdtype(image.dtype, np.floating):\n        image = image.astype(np.float32)\n    if isinstance(mean, Collection):\n        if len(mean) != num_channels:\n            raise ValueError(f'mean must have {num_channels} elements if it is an iterable, got {len(mean)}')\n    else:\n        mean = [mean] * num_channels\n    mean = np.array(mean, dtype=image.dtype)\n    if isinstance(std, Collection):\n        if len(std) != num_channels:\n            raise ValueError(f'std must have {num_channels} elements if it is an iterable, got {len(std)}')\n    else:\n        std = [std] * num_channels\n    std = np.array(std, dtype=image.dtype)\n    if input_data_format == ChannelDimension.LAST:\n        image = (image - mean) / std\n    else:\n        image = ((image.T - mean) / std).T\n    image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n    return image", "docstring": "Normalizes `image` using the mean and standard deviation specified by `mean` and `std`.\n\nimage = (image - mean) / std\n\nArgs:\nimage (`np.ndarray`):\nThe image to normalize.\nmean (`float` or `Collection[float]`):\nThe mean to use for normalization.\nstd (`float` or `Collection[float]`):\nThe standard deviation to use for normalization.\ndata_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the output image. If unset, will use the inferred format from the input.\ninput_data_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If unset, will use the inferred format from the input.", "source": "github-repos"}
{"code": "def batch_decode(self, sequences):\n    char_preds, bpe_preds, wp_preds = sequences\n    batch_size = char_preds.size(0)\n    char_strs, char_scores = self._decode_helper(char_preds, 'char')\n    bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')\n    wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')\n    final_strs = []\n    final_scores = []\n    for i in range(batch_size):\n        scores = [char_scores[i], bpe_scores[i], wp_scores[i]]\n        strs = [char_strs[i], bpe_strs[i], wp_strs[i]]\n        max_score_index = scores.index(max(scores))\n        final_strs.append(strs[max_score_index])\n        final_scores.append(scores[max_score_index])\n    out = {}\n    out['generated_text'] = final_strs\n    out['scores'] = final_scores\n    out['char_preds'] = char_strs\n    out['bpe_preds'] = bpe_strs\n    out['wp_preds'] = wp_strs\n    return out", "docstring": "Convert a list of lists of token ids into a list of strings by calling decode.\n\nArgs:\nsequences (`torch.Tensor`):\nList of tokenized input ids.\n\nReturns:\n`Dict[str, any]`: Dictionary of all the outputs of the decoded results.\ngenerated_text (`List[str]`): The final results after fusion of char, bpe, and wp. scores\n(`List[float]`): The final scores after fusion of char, bpe, and wp. char_preds (`List[str]`): The list\nof character decoded sentences. bpe_preds (`List[str]`): The list of bpe decoded sentences. wp_preds\n(`List[str]`): The list of wp decoded sentences.\n\nThis method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please\nrefer to the docstring of this method for more information.", "source": "github-repos"}
{"code": "def create_local_server(config=None, start=True):\n    return Server({'localhost': ['localhost:0']}, protocol='grpc', config=config, start=start)", "docstring": "Creates a new single-process cluster running on the local host.\n\nThis method is a convenience wrapper for creating a\n`tf.distribute.Server` with a `tf.train.ServerDef` that specifies a\nsingle-process cluster containing a single task in a job called\n`\"local\"`.\n\nArgs:\nconfig: (Options.) A `tf.compat.v1.ConfigProto` that specifies default\nconfiguration options for all sessions that run on this server.\nstart: (Optional.) Boolean, indicating whether to start the server after\ncreating it. Defaults to `True`.\n\nReturns:\nA local `tf.distribute.Server`.", "source": "github-repos"}
{"code": "def _translate_fhir_path_expression(self, builder: expressions.Builder) -> Tuple[Optional[str], Optional[str]]:\n    try:\n        result = self._bq_interpreter.visit(builder.node, use_resource_alias=False)\n        expression = f'{result.as_operand()}'\n        expression_as_array = f'ARRAY(SELECT {result.sql_alias}\\nFROM {result.to_subquery()}\\nWHERE {result.sql_alias} IS NOT NULL)'\n        return (expression, expression_as_array)\n    except Exception as e:\n        self._error_reporter.report_fhir_path_error(self._abs_path_invocation(builder), str(builder), self._error_message_for_exception(e))\n        return (None, None)", "docstring": "Returns a tuple containing both the SQL translation of a FHIRPath expression with array wrapping and the SQL translation without array wrapping.\n\nIf an error is encountered during encoding, the associated error reporter\nwill be notified, and this method will return [`None`, `None`].\n\nArgs:\nbuilder: Builder containing the information to be encoded to Standard SQL.\n\nReturns:\nA tuple (expression, expression_as_array) where `expression` is the SQL\ntranslation of the FHIRPath expression without array wrapping and\n`expression_as_array` is the SQL translation with array wrapping.", "source": "github-repos"}
{"code": "def _validate_isvalid_history(self, isvalid_history, field, value):\n    history_type = value['type']\n    if history_type.endswith('emission'):\n        history_type = 'emission'\n    elif history_type.endswith('absorption'):\n        history_type = 'absorption'\n    quantity = (1.0 * units(value['quantity']['units']))\n    try:\n        quantity.to(property_units[history_type])\n    except pint.DimensionalityError:\n        self._error(field, ('incompatible units; should be consistent with ' + property_units[history_type]))\n    time = (1.0 * units(value['time']['units']))\n    try:\n        time.to(property_units['time'])\n    except pint.DimensionalityError:\n        self._error(field, ('incompatible units; should be consistent with ' + property_units['time']))\n    n_cols = len(value['values'][0])\n    max_cols = (max(value['time']['column'], value['quantity']['column'], value.get('uncertainty', {}).get('column', 0)) + 1)\n    if (n_cols > max_cols):\n        self._error(field, 'too many columns in the values')\n    elif (n_cols < max_cols):\n        self._error(field, 'not enough columns in the values')", "docstring": "Checks that the given time history is properly formatted.\n\nArgs:\nisvalid_history (`bool`): flag from schema indicating units to be checked.\nfield (`str`): property associated with history in question.\nvalue (`dict`): dictionary of values from file associated with this property.\n\nThe rule's arguments are validated against this schema:\n{'isvalid_history': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "codesearchnet"}
{"code": "def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):\n    with ops.name_scope('suppression_loop_body'):\n        num_tiles = array_ops.shape(boxes)[1] \n        batch_size = array_ops.shape(boxes)[0]\n\n        def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):\n            return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size)\n        box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0], [batch_size, tile_size, 4])\n        _, box_slice, _, _ = while_loop.while_loop(lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, cross_suppression_func, [boxes, box_slice, iou_threshold, constant_op.constant(0)])\n        iou = _bbox_overlap(box_slice, box_slice)\n        mask = array_ops.expand_dims(array_ops.reshape(math_ops.range(tile_size), [1, -1]) > array_ops.reshape(math_ops.range(tile_size), [-1, 1]), 0)\n        iou *= math_ops.cast(math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype)\n        suppressed_iou, _, _, _ = while_loop.while_loop(lambda _iou, loop_condition, _iou_sum, _: loop_condition, _self_suppression, [iou, constant_op.constant(True), math_ops.reduce_sum(iou, [1, 2]), iou_threshold])\n        suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0\n        box_slice *= array_ops.expand_dims(1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2)\n        mask = array_ops.reshape(math_ops.cast(math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])\n        boxes = array_ops.tile(array_ops.expand_dims(box_slice, [1]), [1, num_tiles, 1, 1]) * mask + array_ops.reshape(boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask)\n        boxes = array_ops.reshape(boxes, [batch_size, -1, 4])\n        output_size += math_ops.reduce_sum(math_ops.cast(math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1])\n    return (boxes, iou_threshold, output_size, idx + 1)", "docstring": "Process boxes in the range [idx*tile_size, (idx+1)*tile_size).\n\nArgs:\nboxes: a tensor with a shape of [batch_size, anchors, 4].\niou_threshold: a float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU.\noutput_size: an int32 tensor of size [batch_size]. Representing the number\nof selected boxes for each batch.\nidx: an integer scalar representing induction variable.\ntile_size: an integer representing the number of boxes in a tile\n\nReturns:\nboxes: updated boxes.\niou_threshold: pass down iou_threshold to the next iteration.\noutput_size: the updated output_size.\nidx: the updated induction variable.", "source": "github-repos"}
{"code": "def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):\n    pdb_id = pdb_id.lower()\n    file_type = file_type.lower()\n    file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']\n    if (file_type not in file_types):\n        raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')\n    if (file_type == 'mmtf'):\n        file_type = 'mmtf.gz'\n    if file_type.endswith('.gz'):\n        gzipped = True\n    else:\n        gzipped = False\n    if (file_type == 'mmcif'):\n        file_type = 'cif'\n    if only_header:\n        folder = 'header'\n        outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))\n    else:\n        folder = 'download'\n        outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        if ((file_type == 'mmtf.gz') or (file_type == 'mmtf')):\n            mmtf_api = '1.0'\n            download_link = 'http:\n        else:\n            download_link = 'http:\n        urlretrieve(download_link, outfile)\n        if gzipped:\n            outfile = ssbio.utils.gunzip_file(infile=outfile, outfile=outfile.strip('.gz'), outdir=outdir, delete_original=False, force_rerun_flag=force_rerun)\n        log.debug('{}: saved structure file'.format(outfile))\n    else:\n        if (file_type == 'mmtf.gz'):\n            outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))\n        log.debug('{}: structure file already saved'.format(outfile))\n    return outfile", "docstring": "Download a structure from the RCSB PDB by ID. Specify the file type desired.\n\nArgs:\npdb_id: PDB ID\nfile_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz\noutdir: Optional output directory\nonly_header: If only the header file should be downloaded\nforce_rerun: If the file should be downloaded again even if it exists\n\nReturns:\nstr: Path to outfile", "source": "codesearchnet"}
{"code": "def kill(self, container, signal=None):\n    url = self._url('/containers/{0}/kill', container)\n    params = {}\n    if (signal is not None):\n        if (not isinstance(signal, six.string_types)):\n            signal = int(signal)\n        params['signal'] = signal\n    res = self._post(url, params=params)\n    self._raise_for_status(res)", "docstring": "Kill a container or send a signal to a container.\n\nArgs:\ncontainer (str): The container to kill\nsignal (str or int): The signal to send. Defaults to ``SIGKILL``\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def streaming_restore(status, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    if isinstance(status, NameBasedSaverStatus):\n        raise NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.')\n    status.run_restore_ops(session=session)\n    status._checkpoint.new_restore_ops_callback = lambda ops: session.run(ops, feed_dict=status._feed_dict)", "docstring": "When graph building, runs restore ops as soon as they come in.\n\nArgs:\nstatus: A _LoadStatus objects from an object-based saver's restore().\nStreaming restore from name-based checkpoints is not currently supported.\nsession: A session to run new restore ops in.", "source": "github-repos"}
{"code": "def __init__(self, element_id, driver):\n        \n        self.element_id = str(element_id)\n        self._driver = driver", "docstring": "Initialize the WebElement\n\nArgs:\nelement_id(str): The UDID returned by remote servers.\ndriver(WebDriver): The WebDriver Object.", "source": "juraj-google-style"}
{"code": "def __init__(self, sink: DataSink, store_type: Type[S], transform: Callable[[T], S]) -> None:\n        \n        self._sink = sink\n        self._store_type = store_type\n        self._transform = transform", "docstring": "Initializes a handler for a data sink.\n\nArgs:\nsink: The data sink.\nstore_type: ???\ntransform: ???", "source": "juraj-google-style"}
{"code": "def user_avatar_url(username, size=64, default='retro'):\n    openid = 'http:\n    return libravatar_url(openid=openid, size=size, default=default)", "docstring": "Get the avatar URL of the provided Fedora username.\n\nThe URL is returned from the Libravatar service.\n\nArgs:\nusername (str): The username to get the avatar of.\nsize (int): Size of the avatar in pixels (it's a square).\ndefault (str): Default avatar to return if not found.\nReturns:\nstr: The URL to the avatar image.", "source": "codesearchnet"}
{"code": "def print_stats(self, reset=True):\n    if (not self.ncalls):\n        return\n    stats = self.stats\n    code = self.fn.__code__\n    print('--- Function Profiling ---')\n    print('File \"{}\", line {}, function {}'.format(code.co_filename, code.co_firstlineno, self.fn.__name__))\n    stats.sort_stats(*self.sort_keys)\n    stats.print_stats(*self.print_restrictions)\n    print('--------------------------')\n    if reset:\n        self.reset_stats()", "docstring": "Manually print profiling result.\n\nArgs:\nreset (bool): If False is specified, the profiling statistics so\nfar is maintained. If ``True`` (default),\n:obj:`~reset_stats`\nis called to reset the profiling statistics.", "source": "codesearchnet"}
{"code": "def ephemeris(self, **kwargs):\n    for orb in self.iter(inclusive=True, **kwargs):\n        (yield orb)", "docstring": "Generator giving the propagation of the orbit at different dates\n\nArgs:\nstart (Date)\nstop (Date or timedelta)\nstep (timedelta)\nYield:\nOrbit", "source": "codesearchnet"}
{"code": "def create_predictable_zip(path):\n    \n    \n    if os.path.isdir(path): \n        paths = []\n        for root, directories, filenames in os.walk(path):\n            paths += [os.path.join(root, filename)[len(path)+1:] for filename in filenames]\n        reader = lambda x: _read_file(os.path.join(path, x))\n    \n    elif os.path.isfile(path) and os.path.splitext(path)[1] == \".zip\":\n        inputzip = zipfile.ZipFile(path)\n        paths = inputzip.namelist()\n        reader = lambda x: inputzip.read(x)\n    else:\n        raise Exception(\"The `path` must either point to a directory or to a zip file.\")\n\n    \n    zippathfd, zippath = tempfile.mkstemp(suffix=\".zip\")\n\n    with zipfile.ZipFile(zippath, \"w\") as outputzip:\n        \n        for filepath in sorted(paths):\n            write_file_to_zip_with_neutral_metadata(outputzip, filepath, reader(filepath))\n        os.fdopen(zippathfd).close()\n    return zippath", "docstring": "Create a zip file with predictable sort order and metadata so that MD5 will\nstay consistent if zipping the same content twice.\nArgs:\npath (str): absolute path either to a directory to zip up, or an existing zip file to convert.\nReturns: path (str) to the output zip file", "source": "juraj-google-style"}
{"code": "def tokeninfo(self, jwt):\n    warnings.warn('/tokeninfo will be deprecated in future releases', DeprecationWarning)\n    return self.post(url='https:", "docstring": "Returns user profile based on the user's jwt\n\nValidates a JSON Web Token (signature and expiration) and returns the\nuser information associated with the user id (sub property) of\nthe token.\n\nArgs:\njwt (str): User's jwt\n\nReturns:\nThe user profile.", "source": "codesearchnet"}
{"code": "def make_pool_tests(pool_op_in, allow_fully_quantize=False):\n    pool_op = pool_op_in\n\n    def f(options, expected_tf_failures=0):\n        \n        test_parameters = [{'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [False], 'quant_16x8': [False]}, {'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [False]}, {'ksize': [[1, 1, 1, 1]], 'strides': [[1, 1, 1, 1]], 'input_shape': [[1, 1, 1, 1]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [True]}]\n        if not allow_fully_quantize:\n            test_parameters = [test_parameter for test_parameter in test_parameters if True not in test_parameter['fully_quantize']]\n\n        def build_graph(parameters):\n            input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])\n            out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])\n            return ([input_tensor], [out])\n\n        def build_inputs(parameters, sess, inputs, outputs):\n            if allow_fully_quantize:\n                input_values = create_tensor_data(tf.float32, parameters['input_shape'], min_value=-1, max_value=1)\n            else:\n                input_values = create_tensor_data(tf.float32, parameters['input_shape'])\n            return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))\n        make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=expected_tf_failures)\n    return f", "docstring": "Make a set of tests to do average pooling.\n\nArgs:\npool_op_in: TensorFlow pooling operation to test  i.e. `tf.nn.avg_pool2d`.\nallow_fully_quantize: bool, whether fully_quantize is allowed.\n\nReturns:\nA function representing the true generator (after curried pool_op_in).", "source": "github-repos"}
{"code": "def dict_get_path(data, path, default=None):\n    \n\n    keys = path.split(\".\")\n    for k in keys:\n        if type(data) == list:\n            found = False\n            for item in data:\n                name = item.get(\"name\", item.get(\"type\"))\n                if name == k:\n                    found = True\n                    data = item\n                    break\n            if not found:\n                return default\n        elif type(data) == dict:\n            if k in data:\n                data = data[k]\n            else:\n                return default\n        else:\n            return default\n    return data", "docstring": "Returns the value inside nested structure of data located\nat period delimited path\n\nWhen traversing a list, as long as that list is containing objects of\ntype dict, items in that list will have their \"name\" and \"type\" values\ntested against the current key in the path.\n\nArgs:\ndata (dict or list): data to traverse\npath (str): '.' delimited string\n\nKwargs:\ndefault: value to return if path does not exist", "source": "juraj-google-style"}
{"code": "def NeedsCustomDescription(component):\n    type_ = type(component)\n    if type_ in (str, int, bytes) or type_ in (float, complex, bool) or type_ in (dict, tuple, list, set, frozenset):\n        return True\n    return False", "docstring": "Whether the component should use a custom description and summary.\n\nComponents of primitive type, such as ints, floats, dicts, lists, and others\nhave messy builtin docstrings. These are inappropriate for display as\ndescriptions and summaries in a CLI. This function determines whether the\nprovided component has one of these docstrings.\n\nNote that an object such as `int` has the same docstring as an int like `3`.\nThe docstring is OK for `int`, but is inappropriate as a docstring for `3`.\n\nArgs:\ncomponent: The component of interest.\nReturns:\nWhether the component should use a custom description and summary.", "source": "github-repos"}
{"code": "class FocalNetLayer(nn.Module):\n\n    def __init__(self, config, index, dim, input_resolution, drop_path=0.0):\n        super().__init__()\n        self.config = config\n        self.dim = dim\n        self.input_resolution = input_resolution\n        self.drop = config.hidden_dropout_prob\n        self.use_post_layernorm = config.use_post_layernorm\n        self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)\n        self.modulation = FocalNetModulation(config=config, index=index, dim=dim, projection_dropout=self.drop)\n        self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n        self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)\n        mlp_hidden_dim = int(dim * config.mlp_ratio)\n        self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)\n        self.gamma_1 = 1.0\n        self.gamma_2 = 1.0\n        if config.use_layerscale:\n            self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True)\n            self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True)\n\n    def forward(self, hidden_state, input_dimensions):\n        height, width = input_dimensions\n        batch_size, _, num_channels = hidden_state.shape\n        shortcut = hidden_state\n        hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)\n        hidden_state = hidden_state.view(batch_size, height, width, num_channels)\n        hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)\n        hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)\n        hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)\n        hidden_state = hidden_state + self.drop_path(self.gamma_2 * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state))))\n        return hidden_state", "docstring": "Focal Modulation Network layer (block).\n\nArgs:\nconfig (`FocalNetConfig`):\nModel config.\nindex (`int`):\nLayer index.\ndim (`int`):\nNumber of input channels.\ninput_resolution (`Tuple[int]`):\nInput resolution.\ndrop_path (`float`, *optional*, defaults to 0.0):\nStochastic depth rate.", "source": "github-repos"}
{"code": "def add_email(self, email_path, source, reference, method='', upload_type='raw', campaign='', confidence='', description='', bucket_list=[], password=''):\n    if (not os.path.isfile(email_path)):\n        log.error('{} is not a file'.format(email_path))\n        return None\n    with open(email_path, 'rb') as fdata:\n        data = {'api_key': self.api_key, 'username': self.username, 'source': source, 'reference': reference, 'method': method, 'upload_type': upload_type, 'campaign': campaign, 'confidence': confidence, 'bucket_list': bucket_list, 'description': description}\n        if password:\n            data['password'] = password\n        r = requests.post('{0}/emails/'.format(self.url), data=data, files={'filedata': fdata}, verify=self.verify, proxies=self.proxies)\n        if (r.status_code == 200):\n            result_data = json.loads(r.text)\n            return result_data\n        else:\n            print('Error with status code {0} and message {1}'.format(r.status_code, r.text))\n    return None", "docstring": "Add an email object to CRITs. Only RAW, MSG, and EML are supported\ncurrently.\n\nArgs:\nemail_path: The path on disk of the email.\nsource: Source of the information\nreference: A reference where more information can be found\nmethod: The method for obtaining the email.\nupload_type: 'raw', 'eml', or 'msg'\ncampaign: An associated campaign\nconfidence: The campaign confidence\ndescription: A description of the email\nbucket_list: A list of bucket list items to add\npassword: A password for a 'msg' type.\nReturns:\nA JSON email object from CRITs or None if there was an error.", "source": "codesearchnet"}
{"code": "def __init__(self, engine):\n        \n        super(StatikJinjaTemplateProvider, self).__init__(engine)\n        project = engine.project\n\n        logger.debug(\"Instantiating Jinja2 template provider\")\n\n        \n        self.templatetags_path = os.path.join(project.path, project.TEMPLATETAGS_DIR)\n        if os.path.exists(self.templatetags_path) and os.path.isdir(self.templatetags_path):\n            \n            import_python_modules_by_path(self.templatetags_path)\n\n        extensions = [\n            'statik.jinja2ext.StatikUrlExtension',\n            'statik.jinja2ext.StatikAssetExtension',\n            'statik.jinja2ext.StatikLoremIpsumExtension',\n            'statik.jinja2ext.StatikTemplateTagsExtension',\n            'jinja2.ext.do',\n            'jinja2.ext.loopcontrols',\n            'jinja2.ext.with_',\n            'jinja2.ext.autoescape',\n        ]\n\n        jinja2_config = project.config.vars.get('jinja2', dict())\n        extensions.extend(jinja2_config.get('extensions', list()))\n\n        self.env = jinja2.Environment(\n            loader=jinja2.FileSystemLoader(\n                engine.template_paths,\n                encoding=project.config.encoding\n            ),\n            extensions=extensions\n        )\n\n        if templatetags.store.filters:\n            logger.debug(\n                \"Loaded custom template tag filters: %s\",\n                \", \".join(templatetags.store.filters)\n            )\n            self.env.filters.update(templatetags.store.filters)\n\n        \n        self.env.statik_views = project.views\n        self.env.statik_base_url = project.config.base_path\n        self.env.statik_base_asset_url = add_url_path_component(\n            project.config.base_path,\n            project.config.assets_dest_path\n        )", "docstring": "Constructor.\n\nArgs:\nengine: The StatikTemplateEngine to which this template provider belongs.", "source": "juraj-google-style"}
{"code": "def PrivateKeyFromNEP2(nep2_key, passphrase):\n        \n        if not nep2_key or len(nep2_key) != 58:\n            raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key)))\n\n        ADDRESS_HASH_SIZE = 4\n        ADDRESS_HASH_OFFSET = len(NEP_FLAG) + len(NEP_HEADER)\n\n        try:\n            decoded_key = base58.b58decode_check(nep2_key)\n        except Exception as e:\n            raise ValueError(\"Invalid nep2_key\")\n\n        address_hash = decoded_key[ADDRESS_HASH_OFFSET:ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE]\n        encrypted = decoded_key[-32:]\n\n        pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')\n        derived = scrypt.hash(pwd_normalized, address_hash,\n                              N=SCRYPT_ITERATIONS,\n                              r=SCRYPT_BLOCKSIZE,\n                              p=SCRYPT_PARALLEL_FACTOR,\n                              buflen=SCRYPT_KEY_LEN_BYTES)\n\n        derived1 = derived[:32]\n        derived2 = derived[32:]\n\n        cipher = AES.new(derived2, AES.MODE_ECB)\n        decrypted = cipher.decrypt(encrypted)\n        private_key = xor_bytes(decrypted, derived1)\n\n        \n        kp_new = KeyPair(priv_key=private_key)\n        kp_new_address = kp_new.GetAddress()\n        kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode(\"utf-8\")).digest()\n        kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest()\n        kp_new_address_hash = kp_new_address_hash_tmp2[:4]\n        if (kp_new_address_hash != address_hash):\n            raise ValueError(\"Wrong passphrase\")\n\n        return private_key", "docstring": "Gets the private key from a NEP-2 encrypted private key\n\nArgs:\nnep2_key (str): The nep-2 encrypted private key\npassphrase (str): The password to encrypt the private key with, as unicode string\n\nReturns:\nbytes: The private key", "source": "juraj-google-style"}
{"code": "def is_native_ion_gate(gate: ops.Gate) -> bool:\n    \n    return isinstance(gate, (ops.XXPowGate,\n                             ops.MeasurementGate,\n                             ops.XPowGate,\n                             ops.YPowGate,\n                             ops.ZPowGate))", "docstring": "Check if a gate is a native ion gate.\n\nArgs:\ngate: Input gate.\n\nReturns:\nTrue if the gate is native to the ion, false otherwise.", "source": "juraj-google-style"}
{"code": "def _relative_attention_inner(x, y, z, transpose):\n    batch_size = tf.shape(x)[0]\n    heads = x.get_shape().as_list()[1]\n    length = tf.shape(x)[2]\n    xy_matmul = tf.matmul(x, y, transpose_b=transpose)\n    x_t = tf.transpose(x, [2, 0, 1, 3])\n    x_t_r = tf.reshape(x_t, [length, (heads * batch_size), (- 1)])\n    x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)\n    x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, (- 1)])\n    x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])\n    return (xy_matmul + x_tz_matmul_r_t)", "docstring": "Relative position-aware dot-product attention inner calculation.\n\nThis batches matrix multiply calculations to avoid unnecessary broadcasting.\n\nArgs:\nx: Tensor with shape [batch_size, heads, length or 1, length or depth].\ny: Tensor with shape [batch_size, heads, length or 1, depth].\nz: Tensor with shape [length or 1, length, depth].\ntranspose: Whether to transpose inner matrices of y and z. Should be true if\nlast dimension of x is depth, not length.\n\nReturns:\nA Tensor with shape [batch_size, heads, length, length or depth].", "source": "codesearchnet"}
{"code": "def build_info_string(info):\n    \n    info_list = []\n    \n    for annotation in info:\n        \n        if info[annotation]:\n            info_list.append('='.join([annotation, ','.join(info[annotation])]))\n        else:\n            info_list.append(annotation)\n    \n    return ';'.join(info_list)", "docstring": "Build a new vcf INFO string based on the information in the info_dict.\n\nThe info is a dictionary with vcf info keys as keys and lists of vcf values\nas values. If there is no value False is value in info\n\nArgs:\ninfo (dict): A dictionary with information from the vcf file\n\nReturns:\nString: A string that is on the proper vcf format for the INFO column", "source": "juraj-google-style"}
{"code": "def _get_ref_args(self, node):\n    op_def = op_def_registry.get(node.op)\n    if op_def is None:\n        return []\n    ref_args = []\n    for i, output_arg in enumerate(op_def.output_arg):\n        if output_arg.is_ref:\n            arg_name = node.name if i == 0 else '%s:%d' % (node.name, i)\n            ref_args.append(arg_name)\n    return ref_args", "docstring": "Determine whether an input of an op is ref-type.\n\nArgs:\nnode: A `NodeDef`.\n\nReturns:\nA list of the arg names (as strs) that are ref-type.", "source": "github-repos"}
{"code": "async def puts(self, items, seqn=None):\n    size = 0\n    for chunk in s_common.chunks(items, 1000):\n        metrics = self._items.save(chunk)\n        self._metrics.add(metrics)\n        (await self.fire('cryotank:puts', numrecords=len(chunk)))\n        size += len(chunk)\n        (await asyncio.sleep(0))\n    if (seqn is not None):\n        (iden, offs) = seqn\n        self.setOffset(iden, (offs + size))\n    return size", "docstring": "Add the structured data from items to the CryoTank.\n\nArgs:\nitems (list):  A list of objects to store in the CryoTank.\nseqn (iden, offs): An iden / offset pair to record.\n\nReturns:\nint: The ending offset of the items or seqn.", "source": "codesearchnet"}
{"code": "def visualize_conv_activations(activation, name):\n    \n    import math\n    with tf.name_scope('visualize_act_' + name):\n        _, h, w, c = activation.get_shape().as_list()\n        rows = []\n        c_per_row = int(math.sqrt(c))\n        for y in range(0, c - c_per_row, c_per_row):\n            row = activation[:, :, :, y:y + c_per_row]  \n            cols = tf.unstack(row, axis=3)              \n            row = tf.concat(cols, 1)\n            rows.append(row)\n\n        viz = tf.concat(rows, 2)\n    tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))", "docstring": "Visualize activations for convolution layers.\n\nRemarks:\nThis tries to place all activations into a square.\n\nArgs:\nactivation: tensor with the activation [B,H,W,C]\nname: label for tensorboard\n\nReturns:\nimage of almost all activations", "source": "juraj-google-style"}
{"code": "def memory_read32(self, addr, num_words, zone=None):\n    return self.memory_read(addr, num_words, zone=zone, nbits=32)", "docstring": "Reads memory from the target system in units of 32-bits.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_words (int): number of words to read\nzone (str): memory zone to read from\n\nReturns:\nList of words read from the target system.\n\nRaises:\nJLinkException: if memory could not be read", "source": "codesearchnet"}
{"code": "def _process_health_pill_value(self, wall_time, step, device_name, output_slot, node_name, tensor_proto, node_name_set=None):\n    if (node_name_set and (node_name not in node_name_set)):\n        return None\n    elements = list(tensor_util.make_ndarray(tensor_proto))\n    return HealthPillEvent(wall_time=wall_time, step=step, device_name=device_name, output_slot=output_slot, node_name=node_name, dtype=repr(tf.as_dtype(elements[12])), shape=elements[14:], value=elements)", "docstring": "Creates a HealthPillEvent containing various properties of a health pill.\n\nArgs:\nwall_time: The wall time in seconds.\nstep: The session run step of the event.\ndevice_name: The name of the node's device.\noutput_slot: The numeric output slot.\nnode_name: The name of the node (without the output slot).\ntensor_proto: A tensor proto of data.\nnode_name_set: An optional set of node names that are relevant. If not\nprovided, no filtering by relevance occurs.\n\nReturns:\nAn event_accumulator.HealthPillEvent. Or None if one could not be created.", "source": "codesearchnet"}
{"code": "def build_vocab(self, texts, verbose=1, **kwargs):\n        \n        if self.has_vocab:\n            logger.warn(\n                \"Tokenizer already has existing vocabulary. Overriding and building new vocabulary.\")\n\n        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)\n        count_tracker = utils._CountTracker()\n\n        self._token_counts.clear()\n        self._num_texts = len(texts)\n\n        for token_data in self.token_generator(texts, **kwargs):\n            indices, token = token_data[:-1], token_data[-1]\n            count_tracker.update(indices)\n            self._token_counts[token] += 1\n\n            \n            progbar.update(indices[0])\n\n        \n        self.create_token_indices(self._token_counts.keys())\n\n        \n        count_tracker.finalize()\n        self._counts = count_tracker.counts\n        progbar.update(len(texts))", "docstring": "Builds the internal vocabulary and computes various statistics.\n\nArgs:\ntexts: The list of text items to encode.\nverbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)\n**kwargs: The kwargs for `token_generator`.", "source": "juraj-google-style"}
{"code": "def on_smart_contract_created(self, sc_event: SmartContractEvent):\n        \n        if isinstance(sc_event.contract, ContractState):\n            if not sc_event.test_mode:\n                sc_event.CheckIsNEP5()\n                if sc_event.token:\n                    self._new_contracts_to_write.append(sc_event)", "docstring": "Listener for SmartContractEvent\nArgs:\nsc_event (SmartContractEvent): event to check and see if it contains NEP5Token created", "source": "juraj-google-style"}
{"code": "def _extract_hunt_results(self, output_file_path):\n    collection_paths = []\n    client_ids = set()\n    client_id_to_fqdn = {}\n    hunt_dir = None\n    try:\n        with zipfile.ZipFile(output_file_path) as archive:\n            items = archive.infolist()\n            for f in items:\n                if (not hunt_dir):\n                    hunt_dir = f.filename.split('/')[0]\n                if (f.filename.split('/')[(- 1)] == 'client_info.yaml'):\n                    (client_id, fqdn) = self._get_client_fqdn(archive.read(f))\n                    client_id_to_fqdn[client_id] = fqdn\n                    continue\n                client_id = f.filename.split('/')[1]\n                if client_id.startswith('C.'):\n                    if (client_id not in client_ids):\n                        client_directory = os.path.join(self.output_path, hunt_dir, client_id)\n                        collection_paths.append((client_id, client_directory))\n                        client_ids.add(client_id)\n                    try:\n                        archive.extract(f, self.output_path)\n                    except KeyError as exception:\n                        print('Extraction error: {0:s}'.format(exception))\n                        return []\n    except OSError as exception:\n        msg = 'Error manipulating file {0:s}: {1!s}'.format(output_file_path, exception)\n        self.state.add_error(msg, critical=True)\n        return []\n    except zipfile.BadZipfile as exception:\n        msg = 'Bad zipfile {0:s}: {1!s}'.format(output_file_path, exception)\n        self.state.add_error(msg, critical=True)\n        return []\n    try:\n        os.remove(output_file_path)\n    except OSError as exception:\n        print('Output path {0:s} could not be removed: {1:s}'.format(output_file_path, exception))\n    fqdn_collection_paths = []\n    for (client_id, path) in collection_paths:\n        fqdn = client_id_to_fqdn.get(client_id, client_id)\n        fqdn_collection_paths.append((fqdn, path))\n    if (not fqdn_collection_paths):\n        self.state.add_error('Nothing was extracted from the hunt archive', critical=True)\n        return []\n    return fqdn_collection_paths", "docstring": "Open a hunt output archive and extract files.\n\nArgs:\noutput_file_path: The path where the hunt archive is downloaded to.\n\nReturns:\nlist: tuples containing:\nstr: The name of the client from where the files were downloaded.\nstr: The directory where the files were downloaded to.", "source": "codesearchnet"}
{"code": "def prod(x, axis=None, keepdims=False):\n    from .function_bases import prod as prod_base\n    if (axis is None):\n        axis = range(x.ndim)\n    elif (not hasattr(axis, '__iter__')):\n        axis = [axis]\n    return prod_base(x, axis, keepdims)", "docstring": "Reduction along axes with product operation.\n\nArgs:\nx (Variable): An input variable.\naxis (None, int or tuple of ints): Axis or axes along which product is\ncalculated. Passing the default value `None` will reduce all dimensions.\nkeepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.\n\nReturns:\n~nnabla.Variable: N-D array.\n\nNote:\nBackward computation is not accurate in a zero value input.", "source": "codesearchnet"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        writer.write(self.to_json_string(use_diff=use_diff))", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (`str` or `os.PathLike`):\nPath to the JSON file in which this configuration instance's parameters will be saved.\nuse_diff (`bool`, *optional*, defaults to `True`):\nIf set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\nis serialized to JSON file.", "source": "github-repos"}
{"code": "def _get_task_id(source):\n    \n    if type(source) is ray.actor.ActorHandle:\n        return source._ray_actor_id\n    else:\n        if type(source) is ray.TaskID:\n            return source\n        else:\n            return ray._raylet.compute_task_id(source)", "docstring": "Return the task id associated to the generic source of the signal.\n\nArgs:\nsource: source of the signal, it can be either an object id returned\nby a task, a task id, or an actor handle.\n\nReturns:\n- If source is an object id, return id of task which creted object.\n- If source is an actor handle, return id of actor's task creator.\n- If source is a task id, return same task id.", "source": "juraj-google-style"}
{"code": "def squad_v2_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:\n    \n    EM_total = sum(normalize_answer(prediction) in map(normalize_answer, ground_truth)\n                   for ground_truth, prediction in zip(y_true, y_predicted))\n    return 100 * EM_total / len(y_true) if len(y_true) > 0 else 0", "docstring": "Calculates Exact Match score between y_true and y_predicted\nEM score uses the best matching y_true answer:\nif y_pred equal at least to one answer in y_true then EM = 1, else EM = 0\n\nThe same as in SQuAD-v2.0\n\nArgs:\ny_true: list of correct answers (correct answers are represented by list of strings)\ny_predicted: list of predicted answers\n\nReturns:\nexact match score : float", "source": "juraj-google-style"}
{"code": "def _destructively_move(self, dest_doc):\n        \n\n        if dest_doc is self:\n            raise RuntimeError(\"Attempted to overwrite a document with itself\")\n\n        dest_doc.clear()\n        \n        \n        \n        roots = []\n        self._push_all_models_freeze()\n        try:\n            while self.roots:\n                r = next(iter(self.roots))\n                self.remove_root(r)\n                roots.append(r)\n        finally:\n            self._pop_all_models_freeze()\n        for r in roots:\n            if r.document is not None:\n                raise RuntimeError(\"Somehow we didn't detach %r\" % (r))\n        if len(self._all_models) != 0:\n            raise RuntimeError(\"_all_models still had stuff in it: %r\" % (self._all_models))\n        for r in roots:\n            dest_doc.add_root(r)\n\n        dest_doc.title = self.title", "docstring": "Move all data in this doc to the dest_doc, leaving this doc empty.\n\nArgs:\ndest_doc (Document) :\nThe Bokeh document to populate with data from this one\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def movie(self, **kwargs):\n    path = self._get_path('movie')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Search for movies by title.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\ninclude_adult: (optional) Toggle the inclusion of adult titles.\nExpected value is True or False.\nyear: (optional) Filter the results release dates to matches that\ninclude this value.\nprimary_release_year: (optional) Filter the results so that only\nthe primary release dates have this value.\nsearch_type: (optional) By default, the search type is 'phrase'.\nThis is almost guaranteed the option you will want.\nIt's a great all purpose search type and by far the\nmost tuned for every day querying. For those wanting\nmore of an \"autocomplete\" type search, set this\noption to 'ngram'.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def remove(self, **kwargs):\n    return self.client.api.remove_container(self.id, **kwargs)", "docstring": "Remove this container. Similar to the ``docker rm`` command.\n\nArgs:\nv (bool): Remove the volumes associated with the container\nlink (bool): Remove the specified link and not the underlying\ncontainer\nforce (bool): Force the removal of a running container (uses\n``SIGKILL``)\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _maybe_refresh_metadata(self, wakeup=False):\n    ttl = self.cluster.ttl()\n    wait_for_in_progress_ms = (self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0)\n    metadata_timeout = max(ttl, wait_for_in_progress_ms)\n    if (metadata_timeout > 0):\n        return metadata_timeout\n    node_id = self.least_loaded_node()\n    if (node_id is None):\n        log.debug('Give up sending metadata request since no node is available')\n        return self.config['reconnect_backoff_ms']\n    if self._can_send_request(node_id):\n        topics = list(self._topics)\n        if ((not topics) and self.cluster.is_bootstrap(node_id)):\n            topics = list(self.config['bootstrap_topics_filter'])\n        if (self.cluster.need_all_topic_metadata or (not topics)):\n            topics = ([] if (self.config['api_version'] < (0, 10)) else None)\n        api_version = (0 if (self.config['api_version'] < (0, 10)) else 1)\n        request = MetadataRequest[api_version](topics)\n        log.debug('Sending metadata request %s to node %s', request, node_id)\n        future = self.send(node_id, request, wakeup=wakeup)\n        future.add_callback(self.cluster.update_metadata)\n        future.add_errback(self.cluster.failed_update)\n        self._metadata_refresh_in_progress = True\n\n        def refresh_done(val_or_error):\n            self._metadata_refresh_in_progress = False\n        future.add_callback(refresh_done)\n        future.add_errback(refresh_done)\n        return self.config['request_timeout_ms']\n    if self._connecting:\n        return self.config['reconnect_backoff_ms']\n    if self.maybe_connect(node_id, wakeup=wakeup):\n        log.debug('Initializing connection to node %s for metadata request', node_id)\n        return self.config['reconnect_backoff_ms']\n    return float('inf')", "docstring": "Send a metadata request if needed.\n\nReturns:\nint: milliseconds until next refresh", "source": "codesearchnet"}
{"code": "def to_dense(self, sampling_rate):\n        \n        duration = int(math.ceil(sampling_rate * self.get_duration()))\n        ts = np.zeros(duration, dtype=self.values.dtype)\n\n        onsets = np.round(self.onset * sampling_rate).astype(int)\n        durations = np.round(self.duration * sampling_rate).astype(int)\n\n        run_i, start, last_ind = 0, 0, 0\n        for i, val in enumerate(self.values.values):\n            if onsets[i] < last_ind:\n                start += self.run_info[run_i].duration * sampling_rate\n                run_i += 1\n            _onset = int(start + onsets[i])\n            _offset = int(_onset + durations[i])\n            if _onset >= duration:\n                warnings.warn(\"The onset time of a variable seems to exceed the runs\"\n                              \"duration, hence runs are incremented by one internally.\")\n            ts[_onset:_offset] = val\n            last_ind = onsets[i]\n\n        run_info = list(self.run_info)\n        return DenseRunVariable(\n            name=self.name,\n            values=ts,\n            run_info=run_info,\n            source=self.source,\n            sampling_rate=sampling_rate)", "docstring": "Convert the current sparse column to a dense representation.\nReturns: A DenseRunVariable.\n\nArgs:\nsampling_rate (int, str): Sampling rate (in Hz) to use when\nconstructing the DenseRunVariable.\n\nReturns:\nA DenseRunVariable.", "source": "juraj-google-style"}
{"code": "def xavier_init(n_inputs, n_outputs, uniform=True):\n    if uniform:\n        init_range = math.sqrt((6.0 / (n_inputs + n_outputs)))\n        return tf.random_uniform_initializer((- init_range), init_range)\n    else:\n        stddev = math.sqrt((3.0 / (n_inputs + n_outputs)))\n        return tf.truncated_normal_initializer(stddev=stddev)", "docstring": "Set the parameter initialization using the method described.\n\nThis method is designed to keep the scale of the gradients roughly the same\nin all layers.\n\nXavier Glorot and Yoshua Bengio (2010):\nUnderstanding the difficulty of training deep feedforward neural\nnetworks. International conference on artificial intelligence and\nstatistics.\nArgs:\nn_inputs: The number of input nodes into each output.\nn_outputs: The number of output nodes for each input.\nuniform: If true use a uniform distribution, otherwise use a normal.\nReturns:\nAn initializer.", "source": "codesearchnet"}
{"code": "def process(self, element):\n    decoded_inputs = self._tokenizer.decode(element.example, skip_special_tokens=True)\n    decoded_outputs = self._tokenizer.decode(element.inference, skip_special_tokens=True)\n    print(f'{decoded_inputs} \\t Output: {decoded_outputs}')", "docstring": "Process the PredictionResult to print the translated texts\n\nArgs:\nelement: The RunInference output to be processed.", "source": "github-repos"}
{"code": "def compute_transpose_output_shape(input_shape, axes):\n    input_shape = list(input_shape)\n    if axes is None:\n        return tuple(input_shape[::-1])\n    if len(axes) != len(input_shape):\n        raise ValueError(f'axis must be a list of the same length as the input shape, expected {len(input_shape)}, but received {len(axes)}.')\n    return tuple((input_shape[ax] for ax in axes))", "docstring": "Compute the output shape for the `transpose` operation.\n\nArgs:\ninput_shape: Input shape.\naxes: Permutation of the dimensions for the `transpose` operation.\n\nReturns:\nTuple of ints: The output shape after the `transpose` operation.", "source": "github-repos"}
{"code": "def from_json(cls, raw):\n    if (raw is None):\n        return None\n    bcls = None\n    _type = raw.get('type')\n    try:\n        bcls = cls._blob_type_map[BlobType(_type)]\n    except (KeyError, ValueError) as e:\n        logger.warning('Unknown blob type: %s', _type)\n        if DEBUG:\n            raise_from(exception.ParseException(('Parse error for %s' % _type), raw), e)\n        return None\n    blob = bcls()\n    blob.load(raw)\n    return blob", "docstring": "Helper to construct a blob from a dict.\n\nArgs:\nraw (dict): Raw blob representation.\n\nReturns:\nNodeBlob: A NodeBlob object or None.", "source": "codesearchnet"}
{"code": "def __setstate__(self, state):\n    \n    self._api = state['api']\n    self._path_with_token = state['path_token']\n    self._buffer = state['buffer']\n    self._buffered = state['buffered']\n    self._written = state['written']\n    self._offset = state['offset']\n    self.closed = state['closed']\n    self._path = state['path']\n    self.name = api_utils._unquote_filename(self._path)", "docstring": "Restore state as part of deserialization/unpickling.\n\nArgs:\nstate: the dictionary from a __getstate__ call", "source": "juraj-google-style"}
{"code": "def get_enabled_references(self, datas, meta_references):\n    references = OrderedDict()\n    for section in meta_references:\n        references[section] = self.get_reference(datas, section)\n    return references", "docstring": "Get enabled manifest references declarations.\n\nEnabled references are defined through meta references declaration,\nevery other references are ignored.\n\nArguments:\ndatas (dict): Data where to search for reference declarations.\nThis is commonly the fully parsed manifest.\nmeta_references (list): List of enabled reference names.\n\nReturns:\ncollections.OrderedDict: Serialized enabled references datas.", "source": "codesearchnet"}
{"code": "def dodge(field_name, value, range=None):\n    \n    return field(field_name, Dodge(value=value, range=range))", "docstring": "Create a ``DataSpec`` dict that applies a client-side ``Jitter``\ntransformation to a ``ColumnDataSource`` column.\n\nArgs:\nfield_name (str) : a field name to configure ``DataSpec`` with\n\nvalue (float) : the fixed offset to add to column data\n\nrange (Range, optional) : a range to use for computing synthetic\ncoordinates when necessary, e.g. a ``FactorRange`` when the\ncolumn data is categorical (default: None)\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None, flag_values=_flagvalues.FLAGS, **args):\n    parser = _argument_parser.FloatParser(lower_bound, upper_bound)\n    serializer = _argument_parser.ArgumentSerializer()\n    DEFINE(parser, name, default, help, flag_values, serializer, **args)\n    _register_bounds_validator_if_needed(parser, name, flag_values=flag_values)", "docstring": "Registers a flag whose value must be a float.\n\nIf lower_bound or upper_bound are set, then this flag must be\nwithin the given range.\n\nArgs:\nname: str, the flag name.\ndefault: float|str|None, the default value of the flag.\nhelp: str, the help message.\nlower_bound: float, min value of the flag.\nupper_bound: float, max value of the flag.\nflag_values: FlagValues, the FlagValues instance with which the flag will\nbe registered. This should almost never need to be overridden.\n**args: dict, the extra keyword args that are passed to DEFINE.", "source": "codesearchnet"}
{"code": "def pack_image_features(self, image_features, image_sizes, image_newline=None, vision_aspect_ratio='anyres_max_9'):\n    new_image_features = []\n    feature_lens = []\n    for image_idx, image_feature in enumerate(image_features):\n        if image_feature.shape[0] > 1:\n            base_image_feature = image_feature[0]\n            image_feature = image_feature[1:]\n            height = width = self.config.vision_config.image_size \n            if height * width != base_image_feature.shape[0]:\n                raise ValueError('The number of patches is not consistent with the image size.')\n            num_patch_height, num_patch_width = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size)\n            image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)\n            image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()\n            image_feature = image_feature.flatten(1, 2).flatten(2, 3)\n            image_feature = unpad_image(image_feature, image_sizes[image_idx])\n            max_num_patches = int(vision_aspect_ratio.strip('anyres_max_'))\n            channels, curr_height, curr_width = image_feature.shape\n            ratio = math.sqrt(curr_height * curr_width / (max_num_patches * height ** 2))\n            if ratio > 1.1:\n                image_feature = image_feature[None]\n                image_feature = nn.functional.interpolate(image_feature, [int(curr_height \n            if image_newline is not None:\n                image_feature = torch.cat((image_feature, image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device, image_feature.dtype)), dim=-1)\n            image_feature = image_feature.flatten(1, 2).transpose(0, 1)\n            image_feature = torch.cat((base_image_feature, image_feature), dim=0)\n        else:\n            image_feature = image_feature[0]\n            if image_newline is not None:\n                image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)\n            image_feature = image_feature.flatten(0, 1)\n        new_image_features.append(image_feature)\n        feature_lens.append(image_feature.size(0))\n    feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device)\n    return (new_image_features, feature_lens)", "docstring": "Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.\n\nArgs:\nimage_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)\nList of image feature tensor, each contains all the visual feature of all patches.\nimage_sizes (`torch.Tensor` of shape `(num_images, 2)`)\nActual image size of each images (H, W).\nimage_newline (`torch.Tensor` of shape `(embed_dim)`)\nNew line embedding vector.\nvision_aspect_ratio (`str`, *optional*, \"anyres_max_9\"):\nAspect ratio used when processong image features. The default value is \"anyres_max_9\".\nReturns:\nimage_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)\nfeature_lens (`List[int]`)\ntoken length of each image in image_features", "source": "github-repos"}
{"code": "def run_parallel(self, para_func):\n        \n        if self.timer:\n            start_timer = time.time()\n\n        \n        \n        \n        \n\n        with mp.Pool(self.num_processors) as pool:\n            print('start pool with {} processors: {} total processes.\\n'.format(\n                    self.num_processors, len(self.args)))\n\n            results = [pool.apply_async(para_func, arg) for arg in self.args]\n            out = [r.get() for r in results]\n            out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()}\n        if self.timer:\n            print(\"SNR calculation time:\", time.time()-start_timer)\n        return out", "docstring": "Run parallel calulation\n\nThis will run the parallel calculation on self.num_processors.\n\nArgs:\npara_func (obj): Function object to be used in parallel.\n\nReturns:\n(dict): Dictionary with parallel results.", "source": "juraj-google-style"}
{"code": "def __init__(self, coupling_map, layout=None):\n        \n        super().__init__()\n        self.coupling_map = coupling_map\n        self.layout = layout\n        self.ancilla_name = 'ancilla'", "docstring": "Extends a Layout with the idle nodes from coupling_map.\n\nArgs:\ncoupling_map (Coupling): directed graph representing a coupling map.\nlayout (Layout): an existing layout. ancilla allocation occurs if\nthe layout is smaller than the coupling_map.", "source": "juraj-google-style"}
{"code": "def exportUsufy(data, ext, fileH):\n    \n    if ext == \"csv\":\n        usufyToCsvExport(data, fileH+\".\"+ext)\n    elif ext == \"gml\":\n        usufyToGmlExport(data, fileH+\".\"+ext)\n    elif ext == \"json\":\n        usufyToJsonExport(data, fileH+\".\"+ext)\n    elif ext == \"ods\":\n        usufyToOdsExport(data, fileH+\".\"+ext)\n    elif ext == \"png\":\n        usufyToPngExport(data, fileH+\".\"+ext)\n    elif ext == \"txt\":\n        usufyToTextExport(data, fileH+\".\"+ext)\n    elif ext == \"xls\":\n        usufyToXlsExport(data, fileH+\".\"+ext)\n    elif ext == \"xlsx\":\n        usufyToXlsxExport(data, fileH+\".\"+ext)", "docstring": "Method that exports the different structures onto different formats.\n\nArgs:\n-----\ndata: Data to export.\next: One of the following: csv, excel, json, ods.\nfileH: Fileheader for the output files.\n\nReturns:\n--------\nPerforms the export as requested by parameter.", "source": "juraj-google-style"}
{"code": "def get_enumerations_from_bit_mask(enumeration, mask):\n    \n    return [x for x in enumeration if (x.value & mask) == x.value]", "docstring": "A utility function that creates a list of enumeration values from a bit\nmask for a specific mask enumeration class.\n\nArgs:\nenumeration (class): The enumeration class from which to draw\nenumeration values.\nmask (int): The bit mask from which to identify enumeration values.\n\nReturns:\nlist: A list of enumeration values corresponding to the bit mask.", "source": "juraj-google-style"}
{"code": "def remove_observer(self, callback):\n        \n        if callback not in self._observers:\n            raise ValueError('{} is not an observer of {}'\n                             .format(callback, self))\n        self._observers.remove(callback)", "docstring": "Remove an observer from this event.\n\nArgs:\ncallback: A function or coroutine callback to remove from this\nevent.\n\nRaises:\nValueError: If the callback is not an observer of this event.", "source": "juraj-google-style"}
{"code": "def localize_file(path_or_buffer):\n    path_or_buffer = _stringify_path(path_or_buffer)\n    if _is_url(path_or_buffer):\n        req = urlopen(path_or_buffer)\n        filename = os.path.basename(req.geturl())\n        if (os.path.splitext(filename)[(- 1)] is not '.pdf'):\n            pid = os.getpid()\n            filename = '{0}.pdf'.format(pid)\n        with open(filename, 'wb') as f:\n            shutil.copyfileobj(req, f)\n        return (filename, True)\n    elif is_file_like(path_or_buffer):\n        pid = os.getpid()\n        filename = '{0}.pdf'.format(pid)\n        with open(filename, 'wb') as f:\n            shutil.copyfileobj(path_or_buffer, f)\n        return (filename, True)\n    else:\n        return (os.path.expanduser(path_or_buffer), False)", "docstring": "Ensure localize target file.\n\nIf the target file is remote, this function fetches into local storage.\n\nArgs:\npath (str):\nFile path or file like object or URL of target file.\n\nReturns:\nfilename (str): file name in local storage\ntemporary_file_flag (bool): temporary file flag", "source": "codesearchnet"}
{"code": "def _GetAction(self, action, text):\n    \n    \n    if 'airportdProcessDLILEvent' in action:\n      interface = text.split()[0]\n      return 'Interface {0:s} turn up.'.format(interface)\n\n    if 'doAutoJoin' in action:\n      match = self._CONNECTED_RE.match(text)\n      if match:\n        ssid = match.group(1)[1:-1]\n      else:\n        ssid = 'Unknown'\n      return 'Wifi connected to SSID {0:s}'.format(ssid)\n\n    if 'processSystemPSKAssoc' in action:\n      wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)\n      if wifi_parameters:\n        ssid = wifi_parameters.group(1)\n        bssid = wifi_parameters.group(2)\n        security = wifi_parameters.group(3)\n        if not ssid:\n          ssid = 'Unknown'\n        if not bssid:\n          bssid = 'Unknown'\n        if not security:\n          security = 'Unknown'\n\n        return (\n            'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '\n            'Security: {2:s}.').format(bssid, ssid, security)\n\n    return text", "docstring": "Parse the well known actions for easy reading.\n\nArgs:\naction (str): the function or action called by the agent.\ntext (str): mac Wifi log text.\n\nReturns:\nstr: a formatted string representing the known (or common) action.\nIf the action is not known the original log text is returned.", "source": "juraj-google-style"}
{"code": "def get(self, node_id):\n    return self.prepare_model(self.client.api.inspect_node(node_id))", "docstring": "Get a node.\n\nArgs:\nnode_id (string): ID of the node to be inspected.\n\nReturns:\nA :py:class:`Node` object.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def retrieve_pwd_from_config(msg, cfg):\n    \n    msg_type = msg.__class__.__name__.lower()\n    key_fmt = msg.profile + \"_\" + msg_type\n    pwd = cfg.pwd[key_fmt].split(\" :: \")\n    if len(pwd) == 1:\n        msg.auth = pwd[0]\n    else:\n        msg.auth = tuple(pwd)", "docstring": "Retrieve auth from profile configuration and set in msg.auth attr.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=0):\n        \n        super(ExtensionTag, self).__init__(value, Tags.EXTENSION_TAG)", "docstring": "Construct an ExtensionTag object.\n\nArgs:\nvalue (int): A number representing the extension tag. Often\ndisplayed in hex format. Optional, defaults to 0.", "source": "juraj-google-style"}
{"code": "def _mean_of_runs(stats, key='runs'):\n    num_runs = len(stats[key])\n    first = stats[key][0]\n    mean = {}\n    for stat_key in first:\n        if isinstance(first[stat_key], numbers.Number):\n            mean[stat_key] = (sum((run[stat_key] for run in stats[key])) / float(num_runs))\n    return mean", "docstring": "Obtain the mean of stats.\n\nArgs:\nstats: dict; A set of stats, structured as above.\nkey: str; Optional key to determine where list of runs is found in stats", "source": "codesearchnet"}
{"code": "def compile_file(source, globals_=None):\n  \n  if isinstance(source, gast.AST):\n    source = quoting.to_source(source)\n\n  \n  tempdir = tempfile.mkdtemp()\n  uuid = str(uuid4().hex[:4])\n  tmpname = os.path.join(tempdir, 'tangent_%s.py' % uuid)\n  with open(tmpname, 'w') as f:\n    f.write(source)\n\n  \n  module_name = 'tangent_%s' % uuid\n  if six.PY3:\n    spec = util.spec_from_file_location(module_name, tmpname)\n    m = util.module_from_spec(spec)\n    spec.loader.exec_module(m)\n  else:\n    m = imp.load_source(module_name, tmpname)\n\n  \n  if globals_:\n    m.__dict__.update(globals_)\n  return m", "docstring": "Compile by saving to file and importing that.\n\nCompiling the AST/source code this way ensures that the source code is\nreadable by e.g. `pdb` or `inspect`.\n\nArgs:\nsource: The code to compile, either as a string or as an AST.\nglobals_: A dictionary of variables that should be available as globals in\nthe compiled module. They will be monkey patched after importing the\nmodule.\n\nReturns:\nA module object containing the compiled source code.", "source": "juraj-google-style"}
{"code": "def pack_x_y_sample_weight(x, y=None, sample_weight=None):\n    if y is None:\n        if not nest.is_nested(x):\n            return x\n        else:\n            return (x,)\n    elif sample_weight is None:\n        return (x, y)\n    else:\n        return (x, y, sample_weight)", "docstring": "Packs user-provided data into a tuple.\n\nThis is a convenience utility for packing data into the tuple formats\nthat `Model.fit` uses.\n\nStandalone usage:\n\n>>> x = tf.ones((10, 1))\n>>> data = tf.keras.utils.pack_x_y_sample_weight(x)\n>>> isinstance(data, tf.Tensor)\nTrue\n>>> y = tf.ones((10, 1))\n>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)\n>>> isinstance(data, tuple)\nTrue\n>>> x, y = data\n\nArgs:\nx: Features to pass to `Model`.\ny: Ground-truth targets to pass to `Model`.\nsample_weight: Sample weight for each element.\n\nReturns:\nTuple in the format used in `Model.fit`.", "source": "github-repos"}
{"code": "def execute_work_items(work_items, config):\n    \n    return celery.group(\n        worker_task.s(work_item, config)\n        for work_item in work_items\n    )", "docstring": "Execute a suite of tests for a given set of work items.\n\nArgs:\nwork_items: An iterable of `work_db.WorkItem`s.\nconfig: The configuration to use for the test execution.\n\nReturns: An iterable of WorkItems.", "source": "juraj-google-style"}
{"code": "def audio_bottom(x, model_hparams, vocab_size):\n    del vocab_size\n    inputs = x\n    with tf.variable_scope('audio_modality'):\n\n        def xnet_resblock(x, filters, res_relu, name):\n            'Xception block.'\n            with tf.variable_scope(name):\n                y = common_layers.separable_conv_block(x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding='SAME', force2d=True, name='sep_conv_block')\n                y = common_layers.pool(y, (3, 3), 'MAX', 'SAME', strides=(2, 2))\n                return (y + common_layers.conv_block(x, filters, [((1, 1), (1, 1))], padding='SAME', strides=(2, 2), first_relu=res_relu, force2d=True, name='res_conv0'))\n        x = (tf.to_float(inputs) / 255.0)\n        x.set_shape([None, None, None, 1])\n        for i in range(model_hparams.audio_compression):\n            x = xnet_resblock(x, (2 ** (i + 1)), True, ('compress_block_%d' % i))\n        return xnet_resblock(x, model_hparams.hidden_size, False, 'compress_block_final')", "docstring": "Transform input from data space to model space.\n\nArgs:\nx: A Tensor with shape [batch, ...]\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nbody_input: A Tensor with shape [batch, ?, ?,\nmodel_hparams.hidden_size].", "source": "codesearchnet"}
{"code": "def CollectFromFileSystem(\n      cls, artifacts_registry, knowledge_base, searcher, file_system):\n    \n    for preprocess_plugin in cls._file_system_plugins.values():\n      artifact_definition = artifacts_registry.GetDefinitionByName(\n          preprocess_plugin.ARTIFACT_DEFINITION_NAME)\n      if not artifact_definition:\n        logger.warning('Missing artifact definition: {0:s}'.format(\n            preprocess_plugin.ARTIFACT_DEFINITION_NAME))\n        continue\n\n      logger.debug('Running file system preprocessor plugin: {0:s}'.format(\n          preprocess_plugin.ARTIFACT_DEFINITION_NAME))\n      try:\n        preprocess_plugin.Collect(\n            knowledge_base, artifact_definition, searcher, file_system)\n      except (IOError, errors.PreProcessFail) as exception:\n        logger.warning((\n            'Unable to collect value from artifact definition: {0:s} '\n            'with error: {1!s}').format(\n                preprocess_plugin.ARTIFACT_DEFINITION_NAME, exception))", "docstring": "Collects values from Windows Registry values.\n\nArgs:\nartifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts\ndefinitions registry.\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nsearcher (dfvfs.FileSystemSearcher): file system searcher to preprocess\nthe file system.\nfile_system (dfvfs.FileSystem): file system to be preprocessed.", "source": "juraj-google-style"}
{"code": "def ToVM(self):\n    if (self.Type == ContractParameterType.String):\n        return str(self.Value).encode('utf-8').hex()\n    elif ((self.Type == ContractParameterType.Integer) and isinstance(self.Value, int)):\n        return BigInteger(self.Value)\n    return self.Value", "docstring": "Used for turning a ContractParameter item into somethnig consumable by the VM\n\nReturns:", "source": "codesearchnet"}
{"code": "def set_lacp_timeout(self, name, value=None):\n    commands = [('interface %s' % name)]\n    string = 'port-channel lacp fallback timeout'\n    commands.append(self.command_builder(string, value=value))\n    return self.configure(commands)", "docstring": "Configures the Port-Channel LACP fallback timeout\nThe fallback timeout configures the period an interface in\nfallback mode remains in LACP mode without receiving a PDU.\n\nArgs:\nname(str): The Port-Channel interface name\n\nvalue(int): port-channel lacp fallback timeout in seconds\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def dispatch_pure(request: str, methods: Methods, *, context: Any, convert_camel_case: bool, debug: bool) -> Response:\n    try:\n        deserialized = validate(deserialize(request), schema)\n    except JSONDecodeError as exc:\n        return InvalidJSONResponse(data=str(exc), debug=debug)\n    except ValidationError as exc:\n        return InvalidJSONRPCResponse(data=None, debug=debug)\n    return call_requests(create_requests(deserialized, context=context, convert_camel_case=convert_camel_case), methods, debug=debug)", "docstring": "Pure version of dispatch - no logging, no optional parameters.\n\nDoes two things:\n1. Deserializes and validates the string.\n2. Calls each request.\n\nArgs:\nrequest: The incoming request string.\nmethods: Collection of methods that can be called.\ncontext: If specified, will be the first positional argument in all requests.\nconvert_camel_case: Will convert the method name/any named params to snake case.\ndebug: Include more information in error responses.\nReturns:\nA Response.", "source": "codesearchnet"}
{"code": "def _get_mpr_table(self, connection, partition):\n        \n        \n        \n        \n        \n        \n\n        \n        \n        \n        logger.debug(\n            'Looking for materialized view of the partition.\\n    partition: {}'.format(partition.name))\n        foreign_table = partition.vid\n        view_table = '{}_v'.format(foreign_table)\n        view_exists = self._relation_exists(connection, view_table)\n        if view_exists:\n            logger.debug(\n                'Materialized view of the partition found.\\n    partition: {}, view: {}'\n                .format(partition.name, view_table))\n            return view_table\n\n        \n        logger.debug(\n            'Looking for foreign table of the partition.\\n    partition: {}'.format(partition.name))\n        foreign_exists = self._relation_exists(connection, foreign_table)\n        if foreign_exists:\n            logger.debug(\n                'Foreign table of the partition found.\\n    partition: {}, foreign table: {}'\n                .format(partition.name, foreign_table))\n            return foreign_table\n        raise MissingTableError('postgres database does not have table for {} partition.'\n                                .format(partition.vid))", "docstring": "Returns name of the postgres table who stores mpr data.\n\nArgs:\nconnection: connection to postgres db who stores mpr data.\npartition (orm.Partition):\n\nReturns:\nstr:\n\nRaises:\nMissingTableError: if partition table not found in the db.", "source": "juraj-google-style"}
{"code": "def attribute_label(self, attribute_id, label, action='GET', params=None):\n    if (params is None):\n        params = {}\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    if (action == 'GET'):\n        return self.tc_requests.get_attribute_label(self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner, params=params)\n    if (action == 'DELETE'):\n        return self.tc_requests.delete_attribute_label(self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner)\n    self._tcex.handle_error(925, ['action', 'attribute_label', 'action', 'action', action])\n    return None", "docstring": "Gets a security labels from a attribute\n\nArgs:\nattribute_id:\nlabel:\naction:\nparams:\n\nReturns: Security label json", "source": "codesearchnet"}
{"code": "def norm(value, dims, order=None):\n  \n  if dims == 0:\n    return tf.math.abs(value)\n  elif dims == 1:\n    axis = -1\n  elif dims == 2:\n    axis = [-1, -2]\n  else:\n    ValueError(dims)\n  if order is None:\n    order = np.inf\n  return tf.norm(tensor=value, axis=axis, ord=order)", "docstring": "Compute the norm of the given (possibly batched) value.\n\nArgs:\nvalue: A `Tensor` of real dtype.\ndims: An Python integer with the number of non-batching dimensions in the\nvalue, i.e. `dims=0` (scalars), `dims=1` (vectors), `dims=2` (matrices).\norder: Order of the norm, defaults to `np.inf`.", "source": "juraj-google-style"}
{"code": "def __init__(self, source_urn=None, args=None, token=None):\n    \n    self.source_urn = source_urn\n    self.args = args\n    self.token = token\n    self.lock = threading.RLock()", "docstring": "OutputPlugin constructor.\n\nConstructor should be overridden to maintain instance-local state - i.e.\nstate that gets accumulated during the single output plugin run and that\nshould be used to update the global state via UpdateState method.\n\nArgs:\nsource_urn: URN of the data source to process the results from.\nargs: This plugin's arguments.\ntoken: Security token.", "source": "juraj-google-style"}
{"code": "def is_metal(self, efermi_tol=0.0001):\n    for (spin, values) in self.bands.items():\n        for i in range(self.nb_bands):\n            if (np.any(((values[(i, :)] - self.efermi) < (- efermi_tol))) and np.any(((values[(i, :)] - self.efermi) > efermi_tol))):\n                return True\n    return False", "docstring": "Check if the band structure indicates a metal by looking if the fermi\nlevel crosses a band.\n\nReturns:\nTrue if a metal, False if not", "source": "codesearchnet"}
{"code": "def compare_python_to_reference_murmur3_32(data: Any, seed: int = 0) -> None:\n    \n    assert mmh3, \"Need mmh3 module\"\n    c_data = to_str(data)\n    c_signed = mmh3.hash(c_data, seed=seed)  \n    py_data = to_bytes(c_data)\n    py_unsigned = murmur3_x86_32(py_data, seed=seed)\n    py_signed = twos_comp_to_signed(py_unsigned, n_bits=32)\n    preamble = \"Hashing {data} with MurmurHash3/32-bit/seed={seed}\".format(\n        data=repr(data), seed=seed)\n    if c_signed == py_signed:\n        print(preamble + \" -> {result}: OK\".format(result=c_signed))\n    else:\n        raise AssertionError(\n            preamble + \"; mmh3 says \"\n            \"{c_data} -> {c_signed}, Python version says {py_data} -> \"\n            \"{py_unsigned} = {py_signed}\".format(\n                c_data=repr(c_data),\n                c_signed=c_signed,\n                py_data=repr(py_data),\n                py_unsigned=py_unsigned,\n                py_signed=py_signed))", "docstring": "Checks the pure Python implementation of 32-bit murmur3 against the\n``mmh3`` C-based module.\n\nArgs:\ndata: data to hash\nseed: seed\n\nRaises:\nAssertionError: if the two calculations don't match", "source": "juraj-google-style"}
{"code": "def iterator_zip(variables: VarType, parent: str=None) -> Iterable[VarMatrix]:\n    logger.debug('Yielding from zip iterator')\n    if isinstance(variables, list):\n        for item in variables:\n            (yield list(variable_matrix(item, parent, 'zip')))\n    else:\n        (yield list(variable_matrix(variables, parent, 'zip')))", "docstring": "Apply the zip operator to a set of variables.\n\nThis uses the python zip iterator to combine multiple lists of variables such that\nthe nth variable in each list is aligned.\n\nArgs:\nvariables: The variables object\nparent: Unused", "source": "codesearchnet"}
{"code": "def _get_metric_object(self, metric, y_t, y_p):\n    if metric is None:\n        return None\n    if str(metric).lower() not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n        metric_obj = metrics_mod.get(metric)\n    else:\n        y_t_rank = len(y_t.shape.as_list())\n        y_p_rank = len(y_p.shape.as_list())\n        y_t_last_dim = y_t.shape.as_list()[-1]\n        y_p_last_dim = y_p.shape.as_list()[-1]\n        is_binary = y_p_last_dim == 1\n        is_sparse_categorical = y_t_rank < y_p_rank or (y_t_last_dim == 1 and y_p_last_dim > 1)\n        if str(metric).lower() in ['accuracy', 'acc']:\n            if is_binary:\n                metric_obj = metrics_mod.binary_accuracy\n            elif is_sparse_categorical:\n                metric_obj = metrics_mod.sparse_categorical_accuracy\n            else:\n                metric_obj = metrics_mod.categorical_accuracy\n        elif is_binary:\n            metric_obj = metrics_mod.binary_crossentropy\n        elif is_sparse_categorical:\n            metric_obj = metrics_mod.sparse_categorical_crossentropy\n        else:\n            metric_obj = metrics_mod.categorical_crossentropy\n    if isinstance(metric_obj, losses_mod.Loss):\n        metric_obj._allow_sum_over_batch_size = True\n    if not isinstance(metric_obj, metrics_mod.Metric):\n        if isinstance(metric, str):\n            metric_name = metric\n        else:\n            metric_name = get_custom_object_name(metric)\n            if metric_name is None:\n                raise ValueError('Metric should be a callable, found: {}'.format(metric))\n        metric_obj = metrics_mod.MeanMetricWrapper(metric_obj, name=metric_name)\n    return metric_obj", "docstring": "Converts user-supplied metric to a `Metric` object.\n\nArgs:\nmetric: A string, function, or `Metric` object.\ny_t: Sample of label.\ny_p: Sample of output.\n\nReturns:\nA `Metric` object.", "source": "github-repos"}
{"code": "def _hash_outputs(self, index, sighash_type):\n        \n        if sighash_type == shared.SIGHASH_ALL:\n            \n            \n            \n            outputs = ByteData()\n            for tx_out in self.tx_outs:\n                outputs += tx_out.to_bytes()\n            return utils.hash256(outputs.to_bytes())\n        elif (sighash_type == shared.SIGHASH_SINGLE\n              and index < len(self.tx_outs)):\n            \n            \n            \n            return utils.hash256(self.tx_outs[index].to_bytes())\n        else:\n            \n            raise NotImplementedError(\n                'I refuse to implement the SIGHASH_SINGLE bug.')", "docstring": "BIP143 hashOutputs implementation\n\nArgs:\nindex        (int): index of input being signed\nsighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL\nReturns:\n(bytes): the hashOutputs, a 32 byte hash", "source": "juraj-google-style"}
{"code": "def get_model(self, model, model_id):\n    return self._store.find_record(self._get_model_class(model), int(model_id))", "docstring": "Get a single model from the server.\n\nArgs:\nmodel (string): The class as a string.\nmodel_id (string): The integer ID as a string.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: A instance of the model.", "source": "codesearchnet"}
{"code": "def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = TangoAndroidMessageEventData()\n    event_data.message_identifier = self._GetRowValue(query_hash, row, 'msg_id')\n    event_data.direction = self._GetRowValue(query_hash, row, 'direction')\n    timestamp = self._GetRowValue(query_hash, row, 'create_time')\n    if timestamp:\n        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'send_time')\n    if timestamp:\n        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SENT)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a message row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "codesearchnet"}
{"code": "def validate_callbacks(input_callbacks, optimizer):\n    if input_callbacks:\n        for callback in input_callbacks:\n            if isinstance(callback, (callbacks.LearningRateScheduler, callbacks.ReduceLROnPlateau)):\n                if not isinstance(optimizer, optimizer_v2.OptimizerV2):\n                    raise ValueError('You must specify a Keras Optimizer V2 when using %s callback with DistributionStrategy.' % callback)\n            if isinstance(callback, callbacks.TensorBoard):\n                if getattr(callback, 'write_grads', False):\n                    logging.warning(UserWarning('`write_grads` in the TensorBoard callback is not supported when using DistributionStrategy. Setting `write_grads` to `False`.'))\n                    callback.write_grads = False", "docstring": "Validate whether given callbacks are supported by DistributionStrategy.\n\nArgs:\ninput_callbacks: List of callbacks passed by the user to fit.\noptimizer: Optimizer instance used to train the model.\n\nRaises:\nValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the\ncallbacks passed.\nValueError: If `write_grads` is one of the parameters passed as part of the\nTensorBoard callback.", "source": "github-repos"}
{"code": "def retrieve_api_token(self):\n    payload = self.oauth2_manager.get_access_token_params(refresh_token=self.refresh_token)\n    response = requests.post(self.oauth2_manager.access_token_url, json=payload)\n    response.raise_for_status()\n    response_json = json.loads(response.text)\n    return response_json['access_token']", "docstring": "Retrieve the access token from AVS.\n\nThis function is memoized, so the\nvalue returned by the function will be remembered and returned by\nsubsequent calls until the memo expires. This is because the access\ntoken lasts for one hour, then a new token needs to be requested.\n\nDecorators:\nhelpers.expiring_memo\n\nReturns:\nstr -- The access token for communicating with AVS", "source": "codesearchnet"}
{"code": "def sleep(self, seconds):\n        \n        until = time.time() + seconds\n        try:\n            while True:\n                self._service_futures([], until)\n        except TimeoutError:\n            return", "docstring": "Services all futures while waiting\n\nArgs:\nseconds (float): Time to wait", "source": "juraj-google-style"}
{"code": "def get_filename(self, **kwargs):\n        \n        if self.filename_parser is None:\n            raise RuntimeError(\"No filename pattern or specific filename provided\")\n        output_filename = self.filename_parser.compose(kwargs)\n        dirname = os.path.dirname(output_filename)\n        if dirname and not os.path.isdir(dirname):\n            LOG.info(\"Creating output directory: {}\".format(dirname))\n            os.makedirs(dirname)\n        return output_filename", "docstring": "Create a filename where output data will be saved.\n\nArgs:\nkwargs (dict): Attributes and other metadata to use for formatting\nthe previously provided `filename`.", "source": "juraj-google-style"}
{"code": "def rpc(self, address, rpc_id):\n    if ((address in self.mock_rpcs) and (rpc_id in self.mock_rpcs[address])):\n        value = self.mock_rpcs[address][rpc_id]\n        return value\n    result = self._call_rpc(address, rpc_id, bytes())\n    if (len(result) != 4):\n        self.warn((u'RPC 0x%X on address %d: response had invalid length %d not equal to 4' % (rpc_id, address, len(result))))\n    if (len(result) < 4):\n        raise HardwareError('Response from RPC was not long enough to parse as an integer', rpc_id=rpc_id, address=address, response_length=len(result))\n    if (len(result) > 4):\n        result = result[:4]\n    (res,) = struct.unpack('<L', result)\n    return res", "docstring": "Call an RPC and receive the result as an integer.\n\nIf the RPC does not properly return a 32 bit integer, raise a warning\nunless it cannot be converted into an integer at all, in which case\na HardwareError is thrown.\n\nArgs:\naddress (int): The address of the tile we want to call the RPC\non\nrpc_id (int): The id of the RPC that we want to call\n\nReturns:\nint: The result of the RPC call.  If the rpc did not succeed\nan error is thrown instead.", "source": "codesearchnet"}
{"code": "def width(self):\n    return sum((reg.size for reg in (self.qregs + self.cregs)))", "docstring": "Return number of qubits plus clbits in circuit.\n\nReturns:\nint: Width of circuit.", "source": "codesearchnet"}
{"code": "def __init__(self, pubsub_source_descriptors: List[PubSubSourceDescriptor], with_attributes: bool=False):\n    self.pubsub_source_descriptors = pubsub_source_descriptors\n    self.with_attributes = with_attributes\n    for descriptor in self.pubsub_source_descriptors:\n        match_descriptor = re.match(PUBSUB_DESCRIPTOR_REGEXP, descriptor.source)\n        if not match_descriptor:\n            raise ValueError('PubSub source descriptor must be in the form \"projects/<project>/topics/<topic>\" or \"projects/<project>/subscription/<subscription>\" (got %r).' % descriptor.source)", "docstring": "Initializes ``PubSubMultipleReader``.\n\nArgs:\npubsub_source_descriptors: List of Cloud Pub/Sub topics or subscriptions\nof type `~PubSubSourceDescriptor`.\nwith_attributes:\nTrue - input elements will be :class:`~PubsubMessage` objects.\nFalse - input elements will be of type ``bytes`` (message data only).", "source": "github-repos"}
{"code": "async def get_records_for_zone(self, dns_zone, params=None):\n    managed_zone = self.get_managed_zone(dns_zone)\n    url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'\n    if (not params):\n        params = {}\n    if ('fields' not in params):\n        params['fields'] = 'rrsets/name,rrsets/kind,rrsets/rrdatas,rrsets/type,rrsets/ttl,nextPageToken'\n    next_page_token = None\n    records = []\n    while True:\n        if next_page_token:\n            params['pageToken'] = next_page_token\n        response = (await self.get_json(url, params=params))\n        records.extend(response['rrsets'])\n        next_page_token = response.get('nextPageToken')\n        if (not next_page_token):\n            break\n    logging.info(f'Found {len(records)} rrsets for zone \"{dns_zone}\".')\n    return records", "docstring": "Get all resource record sets for a managed zone, using the DNS zone.\n\nArgs:\ndns_zone (str): Desired DNS zone to query.\nparams (dict): (optional) Additional query parameters for HTTP\nrequests to the GDNS API.\nReturns:\nlist of dicts representing rrsets.", "source": "codesearchnet"}
{"code": "def __init__(self, column_names=None, column_sizes=None):\n    \n    super(CLITabularTableView, self).__init__()\n    self._columns = column_names or []\n    self._column_sizes = column_sizes or []\n    self._number_of_columns = len(self._columns)\n    self._rows = []", "docstring": "Initializes a command line interface tabular table view.\n\nArgs:\ncolumn_names (Optional[list[str]]): column names.\ncolumn_sizes (Optional[list[int]]): minimum column sizes, in number of\ncharacters. If a column name or row value is larger than the\nminimum column size the column will be enlarged. Note that the\nminimum columns size will be rounded up to the number of spaces\nof the next tab.", "source": "juraj-google-style"}
{"code": "def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):\n        \n\n        \n        kwargs = {\n            'headers': self._default_headers,\n            'params': query_params,\n            'timeout': self._req_timeout,\n        }\n\n        if MultiRequest._VERB_POST == verb:\n            if send_as_file:\n                kwargs['files'] = {'file': data}\n            else:\n                kwargs['data'] = data\n            return PreparedRequest(partial(self._session.post, url, **kwargs), url)\n        elif MultiRequest._VERB_GET == verb:\n            return PreparedRequest(partial(self._session.get, url, **kwargs), url)\n        else:\n            raise InvalidRequestError('Invalid verb {0}'.format(verb))", "docstring": "Helper method to create a single post/get requests.\n\nArgs:\nverb - MultiRequest._VERB_POST or MultiRequest._VERB_GET\nurl - A string URL\nquery_params - None or a dict\ndata - None or a string or a dict\nsend_as_file - A boolean, should the data be sent as a file.\nReturns:\nrequests.PreparedRequest\nRaises:\nInvalidRequestError - if an invalid verb is passed in.", "source": "juraj-google-style"}
{"code": "def set_state_tree(self, state_tree):\n    for k, v in state_tree.items():\n        path_value_dict = self._flatten_nested_dict(v)\n        if k == 'trainable_variables':\n            self._assign_variable_values(self.trainable_variables, path_value_dict)\n        elif k == 'non_trainable_variables':\n            self._assign_variable_values(self.non_trainable_variables, path_value_dict)\n        elif k == 'optimizer_variables':\n            self._assign_variable_values(self.optimizer.variables, path_value_dict)\n        elif k == 'metrics_variables':\n            self._assign_variable_values(self.metrics_variables, path_value_dict)\n        else:\n            raise ValueError(f'Unknown variable name: {k}')", "docstring": "Assigns values to variables of the model.\n\nThis method takes a dictionary of nested variable values, which\nrepresents the state tree of the model, and assigns them to the\ncorresponding variables of the model. The dictionary keys represent the\nvariable names (e.g., `'trainable_variables'`, `'optimizer_variables'`),\nand the values are nested dictionaries containing the variable\npaths and their corresponding values.\n\nArgs:\nstate_tree: A dictionary representing the state tree of the model.\nThe keys are the variable names, and the values are nested\ndictionaries representing the variable paths and their values.", "source": "github-repos"}
{"code": "def download_file(self, path, target_path):\n    self.__validate_storage_path(path)\n    entity = self.api_client.get_entity_by_query(path=path)\n    if (entity['entity_type'] != 'file'):\n        raise StorageArgumentException('Only file entities can be downloaded')\n    signed_url = self.api_client.get_signed_url(entity['uuid'])\n    response = self.api_client.download_signed_url(signed_url)\n    with open(target_path, 'wb') as output:\n        for chunk in response.iter_content(chunk_size=1024):\n            output.write(chunk)", "docstring": "Download a file from storage service to local disk.\n\nExisting files on the target path will be overwritten.\nThe download is not recursive, as it only works on files.\n\nArgs:\npath (str): The path of the entity to be downloaded. Must start with a '/'.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def diff_main(self, text1, text2, checklines=True, deadline=None):\n    if (deadline == None):\n        if (self.Diff_Timeout <= 0):\n            deadline = sys.maxsize\n        else:\n            deadline = (time.time() + self.Diff_Timeout)\n    if ((text1 == None) or (text2 == None)):\n        raise ValueError('Null inputs. (diff_main)')\n    if (text1 == text2):\n        if text1:\n            return [(self.DIFF_EQUAL, text1)]\n        return []\n    commonlength = self.diff_commonPrefix(text1, text2)\n    commonprefix = text1[:commonlength]\n    text1 = text1[commonlength:]\n    text2 = text2[commonlength:]\n    commonlength = self.diff_commonSuffix(text1, text2)\n    if (commonlength == 0):\n        commonsuffix = ''\n    else:\n        commonsuffix = text1[(- commonlength):]\n        text1 = text1[:(- commonlength)]\n        text2 = text2[:(- commonlength)]\n    diffs = self.diff_compute(text1, text2, checklines, deadline)\n    if commonprefix:\n        diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]\n    if commonsuffix:\n        diffs.append((self.DIFF_EQUAL, commonsuffix))\n    self.diff_cleanupMerge(diffs)\n    return diffs", "docstring": "Find the differences between two texts.  Simplifies the problem by\nstripping any common prefix or suffix off the texts before diffing.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\nchecklines: Optional speedup flag.  If present and false, then don't run\na line-level diff first to identify the changed areas.\nDefaults to true, which does a faster, slightly less optimal diff.\ndeadline: Optional time when the diff should be complete by.  Used\ninternally for recursive calls.  Users should set DiffTimeout instead.\n\nReturns:\nArray of changes.", "source": "codesearchnet"}
{"code": "def GetMessages(self, files):\n    result = {}\n    for file_name in files:\n        file_desc = self.pool.FindFileByName(file_name)\n        for desc in file_desc.message_types_by_name.values():\n            result[desc.full_name] = self.GetPrototype(desc)\n        for extension in file_desc.extensions_by_name.values():\n            if (extension.containing_type.full_name not in self._classes):\n                self.GetPrototype(extension.containing_type)\n            extended_class = self._classes[extension.containing_type.full_name]\n            extended_class.RegisterExtension(extension)\n    return result", "docstring": "Gets all the messages from a specified file.\n\nThis will find and resolve dependencies, failing if the descriptor\npool cannot satisfy them.\n\nArgs:\nfiles: The file names to extract messages from.\n\nReturns:\nA dictionary mapping proto names to the message classes. This will include\nany dependent messages as well as any messages defined in the same file as\na specified message.", "source": "codesearchnet"}
{"code": "def hottestmonth(self, value=None):\n        \n        if value is not None:\n            try:\n                value = int(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type int '\n                                 'for field `hottestmonth`'.format(value))\n            if value < 1:\n                raise ValueError('value need to be greater or equal 1 '\n                                 'for field `hottestmonth`')\n            if value > 12:\n                raise ValueError('value need to be smaller 12 '\n                                 'for field `hottestmonth`')\n\n        self._hottestmonth = value", "docstring": "Corresponds to IDD Field `hottestmonth`\n\nArgs:\nvalue (int): value for IDD Field `hottestmonth`\nvalue >= 1\nvalue <= 12\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_event_position(voevent, index=0):\n    od = voevent.WhereWhen.ObsDataLocation[index]\n    ac = od.ObservationLocation.AstroCoords\n    ac_sys = voevent.WhereWhen.ObsDataLocation.ObservationLocation.AstroCoordSystem\n    sys = ac_sys.attrib['id']\n    if hasattr(ac.Position2D, 'Name1'):\n        assert ((ac.Position2D.Name1 == 'RA') and (ac.Position2D.Name2 == 'Dec'))\n    posn = Position2D(ra=float(ac.Position2D.Value2.C1), dec=float(ac.Position2D.Value2.C2), err=float(ac.Position2D.Error2Radius), units=ac.Position2D.attrib['unit'], system=sys)\n    return posn", "docstring": "Extracts the `AstroCoords` from a given `WhereWhen.ObsDataLocation`.\n\nNote that a packet may include multiple 'ObsDataLocation' entries\nunder the 'WhereWhen' section, for example giving locations of an object\nmoving over time. Most packets will have only one, however, so the\ndefault is to just return co-ords extracted from the first.\n\nArgs:\nvoevent (:class:`voeventparse.voevent.Voevent`): Root node of the\nVOEvent etree.\nindex (int): Index of the ObsDataLocation to extract AstroCoords from.\n\nReturns:\nPosition (:py:class:`.Position2D`): The sky position defined in the\nObsDataLocation.", "source": "codesearchnet"}
{"code": "def load_pdb(self, pdb_id, mapped_chains=None, pdb_file=None, file_type=None, is_experimental=True, set_as_representative=False, representative_chain=None, force_rerun=False):\n    if self.structures.has_id(pdb_id):\n        if force_rerun:\n            existing = self.structures.get_by_id(pdb_id)\n            self.structures.remove(existing)\n        else:\n            log.debug('{}: PDB ID already present in list of structures'.format(pdb_id))\n            pdb = self.structures.get_by_id(pdb_id)\n            if pdb_file:\n                pdb.load_structure_path(pdb_file, file_type)\n            if mapped_chains:\n                pdb.add_mapped_chain_ids(mapped_chains)\n    if (not self.structures.has_id(pdb_id)):\n        if is_experimental:\n            pdb = PDBProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)\n        else:\n            pdb = StructProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)\n        self.structures.append(pdb)\n    if set_as_representative:\n        pdb.parse_structure()\n        self._representative_structure_setter(structprop=pdb, keep_chain=representative_chain, force_rerun=force_rerun)\n    return self.structures.get_by_id(pdb_id)", "docstring": "Load a structure ID and optional structure file into the structures attribute.\n\nArgs:\npdb_id (str): PDB ID\nmapped_chains (str, list): Chain ID or list of IDs which you are interested in\npdb_file (str): Path to PDB file\nfile_type (str): Type of PDB file\nis_experimental (bool): If this structure file is experimental\nset_as_representative (bool): If this structure should be set as the representative structure\nrepresentative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID\nforce_rerun (bool): If the PDB should be reloaded if it is already in the list of structures\n\nReturns:\nPDBProp: The object that is now contained in the structures attribute", "source": "codesearchnet"}
{"code": "def flatten_with_tuple_paths(structure, expand_composites=False):\n    return list(zip(yield_flat_paths(structure, expand_composites=expand_composites), flatten(structure, expand_composites=expand_composites)))", "docstring": "Returns a list of `(tuple_path, atom)` tuples.\n\nThe order of pairs produced matches that of `nest.flatten`. This allows you\nto flatten a nested structure while keeping information about where in the\nstructure each atom was located. See `nest.yield_flat_paths`\nfor more information about tuple paths.\n\nArgs:\nstructure: the nested structure to flatten.\nexpand_composites: If true, then composite tensors such as\n`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their\ncomponent tensors.\n\nReturns:\nA list of `(tuple_path, atom)` tuples. Each `tuple_path` is a tuple\nof indices and/or dictionary keys that uniquely specify the path to\n`atom` within `structure`.", "source": "github-repos"}
{"code": "def find_causal_link(self, direction, mechanism, purviews=False, allow_neg=False):\n    purviews = self.potential_purviews(direction, mechanism, purviews)\n    if (not purviews):\n        max_ria = _null_ac_ria(self.mechanism_state(direction), direction, mechanism, None)\n    else:\n        max_ria = max((self.find_mip(direction, mechanism, purview, allow_neg) for purview in purviews))\n    return CausalLink(max_ria)", "docstring": "Return the maximally irreducible cause or effect ratio for a\nmechanism.\n\nArgs:\ndirection (str): The temporal direction, specifying cause or\neffect.\nmechanism (tuple[int]): The mechanism to be tested for\nirreducibility.\n\nKeyword Args:\npurviews (tuple[int]): Optionally restrict the possible purviews\nto a subset of the subsystem. This may be useful for _e.g._\nfinding only concepts that are \"about\" a certain subset of\nnodes.\n\nReturns:\nCausalLink: The maximally-irreducible actual cause or effect.", "source": "codesearchnet"}
{"code": "def capitalcase(string):\n    \n\n    string = str(string)\n    if not string:\n        return string\n    return uppercase(string[0]) + string[1:]", "docstring": "Convert string into capital case.\nFirst letters will be uppercase.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Capital case string.", "source": "juraj-google-style"}
{"code": "def _save_and_write_assets(self, assets_collection_to_add=None):\n    asset_filename_map = _maybe_save_assets(_add_asset_to_collection, assets_collection_to_add)\n    if not asset_filename_map:\n        tf_logging.info('No assets to write.')\n        return\n    copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)", "docstring": "Saves asset to the meta graph and writes asset files to disk.\n\nArgs:\nassets_collection_to_add: The collection where the asset paths are setup.", "source": "github-repos"}
{"code": "def SetUsername(self, username):\n    \n    self._username = username\n    logger.debug('Elasticsearch username: {0!s}'.format(username))", "docstring": "Sets the username.\n\nArgs:\nusername (str): username to authenticate with.", "source": "juraj-google-style"}
{"code": "def _extract_mnist_labels(filename, num_labels):\n    with gzip.open(filename) as bytestream:\n        bytestream.read(8)\n        buf = bytestream.read(num_labels)\n        labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n    return labels", "docstring": "Extract labels from an MNIST file into integers.\n\nArgs:\nfilename: The path to an MNIST labels file.\nnum_labels: The number of labels in the file.\n\nReturns:\nA int64 numpy array of shape [num_labels]", "source": "codesearchnet"}
{"code": "def gzip_dir(path, compresslevel=6):\n    for f in os.listdir(path):\n        full_f = os.path.join(path, f)\n        if (not f.lower().endswith('gz')):\n            with open(full_f, 'rb') as f_in, GzipFile('{}.gz'.format(full_f), 'wb', compresslevel=compresslevel) as f_out:\n                shutil.copyfileobj(f_in, f_out)\n            shutil.copystat(full_f, '{}.gz'.format(full_f))\n            os.remove(full_f)", "docstring": "Gzips all files in a directory. Note that this is different from\nshutil.make_archive, which creates a tar archive. The aim of this method\nis to create gzipped files that can still be read using common Unix-style\ncommands like zless or zcat.\n\nArgs:\npath (str): Path to directory.\ncompresslevel (int): Level of compression, 1-9. 9 is default for\nGzipFile, 6 is default for gzip.", "source": "codesearchnet"}
{"code": "def FromString(cls, desc):\n    parse_exp = (Literal(u'run_time').suppress() + time_interval(u'interval'))\n    try:\n        data = parse_exp.parseString(desc)\n        return TimeBasedStopCondition(data[u'interval'][0])\n    except ParseException:\n        raise ArgumentError(u'Could not parse time based stop condition')", "docstring": "Parse this stop condition from a string representation.\n\nThe string needs to match:\nrun_time number [seconds|minutes|hours|days|months|years]\n\nArgs:\ndesc (str): The description\n\nReturns:\nTimeBasedStopCondition", "source": "codesearchnet"}
{"code": "def get_package(name, version, paths=None):\n    if isinstance(version, basestring):\n        range_ = VersionRange(('==%s' % version))\n    else:\n        range_ = VersionRange.from_version(version, '==')\n    it = iter_packages(name, range_, paths)\n    try:\n        return it.next()\n    except StopIteration:\n        return None", "docstring": "Get an exact version of a package.\n\nArgs:\nname (str): Name of the package, eg 'maya'.\nversion (Version or str): Version of the package, eg '1.0.0'\npaths (list of str, optional): paths to search for package, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` object, or None if the package was not found.", "source": "codesearchnet"}
{"code": "def search_features(self, search):\n    if isinstance(search, string_types):\n        search = [search]\n    search = [s.replace('*', '.*') for s in search]\n    cols = list(self.data.columns)\n    results = []\n    for s in search:\n        results.extend([f for f in cols if re.match((s + '$'), f)])\n    return list(set(results))", "docstring": "Returns all features that match any of the elements in the input\nlist.\n\nArgs:\nsearch (str, list): A string or list of strings defining the query.\n\nReturns:\nA list of matching feature names.", "source": "codesearchnet"}
{"code": "def remove(self, word):\n        \n        self._dictionary.pop(word.lower())\n        self._update_dictionary()", "docstring": "Remove a word from the word frequency list\n\nArgs:\nword (str): The word to remove", "source": "juraj-google-style"}
{"code": "def add_it(workbench, file_list, labels):\n    \n    md5s = []\n    for filename in file_list:\n        if filename != '.DS_Store':\n            with open(filename, 'rb') as pe_file:\n                base_name = os.path.basename(filename)\n                md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')\n                workbench.add_node(md5, md5[:6], labels)\n                md5s.append(md5)\n    return md5s", "docstring": "Add the given file_list to workbench as samples, also add them as nodes.\n\nArgs:\nworkbench: Instance of Workbench Client.\nfile_list: list of files.\nlabels: labels for the nodes.\n\nReturns:\nA list of md5s.", "source": "juraj-google-style"}
{"code": "def generate_contour_data(pid):\n    if isinstance(pid, GenInput):\n        pid = pid.return_dict()\n    begin_time = time.time()\n    WORKING_DIRECTORY = '.'\n    if ('WORKING_DIRECTORY' not in pid['general'].keys()):\n        pid['general']['WORKING_DIRECTORY'] = WORKING_DIRECTORY\n    running_process = GenProcess(**{**pid, **pid['generate_info']})\n    running_process.set_parameters()\n    running_process.run_snr()\n    file_out = FileReadOut(running_process.xvals, running_process.yvals, running_process.final_dict, **{**pid['general'], **pid['generate_info'], **pid['output_info']})\n    print('outputing file:', ((pid['general']['WORKING_DIRECTORY'] + '/') + pid['output_info']['output_file_name']))\n    getattr(file_out, (file_out.output_file_type + '_read_out'))()\n    print((time.time() - begin_time))\n    return", "docstring": "Main function for this program.\n\nThis will read in sensitivity_curves and binary parameters; calculate snrs\nwith a matched filtering approach; and then read the contour data out to a file.\n\nArgs:\npid (obj or dict): GenInput class or dictionary containing all of the input information for\nthe generation. See BOWIE documentation and example notebooks for usage of\nthis class.", "source": "codesearchnet"}
{"code": "def get_pipeline(self, name):\n        \n        check.str_param(name, 'name')\n\n        if name in self._pipeline_cache:\n            return self._pipeline_cache[name]\n\n        try:\n            pipeline = self.pipeline_dict[name]()\n        except KeyError:\n            raise DagsterInvariantViolationError(\n                'Could not find pipeline \"{name}\". Found: {pipeline_names}.'.format(\n                    name=name,\n                    pipeline_names=', '.join(\n                        [\n                            '\"{pipeline_name}\"'.format(pipeline_name=pipeline_name)\n                            for pipeline_name in self.pipeline_dict.keys()\n                        ]\n                    ),\n                )\n            )\n        check.invariant(\n            pipeline.name == name,\n            'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format(\n                name=name, pipeline=pipeline\n            ),\n        )\n\n        self._pipeline_cache[name] = check.inst(\n            pipeline,\n            PipelineDefinition,\n            (\n                'Function passed into pipeline_dict with key {key} must return a '\n                'PipelineDefinition'\n            ).format(key=name),\n        )\n\n        return pipeline", "docstring": "Get a pipeline by name. Only constructs that pipeline and caches it.\n\nArgs:\nname (str): Name of the pipeline to retriever\n\nReturns:\nPipelineDefinition: Instance of PipelineDefinition with that name.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:\n    if input_ids is None and inputs_embeds is None:\n        raise ValueError('Need to provide either `input_ids` or `input_embeds`.')\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.fill(dims=input_shape, value=0)\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def invoke_one(self, line):\n    funname = line.pop(0)\n    context = self.contexts[(- 1)]\n    func = self.find_function(context, funname)\n    if isinstance(func, dict):\n        self.contexts.append(func)\n        self._check_initialize_context()\n        return (None, line, False)\n    if (func.takes_cmdline is True):\n        val = func(line)\n        line = []\n    else:\n        (posargs, kwargs, line) = self.process_arguments(func, line)\n        if (inspect.isclass(func) and (not func.metadata.spec_filled(posargs, kwargs))):\n            raise ValidationError('Not enough parameters specified to call function', function=func.metadata.name, signature=func.metadata.signature())\n        val = func(*posargs, **kwargs)\n    finished = True\n    if (func.finalizer is True):\n        self.contexts.pop()\n    elif (val is not None):\n        if func.metadata.returns_data():\n            val = func.metadata.format_returnvalue(val)\n        else:\n            self.contexts.append(val)\n            self._check_initialize_context()\n            finished = False\n            val = None\n    return (val, line, finished)", "docstring": "Invoke a function given a list of arguments with the function listed first.\n\nThe function is searched for using the current context on the context stack\nand its annotated type information is used to convert all of the string parameters\npassed in line to appropriate python types.\n\nArgs:\nline (list): The list of command line arguments.\n\nReturns:\n(object, list, bool): A tuple containing the return value of the function, if any,\na boolean specifying if the function created a new context (False if a new context\nwas created) and a list with the remainder of the command line if this function\ndid not consume all arguments.", "source": "codesearchnet"}
{"code": "def has_all_nonzero_segment_lengths(neuron, threshold=0.0):\n    bad_ids = []\n    for sec in _nf.iter_sections(neuron):\n        p = sec.points\n        for (i, s) in enumerate(zip(p[:(- 1)], p[1:])):\n            if (segment_length(s) <= threshold):\n                bad_ids.append((sec.id, i))\n    return CheckResult((len(bad_ids) == 0), bad_ids)", "docstring": "Check presence of neuron segments with length not above threshold\n\nArguments:\nneuron(Neuron): The neuron object to test\nthreshold(float): value above which a segment length is considered to\nbe non-zero\n\nReturns:\nCheckResult with result including list of (section_id, segment_id)\nof zero length segments", "source": "codesearchnet"}
{"code": "def file(self, md5=None, sha1=None, sha256=None, **kwargs):\n    indicator_obj = File(md5, sha1, sha256, **kwargs)\n    return self._indicator(indicator_obj)", "docstring": "Add File data to Batch object.\n\n.. note:: A least one file hash value must be specified.\n\nArgs:\nmd5 (str, optional): The md5 value for this Indicator.\nsha1 (str, optional): The sha1 value for this Indicator.\nsha256 (str, optional): The sha256 value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nsize (str, kwargs): The file size for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of File.", "source": "codesearchnet"}
{"code": "def device(name):\n    ensure_initialized()\n    return context().device(name)", "docstring": "Context-manager to force placement of operations and Tensors on a device.\n\nExample:\n```python\nwith tf.device('gpu:0'):\nwith tf.device('cpu:0'):\nshape = tf.constant([], dtype=tf.int32)\nx = tf.random.truncated_normal(shape, tf.float32)\n```\nwill ensure that the `shape` Tensor is on CPU but the `truncated_normal`\noperation runs on GPU 0.\n\nArgs:\nname: Name of the device (see context().devices()), or None to perform\nautomatic placement.\n\nReturns:\nContext manager for setting the device.", "source": "github-repos"}
{"code": "def namespace_for_prefix(self, prefix):\n    try:\n        ni = self.__lookup_prefix(prefix)\n    except PrefixNotFoundError:\n        return None\n    else:\n        return ni.uri", "docstring": "Get the namespace the given prefix maps to.\n\nArgs:\nprefix (str): The prefix\n\nReturns:\nstr: The namespace, or None if the prefix isn't mapped to\nanything in this set.", "source": "codesearchnet"}
{"code": "def parse_query(query_str):\n\n    def _generate_match_all_fields_query():\n        stripped_query_str = ' '.join(query_str.replace(':', ' ').split())\n        return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}\n    if (not isinstance(query_str, six.text_type)):\n        query_str = six.text_type(query_str.decode('utf-8'))\n    logger.info((('Parsing: \"' + query_str) + '\".'))\n    parser = StatefulParser()\n    rst_visitor = RestructuringVisitor()\n    es_visitor = ElasticSearchVisitor()\n    try:\n        (unrecognized_text, parse_tree) = parser.parse(query_str, Query)\n        if unrecognized_text:\n            msg = (((('Parser returned unrecognized text: \"' + unrecognized_text) + '\" for query: \"') + query_str) + '\".')\n            if ((query_str == unrecognized_text) and (parse_tree is None)):\n                logger.warn(msg)\n                return _generate_match_all_fields_query()\n            else:\n                msg += 'Continuing with recognized parse tree.'\n            logger.warn(msg)\n    except SyntaxError as e:\n        logger.warn((((('Parser syntax error (' + six.text_type(e)) + ') with query: \"') + query_str) + '\". Continuing with a match_all with the given query.'))\n        return _generate_match_all_fields_query()\n    try:\n        restructured_parse_tree = parse_tree.accept(rst_visitor)\n        logger.debug(('Parse tree: \\n' + emit_tree_format(restructured_parse_tree)))\n    except Exception as e:\n        logger.exception((((RestructuringVisitor.__name__ + ' crashed') + ((': ' + six.text_type(e)) + '.')) if six.text_type(e) else '.'))\n        return _generate_match_all_fields_query()\n    try:\n        es_query = restructured_parse_tree.accept(es_visitor)\n    except Exception as e:\n        logger.exception((((ElasticSearchVisitor.__name__ + ' crashed') + ((': ' + six.text_type(e)) + '.')) if six.text_type(e) else '.'))\n        return _generate_match_all_fields_query()\n    if (not es_query):\n        return _generate_match_all_fields_query()\n    return es_query", "docstring": "Drives the whole logic, by parsing, restructuring and finally, generating an ElasticSearch query.\n\nArgs:\nquery_str (six.text_types): the given query to be translated to an ElasticSearch query\n\nReturns:\nsix.text_types: Return an ElasticSearch query.\n\nNotes:\nIn case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value, being the\nquery_str argument.", "source": "codesearchnet"}
{"code": "def _model_to_dict(model, ignore):\n        \n        return {attr: value for attr, value in model.__dict__.items()\n                if not attr.startswith('_') and attr not in ignore}", "docstring": "Convert OSS model to dict.\n\nArgs:\nmodel (oss2.models.RequestResult): Model.\nignore (tuple of str): Keys to not insert to dict.\n\nReturns:\ndict: Model dict version.", "source": "juraj-google-style"}
{"code": "def __init__(self, addr, raw_addr, name=None, rssi=0):\n        \n        self.addr = addr\n        self.raw_addr = raw_addr\n        self.name = name\n        self.rssi = rssi\n        self._age = time.time()", "docstring": "Initialise a new ScanResult.\n\nArgs:\naddr (str): Device hardware address in xx:xx:xx:xx:xx:xx format.\nraw_addr (bytearray): Device hardware address as raw bytes.\nname (str): Device name (if available) as ASCII text.\nrssi (float): Latest RSSI from the scan result for the device, if any.", "source": "juraj-google-style"}
{"code": "def _parse(json_str: str, primitive_cls: Type[Decimal]) -> Decimal:\n    decimal_value = json.loads(json_str, parse_float=decimal.Decimal, parse_int=decimal.Decimal)\n    if not isinstance(decimal_value, decimal.Decimal):\n        raise ValueError('Invalid Decimal format')\n    if not decimal_value.is_finite():\n        raise ValueError('Decimal out of range.')\n    return cast(Any, primitive_cls)(value=json_str)", "docstring": "Parses the json_str into a Decimal FHIR primitive protobuf message.\n\nArgs:\njson_str: The raw JSON string to parse.\nprimitive_cls: The type of FHIR primitive to parse into.\n\nReturns:\nA FHIR primitive Decimal protobuf message.", "source": "github-repos"}
{"code": "def run_instrumentation_test(self, device, package, options=None, prefix=None, runner=None):\n    instrumentation_block = [_InstrumentationBlock(prefix=prefix)]\n\n    def parse_instrumentation(raw_line):\n        line = raw_line.rstrip().decode('utf-8')\n        logging.info(line)\n        instrumentation_block[0] = self._parse_line(instrumentation_block[0], line)\n    device.adb.instrument(package=package, options=options, runner=runner, handler=parse_instrumentation)\n    return self._finish_parsing(instrumentation_block[0])", "docstring": "Runs instrumentation tests on a device and creates test records.\n\nArgs:\ndevice: AndroidDevice, the device to run instrumentation tests on.\npackage: string, the package name of the instrumentation tests.\noptions: dict, Instrumentation options for the instrumentation\ntests.\nprefix: string, an optional prefix for parser output for\ndistinguishing between instrumentation test runs.\nrunner: string, the runner to use for the instrumentation package,\ndefault to DEFAULT_INSTRUMENTATION_RUNNER.\n\nReturns:\nA boolean indicating whether or not all the instrumentation test\nmethods passed.\n\nRaises:\nTestError if the instrumentation run crashed or if parsing the\noutput failed.", "source": "github-repos"}
{"code": "def handle(self, message):\n        \n\n        logger.debug(message)\n        if Utilities.isNotEmpty(message['metadata']['opts']):\n            target = message['metadata']['opts']['target']\n            thread = message['metadata']['opts'].get('thread')\n            \n            pattern = re.compile('^@([a-zA-Z0-9._-]+)|\\s@([a-zA-Z0-9._-]+)')\n            matches = re.findall(pattern, message['text'])\n            matches = set(matches)\n            logger.debug('MATCHES!!!!   {}'.format(matches))\n            for match in matches:\n                if isinstance(match, tuple):\n                    if match[0] != '':\n                        match = match[0]\n                    else:\n                        match = match[1]\n                if not match.startswith('@'):\n                    match = '@' + match\n                message['text'] = message['text'].replace(\n                    match,\n                    '<{}>'.format(match)\n                )\n\n            pattern = re.compile('\n            matches = re.findall(pattern, message['text'])\n            matches = set(matches)\n            for match in matches:\n                channel_id = self.botThread.get_channel_id_by_name(match)\n                if channel_id:\n                    message['text'] = message['text'].replace(\n                        '\n                        '<\n                            channel_id,\n                            match\n                        )\n                    )\n\n            if (message['text'].find('<<@') != -1\n                    or message['text'].find('<<\n                message['text'] = message['text'].replace('<<', '<')\n                message['text'] = message['text'].replace('>>', '>')\n\n            if target.startswith('U'):\n                target = self.botThread.get_dm_channel(target)\n            attachment = message['metadata']['opts'].get('attachment')\n            if attachment:\n                text = message['metadata']['opts'].get('fallback')\n                attachment = self.build_attachment(\n                    text, target, attachment, thread)\n                self.botThread.post_attachment(attachment)\n            else:\n                self.botThread.slack_client.rtm_send_message(\n                    target, message['text'], thread=thread)", "docstring": "Attempts to send a message to the specified destination in Slack.\nExtends Legobot.Lego.handle()\n\nArgs:\nmessage (Legobot.Message): message w/ metadata to send.", "source": "juraj-google-style"}
{"code": "def size(self, st_size):\n        \n\n        self._check_positive_int(st_size)\n        current_size = self.st_size or 0\n        self.filesystem.change_disk_usage(\n            st_size - current_size, self.name, self.st_dev)\n        if self._byte_contents:\n            if st_size < current_size:\n                self._byte_contents = self._byte_contents[:st_size]\n            else:\n                if IS_PY2:\n                    self._byte_contents = '%s%s' % (\n                        self._byte_contents, '\\0' * (st_size - current_size))\n                else:\n                    self._byte_contents += b'\\0' * (st_size - current_size)\n        self.st_size = st_size\n        self.epoch += 1", "docstring": "Resizes file content, padding with nulls if new size exceeds the\nold size.\n\nArgs:\nst_size: The desired size for the file.\n\nRaises:\nIOError: if the st_size arg is not a non-negative integer\nor if st_size exceeds the available file system space", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.BatchAnnotateImages = channel.unary_unary(\n            \"/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages\",\n            request_serializer=google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def main(argv=None):\n    \n\n    if argv is None:\n        argv = sys.argv[1:]\n\n    try:\n        executor = None\n        parser = build_args()\n        args = parser.parse_args(args=argv)\n\n        model = DeviceModel()\n\n        parser = SensorGraphFileParser()\n        parser.parse_file(args.sensor_graph)\n        parser.compile(model)\n\n        if not args.disable_optimizer:\n            opt = SensorGraphOptimizer()\n            opt.optimize(parser.sensor_graph, model=model)\n\n        graph = parser.sensor_graph\n        sim = SensorGraphSimulator(graph)\n\n        for stop in args.stop:\n            sim.stop_condition(stop)\n\n        for watch in args.watch:\n            watch_sel = DataStreamSelector.FromString(watch)\n            graph.sensor_log.watch(watch_sel, watch_printer)\n\n        \n        if args.semihost_device is not None:\n            executor = SemihostedRPCExecutor(args.port, args.semihost_device)\n            sim.rpc_executor = executor\n\n        for mock in args.mock_rpc:\n            slot, rpc_id, value = process_mock_rpc(mock)\n            sim.rpc_executor.mock(slot, rpc_id, value)\n\n        for stim in args.stimulus:\n            sim.stimulus(stim)\n\n        graph.load_constants()\n\n        if args.trace is not None:\n            sim.record_trace()\n\n        try:\n            if args.connected:\n                sim.step(user_connected, 8)\n\n            sim.run(accelerated=not args.realtime)\n        except KeyboardInterrupt:\n            pass\n\n        if args.trace is not None:\n            sim.trace.save(args.trace)\n    finally:\n        if executor is not None:\n            executor.hw.close()\n\n    return 0", "docstring": "Main entry point for iotile sensorgraph simulator.\n\nThis is the iotile-sgrun command line program.  It takes\nan optional set of command line parameters to allow for\ntesting.\n\nArgs:\nargv (list of str): An optional set of command line\nparameters.  If not passed, these are taken from\nsys.argv.", "source": "juraj-google-style"}
{"code": "def files_comments_add(self, *, comment: str, file: str, **kwargs) -> SlackResponse:\n    kwargs.update({'comment': comment, 'file': file})\n    return self.api_call('files.comments.add', json=kwargs)", "docstring": "Add a comment to an existing file.\n\nArgs:\ncomment (str): The body of the comment.\ne.g. 'Everyone should take a moment to read this file.'\nfile (str): The file id. e.g. 'F1234467890'", "source": "codesearchnet"}
{"code": "def fail_all_requests(self, error):\n    for state in self.scheduler.active_requests.values():\n        self._handle_request_error(error, state)\n        self.scheduler.finish_request(state.request_id)\n    for req_id in list(self.scheduler.waiting_requests.keys()):\n        state = self.scheduler.waiting_requests.pop(req_id)\n        self._handle_request_error(error, state)\n    self.scheduler.waiting_requests_order.clear()", "docstring": "Fail all active requests with the given error.\n\nArgs:\nerror: The error to report in the failure message", "source": "github-repos"}
{"code": "def post_cutout(self, token, channel,\n                    x_start,\n                    y_start,\n                    z_start,\n                    data,\n                    resolution=0):\n        \n        datatype = self.get_proj_info(token)['channels'][channel]['datatype']\n        if data.dtype.name != datatype:\n            data = data.astype(datatype)\n\n        data = numpy.rollaxis(data, 1)\n        data = numpy.rollaxis(data, 2)\n\n        if six.PY3 or data.nbytes > 1.5e9:\n            ul_func = self._post_cutout_no_chunking_npz\n        else:\n            ul_func = self._post_cutout_no_chunking_blosc\n\n        if data.size < self._chunk_threshold:\n            return ul_func(token, channel, x_start,\n                           y_start, z_start, data,\n                           resolution)\n\n        return self._post_cutout_with_chunking(token, channel,\n                                               x_start, y_start, z_start, data,\n                                               resolution, ul_func)", "docstring": "Post a cutout to the server.\n\nArguments:\ntoken (str)\nchannel (str)\nx_start (int)\ny_start (int)\nz_start (int)\ndata (numpy.ndarray): A numpy array of data. Pass in (x, y, z)\nresolution (int : 0): Resolution at which to insert the data\n\nReturns:\nbool: True on success\n\nRaises:\nRemoteDataUploadError: if there's an issue during upload.", "source": "juraj-google-style"}
{"code": "def validate_yaml(self, properties):\n        \n        validator = OurValidator(schema)\n        if not validator.validate(properties):\n            for key, value in validator.errors.items():\n                if any(['unallowed value' in v for v in value]):\n                    print(('{key} has an illegal value. Allowed values are {values} and are case '\n                           'sensitive.').format(key=key, values=schema[key]['allowed']))\n\n            raise ValueError(validator.errors)", "docstring": "Validate the parsed YAML file for adherance to the ChemKED format.\n\nArguments:\nproperties (`dict`): Dictionary created from the parsed YAML file\n\nRaises:\n`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose\nstring contains the errors that are present.", "source": "juraj-google-style"}
{"code": "def GetSOAPHeaders(self, create_method):\n    \n    header = create_method(self._SOAP_HEADER_CLASS % self._version)\n    header.clientCustomerId = self._adwords_client.client_customer_id\n    header.developerToken = self._adwords_client.developer_token\n    header.userAgent = ''.join([\n        self._adwords_client.user_agent,\n        googleads.common.GenerateLibSig(self._PRODUCT_SIG)])\n    header.validateOnly = self._adwords_client.validate_only\n    header.partialFailure = self._adwords_client.partial_failure\n    return header", "docstring": "Returns the SOAP headers required for request authorization.\n\nArgs:\ncreate_method: The SOAP library specific method used to instantiate SOAP\nobjects.\n\nReturns:\nA SOAP object containing the headers.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               datastore_client,\n               work_type_entity_id):\n    \n    self._datastore_client = datastore_client\n    self._work_type_entity_id = work_type_entity_id\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    self._work = {}", "docstring": "Initializes WorkPiecesBase class.\n\nArgs:\ndatastore_client: instance of CompetitionDatastoreClient.\nwork_type_entity_id: ID of the WorkType parent entity", "source": "juraj-google-style"}
{"code": "def isregex(value):\n    \n    if not value:\n        return False\n    return any((isregex_expr(value), isinstance(value, retype)))", "docstring": "Returns ``True`` if the input argument object is a native\nregular expression object, otherwise ``False``.\n\nArguments:\nvalue (mixed): input value to test.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def _do_parse(inp, fmt, encoding, force_types):\n    res = {}\n    _check_lib_installed(fmt, 'parse')\n    if (fmt == 'ini'):\n        cfg = configobj.ConfigObj(inp, encoding=encoding)\n        res = cfg.dict()\n    elif (fmt == 'json'):\n        if six.PY3:\n            inp = io.TextIOWrapper(inp, encoding=encoding)\n        res = json.load(inp, encoding=encoding)\n    elif (fmt == 'json5'):\n        if six.PY3:\n            inp = io.TextIOWrapper(inp, encoding=encoding)\n        res = json5.load(inp, encoding=encoding)\n    elif (fmt == 'toml'):\n        if (not _is_utf8(encoding)):\n            raise AnyMarkupError('toml is always utf-8 encoded according to specification')\n        if six.PY3:\n            inp = io.TextIOWrapper(inp, encoding=encoding)\n        res = toml.load(inp)\n    elif (fmt == 'xml'):\n        res = xmltodict.parse(inp, encoding=encoding)\n    elif (fmt == 'yaml'):\n        res = yaml.safe_load(inp)\n    else:\n        raise\n    return _ensure_proper_types(res, encoding, force_types)", "docstring": "Actually parse input.\n\nArgs:\ninp: bytes yielding file-like object\nfmt: format to use for parsing\nencoding: encoding of `inp`\nforce_types:\nif `True`, integers, floats, booleans and none/null\nare recognized and returned as proper types instead of strings;\nif `False`, everything is converted to strings\nif `None`, backend return value is used\nReturns:\nparsed `inp` (dict or list) containing unicode values\nRaises:\nvarious sorts of errors raised by used libraries while parsing", "source": "codesearchnet"}
{"code": "def batch(self, spec, batch_size):\n    raise NotImplementedError(f'{type(self).__name__}.batch')", "docstring": "Returns the TypeSpec representing a batch of values described by `spec`.\n\nArgs:\nspec: The `TypeSpec` for an individual value.\nbatch_size: An `int` indicating the number of values that are batched\ntogether, or `None` if the batch size is not known.\n\nReturns:\nA `TypeSpec` for a batch of values.", "source": "github-repos"}
{"code": "def get_params_from_sqlalchemy_url(db_url):\n        \n        \n        result = urlsplit(db_url)\n        return {'database': result.path[1:], 'host': result.hostname, 'port': result.port,\n                'username': result.username, 'password': result.password, 'driver': result.scheme}", "docstring": "Gets PostgreSQL database connection parameters from SQLAlchemy url\n\nArgs:\ndb_url (str): SQLAlchemy url\n\nReturns:\nDict[str,Any]: Dictionary of database connection parameters", "source": "juraj-google-style"}
{"code": "def easeOutBounce(n):\n    \n    _checkRange(n)\n    if n < (1/2.75):\n        return 7.5625 * n * n\n    elif n < (2/2.75):\n        n -= (1.5/2.75)\n        return 7.5625 * n * n + 0.75\n    elif n < (2.5/2.75):\n        n -= (2.25/2.75)\n        return 7.5625 * n * n + 0.9375\n    else:\n        n -= (2.65/2.75)\n        return 7.5625 * n * n + 0.984375", "docstring": "A bouncing tween function that hits the destination and then bounces to rest.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def _bbox(nodes):\n    \n    left, bottom = np.min(nodes, axis=1)\n    right, top = np.max(nodes, axis=1)\n    return left, right, bottom, top", "docstring": "Get the bounding box for set of points.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): A set of points.\n\nReturns:\nTuple[float, float, float, float]: The left, right,\nbottom and top bounds for the box.", "source": "juraj-google-style"}
{"code": "def execute_managed_notebook(cls, nb_man, kernel_name, log_output=False, start_timeout=60, execution_timeout=None, **kwargs):\n    preprocessor = PapermillExecutePreprocessor(timeout=execution_timeout, startup_timeout=start_timeout, kernel_name=kernel_name, log=logger)\n    preprocessor.log_output = log_output\n    preprocessor.preprocess(nb_man, kwargs)", "docstring": "Performs the actual execution of the parameterized notebook locally.\n\nArgs:\nnb (NotebookNode): Executable notebook object.\nkernel_name (str): Name of kernel to execute the notebook against.\nlog_output (bool): Flag for whether or not to write notebook output to stderr.\nstart_timeout (int): Duration to wait for kernel start-up.\nexecution_timeout (int): Duration to wait before failing execution (default: never).\n\n\nNote: The preprocessor concept in this method is similar to what is used\nby `nbconvert`, and it is somewhat misleading here. The preprocesser\nrepresents a notebook processor, not a preparation object.", "source": "codesearchnet"}
{"code": "def AddPathSegment(self, path_segment, scan_object):\n    \n    if path_segment in self._path_segments:\n      raise ValueError('Path segment already set.')\n\n    if isinstance(scan_object, PathFilterScanTreeNode):\n      scan_object.parent = self\n\n    self._path_segments[path_segment] = scan_object", "docstring": "Adds a path segment.\n\nArgs:\npath_segment: a string containing the path segment.\nscan_object: a scan object, either a scan tree sub node (instance of\nPathFilterScanTreeNode) or a string containing a path.\n\nRaises:\nValueError: if the node already contains a scan object for\nthe path segment.", "source": "juraj-google-style"}
{"code": "def filter_by_analysis_period(self, analysis_period):\n        \n        self._check_analysis_period(analysis_period)\n        _filtered_data = self.filter_by_moys(analysis_period.moys)\n        _filtered_data.header._analysis_period = analysis_period\n        return _filtered_data", "docstring": "Filter a Data Collection based on an analysis period.\n\nArgs:\nanalysis period: A Ladybug analysis period\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def alpha_blend(self, other):\n    \n    \n    fa = self.__a + other.__a - (self.__a * other.__a)\n\n    \n    if fa==0: sa = 0\n    else: sa = min(1.0, self.__a/other.__a)\n\n    \n    da = 1.0 - sa\n\n    sr, sg, sb = [v * sa for v in self.__rgb]\n    dr, dg, db = [v * da for v in other.__rgb]\n\n    return Color((sr+dr, sg+dg, sb+db), 'rgb', fa, self.__wref)", "docstring": "Alpha-blend this color on the other one.\n\nArgs:\n:other:\nThe grapefruit.Color to alpha-blend with this one.\n\nReturns:\nA grapefruit.Color instance which is the result of alpha-blending\nthis color on the other one.\n\n>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)\n>>> c2 = Color.from_rgb(1, 1, 1, 0.8)\n>>> c3 = c1.alpha_blend(c2)\n>>> c3\nColor(1.0, 0.875, 0.75, 0.84)", "source": "juraj-google-style"}
{"code": "def do_keygen(args):\n    if (args.key_name is not None):\n        key_name = args.key_name\n    else:\n        key_name = 'validator'\n    key_dir = get_key_dir()\n    if (not os.path.exists(key_dir)):\n        raise CliException('Key directory does not exist: {}'.format(key_dir))\n    priv_filename = os.path.join(key_dir, (key_name + '.priv'))\n    pub_filename = os.path.join(key_dir, (key_name + '.pub'))\n    if (not args.force):\n        file_exists = False\n        for filename in [priv_filename, pub_filename]:\n            if os.path.exists(filename):\n                file_exists = True\n                print('file exists: {}'.format(filename), file=sys.stderr)\n        if file_exists:\n            raise CliException('files exist, rerun with --force to overwrite existing files')\n    context = create_context('secp256k1')\n    private_key = context.new_random_private_key()\n    public_key = context.get_public_key(private_key)\n    try:\n        priv_exists = os.path.exists(priv_filename)\n        with open(priv_filename, 'w') as priv_fd:\n            if (not args.quiet):\n                if priv_exists:\n                    print('overwriting file: {}'.format(priv_filename))\n                else:\n                    print('writing file: {}'.format(priv_filename))\n            priv_fd.write(private_key.as_hex())\n            priv_fd.write('\\n')\n            keydir_info = os.stat(key_dir)\n            keydir_gid = keydir_info.st_gid\n            keydir_uid = keydir_info.st_uid\n            os.chown(priv_filename, keydir_uid, keydir_gid)\n            os.chmod(priv_filename, 416)\n        pub_exists = os.path.exists(pub_filename)\n        with open(pub_filename, 'w') as pub_fd:\n            if (not args.quiet):\n                if pub_exists:\n                    print('overwriting file: {}'.format(pub_filename))\n                else:\n                    print('writing file: {}'.format(pub_filename))\n            pub_fd.write(public_key.as_hex())\n            pub_fd.write('\\n')\n            os.chown(pub_filename, keydir_uid, keydir_gid)\n            os.chmod(pub_filename, 420)\n    except IOError as ioe:\n        raise CliException('IOError: {}'.format(str(ioe)))", "docstring": "Executes the key generation operation, given the parsed arguments.\n\nArgs:\nargs (:obj:`Namespace`): The parsed args.", "source": "codesearchnet"}
{"code": "def __init__(self, model, ncats, alpha_lambda=1.0, beta_lambda=2.0,\n        freeparams=['alpha_lambda', 'beta_lambda']):\n        \n        super(GammaDistributedOmegaModel, self).__init__(model, \"omega\",\n                ncats, alpha_lambda=1.0, beta_lambda=2.0,\n                freeparams=['alpha_lambda', 'beta_lambda'])", "docstring": "Initialize an `GammaDistributedModel` object.\n\nThe `lambda_param` is set to \"omega\".\n\nArgs:\n`model` `ncats`,`alpha_lambda`, `beta_lambda`, `freeparams`\nMeaning described in main class doc string for\n`GammaDistributedModel`.", "source": "juraj-google-style"}
{"code": "def get_graphs(self, run_key, debug=False):\n    \n    graph_dict = (self._run_key_to_debug_graphs if debug else\n                  self._run_key_to_original_graphs)\n    graph_wrappers = graph_dict.get(run_key, {})\n    graph_defs = dict()\n    for device_name, wrapper in graph_wrappers.items():\n      graph_defs[device_name] = wrapper.graph_def\n    return graph_defs", "docstring": "Get the runtime GraphDef protos associated with a run key.\n\nArgs:\nrun_key: A Session.run kay.\ndebug: Whether the debugger-decoratedgraph is to be retrieved.\n\nReturns:\nA `dict` mapping device name to `GraphDef` protos.", "source": "juraj-google-style"}
{"code": "def build_or_reuse_placeholder(tensor_spec):\n    \n    g = tfv1.get_default_graph()\n    name = tensor_spec.name\n    try:\n        tensor = g.get_tensor_by_name(name + ':0')\n        assert \"Placeholder\" in tensor.op.type, \"Tensor {} exists but is not a placeholder!\".format(name)\n        assert tensor_spec.is_compatible_with(tensor), \\\n            \"Tensor {} exists but is not compatible with the signature!\".format(tensor)\n        return tensor\n    except KeyError:\n        with tfv1.name_scope(None):   \n            ret = tfv1.placeholder(\n                tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)\n        return ret", "docstring": "Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one.\n\nArgs:\ntensor_spec (tf.TensorSpec):\n\nReturns:\ntf.Tensor:", "source": "juraj-google-style"}
{"code": "def __create_and_save_state(cls, job_config, mapreduce_spec):\n    state = model.MapreduceState.create_new(job_config.job_id)\n    state.mapreduce_spec = mapreduce_spec\n    state.active = True\n    state.active_shards = 0\n    state.app_id = job_config._app\n    config = datastore_rpc.Configuration(force_writes=job_config._force_writes)\n    state.put(config=config)\n    return state", "docstring": "Save map job state to datastore.\n\nSave state to datastore so that UI can see it immediately.\n\nArgs:\njob_config: map_job.JobConfig.\nmapreduce_spec: model.MapreduceSpec.\n\nReturns:\nmodel.MapreduceState for this job.", "source": "codesearchnet"}
{"code": "def load_bmp(path):\n        \n        surface = object.__new__(Surface)\n        surface._ptr = check_ptr_err(lib.SDL_LoadBMP_RW(lib.SDL_RWFromFile(path, \"rb\"), 1))\n        return surface", "docstring": "Load a surface from a file.\n\nArgs:\npath (str): Path to the BMP file to load.\n\nReturns:\nSurface: A surface containing the pixels loaded from the file.\n\nRaises:\nSDLError: If the file cannot be loaded.", "source": "juraj-google-style"}
{"code": "def OnCreateAccount(self, account):\n        \n        pubkey = account.PublicKey.encode_point(False)\n        pubkeyunhex = binascii.unhexlify(pubkey)\n        pub = pubkeyunhex[1:65]\n\n        priv = bytearray(account.PrivateKey)\n        decrypted = pub + priv\n        encrypted_pk = self.EncryptPrivateKey(bytes(decrypted))\n\n        db_account, created = Account.get_or_create(\n            PrivateKeyEncrypted=encrypted_pk, PublicKeyHash=account.PublicKeyHash.ToBytes())\n        db_account.save()\n        self.__dbaccount = db_account", "docstring": "Save a KeyPair in encrypted form into the database.\n\nArgs:\naccount (KeyPair):", "source": "juraj-google-style"}
{"code": "def get_arrive_stop(self, **kwargs):\n        \n        \n        params = {\n            'idStop': kwargs.get('stop_number'),\n            'cultureInfo': util.language_code(kwargs.get('lang'))\n        }\n\n        \n        result = self.make_request('geo', 'get_arrive_stop', **params)\n\n        \n        if not util.check_result(result, 'arrives'):\n            return False, 'UNKNOWN ERROR'\n\n        \n        values = util.response_list(result, 'arrives')\n        return True, [emtype.Arrival(**a) for a in values]", "docstring": "Obtain bus arrival info in target stop.\n\nArgs:\nstop_number (int): Stop number to query.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Arrival]), or message string\nin case of error.", "source": "juraj-google-style"}
{"code": "def replace_variables(self, text):\n        \n        variables = {\n            'python-executable': str(self._venv_path / 'bin' / 'python')\n        }\n        return text.format(**variables)", "docstring": "Replace variable placeholders in `text` with values from the virtual env.\n\nThe variables are:\n- {python-executable}\n\nArgs:\ntext: The text to do replacment int.\n\nReturns: The text after replacement.", "source": "juraj-google-style"}
{"code": "def is_common(schema):\n    if isinstance(schema, StreamSchema):\n        return (schema.schema() in _SCHEMA_COMMON)\n    if isinstance(schema, CommonSchema):\n        return True\n    if isinstance(schema, basestring):\n        return is_common(StreamSchema(schema))\n    return False", "docstring": "Is `schema` an common schema.\n\nArgs:\nschema: Scheme to test.\n\nReturns:\nbool: ``True`` if schema is a common schema, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def get_absl_log_prefix(record):\n  \n  created_tuple = time.localtime(record.created)\n  created_microsecond = int(record.created % 1.0 * 1e6)\n\n  critical_prefix = ''\n  level = record.levelno\n  if _is_non_absl_fatal_record(record):\n    \n    \n    level = logging.ERROR\n    critical_prefix = _CRITICAL_PREFIX\n  severity = converter.get_initial_for_level(level)\n\n  return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (\n      severity,\n      created_tuple.tm_mon,\n      created_tuple.tm_mday,\n      created_tuple.tm_hour,\n      created_tuple.tm_min,\n      created_tuple.tm_sec,\n      created_microsecond,\n      _get_thread_id(),\n      record.filename,\n      record.lineno,\n      critical_prefix)", "docstring": "Returns the absl log prefix for the log record.\n\nArgs:\nrecord: logging.LogRecord, the record to get prefix for.", "source": "juraj-google-style"}
{"code": "def handle_error(self, error, download_request):\n    if (hasattr(error, 'errno') and (error.errno == errno.EACCES)):\n        self.handle_certificate_problem(str(error))\n    else:\n        self.handle_general_download_error(str(error), download_request)", "docstring": "Checks what error occured and looks for an appropriate solution.\n\nArgs:\nerror: Exception\nThe error that has occured.\ndownload_request:\nThe request which resulted in the error.", "source": "codesearchnet"}
{"code": "class Mask2FormerPixelDecoderOutput(ModelOutput):\n    multi_scale_features: Tuple[torch.FloatTensor] = None\n    mask_features: Optional[torch.FloatTensor] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Mask2Former's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns\nthe mask features and the multiscale features.\n\nArgs:\nmulti_scale_features (`tuple(torch.FloatTensor)`):\nTuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,\nwidth)`from the Multi-Scale Deformable Attenntion based Pixel Decoder.\nmask_features (`torch.FloatTensor`):\nTensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder\nLayer.\nattentions (`tuple(torch.FloatTensor)`, *optional*):\nTuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\nsequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed\nor when `config.output_attentions=True`", "source": "github-repos"}
{"code": "def local_get_state(self, device, id_override=None, type_override=None):\n    if ALLOW_LOCAL_CONTROL:\n        if (device.local_id() is not None):\n            hub = HUBS.get(device.hub_id())\n            if ((hub is not None) and (hub['token'] is not None)):\n                ip = hub['ip']\n                access_token = hub['token']\n            else:\n                return self.get_device_state(device, id_override, type_override)\n        else:\n            return self.get_device_state(device, id_override, type_override)\n        _LOGGER.info('Getting local state')\n        local_id = (id_override or device.local_id())\n        object_type = (type_override or device.object_type())\n        LOCAL_API_HEADERS['Authorization'] = ('Bearer ' + access_token)\n        url_string = 'https:\n        try:\n            arequest = requests.get(url_string, headers=LOCAL_API_HEADERS, verify=False, timeout=3)\n        except requests.exceptions.RequestException:\n            _LOGGER.error('Error sending local control request. Sending request online')\n            return self.get_device_state(device, id_override, type_override)\n        response_json = arequest.json()\n        _LOGGER.debug('%s', response_json)\n        temp_state = device.json_state\n        for (key, value) in response_json['data']['last_reading'].items():\n            temp_state['last_reading'][key] = value\n        return temp_state\n    else:\n        return self.get_device_state(device, id_override, type_override)", "docstring": "Get device state via local API, and fall back to online API.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\nresponse_json (Dict): The API's response in dictionary format", "source": "codesearchnet"}
{"code": "def color_lerp(c1: Tuple[(int, int, int)], c2: Tuple[(int, int, int)], a: float) -> Color:\n    return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a))", "docstring": "Return the linear interpolation between two colors.\n\n``a`` is the interpolation value, with 0 returing ``c1``,\n1 returning ``c2``, and 0.5 returing a color halfway between both.\n\nArgs:\nc1 (Union[Tuple[int, int, int], Sequence[int]]):\nThe first color.  At a=0.\nc2 (Union[Tuple[int, int, int], Sequence[int]]):\nThe second color.  At a=1.\na (float): The interpolation value,\n\nReturns:\nColor: The interpolated Color.", "source": "codesearchnet"}
{"code": "def __fetch_route53_zones(self):\n    done = False\n    marker = None\n    zones = {}\n    route53 = self.session.client('route53')\n    try:\n        while (not done):\n            if marker:\n                response = route53.list_hosted_zones(Marker=marker)\n            else:\n                response = route53.list_hosted_zones()\n            if response['IsTruncated']:\n                marker = response['NextMarker']\n            else:\n                done = True\n            for zone_data in response['HostedZones']:\n                zones[get_resource_id('r53z', zone_data['Id'])] = {'name': zone_data['Name'].rstrip('.'), 'source': 'AWS/{}'.format(self.account), 'comment': (zone_data['Config']['Comment'] if ('Comment' in zone_data['Config']) else None), 'zone_id': zone_data['Id'], 'private_zone': zone_data['Config']['PrivateZone'], 'tags': self.__fetch_route53_zone_tags(zone_data['Id'])}\n        return zones\n    finally:\n        del route53", "docstring": "Return a list of all DNS zones hosted in Route53\n\nReturns:\n:obj:`list` of `dict`", "source": "codesearchnet"}
{"code": "def prepare_partition_index(config: Config, chunk_size: t.Optional[int]=None) -> t.Iterator[t.Tuple[Config, t.List[Index]]]:\n    dims = [range(len(config.selection[key])) for key in config.partition_keys]\n    n_partitions = math.prod([len(d) for d in dims])\n    logger.info(f'Creating {n_partitions} partitions.')\n    if chunk_size is None:\n        chunk_size = 1000\n    for option_idx in ichunked(itertools.product(*dims), chunk_size):\n        yield (config, list(option_idx))", "docstring": "Produce indexes over client parameters, partitioning over `partition_keys`\n\nThis produces a Cartesian-Cross over the range of keys.\n\nFor example, if the keys were 'year' and 'month', it would produce\nan iterable like:\n( (0, 0), (0, 1), (0, 2), ...)\n\nAfter the indexes were converted back to keys, it would produce values like:\n( ('2020', '01'), ('2020', '02'), ('2020', '03'), ...)\n\nReturns:\nAn iterator of index tuples.", "source": "github-repos"}
{"code": "def to(self, jid: str):\n        \n        if jid is not None and not isinstance(jid, str):\n            raise TypeError(\"'to' MUST be a string\")\n        self._to = aioxmpp.JID.fromstr(jid) if jid is not None else None", "docstring": "Set jid of the receiver.\n\nArgs:\njid (str): the jid of the receiver.", "source": "juraj-google-style"}
{"code": "def parameterized_truncated_normal(shape, means=0.0, stddevs=1.0, minvals=-2.0, maxvals=2.0, dtype=dtypes.float32, seed=None, name=None):\n    with ops.name_scope(name, 'parameterized_truncated_normal', [shape, means, stddevs, minvals, maxvals]) as name:\n        shape_tensor = shape_util.shape_tensor(shape)\n        means_tensor = ops.convert_to_tensor(means, dtype=dtype, name='means')\n        stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name='stddevs')\n        minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name='minvals')\n        maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name='maxvals')\n        seed1, seed2 = random_seed.get_seed(seed)\n        rnd = gen_random_ops.parameterized_truncated_normal(shape_tensor, means_tensor, stddevs_tensor, minvals_tensor, maxvals_tensor, seed=seed1, seed2=seed2)\n        shape_util.maybe_set_static_shape(rnd, shape)\n        return rnd", "docstring": "Outputs random values from a truncated normal distribution.\n\nThe generated values follow a normal distribution with specified mean and\nstandard deviation, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.\n\nArgs:\nshape: A 1-D integer Tensor or Python array. The shape of the output tensor.\nmeans: A 0-D Tensor or Python value of type `dtype`. The mean of the\ntruncated normal distribution.\nstddevs: A 0-D Tensor or Python value of type `dtype`. The standard\ndeviation of the truncated normal distribution.\nminvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of\nthe truncated normal distribution.\nmaxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of\nthe truncated normal distribution.\ndtype: The type of the output.\nseed: A Python integer. Used to create a random seed for the distribution.\nSee\n`tf.random.set_seed`\nfor behavior.\nname: A name for the operation (optional).\n\nReturns:\nA tensor of the specified shape filled with random truncated normal values.", "source": "github-repos"}
{"code": "def van_enc_2d(x, first_depth, reuse=False):\n    with tf.variable_scope('van_enc', reuse=reuse):\n        a = 4\n        b = 4\n        enc = tf.nn.relu(x)\n        enc = tf.layers.dense(enc, ((first_depth * a) * b), tf.nn.relu)\n        enc = tf.contrib.layers.layer_norm(enc)\n        enc = tf.reshape(enc, [(- 1), a, b, first_depth])\n        enc = tf.layers.conv2d_transpose(enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)\n        enc = tf.contrib.layers.layer_norm(enc)\n        enc = tf.layers.conv2d_transpose(enc, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=2)\n        van_higher_level_2 = tf.reshape(enc, [(- 1), (((((a * 2) * b) * 2) * first_depth) * 2)])\n        enc = tf.layers.conv2d_transpose(enc, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=1)\n        enc = tf.contrib.layers.layer_norm(enc)\n        enc = tf.layers.conv2d_transpose(enc, (first_depth * 4), 3, padding='same', activation=tf.nn.relu, strides=1)\n        van_higher_level_4 = tf.reshape(enc, [(- 1), (((((a * 2) * b) * 2) * first_depth) * 4)])\n        van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)\n        return (enc, van_higher_level)", "docstring": "The higher level structure encoder for the VAN.\n\nThe high level structure is a vector instead of an image.\n\nArgs:\nx: The higher level structure to encode.\nfirst_depth: The depth of the first layer. Depth is increased in subsequent\nlayers.\nreuse: To reuse in variable scope or not.\n\nReturns:\nThe encoded image.", "source": "codesearchnet"}
{"code": "def same_dynamic_shape(a, b):\n    a = ops.convert_to_tensor(a, name='a')\n    b = ops.convert_to_tensor(b, name='b')\n\n    def all_shapes_equal():\n        return math_ops.reduce_all(math_ops.equal(array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0), array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))\n    return tf_cond.cond(math_ops.equal(array_ops.rank(a), array_ops.rank(b)), all_shapes_equal, lambda: constant_op.constant(False))", "docstring": "Returns whether a and b have the same dynamic shape.\n\nArgs:\na: `Tensor`\nb: `Tensor`\n\nReturns:\n`bool` `Tensor` representing if both tensors have the same shape.", "source": "github-repos"}
{"code": "def __init__(self, path):\n        \n        self.path = Path(path).resolve()\n\n        if not self.path.is_dir():\n            log.warning(\"path given to render engine is not a directory\")\n            raise NotADirectoryError(\"path '%s' is not a directory\" % path)", "docstring": "Constructor\n\nArgs:\npath (str): Top level directory to search for template files - the\npath must exist and the path must be a directory.\n\nRaises:\nFileNotFoundError: If the provided path does not exists.\nNotADirectoryError: If the path is not a directory.", "source": "juraj-google-style"}
{"code": "def transform_op_tree(root: OP_TREE, op_transformation: Callable[([Operation], OP_TREE)]=(lambda e: e), iter_transformation: Callable[([Iterable[OP_TREE]], OP_TREE)]=(lambda e: e), preserve_moments: bool=False) -> OP_TREE:\n    if isinstance(root, Operation):\n        return op_transformation(root)\n    if (preserve_moments and isinstance(root, Moment)):\n        return root\n    if isinstance(root, collections.Iterable):\n        return iter_transformation((transform_op_tree(subtree, op_transformation, iter_transformation, preserve_moments) for subtree in root))\n    raise TypeError('Not a collections.Iterable or an Operation: {}'.format(root))", "docstring": "Maps transformation functions onto the nodes of an OP_TREE.\n\nArgs:\nroot: The operation or tree of operations to transform.\nop_transformation: How to transform the operations (i.e. leaves).\niter_transformation: How to transform the iterables (i.e. internal\nnodes).\npreserve_moments: Whether to leave Moments alone. If True, the\ntransformation functions will not be applied to Moments or the\noperations within them.\n\nReturns:\nA transformed operation tree.\n\nRaises:\nTypeError: root isn't a valid OP_TREE.", "source": "codesearchnet"}
{"code": "def GetTestConfigs():\n    test_configs = ['NHWC', 'NCHW']\n    return test_configs", "docstring": "Get all the valid tests configs to run.\n\nReturns:\nall the valid test configs", "source": "github-repos"}
{"code": "def _assert_tensorlike_all_close(self, sess: session.Session, tensorlike_value_1: core.TensorLike, tensorlike_value_2: core.TensorLike) -> None:\n    if isinstance(tensorlike_value_1, core.Tensor):\n        tensorlike_value_1 = tensorlike_value_1.eval(session=sess)\n    if isinstance(tensorlike_value_2, core.Tensor):\n        tensorlike_value_2 = tensorlike_value_2.eval(session=sess)\n    self.assertAllClose(tensorlike_value_1, tensorlike_value_2)", "docstring": "Asserts that two different TensorLike values are \"all close\".\n\nArgs:\nsess: Session instance used to evaluate any tf.Tensors.\ntensorlike_value_1: A TensorLike value.\ntensorlike_value_2: A TensorLike value.", "source": "github-repos"}
{"code": "def get_method_returning_field_value(self, field_name):\n        \n        method = getattr(self, field_name, None)\n        return method if method and callable(method) else None", "docstring": "Method should return object method that can be used to get field value.\nArgs:\nfield_name: name of the field\n\nReturns: method for obtaining a field value", "source": "juraj-google-style"}
{"code": "def resource_input_index(tensor_name, input_names, node_defs, functions):\n    while tensor_name not in input_names:\n        parts = tensor_name.split(':')\n        if len(parts) == 3:\n            op_name, _, output_idx = parts\n        elif len(parts) == 2:\n            op_name, output_idx = parts\n        else:\n            assert len(parts) == 1\n            op_name = parts[0]\n            output_idx = 0\n            tensor_name = '%s:%d' % (tensor_name, output_idx)\n            if tensor_name in input_names:\n                break\n        output_idx = int(output_idx)\n        node_def = node_defs[op_name]\n\n        def _extract_input_index(function_attribute_name):\n            func_name = node_def.attr[function_attribute_name].func.name\n            fdef = functions[func_name].cached_definition\n            output_arg_name = fdef.signature.output_arg[output_idx].name\n            output_tensor_name = fdef.ret[output_arg_name]\n            return resource_input_index(output_tensor_name, [arg.name for arg in fdef.signature.input_arg], {ndef.name: ndef for ndef in fdef.node_def}, functions)\n        if node_def.op in ('Identity', 'While'):\n            tensor_name = node_def.input[output_idx]\n        elif node_def.op in ('PartitionedCall', 'StatefulPartitionedCall'):\n            tensor_name = node_def.input[_extract_input_index('f')]\n        elif node_def.op in ('If', 'StatelessIf'):\n            input_index = _extract_input_index('then_branch')\n            if input_index != _extract_input_index('else_branch'):\n                raise AssertionError('Expected cond branches ({} op) to each have the same input->output mapping of resources.'.format(node_def.op))\n            tensor_name = node_def.input[input_index + 1]\n        else:\n            raise ValueError('Taking gradient of a while loop which creates a resource in its body is not supported: %s (%s)' % (op_name, node_def.op))\n    return input_names.index(tensor_name)", "docstring": "Returns the index of the input corresponding to `tensor_name`.\n\nThis method is used to find the corresponding index of an arbitrary resource\ntensor in a function (the function could be a loop body). We assume that\nresource handles are never created in functions, so that every resource\ntensor can be traced back to a function input.\n\nThe awkward signature of this method is to make it work with both FuncGraphs\nand FunctionDefs. This is so we can recurse on function call ops without\nbuilding the corresponding FuncGraph (note that even if a FuncGraph for a\nFunctionDef already exists, the input/output/node names may have been\nchanged when the FuncGraph was serialized to the FunctionDef, which makes it\nunusable with this algorithm).\n\nArgs:\ntensor_name: the name of the resource tensor to be resolved to an input.\ninput_names: a list of the names of all inputs to the function.\nnode_defs: a dict mapping op name -> NodeDef for every op in the function.\nfunctions: a dict mapping function name -> AtomicFunction.\n\nReturns:\nThe index into input_names corresponding to `tensor_name`.", "source": "github-repos"}
{"code": "def register(self, callback_id: str, handler: Any, name: str = \"*\") -> None:\n        \n        LOG.info(\"Registering %s, %s to %s\", callback_id, name, handler)\n        if name not in self._routes[callback_id]:\n            self._routes[callback_id][name] = []\n\n        self._routes[callback_id][name].append(handler)", "docstring": "Register a new handler for a specific :class:`slack.actions.Action` `callback_id`.\nOptional routing based on the action name too.\n\nThe name argument is useful for actions of type `interactive_message` to provide\na different handler for each individual action.\n\nArgs:\ncallback_id: Callback_id the handler is interested in\nhandler: Callback\nname: Name of the action (optional).", "source": "juraj-google-style"}
{"code": "def get_node_angle(self, node):\n    return (atan2((self.pos[0] - node.pos[0]), (self.pos[1] - node.pos[1])) - (pi / 2))", "docstring": "Get the angle beetween 2 nodes relative to the horizont.\n\nArgs:\nnode (object): The other node.\n\nReturns:\nrad: The angle", "source": "codesearchnet"}
{"code": "def msgconvert(email):\n    log.debug('Started converting Outlook email')\n    (temph, temp) = tempfile.mkstemp(prefix='outlook_')\n    command = ['msgconvert', '--outfile', temp, email]\n    try:\n        if six.PY2:\n            with open(os.devnull, 'w') as devnull:\n                out = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=devnull)\n        elif six.PY3:\n            out = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n    except OSError:\n        message = \"To use this function you must install 'msgconvert' tool\"\n        log.exception(message)\n        raise MailParserOSError(message)\n    else:\n        (stdoutdata, _) = out.communicate()\n        return (temp, stdoutdata.decode('utf-8').strip())\n    finally:\n        os.close(temph)", "docstring": "Exec msgconvert tool, to convert msg Outlook\nmail in eml mail format\n\nArgs:\nemail (string): file path of Outlook msg mail\n\nReturns:\ntuple with file path of mail converted and\nstandard output data (unicode Python 2, str Python 3)", "source": "codesearchnet"}
{"code": "def variables(self):\n    return self._opt.variables()", "docstring": "Fetches a list of optimizer variables in the default graph.\n\nThis wraps `variables()` from the actual optimizer. It does not include\nthe `SyncReplicasOptimizer`'s local step.\n\nReturns:\nA list of variables.", "source": "github-repos"}
{"code": "def matches_to_marker_results(df):\n    \n    assert isinstance(df, pd.DataFrame)\n    from collections import defaultdict\n    d = defaultdict(list)\n    for idx, row in df.iterrows():\n        marker = row['marker']\n        d[marker].append(row)\n\n    marker_results = {}\n    for k,v in d.items():\n        if len(v) > 1:\n            logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)\n            df_marker = pd.DataFrame(v)\n            df_marker.sort_values('slen', ascending=False, inplace=True)\n            for i,r in df_marker.iterrows():\n                allele = r['allele_name']\n                slen = r['slen']\n                logging.debug('Selecting allele %s from contig with length %s', allele, slen)\n                seq = r['sseq']\n                if '-' in seq:\n                    logging.warning('Gaps found in allele. Removing gaps. %s', r)\n                    seq = seq.replace('-', '').upper()\n                    allele = allele_name(seq)\n                marker_results[k] = allele_result_dict(allele, seq, r.to_dict())\n                break\n        elif len(v) == 1:\n            row = v[0]\n            seq = row['sseq']\n            if '-' in seq:\n                logging.warning('Gaps found in allele. Removing gaps. %s', row)\n                seq = seq.replace('-', '').upper()\n            allele = allele_name(seq)\n            marker_results[k] = allele_result_dict(allele, seq, row.to_dict())\n        else:\n            err_msg = 'Empty list of matches for marker {}'.format(k)\n            logging.error(err_msg)\n            raise Exception(err_msg)\n    return marker_results", "docstring": "Perfect BLAST matches to marker results dict\n\nParse perfect BLAST matches to marker results dict.\n\n\nArgs:\ndf (pandas.DataFrame): DataFrame of perfect BLAST matches\n\nReturns:\ndict: cgMLST330 marker names to matching allele numbers", "source": "juraj-google-style"}
{"code": "def get_list_index(lst, index_or_name):\n    if isinstance(index_or_name, six.integer_types):\n        return index_or_name\n    return lst.index(index_or_name)", "docstring": "Return the index of an element in the list.\n\nArgs:\nlst (list): The list.\nindex_or_name (int or str): The value of the reference element, or directly its numeric index.\n\nReturns:\n(int) The index of the element in the list.", "source": "codesearchnet"}
{"code": "def read_trailer_lines(self):\n    if (not self.closed):\n        raise ValueError('Cannot read trailers until the request body has been read.')\n    while True:\n        line = self.rfile.readline()\n        if (not line):\n            raise ValueError('Illegal end of headers.')\n        self.bytes_read += len(line)\n        if (self.maxlen and (self.bytes_read > self.maxlen)):\n            raise IOError('Request Entity Too Large')\n        if (line == CRLF):\n            break\n        if (not line.endswith(CRLF)):\n            raise ValueError('HTTP requires CRLF terminators')\n        (yield line)", "docstring": "Read HTTP headers and yield them.\n\nReturns:\nGenerator: yields CRLF separated lines.", "source": "codesearchnet"}
{"code": "def _dispatch_event(self, event, data=None):\n    for callback in self._callbacks[event]:\n        self._logger.debug(\"Running %s callbacks for event: '%s'\", len(self._callbacks[event]), event)\n        try:\n            if (self._stopped and (event not in ['close', 'error'])):\n                break\n            if self.run_async:\n                self._execute_callback_async(callback, data)\n            else:\n                self._execute_callback(callback, data)\n        except Exception as err:\n            name = callback.__name__\n            module = callback.__module__\n            msg = f\"When calling '\n            self._logger.error(msg)\n            raise", "docstring": "Dispatches the event and executes any associated callbacks.\n\nNote: To prevent the app from crashing due to callback errors. We\ncatch all exceptions and send all data to the logger.\n\nArgs:\nevent (str): The type of event. e.g. 'bot_added'\ndata (dict): The data Slack sent. e.g.\n{\n\"type\": \"bot_added\",\n\"bot\": {\n\"id\": \"B024BE7LH\",\n\"app_id\": \"A4H1JB4AZ\",\n\"name\": \"hugbot\"\n}\n}", "source": "codesearchnet"}
{"code": "def get_course_completions(self, enterprise_customer, days):\n    return PersistentCourseGrade.objects.filter(passed_timestamp__gt=(datetime.datetime.now() - datetime.timedelta(days=days))).filter(user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True))", "docstring": "Get course completions via PersistentCourseGrade for all the learners of given enterprise customer.\n\nArguments:\nenterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\nof this enterprise customer.\ndays (int): Include course enrollment of this number of days.\n\nReturns:\n(list): A list of PersistentCourseGrade objects.", "source": "codesearchnet"}
{"code": "def _begin(self, retry_id=None):\n    if self.in_progress:\n        msg = _CANT_BEGIN.format(self._id)\n        raise ValueError(msg)\n    transaction_response = self._client._firestore_api.begin_transaction(self._client._database_string, options_=self._options_protobuf(retry_id), metadata=self._client._rpc_metadata)\n    self._id = transaction_response.transaction", "docstring": "Begin the transaction.\n\nArgs:\nretry_id (Optional[bytes]): Transaction ID of a transaction to be\nretried.\n\nRaises:\nValueError: If the current transaction has already begun.", "source": "codesearchnet"}
{"code": "def convert_response(allocate_quota_response, project_id):\n    \n    if not allocate_quota_response or not allocate_quota_response.allocateErrors:\n        return _IS_OK\n\n    \n    theError = allocate_quota_response.allocateErrors[0]\n    error_tuple = _QUOTA_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)\n    if error_tuple[1].find(u'{') == -1:  \n        return error_tuple\n\n    updated_msg = error_tuple[1].format(project_id=project_id, detail=theError.description or u'')\n    return error_tuple[0], updated_msg", "docstring": "Computes a http status code and message `AllocateQuotaResponse`\n\nThe return value a tuple (code, message) where\n\ncode: is the http status code\nmessage: is the message to return\n\nArgs:\nallocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):\nthe response from calling an api\n\nReturns:\ntuple(code, message)", "source": "juraj-google-style"}
{"code": "def insert_system(cur, system_name, encoded_data=None):\n    \n    if encoded_data is None:\n        encoded_data = {}\n\n    if 'system_name' not in encoded_data:\n        encoded_data['system_name'] = system_name\n\n    insert = \"INSERT OR IGNORE INTO system(system_name) VALUES (:system_name);\"\n    cur.execute(insert, encoded_data)", "docstring": "Insert a system name into the cache.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\nsystem_name (str):\nThe unique name of a system\n\nencoded_data (dict, optional):\nIf a dictionary is provided, it will be populated with the serialized data. This is\nuseful for preventing encoding the same information many times.", "source": "juraj-google-style"}
{"code": "def _preprocess_journal_query_value(third_journal_field, old_publication_info_values):\n        \n        \n        publication_info_keys = [\n            ElasticSearchVisitor.JOURNAL_TITLE,\n            ElasticSearchVisitor.JOURNAL_VOLUME,\n            third_journal_field,\n        ]\n        values_list = [\n            value.strip()\n            for value\n            in old_publication_info_values.split(',')\n            if value\n        ]\n\n        old_publication_info = [\n            {\n                key: value\n                for key, value\n                in zip(publication_info_keys, values_list)\n                if value\n            }\n        ]\n\n        \n        \n        new_publication_info = convert_old_publication_info_to_new(old_publication_info)[0]\n\n        return new_publication_info", "docstring": "Transforms the given journal query value (old publication info) to the new one.\n\nArgs:\nthird_journal_field (six.text_type): The final field to be used for populating the old publication info.\nold_publication_info_values (six.text_type): The old publication info. It must be one of {only title, title\n& volume, title & volume & artid/page_start}.\n\nReturns:\n(dict) The new publication info.", "source": "juraj-google-style"}
{"code": "def register(self, alias, service_class, configs=None, start_service=True):\n    if not inspect.isclass(service_class):\n        raise Error(self._device, '\"%s\" is not a class!' % service_class)\n    if not issubclass(service_class, base_service.BaseService):\n        raise Error(self._device, 'Class %s is not a subclass of BaseService!' % service_class)\n    if alias in self._service_objects:\n        raise Error(self._device, 'A service is already registered with alias \"%s\".' % alias)\n    service_obj = service_class(self._device, configs)\n    service_obj.alias = alias\n    if start_service:\n        service_obj.start()\n    self._service_objects[alias] = service_obj", "docstring": "Registers a service.\n\nThis will create a service instance, starts the service, and adds the\ninstance to the mananger.\n\nArgs:\nalias: string, the alias for this instance.\nservice_class: class, the service class to instantiate.\nconfigs: (optional) config object to pass to the service class's\nconstructor.\nstart_service: bool, whether to start the service instance or not.\nDefault is True.", "source": "github-repos"}
{"code": "def start_tpot(automated_run, session, path):\n    \n    module = functions.import_string_code_as_module(automated_run.source)\n    extraction = session.query(models.Extraction).first()\n    X, y = extraction.return_train_dataset()\n\n    tpot_learner =  module.tpot_learner\n\n    tpot_learner.fit(X, y)\n\n    temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))\n    tpot_learner.export(temp_filename)\n\n    with open(temp_filename) as f:\n        base_learner_source = f.read()\n\n    base_learner_source = constants.tpot_learner_docstring + base_learner_source\n\n    try:\n        os.remove(temp_filename)\n    except OSError:\n        pass\n\n    blo = models.BaseLearnerOrigin(\n        source=base_learner_source,\n        name='TPOT Learner',\n        meta_feature_generator='predict'\n    )\n\n    session.add(blo)\n    session.commit()", "docstring": "Starts a TPOT automated run that exports directly to base learner setup\n\nArgs:\nautomated_run (xcessiv.models.AutomatedRun): Automated run object\n\nsession: Valid SQLAlchemy session\n\npath (str, unicode): Path to project folder", "source": "juraj-google-style"}
{"code": "def _polar(abs_, angle):\n    abs_ = backend.convert_to_tensor(abs_)\n    angle = backend.convert_to_tensor(angle)\n    real = abs_ * backend.numpy.cos(angle)\n    imaginary = abs_ * backend.numpy.sin(angle)\n    result = backend.math._get_complex_tensor_from_tuple((real, imaginary))\n    return result", "docstring": "Internal implementation of the polar function.\n\nArgs:\nabs_: The magnitude (absolute value) of the complex number.\nangle: The angle (in radians) of the complex number.\n\nReturns:\nA complex number (or array of complex numbers) with the same shape as\n`abs_` and `angle`.", "source": "github-repos"}
{"code": "def DeregisterDecrypter(cls, decrypter):\n    encryption_method = decrypter.ENCRYPTION_METHOD.lower()\n    if (encryption_method not in cls._decrypters):\n        raise KeyError('Decrypter for encryption method: {0:s} not set.'.format(decrypter.ENCRYPTION_METHOD))\n    del cls._decrypters[encryption_method]", "docstring": "Deregisters a decrypter for a specific encryption method.\n\nArgs:\ndecrypter (type): decrypter class.\n\nRaises:\nKeyError: if the corresponding decrypter is not set.", "source": "codesearchnet"}
{"code": "def prefix(self, imod: YangIdentifier, mid: ModuleId) -> YangIdentifier:\n        \n        try:\n            did = (imod, self.implement[imod])\n        except KeyError:\n            raise ModuleNotImplemented(imod) from None\n        try:\n            pmap = self.modules[mid].prefix_map\n        except KeyError:\n            raise ModuleNotRegistered(*mid) from None\n        for p in pmap:\n            if pmap[p] == did:\n                return p\n        raise ModuleNotImported(imod, mid)", "docstring": "Return the prefix corresponding to an implemented module.\n\nArgs:\nimod: Name of an implemented module.\nmid: Identifier of the context module.\n\nRaises:\nModuleNotImplemented: If `imod` is not implemented.\nModuleNotRegistered: If `mid` is not registered in YANG library.\nModuleNotImported: If `imod` is not imported in `mid`.", "source": "juraj-google-style"}
{"code": "def read(self, vals):\n        \n        i = 0\n        count = int(vals[i])\n        i += 1\n        for _ in range(count):\n            obj = DesignCondition()\n            obj.read(vals[i:i + obj.field_count])\n            self.add_design_condition(obj)\n            i += obj.field_count", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def residual_block_layer(inputs, hparams):\n  \n  kernel = (hparams.res_kernel_size, hparams.res_kernel_size)\n  x = inputs\n  for i in range(hparams.num_res_layers):\n    with tf.variable_scope(\"res_conv_%d\" % i):\n      \n      y = common_layers.conv_block(\n          common_layers.layer_norm(x, hparams.hidden_size, name=\"lnorm\"),\n          hparams.hidden_size, [((1, 1), kernel)],\n          strides=(1, 1),\n          padding=\"SAME\",\n          name=\"residual_conv\")\n      \n      y = common_layers.conv_block(\n          y,\n          hparams.hidden_size, [((1, 1), (1, 1))],\n          strides=(1, 1),\n          padding=\"SAME\",\n          name=\"residual_dense\")\n      x = common_layers.layer_postprocess(x, y, hparams)\n  return x", "docstring": "Residual block over inputs.\n\nRuns a residual block consisting of\nconv: kernel_size x kernel_size\nconv: 1x1\ndropout, add and normalize according to hparams.layer_postprocess_sequence.\n\nArgs:\ninputs: Tensor of shape [batch, height, width, hparams.hidden_size].\nhparams: HParams.\n\nReturns:\nTensor of shape [batch, height, width, hparams.hidden_size].", "source": "juraj-google-style"}
{"code": "def push_image(registry, image):\n    values = {'registry': registry, 'image': image['name']}\n    log.info('Pushing <33>{registry}<35>/{image}'.format(**values))\n    shell.run('docker push {registry}/{image}'.format(**values))", "docstring": "Push the given image to selected repository.\n\nArgs:\nregistry (str):\nThe name of the registry we're pushing to. This is the address of\nthe repository without the protocol specification (no http(s)://)\nimage (dict[str, Any]):\nThe dict containing the information about the image. This is the\nsame dictionary as defined in DOCKER_IMAGES variable.", "source": "codesearchnet"}
{"code": "def compute_video_metrics_from_predictions(predictions, decode_hparams):\n  \n  all_results = {}\n\n\n  ssim_all_decodes, psnr_all_decodes = [], []\n  for single_decode in predictions:\n    args = get_zipped_dataset_from_predictions(single_decode)\n    psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)\n    psnr_all_decodes.append(psnr_single)\n    ssim_all_decodes.append(ssim_single)\n  psnr_all_decodes = np.array(psnr_all_decodes)\n  ssim_all_decodes = np.array(ssim_all_decodes)\n  all_results.update({\"PSNR\": psnr_all_decodes, \"SSIM\": ssim_all_decodes})\n  return compute_all_metrics_statistics(all_results)", "docstring": "Computes metrics from predictions.\n\nArgs:\npredictions: list of list of dicts.\nouter length: num_decodes, inner_length: num_samples\ndecode_hparams: Decode hparams. instance of HParams.\nReturns:\nstatistics: dict of Tensors, key being the metric with each Tensor\nhaving the shape (num_samples, num_frames).", "source": "juraj-google-style"}
{"code": "def _name_to_tensor(self, tensor_name):\n    (id1, id2) = self._tensor_name_to_ids[tensor_name]\n    return self._operations[id1].outputs[id2]", "docstring": "The tensor with the given name.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na tf.Tensor or mtf.Tensor", "source": "codesearchnet"}
{"code": "def load(filename):\n    \n    fileObj = open(filename, 'rb')\n    variable = pickle.load(fileObj)\n    fileObj.close()\n    return variable", "docstring": "Load variable from Pickle file\n\nArgs:\npath (str): path of the file to load\n\nReturns:\nvariable read from path", "source": "juraj-google-style"}
{"code": "def market_info(ticker: str) -> dict:\n    t_info = ticker.split()\n    assets = param.load_info('assets')\n    if ((t_info[(- 1)] == 'Equity') and ('=' not in t_info[0])):\n        exch = t_info[(- 2)]\n        for info in assets.get('Equity', [dict()]):\n            if ('exch_codes' not in info):\n                continue\n            if (exch in info['exch_codes']):\n                return info\n        return dict()\n    if (t_info[(- 1)] == 'Curncy'):\n        for info in assets.get('Curncy', [dict()]):\n            if ('tickers' not in info):\n                continue\n            if ((t_info[0].split('+')[0] in info['tickers']) or (t_info[0][(- 1)].isdigit() and (t_info[0][:(- 1)] in info['tickers']))):\n                return info\n        return dict()\n    if (t_info[(- 1)] == 'Comdty'):\n        for info in assets.get('Comdty', [dict()]):\n            if ('tickers' not in info):\n                continue\n            if (t_info[0][:(- 1)] in info['tickers']):\n                return info\n        return dict()\n    if ((t_info[(- 1)] == 'Index') or ((t_info[(- 1)] == 'Equity') and ('=' in t_info[0]))):\n        if (t_info[(- 1)] == 'Equity'):\n            tck = t_info[0].split('=')[0]\n        else:\n            tck = ' '.join(t_info[:(- 1)])\n        for info in assets.get('Index', [dict()]):\n            if ('tickers' not in info):\n                continue\n            if ((tck[:2] == 'UX') and ('UX' in info['tickers'])):\n                return info\n            if (tck in info['tickers']):\n                if (t_info[(- 1)] == 'Equity'):\n                    return info\n                if (not info.get('is_fut', False)):\n                    return info\n            if (tck[:(- 1)].rstrip() in info['tickers']):\n                if info.get('is_fut', False):\n                    return info\n        return dict()\n    if (t_info[(- 1)] == 'Corp'):\n        for info in assets.get('Corp', [dict()]):\n            if ('ticker' not in info):\n                continue\n    return dict()", "docstring": "Get info for given market\n\nArgs:\nticker: Bloomberg full ticker\n\nReturns:\ndict\n\nExamples:\n>>> info = market_info('SHCOMP Index')\n>>> info['exch']\n'EquityChina'\n>>> info = market_info('ICICIC=1 IS Equity')\n>>> info['freq'], info['is_fut']\n('M', True)\n>>> info = market_info('INT1 Curncy')\n>>> info['freq'], info['is_fut']\n('M', True)\n>>> info = market_info('CL1 Comdty')\n>>> info['freq'], info['is_fut']\n('M', True)\n>>> # Wrong tickers\n>>> market_info('C XX Equity')\n{}\n>>> market_info('XXX Comdty')\n{}\n>>> market_info('Bond_ISIN Corp')\n{}\n>>> market_info('XYZ Index')\n{}\n>>> market_info('XYZ Curncy')\n{}", "source": "codesearchnet"}
{"code": "def build_tfexample_transfored_training_input_fn(schema, features, analysis_output_dir, raw_data_file_pattern, training_batch_size, num_epochs=None, randomize_input=False, min_after_dequeue=1, reader_num_threads=1, allow_smaller_final_batch=True):\n\n    def transformed_training_input_fn():\n        'Training input function that reads transformed data.'\n        if isinstance(raw_data_file_pattern, six.string_types):\n            filepath_list = [raw_data_file_pattern]\n        else:\n            filepath_list = raw_data_file_pattern\n        files = []\n        for path in filepath_list:\n            files.extend(file_io.get_matching_files(path))\n        filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs, shuffle=randomize_input)\n        options = tf.python_io.TFRecordOptions(compression_type=tf.python_io.TFRecordCompressionType.GZIP)\n        (ex_id, ex_str) = tf.TFRecordReader(options=options).read_up_to(filename_queue, training_batch_size)\n        queue_capacity = (((reader_num_threads + 3) * training_batch_size) + min_after_dequeue)\n        if randomize_input:\n            (_, batch_ex_str) = tf.train.shuffle_batch(tensors=[ex_id, ex_str], batch_size=training_batch_size, capacity=queue_capacity, min_after_dequeue=min_after_dequeue, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)\n        else:\n            (_, batch_ex_str) = tf.train.batch(tensors=[ex_id, ex_str], batch_size=training_batch_size, capacity=queue_capacity, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)\n        feature_spec = {}\n        feature_info = get_transformed_feature_info(features, schema)\n        for (name, info) in six.iteritems(feature_info):\n            if (info['size'] is None):\n                feature_spec[name] = tf.VarLenFeature(dtype=info['dtype'])\n            else:\n                feature_spec[name] = tf.FixedLenFeature(shape=[info['size']], dtype=info['dtype'])\n        parsed_tensors = tf.parse_example(batch_ex_str, feature_spec)\n        transformed_features = {}\n        for (k, v) in six.iteritems(parsed_tensors):\n            if (isinstance(v, tf.Tensor) and (v.get_shape().ndims == 1)):\n                transformed_features[k] = tf.expand_dims(v, (- 1))\n            else:\n                transformed_features[k] = v\n        transformed_features = image_feature_engineering(features=features, feature_tensors_dict=transformed_features)\n        target_name = get_target_name(features)\n        if ((not target_name) or (target_name not in transformed_features)):\n            raise ValueError('Cannot find target transform in features')\n        transformed_target = transformed_features.pop(target_name)\n        return (transformed_features, transformed_target)\n    return transformed_training_input_fn", "docstring": "Creates training input_fn that reads transformed tf.example files.\n\nArgs:\nschema: schema list\nfeatures: features dict\nanalysis_output_dir: output folder from analysis\nraw_data_file_pattern: file path, or list of files\ntraining_batch_size: An int specifying the batch size to use.\nnum_epochs: numer of epochs to read from the files. Use None to read forever.\nrandomize_input: If true, the input rows are read out of order. This\nrandomness is limited by the min_after_dequeue value.\nmin_after_dequeue: Minimum number elements in the reading queue after a\ndequeue, used to ensure a level of mixing of elements. 
Only used if\nrandomize_input is True.\nreader_num_threads: The number of threads enqueuing data.\nallow_smaller_final_batch: If false, fractional batches at the end of\ntraining or evaluation are not used.\n\nReturns:\nAn input_fn suitable for training that reads transformed data in tf record\nfiles of tf.example.", "source": "codesearchnet"}
{"code": "def register_repeating_metric(self, metric_name, frequency, getter):\n    l = task.LoopingCall(self._publish_repeating_metric, metric_name, getter)\n    repeating_metric_handle = RepeatingMetricHandle(l, frequency)\n    self._repeating_metric_handles.append(repeating_metric_handle)\n    if self.running:\n        repeating_metric_handle.start()\n    return repeating_metric_handle", "docstring": "Record hits to a metric at a specified interval.\n\nArgs:\nmetric_name: The name of the metric to record with Carbon.\nfrequency: The frequency with which to poll the getter and record the value with Carbon.\ngetter: A function which takes no arguments and returns the value to record with Carbon.\n\nReturns:\nRepeatingMetricHandle instance. Call .stop() on it to stop recording the metric.", "source": "codesearchnet"}
{"code": "def _is_molecule_linear(self, mol):\n    if (mol.NumAtoms() < 3):\n        return True\n    a1 = mol.GetAtom(1)\n    a2 = mol.GetAtom(2)\n    for i in range(3, (mol.NumAtoms() + 1)):\n        angle = float(mol.GetAtom(i).GetAngle(a2, a1))\n        if (angle < 0.0):\n            angle = (- angle)\n        if (angle > 90.0):\n            angle = (180.0 - angle)\n        if (angle > self._angle_tolerance):\n            return False\n    return True", "docstring": "Is the molecule a linear one\n\nArgs:\nmol: The molecule. OpenBabel OBMol object.\n\nReturns:\nBoolean value.", "source": "codesearchnet"}
{"code": "def seek(self, offset=None, whence=0, position=None):\n    self._preread_check()\n    if offset is None and position is None:\n        raise TypeError('seek(): offset argument required')\n    if offset is not None and position is not None:\n        raise TypeError('seek(): offset and position may not be set simultaneously.')\n    if position is not None:\n        offset = position\n    if whence == 0:\n        pass\n    elif whence == 1:\n        offset += self.tell()\n    elif whence == 2:\n        offset += self.size()\n    else:\n        raise errors.InvalidArgumentError(None, None, 'Invalid whence argument: {}. Valid values are 0, 1, or 2.'.format(whence))\n    self._read_buf.seek(offset)", "docstring": "Seeks to the offset in the file.\n\nArgs:\noffset: The byte count relative to the whence argument.\nwhence: Valid values for whence are:\n0: start of the file (default)\n1: relative to the current position of the file\n2: relative to the end of file. `offset` is usually negative.", "source": "github-repos"}
{"code": "def Unzip(iterable):\n  \n  lefts = []\n  rights = []\n\n  for left, right in iterable:\n    lefts.append(left)\n    rights.append(right)\n\n  return lefts, rights", "docstring": "Unzips specified iterable of pairs to pair of two iterables.\n\nThis function is an inversion of the standard `zip` function and the following\nhold:\n\n* ∀ l, r. l, r == unzip(zip(l, r))\n* ∀ p. p == zip(unzip(p))\n\nExamples:\n>>> Unzip([(\"foo\", 1), (\"bar\", 2), (\"baz\", 3)])\n([\"foo\", \"bar\", \"baz\"], [1, 2, 3])\n\nArgs:\niterable: An iterable of pairs to unzip.\n\nReturns:\nA pair of iterables after unzipping.", "source": "juraj-google-style"}
{"code": "def create_bmi_config_file(self, filename: str='bmi_config.txt') -> None:\n    s0 = self.construct_default_initial_state()\n    s0.to_csv(filename, index_label='variable')", "docstring": "Create a BMI config file to initialize the model.\n\nArgs:\nfilename: The filename with which the config file should be saved.", "source": "codesearchnet"}
{"code": "def set_notify_dispatch_request(self, notify_dispatch_request, *args):\n    self._notify_dispatch_request = notify_dispatch_request\n    self._notify_args = args", "docstring": "Set function to call just before requests are dispatched\n\nArgs:\nnotify_dispatch_request (callable): function will be called\nwith request as single arg just before request is dispatched", "source": "codesearchnet"}
{"code": "def get_queue_name(queue_name):\n    if queue_name:\n        return queue_name\n    queue_name = os.environ.get('HTTP_X_APPENGINE_QUEUENAME', parameters.config.QUEUE_NAME)\n    if ((len(queue_name) > 1) and (queue_name[0:2] == '__')):\n        return parameters.config.QUEUE_NAME\n    else:\n        return queue_name", "docstring": "Determine which queue MR should run on.\n\nHow to choose the queue:\n1. If user provided one, use that.\n2. If we are starting a mr from taskqueue, inherit that queue.\nIf it's a special queue, fall back to the default queue.\n3. Default queue.\n\nIf user is using any MR pipeline interface, pipeline.start takes a\n\"queue_name\" argument. The pipeline will run on that queue and MR will\nsimply inherit the queue_name.\n\nArgs:\nqueue_name: queue_name from user. Maybe None.\n\nReturns:\nThe queue name to run on.", "source": "codesearchnet"}
{"code": "def generate_host_passthrough(self, vcpu_num):\n    cpu = ET.Element('cpu', mode='host-passthrough')\n    cpu.append(self.generate_topology(vcpu_num))\n    if (vcpu_num > 1):\n        cpu.append(self.generate_numa(vcpu_num))\n    return cpu", "docstring": "Generate host-passthrough XML cpu node\n\nArgs:\nvcpu_num(str): number of virtual CPUs\n\nReturns:\nlxml.etree.Element: CPU XML node", "source": "codesearchnet"}
{"code": "def compute(self, x_arr, y_arr):\n        \n        x_arr = x_arr / np.linalg.norm(x_arr, ord=1)\n        y_arr = y_arr / np.linalg.norm(y_arr, ord=1)\n        mixture_arr = 0.5 * (x_arr + y_arr)\n        return 0.5 * (super().compute(x_arr, mixture_arr) + super().compute(y_arr, mixture_arr))", "docstring": "Compute distance.\n\nArgs:\nx_arr:      `np.ndarray` of vectors.\ny_arr:      `np.ndarray` of vectors.\n\nRetruns:\n`np.ndarray` of distances.", "source": "juraj-google-style"}
{"code": "def download_tile(map_layer, zoom, x, y):\n    \n    try:\n        tile_url = map_layer.get_tile_url(zoom, x, y)\n        tmp_file, headers = urllib.request.urlretrieve(tile_url)\n        return (x, y), tmp_file\n    except URLError as e:\n        app.logger.info(\"Error downloading tile x={}, y={}, z={} for layer {}: {}\".format(\n            x, y, zoom, map_layer, e.reason))\n        return (x, y), pkg_resources.resource_filename(\"geos\", \"static/empty_tile.png\")", "docstring": "Download a given tile from the tile server.\n\nArgs:\nmap_layer (MapLayer): MapLayer object which provides the tile-url.\nzoom (int): zoom level\nx (int): Tile-x-coordinate\ny (int): Tile-y-coordinate\n\nReturns:\nfile: temporary file containing the downloaded image.", "source": "juraj-google-style"}
{"code": "def check_coordinates(chromosome, pos, coordinates):\n    chrom_match = CHR_PATTERN.match(chromosome)\n    chrom = chrom_match.group(2)\n    if (chrom != coordinates['chrom']):\n        return False\n    if ((pos >= coordinates['start']) and (pos <= coordinates['end'])):\n        return True\n    return False", "docstring": "Check if the variant is in the interval given by the coordinates\n\nArgs:\nchromosome(str): Variant chromosome\npos(int): Variant position\ncoordinates(dict): Dictionary with the region of interest", "source": "codesearchnet"}
{"code": "def split_if_relative_reference(reference: message.Message) -> None:\n    _validate_reference(reference)\n    uri_field = reference.DESCRIPTOR.fields_by_name.get('uri')\n    if not proto_utils.field_is_set(reference, uri_field):\n        return\n    uri = proto_utils.get_value_at_field(reference, uri_field)\n    internal_match = re.fullmatch(_INTERNAL_REFERENCE_PATTERN, uri.value)\n    if internal_match is not None:\n        reference_id_field = get_reference_id_field_for_resource(reference, internal_match.group('resource_type'))\n        reference_id = proto_utils.create_message_from_descriptor(reference_id_field.message_type)\n        populate_typed_reference_id(reference_id, internal_match.group('resource_id'), internal_match.group('version'))\n        proto_utils.copy_common_field(uri, reference_id, 'id')\n        proto_utils.copy_common_field(uri, reference_id, 'extension')\n        proto_utils.set_value_at_field(reference, reference_id_field, reference_id)\n        return\n    fragment_match = re.fullmatch(_FRAGMENT_REFERENCE_PATTERN, uri.value)\n    if fragment_match is not None:\n        fragment_field = reference.DESCRIPTOR.fields_by_name['fragment']\n        fragment = proto_utils.create_message_from_descriptor(fragment_field.message_type)\n        value_field = fragment.DESCRIPTOR.fields_by_name['value']\n        proto_utils.set_value_at_field(fragment, value_field, uri.value[1:])\n        proto_utils.copy_common_field(uri, fragment, 'id')\n        proto_utils.copy_common_field(uri, fragment, 'extension')\n        proto_utils.set_value_at_field(reference, fragment_field, fragment)\n        return", "docstring": "If possible, parses a `Reference` `uri` into more structured fields.\n\nThis is only possible for two forms of reference uris:\n* Relative references of the form $TYPE/$ID, e.g., \"Patient/1234\"\nIn this case, this will be parsed to a proto of the form:\n{patient_id: {value: \"1234\"}}\n* Fragments of the form \"#$FRAGMENT\", e.g., \"#vs1\".  In this case, this would\nbe parsed into a proto of the form:\n{fragment: {value: \"vs1\"} }\n\nIf the reference URI matches one of these schemas, the `uri` field will be\ncleared, and the appropriate structured fields set. Otherwise, the reference\nwill be unchanged.\n\nArgs:\nreference: The FHIR reference to potentially split.\n\nRaises:\nValueError: If the message is not a valid FHIR Reference proto.", "source": "github-repos"}
{"code": "def make_hex_texture(grid_size = 2, resolution=1):\n    \n    grid_x, grid_y = np.meshgrid(\n        np.arange(grid_size),\n        np.arange(grid_size)\n    )\n    ROOT_3_OVER_2 = np.sqrt(3) / 2\n    ONE_HALF = 0.5\n    \n    grid_x = (grid_x * np.sqrt(3) + (grid_y % 2) * ROOT_3_OVER_2).flatten()\n    grid_y = grid_y.flatten() * 1.5\n    \n    grid_points = grid_x.shape[0]\n    \n    x_offsets = np.interp(np.arange(4 * resolution),\n        np.arange(4) * resolution, [\n            ROOT_3_OVER_2,\n            0.,\n            -ROOT_3_OVER_2,\n            -ROOT_3_OVER_2,\n        ])\n    y_offsets = np.interp(np.arange(4 * resolution),\n        np.arange(4) * resolution, [\n            -ONE_HALF,\n            -1.,\n            -ONE_HALF,\n            ONE_HALF\n        ])\n    \n    tmx = 4 * resolution\n    x_t = np.tile(grid_x, (tmx, 1)) + x_offsets.reshape((tmx, 1))\n    y_t = np.tile(grid_y, (tmx, 1)) + y_offsets.reshape((tmx, 1))\n    \n    x_t = np.vstack([x_t, np.tile(np.nan, (1, grid_x.size))])\n    y_t = np.vstack([y_t, np.tile(np.nan, (1, grid_y.size))])\n    \n    return fit_texture((x_t.flatten('F'), y_t.flatten('F')))", "docstring": "Makes a texture consisting on a grid of hexagons.\n\nArgs:\ngrid_size (int): the number of hexagons along each dimension of the grid\nresolution (int): the number of midpoints along the line of each hexagon\n\nReturns:\nA texture.", "source": "juraj-google-style"}
{"code": "def opt(parser: Union[Parser, Sequence[Input]]) -> OptionalParser:\n    \n    if isinstance(parser, str):\n        parser = lit(parser)\n    return OptionalParser(parser)", "docstring": "Optionally match a parser.\n\nAn ``OptionalParser`` attempts to match ``parser``. If it succeeds, it\nreturns a list of length one with the value returned by the parser as the\nonly element. If it fails, it returns an empty list.\n\nArgs:\nparser: Parser or literal", "source": "juraj-google-style"}
{"code": "def add_license(self, contents):\n    buf_size = len(contents)\n    buf = (ctypes.c_char * (buf_size + 1))(*contents.encode())\n    res = self._dll.JLINK_EMU_AddLicense(buf)\n    if (res == (- 1)):\n        raise errors.JLinkException('Unspecified error.')\n    elif (res == (- 2)):\n        raise errors.JLinkException('Failed to read/write license area.')\n    elif (res == (- 3)):\n        raise errors.JLinkException('J-Link out of space.')\n    return (res == 0)", "docstring": "Adds the given ``contents`` as a new custom license to the J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\ncontents: the string contents of the new custom license\n\nReturns:\n``True`` if license was added, ``False`` if license already existed.\n\nRaises:\nJLinkException: if the write fails.\n\nNote:\nJ-Link V9 and J-Link ULTRA/PRO V4 have 336 Bytes of memory for\nlicenses, while older versions of 80 bytes.", "source": "codesearchnet"}
{"code": "def connect_raise_node(self, node, except_guards):\n    for guard in except_guards:\n        if guard in self.raises:\n            self.raises[guard].append(node)\n        else:\n            self.raises[guard] = [node]", "docstring": "Adds extra connection between a raise node and containing except guards.\n\nThe node is a graph node, not an ast node.\n\nArgs:\nnode: Node\nexcept_guards: Tuple[ast.AST, ...], the except sections that guard node", "source": "github-repos"}
{"code": "def update(self, b):\n    hv = self.hashfunc(b)\n    reg_index = (hv & (self.m - 1))\n    bits = (hv >> self.p)\n    self.reg[reg_index] = max(self.reg[reg_index], self._get_rank(bits))", "docstring": "Update the HyperLogLog with a new data value in bytes.\nThe value will be hashed using the hash function specified by\nthe `hashfunc` argument in the constructor.\n\nArgs:\nb: The value to be hashed using the hash function specified.\n\nExample:\nTo update with a new string value (using the default SHA1 hash\nfunction, which requires bytes as input):\n\n.. code-block:: python\n\nhll = HyperLogLog()\nhll.update(\"new value\".encode('utf-8'))\n\nWe can also use a different hash function, for example, `pyfarmhash`:\n\n.. code-block:: python\n\nimport farmhash\ndef _hash_32(b):\nreturn farmhash.hash32(b)\nhll = HyperLogLog(hashfunc=_hash_32)\nhll.update(\"new value\")", "source": "codesearchnet"}
{"code": "def _check_module_is_text_embedding(module_spec):\n  \n  issues = []\n\n  \n  input_info_dict = module_spec.get_input_info_dict()\n  if len(input_info_dict) != 1:\n    issues.append(\"Module default signature must require only one input\")\n  else:\n    input_info, = input_info_dict.values()\n    input_shape = input_info.get_shape()\n    if not (input_info.dtype == tf.string and input_shape.ndims == 1 and\n            input_shape.as_list() == [None]):\n      issues.append(\n          \"Module default signature must have only one input \"\n          \"tf.Tensor(shape=(?,), dtype=string)\"\n      )\n\n  \n  output_info_dict = module_spec.get_output_info_dict()\n  if \"default\" not in output_info_dict:\n    issues.append(\"Module default signature must have a 'default' output.\")\n  else:\n    output_info = output_info_dict[\"default\"]\n    output_shape = output_info.get_shape()\n    if not (output_info.dtype == tf.float32 and output_shape.ndims == 2 and\n            not output_shape.as_list()[0] and output_shape.as_list()[1]):\n      issues.append(\n          \"Module default signature must have a 'default' output of \"\n          \"tf.Tensor(shape=(?,K), dtype=float32).\"\n      )\n\n  if issues:\n    raise ValueError(\"Module is not a text-embedding: %r\" % issues)", "docstring": "Raises ValueError if `module_spec` is not a text-embedding module.\n\nArgs:\nmodule_spec: A `ModuleSpec` to test.\n\nRaises:\nValueError: if `module_spec` default signature is not compatible with\nTensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).", "source": "juraj-google-style"}
{"code": "def get_block(self, parent, config='running_config'):\n    try:\n        parent = ('^%s$' % parent)\n        return self.node.section(parent, config=config)\n    except TypeError:\n        return None", "docstring": "Scans the config and returns a block of code\n\nArgs:\nparent (str): The parent string to search the config for and\nreturn the block\nconfig (str): A text config string to be searched. Default\nis to search the running-config of the Node.\n\nReturns:\nA string object that represents the block from the config.  If\nthe parent string is not found, then this method will\nreturn None.", "source": "codesearchnet"}
{"code": "def GetMap(self, cache_info):\n    return self.GetParser().GetMap(cache_info, self.CreateMap())", "docstring": "Creates a Map from the cache_info data.\n\nArgs:\ncache_info: file-like object containing the data to parse\n\nReturns:\nA child of Map containing the cache data.", "source": "github-repos"}
{"code": "def remove_dimensions(self, dimension_names):\n        \n        with self._lock:\n            for dimension in dimension_names:\n                if dimension in self._extra_dimensions:\n                    del self._extra_dimensions[dimension]", "docstring": "Removes extra dimensions added by the add_dimensions() function.\nIgnores dimension names that don't exist.\n\nArgs:\ndimension_names (list): List of dimension names to remove.", "source": "juraj-google-style"}
{"code": "def if_sqlserver_disable_constraints_triggers(session: SqlASession,\n                                              tablename: str) -> None:\n    \n    with if_sqlserver_disable_constraints(session, tablename):\n        with if_sqlserver_disable_triggers(session, tablename):\n            yield", "docstring": "If we're running under SQL Server, disable triggers AND constraints for the\nspecified table while the resource is held.\n\nArgs:\nsession: SQLAlchemy :class:`Session`\ntablename: table name", "source": "juraj-google-style"}
{"code": "def record(ekey, entry, diff=False):\n    \n    taskdb = active_db()\n    taskdb.record(ekey, entry, diff)\n    \n    \n    taskdb.save()", "docstring": "Records the specified entry to the key-value store under the specified\nentity key.\n\nArgs:\nekey (str): fqdn/uuid of the method/object to store the entry for.\nentry (dict): attributes and values gleaned from the execution.\ndiff (bool): when True, the \"c\" element of `entry` will be diffed\nagainst previous entries under the same `ekey` if their method\n(attribute \"m\") matches.", "source": "juraj-google-style"}
{"code": "def funds(self, term, field=None, **kwargs):\n        \n        params = kwargs\n        params['q'] = term\n        if field:\n            params['f'] = field\n        else:\n            params['f'] = 'fu.org.n'\n        baseuri = self._BASE_URI + 'funds'\n        res = self.session.get(baseuri, params=params)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for funds matching a search term.\n\nArgs:\nterm (str): Fund id to search on\nfield (str): The field to search on.\nOptions are title, amount, org_name and type.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "juraj-google-style"}
{"code": "def __init__(self, Outer, Inner, *l):\n        \n        super().__init__()\n        self.value = [Outer()]\n        self.l = self.value[0].value\n        self.Outer = Outer\n        self.Inner = Inner\n        self.add(l)", "docstring": "init\n\nArgs:\nOuter (class): One of the possible outer classes.\nInner (class): One of the possible inner classes.\n*l: To be processed and set to value", "source": "juraj-google-style"}
{"code": "def set_number_of_shards(self, number_of_shards):\n    for policy in self._sharding_policies:\n        policy.set_number_of_shards(number_of_shards)\n        policy.set_number_of_partitions(self._number_of_partitions)\n    self._validate()", "docstring": "Sets the number of shards to use for the InfeedQueue.\n\nArgs:\nnumber_of_shards: number of ways to shard the InfeedQueue.\n\nRaises:\nValueError: if number_of_shards is not > 0; or the policies have\nbeen frozen and number_of_shards was already set to something\nelse.", "source": "github-repos"}
{"code": "def has_shell_command(self, command):\n    try:\n        output = self.shell(['command', '-v', command]).decode('utf-8').strip()\n        return (command in output)\n    except AdbError:\n        return False", "docstring": "Checks to see if a given check command exists on the device.\n\nArgs:\ncommand: A string that is the name of the command to check.\n\nReturns:\nA boolean that is True if the command exists and False otherwise.", "source": "codesearchnet"}
{"code": "def execute(self, correlation_id, args):\n        \n        return self._intercepter.execute(_next, correlation_id, args)", "docstring": "Executes the command given specific arguments as an input.\n\nArgs:\ncorrelation_id: a unique correlation/transaction id\nargs: command arguments\n\nReturns: an execution result.\n\nRaises:\nMicroserviceError: when execution fails for whatever reason.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    data = file_object.read(self._HEADER_READ_SIZE)\n    if (not data.startswith(b'<?xml')):\n        raise errors.UnableToParseFile('Not an Opera typed history file [not a XML]')\n    (_, _, data) = data.partition(b'\\n')\n    if (not data.startswith(b'<typed_history')):\n        raise errors.UnableToParseFile('Not an Opera typed history file [wrong XML root key]')\n    file_object.seek(0, os.SEEK_SET)\n    xml = ElementTree.parse(file_object)\n    for history_item in xml.iterfind('typed_history_item'):\n        event_data = OperaTypedHistoryEventData()\n        event_data.entry_type = history_item.get('type', None)\n        event_data.url = history_item.get('content', None)\n        if (event_data.entry_type == 'selected'):\n            event_data.entry_selection = 'Filled from autocomplete.'\n        elif (event_data.entry_type == 'text'):\n            event_data.entry_selection = 'Manually typed.'\n        last_typed_time = history_item.get('last_typed', None)\n        if (last_typed_time is None):\n            parser_mediator.ProduceExtractionWarning('missing last typed time.')\n            continue\n        date_time = dfdatetime_time_elements.TimeElements()\n        try:\n            date_time.CopyFromStringISO8601(last_typed_time)\n        except ValueError as exception:\n            parser_mediator.ProduceExtractionWarning('unsupported last typed time: {0:s} with error: {1!s}.'.format(last_typed_time, exception))\n            continue\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Opera typed history file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def add(self, datum, location_ids):\n    node_name = datum.node_exec_stats.node_name\n    if node_name in self._node_name_to_sample:\n        sample = self._node_name_to_sample[node_name]\n        sample.location_id.extend(location_ids)\n    else:\n        sample = profile_pb2.Sample()\n        sample.value.extend([0, 0, 0])\n        label = sample.label.add()\n        label.key = self._string_table.index_of('node_name')\n        label.str = self._string_table.index_of(node_name)\n        label = sample.label.add()\n        label.key = self._string_table.index_of('op_type')\n        label.str = self._string_table.index_of(datum.op_type)\n        self._node_name_to_sample[node_name] = sample\n    sample.value[0] += 1\n    sample.value[1] += datum.node_exec_stats.all_end_rel_micros\n    sample.value[2] += datum.node_exec_stats.op_end_rel_micros - datum.node_exec_stats.op_start_rel_micros", "docstring": "Adds a sample data point.\n\nArgs:\ndatum: `ProfileDatum` to add a sample for.\nlocation_ids: List of numberic location ids for this\nsample.", "source": "github-repos"}
{"code": "def header_string_from_file(filename='feff.inp'):\n        \n        with zopen(filename, \"r\") as fobject:\n            f = fobject.readlines()\n            feff_header_str = []\n            ln = 0\n\n            \n            try:\n                feffpmg = f[0].find(\"pymatgen\")\n            except IndexError:\n                feffpmg = False\n\n            \n            if feffpmg:\n                nsites = int(f[8].split()[2])\n                for line in f:\n                    ln += 1\n                    if ln <= nsites + 9:\n                        feff_header_str.append(line)\n            else:\n                \n                \n                end = 0\n                for line in f:\n                    if (line[0] == \"*\" or line[0] == \"T\") and end == 0:\n                        feff_header_str.append(line.replace(\"\\r\", \"\"))\n                    else:\n                        end = 1\n\n        return ''.join(feff_header_str)", "docstring": "Reads Header string from either a HEADER file or feff.inp file\nWill also read a header from a non-pymatgen generated feff.inp file\n\nArgs:\nfilename: File name containing the Header data.\n\nReturns:\nReads header string.", "source": "juraj-google-style"}
{"code": "def _craft_s3_keys(self):\n    now = time.gmtime()\n    stub = 'templates/{stack_name}/{version}'.format(stack_name=self._config.get('environment', {}).get('stack_name', None), version=self._config.get('codeVersion'))\n    stub = ((stub + '/') + str(now.tm_year))\n    stub = ((stub + '/') + str(('%02d' % now.tm_mon)))\n    stub = ((stub + '/') + str(('%02d' % now.tm_mday)))\n    stub = ((stub + '/') + str(('%02d' % now.tm_hour)))\n    stub = ((stub + ':') + str(('%02d' % now.tm_min)))\n    stub = ((stub + ':') + str(('%02d' % now.tm_sec)))\n    if self._yaml:\n        template_key = (stub + '/stack.yaml')\n    else:\n        template_key = (stub + '/stack.json')\n    property_key = (stub + '/stack.properties')\n    return (template_key, property_key)", "docstring": "We are putting stuff into S3, were supplied the bucket. Here we\ncraft the key of the elements we are putting up there in the\ninternet clouds.\n\nArgs:\nNone\n\nReturns:\na tuple of teplate file key and property file key", "source": "codesearchnet"}
{"code": "def __init__(self, cache_file_name=None, update_cache=True, req_timeout=90.0):\n        \n\n        \n        \n        self._requests = MultiRequest(max_requests=2, req_timeout=req_timeout)\n\n        \n        self._cache = ApiCache(cache_file_name, update_cache) if cache_file_name else None", "docstring": "Establishes basic HTTP params and loads a cache.\n\nArgs:\ncache_file_name: String file name of cache.\nupdate_cache: Determines whether cache should be written out back to the disk when closing it.\nDefault is `True`.\nreq_timeout: Maximum number of seconds to wait without reading a response byte before deciding an error has occurred.\nDefault is 90.0 seconds.", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, plist_name, top_level, **kwargs):\n    logger.debug('Plist {0:s} plugin used for: {1:s}'.format(self.NAME, plist_name))\n    self.GetEntries(parser_mediator, top_level=top_level, **kwargs)", "docstring": "Overwrite the default Process function so it always triggers.\n\nProcess() checks if the current plist being processed is a match for a\nplugin by comparing the PATH and KEY requirements defined by a plugin.  If\nboth match processing continues; else raise WrongPlistPlugin.\n\nThe purpose of the default plugin is to always trigger on any given plist\nfile, thus it needs to overwrite the default behavior of comparing PATH\nand KEY.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nplist_name (str): name of the plist.\ntop_level (dict[str, object]): plist top-level key.", "source": "codesearchnet"}
{"code": "def get_command_from_result(script, result, debug=False):\n    \n    if not debug:\n        command = \"python waf --run \\\"\" + script + \" \" + \" \".join(\n            ['--%s=%s' % (param, value) for param, value in\n             result['params'].items()]) + \"\\\"\"\n    else:\n        command = \"python waf --run \" + script + \" --command-template=\\\"\" +\\\n            \"gdb --args %s \" + \" \".join(['--%s=%s' % (param, value) for\n                                         param, value in\n                                         result['params'].items()]) + \"\\\"\"\n    return command", "docstring": "Return the command that is needed to obtain a certain result.\n\nArgs:\nparams (dict): Dictionary containing parameter: value pairs.\ndebug (bool): Whether the command should include the debugging\ntemplate.", "source": "juraj-google-style"}
{"code": "def begin_statement(self, stmt):\n    self.active_stmts.add(stmt)", "docstring": "Marks the beginning of a statement.\n\nArgs:\nstmt: Hashable, a key by which the statement can be identified in the\nCFG's stmt_prev and stmt_next attributes", "source": "github-repos"}
{"code": "def _build(self, inputs, memory, treat_input_as_matrix=False):\n    if treat_input_as_matrix:\n        inputs = basic.BatchFlatten(preserve_dims=2)(inputs)\n        inputs_reshape = basic.BatchApply(basic.Linear(self._mem_size), n_dims=2)(inputs)\n    else:\n        inputs = basic.BatchFlatten()(inputs)\n        inputs = basic.Linear(self._mem_size)(inputs)\n        inputs_reshape = tf.expand_dims(inputs, 1)\n    memory_plus_input = tf.concat([memory, inputs_reshape], axis=1)\n    next_memory = self._attend_over_memory(memory_plus_input)\n    n = inputs_reshape.get_shape().as_list()[1]\n    next_memory = next_memory[(:, :(- n), :)]\n    if ((self._gate_style == 'unit') or (self._gate_style == 'memory')):\n        (self._input_gate, self._forget_gate) = self._create_gates(inputs_reshape, memory)\n        next_memory = (self._input_gate * tf.tanh(next_memory))\n        next_memory += (self._forget_gate * memory)\n    output = basic.BatchFlatten()(next_memory)\n    return (output, next_memory)", "docstring": "Adds relational memory to the TensorFlow graph.\n\nArgs:\ninputs: Tensor input.\nmemory: Memory output from the previous time step.\ntreat_input_as_matrix: Optional, whether to treat `input` as a sequence\nof matrices. Defaulta to False, in which case the input is flattened\ninto a vector.\n\nReturns:\noutput: This time step's output.\nnext_memory: The next version of memory to use.", "source": "codesearchnet"}
{"code": "def _FindLargestIdPostfixNumber(self, schedule):\n    \n    postfix_number_re = re.compile('(\\d+)$')\n\n    def ExtractPostfixNumber(entity_id):\n      \n      if entity_id is None:\n        return 0\n      match = postfix_number_re.search(entity_id)\n      if match is not None:\n        return int(match.group(1))\n      else:\n        return 0\n\n    id_data_sets = {'agency_id': schedule.GetAgencyList(),\n                    'stop_id': schedule.GetStopList(),\n                    'route_id': schedule.GetRouteList(),\n                    'trip_id': schedule.GetTripList(),\n                    'service_id': schedule.GetServicePeriodList(),\n                    'fare_id': schedule.GetFareAttributeList(),\n                    'shape_id': schedule.GetShapeList()}\n\n    max_postfix_number = 0\n    for id_name, entity_list in id_data_sets.items():\n      for entity in entity_list:\n        entity_id = getattr(entity, id_name)\n        postfix_number = ExtractPostfixNumber(entity_id)\n        max_postfix_number = max(max_postfix_number, postfix_number)\n    return max_postfix_number", "docstring": "Finds the largest integer used as the ending of an id in the schedule.\n\nArgs:\nschedule: The schedule to check.\n\nReturns:\nThe maximum integer used as an ending for an id.", "source": "juraj-google-style"}
{"code": "def statement(self) -> Statement:\n    (pref, kw) = self.keyword()\n    pres = self.opt_separator()\n    next = self.peek()\n    if (next == ';'):\n        arg = None\n        sub = False\n    elif (next == '{'):\n        arg = None\n        sub = True\n    elif (not pres):\n        raise UnexpectedInput(self, 'separator')\n    else:\n        self._arg = ''\n        sub = self.argument()\n        arg = self._arg\n    self.offset += 1\n    res = Statement(kw, arg, pref=pref)\n    if sub:\n        res.substatements = self.substatements()\n        for sub in res.substatements:\n            sub.superstmt = res\n    return res", "docstring": "Parse YANG statement.\n\nRaises:\nEndOfInput: If past the end of input.\nUnexpectedInput: If no syntactically correct statement is found.", "source": "codesearchnet"}
{"code": "def insert(self, point, data=None):\n    assert (len(point) == self.k)\n    if (self.size == 0):\n        if (self.region is None):\n            self.region = ([[(- math.inf), math.inf]] * self.k)\n        axis = 0\n        return self.new_node(point, self.region, axis, data)\n    current_id = 0\n    while True:\n        parent_node = self.node_list[current_id]\n        axis = parent_node.axis\n        if (point[axis] < parent_node.point[axis]):\n            (next_id, left) = (parent_node.left, True)\n        else:\n            (next_id, left) = (parent_node.right, False)\n        if (next_id is None):\n            break\n        current_id = next_id\n    region = parent_node.region[:]\n    region[axis] = parent_node.region[axis][:]\n    limit = parent_node.point[axis]\n    if left:\n        self.node_list[current_id] = parent_node._replace(left=self.size)\n        region[axis][1] = limit\n    else:\n        self.node_list[current_id] = parent_node._replace(right=self.size)\n        region[axis][0] = limit\n    return self.new_node(point, region, ((axis + 1) % self.k), data)", "docstring": "Insert a new node in the tree.\n\nArgs:\npoint (:obj:`tuple` of float or int): Stores the position of the\nnode.\ndata (:obj, optional): The information stored by the node.\n\nReturns:\nint: The identifier of the new node.\n\nExample:\n>>> tree = Tree(4, 800)\n>>> point = (3, 7)\n>>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2}\n>>> node_id = tree.insert(point, data)", "source": "codesearchnet"}
{"code": "def to_json(self):\n    mapper_spec = self.mapper.to_json()\n    return {'name': self.name, 'mapreduce_id': self.mapreduce_id, 'mapper_spec': mapper_spec, 'params': self.params, 'hooks_class_name': self.hooks_class_name}", "docstring": "Serializes all data in this mapreduce spec into json form.\n\nReturns:\ndata in json format.", "source": "codesearchnet"}
{"code": "def cast(self, value):\n    if (self.type is None):\n        return value\n    if (self.type in (str, int, float)):\n        try:\n            return self.type(value)\n        except Exception as e:\n            raise errors.BisonError('Failed to cast {} to {}'.format(value, self.type)) from e\n    elif (self.type == bool):\n        return (value.lower() == 'true')\n    else:\n        raise errors.BisonError('Unsupported type for casting: {}'.format(self.type))", "docstring": "Cast a value to the type required by the option, if one is set.\n\nThis is used to cast the string values gathered from environment\nvariable into their required type.\n\nArgs:\nvalue: The value to cast.\n\nReturns:\nThe value casted to the expected type for the option.", "source": "codesearchnet"}
{"code": "def parse_op_and_node(line):\n    op_type = line.strip().split(' ')[0].replace('[', '').replace(']', '')\n    node_name = line.strip().split(' ')[1]\n    return (op_type, node_name)", "docstring": "Parse a line containing an op node followed by a node name.\n\nFor example, if the line is\n\"  [Variable] hidden/weights\",\nthis function will return (\"Variable\", \"hidden/weights\")\n\nArgs:\nline: The line to be parsed, as a str.\n\nReturns:\nName of the parsed op type.\nName of the parsed node.", "source": "github-repos"}
{"code": "def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):\n        \n        try:\n            all = mod[1].__all__\n        except AttributeError:\n            raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__)\n        try:\n            name = mod[1].__displayname__\n        except AttributeError:\n            name = mod[0]\n        try:\n            category = mod[1].__category__\n            self.__categories.setdefault(category, 0)\n            self.__categories[category] += 1\n        except AttributeError:\n            pass\n        feats = inspect.getmembers(mod[1])\n        fname = 'content/' + mod[1].__name__.replace('.', '/').replace(' ', '-')+'.rst'\n        feats = [f for f in feats if f[0] in all and (showprivate or not f[0][0:1] == '_')]\n        with open(fname, 'w') as fid:\n            fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))\n\n            for f in feats:\n                \n                if inspect.isclass(f[1]) or inspect.isfunction(f[1]):\n                    try:\n                        featname = f[1].__displayname__\n                    except AttributeError:\n                        featname = f[1].__name__\n                    try:\n                        category = f[1].__category__\n                        self.__categories.setdefault(category, 0)\n                        self.__categories[category] += 1\n                    except AttributeError:\n                        pass\n                    \n                    if inspect.isclass(f[1]):\n                        fid.write(Classifier.GetClassText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__), showprivate=showprivate, showinh=showinh))\n                    elif inspect.isfunction(f[1]):\n                         fid.write(Classifier.GetFunctionText(featname, '%s.%s' %  (mod[1].__name__, f[1].__name__)))\n\n            fid.close()\n        return '\\n   %s' % (fname.split('/')[-1])", "docstring": "An internal helper to create a page for a single module. This will\nautomatically generate the needed RSF to document the module\nand save the module to its own page in its appropriate location.\n\nArgs:\nmod (module): The single module to document as its own page\nshowprivate (bool): A flag for whether or not to display private members\n\nReturns:\nstr: The file name ready to be appended to a toctree", "source": "juraj-google-style"}
{"code": "def _exponent_handler_factory(ion_type, exp_chars, parse_func, first_char=None):\n\n    def transition(prev, c, ctx, trans):\n        if ((c in _SIGN) and (prev in exp_chars)):\n            ctx.value.append(c)\n        else:\n            _illegal_character(c, ctx)\n        return trans\n    illegal = (exp_chars + _SIGN)\n    return _numeric_handler_factory(_DIGITS, transition, (lambda c, ctx: (c in exp_chars)), illegal, parse_func, illegal_at_end=illegal, ion_type=ion_type, first_char=first_char)", "docstring": "Generates a handler co-routine which tokenizes an numeric exponent.\n\nArgs:\nion_type (IonType): The type of the value with this exponent.\nexp_chars (sequence): The set of ordinals of the legal exponent characters for this component.\nparse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a\nthunk that lazily parses the token.\nfirst_char (Optional[int]): The ordinal of the character that should be appended instead of the character that\noccurs first in this component. This is useful for preparing the token for parsing in the case where a\nparticular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value\nshould be replaced with 'e' for compatibility with python's Decimal type).", "source": "codesearchnet"}
{"code": "def read(self, n=-1):\n    self._preread_check()\n    if n == -1:\n        length = self.size() - self.tell()\n    else:\n        length = n\n    return self._prepare_value(self._read_buf.read(length))", "docstring": "Returns the contents of a file as a string.\n\nStarts reading from current position in file.\n\nArgs:\nn: Read `n` bytes if `n != -1`. If `n = -1`, reads to end of file.\n\nReturns:\n`n` bytes of the file (or whole file) in bytes mode or `n` bytes of the\nstring if in string (regular) mode.", "source": "github-repos"}
{"code": "def _value_loss(self, observ, reward, length):\n    \n    with tf.name_scope('value_loss'):\n      value = self._network(observ, length).value\n      return_ = utility.discounted_return(\n          reward, length, self._config.discount)\n      advantage = return_ - value\n      value_loss = 0.5 * self._mask(advantage ** 2, length)\n      summary = tf.summary.merge([\n          tf.summary.histogram('value_loss', value_loss),\n          tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])\n      value_loss = tf.reduce_mean(value_loss)\n      return tf.check_numerics(value_loss, 'value_loss'), summary", "docstring": "Compute the loss function for the value baseline.\n\nThe value loss is the difference between empirical and approximated returns\nover the collected episodes. Returns the loss tensor and a summary strin.\n\nArgs:\nobserv: Sequences of observations.\nreward: Sequences of reward.\nlength: Batch of sequence lengths.\n\nReturns:\nTuple of loss tensor and summary tensor.", "source": "juraj-google-style"}
{"code": "def __init__(self, export_dir):\n    self._export_dir = export_dir\n    self._variables_path = path_helpers.get_variables_path(export_dir)\n    self._saved_model = parse_saved_model(export_dir)", "docstring": "Creates a `SavedModelLoader`.\n\nArgs:\nexport_dir: Directory in which the SavedModel protocol buffer and\nvariables to be loaded are located.", "source": "github-repos"}
{"code": "def add_polyhedron(self, neighbors, center, color, opacity=1.0,\n                       draw_edges=False, edges_color=[0.0, 0.0, 0.0],\n                       edges_linewidth=2):\n        \n        points = vtk.vtkPoints()\n        conv = vtk.vtkConvexPointSet()\n        for i in range(len(neighbors)):\n            x, y, z = neighbors[i].coords\n            points.InsertPoint(i, x, y, z)\n            conv.GetPointIds().InsertId(i, i)\n        grid = vtk.vtkUnstructuredGrid()\n        grid.Allocate(1, 1)\n        grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())\n        grid.SetPoints(points)\n\n        dsm = vtk.vtkDataSetMapper()\n        polysites = [center]\n        polysites.extend(neighbors)\n        self.mapper_map[dsm] = polysites\n        if vtk.VTK_MAJOR_VERSION <= 5:\n            dsm.SetInputConnection(grid.GetProducerPort())\n        else:\n            dsm.SetInputData(grid)\n        ac = vtk.vtkActor()\n        \n        ac.SetMapper(dsm)\n        ac.GetProperty().SetOpacity(opacity)\n        if color == 'element':\n            \n            \n            myoccu = 0.0\n            for specie, occu in center.species.items():\n                if occu > myoccu:\n                    myspecie = specie\n                    myoccu = occu\n            color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]\n            ac.GetProperty().SetColor(color)\n        else:\n            ac.GetProperty().SetColor(color)\n        if draw_edges:\n            ac.GetProperty().SetEdgeColor(edges_color)\n            ac.GetProperty().SetLineWidth(edges_linewidth)\n            ac.GetProperty().EdgeVisibilityOn()\n        self.ren.AddActor(ac)", "docstring": "Adds a polyhedron.\n\nArgs:\nneighbors: Neighbors of the polyhedron (the vertices).\ncenter: The atom in the center of the polyhedron.\ncolor: Color for text as RGB.\nopacity: Opacity of the polyhedron\ndraw_edges: If set to True, the a line will be drawn at each edge\nedges_color: Color of the line for the edges\nedges_linewidth: Width of the line drawn for the edges", "source": "juraj-google-style"}
{"code": "def CopyMicrosecondsToFractionOfSecond(cls, microseconds):\n    \n    if microseconds < 0 or microseconds >= definitions.MICROSECONDS_PER_SECOND:\n      raise ValueError(\n          'Number of microseconds value: {0:d} out of bounds.'.format(\n              microseconds))\n\n    milliseconds, _ = divmod(\n        microseconds, definitions.MICROSECONDS_PER_MILLISECOND)\n    return decimal.Decimal(milliseconds) / definitions.MILLISECONDS_PER_SECOND", "docstring": "Copies the number of microseconds to a fraction of second value.\n\nArgs:\nmicroseconds (int): number of microseconds.\n\nReturns:\ndecimal.Decimal: fraction of second, which must be a value between 0.0 and\n1.0.\n\nRaises:\nValueError: if the number of microseconds is out of bounds.", "source": "juraj-google-style"}
{"code": "def json_using_iso8601(__obj: Dict) -> Dict:\n    \n    for key, value in __obj.items():\n        with suppress(TypeError, ValueError):\n            __obj[key] = parse_datetime(value)\n        with suppress(TypeError, ValueError):\n            __obj[key] = parse_delta(value)\n    return __obj", "docstring": "Parse ISO-8601 values from JSON databases.\n\nSee :class:`json.JSONDecoder`\n\nArgs:\n__obj: Object to decode", "source": "juraj-google-style"}
{"code": "def save(self, resource):\n    resource_type = None\n    xid = None\n    if isinstance(resource, dict):\n        resource_type = resource.get('type')\n        xid = resource.get('xid')\n    else:\n        resource_type = resource.type\n        xid = resource.xid\n    if ((resource_type is not None) and (xid is not None)):\n        saved = True\n        if (resource_type in self.tcex.group_types):\n            try:\n                self.groups_shelf[xid] = resource\n            except Exception:\n                saved = False\n            if saved:\n                try:\n                    del self._groups[xid]\n                except KeyError:\n                    pass\n        elif (resource_type in self.tcex.indicator_types_data.keys()):\n            try:\n                self.indicators_shelf[xid] = resource\n            except Exception:\n                saved = False\n            if saved:\n                try:\n                    del self._indicators[xid]\n                except KeyError:\n                    pass", "docstring": "Save group|indicator dict or object to shelve.\n\nBest effort to save group/indicator data to disk.  If for any reason the save fails\nthe data will still be accessible from list in memory.\n\nArgs:\nresource (dict|obj): The Group or Indicator dict or object.", "source": "codesearchnet"}
{"code": "def IsAllocated(self):\n    if (self._stat_object is None):\n        self._stat_object = self._GetStat()\n    return (self._stat_object and self._stat_object.is_allocated)", "docstring": "Determines if the file entry is allocated.\n\nReturns:\nbool: True if the file entry is allocated.", "source": "codesearchnet"}
{"code": "def allan_variance(data, dt, tmax=10):\n    allanvar = []\n    nmax = (len(data) if (len(data) < (tmax / dt)) else int((tmax / dt)))\n    for i in range(1, (nmax + 1)):\n        databis = data[(len(data) % i):]\n        y = databis.reshape((len(data) \n        allanvar.append((((y[1:] - y[:(- 1)]) ** 2).mean() / 2))\n    return ((dt * np.arange(1, (nmax + 1))), np.array(allanvar))", "docstring": "Calculate Allan variance.\n\nArgs:\ndata (np.ndarray): Input data.\ndt (float): Time between each data.\ntmax (float): Maximum time.\n\nReturns:\nvk (np.ndarray): Frequency.\nallanvar (np.ndarray): Allan variance.", "source": "codesearchnet"}
{"code": "def list_of_vars(arg_plot):\n    lovs = [[[var for var in svars.split(',') if var] for svars in pvars.split('.') if svars] for pvars in arg_plot.split('-') if pvars]\n    lovs = [[slov for slov in lov if slov] for lov in lovs if lov]\n    return [lov for lov in lovs if lov]", "docstring": "Construct list of variables per plot.\n\nArgs:\narg_plot (str): string with variable names separated with\n``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).\nReturns:\nthree nested lists of str\n\n- variables on the same subplot;\n- subplots on the same figure;\n- figures.", "source": "codesearchnet"}
{"code": "def get_sso(self, role):\n        \n        uri = \"{}/sso?role={}\".format(self.data['uri'], role)\n        return self._helper.do_get(uri)", "docstring": "Builds the SSO (Single Sign-On) URL parameters for the specified enclosure. This allows the user to\nlog in to the enclosure without providing credentials. This API is currently only supported by C7000 enclosures.\n\nArgs:\nrole: Role\n\nReturns:\nSSO (Single Sign-On) URL parameters.", "source": "juraj-google-style"}
{"code": "def make_parts_for(self, field_name, field_data):\n    typ = field_data.field_type\n    subtyp = field_data.field_subtype\n    if (typ in ('read', 'xadc')):\n        writeable = False\n    else:\n        writeable = True\n    if ((typ == 'time') or ((typ in ('param', 'read')) and (subtyp == 'time'))):\n        self._make_time_parts(field_name, field_data, writeable)\n    elif ((typ == 'write') and (subtyp == 'action')):\n        self._make_action_part(field_name, field_data)\n    elif (typ in ('param', 'read', 'write', 'xadc')):\n        self._make_param_part(field_name, field_data, writeable)\n    elif (typ == 'bit_out'):\n        self._make_out(field_name, field_data, 'bit')\n    elif (typ == 'pos_out'):\n        self._make_out(field_name, field_data, 'pos')\n        self._make_scale_offset(field_name)\n        self._make_out_capture(field_name, field_data)\n    elif (typ == 'ext_out'):\n        self._make_out_capture(field_name, field_data)\n    elif (typ == 'bit_mux'):\n        self._make_mux(field_name, field_data, 'bit')\n        self._make_mux_delay(field_name)\n    elif (typ == 'pos_mux'):\n        self._make_mux(field_name, field_data, 'pos')\n    elif (typ == 'table'):\n        self._make_table(field_name, field_data)\n    else:\n        raise ValueError(('Unknown type %r subtype %r' % (typ, subtyp)))", "docstring": "Create the relevant parts for this field\n\nArgs:\nfield_name (str): Short field name, e.g. VAL\nfield_data (FieldData): Field data object", "source": "codesearchnet"}
{"code": "class DacOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    audio_values: Optional[torch.FloatTensor] = None\n    quantized_representation: Optional[torch.FloatTensor] = None\n    audio_codes: Optional[torch.LongTensor] = None\n    projected_latents: Optional[torch.FloatTensor] = None", "docstring": "Args:\nloss (`torch.Tensor`):\nLoss from the encoder model, comprising the weighted combination of the commitment and codebook losses.\naudio_values (`torch.Tensor` of shape `(batch_size, input_length)`):\nReconstructed audio data.\nquantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):\nQuantized continuous representation of input.\naudio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`):\nCodebook indices for each codebook (quantized discrete representation of input).\nprojected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):\nProjected latents (continuous representation of input before quantization).", "source": "github-repos"}
{"code": "def _create_forward(out_node):\n    retval = out_node.body[0].body[(- 1)]\n    if (len(retval.value.elts) == 1):\n        retval.value = retval.value.elts[0]\n    return out_node", "docstring": "Create a user-friendly forward function.\n\nEnsures that a single value instead of a tuple is returned if the user asked\nfor the gradient with respect to only one input.\n\nArgs:\nout_node: The function definition AST.\n\nReturns:\nThe function definition with potentially changed return statement.", "source": "codesearchnet"}
{"code": "def read(self, *labels, **args):\n    raise NotImplementedError", "docstring": "Return the PCollection as a list as well as the version number.\n\nArgs:\n*labels: List of labels for PCollection instance.\n**args: Dict of additional arguments. Currently only 'tail' as a boolean.\nWhen tail is True, will wait and read new elements until the cache is\ncomplete.\n\nReturns:\nA tuple containing an iterator for the items in the PCollection and the\nversion number.\n\nIt is possible that the version numbers from read() and_latest_version()\nare different. This usually means that the cache's been evicted (thus\nunavailable => read() returns version = -1), but it had reached version n\nbefore eviction.", "source": "github-repos"}
{"code": "def query_file(self, file_sha, verbose=False):\n        \n\n        \n        if len(file_sha) not in [64, 40]:  \n            print('File sha looks malformed: {:s}'.format(file_sha))\n            return {'file_sha': file_sha, 'malformed': True}\n\n        \n        return self._query('file', file_sha, verbose)", "docstring": "Query the VirusTotal Service\nArgs:\nfile_sha (str): The file sha1 or sha256 hash\nurl (str): The domain/url to be queried (default=None)", "source": "juraj-google-style"}
{"code": "def functions(start=None, end=None):\n    (start, end) = fix_addresses(start, end)\n    for func_t in idautils.Functions(start, end):\n        (yield Function(func_t))", "docstring": "Get all functions in range.\n\nArgs:\nstart: Start address of the range. Defaults to IDB start.\nend: End address of the range. Defaults to IDB end.\n\nReturns:\nThis is a generator that iterates over all the functions in the IDB.", "source": "codesearchnet"}
{"code": "def reopen_encoded(fileobj, mode='r', fallback_encoding=None):\n    encoding = determine_encoding(fileobj.name, fallback_encoding)\n    fileobj.close()\n    return open(fileobj.name, mode, encoding=encoding)", "docstring": "Makes sure that a file was opened with some valid encoding.\n\nArguments:\nfileobj (file): The file-object.\nmode (str, optional): The mode in which to re-open the file.\nfallback_encoding (str, optional): The encoding in which to re-open\nthe file if it does not specify an encoding itself.\n\nReturns:\nfile: The re-opened file.", "source": "codesearchnet"}
{"code": "def _tokenize(self, text, **kwargs):\n    return self.sp_model.encode(text, out_type=str)", "docstring": "Args:\ntext: TextInput\nReturns a tokenized string. The Gemma tokenizer never adds a prefix space.", "source": "github-repos"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        self.Type = TransactionType.RegisterTransaction\n        self.AssetType = reader.ReadByte()\n        self.Name = reader.ReadVarString()\n        self.Amount = reader.ReadFixed8()\n        self.Precision = reader.ReadByte()\n        self.Owner = ECDSA.Deserialize_Secp256r1(reader)\n        \n        self.Admin = reader.ReadUInt160()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def get_absolute_name(package, relative_name):\n    path = (package.split('.') if package else [])\n    name = relative_name.lstrip('.')\n    ndots = (len(relative_name) - len(name))\n    if (ndots > len(path)):\n        return relative_name\n    absolute_path = path[:((len(path) + 1) - ndots)]\n    if name:\n        absolute_path.append(name)\n    return '.'.join(absolute_path)", "docstring": "Joins a package name and a relative name.\n\nArgs:\npackage: A dotted name, e.g. foo.bar.baz\nrelative_name: A dotted name with possibly some leading dots, e.g. ..x.y\n\nReturns:\nThe relative name appended to the parent's package, after going up one\nlevel for each leading dot.\ne.g. foo.bar.baz + ..hello.world -> foo.hello.world\nThe unchanged relative_name if it does not start with a dot\nor has too many leading dots.", "source": "codesearchnet"}
{"code": "def stop(self, timeout=5):\n    for worker in self._threads:\n        self._queue.put(_SHUTDOWNREQUEST)\n    current = threading.currentThread()\n    if ((timeout is not None) and (timeout >= 0)):\n        endtime = (time.time() + timeout)\n    while self._threads:\n        worker = self._threads.pop()\n        if ((worker is not current) and worker.isAlive()):\n            try:\n                if ((timeout is None) or (timeout < 0)):\n                    worker.join()\n                else:\n                    remaining_time = (endtime - time.time())\n                    if (remaining_time > 0):\n                        worker.join(remaining_time)\n                    if worker.isAlive():\n                        c = worker.conn\n                        if (c and (not c.rfile.closed)):\n                            try:\n                                c.socket.shutdown(socket.SHUT_RD)\n                            except TypeError:\n                                c.socket.shutdown()\n                        worker.join()\n            except (AssertionError, KeyboardInterrupt):\n                pass", "docstring": "Terminate all worker threads.\n\nArgs:\ntimeout (int): time to wait for threads to stop gracefully", "source": "codesearchnet"}
{"code": "def decode(cls, command_str):\n        \n\n        name, _, arg = command_str.partition(\" \")\n\n        args = []\n\n        if len(arg) > 0:\n            if arg[0] != '{' or arg[-1] != '}':\n                raise DataError(\"Invalid command, argument is not contained in { and }\", arg=arg, cmd=name)\n\n            arg = arg[1:-1]\n            args = arg.split(\",\")\n\n        proc = []\n\n        for arg in args:\n            if arg.startswith(\"hex:\"):\n                arg = unhexlify(arg[4:]).decode('utf-8')\n\n            proc.append(arg)\n\n        return Command(name, proc)", "docstring": "Decode a string encoded command back into a Command object.\n\nArgs:\ncommand_str (str): The encoded command string output from a\nprevious call to encode.\n\nReturns:\nCommand: The decoded Command object.", "source": "juraj-google-style"}
{"code": "def cut_video(in_file,\n              out_file,\n              start=None,\n              end=None,\n              vcodec=None,\n              acodec=None,\n              log_level='info',\n              print_cmd=False,\n              **kwargs):\n    \n    options = {'log_level': log_level}\n    if vcodec is None:\n        options['vcodec'] = 'copy'\n    if acodec is None:\n        options['acodec'] = 'copy'\n    if start:\n        options['ss'] = start\n    else:\n        start = 0\n    if end:\n        options['t'] = end - start\n    convert_video(in_file, out_file, print_cmd, **options)", "docstring": "Cut a clip from a video.\n\nArgs:\nin_file (str): Input video filename.\nout_file (str): Output video filename.\nstart (None or float): Start time (in seconds).\nend (None or float): End time (in seconds).\nvcodec (None or str): Output video codec, None for unchanged.\nacodec (None or str): Output audio codec, None for unchanged.\nlog_level (str): Logging level of ffmpeg.\nprint_cmd (bool): Whether to print the final ffmpeg command.", "source": "juraj-google-style"}
{"code": "def remove_collisions(self, min_dist=0.5):\n        \n        s_f_coords = self.structure.frac_coords\n        f_coords = self.extrema_coords\n        if len(f_coords) == 0:\n            if self.extrema_type is None:\n                logger.warning(\n                    \"Please run ChargeDensityAnalyzer.get_local_extrema first!\")\n                return\n            new_f_coords = []\n            self._update_extrema(new_f_coords, self.extrema_type)\n            return new_f_coords\n\n        dist_matrix = self.structure.lattice.get_all_distances(f_coords,\n                                                               s_f_coords)\n        all_dist = np.min(dist_matrix, axis=1)\n        new_f_coords = []\n\n        for i, f in enumerate(f_coords):\n            if all_dist[i] > min_dist:\n                new_f_coords.append(f)\n        self._update_extrema(new_f_coords, self.extrema_type)\n\n        return new_f_coords", "docstring": "Remove predicted sites that are too close to existing atoms in the\nstructure.\n\nArgs:\nmin_dist (float): The minimum distance (in Angstrom) that\na predicted site needs to be from existing atoms. A min_dist\nwith value <= 0 returns all sites without distance checking.", "source": "juraj-google-style"}
{"code": "def _add_ttl_ns(self, line):\n        \n        lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\n        lg.setLevel(self.log_level)\n\n        lg.debug(\"line:\\n%s\", line)\n        line = str(line).strip()\n        \n        if line is None or line == 'none' or line == '' \\\n                or not line.lower().startswith('@prefix'):\n            return\n        \n        line = line.replace(\"@prefix\",\"\",1).strip()\n        if line.endswith(\".\"):\n            line = line[:-1]\n        prefix = line[:line.find(\":\")].strip()\n        uri = self.clean_iri(line[line.find(\":\")+1:].strip())\n        \n        lg.debug(\"\\nprefix: %s  uri: %s\", prefix, uri)\n        self.bind(prefix, uri, override=False, calc=False)", "docstring": "takes one prefix line from the turtle file and binds the namespace\nto the class\n\nArgs:\nline: the turtle prefix line string", "source": "juraj-google-style"}
{"code": "def extract_backup_bundle(self, resource, timeout=-1):\n        \n        return self._client.update(resource, uri=self.BACKUP_ARCHIVE_PATH, timeout=timeout)", "docstring": "Extracts the existing backup bundle on the appliance and creates all the artifacts.\n\nArgs:\nresource (dict): Deployment Group to extract.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation in\nOneView, it just stops waiting for its completion.\n\nReturns:\ndict: A Deployment Group associated with the Artifact Bundle backup.", "source": "juraj-google-style"}
{"code": "def dft_task(cls, mol, xc='b3lyp', **kwargs):\n    t = NwTask.from_molecule(mol, theory='dft', **kwargs)\n    t.theory_directives.update({'xc': xc, 'mult': t.spin_multiplicity})\n    return t", "docstring": "A class method for quickly creating DFT tasks with optional\ncosmo parameter .\n\nArgs:\nmol: Input molecule\nxc: Exchange correlation to use.\n\\\\*\\\\*kwargs: Any of the other kwargs supported by NwTask. Note the\ntheory is always \"dft\" for a dft task.", "source": "codesearchnet"}
{"code": "def get_special_dtypes_update(self, model, torch_dtype: 'torch.dtype') -> Dict[str, 'torch.dtype']:\n    return {name: torch_dtype for name, _ in model.named_parameters() if any((m in name for m in self.modules_to_not_convert))}", "docstring": "returns dtypes for modules that are not quantized - used for the computation of the device_map in case\none passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified\nin `_process_model_before_weight_loading`.\n\nArgs:\nmodel (`~transformers.PreTrainedModel`):\nThe model to quantize\ntorch_dtype (`torch.dtype`):\nThe dtype passed in `from_pretrained` method.", "source": "github-repos"}
{"code": "def run(self, inputs=None, warmup_iterations: int=10, benchmark_iterations: int=100) -> TestResultCollection:\n    inputs = inputs or self.generate_random_inputs()\n\n    def run_model(model, **kwargs):\n        return model.run(inputs, warmup_iterations, benchmark_iterations, **kwargs)\n    try:\n        cpu_base_result = run_model(self._ori_model, enable_gpu=False)\n    except RuntimeError as err:\n        logging.info('%s cannot run on CPU. Reason: %s.', self._ori_model.model_config, err)\n        cpu_base_result = None\n    gpu_base_result = run_model(self._ori_model, enable_gpu=True)\n    trt_results = list(map(run_model, self._trt_models))\n    return TestResultCollection(test_name=self._name, model_config=self.model_config, cpu_base_result=cpu_base_result, gpu_base_result=gpu_base_result, trt_results=trt_results)", "docstring": "Runs model inference with provided or randomly generated input tensors.\n\nArgs:\ninputs: Mapping from names to input ndarrays in TF1. Or a sequence of\ntensors in TF2. If `None`, ramdomly generated input tensors will be used\ninstead.\nwarmup_iterations: Number of inferences to warm up the runtime.\nbenchmark_iterations: Number of inferences to measure the latency.\n\nReturns:\n`TestResultCollection` summarizing latency and numerics information for\ndifferent TensorRT conversion settings.", "source": "github-repos"}
{"code": "def _set_least_batch_id(self, txn_signature):\n    batch = self._batches_by_txn_id[txn_signature]\n    least_index = self._index_of_batch(self._batches_by_id[self._least_batch_id_wo_results].batch)\n    current_index = self._index_of_batch(batch)\n    all_prior = False\n    if (current_index <= least_index):\n        return\n    if all((all(((t.header_signature in self._txn_results) for t in b.transactions)) for b in self._batches[least_index:current_index])):\n        all_prior = True\n    if (not all_prior):\n        return\n    possible_least = self._batches[current_index].header_signature\n    for b in self._batches[current_index:]:\n        if (not all(((t.header_signature in self._txn_results) for t in b.transactions))):\n            possible_least = b.header_signature\n            break\n    self._least_batch_id_wo_results = possible_least", "docstring": "Set the first batch id that doesn't have all results.\n\nArgs:\ntxn_signature (str): The txn identifier of the transaction with\nresults being set.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 lang='en',\n                 lower=True,\n                 charset=None):\n        \n        super(SentenceCharTokenizer, self).__init__(lang, lower, charset)", "docstring": "Encodes text into `(samples, sentences, characters)`\n\nArgs:\nlang: The spacy language to use. (Default value: 'en')\nlower: Lower cases the tokens if True. (Default value: True)\ncharset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.\n(Default value: None)", "source": "juraj-google-style"}
{"code": "def _FilterOutPathInfoDuplicates(path_infos):\n  \n  pi_dict = {}\n\n  for pi in path_infos:\n    path_key = (pi.path_type, pi.GetPathID())\n    pi_dict.setdefault(path_key, []).append(pi)\n\n  def _SortKey(pi):\n    return (\n        pi.stat_entry.st_ctime,\n        pi.stat_entry.st_mtime,\n        pi.stat_entry.st_atime,\n        pi.stat_entry.st_ino,\n    )\n\n  for pi_values in pi_dict.values():\n    if len(pi_values) > 1:\n      pi_values.sort(key=_SortKey, reverse=True)\n\n  return [v[0] for v in pi_dict.values()]", "docstring": "Filters out duplicates from passed PathInfo objects.\n\nArgs:\npath_infos: An iterable with PathInfo objects.\n\nReturns:\nA list of PathInfo objects with duplicates removed. Duplicates are\nremoved following this logic: they're sorted by (ctime, mtime, atime,\ninode number) in the descending order and then the first one is taken\nand the others are dropped.", "source": "juraj-google-style"}
{"code": "def dataverse_download_doi(doi, local_fname=None, file_requirements={}, clobber=False):\n    metadata = dataverse_search_doi(doi)\n\n    def requirements_match(metadata):\n        for key in file_requirements.keys():\n            if (metadata['dataFile'].get(key, None) != file_requirements[key]):\n                return False\n        return True\n    for file_metadata in metadata['data']['latestVersion']['files']:\n        if requirements_match(file_metadata):\n            file_id = file_metadata['dataFile']['id']\n            md5sum = file_metadata['dataFile']['md5']\n            if (local_fname is None):\n                local_fname = file_metadata['dataFile']['filename']\n            if ((not clobber) and os.path.isfile(local_fname)):\n                print('Checking existing file to see if MD5 sum matches ...')\n                md5_existing = get_md5sum(local_fname)\n                if (md5_existing == md5sum):\n                    print('File exists. Not overwriting.')\n                    return\n            print(\"Downloading data to '{}' ...\".format(local_fname))\n            dataverse_download_id(file_id, md5sum, fname=local_fname, clobber=False)\n            return\n    raise DownloadError(('No file found under the given DOI matches the requirements.\\nThe metadata found for this DOI was:\\n' + json.dumps(file_metadata, indent=2, sort_keys=True)))", "docstring": "Downloads a file from the Dataverse, using a DOI and set of metadata\nparameters to locate the file.\n\nArgs:\ndoi (str): Digital Object Identifier (DOI) containing the file.\nlocal_fname (Optional[str]): Local filename to download the file to. If\n`None`, then use the filename provided by the Dataverse. Defaults to\n`None`.\nfile_requirements (Optional[dict]): Select the file containing the\ngiven metadata entries. If multiple files meet these requirements,\nonly the first in downloaded. Defaults to `{}`, corresponding to no\nrequirements.\n\nRaises:\nDownloadError: Either no matching file was found under the given DOI, or\nthe MD5 sum of the file was not as expected.\nrequests.exceptions.HTTPError: The given DOI does not exist, or there\nwas a problem connecting to the Dataverse.", "source": "codesearchnet"}
{"code": "def ReadSerialized(cls, json_string):  \n    \n    if json_string:\n      json_dict = json.loads(json_string)\n      return cls.ReadSerializedDict(json_dict)\n\n    return None", "docstring": "Reads an attribute container from serialized form.\n\nArgs:\njson_string (str): JSON serialized attribute container.\n\nReturns:\nAttributeContainer: attribute container or None.", "source": "juraj-google-style"}
{"code": "def put(self, value, priority=100):\n        \n        task_name = '{}{:03d}_{}'.format(self.TASK_PREFIX, priority, self._counter)\n        path = posixpath.join(self._queue_path, task_name)\n        self._client.kv[path] = value", "docstring": "Put a task into the queue.\n\nArgs:\nvalue (str): Task data.\npriority (int): An optional priority as an integer with at most 3 digits.\nLower values signify higher priority.", "source": "juraj-google-style"}
{"code": "def update_branch(profile, name, sha):\n    \n    ref = \"heads/\" + name\n    data = refs.update_ref(profile, ref, sha)\n    return data", "docstring": "Move a branch's HEAD to a new SHA.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the branch to update.\n\nsha\nThe commit SHA to point the branch's HEAD to.\n\nReturns:\nA dict with data about the branch.", "source": "juraj-google-style"}
{"code": "def prepare_wheel_srcs(headers: list[str], srcs: list[str], dests: list[str], aot: list[str], srcs_dir: str, version: str) -> None:\n    prepare_headers(headers, os.path.join(srcs_dir, 'tensorflow/include'))\n    prepare_srcs(srcs, dests, srcs_dir)\n    prepare_aot(aot, os.path.join(srcs_dir, 'tensorflow/xla_aot_runtime_src'))\n    create_init_files(os.path.join(srcs_dir, 'tensorflow'))\n    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/MANIFEST.in'), os.path.join(srcs_dir, 'MANIFEST.in'))\n    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/THIRD_PARTY_NOTICES.txt'), os.path.join(srcs_dir, 'tensorflow/THIRD_PARTY_NOTICES.txt'))\n    update_xla_tsl_imports(os.path.join(srcs_dir, 'tensorflow'))\n    if dests:\n        return\n    if not is_windows():\n        rename_libtensorflow(os.path.join(srcs_dir, 'tensorflow'), version)\n    if not is_macos() and (not is_windows()):\n        patch_so(srcs_dir)", "docstring": "Rearrange source and header files.\n\nArgs:\nheaders: a list of paths to header files.\nsrcs: a list of paths to the rest of files.\ndests: a list of paths to files with srcs files destinations.\naot: a list of paths to files that should be in xla_aot directory.\nsrcs_dir: directory to copy files to.\nversion: tensorflow version.", "source": "github-repos"}
{"code": "def psnr_and_ssim(output, target):\n    output = tf.cast(output, dtype=tf.int32)\n    target = tf.cast(target, dtype=tf.int32)\n    psnr = tf.image.psnr(output, target, max_val=255)\n    ssim = tf.image.ssim(output, target, max_val=255)\n    return (psnr, ssim)", "docstring": "Compute the PSNR and SSIM.\n\nArgs:\noutput: 4-D Tensor, shape=(num_frames, height, width, num_channels)\ntarget: 4-D Tensor, shape=(num_frames, height, width, num_channels)\nReturns:\npsnr: 1-D Tensor, shape=(num_frames,)\nssim: 1-D Tensor, shape=(num_frames,)", "source": "codesearchnet"}
{"code": "def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution, event_ndims):\n    with tf.name_scope('pad_mix_dims'):\n\n        def _get_ndims(d):\n            if (tensorshape_util.rank(d.batch_shape) is not None):\n                return tensorshape_util.rank(d.batch_shape)\n            return tf.shape(input=d.batch_shape_tensor())[0]\n        dist_batch_ndims = _get_ndims(mixture_distribution)\n        cat_batch_ndims = _get_ndims(categorical_distribution)\n        pad_ndims = tf.where(categorical_distribution.is_scalar_batch(), dist_batch_ndims, (dist_batch_ndims - cat_batch_ndims))\n        s = tf.shape(input=x)\n        x = tf.reshape(x, shape=tf.concat([s[:(- 1)], tf.ones([pad_ndims], dtype=tf.int32), s[(- 1):], tf.ones([event_ndims], dtype=tf.int32)], axis=0))\n        return x", "docstring": "Pad dimensions of event tensors for mixture distributions.\n\nSee `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.\n\nArgs:\nx: event tensor to pad.\nmixture_distribution: Base distribution of the mixture.\ncategorical_distribution: `Categorical` distribution that mixes the base\ndistribution.\nevent_ndims: Integer specifying the number of event dimensions in the event\ntensor.\n\nReturns:\nA padded version of `x` that can broadcast with `categorical_distribution`.", "source": "codesearchnet"}
{"code": "def _checksum(cls, line):\n        \n        tr_table = str.maketrans({c: None for c in ascii_uppercase + \"+ .\"})\n        no_letters = line[:68].translate(tr_table).replace(\"-\", \"1\")\n        return sum([int(l) for l in no_letters]) % 10", "docstring": "Compute the checksum of a full line\n\nArgs:\nline (str): Line to compute the checksum from\nReturn:\nint: Checksum (modulo 10)", "source": "juraj-google-style"}
{"code": "def get_poi_types(self, **kwargs):\n    params = {'cultureInfo': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('geo', 'get_poi_types', **params)\n    values = result.get('types', [])\n    return (True, [emtype.PoiType(**a) for a in values])", "docstring": "Obtain POI types.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[PoiType]), or message string\nin case of error.", "source": "codesearchnet"}
{"code": "def _has_strict_none_origins(self, binding):\n    if not self._analyzing:\n        return True\n    has_any_none_origin = False\n    walker = cfg_utils.walk_binding(binding, keep_binding=lambda b: self._data_is_none(b.data))\n    origin = None\n    while True:\n        try:\n            origin = walker.send(origin)\n        except StopIteration:\n            break\n        for source_set in origin.source_sets:\n            if not source_set:\n                if self.ctx.program.is_reachable(src=self.frame.node, dst=origin.where):\n                    return True\n                has_any_none_origin = True\n    return not has_any_none_origin", "docstring": "Whether the binding has any possible origins, with None filtering.\n\nDetermines whether the binding has any possibly visible origins at the\ncurrent node once we've filtered out false positives on None. The caller\nstill must call HasCombination() to find out whether these origins are\nactually reachable.\n\nArgs:\nbinding: A cfg.Binding.\n\nReturns:\nTrue if there are possibly visible origins, else False.", "source": "github-repos"}
{"code": "def _SmallestColSize(self, text):\n        \n        if not text:\n            return 0\n        stripped = terminal.StripAnsiText(text)\n        return max(len(word) for word in stripped.split())", "docstring": "Finds the largest indivisible word of a string.\n\n...and thus the smallest possible column width that can contain that\nword unsplit over rows.\n\nArgs:\ntext: A string of text potentially consisting of words.\n\nReturns:\nInteger size of the largest single word in the text.", "source": "juraj-google-style"}
{"code": "def remote_file(self, branch='master', filename=''):\n    LOG.info('Retrieving \"%s\" from \"%s\".', filename, self.git_short)\n    file_contents = ''\n    try:\n        file_blob = self.project.files.get(file_path=filename, ref=branch)\n    except gitlab.exceptions.GitlabGetError:\n        file_blob = None\n    LOG.debug('GitLab file response:\\n%s', file_blob)\n    if (not file_blob):\n        msg = 'Project \"{0}\" is missing file \"{1}\" in \"{2}\" branch.'.format(self.git_short, filename, branch)\n        LOG.warning(msg)\n        raise FileNotFoundError(msg)\n    else:\n        file_contents = b64decode(file_blob.content).decode()\n    LOG.debug('Remote file contents:\\n%s', file_contents)\n    return file_contents", "docstring": "Read the remote file on Git Server.\n\nArgs:\nbranch (str): Git Branch to find file.\nfilename (str): Name of file to retrieve relative to root of\nrepository.\n\nReturns:\nstr: Contents of remote file.\n\nRaises:\nFileNotFoundError: Requested file missing.", "source": "codesearchnet"}
{"code": "def _ParseVSSProcessingOptions(self, options):\n    vss_only = False\n    vss_stores = None\n    self._process_vss = (not getattr(options, 'no_vss', False))\n    if self._process_vss:\n        vss_only = getattr(options, 'vss_only', False)\n        vss_stores = getattr(options, 'vss_stores', None)\n    if vss_stores:\n        try:\n            self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')\n        except ValueError:\n            raise errors.BadConfigOption('Unsupported VSS stores')\n    self._vss_only = vss_only\n    self._vss_stores = vss_stores", "docstring": "Parses the VSS processing options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def AddTableColumns(self, table, columns):\n    \n    table_columns = self._table_columns.setdefault(table, [])\n    for attr in columns:\n      if attr not in table_columns:\n        table_columns.append(attr)", "docstring": "Add columns to table if they are not already there.\n\nArgs:\ntable: table name as a string\ncolumns: an iterable of column names", "source": "juraj-google-style"}
{"code": "def get_timezone_olson_id():\n    tzoffset = int(time.timezone / 3600)\n    if tzoffset <= 0:\n        gmt = f'GMT+{-tzoffset}'\n    else:\n        gmt = f'GMT-{tzoffset}'\n    return GMT_to_olson[gmt]", "docstring": "Return the Olson ID of the local (non-DST) timezone.\n\nReturns:\nA string representing one of the Olson IDs of the local (non-DST)\ntimezone.", "source": "github-repos"}
{"code": "def _IsMetadataFile(self, file_entry):\n    if ((file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK) and (file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK)):\n        return True\n    return False", "docstring": "Determines if the file entry is a metadata file.\n\nArgs:\nfile_entry (dfvfs.FileEntry): a file entry object.\n\nReturns:\nbool: True if the file entry is a metadata file.", "source": "codesearchnet"}
{"code": "def run(self, inputs: Dict[(str, Union[(float, Iterable)])], torch_size: Optional[int]=None) -> Union[(float, Iterable)]:\n    for i in self.inputs:\n        self.nodes[i]['value'] = inputs[i]\n    for func_set in self.function_sets:\n        for func_name in func_set:\n            lambda_fn = self.nodes[func_name]['lambda_fn']\n            output_node = list(self.successors(func_name))[0]\n            signature = self.nodes[func_name]['func_inputs']\n            input_values = [self.nodes[n]['value'] for n in signature]\n            res = lambda_fn(*input_values)\n            if ((torch_size is not None) and (len(signature) == 0)):\n                self.nodes[output_node]['value'] = torch.tensor(([res] * torch_size), dtype=torch.double)\n            else:\n                self.nodes[output_node]['value'] = res\n    return self.nodes[self.output_node]['value']", "docstring": "Executes the GrFN over a particular set of inputs and returns the\nresult.\n\nArgs:\ninputs: Input set where keys are the names of input nodes in the\nGrFN and each key points to a set of input values (or just one).\n\nReturns:\nA set of outputs from executing the GrFN, one for every set of\ninputs.", "source": "codesearchnet"}
{"code": "def orient_directed_graph(self, data, dag, alg='HC'):\n    alg_dic = {'HC': hill_climbing, 'HCr': hill_climbing_with_removal, 'tabu': tabu_search, 'EHC': exploratory_hill_climbing}\n    return alg_dic[alg](data, dag, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu, nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose)", "docstring": "Modify and improve a directed acyclic graph solution using CGNN.\n\nArgs:\ndata (pandas.DataFrame): Observational data on which causal\ndiscovery has to be performed.\ndag (nx.DiGraph): Graph that provides the initial solution,\non which the CGNN algorithm will be applied.\nalg (str): Exploration heuristic to use, among [\"HC\", \"HCr\",\n\"tabu\", \"EHC\"]\nReturns:\nnetworkx.DiGraph: Solution given by CGNN.", "source": "codesearchnet"}
{"code": "def prepare_minibatch(self, audio_paths, texts, overwrite=False,\n                          is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):\n        \n        assert len(audio_paths) == len(texts),\\\n            \"Inputs and outputs to the network must be of the same number\"\n        \n        \n        \n        features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]\n        input_lengths = [f.shape[0] for f in features]\n        feature_dim = features[0].shape[1]\n        mb_size = len(features)\n        \n        if seq_length == -1:\n            x = np.zeros((mb_size, self.max_seq_length, feature_dim))\n        else:\n            x = np.zeros((mb_size, seq_length, feature_dim))\n        y = np.zeros((mb_size, self.max_label_length))\n        labelUtil = LabelUtil.getInstance()\n        label_lengths = []\n        for i in range(mb_size):\n            feat = features[i]\n            feat = self.normalize(feat)  \n            x[i, :feat.shape[0], :] = feat\n            if is_bi_graphemes:\n                label = generate_bi_graphemes_label(texts[i])\n                label = labelUtil.convert_bi_graphemes_to_num(label)\n                y[i, :len(label)] = label\n            else:\n                label = labelUtil.convert_word_to_num(texts[i])\n                y[i, :len(texts[i])] = label\n            label_lengths.append(len(label))\n        return {\n            'x': x,  \n            'y': y,  \n            'texts': texts,  \n            'input_lengths': input_lengths,  \n            'label_lengths': label_lengths,  \n        }", "docstring": "Featurize a minibatch of audio, zero pad them and return a dictionary\nParams:\naudio_paths (list(str)): List of paths to audio files\ntexts (list(str)): List of texts corresponding to the audio files\nReturns:\ndict: See below for contents", "source": "juraj-google-style"}
{"code": "def caption_to_item(self, caption):\n    captions = self.captions()\n    if caption not in captions:\n        raise LookupError('There is no menu item with the caption \"%s\"' % caption)\n    return self._items[captions.index(caption)]", "docstring": "Get a MenuItem from the caption.\n\nArgs:\ncaption: (str) The caption to look up.\n\nReturns:\n(MenuItem) The first-match menu item with the caption, if any.\n\nRaises:\nLookupError: If a menu item with the caption does not exist.", "source": "github-repos"}
{"code": "def from_list(cls, vals: List[Value] = [], reverse: bool = False) -> \"LinkedList\":\n        \n        res = EmptyList()\n        for v in (vals if reverse else vals[::-1]):\n            res = cls(v, res)\n        return res", "docstring": "Create an instance from a standard list.\n\nArgs:\nvals: Python list of instance values.", "source": "juraj-google-style"}
{"code": "def __init__(self, hash_queue, hash_analysis_queue, **kwargs):\n    \n    super(VirusTotalAnalyzer, self).__init__(\n        hash_queue, hash_analysis_queue, **kwargs)\n    self._api_key = None\n    self._checked_for_old_python_version = False", "docstring": "Initializes a VirusTotal analyzer.\n\nArgs:\nhash_queue (Queue.queue): queue that contains hashes to be analyzed.\nhash_analysis_queue (Queue.queue): queue the analyzer will append\nHashAnalysis objects to.", "source": "juraj-google-style"}
{"code": "def digest_content(self, rule):\n        \n        data = OrderedDict()\n\n        current_key = None\n\n        for token in rule.content:\n            \n            if token.type == 'ident':\n                \n                name = token.value\n                if name.startswith('-'):\n                    name = name[1:]\n\n                current_key = name\n                data[current_key] = None\n\n            \n            if token.type == 'string':\n                data[current_key] = token.value\n\n        return data", "docstring": "Walk on rule content tokens to return a dict of properties.\n\nThis is pretty naive and will choke/fail on everything that is more\nevolved than simple ``ident(string):value(string)``\n\nArguments:\nrule (tinycss2.ast.QualifiedRule): Qualified rule object as\nreturned by  tinycss2.\n\nReturns:\ndict: Dictionnary of retrieved variables and properties.", "source": "juraj-google-style"}
{"code": "def _get_anchor(module_to_name, fullname):\n    if (not _anchor_re.match(fullname)):\n        raise ValueError((\"'%s' is not a valid anchor\" % fullname))\n    anchor = fullname\n    for module_name in module_to_name.values():\n        if fullname.startswith((module_name + '.')):\n            rest = fullname[(len(module_name) + 1):]\n            if (len(anchor) > len(rest)):\n                anchor = rest\n    return anchor", "docstring": "Turn a full member name into an anchor.\n\nArgs:\nmodule_to_name: Dictionary mapping modules to short names.\nfullname: Fully qualified name of symbol.\n\nReturns:\nHTML anchor string.  The longest module name prefix of fullname is\nremoved to make the anchor.\n\nRaises:\nValueError: If fullname uses characters invalid in an anchor.", "source": "codesearchnet"}
{"code": "def benchmark_config():\n    config = config_pb2.ConfigProto()\n    config.graph_options.rewrite_options.dependency_optimization = rewriter_config_pb2.RewriterConfig.OFF\n    return config", "docstring": "Returns a tf.compat.v1.ConfigProto for disabling the dependency optimizer.\n\nReturns:\nA TensorFlow ConfigProto object.", "source": "github-repos"}
{"code": "def __init__(self, resolver_context):\n    \n    super(FileSystem, self).__init__()\n    self._is_cached = False\n    self._is_open = False\n    self._path_spec = None\n    self._resolver_context = resolver_context\n\n    if not getattr(self, 'TYPE_INDICATOR', None):\n      raise ValueError('Missing type indicator.')", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): resolver context.\n\nRaises:\nValueError: if a derived file system class does not define a type\nindicator.", "source": "juraj-google-style"}
{"code": "def get_course_enrollments(self, enterprise_customer, days):\n    return CourseEnrollment.objects.filter(created__gt=(datetime.datetime.now() - datetime.timedelta(days=days))).filter(user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True))", "docstring": "Get course enrollments for all the learners of given enterprise customer.\n\nArguments:\nenterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\nof this enterprise customer.\ndays (int): Include course enrollment of this number of days.\n\nReturns:\n(list): A list of CourseEnrollment objects.", "source": "codesearchnet"}
{"code": "def __init__(self, project_id, instance_id, database_id, pool=None, credentials=None, max_batch_size_bytes=1048576, max_number_rows=50, max_number_cells=500):\n    self._configuration = _BeamSpannerConfiguration(project=project_id, instance=instance_id, database=database_id, table=None, query_name=None, credentials=credentials, pool=pool, snapshot_read_timestamp=None, snapshot_exact_staleness=None)\n    self._max_batch_size_bytes = max_batch_size_bytes\n    self._max_number_rows = max_number_rows\n    self._max_number_cells = max_number_cells\n    self._database_id = database_id\n    self._project_id = project_id\n    self._instance_id = instance_id\n    self._pool = pool", "docstring": "A PTransform to write onto Google Cloud Spanner.\n\nArgs:\nproject_id: Cloud spanner project id. Be sure to use the Project ID,\nnot the Project Number.\ninstance_id: Cloud spanner instance id.\ndatabase_id: Cloud spanner database id.\nmax_batch_size_bytes: (optional) Split the mutations into batches to\nreduce the number of transaction sent to Spanner. By default it is\nset to 1 MB (1048576 Bytes).\nmax_number_rows: (optional) Split the mutations into batches to\nreduce the number of transaction sent to Spanner. By default it is\nset to 50 rows per batch.\nmax_number_cells: (optional) Split the mutations into batches to\nreduce the number of transaction sent to Spanner. By default it is\nset to 500 cells per batch.", "source": "github-repos"}
{"code": "def pyxb_to_dict(rp_pyxb):\n    \n    return {\n        'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')),\n        'num': _get_as_int(rp_pyxb),\n        'block': _get_as_set(rp_pyxb, 'block'),\n        'pref': _get_as_set(rp_pyxb, 'pref'),\n    }", "docstring": "Convert ReplicationPolicy PyXB object to a normalized dict.\n\nArgs:\nrp_pyxb: ReplicationPolicy to convert.\n\nReturns:\ndict : Replication Policy as normalized dict.\n\nExample::\n\n{\n'allowed': True,\n'num': 3,\n'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},\n'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},\n}", "source": "juraj-google-style"}
{"code": "def set_transaction_execution_result(self, txn_signature, is_valid, context_id, state_changes, events, data, error_message, error_data):\n    raise NotImplementedError()", "docstring": "Set the status of an executed transaction.\n\nCalled by the executor after a transaction has been processed.\n\nThe scheduler must know when transactions have finished being\napplied so that it can determine which transactions will become\neligible for processing.\n\nArgs:\ntxn_signature (str): The signature of the transaction, which\nmust match the header_signature field of the Transaction\nobject which was part of the added Batch.\nis_valid (bool): True if transaction applied successfully or False\nif the transaction failed and was not applied.\ncontext_id (str): If status is True, contains the context_id\nassociated with the state changes made by the transaction.\nIf status is False, this should be set to None.\n\nRaises:\nValueError: Thrown if transaction_signature does not match a\ntransaction.", "source": "codesearchnet"}
{"code": "def restore(cls, metadata_checkpoint_dir, search_alg=None, scheduler=None, trial_executor=None):\n    newest_ckpt_path = _find_newest_ckpt(metadata_checkpoint_dir)\n    with open(newest_ckpt_path, 'r') as f:\n        runner_state = json.load(f, cls=_TuneFunctionDecoder)\n    logger.warning(''.join(['Attempting to resume experiment from {}. '.format(metadata_checkpoint_dir), 'This feature is experimental, and may not work with all search algorithms. ', 'This will ignore any new changes to the specification.']))\n    from ray.tune.suggest import BasicVariantGenerator\n    runner = TrialRunner((search_alg or BasicVariantGenerator()), scheduler=scheduler, trial_executor=trial_executor)\n    runner.__setstate__(runner_state['runner_data'])\n    trials = []\n    for trial_cp in runner_state['checkpoints']:\n        new_trial = Trial(trial_cp['trainable_name'])\n        new_trial.__setstate__(trial_cp)\n        trials += [new_trial]\n    for trial in sorted(trials, key=(lambda t: t.last_update_time), reverse=True):\n        runner.add_trial(trial)\n    return runner", "docstring": "Restores all checkpointed trials from previous run.\n\nRequires user to manually re-register their objects. Also stops\nall ongoing trials.\n\nArgs:\nmetadata_checkpoint_dir (str): Path to metadata checkpoints.\nsearch_alg (SearchAlgorithm): Search Algorithm. Defaults to\nBasicVariantGenerator.\nscheduler (TrialScheduler): Scheduler for executing\nthe experiment.\ntrial_executor (TrialExecutor): Manage the execution of trials.\n\nReturns:\nrunner (TrialRunner): A TrialRunner to resume experiments from.", "source": "codesearchnet"}
{"code": "def to_text_diagram_drawer(self, *, use_unicode_characters: bool=True, qubit_namer: Optional[Callable[([ops.Qid], str)]]=None, transpose: bool=False, precision: Optional[int]=3, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT, get_circuit_diagram_info: Optional[Callable[([ops.Operation, protocols.CircuitDiagramInfoArgs], protocols.CircuitDiagramInfo)]]=None) -> TextDiagramDrawer:\n    qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(self.all_qubits())\n    qubit_map = {qubits[i]: i for i in range(len(qubits))}\n    if (qubit_namer is None):\n        qubit_namer = (lambda q: (str(q) + ('' if transpose else ': ')))\n    diagram = TextDiagramDrawer()\n    for (q, i) in qubit_map.items():\n        diagram.write(0, i, qubit_namer(q))\n    moment_groups = []\n    for moment in self._moments:\n        _draw_moment_in_diagram(moment, use_unicode_characters, qubit_map, diagram, precision, moment_groups, get_circuit_diagram_info)\n    w = diagram.width()\n    for i in qubit_map.values():\n        diagram.horizontal_line(i, 0, w)\n    if moment_groups:\n        _draw_moment_groups_in_diagram(moment_groups, use_unicode_characters, diagram)\n    if transpose:\n        diagram = diagram.transpose()\n    return diagram", "docstring": "Returns a TextDiagramDrawer with the circuit drawn into it.\n\nArgs:\nuse_unicode_characters: Determines if unicode characters are\nallowed (as opposed to ascii-only diagrams).\nqubit_namer: Names qubits in diagram. Defaults to str.\ntranspose: Arranges qubit wires vertically instead of horizontally.\nprecision: Number of digits to use when representing numbers.\nqubit_order: Determines how qubits are ordered in the diagram.\nget_circuit_diagram_info: Gets circuit diagram info. Defaults to\nprotocol with fallback.\n\nReturns:\nThe TextDiagramDrawer instance.", "source": "codesearchnet"}
{"code": "def sg_max(tensor, opt):\n    r\n    return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the maximum of elements across axis of a tensor.\n\nSee `tf.reduce_max()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def get_resource_group(access_token, subscription_id, rgname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', rgname,\n                        '?api-version=', RESOURCE_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _CheckGitkitError(self, raw_response):\n    \n    try:\n      response = simplejson.loads(raw_response)\n      if 'error' not in response:\n        return response\n      else:\n        error = response['error']\n        if 'code' in error:\n          code = error['code']\n          if str(code).startswith('4'):\n            raise errors.GitkitClientError(error['message'])\n          else:\n            raise errors.GitkitServerError(error['message'])\n    except simplejson.JSONDecodeError:\n      pass\n    raise errors.GitkitServerError('null error code from Gitkit server')", "docstring": "Raises error if API invocation failed.\n\nArgs:\nraw_response: string, the http response.\n\nRaises:\nGitkitClientError: if the error code is 4xx.\nGitkitServerError: if the response if malformed.\n\nReturns:\nSuccessful response as dict.", "source": "juraj-google-style"}
{"code": "def make_string_field_value(cls, field):\n        \n        if field.regex is not None:\n            raise NotImplementedError\n\n        string_range = cls.get_range(field)\n\n        return cls.get_random_string(string_range)", "docstring": "String Field has three constraints (apart from anything\nin the super class)\n\nArgs:\nfield (StringField): actual string field object from a\nmodel declaration\n\nReturns:\nrandom string value", "source": "juraj-google-style"}
{"code": "def _CreateLineString(self, parent, coordinate_list):\n    \n    if not coordinate_list:\n      return None\n    linestring = ET.SubElement(parent, 'LineString')\n    tessellate = ET.SubElement(linestring, 'tessellate')\n    tessellate.text = '1'\n    if len(coordinate_list[0]) == 3:\n      altitude_mode = ET.SubElement(linestring, 'altitudeMode')\n      altitude_mode.text = 'absolute'\n    coordinates = ET.SubElement(linestring, 'coordinates')\n    if len(coordinate_list[0]) == 3:\n      coordinate_str_list = ['%f,%f,%f' % t for t in coordinate_list]\n    else:\n      coordinate_str_list = ['%f,%f' % t for t in coordinate_list]\n    coordinates.text = ' '.join(coordinate_str_list)\n    return linestring", "docstring": "Create a KML LineString element.\n\nThe points of the string are given in coordinate_list. Every element of\ncoordinate_list should be one of a tuple (longitude, latitude) or a tuple\n(longitude, latitude, altitude).\n\nArgs:\nparent: The parent ElementTree.Element instance.\ncoordinate_list: The list of coordinates.\n\nReturns:\nThe LineString ElementTree.Element instance or None if coordinate_list is\nempty.", "source": "juraj-google-style"}
{"code": "def move(self, x, y):\n    self._cursor = self._normalizePoint(x, y)", "docstring": "Move the virtual cursor.\n\nArgs:\nx (int): x-coordinate to place the cursor.\ny (int): y-coordinate to place the cursor.\n\n.. seealso:: :any:`get_cursor`, :any:`print_str`, :any:`write`", "source": "codesearchnet"}
{"code": "def get(self, config_id):\n        \n        return self.prepare_model(self.client.api.inspect_config(config_id))", "docstring": "Get a config.\n\nArgs:\nconfig_id (str): Config ID.\n\nReturns:\n(:py:class:`Config`): The config.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the config does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def global_variables_initializer():\n    if context.executing_eagerly():\n        return control_flow_ops.no_op(name='global_variables_initializer')\n    return variables_initializer(global_variables())", "docstring": "Returns an Op that initializes global variables.\n\nThis is just a shortcut for `variables_initializer(global_variables())`\n\n@compatibility(TF2)\nIn TF2, variables are initialized immediately when they are created. There is\nno longer a need to run variable initializers before using them.\n@end_compatibility\n\nReturns:\nAn Op that initializes global variables in the graph.", "source": "github-repos"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False):\n    \n    encoded_stream = resolver.Resolver.OpenFileObject(\n        path_spec, resolver_context=resolver_context)\n    if not encoded_stream:\n      raise errors.BackEndError(\n          'Unable to open encoded stream: {0:s}.'.format(\n              self.path_spec.comparable))\n\n    super(EncodedStreamFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._encoded_stream = encoded_stream\n    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\n\nRaises:\nBackEndError: when the encoded stream is missing.", "source": "juraj-google-style"}
{"code": "def _ParseMRUListExValue(self, registry_key):\n    \n    mrulistex_value = registry_key.GetValueByName('MRUListEx')\n\n    \n    if not mrulistex_value:\n      return None\n\n    mrulistex_entries_map = self._GetDataTypeMap('mrulistex_entries')\n\n    context = dtfabric_data_maps.DataTypeMapContext(values={\n        'data_size': len(mrulistex_value.data)})\n\n    return self._ReadStructureFromByteStream(\n        mrulistex_value.data, 0, mrulistex_entries_map, context=context)", "docstring": "Parses the MRUListEx value in a given Registry key.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\nthe MRUListEx value.\n\nReturns:\nmrulistex_entries: MRUListEx entries or None if not available.", "source": "juraj-google-style"}
{"code": "def poll(self, channel_id=None, json=None, **kwargs):  \n        \n        path = \"/event-service/v1/channels/{}/poll\".format(channel_id)\n        r = self._httpclient.request(\n            method=\"POST\",\n            url=self.url,\n            json=json,\n            path=path,\n            **kwargs\n        )\n        return r", "docstring": "Read one or more events from a channel.\n\nReads events (log records) from the identified channel. Events\nare read in chronological order.\n\nArgs:\nchannel_id (str): The channel ID.\njson (dict): Payload/request body.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nRefer to ``event_poll.py`` example.", "source": "juraj-google-style"}
{"code": "def _GetCachedEntryDataTypeMap(self, format_type, value_data, cached_entry_offset):\n    if (format_type not in self._SUPPORTED_FORMAT_TYPES):\n        raise errors.ParseError('Unsupported format type: {0:d}'.format(format_type))\n    data_type_map_name = ''\n    if (format_type == self._FORMAT_TYPE_XP):\n        data_type_map_name = 'appcompatcache_cached_entry_xp_32bit'\n    elif (format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10)):\n        data_type_map_name = 'appcompatcache_cached_entry_header_8'\n    else:\n        cached_entry = self._ParseCommon2003CachedEntry(value_data, cached_entry_offset)\n        if ((cached_entry.path_offset_32bit == 0) and (cached_entry.path_offset_64bit != 0)):\n            number_of_bits = '64'\n        else:\n            number_of_bits = '32'\n        if (format_type == self._FORMAT_TYPE_2003):\n            data_type_map_name = 'appcompatcache_cached_entry_2003_{0:s}bit'.format(number_of_bits)\n        elif (format_type == self._FORMAT_TYPE_VISTA):\n            data_type_map_name = 'appcompatcache_cached_entry_vista_{0:s}bit'.format(number_of_bits)\n        elif (format_type == self._FORMAT_TYPE_7):\n            data_type_map_name = 'appcompatcache_cached_entry_7_{0:s}bit'.format(number_of_bits)\n    return self._GetDataTypeMap(data_type_map_name)", "docstring": "Determines the cached entry data type map.\n\nArgs:\nformat_type (int): format type.\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\ndtfabric.DataTypeMap: data type map which contains a data type definition,\nsuch as a structure, that can be mapped onto binary data or None\nif the data type map is not defined.\n\nRaises:\nParseError: if the cached entry data type map cannot be determined.", "source": "codesearchnet"}
{"code": "def _DictToListOfStrings(self, data_dict):\n    \n    ret_list = []\n    for key, value in iter(data_dict.items()):\n      if key in ('body', 'datetime', 'type', 'room', 'rooms', 'id'):\n        continue\n      ret_list.append('{0:s} = {1!s}'.format(key, value))\n\n    return ret_list", "docstring": "Converts a dictionary into a list of strings.\n\nArgs:\ndata_dict (dict[str, object]): dictionary to convert.\n\nReturns:\nlist[str]: list of strings.", "source": "juraj-google-style"}
{"code": "def delete_box(self, key):\n\t\t\n\t\tif key:\n\t\t\turi = self.box_root_uri + '/' + key\n\t\t\treturn self._req('delete', uri)\n\t\telse:\n\t\t\treturn requests.codes.bad_request, None", "docstring": "Deletes the box specified by the key\nArgs:\nreturns \t(status code for the DELETE request, success message dict)", "source": "juraj-google-style"}
{"code": "def _row_from_mapping(mapping, schema):\n    if (len(schema) == 0):\n        raise ValueError(_TABLE_HAS_NO_SCHEMA)\n    row = []\n    for field in schema:\n        if (field.mode == 'REQUIRED'):\n            row.append(mapping[field.name])\n        elif (field.mode == 'REPEATED'):\n            row.append(mapping.get(field.name, ()))\n        elif (field.mode == 'NULLABLE'):\n            row.append(mapping.get(field.name))\n        else:\n            raise ValueError('Unknown field mode: {}'.format(field.mode))\n    return tuple(row)", "docstring": "Convert a mapping to a row tuple using the schema.\n\nArgs:\nmapping (Dict[str, object])\nMapping of row data: must contain keys for all required fields in\nthe schema. Keys which do not correspond to a field in the schema\nare ignored.\nschema (List[google.cloud.bigquery.schema.SchemaField]):\nThe schema of the table destination for the rows\n\nReturns:\nTuple[object]:\nTuple whose elements are ordered according to the schema.\n\nRaises:\nValueError: If schema is empty.", "source": "codesearchnet"}
{"code": "def get_neighbor_ip(ip_addr, cidr=\"30\"):\n    \n    our_octet = None\n    neighbor_octet = None\n    try:\n        ip_addr_split = ip_addr.split(\".\")\n        max_counter = 0\n        if int(cidr) == 30:\n            ranger = 4\n        elif int(cidr) == 31:\n            ranger = 2\n        while max_counter < 256:\n            try:\n                if int(ip_addr_split[3]) >= max_counter and int(ip_addr_split[3]) < (max_counter + ranger):\n                    if ranger == 4:\n                        our_octet = max_counter + 1\n                        neighbor_octet = max_counter + 2\n                        break\n                    elif ranger == 2:\n                        our_octet = max_counter\n                        neighbor_octet = max_counter + 1\n                        break   \n                max_counter += ranger\n            except UnboundLocalError:\n                print(\"The mask between the neighbors must be 30, or 31\")\n                exit(\"BAD NEIGHBOR MASK\")\n        if int(ip_addr_split[3]) == our_octet:\n            our_ip_addr = \"%s.%s.%s.%i\" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)\n            neighbor_ip_addr = \"%s.%s.%s.%i\" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)\n        elif int(ip_addr_split[3]) == neighbor_octet:\n            neighbor_ip_addr = \"%s.%s.%s.%i\" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)\n            our_ip_addr = \"%s.%s.%s.%i\" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)\n        else:\n            our_ip_addr = \"%s.%s.%s.%i\" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)\n            neighbor_ip_addr = \"%s.%s.%s.%i\" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)\n        return our_ip_addr, neighbor_ip_addr\n    except IndexError:\n        LOGGER.critical('Function get_neighbor_ip IndexError ip_addr {item} cidr {cidr}'.format(item=ip_addr,\n                                                                                                cidr=cidr))\n        raise IndexError(\"You have entered invalid input, you must enter a ipv4 address\")", "docstring": "Function to figure out the IP's between neighbors address\nArgs:\nip_addr: Unicast IP address in the following format 192.168.1.1\ncidr: CIDR value of 30, or 31\n\nReturns: returns Our IP and the Neighbor IP in a tuple", "source": "juraj-google-style"}
{"code": "def CreateSharedBudget(client):\n  \n  \n  budget_service = client.GetService('BudgetService', version='v201809')\n\n  \n  budget = {\n      'name': 'Shared Interplanetary Budget \n      'amount': {\n          'microAmount': '2000000'\n      },\n      'deliveryMethod': 'STANDARD',\n      'isExplicitlyShared': 'true'\n  }\n\n  \n  operation = {\n      'operator': 'ADD',\n      'operand': budget\n  }\n\n  response = budget_service.mutate([operation])\n  return response['value'][0]", "docstring": "Creates an explicit budget to be used only to create the Campaign.\n\nArgs:\nclient: AdWordsClient the client to run the example with.\n\nReturns:\ndict An object representing a shared budget.", "source": "juraj-google-style"}
{"code": "def load(url_or_handle, cache=None, **kwargs):\n    \n\n    ext = get_extension(url_or_handle)\n    try:\n        loader = loaders[ext.lower()]\n        message = \"Using inferred loader '%s' due to passed file extension '%s'.\"\n        log.debug(message, loader.__name__[6:], ext)\n        return load_using_loader(url_or_handle, loader, cache, **kwargs)\n\n    except KeyError:\n\n        log.warning(\"Unknown extension '%s', attempting to load as image.\", ext)\n        try:\n            with read_handle(url_or_handle, cache=cache) as handle:\n                result = _load_img(handle)\n        except Exception as e:\n            message = \"Could not load resource %s as image. Supported extensions: %s\"\n            log.error(message, url_or_handle, list(loaders))\n            raise RuntimeError(message.format(url_or_handle, list(loaders)))\n        else:\n            log.info(\"Unknown extension '%s' successfully loaded as image.\", ext)\n            return result", "docstring": "Load a file.\n\nFile format is inferred from url. File retrieval strategy is inferred from\nURL. Returned object type is inferred from url extension.\n\nArgs:\nurl_or_handle: a (reachable) URL, or an already open file handle\n\nRaises:\nRuntimeError: If file extension or URL is not supported.", "source": "juraj-google-style"}
{"code": "def make_connection_with_forwarded_port(self, host_port, device_port, uid=UNKNOWN_UID, cmd=ConnectionHandshakeCommand.INIT):\n    self.host_port = host_port\n    self.device_port = device_port\n    self._counter = self._id_counter()\n    self.create_socket_connection()\n    self.send_handshake_request(uid, cmd)", "docstring": "Makes a connection to the server with the given forwarded port.\n\nThis process assumes that a device port has already been forwarded to a\nhost port, and it only makes a connection to the snippet server based on\nthe forwarded port. This is typically used by clients that share the same\nsnippet server, e.g. the snippet client and its event client.\n\nArgs:\nhost_port: int, the host port which has already been forwarded.\ndevice_port: int, the device port listened by the snippet server.\nuid: int, the uid of the server session to continue. It will be ignored\nif the `cmd` requires the server to create a new session.\ncmd: ConnectionHandshakeCommand, the handshake command Enum for the\nserver, which requires the server to create a new session or use the\ncurrent session.", "source": "github-repos"}
{"code": "def coord(self):\n    return self._coord", "docstring": "Return the Coordinator used by the Supervisor.\n\nThe Coordinator can be useful if you want to run multiple threads\nduring your training.\n\nReturns:\nA Coordinator object.", "source": "github-repos"}
{"code": "def insert(self, table, insert_obj, ignore=True):\n        \n\n        if isinstance(insert_obj, pd.DataFrame):\n            if insert_obj.empty:\n                raise ValueError('The input DataFrame is empty, please check!')\n            insert_obj = insert_obj.to_dict(orient='records')\n        elif not isinstance(insert_obj, list):\n            raise ValueError(\n                f\"The {reprlib.repr(insert_obj)} must be list of dicts type!\")\n\n        ignore_str = 'IGNORE' if ignore else ''\n        return self._session.execute(\n            table.__table__.insert().prefix_with(ignore_str), insert_obj)", "docstring": "[insert bulk data]\n\nArguments:\ntable {[DeclarativeMeta cls]} -- [reflection of table]\ninsert_obj {[pd.DataFrame or list of dicts]} -- [insert_obj]\n\nKeyword Arguments:\nignore {bool} -- [wether ignore exception or not] (default: {True})\n\nRaises:\nValueError -- [f\"The {reprlib.repr(insert_obj)} must be list of dicts type!\"]\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def __init__(self, action, error):\n        \n        logger.error(action, exc_info=error)\n\n        QDialog.__init__(self)\n        self.setWindowTitle(_('Autosave error'))\n        self.setModal(True)\n\n        layout = QVBoxLayout()\n\n        header = _('Error message:')\n        txt = '<br>{}<br><br>{}<br>{!s}'.format(action, header, error)\n        layout.addWidget(QLabel(txt))\n        layout.addSpacing(15)\n\n        txt = _(\"Hide all future autosave-related errors during this session\")\n        self.dismiss_box = QCheckBox(txt)\n        layout.addWidget(self.dismiss_box)\n        layout.addSpacing(15)\n\n        button_box = QDialogButtonBox(QDialogButtonBox.Ok)\n        button_box.accepted.connect(self.accept)\n        layout.addWidget(button_box)\n\n        self.setLayout(layout)", "docstring": "Constructor.\n\nArgs:\naction (str): what Spyder was trying to do when error occured\nerror (Exception): the error that occured", "source": "juraj-google-style"}
{"code": "def ConvertOutputToUnicode(self, buf):\n    if isinstance(buf, str):\n        buf = buf.encode(self._encoding)\n    return str(buf, self._encoding, 'replace')", "docstring": "Converts a console output string buf to unicode.\n\nMainly used for testing. Allows test comparisons in unicode while ensuring\nthat unicode => encoding => unicode works.\n\nArgs:\nbuf: The console output string to convert.\n\nReturns:\nThe console output string buf converted to unicode.", "source": "github-repos"}
{"code": "def __init__(self, dims):\n    \n    self._dims = [convert_to_dimension(d) for d in tuple(dims)]\n    if len(set(dims)) != len(dims):\n      raise ValueError(\"Shape must not have repeated dimensions %s\" % dims)", "docstring": "Constructs a shape for a Tensor or Mesh.\n\nArgs:\ndims: List-like of Dimensions.\n\nRaises:\nValueError: If Dimensions are repeated.", "source": "juraj-google-style"}
{"code": "def load_entity(self, name, file_name, reload_cache=False):\n        \n        Entity.verify_name(name)\n        self.entities.load(Entity.wrap_name(name), file_name, reload_cache)\n        with open(file_name) as f:\n            self.padaos.add_entity(name, f.read().split('\\n'))\n        self.must_train = True", "docstring": "Loads an entity, optionally checking the cache first\n\nArgs:\nname (str): The associated name of the entity\nfile_name (str): The location of the entity file\nreload_cache (bool): Whether to refresh all of cache", "source": "juraj-google-style"}
{"code": "def __init__(self, etk, cdr_document: Dict, mime_type, url, doc_id=None) -> None:\n\n        \n        Segment.__init__(self, json_path=\"$\", _value=cdr_document, _document=self)\n        self.etk = etk\n        self.cdr_document = cdr_document\n        self.mime_type = mime_type\n        self.url = url\n        if doc_id:\n            self.cdr_document[\"doc_id\"] = doc_id\n        self.extraction_provenance_records = list()\n        if self.etk.kg_schema:\n            self.kg = KnowledgeGraph(self.etk.kg_schema, self.etk.ontology, self)\n        else:\n            self.kg = None\n            if not self.etk.kg_schema:\n                self.etk.log(\"Schema not found.\", \"warning\", self.doc_id, self.url)\n        self._provenance_id_index = 0\n        self._provenances = dict()\n        self._jsonpath_provenances = dict()\n        self._kg_provenances = dict()", "docstring": "Wrapper object for CDR documents.\n\nArgs:\netk (ETK): embed the etk object so that docs have access to global info.\ncdr_document (JSON): the raw CDR document received in ETK.\n\nReturns: the wrapped CDR document", "source": "juraj-google-style"}
{"code": "def auth(self, token):\n    t = self.sendToken(token)\n    return self.getToken(t)", "docstring": "Take an existing Skype token and refresh it, to extend the expiry time without other credentials.\n\nArgs:\ntoken (str): existing Skype token\n\nReturns:\n(str, datetime.datetime) tuple: Skype token, and associated expiry if known\n\nRaises:\n.SkypeAuthException: if the login request is rejected\n.SkypeApiException: if the login form can't be processed", "source": "codesearchnet"}
{"code": "def _snapshot_task_progresses(self) -> Iterable[_pywrap_server_lib.SnapshotTaskProgressWrapper]:\n    return self._server.snapshot_task_progresses()", "docstring": "Returns the progresses of the snapshot tasks currently being executed.\n\nReturns:\nAn `Iterable[common_pb2.SnapshotTaskProgress]`.", "source": "github-repos"}
{"code": "def make_iterable(value):\n    if (sys.version_info <= (3, 0)):\n        if isinstance(value, unicode):\n            value = str(value)\n    if (isinstance(value, str) or isinstance(value, dict)):\n        value = [value]\n    if (not isinstance(value, collections.Iterable)):\n        raise TypeError('value must be an iterable object')\n    return value", "docstring": "Converts the supplied value to a list object\n\nThis function will inspect the supplied value and return an\niterable in the form of a list.\n\nArgs:\nvalue (object): An valid Python object\n\nReturns:\nAn iterable object of type list", "source": "codesearchnet"}
{"code": "def hex(self):\n    return '", "docstring": "Returns the HTML-style hex code for the Colour.\n\nReturns:\nstr: the colour as a HTML-sytle hex string", "source": "codesearchnet"}
{"code": "def fastrcnn_predictions(boxes, scores):\n    assert (boxes.shape[1] == cfg.DATA.NUM_CLASS)\n    assert (scores.shape[1] == cfg.DATA.NUM_CLASS)\n    boxes = tf.transpose(boxes, [1, 0, 2])[(1:, :, :)]\n    scores = tf.transpose(scores[(:, 1:)], [1, 0])\n\n    def f(X):\n        '\\n        prob: n probabilities\\n        box: nx4 boxes\\n\\n        Returns: n boolean, the selection\\n        '\n        (prob, box) = X\n        output_shape = tf.shape(prob, out_type=tf.int64)\n        ids = tf.reshape(tf.where((prob > cfg.TEST.RESULT_SCORE_THRESH)), [(- 1)])\n        prob = tf.gather(prob, ids)\n        box = tf.gather(box, ids)\n        selection = tf.image.non_max_suppression(box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)\n        selection = tf.gather(ids, selection)\n        if (get_tf_version_tuple() >= (1, 13)):\n            sorted_selection = tf.sort(selection, direction='ASCENDING')\n            mask = tf.sparse.SparseTensor(indices=tf.expand_dims(sorted_selection, 1), values=tf.ones_like(sorted_selection, dtype=tf.bool), dense_shape=output_shape)\n            mask = tf.sparse.to_dense(mask, default_value=False)\n        else:\n            sorted_selection = (- tf.nn.top_k((- selection), k=tf.size(selection))[0])\n            mask = tf.sparse_to_dense(sparse_indices=sorted_selection, output_shape=output_shape, sparse_values=True, default_value=False)\n        return mask\n    buggy_tf = (get_tf_version_tuple() in [(1, 11), (1, 12)])\n    masks = tf.map_fn(f, (scores, boxes), dtype=tf.bool, parallel_iterations=(1 if buggy_tf else 10))\n    selected_indices = tf.where(masks)\n    scores = tf.boolean_mask(scores, masks)\n    (topk_scores, topk_indices) = tf.nn.top_k(scores, tf.minimum(cfg.TEST.RESULTS_PER_IM, tf.size(scores)), sorted=False)\n    filtered_selection = tf.gather(selected_indices, topk_indices)\n    (cat_ids, box_ids) = tf.unstack(filtered_selection, axis=1)\n    final_scores = tf.identity(topk_scores, name='scores')\n    final_labels = tf.add(cat_ids, 1, name='labels')\n    final_ids = tf.stack([cat_ids, box_ids], axis=1, name='all_ids')\n    final_boxes = tf.gather_nd(boxes, final_ids, name='boxes')\n    return (final_boxes, final_scores, final_labels)", "docstring": "Generate final results from predictions of all proposals.\n\nArgs:\nboxes: n#classx4 floatbox in float32\nscores: nx#class\n\nReturns:\nboxes: Kx4\nscores: K\nlabels: K", "source": "codesearchnet"}
{"code": "def dequantize(arr, min_val, max_val, levels, dtype=np.float64):\n    \n    if not (isinstance(levels, int) and levels > 1):\n        raise ValueError(\n            'levels must be a positive integer, but got {}'.format(levels))\n    if min_val >= max_val:\n        raise ValueError(\n            'min_val ({}) must be smaller than max_val ({})'.format(\n                min_val, max_val))\n\n    dequantized_arr = (arr + 0.5).astype(dtype) * (\n        max_val - min_val) / levels + min_val\n\n    return dequantized_arr", "docstring": "Dequantize an array.\n\nArgs:\narr (ndarray): Input array.\nmin_val (scalar): Minimum value to be clipped.\nmax_val (scalar): Maximum value to be clipped.\nlevels (int): Quantization levels.\ndtype (np.type): The type of the dequantized array.\n\nReturns:\ntuple: Dequantized array.", "source": "juraj-google-style"}
{"code": "def _prefix_from_prefix_string(self, prefixlen_str):\n    try:\n        if (not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str)):\n            raise ValueError\n        prefixlen = int(prefixlen_str)\n        if (not (0 <= prefixlen <= self._max_prefixlen)):\n            raise ValueError\n    except ValueError:\n        raise NetmaskValueError(('%s is not a valid prefix length' % prefixlen_str))\n    return prefixlen", "docstring": "Turn a prefix length string into an integer.\n\nArgs:\nprefixlen_str: A decimal string containing the prefix length.\n\nReturns:\nThe prefix length as an integer.\n\nRaises:\nNetmaskValueError: If the input is malformed or out of range.", "source": "codesearchnet"}
{"code": "def date_clean(date, dashboard_style=False):\n  \n  if dashboard_style:\n    dt = str(date)\n    out = dt[4:6] + '/' + dt[6:] + '/' + dt[:4]\n  else:\n    dt = str(date)\n    out = dt[:4] + '-' + dt[4:6] + '-' + dt[6:]\n  return out", "docstring": "Clean the numerical date value in order to present it.\n\nArgs:\nboo: numerical date (20160205)\nReturns:\nStringified version of the input date (\"2016-02-05\")", "source": "juraj-google-style"}
{"code": "def _process_parameter_type(param, param_name, func):\n    optional = False\n    if param.annotation != inspect.Parameter.empty:\n        param_type = param.annotation\n        if 'typing' in str(param_type):\n            param_type = ''.join(str(param_type).split('typing.')).replace('transformers.', '~')\n        elif hasattr(param_type, '__module__'):\n            param_type = f'{param_type.__module__.replace('transformers.', '~').replace('builtins', '')}.{param.annotation.__name__}'\n            if param_type[0] == '.':\n                param_type = param_type[1:]\n        elif False:\n            print(f'🚨 {param_type} for {param_name} of {func.__qualname__} in file {func.__code__.co_filename} has an invalid type')\n        if 'ForwardRef' in param_type:\n            param_type = re.sub(\"ForwardRef\\\\('([\\\\w.]+)'\\\\)\", '\\\\1', param_type)\n        if 'Optional' in param_type:\n            param_type = re.sub('Optional\\\\[(.*?)\\\\]', '\\\\1', param_type)\n            optional = True\n    else:\n        param_type = ''\n    return (param_type, optional)", "docstring": "Process and format a parameter's type annotation.\n\nArgs:\nparam (`inspect.Parameter`): The parameter from the function signature\nparam_name (`str`): The name of the parameter\nfunc (`function`): The function the parameter belongs to", "source": "github-repos"}
{"code": "def _openfile(instance, filething, filename, fileobj, writable, create):\n    assert ((not create) or writable)\n    if isinstance(filething, FileThing):\n        filename = filething.filename\n        fileobj = filething.fileobj\n        filething = None\n    if (filething is not None):\n        if is_fileobj(filething):\n            fileobj = filething\n        elif hasattr(filething, '__fspath__'):\n            filename = filething.__fspath__()\n            if (not isinstance(filename, (bytes, text_type))):\n                raise TypeError('expected __fspath__() to return a filename')\n        else:\n            filename = filething\n    if (instance is not None):\n        if (not writable):\n            instance.filename = filename\n        elif (filename is None):\n            filename = getattr(instance, 'filename', None)\n    if (fileobj is not None):\n        verify_fileobj(fileobj, writable=writable)\n        (yield FileThing(fileobj, filename, (filename or fileobj_name(fileobj))))\n    elif (filename is not None):\n        verify_filename(filename)\n        inmemory_fileobj = False\n        try:\n            fileobj = open(filename, ('rb+' if writable else 'rb'))\n        except IOError as e:\n            if (writable and (e.errno == errno.EOPNOTSUPP)):\n                try:\n                    with open(filename, 'rb') as fileobj:\n                        fileobj = BytesIO(fileobj.read())\n                except IOError as e2:\n                    raise MutagenError(e2)\n                inmemory_fileobj = True\n            elif (create and (e.errno == errno.ENOENT)):\n                assert writable\n                try:\n                    fileobj = open(filename, 'wb+')\n                except IOError as e2:\n                    raise MutagenError(e2)\n            else:\n                raise MutagenError(e)\n        with fileobj as fileobj:\n            (yield FileThing(fileobj, filename, filename))\n            if inmemory_fileobj:\n                assert writable\n                data = fileobj.getvalue()\n                try:\n                    with open(filename, 'wb') as fileobj:\n                        fileobj.write(data)\n                except IOError as e:\n                    raise MutagenError(e)\n    else:\n        raise TypeError('Missing filename or fileobj argument')", "docstring": "yields a FileThing\n\nArgs:\nfilething: Either a file name, a file object or None\nfilename: Either a file name or None\nfileobj: Either a file object or None\nwritable (bool): if the file should be opened\ncreate (bool): if the file should be created if it doesn't exist.\nimplies writable\nRaises:\nMutagenError: In case opening the file failed\nTypeError: in case neither a file name or a file object is passed", "source": "codesearchnet"}
{"code": "def write_file(self, path, contents):\n\t\t\n\t\tpath = self._get_dist_path(path)\n\t\tif not os.path.isdir(os.path.dirname(path)):\n\t\t\tos.makedirs(os.path.dirname(path))\n\t\tif isinstance(contents, bytes):\n\t\t\tmode = 'wb+'\n\t\telse:\n\t\t\tmode = 'w'\n\t\twith open(path, mode) as file:\n\t\t\tfile.write(contents)", "docstring": "Write a file of any type to the destination path. Useful for files like\nrobots.txt, manifest.json, and so on.\n\nArgs:\npath (str): The name of the file to write to.\ncontents (str or bytes): The contents to write.", "source": "juraj-google-style"}
{"code": "def to_json(self):\n    return {'lat': self.lat, 'lon': self.lon, 'time': (self.time.isoformat() if (self.time is not None) else None)}", "docstring": "Creates a JSON serializable representation of this instance\n\nReturns:\n:obj:`dict`: For example,\n{\n\"lat\": 9.3470298,\n\"lon\": 3.79274,\n\"time\": \"2016-07-15T15:27:53.574110\"\n}", "source": "codesearchnet"}
{"code": "def remove(package_name):\n    if (package_name not in packages):\n        raise HolodeckException(('Unknown package name ' + package_name))\n    for (config, path) in _iter_packages():\n        if (config['name'] == package_name):\n            shutil.rmtree(path)", "docstring": "Removes a holodeck package.\n\nArgs:\npackage_name (str): the name of the package to remove", "source": "codesearchnet"}
{"code": "def pack_small_tensors(tower_grads, max_bytes=0):\n    assert (max_bytes >= 0)\n    orig_grads = [g for (g, _) in tower_grads[0]]\n    assert all(((g.dtype == tf.float32) for g in orig_grads))\n    sizes = [(4 * g.shape.num_elements()) for g in orig_grads]\n    print_stats(sizes)\n    small_ranges = []\n    large_indices = []\n    new_sizes = []\n\n    def end_interval(indices, small_ranges, large_indices):\n        if (len(indices) > 1):\n            small_ranges.insert(0, [indices[0], indices[(- 1)]])\n        else:\n            large_indices.insert(0, indices[0])\n    cur_range = []\n    cur_size = 0\n    for (i, s) in reversed(list(enumerate(sizes))):\n        if (cur_size > max_bytes):\n            end_interval(cur_range, small_ranges, large_indices)\n            new_sizes.insert(0, cur_size)\n            cur_range = []\n            cur_size = 0\n        cur_range.insert(0, i)\n        cur_size += s\n    end_interval(cur_range, small_ranges, large_indices)\n    new_sizes.insert(0, cur_size)\n    print_stats(new_sizes)\n    num_gv = len(orig_grads)\n    packing = {}\n    if len(small_ranges):\n        new_tower_grads = []\n        for (dev_idx, gv_list) in enumerate(tower_grads):\n            assert (len(gv_list) == num_gv), \"Possible cause: Networks constructed on different workers don't have the same number of variables. If you use tf.GraphKeys or tf.global_variables() with multiple graphs per worker during network construction, you need to use appropriate scopes, see https:\n            new_gv_list = []\n            for r in small_ranges:\n                key = ('%d:%d' % (dev_idx, len(new_gv_list)))\n                new_gv_list.append((pack_range(key, packing, gv_list, r), 'packing_var_placeholder'))\n            for i in large_indices:\n                new_gv_list.append(gv_list[i])\n            new_tower_grads.append(new_gv_list)\n        return (new_tower_grads, packing)\n    else:\n        return (tower_grads, None)", "docstring": "Concatenate gradients together more intelligently.\n\nDoes binpacking\nArgs:\ntower_grads: List of lists of (gradient, variable) tuples.\nmax_bytes: Int giving max number of bytes in a tensor that\nmay be considered small.", "source": "codesearchnet"}
{"code": "def format_sympy_expr(sympy_expr, functions=None):\n  \n  if functions is None:\n    functions = {}\n  str_expr = str(sympy_expr)\n  result = str_expr.replace(\" \", \"\")\n  for fn_name, char in six.iteritems(functions):\n    result = result.replace(fn_name, char)\n  return result", "docstring": "Convert sympy expression into a string which can be encoded.\n\nArgs:\nsympy_expr: Any sympy expression tree or string.\nfunctions: Defines special functions. A dict mapping human readable string\nnames, like \"log\", \"exp\", \"sin\", \"cos\", etc., to single chars. Each\nfunction gets a unique token, like \"L\" for \"log\".\n\nReturns:\nA string representation of the expression suitable for encoding as a\nsequence input.", "source": "juraj-google-style"}
{"code": "def WinChmod(filename, acl_list, user=None):\n  \n  if user is None:\n    user = win32api.GetUserName()\n\n  if not os.path.exists(filename):\n    raise RuntimeError(\"filename %s does not exist\" % filename)\n\n  acl_bitmask = 0\n  for acl in acl_list:\n    acl_bitmask |= getattr(ntsecuritycon, acl)\n\n  dacl = win32security.ACL()\n  win_user, _, _ = win32security.LookupAccountName(\"\", user)\n\n  dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user)\n\n  security_descriptor = win32security.GetFileSecurity(\n      filename, win32security.DACL_SECURITY_INFORMATION)\n\n  \n  security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl,\n                                                DACL_DEFAULT)\n  win32security.SetFileSecurity(\n      filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor)", "docstring": "Provide chmod-like functionality for windows.\n\nDoco links:\ngoo.gl/n7YR1\ngoo.gl/rDv81\ngoo.gl/hDobb\n\nArgs:\nfilename: target filename for acl\n\nacl_list: list of ntsecuritycon acl strings to be applied with bitwise OR.\ne.g. [\"FILE_GENERIC_READ\", \"FILE_GENERIC_WRITE\"]\n\nuser: username string. If not specified we use the user we are running as.\n\nRaises:\nAttributeError: if a bad permission is passed\nRuntimeError: if filename doesn't exist", "source": "juraj-google-style"}
{"code": "def save_aggregate_report_to_elasticsearch(aggregate_report,\n                                           index_suffix=None,\n                                           monthly_indexes=False):\n    \n    logger.debug(\"Saving aggregate report to Elasticsearch\")\n    aggregate_report = aggregate_report.copy()\n    metadata = aggregate_report[\"report_metadata\"]\n    org_name = metadata[\"org_name\"]\n    report_id = metadata[\"report_id\"]\n    domain = aggregate_report[\"policy_published\"][\"domain\"]\n    begin_date = human_timestamp_to_datetime(metadata[\"begin_date\"])\n    end_date = human_timestamp_to_datetime(metadata[\"end_date\"])\n    begin_date_human = begin_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n    end_date_human = end_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n    if monthly_indexes:\n        index_date = begin_date.strftime(\"%Y-%m\")\n    else:\n        index_date = begin_date.strftime(\"%Y-%m-%d\")\n    aggregate_report[\"begin_date\"] = begin_date\n    aggregate_report[\"end_date\"] = end_date\n    date_range = [aggregate_report[\"begin_date\"],\n                  aggregate_report[\"end_date\"]]\n\n    org_name_query = Q(dict(match=dict(org_name=org_name)))\n    report_id_query = Q(dict(match=dict(report_id=report_id)))\n    domain_query = Q(dict(match={\"published_policy.domain\": domain}))\n    begin_date_query = Q(dict(match=dict(date_range=begin_date)))\n    end_date_query = Q(dict(match=dict(date_range=end_date)))\n\n    search = Search(index=\"dmarc_aggregate*\")\n    query = org_name_query & report_id_query & domain_query\n    query = query & begin_date_query & end_date_query\n    search.query = query\n\n    existing = search.execute()\n    if len(existing) > 0:\n        raise AlreadySaved(\"An aggregate report ID {0} from {1} about {2} \"\n                           \"with a date range of {3} UTC to {4} UTC already \"\n                           \"exists in \"\n                           \"Elasticsearch\".format(report_id,\n                                                  org_name,\n                                                  domain,\n                                                  begin_date_human,\n                                                  end_date_human))\n    published_policy = _PublishedPolicy(\n        domain=aggregate_report[\"policy_published\"][\"domain\"],\n        adkim=aggregate_report[\"policy_published\"][\"adkim\"],\n        aspf=aggregate_report[\"policy_published\"][\"aspf\"],\n        p=aggregate_report[\"policy_published\"][\"p\"],\n        sp=aggregate_report[\"policy_published\"][\"sp\"],\n        pct=aggregate_report[\"policy_published\"][\"pct\"],\n        fo=aggregate_report[\"policy_published\"][\"fo\"]\n    )\n\n    for record in aggregate_report[\"records\"]:\n        agg_doc = _AggregateReportDoc(\n            xml_schemea=aggregate_report[\"xml_schema\"],\n            org_name=metadata[\"org_name\"],\n            org_email=metadata[\"org_email\"],\n            org_extra_contact_info=metadata[\"org_extra_contact_info\"],\n            report_id=metadata[\"report_id\"],\n            date_range=date_range,\n            errors=metadata[\"errors\"],\n            published_policy=published_policy,\n            source_ip_address=record[\"source\"][\"ip_address\"],\n            source_country=record[\"source\"][\"country\"],\n            source_reverse_dns=record[\"source\"][\"reverse_dns\"],\n            source_base_domain=record[\"source\"][\"base_domain\"],\n            message_count=record[\"count\"],\n            
disposition=record[\"policy_evaluated\"][\"disposition\"],\n            dkim_aligned=record[\"policy_evaluated\"][\"dkim\"] == \"pass\",\n            spf_aligned=record[\"policy_evaluated\"][\"spf\"] == \"pass\",\n            header_from=record[\"identifiers\"][\"header_from\"],\n            envelope_from=record[\"identifiers\"][\"envelope_from\"],\n            envelope_to=record[\"identifiers\"][\"envelope_to\"]\n        )\n\n        for override in record[\"policy_evaluated\"][\"policy_override_reasons\"]:\n            agg_doc.add_policy_override(type_=override[\"type\"],\n                                        comment=override[\"comment\"])\n\n        for dkim_result in record[\"auth_results\"][\"dkim\"]:\n            agg_doc.add_dkim_result(domain=dkim_result[\"domain\"],\n                                    selector=dkim_result[\"selector\"],\n                                    result=dkim_result[\"result\"])\n\n        for spf_result in record[\"auth_results\"][\"spf\"]:\n            agg_doc.add_spf_result(domain=spf_result[\"domain\"],\n                                   scope=spf_result[\"scope\"],\n                                   result=spf_result[\"result\"])\n\n        index = \"dmarc_aggregate\"\n        if index_suffix:\n            index = \"{0}_{1}\".format(index, index_suffix)\n        index = \"{0}-{1}\".format(index, index_date)\n        create_indexes([index])\n        agg_doc.meta.index = index\n\n        try:\n            agg_doc.save()\n        except Exception as e:\n            raise ElasticsearchError(\n                \"Elasticsearch error: {0}\".format(e.__str__()))", "docstring": "Saves a parsed DMARC aggregate report to ElasticSearch\n\nArgs:\naggregate_report (OrderedDict): A parsed forensic report\nindex_suffix (str): The suffix of the name of the index to save to\nmonthly_indexes (bool): Use monthly indexes instead of daily indexes\n\nRaises:\nAlreadySaved", "source": "juraj-google-style"}
{"code": "def load_all_yamls(cls, directories):\n    yaml_files = []\n    loaded_yamls = {}\n    for d in directories:\n        if (d.startswith('/home') and (not os.path.exists(d))):\n            os.makedirs(d)\n        for (dirname, subdirs, files) in os.walk(d):\n            yaml_files.extend(map((lambda x: os.path.join(dirname, x)), filter((lambda x: x.endswith('.yaml')), files)))\n    for f in yaml_files:\n        loaded_yamls[f] = cls.load_yaml_by_path(f)\n    return loaded_yamls", "docstring": "Loads yaml files from all given directories.\n\nArgs:\ndirectories: list of directories to search\nReturns:\ndict of {fullpath: loaded_yaml_structure}", "source": "codesearchnet"}
{"code": "def DeleteSignedBinary(binary_urn, token=None):\n    if _ShouldUseLegacyDatastore():\n        try:\n            aff4.FACTORY.Open(binary_urn, aff4_type=aff4.AFF4Stream, mode='r', token=token)\n        except aff4.InstantiationError:\n            raise SignedBinaryNotFoundError(binary_urn)\n        aff4.FACTORY.Delete(binary_urn, token=token)\n    if data_store.RelationalDBEnabled():\n        try:\n            data_store.REL_DB.ReadSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn))\n        except db.UnknownSignedBinaryError:\n            if _ShouldUseLegacyDatastore():\n                return\n            else:\n                raise SignedBinaryNotFoundError(binary_urn)\n        data_store.REL_DB.DeleteSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn))", "docstring": "Deletes the binary with the given urn from the datastore.\n\nArgs:\nbinary_urn: RDFURN that serves as a unique identifier for the binary.\ntoken: ACL token to use with the legacy (non-relational) datastore.\n\nRaises:\nSignedBinaryNotFoundError: If the signed binary does not exist.", "source": "codesearchnet"}
{"code": "def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):\n        \n        _logger.debug('Saving file to {0}, mode={1}.',\n                      filename, mode)\n\n        dir_path = os.path.dirname(filename)\n        if dir_path and not os.path.exists(dir_path):\n            os.makedirs(dir_path)\n\n        response.body = Body(open(filename, mode))", "docstring": "Open a file object on to the Response Body.\n\nArgs:\nfilename: The path where the file is to be saved\nresponse: Response\nmode: The file mode\n\nThis function will create the directories if not exist.", "source": "juraj-google-style"}
{"code": "def _create_keras_history_helper(tensors, processed_ops, created_layers):\n    if ops.executing_eagerly_outside_functions():\n        raise ValueError('`create_keras_history` should only be called if eager is disabled!')\n    from tensorflow.python.keras.engine import base_layer\n    tensor_list = nest.flatten(tensors)\n    sparse_ops = []\n    ragged_tensors = []\n    for tensor in tensor_list:\n        if getattr(tensor, '_keras_history', None) is not None:\n            continue\n        if isinstance(tensor, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n            sparse_ops.append(tensor.op)\n            continue\n        if tf_utils.is_ragged(tensor):\n            ragged_tensors.append(tensor)\n            continue\n        op = tensor.op\n        if op not in processed_ops:\n            op_inputs = list(op.inputs)\n            constants = {}\n            layer_inputs = []\n            for i, op_input in enumerate(op_inputs):\n                if uses_keras_history(op_input):\n                    layer_inputs.append(op_input)\n                else:\n                    ds_with_session = distribute_lib.in_cross_replica_context() and (not ops.executing_eagerly_outside_functions())\n                    using_xla = control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())\n                    if ds_with_session or using_xla or _UNSAFE_GRAPH_OP_LAYER_CREATION:\n                        constants[i] = op_input\n                    else:\n                        with ops.init_scope():\n                            constants[i] = backend.function([], op_input)([])\n            layer_inputs = unnest_if_single_tensor(layer_inputs)\n            processed_ops, created_layers = _create_keras_history_helper(layer_inputs, processed_ops, created_layers)\n            name = op.name\n            node_def = op.node_def.SerializeToString()\n            op_layer = base_layer.TensorFlowOpLayer(node_def, constants=constants, name=name)\n            created_layers.append(op_layer)\n            op_layer._set_connectivity_metadata(args=(layer_inputs,), kwargs={}, outputs=op.outputs)\n            processed_ops.update([op])\n    if sparse_ops or ragged_tensors:\n        lambda_example = '\\n    weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights)\\n    output = tf.keras.layers.Lambda(weights_mult)(input)\\n    '\n        raise ValueError('Tensorflow ops that generate ragged or sparse tensor outputs are currently not supported by Keras automatic op wrapping. Please wrap these ops in a Lambda layer: \\n\\n```\\n{example}\\n```\\nSparse ops encountered: {sparse_ops}\\nRagged tensors encountered: {ragged_tensors}\\n'.format(example=lambda_example, sparse_ops=str(sparse_ops), ragged_tensors=str(ragged_tensors)))\n    return (processed_ops, created_layers)", "docstring": "Helper method for `create_keras_history`.\n\nArgs:\ntensors: A structure of Tensors for which to create Keras metadata.\nprocessed_ops: Set. TensorFlow operations that have already been wrapped in\n`TensorFlowOpLayer` instances.\ncreated_layers: List. The `TensorFlowOpLayer` instances created.\n\nReturns:\nTuple. First element is the updated set of TensorFlow Operations that\nhave been wrapped in `TensorFlowOpLayer` instances. Second element is\na list of the `TensorFlowOpLayer` instances created.", "source": "github-repos"}
{"code": "def infer_tests_to_run(output_file: str, diff_with_last_commit: bool=False, filter_models: bool=False, test_all: bool=False):\n    if not test_all:\n        modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)\n    else:\n        modified_files = [str(k) for k in PATH_TO_TESTS.glob('*test_**.py', recursive=True) + glob.glob('examples*.py', recursive=True)\n        if len(model_impacted) >= NUM_MODELS_TO_TRIGGER_FULL_CI and filter_models:\n            print(f'More than {NUM_MODELS_TO_TRIGGER_FULL_CI - 1} models are impacted and `filter_models=False`. CI is configured to test everything.')\n    else:\n        test_files_to_run = [f for f in modified_files if f.startswith('tests') and '/test_' in f]\n        impacted_files = get_impacted_files_from_tiny_model_summary(diff_with_last_commit=diff_with_last_commit)\n        test_map = create_module_to_test_map(reverse_map=reverse_map, filter_models=filter_models)\n        for f in modified_files + impacted_files:\n            if f in test_map:\n                test_files_to_run.extend(test_map[f])\n        test_files_to_run = sorted(set(test_files_to_run))\n        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == 'repo_utils']\n        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == 'sagemaker']\n        test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]\n    print(f'\\n\n    create_test_list_from_filter(test_files_to_run, out_path='test_preparation/')\n    doctest_list = get_doctest_files()\n    print(f'\\n\n    if len(doctest_list) > 0:\n        doctest_file = Path(output_file).parent / 'doctest_list.txt'\n        with open(doctest_file, 'w', encoding='utf-8') as f:\n            f.write(' '.join(doctest_list))", "docstring": "The main function called by the test fetcher. Determines the tests to run from the diff.\n\nArgs:\noutput_file (`str`):\nThe path where to store the summary of the test fetcher analysis. Other files will be stored in the same\nfolder:\n\n- examples_test_list.txt: The list of examples tests to run.\n- test_repo_utils.txt: Will indicate if the repo utils tests should be run or not.\n- doctest_list.txt: The list of doctests to run.\n\ndiff_with_last_commit (`bool`, *optional*, defaults to `False`):\nWhether to analyze the diff with the last commit (for use on the main branch after a PR is merged) or with\nthe branching point from main (for use on each PR).\nfilter_models (`bool`, *optional*, defaults to `True`):\nWhether or not to filter the tests to core models only, when a file modified results in a lot of model\ntests.", "source": "github-repos"}
{"code": "def remove_objects_from_args(args: Iterable[Any], kwargs: Dict[str, Any], pvalue_class: Union[Type[T], Tuple[Type[T], ...]]) -> Tuple[List[Any], Dict[str, Any], List[T]]:\n    pvals = []\n\n    def swapper(value):\n        pvals.append(value)\n        return ArgumentPlaceholder()\n    new_args = [swapper(v) if isinstance(v, pvalue_class) else v for v in args]\n    new_kwargs = dict(((k, swapper(v)) if isinstance(v, pvalue_class) else (k, v) for k, v in sorted(kwargs.items())))\n    return (new_args, new_kwargs, pvals)", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nReplaces all objects of a given type in args/kwargs with a placeholder.\n\nArgs:\nargs: A list of positional arguments.\nkwargs: A dictionary of keyword arguments.\npvalue_class: A class object representing the types of arguments that must\nbe replaced with a placeholder value (instance of ArgumentPlaceholder).\n\nReturns:\nA 3-tuple containing a modified list of positional arguments, a modified\ndictionary of keyword arguments, and a list of all objects replaced with\na placeholder value.", "source": "github-repos"}
{"code": "def load_transcripts(adapter, transcripts_lines=None, build='37', ensembl_genes=None):\n    ensembl_genes = (ensembl_genes or adapter.ensembl_genes(build))\n    if (transcripts_lines is None):\n        transcripts_lines = fetch_ensembl_transcripts(build=build)\n    transcripts_dict = parse_transcripts(transcripts_lines)\n    for ens_tx_id in list(transcripts_dict):\n        parsed_tx = transcripts_dict[ens_tx_id]\n        ens_gene_id = parsed_tx['ensembl_gene_id']\n        gene_obj = ensembl_genes.get(ens_gene_id)\n        if (not gene_obj):\n            transcripts_dict.pop(ens_tx_id)\n            LOG.debug('Gene %s does not exist in build %s', ens_gene_id, build)\n            continue\n        parsed_tx['hgnc_id'] = gene_obj['hgnc_id']\n        parsed_tx['primary_transcripts'] = set(gene_obj.get('primary_transcripts', []))\n    ref_seq_transcripts = 0\n    nr_primary_transcripts = 0\n    nr_transcripts = len(transcripts_dict)\n    transcript_objs = []\n    with progressbar(transcripts_dict.values(), label='Building transcripts', length=nr_transcripts) as bar:\n        for tx_data in bar:\n            tx_data['is_primary'] = False\n            primary_transcripts = tx_data['primary_transcripts']\n            refseq_identifier = None\n            refseq_identifiers = []\n            for category in TRANSCRIPT_CATEGORIES:\n                identifiers = tx_data[category]\n                if (not identifiers):\n                    continue\n                for refseq_id in identifiers:\n                    refseq_identifiers.append(refseq_id)\n                    ref_seq_transcripts += 1\n                    if (refseq_id in primary_transcripts):\n                        refseq_identifier = refseq_id\n                        tx_data['is_primary'] = True\n                        nr_primary_transcripts += 1\n                    if (not refseq_identifier):\n                        refseq_identifier = refseq_id\n            if refseq_identifier:\n                tx_data['refseq_id'] = refseq_identifier\n            if refseq_identifiers:\n                tx_data['refseq_identifiers'] = refseq_identifiers\n            tx_obj = build_transcript(tx_data, build)\n            transcript_objs.append(tx_obj)\n    LOG.info('Loading transcripts...')\n    if (len(transcript_objs) > 0):\n        adapter.load_transcript_bulk(transcript_objs)\n    LOG.info('Number of transcripts in build %s: %s', build, nr_transcripts)\n    LOG.info('Number of transcripts with refseq identifier: %s', ref_seq_transcripts)\n    LOG.info('Number of primary transcripts: %s', nr_primary_transcripts)\n    return transcript_objs", "docstring": "Load all the transcripts\n\nTranscript information is from ensembl.\n\nArgs:\nadapter(MongoAdapter)\ntranscripts_lines(iterable): iterable with ensembl transcript lines\nbuild(str)\nensembl_genes(dict): Map from ensembl_id -> HgncGene\n\nReturns:\ntranscript_objs(list): A list with all transcript objects", "source": "codesearchnet"}
{"code": "def start_dag(self, dag, *, data=None):\n    return self._client.send(Request(action='start_dag', payload={'name': (dag.name if isinstance(dag, Dag) else dag), 'data': (data if isinstance(data, MultiTaskData) else None)})).payload['dag_name']", "docstring": "Schedule the execution of a dag by sending a signal to the workflow.\n\nArgs:\ndag (Dag, str): The dag object or the name of the dag that should be started.\ndata (MultiTaskData): The data that should be passed on to the new dag.\n\nReturns:\nstr: The name of the successfully started dag.", "source": "codesearchnet"}
{"code": "def _pick_or_create_inserted_op_moment_index(self, splitter_index: int, op: ops.Operation, strategy: InsertStrategy) -> int:\n    if ((strategy is InsertStrategy.NEW) or (strategy is InsertStrategy.NEW_THEN_INLINE)):\n        self._moments.insert(splitter_index, ops.Moment())\n        return splitter_index\n    if (strategy is InsertStrategy.INLINE):\n        if ((0 <= (splitter_index - 1) < len(self._moments)) and self._can_add_op_at((splitter_index - 1), op)):\n            return (splitter_index - 1)\n        return self._pick_or_create_inserted_op_moment_index(splitter_index, op, InsertStrategy.NEW)\n    if (strategy is InsertStrategy.EARLIEST):\n        if self._can_add_op_at(splitter_index, op):\n            p = self._prev_moment_available(op, splitter_index)\n            return (p or 0)\n        return self._pick_or_create_inserted_op_moment_index(splitter_index, op, InsertStrategy.INLINE)\n    raise ValueError('Unrecognized append strategy: {}'.format(strategy))", "docstring": "Determines and prepares where an insertion will occur.\n\nArgs:\nsplitter_index: The index to insert at.\nop: The operation that will be inserted.\nstrategy: The insertion strategy.\n\nReturns:\nThe index of the (possibly new) moment where the insertion should\noccur.\n\nRaises:\nValueError: Unrecognized append strategy.", "source": "codesearchnet"}
{"code": "def write(self, b):\n    self._checkClosed()\n    self._uploader.put(b)\n    bytes_written = len(b)\n    self._position += bytes_written\n    return bytes_written", "docstring": "Write bytes from b.\n\nReturns number of bytes written (<= len(b)).\n\nArgs:\nb: (memoryview) Buffer with data to write.", "source": "github-repos"}
{"code": "def instantiate_references_json(references_json):\n    \n\n    \n    references = {}\n    for obj in references_json:\n        obj_id = obj['id']\n        obj_type = obj.get('subtype', obj['type'])\n\n        cls = get_class(obj_type)\n        instance = cls.__new__(cls, id=obj_id)\n        if instance is None:\n            raise RuntimeError('Error loading model from JSON (type: %s, id: %s)' % (obj_type, obj_id))\n        references[instance.id] = instance\n\n    return references", "docstring": "Given a JSON representation of all the models in a graph, return a\ndict of new model objects.\n\nArgs:\nreferences_json (``JSON``)\nJSON specifying new Bokeh models to create\n\nReturns:\ndict[str, Model]", "source": "juraj-google-style"}
{"code": "def run(self):\n        \n        for build_dir in self.build_dirs:\n            if os.path.isdir(build_dir):\n                sys.stdout.write('Removing %s%s' % (build_dir, os.linesep))\n                shutil.rmtree(build_dir)\n\n        for (root, dirs, files) in os.walk(self.cwd):\n            for name in files:\n                fullpath = os.path.join(root, name)\n                if any(fullpath.endswith(ext) for ext in self.build_artifacts):\n                    sys.stdout.write('Removing %s%s' % (fullpath, os.linesep))\n                    os.remove(fullpath)", "docstring": "Runs the command.\n\nArgs:\nself (CleanCommand): the ``CleanCommand`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def _CalculateComprehensionState(self, newline):\n    current = self.next_token\n    previous = current.previous_token\n    top_of_stack = self.comp_stack[-1] if self.comp_stack else None\n    penalty = 0\n    if top_of_stack is not None:\n        if current == top_of_stack.closing_bracket:\n            last = self.comp_stack.pop()\n            if last.has_interior_split:\n                penalty += style.Get('SPLIT_PENALTY_COMPREHENSION')\n            return penalty\n        if newline:\n            top_of_stack.has_interior_split = True\n    if subtypes.COMP_EXPR in current.subtypes and subtypes.COMP_EXPR not in previous.subtypes:\n        self.comp_stack.append(object_state.ComprehensionState(current))\n        return penalty\n    if current.value == 'for' and subtypes.COMP_FOR in current.subtypes:\n        if top_of_stack.for_token is not None:\n            if style.Get('SPLIT_COMPLEX_COMPREHENSION') and top_of_stack.has_split_at_for != newline and (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr()):\n                penalty += split_penalty.UNBREAKABLE\n        else:\n            top_of_stack.for_token = current\n            top_of_stack.has_split_at_for = newline\n            if style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and top_of_stack.HasTrivialExpr():\n                penalty += split_penalty.CONNECTED\n    if subtypes.COMP_IF in current.subtypes and subtypes.COMP_IF not in previous.subtypes:\n        if style.Get('SPLIT_COMPLEX_COMPREHENSION') and top_of_stack.has_split_at_for != newline and (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr()):\n            penalty += split_penalty.UNBREAKABLE\n    return penalty", "docstring": "Makes required changes to comprehension state.\n\nArgs:\nnewline: Whether the current token is to be added on a newline.\n\nReturns:\nThe penalty for the token-newline combination given the current\ncomprehension state.", "source": "github-repos"}
{"code": "def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):\n        \n        forbidden_item_ids = set() if forbidden_item_ids is None else set(forbidden_item_ids)\n        children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)\n        counts = self.get_children_counts(active=None)\n        if item_ids is None:\n            \n            item_ids = set(children.keys())\n\n        def _get_leaves(item_id):\n            leaves = set()\n\n            def __search(item_ids):\n                result = set(flatten([children.get(item_id, []) for item_id in item_ids]))\n                new_leaves = {item_id for item_id in result if item_id not in children.keys()}\n                leaves.update(new_leaves)\n                return result - new_leaves\n\n            fixed_point(\n                is_zero=lambda to_visit: len(to_visit) == 0,\n                minus=lambda to_visit, visited: to_visit - visited,\n                plus=lambda visited_x, visited_y: visited_x | visited_y,\n                f=__search,\n                x={item_id}\n            )\n            leaves = {leaf for leaf in leaves if counts[leaf] == 0}\n            if len(leaves) > 0:\n                return leaves\n            if counts[item_id] == 0 and item_id not in forbidden_item_ids:\n                return {item_id}\n            return set()\n\n        return {item_id: _get_leaves(item_id) for item_id in item_ids}", "docstring": "Get mapping of items to their reachable leaves. Leaves having\ninactive relations to other items are omitted.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\ndict: item id -> list of items (reachable leaves)", "source": "juraj-google-style"}
{"code": "def _validate(self):\n    probably_good_to_go = True\n    sheet = self.table\n    identity = self.db_sheet_cols.id\n    id_col = sheet.loc[(:, identity)]\n    if any(id_col.duplicated()):\n        warnings.warn('your database is corrupt: duplicates encountered in the srno-column')\n        logger.debug(('srno duplicates:\\n' + str(id_col.duplicated())))\n        probably_good_to_go = False\n    return probably_good_to_go", "docstring": "Checks that the db-file is ok\n\nReturns:\nTrue if OK, False if not.", "source": "codesearchnet"}
{"code": "def _any_overlap_or_contiguous(self, test_overlap: bool) -> bool:\n        \n        for i in range(len(self.intervals)):\n            for j in range(i + 1, len(self.intervals)):\n                first = self.intervals[i]\n                second = self.intervals[j]\n                if test_overlap:\n                    test = first.overlaps(second)\n                else:\n                    test = first.contiguous(second)\n                if test:\n                    return True\n        return False", "docstring": "Do any of the intervals overlap?\n\nArgs:\ntest_overlap: if ``True``, test for overlapping intervals; if\n``False``, test for contiguous intervals.", "source": "juraj-google-style"}
{"code": "def get_gdns_publisher(config, metrics, **kwargs):\n    builder = gdns_publisher.GDNSPublisherBuilder(config, metrics, **kwargs)\n    return builder.build_publisher()", "docstring": "Get a GDNSPublisher client.\n\nA factory function that validates configuration and returns a\npublisher client (:interface:`gordon.interfaces.IMessageHandler`)\nprovider.\n\nArgs:\nconfig (dict): Google Cloud DNS API related configuration.\nmetrics (obj): :interface:`IMetricRelay` implementation.\nkwargs (dict): Additional keyword arguments to pass to the\npublisher.\nReturns:\nA :class:`GDNSPublisher` instance.", "source": "codesearchnet"}
{"code": "def operator_and_matrix(self, shapes_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False):\n    raise NotImplementedError('Not implemented yet.')", "docstring": "Build a batch matrix and an Operator that should have similar behavior.\n\nEvery operator acts like a (batch) matrix.  This method returns both\ntogether, and is used by tests.\n\nArgs:\nshapes_info: `OperatorShapesInfo`, encoding shape information about the\noperator.\ndtype:  Numpy dtype.  Data type of returned array/operator.\nuse_placeholder:  Python bool.  If True, initialize the operator with a\nplaceholder of undefined shape and correct dtype.\nensure_self_adjoint_and_pd: If `True`,\nconstruct this operator to be Hermitian Positive Definite, as well\nas ensuring the hints `is_positive_definite` and `is_self_adjoint`\nare set.\nThis is useful for testing methods such as `cholesky`.\n\nReturns:\noperator:  `LinearOperator` subclass instance.\nmat:  `Tensor` representing operator.", "source": "github-repos"}
{"code": "def profile_view(request, user_id=None):\n    if (request.user.is_eighthoffice and ('full' not in request.GET) and (user_id is not None)):\n        return redirect('eighth_profile', user_id=user_id)\n    if (user_id is not None):\n        try:\n            profile_user = User.objects.get(id=user_id)\n            if (profile_user is None):\n                raise Http404\n        except User.DoesNotExist:\n            raise Http404\n    else:\n        profile_user = request.user\n    num_blocks = 6\n    eighth_schedule = []\n    start_block = EighthBlock.objects.get_first_upcoming_block()\n    blocks = []\n    if start_block:\n        blocks = ([start_block] + list(start_block.next_blocks((num_blocks - 1))))\n    for block in blocks:\n        sch = {'block': block}\n        try:\n            sch['signup'] = EighthSignup.objects.get(scheduled_activity__block=block, user=profile_user)\n        except EighthSignup.DoesNotExist:\n            sch['signup'] = None\n        except MultipleObjectsReturned:\n            client.captureException()\n            sch['signup'] = None\n        eighth_schedule.append(sch)\n    if profile_user.is_eighth_sponsor:\n        sponsor = EighthSponsor.objects.get(user=profile_user)\n        start_date = get_start_date(request)\n        eighth_sponsor_schedule = EighthScheduledActivity.objects.for_sponsor(sponsor).filter(block__date__gte=start_date).order_by('block__date', 'block__block_letter')\n        eighth_sponsor_schedule = eighth_sponsor_schedule[:10]\n    else:\n        eighth_sponsor_schedule = None\n    admin_or_teacher = (request.user.is_eighth_admin or request.user.is_teacher)\n    can_view_eighth = (profile_user.can_view_eighth or (request.user == profile_user))\n    eighth_restricted_msg = ((not can_view_eighth) and admin_or_teacher)\n    if ((not can_view_eighth) and (not request.user.is_eighth_admin) and (not request.user.is_teacher)):\n        eighth_schedule = []\n    has_been_nominated = (profile_user.username in [u.nominee.username for u in request.user.nomination_votes.filter(position__position_name=settings.NOMINATION_POSITION)])\n    context = {'profile_user': profile_user, 'eighth_schedule': eighth_schedule, 'can_view_eighth': can_view_eighth, 'eighth_restricted_msg': eighth_restricted_msg, 'eighth_sponsor_schedule': eighth_sponsor_schedule, 'nominations_active': settings.NOMINATIONS_ACTIVE, 'nomination_position': settings.NOMINATION_POSITION, 'has_been_nominated': has_been_nominated}\n    return render(request, 'users/profile.html', context)", "docstring": "Displays a view of a user's profile.\n\nArgs:\nuser_id\nThe ID of the user whose profile is being viewed. If not\nspecified, show the user's own profile.", "source": "codesearchnet"}
{"code": "def _import_templates(force=False):\n    \n    tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')\n    disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files}\n    db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}\n\n    for name, template_file in disk_templates.items():\n        with open(template_file, 'r') as f:\n            body = f.read()\n        disk_hash = get_hash(body)\n\n        if name not in db_templates:\n            template = Template()\n            template.template_name = name\n            template.template = body\n\n            db.session.add(template)\n            auditlog(\n                event='template.import',\n                actor='init',\n                data={\n                    'template_name': name,\n                    'template': body\n                }\n            )\n            logger.info('Imported template {}'.format(name))\n        else:\n            template = db_templates[name]\n            db_hash = get_hash(template.template)\n\n            if db_hash != disk_hash:\n                if force or not db_templates[name].is_modified:\n                    template.template = body\n\n                    db.session.add(template)\n                    auditlog(\n                        event='template.update',\n                        actor='init',\n                        data={\n                            'template_name': name,\n                            'template_diff': diff(template.template, body)\n                        }\n                    )\n                    logger.info('Updated template {}'.format(name))\n                else:\n                    logger.warning(\n                        'Updated template available for {}. Will not import as it would'\n                        ' overwrite user edited content and force is not enabled'.format(name)\n                    )", "docstring": "Import templates from disk into database\n\nReads all templates from disk and adds them to the database. By default, any template that has been modified by\nthe user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates\nto be imported regardless of status\n\nArgs:\nforce (`bool`): Force overwrite any templates with local changes made. Default: `False`\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def _IsotonicRegressionGrad(op: ops.Operation, grad_output, grad_segments):\n    del grad_segments\n    segments = op.outputs[1]\n    return _MeanAggregator(grad_output, segments)", "docstring": "Gradient for the isotonic regression function.\n\nArgs:\nop: The IsotonicRegression tensorflow op.\ngrad_output: Tensor of incoming gradients with respect to the output.\ngrad_segments: Tensor of incoming gradients with respect to the segments.\n\nReturns:\nA tensor, same size as `grad_output` with the gradient with respect to\nthe input.", "source": "github-repos"}
{"code": "def set_parameters(self, parameters_dict):\n        \n        DB.set_hash_value(self._key, 'parameters', parameters_dict)\n        self.publish(\"parameters_updated\")", "docstring": "Set the subarray parameters.\n\nArgs:\nparameters_dict (dict): Dictionary of Subarray parameters", "source": "juraj-google-style"}
{"code": "def box(self, x0, y0, width, height):\n    assert (width > 1)\n    assert (height > 1)\n    width -= 1\n    height -= 1\n    for x in range(x0, (x0 + width)):\n        self.point(x, y0, '-')\n        self.point(x, (y0 + height), '-')\n    for y in range(y0, (y0 + height)):\n        self.point(x0, y, '|')\n        self.point((x0 + width), y, '|')\n    self.point(x0, y0, '+')\n    self.point((x0 + width), y0, '+')\n    self.point(x0, (y0 + height), '+')\n    self.point((x0 + width), (y0 + height), '+')", "docstring": "Create a box on ASCII canvas.\n\nArgs:\nx0 (int): x coordinate of the box corner.\ny0 (int): y coordinate of the box corner.\nwidth (int): box width.\nheight (int): box height.", "source": "codesearchnet"}
{"code": "def append(self, text, afterline=None):\n        \n        if afterline:\n            self._vim.current.buffer.append(text, afterline)\n        else:\n            self._vim.current.buffer.append(text)", "docstring": "Append text to the current buffer.\n\nArgs:\ntext (str or Sequence[str]): One or many lines of text to append.\nafterline (Optional[int]):\nLine number to append after. If 0, text is prepended before the\nfirst line; if ``None``, at end of the buffer.", "source": "juraj-google-style"}
{"code": "def _add_saveable(saveables, seen_ops, saveable):\n    if saveable.op is not None and saveable.op in seen_ops:\n        raise ValueError(f'The same saveable will be restored with two names: {saveable.name}')\n    saveables.append(saveable)\n    seen_ops.add(saveable.op)", "docstring": "Adds the saveable to the saveables list.\n\nArgs:\nsaveables: List to append the SaveableObject to.\nseen_ops: Set of the ops of the saveables already processed.  Used to\ncheck that each saveable is only saved once.\nsaveable: The saveable.\n\nRaises:\nValueError: If the saveable has already been processed.", "source": "github-repos"}
{"code": "def emit(self, record):\n        \n        \n        properties = {\n            'process': record.processName,\n            'module': record.module,\n            'fileName': record.filename,\n            'lineNumber': record.lineno,\n            'level': record.levelname,\n        }\n\n        \n        if record.exc_info:\n            self.client.track_exception(*record.exc_info, properties=properties)\n            return\n\n        \n        formatted_message = self.format(record)\n        self.client.track_trace(formatted_message, properties=properties, severity=record.levelname)", "docstring": "Emit a record.\n\nIf a formatter is specified, it is used to format the record. If exception information is present, an Exception\ntelemetry object is sent instead of a Trace telemetry object.\n\nArgs:\nrecord (:class:`logging.LogRecord`). the record to format and send.", "source": "juraj-google-style"}
{"code": "def collect_function_arg_names(function_names, return_all_args_function_names, function_renames):\n    function_name_v1_to_attr = {}\n    function_name_v2_to_attr = {}\n\n    def visit(unused_path, unused_parent, children):\n        \n        for child in children:\n            _, attr = tf_decorator.unwrap(child[1])\n            api_names_v1 = ['tf.' + name for name in tf_export.get_v1_names(attr)]\n            if any((name in function_names for name in api_names_v1)):\n                for name in api_names_v1:\n                    function_name_v1_to_attr[name] = attr\n            api_names_v2 = ['tf.' + name for name in tf_export.get_v2_names(attr)]\n            for name in api_names_v2:\n                function_name_v2_to_attr[name] = attr\n    visitor = public_api.PublicAPIVisitor(visit)\n    visitor.do_not_descend_map['tf'].append('contrib')\n    visitor.private_map['tf.compat'] = ['v1', 'v2']\n    traverse.traverse(tf.compat.v1, visitor)\n    traverse.traverse(tf.compat.v2, visitor)\n\n    def get_arguments_list(attr):\n        if tf_inspect.isclass(attr):\n            arg_list = tf_inspect.getargspec(getattr(attr, '__init__'))[0]\n            return arg_list[1:]\n        else:\n            return tf_inspect.getargspec(attr)[0]\n    function_to_args = {}\n    if any((name not in function_name_v1_to_attr for name in function_names)):\n        raise ValueError(f'Symbols not found in `tf.compat.v1`: `{'`, `'.join(function_names - function_name_v1_to_attr.keys())}`')\n    for name_v1, attr_v1 in function_name_v1_to_attr.items():\n        args_v1 = get_arguments_list(attr_v1)\n        if name_v1 in return_all_args_function_names:\n            function_to_args[name_v1] = args_v1\n            continue\n        name_v2 = name_v1\n        if name_v1 in function_renames:\n            name_v2 = function_renames[name_v1]\n            if name_v2.startswith('tf.compat.v1.'):\n                raise ValueError(f'Symbol `{name_v1}` is renamed to `{name_v2}`, no need to add keyword argument names, remove from `reordered_function_names`')\n        if name_v2 not in function_name_v2_to_attr:\n            raise ValueError(f'Symbol `{name_v2}` not found in `tf.compat.v2`')\n        args_v2 = get_arguments_list(function_name_v2_to_attr[name_v2])\n        if args_v1 == args_v2:\n            raise ValueError(f'Symbol `{name_v1}` has no changes in arguments, no need to add keyword argument names, remove from `reordered_function_names`')\n        needed_arg_names = []\n        same_so_far = True\n        for index, arg in enumerate(args_v1):\n            if same_so_far and index < len(args_v2) and (arg == args_v2[index]):\n                needed_arg_names.append(None)\n            else:\n                same_so_far = False\n                needed_arg_names.append(arg)\n        function_to_args[name_v1] = needed_arg_names\n    return function_to_args", "docstring": "Determines argument names for reordered function signatures.\n\nArgs:\nfunction_names: Functions to collect arguments for.\nreturn_all_args_function_names: Functions to collect all argument names for.\nfunction_renames: Function renames between v1 and v2.\n\nReturns:\nDictionary mapping function names to a list of argument names. Each argument\nname list can have leading `None` elements to indicate that some of the\nfunction arguments did not change between v1 and v2.", "source": "github-repos"}
{"code": "def bam2es(\n        bam_fn,\n        es_fo,\n        allowed_delta,\n    ):\n        \n\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n        es_fo.write(\"\n\n        with pysam.AlignmentFile(bam_fn, \"rb\") as sam:\n            references_dict = {}\n\n            for i in range(len(sam.references)):\n                references_dict[sam.references[i]] = i + 1\n\n            for read in sam:\n                rnf_read_tuple = rnftools.rnfformat.ReadTuple()\n                rnf_read_tuple.destringize(read.query_name)\n\n                left = read.reference_start + 1\n                right = read.reference_end\n                chrom_id = references_dict[sam.references[read.reference_id]]\n\n                nb_of_segments = len(rnf_read_tuple.segments)\n\n                if rnf_read_tuple.segments[0].genome_id == 1:\n                    should_be_mapped = True\n                else:\n                    should_be_mapped = False\n\n                \n                if read.is_unmapped:\n                    \n                    if should_be_mapped:\n                        category = \"u\"\n                    \n                    else:\n                        category = \"U\"\n                \n                else:\n                    \n                    if should_be_mapped:\n                        exists_corresponding_segment = False\n\n                        for j in range(len(rnf_read_tuple.segments)):\n                            segment = rnf_read_tuple.segments[j]\n                            if (\n                                (segment.left == 0 or abs(segment.left - left) <= allowed_delta)\n                                and (segment.right == 0 or abs(segment.right - right) <= allowed_delta)\n                                and (segment.left != 0 or segment.right == 0)\n                                and (chrom_id == 0 or chrom_id == segment.chr_id)\n                            ):\n                                exists_corresponding_segment = True\n                                segment = str(j + 1)\n                                break\n\n                        \n                        if exists_corresponding_segment:  \n                            category = \"M_\" + segment\n                        \n                        else:\n                            category = \"w\"\n                    \n                    else:\n                        category = \"m\"\n\n                es_fo.write(\n                    \"\\t\".join(\n                        map(\n                            str,\n                            [\n                                \n                                read.query_name,\n                                \n                                \"unmapped\" if read.is_unmapped else \"mapped_\" + str(read.mapping_quality),\n                                \n                                chrom_id,\n                                \n                                \"R\" if read.is_reverse else \"F\",\n                                \n                                left,\n                                \n                                right,\n                                \n                                category,\n                       
         \n                                nb_of_segments\n                            ]\n                        )\n                    ) + os.linesep\n                )", "docstring": "Convert BAM file to ES file.\n\nArgs:\nbam_fn (str): File name of the BAM file.\nbam_fo (file): File object of the ES file.\nallowed_delta (int): Maximal allowed coordinates difference for correct reads.", "source": "juraj-google-style"}
{"code": "def _SetSELinuxContext(path):\n  \n  restorecon = '/sbin/restorecon'\n  if os.path.isfile(restorecon) and os.access(restorecon, os.X_OK):\n    subprocess.call([restorecon, path])", "docstring": "Set the appropriate SELinux context, if SELinux tools are installed.\n\nCalls /sbin/restorecon on the provided path to set the SELinux context as\nspecified by policy. This call does not operate recursively.\n\nOnly some OS configurations use SELinux. It is therefore acceptable for\nrestorecon to be missing, in which case we do nothing.\n\nArgs:\npath: string, the path on which to fix the SELinux context.", "source": "juraj-google-style"}
{"code": "def write_config(config, config_path=CONFIG_PATH):\n    if (not os.path.exists(config_path)):\n        os.makedirs(os.path.dirname(config_path))\n    with open(config_path, 'w', encoding='utf-8') as f:\n        config.write(f)", "docstring": "Write the config to the output path.\nCreates the necessary directories if they aren't there.\n\nArgs:\nconfig (configparser.ConfigParser): A ConfigParser.", "source": "codesearchnet"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = ops.convert_to_tensor(y_true, self._dtype)\n    y_pred = ops.convert_to_tensor(y_pred, self._dtype)\n    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n    error_sq = ops.square(y_pred - y_true)\n    return super().update_state(error_sq, sample_weight=sample_weight)", "docstring": "Accumulates root mean squared error statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Can\nbe a `Tensor` whose rank is either 0, or the same rank as\n`y_true`, and must be broadcastable to `y_true`.\nDefaults to `1`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def run(self, fn, args=(), kwargs=None, options=None):\n    return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)", "docstring": "Run `fn` on each replica, with the given arguments.\n\nIn `OneDeviceStrategy`, `fn` is simply called within a device scope for the\ngiven device, with the provided arguments.\n\nArgs:\nfn: The function to run. The output must be a `tf.nest` of `Tensor`s.\nargs: (Optional) Positional arguments to `fn`.\nkwargs: (Optional) Keyword arguments to `fn`.\noptions: (Optional) An instance of `tf.distribute.RunOptions` specifying\nthe options to run `fn`.\n\nReturns:\nReturn value from running `fn`.", "source": "github-repos"}
{"code": "def _ragged_embedding_lookup_with_reduce(table: tf_variables.Variable, ragged: ragged_tensor.RaggedTensor, weights: ragged_tensor.RaggedTensor, combiner: str) -> core.Tensor:\n    if weights is None:\n        weights = array_ops.ones_like(ragged, dtype=table.dtype)\n    weights = array_ops.expand_dims(weights, axis=2)\n    ragged_result = embedding_ops.embedding_lookup(table, ragged)\n    ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)\n    if combiner == 'mean':\n        ragged_result = math_ops.div_no_nan(ragged_result, math_ops.reduce_sum(weights, axis=1))\n    elif combiner == 'sqrtn':\n        ragged_result = math_ops.div_no_nan(ragged_result, math_ops.sqrt(math_ops.reduce_sum(weights * weights, axis=1)))\n    return ragged_result", "docstring": "Compute a ragged lookup followed by a reduce on axis 1.\n\nArgs:\ntable: The embedding table.\nragged: A RaggedTensor of ids to look up.\nweights: A RaggedTensor of weights (or None).\ncombiner: One of \"mean\", \"sum\", \"sqrtn\".\n\nReturns:\nA Tensor.", "source": "github-repos"}
{"code": "def repository_blob(self, sha, **kwargs):\n    path = ('/projects/%s/repository/blobs/%s' % (self.get_id(), sha))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Return a file by blob SHA.\n\nArgs:\nsha(str): ID of the blob\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\ndict: The blob content and metadata", "source": "codesearchnet"}
{"code": "def get_supervisor(func: types.AnyFunction) -> types.Supervisor:\n    \n    if not callable(func):\n        raise TypeError(\"func is not callable\")\n    if asyncio.iscoroutinefunction(func):\n        supervisor = _async_supervisor\n    else:\n        supervisor = _sync_supervisor\n    return functools.partial(supervisor, func)", "docstring": "Get the appropriate supervisor to use and pre-apply the function.\n\nArgs:\nfunc: A function.", "source": "juraj-google-style"}
{"code": "def _ResizeBilinearGrad(op: ops.Operation, grad):\n    grad0 = gen_image_ops.resize_bilinear_grad(grad, op.inputs[0], align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))\n    return [grad0, None]", "docstring": "The derivatives for bilinear resizing.\n\nArgs:\nop: The ResizeBilinear op.\ngrad: The tensor representing the gradient w.r.t. the output.\n\nReturns:\nThe gradients w.r.t. the input.", "source": "github-repos"}
{"code": "def sunset(self, date=None, zenith=None):\n        \n        return (segment.sunset(date, zenith) for segment in self)", "docstring": "Calculate sunset times for locations.\n\nArgs:\ndate (datetime.date): Calculate rise or set for given date\nzenith (str): Calculate sunset events, or start of twilight times\n\nReturns:\nlist of list of datetime.datetime: The time for the sunset for each\npoint in each segment", "source": "juraj-google-style"}
{"code": "def ws004c(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `ws004c`'.format(value))\n\n        self._ws004c = value", "docstring": "Corresponds to IDD Field `ws004c`\n\nArgs:\nvalue (float): value for IDD Field `ws004c`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def add_positional_embedding(x, max_length, name=None, positions=None):\n    with tf.name_scope('add_positional_embedding'):\n        (_, length, depth) = common_layers.shape_list(x)\n        var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)\n        if (positions is None):\n            pad_length = tf.maximum(0, (length - max_length))\n            sliced = tf.cond(tf.less(length, max_length), (lambda : tf.slice(var, [0, 0], [length, (- 1)])), (lambda : tf.pad(var, [[0, pad_length], [0, 0]])))\n            return (x + tf.expand_dims(sliced, 0))\n        else:\n            return (x + tf.gather(var, tf.to_int32(positions)))", "docstring": "Adds positional embedding.\n\nArgs:\nx: Tensor with shape [batch, length, depth].\nmax_length: int representing static maximum size of any dimension.\nname: str representing name of the embedding tf.Variable.\npositions: Tensor with shape [batch, length].\n\nReturns:\nTensor of same shape as x.", "source": "codesearchnet"}
{"code": "def xzhdr(self, header, msgid_range=None):\n        \n        args = header\n        if msgid_range is not None:\n            args += \" \" + utils.unparse_msgid_range(msgid_range)\n\n        code, message = self.command(\"XZHDR\", args)\n        if code != 221:\n            raise NNTPReplyError(code, message)\n\n        return self.info(code, message, compressed=True)", "docstring": "XZHDR command.\n\nArgs:\nmsgid_range: A message-id as a string, or an article number as an\ninteger, or a tuple of specifying a range of article numbers in\nthe form (first, [last]) - if last is omitted then all articles\nafter first are included. A msgid_range of None (the default)\nuses the current article.", "source": "juraj-google-style"}
{"code": "def _calculate_minimum_silent_period(baudrate):\n    \n    _checkNumerical(baudrate, minvalue=1, description='baudrate')  \n\n    BITTIMES_PER_CHARACTERTIME = 11\n    MINIMUM_SILENT_CHARACTERTIMES = 3.5\n\n    bittime = 1 / float(baudrate)\n    return bittime * BITTIMES_PER_CHARACTERTIME * MINIMUM_SILENT_CHARACTERTIMES", "docstring": "Calculate the silent period length to comply with the 3.5 character silence between messages.\n\nArgs:\nbaudrate (numerical): The baudrate for the serial port\n\nReturns:\nThe number of seconds (float) that should pass between each message on the bus.\n\nRaises:\nValueError, TypeError.", "source": "juraj-google-style"}
{"code": "def get_entities(seq, suffix=False):\n    if any((isinstance(s, list) for s in seq)):\n        seq = [item for sublist in seq for item in (sublist + ['O'])]\n    prev_tag = 'O'\n    prev_type = ''\n    begin_offset = 0\n    chunks = []\n    for (i, chunk) in enumerate((seq + ['O'])):\n        if suffix:\n            tag = chunk[(- 1)]\n            type_ = chunk.split('-')[0]\n        else:\n            tag = chunk[0]\n            type_ = chunk.split('-')[(- 1)]\n        if end_of_chunk(prev_tag, tag, prev_type, type_):\n            chunks.append((prev_type, begin_offset, (i - 1)))\n        if start_of_chunk(prev_tag, tag, prev_type, type_):\n            begin_offset = i\n        prev_tag = tag\n        prev_type = type_\n    return chunks", "docstring": "Gets entities from sequence.\n\nArgs:\nseq (list): sequence of labels.\n\nReturns:\nlist: list of (chunk_type, chunk_start, chunk_end).\n\nExample:\n>>> from seqeval.metrics.sequence_labeling import get_entities\n>>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']\n>>> get_entities(seq)\n[('PER', 0, 1), ('LOC', 3, 3)]", "source": "codesearchnet"}
{"code": "def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir':\n        \n        if case_sensitive:\n            return PrettyDir(\n                self.obj, [pattr for pattr in self.pattrs if term in pattr.name]\n            )\n        else:\n            term = term.lower()\n            return PrettyDir(\n                self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()]\n            )", "docstring": "Searches for names that match some pattern.\n\nArgs:\nterm: String used to match names. A name is returned if it matches\nthe whole search term.\ncase_sensitive: Boolean to match case or not, default is False\n(case insensitive).\n\nReturn:\nA PrettyDir object with matched names.", "source": "juraj-google-style"}
{"code": "def get_current(cls):\n    filepath = os.getenv('REZ_RXT_FILE')\n    if ((not filepath) or (not os.path.exists(filepath))):\n        return None\n    return cls.load(filepath)", "docstring": "Get the context for the current env, if there is one.\n\nReturns:\n`ResolvedContext`: Current context, or None if not in a resolved env.", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    cookie_flags = event_values.get('flags', None)\n    if cookie_flags == 0:\n      del event_values['flags']\n    elif cookie_flags:\n      flags = []\n      for flag_value, flag_description in iter(self._COOKIE_FLAGS.items()):\n        if cookie_flags & flag_value:\n          flags.append(flag_description)\n\n      event_values['flags'] = '|'.join(flags)\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def to(self, new_unit):\n    return self.__class__((np.array(self) * self.unit.get_conversion_factor(new_unit)), unit_type=self.unit_type, unit=new_unit)", "docstring": "Conversion to a new_unit.\n\nArgs:\nnew_unit:\nNew unit type.\n\nReturns:\nA ArrayWithFloatWithUnit object in the new units.\n\nExample usage:\n>>> e = EnergyArray([1, 1.1], \"Ha\")\n>>> e.to(\"eV\")\narray([ 27.21138386,  29.93252225]) eV", "source": "codesearchnet"}
{"code": "def set_tpu_core_ids(self, mesh_name, tpu_core_ids):\n    _pywrap_dtensor_device.SetTPUCoreIDs(self._device_info, mesh_name, tpu_core_ids)", "docstring": "Sets the singleton global device ID-to-physical core ID map.\n\nArgs:\nmesh_name: The name of a mesh. If empty, set the default mapping.\ntpu_core_ids: TPU core IDs sorted by TF task/device ordinal.", "source": "github-repos"}
{"code": "def write_xls(data, file_name, worksheet_names=None):\n    \n    workbook = xlwt.Workbook()\n    for sheet_index, sheet_data in enumerate(data):\n        if worksheet_names and sheet_index < len(worksheet_names) and worksheet_names[sheet_index]:\n            name = worksheet_names[sheet_index]\n        else:\n            name = 'Worksheet {}'.format(sheet_index)\n        sheet = workbook.add_sheet(name)\n        for row_index, row in enumerate(sheet_data):\n            for col_index, value in enumerate(row):\n                sheet.write(row_index, col_index, value)\n    workbook.save(file_name)", "docstring": "Writes out to old excel format.\n\nArgs:\ndata: 2D list of tables/worksheets.\nfile_name: Name of the output file.\nworksheet_names: A list of worksheet names (optional).", "source": "juraj-google-style"}
{"code": "def param_type(self, name):\n    self._ensure_loaded()\n    if (name not in self.annotated_params):\n        return None\n    return self.annotated_params[name].type_name", "docstring": "Get the parameter type information by name.\n\nArgs:\nname (str): The full name of a parameter.\n\nReturns:\nstr: The type name or None if no type information is given.", "source": "codesearchnet"}
{"code": "def write_dot_file(G, filename):\n    with io.open(filename, 'w') as fh:\n        fh.write('strict digraph DependencyDiagram {\\n')\n        edge_list = G.edges()\n        node_list = set(G.nodes())\n        if edge_list:\n            for edge in sorted(edge_list):\n                (source, targ) = edge\n                node_list = (node_list - set(source))\n                node_list = (node_list - set(targ))\n                line = '\"{}\" -> \"{}\";\\n'\n                fh.write(line.format(source, targ))\n        if node_list:\n            for node in sorted(node_list):\n                line = '\"{}\"\\n'.format(node)\n                fh.write(line)\n        fh.write('}')", "docstring": "Writes the graph G in dot file format for graphviz visualization.\n\nArgs:\na Networkx graph\nA filename to name the dot files", "source": "codesearchnet"}
{"code": "def get_student_certificate(self, username, course_id):\n    resp = self.requester.get(urljoin(self.base_url, '/api/certificates/v0/certificates/{username}/courses/{course_key}/'.format(username=username, course_key=course_id)))\n    resp.raise_for_status()\n    return Certificate(resp.json())", "docstring": "Returns an Certificate object with the user certificates\n\nArgs:\nusername (str): an edx user's username\ncourse_id (str): an edX course id.\n\nReturns:\nCertificate: object representing the student certificate for a course", "source": "codesearchnet"}
{"code": "def update(self, reference, field_updates, option=None):\n    if (option.__class__.__name__ == 'ExistsOption'):\n        raise ValueError('you must not pass an explicit write option to update.')\n    write_pbs = _helpers.pbs_for_update(reference._document_path, field_updates, option)\n    self._add_write_pbs(write_pbs)", "docstring": "Add a \"change\" to update a document.\n\nSee\n:meth:`~.firestore_v1beta1.document.DocumentReference.update` for\nmore information on ``field_updates`` and ``option``.\n\nArgs:\nreference (~.firestore_v1beta1.document.DocumentReference): A\ndocument reference that will be deleted in this batch.\nfield_updates (dict): Field names or paths to update and values\nto update with.\noption (Optional[~.firestore_v1beta1.client.WriteOption]): A\nwrite option to make assertions / preconditions on the server\nstate of the document before applying changes.", "source": "codesearchnet"}
{"code": "def _is_autocomplete_valid(cur_commands, alias_command):\n    parent_command = ' '.join(cur_commands[1:])\n    with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'r') as tab_completion_table_file:\n        try:\n            tab_completion_table = json.loads(tab_completion_table_file.read())\n            return ((alias_command in tab_completion_table) and (parent_command in tab_completion_table[alias_command]))\n        except Exception:\n            return False", "docstring": "Determine whether autocomplete can be performed at the current state.\n\nArgs:\nparser: The current CLI parser.\ncur_commands: The current commands typed in the console.\nalias_command: The alias command.\n\nReturns:\nTrue if autocomplete can be performed.", "source": "codesearchnet"}
{"code": "def _print_list(self, values: List[Any], print_func: Callable[[Any], None]) -> None:\n    self.generator.open_json_list()\n    field_size = len(values)\n    for i in range(field_size):\n        print_func(values[i])\n        if i < field_size - 1:\n            self.generator.push(',')\n            self.generator.add_newline()\n    self.generator.close_json_list()", "docstring": "Adds the printed JSON list representation of values to _output.\n\nArgs:\nvalues: The values to print as a JSON list.\nprint_func: A function responsible for printing a single value.", "source": "github-repos"}
{"code": "def _package_path(package):\n    \n    from os import path\n    confdir = config_dir()\n    return path.join(confdir, \"{}.cfg\".format(package))", "docstring": "Returns the full path to the default package configuration file.\n\nArgs:\npackage (str): name of the python package to return a path for.", "source": "juraj-google-style"}
{"code": "def offsets_in_rows(self):\n    return gen_ragged_math_ops.ragged_range(starts=constant_op.constant(0, self.dtype), limits=self.row_lengths(), deltas=constant_op.constant(1, self.dtype)).rt_dense_values", "docstring": "Return the offset of each value.\n\nRowPartition takes an array x and converts it into sublists.\noffsets[i] is the index of x[i] in its sublist.\nGiven a shape, such as:\n[*,*,*],[*,*],[],[*,*]\nThis returns:\n0,1,2,0,1,0,1\n\nReturns:\nan offset for every value.", "source": "github-repos"}
{"code": "def slice_list(in_list, lens):\n    if (not isinstance(lens, list)):\n        raise TypeError('\"indices\" must be a list of integers')\n    elif (sum(lens) != len(in_list)):\n        raise ValueError('sum of lens and list length does not match: {} != {}'.format(sum(lens), len(in_list)))\n    out_list = []\n    idx = 0\n    for i in range(len(lens)):\n        out_list.append(in_list[idx:(idx + lens[i])])\n        idx += lens[i]\n    return out_list", "docstring": "Slice a list into several sub lists by a list of given length.\n\nArgs:\nin_list (list): The list to be sliced.\nlens(int or list): The expected length of each out list.\n\nReturns:\nlist: A list of sliced list.", "source": "codesearchnet"}
{"code": "def ChunkedCausalMultiHeadedAttention(\n    feature_depth, num_heads=8, dropout=0.0, chunk_selector=None, mode='train'):\n  \n  prepare_attention_input = combinators.Serial(\n      combinators.Branch(),\n      combinators.Parallel(\n          combinators.Branch(num_branches=3),  \n          CausalMask(axis=-2),  \n      ),\n      combinators.Parallel(\n          combinators.Parallel(\n              core.Dense(feature_depth),\n              core.Dense(feature_depth),\n              core.Dense(feature_depth),\n          ),\n          combinators.Identity()\n      )\n  )\n  return combinators.Serial(\n      combinators.Map(prepare_attention_input),\n      ChunkedAttentionSelector(selector=chunk_selector),  \n      combinators.Map(PureMultiHeadedAttention(  \n          feature_depth=feature_depth, num_heads=num_heads,\n          dropout=dropout, mode=mode), check_shapes=False),\n      combinators.Map(core.Dense(feature_depth))\n  )", "docstring": "Transformer-style causal multi-headed attention operating on chunks.\n\nAccepts inputs that are a list of chunks and applies causal attention.\n\nArgs:\nfeature_depth: int:  depth of embedding\nnum_heads: int: number of attention heads\ndropout: float: dropout rate\nchunk_selector: a function from chunk number to list of chunks to attend.\nmode: str: 'train' or 'eval'\n\nReturns:\nMulti-headed self-attention layer.", "source": "juraj-google-style"}
{"code": "def transition_retry(self, pipeline_key, retry_message):\n    \n    def txn():\n      pipeline_record = db.get(pipeline_key)\n      if pipeline_record is None:\n        logging.warning(\n            'Tried to retry pipeline ID \"%s\" but it does not exist.',\n            pipeline_key.name())\n        raise db.Rollback()\n      if pipeline_record.status not in (\n          _PipelineRecord.WAITING, _PipelineRecord.RUN):\n        logging.warning(\n            'Tried to retry pipeline ID \"%s\", found bad state: %s',\n            pipeline_key.name(), pipeline_record.status)\n        raise db.Rollback()\n\n      params = pipeline_record.params\n      offset_seconds = (\n          params['backoff_seconds'] *\n          (params['backoff_factor'] ** pipeline_record.current_attempt))\n      pipeline_record.next_retry_time = (\n          self._gettime() + datetime.timedelta(seconds=offset_seconds))\n      pipeline_record.current_attempt += 1\n      pipeline_record.retry_message = retry_message\n      pipeline_record.status = _PipelineRecord.WAITING\n\n      if pipeline_record.current_attempt >= pipeline_record.max_attempts:\n        root_pipeline_key = (\n            _PipelineRecord.root_pipeline.get_value_for_datastore(\n                pipeline_record))\n        logging.warning(\n            'Giving up on pipeline ID \"%s\" after %d attempt(s); causing abort '\n            'all the way to the root pipeline ID \"%s\"', pipeline_key.name(),\n            pipeline_record.current_attempt, root_pipeline_key.name())\n        \n        \n        pipeline_record.abort_message = (\n            'Aborting after %d attempts' % pipeline_record.current_attempt)\n        task = taskqueue.Task(\n            url=self.fanout_abort_handler_path,\n            params=dict(root_pipeline_key=root_pipeline_key))\n        task.add(queue_name=self.queue_name, transactional=True)\n      else:\n        task = taskqueue.Task(\n            url=self.pipeline_handler_path,\n            eta=pipeline_record.next_retry_time,\n            params=dict(pipeline_key=pipeline_key,\n                        purpose=_BarrierRecord.START,\n                        attempt=pipeline_record.current_attempt),\n            headers={'X-Ae-Pipeline-Key': pipeline_key},\n            target=pipeline_record.params['target'])\n        task.add(queue_name=self.queue_name, transactional=True)\n\n      pipeline_record.put()\n\n    db.run_in_transaction(txn)", "docstring": "Marks the given pipeline as requiring another retry.\n\nDoes nothing if all attempts have been exceeded.\n\nArgs:\npipeline_key: db.Key of the _PipelineRecord that needs to be retried.\nretry_message: User-supplied message indicating the reason for the retry.", "source": "juraj-google-style"}
{"code": "def with_organisation(self, organisation):\n        \n        if organisation is None:\n            organisation = ''\n        organisation = slugify(organisation)\n        self._validate_organisation(organisation)\n        self.organisation = organisation\n        return self", "docstring": "Add an organisation segment.\n\nArgs:\norganisation (str): Official name of an administrative body\nholding an election.\n\nReturns:\nIdBuilder\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def learn(self, initial_state_key, limit=1000, game_n=1):\n    end_flag = False\n    state_key_list = ([None] * len(self.q_learning_list))\n    action_key_list = ([None] * len(self.q_learning_list))\n    next_action_key_list = ([None] * len(self.q_learning_list))\n    for game in range(game_n):\n        state_key = initial_state_key\n        self.t = 1\n        while (self.t <= limit):\n            for i in range(len(self.q_learning_list)):\n                state_key_list[i] = state_key\n                if ((game + 1) == game_n):\n                    self.state_key_list.append(tuple(i, state_key_list))\n                self.q_learning_list[i].t = self.t\n                next_action_list = self.q_learning_list[i].extract_possible_actions(tuple(i, state_key_list))\n                if len(next_action_list):\n                    action_key = self.q_learning_list[i].select_action(state_key=tuple(i, state_key_list), next_action_list=next_action_list)\n                    action_key_list[i] = action_key\n                    reward_value = self.q_learning_list[i].observe_reward_value(tuple(i, state_key_list), tuple(i, action_key_list))\n                    if (self.q_learning_list[i].check_the_end_flag(tuple(i, state_key_list)) is True):\n                        end_flag = True\n                    next_next_action_list = self.q_learning_list[i].extract_possible_actions(tuple(i, action_key_list))\n                    if len(next_next_action_list):\n                        next_action_key = self.q_learning_list[i].predict_next_action(tuple(i, action_key_list), next_next_action_list)\n                        next_action_key_list[i] = next_action_key\n                        next_max_q = self.q_learning_list[i].extract_q_df(tuple(i, action_key_list), next_action_key)\n                        self.q_learning_list[i].update_q(state_key=tuple(i, state_key_list), action_key=tuple(i, action_key_list), reward_value=reward_value, next_max_q=next_max_q)\n                        state_key = self.q_learning_list[i].update_state(state_key=tuple(i, state_key_list), action_key=tuple(i, action_key_list))\n                        state_key_list[i] = state_key\n                self.t += 1\n                self.q_learning_list[i].t = self.t\n                if (end_flag is True):\n                    break", "docstring": "Multi-Agent Learning.\n\nOverride.\n\nArgs:\ninitial_state_key:  Initial state.\nlimit:              Limit of the number of learning.\ngame_n:             The number of games.", "source": "codesearchnet"}
{"code": "def rename(self, source_file_names, destination_file_names):\n    err_msg = 'source_file_names and destination_file_names should be equal in length'\n    assert len(source_file_names) == len(destination_file_names), err_msg\n\n    def _rename_file(source, destination):\n        \n        try:\n            os.rename(source, destination)\n        except OSError as err:\n            raise IOError(err)\n    exceptions = {}\n    for source, destination in zip(source_file_names, destination_file_names):\n        try:\n            _rename_file(source, destination)\n        except Exception as e:\n            exceptions[source, destination] = e\n    if exceptions:\n        raise BeamIOError('Rename operation failed', exceptions)", "docstring": "Rename the files at the source list to the destination list.\nSource and destination lists should be of the same size.\n\nArgs:\nsource_file_names: List of file paths that need to be moved\ndestination_file_names: List of destination_file_names for the files\n\nRaises:\n``BeamIOError``: if any of the rename operations fail", "source": "github-repos"}
{"code": "def update_pipeline(self, pipeline):\n\t\t\n\t\t\n\t\tpayload = None\n\t\tif  type(pipeline) is not StreakPipeline:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tpayload = pipeline.to_dict(rw = True)\n\n\t\ttry:\n\t\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline.attributes['pipelineKey']\n\t\t\t\t\t\t])\n\t\texcept KeyError:\n\t\t\treturn requests.codes.bad_request, None\n\t\n\t\tcode, r_data = self._req('post', uri , json.dumps(payload))\n\n\t\treturn code, r_data", "docstring": "Updates a pipeline with the provided attributes.\nArgs:\nkey\t\t\trequired identifier for the pipeline\npipeline\tStreakPipeline object\nreturn\t\t(status code, pipeline_dict)", "source": "juraj-google-style"}
{"code": "def download_file(url, destination, **kwargs):\n    \n    web_file = open_remote_url(url, **kwargs)\n    file_size = 0\n\n    if not web_file:\n        logger.error(\n            \"Remote file not found. Attempted URLs: {}\".format(url))\n        return\n\n    modified = is_remote_file_modified(web_file, destination)\n    if modified:\n        logger.info(\"Downloading: \" + web_file.url)\n        file_size = copy_remote_file(web_file, destination)\n    else:\n        logger.info(\"File up-to-date: \" + destination)\n\n    web_file.close()\n    return file_size", "docstring": "Download file  process:\n- Open the url\n- Check if it has been downloaded and it hanged.\n- Download it to  the destination folder.\n\nArgs:\n:urls: url to take the file.\n:destionation: place to store the downloaded file.", "source": "juraj-google-style"}
{"code": "def table(self, ref):\n        \n\n        try:\n            obj_number = ObjectNumber.parse(ref)\n            ds_obj_number = obj_number.as_dataset\n\n            dataset = self._db.dataset(ds_obj_number)  \n            table = dataset.table(ref)\n\n        except NotObjectNumberError:\n            q = self.database.session.query(Table)\\\n                .filter(Table.name == str(ref))\\\n                .order_by(Table.vid.desc())\n\n            table = q.first()\n\n        if not table:\n            raise NotFoundError(\"No table for ref: '{}'\".format(ref))\n        return table", "docstring": "Finds table by ref and returns it.\n\nArgs:\nref (str): id, vid (versioned id) or name of the table\n\nRaises:\nNotFoundError: if table with given ref not found.\n\nReturns:\norm.Table", "source": "juraj-google-style"}
{"code": "def update_remote_archive(self, save_uri, timeout=-1):\n        \n        return self._client.update_with_zero_body(uri=save_uri, timeout=timeout)", "docstring": "Saves a backup of the appliance to a previously-configured remote location.\n\nArgs:\nsave_uri (dict): The URI for saving the backup to a previously configured location.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Backup details.", "source": "juraj-google-style"}
{"code": "def _add_batched_ragged_partition(rt, partition, tensor_dict, feature_key, validate, outer_splits=None):\n    if isinstance(partition, RaggedFeature.UniformRowLength):\n        if rt.ragged_rank > 1:\n            length = ops.convert_to_tensor(partition.length, rt.row_splits.dtype)\n            return ragged_tensor.RaggedTensor.from_row_splits(ragged_tensor.RaggedTensor.from_uniform_row_length(rt.values, length, validate=validate), rt.row_splits \n        else:\n            reshaped_vals = array_ops.reshape(rt.values, array_ops.concat([[-1, partition.length], array_ops.shape(rt.values)[1:]], axis=0))\n            return ragged_tensor.RaggedTensor.from_row_splits(reshaped_vals, rt.row_splits \n    partition_t = tensor_dict[partition.key]\n    if partition_t.values.dtype != rt.row_splits.dtype:\n        partition_t = math_ops.cast(partition_t, rt.row_splits.dtype)\n    checks = []\n    if outer_splits is not None:\n        if validate:\n            checks.append(check_ops.assert_equal(outer_splits, partition_t.row_splits, message='Feature %s: values and partitions are not aligned' % feature_key))\n        partition_t = partition_t.values\n    with ops.control_dependencies(checks):\n        if isinstance(partition, (RaggedFeature.RowSplits, RaggedFeature.RowLimits)):\n            if isinstance(partition, RaggedFeature.RowSplits):\n                partition_t = partition_t[:, 1:]\n            adjusted_limits = partition_t.values + array_ops.repeat(rt.row_starts(), partition_t.row_lengths())\n            return partition_t.with_values(ragged_tensor.RaggedTensor.from_row_limits(rt.values, adjusted_limits, validate=validate))\n        elif isinstance(partition, RaggedFeature.RowStarts):\n            adjusted_starts = partition_t.values + array_ops.repeat(rt.row_starts(), partition_t.row_lengths())\n            return partition_t.with_values(ragged_tensor.RaggedTensor.from_row_starts(rt.values, adjusted_starts, validate=validate))\n        elif isinstance(partition, RaggedFeature.RowLengths):\n            return partition_t.with_values(ragged_tensor.RaggedTensor.from_row_lengths(rt.values, partition_t.values, validate=validate))\n        elif isinstance(partition, RaggedFeature.ValueRowIds):\n            nrows = math_ops.maximum(ragged_math_ops.reduce_max(partition_t + 1, axis=1), 0)\n            adjusted_rowids = partition_t.values + array_ops.repeat(math_ops.cumsum(nrows, exclusive=True), partition_t.row_lengths())\n            return ragged_tensor.RaggedTensor.from_row_lengths(ragged_tensor.RaggedTensor.from_value_rowids(rt.values, adjusted_rowids, validate=validate), nrows, validate=validate)\n        raise ValueError(f'Unhandled partition type {partition!r}')", "docstring": "Adds a batched ragged partition tensor to a batched ragged tensor.\n\nArgs:\nrt: A RaggedTensor with shape [batch_size, ...].\npartition: The partition configuration object.  Specifies the key that\nshould be used to look up the partition tensor (unless partition is a\nRaggedFeature.UniformRowLength, in which case there is no partition\ntensor).  The specified tensor must have shape [batch_size, ...].\ntensor_dict: The dictionary mapping keys to tensors.\nfeature_key: The name of the feature being parsed (for error messages).\nvalidate: Whether to validate that the values form a valid RaggedTensor.\nouter_splits: If not None, then we have two batch dimensions, and this\nis the row-splits for the collapsed batch dimension.  
Every partition\ntensor must have an outer row_splits that matches this value.\n\nReturns:\nA new RaggedTensor where each batch item `rt[i]` has been partitioned\nusing the `partition_t[i]`.", "source": "github-repos"}
{"code": "def _calc_dir_size(path):\n    \n    dir_size = 0\n    for (root, dirs, files) in os.walk(path):\n        for fn in files:\n            full_fn = os.path.join(root, fn)\n            dir_size += os.path.getsize(full_fn)\n\n    return dir_size", "docstring": "Calculate size of all files in `path`.\n\nArgs:\npath (str): Path to the directory.\n\nReturns:\nint: Size of the directory in bytes.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(PublishTransaction, self).__init__(*args, **kwargs)\n        self.Type = TransactionType.PublishTransaction", "docstring": "Create instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def aggregate(all_stats):\n    \n    aggregate_stats = {'means': [], 'standard_deviations': []}\n    for optimizer_key in all_stats:\n        \n        mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])\n        mean_stats['name'] = optimizer_key\n        aggregate_stats['means'].append(mean_stats)\n\n        \n        sd_stats = copy.deepcopy(\n            all_stats[optimizer_key]['standard_deviation'])\n        sd_stats['name'] = optimizer_key\n        aggregate_stats['standard_deviations'].append(sd_stats)\n\n    _add_mean_sd_to_stats(aggregate_stats, 'means')\n\n    return aggregate_stats", "docstring": "Combine stats for multiple optimizers to obtain one mean and sd.\n\nUseful for combining stats for the same optimizer class and multiple problems.\n\nArgs:\nall_stats: dict; output from compare.", "source": "juraj-google-style"}
{"code": "def get_intermediate_dirs(fs, dir_path):\n    \n    \n    intermediates = []\n    with fs.lock():\n        for path in recursepath(abspath(dir_path), reverse=True):\n            try:\n                resource = fs.getinfo(path)\n            except ResourceNotFound:\n                intermediates.append(abspath(path))\n            else:\n                if resource.is_dir:\n                    break\n                raise errors.DirectoryExpected(dir_path)\n    return intermediates[::-1][:-1]", "docstring": "Get a list of non-existing intermediate directories.\n\nArguments:\nfs (FS): A filesystem instance.\ndir_path (str): A path to a new directory on the filesystem.\n\nReturns:\nlist: A list of non-existing paths.\n\nRaises:\n~fs.errors.DirectoryExpected: If a path component\nreferences a file and not a directory.", "source": "juraj-google-style"}
{"code": "def traverse_pagination(response, endpoint):\n    \n    results = response.get('results', [])\n\n    next_page = response.get('next')\n    while next_page:\n        querystring = parse_qs(urlparse(next_page).query, keep_blank_values=True)\n        response = endpoint.get(**querystring)\n        results += response.get('results', [])\n        next_page = response.get('next')\n\n    return results", "docstring": "Traverse a paginated API response.\n\nExtracts and concatenates \"results\" (list of dict) returned by DRF-powered\nAPIs.\n\nArguments:\nresponse (Dict): Current response dict from service API\nendpoint (slumber Resource object): slumber Resource object from edx-rest-api-client\n\nReturns:\nlist of dict.", "source": "juraj-google-style"}
{"code": "def _GetEventIdentifiers(self, event):\n    attributes = []\n    attribute_string = 'data_type: {0:s}'.format(event.data_type)\n    attributes.append(attribute_string)\n    for (attribute_name, attribute_value) in sorted(event.GetAttributes()):\n        if (attribute_name in self._IDENTIFIER_EXCLUDED_ATTRIBUTES):\n            continue\n        if (not attribute_value):\n            continue\n        if (attribute_name == 'pathspec'):\n            attribute_value = attribute_value.comparable\n        elif isinstance(attribute_value, dict):\n            attribute_value = sorted(attribute_value.items())\n        elif isinstance(attribute_value, set):\n            attribute_value = sorted(list(attribute_value))\n        elif isinstance(attribute_value, py2to3.BYTES_TYPE):\n            attribute_value = repr(attribute_value)\n        try:\n            attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)\n        except UnicodeDecodeError:\n            logger.error('Failed to decode attribute {0:s}'.format(attribute_name))\n        attributes.append(attribute_string)\n    if (event.timestamp_desc in ('atime', 'ctime', 'crtime', 'mtime', definitions.TIME_DESCRIPTION_LAST_ACCESS, definitions.TIME_DESCRIPTION_CHANGE, definitions.TIME_DESCRIPTION_CREATION, definitions.TIME_DESCRIPTION_MODIFICATION)):\n        macb_group_identifier = ', '.join(attributes)\n    else:\n        macb_group_identifier = None\n    attributes.insert(0, event.timestamp_desc)\n    content_identifier = ', '.join(attributes)\n    return (macb_group_identifier, content_identifier)", "docstring": "Retrieves different identifiers of the event.\n\nEvery event contains event data, which consists of attributes and values.\nThese attributes and values can be represented as a string and used for\nsorting and uniquely identifying events. This function determines multiple\nidentifiers:\n* an identifier of the attributes and values without the timestamp\ndescription (or usage). This is referred to as the MACB group\nidentifier.\n* an identifier of the attributes and values including the timestamp\ndescription (or usage). This is referred to as the event content\nidentifier.\n\nThe identifier without the timestamp description can be used to group\nevents that have the same MACB (modification, access, change, birth)\ntimestamps. The PsortEventHeap will store these events individually and\nrelies on PsortMultiProcessEngine to do the actual grouping of events.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple: containing:\n\nstr: identifier of the event MACB group or None if the event cannot\nbe grouped.\nstr: identifier of the event content.", "source": "codesearchnet"}
{"code": "def __init__(self, url, username, password, auth_header=DEFAULT_AUTH_HEADER, cafile=None):\n        \n        self._url = url\n        self._username = username\n        self._password = password\n        self._auth_header = auth_header\n        self._cafile = cafile", "docstring": "Constructor\n\nArgs:\nurl: API url endpoint\nusername: API username or real username\npassword: API token or user password\nauth_header: API HTTP header", "source": "juraj-google-style"}
{"code": "def number_check(check, return_number=True):\n    \n    try:\n        int(check)\n        good = True\n    except ValueError:\n        LOGGER.critical('Function number_check ValueError {item}'.format(item=check))\n        good = False\n    if return_number:\n        while not good:\n            print(\"That is not a number.\")\n            print(\"Please try again.\")\n            check = input(\"Please enter a number?: \")\n            try:\n                int(check)\n                good = True\n            except ValueError:\n                LOGGER.critical('Function number_check ValueError {item}'.format(item=check))\n                good = False\n        return check\n    else:\n        return good", "docstring": "Function to verify item entered is a number\nArgs:\ncheck: Thing to check for a number\nreturn_number: Set to True it returns a number value, set to False returns True or False\n\nReturns: Check return_number for return options", "source": "juraj-google-style"}
{"code": "def iter_replace_strings(replacements):\n        \n        def function_iter_replace_strings(iterable_strings):\n            \n            for string in iterable_strings:\n                yield reduce((lambda s, kv: s.replace(*kv)),\n                             replacements.items(),\n                             string)\n\n        return function_iter_replace_strings", "docstring": "Create a function that uses replacement pairs to process a string.\n\nThe returned function takes an iterator and yields on each processed\nline.\n\nArgs:\nreplacements: Dict containing 'find_string': 'replace_string' pairs\n\nReturns:\nfunction with signature: iterator of strings = function(iterable)", "source": "juraj-google-style"}
{"code": "def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):\n    self._counter = self._id_counter()\n    self._conn = socket.create_connection(('localhost', self.host_port), _SOCKET_CONNECTION_TIMEOUT)\n    self._conn.settimeout(_SOCKET_READ_TIMEOUT)\n    self._client = self._conn.makefile(mode='brw')\n    resp = self._cmd(cmd, uid)\n    if (not resp):\n        raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n    result = json.loads(str(resp, encoding='utf8'))\n    if result['status']:\n        self.uid = result['uid']\n    else:\n        self.uid = UNKNOWN_UID", "docstring": "Opens a connection to a JSON RPC server.\n\nOpens a connection to a remote client. The connection attempt will time\nout if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each\nsubsequent operation over this socket will time out after\n_SOCKET_READ_TIMEOUT seconds as well.\n\nArgs:\nuid: int, The uid of the session to join, or UNKNOWN_UID to start a\nnew session.\ncmd: JsonRpcCommand, The command to use for creating the connection.\n\nRaises:\nIOError: Raised when the socket times out from io error\nsocket.timeout: Raised when the socket waits to long for connection.\nProtocolError: Raised when there is an error in the protocol.", "source": "codesearchnet"}
{"code": "async def find_deleted(self, seq_set: SequenceSet,\n                           selected: SelectedMailbox) -> Sequence[int]:\n        \n        session_flags = selected.session_flags\n        return [msg.uid async for _, msg in self.find(seq_set, selected)\n                if Deleted in msg.get_flags(session_flags)]", "docstring": "Return all the active message UIDs that have the ``\\\\Deleted`` flag.\n\nArgs:\nseq_set: The sequence set of the possible messages.\nselected: The selected mailbox session.", "source": "juraj-google-style"}
{"code": "def trace(name, *trace_args):\n\n    def decorator(f):\n\n        def wrapper(*args, **kwargs):\n            t = tracer(name)\n            if t.getEffectiveLevel() < logging.DEBUG:\n                return f(*args, **kwargs)\n            argspec = inspect.getfullargspec(f)\n            t.debug('%s: {', f.__name__)\n            for arg in trace_args:\n                if isinstance(arg, int):\n                    argname = argspec.args[arg]\n                    val = args[arg]\n                else:\n                    argname = arg\n                    val = kwargs[arg]\n                t.debug('%s: %s = %s', f.__name__, argname, show(val))\n            ret = f(*args, **kwargs)\n            t.debug('%s: -> %s', f.__name__, show(ret))\n            t.debug('%s: }', f.__name__)\n            return ret\n        return wrapper\n    return decorator", "docstring": "Record args and return value for a function call.\n\nThe trace is of the form\nfunction name: {\nfunction name: arg = value\nfunction name: arg = value\n...\nfunction name: -> return\nfunction name: }\n\nThis will let us write tools to pretty print the traces with indentation etc.\n\nArgs:\nname: module name, usually `__name__`\n*trace_args: function arguments to log\n\nReturns:\na decorator", "source": "github-repos"}
{"code": "class SquadResult:\n\n    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):\n        self.start_logits = start_logits\n        self.end_logits = end_logits\n        self.unique_id = unique_id\n        if start_top_index:\n            self.start_top_index = start_top_index\n            self.end_top_index = end_top_index\n            self.cls_logits = cls_logits", "docstring": "Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.\n\nArgs:\nunique_id: The unique identifier corresponding to that example.\nstart_logits: The logits corresponding to the start of the answer\nend_logits: The logits corresponding to the end of the answer", "source": "github-repos"}
{"code": "def dump(self, output, close_after_write=True):\n    try:\n        output.write\n        self.stream = output\n    except AttributeError:\n        self.stream = io.open(output, 'w', encoding='utf-8')\n    try:\n        self.write_table()\n    finally:\n        if close_after_write:\n            self.stream.close()\n            self.stream = sys.stdout", "docstring": "Write data to the output with tabular format.\n\nArgs:\noutput (file descriptor or str):\nfile descriptor or path to the output file.\nclose_after_write (bool, optional):\nClose the output after write.\nDefaults to |True|.", "source": "codesearchnet"}
{"code": "def search(self, patterns, start=30, limit=1000, include_category=False):\n    api_name = 'opendns-patterns'\n    fmt_url_path = u'search/{0}'\n    start = '-{0}days'.format(start)\n    include_category = str(include_category).lower()\n    query_params = {'start': start, 'limit': limit, 'includecategory': include_category}\n    return self._multi_get(api_name, fmt_url_path, patterns, query_params)", "docstring": "Performs pattern searches against the Investigate database.\n\nArgs:\npatterns: An enumerable of RegEx domain patterns to search for\nstart:   How far back results extend from in days (max is 30)\nlimit:   Number of results to show (max is 1000)\ninclude_category: Include OpenDNS security categories\nReturns:\nAn enumerable of matching domain strings", "source": "codesearchnet"}
{"code": "def IsSocket(self):\n    if (self._stat_object is None):\n        self._stat_object = self._GetStat()\n    if (self._stat_object is not None):\n        self.entry_type = self._stat_object.type\n    return (self.entry_type == definitions.FILE_ENTRY_TYPE_SOCKET)", "docstring": "Determines if the file entry is a socket.\n\nReturns:\nbool: True if the file entry is a socket.", "source": "codesearchnet"}
{"code": "def Trim(lst, limit):\n  \n  limit = max(0, limit)\n\n  clipping = lst[limit:]\n  del lst[limit:]\n  return clipping", "docstring": "Trims a given list so that it is not longer than given limit.\n\nArgs:\nlst: A list to trim.\nlimit: A maximum number of elements in the list after trimming.\n\nReturns:\nA suffix of the input list that was trimmed.", "source": "juraj-google-style"}
{"code": "def process_command(self, command):\n        \n        result = ScubaContext()\n        result.script = None\n        result.image = self.image\n        result.entrypoint = self.entrypoint\n        result.environment = self.environment.copy()\n\n        if command:\n            alias = self.aliases.get(command[0])\n            if not alias:\n                \n                result.script = [shell_quote_cmd(command)]\n            else:\n                \n                \n                if alias.image:\n                    result.image = alias.image\n                if alias.entrypoint is not None:\n                    result.entrypoint = alias.entrypoint\n\n                \n                if alias.environment:\n                    result.environment.update(alias.environment)\n\n                if len(alias.script) > 1:\n                    \n                    \n                    if len(command) > 1:\n                        raise ConfigError('Additional arguments not allowed with multi-line aliases')\n                    result.script = alias.script\n\n                else:\n                    \n                    \n                    command.pop(0)\n                    result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]\n\n            result.script = flatten_list(result.script)\n\n        return result", "docstring": "Processes a user command using aliases\n\nArguments:\ncommand     A user command list (e.g. argv)\n\nReturns: A ScubaContext object with the following attributes:\nscript: a list of command line strings\nimage: the docker image name to use", "source": "juraj-google-style"}
{"code": "def _from_c_op(cls: type[OperationType], c_op, g) -> OperationType:\n    self = Operation(c_op, SymbolicTensor)\n    self._init(g)\n    return self", "docstring": "Create an Operation from a TF_Operation.\n\nFor internal use only: This is useful for creating Operation for ops\nindirectly created by C API methods, e.g. the ops created by\nTF_ImportGraphDef.\n\nArgs:\nc_op: a TF_Operation.\ng: A Graph.\n\nReturns:\nan Operation object.", "source": "github-repos"}
{"code": "def execute(cmd, shell=False, poll_period=1.0, catch_out=False):\n    \n    \n    log = logging.getLogger(__name__)\n    log.debug(\"Starting: %s\", cmd)\n\n    stdout = \"\"\n    stderr = \"\"\n\n    if not shell and isinstance(cmd, string_types):\n        cmd = shlex.split(cmd)\n\n    if catch_out:\n        process = subprocess.Popen(\n            cmd,\n            shell=shell,\n            stderr=subprocess.PIPE,\n            stdout=subprocess.PIPE,\n            close_fds=True)\n    else:\n        process = subprocess.Popen(cmd, shell=shell, close_fds=True)\n\n    stdout, stderr = process.communicate()\n    if stderr:\n        log.error(\"There were errors:\\n%s\", stderr)\n\n    if stdout:\n        log.debug(\"Process output:\\n%s\", stdout)\n    returncode = process.returncode\n    log.debug(\"Process exit code: %s\", returncode)\n    return returncode, stdout, stderr", "docstring": "Execute UNIX command and wait for its completion\n\nArgs:\ncmd (str or list): command to execute\nshell (bool): invoke inside shell environment\ncatch_out (bool): collect process' output\n\nReturns:\nreturncode (int): process return code\nstdout (str): collected process stdout (only if catch_out set to true)\nstderr (str): collected process stderr (only if catch_out set to true)", "source": "juraj-google-style"}
{"code": "def freeze_parameter(self, name):\n        \n        i = self.get_parameter_names(include_frozen=True).index(name)\n        self.unfrozen_mask[i] = False", "docstring": "Freeze a parameter by name\n\nArgs:\nname: The name of the parameter", "source": "juraj-google-style"}
{"code": "def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if position_ids is None:\n        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    final_embeddings = inputs_embeds + position_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def get_dict_with_chain(self, chain, only_keys=None, chain_keys=None, exclude_attributes=None, df_format=False):\n        \n\n        \n        if not only_keys:\n            keys = list(self.__dict__.keys())\n        else:\n            keys = ssbio.utils.force_list(only_keys)\n\n        \n        if exclude_attributes:\n            exclude_attributes = ssbio.utils.force_list(exclude_attributes)\n            for x in exclude_attributes:\n                if x in keys:\n                    keys.remove(x)\n        else:\n            exclude_attributes = []\n\n        exclude_attributes.extend(['mapped_chains', 'chains'])\n\n        final_dict = {k: v for k, v in Object.get_dict(self, only_attributes=keys, exclude_attributes=exclude_attributes,\n                                                       df_format=df_format).items()}\n\n        chain_prop = self.chains.get_by_id(chain)\n        \n        if not chain_keys:\n            chain_keys = [x for x in chain_prop.get_dict().keys() if x not in final_dict]\n\n        chain_dict = chain_prop.get_dict(only_attributes=chain_keys, df_format=df_format)\n        final_dict.update(chain_dict)\n\n        return final_dict", "docstring": "get_dict method which incorporates attributes found in a specific chain. Does not overwrite any attributes\nin the original StructProp.\n\nArgs:\nchain:\nonly_keys:\nchain_keys:\nexclude_attributes:\ndf_format:\n\nReturns:\ndict: attributes of StructProp + the chain specified", "source": "juraj-google-style"}
{"code": "def walk(self, action, user_data=None):\n        \n        action(self.index_file, self.__root, 0, user_data)\n        self.__do_walk(self.__root, 1, action, user_data)", "docstring": "Walk the hierarchy, applying action to each filename.\n\nArgs:\naction: callable, the callable to invoke for each filename,\nwill be invoked with the filename, the subfiles, and\nthe level in the sitemap.", "source": "juraj-google-style"}
{"code": "def record_queue_metrics(self, active_requests: int, waiting_requests: int) -> None:\n    if not _has_opentelemetry:\n        return\n    try:\n        self.active_requests_gauge.set(active_requests)\n        self.waiting_requests_gauge.set(waiting_requests)\n        logger.debug(f'Queue metrics: {active_requests} active requests, {waiting_requests} waiting requests')\n    except Exception as e:\n        logger.warning(f'Failed to record queue metrics: {e}')", "docstring": "Record metrics about active and waiting requests.\n\nArgs:\nactive_requests: Number of active requests\nwaiting_requests: Number of waiting requests", "source": "github-repos"}
{"code": "def update_case(case_obj, existing_case):\n    \n    variant_nrs = ['nr_variants', 'nr_sv_variants']\n    individuals = [('individuals','_inds'), ('sv_individuals','_sv_inds')]\n    \n    updated_case = deepcopy(existing_case)\n    \n    for i,file_name in enumerate(['vcf_path','vcf_sv_path']):\n        variant_type = 'snv'\n        if file_name == 'vcf_sv_path':\n            variant_type = 'sv'\n        if case_obj.get(file_name):\n            if updated_case.get(file_name):\n                LOG.warning(\"VCF of type %s already exists in case\", variant_type)\n                raise CaseError(\"Can not replace VCF in existing case\")\n            else:\n                updated_case[file_name] = case_obj[file_name]\n                updated_case[variant_nrs[i]] = case_obj[variant_nrs[i]]\n                updated_case[individuals[i][0]] = case_obj[individuals[i][0]]\n                updated_case[individuals[i][1]] = case_obj[individuals[i][1]]\n\n    return updated_case", "docstring": "Update an existing case\n\nThis will add paths to VCF files, individuals etc\n\nArgs:\ncase_obj(models.Case)\nexisting_case(models.Case)\n\nReturns:\nupdated_case(models.Case): Updated existing case", "source": "juraj-google-style"}
{"code": "def helper(*commands):\n\n    def decorated_func(f):\n        f.__help_targets__ = list(commands)\n        return f\n    return decorated_func", "docstring": "Decorate a function to be the helper function of commands.\n\nArguments:\ncommands: Names of command that should trigger this function object.\n\n---------------------------\nInterface of helper methods:\n\n@helper('some-command')\ndef help_foo(self, args):\n'''\nArguments:\nargs: A list of arguments.\n\nReturns:\nA string that is the help message.\n'''\npass", "source": "codesearchnet"}
{"code": "def add_error(self, error):\n    self._count += 1\n    self._record.add_error(('expect@%s+%s' % (time.time(), self._count)), error)", "docstring": "Record an error from expect APIs.\n\nThis method generates a position stamp for the expect. The stamp is\ncomposed of a timestamp and the number of errors recorded so far.\n\nArgs:\nerror: Exception or signals.ExceptionRecord, the error to add.", "source": "codesearchnet"}
{"code": "def _parse_flowcontrol_receive(self, config):\n    value = 'off'\n    match = re.search('flowcontrol receive (\\\\w+)$', config, re.M)\n    if match:\n        value = match.group(1)\n    return dict(flowcontrol_receive=value)", "docstring": "Scans the config block and returns the flowcontrol receive value\n\nArgs:\nconfig (str): The interface config block to scan\n\nReturns:\ndict: Returns a dict object with the flowcontrol receive value\nretrieved from the config block.  The returned dict object\nis intended to be merged into the interface resource dict", "source": "codesearchnet"}
{"code": "def install_json_params(self, ij=None):\n    if ((self._install_json_params is None) or (ij is not None)):\n        self._install_json_params = {}\n        if (ij is None):\n            ij = self.install_json\n        for p in (ij.get('params') or []):\n            self._install_json_params.setdefault(p.get('name'), p)\n    return self._install_json_params", "docstring": "Return install.json params in a dict with name param as key.\n\nArgs:\nij (dict, optional): Defaults to None. The install.json contents.\n\nReturns:\ndict: A dictionary containing the install.json input params with name as key.", "source": "codesearchnet"}
{"code": "def trk50(msg):\n    d = hex2bin(data(msg))\n    if (d[11] == '0'):\n        return None\n    sign = int(d[12])\n    value = bin2int(d[13:23])\n    if sign:\n        value = (value - 1024)\n    trk = ((value * 90.0) / 512.0)\n    if (trk < 0):\n        trk = (360 + trk)\n    return round(trk, 3)", "docstring": "True track angle, BDS 5,0 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS50) string\n\nReturns:\nfloat: angle in degrees to true north (from 0 to 360)", "source": "codesearchnet"}
{"code": "def add_item(name, command, system_wide=False):\n\n\t\n\n\tdesktop_env = system.get_name()\n\n\tif os.path.isfile(command):\n\t\tcommand_is_file = True\n\n\t\tif not desktop_env == 'windows':\n\t\t\t\n\t\t\tsp.Popen(['chmod +x %s' % command], shell=True)\n\n\tif desktop_env == 'windows':\n\t\timport winreg\n\t\tif system_wide:\n\t\t\tstartup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n\n\t\telse:\n\t\t\tstartup_dir = os.path.join(get_config_dir()[0], 'Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n\n\t\tif not command_is_file:\n\t\t\twith open(os.path.join(startup_dir, name + '.bat'), 'w') as f:\n\t\t\t\tf.write(command)\n\t\telse:\n\t\t\tshutil.copy(command, startup_dir)\n\n\telif desktop_env == 'mac':\n\t\tsp.Popen(['launchctl submit -l %s -- %s'] % (name, command), shell=True)\n\t\t\n\t\t\n\n\telse:\n\t\t\n\n\t\tif desktop_env == 'unknown':\n\t\t\t\n\t\t\tif system_wide:\n\t\t\t\tlogin_file = '/etc/profile'\n\t\t\telse:\n\t\t\t\tlogin_file = os.path.expanduser('~/.profile')\n\n\t\t\twith open(login_file, 'a') as f:\n\t\t\t\tf.write(command)\n\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdesktop_file_name = name + '.desktop'\n\n\t\t\t\tstartup_file = os.path.join(get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name)\n\n\t\t\t\t\n\t\t\t\tdesktop_str = desktopfile.construct(name=name, exec_=command, additional_opts={'X-GNOME-Autostart-enabled': 'true'})\n\n\t\t\t\twith open(startup_file, 'w') as f:\n\t\t\t\t\tf.write(desktop_str)\n\t\t\texcept:\n\t\t\t\tpass", "docstring": "Adds a program to startup.\n\nAdds a program to user startup.\n\nArgs:\nname        (str) : The name of the startup entry.\ncommand     (str) : The command to run.\nsystem_wide (bool): Add to system-wide startup.\n\nNote:\n``system_wide`` requires superuser/admin privileges.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 moments: Iterable[ops.Moment] = (),\n                 device: devices.Device = devices.UnconstrainedDevice) -> None:\n        \n        self._moments = list(moments)\n        self._device = device\n        self._device.validate_circuit(self)", "docstring": "Initializes a circuit.\n\nArgs:\nmoments: The initial list of moments defining the circuit.\ndevice: Hardware that the circuit should be able to run on.", "source": "juraj-google-style"}
{"code": "def _normalize_edge(self, edge: EDGE) -> EDGE:\n\n    def lower(n: GridQubit, m: GridQubit) -> bool:\n        return ((n.row < m.row) or ((n.row == m.row) and (n.col < m.col)))\n    (n1, n2) = edge\n    return ((n1, n2) if lower(n1, n2) else (n2, n1))", "docstring": "Gives unique representative of the edge.\n\nTwo edges are equivalent if they form an edge between the same nodes.\nThis method returns representative of this edge which can be compared\nusing equality operator later.\n\nArgs:\nedge: Edge to normalize.\n\nReturns:\nNormalized edge with lexicographically lower node on the first\nposition.", "source": "codesearchnet"}
{"code": "def _verify_parsed_token(parsed_token, issuers, audiences, allowed_client_ids, is_legacy_google_auth=True):\n    if (parsed_token.get('iss') not in issuers):\n        _logger.warning('Issuer was not valid: %s', parsed_token.get('iss'))\n        return False\n    aud = parsed_token.get('aud')\n    if (not aud):\n        _logger.warning('No aud field in token')\n        return False\n    cid = parsed_token.get('azp')\n    audience_allowed = ((aud in audiences) or (is_legacy_google_auth and (aud == cid)))\n    if (not audience_allowed):\n        _logger.warning('Audience not allowed: %s', aud)\n        return False\n    if is_legacy_google_auth:\n        if (list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK):\n            _logger.warning(\"Client ID check can't be skipped for ID tokens.  Id_token cannot be verified.\")\n            return False\n        elif ((not cid) or (cid not in allowed_client_ids)):\n            _logger.warning('Client ID is not allowed: %s', cid)\n            return False\n    if ('email' not in parsed_token):\n        return False\n    return True", "docstring": "Verify a parsed user ID token.\n\nArgs:\nparsed_token: The parsed token information.\nissuers: A list of allowed issuers\naudiences: The allowed audiences.\nallowed_client_ids: The allowed client IDs.\n\nReturns:\nTrue if the token is verified, False otherwise.", "source": "codesearchnet"}
{"code": "def get_path(self, url):\n    cache_path = self._url_to_path(url)\n    if os.path.exists(cache_path):\n        return cache_path\n    return None", "docstring": "Returns the path of a cached resource.\n\nArgs:\nurl: The url of the resource\n\nReturns:\nThe path to the cached resource or None if not in the cache", "source": "codesearchnet"}
{"code": "def check(self, locator=None, allow_label_click=None, **kwargs):\n        \n\n        self._check_with_label(\n            \"checkbox\", True, locator=locator, allow_label_click=allow_label_click, **kwargs)", "docstring": "Find a check box and mark it as checked. The check box can be found via name, id, or label\ntext. ::\n\npage.check(\"German\")\n\nArgs:\nlocator (str, optional): Which check box to check.\nallow_label_click (bool, optional): Attempt to click the label to toggle state if\nelement is non-visible. Defaults to :data:`capybara.automatic_label_click`.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.", "source": "juraj-google-style"}
{"code": "def load(self, filename, offset):\n        \n        self.offset = offset\n        self.filename = filename\n\n        self.bootsector = BootSector(\n            filename=filename,\n            length=NTFS_BOOTSECTOR_SIZE,\n            offset=self.offset)\n\n        self.mft_table = MftTable(\n            mft_entry_size=self.bootsector.mft_record_size,\n            filename=self.filename,\n            offset=self.mft_table_offset\n        )\n\n        self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)\n\n        self._load_volume_information()", "docstring": "Loads NTFS volume information\n\nArgs:\nfilename (str): Path to file/device to read the volume \\\ninformation from.\noffset (uint): Valid NTFS partition offset from the beginning \\\nof the file/device.\n\nRaises:\nIOError: If source file/device does not exist or is not readable", "source": "juraj-google-style"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(Boolean, self).write(ostream, kmip_version=kmip_version)\n    self.write_value(ostream, kmip_version=kmip_version)", "docstring": "Write the encoding of the Boolean object to the output stream.\n\nArgs:\nostream (Stream): A buffer to contain the encoded bytes of a\nBoolean object. Usually a BytearrayStream object. Required.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def update(self, friendly_name=None, description=None, query=None):\n    \n    self._table._load_info()\n    if query is not None:\n      if isinstance(query, _query.Query):\n        query = query.sql\n      self._table._info['view'] = {'query': query}\n    self._table.update(friendly_name=friendly_name, description=description)", "docstring": "Selectively updates View information.\n\nAny parameters that are None (the default) are not applied in the update.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\nquery: if not None, a new query string for the View.", "source": "juraj-google-style"}
{"code": "def remove_node_by_value(self, value):\n    self.node_list = [node for node in self.node_list if (node.value != value)]\n    for node in self.node_list:\n        node.link_list = [link for link in node.link_list if (link.target.value != value)]", "docstring": "Delete all nodes in ``self.node_list`` with the value ``value``.\n\nArgs:\nvalue (Any): The value to find and delete owners of.\n\nReturns: None\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> graph.remove_node_by_value('One')\n>>> len(graph.node_list)\n0", "source": "codesearchnet"}
{"code": "def exponential_moving_average(self, var, avg_var=None, decay=0.999, ignore_nan=False):\n    with self._g.as_default():\n        if ((decay < 0) or (decay >= 1.0)):\n            raise ValueError(('Decay is %5.2f, but has to be in [0, 1).' % decay))\n        if (avg_var is None):\n            avg_name = ('%s_average' % _bare_var_name(var))\n            with tf.control_dependencies(None):\n                with tf.name_scope((avg_name + '/Initializer/')):\n                    if isinstance(var, tf.Variable):\n                        init_val = var.initialized_value()\n                    elif var.get_shape().is_fully_defined():\n                        init_val = tf.constant(0, shape=var.get_shape(), dtype=var.dtype.base_dtype)\n                    else:\n                        init_val = tf.constant(0, dtype=var.dtype.base_dtype)\n                avg_var = tf.Variable(init_val, name=avg_name, trainable=False)\n        num_updates = tf.cast(self.global_step, tf.float32)\n        decay = tf.minimum(decay, tf.maximum(0.9, ((1.0 + num_updates) / (10.0 + num_updates))))\n        with tf.device(avg_var.device):\n            if ignore_nan:\n                var = tf.where(tf.is_finite(var), var, avg_var)\n            if var.get_shape().is_fully_defined():\n                avg_update = tf.assign_sub(avg_var, ((1 - decay) * (avg_var - var)))\n            else:\n                avg_update = tf.assign(avg_var, (avg_var - ((1 - decay) * (avg_var - var))), validate_shape=False)\n        self._g.add_to_collection(GraphKeys.UPDATE_OPS, avg_update)\n        return avg_update", "docstring": "Calculates the exponential moving average.\n\nTODO(): check if this implementation of moving average can now\nbe replaced by tensorflows implementation.\n\nAdds a variable to keep track of the exponential moving average and adds an\nupdate operation to the bookkeeper. The name of the variable is\n'%s_average' % name prefixed with the current variable scope.\n\nArgs:\nvar: The variable for which a moving average should be computed.\navg_var: The variable to set the average into, if None create a zero\ninitialized one.\ndecay: How much history to use in the moving average.\nHigher, means more history values [0, 1) accepted.\nignore_nan: If the value is NaN or Inf, skip it.\nReturns:\nThe averaged variable.\nRaises:\nValueError: if decay is not in [0, 1).", "source": "codesearchnet"}
{"code": "def __init__(self, output_mediator):\n    \n    super(LinearOutputModule, self).__init__(output_mediator)\n    self._output_writer = None", "docstring": "Initializes a linear output module.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.\n\nRaises:\nValueError: if the output writer is missing.", "source": "juraj-google-style"}
{"code": "def housekeeping(self, **kwargs):\n        \n        path = '/projects/%s/housekeeping' % self.get_id()\n        self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Start the housekeeping task.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabHousekeepingError: If the server failed to perform the\nrequest", "source": "juraj-google-style"}
{"code": "def save_forensic_reports_to_splunk(self, forensic_reports):\n    logger.debug('Saving forensic reports to Splunk')\n    if (type(forensic_reports) == dict):\n        forensic_reports = [forensic_reports]\n    if (len(forensic_reports) < 1):\n        return\n    json_str = ''\n    for report in forensic_reports:\n        data = self._common_data.copy()\n        data['sourcetype'] = 'dmarc:forensic'\n        timestamp = human_timestamp_to_timestamp(report['arrival_date_utc'])\n        data['time'] = timestamp\n        data['event'] = report.copy()\n        json_str += '{0}\\n'.format(json.dumps(data))\n    if (not self.session.verify):\n        logger.debug('Skipping certificate verification for Splunk HEC')\n    try:\n        response = self.session.post(self.url, data=json_str, timeout=self.timeout)\n        response = response.json()\n    except Exception as e:\n        raise SplunkError(e.__str__())\n    if (response['code'] != 0):\n        raise SplunkError(response['text'])", "docstring": "Saves forensic DMARC reports to Splunk\n\nArgs:\nforensic_reports (list):  A list of forensic report dictionaries\nto save in Splunk", "source": "codesearchnet"}
{"code": "def main(args=None):\n    \n    if args is None:\n        args = sys.argv[1:]\n\n    parser = create_parser()\n    args = parser.parse_args(args)\n\n    if args.verbose >= 2:\n        level = logging.DEBUG\n    elif args.verbose >= 1:\n        level = logging.INFO\n    else:\n        level = logging.WARNING\n\n    logging.basicConfig(level=level)\n\n    try:\n        args.command(args)\n    except pylink.JLinkException as e:\n        sys.stderr.write('Error: %s%s' % (str(e), os.linesep))\n        return 1\n\n    return 0", "docstring": "Main command-line interface entrypoint.\n\nRuns the given subcommand or argument that were specified.  If not given a\n``args`` parameter, assumes the arguments are passed on the command-line.\n\nArgs:\nargs (list): list of command-line arguments\n\nReturns:\nZero on success, non-zero otherwise.", "source": "juraj-google-style"}
{"code": "def increase_volume(percentage):\n\t\n\n\tif percentage > 100 or percentage < 0:\n\t\traise ValueError('percentage must be an integer between 0 and 100')\n\n\tif system.get_name() == 'windows':\n\t\t\n\t\t\n\t\tpass\n\n\telif system.get_name() == 'mac':\n\t\tvolume_int = percentage / 10\n\t\told_volume = get()\n\n\t\tnew_volume = old_volume + volume_int\n\n\t\tif new_volume > 10:\n\t\t\tnew_volume = 10\n\n\t\tset_volume(new_volume * 10)\n\n\telse:\n\t\t\n\t\tformatted = '%d%%+' % percentage\n\t\t\n\n\t\tsp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()", "docstring": "Increase the volume.\n\nIncrease the volume by a given percentage.\n\nArgs:\npercentage (int): The percentage (as an integer between 0 and 100) to increase the volume by.\n\nRaises:\nValueError: if the percentage is >100 or <0.", "source": "juraj-google-style"}
{"code": "class PromptDepthAnythingPreActResidualLayer(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.activation1 = nn.ReLU()\n        self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True)\n        self.activation2 = nn.ReLU()\n        self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True)\n\n    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n        residual = hidden_state\n        hidden_state = self.activation1(hidden_state)\n        hidden_state = self.convolution1(hidden_state)\n        hidden_state = self.activation2(hidden_state)\n        hidden_state = self.convolution2(hidden_state)\n        return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\nconfig (`[PromptDepthAnythingConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def file_name(self, file_name):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    self._data['fileName'] = file_name\n    request = {'fileName': file_name}\n    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Updates the file_name.\n\nArgs:\nfile_name:", "source": "codesearchnet"}
{"code": "def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):\n    raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can call `update_candidate_strategy`.')", "docstring": "Updates the candidate generation strategy based on the outcomes.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\nscores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):\nPrediction scores of a language modeling head. These can be logits for each vocabulary when not using\nbeam search or log softmax for each vocabulary token when using beam search\nnum_matches (`int`):\nThe number of matches between the candidate sequences and the model predictions.", "source": "github-repos"}
{"code": "def create_temp_creds(client_id, access_token, start=None, expires=None, scopes=None, name=None):\n    now = arrow.utcnow().replace(minutes=(- 10))\n    start = (start or now.datetime)\n    expires = (expires or now.replace(days=31).datetime)\n    scopes = (scopes or ['assume:project:taskcluster:worker-test-scopes'])\n    creds = createTemporaryCredentials(client_id, access_token, start, expires, scopes, name=name)\n    for (key, value) in creds.items():\n        try:\n            creds[key] = value.decode('utf-8')\n        except (AttributeError, UnicodeDecodeError):\n            pass\n    return creds", "docstring": "Request temp TC creds with our permanent creds.\n\nArgs:\nclient_id (str): the taskcluster client_id to use\naccess_token (str): the taskcluster access_token to use\nstart (str, optional): the datetime string when the credentials will\nstart to be valid.  Defaults to 10 minutes ago, for clock skew.\nexpires (str, optional): the datetime string when the credentials will\nexpire.  Defaults to 31 days after 10 minutes ago.\nscopes (list, optional): The list of scopes to request for the temp\ncreds.  Defaults to ['assume:project:taskcluster:worker-test-scopes', ]\nname (str, optional): the name to associate with the creds.\n\nReturns:\ndict: the temporary taskcluster credentials.", "source": "codesearchnet"}
{"code": "def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):\n    point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian)\n    self.append(specie, point, coords_are_cartesian=coords_are_cartesian)\n    self.append(specie, point2, coords_are_cartesian=coords_are_cartesian)", "docstring": "Class method for adding a site at a specified point in a slab.\nWill add the corresponding site on the other side of the\nslab to maintain equivalent surfaces.\n\nArg:\nspecie (str): The specie to add\npoint (coords): The coordinate of the site in the slab to add.\ncoords_are_cartesian (bool): Is the point in cartesian coordinates\n\nReturns:\n(Slab): The modified slab", "source": "codesearchnet"}
{"code": "def ConsumeIdentifier(self):\n    result = self.token\n    if (not self._IDENTIFIER.match(result)):\n        raise self._ParseError('Expected identifier.')\n    self.NextToken()\n    return result", "docstring": "Consumes protocol message field identifier.\n\nReturns:\nIdentifier string.\n\nRaises:\nParseError: If an identifier couldn't be consumed.", "source": "codesearchnet"}
{"code": "def show_help(bokehjs_action):\n    print()\n    if (bokehjs_action in ['built', 'installed']):\n        print(\"Bokeh-specific options available with 'install' or 'develop':\")\n        print()\n        print('  --build-js          build and install a fresh BokehJS')\n        print('  --install-js        install only last previously built BokehJS')\n    else:\n        print(\"Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'\")\n        print()\n        print('No extra Bokeh-specific options are available.')\n    print()", "docstring": "Print information about extra Bokeh-specific command line options.\n\nArgs:\nbokehjs_action (str) : one of 'built', 'installed', or 'packaged'\nhow (or if) BokehJS was installed into the python source tree\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_role(user, roles):\n        \n        def _add_role(role):\n            user_role = UserRole()\n            user_role.user_id = user.user_id\n            user_role.role_id = role.role_id\n            db.session.add(user_role)\n            db.session.commit()\n        [_add_role(role) for role in roles]", "docstring": "Map roles for user in database\n\nArgs:\nuser (User): User to add roles to\nroles ([Role]): List of roles to add\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0, weight=1.0, scope=None):\n    logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())\n    with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]):\n        num_classes = one_hot_labels.get_shape()[(- 1)].value\n        one_hot_labels = tf.cast(one_hot_labels, logits.dtype)\n        if (label_smoothing > 0):\n            smooth_positives = (1.0 - label_smoothing)\n            smooth_negatives = (label_smoothing / num_classes)\n            one_hot_labels = ((one_hot_labels * smooth_positives) + smooth_negatives)\n        cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits(logits, one_hot_labels, name='xentropy')\n        weight = tf.convert_to_tensor(weight, dtype=logits.dtype.base_dtype, name='loss_weight')\n        loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')\n        tf.add_to_collection(LOSSES_COLLECTION, loss)\n        return loss", "docstring": "Define a Cross Entropy loss using softmax_cross_entropy_with_logits.\n\nIt can scale the loss by weight factor, and smooth the labels.\n\nArgs:\nlogits: [batch_size, num_classes] logits outputs of the network .\none_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.\nlabel_smoothing: if greater than 0 then smooth the labels.\nweight: scale the loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\nA tensor with the softmax_cross_entropy loss.", "source": "codesearchnet"}
{"code": "def get_value(self):\n    try:\n        value = self.raw_value\n    except (AttributeError, KeyError) as err:\n        self._reraise_if_required(err)\n        default_value = self.default_value\n        if self.transform_default:\n            return self.transform(default_value)\n        return default_value\n    else:\n        return self.transform(value)", "docstring": "Return the transformed raw or default value.\n\nIf the variable is missing from the project settings, and the setting\nis required, re-raise an AttributeError. If it is not required,\nreturn the (optionally transformed) default value.\n\nReturns:\nobject: the transformed raw value.", "source": "codesearchnet"}
{"code": "def _valid_deleted_file(path):\n    \n    ret = False\n    if path.endswith(' (deleted)'):\n        ret = True\n    if re.compile(r\"\\(path inode=[0-9]+\\)$\").search(path):\n        ret = True\n\n    regex = re.compile(\"|\".join(LIST_DIRS))\n    if regex.match(path):\n        ret = False\n    return ret", "docstring": "Filters file path against unwanted directories and decides whether file is marked as deleted.\n\nReturns:\nTrue if file is desired deleted file, else False.\n\nArgs:\npath: A string - path to file", "source": "juraj-google-style"}
{"code": "def post_error(self, name, message):\n    self.post_command(OPERATIONS.CMD_POST_MESSAGE, _create_message(name, states.ERROR_LEVEL, message))", "docstring": "Asynchronously post a user facing error message about a service.\n\nArgs:\nname (string): The name of the service\nmessage (string): The user facing error message that will be stored\nfor the service and can be queried later.", "source": "codesearchnet"}
{"code": "def __init__(self, pipeline_proto, pipeline_analyzer, cache_manager, pipeline_graph_renderer):\n    self._analyzer = pipeline_analyzer\n    self._cache_manager = cache_manager\n    self._pipeline_graph = interactive_pipeline_graph.InteractivePipelineGraph(pipeline_proto, required_transforms=self._analyzer.tl_required_trans_ids(), referenced_pcollections=self._analyzer.tl_referenced_pcoll_ids(), cached_pcollections=self._analyzer.caches_used())\n    self._renderer = pipeline_graph_renderer\n    self._text_to_print = collections.OrderedDict()\n    self._text_to_print['summary'] = 'Using %s cached PCollections\\nExecuting %s of %s transforms.' % (len(self._analyzer.caches_used()), len(self._analyzer.tl_required_trans_ids()) - len(self._analyzer.read_cache_ids()) - len(self._analyzer.write_cache_ids()), len(pipeline_proto.components.transforms[pipeline_proto.root_transform_ids[0]].subtransforms))\n    self._text_to_print.update({pcoll_id: '' for pcoll_id in self._analyzer.tl_referenced_pcoll_ids()})\n    self._pcollection_stats = {}\n    for pcoll_id in self._analyzer.tl_referenced_pcoll_ids():\n        self._pcollection_stats[pcoll_id] = {'cache_label': self._analyzer.pipeline_info().cache_label(pcoll_id), 'version': -1, 'sample': []}\n    self._producers = {}\n    for _, transform in pipeline_proto.components.transforms.items():\n        for pcoll_id in transform.outputs.values():\n            if pcoll_id not in self._producers or '/' not in transform.unique_name:\n                self._producers[pcoll_id] = transform.unique_name\n    self._lock = threading.Lock()\n    self._periodic_update = False", "docstring": "Constructor of DisplayManager.\n\nArgs:\npipeline_proto: (Pipeline proto)\npipeline_analyzer: (PipelineAnalyzer) the pipeline analyzer that\ncorresponds to this round of execution. This will provide more\ndetailed informations about the pipeline\ncache_manager: (interactive_runner.CacheManager) DisplayManager fetches\nthe latest status of pipeline execution by querying cache_manager.\npipeline_graph_renderer: (pipeline_graph_renderer.PipelineGraphRenderer)\ndecides how a pipeline graph is rendered.", "source": "github-repos"}
{"code": "def filter_by_hoys(self, hoys):\n        \n        existing_hoys = self.header.analysis_period.hoys\n        hoys = [h for h in hoys if h in existing_hoys]\n        _moys = tuple(int(hour * 60) for hour in hoys)\n        return self.filter_by_moys(_moys)", "docstring": "Filter the Data Collection based onva list of hoys.\n\nArgs:\nhoys: A List of hours of the year 0..8759\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def Insert(self, request, global_params=None):\n    config = self.GetMethodConfig('Insert')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new empty dataset.\n\nArgs:\nrequest: (BigqueryDatasetsInsertRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Dataset) The response message.", "source": "github-repos"}
{"code": "def groups_createChild(self, *, channel: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'channel': channel})\n    return self.api_call('groups.createChild', http_verb='GET', params=kwargs)", "docstring": "Clones and archives a private channel.\n\nArgs:\nchannel (str): The group id. e.g. 'G1234567890'", "source": "codesearchnet"}
{"code": "def CopyFromStringTuple(self, time_elements_tuple):\n    \n    if len(time_elements_tuple) < 7:\n      raise ValueError((\n          'Invalid time elements tuple at least 7 elements required,'\n          'got: {0:d}').format(len(time_elements_tuple)))\n\n    year, month, day_of_month, hours, minutes, seconds, microseconds = (\n        time_elements_tuple)\n\n    try:\n      microseconds = int(microseconds, 10)\n    except (TypeError, ValueError):\n      raise ValueError('Invalid microsecond value: {0!s}'.format(microseconds))\n\n    if microseconds < 0 or microseconds >= definitions.MICROSECONDS_PER_SECOND:\n      raise ValueError('Invalid number of microseconds.')\n\n    fraction_of_second = (\n        decimal.Decimal(microseconds) / definitions.MICROSECONDS_PER_SECOND)\n\n    time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds,\n        str(fraction_of_second))\n\n    super(TimeElementsInMicroseconds, self).CopyFromStringTuple(\n        time_elements_tuple)", "docstring": "Copies time elements from string-based time elements tuple.\n\nArgs:\ntime_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]):\ntime elements, contains year, month, day of month, hours, minutes,\nseconds and microseconds.\n\nRaises:\nValueError: if the time elements tuple is invalid.", "source": "juraj-google-style"}
{"code": "def set_pattern_actual_step(self, patternnumber, value):\n        \n        _checkPatternNumber(patternnumber)\n        _checkStepNumber(value)\n        \n        address = _calculateRegisterAddress('actualstep', patternnumber)\n        self.write_register(address, value, 0)", "docstring": "Set the 'actual step' parameter for a given pattern.\n\nArgs:\n* patternnumber (integer): 0-7\n* value (integer): 0-7", "source": "juraj-google-style"}
{"code": "def hard_shrink(x, threshold=0.5):\n    if any_symbolic_tensors((x,)):\n        return HardShrink(threshold).symbolic_call(x)\n    return backend.nn.hard_shrink(x, threshold)", "docstring": "Hard Shrink activation function.\n\nThe Hard Shrink function is a thresholding operation defined as:\n\n`f(x) = x` if `|x| > threshold`,\n`f(x) = 0` otherwise.\n\nArgs:\nx: Input tensor.\nthreshold: Threshold value. Defaults to 0.5.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = np.array([-0.5, 0., 1.])\n>>> x_hard_shrink = keras.ops.hard_shrink(x)\n>>> print(x_hard_shrink)\narray([0. 0. 1.], shape=(3,), dtype=float64)", "source": "github-repos"}
{"code": "def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):\n    for graph in [prune_graph] + [f for f in prune_graph._functions.values()]:\n        if not isinstance(graph, ops.Graph):\n            continue\n        for op in graph.get_operations():\n            if op.type not in _UNCONNECTED_OPS_TO_PRUNE:\n                continue\n            outputs_consumed = False\n            for output in op.outputs:\n                if output.consumers():\n                    outputs_consumed = True\n                    break\n            if not outputs_consumed:\n                logging.info('Pruning OP %s of type %s from XLA Compile due to it being disconnected.', op.name, op.type)\n                op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)", "docstring": "Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.\n\nArgs:\nprune_graph: A tensorflow graph from which we wish to prune unconnected ops\nas listed in _UNCONNECTED_OPS_TO_PRUNE.  In general, these ops should have\nno inputs and no consumers. These can often be left behind due to graph\nconstruction rewiring (for instance TF-Hub). While they never execute,\nthey will cause XLA compile to fail so we strip them from XLA compile by\nremoving the tpu_replicate attribute.", "source": "github-repos"}
{"code": "def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list, cluster_centers):\n    cluster_sums = []\n    cluster_counts = []\n    epsilon = constant_op.constant(1e-06, dtype=inputs[0].dtype)\n    for inp, cluster_idx in zip(inputs, cluster_idx_list):\n        with ops.colocate_with(inp, ignore_existing=True):\n            cluster_sums.append(math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))\n            cluster_counts.append(math_ops.unsorted_segment_sum(array_ops.reshape(array_ops.ones(array_ops.reshape(array_ops.shape(inp)[0], [-1])), [-1, 1]), cluster_idx, num_clusters))\n    with ops.colocate_with(cluster_centers, ignore_existing=True):\n        new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)\n        if self._clusters_l2_normalized():\n            new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)\n    return state_ops.assign(cluster_centers, new_clusters_centers)", "docstring": "Creates an op for training for full batch case.\n\nArgs:\ninputs: list of input Tensors.\nnum_clusters: an integer Tensor providing the number of clusters.\ncluster_idx_list: A vector (or list of vectors). Each element in the\nvector corresponds to an input row in 'inp' and specifies the cluster id\ncorresponding to the input.\ncluster_centers: Tensor Ref of cluster centers.\n\nReturns:\nAn op for doing an update of mini-batch k-means.", "source": "github-repos"}
{"code": "def decode_single_feature_from_dict(feature_k, feature, tfexample_dict):\n    if (not feature.serialized_keys):\n        data_to_decode = tfexample_dict[feature_k]\n    else:\n        data_to_decode = {k: tfexample_dict[posixpath.join(feature_k, k)] for k in feature.serialized_keys}\n    return feature.decode_example(data_to_decode)", "docstring": "Decode the given feature from the tfexample_dict.\n\nArgs:\nfeature_k (str): Feature key in the tfexample_dict\nfeature (FeatureConnector): Connector object to use to decode the field\ntfexample_dict (dict): Dict containing the data to decode.\n\nReturns:\ndecoded_feature: The output of the feature.decode_example", "source": "codesearchnet"}
{"code": "def UploadOperations(self, operations, is_last=False):\n    \n    if self._is_last:\n      raise googleads.errors.AdWordsBatchJobServiceInvalidOperationError(\n          'Can\\'t add new operations to a completed incremental upload.')\n    \n    req = self._request_builder.BuildUploadRequest(\n        self._upload_url, operations,\n        current_content_length=self._current_content_length, is_last=is_last)\n    \n    \n    try:\n      _batch_job_logger.debug('Outgoing request: %s %s %s',\n                              req.get_full_url(), req.headers, req.data)\n\n      self._url_opener.open(req)\n\n      if _batch_job_logger.isEnabledFor(logging.INFO):\n        _batch_job_logger.info('Request summary: %s',\n                               self._ExtractRequestSummaryFields(req))\n    except urllib2.HTTPError as e:\n      if e.code != 308:\n        if _batch_job_logger.isEnabledFor(logging.WARNING):\n          _batch_job_logger.warning(\n              'Request summary: %s',\n              self._ExtractRequestSummaryFields(req, error=e))\n        raise\n    \n    self._current_content_length += len(req.data)\n    self._is_last = is_last", "docstring": "Uploads operations to the given uploadUrl in incremental steps.\n\nNote: Each list of operations is expected to contain operations of the\nsame type, similar to how one would normally send operations in an\nAdWords API Service request.\n\nArgs:\noperations: one or more lists of operations as would be sent to the\nAdWords API for the associated service.\nis_last: a boolean indicating whether this is the final increment to be\nadded to the batch job.", "source": "juraj-google-style"}
{"code": "def task_indices(self, job_name):\n    try:\n        job = self._cluster_spec[job_name]\n    except KeyError:\n        raise ValueError('No such job in cluster: %r' % job_name)\n    return list(sorted(job.keys()))", "docstring": "Returns a list of valid task indices in the given job.\n\nArgs:\njob_name: The string name of a job in this cluster.\n\nReturns:\nA list of valid task indices in the given job.\n\nRaises:\nValueError: If `job_name` does not name a job in this cluster,\nor no task with index `task_index` is defined in that job.", "source": "github-repos"}
{"code": "def check_structure(data):\n    \n    if not isinstance(data, dict):\n        try:\n            data = _convert_to_dict(data)\n        except MetaParsingException:\n            raise\n        except:\n            raise MetaParsingException(\n                \"Metadata format has invalid strucure (dict is expected).\"\n            )\n\n    for key, val in data.iteritems():\n        if type(key) not in _ALLOWED_TYPES:\n            raise MetaParsingException(\n                \"Can't decode the meta file - invalid type of keyword '\" +\n                str(key) +\n                \"'!\"\n            )\n        if type(val) not in _ALLOWED_TYPES:\n            raise MetaParsingException(\n                \"Can't decode the meta file - invalid type of keyword '\" +\n                str(key) +\n                \"'!\"\n            )\n\n    return data", "docstring": "Check whether the structure is flat dictionary. If not, try to convert it\nto dictionary.\n\nArgs:\ndata: Whatever data you have (dict/tuple/list).\n\nReturns:\ndict: When the conversion was successful or `data` was already `good`.\n\nRaises:\nMetaParsingException: When the data couldn't be converted or had `bad`\nstructure.", "source": "juraj-google-style"}
{"code": "def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None):\n    dtype = dtypes.as_dtype(dtype)\n    with ops.name_scope('random_sign_uniform'):\n        unsigned_samples = random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)\n        if seed is not None:\n            seed += 12\n        signs = math_ops.sign(random_ops.random_uniform(shape, minval=-1.0, maxval=1.0, seed=seed))\n        return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)", "docstring": "Tensor with (possibly complex) random entries from a \"sign Uniform\".\n\nLetting `Z` be a random variable equal to `-1` and `1` with equal probability,\nSamples from this `Op` are distributed like\n\n```\nZ * X, where X ~ Uniform[minval, maxval], if dtype is real,\nZ * (X + iY),  where X, Y ~ Uniform[minval, maxval], if dtype is complex.\n```\n\nArgs:\nshape:  `TensorShape` or Python list.  Shape of the returned tensor.\nminval:  `0-D` `Tensor` giving the minimum values.\nmaxval:  `0-D` `Tensor` giving the maximum values.\ndtype:  `TensorFlow` `dtype` or Python dtype\nseed:  Python integer seed for the RNG.\n\nReturns:\n`Tensor` with desired shape and dtype.", "source": "github-repos"}
{"code": "def duration_distance(item_a, item_b, max_value):\n    duration_a = item_a.times.size\n    duration_b = item_b.times.size\n    return (np.minimum(np.abs((duration_a - duration_b)), max_value) / float(max_value))", "docstring": "Absolute difference in the duration of two items\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def fit_to_cols(what, indent='', cols=79):\n    \n    lines = []\n    while what:\n        what, next_line = split_line(\n            what=what,\n            cols=cols,\n            indent=indent,\n        )\n        lines.append(next_line)\n\n    return '\\n'.join(lines)", "docstring": "Wrap the given text to the columns, prepending the indent to each line.\n\nArgs:\nwhat(str): text to wrap.\nindent(str): indentation to use.\ncols(int): colt to wrap to.\n\nReturns:\nstr: Wrapped text", "source": "juraj-google-style"}
{"code": "def weighted_average(counts: tf.Tensor, values: tf.Tensor):\n    float_counts = tf.cast(counts, tf.float32)\n    weighted_values = tf.einsum('i,i...->...', float_counts, values)\n    return weighted_values / tf.reduce_sum(float_counts)", "docstring": "Returns the weighted average of input values.\n\nSubtensor `i` of `values` is multiplied by `counts[i]` resulting in a weighted\nversion of values; the mean is then taken across the first dimension.\n\nArgs:\ncounts: Non-negative integers of shape [batch_size].\nvalues: Floats of shape [batch_size, ...].\n\nReturns:\nTensor of shape [...] which is the weighted average.", "source": "github-repos"}
{"code": "def WriteEventBody(self, event):\n    \n    output_values = []\n    for field_name in self._fields:\n      output_value = self._dynamic_fields_helper.GetFormattedField(\n          event, field_name)\n\n      output_value = self._SanitizeField(output_value)\n      output_values.append(output_value)\n\n    output_line = '{0:s}\\n'.format(self._field_delimiter.join(output_values))\n    self._output_writer.Write(output_line)", "docstring": "Writes the body of an event to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def _RunActions(self, rule, client_id):\n    \n    actions_count = 0\n\n    for action in rule.actions:\n      try:\n        \n        token = self.token.Copy()\n        token.username = \"Foreman\"\n\n        if action.HasField(\"hunt_id\"):\n          if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):\n            logging.info(\n                \"Foreman: ignoring hunt %s on client %s: was started \"\n                \"here before\", client_id, action.hunt_id)\n          else:\n            logging.info(\"Foreman: Starting hunt %s on client %s.\",\n                         action.hunt_id, client_id)\n\n            flow_cls = registry.AFF4FlowRegistry.FlowClassByName(\n                action.hunt_name)\n            flow_cls.StartClients(action.hunt_id, [client_id])\n            actions_count += 1\n        else:\n          flow.StartAFF4Flow(\n              client_id=client_id,\n              flow_name=action.flow_name,\n              token=token,\n              **action.argv.ToDict())\n          actions_count += 1\n      \n      \n      except Exception as e:  \n        logging.exception(\"Failure running foreman action on client %s: %s\",\n                          action.hunt_id, e)\n\n    return actions_count", "docstring": "Run all the actions specified in the rule.\n\nArgs:\nrule: Rule which actions are to be executed.\nclient_id: Id of a client where rule's actions are to be executed.\n\nReturns:\nNumber of actions started.", "source": "juraj-google-style"}
{"code": "def get_col_info(table_name, col_name, meta_file):\n    with open(meta_file, 'r') as f:\n        meta = json.load(f)\n    (data_table, table) = load_data_table(table_name, meta_file, meta)\n    for field in table['fields']:\n        if (field['name'] == col_name):\n            col_meta = field\n    col = data_table[col_name]\n    return (col, col_meta)", "docstring": "Return the content and metadata of a fiven column.\n\nArgs:\ntable_name(str): Name of the table.\ncol_name(str): Name of the column.\nmeta_file(str): Path to the meta.json file.\n\nReturns:\ntuple(pandas.Series, dict)", "source": "codesearchnet"}
{"code": "def calculate_view_box(layers, aspect_ratio, margin=DEFAULT_VIEW_BOX_MARGIN):\n    min_x = min((np.nanmin(x) for (x, y) in layers))\n    max_x = max((np.nanmax(x) for (x, y) in layers))\n    min_y = min((np.nanmin(y) for (x, y) in layers))\n    max_y = max((np.nanmax(y) for (x, y) in layers))\n    height = (max_y - min_y)\n    width = (max_x - min_x)\n    if (height > (width * aspect_ratio)):\n        adj_height = (height * (1.0 + margin))\n        adj_width = (adj_height / aspect_ratio)\n    else:\n        adj_width = (width * (1.0 + margin))\n        adj_height = (adj_width * aspect_ratio)\n    width_buffer = ((adj_width - width) / 2.0)\n    height_buffer = ((adj_height - height) / 2.0)\n    return ((min_x - width_buffer), (min_y - height_buffer), adj_width, adj_height)", "docstring": "Calculates the size of the SVG viewBox to use.\n\nArgs:\nlayers (list): the layers in the image\naspect_ratio (float): the height of the output divided by the width\nmargin (float): minimum amount of buffer to add around the image, relative\nto the total dimensions\n\nReturns:\ntuple: a 4-tuple of floats representing the viewBox according to SVG\nspecifications ``(x, y, width, height)``.", "source": "codesearchnet"}
{"code": "def _get_fields(ast):\n    if (not ast.selection_set):\n        return ([], [])\n    property_fields = []\n    vertex_fields = []\n    seen_field_names = set()\n    switched_to_vertices = False\n    for field_ast in ast.selection_set.selections:\n        if (not isinstance(field_ast, Field)):\n            continue\n        name = get_ast_field_name(field_ast)\n        if (name in seen_field_names):\n            raise GraphQLCompilationError(u'Encountered repeated field name: {}'.format(name))\n        seen_field_names.add(name)\n        if is_vertex_field_name(name):\n            switched_to_vertices = True\n            vertex_fields.append(field_ast)\n        else:\n            if switched_to_vertices:\n                raise GraphQLCompilationError(u'Encountered property field {} after vertex fields!'.format(name))\n            property_fields.append(field_ast)\n    return (vertex_fields, property_fields)", "docstring": "Return a list of vertex fields, and a list of property fields, for the given AST node.\n\nAlso verifies that all property fields for the AST node appear before all vertex fields,\nraising GraphQLCompilationError if that is not the case.\n\nArgs:\nast: GraphQL AST node, obtained from the graphql library\n\nReturns:\ntuple of two lists\n- the first list contains ASTs for vertex fields\n- the second list contains ASTs for property fields", "source": "codesearchnet"}
{"code": "def process_gatt_service(services, event):\n    \n\n    length = len(event.payload) - 5\n\n    handle, start, end, uuid = unpack('<BHH%ds' % length, event.payload)\n\n    uuid = process_uuid(uuid)\n    services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end}", "docstring": "Process a BGAPI event containing a GATT service description and add it to a dictionary\n\nArgs:\nservices (dict): A dictionary of discovered services that is updated with this event\nevent (BGAPIPacket): An event containing a GATT service", "source": "juraj-google-style"}
{"code": "def commit_output(cls, shard_ctx, iterator):\n    outs = tuple(iterator)\n    shard_ctx._state.writer_state['outs'] = outs", "docstring": "Saves output references when a shard finishes.\n\nInside end_shard(), an output writer can optionally use this method\nto persist some references to the outputs from this shard\n(e.g a list of filenames)\n\nArgs:\nshard_ctx: map_job_context.ShardContext for this shard.\niterator: an iterator that yields json serializable\nreferences to the outputs from this shard.\nContents from the iterator can be accessible later via\nmap_job.Job.get_outputs.", "source": "codesearchnet"}
{"code": "def set_bias(self, value):\n    if self.get_lm_head() is not None:\n        lm_head = self.get_lm_head()\n        try:\n            lm_head.set_bias(value)\n        except AttributeError:\n            self.build_in_name_scope()\n            lm_head.set_bias(value)", "docstring": "Set all the bias in the LM head.\n\nArgs:\nvalue (`Dict[tf.Variable]`):\nAll the new bias attached to an LM head.", "source": "github-repos"}
{"code": "def get_variable(self, feature_column, name):\n    del feature_column, name\n    raise NotImplementedError('StateManager.get_var')", "docstring": "Returns an existing variable.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nname: variable name.", "source": "github-repos"}
{"code": "def abs_path_from_base(base_path, rel_path):\n    return os.path.abspath(os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), base_path, rel_path))", "docstring": "Join a base and a relative path and return an absolute path to the resulting\nlocation.\n\nArgs:\nbase_path: str\nRelative or absolute path to prepend to ``rel_path``.\n\nrel_path: str\nPath relative to the location of the module file from which this function is called.\n\nReturns:\nstr : Absolute path to the location specified by ``rel_path``.", "source": "codesearchnet"}
{"code": "def setting_address(key):\n    key_parts = key.split('.', maxsplit=(_MAX_KEY_PARTS - 1))\n    addr_parts = [_short_hash(x.encode()) for x in key_parts]\n    addr_parts.extend(([_EMPTY_PART] * (_MAX_KEY_PARTS - len(addr_parts))))\n    return (CONFIG_STATE_NAMESPACE + ''.join(addr_parts))", "docstring": "Computes the radix address for the given setting key.\n\nKeys are broken into four parts, based on the dots in the string. For\nexample, the key `a.b.c` address is computed based on `a`, `b`, `c` and\nthe empty string. A longer key, for example `a.b.c.d.e`, is still\nbroken into four parts, but the remaining pieces are in the last part:\n`a`, `b`, `c` and `d.e`.\n\nEach of these peices has a short hash computed (the first 16 characters\nof its SHA256 hash in hex), and is joined into a single address, with\nthe config namespace (`000000`) added at the beginning.\n\nArgs:\nkey (str): the setting key\nReturns:\nstr: the computed address", "source": "codesearchnet"}
{"code": "def update(self, forecasts, observations):\n    for (t, threshold) in enumerate(self.thresholds[:(- 1)]):\n        self.frequencies.loc[(t, 'Positive_Freq')] += np.count_nonzero((((threshold <= forecasts) & (forecasts < self.thresholds[(t + 1)])) & (observations >= self.obs_threshold)))\n        self.frequencies.loc[(t, 'Total_Freq')] += np.count_nonzero(((threshold <= forecasts) & (forecasts < self.thresholds[(t + 1)])))", "docstring": "Update the statistics with a set of forecasts and observations.\n\nArgs:\nforecasts (numpy.ndarray): Array of forecast probability values\nobservations (numpy.ndarray): Array of observation values", "source": "codesearchnet"}
{"code": "def makecontinuum(cube, **kwargs):\n    \n    \n    inchs = kwargs.pop('inchs', None)\n    exchs = kwargs.pop('exchs', None)\n\n    if (inchs is not None) or (exchs is not None):\n        raise KeyError('Inchs and exchs are no longer supported. Use weight instead.')\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    if weight is None:\n        weight = 1.\n    \n        \n        \n    cont = (cube * (1 / weight**2)).sum(dim='ch') / (1 / weight**2).sum(dim='ch')\n\n    \n    xcoords      = {'x': cube.x.values}\n    ycoords      = {'y': cube.y.values}\n    chcoords     = {'masterid': np.array([0]), \n                    'kidid': np.array([0]), \n                    'kidfq': np.array([0]), \n                    'kidtp': np.array([1])} \n    scalarcoords = {'coordsys': cube.coordsys.values, 'datatype': cube.datatype.values,\n                    'xref': cube.xref.values, 'yref': cube.yref.values}\n\n    return dc.cube(cont.values, xcoords=xcoords, ycoords=ycoords, chcoords=chcoords,\n                    scalarcoords=scalarcoords)", "docstring": "Make a continuum array.\n\nArgs:\ncube (decode.cube): Decode cube which will be averaged over channels.\nkwargs (optional): Other arguments.\ninchs (list): Included channel kidids.\nexchs (list): Excluded channel kidids.\n\nReturns:\ndecode cube (decode.cube): Decode cube (2d).", "source": "juraj-google-style"}
{"code": "def _AskUser(self):\n    if self._show_percent:\n        progress = int(((self._displayed * 100) / len(self._text.splitlines())))\n        progress_text = (' (%d%%)' % progress)\n    else:\n        progress_text = ''\n    question = AnsiText(('Enter: next line, Space: next page, b: prev page, q: quit.%s' % progress_text), ['green'])\n    sys.stdout.write(question)\n    sys.stdout.flush()\n    ch = self._GetCh()\n    sys.stdout.write(('\\r%s\\r' % (' ' * len(question))))\n    sys.stdout.flush()\n    return ch", "docstring": "Prompt the user for the next action.\n\nReturns:\nA string, the character entered by the user.", "source": "codesearchnet"}
{"code": "def find_bad_commit(target_test, start_commit, end_commit):\n    if start_commit == end_commit:\n        return start_commit\n    create_script(target_test=target_test)\n    bash = f'\\ngit bisect reset\\ngit bisect start {start_commit} {end_commit}\\ngit bisect run python3 target_script.py\\n'\n    with open('run_git_bisect.sh', 'w') as fp:\n        fp.write(bash.strip())\n    result = subprocess.run(['bash', 'run_git_bisect.sh'], capture_output=True, text=True)\n    print(result.stdout)\n    if 'error: bisect run failed' in result.stderr:\n        index = result.stderr.find('error: bisect run failed')\n        bash_error = result.stderr[index:]\n        error_msg = f'Error when running git bisect:\\nbash error: {bash_error}'\n        pattern = 'pytest failed to run: .+'\n        pytest_errors = re.findall(pattern, result.stdout)\n        if len(pytest_errors) > 0:\n            pytest_error = pytest_errors[0]\n            index = pytest_error.find('pytest failed to run: ')\n            index += len('pytest failed to run: ')\n            pytest_error = pytest_error[index:]\n            error_msg += f'pytest error: {pytest_error}'\n        raise ValueError(error_msg)\n    pattern = '(.+) is the first bad commit'\n    commits = re.findall(pattern, result.stdout)\n    bad_commit = None\n    if len(commits) > 0:\n        bad_commit = commits[0]\n    print(f'Between `start_commit` {start_commit} and `end_commit` {end_commit}')\n    print(f'bad_commit: {bad_commit}\\n')\n    return bad_commit", "docstring": "Find (backward) the earliest commit between `start_commit` and `end_commit` at which `target_test` fails.\n\nArgs:\ntarget_test (`str`): The test to check.\nstart_commit (`str`): The latest commit.\nend_commit (`str`): The earliest commit.\n\nReturns:\n`str`: The earliest commit at which `target_test` fails.", "source": "github-repos"}
{"code": "def _AddPropertiesForNonRepeatedScalarField(field, cls):\n  \n  proto_field_name = field.name\n  property_name = _PropertyName(proto_field_name)\n  type_checker = type_checkers.GetTypeChecker(field)\n  default_value = field.default_value\n  valid_values = set()\n  is_proto3 = field.containing_type.syntax == \"proto3\"\n\n  def getter(self):\n    \n    \n    return self._fields.get(field, default_value)\n  getter.__module__ = None\n  getter.__doc__ = 'Getter for %s.' % proto_field_name\n\n  clear_when_set_to_default = is_proto3 and not field.containing_oneof\n\n  def field_setter(self, new_value):\n    \n    \n    \n    new_value = type_checker.CheckValue(new_value)\n    if clear_when_set_to_default and not new_value:\n      self._fields.pop(field, None)\n    else:\n      self._fields[field] = new_value\n    \n    \n    if not self._cached_byte_size_dirty:\n      self._Modified()\n\n  if field.containing_oneof:\n    def setter(self, new_value):\n      field_setter(self, new_value)\n      self._UpdateOneofState(field)\n  else:\n    setter = field_setter\n\n  setter.__module__ = None\n  setter.__doc__ = 'Setter for %s.' % proto_field_name\n\n  \n  doc = 'Magic attribute generated for \"%s\" proto field.' % proto_field_name\n  setattr(cls, property_name, property(getter, setter, doc=doc))", "docstring": "Adds a public property for a nonrepeated, scalar protocol message field.\nClients can use this property to get and directly set the value of the field.\nNote that when the client sets the value of a field by using this property,\nall necessary \"has\" bits are set as a side-effect, and we also perform\ntype-checking.\n\nArgs:\nfield: A FieldDescriptor for this field.\ncls: The class we're constructing.", "source": "juraj-google-style"}
{"code": "def get_experiment(self, coll_name, exp_name):\n        \n        exp = ExperimentResource(exp_name, coll_name)\n        return self.get_project(exp)", "docstring": "Convenience method that gets experiment resource.\n\nArgs:\ncoll_name (str): Collection name\nexp_name (str): Experiment name\n\nReturns:\n(ExperimentResource)", "source": "juraj-google-style"}
{"code": "def initialize_logger(debug):\n    \n    level = logging.DEBUG if debug else logging.INFO\n    logger = logging.getLogger('cucco')\n    logger.setLevel(level)\n    formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s')\n    console_handler = logging.StreamHandler()\n    console_handler.setLevel(level)\n    console_handler.setFormatter(formatter)\n    logger.addHandler(console_handler)\n\n    return logger", "docstring": "Set up logger to be used by the library.\n\nArgs:\ndebug: Wheter to use debug level or not.\n\nReturns:\nA logger ready to be used.", "source": "juraj-google-style"}
{"code": "def process_layer(layer_data):\n    layer_name = layer_data['name']\n    if layer_name in created_layers:\n        layer = created_layers[layer_name]\n    else:\n        from tensorflow.python.keras.layers import deserialize as deserialize_layer\n        layer = deserialize_layer(layer_data, custom_objects=custom_objects)\n        created_layers[layer_name] = layer\n    node_count_by_layer[layer] = int(_should_skip_first_node(layer))\n    inbound_nodes_data = layer_data['inbound_nodes']\n    inbound_nodes_data = tf_utils.convert_inner_node_data(inbound_nodes_data, wrap=True)\n    for node_data in inbound_nodes_data:\n        add_unprocessed_node(layer, node_data)", "docstring": "Deserializes a layer, then call it on appropriate inputs.\n\nArgs:\nlayer_data: layer config dict.\n\nRaises:\nValueError: In case of improperly formatted `layer_data` dict.", "source": "github-repos"}
{"code": "def _serialize_to_proto(self, object_proto=None, **kwargs):\n    del object_proto, kwargs\n    return None", "docstring": "Returns a proto of any type to be saved into the SavedModel.\n\nTrackable classes decorated with `register_serializable` should overwrite\nthis method to save metadata for this object to the SavedModel. The proto\nreturned by this function will be passed to `_deserialize_from_proto` in the\nform of a `google.protobuf.Any` proto.\n\nThis data is only saved and used by the Python API. Existing C++ loading\nAPIs such as `tensorflow::LoadSavedModel` will not read this field at all.\n\nArgs:\nobject_proto: A `SavedObject` proto that may be filled by this function.\nOnly the core serializable types (Variable, Function, Constant, Asset)\nshould modify this argument.\n**kwargs: Future keyword arguments passed to the object during saving.\n\nReturns:\nA proto that serializes this class's type.", "source": "github-repos"}
{"code": "def parse_verilog(text):\n    lex = VerilogLexer\n    name = None\n    kind = None\n    saved_type = None\n    mode = 'input'\n    ptype = 'wire'\n    metacomments = []\n    parameters = []\n    param_items = []\n    generics = []\n    ports = collections.OrderedDict()\n    sections = []\n    port_param_index = 0\n    last_item = None\n    array_range_start_pos = 0\n    objects = []\n    for (pos, action, groups) in lex.run(text):\n        if (action == 'metacomment'):\n            if (last_item is None):\n                metacomments.append(groups[0])\n            else:\n                last_item.desc = groups[0]\n        if (action == 'section_meta'):\n            sections.append((port_param_index, groups[0]))\n        elif (action == 'module'):\n            kind = 'module'\n            name = groups[0]\n            generics = []\n            ports = collections.OrderedDict()\n            param_items = []\n            sections = []\n            port_param_index = 0\n        elif (action == 'parameter_start'):\n            (net_type, vec_range) = groups\n            new_ptype = ''\n            if (net_type is not None):\n                new_ptype += net_type\n            if (vec_range is not None):\n                new_ptype += (' ' + vec_range)\n            ptype = new_ptype\n        elif (action == 'param_item'):\n            generics.append(VerilogParameter(groups[0], 'in', ptype))\n        elif (action == 'module_port_start'):\n            (new_mode, net_type, signed, vec_range) = groups\n            new_ptype = ''\n            if (net_type is not None):\n                new_ptype += net_type\n            if (signed is not None):\n                new_ptype += (' ' + signed)\n            if (vec_range is not None):\n                new_ptype += (' ' + vec_range)\n            for i in param_items:\n                ports[i] = VerilogParameter(i, mode, ptype)\n            param_items = []\n            if (len(ports) > 0):\n                last_item = next(reversed(ports))\n            mode = new_mode\n            ptype = new_ptype\n        elif (action == 'port_param'):\n            ident = groups[0]\n            param_items.append(ident)\n            port_param_index += 1\n        elif (action == 'end_module'):\n            for i in param_items:\n                ports[i] = VerilogParameter(i, mode, ptype)\n            vobj = VerilogModule(name, ports.values(), generics, dict(sections), metacomments)\n            objects.append(vobj)\n            last_item = None\n            metacomments = []\n    return objects", "docstring": "Parse a text buffer of Verilog code\n\nArgs:\ntext (str): Source code to parse\nReturns:\nList of parsed objects.", "source": "codesearchnet"}
{"code": "def fft(x):\n    if any_symbolic_tensors(x):\n        return FFT().symbolic_call(x)\n    return backend.math.fft(x)", "docstring": "Computes the Fast Fourier Transform along last axis of input.\n\nArgs:\nx: Tuple of the real and imaginary parts of the input tensor. Both\ntensors in the tuple should be of floating type.\n\nReturns:\nA tuple containing two tensors - the real and imaginary parts of the\noutput tensor.\n\nExample:\n\n>>> x = (\n...     keras.ops.convert_to_tensor([1., 2.]),\n...     keras.ops.convert_to_tensor([0., 1.]),\n... )\n>>> fft(x)\n(array([ 3., -1.], dtype=float32), array([ 1., -1.], dtype=float32))", "source": "github-repos"}
{"code": "def csv_to_num_matrix(csv_file_path):\n    mtx = []\n    with open(csv_file_path) as csv_data_file:\n        for row in csv_data_file:\n            mtx.append([float(val) for val in row.split(',')])\n    return mtx", "docstring": "Load a CSV file consisting only of numbers into a Python matrix of floats.\n\nArgs:\ncsv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)", "source": "codesearchnet"}
{"code": "def load_json_or_yaml(string, is_path=False, file_type='json', exception=ScriptWorkerTaskException, message='Failed to load %(file_type)s: %(exc)s'):\n    if (file_type == 'json'):\n        _load_fh = json.load\n        _load_str = json.loads\n    else:\n        _load_fh = yaml.safe_load\n        _load_str = yaml.safe_load\n    try:\n        if is_path:\n            with open(string, 'r') as fh:\n                contents = _load_fh(fh)\n        else:\n            contents = _load_str(string)\n        return contents\n    except (OSError, ValueError, yaml.scanner.ScannerError) as exc:\n        if (exception is not None):\n            repl_dict = {'exc': str(exc), 'file_type': file_type}\n            raise exception((message % repl_dict))", "docstring": "Load json or yaml from a filehandle or string, and raise a custom exception on failure.\n\nArgs:\nstring (str): json/yaml body or a path to open\nis_path (bool, optional): if ``string`` is a path. Defaults to False.\nfile_type (str, optional): either \"json\" or \"yaml\". Defaults to \"json\".\nexception (exception, optional): the exception to raise on failure.\nIf None, don't raise an exception.  Defaults to ScriptWorkerTaskException.\nmessage (str, optional): the message to use for the exception.\nDefaults to \"Failed to load %(file_type)s: %(exc)s\"\n\nReturns:\ndict: the data from the string.\n\nRaises:\nException: as specified, on failure", "source": "codesearchnet"}
{"code": "def fuse_resize_and_conv(input_graph_def: graph_pb2.GraphDef, output_node_names: Sequence[str]) -> graph_pb2.GraphDef:\n    input_node_map = {}\n    for node in input_graph_def.node:\n        if node.name not in input_node_map:\n            input_node_map[node.name] = node\n        else:\n            raise ValueError('Duplicate node names detected for ', node.name)\n    node_reference_count = collections.defaultdict(int)\n    for node in input_graph_def.node:\n        for input_name in node.input:\n            stripped_name = node_name_from_input(input_name)\n            node_reference_count[stripped_name] += 1\n    for output_name in output_node_names:\n        node_reference_count[output_name] += 1\n    new_ops = []\n    for node in input_graph_def.node:\n        if node.op != 'Conv2D':\n            continue\n        conv_op = node\n        input_op = node_from_map(input_node_map, conv_op.input[0])\n        if input_op.op == 'MirrorPad':\n            mirror_pad_op = input_op\n            resize_op = node_from_map(input_node_map, mirror_pad_op.input[0])\n            if resize_op.op != 'ResizeBilinear':\n                resize_op = None\n        else:\n            mirror_pad_op = None\n            if input_op.op == 'ResizeBilinear':\n                resize_op = input_op\n            else:\n                resize_op = None\n        if not mirror_pad_op and (not resize_op):\n            continue\n        node_reference_count[conv_op.name] = 0\n        if mirror_pad_op:\n            node_reference_count[mirror_pad_op.name] -= 1\n        if resize_op:\n            node_reference_count[resize_op.name] -= 1\n        fused_conv_op = node_def_pb2.NodeDef()\n        if resize_op:\n            fused_conv_op.op = 'FusedResizeAndPadConv2D'\n        else:\n            fused_conv_op.op = 'FusedPadConv2D'\n        fused_conv_op.name = conv_op.name\n        if mirror_pad_op:\n            mirror_paddings_name = mirror_pad_op.input[1]\n            mirror_paddings_mode = mirror_pad_op.attr['mode']\n        else:\n            paddings_op = node_def_pb2.NodeDef()\n            paddings_op.op = 'Const'\n            paddings_op.name = conv_op.name + '_dummy_paddings'\n            paddings_op.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum))\n            paddings_op.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto([0, 0, 0, 0, 0, 0, 0, 0], dtypes.int32, [4, 2])))\n            new_ops.extend([paddings_op])\n            mirror_paddings_name = paddings_op.name\n            mirror_paddings_mode = attr_value_pb2.AttrValue(s=b'REFLECT')\n        if resize_op:\n            fused_conv_op.input.extend([resize_op.input[0], resize_op.input[1], mirror_paddings_name, conv_op.input[1]])\n            fused_conv_op.attr['resize_align_corners'].CopyFrom(resize_op.attr['align_corners'])\n        else:\n            fused_conv_op.input.extend([mirror_pad_op.input[0], mirror_paddings_name, conv_op.input[1]])\n        fused_conv_op.attr['T'].CopyFrom(conv_op.attr['T'])\n        fused_conv_op.attr['mode'].CopyFrom(mirror_paddings_mode)\n        fused_conv_op.attr['strides'].CopyFrom(conv_op.attr['strides'])\n        fused_conv_op.attr['padding'].CopyFrom(conv_op.attr['padding'])\n        new_ops.extend([fused_conv_op])\n    result_graph_def = graph_pb2.GraphDef()\n    for node in input_graph_def.node:\n        if node_reference_count[node.name] < 1:\n            continue\n        new_node = node_def_pb2.NodeDef()\n        new_node.CopyFrom(node)\n        
result_graph_def.node.extend([new_node])\n    result_graph_def.node.extend(new_ops)\n    return result_graph_def", "docstring": "Merges preceding resize and mirror pad ops into a specialized convolution.\n\nThere's a common pattern of enlarging the input to a convolution using a\nresize operation, and also using MirrorPad to extend the boundaries to that\nzero edge pixels don't bleed inwards when convolving. This routine looks for\nthat pattern of operations, and fuses them together into a Conv2DWithResizeOp.\n\nArgs:\ninput_graph_def: A GraphDef containing a model.\noutput_node_names: A list of names of the nodes that produce the final\nresults.\n\nReturns:\nModified graph with resize and pad ops merged.\n\nRaises:\nValueError: If the graph is badly formed with duplicate node names.", "source": "github-repos"}
{"code": "def file_name(self, file_name):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        self._data['fileName'] = file_name\n        request = {'fileName': file_name}\n        return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Updates the file_name.\n\nArgs:\nfile_name:", "source": "juraj-google-style"}
{"code": "def __init__(self, exprs):\n    self.exprs = exprs", "docstring": "Initialize a disjunction.\n\nArgs:\nexprs: A set. The subterms.", "source": "github-repos"}
{"code": "def copy_file(src_file: str, dst_dir: str, strip: str=None, dest_file: str=None) -> None:\n    dest = dest_file if dest_file else src_file\n    if dest.startswith('bazel-out'):\n        dest = dest[dest.index('bin') + 4:]\n    if strip:\n        dest = dest.removeprefix(strip)\n    dest_dir_path = os.path.join(dst_dir, os.path.dirname(dest))\n    os.makedirs(dest_dir_path, exist_ok=True)\n    shutil.copy(src_file, dest_dir_path)\n    os.chmod(os.path.join(dst_dir, dest), 420)", "docstring": "Copy a file to the destination directory.\n\nArgs:\nsrc_file: file to be copied\ndst_dir: destination directory\nstrip: prefix to strip before copying to destination\ndest_file: destanation file location if different from src_file", "source": "github-repos"}
{"code": "def rewrite_autodoc(app, what, name, obj, options, lines):\n    try:\n        lines[:] = parse_cartouche_text(lines)\n    except CartoucheSyntaxError as syntax_error:\n        args = syntax_error.args\n        arg0 = (args[0] if args else '')\n        arg0 += ' in docstring for {what} {name} :'.format(what=what, name=name)\n        arg0 += '\\n=== BEGIN DOCSTRING ===\\n{lines}\\n=== END DOCSTRING ===\\n'.format(lines='\\n'.join(lines))\n        syntax_error.args = ((arg0,) + args[1:])\n        raise", "docstring": "Convert lines from Cartouche to Sphinx format.\n\nThe function to be called by the Sphinx autodoc extension when autodoc\nhas read and processed a docstring. This function modified its\n``lines`` argument *in place* replacing Cartouche syntax input into\nSphinx reStructuredText output.\n\nArgs:\napps: The Sphinx application object.\n\nwhat: The type of object which the docstring belongs to. One of\n'module', 'class', 'exception', 'function', 'method', 'attribute'\n\nname: The fully qualified name of the object.\n\nobj: The object itself.\n\noptions: The options given to the directive. An object with attributes\n``inherited_members``, ``undoc_members``, ``show_inheritance`` and\n``noindex`` that are ``True`` if the flag option of the same name\nwas given to the auto directive.\n\nlines: The lines of the docstring.  Will be modified *in place*.\n\nRaises:\nCartoucheSyntaxError: If the docstring is malformed.", "source": "codesearchnet"}
{"code": "def inverse_transform(self, y, lengths=None):\n        \n        y = np.argmax(y, -1)\n        inverse_y = [self._label_vocab.id2doc(ids) for ids in y]\n        if lengths is not None:\n            inverse_y = [iy[:l] for iy, l in zip(inverse_y, lengths)]\n\n        return inverse_y", "docstring": "Return label strings.\n\nArgs:\ny: label id matrix.\nlengths: sentences length.\n\nReturns:\nlist: list of list of strings.", "source": "juraj-google-style"}
{"code": "def build_gemini_query(self, query, extra_info):\n    if ('WHERE' in query):\n        return '{0} AND {1}'.format(query, extra_info)\n    else:\n        return '{0} WHERE {1}'.format(query, extra_info)", "docstring": "Append sql to a gemini query\n\nArgs:\nquery(str): The gemini query\nextra_info(str): The text that should be added\n\nReturn:\nextended_query(str)", "source": "codesearchnet"}
{"code": "def post_shared_file(self, image_file=None, source_link=None, shake_id=None, title=None, description=None):\n    if (image_file and source_link):\n        raise Exception('You can only specify an image file or a source link, not both.')\n    if ((not image_file) and (not source_link)):\n        raise Exception('You must specify an image file or a source link')\n    content_type = self._get_image_type(image_file)\n    if (not title):\n        title = os.path.basename(image_file)\n    f = open(image_file, 'rb')\n    endpoint = '/api/upload'\n    files = {'file': (title, f, content_type)}\n    data = self._make_request('POST', endpoint=endpoint, files=files)\n    f.close()\n    return data", "docstring": "Upload an image.\n\nTODO:\nDon't have a pro account to test (or even write) code to upload a\nshared filed to a particular shake.\n\nArgs:\nimage_file (str): path to an image (jpg/gif) on your computer.\nsource_link (str): URL of a source (youtube/vine/etc.)\nshake_id (int): shake to which to upload the file or\nsource_link [optional]\ntitle (str): title of the SharedFile [optional]\ndescription (str): description of the SharedFile\n\nReturns:\nSharedFile key.", "source": "codesearchnet"}
{"code": "def apply_cut(self, cm):\n    inverse = np.logical_not(self.cut_matrix(cm.shape[0])).astype(int)\n    return (cm * inverse)", "docstring": "Return a modified connectivity matrix with all connections that are\nsevered by this cut removed.\n\nArgs:\ncm (np.ndarray): A connectivity matrix.", "source": "codesearchnet"}
{"code": "def update_hash_with_primitive_value(hash_value, value):\n    hash_const = np.uint64(11400714819323197440)\n    hash_value = np.uint64(hash_value)\n    value = np.uint64(value)\n    hash_value = np.array([hash_value])\n    value = np.array([value])\n    hash_value = np.bitwise_xor(hash_value, value + hash_const + np.left_shift(hash_value, 10) + np.right_shift(hash_value, 4))[0]\n    return hash_value", "docstring": "Update the hash value using a primitive value.\n\nArgs:\nhash_value (uint64): The current hash value.\nvalue: The primitive value to incorporate into the hash.\n\nReturns:\nint: The updated hash value.", "source": "github-repos"}
{"code": "def do_put(self, uri, resource, timeout, custom_headers):\n        \n        self.validate_resource_uri(uri)\n\n        task, body = self._connection.put(uri, resource, custom_headers=custom_headers)\n\n        if not task:\n            return body\n\n        return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Helps to make put requests.\n\nArgs:\nuri: URI of the resource\ntimeout: Time out for the request in seconds.\ncustom_headers: Allows to set custom http headers.\n\nRetuns:\nReturns Task object", "source": "juraj-google-style"}
{"code": "def set_images(self, text, parse_html=True):\n        \n        \n        file_list = []\n        if parse_html:\n            processed_string = self.parse_html(text)\n        else:\n            processed_string = text\n        reg = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)\n        matches = reg.findall(processed_string)\n\n        \n        for match in matches:\n            file_result = self.set_image(match[1])\n            if file_result[0] != \"\":\n                replacement, new_files = file_result\n                processed_string = processed_string.replace(match[1], replacement)\n                file_list += new_files\n        return processed_string, file_list", "docstring": "set_images: Replace image strings with downloaded image checksums\nArgs:\ntext (str): text to parse for image strings\nReturns:string with checksums in place of image strings and\nlist of files that were downloaded from string", "source": "juraj-google-style"}
{"code": "def dot(*values: Union[float, complex, np.ndarray]\n        ) -> Union[float, complex, np.ndarray]:\n    \n    if len(values) == 1:\n        if isinstance(values[0], np.ndarray):\n            return np.array(values[0])\n        return values[0]\n    return np.linalg.multi_dot(values)", "docstring": "Computes the dot/matrix product of a sequence of values.\n\nA *args version of np.linalg.multi_dot.\n\nArgs:\n*values: The values to combine with the dot/matrix product.\n\nReturns:\nThe resulting value or matrix.", "source": "juraj-google-style"}
{"code": "def get_key_by_job_id(cls, mapreduce_id):\n    return db.Key.from_path(cls.kind(), str(mapreduce_id))", "docstring": "Retrieves the Key for a Job.\n\nArgs:\nmapreduce_id: The job to retrieve.\n\nReturns:\nDatastore Key that can be used to fetch the MapreduceState.", "source": "codesearchnet"}
{"code": "def store(self, df, attribute_columns):\n        \n\n        \n        entity_id_start = models.Entity.get_max_id(self.session) + 1\n        attribute_id_start = models.Attribute.get_max_id(self.session) + 1\n\n        \n        df['id'] = range(entity_id_start, entity_id_start + len(df))\n        df['type'] = self.type\n\n        \n        df[['id', 'type']].to_sql(name=models.Entity.__tablename__,\n                                  con=self.client.engine,\n                                  if_exists='append',\n                                  index=False)\n\n        \n        for col in attribute_columns:\n            \n            attr_df = df[[col, 'id']].rename(columns={'id': 'entity_id',\n                                                      col: 'value'})\n            attr_df['name'] = col\n\n            \n            attr_df['id'] = range(attribute_id_start, attribute_id_start + len(df))\n            attribute_id_start += len(df)\n\n            \n            attr_df.to_sql(name=models.Attribute.__tablename__,\n                           con=self.client.engine,\n                           if_exists='append',\n                           index=False)", "docstring": "Store entities and their attributes\n\nArgs:\ndf (pandas.DataFrame): data to store (storing appends 'id' and 'type' columns!)\nattribute_columns (list(str)): list of column labels that define attributes", "source": "juraj-google-style"}
{"code": "def ParseByteStream(self, parser_mediator, byte_stream, parent_path_segments=None, codepage='cp1252'):\n    if (parent_path_segments and isinstance(parent_path_segments, list)):\n        self._path_segments = list(parent_path_segments)\n    else:\n        self._path_segments = []\n    shell_item_list = pyfwsi.item_list()\n    parser_mediator.AppendToParserChain(self)\n    try:\n        shell_item_list.copy_from_byte_stream(byte_stream, ascii_codepage=codepage)\n        for shell_item in iter(shell_item_list.items):\n            self._ParseShellItem(parser_mediator, shell_item)\n    finally:\n        parser_mediator.PopFromParserChain()", "docstring": "Parses the shell items from the byte stream.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nbyte_stream (bytes): shell items data.\nparent_path_segments (Optional[list[str]]): parent shell item path\nsegments.\ncodepage (Optional[str]): byte stream codepage.", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], patch_size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    if 'longest_edge' in size:\n        size = (size['longest_edge'], size['longest_edge'])\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n    else:\n        raise ValueError(\"size must contain either 'longest_edge' or 'height' and 'width'.\")\n    if 'height' in patch_size and 'width' in patch_size:\n        patch_size = (patch_size['height'], patch_size['width'])\n    else:\n        raise ValueError(\"patch_size must contain either 'shortest_edge' or 'height' and 'width'.\")\n    output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size, input_data_format=input_data_format)\n    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nDict containing the longest possible edge of the image.\npatch_size (`Dict[str, int]`):\nPatch size used to calculate the size of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _rpc_metadata(self):\n    if (self._rpc_metadata_internal is None):\n        self._rpc_metadata_internal = _helpers.metadata_with_prefix(self._database_string)\n    return self._rpc_metadata_internal", "docstring": "The RPC metadata for this client's associated database.\n\nReturns:\nSequence[Tuple(str, str)]: RPC metadata with resource prefix\nfor the database associated with this client.", "source": "codesearchnet"}
{"code": "def create_project(self, resource):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.create(resource)", "docstring": "Create the entity described by the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\n\nReturns:\n(intern.resource.boss.BossResource): Returns resource of type\nrequested on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def _ReadLine(self, text_file_object, max_len=None, depth=0):\n    line = text_file_object.readline(size=max_len)\n    if (not line):\n        return ''\n    if (line in self._EMPTY_LINES):\n        if (depth == self._MAXIMUM_DEPTH):\n            return ''\n        return self._ReadLine(text_file_object, max_len=max_len, depth=(depth + 1))\n    return line.strip()", "docstring": "Reads a line from a text file.\n\nArgs:\ntext_file_object (dfvfs.TextFile): text file.\nmax_len (Optional[int]): maximum number of bytes a single line can take,\nwhere None means all remaining bytes should be read.\ndepth (Optional[int]): number of new lines the parser encountered.\n\nReturns:\nstr: single line read from the file-like object, or the maximum number of\ncharacters, if max_len defined and line longer than the defined size.\n\nRaises:\nUnicodeDecodeError: if the text cannot be decoded using the specified\nencoding.", "source": "codesearchnet"}
{"code": "def set_management_icmp(enabled=True, deploy=False):\n    \n\n    if enabled is True:\n        value = \"no\"\n    elif enabled is False:\n        value = \"yes\"\n    else:\n        raise CommandExecutionError(\"Invalid option provided for service enabled option.\")\n\n    ret = {}\n\n    query = {'type': 'config',\n             'action': 'set',\n             'xpath': '/config/devices/entry[@name=\\'localhost.localdomain\\']/deviceconfig/system/service',\n             'element': '<disable-icmp>{0}</disable-icmp>'.format(value)}\n\n    ret.update(__proxy__['panos.call'](query))\n\n    if deploy is True:\n        ret.update(commit())\n\n    return ret", "docstring": "Enables or disables the ICMP management service on the device.\n\nCLI Example:\n\nArgs:\nenabled (bool): If true the service will be enabled. If false the service will be disabled.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_management_icmp\nsalt '*' panos.set_management_icmp enabled=False deploy=True", "source": "juraj-google-style"}
{"code": "def _prob_in_top_k(clean_values, noisy_values, noise_stddev, noisy_top_values, k):\n    batch = tf.shape(clean_values)[0]\n    m = tf.shape(noisy_top_values)[1]\n    top_values_flat = tf.reshape(noisy_top_values, [(- 1)])\n    threshold_positions_if_in = ((tf.range(batch) * m) + k)\n    threshold_if_in = tf.expand_dims(tf.gather(top_values_flat, threshold_positions_if_in), 1)\n    is_in = tf.greater(noisy_values, threshold_if_in)\n    if (noise_stddev is None):\n        return tf.to_float(is_in)\n    threshold_positions_if_out = (threshold_positions_if_in - 1)\n    threshold_if_out = tf.expand_dims(tf.gather(top_values_flat, threshold_positions_if_out), 1)\n    prob_if_in = _normal_distribution_cdf((clean_values - threshold_if_in), noise_stddev)\n    prob_if_out = _normal_distribution_cdf((clean_values - threshold_if_out), noise_stddev)\n    prob = tf.where(is_in, prob_if_in, prob_if_out)\n    return prob", "docstring": "Helper function to NoisyTopKGating.\n\nComputes the probability that value is in top k, given different random noise.\n\nThis gives us a way of backpropagating from a loss that balances the number\nof times each expert is in the top k experts per example.\n\nIn the case of no noise, pass in None for noise_stddev, and the result will\nnot be differentiable.\n\nArgs:\nclean_values: a `Tensor` of shape [batch, n].\nnoisy_values: a `Tensor` of shape [batch, n].  Equal to clean values plus\nnormally distributed noise with standard deviation noise_stddev.\nnoise_stddev: a `Tensor` of shape [batch, n], or None\nnoisy_top_values: a `Tensor` of shape [batch, m].\n\"values\" Output of tf.top_k(noisy_top_values, m).  m >= k+1\nk: an integer.\n\nReturns:\na `Tensor` of shape [batch, n].", "source": "codesearchnet"}
{"code": "def random_expr(depth, vlist, ops):\n    if (not depth):\n        return str(vlist[random.randrange(len(vlist))])\n    max_depth_side = random.randrange(2)\n    other_side_depth = random.randrange(depth)\n    left = random_expr(((depth - 1) if max_depth_side else other_side_depth), vlist, ops)\n    right = random_expr(((depth - 1) if (not max_depth_side) else other_side_depth), vlist, ops)\n    op = ops[random.randrange(len(ops))]\n    return ExprNode(left, right, op)", "docstring": "Generate a random expression tree.\n\nArgs:\ndepth: At least one leaf will be this many levels down from the top.\nvlist: A list of chars. These chars are randomly selected as leaf values.\nops: A list of ExprOp instances.\n\nReturns:\nAn ExprNode instance which is the root of the generated expression tree.", "source": "codesearchnet"}
{"code": "def to_lxml_encoding(encoding):\n    try:\n        lxml.html.HTMLParser(encoding=encoding)\n    except LookupError:\n        encoding = encoding.replace('-', '')\n    else:\n        return encoding\n    try:\n        lxml.html.HTMLParser(encoding=encoding)\n    except LookupError:\n        encoding = encoding.replace('_', '')\n    else:\n        return encoding\n    try:\n        lxml.html.HTMLParser(encoding=encoding)\n    except LookupError:\n        pass\n    else:\n        return encoding", "docstring": "Check if lxml supports the specified encoding.\n\nReturns:\nstr, None", "source": "codesearchnet"}
{"code": "def CreateAdGroup(client, campaign_id):\n    ad_group_service = client.GetService('AdGroupService', 'v201809')\n    adgroup = {'adGroupType': 'SHOPPING_SHOWCASE_ADS', 'campaignId': campaign_id, 'name': ('AdGroup \n    adgroup_operations = {'operator': 'ADD', 'operand': adgroup}\n    adgroup = ad_group_service.mutate(adgroup_operations)['value'][0]\n    print(('AdGroup with name \"%s\" and ID \"%s\" was added.' % (adgroup['name'], adgroup['id'])))\n    return adgroup", "docstring": "Creates an AdGroup for the given shopping campaign ID.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: the str ID of a shopping campaign.\n\nReturns:\nThe created AdGroup as a sudsobject.", "source": "codesearchnet"}
{"code": "def __init__(self, request, scopes=None, return_url=None):\n        \n        self.request = request\n        self.return_url = return_url or request.get_full_path()\n        if scopes:\n            self._scopes = set(oauth2_settings.scopes) | set(scopes)\n        else:\n            self._scopes = set(oauth2_settings.scopes)", "docstring": "Initialize the Oauth2 Object.\n\nArgs:\nrequest: Django request object.\nscopes: Scopes desired for this OAuth2 flow.\nreturn_url: The url to return to after the OAuth flow is complete,\ndefaults to the request's current URL path.", "source": "juraj-google-style"}
{"code": "def slogdet(x):\n    if any_symbolic_tensors((x,)):\n        return Slogdet().symbolic_call(x)\n    return backend.numpy.slogdet(x)", "docstring": "Compute the sign and natural logarithm of the determinant of a matrix.\n\nArgs:\nx: Input matrix. It must 2D and square.\n\nReturns:\nA tuple `(sign, logabsdet)`. `sign` is a number representing\nthe sign of the determinant. For a real matrix, this is 1, 0, or -1.\nFor a complex matrix, this is a complex number with absolute value 1\n(i.e., it is on the unit circle), or else 0.\n`logabsdet` is the natural log of the absolute value of the determinant.", "source": "github-repos"}
{"code": "def get_all_results_for_query_batch(self, batch_id, job_id=None, chunk_size=2048):\n    result_ids = self.get_query_batch_result_ids(batch_id, job_id=job_id)\n    if (not result_ids):\n        raise RuntimeError('Batch is not complete')\n    for result_id in result_ids:\n        (yield self.get_query_batch_results(batch_id, result_id, job_id=job_id, chunk_size=chunk_size))", "docstring": "Gets result ids and generates each result set from the batch and returns it\nas an generator fetching the next result set when needed\n\nArgs:\nbatch_id: id of batch\njob_id: id of job, if not provided, it will be looked up", "source": "codesearchnet"}
{"code": "def get_directory_list_doc(self, configs):\n    \n\n    if not isinstance(configs, (tuple, list)):\n      configs = [configs]\n\n    util.check_list_type(configs, dict, 'configs', allow_none=False)\n\n    return self.__directory_list_descriptor(configs)", "docstring": "JSON dict description of a protorpc.remote.Service in list format.\n\nArgs:\nconfigs: Either a single dict or a list of dicts containing the service\nconfigurations to list.\n\nReturns:\ndict, The directory list document as a JSON dict.", "source": "juraj-google-style"}
{"code": "def mme_delete(case_obj, mme_base_url, mme_token):\n    \n    server_responses = []\n\n    if not mme_base_url or not mme_token:\n        return 'Please check that Matchmaker connection parameters are valid'\n\n    \n    for patient in case_obj['mme_submission']['patients']:\n\n        \n        patient_id = patient['id']\n        url = ''.join([mme_base_url, '/patient/delete/', patient_id])\n        resp = matchmaker_request(url=url, token=mme_token, method='DELETE', )\n\n        server_responses.append({\n            'patient_id': patient_id,\n            'message': resp.get('message'),\n            'status_code': resp.get('status_code')\n        })\n\n    return server_responses", "docstring": "Delete all affected samples for a case from MatchMaker\n\nArgs:\ncase_obj(dict) a scout case object\nmme_base_url(str) base url of the MME server\nmme_token(str) auth token of the MME server\n\nReturns:\nserver_responses(list): a list of object of this type:\n{\n'patient_id': patient_id\n'message': server_message,\n'status_code': server_status_code\n}", "source": "juraj-google-style"}
{"code": "def duration(self, value):\n        \n        if value == self._defaults['duration'] and 'duration' in self._values:\n            del self._values['duration']\n        else:\n            self._values['duration'] = value", "docstring": "The duration property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def parse_env(config_schema, env):\n    \n    try:\n        return {\n            key: item_schema.parse(key, env.get(key))\n            for key, item_schema in config_schema.items()\n        }\n    except KeyError as error:\n        raise MissingConfigError(\n            \"Required config not set: {}\".format(error.args[0])\n        )", "docstring": "Parse the values from a given environment against a given config schema\n\nArgs:\nconfig_schema: A dict which maps the variable name to a Schema object\nthat describes the requested value.\nenv: A dict which represents the value of each variable in the\nenvironment.", "source": "juraj-google-style"}
{"code": "def get_value_at_percentile(self, percentile):\n        \n        count_at_percentile = self.get_target_count_at_percentile(percentile)\n        total = 0\n        for index in range(self.counts_len):\n            total += self.get_count_at_index(index)\n            if total >= count_at_percentile:\n                value_at_index = self.get_value_from_index(index)\n                if percentile:\n                    return self.get_highest_equivalent_value(value_at_index)\n                return self.get_lowest_equivalent_value(value_at_index)\n        return 0", "docstring": "Get the value for a given percentile\n\nArgs:\npercentile: a float in [0.0..100.0]\nReturns:\nthe value for the given percentile", "source": "juraj-google-style"}
{"code": "def _encode_gif(images, fps):\n  \n  writer = WholeVideoWriter(fps)\n  writer.write_multi(images)\n  return writer.finish()", "docstring": "Encodes numpy images into gif string.\n\nArgs:\nimages: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape\n`[time, height, width, channels]` where `channels` is 1 or 3.\nfps: frames per second of the animation\n\nReturns:\nThe encoded gif string.\n\nRaises:\nIOError: If the ffmpeg command returns an error.", "source": "juraj-google-style"}
{"code": "def serialize(activation):\n    if hasattr(activation, '__name__') and activation.__name__ in _TF_ACTIVATIONS_V2:\n        return _TF_ACTIVATIONS_V2[activation.__name__]\n    return serialize_keras_object(activation)", "docstring": "Returns the string identifier of an activation function.\n\nArgs:\nactivation : Function object.\n\nReturns:\nString denoting the name attribute of the input function\n\nFor example:\n\n>>> tf.keras.activations.serialize(tf.keras.activations.tanh)\n'tanh'\n>>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)\n'sigmoid'\n>>> tf.keras.activations.serialize('abcd')\nTraceback (most recent call last):\n...\nValueError: ('Cannot serialize', 'abcd')\n\nRaises:\nValueError: The input function is not a valid one.", "source": "github-repos"}
{"code": "def depth_november_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_november_average_ground_temperature`'.format(value))\n\n        self._depth_november_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_november_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_november_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def json_fhir_string_to_proto(raw_json: str, proto_cls: Type[_T], *, validate: bool=True, default_timezone: str=_primitive_time_utils.SIMPLE_ZULU) -> _T:\n    resource = proto_cls()\n    merge_json_fhir_string_into_proto(raw_json, resource, validate=validate, default_timezone=default_timezone)\n    return resource", "docstring": "Creates a resource of proto_cls and merges contents of raw_json into it.\n\nArgs:\nraw_json: The raw FHIR JSON string to convert.\nproto_cls: A subclass of message.Message to instantiate and return.\nvalidate: A Boolean value indicating if validation should be performed on\nthe resultant Message. Validation takes the form of ensuring that basic\nchecks such as cardinality guarantees, required field adherence, etc. are\nmet. Defaults to True.\ndefault_timezone: A string specifying the timezone string to use for time-\nlike FHIR data during parsing. Defaults to 'Z' for UTC.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that raw_json was not valid FHIR.\n\nReturns:\nAn instance of proto_cls with FHIR JSON data from the raw_json\nrepresentation.", "source": "github-repos"}
{"code": "def add_string_pairs_from_label_element(xib_file, results, label, special_ui_components_prefix):\n    label_entry_comment = extract_element_internationalized_comment(label)\n    if (label_entry_comment is None):\n        return\n    warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix)\n    if (label.hasAttribute('usesAttributedText') and (label.attributes['usesAttributedText'].value == 'YES')):\n        add_string_pairs_from_attributed_ui_element(results, label, label_entry_comment)\n    else:\n        try:\n            label_entry_key = label.attributes['text'].value\n        except KeyError:\n            try:\n                label_entry_key = label.getElementsByTagName('string')[0].firstChild.nodeValue\n            except Exception:\n                label_entry_key = 'N/A'\n                logging.warn('%s: Missing text entry in %s', xib_file, label.toxml('UTF8'))\n        results.append((label_entry_key, label_entry_comment))", "docstring": "Adds string pairs from a label element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\nlabel (element): The label element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix (str):\nIf not None, extraction will not warn about internationalized UI components with this class prefix.", "source": "codesearchnet"}
{"code": "def validate_tag(self, key, value):\n    if (key == 'owner'):\n        return validate_email(value, self.partial_owner_match)\n    elif (key == self.gdpr_tag):\n        return (value in self.gdpr_tag_values)\n    else:\n        return True", "docstring": "Check whether a tag value is valid\n\nArgs:\nkey: A tag key\nvalue: A tag value\n\nReturns:\n`(True or False)`\nA boolean indicating whether or not the value is valid", "source": "codesearchnet"}
{"code": "def read_requirements(req_file):\n    items = list(parse_requirements(req_file, session={}))\n    result = []\n    for item in items:\n        line_number = item.comes_from.split((req_file + ' (line '))[1][:(- 1)]\n        if item.req:\n            item.req.marker = item.markers\n            result.append((item.req, line_number))\n        else:\n            result.append((item, line_number))\n    return result", "docstring": "Reads a requirements file.\n\nArgs:\nreq_file (str): Filename of requirements file", "source": "codesearchnet"}
{"code": "def tokenize_to_spacy_doc(self, text: str) -> Doc:\n        \n        if not self.keep_multi_space:\n            text = re.sub(' +', ' ', text)\n        doc = self.nlp(text, disable=['parser'])\n        for a_token in doc:\n            self.custom_token(a_token)\n\n        return doc", "docstring": "Tokenize the given text, returning a spacy doc. Used for spacy rule extractor\n\nArgs:\ntext (string):\n\nReturns: Doc", "source": "juraj-google-style"}
{"code": "def avl_split_last(root):\n    if (root is None):\n        raise IndexError('Empty tree has no maximum element')\n    (root, left, right) = avl_release_kids(root)\n    if (right is None):\n        (new_root, last_node) = (left, root)\n    else:\n        (new_right, last_node) = avl_split_last(right)\n        new_root = avl_join(left, new_right, root)\n    return (new_root, last_node)", "docstring": "Removes the maximum element from the tree\n\nReturns:\ntuple: new_root, last_node\n\nO(log(n)) = O(height(root))", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.GetAgent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Agents/GetAgent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.GetAgentRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.Agent.FromString,\n        )\n    self.SearchAgents = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Agents/SearchAgents',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.SearchAgentsRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.SearchAgentsResponse.FromString,\n        )\n    self.TrainAgent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Agents/TrainAgent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.TrainAgentRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.ExportAgent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Agents/ExportAgent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.ExportAgentRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.ImportAgent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Agents/ImportAgent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.ImportAgentRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n    self.RestoreAgent = channel.unary_unary(\n        '/google.cloud.dialogflow.v2.Agents/RestoreAgent',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_agent__pb2.RestoreAgentRequest.SerializeToString,\n        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def add_spin_by_element(self, spins):\n        \n        for site in self.sites:\n            new_sp = {}\n            for sp, occu in site.species.items():\n                sym = sp.symbol\n                oxi_state = getattr(sp, \"oxi_state\", None)\n                new_sp[Specie(sym, oxidation_state=oxi_state,\n                              properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu\n            site.species = new_sp", "docstring": "Add spin states to a structure.\n\nArgs:\nspisn (dict): Dict of spins associated with\nelements or species, e.g. {\"Ni\":+5} or {\"Ni2+\":5}", "source": "juraj-google-style"}
{"code": "def delete_dict_keys(dict_, key_list):\n    invalid_keys = (set(key_list) - set(dict_.keys()))\n    valid_keys = (set(key_list) - invalid_keys)\n    for key in valid_keys:\n        del dict_[key]\n    return dict_", "docstring": "r\"\"\"\nRemoves items from a dictionary inplace. Keys that do not exist are\nignored.\n\nArgs:\ndict_ (dict): dict like object with a __del__ attribute\nkey_list (list): list of keys that specify the items to remove\n\nCommandLine:\npython -m utool.util_dict --test-delete_dict_keys\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_dict import *  # NOQA\n>>> import utool as ut\n>>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}\n>>> key_list = ['duck', 'bread', 'cider']\n>>> delete_dict_keys(dict_, key_list)\n>>> result = ut.repr4(dict_, nl=False)\n>>> print(result)\n{'churches': 1, 'very small rocks': 2}", "source": "codesearchnet"}
{"code": "def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):\n    return (graph_op_creation_digest.host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in graph_op_creation_digest.stack_frame_ids])", "docstring": "Read the stack trace of a given graph op creation object.\n\nArgs:\ngraph_op_creation_digest: The GraphOpCreationDigest object of interest.\n\nReturns:\nA tuple consisting of:\n1. The host name.\n2. The stack trace, as a list of (file_path, lineno, func) tuples.", "source": "github-repos"}
{"code": "def describe_images(self, idaho_image_results):\n    results = idaho_image_results['results']\n    results = [r for r in results if ('IDAHOImage' in r['type'])]\n    self.logger.debug(('Describing %s IDAHO images.' % len(results)))\n    catids = set([r['properties']['catalogID'] for r in results])\n    description = {}\n    for catid in catids:\n        description[catid] = {}\n        description[catid]['parts'] = {}\n        images = [r for r in results if (r['properties']['catalogID'] == catid)]\n        for image in images:\n            description[catid]['sensorPlatformName'] = image['properties']['sensorPlatformName']\n            part = int(image['properties']['vendorDatasetIdentifier'].split(':')[1][(- 3):])\n            color = image['properties']['colorInterpretation']\n            bucket = image['properties']['tileBucketName']\n            identifier = image['identifier']\n            boundstr = image['properties']['footprintWkt']\n            try:\n                description[catid]['parts'][part]\n            except:\n                description[catid]['parts'][part] = {}\n            description[catid]['parts'][part][color] = {}\n            description[catid]['parts'][part][color]['id'] = identifier\n            description[catid]['parts'][part][color]['bucket'] = bucket\n            description[catid]['parts'][part][color]['boundstr'] = boundstr\n    return description", "docstring": "Describe the result set of a catalog search for IDAHO images.\n\nArgs:\nidaho_image_results (dict): Result set of catalog search.\nReturns:\nresults (json): The full catalog-search response for IDAHO images\ncorresponding to the given catID.", "source": "codesearchnet"}
{"code": "def GetExtractionStatusUpdateCallback(self):\n    if (self._mode == self.MODE_LINEAR):\n        return self._PrintExtractionStatusUpdateLinear\n    if (self._mode == self.MODE_WINDOW):\n        return self._PrintExtractionStatusUpdateWindow\n    return None", "docstring": "Retrieves the extraction status update callback function.\n\nReturns:\nfunction: status update callback function or None if not available.", "source": "codesearchnet"}
{"code": "def _getFuncArgs(func):\n    code = func.func_code\n    Defaults = func.func_defaults\n    nargs = code.co_argcount\n    ArgNames = code.co_varnames[:nargs]\n    Args = OrderedDict()\n    argCount = len(ArgNames)\n    defCount = (len(Defaults) if Defaults else 0)\n    diff = (argCount - defCount)\n    for i in range(0, diff):\n        Args[ArgNames[i]] = {}\n    for i in range(diff, argCount):\n        Args[ArgNames[i]] = {'default': Defaults[(i - diff)]}\n    return Args", "docstring": "r\"\"\"Gives the details on the args of the given func.\n\nArgs:\nfunc (function): The function to get details on.", "source": "codesearchnet"}
{"code": "def Expand(self, macro_ref_str):\n    match = _MACRO_RE.match(macro_ref_str)\n    if ((match is None) or (match.group(0) != macro_ref_str)):\n        raise PDDMError(('Failed to parse macro reference: \"%s\"' % macro_ref_str))\n    if (match.group('name') not in self._macros):\n        raise PDDMError(('No macro named \"%s\".' % match.group('name')))\n    return self._Expand(match, [], macro_ref_str)", "docstring": "Expands the macro reference.\n\nArgs:\nmacro_ref_str: String of a macro reference (i.e. foo(a, b)).\n\nReturns:\nThe text from the expansion.\n\nRaises:\nPDDMError if there are any issues.", "source": "codesearchnet"}
{"code": "def connect(self, *args, auto_reconnect=False, **kwargs):\n        \n        connection_info = {\n            'auto_reconnect': auto_reconnect,\n            'args': args,\n            'kwargs': kwargs,\n        }\n        self.connect_info['connection'] = connection_info\n\n        \n        if 'user' not in self.connect_info:\n            raise Exception('`set_user_info` must be called before connecting to server.')\n\n        \n        connection = loop.create_connection(lambda: self,\n                                            *args, **kwargs)\n        asyncio.Task(connection)", "docstring": "Connects to the given server.\n\nArgs:\nauto_reconnect (bool): Automatically reconnect on disconnection.\n\nOther arguments to this function are as usually supplied to\n:meth:`asyncio.BaseEventLoop.create_connection`.", "source": "juraj-google-style"}
{"code": "def _CheckType(value, check_type, name, allow_none=True):\n    if ((value is None) and allow_none):\n        return\n    if (not isinstance(value, check_type)):\n        raise TypeError((\"%s type doesn't match %s.\" % (name, check_type)))", "docstring": "Check that the type of an object is acceptable.\n\nArgs:\nvalue: The object whose type is to be checked.\ncheck_type: The type that the object must be an instance of.\nname: Name of the object, to be placed in any error messages.\nallow_none: True if value can be None, false if not.\n\nRaises:\nTypeError: If value is not an acceptable type.", "source": "codesearchnet"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(FormatDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.metadata = {}", "docstring": "Initializes a format data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def generate_full_symmops(symmops, tol):\n    \n    \n    \n    \n    UNIT = np.eye(4)\n    generators = [op.affine_matrix for op in symmops\n                  if not np.allclose(op.affine_matrix, UNIT)]\n    if not generators:\n        \n        return symmops\n    else:\n        full = list(generators)\n\n        for g in full:\n            for s in generators:\n                op = np.dot(g, s)\n                d = np.abs(full - op) < tol\n                if not np.any(np.all(np.all(d, axis=2), axis=1)):\n                    full.append(op)\n\n        d = np.abs(full - UNIT) < tol\n        if not np.any(np.all(np.all(d, axis=2), axis=1)):\n            full.append(UNIT)\n        return [SymmOp(op) for op in full]", "docstring": "Recursive algorithm to permute through all possible combinations of the\ninitially supplied symmetry operations to arrive at a complete set of\noperations mapping a single atom to all other equivalent atoms in the\npoint group.  This assumes that the initial number already uniquely\nidentifies all operations.\n\nArgs:\nsymmops ([SymmOp]): Initial set of symmetry operations.\n\nReturns:\nFull set of symmetry operations.", "source": "juraj-google-style"}
{"code": "def parse_args(args=None):\n    parser = argparse.ArgumentParser(description='Main script to run LIVVkit.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, fromfile_prefix_chars='@')\n    parser.add_argument('-o', '--out-dir', default=os.path.join(os.getcwd(), ('vv_' + time.strftime('%Y-%m-%d'))), help='Location to output the LIVVkit webpages.')\n    parser.add_argument('-v', '--verify', nargs=2, default=None, help=' '.join(['Specify the locations of the test and bench bundle to', 'compare (respectively).']))\n    parser.add_argument('-V', '--validate', action='store', nargs='+', default=None, help=' '.join(['Specify the location of the configuration files for', 'validation tests.']))\n    parser.add_argument('-e', '--extension', action='store', nargs='+', default=None, dest='validate', metavar='EXTENSION', help=' '.join(['Specify the location of the configuration files for', 'LIVVkit extensions.']))\n    parser.add_argument('-p', '--publish', action='store_true', help=' '.join(['Also produce a publication quality copy of the figure in', 'the output directory (eps, 600d pi).']))\n    parser.add_argument('-s', '--serve', nargs='?', type=int, const=8000, help=' '.join(['Start a simple HTTP server for the output website specified', 'by OUT_DIR on port SERVE.']))\n    parser.add_argument('--version', action='version', version='LIVVkit {}'.format(livvkit.__version__), help=\"Show LIVVkit's version number and exit\")\n    return init(parser.parse_args(args))", "docstring": "Handles the parsing of options for LIVVkit's command line interface\n\nArgs:\nargs: The list of arguments, typically sys.argv[1:]", "source": "codesearchnet"}
{"code": "def potcar_eatom_list_from_outcar( filename='OUTCAR' ):\n    \n    with open( filename ) as f:\n        outcar = f.read()\n    eatom_re = re.compile( \"energy of atom\\s+\\d+\\s+EATOM=\\s*([-\\d\\.]+)\" )\n    eatom = [ float( e ) for e in eatom_re.findall( outcar ) ]\n    return eatom", "docstring": "Returns a list of EATOM values for the pseudopotentials used.\n\nArgs:\nfilename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\nReturns:\n(List(Float)): A list of EATOM values, in the order they appear in the OUTCAR.", "source": "juraj-google-style"}
{"code": "def get_subclasses(self, t):\n    if isinstance(t, pytd.ClassType):\n        subclasses = self.direct_subclasses.get(t, [])\n        return sum((self.get_subclasses(pytd.ClassType(c.name, c)) for c in subclasses), [t])\n    else:\n        raise NotImplementedError(f\"Can't extract subclasses from {type(t)}\")", "docstring": "Get all classes derived from this type.\n\nArgs:\nt: A pytd.Type\n\nReturns:\nA list of pytd.Type.", "source": "github-repos"}
{"code": "def __parameter_descriptor(self, param):\n    \n    descriptor = {}\n\n    param_type, param_format = self.__field_to_parameter_type_and_format(param)\n\n    \n    if param.required:\n      descriptor['required'] = True\n\n    \n    descriptor['type'] = param_type\n\n    \n    if param_format:\n      descriptor['format'] = param_format\n\n    \n    default = self.__parameter_default(param)\n    if default is not None:\n      descriptor['default'] = default\n\n    \n    if param.repeated:\n      descriptor['repeated'] = True\n\n    \n    \n    \n    enum_descriptor = self.__parameter_enum(param)\n    if enum_descriptor is not None:\n      descriptor['enum'] = enum_descriptor\n      descriptor['enumDescriptions'] = [''] * len(enum_descriptor)\n\n    return descriptor", "docstring": "Creates descriptor for a parameter.\n\nArgs:\nparam: The parameter to be described.\n\nReturns:\nDictionary containing a descriptor for the parameter.", "source": "juraj-google-style"}
{"code": "def CleanClientVersions(clients=None, dry_run=True, token=None):\n    if (not clients):\n        index = client_index.CreateClientIndex(token=token)\n        clients = index.LookupClients(['.'])\n    clients.sort()\n    with data_store.DB.GetMutationPool() as pool:\n        logging.info('checking %d clients', len(clients))\n        client_infos = data_store.DB.MultiResolvePrefix(clients, 'aff4:type', data_store.DB.ALL_TIMESTAMPS)\n        for (client, type_list) in client_infos:\n            logging.info('%s: has %d versions', client, len(type_list))\n            cleared = 0\n            kept = 1\n            last_kept = type_list[0][2]\n            for (_, _, ts) in type_list[1:]:\n                if ((last_kept - ts) > ((60 * 60) * 1000000)):\n                    last_kept = ts\n                    kept += 1\n                else:\n                    if (not dry_run):\n                        pool.DeleteAttributes(client, ['aff4:type'], start=ts, end=ts)\n                    cleared += 1\n                    if (pool.Size() > 10000):\n                        pool.Flush()\n            logging.info('%s: kept %d and cleared %d', client, kept, cleared)", "docstring": "A script to remove excessive client versions.\n\nEspecially when a client is heavily cloned, we sometimes write an excessive\nnumber of versions of it. Since these version all go into the same database\nrow and are displayed as a dropdown list in the adminui, it is sometimes\nnecessary to clear them out.\n\nThis deletes version from clients so that we have at most one\nversion per hour.\n\nArgs:\nclients: A list of ClientURN, if empty cleans all clients.\ndry_run: whether this is a dry run\ntoken: datastore token.", "source": "codesearchnet"}
{"code": "def create_new_username(ip, devicetype=None, timeout=_DEFAULT_TIMEOUT):\n    res = Resource(_api_url(ip), timeout)\n    prompt = 'Press the Bridge button, then press Return: '\n    if (sys.version_info.major == 2):\n        _ = raw_input(prompt)\n    else:\n        _ = input(prompt)\n    if (devicetype is None):\n        devicetype = 'qhue\n    response = res(devicetype=devicetype, http_method='post')\n    return response[0]['success']['username']", "docstring": "Interactive helper function to generate a new anonymous username.\n\nArgs:\nip: ip address of the bridge\ndevicetype (optional): devicetype to register with the bridge. If\nunprovided, generates a device type based on the local hostname.\ntimeout (optional, default=5): request timeout in seconds\nRaises:\nQhueException if something went wrong with username generation (for\nexample, if the bridge button wasn't pressed).", "source": "codesearchnet"}
{"code": "def load_extra(cls, filename):\n    \n    try:\n      with open(filename, 'rb') as configuration_file:\n        cls.load_extra_data(configuration_file.read())\n        sys.stderr.write(\"Config successfully loaded from {0:s}\\n\".format(\n            filename))\n        return True\n    except IOError:\n      return False", "docstring": "Loads extra JSON configuration parameters from a file on the filesystem.\n\nArgs:\nfilename: str, the filename to open.\n\nReturns:\nbool: True if the extra configuration parameters were read.", "source": "juraj-google-style"}
{"code": "async def _pb_request(self, endpoint, request_pb, response_pb):\n        \n        logger.debug('Sending Protocol Buffer request %s:\\n%s', endpoint,\n                     request_pb)\n        res = await self._base_request(\n            'https:\n            'application/x-protobuf',  \n            'proto',  \n            request_pb.SerializeToString()\n        )\n        try:\n            response_pb.ParseFromString(base64.b64decode(res.body))\n        except binascii.Error as e:\n            raise exceptions.NetworkError(\n                'Failed to decode base64 response: {}'.format(e)\n            )\n        except google.protobuf.message.DecodeError as e:\n            raise exceptions.NetworkError(\n                'Failed to decode Protocol Buffer response: {}'.format(e)\n            )\n        logger.debug('Received Protocol Buffer response:\\n%s', response_pb)\n        status = response_pb.response_header.status\n        if status != hangouts_pb2.RESPONSE_STATUS_OK:\n            description = response_pb.response_header.error_description\n            raise exceptions.NetworkError(\n                'Request failed with status {}: \\'{}\\''\n                .format(status, description)\n            )", "docstring": "Send a Protocol Buffer formatted chat API request.\n\nArgs:\nendpoint (str): The chat API endpoint to use.\nrequest_pb: The request body as a Protocol Buffer message.\nresponse_pb: The response body as a Protocol Buffer message.\n\nRaises:\nNetworkError: If the request fails.", "source": "juraj-google-style"}
{"code": "def set_contrast(self, contrast):\n        \n        self._contrast = contrast\n\n        self.x_spread = 2 * (1.0 - contrast)\n        self.y_spread = 2.0 - 2 * (1.0 - contrast)\n\n        self._build_cdict()", "docstring": "Adjusts the image contrast.\n\nContrast refers to the rate of change of color with color level.\nAt low contrast, color changes gradually over many intensity\nlevels, while at high contrast it can change rapidly within a\nfew levels\n\nArgs:\ncontrast: float\nA number between 0 and 1.  Note that upon initialization the\ncolormap has a default contrast value of 0.5.\n\nReturns: void", "source": "juraj-google-style"}
{"code": "def delete(self):\n    if self.exists():\n        try:\n            self._api.objects_delete(self._bucket, self._key)\n        except Exception as e:\n            raise e", "docstring": "Deletes this item from its bucket.\n\nRaises:\nException if there was an error deleting the item.", "source": "codesearchnet"}
{"code": "def set_requestable(self, requestable=True):\n    self.data['is_requestdata_type'] = requestable\n    if requestable:\n        self.data['private'] = False", "docstring": "Set the dataset to be of type requestable or not\n\nArgs:\nrequestable (bool): Set whether dataset is requestable. Defaults to True.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _write_submit_script(self, script_string, script_filename):\n    try:\n        with open(script_filename, 'w') as f:\n            f.write(script_string)\n    except KeyError as e:\n        logger.error('Missing keys for submit script : %s', e)\n        raise ep_error.SchedulerMissingArgs(e.args, self.label)\n    except IOError as e:\n        logger.error('Failed writing to submit script: %s', script_filename)\n        raise ep_error.ScriptPathError(script_filename, e)\n    return True", "docstring": "Load the template string with config values and write the generated submit script to\na submit script file.\n\nArgs:\n- template_string (string) : The template string to be used for the writing submit script\n- script_filename (string) : Name of the submit script\n\nReturns:\n- True: on success\n\nRaises:\nSchedulerMissingArgs : If template is missing args\nScriptPathError : Unable to write submit script out", "source": "codesearchnet"}
{"code": "def post_create_app(cls, app, **settings):\n        \n        super(MarshmallowAwareApp, cls).post_create_app(app, **settings)\n\n        marsh.init_app(app)\n\n        return app", "docstring": "Automatically register and init the Flask Marshmallow extension.\n\nArgs:\napp (flask.Flask): The application instance in which to initialize\nFlask Marshmallow upon.\n\nKwargs:\nsettings (dict): The settings passed to this method from the\nparent app.\n\nReturns:\nflask.Flask: The Flask application that was passed in.", "source": "juraj-google-style"}
{"code": "def Map(self, function):\n        \n        new_table = self.__class__()\n        \n        new_table._table = [self.header]\n        for row in self:\n            filtered_row = function(row)\n            if filtered_row:\n                new_table.Append(filtered_row)\n        return new_table", "docstring": "Applies the function to every row in the table.\n\nArgs:\nfunction: A function applied to each row.\n\nReturns:\nA new TextTable()\n\nRaises:\nTableError: When transform is not invalid row entry. The transform\nmust be compatible with Append().", "source": "juraj-google-style"}
{"code": "def flag_is_related(self, flag):\n    same_worksheet = (flag.worksheet == self.worksheet)\n    if isinstance(flag.location, (tuple, list)):\n        return ((flag.location[0] >= self.start[0]) and (flag.location[0] < self.end[0]) and (flag.location[1] >= self.start[1]) and (flag.location[1] < self.end[1]) and same_worksheet)\n    else:\n        return same_worksheet", "docstring": "Checks for relationship between a flag and this block.\n\nReturns:\nTrue if the flag is related to this block.", "source": "codesearchnet"}
{"code": "def _log_epoch_metrics(self, epoch, logs):\n    if not logs:\n        return\n    train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}\n    val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}\n    train_logs = self._collect_learning_rate(train_logs)\n    if self.write_steps_per_second:\n        train_logs['steps_per_second'] = self._compute_steps_per_second()\n    with summary_ops_v2.record_if(True):\n        if train_logs:\n            with self._train_writer.as_default():\n                for name, value in train_logs.items():\n                    summary_ops_v2.scalar('epoch_' + name, value, step=epoch)\n        if val_logs:\n            with self._val_writer.as_default():\n                for name, value in val_logs.items():\n                    name = name[4:]\n                    summary_ops_v2.scalar('epoch_' + name, value, step=epoch)", "docstring": "Writes epoch metrics out as scalar summaries.\n\nArgs:\nepoch: Int. The global step to use for TensorBoard.\nlogs: Dict. Keys are scalar summary names, values are scalars.", "source": "github-repos"}
{"code": "def infer_edge(tpm, a, b, contexts):\n\n    def a_in_context(context):\n        'Given a context C(A), return the states of the full system with A\\n        OFF and ON, respectively.\\n        '\n        a_off = ((context[:a] + OFF) + context[a:])\n        a_on = ((context[:a] + ON) + context[a:])\n        return (a_off, a_on)\n\n    def a_affects_b_in_context(context):\n        'Return ``True`` if A has an effect on B, given a context.'\n        (a_off, a_on) = a_in_context(context)\n        return (tpm[a_off][b] != tpm[a_on][b])\n    return any((a_affects_b_in_context(context) for context in contexts))", "docstring": "Infer the presence or absence of an edge from node A to node B.\n\nLet |S| be the set of all nodes in a network. Let |A' = S - {A}|. We call\nthe state of |A'| the context |C| of |A|. There is an edge from |A| to |B|\nif there exists any context |C(A)| such that |Pr(B | C(A), A=0) != Pr(B |\nC(A), A=1)|.\n\nArgs:\ntpm (np.ndarray): The TPM in state-by-node, multidimensional form.\na (int): The index of the putative source node.\nb (int): The index of the putative sink node.\nReturns:\nbool: ``True`` if the edge |A -> B| exists, ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def request_file(link, outfile, force_rerun_flag=False):\n    if force_rerun(flag=force_rerun_flag, outfile=outfile):\n        req = requests.get(link)\n        if (req.status_code == 200):\n            with open(outfile, 'w') as f:\n                f.write(req.text)\n            log.debug('Loaded and saved {} to {}'.format(link, outfile))\n        else:\n            log.error('{}: request error {}'.format(link, req.status_code))\n    return outfile", "docstring": "Download a file given a URL if the outfile does not exist already.\n\nArgs:\nlink (str): Link to download file.\noutfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does\nexist, unless force_rerun_flag is True.\nforce_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.\n\nReturns:\nstr: Path to downloaded file.", "source": "codesearchnet"}
{"code": "def add_dataset(self, dataset, datasets_to_check=None):\n    showcase_dataset = self._get_showcase_dataset_dict(dataset)\n    if (datasets_to_check is None):\n        datasets_to_check = self.get_datasets()\n    for dataset in datasets_to_check:\n        if (showcase_dataset['package_id'] == dataset['id']):\n            return False\n    self._write_to_hdx('associate', showcase_dataset, 'package_id')\n    return True", "docstring": "Add a dataset\n\nArgs:\ndataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary\ndatasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.\n\nReturns:\nbool: True if the dataset was added, False if already present", "source": "codesearchnet"}
{"code": "def has_valid_soma(data_wrapper):\n    try:\n        make_soma(data_wrapper.soma_points())\n        return CheckResult(True)\n    except SomaError:\n        return CheckResult(False)", "docstring": "Check if a data block has a valid soma\n\nReturns:\nCheckResult with result", "source": "codesearchnet"}
{"code": "def get_job_results(self, job_resource_name: str) -> List[TrialResult]:\n        \n        response = self.service.projects().programs().jobs().getResult(\n            parent=job_resource_name).execute()\n        trial_results = []\n        for sweep_result in response['result']['sweepResults']:\n            sweep_repetitions = sweep_result['repetitions']\n            key_sizes = [(m['key'], len(m['qubits']))\n                         for m in sweep_result['measurementKeys']]\n            for result in sweep_result['parameterizedResults']:\n                data = base64.standard_b64decode(result['measurementResults'])\n                measurements = unpack_results(data, sweep_repetitions,\n                                              key_sizes)\n\n                trial_results.append(TrialResult(\n                    params=ParamResolver(\n                        result.get('params', {}).get('assignments', {})),\n                    repetitions=sweep_repetitions,\n                    measurements=measurements))\n        return trial_results", "docstring": "Returns the actual results (not metadata) of a completed job.\n\nParams:\njob_resource_name: A string of the form\n`projects/project_id/programs/program_id/jobs/job_id`.\n\nReturns:\nAn iterable over the TrialResult, one per parameter in the\nparameter sweep.", "source": "juraj-google-style"}
{"code": "def up(name, debug=False):\n    \n\n    if debug:\n        env.ensemble_debug = True\n\n    filenames_to_try = [\n        name,\n        '%s.yml' % name,\n        '%s.yaml' % name,\n    ]\n\n    for filename in filenames_to_try:\n        if os.path.exists(filename):\n            with open(filename, 'r') as f:\n                config = yaml.load(f)\n            break\n    else:\n        abort('Ensemble manifest not found: %s' % name)\n\n    uncache()\n    try:\n        do_up(config)\n    except exceptions.ConfigException, e:\n        abort('Config error: ' + str(e))", "docstring": "Create servers and containers as required to meet the configuration\nspecified in _name_.\n\nArgs:\n* name: The name of the yaml config file (you can omit the .yml extension for convenience)\n\nExample:\nfab ensemble.up:wordpress", "source": "juraj-google-style"}
{"code": "def AddPath(self, path):\n    node = self._root\n    for name in path.split('.'):\n        if (name not in node):\n            node[name] = {}\n        elif (not node[name]):\n            return\n        node = node[name]\n    node.clear()", "docstring": "Adds a field path into the tree.\n\nIf the field path to add is a sub-path of an existing field path\nin the tree (i.e., a leaf node), it means the tree already matches\nthe given path so nothing will be added to the tree. If the path\nmatches an existing non-leaf node in the tree, that non-leaf node\nwill be turned into a leaf node with all its children removed because\nthe path matches all the node's children. Otherwise, a new path will\nbe added.\n\nArgs:\npath: The field path to add.", "source": "codesearchnet"}
{"code": "def __init__( self, title, energy, stoichiometry ):\n        \n        self.title = title\n        self.energy = energy\n        self.stoichiometry = Counter( stoichiometry )", "docstring": "Initialise a Calculation object\n\nArgs:\ntitle (Str): The title string for this calculation.\nenergy (Float): Final energy in eV.\nstoichiometry (Dict{Str:Int}): A dict desribing the calculation stoichiometry,\ne.g. { 'Ti': 1, 'O': 2 }\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def update_task_ids(self, encoder_vocab_size):\n    for (idx, task) in enumerate(self.task_list):\n        task.set_task_id((idx + encoder_vocab_size))\n        tf.logging.info(('Task %d (%s) has id %d.' % (idx, task.name, task.task_id)))", "docstring": "Generate task_ids for each problem.\n\nThese ids correspond to the index of the task in the task_list.\n\nArgs:\nencoder_vocab_size: the size of the vocab which is used to compute\nthe index offset.", "source": "codesearchnet"}
{"code": "def multiple_replace(string, replacements):\n    pattern = re.compile('|'.join([re.escape(k) for k in sorted(replacements, key=len, reverse=True)]), flags=re.DOTALL)\n    return pattern.sub((lambda x: replacements[x.group(0)]), string)", "docstring": "Simultaneously replace multiple strigns in a string\n\nArgs:\nstring (str): Input string\nreplacements (Dict[str,str]): Replacements dictionary\n\nReturns:\nstr: String with replacements", "source": "codesearchnet"}
{"code": "def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    op_log = merge_default_with_oplog(graph, op_log, run_meta, add_trace)\n    with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:\n        log.write(op_log.SerializeToString())", "docstring": "Log provided 'op_log', and add additional model information below.\n\nThe API also assigns ops in tf.compat.v1.trainable_variables() an op type\ncalled '_trainable_variables'.\nThe API also logs 'flops' statistics for ops with op.RegisterStatistics()\ndefined. flops calculation depends on Tensor shapes defined in 'graph',\nwhich might not be complete. 'run_meta', if provided, completes the shape\ninformation with best effort.\n\nArgs:\ngraph: tf.Graph. If None and eager execution is not enabled, use\ndefault graph.\nlog_dir: directory to write the log file.\nop_log: (Optional) OpLogProto proto to be written. If not provided, an new\none is created.\nrun_meta: (Optional) RunMetadata proto that helps flops computation using\nrun time shape information.\nadd_trace: Whether to add python code trace information.\nUsed to support \"code\" view.", "source": "github-repos"}
{"code": "def build_deps(self):\n    build_requires = self.metadata['setup_requires']\n    if self.has_test_suite:\n        build_requires += (self.metadata['tests_require'] + self.metadata['install_requires'])\n    if ('setuptools' not in build_requires):\n        build_requires.append('setuptools')\n    return sorted(self.name_convert_deps_list(deps_from_pyp_format(build_requires, runtime=False)))", "docstring": "Same as runtime_deps, but build dependencies. Test and install\nrequires are included if package contains test suite to prevent\n%check phase crashes because of missing dependencies\n\nReturns:\nlist of build dependencies of the package", "source": "codesearchnet"}
{"code": "def unpack(self, gpsd_socket_response):\n        \n        try:\n            fresh_data = json.loads(gpsd_socket_response)  \n            class_name = fresh_data.pop('class')\n            for key in self.packages[class_name]:\n                \n                if class_name == 'GST' and key == 'lat' or 'lon':\n                    setattr(self, 'sd' + key, fresh_data.get(key, 'n/a'))\n                setattr(self, key, fresh_data.get(key, 'n/a'))  \n\n        except AttributeError:  \n            sys.stderr.write('There is an unexpected exception unpacking JSON object')\n            return\n\n        except (ValueError, KeyError) as error:\n            sys.stderr.write(str(error))  \n            return", "docstring": "Sets new socket data as DataStream attributes in those initialised dictionaries\nArguments:\ngpsd_socket_response (json object):\nProvides:\nself attributes, e.g., self.lat, self.gdop\nRaises:\nAttributeError: 'str' object has no attribute 'keys' when the device falls out of the system\nValueError, KeyError: most likely extra, or mangled JSON data, should not happen, but that\napplies to a lot of things.", "source": "juraj-google-style"}
{"code": "def DistFitDataset(Dat):\n    \n    \n    (r,c) = Dat.shape\n    Poiss = np.zeros(r)\n    Norm = np.zeros(r)\n    LogNorm = np.zeros(r)\n    for i in range(r):\n        temp = GetDistFitError(Dat[i])\n        Poiss[i] = temp['poiss']\n        Norm[i] = temp['norm']\n        LogNorm[i] = temp['lognorm']\n    d = {}\n    d['poiss'] = Poiss\n    d['norm'] = Norm\n    d['lognorm'] = LogNorm\n    return d", "docstring": "Given a data matrix, this returns the per-gene fit error for the\nPoisson, Normal, and Log-Normal distributions.\n\nArgs:\nDat (array): numpy array with shape (genes, cells)\n\nReturns:\nd (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.", "source": "juraj-google-style"}
{"code": "def read_config(config_path=CONFIG_PATH):\n    if (not os.path.isfile(config_path)):\n        raise IOError(('No config file found at %s' % config_path))\n    config_parser = configparser.ConfigParser()\n    config_parser.read(config_path)\n    config = _config_parser_to_defaultdict(config_parser)\n    return config", "docstring": "Read the config information from the config file.\n\nArgs:\nconfig_path (str): Relative path to the email config file.\nReturns:\ndefaultdict: A defaultdict with the config information.\nRaises:\nIOError", "source": "codesearchnet"}
{"code": "def ensure_app_cache_dir(appname, *args):\n    \n    from ubelt import util_path\n    dpath = get_app_cache_dir(appname, *args)\n    util_path.ensuredir(dpath)\n    return dpath", "docstring": "Calls `get_app_cache_dir` but ensures the directory exists.\n\nArgs:\nappname (str): the name of the application\n*args: any other subdirectories may be specified\n\nSeeAlso:\nget_app_cache_dir\n\nExample:\n>>> import ubelt as ub\n>>> dpath = ub.ensure_app_cache_dir('ubelt')\n>>> assert exists(dpath)", "source": "juraj-google-style"}
{"code": "def _stop_profiler(self, save=True):\n    if not self._profiler_started:\n        return\n    try:\n        backend.tensorboard.stop_trace(save=save)\n    except Exception as e:\n        logging.error('Failed to stop profiler: %s', e)\n    finally:\n        self._profiler_started = False", "docstring": "Stops the profiler if currently active.\n\nArgs:\nsave: Whether to save the profiler results to TensorBoard.", "source": "github-repos"}
{"code": "def write_index_and_rst_files(self, overwrite: bool = False,\n                                  mock: bool = False) -> None:\n        \n        for f in self.files_to_index:\n            if isinstance(f, FileToAutodocument):\n                f.write_rst(\n                    prefix=self.rst_prefix,\n                    suffix=self.rst_suffix,\n                    heading_underline_char=self.source_rst_heading_underline_char,  \n                    overwrite=overwrite,\n                    mock=mock,\n                )\n            elif isinstance(f, AutodocIndex):\n                f.write_index_and_rst_files(overwrite=overwrite, mock=mock)\n            else:\n                fail(\"Unknown thing in files_to_index: {!r}\".format(f))\n        self.write_index(overwrite=overwrite, mock=mock)", "docstring": "Writes both the individual RST files and the index.\n\nArgs:\noverwrite: allow existing files to be overwritten?\nmock: pretend to write, but don't", "source": "juraj-google-style"}
{"code": "def __and__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.logical_and, tf.bool)", "docstring": "Returns a TensorFluent for the and logical operator.\n\nArgs:\nself: The first operand.\nother: The second operand.\n\nReturns:\nA TensorFluent wrapping the operator's output.", "source": "juraj-google-style"}
{"code": "def _handle_location(self, location):\n    if (not isinstance(location, ElementTree.Element)):\n        element = self.find(location)\n        if (element is None):\n            raise ValueError('Invalid path!')\n    else:\n        element = location\n    return element", "docstring": "Return an element located at location with flexible args.\n\nArgs:\nlocation: String xpath to use in an Element.find search OR\nan Element (which is simply returned).\n\nReturns:\nThe found Element.\n\nRaises:\nValueError if the location is a string that results in a\nfind of None.", "source": "codesearchnet"}
{"code": "def create(self, value):\n    if (self._optional and ((value is None) or (len(value) == 0))):\n        return None\n    if hasattr(self._type, 'resource_type'):\n        if (not isinstance(value, dict)):\n            raise ValueError('Resources must be specified as a dict of title to parameters')\n        if ((not self._many) and (len(value) > 1)):\n            raise ValueError('Only one resource can be provided for this TroposphereType variable')\n        result = [self._type.from_dict(title, v) for (title, v) in value.items()]\n    elif self._many:\n        result = [self._type.from_dict(None, v) for v in value]\n    elif (not isinstance(value, dict)):\n        raise ValueError('TroposphereType for a single non-resourcetype must be specified as a dict of parameters')\n    else:\n        result = [self._type.from_dict(None, value)]\n    if self._validate:\n        for v in result:\n            v._validate_props()\n    return (result[0] if (not self._many) else result)", "docstring": "Create the troposphere type from the value.\n\nArgs:\nvalue (Union[dict, list]): A dictionary or list of dictionaries\n(see class documentation for details) to use as parameters to\ncreate the Troposphere type instance.\nEach dictionary will be passed to the `from_dict` method of the\ntype.\n\nReturns:\nUnion[list, type]: Returns the value converted to the troposphere\ntype", "source": "codesearchnet"}
{"code": "def click_exists(self, timeout=0):\n    e = self.get(timeout=timeout, raise_error=False)\n    if (e is None):\n        return False\n    e.click()\n    return True", "docstring": "Wait element and perform click\n\nArgs:\ntimeout (float): timeout for wait\n\nReturns:\nbool: if successfully clicked", "source": "codesearchnet"}
{"code": "def has_arg(fn, arg_name):\n    \n    if sys.version_info < (3,):\n        if isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType):\n            arg_spec = inspect.getargspec(fn)\n        else:\n            try:\n                arg_spec = inspect.getargspec(fn.__call__)\n            except AttributeError:\n                return False\n        return (arg_name in arg_spec.args)\n    elif sys.version_info < (3, 6):\n        arg_spec = inspect.getfullargspec(fn)\n        return (arg_name in arg_spec.args or\n                arg_name in arg_spec.kwonlyargs)\n    else:\n        try:\n            signature = inspect.signature(fn)\n        except ValueError:\n            \n            signature = inspect.signature(fn.__call__)\n        parameter = signature.parameters.get(arg_name)\n        if parameter is None:\n            return False\n        return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,\n                                   inspect.Parameter.KEYWORD_ONLY))", "docstring": "Checks if a callable accepts a given keyword argument.\n\nArgs:\nfn: callable to inspect\narg_name: string, keyword argument name to check\n\nReturns:\nbool, whether `fn` accepts a `arg_name` keyword argument.", "source": "juraj-google-style"}
{"code": "def read_from_hdx(identifier, configuration=None):\n        \n        \n\n        user = User(configuration=configuration)\n        result = user._load_from_hdx('user', identifier)\n        if result:\n            return user\n        return None", "docstring": "Reads the user given by identifier from HDX and returns User object\n\nArgs:\nidentifier (str): Identifier of user\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[User]: User object if successful read, None if not", "source": "juraj-google-style"}
{"code": "def __init__(self, boundaries, values, name=None):\n    super(PiecewiseConstantDecay, self).__init__()\n    if len(boundaries) != len(values) - 1:\n        raise ValueError('The length of boundaries should be 1 less than the length of values')\n    self.boundaries = boundaries\n    self.values = values\n    self.name = name", "docstring": "Piecewise constant from boundaries and interval values.\n\nArgs:\nboundaries: A list of `Tensor`s or `int`s or `float`s with strictly\nincreasing entries, and with all elements having the same type as the\noptimizer step.\nvalues: A list of `Tensor`s or `float`s or `int`s that specifies the\nvalues for the intervals defined by `boundaries`. It should have one\nmore element than `boundaries`, and all elements should have the same\ntype.\nname: A string. Optional name of the operation. Defaults to\n'PiecewiseConstant'.\n\nRaises:\nValueError: if the number of elements in the lists do not match.", "source": "github-repos"}
{"code": "def indentjoin(strlist, indent='\\n    ', suffix=''):\n    r\n    indent_ = indent\n    strlist = list(strlist)\n    if len(strlist) == 0:\n        return ''\n    return indent_ + indent_.join([six.text_type(str_) + suffix\n                                   for str_ in strlist])", "docstring": "r\"\"\"\nConvineince indentjoin\n\nsimilar to '\\n    '.join(strlist) but indent is also prefixed\n\nArgs:\nstrlist (?):\nindent  (str):\nsuffix  (str):\n\nReturns:\nstr: joined list", "source": "juraj-google-style"}
{"code": "def get_equiv_transformations(self, transformation_sets, film_vectors, substrate_vectors):\n    for (film_transformations, substrate_transformations) in transformation_sets:\n        films = [reduce_vectors(*np.dot(f, film_vectors)) for f in film_transformations]\n        substrates = [reduce_vectors(*np.dot(s, substrate_vectors)) for s in substrate_transformations]\n        for (f, s) in product(films, substrates):\n            if self.is_same_vectors(f, s):\n                (yield [f, s])", "docstring": "Applies the transformation_sets to the film and substrate vectors\nto generate super-lattices and checks if they matches.\nReturns all matching vectors sets.\n\nArgs:\ntransformation_sets(array): an array of transformation sets:\neach transformation set is an array with the (i,j)\nindicating the area multipes of the film and subtrate it\ncorresponds to, an array with all possible transformations\nfor the film area multiple i and another array for the\nsubstrate area multiple j.\n\nfilm_vectors(array): film vectors to generate super lattices\nsubstrate_vectors(array): substrate vectors to generate super\nlattices", "source": "codesearchnet"}
{"code": "def classify_coupling(coupling):\n    \n    lower, upper = coupling\n\n    if lower is None and upper is None:\n        return CouplingClass.Uncoupled\n    elif lower is None or upper is None:\n        return CouplingClass.DirectionalReverse\n    elif lower == 0.0 and upper == 0.0:\n        return CouplingClass.Inconsistent\n    elif lower <= 0.0 and upper >= 0.0:\n        return CouplingClass.DirectionalForward\n    elif abs(lower - upper) < 1e-6:\n        return CouplingClass.Full\n    else:\n        return CouplingClass.Partial", "docstring": "Return a constant indicating the type of coupling.\n\nDepending on the type of coupling, one of the constants from\n:class:`.CouplingClass` is returned.\n\nArgs:\ncoupling: Tuple of minimum and maximum flux ratio", "source": "juraj-google-style"}
{"code": "def _determine_and_instrument_traced_tensors(self, graph_order, ops_in_exec_path, tensor_trace_points, report_handler):\n    traced_tensors = []\n    checkpoint_operations = set([tensor.op for tensor, _ in tensor_trace_points])\n    for op_id, op in enumerate(graph_order.operations):\n        if checkpoint_operations and op not in checkpoint_operations:\n            continue\n        if self._skip_op(op_id, op, ops_in_exec_path, report_handler):\n            continue\n        for i in range(len(op.outputs)):\n            out_tensor = op.outputs[i]\n            if not self._skip_tensor(op_id, out_tensor, report_handler):\n                traced_tensors.append(out_tensor)\n    return traced_tensors", "docstring": "Determines the tensors to trace and instruments the trace details.\n\nArgs:\ngraph_order: graph_order tuple containing graph (tf.graph), operations\n(list of operations), op_to_idx (op id mapping), (tensors) list of\ntensors, tensor_to_idx (tensor id mapping), contains_cycle (whether\nthere is a cycle in the graph), topological_order_or_cycle (list of ops\nin topological order or list of ops creating a cycle).\nops_in_exec_path: Set of ops in the execution path.\ntensor_trace_points: Collection of programatic tensor trace points.\nreport_handler: An instance of tensor_tracer_report.TTReportHandle.\nReturns:\nList of tensors to be traced.", "source": "github-repos"}
{"code": "def stats(self):\n    self.raise_error_if_not_open()\n    per_key_stats = self.stats_per_key()\n    return stats.DataStats.concatenate(per_key_stats.values())", "docstring": "Return statistics calculated overall features in the container.\n\nNote:\nThe feature container has to be opened in advance.\n\nReturns:\nDataStats: Statistics overall data points of all features.", "source": "codesearchnet"}
{"code": "class PatchTSMixerForPreTrainingOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    prediction_outputs: Optional[torch.FloatTensor] = None\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Output type of [`PatchTSMixerForPreTrainingOutput`].\n\nArgs:\nprediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`):\nPrediction output from the pretrain head.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*):\nHidden-states of the model at the output of each layer.\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):\nBackbone embeddings before passing through the head.\nloss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):\nTotal loss", "source": "github-repos"}
{"code": "def activate(fn=None):\n    if (not isfunction(fn)):\n        _engine.activate()\n        return None\n    if ((iscoroutinefunction is not None) and iscoroutinefunction(fn)):\n        return activate_async(fn, _engine)\n\n    @functools.wraps(fn)\n    def wrapper(*args, **kw):\n        _engine.activate()\n        try:\n            fn(*args, **kw)\n        finally:\n            _engine.disable()\n    return wrapper", "docstring": "Enables the HTTP traffic interceptors.\n\nThis function can be used as decorator.\n\nArguments:\nfn (function|coroutinefunction): Optional function argument\nif used as decorator.\n\nReturns:\nfunction: decorator wrapper function, only if called as decorator,\notherwise ``None``.\n\nExample::\n\n# Standard use case\npook.activate()\npook.mock('server.com/foo').reply(404)\n\nres = requests.get('server.com/foo')\nassert res.status_code == 404\npook.disable()\n\n# Decorator use case\n@pook.activate\ndef test_request():\npook.mock('server.com/foo').reply(404)\n\nres = requests.get('server.com/foo')\nassert res.status_code == 404", "source": "codesearchnet"}
{"code": "def repertoire(self, direction, mechanism, purview):\n    if (direction == Direction.CAUSE):\n        return self.cause_repertoire(mechanism, purview)\n    elif (direction == Direction.EFFECT):\n        return self.effect_repertoire(mechanism, purview)\n    return validate.direction(direction)", "docstring": "Return the cause or effect repertoire based on a direction.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The mechanism for which to calculate the\nrepertoire.\npurview (tuple[int]): The purview over which to calculate the\nrepertoire.\n\nReturns:\nnp.ndarray: The cause or effect repertoire of the mechanism over\nthe purview.\n\nRaises:\nValueError: If ``direction`` is invalid.", "source": "codesearchnet"}
{"code": "def build_docs(output_dir, code_url_prefix, search_hints):\n    output_dir = pathlib.Path(output_dir)\n    site_path = pathlib.Path('/', FLAGS.site_path)\n    doc_controls.set_deprecated(tf.compat.v1)\n    try:\n        doc_controls.set_deprecated(tf.estimator)\n    except AttributeError:\n        pass\n    doc_controls.set_deprecated(tf.feature_column)\n    doc_controls.set_deprecated(tf.keras.preprocessing)\n    doc_controls.set_custom_page_builder_cls(tf.raw_ops, RawOpsPageInfo)\n    for name, obj in tf_inspect.getmembers(tf.raw_ops):\n        if not name.startswith('_'):\n            doc_controls.hide_from_search(obj)\n    for cls in [tf.Module, tf.keras.layers.Layer, tf.keras.optimizers.Optimizer]:\n        doc_controls.decorate_all_class_attributes(decorator=doc_controls.do_not_doc_in_subclasses, cls=cls, skip=['__init__'])\n    do_not_document = ['tf.__internal__', 'tf.keras.__internal__', 'tf.keras.wrappers', 'tf.__operators__', 'tf.tools', 'tf.compat.v1.pywrap_tensorflow', 'tf.pywrap_tensorflow', 'tf.flags', 'tf.batch_mat_mul_v3', 'tf.sparse_segment_sum_grad']\n    for path in do_not_document:\n        item = tf\n        for part in path.split('.')[1:]:\n            item = getattr(item, part, None)\n        if item is None:\n            continue\n        doc_controls.do_not_generate_docs(item)\n    base_dirs, code_url_prefixes = base_dir.get_base_dirs_and_prefixes(code_url_prefix)\n    doc_generator = generate_lib.DocGenerator(root_title='TensorFlow 2', py_modules=[('tf', tf)], base_dir=base_dirs, search_hints=search_hints, code_url_prefix=code_url_prefixes, site_path=site_path, visitor_cls=TfExportAwareVisitor, private_map=_PRIVATE_MAP, extra_docs=_EXTRA_DOCS, callbacks=base_dir.get_callbacks())\n    doc_generator.build(output_dir)\n\n    @contextlib.contextmanager\n    def edit_yaml_file(path):\n        content = yaml.safe_load(path.read_text())\n        yield content\n        with path.open('w') as f:\n            yaml.dump(content, f, default_flow_style=False)\n    toc_path = output_dir / 'tf/_toc.yaml'\n    with edit_yaml_file(toc_path) as toc:\n        toc['toc'][0]['section'][0]['path'] = str(site_path / 'tf_overview')\n    redirects_path = output_dir / 'tf/_redirects.yaml'\n    with edit_yaml_file(redirects_path) as redirects:\n        redirects['redirects'].append({'from': str(site_path / 'tf_overview'), 'to': str(site_path / 'tf')})\n    num_files = len(list(output_dir.rglob('*')))\n    if num_files < MIN_NUM_FILES_EXPECTED:\n        raise ValueError(f'The TensorFlow api should be more than {MIN_NUM_FILES_EXPECTED} files(found {num_files}).')", "docstring": "Build api docs for tensorflow v2.\n\nArgs:\noutput_dir: A string path, where to put the files.\ncode_url_prefix: prefix for \"Defined in\" links.\nsearch_hints: Bool. Include meta-data search hints at the top of each file.", "source": "github-repos"}
{"code": "def macro_state(self, micro_state):\n        \n        assert len(micro_state) == len(self.micro_indices)\n\n        reindexed = self.reindex()\n        return utils.state_of(reindexed.output_indices, micro_state)", "docstring": "Compute the macro-state of this blackbox.\n\nThis is just the state of the blackbox's output indices.\n\nArgs:\nmicro_state (tuple[int]): The state of the micro-elements in the\nblackbox.\n\nReturns:\ntuple[int]: The state of the output indices.", "source": "juraj-google-style"}
{"code": "def FixedUnPooling(x, shape, unpool_mat=None, data_format='channels_last'):\n    data_format = get_data_format(data_format, keras_mode=False)\n    shape = shape2d(shape)\n    output_shape = StaticDynamicShape(x)\n    output_shape.apply((1 if (data_format == 'NHWC') else 2), (lambda x: (x * shape[0])))\n    output_shape.apply((2 if (data_format == 'NHWC') else 3), (lambda x: (x * shape[1])))\n    if ((shape[0] == 2) and (shape[1] == 2) and (unpool_mat is None) and (data_format == 'NHWC')):\n        ret = UnPooling2x2ZeroFilled(x)\n    else:\n        if (unpool_mat is None):\n            mat = np.zeros(shape, dtype='float32')\n            mat[0][0] = 1\n            unpool_mat = tf.constant(mat, name='unpool_mat')\n        elif isinstance(unpool_mat, np.ndarray):\n            unpool_mat = tf.constant(unpool_mat, name='unpool_mat')\n        assert (unpool_mat.shape.as_list() == list(shape))\n        if (data_format == 'NHWC'):\n            x = tf.transpose(x, [0, 3, 1, 2])\n        x = tf.expand_dims(x, (- 1))\n        mat = tf.expand_dims(unpool_mat, 0)\n        ret = tf.tensordot(x, mat, axes=1)\n        if (data_format == 'NHWC'):\n            ret = tf.transpose(ret, [0, 2, 4, 3, 5, 1])\n        else:\n            ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5])\n        shape3_dyn = [output_shape.get_dynamic(k) for k in range(1, 4)]\n        ret = tf.reshape(ret, tf.stack(([(- 1)] + shape3_dyn)))\n    ret.set_shape(tf.TensorShape(output_shape.get_static()))\n    return ret", "docstring": "Unpool the input with a fixed matrix to perform kronecker product with.\n\nArgs:\nx (tf.Tensor): a 4D image tensor\nshape: int or (h, w) tuple\nunpool_mat: a tf.Tensor or np.ndarray 2D matrix with size=shape.\nIf is None, will use a matrix with 1 at top-left corner.\n\nReturns:\ntf.Tensor: a 4D image tensor.", "source": "codesearchnet"}
{"code": "def bounding_box(locations):\n    \n    x_values = list(map(itemgetter(0), locations))\n    x_min, x_max = min(x_values), max(x_values)\n    y_values = list(map(itemgetter(1), locations))\n    y_min, y_max = min(y_values), max(y_values)\n    return Rect(x_min, y_min, x_max - x_min, y_max - y_min)", "docstring": "Computes the bounding box of an iterable of (x, y) coordinates.\n\nArgs:\nlocations: iterable of (x, y) tuples.\n\nReturns:\n`Rect`: Coordinates of the bounding box.", "source": "juraj-google-style"}
{"code": "def _recommend_command(command, description, indent=2, create_link=False):\n    indent_str = ' ' * indent\n    if create_link:\n        font_attr = [debugger_cli_common.MenuItem('', command), 'bold']\n    else:\n        font_attr = 'bold'\n    lines = [RL(indent_str) + RL(command, font_attr) + ':', indent_str + '  ' + description]\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)", "docstring": "Generate a RichTextLines object that describes a recommended command.\n\nArgs:\ncommand: (str) The command to recommend.\ndescription: (str) A description of what the command does.\nindent: (int) How many spaces to indent in the beginning.\ncreate_link: (bool) Whether a command link is to be applied to the command\nstring.\n\nReturns:\n(RichTextLines) Formatted text (with font attributes) for recommending the\ncommand.", "source": "github-repos"}
{"code": "def ssh(container, cmd='', user='root', password='root'):\n    ip = get_ip(container)\n    ssh_cmd = (\"sshpass -p '%s' ssh -A -t -o StrictHostKeyChecking=no '%s'@%s\" % (password, user, ip))\n    local(('ssh -A -t -o StrictHostKeyChecking=no -i \"%s\" %s@%s %s %s' % (env.key_filename, env.user, env.host, ssh_cmd, cmd)))", "docstring": "SSH into a running container, using the host as a jump host. This requires\nthe container to have a running sshd process.\n\nArgs:\n* container: Container name or ID\n* cmd='': Command to run in the container\n* user='root': SSH username\n* password='root': SSH password", "source": "codesearchnet"}
{"code": "def AddSymbolicLink(self, path, linked_path):\n    if self.file_system.FileEntryExistsByPath(path):\n        raise ValueError('Path: {0:s} already set.'.format(path))\n    self._AddParentDirectories(path)\n    self.file_system.AddFileEntry(path, file_entry_type=definitions.FILE_ENTRY_TYPE_LINK, link_data=linked_path)", "docstring": "Adds a symbolic link to the fake file system.\n\nArgs:\npath (str): path of the symbolic link within the fake file system.\nlinked_path (str): path that is linked.\n\nRaises:\nValueError: if the path is already set.", "source": "codesearchnet"}
{"code": "def _compile_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:\n    etype2compiler = {'constant': self._compile_constant_expression, 'pvar': self._compile_pvariable_expression, 'randomvar': self._compile_random_variable_expression, 'arithmetic': self._compile_arithmetic_expression, 'boolean': self._compile_boolean_expression, 'relational': self._compile_relational_expression, 'func': self._compile_function_expression, 'control': self._compile_control_flow_expression, 'aggregation': self._compile_aggregation_expression}\n    etype = expr.etype\n    if (etype[0] not in etype2compiler):\n        raise ValueError('Expression type unknown: {}'.format(etype))\n    with self.graph.as_default():\n        compiler_fn = etype2compiler[etype[0]]\n        return compiler_fn(expr, scope, batch_size, noise)", "docstring": "Compile the expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent.", "source": "codesearchnet"}
{"code": "def from_path(cls, path, format=None):\n    name = None\n    data = None\n    if (format is None):\n        formats = (FileFormat.py, FileFormat.yaml)\n    else:\n        formats = (format,)\n    try:\n        mode = os.stat(path).st_mode\n    except (IOError, OSError):\n        raise PackageMetadataError(('Path %r did not exist, or was not accessible' % path))\n    is_dir = stat.S_ISDIR(mode)\n    for name_ in config.plugins.package_repository.filesystem.package_filenames:\n        for format_ in formats:\n            if is_dir:\n                filepath = os.path.join(path, ('%s.%s' % (name_, format_.extension)))\n                exists = os.path.isfile(filepath)\n            else:\n                if (format is None):\n                    if (os.path.splitext(path)[1] != format_.extension):\n                        continue\n                filepath = path\n                exists = True\n            if exists:\n                data = load_from_file(filepath, format_, disable_memcache=True)\n                break\n        if data:\n            name = data.get('name')\n            if ((name is not None) or isinstance(name, basestring)):\n                break\n    if (data is None):\n        raise PackageMetadataError(('No package definition file found at %s' % path))\n    if ((name is None) or (not isinstance(name, basestring))):\n        raise PackageMetadataError((\"Error in %r - missing or non-string field 'name'\" % filepath))\n    package = create_package(name, data, package_cls=cls)\n    result = package._get_preprocessed(data)\n    if result:\n        (package, data) = result\n    package.filepath = filepath\n    package.includes = set()\n\n    def visit(d):\n        for (k, v) in d.iteritems():\n            if isinstance(v, SourceCode):\n                package.includes |= (v.includes or set())\n            elif isinstance(v, dict):\n                visit(v)\n    visit(data)\n    package._validate_includes()\n    return package", "docstring": "Load a developer package.\n\nA developer package may for example be a package.yaml or package.py in a\nuser's source directory.\n\nArgs:\npath: Directory containing the package definition file, or file\npath for the package file itself\nformat: which FileFormat to use, or None to check both .py and .yaml\n\nReturns:\n`Package` object.", "source": "codesearchnet"}
{"code": "class CSVLogger(Callback):\n\n    def __init__(self, filename, separator=',', append=False):\n        super().__init__()\n        self.sep = separator\n        self.filename = file_utils.path_to_string(filename)\n        self.append = append\n        self.writer = None\n        self.keys = None\n        self.append_header = True\n        self.csv_file = None\n\n    def on_train_begin(self, logs=None):\n        if self.append:\n            if file_utils.exists(self.filename):\n                with file_utils.File(self.filename, 'r') as f:\n                    self.append_header = not bool(len(f.readline()))\n            mode = 'a'\n        else:\n            mode = 'w'\n        if self.csv_file and (not self.csv_file.closed):\n            self.csv_file.close()\n        self.csv_file = file_utils.File(self.filename, mode)\n        self.writer = None\n        self.keys = None\n\n    def on_epoch_end(self, epoch, logs=None):\n        logs = logs or {}\n\n        def handle_value(k):\n            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0\n            if isinstance(k, str):\n                return k\n            elif isinstance(k, collections.abc.Iterable) and (not is_zero_dim_ndarray):\n                return f'\"[{', '.join(map(str, k))}]\"'\n            else:\n                return k\n        if self.keys is None:\n            self.keys = sorted(logs.keys())\n            val_keys_found = False\n            for key in self.keys:\n                if key.startswith('val_'):\n                    val_keys_found = True\n                    break\n            if not val_keys_found and self.keys:\n                self.keys.extend(['val_' + k for k in self.keys])\n        if not self.writer:\n\n            class CustomDialect(csv.excel):\n                delimiter = self.sep\n            fieldnames = ['epoch'] + (self.keys or [])\n            self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames, dialect=CustomDialect)\n            if self.append_header:\n                self.writer.writeheader()\n        row_dict = collections.OrderedDict({'epoch': epoch})\n        row_dict.update(((key, handle_value(logs.get(key, 'NA'))) for key in self.keys))\n        self.writer.writerow(row_dict)\n        self.csv_file.flush()\n\n    def on_train_end(self, logs=None):\n        if self.csv_file and (not self.csv_file.closed):\n            self.csv_file.close()\n        self.writer = None", "docstring": "Callback that streams epoch results to a CSV file.\n\nSupports all values that can be represented as a string,\nincluding 1D iterables such as `np.ndarray`.\n\nArgs:\nfilename: Filename of the CSV file, e.g. `'run/log.csv'`.\nseparator: String used to separate elements in the CSV file.\nappend: Boolean. True: append if file exists (useful for continuing\ntraining). False: overwrite existing file.\n\nExample:\n\n```python\ncsv_logger = CSVLogger('training.log')\nmodel.fit(X_train, Y_train, callbacks=[csv_logger])\n```", "source": "github-repos"}
{"code": "def __init__(self, size, dropout=None, named_tensors=None, scope='lstm', summary_labels=(), return_final_state=True):\n        \n        self.size = size\n        self.dropout = dropout\n        self.return_final_state = return_final_state\n        super(Lstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "LSTM layer.\n\nArgs:\nsize: LSTM size.\ndropout: Dropout rate.", "source": "juraj-google-style"}
{"code": "def make_batches(size, batch_size):\n    num_batches = int(np.ceil(size / float(batch_size)))\n    return [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, num_batches)]", "docstring": "Returns a list of batch indices (tuples of indices).\n\nArgs:\nsize: Integer, total size of the data to slice into batches.\nbatch_size: Integer, batch size.\n\nReturns:\nA list of tuples of array indices.", "source": "github-repos"}
{"code": "def assignSeasonSchedule(self, season, month, day, schedule):\n    season += 1\n    schedule += 1\n    if ((season < 1) or (season > Extents.Seasons) or (schedule < 1) or (schedule > Extents.Schedules) or (month > 12) or (month < 0) or (day < 0) or (day > 31)):\n        ekm_log(((((((('Out of bounds: month ' + str(month)) + ' day ') + str(day)) + ' schedule ') + str(schedule)) + ' season ') + str(season)))\n        return False\n    idx_mon = (('Season_' + str(season)) + '_Start_Day')\n    idx_day = (('Season_' + str(season)) + '_Start_Month')\n    idx_schedule = (('Season_' + str(season)) + '_Schedule')\n    if (idx_mon not in self.m_seasons_sched_params):\n        ekm_log(('Incorrect index: ' + idx_mon))\n        return False\n    if (idx_day not in self.m_seasons_sched_params):\n        ekm_log(('Incorrect index: ' + idx_day))\n        return False\n    if (idx_schedule not in self.m_seasons_sched_params):\n        ekm_log(('Incorrect index: ' + idx_schedule))\n        return False\n    self.m_seasons_sched_params[idx_mon] = month\n    self.m_seasons_sched_params[idx_day] = day\n    self.m_seasons_sched_params[idx_schedule] = schedule\n    return True", "docstring": "Define a single season and assign a schedule\n\nArgs:\nseason (int): A :class:`~ekmmeters.Seasons` value or in range(Extent.Seasons).\nmonth (int): Month 1-12.\nday (int):  Day 1-31.\nschedule (int): A :class:`~ekmmeters.LCDItems` value or in range(Extent.Schedules).\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"}
{"code": "def convex_hull_collide(nodes1, nodes2):\n    polygon1 = _helpers.simple_convex_hull(nodes1)\n    (_, polygon_size1) = polygon1.shape\n    polygon2 = _helpers.simple_convex_hull(nodes2)\n    (_, polygon_size2) = polygon2.shape\n    if ((polygon_size1 == 2) and (polygon_size2 == 2)):\n        return line_line_collide(polygon1, polygon2)\n    else:\n        return _helpers.polygon_collide(polygon1, polygon2)", "docstring": "Determine if the convex hulls of two curves collide.\n\n.. note::\n\nThis is a helper for :func:`from_linearized`.\n\nArgs:\nnodes1 (numpy.ndarray): Control points of a first curve.\nnodes2 (numpy.ndarray): Control points of a second curve.\n\nReturns:\nbool: Indicating if the convex hulls collide.", "source": "codesearchnet"}
{"code": "def __update_cleanup_paths(new_path):\n    \n    cleanup_dirs = settings.CFG[\"cleanup_paths\"].value\n    cleanup_dirs = set(cleanup_dirs)\n    cleanup_dirs.add(new_path)\n    cleanup_dirs = list(cleanup_dirs)\n    settings.CFG[\"cleanup_paths\"] = cleanup_dirs", "docstring": "Add the new path to the list of paths to clean up afterwards.\n\nArgs:\nnew_path: Path to the directory that need to be cleaned up.", "source": "juraj-google-style"}
{"code": "def download_image(self, handle, dest):\n        \n        shutil.copyfile(self._prefixed(handle), dest)", "docstring": "Copies over the handl to the destination\n\nArgs:\nhandle (str): path to copy over\ndest (str): path to copy to\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def tool(name):\n    global g_tools\n\n    def decorator(fn):\n        g_tools[name] = fn\n        return fn\n    return decorator", "docstring": "Decorator for defining lint tools.\n\nArgs:\nname (str):\nThe name of the tool. This name will be used to identify the tool\nin `pelconf.yaml`.", "source": "codesearchnet"}
{"code": "def get_all_instances(include_fastboot=False):\n    if include_fastboot:\n        serial_list = list_adb_devices() + list_fastboot_devices()\n        return get_instances(serial_list)\n    return get_instances(list_adb_devices())", "docstring": "Create AndroidDevice instances for all attached android devices.\n\nArgs:\ninclude_fastboot: Whether to include devices in bootloader mode or not.\n\nReturns:\nA list of AndroidDevice objects each representing an android device\nattached to the computer.", "source": "github-repos"}
{"code": "def _produce_posterior_estimate(posterior_dist, posterior_estimate_mode, raw_var_name):\n    conds = [tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.sample), name='equal_sample_mode'), tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.mean), name='equal_mean_mode'), tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.last_sample), name='equal_last_sample_mode')]\n    results = [(lambda : posterior_dist.sample()), (lambda : posterior_dist.mean()), (lambda : posterior_dist.last_sample())]\n\n    def default_case_branch_raising_error():\n        err_msg = 'Invalid posterior estimate mode.'\n        raise_err = tf.Assert(tf.constant(False), data=[tf.constant(err_msg)])\n        with tf.control_dependencies([raise_err]):\n            return posterior_dist.mean()\n    if hasattr(posterior_dist, 'last_sample'):\n        cases = {conds[0]: results[0], conds[1]: results[1], conds[2]: results[2]}\n    else:\n        cases = {conds[0]: results[0], conds[1]: results[1]}\n    z_sample = tf.case(cases, exclusive=True, default=default_case_branch_raising_error, name='{}_posterior_estimate'.format(raw_var_name))\n    return z_sample", "docstring": "Create tensor representing estimate of posterior.\n\nArgs:\nposterior_dist: An instance of `tfp.distributions.Distribution`.\nThe variational posterior from which to produce an estimate of the\nvariable in question.\nposterior_estimate_mode: A `Tensor` of dtype `tf.string`, which\ndetermines the inference mode.\nraw_var_name: The name of the variable over which inference is done.\n\nReturns:\n`z_sample`, a `Tensor` representing an estimate derived from the\nposterior distribution.", "source": "codesearchnet"}
{"code": "def datetime_string(day, month, year, hour, minute):\n    if ((hour < 0) or (hour > 23)):\n        hour = 0\n    if ((minute < 0) or (minute > 60)):\n        minute = 0\n    return ('%d-%02d-%02dT%02d:%02d:00' % (year, month, day, hour, minute))", "docstring": "Build a date string using the provided day, month, year numbers.\n\nAutomatically adds a leading zero to ``day`` and ``month`` if they only have\none digit.\n\nArgs:\nday (int): Day number.\nmonth(int): Month number.\nyear(int): Year number.\nhour (int): Hour of the day in 24h format.\nminute (int): Minute of the hour.\n\nReturns:\nstr: Date in the format *YYYY-MM-DDThh:mm:ss*.", "source": "codesearchnet"}
{"code": "def process_entry(self, entry):\n        \n        try:\n            corrections = self.get_corrections_dict(entry)\n        except CompatibilityError:\n            return None\n        entry.correction = sum(corrections.values())\n        return entry", "docstring": "Process a single entry with the chosen Corrections.\n\nArgs:\nentry: A ComputedEntry object.\n\nReturns:\nAn adjusted entry if entry is compatible, otherwise None is\nreturned.", "source": "juraj-google-style"}
{"code": "def trace_min_buffer_capacity(self):\n        \n        cmd = enums.JLinkTraceCommand.GET_MIN_CAPACITY\n        data = ctypes.c_uint32(0)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n        if (res == 1):\n            raise errors.JLinkException('Failed to get min trace buffer size.')\n        return data.value", "docstring": "Retrieves the minimum capacity the trace buffer can be configured with.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe minimum configurable capacity for the trace buffer.", "source": "juraj-google-style"}
{"code": "def contains_method(self, method):\n        \n        return method in itertools.chain(self._literal, self._wildcard,\n                                         self._regex)", "docstring": "Check if there is at least one handler for *method*.\n\nArguments:\nmethod (str): HTTP method name, e.g. GET, POST, etc.\n\nReturns:\n``True`` if there is at least one route defined for *method*,\n``False`` otherwise", "source": "juraj-google-style"}
{"code": "def until(coro, coro_test, assert_coro=None, *args, **kw):\n\n    @asyncio.coroutine\n    def assert_coro(value):\n        return (not value)\n    return (yield from whilst(coro, coro_test, *args, assert_coro=assert_coro, **kw))", "docstring": "Repeatedly call `coro` coroutine function until `coro_test` returns `True`.\n\nThis function is the inverse of `paco.whilst()`.\n\nThis function is a coroutine.\n\nArguments:\ncoro (coroutinefunction): coroutine function to execute.\ncoro_test (coroutinefunction): coroutine function to test.\nassert_coro (coroutinefunction): optional assertion coroutine used\nto determine if the test passed or not.\n*args (mixed): optional variadic arguments to pass to `coro` function.\n\nRaises:\nTypeError: if input arguments are invalid.\n\nReturns:\nlist: result values returned by `coro`.\n\nUsage::\n\ncalls = 0\n\nasync def task():\nnonlocal calls\ncalls += 1\nreturn calls\n\nasync def calls_gt_4():\nreturn calls > 4\n\nawait paco.until(task, calls_gt_4)\n# => [1, 2, 3, 4, 5]", "source": "codesearchnet"}
{"code": "def from_bigquery(sql):\n    \n\n    if isinstance(sql, bq.Query):\n      sql = sql._expanded_sql()\n\n    parts = sql.split('.')\n    if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):\n      sql = '(' + sql + ')'  \n    else:\n      sql = '`' + sql + '`'  \n\n    metrics = Metrics(bigquery=sql)\n    return metrics", "docstring": "Create a Metrics instance from a bigquery query or table.\n\nReturns:\na Metrics instance.\n\nArgs:\nsql: A BigQuery table name or a query.", "source": "juraj-google-style"}
{"code": "def invalid_fields(self, data, original_data):\n    errors = []\n    for field in original_data:\n        if isinstance(field, (set, list, tuple, dict)):\n            continue\n        if (field not in self.fields.keys()):\n            errors.append(field)\n    if errors:\n        raise ValidationError('Invalid field', field_names=errors)", "docstring": "Validator that checks if any keys provided aren't in the schema.\n\nSay your schema has support for keys ``a`` and ``b`` and the data\nprovided has keys ``a``, ``b``, and ``c``. When the data is loaded into\nthe schema, a :class:`marshmallow.ValidationError` will be raised\ninforming the developer that excess keys have been provided.\n\nRaises:\nmarshmallow.ValidationError: Raised if extra keys exist in the\npassed in data.", "source": "codesearchnet"}
{"code": "def abs(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.abs, tf.float32)", "docstring": "Returns a TensorFluent for the abs function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the abs function.", "source": "juraj-google-style"}
{"code": "def absl_to_cpp(level):\n    if (not isinstance(level, int)):\n        raise TypeError('Expect an int level, found {}'.format(type(level)))\n    if (level >= 0):\n        return 0\n    else:\n        return (- level)", "docstring": "Converts an absl log level to a cpp log level.\n\nArgs:\nlevel: int, an absl.logging level.\n\nRaises:\nTypeError: Raised when level is not an integer.\n\nReturns:\nThe corresponding integer level for use in Abseil C++.", "source": "codesearchnet"}
{"code": "def get_timestamped_export_dir(export_dir_base):\n    attempts = 0\n    while (attempts < MAX_DIRECTORY_CREATION_ATTEMPTS):\n        export_timestamp = int(time.time())\n        export_dir = os.path.join(tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(str(export_timestamp)))\n        if (not tf_v1.gfile.Exists(export_dir)):\n            return export_dir\n        time.sleep(1)\n        attempts += 1\n        logging.warn('Export directory %s already exists; retrying (attempt %d/%d)', export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)\n    raise RuntimeError('Failed to obtain a unique export directory name after %d attempts.'.MAX_DIRECTORY_CREATION_ATTEMPTS)", "docstring": "Builds a path to a new subdirectory within the base directory.\n\nEach export is written into a new subdirectory named using the\ncurrent time.  This guarantees monotonically increasing version\nnumbers even across multiple runs of the pipeline.\nThe timestamp used is the number of seconds since epoch UTC.\n\nArgs:\nexport_dir_base: A string containing a directory to write the exported\ngraph and checkpoints.\nReturns:\nThe full path of the new subdirectory (which is not actually created yet).\n\nRaises:\nRuntimeError: if repeated attempts fail to obtain a unique timestamped\ndirectory name.", "source": "codesearchnet"}
{"code": "def get_datasets(self):\n    (assoc_result, datasets_dicts) = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id', action=self.actions()['list_datasets'])\n    datasets = list()\n    if assoc_result:\n        for dataset_dict in datasets_dicts:\n            dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)\n            datasets.append(dataset)\n    return datasets", "docstring": "Get any datasets in the showcase\n\nReturns:\nList[Dataset]: List of datasets", "source": "codesearchnet"}
{"code": "def unique():\n\n    def _apply_fn(dataset):\n        return dataset.unique()\n    return _apply_fn", "docstring": "Creates a `Dataset` from another `Dataset`, discarding duplicates.\n\nUse this transformation to produce a dataset that contains one instance of\neach unique element in the input. For example:\n\n```python\ndataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])\n\n# Using `unique()` will drop the duplicate elements.\ndataset = dataset.apply(tf.data.experimental.unique())  # ==> { 1, 37, 2 }\n```\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def compute_capability_from_device_desc(device_attrs):\n    match = _PHYSICAL_DEVICE_DESCRIPTION_REGEX.search(device_attrs.physical_device_desc)\n    if not match:\n        return GpuInfo(None, None)\n    cc = (int(match.group(2)), int(match.group(3))) if match.group(2) else None\n    return GpuInfo(match.group(1), cc)", "docstring": "Returns the GpuInfo given a DeviceAttributes proto.\n\nArgs:\ndevice_attrs: A DeviceAttributes proto.\n\nReturns\nA gpu_info tuple. Both fields are None if `device_attrs` does not have a\nvalid physical_device_desc field.", "source": "github-repos"}
{"code": "def __extend_with_api_ref(raw_testinfo):\n    api_name = raw_testinfo['api']\n    if (not os.path.isabs(api_name)):\n        api_path = os.path.join(tests_def_mapping['PWD'], *api_name.split('/'))\n        if os.path.isfile(api_path):\n            api_name = api_path\n    try:\n        block = tests_def_mapping['api'][api_name]\n        raw_testinfo['api_def'] = utils.deepcopy_dict(block)\n    except KeyError:\n        raise exceptions.ApiNotFound('{} not found!'.format(api_name))", "docstring": "extend with api reference\n\nRaises:\nexceptions.ApiNotFound: api not found", "source": "codesearchnet"}
{"code": "def install_hook(self, hook_name, hook_content):\n        \n        hook_path = os.path.join(self.path, '.git/hooks', hook_name)\n        with open(hook_path, 'w') as f:\n            f.write(hook_content)\n        os.chmod(hook_path, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)", "docstring": "Install the repository hook for this repo.\n\nArgs:\nhook_name (str)\nhook_content (str)", "source": "juraj-google-style"}
{"code": "def reveal_undocumented(symbol_name, target_module=None):\n    if symbol_name not in _HIDDEN_ATTRIBUTES:\n        raise LookupError('Symbol %s is not a hidden symbol' % symbol_name)\n    symbol_basename = symbol_name.split('.')[-1]\n    original_module, attr_value = _HIDDEN_ATTRIBUTES[symbol_name]\n    if not target_module:\n        target_module = original_module\n    setattr(target_module, symbol_basename, attr_value)", "docstring": "Reveals a symbol that was previously removed by `remove_undocumented`.\n\nThis should be used by tensorflow internal tests only. It explicitly\ndefeats the encapsulation afforded by `remove_undocumented`.\n\nIt throws an exception when the symbol was not hidden in the first place.\n\nArgs:\nsymbol_name: a string representing the full absolute path of the symbol.\ntarget_module: if specified, the module in which to restore the symbol.", "source": "github-repos"}
{"code": "def show_bokehjs(bokehjs_action, develop=False):\n    \n    print()\n    if develop:\n        print(\"Installed Bokeh for DEVELOPMENT:\")\n    else:\n        print(\"Installed Bokeh:\")\n    if bokehjs_action in ['built', 'installed']:\n        print(\"  - using %s built BokehJS from bokehjs/build\\n\" % (bright(yellow(\"NEWLY\")) if bokehjs_action=='built' else bright(yellow(\"PREVIOUSLY\"))))\n    else:\n        print(\"  - using %s BokehJS, located in 'bokeh.server.static'\\n\" % bright(yellow(\"PACKAGED\")))\n    print()", "docstring": "Print a useful report after setuptools output describing where and how\nBokehJS is installed.\n\nArgs:\nbokehjs_action (str) : one of 'built', 'installed', or 'packaged'\nhow (or if) BokehJS was installed into the python source tree\n\ndevelop (bool, optional) :\nwhether the command was for \"develop\" mode (default: False)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def set_position_i(self, ivalue):\n    ivalue_msb = (int(ivalue) >> 8)\n    ivalue_lsb = (int(ivalue) & 255)\n    data = []\n    data.append(11)\n    data.append(self.servoid)\n    data.append(RAM_WRITE_REQ)\n    data.append(POSITION_KI_RAM)\n    data.append(BYTE2)\n    data.append(ivalue_lsb)\n    data.append(ivalue_msb)\n    send_data(data)", "docstring": "Set the I gain of the position PID\n\nArgs:\nivalue (int): I value", "source": "codesearchnet"}
{"code": "def chosen_angle_to_half_turns(\n        half_turns: Optional[Union[sympy.Basic, float]] = None,\n        rads: Optional[float] = None,\n        degs: Optional[float] = None,\n        default: float = 1.0,\n) -> Union[sympy.Basic, float]:\n    \n\n    if len([1 for e in [half_turns, rads, degs] if e is not None]) > 1:\n        raise ValueError('Redundant angle specification. '\n                         'Use ONE of half_turns, rads, or degs.')\n\n    if rads is not None:\n        return rads / np.pi\n\n    if degs is not None:\n        return degs / 180\n\n    if half_turns is not None:\n        return half_turns\n\n    return default", "docstring": "Returns a half_turns value based on the given arguments.\n\nAt most one of half_turns, rads, degs must be specified. If none are\nspecified, the output defaults to half_turns=1.\n\nArgs:\nhalf_turns: The number of half turns to rotate by.\nrads: The number of radians to rotate by.\ndegs: The number of degrees to rotate by\ndefault: The half turns angle to use if nothing else is specified.\n\nReturns:\nA number of half turns.", "source": "juraj-google-style"}
{"code": "def probability_density(self, X):\n        \n        self.check_fit()\n\n        U, V = self.split_matrix(X)\n\n        a = (self.theta + 1) * np.power(np.multiply(U, V), -(self.theta + 1))\n        b = np.power(U, -self.theta) + np.power(V, -self.theta) - 1\n        c = -(2 * self.theta + 1) / self.theta\n        return a * np.power(b, c)", "docstring": "Compute probability density function for given copula family.\n\nArgs:\nX: `np.ndarray`\n\nReturns:\nnp.array: Probability density for the input values.", "source": "juraj-google-style"}
{"code": "def from_fn(cls, dna_spec: DNASpec, generator_fn: Callable[['DecisionPoint'], Union[List[int], float, str, 'DNA']]) -> 'DNA':\n    if not isinstance(dna_spec, DNASpec):\n        raise TypeError(f\"Argument 'dna_spec' should be DNASpec type. Encountered {dna_spec}.\")\n    if dna_spec.is_space:\n        children = []\n        for child_spec in dna_spec.elements:\n            children.append(DNA.from_fn(child_spec, generator_fn))\n        if len(children) == 1:\n            return children[0]\n        dna = DNA(None, children)\n    elif dna_spec.is_categorical:\n        assert isinstance(dna_spec, DecisionPoint), dna_spec\n        decision = generator_fn(dna_spec)\n        if isinstance(decision, DNA):\n            dna = decision\n        else:\n            if len(decision) != dna_spec.num_choices:\n                raise ValueError(f'Number of DNA child values does not match the number of choices. Child values: {decision!r}, Choices: {dna_spec.num_choices}, Location: {dna_spec.location.path}.')\n            children = []\n            for i, choice in enumerate(decision):\n                choice_location = utils.KeyPath(i, dna_spec.location)\n                if not isinstance(choice, int):\n                    raise ValueError(f'Choice value should be int. Encountered: {choice}, Location: {choice_location.path}.')\n                if choice >= len(dna_spec.candidates):\n                    raise ValueError(f'Choice out of range. Value: {choice}, Candidates: {len(dna_spec.candidates)}, Location: {choice_location.path}.')\n                child_dna = DNA.from_fn(dna_spec.candidates[choice], generator_fn)\n                children.append(DNA(choice, [child_dna]))\n            dna = DNA(None, children)\n    else:\n        assert isinstance(dna_spec, DecisionPoint), dna_spec\n        decision = generator_fn(dna_spec)\n        if isinstance(decision, DNA):\n            dna = decision\n        else:\n            dna = DNA(decision)\n    dna_spec.validate(dna)\n    return dna", "docstring": "Generate a DNA with user generator function.\n\nArgs:\ndna_spec: The DNASpec for the DNA.\ngenerator_fn: A callable object with signature:\n\n`(decision_point) -> decision`\n\nThe decision_point is a `Choices` object or a `Float` object.\nThe returned decision should be:\n\n* a list of integer or a DNA object for a `Choices` decision point.\nWhen a DNA is returned, it will be used as the DNA for the entire\nsub-tree, hence `generate_fn` will not be called on sub-decision\npoints.\n* a float or a DNA object for a Float decision point.\n* a string or a DNA object for a CustomDecisionPoint.\n\nReturns:\nA DNA generated from the user function.", "source": "github-repos"}
{"code": "def generate_hyperband_schedule(self, R, eta):\n    schedule = []\n    s_max = int(math.floor(math.log(R, eta)))\n    for s in range(0, (s_max + 1)):\n        n = math.ceil((int(((s_max + 1) / (s + 1))) * (eta ** s)))\n        r = (R * (eta ** (- s)))\n        bracket = []\n        for i in range(0, (s + 1)):\n            n_i = int(math.floor((n * (eta ** (- i)))))\n            r_i = int((r * (eta ** i)))\n            bracket.append((n_i, r_i))\n        schedule = ([bracket] + schedule)\n    return schedule", "docstring": "Generate hyperband schedule according to the paper.\n\nArgs:\nR: maximum resources per config.\neta: proportion of configruations to discard per\niteration of successive halving.\n\nReturns: hyperband schedule, which is represented\nas a list of brackets, where each bracket\ncontains a list of (num configurations,\nnum resources to use per configuration).\nSee the paper for more details.", "source": "codesearchnet"}
{"code": "def generate_payload(self, command, data=None):\n    json_data = payload_dict[self.dev_type][command]['command']\n    if ('gwId' in json_data):\n        json_data['gwId'] = self.id\n    if ('devId' in json_data):\n        json_data['devId'] = self.id\n    if ('uid' in json_data):\n        json_data['uid'] = self.id\n    if ('t' in json_data):\n        json_data['t'] = str(int(time.time()))\n    if (data is not None):\n        json_data['dps'] = data\n    json_payload = json.dumps(json_data)\n    json_payload = json_payload.replace(' ', '')\n    json_payload = json_payload.encode('utf-8')\n    log.debug('json_payload=%r', json_payload)\n    if (command == SET):\n        self.cipher = AESCipher(self.local_key)\n        json_payload = self.cipher.encrypt(json_payload)\n        preMd5String = (((((b'data=' + json_payload) + b'||lpv=') + PROTOCOL_VERSION_BYTES) + b'||') + self.local_key)\n        m = md5()\n        m.update(preMd5String)\n        hexdigest = m.hexdigest()\n        json_payload = ((PROTOCOL_VERSION_BYTES + hexdigest[8:][:16].encode('latin1')) + json_payload)\n        self.cipher = None\n    postfix_payload = hex2bin((bin2hex(json_payload) + payload_dict[self.dev_type]['suffix']))\n    assert (len(postfix_payload) <= 255)\n    postfix_payload_hex_len = ('%x' % len(postfix_payload))\n    buffer = (hex2bin((((payload_dict[self.dev_type]['prefix'] + payload_dict[self.dev_type][command]['hexByte']) + '000000') + postfix_payload_hex_len)) + postfix_payload)\n    return buffer", "docstring": "Generate the payload to send.\n\nArgs:\ncommand(str): The type of command.\nThis is one of the entries from payload_dict\ndata(dict, optional): The data to be send.\nThis is what will be passed via the 'dps' entry", "source": "codesearchnet"}
{"code": "def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n\n    def _is_valid_text_input(t):\n        if isinstance(t, str):\n            return True\n        elif isinstance(t, (list, tuple)):\n            if len(t) == 0:\n                return True\n            elif isinstance(t[0], str):\n                return True\n            elif isinstance(t[0], (list, tuple)):\n                return len(t[0]) == 0 or isinstance(t[0][0], str)\n            else:\n                return False\n        else:\n            return False\n    if text_pair is not None:\n        if not _is_valid_text_input(text):\n            raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')\n        if not isinstance(text_pair, (list, tuple)):\n            raise ValueError('words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    elif not isinstance(text, (list, tuple)):\n        raise ValueError('Words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    if text_pair is not None:\n        is_batched = isinstance(text, (list, tuple))\n    else:\n        is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))\n    words = text if text_pair is None else text_pair\n    if boxes is None:\n        raise ValueError('You must provide corresponding bounding boxes')\n    if is_batched:\n        if len(words) != len(boxes):\n            raise ValueError('You must provide words and boxes for an equal amount of examples')\n        for words_example, boxes_example in zip(words, boxes):\n            if len(words_example) != len(boxes_example):\n                raise ValueError('You must provide as many words as there are bounding boxes')\n    elif len(words) != len(boxes):\n        raise ValueError('You must provide as many words as there are bounding boxes')\n    if is_batched:\n        if text_pair is not None and len(text) != len(text_pair):\n            raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')\n        batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n        is_pair = bool(text_pair is not None)\n        return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n    else:\n        return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\nsequences with word-level normalized bounding boxes and optional labels.\n\nArgs:\ntext (`str`, `List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings\n(words of a single example or questions of a batch of examples) or a list of list of strings (batch of\nwords).\ntext_pair (`List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence should be a list of strings\n(pretokenized string).\nboxes (`List[List[int]]`, `List[List[List[int]]]`):\nWord-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.\nword_labels (`List[int]`, `List[List[int]]`, *optional*):\nWord-level integer labels (for token classification tasks such as FUNSD, CORD).", "source": "github-repos"}
{"code": "def _get_bounds(self, layers):\n        \n        extent_query = ('SELECT ST_EXTENT(the_geom) AS the_geom '\n                        'FROM ({query}) AS t{idx}\\n')\n        union_query = 'UNION ALL\\n'.join(\n            [extent_query.format(query=layer.orig_query, idx=idx)\n             for idx, layer in enumerate(layers)\n             if not layer.is_basemap])\n\n        extent = self.sql_client.send(\n            utils.minify_sql((\n                'SELECT',\n                '    ST_XMIN(ext) AS west,',\n                '    ST_YMIN(ext) AS south,',\n                '    ST_XMAX(ext) AS east,',\n                '    ST_YMAX(ext) AS north',\n                'FROM (',\n                '    SELECT ST_Extent(the_geom) AS ext',\n                '    FROM ({union_query}) AS _wrap1',\n                ') AS _wrap2',\n            )).format(union_query=union_query),\n            do_post=False)\n\n        return extent['rows'][0]", "docstring": "Return the bounds of all data layers involved in a cartoframes map.\n\nArgs:\nlayers (list): List of cartoframes layers. See `cartoframes.layers`\nfor all types.\n\nReturns:\ndict: Dictionary of northern, southern, eastern, and western bounds\nof the superset of data layers. Keys are `north`, `south`,\n`east`, and `west`. Units are in WGS84.", "source": "juraj-google-style"}
{"code": "def _check_registry_type(folder=None):\n    folder = _registry_folder(folder)\n    default_file = os.path.join(folder, 'registry_type.txt')\n    try:\n        with open(default_file, 'r') as infile:\n            data = infile.read()\n            data = data.strip()\n            ComponentRegistry.SetBackingStore(data)\n    except IOError:\n        pass", "docstring": "Check if the user has placed a registry_type.txt file to choose the registry type\n\nIf a default registry type file is found, the DefaultBackingType and DefaultBackingFile\nclass parameters in ComponentRegistry are updated accordingly.\n\nArgs:\nfolder (string): The folder that we should check for a default registry type", "source": "codesearchnet"}
{"code": "def output_hist(self, output_hist: Hist, input_observable: Any, **kwargs: Dict[(str, Any)]) -> Union[(Hist, Any)]:\n    return output_hist", "docstring": "Return an output object. It should store the ``output_hist``.\n\nNote:\nThe output object could just be the raw histogram.\n\nNote:\nThis function is just a basic placeholder which returns the given output object (a histogram)\nand likely should be overridden.\n\nArgs:\noutput_hist: The output histogram\ninput_observable (object): The corresponding input object. It could be a histogram or something\nmore complex.\nkwargs: Projection information dict combined with additional arguments passed to the\nprojection function\nReturn:\nThe output object which should be stored in the output dict. By default, it returns the\noutput hist.", "source": "codesearchnet"}
{"code": "def HasDataStream(self, name, case_sensitive=True):\n    \n    if not isinstance(name, py2to3.STRING_TYPES):\n      raise ValueError('Name is not a string.')\n\n    name_lower = name.lower()\n\n    for data_stream in self._GetDataStreams():\n      if data_stream.name == name:\n        return True\n\n      if not case_sensitive and data_stream.name.lower() == name_lower:\n        return True\n\n    return False", "docstring": "Determines if the file entry has specific data stream.\n\nArgs:\nname (str): name of the data stream.\ncase_sensitive (Optional[bool]): True if the name is case sensitive.\n\nReturns:\nbool: True if the file entry has the data stream.\n\nRaises:\nValueError: if the name is not string.", "source": "juraj-google-style"}
{"code": "def generate_custom_cert_name(env='', region='', account='', certificate=None):\n    cert_name = None\n    template_kwargs = {'account': account, 'name': certificate}\n    try:\n        rendered_template = get_template(template_file='infrastructure/iam/tlscert_naming.json.j2', **template_kwargs)\n        tlscert_dict = json.loads(rendered_template)\n    except ForemastTemplateNotFound:\n        LOG.info('Unable to find TLS Cert Template...falling back to default logic...')\n        return cert_name\n    try:\n        LOG.info('Attempting to find TLS Cert using TLS Cert Template v1 lookup...')\n        cert_name = tlscert_dict[env][certificate]\n        LOG.info('Found TLS certificate named %s under %s using TLS Cert Template v1', certificate, env)\n    except KeyError:\n        LOG.error('Unable to find TLS certificate named %s under %s using v1 TLS Cert Template.', certificate, env)\n    tls_services = ['iam', 'acm']\n    if ((cert_name is None) and all(((service in tlscert_dict) for service in tls_services))):\n        LOG.info('Attempting to find TLS Cert using TLS Cert Template v2 lookup...')\n        if (certificate in tlscert_dict['iam'][env]):\n            cert_name = tlscert_dict['iam'][env][certificate]\n            LOG.info('Found IAM TLS certificate named %s under %s using TLS Cert Template v2', certificate, env)\n        elif (certificate in tlscert_dict['acm'][region][env]):\n            cert_name = tlscert_dict['acm'][region][env][certificate]\n            LOG.info('Found ACM TLS certificate named %s under %s in %s using TLS Cert Template v2', certificate, env, region)\n        else:\n            LOG.error('Unable to find TLS certificate named %s under parent keys [ACM, IAM] %s in v2 TLS Cert Template.', certificate, env)\n    return cert_name", "docstring": "Generate a custom TLS Cert name based on a template.\n\nArgs:\nenv (str): Account environment name\nregion (str): AWS Region.\naccount (str): Account number for ARN.\ncertificate (str): Name of SSL certificate.\n\nReturns:\nstr: Fully qualified ARN for SSL certificate.\nNone: Template doesn't exist.", "source": "codesearchnet"}
{"code": "def _post_process(self, feed_item, item):\n    campaign = self._campaign_dao.get(feed_item, required=True)\n    if campaign:\n        feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n        feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']", "docstring": "Updates the feed item with ids and names of related object so those can be updated in the Bulkdozer feed.\n\nArgs:\nfeed_item: The Bulkdozer feed item.\nitem: The CM newly created or updated object.", "source": "github-repos"}
{"code": "def _ConvertValueForCsv(self, pql_value):\n    \n    if 'value' in pql_value:\n      field = pql_value['value']\n    elif 'values' in pql_value:\n      field = pql_value['values']\n    else:\n      field = None\n\n    if field:\n      if isinstance(field, list):\n        if all(AdManagerClassType(single_field) == AdManagerClassType(field[0])\n               for single_field in field):\n          return ','.join([\n              '\"%s\"' % str(self._ConvertValueForCsv(single_field))\n              for single_field in field])\n        else:\n          raise googleads.errors.GoogleAdsValueError(\n              'The set value returned contains unsupported mix value types')\n\n      class_type = AdManagerClassType(pql_value)\n\n      if class_type == 'TextValue':\n        s = field.replace('\"', '\"\"')\n\n        \n        if sys.version_info.major < 3:\n          s = s.encode('UTF8')\n        return s\n      elif class_type == 'NumberValue':\n        return float(field) if '.' in field else int(field)\n      elif class_type == 'DateTimeValue':\n        return self._ConvertDateTimeToOffset(field)\n      elif class_type == 'DateValue':\n        return datetime.date(int(field['date']['year']),\n                             int(field['date']['month']),\n                             int(field['date']['day'])).isoformat()\n      else:\n        return field\n    else:\n      return '-'", "docstring": "Sanitizes a field value from a Value object to a CSV suitable format.\n\nArgs:\npql_value: dict a dictionary containing the data for a single field of an\nentity.\n\nReturns:\nstr a CSV writer friendly value formatted by Value.Type.", "source": "juraj-google-style"}
{"code": "def create_resource_group(access_token, subscription_id, rgname, location):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '?api-version=', RESOURCE_API])\n    rg_body = {'location': location}\n    body = json.dumps(rg_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a resource group in the specified location.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def request_parking(self, endpoint, url_args={}, **kwargs):\n    if (endpoint not in ENDPOINTS_PARKING):\n        return None\n    url = (URL_OPENBUS + ENDPOINTS_PARKING[endpoint])\n    lang = url_args.get('lang', 'ES')\n    address = url_args.get('address', '')\n    url = url.format(id_client=self._emt_id, passkey=self._emt_pass, address=address, lang=lang)\n    return _parking_req.post(url, data=kwargs).json()", "docstring": "Make a request to the given endpoint of the ``parking`` server.\n\nThis returns the plain JSON (dict) response which can then be parsed\nusing one of the implemented types.\n\nArgs:\nendpoint (str): Endpoint to send the request to.\nThis string corresponds to the key in the ``ENDPOINTS`` dict.\nurl_args (dict): Dictionary for URL string replacements.\n**kwargs: Request arguments.\n\nReturns:\nObtained response (dict) or None if the endpoint was not found.", "source": "codesearchnet"}
{"code": "def extract_tensors_from_dataset(dataset):\n    iterator = get_iterator(dataset)\n    inputs, targets, sample_weight = unpack_iterator_input(iterator)\n    return (inputs, targets, sample_weight)", "docstring": "Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\nArgs:\ndataset: Dataset instance.\n\nReturns:\nTuple of tensors `x, y, weights`. `y` and `weights` entry may be None.", "source": "github-repos"}
{"code": "def linear(self, x):\n    with tf.name_scope('presoftmax_linear'):\n        batch_size = tf.shape(x)[0]\n        length = tf.shape(x)[1]\n        x = tf.reshape(x, [(- 1), self.hidden_size])\n        logits = tf.matmul(x, self.shared_weights, transpose_b=True)\n        return tf.reshape(logits, [batch_size, length, self.vocab_size])", "docstring": "Computes logits by running x through a linear layer.\n\nArgs:\nx: A float32 tensor with shape [batch_size, length, hidden_size]\nReturns:\nfloat32 tensor with shape [batch_size, length, vocab_size].", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n    \n    data = b''\n    while ((size and len(data) < size) and\n           self._current_offset < self.uncompressed_data_size):\n      member = self._GetMemberForOffset(self._current_offset)\n      member_offset = self._current_offset - member.uncompressed_data_offset\n      data_read = member.ReadAtOffset(member_offset, size)\n      if data_read:\n        self._current_offset += len(data_read)\n        data = b''.join([data, data_read])\n\n    return data", "docstring": "Reads a byte string from the gzip file at the current offset.\n\nThe function will read a byte string up to the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def double(self, count: float) -> float:\n    return 2 * count", "docstring": "Returns the input multiplied by 2.\n\nArgs:\ncount: Input number that you want to double.\n\nReturns:\nA number that is the double of count.", "source": "github-repos"}
{"code": "def stylify(code: str) -> str:\n    has_indent = len(get_indent(code)) > 0\n    if has_indent:\n        code = f'class Bla:\\n{code}'\n    formatted_code = run_ruff(code)\n    return formatted_code[len('class Bla:\\n'):] if has_indent else formatted_code", "docstring": "Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`.\nAs `ruff` does not provide a python api this cannot be done on the fly.\n\nArgs:\ncode (`str`): The code to format.\n\nReturns:\n`str`: The formatted code.", "source": "github-repos"}
{"code": "def check_integrity(models):\n    messages = dict(error=[], warning=[])\n    for model in models:\n        validators = []\n        for name in dir(model):\n            if (not name.startswith('_check')):\n                continue\n            obj = getattr(model, name)\n            if getattr(obj, 'validator_type', None):\n                validators.append(obj)\n        for func in validators:\n            messages[func.validator_type].extend(func())\n    for msg in sorted(messages['error']):\n        log.error(('E-%d (%s): %s: %s' % msg))\n    for msg in sorted(messages['warning']):\n        (code, name, desc, obj) = msg\n        if (code not in __silencers__):\n            log.warning(('W-%d (%s): %s: %s' % msg))", "docstring": "Apply validation and integrity checks to a collection of Bokeh models.\n\nArgs:\nmodels (seq[Model]) : a collection of Models to test\n\nReturns:\nNone\n\nThis function will emit log warning and error messages for all error or\nwarning conditions that are detected. For example, layouts without any\nchildren will trigger a warning:\n\n.. code-block:: python\n\n>>> empty_row = Row\n\n>>> check_integrity([empty_row])\nW-1002 (EMPTY_LAYOUT): Layout has no children: Row(id='2404a029-c69b-4e30-9b7d-4b7b6cdaad5b', ...)", "source": "codesearchnet"}
{"code": "def list_documents(project_id, knowledge_base_id):\n    \n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.DocumentsClient()\n    knowledge_base_path = client.knowledge_base_path(project_id,\n                                                     knowledge_base_id)\n\n    print('Documents for Knowledge Id: {}'.format(knowledge_base_id))\n    for document in client.list_documents(knowledge_base_path):\n        print(' - Display Name: {}'.format(document.display_name))\n        print(' - Knowledge ID: {}'.format(document.name))\n        print(' - MIME Type: {}'.format(document.mime_type))\n        print(' - Knowledge Types:')\n        for knowledge_type in document.knowledge_types:\n            print('    - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))\n        print(' - Source: {}\\n'.format(document.content_uri))", "docstring": "Lists the Documents belonging to a Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.", "source": "juraj-google-style"}
{"code": "def write_temp_bird_conf(dummy_ip_prefix,\n                         config_file,\n                         variable_name,\n                         prefixes):\n    \n    log = logging.getLogger(PROGRAM_NAME)\n    comment = (\"\n               \"REMOVED from the constant.\".format(i=dummy_ip_prefix))\n\n    \n    \n    \n    tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))\n    log.debug(\"going to write to %s\", tm_file)\n\n    try:\n        with open(tm_file, 'w') as tmpf:\n            tmpf.write(\"\n                       .format(t=datetime.datetime.now(),\n                               n=PROGRAM_NAME,\n                               p=os.getpid()))\n            tmpf.write(\"{c}\\n\".format(c=comment))\n            tmpf.write(\"define {n} =\\n\".format(n=variable_name))\n            tmpf.write(\"{s}[\\n\".format(s=4 * ' '))\n            \n            \n            tmpf.write(',\\n'.join([' '*8 + n for n in prefixes]))\n            tmpf.write(\"\\n{s}];\\n\".format(s=4 * ' '))\n    except OSError as error:\n        log.critical(\"failed to write temporary file %s: %s. This is a FATAL \"\n                     \"error, this exiting main program\", tm_file, error)\n        sys.exit(1)\n    else:\n        return tm_file", "docstring": "Write in a temporary file the list of IP-Prefixes.\n\nA failure to create and write the temporary file will exit main program.\n\nArguments:\ndummy_ip_prefix (str): The dummy IP prefix, which must be always\nconfig_file (str): The file name of bird configuration\nvariable_name (str): The name of the variable set in bird configuration\nprefixes (list): The list of IP-Prefixes to write\n\nReturns:\nThe filename of the temporary file", "source": "juraj-google-style"}
{"code": "def _WriteRow(self, output_writer, values):\n    \n    maximum_row_width = self._MAXIMUM_WIDTH - self._column_width - 3\n\n    \n    primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\\n'.format(\n        self._column_width)\n\n    \n    secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\\n'.format(\n        self._column_width + 3)\n\n    if isinstance(values[1], py2to3.STRING_TYPES):\n      value_string = values[1]\n    else:\n      value_string = '{0!s}'.format(values[1])\n\n    if len(value_string) < maximum_row_width:\n      output_writer.Write(primary_format_string.format(\n          values[0], value_string))\n      return\n\n    \n    words = value_string.split()\n\n    current = 0\n\n    lines = []\n    word_buffer = []\n    for word in words:\n      current += len(word) + 1\n      if current >= maximum_row_width:\n        current = len(word)\n        lines.append(' '.join(word_buffer))\n        word_buffer = [word]\n      else:\n        word_buffer.append(word)\n    lines.append(' '.join(word_buffer))\n\n    \n    output_writer.Write(\n        primary_format_string.format(values[0], lines[0]))\n    for line in lines[1:]:\n      output_writer.Write(secondary_format_string.format('', line))", "docstring": "Writes a row of values aligned to the column width.\n\nArgs:\noutput_writer (OutputWriter): output writer.\nvalues (list[object]): values.", "source": "juraj-google-style"}
{"code": "def _AddProvidesEdges(self, rdf_artifact):\n    \n    for attribute in rdf_artifact.provides:\n      self._AddEdge(rdf_artifact.name, attribute)", "docstring": "Add an edge for every attribute the given artifact provides.\n\nThis method adds a directed edge from the artifact node to every attribute\nthis artifact provides.\n\nArgs:\nrdf_artifact: The artifact object.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 no_decomp: Callable[[ops.Operation], bool]=(lambda _: False)\n                 ) -> None:\n        \n        super().__init__()\n        self.no_decomp = no_decomp", "docstring": "Construct the optimization pass.\n\nArgs:\nno_decomp: A predicate that determines whether an operation should\nbe decomposed or not. Defaults to decomposing everything.", "source": "juraj-google-style"}
{"code": "def calc_digest(origin, algorithm='sha1', block_size=None):\n    try:\n        hashM = hashlib.new(algorithm)\n    except ValueError:\n        raise ValueError('hash algorithm not supported by the underlying platform: \"{0}\"'.format(algorithm))\n    while True:\n        chunk = (origin.read(block_size) if block_size else origin.read())\n        if (not chunk):\n            break\n        hashM.update(chunk)\n    return hashM.hexdigest()", "docstring": "Calculate digest of a readable object\n\nArgs:\norigin -- a readable object for which calculate digest\nalgorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms.\nblock_size -- the size of the block to read at each iteration", "source": "codesearchnet"}
{"code": "def merge(self, ref_name: str):\n        \n        if self.is_dirty():\n            LOGGER.error('repository is dirty; cannot merge: %s', ref_name)\n            sys.exit(-1)\n        LOGGER.info('merging ref: \"%s\" into branch: %s', ref_name, self.get_current_branch())\n        self.repo.git.merge(ref_name)", "docstring": "Merges two refs\n\nArgs:\nref_name: ref to merge in the current one", "source": "juraj-google-style"}
{"code": "async def dist(self, mesg):\n    if self.isfini:\n        return ()\n    ret = []\n    for func in self._syn_funcs.get(mesg[0], ()):\n        try:\n            ret.append((await s_coro.ornot(func, mesg)))\n        except asyncio.CancelledError:\n            raise\n        except Exception:\n            logger.exception('base %s error with mesg %s', self, mesg)\n    for func in self._syn_links:\n        try:\n            ret.append((await func(mesg)))\n        except asyncio.CancelledError:\n            raise\n        except Exception:\n            logger.exception('base %s error with mesg %s', self, mesg)\n    return ret", "docstring": "Distribute an existing event tuple.\n\nArgs:\nmesg ((str,dict)):  An event tuple.\n\nExample:\n\nawait base.dist( ('foo',{'bar':'baz'}) )", "source": "codesearchnet"}
{"code": "def check_initializers(initializers, keys):\n    if (initializers is None):\n        return {}\n    _assert_is_dictlike(initializers, valid_keys=keys)\n    keys = set(keys)\n    if (not (set(initializers) <= keys)):\n        extra_keys = (set(initializers) - keys)\n        raise KeyError('Invalid initializer keys {}, initializers can only be provided for {}'.format(', '.join((\"'{}'\".format(key) for key in extra_keys)), ', '.join((\"'{}'\".format(key) for key in keys))))\n    _check_nested_callables(initializers, 'Initializer')\n    return dict(initializers)", "docstring": "Checks the given initializers.\n\nThis checks that `initializers` is a dictionary that only contains keys in\n`keys`, and furthermore the entries in `initializers` are functions or\nfurther dictionaries (the latter used, for example, in passing initializers\nto modules inside modules) that must satisfy the same constraints.\n\nArgs:\ninitializers: Dictionary of initializers (allowing nested dictionaries) or\nNone.\nkeys: Iterable of valid keys for `initializers`.\n\nReturns:\nCopy of checked dictionary of initializers. If `initializers=None`, an empty\ndictionary will be returned.\n\nRaises:\nKeyError: If an initializer is provided for a key not in `keys`.\nTypeError: If a provided initializer is not a callable function, or\n`initializers` is not a Mapping.", "source": "codesearchnet"}
{"code": "def is_file_on_local_server(self, text) -> Tuple[(Optional[Path], Optional[int], Optional[int])]:\n    lineno = None\n    colno = None\n    py_func = None\n    m = re.compile('(.*)\\\\:(\\\\d+)\\\\:(\\\\d+)$').match(text)\n    if m:\n        text = m.group(1)\n        lineno = m.group(2)\n        colno = m.group(3)\n    else:\n        m = re.compile('(.*)\\\\:(\\\\d+)$').match(text)\n        if m:\n            text = m.group(1)\n            lineno = m.group(2)\n        else:\n            m = re.compile('^(.*)\\\\:\\\\:([a-zA-Z0-9\\\\_]+)$').match(text)\n            if m:\n                text = m.group(1)\n                py_func = m.group(2).strip()\n\n    def find_lineno(text, pt, lineno, py_func):\n        if lineno:\n            return lineno\n        if (not py_func):\n            return\n        with pt.open() as f:\n            for (i, line) in enumerate(f.readlines()):\n                if line.startswith('def {}'.format(py_func)):\n                    return (i + 1)\n                    break\n    pt = Path(text)\n    log.debug('checking file existance: %r', pt)\n    try:\n        if pt.exists():\n            lineno = find_lineno(text, pt, lineno, py_func)\n            log.info('File exists: %r, line=%r', pt.absolute().as_posix(), lineno)\n            return (pt, lineno, colno)\n        log.debug('No file found matching: %r', text)\n        cwd = self.get_current_directory()\n        pt = (Path(cwd) / pt)\n        log.debug('checking file existance: %r', pt)\n        if pt.exists():\n            lineno = find_lineno(text, pt, lineno, py_func)\n            log.info('File exists: %r, line=%r', pt.absolute().as_posix(), lineno)\n            return (pt, lineno, colno)\n        log.debug('file does not exist: %s', str(pt))\n    except OSError:\n        log.debug('not a file name: %r', text)\n    return (None, None, None)", "docstring": "Test if the provided text matches a file on local server\n\nSupports:\n- absolute path\n- relative path (using current working directory)\n- file:line syntax\n- file:line:colum syntax\n\nArgs:\ntext (str): candidate for file search\n\nReturns\n- Tuple(None, None, None) if the provided text does not match anything\n- Tuple(file path, None, None) if only a file path is found\n- Tuple(file path, linenumber, None) if line number is found\n- Tuple(file path, linenumber, columnnumber) if line and column numbers are found", "source": "codesearchnet"}
{"code": "class PoolFormerFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n    crop_pct: Optional[float]", "docstring": "Args:\ncrop_pct (`float`, *optional*, defaults to `self.crop_pct`):\nPercentage of the image to crop. Only has an effect if `do_resize` is set to `True`.", "source": "github-repos"}
{"code": "def run_node(self, node, stim):\n    if isinstance(node, string_types):\n        node = self.nodes[node]\n    result = node.transformer.transform(stim)\n    if node.is_leaf():\n        return listify(result)\n    stim = result\n    if ((len(node.children) > 1) and isgenerator(stim)):\n        stim = list(stim)\n    return list(chain(*[self.run_node(c, stim) for c in node.children]))", "docstring": "Executes the Transformer at a specific node.\n\nArgs:\nnode (str, Node): If a string, the name of the Node in the current\nGraph. Otherwise the Node instance to execute.\nstim (str, stim, list): Any valid input to the Transformer stored\nat the target node.", "source": "codesearchnet"}
{"code": "def load(self, cellpy_file, parent_level='CellpyData'):\n    try:\n        self.logger.debug('loading cellpy-file (hdf5):')\n        self.logger.debug(cellpy_file)\n        new_datasets = self._load_hdf5(cellpy_file, parent_level)\n        self.logger.debug('cellpy-file loaded')\n    except AttributeError:\n        new_datasets = []\n        self.logger.warning('This cellpy-file version is not supported bycurrent reader (try to update cellpy).')\n    if new_datasets:\n        for dataset in new_datasets:\n            self.datasets.append(dataset)\n    else:\n        self.logger.warning('Could not load')\n        self.logger.warning(str(cellpy_file))\n    self.number_of_datasets = len(self.datasets)\n    self.status_datasets = self._validate_datasets()\n    self._invent_a_name(cellpy_file)\n    return self", "docstring": "Loads a cellpy file.\n\nArgs:\ncellpy_file (path, str): Full path to the cellpy file.\nparent_level (str, optional): Parent level", "source": "codesearchnet"}
{"code": "def from_str(cls, input_string, fmt, primitive=False, sort=False, merge_tol=0.0):\n    from pymatgen.io.cif import CifParser\n    from pymatgen.io.vasp import Poscar\n    from pymatgen.io.cssr import Cssr\n    from pymatgen.io.xcrysden import XSF\n    from pymatgen.io.atat import Mcsqs\n    fmt = fmt.lower()\n    if (fmt == 'cif'):\n        parser = CifParser.from_string(input_string)\n        s = parser.get_structures(primitive=primitive)[0]\n    elif (fmt == 'poscar'):\n        s = Poscar.from_string(input_string, False, read_velocities=False).structure\n    elif (fmt == 'cssr'):\n        cssr = Cssr.from_string(input_string)\n        s = cssr.structure\n    elif (fmt == 'json'):\n        d = json.loads(input_string)\n        s = Structure.from_dict(d)\n    elif (fmt == 'yaml'):\n        import ruamel.yaml as yaml\n        d = yaml.safe_load(input_string)\n        s = Structure.from_dict(d)\n    elif (fmt == 'xsf'):\n        s = XSF.from_string(input_string).structure\n    elif (fmt == 'mcsqs'):\n        s = Mcsqs.structure_from_string(input_string)\n    else:\n        raise ValueError(('Unrecognized format `%s`!' % fmt))\n    if sort:\n        s = s.get_sorted_structure()\n    if merge_tol:\n        s.merge_sites(merge_tol)\n    return cls.from_sites(s)", "docstring": "Reads a structure from a string.\n\nArgs:\ninput_string (str): String to parse.\nfmt (str): A format specification.\nprimitive (bool): Whether to find a primitive cell. Defaults to\nFalse.\nsort (bool): Whether to sort the sites in accordance to the default\nordering criteria, i.e., electronegativity.\nmerge_tol (float): If this is some positive number, sites that\nare within merge_tol from each other will be merged. Usually\n0.01 should be enough to deal with common numerical issues.\n\nReturns:\nIStructure / Structure", "source": "codesearchnet"}
{"code": "def recalculate_concepts(self, concepts, lang=None):\n        \n        if len(concepts) == 0:\n            return\n\n        if lang is None:\n            items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))\n        else:\n            items = Concept.objects.get_concept_item_mapping(lang=lang)\n\n        environment = get_environment()\n        mastery_threshold = get_mastery_trashold()\n        for user, concepts in concepts.items():\n            all_items = list(set(flatten([items[c] for c in concepts])))\n            answer_counts = environment.number_of_answers_more_items(all_items, user)\n            correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)\n            predictions = dict(list(zip(all_items, get_predictive_model().\n                                        predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))\n            new_user_stats = []\n            stats_to_delete_condition = Q()\n            for concept in concepts:\n                answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(\n                    time_spent=Sum(\"response_time\"),\n                    sessions=Count(\"session\", True),\n                    time_first=Min(\"time\"),\n                    time_last=Max(\"time\"),\n                )\n                stats = {\n                    \"answer_count\": sum(answer_counts[i] for i in items[concept]),\n                    \"correct_answer_count\": sum(correct_answer_counts[i] for i in items[concept]),\n                    \"item_count\": len(items[concept]),\n                    \"practiced_items_count\": sum([answer_counts[i] > 0 for i in items[concept]]),\n                    \"mastered_items_count\": sum([predictions[i] >= mastery_threshold for i in items[concept]]),\n                    \"prediction\": sum([predictions[i] for i in items[concept]]) / len(items[concept]),\n                    \"time_spent\": answer_aggregates[\"time_spent\"] / 1000,\n                    \"session_count\": answer_aggregates[\"sessions\"],\n                    \"time_first\": answer_aggregates[\"time_first\"].timestamp(),\n                    \"time_last\": answer_aggregates[\"time_last\"].timestamp(),\n                }\n                stats_to_delete_condition |= Q(user=user, concept=concept)\n                for stat_name, value in stats.items():\n                    new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))\n            self.filter(stats_to_delete_condition).delete()\n            self.bulk_create(new_user_stats)", "docstring": "Recalculated given concepts for given users\n\nArgs:\nconcepts (dict): user id (int -> set of concepts to recalculate)\nlang(Optional[str]): language used to get items in all concepts (cached).\nDefaults to None, in that case are get items only in used concepts", "source": "juraj-google-style"}
{"code": "def get_note_list(self, data=True, since=None, tags=[]):\n    status = 0\n    ret = []\n    response_notes = {}\n    notes = {'index': []}\n    params = ('/index?limit=%s' % str(NOTE_FETCH_LENGTH))\n    if (since is not None):\n        params += ('&since=%s' % since)\n    if data:\n        params += '&data=true'\n    request = Request((DATA_URL + params))\n    request.add_header(self.header, self.get_token())\n    try:\n        response = urllib2.urlopen(request)\n        response_notes = json.loads(response.read().decode('utf-8'))\n        note_objects = []\n        for n in response_notes['index']:\n            if (not data):\n                n['d'] = {}\n            note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])\n            note_objects.append(note_object)\n        notes['index'].extend(note_objects)\n    except HTTPError as e:\n        if (e.code == 401):\n            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n        else:\n            return (e, (- 1))\n    except IOError as e:\n        return (e, (- 1))\n    while ('mark' in response_notes):\n        params += ('&mark=%s' % response_notes['mark'])\n        request = Request((DATA_URL + params))\n        request.add_header(self.header, self.get_token())\n        try:\n            response = urllib2.urlopen(request)\n            response_notes = json.loads(response.read().decode('utf-8'))\n            note_objects = []\n            for n in response_notes['index']:\n                if (not data):\n                    n['d'] = {}\n                note_object = n['d']\n                note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])\n                note_objects.append(note_object)\n            notes['index'].extend(note_objects)\n        except HTTPError as e:\n            if (e.code == 401):\n                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n            else:\n                return (e, (- 1))\n        except IOError as e:\n            return (e, (- 1))\n    note_list = notes['index']\n    self.current = response_notes['current']\n    if (len(tags) > 0):\n        note_list = [n for n in note_list if (len(set(n['tags']).intersection(tags)) > 0)]\n    return (note_list, status)", "docstring": "Method to get the note list\n\nThe method can be passed optional arguments to limit the list to\nnotes containing a certain tag, or only updated since a certain\nSimperium cursor. If omitted a list of all notes is returned.\n\nBy default data objects are returned. If data is set to false only\nkeys/ids and versions are returned. An empty data object is inserted\nfor compatibility.\n\nArguments:\n- tags=[] list of tags as string: return notes that have\nat least one of these tags\n- since=cursor Simperium cursor as string: return only changes\nsince this cursor\n- data=True If false only return keys/ids and versions\n\nReturns:\nA tuple `(notes, status)`\n\n- notes (list): A list of note objects with all properties set except\n`content`.\n- status (int): 0 on success and -1 otherwise", "source": "codesearchnet"}
{"code": "def parse_frequencies(variant, transcripts):\n    \n    frequencies = {}\n    \n    thousand_genomes_keys = ['1000GAF']\n    thousand_genomes_max_keys = ['1000G_MAX_AF']\n\n    exac_keys = ['EXACAF']\n    exac_max_keys = ['ExAC_MAX_AF', 'EXAC_MAX_AF']\n\n    gnomad_keys = ['GNOMADAF', 'GNOMAD_AF']\n    gnomad_max_keys = ['GNOMADAF_POPMAX', 'GNOMADAF_MAX']\n\n    for test_key in thousand_genomes_keys:\n        thousand_g = parse_frequency(variant, test_key)\n        if thousand_g:\n            frequencies['thousand_g'] = thousand_g\n            break\n\n    for test_key in thousand_genomes_max_keys:\n        thousand_g_max = parse_frequency(variant, test_key)\n        if thousand_g_max:\n            frequencies['thousand_g_max'] = thousand_g_max\n            break\n\n    for test_key in exac_keys:\n        exac = parse_frequency(variant, test_key)\n        if exac:\n            frequencies['exac'] = exac\n            break\n\n    for test_key in exac_max_keys:\n        exac_max = parse_frequency(variant, test_key)\n        if exac_max:\n            frequencies['exac_max'] = exac_max\n            break\n\n    for test_key in gnomad_keys:\n        gnomad = parse_frequency(variant, test_key)\n        if gnomad:\n            frequencies['gnomad'] = gnomad\n            break\n\n    for test_key in gnomad_max_keys:\n        gnomad_max = parse_frequency(variant, test_key)\n        if gnomad_max:\n            frequencies['gnomad_max'] = gnomad_max\n            break\n\n    \n    if not frequencies:\n        for transcript in transcripts:\n            exac = transcript.get('exac_maf')\n            exac_max = transcript.get('exac_max')\n\n            thousand_g = transcript.get('thousand_g_maf')\n            thousandg_max = transcript.get('thousandg_max')\n\n            gnomad = transcript.get('gnomad_maf')\n            gnomad_max = transcript.get('gnomad_max')\n            if exac:\n                frequencies['exac'] = exac\n            if exac_max:\n                frequencies['exac_max'] = exac_max\n            if thousand_g:\n                frequencies['thousand_g'] = thousand_g\n            if thousandg_max:\n                frequencies['thousand_g_max'] = thousandg_max\n            if gnomad:\n                frequencies['gnomad'] = gnomad\n            if gnomad_max:\n                frequencies['gnomad_max'] = gnomad_max\n\n    \n    thousand_g_left = parse_frequency(variant, 'left_1000GAF')\n    if thousand_g_left:\n        frequencies['thousand_g_left'] = thousand_g_left\n\n    thousand_g_right = parse_frequency(variant, 'right_1000GAF')\n    if thousand_g_right:\n        frequencies['thousand_g_right'] = thousand_g_right\n\n    return frequencies", "docstring": "Add the frequencies to a variant\n\nFrequencies are parsed either directly from keys in info fieds or from the\ntranscripts is they are annotated there.\n\nArgs:\nvariant(cyvcf2.Variant): A parsed vcf variant\ntranscripts(iterable(dict)): Parsed transcripts\n\nReturns:\nfrequencies(dict): A dictionary with the relevant frequencies", "source": "juraj-google-style"}
{"code": "def _ParseRecord(\n      self, parser_mediator, record_index, evt_record, recovered=False):\n    \n    event_data = self._GetEventData(\n        parser_mediator, record_index, evt_record, recovered=recovered)\n\n    try:\n      creation_time = evt_record.get_creation_time_as_integer()\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read creation time from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n      creation_time = None\n\n    if creation_time:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=creation_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    try:\n      written_time = evt_record.get_written_time_as_integer()\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read written time from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n      written_time = None\n\n    if written_time:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=written_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if not creation_time and not written_time:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Windows EventLog (EVT) record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord_index (int): event record index.\nevt_record (pyevt.record): event record.\nrecovered (Optional[bool]): True if the record was recovered.", "source": "juraj-google-style"}
{"code": "def add_body_part(self, key, data, mime_type, size=None):\n    if isinstance(data, str):\n        size = len(data)\n    if hasattr(data, 'fileno'):\n        size = os.fstat(data.fileno())[stat.ST_SIZE]\n    if (size is None):\n        raise UnknownSize('Each part of the body must have a known size.')\n    if ('Content-Length' in self.headers):\n        content_length = int(self.headers['Content-Length'])\n    else:\n        content_length = 0\n    boundary_string = ('\\r\\n--%s\\r\\n' % (MIME_BOUNDARY,))\n    self._body_parts.append(boundary_string)\n    content_length += (len(boundary_string) + size)\n    cd = ('Content-Disposition: form-data; name=\"%s\"' % key)\n    mt = mime_type\n    if hasattr(data, 'fileno'):\n        cd += ('; filename=\"%s\"' % data.name.split('/')[(- 1)])\n        mt = (mimetypes.guess_type(data.name)[0] or 'application/octet-stream')\n    cd += '\\r\\n'\n    type_string = ('Content-Type: %s\\r\\n\\r\\n' % mt)\n    self._body_parts.append(cd)\n    self._body_parts.append(type_string)\n    content_length += (len(type_string) + len(cd))\n    self._body_parts.append(data)\n    self.headers['Content-Length'] = str(content_length)", "docstring": "Adds data to the HTTP request body.\n\nIf more than one part is added, this is assumed to be a mime-multipart\nrequest. This method is designed to create MIME 1.0 requests as specified\nin RFC 1341.\n\nArgs:\ndata: str or a file-like object containing a part of the request body.\nmime_type: str The MIME type describing the data\nsize: int Required if the data is a file like object. If the data is a\nstring, the size is calculated so this parameter is ignored.", "source": "codesearchnet"}
{"code": "def from_string(contents):\n        \n        lines = contents.split(\"\\n\")\n        num_sites = int(lines[0])\n        coords = []\n        sp = []\n        prop = []\n        coord_patt = re.compile(\n            r\"(\\w+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\\s+\" +\n            r\"([0-9\\-\\.]+)\"\n        )\n        for i in range(2, 2 + num_sites):\n            m = coord_patt.search(lines[i])\n            if m:\n                sp.append(m.group(1))  \n                \n                coords.append([float(j)\n                               for j in [m.group(i) for i in [3, 4, 2]]])\n                prop.append(float(m.group(5)))\n        return ZeoVoronoiXYZ(\n            Molecule(sp, coords, site_properties={'voronoi_radius': prop})\n        )", "docstring": "Creates Zeo++ Voronoi XYZ object from a string.\nfrom_string method of XYZ class is being redefined.\n\nArgs:\ncontents: String representing Zeo++ Voronoi XYZ file.\n\nReturns:\nZeoVoronoiXYZ object", "source": "juraj-google-style"}
{"code": "def _remove_boring_lines(text):\n    lines = text.split('\\n')\n    filtered = [line for line in lines if re.match('[a-zA-z\"\\']', line)]\n    return '\\n'.join(filtered)", "docstring": "Remove lines that do not start with a letter or a quote.\n\nFrom inspecting the data, this seems to leave in most prose and remove\nmost weird stuff.\n\nArgs:\ntext: a string\nReturns:\na string", "source": "codesearchnet"}
{"code": "def create_deferred(self, func, input_layer, deferred_args, deferred_kwargs, name):\n    my_defaults = _defaults\n\n    def _with_method_complete(*args, **kwargs):\n        input_layer = args[0]\n        with input_layer.g.as_default(), defaults_scope(**my_defaults), tf.name_scope(name):\n            return input_layer._method_complete(func(*args, **kwargs))\n    full_args = [input_layer]\n    full_args.extend(deferred_args)\n    partial_context = {}\n    if isinstance(input_layer, _DeferredLayer):\n        partial_context = input_layer._partial_context\n    return _DeferredLayer(input_layer.bookkeeper, scopes.Template(None, _with_method_complete), full_args, deferred_kwargs, scope=input_layer._scope, defaults=input_layer.defaults, partial_context=partial_context)", "docstring": "Creates a deferred node with captured scope.\n\nArgs:\nfunc: The original function to call.\ninput_layer: The input_layer.\ndeferred_args: The arguments that will be used bythe deferred function.\ndeferred_kwargs: The keyword args for the deferred function.\nname: The name of this layer.\nReturns:\nA _DeferredLayer that will execute func in the correct scopes.", "source": "codesearchnet"}
{"code": "def set_datastore_policy(self, func):\n    if (func is None):\n        func = self.default_datastore_policy\n    elif isinstance(func, bool):\n        func = (lambda unused_key, flag=func: flag)\n    self._datastore_policy = func", "docstring": "Set the context datastore policy function.\n\nArgs:\nfunc: A function that accepts a Key instance as argument and returns\na bool indicating if it should use the datastore.  May be None.", "source": "codesearchnet"}
{"code": "def qualNorm(data, qualitative):\n    \n    genes, cells = data.shape\n    clusters = qualitative.shape[1]\n    output = np.zeros((genes, clusters))\n    missing_indices = []\n    qual_indices = []\n    thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0\n    for i in range(genes):\n        if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:\n            missing_indices.append(i)\n            continue\n        qual_indices.append(i)\n        threshold = thresholds[i]\n        data_i = data[i,:]\n        if sparse.issparse(data):\n            data_i = data_i.toarray().flatten()\n        assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)\n        means = means.flatten()\n        high_i = 1\n        low_i = 0\n        if means[0]>means[1]:\n            high_i = 0\n            low_i = 1\n        high_mean = np.median(data_i[assignments==high_i])\n        low_mean = np.median(data_i[assignments==low_i])\n        for k in range(clusters):\n            if qualitative[i,k]>threshold:\n                output[i,k] = high_mean\n            else:\n                output[i,k] = low_mean\n    if missing_indices:\n        assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)\n        for ind in missing_indices:\n            for k in range(clusters):\n                if len(assignments==k)==0:\n                    output[ind, k] = data[ind,:].mean()\n                else:\n                    output[ind, k] = data[ind, assignments==k].mean()\n    return output", "docstring": "Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.\n\nArgs:\ndata (array): 2d array of genes x cells\nqualitative (array): 2d array of numerical data - genes x clusters\n\nReturns:\nArray of starting positions for state estimation or\nclustering, with shape genes x clusters", "source": "juraj-google-style"}
{"code": "def external_ids(self, **kwargs):\n        \n        path = self._get_series_id_season_number_path('external_ids')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the external ids that we have stored for a TV season by season\nnumber.\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def bind_to_storage_buffer(self, binding=0, *, offset=0, size=-1) -> None:\n        \n\n        self.mglo.bind_to_storage_buffer(binding, offset, size)", "docstring": "Bind the buffer to a shader storage buffer.\n\nArgs:\nbinding (int): The shader storage binding.\n\nKeyword Args:\noffset (int): The offset.\nsize (int): The size. Value ``-1`` means all.", "source": "juraj-google-style"}
{"code": "def _replace_variable_with_pattern(match):\n    \n    positional = match.group(\"positional\")\n    name = match.group(\"name\")\n    template = match.group(\"template\")\n    if name is not None:\n        if not template:\n            return _SINGLE_SEGMENT_PATTERN.format(name)\n        elif template == \"**\":\n            return _MULTI_SEGMENT_PATTERN.format(name)\n        else:\n            return _generate_pattern_for_template(template)\n    elif positional == \"*\":\n        return _SINGLE_SEGMENT_PATTERN\n    elif positional == \"**\":\n        return _MULTI_SEGMENT_PATTERN\n    else:\n        raise ValueError(\"Unknown template expression {}\".format(match.group(0)))", "docstring": "Replace a variable match with a pattern that can be used to validate it.\n\nArgs:\nmatch (re.Match): A regular expression match\n\nReturns:\nstr: A regular expression pattern that can be used to validate the\nvariable in an expanded path.\n\nRaises:\nValueError: If an unexpected template expression is encountered.", "source": "juraj-google-style"}
{"code": "def count_params(x):\n    return np.prod(x.shape.as_list())", "docstring": "Returns the static number of elements in a variable or tensor.\n\nArgs:\nx: Variable or tensor.\n\nReturns:\nInteger, the number of scalars in `x`.\n\nExample:\n\n>>> kvar = tf.keras.backend.zeros((2,3))\n>>> tf.keras.backend.count_params(kvar)\n6\n>>> tf.keras.backend.eval(kvar)\narray([[0.,  0.,  0.],\n[0.,  0.,  0.]], dtype=float32)", "source": "github-repos"}
{"code": "def from_bigquery(sql):\n    if isinstance(sql, bq.Query):\n        sql = sql._expanded_sql()\n    parts = sql.split('.')\n    if ((len(parts) == 1) or (len(parts) > 3) or any(((' ' in x) for x in parts))):\n        sql = (('(' + sql) + ')')\n    else:\n        sql = (('`' + sql) + '`')\n    metrics = Metrics(bigquery=sql)\n    return metrics", "docstring": "Create a Metrics instance from a bigquery query or table.\n\nReturns:\na Metrics instance.\n\nArgs:\nsql: A BigQuery table name or a query.", "source": "codesearchnet"}
{"code": "def disassemble(self, start=None, end=None, arch_mode=None):\n    if (arch_mode is None):\n        arch_mode = self.binary.architecture_mode\n    curr_addr = (start if start else self.binary.ea_start)\n    end_addr = (end if end else self.binary.ea_end)\n    while (curr_addr < end_addr):\n        encoding = self.__fetch_instr(curr_addr)\n        asm_instr = self.disassembler.disassemble(encoding, curr_addr, architecture_mode=arch_mode)\n        if (not asm_instr):\n            return\n        (yield (curr_addr, asm_instr, asm_instr.size))\n        curr_addr += asm_instr.size", "docstring": "Disassemble native instructions.\n\nArgs:\nstart (int): Start address.\nend (int): End address.\narch_mode (int): Architecture mode.\n\nReturns:\n(int, Instruction, int): A tuple of the form (address, assembler instruction, instruction size).", "source": "codesearchnet"}
{"code": "def rec_new(self, val):\n    if (val not in self.things):\n        for child in val.children():\n            self.rec_new(child)\n        self.new(val)\n    return val", "docstring": "Recursively add a new value and its children to me.\n\nArgs:\nval (LispVal): The value to be added.\n\nReturns:\nLispVal: The added value.", "source": "codesearchnet"}
{"code": "def set_name(self, name, anyway=False):\n        \n        set_name(self.startEA, name, anyway=anyway)", "docstring": "Set Function Name.\n\nDefault behavior throws an exception when setting to a name that already exists in\nthe IDB. to make IDA automatically add a counter to the name (like in the GUI,)\nuse `anyway=True`.\n\nArgs:\nname: Desired name.\nanyway: `True` to set anyway.", "source": "juraj-google-style"}
{"code": "def get_timing_signal(length,\n                      min_timescale=1,\n                      max_timescale=1e4,\n                      num_timescales=16):\n  \n  positions = to_float(tf.range(length))\n  log_timescale_increment = (\n      math.log(max_timescale / min_timescale) / (num_timescales - 1))\n  inv_timescales = min_timescale * tf.exp(\n      to_float(tf.range(num_timescales)) * -log_timescale_increment)\n  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)\n  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)", "docstring": "Create Tensor of sinusoids of different frequencies.\n\nArgs:\nlength: Length of the Tensor to create, i.e. Number of steps.\nmin_timescale: a float\nmax_timescale: a float\nnum_timescales: an int\n\nReturns:\nTensor of shape (length, 2*num_timescales)", "source": "juraj-google-style"}
{"code": "def dump(node, ast, annotate_fields=True, include_attributes=True, indent='  '):\n\n    def _format(node, level=0):\n        \n        if isinstance(node, ast.AST):\n            fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]\n            if include_attributes and node._attributes:\n                fields.extend([(a, _format(getattr(node, a), level)) for a in node._attributes])\n            return ''.join([node.__class__.__name__, '(', ', '.join(('%s=%s' % field for field in fields) if annotate_fields else (b for a, b in fields)), ')'])\n        elif isinstance(node, list):\n            lines = ['[']\n            lines.extend((indent * (level + 2) + _format(x, level + 2) + ',' for x in node))\n            if len(lines) > 1:\n                lines.append(indent * (level + 1) + ']')\n            else:\n                lines[-1] += ']'\n            return '\\n'.join(lines)\n        return repr(node)\n    if not isinstance(node, ast.AST):\n        raise TypeError(f'expected AST, got {node.__class__!r}')\n    return _format(node)", "docstring": "Return a formatted dump of the tree in *node*.\n\nThis is mainly useful for debugging purposes.  The returned string will show\nthe names and the values for fields.  This makes the code impossible to\nevaluate, so if evaluation is wanted *annotate_fields* must be set to False.\nAttributes such as line numbers and column offsets are dumped by default. If\nthis is not wanted, *include_attributes* can be set to False.\n\nArguments:\nnode: Top AST node.\nast: An module providing an AST class hierarchy.\nannotate_fields: Show field annotations.\ninclude_attributes: Show all attributes.\nindent: Indentation string.\n\nReturns:\nA formatted tree.", "source": "github-repos"}
{"code": "def get(self, url, params=None, **kwargs):\n    return self.call_api('GET', url, params=params, **kwargs)", "docstring": "Call the API with a GET request.\n\nArgs:\nurl (str): Resource location relative to the base URL.\nparams (dict or None): Query-string parameters.\n\nReturns:\nResultParser or ErrorParser.", "source": "codesearchnet"}
{"code": "def from_options(cls, options):\n    if cls != Environment:\n        raise NotImplementedError\n    portable_options = options.view_as(PortableOptions)\n    environment_type = portable_options.environment_type\n    if not environment_type:\n        environment_urn = common_urns.environments.DOCKER.urn\n    elif environment_type.startswith('beam:env:'):\n        environment_urn = environment_type\n    elif environment_type == 'LOOPBACK':\n        environment_urn = python_urns.EMBEDDED_PYTHON_LOOPBACK\n    else:\n        try:\n            environment_urn = getattr(common_urns.environments, environment_type).urn\n        except AttributeError:\n            raise ValueError('Unknown environment type: %s' % environment_type)\n    env_class = Environment.get_env_cls_from_urn(environment_urn)\n    return env_class.from_options(portable_options)", "docstring": "Creates an Environment object from PortableOptions.\n\nArgs:\noptions: The PortableOptions object.", "source": "github-repos"}
{"code": "def implement(self, implementation, for_type=None, for_types=None):\n    unbound_implementation = self.__get_unbound_function(implementation)\n    for_types = self.__get_types(for_type, for_types)\n    for t in for_types:\n        self._write_lock.acquire()\n        try:\n            self.implementations.append((t, unbound_implementation))\n        finally:\n            self._write_lock.release()", "docstring": "Registers an implementing function for for_type.\n\nArguments:\nimplementation: Callable implementation for this type.\nfor_type: The type this implementation applies to.\nfor_types: Same as for_type, but takes a tuple of types.\n\nfor_type and for_types cannot both be passed (for obvious reasons.)\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def _ParseApplicationPasswordRecord(self, parser_mediator, record):\n    \n    key = record.get('_key_', None)\n    if not key or not key.startswith(b'ssgp'):\n      raise errors.ParseError((\n          'Unsupported application password record key value does not start '\n          'with: \"ssgp\".'))\n\n    event_data = KeychainApplicationRecordEventData()\n    event_data.account_name = self._ParseBinaryDataAsString(\n        parser_mediator, record['acct'])\n    event_data.comments = self._ParseBinaryDataAsString(\n        parser_mediator, record['crtr'])\n    event_data.entry_name = self._ParseBinaryDataAsString(\n        parser_mediator, record['PrintName'])\n    ssgp_hash = codecs.encode(key[4:], 'hex')\n    event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')\n    event_data.text_description = self._ParseBinaryDataAsString(\n        parser_mediator, record['desc'])\n\n    date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])\n    if date_time:\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])\n    if date_time:\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts the information from an application password record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord (dict[str, object]): database record.\n\nRaises:\nParseError: if Internet password record cannot be parsed.", "source": "juraj-google-style"}
{"code": "def quaternion_from_axis_rotation(angle, axis):\n    \n    out = np.zeros(4, dtype=float)\n    if axis == 'x':\n        out[1] = 1\n    elif axis == 'y':\n        out[2] = 1\n    elif axis == 'z':\n        out[3] = 1\n    else:\n        raise ValueError('Invalid axis input.')\n    out *= math.sin(angle/2.0)\n    out[0] = math.cos(angle/2.0)\n    return Quaternion(out)", "docstring": "Return quaternion for rotation about given axis.\n\nArgs:\nangle (float): Angle in radians.\naxis (str): Axis for rotation\n\nReturns:\nQuaternion: Quaternion for axis rotation.\n\nRaises:\nValueError: Invalid input axis.", "source": "juraj-google-style"}
{"code": "def cancel(self, workflow_id):\n    self.logger.debug(('Canceling workflow: ' + workflow_id))\n    url = ('%(wf_url)s/%(wf_id)s/cancel' % {'wf_url': self.workflows_url, 'wf_id': workflow_id})\n    r = self.gbdx_connection.post(url, data='')\n    r.raise_for_status()", "docstring": "Cancels a running workflow.\n\nArgs:\nworkflow_id (str): Workflow id.\n\nReturns:\nNothing", "source": "codesearchnet"}
{"code": "def parse_rdf_payload(self, data, headers):\n    if headers['Content-Type'].startswith('text/plain'):\n        logger.debug('text/plain Content-Type detected, using application/n-triples for parser')\n        parse_format = 'application/n-triples'\n    else:\n        parse_format = headers['Content-Type']\n    if (';charset' in parse_format):\n        parse_format = parse_format.split(';')[0]\n    graph = rdflib.Graph().parse(data=data.decode('utf-8'), format=parse_format)\n    return graph", "docstring": "small function to parse RDF payloads from various repository endpoints\n\nArgs:\ndata (response.data): data from requests response\nheaders (response.headers): headers from requests response\n\nReturns:\n(rdflib.Graph): parsed graph", "source": "codesearchnet"}
{"code": "def build_plans(self):\n    if (not self.__build_plans):\n        self.__build_plans = BuildPlans(self.__connection)\n    return self.__build_plans", "docstring": "Gets the Build Plans API client.\n\nReturns:\nBuildPlans:", "source": "codesearchnet"}
{"code": "def testWithSkip(self, verify_fn, symbolic_checkpoint, num_skips):\n\n    def build_dataset():\n\n        def my_map(x):\n            if x == 0:\n                return dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3])\n            elif x == 1:\n                return dataset_ops.Dataset.from_tensor_slices([4, 5, 6, 7])\n            else:\n                return dataset_ops.Dataset.from_tensor_slices([8, 9, 10, 11])\n        indices = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])\n        dataset = indices.flat_map(my_map)\n        dataset = dataset.skip(num_skips)\n        options = options_lib.Options()\n        options.experimental_symbolic_checkpoint = symbolic_checkpoint\n        return dataset.with_options(options)\n    verify_fn(self, build_dataset, num_outputs=3 * 4 - num_skips)", "docstring": "Test `.flat_map().skip()` checkpointing behavior.\n\n`SkipInternal` and `GetNextInternal` are separate functions\nbut with slightly different implementations.\nTherefore, we should test this op's behavior when used with `.skip()`.\n\nArgs:\nverify_fn: Verify the correctness of this dataset's checkpointing.\nsymbolic_checkpoint: Whether symbolic checkpointing is turned on.\nnum_skips: `.skip(num_skips)`", "source": "github-repos"}
{"code": "def get_help_datapacks(filepath, prefix=\"!\"):\n    \n\n    help_contents = get_help_data(filepath)\n\n    datapacks = []\n\n    \n    for d in help_contents:\n        heading = d\n        content = \"\"\n\n        if \"commands\" in d.lower():\n            for c in help_contents[d]:\n                if \"name\" not in c:\n                    continue\n\n                content += \"- `\"\n                command = prefix + c[\"name\"]\n                content += \"{}\".format(command)\n                if \"params\" in c:\n                    for param in c[\"params\"]:\n                        content += \" [{}]\".format(param)\n                content += \"`: \"\n                if \"description\" in c:\n                    content += c[\"description\"]\n                content += \"\\n\"\n        else:\n            content += help_contents[d]\n\n        datapacks.append((heading, content, False))\n\n    return datapacks", "docstring": "Load help text from a file and give it as datapacks\n\nArgs:\nfilepath (str): The file to load help text from\nprefix (str): The prefix to use for commands\n\nReturns:\ndatapacks (list): The datapacks from the file", "source": "juraj-google-style"}
{"code": "def filter_bboxes(bboxes, rows, cols, min_area=0.0, min_visibility=0.0):\n    resulting_boxes = []\n    for bbox in bboxes:\n        transformed_box_area = calculate_bbox_area(bbox, rows, cols)\n        bbox[:4] = np.clip(bbox[:4], 0, 1.0)\n        clipped_box_area = calculate_bbox_area(bbox, rows, cols)\n        if ((not transformed_box_area) or ((clipped_box_area / transformed_box_area) <= min_visibility)):\n            continue\n        else:\n            bbox[:4] = np.clip(bbox[:4], 0, 1.0)\n        if (calculate_bbox_area(bbox, rows, cols) <= min_area):\n            continue\n        resulting_boxes.append(bbox)\n    return resulting_boxes", "docstring": "Remove bounding boxes that either lie outside of the visible area by more then min_visibility\nor whose area in pixels is under the threshold set by `min_area`. Also it crops boxes to final image size.\n\nArgs:\nbboxes (list): List of bounding box with coordinates in the format used by albumentations\nrows (int): Image rows.\ncols (int): Image cols.\nmin_area (float): minimum area of a bounding box. All bounding boxes whose visible area in pixels\nis less than this value will be removed. Default: 0.0.\nmin_visibility (float): minimum fraction of area for a bounding box to remain this box in list. Default: 0.0.", "source": "codesearchnet"}
{"code": "def draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):\n    \n    _check_pil()\n\n    font_files = _find_font_file(font)\n    if len(font_files) == 0:\n        logger.warn(\"Failed to lookup font '{}', falling back to default\".format(font))\n        font = ImageFont.load_default()\n    else:\n        font = ImageFont.truetype(font_files[0], font_size)\n\n    \n    img = Image.fromarray(img)\n    draw = ImageDraw.Draw(img)\n    draw.text(position, text, fill=color, font=font)\n    return np.asarray(img)", "docstring": "Draws text over the image. Requires PIL.\n\nArgs:\nimg: The image to use.\ntext: The text string to overlay.\nposition: The text (x, y) position. (Default value = (10, 10))\nfont: The ttf or open type font to use. (Default value = 'FreeSans.ttf')\nfont_size: The text font size. (Default value = 12)\ncolor: The (r, g, b) values for text color. (Default value = (0, 0, 0))\n\nReturns: Image overlayed with text.", "source": "juraj-google-style"}
{"code": "def clone_source_dir(source_dir, dest_dir):\n    \n    if os.path.isdir(dest_dir):\n        print('removing', dest_dir)\n        shutil.rmtree(dest_dir)\n    shutil.copytree(source_dir, dest_dir)", "docstring": "Copies the source Protobuf files into a build directory.\n\nArgs:\nsource_dir (str): source directory of the Protobuf files\ndest_dir (str): destination directory of the Protobuf files", "source": "juraj-google-style"}
{"code": "def calculate_mean_and_variance_from_sample_paths(samples, num_samples, dtype):\n    log_s = tf.math.log(samples)\n    mean = tf.reduce_mean(log_s, axis=-3, keepdims=True)\n    var = tf.reduce_mean((log_s - mean) ** 2, axis=-3, keepdims=True)\n    mean = tf.squeeze(mean, axis=[-1, -3])\n    var = tf.squeeze(var, axis=[-1, -3])\n    std_err_mean = tf.math.sqrt(var / num_samples)\n    std_err_var = var * tf.math.sqrt(tf.constant(2.0, dtype=dtype) / (tf.constant(num_samples, dtype=dtype) - tf.constant(1.0, dtype=dtype)))\n    return (mean, var, std_err_mean, std_err_var)", "docstring": "Returns the mean and variance of log(`samples`).\n\nArgs:\nsamples: A real `Tensor` of shape [batch_shape, `num_samples`, num_times, 1]\ncontaining the samples of random paths drawn from an Ito process.\nnum_samples: A scalar integer. The number of sample paths in `samples`.\ndtype: The default dtype to use when converting values to `Tensor`s.\n\nReturns:\nA tuple of (mean, variance, standard_error of the mean,\nstandard_error of the variance) of the log of the samples.  Where the\ncomponents of the tuple have shape [batch_shape, num_times].", "source": "github-repos"}
{"code": "def add_multiple_servers(self, information, timeout=(- 1)):\n    uri = '{}/discovery'.format(self.URI)\n    return self.create(information, uri=uri, timeout=timeout)", "docstring": "Adds multiple rack-mount servers for management by the appliance. This API initiates the asynchronous addition of\nsupported server models.\n\nNote: Servers in an enclosure are added by adding the enclosure resource. This is\nonly supported on appliances that support rack-mounted servers.\n\nThis is only supported for api version 600\n\nArgs:\ninformation (dict): Objects to create\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Created rack-mount servers.", "source": "codesearchnet"}
{"code": "def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList') -> 'QubitOrder':\n    if isinstance(val, collections.Iterable):\n        return QubitOrder.explicit(val)\n    if isinstance(val, QubitOrder):\n        return val\n    raise ValueError(\"Don't know how to interpret <{}> as a Basis.\".format(val))", "docstring": "Converts a value into a basis.\n\nArgs:\nval: An iterable or a basis.\n\nReturns:\nThe basis implied by the value.", "source": "codesearchnet"}
{"code": "def map_(input_layer, fn):\n  \n  if not input_layer.is_sequence():\n    raise ValueError('Can only map a sequence.')\n  return [fn(x) for x in input_layer]", "docstring": "Maps the given function across this sequence.\n\nTo map an entire template across the sequence, use the `as_fn` method on the\ntemplate.\n\nArgs:\ninput_layer: The input tensor.\nfn: A function of 1 argument that is applied to each item in the sequence.\nReturns:\nA new sequence Pretty Tensor.\nRaises:\nValueError: If the input_layer does not hold a sequence.", "source": "juraj-google-style"}
{"code": "def is_closed(self):\n        \n        old_training_data = self.training_data\n        self.training_data = {x: [] for x in self.sm_vector}\n        for t in self.smi_vector:\n            src_state = t[:-1]\n            symbol = t[-1:]\n            found = False\n            for dst_state in self.sm_vector:\n                if self.observation_table[dst_state] == self.observation_table[t]:\n                    self._add_training_data(src_state, dst_state, symbol)\n                    found = True\n                    break\n            if not found:\n                return False, t\n\n        assert self.training_data != old_training_data, \\\n            \"No update happened from previous round. The algo will loop infinetely\"\n        return True, None", "docstring": "_check if the observation table is closed.\nArgs:\nNone\nReturns:\ntuple (bool, str): True if the observation table is closed and false otherwise.\nIf the table is not closed the escaping string is returned.", "source": "juraj-google-style"}
{"code": "def show(self, frame):\n    if (len(frame.shape) != 3):\n        raise ValueError('frame should have shape with only 3 dimensions')\n    if (not self.is_open):\n        self.open()\n    self._window.clear()\n    self._window.switch_to()\n    self._window.dispatch_events()\n    image = ImageData(frame.shape[1], frame.shape[0], 'RGB', frame.tobytes(), pitch=(frame.shape[1] * (- 3)))\n    image.blit(0, 0, width=self._window.width, height=self._window.height)\n    self._window.flip()", "docstring": "Show an array of pixels on the window.\n\nArgs:\nframe (numpy.ndarray): the frame to show on the window\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def refresh(self, only_closed=False):\n        \n        if only_closed:\n            opened = filter(self.__check_port, self.__closed)\n            self.__closed = self.__closed.difference(opened)\n            self.__ports = self.__ports.union(opened)\n        else:\n            ports = self.__closed.union(self.__ports)\n            self.__ports = set(filter(self.__check_port, ports))\n            self.__closed = ports.difference(self.__ports)", "docstring": "refresh ports status\nArgs:\nonly_closed - check status only for closed ports", "source": "juraj-google-style"}
{"code": "def reset_index(self, **kwargs):\n    drop = kwargs.get('drop', False)\n    new_index = pandas.RangeIndex(len(self.index))\n    if (not drop):\n        if isinstance(self.index, pandas.MultiIndex):\n            new_column_names = pandas.Index(self.index.names)\n            new_columns = new_column_names.append(self.columns)\n            index_data = pandas.DataFrame(list(zip(*self.index))).T\n            result = self.data.from_pandas(index_data).concat(1, self.data)\n            return self.__constructor__(result, new_index, new_columns)\n        else:\n            new_column_name = (self.index.name if (self.index.name is not None) else ('index' if ('index' not in self.columns) else 'level_0'))\n            new_columns = self.columns.insert(0, new_column_name)\n            result = self.insert(0, new_column_name, self.index)\n            return self.__constructor__(result.data, new_index, new_columns)\n    else:\n        return self.__constructor__(self.data.copy(), new_index, self.columns.copy(), self._dtype_cache)", "docstring": "Removes all levels from index and sets a default level_0 index.\n\nReturns:\nA new QueryCompiler with updated data and reset index.", "source": "codesearchnet"}
{"code": "def calculate_parity(n):\n    \n    if not is_natural(n):\n        raise ValueError('Expected n to be a positive integer.')\n\n    y = 0\n    n = abs(n)\n    while n:\n        y += n & 1\n        n = n >> 1\n    return y & 1", "docstring": "Calculates and returns the parity of a number.\n\nThe parity of a number is ``1`` if the number has an odd number of ones\nin its binary representation, otherwise ``0``.\n\nArgs:\nn (int): the number whose parity to calculate\n\nReturns:\n``1`` if the number has an odd number of ones, otherwise ``0``.\n\nRaises:\nValueError: if ``n`` is less than ``0``.", "source": "juraj-google-style"}
{"code": "def add_tile(self, tile_source, **kw):\n        \n        tile_renderer = TileRenderer(tile_source=tile_source, **kw)\n        self.renderers.append(tile_renderer)\n        return tile_renderer", "docstring": "Adds new ``TileRenderer`` into ``Plot.renderers``\n\nArgs:\ntile_source (TileSource) : a tile source instance which contain tileset configuration\n\nKeyword Arguments:\nAdditional keyword arguments are passed on as-is to the tile renderer\n\nReturns:\nTileRenderer : TileRenderer", "source": "juraj-google-style"}
{"code": "def _dataset_merge_hdx_update(self, update_resources, update_resources_by_name, remove_additional_resources, create_default_views, hxl_update):\n    merge_two_dictionaries(self.data, self.old_data)\n    if ('resources' in self.data):\n        del self.data['resources']\n    updated_resources = self.old_data.get('resources', None)\n    filestore_resources = list()\n    if (update_resources and updated_resources):\n        ignore_fields = ['package_id']\n        if update_resources_by_name:\n            resource_names = set()\n            for resource in self.resources:\n                resource_name = resource['name']\n                resource_names.add(resource_name)\n                for updated_resource in updated_resources:\n                    if (resource_name == updated_resource['name']):\n                        logger.warning(('Resource exists. Updating %s' % resource_name))\n                        self._dataset_merge_filestore_resource(resource, updated_resource, filestore_resources, ignore_fields)\n                        break\n            updated_resource_names = set()\n            for updated_resource in updated_resources:\n                updated_resource_name = updated_resource['name']\n                updated_resource_names.add(updated_resource_name)\n                if (not (updated_resource_name in resource_names)):\n                    self._dataset_merge_filestore_newresource(updated_resource, ignore_fields, filestore_resources)\n            if remove_additional_resources:\n                resources_to_delete = list()\n                for (i, resource) in enumerate(self.resources):\n                    resource_name = resource['name']\n                    if (resource_name not in updated_resource_names):\n                        logger.warning(('Removing additional resource %s!' % resource_name))\n                        resources_to_delete.append(i)\n                for i in sorted(resources_to_delete, reverse=True):\n                    del self.resources[i]\n        else:\n            for (i, updated_resource) in enumerate(updated_resources):\n                if (len(self.resources) > i):\n                    updated_resource_name = updated_resource['name']\n                    resource = self.resources[i]\n                    resource_name = resource['name']\n                    logger.warning(('Resource exists. Updating %s' % resource_name))\n                    if (resource_name != updated_resource_name):\n                        logger.warning(('Changing resource name to: %s' % updated_resource_name))\n                    self._dataset_merge_filestore_resource(resource, updated_resource, filestore_resources, ignore_fields)\n                else:\n                    self._dataset_merge_filestore_newresource(updated_resource, ignore_fields, filestore_resources)\n            if remove_additional_resources:\n                resources_to_delete = list()\n                for (i, resource) in enumerate(self.resources):\n                    if (len(updated_resources) <= i):\n                        logger.warning(('Removing additional resource %s!' 
% resource['name']))\n                        resources_to_delete.append(i)\n                for i in sorted(resources_to_delete, reverse=True):\n                    del self.resources[i]\n    if self.resources:\n        self.data['resources'] = self._convert_hdxobjects(self.resources)\n    ignore_field = self.configuration['dataset'].get('ignore_on_update')\n    self.check_required_fields(ignore_fields=[ignore_field])\n    self._save_to_hdx('update', 'id')\n    self._add_filestore_resources(filestore_resources, create_default_views, hxl_update)", "docstring": "Helper method to check if dataset or its resources exist and update them\n\nArgs:\nupdate_resources (bool): Whether to update resources\nupdate_resources_by_name (bool): Compare resource names rather than position in list\nremove_additional_resources (bool): Remove additional resources found in dataset (if updating)\ncreate_default_views (bool): Whether to call package_create_default_resource_views.\nhxl_update (bool): Whether to call package_hxl_update.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _add_query_parameter(url, name, value):\n    if (value is None):\n        return url\n    else:\n        return update_query_params(url, {name: value})", "docstring": "Adds a query parameter to a url.\n\nReplaces the current value if it already exists in the URL.\n\nArgs:\nurl: string, url to add the query parameter to.\nname: string, query parameter name.\nvalue: string, query parameter value.\n\nReturns:\nUpdated query parameter. Does not update the url if value is None.", "source": "codesearchnet"}
{"code": "def index_resources(self):\n    if (not self.__index_resources):\n        self.__index_resources = IndexResources(self.__connection)\n    return self.__index_resources", "docstring": "Gets the Index Resources API client.\n\nReturns:\nIndexResources:", "source": "codesearchnet"}
{"code": "def FetchURNsForAllSignedBinaries(token\n                                 ):\n  \n  if _ShouldUseLegacyDatastore():\n    urns = []\n    aff4_roots = [GetAFF4PythonHackRoot(), GetAFF4ExecutablesRoot()]\n    for _, descendant_urns in aff4.FACTORY.RecursiveMultiListChildren(\n        aff4_roots):\n      urns.extend(descendant_urns)\n    aff4_streams = aff4.FACTORY.MultiOpen(\n        urns, aff4_type=collects.GRRSignedBlob, mode=\"r\", token=token)\n    return [stream.urn for stream in aff4_streams]\n  else:\n    return [\n        _SignedBinaryURNFromID(i)\n        for i in data_store.REL_DB.ReadIDsForAllSignedBinaries()\n    ]", "docstring": "Returns URNs for all signed binaries in the datastore.\n\nArgs:\ntoken: ACL token to use with the legacy (non-relational) datastore.", "source": "juraj-google-style"}
{"code": "def Upgrade(self, aff4_class):\n    _ValidateAFF4Type(aff4_class)\n    if (self.__class__ == aff4_class):\n        return self\n    if (not isinstance(aff4_class, type)):\n        raise InstantiationError(('aff4_class=%s must be a type' % aff4_class))\n    if (not issubclass(aff4_class, AFF4Object)):\n        raise InstantiationError(('aff4_class=%s must be a subclass of AFF4Object.' % aff4_class))\n    if isinstance(self, aff4_class):\n        return self\n    result = aff4_class(self.urn, mode=self.mode, clone=self, parent=self.parent, token=self.token, age=self.age_policy, object_exists=self.object_exists, follow_symlinks=self.follow_symlinks, aff4_type=self.aff4_type, mutation_pool=self.mutation_pool, transaction=self.transaction)\n    result.symlink_urn = self.urn\n    result.Initialize()\n    return result", "docstring": "Upgrades this object to the type specified.\n\nAFF4 Objects can be upgraded on the fly to other type - As long as the new\ntype is derived from the current type. This feature allows creation of\nplaceholder objects which can later be upgraded to the fully featured\nobject.\n\nNote: It is not allowed to downgrade an object if that would result in a\nloss of information (since the new object has a smaller schema). This method\ntries to store the new object with its new attributes and will fail if any\nattributes can not be mapped.\n\nArgs:\naff4_class: A string representing the new class.\n\nReturns:\nan instance of the new class with all the same attributes as this current\nobject.\n\nRaises:\nValueError: When the object to upgrade is locked.\nAttributeError: When the new object can not accept some of the old\nattributes.\nInstantiationError: When we cannot instantiate the object type class.", "source": "codesearchnet"}
{"code": "def get_event(self, event_key):\n    event = self.event_key_map.get(event_key)\n    if event:\n        return event\n    self.logger.error(('Event \"%s\" is not in datafile.' % event_key))\n    self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR))\n    return None", "docstring": "Get event for the provided event key.\n\nArgs:\nevent_key: Event key for which event is to be determined.\n\nReturns:\nEvent corresponding to the provided event key.", "source": "codesearchnet"}
{"code": "def _events(self, using_url, filters=None, limit=None):\n        \n\n        \n        if not isinstance(limit, (int, NoneType)):\n            limit = None\n\n        \n        if filters is None:\n            filters = []\n\n        \n        if isinstance(filters, string_types):\n            filters = filters.split(',')\n\n        \n        if not self.blocking:\n            self.blocking = True\n\n        \n        while self.blocking:\n            params = {\n                'since': self._last_seen_id,\n                'limit': limit,\n            }\n\n            if filters:\n                params['events'] = ','.join(map(str, filters))\n\n            try:\n                data = self.get(using_url, params=params, raw_exceptions=True)\n            except (ConnectTimeout, ConnectionError) as e:\n                \n                data = None\n            except Exception as e:\n                reraise('', e)\n\n            if data:\n                \n                self._last_seen_id = data[-1]['id']\n                for event in data:\n                    \n                    self._count += 1\n                    yield event", "docstring": "A long-polling method that queries Syncthing for events..\n\nArgs:\nusing_url (str): REST HTTP endpoint\nfilters (List[str]): Creates an \"event group\" in Syncthing to\nonly receive events that have been subscribed to.\nlimit (int): The number of events to query in the history\nto catch up to the current state.\n\nReturns:\ngenerator[dict]", "source": "juraj-google-style"}
{"code": "def time_pad(x, filter_size, dilations):\n  \n  x_shape = common_layers.shape_list(x)\n  if filter_size == [1, 1, 1]:\n    return x\n  _, h, w = filter_size\n  eff_h = h + (h - 1)*(dilations[2] - 1)\n  eff_w = w + (w - 1)*(dilations[3] - 1)\n  a = (eff_h - 1) \n  b = (eff_w - 1) \n  c = filter_size[0] - 1\n\n  \n  padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]]\n\n  \n  \n  x_bias = tf.zeros(x_shape[:-1] + [1])\n  x_bias = tf.pad(x_bias, padding, constant_values=1)\n  x_pad = tf.pad(x, padding)\n  x_pad = tf.concat((x_bias, x_pad), axis=-1)\n  return x_pad", "docstring": "Pad left across time and pad valid across the spatial components.\n\nAlso concats a binary feature that indicates if a feature is padded or not.\n\nArgs:\nx: 5-D Tensor, (NTHWC)\nfilter_size: list of ints\ndilations: list of ints, dilations - 1 specifies the number of holes\nbetween two filter elements.\nReturns:\nx_pad: 5-D Tensor.", "source": "juraj-google-style"}
{"code": "def client(self, service_name, version, component, **kw):\n        \n        service = _create_service_api(\n            self._credentials,\n            service_name,\n            version,\n            kw.get('developer_key'),\n            kw.get('cache_discovery', False),\n            self._http or _build_http())\n\n        return ServiceClient(\n            gcp_service=service,\n            component=component,\n            credentials=self._credentials,\n            rate_limiter=self._rate_limiter,\n            use_cached_http=self._use_cached_http,\n            http=self._http)", "docstring": "Safely initialize a repository class to a property.\n\nArgs:\nrepository_class (class): The class to initialize.\nversion (str): The gcp service version for the repository.\n\nReturns:\nobject: An instance of repository_class.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.TranslateText = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/TranslateText\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.TranslateTextRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.TranslateTextResponse.FromString,\n        )\n        self.DetectLanguage = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/DetectLanguage\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.DetectLanguageRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.DetectLanguageResponse.FromString,\n        )\n        self.GetSupportedLanguages = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/GetSupportedLanguages\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.GetSupportedLanguagesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.SupportedLanguages.FromString,\n        )\n        self.BatchTranslateText = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/BatchTranslateText\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.BatchTranslateTextRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.CreateGlossary = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/CreateGlossary\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.CreateGlossaryRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.ListGlossaries = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/ListGlossaries\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.ListGlossariesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.ListGlossariesResponse.FromString,\n        )\n        self.GetGlossary = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/GetGlossary\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.GetGlossaryRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.Glossary.FromString,\n        )\n        self.DeleteGlossary = channel.unary_unary(\n            \"/google.cloud.translation.v3beta1.TranslationService/DeleteGlossary\",\n            request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.DeleteGlossaryRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", 
"docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def sendmail(subject, text, mailto, sender=None):\n    \n    def user_at_host():\n        from socket import gethostname\n        return os.getlogin() + \"@\" + gethostname()\n\n    \n    try:\n        sender = user_at_host() if sender is None else sender\n    except OSError:\n        sender = 'abipyscheduler@youknowwhere'\n\n    if is_string(mailto): mailto = [mailto]\n\n    from email.mime.text import MIMEText\n    mail = MIMEText(text)\n    mail[\"Subject\"] = subject\n    mail[\"From\"] = sender\n    mail[\"To\"] = \", \".join(mailto)\n\n    msg = mail.as_string()\n\n    \n    \n    from subprocess import Popen, PIPE\n    import sys\n\n    sendmail = which(\"sendmail\")\n    if sendmail is None: return -1\n    if sys.version_info[0] < 3:\n        p = Popen([sendmail, \"-t\"], stdin=PIPE, stderr=PIPE)\n    else:\n        \n        p = Popen([sendmail, \"-t\"], stdin=PIPE, stderr=PIPE, universal_newlines=True)\n\n    outdata, errdata = p.communicate(msg)\n    return len(errdata)", "docstring": "Sends an e-mail with unix sendmail.\n\nArgs:\nsubject: String with the subject of the mail.\ntext: String with the body of the mail.\nmailto: String or list of string with the recipients.\nsender: string with the sender address.\nIf sender is None, username@hostname is used.\n\nReturns:\nExit status", "source": "juraj-google-style"}
{"code": "def match_alphabet(self, pattern):\n    \n    s = {}\n    for char in pattern:\n      s[char] = 0\n    for i in range(len(pattern)):\n      s[pattern[i]] |= 1 << (len(pattern) - i - 1)\n    return s", "docstring": "Initialise the alphabet for the Bitap algorithm.\n\nArgs:\npattern: The text to encode.\n\nReturns:\nHash of character locations.", "source": "juraj-google-style"}
{"code": "def GetHashersInformation(cls):\n    hashers_information = []\n    for (_, hasher_class) in cls.GetHasherClasses():\n        description = getattr(hasher_class, 'DESCRIPTION', '')\n        hashers_information.append((hasher_class.NAME, description))\n    return hashers_information", "docstring": "Retrieves the hashers information.\n\nReturns:\nlist[tuple]: containing:\n\nstr: hasher name.\nstr: hasher description.", "source": "codesearchnet"}
{"code": "def fft(x, axis=(- 1), padding_samples=0):\n    if (padding_samples > 0):\n        padded = np.concatenate([x, np.zeros((len(x), padding_samples), dtype=x.dtype)], axis=axis)\n    else:\n        padded = x\n    transformed = np.fft.rfft(padded, axis=axis, norm='ortho')\n    sr = audio_sample_rate(int((Seconds(1) / x.dimensions[axis].frequency)))\n    scale = LinearScale.from_sample_rate(sr, transformed.shape[(- 1)])\n    new_dimensions = list(x.dimensions)\n    new_dimensions[axis] = FrequencyDimension(scale)\n    return ArrayWithUnits(transformed, new_dimensions)", "docstring": "Apply an FFT along the given dimension, and with the specified amount of\nzero-padding\n\nArgs:\nx (ArrayWithUnits): an :class:`~zounds.core.ArrayWithUnits` instance\nwhich has one or more :class:`~zounds.timeseries.TimeDimension`\naxes\naxis (int): The axis along which the fft should be applied\npadding_samples (int): The number of padding zeros to apply along\naxis before performing the FFT", "source": "codesearchnet"}
{"code": "def FindMessageTypeByName(self, full_name):\n    \n\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if full_name not in self._descriptors:\n      self._FindFileContainingSymbolInDb(full_name)\n    return self._descriptors[full_name]", "docstring": "Loads the named descriptor from the pool.\n\nArgs:\nfull_name: The full name of the descriptor to load.\n\nReturns:\nThe descriptor for the named type.\n\nRaises:\nKeyError: if the message cannot be found in the pool.", "source": "juraj-google-style"}
{"code": "def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name,\n                                  link=None):\n    \n    if link is None:\n        endpoint = ''.join([get_rm_endpoint(),\n                            '/subscriptions/', subscription_id,\n                            '/resourceGroups/', resource_group,\n                            '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                            '/virtualMachines?$expand=instanceView&$select=instanceView',\n                            '&api-version=', COMP_API])\n    else:\n        endpoint = link\n    return do_get(endpoint, access_token)", "docstring": "Gets one page of a paginated list of scale set VM instance views.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\nlink (str): Optional link to URI to get list (as part of a paginated API query).\n\nReturns:\nHTTP response. JSON body of list of VM instance views.", "source": "juraj-google-style"}
{"code": "def archs(self, as_list=False):\n        \n\n        archs = self.arch_list().split('/')\n\n        if as_list:\n            return archs\n\n        return set(archs)", "docstring": "Return all of the architectures for this target.\n\nArgs:\nas_list (bool): Return a list instead of the default set object.\n\nReturns:\nset or list: All of the architectures used in this TargetSettings object.", "source": "juraj-google-style"}
{"code": "def get(self, volume_id):\n    return self.prepare_model(self.client.api.inspect_volume(volume_id))", "docstring": "Get a volume.\n\nArgs:\nvolume_id (str): Volume name.\n\nReturns:\n(:py:class:`Volume`): The volume.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the volume does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def get_label(self, label, params=None):\n        \n        return self.label(label, action='GET', params=params)", "docstring": "Gets a security label from a Indicator/Group/Victim\nArgs:\nlabel: The name of the Security Label\nparams:", "source": "juraj-google-style"}
{"code": "def get_course_and_course_run(self, course_run_id):\n    course_id = parse_course_key(course_run_id)\n    course = self.get_course_details(course_id)\n    course_run = None\n    if course:\n        course_run = None\n        course_runs = [course_run for course_run in course['course_runs'] if (course_run['key'] == course_run_id)]\n        if course_runs:\n            course_run = course_runs[0]\n    return (course, course_run)", "docstring": "Return the course and course run metadata for the given course run ID.\n\nArguments:\ncourse_run_id (str): The course run ID.\n\nReturns:\ntuple: The course metadata and the course run metadata.", "source": "codesearchnet"}
{"code": "async def send_message(  \n        self, request: str, response_expected: bool, **kwargs: Any\n    ) -> Response:\n        \n        headers = dict(self.DEFAULT_HEADERS)\n        headers.update(kwargs.pop(\"headers\", {}))\n\n        response = await self.client.fetch(\n            self.endpoint, method=\"POST\", body=request, headers=headers, **kwargs\n        )\n\n        return Response(response.body.decode(), raw=response)", "docstring": "Transport the message to the server and return the response.\n\nArgs:\nrequest: The JSON-RPC request string.\nresponse_expected: Whether the request expects a response.\n\nReturns:\nA Response object.", "source": "juraj-google-style"}
{"code": "def ADOPT_module_key_flags(  \n    module, flag_values=FLAGS):\n  \n  if not isinstance(module, types.ModuleType):\n    raise Error('Expected a module object, not %r.' % (module,))\n  \n  _internal_declare_key_flags(\n      [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],  \n      flag_values=flag_values)\n  \n  if module == _helpers.GetModuleObjectAndName(globals())[0]:\n    _internal_declare_key_flags(\n        \n        \n        \n        \n        \n        [f.name for f in six.itervalues(_helpers.SPECIAL_FLAGS.FlagDict())],\n        flag_values=_helpers.SPECIAL_FLAGS,\n        key_flag_values=flag_values)", "docstring": "Declares that all flags key to a module are key to the current module.\n\nArgs:\nmodule: A module object.\nflag_values: A FlagValues object.  This should almost never need\nto be overridden.\n\nRaises:\nError: When given an argument that is a module name (a\nstring), instead of a module object.", "source": "juraj-google-style"}
{"code": "def calculate_dill_dG(seq_len, temp):\n    \n    Th = 373.5  \n    Ts = 385  \n    temp += 273.15\n\n    dH = (4.0 * seq_len + 143) * 1000\n    dS = 13.27 * seq_len + 448\n    dCp = (0.049 * seq_len + 0.85) * 1000\n    dG = dH + dCp * (temp - Th) - temp * dS - temp * dCp * math.log(float(temp) / Ts)\n\n    return dG", "docstring": "Get free energy of unfolding (dG) using Dill method in units J/mol.\n\nArgs:\nseq_len (int): Length of amino acid sequence\ntemp (float): Temperature in degrees C\n\nReturns:\nfloat: Free energy of unfolding dG (J/mol)", "source": "juraj-google-style"}
{"code": "def price(self, instrument, **kwargs):\n    request = Request('GET', '/v3/instruments/{instrument}/price')\n    request.set_path_param('instrument', instrument)\n    request.set_param('time', kwargs.get('time'))\n    response = self.ctx.request(request)\n    if (response.content_type is None):\n        return response\n    if (not response.content_type.startswith('application/json')):\n        return response\n    jbody = json.loads(response.raw_body)\n    parsed_body = {}\n    if (str(response.status) == '200'):\n        if (jbody.get('price') is not None):\n            parsed_body['price'] = self.ctx.pricing_common.Price.from_dict(jbody['price'], self.ctx)\n    elif (str(response.status) == '400'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '401'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '404'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '405'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    else:\n        parsed_body = jbody\n    response.body = parsed_body\n    return response", "docstring": "Fetch a price for an instrument. Accounts are not associated in any way\nwith this endpoint.\n\nArgs:\ninstrument:\nName of the Instrument\ntime:\nThe time at which the desired price is in effect. The current\nprice is returned if no time is provided.\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def to_affine(self):\n    (X, Y, Z) = (self.x, self.y, self.inverse(self.z))\n    return (((X * (Z ** 2)) % P), ((Y * (Z ** 3)) % P))", "docstring": "Converts this point to an affine representation.\n\nReturns:\nAffinePoint: The affine reprsentation.", "source": "codesearchnet"}
{"code": "def interpolate_radius(r1, r2, fraction):\n\n    def f(a, b, c):\n        ' Returns the length of the interpolated radius calculated\\n        using similar triangles.\\n        '\n        return (a + (c * (b - a)))\n    return (f(r2, r1, (1.0 - fraction)) if (r1 > r2) else f(r1, r2, fraction))", "docstring": "Calculate the radius that corresponds to a point P that lies at a fraction of the length\nof a cut cone P1P2 where P1, P2 are the centers of the circles that bound the shape with radii\nr1 and r2 respectively.\n\nArgs:\nr1: float\nRadius of the first node of the segment.\nr2: float\nRadius of the second node of the segment\nfraction: float\nThe fraction at which the interpolated radius is calculated.\n\nReturns: float\nThe interpolated radius.\n\nNote: The fraction is assumed from point P1, not from point P2.", "source": "codesearchnet"}
{"code": "def jaccard(self, other):\n        \n        if other.seed != self.seed:\n            raise ValueError(\"Cannot compute Jaccard given MinHash with\\\n                    different seeds\")\n        if len(self) != len(other):\n            raise ValueError(\"Cannot compute Jaccard given MinHash with\\\n                    different numbers of permutation functions\")\n        return np.float(np.count_nonzero(self.hashvalues==other.hashvalues)) /\\\n                np.float(len(self))", "docstring": "Estimate the `Jaccard similarity`_ (resemblance) between the sets\nrepresented by this MinHash and the other.\n\nArgs:\nother (datasketch.MinHash): The other MinHash.\n\nReturns:\nfloat: The Jaccard similarity, which is between 0.0 and 1.0.", "source": "juraj-google-style"}
{"code": "def load_map_coordinates(map_file):\n    \n    if map_file[-4:] == \".pkl\":\n        map_data = pickle.load(open(map_file))\n        lon = map_data['lon']\n        lat = map_data['lat']\n    else:\n        map_data = Dataset(map_file)\n        if \"lon\" in map_data.variables.keys():\n            lon = map_data.variables['lon'][:]\n            lat = map_data.variables['lat'][:]\n        else:\n            lon = map_data.variables[\"XLONG\"][0]\n            lat = map_data.variables[\"XLAT\"][0]\n    return lon, lat", "docstring": "Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.\n\nArgs:\nmap_file: Filename for the file containing coordinate information.\n\nReturns:\nLatitude and longitude grids as numpy arrays.", "source": "juraj-google-style"}
{"code": "def create_worker(self, func, interval, *args, **kwargs):\n    thread = StoppableWorkerThread(func, interval, args, kwargs)\n    self._workers.append(thread)\n    if self._started:\n        thread.start()", "docstring": "Spawn a worker thread running func.\n\nThe worker will be automatically be started when start() is called\nand terminated when stop() is called on this object.\nThis must be called only from the main thread, not from a worker thread.\n\ncreate_worker must not be called after stop() has been called.  If it\nis called before start() is called, the thread is started when start()\nis called, otherwise it is started immediately.\n\nArgs:\nfunc (callable): Either a function that will be called in a loop\nwith a sleep of interval seconds with *args and **kwargs or\na generator function that will be called once and expected to\nyield periodically so that the worker can check if it should\nbe killed.\ninterval (float): The time interval between invocations of func.\nThis should not be 0 so that the thread doesn't peg the CPU\nand should be short enough so that the worker checks if it\nshould be killed in a timely fashion.\n*args: Arguments that are passed to func as positional args\n**kwargs: Arguments that are passed to func as keyword args", "source": "codesearchnet"}
{"code": "def _all_correct_list(array):\n    if (type(array) not in _ITERABLE_TYPES):\n        return False\n    for item in array:\n        if (not (type(item) in _ITERABLE_TYPES)):\n            return False\n        if (len(item) != 2):\n            return False\n    return True", "docstring": "Make sure, that all items in `array` has good type and size.\n\nArgs:\narray (list): Array of python types.\n\nReturns:\nTrue/False", "source": "codesearchnet"}
{"code": "def write_file(self, filename, distance=6, velocity=8, charge=3):\n    with open(filename, 'w') as f:\n        f.write(self.get_string(distance=distance, velocity=velocity, charge=charge))", "docstring": "Writes LammpsData to file.\n\nArgs:\nfilename (str): Filename.\ndistance (int): No. of significant figures to output for\nbox settings (bounds and tilt) and atomic coordinates.\nDefault to 6.\nvelocity (int): No. of significant figures to output for\nvelocities. Default to 8.\ncharge (int): No. of significant figures to output for\ncharges. Default to 3.", "source": "codesearchnet"}
{"code": "def _gather_field_values(item, *, fields=None, field_map=FIELD_MAP, normalize_values=False, normalize_func=normalize_value):\n    it = get_item_tags(item)\n    if (fields is None):\n        fields = list(it.keys())\n    normalize = (normalize_func if normalize_values else (lambda x: str(x)))\n    field_values = []\n    for field in fields:\n        field_values.append(normalize(list_to_single_value(get_field(it, field, field_map=field_map))))\n    return tuple(field_values)", "docstring": "Create a tuple of normalized metadata field values.\n\nParameter:\nitem (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.\nfields (list): A list of fields used to compare item dicts.\nfield_map (~collections.abc.Mapping): A mapping field name aliases.\nDefault: :data:`~google_music_utils.constants.FIELD_MAP`\nnormalize_values (bool): Normalize metadata values to remove common differences between sources.\nDefault: ``False``\nnormalize_func (function): Function to apply to metadata values if\n``normalize_values`` is ``True``.\nDefault: :func:`~google_music_utils.utils.normalize_value`\n\nReturns:\ntuple: Values from the given metadata fields.", "source": "codesearchnet"}
{"code": "def extract_variable_info(kwargs: Any) -> Tuple[str, Tuple[int, ...], dtypes.DType, Callable[[], Any], Optional[int]]:\n\n    def get_restore_uid(initial_value: Callable[..., Any]) -> int | None:\n        return getattr(initial_value, 'restore_uid', None)\n    if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):\n        if 'shape' in kwargs['initial_value'].keywords:\n            shape = kwargs['initial_value'].keywords['shape']\n        else:\n            shape = kwargs['initial_value'].args[0]\n        return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func, get_restore_uid(kwargs['initial_value'].func))\n    elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):\n        raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))\n    else:\n        return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'], get_restore_uid(kwargs['initial_value']))", "docstring": "Extracts the variable creation attributes from the kwargs.\n\nArgs:\nkwargs: a dict of keyword arguments that were passed to a variable creator\nscope.\n\nReturns:\nA tuple of variable name, shape, dtype, initialization function,\nrestore_uid.", "source": "github-repos"}
{"code": "def pan_and_scan_batched(self, images: 'torch.Tensor', pan_and_scan_min_crop_size: int, pan_and_scan_max_num_crops: int, pan_and_scan_min_ratio_to_activate: float):\n    height, width = images.shape[-2:]\n    if width >= height:\n        if width / height < pan_and_scan_min_ratio_to_activate:\n            return []\n        num_crops_w = int(math.floor(width / height + 0.5))\n        num_crops_w = min(int(math.floor(width / pan_and_scan_min_crop_size)), num_crops_w)\n        num_crops_w = max(2, num_crops_w)\n        num_crops_w = min(pan_and_scan_max_num_crops, num_crops_w)\n        num_crops_h = 1\n    else:\n        if height / width < pan_and_scan_min_ratio_to_activate:\n            return []\n        num_crops_h = int(math.floor(height / width + 0.5))\n        num_crops_h = min(int(math.floor(height / pan_and_scan_min_crop_size)), num_crops_h)\n        num_crops_h = max(2, num_crops_h)\n        num_crops_h = min(pan_and_scan_max_num_crops, num_crops_h)\n        num_crops_w = 1\n    crop_size_w = int(math.ceil(width / num_crops_w))\n    crop_size_h = int(math.ceil(height / num_crops_h))\n    if min(crop_size_w, crop_size_h) < pan_and_scan_min_crop_size:\n        return []\n    crop_positions_w = [crop_size_w * i for i in range(num_crops_w)]\n    crop_positions_h = [crop_size_h * i for i in range(num_crops_h)]\n    return [images[..., pos_h:pos_h + crop_size_h, pos_w:pos_w + crop_size_w] for pos_h, pos_w in itertools.product(crop_positions_h, crop_positions_w)]", "docstring": "Pan and Scan an image, by cropping into smaller images when the aspect ratio exceeds\nminimum allowed ratio.\n\nArgs:\nimage (`torch.Tensor`):\nImage to resize.\npan_and_scan_min_crop_size (`int`, *optional*):\nMinimum size of each crop in pan and scan.\npan_and_scan_max_num_crops (`int`, *optional*):\nMaximum number of crops per image in pan and scan.\npan_and_scan_min_ratio_to_activate (`float`, *optional*):\nMinimum aspect ratio to activate pan and scan.", "source": "github-repos"}
{"code": "def encode_tf(self, s):\n    \n    ids = subword_text_encoder_ops.subword_text_encoder_encode(\n        s, self._filepath)\n    \n    return ids[:-1]", "docstring": "Encode a tf.Scalar string to a tf.Tensor.\n\nThis will be necessary for on-the-fly tokenization.\n\nArgs:\ns: a tf.Scalar with dtype tf.string\nReturns:\na 1d tf.Tensor with dtype tf.int32", "source": "juraj-google-style"}
{"code": "def GetUnscannedSubNode(self):\n    if ((not self.sub_nodes) and (not self.scanned)):\n        return self\n    for sub_node in self.sub_nodes:\n        result = sub_node.GetUnscannedSubNode()\n        if result:\n            return result\n    return None", "docstring": "Retrieves the first unscanned sub node.\n\nReturns:\nSourceScanNode: sub scan node or None if not available.", "source": "codesearchnet"}
{"code": "def _read_at(self, d,\n                 interpolation='linear',\n                 index=False,\n                 return_basis=False):\n        \n        method = {'linear': utils.linear,\n                  'none': None}\n\n        i, d = utils.find_previous(self.basis,\n                                   d,\n                                   index=True,\n                                   return_distance=True)\n\n        if index:\n            return i\n        else:\n            return method[interpolation](self[i], self[i+1], d)", "docstring": "Private function. Implements read_at() for a single depth.\n\nArgs:\nd (float)\ninterpolation (str)\nindex(bool)\nreturn_basis (bool)\n\nReturns:\nfloat", "source": "juraj-google-style"}
{"code": "def __copy_extracted(self, path, destination):\n    unpacked_dir = (self.filename + '.unpacked')\n    if (not os.path.isdir(unpacked_dir)):\n        LOGGER.warn('Failed to copy extracted file %s, no extracted dir', path)\n        return\n    source_path = os.path.join(unpacked_dir, path)\n    if (not os.path.exists(source_path)):\n        LOGGER.warn('Failed to copy extracted file %s, does not exist', path)\n        return\n    destination_path = os.path.join(destination, path)\n    shutil.copyfile(source_path, destination_path)", "docstring": "Copies a file that was already extracted to the destination directory.\n\nArgs:\npath (str):\nRelative (to the root of the archive) of the file to copy.\n\ndestination (str):\nDirectory to extract the archive to.", "source": "codesearchnet"}
{"code": "def not_function(function: _evaluation.NotFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    del function, params_result\n    if operand_result is None:\n        raise ValueError('not() cannot be called without an operand.')\n    return dataclasses.replace(operand_result, select_part=_sql_data_types.FunctionCall('NOT', (operand_result.select_part,), _sql_alias='not_', _sql_data_type=_sql_data_types.Boolean))", "docstring": "Generates Spark SQL representing the FHIRPath not() function.\n\nReturns `TRUE` if the input collection evaluates to `FALSE`.\n\nThe operand is expected to be a table subquery of cardinality 1, whose value\nis a `BOOL` type. By default, `_NotFunction` will return `FALSE` if given no\noperator.\n\nArgs:\nfunction: The FHIRPath AST `NotFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.\n\nRaises:\nValueError: When the function is called without an operand", "source": "github-repos"}
{"code": "def umount(self, forced=True):\n        \n        if self.is_mounted():\n            if is_osx():\n                cmd = [\"/usr/sbin/diskutil\", \"unmount\",\n                       self.connection[\"mount_point\"]]\n                if forced:\n                    cmd.insert(2, \"force\")\n                subprocess.check_call(cmd)\n            else:\n                cmd = [\"umount\", self.connection[\"mount_point\"]]\n                if forced:\n                    cmd.insert(1, \"-f\")\n                subprocess.check_call(cmd)", "docstring": "Try to unmount our mount point.\n\nDefaults to using forced method. If OS is Linux, it will not\ndelete the mount point.\n\nArgs:\nforced: Bool whether to force the unmount. Default is True.", "source": "juraj-google-style"}
{"code": "def __fa_process_container(self, container, find, start, end, avoid, initial_state, execution_state, trace_current, trace_final):\n    ip = start\n    while ip:\n        try:\n            instr = container.fetch(ip)\n        except ReilContainerInvalidAddressError:\n            logger.debug('Exception @ {:#08x}'.format(ip))\n            raise ReilContainerInvalidAddressError\n        try:\n            next_addr = container.get_next_address(ip)\n        except Exception:\n            logger.debug('Exception @ {:#08x}'.format(ip))\n            raise ReilContainerInvalidAddressError\n        next_ip = self.__process_instr(instr, avoid, next_addr, initial_state, execution_state, trace_current)\n        if (find and next_ip and (next_ip == find)):\n            logger.debug('[+] Find address found!')\n            trace_final.append(list(trace_current))\n            next_ip = None\n        if (end and next_ip and (next_ip == end)):\n            logger.debug('[+] End address found!')\n            next_ip = None\n        ip = (next_ip if next_ip else None)\n        while (not ip):\n            if (not execution_state.empty()):\n                (ip, trace_current, registers, memory) = execution_state.get()\n                if (split_address(ip)[1] == 0):\n                    logger.debug('[+] Popping execution state @ {:#08x}'.format(ip))\n                else:\n                    logger.debug('[+] Popping execution state @ {:#08x}:{:02d}'.format(*split_address(ip)))\n                self.__cpu.registers = registers\n                self.__cpu.memory = memory\n                logger.debug('[+] Next address: {:#08x}'.format(ip))\n            else:\n                logger.debug('[+] No more paths to explore! Exiting...')\n                break\n            if (find and (ip == find)):\n                logger.debug('[+] Find address found!')\n                trace_final.append(list(trace_current))\n                ip = None\n            if (end and (ip == end)):\n                logger.debug('[+] End address found!')\n                ip = None", "docstring": "Process a REIL container.\n\nArgs:\navoid (list): List of addresses to avoid while executing the code.\ncontainer (ReilContainer): REIL container to execute.\nend (int): End address.\nexecution_state (Queue): Queue of execution states.\nfind (int): Address to find.\ninitial_state (State): Initial state.\nstart (int): Start address.\ntrace_current:\ntrace_final:", "source": "codesearchnet"}
{"code": "def get_staking_cutoff(self, round_num=0, tournament=1):\n        \n        query = '''\n            query($number: Number!\n                  $tournament: Int!) {\n              rounds(number: $number\n                     tournament: $tournament) {\n                selection {\n                  pCutoff\n                  bCutoff\n                }\n              }\n            }\n        '''\n        arguments = {'number': round_num, 'tournament': tournament}\n        result = self.raw_query(query, arguments)\n        result = result['data']['rounds'][0]['selection']\n        key = 'bCutoff' if round_num >= 154 or round_num == 0 else 'pCutoff'\n        return utils.parse_float_string(result[key])", "docstring": "Compute staking cutoff for the given round and tournament.\n\nArgs:\nround_num (int, optional): The round you are interested in,\ndefaults to current round.\ntournament (int, optional): ID of the tournament, defaults to 1\n\nReturns:\ndecimal.Decimal: cutoff probability\n\nRaises:\nValueError: in case of missing prize pool information", "source": "juraj-google-style"}
{"code": "def __eq__(self, other) -> bool:\n        return self.timeslots == other.timeslots", "docstring": "Two time-slot collections are the same if they have the same time-slots.\n\nArgs:\nother (TimeslotCollection): other TimeslotCollection", "source": "juraj-google-style"}
{"code": "def CheckApproversForLabel(self, token, client_urn, requester, approvers, label):\n    auth = self.reader.GetAuthorizationForSubject(label)\n    if (not auth):\n        return True\n    if auth.requester_must_be_authorized:\n        if (not self.CheckPermissions(requester, label)):\n            raise access_control.UnauthorizedAccess(('User %s not in %s or groups:%s for %s' % (requester, auth.users, auth.groups, label)), subject=client_urn, requested_access=token.requested_access)\n    approved_count = 0\n    for approver in approvers:\n        if (self.CheckPermissions(approver, label) and (approver != requester)):\n            approved_count += 1\n    if (approved_count < auth.num_approvers_required):\n        raise access_control.UnauthorizedAccess(('Found %s approvers for %s, needed %s' % (approved_count, label, auth.num_approvers_required)), subject=client_urn, requested_access=token.requested_access)\n    return True", "docstring": "Checks if requester and approvers have approval privileges for labels.\n\nChecks against list of approvers for each label defined in approvers.yaml to\ndetermine if the list of approvers is sufficient.\n\nArgs:\ntoken: user token\nclient_urn: ClientURN object of the client\nrequester: username string of person requesting approval.\napprovers: list of username strings that have approved this client.\nlabel: label strings to check approval privs for.\nReturns:\nTrue if access is allowed, raises otherwise.", "source": "codesearchnet"}
{"code": "def get_domain_template(distro, libvirt_ver, **kwargs):\n    env = Environment(loader=PackageLoader('lago', 'providers/libvirt/templates'), trim_blocks=True, lstrip_blocks=True)\n    template_name = 'dom_template-{0}.xml.j2'.format(distro)\n    try:\n        template = env.get_template(template_name)\n    except TemplateNotFound:\n        LOGGER.debug('could not find template %s using default', template_name)\n        template = env.get_template('dom_template-base.xml.j2')\n    return template.render(libvirt_ver=libvirt_ver, **kwargs)", "docstring": "Get a rendered Jinja2 domain template\n\nArgs:\ndistro(str): domain distro\nlibvirt_ver(int): libvirt version\nkwargs(dict): args for template render\n\nReturns:\nstr: rendered template", "source": "codesearchnet"}
{"code": "def _export_mode(mode, has_saved_vars, builder, model, custom_objects, checkpoint_path, input_signature):\n    compile_clone = mode != mode_keys.ModeKeys.PREDICT\n    if compile_clone and (not model.optimizer):\n        raise ValueError('Model does not have an optimizer. Cannot export mode %s' % mode)\n    model_graph = ops.get_default_graph()\n    with ops.Graph().as_default() as g, backend.learning_phase_scope(mode == mode_keys.ModeKeys.TRAIN):\n        if input_signature is None:\n            input_tensors = None\n        else:\n            input_tensors = nest.map_structure(create_placeholder, input_signature)\n        clone = models_lib.clone_and_build_model(model, input_tensors=input_tensors, custom_objects=custom_objects, compile_clone=compile_clone)\n        if compile_clone:\n            g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)\n        train_op = None\n        if mode == mode_keys.ModeKeys.TRAIN:\n            clone._make_train_function()\n            train_op = clone.train_function.updates_op\n        elif mode == mode_keys.ModeKeys.TEST:\n            clone._make_test_function()\n        else:\n            clone._make_predict_function()\n        g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)\n        with session.Session().as_default():\n            clone_var_list = _get_var_list(clone)\n            if has_saved_vars:\n                status = clone.load_weights(checkpoint_path)\n                status.assert_existing_objects_matched()\n            else:\n                _assert_same_non_optimizer_objects(model, model_graph, clone, g)\n                clone.load_weights(checkpoint_path)\n                clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)\n                builder._has_saved_variables = True\n            builder.add_meta_graph(model_utils.EXPORT_TAG_MAP[mode], signature_def_map=_create_signature_def_map(clone, mode), saver=saver_lib.Saver(clone_var_list, allow_empty=True), init_op=variables.local_variables_initializer(), train_op=train_op)\n        return None", "docstring": "Exports a model, and optionally saves new vars from the clone model.\n\nArgs:\nmode: A `KerasModeKeys` string.\nhas_saved_vars: A `boolean` indicating whether the SavedModel has already\nexported variables.\nbuilder: A `SavedModelBuilder` object.\nmodel: A `tf.keras.Model` object.\ncustom_objects: A dictionary mapping string names to custom classes\nor functions.\ncheckpoint_path: String path to checkpoint.\ninput_signature: Nested TensorSpec containing the expected inputs. Can be\n`None`, in which case the signature will be inferred from the model.\n\nRaises:\nValueError: If the train/eval mode is being exported, but the model does\nnot have an optimizer.", "source": "github-repos"}
{"code": "def circuit_to_latex_using_qcircuit(\n        circuit: circuits.Circuit,\n        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:\n    \n    diagram = circuit.to_text_diagram_drawer(\n        qubit_namer=qcircuit_qubit_namer,\n        qubit_order=qubit_order,\n        get_circuit_diagram_info=get_qcircuit_diagram_info)\n    return _render(diagram)", "docstring": "Returns a QCircuit-based latex diagram of the given circuit.\n\nArgs:\ncircuit: The circuit to represent in latex.\nqubit_order: Determines the order of qubit wires in the diagram.\n\nReturns:\nLatex code for the diagram.", "source": "juraj-google-style"}
{"code": "def _bash_comp_command(self, cmd, add_help=True):\n        \n        out = ['-h', '--help'] if add_help else []\n        cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare\n        for opt, sct in cmd_dict:\n            out.extend(_names(self._conf[sct], opt))\n        return out", "docstring": "Build a list of all options for a given command.\n\nArgs:\ncmd (str): command name, set to None or '' for bare command.\nadd_help (bool): add an help option.\n\nReturns:\nlist of str: list of CLI options strings.", "source": "juraj-google-style"}
{"code": "def downloadMARCXML(doc_id, library, base=\"nkc\"):\n    \n    downer = Downloader()\n\n    data = downer.download(\n        ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(\n            DOC_ID=doc_id,\n            LIBRARY=library\n        )\n    )\n\n    dom = dhtmlparser.parseString(data)\n\n    \n    \n    error = dom.find(\"login\")\n    if error:\n        error_msg = error[0].find(\"error\")\n\n        if error_msg:\n            raise LibraryNotFoundException(\n                \"Can't download document doc_id: '\" + str(doc_id) + \"' \" +\n                \"(probably bad library: '\" + library + \"')!\\nMessage: \" +\n                \"\\n\".join(map(lambda x: x.getContent(), error_msg))\n            )\n\n    \n    error = dom.find(\"ill-get-doc\")\n    if error:\n        error_msg = error[0].find(\"error\")\n\n        if error_msg:\n            raise DocumentNotFoundException(\n                \"\\n\".join(map(lambda x: x.getContent(), error_msg))\n            )\n\n    return data", "docstring": "Download MARC XML document with given `doc_id` from given `library`.\n\nArgs:\ndoc_id (DocumentID): You will get this from :func:`getDocumentIDs`.\nlibrary (str): \"``NKC01``\" in our case, but don't worry,\n:func:`getDocumentIDs` adds library specification into\n:class:`DocumentID` named tuple.\n\nReturns:\nstr: MARC XML unicode string.\n\nRaises:\nLibraryNotFoundException\nDocumentNotFoundException", "source": "juraj-google-style"}
{"code": "def __call__(self, inputs: List[Any], global_state: Optional[pg.geno.AttributeDict]=None, step: int=0) -> List[Any]:\n    if self.input_element_type is not None:\n        elem_type = self.input_element_type\n        for i, elem in enumerate(inputs):\n            if not isinstance(elem, elem_type):\n                raise TypeError(f'The input is expected to be a list of {elem_type!r} but {elem!r} is encountered at position {i}.')\n    if global_state is None:\n        global_state = pg.geno.AttributeDict()\n    self._on_input(inputs)\n    outputs = self._operate(inputs, global_state=global_state, step=step)\n    if self.output_element_type is not None:\n        elem_type = self.output_element_type\n        for i, elem in enumerate(outputs):\n            if not isinstance(elem, elem_type):\n                raise TypeError(f'The output is expected to be a list of {elem_type!r} but {elem!r} is encountered at position {i}.')\n    return outputs", "docstring": "Transform a list of input values to a list of output values.\n\nArgs:\ninputs: A list of values as inputs.\nglobal_state: An `AttributeDict` object (dictionary that provides\nattribute access) as the global state container, which is\nreadable/writable during the operation.\nstep: Number of examples historically proposed, which can be used for\ndetermining a cross over schedule.\n\nReturns:\nA list of values as output of current operation.", "source": "github-repos"}
{"code": "def _send_message(self, method, endpoint, params=None, data=None):\n        \n        url = self.url + endpoint\n        r = self.session.request(method, url, params=params, data=data,\n                                 auth=self.auth, timeout=30)\n        return r.json()", "docstring": "Send API request.\n\nArgs:\nmethod (str): HTTP method (get, post, delete, etc.)\nendpoint (str): Endpoint (to be added to base URL)\nparams (Optional[dict]): HTTP request parameters\ndata (Optional[str]): JSON-encoded string payload for POST\n\nReturns:\ndict/list: JSON response", "source": "juraj-google-style"}
{"code": "def _process_returns_section(func_documentation, sig, config_class, indent_level):\n    return_docstring = ''\n    if func_documentation is not None and (match_start := re.search('(?m)^([ \\\\t]*)(?=Return)', func_documentation)) is not None:\n        match_end = re.search('(?m)^([ \\\\t]*)(?=Example)', func_documentation)\n        if match_end:\n            return_docstring = func_documentation[match_start.start():match_end.start()]\n            func_documentation = func_documentation[match_end.start():]\n        else:\n            return_docstring = func_documentation[match_start.start():]\n            func_documentation = ''\n        return_docstring = set_min_indent(return_docstring, indent_level + 4)\n    elif sig.return_annotation is not None and sig.return_annotation != inspect._empty:\n        add_intro, return_annotation = contains_type(sig.return_annotation, ModelOutput)\n        return_docstring = _prepare_output_docstrings(return_annotation, config_class, add_intro=add_intro)\n        return_docstring = return_docstring.replace('typing.', '')\n        return_docstring = set_min_indent(return_docstring, indent_level + 4)\n    return (return_docstring, func_documentation)", "docstring": "Process the returns section of the docstring.\n\nArgs:\nfunc_documentation (`str`): Existing function documentation (manually specified in the docstring)\nsig (`inspect.Signature`): Function signature\nconfig_class (`str`): Config class for the model\nindent_level (`int`): Indentation level", "source": "github-repos"}
{"code": "def _trace_variant_creation(self):\n    variant = self._variant_tensor\n    if not isinstance(variant, ops.EagerTensor):\n        raise NotImplementedError('Constructing a tf.function that reproduces a given dataset is only supported for datasets created eagerly. Please file a feature request if this is important to you.')\n    with context.eager_mode(), ops.device('CPU'):\n        graph_def = graph_pb2.GraphDef().FromString(self._as_serialized_graph(external_state_policy=options_lib.ExternalStatePolicy.FAIL).numpy())\n    output_node_names = []\n    for node in graph_def.node:\n        if node.op == '_Retval':\n            output_node_names = node.input\n    if len(output_node_names) != 1:\n        raise AssertionError(f'Dataset graph is expected to only have one return value but found {len(output_node_names)} return values: {output_node_names}.')\n    output_node_name = output_node_names[0]\n    file_path_nodes = {}\n    if ops.get_default_graph().building_function:\n        asset_tracker = self._maybe_track_assets(graph_def)\n        for key in asset_tracker:\n            assets_list = [array_ops.expand_dims(asset.asset_path, axis=0) for asset in asset_tracker[key]]\n            file_path_nodes[key] = array_ops.concat(assets_list, axis=0)\n    variant_function = wrap_function.function_from_graph_def(graph_def, inputs=[], outputs=output_node_name + ':0', captures=file_path_nodes)\n    for used_function in self._functions():\n        used_function.function.add_to_graph(variant_function.graph)\n    return variant_function", "docstring": "Traces a function which outputs a variant `tf.Tensor` for this dataset.\n\nNote that creating this function involves evaluating an op, and is currently\nonly supported when executing eagerly.\n\nReturns:\nA zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.", "source": "github-repos"}
{"code": "def cmd_path(self, cmd):\n        \n        for binscript in self.bin.files:\n\n            if binscript.path.endswith('/{0}'.format(cmd)):\n\n                return binscript.path\n\n        raise ValueError('The command {0} was not found.'.format(cmd))", "docstring": "Get the path of a command in the virtual if it exists.\n\nArgs:\ncmd (str): The command to look for.\n\nReturns:\nstr: The full path to the command.\n\nRaises:\nValueError: If the command is not present.", "source": "juraj-google-style"}
{"code": "def reminders_add(self, *, text: str, time: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'text': text, 'time': time})\n    return self.api_call('reminders.add', json=kwargs)", "docstring": "Creates a reminder.\n\nArgs:\ntext (str): The content of the reminder. e.g. 'eat a banana'\ntime (str): When this reminder should happen:\nthe Unix timestamp (up to five years from now e.g. '1602288000'),\nthe number of seconds until the reminder (if within 24 hours),\nor a natural language description (Ex. 'in 15 minutes' or 'every Thursday')", "source": "codesearchnet"}
{"code": "def parse_hunks(diff: str) -> list[Hunk]:\n    diff_pattern = 'diff --git a/.* b/(.*)\\\\n(?:\\\\w+ file mode \\\\d+\\\\n)?index .*\\\\n--- .*\\\\n\\\\+\\\\+\\\\+ .*\\\\n'\n    hunk_header_pattern = '@@ -\\\\d+,\\\\d+ \\\\+(\\\\d+),(\\\\d+) @@.*\\\\n'\n    raw_per_file_hunks = re.split(diff_pattern, diff)[1:]\n    parsed_hunks = []\n    for file, raw_hunks in batch(raw_per_file_hunks, 2):\n        hunks = re.split(hunk_header_pattern, raw_hunks, flags=re.MULTILINE)[1:]\n        for start, length, body in batch(hunks, 3):\n            lines = body.split('\\n')\n            lines = lines if lines[-1] else lines[:-1]\n            parsed_hunks.append(Hunk(file, int(start), int(length), lines))\n    return parsed_hunks", "docstring": "Parses a diff into hunks.\n\nArguments:\ndiff: The raw output of git diff.\n\nReturns:\nA list of Hunks.", "source": "github-repos"}
{"code": "def has_all_nonzero_neurite_radii(neuron, threshold=0.0):\n    \n    bad_ids = []\n    seen_ids = set()\n    for s in _nf.iter_sections(neuron):\n        for i, p in enumerate(s.points):\n            info = (s.id, i)\n            if p[COLS.R] <= threshold and info not in seen_ids:\n                seen_ids.add(info)\n                bad_ids.append(info)\n\n    return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check presence of neurite points with radius not above threshold\n\nArguments:\nneuron(Neuron): The neuron object to test\nthreshold: value above which a radius is considered to be non-zero\n\nReturns:\nCheckResult with result including list of (section ID, point ID) pairs\nof zero-radius points", "source": "juraj-google-style"}
{"code": "def _solve(self, sense=None):\n    while (len(self._remove_constr) > 0):\n        self._remove_constr.pop().delete()\n    try:\n        return self._prob.solve(sense=sense)\n    except lp.SolverError as e:\n        raise_from(MOMAError(text_type(e)), e)\n    finally:\n        self._remove_constr = []", "docstring": "Remove old constraints and then solve the current problem.\n\nArgs:\nsense: Minimize or maximize the objective.\n(:class:`.lp.ObjectiveSense)\n\nReturns:\nThe Result object for the solved LP problem", "source": "codesearchnet"}
{"code": "def remove_server_data(server_id):\n    \n\n    logger.debug(\"Removing server from serverdata\")\n    \n    data = datatools.get_data()\n    if server_id in data[\"discord\"][\"servers\"]:\n        data[\"discord\"][\"servers\"].pop(server_id)\n        datatools.write_data(data)", "docstring": "Remove a server from the server data\n\nArgs:\nserver_id (int): The server to remove from the server data", "source": "juraj-google-style"}
{"code": "def _PrintPreprocessingInformation(self, storage_reader, session_number=None):\n    \n    knowledge_base_object = knowledge_base.KnowledgeBase()\n\n    storage_reader.ReadPreprocessingInformation(knowledge_base_object)\n\n    \n    system_configuration = knowledge_base_object.GetSystemConfigurationArtifact(\n        session_identifier=session_number)\n    if not system_configuration:\n      return\n\n    title = 'System configuration'\n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type, title=title)\n\n    hostname = 'N/A'\n    if system_configuration.hostname:\n      hostname = system_configuration.hostname.name\n\n    operating_system = system_configuration.operating_system or 'N/A'\n    operating_system_product = (\n        system_configuration.operating_system_product or 'N/A')\n    operating_system_version = (\n        system_configuration.operating_system_version or 'N/A')\n    code_page = system_configuration.code_page or 'N/A'\n    keyboard_layout = system_configuration.keyboard_layout or 'N/A'\n    time_zone = system_configuration.time_zone or 'N/A'\n\n    table_view.AddRow(['Hostname', hostname])\n    table_view.AddRow(['Operating system', operating_system])\n    table_view.AddRow(['Operating system product', operating_system_product])\n    table_view.AddRow(['Operating system version', operating_system_version])\n    table_view.AddRow(['Code page', code_page])\n    table_view.AddRow(['Keyboard layout', keyboard_layout])\n    table_view.AddRow(['Time zone', time_zone])\n\n    table_view.Write(self._output_writer)\n\n    title = 'User accounts'\n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type,\n        column_names=['Username', 'User directory'], title=title)\n\n    for user_account in system_configuration.user_accounts:\n      table_view.AddRow([\n          user_account.username, user_account.user_directory])\n\n    table_view.Write(self._output_writer)", "docstring": "Prints the details of the preprocessing information.\n\nArgs:\nstorage_reader (StorageReader): storage reader.\nsession_number (Optional[int]): session number.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               k_ranges,\n               query_spec,\n               key_range_iter_cls):\n    \n    self._key_ranges = k_ranges\n    self._query_spec = query_spec\n    self._key_range_iter_cls = key_range_iter_cls\n    self._current_iter = None\n    self._current_key_range = None", "docstring": "Init.\n\nArgs:\nk_ranges: a key_ranges._KeyRanges object.\nquery_spec: a model.query_spec object that defines how to retrieve\nentities from datastore.\nkey_range_iter_cls: the class that iterates over a single key range.\nThe value yielded by this class is yielded.", "source": "juraj-google-style"}
{"code": "def get_numeric_sort_key_fn(numeric_values):\n    value_types = _get_all_types(numeric_values)\n    if len(value_types) != 1:\n        raise ValueError(f'No common value type in {numeric_values}')\n    value_type = next(iter(value_types))\n    if value_type == NUMBER_TYPE:\n        return _get_value_as_primitive_value\n    valid_indexes = set(range(_DATE_TUPLE_SIZE))\n    for numeric_value in numeric_values:\n        value = _get_value_as_primitive_value(numeric_value)\n        assert isinstance(value, tuple)\n        for tuple_index, inner_value in enumerate(value):\n            if inner_value is None:\n                valid_indexes.discard(tuple_index)\n    if not valid_indexes:\n        raise ValueError(f'No common value in {numeric_values}')\n\n    def _sort_key_fn(numeric_value):\n        value = _get_value_as_primitive_value(numeric_value)\n        return tuple((value[index] for index in valid_indexes))\n    return _sort_key_fn", "docstring": "Creates a function that can be used as a sort key or to compare the values. Maps to primitive types and finds the\nbiggest common subset. Consider the values \"05/05/2010\" and \"August 2007\". With the corresponding primitive values\n(2010.,5.,5.) and (2007.,8., None). These values can be compared by year and date so we map to the sequence (2010.,\n5.), (2007., 8.). If we added a third value \"2006\" with primitive value (2006., None, None), we could only compare\nby the year so we would map to (2010.,), (2007.,) and (2006.,).\n\nArgs:\nnumeric_values: Values to compare\n\nReturns:\nA function that can be used as a sort key function (mapping numeric values to a comparable tuple)\n\nRaises:\nValueError if values don't have a common type or are not comparable.", "source": "github-repos"}
{"code": "def setup_logging(verbosity, formats=None):\n    if (formats is None):\n        formats = {}\n    log_level = logging.INFO\n    log_format = formats.get('info', INFO_FORMAT)\n    if sys.stdout.isatty():\n        log_format = formats.get('color', COLOR_FORMAT)\n    if (verbosity > 0):\n        log_level = logging.DEBUG\n        log_format = formats.get('debug', DEBUG_FORMAT)\n    if (verbosity < 2):\n        logging.getLogger('botocore').setLevel(logging.CRITICAL)\n    hdlr = logging.StreamHandler()\n    hdlr.setFormatter(ColorFormatter(log_format, ISO_8601))\n    logging.root.addHandler(hdlr)\n    logging.root.setLevel(log_level)", "docstring": "Configure a proper logger based on verbosity and optional log formats.\n\nArgs:\nverbosity (int): 0, 1, 2\nformats (dict): Optional, looks for `info`, `color`, and `debug` keys\nwhich may override the associated default log formats.", "source": "codesearchnet"}
{"code": "def __spawn_new_request(self):\n    first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)\n    if (first_in_line is None):\n        return False\n    while self.routing.is_treshold_reached(first_in_line.request):\n        self.queue.move(first_in_line, QueueItem.STATUS_CANCELLED)\n        first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)\n        if (first_in_line is None):\n            return False\n    self.__request_start(first_in_line)\n    return True", "docstring": "Spawn the first queued request if there is one available.\n\nReturns:\nbool: True if a new request was spawned, false otherwise.", "source": "codesearchnet"}
{"code": "def get(self, dash_id):\n    data = json.loads(r_db.hmget(config.DASH_CONTENT_KEY, dash_id)[0])\n    return build_response(dict(data=data, code=200))", "docstring": "Read dashboard content.\n\nArgs:\ndash_id: dashboard id.\n\nReturns:\nA dict containing the content of that dashboard, not including the meta info.", "source": "codesearchnet"}
{"code": "def __init__(self, credentials):\n        \n        if not has_httplib2:\n            raise ImportError(\"No module named httplib2\")\n        super(GAPDecoratorAuthMethod, self).__init__()\n        self._http = None\n        self._credentials = credentials\n        self._action_token = None", "docstring": "Initialize auth method with existing credentials.\nArgs:\ncredentials: OAuth2 credentials obtained via GAP OAuth2 library.", "source": "juraj-google-style"}
{"code": "def __init__(self, label, ast_node, *, line_number=None, path):\n        \n        self.label = label\n        self.ast_node = ast_node\n        if line_number:\n            self.line_number = line_number\n        elif ast_node:\n            self.line_number = ast_node.lineno\n        else:\n            self.line_number = None\n        self.path = path\n        self.ingoing = list()\n        self.outgoing = list()", "docstring": "Create a Node that can be used in a CFG.\n\nArgs:\nlabel(str): The label of the node, describing its expression.\nline_number(Optional[int]): The line of the expression of the Node.", "source": "juraj-google-style"}
{"code": "def pprint_cell(self, row, col):\n        \n        ndims = self.ndims\n        if col >= self.cols:\n            raise Exception(\"Maximum column index is %d\" % (self.cols - 1))\n        elif row >= self.rows:\n            raise Exception(\"Maximum row index is %d\" % (self.rows - 1))\n        elif row == 0:\n            if col >= ndims:\n                if self.vdims:\n                    return self.vdims[col - ndims].pprint_label\n                else:\n                    return ''\n            return self.kdims[col].pprint_label\n        else:\n            dim = self.get_dimension(col)\n            return dim.pprint_value(self.iloc[row-1, col])", "docstring": "Formatted contents of table cell.\n\nArgs:\nrow (int): Integer index of table row\ncol (int): Integer index of table column\n\nReturns:\nFormatted table cell contents", "source": "juraj-google-style"}
{"code": "def ProcessStorage(self):\n    self._CheckStorageFile(self._storage_file_path)\n    self._status_view.SetMode(self._status_view_mode)\n    self._status_view.SetStorageFileInformation(self._storage_file_path)\n    status_update_callback = self._status_view.GetAnalysisStatusUpdateCallback()\n    session = engine.BaseEngine.CreateSession(command_line_arguments=self._command_line_arguments, preferred_encoding=self.preferred_encoding)\n    storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(self._storage_file_path)\n    if (not storage_reader):\n        logger.error('Format of storage file: {0:s} not supported'.format(self._storage_file_path))\n        return\n    self._number_of_analysis_reports = storage_reader.GetNumberOfAnalysisReports()\n    storage_reader.Close()\n    configuration = configurations.ProcessingConfiguration()\n    configuration.data_location = self._data_location\n    configuration.profiling.directory = self._profiling_directory\n    configuration.profiling.sample_rate = self._profiling_sample_rate\n    configuration.profiling.profilers = self._profilers\n    analysis_counter = None\n    if self._analysis_plugins:\n        storage_writer = storage_factory.StorageFactory.CreateStorageWriterForFile(session, self._storage_file_path)\n        analysis_engine = psort.PsortMultiProcessEngine(use_zeromq=self._use_zeromq)\n        analysis_engine.AnalyzeEvents(self._knowledge_base, storage_writer, self._data_location, self._analysis_plugins, configuration, event_filter=self._event_filter, event_filter_expression=self._event_filter_expression, status_update_callback=status_update_callback, worker_memory_limit=self._worker_memory_limit)\n        analysis_counter = collections.Counter()\n        for (item, value) in iter(session.analysis_reports_counter.items()):\n            analysis_counter[item] = value\n    if (self._output_format != 'null'):\n        storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(self._storage_file_path)\n        analysis_engine = psort.PsortMultiProcessEngine(use_zeromq=self._use_zeromq)\n        analysis_engine.ExportEvents(self._knowledge_base, storage_reader, self._output_module, configuration, deduplicate_events=self._deduplicate_events, event_filter=self._event_filter, status_update_callback=status_update_callback, time_slice=self._time_slice, use_time_slicer=self._use_time_slicer)\n    if self._quiet_mode:\n        return\n    self._output_writer.Write('Processing completed.\\n')\n    if analysis_counter:\n        table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Analysis reports generated')\n        for (element, count) in analysis_counter.most_common():\n            if (element != 'total'):\n                table_view.AddRow([element, count])\n        table_view.AddRow(['Total', analysis_counter['total']])\n        table_view.Write(self._output_writer)\n    storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(self._storage_file_path)\n    self._PrintAnalysisReportsDetails(storage_reader)", "docstring": "Processes a plaso storage file.\n\nRaises:\nBadConfigOption: when a configuration parameter fails validation.\nRuntimeError: if a non-recoverable situation is encountered.", "source": "codesearchnet"}
{"code": "def remove_delegate(self, callback):\n    if (callback not in self._delegate_methods):\n        return\n    self._delegate_methods.remove(callback)", "docstring": "Unregisters a registered delegate function or a method.\n\nArgs:\ncallback(function): method to trigger when push center receives events", "source": "codesearchnet"}
{"code": "def convert_inner_node_data(nested, wrap=False):\n\n    def _is_serialized_node_data(nested):\n        if isinstance(nested, list) and len(nested) in [3, 4] and isinstance(nested[0], str):\n            return True\n        return False\n\n    def _is_atomic_nested(nested):\n        \n        if isinstance(nested, ListWrapper):\n            return True\n        if _is_serialized_node_data(nested):\n            return True\n        return not nest.is_nested(nested)\n\n    def _convert_object_or_list(nested):\n        \n        if wrap:\n            if isinstance(nested, ListWrapper):\n                return nested\n            if _is_serialized_node_data(nested):\n                return ListWrapper(nested)\n            return nested\n        else:\n            if isinstance(nested, ListWrapper):\n                return nested.as_list()\n            return nested\n    return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list, nested)", "docstring": "Either wraps or unwraps innermost node data lists in `ListWrapper` objects.\n\nArgs:\nnested: A nested data structure.\nwrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`,\nunwraps `ListWrapper` objects into lists.\n\nReturns:\nStructure of same type as nested, with lists wrapped/unwrapped.", "source": "github-repos"}
{"code": "def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int, outliers_removal_axis: OutliersRemovalAxis) -> None:\n    \n    \n    if outliers_start_index > 0:\n        \n        \n        x = ctypes.c_int(0)\n        y = ctypes.c_int(0)\n        z = ctypes.c_int(0)\n        \n        \n        outliers_removal_axis_values: Dict[OutliersRemovalAxis, ctypes.c_int] = {\n            projectors.TH1AxisType.x_axis: x,\n            projectors.TH1AxisType.y_axis: y,\n            projectors.TH1AxisType.z_axis: z,\n        }\n        for index in range(0, hist.GetNcells()):\n            \n            hist.GetBinXYZ(index, x, y, z)\n            \n            if hist.GetBinContent(index) < hist.GetBinError(index):\n                logger.warning(f\"Bin content < error. Name: {hist.GetName()}, Bin content: {hist.GetBinContent(index)}, Bin error: {hist.GetBinError(index)}, index: {index}, ({x.value}, {y.value})\")\n            if outliers_removal_axis_values[outliers_removal_axis].value >= outliers_start_index:\n                \n                hist.SetBinContent(index, 0)\n                hist.SetBinError(index, 0)\n    else:\n        logger.info(f\"Hist {hist.GetName()} did not have any outliers to cut\")", "docstring": "Remove outliers from a given histogram.\n\nArgs:\nhist: Histogram to check for outliers.\noutliers_start_index: Index in the truth axis where outliers begin.\noutliers_removal_axis: Axis along which outliers removal will be performed. Usually\nthe particle level aixs.\nReturns:\nNone. The histogram is modified in place.", "source": "juraj-google-style"}
{"code": "def train_validation_split(arrays, validation_split):\n\n    def _can_split(t):\n        tensor_types = _get_tensor_types()\n        return isinstance(t, tensor_types) or t is None\n    flat_arrays = nest.flatten(arrays)\n    unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]\n    if unsplitable:\n        raise ValueError('`validation_split` is only supported for Tensors or NumPy arrays, found following types in the input: {}'.format(unsplitable))\n    if all((t is None for t in flat_arrays)):\n        return (arrays, arrays)\n    first_non_none = None\n    for t in flat_arrays:\n        if t is not None:\n            first_non_none = t\n            break\n    batch_dim = int(first_non_none.shape[0])\n    split_at = int(math.floor(batch_dim * (1.0 - validation_split)))\n    if split_at == 0 or split_at == batch_dim:\n        raise ValueError('Training data contains {batch_dim} samples, which is not sufficient to split it into a validation and training set as specified by `validation_split={validation_split}`. Either provide more data, or a different value for the `validation_split` argument.'.format(batch_dim=batch_dim, validation_split=validation_split))\n\n    def _split(t, start, end):\n        if t is None:\n            return t\n        return t[start:end]\n    train_arrays = nest.map_structure(functools.partial(_split, start=0, end=split_at), arrays)\n    val_arrays = nest.map_structure(functools.partial(_split, start=split_at, end=batch_dim), arrays)\n    return (train_arrays, val_arrays)", "docstring": "Split arrays into train and validation subsets in deterministic order.\n\nThe last part of data will become validation data.\n\nArgs:\narrays: Tensors to split. Allowed inputs are arbitrarily nested structures\nof Tensors and NumPy arrays.\nvalidation_split: Float between 0 and 1. The proportion of the dataset to\ninclude in the validation split. The rest of the dataset will be included\nin the training split.\nReturns:\n`(train_arrays, validation_arrays)`", "source": "github-repos"}
{"code": "def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':\n        \n        return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))", "docstring": "Concatenate the given flatterms to a single flatterm.\n\nArgs:\n*flatterms:\nThe flatterms which are concatenated.\n\nReturns:\nThe concatenated flatterms.", "source": "juraj-google-style"}
{"code": "def _ParseIntegerValue(self, byte_stream, file_offset):\n    data_type_map = self._GetDataTypeMap('int32be')\n    try:\n        return self._ReadStructureFromByteStream(byte_stream, file_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse integer value with error: {0!s}'.format(exception))", "docstring": "Parses an integer value.\n\nArgs:\nbyte_stream (bytes): byte stream.\nfile_offset (int): offset of the attribute data relative to the start of\nthe file-like object.\n\nReturns:\nint: integer value.\n\nRaises:\nParseError: when the integer value cannot be parsed.", "source": "codesearchnet"}
{"code": "def get(cls, keyval, key='id', user_id=None):\n        \n        if keyval is None:\n            return None\n        if (key in cls.__table__.columns\n                and cls.__table__.columns[key].primary_key):\n            \n            \n            return cls.query.get(keyval)\n        else:\n            result = cls.query.filter(\n                getattr(cls, key) == keyval)\n            \n            \n            return result.first()", "docstring": "Fetches a single instance which has value `keyval`\nfor the attribute `key`.\n\nArgs:\n\nkeyval: The value of the attribute.\n\nkey (str, optional):  The attribute to search by. By default,\nit is 'id'.\n\nReturns:\n\nA model instance if found. Else None.\n\nExamples:\n\n>>> User.get(35)\nuser35@i.com\n\n>>> User.get('user35@i.com', key='email')\nuser35@i.com", "source": "juraj-google-style"}
{"code": "def write_table(self, table, rows, append=False, gzip=False):\n        \n        _write_table(self.root,\n                     table,\n                     rows,\n                     self.table_relations(table),\n                     append=append,\n                     gzip=gzip,\n                     encoding=self.encoding)", "docstring": "Encode and write out *table* to the profile directory.\n\nArgs:\ntable: The name of the table to write\nrows: The rows to write to the table\nappend: If `True`, append the encoded rows to any existing\ndata.\ngzip: If `True`, compress the resulting table with `gzip`.\nThe table's filename will have `.gz` appended.", "source": "juraj-google-style"}
{"code": "def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):\n        \n        if forbidden_item_ids is None:\n            forbidden_item_ids = set()\n\n        def _children(item_ids):\n            if item_ids is None:\n                items = Item.objects.filter(active=True).prefetch_related('children')\n            else:\n                item_ids = [ii for iis in item_ids.values() for ii in iis]\n                items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')\n            return {\n                item.id: sorted([\n                    _item.id for _item in item.children.all()\n                    if _item.active and _item.id not in forbidden_item_ids\n                ])\n                for item in items if item.id not in forbidden_item_ids\n            }\n\n        if item_ids is None:\n            return self._reachable_graph(None, _children, language=language)\n        else:\n            graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)\n            return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids))", "docstring": "Get a subgraph of items reachable from the given set of items through\nthe 'child' relation.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\ndict: item id -> list of items (child items), root items are\nreferenced by None key", "source": "juraj-google-style"}
{"code": "def update_data(self, index, data):\n    datapack = self.built_embed.to_dict()['fields'][index]\n    self.built_embed.set_field_at(index, name=datapack['name'], value=data, inline=datapack['inline'])", "docstring": "Updates a particular datapack's data\n\nArgs:\nindex (int): The index of the datapack\ndata (str): The new value to set for this datapack", "source": "codesearchnet"}
{"code": "def __init__(self, input_filename=\"lammps.in\", bin=\"lammps\"):\n        \n        self.lammps_bin = bin.split()\n        if not which(self.lammps_bin[-1]):\n            raise RuntimeError(\n                \"LammpsRunner requires the executable {} to be in the path. \"\n                \"Please download and install LAMMPS from \" \\\n                \"http://lammps.sandia.gov. \"\n                \"Don't forget to add the binary to your path\".format(self.lammps_bin[-1]))\n        self.input_filename = input_filename", "docstring": "LAMMPS wrapper\n\nArgs:\ninput_filename (string): input file name\nbin (string): command to run, excluding the input file name", "source": "juraj-google-style"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(PsortTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._analysis_manager = analysis_manager.AnalysisPluginManager\n    self._analysis_plugins = None\n    self._analysis_plugins_output_format = None\n    self._command_line_arguments = None\n    self._deduplicate_events = True\n    self._event_filter_expression = None\n    self._event_filter = None\n    self._knowledge_base = knowledge_base.KnowledgeBase()\n    self._number_of_analysis_reports = 0\n    self._preferred_language = 'en-US'\n    self._process_memory_limit = None\n    self._status_view_mode = status_view.StatusView.MODE_WINDOW\n    self._status_view = status_view.StatusView(self._output_writer, self.NAME)\n    self._stdout_output_writer = isinstance(\n        self._output_writer, tools.StdoutOutputWriter)\n    self._storage_file_path = None\n    self._temporary_directory = None\n    self._time_slice = None\n    self._use_time_slicer = False\n    self._use_zeromq = True\n    self._worker_memory_limit = None\n\n    self.list_analysis_plugins = False\n    self.list_language_identifiers = False\n    self.list_output_modules = False\n    self.list_profilers = False", "docstring": "Initializes the CLI tool object.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def create_failover_dns(self, primary_region='us-east-1'):\n        \n        dns_record = self.generated.dns()['global']\n        zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)\n\n        elb_dns_aws = find_elb(name=self.app_name, env=self.env, region=self.region)\n        elb_dns_zone_id = find_elb_dns_zone_id(name=self.app_name, env=self.env, region=self.region)\n\n        if primary_region in elb_dns_aws:\n            failover_state = 'PRIMARY'\n        else:\n            failover_state = 'SECONDARY'\n        self.log.info(\"%s set as %s record\", elb_dns_aws, failover_state)\n\n        self.log.info('Updating Application Failover URL: %s', dns_record)\n\n        dns_kwargs = {\n            'dns_name': dns_record,\n            'elb_dns_zone_id': elb_dns_zone_id,\n            'elb_aws_dns': elb_dns_aws,\n            'dns_ttl': self.dns_ttl,\n            'failover_state': failover_state,\n        }\n\n        for zone_id in zone_ids:\n            self.log.debug('zone_id: %s', zone_id)\n            update_failover_dns_record(self.env, zone_id, **dns_kwargs)\n\n        return dns_record", "docstring": "Create dns entries in route53 for multiregion failover setups.\n\nArgs:\nprimary_region (str): primary AWS region for failover\nReturns:\nAuto-generated DNS name.", "source": "juraj-google-style"}
{"code": "def _repeated_field_to_json(field, row_value):\n    \n    \n    \n    item_field = copy.deepcopy(field)\n    item_field._mode = \"NULLABLE\"\n    values = []\n    for item in row_value:\n        values.append(_field_to_json(item_field, item))\n    return values", "docstring": "Convert a repeated/array field to its JSON representation.\n\nArgs:\nfield ( \\\n:class:`~google.cloud.bigquery.schema.SchemaField`, \\\n):\nThe SchemaField to use for type conversion and field name. The\nfield mode must equal ``REPEATED``.\nrow_value (Sequence[any]):\nA sequence of values to convert to JSON-serializable values.\n\nReturns:\nList[any]:\nA list of JSON-serializable objects.", "source": "juraj-google-style"}
{"code": "def __contains__(self, id):\n        \n        if not isinstance(id, int):\n            raise TypeError(id)\n        return id in self._map", "docstring": "Return if the spreadsheet has a worksheet with the given id.\n\nArgs:\nid (int): numeric id of the worksheet\nReturns:\nbool: ``True`` if such a worksheet is present else ``False``\nRaises:\nTypeError: if ``id`` is not an ``int``", "source": "juraj-google-style"}
{"code": "def reaction_charge(reaction, compound_charge):\n    charge_sum = 0.0\n    for (compound, value) in reaction.compounds:\n        charge = compound_charge.get(compound.name, float('nan'))\n        charge_sum += (charge * float(value))\n    return charge_sum", "docstring": "Calculate the overall charge for the specified reaction.\n\nArgs:\nreaction: :class:`psamm.reaction.Reaction`.\ncompound_charge: a map from each compound to charge values.", "source": "codesearchnet"}
{"code": "def heightmap_normalize(\n    hm: np.ndarray, mi: float = 0.0, ma: float = 1.0\n) -> None:\n    \n    lib.TCOD_heightmap_normalize(_heightmap_cdata(hm), mi, ma)", "docstring": "Normalize heightmap values between ``mi`` and ``ma``.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nmi (float): The lowest value after normalization.\nma (float): The highest value after normalization.", "source": "juraj-google-style"}
{"code": "def empty(self) -> 'Builder':\n    return self._to_builder(_evaluation.EmptyFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath empty() function.\n\nReturns:\nAn expression that evaluates to True if the parent evaluates to empty.", "source": "github-repos"}
{"code": "def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):\n  \n  if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):\n    \n    if num_lower < 0:\n      num_lower = rows - 1\n    if num_upper < 0:\n      num_upper = cols - 1\n    lower_mask = np.tri(cols, rows, num_lower).T\n    upper_mask = np.tri(rows, cols, num_upper)\n    band = np.ones((rows, cols)) * lower_mask * upper_mask\n    if out_shape:\n      band = band.reshape(out_shape)\n    band = tf.constant(band, tf.float32)\n  else:\n    band = tf.matrix_band_part(\n        tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),\n        tf.cast(num_upper, tf.int64))\n    if out_shape:\n      band = tf.reshape(band, out_shape)\n\n  return band", "docstring": "Matrix band part of ones.\n\nArgs:\nrows: int determining number of rows in output\ncols: int\nnum_lower: int, maximum distance backward. Negative values indicate\nunlimited.\nnum_upper: int, maximum distance forward. Negative values indicate\nunlimited.\nout_shape: shape to reshape output by.\n\nReturns:\nTensor of size rows * cols reshaped into shape out_shape.", "source": "juraj-google-style"}
{"code": "def get_permissions(self, grp_name, resource):\n    self.project_service.set_auth(self._token_project)\n    return self.project_service.get_permissions(grp_name, resource)", "docstring": "Get permissions associated the group has with the given resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data\nmodel object to operate on.\n\nReturns:\n(list): List of permissions.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def _ParseDistributedTrackingIdentifier(self, parser_mediator, uuid_object, origin):\n    if (uuid_object.version == 1):\n        event_data = windows_events.WindowsDistributedLinkTrackingEventData(uuid_object, origin)\n        date_time = dfdatetime_uuid_time.UUIDTime(timestamp=uuid_object.time)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    return '{{{0!s}}}'.format(uuid_object)", "docstring": "Extracts data from a Distributed Tracking identifier.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nuuid_object (uuid.UUID): UUID of the Distributed Tracking identifier.\norigin (str): origin of the event (event source).\n\nReturns:\nstr: UUID string of the Distributed Tracking identifier.", "source": "codesearchnet"}
{"code": "def convolve(image, pixel_filter, channels=3, name=None):\n    with tf.name_scope(name, 'convolve'):\n        tf.compat.v1.assert_type(image, tf.float32)\n        channel_filter = tf.eye(channels)\n        filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, (- 1)), (- 1)) * tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))\n        result_batch = tf.nn.conv2d(tf.stack([image]), filter=filter_, strides=[1, 1, 1, 1], padding='SAME')\n        return result_batch[0]", "docstring": "Perform a 2D pixel convolution on the given image.\n\nArguments:\nimage: A 3D `float32` `Tensor` of shape `[height, width, channels]`,\nwhere `channels` is the third argument to this function and the\nfirst two dimensions are arbitrary.\npixel_filter: A 2D `Tensor`, representing pixel weightings for the\nkernel. This will be used to create a 4D kernel---the extra two\ndimensions are for channels (see `tf.nn.conv2d` documentation),\nand the kernel will be constructed so that the channels are\nindependent: each channel only observes the data from neighboring\npixels of the same channel.\nchannels: An integer representing the number of channels in the\nimage (e.g., 3 for RGB).\n\nReturns:\nA 3D `float32` `Tensor` of the same shape as the input.", "source": "codesearchnet"}
{"code": "def get_request(profile, resource):\n    url = get_url(profile, resource)\n    headers = get_headers(profile)\n    response = requests.get(url, headers=headers)\n    return response.json()", "docstring": "Do a GET request to Github's API.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nresource\nThe part of a Github API URL that comes after ``.../:repo/git``.\nFor instance, for ``.../:repo/git/commits``, it's ``/commits``.\n\nReturns:\nThe body of the response, converted from JSON into a Python dict.", "source": "codesearchnet"}
{"code": "def random_get_int_mean(\n    rnd: Optional[tcod.random.Random], mi: int, ma: int, mean: int\n) -> int:\n    \n    return int(\n        lib.TCOD_random_get_int_mean(\n            rnd.random_c if rnd else ffi.NULL, mi, ma, mean\n        )\n    )", "docstring": "Return a random weighted integer in the range: ``mi`` <= n <= ``ma``.\n\nThe result is affacted by calls to :any:`random_set_distribution`.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\nlow (int): The lower bound of the random range, inclusive.\nhigh (int): The upper bound of the random range, inclusive.\nmean (int): The mean return value.\n\nReturns:\nint: A random weighted integer in the range ``mi`` <= n <= ``ma``.", "source": "juraj-google-style"}
{"code": "def regex(self, regex = None):\n\t\t\n\n\t\t\n\t\tif regex is None:\n\t\t\treturn self._regex\n\n\t\t\n\t\tif self._type != 'string':\n\t\t\tsys.stderr.write('can not set __regex__ for %s' % self._type)\n\t\t\treturn\n\n\t\t\n\t\tif not isinstance(regex, (basestring, _REGEX_TYPE)):\n\t\t\traise ValueError('__regex__')\n\n\t\t\n\t\tself._regex = regex", "docstring": "Regex\n\nSets or gets the regular expression used to validate the Node\n\nArguments:\nregex {str} -- A standard regular expression string\n\nRaises:\nValueError\n\nReturns:\nNone | str", "source": "juraj-google-style"}
{"code": "def can_transition(self, status_from: str, status_to: str) -> bool:\n    if (not self.STATUSES.can_transition(status_from=status_from, status_to=status_to)):\n        _logger.info('`%s` tried to transition from status `%s` to non permitted status `%s`', str(self), status_from, status_to)\n        return False\n    return True", "docstring": "Update the status of the current instance.\n\nReturns:\nboolean: if the instance is updated.", "source": "codesearchnet"}
{"code": "def make_hello_bot_agent() -> DefaultAgent:\n    skill_hello = PatternMatchingSkill(['Hello world'], patterns=['hi', 'hello', 'good day'])\n    skill_bye = PatternMatchingSkill(['Goodbye world', 'See you around'], patterns=['bye', 'chao', 'see you'])\n    skill_fallback = PatternMatchingSkill([\"I don't understand, sorry\", 'I can say \"Hello world\"'])\n    agent = DefaultAgent([skill_hello, skill_bye, skill_fallback], skills_processor=HighestConfidenceSelector())\n    return agent", "docstring": "Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.\n\nThis is agent building tutorial. You can use this .py file to check how hello-bot agent works.\n\nReturns:\nagent: Agent capable of handling several simple greetings.", "source": "codesearchnet"}
{"code": "def get_sample(self, md5):\n    if (len(md5) < 32):\n        md5 = self.get_full_md5(md5, self.sample_collection)\n    sample_info = self.database[self.sample_collection].find_one({'md5': md5})\n    if (not sample_info):\n        return None\n    try:\n        grid_fs_id = sample_info['__grid_fs']\n        sample_info = self.clean_for_serialization(sample_info)\n        sample_info.update({'raw_bytes': self.gridfs_handle.get(grid_fs_id).read()})\n        return sample_info\n    except gridfs.errors.CorruptGridFile:\n        self.database[self.sample_collection].update({'md5': md5}, {'md5': None})\n        return None", "docstring": "Get the sample from the data store.\n\nThis method first fetches the data from datastore, then cleans it for serialization\nand then updates it with 'raw_bytes' item.\n\nArgs:\nmd5: The md5 digest of the sample to be fetched from datastore.\n\nReturns:\nThe sample dictionary or None", "source": "codesearchnet"}
{"code": "def read_vocab_file(file_path):\n    with file_io.FileIO(file_path, 'r') as f:\n        vocab_pd = pd.read_csv(f, header=None, names=['vocab', 'count'], dtype=str, na_filter=False)\n    vocab = vocab_pd['vocab'].tolist()\n    ex_count = vocab_pd['count'].astype(int).tolist()\n    return (vocab, ex_count)", "docstring": "Reads a vocab file to memeory.\n\nArgs:\nfile_path: Each line of the vocab is in the form \"token,example_count\"\n\nReturns:\nTwo lists, one for the vocab, and one for just the example counts.", "source": "codesearchnet"}
{"code": "def save_output(results, output_directory='output'):\n    aggregate_reports = results['aggregate_reports']\n    forensic_reports = results['forensic_reports']\n    if os.path.exists(output_directory):\n        if (not os.path.isdir(output_directory)):\n            raise ValueError('{0} is not a directory'.format(output_directory))\n    else:\n        os.makedirs(output_directory)\n    with open('{0}'.format(os.path.join(output_directory, 'aggregate.json')), 'w', newline='\\n', encoding='utf-8') as agg_json:\n        agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False, indent=2))\n    with open('{0}'.format(os.path.join(output_directory, 'aggregate.csv')), 'w', newline='\\n', encoding='utf-8') as agg_csv:\n        csv = parsed_aggregate_reports_to_csv(aggregate_reports)\n        agg_csv.write(csv)\n    with open('{0}'.format(os.path.join(output_directory, 'forensic.json')), 'w', newline='\\n', encoding='utf-8') as for_json:\n        for_json.write(json.dumps(forensic_reports, ensure_ascii=False, indent=2))\n    with open('{0}'.format(os.path.join(output_directory, 'forensic.csv')), 'w', newline='\\n', encoding='utf-8') as for_csv:\n        csv = parsed_forensic_reports_to_csv(forensic_reports)\n        for_csv.write(csv)\n    samples_directory = os.path.join(output_directory, 'samples')\n    if (not os.path.exists(samples_directory)):\n        os.makedirs(samples_directory)\n    sample_filenames = []\n    for forensic_report in forensic_reports:\n        sample = forensic_report['sample']\n        message_count = 0\n        parsed_sample = forensic_report['parsed_sample']\n        subject = parsed_sample['filename_safe_subject']\n        filename = subject\n        while (filename in sample_filenames):\n            message_count += 1\n            filename = '{0} ({1})'.format(subject, message_count)\n        sample_filenames.append(filename)\n        filename = '{0}.eml'.format(filename)\n        path = os.path.join(samples_directory, filename)\n        with open(path, 'w', newline='\\n', encoding='utf-8') as sample_file:\n            sample_file.write(sample)", "docstring": "Save report data in the given directory\n\nArgs:\nresults (OrderedDict): Parsing results\noutput_directory: The patch to the directory to save in", "source": "codesearchnet"}
{"code": "def validate_restore_function(trackable, registered_name):\n    try:\n        _saver_registry.name_lookup(registered_name)\n    except LookupError:\n        raise ValueError(f\"Error when restoring object {trackable} from checkpoint. This object was saved using a registered saver named '{registered_name}', but this saver cannot be found in the current context.\")\n    if not _saver_registry.get_predicate(registered_name)(trackable):\n        raise ValueError(f\"Object {trackable} was saved with the registered saver named '{registered_name}'. However, this saver cannot be used to restore the object because the predicate does not pass.\")", "docstring": "Validates whether the trackable can be restored with the saver.\n\nWhen using a checkpoint saved with a registered saver, that same saver must\nalso be also registered when loading. The name of that saver is saved to the\ncheckpoint and set in the `registered_name` arg.\n\nArgs:\ntrackable: A `Trackable` object.\nregistered_name: String name of the expected registered saver. This argument\nshould be set using the name saved in a checkpoint.\n\nRaises:\nValueError if the saver could not be found, or if the predicate associated\nwith the saver does not pass.", "source": "github-repos"}
{"code": "def check_whitelist(host, whitelist):\n    if (':' not in host):\n        host = (host + ':80')\n    if (host in whitelist):\n        return True\n    return any((match_host(host, pattern) for pattern in whitelist))", "docstring": "Check a given request host against a whitelist.\n\nArgs:\nhost (str) :\nA host string to compare against a whitelist.\n\nIf the host does not specify a port, then ``\":80\"`` is implicitly\nassumed.\n\nwhitelist (seq[str]) :\nA list of host patterns to match against\n\nReturns:\n``True``, if ``host`` matches any pattern in ``whitelist``, otherwise\n``False``", "source": "codesearchnet"}
{"code": "def CompleteBreakpoint(self, breakpoint_id):\n    with self._lock:\n        self._completed.add(breakpoint_id)\n        if (breakpoint_id in self._active):\n            self._active.pop(breakpoint_id).Clear()", "docstring": "Marks the specified breaking as completed.\n\nAppends the ID to set of completed breakpoints and clears it.\n\nArgs:\nbreakpoint_id: breakpoint ID to complete.", "source": "codesearchnet"}
{"code": "def merge(self, ts):\n        \n        if ts.shape[1:] != self.shape[1:]:\n            raise ValueError('Timeseries to merge must have compatible shapes')\n        indices = np.vstack((self.tspan, ts.tspan)).argsort()\n        return np.vstack((self, ts))[indices]", "docstring": "Merge another timeseries with this one\nArguments:\nts (Timeseries): The two timeseries being merged must have the\nsame shape except for axis 0.\nReturns:\nResulting merged timeseries which can have duplicate time points.", "source": "juraj-google-style"}
{"code": "def reset_logformat_timestamped(logger: logging.Logger,\n                                extraname: str = \"\",\n                                level: int = logging.INFO) -> None:\n    \n    namebit = extraname + \":\" if extraname else \"\"\n    fmt = (\"%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:\" + namebit +\n           \"%(message)s\")\n    \n    reset_logformat(logger, fmt=fmt)\n    \n    logger.setLevel(level)", "docstring": "Apply a simple time-stamped log format to an existing logger, and set\nits loglevel to either ``logging.DEBUG`` or ``logging.INFO``.\n\nArgs:\nlogger: logger to modify\nextraname: additional name to append to the logger's name\nlevel: log level to set", "source": "juraj-google-style"}
{"code": "def parse_meta(meta):\n    \n    resources = {}\n    for name in meta:\n        if name.startswith(\"$\"):\n            continue\n        resources[name] = resource = {}\n        for action in meta[name]:\n            if action.startswith(\"$\"):\n                continue\n            url, httpmethod = res_to_url(name, action)\n            resource[action] = {\n                \"url\": url,\n                \"method\": httpmethod\n            }\n    url_prefix = meta.get(\"$url_prefix\", \"\").rstrip(\"/\")\n    return url_prefix, meta[\"$auth\"][\"header\"].lower(), resources", "docstring": "Parse metadata of API\n\nArgs:\nmeta: metadata of API\nReturns:\ntuple(url_prefix, auth_header, resources)", "source": "juraj-google-style"}
{"code": "def _ConvertRowToUnicode(self, parser_mediator, row):\n    for (key, value) in iter(row.items()):\n        if isinstance(value, py2to3.UNICODE_TYPE):\n            continue\n        try:\n            row[key] = value.decode(self._encoding)\n        except UnicodeDecodeError:\n            replaced_value = value.decode(self._encoding, errors='replace')\n            parser_mediator.ProduceExtractionWarning('error decoding DSV value: {0:s} as {1:s}, characters have been replaced in {2:s}'.format(key, self._encoding, replaced_value))\n            row[key] = replaced_value\n    return row", "docstring": "Converts all strings in a DSV row dict to Unicode.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, bytes]): a row from a DSV file, where the dictionary\nkey contains the column name and the value a binary string.\n\nReturns:\ndict[str, str]: a row from the DSV file, where the dictionary key\ncontains the column name and the value a Unicode string.", "source": "codesearchnet"}
{"code": "def from_surface(renderer, surface):\n        \n        texture = object.__new__(Texture)\n        texture._ptr = check_ptr_err(lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))\n        return texture", "docstring": "Create a texture from an existing surface.\n\nArgs:\nsurface (Surface): The surface containing pixel data used to fill the texture.\n\nReturns:\nTexture: A texture containing the pixels from surface.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def parsemeta(metadataloc):\n    \n\n    \n    if os.path.isdir(metadataloc):\n        metalist = glob.glob(os.path.join(metadataloc, METAPATTERN))\n        if not metalist:\n            raise MTLParseError(\n                \"No files matching metadata file pattern in directory %s.\"\n                % metadataloc)\n        elif len(metalist) > 0:\n            metadatafn = metalist[0]\n            filehandle = open(metadatafn, 'r')\n            if len(metalist) > 1:\n                logging.warning(\n                    \"More than one file in directory match metadata \"\n                    + \"file pattern. Using %s.\" % metadatafn)\n    elif os.path.isfile(metadataloc):\n        metadatafn = metadataloc\n        filehandle = open(metadatafn, 'r')\n        logging.info(\"Using file %s.\" % metadatafn)\n    elif 'L1_METADATA_FILE' in metadataloc:\n        filehandle = StringIO(metadataloc)\n    else:\n        raise MTLParseError(\n            \"File location %s is unavailable \" % metadataloc\n            + \"or doesn't contain a suitable metadata file.\")\n\n    \n    status = 0\n    metadata = {}\n    grouppath = []\n    dictpath = [metadata]\n\n    for line in filehandle:\n        if status == 4:\n            \n            \n            logging.warning(\n                \"Metadata file %s appears to \" % metadatafn\n                + \"have extra lines after the end of the metadata. \"\n                + \"This is probably, but not necessarily, harmless.\")\n        status = _checkstatus(status, line)\n        grouppath, dictpath = _transstat(status, grouppath, dictpath, line)\n\n    return metadata", "docstring": "Parses the metadata from a Landsat image bundle.\n\nArguments:\nmetadataloc: a filename or a directory.\n\nReturns metadata dictionary", "source": "juraj-google-style"}
{"code": "def _all_reduce(self, reduce_op, value, replica_id, options):\n    raise NotImplementedError('_all_reduce must be implemented in descendants.')", "docstring": "All-reduce the `value` across all replicas so that all get the result.\n\n`value` can be a nested structure of tensors or `IndexedSlices`. The\nimplementation should generally batch the all-reduces when possible.\n`options` can be set to hint the batching behavior.\n\nThis API must be called in a replica context.\n\nArgs:\nreduce_op: A `tf.distribute.ReduceOp` value specifying how values should\nbe combined.\nvalue: Value to be reduced. A tensor or a nested structure of tensors or\n`IndexedSlices`.\nreplica_id: An integer indicating the id of the replica where this\nall_reduce is called under. This is the local replica id that ranges\nfrom 0 to len(local_devices) - 1.\noptions: A `tf.distribute.experimental.CommunicationOptions`.\n\nReturns:\nA tensor/IndexedSlices or a nested structure of tensors/IndexedSlices with\nthe reduced values. The structure is the same as `value`.", "source": "github-repos"}
{"code": "def get_container_setting(name, container, settings):\n    ret = dict()\n    ps_cmd = list()\n    ps_cmd_validate = list()\n    container_path = 'IIS:\\\\{0}\\\\{1}'.format(container, name)\n    if (not settings):\n        log.warning('No settings provided')\n        return ret\n    ps_cmd.append('$Settings = @{};')\n    for setting in settings:\n        ps_cmd_validate.extend(['Get-ItemProperty', '-Path', \"'{0}'\".format(container_path), '-Name', \"'{0}'\".format(setting), '-ErrorAction', 'Stop', '|', 'Out-Null;'])\n        ps_cmd.append(\"$Property = Get-ItemProperty -Path '{0}'\".format(container_path))\n        ps_cmd.append(\"-Name '{0}' -ErrorAction Stop;\".format(setting))\n        ps_cmd.append('if (([String]::IsNullOrEmpty($Property) -eq $False) -and')\n        ps_cmd.append(\"($Property.GetType()).Name -eq 'ConfigurationAttribute') {\")\n        ps_cmd.append('$Property = $Property | Select-Object')\n        ps_cmd.append('-ExpandProperty Value };')\n        ps_cmd.append(\"$Settings['{0}'] = [String] $Property;\".format(setting))\n        ps_cmd.append('$Property = $Null;')\n    cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)\n    if (cmd_ret['retcode'] != 0):\n        message = 'One or more invalid property names were specified for the provided container.'\n        raise SaltInvocationError(message)\n    ps_cmd.append('$Settings')\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n        if isinstance(items, list):\n            ret.update(items[0])\n        else:\n            ret.update(items)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n    return ret", "docstring": "Get the value of the setting for the IIS container.\n\n.. versionadded:: 2016.11.0\n\nArgs:\nname (str): The name of the IIS container.\ncontainer (str): The type of IIS container. The container types are:\nAppPools, Sites, SslBindings\nsettings (dict): A dictionary of the setting names and their values.\n\nReturns:\ndict: A dictionary of the provided settings and their values.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools'\nsettings=\"['processModel.identityType']\"", "source": "codesearchnet"}
{"code": "def __getattr__(self, name: str):\n    if name.startswith('__'):\n        raise AttributeError(name)\n    attr = getattr(self._builder, name)\n    if isinstance(attr, expressions.Builder) and self._sealed:\n        raise self._fhir_path_sealed_error(name)\n    return ColumnExpressionBuilder._wrap_any(self, attr)", "docstring": "Redirects to the expressions.Builder when the attribute is not here.\n\nNote that in Python, '__getattribute__' always gets called first (the\nhighest priority). Thus for attributes which has already been defined in\nthis class, they won't be redirected to the expressions.Builder.\n\nArgs:\nname: The attribute name as a string.\n\nReturns:\nThe attribute get from expressions.Builder wrapped with _wrap_any.\n\nRaises:\nAttributeError: if the FHIR path in this class is already sealed, or if\ngetting the attribute from self._builder fails.", "source": "github-repos"}
{"code": "def visit_statements(self, nodes):\n    \n    for node in nodes:\n      if isinstance(node, gast.AST):\n        self.to_prepend.append(deque())\n        self.to_append.append(deque())\n        node = self.visit(node)\n        self.visit_statements(self.to_prepend.pop())\n        if isinstance(node, gast.AST):\n          self.to_insert[-1].append(node)\n        elif node:\n          self.to_insert[-1].extend(node)\n        self.visit_statements(self.to_append.pop())\n      else:\n        self.to_insert[-1].append(node)\n    return self.to_insert[-1]", "docstring": "Visit a series of nodes in a node body.\n\nThis function is factored out so that it can be called recursively on\nstatements that are appended or prepended. This allows e.g. a nested\nexpression to prepend a statement, and that statement can prepend a\nstatement again, etc.\n\nArgs:\nnodes: A list of statements.\n\nReturns:\nA list of transformed statements.", "source": "juraj-google-style"}
{"code": "def wait_while_reachable(self, servers, timeout=60):\n        \n        t_start = time.time()\n        while True:\n            try:\n                for server in servers:\n                    \n                    server_info = self.connection(\n                        hostname=server, timeout=5).admin.command('ismaster')\n                    logger.debug(\"server_info: {server_info}\".format(server_info=server_info))\n                    if int(server_info['ok']) != 1:\n                        raise pymongo.errors.OperationFailure(\"{server} is not reachable\".format(**locals))\n                return True\n            except (KeyError, AttributeError, pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure):\n                if time.time() - t_start > timeout:\n                    return False\n                time.sleep(0.1)", "docstring": "wait while all servers be reachable\nArgs:\nservers - list of servers", "source": "juraj-google-style"}
{"code": "def write_vasp_input(self, vasp_input_set=MPRelaxSet, output_dir=\".\",\n                         create_directory=True, **kwargs):\n        \n        vasp_input_set(self.final_structure, **kwargs).write_input(\n            output_dir, make_dir_if_not_present=create_directory)\n        with open(os.path.join(output_dir, \"transformations.json\"), \"w\") as fp:\n            json.dump(self.as_dict(), fp)", "docstring": "Writes VASP input to an output_dir.\n\nArgs:\nvasp_input_set:\npymatgen.io.vaspio_set.VaspInputSet like object that creates\nvasp input files from structures\noutput_dir: Directory to output files\ncreate_directory: Create the directory if not present. Defaults to\nTrue.\n\\\\*\\\\*kwargs: All keyword args supported by the VASP input set.", "source": "juraj-google-style"}
{"code": "def shape(input, name=None, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    return shape_internal(input, name, optimize=True, out_type=out_type)", "docstring": "Returns the shape of a tensor.\n\nThis operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```python\nt = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\ntf.shape(t)  # [2, 2, 3]\n```\n\nArgs:\ninput: A `Tensor` or `SparseTensor`.\nname: A name for the operation (optional).\nout_type: (Optional) The specified output type of the operation (`int32`\nor `int64`). Defaults to `tf.int32`.\n\nReturns:\nA `Tensor` of type `out_type`.", "source": "github-repos"}
{"code": "def covariance_to_correlations(covariance):\n    diagonal_ind = np.arange(covariance.shape[1])\n    diagonal_els = covariance[(:, diagonal_ind, diagonal_ind)]\n    result = (covariance / np.sqrt((diagonal_els[(:, :, None)] * diagonal_els[(:, None, :)])))\n    result[np.isinf(result)] = 0\n    return np.clip(np.nan_to_num(result), (- 1), 1)", "docstring": "Transform a covariance matrix into a correlations matrix.\n\nThis can be seen as dividing a covariance matrix by the outer product of the diagonal.\n\nAs post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1].\n\nArgs:\ncovariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p).\n\nReturns:\nndarray: the correlations matrix", "source": "codesearchnet"}
{"code": "def delete_unspent_outputs(self, *unspent_outputs):\n        \n        if unspent_outputs:\n            return backend.query.delete_unspent_outputs(\n                                        self.connection, *unspent_outputs)", "docstring": "Deletes the given ``unspent_outputs`` (utxos).\n\nArgs:\n*unspent_outputs (:obj:`tuple` of :obj:`dict`): Variable\nlength tuple or list of unspent outputs.", "source": "juraj-google-style"}
{"code": "def init_app(self, app):\n        \n        app.url_rule_class = partial(NavigationRule, copilot=self)\n        app.context_processor(self.inject_context)", "docstring": "Register the extension with the application.\n\nArgs:\napp (flask.Flask): The application to register with.", "source": "juraj-google-style"}
{"code": "def _get_ssm_parameter(self, p):\n        \n        try:\n            response = self._ssm.get_parameter(Name=p, WithDecryption=True)\n            return response.get('Parameter', {}).get('Value', None)\n        except Exception as ruh_roh:\n            logging.error(ruh_roh, exc_info=False)\n\n        return None", "docstring": "Get parameters from Simple Systems Manager\n\nArgs:\np - a parameter name\n\nReturns:\na value, decrypted if needed, if successful or None if things go\nsideways.", "source": "juraj-google-style"}
{"code": "def GrabObject(self, identifier):\n    if (identifier not in self._values):\n        raise KeyError('Missing cached object for identifier: {0:s}'.format(identifier))\n    cache_value = self._values[identifier]\n    if (not cache_value):\n        raise RuntimeError('Missing cache value for identifier: {0:s}'.format(identifier))\n    cache_value.IncrementReferenceCount()", "docstring": "Grabs a cached object based on the identifier.\n\nThis method increments the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nRaises:\nKeyError: if the VFS object is not found in the cache.\nRuntimeError: if the cache value is missing.", "source": "codesearchnet"}
{"code": "def extract_paths_dead(self, paths, ignore_nopath):\n    if (not self._has_guestfs):\n        raise LagoException('guestfs module not available, cannot '('extract files with libguestfs'))\n    LOGGER.debug('%s: attempting to extract files with libguestfs', self.vm.name())\n    guestfs_tools.extract_paths(disk_path=self.vm.spec['disks'][0]['path'], disk_root=self.vm.spec['disks'][0]['metadata'].get('root-partition', 'root'), paths=paths, ignore_nopath=ignore_nopath)", "docstring": "Extract the given paths from the domain using guestfs.\nUsing guestfs can have side-effects and should be used as a second\noption, mainly when SSH is not available.\n\nArgs:\npaths(list of str): paths to extract\nignore_nopath(boolean): if True will ignore none existing paths.\n\nReturns:\nNone\n\nRaises:\n:exc:`~lago.utils.LagoException`: if :mod:`guestfs` is not\nimportable.\n:exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing\npath was found on the VM, and `ignore_nopath` is True.\n:exc:`~lago.plugins.vm.ExtractPathError`: on failure extracting\nthe files.", "source": "codesearchnet"}
{"code": "def _get_filters(nodes, context):\n    filters = []\n    for node in nodes:\n        for filter_block in sql_context_helpers.get_filters(node, context):\n            filter_sql_expression = _transform_filter_to_sql(filter_block, node, context)\n            filters.append(filter_sql_expression)\n    return filters", "docstring": "Get filters to apply to a list of SqlNodes.\n\nArgs:\nnodes: List[SqlNode], the SqlNodes to get filters for.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nList[Expression], list of SQLAlchemy expressions.", "source": "codesearchnet"}
{"code": "def generate_func_call(name, args=None, kwargs=None):\n    all_args = []\n    if args:\n        all_args.extend(args)\n    if kwargs:\n        all_args.extend(('{}={}'.format(k, v) for (k, v) in kwargs if (v is not None)))\n    return '{}({})'.format(name, ', '.join(all_args))", "docstring": "Generates code to call a function.\n\nArgs:\nname (str): The function name.\nargs (list[str]): Each positional argument.\nkwargs (list[tuple]): Each tuple is (arg: str, value: str). If\nvalue is None, then the keyword argument is omitted. Otherwise,\nif the value is not a string, then str() is called on it.\n\nReturns:\nstr: Code to call a function.", "source": "codesearchnet"}
{"code": "def call(self, y_true, y_pred):\n    raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Invokes the `Loss` instance.\n\nArgs:\ny_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except\nsparse loss functions such as sparse categorical crossentropy where\nshape = `[batch_size, d0, .. dN-1]`\ny_pred: The predicted values. shape = `[batch_size, d0, .. dN]`\n\nReturns:\nLoss values with the shape `[batch_size, d0, .. dN-1]`.", "source": "github-repos"}
{"code": "def bbox2distance(points, bbox, max_num_bins, reg_scale, up, eps=0.1):\n    reg_scale = abs(reg_scale)\n    left = (points[:, 0] - bbox[:, 0]) / (points[..., 2] / reg_scale + 1e-16) - 0.5 * reg_scale\n    top = (points[:, 1] - bbox[:, 1]) / (points[..., 3] / reg_scale + 1e-16) - 0.5 * reg_scale\n    right = (bbox[:, 2] - points[:, 0]) / (points[..., 2] / reg_scale + 1e-16) - 0.5 * reg_scale\n    bottom = (bbox[:, 3] - points[:, 1]) / (points[..., 3] / reg_scale + 1e-16) - 0.5 * reg_scale\n    four_lens = torch.stack([left, top, right, bottom], -1)\n    four_lens, weight_right, weight_left = translate_gt(four_lens, max_num_bins, reg_scale, up)\n    if max_num_bins is not None:\n        four_lens = four_lens.clamp(min=0, max=max_num_bins - eps)\n    return (four_lens.reshape(-1).detach(), weight_right.detach(), weight_left.detach())", "docstring": "Converts bounding box coordinates to distances from a reference point.\n\nArgs:\npoints (Tensor): (n, 4) [x, y, w, h], where (x, y) is the center.\nbbox (Tensor): (n, 4) bounding boxes in \"xyxy\" format.\nmax_num_bins (float): Maximum bin value.\nreg_scale (float): Controlling curvarture of W(n).\nup (Tensor): Controlling upper bounds of W(n).\neps (float): Small value to ensure target < max_num_bins.\n\nReturns:\nTensor: Decoded distances.", "source": "github-repos"}
{"code": "def set_help_intro(self, help_intro):\n    self._help_intro = help_intro", "docstring": "Set an introductory message to help output.\n\nArgs:\nhelp_intro: (RichTextLines) Rich text lines appended to the\nbeginning of the output of the command \"help\", as introductory\ninformation.", "source": "github-repos"}
{"code": "def register(self, cmd: Type[Command]) -> None:\n        \n        self.commands[cmd.command] = cmd", "docstring": "Register a new IMAP command.\n\nArgs:\ncmd: The new command type.", "source": "juraj-google-style"}
{"code": "def qry_create(options):\n    \n    qry_string = filt_end = param_str = \"\"\n    filt_st = \"Filters=[\"\n    param_str_default = \"All\"\n\n    if options.id:\n        qry_string += \"InstanceIds=['%s']\" % (options.id)\n        param_str += \"id: '%s'\" % (options.id)\n        param_str_default = \"\"\n\n    if options.instname:\n        (qry_string, param_str) = qry_helper(bool(options.id),\n                                             qry_string, param_str)\n        filt_end = \"]\"\n        param_str_default = \"\"\n        qry_string += filt_st + (\"{'Name': 'tag:Name', 'Values': ['%s']}\"\n                                 % (options.instname))\n        param_str += \"name: '%s'\" % (options.instname)\n\n    if options.inst_state:\n        (qry_string, param_str) = qry_helper(bool(options.id),\n                                             qry_string, param_str,\n                                             bool(options.instname), filt_st)\n        qry_string += (\"{'Name': 'instance-state-name',\"\n                       \"'Values': ['%s']}\" % (options.inst_state))\n        param_str += \"state: '%s'\" % (options.inst_state)\n        filt_end = \"]\"\n        param_str_default = \"\"\n\n    qry_string += filt_end\n    param_str += param_str_default\n    debg.dprintx(\"\\nQuery String\")\n    debg.dprintx(qry_string, True)\n    debg.dprint(\"param_str: \", param_str)\n    return(qry_string, param_str)", "docstring": "Create query from the args specified and command chosen.\n\nCreates a query string that incorporates the args in the options\nobject, and creates the title for the 'list' function.\n\nArgs:\noptions (object): contains args and data from parser\nReturns:\nqry_string (str): the query to be used against the aws ec2 client.\nparam_str (str): the title to display before the list.", "source": "juraj-google-style"}
{"code": "def realtime(widget, url_name=None, url_regex=None, time_interval=None):\n    if (not hasattr(widget, 'get_updated_content')):\n        raise AttributeError(('Widget %s must implement get_updated_content method.' % widget))\n    elif (not callable(widget.get_updated_content)):\n        raise ValueError(('get_updated_content in widget %s is not callable' % widget))\n    if (url_name is None):\n        if (getattr(widget, 'url_name', None) is not None):\n            url_name = widget.url_name\n        else:\n            url_name = widget.__class__.__name__\n    if (url_name in [w.url_name for w in REALTIME_WIDGETS]):\n        raise ValueError(('URL name %s is already used by another real time widget.' % url_name))\n    if (url_regex is None):\n        if (getattr(widget, 'url_regex', None) is not None):\n            url_regex = widget.url_regex\n        else:\n            url_regex = sha256(url_name.encode('utf-8'))\n            url_regex = url_regex.hexdigest()[:32]\n            url_regex = ('realtime/' + url_regex)\n    if (url_regex in [w.url_regex for w in REALTIME_WIDGETS]):\n        raise ValueError(('URL regex %s is already used by another real time widget.' % url_regex))\n    if (time_interval is None):\n        if (getattr(widget, 'time_interval', None) is not None):\n            time_interval = widget.time_interval\n        else:\n            time_interval = app_settings.default_time_interval\n    from django.views.generic import View\n    from braces.views import AjaxResponseMixin, JSONResponseMixin\n\n    class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):\n\n        def get_data(self):\n            return widget.get_updated_content()\n\n        def get(self, request, *args, **kwargs):\n            return self.get_ajax(request, *args, **kwargs)\n\n        def get_ajax(self, request, *args, **kwargs):\n            return self.render_json_response(self.get_data())\n    PartialResponse.url_name = url_name\n    PartialResponse.url_regex = url_regex\n    PartialResponse.time_interval = time_interval\n    REALTIME_WIDGETS.append(PartialResponse)\n    if (not hasattr(widget, 'url_name')):\n        widget.url_name = url_name\n    if (not hasattr(widget, 'url_regex')):\n        widget.url_regex = url_regex\n    if (not hasattr(widget, 'time_interval')):\n        widget.time_interval = time_interval\n    return widget", "docstring": "Return a widget as real-time.\n\nArgs:\nwidget (Widget): the widget to register and return as real-time.\nurl_name (str): the URL name to call to get updated content.\nurl_regex (regex): the URL regex to be matched.\ntime_interval (int): the interval of refreshment in milliseconds.\n\nReturns:\nWidget: the \"real-timed\" widget.", "source": "codesearchnet"}
{"code": "def remove(self, force=False):\n        \n        return self.client.api.remove_node(self.id, force=force)", "docstring": "Remove this node from the swarm.\n\nArgs:\nforce (bool): Force remove an active node. Default: `False`\n\nReturns:\n`True` if the request was successful.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the node doesn't exist in the swarm.\n\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def extend_validators(raw_validators, override_validators):\n    if (not raw_validators):\n        return override_validators\n    elif (not override_validators):\n        return raw_validators\n    else:\n        def_validators_mapping = _convert_validators_to_mapping(raw_validators)\n        ref_validators_mapping = _convert_validators_to_mapping(override_validators)\n        def_validators_mapping.update(ref_validators_mapping)\n        return list(def_validators_mapping.values())", "docstring": "extend raw_validators with override_validators.\noverride_validators will merge and override raw_validators.\n\nArgs:\nraw_validators (dict):\noverride_validators (dict):\n\nReturns:\nlist: extended validators\n\nExamples:\n>>> raw_validators = [{'eq': ['v1', 200]}, {\"check\": \"s2\", \"expect\": 16, \"comparator\": \"len_eq\"}]\n>>> override_validators = [{\"check\": \"v1\", \"expect\": 201}, {'len_eq': ['s3', 12]}]\n>>> extend_validators(raw_validators, override_validators)\n[\n{\"check\": \"v1\", \"expect\": 201, \"comparator\": \"eq\"},\n{\"check\": \"s2\", \"expect\": 16, \"comparator\": \"len_eq\"},\n{\"check\": \"s3\", \"expect\": 12, \"comparator\": \"len_eq\"}\n]", "source": "codesearchnet"}
{"code": "def create_issue(self, data, params=None):\n    return self._post((self.API_URL + 'issue'), data=data, params=params)", "docstring": "Creates an issue or a sub-task from a JSON representation.\n\nYou can provide two parameters in request's body: update or fields. The fields, that can be set on an issue\ncreate operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is\nnot configured to appear on the issue's Create screen, then it will not be returned in the createmeta response.\nA field validation error will occur if such field is submitted in request.\n\nCreating a sub-task is similar to creating an issue with the following differences:\nissueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and\n\nYou must provide a parent field with the ID or key of the parent issue.\n\nArgs:\ndata:\nparams:\n\nReturns:", "source": "codesearchnet"}
{"code": "def fragmentate(self, give_only_index=False, use_lookup=None):\n    if (use_lookup is None):\n        use_lookup = settings['defaults']['use_lookup']\n    fragments = []\n    pending = set(self.index)\n    self.get_bonds(use_lookup=use_lookup)\n    while pending:\n        index = self.get_coordination_sphere(pending.pop(), use_lookup=True, n_sphere=float('inf'), only_surface=False, give_only_index=True)\n        pending = (pending - index)\n        if give_only_index:\n            fragments.append(index)\n        else:\n            fragment = self.loc[index]\n            fragment._metadata['bond_dict'] = fragment.restrict_bond_dict(self._metadata['bond_dict'])\n            try:\n                fragment._metadata['val_bond_dict'] = fragment.restrict_bond_dict(self._metadata['val_bond_dict'])\n            except KeyError:\n                pass\n            fragments.append(fragment)\n    return fragments", "docstring": "Get the indices of non bonded parts in the molecule.\n\nArgs:\ngive_only_index (bool): If ``True`` a set of indices is returned.\nOtherwise a new Cartesian instance.\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`.\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\nlist: A list of sets of indices or new Cartesian instances.", "source": "codesearchnet"}
{"code": "def list_tasks(target=None):\n    from os import getcwd, chdir\n    from glob import glob\n    original = getcwd()\n    if (target is None):\n        target = _dbdir()\n    chdir(target)\n    result = {}\n    for filename in glob('*.*.json'):\n        (project, task) = filename.split('.')[0:2]\n        if (project not in result):\n            result[project] = []\n        result[project].append(task)\n    chdir(original)\n    return result", "docstring": "Returns a list of all the projects and tasks available in the `acorn`\ndatabase directory.\n\nArgs:\ntarget (str): directory to list the projects for. Defaults to the configured\ndatabase directory.\n\nReturns:\ndict: keys are project names; values are lists of tasks associated with the\nproject.", "source": "codesearchnet"}
{"code": "def deep_update(d, u):\n    for (k, v) in u.items():\n        if isinstance(v, Mapping):\n            d[k] = deep_update(d.get(k, {}), v)\n        elif isinstance(v, list):\n            existing_elements = d.get(k, [])\n            d[k] = (existing_elements + [ele for ele in v if (ele not in existing_elements)])\n        else:\n            d[k] = v\n    return d", "docstring": "Deeply updates a dictionary. List values are concatenated.\n\nArgs:\nd (dict): First dictionary which will be updated\nu (dict): Second dictionary use to extend the first one\n\nReturns:\ndict: The merge dictionary", "source": "codesearchnet"}
{"code": "def map(self, map_fn, desc=None):\n        \n        if desc is None:\n            desc = getattr(map_fn, '__name__', '')\n        desc = u'map({})'.format(desc)\n\n        return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc)", "docstring": "Return a copy of this query, with the values mapped through `map_fn`.\n\nArgs:\nmap_fn (callable): A callable that takes a single argument and returns a new value.\n\nKeyword Args:\ndesc (str): A description of the mapping transform, for use in log message.\nDefaults to the name of the map function.\n\nReturns:\nQuery", "source": "juraj-google-style"}
{"code": "def is50(msg):\n    \n\n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    \n\n    if wrongstatus(d, 1, 3, 11):\n        return False\n\n    if wrongstatus(d, 12, 13, 23):\n        return False\n\n    if wrongstatus(d, 24, 25, 34):\n        return False\n\n    if wrongstatus(d, 35, 36, 45):\n        return False\n\n    if wrongstatus(d, 46, 47, 56):\n        return False\n\n    roll = roll50(msg)\n    if (roll is not None) and abs(roll) > 60:\n        return False\n\n    gs = gs50(msg)\n    if gs is not None and gs > 600:\n        return False\n\n    tas = tas50(msg)\n    if tas is not None and tas > 500:\n        return False\n\n    if (gs is not None) and (tas is not None) and (abs(tas - gs) > 200):\n        return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 5,0\n(Track and turn report)\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream):\n    display_name = mediator.GetDisplayName()\n    data_stream_name = (getattr(data_stream, 'name', '') or '')\n    logger.debug('[ProcessFileEntryDataStream] processing data stream: \"{0:s}\" of file entry: {1:s}'.format(data_stream_name, display_name))\n    mediator.ClearEventAttributes()\n    if (data_stream and self._analyzers):\n        self._AnalyzeDataStream(mediator, file_entry, data_stream.name)\n    self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream)\n    if (not data_stream):\n        return\n    skip_content_extraction = self._CanSkipContentExtraction(file_entry)\n    if skip_content_extraction:\n        display_name = mediator.GetDisplayName()\n        logger.debug('Skipping content extraction of: {0:s}'.format(display_name))\n        self.processing_status = definitions.STATUS_INDICATOR_IDLE\n        return\n    path_spec = copy.deepcopy(file_entry.path_spec)\n    if (data_stream and (not data_stream.IsDefault())):\n        path_spec.data_stream = data_stream.name\n    archive_types = []\n    compressed_stream_types = []\n    if self._process_compressed_streams:\n        compressed_stream_types = self._GetCompressedStreamTypes(mediator, path_spec)\n    if (not compressed_stream_types):\n        archive_types = self._GetArchiveTypes(mediator, path_spec)\n    if archive_types:\n        if self._process_archives:\n            self._ProcessArchiveTypes(mediator, path_spec, archive_types)\n        if (dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types):\n            self._ExtractContentFromDataStream(mediator, file_entry, data_stream.name)\n    elif compressed_stream_types:\n        self._ProcessCompressedStreamTypes(mediator, path_spec, compressed_stream_types)\n    else:\n        self._ExtractContentFromDataStream(mediator, file_entry, data_stream.name)", "docstring": "Processes a specific data stream of a file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry containing the data stream.\ndata_stream (dfvfs.DataStream): data stream or None if the file entry\nhas no data stream.", "source": "codesearchnet"}
{"code": "def run_suite_class(argv=None):\n    cli_args = _parse_cli_args(argv)\n    suite_class = _find_suite_class()\n    if cli_args.list_tests:\n        _print_test_names_for_suite(suite_class)\n        sys.exit(0)\n    test_configs = config_parser.load_test_config_file(cli_args.config, cli_args.test_bed)\n    config_count = len(test_configs)\n    if config_count != 1:\n        logging.error('Expect exactly one test config, found %d', config_count)\n    config = test_configs[0]\n    runner = test_runner.TestRunner(log_dir=config.log_path, testbed_name=config.testbed_name)\n    suite = suite_class(runner, config)\n    test_selector = _parse_raw_test_selector(cli_args.tests)\n    suite.set_test_selector(test_selector)\n    suite_record = SuiteInfoRecord(suite_class_name=suite_class.__name__)\n    console_level = logging.DEBUG if cli_args.verbose else logging.INFO\n    ok = False\n    with runner.mobly_logger(console_level=console_level) as log_path:\n        try:\n            suite.setup_suite(config.copy())\n            try:\n                suite_record.suite_begin()\n                runner.run()\n                ok = runner.results.is_all_pass\n                print(ok)\n            except signals.TestAbortAll:\n                pass\n        finally:\n            suite.teardown_suite()\n            suite_record.suite_end()\n            suite_record.suite_run_display_name = suite.get_suite_run_display_name()\n            suite_record.extras = suite.get_suite_info().copy()\n            _dump_suite_info(suite_record, log_path)\n    if not ok:\n        sys.exit(1)", "docstring": "Executes tests in the test suite.\n\nArgs:\nargv: A list that is then parsed as CLI args. If None, defaults to sys.argv.", "source": "github-repos"}
{"code": "def trainable_variables(self):\n    return tuple((v for v in self.variables if v.trainable))", "docstring": "A sequence of trainable variables accessed by this FuncGraph.\n\nNote that functions keep only weak references to variables. Calling the\nfunction after a variable it accesses has been deleted is an error.\n\nReturns:\nSequence of trainable variables for this func graph.", "source": "github-repos"}
{"code": "def hwvtep_add_loopback_interface(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        id = kwargs.pop('int_id')\n        ip_args = dict(name=name, loopback_id=id)\n        method_name = 'overlay_gateway_ip_interface_loopback_loopback_id'\n        method_class = self._brocade_tunnels\n        gw_attr = getattr(method_class, method_name)\n        config = gw_attr(**ip_args)\n        output = self._callback(config)\n        return output", "docstring": "Add loopback interface to the overlay-gateway\n\nArgs:\nname  (str): gateway-name\nint_id (int): loopback inteface id\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def build_losses(self, logits_real, logits_fake):\n        \n        with tf.name_scope(\"GAN_loss\"):\n            score_real = tf.sigmoid(logits_real)\n            score_fake = tf.sigmoid(logits_fake)\n            tf.summary.histogram('score-real', score_real)\n            tf.summary.histogram('score-fake', score_fake)\n\n            with tf.name_scope(\"discrim\"):\n                d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n                    logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')\n                d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n                    logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')\n\n                d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')\n                d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')\n\n                d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')\n                self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')\n\n            with tf.name_scope(\"gen\"):\n                self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n                    logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')\n                g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')\n\n            add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)", "docstring": "Build standard GAN loss and set `self.g_loss` and `self.d_loss`.\n\nD and G play two-player minimax game with value function V(G,D)\n\nmin_G max _D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))]\n\nArgs:\nlogits_real (tf.Tensor): discrim logits from real samples\nlogits_fake (tf.Tensor): discrim logits from fake samples produced by generator", "source": "juraj-google-style"}
{"code": "def ScanForStorageMediaImage(self, source_path_spec):\n    try:\n        type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(source_path_spec, resolver_context=self._resolver_context)\n    except RuntimeError as exception:\n        raise errors.BackEndError('Unable to process source path specification with error: {0!s}'.format(exception))\n    if (not type_indicators):\n        file_system = resolver.Resolver.OpenFileSystem(source_path_spec, resolver_context=self._resolver_context)\n        raw_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)\n        try:\n            glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)\n        except errors.PathSpecError:\n            glob_results = None\n        file_system.Close()\n        if (not glob_results):\n            return None\n        return raw_path_spec\n    if (len(type_indicators) > 1):\n        raise errors.BackEndError('Unsupported source found more than one storage media image types.')\n    return path_spec_factory.Factory.NewPathSpec(type_indicators[0], parent=source_path_spec)", "docstring": "Scans the path specification for a supported storage media image format.\n\nArgs:\nsource_path_spec (PathSpec): source path specification.\n\nReturns:\nPathSpec: storage media image path specification or None if no supported\nstorage media image type was found.\n\nRaises:\nBackEndError: if the source cannot be scanned or more than one storage\nmedia image type is found.", "source": "codesearchnet"}
{"code": "def _GetStringValue(self, data_dict, name, default_value=None):\n    \n    values = data_dict.get(name, None)\n    if not values:\n      return default_value\n\n    for index, value in enumerate(values):\n      if ',' in value:\n        values[index] = '\"{0:s}\"'.format(value)\n\n    return ', '.join(values)", "docstring": "Retrieves a specific string value from the data dict.\n\nArgs:\ndata_dict (dict[str, list[str]): values per name.\nname (str): name of the value to retrieve.\ndefault_value (Optional[object]): value to return if the name has no value\nset in data_dict.\n\nReturns:\nstr: value represented as a string.", "source": "juraj-google-style"}
{"code": "def _CallMethod(self, srvc, method_descriptor,\n                  rpc_controller, request, callback):\n    \n    if method_descriptor.containing_service != self.descriptor:\n      raise RuntimeError(\n          'CallMethod() given method descriptor for wrong service type.')\n    method = getattr(srvc, method_descriptor.name)\n    return method(rpc_controller, request, callback)", "docstring": "Calls the method described by a given method descriptor.\n\nArgs:\nsrvc: Instance of the service for which this method is called.\nmethod_descriptor: Descriptor that represent the method to call.\nrpc_controller: RPC controller to use for this method's execution.\nrequest: Request protocol message.\ncallback: A callback to invoke after the method has completed.", "source": "juraj-google-style"}
{"code": "def __init__(self, domain_postfix='_domain'):\n        \n        \n        super(ReverseDNS, self).__init__()\n\n        self.domain_postfix = domain_postfix\n        self.ip_lookup_cache = cache.Cache(timeout=600)\n\n        \n        self.output_stream = self.process_for_rdns()", "docstring": "Initialize ReverseDNS Class\n\nArgs:\ndomain_postfix: the string to be appended to the ip fields (e.g. IP.src -> IP.src_domain)", "source": "juraj-google-style"}
{"code": "def add(self, distinguished_name, object_class, attributes):\n        \n        self.conn.add(distinguished_name, object_class, attributes)", "docstring": "Add object to LDAP.\n\nArgs:\ndistinguished_name: the DN of the LDAP record to be added\nobject_class: The objectClass of the record to be added.\nThis is a list of length >= 1.\nattributes: a dictionary of LDAP attributes to add\nSee ldap_tools.api.group.API#__ldap_attr", "source": "juraj-google-style"}
{"code": "def set_reprompt_text(self, text):\n        \n        self.response.reprompt.outputSpeech.type = 'PlainText'\n        self.response.reprompt.outputSpeech.text = text", "docstring": "Set response reprompt output speech as plain text type.\n\nArgs:\ntext: str. Response speech used when type is 'PlainText'. Cannot\nexceed 8,000 characters.", "source": "juraj-google-style"}
{"code": "def __register_methods(self, parsed_config):\n    methods = parsed_config.get('methods')\n    if (not methods):\n        return\n    for (method_name, method) in methods.iteritems():\n        self.__api_methods[method_name] = method.get('rosyMethod')", "docstring": "Register all methods from the given api config file.\n\nMethods are stored in a map from method_name to rosyMethod,\nthe name of the ProtoRPC method to be called on the backend.\nIf no rosyMethod was specified the value will be None.\n\nArgs:\nparsed_config: The JSON object with the API configuration being added.", "source": "codesearchnet"}
{"code": "def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):\n    line = clean_lines.elided[linenum]\n    match = Match('^(.*\\\\S)&&', line)\n    if (not match):\n        match = Match('(.*)&&\\\\S', line)\n    if ((not match) or ('(&&)' in line) or Search('\\\\boperator\\\\s*$', match.group(1))):\n        return\n    typenames = GetTemplateArgs(clean_lines, linenum)\n    and_pos = len(match.group(1))\n    if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):\n        if (not IsRValueAllowed(clean_lines, linenum, typenames)):\n            error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.')\n    else:\n        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&')", "docstring": "Check for rvalue references.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def save_lines(lines, filename):\n    \n\n    with open(filename, 'w', encoding='utf-8') as f:\n        f.write('\\n'.join(lines))", "docstring": "Save an array of lines to a file.\n\nArgs:\nlines: An array of strings that will be saved as individual lines.\nfilename: Path to the output file.", "source": "juraj-google-style"}
{"code": "def _get_memory_contents(self):\n    if (self._memory_contents is not None):\n        return self._memory_contents\n    schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)\n    self._memory_contents = self._graph.compute_memory_contents_under_schedule(schedule)\n    return self._memory_contents", "docstring": "Runs the scheduler to determine memory contents at every point in time.\n\nReturns:\na list of frozenset of strings, where the ith entry describes the tensors\nin memory when executing operation i (where schedule[i] is an index into\nGetAllOperationNames()).", "source": "codesearchnet"}
{"code": "def issuperset(self, other):\n        \n        other = self._cast_to_frameset(other)\n        if other is NotImplemented:\n            return NotImplemented\n        return self.items >= other.items", "docstring": "Check if the contents of `self` is a superset of the contents of\n`other.`\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def insert(self, lines=None):\n        \n        for i, (key, line) in enumerate(lines.items()):\n            n = key + i\n            first_half = self._lines[:n]\n            last_half = self._lines[n:]\n            self._lines = first_half + [line] + last_half", "docstring": "Insert lines into the editor.\n\nNote:\nTo insert before the first line, use :func:`~exa.core.editor.Editor.preappend`\n(or key 0); to insert after the last line use :func:`~exa.core.editor.Editor.append`.\n\nArgs:\nlines (dict): Dictionary of lines of form (lineno, string) pairs", "source": "juraj-google-style"}
{"code": "def compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, q_filter_width=1, kv_filter_width=1, q_padding='VALID', kv_padding='VALID', vars_3d_num_heads=0, layer_collection=None):\n    if (memory_antecedent is None):\n        memory_antecedent = query_antecedent\n    q = compute_attention_component(query_antecedent, total_key_depth, q_filter_width, q_padding, 'q', vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection)\n    k = compute_attention_component(memory_antecedent, total_key_depth, kv_filter_width, kv_padding, 'k', vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection)\n    v = compute_attention_component(memory_antecedent, total_value_depth, kv_filter_width, kv_padding, 'v', vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection)\n    return (q, k, v)", "docstring": "Computes query, key and value.\n\nArgs:\nquery_antecedent: a Tensor with shape [batch, length_q, channels]\nmemory_antecedent: a Tensor with shape [batch, length_m, channels]\ntotal_key_depth: an integer\ntotal_value_depth: an integer\nq_filter_width: An integer specifying how wide you want the query to be.\nkv_filter_width: An integer specifying how wide you want the keys and values\nto be.\nq_padding: One of \"VALID\", \"SAME\" or \"LEFT\". Default is VALID: No padding.\nkv_padding: One of \"VALID\", \"SAME\" or \"LEFT\". Default is VALID: No padding.\nvars_3d_num_heads: an optional (if we want to use 3d variables)\nlayer_collection: A tensorflow_kfac.LayerCollection. Only used by the\nKFAC optimizer. Default is None.\n\nReturns:\nq, k, v : [batch, length, depth] tensors", "source": "codesearchnet"}
{"code": "def get_all_function_definitions(base_most_function):\n    return ([base_most_function] + [function for derived_contract in base_most_function.contract.derived_contracts for function in derived_contract.functions if (function.full_name == base_most_function.full_name)])", "docstring": "Obtains all function definitions given a base-most function. This includes the provided function, plus any\noverrides of that function.\n\nReturns:\n(list): Returns any the provided function and any overriding functions defined for it.", "source": "codesearchnet"}
{"code": "def _simple_name(distribution):\n    simple_name = distribution.name\n    if simple_name.endswith('/'):\n        simple_name = simple_name.split('/')[(- 2)]\n    parts = simple_name.split('_')\n    if parts[(- 1)].isdigit():\n        simple_name = '_'.join(parts[:(- 1)])\n    return simple_name", "docstring": "Infer the original name passed into a distribution constructor.\n\nDistributions typically follow the pattern of\nwith.name_scope(name) as name:\nsuper(name=name)\nso we attempt to reverse the name-scope transformation to allow\naddressing of RVs by the distribution's original, user-visible\nname kwarg.\n\nArgs:\ndistribution: a tfd.Distribution instance.\nReturns:\nsimple_name: the original name passed into the Distribution.\n\n#### Example\n\n```\nd1 = tfd.Normal(0., 1., name='x') # d1.name = 'x/'\nd2 = tfd.Normal(0., 1., name='x') # d2.name = 'x_2/'\n_simple_name(d2) # returns 'x'\n\n```", "source": "codesearchnet"}
{"code": "class TFXGLMModel(TFXGLMPreTrainedModel):\n\n    def __init__(self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings]=None, *inputs: Any, **kwargs: Any) -> None:\n        super().__init__(config, *inputs, **kwargs)\n        self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name='model')\n\n    @unpack_inputs\n    @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)\n    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)\n    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False, **kwargs: Any) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:\n        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n        return outputs\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'model', None) is not None:\n            with tf.name_scope(self.model.name):\n                self.model.build(None)", "docstring": "Transformer decoder consisting of *config.num_layers* layers. Each layer is a [`TFXGLMDecoderLayer`]\n\nArgs:\nconfig: XGLMConfig\nembed_tokens: [TFSharedEmbeddings]: output embedding", "source": "github-repos"}
{"code": "def _manage_location(attr):\n    \n    return property(lambda self: getattr(self, '_%s' % attr),\n                    lambda self, value: self._set_location(attr, value))", "docstring": "Build managed property interface.\n\nArgs:\nattr (str): Property's name\n\nReturns:\nproperty: Managed property interface", "source": "juraj-google-style"}
{"code": "def compute_files(user1, user2, file_list, dir_pre, start_num):\n\n    \n    match_total = 0\n    test_total = 0\n    gold_total = 0\n    for fi in file_list:\n        file1 = dir_pre + user1 + \"/\" + fi + \".txt\"\n        file2 = dir_pre + user2 + \"/\" + fi + \".txt\"\n        if not os.path.exists(file1):\n            print(\"*********Error: \", file1, \"does not exist*********\", file=ERROR_LOG)\n            return -1.00\n        if not os.path.exists(file2):\n            print(\"*********Error: \", file2, \"does not exist*********\", file=ERROR_LOG)\n            return -1.00\n        try:\n            file1_h = open(file1, \"r\")\n            file2_h = open(file2, \"r\")\n        except IOError:\n            print(\"Cannot open the files\", file1, file2, file=ERROR_LOG)\n            break\n        cur_amr1 = smatch.get_amr_line(file1_h)\n        cur_amr2 = smatch.get_amr_line(file2_h)\n        if cur_amr1 == \"\":\n            print(\"AMR 1 is empty\", file=ERROR_LOG)\n            continue\n        if cur_amr2 == \"\":\n            print(\"AMR 2 is empty\", file=ERROR_LOG)\n            continue\n        amr1 = amr.AMR.parse_AMR_line(cur_amr1)\n        amr2 = amr.AMR.parse_AMR_line(cur_amr2)\n        test_label = \"a\"\n        gold_label = \"b\"\n        amr1.rename_node(test_label)\n        amr2.rename_node(gold_label)\n        (test_inst, test_rel1, test_rel2) = amr1.get_triples()\n        (gold_inst, gold_rel1, gold_rel2) = amr2.get_triples()\n        if verbose:\n            print(\"Instance triples of file 1:\", len(test_inst), file=DEBUG_LOG)\n            print(test_inst, file=DEBUG_LOG)\n            print(\"Attribute triples of file 1:\", len(test_rel1), file=DEBUG_LOG)\n            print(test_rel1, file=DEBUG_LOG)\n            print(\"Relation triples of file 1:\", len(test_rel2), file=DEBUG_LOG)\n            print(test_rel2, file=DEBUG_LOG)\n            print(\"Instance triples of file 2:\", len(gold_inst), file=DEBUG_LOG)\n            print(gold_inst, file=DEBUG_LOG)\n            print(\"Attribute triples of file 2:\", len(gold_rel1), file=DEBUG_LOG)\n            print(gold_rel1, file=DEBUG_LOG)\n            print(\"Relation triples of file 2:\", len(gold_rel2), file=DEBUG_LOG)\n            print(gold_rel2, file=DEBUG_LOG)\n        (best_match, best_match_num) = smatch.get_best_match(test_inst, test_rel1, test_rel2,\n                                                             gold_inst, gold_rel1, gold_rel2,\n                                                             test_label, gold_label)\n        if verbose:\n            print(\"best match number\", best_match_num, file=DEBUG_LOG)\n            print(\"Best Match:\", smatch.print_alignment(best_match, test_inst, gold_inst), file=DEBUG_LOG)\n        match_total += best_match_num\n        test_total += (len(test_inst) + len(test_rel1) + len(test_rel2))\n        gold_total += (len(gold_inst) + len(gold_rel1) + len(gold_rel2))\n        smatch.match_triple_dict.clear()\n    (precision, recall, f_score) = smatch.compute_f(match_total, test_total, gold_total)\n    return \"%.2f\" % f_score", "docstring": "Compute the smatch scores for a file list between two users\nArgs:\nuser1: user 1 name\nuser2: user 2 name\nfile_list: file list\ndir_pre: the file location prefix\nstart_num: the number of restarts in smatch\nReturns:\nsmatch f score.", "source": "juraj-google-style"}
{"code": "def matches(self, spec):\n    if (callable(spec) and (not isinstance(spec, type))):\n        return spec(self)\n    elif isinstance(spec, type):\n        return isinstance(self, spec)\n    specification = (self.__class__.__name__, self.group, self.label)\n    split_spec = (tuple(spec.split('.')) if (not isinstance(spec, tuple)) else spec)\n    (split_spec, nocompare) = zip(*(((None, True) if ((s == '*') or (s is None)) else (s, False)) for s in split_spec))\n    if all(nocompare):\n        return True\n    match_fn = itemgetter(*(idx for (idx, nc) in enumerate(nocompare) if (not nc)))\n    self_spec = match_fn(split_spec)\n    unescaped_match = (match_fn(specification[:len(split_spec)]) == self_spec)\n    if unescaped_match:\n        return True\n    sanitizers = [util.sanitize_identifier, util.group_sanitizer, util.label_sanitizer]\n    identifier_specification = tuple((fn(ident, escape=False) for (ident, fn) in zip(specification, sanitizers)))\n    identifier_match = (match_fn(identifier_specification[:len(split_spec)]) == self_spec)\n    return identifier_match", "docstring": "Whether the spec applies to this object.\n\nArgs:\nspec: A function, spec or type to check for a match\n* A 'type[[.group].label]' string which is compared\nagainst the type, group and label of this object\n* A function which is given the object and returns\na boolean.\n* An object type matched using isinstance.\n\nReturns:\nbool: Whether the spec matched this object.", "source": "codesearchnet"}
{"code": "def split_input(cls, mapper_spec):\n    \n    params = _get_params(mapper_spec)\n    blob_keys = params[cls.BLOB_KEYS_PARAM]\n    if isinstance(blob_keys, basestring):\n      \n      \n      blob_keys = blob_keys.split(\",\")\n\n    blob_sizes = {}\n    for blob_key in blob_keys:\n      blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))\n      blob_sizes[blob_key] = blob_info.size\n\n    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)\n    shards_per_blob = shard_count \n    if shards_per_blob == 0:\n      shards_per_blob = 1\n\n    chunks = []\n    for blob_key, blob_size in blob_sizes.items():\n      blob_chunk_size = blob_size \n      for i in xrange(shards_per_blob - 1):\n        chunks.append(BlobstoreLineInputReader.from_json(\n            {cls.BLOB_KEY_PARAM: blob_key,\n             cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,\n             cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))\n      chunks.append(BlobstoreLineInputReader.from_json(\n          {cls.BLOB_KEY_PARAM: blob_key,\n           cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),\n           cls.END_POSITION_PARAM: blob_size}))\n    return chunks", "docstring": "Returns a list of shard_count input_spec_shards for input_spec.\n\nArgs:\nmapper_spec: The mapper specification to split from. Must contain\n'blob_keys' parameter with one or more blob keys.\n\nReturns:\nA list of BlobstoreInputReaders corresponding to the specified shards.", "source": "juraj-google-style"}
{"code": "def SetInputSourceConfiguration(self, configuration):\n    \n    mount_path = configuration.mount_path\n\n    \n    \n    if mount_path and mount_path.endswith(os.sep):\n      mount_path = mount_path[:-1]\n\n    self._mount_path = mount_path", "docstring": "Sets the input source configuration settings.\n\nArgs:\nconfiguration (InputSourceConfiguration): input source configuration.", "source": "juraj-google-style"}
{"code": "def _use_widgets(objs):\n    from ..models.widgets import Widget\n    return _any(objs, (lambda obj: isinstance(obj, Widget)))", "docstring": "Whether a collection of Bokeh objects contains a any Widget\n\nArgs:\nobjs (seq[Model or Document]) :\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def limit_weights(weights, limit=0.1):\n    if ((1.0 / limit) > len(weights)):\n        raise ValueError('invalid limit -> 1 / limit must be <= len(weights)')\n    if isinstance(weights, dict):\n        weights = pd.Series(weights)\n    if (np.round(weights.sum(), 1) != 1.0):\n        raise ValueError(('Expecting weights (that sum to 1) - sum is %s' % weights.sum()))\n    res = np.round(weights.copy(), 4)\n    to_rebalance = (res[(res > limit)] - limit).sum()\n    ok = res[(res < limit)]\n    ok += ((ok / ok.sum()) * to_rebalance)\n    res[(res > limit)] = limit\n    res[(res < limit)] = ok\n    if any(((x > limit) for x in res)):\n        return limit_weights(res, limit=limit)\n    return res", "docstring": "Limits weights and redistributes excedent amount\nproportionally.\n\nex:\n- weights are {a: 0.7, b: 0.2, c: 0.1}\n- call with limit=0.5\n- excess 0.2 in a is ditributed to b and c\nproportionally.\n- result is {a: 0.5, b: 0.33, c: 0.167}\n\nArgs:\n* weights (Series): A series describing the weights\n* limit (float): Maximum weight allowed", "source": "codesearchnet"}
{"code": "def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):\n    import matplotlib\n    matplotlib.use('agg')\n    import matplotlib.pyplot as plt\n    from matplotlib import patches\n    import numpy as np\n    if (feature.geometry.type not in ('Polygon', 'MultiPolygon')):\n        raise ValueError(('Cannot handle feature of type ' + feature.geometry.type))\n    dpi = 100\n    fig = plt.figure(frameon=False, dpi=dpi)\n    fig.set_size_inches((shape[1] / float(dpi)), (shape[0] / float(dpi)))\n    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])\n    ax.set_axis_off()\n    ax.set_xlim([0, shape[1]])\n    ax.set_ylim([0, shape[0]])\n    fig.add_axes(ax)\n    if (feature.geometry.type == 'Polygon'):\n        coords = [feature.geometry.coordinates]\n    else:\n        coords = feature.geometry.coordinates\n    for poly_coords in coords:\n        for (i, outline) in enumerate(poly_coords):\n            value = (0.0 if (i == 0) else 1.0)\n            outline = np.array(outline)\n            xs = lon_idx(outline[(:, 0)])\n            ys = lat_idx(outline[(:, 1)])\n            poly = patches.Polygon(list(zip(xs, ys)), facecolor=(value, value, value), edgecolor='none', antialiased=True)\n            ax.add_patch(poly)\n    fig.canvas.draw()\n    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n    data = data.reshape((fig.canvas.get_width_height()[::(- 1)] + (3,)))[(:, :, 0)]\n    assert (data.shape[0] == shape[0])\n    assert (data.shape[1] == shape[1])\n    data = (1.0 - (data.astype(float) / 255.0))\n    data = data[(::(- 1), :)]\n    plt.close('all')\n    return data", "docstring": "Convert a GeoJSON polygon feature to a numpy array\n\nArgs:\nfeature (pygeoj.Feature): polygon feature to draw\nshape (tuple(int, int)): shape of 2D target numpy array to draw polygon in\nlat_idx (func): function converting a latitude to the (fractional) row index in the map\nlon_idx (func): function converting a longitude to the (fractional) column index in the map\n\nReturns:\nnp.array: mask, background is zero, foreground is one", "source": "codesearchnet"}
{"code": "def build(self, text, matrix, skim_depth=10, d_weights=False):\n    for anchor in bar(matrix.keys):\n        n1 = text.unstem(anchor)\n        pairs = matrix.anchored_pairs(anchor).items()\n        for (term, weight) in list(pairs)[:skim_depth]:\n            if d_weights:\n                weight = (1 - weight)\n            n2 = text.unstem(term)\n            self.graph.add_edge(n1, n2, weight=float(weight))", "docstring": "1. For each term in the passed matrix, score its KDE similarity with\nall other indexed terms.\n\n2. With the ordered stack of similarities in hand, skim off the top X\npairs and add them as edges.\n\nArgs:\ntext (Text): The source text instance.\nmatrix (Matrix): An indexed term matrix.\nskim_depth (int): The number of siblings for each term.\nd_weights (bool): If true, give \"close\" words low edge weights.", "source": "codesearchnet"}
{"code": "def start(self):\n    resp = self.post('start')\n    if resp.is_fail():\n        return None\n    if ('result' not in resp.data):\n        return None\n    result = resp.data['result']\n    return {'user': result['user'], 'ws_host': result['ws_host']}", "docstring": "Gets the rtm ws_host and user information\n\nReturns:\nNone if request failed,\nelse a dict containing \"user\"(User) and \"ws_host\"", "source": "codesearchnet"}
{"code": "def isna(obj):\n    \n    if isinstance(obj, BasePandasDataset):\n        return obj.isna()\n    else:\n        return pandas.isna(obj)", "docstring": "Detect missing values for an array-like object.\nArgs:\nobj: Object to check for null or missing values.\n\nReturns:\nbool or array-like of bool", "source": "juraj-google-style"}
{"code": "def set_tag(self, key, value, update_session=True):\n        \n        existing_tags = {x.key: x for x in self.tags}\n        if key in existing_tags:\n            tag = existing_tags[key]\n\n            if tag.value == value:\n                return False\n\n            tag.value = value\n        else:\n            tag = Tag()\n            tag.resource_id = self.id\n            tag.key = key\n            tag.value = value\n            self.tags.append(tag)\n\n        if update_session:\n            db.session.add(tag)\n        return True", "docstring": "Create or set the value of the tag with `key` to `value`. Returns `True` if the tag was created or updated or\n`False` if there were no changes to be made.\n\nArgs:\nkey (str): Key of the tag\nvalue (str): Value of the tag\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. Default: True\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def to_grayscale(img):\n    \n    gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float)\n\n    imbands = img.getbands()\n    alpha = None\n    if 'A' in imbands:\n        alpha = numpy.asarray(img.split()[-1]).astype(numpy.float)\n\n    return gray, alpha", "docstring": "Convert PIL image to numpy grayscale array and numpy alpha array.\n\nArgs:\nimg (PIL.Image): PIL Image object.\n\nReturns:\n(gray, alpha): both numpy arrays.", "source": "juraj-google-style"}
{"code": "def match_exists(self, field, required=True, new_group=False):\n    return self.match_field(field, '*', required=required, new_group=new_group)", "docstring": "Require a field to exist in the results.\nMatches will have some value in ``field``.\n\nArguments:\nfield (str): The field to check.\nThe field must be namespaced according to Elasticsearch rules\nusing the dot syntax.\nFor example, ``\"mdf.source_name\"`` is the ``source_name`` field\nof the ``mdf`` dictionary.\nrequired (bool): If ``True``, will add term with ``AND``.\nIf ``False``, will use ``OR``. **Default:** ``True``.\nnew_group (bool): If ``True``, will separate the term into a new parenthetical group.\nIf ``False``, will not.\n**Default:** ``False``.\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def get(quantity, min_type=EventType.firstevent, max_type=EventType.lastevent):\n    return _peep(quantity, lib.SDL_GETEVENT, min_type, max_type)", "docstring": "Return events at the front of the event queue, within the specified minimum and maximum type,\nand remove them from the queue.\n\nArgs:\nquantity (int): The maximum number of events to return.\nmin_type (int): The minimum value for the event type of the returned events.\nmax_type (int): The maximum value for the event type of the returned events.\n\nReturns:\nList[Event]: Events from the front of the event queue.\n\nRaises:\nSDLError: If there was an error retrieving the events.", "source": "codesearchnet"}
{"code": "def Install(self, apk_path, destination_dir='', replace_existing=True, grant_permissions=False, timeout_ms=None, transfer_progress_callback=None):\n    if (not destination_dir):\n        destination_dir = '/data/local/tmp/'\n    basename = os.path.basename(apk_path)\n    destination_path = posixpath.join(destination_dir, basename)\n    self.Push(apk_path, destination_path, timeout_ms=timeout_ms, progress_callback=transfer_progress_callback)\n    cmd = ['pm install']\n    if grant_permissions:\n        cmd.append('-g')\n    if replace_existing:\n        cmd.append('-r')\n    cmd.append('\"{}\"'.format(destination_path))\n    ret = self.Shell(' '.join(cmd), timeout_ms=timeout_ms)\n    rm_cmd = ['rm', destination_path]\n    rmret = self.Shell(' '.join(rm_cmd), timeout_ms=timeout_ms)\n    return ret", "docstring": "Install an apk to the device.\n\nDoesn't support verifier file, instead allows destination directory to be\noverridden.\n\nArgs:\napk_path: Local path to apk to install.\ndestination_dir: Optional destination directory. Use /system/app/ for\npersistent applications.\nreplace_existing: whether to replace existing application\ngrant_permissions: If True, grant all permissions to the app specified in its manifest\ntimeout_ms: Expected timeout for pushing and installing.\ntransfer_progress_callback: callback method that accepts filename, bytes_written and total_bytes of APK transfer\n\nReturns:\nThe pm install output.", "source": "codesearchnet"}
{"code": "def from_sample_rate(sample_rate, n_bands, always_even=False):\n        \n        fb = FrequencyBand(0, sample_rate.nyquist)\n        return LinearScale(fb, n_bands, always_even=always_even)", "docstring": "Return a :class:`~zounds.spectral.LinearScale` instance whose upper\nfrequency bound is informed by the nyquist frequency of the sample rate.\n\nArgs:\nsample_rate (SamplingRate): the sample rate whose nyquist frequency\nwill serve as the upper frequency bound of this scale\nn_bands (int): the number of evenly-spaced frequency bands", "source": "juraj-google-style"}
{"code": "def _tower_loss(images, labels, num_classes, scope, reuse_variables=None):\n    restore_logits = (not FLAGS.fine_tune)\n    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):\n        logits = inception.inference(images, num_classes, for_training=True, restore_logits=restore_logits, scope=scope)\n    split_batch_size = images.get_shape().as_list()[0]\n    inception.loss(logits, labels, batch_size=split_batch_size)\n    losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)\n    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n    total_loss = tf.add_n((losses + regularization_losses), name='total_loss')\n    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n    loss_averages_op = loss_averages.apply((losses + [total_loss]))\n    for l in (losses + [total_loss]):\n        loss_name = re.sub(('%s_[0-9]*/' % inception.TOWER_NAME), '', l.op.name)\n        tf.summary.scalar((loss_name + ' (raw)'), l)\n        tf.summary.scalar(loss_name, loss_averages.average(l))\n    with tf.control_dependencies([loss_averages_op]):\n        total_loss = tf.identity(total_loss)\n    return total_loss", "docstring": "Calculate the total loss on a single tower running the ImageNet model.\n\nWe perform 'batch splitting'. This means that we cut up a batch across\nmultiple GPU's. For instance, if the batch size = 32 and num_gpus = 2,\nthen each tower will operate on an batch of 16 images.\n\nArgs:\nimages: Images. 4D tensor of size [batch_size, FLAGS.image_size,\nFLAGS.image_size, 3].\nlabels: 1-D integer Tensor of [batch_size].\nnum_classes: number of classes\nscope: unique prefix string identifying the ImageNet tower, e.g.\n'tower_0'.\n\nReturns:\nTensor of shape [] containing the total loss for a batch of data", "source": "codesearchnet"}
{"code": "def alltoall(self, x, mesh_axis, split_axis, concat_axis):\n    return self._collective_with_groups(x, [mesh_axis], functools.partial(alltoall_ring, split_axis=split_axis, concat_axis=concat_axis))", "docstring": "Grouped alltoall.\n\nArgs:\nx: a LaidOutTensor\nmesh_axis: an integer the mesh axis along which to group\nsplit_axis: an integer (the Tensor axis along which to split)\nconcat_axis: an integer (the Tensor axis along which to concatenate)\nReturns:\na LaidOutTensor", "source": "codesearchnet"}
{"code": "def _extract_units(self, obj, value):\n    if isinstance(value, dict):\n        if ('units' in value):\n            value = copy(value)\n        units = value.pop('units', None)\n        if units:\n            self.units_prop.__set__(obj, units)\n    return value", "docstring": "Internal helper for dealing with units associated units properties\nwhen setting values on |UnitsSpec| properties.\n\nWhen ``value`` is a dict, this function may mutate the value of the\nassociated units property.\n\nArgs:\nobj (HasProps) : instance to update units spec property value for\nvalue (obj) : new value to set for the property\n\nReturns:\ncopy of ``value``, with 'units' key and value removed when\napplicable", "source": "codesearchnet"}
{"code": "def sg_summary_gradient(tensor, gradient, prefix=None, name=None):\n    r\n    \n    prefix = '' if prefix is None else prefix + '/'\n    \n    name = prefix + _pretty_name(tensor) if name is None else prefix + name\n    \n    \n    _scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))\n    _histogram(name + '/grad-h', tf.abs(gradient))", "docstring": "r\"\"\"Register `tensor` to summary report as `gradient`\n\nArgs:\ntensor: A `Tensor` to log as gradient\ngradient: A 0-D `Tensor`. A gradient to log\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def teleport(self, location=None, rotation=None):\n    val = 0\n    if (location is not None):\n        val += 1\n        np.copyto(self._teleport_buffer, location)\n    if (rotation is not None):\n        np.copyto(self._rotation_buffer, rotation)\n        val += 2\n    self._teleport_bool_buffer[0] = val", "docstring": "Teleports the agent to a specific location, with a specific rotation.\n\nArgs:\nlocation (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters.\nIf None, keeps the current location. Defaults to None.\nrotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent.\nIf None, keeps the current rotation. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def load_sst(path=None, url='http:\n    if (path is None):\n        path = os.path.expanduser('~/stanford_sentiment_treebank/')\n        makedirs(path, exist_ok=True)\n    fnames = download_sst(path, url)\n    return {key: import_tree_corpus(value) for (key, value) in fnames.items()}", "docstring": "Download and read in the Stanford Sentiment Treebank dataset\ninto a dictionary with a 'train', 'dev', and 'test' keys. The\ndictionary keys point to lists of LabeledTrees.\n\nArguments:\n----------\npath : str, (optional defaults to ~/stanford_sentiment_treebank),\ndirectory where the corpus should be downloaded (and\nimported from).\nurl : str, where the corpus should be downloaded from (defaults\nto nlp.stanford.edu address).\n\nReturns:\n--------\ndict : loaded dataset", "source": "codesearchnet"}
{"code": "def unfold_tensor(tensor, max_seq_len):\n    _, _, D = tensor.shape\n    tensor = tensor.transpose(-1, -2)\n    tensor = F.unfold(tensor[..., None, :], kernel_size=(1, max_seq_len), stride=(1, max_seq_len))\n    new_bsz, _, slen = tensor.shape\n    tensor = tensor.view(new_bsz, -1, max_seq_len, slen)\n    tensor = tensor.permute(0, 3, 2, 1)\n    tensor = tensor.view(-1, max_seq_len, D).contiguous()\n    return tensor", "docstring": "For a given tensor with shape of (N, T, D), if sequence length T is longer than max_seq_len,\nthis function unfold it to a (NT', max_seq_len, D) where T' is T // max_seq_len.\nArgs:\ntensor: N, T, D", "source": "github-repos"}
{"code": "def mark_flag_as_required(flag_name, flag_values=FLAGS):\n    if (flag_values[flag_name].default is not None):\n        warnings.warn(('Flag %s has a non-None default value; therefore, mark_flag_as_required will pass even if flag is not specified in the command line!' % flag_name))\n    register_validator(flag_name, (lambda value: (value is not None)), message=('Flag --%s must be specified.' % flag_name), flag_values=flag_values)", "docstring": "Ensures that flag is not None during program execution.\n\nRegisters a flag validator, which will follow usual validator rules.\nImportant note: validator will pass for any non-None value, such as False,\n0 (zero), '' (empty string) and so on.\n\nIt is recommended to call this method like this:\n\nif __name__ == '__main__':\ngflags.mark_flag_as_required('your_flag_name')\napp.run()\n\nBecause validation happens at app.run() we want to ensure required-ness\nis enforced at that time.  However, you generally do not want to force\nusers who import your code to have additional required flags for their\nown binaries or tests.\n\nArgs:\nflag_name: string, name of the flag\nflag_values: FlagValues\nRaises:\nAttributeError: if flag_name is not registered as a valid flag name.", "source": "codesearchnet"}
{"code": "def owned_by(self, owner, also_check_group=False):\n        \n        if also_check_group:\n            return self.owner == owner and self.group == owner\n        else:\n            return self.owner == owner", "docstring": "Checks if the specified user or user and group own the file.\n\nArgs:\nowner (str): the user (or group) name for which we ask about ownership\nalso_check_group (bool): if set to True, both user owner and group owner checked\nif set to False, only user owner checked\n\nReturns:\nbool: True if owner of the file is the specified owner", "source": "juraj-google-style"}
{"code": "class JsonPipelineDataFormat(PipelineDataFormat):\n\n    def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False):\n        super().__init__(output_path, input_path, column, overwrite=overwrite)\n        with open(input_path, 'r') as f:\n            self._entries = json.load(f)\n\n    def __iter__(self):\n        for entry in self._entries:\n            if self.is_multi_columns:\n                yield {k: entry[c] for k, c in self.column}\n            else:\n                yield entry[self.column[0]]\n\n    def save(self, data: dict):\n        \n        with open(self.output_path, 'w') as f:\n            json.dump(data, f)", "docstring": "Support for pipelines using JSON file format.\n\nArgs:\noutput_path (`str`): Where to save the outgoing data.\ninput_path (`str`): Where to look for the input data.\ncolumn (`str`): The column to read.\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to overwrite the `output_path`.", "source": "github-repos"}
{"code": "def GetWindowsEventMessage(self, log_source, message_identifier):\n    database_reader = self._GetWinevtRcDatabaseReader()\n    if (not database_reader):\n        return None\n    if (self._lcid != self.DEFAULT_LCID):\n        message_string = database_reader.GetMessage(log_source, self.lcid, message_identifier)\n        if message_string:\n            return message_string\n    return database_reader.GetMessage(log_source, self.DEFAULT_LCID, message_identifier)", "docstring": "Retrieves the message string for a specific Windows Event Log source.\n\nArgs:\nlog_source (str): Event Log source, such as \"Application Error\".\nmessage_identifier (int): message identifier.\n\nReturns:\nstr: message string or None if not available.", "source": "codesearchnet"}
{"code": "def assert_matches_stdout(actual, expected_stdout, normalize_fn=lambda elem: elem, label=''):\n\n    def stdout_to_python_object(elem_str):\n        try:\n            elem = ast.literal_eval(elem_str)\n        except (SyntaxError, ValueError):\n            elem = elem_str\n        return normalize_fn(elem)\n    actual = actual | label >> beam.Map(stdout_to_python_object)\n    expected = list(map(stdout_to_python_object, expected_stdout))\n    assert_that(actual, equal_to(expected), 'assert ' + label)", "docstring": "Asserts a PCollection of strings matches the expected stdout elements.\n\nArgs:\nactual (beam.PCollection): A PCollection.\nexpected (List[str]): A list of stdout elements, one line per element.\nnormalize_fn (Function[any]): A function to normalize elements before\ncomparing them. Can be used to sort lists before comparing.\nlabel (str): [optional] Label to make transform names unique.", "source": "github-repos"}
{"code": "async def _auth_plain(self, username, password):\n    mechanism = 'PLAIN'\n    credentials = '\\x00{}\\x00{}'.format(username, password)\n    encoded_credentials = SMTP.b64enc(credentials)\n    try:\n        (code, message) = (await self.do_cmd('AUTH', mechanism, encoded_credentials, success=(235, 503)))\n    except SMTPCommandFailedError as e:\n        raise SMTPAuthenticationError(e.code, e.message, mechanism)\n    return (code, message)", "docstring": "Performs an authentication attempt using the PLAIN mechanism.\n\nProtocol:\n\n1. Format the username and password in a suitable way ;\n2. The formatted string is base64-encoded ;\n3. The string 'AUTH PLAIN' and a space character are prepended to\nthe base64-encoded username and password and sent to the\nserver ;\n4. If the server replies with a 235 return code, user is\nauthenticated.\n\nArgs:\nusername (str): Identifier of the user trying to authenticate.\npassword (str): Password for the user.\n\nRaises:\nConnectionResetError: If the connection with the server is\nunexpectedely lost.\nSMTPAuthenticationError: If the authentication attempt fails.\n\nReturns:\n(int, str): A (code, message) 2-tuple containing the server\nresponse.", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    \n    \n    match = self._PARSING_COMPONENTS['msg_left_delimiter'].match\n\n    \n    \n    \n    \n    return match in lines", "docstring": "Verifies whether content corresponds to an SCCM log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def load_ui_wrapper(uifile, base_instance=None):\n    \n    if 'PySide' in __binding__:\n        return pyside_load_ui(uifile, base_instance)\n    elif 'PyQt' in __binding__:\n        uic = __import__(__binding__ + \".uic\").uic\n        return uic.loadUi(uifile, base_instance)", "docstring": "Load a Qt Designer .ui file and returns an instance of the user interface\n\nArgs:\nuifile (str): Absolute path to .ui file\nbase_instance (QWidget): The widget into which UI widgets are loaded\n\nReturns:\nfunction: pyside_load_ui or uic.loadUi", "source": "juraj-google-style"}
{"code": "def __call__(self, *args, **kwargs):\n        \n\n        retry_timedelta = kwargs.pop('retry_timedelta', self._retry_timedelta)\n        if retry_timedelta is None:\n            retry_timedelta = datetime.timedelta(days=1000000)\n\n        num_retries = kwargs.pop('num_retries', self._num_retries)\n        if num_retries is None:\n            num_retries = 1000000\n\n        if os.environ.get('WANDB_TEST'):\n            num_retries = 0\n\n        sleep_base = kwargs.pop('retry_sleep_base', 1)\n\n        \n        check_retry_fn = kwargs.pop('check_retry_fn', self._check_retry_fn)\n\n        first = True\n        sleep = sleep_base\n        start_time = datetime.datetime.now()\n        now = start_time\n\n        self._num_iter = 0\n\n        while True:\n            try:\n                result = self._call_fn(*args, **kwargs)\n                if not first:\n                    wandb.termlog('{} resolved after {}, resuming normal operation.'.format(\n                        self._error_prefix, datetime.datetime.now() - start_time))\n                return result\n            except self._retryable_exceptions as e:\n                \n                if not check_retry_fn(e):\n                    raise\n                if (datetime.datetime.now() - start_time >= retry_timedelta\n                        or self._num_iter >= num_retries):\n                    raise\n                if self._num_iter == 2:\n                    logger.exception('Retry attempt failed:')\n                    wandb.termlog(\n                        '{} ({}), entering retry loop. See {} for full traceback.'.format(\n                            self._error_prefix, e.__class__.__name__, util.get_log_file_path()))\n                if wandb.env.is_debug():\n                    traceback.print_exc()\n            first = False\n            time.sleep(sleep + random.random() * 0.25 * sleep)\n            sleep *= 2\n            if sleep > self.MAX_SLEEP_SECONDS:\n                sleep = self.MAX_SLEEP_SECONDS\n            now = datetime.datetime.now()\n\n            self._num_iter += 1", "docstring": "Call the wrapped function, with retries.\n\nArgs:\nretry_timedelta (kwarg): amount of time to retry before giving up.\nsleep_base (kwarg): amount of time to sleep upon first failure, all other sleeps\nare derived from this one.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.DeployStorageSecret = channel.unary_unary(\n        '/deploy.API/DeployStorageSecret',\n        request_serializer=client_dot_deploy_dot_deploy__pb2.DeployStorageSecretRequest.SerializeToString,\n        response_deserializer=client_dot_deploy_dot_deploy__pb2.DeployStorageSecretResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:\n    if (variable_name not in self):\n        self[variable_name] = (replacement.copy() if isinstance(replacement, Multiset) else replacement)\n    else:\n        existing_value = self[variable_name]\n        if isinstance(existing_value, tuple):\n            if isinstance(replacement, Multiset):\n                if (Multiset(existing_value) != replacement):\n                    raise ValueError\n            elif (replacement != existing_value):\n                raise ValueError\n        elif isinstance(existing_value, Multiset):\n            if (not isinstance(replacement, (tuple, list, Multiset))):\n                raise ValueError\n            compare_value = Multiset(replacement)\n            if (existing_value == compare_value):\n                if (not isinstance(replacement, Multiset)):\n                    self[variable_name] = replacement\n            else:\n                raise ValueError\n        elif (replacement != existing_value):\n            raise ValueError", "docstring": "Try to add the variable with its replacement to the substitution.\n\nThis considers an existing replacement and will only succeed if the new replacement\ncan be merged with the old replacement. Merging can occur if either the two replacements\nare equivalent. Replacements can also be merged if the old replacement for the variable_name was\nunordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:\n\n>>> subst = Substitution({'x': Multiset(['a', 'b'])})\n>>> subst.try_add_variable('x', ('a', 'b'))\n>>> print(subst)\n{x ↦ (a, b)}\n\nArgs:\nvariable:\nThe name of the variable to add.\nreplacement:\nThe replacement for the variable.\n\nRaises:\nValueError:\nif the variable cannot be merged because it conflicts with the existing\nsubstitution for the variable_name.", "source": "codesearchnet"}
{"code": "def fetch_github_pull_request(destination_directory: str, repository: github_repository.GithubRepository, pull_request_number: int, verbose: bool) -> prepared_env.PreparedEnv:\n    branch = 'pull/{}/head'.format(pull_request_number)\n    os.chdir(destination_directory)\n    print('chdir', destination_directory, file=sys.stderr)\n    shell_tools.run_cmd('git', 'init', (None if verbose else '--quiet'), out=sys.stderr)\n    result = _git_fetch_for_comparison(remote=repository.as_remote(), actual_branch=branch, compare_branch='master', verbose=verbose)\n    shell_tools.run_cmd('git', 'branch', (None if verbose else '--quiet'), 'compare_commit', result.compare_commit_id, log_run_to_stderr=verbose)\n    shell_tools.run_cmd('git', 'checkout', (None if verbose else '--quiet'), '-b', 'actual_commit', result.actual_commit_id, log_run_to_stderr=verbose)\n    return prepared_env.PreparedEnv(github_repo=repository, actual_commit_id=result.actual_commit_id, compare_commit_id=result.compare_commit_id, destination_directory=destination_directory, virtual_env_path=None)", "docstring": "Uses content from github to create a dir for testing and comparisons.\n\nArgs:\ndestination_directory: The location to fetch the contents into.\nrepository: The github repository that the commit lives under.\npull_request_number: The id of the pull request to clone. If None, then\nthe master branch is cloned instead.\nverbose: When set, more progress output is produced.\n\nReturns:\nCommit ids corresponding to content to test/compare.", "source": "codesearchnet"}
{"code": "def pool_function(args):\n    \n    is_valid = True\n\n    try:\n        checker = emailahoy.VerifyEmail()\n        status, message = checker.verify_email_smtp(args, from_host='gmail.com', from_email='sample@gmail.com')\n        if status == 250:\n            print(\"\\t[*] Verification of '{}' status: {}. Details:\\n\\t\\t{}\".format(general.success(args), general.success(\"SUCCESS ({})\".format(str(status))), message.replace('\\n', '\\n\\t\\t')))\n            is_valid = True\n        else:\n            print(\"\\t[*] Verification of '{}' status: {}. Details:\\n\\t\\t{}\".format(general.error(args), general.error(\"FAILED ({})\".format(str(status))), message.replace('\\n', '\\n\\t\\t')))\n            is_valid = False\n    except Exception, e:\n        print(general.warning(\"WARNING. An error was found when performing the search. You can omit this message.\\n\" + str(e)))\n        is_valid = False\n\n    aux = {}\n    aux[\"type\"] = \"i3visio.profile\"\n    aux[\"value\"] = \"Email - \" + args\n    aux[\"attributes\"] =  general.expandEntitiesFromEmail(args)\n    platform = aux[\"attributes\"][2][\"value\"].title()\n    aux[\"attributes\"].append({\n            \"type\": \"i3visio.platform\",\n            \"value\": platform,\n            \"attributes\": []\n        }\n    )\n\n    if is_valid:\n        return {\"platform\": platform, \"status\": \"DONE\", \"data\": aux}\n    else:\n        return {\"platform\": platform, \"status\": \"DONE\", \"data\": {}}", "docstring": "A wrapper for being able to launch all the threads.\n\nWe will use python-emailahoy library for the verification.\n\nArgs:\n-----\nargs: reception of the parameters for getPageWrapper as a tuple.\n\nReturns:\n--------\nA dictionary representing whether the verification was ended\nsuccessfully. The format is as follows:\n```\n{\"platform\": \"str(domain[\"value\"])\", \"status\": \"DONE\", \"data\": aux}\n```", "source": "juraj-google-style"}
{"code": "def create_config(cnf_file, uid, overwrite):\n    \n    conf = None\n\n    \n    if not os.path.exists(settings.DEB_CONF_PATH):\n        os.makedirs(settings.DEB_CONF_PATH, 0755)\n        os.chown(settings.DEB_CONF_PATH, uid, -1)\n\n    if not os.path.exists(cnf_file):       \n        conf = CLEAN_CONFIG\n    elif overwrite:                        \n        backup_name = cnf_file + \"_\"\n        if not os.path.exists(backup_name):\n            shutil.copyfile(cnf_file, backup_name)\n            os.chown(backup_name, uid, -1)\n\n        conf = CLEAN_CONFIG\n    else:                                  \n        with open(cnf_file) as f:\n            conf = f.read()\n\n    \n    with open(cnf_file, \"w\") as f:\n        f.write(update_configuration(conf))\n\n    \n    os.chown(cnf_file, uid, -1)\n    os.chmod(cnf_file, 0644)\n\n    symlink = settings.DEB_CONF_PATH + settings.CONF_FILE\n    if not settings.is_deb_system() and not os.path.exists(symlink):\n        os.symlink(cnf_file, symlink)\n        os.chown(symlink, uid, -1)\n        os.chmod(symlink, 0644)", "docstring": "Creates configuration file and the directory where it should be stored and\nset correct permissions.\n\nArgs:\ncnf_file (str): Path to the configuration file.\nuid (int): User ID - will be used for chown.\noverwrite (bool): Overwrite the configuration with :attr:`CLEAN_CONFIG`.", "source": "juraj-google-style"}
{"code": "def parse_arguments(argv):\n    parser = argparse.ArgumentParser(description='write-to-pubsub')\n    parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')\n    parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)\n    args, _ = parser.parse_known_args(args=argv)\n    return args", "docstring": "Parses the arguments passed to the command line and returns them as an object\n\nArgs:\nargv: The arguments passed to the command line.\n\nReturns:\nThe arguments that are being passed in.", "source": "github-repos"}
{"code": "def get_permissions(self, namespace, explicit=False):\n    if (not isinstance(namespace, Namespace)):\n        namespace = Namespace(namespace)\n    keys = namespace.keys\n    (p, _) = self._check(keys, self.index, explicit=explicit)\n    return p", "docstring": "Returns the permissions level for the specified namespace\n\nArguments:\n\nnamespace -- permissioning namespace (str)\nexplicit -- require explicitly set permissions to the provided namespace\n\nReturns:\n\nint -- permissioning flags", "source": "codesearchnet"}
{"code": "def nhs_check_digit(ninedigits: Union[(str, List[Union[(str, int)]])]) -> int:\n    if ((len(ninedigits) != 9) or (not all((str(x).isdigit() for x in ninedigits)))):\n        raise ValueError('bad string to nhs_check_digit')\n    check_digit = (11 - (sum([(int(d) * f) for (d, f) in zip(ninedigits, NHS_DIGIT_WEIGHTINGS)]) % 11))\n    if (check_digit == 11):\n        check_digit = 0\n    return check_digit", "docstring": "Calculates an NHS number check digit.\n\nArgs:\nninedigits: string or list\n\nReturns:\ncheck digit\n\nMethod:\n\n1. Multiply each of the first nine digits by the corresponding\ndigit weighting (see :const:`NHS_DIGIT_WEIGHTINGS`).\n2. Sum the results.\n3. Take remainder after division by 11.\n4. Subtract the remainder from 11\n5. If this is 11, use 0 instead\nIf it's 10, the number is invalid\nIf it doesn't match the actual check digit, the number is invalid", "source": "codesearchnet"}
{"code": "def add_user(self, group, username):\n        \n        try:\n            self.lookup_id(group)\n        except ldap_tools.exceptions.InvalidResult as err:  \n            raise err from None\n\n        operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}\n        self.client.modify(self.__distinguished_name(group), operation)", "docstring": "Add a user to the specified LDAP group.\n\nArgs:\ngroup: Name of group to update\nusername: Username of user to add\n\nRaises:\nldap_tools.exceptions.InvalidResult:\nResults of the query were invalid.  The actual exception raised\ninherits from InvalidResult.  See #lookup_id for more info.", "source": "juraj-google-style"}
{"code": "def _register_bounds_validator_if_needed(parser, name, flag_values):\n  \n  if parser.lower_bound is not None or parser.upper_bound is not None:\n\n    def checker(value):\n      if value is not None and parser.is_outside_bounds(value):\n        message = '%s is not %s' % (value, parser.syntactic_help)\n        raise _exceptions.ValidationError(message)\n      return True\n\n    _validators.register_validator(name, checker, flag_values=flag_values)", "docstring": "Enforces lower and upper bounds for numeric flags.\n\nArgs:\nparser: NumericParser (either FloatParser or IntegerParser), provides lower\nand upper bounds, and help text to display.\nname: str, name of the flag\nflag_values: FlagValues.", "source": "juraj-google-style"}
{"code": "def get_id(page):\n    start_pos = page.find('<id>')\n    end_pos = page.find('</id>')\n    assert (start_pos != (- 1))\n    assert (end_pos != (- 1))\n    start_pos += len('<id>')\n    return int(page[start_pos:end_pos])", "docstring": "Extract the id from a page.\n\nArgs:\npage: a string\nReturns:\nan integer", "source": "codesearchnet"}
{"code": "def to_jdbc_url(self) -> str:\n    return self._build_jdbc_url(socketFactory='com.google.cloud.sql.postgres.SocketFactory', database_type='postgresql')", "docstring": "Convert options to a properly formatted JDBC URL.\n\nReturns:\nJDBC URL string configured with all options.", "source": "github-repos"}
{"code": "def md(cls, data, force_field, temperature, nsteps, other_settings=None):\n    template_path = os.path.join(cls.template_dir, 'md.txt')\n    with open(template_path) as f:\n        script_template = f.read()\n    settings = (other_settings.copy() if (other_settings is not None) else {})\n    settings.update({'force_field': force_field, 'temperature': temperature, 'nsteps': nsteps})\n    script_filename = 'in.md'\n    return cls(script_template=script_template, settings=settings, data=data, script_filename=script_filename)", "docstring": "Example for a simple MD run based on template md.txt.\n\nArgs:\ndata (LammpsData or str): Data file as a LammpsData\ninstance or path to an existing data file.\nforce_field (str): Combined force field related cmds. For\nexample, 'pair_style eam\\npair_coeff * * Cu_u3.eam'.\ntemperature (float): Simulation temperature.\nnsteps (int): No. of steps to run.\nother_settings (dict): other settings to be filled into\nplaceholders.", "source": "codesearchnet"}
{"code": "def _list_objects(self, client_kwargs, max_request_entries):\n        \n        client_kwargs = self._update_listing_client_kwargs(\n            client_kwargs, max_request_entries)\n\n        with _handle_azure_exception():\n            for obj in self.client.list_directories_and_files(**client_kwargs):\n                yield (obj.name, self._model_to_dict(obj),\n                       isinstance(obj, _Directory))", "docstring": "Lists objects.\n\nargs:\nclient_kwargs (dict): Client arguments.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ngenerator of tuple: object name str, object header dict,\ndirectory bool", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.MemberAdd = channel.unary_unary(\n        '/etcdserverpb.Cluster/MemberAdd',\n        request_serializer=rpc__pb2.MemberAddRequest.SerializeToString,\n        response_deserializer=rpc__pb2.MemberAddResponse.FromString,\n        )\n    self.MemberRemove = channel.unary_unary(\n        '/etcdserverpb.Cluster/MemberRemove',\n        request_serializer=rpc__pb2.MemberRemoveRequest.SerializeToString,\n        response_deserializer=rpc__pb2.MemberRemoveResponse.FromString,\n        )\n    self.MemberUpdate = channel.unary_unary(\n        '/etcdserverpb.Cluster/MemberUpdate',\n        request_serializer=rpc__pb2.MemberUpdateRequest.SerializeToString,\n        response_deserializer=rpc__pb2.MemberUpdateResponse.FromString,\n        )\n    self.MemberList = channel.unary_unary(\n        '/etcdserverpb.Cluster/MemberList',\n        request_serializer=rpc__pb2.MemberListRequest.SerializeToString,\n        response_deserializer=rpc__pb2.MemberListResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def Copy(self, name=None):\n    new = copy.copy(self)\n    new.d = copy.copy(self.d)\n    new.name = (name if (name is not None) else self.name)\n    return new", "docstring": "Returns a copy.\n\nMake a shallow copy of d.  If you want a deep copy of d,\nuse copy.deepcopy on the whole object.\n\nArgs:\nname: string name for the new Hist", "source": "codesearchnet"}
{"code": "def assign_sub(self, delta, use_locking=None, name=None, read_value=True):\n    with _handle_graph(self.handle), self._assign_dependencies():\n        assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)\n    if read_value:\n        return self._lazy_read(assign_sub_op)\n    return assign_sub_op", "docstring": "Subtracts a value from this variable.\n\nArgs:\ndelta: A `Tensor`. The value to subtract from this variable.\nuse_locking: If `True`, use locking during the operation.\nname: The name to use for the operation.\nread_value: A `bool`. Whether to read and return the new value of the\nvariable or not.\n\nReturns:\nIf `read_value` is `True`, this method will return the new value of the\nvariable after the assignment has completed. Otherwise, when in graph mode\nit will return the `Operation` that does the assignment, and when in eager\nmode it will return `None`.", "source": "github-repos"}
{"code": "def locate_file(start_path, file_name):\n    if os.path.isfile(start_path):\n        start_dir_path = os.path.dirname(start_path)\n    elif os.path.isdir(start_path):\n        start_dir_path = start_path\n    else:\n        raise exceptions.FileNotFound('invalid path: {}'.format(start_path))\n    file_path = os.path.join(start_dir_path, file_name)\n    if os.path.isfile(file_path):\n        return os.path.abspath(file_path)\n    if (os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]):\n        raise exceptions.FileNotFound('{} not found in {}'.format(file_name, start_path))\n    return locate_file(os.path.dirname(start_dir_path), file_name)", "docstring": "locate filename and return absolute file path.\nsearching will be recursive upward until current working directory.\n\nArgs:\nstart_path (str): start locating path, maybe file path or directory path\n\nReturns:\nstr: located file path. None if file not found.\n\nRaises:\nexceptions.FileNotFound: If failed to locate file.", "source": "codesearchnet"}
{"code": "def as_text(bytes_or_text, encoding='utf-8'):\n    encoding = codecs.lookup(encoding).name\n    if isinstance(bytes_or_text, str):\n        return bytes_or_text\n    elif isinstance(bytes_or_text, bytes):\n        return bytes_or_text.decode(encoding)\n    else:\n        raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)", "docstring": "Converts any string-like python input types to unicode.\n\nReturns the input as a unicode string. Uses utf-8 encoding for text\nby default.\n\nArgs:\nbytes_or_text: A `bytes`, `str`, or `unicode` object.\nencoding: A string indicating the charset for decoding unicode.\n\nReturns:\nA `unicode` (Python 2) or `str` (Python 3) object.\n\nRaises:\nTypeError: If `bytes_or_text` is not a binary or unicode string.", "source": "github-repos"}
{"code": "def set_precision(predictions, labels, weights_fn=common_layers.weights_nonzero):\n    with tf.variable_scope('set_precision', values=[predictions, labels]):\n        labels = tf.squeeze(labels, [2, 3])\n        weights = weights_fn(labels)\n        labels = tf.one_hot(labels, predictions.shape[(- 1)])\n        labels = tf.reduce_max(labels, axis=1)\n        labels = tf.cast(labels, tf.bool)\n        return (tf.to_float(tf.equal(labels, predictions)), weights)", "docstring": "Precision of set predictions.\n\nArgs:\npredictions : A Tensor of scores of shape [batch, nlabels].\nlabels: A Tensor of int32s giving true set elements,\nof shape [batch, seq_length].\nweights_fn: A function to weight the elements.\n\nReturns:\nhits: A Tensor of shape [batch, nlabels].\nweights: A Tensor of shape [batch, nlabels].", "source": "codesearchnet"}
{"code": "def parse_aggregate_report_file(_input, nameservers=None, dns_timeout=2.0,\n                                parallel=False):\n    \n    xml = extract_xml(_input)\n\n    return parse_aggregate_report_xml(xml,\n                                      nameservers=nameservers,\n                                      timeout=dns_timeout,\n                                      parallel=parallel)", "docstring": "Parses a file at the given path, a file-like object. or bytes as a\naggregate DMARC report\n\nArgs:\n_input: A path to a file, a file like object, or bytes\nnameservers (list): A list of one or more nameservers to use\n(Cloudflare's public DNS resolvers by default)\ndns_timeout (float): Sets the DNS timeout in seconds\nparallel (bool): Parallel processing\n\nReturns:\nOrderedDict: The parsed DMARC aggregate report", "source": "juraj-google-style"}
{"code": "def List(device, device_path):\n    \n    files = device.List(device_path)\n    files.sort(key=lambda x: x.filename)\n    maxname = max(len(f.filename) for f in files)\n    maxsize = max(len(str(f.size)) for f in files)\n    for f in files:\n        mode = (\n                ('d' if stat.S_ISDIR(f.mode) else '-') +\n                ('r' if f.mode & stat.S_IRUSR else '-') +\n                ('w' if f.mode & stat.S_IWUSR else '-') +\n                ('x' if f.mode & stat.S_IXUSR else '-') +\n                ('r' if f.mode & stat.S_IRGRP else '-') +\n                ('w' if f.mode & stat.S_IWGRP else '-') +\n                ('x' if f.mode & stat.S_IXGRP else '-') +\n                ('r' if f.mode & stat.S_IROTH else '-') +\n                ('w' if f.mode & stat.S_IWOTH else '-') +\n                ('x' if f.mode & stat.S_IXOTH else '-'))\n        t = time.gmtime(f.mtime)\n        yield '%s %*d %04d-%02d-%02d %02d:%02d:%02d %-*s\\n' % (\n            mode, maxsize, f.size,\n            t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec,\n            maxname, f.filename)", "docstring": "Prints a directory listing.\n\nArgs:\ndevice_path: Directory to list.", "source": "juraj-google-style"}
{"code": "def make_query(self, ns):\n    \n    if issubclass(self.model_class, db.Model):\n      query = db.Query(self.model_class, namespace=ns)\n      for f in self.filters:\n        query.filter(\"%s %s\" % (f[0], f[1]), f[2])\n    else:\n      query = self.model_class.query(namespace=ns)\n      for f in self.filters:\n        query = query.filter(ndb.FilterNode(*f))\n    return query", "docstring": "Make a query of entities within this range.\n\nQuery options are not supported. They should be specified when the query\nis run.\n\nArgs:\nns: namespace of this query.\n\nReturns:\na db.Query or ndb.Query, depends on the model class's type.", "source": "juraj-google-style"}
{"code": "def load_terms(fo: IO, metadata: dict, forceupdate: bool):\n    version = metadata['metadata']['version']\n    with timy.Timer('Load Terms') as timer:\n        es = bel.db.elasticsearch.get_client()\n        es_version = version.replace('T', '').replace('-', '').replace(':', '')\n        index_prefix = f\"terms_{metadata['metadata']['namespace'].lower()}\"\n        index_name = f'{index_prefix}_{es_version}'\n        if (not elasticsearch.index_exists(es, index_name)):\n            elasticsearch.create_terms_index(es, index_name)\n        elif forceupdate:\n            index_name += '_alt'\n            elasticsearch.create_terms_index(es, index_name)\n        else:\n            return\n        terms_iterator = terms_iterator_for_elasticsearch(fo, index_name)\n        elasticsearch.bulk_load_docs(es, terms_iterator)\n        index_names = elasticsearch.get_all_index_names(es)\n        for name in index_names:\n            if ((name != index_name) and (index_prefix in name)):\n                elasticsearch.delete_index(es, name)\n        elasticsearch.add_index_alias(es, index_name, terms_alias)\n        log.info('Load namespace terms', elapsed=timer.elapsed, namespace=metadata['metadata']['namespace'])\n    with timy.Timer('Load Term Equivalences') as timer:\n        arango_client = arangodb.get_client()\n        belns_db = arangodb.get_belns_handle(arango_client)\n        arangodb.batch_load_docs(belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate='update')\n        log.info('Loaded namespace equivalences', elapsed=timer.elapsed, namespace=metadata['metadata']['namespace'])\n        remove_old_equivalence_edges = f\n        remove_old_equivalence_nodes = f\n        arangodb.aql_query(belns_db, remove_old_equivalence_edges)\n        arangodb.aql_query(belns_db, remove_old_equivalence_nodes)\n    metadata['_key'] = f\"Namespace_{metadata['metadata']['namespace']}\"\n    try:\n        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)\n    except ArangoError as ae:\n        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)", "docstring": "Load terms into Elasticsearch and ArangoDB\n\nForceupdate will create a new index in Elasticsearch regardless of whether\nan index with the resource version already exists.\n\nArgs:\nfo: file obj - terminology file\nmetadata: dict containing the metadata for terminology\nforceupdate: force full update - e.g. don't leave Elasticsearch indexes\nalone if their version ID matches", "source": "codesearchnet"}
{"code": "def __make_id(receiver):\n    \n    if __is_bound_method(receiver):\n        return (id(receiver.__func__), id(receiver.__self__))\n    return id(receiver)", "docstring": "Generate an identifier for a callable signal receiver.\n\nThis is used when disconnecting receivers, where we need to correctly\nestablish equivalence between the input receiver and the receivers assigned\nto a signal.\n\nArgs:\nreceiver: A callable object.\n\nReturns:\nAn identifier for the receiver.", "source": "juraj-google-style"}
{"code": "def keypoint_rot90(keypoint, factor, rows, cols, **params):\n    \n    if factor < 0 or factor > 3:\n        raise ValueError('Parameter n must be in range [0;3]')\n    x, y, angle, scale = keypoint\n    if factor == 1:\n        keypoint = [y, (cols - 1) - x, angle - math.pi / 2, scale]\n    if factor == 2:\n        keypoint = [(cols - 1) - x, (rows - 1) - y, angle - math.pi, scale]\n    if factor == 3:\n        keypoint = [(rows - 1) - y, x, angle + math.pi / 2, scale]\n    return keypoint", "docstring": "Rotates a keypoint by 90 degrees CCW (see np.rot90)\n\nArgs:\nkeypoint (tuple): A tuple (x, y, angle, scale).\nfactor (int): Number of CCW rotations. Must be in range [0;3] See np.rot90.\nrows (int): Image rows.\ncols (int): Image cols.", "source": "juraj-google-style"}
{"code": "def handle_error(program_name, cmd, log=None):\n    print('\\nHouston, we have a problem.', ('\\n%s did not finish successfully. Review the log' % program_name), 'file and the input file(s) to see what went wrong.')\n    print(('%s command: \"%s\"' % (program_name, cmd)))\n    if (log is not None):\n        print(('log: \"%s\"' % log))\n    print('Where do we go from here?')\n    print((' r  - retry running %s (probably after' % program_name), \"you've fixed any problems with the input files)\")\n    print(' c  - continue on with the script (probably after', \"you've manually re-run and generated the desired\", 'output file(s)')\n    print(' x  - exit, keeping the TEMP3D files and log')\n    print(' xd - exit, deleting the TEMP3D files and log')\n    while True:\n        choice = input('Select r, c, x (default), or xd: ')\n        if (choice not in ('r', 'c', 'x', 'xd')):\n            choice = 'x'\n        break\n    if (choice == 'x'):\n        print('Exiting ...')\n        sys.exit(1)\n    elif (choice == 'xd'):\n        print('Deleting TEMP3D* and log files and exiting ...')\n        util.delete_all('TEMP3D*')\n        if (log is not None):\n            os.remove(log)\n        sys.exit(1)\n    elif (choice == 'c'):\n        print('Continuing on ...')\n        break_now = True\n    elif (choice == 'r'):\n        print(('Retrying %s cmd ...' % program_name))\n        break_now = False\n    return break_now", "docstring": "Subprocess program error handling\n\nArgs:\nprogram_name (str): name of the subprocess program\n\nReturns:\nbreak_now (bool): indicate whether calling program should break out of loop", "source": "codesearchnet"}
{"code": "def _get_contexts_for_squash(self, batch_signature):\n    batch = self._batches_by_id[batch_signature].batch\n    index = self._batches.index(batch)\n    contexts = []\n    txns_added_predecessors = []\n    for b in self._batches[index::(- 1)]:\n        batch_is_valid = True\n        contexts_from_batch = []\n        for txn in b.transactions[::(- 1)]:\n            result = self._txn_results[txn.header_signature]\n            if (not result.is_valid):\n                batch_is_valid = False\n                break\n            else:\n                txn_id = txn.header_signature\n                if (txn_id not in txns_added_predecessors):\n                    txns_added_predecessors.append(self._txn_predecessors[txn_id])\n                    contexts_from_batch.append(result.context_id)\n        if batch_is_valid:\n            contexts.extend(contexts_from_batch)\n    return contexts", "docstring": "Starting with the batch referenced by batch_signature, iterate back\nthrough the batches and for each valid batch collect the context_id.\nAt the end remove contexts for txns that are other txn's predecessors.\n\nArgs:\nbatch_signature (str): The batch to start from, moving back through\nthe batches in the scheduler\n\nReturns:\n(list): Context ids that haven't been previous base contexts.", "source": "codesearchnet"}
{"code": "def RegisterPlugin(cls, plugin_class):\n    \n    plugin_name = plugin_class.NAME.lower()\n    if plugin_name in cls._plugin_classes:\n      raise KeyError((\n          'Plugin class already set for name: {0:s}.').format(\n              plugin_class.NAME))\n\n    cls._plugin_classes[plugin_name] = plugin_class", "docstring": "Registers a plugin class.\n\nThe plugin classes are identified based on their lower case name.\n\nArgs:\nplugin_class (type): class of the plugin.\n\nRaises:\nKeyError: if plugin class is already set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def cmd_startstop(options):\n    statelu = {'start': 'stopped', 'stop': 'running'}\n    options.inst_state = statelu[options.command]\n    debg.dprint('toggle set state: ', options.inst_state)\n    (i_info, param_str) = gather_data(options)\n    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)\n    response = awsc.startstop(tar_inst, options.command)\n    responselu = {'start': 'StartingInstances', 'stop': 'StoppingInstances'}\n    filt = responselu[options.command]\n    resp = {}\n    state_term = ('CurrentState', 'PreviousState')\n    for (i, j) in enumerate(state_term):\n        resp[i] = response['{0}'.format(filt)][0]['{0}'.format(j)]['Name']\n    print('Current State: {}{}{}  -  Previous State: {}{}{}\\n'.format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM))", "docstring": "Start or Stop the specified instance.\n\nFinds instances that match args and instance-state expected by the\ncommand.  Then, the target instance is determined, the action is\nperformed on the instance, and the eturn information is displayed.\n\nArgs:\noptions (object): contains args and data from parser.", "source": "codesearchnet"}
{"code": "def exclude(self, scheduled_operation: ScheduledOperation) -> bool:\n    try:\n        self.scheduled_operations.remove(scheduled_operation)\n        return True\n    except ValueError:\n        return False", "docstring": "Omits a scheduled operation from the schedule, if present.\n\nArgs:\nscheduled_operation: The operation to try to remove.\n\nReturns:\nTrue if the operation was present and is now removed, False if it\nwas already not present.", "source": "codesearchnet"}
{"code": "def secure_channel(target, credentials, options=None, *, loop=None, executor=None, standalone_pool_for_streaming=False):\n    return Channel(_grpc.secure_channel(target, credentials, options), loop, executor, standalone_pool_for_streaming)", "docstring": "Creates a secure Channel to a server.\n\nArgs:\ntarget: The server address.\ncredentials: A ChannelCredentials instance.\noptions: An optional list of key-value pairs (channel args in gRPC runtime)\nto configure the channel.\n\nReturns:\nA Channel object.", "source": "codesearchnet"}
{"code": "def Execute(self, http, sleep_between_polls=5, max_retries=5, max_batch_size=None, batch_request_callback=None):\n    requests = [request for request in self.api_requests if (not request.terminal_state)]\n    batch_size = (max_batch_size or len(requests))\n    for attempt in range(max_retries):\n        if attempt:\n            time.sleep(sleep_between_polls)\n        for i in range(0, len(requests), batch_size):\n            batch_http_request = BatchHttpRequest(batch_url=self.batch_url, callback=batch_request_callback, response_encoding=self.response_encoding)\n            for request in itertools.islice(requests, i, (i + batch_size)):\n                batch_http_request.Add(request.http_request, request.HandleResponse)\n            batch_http_request.Execute(http)\n            if hasattr(http.request, 'credentials'):\n                if any((request.authorization_failed for request in itertools.islice(requests, i, (i + batch_size)))):\n                    http.request.credentials.refresh(http)\n        requests = [request for request in self.api_requests if (not request.terminal_state)]\n        if (not requests):\n            break\n    return self.api_requests", "docstring": "Execute all of the requests in the batch.\n\nArgs:\nhttp: httplib2.Http object for use in the request.\nsleep_between_polls: Integer number of seconds to sleep between\npolls.\nmax_retries: Max retries. Any requests that have not succeeded by\nthis number of retries simply report the last response or\nexception, whatever it happened to be.\nmax_batch_size: int, if specified requests will be split in batches\nof given size.\nbatch_request_callback: function of (http_response, exception) passed\nto BatchHttpRequest which will be run on any given results.\n\nReturns:\nList of ApiCalls.", "source": "codesearchnet"}
{"code": "def keys(self, full_grid=False):\n        \n        keys = super(GridSpace, self).keys()\n        if self.ndims == 1 or not full_grid:\n            return keys\n        dim1_keys = sorted(set(k[0] for k in keys))\n        dim2_keys = sorted(set(k[1] for k in keys))\n        return [(d1, d2) for d1 in dim1_keys for d2 in dim2_keys]", "docstring": "Returns the keys of the GridSpace\n\nArgs:\nfull_grid (bool, optional): Return full cross-product of keys\n\nReturns:\nList of keys", "source": "juraj-google-style"}
{"code": "def define_batch_env(constructor, num_agents, env_processes):\n  \n  with tf.variable_scope('environments'):\n    if env_processes:\n      envs = [\n          tools.wrappers.ExternalProcess(constructor)\n          for _ in range(num_agents)]\n    else:\n      envs = [constructor() for _ in range(num_agents)]\n    batch_env = tools.BatchEnv(envs, blocking=not env_processes)\n    batch_env = tools.InGraphBatchEnv(batch_env)\n  return batch_env", "docstring": "Create environments and apply all desired wrappers.\n\nArgs:\nconstructor: Constructor of an OpenAI gym environment.\nnum_agents: Number of environments to combine in the batch.\nenv_processes: Whether to step environment in external processes.\n\nReturns:\nIn-graph environments object.", "source": "juraj-google-style"}
{"code": "def get_sfa_conjecture(self):\n        \n        sfa = SFA(self.alphabet)\n        for s in self.observation_table.sm_vector:\n            transitions = self._get_predicate_guards(\n                s, self.observation_table.training_data[s])\n            for (t, pred) in transitions:\n                src_id = self.observation_table.sm_vector.index(s)\n                dst_id = self.observation_table.sm_vector.index(t)\n                assert isinstance(\n                    pred, SetPredicate), \"Invalid type for predicate {}\".format(pred)\n                sfa.add_arc(src_id, dst_id, pred)\n\n        \n        i = 0\n        for s in self.observation_table.sm_vector:\n            sfa.states[i].final = self.observation_table[s, self.epsilon]\n            i += 1\n        return sfa", "docstring": "Utilize the observation table to construct a Mealy Machine.\nThe library used for representing the Mealy Machine is the python\nbindings of the openFST library (pyFST).\nArgs:\nNone\nReturns:\nMealyMachine: A mealy machine build based on a closed and consistent\nobservation table.", "source": "juraj-google-style"}
{"code": "def serialize_training_step(features, model_fn, batch_dim, num_splits):\n    for v in features.values():\n        mesh = v.mesh\n        graph = v.graph\n    microbatch_dim = Dimension('microbatch', num_splits)\n    smaller_batch_dim = Dimension(batch_dim.name, (batch_dim.size \n    cache = {}\n\n    def select(t, microbatch_num):\n        return gather(replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]), microbatch_num, microbatch_dim)\n\n    def cond_fn(microbatch_num):\n        return less(microbatch_num, num_splits)\n\n    def body_fn(microbatch_num):\n        'Body function for mtf.while_loop.\\n\\n    Args:\\n      microbatch_num: a mtf Scalar\\n    Returns:\\n      a list of mtf Tensors\\n    '\n        my_features = {}\n        for (k, v) in six.iteritems(features):\n            my_features[k] = select(v, microbatch_num)\n        outputs = model_fn(my_features)\n        grads = gradients([outputs['loss']], [v.outputs[0] for v in graph.trainable_variables])\n        output_keys = outputs.keys()\n        cache['output_keys'] = output_keys\n        ret = []\n        ret.append((microbatch_num + 1))\n        for t in outputs.values():\n            if (smaller_batch_dim in t.shape):\n                t = einsum([t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)], output_shape=replace_dimensions(t.shape, smaller_batch_dim, [smaller_batch_dim, microbatch_dim]))\n                t = replace_dimensions(t, [smaller_batch_dim, microbatch_dim], batch_dim)\n                ret.append(t)\n            else:\n                ret.append(t)\n        ret.extend(grads)\n        return ret\n    while_out = while_loop(cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)], has_accumulators=True)\n    num_outputs = len(cache['output_keys'])\n    combined_outputs = {}\n    for (k, v) in zip(cache['output_keys'], while_out[1:(1 + num_outputs)]):\n        combined_outputs[k] = v\n    combined_grads = while_out[(1 + num_outputs):]\n    return (combined_grads, combined_outputs)", "docstring": "Break the training batch into multiple microbatches.\n\nReturns two structures:\n\ngrads - a list of Tensors corresponding to the gradients on\ngraph.trainable_variables.  These are summed across all microbatches\n\noutputs - a dictionary of Tensors corresponding to the output dictionary of\nmodel_fn.   Each value is either summed across all microbatches (if it\nhas no batch-dimension), or concatenated across all microbatches to\nrepresent the original batch (if it does have a batch-dimension).\n\nArgs:\nfeatures: a dictionary of Tensors, each with a batch_dim dimension\nmodel_fn: a function from feature dictionary to output dictionary\noutput_dictionary must contain \"loss\"\nbatch_dim: a Dimension\nnum_splits: an integer dividing batch_dim.size\n\nReturns:\ngrads: a list of Tensors corresponding to the gradients on\ngraph.trainable_variables\noutputs: dictionary of output Tensors summed across microbatches", "source": "codesearchnet"}
{"code": "def insert(self, i, species, coords, coords_are_cartesian=False, validate_proximity=False, properties=None):\n    if (not coords_are_cartesian):\n        new_site = PeriodicSite(species, coords, self._lattice, properties=properties)\n    else:\n        frac_coords = self._lattice.get_fractional_coords(coords)\n        new_site = PeriodicSite(species, frac_coords, self._lattice, properties=properties)\n    if validate_proximity:\n        for site in self:\n            if (site.distance(new_site) < self.DISTANCE_TOLERANCE):\n                raise ValueError('New site is too close to an existing site!')\n    self._sites.insert(i, new_site)", "docstring": "Insert a site to the structure.\n\nArgs:\ni (int): Index to insert site\nspecies (species-like): Species of inserted site\ncoords (3x1 array): Coordinates of inserted site\ncoords_are_cartesian (bool): Whether coordinates are cartesian.\nDefaults to False.\nvalidate_proximity (bool): Whether to check if inserted site is\ntoo close to an existing site. Defaults to False.\nproperties (dict): Properties associated with the site.\n\nReturns:\nNew structure with inserted site.", "source": "codesearchnet"}
{"code": "def write_other_members(self, f, catch_all=False):\n    \n    if catch_all:\n      names = self._members.items()\n    else:\n      names = inspect.getmembers(self._module)\n    leftovers = []\n    for name, _ in names:\n      if name in self._members and name not in self._documented:\n        leftovers.append(name)\n    if leftovers:\n      print(\"%s: undocumented members: %d\" % (self._title, len(leftovers)))\n      print(\"\\n\n      for name in sorted(leftovers):\n        print(\"  %s\" % name)\n        self._documented.add(name)\n        self._mentioned.add(name)\n        self._write_member_markdown_to_file(f, \"", "docstring": "Writes the leftover members to `f`.\n\nArgs:\nf: File to write to.\ncatch_all: If true, document all missing symbols from any module.\nOtherwise, document missing symbols from just this module.", "source": "juraj-google-style"}
{"code": "def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):\n  \n  key = 'charset' if not unique_glyphs else 'ownCharset'\n\n  internals_dir = os.path.dirname(os.path.abspath(__file__))\n  target = os.path.join(internals_dir, namFilename)\n  result = readNamelist(target, unique_glyphs, cache)\n  return result[key]", "docstring": "Returns the set of codepoints contained in a given Namelist file.\n\nThis is a replacement CodepointsInSubset and implements the \"#$ include\"\nheader format.\n\nArgs:\nnamFilename: The path to the  Namelist file.\nunique_glyphs: Optional, whether to only include glyphs unique to subset.\nReturns:\nA set containing the glyphs in the subset.", "source": "juraj-google-style"}
{"code": "def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n    assert len(padding) == 2\n    assert len(padding[0]) == 2\n    assert len(padding[1]) == 2\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if data_format == 'channels_first':\n        pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]\n    else:\n        pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]\n    return array_ops.pad(x, pattern)", "docstring": "Pads the 2nd and 3rd dimensions of a 4D tensor.\n\nArgs:\nx: Tensor or variable.\npadding: Tuple of 2 tuples, padding pattern.\ndata_format: One of `channels_last` or `channels_first`.\n\nReturns:\nA padded 4D tensor.\n\nRaises:\nValueError: if `data_format` is neither\n`channels_last` or `channels_first`.", "source": "github-repos"}
{"code": "def _escape_token(token, alphabet):\n    if (not isinstance(token, six.text_type)):\n        raise ValueError(('Expected string type for token, got %s' % type(token)))\n    token = token.replace(u'\\\\', u'\\\\\\\\').replace(u'_', u'\\\\u')\n    ret = [(c if ((c in alphabet) and (c != u'\\n')) else ('\\\\%d;' % ord(c))) for c in token]\n    return (u''.join(ret) + '_')", "docstring": "Escape away underscores and OOV characters and append '_'.\n\nThis allows the token to be expressed as the concatenation of a list\nof subtokens from the vocabulary. The underscore acts as a sentinel\nwhich allows us to invertibly concatenate multiple such lists.\n\nArgs:\ntoken: A unicode string to be escaped.\nalphabet: A set of all characters in the vocabulary's alphabet.\n\nReturns:\nescaped_token: An escaped unicode string.\n\nRaises:\nValueError: If the provided token is not unicode.", "source": "codesearchnet"}
{"code": "def create(self, data=None, uri=None, timeout=-1, force=True):\n        \n        if not data:\n            data = {}\n\n        default_values = self._get_default_values()\n        for key, value in default_values.items():\n            if not data.get(key):\n                data[key] = value\n\n        resource_data = self._helper.create(data, uri, timeout, force=force)\n        new_resource = self.new(self._connection, resource_data)\n\n        return new_resource", "docstring": "Makes a POST request to create a resource when a request body is required.\n\nArgs:\ndata: Additional fields can be passed to create the resource.\nuri: Resouce uri\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nforce: Flag to force the operation\nReturns:\nCreated resource.", "source": "juraj-google-style"}
{"code": "def compute_positions(cls, screen_width, line):\n    left = 1\n    right = (screen_width + 1)\n    flexible = None\n    for field in line:\n        if field.is_flexible():\n            if flexible:\n                raise FormatError('There can be only one flexible field per line.')\n            flexible = field\n        elif (not flexible):\n            left += field.width\n        else:\n            right -= field.width\n    available = (right - left)\n    if (available <= 0):\n        raise FormatError('Too much data for screen width')\n    if flexible:\n        if (available < 1):\n            raise FormatError(('Not enough space to display flexible field %s' % flexible.name))\n        flexible.width = available\n    positions = []\n    left = 1\n    for field in line:\n        positions.append((left, field))\n        left += field.width\n    logger.debug('Positions are %r', positions)\n    return positions", "docstring": "Compute the relative position of the fields on a given line.\n\nArgs:\nscreen_width (int): the width of the screen\nline (mpdlcd.display_fields.Field list): the list of fields on the\nline\n\nReturns:\n((int, mpdlcd.display_fields.Field) list): the positions of fields,\nas (position, field) tuples.\n\nRaises:\nFormatError: if the line contains more than one flexible field, or\nis too long for the screen size.", "source": "codesearchnet"}
{"code": "def decode_single_feature_from_dict(\n    feature_k,\n    feature,\n    tfexample_dict):\n  \n  \n  if not feature.serialized_keys:\n    data_to_decode = tfexample_dict[feature_k]\n  \n  else:\n    \n    data_to_decode = {\n        k: tfexample_dict[posixpath.join(feature_k, k)]\n        for k in feature.serialized_keys\n    }\n  return feature.decode_example(data_to_decode)", "docstring": "Decode the given feature from the tfexample_dict.\n\nArgs:\nfeature_k (str): Feature key in the tfexample_dict\nfeature (FeatureConnector): Connector object to use to decode the field\ntfexample_dict (dict): Dict containing the data to decode.\n\nReturns:\ndecoded_feature: The output of the feature.decode_example", "source": "juraj-google-style"}
{"code": "def ReSpecTh_to_ChemKED(filename_xml, file_author='', file_author_orcid='', *, validate=False):\n    tree = etree.parse(filename_xml)\n    root = tree.getroot()\n    properties = get_file_metadata(root)\n    properties['reference'] = get_reference(root)\n    properties['reference']['detail'] = ((properties['reference'].get('detail', '') + 'Converted from ReSpecTh XML file ') + os.path.basename(filename_xml))\n    properties.update(get_experiment_kind(root))\n    properties['common-properties'] = get_common_properties(root)\n    properties['common-properties']['ignition-type'] = get_ignition_type(root)\n    properties['datapoints'] = get_datapoints(root)\n    has_pres_rise = (('pressure-rise' in properties['common-properties']) or any([True for dp in properties['datapoints'] if ('pressure-rise' in dp)]))\n    if (has_pres_rise and (properties['apparatus']['kind'] == 'rapid compression machine')):\n        raise KeywordError('Pressure rise cannot be defined for RCM.')\n    has_vol_hist = any([(t.get('type') == 'volume') for dp in properties['datapoints'] for t in dp.get('time-histories', [{}])])\n    if (has_vol_hist and (properties['apparatus']['kind'] == 'shock tube')):\n        raise KeywordError('Volume history cannot be defined for shock tube.')\n    if (file_author_orcid and (not file_author)):\n        raise KeywordError('If file_author_orcid is specified, file_author must be as well')\n    if file_author:\n        temp_author = {'name': file_author}\n        if file_author_orcid:\n            temp_author['ORCID'] = file_author_orcid\n        properties['file-authors'].append(temp_author)\n    for idx in range(len(properties['datapoints'])):\n        for prop in properties['common-properties']:\n            properties['datapoints'][idx][prop] = properties['common-properties'][prop]\n    if validate:\n        chemked.ChemKED(dict_input=properties)\n    return properties", "docstring": "Convert ReSpecTh XML file to ChemKED-compliant dictionary.\n\nArgs:\nfilename_xml (`str`): Name of ReSpecTh XML file to be converted.\nfile_author (`str`, optional): Name to override original file author\nfile_author_orcid (`str`, optional): ORCID of file author\nvalidate (`bool`, optional, keyword-only): Set to `True` to validate the resulting\nproperty dictionary with `ChemKED`. Set to `False` if the file is being loaded and will\nbe validated at some other point before use.", "source": "codesearchnet"}
{"code": "def add_metric(self, labels, buckets, gsum_value, timestamp=None):\n        \n        for bucket, value in buckets:\n            self.samples.append(Sample(\n                self.name + '_bucket',\n                dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),\n                value, timestamp))\n        \n        self.samples.extend([\n            Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),\n            Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp),\n        ])", "docstring": "Add a metric to the metric family.\n\nArgs:\nlabels: A list of label values\nbuckets: A list of pairs of bucket names and values.\nThe buckets must be sorted, and +Inf present.\ngsum_value: The sum value of the metric.", "source": "juraj-google-style"}
{"code": "def __init__(self, input_queue, output_queue):\n        \n        super(WorkerThread, self).__init__()\n        self.daemon = True\n        self.input_queue = input_queue\n        self.output_queue = output_queue\n        self.interrupted = False\n        self.polltime = FLAGS.polltime", "docstring": "Initializer.\n\nArgs:\ninput_queue: Queue this worker consumes work from.\noutput_queue: Queue where this worker puts new work items, if any.", "source": "juraj-google-style"}
{"code": "def list_cert_bindings(site):\n    \n    ret = dict()\n    sites = list_sites()\n\n    if site not in sites:\n        log.warning('Site not found: %s', site)\n        return ret\n\n    for binding in sites[site]['bindings']:\n        if sites[site]['bindings'][binding]['certificatehash']:\n            ret[binding] = sites[site]['bindings'][binding]\n\n    if not ret:\n        log.warning('No certificate bindings found for site: %s', site)\n\n    return ret", "docstring": "List certificate bindings for an IIS site.\n\n.. versionadded:: 2016.11.0\n\nArgs:\nsite (str): The IIS site name.\n\nReturns:\ndict: A dictionary of the binding names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_bindings site", "source": "juraj-google-style"}
{"code": "def status(self, targets, jobs=None, remote=None, show_checksums=False):\n        \n        cloud = self._get_cloud(remote, \"status\")\n        return self.repo.cache.local.status(\n            targets, jobs=jobs, remote=cloud, show_checksums=show_checksums\n        )", "docstring": "Check status of data items in a cloud-agnostic way.\n\nArgs:\ntargets (list): list of targets to check status for.\njobs (int): number of jobs that can be running simultaneously.\nremote (dvc.remote.base.RemoteBase): optional remote to compare\ntargets to. By default remote from core.remote config option\nis used.\nshow_checksums (bool): show checksums instead of file names in\ninformation messages.", "source": "juraj-google-style"}
{"code": "def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir, epsilon):\n    dataset_images = [f for f in os.listdir(dataset_batch_dir) if f.endswith('.png')]\n    image_hashes = {}\n    resize_warning = False\n    for img_name in dataset_images:\n        if (not os.path.exists(os.path.join(adv_dir, img_name))):\n            logging.warning('Image %s not found in the output', img_name)\n            continue\n        image = np.array(Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))\n        image = image.astype('int32')\n        image_max_clip = np.clip((image + epsilon), 0, 255).astype('uint8')\n        image_min_clip = np.clip((image - epsilon), 0, 255).astype('uint8')\n        adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')\n        if (adv_image.size[::(- 1)] != image.shape[:2]):\n            resize_warning = True\n            adv_image = adv_image.resize((image.shape[1], image.shape[0]), Image.BICUBIC)\n        adv_image = np.array(adv_image)\n        clipped_adv_image = np.clip(adv_image, image_min_clip, image_max_clip)\n        Image.fromarray(clipped_adv_image).save(os.path.join(output_dir, img_name))\n        image_hashes[img_name[:(- 4)]] = hashlib.sha1(clipped_adv_image.view(np.uint8)).hexdigest()\n    if resize_warning:\n        logging.warning('One or more adversarial images had incorrect size')\n    return image_hashes", "docstring": "Enforces size of perturbation on images, and compute hashes for all images.\n\nArgs:\ndataset_batch_dir: directory with the images of specific dataset batch\nadv_dir: directory with generated adversarial images\noutput_dir: directory where to copy result\nepsilon: size of perturbation\n\nReturns:\ndictionary with mapping form image ID to hash.", "source": "codesearchnet"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states)\n    pooled_output = text_outputs.pooler_output\n    return pooled_output", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`Siglip2TextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, AutoModel\n>>> import torch\n\n>>> model = AutoModel.from_pretrained(\"google/siglip2-base-patch16-224\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/siglip2-base-patch16-224\")\n\n>>> # important: make sure to set padding=\"max_length\" as that's how the model was trained\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=\"max_length\", return_tensors=\"pt\")\n>>> with torch.no_grad():\n...     text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def _internal_add(self, pattern: Pattern, label, renaming) -> int:\n        \n        pattern_index = len(self.patterns)\n        renamed_constraints = [c.with_renamed_vars(renaming) for c in pattern.local_constraints]\n        constraint_indices = [self._add_constraint(c, pattern_index) for c in renamed_constraints]\n        self.patterns.append((pattern, label, constraint_indices))\n        self.pattern_vars.append(renaming)\n        pattern = rename_variables(pattern.expression, renaming)\n        state = self.root\n        patterns_stack = [deque([pattern])]\n\n        self._process_pattern_stack(state, patterns_stack, renamed_constraints, pattern_index)\n\n        return pattern_index", "docstring": "Add a new pattern to the matcher.\n\nEquivalent patterns are not added again. However, patterns that are structurally equivalent,\nbut have different constraints or different variable names are distinguished by the matcher.\n\nArgs:\npattern: The pattern to add.\n\nReturns:\nThe internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`.", "source": "juraj-google-style"}
{"code": "def zip_(*structures, **kwargs):\n  \n  \n  \n  flatten = kwargs.pop('flatten', False)\n  assert not kwargs, 'zip() got unexpected keyword arguments.'\n  return map(\n      lambda *x: x if len(x) > 1 else x[0],\n      *structures,\n      flatten=flatten)", "docstring": "Combine corresponding elements in multiple nested structure to tuples.\n\nThe nested structures can consist of any combination of lists, tuples, and\ndicts. All provided structures must have the same nesting.\n\nArgs:\n*structures: Nested structures.\nflatten: Whether to flatten the resulting structure into a tuple. Keys of\ndictionaries will be discarded.\n\nReturns:\nNested structure.", "source": "juraj-google-style"}
{"code": "def _ParseTimezoneOption(self, options):\n    time_zone_string = self.ParseStringOption(options, 'timezone')\n    if isinstance(time_zone_string, py2to3.STRING_TYPES):\n        if (time_zone_string.lower() == 'list'):\n            self.list_timezones = True\n        elif time_zone_string:\n            try:\n                pytz.timezone(time_zone_string)\n            except pytz.UnknownTimeZoneError:\n                raise errors.BadConfigOption('Unknown time zone: {0:s}'.format(time_zone_string))\n            self._preferred_time_zone = time_zone_string", "docstring": "Parses the timezone options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def turb45(msg):\n    \n    d = hex2bin(data(msg))\n    if d[0] == '0':\n        return None\n\n    turb = bin2int(d[1:3])\n    return turb", "docstring": "Turbulence.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: Turbulence level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj-google-style"}
{"code": "def set_property(self, name, value, update_session=True):\n    if (type(value) == datetime):\n        value = value.isoformat()\n    else:\n        value = value\n    try:\n        prop = self.get_property(name)\n        if (prop.value == value):\n            return False\n        prop.value = value\n    except AttributeError:\n        prop = ResourceProperty()\n        prop.resource_id = self.id\n        prop.name = name\n        prop.value = value\n    if update_session:\n        db.session.add(prop)\n    return True", "docstring": "Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if\nthere were no changes to the value of the property.\n\nArgs:\nname (str): Name of the property to create or update\nvalue (any): Value of the property. This can be any type of JSON serializable data\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. Default: True\n\nReturns:\n`bool`", "source": "codesearchnet"}
{"code": "def _on_connection_finished(self, result):\n        \n\n        success, retval, context = self._parse_return(result)\n        conn_id = context['connection_id']\n        callback = context['callback']\n\n        if success is False:\n            callback(conn_id, self.id, False, 'Timeout opening connection')\n\n            with self.count_lock:\n                self.connecting_count -= 1\n            return\n\n        handle = retval['handle']\n        context['disconnect_handler'] = self._on_connection_failed\n        context['connect_time'] = time.time()\n        context['state'] = 'preparing'\n        self._connections[handle] = context\n\n        self.probe_services(handle, conn_id, self._probe_services_finished)", "docstring": "Callback when the connection attempt to a BLE device has finished\n\nThis function if called when a new connection is successfully completed\n\nArgs:\nevent (BGAPIPacket): Connection event", "source": "juraj-google-style"}
{"code": "def get_rotation_matrix(axis, angle):\n    \n    axis = normalize(np.array(axis))\n    if not (np.array([1, 1, 1]).shape) == (3, ):\n        raise ValueError('axis.shape has to be 3')\n    angle = float(angle)\n    return _jit_get_rotation_matrix(axis, angle)", "docstring": "Returns the rotation matrix.\n\nThis function returns a matrix for the counterclockwise rotation\naround the given axis.\nThe Input angle is in radians.\n\nArgs:\naxis (vector):\nangle (float):\n\nReturns:\nRotation matrix (np.array):", "source": "juraj-google-style"}
{"code": "def project_surface(surface, angle=DEFAULT_ANGLE):\n    \n    z_coef = np.sin(np.radians(angle))\n    y_coef = np.cos(np.radians(angle))\n\n    surface_height, surface_width = surface.shape\n    slope = np.tile(np.linspace(0., 1., surface_height), [surface_width, 1]).T\n\n    return slope * y_coef + surface * z_coef", "docstring": "Returns the height of the surface when projected at the given angle.\n\nArgs:\nsurface (surface): the surface to project\nangle (float): the angle at which to project the surface\n\nReturns:\nsurface: A projected surface.", "source": "juraj-google-style"}
{"code": "def parse_results_mol2(mol2_outpath):\n    docked_ligands = pd.DataFrame()\n    lines = [line.strip() for line in open(mol2_outpath, 'r')]\n    props = {}\n    for (i, line) in enumerate(lines):\n        if line.startswith('\n            ligand = line.strip().strip('\n            line = lines[(i + 1)]\n            props = {}\n            props['Ligand'] = ligand\n        if line.startswith('\n            splitter = line.strip().strip('\n            props[splitter[0]] = float(splitter[1])\n        if line.startswith('@<TRIPOS>MOLECULE'):\n            if props:\n                docked_ligands = docked_ligands.append(props, ignore_index=True)\n    return docked_ligands", "docstring": "Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.\n\nArgs:\nmol2_outpath (str): Path to mol2 output file\n\nReturns:\nDataFrame: Pandas DataFrame of the results", "source": "codesearchnet"}
{"code": "def set(self, *args, **kwargs):\n    if args:\n        for arg in args:\n            if (arg is not None):\n                for name in self.__slots__:\n                    self._set(name, getattr(arg, name, UNSET))\n    for name in kwargs:\n        self._set(name, kwargs.get(name, UNSET))", "docstring": "Conveniently set one or more fields at a time.\n\nArgs:\n*args: Optionally set from other objects, available fields from the passed object are used in order\n**kwargs: Set from given key/value pairs (only names defined in __slots__ are used)", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    storage_file = cls._ParseStringOption(options, 'storage_file')\n\n    setattr(configuration_object, '_storage_file_path', storage_file)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def delete_detector(self, detector_id, **kwargs):\n        \n        resp = self._delete(self._u(self._DETECTOR_ENDPOINT_SUFFIX,\n                                    detector_id),\n                            **kwargs)\n        resp.raise_for_status()\n        \n        return resp", "docstring": "Remove a detector.\n\nArgs:\ndetector_id (string): the ID of the detector.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not path_spec:\n      raise ValueError('Missing path specification.')\n\n    data_stream = getattr(path_spec, 'data_stream', None)\n\n    file_system = resolver.Resolver.OpenFileSystem(\n        path_spec, resolver_context=self._resolver_context)\n\n    file_entry = file_system.GetFileEntryByPathSpec(path_spec)\n    if not file_entry:\n      file_system.Close()\n      raise IOError('Unable to retrieve file entry.')\n\n    tsk_file = file_entry.GetTSKFile()\n    tsk_attribute = None\n\n    \n    \n    \n    if getattr(tsk_file, 'info', None) is None:\n      file_system.Close()\n      raise IOError('Missing attribute info in file (pytsk3.File).')\n\n    \n    \n    \n    if getattr(tsk_file.info, 'meta', None) is None:\n      file_system.Close()\n      raise IOError(\n          'Missing attribute meta in file.info pytsk3.TSK_FS_FILE).')\n\n    \n    \n    if not hasattr(tsk_file.info.meta, 'size'):\n      file_system.Close()\n      raise IOError(\n          'Missing attribute size in file.info.meta (pytsk3.TSK_FS_META).')\n\n    \n    \n    if not hasattr(tsk_file.info.meta, 'type'):\n      file_system.Close()\n      raise IOError(\n          'Missing attribute type in file.info.meta (pytsk3.TSK_FS_META).')\n\n    if data_stream:\n      for attribute in tsk_file:\n        if getattr(attribute, 'info', None) is None:\n          continue\n\n        \n        \n        attribute_name = getattr(attribute.info, 'name', None)\n        if attribute_name is None:\n          attribute_name = ''\n\n        else:\n          try:\n            \n            attribute_name = attribute_name.decode('utf8')\n          except UnicodeError:\n            \n            continue\n\n        attribute_type = getattr(attribute.info, 'type', None)\n        if attribute_name == data_stream and attribute_type in (\n            pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT,\n            pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA,\n            pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA):\n          tsk_attribute = attribute\n          break\n\n      if tsk_attribute is None:\n        file_system.Close()\n        raise IOError('Unable to open data stream: {0:s}.'.format(data_stream))\n\n    if (not tsk_attribute and\n        tsk_file.info.meta.type != pytsk3.TSK_FS_META_TYPE_REG):\n      file_system.Close()\n      raise IOError('Not a regular file.')\n\n    self._current_offset = 0\n    self._file_system = file_system\n    self._tsk_attribute = tsk_attribute\n    self._tsk_file = tsk_file\n\n    if self._tsk_attribute:\n      self._size = self._tsk_attribute.info.size\n    else:\n      self._size = self._tsk_file.info.meta.size", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Assigns `tf.IndexedSlices` to this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be assigned to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def filter_keys_by_dataset_id(did, key_container):\n    keys = iter(key_container)\n    for key in DATASET_KEYS:\n        if (getattr(did, key) is not None):\n            if (key == 'wavelength'):\n                keys = [k for k in keys if ((getattr(k, key) is not None) and DatasetID.wavelength_match(getattr(k, key), getattr(did, key)))]\n            else:\n                keys = [k for k in keys if ((getattr(k, key) is not None) and (getattr(k, key) == getattr(did, key)))]\n    return keys", "docstring": "Filer provided key iterable by the provided `DatasetID`.\n\nNote: The `modifiers` attribute of `did` should be `None` to allow for\n**any** modifier in the results.\n\nArgs:\ndid (DatasetID): Query parameters to match in the `key_container`.\nkey_container (iterable): Set, list, tuple, or dict of `DatasetID`\nkeys.\n\nReturns (list): List of keys matching the provided parameters in no\nspecific order.", "source": "codesearchnet"}
{"code": "def build_rank_score_dict(rank_scores):\n    \n    logger = getLogger(__name__)\n    logger.debug(\"Checking rank scores: {0}\".format(rank_scores))\n    scores = {}\n    for family in rank_scores:\n        entry = family.split(':')\n        try:\n            family_id = entry[0]\n            logger.debug(\"Extracting rank score for family:{0}\".format(family_id))\n            score = entry[1]\n            logger.debug(\"Score:{0}\".format(score))\n        except Exception:\n            raise SyntaxError(\"Malformed rank score input\")\n            \n        scores[family_id] = score\n    \n    return scores", "docstring": "Take a list with annotated rank scores for each family and returns a\ndictionary with family_id as key and a list of genetic models as value.\n\nArgs:\nrank_scores    : A list on the form ['1:12','2:20']\n\nReturns:\nscores       : A dictionary with family id:s as key and scores as value\n{\n'1':'12',\n'2':'20'\n}", "source": "juraj-google-style"}
{"code": "def _parse(json_str: str, primitive_cls: Type[Base64Binary], *, separator_stride_cls: Type[SeparatorStride]) -> Base64Binary:\n    result = primitive_cls()\n    stride = json_str.find(' ')\n    if stride != -1:\n        end = stride\n        while end < len(json_str) and json_str[end] == ' ':\n            end += 1\n        separator = json_str[stride:end]\n        separator_stride_extension = cast(Any, separator_stride_cls())\n        separator_stride_extension.separator.value = separator\n        separator_stride_extension.stride.value = stride\n        extensions.add_message_to_extension(separator_stride_extension, result.extension.add())\n        json_str = json_str.replace(separator, '')\n    try:\n        result.value = base64.b64decode(json_str, validate=True)\n    except binascii.Error as e:\n        raise fhir_errors.InvalidFhirError('Invalid base64-encoded string.') from e\n    return result", "docstring": "Parses the json_str into a Base64Binary FHIR primitive protobuf message.\n\nArgs:\njson_str: The raw JSON string to parse.\nprimitive_cls: The type of FHIR primitive to parse into.\nseparator_stride_cls: The type of Base64BinarySeparatorStride extension\nassociated with primitive_cls.\n\nReturns:\nA FHIR primitive Base64Binary protobuf message.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that the provided json_str is\nnot a valid base64-encoded string.", "source": "github-repos"}
{"code": "def ReadFileObject(self, definitions_registry, file_object):\n    last_definition_object = None\n    error_location = None\n    error_message = None\n    try:\n        yaml_generator = yaml.safe_load_all(file_object)\n        for yaml_definition in yaml_generator:\n            definition_object = self._ReadDefinition(definitions_registry, yaml_definition)\n            if (not definition_object):\n                error_location = self._GetFormatErrorLocation(yaml_definition, last_definition_object)\n                error_message = '{0:s} Missing definition object.'.format(error_location)\n                raise errors.FormatError(error_message)\n            definitions_registry.RegisterDefinition(definition_object)\n            last_definition_object = definition_object\n    except errors.DefinitionReaderError as exception:\n        error_message = 'in: {0:s} {1:s}'.format((exception.name or '<NAMELESS>'), exception.message)\n        raise errors.FormatError(error_message)\n    except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:\n        error_location = self._GetFormatErrorLocation({}, last_definition_object)\n        error_message = '{0:s} {1!s}'.format(error_location, exception)\n        raise errors.FormatError(error_message)", "docstring": "Reads data type definitions from a file-like object into the registry.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\nfile_object (file): file-like object to read from.\n\nRaises:\nFormatError: if the definitions values are missing or if the format is\nincorrect.", "source": "codesearchnet"}
{"code": "def insert_arguments_into_match_query(compilation_result, arguments):\n    if (compilation_result.language != MATCH_LANGUAGE):\n        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))\n    base_query = compilation_result.query\n    argument_types = compilation_result.input_metadata\n    sanitized_arguments = {key: _safe_match_argument(argument_types[key], value) for (key, value) in six.iteritems(arguments)}\n    return base_query.format(**sanitized_arguments)", "docstring": "Insert the arguments into the compiled MATCH query to form a complete query.\n\nArgs:\ncompilation_result: a CompilationResult object derived from the GraphQL compiler\narguments: dict, mapping argument name to its value, for every parameter the query expects.\n\nReturns:\nstring, a MATCH query with inserted argument data", "source": "codesearchnet"}
{"code": "def requested_packages(self, include_implicit=False):\n    if include_implicit:\n        return (self._package_requests + self.implicit_packages)\n    else:\n        return self._package_requests", "docstring": "Get packages in the request.\n\nArgs:\ninclude_implicit (bool): If True, implicit packages are appended\nto the result.\n\nReturns:\nList of `PackageRequest` objects.", "source": "codesearchnet"}
{"code": "def _convert_type(self, t, as_instance=False):\n    src = textwrap.dedent(f'\\n      from typing import Any, Callable, Iterator, Tuple, Type, Union\\n      from protocols import Sequence, SupportsLower\\n      x = ...  \n    filename = str(hash((t, as_instance)))\n    x = self._parse_and_lookup(src, 'x', filename).type\n    if as_instance:\n        x = abstract_utils.AsInstance(x)\n    return self.ctx.convert.constant_to_value(x, {}, self.ctx.root_node)", "docstring": "Convenience function for turning a string into an abstract value.\n\nNote that this function cannot be called more than once per test with\nthe same arguments, since we hash the arguments to get a filename for\nthe temporary pyi.\n\nArgs:\nt: The string representation of a type.\nas_instance: Whether to convert as an instance.\n\nReturns:\nA BaseValue.", "source": "github-repos"}
{"code": "def erfinv(x, name='erfinv'):\n    with tf.name_scope(name):\n        x = tf.convert_to_tensor(value=x, name='x')\n        if (dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]):\n            raise TypeError('x.dtype={} is not handled, see docstring for supported types.'.format(dtype_util.name(x.dtype)))\n        return (ndtri(((x + 1.0) / 2.0)) / np.sqrt(2.0))", "docstring": "The inverse function for erf, the error function.\n\nArgs:\nx: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"erfinv\").\n\nReturns:\nx: `Tensor` with `dtype=x.dtype`.\n\nRaises:\nTypeError: if `x` is not floating-type.", "source": "codesearchnet"}
{"code": "def custom(colors, bins=None, bin_method=BinMethod.quantiles):\n    return {'colors': colors, 'bins': (bins if (bins is not None) else len(colors)), 'bin_method': bin_method}", "docstring": "Create a custom scheme.\n\nArgs:\ncolors (list of str): List of hex values for styling data\nbins (int, optional): Number of bins to style by. If not given, the\nnumber of colors will be used.\nbin_method (str, optional): Classification method. One of the values\nin :obj:`BinMethod`. Defaults to `quantiles`, which only works with\nquantitative data.", "source": "codesearchnet"}
{"code": "def matmul(self, matmul_input: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.matmul(matmul_input, self.matmul_filters)\n    return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\nmatmul_input: Input tensor to matmul with the filter.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def lines_from_stream(f, as_interned=False):\n    if as_interned:\n        return [sys.intern(line) for line in f.read().splitlines()]\n    return f.read().splitlines()", "docstring": "Create a list of file lines from a given file stream.\n\nArgs:\nf (io.TextIOWrapper): File stream\nas_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\nstrings (list): File line list", "source": "codesearchnet"}
{"code": "def _tensor_name_base(full_tensor_name):\n    if full_tensor_name.startswith('^'):\n        return full_tensor_name[1:]\n    return full_tensor_name.split(':')[0]", "docstring": "Removes the device assignment code from a tensor.\n\ne.g. _tensor_name_base(\"foo:3\") => \"foo\"\n\nArgs:\nfull_tensor_name: A tensor name that is annotated with a device placement\n(this is what tensor flow introspection gives).\n\nReturns:\nA name without any device assignment.", "source": "github-repos"}
{"code": "def _StructPackDecoder(wire_type, format):\n    value_size = struct.calcsize(format)\n    local_unpack = struct.unpack\n\n    def InnerDecode(buffer, pos):\n        new_pos = (pos + value_size)\n        result = local_unpack(format, buffer[pos:new_pos])[0]\n        return (result, new_pos)\n    return _SimpleDecoder(wire_type, InnerDecode)", "docstring": "Return a constructor for a decoder for a fixed-width field.\n\nArgs:\nwire_type:  The field's wire type.\nformat:  The format string to pass to struct.unpack().", "source": "codesearchnet"}
{"code": "def parse_storage_size(storage_size):\n    \n    pattern = re.compile(r'^([0-9]+(\\.[0-9]+)?)([gmk])?$', re.I)\n\n    units = {\n        'k': 1024,\n        'm': 1024 * 1024,\n        'g': 1024 * 1024 * 1024\n    }\n\n    match = pattern.fullmatch(str(storage_size))\n\n    if match is None:\n        raise ValueError('Invalid partition size: {0}'.format(storage_size))\n\n    groups = match.groups()\n\n    \n    if groups[2] is None:\n        \n        return int(float(groups[0]))\n\n    return int(float(groups[0]) * units[groups[2].lower()])", "docstring": "Parses an expression that represents an amount of storage/memory and returns the number of bytes it represents.\n\nArgs:\nstorage_size(str): Size in bytes. The units ``k`` (kibibytes), ``m`` (mebibytes) and ``g``\n(gibibytes) are supported, i.e. a ``partition_size`` of ``1g`` equates :math:`2^{30}` bytes.\n\nReturns:\nint: Number of bytes.", "source": "juraj-google-style"}
{"code": "def _VerifyRecord(self, pls_record):\n    future_timestamp = (timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)\n    if (pls_record.last_written_time > future_timestamp):\n        return False\n    (first_word, _, _) = pls_record.query.partition(' ')\n    if (first_word.lower() not in self._PLS_KEYWORD):\n        return False\n    return True", "docstring": "Verifies a PLS Recall record.\n\nArgs:\npls_record (pls_recall_record): a PLS Recall record to verify.\n\nReturns:\nbool: True if this is a valid PLS Recall record, False otherwise.", "source": "codesearchnet"}
{"code": "def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = HangoutsMessageData()\n    event_data.sender = self._GetRowValue(query_hash, row, 'full_name')\n    event_data.body = self._GetRowValue(query_hash, row, 'text')\n    event_data.offset = self._GetRowValue(query_hash, row, '_id')\n    event_data.query = query\n    event_data.message_status = self._GetRowValue(query_hash, row, 'status')\n    event_data.message_type = self._GetRowValue(query_hash, row, 'type')\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Messages row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "class FalconMambaOutput(ModelOutput):\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    cache_params: Optional[MambaCache] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Class for the FALCONMAMBA model outputs.\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nSequence of hidden-states at the output of the last layer of the model.\ncache_params (`MambaCache`):\nThe state of the model at the last time step. Can be used in a forward method with the next `input_ids` to\navoid providing the old `input_ids`.\n\nIncludes both the State space model state matrices after the selective scan, and the Convolutional states\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\none for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def callback_trigger(msg, arg):\n    gnlh = genlmsghdr(nlmsg_data(nlmsg_hdr(msg)))\n    if (gnlh.cmd == nl80211.NL80211_CMD_SCAN_ABORTED):\n        arg.value = 1\n    elif (gnlh.cmd == nl80211.NL80211_CMD_NEW_SCAN_RESULTS):\n        arg.value = 0\n    return libnl.handlers.NL_SKIP", "docstring": "Called when the kernel is done scanning. Only signals if it was successful or if it failed. No other data.\n\nPositional arguments:\nmsg -- nl_msg class instance containing the data sent by the kernel.\narg -- mutable integer (ctypes.c_int()) to update with results.\n\nReturns:\nAn integer, value of NL_SKIP. It tells libnl to stop calling other callbacks for this message and proceed with\nprocessing the next kernel message.", "source": "codesearchnet"}
{"code": "def _UpdateYear(self, mediator, month):\n    if (not self._year_use):\n        self._year_use = mediator.GetEstimatedYear()\n    if (not self._maximum_year):\n        self._maximum_year = mediator.GetLatestYear()\n    if (not self._last_month):\n        self._last_month = month\n        return\n    if (self._last_month > (month + 1)):\n        if (self._year_use != self._maximum_year):\n            self._year_use += 1\n    self._last_month = month", "docstring": "Updates the year to use for events, based on last observed month.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nmonth (int): month observed by the parser, where January is 1.", "source": "codesearchnet"}
{"code": "def positions(self, account: str='') -> List[Position]:\n    if account:\n        return list(self.wrapper.positions[account].values())\n    else:\n        return [v for d in self.wrapper.positions.values() for v in d.values()]", "docstring": "List of positions for the given account,\nor of all accounts if account is left blank.\n\nArgs:\naccount: If specified, filter for this account name.", "source": "codesearchnet"}
{"code": "def end_run_group(group, session):\n    \n    from datetime import datetime\n\n    group.end = datetime.now()\n    group.status = 'completed'\n    session.commit()", "docstring": "End the run_group successfully.\n\nArgs:\ngroup: The run_group we want to complete.\nsession: The database transaction we will finish.", "source": "juraj-google-style"}
{"code": "def port_tag_details(cls, tags):\n    for tag in tags:\n        match = port_tag_re.match(tag)\n        if match:\n            (source_sink, port, extra) = match.groups()\n            return ((source_sink == 'source'), cls(port), extra)", "docstring": "Search tags for port info, returning it\n\nArgs:\ntags: A list of tags to check\n\nReturns:\nNone or (is_source, port, connected_value|disconnected_value)\nwhere port is one of the Enum entries of Port", "source": "codesearchnet"}
{"code": "def to_dataframe(self, view: views.View, limit: Optional[int]=None) -> pandas.DataFrame:\n    df = self.run_query(view, limit).result().to_dataframe()\n    return runner_utils.clean_dataframe(df, view.get_select_columns_to_return_type())", "docstring": "Returns a Pandas dataframe of the results, if Pandas is installed.\n\nArgs:\nview: the view that defines the query to run.\nlimit: optional limit of the number of items to return.\n\nReturns:\npandas.DataFrame: dataframe of the view contents.\n\nRaises:\nValueError propagated from the BigQuery client if pandas is not installed.", "source": "github-repos"}
{"code": "def get_attribute_list(self, uid=None):\n        \n        batch_item = self._build_get_attribute_list_batch_item(uid)\n\n        request = self._build_request_message(None, [batch_item])\n        response = self._send_and_receive_message(request)\n        results = self._process_batch_items(response)\n        return results[0]", "docstring": "Send a GetAttributeList request to the server.\n\nArgs:\nuid (string): The ID of the managed object with which the retrieved\nattribute names should be associated.\n\nReturns:\nresult (GetAttributeListResult): A structure containing the results\nof the operation.", "source": "juraj-google-style"}
{"code": "def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):\n        \n\n        network = []\n        for size in sizes:\n            network.append(dict(type='dense', size=size))\n\n        super(MLPBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)", "docstring": "Multi-layer perceptron baseline.\n\nArgs:\nsizes: List of dense layer sizes", "source": "juraj-google-style"}
{"code": "def get_factors(dividend: int) -> Set[int]:\n    factors_set = set()\n    for i in range(1, int(dividend ** 0.5) + 1):\n        if dividend % i == 0:\n            factors_set.add(i)\n            factors_set.add(dividend \n    return factors_set", "docstring": "Calculate all factors of a given number, i.e. a divisor that leaves\nno remainder. For example, if dividend=12, it will return {1, 2, 3, 4, 6, 12}.\n\nArgs:\ndividend (int): The number to find factors for.\n\nReturns:\nset: A set containing all factors of the number.", "source": "github-repos"}
{"code": "def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200):\n    if config:\n        session_config = copy.deepcopy(config)\n        session_config.MergeFrom(self._session_config)\n    else:\n        session_config = self._session_config\n    if not self._strategy or self._strategy.extended.experimental_should_init:\n        logging.info('Creating chief session creator with config: %r', config)\n        return monitored_session.ChiefSessionCreator(scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n    else:\n        logging.info('Creating worker session creator with config: %r', config)\n        return monitored_session.WorkerSessionCreator(scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs)", "docstring": "Returns a session creator.\n\nThe returned session creator will be configured with the correct master\ntarget and session configs. It will also run either init ops or ready ops\nby querying the `strategy` object when `create_session` is called on it.\n\nArgs:\nscaffold: A `Scaffold` used for gathering or building supportive ops. If\nnot specified a default one is created. It's used to finalize the graph.\nconfig: `ConfigProto` proto used to configure the session.\ncheckpoint_dir: A string. Optional path to a directory where to restore\nvariables.\ncheckpoint_filename_with_path: Full file name path to the checkpoint file.\nOnly one of `checkpoint_dir` or `checkpoint_filename_with_path` can be\nspecified.\nmax_wait_secs: Maximum time to wait for the session to become available.\n\nReturns:\na descendant of SessionCreator.", "source": "github-repos"}
{"code": "def pprint_value_string(self, value):\n    unit = ('' if (self.unit is None) else (' ' + bytes_to_unicode(self.unit)))\n    value = self.pprint_value(value)\n    return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit)", "docstring": "Pretty print the dimension value and unit.\n\nArgs:\nvalue: Dimension value to format\n\nReturns:\nFormatted dimension value string with unit", "source": "codesearchnet"}
{"code": "def parse_brome_config_from_browser_config(browser_config):\n    config = {}\n    brome_keys = [key for key in browser_config if (key.find(':') != (- 1))]\n    for brome_key in brome_keys:\n        (section, option) = brome_key.split(':')\n        value = browser_config[brome_key]\n        if (section not in config):\n            config[section] = {}\n        config[section][option] = value\n    return config", "docstring": "Parse the browser config and look for brome specific config\n\nArgs:\nbrowser_config (dict)", "source": "codesearchnet"}
{"code": "def set_xla_env_flag(flag: str='') -> Callable[[_F], _F]:\n\n    def decorator(f: _F) -> _F:\n\n        @functools.wraps(f)\n        def decorated(*args, **kwargs):\n            original_xla_flags = os.environ.get('XLA_FLAGS')\n            new_xla_flags = flag\n            if original_xla_flags:\n                new_xla_flags = new_xla_flags + ' ' + original_xla_flags\n            os.environ['XLA_FLAGS'] = new_xla_flags\n            try:\n                return f(*args, **kwargs)\n            finally:\n                if original_xla_flags is None:\n                    del os.environ['XLA_FLAGS']\n                else:\n                    os.environ['XLA_FLAGS'] = original_xla_flags\n        return decorated\n    return decorator", "docstring": "Decorator for setting XLA_FLAGS prior to running a test.\n\nThis function returns a decorator intended to be applied to test methods in\na `tf.test.TestCase` class. Doing so will allow users to set any xla flags\nexposed via the XLA_FLAGS environment variable, execute the test, then reset\nthe XLA_FLAGS to the state it was in prior to this test.\n\nExample:\n\nclass MyTest(test.TestCase):\n\n@set_xla_env_flag(flag='--xla_gpu_enable_fast_min_max=false')\ndef testFoo(self):\n...\n\nArgs:\nflag: The xla flag to be set in the XLA_FLAGS env variable.\n\nReturns:\nA decorator which sets the configured flag in XLA_FLAGS for the decorated\nfunction.", "source": "github-repos"}
{"code": "def set_datastore_policy(self, func):\n    \n    if func is None:\n      func = self.default_datastore_policy\n    elif isinstance(func, bool):\n      func = lambda unused_key, flag=func: flag\n    self._datastore_policy = func", "docstring": "Set the context datastore policy function.\n\nArgs:\nfunc: A function that accepts a Key instance as argument and returns\na bool indicating if it should use the datastore.  May be None.", "source": "juraj-google-style"}
{"code": "def GetBatchJobHelper(self, version=sorted(_SERVICE_MAP.keys())[(- 1)], server=None):\n    if (not server):\n        server = _DEFAULT_ENDPOINT\n    request_builder = BatchJobHelper.GetRequestBuilder(self, version=version, server=server)\n    response_parser = BatchJobHelper.GetResponseParser()\n    return BatchJobHelper(request_builder, response_parser)", "docstring": "Returns a BatchJobHelper to work with the BatchJobService.\n\nThis is a convenience method. It is functionally identical to calling\nBatchJobHelper(adwords_client, version).\n\nArgs:\n[optional]\nversion: A string identifying the AdWords version to connect to. This\ndefaults to what is currently the latest version. This will be updated\nin future releases to point to what is then the latest version.\nserver: A string identifying the webserver hosting the AdWords API.\n\nReturns:\nAn initialized BatchJobHelper tied to this client.", "source": "codesearchnet"}
{"code": "def write_examples(fp, examples):\n\n    def write_tensor(fp, name, x):\n        \n        fp.write('name,%s\\n' % name)\n        fp.write('dtype,%s\\n' % x.dtype)\n        fp.write('shape,' + ','.join(map(str, x.shape)) + '\\n')\n        fp.write('values,' + format_result(x) + '\\n')\n    fp.write('test_cases,%d\\n' % len(examples))\n    for example in examples:\n        fp.write('inputs,%d\\n' % len(example['inputs']))\n        for name, value in example['inputs'].items():\n            if value is not None:\n                write_tensor(fp, name, value)\n        fp.write('outputs,%d\\n' % len(example['outputs']))\n        for name, value in example['outputs'].items():\n            write_tensor(fp, name, value)", "docstring": "Given a list `examples`, write a text format representation.\n\nThe file format is csv like with a simple repeated pattern. We would ike\nto use proto here, but we can't yet due to interfacing with the Android\nteam using this format.\n\nArgs:\nfp: File-like object to write to.\nexamples: Example dictionary consisting of keys \"inputs\" and \"outputs\"", "source": "github-repos"}
{"code": "def __init__(self, encoding='utf-8'):\n    \n    super(StdinInputReader, self).__init__(sys.stdin, encoding=encoding)", "docstring": "Initializes an stdin input reader.\n\nArgs:\nencoding (Optional[str]): input encoding.", "source": "juraj-google-style"}
{"code": "def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):\n    tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)\n    return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)", "docstring": "Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer\n\nArgs:\npretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model\n\nExamples:\n\n```python\nfrom transformers import TFGPT2Tokenizer\n\ntf_tokenizer = TFGPT2Tokenizer.from_pretrained(\"openai-community/gpt2\")\n```", "source": "github-repos"}
{"code": "def _get_prefix_length(number1, number2, bits):\n    for i in range(bits):\n        if ((number1 >> i) == (number2 >> i)):\n            return (bits - i)\n    return 0", "docstring": "Get the number of leading bits that are same for two numbers.\n\nArgs:\nnumber1: an integer.\nnumber2: another integer.\nbits: the maximum number of bits to compare.\n\nReturns:\nThe number of leading bits that are the same for two numbers.", "source": "codesearchnet"}
{"code": "def tensor_equals(self, other):\n    if other is None:\n        return False\n    g = getattr(self, 'graph', None)\n    if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and (g is None or g.building_function):\n        self, other = override_binary_operator.maybe_promote_tensors(self, other)\n        return gen_math_ops.equal(self, other, incompatible_shape_error=False)\n    else:\n        return self is other", "docstring": "The operation invoked by the `Tensor.__eq__` operator.\n\nCompares two tensors element-wise for equality if they are\nbroadcast-compatible; or returns False if they are not broadcast-compatible.\n(Note that this behavior differs from `tf.math.equal`, which raises an\nexception if the two tensors are not broadcast-compatible.)\n\nPurpose in the API:\n\nThis method is exposed in TensorFlow's API so that library developers\ncan register dispatching for `Tensor.__eq__` to allow it to handle\ncustom composite tensors & other custom objects.\n\nThe API symbol is not intended to be called by users directly and does\nappear in TensorFlow's generated documentation.\n\nArgs:\nself: The left-hand side of the `==` operator.\nother: The right-hand side of the `==` operator.\n\nReturns:\nThe result of the elementwise `==` operation, or `False` if the arguments\nare not broadcast-compatible.", "source": "github-repos"}
{"code": "def __getitem__(self, key: InstanceKey) -> \"InstanceNode\":\n        \n        if isinstance(self.value, ObjectValue):\n            return self._member(key)\n        if isinstance(self.value, ArrayValue):\n            return self._entry(key)\n        raise InstanceValueError(self.json_pointer(), \"scalar instance\")", "docstring": "Return member or entry with the given key.\n\nArgs:\nkey: Entry index (for an array) or member name (for an object).\n\nRaises:\nNonexistentInstance: If receiver's value doesn't contain member\n`name`.\nInstanceValueError: If the receiver's value is not an object.", "source": "juraj-google-style"}
{"code": "def print_test_summary(self, executed_tests):\n        \n\n        separator = '---------------------'\n\n        with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:  \n            test_batch = session.query(Testbatch).filter(Testbatch.mongo_id == self.test_batch_id).one()  \n\n            \n            self.info_log('******* TEST BATCH SUMMARY ********')\n\n            \n            base_query = session.query(Testresult).filter(Testresult.test_batch_id == self.test_batch_id)  \n            total_test = base_query.count()\n            total_test_successful = base_query.filter(Testresult.result == True).count()  \n            base_query = session.query(Testresult).filter(Testresult.test_batch_id == self.test_batch_id)  \n            total_test_failed = base_query.filter(Testresult.result == False).count()  \n            self.info_log(\n                'Total_test: %s; Total_test_successful: %s; Total_test_failed: %s' %  \n                (total_test, total_test_successful, total_test_failed)\n            )\n\n            \n            self.info_log(\n                \"Total execution time: %s\" %\n                (test_batch.ending_timestamp - test_batch.starting_timestamp)\n            )\n\n            \n            self.info_log(separator)\n\n            self.info_log('Failed tests:')\n\n            \n            failed_test_list = []\n            test_results = session.query(Testresult)\\\n                .filter(Testresult.result == False)\\\n                .filter(Testresult.test_batch_id == self.test_batch_id).all()  \n            for test_result in test_results:\n                if test_result.title not in failed_test_list:\n                    failed_test_list.append(test_result.title)\n                    query = session.query(Test)\\\n                        .filter(Test.mongo_id == test_result.test_id)\n                    if query.count():\n                        test = query.one()\n                        self.info_log(\n                            \"[%s] %s\" %\n                            (test.test_id, test.name)\n                        )\n                    else:\n                        self.info_log(\n                            \"[noid] %s\" %\n                            (test_result.title)\n                        )\n\n            if not failed_test_list:\n                self.info_log('No test failed!')\n\n            \n            self.info_log(separator)\n\n            \n            for test in executed_tests:\n                \n                self.info_log(\n                    '%s %s' %\n                    (test._name, test.pdriver.get_id())\n                )\n\n                test_instance = session.query(Testinstance)\\\n                    .filter(Testinstance.mongo_id == test._test_instance_id)\\\n                    .one()\n\n                \n                try:\n                    self.info_log(\n                        \"Test execution time: %s\" %\n                        (test_instance.ending_timestamp - test_instance.starting_timestamp)  \n                    )\n                except TypeError:\n                    self.info_log(\"Test execution time exception\")\n\n                \n                results = test.get_test_result_summary()\n                for result in results:\n                    self.info_log(result)\n\n                \n                if test._crash_error:\n                    self.info_log(test._crash_error)\n                else:\n                    self.info_log('No 
crash!')\n\n                \n                self.info_log(separator)\n\n            \n            self.info_log('Finished')", "docstring": "Print test summary\n\nWhen the test batch is finished a test summary will be printed\n\nArgs:\nexecuted_tests (list)", "source": "juraj-google-style"}
{"code": "def assign_stream_id_raster(stream_file, subbasin_file, out_stream_file):\n        \n        stream_raster = RasterUtilClass.read_raster(stream_file)\n        stream_data = stream_raster.data\n        nrows = stream_raster.nRows\n        ncols = stream_raster.nCols\n        nodata = stream_raster.noDataValue\n        subbain_data = RasterUtilClass.read_raster(subbasin_file).data\n        nodata_array = ones((nrows, ncols)) * DEFAULT_NODATA\n        newstream_data = where((stream_data > 0) & (stream_data != nodata),\n                               subbain_data, nodata_array)\n        RasterUtilClass.write_gtiff_file(out_stream_file, nrows, ncols, newstream_data,\n                                         stream_raster.geotrans, stream_raster.srs,\n                                         DEFAULT_NODATA, GDT_Int16)", "docstring": "Assign stream link ID according to subbasin ID.\nArgs:\nstream_file: input stream raster file\nsubbasin_file: subbasin raster file\nout_stream_file: output stream raster file", "source": "juraj-google-style"}
{"code": "def get_cqz(self, callsign, timestamp=timestamp_now):\n        \n        return self.get_all(callsign, timestamp)[const.CQZ]", "docstring": "Returns CQ Zone of a callsign\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: containing the callsign's CQ Zone\n\nRaises:\nKeyError: no CQ Zone found for callsign", "source": "juraj-google-style"}
{"code": "def insert(self, point, data=None):\n        \n        assert len(point) == self.k\n\n        if self.size == 0:\n            if self.region is None:\n                self.region = [[-math.inf, math.inf]] * self.k\n            axis = 0\n            return self.new_node(point, self.region, axis, data)\n\n        \n        current_id = 0\n        while True:\n            parent_node = self.node_list[current_id]\n            axis = parent_node.axis\n            if point[axis] < parent_node.point[axis]:\n                next_id, left = parent_node.left, True\n            else:\n                next_id, left = parent_node.right, False\n\n            if next_id is None:\n                break\n\n            current_id = next_id\n\n        \n        region = parent_node.region[:]\n        region[axis] = parent_node.region[axis][:]\n\n        \n        limit = parent_node.point[axis]\n\n        \n        if left:\n            self.node_list[current_id] = parent_node._replace(left=self.size)\n            region[axis][1] = limit\n        else:\n            self.node_list[current_id] = parent_node._replace(right=self.size)\n            region[axis][0] = limit\n\n        return self.new_node(point, region, (axis + 1) % self.k, data)", "docstring": "Insert a new node in the tree.\n\nArgs:\npoint (:obj:`tuple` of float or int): Stores the position of the\nnode.\ndata (:obj, optional): The information stored by the node.\n\nReturns:\nint: The identifier of the new node.\n\nExample:\n>>> tree = Tree(4, 800)\n>>> point = (3, 7)\n>>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2}\n>>> node_id = tree.insert(point, data)", "source": "juraj-google-style"}
{"code": "def append_to_list(self, key, *value, pipeline=False):\n        \n        if pipeline:\n            self._pipeline.rpush(key, *value)\n        else:\n            self._db.rpush(key, *value)", "docstring": "Add new element to the end of the list stored at key.\n\nArgs:\nkey (str): Key where the list is stored\nvalue: Value to add to the list\npipeline (bool): True, start a transaction block. Default false.", "source": "juraj-google-style"}
{"code": "async def download_cot(chain):\n    artifact_tasks = []\n    for link in chain.links:\n        task_id = link.task_id\n        parent_dir = link.cot_dir\n        urls = []\n        unsigned_url = get_artifact_url(chain.context, task_id, 'public/chain-of-trust.json')\n        urls.append(unsigned_url)\n        if chain.context.config['verify_cot_signature']:\n            urls.append(get_artifact_url(chain.context, task_id, 'public/chain-of-trust.json.sig'))\n        artifact_tasks.append(asyncio.ensure_future(download_artifacts(chain.context, urls, parent_dir=parent_dir, valid_artifact_task_ids=[task_id])))\n    artifacts_paths = (await raise_future_exceptions(artifact_tasks))\n    for path in artifacts_paths:\n        sha = get_hash(path[0])\n        log.debug('{} downloaded; hash is {}'.format(path[0], sha))", "docstring": "Download the signed chain of trust artifacts.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\n\nRaises:\nBaseDownloadError: on failure.", "source": "codesearchnet"}
{"code": "def __init__(self, config, log):\n        \n        \n        self._config = config\n        self._log = log\n        log.info('Pulsar Search Interface Initialisation')", "docstring": "Constructor.\n\nThe supplied configuration dictionary must contain all parameters\nneeded to define new user\n\nSee pulsar_receiver_config.json for an example.\n\nArgs:\nconfig (dict): Dictionary containing JSON configuration file.\nlog: Logger.", "source": "juraj-google-style"}
{"code": "def get(self, id=None, name=None):\n    if (not ((id is None) ^ (name is None))):\n        raise ValueError('Either id or name must be set (but not both!)')\n    if (id is not None):\n        return super(TaskQueueManager, self).get(id=id)\n    return self.list(filters={'name': name})[0]", "docstring": "Get a task queue.\n\nEither the id xor the name of the task type must be specified.\n\nArgs:\nid (int, optional): The id of the task type to get.\nname (str, optional): The name of the task type to get.\n\nReturns:\n:class:`saltant.models.task_queue.TaskQueue`:\nA task queue model instance representing the task queue\nrequested.\n\nRaises:\nValueError: Neither id nor name were set *or* both id and\nname were set.", "source": "codesearchnet"}
{"code": "def unit(x1, x2, block_num, depth, num_layers, dim='2d', bottleneck=True, first_batch_norm=True, stride=1, training=True):\n    scope_name = ('unit_%d' % block_num)\n    if bottleneck:\n        depth1 = depth\n        depth2 = (depth * 4)\n    else:\n        depth1 = depth2 = depth\n    residual = wrapped_partial(f, depth1=depth1, depth2=depth2, dim=dim, training=training, bottleneck=bottleneck)\n    with tf.variable_scope(scope_name):\n        downsample = (downsample_bottleneck if bottleneck else downsample_residual)\n        with tf.variable_scope('downsampling'):\n            with tf.variable_scope('x1'):\n                hx1 = downsample(x1, depth2, dim=dim, stride=stride)\n                fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm)\n                x1 = (hx1 + fx2)\n            with tf.variable_scope('x2'):\n                hx2 = downsample(x2, depth2, dim=dim, stride=stride)\n                fx1 = residual(x1)\n                x2 = (hx2 + fx1)\n        with tf.variable_scope('full_block'):\n            (x1, x2) = tf.contrib.layers.rev_block(x1, x2, residual, residual, num_layers=num_layers)\n            return (x1, x2)", "docstring": "Implements bottleneck RevNet unit from authors' RevNet architecture.\n\nArgs:\nx1: [N, H, W, C] tensor of network activations.\nx2: [N, H, W, C] tensor of network activations.\nblock_num: integer ID of block\ndepth: First depth in bottleneck residual unit.\nnum_layers: Number of layers in the RevNet block.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\nbottleneck: Should a bottleneck layer be used.\nfirst_batch_norm: Whether to keep the first batch norm layer or not.\nTypically used in the first RevNet block.\nstride: Stride for the residual function.\ntraining: True for train phase, False for eval phase.\n\nReturns:\nTwo [N, H, W, C] output activation tensors.", "source": "codesearchnet"}
{"code": "def remove_config(self, id):\n    url = self._url('/configs/{0}', id)\n    res = self._delete(url)\n    self._raise_for_status(res)\n    return True", "docstring": "Remove a config\n\nArgs:\nid (string): Full ID of the config to remove\n\nReturns (boolean): True if successful\n\nRaises:\n:py:class:`docker.errors.NotFound`\nif no config with that ID exists", "source": "codesearchnet"}
{"code": "def group_structures(self, s_list, anonymous=False):\n    if self._subset:\n        raise ValueError('allow_subset cannot be used with group_structures')\n    original_s_list = list(s_list)\n    s_list = self._process_species(s_list)\n    if anonymous:\n        c_hash = (lambda c: c.anonymized_formula)\n    else:\n        c_hash = self._comparator.get_hash\n    s_hash = (lambda s: c_hash(s[1].composition))\n    sorted_s_list = sorted(enumerate(s_list), key=s_hash)\n    all_groups = []\n    for (k, g) in itertools.groupby(sorted_s_list, key=s_hash):\n        unmatched = list(g)\n        while (len(unmatched) > 0):\n            (i, refs) = unmatched.pop(0)\n            matches = [i]\n            if anonymous:\n                inds = filter((lambda i: self.fit_anonymous(refs, unmatched[i][1])), list(range(len(unmatched))))\n            else:\n                inds = filter((lambda i: self.fit(refs, unmatched[i][1])), list(range(len(unmatched))))\n            inds = list(inds)\n            matches.extend([unmatched[i][0] for i in inds])\n            unmatched = [unmatched[i] for i in range(len(unmatched)) if (i not in inds)]\n            all_groups.append([original_s_list[i] for i in matches])\n    return all_groups", "docstring": "Given a list of structures, use fit to group\nthem by structural equality.\n\nArgs:\ns_list ([Structure]): List of structures to be grouped\nanonymous (bool): Wheher to use anonymous mode.\n\nReturns:\nA list of lists of matched structures\nAssumption: if s1 == s2 but s1 != s3, than s2 and s3 will be put\nin different groups without comparison.", "source": "codesearchnet"}
{"code": "def start_listener_thread(self, timeout_ms=30000, exception_handler=None):\n    try:\n        thread = Thread(target=self.listen_forever, args=(timeout_ms, exception_handler))\n        thread.daemon = True\n        self.sync_thread = thread\n        self.should_listen = True\n        thread.start()\n    except RuntimeError:\n        e = sys.exc_info()[0]\n        logger.error('Error: unable to start thread. %s', str(e))", "docstring": "Start a listener thread to listen for events in the background.\n\nArgs:\ntimeout (int): How long to poll the Home Server for before\nretrying.\nexception_handler (func(exception)): Optional exception handler\nfunction which can be used to handle exceptions in the caller\nthread.", "source": "codesearchnet"}
{"code": "def from_files(path_dir, dos_spin=1):\n    (run_type, warning, efermi, gap, doping_levels) = BoltztrapAnalyzer.parse_outputtrans(path_dir)\n    vol = BoltztrapAnalyzer.parse_struct(path_dir)\n    intrans = BoltztrapAnalyzer.parse_intrans(path_dir)\n    if (run_type == 'BOLTZ'):\n        (dos, pdos) = BoltztrapAnalyzer.parse_transdos(path_dir, efermi, dos_spin=dos_spin, trim_dos=False)\n        (mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, seebeck_doping, cond_doping, kappa_doping, hall_doping, carrier_conc) = BoltztrapAnalyzer.parse_cond_and_hall(path_dir, doping_levels)\n        return BoltztrapAnalyzer(gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, seebeck_doping, cond_doping, kappa_doping, hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)\n    elif (run_type == 'DOS'):\n        trim = (True if (intrans['dos_type'] == 'HISTO') else False)\n        (dos, pdos) = BoltztrapAnalyzer.parse_transdos(path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)\n        return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos, warning=warning, vol=vol)\n    elif (run_type == 'BANDS'):\n        bz_kpoints = np.loadtxt(os.path.join(path_dir, 'boltztrap_band.dat'))[(:, (- 3):)]\n        bz_bands = np.loadtxt(os.path.join(path_dir, 'boltztrap_band.dat'))[(:, 1:(- 6))]\n        return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints, warning=warning, vol=vol)\n    elif (run_type == 'FERMI'):\n        '\\n            '\n        if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):\n            fs_data = read_cube_file(os.path.join(path_dir, 'boltztrap_BZ.cube'))\n        elif os.path.exists(os.path.join(path_dir, 'fort.30')):\n            fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))\n        else:\n            raise BoltztrapError('No data file found for fermi surface')\n        return BoltztrapAnalyzer(fermi_surface_data=fs_data)\n    else:\n        raise ValueError('Run type: {} not recognized!'.format(run_type))", "docstring": "get a BoltztrapAnalyzer object from a set of files\n\nArgs:\npath_dir: directory where the boltztrap files are\ndos_spin: in DOS mode, set to 1 for spin up and -1 for spin down\n\nReturns:\na BoltztrapAnalyzer object", "source": "codesearchnet"}
{"code": "def __init__(self, details):\n\t\t\n\n\t\t\n\t\tif isinstance(details, basestring):\n\t\t\tdetails = {\"__type__\": details}\n\n\t\t\n\t\telif not isinstance(details, dict):\n\t\t\traise ValueError('details')\n\n\t\t\n\t\tif '__type__' not in details or details['__type__'] not in self._VALID_TYPES:\n\t\t\traise KeyError('__type__')\n\n\t\t\n\t\tself._type = details['__type__']\n\t\tdel details['__type__']\n\n\t\t\n\t\tself._regex = None\n\t\tself._options = None\n\t\tself._minimum = None\n\t\tself._maximum = None\n\n\t\t\n\t\tif '__regex__' in details:\n\t\t\tself.regex(details['__regex__'])\n\t\t\tdel details['__regex__']\n\n\t\t\n\t\telif '__options__' in details:\n\t\t\tself.options(details['__options__'])\n\t\t\tdel details['__options__']\n\n\t\t\n\t\telse:\n\n\t\t\t\n\t\t\tbMin = ('__minimum__' in details and True or False)\n\t\t\tbMax = ('__maximum__' in details and True or False)\n\n\t\t\tif bMin or bMax:\n\t\t\t\tself.minmax(\n\t\t\t\t\t(bMin and details['__minimum__'] or None),\n\t\t\t\t\t(bMax and details['__maximum__'] or None)\n\t\t\t\t)\n\n\t\t\tif bMin: del details['__minimum__']\n\t\t\tif bMax: del details['__maximum__']\n\n\t\t\n\t\tsuper(Node, self).__init__(details, 'Node')", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of value allowed for\nthe node\n\nRaises:\nKeyError\nValueError\n\nReturns:\nNode", "source": "juraj-google-style"}
{"code": "def all_gather(self, input_tensor: core.TensorLike, axis: core.TensorLike, options: Optional[collective_util.Options]=None) -> core.Tensor:\n    if context.executing_eagerly():\n        raise RuntimeError('all_gather is not supported in eager mode.')\n    with ops.device(self._device), ops.control_dependencies([array_ops.identity(input_tensor)]):\n        perm_pre = array_ops.concat(([axis], math_ops.range(axis), math_ops.range(axis + 1, array_ops.rank(input_tensor))), axis=0)\n        input_tensor_t = array_ops.transpose(input_tensor, perm=perm_pre)\n        gathered_shape = self._all_gather(array_ops.expand_dims_v2(array_ops.shape_v2(input_tensor_t), axis=0), options)\n        first_dims = gathered_shape[:, 0]\n        full_axis_dim = math_ops.reduce_max(first_dims)\n        padded_input_tensor = _pad_util(input_tensor_t, full_axis_dim)\n        gather_padded_out_tensor = self._all_gather(padded_input_tensor, options)\n        split_tensors = []\n        for i in range(self._group_size):\n            start_pos = i * full_axis_dim\n            split_tensors.append(gather_padded_out_tensor[start_pos:start_pos + first_dims[i]])\n        out_tensor_t = array_ops.concat(split_tensors, 0)\n        perm_after = array_ops.concat((math_ops.range(1, axis + 1), [0], math_ops.range(axis + 1, array_ops.rank(input_tensor_t))), axis=0)\n        return array_ops.transpose(out_tensor_t, perm=perm_after)", "docstring": "All-gather a dense tensor.\n\nThis method must be called inside a tf.function.\n\nArgs:\ninput_tensor: a dense tensor. It must have the same rank on all replicas,\nand dimensions other than `axis` need to be the same as well.\naxis: 0-D int32 Tensor. Dimension along which to gather. Must be in the\nrange [0, rank(value)).\noptions: an optional tf.distribute.experimental.CommunicationOptions. If\nprovided, it overrides the default options.\n\nReturns:\nThe gathered Tensor.\n\nRaises:\nRuntimeError: if called in eager mode.", "source": "github-repos"}
{"code": "def get_output_info_dict(self, signature=None):\n    return self._spec.get_output_info_dict(signature=signature, tags=self._tags)", "docstring": "Describes the outputs provided by a signature.\n\nArgs:\nsignature: A string with the signature to get ouputs information for.\nIf None, the default signature is used if defined.\n\nReturns:\nThe result of ModuleSpec.get_output_info_dict() for the given signature,\nand the graph variant selected by `tags` when this Module was initialized.\n\nRaises:\nKeyError: if there is no such signature.", "source": "codesearchnet"}
{"code": "def _lookup_namespace(self, symbol, namespace):\n        \n        for namespace_part in symbol.parts:\n            namespace = namespace.get(namespace_part)\n            if namespace is None:\n                break\n            if not isinstance(namespace, dict):\n                return namespace\n        raise Error('%s not found' % symbol.name)", "docstring": "Helper for lookup_symbol that only looks up variables in a\nnamespace.\n\nArgs:\nsymbol: Symbol\nnamespace: pointer into self.namespaces", "source": "juraj-google-style"}
{"code": "def channels_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'thread_ts': thread_ts})\n    return self.api_call('channels.replies', http_verb='GET', params=kwargs)", "docstring": "Retrieve a thread of messages posted to a channel\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nthread_ts (str): The timestamp of an existing message with 0 or more replies.\ne.g. '1234567890.123456'", "source": "codesearchnet"}
{"code": "def remove_function(self, name):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)", "docstring": "Remove a function from the context.\n\nOnce removed, the function cannot be executed anymore.\n\nArgs:\nname: function signature name.", "source": "github-repos"}
{"code": "def import_submodules(package: Union[str, ModuleType],\n                      base_package_for_relative_import: str = None,\n                      recursive: bool = True) -> Dict[str, ModuleType]:\n    \n    \n    if isinstance(package, str):\n        package = importlib.import_module(package,\n                                          base_package_for_relative_import)\n    results = {}\n    for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):\n        full_name = package.__name__ + '.' + name\n        log.debug(\"importing: {}\", full_name)\n        results[full_name] = importlib.import_module(full_name)\n        if recursive and is_pkg:\n            results.update(import_submodules(full_name))\n    return results", "docstring": "Import all submodules of a module, recursively, including subpackages.\n\nArgs:\npackage: package (name or actual module)\nbase_package_for_relative_import: path to prepend?\nrecursive: import submodules too?\n\nReturns:\ndict: mapping from full module name to module", "source": "juraj-google-style"}
{"code": "def heightmap_add_fbm(hm: np.ndarray, noise: tcod.noise.Noise, mulx: float, muly: float, addx: float, addy: float, octaves: float, delta: float, scale: float) -> None:\n    noise = (noise.noise_c if (noise is not None) else ffi.NULL)\n    lib.TCOD_heightmap_add_fbm(_heightmap_cdata(hm), noise, mulx, muly, addx, addy, octaves, delta, scale)", "docstring": "Add FBM noise to the heightmap.\n\nThe noise coordinate for each map cell is\n`((x + addx) * mulx / width, (y + addy) * muly / height)`.\n\nThe value added to the heightmap is `delta + noise * scale`.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nnoise (Noise): A Noise instance.\nmulx (float): Scaling of each x coordinate.\nmuly (float): Scaling of each y coordinate.\naddx (float): Translation of each x coordinate.\naddy (float): Translation of each y coordinate.\noctaves (float): Number of octaves in the FBM sum.\ndelta (float): The value added to all heightmap cells.\nscale (float): The noise value is scaled with this parameter.\n\n.. deprecated:: 8.1\nAn equivalent array of noise samples can be taken using a method such\nas :any:`Noise.sample_ogrid`.", "source": "codesearchnet"}
{"code": "def __call__(self, environ, start_response):  \n    \n    request = wrappers.Request(environ)\n    parsed_url = urlparse.urlparse(request.path)\n    clean_path = _clean_path(parsed_url.path, self._path_prefix)\n\n    \n    if clean_path in self.data_applications:\n      return self.data_applications[clean_path](environ, start_response)\n    else:\n      logger.warn('path %s not found, sending 404', clean_path)\n      return http_util.Respond(request, 'Not found', 'text/plain', code=404)(\n          environ, start_response)", "docstring": "Central entry point for the TensorBoard application.\n\nThis method handles routing to sub-applications. It does simple routing\nusing regular expression matching.\n\nThis __call__ method conforms to the WSGI spec, so that instances of this\nclass are WSGI applications.\n\nArgs:\nenviron: See WSGI spec.\nstart_response: See WSGI spec.\n\nReturns:\nA werkzeug Response.", "source": "juraj-google-style"}
{"code": "def _my_top_k(x, k):\n    if (k > 10):\n        return tf.nn.top_k(x, k)\n    values = []\n    indices = []\n    depth = tf.shape(x)[1]\n    for i in range(k):\n        values.append(tf.reduce_max(x, 1))\n        argmax = tf.argmax(x, 1)\n        indices.append(argmax)\n        if ((i + 1) < k):\n            x += tf.one_hot(argmax, depth, (- 1000000000.0))\n    return (tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1)))", "docstring": "GPU-compatible version of top-k that works for very small constant k.\n\nCalls argmax repeatedly.\n\ntf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,\nseems not to be, so if we use tf.nn.top_k, then both the top_k and its\ngradient go on cpu.  Once this is not an issue, this function becomes\nobsolete and should be replaced by tf.nn.top_k.\n\nArgs:\nx: a 2d Tensor.\nk: a small integer.\n\nReturns:\nvalues: a Tensor of shape [batch_size, k]\nindices: a int32 Tensor of shape [batch_size, k]", "source": "codesearchnet"}
{"code": "def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False):\n    _not_hidden = (lambda f: (not f.startswith('.')))\n    _match_pattern = None\n    _filter_padding = None\n    _join = os.path.join\n    seq = None\n    dirpath = pattern\n    if (not os.path.isdir(pattern)):\n        (dirpath, filepat) = os.path.split(pattern)\n        if (not os.path.isdir(dirpath)):\n            return []\n        seq = cls(filepat)\n        patt = seq.basename().replace('.', '\\\\.')\n        if seq.padding():\n            patt += '\\\\d+'\n        if seq.extension():\n            patt += seq.extension()\n        view = bytearray(patt)\n        matches = re.finditer('{(.*?)(?:,(.*?))*}', patt)\n        for match in reversed(list(matches)):\n            (i, j) = match.span()\n            view[i:j] = ('(%s)' % '|'.join([m.strip() for m in match.groups()]))\n        view = view.replace('*', '.*')\n        view = view.replace('?', '.')\n        view += '$'\n        try:\n            _match_pattern = re.compile(str(view)).match\n        except re.error:\n            msg = 'Invalid file pattern: {}'.format(filepat)\n            raise FileSeqException(msg)\n        if (seq.padding() and strictPadding):\n            _filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())\n    ret = next(os.walk(dirpath), None)\n    files = (ret[(- 1)] if ret else [])\n    if (not include_hidden):\n        files = ifilter(_not_hidden, files)\n    if _match_pattern:\n        files = ifilter(_match_pattern, files)\n    if _filter_padding:\n        files = _filter_padding(files)\n    sep = utils._getPathSep(dirpath)\n    if (not dirpath.endswith(sep)):\n        dirpath += sep\n    files = (_join(dirpath, f) for f in files)\n    files = list(files)\n    seqs = list(FileSequence.yield_sequences_in_list(files))\n    if (_filter_padding and seq):\n        pad = cls.conformPadding(seq.padding())\n        for s in seqs:\n            s.setPadding(pad)\n    return seqs", "docstring": "Yield the sequences found in the given directory.\n\nExamples:\n>>> findSequencesOnDisk('/path/to/files')\n\nThe `pattern` can also specify glob-like shell wildcards including the following:\n* ``?``         - 1 wildcard character\n* ``*``         - 1 or more wildcard character\n* ``{foo,bar}`` - either 'foo' or 'bar'\n\nExact frame ranges are not considered, and padding characters are converted to\nwildcards (``#`` or ``@``)\n\nExamples:\n>>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg')\n>>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True)\n\nArgs:\npattern (str): directory to scan, or pattern to filter in directory\ninclude_hidden (bool): if true, show .hidden files as well\nstrictPadding (bool): if True, ignore files with padding length different from pattern\n\nReturns:\nlist:", "source": "codesearchnet"}
{"code": "def remove_global_handler(self, event, handler):\n        \n        with self.mutex:\n            if event not in self.handlers:\n                return 0\n            for h in self.handlers[event]:\n                if handler == h.callback:\n                    self.handlers[event].remove(h)\n        return 1", "docstring": "Removes a global handler function.\n\nArguments:\n\nevent -- Event type (a string).\nhandler -- Callback function.\n\nReturns 1 on success, otherwise 0.", "source": "juraj-google-style"}
{"code": "def execute(self, action):\n        \n        next_state, rew, done, _ = self.env.step(action)\n        return next_state, rew, done", "docstring": "Executes action, observes next state and reward.\n\nArgs:\nactions: Actions to execute.\n\nReturns:\nTuple of (next state, bool indicating terminal, reward)", "source": "juraj-google-style"}
{"code": "def inplace_update(x, i, v):\n    return alias_inplace_update(gen_array_ops.deep_copy(x), i, v)", "docstring": "Applies an inplace update on input x at index i with value v.\n\nNote that this function is not actually inplace - it allocates\na copy of x.  The utility is not avoiding memory copies but rather\nspecifying a sparse update.\n\nIf i is None, x and v must be the same shape. Computes\ny = x; y = v;\nIf i is a scalar, x has a rank 1 higher than v's. Computes\ny = x; y[i, :] = v;\nOtherwise, x and v must have the same rank. Computes\ny = x; y[i, :] = v;\n\nArgs:\nx: A Tensor.\ni: None, a scalar or a vector.\nv: A Tensor.\n\nReturns:\nReturns y, which is guaranteed not to be an alias of x.", "source": "github-repos"}
{"code": "def generate_name_variations(name):\n    \n    def _update_name_variations_with_product(set_a, set_b):\n        name_variations.update([\n            unidecode((names_variation[0] +\n                       separator +\n                       names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()\n            for names_variation\n            in product(set_a, set_b)\n            for separator\n            in _LASTNAME_NON_LASTNAME_SEPARATORS\n        ])\n\n    parsed_name = ParsedName.loads(name)\n\n    \n    if len(parsed_name) == 1:\n        return [parsed_name.dumps().lower()]\n\n    name_variations = set()\n\n    \n    \n    non_lastnames = [\n        non_lastname\n        for non_lastname\n        in parsed_name.first_list + parsed_name.suffix_list\n        if non_lastname\n    ]\n\n    \n    \n    \n    if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:\n        LOGGER.error('Skipping name variations generation - too many names in: \"%s\"', name)\n        return [name]\n\n    non_lastnames_variations = \\\n        _generate_non_lastnames_variations(non_lastnames)\n    lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)\n\n    \n    _update_name_variations_with_product(lastnames_variations, non_lastnames_variations)\n\n    \n    _update_name_variations_with_product(non_lastnames_variations, lastnames_variations)\n\n    return list(name_variations)", "docstring": "Generate name variations for a given name.\n\nArgs:\nname (six.text_type): The name whose variations are to be generated.\n\nReturns:\nlist: All the name variations for the given name.\n\nNotes:\nUses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map\nboth full names of authors in HEP records and user's input to the same space and thus make exact queries work.", "source": "juraj-google-style"}
{"code": "def parseMagnitude(m):\n    m = NumberService().parse(m)\n\n    def toDecimalPrecision(n, k):\n        return float(('%.*f' % (k, round(n, k))))\n    digits = 2\n    magnitude = toDecimalPrecision(m, digits)\n    while (not magnitude):\n        digits += 1\n        magnitude = toDecimalPrecision(m, digits)\n    if (m < 1.0):\n        magnitude = toDecimalPrecision(m, (digits + 1))\n    if (int(magnitude) == magnitude):\n        magnitude = int(magnitude)\n    magString = str(magnitude)\n    magString = re.sub('(\\\\d)e-(\\\\d+)', '\\\\g<1> times ten to the negative \\\\g<2>', magString)\n    magString = re.sub('(\\\\d)e\\\\+(\\\\d+)', '\\\\g<1> times ten to the \\\\g<2>', magString)\n    magString = re.sub('-(\\\\d+)', 'negative \\\\g<1>', magString)\n    magString = re.sub('\\\\b0(\\\\d+)', '\\\\g<1>', magString)\n    return magString", "docstring": "Parses a number m into a human-ready string representation.\nFor example, crops off floats if they're too accurate.\n\nArguments:\nm (float): Floating-point number to be cleaned.\n\nReturns:\nHuman-ready string description of the number.", "source": "codesearchnet"}
{"code": "def greater_than_evaluator(self, index):\n    \n    condition_name = self.condition_data[index][0]\n    condition_value = self.condition_data[index][1]\n    user_value = self.attributes.get(condition_name)\n\n    if not validator.is_finite_number(condition_value):\n      self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(\n        self._get_condition_json(index)\n      ))\n      return None\n\n    if not self.is_value_a_number(user_value):\n      self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(\n          self._get_condition_json(index),\n          type(user_value),\n          condition_name\n      ))\n      return None\n\n    if not validator.is_finite_number(user_value):\n      self.logger.warning(audience_logs.INFINITE_ATTRIBUTE_VALUE.format(\n        self._get_condition_json(index),\n        condition_name\n      ))\n      return None\n\n    return user_value > condition_value", "docstring": "Evaluate the given greater than match condition for the user attributes.\n\nArgs:\nindex: Index of the condition to be evaluated.\n\nReturns:\nBoolean:\n- True if the user attribute value is greater than the condition value.\n- False if the user attribute value is less than or equal to the condition value.\nNone: if the condition value isn't finite or the user attribute value isn't finite.", "source": "juraj-google-style"}
{"code": "def _neg(x, name=None):\n    return negative(x, name)", "docstring": "Computes numerical negative value element-wise.\n\nI.e., \\(y = -x\\).\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.", "source": "github-repos"}
{"code": "def RegisterMessage(self, message):\n    desc = message.DESCRIPTOR\n    self._classes[desc.full_name] = message\n    self.pool.AddDescriptor(desc)\n    return message", "docstring": "Registers the given message type in the local database.\n\nCalls to GetSymbol() and GetMessages() will return messages registered here.\n\nArgs:\nmessage: a message.Message, to be registered.\n\nReturns:\nThe provided message.", "source": "codesearchnet"}
{"code": "def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n    hasher = _resolve_hasher(algorithm, file_hash)\n    if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n        return True\n    else:\n        return False", "docstring": "Validates a file against a sha256 or md5 hash.\n\nArgs:\nfpath: path to the file being validated\nfile_hash:  The expected hash string of the file.\nThe sha256 and md5 hash algorithms are both supported.\nalgorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.\nThe default 'auto' detects the hash algorithm in use.\nchunk_size: Bytes to read at a time, important for large files.\n\nReturns:\nWhether the file is valid", "source": "github-repos"}
{"code": "def copy_script(self, filename, id_=-1):\n        \n        if (\"jss\" in self.connection.keys() and\n                self.connection[\"jss\"].jss_migrated):\n            self._copy_script_migrated(filename, id_, SCRIPT_FILE_TYPE)\n        else:\n            basename = os.path.basename(filename)\n            self._copy(filename, os.path.join(self.connection[\"mount_point\"],\n                                              \"Scripts\", basename))", "docstring": "Copy a script to the repo's Script subdirectory.\n\nScripts are copied as files to a path, or, on a \"migrated\" JSS,\nare POSTed to the JSS (pass an id if you wish to associate\nthe script with an existing Script object).\n\nArgs:\nfilename: Path for file to copy.\nid_: Int ID, used _only_ for migrated repos. Default is -1,\nwhich creates a new Script.", "source": "juraj-google-style"}
{"code": "def _get_authorization_headers(self) -> dict:\n        \n        auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1')\n        auth = auth.replace('\\n', '').replace(' ', '')\n        auth = 'Basic {}'.format(auth)\n        headers = {'Authorization': auth}\n        return headers", "docstring": "Constructs and returns the Authorization header for the client app.\n\nArgs:\nNone\n\nReturns:\nheader dict for communicating with the authorization endpoints", "source": "juraj-google-style"}
{"code": "def query_origin_stack(self):\n    ret = []\n    for stack, id_to_string in zip(self._origin_stacks, self._origin_id_to_strings):\n        ret.append(self._code_def_to_traceback(stack, id_to_string))\n    return ret", "docstring": "Query the stack of the origin of the execution call.\n\nReturns:\nA `list` of all tracebacks. Each item corresponds to an execution call,\ni.e., a `SendTracebacks` request. Each item is a `list` of 3-tuples:\n(filename, lineno, function_name).", "source": "github-repos"}
{"code": "def _create_authenticator(a_service):\n    \n    if not isinstance(a_service, sm_messages.Service):\n        raise ValueError(u\"service is None or not an instance of Service\")\n\n    authentication = a_service.authentication\n    if not authentication:\n        _logger.info(u\"authentication is not configured in service, \"\n                     u\"authentication checks will be disabled\")\n        return\n\n    issuers_to_provider_ids = {}\n    issuer_uri_configs = {}\n    for provider in authentication.providers:\n        issuer = provider.issuer\n        jwks_uri = provider.jwksUri\n\n        \n        open_id = jwks_uri is None\n        issuer_uri_configs[issuer] = suppliers.IssuerUriConfig(open_id, jwks_uri)\n        issuers_to_provider_ids[issuer] = provider.id\n\n    key_uri_supplier = suppliers.KeyUriSupplier(issuer_uri_configs)\n    jwks_supplier = suppliers.JwksSupplier(key_uri_supplier)\n    authenticator = tokens.Authenticator(issuers_to_provider_ids, jwks_supplier)\n    return authenticator", "docstring": "Create an instance of :class:`google.auth.tokens.Authenticator`.\n\nArgs:\na_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a\nservice instance", "source": "juraj-google-style"}
{"code": "def log_sigmoid(x):\n    return ops.log_sigmoid(x)", "docstring": "Logarithm of the sigmoid activation function.\n\nIt is defined as `f(x) = log(1 / (1 + exp(-x)))`.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def _hexdecode(hexstring):\n    _checkString(hexstring, description='hexstring')\n    if ((len(hexstring) % 2) != 0):\n        raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring))\n    if (sys.version_info[0] > 2):\n        by = bytes(hexstring, 'latin1')\n        try:\n            return str(binascii.unhexlify(by), encoding='latin1')\n        except binascii.Error as err:\n            new_error_message = 'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(err.args[0], hexstring)\n            raise TypeError(new_error_message)\n    else:\n        try:\n            return hexstring.decode('hex')\n        except TypeError as err:\n            raise TypeError('Hexdecode reported an error: {}. Input hexstring: {}'.format(err.message, hexstring))", "docstring": "Convert a hex encoded string to a byte string.\n\nFor example '4A' will return 'J', and '04' will return ``'\\\\x04'`` (which has length 1).\n\nArgs:\nhexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length.\nAllowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space).\n\nReturns:\nA string of half the length, with characters corresponding to all 0-255 values for each byte.\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def ToJson(self, auto_hex=True):\n    jsn = {}\n    jsn['type'] = str(ContractParameterType(self.Type))\n    if (self.Type == ContractParameterType.Signature):\n        jsn['value'] = self.Value.hex()\n    elif (self.Type == ContractParameterType.ByteArray):\n        if auto_hex:\n            jsn['value'] = self.Value.hex()\n        else:\n            jsn['value'] = self.Value\n    elif (self.Type == ContractParameterType.Boolean):\n        jsn['value'] = self.Value\n    elif (self.Type == ContractParameterType.String):\n        jsn['value'] = str(self.Value)\n    elif (self.Type == ContractParameterType.Integer):\n        jsn['value'] = self.Value\n    elif (self.Type == ContractParameterType.PublicKey):\n        jsn['value'] = self.Value.ToString()\n    elif (self.Type in [ContractParameterType.Hash160, ContractParameterType.Hash256]):\n        jsn['value'] = self.Value.ToString()\n    elif (self.Type == ContractParameterType.Array):\n        res = []\n        for item in self.Value:\n            if item:\n                res.append(item.ToJson(auto_hex=auto_hex))\n        jsn['value'] = res\n    elif (self.Type == ContractParameterType.InteropInterface):\n        try:\n            jsn['value'] = self.Value.ToJson()\n        except Exception as e:\n            pass\n    return jsn", "docstring": "Converts a ContractParameter instance to a json representation\n\nReturns:\ndict: a dictionary representation of the contract parameter", "source": "codesearchnet"}
{"code": "def add(a, b, allow_overflow=False):\n    for m in (a, b):\n        if (not isinstance(m, sc_messages.Money)):\n            raise ValueError((u'Inputs should be of type %s' % (sc_messages.Money,)))\n    if (a.currencyCode != b.currencyCode):\n        raise ValueError(u'Money values need the same currency to be summed')\n    (nano_carry, nanos_sum) = _sum_nanos(a, b)\n    units_sum_no_carry = (a.units + b.units)\n    units_sum = (units_sum_no_carry + nano_carry)\n    if ((units_sum > 0) and (nanos_sum < 0)):\n        units_sum -= 1\n        nanos_sum += _BILLION\n    elif ((units_sum < 0) and (nanos_sum > 0)):\n        units_sum += 1\n        nanos_sum -= _BILLION\n    sign_a = _sign_of(a)\n    sign_b = _sign_of(b)\n    if ((sign_a > 0) and (sign_b > 0) and (units_sum >= _INT64_MAX)):\n        if (not allow_overflow):\n            raise OverflowError(u'Money addition positive overflow')\n        else:\n            return sc_messages.Money(units=_INT64_MAX, nanos=MAX_NANOS, currencyCode=a.currencyCode)\n    elif ((sign_a < 0) and (sign_b < 0) and ((units_sum_no_carry <= (- _INT64_MAX)) or (units_sum <= (- _INT64_MAX)))):\n        if (not allow_overflow):\n            raise OverflowError(u'Money addition negative overflow')\n        else:\n            return sc_messages.Money(units=_INT64_MIN, nanos=(- MAX_NANOS), currencyCode=a.currencyCode)\n    else:\n        return sc_messages.Money(units=units_sum, nanos=nanos_sum, currencyCode=a.currencyCode)", "docstring": "Adds two instances of `Money`.\n\nArgs:\na (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): one money\nvalue\nb (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): another\nmoney value\nallow_overflow: determines if the addition is allowed to overflow\n\nReturn:\n`Money`: an instance of Money\n\nRaises:\nValueError: if the inputs do not have the same currency code\nOverflowError: if the sum overflows and allow_overflow is not `True`", "source": "codesearchnet"}
{"code": "def _remove_session_callback(self, callback_obj, originator):\n    try:\n        callback_objs = [callback_obj]\n        self._session_callbacks.remove(callback_obj)\n        for (cb, cb_objs) in list(self._callback_objs_by_callable[originator].items()):\n            try:\n                cb_objs.remove(callback_obj)\n                if (not cb_objs):\n                    del self._callback_objs_by_callable[originator][cb]\n            except KeyError:\n                pass\n    except KeyError:\n        raise ValueError('callback already ran or was already removed, cannot be removed again')\n    for callback_obj in callback_objs:\n        self._trigger_on_change(SessionCallbackRemoved(self, callback_obj))", "docstring": "Remove a callback added earlier with ``add_periodic_callback``,\n``add_timeout_callback``, or ``add_next_tick_callback``.\n\nReturns:\nNone\n\nRaises:\nKeyError, if the callback was never added", "source": "codesearchnet"}
{"code": "def get_env(key, *default, **kwargs):\n    assert (len(default) in (0, 1)), 'Too many args supplied.'\n    func = kwargs.get('coerce', (lambda x: x))\n    required = (len(default) == 0)\n    default = (default[0] if (not required) else None)\n    return _get_env(key, default=default, coerce=func, required=required)", "docstring": "Return env var.\n\nThis is the parent function of all other get_foo functions,\nand is responsible for unpacking args/kwargs into the values\nthat _get_env expects (it is the root function that actually\ninteracts with environ).\n\nArgs:\nkey: string, the env var name to look up.\ndefault: (optional) the value to use if the env var does not\nexist. If this value is not supplied, then the env var is\nconsidered to be required, and a RequiredSettingMissing\nerror will be raised if it does not exist.\n\nKwargs:\ncoerce: a func that may be supplied to coerce the value into\nsomething else. This is used by the default get_foo functions\nto cast strings to builtin types, but could be a function that\nreturns a custom class.\n\nReturns the env var, coerced if required, and a default if supplied.", "source": "codesearchnet"}
{"code": "def delete_as(access_token, subscription_id, resource_group, as_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/availabilitySets/', as_name, '?api-version=', COMP_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete availability set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nas_name (str): Name of the availability set.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def handleresult(self, r):\n    if ((r.status_code >= 400) and (r.status_code < 500)):\n        msg = r.json()\n        raise AuthenticationError((((((str(msg['code']) + ': ') + msg['msg']) + ' (') + msg['ref']) + ')'))\n    elif (r.status_code > 300):\n        err = None\n        try:\n            msg = r.json()\n            err = ServerError((((((str(msg['code']) + ': ') + msg['msg']) + ' (') + msg['ref']) + ')'))\n        except:\n            raise ServerError('Server returned error, but did not give a valid error message')\n        raise err\n    return r", "docstring": "Handles HTTP error codes for the given request\n\nRaises:\nAuthenticationError on the appropriate 4** errors\nServerError if the response is not an ok (2**)\n\nArguments:\nr -- The request result", "source": "codesearchnet"}
{"code": "def check_memo(self, task_id, task):\n    if ((not self.memoize) or (not task['memoize'])):\n        task['hashsum'] = None\n        return (None, None)\n    hashsum = self.make_hash(task)\n    present = False\n    result = None\n    if (hashsum in self.memo_lookup_table):\n        present = True\n        result = self.memo_lookup_table[hashsum]\n        logger.info('Task %s using result from cache', task_id)\n    task['hashsum'] = hashsum\n    return (present, result)", "docstring": "Create a hash of the task and its inputs and check the lookup table for this hash.\n\nIf present, the results are returned. The result is a tuple indicating whether a memo\nexists and the result, since a Null result is possible and could be confusing.\nThis seems like a reasonable option without relying on an cache_miss exception.\n\nArgs:\n- task(task) : task from the dfk.tasks table\n\nReturns:\nTuple of the following:\n- present (Bool): Is this present in the memo_lookup_table\n- Result (Py Obj): Result of the function if present in table\n\nThis call will also set task['hashsum'] to the unique hashsum for the func+inputs.", "source": "codesearchnet"}
{"code": "def _filter_match(self, span: span, relations: Dict, patterns: List) -> bool:\n        \n\n        for pattern_id, a_pattern in enumerate(patterns):\n            token_range = relations[pattern_id]\n            if token_range:\n                tokens = [x for x in span[token_range[0]:token_range[1]]]\n                if a_pattern.type == \"word\":\n                    if not self._pre_suf_fix_filter(tokens, a_pattern.prefix, a_pattern.suffix):\n                        return False\n                if a_pattern.type == \"shape\":\n                    if not (self._full_shape_filter(tokens, a_pattern.full_shape)\n                            and self._pre_suf_fix_filter(tokens, a_pattern.prefix,a_pattern.suffix)):\n                        return False\n                if a_pattern.type == \"number\":\n                    if not self._min_max_filter(tokens, a_pattern.min, a_pattern.max):\n                        return False\n        return True", "docstring": "Filter the match result according to prefix, suffix, min, max ...\nArgs:\nspan: span\nrelations: Dict\npatterns: List of pattern\n\nReturns: bool", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)\n\n    \n    \n    if store_index is None:\n      location = getattr(path_spec, 'location', None)\n      if location is None or location != self.LOCATION_ROOT:\n        return None\n\n      return vshadow_file_entry.VShadowFileEntry(\n          self._resolver_context, self, path_spec, is_root=True,\n          is_virtual=True)\n\n    if store_index < 0 or store_index >= self._vshadow_volume.number_of_stores:\n      return None\n\n    return vshadow_file_entry.VShadowFileEntry(\n        self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nVShadowFileEntry: file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def _call_partitioner(partitioner, shape, dtype):\n    if not shape.is_fully_defined():\n        raise ValueError('Shape of a new partitioned variable must be fully defined, but instead was %s.' % (shape,))\n    if shape.ndims < 1:\n        raise ValueError('A partitioned Variable must have rank at least 1, shape: %s' % shape)\n    slicing = partitioner(shape=shape, dtype=dtype)\n    if not isinstance(slicing, collections_abc.Sequence):\n        raise ValueError('Partitioner must return a sequence, but saw: %s' % slicing)\n    if len(slicing) != shape.ndims:\n        raise ValueError(\"Partitioner returned a partition list that does not match the Variable's rank: %s vs. %s\" % (slicing, shape))\n    if any((p < 1 for p in slicing)):\n        raise ValueError('Partitioner returned zero partitions for some axes: %s' % slicing)\n    if sum((p > 1 for p in slicing)) > 1:\n        raise ValueError('Can only slice a variable along one dimension: shape: %s, partitioning: %s' % (shape, slicing))\n    return slicing", "docstring": "Call partitioner validating its inputs/output.\n\nArgs:\npartitioner: a function mapping `Tensor` shape and dtype to a list of\npartitions.\nshape: shape of the `Tensor` to partition, must have at least two\ndimensions.\ndtype: dtype of the elements in the `Tensor`.\n\nReturns:\nA list with elements >=1 and exactly one >1. The index of that\nelement corresponds to the partitioning axis.", "source": "github-repos"}
{"code": "def save_scan_plot(self, filename=\"scan.pdf\",\n                       img_format=\"pdf\", coords=None):\n        \n        plt = self.get_scan_plot(coords)\n        plt.savefig(filename, format=img_format)", "docstring": "Save matplotlib plot of the potential energy surface to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.\ncoords: internal coordinate name to use as abcissa.", "source": "juraj-google-style"}
{"code": "def RegisterPlugin(cls, plugin_class):\n    name = getattr(plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)\n    name = name.lower()\n    if (name in cls._plugins):\n        raise KeyError('Artifact plugin class already set for name: {0:s}.'.format(name))\n    preprocess_plugin = plugin_class()\n    cls._plugins[name] = preprocess_plugin\n    if isinstance(preprocess_plugin, interface.FileSystemArtifactPreprocessorPlugin):\n        cls._file_system_plugins[name] = preprocess_plugin\n    elif isinstance(preprocess_plugin, interface.KnowledgeBasePreprocessorPlugin):\n        cls._knowledge_base_plugins[name] = preprocess_plugin\n    elif isinstance(preprocess_plugin, interface.WindowsRegistryKeyArtifactPreprocessorPlugin):\n        cls._windows_registry_plugins[name] = preprocess_plugin", "docstring": "Registers an preprocess plugin class.\n\nArgs:\nplugin_class (type): preprocess plugin class.\n\nRaises:\nKeyError: if plugin class is already set for the corresponding name.\nTypeError: if the source type of the plugin class is not supported.", "source": "codesearchnet"}
{"code": "def Open(self, file_object):\n    self._file_object = file_object\n    self._regf_file.open_file_object(self._file_object)\n    return True", "docstring": "Opens the Windows Registry file using a file-like object.\n\nArgs:\nfile_object (file): file-like object.\n\nReturns:\nbool: True if successful or False if not.", "source": "codesearchnet"}
{"code": "def _get_updated_values(before_values, after_values):\n    assert (before_values.keys() == after_values.keys())\n    return dict([(k, [before_values[k], after_values[k]]) for k in before_values.keys() if (before_values[k] != after_values[k])])", "docstring": "Get updated values from 2 dicts of values\n\nArgs:\nbefore_values (dict): values before update\nafter_values (dict): values after update\n\nReturns:\ndict: a diff dict with key is field key, value is tuple of\n(before_value, after_value)", "source": "codesearchnet"}
{"code": "def _WriteSerializedAttributeContainerList(self, container_type):\n    \n    if container_type == self._CONTAINER_TYPE_EVENT:\n      if not self._serialized_event_heap.data_size:\n        return\n\n      number_of_attribute_containers = (\n          self._serialized_event_heap.number_of_events)\n\n    else:\n      container_list = self._GetSerializedAttributeContainerList(container_type)\n      if not container_list.data_size:\n        return\n\n      number_of_attribute_containers = (\n          container_list.number_of_attribute_containers)\n\n    if self._serializers_profiler:\n      self._serializers_profiler.StartTiming('write')\n\n    if container_type == self._CONTAINER_TYPE_EVENT:\n      query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'\n    else:\n      query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(container_type)\n\n    \n    values_tuple_list = []\n    for _ in range(number_of_attribute_containers):\n      if container_type == self._CONTAINER_TYPE_EVENT:\n        timestamp, serialized_data = self._serialized_event_heap.PopEvent()\n      else:\n        serialized_data = container_list.PopAttributeContainer()\n\n      if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:\n        compressed_data = zlib.compress(serialized_data)\n        serialized_data = sqlite3.Binary(compressed_data)\n      else:\n        compressed_data = ''\n\n      if self._storage_profiler:\n        self._storage_profiler.Sample(\n            'write', container_type, len(serialized_data), len(compressed_data))\n\n      if container_type == self._CONTAINER_TYPE_EVENT:\n        values_tuple_list.append((timestamp, serialized_data))\n      else:\n        values_tuple_list.append((serialized_data, ))\n\n    self._cursor.executemany(query, values_tuple_list)\n\n    if self._serializers_profiler:\n      self._serializers_profiler.StopTiming('write')\n\n    if container_type == self._CONTAINER_TYPE_EVENT:\n      self._serialized_event_heap.Empty()\n    else:\n      container_list.Empty()", "docstring": "Writes a serialized attribute container list.\n\nArgs:\ncontainer_type (str): attribute container type.", "source": "juraj-google-style"}
{"code": "def get_sns_topic_arn(topic_name, account, region):\n    \n    if topic_name.count(':') == 5 and topic_name.startswith('arn:aws:sns:'):\n        return topic_name\n    session = boto3.Session(profile_name=account, region_name=region)\n    sns_client = session.client('sns')\n\n    topics = sns_client.list_topics()['Topics']\n\n    matched_topic = None\n    for topic in topics:\n        topic_arn = topic['TopicArn']\n        if topic_name == topic_arn.split(':')[-1]:\n            matched_topic = topic_arn\n            break\n    else:\n        LOG.critical(\"No topic with name %s found.\", topic_name)\n        raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))\n    return matched_topic", "docstring": "Get SNS topic ARN.\n\nArgs:\ntopic_name (str): Name of the topic to lookup.\naccount (str): Environment, e.g. dev\nregion (str): Region name, e.g. us-east-1\n\nReturns:\nstr: ARN for requested topic name", "source": "juraj-google-style"}
{"code": "def start_listing(self, request: Request) -> ListingResponse:\n        \n        if self._session_state != SessionState.ready:\n            raise RuntimeError('Session not ready')\n\n        response = ListingResponse()\n\n        yield from self._prepare_fetch(request, response)\n        yield from self._open_data_stream()\n\n        mlsd_command = Command('MLSD', self._request.file_path)\n        list_command = Command('LIST', self._request.file_path)\n\n        try:\n            yield from self._begin_stream(mlsd_command)\n            self._listing_type = 'mlsd'\n        except FTPServerError as error:\n            if error.reply_code in (ReplyCodes.syntax_error_command_unrecognized,\n                                    ReplyCodes.command_not_implemented):\n                self._listing_type = None\n            else:\n                raise\n\n        if not self._listing_type:\n            \n            \n            yield from self._begin_stream(list_command)\n            self._listing_type = 'list'\n\n        _logger.debug('Listing type is %s', self._listing_type)\n\n        self._session_state = SessionState.directory_request_sent\n\n        return response", "docstring": "Fetch a file listing.\n\nArgs:\nrequest: Request.\n\nReturns:\nA listing response populated with the initial data connection\nreply.\n\nOnce the response is received, call :meth:`download_listing`.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, script_type, default_shell=None, run_dir=None, debug=False):\n    \n    self.script_type = script_type\n    self.default_shell = default_shell\n    name = '%s-script' % self.script_type\n    facility = logging.handlers.SysLogHandler.LOG_DAEMON\n    self.logger = logger.Logger(name=name, debug=debug, facility=facility)\n    self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\n    self.executor = script_executor.ScriptExecutor(\n        self.logger, script_type, default_shell=default_shell)\n    self._RunScripts(run_dir=run_dir)", "docstring": "Constructor.\n\nArgs:\nscript_type: string, the metadata script type to run.\ndefault_shell: string, the default shell to execute the script.\nrun_dir: string, the base directory location of the temporary directory.\ndebug: bool, True if debug output should write to the console.", "source": "juraj-google-style"}
{"code": "def preprocess_frame(frame):\n  \n  \n  frame = common_layers.convert_rgb_to_real(frame)\n  frame = frame - 0.5\n  frame, _ = glow_ops.uniform_binning_correction(frame)\n  return frame", "docstring": "Preprocess frame.\n\n1. Converts [0, 255] to [-0.5, 0.5]\n2. Adds uniform noise.\n\nArgs:\nframe: 3-D Tensor representing pixels.\nReturns:\nframe: 3-D Tensor with values in between [-0.5, 0.5]", "source": "juraj-google-style"}
{"code": "def match_date(date):\n    date_pattern = re.compile('^(19|20)\\\\d\\\\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])')\n    if re.match(date_pattern, date):\n        return True\n    return False", "docstring": "Check if a string is a valid date\n\nArgs:\ndate(str)\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def get_tri_area(pts):\n    (a, b, c) = (pts[0], pts[1], pts[2])\n    v1 = (np.array(b) - np.array(a))\n    v2 = (np.array(c) - np.array(a))\n    area_tri = abs((sp.linalg.norm(sp.cross(v1, v2)) / 2))\n    return area_tri", "docstring": "Given a list of coords for 3 points,\nCompute the area of this triangle.\n\nArgs:\npts: [a, b, c] three points", "source": "codesearchnet"}
{"code": "def element_spec(self):\n    raise NotImplementedError('Optional.element_spec')", "docstring": "The type specification of an element of this optional.\n\n>>> optional = tf.experimental.Optional.from_value(42)\n>>> print(optional.element_spec)\ntf.TensorSpec(shape=(), dtype=tf.int32, name=None)\n\nReturns:\nA (nested) structure of `tf.TypeSpec` objects matching the structure of an\nelement of this optional, specifying the type of individual components.", "source": "github-repos"}
{"code": "def to_json(self, **kwargs) -> JSONValueType:", "docstring": "Returns a plain Python value as a representation for this object.\n\nA plain Python value are basic python types that can be serialized into\nJSON, e.g: ``bool``, ``int``, ``float``, ``str``, ``dict`` (with string\nkeys), ``list``, ``tuple`` where the container types should have plain\nPython values as their values.\n\nArgs:\n**kwargs: Keyword arguments as flags to control JSON conversion.\n\nReturns:\nA plain Python value.", "source": "github-repos"}
{"code": "def create_lease_object_from_subnet(self, subnet):\n        \n        if '/' not in subnet:\n            subnet = '{}/{}'.format(subnet, self._cidr)\n\n        try:\n            if not self.is_leasable_subnet(subnet):\n                raise LagoSubnetLeaseOutOfRangeException(\n                    subnet, self.get_allowed_range()\n                )\n        except AddrFormatError:\n            raise LagoSubnetLeaseMalformedAddrException(subnet)\n\n        return Lease(store_path=self.path, subnet=subnet)", "docstring": "Create a lease from ip in a dotted decimal format,\n(for example `192.168.200.0/24`). the _cidr will be added if not exist\nin `subnet`.\n\nArgs:\nsubnet (str): The value of the third octet\n\nReturns:\nLease: Lease object which represents the requested subnet.\n\nRaises:\nLagoSubnetLeaseOutOfRangeException: If the resultant subnet is\nmalformed or out of the range of the store.", "source": "juraj-google-style"}
{"code": "def absl_to_standard(level):\n  \n  if not isinstance(level, int):\n    raise TypeError('Expect an int level, found {}'.format(type(level)))\n  if level < ABSL_FATAL:\n    level = ABSL_FATAL\n  if level <= ABSL_DEBUG:\n    return ABSL_TO_STANDARD[level]\n  \n  return STANDARD_DEBUG - level + 1", "docstring": "Converts an integer level from the absl value to the standard value.\n\nArgs:\nlevel: int, an absl.logging level.\n\nRaises:\nTypeError: Raised when level is not an integer.\n\nReturns:\nThe corresponding integer level for use in standard logging.", "source": "juraj-google-style"}
{"code": "def round_model(model: typing.Dict[str, typing.Dict[str, float]], scale: int) -> typing.Dict[str, typing.Dict[str, int]]:\n    model_rounded: typing.Dict[str, typing.Dict[str, int]] = dict()\n    for feature_group, features in model.items():\n        for feature_content, score in features.items():\n            scaled_score = int(score * scale)\n            if abs(scaled_score) > 0:\n                model_rounded.setdefault(feature_group, {})\n                model_rounded[feature_group][feature_content] = scaled_score\n    return model_rounded", "docstring": "Rounds the scores in the model to integer after scaling.\n\nArgs:\nmodel (Dict[str, Dict[str, float]]): The model to round scores.\nscale (int, optional): A scale factor to multiply scores.\n\nReturns:\nmodel_rounded (Dict[str, Dict[str, int]]) The rounded model.", "source": "github-repos"}
{"code": "def ed25519_public_key_to_string(key):\n    \n    return base64.b64encode(key.public_bytes(\n        encoding=serialization.Encoding.Raw,\n        format=serialization.PublicFormat.Raw,\n    ), None).decode('utf-8')", "docstring": "Convert an ed25519 public key to a base64-encoded string.\n\nArgs:\nkey (Ed25519PublicKey): the key to write to the file.\n\nReturns:\nstr: the key representation as a str", "source": "juraj-google-style"}
{"code": "def variables_accessed(variables):\n    accessed = []\n    for variable in variables:\n        if variable.trainable:\n            accessed.extend(_variables_override(variable))\n    for var in accessed:\n        pywrap_tfe.TFE_Py_TapeVariableAccessed(var)\n        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)", "docstring": "Notifies all tapes in the stack that variables have been accessed.\n\nOnly trainable variables are marked as accessed.\n\nArgs:\nvariables: iterable of variables to mark as accessed.", "source": "github-repos"}
{"code": "def get_speaker_info(self, refresh=False, timeout=None):\n        \n        if self.speaker_info and refresh is False:\n            return self.speaker_info\n        else:\n            response = requests.get('http:\n                                    ':1400/xml/device_description.xml',\n                                    timeout=timeout)\n            dom = XML.fromstring(response.content)\n\n        device = dom.find('{urn:schemas-upnp-org:device-1-0}device')\n        if device is not None:\n            self.speaker_info['zone_name'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}roomName')\n\n            \n            self.speaker_info['player_icon'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}iconList/'\n                '{urn:schemas-upnp-org:device-1-0}icon/'\n                '{urn:schemas-upnp-org:device-1-0}url'\n            )\n\n            self.speaker_info['uid'] = self.uid\n            self.speaker_info['serial_number'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}serialNum')\n            self.speaker_info['software_version'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}softwareVersion')\n            self.speaker_info['hardware_version'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}hardwareVersion')\n            self.speaker_info['model_number'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}modelNumber')\n            self.speaker_info['model_name'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}modelName')\n            self.speaker_info['display_version'] = device.findtext(\n                '{urn:schemas-upnp-org:device-1-0}displayVersion')\n\n            \n            mac = self.speaker_info['serial_number'].split(':')[0]\n            self.speaker_info['mac_address'] = mac\n\n            return self.speaker_info\n        return None", "docstring": "Get information about the Sonos speaker.\n\nArguments:\nrefresh(bool): Refresh the speaker info cache.\ntimeout: How long to wait for the server to send\ndata before giving up, as a float, or a\n`(connect timeout, read timeout)` tuple\ne.g. (3, 5). Default is no timeout.\n\nReturns:\ndict: Information about the Sonos speaker, such as the UID,\nMAC Address, and Zone Name.", "source": "juraj-google-style"}
{"code": "def _construct_key(self, rule_id: str, spacy_rule_id:int) -> int:\n        \n\n        hash_key = (rule_id, spacy_rule_id)\n        hash_v = hash(hash_key) + sys.maxsize + 1\n        self._hash_map[hash_v] = hash_key\n        return hash_v", "docstring": "Use a mapping to store the information about rule_id for each matches, create the mapping key here\nArgs:\nrule_id: str\nspacy_rule_id:int\n\nReturns: int", "source": "juraj-google-style"}
{"code": "def update_in_hdx(self):\n    capacity = self.data.get('capacity')\n    if (capacity is not None):\n        del self.data['capacity']\n    self._update_in_hdx('user', 'id')\n    if (capacity is not None):\n        self.data['capacity'] = capacity", "docstring": "Check if user exists in HDX and if so, update user\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):\n    if (not scan_context):\n        raise ValueError('Invalid scan context.')\n    scan_context.updated = False\n    if scan_path_spec:\n        scan_node = scan_context.GetScanNode(scan_path_spec)\n    else:\n        scan_node = scan_context.GetUnscannedScanNode()\n    if scan_node:\n        self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)", "docstring": "Scans for supported formats.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nauto_recurse (Optional[bool]): True if the scan should automatically\nrecurse as far as possible.\nscan_path_spec (Optional[PathSpec]): path specification to indicate\nwhere the source scanner should continue scanning, where None\nindicates the scanner will start with the sources.\n\nRaises:\nValueError: if the scan context is invalid.", "source": "codesearchnet"}
{"code": "def _get_backend(filename):\n    filename = os.path.abspath(filename)\n    with _backends_lock:\n        if (filename not in _backends):\n            _backends[filename] = _MultiprocessStorageBackend(filename)\n        return _backends[filename]", "docstring": "A helper method to get or create a backend with thread locking.\n\nThis ensures that only one backend is used per-file per-process, so that\nthread and process locks are appropriately shared.\n\nArgs:\nfilename: The full path to the credential storage file.\n\nReturns:\nAn instance of :class:`_MultiprocessStorageBackend`.", "source": "codesearchnet"}
{"code": "def all_near_zero(a: Union[(float, complex, Iterable[float], np.ndarray)], *, atol: float=1e-08) -> bool:\n    return np.all(np.less_equal(np.abs(a), atol))", "docstring": "Checks if the tensor's elements are all near zero.\n\nArgs:\na: Tensor of elements that could all be near zero.\natol: Absolute tolerance.", "source": "codesearchnet"}
{"code": "def GetSources(self, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    source_long = getattr(event, 'source_long', 'UNKNOWN')\n    source_append = getattr(event, 'source_append', None)\n    if source_append:\n      source_long = '{0:s} {1:s}'.format(source_long, source_append)\n\n    return self.SOURCE_SHORT, source_long", "docstring": "Determines the the short and long source for an event object.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): short and long source string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def index(cls):\n    res = requests.get(cls.URL, headers=HEADERS, verify=False)\n    res.raise_for_status()\n    return res.json()", "docstring": "Fetches all records.\n\nReturns:\n`dict`. The JSON formatted response.\n\nRaises:\n`requests.exceptions.HTTPError`: The status code is not ok.", "source": "codesearchnet"}
{"code": "def fast_cond_v2(pred, true_fn, false_fn, name=None):\n    if isinstance(pred, bool):\n        raise TypeError('pred must not be a Python bool', pred)\n    if not name:\n        name = 'fast_cond'\n    with ops.name_scope(name) as scope:\n        true_name = util.unique_fn_name(scope, 'true')\n        false_name = util.unique_fn_name(scope, 'false')\n        pred = _normalize_pred(pred)\n        true_graph = func_graph_module.func_graph_from_py_func(true_name, true_fn, [], {}, func_graph=util.CondBranchFuncGraph(true_name, collections=ops.get_default_graph()._collections), add_control_dependencies=False, op_return_value=pred)\n        false_graph = func_graph_module.func_graph_from_py_func(false_name, false_fn, [], {}, func_graph=util.CondBranchFuncGraph(false_name, collections=ops.get_default_graph()._collections), add_control_dependencies=False, op_return_value=pred)\n        verify_captures(_COND, [true_graph, false_graph])\n        return _build_cond(pred, true_graph, false_graph, true_graph.external_captures, false_graph.external_captures, building_gradient=False, add_identities=False, prevent_lowering=True, name=scope)", "docstring": "Like cond_v2, except emits an If op and applies various optimizations.\n\nThis function is intended to be used for cases where the cond is used to\nimplement a simple conditional control flow operator. It makes the following\nassumptions:\n\n1. The conditional is never differentiated.\n2. The caller does not rely on V1 control flow semantics, i.e. for cross\ndevice execution, pruning subgraphs of the true or false branches, or\nnon-strict evaluation order.\n3. The caller manually configures any control dependencies within the graphs.\n\nIn this case, the cond will be lowered to a single If (or StatelessIf) op and\nthe true and false graphs will be executed as TF functions.\n\nArgs:\npred: boolean Tensor\ntrue_fn: function to execute if pred is true\nfalse_fn: function to execute if pred is false\nname: the name for the If op.\n\nReturns:\nA list of Tensors which are the outputs of the If op. Does not include\nintermediate outputs.", "source": "github-repos"}
{"code": "def _list_node_attributes(self, node_name):\n    lines = []\n    lines.append('')\n    lines.append('Node attributes:')\n    attrs = self._debug_dump.node_attributes(node_name)\n    for attr_key in attrs:\n        lines.append('  %s:' % attr_key)\n        attr_val_str = repr(attrs[attr_key]).strip().replace('\\n', ' ')\n        lines.append('    %s' % attr_val_str)\n        lines.append('')\n    return debugger_cli_common.RichTextLines(lines)", "docstring": "List neighbors (inputs or recipients) of a node.\n\nArgs:\nnode_name: Name of the node of which the attributes are to be listed.\n\nReturns:\nA RichTextLines object.", "source": "github-repos"}
{"code": "def all_reduce_indexed_slices(self, input_slices: indexed_slices.IndexedSlices, options: Optional[collective_util.Options]=None) -> indexed_slices.IndexedSlices:\n    options = self._options.merge(options)\n    with ops.device(self._device):\n\n        def all_gather_indexed_slices(all_gather_fn: Callable[[core.TensorLike, Optional[collective_util.Options]], core.Tensor]) -> indexed_slices.IndexedSlices:\n            \n            all_values = all_gather_fn(input_slices.values, options)\n            if options.implementation == collective_util.CommunicationImplementation.NCCL:\n                control = [all_values]\n            else:\n                control = []\n            with ops.control_dependencies(control):\n                all_indices = all_gather_fn(input_slices.indices, options)\n            return indexed_slices.IndexedSlices(values=all_values, indices=all_indices, dense_shape=input_slices.dense_shape)\n        length = array_ops.shape(input_slices.indices)\n        all_lengths = self._all_gather(length, options)\n\n        def all_gather_with_padding(input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:\n            \n            max_length = math_ops.reduce_max(all_lengths)\n            padded_tensor = _pad_util(input_tensor, max_length)\n            all_padded_tensors = self._all_gather(padded_tensor, options)\n            split_tensors = []\n            for i in range(self._group_size):\n                start_pos = i * max_length\n                split_tensors.append(all_padded_tensors[start_pos:start_pos + all_lengths[i]])\n            return array_ops.concat(split_tensors, 0)\n        return cond.cond(math_ops.equal(math_ops.reduce_max(all_lengths), math_ops.reduce_min(all_lengths)), lambda: all_gather_indexed_slices(self._all_gather), lambda: all_gather_indexed_slices(all_gather_with_padding))", "docstring": "All-reduce an IndexedSlices.\n\nThis method can be called outside  tf.function.\n\nArgs:\ninput_slices: an IndexedSlices.\noptions: an optional tf.distribute.experimental.CommunicationOptions. If\nprovided, it overrides the default options.\n\nReturns:\nThe reduced IndexedSlices.", "source": "github-repos"}
{"code": "def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):\n    \n    connection = connections[using]\n\n    with connection.cursor() as cursor:\n        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])\n        schema_exists = cursor.fetchone()[0]\n        if schema_exists:\n            return False\n\n        cursor.execute(\"CREATE SCHEMA %s;\", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])\n\n    with connection.schema_editor() as editor:\n        for model in get_heroku_connect_models():\n            editor.create_model(model)\n\n        \n        editor.execute('CREATE EXTENSION IF NOT EXISTS \"hstore\";')\n\n        from heroku_connect.models import (TriggerLog, TriggerLogArchive)\n        for cls in [TriggerLog, TriggerLogArchive]:\n            editor.create_model(cls)\n    return True", "docstring": "Create Heroku Connect schema.\n\nNote:\nThis function is only meant to be used for local development.\nIn a production environment the schema will be created by\nHeroku Connect.\n\nArgs:\nusing (str): Alias for database connection.\n\nReturns:\nbool: ``True`` if the schema was created, ``False`` if the\nschema already exists.", "source": "juraj-google-style"}
{"code": "def set_pattern_step_setpoint(self, patternnumber, stepnumber, setpointvalue):\n    _checkPatternNumber(patternnumber)\n    _checkStepNumber(stepnumber)\n    _checkSetpointValue(setpointvalue, self.setpoint_max)\n    address = _calculateRegisterAddress('setpoint', patternnumber, stepnumber)\n    self.write_register(address, setpointvalue, 1)", "docstring": "Set the setpoint value for a step.\n\nArgs:\n* patternnumber (integer): 0-7\n* stepnumber (integer): 0-7\n* setpointvalue (float): Setpoint value", "source": "codesearchnet"}
{"code": "def _use_memcache(self, key, options=None):\n    flag = ContextOptions.use_memcache(options)\n    if (flag is None):\n        flag = self._memcache_policy(key)\n    if (flag is None):\n        flag = ContextOptions.use_memcache(self._conn.config)\n    if (flag is None):\n        flag = True\n    return flag", "docstring": "Return whether to use memcache for this key.\n\nArgs:\nkey: Key instance.\noptions: ContextOptions instance, or None.\n\nReturns:\nTrue if the key should be cached in memcache, False otherwise.", "source": "codesearchnet"}
{"code": "def writeline(self, line=b'', sep=b'\\n', echo=None):\n    self.writelines([line], sep, echo)", "docstring": "Write a byte sequences to the channel and terminate it with carriage\nreturn and line feed.\n\nArgs:\nline(bytes): The line to send.\nsep(bytes): The separator to use after each line.\necho(bool): Whether to echo the written data to stdout.\n\nRaises:\nEOFError: If the channel was closed before all data was sent.", "source": "codesearchnet"}
{"code": "def transformers_to_megatron_fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):\n    input_shape = param.size()\n    if checkpoint_version == 1.0:\n        current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]\n        param = param.view(*current_shape)\n        param = param.transpose(0, 2)\n        param = param.transpose(1, 2).contiguous()\n    elif checkpoint_version >= 2.0:\n        current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]\n        param = param.view(*current_shape)\n        param = param.transpose(0, 1).contiguous()\n    param = param.view(*input_shape)\n    return param", "docstring": "Permutes layout of param tensor to the one compatible with respective NVIDIA Megatron-LM checkpoint versions. Input\nis [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version\n1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the\nself-attention block, the param needs to be already transposed before calling this function.\n\nArgs:\nparam (torch.Tensor): the tensor to permute\ncheckpoint_version (int): the version of the checkpoint.\nnum_splits (int): the number of projections, usually 3 for (Query, Key, Value)\nnum_heads (int): the number of attention heads\nhidden_size (int): the hidden size per head", "source": "github-repos"}
{"code": "def post_async(self, path, params=None):\n        \n        request = Post(self._get_next_id(), path, params)\n        request.set_callback(self._q.put)\n        future = self._dispatch_request(request)\n        return future", "docstring": "Asynchronously calls a function on a child block\n\nArgs:\npath (list): The path to post to\nparams (dict): parameters for the call\n\nReturns:\nFuture: as single Future that will resolve to the result", "source": "juraj-google-style"}
{"code": "def ParseFromUnicode(self, value):\n    \n    precondition.AssertType(value, Text)\n    value = value.strip()\n\n    super(ClientURN, self).ParseFromUnicode(value)\n\n    match = self.CLIENT_ID_RE.match(self._string_urn)\n    if not match:\n      raise type_info.TypeValueError(\"Client urn malformed: %s\" % value)\n\n    clientid = match.group(\"clientid\")\n    clientid_correctcase = \"\".join((clientid[0].upper(), clientid[1:].lower()))\n\n    self._string_urn = self._string_urn.replace(clientid, clientid_correctcase,\n                                                1)", "docstring": "Parse a string into a client URN.\n\nConvert case so that all URNs are of the form C.[0-9a-f].\n\nArgs:\nvalue: string value to parse", "source": "juraj-google-style"}
{"code": "def _parse_string(self, xml):\n        \n        if not isinstance(xml, HTMLElement):\n            xml = dhtmlparser.parseString(str(xml))\n\n        \n        record = xml.find(\"record\")\n        if not record:\n            raise ValueError(\"There is no <record> in your MARC XML document!\")\n        record = record[0]\n\n        self.oai_marc = len(record.find(\"oai_marc\")) > 0\n\n        \n        if not self.oai_marc:\n            leader = record.find(\"leader\")\n            if len(leader) >= 1:\n                self.leader = leader[0].getContent()\n\n        \n        if self.oai_marc:\n            self._parse_control_fields(record.find(\"fixfield\"), \"id\")\n            self._parse_data_fields(record.find(\"varfield\"), \"id\", \"label\")\n        else:\n            self._parse_control_fields(record.find(\"controlfield\"), \"tag\")\n            self._parse_data_fields(record.find(\"datafield\"), \"tag\", \"code\")\n\n        \n        if self.oai_marc and \"LDR\" in self.controlfields:\n            self.leader = self.controlfields[\"LDR\"]", "docstring": "Parse MARC XML document to dicts, which are contained in\nself.controlfields and self.datafields.\n\nArgs:\nxml (str or HTMLElement): input data\n\nAlso detect if this is oai marc format or not (see elf.oai_marc).", "source": "juraj-google-style"}
{"code": "def GetDecoder(cls, encoding_method):\n    encoding_method = encoding_method.lower()\n    decoder = cls._decoders.get(encoding_method, None)\n    if (not decoder):\n        return None\n    return decoder()", "docstring": "Retrieves the decoder object for a specific encoding method.\n\nArgs:\nencoding_method (str): encoding method identifier.\n\nReturns:\nDecoder: decoder or None if the encoding method does not exists.", "source": "codesearchnet"}
{"code": "def __init__(self, timeseries, loop=None):\n        \n        self.timeseries = timeseries\n        self.queue = deque()\n        self.continuation_url = timeseries._base_url", "docstring": "Construct an iterator.\n\nArgs:\ntimeseries: the timeseries to iterate over\n\nloop: The asyncio loop to use for iterating", "source": "juraj-google-style"}
{"code": "def singleOrPair(obj):\n    if (len(list(obj.__class__.__mro__)) <= 2):\n        return 'Neither'\n    elif (ancestorJr(obj) is Pair):\n        return 'Pair'\n    elif (ancestor(obj) is Single):\n        return 'Single'\n    else:\n        return 'Neither'", "docstring": "Chech an object is single or pair or neither.\n\nOf course,, all pairs are single, so what the function is really detecting is whether an object is only single or at the same time a pair.\n\nArgs:\nobj (object): Literally anything.\n\nReturns:\nstr: 'Single', or 'Pair', or 'Neither'", "source": "codesearchnet"}
{"code": "def _GetAccessToken(self):\n    d = {'assertion': self._GenerateAssertion(), 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer'}\n    try:\n        body = parse.urlencode(d)\n    except AttributeError:\n        body = urllib.urlencode(d)\n    req = urllib_request.Request(RpcHelper.TOKEN_ENDPOINT)\n    req.add_header('Content-type', 'application/x-www-form-urlencoded')\n    binary_body = body.encode('utf-8')\n    raw_response = urllib_request.urlopen(req, binary_body)\n    return simplejson.loads(raw_response.read())['access_token']", "docstring": "Gets oauth2 access token for Gitkit API using service account.\n\nReturns:\nstring, oauth2 access token.", "source": "codesearchnet"}
{"code": "def __convertChannelMask(self, channelsArray):\n    maskSet = 0\n    for eachChannel in channelsArray:\n        mask = (1 << eachChannel)\n        maskSet = (maskSet | mask)\n    return maskSet", "docstring": "convert channelsArray to bitmask format\n\nArgs:\nchannelsArray: channel array (i.e. [21, 22])\n\nReturns:\nbitmask format corresponding to a given channel array", "source": "codesearchnet"}
{"code": "async def register_service(self, short_name, long_name, allow_duplicate=True):\n        \n\n        try:\n            await self.send_command(OPERATIONS.CMD_REGISTER_SERVICE, dict(name=short_name, long_name=long_name),\n                                    MESSAGES.RegisterServiceResponse)\n        except ArgumentError:\n            if not allow_duplicate:\n                raise", "docstring": "Register a new service with the service manager.\n\nArgs:\nshort_name (string): A unique short name for this service that functions\nas an id\nlong_name (string): A user facing name for this service\nallow_duplicate (boolean): Don't throw an error if this service is already\nregistered.  This is important if the service is preregistered for example.\nRaises:\nArgumentError: if the short_name is already taken", "source": "juraj-google-style"}
{"code": "def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):\n    keep_by_nms = batched_nms(boxes=mask_boxes.float(), scores=iou_scores, idxs=torch.zeros(mask_boxes.shape[0]), iou_threshold=amg_crops_nms_thresh)\n    iou_scores = iou_scores[keep_by_nms]\n    rle_masks = [rle_masks[i] for i in keep_by_nms]\n    mask_boxes = mask_boxes[keep_by_nms]\n    masks = [_rle_to_mask(rle) for rle in rle_masks]\n    return (masks, iou_scores, rle_masks, mask_boxes)", "docstring": "Perform NMS (Non Maximum Suppression) on the outputs.\n\nArgs:\nrle_masks (`torch.Tensor`):\nbinary masks in the RLE format\niou_scores (`torch.Tensor` of shape (nb_masks, 1)):\niou_scores predicted by the model\nmask_boxes (`torch.Tensor`):\nThe bounding boxes corresponding to segmentation masks\namg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):\nNMS threshold.", "source": "github-repos"}
{"code": "def check(self, instance, format):\n        \n\n        if format not in self.checkers:\n            return\n\n        func, raises = self.checkers[format]\n        result, cause = None, None\n        try:\n            result = func(instance)\n        except raises as e:\n            cause = e\n        if not result:\n            raise FormatError(\n                \"%r is not a %r\" % (instance, format), cause=cause,\n            )", "docstring": "Check whether the instance conforms to the given format.\n\nArguments:\n\ninstance (*any primitive type*, i.e. str, number, bool):\n\nThe instance to check\n\nformat (str):\n\nThe format that instance should conform to\n\n\nRaises:\n\nFormatError: if the instance does not conform to ``format``", "source": "juraj-google-style"}
{"code": "def starting_wall_time(self):\n    return self._reader.starting_wall_time()", "docstring": "Wall timestamp for when the debugged TensorFlow program started.\n\nReturns:\nStating wall time as seconds since the epoch, as a `float`.", "source": "github-repos"}
{"code": "def ParseBookmarkRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    rev_host = self._GetRowValue(query_hash, row, 'rev_host')\n    bookmark_type = self._GetRowValue(query_hash, row, 'type')\n    event_data = FirefoxPlacesBookmarkEventData()\n    event_data.host = (rev_host or 'N/A')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.places_title = self._GetRowValue(query_hash, row, 'places_title')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')\n    event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')\n    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'lastModified')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a bookmark row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def section(self, section):\n        \n        if not isinstance(self._container, ConfigUpdater):\n            raise ValueError(\"Sections can only be added at section level!\")\n        if isinstance(section, str):\n            \n            section = Section(section, container=self._container)\n        elif not isinstance(section, Section):\n            raise ValueError(\"Parameter must be a string or Section type!\")\n        if section.name in [block.name for block in self._container\n                            if isinstance(block, Section)]:\n            raise DuplicateSectionError(section.name)\n        self._container.structure.insert(self._idx, section)\n        self._idx += 1\n        return self", "docstring": "Creates a section block\n\nArgs:\nsection (str or :class:`Section`): name of section or object\n\nReturns:\nself for chaining", "source": "juraj-google-style"}
{"code": "def do_block(args):\n    rest_client = RestClient(args.url, args.user)\n    if (args.subcommand == 'list'):\n        block_generator = rest_client.list_blocks()\n        blocks = []\n        left = args.count\n        for block in block_generator:\n            blocks.append(block)\n            left -= 1\n            if (left <= 0):\n                break\n        keys = ('num', 'block_id', 'batches', 'txns', 'signer')\n        headers = tuple(((k.upper() if (k != 'batches') else 'BATS') for k in keys))\n\n        def parse_block_row(block):\n            batches = block.get('batches', [])\n            txns = [t for b in batches for t in b['transactions']]\n            return (block['header'].get('block_num', 0), block['header_signature'], len(batches), len(txns), block['header']['signer_public_key'])\n        if (args.format == 'default'):\n            fmt.print_terminal_table(headers, blocks, parse_block_row)\n        elif (args.format == 'csv'):\n            fmt.print_csv(headers, blocks, parse_block_row)\n        elif ((args.format == 'json') or (args.format == 'yaml')):\n            data = [{k: d for (k, d) in zip(keys, parse_block_row(b))} for b in blocks]\n            if (args.format == 'yaml'):\n                fmt.print_yaml(data)\n            elif (args.format == 'json'):\n                fmt.print_json(data)\n            else:\n                raise AssertionError('Missing handler: {}'.format(args.format))\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))\n    if (args.subcommand == 'show'):\n        output = rest_client.get_block(args.block_id)\n        if args.key:\n            if (args.key in output):\n                output = output[args.key]\n            elif (args.key in output['header']):\n                output = output['header'][args.key]\n            else:\n                raise CliException('key \"{}\" not found in block or header'.format(args.key))\n        if (args.format == 'yaml'):\n            fmt.print_yaml(output)\n        elif (args.format == 'json'):\n            fmt.print_json(output)\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))", "docstring": "Runs the block list or block show command, printing output to the\nconsole\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "codesearchnet"}
{"code": "def construct(cls, name, version=None):\n        \n        other = VersionedObject(None)\n        other.name_ = name\n        other.version_ = Version() if version is None else version\n        return other", "docstring": "Create a VersionedObject directly from an object name and version.\n\nArgs:\nname: Object name string.\nversion: Version object.", "source": "juraj-google-style"}
{"code": "def authenticate(self, connection_certificate=None, connection_info=None, request_credentials=None):\n    if ((self.users_url is None) or (self.groups_url is None)):\n        raise exceptions.ConfigurationError('The SLUGS URL must be specified.')\n    user_id = utils.get_client_identity_from_certificate(connection_certificate)\n    try:\n        response = requests.get(self.users_url.format(user_id))\n    except Exception:\n        raise exceptions.ConfigurationError('A connection could not be established using the SLUGS URL.')\n    if (response.status_code == 404):\n        raise exceptions.PermissionDenied('Unrecognized user ID: {}'.format(user_id))\n    response = requests.get(self.groups_url.format(user_id))\n    if (response.status_code == 404):\n        raise exceptions.PermissionDenied('Group information could not be retrieved for user ID: {}'.format(user_id))\n    return (user_id, response.json().get('groups'))", "docstring": "Query the configured SLUGS service with the provided credentials.\n\nArgs:\nconnection_certificate (cryptography.x509.Certificate): An X.509\ncertificate object obtained from the connection being\nauthenticated. Required for SLUGS authentication.\nconnection_info (tuple): A tuple of information pertaining to the\nconnection being authenticated, including the source IP address\nand a timestamp (e.g., ('127.0.0.1', 1519759267.467451)).\nOptional, defaults to None. Ignored for SLUGS authentication.\nrequest_credentials (list): A list of KMIP Credential structures\ncontaining credential information to use for authentication.\nOptional, defaults to None. Ignored for SLUGS authentication.", "source": "codesearchnet"}
{"code": "def he_init(n_inputs, n_outputs, activation_fn, uniform=True):\n\n    def in_relu_family(activation_fn):\n        if isinstance(activation_fn, collections.Sequence):\n            activation_fn = activation_fn[0]\n        return (activation_fn in (tf.nn.relu, tf.nn.relu6))\n    if in_relu_family(activation_fn):\n        stddev = math.sqrt((2.0 / n_inputs))\n        return tf.random_normal_initializer(stddev=stddev)\n    else:\n        return xavier_init(n_inputs, n_outputs, uniform)", "docstring": "Sets the parameter initialization using the method described.\n\nThis method is designed to keep the scale of the gradients roughly the same\nin all layers with ReLU activations.\n\nHe et al. (2015):\nDelving deep into rectifiers: surpassing human-level performance on\nimageNet classification. International Conference on Computer Vision.\n\nFor activations other than ReLU and ReLU6, this method uses Xavier\ninitialization as in xavier_init().\n\nArgs:\nn_inputs: The number of input nodes into each output.\nn_outputs: The number of output nodes for each input.\nactivation_fn: Activation function used in this layer.\nuniform: If uniform distribution will be used for Xavier initialization.\nNormal distribution will be used if False.\nReturns:\nAn initializer.", "source": "codesearchnet"}
{"code": "def Open(self, filename, read_only=False):\n    \n    if self._connection:\n      raise RuntimeError('Cannot open database already opened.')\n\n    self.filename = filename\n    self.read_only = read_only\n\n    try:\n      self._connection = sqlite3.connect(filename)\n    except sqlite3.OperationalError:\n      return False\n\n    if not self._connection:\n      return False\n\n    self._cursor = self._connection.cursor()\n    if not self._cursor:\n      return False\n\n    return True", "docstring": "Opens the database file.\n\nArgs:\nfilename (str): filename of the database.\nread_only (Optional[bool]): True if the database should be opened in\nread-only mode. Since sqlite3 does not support a real read-only\nmode we fake it by only permitting SELECT queries.\n\nReturns:\nbool: True if successful.\n\nRaises:\nRuntimeError: if the database is already opened.", "source": "juraj-google-style"}
{"code": "def inner(*args):\n    \n    haspoly = sum([isinstance(arg, Poly) for arg in args])\n\n    \n    if not haspoly:\n        return numpy.sum(numpy.prod(args, 0), 0)\n\n    \n    out = args[0]\n    for arg in args[1:]:\n        out = out * arg\n    return sum(out)", "docstring": "Inner product of a polynomial set.\n\nArgs:\nargs (chaospy.poly.base.Poly):\nThe polynomials to perform inner product on.\n\nReturns:\n(chaospy.poly.base.Poly):\nResulting polynomial.\n\nExamples:\n>>> x,y = cp.variable(2)\n>>> P = cp.Poly([x-1, y])\n>>> Q = cp.Poly([x+1, x*y])\n>>> print(cp.inner(P, Q))\nq0^2+q0q1^2-1\n>>> x = numpy.arange(4)\n>>> print(cp.inner(x, x))\n14", "source": "juraj-google-style"}
{"code": "def __register_class(self, parsed_config):\n    \n    methods = parsed_config.get('methods')\n    if not methods:\n      return\n\n    \n    service_classes = set()\n    for method in methods.itervalues():\n      rosy_method = method.get('rosyMethod')\n      if rosy_method and '.' in rosy_method:\n        method_class = rosy_method.split('.', 1)[0]\n        service_classes.add(method_class)\n\n    for service_class in service_classes:\n      if service_class in self.__registered_classes:\n        raise api_exceptions.ApiConfigurationError(\n            'API class %s has already been registered.' % service_class)\n      self.__registered_classes.add(service_class)", "docstring": "Register the class implementing this config, so we only add it once.\n\nArgs:\nparsed_config: The JSON object with the API configuration being added.\n\nRaises:\nApiConfigurationError: If the class has already been registered.", "source": "juraj-google-style"}
{"code": "def set_clang_compiler_path_win(environ_cp):\n    default_clang_path = 'C:/Program Files/LLVM/bin/clang.exe'\n    if not os.path.exists(default_clang_path):\n        default_clang_path = shutil.which('clang') or ''\n    clang_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify the path to clang executable.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found. Note that Clang is nowpreferred compiler. You may use MSVC by removing --config=win_clang')\n    write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)\n    write_to_bazelrc(f'build --repo_env=CC=\"{clang_compiler_path}\"')\n    write_to_bazelrc(f'build --repo_env=BAZEL_COMPILER=\"{clang_compiler_path}\"')\n    return clang_compiler_path", "docstring": "Set CLANG_COMPILER_PATH and environment variables.\n\nLoop over user prompts for clang path until receiving a valid response.\nDefault is used if no input is given. Set CLANG_COMPILER_PATH and write\nenvironment variables CC and BAZEL_COMPILER to .bazelrc.\n\nArgs:\nenviron_cp: (Dict) copy of the os.environ.\n\nReturns:\nstring value for clang_compiler_path.", "source": "github-repos"}
{"code": "def move_to(self, destination_filename: str,\n                alter_if_clash: bool = True) -> None:\n        \n        if not self.src_filename:\n            return\n        if alter_if_clash:\n            counter = 0\n            while os.path.exists(destination_filename):\n                root, ext = os.path.splitext(destination_filename)\n                destination_filename = \"{r}_{c}{e}\".format(\n                    r=root, c=counter, e=ext)\n                counter += 1\n            \n            \n        else:\n            if os.path.exists(destination_filename):\n                src = self.rescue_filename or self.src_filename\n                log.warning(\"Destination exists; won't move {!r} to {!r}\",\n                            src, destination_filename)\n                return\n        if self.rescue_filename:\n            shutil.move(self.rescue_filename, destination_filename)\n            os.remove(self.src_filename)\n            log.info(\"Moved recovered file {!r} to {!r} and deleted corrupted \"\n                     \"original {!r}\",\n                     self.rescue_filename,\n                     destination_filename,\n                     self.src_filename)\n            self.rescue_filename = \"\"\n        else:\n            shutil.move(self.src_filename, destination_filename)\n            log.info(\"Moved {!r} to {!r}\", self.src_filename,\n                     destination_filename)\n        self.src_filename = \"\"", "docstring": "Move the file to which this class refers to a new location.\nThe function will not overwrite existing files (but offers the option\nto rename files slightly to avoid a clash).\n\nArgs:\ndestination_filename: filename to move to\nalter_if_clash: if ``True`` (the default), appends numbers to\nthe filename if the destination already exists, so that the\nmove can proceed.", "source": "juraj-google-style"}
{"code": "def present_weather_codes(self, value=None):\n        \n        if value is not None:\n            try:\n                value = int(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type int '\n                    'for field `present_weather_codes`'.format(value))\n\n        self._present_weather_codes = value", "docstring": "Corresponds to IDD Field `present_weather_codes`\n\nArgs:\nvalue (int): value for IDD Field `present_weather_codes`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def load(tiff_filename):\n    tiff_filename = os.path.expanduser(tiff_filename)\n    try:\n        img = tiff.imread(tiff_filename)\n    except Exception as e:\n        raise ValueError('Could not load file {0} for conversion.'.format(tiff_filename))\n        raise\n    return numpy.array(img)", "docstring": "Import a TIFF file into a numpy array.\n\nArguments:\ntiff_filename:  A string filename of a TIFF datafile\n\nReturns:\nA numpy array with data from the TIFF file", "source": "codesearchnet"}
{"code": "def nlargest(self, n=None):\n\t\t\n\t\tif n is None:\n\t\t\treturn sorted(self.counts(), key=itemgetter(1), reverse=True)\n\t\telse:\n\t\t\treturn heapq.nlargest(n, self.counts(), key=itemgetter(1))", "docstring": "List the n most common elements and their counts.\n\nList is from the most\ncommon to the least.  If n is None, the list all element counts.\n\nRun time should be O(m log m) where m is len(self)\nArgs:\nn (int): The number of elements to return", "source": "juraj-google-style"}
{"code": "def duration_to_string(duration):\n    (m, s) = divmod(duration, 60)\n    (h, m) = divmod(m, 60)\n    return ('%d:%02d:%02d' % (h, m, s))", "docstring": "Converts a duration to a string\n\nArgs:\nduration (int): The duration in seconds to convert\n\nReturns s (str): The duration as a string", "source": "codesearchnet"}
{"code": "def set_input(self, p_name, value):\n    name = self.python_names.get(p_name)\n    if ((p_name is None) or (name not in self.get_input_names())):\n        raise ValueError('Invalid input \"{}\"'.format(p_name))\n    self.step_inputs[name] = value", "docstring": "Set a Step's input variable to a certain value.\n\nThe value comes either from a workflow input or output of a previous\nstep.\n\nArgs:\nname (str): the name of the Step input\nvalue (str): the name of the output variable that provides the\nvalue for this input.\n\nRaises:\nValueError: The name provided is not a valid input name for this\nStep.", "source": "codesearchnet"}
{"code": "def create_standalone_context(require=None, **settings) -> 'Context':\n    backend = os.environ.get('MODERNGL_BACKEND')\n    if (backend is not None):\n        settings['backend'] = backend\n    ctx = Context.__new__(Context)\n    (ctx.mglo, ctx.version_code) = mgl.create_standalone_context(settings)\n    ctx._screen = None\n    ctx.fbo = None\n    ctx._info = None\n    ctx.extra = None\n    if ((require is not None) and (ctx.version_code < require)):\n        raise ValueError('Requested OpenGL version {}, got version {}'.format(require, ctx.version_code))\n    return ctx", "docstring": "Create a standalone ModernGL context.\n\nExample::\n\n# Create a context with highest possible supported version\nctx = moderngl.create_context()\n\n# Require at least OpenGL 4.3\nctx = moderngl.create_context(require=430)\n\nKeyword Arguments:\nrequire (int): OpenGL version code.\n\nReturns:\n:py:class:`Context` object", "source": "codesearchnet"}
{"code": "class PipelineException(Exception):\n\n    def __init__(self, task: str, model: str, reason: str):\n        super().__init__(reason)\n        self.task = task\n        self.model = model", "docstring": "Raised by a [`Pipeline`] when handling __call__.\n\nArgs:\ntask (`str`): The task of the pipeline.\nmodel (`str`): The model used by the pipeline.\nreason (`str`): The error message to display.", "source": "github-repos"}
{"code": "def _make_request(self, verb: str, endpoint: str, **kwargs: dict[str, Any]) -> requests.Response:\n    res = self._session.request(verb, urllib.parse.urljoin('https:\n    res.raise_for_status()\n    return res.json()", "docstring": "Helper method to make a request and raise an HTTPError if one occurred.\n\nArguments:\nverb: The HTTP verb to use\nendpoint: The endpoint to make the request to\n**kwargs: The json that will be sent as the body of the request.\n\nReturns:\na requests.Response object containing the response from the API.\n\nRaises:\nrequests.exceptions.HTTPError", "source": "github-repos"}
{"code": "def split_code_in_indented_blocks(code: str, indent_level: str='', start_prompt: Optional[str]=None, end_prompt: Optional[str]=None) -> List[str]:\n    index = 0\n    lines = code.split('\\n')\n    if start_prompt is not None:\n        while not lines[index].startswith(start_prompt):\n            index += 1\n        blocks = ['\\n'.join(lines[:index])]\n    else:\n        blocks = []\n    current_block = [lines[index]]\n    index += 1\n    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):\n        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:\n            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):\n                current_block.append(lines[index])\n                blocks.append('\\n'.join(current_block))\n                if index < len(lines) - 1:\n                    current_block = [lines[index + 1]]\n                    index += 1\n                else:\n                    current_block = []\n            else:\n                blocks.append('\\n'.join(current_block))\n                current_block = [lines[index]]\n        else:\n            current_block.append(lines[index])\n        index += 1\n    if len(current_block) > 0:\n        blocks.append('\\n'.join(current_block))\n    if end_prompt is not None and index < len(lines):\n        blocks.append('\\n'.join(lines[index:]))\n    return blocks", "docstring": "Split some code into its indented blocks, starting at a given level.\n\nArgs:\ncode (`str`): The code to split.\nindent_level (`str`): The indent level (as string) to use for identifying the blocks to split.\nstart_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is.\nend_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is.\n\nWarning:\nThe text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code`\ncan thus be retrieved by joining the result.\n\nReturns:\n`List[str]`: The list of blocks.", "source": "github-repos"}
{"code": "def make_view(controller, context, data):\n    \n    \n    if isinstance(data, BlockModel):\n        \n        view = _make_view_subclass(Block, controller, context, data)\n    elif isinstance(data, AttributeModel):\n        \n        view = Attribute(controller, context, data)\n    elif isinstance(data, MethodModel):\n        \n        view = Method(controller, context, data)\n    elif isinstance(data, Model):\n        \n        view = _make_view_subclass(View, controller, context, data)\n    elif isinstance(data, dict):\n        \n        d = OrderedDict()\n        for k, v in data.items():\n            d[k] = make_view(controller, context, v)\n        view = d\n    elif isinstance(data, list):\n        \n        view = [make_view(controller, context, x) for x in data]\n    else:\n        \n        view = data\n    return view", "docstring": "Make a View subclass containing properties specific for given data\n\nArgs:\ncontroller (Controller): The child controller that hosts the data\ncontext (Context): The context the parent has made that the View should\nuse for manipulating the data\ndata (Model): The actual data that context will be manipulating\n\nReturns:\nView: A View subclass instance that provides a user-focused API to\nthe given data", "source": "juraj-google-style"}
{"code": "def fft(x, axis=-1, padding_samples=0):\n    \n    if padding_samples > 0:\n        padded = np.concatenate(\n            [x, np.zeros((len(x), padding_samples), dtype=x.dtype)],\n            axis=axis)\n    else:\n        padded = x\n\n    transformed = np.fft.rfft(padded, axis=axis, norm='ortho')\n\n    sr = audio_sample_rate(int(Seconds(1) / x.dimensions[axis].frequency))\n    scale = LinearScale.from_sample_rate(sr, transformed.shape[-1])\n    new_dimensions = list(x.dimensions)\n    new_dimensions[axis] = FrequencyDimension(scale)\n    return ArrayWithUnits(transformed, new_dimensions)", "docstring": "Apply an FFT along the given dimension, and with the specified amount of\nzero-padding\n\nArgs:\nx (ArrayWithUnits): an :class:`~zounds.core.ArrayWithUnits` instance\nwhich has one or more :class:`~zounds.timeseries.TimeDimension`\naxes\naxis (int): The axis along which the fft should be applied\npadding_samples (int): The number of padding zeros to apply along\naxis before performing the FFT", "source": "juraj-google-style"}
{"code": "def _unique_constraint_name(table: str, field, keys):\n    postfix = '_'.join(keys)\n    return '{table}_{field}_unique_{postfix}'.format(table=table, field=field.column, postfix=postfix)", "docstring": "Gets the name for a UNIQUE INDEX that applies\nto one or more keys in a hstore field.\n\nArguments:\ntable:\nThe name of the table the field is\na part of.\n\nfield:\nThe hstore field to create a\nUNIQUE INDEX for.\n\nkey:\nThe name of the hstore key\nto create the name for.\n\nThis can also be a tuple\nof multiple names.\n\nReturns:\nThe name for the UNIQUE index.", "source": "codesearchnet"}
{"code": "def _ReadEntry(self, parser_mediator, file_object, file_offset):\n    entry_map = self._GetDataTypeMap('linux_libc6_utmp_entry')\n    try:\n        (entry, _) = self._ReadStructureFromFileObject(file_object, file_offset, entry_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse utmp entry at offset: 0x{0:08x} with error: {1!s}.'.format(file_offset, exception))\n    if (entry.type not in self._SUPPORTED_TYPES):\n        raise errors.UnableToParseFile('Unsupported type: {0:d}'.format(entry.type))\n    encoding = (parser_mediator.codepage or 'utf-8')\n    try:\n        username = entry.username.split(b'\\x00')[0]\n        username = username.decode(encoding)\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to decode username string')\n        username = None\n    try:\n        terminal = entry.terminal.split(b'\\x00')[0]\n        terminal = terminal.decode(encoding)\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to decode terminal string')\n        terminal = None\n    if (terminal == '~'):\n        terminal = 'system boot'\n    try:\n        hostname = entry.hostname.split(b'\\x00')[0]\n        hostname = hostname.decode(encoding)\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('unable to decode hostname string')\n        hostname = None\n    if ((not hostname) or (hostname == ':0')):\n        hostname = 'localhost'\n    if (entry.ip_address[4:] == self._EMPTY_IP_ADDRESS[4:]):\n        ip_address = self._FormatPackedIPv4Address(entry.ip_address[:4])\n    else:\n        ip_address = self._FormatPackedIPv6Address(entry.ip_address)\n    event_data = UtmpEventData()\n    event_data.hostname = hostname\n    event_data.exit_status = entry.exit_status\n    event_data.ip_address = ip_address\n    event_data.offset = file_offset\n    event_data.pid = entry.pid\n    event_data.terminal = terminal\n    event_data.terminal_identifier = entry.terminal_identifier\n    event_data.type = entry.type\n    event_data.username = username\n    timestamp = (entry.microseconds + (entry.timestamp * definitions.MICROSECONDS_PER_SECOND))\n    return (timestamp, event_data)", "docstring": "Reads an utmp entry.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the data relative to the start of\nthe file-like object.\n\nReturns:\ntuple: containing:\n\nint: timestamp, which contains the number of microseconds\nsince January 1, 1970, 00:00:00 UTC.\nUtmpEventData: event data of the utmp entry read.\n\nRaises:\nParseError: if the entry cannot be parsed.", "source": "codesearchnet"}
{"code": "def log_cdf_laplace(x, name='log_cdf_laplace'):\n    with ops.name_scope(name, values=[x]):\n        x = ops.convert_to_tensor(x, name='x')\n        lower_solution = -np.log(2.0) + x\n        safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))\n        upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)\n        return array_ops.where_v2(x < 0.0, lower_solution, upper_solution)", "docstring": "Log Laplace distribution function.\n\nThis function calculates `Log[L(x)]`, where `L(x)` is the cumulative\ndistribution function of the Laplace distribution, i.e.\n\n```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```\n\nFor numerical accuracy, `L(x)` is computed in different ways depending on `x`,\n\n```\nx <= 0:\nLog[L(x)] = Log[0.5] + x, which is exact\n\n0 < x:\nLog[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact\n```\n\nArgs:\nx: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"log_ndtr\").\n\nReturns:\n`Tensor` with `dtype=x.dtype`.\n\nRaises:\nTypeError: if `x.dtype` is not handled.", "source": "github-repos"}
{"code": "def set_vector_catch(self, flags):\n        \n        res = self._dll.JLINKARM_WriteVectorCatch(flags)\n        if res < 0:\n            raise errors.JLinkException(res)\n        return None", "docstring": "Sets vector catch bits of the processor.\n\nThe CPU will jump to a vector if the given vector catch is active, and\nwill enter a debug state.  This has the effect of halting the CPU as\nwell, meaning the CPU must be explicitly restarted.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"}
{"code": "def cancel_merge_when_pipeline_succeeds(self, **kwargs):\n        \n\n        path = ('%s/%s/cancel_merge_when_pipeline_succeeds' %\n                (self.manager.path, self.get_id()))\n        server_data = self.manager.gitlab.http_put(path, **kwargs)\n        self._update_attrs(server_data)", "docstring": "Cancel merge when the pipeline succeeds.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMROnBuildSuccessError: If the server could not handle the\nrequest", "source": "juraj-google-style"}
{"code": "def recv(self, socket_, encoding=None):\n    unpacker = msgpack.Unpacker(encoding=encoding)\n    response = socket_.recv(8)\n    if (response == b''):\n        raise TensorForceError(('No data received by socket.recv in call to method `recv` ' + '(listener possibly closed)!'))\n    orig_len = int(response)\n    received_len = 0\n    while True:\n        data = socket_.recv(min((orig_len - received_len), self.max_msg_len))\n        if (not data):\n            raise TensorForceError('No data of len {} received by socket.recv in call to method `recv`!'.format((orig_len - received_len)))\n        data_len = len(data)\n        received_len += data_len\n        unpacker.feed(data)\n        if (received_len == orig_len):\n            break\n    for message in unpacker:\n        sts = message.get('status', message.get(b'status'))\n        if sts:\n            if ((sts == 'ok') or (sts == b'ok')):\n                return message\n            else:\n                raise TensorForceError('RemoteEnvironment server error: {}'.format(message.get('message', 'not specified')))\n        else:\n            raise TensorForceError(\"Message without field 'status' received!\")\n    raise TensorForceError('No message encoded in data stream (data stream had len={})'.format(orig_len))", "docstring": "Receives a message as msgpack-numpy encoded byte-string from the given socket object.\nBlocks until something was received.\n\nArgs:\nsocket_: The python socket object to use.\nencoding (str): The encoding to use for unpacking messages from the socket.\nReturns: The decoded (as dict) message received.", "source": "codesearchnet"}
{"code": "def _ConvertDictToCollectionsCounter(cls, json_dict):\n    collections_counter = collections.Counter()\n    for (key, value) in iter(json_dict.items()):\n        if (key == '__type__'):\n            continue\n        collections_counter[key] = value\n    return collections_counter", "docstring": "Converts a JSON dict into a collections.Counter.\n\nThe dictionary of the JSON serialized objects consists of:\n{\n'__type__': 'collections.Counter'\n...\n}\n\nHere '__type__' indicates the object base type. In this case this should\nbe 'collections.Counter'. The rest of the elements of the dictionary make up\nthe preprocessing object properties.\n\nArgs:\njson_dict (dict[str, object]): JSON serialized objects.\n\nReturns:\ncollections.Counter: counter.", "source": "codesearchnet"}
{"code": "def get_config_dict(self, services, hostname=None):\n    if (not isinstance(services, (tuple, list))):\n        services = [services]\n    endpoints_util.check_list_type(services, remote._ServiceClass, 'services', allow_none=False)\n    return self.__api_descriptor(services, hostname=hostname)", "docstring": "JSON dict description of a protorpc.remote.Service in API format.\n\nArgs:\nservices: Either a single protorpc.remote.Service or a list of them\nthat implements an api/version.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\ndict, The API descriptor document as a JSON dict.", "source": "codesearchnet"}
{"code": "def from_sites(cls, sites, charge=None, validate_proximity=False, to_unit_cell=False):\n    if (len(sites) < 1):\n        raise ValueError(('You need at least one site to construct a %s' % cls))\n    prop_keys = []\n    props = {}\n    lattice = None\n    for (i, site) in enumerate(sites):\n        if (not lattice):\n            lattice = site.lattice\n        elif (site.lattice != lattice):\n            raise ValueError('Sites must belong to the same lattice')\n        for (k, v) in site.properties.items():\n            if (k not in prop_keys):\n                prop_keys.append(k)\n                props[k] = ([None] * len(sites))\n            props[k][i] = v\n    for (k, v) in props.items():\n        if any(((vv is None) for vv in v)):\n            warnings.warn(('Not all sites have property %s. Missing values are set to None.' % k))\n    return cls(lattice, [site.species for site in sites], [site.frac_coords for site in sites], charge=charge, site_properties=props, validate_proximity=validate_proximity, to_unit_cell=to_unit_cell)", "docstring": "Convenience constructor to make a Structure from a list of sites.\n\nArgs:\nsites: Sequence of PeriodicSites. Sites must have the same\nlattice.\nvalidate_proximity (bool): Whether to check if there are sites\nthat are less than 0.01 Ang apart. Defaults to False.\nto_unit_cell (bool): Whether to translate sites into the unit\ncell.\n\nReturns:\n(Structure) Note that missing properties are set as None.", "source": "codesearchnet"}
{"code": "def matching_selectors(self, partial_selector):\n    if (partial_selector in self._selector_map):\n        return [partial_selector]\n    selector_components = partial_selector.split('.')\n    node = self._selector_tree\n    for component in reversed(selector_components):\n        if (component not in node):\n            return []\n        node = node[component]\n    selectors = []\n    dfs_stack = [node]\n    while dfs_stack:\n        node = dfs_stack.pop().copy()\n        selector = node.pop(_TERMINAL_KEY, None)\n        dfs_stack.extend(node.values())\n        if selector:\n            selectors.append(selector)\n    return selectors", "docstring": "Retrieves all selectors matching `partial_selector`.\n\nFor instance, if \"one.a.b\" and \"two.a.b\" are stored in a `SelectorMap`, both\n`matching_selectors('b')` and `matching_selectors('a.b')` will return them.\n\nIn the event that `partial_selector` exactly matches an existing complete\nselector, only that complete selector is returned. For instance, if\n\"a.b.c.d\" and \"c.d\" are stored, `matching_selectors('c.d')` will return only\n`['c.d']`, while `matching_selectors('d')` will return both.\n\nArgs:\npartial_selector: The partial selector to find matches for.\n\nReturns:\nA list of selectors matching `partial_selector`.", "source": "codesearchnet"}
{"code": "def diag_part(self, name='diag_part'):\n    with self._name_scope(name):\n        return self._diag_part()", "docstring": "Efficiently get the [batch] diagonal part of this operator.\n\nIf this operator has shape `[B1,...,Bb, M, N]`, this returns a\n`Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where\n`diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.\n\n```\nmy_operator = LinearOperatorDiag([1., 2.])\n\n# Efficiently get the diagonal\nmy_operator.diag_part()\n==> [1., 2.]\n\n# Equivalent, but inefficient method\ntf.linalg.diag_part(my_operator.to_dense())\n==> [1., 2.]\n```\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\ndiag_part:  A `Tensor` of same `dtype` as self.", "source": "github-repos"}
{"code": "def end(self):\n    return (self.last.lineno, self.last.column + len(self.last.value))", "docstring": "The end of the logical line.\n\nReturns:\nA tuple of the ending line number and column.", "source": "github-repos"}
{"code": "def __init__(self, sink):\n    super().__init__()\n    self.sink = sink", "docstring": "Initializes a Write transform.\n\nArgs:\nsink: Data sink to write to.", "source": "github-repos"}
{"code": "def sync(coro, timeout=None):\n    loop = initloop()\n    return asyncio.run_coroutine_threadsafe(coro, loop).result(timeout)", "docstring": "Schedule a coroutine to run on the global loop and return it's result.\n\nArgs:\ncoro (coroutine): The coroutine instance.\n\nNotes:\nThis API is thread safe and should only be called by non-loop threads.", "source": "codesearchnet"}
{"code": "def _pywrap_tensorflow():\n    try:\n        from tensorboard.compat import notf\n    except ImportError:\n        try:\n            from tensorflow.python import pywrap_tensorflow\n            return pywrap_tensorflow\n        except ImportError:\n            pass\n    from tensorboard.compat.tensorflow_stub import pywrap_tensorflow\n    return pywrap_tensorflow", "docstring": "Provide pywrap_tensorflow access in TensorBoard.\n\npywrap_tensorflow cannot be accessed from tf.python.pywrap_tensorflow\nand needs to be imported using\n`from tensorflow.python import pywrap_tensorflow`. Therefore, we provide\na separate accessor function for it here.\n\nNOTE: pywrap_tensorflow is not part of TensorFlow API and this\ndependency will go away soon.\n\nReturns:\npywrap_tensorflow import, if available.\n\nRaises:\nImportError: if we couldn't import pywrap_tensorflow.", "source": "codesearchnet"}
{"code": "def parse(self, words):\n\n    def exact(words):\n        'If already represented as float or int, convert.'\n        try:\n            return float(words)\n        except:\n            return None\n    guess = exact(words)\n    if (guess is not None):\n        return guess\n    split = words.split(' ')\n    if (split[(- 1)] in self.__fractions__):\n        split[(- 1)] = self.__fractions__[split[(- 1)]]\n    elif (split[(- 1)] in self.__ordinals__):\n        split[(- 1)] = self.__ordinals__[split[(- 1)]]\n    parsed_ordinals = ' '.join(split)\n    return self.parseFloat(parsed_ordinals)", "docstring": "A general method for parsing word-representations of numbers.\nSupports floats and integers.\n\nArgs:\nwords (str): Description of an arbitrary number.\n\nReturns:\nA double representation of the words.", "source": "codesearchnet"}
{"code": "def _generate_splits(self, m, r):\n    new_rects = []\n    if (r.left > m.left):\n        new_rects.append(Rectangle(m.left, m.bottom, (r.left - m.left), m.height))\n    if (r.right < m.right):\n        new_rects.append(Rectangle(r.right, m.bottom, (m.right - r.right), m.height))\n    if (r.top < m.top):\n        new_rects.append(Rectangle(m.left, r.top, m.width, (m.top - r.top)))\n    if (r.bottom > m.bottom):\n        new_rects.append(Rectangle(m.left, m.bottom, m.width, (r.bottom - m.bottom)))\n    return new_rects", "docstring": "When a rectangle is placed inside a maximal rectangle, it stops being one\nand up to 4 new maximal rectangles may appear depending on the placement.\n_generate_splits calculates them.\n\nArguments:\nm (Rectangle): max_rect rectangle\nr (Rectangle): rectangle placed\n\nReturns:\nlist : list containing new maximal rectangles or an empty list", "source": "codesearchnet"}
{"code": "def _dump_worker_context(self, strategy):\n    context = distribute_coordinator_context.get_current_worker_context()\n    self.assertTrue(context is not None)\n    task_type = str(context.task_type)\n    task_id = context.task_id or 0\n    with self._lock:\n        if task_type not in self._worker_context:\n            self._worker_context[task_type] = []\n        while len(self._worker_context[task_type]) <= task_id:\n            self._worker_context[task_type].append(None)\n        self._worker_context[task_type][task_id] = (context.master_target, context.num_workers, context.is_chief, context.distributed_mode)", "docstring": "Dumps the properties of each worker context.\n\nIt dumps the context properties to a dict mapping from task_type to a list\nof tuples of master_target, num_workers, is_chief and distribute_mode, where\nthe list is indexed by the task_id.\n\nArgs:\nstrategy: a `DistributionStrategy` object.", "source": "github-repos"}
{"code": "def slicewise(tf_fn, xs, output_shape=None, output_dtype=None, splittable_dims=None, grad_function=None, name=None):\n    multiple_outputs = isinstance(output_dtype, list)\n    output_shapes = (output_shape if multiple_outputs else [output_shape])\n    output_dtypes = (output_dtype if multiple_outputs else [output_dtype])\n    op = SlicewiseOperation(tf_fn, xs, [(convert_to_shape(shape) or xs[0].shape) for shape in output_shapes], [(dtype or xs[0].dtype) for dtype in output_dtypes], splittable_dims, grad_function, name=name)\n    return (tuple(op.outputs) if multiple_outputs else op.outputs[0])", "docstring": "Slice-wise call to any tensorflow function.\n\nThe output shape and dtype default to those of the first input.\nsplittable_dims is a list of Dimensions which can be split while keeping the\ncomputation valid.\n\nArgs:\ntf_fn: a function taking n tf.Tensors and returning a tf.Tensor\nxs: a list of n Tensors\noutput_shape: a Shape (or list of shapes)\noutput_dtype: a dtype (or list of dtypes)\nsplittable_dims: a list of Dimensions which are ok to split\ngrad_function: an optional gradients function.  If None, use tf gradient.\nname: an optional string\n\nReturns:\na Tensor (or a tuple of Tensors)", "source": "codesearchnet"}
{"code": "def get_pipeline_boxes(self, pipeline_key, sort_by = None):\n\t\t\n\t\tif not pipeline_key:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline_key\n\t\t\t\t\t\t])\n\t\t\n\t\tif sort_by:\n\t\t\t\tif sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:\n\t\t\t\t\turi += self.sort_by_postfix + sort_by\n\t\t\t\telse:\t\t\n\t\t\t\t\treturn requests.codes.bad_request, {'success' : 'False', \n\t\t\t\t\t\t\t\t\t\t\t\t'error': 'sortBy needs to be \\'creationTimestamp\\', or \\'lastUpdatedTimestamp\\''}\n\t\t\n\t\treturn self._req('get', uri)", "docstring": "Gets a list of all box objects in a pipeline. Performs a single GET.\nArgs:\npipeline_key\tkey for pipeline\nsort_by\t\t\tin desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'\nNot sure if it is supported\nreturns \t\t(status code for the GET request, dict of boxes)", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    hashers = cls._ParseStringOption(\n        options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING)\n\n    hasher_file_size_limit = cls._ParseNumericOption(\n        options, 'hasher_file_size_limit', default_value=0)\n\n    \n\n    if hasher_file_size_limit < 0:\n      raise errors.BadConfigOption(\n          'Invalid hasher file size limit value cannot be negative.')\n\n    setattr(configuration_object, '_hasher_names_string', hashers)\n    setattr(\n        configuration_object, '_hasher_file_size_limit', hasher_file_size_limit)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"}
{"code": "def _check_if_fenced(self, name):\n    if (name in object.__getattribute__(self, '_attributes_to_fence')):\n        raise TranspilerAccessError(('The fenced %s has the property %s protected' % (type(object.__getattribute__(self, '_wrapped')), name)))", "docstring": "Checks if the attribute name is in the list of attributes to protect. If so, raises\nTranspilerAccessError.\n\nArgs:\nname (string): the attribute name to check\n\nRaises:\nTranspilerAccessError: when name is the list of attributes to protect.", "source": "codesearchnet"}
{"code": "def GetFileObjectByPathSpec(self, path_spec):\n    \n    file_entry = self.GetFileEntryByPathSpec(path_spec)\n    if not file_entry:\n      return None\n\n    return file_entry.GetFileObject()", "docstring": "Retrieves a file-like object for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nFileIO: a file-like object or None if not available.", "source": "juraj-google-style"}
{"code": "def with_max_depth(self, max_depth):\n    self._options['max_depth'] = max_depth\n    return self", "docstring": "Set the maximum depth of display.\n\nThe depth depends on profiling view. For 'scope' view, it's the\ndepth of name scope hierarchy (tree), for 'op' view, it's the number\nof operation types (list), etc.\n\nArgs:\nmax_depth: Maximum depth of the data structure to display.\nReturns:\nself", "source": "github-repos"}
{"code": "class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor):\n\n    def __init__(self, bos_token_id: int):\n        if bos_token_id < 0:\n            raise ValueError(f'The forced bos token id  must be a non-negative integer, got {bos_token_id}')\n        self.bos_token_id = bos_token_id\n\n    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:\n        if cur_len == 1:\n            batch_size, num_tokens = scores.shape\n            scores = tf.zeros((batch_size, 1))\n            if self.bos_token_id > 0:\n                scores = tf.concat((tf.broadcast_to(-float('inf'), (batch_size, self.bos_token_id)), scores), axis=-1)\n            if self.bos_token_id < num_tokens - 1:\n                scores = tf.concat((scores, tf.broadcast_to(-float('inf'), (batch_size, num_tokens - 1 - self.bos_token_id))), axis=-1)\n        return scores", "docstring": "[`TFLogitsProcessor`] that enforces the specified token as the first generated token.\n\nArgs:\nbos_token_id (`int`):\nThe id of the token to force as the first generated token.", "source": "github-repos"}
{"code": "def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:\n        \n        axis = []\n        for var in vars_list:\n            if var in fluent.scope.as_list():\n                ax = fluent.scope.index(var)\n                if fluent.batch:\n                    ax += 1\n                axis.append(ax)\n        return axis", "docstring": "Maps the `vars_list` into a list of axis indices\ncorresponding to the `fluent` scope.\n\nArgs:\nx: The fluent.\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nList[int]: a list of axis.", "source": "juraj-google-style"}
{"code": "def _wrap_call_and_conditional_losses(layer):\n    layer_call = _get_layer_call_method(layer)\n\n    def call_and_return_conditional_losses(*args, **kwargs):\n        \n        call_output = layer_call(*args, **kwargs)\n        if version_utils.is_v1_layer_or_model(layer):\n            conditional_losses = layer.get_losses_for(_filtered_inputs([args, kwargs]))\n        else:\n            conditional_losses = [l for l in layer.losses if not hasattr(l, '_unconditional_loss')]\n        return (call_output, conditional_losses)\n    return _create_call_fn_decorator(layer, call_and_return_conditional_losses)", "docstring": "Wraps call function that returns a tuple of (outputs, losses).\n\nThe losses returned are conditional on the inputs passed to the call function.\nUnconditional losses (e.g. weight regularizeration) are wrapped separately.\n\nArgs:\nlayer: a Keras layer object\n\nReturns:\npython call function that returns outputs and conditional losses -- excludes\nactivity regularizer", "source": "github-repos"}
{"code": "def uniprot_reviewed_checker(uniprot_id):\n    query_string = ('id:' + uniprot_id)\n    uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))\n    uni_rev_df = pd.read_table(uni_rev_raw, sep='\\t', index_col=0)\n    uni_rev_df = uni_rev_df.fillna(False)\n    uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]\n    uni_rev_df = uni_rev_df.replace(to_replace='reviewed', value=True)\n    uni_rev_df = uni_rev_df.replace(to_replace='unreviewed', value=False)\n    uni_rev_dict_adder = uni_rev_df.to_dict()['Status']\n    return uni_rev_dict_adder[uniprot_id]", "docstring": "Check if a single UniProt ID is reviewed or not.\n\nArgs:\nuniprot_id:\n\nReturns:\nbool: If the entry is reviewed", "source": "codesearchnet"}
{"code": "def get_aws_unique_id(timeout=DEFAULT_AWS_TIMEOUT):\n    \n    try:\n        resp = requests.get(AWS_ID_URL, timeout=timeout).json()\n    except requests.exceptions.ConnectTimeout:\n        _logger.warning('Connection timeout when determining AWS unique '\n                        'ID. Not using AWS unique ID.')\n        return None\n    else:\n        aws_id = \"{0}_{1}_{2}\".format(resp['instanceId'], resp['region'],\n                                      resp['accountId'])\n        _logger.debug('Using AWS unique ID %s.', aws_id)\n        return aws_id", "docstring": "Determine the current AWS unique ID\n\nArgs:\ntimeout (int): How long to wait for a response from AWS metadata IP", "source": "juraj-google-style"}
{"code": "def SetRange(self, range_offset, range_size):\n    \n    if self._is_open:\n      raise IOError('Already open.')\n\n    if range_offset < 0:\n      raise ValueError(\n          'Invalid range offset: {0:d} value out of bounds.'.format(\n              range_offset))\n\n    if range_size < 0:\n      raise ValueError(\n          'Invalid range size: {0:d} value out of bounds.'.format(\n              range_size))\n\n    self._range_offset = range_offset\n    self._range_size = range_size\n    self._current_offset = 0", "docstring": "Sets the data range (offset and size).\n\nThe data range is used to map a range of data within one file\n(e.g. a single partition within a full disk image) as a file-like object.\n\nArgs:\nrange_offset (int): start offset of the data range.\nrange_size (int): size of the data range.\n\nRaises:\nIOError: if the file-like object is already open.\nOSError: if the file-like object is already open.\nValueError: if the range offset or range size is invalid.", "source": "juraj-google-style"}
{"code": "def derive_temporary_python2_environment(destination_directory: str, python3_environment: PreparedEnv, verbose: bool, env_name: str='.test_virtualenv_py2', python_path: str='/usr/bin/python2.7') -> PreparedEnv:\n    shutil.rmtree(destination_directory)\n    input_directory = cast(str, python3_environment.destination_directory)\n    os.chdir(input_directory)\n    conversion_script_path = os.path.join(input_directory, 'dev_tools', 'python2.7-generate.sh')\n    shell_tools.run_cmd('bash', conversion_script_path, destination_directory, input_directory, python3_environment.virtual_env_path, out=sys.stderr)\n    os.chdir(destination_directory)\n    env_path = os.path.join(destination_directory, env_name)\n    req_path = os.path.join(destination_directory, 'requirements.txt')\n    dev_req_path = os.path.join(destination_directory, 'pip-list-test-tools.txt')\n    contrib_req_path = os.path.join(destination_directory, 'cirq', 'contrib', 'contrib-requirements.txt')\n    req_paths = [req_path, dev_req_path, contrib_req_path]\n    create_virtual_env(venv_path=env_path, python_path=python_path, requirements_paths=req_paths, verbose=verbose)\n    return PreparedEnv(github_repo=python3_environment.repository, actual_commit_id=python3_environment.actual_commit_id, compare_commit_id=python3_environment.compare_commit_id, destination_directory=destination_directory, virtual_env_path=env_path)", "docstring": "Creates a python 2.7 environment starting from a prepared python 3 one.\n\nArgs:\ndestination_directory: Where to put the python 2 environment.\npython3_environment: The prepared environment to start from.\nverbose: When set, more progress output is produced.\nenv_name: The name to use for the virtualenv directory.\npython_path: The python binary to use.\n\nReturns:\nA description of the environment that was prepared.", "source": "codesearchnet"}
{"code": "def hours(start, end=None):\n    \n    return iterate.between(start, datetime.timedelta(hours=1), end)", "docstring": "Iterate over the hours between the given datetime_tzs.\n\nArgs:\nstart: datetime_tz to start from.\nend: (Optional) Date to end at, if not given the iterator will never\nterminate.\n\nReturns:\nAn iterator which generates datetime_tz objects a hour apart.", "source": "juraj-google-style"}
{"code": "def add_spin_by_element(self, spins):\n    for site in self.sites:\n        new_sp = {}\n        for (sp, occu) in site.species.items():\n            sym = sp.symbol\n            oxi_state = getattr(sp, 'oxi_state', None)\n            new_sp[Specie(sym, oxidation_state=oxi_state, properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu\n        site.species = new_sp", "docstring": "Add spin states to a structure.\n\nArgs:\nspisn (dict): Dict of spins associated with\nelements or species, e.g. {\"Ni\":+5} or {\"Ni2+\":5}", "source": "codesearchnet"}
{"code": "def __init__(self, f, name, problems):\n    \n    self._f = f\n    self._name = name\n    self._crlf = 0\n    self._crlf_examples = []\n    self._lf = 0\n    self._lf_examples = []\n    self._line_number = 0  \n    self._problems = problems", "docstring": "Create new object.\n\nArgs:\nf: file-like object to wrap\nname: name to use for f. StringIO objects don't have a name attribute.\nproblems: a ProblemReporterBase object", "source": "juraj-google-style"}
{"code": "def fetch(self, virtual_account_id, data={}, **kwargs):\n        \n        return super(VirtualAccount, self).fetch(\n            virtual_account_id,\n            data,\n            **kwargs)", "docstring": "Fetch Virtual Account for given Id\n\nArgs:\nvirtual_account_id :\nId for which Virtual Account object has to be retrieved\n\nReturns:\nVirtual Account dict for given Virtual Account Id", "source": "juraj-google-style"}
{"code": "def _define_loop(graph, eval_steps):\n    loop = tools.Loop(None, graph.step, graph.should_log, graph.do_report, graph.force_reset)\n    loop.add_phase('eval', graph.done, graph.score, graph.summary, eval_steps, report_every=eval_steps, log_every=None, checkpoint_every=None, feed={graph.is_training: False})\n    return loop", "docstring": "Create and configure an evaluation loop.\n\nArgs:\ngraph: Object providing graph elements via attributes.\neval_steps: Number of evaluation steps per epoch.\n\nReturns:\nLoop object.", "source": "codesearchnet"}
{"code": "def load_py(stream, filepath=None):\n    with add_sys_paths(config.package_definition_build_python_paths):\n        return _load_py(stream, filepath=filepath)", "docstring": "Load python-formatted data from a stream.\n\nArgs:\nstream (file-like object).\n\nReturns:\ndict.", "source": "codesearchnet"}
{"code": "def _aggregate(self, tests_results):\n        \n        summary = {\n            \"success\": True,\n            \"stat\": {\n                \"testcases\": {\n                    \"total\": len(tests_results),\n                    \"success\": 0,\n                    \"fail\": 0\n                },\n                \"teststeps\": {}\n            },\n            \"time\": {},\n            \"platform\": report.get_platform(),\n            \"details\": []\n        }\n\n        for tests_result in tests_results:\n            testcase, result = tests_result\n            testcase_summary = report.get_summary(result)\n\n            if testcase_summary[\"success\"]:\n                summary[\"stat\"][\"testcases\"][\"success\"] += 1\n            else:\n                summary[\"stat\"][\"testcases\"][\"fail\"] += 1\n\n            summary[\"success\"] &= testcase_summary[\"success\"]\n            testcase_summary[\"name\"] = testcase.config.get(\"name\")\n            testcase_summary[\"in_out\"] = utils.get_testcase_io(testcase)\n\n            report.aggregate_stat(summary[\"stat\"][\"teststeps\"], testcase_summary[\"stat\"])\n            report.aggregate_stat(summary[\"time\"], testcase_summary[\"time\"])\n\n            summary[\"details\"].append(testcase_summary)\n\n        return summary", "docstring": "aggregate results\n\nArgs:\ntests_results (list): list of (testcase, result)", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self._asynchronous_correlation_value:\n        self._asynchronous_correlation_value.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(CancelRequestPayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Cancel request payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def get_file(self, file_name, local_destination=None, **kwargs):\n        \n        if not local_destination:\n            local_destination = file_name\n\n        return SubprocessTask(\n            self._rsync_cmd() +\n            ['-ut', '%s:%s' % (self.hostname, file_name), local_destination],\n            **kwargs)", "docstring": "Get a file from a remote host with rsync.\n\nArgs:\nfile_name (str): The relative location of the file on the remote\nhost.\n\nlocal_destination (str): The destination for the file on the local\nhost. If `None`, will be assumed to be the same as\n**file_name**. Default `None`.\n\n**kwargs: Passed to ``SubprocessTask``'s init method.\n\nReturn:\n``pyrem.task.SubprocessTask``: The resulting task.", "source": "juraj-google-style"}
{"code": "def identity(shape, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True, fmt: str='quat') -> Rotation:\n    if fmt == 'rot_mat':\n        rot_mats = identity_rot_mats(shape, dtype, device, requires_grad)\n        return Rotation(rot_mats=rot_mats, quats=None)\n    elif fmt == 'quat':\n        quats = identity_quats(shape, dtype, device, requires_grad)\n        return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n    else:\n        raise ValueError(f'Invalid format: f{fmt}')", "docstring": "Returns an identity Rotation.\n\nArgs:\nshape:\nThe \"shape\" of the resulting Rotation object. See documentation for the shape property\ndtype:\nThe torch dtype for the rotation\ndevice:\nThe torch device for the new rotation\nrequires_grad:\nWhether the underlying tensors in the new rotation object should require gradient computation\nfmt:\nOne of \"quat\" or \"rot_mat\". Determines the underlying format of the new object's rotation\nReturns:\nA new identity rotation", "source": "github-repos"}
{"code": "def _tidy_names(names, nnames, extra_names=None):\n    \n    if len(names) < nnames and extra_names is not None:\n        names.extend(extra_names)\n    names.extend(range(nnames - len(names)))\n    del names[nnames:]", "docstring": "Truncate or extend names so that its len is nnames.\n\nThe list is modified, this function returns nothing.\n\nArgs:\nnames (list): list of names.\nnnames (int): desired number of names.\nextra_names (list of str): list of names to be used to extend the list\nif needed. If this list isn't provided, a range is used instead.", "source": "juraj-google-style"}
{"code": "def action_fluent_variables(self) -> FluentParamsList:\n    fluents = self.domain.action_fluents\n    ordering = self.domain.action_fluent_ordering\n    return self._fluent_params(fluents, ordering)", "docstring": "Returns the instantiated action fluents in canonical order.\n\nReturns:\nSequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name\nand a list of instantiated fluents represented as strings.", "source": "codesearchnet"}
{"code": "def _validate_isvalid_quantity(self, isvalid_quantity, field, value):\n    quantity = Q_(value[0])\n    low_lim = (0.0 * units(property_units[field]))\n    try:\n        if (quantity <= low_lim):\n            self._error(field, 'value must be greater than 0.0 {}'.format(property_units[field]))\n    except pint.DimensionalityError:\n        self._error(field, ('incompatible units; should be consistent with ' + property_units[field]))", "docstring": "Checks for valid given value and appropriate units.\n\nArgs:\nisvalid_quantity (`bool`): flag from schema indicating quantity to be checked.\nfield (`str`): property associated with quantity in question.\nvalue (`list`): list whose first element is a string representing a value with units\n\nThe rule's arguments are validated against this schema:\n{'isvalid_quantity': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'list'}}", "source": "codesearchnet"}
{"code": "def save(self, wf_state):\n    self.wf_state = wf_state\n    self.wf_state['role_id'] = self.current.role_id\n    self.set(self.wf_state)\n    if (self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS):\n        self.publish(job='_zops_sync_wf_cache', token=self.db_key)", "docstring": "write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache\n\nArgs:\nwf_state dict: wf state", "source": "codesearchnet"}
{"code": "def get_percentile(self, percentile):\n    assert (0 <= percentile <= 100), 'percentile must be between 0 and 100. Got {}'.format(percentile)\n    return self._percentile(self._values, percentile)", "docstring": "Get a value representing a the input percentile of the Data Collection.\n\nArgs:\npercentile: A float value from 0 to 100 representing the\nrequested percentile.\n\nReturn:\nThe Data Collection value at the input percentile", "source": "codesearchnet"}
{"code": "def get(identifier):\n    if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):\n        return identifier\n    elif isinstance(identifier, tf_optimizer_module.Optimizer):\n        opt = TFOptimizer(identifier)\n        backend.track_tf_optimizer(opt)\n        return opt\n    elif isinstance(identifier, dict):\n        return deserialize(identifier)\n    elif isinstance(identifier, str):\n        config = {'class_name': str(identifier), 'config': {}}\n        return deserialize(config)\n    else:\n        raise ValueError('Could not interpret optimizer identifier: {}'.format(identifier))", "docstring": "Retrieves a Keras Optimizer instance.\n\nArgs:\nidentifier: Optimizer identifier, one of\n- String: name of an optimizer\n- Dictionary: configuration dictionary. - Keras Optimizer instance (it\nwill be returned unchanged). - TensorFlow Optimizer instance (it\nwill be wrapped as a Keras Optimizer).\n\nReturns:\nA Keras Optimizer instance.\n\nRaises:\nValueError: If `identifier` cannot be interpreted.", "source": "github-repos"}
{"code": "def from_private_key(account_name, private_key=None, private_key_path=None, storage=None, storage_path=None, api_version='v3', readonly=False, http_client=None, ga_hook=None):\n    if (not private_key):\n        if (not private_key_path):\n            raise GapyError('Must provide either a private_key or a private_key_file')\n        if isinstance(private_key_path, basestring):\n            private_key_path = open(private_key_path)\n        private_key = private_key_path.read()\n    storage = _get_storage(storage, storage_path)\n    scope = (GOOGLE_API_SCOPE_READONLY if readonly else GOOGLE_API_SCOPE)\n    credentials = SignedJwtAssertionCredentials(account_name, private_key, scope)\n    credentials.set_store(storage)\n    return Client(_build(credentials, api_version, http_client), ga_hook)", "docstring": "Create a client for a service account.\n\nCreate a client with an account name and a private key.\n\nArgs:\naccount_name: str, the account identifier (probably the account email).\nprivate_key: str, the private key as a string.\nprivate_key_path: str, path to a file with the private key in.\nstorage: oauth2client.client.Storage, a Storage implementation to store\ncredentials.\nstorage_path: str, path to a file storage.\nreadonly: bool, default False, if True only readonly access is requested\nfrom GA.\nhttp_client: httplib2.Http, Override the default http client used.\nga_hook: function, a hook that is called every time a query is made\nagainst GA.", "source": "codesearchnet"}
{"code": "def _create_L_ind(self, L):\n        \n        \n        \n        if issparse(L):\n            L = L.todense()\n\n        L_ind = np.zeros((self.n, self.m * self.k))\n        for y in range(1, self.k + 1):\n            \n            \n            L_ind[:, (y - 1) :: self.k] = np.where(L == y, 1, 0)\n        return L_ind", "docstring": "Convert a label matrix with labels in 0...k to a one-hot format\n\nArgs:\nL: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}\n\nReturns:\nL_ind: An [n,m*k] dense np.ndarray with values in {0,1}\n\nNote that no column is required for 0 (abstain) labels.", "source": "juraj-google-style"}
{"code": "def mangle(tree, toplevel=False):\n    \n    sym_table = SymbolTable()\n    visitor = ScopeTreeVisitor(sym_table)\n    visitor.visit(tree)\n\n    fill_scope_references(tree)\n    mangle_scope_tree(sym_table.globals, toplevel)\n\n    mangler = NameManglerVisitor()\n    mangler.visit(tree)", "docstring": "Mangle names.\n\nArgs:\ntoplevel: defaults to False. Defines if global\nscope should be mangled or not.", "source": "juraj-google-style"}
{"code": "def collect_members(module_to_name):\n  \n  members = {}\n  for module, module_name in module_to_name.items():\n    all_names = getattr(module, \"__all__\", None)\n    for name, member in inspect.getmembers(module):\n      if ((inspect.isfunction(member) or inspect.isclass(member)) and\n          not _always_drop_symbol_re.match(name) and\n          (all_names is None or name in all_names)):\n        fullname = '%s.%s' % (module_name, name)\n        if name in members:\n          other_fullname, other_member = members[name]\n          if member is not other_member:\n            raise RuntimeError(\"Short name collision between %s and %s\" %\n                               (fullname, other_fullname))\n          if len(fullname) == len(other_fullname):\n            raise RuntimeError(\"Can't decide whether to use %s or %s for %s: \"\n                               \"both full names have length %d\" %\n                               (fullname, other_fullname, name, len(fullname)))\n          if len(fullname) > len(other_fullname):\n            continue  \n        members[name] = fullname, member\n  return members", "docstring": "Collect all symbols from a list of modules.\n\nArgs:\nmodule_to_name: Dictionary mapping modules to short names.\n\nReturns:\nDictionary mapping name to (fullname, member) pairs.", "source": "juraj-google-style"}
{"code": "def get_and_check_tasks_for(context, task, msg_prefix=''):\n    tasks_for = task['extra']['tasks_for']\n    if (tasks_for not in context.config['valid_tasks_for']):\n        raise ValueError('{}Unknown tasks_for: {}'.format(msg_prefix, tasks_for))\n    return tasks_for", "docstring": "Given a parent task, return the reason the parent task was spawned.\n\n``.taskcluster.yml`` uses this to know whether to spawn an action,\ncron, or decision task definition.  ``tasks_for`` must be a valid one defined in the context.\n\nArgs:\ntask (dict): the task definition.\nmsg_prefix (str): the string prefix to use for an exception.\n\nRaises:\n(KeyError, ValueError): on failure to find a valid ``tasks_for``.\n\nReturns:\nstr: the ``tasks_for``", "source": "codesearchnet"}
{"code": "def sparse_union_indices_and_values(x1, x2_indices, x2_values=None):\n    zeros2 = tf.SparseTensor(x2_indices, tf.zeros((tf.shape(x2_indices)[0],), x1.values.dtype), x1.dense_shape)\n    x1_for_union = tf.sparse.add(x1, zeros2)\n    if x2_values is not None:\n        x2 = tf.SparseTensor(x2_indices, x2_values, x1.dense_shape)\n        zeros1 = tf.sparse.map_values(tf.zeros_like, x1)\n        x2_for_union = tf.sparse.add(x2, zeros1)\n        return (x1_for_union.indices, x1_for_union.values, x2_for_union.values)\n    else:\n        return (x1_for_union.indices, x1_for_union.values, None)", "docstring": "Compute the indices for the union of the indices of the provided\n`tf.SparseTensor`s and another set of indices and return the modified values\nfor these indices.\n\nArgs:\nx: a `tf.SparseTensor`.\nindices: another set of indices in the `tf.SparseTensor` format.\nReturns: A tuple containing:\n- the indices for the union\n- `x1` values for the union indices (some zeros were added)\n- `x2` values for the union indices (some zeros were added) or `None` if\n`x2_values` was `None`.", "source": "github-repos"}
{"code": "def put_content(self, url, content):\n    cache_path = self._url_to_path(url)\n    try:\n        dir = os.path.dirname(cache_path)\n        os.makedirs(dir)\n    except OSError as e:\n        if (e.errno != errno.EEXIST):\n            raise Error(('Failed to create cache directories for ' % cache_path))\n    try:\n        with open(cache_path, 'wb') as f:\n            f.write(content)\n    except IOError:\n        raise Error(('Failed to cache content as %s for %s' % (cache_path, url)))", "docstring": "Stores the content of a resource into the disk cache.\n\nArgs:\nurl: The url of the resource\ncontent: The content of the resource\n\nRaises:\nCacheError: If the content cannot be put in cache", "source": "codesearchnet"}
{"code": "def get_day_of_month_description(self):\n    expression = self._expression_parts[3]\n    expression = expression.replace('?', '*')\n    if (expression == 'L'):\n        description = _(', on the last day of the month')\n    elif ((expression == 'LW') or (expression == 'WL')):\n        description = _(', on the last weekday of the month')\n    else:\n        regex = re.compile('(\\\\d{1,2}W)|(W\\\\d{1,2})')\n        if regex.match(expression):\n            m = regex.match(expression)\n            day_number = int(m.group().replace('W', ''))\n            day_string = (_('first weekday') if (day_number == 1) else _('weekday nearest day {0}').format(day_number))\n            description = _(', on the {0} of the month').format(day_string)\n        else:\n            description = self.get_segment_description(expression, _(', every day'), (lambda s: s), (lambda s: (_(', every day') if (s == '1') else _(', every {0} days'))), (lambda s: _(', between day {0} and {1} of the month')), (lambda s: _(', on day {0} of the month')))\n    return description", "docstring": "Generates a description for only the DAYOFMONTH portion of the expression\n\nReturns:\nThe DAYOFMONTH description", "source": "codesearchnet"}
{"code": "def _viz_prototype(self, vis_fn):\n\n    def _viz_logger(*args, **kwargs):\n        self.win = vis_fn(*args, win=self.win, env=self.env, opts=self.opts, **kwargs)\n    return _viz_logger", "docstring": "Outputs a function which will log the arguments to Visdom in an appropriate way.\n\nArgs:\nvis_fn: A function, such as self.vis.image", "source": "codesearchnet"}
{"code": "def set_data(self, data={}, datetime_fields=[]):\n    if datetime_fields:\n        for field in datetime_fields:\n            if (field in data):\n                data[field] = self._parse_datetime(data[field])\n    super(CampfireEntity, self).set_data(data)", "docstring": "Set entity data\n\nArgs:\ndata (dict): Entity data\ndatetime_fields (array): Fields that should be parsed as datetimes", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.PutObject = channel.stream_unary(\n        '/pfs.ObjectAPI/PutObject',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.Object.FromString,\n        )\n    self.PutObjectSplit = channel.stream_unary(\n        '/pfs.ObjectAPI/PutObjectSplit',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.Objects.FromString,\n        )\n    self.PutObjects = channel.stream_unary(\n        '/pfs.ObjectAPI/PutObjects',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.GetObject = channel.unary_stream(\n        '/pfs.ObjectAPI/GetObject',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.Object.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )\n    self.GetObjects = channel.unary_stream(\n        '/pfs.ObjectAPI/GetObjects',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.GetObjectsRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )\n    self.GetBlocks = channel.unary_stream(\n        '/pfs.ObjectAPI/GetBlocks',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.GetBlocksRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )\n    self.TagObject = channel.unary_unary(\n        '/pfs.ObjectAPI/TagObject',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.TagObjectRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.InspectObject = channel.unary_unary(\n        '/pfs.ObjectAPI/InspectObject',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.Object.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.ObjectInfo.FromString,\n        )\n    self.CheckObject = channel.unary_unary(\n        '/pfs.ObjectAPI/CheckObject',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.CheckObjectRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.CheckObjectResponse.FromString,\n        )\n    self.ListObjects = channel.unary_stream(\n        '/pfs.ObjectAPI/ListObjects',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.ListObjectsRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.Object.FromString,\n        )\n    self.DeleteObjects = channel.unary_unary(\n        '/pfs.ObjectAPI/DeleteObjects',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteObjectsRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.DeleteObjectsResponse.FromString,\n        )\n    self.GetTag = channel.unary_stream(\n        '/pfs.ObjectAPI/GetTag',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.Tag.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,\n        )\n    self.InspectTag = channel.unary_unary(\n        '/pfs.ObjectAPI/InspectTag',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.Tag.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.ObjectInfo.FromString,\n        )\n    
self.ListTags = channel.unary_stream(\n        '/pfs.ObjectAPI/ListTags',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.ListTagsRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.ListTagsResponse.FromString,\n        )\n    self.DeleteTags = channel.unary_unary(\n        '/pfs.ObjectAPI/DeleteTags',\n        request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteTagsRequest.SerializeToString,\n        response_deserializer=client_dot_pfs_dot_pfs__pb2.DeleteTagsResponse.FromString,\n        )\n    self.Compact = channel.unary_unary(\n        '/pfs.ObjectAPI/Compact',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def delete_weight(self, object_name, weight_name):\n\n    def delete_weight_fn(weights_dict, source_name, target_name=None):\n        if weight_name not in weights_dict[source_name]:\n            raise ValueError(f'Weight {weight_name} not found in object {object_name}. Weights found: {list(weights_dict[source_name].keys())}')\n        weights_dict[source_name].pop(weight_name)\n    self._edit_object(delete_weight_fn, object_name)", "docstring": "Removes a weight from an existing object.\n\nArgs:\nobject_name: String, name or path of the\nobject from which to remove the weight\n(e.g. `\"dense_2\"` or `\"layers/dense_2\"`).\nweight_name: String, name of the weight to\ndelete (e.g. `\"0\"`).", "source": "github-repos"}
{"code": "def get_ext(url):\n    \n\n    parsed = urllib.parse.urlparse(url)\n    root, ext = os.path.splitext(parsed.path)\n    return ext.lstrip('.')", "docstring": "Extract an extension from the url.\n\nArgs:\nurl (str): String representation of a url.\n\nReturns:\nstr: Filename extension from a url (without a dot), '' if extension is not present.", "source": "juraj-google-style"}
{"code": "def load_source(source_file_path):\n    if os.path.isfile(source_file_path):\n        with open(source_file_path, 'rb') as f:\n            source_text = f.read().decode('utf-8')\n        source_lines = source_text.split('\\n')\n    else:\n        source_lines = _try_load_par_source(source_file_path)\n        if source_lines is None:\n            raise IOError('Source path neither exists nor can be loaded as a .par file: %s' % source_file_path)\n    line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3\n    return (source_lines, line_num_width)", "docstring": "Load the content of a Python source code file.\n\nThis function covers the following case:\n1. source_file_path points to an existing Python (.py) file on the\nfile system.\n2. source_file_path is a path within a .par file (i.e., a zip-compressed,\nself-contained Python executable).\n\nArgs:\nsource_file_path: Path to the Python source file to read.\n\nReturns:\nA length-2 tuple:\n- Lines of the source file, as a `list` of `str`s.\n- The width of the string needed to show the line number in the file.\nThis is calculated based on the number of lines in the source file.\n\nRaises:\nIOError: if loading is unsuccessful.", "source": "github-repos"}
{"code": "def _detect_encoding(data=None):\n    \n    import locale\n    enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2',\n                'utf-16', 'cp720']\n    code = locale.getpreferredencoding(False)\n    if data is None:\n        return code\n    if code.lower() not in enc_list:\n        enc_list.insert(0, code.lower())\n    for c in enc_list:\n        try:\n            for line in data:\n                line.decode(c)\n        except (UnicodeDecodeError, UnicodeError, AttributeError):\n            continue\n        return c\n    print(\"Encoding not detected. Please pass encoding value manually\")", "docstring": "Return the default system encoding. If data is passed, try\nto decode the data with the default system encoding or from a short\nlist of encoding types to test.\n\nArgs:\ndata - list of lists\nReturns:\nenc - system encoding", "source": "juraj-google-style"}
{"code": "def merge_single_qubit_gates_into_phased_x_z(\n        circuit: circuits.Circuit,\n        atol: float = 1e-8) -> None:\n    \n\n    def synth(qubit: ops.Qid, matrix: np.ndarray) -> List[ops.Operation]:\n        out_gates = decompositions.single_qubit_matrix_to_phased_x_z(\n            matrix, atol)\n        return [gate(qubit) for gate in out_gates]\n\n    MergeSingleQubitGates(synthesizer=synth).optimize_circuit(circuit)", "docstring": "Canonicalizes runs of single-qubit rotations in a circuit.\n\nSpecifically, any run of non-parameterized circuits will be replaced by an\noptional PhasedX operation followed by an optional Z operation.\n\nArgs:\ncircuit: The circuit to rewrite. This value is mutated in-place.\natol: Absolute tolerance to angle error. Larger values allow more\nnegligible gates to be dropped, smaller values increase accuracy.", "source": "juraj-google-style"}
{"code": "def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):\n    headers = {'Metadata-Flavor': 'Google'}\n    params = urlparse.urlencode((params or {}))\n    url = ('%s?%s' % (metadata_url, params))\n    request = urlrequest.Request(url, headers=headers)\n    request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))\n    timeout = (timeout or self.timeout)\n    return request_opener.open(request, timeout=(timeout * 1.1))", "docstring": "Performs a GET request with the metadata headers.\n\nArgs:\nmetadata_url: string, the URL to perform a GET request on.\nparams: dictionary, the query parameters in the GET request.\ntimeout: int, timeout in seconds for metadata requests.\n\nReturns:\nHTTP response from the GET request.\n\nRaises:\nurlerror.HTTPError: raises when the GET request fails.", "source": "codesearchnet"}
{"code": "def __call__(self, utterances_batch: list, history_batch: list) -> list:\n        \n        return [[True] * len(utterances_batch)] * self.size", "docstring": "Returns skills-utterances application matrix.\n\nGenerates skills-utterances application matrix with all True\nelements.\n\nArgs:\nutterances_batch: A batch of utterances of any type.\nhistory_batch: Not used.\n\nReturns:\nresponse: Skills-utterances application matrix with all True\nelements.", "source": "juraj-google-style"}
{"code": "def batch(self, timelimit=None):\n    from .launcher import BatchLauncher\n    prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:(- 1)])\n    prev_dir = os.path.join(os.path.sep, prev_dir)\n    workdir = os.path.join(prev_dir, (os.path.basename(self.workdir) + '_batch'))\n    return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)", "docstring": "Run the flow in batch mode, return exit status of the job script.\nRequires a manager.yml file and a batch_adapter adapter.\n\nArgs:\ntimelimit: Time limit (int with seconds or string with time given with the slurm convention:\n\"days-hours:minutes:seconds\"). If timelimit is None, the default value specified in the\n`batch_adapter` entry of `manager.yml` is used.", "source": "codesearchnet"}
{"code": "def set_value(self, value: ScalarType) -> None:\n        \n        if isinstance(value, bool):\n            value_str = 'true' if value else 'false'\n        else:\n            value_str = str(value)\n        start_mark = self.yaml_node.start_mark\n        end_mark = self.yaml_node.end_mark\n        \n        \n        \n        tag = self.yaml_node.tag\n        if tag.startswith('tag:yaml.org,2002:'):\n            tag = scalar_type_to_tag[type(value)]\n        new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)\n        self.yaml_node = new_node", "docstring": "Sets the value of the node to a scalar value.\n\nAfter this, is_scalar(type(value)) will return true.\n\nArgs:\nvalue: The value to set this node to, a str, int, float, \\\nbool, or None.", "source": "juraj-google-style"}
{"code": "def query(self, query):\n    path = self.path(query.key)\n    if os.path.exists(path):\n        filenames = os.listdir(path)\n        filenames = list((set(filenames) - set(self.ignore_list)))\n        filenames = map((lambda f: os.path.join(path, f)), filenames)\n        iterable = self._read_object_gen(filenames)\n    else:\n        iterable = list()\n    return query(iterable)", "docstring": "Returns an iterable of objects matching criteria expressed in `query`\nFSDatastore.query queries all the `.obj` files within the directory\nspecified by the query.key.\n\nArgs:\nquery: Query object describing the objects to return.\n\nRaturns:\nCursor with all objects matching criteria", "source": "codesearchnet"}
{"code": "def parse(file_or_string):\n    from mysqlparse.grammar.sql_file import sql_file_syntax\n    if (hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__')):\n        return sql_file_syntax.parseString(file_or_string.read())\n    elif isinstance(file_or_string, six.string_types):\n        return sql_file_syntax.parseString(file_or_string)\n    else:\n        raise TypeError(\"Expected file-like or string object, but got '{type_name}' instead.\".format(type_name=type(file_or_string).__name__))", "docstring": "Parse a file-like object or string.\n\nArgs:\nfile_or_string (file, str): File-like object or string.\n\nReturns:\nParseResults: instance of pyparsing parse results.", "source": "codesearchnet"}
{"code": "def testGradient(self, params, indices, expected_out, out_grad, expected_grad, params_ragged_rank=None):\n    if context.executing_eagerly():\n        return\n    params = ragged_factory_ops.constant(params, dtype=dtypes.float32, ragged_rank=params_ragged_rank)\n    indices = constant_op.constant(indices, dtype=dtypes.int32)\n    out_ragged_rank = params.ragged_rank + indices.shape.ndims - 1\n    out_grad = ragged_factory_ops.constant(out_grad, dtype=dtypes.float32, ragged_rank=out_ragged_rank)\n    expected_out = ragged_factory_ops.constant(expected_out, dtype=dtypes.float32, ragged_rank=out_ragged_rank)\n    expected_grad = ragged_factory_ops.constant(expected_grad, dtype=dtypes.float32, ragged_rank=params.ragged_rank)\n    out = ragged_gather_ops.gather(params, indices)\n    self.assertAllClose(out, expected_out)\n    grads = gradients_impl.gradients(out.flat_values, params.nested_row_splits + (params.flat_values, indices), out_grad.flat_values)\n    param_nested_splits_grads = grads[:-2]\n    params_flat_values_grad = grads[-2]\n    indices_grad = grads[-1]\n    self.assertEqual(indices_grad, None)\n    for splits_grad in param_nested_splits_grads:\n        self.assertEqual(splits_grad, None)\n    self.assertIsInstance(params_flat_values_grad, indexed_slices.IndexedSlices)\n    params_flat_values_grad = ops.convert_to_tensor(params_flat_values_grad)\n    params_grad = params.with_flat_values(params_flat_values_grad)\n    self.assertAllClose(params_grad, expected_grad, atol=2e-06, rtol=2e-06)", "docstring": "Tests that ragged_gather generates the right gradient.\n\nArgs:\nparams: The `params` that should be passed to `gather`.\nindices: The `indices` that should be passed to `gather`.\nexpected_out: The expected value of `gather(params, indices)`.\n`expected_out.shape = indices.shape + params.shape[1:]`.\nout_grad: The value that should be fed in as the gradient for `out`\nwhen testing the gradient of `ragged_gather`.  Must have the same\nshape as `expected_out`.\nexpected_grad: The expected gradient for that should be returned for\n`params`.  Must have hte same shape as `params`.\nparams_ragged_rank: The ragged_rank of `params`.", "source": "github-repos"}
{"code": "def put(self, resource):\n    endpoint = self.endpoint\n    if resource.id:\n        endpoint = self._build_url(endpoint, resource.id)\n    response = self.api.execute('PUT', endpoint, json=resource.as_dict())\n    if (not response.ok):\n        raise Error.parse(response.json())\n    return self._cls.parse(response.json())", "docstring": "Edits an existing resource\n\nArgs:\nresource - gophish.models.Model - The resource instance", "source": "codesearchnet"}
{"code": "def char(self, c: str) -> None:\n    if (self.peek() == c):\n        self.offset += 1\n    else:\n        raise UnexpectedInput(self, f\"char '{c}'\")", "docstring": "Parse the specified character.\n\nArgs:\nc: One-character string.\n\nRaises:\nEndOfInput: If past the end of `self.input`.\nUnexpectedInput: If the next character is different from `c`.", "source": "codesearchnet"}
{"code": "def SendTracebacks(self, request, context):\n    return debug_service_pb2.EventReply()", "docstring": "Base implementation of the handling of SendTracebacks calls.\n\nThe base implementation does nothing with the incoming request.\nOverride in an implementation of the server if necessary.\n\nArgs:\nrequest: A `CallTraceback` proto, containing information about the\ntype (e.g., graph vs. eager execution) and source-code traceback of the\ncall and (any) associated `tf.Graph`s.\ncontext: Server context.\n\nReturns:\nA `EventReply` proto.", "source": "github-repos"}
{"code": "def __init__(self, resource_handle, create_op, name):\n    stamp_token, serialized = gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)\n    slice_spec = ''\n    specs = [saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized')]\n    super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)\n    self.resource_handle = resource_handle\n    self._create_op = create_op", "docstring": "Creates a _TreeEnsembleSavable object.\n\nArgs:\nresource_handle: handle to the decision tree ensemble variable.\ncreate_op: the op to initialize the variable.\nname: the name to save the tree ensemble variable under.", "source": "github-repos"}
{"code": "def create_linear(num_finite_buckets, width, offset):\n    if (num_finite_buckets <= 0):\n        raise ValueError(_BAD_NUM_FINITE_BUCKETS)\n    if (width <= 0.0):\n        raise ValueError((_BAD_FLOAT_ARG % (u'width', 0.0)))\n    return sc_messages.Distribution(bucketCounts=([0] * (num_finite_buckets + 2)), linearBuckets=sc_messages.LinearBuckets(numFiniteBuckets=num_finite_buckets, width=width, offset=offset))", "docstring": "Creates a new instance of distribution with linear buckets.\n\nArgs:\nnum_finite_buckets (int): initializes number of finite buckets\nwidth (float): initializes the width of each bucket\noffset (float): initializes the offset\n\nReturn:\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`\n\nRaises:\nValueError: if the args are invalid for creating an instance", "source": "codesearchnet"}
{"code": "def remove_metric(self, metric_name):\n    with self._lock:\n        metric = self._metrics.pop(metric_name, None)\n        if metric:\n            for reporter in self._reporters:\n                reporter.metric_removal(metric)\n        return metric", "docstring": "Remove a metric if it exists and return it. Return None otherwise.\nIf a metric is removed, `metric_removal` will be invoked\nfor each reporter.\n\nArguments:\nmetric_name (MetricName): The name of the metric\n\nReturns:\nKafkaMetric: the removed `KafkaMetric` or None if no such\nmetric exists", "source": "codesearchnet"}
{"code": "def show_help(bokehjs_action):\n    \n    print()\n    if bokehjs_action in ['built', 'installed']:\n        print(\"Bokeh-specific options available with 'install' or 'develop':\")\n        print()\n        print(\"  --build-js          build and install a fresh BokehJS\")\n        print(\"  --install-js        install only last previously built BokehJS\")\n    else:\n        print(\"Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'\")\n        print()\n        print(\"No extra Bokeh-specific options are available.\")\n    print()", "docstring": "Print information about extra Bokeh-specific command line options.\n\nArgs:\nbokehjs_action (str) : one of 'built', 'installed', or 'packaged'\nhow (or if) BokehJS was installed into the python source tree\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "async def get_active(self, *args, **kwargs):\n    coinids = kwargs.get('coinids')\n    uid = kwargs.get('uid', 0)\n    address = kwargs.get('address')\n    try:\n        coinid = coinid.replace('TEST', '')\n    except:\n        pass\n    try:\n        uid = int(uid)\n    except:\n        return (await self.error_400('User id must be integer. '))\n    if ((not uid) and address):\n        uid = (await self.get_uid_by_address(address=address, coinid=coinid))\n        if isinstance(uid, dict):\n            return uid\n    if (not all([coinids, uid])):\n        return (await self.error_400('Get active. Missed required fields.'))\n    if isinstance(coinids, list):\n        actives = {}\n        for coinid in coinids:\n            database = self.client[self.collection]\n            collection = database[coinid]\n            balance = (await collection.find_one({'uid': uid}))\n            if (not balance):\n                return (await self.error_404(('Get active. Balance with uid:%s and type:%s not found' % (uid, coinid))))\n            actives[coinid] = int(balance['amount_active'])\n    if isinstance(coinids, str):\n        actives = {}\n        for coinid in self.coinids:\n            database = self.client[coinid]\n            collection = database[self.collection]\n            balance = (await collection.find_one({'uid': uid}))\n            if (not balance):\n                return (await self.error_404(('Get active. Balance with uid:%s and type:%s not found' % (uid, coinid))))\n            actives[coinid] = int(balance['amount_active'])\n    return actives", "docstring": "Get active users balance\n\nAccepts:\n- uid [integer] (users id)\n- types [list | string] (array with needed types or \"all\")\n\nReturns:\n{\ntype [string] (blockchain type): amount\n}", "source": "codesearchnet"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets information about a snapshot.\n\nArgs:\nrequest: (DataflowProjectsLocationsSnapshotsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Snapshot) The response message.", "source": "github-repos"}
{"code": "def score_one(self, x: beam.Row) -> Optional[float]:\n    if len(x.__dict__) != 1:\n        raise ValueError('RobustZScore.score_one expected univariate input, but got %s', str(x))\n    v = next(iter(x))\n    if v is None or math.isnan(v):\n        return None\n    median = self._mad_tracker.get_median()\n    mad = self._mad_tracker.get()\n    if math.isnan(mad) or math.isnan(median):\n        return float('NaN')\n    if abs(mad) < EPSILON:\n        return 0.0\n    return abs(RobustZScore.SCALE_FACTOR * (v - median) / mad)", "docstring": "Scores a data point using the Robust Z-Score.\n\nArgs:\nx: A `beam.Row` containing a single numerical value.\n\nReturns:\nfloat | None: The Robust Z-Score.", "source": "github-repos"}
{"code": "def find_file(search_dir, file_pattern):\n    for (root, dirnames, fnames) in os.walk(search_dir):\n        for fname in fnames:\n            if fnmatch.fnmatch(fname, file_pattern):\n                return os.path.join(root, fname)\n    return ''", "docstring": "Search for a file in a directory, and return the first match.\nIf the file is not found return an empty string\n\nArgs:\nsearch_dir: The root directory to search in\nfile_pattern: A unix-style wildcard pattern representing\nthe file to find\n\nReturns:\nThe path to the file if it was found, otherwise an empty string", "source": "codesearchnet"}
{"code": "def create_identical_dataset_and_algorithm_tuner(parent, additional_parents=None, sagemaker_session=None):\n    parent_tuner = HyperparameterTuner.attach(tuning_job_name=parent, sagemaker_session=sagemaker_session)\n    return parent_tuner.identical_dataset_and_algorithm_tuner(additional_parents=additional_parents)", "docstring": "Creates a new tuner by copying the request fields from the provided parent to the new instance of\n``HyperparameterTuner`` followed by addition of warm start configuration with the type as\n\"IdenticalDataAndAlgorithm\" and ``parents`` as the union of provided list of ``additional_parents`` and the\n``parent``.\n\nArgs:\nparent (str): Primary parent tuning job's name from which the Tuner and Estimator configuration has to be copied\nadditional_parents (set{str}): Set of additional parent tuning job's names along with the primary parent tuning\njob name to be used in warm starting the transfer learning tuner.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, one is created\nusing the default AWS configuration chain.\n\nReturns:\nsagemaker.tuner.HyperparameterTuner: a new ``HyperparameterTuner`` object for the warm-started\nhyperparameter tuning job", "source": "codesearchnet"}
{"code": "def send_email(recipients, subject, message, attachments=None):\n    \n    if not attachments:\n        attachments = []\n\n    if os.path.exists(EMAIL_SETTINGS_FILE):\n        email_settings = json.load(open(EMAIL_SETTINGS_FILE))\n        sender = email_settings.get('sender', 'ambry@localhost')\n        use_tls = email_settings.get('use_tls')\n        username = email_settings['username']\n        password = email_settings['password']\n        server = email_settings['server']\n    else:\n        \n        server = 'localhost'\n        username = None\n        password = None\n        sender = 'ambry@localhost'\n\n    \n    msg = MIMEMultipart()\n    msg['Subject'] = subject\n    msg['From'] = sender\n    msg['To'] = ','.join(recipients)\n    msg.attach(MIMEText(message, 'plain'))\n\n    \n    for file_name in attachments:\n        if os.path.exists(file_name):\n            with open(file_name, 'r') as fp:\n                attachment = MIMEBase('application', 'text')\n                attachment.set_payload(fp.read())\n                attachment.add_header(\n                    'Content-Disposition',\n                    'attachment; filename=\"{}\"'.format(os.path.basename(file_name)))\n                msg.attach(attachment)\n\n    \n    srv = smtplib.SMTP(server)\n    if use_tls:\n        srv.starttls()\n    if username:\n        srv.login(username, password)\n\n    srv.sendmail(sender, ','.join(recipients), msg.as_string())\n    srv.quit()", "docstring": "Sends email.\nArgs:\nrecipients (list of str):\nsubject (str):\nmessage (str):\nattachments (list of str): list containing full paths (txt files only) to attach to email.", "source": "juraj-google-style"}
{"code": "def run(self, module, post_check):\n    try:\n        _cwd = os.getcwd()\n        _sys_path = list(sys.path)\n        _sys_argv = list(sys.argv)\n        sys.path.insert(0, os.path.dirname(self._path))\n        sys.argv = ([os.path.basename(self._path)] + self._argv)\n        exec(self._code, module.__dict__)\n        post_check()\n    except Exception as e:\n        self._failed = True\n        self._error_detail = traceback.format_exc()\n        (_exc_type, _exc_value, exc_traceback) = sys.exc_info()\n        (filename, line_number, func, txt) = traceback.extract_tb(exc_traceback)[(- 1)]\n        self._error = ('%s\\nFile \"%s\", line %d, in %s:\\n%s' % (str(e), os.path.basename(filename), line_number, func, txt))\n    finally:\n        os.chdir(_cwd)\n        sys.path = _sys_path\n        sys.argv = _sys_argv\n        self.ran = True", "docstring": "Execute the configured source code in a module and run any post\nchecks.\n\nArgs:\nmodule (Module) : a module to execute the configured code in.\n\npost_check(callable) : a function that can raise an exception\nif expected post-conditions are not met after code execution.", "source": "codesearchnet"}
{"code": "def from_composition_and_pd(comp, pd, working_ion_symbol='Li'):\n    working_ion = Element(working_ion_symbol)\n    entry = None\n    working_ion_entry = None\n    for e in pd.stable_entries:\n        if (e.composition.reduced_formula == comp.reduced_formula):\n            entry = e\n        elif (e.is_element and (e.composition.reduced_formula == working_ion_symbol)):\n            working_ion_entry = e\n    if (not entry):\n        raise ValueError('Not stable compound found at composition {}.'.format(comp))\n    profile = pd.get_element_profile(working_ion, comp)\n    profile.reverse()\n    if (len(profile) < 2):\n        return None\n    working_ion_entry = working_ion_entry\n    working_ion = working_ion_entry.composition.elements[0].symbol\n    normalization_els = {}\n    for (el, amt) in comp.items():\n        if (el != Element(working_ion)):\n            normalization_els[el] = amt\n    vpairs = [ConversionVoltagePair.from_steps(profile[i], profile[(i + 1)], normalization_els) for i in range((len(profile) - 1))]\n    return ConversionElectrode(vpairs, working_ion_entry, comp)", "docstring": "Convenience constructor to make a ConversionElectrode from a\ncomposition and a phase diagram.\n\nArgs:\ncomp:\nStarting composition for ConversionElectrode, e.g.,\nComposition(\"FeF3\")\npd:\nA PhaseDiagram of the relevant system (e.g., Li-Fe-F)\nworking_ion_symbol:\nElement symbol of working ion. Defaults to Li.", "source": "codesearchnet"}
{"code": "def get_service_details(self, service_id: str) -> dict:\n        \n        \n        if not self._manager:\n            raise RuntimeError('Only the Swarm manager node can retrieve all'\n                               ' the services details.')\n\n        service = self._client.services.get(service_id)\n        return service.attrs", "docstring": "Get details of a service.\n\nOnly the manager nodes can retrieve service details\n\nArgs:\nservice_id (string): List of service id\n\nReturns:\ndict, details of the service", "source": "juraj-google-style"}
{"code": "def _get_upload_session_status(res):\n        \n        response = json.loads(res.body.decode())\n        if 'sessionStatus' not in response:\n            try:\n                info = (\n                    response['errorMessage']['additionalInfo']\n                    ['uploader_service.GoogleRupioAdditionalInfo']\n                    ['completionInfo']['customerSpecificInfo']\n                )\n                reason = '{} : {}'.format(info['status'], info['message'])\n            except KeyError:\n                reason = 'unknown reason'\n            raise exceptions.NetworkError('image upload failed: {}'.format(\n                reason\n            ))\n        return response['sessionStatus']", "docstring": "Parse the image upload response to obtain status.\n\nArgs:\nres: http_utils.FetchResponse instance, the upload response\n\nReturns:\ndict, sessionStatus of the response\n\nRaises:\nhangups.NetworkError: If the upload request failed.", "source": "juraj-google-style"}
{"code": "def configure_plugin(self, name, options):\n    url = self._url('/plugins/{0}/set', name)\n    data = options\n    if isinstance(data, dict):\n        data = ['{0}={1}'.format(k, v) for (k, v) in six.iteritems(data)]\n    res = self._post_json(url, data=data)\n    self._raise_for_status(res)\n    return True", "docstring": "Configure a plugin.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\noptions (dict): A key-value mapping of options\n\nReturns:\n``True`` if successful", "source": "codesearchnet"}
{"code": "def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):\n    use_auth_token = kwargs.pop('use_auth_token', None)\n    if use_auth_token is not None:\n        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n        if kwargs.get('token', None) is not None:\n            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n        kwargs['token'] = use_auth_token\n    if os.path.isfile(save_directory):\n        raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n    os.makedirs(save_directory, exist_ok=True)\n    if push_to_hub:\n        commit_message = kwargs.pop('commit_message', None)\n        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n        repo_id = self._create_repo(repo_id, **kwargs)\n        files_timestamps = self._get_files_timestamps(save_directory)\n    if self._auto_class is not None:\n        custom_object_save(self, save_directory, config=self)\n    output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)\n    self.to_json_file(output_video_processor_file)\n    logger.info(f'Video processor saved in {output_video_processor_file}')\n    if push_to_hub:\n        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))\n    return [output_video_processor_file]", "docstring": "Save an video processor object to the directory `save_directory`, so that it can be re-loaded using the\n[`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.\n\nArgs:\nsave_directory (`str` or `os.PathLike`):\nDirectory where the video processor JSON file will be saved (will be created if it does not exist).\npush_to_hub (`bool`, *optional*, defaults to `False`):\nWhether or not to push your model to the Hugging Face model hub after saving it. You can specify the\nrepository you want to push to with `repo_id` (will default to the name of `save_directory` in your\nnamespace).\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.", "source": "github-repos"}
{"code": "def poke_native(getstate):\n    \n    def poke(service, objname, obj, container, visited=None, _stack=None):\n        service.pokeNative(objname, getstate(obj), container)\n    return poke", "docstring": "Serializer factory for types which state can be natively serialized.\n\nArguments:\n\ngetstate (callable): takes an object and returns the object's state\nto be passed to `pokeNative`.\n\nReturns:\n\ncallable: serializer (`poke` routine).", "source": "juraj-google-style"}
{"code": "def memory_read64(self, addr, num_long_words):\n    buf_size = num_long_words\n    buf = (ctypes.c_ulonglong * buf_size)()\n    units_read = self._dll.JLINKARM_ReadMemU64(addr, buf_size, buf, 0)\n    if (units_read < 0):\n        raise errors.JLinkException(units_read)\n    return buf[:units_read]", "docstring": "Reads memory from the target system in units of 64-bits.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_long_words (int): number of long words to read\n\nReturns:\nList of long words read from the target system.\n\nRaises:\nJLinkException: if memory could not be read", "source": "codesearchnet"}
{"code": "def _register_info(self, server):\n    \n    server_url = urllib.parse.urlparse(server.get_url())\n    info = manager.TensorBoardInfo(\n        version=version.VERSION,\n        start_time=int(time.time()),\n        port=server_url.port,\n        pid=os.getpid(),\n        path_prefix=self.flags.path_prefix,\n        logdir=self.flags.logdir,\n        db=self.flags.db,\n        cache_key=self.cache_key,\n    )\n    atexit.register(manager.remove_info_file)\n    manager.write_info_file(info)", "docstring": "Write a TensorBoardInfo file and arrange for its cleanup.\n\nArgs:\nserver: The result of `self._make_server()`.", "source": "juraj-google-style"}
{"code": "def create(self, port, value, timestamp=None):\n    session = self._session\n    datapoint_class = self._datapoint_class\n    attributes = {'port': port, 'value': value}\n    if (timestamp is not None):\n        attributes['timestamp'] = to_iso_date(timestamp)\n    attributes = build_request_body('data-point', None, attributes=attributes)\n\n    def _process(json):\n        data = json.get('data')\n        return datapoint_class(data, session)\n    return session.post(self._base_url, CB.json(201, _process), json=attributes)", "docstring": "Post a new reading to a timeseries.\n\nA reading is comprised of a `port`, a `value` and a timestamp.\n\nA port is like a tag for the given reading and gives an\nindication of the meaning of the value.\n\nThe value of the reading can be any valid json value.\n\nThe timestamp is considered the time the reading was taken, as\nopposed to the `created` time of the data-point which\nrepresents when the data-point was stored in the Helium\nAPI. If the timestamp is not given the server will construct a\ntimestemp upon receiving the new reading.\n\nArgs:\n\nport(string): The port to use for the new data-point\nvalue: The value for the new data-point\n\nKeyword Args:\n\ntimestamp(:class:`datetime`): An optional :class:`datetime` object", "source": "codesearchnet"}
{"code": "def add_line_if_absent(filename: str, line: str) -> None:\n    \n    assert \"\\n\" not in line\n    if not is_line_in_file(filename, line):\n        log.info(\"Appending line {!r} to file {!r}\", line, filename)\n        with open(filename, \"a\") as file:\n            file.writelines([line])", "docstring": "Adds a line (at the end) if it's not already in the file somewhere.\n\nArgs:\nfilename: filename to modify (in place)\nline: line to append (which must not have a newline in)", "source": "juraj-google-style"}
{"code": "def update_query_parameters(url, query_parameters):\n    \n    scheme, netloc, path, query_string, fragment = urlsplit(url)\n    url_params = parse_qs(query_string)\n\n    \n    url_params.update(query_parameters)\n\n    return urlunsplit(\n        (scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),\n    )", "docstring": "Return url with updated query parameters.\n\nArguments:\nurl (str): Original url whose query parameters need to be updated.\nquery_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\nReturns:\n(slug): slug identifier for the identity provider that can be used for identity verification of\nusers associated the enterprise customer of the given user.", "source": "juraj-google-style"}
{"code": "def _invalid_string_quote(self, quote, row, correct_quote=None, col=None):\n    if (not correct_quote):\n        correct_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)\n    self.add_message('invalid-string-quote', line=row, args=(quote, correct_quote), **self.get_offset(col))", "docstring": "Add a message for an invalid string literal quote.\n\nArgs:\nquote: The quote characters that were found.\nrow: The row number the quote character was found on.\ncorrect_quote: The quote characters that is required. If None\n(default), will use the one from the config.\ncol: The column the quote characters were found on.", "source": "codesearchnet"}
{"code": "def _to_camel_case(string):\n        \n        components = string.split('_')\n        return '%s%s' % (\n            components[0],\n            ''.join(c.title() for c in components[1:]),\n        )", "docstring": "Return a camel cased version of the input string.\n\nArgs:\nstring (str): A snake cased string.\n\nReturns:\nstr: A camel cased string.", "source": "juraj-google-style"}
{"code": "def StartFlowAndWait(client_id,\n                     token=None,\n                     timeout=DEFAULT_TIMEOUT,\n                     **flow_args):\n  \n  flow_urn = flow.StartAFF4Flow(\n      client_id=client_id, token=token, sync=True, **flow_args)\n\n  WaitForFlow(flow_urn, token=token, timeout=timeout)\n\n  return flow_urn", "docstring": "Runs a flow and waits for it to finish.\n\nArgs:\nclient_id: The client id of the client to run on.\ntoken: The datastore access token.\ntimeout: How long to wait for a flow to complete, maximum.\n**flow_args: Pass through to flow.\n\nReturns:\nThe urn of the flow that was run.", "source": "juraj-google-style"}
{"code": "def _save_metadata(self):\n    with open(self.paths.metadata(), 'w') as metadata_fd:\n        utils.json_dump(self.metadata, metadata_fd)", "docstring": "Write this prefix metadata to disk\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def input_on_stderr(prompt='', default=None, convert=None):\n    \n\n    print(prompt, end='', file=sys.stderr)\n    value = builtins.input()\n    return _convert(value, default, convert)", "docstring": "Output a string to stderr and wait for input.\n\nArgs:\nprompt (str): the message to display.\ndefault: the default value to return if the user\nleaves the field empty\nconvert (callable): a callable to be used to convert\nthe value the user inserted. If None, the type of\n``default`` will be used.", "source": "juraj-google-style"}
{"code": "def _handle_request_error(self, orig_request, error, start_response):\n    headers = [('Content-Type', 'application/json')]\n    status_code = error.status_code()\n    body = error.rest_error()\n    response_status = ('%d %s' % (status_code, httplib.responses.get(status_code, 'Unknown Error')))\n    cors_handler = self._create_cors_handler(orig_request)\n    return util.send_wsgi_response(response_status, headers, body, start_response, cors_handler=cors_handler)", "docstring": "Handle a request error, converting it to a WSGI response.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nerror: A RequestError containing information about the error.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body.", "source": "codesearchnet"}
{"code": "def __new__(cls, strain_matrix):\n        \n        vscale = np.ones((6,))\n        vscale[3:] *= 2\n        obj = super().__new__(cls, strain_matrix, vscale=vscale)\n        if not obj.is_symmetric():\n            raise ValueError(\"Strain objects must be initialized \"\n                             \"with a symmetric array or a voigt-notation \"\n                             \"vector with six entries.\")\n        return obj.view(cls)", "docstring": "Create a Strain object.  Note that the constructor uses __new__\nrather than __init__ according to the standard method of\nsubclassing numpy ndarrays.  Note also that the default constructor\ndoes not include the deformation gradient\n\nArgs:\nstrain_matrix (3x3 array-like): the 3x3 array-like\nrepresenting the Green-Lagrange strain", "source": "juraj-google-style"}
{"code": "def _symmetric_projection(self, n):\n    q = self._orthogonal_matrix(n)\n    mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)\n    if self.seed:\n        self.seed += 1\n    c = math_ops.multiply(q, mask)\n    return math_ops.matmul(c, array_ops.matrix_transpose(c))", "docstring": "Compute a n x n symmetric projection matrix.\n\nArgs:\nn: Dimension.\n\nReturns:\nA n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.", "source": "github-repos"}
{"code": "def plot_lattice_vectors(lattice, ax=None, **kwargs):\n    \n    ax, fig, plt = get_ax3d_fig_plt(ax)\n\n    if \"color\" not in kwargs:\n        kwargs[\"color\"] = \"g\"\n    if \"linewidth\" not in kwargs:\n        kwargs[\"linewidth\"] = 3\n\n    vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])\n    vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])\n    ax.plot(*zip(vertex1, vertex2), **kwargs)\n    vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])\n    ax.plot(*zip(vertex1, vertex2), **kwargs)\n    vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])\n    ax.plot(*zip(vertex1, vertex2), **kwargs)\n\n    return fig, ax", "docstring": "Adds the basis vectors of the lattice provided to a matplotlib Axes\n\nArgs:\nlattice: Lattice object\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to green\nand linewidth to 3.\n\nReturns:\nmatplotlib figure and matplotlib ax", "source": "juraj-google-style"}
{"code": "def convert_response(allocate_quota_response, project_id):\n    if ((not allocate_quota_response) or (not allocate_quota_response.allocateErrors)):\n        return _IS_OK\n    theError = allocate_quota_response.allocateErrors[0]\n    error_tuple = _QUOTA_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)\n    if (error_tuple[1].find(u'{') == (- 1)):\n        return error_tuple\n    updated_msg = error_tuple[1].format(project_id=project_id, detail=(theError.description or u''))\n    return (error_tuple[0], updated_msg)", "docstring": "Computes a http status code and message `AllocateQuotaResponse`\n\nThe return value a tuple (code, message) where\n\ncode: is the http status code\nmessage: is the message to return\n\nArgs:\nallocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):\nthe response from calling an api\n\nReturns:\ntuple(code, message)", "source": "codesearchnet"}
{"code": "def cache_url_data(self, url, data, attempt=0):\n        \n        \n        if attempt > 5:\n            raise ValueError('too many attempts at writing to the cache')\n        \n        key = self.get_key_from_url(url)\n        \n        \n        if key == \"info.rest\":\n            return\n        \n        current_date = datetime.strftime(self.today, \"%Y-%m-%d\")\n        \n        \n        if IS_PYTHON3:\n            data = data.encode(\"utf-8\")\n        \n        compressed = zlib.compress(data)\n        \n        \n        \n        if IS_PYTHON2:\n            compressed = buffer(compressed)\n        \n        t = (key, self.genome_build, current_date, self.api_version, compressed)\n        \n        cmd = \"INSERT OR REPLACE INTO ensembl \" \\\n            \"(key, genome_build, cache_date, api_version, data) VALUES (?,?,?,?,?)\"\n        try:\n            with self.conn as cursor:\n                cursor.execute(cmd, t)\n        except sqlite3.OperationalError:\n            \n            \n            time.sleep(random.uniform(1, 10))\n            self.cache_url_data(url, data.decode('utf-8'), attempt + 1)", "docstring": "cache the data retrieved from ensembl\n\nArgs:\nurl: URL for the Ensembl REST service\ndata: response data from Ensembl", "source": "juraj-google-style"}
{"code": "def add(self, other_op):\n        \n        self._op.logEntries.extend(other_op.logEntries)\n        self._merge_timestamps(other_op)\n        self._merge_metric_values(other_op)", "docstring": "Combines `other_op` with the operation held by this aggregator.\n\nN.B. It merges the operations log entries and metric values, but makes\nthe assumption the operation is consistent.  It's the callers\nresponsibility to ensure consistency\n\nArgs:\nother_op (\nclass:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):\nan operation merge into this one", "source": "juraj-google-style"}
{"code": "def remove_snippet_client(self, name):\n        \n        if name not in self._snippet_clients:\n            raise Error(self._device, MISSING_SNIPPET_CLIENT_MSG % name)\n        client = self._snippet_clients.pop(name)\n        client.stop_app()", "docstring": "Removes a snippet client from management.\n\nArgs:\nname: string, the name of the snippet client to remove.\n\nRaises:\nError: if no snippet client is managed under the specified name.", "source": "juraj-google-style"}
{"code": "def finalize_options(self):\n        \n        self.cwd = os.path.abspath(os.path.dirname(__file__))\n        self.features_dir = os.path.join(self.cwd, 'tests', 'functional', 'features')\n        self.firmware_dirs = []\n\n        root = os.path.join(self.cwd, 'tests', 'functional', 'firmware')\n        for f in os.listdir(root):\n            fullpath = os.path.join(root, f)\n            if os.path.isdir(fullpath):\n                self.firmware_dirs.append(fullpath)", "docstring": "Populate the attributes.\n\nArgs:\nself (BDDTestCommand): the ``BDDTestCommand`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def check_for_session(self, status=None):\n        \n        status = Status.LAST if status is None else status\n        return os.path.isfile(self.get_restore_path(status)) and os.path.getsize(self.get_restore_path(status)) > 0", "docstring": "check_for_session: see if session is in progress\nArgs:\nstatus (str): step to check if last session reached (optional)\nReturns: boolean indicating if session exists", "source": "juraj-google-style"}
{"code": "def update_user(self, user_obj):\n        \n        LOG.info(\"Updating user %s\", user_obj['_id'])\n        updated_user = self.user_collection.find_one_and_replace(\n            {'_id': user_obj['_id']},\n            user_obj,\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n        return updated_user", "docstring": "Update an existing user.\n\n\nArgs:\nuser_obj(dict)\n\nReturns:\nupdated_user(dict)", "source": "juraj-google-style"}
{"code": "def is_generator_function(obj):\n  \n  CO_GENERATOR = 0x20\n  return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and\n               obj.func_code.co_flags & CO_GENERATOR))", "docstring": "Return true if the object is a user-defined generator function.\n\nGenerator function objects provides same attributes as functions.\nSee isfunction.__doc__ for attributes listing.\n\nAdapted from Python 2.6.\n\nArgs:\nobj: an object to test.\n\nReturns:\ntrue if the object is generator function.", "source": "juraj-google-style"}
{"code": "def help(self, print_output=True):\n    help_text = self._rpc('help')\n    if print_output:\n        print(help_text)\n    else:\n        return help_text", "docstring": "Calls the help RPC, which returns the list of RPC calls available.\n\nThis RPC should normally be used in an interactive console environment\nwhere the output should be printed instead of returned. Otherwise,\nnewlines will be escaped, which will make the output difficult to read.\n\nArgs:\nprint_output: A bool for whether the output should be printed.\n\nReturns:\nA str containing the help output otherwise None if print_output\nwasn't set.", "source": "codesearchnet"}
{"code": "def surface_velocity(msg):\n    \n\n    if common.typecode(msg) < 5 or common.typecode(msg) > 8:\n        raise RuntimeError(\"%s: Not a surface message, expecting 5<TC<8\" % msg)\n\n    mb = common.hex2bin(msg)[32:]\n\n    \n    trk_status = int(mb[12])\n    if trk_status == 1:\n        trk = common.bin2int(mb[13:20]) * 360.0 / 128.0\n        trk = round(trk, 1)\n    else:\n        trk = None\n\n    \n    mov = common.bin2int(mb[5:12])\n\n    if mov == 0 or mov > 124:\n        spd = None\n    elif mov == 1:\n        spd = 0\n    elif mov == 124:\n        spd = 175\n    else:\n        movs = [2, 9, 13, 39, 94, 109, 124]\n        kts = [0.125, 1, 2, 15, 70, 100, 175]\n        i = next(m[0] for m in enumerate(movs) if m[1] > mov)\n        step = (kts[i] - kts[i-1]) * 1.0 / (movs[i]-movs[i-1])\n        spd = kts[i-1] + (mov-movs[i-1]) * step\n        spd = round(spd, 2)\n\n    return spd, trk, 0, 'GS'", "docstring": "Decode surface velocity from from a surface position message\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\n(int, float, int, string): speed (kt), ground track (degree),\nrate of climb/descend (ft/min), and speed type\n('GS' for ground speed, 'AS' for airspeed)", "source": "juraj-google-style"}
{"code": "def __init__(self, key_dtype, value_dtype, default_value, name='MutableHashTable', checkpoint=True, experimental_is_anonymous=False):\n    self._default_value = ops.convert_to_tensor(default_value, dtype=value_dtype)\n    self._value_shape = self._default_value.get_shape()\n    self._checkpoint = checkpoint\n    self._key_dtype = key_dtype\n    self._value_dtype = value_dtype\n    self._name = name\n    self._is_anonymous = experimental_is_anonymous\n    if not self._is_anonymous:\n        self._shared_name = None\n        if context.executing_eagerly():\n            self._shared_name = 'table_%d' % (ops.uid(),)\n    super(MutableHashTable, self).__init__(key_dtype, value_dtype)\n    self._resource_handle = self._create_resource()\n    if checkpoint:\n        saveable = MutableHashTable._Saveable(self, name)\n        if not context.executing_eagerly():\n            ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)", "docstring": "Creates an empty `MutableHashTable` object.\n\nCreates a table, the type of its keys and values are specified by key_dtype\nand value_dtype, respectively.\n\nArgs:\nkey_dtype: the type of the key tensors.\nvalue_dtype: the type of the value tensors.\ndefault_value: The value to use if a key is missing in the table.\nname: A name for the operation (optional).\ncheckpoint: if True, the contents of the table are saved to and restored\nfrom checkpoints. If `shared_name` is empty for a checkpointed table, it\nis shared using the table node name.\nexperimental_is_anonymous: Whether to use anonymous mode for the\ntable (default is False). In anonymous mode, the table\nresource can only be accessed via a resource handle. It can't\nbe looked up by a name. When all resource handles pointing to\nthat resource are gone, the resource will be deleted\nautomatically.\n\nReturns:\nA `MutableHashTable` object.\n\nRaises:\nValueError: If checkpoint is True and no name was specified.", "source": "github-repos"}
{"code": "def walk(self, walker):\n\n    def walk_func(step):\n        for dep in self.graph.downstream(step.name):\n            if (not dep.ok):\n                step.set_status(FailedStatus('dependency has failed'))\n                return step.ok\n        return step.run()\n    return self.graph.walk(walker, walk_func)", "docstring": "Walks each step in the underlying graph, in topological order.\n\nArgs:\nwalker (func): a walker function to be passed to\n:class:`stacker.dag.DAG` to walk the graph.", "source": "codesearchnet"}
{"code": "def get_output_dict(stack):\n    \n    outputs = {}\n    if 'Outputs' not in stack:\n        return outputs\n\n    for output in stack['Outputs']:\n        logger.debug(\"    %s %s: %s\", stack['StackName'], output['OutputKey'],\n                     output['OutputValue'])\n        outputs[output['OutputKey']] = output['OutputValue']\n    return outputs", "docstring": "Returns a dict of key/values for the outputs for a given CF stack.\n\nArgs:\nstack (dict): The stack object to get\noutputs from.\n\nReturns:\ndict: A dictionary with key/values for each output on the stack.", "source": "juraj-google-style"}
{"code": "def create_sonos_playlist(self, title):\n        \n        response = self.avTransport.CreateSavedQueue([\n            ('InstanceID', 0),\n            ('Title', title),\n            ('EnqueuedURI', ''),\n            ('EnqueuedURIMetaData', ''),\n        ])\n\n        item_id = response['AssignedObjectID']\n        obj_id = item_id.split(':', 2)[1]\n        uri = \"file:\n\n        res = [DidlResource(uri=uri, protocol_info=\"x-rincon-playlist:*:*:*\")]\n        return DidlPlaylistContainer(\n            resources=res, title=title, parent_id='SQ:', item_id=item_id)", "docstring": "Create a new empty Sonos playlist.\n\nArgs:\ntitle: Name of the playlist\n\n:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`", "source": "juraj-google-style"}
{"code": "class TvpLoss(nn.Module):\n\n    def __init__(self, losses):\n        super().__init__()\n        self.loss_map = {'iou': self.loss_iou, 'distance': self.loss_distance, 'duration': self.loss_duration}\n        for loss in losses:\n            if loss not in self.loss_map:\n                raise ValueError(f'Loss {loss} not supported')\n        self.losses = losses\n\n    def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):\n        \n        inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time)\n        union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time)\n        iou = 1 - inter.clamp(min=0) / union\n        return iou\n\n    def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):\n        \n        mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0)\n        mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0)\n        distance_diff = torch.div(torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration).clamp(min=0.2)\n        return distance_diff\n\n    def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):\n        \n        duration_candidates = torch.sub(candidates_end_time, candidates_start_time)\n        duration_groundtruth = torch.sub(end_time, start_time)\n        duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration))\n        duration_diff = duration_diff.clamp(min=0.4)\n        return duration_diff\n\n    def forward(self, logits, labels):\n        \n        duration, start_time, end_time = labels\n        candidates = torch.mul(logits, duration)\n        candidates_start_time, candidates_end_time = (candidates[:, 0].float(), candidates[:, 1].float())\n        losses_dict = {}\n        for loss in self.losses:\n            losses_dict.update({loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)})\n        return losses_dict", "docstring": "This class computes the losses for `TvpForVideoGrounding`. The process happens in two steps: 1) we compute\nhungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched\nground-truth / prediction (supervise class and box).\n\nArgs:\nlosses (`List[str]`):\nList of all the losses to be applied.", "source": "github-repos"}
{"code": "def diff(self, a_ref, target=None, b_ref=None):\n    result = {}\n    diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref)\n    result[DIFF_A_REF] = diff_dct[DIFF_A_REF]\n    result[DIFF_B_REF] = diff_dct[DIFF_B_REF]\n    if diff_dct[DIFF_EQUAL]:\n        result[DIFF_EQUAL] = True\n        return result\n    result[DIFF_LIST] = []\n    diff_outs = _get_diff_outs(self, diff_dct)\n    if (target is None):\n        result[DIFF_LIST] = [_diff_royal(self, path, diff_outs[path]) for path in diff_outs]\n    elif (target in diff_outs):\n        result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])]\n    else:\n        msg = \"Have not found file/directory '{}' in the commits\"\n        raise FileNotInCommitError(msg.format(target))\n    return result", "docstring": "Gerenates diff message string output\n\nArgs:\ntarget(str) - file/directory to check diff of\na_ref(str) - first tag\n(optional) b_ref(str) - second git tag\n\nReturns:\nstring: string of output message with diff info", "source": "codesearchnet"}
{"code": "def get_input(self, value, _search=None):\n        \n        if _search is None:\n            if isinstance(value, string_types):\n                _search = lambda s: s.name  \n            elif isinstance(value, type):\n                _search = type\n\n        for i in self.inputs:\n            step = i.get_input(value, _search)\n            if step is not None:\n                return step\n\n        if _search(self) == value:\n            return self", "docstring": "Searches the tree for a step\nArgs:\nvalue: The value to search for. If value is a string then the search looks for\na step of that name. If the value is a type, it looks for a step\nof that type.\nReturns: The first step found via a depth-first search.", "source": "juraj-google-style"}
{"code": "def send(self, conn):\n        \n        if conn is None:\n            raise ValueError(\"Cannot send to connection None\")\n\n        with (yield conn.write_lock.acquire()):\n            sent = 0\n\n            yield conn.write_message(self.header_json, locked=False)\n            sent += len(self.header_json)\n\n            \n            \n\n            yield conn.write_message(self.metadata_json, locked=False)\n            sent += len(self.metadata_json)\n\n            \n            \n\n            yield conn.write_message(self.content_json, locked=False)\n            sent += len(self.content_json)\n\n            sent += yield self.write_buffers(conn, locked=False)\n\n            raise gen.Return(sent)", "docstring": "Send the message on the given connection.\n\nArgs:\nconn (WebSocketHandler) : a WebSocketHandler to send messages\n\nReturns:\nint : number of bytes sent", "source": "juraj-google-style"}
{"code": "def generate(self, information, timeout=(- 1)):\n    return self._client.create(information, timeout=timeout)", "docstring": "Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients.\n\nArgs:\ninformation (dict): Information to generate the certificate for RabbitMQ clients.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: RabbitMQ certificate generated", "source": "codesearchnet"}
{"code": "async def disconnect(self, conn_id):\n    self._ensure_connection(conn_id, True)\n    dev = self._get_property(conn_id, 'device')\n    dev.connected = False\n    self._teardown_connection(conn_id)", "docstring": "Asynchronously disconnect from a connected device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\ncallback (callback): A callback that will be called as\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "codesearchnet"}
{"code": "def get_predicted_structure(self, structure, icsd_vol=False):\n        \n        new_structure = structure.copy()\n        new_structure.scale_lattice(self.predict(structure, icsd_vol=icsd_vol))\n        return new_structure", "docstring": "Given a structure, returns back the structure scaled to predicted\nvolume.\nArgs:\nstructure (Structure): structure w/unknown volume\n\nReturns:\na Structure object with predicted volume", "source": "juraj-google-style"}
{"code": "def truncate(self, new_count):\n        \n        self.posterior_state_estimates = self.posterior_state_estimates[:new_count]\n        self.prior_state_estimates = self.prior_state_estimates[:new_count]\n        self.measurements = self.measurements[:new_count]\n        self.process_matrices = self.process_matrices[:new_count]\n        self.process_covariances = self.process_covariances[:new_count]", "docstring": "Truncate the filter as if only *new_count* :py:meth:`.predict`,\n:py:meth:`.update` steps had been performed. If *new_count* is greater\nthan :py:attr:`.state_count` then this function is a no-op.\n\nMeasurements, state estimates, process matrices and process noises which\nare truncated are discarded.\n\nArgs:\nnew_count (int): Number of states to retain.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, **kwargs):\n    if enabled:\n        self._traceme = _pywrap_traceme.TraceMe(name, **kwargs)\n    else:\n        self._traceme = None", "docstring": "Creates a trace event in the profiler.\n\nArgs:\nname: The name of the trace event.\n**kwargs: Keyword arguments added to the trace event.\nBoth the key and value are of types that\ncan be converted to strings, which will be\ninterpreted by the profiler according to the\ntraceme name.\n\nExample usage:\n\n```python\n\ntf.profiler.experimental.start('logdir')\nfor step in range(num_steps):\n# Creates a trace event for each training step with the\n# step number.\nwith tf.profiler.experimental.Trace(\"Train\", step_num=step):\ntrain_fn()\ntf.profiler.experimental.stop()\n\n```\nThe example above uses the keyword argument \"step_num\" to specify the\ntraining step being traced.", "source": "github-repos"}
{"code": "def BuildServiceStub(self, cls):\n    \n\n    def _ServiceStubInit(stub, rpc_channel):\n      stub.rpc_channel = rpc_channel\n    self.cls = cls\n    cls.__init__ = _ServiceStubInit\n    for method in self.descriptor.methods:\n      setattr(cls, method.name, self._GenerateStubMethod(method))", "docstring": "Constructs the stub class.\n\nArgs:\ncls: The class that will be constructed.", "source": "juraj-google-style"}
{"code": "async def send_script(self, conn_id, data):\n        \n\n        self._ensure_connection(conn_id, True)\n        dev = self._get_property(conn_id, 'device')\n        conn_string = self._get_property(conn_id, 'connection_string')\n\n        \n        await self.notify_progress(conn_string, 'script', 0, len(data))\n        await self.notify_progress(conn_string, 'script', len(data) \n        await self.notify_progress(conn_string, 'script', len(data), len(data))\n\n        dev.script = data", "docstring": "Asynchronously send a a script to this IOTile device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\ndata (bytes or bytearray): the script to send to the device", "source": "juraj-google-style"}
{"code": "def start_cluster_server(ctx, num_gpus=1, rdma=False):\n    import tensorflow as tf\n    from . import gpu_info\n    logging.info('{0}: ======== {1}:{2} ========'.format(ctx.worker_num, ctx.job_name, ctx.task_index))\n    cluster_spec = ctx.cluster_spec\n    logging.info('{0}: Cluster spec: {1}'.format(ctx.worker_num, cluster_spec))\n    if (tf.test.is_built_with_cuda() and (num_gpus > 0)):\n        my_addr = cluster_spec[ctx.job_name][ctx.task_index]\n        my_host = my_addr.split(':')[0]\n        flattened = [v for sublist in cluster_spec.values() for v in sublist]\n        local_peers = [p for p in flattened if p.startswith(my_host)]\n        my_index = local_peers.index(my_addr)\n        gpu_initialized = False\n        retries = 3\n        while ((not gpu_initialized) and (retries > 0)):\n            try:\n                if (ctx.job_name == 'ps'):\n                    num_gpus = 1\n                gpus_to_use = gpu_info.get_gpus(num_gpus, my_index)\n                gpu_prompt = ('GPU' if (num_gpus == 1) else 'GPUs')\n                logging.info('{0}: Using {1}: {2}'.format(ctx.worker_num, gpu_prompt, gpus_to_use))\n                os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use\n                cluster = tf.train.ClusterSpec(cluster_spec)\n                if rdma:\n                    server = tf.train.Server(cluster, ctx.job_name, ctx.task_index, protocol='grpc+verbs')\n                else:\n                    server = tf.train.Server(cluster, ctx.job_name, ctx.task_index)\n                gpu_initialized = True\n            except Exception as e:\n                print(e)\n                logging.error('{0}: Failed to allocate GPU, trying again...'.format(ctx.worker_num))\n                retries -= 1\n                time.sleep(10)\n        if (not gpu_initialized):\n            raise Exception('Failed to allocate GPU')\n    else:\n        os.environ['CUDA_VISIBLE_DEVICES'] = ''\n        logging.info('{0}: Using CPU'.format(ctx.worker_num))\n        cluster = tf.train.ClusterSpec(cluster_spec)\n        server = tf.train.Server(cluster, ctx.job_name, ctx.task_index)\n    return (cluster, server)", "docstring": "Function that wraps the creation of TensorFlow ``tf.train.Server`` for a node in a distributed TensorFlow cluster.\n\nThis is intended to be invoked from within the TF ``map_fun``, replacing explicit code to instantiate ``tf.train.ClusterSpec``\nand ``tf.train.Server`` objects.\n\nArgs:\n:ctx: TFNodeContext containing the metadata specific to this node in the cluster.\n:num_gpu: number of GPUs desired\n:rdma: boolean indicating if RDMA 'iverbs' should be used for cluster communications.\n\nReturns:\nA tuple of (cluster_spec, server)", "source": "codesearchnet"}
{"code": "def register_event(self, direction, verb, child_fn, priority=10):\n        \n        event_managers = []\n        if direction in ('in', 'both'):\n            event_managers.append(self._events_in)\n        if direction in ('out', 'both'):\n            event_managers.append(self._events_out)\n        if direction == 'girc':\n            event_managers.append(self._girc_events)\n\n        for event_manager in event_managers:\n            event_manager.register(verb, child_fn, priority=priority)", "docstring": "Register an event with all servers.\n\nArgs:\ndirection (str): `in`, `out`, `both`, or `girc`.\nverb (str): Event name, `all`, or `raw`.\nchild_fn (function): Handler function.\npriority (int): Handler priority (lower priority executes first).\n\nNote: `all` will not match `raw` events. If you wish to receive both\n`raw` and all other events, you need to register these separately.", "source": "juraj-google-style"}
{"code": "def AFF4Path(self, client_urn):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    if not self.HasField(\"pathtype\"):\n      raise ValueError(\"Can't determine AFF4 path without a valid pathtype.\")\n\n    first_component = self[0]\n    dev = first_component.path\n    if first_component.HasField(\"offset\"):\n      \n      dev += \":{}\".format(first_component.offset \n\n    if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and\n        self[1].pathtype == PathSpec.PathType.TSK):\n      result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]\n\n      \n      start = 1\n    else:\n      \n      result = [self.AFF4_PREFIXES[first_component.pathtype]]\n      start = 0\n\n    for p in self[start]:\n      component = p.path\n\n      \n      \n      \n      \n      \n      if p.HasField(\"offset\"):\n        component += \":{}\".format(p.offset \n\n      \n      if p.HasField(\"stream_name\"):\n        component += \":\" + p.stream_name\n\n      result.append(component)\n\n    return client_urn.Add(\"/\".join(result))", "docstring": "Returns the AFF4 URN this pathspec will be stored under.\n\nArgs:\nclient_urn: A ClientURN.\n\nReturns:\nA urn that corresponds to this pathspec.\n\nRaises:\nValueError: If pathspec is not of the correct type.", "source": "juraj-google-style"}
{"code": "def default_storable(python_type, exposes=None, version=None, storable_type=None, peek=default_peek):\n    \n    if not exposes:\n        for extension in expose_extensions:\n            try:\n                exposes = extension(python_type)\n            except (SystemExit, KeyboardInterrupt):\n                raise\n            except:\n                pass\n            else:\n                if exposes:\n                    break\n        if not exposes:\n            raise AttributeError('`exposes` required for type: {!r}'.format(python_type))\n    return Storable(python_type, key=storable_type, \\\n        handlers=StorableHandler(version=version, exposes=exposes, \\\n        poke=poke(exposes), peek=peek(python_type, exposes)))", "docstring": "Default mechanics for building the storable instance for a type.\n\nArguments:\n\npython_type (type): type.\n\nexposes (iterable): attributes exposed by the type.\n\nversion (tuple): version number.\n\nstorable_type (str): universal string identifier for the type.\n\npeek (callable): peeking routine.\n\nReturns:\n\nStorable: storable instance.", "source": "juraj-google-style"}
{"code": "def export_model(self, export_formats, export_dir=None):\n        \n        export_dir = export_dir or self.logdir\n        return self._export_model(export_formats, export_dir)", "docstring": "Exports model based on export_formats.\n\nSubclasses should override _export_model() to actually\nexport model to local directory.\n\nArgs:\nexport_formats (list): List of formats that should be exported.\nexport_dir (str): Optional dir to place the exported model.\nDefaults to self.logdir.\n\nReturn:\nA dict that maps ExportFormats to successfully exported models.", "source": "juraj-google-style"}
{"code": "def fill_tree(self, tree, input_dict):\n        \n\n\n        def removeAll(tree):\n\n            if tree.model().rowCount() > 0:\n                for i in range(0, tree.model().rowCount()):\n                    item = tree.model().item(i)\n                    del item\n                    tree.model().removeRows(0, tree.model().rowCount())\n                    tree.model().reset()\n\n        def add_probe(tree, instrument, probes):\n            item = QtGui.QStandardItem(instrument)\n            item.setEditable(False)\n\n            for probe in probes.split(','):\n                child_name = QtGui.QStandardItem(probe)\n                child_name.setDragEnabled(True)\n                child_name.setSelectable(True)\n                child_name.setEditable(False)\n                item.appendRow(child_name)\n            tree.model().appendRow(item)\n\n        removeAll(tree)\n\n        for index, (instrument, probes) in enumerate(input_dict.items()):\n            add_probe(tree, instrument, probes)\n            \n        tree.expandAll()", "docstring": "fills a tree with nested parameters\nArgs:\ntree: QtGui.QTreeView\nparameters: dictionary or Parameter object\n\nReturns:", "source": "juraj-google-style"}
{"code": "def github_belspec_files(spec_dir, force: bool=False):\n    if (not force):\n        dtnow = datetime.datetime.utcnow()\n        delta = datetime.timedelta(1)\n        yesterday = (dtnow - delta)\n        for fn in glob.glob(f'{spec_dir}/bel*yaml'):\n            if (datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday):\n                log.info('Skipping BEL Specification update - specs less than 1 day old')\n                return\n    repo_url = 'https:\n    params = {}\n    github_access_token = os.getenv('GITHUB_ACCESS_TOKEN', '')\n    if github_access_token:\n        params = {'access_token': github_access_token}\n    r = requests.get(repo_url, params=params)\n    if (r.status_code == 200):\n        results = r.json()\n        for f in results:\n            url = f['download_url']\n            fn = os.path.basename(url)\n            if (('yaml' not in fn) and ('yml' in fn)):\n                fn = fn.replace('yml', 'yaml')\n            r = requests.get(url, params=params, allow_redirects=True)\n            if (r.status_code == 200):\n                open(f'{spec_dir}/{fn}', 'wb').write(r.content)\n            else:\n                sys.exit(f'Could not get BEL Spec file {url} from Github -- Status: {r.status_code}  Msg: {r.content}')\n    else:\n        sys.exit(f'Could not get BEL Spec directory listing from Github -- Status: {r.status_code}  Msg: {r.content}')", "docstring": "Get belspec files from Github repo\n\n\nArgs:\nspec_dir: directory to store the BEL Specification and derived files\nforce: force update of BEL Specifications from Github - skipped if local files less than 1 day old", "source": "codesearchnet"}
{"code": "def get_obj(self, objpath, metahash, dst_path):\n    incachepath = self.path_in_cache(objpath, metahash)\n    if (not os.path.exists(incachepath)):\n        raise CacheMiss(('%s not in cache.' % incachepath))\n    else:\n        log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())\n        if (not os.path.exists(os.path.dirname(dst_path))):\n            os.makedirs(os.path.dirname(dst_path))\n        os.link(incachepath, dst_path)", "docstring": "Get object from cache, write it to dst_path.\n\nArgs:\nobjpath: filename relative to buildroot\n(example: mini-boot/blahblah/somefile.bin)\nmetahash: metahash. See targets/base.py\ndst_path: Absolute path where the file should be written.\nRaises:\nCacheMiss: if the item is not in the cache", "source": "codesearchnet"}
{"code": "def run(self, resources):\n    hwman = resources['connection']\n    updater = hwman.hwman.app(name='device_updater')\n    updater.run_script(self._script, no_reboot=self._no_reboot)", "docstring": "Actually send the trub script.\n\nArgs:\nresources (dict): A dictionary containing the required resources that\nwe needed access to in order to perform this step.", "source": "codesearchnet"}
{"code": "def run_inference(self, batch: Sequence[numpy.ndarray], inference_session: ort.InferenceSession, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    predictions = self._model_inference_fn(inference_session, batch, inference_args)\n    return utils._convert_to_result(batch, predictions)", "docstring": "Runs inferences on a batch of numpy arrays.\n\nArgs:\nbatch: A sequence of examples as numpy arrays. They should\nbe single examples.\ninference_session: An onnx inference session.\nMust be runnable with input x where x is sequence of numpy array\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def mt_report(context, case_id, test, outpath=None):\n    LOG.info('exporting mitochondrial variants for case \"{}\"'.format(case_id))\n    adapter = context.obj['adapter']\n    query = {'chrom': 'MT'}\n    case_obj = adapter.case(case_id=case_id)\n    if (not case_obj):\n        LOG.warning('Could not find a scout case with id \"{}\". No report was created.'.format(case_id))\n        context.abort()\n    samples = case_obj.get('individuals')\n    mt_variants = list(adapter.variants(case_id=case_id, query=query, nr_of_variants=(- 1), sort_key='position'))\n    if (not mt_variants):\n        LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))\n        context.abort()\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\n    if (not outpath):\n        outpath = str(os.getcwd())\n    written_files = 0\n    for sample in samples:\n        sample_id = sample['individual_id']\n        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)\n        document_name = ('.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx')\n        workbook = Workbook(os.path.join(outpath, document_name))\n        Report_Sheet = workbook.add_worksheet()\n        if (test and sample_lines and workbook):\n            written_files += 1\n            continue\n        row = 0\n        for (col, field) in enumerate(MT_EXPORT_HEADER):\n            Report_Sheet.write(row, col, field)\n        for (row, line) in enumerate(sample_lines, 1):\n            for (col, field) in enumerate(line):\n                Report_Sheet.write(row, col, field)\n        workbook.close()\n        if os.path.exists(os.path.join(outpath, document_name)):\n            written_files += 1\n    if test:\n        LOG.info('Number of excel files that can be written to folder {0}: {1}'.format(outpath, written_files))\n    else:\n        LOG.info('Number of excel files written to folder {0}: {1}'.format(outpath, written_files))\n    return written_files", "docstring": "Export all mitochondrial variants for each sample of a case\nand write them to an excel file\n\nArgs:\nadapter(MongoAdapter)\ncase_id(str)\ntest(bool): True if the function is called for testing purposes\noutpath(str): path to output file\n\nReturns:\nwritten_files(int): number of written or simulated files", "source": "codesearchnet"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new `BuildTrigger`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BuildTrigger) The response message.", "source": "github-repos"}
{"code": "def _country_level_time_zones_for_number(numobj):\n    \n    cc = str(numobj.country_code)\n    for prefix_len in range(TIMEZONE_LONGEST_PREFIX, 0, -1):\n        prefix = cc[:(1 + prefix_len)]\n        if prefix in TIMEZONE_DATA:\n            return TIMEZONE_DATA[prefix]\n    return _UNKNOWN_TIME_ZONE_LIST", "docstring": "Returns the list of time zones corresponding to the country calling code of a number.\nArguments:\nnumobj -- the phone number to look up\nReturns a list of the corresponding time zones or a single element list with the default\nunknown time zone if no other time zone was found or if the number was invalid", "source": "juraj-google-style"}
{"code": "def _parse_list(cls, args):\n        \n        argparser = ArgumentParser(prog=\"cluster list\")\n\n        group = argparser.add_mutually_exclusive_group()\n\n        group.add_argument(\"--id\", dest=\"cluster_id\",\n                           help=\"show cluster with this id\")\n\n        group.add_argument(\"--label\", dest=\"label\",\n                           help=\"show cluster with this label\")\n\n        group.add_argument(\"--state\", dest=\"state\", action=\"store\",\n                           choices=['up', 'down', 'pending', 'terminating'],\n                           help=\"list only clusters in the given state\")\n        pagination_group = group.add_argument_group()\n        pagination_group.add_argument(\"--page\", dest=\"page\", action=\"store\", type=int,\n                           help=\"page number\")\n        pagination_group.add_argument(\"--per-page\", dest=\"per_page\", action=\"store\", type=int,\n                           help=\"number of clusters to be retrieved per page\")\n\n        arguments = argparser.parse_args(args)\n        return vars(arguments)", "docstring": "Parse command line arguments to construct a dictionary of cluster\nparameters that can be used to determine which clusters to list.\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used to determine which clusters to list", "source": "juraj-google-style"}
{"code": "def verified(context, collaborator, test, outpath=None):\n    written_files = 0\n    collaborator = (collaborator or 'cust000')\n    LOG.info('Exporting verified variants for cust {}'.format(collaborator))\n    adapter = context.obj['adapter']\n    verified_vars = adapter.verified(institute_id=collaborator)\n    LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))\n    if (not verified_vars):\n        LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))\n        return None\n    document_lines = export_verified_variants(verified_vars)\n    today = datetime.datetime.now().strftime('%Y-%m-%d')\n    document_name = ('.'.join(['verified_variants', collaborator, today]) + '.xlsx')\n    if (test and document_lines):\n        written_files += 1\n        LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))\n        return written_files\n    if (not outpath):\n        outpath = str(os.getcwd())\n    workbook = Workbook(os.path.join(outpath, document_name))\n    Report_Sheet = workbook.add_worksheet()\n    row = 0\n    for (col, field) in enumerate(VERIFIED_VARIANTS_HEADER):\n        Report_Sheet.write(row, col, field)\n    for (row, line) in enumerate(document_lines, 1):\n        for (col, field) in enumerate(line):\n            Report_Sheet.write(row, col, field)\n    workbook.close()\n    if os.path.exists(os.path.join(outpath, document_name)):\n        LOG.info('Success. Verified variants file of {} lines was written to disk'.format(len(document_lines)))\n        written_files += 1\n    return written_files", "docstring": "Export variants which have been verified for an institute\nand write them to an excel file.\n\nArgs:\ncollaborator(str): institute id\ntest(bool): True if the function is called for testing purposes\noutpath(str): path to output file\n\nReturns:\nwritten_files(int): number of written or simulated files", "source": "codesearchnet"}
{"code": "def most_frequent_terms(self, depth):\n\n        \n\n        counts = self.term_counts()\n\n        \n        top_terms = set(list(counts.keys())[:depth])\n        end_count = list(counts.values())[:depth][-1]\n\n        \n        \n        \n\n        bucket = self.term_count_buckets()[end_count]\n        return top_terms.union(set(bucket))", "docstring": "Get the X most frequent terms in the text, and then probe down to get\nany other terms that have the same count as the last term.\n\nArgs:\ndepth (int): The number of terms.\n\nReturns:\nset: The set of frequent terms.", "source": "juraj-google-style"}
{"code": "def encrypt(self, mesg):\n        \n        seqn = next(self._tx_sn)\n        rv = self._tx_tinh.enc(s_msgpack.en((seqn, mesg)))\n        return rv", "docstring": "Wrap a message with a sequence number and encrypt it.\n\nArgs:\nmesg: The mesg to encrypt.\n\nReturns:\nbytes: The encrypted message.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(ElasticsearchOutputModule, self).__init__(output_mediator)\n    self._raw_fields = False", "docstring": "Initializes an Elasticsearch output module.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def _create_object_from_type_and_dict(cls, obj_dict):\n    value = object.__new__(cls)\n    value.__dict__.update(obj_dict)\n    return value", "docstring": "Creates an object, bypassing the constructor.\n\nCreates an object of type `cls`, whose `__dict__` is updated to contain\n`obj_dict`.\n\nArgs:\ncls: The type of the new object.\nobj_dict: A `Mapping` that should be used to initialize the new object's\n`__dict__`.\n\nReturns:\nAn object of type `cls`.", "source": "github-repos"}
{"code": "def ice_register_write(self, register_index, value, delay=False):\n    self._dll.JLINKARM_WriteICEReg(register_index, int(value), int(delay))\n    return None", "docstring": "Writes a value to an ARM ICE register.\n\nArgs:\nself (JLink): the ``JLink`` instance\nregister_index (int): the ICE register to write to\nvalue (int): the value to write to the ICE register\ndelay (bool): boolean specifying if the write should be delayed\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def _get_parser_call_method(self, parser_to_method):\n        \n        def inner_call(args=None, instance=None):\n            \n            parser = self._cls.parser\n            namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))\n            if instance is None:\n                \n                \n                if \"__init__\" not in parser_to_method:\n                    raise ParseThisError((\"'__init__' method is not decorated. \"\n                                          \"Please provide an instance to \"\n                                          \"'{}.parser.call' or decorate the \"\n                                          \"'__init___' method with \"\n                                          \"'create_parser'\"\n                                          .format(self._cls.__name__)))\n                \n                instance = _call_method_from_namespace(self._cls, \"__init__\",\n                                                       namespace)\n            method_name = parser_to_method[namespace.method]\n            return _call_method_from_namespace(instance, method_name, namespace)\n        return inner_call", "docstring": "Return the parser special method 'call' that handles sub-command\ncalling.\n\nArgs:\nparser_to_method: mapping of the parser registered name\nto the method it is linked to", "source": "juraj-google-style"}
{"code": "def lookup_entity(self, entity=None):\n    if (self._lookuptype == 'clublogxml'):\n        entity = int(entity)\n        if (entity in self._entities):\n            return self._strip_metadata(self._entities[entity])\n        else:\n            raise KeyError\n    elif (self._lookuptype == 'redis'):\n        if (self._redis_prefix is None):\n            raise KeyError('redis_prefix is missing')\n        json_data = self._redis.get(((self._redis_prefix + '_entity_') + str(entity)))\n        if (json_data is not None):\n            my_dict = self._deserialize_data(json_data)\n            return self._strip_metadata(my_dict)\n    elif (self._lookuptype == 'qrz'):\n        result = self._lookup_qrz_dxcc(entity, self._apikey)\n        return result\n    raise KeyError", "docstring": "Returns lookup data of an ADIF Entity\n\nArgs:\nentity (int): ADIF identifier of country\n\nReturns:\ndict: Dictionary containing the country specific data\n\nRaises:\nKeyError: No matching entity found\n\nExample:\nThe following code queries the the Clublog XML database for the ADIF entity Turkmenistan, which has\nthe id 273.\n\n>>> from pyhamtools import LookupLib\n>>> my_lookuplib = LookupLib(lookuptype=\"clublogapi\", apikey=\"myapikey\")\n>>> print my_lookuplib.lookup_entity(273)\n{\n'deleted': False,\n'country': u'TURKMENISTAN',\n'longitude': 58.4,\n'cqz': 17,\n'prefix': u'EZ',\n'latitude': 38.0,\n'continent': u'AS'\n}\n\n\nNote:\nThis method is available for the following lookup type\n\n- clublogxml\n- redis\n- qrz.com", "source": "codesearchnet"}
{"code": "def as_data_frame(self, **kwargs):\n        \n        try:\n            import pandas as pd\n        except ImportError:\n            raise ImportError(\"What are you doing trying to export a Layout \"\n                              \"as a pandas DataFrame when you don't have \"\n                              \"pandas installed? Eh? Eh?\")\n        if kwargs:\n            files = self.get(return_type='obj', **kwargs)\n        else:\n            files = self.files.values()\n        data = pd.DataFrame.from_records([f.entities for f in files])\n        data.insert(0, 'path', [f.path for f in files])\n        return data", "docstring": "Return information for all Files tracked in the Layout as a pandas\nDataFrame.\n\nArgs:\nkwargs: Optional keyword arguments passed on to get(). This allows\none to easily select only a subset of files for export.\nReturns:\nA pandas DataFrame, where each row is a file, and each column is\na tracked entity. NaNs are injected whenever a file has no\nvalue for a given attribute.", "source": "juraj-google-style"}
{"code": "def ProcessMessage(self, message):\n    \n    cert = rdf_crypto.Certificate(message.payload)\n\n    queue = self.well_known_session_id.Queue()\n\n    client_id = message.source\n\n    \n    \n    \n    try:\n      enrolment_cache.Get(client_id)\n      return\n    except KeyError:\n      enrolment_cache.Put(client_id, 1)\n\n    \n    if data_store.AFF4Enabled():\n      client = aff4.FACTORY.Create(\n          client_id, aff4_grr.VFSGRRClient, mode=\"rw\", token=self.token)\n      client_cert = client.Get(client.Schema.CERT)\n\n    if data_store.RelationalDBEnabled():\n      try:\n        md = data_store.REL_DB.ReadClientMetadata(client_id.Basename())\n        client_cert = md.certificate\n      except db.UnknownClientError:\n        client_cert = None\n\n    if data_store.RelationalDBEnabled():\n      data_store.REL_DB.WriteClientMetadata(\n          client_id.Basename(), fleetspeak_enabled=False)\n\n    \n    if not client_cert:\n      \n\n      \n      \n      \n      flow.StartAFF4Flow(\n          client_id=client_id,\n          flow_name=CAEnroler.__name__,  \n          csr=cert,\n          queue=queue,\n          token=self.token)", "docstring": "Begins an enrollment flow for this client.\n\nArgs:\nmessage: The Certificate sent by the client. Note that this message is\nnot authenticated.", "source": "juraj-google-style"}
{"code": "def delete(self, entity):\n    key = _normalize_key(entity)\n    if (key is None):\n        return self.ndb_delete(entity)\n    self.deletes.append(key)", "docstring": "Registers entity to delete from datastore.\n\nArgs:\nentity: an entity, model instance, or key to delete.", "source": "codesearchnet"}
{"code": "def baby_names(max_length=15):\n    names = []\n    lengths = []\n    targets = []\n    with open(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'baby_names.csv'), 'rb') as f:\n        first = True\n        for l in csv.reader(f, delimiter=','):\n            if first:\n                first = False\n                continue\n            assert (len(l) == 4), l\n            name = l[0]\n            if (max_length < len(name)):\n                raise ValueError(('Max length is too small: %d > %d' % (max_length, len(name))))\n            chars = [convert_to_int(c) for c in name]\n            names.append((chars + ([EOS] * (max_length - len(chars)))))\n            lengths.append([len(name)])\n            values = [float(l[2]), float(l[3])]\n            if (abs((sum(values) - 1)) > 0.001):\n                raise ValueError(('Each row must sum to 1: %s' % l))\n            targets.append(values)\n    return (np.array(names), np.array(targets), np.array(lengths))", "docstring": "Opens the baby_names csv file and produces numpy array.\n\nArgs:\nmax_length: The maximum length, 15 was the longest name when this was\nwritten.  Short entries will be padded with the EOS marker.\nReturns:\nA numpy array of the names converted to ascii codes, the labels and an\narray of lengths.\nRaises:\nValueError: if max_length is too small.", "source": "codesearchnet"}
{"code": "def push_datapackage(descriptor, backend, **backend_options):\n    warnings.warn('Functions \"push/pull_datapackage\" are deprecated. Please use \"Package\" class', UserWarning)\n    tables = []\n    schemas = []\n    datamap = {}\n    mapping = {}\n    model = Package(descriptor)\n    plugin = import_module(('jsontableschema.plugins.%s' % backend))\n    storage = plugin.Storage(**backend_options)\n    for resource in model.resources:\n        if (not resource.tabular):\n            continue\n        name = resource.descriptor.get('name', None)\n        table = _convert_path(resource.descriptor['path'], name)\n        schema = resource.descriptor['schema']\n        data = resource.table.iter(keyed=True)\n\n        def values(schema, data):\n            for item in data:\n                row = []\n                for field in schema['fields']:\n                    row.append(item.get(field['name'], None))\n                (yield tuple(row))\n        tables.append(table)\n        schemas.append(schema)\n        datamap[table] = values(schema, data)\n        if (name is not None):\n            mapping[name] = table\n    schemas = _convert_schemas(mapping, schemas)\n    for table in tables:\n        if (table in storage.buckets):\n            storage.delete(table)\n    storage.create(tables, schemas)\n    for table in storage.buckets:\n        if (table in datamap):\n            storage.write(table, datamap[table])\n    return storage", "docstring": "Push Data Package to storage.\n\nAll parameters should be used as keyword arguments.\n\nArgs:\ndescriptor (str): path to descriptor\nbackend (str): backend name like `sql` or `bigquery`\nbackend_options (dict): backend options mentioned in backend docs", "source": "codesearchnet"}
{"code": "def dismiss(self, targets, exit_when=None, sleep_interval=0.5, appearance_timeout=20, timeout=120):\n    try:\n        self.wait_for_any(targets, timeout=appearance_timeout)\n    except PocoTargetTimeout:\n        warnings.warn('Waiting timeout when trying to dismiss something before them appear. Targets are {}'.encode('utf-8').format(targets))\n        return\n    start_time = time.time()\n    while True:\n        no_target = True\n        for t in targets:\n            if t.exists():\n                try:\n                    for n in t:\n                        try:\n                            n.click(sleep_interval=sleep_interval)\n                            no_target = False\n                        except:\n                            pass\n                except:\n                    pass\n        time.sleep(sleep_interval)\n        should_exit = (exit_when() if exit_when else False)\n        if (no_target or should_exit):\n            return\n        if ((time.time() - start_time) > timeout):\n            raise PocoTargetTimeout('dismiss', targets)", "docstring": "Automatically dismiss the target objects\n\nArgs:\ntargets (:obj:`list`): list of poco objects to be dropped\nexit_when: termination condition, default is None which means to automatically exit when list of\n``targets`` is empty\nsleep_interval: time interval between each actions for the given targets, default is 0.5s\nappearance_timeout: time interval to wait for given target to appear on the screen, automatically exit when\ntimeout, default is 20s\ntimeout: dismiss function timeout, default is 120s\n\nRaises:\nPocoTargetTimeout: when dismiss time interval timeout, under normal circumstances, this should not happen\nand if happens, it will be reported", "source": "codesearchnet"}
{"code": "def prefer_static_broadcast_shape(shape1, shape2, name='prefer_static_broadcast_shape'):\n    with ops.name_scope(name, values=[shape1, shape2]):\n\n        def make_shape_tensor(x):\n            return ops.convert_to_tensor(x, name='shape', dtype=dtypes.int32)\n\n        def get_tensor_shape(s):\n            if isinstance(s, tensor_shape.TensorShape):\n                return s\n            s_ = tensor_util.constant_value(make_shape_tensor(s))\n            if s_ is not None:\n                return tensor_shape.TensorShape(s_)\n            return None\n\n        def get_shape_tensor(s):\n            if not isinstance(s, tensor_shape.TensorShape):\n                return make_shape_tensor(s)\n            if s.is_fully_defined():\n                return make_shape_tensor(s.as_list())\n            raise ValueError('Cannot broadcast from partially defined `TensorShape`.')\n        shape1_ = get_tensor_shape(shape1)\n        shape2_ = get_tensor_shape(shape2)\n        if shape1_ is not None and shape2_ is not None:\n            return array_ops.broadcast_static_shape(shape1_, shape2_)\n        shape1_ = get_shape_tensor(shape1)\n        shape2_ = get_shape_tensor(shape2)\n        return array_ops.broadcast_dynamic_shape(shape1_, shape2_)", "docstring": "Convenience function which statically broadcasts shape when possible.\n\nArgs:\nshape1:  `1-D` integer `Tensor`.  Already converted to tensor!\nshape2:  `1-D` integer `Tensor`.  Already converted to tensor!\nname:  A string name to prepend to created ops.\n\nReturns:\nThe broadcast shape, either as `TensorShape` (if broadcast can be done\nstatically), or as a `Tensor`.", "source": "github-repos"}
{"code": "def _package_to_staging(staging_package_url):\n    \n    import google.datalab.ml as ml\n\n    \n    package_root = os.path.abspath(\n        os.path.join(os.path.dirname(__file__), '../../'))\n    setup_path = os.path.abspath(\n        os.path.join(os.path.dirname(__file__), 'master_setup.py'))\n    tar_gz_path = os.path.join(staging_package_url, 'staging', 'trainer.tar.gz')\n\n    print('Building package and uploading to %s' % tar_gz_path)\n    ml.package_and_copy(package_root, setup_path, tar_gz_path)\n\n    return tar_gz_path", "docstring": "Repackage this package from local installed location and copy it to GCS.\n\nArgs:\nstaging_package_url: GCS path.", "source": "juraj-google-style"}
{"code": "def get_column(self, X, column):\n    if isinstance(X, pd.DataFrame):\n        return X[column].values\n    return X[(:, column)]", "docstring": "Return a column of the given matrix.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\ncolumn: `int` or `str`.\n\nReturns:\nnp.ndarray: Selected column.", "source": "codesearchnet"}
{"code": "def outer(vector1, vector2=None):\n    \n    if vector2 is None:\n        vector2 = np.array(vector1).conj()\n    else:\n        vector2 = np.array(vector2).conj()\n    return np.outer(vector1, vector2)", "docstring": "Construct the outer product of two vectors.\n\nThe second vector argument is optional, if absent the projector\nof the first vector will be returned.\n\nArgs:\nvector1 (ndarray): the first vector.\nvector2 (ndarray): the (optional) second vector.\n\nReturns:\nnp.array: The matrix |v1><v2|.", "source": "juraj-google-style"}
{"code": "def from_api(cls, **kwargs):\n    vals = cls.get_non_empty_vals({cls._to_snake_case(k): v for (k, v) in kwargs.items()})\n    remove = []\n    for (attr, val) in vals.items():\n        try:\n            vals[attr] = cls._parse_property(attr, val)\n        except HelpScoutValidationException:\n            remove.append(attr)\n            logger.info('Unexpected property received in API response', exc_info=True)\n    for attr in remove:\n        del vals[attr]\n    return cls(**cls.get_non_empty_vals(vals))", "docstring": "Create a new instance from API arguments.\n\nThis will switch camelCase keys into snake_case for instantiation.\n\nIt will also identify any ``Instance`` or ``List`` properties, and\ninstantiate the proper objects using the values. The end result being\na fully Objectified and Pythonified API response.\n\nReturns:\nBaseModel: Instantiated model using the API values.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n        \n        begin = offset\n        for name, value in self.get_class_attributes():\n            if type(value).__name__ != \"Header\":\n                size = self._unpack_attribute(name, value, buff, begin)\n                begin += size", "docstring": "Unpack a binary message into this object's attributes.\n\nUnpack the binary value *buff* and update this object attributes based\non the results. It is an inplace method and it receives the binary data\nof the message **without the header**.\n\nArgs:\nbuff (bytes): Binary data package to be unpacked, without the\nheader.\noffset (int): Where to begin unpacking.", "source": "juraj-google-style"}
{"code": "def _get_def_class(self, class_obj, member_name):\n    member_obj = getattr(class_obj, member_name)\n    for def_class_obj in inspect.getmro(class_obj):\n        if (member_name in def_class_obj.__dict__):\n            if (def_class_obj.__name__ in self._excluded_classes):\n                return class_obj\n            return def_class_obj\n    self._logger.warning('%s: Definition class not found for member %s.%s, defaulting to class %s', self._log_prefix, class_obj.__name__, member_name, class_obj.__name__)\n    return class_obj", "docstring": "Return the class object in MRO order that defines a member.\n\nclass_obj: Class object that exposes (but not necessarily defines) the\nmember. I.e. starting point of the search.\n\nmember_name: Name of the member (method or attribute).\n\nReturns:\nClass object that defines the member.", "source": "codesearchnet"}
{"code": "def set_config_variables(repo, variables):\n    \n    with repo.config_writer() as writer:\n        for k, value in variables.items():\n            section, option = k.split('.')\n            writer.set_value(section, option, value)\n        writer.release()", "docstring": "Set config variables\n\nArgs:\nrepo (git.Repo): repo\nvariables (dict): entries of the form 'user.email': 'you@example.com'", "source": "juraj-google-style"}
{"code": "def set_integer(self, option, value):\n    try:\n        int_value = int(value)\n    except ValueError as err:\n        print(err.args)\n    self.options[option] = value", "docstring": "Set an integer option.\n\nArgs:\noption (str): name of option.\nvalue (int): value of the option.\n\nRaises:\nValueError: Value must be an integer.", "source": "codesearchnet"}
{"code": "def clone(self) -> 'Event':\n    return self.__class__(copy.deepcopy(self.event), copy.deepcopy(self.metadata))", "docstring": "Clone the event\n\nReturns:\n:class:`slack.events.Event`", "source": "codesearchnet"}
{"code": "def open(self, mode=None):\n    if (mode is None):\n        mode = self.mode\n    elif (mode not in ['r', 'w', 'a']):\n        raise ValueError(\"Invalid mode! Modes: ['a', 'r', 'w']\")\n    if (self._file is None):\n        self._file = h5py.File(self.path, mode=mode)", "docstring": "Open the container file.\n\nArgs:\nmode (str): Either 'r' for read-only, 'w' for truncate and write or\n'a' for append. (default: 'a').\nIf ``None``, uses ``self.mode``.", "source": "codesearchnet"}
{"code": "def _CheckParserCanProcessFileEntry(self, parser, file_entry):\n    \n    for filter_object in parser.FILTERS:\n      if filter_object.Match(file_entry):\n        return True\n\n    return False", "docstring": "Determines if a parser can process a file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry.\nparser (BaseParser): parser.\n\nReturns:\nbool: True if the file entry can be processed by the parser object.", "source": "juraj-google-style"}
{"code": "def _name_search(cls, method, filters):\n        \n        filters = cls._get_name_filters(filters)\n        return [\n            cls.deserialize(cls._zeep_to_dict(row)) for row in method(filters)\n        ]", "docstring": "Helper for search methods that use name filters.\n\nArgs:\nmethod (callable): The Five9 API method to call with the name\nfilters.\nfilters (dict): A dictionary of search parameters, keyed by the\nname of the field to search. This should conform to the\nschema defined in :func:`five9.Five9.create_criteria`.\n\nReturns:\nlist[BaseModel]: A list of records representing the result.", "source": "juraj-google-style"}
{"code": "def divide(x1, x2, output_shape=None, name=None):\n  \n  output_shape = convert_to_shape(output_shape)\n  if not isinstance(x2, Tensor):\n    return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0]\n  with tf.name_scope(name, default_name=\"divide\"):\n    x1, x2 = binary_arguments_to_tensors(x1, x2)\n    return multiply(x1, reciprocal(x2), output_shape=output_shape)", "docstring": "Binary division with broadcasting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    deps = []\n    if not self._built:\n        self._build(tensor_shape.TensorShape(y_pred.shape))\n    if self.multi_label or self.label_weights is not None:\n        shapes = [(y_true, ('N', 'L'))]\n        if self.multi_label:\n            shapes.extend([(self.true_positives, ('T', 'L')), (self.true_negatives, ('T', 'L')), (self.false_positives, ('T', 'L')), (self.false_negatives, ('T', 'L'))])\n        if self.label_weights is not None:\n            shapes.append((self.label_weights, ('L',)))\n        deps = [check_ops.assert_shapes(shapes, message='Number of labels is not consistent.')]\n    label_weights = None if self.multi_label else self.label_weights\n    if self._from_logits:\n        y_pred = activations.sigmoid(y_pred)\n    with ops.control_dependencies(deps):\n        return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, self._thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight, multi_label=self.multi_label, label_weights=label_weights)", "docstring": "Accumulates confusion matrix statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxMarianMTModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n>>> model = FlaxMarianMTModel.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=64, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * 
decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def cmd_rollback(context):\n  \n  last_stable = get_versions(context, return_stable=True)\n  if len(last_stable) != 1:\n    fail(\"Didn't find a version marked stable for key: {} in env/service: {}/{}\".format(\n         context.key, context.env, context.service_name))\n  context.value = last_stable[0].value\n  context.commit_hash = last_stable[0].commit_hash\n  context.build_number = last_stable[0].build_number\n  context.location = last_stable[0].location\n  context.stable = True\n  cmd_set(context)", "docstring": "Roll back by finding the most recent \"stable\" tagged version, and putting it again, so that\nit's the new \"current\" version.\nArgs:\ncontext: a populated EFVersionContext object", "source": "juraj-google-style"}
{"code": "def solidity_resolve_address(hex_code, library_symbol, library_address):\n    if library_address.startswith('0x'):\n        raise ValueError('Address should not contain the 0x prefix')\n    try:\n        decode_hex(library_address)\n    except TypeError:\n        raise ValueError('library_address contains invalid characters, it must be hex encoded.')\n    if ((len(library_symbol) != 40) or (len(library_address) != 40)):\n        raise ValueError('Address with wrong length')\n    return hex_code.replace(library_symbol, library_address)", "docstring": "Change the bytecode to use the given library address.\n\nArgs:\nhex_code (bin): The bytecode encoded in hexadecimal.\nlibrary_name (str): The library that will be resolved.\nlibrary_address (str): The address of the library.\n\nReturns:\nbin: The bytecode encoded in hexadecimal with the library references\nresolved.", "source": "codesearchnet"}
{"code": "def parent_suite(self):\n    if (self.context and self.context.parent_suite_path):\n        return Suite.load(self.context.parent_suite_path)\n    return None", "docstring": "Get the current parent suite.\n\nA parent suite exists when a context within a suite is active. That is,\nduring execution of a tool within a suite, or after a user has entered\nan interactive shell in a suite context, for example via the command-\nline syntax 'tool +i', where 'tool' is an alias in a suite.\n\nReturns:\n`Suite` object, or None if there is no current parent suite.", "source": "codesearchnet"}
{"code": "def present(name, parent=None, vlan=None):\n    \n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n\n    \n    comment_bridge_created = 'Bridge {0} created.'.format(name)\n    comment_bridge_notcreated = 'Unable to create bridge: {0}.'.format(name)\n    comment_bridge_exists = 'Bridge {0} already exists.'.format(name)\n    comment_bridge_mismatch = ('Bridge {0} already exists, but has a different'\n                               ' parent or VLAN ID.').format(name)\n    changes_bridge_created = {name: {'old': 'Bridge {0} does not exist.'.format(name),\n                                     'new': 'Bridge {0} created'.format(name),\n                                     }\n                              }\n\n    bridge_exists = __salt__['openvswitch.bridge_exists'](name)\n    if bridge_exists:\n        current_parent = __salt__['openvswitch.bridge_to_parent'](name)\n        if current_parent == name:\n            current_parent = None\n        current_vlan = __salt__['openvswitch.bridge_to_vlan'](name)\n        if current_vlan == 0:\n            current_vlan = None\n\n    \n    if __opts__['test']:\n        if bridge_exists:\n            if current_parent == parent and current_vlan == vlan:\n                ret['result'] = True\n                ret['comment'] = comment_bridge_exists\n            else:\n                ret['result'] = False\n                ret['comment'] = comment_bridge_mismatch\n        else:\n            ret['result'] = None\n            ret['comment'] = comment_bridge_created\n\n        return ret\n\n    if bridge_exists:\n        if current_parent == parent and current_vlan == vlan:\n            ret['result'] = True\n            ret['comment'] = comment_bridge_exists\n        else:\n            ret['result'] = False\n            ret['comment'] = comment_bridge_mismatch\n    else:\n        bridge_create = __salt__['openvswitch.bridge_create'](\n            name, parent=parent, vlan=vlan)\n        if bridge_create:\n            ret['result'] = True\n            ret['comment'] = comment_bridge_created\n            ret['changes'] = changes_bridge_created\n        else:\n            ret['result'] = False\n            ret['comment'] = comment_bridge_notcreated\n\n    return ret", "docstring": "Ensures that the named bridge exists, eventually creates it.\n\nArgs:\nname: The name of the bridge.\nparent: The name of the parent bridge (if the bridge shall be created\nas a fake bridge). If specified, vlan must also be specified.\nvlan: The VLAN ID of the bridge (if the bridge shall be created as a\nfake bridge). If specified, parent must also be specified.", "source": "juraj-google-style"}
{"code": "def __init__(self):\n        \n        super(JLinkWatchpointInfo, self).__init__()\n        self.SizeOfStruct = ctypes.sizeof(self)", "docstring": "Initializes the ``JLinkWatchpointInfo`` instance.\n\nSets the size of the structure.\n\nArgs:\nself (JLinkWatchpointInfo): the ``JLinkWatchpointInfo`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def GetHasher(cls, hasher_name):\n    hasher_name = hasher_name.lower()\n    if (hasher_name not in cls._hasher_classes):\n        raise KeyError('hasher class not set for name: {0:s}.'.format(hasher_name))\n    hasher_class = cls._hasher_classes[hasher_name]\n    return hasher_class()", "docstring": "Retrieves an instance of a specific hasher.\n\nArgs:\nhasher_name (str): the name of the hasher to retrieve.\n\nReturns:\nBaseHasher: hasher.\n\nRaises:\nKeyError: if hasher class is not set for the corresponding name.", "source": "codesearchnet"}
{"code": "def plot_residuals(self, plot=None):\n    if (plot is None):\n        import matplotlib.pyplot as plot\n    x = numpy.arange(1, (len(self.residuals) + 1))\n    y = _gvar.mean(self.residuals)\n    yerr = _gvar.sdev(self.residuals)\n    plot.errorbar(x=x, y=y, yerr=yerr, fmt='o', color='b')\n    plot.ylabel('normalized residuals')\n    xr = [x[0], x[(- 1)]]\n    plot.plot([x[0], x[(- 1)]], [0, 0], 'r-')\n    plot.fill_between(x=xr, y1=[(- 1), (- 1)], y2=[1, 1], color='r', alpha=0.075)\n    return plot", "docstring": "Plot normalized fit residuals.\n\nThe sum of the squares of the residuals equals ``self.chi2``.\nIndividual residuals should be distributed about one, in\na Gaussian distribution.\n\nArgs:\nplot: :mod:`matplotlib` plotter. If ``None``, uses\n``matplotlib.pyplot`.\n\nReturns:\nPlotter ``plot``.", "source": "codesearchnet"}
{"code": "def delete_nsg(access_token, subscription_id, resource_group, nsg_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name, '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete network security group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnsg_name (str): Name of the NSG.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def cluster_spec(self):\n    raise NotImplementedError()", "docstring": "Retrieve the current state of the cluster and return a `tf.train.ClusterSpec`.\n\nReturns:\nA `tf.train.ClusterSpec` representing the state of the cluster at the\nmoment this function is called.\n\nImplementors of this function must take care in ensuring that the\nClusterSpec returned is up-to-date at the time of calling this function.\nThis usually means retrieving the information from the underlying cluster\nmanagement system every time this function is invoked and reconstructing\na cluster_spec, rather than attempting to cache anything.", "source": "github-repos"}
{"code": "def set_topic(self, topic):\n    if (not topic):\n        topic = ''\n    result = self._connection.put(('room/%s' % self.id), {'room': {'topic': topic}})\n    if result['success']:\n        self._load()\n    return result['success']", "docstring": "Set the room topic.\n\nArgs:\ntopic (str): Topic\n\nReturns:\nbool. Success", "source": "codesearchnet"}
{"code": "def make_layer_stack(layers=gin.REQUIRED, num_layers=6):\n    return LayerStack(([cls() for cls in layers] * num_layers))", "docstring": "Configurable layer stack.\n\nArgs:\nlayers: a list of subclasses of TransformerLayer\nnum_layers: an integer\nReturns:\na LayerStack", "source": "codesearchnet"}
{"code": "def kaiser(x, beta):\n    if any_symbolic_tensors((x,)):\n        return Kaiser(beta).symbolic_call(x)\n    return backend.numpy.kaiser(x, beta)", "docstring": "Kaiser window function.\n\nThe Kaiser window is defined as:\n`w[n] = I0(beta * sqrt(1 - (2n / (N - 1) - 1)^2)) / I0(beta)`\nwhere I0 is the modified zeroth-order Bessel function of the first kind.\n\nArgs:\nx: Scalar or 1D Tensor. The window length.\nbeta: Float. Shape parameter for the Kaiser window.\n\nReturns:\nA 1D tensor containing the Kaiser window values.\n\nExample:\n>>> x = keras.ops.convert_to_tensor(5)\n>>> keras.ops.kaiser(x, beta=14.0)\narray([7.7268669e-06, 1.6493219e-01, 1.0000000e+00, 1.6493219e-01,\n7.7268669e-06], dtype=float32)", "source": "github-repos"}
{"code": "def add_event(self, event):\n    if not self._closed:\n        self._try_put(event)", "docstring": "Adds an event to the event file.\n\nArgs:\nevent: An `Event` protocol buffer.", "source": "github-repos"}
{"code": "def bessel_i1e(x, name=None):\n    with ops.name_scope(name, 'bessel_i1e', [x]):\n        return gen_special_math_ops.bessel_i1e(x)", "docstring": "Computes the Bessel i1e function of `x` element-wise.\n\nModified Bessel function of order 1.\n\n>>> tf.math.special.bessel_i1e([-1., -0.5, 0.5, 1.]).numpy()\narray([-0.20791042, -0.15642083,  0.15642083,  0.20791042], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.i1e\n@end_compatibility", "source": "github-repos"}
{"code": "def get_median(self):\n    return self._median_tracker.get()", "docstring": "Retrieves the current median value.\n\nReturns:\nfloat: The median of the values within the defined window. Returns `NaN`\nif the window is empty.", "source": "github-repos"}
{"code": "def get_unassigned_ports(self):\n    uri = '{}/unassignedPortsForPortMonitor'.format(self.data['uri'])\n    response = self._helper.do_get(uri)\n    return self._helper.get_members(response)", "docstring": "Gets the collection ports from the member interconnects\nwhich are eligible for assignment to an anlyzer port\n\nReturns:\ndict: Collection of ports", "source": "codesearchnet"}
{"code": "def from_task(cls, task):\n    target = cls(name=task.get_name(), params=task.get_param_string())\n    return target", "docstring": "Create a new target representing a task and its parameters\n\nArgs:\ntask: Task instance to create target for; the task class has to inherit\nfrom :class:`ozelot.tasks.TaskBase`.\n\nReturns:\nozelot.tasks.ORMTarget: a new target instance", "source": "codesearchnet"}
{"code": "def setup_lookup_table(self, hamiltonian='nearest-neighbour'):\n    expected_hamiltonian_values = ['nearest-neighbour', 'coordination_number']\n    if (hamiltonian not in expected_hamiltonian_values):\n        raise ValueError\n    self.lattice.jump_lookup_table = lookup_table.LookupTable(self.lattice, hamiltonian)", "docstring": "Create a jump-probability look-up table corresponding to the appropriate Hamiltonian.\n\nArgs:\nhamiltonian (Str, optional): String specifying the simulation Hamiltonian.\nvalid values are 'nearest-neighbour' (default) and 'coordination_number'.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool=False) -> tf.Tensor:\n    scale = (height * width \n    if height > width:\n        feat_width = int(np.round(width / scale))\n        feat_height = shape_list(attentions)[2] \n    else:\n        feat_height = int(np.round(height / scale))\n        feat_width = shape_list(attentions)[2] \n    batch_size = shape_list(attentions)[0]\n    groups = shape_list(attentions)[1]\n    attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width))\n    attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))\n    if align_corners:\n        attentions = tf.compat.v1.image.resize(attentions, size=(height, width), method='bilinear', align_corners=align_corners)\n    else:\n        attentions = tf.image.resize(attentions, size=(height, width), method='bilinear')\n    attentions = tf.transpose(attentions, perm=(0, 3, 1, 2))\n    return attentions", "docstring": "Args:\nattentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]\nheight (`int`): height of the output attention map\nwidth (`int`): width of the output attention map\nalign_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.\n\nReturns:\n`tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]", "source": "github-repos"}
{"code": "def exists(self):\n    try:\n        info = self._api.tables_get(self._name_parts)\n    except google.datalab.utils.RequestException as e:\n        if (e.status == 404):\n            return False\n        raise e\n    except Exception as e:\n        raise e\n    self._info = info\n    return True", "docstring": "Checks if the table exists.\n\nReturns:\nTrue if the table exists; False otherwise.\nRaises:\nException if there was an error requesting information about the table.", "source": "codesearchnet"}
{"code": "def remove_checkpoint(checkpoint_prefix, checkpoint_format_version=saver_pb2.SaverDef.V2, meta_graph_suffix='meta'):\n    _delete_file_if_exists(meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\n    if checkpoint_format_version == saver_pb2.SaverDef.V2:\n        _delete_file_if_exists(checkpoint_prefix + '.index')\n        _delete_file_if_exists(checkpoint_prefix + '.data-?????-of-?????')\n    else:\n        _delete_file_if_exists(checkpoint_prefix)", "docstring": "Removes a checkpoint given by `checkpoint_prefix`.\n\nArgs:\ncheckpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result\nof `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of\nsharded/non-sharded or V1/V2.\ncheckpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to\n`SaverDef.V2`.\nmeta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.", "source": "github-repos"}
{"code": "def get_ethernet_networks(self):\n    network_uris = self.data.get('networkUris')\n    networks = []\n    if network_uris:\n        for uri in network_uris:\n            networks.append(self._ethernet_networks.get_by_uri(uri))\n    return networks", "docstring": "Gets a list of associated ethernet networks of an uplink set.\n\nArgs:\nid_or_uri: Can be either the uplink set id or the uplink set uri.\n\nReturns:\nlist: Associated ethernet networks.", "source": "codesearchnet"}
{"code": "def error_print(msg, color=colorama.Fore.RED, file=sys.stderr):\n    if CLI_QUIET:\n        return\n    file.write('{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format(sep=_linesep_for_file(file), bright=colorama.Style.BRIGHT, color=color, normal=colorama.Style.NORMAL, msg=msg, reset=colorama.Style.RESET_ALL))\n    file.flush()", "docstring": "Print the error message to the file in the specified color.\n\nArgs:\nmsg: The error message to be printed.\ncolor: Optional colorama color string to be applied to the message. You can\nconcatenate colorama color strings together here, but note that style\nstrings will not be applied.\nfile: A file object to which the baracketed text will be written. Intended\nfor use with CLI output file objects, specifically sys.stderr.", "source": "codesearchnet"}
{"code": "def apply_actions(self, actions):\n    modified = []\n    for a in actions:\n        if ('dict' in a):\n            k = a['dict']\n            modified.append(k)\n            self.feffinp[k] = self.modify_object(a['action'], self.feffinp[k])\n        elif ('file' in a):\n            self.modify(a['action'], a['file'])\n        else:\n            raise ValueError('Unrecognized format: {}'.format(a))\n    if modified:\n        feff = self.feffinp\n        feff_input = '\\n\\n'.join((str(feff[k]) for k in ['HEADER', 'PARAMETERS', 'POTENTIALS', 'ATOMS'] if (k in feff)))\n        for (k, v) in six.iteritems(feff):\n            with open(os.path.join('.', k), 'w') as f:\n                f.write(str(v))\n        with open(os.path.join('.', 'feff.inp'), 'w') as f:\n            f.write(feff_input)", "docstring": "Applies a list of actions to the FEFF Input Set and rewrites modified\nfiles.\n\nArgs:\nactions [dict]: A list of actions of the form {'file': filename,\n'action': moddermodification} or {'dict': feffinput_key,\n'action': moddermodification}", "source": "codesearchnet"}
{"code": "def _send_receive(self, payload):\n        \n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n        s.settimeout(self.connection_timeout)\n        s.connect((self.address, self.port))\n        s.send(payload)\n        data = s.recv(1024)\n        s.close()\n        return data", "docstring": "Send single buffer `payload` and receive a single buffer.\n\nArgs:\npayload(bytes): Data to send.", "source": "juraj-google-style"}
{"code": "def get_dimensions(js_dict, naming):\n    \n\n    dimensions = []\n    dim_names = []\n    if check_version_2(js_dict):\n        dimension_dict = js_dict\n    else:\n        dimension_dict = js_dict['dimension']\n    for dim in dimension_dict['id']:\n        dim_name = js_dict['dimension'][dim]['label']\n        if not dim_name:\n            dim_name = dim\n        if naming == 'label':\n            dim_label = get_dim_label(js_dict, dim)\n            dimensions.append(dim_label)\n            dim_names.append(dim_name)\n        else:\n            dim_index = get_dim_index(js_dict, dim)\n            dimensions.append(dim_index)\n            dim_names.append(dim)\n    return dimensions, dim_names", "docstring": "Get dimensions from input data.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\nnaming (string, optional): dimension naming. Possible values: 'label' \\\nor 'id'.\n\nReturns:\ndimensions (list): list of pandas data frames with dimension \\\ncategory data.\ndim_names (list): list of strings with dimension names.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(DynamicFieldsHelper, self).__init__()\n    self._output_mediator = output_mediator", "docstring": "Initializes a dynamic fields helper.\n\nArgs:\noutput_mediator (OutputMediator): output mediator.", "source": "juraj-google-style"}
{"code": "def parse(self) -> bool:\n    self.skip_ws()\n    res = self._feature_disj()\n    self.skip_ws()\n    if (not self.at_end()):\n        raise InvalidFeatureExpression(self)\n    return res", "docstring": "Parse and evaluate a complete feature expression.\n\nRaises:\nInvalidFeatureExpression: If the if-feature expression is not\nsyntactically correct.\nUnknownPrefix: If a prefix of a feature name is not declared.", "source": "codesearchnet"}
{"code": "def sheetNames(book=None):\n    if book:\n        if (not (book.lower() in [x.lower() for x in bookNames()])):\n            return False\n    else:\n        book = activeBook()\n    if (not book):\n        return False\n    poBook = PyOrigin.WorksheetPages(book)\n    if (not len(poBook)):\n        return None\n    return [x.GetName() for x in poBook.Layers()]", "docstring": "return sheet names of a book.\n\nArgs:\nbook (str, optional): If a book is given, pull names from\nthat book. Otherwise, try the active one\n\nReturns:\nlist of sheet names (typical case).\nNone if book has no sheets.\nFalse if book doesn't exlist.", "source": "codesearchnet"}
{"code": "def get_kinds(start=None, end=None):\n    q = Kind.query()\n    if ((start is not None) and (start != '')):\n        q = q.filter((Kind.key >= Kind.key_for_kind(start)))\n    if (end is not None):\n        if (end == ''):\n            return []\n        q = q.filter((Kind.key < Kind.key_for_kind(end)))\n    return [x.kind_name for x in q]", "docstring": "Return all kinds in the specified range, for the current namespace.\n\nArgs:\nstart: only return kinds >= start if start is not None.\nend: only return kinds < end if end is not None.\n\nReturns:\nA list of kind names between the (optional) start and end values.", "source": "codesearchnet"}
{"code": "def _remove_hidden_parts(projected_surface):\n    surface = np.copy(projected_surface)\n    surface[(~ _make_occlusion_mask(projected_surface))] = np.nan\n    return surface", "docstring": "Removes parts of a projected surface that are not visible.\n\nArgs:\nprojected_surface (surface): the surface to use\n\nReturns:\nsurface: A projected surface.", "source": "codesearchnet"}
{"code": "def get_unstable_entries(self, charge_to_discharge=True):\n    list_copy = list(self._unstable_entries)\n    return (list_copy if charge_to_discharge else list_copy.reverse())", "docstring": "Returns the unstable entries for the electrode.\n\nArgs:\ncharge_to_discharge: Order from most charge to most discharged\nstate? Defaults to True.\n\nReturns:\nA list of unstable entries in the electrode, ordered by amount of\nthe working ion.", "source": "codesearchnet"}
{"code": "def get_tag_hash(self, tag_name):\n        \n        tag_object = get_single_item_from_sequence(\n            sequence=self._github_repository.tags(),\n            condition=lambda tag: tag.name == tag_name,\n            no_item_error_message='No tag \"{}\" exist'.format(tag_name),\n            too_many_item_error_message='Too many tags \"{}\" found'.format(tag_name),\n        )\n\n        return tag_object.commit.sha", "docstring": "Fetch the commit hash that was tagged with ``tag_name``.\n\nArgs:\ntag_name (str): the name of the tag\n\nReturns:\nstr: the commit hash linked by the tag", "source": "juraj-google-style"}
{"code": "def __init__(self, global_rate_limit_qps: int, latency_per_request: float, max_concurrent_requests: int, use_metrics: bool):\n    self._rate_limit = global_rate_limit_qps\n    self._latency_per_request = datetime.timedelta(seconds=latency_per_request)\n    self._num_shards = max(1, min(int(self._rate_limit * self._latency_per_request.total_seconds()), max_concurrent_requests))\n    self.use_metrics = use_metrics", "docstring": "Creates a RateLimit object.\n\nglobal_rate_limit_qps and latency_per_request are used to determine how the\ndata should be sharded via:\nglobal_rate_limit_qps * latency_per_request.total_seconds()\n\nFor example, global_rate_limit_qps = 500 and latency_per_request=.5 seconds.\nThen the data will be sharded into 500*.5=250 groups.  Each group can be\nprocessed in parallel and will call the 'process' function at most once\nevery latency_per_request.\n\nIt is important to note that the max QPS may not be reach based on how many\nworkers are scheduled.\n\nArgs:\nglobal_rate_limit_qps: QPS to rate limit requests across all workers to.\nlatency_per_request: The expected latency per request.\nmax_concurrent_requests: Maximum allowed concurrent api requests to EE.", "source": "github-repos"}
{"code": "def fetch_friends(self, user):\n    if USING_ALLAUTH:\n        raise NotImplementedError('VKontakte support is not implemented for django-allauth')\n    else:\n        social_auth_backend = VKOAuth2Backend()\n        tokens = social_auth_backend.tokens(user)\n        oauth_token = tokens['access_token']\n    api = vkontakte.API(token=oauth_token)\n    return api.get('friends.get')", "docstring": "fethces friends from VKontakte using the access_token\nfethched by django-social-auth.\n\nNote - user isn't a user - it's a UserSocialAuth if using social auth, or a SocialAccount if using allauth\n\nReturns:\ncollection of friend objects fetched from VKontakte", "source": "codesearchnet"}
{"code": "def load_checkpoint(model, filename, map_location=None, strict=False, logger=None):\n    if filename.startswith('modelzoo:\n        import torchvision\n        model_urls = dict()\n        for (_, name, ispkg) in pkgutil.walk_packages(torchvision.models.__path__):\n            if (not ispkg):\n                _zoo = import_module('torchvision.models.{}'.format(name))\n                _urls = getattr(_zoo, 'model_urls')\n                model_urls.update(_urls)\n        model_name = filename[11:]\n        checkpoint = model_zoo.load_url(model_urls[model_name])\n    elif filename.startswith('open-mmlab:\n        model_name = filename[13:]\n        checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name])\n    elif filename.startswith(('http:\n        checkpoint = model_zoo.load_url(filename)\n    else:\n        if (not osp.isfile(filename)):\n            raise IOError('{} is not a checkpoint file'.format(filename))\n        checkpoint = torch.load(filename, map_location=map_location)\n    if isinstance(checkpoint, OrderedDict):\n        state_dict = checkpoint\n    elif (isinstance(checkpoint, dict) and ('state_dict' in checkpoint)):\n        state_dict = checkpoint['state_dict']\n    else:\n        raise RuntimeError('No state_dict found in checkpoint file {}'.format(filename))\n    if list(state_dict.keys())[0].startswith('module.'):\n        state_dict = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}\n    if hasattr(model, 'module'):\n        load_state_dict(model.module, state_dict, strict, logger)\n    else:\n        load_state_dict(model, state_dict, strict, logger)\n    return checkpoint", "docstring": "Load checkpoint from a file or URI.\n\nArgs:\nmodel (Module): Module to load checkpoint.\nfilename (str): Either a filepath or URL or modelzoo://xxxxxxx.\nmap_location (str): Same as :func:`torch.load`.\nstrict (bool): Whether to allow different params for the model and\ncheckpoint.\nlogger (:mod:`logging.Logger` or None): The logger for error message.\n\nReturns:\ndict or OrderedDict: The loaded checkpoint.", "source": "codesearchnet"}
{"code": "def _CreatePerformanceTarget(client, campaign_group_id):\n  \n  \n  cgpt_service = client.GetService('CampaignGroupPerformanceTargetService',\n                                   version='v201809')\n\n  \n  operations = [{\n      'operator': 'ADD',\n      \n      'operand': {\n          'campaignGroupId': campaign_group_id,\n          'performanceTarget': {\n              \n              'efficiencyTargetType': 'CPC_LESS_THAN_OR_EQUAL_TO',\n              'efficiencyTargetValue': 3000000,\n              \n              'spendTargetType': 'MAXIMUM',\n              'spendTarget': {\n                  'microAmount': 500000000\n              },\n              \n              'volumeGoalType': 'MAXIMIZE_CLICKS',\n              'volumeTargetValue': 3000,\n              \n              \n              'startDate': datetime.datetime.now().strftime('%Y%m%d'),\n              'endDate': (datetime.datetime.now() +\n                          datetime.timedelta(90)).strftime('%Y%m%d')\n          }\n      }\n  }]\n\n  cgpt = cgpt_service.mutate(operations)['value'][0]\n\n  \n  print ('Campaign performance target with ID \"%d\" was added for campaign '\n         'group ID \"%d\".' % (cgpt['id'], cgpt['campaignGroupId']))", "docstring": "Creates a performance target for the campaign group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_group_id: an integer ID for the campaign group.", "source": "juraj-google-style"}
{"code": "def __mul__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.multiply, tf.float32)", "docstring": "Returns a TensorFluent for the multiplication arithmetic operator.\n\nArgs:\nself: The first operand.\nother: The second operand.\n\nReturns:\nA TensorFluent wrapping the operator's output.", "source": "juraj-google-style"}
{"code": "def call_each(seq):\n    try:\n        reduce((lambda _, y: y()), seq)\n    except TypeError as e:\n        if (text_type(e) != 'reduce() of empty sequence with no initial value'):\n            raise", "docstring": "Calls each element of sequence to invoke the side effect.\n\nArgs:\nseq:\n\nReturns: None", "source": "codesearchnet"}
{"code": "def _credentials_found_in_envars():\n    return any([os.getenv('PAN_ACCESS_TOKEN'), os.getenv('PAN_CLIENT_ID'), os.getenv('PAN_CLIENT_SECRET'), os.getenv('PAN_REFRESH_TOKEN')])", "docstring": "Check for credentials in envars.\n\nReturns:\nbool: ``True`` if at least one is found, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def CheckMySQLConnection(db_options):\n    for tries_left in range(_MYSQL_MAX_RETRIES, (- 1), (- 1)):\n        try:\n            connection_options = dict(host=db_options['Mysql.host'], port=db_options['Mysql.port'], db=db_options['Mysql.database_name'], user=db_options['Mysql.database_username'], passwd=db_options['Mysql.database_password'], charset='utf8')\n            ssl_enabled = ('Mysql.client_key_path' in db_options)\n            if ssl_enabled:\n                connection_options['ssl'] = {'key': db_options['Mysql.client_key_path'], 'cert': db_options['Mysql.client_cert_path'], 'ca': db_options['Mysql.ca_cert_path']}\n            connection = MySQLdb.connect(**connection_options)\n            if ssl_enabled:\n                cursor = connection.cursor()\n                cursor.execute(\"SHOW VARIABLES LIKE 'have_ssl'\")\n                res = cursor.fetchone()\n                if ((res[0] == 'have_ssl') and (res[1] == 'YES')):\n                    print('SSL enabled successfully.')\n                else:\n                    print('Unable to establish SSL connection to MySQL.')\n                    return False\n            return True\n        except MySQLdb.OperationalError as mysql_op_error:\n            if (len(mysql_op_error.args) < 2):\n                print(('Unexpected exception type received from MySQL. %d attempts left: %s' % (tries_left, mysql_op_error)))\n                time.sleep(_MYSQL_RETRY_WAIT_SECS)\n                continue\n            if (mysql_op_error.args[0] == mysql_conn_errors.CONNECTION_ERROR):\n                print(('Failed to connect to MySQL. Is it running? %d attempts left.' % tries_left))\n            elif (mysql_op_error.args[0] == mysql_conn_errors.UNKNOWN_HOST):\n                print('Unknown-hostname error encountered while trying to connect to MySQL.')\n                return False\n            elif (mysql_op_error.args[0] == general_mysql_errors.BAD_DB_ERROR):\n                return True\n            elif (mysql_op_error.args[0] in (general_mysql_errors.ACCESS_DENIED_ERROR, general_mysql_errors.DBACCESS_DENIED_ERROR)):\n                print(('Permission error encountered while trying to connect to MySQL: %s' % mysql_op_error))\n                return False\n            else:\n                print(('Unexpected operational error encountered while trying to connect to MySQL. %d attempts left: %s' % (tries_left, mysql_op_error)))\n        except MySQLdb.Error as mysql_error:\n            print(('Unexpected error encountered while trying to connect to MySQL. %d attempts left: %s' % (tries_left, mysql_error)))\n        time.sleep(_MYSQL_RETRY_WAIT_SECS)\n    return False", "docstring": "Checks whether a connection can be established to MySQL.\n\nArgs:\ndb_options: A dict mapping GRR MySQL config options to their values.\n\nReturns:\nA boolean indicating whether a connection could be made to a MySQL server\ninstance with the given options.", "source": "codesearchnet"}
{"code": "def ReadDataAtOffset(self, file_offset, size):\n    \n    self._file_object.seek(file_offset, os.SEEK_SET)\n    return self._file_object.read(size)", "docstring": "Reads a byte string from the file-like object at a specific offset.\n\nArgs:\nfile_offset (int): file offset.\nsize (int): number of bytes to read.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def _manual_repartition(self, axis, repartition_func, **kwargs):\n    func = self._prepare_method(repartition_func, **kwargs)\n    return self.data.manual_shuffle(axis, func)", "docstring": "This method applies all manual partitioning functions.\n\nArgs:\naxis: The axis to shuffle data along.\nrepartition_func: The function used to repartition data.\n\nReturns:\nA `BaseFrameManager` object.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 solution_size,\n                 population_size=20):\n        \n        super(_RandomOptimizer, self).__init__(solution_size, population_size)", "docstring": "Create an object that optimizes a given fitness function with random strings.\n\nArgs:\nsolution_size: The number of bits in every solution.\npopulation_size: The number of solutions in every iteration.", "source": "juraj-google-style"}
{"code": "def moments_of_masked_time_series(time_series_tensor, broadcast_mask):\n    num_unmasked_entries = tf.cast(tf.reduce_sum(input_tensor=tf.cast((~ broadcast_mask), tf.int32), axis=(- 1)), time_series_tensor.dtype)\n    mean = (tf.reduce_sum(input_tensor=tf.where(broadcast_mask, tf.zeros_like(time_series_tensor), time_series_tensor), axis=(- 1)) / num_unmasked_entries)\n    variance = (tf.reduce_sum(input_tensor=tf.where(broadcast_mask, tf.zeros_like(time_series_tensor), ((time_series_tensor - mean[(..., tf.newaxis)]) ** 2)), axis=(- 1)) / num_unmasked_entries)\n    return (mean, variance)", "docstring": "Compute mean and variance, accounting for a mask.\n\nArgs:\ntime_series_tensor: float `Tensor` time series of shape\n`concat([batch_shape, [num_timesteps]])`.\nbroadcast_mask: bool `Tensor` of the same shape as `time_series`.\nReturns:\nmean: float `Tensor` of shape `batch_shape`.\nvariance: float `Tensor` of shape `batch_shape`.", "source": "codesearchnet"}
{"code": "def _create_split(last_client_key, next_client_key, query):\n    if not (last_client_key or next_client_key):\n        return query\n    split_query = query.clone()\n    filters = list(split_query.filters)\n    if last_client_key:\n        filters.append((KEY_PROPERTY_NAME, '>=', last_client_key))\n    if next_client_key:\n        filters.append((KEY_PROPERTY_NAME, '<', next_client_key))\n    split_query.filters = filters\n    return split_query", "docstring": "Create a new {@link Query} given the query and range.\n\nArgs:\nlast_client_key: the previous key. If null then assumed to be the beginning.\nnext_client_key: the next key. If null then assumed to be the end.\nquery: query to base the split query on.\n\nReturns:\nA split query with fetches entities in the range [last_key, next_client_key)", "source": "github-repos"}
{"code": "def start_server(self):\n    persists_shell_cmd = self._get_persisting_command()\n    self.log.debug('Snippet server for package %s is using protocol %d.%d', self.package, _PROTOCOL_MAJOR_VERSION, _PROTOCOL_MINOR_VERSION)\n    option_str = self._get_instrument_options_str()\n    cmd = _LAUNCH_CMD.format(shell_cmd=persists_shell_cmd, user=self._get_user_command_string(), snippet_package=self.package, instrument_options=option_str)\n    self._proc = self._run_adb_cmd(cmd)\n    self._server_start_stdout = []\n    line = self._read_protocol_line()\n    match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line)\n    if not match or int(match.group(1)) != _PROTOCOL_MAJOR_VERSION:\n        raise errors.ServerStartProtocolError(self._device, line)\n    line = self._read_protocol_line()\n    match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)\n    if not match:\n        message = _SNIPPET_SERVER_START_ERROR_DEBUG_TIP.format(instrumentation_result=line, server_start_stdout='\\n'.join(self._server_start_stdout))\n        raise errors.ServerStartProtocolError(self._device, message)\n    self.device_port = int(match.group(1))", "docstring": "Starts the server on the remote device.\n\nThis function starts the snippet server with adb command, checks the\nprotocol version of the server, parses device port from the server\noutput and sets it to self.device_port.\n\nRaises:\nerrors.ServerStartProtocolError: if the protocol reported by the server\nstartup process is unknown.\nerrors.ServerStartError: if failed to start the server or process the\nserver output.", "source": "github-repos"}
{"code": "def __init__(self, name, constants=None):\n        \n        if not constants:\n            constants = ParsedName.constants\n\n        if isinstance(name, HumanName):\n            self._parsed_name = name\n        else:\n            self._parsed_name = HumanName(name, constants=constants)\n            self._parsed_name.capitalize()", "docstring": "Create a ParsedName instance.\n\nArgs:\nname (Union[str, HumanName]): The name to be parsed (must be non empty nor None).\nconstants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.\n(Can be None, if provided it overwrites the default one generated in\n:method:`prepare_nameparser_constants`.)", "source": "juraj-google-style"}
{"code": "def sample(self, bqm, num_reads=10):\n    values = tuple(bqm.vartype.value)\n\n    def _itersample():\n        for __ in range(num_reads):\n            sample = {v: choice(values) for v in bqm.linear}\n            energy = bqm.energy(sample)\n            (yield (sample, energy))\n    (samples, energies) = zip(*_itersample())\n    return SampleSet.from_samples(samples, bqm.vartype, energies)", "docstring": "Give random samples for a binary quadratic model.\n\nVariable assignments are chosen by coin flip.\n\nArgs:\nbqm (:obj:`.BinaryQuadraticModel`):\nBinary quadratic model to be sampled from.\n\nnum_reads (int, optional, default=10):\nNumber of reads.\n\nReturns:\n:obj:`.SampleSet`", "source": "codesearchnet"}
{"code": "def get_graph_token_from_msi():\n    if (('ACC_CLOUD' in os.environ) and ('MSI_ENDPOINT' in os.environ)):\n        endpoint = os.environ['MSI_ENDPOINT']\n    else:\n        return None\n    headers = {'Metadata': 'true'}\n    body = {'resource': (('https:\n    ret = requests.post(endpoint, headers=headers, data=body)\n    return ret.json()['access_token']", "docstring": "get a Microsoft Graph access token using Azure Cloud Shell's MSI_ENDPOINT.\n\nNotes:\nThe auth token returned by this function is not an Azure auth token. Use it for querying\nthe Microsoft Graph API.\nThis function only works in an Azure cloud shell or virtual machine.\n\nReturns:\nA Microsoft Graph authentication token string.", "source": "codesearchnet"}
{"code": "def empty(shape, dtype=None, **kwargs):\n    data = np.empty(shape, dtype)\n    return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, without initializing entries.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array without initializing entries.", "source": "codesearchnet"}
{"code": "def year_month_day_to_ordinal(year, month, day):\n    with tf.compat.v1.name_scope(None, 'ymd2o', [year, month, day]):\n        year = tf.convert_to_tensor(year, tf.int32, name='year')\n        month = tf.convert_to_tensor(month, tf.int32, name='month')\n        day = tf.convert_to_tensor(day, tf.int32, name='day')\n        year -= tf.compat.v2.where(month <= 2, 1, 0)\n        month += tf.compat.v2.where(month > 2, -3, 9)\n        era = year \n        year_of_era = year % _YEARS_IN_ERA\n        day_of_year = _days_in_year_before_month(month) + day - 1\n        day_of_era = year_of_era * _DAYS_IN_YEAR + year_of_era \n        return era * _DAYS_IN_ERA + day_of_era + _ORDINAL_OF_1_3_0000", "docstring": "Calculates ordinals Tensor given years, months and dates.\n\nArgs:\nyear: Tensor of int32 type. Elements should be positive.\nmonth: Tensor of int32 type of same shape as `year`. Elements should be in\nrange `[1, 12]`.\nday: Tensor of int32 type of same shape as `year`. Elements should be in\nrange `[1, 31]` and represent valid dates together with corresponding\nelements of `month` and `year` Tensors.\n\nReturns:\nTensor of int32 type. Each element is number of days since 1 Jan 0001. 1 Jan\n0001 has `ordinal = 1`.", "source": "github-repos"}
{"code": "def get_sentence(self, offset: int) -> BioCSentence or None:\n        \n        for sentence in self.sentences:\n            if sentence.offset == offset:\n                return sentence\n        return None", "docstring": "Gets sentence with specified offset\n\nArgs:\noffset: sentence offset\n\nReturn:\nthe sentence with specified offset", "source": "juraj-google-style"}
{"code": "def extract_string_pairs_in_dir(directory, exclude_dirs, special_ui_components_prefix):\n    \n    result = []\n    for ib_file_path in find_files(directory, [\".xib\", \".storyboard\"], exclude_dirs):\n        result += extract_string_pairs_in_ib_file(ib_file_path, special_ui_components_prefix)\n\n    return result", "docstring": "Extract string pairs in the given directory's xib/storyboard files.\n\nArgs:\ndirectory (str): The path to the directory.\nexclude_dirs (str): A list of directories to exclude from extraction.\nspecial_ui_components_prefix (str):\nIf not None, extraction will not warn about internationalized UI components with this class prefix.\n\nReturns:\nlist: The extracted string pairs for all IB files in the directory.", "source": "juraj-google-style"}
{"code": "def remove(self, force=False):\n        \n        return self.client.api.remove_plugin(self.name, force=force)", "docstring": "Remove the plugin from the server.\n\nArgs:\nforce (bool): Remove even if the plugin is enabled.\nDefault: False\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def add_scope(scope=None, scope_fn=None):\n  \n  def decorator(f):\n\n    @functools.wraps(f)\n    def decorated(*args, **kwargs):\n      name = kwargs.pop(\"name\", None)  \n      with scope_fn(name or scope or f.__name__):\n        return f(*args, **kwargs)\n\n    return decorated\n\n  return decorator", "docstring": "Return a decorator which add a TF name/variable scope to a function.\n\nNote that the function returned by the decorator accept an additional 'name'\nparameter, which can overwrite the name scope given when the function is\ncreated.\n\nArgs:\nscope (str): name of the scope. If None, the function name is used.\nscope_fn (fct): Either tf.name_scope or tf.variable_scope\n\nReturns:\nfct: the add_scope decorator", "source": "juraj-google-style"}
{"code": "def _process_new(self, feed_item):\n    campaign = self.campaign_dao.get(feed_item, required=True)\n    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n    return {'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None), 'campaignId': campaign['id'] if campaign else None, 'siteId': feed_item.get(FieldMap.SITE_ID, None), 'name': feed_item.get(FieldMap.PLACEMENT_GROUP_NAME, None), 'placementGroupType': feed_item.get(FieldMap.PLACEMENT_GROUP_TYPE, None), 'pricingSchedule': {'startDate': feed_item.get(FieldMap.PLACEMENT_GROUP_START_DATE, None), 'endDate': feed_item.get(FieldMap.PLACEMENT_GROUP_END_DATE, None), 'pricingType': feed_item.get(FieldMap.PLACEMENT_GROUP_PRICING_TYPE, None)}}", "docstring": "Creates a new placement group DCM object from a feed item representing a placement group from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item representing the placement group from the Bulkdozer\nfeed.\n\nReturns:\nA placement group object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def _ParseHeader(self, parser_mediator, structure):\n    (_, month, day, hours, minutes, seconds, year) = structure.date_time\n    month = timelib.MONTH_DICT.get(month.lower(), 0)\n    time_elements_tuple = (year, month, day, hours, minutes, seconds)\n    try:\n        date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n        date_time.is_local_time = True\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time))\n        return\n    self._last_month = month\n    event_data = XChatLogEventData()\n    if (structure.log_action[0] == 'BEGIN'):\n        self._xchat_year = year\n        event_data.text = 'XChat start logging'\n    elif (structure.log_action[0] == 'END'):\n        self._xchat_year = None\n        event_data.text = 'XChat end logging'\n    else:\n        logger.debug('Unknown log action: {0:s}.'.format(' '.join(structure.log_action)))\n        return\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED, time_zone=parser_mediator.timezone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log header.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)\n\n    encryption_method = getattr(path_spec, 'encryption_method', None)\n    if not encryption_method:\n      raise errors.PathSpecError(\n          'Unsupported path specification without encryption method.')\n\n    self._encryption_method = encryption_method", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def sample(self, signum, frame):  \n        \n        stack = []\n        while frame and frame != self.base_frame:\n            stack.append((\n                frame.f_code.co_name,\n                frame.f_code.co_filename,\n                frame.f_code.co_firstlineno))\n            frame = frame.f_back\n        self._stats[tuple(stack)] += 1\n        signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)", "docstring": "Samples current stack and adds result in self._stats.\n\nArgs:\nsignum: Signal that activates handler.\nframe: Frame on top of the stack when signal is handled.", "source": "juraj-google-style"}
{"code": "def wind_speed(self, value=999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `wind_speed`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `wind_speed`')\n            if value > 40.0:\n                raise ValueError('value need to be smaller 40.0 '\n                                 'for field `wind_speed`')\n\n        self._wind_speed = value", "docstring": "Corresponds to IDD Field `wind_speed`\n\nArgs:\nvalue (float): value for IDD Field `wind_speed`\nUnit: m/s\nvalue >= 0.0\nvalue <= 40.0\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def distance(p_a, p_b):\n    \n    return sqrt((p_a.lat - p_b.lat) ** 2 + (p_a.lon - p_b.lon) ** 2)", "docstring": "Euclidean distance, between two points\n\nArgs:\np_a (:obj:`Point`)\np_b (:obj:`Point`)\nReturns:\nfloat: distance, in degrees", "source": "juraj-google-style"}
{"code": "def command(self, verb, args=None):\n    if self.__generating:\n        raise NNTPSyncError('Command issued while a generator is active')\n    cmd = verb\n    if args:\n        cmd += (' ' + args)\n    cmd += '\\r\\n'\n    self.socket.sendall(cmd)\n    try:\n        (code, message) = self.status()\n    except NNTPTemporaryError as e:\n        if (e.code() != 480):\n            raise e\n        (code, message) = self.command('AUTHINFO USER', self.username)\n        if (code == 381):\n            (code, message) = self.command('AUTHINFO PASS', self.password)\n        if (code != 281):\n            raise NNTPReplyError(code, message)\n        (code, message) = self.command(verb, args)\n    return (code, message)", "docstring": "Call a command on the server.\n\nIf the user has not authenticated then authentication will be done\nas part of calling the command on the server.\n\nFor commands that don't return a status message the status message\nwill default to an empty string.\n\nArgs:\nverb: The verb of the command to call.\nargs: The arguments of the command as a string (default None).\n\nReturns:\nA tuple of status code (as an integer) and status message.\n\nNote:\nYou can run raw commands by supplying the full command (including\nargs) in the verb.\n\nNote: Although it is possible you shouldn't issue more than one command\nat a time by adding newlines to the verb as it will most likely lead\nto undesirable results.", "source": "codesearchnet"}
{"code": "def _reduce_pseudo_inverse(nodes):\n    (_, num_nodes) = np.shape(nodes)\n    if (num_nodes == 2):\n        reduction = _REDUCTION0\n        denom = _REDUCTION_DENOM0\n    elif (num_nodes == 3):\n        reduction = _REDUCTION1\n        denom = _REDUCTION_DENOM1\n    elif (num_nodes == 4):\n        reduction = _REDUCTION2\n        denom = _REDUCTION_DENOM2\n    elif (num_nodes == 5):\n        reduction = _REDUCTION3\n        denom = _REDUCTION_DENOM3\n    else:\n        raise _helpers.UnsupportedDegree((num_nodes - 1), supported=(1, 2, 3, 4))\n    result = _helpers.matrix_product(nodes, reduction)\n    result /= denom\n    return result", "docstring": "Performs degree-reduction for a B |eacute| zier curve.\n\nDoes so by using the pseudo-inverse of the degree elevation\noperator (which is overdetermined).\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes in the curve.\n\nReturns:\nnumpy.ndarray: The reduced nodes.\n\nRaises:\n.UnsupportedDegree: If the degree is not 1, 2, 3 or 4.", "source": "codesearchnet"}
{"code": "def log10(x):\n    if any_symbolic_tensors((x,)):\n        return Log10().symbolic_call(x)\n    return backend.numpy.log10(x)", "docstring": "Return the base 10 logarithm of the input tensor, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise base 10 logarithm of `x`.", "source": "github-repos"}
{"code": "def create_border(self, border_style_type):\n        \n        if border_style_type == MenuBorderStyleType.ASCII_BORDER:\n            return self.create_ascii_border()\n        elif border_style_type == MenuBorderStyleType.LIGHT_BORDER:\n            return self.create_light_border()\n        elif border_style_type == MenuBorderStyleType.HEAVY_BORDER:\n            return self.create_heavy_border()\n        elif border_style_type == MenuBorderStyleType.DOUBLE_LINE_BORDER:\n            return self.create_doubleline_border()\n        elif border_style_type == MenuBorderStyleType.HEAVY_OUTER_LIGHT_INNER_BORDER:\n            return self.create_heavy_outer_light_inner_border()\n        elif border_style_type == MenuBorderStyleType.DOUBLE_LINE_OUTER_LIGHT_INNER_BORDER:\n            return self.create_doubleline_outer_light_inner_border()\n        else:\n            \n            self.logger.info('Unrecognized border style type: {}. Defaulting to ASCII.'.format(border_style_type))\n            return self.create_ascii_border()", "docstring": "Create a new MenuBorderStyle instance based on the given border style type.\n\nArgs:\nborder_style_type (int):  an integer value from :obj:`MenuBorderStyleType`.\n\nReturns:\n:obj:`MenuBorderStyle`: a new MenuBorderStyle instance of the specified style.", "source": "juraj-google-style"}
{"code": "def all_folders(\n        path_name, keyword='', has_date=False, date_fmt=DATE_FMT\n) -> list:\n    \n    if not os.path.exists(path=path_name): return []\n    path_name = path_name.replace('\\\\', '/')\n\n    if keyword:\n        folders = sort_by_modified([\n            f.replace('\\\\', '/') for f in glob.iglob(f'{path_name}/*{keyword}*')\n            if os.path.isdir(f) and (f.replace('\\\\', '/').split('/')[-1][0] != '~')\n        ])\n\n    else:\n        folders = sort_by_modified([\n            f'{path_name}/{f}' for f in os.listdir(path=path_name)\n            if os.path.isdir(f'{path_name}/{f}') and (f[0] != '~')\n        ])\n\n    if has_date:\n        folders = filter_by_dates(folders, date_fmt=date_fmt)\n\n    return folders", "docstring": "Search all folders with criteria\nReturned list will be sorted by last modified\n\nArgs:\npath_name: full path name\nkeyword: keyword to search\nhas_date: whether has date in file name (default False)\ndate_fmt: date format to check for has_date parameter\n\nReturns:\nlist: all folder names fulfilled criteria", "source": "juraj-google-style"}
{"code": "def ToByteArray(self):\n    ms = StreamManager.GetStream()\n    writer = BinaryWriter(ms)\n    self.Serialize(writer)\n    retval = ms.ToArray()\n    StreamManager.ReleaseStream(ms)\n    return retval", "docstring": "Serialize self and get the byte stream.\n\nReturns:\nbytes: serialized object.", "source": "codesearchnet"}
{"code": "def get(self, key, namespace='default', default=None, as_object=False):\n        \n\n        if namespace in self.__data and key in self.__data[namespace]:\n            if as_object:\n                return db.ConfigItem.find_one(\n                    ConfigItem.namespace_prefix == namespace,\n                    ConfigItem.key == key\n                )\n\n            return self.__data[namespace][key]\n        else:\n            return default", "docstring": "Return the value of a key/namespace pair\n\nArgs:\nkey (str): Key to return\nnamespace (str): Namespace of the key\ndefault (:obj:`Any`): Optional default value to return, if key was not found\nas_object (bool): If `True` returns the object as a :py:obj:`ConfigItem` object instead of its primitive\ntype\n\nReturns:\nRequested value if found, else default value or `None`", "source": "juraj-google-style"}
{"code": "def _GenerateSummary(self):\n    items = []\n    if self._notices:\n        items.append(('notices: %d' % self._notice_count))\n    if self._dataset_errors:\n        items.append(('errors: %d' % self._error_count))\n    if self._dataset_warnings:\n        items.append(('warnings: %d' % self._warning_count))\n    if items:\n        return ('<p><span class=\"fail\">%s</span></p>' % '<br>'.join(items))\n    else:\n        return '<p><span class=\"pass\">feeds merged successfully</span></p>'", "docstring": "Generate a summary of the warnings and errors.\n\nReturns:\nThe generated HTML as a string.", "source": "codesearchnet"}
{"code": "def getHostCertPath(self, name):\n        \n        path = s_common.genpath(self.certdir, 'hosts', '%s.crt' % name)\n        if not os.path.isfile(path):\n            return None\n        return path", "docstring": "Gets the path to a host certificate.\n\nArgs:\nname (str): The name of the host keypair.\n\nExamples:\nGet the path to the host certificate for the host \"myhost\":\n\nmypath = cdir.getHostCertPath('myhost')\n\nReturns:\nstr: The path if exists.", "source": "juraj-google-style"}
{"code": "def query_put_bounders(query, partition_column, start, end):\n    \n    where = \" WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}\".format(\n        partition_column, start, end\n    )\n    query_with_bounders = \"SELECT * FROM ({0}) AS TMP_TABLE {1}\".format(query, where)\n    return query_with_bounders", "docstring": "Put bounders in the query\n\nArgs:\nquery: SQL query string\npartition_column: partition_column name\nstart: lower_bound\nend: upper_bound\n\nReturns:\nQuery with bounders", "source": "juraj-google-style"}
{"code": "def stop(save=True):\n    global _profiler\n    with _profiler_lock:\n        if _profiler is None:\n            raise errors.UnavailableError(None, None, 'Cannot export profiling results. No profiler is running.')\n        if save:\n            try:\n                _profiler.export_to_tb()\n            except Exception:\n                _profiler = None\n                raise\n        _profiler = None", "docstring": "Stops the current profiling session.\n\nThe profiler session will be stopped and profile results can be saved.\n\nArgs:\nsave: An optional variable to save the results to TensorBoard. Default True.\n\nRaises:\nUnavailableError: If there is no active profiling session.", "source": "github-repos"}
{"code": "def infer_transportation_mode(self, clf, min_time):\n    for segment in self.segments:\n        segment.infer_transportation_mode(clf, min_time)\n    return self", "docstring": "In-place transportation mode inferring of segments\n\nReturns:\nThis track", "source": "codesearchnet"}
{"code": "def _do_export(self, remote_function):\n    if self._worker.load_code_from_local:\n        return\n    function = remote_function._function\n    function_name_global_valid = (function.__name__ in function.__globals__)\n    function_name_global_value = function.__globals__.get(function.__name__)\n    if (not is_cython(function)):\n        function.__globals__[function.__name__] = remote_function\n    try:\n        pickled_function = pickle.dumps(function)\n    finally:\n        if function_name_global_valid:\n            function.__globals__[function.__name__] = function_name_global_value\n        else:\n            del function.__globals__[function.__name__]\n    check_oversized_pickle(pickled_function, remote_function._function_name, 'remote function', self._worker)\n    key = (((b'RemoteFunction:' + self._worker.task_driver_id.binary()) + b':') + remote_function._function_descriptor.function_id.binary())\n    self._worker.redis_client.hmset(key, {'driver_id': self._worker.task_driver_id.binary(), 'function_id': remote_function._function_descriptor.function_id.binary(), 'name': remote_function._function_name, 'module': function.__module__, 'function': pickled_function, 'max_calls': remote_function._max_calls})\n    self._worker.redis_client.rpush('Exports', key)", "docstring": "Pickle a remote function and export it to redis.\n\nArgs:\nremote_function: the RemoteFunction object.", "source": "codesearchnet"}
{"code": "def GetProcessedTaskIdentifiers(self):\n    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n        raise IOError('Unsupported storage type.')\n    if (not self._processed_task_storage_path):\n        raise IOError('Missing processed task storage path.')\n    return [path.replace('.plaso', '') for path in os.listdir(self._processed_task_storage_path)]", "docstring": "Identifiers for tasks which have been processed.\n\nReturns:\nlist[str]: task identifiers that are processed.\n\nRaises:\nIOError: if the storage type is not supported or\nif the temporary path for the task storage does not exist.\nOSError: if the storage type is not supported or\nif the temporary path for the task storage does not exist.", "source": "codesearchnet"}
{"code": "def resolve_parameters(val: Any, param_resolver: 'cirq.ParamResolverOrSimilarType') -> Any:\n    if (not param_resolver):\n        return val\n    from cirq import ParamResolver\n    param_resolver = ParamResolver(param_resolver)\n    if isinstance(val, sympy.Basic):\n        return param_resolver.value_of(val)\n    getter = getattr(val, '_resolve_parameters_', None)\n    result = (NotImplemented if (getter is None) else getter(param_resolver))\n    if (result is not NotImplemented):\n        return result\n    else:\n        return val", "docstring": "Resolves symbol parameters in the effect using the param resolver.\n\nThis function will use the `_resolve_parameters_` magic method\nof `val` to resolve any Symbols with concrete values from the given\nparameter resolver.\n\nArgs:\nval: The object to resolve (e.g. the gate, operation, etc)\nparam_resolver: the object to use for resolving all symbols\n\nReturns:\na gate or operation of the same type, but with all Symbols\nreplaced with floats according to the given ParamResolver.\nIf `val` has no `_resolve_parameters_` method or if it returns\nNotImplemented, `val` itself is returned.", "source": "codesearchnet"}
{"code": "def _parse_configs(self, config):\n    for config_dict in config:\n        label = config_dict.keys()[0]\n        cfg = config_dict[label]\n        dbpath = cfg['dbpath']\n        pattern = self._parse_dbpath(dbpath)\n        read_preference = cfg.get('read_preference', 'primary').upper()\n        read_preference = self._get_read_preference(read_preference)\n        cluster_config = {'params': {'host': cfg['host'], 'port': cfg['port'], 'read_preference': read_preference, 'replicaSet': cfg.get('replicaSet')}, 'pattern': pattern, 'label': label}\n        self._clusters.append(cluster_config)", "docstring": "Builds a dict with information to connect to Clusters.\n\nParses the list of configuration dictionaries passed by the user and\nbuilds an internal dict (_clusters) that holds information for creating\nClients connecting to Clusters and matching database names.\n\nArgs:\nconfig: A list of dictionaries containing connecting and\nidentification information about Clusters.\nA dict has the following structure:\n{label: {host, port, read_preference, dbpath}}.\n\nRaises:\nException('No configuration provided'): no configuration provided.", "source": "codesearchnet"}
{"code": "def get_accounts_for_service(cls, service_type):\n    return [a for a in cls.get_accounts().values() if (a.service_type == service_type)]", "docstring": "Get a list of accounts for a given music service.\n\nArgs:\nservice_type (str): The service_type to use.\n\nReturns:\nlist: A list of `Account` instances.", "source": "codesearchnet"}
{"code": "def from_string(contents):\n    lines = [l.strip() for l in contents.split('\\n')]\n    link0_patt = re.compile('^(%.+)\\\\s*=\\\\s*(.+)')\n    link0_dict = {}\n    for (i, l) in enumerate(lines):\n        if link0_patt.match(l):\n            m = link0_patt.match(l)\n            link0_dict[m.group(1).strip('=')] = m.group(2)\n    route_patt = re.compile('^\n    route = ''\n    route_index = None\n    for (i, l) in enumerate(lines):\n        if route_patt.match(l):\n            route += (' ' + l)\n            route_index = i\n        elif (((l == '') or l.isspace()) and route_index):\n            break\n    (functional, basis_set, route_paras, dieze_tag) = read_route_line(route)\n    ind = 2\n    title = []\n    while lines[(route_index + ind)].strip():\n        title.append(lines[(route_index + ind)].strip())\n        ind += 1\n    title = ' '.join(title)\n    ind += 1\n    toks = re.split('[,\\\\s]+', lines[(route_index + ind)])\n    charge = int(toks[0])\n    spin_mult = int(toks[1])\n    coord_lines = []\n    spaces = 0\n    input_paras = {}\n    ind += 1\n    for i in range((route_index + ind), len(lines)):\n        if (lines[i].strip() == ''):\n            spaces += 1\n        if (spaces >= 2):\n            d = lines[i].split('=')\n            if (len(d) == 2):\n                input_paras[d[0]] = d[1]\n        else:\n            coord_lines.append(lines[i].strip())\n    mol = GaussianInput._parse_coords(coord_lines)\n    mol.set_charge_and_spin(charge, spin_mult)\n    return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult, title=title, functional=functional, basis_set=basis_set, route_parameters=route_paras, input_parameters=input_paras, link0_parameters=link0_dict, dieze_tag=dieze_tag)", "docstring": "Creates GaussianInput from a string.\n\nArgs:\ncontents: String representing an Gaussian input file.\n\nReturns:\nGaussianInput object", "source": "codesearchnet"}
{"code": "def get_mapreduce_yaml(parse=parse_mapreduce_yaml):\n    mr_yaml_path = find_mapreduce_yaml()\n    if (not mr_yaml_path):\n        raise errors.MissingYamlError()\n    mr_yaml_file = open(mr_yaml_path)\n    try:\n        return parse(mr_yaml_file.read())\n    finally:\n        mr_yaml_file.close()", "docstring": "Locates mapreduce.yaml, loads and parses its info.\n\nArgs:\nparse: Used for testing.\n\nReturns:\nMapReduceYaml object.\n\nRaises:\nerrors.BadYamlError: when contents is not a valid mapreduce.yaml file or the\nfile is missing.", "source": "codesearchnet"}
{"code": "def get_unbound_arg_names(arg_names, arg_binding_keys):\n    bound_arg_names = [abk._arg_name for abk in arg_binding_keys]\n    return [arg_name for arg_name in arg_names if (arg_name not in bound_arg_names)]", "docstring": "Determines which args have no arg binding keys.\n\nArgs:\narg_names: a sequence of the names of possibly bound args\narg_binding_keys: a sequence of ArgBindingKey each of whose arg names is\nin arg_names\nReturns:\na sequence of arg names that is a (possibly empty, possibly non-proper)\nsubset of arg_names", "source": "codesearchnet"}
{"code": "def remove(path, dir_fd=None):\n    \n    system = get_instance(path)\n\n    \n    if system.is_locator(path) or path[-1] == '/':\n        raise is_a_directory_error(\"Is a directory: '%s'\" % path)\n\n    \n    system.remove(path)", "docstring": "Remove a file.\n\nEquivalent to \"os.remove\" and \"os.unlink\".\n\nArgs:\npath (path-like object): Path or URL.\ndir_fd: directory descriptors;\nsee the os.remove() description for how it is interpreted.\nNot supported on cloud storage objects.", "source": "juraj-google-style"}
{"code": "def color_get_hsv(c: Tuple[int, int, int]) -> Tuple[float, float, float]:\n    \n    hsv = ffi.new(\"float [3]\")\n    lib.TCOD_color_get_HSV(c, hsv, hsv + 1, hsv + 2)\n    return hsv[0], hsv[1], hsv[2]", "docstring": "Return the (hue, saturation, value) of a color.\n\nArgs:\nc (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\n\nReturns:\nTuple[float, float, float]:\nA tuple with (hue, saturation, value) values, from 0 to 1.", "source": "juraj-google-style"}
{"code": "def times(coro, limit=1, raise_exception=False, return_value=None):\n    assert_corofunction(coro=coro)\n    limit = max(limit, 1)\n    times = limit\n    result = None\n\n    @asyncio.coroutine\n    def wrapper(*args, **kw):\n        nonlocal limit\n        nonlocal result\n        if (limit == 0):\n            if raise_exception:\n                raise RuntimeError(ExceptionMessage.format(times))\n            if return_value:\n                return return_value\n            return result\n        limit -= 1\n        if return_value:\n            return (yield from coro(*args, **kw))\n        result = (yield from coro(*args, **kw))\n        return result\n    return wrapper", "docstring": "Wraps a given coroutine function to be executed only a certain amount\nof times.\n\nIf the execution limit is exceeded, the last execution return value will\nbe returned as result.\n\nYou can optionally define a custom return value on exceeded via\n`return_value` param.\n\nThis function can be used as decorator.\n\narguments:\ncoro (coroutinefunction): coroutine function to wrap.\nlimit (int): max limit of coroutine executions.\nraise_exception (bool): raise exception if execution times exceeded.\nreturn_value (mixed): value to return when execution times exceeded.\n\nRaises:\nTypeError: if coro argument is not a coroutine function.\nRuntimeError: if max execution excedeed (optional).\n\nReturns:\ncoroutinefunction\n\nUsage::\n\nasync def mul_2(num):\nreturn num * 2\n\ntimed = paco.times(mul_2, 3)\nawait timed(2)\n# => 4\nawait timed(3)\n# => 6\nawait timed(4)\n# => 8\nawait timed(5)  # ignored!\n# => 8", "source": "codesearchnet"}
{"code": "def _parse_stop_words_file(self, path):\n    language = None\n    loaded = False\n    if os.path.isfile(path):\n        self._logger.debug('Loading stop words in %s', path)\n        language = path.split('-')[(- 1)]\n        if (not (language in self.__stop_words)):\n            self.__stop_words[language] = set()\n        with codecs.open(path, 'r', 'UTF-8') as file:\n            loaded = True\n            for word in file:\n                self.__stop_words[language].add(word.strip())\n    return loaded", "docstring": "Load stop words from the given path.\n\nParse the stop words file, saving each word found in it in a set\nfor the language of the file. This language is obtained from\nthe file name. If the file doesn't exist, the method will have\nno effect.\n\nArgs:\npath: Path to the stop words file.\n\nReturns:\nA boolean indicating whether the file was loaded.", "source": "codesearchnet"}
{"code": "def preprocess_frame(frame):\n    frame = common_layers.convert_rgb_to_real(frame)\n    frame = (frame - 0.5)\n    (frame, _) = glow_ops.uniform_binning_correction(frame)\n    return frame", "docstring": "Preprocess frame.\n\n1. Converts [0, 255] to [-0.5, 0.5]\n2. Adds uniform noise.\n\nArgs:\nframe: 3-D Tensor representing pixels.\nReturns:\nframe: 3-D Tensor with values in between [-0.5, 0.5]", "source": "codesearchnet"}
{"code": "def from_tokenizer(cls, tokenizer: 'PreTrainedTokenizerBase', **kwargs):\n    do_lower_case = kwargs.pop('do_lower_case', None)\n    do_lower_case = tokenizer.do_lower_case if do_lower_case is None else do_lower_case\n    cls_token_id = kwargs.pop('cls_token_id', None)\n    cls_token_id = tokenizer.cls_token_id if cls_token_id is None else cls_token_id\n    sep_token_id = kwargs.pop('sep_token_id', None)\n    sep_token_id = tokenizer.sep_token_id if sep_token_id is None else sep_token_id\n    pad_token_id = kwargs.pop('pad_token_id', None)\n    pad_token_id = tokenizer.pad_token_id if pad_token_id is None else pad_token_id\n    vocab = tokenizer.get_vocab()\n    vocab = sorted(vocab.items(), key=lambda x: x[1])\n    vocab_list = [entry[0] for entry in vocab]\n    return cls(vocab_list=vocab_list, do_lower_case=do_lower_case, cls_token_id=cls_token_id, sep_token_id=sep_token_id, pad_token_id=pad_token_id, **kwargs)", "docstring": "Initialize a `TFBertTokenizer` from an existing `Tokenizer`.\n\nArgs:\ntokenizer (`PreTrainedTokenizerBase`):\nThe tokenizer to use to initialize the `TFBertTokenizer`.\n\nExamples:\n\n```python\nfrom transformers import AutoTokenizer, TFBertTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"google-bert/bert-base-uncased\")\ntf_tokenizer = TFBertTokenizer.from_tokenizer(tokenizer)\n```", "source": "github-repos"}
{"code": "def get_domain_template(distro, libvirt_ver, **kwargs):\n    \n    env = Environment(\n        loader=PackageLoader('lago', 'providers/libvirt/templates'),\n        trim_blocks=True,\n        lstrip_blocks=True,\n    )\n\n    template_name = 'dom_template-{0}.xml.j2'.format(distro)\n    try:\n        template = env.get_template(template_name)\n    except TemplateNotFound:\n        LOGGER.debug('could not find template %s using default', template_name)\n        template = env.get_template('dom_template-base.xml.j2')\n    return template.render(libvirt_ver=libvirt_ver, **kwargs)", "docstring": "Get a rendered Jinja2 domain template\n\nArgs:\ndistro(str): domain distro\nlibvirt_ver(int): libvirt version\nkwargs(dict): args for template render\n\nReturns:\nstr: rendered template", "source": "juraj-google-style"}
{"code": "def has_all_nonzero_section_lengths(neuron, threshold=0.0):\n    \n    bad_ids = [s.id for s in _nf.iter_sections(neuron.neurites)\n               if section_length(s.points) <= threshold]\n\n    return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check presence of neuron sections with length not above threshold\n\nArguments:\nneuron(Neuron): The neuron object to test\nthreshold(float): value above which a section length is considered\nto be non-zero\n\nReturns:\nCheckResult with result including list of ids of bad sections", "source": "juraj-google-style"}
{"code": "def finish(self):\n    if (self.proc is None):\n        return None\n    self.proc.stdin.close()\n    for thread in (self._out_thread, self._err_thread):\n        thread.join()\n    (out, err) = [b''.join(chunks) for chunks in (self._out_chunks, self._err_chunks)]\n    self.proc.stdout.close()\n    self.proc.stderr.close()\n    if self.proc.returncode:\n        err = '\\n'.join([' '.join(self.cmd), err.decode('utf8')])\n        raise IOError(err)\n    del self.proc\n    self.proc = None\n    return out", "docstring": "Finishes transconding and returns the video.\n\nReturns:\nbytes\n\nRaises:\nIOError: in case of transcoding error.", "source": "codesearchnet"}
{"code": "def _update_state(self, state_type: str, value: str) -> datetime:\n        \n        timestamp = datetime.utcnow()\n        field = '{}_state'.format(state_type)\n        old_state = DB.get_hash_value(self._key, field)\n        DB.set_hash_value(self._key, field, value, pipeline=True)\n        DB.set_hash_value(self._key, '{}_timestamp'.format(state_type),\n                          timestamp.isoformat(), pipeline=True)\n        DB.execute()\n\n        \n        self.publish('{}_state_updated'.format(state_type),\n                     event_data=dict(state=value, old_state=old_state))\n\n        return timestamp", "docstring": "Update the state of type specified (current or target).\n\nArgs:\nstate_type(str): Type of state to update, current or target.\nvalue (str): New state value.\n\nReturns:\ntimestamp, current time", "source": "juraj-google-style"}
{"code": "def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs, remaining_args, metadata):\n    accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)\n    capacity = False\n    parsed_args = []\n    for index, arg in enumerate(fn_args):\n        value = kwargs.pop(arg, None)\n        if value is not None:\n            value = _ParseValue(value, index, arg, metadata)\n            parsed_args.append(value)\n        elif remaining_args and accepts_positional_args:\n            value = remaining_args.pop(0)\n            value = _ParseValue(value, index, arg, metadata)\n            parsed_args.append(value)\n        elif index < num_required_args:\n            raise FireError('The function received no value for the required argument:', arg)\n        else:\n            capacity = True\n            default_index = index - num_required_args\n            parsed_args.append(fn_defaults[default_index])\n    for key, value in kwargs.items():\n        kwargs[key] = _ParseValue(value, None, key, metadata)\n    return (parsed_args, kwargs, remaining_args, capacity)", "docstring": "Parses the positional and named arguments from the available supplied args.\n\nModifies kwargs, removing args as they are used.\n\nArgs:\nfn_args: A list of argument names that the target function accepts,\nincluding positional and named arguments, but not the varargs or kwargs\nnames.\nfn_defaults: A list of the default values in the function argspec.\nnum_required_args: The number of required arguments from the function's\nargspec. This is the number of arguments without a default value.\nkwargs: Dict with named command line arguments and their values.\nremaining_args: The remaining command line arguments, which may still be\nused as positional arguments.\nmetadata: Metadata about the function, typically from Fire decorators.\nReturns:\nparsed_args: A list of values to be used as positional arguments for calling\nthe target function.\nkwargs: The input dict kwargs modified with the used kwargs removed.\nremaining_args: A list of the supplied args that have not been used yet.\ncapacity: Whether the call could have taken args in place of defaults.\nRaises:\nFireError: If additional positional arguments are expected, but none are\navailable.", "source": "github-repos"}
{"code": "def get_sid(principal):\n    if (principal is None):\n        principal = 'NULL SID'\n    try:\n        sid = salt.utils.win_functions.get_sid_from_name(principal)\n    except CommandExecutionError:\n        sid = principal\n    try:\n        sid = win32security.ConvertStringSidToSid(sid)\n    except pywintypes.error:\n        log.exception('Invalid user/group or sid: %s', principal)\n        raise CommandExecutionError('Invalid user/group or sid: {0}'.format(principal))\n    except TypeError:\n        raise CommandExecutionError\n    return sid", "docstring": "Converts a username to a sid, or verifies a sid. Required for working with\nthe DACL.\n\nArgs:\n\nprincipal(str):\nThe principal to lookup the sid. Can be a sid or a username.\n\nReturns:\nPySID Object: A sid\n\nUsage:\n\n.. code-block:: python\n\n# Get a user's sid\nsalt.utils.win_dacl.get_sid('jsnuffy')\n\n# Verify that the sid is valid\nsalt.utils.win_dacl.get_sid('S-1-5-32-544')", "source": "codesearchnet"}
{"code": "def manual_to_auto_spmd_partition(tensor, manual_sharding, full_shape, single_dim=-1, unspecified_dims=None):\n    return tf2xla.spmd_shard_to_full_shape(tensor, manual_sharding=manual_sharding, full_shape=full_shape, dim=single_dim, unspecified_dims=unspecified_dims or [])", "docstring": "Switches from manual partitioning to automatic SPMD partitioning.\n\nConverts a shard-shaped tensor (manually partitioned in SPMD-style) to a\nfull-shaped tensor to be partitioned automatically by the SPMD partitioner.\n\nArgs:\ntensor: A tf.Tensor in shard shape.\nmanual_sharding: a serialized string of OpSharding to be used in manual\npartitioning.\nfull_shape: the shape of tensor before partitioning.\nsingle_dim: If >= 0, the conversion will happen only on this dim in\nsubgroups.\nunspecified_dims: An optional list of dimensions unspecified.\n\nReturns:\nA full-shaped tensor to be partitioned automatically by the SPMD\npartitioner.", "source": "github-repos"}
{"code": "def userinfo(self, access_token):\n\n        \n\n        return self.get(\n            url='https:\n            headers={'Authorization': 'Bearer {}'.format(access_token)}\n        )", "docstring": "Returns the user information based on the Auth0 access token.\nThis endpoint will work only if openid was granted as a scope for the access_token.\n\nArgs:\naccess_token (str): Auth0 access token (obtained during login).\n\nReturns:\nThe user profile.", "source": "juraj-google-style"}
{"code": "def change_numbering(self, new_index=None):\n        \n        if (new_index is None):\n            new_index = range(len(self))\n        elif len(new_index) != len(self):\n            raise ValueError('len(new_index) has to be the same as len(self)')\n\n        c_table = self.loc[:, ['b', 'a', 'd']]\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        c_table = c_table.replace(constants.int_label)\n        try:\n            c_table = c_table.astype('i8')\n        except ValueError:\n            raise ValueError('Due to a bug in pandas it is necessary to have '\n                             'integer columns')\n        c_table = c_table.replace(self.index, new_index)\n        c_table = c_table.replace(\n            {v: k for k, v in constants.int_label.items()})\n\n        out = self.copy()\n        out.unsafe_loc[:, ['b', 'a', 'd']] = c_table\n        out._frame.index = new_index\n        return out", "docstring": "Change numbering to a new index.\n\nChanges the numbering of index and all dependent numbering\n(bond_with...) to a new_index.\nThe user has to make sure that the new_index consists of distinct\nelements.\n\nArgs:\nnew_index (list): If None the new_index is taken from 1 to the\nnumber of atoms.\n\nReturns:\nZmat: Reindexed version of the zmatrix.", "source": "juraj-google-style"}
{"code": "def _ParseNumericOption(cls, options, argument_name, default_value=None):\n    argument_value = getattr(options, argument_name, None)\n    if (argument_value is None):\n        return default_value\n    if (not isinstance(argument_value, py2to3.INTEGER_TYPES)):\n        raise errors.BadConfigOption('Unsupported option: {0:s} integer type required.'.format(argument_name))\n    return argument_value", "docstring": "Parses a numeric command line argument.\n\nArgs:\noptions (argparse.Namespace): parser options.\nargument_name (str): name of the command line argument.\ndefault_value (Optional[int]): default value of the command line argument.\n\nReturns:\nint: command line argument value or the default value if the command line\nargument is not set\n\nRaises:\nBadConfigOption: if the command line argument value cannot be converted\nto a Unicode string.", "source": "codesearchnet"}
{"code": "def minimize_best_n(Members):\n    return list(reversed(sorted(Members, key=(lambda Member: Member.fitness_score))))", "docstring": "Orders population members from lowest fitness to highest fitness\n\nArgs:\nMembers (list): list of PyGenetics Member objects\n\nReturns:\nlsit: ordered lsit of Members, from highest fitness to lowest fitness", "source": "codesearchnet"}
{"code": "def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):\n  \n  return slicewise(\n      tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims,\n      grad_function=grad_function, name=name or \"cwise\")", "docstring": "Component-wise operation with no broadcasting.\n\nArgs:\ntf_fn: a component-wise function taking n tf.Tensor inputs and producing\na tf.Tensor output\nxs: n Tensors\noutput_dtype: an optional dtype\ngrad_function: an optional python function\nname: an optional string\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def _valid_deleted_file(path):\n    ret = False\n    if path.endswith(' (deleted)'):\n        ret = True\n    if re.compile('\\\\(path inode=[0-9]+\\\\)$').search(path):\n        ret = True\n    regex = re.compile('|'.join(LIST_DIRS))\n    if regex.match(path):\n        ret = False\n    return ret", "docstring": "Filters file path against unwanted directories and decides whether file is marked as deleted.\n\nReturns:\nTrue if file is desired deleted file, else False.\n\nArgs:\npath: A string - path to file", "source": "codesearchnet"}
{"code": "def as_str_any(value, encoding='utf-8'):\n    if isinstance(value, bytes):\n        return as_str(value, encoding=encoding)\n    else:\n        return str(value)", "docstring": "Converts input to `str` type.\n\nUses `str(value)`, except for `bytes` typed inputs, which are converted\nusing `as_str`.\n\nArgs:\nvalue: A object that can be converted to `str`.\nencoding: Encoding for `bytes` typed inputs.\n\nReturns:\nA `str` object.", "source": "github-repos"}
{"code": "def watchpoint_info(self, handle=0, index=(- 1)):\n    if ((index < 0) and (handle == 0)):\n        raise ValueError('Handle must be provided if index is not set.')\n    wp = structs.JLinkWatchpointInfo()\n    res = self._dll.JLINKARM_GetWPInfoEx(index, ctypes.byref(wp))\n    if (res < 0):\n        raise errors.JLinkException('Failed to get watchpoint info.')\n    for i in range(res):\n        res = self._dll.JLINKARM_GetWPInfoEx(i, ctypes.byref(wp))\n        if (res < 0):\n            raise errors.JLinkException('Failed to get watchpoint info.')\n        elif ((wp.Handle == handle) or (wp.WPUnit == index)):\n            return wp\n    return None", "docstring": "Returns information about the specified watchpoint.\n\nNote:\nEither ``handle`` or ``index`` can be specified.  If the ``index``\nis not provided, the ``handle`` must be set, and vice-versa.  If\nboth ``index`` and ``handle`` are provided, the ``index`` overrides\nthe provided ``handle``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhandle (int): optional handle of a valid watchpoint.\nindex (int): optional index of a watchpoint.\n\nReturns:\nAn instance of ``JLinkWatchpointInfo`` specifying information about\nthe watchpoint if the watchpoint was found, otherwise ``None``.\n\nRaises:\nJLinkException: on error.\nValueError: if both handle and index are invalid.", "source": "codesearchnet"}
{"code": "def open(path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.open(path, mime_type, compression_type)", "docstring": "Returns a read channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object. See\n``CompressionTypes`` for possible values.\n\nReturns: file handle with a ``close`` function for the user to use.", "source": "github-repos"}
{"code": "def remove_entry(self, pathname_name, recursive=True):\n    pathname_name = self._normalized_entryname(pathname_name)\n    entry = self.get_entry(pathname_name)\n    if self.filesystem.is_windows_fs:\n        if ((entry.st_mode & PERM_WRITE) == 0):\n            self.filesystem.raise_os_error(errno.EACCES, pathname_name)\n        if self.filesystem.has_open_file(entry):\n            self.filesystem.raise_os_error(errno.EACCES, pathname_name)\n    elif ((not is_root()) and ((self.st_mode & (PERM_WRITE | PERM_EXE)) != (PERM_WRITE | PERM_EXE))):\n        self.filesystem.raise_os_error(errno.EACCES, pathname_name)\n    if (recursive and isinstance(entry, FakeDirectory)):\n        while entry.contents:\n            entry.remove_entry(list(entry.contents)[0])\n    elif (entry.st_nlink == 1):\n        self.filesystem.change_disk_usage((- entry.size), pathname_name, entry.st_dev)\n    self.st_nlink -= 1\n    entry.st_nlink -= 1\n    assert (entry.st_nlink >= 0)\n    del self.contents[pathname_name]", "docstring": "Removes the specified child file or directory.\n\nArgs:\npathname_name: Basename of the child object to remove.\nrecursive: If True (default), the entries in contained directories\nare deleted first. Used to propagate removal errors\n(e.g. permission problems) from contained entries.\n\nRaises:\nKeyError: if no child exists by the specified name.\nOSError: if user lacks permission to delete the file,\nor (Windows only) the file is open.", "source": "codesearchnet"}
{"code": "def make_hash(self, task):\n        \n        \n        t = [serialize_object(task['func_name'])[0],\n             serialize_object(task['fn_hash'])[0],\n             serialize_object(task['args'])[0],\n             serialize_object(task['kwargs'])[0],\n             serialize_object(task['env'])[0]]\n        x = b''.join(t)\n        hashedsum = hashlib.md5(x).hexdigest()\n        return hashedsum", "docstring": "Create a hash of the task inputs.\n\nThis uses a serialization library borrowed from ipyparallel.\nIf this fails here, then all ipp calls are also likely to fail due to failure\nat serialization.\n\nArgs:\n- task (dict) : Task dictionary from dfk.tasks\n\nReturns:\n- hash (str) : A unique hash string", "source": "juraj-google-style"}
{"code": "def _parse_query_modifier(self, modifier, qval, is_escaped):\n        \n        if modifier == 'range':\n            if not qval[0]:\n                start = '*'\n            elif isinstance(qval[0], date):\n                start = self._handle_date(qval[0])\n            elif isinstance(qval[0], datetime):\n                start = self._handle_datetime(qval[0])\n            elif not is_escaped:\n                start = self._escape_query(qval[0])\n            else:\n                start = qval[0]\n            if not qval[1]:\n                end = '*'\n            elif isinstance(qval[1], date):\n                end = self._handle_date(qval[1])\n            elif isinstance(qval[1], datetime):\n                end = self._handle_datetime(qval[1])\n            elif not is_escaped:\n                end = self._escape_query(qval[1])\n            else:\n                end = qval[1]\n            qval = '[%s TO %s]' % (start, end)\n        else:\n            if not is_escaped and not isinstance(qval, (date, datetime, int, float)):\n                qval = self._escape_query(qval)\n            if modifier == 'exact':\n                qval = qval\n            elif modifier == 'contains':\n                qval = \"*%s*\" % qval\n            elif modifier == 'startswith':\n                qval = \"%s*\" % qval\n            elif modifier == 'endswith':\n                qval = \"%s*\" % qval\n            elif modifier == 'lte':\n                qval = '[* TO %s]' % qval\n            elif modifier == 'gte':\n                qval = '[%s TO *]' % qval\n            elif modifier == 'lt':\n                if isinstance(qval, int):\n                    qval -= 1\n                qval = '[* TO %s]' % qval\n            elif modifier == 'gt':\n                if isinstance(qval, int):\n                    qval += 1\n                qval = '[%s TO *]' % qval\n        return qval", "docstring": "Parses query_value according to query_type\n\nArgs:\nmodifier (str): Type of query. Exact, contains, lte etc.\nqval: Value partition of the query.\n\nReturns:\nParsed query_value.", "source": "juraj-google-style"}
{"code": "def limit(self, count):\n        \n        query = query_mod.Query(self)\n        return query.limit(count)", "docstring": "Create a limited query with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.limit` for\nmore information on this method.\n\nArgs:\ncount (int): Maximum number of documents to return that match\nthe query.\n\nReturns:\n~.firestore_v1beta1.query.Query: A limited query.", "source": "juraj-google-style"}
{"code": "def _fft(self, x):\n    x_complex = _to_complex(x)\n    return _FFT_OP[self.block_depth](x_complex)", "docstring": "FFT along the last self.block_depth dimensions of x.\n\nArgs:\nx: `Tensor` with floating or complex `dtype`.\nShould be in the form returned by self._vectorize_then_blockify.\n\nReturns:\n`Tensor` with `dtype` `complex64`.", "source": "github-repos"}
{"code": "def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):\n        \n        if acks not in (1, 0, -1):\n            raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks)\n\n        topics = []\n        for topic, topic_payloads in group_by_topic_and_partition(payloads).items():\n            topic_msgs = []\n            for partition, payload in topic_payloads.items():\n                partition_msgs = []\n                for msg in payload.messages:\n                    m = kafka.protocol.message.Message(\n                          msg.value, key=msg.key,\n                          magic=msg.magic, attributes=msg.attributes\n                    )\n                    partition_msgs.append((0, m.encode()))\n                topic_msgs.append((partition, MessageSet.encode(partition_msgs, prepend_size=False)))\n            topics.append((topic, topic_msgs))\n\n\n        return kafka.protocol.produce.ProduceRequest[0](\n            required_acks=acks,\n            timeout=timeout,\n            topics=topics\n        )", "docstring": "Encode a ProduceRequest struct\n\nArguments:\npayloads: list of ProduceRequestPayload\nacks: How \"acky\" you want the request to be\n1: written to disk by the leader\n0: immediate response\n-1: waits for all replicas to be in sync\ntimeout: Maximum time (in ms) the server will wait for replica acks.\nThis is _not_ a socket timeout\n\nReturns: ProduceRequest", "source": "juraj-google-style"}
{"code": "def maybe_copy_file_to_directory(source_filepath, target_directory):\n    if (not tf.gfile.Exists(target_directory)):\n        tf.logging.info(('Creating directory %s' % target_directory))\n        os.mkdir(target_directory)\n    target_filepath = os.path.join(target_directory, os.path.basename(source_filepath))\n    if (not tf.gfile.Exists(target_filepath)):\n        tf.logging.info(('Copying %s to %s' % (source_filepath, target_filepath)))\n        tf.gfile.Copy(source_filepath, target_filepath)\n        statinfo = os.stat(target_filepath)\n        tf.logging.info(('Successfully copied %s, %s bytes.' % (target_filepath, statinfo.st_size)))\n    else:\n        tf.logging.info(('Not copying, file already found: %s' % target_filepath))\n    return target_filepath", "docstring": "Copy a file to a directory if it is not already there.\n\nReturns the target filepath.\n\nArgs:\nsource_filepath: a string\ntarget_directory: a string\n\nReturns:\na string", "source": "codesearchnet"}
{"code": "def _dict_mapping_to_pb(mapping, proto_type):\n    \n    converted_pb = getattr(trace_pb2, proto_type)()\n    ParseDict(mapping, converted_pb)\n    return converted_pb", "docstring": "Convert a dict to protobuf.\n\nArgs:\nmapping (dict): A dict that needs to be converted to protobuf.\nproto_type (str): The type of the Protobuf.\n\nReturns:\nAn instance of the specified protobuf.", "source": "juraj-google-style"}
{"code": "def __init__(self, task_name, dag_name, workflow_name, workflow_id, worker_hostname):\n        \n        self.task_name = task_name\n        self.dag_name = dag_name\n        self.workflow_name = workflow_name\n        self.workflow_id = workflow_id\n        self.worker_hostname = worker_hostname", "docstring": "Initialize the task context object.\n\nArgs:\ntask_name (str): The name of the task.\ndag_name (str): The name of the DAG the task was started from.\nworkflow_name (str): The name of the workflow the task was started from.\nworkflow_id (str): The id of the workflow this task is member of.\nworker_hostname (str): The name of the worker executing this task.", "source": "juraj-google-style"}
{"code": "def inquire_property(name, doc=None):\n    \n\n    def inquire_property(self):\n        if not self._started:\n            msg = (\"Cannot read {0} from a security context whose \"\n                   \"establishment has not yet been started.\")\n            raise AttributeError(msg)\n\n        return getattr(self._inquire(**{name: True}), name)\n\n    return property(inquire_property, doc=doc)", "docstring": "Creates a property based on an inquire result\n\nThis method creates a property that calls the\n:python:`_inquire` method, and return the value of the\nrequested information.\n\nArgs:\nname (str): the name of the 'inquire' result information\n\nReturns:\nproperty: the created property", "source": "juraj-google-style"}
{"code": "async def _send_email(email_, config, loop=asyncio.get_event_loop()):\n    \n    smtp_server = get_attribute_from_config(config, EMAIL_SECTION_KEY, SMTP_SERVER_KEY)\n    smtp_port = int(get_attribute_from_config(config, EMAIL_SECTION_KEY, SMTP_PORT_KEY))\n    user = get_attribute_from_config(config, EMAIL_SECTION_KEY, USER_KEY)\n    password = get_attribute_from_config(config, EMAIL_SECTION_KEY, PASSWORD_KEY)\n    server = aiosmtplib.SMTP(hostname=smtp_server, port=smtp_port, loop=loop, use_tls=False)\n    await server.connect()\n    await server.starttls()\n    await server.login(user, password)\n    await server.send_message(email_)\n    await server.quit()", "docstring": "Send an email.\n\nArgs:\nemail_ (email.MIMEMultipart): The email to send.\nconfig (defaultdict): A defaultdict.", "source": "juraj-google-style"}
{"code": "async def get_participant(self, p_id: int, force_update=False) -> Participant:\n    found_p = self._find_participant(p_id)\n    if (force_update or (found_p is None)):\n        (await self.get_participants())\n        found_p = self._find_participant(p_id)\n    return found_p", "docstring": "get a participant by its id\n\n|methcoro|\n\nArgs:\np_id: participant id\nforce_update (dfault=False): True to force an update to the Challonge API\n\nReturns:\nParticipant: None if not found\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, name, result, paren=False):\n        \n        CodeExpression.__init__(self, scope, parent, name, result, paren)\n        self.field_of = None\n        self.reference = None", "docstring": "Constructor for references.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the reference in the program.\nresult (str): The return type of the expression in the program.\n\nKwargs:\nparen (bool): Whether the reference is enclosed in parentheses.", "source": "juraj-google-style"}
{"code": "def __init__(self, queue, ev_writer, flush_secs, flush_complete, flush_sentinel, close_sentinel):\n    threading.Thread.__init__(self, name='EventLoggerThread')\n    self.daemon = True\n    self._queue = queue\n    self._ev_writer = ev_writer\n    self._flush_secs = flush_secs\n    self._next_event_flush_time = 0\n    self._flush_complete = flush_complete\n    self._flush_sentinel = flush_sentinel\n    self._close_sentinel = close_sentinel\n    self.failure_exc_info = ()", "docstring": "Creates an _EventLoggerThread.\n\nArgs:\nqueue: A CloseableQueue from which to dequeue events. The queue will be\nclosed just before the thread exits, whether due to `close_sentinel` or\nany exception raised in the writing loop.\nev_writer: An event writer. Used to log brain events for\nthe visualizer.\nflush_secs: How often, in seconds, to flush the\npending file to disk.\nflush_complete: A threading.Event that will be set whenever a flush\noperation requested via `flush_sentinel` has been completed.\nflush_sentinel: A sentinel element in queue that tells this thread to\nflush the writer and mark the current flush operation complete.\nclose_sentinel: A sentinel element in queue that tells this thread to\nterminate and close the queue.", "source": "github-repos"}
{"code": "def convert(self, obj):\n    if isinstance(obj, pobjects.SymmetricKey):\n        return self._build_core_key(obj, secrets.SymmetricKey)\n    elif isinstance(obj, secrets.SymmetricKey):\n        return self._build_pie_key(obj, pobjects.SymmetricKey)\n    elif isinstance(obj, pobjects.PublicKey):\n        return self._build_core_key(obj, secrets.PublicKey)\n    elif isinstance(obj, secrets.PublicKey):\n        return self._build_pie_key(obj, pobjects.PublicKey)\n    elif isinstance(obj, pobjects.PrivateKey):\n        return self._build_core_key(obj, secrets.PrivateKey)\n    elif isinstance(obj, secrets.PrivateKey):\n        return self._build_pie_key(obj, pobjects.PrivateKey)\n    elif isinstance(obj, pobjects.Certificate):\n        return self._build_core_certificate(obj)\n    elif isinstance(obj, secrets.Certificate):\n        return self._build_pie_certificate(obj)\n    elif isinstance(obj, pobjects.SecretData):\n        return self._build_core_secret_data(obj)\n    elif isinstance(obj, secrets.SecretData):\n        return self._build_pie_secret_data(obj)\n    elif isinstance(obj, pobjects.OpaqueObject):\n        return self._build_core_opaque_object(obj)\n    elif isinstance(obj, secrets.OpaqueObject):\n        return self._build_pie_opaque_object(obj)\n    else:\n        raise TypeError('object type unsupported and cannot be converted')", "docstring": "Convert a Pie object into a core secret object and vice versa.\n\nArgs:\nobj (various): A Pie or core secret object to convert into the\nopposite object space. Required.\n\nRaises:\nTypeError: if the object type is unrecognized or unsupported.", "source": "codesearchnet"}
{"code": "def __init__(self, proto, *, proto_as_initial_chunk: bool=True, parent_splitter: Optional['ComposableSplitter']=None, fields_in_parent: Optional[util.FieldTypes]=None):\n    self._proto = proto\n    self._parent_splitter = parent_splitter\n    self._fields_in_parent = fields_in_parent\n    self._built = False\n    self._add_chunk_order = []\n    self._fix_chunk_order = False\n    if parent_splitter is not None:\n        self._chunks = None\n        self._chunked_message = None\n    elif proto_as_initial_chunk:\n        self._chunks = [self._proto]\n        self._chunked_message = chunk_pb2.ChunkedMessage(chunk_index=0)\n        self._add_chunk_order.append(id(self._proto))\n    else:\n        self._chunks = []\n        self._chunked_message = chunk_pb2.ChunkedMessage()", "docstring": "Initializes ComposableSplitter.\n\nArgs:\nproto: Proto message to split.\nproto_as_initial_chunk: Whether to initialize chunks with the\nuser-provided proto as the initial chunk.\nparent_splitter: The parent `ComposableSplitter` object.\nfields_in_parent: Fields to access `proto` from the parent splitter's\nproto.", "source": "github-repos"}
{"code": "def analyze(self, text, tokenizer=str.split):\n        \n        if not self.tagger:\n            self.tagger = Tagger(self.model,\n                                 preprocessor=self.p,\n                                 tokenizer=tokenizer)\n\n        return self.tagger.analyze(text)", "docstring": "Analyze text and return pretty format.\n\nArgs:\ntext: string, the input text.\ntokenizer: Tokenize input sentence. Default tokenizer is `str.split`.\n\nReturns:\nres: dict.", "source": "juraj-google-style"}
{"code": "def _verify_control_structure(self, device_info, control_info=None):\n    if (control_info is None):\n        control_info = self._find_control_structure(device_info.ram_start, device_info.ram_size)\n    return control_info", "docstring": "Verify that a control structure is still valid or find one.\n\nReturns:\nControlStructure: The verified or discovered control structure.", "source": "codesearchnet"}
{"code": "def set_hibernate_timeout(timeout, power='ac', scheme=None):\n    return _set_powercfg_value(scheme=scheme, sub_group='SUB_SLEEP', setting_guid='HIBERNATEIDLE', power=power, value=timeout)", "docstring": "Set the hibernate timeout in minutes for the given power scheme\n\nArgs:\ntimeout (int):\nThe amount of time in minutes before the computer hibernates\n\npower (str):\nSet the value for AC or DC power. Default is ``ac``. Valid options\nare:\n\n- ``ac`` (AC Power)\n- ``dc`` (Battery)\n\nscheme (str):\nThe scheme to use, leave as ``None`` to use the current. Default is\n``None``. This can be the GUID or the Alias for the Scheme. Known\nAliases are:\n\n- ``SCHEME_BALANCED`` - Balanced\n- ``SCHEME_MAX`` - Power saver\n- ``SCHEME_MIN`` - High performance\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\n# Sets the hibernate timeout to 30 minutes on Battery\nsalt '*' powercfg.set_hibernate_timeout 30 power=dc", "source": "codesearchnet"}
{"code": "def add_url_rule(self, route, endpoint, handler):\n        \n        self.app.add_url_rule(route, endpoint, handler)", "docstring": "Add a new url route.\n\nArgs:\nSee flask.Flask.add_url_route().", "source": "juraj-google-style"}
{"code": "def flash_attention_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int=0, mask_function: Callable=causal_mask_function, attention_mask: Optional[torch.Tensor]=None, **kwargs):\n    if attention_mask is not None:\n        attention_mask = attention_mask[:, -kv_length:]\n        if attention_mask.all():\n            attention_mask = None\n    return attention_mask", "docstring": "Create the attention mask necesary to use FA2. Since FA2 is un-padded by definition, here we simply return\n`None` if the mask is fully causal, or we return the 2D mask which will then be used to extract the seq_lens.\nWe just slice it in case of sliding window.\n\nArgs:\nbatch_size (`int`):\nThe batch size of the input sequence.\ncache_position (`torch.Tensor`):\nA tensor of shape (query_length,) indicating the current indices of the input sequence elements.\nkv_length (`int`):\nThe size that the key and value states will have during the attention computation.\nkv_offset (`int`, optional):\nAn optional offset to indicate at which first position the key and values states will refer to.\nmask_function (`Callable`):\nThe mask factory function describing the mask pattern.\nattention_mask (`torch.Tensor`, optional):\nThe 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)", "source": "github-repos"}
{"code": "def _format_ase2clusgeo(obj, all_atomtypes=None):\n    \n    \n    totalAN = len(obj)\n    if all_atomtypes is not None:\n        atomtype_set = set(all_atomtypes)\n    else:\n        atomtype_set = set(obj.get_atomic_numbers())\n\n    atomtype_lst = np.sort(list(atomtype_set))\n    n_atoms_per_type_lst = []\n    pos_lst = []\n    for atomtype in atomtype_lst:\n        condition = obj.get_atomic_numbers() == atomtype\n        pos_onetype = obj.get_positions()[condition]\n        n_onetype = pos_onetype.shape[0]\n\n        \n        pos_lst.append(pos_onetype)\n        n_atoms_per_type_lst.append(n_onetype)\n\n    typeNs = n_atoms_per_type_lst\n    Ntypes = len(n_atoms_per_type_lst)\n    atomtype_lst\n    Apos = np.concatenate(pos_lst).ravel()\n    return Apos, typeNs, Ntypes, atomtype_lst, totalAN", "docstring": "Takes an ase Atoms object and returns numpy arrays and integers\nwhich are read by the internal clusgeo. Apos is currently a flattened\nout numpy array\n\nArgs:\nobj():\nall_atomtypes():\nsort():", "source": "juraj-google-style"}
{"code": "def MakePmfFromDict(d, name=''):\n    \n    pmf = Pmf(d, name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a PMF from a map from values to probabilities.\n\nArgs:\nd: dictionary that maps values to probabilities\nname: string name for this PMF\n\nReturns:\nPmf object", "source": "juraj-google-style"}
{"code": "def get_size(self):\n    rec = self.get_rectangle()\n    return (int((rec[2] - rec[0])), int((rec[3] - rec[1])))", "docstring": "Get the size of the tree.\n\nReturns:\ntupel: (width, height)", "source": "codesearchnet"}
{"code": "def _GroupByDevices(self, saveables):\n    per_device = collections.defaultdict(lambda: [])\n    for saveable in saveables:\n        canonical_device = set((pydev.canonical_name(spec.device) for spec in saveable.specs))\n        if len(canonical_device) != 1:\n            raise ValueError('All tensors of a saveable object must be on the same device: %s' % saveable.name)\n        per_device[canonical_device.pop()].append(saveable)\n    return sorted(per_device.items(), key=lambda t: t[0])", "docstring": "Group Variable tensor slices per device.\n\nTODO(touts): Make sure that all the devices found are on different\njob/replica/task/cpu|gpu.  It would be bad if 2 were on the same device.\nIt can happen if the devices are unspecified.\n\nArgs:\nsaveables: A list of BaseSaverBuilder.SaveableObject objects.\n\nReturns:\nA list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples.\nThe list is sorted by ascending device_name.\n\nRaises:\nValueError: If the tensors of a saveable are on different devices.", "source": "github-repos"}
{"code": "def build_pos_args_table(full_alias, args, start_index):\n    \n    pos_args_placeholder = get_placeholders(full_alias, check_duplicates=True)\n    pos_args = args[start_index: start_index + len(pos_args_placeholder)]\n\n    if len(pos_args_placeholder) != len(pos_args):\n        error_msg = INSUFFICIENT_POS_ARG_ERROR.format(full_alias,\n                                                      len(pos_args_placeholder),\n                                                      '' if len(pos_args_placeholder) == 1 else 's',\n                                                      len(pos_args))\n        raise CLIError(error_msg)\n\n    \n    for i, pos_arg in enumerate(pos_args):\n        pos_args[i] = pos_arg.replace('\"', '\\\\\"')\n\n    return dict(zip(pos_args_placeholder, pos_args))", "docstring": "Build a dictionary where the key is placeholder name and the value is the position argument value.\n\nArgs:\nfull_alias: The full alias (including any placeholders).\nargs: The arguments that the user inputs in the terminal.\nstart_index: The index at which we start ingesting position arguments.\n\nReturns:\nA dictionary with the key beign the name of the placeholder and its value\nbeing the respective positional argument.", "source": "juraj-google-style"}
{"code": "def parse_original_feature_from_example(example, feature_name):\n  \n  feature = get_example_features(example)[feature_name]\n  feature_type = feature.WhichOneof('kind')\n  original_value = proto_value_for_feature(example, feature_name)\n\n  return OriginalFeatureList(feature_name, original_value, feature_type)", "docstring": "Returns an `OriginalFeatureList` for the specified feature_name.\n\nArgs:\nexample: An example.\nfeature_name: A string feature name.\n\nReturns:\nA filled in `OriginalFeatureList` object representing the feature.", "source": "juraj-google-style"}
{"code": "def _send_impression_event(self, experiment, variation, user_id, attributes):\n    \n\n    impression_event = self.event_builder.create_impression_event(experiment,\n                                                                  variation.id,\n                                                                  user_id,\n                                                                  attributes)\n\n    self.logger.debug('Dispatching impression event to URL %s with params %s.' % (\n      impression_event.url,\n      impression_event.params\n    ))\n\n    try:\n      self.event_dispatcher.dispatch_event(impression_event)\n    except:\n      self.logger.exception('Unable to dispatch impression event!')\n\n    self.notification_center.send_notifications(enums.NotificationTypes.ACTIVATE,\n                                                experiment, user_id, attributes, variation, impression_event)", "docstring": "Helper method to send impression event.\n\nArgs:\nexperiment: Experiment for which impression event is being sent.\nvariation: Variation picked for user for the given experiment.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.", "source": "juraj-google-style"}
{"code": "def add_dos_dict(self, dos_dict, key_sort_func=None):\n        \n        if key_sort_func:\n            keys = sorted(dos_dict.keys(), key=key_sort_func)\n        else:\n            keys = dos_dict.keys()\n        for label in keys:\n            self.add_dos(label, dos_dict[label])", "docstring": "Add a dictionary of doses, with an optional sorting function for the\nkeys.\n\nArgs:\ndos_dict: dict of {label: Dos}\nkey_sort_func: function used to sort the dos_dict keys.", "source": "juraj-google-style"}
{"code": "def do_not_descend_map(self):\n    return self._do_not_descend_map", "docstring": "A map from parents to symbols that should not be descended into.\n\nThis map can be edited, but it should not be edited once traversal has\nbegun.\n\nReturns:\nThe map marking symbols to not explore.", "source": "github-repos"}
{"code": "def play_random(env, steps):\n    try:\n        done = True\n        progress = tqdm(range(steps))\n        for _ in progress:\n            if done:\n                _ = env.reset()\n            action = env.action_space.sample()\n            (_, reward, done, info) = env.step(action)\n            progress.set_postfix(reward=reward, info=info)\n            env.render()\n    except KeyboardInterrupt:\n        pass\n    env.close()", "docstring": "Play the environment making uniformly random decisions.\n\nArgs:\nenv (gym.Env): the initialized gym environment to play\nsteps (int): the number of random steps to take\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def NCHWToNHWC(input_tensor):\n    if isinstance(input_tensor, tensor.Tensor):\n        return array_ops.transpose(input_tensor, [0, 2, 3, 1])\n    else:\n        return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]", "docstring": "Convert the input from NCHW format to NHWC.\n\nArgs:\ninput_tensor:  a 4-D tensor, or a 4-element array representing the same.\n\nReturns:\nthe converted tensor or a shape array", "source": "github-repos"}
{"code": "def map_texture_to_surface(texture, surface):\n    (texture_x, texture_y) = texture\n    (surface_h, surface_w) = surface.shape\n    surface_x = np.clip(np.int32(((surface_w * texture_x) - 1e-09)), 0, (surface_w - 1))\n    surface_y = np.clip(np.int32(((surface_h * texture_y) - 1e-09)), 0, (surface_h - 1))\n    surface_z = surface[(surface_y, surface_x)]\n    return surface_z", "docstring": "Returns values on a surface for points on a texture.\n\nArgs:\ntexture (texture): the texture to trace over the surface\nsurface (surface): the surface to trace along\n\nReturns:\nan array of surface heights for each point in the\ntexture. Line separators (i.e. values that are ``nan`` in\nthe texture) will be ``nan`` in the output, so the output\nwill have the same dimensions as the x/y axes in the\ninput texture.", "source": "codesearchnet"}
{"code": "def is_closed(self):\n        \n        for t in self.smi_vector:\n            found = False\n            for s in self.sm_vector:\n                if self.observation_table[s] == self.observation_table[t]:\n                    self.equiv_classes[t] = s\n                    found = True\n                    break\n            if not found:\n                return False, t\n        return True, None", "docstring": "_check if the observation table is closed.\nArgs:\nNone\nReturns:\ntuple (bool, str): True if the observation table is\nclosed and false otherwise. If the table is not closed\nthe escaping string is returned.", "source": "juraj-google-style"}
{"code": "def read_at(self, d, **kwargs):\n    try:\n        return np.array([self._read_at(depth, **kwargs) for depth in d])\n    except:\n        return self._read_at(d, **kwargs)", "docstring": "Read the log at a specific depth or an array of depths.\n\nArgs:\nd (float or array-like)\ninterpolation (str)\nindex(bool)\nreturn_basis (bool)\n\nReturns:\nfloat or ndarray.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states):\n    hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)\n    num_tokens = None\n    next_states = torch.empty_like(hidden_states)\n    for i in range(self.num_experts):\n        expert_hidden = hidden_states[i]\n        expert_hidden_reshaped = expert_hidden.reshape(-1, self.hidden_size)\n        expert_quantized, expert_scale = torch.ops.fbgemm.quantize_fp8_per_row(expert_hidden_reshaped, num_tokens, self.input_scale_ub)\n        sharded_expert_dim = self.gate_up_proj.shape[-1] \n        gate_up_proj_scale_float32 = self.gate_up_proj_scale.to(torch.float32)\n        gate = torch.ops.fbgemm.f8f8bf16_rowwise(expert_quantized, self.gate_up_proj[i].transpose(0, 1)[:sharded_expert_dim].contiguous(), expert_scale, gate_up_proj_scale_float32[i][0][:sharded_expert_dim].view(-1, 1).contiguous(), use_fast_accum=True)\n        up = torch.ops.fbgemm.f8f8bf16_rowwise(expert_quantized, self.gate_up_proj[i].transpose(0, 1)[sharded_expert_dim:].contiguous(), expert_scale, gate_up_proj_scale_float32[i][0][sharded_expert_dim:].view(-1, 1).contiguous(), use_fast_accum=True)\n        activated = up * self.act_fn(gate)\n        activated_quantized, activated_scale = torch.ops.fbgemm.quantize_fp8_per_row(activated, num_tokens, self.input_scale_ub)\n        down_proj_scale_float32 = self.down_proj_scale.to(torch.float32)\n        expert_output = torch.ops.fbgemm.f8f8bf16_rowwise(activated_quantized, self.down_proj[i].transpose(0, 1).contiguous(), activated_scale, down_proj_scale_float32[i].view(-1, 1).contiguous(), use_fast_accum=True)\n        next_states[i] = expert_output\n    next_states = next_states.to(hidden_states.device)\n    return next_states.view(-1, self.hidden_size)", "docstring": "Args:\nhidden_states (torch.Tensor): (batch_size * token_num, hidden_size)\nReturns:\ntorch.Tensor: (batch_size * token_num, hidden_size)", "source": "github-repos"}
{"code": "def getStreamNetworkAsGeoJson(self, session, withNodes=True):\n        \n        features_list = []\n\n        \n        for link in self.streamLinks:\n            link_geoJson = link.getAsGeoJson(session)\n\n            if link_geoJson:\n                link_geometry = json.loads(link.getAsGeoJson(session))\n\n                link_properties = {\"link_number\": link.linkNumber,\n                                   \"type\": link.type,\n                                   \"num_elements\": link.numElements,\n                                   \"dx\": link.dx,\n                                   \"erode\": link.erode,\n                                   \"subsurface\": link.subsurface}\n\n                link_feature = {\"type\": \"Feature\",\n                                \"geometry\": link_geometry,\n                                \"properties\": link_properties,\n                                \"id\": link.id}\n\n                features_list.append(link_feature)\n\n            \n            if withNodes:\n                for node in link.nodes:\n                    node_geoJson = node.getAsGeoJson(session)\n\n                    if node_geoJson:\n                        node_geometry = json.loads(node_geoJson)\n\n                    node_properties = {\"link_number\": link.linkNumber,\n                                       \"node_number\": node.nodeNumber,\n                                       \"elevation\": node.elevation}\n\n                    node_feature = {\"type\": \"Feature\",\n                                    \"geometry\": node_geometry,\n                                    \"properties\": node_properties,\n                                    \"id\": node.id}\n\n                    features_list.append(node_feature)\n\n        feature_collection = {\"type\": \"FeatureCollection\",\n                              \"features\": features_list}\n\n        return json.dumps(feature_collection)", "docstring": "Retrieve the stream network geometry in GeoJSON format.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nwithNodes (bool, optional): Include nodes. Defaults to False.\n\nReturns:\nstr: GeoJSON string.", "source": "juraj-google-style"}
{"code": "def cancelPnLSingle(\n            self, account: str, modelCode: str, conId: int):\n        \n        key = (account, modelCode, conId)\n        reqId = self.wrapper.pnlSingleKey2ReqId.pop(key, None)\n        if reqId:\n            self.client.cancelPnLSingle(reqId)\n            self.wrapper.pnlSingles.pop(reqId, None)\n        else:\n            self._logger.error(\n                'cancelPnLSingle: No subscription for '\n                f'account {account}, modelCode {modelCode}, conId {conId}')", "docstring": "Cancel PnLSingle subscription for the given account, modelCode\nand conId.\n\nArgs:\naccount: Cancel for this account name.\nmodelCode: Cancel for this account model.\nconId: Cancel for this contract ID.", "source": "juraj-google-style"}
{"code": "def __init__(self, reactor, hostname, port):\n        \n        service.MultiService.__init__(self)\n        self._reactor = reactor\n        self._hostname = hostname\n        self._port = port\n        self._client_factory = None\n        self._tcp_client = None\n\n        self._repeating_metric_handles = []", "docstring": "Construct a CarbonClientService.\n\nArgs:\nreactor: The Twisted reactor for your application.\nhostname: The hostname of your Carbon server.\nport: The port that the Carbon pickle endpoint is listening on.", "source": "juraj-google-style"}
{"code": "def sg_one_hot(tensor, opt):\n    r\n    assert opt.depth is not None, 'depth is mandatory.'\n    return tf.one_hot(tensor, opt.depth, name=opt.name)", "docstring": "r\"\"\"Converts a tensor into a one-hot tensor.\n\nSee `tf.one_hot()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` ( automatically given by chain )\nopt:\ndepth: The number of classes.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 qubits: List[Qubit],\n                 registers: List[RegisterSlot],\n                 mem_slots: List[MemorySlot]):\n        \n        self._qubits = qubits\n        self._reg_slots = registers\n        self._mem_slots = mem_slots", "docstring": "Create device specification with specified `qubits`.\nArgs:\nqubits:", "source": "juraj-google-style"}
{"code": "def compute_precedence(terminals, productions, precedence_levels):\n    precedence = collections.OrderedDict()\n    for terminal in terminals:\n        precedence[terminal] = DEFAULT_PREC\n    level_precs = range(len(precedence_levels), 0, (- 1))\n    for (i, level) in zip(level_precs, precedence_levels):\n        assoc = level[0]\n        for symbol in level[1:]:\n            precedence[symbol] = (assoc, i)\n    for (production, prec_symbol) in productions:\n        if (prec_symbol is None):\n            prod_terminals = ([symbol for symbol in production.rhs if (symbol in terminals)] or [None])\n            precedence[production] = precedence.get(prod_terminals[(- 1)], DEFAULT_PREC)\n        else:\n            precedence[production] = precedence.get(prec_symbol, DEFAULT_PREC)\n    return precedence", "docstring": "Computes the precedence of terminal and production.\n\nThe precedence of a terminal is it's level in the PRECEDENCE tuple. For\na production, the precedence is the right-most terminal (if it exists).\nThe default precedence is DEFAULT_PREC - (LEFT, 0).\n\nReturns:\nprecedence - dict[terminal | production] = (assoc, level)", "source": "codesearchnet"}
{"code": "def initialize_tpu_system(cluster_resolver=None):\n    return tpu_strategy_util.initialize_tpu_system_impl(cluster_resolver, TPUClusterResolver)", "docstring": "Initialize the TPU devices.\n\nArgs:\ncluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\nwhich provides information about the TPU cluster.\nReturns:\nThe tf.tpu.Topology object for the topology of the TPU cluster. If called\ninside tf.function, it returns the serialized topology object instead.\n\nRaises:\nRuntimeError: If running inside a tf.function.\nNotFoundError: If no TPU devices found in eager mode.", "source": "github-repos"}
{"code": "def Parse(self, rdf_data):\n    \n    if not isinstance(rdf_data, (list, set)):\n      raise ProcessingError(\"Bad host data format: %s\" % type(rdf_data))\n    if self.baseline:\n      comparison = self.baseliner.Parse(rdf_data)\n    else:\n      comparison = rdf_data\n    found = self.handler.Parse(comparison)\n    results = self.hint.Render(found)\n    return self.matcher.Detect(comparison, results)", "docstring": "Process rdf data through filters. Test if results match expectations.\n\nProcessing of rdf data is staged by a filter handler, which manages the\nprocessing of host data. The output of the filters are compared against\nexpected results.\n\nArgs:\nrdf_data: An list containing 0 or more rdf values.\n\nReturns:\nAn anomaly if data didn't match expectations.\n\nRaises:\nProcessingError: If rdf_data is not a handled type.", "source": "juraj-google-style"}
{"code": "def CreateFeedItemAddOperation(name, price, date, ad_customizer_feed):\n  \n  feed_item = {\n      'feedId': ad_customizer_feed['feedId'],\n      'attributeValues': [\n          {\n              'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'],\n              'stringValue': name\n          },\n          {\n              'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'],\n              'stringValue': price\n          },\n          {\n              'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'],\n              'stringValue': date\n          }\n      ]\n  }\n\n  operation = {\n      'operator': 'ADD',\n      'operand': feed_item\n  }\n\n  return operation", "docstring": "Creates a FeedItemOperation.\n\nThe generated FeedItemOperation will create a FeedItem with the specified\nvalues when sent to FeedItemService.mutate.\n\nArgs:\nname: the value for the name attribute of the FeedItem.\nprice: the value for the price attribute of the FeedItem.\ndate: the value for the date attribute of the FeedItem.\nad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems\nwith.\n\nReturns:\nA new FeedItemOperation for adding a FeedItem.", "source": "juraj-google-style"}
{"code": "def _update_services_target_state(sdp_target_state: str):\n    \n    service_states = get_service_state_list()\n\n    \n    for service in service_states:\n        if service.current_state != sdp_target_state:\n            LOG.debug('Setting the target state of %s to be %s', service.id,\n                      sdp_target_state)\n            service.update_target_state(sdp_target_state)", "docstring": "Update the target states of services based on SDP target state.\n\nWhen we get a new target state this function is called to ensure\ncomponents receive the target state(s) and/or act on them.\n\nArgs:\nsdp_target_state (str): Target state of SDP", "source": "juraj-google-style"}
{"code": "def _GetMemberForOffset(self, offset):\n    \n    if offset < 0 or offset >= self.uncompressed_data_size:\n      raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format(\n          offset, self.uncompressed_data_size))\n\n    for end_offset, member in iter(self._members_by_end_offset.items()):\n      if offset < end_offset:\n        return member\n\n    return None", "docstring": "Finds the member whose data includes the provided offset.\n\nArgs:\noffset (int): offset in the uncompressed data to find the\ncontaining member for.\n\nReturns:\ngzipfile.GzipMember: gzip file member or None if not available.\n\nRaises:\nValueError: if the provided offset is outside of the bounds of the\nuncompressed data.", "source": "juraj-google-style"}
{"code": "def logdet(matrix, name=None):\n    with ops.name_scope(name, 'logdet', [matrix]):\n        chol = gen_linalg_ops.cholesky(matrix)\n        return 2.0 * math_ops.reduce_sum(math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))), axis=[-1])", "docstring": "Computes log of the determinant of a hermitian positive definite matrix.\n\n```python\n# Compute the determinant of a matrix while reducing the chance of over- or\nunderflow:\nA = ... # shape 10 x 10\ndet = tf.exp(tf.linalg.logdet(A))  # scalar\n```\n\nArgs:\nmatrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,\nor `complex128` with shape `[..., M, M]`.\nname:  A name to give this `Op`.  Defaults to `logdet`.\n\nReturns:\nThe natural log of the determinant of `matrix`.\n\n@compatibility(numpy)\nEquivalent to numpy.linalg.slogdet, although no sign is returned since only\nhermitian positive definite matrices are supported.\n@end_compatibility", "source": "github-repos"}
{"code": "def remove_tree_by_path(self, path):\n    with transaction.manager:\n        trees = self.path_db.get(path, None)\n    if (not trees):\n        return\n    for tree in trees:\n        return self._remove_tree(tree)", "docstring": "Remove the tree from database by given `path`.\n\nArgs:\npath (str): Path of the tree.", "source": "codesearchnet"}
{"code": "def restore_model(self, directory=None, file=None):\n        \n        self.model.restore(directory=directory, file=file)", "docstring": "Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is\nrestored. If no checkpoint directory is given, the model's default saver directory is\nused (unless file specifies the entire path).\n\nArgs:\ndirectory: Optional checkpoint directory.\nfile: Optional checkpoint file, or path if directory not given.", "source": "juraj-google-style"}
{"code": "def get_ticker_metadata(self, ticker, fmt='json'):\n        \n        url = \"tiingo/daily/{}\".format(ticker)\n        response = self._request('GET', url)\n        data = response.json()\n        if fmt == 'json':\n            return data\n        elif fmt == 'object':\n            return dict_to_object(data, \"Ticker\")", "docstring": "Return metadata for 1 ticker\nUse TiingoClient.list_tickers() to get available options\n\nArgs:\nticker (str) : Unique identifier for stock", "source": "juraj-google-style"}
{"code": "def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):\n    monitor_process = None\n    try:\n        p = subprocess.Popen(args, cwd=cwd, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        pids_to_kill = [p.pid]\n        script = ('import %s;%s._wait_and_kill(%s, %s)' % (__name__, __name__, str(pid_to_wait), str(pids_to_kill)))\n        monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)\n        while (p.poll() is None):\n            line = p.stdout.readline()\n            if (not six.PY2):\n                line = line.decode()\n            if ((std_out_filter_fn is None) or std_out_filter_fn(line)):\n                sys.stdout.write(line)\n    finally:\n        if monitor_process:\n            monitor_process.kill()", "docstring": "Start a process, and have it depend on another specified process.\n\nArgs:\nargs: the args of the process to start and monitor.\npid_to_wait: the process to wait on. If the process ends, also kill the started process.\nstd_out_filter_fn: a filter function which takes a string content from the stdout of the\nstarted process, and returns True if the string should be redirected to console stdout.\ncwd: the current working directory for the process to start.", "source": "codesearchnet"}
{"code": "def retry(max_count):\n    if max_count <= 1:\n        raise ValueError(f'The `max_count` for `retry` must be larger than 1, got \"{max_count}\".')\n\n    def _outer_decorator(func):\n        setattr(func, ATTR_MAX_RETRY_CNT, max_count)\n\n        @functools.wraps(func)\n        def _wrapper(*args):\n            func(*args)\n        return _wrapper\n    return _outer_decorator", "docstring": "Decorator for retrying a test case until it passes.\n\nThe BaseTestClass will keep executing the test cases annotated with this\ndecorator until the test passes, or the maxinum number of iterations have\nbeen met.\n\nThis decorator only stores the information needed for the retry. It does not\nexecute the retry.\n\nArgs:\nmax_count: int, the maximum number of times to execute the decorated test\ncase.\n\nReturns:\nThe wrapped test function.\n\nRaises:\nValueError, if the user input is invalid.", "source": "github-repos"}
{"code": "def from_stream(credential_filename):\n    if (credential_filename and os.path.isfile(credential_filename)):\n        try:\n            return _get_application_default_credential_from_file(credential_filename)\n        except (ApplicationDefaultCredentialsError, ValueError) as error:\n            extra_help = ' (provided as parameter to the from_stream() method)'\n            _raise_exception_for_reading_json(credential_filename, extra_help, error)\n    else:\n        raise ApplicationDefaultCredentialsError('The parameter passed to the from_stream() method should point to a file.')", "docstring": "Create a Credentials object by reading information from a file.\n\nIt returns an object of type GoogleCredentials.\n\nArgs:\ncredential_filename: the path to the file from where the\ncredentials are to be read\n\nRaises:\nApplicationDefaultCredentialsError: raised when the credentials\nfail to be retrieved.", "source": "codesearchnet"}
{"code": "class ConfidenceCriteria(StoppingCriteria):\n\n    def __init__(self, assistant_confidence_threshold):\n        self.assistant_confidence_threshold = assistant_confidence_threshold\n\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:\n        probs = scores[-1].softmax(-1)\n        p = probs[0, input_ids[0, -1]].item()\n        if p < self.assistant_confidence_threshold:\n            return True\n        return False", "docstring": "This class can be used to stop generation whenever assistant model's confidence in its prediction for the current token is lower than the threshold\n`model.generation_config.assistant_confidence_threshold` even if the number of speculative tokens (defined by `num_assistant_tokens`) is not yet reached.\n\nArgs:\nassistant_confidence_threshold (`float`):\nThe value of the threshold.", "source": "github-repos"}
{"code": "class SegGptImageSegmentationOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    pred_masks: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Output type of [`SegGptImageSegmentationOutput`].\n\nArgs:\nloss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):\nThe loss value.\npred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\nThe predicted masks.\nhidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\nof shape `(batch_size, patch_height, patch_width, hidden_size)`.\nattentions (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`):\nTuple of `torch.FloatTensor` (one for each layer) of shape\n`(batch_size, num_heads, seq_len, seq_len)`.", "source": "github-repos"}
{"code": "def wait_and_ignore(condition, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):\n    try:\n        return wait_until(condition, timeout, sleep)\n    except:\n        pass", "docstring": "Waits wrapper that'll wait for the condition to become true, but will\nnot error if the condition isn't met.\n\nArgs:\ncondition (lambda) - Lambda expression to wait for to evaluate to True.\n\nKwargs:\ntimeout (number) : Maximum number of seconds to wait.\nsleep (number) : Sleep time to wait between iterations.\n\nExample::\n\nwait_and_ignore(lambda: driver.find_element_by_id(\"success\").is_displayed(),\ntimeout=30,\nsleep=0.5)\n\nis equivalent to::\n\nend_time = datetime.now() + timedelta(seconds=30)\nwhile datetime.now() < end_time:\ntry:\nif driver.find_element_by_id(\"success\").is_displayed():\nbreak;\nexcept:\npass\ntime.sleep(0.5)", "source": "codesearchnet"}
{"code": "def __eq__(self, other):\n        \n        if (type(self) is type(other) and\n                self._ub == other._ub and\n                self._lb == other._lb):\n            return True\n        return False", "docstring": "Two LO ranges are the same if they are of the same type, and\nhave the same frequency range\n\nArgs:\nother (LoRange): other LoRange\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def __init__(self, input: EventSetNode, func: MapFunction, receive_extras: bool, dtype: Optional[DType]=None, dtype_to_dtype: Optional[Dict[DType, DType]]=None, feature_name_to_dtype: Optional[Dict[str, DType]]=None):\n    super().__init__()\n    assert sum((x is not None for x in [dtype, dtype_to_dtype, feature_name_to_dtype])) <= 1\n    output_dtypes = build_dtypes_list_from_target_dtypes(input, dtype, dtype_to_dtype, feature_name_to_dtype)\n    assert len(output_dtypes) == len(input.schema.features)\n    self._receive_extras = receive_extras\n    self.add_attribute('func', func)\n    self._func = func\n    self.add_input('input', input)\n    self.add_output('output', create_node_new_features_existing_sampling(features=[FeatureSchema(f.name, dtype) for f, dtype in zip(input.schema.features, output_dtypes)], sampling_node=input, creator=self))\n    self.check()", "docstring": "Constructor.\n\nThere can only be one of dtype, dtype_to_dtype or feature_name_to_dtype.\n\nArgs:\ninput: Input node.\nfunc: Function to apply to each elemnent.\ndtype: All the output features are expected to be of this type.\ndtype_to_dtype: Mapping between current dtype and new dtype.\nfeature_name_to_dtype: Mapping between feature name and new dtype.", "source": "github-repos"}
{"code": "async def register_user(self, password, **kwds):\n    user = (await self._create_remote_user(password=password, **kwds))\n    if (not ('pk' in user)):\n        user['pk'] = user['id']\n    match_query = (self.model.user == user['id'])\n    if (self.model.select().where(match_query).count() > 0):\n        raise RuntimeError('The user is already registered.')\n    password = self.model(user=user['id'], password=password)\n    password.save()\n    return {'user': user, 'sessionToken': self._user_session_token(user)}", "docstring": "This function is used to provide a sessionToken for later requests.\n\nArgs:\nuid (str): The", "source": "codesearchnet"}
{"code": "def orient_undirected_graph(self, data, graph):\n        \n        \n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n        self.arguments['{SCORE}'] = self.score\n        self.arguments['{BETA}'] = str(self.beta)\n        self.arguments['{OPTIM}'] = str(self.optim).upper()\n        self.arguments['{ALPHA}'] = str(self.alpha)\n\n        whitelist = DataFrame(list(nx.edges(graph)), columns=[\"from\", \"to\"])\n        blacklist = DataFrame(list(nx.edges(nx.DiGraph(DataFrame(-nx.adj_matrix(graph, weight=None).to_dense() + 1,\n                                                                 columns=list(graph.nodes()),\n                                                                 index=list(graph.nodes()))))), columns=[\"from\", \"to\"])\n        results = self._run_bnlearn(data, whitelist=whitelist,\n                                   blacklist=blacklist, verbose=self.verbose)\n\n        return nx.relabel_nodes(nx.DiGraph(results),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Run the algorithm on an undirected graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.Graph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution on the given skeleton.", "source": "juraj-google-style"}
{"code": "def read_nose(in_file):\n        \n        suites = {}\n        doc_xml = minidom.parse(in_file)\n        suite_xml = doc_xml.getElementsByTagName(\"testsuite\")[0]\n        for case_xml in suite_xml.getElementsByTagName('testcase'):\n            classname = case_xml.getAttribute('classname')\n            if classname not in suites:\n                suites[classname] = []\n            case = {\n                'name': case_xml.getAttribute('name'),\n                'time': float(case_xml.getAttribute('time')),\n            }\n            \n            skipped_xml = case_xml.getElementsByTagName('skipped')\n            if skipped_xml:\n                if skipped_xml[0].hasAttribute('type'):\n                    type = skipped_xml[0].getAttribute('type')\n                else:\n                    type = ''\n                case['skipped'] = {\n                    'type': type,\n                    'message': skipped_xml[0].getAttribute('message'),\n                    'text': \"\".join([child.nodeValue for child in skipped_xml[0].childNodes]),\n                }\n\n            failure_xml = case_xml.getElementsByTagName('failure')\n            if failure_xml:\n                if failure_xml[0].hasAttribute('type'):\n                    type = failure_xml[0].getAttribute('type')\n                else:\n                    type = ''\n                case['failure'] = {\n                    'type': type,\n                    'message': failure_xml[0].getAttribute('message'),\n                    'text': \"\".join([child.nodeValue for child in failure_xml[0].childNodes]),\n                }\n\n            error_xml = case_xml.getElementsByTagName('error')\n            if error_xml:\n                if error_xml[0].hasAttribute('type'):\n                    type = error_xml[0].getAttribute('type')\n                else:\n                    type = ''\n                case['error'] = {\n                    'type': type,\n                    'message': error_xml[0].getAttribute('message'),\n                    'text': \"\".join([child.nodeValue for child in error_xml[0].childNodes]),\n                }\n\n            suites[classname].append(case)\n\n        return suites", "docstring": "Parse nose-style test reports into a `dict`\n\nArgs:\nin_file (:obj:`str`): path to nose-style test report\n\nReturns:\n:obj:`dict`: dictionary of test suites", "source": "juraj-google-style"}
{"code": "def click(self, x, y):\n        \n        self._run_nowait('target.tap({x: %d, y: %d})' % (x/self._scale, y/self._scale))\n        return self", "docstring": "Simulate click operation\nArgs:\n- x (int): position of x\n- y (int): position of y\nReturns:\nself", "source": "juraj-google-style"}
{"code": "def _operator(attr):\n\n    @functools.wraps(attr)\n    def func(a, *args):\n        return attr(a.value, *args)\n    return func", "docstring": "Defers an operator overload to `attr`.\n\nArgs:\nattr: Operator attribute to use.\n\nReturns:\nFunction calling operator attribute.", "source": "codesearchnet"}
{"code": "def receiveds_format(receiveds):\n    log.debug('Receiveds for this email are parsed')\n    output = []\n    counter = Counter()\n    for i in receiveds[::(- 1)]:\n        j = {k: v.strip() for (k, v) in i.items() if v}\n        j['hop'] = (counter['hop'] + 1)\n        if i.get('date'):\n            i['date'] = i['date'].split(';')[(- 1)]\n            try:\n                (j['date_utc'], _) = convert_mail_date(i['date'])\n            except TypeError:\n                j['date_utc'] = None\n        size = len(output)\n        now = j.get('date_utc')\n        if (size and now):\n            before = output[(counter['hop'] - 1)].get('date_utc')\n            if before:\n                j['delay'] = (now - before).total_seconds()\n            else:\n                j['delay'] = 0\n        else:\n            j['delay'] = 0\n        output.append(j)\n        counter['hop'] += 1\n    else:\n        for i in output:\n            if i.get('date_utc'):\n                i['date_utc'] = i['date_utc'].isoformat()\n        else:\n            return output", "docstring": "Given a list of receiveds hop, adds metadata and reformat\nfield values\n\nArgs:\nreceiveds (list): list of receiveds hops already formatted\n\nReturns:\nlist of receiveds reformated and with new fields", "source": "codesearchnet"}
{"code": "def kill_dashboard(self, check_alive=True):\n        \n        self._kill_process_type(\n            ray_constants.PROCESS_TYPE_DASHBOARD, check_alive=check_alive)", "docstring": "Kill the dashboard.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "juraj-google-style"}
{"code": "def _ExpectedKeysForEntry(self, entry):\n    return [entry.name]", "docstring": "Generate a list of expected cache keys for this type of map.\n\nArgs:\nentry: A NetgroupMapEntry\n\nReturns:\nA list of strings", "source": "github-repos"}
{"code": "def list2str(self, l: List, joiner: str) -> str:\n        \n        result = str()\n        for item in l:\n            if isinstance(item, list):\n                result = result + self.list2str(item, joiner) + joiner\n            elif isinstance(item, dict):\n                result = result + self.dict2str(item, joiner) + joiner\n            elif item:\n                result = result + str(item) + joiner\n        return result", "docstring": "Convert list to str as input for tokenizer\n\nArgs:\nl (list): list for converting\njoiner (str): join the elements using this string to separate them.\n\nReturns: the value of the list as a string", "source": "juraj-google-style"}
{"code": "def _recursive_apply(tensors, apply_fn):\n    tensors_type = type(tensors)\n    if isinstance(tensors, tensor_lib.Tensor):\n        return apply_fn(tensors)\n    elif isinstance(tensors, variables.Variable):\n        return apply_fn(tensors.value())\n    elif isinstance(tensors, (list, tuple)):\n        tensors = [_recursive_apply(t, apply_fn) for t in tensors]\n        if tensors_type is list:\n            return list(tensors)\n        elif tensors_type is tuple:\n            return tuple(tensors)\n        return tensors_type(*tensors)\n    elif tensors_type is dict:\n        return dict(((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()))\n    else:\n        raise TypeError(f'_recursive_apply argument {tensors!r} has invalid type {tensors_type!r}')", "docstring": "Helper method to recursively apply a function to structure of tensors.\n\nThe structure of the tensors should take the form similar to fetches in\n`tf.compat.v1.Session` and includes single `Tensor`, `list`, nested `list`,\n`tuple`,\n`namedtuple`, or `dict`.\n\nArgs:\ntensors: Single `Tensor`, `list`, nested `list, `tuple`, `namedtuple`, or\n`dict`.\napply_fn: Function to apply to each `Tensor` and should return a `Tensor`.\n\nReturns:\nReturns the modified tensors with the same structure.\nRaises:\n`TypeError` if undefined type in the tensors structure.", "source": "github-repos"}
{"code": "def __init__(self, conv_sizes, dense_sizes, scope='cnn-baseline', summary_labels=()):\n        \n\n        network = []\n        for size in conv_sizes:\n            network.append(dict(type='conv2d', size=size))\n\n        \n        network[0]['window'] = 5\n\n        network.append(dict(type='flatten'))  \n        for size in dense_sizes:\n            network.append(dict(type='dense', size=size))\n\n        super(CNNBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)", "docstring": "CNN baseline.\n\nArgs:\nconv_sizes: List of convolutional layer sizes\ndense_sizes: List of dense layer sizes", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool=False):\n    residual = hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training)\n    tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.activation_dropout(hidden_states, training=training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states, training=training)\n    hidden_states = residual + hidden_states\n    return (hidden_states, self_attn_weights)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`", "source": "github-repos"}
{"code": "def check_result(data, key=''):\n    if (not isinstance(data, dict)):\n        return False\n    if key:\n        if (key in data):\n            return True\n        return False\n    if ('resultCode' in data.keys()):\n        return (True if (data.get('resultCode', (- 1)) == 0) else False)\n    elif ('code' in data.keys()):\n        return (True if (data.get('code', (- 1)) == 0) else False)\n    return False", "docstring": "Check the result of an API response.\n\nIdeally, this should be done by checking that the value of the ``resultCode``\nattribute is 0, but there are endpoints that simply do not follow this rule.\n\nArgs:\ndata (dict): Response obtained from the API endpoint.\nkey (string): Key to check for existence in the dict.\n\nReturns:\nbool: True if result was correct, False otherwise.", "source": "codesearchnet"}
{"code": "def append_block(self, node, reverse=False):\n    \n    if not isinstance(node, grammar.STATEMENTS):\n      raise ValueError\n    if reverse:\n      self.to_append_block[-1].appendleft(node)\n    else:\n      self.to_append_block[-1].append(node)", "docstring": "Append a statement to the current block.\n\nArgs:\nnode: The statement to prepend.\nreverse: When called multiple times, this flag determines whether the\nstatement should be prepended or appended to the already inserted\nstatements.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "juraj-google-style"}
{"code": "def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None):\n    x = residual_block_v2(x, filters, conv_shortcut=True, name=name + '_block1')\n    for i in range(2, blocks):\n        x = residual_block_v2(x, filters, name=name + '_block' + str(i))\n    x = residual_block_v2(x, filters, stride=stride1, name=name + '_block' + str(blocks))\n    return x", "docstring": "A set of stacked residual blocks.\n\nArgs:\nx: Input tensor.\nfilters: Number of filters in the bottleneck layer in a block.\nblocks: Number of blocks in the stacked blocks.\nstride1: Stride of the first layer in the first block. Defaults to `2`.\nname: Stack label.\n\nReturns:\nOutput tensor for the stacked blocks.", "source": "github-repos"}
{"code": "def _CreateSanitizedDestination(self, source_file_entry, source_path_spec, source_data_stream_name, destination_path):\n    file_system = source_file_entry.GetFileSystem()\n    path = getattr(source_path_spec, 'location', None)\n    path_segments = file_system.SplitPath(path)\n    for (index, path_segment) in enumerate(path_segments):\n        path_segments[index] = ''.join([(character if (character not in self._DIRTY_CHARACTERS) else '_') for character in path_segment])\n    target_filename = path_segments.pop()\n    parent_path_spec = getattr(source_file_entry.path_spec, 'parent', None)\n    while parent_path_spec:\n        if (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):\n            path_segments.insert(0, parent_path_spec.location[1:])\n            break\n        elif (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW):\n            path_segments.insert(0, parent_path_spec.location[1:])\n        parent_path_spec = getattr(parent_path_spec, 'parent', None)\n    target_directory = os.path.join(destination_path, *path_segments)\n    if source_data_stream_name:\n        target_filename = '{0:s}_{1:s}'.format(target_filename, source_data_stream_name)\n    return (target_directory, target_filename)", "docstring": "Creates a sanitized path of both destination directory and filename.\n\nThis function replaces non-printable and other characters defined in\n_DIRTY_CHARACTERS with an underscore \"_\".\n\nArgs:\nsource_file_entry (dfvfs.FileEntry): file entry of the source file.\nsource_path_spec (dfvfs.PathSpec): path specification of the source file.\nsource_data_stream_name (str): name of the data stream of the source file\nentry.\ndestination_path (str): path of the destination directory.\n\nReturns:\ntuple[str, str]: sanitized paths of both destination directory and\nfilename.", "source": "codesearchnet"}
{"code": "def check(self, src, expected=None, prologue=None, name=None, version=None, platform='linux'):\n    ast = self.parse(src, name, version, platform)\n    actual = pytd_utils.Print(ast)\n    if expected != IGNORE:\n        if expected is None:\n            expected = src\n        expected = textwrap.dedent(expected).lstrip()\n        if prologue:\n            expected = f'{textwrap.dedent(prologue)}\\n\\n{expected}'\n        self.assertMultiLineEqual(expected.rstrip(), actual)\n    return ast", "docstring": "Check the parsing of src.\n\nThis checks that parsing the source and then printing the resulting\nAST results in the expected text.\n\nArgs:\nsrc: A source string.\nexpected: Optional expected result string.  If not provided, src is used\ninstead.  The special value IGNORE can be used to skip checking the\nparsed results against expected text.\nprologue: An optional prologue to be prepended to the expected text before\ncomparison.  Useful for imports that are introduced during printing the\nAST.\nname: The name of the module.\nversion: A python version tuple (None for default value).\nplatform: A platform string (defaults to \"linux\").\n\nReturns:\nThe parsed pytd.TypeDeclUnit.", "source": "github-repos"}
{"code": "def _assert_float_dtype(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_floating:\n        raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {dtype}.')\n    return dtype", "docstring": "Validate and return floating point type based on `dtype`.\n\n`dtype` must be a floating point type.\n\nArgs:\ndtype: The data type to validate.\n\nReturns:\nValidated type.\n\nRaises:\nValueError: if `dtype` is not a floating point type.", "source": "github-repos"}
{"code": "def intersect(self, other: Iterable[Flag]) -> FrozenSet[Flag]:\n        \n        if Wildcard in self._defined:\n            return frozenset(other)\n        else:\n            return self._defined & frozenset(other)", "docstring": "Returns the subset of flags in ``other`` that are also in\n:attr:`.defined`. If the wildcard flag is defined, then all flags in\n``other`` are returned.\n\nThe ``&`` operator is an alias of this method, making these two\ncalls equivalent::\n\nperm_flags.union(other_flags)\nperm_flags & other_flags\n\nArgs:\nother: The operand flag set.", "source": "juraj-google-style"}
{"code": "def tryload(self, cfgstr=None, on_error='raise'):\n    cfgstr = self._rectify_cfgstr(cfgstr)\n    if self.enabled:\n        try:\n            if (self.verbose > 1):\n                self.log('[cacher] tryload fname={}'.format(self.fname))\n            return self.load(cfgstr)\n        except IOError:\n            if (self.verbose > 0):\n                self.log('[cacher] ... {} cache miss'.format(self.fname))\n        except Exception:\n            if (self.verbose > 0):\n                self.log('[cacher] ... failed to load')\n            if (on_error == 'raise'):\n                raise\n            elif (on_error == 'clear'):\n                self.clear(cfgstr)\n                return None\n            else:\n                raise KeyError('Unknown method on_error={}'.format(on_error))\n    elif (self.verbose > 1):\n        self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))\n    return None", "docstring": "Like load, but returns None if the load fails due to a cache miss.\n\nArgs:\non_error (str): How to handle non-io errors errors. Either raise,\nwhich re-raises the exception, or clear which deletes the cache\nand returns None.", "source": "codesearchnet"}
{"code": "def prepend_to_list(self, key, *value, pipeline=False):\n        \n        if pipeline:\n            self._pipeline.lpush(key, *value)\n        else:\n            self._db.lpush(key, *value)", "docstring": "Add new element to the start of the list stored at key.\n\nArgs:\nkey (str): Key where the list is stored\nvalue: Value to add to the list\npipeline (bool): True, start a transaction block. Default false.", "source": "juraj-google-style"}
{"code": "def _decode_exp(self, access_token=None):\n        \n        c = self.get_credentials()\n        jwt = access_token or c.access_token\n        x = self.decode_jwt_payload(jwt)\n\n        if 'exp' in x:\n            try:\n                exp = int(x['exp'])\n            except ValueError:\n                raise PanCloudError(\n                    \"Expiration time (exp) must be an integer\")\n            else:\n                self.jwt_exp = exp\n                return exp\n        else:\n            raise PanCloudError(\"No exp field found in payload\")", "docstring": "Extract exp field from access token.\n\nArgs:\naccess_token (str): Access token to decode. Defaults to ``None``.\n\nReturns:\nint: JWT expiration in epoch seconds.", "source": "juraj-google-style"}
{"code": "def processed_shape(self, shape):\n    for processor in self.preprocessors:\n        shape = processor.processed_shape(shape=shape)\n    return shape", "docstring": "Shape of preprocessed state given original shape.\n\nArgs:\nshape: original state shape\n\nReturns: processed state shape", "source": "codesearchnet"}
{"code": "def position(x=None, y=None):\n    \n    posx, posy = platformModule._position()\n    posx = int(posx)\n    posy = int(posy)\n    if x is not None: \n        posx = int(x)\n    if y is not None: \n        posy = int(y)\n    return Point(posx, posy)", "docstring": "Returns the current xy coordinates of the mouse cursor as a two-integer\ntuple.\n\nArgs:\nx (int, None, optional) - If not None, this argument overrides the x in\nthe return value.\ny (int, None, optional) - If not None, this argument overrides the y in\nthe return value.\n\nReturns:\n(x, y) tuple of the current xy coordinates of the mouse cursor.", "source": "juraj-google-style"}
{"code": "def __init__(self, workflow_name, graph_name):\n        \n        self.workflow_name = workflow_name\n        self.graph_name = graph_name", "docstring": "Initialize the exception for invalid workflow definitions.\n\nArgs:\nworkflow_name (str): The name of the workflow that contains an invalid\ndefinition.\ngraph_name (str): The name of the dag that is invalid.", "source": "juraj-google-style"}
{"code": "def _update_dict(self, to_dict, from_dict):\n        \n        for key, value in from_dict.items():\n            if key in to_dict and isinstance(to_dict[key], dict) and \\\n                    isinstance(from_dict[key], dict):\n                self._update_dict(to_dict[key], from_dict[key])\n            else:\n                to_dict[key] = from_dict[key]", "docstring": "Recursively merges the fields for two dictionaries.\n\nArgs:\nto_dict (dict): The dictionary onto which the merge is executed.\nfrom_dict (dict): The dictionary merged into to_dict", "source": "juraj-google-style"}
{"code": "def shuffle_dataset(filenames, extra_fn=None):\n    if outputs_exist(filenames):\n        tf.logging.info('Skipping shuffle because output files exist')\n        return\n    tf.logging.info('Shuffling data...')\n    for filename in filenames:\n        _shuffle_single(filename, extra_fn=extra_fn)\n    tf.logging.info('Data shuffled.')", "docstring": "Shuffles the dataset.\n\nArgs:\nfilenames: a list of strings\nextra_fn: an optional function from list of records to list of records\nto be called after shuffling a file.", "source": "codesearchnet"}
{"code": "def flatten_expr(self):\n    if '[' in self.expr and self.is_recursive():\n        return '_' + self.expr.replace('.', '_DOT').replace('[', '_LBAR_').replace(']', '_RBAR').replace(', ', '_COMMA_')\n    return self.expr", "docstring": "Flattens the expression into a legal variable name if necessary.\n\nPytype stores parameterized recursive types in intermediate variables. If\nself is such a type, this method flattens self.expr into a string that can\nserve as a variable name. For example, 'MyRecursiveAlias[int, str]' is\nflattened into '_MyRecursiveAlias_LBAR_int_COMMA_str_RBAR'.\n\nReturns:\nIf self is a parameterized recursive type, a flattened version of\nself.expr that is a legal variable name. Otherwise, self.expr unchanged.", "source": "github-repos"}
{"code": "def sparse_message_pass_batched(node_states, adjacency_matrices, num_edge_types, hidden_size, use_bias=True, average_aggregation=False, name='sparse_ggnn_batched'):\n    (b, n) = (tf.shape(node_states)[0], tf.shape(node_states)[1])\n    node_states = tf.reshape(node_states, [(b * n), hidden_size])\n    indices = adjacency_matrices.indices\n    new_index2 = indices[(:, 3)]\n    new_index0 = (indices[(:, 1)] + (indices[(:, 0)] * tf.cast(n, tf.int64)))\n    new_index1 = (indices[(:, 2)] + (indices[(:, 0)] * tf.cast(n, tf.int64)))\n    new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1)\n    new_shape = [tf.cast((b * n), tf.int64), tf.cast((b * n), tf.int64), num_edge_types]\n    adjacency_matrices = tf.SparseTensor(indices=new_indices, values=adjacency_matrices.values, dense_shape=new_shape)\n    node_states = sparse_message_pass(node_states, adjacency_matrices, num_edge_types, hidden_size, use_bias=use_bias, average_aggregation=average_aggregation, name=name)\n    return tf.reshape(node_states, [b, n, hidden_size])", "docstring": "Identical to sparse_ggnn except that each input has a batch dimension.\n\nB = The batch size.\nN = The number of nodes in each batch.\nH = The size of the hidden states.\nT = The number of edge types.\n\nArgs:\nnode_states: Initial states of each node in the graph. Shape: [B, N, H]\nadjacency_matrices: Adjacency matrices of directed edges for each edge\ntype and batch. Shape: [B, N, N, T] (sparse).\nnum_edge_types: The number of edge types. T.\nhidden_size: The size of the hidden layer. H.\nuse_bias: Whether to use bias in the hidden layer.\naverage_aggregation: How to aggregate the incoming node messages. If\naverage_aggregation is true, the messages are averaged. If it is false,\nthey are summed.\nname: (optional) The scope within which tf variables should be created.\n\nReturns:\nThe result of one round of message-passing of shape [B, N, H].", "source": "codesearchnet"}
{"code": "def is_saving_non_distributed():\n    if not save_context.in_save_context():\n        return False\n    options = save_context.get_save_options()\n    return options.experimental_variable_policy != save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES", "docstring": "Returns whether we're saving a non-distributed version of the model.\n\nIt returns True iff we are in saving context and are saving a non-distributed\nversion of the model. That is, SaveOptions.experimental_variable_policy is\nNONE.\n\nReturns:\nA boolean.", "source": "github-repos"}
{"code": "def _create_vocab_table_lookup_qat_model_tf1(self, sess: session.Session) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:\n    asset_dir = self.create_tempdir('assets').full_path\n    asset_file = os.path.join(asset_dir, 'vocab_file.txt')\n    file_io.write_string_to_file(filename=asset_file, file_content='hello,model,quantization\\n')\n    vocab_file = asset.Asset(asset_file)\n    raw_vocab = io_ops.read_file(vocab_file)\n    vocabs = ragged_string_ops.string_split_v2(string_ops.string_strip(raw_vocab), sep=',')\n    kv_init = lookup_ops.KeyValueTensorInitializer(keys=vocabs, values=np.array([0, 1, 2]), value_dtype=dtypes.int64)\n    table = lookup_ops.StaticVocabularyTable(kv_init, num_oov_buckets=5)\n    input_vocabs_placeholder = array_ops.placeholder(dtypes.string, shape=(None,), name='input_vocabs')\n    lookup_vals = math_ops.cast(table.lookup(input_vocabs_placeholder), dtypes.float32)\n    matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])\n    matmul_input = array_ops.fake_quant_with_min_max_args(matmul_input, min=-0.3, max=0.3, num_bits=8, narrow_range=False)\n    weight_row = array_ops.ones(shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32)\n    weight = array_ops.transpose_v2(array_ops_stack.stack([weight_row, weight_row]))\n    weight = array_ops.fake_quant_with_min_max_args(weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False)\n    output_tensor = math_ops.matmul(matmul_input, weight)\n    output_tensor = array_ops.fake_quant_with_min_max_args(output_tensor, min=-0.2, max=0.2, num_bits=8, narrow_range=False)\n    return (input_vocabs_placeholder, lookup_vals, output_tensor)", "docstring": "Creates a simple QAT model that initializes and lookups a vocab table.\n\nThis model creates an asset file at \"vocab_file.txt\" containing\ncomma-separated vocabularies.  It also initializes a `StaticVocabularyTable`\nand performs a lookup with the input vocabs, which is a 1D tensor of\nstrings.\n\nArgs:\nsess: Tensorflow Session to create the model in.\n\nReturns:\n(input_vocabs_placeholder, lookup_vals, output_tensor), where\n* input_vocabs_placeholder is a placeholder tensor of 1D strings\n* lookup_vals is an output tensor that is a direct result of table lookup\n* output_tensor is a float 2x2 matrix", "source": "github-repos"}
{"code": "def update_metric_by_name(self, metric_name, metric_type, description=None,\n                              custom_properties=None, tags=None, **kwargs):\n        \n        data = {'type': metric_type.upper(),\n                'description': description or '',\n                'customProperties': custom_properties or {},\n                'tags': tags or []}\n        resp = self._put(self._u(self._METRIC_ENDPOINT_SUFFIX,\n                                 str(metric_name)),\n                         data=data, **kwargs)\n        resp.raise_for_status()\n        return resp.json()", "docstring": "Create or update a metric object\n\nArgs:\nmetric_name (string): name of metric\nmetric_type (string): metric type, must be one of 'gauge', 'counter',\n'cumulative_counter'\ndescription (optional[string]): a description\ncustom_properties (optional[dict]): dictionary of custom properties\ntags (optional[list of strings]): list of tags associated with\nmetric", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor:\n    vision_outputs = self.vision_tower(pixel_values=pixel_values).last_hidden_state\n    image_features = self.multi_modal_projector(vision_outputs)\n    return image_features", "docstring": "Projects the last hidden state from the vision model into language model space.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nThe tensors corresponding to the input images.\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def _ParseEntry(self, key, val):\n    \n    if key in self._repeated:\n      setting = self.section.setdefault(key, [])\n      setting.extend(val)\n    else:\n      self.section.setdefault(key, val)", "docstring": "Adds an entry for a configuration setting.\n\nArgs:\nkey: The name of the setting.\nval: The value of the setting.", "source": "juraj-google-style"}
{"code": "def compress_file(filepath, compression=\"gz\"):\n    \n    if compression not in [\"gz\", \"bz2\"]:\n        raise ValueError(\"Supported compression formats are 'gz' and 'bz2'.\")\n    from monty.io import zopen\n    if not filepath.lower().endswith(\".%s\" % compression):\n        with open(filepath, 'rb') as f_in, \\\n                zopen('%s.%s' % (filepath, compression), 'wb') as f_out:\n            f_out.writelines(f_in)\n        os.remove(filepath)", "docstring": "Compresses a file with the correct extension. Functions like standard\nUnix command line gzip and bzip2 in the sense that the original\nuncompressed files are not retained.\n\nArgs:\nfilepath (str): Path to file.\ncompression (str): A compression mode. Valid options are \"gz\" or\n\"bz2\". Defaults to \"gz\".", "source": "juraj-google-style"}
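A brief usage sketch (file names are hypothetical); note that the original file is deleted once the compressed copy is written.

compress_file('/tmp/results.csv')                    # writes /tmp/results.csv.gz, removes /tmp/results.csv
compress_file('/tmp/trace.log', compression='bz2')   # writes /tmp/trace.log.bz2
compress_file('/tmp/archive.tar', compression='xz')  # raises ValueError: unsupported format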
{"code": "def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices, forward_pass_name_scope):\n    mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)\n    grad_fn = ops._gradient_registry.lookup(op_name)\n    if grad_fn is None:\n        return [None] * num_inputs\n    if ops.executing_eagerly_outside_functions() or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):\n        gradient_name_scope = 'gradient_tape/'\n        if forward_pass_name_scope:\n            gradient_name_scope += forward_pass_name_scope + '/'\n        with ops.name_scope(gradient_name_scope):\n            return grad_fn(mock_op, *out_grads)\n    else:\n        return grad_fn(mock_op, *out_grads)", "docstring": "Calls the gradient function of the op.\n\nArgs:\nop_name: the name of the op to be differentiated.\nattr_tuple: the attrs, as a tuple.\nnum_inputs: the number of inputs to the op.\ninputs: inputs to the original operation.\noutputs: outputs to the original operation.\nout_grads: gradients of the operation wrt its outputs.\nskip_input_indices: a tuple that is passed to the gradient function,\nindicating which inputs to skip calculating the gradient for\nforward_pass_name_scope: the namescope of the op in the forward pass.\n\nReturns:\nThe gradients with respect to the inputs of the function, as a list.", "source": "github-repos"}
{"code": "def find_one(self, collection, query):\n        \n        obj = getattr(self.db, collection)\n        result = obj.find_one(query)\n        return result", "docstring": "Search a collection for the query provided and return one result. Just\na raw interface to mongo to do any query you want.\n\nArgs:\ncollection: The db collection. See main class documentation.\nquery: A mongo find query.\nReturns:\npymongo Cursor object with the results.", "source": "juraj-google-style"}
{"code": "def label_TM_tmhmm_residue_numbers_and_leaflets(tmhmm_seq):\n    TM_number_dict = {}\n    T_index = []\n    T_residue = []\n    residue_count = 1\n    for residue_label in tmhmm_seq:\n        if (residue_label == 'T'):\n            T_residue.append(residue_count)\n        residue_count = (residue_count + 1)\n    TM_number_dict.update({'T_residue': T_residue})\n    T_residue_list = TM_number_dict['T_residue']\n    count = 0\n    max_count = (len(T_residue_list) - 1)\n    TM_helix_count = 0\n    TM_boundary_dict = {}\n    while (count <= max_count):\n        if (count == 0):\n            TM_start = T_residue_list[count]\n            count = (count + 1)\n            continue\n        elif (count == max_count):\n            TM_end = T_residue_list[count]\n            TM_helix_count = (TM_helix_count + 1)\n            TM_boundary_dict.update({('TM_helix_' + str(TM_helix_count)): [TM_start, TM_end]})\n            break\n        elif (T_residue_list[count] != (T_residue_list[(count + 1)] - 1)):\n            TM_end = T_residue_list[count]\n            TM_helix_count = (TM_helix_count + 1)\n            TM_boundary_dict.update({('TM_helix_' + str(TM_helix_count)): [TM_start, TM_end]})\n            TM_start = T_residue_list[(count + 1)]\n        count = (count + 1)\n    leaflet_dict = {}\n    for leaflet in ['O', 'I']:\n        leaflet_list = []\n        for (TM_helix, TM_residues) in TM_boundary_dict.items():\n            for residue_num in TM_residues:\n                tmhmm_seq_index = (residue_num - 1)\n                previous_residue = (tmhmm_seq_index - 1)\n                next_residue = (tmhmm_seq_index + 1)\n                if ((tmhmm_seq[previous_residue] == leaflet) or (tmhmm_seq[next_residue] == leaflet)):\n                    leaflet_list.append(residue_num)\n        leaflet_dict.update({('tmhmm_leaflet_' + leaflet): leaflet_list})\n    return (TM_boundary_dict, leaflet_dict)", "docstring": "Determine the residue numbers of the TM-helix residues that cross the membrane and label them by leaflet.\n\nArgs:\ntmhmm_seq: g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm']\n\nReturns:\nleaflet_dict: a dictionary with leaflet_variable : [residue list] where the variable is inside or outside\nTM_boundary dict: outputs a dictionar with : TM helix number : [TM helix residue start , TM helix residue end]\n\nTODO:\nuntested method!", "source": "codesearchnet"}
{"code": "def state_y(self, t: types.RealTensor, name: str=None) -> types.RealTensor:\n    name = name or 'state_y'\n    with tf.name_scope(name):\n        t = tf.convert_to_tensor(t, dtype=self._dtype)\n        t_shape = tf.shape(t)\n        t = tf.broadcast_to(t, tf.concat([[self._dim], t_shape], axis=0))\n        time_index = tf.searchsorted(self._jump_locations, t)\n        mr2 = tf.expand_dims(self._mean_reversion, axis=-1)\n        mr2 = tf.expand_dims(mr2 + tf.transpose(mr2), axis=-1)\n\n        def _integrate_volatility_squared(vol, l_limit, u_limit):\n            vol = tf.expand_dims(vol, axis=-2)\n            vol_squared = tf.expand_dims(self._rho, axis=-1) * (vol * tf.transpose(vol, perm=[1, 0, 2]))\n            return vol_squared / mr2 * (tf.math.exp(mr2 * u_limit) - tf.math.exp(mr2 * l_limit))\n        is_constant_vol = tf.math.equal(tf.shape(self._jump_values_vol)[-1], 0)\n        v_squared_between_vol_knots = tf.cond(is_constant_vol, lambda: tf.zeros(shape=(self._dim, self._dim, 0), dtype=self._dtype), lambda: _integrate_volatility_squared(self._jump_values_vol, self._padded_knots, self._jump_locations))\n        v_squared_at_vol_knots = tf.concat([tf.zeros((self._dim, self._dim, 1), dtype=self._dtype), utils.cumsum_using_matvec(v_squared_between_vol_knots)], axis=-1)\n        vn = tf.concat([self._zero_padding, self._jump_locations], axis=1)\n        v_squared_t = _integrate_volatility_squared(self._volatility(t), tf.gather(vn, time_index, batch_dims=1), t)\n        v_squared_t += tf.gather(v_squared_at_vol_knots, time_index, batch_dims=-1)\n        return tf.math.exp(-mr2 * t) * v_squared_t", "docstring": "Computes the state variable `y(t)` for tha Gaussian HJM Model.\n\nFor Gaussian HJM model, the state parameter y(t), can be analytically\ncomputed as follows:\n\ny_ij(t) = exp(-k_i * t) * exp(-k_j * t) * (\nint_0^t rho_ij * sigma_i(u) * sigma_j(u) * du)\n\nArgs:\nt: A rank 1 real `Tensor` of shape `[num_times]` specifying the time `t`.\nname: Python string. The name to give to the ops created by this function.\nDefault value: `None` which maps to the default name `state_y`.\n\nReturns:\nA real `Tensor` of shape [self._factors, self._factors, num_times]\ncontaining the computed y_ij(t).", "source": "github-repos"}
{"code": "def recipe_dcm_to_storage(config, auth_read, auth_write, account, report_id, report_name, bucket, path):\n    dcm(config, {'auth': auth_read, 'report': {'account': account, 'report_id': report_id, 'name': report_name}, 'out': {'storage': {'auth': auth_write, 'bucket': bucket, 'path': path}}})", "docstring": "Move existing CM report into a Storage bucket.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\naccount (integer) - NA\nreport_id (integer) - NA\nreport_name (string) - NA\nbucket (string) - NA\npath (string) - NA", "source": "github-repos"}
{"code": "def camel_to_title(name):\n    \n    split = re.findall(r\"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)\", name)\n    ret = \" \".join(split)\n    ret = ret[0].upper() + ret[1:]\n    return ret", "docstring": "Takes a camelCaseFieldName and returns a Title Case Field Name\n\nArgs:\nname (str): E.g. camelCaseFieldName\n\nReturns:\nstr: Title Case converted name. E.g. Camel Case Field Name", "source": "juraj-google-style"}
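Illustrative calls; the second shows how the regex keeps runs of capitals (acronyms) together.

camel_to_title('camelCaseFieldName')   # 'Camel Case Field Name'
camel_to_title('parseHTTPResponse')    # 'Parse HTTP Response'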
{"code": "def getattr(self, c, attr, default=None, match_only=None):\n        \n        matching_decor = self.get_decor(c, match_only=match_only)\n\n        try:\n            return getattr(matching_decor, attr)\n        except AttributeError:\n            return default", "docstring": "Get the attribute of a component.\n\nArgs:\nc (component): The component to look up.\nattr (str): The attribute to get.\ndefault (str): What to return in the event of no match.\nmatch_only (list of str): The component attributes to include in the\ncomparison. Default: All of them.\n\nReturns:\nobj. The specified attribute of the matching Decor in the Legend.", "source": "juraj-google-style"}
{"code": "def allsame(list_, strict=True):\n    \n    if len(list_) == 0:\n        return True\n    first_item = list_[0]\n    return list_all_eq_to(list_, first_item, strict)", "docstring": "checks to see if list is equal everywhere\n\nArgs:\nlist_ (list):\n\nReturns:\nTrue if all items in the list are equal", "source": "juraj-google-style"}
{"code": "def copy(self) -> 'ConsoleBuffer':\n    other = ConsoleBuffer(0, 0)\n    other.width = self.width\n    other.height = self.height\n    other.back_r = list(self.back_r)\n    other.back_g = list(self.back_g)\n    other.back_b = list(self.back_b)\n    other.fore_r = list(self.fore_r)\n    other.fore_g = list(self.fore_g)\n    other.fore_b = list(self.fore_b)\n    other.char = list(self.char)\n    return other", "docstring": "Returns a copy of this ConsoleBuffer.\n\nReturns:\nConsoleBuffer: A new ConsoleBuffer copy.", "source": "codesearchnet"}
{"code": "def __init__(self, left, right):\n    self.left = left\n    self.right = right", "docstring": "Initialize an equality.\n\nArgs:\nleft: A string. Left side of the equality.\nright: A string. Right side of the equality.", "source": "github-repos"}
{"code": "def link_to_storage(self, sensor_log):\n        \n\n        if self.walker is not None:\n            self._sensor_log.destroy_walker(self.walker)\n            self.walker = None\n\n        self.walker = sensor_log.create_walker(self.selector)\n        self._sensor_log = sensor_log", "docstring": "Attach this DataStreamer to an underlying SensorLog.\n\nCalling this method is required if you want to use this DataStreamer\nto generate reports from the underlying data in the SensorLog.\n\nYou can call it multiple times and it will unlink itself from any\nprevious SensorLog each time.\n\nArgs:\nsensor_log (SensorLog): Actually create a StreamWalker to go along with this\nstreamer so that we can check if it's triggered.", "source": "juraj-google-style"}
{"code": "def _start_trial(self, trial, checkpoint=None):\n    prior_status = trial.status\n    self.set_status(trial, Trial.RUNNING)\n    trial.runner = self._setup_runner(trial, reuse_allowed=((checkpoint is not None) or (trial._checkpoint.value is not None)))\n    if (not self.restore(trial, checkpoint)):\n        if (trial.status == Trial.ERROR):\n            raise RuntimeError('Restore from checkpoint failed for Trial {}.'.format(str(trial)))\n    previous_run = self._find_item(self._paused, trial)\n    if ((prior_status == Trial.PAUSED) and previous_run):\n        self._paused.pop(previous_run[0])\n        self._running[previous_run[0]] = trial\n    else:\n        self._train(trial)", "docstring": "Starts trial and restores last result if trial was paused.\n\nRaises:\nValueError if restoring from checkpoint fails.", "source": "codesearchnet"}
{"code": "def to_jacobian(self):\n    if (not self):\n        return JacobianPoint(X=0, Y=0, Z=0)\n    return JacobianPoint(X=self.X, Y=self.Y, Z=1)", "docstring": "Converts this point to a Jacobian representation.\n\nReturns:\nJacobianPoint: The Jacobian representation.", "source": "codesearchnet"}
{"code": "def dr( self, r1, r2, cutoff=None ):\n        \n        delta_r_cartesian = ( r1 - r2 ).dot( self.matrix )\n        delta_r_squared = sum( delta_r_cartesian**2 )\n        if cutoff is not None:\n            cutoff_squared = cutoff ** 2\n            if delta_r_squared > cutoff_squared:\n                return None\n        return( math.sqrt( delta_r_squared ) )", "docstring": "Calculate the distance between two fractional coordinates in the cell.\n\nArgs:\nr1 (np.array): fractional coordinates for position 1.\nr2 (np.array): fractional coordinates for position 2.\ncutoff (optional, float): If set, returns None for distances greater than the cutoff. Default None (unset).\n\nReturns:\n(float): the distance between r1 and r2.", "source": "juraj-google-style"}
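A standalone sketch of the same fractional-to-Cartesian distance calculation, using NumPy and a hypothetical cubic cell in place of self.matrix.

import math
import numpy as np

matrix = np.diag([5.0, 5.0, 5.0])           # hypothetical 5 Angstrom cubic cell
r1 = np.array([0.1, 0.0, 0.0])
r2 = np.array([0.6, 0.0, 0.0])
delta_cart = (r1 - r2).dot(matrix)          # fractional difference -> Cartesian vector
print(math.sqrt((delta_cart ** 2).sum()))   # 2.5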
{"code": "def _WsdlHasMethod(self, method_name):\n    \n    try:\n      self._method_bindings.get(method_name)\n      return True\n    except ValueError:\n      return False", "docstring": "Determine if a method is in the wsdl.\n\nArgs:\nmethod_name: The name of the method.\n\nReturns:\nTrue if the method is in the wsdl, otherwise False.", "source": "juraj-google-style"}
{"code": "def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):\n        \n        return Kpoints(\"Automatic kpoint scheme\", 0,\n                       Kpoints.supported_modes.Monkhorst, kpts=[kpts],\n                       kpts_shift=shift)", "docstring": "Convenient static constructor for an automatic Monkhorst pack Kpoint\ngrid.\n\nArgs:\nkpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice\nvectors. Defaults to (2,2,2)\nshift: Shift to be applied to the kpoints. Defaults to (0,0,0).\n\nReturns:\nKpoints object", "source": "juraj-google-style"}
{"code": "def _write_credentials_file(credentials_file, credentials):\n    data = {'file_version': 2, 'credentials': {}}\n    for (key, credential) in iteritems(credentials):\n        credential_json = credential.to_json()\n        encoded_credential = _helpers._from_bytes(base64.b64encode(_helpers._to_bytes(credential_json)))\n        data['credentials'][key] = encoded_credential\n    credentials_file.seek(0)\n    json.dump(data, credentials_file)\n    credentials_file.truncate()", "docstring": "Writes credentials to a file.\n\nRefer to :func:`_load_credentials_file` for the format.\n\nArgs:\ncredentials_file: An open file handle, must be read/write.\ncredentials: A dictionary mapping user-defined keys to an instance of\n:class:`oauth2client.client.Credentials`.", "source": "codesearchnet"}
{"code": "def lenet5(images, labels):\n  \n  images = pt.wrap(images)\n  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):\n    return (images.conv2d(5, 20).max_pool(2, 2).conv2d(5, 50).max_pool(2, 2)\n            .flatten().fully_connected(500).softmax_classifier(10, labels))", "docstring": "Creates a multi layer convolutional network.\n\nThe architecture is similar to that defined in LeNet 5.\nPlease change this to experiment with architectures.\n\nArgs:\nimages: The input images.\nlabels: The labels as dense one-hot vectors.\nReturns:\nA softmax result.", "source": "juraj-google-style"}
{"code": "def UploadSignedConfigBlob(content, aff4_path, client_context=None, limit=None, token=None):\n    if (limit is None):\n        limit = config.CONFIG['Datastore.maximum_blob_size']\n    if (client_context is None):\n        client_context = ['Platform:Windows', 'Client Context']\n    config.CONFIG.Validate(parameters='PrivateKeys.executable_signing_private_key')\n    signing_key = config.CONFIG.Get('PrivateKeys.executable_signing_private_key', context=client_context)\n    verification_key = config.CONFIG.Get('Client.executable_signing_public_key', context=client_context)\n    signed_binary_utils.WriteSignedBinary(rdfvalue.RDFURN(aff4_path), content, signing_key, public_key=verification_key, chunk_size=limit, token=token)\n    logging.info('Uploaded to %s', aff4_path)", "docstring": "Upload a signed blob into the datastore.\n\nArgs:\ncontent: File content to upload.\naff4_path: aff4 path to upload to.\nclient_context: The configuration contexts to use.\nlimit: The maximum size of the chunk to use.\ntoken: A security token.\n\nRaises:\nIOError: On failure to write.", "source": "codesearchnet"}
{"code": "def write_dot(g):\n    lines = ['digraph g {']\n\n    def attrs_txt(items):\n        if items:\n            txt = ', '.join((('%s=\"%s\"' % (k, str(v).strip('\"'))) for (k, v) in items))\n            return (('[' + txt) + ']')\n        else:\n            return ''\n    for node in g.nodes():\n        atxt = attrs_txt(g.node_attributes(node))\n        txt = ('%s %s;' % (node, atxt))\n        lines.append(txt)\n    for e in g.edges():\n        (edge_from, edge_to) = e\n        attrs = g.edge_attributes(e)\n        label = str(g.edge_label(e))\n        if label:\n            attrs.append(('label', label))\n        atxt = attrs_txt(attrs)\n        txt = ('%s -> %s %s;' % (edge_from, edge_to, atxt))\n        lines.append(txt)\n    lines.append('}')\n    return '\\n'.join(lines)", "docstring": "Replacement for pygraph.readwrite.dot.write, which is dog slow.\n\nNote:\nThis isn't a general replacement. It will work for the graphs that\nRez generates, but there are no guarantees beyond that.\n\nArgs:\ng (`pygraph.digraph`): Input graph.\n\nReturns:\nstr: Graph in dot format.", "source": "codesearchnet"}
{"code": "def enqueue_message(self, message, timeout):\n    \n    \n    if message.command == 'WRTE':\n      self._send_command('OKAY', timeout=timeout)\n    elif message.command == 'OKAY':\n      self._set_or_check_remote_id(message.arg0)\n    self.message_queue.put(message)", "docstring": "Add the given message to this transport's queue.\n\nThis method also handles ACKing any WRTE messages.\n\nArgs:\nmessage: The AdbMessage to enqueue.\ntimeout: The timeout to use for the operation.  Specifically, WRTE\nmessages cause an OKAY to be sent; timeout is used for that send.", "source": "juraj-google-style"}
{"code": "def create(cls, env, filenames, trim=False):\n    import_graph = cls(env)\n    for filename in filenames:\n        import_graph.add_file_recursive(os.path.abspath(filename), trim)\n    import_graph.build()\n    return import_graph", "docstring": "Create and return a final graph.\n\nArgs:\nenv: An environment.Environment object\nfilenames: A list of filenames\ntrim: Whether to trim the dependencies of builtin and system files.\n\nReturns:\nAn immutable ImportGraph with the recursive dependencies of all the\nfiles in filenames", "source": "codesearchnet"}
{"code": "def parse_mmcif_header(infile):\n    \n    from Bio.PDB.MMCIF2Dict import MMCIF2Dict\n\n    newdict = {}\n    try:\n        mmdict = MMCIF2Dict(infile)\n    except ValueError as e:\n        log.exception(e)\n        return newdict\n\n    chemical_ids_exclude = ['HOH']\n    chemical_types_exclude = ['l-peptide linking','peptide linking']\n\n    if '_struct.title' in mmdict:\n        newdict['pdb_title'] = mmdict['_struct.title']\n    else:\n        log.debug('{}: No title field'.format(infile))\n\n    if '_struct.pdbx_descriptor' in mmdict:\n        newdict['description'] = mmdict['_struct.pdbx_descriptor']\n    else:\n        log.debug('{}: no description field'.format(infile))\n\n    if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:\n        newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']\n    elif '_database_PDB_rev.date' in mmdict:\n        newdict['date'] = mmdict['_database_PDB_rev.date']\n    else:\n        log.debug('{}: no date field'.format(infile))\n\n    if '_exptl.method' in mmdict:\n        newdict['experimental_method'] = mmdict['_exptl.method']\n    else:\n        log.debug('{}: no experimental method field'.format(infile))\n\n    \n    if '_refine.ls_d_res_high' in mmdict:\n        try:\n            if isinstance(mmdict['_refine.ls_d_res_high'], list):\n                newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]\n            else:\n                newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])\n        except:\n            try:\n                newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])\n            except:\n                log.debug('{}: no resolution field'.format(infile))\n    else:\n        log.debug('{}: no resolution field'.format(infile))\n\n    if '_chem_comp.id' in mmdict:\n        chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],\n                                                            ssbio.utils.not_find(mmdict['_chem_comp.type'],\n                                                                           chemical_types_exclude,\n                                                                           case_sensitive=False))\n        chemicals_fitered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)\n        newdict['chemicals'] = chemicals_fitered\n    else:\n        log.debug('{}: no chemical composition field'.format(infile))\n\n    if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:\n        newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']\n    else:\n        log.debug('{}: no organism field'.format(infile))\n\n    return newdict", "docstring": "Parse a couple important fields from the mmCIF file format with some manual curation of ligands.\n\nIf you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.\n\nArgs:\ninfile: Path to mmCIF file\n\nReturns:\ndict: Dictionary of parsed header", "source": "juraj-google-style"}
{"code": "def AssignTasksToClient(self, client_id):\n    \n    rules = self.Get(self.Schema.RULES)\n    if not rules:\n      return 0\n\n    if data_store.RelationalDBEnabled():\n      last_foreman_run = self._GetLastForemanRunTimeRelational(client_id)\n    else:\n      last_foreman_run = self._GetLastForemanRunTime(client_id)\n\n    latest_rule = max(rule.created for rule in rules)\n\n    if latest_rule <= last_foreman_run:\n      return 0\n\n    \n    if data_store.RelationalDBEnabled():\n      try:\n        self._SetLastForemanRunTimeRelational(client_id, latest_rule)\n      except db.UnknownClientError:\n        pass\n\n    \n    \n    if not data_store.RelationalDBEnabled():\n      self._SetLastForemanRunTime(client_id, latest_rule)\n\n    relevant_rules = []\n    expired_rules = False\n\n    now = time.time() * 1e6\n\n    for rule in rules:\n      if rule.expires < now:\n        expired_rules = True\n        continue\n      if rule.created <= int(last_foreman_run):\n        continue\n\n      relevant_rules.append(rule)\n\n    if data_store.RelationalDBEnabled():\n      client_data = data_store.REL_DB.ReadClientFullInfo(client_id)\n      if client_data is None:\n        return\n    else:\n      client_data = aff4.FACTORY.Open(client_id, mode=\"rw\", token=self.token)\n\n    actions_count = 0\n    for rule in relevant_rules:\n      if self._EvaluateRules(rule, client_data):\n        actions_count += self._RunActions(rule, client_id)\n\n    if expired_rules:\n      self.ExpireRules()\n\n    return actions_count", "docstring": "Examines our rules and starts up flows based on the client.\n\nArgs:\nclient_id: Client id of the client for tasks to be assigned.\n\nReturns:\nNumber of assigned tasks.", "source": "juraj-google-style"}
{"code": "def emit(self, record):\n    \n    \n    \n    \n    \n    \n    \n    level = record.levelno\n    if not FLAGS.is_parsed():  \n      global _warn_preinit_stderr\n      if _warn_preinit_stderr:\n        sys.stderr.write(\n            'WARNING: Logging before flag parsing goes to stderr.\\n')\n        _warn_preinit_stderr = False\n      self._log_to_stderr(record)\n    elif FLAGS['logtostderr'].value:\n      self._log_to_stderr(record)\n    else:\n      super(PythonHandler, self).emit(record)\n      stderr_threshold = converter.string_to_standard(\n          FLAGS['stderrthreshold'].value)\n      if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and\n          self.stream != sys.stderr):\n        self._log_to_stderr(record)\n    \n    if _is_absl_fatal_record(record):\n      self.flush()  \n\n      \n      \n      os.abort()", "docstring": "Prints a record out to some streams.\n\nIf FLAGS.logtostderr is set, it will print to sys.stderr ONLY.\nIf FLAGS.alsologtostderr is set, it will print to sys.stderr.\nIf FLAGS.logtostderr is not set, it will log to the stream\nassociated with the current thread.\n\nArgs:\nrecord: logging.LogRecord, the record to emit.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_pattern, interval=360, stop_timestamp=MAX_TIMESTAMP):\n    self.file_pattern = file_pattern\n    self.interval = interval\n    self.stop_timestamp = stop_timestamp", "docstring": "Watches a directory for updates to files matching a given file pattern.\n\nArgs:\nfile_pattern: The file path to read from as a local file path or a\nGCS ``gs://`` path. The path can contain glob characters\n(``*``, ``?``, and ``[...]`` sets).\ninterval: Interval at which to check for files matching file_pattern\nin seconds.\nstop_timestamp: Timestamp after which no more files will be checked.\n\n**Note**:\n\n1. Any previously used filenames cannot be reused. If a file is added\nor updated to a previously used filename, this transform will ignore\nthat update. To trigger a model update, always upload a file with\nunique name.\n2. Initially, before the pipeline startup time, WatchFilePattern expects\nat least one file present that matches the file_pattern.\n3. This transform is supported in streaming mode since\nMatchContinuously produces an unbounded source. Running in batch\nmode can lead to undesired results or result in pipeline being stuck.", "source": "github-repos"}
{"code": "def compile_expression(structdef_url: str, fhir_context: context.FhirPathContext, fhir_path: str) -> python_compiled_expressions.PythonCompiledExpression:\n    return python_compiled_expressions.PythonCompiledExpression.compile(fhir_path, _PRIMITIVE_HANDLER, structdef_url, fhir_context)", "docstring": "Compiles the FHIRPath expression for the given resource.\n\nArgs:\nstructdef_url: the URL of the FHIR StructureDefinition to use.\nfhir_context: a DefinitionLoader used to load FHIR structure definitions and\ndependencies.\nfhir_path: a FHIRPath expression to be run on the resource\n\nReturns:\n\nA PythonCompiledExpression representing the given FHIRPath string that can\nbe evaluated against the target resource.", "source": "github-repos"}
{"code": "def number(self):\n    cmd = 'git log --oneline {}'.format(self.sha1)\n    out = shell.run(cmd, capture=True, never_pretend=True).stdout.strip()\n    return len(out.splitlines())", "docstring": "Return this commits number.\n\nThis is the same as the total number of commits in history up until\nthis commit.\n\nThis value can be useful in some CI scenarios as it allows to track\nprogress on any given branch (although there can be two commits with the\nsame number existing on different branches).\n\nReturns:\nint: The commit number/index.", "source": "codesearchnet"}
{"code": "def get_geostationary_mask(area):\n    h = area.proj_dict['h']\n    (xmax, ymax) = get_geostationary_angle_extent(area)\n    xmax *= h\n    ymax *= h\n    (x, y) = area.get_proj_coords_dask()\n    return ((((x / xmax) ** 2) + ((y / ymax) ** 2)) <= 1)", "docstring": "Compute a mask of the earth's shape as seen by a geostationary satellite\n\nArgs:\narea (pyresample.geometry.AreaDefinition) : Corresponding area\ndefinition\n\nReturns:\nBoolean mask, True inside the earth's shape, False outside.", "source": "codesearchnet"}
{"code": "def set_running_in_gce(worker_executing_project):\n    global is_running_in_gce\n    global executing_project\n    is_running_in_gce = True\n    executing_project = worker_executing_project", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nInforms the authentication library that we are running in GCE.\n\nWhen we are running in GCE, we have the option of using the VM metadata\ncredentials for authentication to Google services.\n\nArgs:\nworker_executing_project: The project running the workflow. This information\ncomes from worker startup information.", "source": "github-repos"}
{"code": "def period_from_dict(period: Dict[int, List[int]]) -> dateslib.PeriodTensor:\n    amount = period['frequency']\n    period_type = period_pb2.PeriodType.Name(period['type'])\n    return dateslib.PeriodTensor(amount, dateslib.PeriodType[period_type])", "docstring": "Utility to convert a dictionary of periods to a PeriodTensor.\n\nArgs:\nperiod: A dictionary with keys \"type\" (which corresponds to the proto type\nof the period (see `period_pb2.Period`)) and \"frequency\".\n\nReturns:\nAn instance of the `PeriodTensor`.", "source": "github-repos"}
{"code": "def _WriteRow(self, output_writer, values, in_bold=False):\n    \n    row_strings = []\n    for value_index, value_string in enumerate(values):\n      padding_size = self._column_sizes[value_index] - len(value_string)\n      padding_string = ' ' * padding_size\n\n      row_strings.extend([value_string, padding_string])\n\n    row_strings.pop()\n\n    row_strings = ''.join(row_strings)\n\n    if in_bold and not win32console:\n      \n      \n      row_strings = '\\x1b[1m{0:s}\\x1b[0m'.format(row_strings)\n\n    output_writer.Write('{0:s}\\n'.format(row_strings))", "docstring": "Writes a row of values aligned with the width to the output writer.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.\nvalues (list[object]): values.\nin_bold (Optional[bool]): True if the row should be written in bold.", "source": "juraj-google-style"}
{"code": "def uninstall(pkg):\n    ret = {'result': None, 'output': ''}\n    out = __salt__['cmd.run_all'](((FLATPAK_BINARY_NAME + ' uninstall ') + pkg))\n    if (out['retcode'] and out['stderr']):\n        ret['stderr'] = out['stderr'].strip()\n        ret['result'] = False\n    else:\n        ret['stdout'] = out['stdout'].strip()\n        ret['result'] = True\n    return ret", "docstring": "Uninstall the specified package.\n\nArgs:\npkg (str): The package name.\n\nReturns:\ndict: The ``result`` and ``output``.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' flatpak.uninstall org.gimp.GIMP", "source": "codesearchnet"}
{"code": "def get_song_type(self, cache=True):\n         \n        if not (cache and ('song_type' in self.cache)):\n            response = self.get_attribute('profile', bucket='song_type')\n            if 'song_type' in response['songs'][0]:\n                self.cache['song_type'] = response['songs'][0]['song_type']\n            else:\n                self.cache['song_type'] = []\n        return self.cache['song_type']", "docstring": "Get the types of a song.\n\nArgs:\ncache (boolean): A boolean indicating whether or not the cached value should be used\n(if available). Defaults to True.\n\nReturns:\nA list of strings, each representing a song type:  'christmas', for example.\n\nExample:\n>>> s = song.Song('SOQKVPH12A58A7AF4D')\n>>> s.song_type\n[u'christmas']\n>>>", "source": "juraj-google-style"}
{"code": "def _build_js(inputs, outputs, name, implementation, support_code):\n    input_fields = json.dumps([f[0] for f in inputs])\n    output_fields = [{'name': f[0], 'type': f[1]} for f in outputs]\n    output_fields = json.dumps(output_fields, sort_keys=True)\n    if (support_code is None):\n        support_code = ''\n    return \"{code}\\n{name}={implementation};\\nbigquery.defineFunction('{name}', {inputs}, {outputs}, {name});\".format(code=support_code, name=name, implementation=implementation, inputs=str(input_fields), outputs=str(output_fields))", "docstring": "Creates a BigQuery SQL UDF javascript object.\n\nArgs:\ninputs: a list of (name, type) tuples representing the schema of input.\noutputs: a list of (name, type) tuples representing the schema of the output.\nname: the name of the function\nimplementation: a javascript function defining the UDF logic.\nsupport_code: additional javascript code that the function can use.", "source": "codesearchnet"}
{"code": "def annotate_source_against_profile(profile_data, source_file_path, node_name_filter=None, op_type_filter=None, min_line=None, max_line=None):\n    source_file_path = _norm_abs_path(source_file_path)\n    node_name_regex = re.compile(node_name_filter) if node_name_filter else None\n    op_type_regex = re.compile(op_type_filter) if op_type_filter else None\n    line_to_profile_summary = {}\n    for profile_datum in profile_data:\n        if not profile_datum.file_path:\n            continue\n        if _norm_abs_path(profile_datum.file_path) != source_file_path:\n            continue\n        if min_line is not None and profile_datum.line_number < min_line or (max_line is not None and profile_datum.line_number >= max_line):\n            continue\n        if node_name_regex and (not node_name_regex.match(profile_datum.node_exec_stats.node_name)):\n            continue\n        if op_type_regex and (not op_type_regex.match(profile_datum.op_type)):\n            continue\n        if profile_datum.line_number not in line_to_profile_summary:\n            line_to_profile_summary[profile_datum.line_number] = profiling.AggregateProfile(profile_datum)\n        else:\n            line_to_profile_summary[profile_datum.line_number].add(profile_datum)\n    return line_to_profile_summary", "docstring": "Annotate a Python source file with profiling information at each line.\n\n(The annotation doesn't change the source file itself.)\n\nArgs:\nprofile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.\nsource_file_path: (`str`) Path to the source file being annotated.\nnode_name_filter: Regular expression to filter by node name.\nop_type_filter: Regular expression to filter by op type.\nmin_line: (`None` or `int`) The 1-based line to start annotate the source\nfile from (inclusive).\nmax_line: (`None` or `int`) The 1-based line number to end the annotation\nat (exclusive).\n\nReturns:\nA `dict` mapping 1-based line number to a the namedtuple\n`profiling.LineOrFuncProfileSummary`.", "source": "github-repos"}
{"code": "def noise_set_type(n: tcod.noise.Noise, typ: int) -> None:\n    n.algorithm = typ", "docstring": "Set a Noise objects default noise algorithm.\n\nArgs:\ntyp (int): Any NOISE_* constant.", "source": "codesearchnet"}
{"code": "def AddSpecification(self, specification):\n    \n    if specification.identifier in self._format_specifications:\n      raise KeyError(\n          'Format specification {0:s} is already defined in store.'.format(\n              specification.identifier))\n\n    self._format_specifications[specification.identifier] = specification\n\n    for signature in specification.signatures:\n      signature_index = len(self._signature_map)\n\n      signature_identifier = '{0:s}:{1:d}'.format(\n          specification.identifier, signature_index)\n\n      if signature_identifier in self._signature_map:\n        raise KeyError('Signature {0:s} is already defined in map.'.format(\n            signature_identifier))\n\n      signature.SetIdentifier(signature_identifier)\n      self._signature_map[signature_identifier] = specification", "docstring": "Adds a format specification.\n\nArgs:\nspecification (FormatSpecification): format specification.\n\nRaises:\nKeyError: if the store already contains a specification with\nthe same identifier.", "source": "juraj-google-style"}
{"code": "def _AddDependencyEdges(self, rdf_artifact):\n    \n    artifact_dependencies = artifact_registry.GetArtifactPathDependencies(\n        rdf_artifact)\n    if artifact_dependencies:\n      for attribute in artifact_dependencies:\n        self._AddEdge(attribute, rdf_artifact.name)\n    else:\n      self.reachable_nodes.add(rdf_artifact.name)\n      self.graph[rdf_artifact.name].is_provided = True", "docstring": "Add an edge for every dependency of the given artifact.\n\nThis method gets the attribute names for a given artifact and for every\nattribute it adds a directed edge from the attribute node to the artifact\nnode. If an artifact does not have any dependencies it is added to the set\nof reachable nodes.\n\nArgs:\nrdf_artifact: The artifact object.", "source": "juraj-google-style"}
{"code": "def hwvtep_set_overlaygw_type(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        type = kwargs.pop('type')\n        ip_args = dict(name=name, gw_type=type)\n        method_name = 'overlay_gateway_gw_type'\n        method_class = self._brocade_tunnels\n        gw_attr = getattr(method_class, method_name)\n        config = gw_attr(**ip_args)\n        output = self._callback(config)\n        return output", "docstring": "Set gateway type\n\nArgs:\nname  (str): gateway-name\ntype (str): gateway-type\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def CaptureNamedVariable(self, name, value, depth, limits):\n    if (not hasattr(name, '__dict__')):\n        name = str(name)\n    else:\n        name = str(id(name))\n    self._total_size += len(name)\n    v = (self.CheckDataVisiblity(value) or self.CaptureVariable(value, depth, limits))\n    v['name'] = name\n    return v", "docstring": "Appends name to the product of CaptureVariable.\n\nArgs:\nname: name of the variable.\nvalue: data to capture\ndepth: nested depth of dictionaries and vectors so far.\nlimits: Per-object limits for capturing variable data.\n\nReturns:\nFormatted captured data as per Variable proto with name.", "source": "codesearchnet"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _get_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.zeros(shape, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\nsupported. If not specified, `tf.keras.backend.floatx()` is used,\nwhich default to `float32` unless you configured it otherwise\n(via `tf.keras.backend.set_floatx(float_dtype)`).\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def decorate_event_js(js_code):\n    \n    def add_annotation(method):\n        setattr(method, \"__is_event\", True )\n        setattr(method, \"_js_code\", js_code )\n        return method\n    return add_annotation", "docstring": "setup a method as an event, adding also javascript code to generate\n\nArgs:\njs_code (str): javascript code to generate the event client-side.\njs_code is added to the widget html as\nwidget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}", "source": "juraj-google-style"}
{"code": "def build_kalman_cov_step(get_transition_matrix_for_timestep, get_transition_noise_for_timestep, get_observation_matrix_for_timestep, get_observation_noise_for_timestep):\n\n    def cov_step(previous_covs, t):\n        'Single step of prior covariance recursion.'\n        (previous_latent_cov, _) = previous_covs\n        latent_cov = _propagate_cov(previous_latent_cov, get_transition_matrix_for_timestep((t - 1)), get_transition_noise_for_timestep((t - 1)))\n        observation_cov = _propagate_cov(latent_cov, get_observation_matrix_for_timestep(t), get_observation_noise_for_timestep(t))\n        return (latent_cov, observation_cov)\n    return cov_step", "docstring": "Build a callable for one step of Kalman covariance recursion.\n\nArgs:\nget_transition_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[latent_size, latent_size]`.\nget_transition_noise_for_timestep: callable taking a timestep as\nan integer `Tensor` argument, and returning a\n`MultivariateNormalLinearOperator` of event shape\n`[latent_size]`.\nget_observation_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[observation_size, observation_size]`.\nget_observation_noise_for_timestep: callable taking a timestep as\nan integer `Tensor` argument, and returning a\n`MultivariateNormalLinearOperator` of event shape\n`[observation_size]`.\n\nReturns:\ncov_step: a callable that computes latent state and observation\ncovariance at time `t`, given latent covariance at time `t-1`.", "source": "codesearchnet"}
{"code": "def get_exitstatus(self):\n    logger.debug('Exit status is {0}'.format(self._spawn.exitstatus))\n    return self._spawn.exitstatus", "docstring": "Get the exit status of the program execution.\n\nReturns:\nint: Exit status as reported by the operating system,\nor None if it is not available.", "source": "codesearchnet"}
{"code": "def check_address(address):\n    if isinstance(address, tuple):\n        check_host(address[0])\n        check_port(address[1])\n    elif isinstance(address, string_types):\n        if (os.name != 'posix'):\n            raise ValueError('Platform does not support UNIX domain sockets')\n        if (not (os.path.exists(address) or os.access(os.path.dirname(address), os.W_OK))):\n            raise ValueError('ADDRESS not a valid socket domain socket ({0})'.format(address))\n    else:\n        raise ValueError('ADDRESS is not a tuple, string, or character buffer ({0})'.format(type(address).__name__))", "docstring": "Check if the format of the address is correct\n\nArguments:\naddress (tuple):\n(``str``, ``int``) representing an IP address and port,\nrespectively\n\n.. note::\nalternatively a local ``address`` can be a ``str`` when working\nwith UNIX domain sockets, if supported by the platform\nRaises:\nValueError:\nraised when address has an incorrect format\n\nExample:\n>>> check_address(('127.0.0.1', 22))", "source": "codesearchnet"}
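Illustrative calls under the validation rules above (addresses are hypothetical).

check_address(('127.0.0.1', 8022))   # valid host/port tuple, returns None
check_address('/tmp/agent.sock')     # valid on POSIX if the socket path (or its directory) is writable
check_address(42)                    # raises ValueError: not a tuple or string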
{"code": "def register_obj_processors(self, obj_processors):\n    self.obj_processors = obj_processors\n    self.type_convertors.update(obj_processors)", "docstring": "Object processors are callables that will be called after\neach successful model object construction.\nThose callables receive model object as its parameter.\nRegistration of new object processors will replace previous.\n\nArgs:\nobj_processors(dict): A dictionary where key=class name,\nvalue=callable", "source": "codesearchnet"}
{"code": "def pearson_correlation_coefficient(predictions, labels, weights_fn=None):\n    del weights_fn\n    (_, pearson) = tf.contrib.metrics.streaming_pearson_correlation(predictions, labels)\n    return (pearson, tf.constant(1.0))", "docstring": "Calculate pearson correlation coefficient.\n\nArgs:\npredictions: The raw predictions.\nlabels: The actual labels.\nweights_fn: Weighting function.\n\nReturns:\nThe pearson correlation coefficient.", "source": "codesearchnet"}
{"code": "def minimum(inputs, **kwargs):\n    return Minimum(**kwargs)(inputs)", "docstring": "Functional interface to the `Minimum` layer.\n\nArgs:\ninputs: A list of input tensors (at least 2).\n**kwargs: Standard layer keyword arguments.\n\nReturns:\nA tensor, the element-wise minimum of the inputs.", "source": "github-repos"}
{"code": "def set(self, name, value):\n    curr = self.values\n    parts = name.split('.')\n    for (i, part) in enumerate(parts[:(- 1)]):\n        try:\n            curr = curr.setdefault(part, {})\n        except AttributeError:\n            raise InvalidPath('.'.join(parts[:(i + 1)]))\n    try:\n        curr[parts[(- 1)]] = value\n    except TypeError:\n        raise InvalidPath('.'.join(parts[:(- 1)]))", "docstring": "Set context value.\n\nArgs:\nname (str):\nThe name of the context value to change.\nvalue (Any):\nThe new value for the selected context value", "source": "codesearchnet"}
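A small sketch of the dotted-path behaviour, assuming a hypothetical context object whose values attribute starts as an empty dict.

ctx.values = {}
ctx.set('db.host', 'localhost')
ctx.set('db.port', 5432)
# ctx.values is now {'db': {'host': 'localhost', 'port': 5432}}
ctx.set('db.host.primary', True)   # raises InvalidPath('db.host') because 'host' already holds a str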
{"code": "def _ragged_stack_concat_helper(rt_inputs, axis, stack_values):\n    if not rt_inputs:\n        raise ValueError('rt_inputs may not be empty.')\n    rt_inputs = [ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input, name='rt_input') for rt_input in rt_inputs]\n    row_splits_dtype, rt_inputs = ragged_tensor.match_row_splits_dtypes(*rt_inputs, return_dtype=True)\n    rt_inputs = list(rt_inputs)\n    if len(rt_inputs) == 1 and (not stack_values):\n        return rt_inputs[0]\n    ndims = None\n    for rt in rt_inputs:\n        if ndims is None:\n            ndims = rt.shape.ndims\n        else:\n            rt.shape.assert_has_rank(ndims)\n    out_ndims = ndims if ndims is None or not stack_values else ndims + 1\n    axis = array_ops.get_positive_axis(axis, out_ndims)\n    if stack_values and ndims == 1 and (axis == 0):\n        return ragged_tensor.RaggedTensor.from_row_lengths(values=array_ops.concat(rt_inputs, axis=0), row_lengths=array_ops.concat([array_ops.shape(r) for r in rt_inputs], axis=0))\n    if all((not ragged_tensor.is_ragged(rt) for rt in rt_inputs)):\n        if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):\n            if stack_values:\n                return array_ops_stack.stack(rt_inputs, axis)\n            else:\n                return array_ops.concat(rt_inputs, axis)\n    for i in range(len(rt_inputs)):\n        if not ragged_tensor.is_ragged(rt_inputs[i]):\n            rt_inputs[i] = ragged_tensor.RaggedTensor.from_tensor(rt_inputs[i], ragged_rank=1, row_splits_dtype=row_splits_dtype)\n    ragged_rank = max(max((rt.ragged_rank for rt in rt_inputs)), 1)\n    rt_inputs = [_increase_ragged_rank_to(rt, ragged_rank, row_splits_dtype) for rt in rt_inputs]\n    if axis == 0:\n        return _ragged_stack_concat_axis_0(rt_inputs, stack_values)\n    elif axis == 1:\n        return _ragged_stack_concat_axis_1(rt_inputs, stack_values)\n    else:\n        values = [rt.values for rt in rt_inputs]\n        splits = [[rt_input.row_splits] for rt_input in rt_inputs]\n        with ops.control_dependencies(ragged_util.assert_splits_match(splits)):\n            return ragged_tensor.RaggedTensor.from_row_splits(_ragged_stack_concat_helper(values, axis - 1, stack_values), splits[0][0], validate=False)", "docstring": "Helper function to concatenate or stack ragged tensors.\n\nArgs:\nrt_inputs: A list of RaggedTensors or Tensors to combine.\naxis: The axis along which to concatenate or stack.\nstack_values: A boolean -- if true, then stack values; otherwise,\nconcatenate them.\n\nReturns:\nA RaggedTensor.\nRaises:\nValueError: If rt_inputs is empty, or if axis is out of range.", "source": "github-repos"}
{"code": "def check_error(response, expect_status=200):\n    \n    json = None\n    try:\n        json = response.json()\n    except ValueError:\n        pass\n    if (response.status_code != expect_status or\n            response.status_code == 400 or\n            (json and 'error' in json)):\n        if json and 'error' in json:\n            error = json['error']\n            raise YoukuError(error['code'], error['type'],\n                             error['description'], response.status_code)\n        else:\n            \n            error = parse_qs(response.text)\n            raise YoukuError(error.get('code', [None])[0],\n                             error.get('type', [None])[0],\n                             error.get('description', [None])[0],\n                             response.status_code)", "docstring": "Youku errors should be returned in json form, like:\nHTTP 400\n{\n\"error\":{\n\"code\":120010223,\n\"type\":\"UploadsException\",\n\"description\":\"Expired upload token\"\n}\n}\n\nBut the error may also be in the response url params or response body.\n\nContent-Type may be application/json or text/plain, so\ndon't rely on it.\n\nArgs:\nexpect_status: normally 200 or 201", "source": "juraj-google-style"}
{"code": "def query_blockchain_events(web3: Web3, contract_manager: ContractManager, contract_address: Address, contract_name: str, topics: List, from_block: BlockNumber, to_block: BlockNumber) -> List[Dict]:\n    filter_params = {'fromBlock': from_block, 'toBlock': to_block, 'address': to_checksum_address(contract_address), 'topics': topics}\n    events = web3.eth.getLogs(filter_params)\n    contract_abi = contract_manager.get_contract_abi(contract_name)\n    return [decode_event(abi=contract_abi, log_=raw_event) for raw_event in events]", "docstring": "Returns events emmitted by a contract for a given event name, within a certain range.\n\nArgs:\nweb3: A Web3 instance\ncontract_manager: A contract manager\ncontract_address: The address of the contract to be filtered, can be `None`\ncontract_name: The name of the contract\ntopics: The topics to filter for\nfrom_block: The block to start search events\nto_block: The block to stop searching for events\n\nReturns:\nAll matching events", "source": "codesearchnet"}
{"code": "def variables(self):\n    return self.weights", "docstring": "Returns the list of all layer variables/weights.\n\nAlias of `self.weights`.\n\nReturns:\nA list of variables.", "source": "github-repos"}
{"code": "def ParseActivityLogUncompressedRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = ChromeExtensionActivityEventData()\n    event_data.action_type = self._GetRowValue(query_hash, row, 'action_type')\n    event_data.activity_id = self._GetRowValue(query_hash, row, 'activity_id')\n    event_data.api_name = self._GetRowValue(query_hash, row, 'api_name')\n    event_data.arg_url = self._GetRowValue(query_hash, row, 'arg_url')\n    event_data.args = self._GetRowValue(query_hash, row, 'args')\n    event_data.extension_id = self._GetRowValue(query_hash, row, 'extension_id')\n    event_data.other = self._GetRowValue(query_hash, row, 'other')\n    event_data.page_title = self._GetRowValue(query_hash, row, 'page_title')\n    event_data.page_url = self._GetRowValue(query_hash, row, 'page_url')\n    event_data.query = query\n\n    timestamp = self._GetRowValue(query_hash, row, 'time')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_UNKNOWN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an activity log row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def approve(self, sha=None, **kwargs):\n    path = ('%s/%s/approve' % (self.manager.path, self.get_id()))\n    data = {}\n    if sha:\n        data['sha'] = sha\n    server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs)\n    self._update_attrs(server_data)", "docstring": "Approve the merge request.\n\nArgs:\nsha (str): Head SHA of MR\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMRApprovalError: If the approval failed", "source": "codesearchnet"}
{"code": "def load_embeddings(lang=\"en\", task=\"embeddings\", type=\"cw\", normalize=False):\n  \n  src_dir = \"_\".join((type, task)) if type else task\n  p = locate_resource(src_dir, lang)\n  e = Embedding.load(p)\n  if type == \"cw\":\n    e.apply_expansion(CaseExpander)\n    e.apply_expansion(DigitExpander)\n  if type == \"sgns\":\n    e.apply_expansion(CaseExpander)\n  if type == \"ue\":\n    e.apply_expansion(CaseExpander)\n  if normalize:\n    e.normalize_words(inplace=True)\n  return e", "docstring": "Return a word embeddings object for `lang` and of type `type`\n\nArgs:\nlang (string): language code.\ntask (string): parameters that define task.\ntype (string): skipgram, cw, cbow ...\nnoramlized (boolean): returns noramlized word embeddings vectors.", "source": "juraj-google-style"}
{"code": "def _save_to_database(url, property_name, data):\n    data = json.dumps([(d.to_dict() if hasattr(d, 'to_dict') else d) for d in data])\n    logger.debug(('_save_to_database() data: %s' % repr(data)))\n    requests.post((_WEB_URL + _REQUEST_DB_SAVE), timeout=REQUEST_TIMEOUT, allow_redirects=True, verify=False, data={'url': url, 'value': data, 'property_name': property_name})\n    logger.info(('`%s` for `%s` sent to REST DB.' % (property_name, url)))", "docstring": "Store `data` under `property_name` in the `url` key in REST API DB.\n\nArgs:\nurl (obj): URL of the resource to which `property_name` will be stored.\nproperty_name (str): Name of the property under which the `data` will\nbe stored.\ndata (obj): Any object.", "source": "codesearchnet"}
{"code": "def bulk_load_docs(es, docs):\n    chunk_size = 200\n    try:\n        results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size)\n        log.debug(f'Elasticsearch documents loaded: {results[0]}')\n        if (len(results[1]) > 0):\n            log.error('Bulk load errors {}'.format(results))\n    except elasticsearch.ElasticsearchException as e:\n        log.error('Indexing error: {}\\n'.format(e))", "docstring": "Bulk load docs\n\nArgs:\nes: elasticsearch handle\ndocs: Iterator of doc objects - includes index_name", "source": "codesearchnet"}
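A hedged example of the document iterator this expects; elasticsearch.helpers.bulk consumes action dicts, and the index and field names below are hypothetical.

docs = ({'_index': 'nanopubs', '_id': str(i), '_source': {'subject': s}}
        for i, s in enumerate(['p53', 'AKT1']))
bulk_load_docs(es, docs)   # es is an existing elasticsearch.Elasticsearch client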
{"code": "def get_relative_embeddings_left_right(max_relative_position, length, depth, num_heads, heads_share_relative_embedding, name):\n    initializer_stddev = (depth ** (- 0.5))\n    max_relative_position_unmasked = ((2 * max_relative_position) - 1)\n    if heads_share_relative_embedding:\n        embedding_shape = (max_relative_position_unmasked, depth)\n    else:\n        embedding_shape = (num_heads, max_relative_position_unmasked, depth)\n    relative_embeddings = tf.get_variable(name=name, shape=embedding_shape, initializer=tf.random_normal_initializer(stddev=initializer_stddev))\n    pad_length = tf.maximum((length - max_relative_position), 0)\n    slice_start_position = tf.maximum((max_relative_position - length), 0)\n    if heads_share_relative_embedding:\n        padded_relative_embeddings = tf.pad(relative_embeddings, [[pad_length, pad_length], [0, 0]])\n        used_relative_embeddings = tf.slice(padded_relative_embeddings, [slice_start_position, 0], [((2 * length) - 1), (- 1)])\n    else:\n        padded_relative_embeddings = tf.pad(relative_embeddings, [[0, 0], [pad_length, pad_length], [0, 0]])\n        used_relative_embeddings = tf.slice(padded_relative_embeddings, [0, slice_start_position, 0], [(- 1), ((2 * length) - 1), (- 1)])\n    return used_relative_embeddings", "docstring": "Instantiate or retrieve relative embeddings, sliced according to length.\n\nUse for unmasked case where the relative attention looks both left and right.\n\nArgs:\nmax_relative_position: an Integer for the number of entries in the relative\nembedding, which corresponds to the max relative distance that is\nconsidered.\nlength: an Integer, specifies the length of the input sequence for which\nthis relative embedding is retrieved for.\ndepth: an Integer, specifies the depth for relative embeddings.\nnum_heads: an Integer, specifies the number of heads.\nheads_share_relative_embedding: a Boolean specifying if the relative\nembedding is shared across heads.\nname: a string giving the name of the embedding variables.\n\nReturns:\na Tensor with shape [length, depth]", "source": "codesearchnet"}
{"code": "def configure_parser(self, parser):\n        \n        parser.add_argument(\"--log_path\", default=\"\", help=\"The log file path\")\n        parser.add_argument(\"--verbose\", help=\"Increase logging verbosity\", action=\"store_true\")", "docstring": "Adds the necessary supported arguments to the argument parser.\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add arguments to.", "source": "juraj-google-style"}
{"code": "class AriaProjector(nn.Module):\n\n    def __init__(self, config: AriaConfig):\n        super().__init__()\n        self.patch_to_query_dict = config.projector_patch_to_query_dict\n        self.in_features = config.vision_config.hidden_size\n        self.num_heads = config.vision_config.num_attention_heads\n        self.kv_dim = config.vision_config.hidden_size\n        self.hidden_features = config.text_config.hidden_size\n        self.output_dim = config.text_config.hidden_size\n        self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))\n        self.cross_attn = AriaCrossAttention(config)\n        self.layer_norm = nn.LayerNorm(self.in_features)\n        self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)\n\n    def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):\n        \n        batch_size, num_patches = (key_value_states.shape[0], key_value_states.shape[1])\n        if num_patches not in self.patch_to_query_dict.keys():\n            raise KeyError(f'Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}.')\n        query_num = self.patch_to_query_dict[num_patches]\n        queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)\n        if attn_mask is not None:\n            attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)\n            attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)\n        attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)\n        out = self.feed_forward(self.layer_norm(attention_out))\n        return out", "docstring": "Aria Projector module.\n\nThis module projects vision features into the language model's embedding space, enabling interaction between vision and language components.\n\nArgs:\nconfig (`AriaConfig`):\nConfiguration object for the model.", "source": "github-repos"}
{"code": "def _get_ssm_parameter(self, p):\n    try:\n        response = self._ssm.get_parameter(Name=p, WithDecryption=True)\n        return response.get('Parameter', {}).get('Value', None)\n    except Exception as ruh_roh:\n        logging.error(ruh_roh, exc_info=False)\n    return None", "docstring": "Get parameters from Simple Systems Manager\n\nArgs:\np - a parameter name\n\nReturns:\na value, decrypted if needed, if successful or None if things go\nsideways.", "source": "codesearchnet"}
{"code": "def limits(self, clip_negative=True):\n    if self.as_numpy_dtype in dtype_range:\n        min, max = dtype_range[self.as_numpy_dtype]\n    else:\n        raise ValueError(str(self) + ' does not have defined limits.')\n    if clip_negative:\n        min = 0\n    return (min, max)", "docstring": "Return intensity limits, i.e.\n\n(min, max) tuple, of the dtype.\nArgs:\nclip_negative : bool, optional If True, clip the negative range (i.e.\nreturn 0 for min intensity) even if the image dtype allows negative\nvalues. Returns\nmin, max : tuple Lower and upper intensity limits.", "source": "github-repos"}
{"code": "def _check_error(self, response, json_response=None):\n        \n\n        \n        if response.status_code >= 400:\n            json_response = json_response or self._get_json_response(response)\n            err_cls = self._check_http_error_code(response.status_code)\n            try:\n                raise err_cls(\"%s error: %s\" % (response.status_code, json_response[\"error\"][\"error_msg\"]), response.status_code)\n            \n            except TypeError:\n                raise err_cls(\"%s error: %s\" % (response.status_code, json_response[\"error_description\"]), response.status_code)\n\n        \n        return True", "docstring": "Check for HTTP error code from the response, raise exception if there's any\n\nArgs:\nresponse (object): Object returned by requests' `get` and `post`\nmethods\n\njson_response (dict): JSON response, if applicable\n\nRaises:\nHTTPError: If the status code of response is either 4xx or 5xx\n\nReturns:\nTrue if status code is not error code", "source": "juraj-google-style"}
{"code": "def initialize_block(self, block_header):\n        \n        \n        \n        state_view = \\\n            BlockWrapper.state_view_for_block(\n                self._block_cache.block_store.chain_head,\n                self._state_view_factory)\n\n        settings_view = SettingsView(state_view)\n        self._min_wait_time = settings_view.get_setting(\n            \"sawtooth.consensus.min_wait_time\", self._min_wait_time, int)\n        self._max_wait_time = settings_view.get_setting(\n            \"sawtooth.consensus.max_wait_time\", self._max_wait_time, int)\n        self._valid_block_publishers = settings_view.get_setting(\n            \"sawtooth.consensus.valid_block_publishers\",\n            self._valid_block_publishers,\n            list)\n\n        block_header.consensus = b\"Devmode\"\n        self._start_time = time.time()\n        self._wait_time = random.uniform(\n            self._min_wait_time, self._max_wait_time)\n        return True", "docstring": "Do initialization necessary for the consensus to claim a block,\nthis may include initiating voting activates, starting proof of work\nhash generation, or create a PoET wait timer.\n\nArgs:\nblock_header (BlockHeader): the BlockHeader to initialize.\nReturns:\nTrue", "source": "juraj-google-style"}
{"code": "def cli_cmd_to_string(args):\n    \n    if isinstance(args, basestring):\n        \n        return args\n    return ' '.join([pipes.quote(arg) for arg in args])", "docstring": "Converts a cmd arg list to string.\n\nArgs:\nargs: list of strings, the arguments of a command.\n\nReturns:\nString representation of the command.", "source": "juraj-google-style"}
{"code": "def assertStartsWith(self, actual, expected_start, msg=None):\n    if not actual.startswith(expected_start):\n        fail_msg = '%r does not start with %r' % (actual, expected_start)\n        fail_msg += ' : %r' % msg if msg else ''\n        self.fail(fail_msg)", "docstring": "Assert that actual.startswith(expected_start) is True.\n\nArgs:\nactual: str\nexpected_start: str\nmsg: Optional message to report on failure.", "source": "github-repos"}
{"code": "def get_test_input_for_op(val, dtype):\n    python_inferred_types = {(dtypes.int32, True): 1, (dtypes.float32, True): 1.0, (dtypes.complex128, True): 1j}\n    dtype, weak = dtype\n    inputs = []\n    if weak:\n        inputs.append(convert_to_input_type(val, 'WeakTensor', dtype))\n        if dtype in python_inferred_types:\n            val_in_dtype = val * python_inferred_types[dtype]\n            inputs.append(val_in_dtype)\n            inputs.append(convert_to_input_type(val_in_dtype, 'Tensor', None))\n    else:\n        inputs.append(convert_to_input_type(val, 'Tensor', dtype))\n        inputs.append(convert_to_input_type(val, 'NumPy', dtype))\n    return inputs", "docstring": "Returns a list containing all the possible inputs with a given dtype.\n\nArgs:\nval: value to convert to test input.\ndtype: a tuple of format (tf.Dtype, bool) where the bool value represents\nwhether the dtype is \"weak\" or not.\n\nReturns:\nA list of all possible inputs given a value and a dtype.", "source": "github-repos"}
{"code": "def service_status(self, short_name):\n    if (short_name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=short_name)\n    info = {}\n    service = self.services[short_name]['state']\n    info['heartbeat_age'] = (monotonic() - service.last_heartbeat)\n    info['numeric_status'] = service.state\n    info['string_status'] = service.string_state\n    return info", "docstring": "Get the current status of a service.\n\nReturns information about the service such as the length since the last\nheartbeat, any status messages that have been posted about the service\nand whether the heartbeat should be considered out of the ordinary.\n\nArgs:\nshort_name (string): The short name of the service to query\n\nReturns:\ndict: A dictionary with the status of the service", "source": "codesearchnet"}
{"code": "def reformat_to_pretty_xml(doc_xml):\n    assert isinstance(doc_xml, str)\n    dom_obj = xml.dom.minidom.parseString(doc_xml)\n    pretty_xml = dom_obj.toprettyxml(indent='  ')\n    return re.sub('^\\\\s*$\\\\n', '', pretty_xml, flags=re.MULTILINE)", "docstring": "Pretty print XML doc.\n\nArgs:\ndoc_xml : str\nWell formed XML doc\n\nReturns:\nstr: Pretty printed XML doc", "source": "codesearchnet"}
{"code": "def is_same_file(path1, path2):\n    \n    return (\n        path1 and path2\n        and os.path.isfile(path1) and os.path.isfile(path2)\n        and os.path.samefile(path1, path2))", "docstring": "Return True if path1 is the same file as path2.\n\nThe reason for this dance is that samefile throws if either file doesn't\nexist.\n\nArgs:\npath1: str or path-like.\npath2: str or path-like.\n\nReturns:\nbool. True if the same file, False if not.", "source": "juraj-google-style"}
{"code": "class CLIPSegDecoderOutput(ModelOutput):\n    logits: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Args:\nlogits (`torch.FloatTensor` of shape `(batch_size, height, width)`):\nClassification scores for each pixel.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\none for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\nattentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\nTuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\nsequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\nthe self-attention heads.", "source": "github-repos"}
{"code": "def poly_energy(sample_like, poly):\n    msg = 'poly_energy is deprecated and will be removed in dimod 0.9.0.In the future, use BinaryPolynomial.energy'\n    warnings.warn(msg, DeprecationWarning)\n    return BinaryPolynomial(poly, 'SPIN').energy(sample_like)", "docstring": "Calculates energy of a sample from a higher order polynomial.\n\nArgs:\nsample (samples_like):\nA raw sample. `samples_like` is an extension of NumPy's\narray_like structure. See :func:`.as_samples`.\n\npoly (dict):\nPolynomial as a dict of form {term: bias, ...}, where `term` is a\ntuple of variables and `bias` the associated bias.\n\nReturns:\nfloat: The energy of the sample.", "source": "codesearchnet"}
{"code": "def get_model_from_feature(feature: str, model: str, framework: Optional[str]=None, cache_dir: Optional[str]=None) -> Union['PreTrainedModel', 'TFPreTrainedModel']:\n    framework = FeaturesManager.determine_framework(model, framework)\n    model_class = FeaturesManager.get_model_class_for_feature(feature, framework)\n    try:\n        model = model_class.from_pretrained(model, cache_dir=cache_dir)\n    except OSError:\n        if framework == 'pt':\n            logger.info('Loading TensorFlow model in PyTorch before exporting to ONNX.')\n            model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir)\n        else:\n            logger.info('Loading PyTorch model in TensorFlow before exporting to ONNX.')\n            model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir)\n    return model", "docstring": "Attempts to retrieve a model from a model's name and the feature to be enabled.\n\nArgs:\nfeature (`str`):\nThe feature required.\nmodel (`str`):\nThe name of the model to export.\nframework (`str`, *optional*, defaults to `None`):\nThe framework to use for the export. See `FeaturesManager.determine_framework` for the priority should\nnone be provided.\n\nReturns:\nThe instance of the model.", "source": "github-repos"}
{"code": "def fix_addresses(start=None, end=None):\n    if (start in (None, idaapi.BADADDR)):\n        start = idaapi.cvar.inf.minEA\n    if (end in (None, idaapi.BADADDR)):\n        end = idaapi.cvar.inf.maxEA\n    return (start, end)", "docstring": "Set missing addresses to start and end of IDB.\n\nTake a start and end addresses. If an address is None or `BADADDR`,\nreturn start or end addresses of the IDB instead.\n\nArgs\nstart: Start EA. Use `None` to get IDB start.\nend:  End EA. Use `None` to get IDB end.\n\nReturns:\n(start, end)", "source": "codesearchnet"}
{"code": "def all_near_zero_mod(a: Union[float, complex, Iterable[float], np.ndarray],\n                      period: float,\n                      *,\n                      atol: float = 1e-8) -> bool:\n    \n    b = (np.asarray(a) + period / 2) % period - period / 2\n    return np.all(np.less_equal(np.abs(b), atol))", "docstring": "Checks if the tensor's elements are all near multiples of the period.\n\nArgs:\na: Tensor of elements that could all be near multiples of the period.\nperiod: The period, e.g. 2 pi when working in radians.\natol: Absolute tolerance.", "source": "juraj-google-style"}
{"code": "def align_segmentation(beat_times, song):\n    try:\n        (segment_times, segment_labels) = msaf.io.read_references(song)\n    except:\n        return (None, None, None)\n    segment_times = np.asarray(segment_times)\n    segment_intervals = msaf.utils.times_to_intervals(segment_times)\n    beat_intervals = np.asarray(zip(beat_times[:(- 1)], beat_times[1:]))\n    beat_segment_ids = librosa.util.match_intervals(beat_intervals, segment_intervals)\n    segment_beats = []\n    segment_times_out = []\n    segment_labels_out = []\n    for i in range(segment_times.shape[0]):\n        hits = np.argwhere((beat_segment_ids == i))\n        if ((len(hits) > 0) and (i < len(segment_intervals)) and (i < len(segment_labels))):\n            segment_beats.extend(hits[0])\n            segment_times_out.append(segment_intervals[(i, :)])\n            segment_labels_out.append(segment_labels[i])\n    segment_beats = list(segment_beats)\n    segment_times_out = segment_times\n    return (segment_beats, segment_times_out, segment_labels_out)", "docstring": "Load a ground-truth segmentation, and align times to the nearest\ndetected beats.\n\nArguments:\nbeat_times -- array\nsong -- path to the audio file\n\nReturns:\nsegment_beats -- array\nbeat-aligned segment boundaries\n\nsegment_times -- array\ntrue segment times\n\nsegment_labels -- array\nlist of segment labels", "source": "codesearchnet"}
{"code": "def cctop_check_status(jobid):\n    \n    status = 'http:\n    status_text = requests.post(status)\n    return status_text.text", "docstring": "Check the status of a CCTOP job ID.\n\nArgs:\njobid (str): Job ID obtained when job was submitted\n\nReturns:\nstr: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress,\n'Invalid' for any errors.", "source": "juraj-google-style"}
{"code": "def _PrintStorageInformationAsJSON(self, storage_reader):\n    serializer = json_serializer.JSONAttributeContainerSerializer\n    storage_counters = self._CalculateStorageCounters(storage_reader)\n    storage_counters_json = json.dumps(storage_counters)\n    self._output_writer.Write('{')\n    self._output_writer.Write('\"storage_counters\": {0:s}'.format(storage_counters_json))\n    self._output_writer.Write(',\\n')\n    self._output_writer.Write(' \"sessions\": {')\n    for (index, session) in enumerate(storage_reader.GetSessions()):\n        json_string = serializer.WriteSerialized(session)\n        if (index != 0):\n            self._output_writer.Write(',\\n')\n        self._output_writer.Write('\"session_{0:s}\": {1:s} '.format(session.identifier, json_string))\n    self._output_writer.Write('}}')", "docstring": "Writes a summary of sessions as machine-readable JSON.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "codesearchnet"}
{"code": "def list_cert_bindings(site):\n    ret = dict()\n    sites = list_sites()\n    if (site not in sites):\n        log.warning('Site not found: %s', site)\n        return ret\n    for binding in sites[site]['bindings']:\n        if sites[site]['bindings'][binding]['certificatehash']:\n            ret[binding] = sites[site]['bindings'][binding]\n    if (not ret):\n        log.warning('No certificate bindings found for site: %s', site)\n    return ret", "docstring": "List certificate bindings for an IIS site.\n\n.. versionadded:: 2016.11.0\n\nArgs:\nsite (str): The IIS site name.\n\nReturns:\ndict: A dictionary of the binding names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_bindings site", "source": "codesearchnet"}
{"code": "class RuntimeMetric(Metric):\n\n    def __init__(self, runtime_list, metric_id):\n        value = self._prepare_runtime_metrics(runtime_list)\n        submit_timestamp = time.time()\n        label = runtime_list[0].key.metric.namespace + '_' + RUNTIME_METRIC\n        super().__init__(submit_timestamp, metric_id, value, None, label)\n\n    def _prepare_runtime_metrics(self, distributions):\n        min_values = []\n        max_values = []\n        for dist in distributions:\n            min_values.append(dist.result.min)\n            max_values.append(dist.result.max)\n        min_value = min(min_values)\n        max_value = max(max_values)\n        runtime_in_s = float(max_value - min_value)\n        return runtime_in_s", "docstring": "The Distribution Metric in ready-to-publish format.\n\nArgs:\nruntime_list: list of distributions metrics from MetricResult\nwith runtime name\nmetric_id(uuid): unique id to identify test run", "source": "github-repos"}
{"code": "def update_(self, sct_dict, conf_arg=True):\n        \n        for opt, val in sct_dict.items():\n            if opt not in self.def_:\n                continue\n            if not conf_arg or self.def_[opt].conf_arg:\n                self[opt] = val", "docstring": "Update values of configuration section with dict.\n\nArgs:\nsct_dict (dict): dict indexed with option names. Undefined\noptions are discarded.\nconf_arg (bool): if True, only options that can be set in a config\nfile are updated.", "source": "juraj-google-style"}
{"code": "def remove(path, **kwargs):\n    kwargs = salt.utils.args.clean_kwargs(**kwargs)\n    rehash_ = kwargs.pop('rehash', True)\n    if kwargs:\n        salt.utils.args.invalid_kwargs(kwargs)\n    path = _normalize_dir(path)\n    path_str = salt.utils.stringutils.to_str(path)\n    system_path = get_path()\n    local_path = [salt.utils.stringutils.to_str(x) for x in os.environ['PATH'].split(PATHSEP)]\n\n    def _check_path(dirs, path):\n        '\\n        Check the dir list for the specified path, and make changes to the list\\n        if needed. Return True if changes were made to the list, otherwise\\n        return False.\\n        '\n        dirs_lc = [x.lower() for x in dirs]\n        path_lc = path.lower()\n        new_dirs = []\n        for (index, dirname) in enumerate(dirs_lc):\n            if (path_lc != dirname):\n                new_dirs.append(dirs[index])\n        if (len(new_dirs) != len(dirs)):\n            dirs[:] = new_dirs[:]\n            return True\n        else:\n            return False\n    if _check_path(local_path, path_str):\n        _update_local_path(local_path)\n    if (not _check_path(system_path, path)):\n        return True\n    result = __utils__['reg.set_value'](HIVE, KEY, VNAME, ';'.join(salt.utils.data.decode(system_path)), VTYPE)\n    if (result and rehash_):\n        return rehash()\n    else:\n        return result", "docstring": "r'''\nRemove the directory from the SYSTEM path\n\nReturns:\nboolean True if successful, False if unsuccessful\n\nrehash : True\nIf the registry was updated, and this value is set to ``True``, sends a\nWM_SETTINGCHANGE broadcast to refresh the environment variables. Set\nthis to ``False`` to skip this broadcast.\n\nCLI Example:\n\n.. code-block:: bash\n\n# Will remove C:\\Python27 from the path\nsalt '*' win_path.remove 'c:\\\\python27'", "source": "codesearchnet"}
{"code": "def register_name(self, register_index):\n        \n        result = self._dll.JLINKARM_GetRegisterName(register_index)\n        return ctypes.cast(result, ctypes.c_char_p).value.decode()", "docstring": "Retrives and returns the name of an ARM CPU register.\n\nArgs:\nself (JLink): the ``JLink`` instance\nregister_index (int): index of the register whose name to retrieve\n\nReturns:\nName of the register.", "source": "juraj-google-style"}
{"code": "def adjust_contrast(img, contrast_factor):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    enhancer = ImageEnhance.Contrast(img)\n    img = enhancer.enhance(contrast_factor)\n    return img", "docstring": "Adjust contrast of an Image.\n\nArgs:\nimg (PIL Image): PIL Image to be adjusted.\ncontrast_factor (float): How much to adjust the contrast. Can be any\nnon negative number. 0 gives a solid gray image, 1 gives the\noriginal image while 2 increases the contrast by a factor of 2.\n\nReturns:\nPIL Image: Contrast adjusted image.", "source": "codesearchnet"}
{"code": "def process_runway_configs(runway_dir=''):\n    \n    LOG.info('Processing application.json files from local directory \"%s\".', runway_dir)\n    file_lookup = FileLookup(runway_dir=runway_dir)\n    app_configs = process_configs(file_lookup, 'application-master-{env}.json', 'pipeline.json')\n    return app_configs", "docstring": "Read the _application.json_ files.\n\nArgs:\nrunway_dir (str): Name of runway directory with app.json files.\n\nReturns:\ncollections.defaultdict: Configurations stored for each environment\nfound.", "source": "juraj-google-style"}
{"code": "def CleanseRawStrings(raw_lines):\n  \n\n  delimiter = None\n  lines_without_raw_strings = []\n  for line in raw_lines:\n    if delimiter:\n      \n      end = line.find(delimiter)\n      if end >= 0:\n        \n        \n        \n        leading_space = Match(r'^(\\s*)\\S', line)\n        line = leading_space.group(1) + '\"\"' + line[end + len(delimiter):]\n        delimiter = None\n      else:\n        \n        line = '\"\"'\n\n    \n    \n    \n    while delimiter is None:\n      \n      \n      \n      \n      \n      \n      \n      \n      \n      \n      matched = Match(r'^(.*?)\\b(?:R|u8R|uR|UR|LR)\"([^\\s\\\\()]*)\\((.*)$', line)\n      if (matched and\n          not Match(r'^([^\\'\"]|\\'(\\\\.|[^\\'])*\\'|\"(\\\\.|[^\"])*\")*\n                    matched.group(1))):\n        delimiter = ')' + matched.group(2) + '\"'\n\n        end = matched.group(3).find(delimiter)\n        if end >= 0:\n          \n          line = (matched.group(1) + '\"\"' +\n                  matched.group(3)[end + len(delimiter):])\n          delimiter = None\n        else:\n          \n          line = matched.group(1) + '\"\"'\n      else:\n        break\n\n    lines_without_raw_strings.append(line)\n\n  \n  \n  return lines_without_raw_strings", "docstring": "Removes C++11 raw strings from lines.\n\nBefore:\nstatic const char kData[] = R\"(\nmulti-line string\n)\";\n\nAfter:\nstatic const char kData[] = \"\"\n(replaced by blank line)\n\"\";\n\nArgs:\nraw_lines: list of raw lines.\n\nReturns:\nlist of lines with C++11 raw strings replaced by empty strings.", "source": "juraj-google-style"}
{"code": "def add_sam2rnf_parser(subparsers, subcommand, help, description, simulator_name=None):\n    \n\n    parser_sam2rnf = subparsers.add_parser(subcommand, help=help, description=description)\n\n    parser_sam2rnf.set_defaults(func=sam2rnf)\n\n    parser_sam2rnf.add_argument(\n        '-s', '--sam', type=str, metavar='file', dest='sam_fn', required=True,\n        help='Input SAM/BAM with true (expected) alignments of the reads  (- for standard input).'\n    )\n\n    _add_shared_params(parser_sam2rnf, unmapped_switcher=True)\n\n    parser_sam2rnf.add_argument(\n        '-n',\n        '--simulator-name',\n        type=str,\n        metavar='str',\n        dest='simulator_name',\n        default=simulator_name,\n        help='Name of the simulator (for RNF).' if simulator_name is not None else argparse.SUPPRESS,\n    )", "docstring": "Add another parser for a SAM2RNF-like command.\n\nArgs:\nsubparsers (subparsers): File name of the genome from which read tuples are created (FASTA file).\nsimulator_name (str): Name of the simulator used in comments.", "source": "juraj-google-style"}
{"code": "def assert_has_rank(self, rank):\n    if self.rank not in (None, rank):\n        raise ValueError('Shape %s must have rank %d' % (self, rank))", "docstring": "Raises an exception if `self` is not compatible with the given `rank`.\n\nArgs:\nrank: An integer.\n\nRaises:\nValueError: If `self` does not represent a shape with the given `rank`.", "source": "github-repos"}
{"code": "def compile(cfg_path, out_path, executable=None, env=None, log=None):\n    try:\n        check_call([(executable or 'ace'), '-g', cfg_path, '-G', out_path], stdout=log, stderr=log, close_fds=True, env=(env or os.environ))\n    except (CalledProcessError, OSError):\n        logging.error('Failed to compile grammar with ACE. See {}'.format((log.name if (log is not None) else '<stderr>')))\n        raise", "docstring": "Use ACE to compile a grammar.\n\nArgs:\ncfg_path (str): the path to the ACE config file\nout_path (str): the path where the compiled grammar will be\nwritten\nexecutable (str, optional): the path to the ACE binary; if\n`None`, the `ace` command will be used\nenv (dict, optional): environment variables to pass to the ACE\nsubprocess\nlog (file, optional): if given, the file, opened for writing,\nor stream to write ACE's stdout and stderr compile messages", "source": "codesearchnet"}
{"code": "async def run(*cmd):\n  \n\n  stdout = await checked_run(*cmd)\n\n  log_path = os.path.join(FLAGS.base_dir, get_cmd_name(cmd) + '.log')\n  with gfile.Open(log_path, 'a') as f:\n    f.write(expand_cmd_str(cmd))\n    f.write('\\n')\n    f.write(stdout)\n    f.write('\\n')\n\n  \n  return stdout.split('\\n')", "docstring": "Run the given subprocess command in a coroutine.\n\nArgs:\n*cmd: the command to run and its arguments.\n\nReturns:\nThe output that the command wrote to stdout as a list of strings, one line\nper element (stderr output is piped to stdout).\n\nRaises:\nRuntimeError: if the command returns a non-zero result.", "source": "juraj-google-style"}
{"code": "def create_datastore_write_config(mapreduce_spec):\n  \n  force_writes = parse_bool(mapreduce_spec.params.get(\"force_writes\", \"false\"))\n  if force_writes:\n    return datastore_rpc.Configuration(force_writes=force_writes)\n  else:\n    \n    return datastore_rpc.Configuration()", "docstring": "Creates datastore config to use in write operations.\n\nArgs:\nmapreduce_spec: current mapreduce specification as MapreduceSpec.\n\nReturns:\nan instance of datastore_rpc.Configuration to use for all write\noperations in the mapreduce.", "source": "juraj-google-style"}
{"code": "def pairwise_alignment_stats(reference_seq_aln, other_seq_aln):\n    if (len(reference_seq_aln) != len(other_seq_aln)):\n        raise ValueError('Sequence lengths not equal - was an alignment run?')\n    reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)\n    other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln)\n    infodict = {}\n    stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)\n    infodict['percent_identity'] = stats_percent_ident\n    aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)\n    infodict['deletions'] = get_deletions(aln_df)\n    infodict['insertions'] = get_insertions(aln_df)\n    infodict['mutations'] = get_mutations(aln_df)\n    infodict['unresolved'] = get_unresolved(aln_df)\n    return infodict", "docstring": "Get a report of a pairwise alignment.\n\nArgs:\nreference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form\nother_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form\n\nReturns:\ndict: Dictionary of information on mutations, insertions, sequence identity, etc.", "source": "codesearchnet"}
{"code": "def _grappler_enabled_session_config():\n    rewriter_config = rewriter_config_pb2.RewriterConfig(disable_model_pruning=False, arithmetic_optimization=rewriter_config_pb2.RewriterConfig.ON)\n    graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)\n    return config_pb2.ConfigProto(graph_options=graph_options)", "docstring": "Constructs a Session config proto that explicitly enables Grappler.\n\nReturns:\nA config proto that obtains extra safety for the unit tests in this\nfile by ensuring that the relevant Grappler rewrites are always enabled.", "source": "github-repos"}
{"code": "def check_runtime_errors(cmd_derived_from_alias, pos_args_table):\n    \n    for placeholder, value in pos_args_table.items():\n        exec('{} = \"{}\"'.format(placeholder, value))  \n\n    expressions = get_placeholders(cmd_derived_from_alias)\n    for expression in expressions:\n        try:\n            exec(expression)  \n        except Exception as exception:  \n            error_msg = PLACEHOLDER_EVAL_ERROR.format(expression, exception)\n            raise CLIError(error_msg)", "docstring": "Validate placeholders and their expressions in cmd_derived_from_alias to make sure\nthat there is no runtime error (such as index out of range).\n\nArgs:\ncmd_derived_from_alias: The command derived from the alias\n(include any positional argument placehodlers)\npos_args_table: The positional argument table.", "source": "juraj-google-style"}
{"code": "def cardinality(gym_space):\n  \n\n  if (gym_space.dtype == np.float32) or (gym_space.dtype == np.float64):\n    tf.logging.error(\"Returning None for a float gym space's cardinality: \",\n                     gym_space)\n    return None\n\n  if isinstance(gym_space, Discrete):\n    return gym_space.n\n\n  if isinstance(gym_space, Box):\n    \n    return np.prod(gym_space.high - gym_space.low + 1)\n\n  raise NotImplementedError", "docstring": "Number of elements that can be represented by the space.\n\nMakes the most sense for Discrete or Box type with integral dtype, ex: number\nof actions in an action space.\n\nArgs:\ngym_space: The gym space.\n\nReturns:\nnp.int64 number of observations that can be represented by this space, or\nreturns None when this doesn't make sense, i.e. float boxes etc.\n\nRaises:\nNotImplementedError when a space's cardinality makes sense but we haven't\nimplemented it.", "source": "juraj-google-style"}
{"code": "def _wait_time(self, shard_state, secs, now=datetime.datetime.now):\n    assert (shard_state.slice_start_time is not None)\n    delta = (now() - shard_state.slice_start_time)\n    duration = datetime.timedelta(seconds=secs)\n    if (delta < duration):\n        return util.total_seconds((duration - delta))\n    else:\n        return 0", "docstring": "Time to wait until slice_start_time is secs ago from now.\n\nArgs:\nshard_state: shard state.\nsecs: duration in seconds.\nnow: a func that gets now.\n\nReturns:\n0 if no wait. A positive int in seconds otherwise. Always around up.", "source": "codesearchnet"}
{"code": "def concept_distance(c1, c2):\n    cause_purview = tuple(set((c1.cause.purview + c2.cause.purview)))\n    effect_purview = tuple(set((c1.effect.purview + c2.effect.purview)))\n    return (repertoire_distance(c1.expand_cause_repertoire(cause_purview), c2.expand_cause_repertoire(cause_purview)) + repertoire_distance(c1.expand_effect_repertoire(effect_purview), c2.expand_effect_repertoire(effect_purview)))", "docstring": "Return the distance between two concepts in concept space.\n\nArgs:\nc1 (Concept): The first concept.\nc2 (Concept): The second concept.\n\nReturns:\nfloat: The distance between the two concepts in concept space.", "source": "codesearchnet"}
{"code": "def get_street_from_xy(self, **kwargs):\n        \n        \n        params = {\n            'coordinateX': kwargs.get('longitude'),\n            'coordinateY': kwargs.get('latitude'),\n            'Radius': kwargs.get('radius'),\n            'cultureInfo': util.language_code(kwargs.get('lang'))\n        }\n\n        \n        result = self.make_request('geo', 'get_street_from_xy', **params)\n\n        \n        if not util.check_result(result, 'site'):\n            return False, 'UNKNOWN ERROR'\n\n        \n        values = util.response_list(result, 'site')\n        return True, [emtype.Street(**a) for a in values]", "docstring": "Obtain a list of streets around the specified point.\n\nArgs:\nlatitude (double): Latitude in decimal degrees.\nlongitude (double): Longitude in decimal degrees.\nradius (int): Radius (in meters) of the search.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Street]), or message string\nin case of error.", "source": "juraj-google-style"}
{"code": "def GetNodeAnnotation(node, annotation, default=None):\n    return getattr(node, _NODE_ANNOTATION_PREFIX + annotation, default)", "docstring": "Get annotation value from a node.\n\nArguments:\nnode: the node.\nannotation: annotation name - a string.\ndefault: the default value to return if there's no annotation.\n\nReturns:\nValue of the annotation in the given node. If the node doesn't have this\nparticular annotation name yet, returns default.", "source": "github-repos"}
{"code": "def file_path(path=None, payload=None, objectInput=None):\n    f = (path if path else write_payload(payload, objectInput))\n    if (not os.path.exists(f)):\n        msg = 'File {!r} does not exist'.format(f)\n        log.exception(msg)\n        raise TikaAppFilePathError(msg)\n    return f", "docstring": "Given a file path, payload or file object, it writes file on disk and\nreturns the temp path.\n\nArgs:\npath (string): path of real file\npayload(string): payload in base64 of file\nobjectInput (object): file object/standard input to analyze\n\nReturns:\nPath of file", "source": "codesearchnet"}
{"code": "def ParseMessage(descriptor, byte_str):\n    result_class = MakeClass(descriptor)\n    new_msg = result_class()\n    new_msg.ParseFromString(byte_str)\n    return new_msg", "docstring": "Generate a new Message instance from this Descriptor and a byte string.\n\nArgs:\ndescriptor: Protobuf Descriptor object\nbyte_str: Serialized protocol buffer byte string\n\nReturns:\nNewly created protobuf Message object.", "source": "codesearchnet"}
{"code": "def wait_for_elements(self, using, value, timeout=10000, interval=1000, asserter=is_displayed):\n    if (not callable(asserter)):\n        raise TypeError('Asserter must be callable.')\n\n    @retry(retry_on_exception=(lambda ex: isinstance(ex, WebDriverException)), stop_max_delay=timeout, wait_fixed=interval)\n    def _wait_for_elements(ctx, using, value):\n        els = ctx.elements(using, value)\n        if (not len(els)):\n            raise WebDriverException('no such element')\n        else:\n            el = els[0]\n            asserter(el)\n            return els\n    return _wait_for_elements(self, using, value)", "docstring": "Wait for elements till satisfy the given condition\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\ntimeout(int): How long we should be retrying stuff.\ninterval(int): How long between retries.\nasserter(callable): The asserter func to determine the result.\n\nReturns:\nReturn the list of Element if any of them satisfy the condition.\n\nRaises:\nWebDriverException.", "source": "codesearchnet"}
{"code": "def from_parent(parent_key, i):\n        \n        if not isinstance(parent_key, HDPrivateKey):\n            raise TypeError(\"parent_key must be an HDPrivateKey object.\")\n\n        hmac_key = parent_key.chain_code\n        if i & 0x80000000:\n            hmac_data = b'\\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big')\n        else:\n            hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big')\n\n        I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest()\n        Il, Ir = I[:32], I[32:]\n\n        parse_Il = int.from_bytes(Il, 'big')\n        if parse_Il >= bitcoin_curve.n:\n            return None\n\n        child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n\n\n        if child_key == 0:\n            \n            return None\n\n        child_depth = parent_key.depth + 1\n        return HDPrivateKey(key=child_key,\n                            chain_code=Ir,\n                            index=i,\n                            depth=child_depth,\n                            parent_fingerprint=parent_key.fingerprint)", "docstring": "Derives a child private key from a parent\nprivate key. It is not possible to derive a child\nprivate key from a public parent key.\n\nArgs:\nparent_private_key (HDPrivateKey):", "source": "juraj-google-style"}
{"code": "def __init__(self, resume_delay=0):\n    self.resume_delay = resume_delay", "docstring": "Initializes a ProcessContinuation object.\n\nArgs:\nresume_delay: indicates the minimum time, in seconds, that should elapse\nbefore re-invoking process() method for resuming the invocation of the\ncurrent element.", "source": "github-repos"}
{"code": "def __mul__( self, scaling ):\n        \n        new_calculation = Calculation( title=self.title, energy=self.energy*scaling, stoichiometry=self.scale_stoichiometry( scaling ) )\n        return new_calculation", "docstring": "\"Multiply\" this Calculation by a scaling factor.\nReturns a new Calculation with the same title, but scaled energy and stoichiometry.\n\nArgs:\nscaling (float): The scaling factor.\n\nReturns:\n(vasppy.Calculation): The scaled Calculation.", "source": "juraj-google-style"}
{"code": "def __div__(self, other):\n        \n        if isinstance(other, LazyOpResult):\n            other = other.expr\n        return NumpyArrayWeld(\n            numpy_weld_impl.div(\n                self.expr,\n                other,\n                self.weld_type\n            ),\n            self.weld_type\n        )", "docstring": "Summary\n\nArgs:\nother (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def hash64(data: Any, seed: int = 0) -> int:\n    \n    \n    \n    \n    c_data = to_str(data)\n    if mmh3:\n        c_signed_low, _ = mmh3.hash64(data, seed=seed, x64arch=IS_64_BIT)\n        return c_signed_low\n    py_data = to_bytes(c_data)\n    py_signed_low, _ = pymmh3_hash64(py_data, seed=seed)\n    return py_signed_low", "docstring": "Non-cryptographic, deterministic, fast hash.\n\nArgs:\ndata: data to hash\nseed: seed\n\nReturns:\nsigned 64-bit integer", "source": "juraj-google-style"}
{"code": "def prose_wc(args):\n    \n    if args.file is None:\n        return 1\n    if args.split_hyphens:\n        INTERSTITIAL_PUNCTUATION.append(re.compile(r'-'))\n    content = args.file.read().decode('utf-8')\n    filename = args.file.name\n    body = strip_frontmatter(content)\n    parsed = markdown_to_text(body)\n    result = wc(filename, body, parsed=parsed,\n                is_jekyll=(body != content))\n    if (args.update and\n            filename != '_stdin_' and\n            result['counts']['type'] == 'jekyll'):\n        update_file(filename, result, content, args.indent)\n    else:\n        _mockable_print({\n            'yaml': yaml.safe_dump(result, default_flow_style=False,\n                              indent=args.indent),\n            'json': json.dumps(result, indent=args.indent),\n            'default': default_dump(result),\n        }[args.format])\n    return 0", "docstring": "Processes data provided to print a count object, or update a file.\n\nArgs:\nargs: an ArgumentParser object returned by setup()", "source": "juraj-google-style"}
{"code": "def send_log_messages(self, messages: List[LogMessage]) -> None:\n    for message in messages:\n        self.send_log_message(message)", "docstring": "Prints multiple log messages to be captured by cloud logging.\n\nArgs:\n* messages: list of LogMessage dictionaries\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), name=None):\n    if name is not None:\n        bn_name = name + '_bn'\n        conv_name = name + '_conv'\n    else:\n        bn_name = None\n        conv_name = None\n    if backend.image_data_format() == 'channels_first':\n        bn_axis = 1\n    else:\n        bn_axis = 3\n    x = layers.Conv2D(filters, (num_row, num_col), strides=strides, padding=padding, use_bias=False, name=conv_name)(x)\n    x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n    x = layers.Activation('relu', name=name)(x)\n    return x", "docstring": "Utility function to apply conv + BN.\n\nArgs:\nx: input tensor.\nfilters: filters in `Conv2D`.\nnum_row: height of the convolution kernel.\nnum_col: width of the convolution kernel.\npadding: padding mode in `Conv2D`.\nstrides: strides in `Conv2D`.\nname: name of the ops; will become `name + '_conv'`\nfor the convolution and `name + '_bn'` for the\nbatch norm layer.\n\nReturns:\nOutput tensor after applying `Conv2D` and `BatchNormalization`.", "source": "github-repos"}
{"code": "def add_update_resource_views(self, resource_views):\n    if (not isinstance(resource_views, list)):\n        raise HDXError('ResourceViews should be a list!')\n    for resource_view in resource_views:\n        self.add_update_resource_view(resource_view)", "docstring": "Add new or update existing resource views in resource with new metadata.\n\nArgs:\nresource_views (List[Union[ResourceView,Dict]]): A list of resource views metadata from ResourceView objects or dictionaries\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutput]:\n    encoder_outputs = self.encoder(input_ids, attention_mask=attention_mask, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=inputs_embeds, head_mask=head_mask, past_key_values=None, use_cache=False, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    if not return_dict:\n        return encoder_outputs\n    return TFBaseModelOutput(last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, TFT5EncoderModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-small\")\n>>> model = TFT5EncoderModel.from_pretrained(\"google-t5/t5-small\")\n\n>>> input_ids = tokenizer(\n...     \"Studies have been shown that owning a dog is good for you\", return_tensors=\"tf\"\n... ).input_ids  # Batch size 1\n>>> outputs = model(input_ids)\n```", "source": "github-repos"}
{"code": "def _CaptureExpression(self, frame, expression):\n    (rc, value) = _EvaluateExpression(frame, expression)\n    if (not rc):\n        return {'name': expression, 'status': value}\n    return self.CaptureNamedVariable(expression, value, 0, self.expression_capture_limits)", "docstring": "Evalutes the expression and captures it into a Variable object.\n\nArgs:\nframe: evaluation context.\nexpression: watched expression to compile and evaluate.\n\nReturns:\nVariable object (which will have error status if the expression fails\nto evaluate).", "source": "codesearchnet"}
{"code": "def getModName(self):\n    ret = self.mod_name\n    if (ret is None):\n        ret = self.__class__.__name__\n    return ret.lower()", "docstring": "Return the lowercased name of this module.\n\nNotes:\nThis pulls the ``mod_name`` attribute on the class. This allows\nan implementer to set a arbitrary name for the module.  If this\nattribute is not set, it defaults to\n``self.__class__.__name__.lower()`` and sets ``mod_name`` to\nthat value.\n\nReturns:\n(str): The module name.", "source": "codesearchnet"}
{"code": "def en(item):\n    if (pakr is None):\n        return msgpack.packb(item, use_bin_type=True, unicode_errors='surrogatepass')\n    try:\n        return pakr.pack(item)\n    except Exception:\n        pakr.reset()\n        raise", "docstring": "Use msgpack to serialize a compatible python object.\n\nArgs:\nitem (obj): The object to serialize\n\nNotes:\nString objects are encoded using utf8 encoding.  In order to handle\npotentially malformed input, ``unicode_errors='surrogatepass'`` is set\nto allow encoding bad input strings.\n\nReturns:\nbytes: The serialized bytes in msgpack format.", "source": "codesearchnet"}
{"code": "def get_excitation_spectrum(self, width=0.1, npoints=2000):\n    roots = self.parse_tddft()\n    data = roots['singlet']\n    en = np.array([d['energy'] for d in data])\n    osc = np.array([d['osc_strength'] for d in data])\n    epad = (20.0 * width)\n    emin = (en[0] - epad)\n    emax = (en[(- 1)] + epad)\n    de = ((emax - emin) / npoints)\n    if (width < (2 * de)):\n        width = (2 * de)\n    energies = [(emin + (ie * de)) for ie in range(npoints)]\n    cutoff = (20.0 * width)\n    gamma = (0.5 * width)\n    gamma_sqrd = (gamma * gamma)\n    de = ((energies[(- 1)] - energies[0]) / (len(energies) - 1))\n    prefac = ((gamma / np.pi) * de)\n    x = []\n    y = []\n    for energy in energies:\n        xx0 = (energy - en)\n        stot = (osc / ((xx0 * xx0) + gamma_sqrd))\n        t = np.sum(stot[(np.abs(xx0) <= cutoff)])\n        x.append(energy)\n        y.append((t * prefac))\n    return ExcitationSpectrum(x, y)", "docstring": "Generate an excitation spectra from the singlet roots of TDDFT\ncalculations.\n\nArgs:\nwidth (float): Width for Gaussian smearing.\nnpoints (int): Number of energy points. More points => smoother\ncurve.\n\nReturns:\n(ExcitationSpectrum) which can be plotted using\npymatgen.vis.plotters.SpectrumPlotter.", "source": "codesearchnet"}
{"code": "def find_record(self, model_class, record_id, reload=False):\n        \n        cached_model = self.peek_record(model_class, record_id)\n        if cached_model is not None and reload is False:\n            return cached_model\n        else:\n            return self._get_record(model_class, record_id)", "docstring": "Return a instance of model_class from the API or the local cache.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nrecord_id (int): The id of the record requested.\nreload (bool, optional): Don't return the cached version if reload==True.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: An instance of model_class or None.", "source": "juraj-google-style"}
{"code": "def get_entity(self, etype, entity_id):\n        \n        r = fapi.get_entity(self.namespace, self.name, etype,\n                               entity_id, self.api_url)\n        fapi._check_response_code(r, 200)\n        dresp = r.json()\n        return Entity(etype, entity_id, dresp['attributes'])", "docstring": "Return entity in this workspace.\n\nArgs:\netype (str): Entity type\nentity_id (str): Entity name/unique id", "source": "juraj-google-style"}
{"code": "def dumps(ms, single=False, version=_default_version, properties=True, pretty_print=False, color=False, **kwargs):\n    if ((not pretty_print) and kwargs.get('indent')):\n        pretty_print = True\n    if single:\n        ms = [ms]\n    return serialize(ms, version=version, properties=properties, pretty_print=pretty_print, color=color)", "docstring": "Serialize an Xmrs object to a SimpleMRS representation\n\nArgs:\nms: an iterator of Xmrs objects to serialize (unless the\n*single* option is `True`)\nsingle: if `True`, treat *ms* as a single Xmrs object instead\nof as an iterator\nproperties: if `False`, suppress variable properties\npretty_print: if `True`, add newlines and indentation\ncolor: if `True`, colorize the output with ANSI color codes\nReturns:\na SimpleMrs string representation of a corpus of Xmrs", "source": "codesearchnet"}
{"code": "def parse_gene(gene_info):\n    gene = {}\n    identifier = None\n    hgnc_id = None\n    try:\n        if ('hgnc_id' in gene_info):\n            hgnc_id = int(gene_info['hgnc_id'])\n        elif ('hgnc_idnumber' in gene_info):\n            hgnc_id = int(gene_info['hgnc_idnumber'])\n        elif ('hgncid' in gene_info):\n            hgnc_id = int(gene_info['hgncid'])\n    except ValueError as e:\n        raise SyntaxError('Invalid hgnc id: {0}'.format(hgnc_id))\n    gene['hgnc_id'] = hgnc_id\n    identifier = hgnc_id\n    hgnc_symbol = None\n    if ('hgnc_symbol' in gene_info):\n        hgnc_symbol = gene_info['hgnc_symbol']\n    elif ('hgncsymbol' in gene_info):\n        hgnc_symbol = gene_info['hgncsymbol']\n    elif ('symbol' in gene_info):\n        hgnc_symbol = gene_info['symbol']\n    gene['hgnc_symbol'] = hgnc_symbol\n    if (not identifier):\n        if hgnc_symbol:\n            identifier = hgnc_symbol\n        else:\n            raise SyntaxError('No gene identifier could be found')\n    gene['identifier'] = identifier\n    transcripts = ''\n    if ('disease_associated_transcripts' in gene_info):\n        transcripts = gene_info['disease_associated_transcripts']\n    elif ('disease_associated_transcript' in gene_info):\n        transcripts = gene_info['disease_associated_transcript']\n    elif ('transcripts' in gene_info):\n        transcripts = gene_info['transcripts']\n    gene['transcripts'] = [transcript.strip() for transcript in transcripts.split(',') if transcript]\n    models = ''\n    if ('genetic_disease_models' in gene_info):\n        models = gene_info['genetic_disease_models']\n    elif ('genetic_disease_model' in gene_info):\n        models = gene_info['genetic_disease_model']\n    elif ('inheritance_models' in gene_info):\n        models = gene_info['inheritance_models']\n    elif ('genetic_inheritance_models' in gene_info):\n        models = gene_info['genetic_inheritance_models']\n    gene['inheritance_models'] = [model.strip() for model in models.split(',') if (model.strip() in VALID_MODELS)]\n    gene['mosaicism'] = (True if gene_info.get('mosaicism') else False)\n    gene['reduced_penetrance'] = (True if gene_info.get('reduced_penetrance') else False)\n    gene['database_entry_version'] = gene_info.get('database_entry_version')\n    return gene", "docstring": "Parse a gene line with information from a panel file\n\nArgs:\ngene_info(dict): dictionary with gene info\n\nReturns:\ngene(dict): A dictionary with the gene information\n{\n'hgnc_id': int,\n'hgnc_symbol': str,\n'disease_associated_transcripts': list(str),\n'inheritance_models': list(str),\n'mosaicism': bool,\n'reduced_penetrance': bool,\n'database_entry_version': str,\n}", "source": "codesearchnet"}
{"code": "def extract_subject_from_dn(cert_obj):\n    return ','.join(('{}={}'.format(OID_TO_SHORT_NAME_DICT.get(v.oid.dotted_string, v.oid.dotted_string), rdn_escape(v.value)) for v in reversed(list(cert_obj.subject))))", "docstring": "Serialize a DN to a DataONE subject string.\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nstr:\nPrimary subject extracted from the certificate DN.\n\nThe certificate DN (DistinguishedName) is a sequence of RDNs\n(RelativeDistinguishedName). Each RDN is a set of AVAs (AttributeValueAssertion /\nAttributeTypeAndValue). A DataONE subject is a plain string. As there is no single\nstandard specifying how to create a string representation of a DN, DataONE selected\none of the most common ways, which yield strings such as:\n\nCN=Some Name A123,O=Some Organization,C=US,DC=Some Domain,DC=org\n\nIn particular, the sequence of RDNs is reversed. Attribute values are escaped,\nattribute type and value pairs are separated by \"=\", and AVAs are joined together\nwith \",\". If an RDN contains an unknown OID, the OID is serialized as a dotted\nstring.\n\nAs all the information in the DN is preserved, it is not possible to create the\nsame subject with two different DNs, and the DN can be recreated from the subject.", "source": "codesearchnet"}
{"code": "def _verify_signature(message, signature, certs):\n    \n    for pem in certs:\n        verifier = Verifier.from_string(pem, is_x509_cert=True)\n        if verifier.verify(message, signature):\n            return\n\n    \n    raise AppIdentityError('Invalid token signature')", "docstring": "Verifies signed content using a list of certificates.\n\nArgs:\nmessage: string or bytes, The message to verify.\nsignature: string or bytes, The signature on the message.\ncerts: iterable, certificates in PEM format.\n\nRaises:\nAppIdentityError: If none of the certificates can verify the message\nagainst the signature.", "source": "juraj-google-style"}
{"code": "def select_files(self, what=\"o\"):\n        \n        choices = collections.OrderedDict([\n            (\"i\", self.input_file),\n            (\"o\", self.output_file),\n            (\"f\", self.files_file),\n            (\"j\", self.job_file),\n            (\"l\", self.log_file),\n            (\"e\", self.stderr_file),\n            (\"q\", self.qout_file),\n        ])\n\n        if what == \"all\":\n            return [getattr(v, \"path\") for v in choices.values()]\n\n        selected = []\n        for c in what:\n            try:\n                selected.append(getattr(choices[c], \"path\"))\n            except KeyError:\n                logger.warning(\"Wrong keyword %s\" % c)\n\n        return selected", "docstring": "Helper function used to select the files of a task.\n\nArgs:\nwhat: string with the list of characters selecting the file type\nPossible choices:\ni ==> input_file,\no ==> output_file,\nf ==> files_file,\nj ==> job_file,\nl ==> log_file,\ne ==> stderr_file,\nq ==> qout_file,\nall ==> all files.", "source": "juraj-google-style"}
{"code": "def debye_temperature(self, structure):\n        \n        v0 = (structure.volume * 1e-30 / structure.num_sites)\n        vl, vt = self.long_v(structure), self.trans_v(structure)\n        vm = 3**(1./3.) * (1 / vl**3 + 2 / vt**3)**(-1./3.)\n        td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi**2 / v0) ** (1./3.)\n        return td", "docstring": "Estimates the debye temperature from longitudinal and\ntransverse sound velocities\n\nArgs:\nstructure: pymatgen structure object\n\nReturns: debye temperature (in SI units)", "source": "juraj-google-style"}
{"code": "def add_properties(entity_proto, property_dict, exclude_from_indexes=None):\n    for (name, value) in property_dict.iteritems():\n        set_property(entity_proto.properties, name, value, exclude_from_indexes)", "docstring": "Add values to the given datastore.Entity proto message.\n\nArgs:\nentity_proto: datastore.Entity proto message.\nproperty_dict: a dictionary from property name to either a python object or\ndatastore.Value.\nexclude_from_indexes: if the value should be exclude from indexes. None\nleaves indexing as is (defaults to False if value is not a Value\nmessage).\n\nUsage:\n>>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]})\n\nRaises:\nTypeError: if a given property value type is not supported.", "source": "codesearchnet"}
{"code": "def convert_predictions_to_image_summaries(hook_args):\n  \n  decode_hparams = hook_args.decode_hparams\n  if not decode_hparams.display_decoded_images:\n    return []\n  predictions = hook_args.predictions[0]\n\n  \n  all_summaries = []\n  rand_predictions = np.random.choice(predictions, size=10)\n  for ind, prediction in enumerate(rand_predictions):\n    output_summary = image_to_tf_summary_value(\n        prediction[\"outputs\"], tag=\"%d_output\" % ind)\n    input_summary = image_to_tf_summary_value(\n        prediction[\"inputs\"], tag=\"%d_input\" % ind)\n    all_summaries.append(input_summary)\n    all_summaries.append(output_summary)\n  return all_summaries", "docstring": "Optionally converts images from hooks_args to image summaries.\n\nArgs:\nhook_args: DecodeHookArgs namedtuple\nReturns:\nsummaries: list of tf.Summary values if hook_args.decode_hpara", "source": "juraj-google-style"}
{"code": "def as_dataframe(self, max_rows=None):\n      \n      max_rows = len(self._timeseries_list) if max_rows is None else max_rows\n      headers = [{\n          'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()}\n          for ts in self._timeseries_list[:max_rows]]\n\n      if not headers:\n        return pandas.DataFrame()\n\n      dataframe = pandas.io.json.json_normalize(headers)\n\n      \n      dataframe.columns = pandas.MultiIndex.from_tuples(\n          [(col, '') if col == 'resource.type' else col.rsplit('.', 1)\n           for col in dataframe.columns])\n\n      \n      resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(\n          dataframe['resource.labels'].columns)\n      sorted_columns = [('resource.type', '')]\n      sorted_columns += [('resource.labels', key) for key in resource_keys]\n      sorted_columns += sorted(col for col in dataframe.columns\n                               if col[0] == 'metric.labels')\n      dataframe = dataframe[sorted_columns]\n\n      \n      dataframe = dataframe.sort_values(sorted_columns)\n      dataframe = dataframe.reset_index(drop=True).fillna('')\n      return dataframe", "docstring": "Creates a pandas dataframe from the query metadata.\n\nArgs:\nmax_rows: The maximum number of timeseries metadata to return. If None,\nreturn all.\n\nReturns:\nA pandas dataframe containing the resource type, resource labels and\nmetric labels. Each row in this dataframe corresponds to the metadata\nfrom one time series.", "source": "juraj-google-style"}
{"code": "def get_base_most_function(function):\n    for contract in (function.contract.inheritance + [function.contract]):\n        for f in contract.functions_not_inherited:\n            if (f.full_name == function.full_name):\n                return f\n    raise Exception('Could not resolve the base-most function for the provided function.')", "docstring": "Obtains the base function definition for the provided function. This could be used to obtain the original\ndefinition of a function, if the provided function is an override.\n\nReturns:\n(function): Returns the base-most function of a provided function. (The original definition).", "source": "codesearchnet"}
{"code": "def getprop(self, prop_name, timeout=DEFAULT_GETPROP_TIMEOUT_SEC):\n    return self.shell(['getprop', prop_name], timeout=timeout).decode('utf-8').strip()", "docstring": "Get a property of the device.\n\nThis is a convenience wrapper for `adb shell getprop xxx`.\n\nArgs:\nprop_name: A string that is the name of the property to get.\ntimeout: float, the number of seconds to wait before timing out.\nIf not specified, the DEFAULT_GETPROP_TIMEOUT_SEC is used.\n\nReturns:\nA string that is the value of the property, or None if the property\ndoesn't exist.", "source": "github-repos"}
{"code": "def scale(self, replicas):\n        \n\n        if 'Global' in self.attrs['Spec']['Mode'].keys():\n            raise InvalidArgument('Cannot scale a global container')\n\n        service_mode = ServiceMode('replicated', replicas)\n        return self.client.api.update_service(self.id, self.version,\n                                              mode=service_mode,\n                                              fetch_current_spec=True)", "docstring": "Scale service container.\n\nArgs:\nreplicas (int): The number of containers that should be running.\n\nReturns:\nbool: ``True`` if successful.", "source": "juraj-google-style"}
{"code": "def connect(signal, receiver):\n    \n    __check_receiver(receiver)\n\n    if __is_bound_method(receiver):\n        ref = WeakMethod\n    else:\n        ref = weakref.ref\n\n    with __lock:\n        __purge()\n        __receivers[signal].append(ref(receiver))", "docstring": "Register `receiver` method/function as a receiver for the `signal`.\n\nWhen the signal is emitted, this receiver will be invoked along with\nall other associated signals.\n\nArgs:\nsignal: A signal identifier (e.g., a signal name)\nreceiver: A callable object to connect to the signal.", "source": "juraj-google-style"}
{"code": "def convert_instancenorm(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting instancenorm ...')\n\n    if names == 'short':\n        tf_name = 'IN' + random_string(6)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    assert(len(inputs) == 3)\n\n    bias_name = '{0}.bias'.format(w_name)\n    weights_name = '{0}.weight'.format(w_name)\n\n    \n    if inputs[-2] + '_np' in layers:\n        gamma = layers[inputs[-2] + '_np']\n    else:\n        gamma = weights[weights_name].numpy()\n\n    if inputs[-1] + '_np' in layers:\n        beta = layers[inputs[-1] + '_np']\n    else:\n        beta = weights[bias_name].numpy()\n\n    def target_layer(x, epsilon=params['epsilon'], gamma=gamma, beta=beta):\n        layer = tf.contrib.layers.instance_norm(\n            x,\n            param_initializers={'beta': tf.constant_initializer(beta), 'gamma': tf.constant_initializer(gamma)},\n            epsilon=epsilon, data_format='NCHW',\n            trainable=False\n        )\n        return layer\n\n    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert instance normalization layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def make_multi_qq_plots(arrays, key_text):\n    import omega as om\n    p = om.RectPlot()\n    p.addXY([0, 1.0], [0, 1.0], '1:1')\n    for (index, array) in enumerate(arrays):\n        (kev, obs, mdl) = array\n        c_obs = np.cumsum(obs)\n        c_mdl = np.cumsum(mdl)\n        mx = (0.5 * (c_obs[(- 1)] + c_mdl[(- 1)]))\n        c_obs /= mx\n        c_mdl /= mx\n        p.addXY(c_mdl, c_obs, ('%s \n    locs = (np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2))\n    c0 = 1.05\n    c1 = 1.1\n    for loc in locs:\n        i0 = int(np.floor(loc))\n        frac = (loc - i0)\n        kevval = (((1 - frac) * kev[i0]) + (frac * kev[(i0 + 1)]))\n        mdlval = (((1 - frac) * c_mdl[i0]) + (frac * c_mdl[(i0 + 1)]))\n        obsval = (((1 - frac) * c_obs[i0]) + (frac * c_obs[(i0 + 1)]))\n        p.addXY([mdlval, mdlval], [c0, c1], ('%.2f keV' % kevval), dsn=2)\n        p.addXY([c0, c1], [obsval, obsval], None, dsn=2)\n    p.setLabels('Cumulative rescaled model', 'Cumulative rescaled data')\n    p.defaultKeyOverlay.vAlign = 0.3\n    return p", "docstring": "Make a quantile-quantile plot comparing multiple sets of events and models.\n\n*arrays*\nX.\n*key_text*\nText describing the quantile-quantile comparison quantity; will be\nshown on the plot legend.\nReturns:\nAn :class:`omega.RectPlot` instance.\n\n*TODO*: nothing about this is Sherpa-specific. Same goes for some of the\nplotting routines in :mod:`pkwit.environments.casa.data`; might be\nreasonable to add a submodule for generic X-ray-y plotting routines.\n\n*TODO*: Some gross code duplication here.", "source": "codesearchnet"}
{"code": "def get_job(self, id):\n    return self._get_element_by_id(self.jobs, 'jobs', Job, str(id))", "docstring": "Retrieves a job matching the given `id`\n\nArgs:\nid (str): Job `id` to match.\n\nReturns:\nJob: Job matching the given `id`\n\nRaises:\nValueError: No resource matches given `id` or multiple resources matching given `id`", "source": "codesearchnet"}
{"code": "def send(self, value):\n        \n        if not self.block and self._stdin is not None:\n            self.writer.write(\"{}\\n\".format(value))\n            return self\n        else:\n            raise TypeError(NON_BLOCKING_ERROR_MESSAGE)", "docstring": "Send text to stdin. Can only be used on non blocking commands\n\nArgs:\nvalue (str): the text to write on stdin\nRaises:\nTypeError: If command is blocking\nReturns:\nShellCommand: return this ShellCommand instance for chaining", "source": "juraj-google-style"}
{"code": "def PluginRunToTagToContent(self, plugin_name):\n    \n    mapping = {}\n    for run in self.Runs():\n      try:\n        tag_to_content = self.GetAccumulator(run).PluginTagToContent(\n            plugin_name)\n      except KeyError:\n        \n        continue\n      mapping[run] = tag_to_content\n    return mapping", "docstring": "Returns a 2-layer dictionary of the form {run: {tag: content}}.\n\nThe `content` referred above is the content field of the PluginData proto\nfor the specified plugin within a Summary.Value proto.\n\nArgs:\nplugin_name: The name of the plugin for which to fetch content.\n\nReturns:\nA dictionary of the form {run: {tag: content}}.", "source": "juraj-google-style"}
{"code": "def infer_transportation_modes(self, dt_threshold=10):\n    self.segments = [segment.infer_transportation_mode(dt_threshold=dt_threshold) for segment in self.segments]\n    return self", "docstring": "In-place transportation inferring of segments\n\nReturns:\nThis track", "source": "codesearchnet"}
{"code": "def show_stories(self, raw=False, limit=None):\n        \n        show_stories = self._get_stories('showstories', limit)\n        if raw:\n            show_stories = [story.raw for story in show_stories]\n        return show_stories", "docstring": "Returns list of item ids of latest Show HN stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of Show HN stories.", "source": "juraj-google-style"}
{"code": "def __init__(self, text, stopwords=None):\n\n        \n\n        self.text = text\n        self.load_stopwords(stopwords)\n        self.tokenize()", "docstring": "Store the raw text, tokenize.\n\nArgs:\ntext (str): The raw text string.\nstopwords (str): A custom stopwords list path.", "source": "juraj-google-style"}
{"code": "def future_value(present_value, annual_rate, periods_per_year, years):\n    rate_per_period = (annual_rate / float(periods_per_year))\n    periods = (periods_per_year * years)\n    return (present_value * ((1 + rate_per_period) ** periods))", "docstring": "Calculates the future value of money invested at an anual interest rate,\nx times per year, for a given number of years.\n\nArgs:\npresent_value: int or float, the current value of the money (principal).\n\nannual_rate: float 0 to 1 e.g., .5 = 50%), the interest rate paid out.\n\nperiods_per_year: int, the number of times money is invested per year.\n\nyears: int, the number of years invested.\n\nReturns:\nFloat, the future value of the money invested with compound interest.", "source": "codesearchnet"}
{"code": "def isdisjoint(self, other):\n    if isinstance(other, (_sequence_types + (BaseMultiset,))):\n        pass\n    elif (not isinstance(other, Container)):\n        other = self._as_multiset(other)\n    return all(((element not in other) for element in self._elements.keys()))", "docstring": "r\"\"\"Return True if the set has no elements in common with other.\n\nSets are disjoint iff their intersection is the empty set.\n\n>>> ms = Multiset('aab')\n>>> ms.isdisjoint('bc')\nFalse\n>>> ms.isdisjoint(Multiset('ccd'))\nTrue\n\nArgs:\nother: The other set to check disjointedness. Can also be an :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].", "source": "codesearchnet"}
{"code": "def build_filter_stack(stack, options):\n    if options.get('keyword_case'):\n        stack.preprocess.append(filters.KeywordCaseFilter(options['keyword_case']))\n    if options.get('identifier_case'):\n        stack.preprocess.append(filters.IdentifierCaseFilter(options['identifier_case']))\n    if options.get('truncate_strings'):\n        stack.preprocess.append(filters.TruncateStringFilter(width=options['truncate_strings'], char=options['truncate_char']))\n    if options.get('use_space_around_operators', False):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.SpacesAroundOperatorsFilter())\n    if options.get('strip_comments'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.StripCommentsFilter())\n    if (options.get('strip_whitespace') or options.get('reindent')):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.StripWhitespaceFilter())\n    if options.get('reindent'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.ReindentFilter(char=options['indent_char'], width=options['indent_width'], indent_after_first=options['indent_after_first'], indent_columns=options['indent_columns'], wrap_after=options['wrap_after'], comma_first=options['comma_first']))\n    if options.get('reindent_aligned', False):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.AlignedIndentFilter(char=options['indent_char']))\n    if options.get('right_margin'):\n        stack.enable_grouping()\n        stack.stmtprocess.append(filters.RightMarginFilter(width=options['right_margin']))\n    if options.get('output_format'):\n        frmt = options['output_format']\n        if (frmt.lower() == 'php'):\n            fltr = filters.OutputPHPFilter()\n        elif (frmt.lower() == 'python'):\n            fltr = filters.OutputPythonFilter()\n        else:\n            fltr = None\n        if (fltr is not None):\n            stack.postprocess.append(fltr)\n    return stack", "docstring": "Setup and return a filter stack.\n\nArgs:\nstack: :class:`~sqlparse.filters.FilterStack` instance\noptions: Dictionary with options validated by validate_options.", "source": "codesearchnet"}
{"code": "def after_run(self, run_context, run_values):\n    pass", "docstring": "Called after each call to run().\n\nThe `run_values` argument contains results of requested ops/tensors by\n`before_run()`.\n\nThe `run_context` argument is the same one send to `before_run` call.\n`run_context.request_stop()` can be called to stop the iteration.\n\nIf `session.run()` raises any exceptions then `after_run()` is not called.\n\nArgs:\nrun_context: A `SessionRunContext` object.\nrun_values: A SessionRunValues object.", "source": "github-repos"}
{"code": "def stream_matching(self, address, name):\n    matching = [x for x in self.entries if (x.valid and x.target.matches(address, name))]\n    rpc_list = []\n    for var in matching:\n        rpc_list.extend(var.generate_rpcs(address))\n    return rpc_list", "docstring": "Return the RPCs needed to stream matching config variables to the given tile.\n\nThis function will return a list of tuples suitable for passing to\nEmulatedDevice.deferred_rpc.\n\nArgs:\naddress (int): The address of the tile that we wish to stream to\nname (str or bytes): The 6 character name of the target tile.\n\nReturns:\nlist of tuple: The list of RPCs to send to stream these variables to a tile.", "source": "codesearchnet"}
{"code": "def _GetHashes(self, target_queue, max_hashes):\n    hashes = []\n    for _ in range(0, max_hashes):\n        try:\n            item = target_queue.get_nowait()\n        except Queue.Empty:\n            continue\n        hashes.append(item)\n    return hashes", "docstring": "Retrieves a list of items from a queue.\n\nArgs:\ntarget_queue (Queue.queue): queue to retrieve hashes from.\nmax_hashes (int): maximum number of items to retrieve from the\ntarget_queue.\n\nReturns:\nlist[object]: list of at most max_hashes elements from the target_queue.\nThe list may have no elements if the target_queue is empty.", "source": "codesearchnet"}
{"code": "def decorate(fn):\n    \n    if not isfunction(fn):\n        raise TypeError('paco: fn must be a callable object')\n\n    @functools.wraps(fn)\n    def decorator(*args, **kw):\n        \n        for arg in args:\n            if iscoro_or_corofunc(arg):\n                return fn(*args, **kw)\n\n        \n        if len(args) and args[0] is None:\n            raise TypeError('paco: first argument cannot be empty')\n\n        def wrapper(coro, *_args, **_kw):\n            \n            if not iscoro_or_corofunc(coro):\n                raise TypeError('paco: first argument must be a '\n                                'coroutine or coroutine function')\n\n            \n            _args = ((coro,) + (args + _args))\n            kw.update(_kw)\n\n            \n            return fn(*_args, **kw)\n        return wrapper\n    return decorator", "docstring": "Generic decorator for coroutines helper functions allowing\nmultiple variadic initialization arguments.\n\nThis function is intended to be used internally.\n\nArguments:\nfn (function): target function to decorate.\n\nRaises:\nTypeError: if function or coroutine function is not provided.\n\nReturns:\nfunction: decorated function.", "source": "juraj-google-style"}
{"code": "def _single_request(self, method, *args, **kwargs):\n        \n\n        \n        \n        \n        \n        \n\n        \n        _method = self._service\n\n        \n        for item in method.split('.'):\n\n            \n            if method.endswith(item):\n                _method = getattr(_method, item)(*args, **kwargs)\n            else:\n                \n                _method = getattr(_method, item)()\n\n        \n        \n            \n            \n\n        \n        _method.uri = _method.uri.replace('$ENDPOINT', self._endpoint)\n\n        \n        try:\n            return _method.execute(http=self._http)\n        except googleapiclient.errors.HttpError as exc:\n            response = json.loads(exc.content.decode('utf-8'))['error']\n\n            raise APIError(code=response['code'], message=response['message'], http_error=exc)", "docstring": "Make a single request to the fleet API endpoint\n\nArgs:\nmethod (str): A dot delimited string indicating the method to call.  Example: 'Machines.List'\n*args: Passed directly to the method being called.\n**kwargs: Passed directly to the method being called.\n\nReturns:\ndict: The response from the method called.\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "juraj-google-style"}
{"code": "def tas53(msg):\n    d = hex2bin(data(msg))\n    if (d[33] == '0'):\n        return None\n    tas = (bin2int(d[34:46]) * 0.5)\n    return round(tas, 1)", "docstring": "Aircraft true airspeed, BDS 5,3 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message\n\nReturns:\nfloat: true airspeed in knots", "source": "codesearchnet"}
{"code": "def bridge_to_parent(br):\n    \n    cmd = 'ovs-vsctl br-to-parent {0}'.format(br)\n    result = __salt__['cmd.run_all'](cmd)\n    if result['retcode'] != 0:\n        return False\n    return result['stdout']", "docstring": "Returns the parent bridge of a bridge.\n\nArgs:\nbr: A string - bridge name\n\nReturns:\nName of the parent bridge. This is the same as the bridge name if the\nbridge is not a fake bridge. If the bridge does not exist, False is\nreturned.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_to_parent br0", "source": "juraj-google-style"}
{"code": "def nr_profiles(arr, genomes):\n    \n    gs_collapse = []\n    genome_idx_dict = {}\n    indices = []\n    patt_dict = {}\n    for i, g in enumerate(genomes):\n        p = arr[i, :].tostring()\n        if p in patt_dict:\n            parent = patt_dict[p]\n            idx = genome_idx_dict[parent]\n            gs_collapse[idx].append(g)\n        else:\n            indices.append(i)\n            patt_dict[p] = g\n            genome_idx_dict[g] = len(gs_collapse)\n            gs_collapse.append([g])\n    return arr[indices, :], gs_collapse", "docstring": "Get a condensed cgMLST pairwise distance matrix for specified Genomes_\nwhere condensed means redundant cgMLST profiles are only represented once in the distance matrix.\n\nArgs:\nuser_name (list): List of Genome_ names to retrieve condensed distance matrix for\n\nReturns:\n(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_", "source": "juraj-google-style"}
{"code": "def zip(self, destination: typing.Union[str, Path] = None, encode: bool = True) -> str:\n        \n        if encode:\n            self._encode()\n\n        if destination is None:\n            destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')\n        else:\n            destination_path = elib.path.ensure_file(destination, must_exist=False)\n\n        LOGGER.debug('zipping mission to: %s', destination_path)\n\n        destination_path.write_bytes(dummy_miz)\n\n        with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:\n\n            for root, _, items in os.walk(self.temp_dir.absolute()):\n                for item in items:\n                    item_abs_path = Path(root, item).absolute()\n                    item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)\n                    zip_file.write(item_abs_path, arcname=item_rel_path)\n\n        return str(destination_path)", "docstring": "Write mission, dictionary etc. to a MIZ file\n\nArgs:\ndestination: target MIZ file (if none, defaults to source MIZ + \"_EMIZ\"\n\nReturns: destination file", "source": "juraj-google-style"}
{"code": "def read_field_h5(xdmf_file, fieldname, snapshot, header=None):\n    \n    if header is None:\n        header, xdmf_root = read_geom_h5(xdmf_file, snapshot)\n    else:\n        xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n\n    npc = header['nts'] \n    flds = np.zeros(_flds_shape(fieldname, header))\n    data_found = False\n\n    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):\n        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))\n        for data_attr in elt_subdomain.findall('Attribute'):\n            if data_attr.get('Name') != fieldname:\n                continue\n            icore, fld = _get_field(xdmf_file, data_attr.find('DataItem'))\n            \n            fld = fld.T\n            shp = fld.shape\n            if shp[-1] == 1 and header['nts'][0] == 1:  \n                fld = fld.reshape((shp[0], 1, shp[1], shp[2]))\n                if header['rcmb'] < 0:\n                    fld = fld[(2, 0, 1), ...]\n            elif shp[-1] == 1:  \n                fld = fld.reshape((shp[0], shp[1], 1, shp[2]))\n                if header['rcmb'] < 0:\n                    fld = fld[(0, 2, 1), ...]\n            elif header['nts'][1] == 1:  \n                fld = fld.reshape((1, shp[0], 1, shp[1]))\n            ifs = [icore \n                   npc[i] for i in range(3)]\n            if header['zp']:  \n                fld = fld[:, :, :, :-1]\n            flds[:,\n                 ifs[0]:ifs[0] + npc[0] + header['xp'],\n                 ifs[1]:ifs[1] + npc[1] + header['yp'],\n                 ifs[2]:ifs[2] + npc[2],\n                 ibk] = fld\n            data_found = True\n\n    flds = _post_read_flds(flds, header)\n\n    return (header, flds) if data_found else None", "docstring": "Extract field data from hdf5 files.\n\nArgs:\nxdmf_file (:class:`pathlib.Path`): path of the xdmf file.\nfieldname (str): name of field to extract.\nsnapshot (int): snapshot number.\nheader (dict): geometry information.\nReturns:\n(dict, numpy.array): geometry information and field data. None\nis returned if data is unavailable.", "source": "juraj-google-style"}
{"code": "def exact_match(self, descriptor):\n        \n        return self._exact_match_field(self._group, descriptor.get_group()) \\\n            and self._exact_atch_field(self._type, descriptor.get_type()) \\\n            and self._exact_match_field(self._kind, descriptor.get_kind()) \\\n            and self._exact_match_field(self._name, descriptor.get_name()) \\\n            and self._exact_match_field(self._version, descriptor.get_version())", "docstring": "Matches this descriptor to another descriptor exactly.\n\nArgs:\ndescriptor: another descriptor to match this one.\n\nReturns: True if descriptors match or False otherwise.", "source": "juraj-google-style"}
{"code": "def _normalize(self, text: str) -> str:\n    accepted = [chr(i) for i in range(ord('a'), ord('z') + 1)] + [chr(i) for i in range(ord('A'), ord('Z') + 1)] + [chr(i) for i in range(ord('0'), ord('9') + 1)] + ['.']\n    accepted = frozenset(accepted)\n    pattern = re.compile('_+')\n    text = ''.join([c if c in accepted else '_' for c in text.lower()])\n    text = pattern.sub('_', text).strip('_')\n    return text", "docstring": "Normalizes the input text. This process is for the genres and the artist\n\nArgs:\ntext (`str`):\nArtist or Genre string to normalize", "source": "github-repos"}
{"code": "def build_dot_value(key, value):\n    if (key.count('.') == 0):\n        return (key, value)\n    final_value = value\n    reverse_split = key.split('.')[::(- 1)]\n    end = (len(reverse_split) - 1)\n    for (idx, k) in enumerate(reverse_split):\n        if (idx == end):\n            return (k, final_value)\n        final_value = {k: final_value}", "docstring": "Build new dictionaries based off of the dot notation key.\n\nFor example, if a key were 'x.y.z' and the value was 'foo',\nwe would expect a return value of: ('x', {'y': {'z': 'foo'}})\n\nArgs:\nkey (str): The key to build a dictionary off of.\nvalue: The value associated with the dot notation key.\n\nReturns:\ntuple: A 2-tuple where the first element is the key of\nthe outermost scope (e.g. left-most in the dot\nnotation key) and the value is the constructed value\nfor that key (e.g. a dictionary)", "source": "codesearchnet"}
{"code": "def stop(self, timeout=None):\n        \n        assert self.state == STARTED, \"Process not started\"\n        self.state = STOPPING\n        \n        self._run_hook(ProcessStopHook, timeout=timeout)\n        for s in self._spawned:\n            if not s.ready():\n                self.log.debug(\n                    \"Waiting for %s *%s **%s\", s._function, s._args, s._kwargs)\n            s.wait(timeout=timeout)\n        self._spawned = []\n        self._controllers = OrderedDict()\n        self._unpublished = set()\n        self.state = STOPPED\n        self.log.debug(\"Done process.stop()\")", "docstring": "Stop the process and wait for it to finish\n\nArgs:\ntimeout (float): Maximum amount of time to wait for each spawned\nobject. None means forever", "source": "juraj-google-style"}
{"code": "def _resize_images(self, x, height_factor, width_factor, data_format, interpolation='nearest'):\n    if data_format not in {'channels_last', 'channels_first'}:\n        raise ValueError(f'Invalid `data_format` argument: {data_format}')\n    if data_format == 'channels_first':\n        x = ops.transpose(x, [0, 2, 3, 1])\n    if interpolation == 'nearest':\n        x = ops.repeat(x, height_factor, axis=1)\n        x = ops.repeat(x, width_factor, axis=2)\n    else:\n        shape = ops.shape(x)\n        new_shape = (shape[1] * height_factor, shape[2] * width_factor)\n        x = ops.image.resize(x, new_shape, interpolation=interpolation)\n    if data_format == 'channels_first':\n        x = ops.transpose(x, [0, 3, 1, 2])\n    return x", "docstring": "Resizes the images contained in a 4D tensor.\n\nArgs:\nx: Tensor or variable to resize.\nheight_factor: Positive integer.\nwidth_factor: Positive integer.\ndata_format: One of `\"channels_first\"`, `\"channels_last\"`.\ninterpolation: A string, one of `\"bicubic\"`, `\"bilinear\"`,\n`\"lanczos3\"`, `\"lanczos5\"`, or `\"nearest\"`.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def includes(self, lo_freq: float) -> bool:\n    if (self._lb <= lo_freq <= self._ub):\n        return True\n    return False", "docstring": "Whether `lo_freq` is within the `LoRange`.\n\nArgs:\nlo_freq: LO frequency to be checked\n\nReturns:\nbool: True if lo_freq is included in this range, otherwise False", "source": "codesearchnet"}
{"code": "def knots_from_marginal(marginal, nr_knots, spline_order):\n    cumsum = np.cumsum(marginal)\n    cumsum = (cumsum / cumsum.max())\n    borders = np.linspace(0, 1, nr_knots)\n    knot_placement = (([0] + np.unique([np.where((cumsum >= b))[0][0] for b in borders[1:(- 1)]]).tolist()) + [(len(marginal) - 1)])\n    knots = augknt(knot_placement, spline_order)\n    return knots", "docstring": "Determines knot placement based on a marginal distribution.\n\nIt places knots such that each knot covers the same amount\nof probability mass. Two of the knots are reserved for the\nborders which are treated seperatly. For example, a uniform\ndistribution with 5 knots will cause the knots to be equally\nspaced with 25% of the probability mass between each two\nknots.\n\nInput:\nmarginal: Array\nEstimate of the marginal distribution used to estimate\nknot placement.\nnr_knots: int\nNumber of knots to be placed.\nspline_order: int\nOrder of the splines\n\nReturns:\nknots: Array\nSequence of knot positions", "source": "codesearchnet"}
{"code": "def append_filter(self, structure_filter):\n        \n        hdict = structure_filter.as_dict()\n        hdict[\"input_structure\"] = self.final_structure.as_dict()\n        self.history.append(hdict)", "docstring": "Adds a filter.\n\nArgs:\nstructure_filter (StructureFilter): A filter implementating the\nAbstractStructureFilter API. Tells transmuter waht structures\nto retain.", "source": "juraj-google-style"}
{"code": "def unzip(self, overwrite: bool=False):\n    if (self.zip_content and (not overwrite)):\n        raise FileExistsError(str(self.temp_dir))\n    LOGGER.debug('unzipping miz to temp dir')\n    try:\n        with ZipFile(str(self.miz_path)) as zip_file:\n            LOGGER.debug('reading infolist')\n            self.zip_content = [f.filename for f in zip_file.infolist()]\n            self._extract_files_from_zip(zip_file)\n    except BadZipFile:\n        raise BadZipFile(str(self.miz_path))\n    except:\n        LOGGER.exception('error while unzipping miz file: %s', self.miz_path)\n        raise\n    LOGGER.debug('checking miz content')\n    for miz_item in ['mission', 'options', 'warehouses', 'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource']:\n        if (not Path(self.temp_dir.joinpath(miz_item)).exists()):\n            LOGGER.error('missing file in miz: %s', miz_item)\n            raise FileNotFoundError(miz_item)\n    self._check_extracted_content()\n    LOGGER.debug('all files have been found, miz successfully unzipped')", "docstring": "Flattens a MIZ file into the temp dir\n\nArgs:\noverwrite: allow overwriting exiting files", "source": "codesearchnet"}
{"code": "async def runCmdLine(self, line):\n    opts = self.getCmdOpts(line)\n    return (await self.runCmdOpts(opts))", "docstring": "Run a line of command input for this command.\n\nArgs:\nline (str): Line to execute\n\nExamples:\nRun the foo command with some arguments:\n\nawait foo.runCmdLine('foo --opt baz woot.com')", "source": "codesearchnet"}
{"code": "def save_json(obj, filename, **kwargs):\n    with open(filename, 'w', encoding='utf-8') as f:\n        json.dump(obj, f, **kwargs)", "docstring": "Save an object as a JSON file.\n\nArgs:\nobj: The object to save. Must be JSON-serializable.\nfilename: Path to the output file.\n**kwargs: Additional arguments to `json.dump`.", "source": "codesearchnet"}
{"code": "def codemirror_settings_update(configs, parameters, on=None, names=None):\n    output = copy.deepcopy(configs)\n    if names:\n        output = {k: output[k] for k in names}\n    if (not on):\n        on = output.keys()\n    for k in on:\n        output[k].update(parameters)\n    return output", "docstring": "Return a new dictionnary of configs updated with given parameters.\n\nYou may use ``on`` and ``names`` arguments to select config or filter out\nsome configs from returned dict.\n\nArguments:\nconfigs (dict): Dictionnary of configurations to update.\nparameters (dict): Dictionnary of parameters to apply on selected\nconfigurations.\n\nKeyword Arguments:\non (list): List of configuration names to select for update. If empty,\nall given configurations will be updated.\nnames (list): List of configuration names to keep. If not empty, only\nthose configurations will be in returned dict. Else every\nconfigs from original dict will be present.\n\nReturns:\ndict: Dict of configurations with updated parameters.", "source": "codesearchnet"}
{"code": "def _GetSerializedAttributeContainerByIndex(self, container_type, index):\n    \n    container_list = self._GetSerializedAttributeContainerList(container_type)\n    return container_list.GetAttributeContainerByIndex(index)", "docstring": "Retrieves a specific serialized attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nindex (int): attribute container index.\n\nReturns:\nbytes: serialized attribute container data or None if not available.", "source": "juraj-google-style"}
{"code": "def _cast_value(self, value):\n    if self.convert_datetimes:\n        try:\n            date_time = datetime.datetime.fromtimestamp(float(value))\n            if (datetime.datetime(1970, 1, 1) > date_time):\n                raise ValueError\n            else:\n                return date_time\n        except ValueError:\n            pass\n    tests = (int, float, str)\n    for test in tests:\n        try:\n            return test(value)\n        except ValueError:\n            continue\n    return value", "docstring": "Internal method that makes sure every value in dictionary\nis properly cast into the correct types, instead of\njust treating everything like a string from the csv file.\n\nArgs:\nvalue : The value to be casted\n\nReturns:\nA casted Value.", "source": "codesearchnet"}
{"code": "def autodecode(b):\n    \n    import warnings\n    import chardet\n\n    try:\n        return b.decode()\n    except UnicodeError:\n        result = chardet.detect(b)\n        if result['confidence'] < 0.95:\n            warnings.warn('autodecode failed with utf-8; guessing %s' % result['encoding'])\n        return result.decode(result['encoding'])", "docstring": "Try to decode ``bytes`` to text - try default encoding first, otherwise try to autodetect\n\nArgs:\nb (bytes): byte string\n\nReturns:\nstr: decoded text string", "source": "juraj-google-style"}
{"code": "def is_sparse(x):\n    return isinstance(x, (SparseTensor, SparseTensorValue))", "docstring": "Check whether `x` is sparse.\n\nCheck whether an object is a `tf.sparse.SparseTensor` or\n`tf.compat.v1.SparseTensorValue`.\n\nArgs:\nx: A python object to check.\n\nReturns:\n`True` iff `x` is a `tf.sparse.SparseTensor` or\n`tf.compat.v1.SparseTensorValue`.", "source": "github-repos"}
{"code": "def collection(self, collection_id):\n    child_path = (self._path + (collection_id,))\n    return self._client.collection(*child_path)", "docstring": "Create a sub-collection underneath the current document.\n\nArgs:\ncollection_id (str): The sub-collection identifier (sometimes\nreferred to as the \"kind\").\n\nReturns:\n~.firestore_v1beta1.collection.CollectionReference: The\nchild collection.", "source": "codesearchnet"}
{"code": "def average_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n    warnings.warn('`tf.layers.average_pooling1d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.AveragePooling1D` instead.')\n    layer = AveragePooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n    return layer.apply(inputs)", "docstring": "Average Pooling layer for 1D inputs.\n\nArgs:\ninputs: The tensor over which to pool. Must have rank 3.\npool_size: An integer or tuple/list of a single integer,\nrepresenting the size of the pooling window.\nstrides: An integer or tuple/list of a single integer, specifying the\nstrides of the pooling operation.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string, one of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, length, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, length)`.\nname: A string, the name of the layer.\n\nReturns:\nThe output tensor, of rank 3.\n\nRaises:\nValueError: if eager execution is enabled.", "source": "github-repos"}
{"code": "def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n        \n        temperatures = np.linspace(tmin, tmax, ntemp)\n\n        if self.structure:\n            ylabel = r\"$S$ (J/K/mol)\"\n        else:\n            ylabel = r\"$S$ (J/K/mol-c)\"\n\n        fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)\n\n        return fig", "docstring": "Plots the vibrational entrpy in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "juraj-google-style"}
{"code": "def AddCredentialOptions(self, argument_group):\n    argument_group.add_argument('--credential', action='append', default=[], type=str, dest='credentials', metavar='TYPE:DATA', help='Define a credentials that can be used to unlock encrypted volumes e.g. BitLocker. The credential is defined as type:data e.g. \"password:BDE-test\". Supported credential types are: {0:s}. Binary key data is expected to be passed in BASE-16 encoding (hexadecimal). WARNING credentials passed via command line arguments can end up in logs, so use this option with care.'.format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))", "docstring": "Adds the credential options to the argument group.\n\nThe credential options are use to unlock encrypted volumes.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def state_invariant_scope(self, state: Sequence[tf.Tensor]):\n        \n        scope = {}\n        scope.update(self.non_fluents_scope())\n        scope.update(self.state_scope(state))\n        return scope", "docstring": "Returns the state invariant fluent scope for the current `state`.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\n\nReturns:\nA mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = TestDetrFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `TestDetrFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def cumsum(x, dim, exclusive=False):\n    with tf.variable_scope('cumsum'):\n        new_name = 'tmp_dim_cumsum'\n        new_dim = Dimension(new_name, dim.size)\n        new_shape = x.shape.rename_dimension(dim.name, new_name)\n        comparator = (less if exclusive else less_equal)\n        m = cast(comparator(mtf_range(x.mesh, dim, dtype=tf.float32), mtf_range(x.mesh, new_dim, dtype=tf.float32)), x.dtype)\n        ret = einsum([x, m], output_shape=new_shape)\n        return reshape(ret, x.shape)", "docstring": "Cumulative sum.\n\nArgs:\nx: a Tensor\ndim: a Dimension\nexclusive: a boolean\n\nReturns:\na Tensor with the same shape as x.", "source": "codesearchnet"}
{"code": "def from_composition_and_entries(comp, entries_in_chemsys,\n                                     working_ion_symbol=\"Li\"):\n        \n        pd = PhaseDiagram(entries_in_chemsys)\n        return ConversionElectrode.from_composition_and_pd(comp, pd,\n                                                           working_ion_symbol)", "docstring": "Convenience constructor to make a ConversionElectrode from a\ncomposition and all entries in a chemical system.\n\nArgs:\ncomp: Starting composition for ConversionElectrode, e.g.,\nComposition(\"FeF3\")\nentries_in_chemsys: Sequence containing all entries in a\nchemical system. E.g., all Li-Fe-F containing entries.\nworking_ion_symbol: Element symbol of working ion. Defaults to Li.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, identifier=None, location=None, parent=None, **kwargs):\n    \n    if (not identifier and not location) or not parent:\n      raise ValueError('Missing identifier and location, or parent value.')\n\n    super(APFSPathSpec, self).__init__(parent=parent, **kwargs)\n    self.identifier = identifier\n    self.location = location", "docstring": "Initializes a path specification.\n\nNote that an APFS path specification must have a parent.\n\nArgs:\nidentifier (Optional[int]): identifier.\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent or both identifier and location are not set.", "source": "juraj-google-style"}
{"code": "def set(self, response: 'requests.Response') -> None:\n    self.data[response.url] = SavedEndpoint(response.json(), self._get_expiration(response.headers))", "docstring": "Adds a response to the cache.\n\nArgs:\nresponse: response from ESI\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_features_to_nglview(view, structure_resnums, chain_id):\n    \n    \n    if not structprop.chains.has_id(chain_id):\n        structprop.parse_structure()\n        if not structprop.chains.has_id(chain_id):\n            raise ValueError('Chain {} not present in structure {}'.format(chain_id, structprop.id))\n\n    if not seqprop.features:\n        log.warning('{}: no stored features'.format(seqprop.id))\n\n    \n    for f in seqprop.features:\n\n        \n        if f.type.lower() == 'disulfide bond':\n            \n            disulfide = map_seqprop_resnums_to_structprop_resnums(resnums=[f.location.start + 1, f.location.end],\n                                                                  seqprop=seqprop,\n                                                                  structprop=structprop,\n                                                                  chain_id=chain_id,\n                                                                  use_representatives=False)\n            to_view = [str(x)+'.CA' for x in list(disulfide.values())]\n            view.add_distance(atom_pair=[to_view], color='black')\n            log.info('Disulfide bridge at residues {} & {}'.format(f.location.start + 1, f.location.end))\n\n        \n        if f.type.lower() == 'dna-binding region' or f.type.lower() == 'nucleotide phosphate-binding region':\n            impres = self.map_seqprop_resnums_to_structprop_resnums(resnums=[f.location.start + 1,\n                                                                             f.location.end],\n                                                                    seqprop=seqprop,\n                                                                    structprop=structprop,\n                                                                    chain_id=chain_id,\n                                                                    use_representatives=use_representatives)\n\n            \n            \n            if f.location.start + 1 in impres and f.location.end in impres:\n                mapped_start = impres[f.location.start + 1]\n                mapped_end = impres[f.location.end]\n                view.add_ball_and_stick(selection=':{} and ( {}-{} )'.format(chain_id,\n                                                                             mapped_start,\n                                                                             mapped_end), color='black')\n                log.info('{} at sequence region {}-{}, structure residues {}-{}'.format(f.type,\n                                                                                        f.location.start,\n                                                                                        f.location.end,\n                                                                                        mapped_start,\n                                                                                        mapped_end))\n\n        \n        if f.location.end - 1 == f.location.start:\n            if f.type.lower() == 'sequence variant' or f.type.lower() == 'mutagenesis site':\n                continue\n            impres = self.map_seqprop_resnums_to_structprop_resnums(resnums=f.location.end,\n                                                                    seqprop=seqprop,\n                                                                    structprop=structprop,\n                                                                    chain_id=chain_id,\n                                           
                         use_representatives=use_representatives)\n            if f.location.end in impres:\n                impres_mapped = impres[f.location.end]\n                view.add_ball_and_stick(selection=str(impres_mapped), color='black')\n                view.add_label(selection=':{} and {}'.format(chain_id, impres_mapped), label_type='res', color='black')\n                log.info('{} at sequence residue {}, structure residue {}'.format(f.type, f.location.end, impres_mapped))", "docstring": "Add select features from the selected SeqProp object to an NGLWidget view object.\n\nCurrently parsing for:\n* Single residue features (ie. metal binding sites)\n* Disulfide bonds\n\nArgs:\nview (NGLWidget): NGLWidget view object\nseqprop (SeqProp): SeqProp object\nstructprop (StructProp): StructProp object\nchain_id (str): ID of the structure's chain to get annotation from", "source": "juraj-google-style"}
{"code": "def record2marcxml(record):\n    schema_name = _get_schema_name(record)\n    if (schema_name == 'hep'):\n        marcjson = hep2marc.do(record)\n    elif (schema_name == 'authors'):\n        marcjson = hepnames2marc.do(record)\n    else:\n        raise NotImplementedError(u'JSON -> MARC rules missing for \"{}\"'.format(schema_name))\n    record = RECORD()\n    for (key, values) in sorted(iteritems(marcjson)):\n        (tag, ind1, ind2) = _parse_key(key)\n        if _is_controlfield(tag, ind1, ind2):\n            value = force_single_element(values)\n            if (not isinstance(value, text_type)):\n                value = text_type(value)\n            record.append(CONTROLFIELD(_strip_invalid_chars_for_xml(value), {'tag': tag}))\n        else:\n            for value in force_list(values):\n                datafield = DATAFIELD({'tag': tag, 'ind1': ind1, 'ind2': ind2})\n                for (code, els) in sorted(iteritems(value)):\n                    for el in force_list(els):\n                        if (not isinstance(el, text_type)):\n                            el = text_type(el)\n                        datafield.append(SUBFIELD(_strip_invalid_chars_for_xml(el), {'code': code}))\n                record.append(datafield)\n    return tostring(record, encoding='utf8', pretty_print=True)", "docstring": "Convert a JSON record to a MARCXML string.\n\nDeduces which set of rules to use by parsing the ``$schema`` key, as\nit unequivocally determines which kind of record we have.\n\nArgs:\nrecord(dict): a JSON record.\n\nReturns:\nstr: a MARCXML string converted from the record.", "source": "codesearchnet"}
{"code": "def show_ordered_code(code, extra_col=None):\n    if not extra_col:\n        extra_col = {}\n    _setup_tabulate()\n    block_lines = []\n    op_lines = []\n    boundaries = []\n    start = 0\n    for block in code.order:\n        end = start\n        ids = lambda xs: [x.id for x in xs]\n        block_lines.append(f'block: {block.id} -> {ids(block.outgoing)} <- {ids(block.incoming)}')\n        for op in block:\n            end += 1\n            op_lines.append([op.index, op.__class__.__name__, getattr(op, 'argval', ''), op.target and op.target.index, op.block_target and op.block_target.index, '✓' if op.push_exc_block else '', '✓' if op.pop_exc_block else '', op.next and op.next.index, op.line, extra_col.get(op.index)])\n        boundaries.append((start, end))\n        start = end\n    headers = ['ix', 'op', 'arg', 'tgt', 'btgt', '>exc', '<exc', 'next', 'line', 'extra']\n    block_table = tabulate.tabulate(op_lines, headers, tablefmt='presto')\n    block_table = block_table.split('\\n')\n    tab = [[block_table[0]]]\n    block_table = block_table[2:]\n    for blk, (start, end) in zip(block_lines, boundaries):\n        tab.append([blk])\n        tab.append(['\\n'.join(block_table[start:end])])\n    print(tabulate.tabulate(tab, tablefmt='fancy_grid'))", "docstring": "Print out the block structure of an OrderedCode object as a table.\n\nArgs:\ncode: A blocks.OrderedCode object\nextra_col: A map from opcode_index to a single additional cell to display", "source": "github-repos"}
{"code": "def replace_case(self, case_obj):\n        \n        \n        \n        LOG.info(\"Saving case %s\", case_obj['_id'])\n        \n\n        case_obj['updated_at'] = datetime.datetime.now(),\n\n        updated_case = self.case_collection.find_one_and_replace(\n            {'_id': case_obj['_id']},\n            case_obj,\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n\n        return updated_case", "docstring": "Replace a existing case with a new one\n\nKeeps the object id\n\nArgs:\ncase_obj(dict)\n\nReturns:\nupdated_case(dict)", "source": "juraj-google-style"}
{"code": "def poweroff_vmss(access_token, subscription_id, resource_group, vmss_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '/powerOff?api-version=', COMP_API])\n    body = '{\"instanceIds\" : [\"*\"]}'\n    return do_post(endpoint, body, access_token)", "docstring": "Power off all the VMs in a virtual machine scale set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def users(self):\n    if (not self.__users):\n        self.__users = Users(self.__connection)\n    return self.__users", "docstring": "Gets the Users API client.\n\nReturns:\nUsers:", "source": "codesearchnet"}
{"code": "def stop_site(name):\n    \n    ps_cmd = ['Stop-WebSite', r\"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    return cmd_ret['retcode'] == 0", "docstring": "Stop a Web Site in IIS.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the website to stop.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.stop_site name='My Test Site'", "source": "juraj-google-style"}
{"code": "def logsumexp(x, axis=None, keepdims=False):\n    return math_ops.reduce_logsumexp(x, axis, keepdims)", "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))).\n\nThis function is more numerically stable than log(sum(exp(x))).\nIt avoids overflows caused by taking the exp of large inputs and\nunderflows caused by taking the log of small inputs.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to reduce over.\nkeepdims: A boolean, whether to keep the dimensions or not.\nIf `keepdims` is `False`, the rank of the tensor is reduced\nby 1. If `keepdims` is `True`, the reduced dimension is\nretained with length 1.\n\nReturns:\nThe reduced tensor.", "source": "github-repos"}
{"code": "def ws025(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws025`'.format(value))\n    self._ws025 = value", "docstring": "Corresponds to IDD Field `ws025`\nWind speed corresponding to 2.5% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `ws025`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def state_invariant_scope(self, state: Sequence[tf.Tensor]):\n    scope = {}\n    scope.update(self.non_fluents_scope())\n    scope.update(self.state_scope(state))\n    return scope", "docstring": "Returns the state invariant fluent scope for the current `state`.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\n\nReturns:\nA mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def _CreateRoutePatternsFolder(self, parent, route, style_id=None, visible=True):\n    pattern_id_to_trips = route.GetPatternIdTripDict()\n    if (not pattern_id_to_trips):\n        return None\n    pattern_trips = pattern_id_to_trips.values()\n    pattern_trips.sort((lambda a, b: cmp(len(b), len(a))))\n    folder = self._CreateFolder(parent, 'Patterns', visible)\n    for (n, trips) in enumerate(pattern_trips):\n        trip_ids = [trip.trip_id for trip in trips]\n        name = ('Pattern %d (trips: %d)' % ((n + 1), len(trips)))\n        description = ('Trips using this pattern (%d in total): %s' % (len(trips), ', '.join(trip_ids)))\n        placemark = self._CreatePlacemark(folder, name, style_id, visible, description)\n        coordinates = [(stop.stop_lon, stop.stop_lat) for stop in trips[0].GetPattern()]\n        self._CreateLineString(placemark, coordinates)\n    return folder", "docstring": "Create a KML Folder containing placemarks for each pattern in the route.\n\nA pattern is a sequence of stops used by one of the trips in the route.\n\nIf there are not patterns for the route then no folder is created and None\nis returned.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nroute: The transitfeed.Route instance.\nstyle_id: The id of a style to use if not None.\nvisible: Whether the folder is initially visible or not.\n\nReturns:\nThe Folder ElementTree.Element instance or None if there are no patterns.", "source": "codesearchnet"}
{"code": "def init_from_acceptor(self, acceptor):\n        \n        self.states = copy.deepcopy(acceptor.states)\n        self.alphabet = copy.deepcopy(acceptor.alphabet)\n        self.osyms = copy.deepcopy(acceptor.osyms)\n        self.isyms = copy.deepcopy(acceptor.isyms)", "docstring": "Adds a sink state\nArgs:\nalphabet (list): The input alphabet\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def color_scale_HSV(c: Color, scoef: float, vcoef: float) -> None:\n    color_p = ffi.new('TCOD_color_t*')\n    (color_p.r, color_p.g, color_p.b) = (c.r, c.g, c.b)\n    lib.TCOD_color_scale_HSV(color_p, scoef, vcoef)\n    c[:] = (color_p.r, color_p.g, color_p.b)", "docstring": "Scale a color's saturation and value.\n\nDoes not return a new Color.  ``c`` is modified inplace.\n\nArgs:\nc (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.\nscoef (float): Saturation multiplier, from 0 to 1.\nUse 1 to keep current saturation.\nvcoef (float): Value multiplier, from 0 to 1.\nUse 1 to keep current value.", "source": "codesearchnet"}
{"code": "def sigmoid_cross_entropy_with_logits(logits, targets):\n  \n  if logits.shape != targets.shape:\n    raise ValueError(\n        \"logits shape must equal targets shape\"\n        \"logits=%s targets=%s\" % (logits.to_string, targets.to_string))\n  x = logits\n  z = targets\n  return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x)))", "docstring": "Sigmoid cross-entropy loss.\n\nArgs:\nlogits: a mtf.Tensor\ntargets: a mtf.Tensor with the same shape as logits\n\nReturns:\na mtf.Tensor whose shape is equal to logits.shape\n\nRaises:\nValueError: if the shapes do not match.", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in ('header', 'logline'):\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    if key == 'logline':\n      self._ParseLine(parser_mediator, structure)\n\n    elif key == 'header':\n      self._ParseHeader(parser_mediator, structure)", "docstring": "Parse each record structure and return an EventObject if applicable.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): identifier of the structure of tokens.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def binfiles_set(self, isnap):\n    possible_files = set((self.filename(fstem, isnap, force_legacy=True) for fstem in phyvars.FIELD_FILES))\n    return (possible_files & self.files)", "docstring": "Set of existing binary files at a given snap.\n\nArgs:\nisnap (int): snapshot index.\nReturns:\nset of pathlib.Path: the set of output files available for this\nsnapshot number.", "source": "codesearchnet"}
{"code": "def vibrational_internal_energy(self, temperature, volume):\n        \n        y = self.debye_temperature(volume) / temperature\n        return self.kb * self.natoms * temperature * (9./8. * y +\n                                                      3*self.debye_integral(y))", "docstring": "Vibrational internal energy, U_vib(V, T).\nEq(4) in doi.org/10.1016/j.comphy.2003.12.001\n\nArgs:\ntemperature (float): temperature in K\nvolume (float): in Ang^3\n\nReturns:\nfloat: vibrational internal energy in eV", "source": "juraj-google-style"}
{"code": "def default_get_arg_names_from_class_name(class_name):\n    parts = []\n    rest = class_name\n    if rest.startswith('_'):\n        rest = rest[1:]\n    while True:\n        m = re.match('([A-Z][a-z]+)(.*)', rest)\n        if (m is None):\n            break\n        parts.append(m.group(1))\n        rest = m.group(2)\n    if (not parts):\n        return []\n    return ['_'.join((part.lower() for part in parts))]", "docstring": "Converts normal class names into normal arg names.\n\nNormal class names are assumed to be CamelCase with an optional leading\nunderscore.  Normal arg names are assumed to be lower_with_underscores.\n\nArgs:\nclass_name: a class name, e.g., \"FooBar\" or \"_FooBar\"\nReturns:\nall likely corresponding arg names, e.g., [\"foo_bar\"]", "source": "codesearchnet"}
{"code": "def _extract_match(self, candidate, offset):\n        \n        \n        \n        if (_SLASH_SEPARATED_DATES.search(candidate)):\n            return None\n\n        \n        if _TIME_STAMPS.search(candidate):\n            following_text = self.text[offset + len(candidate):]\n            if _TIME_STAMPS_SUFFIX.match(following_text):\n                return None\n\n        \n        match = self._parse_and_verify(candidate, offset)\n        if match is not None:\n            return match\n\n        \n        \n        return self._extract_inner_match(candidate, offset)", "docstring": "Attempts to extract a match from a candidate string.\n\nArguments:\ncandidate -- The candidate text that might contain a phone number.\noffset -- The offset of candidate within self.text\nReturns the match found, None if none can be found", "source": "juraj-google-style"}
{"code": "def create_border(self, border_style_type):\n    if (border_style_type == MenuBorderStyleType.ASCII_BORDER):\n        return self.create_ascii_border()\n    elif (border_style_type == MenuBorderStyleType.LIGHT_BORDER):\n        return self.create_light_border()\n    elif (border_style_type == MenuBorderStyleType.HEAVY_BORDER):\n        return self.create_heavy_border()\n    elif (border_style_type == MenuBorderStyleType.DOUBLE_LINE_BORDER):\n        return self.create_doubleline_border()\n    elif (border_style_type == MenuBorderStyleType.HEAVY_OUTER_LIGHT_INNER_BORDER):\n        return self.create_heavy_outer_light_inner_border()\n    elif (border_style_type == MenuBorderStyleType.DOUBLE_LINE_OUTER_LIGHT_INNER_BORDER):\n        return self.create_doubleline_outer_light_inner_border()\n    else:\n        self.logger.info('Unrecognized border style type: {}. Defaulting to ASCII.'.format(border_style_type))\n        return self.create_ascii_border()", "docstring": "Create a new MenuBorderStyle instance based on the given border style type.\n\nArgs:\nborder_style_type (int):  an integer value from :obj:`MenuBorderStyleType`.\n\nReturns:\n:obj:`MenuBorderStyle`: a new MenuBorderStyle instance of the specified style.", "source": "codesearchnet"}
{"code": "def from_tuplelist(tuple_list):\n        \n        out = Layout()\n        for physical, virtual in enumerate(tuple_list):\n            if virtual is None:\n                continue\n            elif Layout.is_virtual(virtual):\n                if virtual in out._v2p:\n                    raise LayoutError('Duplicate values not permitted; Layout is bijective.')\n                out[virtual] = physical\n            else:\n                raise LayoutError(\"The list should contain elements of the form\"\n                                  \" (Register, integer) or None\")\n        return out", "docstring": "Populates a Layout from a list containing virtual\nqubits---(QuantumRegister, int) tuples---, or None.\n\nArgs:\ntuple_list (list):\ne.g.: [qr[0], None, qr[2], qr[3]]\nReturns:\nLayout: the corresponding Layout object\nRaises:\nLayoutError: If the elements are not (Register, integer) or None", "source": "juraj-google-style"}
{"code": "def sym_hash(x: Any) -> int:\n    if isinstance(x, Symbolic):\n        return x.sym_hash()\n    if inspect.isfunction(x):\n        return hash(x.__code__.co_code)\n    if inspect.ismethod(x):\n        return hash((sym_hash(x.__self__), x.__code__.co_code))\n    return hash(x)", "docstring": "Returns hash of value. Use symbolic hashing function if possible.\n\nExample::\n\n@pg.symbolize\nclass A:\ndef __init__(self, x):\nself.x = x\n\nassert hash(A(1)) != hash(A(1))\nassert pg.hash(A(1)) == pg.hash(A(1))\nassert pg.hash(pg.Dict(x=[A(1)])) == pg.hash(pg.Dict(x=[A(1)]))\n\nArgs:\nx: Value for computing hash.\n\nReturns:\nThe hash value for `x`.", "source": "github-repos"}
{"code": "def JoinTypes(types):\n    queue = collections.deque(types)\n    seen = set()\n    new_types = []\n    while queue:\n        t = queue.popleft()\n        if isinstance(t, pytd.UnionType):\n            queue.extendleft(reversed(t.type_list))\n        elif isinstance(t, pytd.NothingType):\n            pass\n        elif t not in seen:\n            new_types.append(t)\n            seen.add(t)\n    if len(new_types) == 1:\n        return new_types.pop()\n    elif any((isinstance(t, pytd.AnythingType) for t in new_types)):\n        nonetype = pytd.NamedType('builtins.NoneType')\n        unresolved_nonetype = pytd.NamedType('NoneType')\n        if any((t in (nonetype, unresolved_nonetype) for t in new_types)):\n            return pytd.UnionType((pytd.AnythingType(), nonetype))\n        return pytd.AnythingType()\n    elif new_types:\n        return pytd.UnionType(tuple(new_types))\n    else:\n        return pytd.NothingType()", "docstring": "Combine a list of types into a union type, if needed.\n\nLeaves singular return values alone, or wraps a UnionType around them if there\nare multiple ones, or if there are no elements in the list (or only\nNothingType) return NothingType.\n\nArguments:\ntypes: A list of types. This list might contain other UnionTypes. If so,\nthey are flattened.\n\nReturns:\nA type that represents the union of the types passed in. Order is preserved.", "source": "github-repos"}
{"code": "def unparse_headers(hdrs):\n    return (''.join([unparse_header(n, v) for (n, v) in hdrs.items()]) + '\\r\\n')", "docstring": "Parse a dictionary of headers to a string.\n\nArgs:\nhdrs: A dictionary of headers.\n\nReturns:\nThe headers as a string that can be used in an NNTP POST.", "source": "codesearchnet"}
{"code": "class BayesianWatermarkDetectorModelOutput(ModelOutput):\n    loss: Optional[torch.FloatTensor] = None\n    posterior_probabilities: Optional[torch.FloatTensor] = None", "docstring": "Base class for outputs of models predicting if the text is watermarked.\n\nArgs:\nloss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\nLanguage modeling loss.\nposterior_probabilities (`torch.FloatTensor` of shape `(1,)`):\nMultiple choice classification loss.", "source": "github-repos"}
{"code": "def draw_rects(self, *rects):\n    rect_array = ffi.new('SDL_Rect[]', len(rects))\n    for (i, r) in enumerate(rects):\n        rect_array[i] = r._ptr[0]\n    check_int_err(lib.SDL_RenderDrawRects(self._ptr, rect_array, len(rects)))", "docstring": "Draw some number of rectangles on the current rendering target.\n\nArgs:\n*rects (Rect): The destination rectangles.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def load_structure_path(self, structure_path, file_type):\n        \n\n        if not file_type:\n            raise ValueError('File type must be specified')\n\n        self.file_type = file_type\n        self.structure_dir = op.dirname(structure_path)\n        self.structure_file = op.basename(structure_path)", "docstring": "Load a structure file and provide pointers to its location\n\nArgs:\nstructure_path (str): Path to structure file\nfile_type (str): Type of structure file", "source": "juraj-google-style"}
{"code": "def managed(name, table, data, record=None):\n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n    if (record is None):\n        record = name\n    current_data = {column: __salt__['openvswitch.db_get'](table, record, column) for column in data}\n    comment_changes = 'Columns have been updated.'\n    comment_no_changes = 'All columns are already up to date.'\n    comment_error = 'Error while updating column {0}: {1}'\n    if __opts__['test']:\n        for column in data:\n            if (data[column] != current_data[column]):\n                ret['changes'][column] = {'old': current_data[column], 'new': data[column]}\n        if ret['changes']:\n            ret['result'] = None\n            ret['comment'] = comment_changes\n        else:\n            ret['result'] = True\n            ret['comment'] = comment_no_changes\n        return ret\n    for column in data:\n        if (data[column] != current_data[column]):\n            result = __salt__['openvswitch.db_set'](table, record, column, data[column])\n            if (result is not None):\n                ret['comment'] = comment_error.format(column, result)\n                ret['result'] = False\n                return ret\n            ret['changes'][column] = {'old': current_data[column], 'new': data[column]}\n    ret['result'] = True\n    ret['comment'] = comment_no_changes\n    return ret", "docstring": "Ensures that the specified columns of the named record have the specified\nvalues.\n\nArgs:\nname: The name of the record.\ntable: The name of the table to which the record belongs.\ndata: Dictionary containing a mapping from column names to the desired\nvalues. Columns that exist, but are not specified in this\ndictionary are not touched.\nrecord: The name of the record (optional). Replaces name if specified.", "source": "codesearchnet"}
{"code": "def __init__(self, token=None, auth_test=False, verify=True, lazy=False):\n        \n\n        try:\n            self.token = token if token else os.environ['SLACK_TOKEN']\n        except KeyError:\n            raise ValueError('If not providing a token, must set SLACK_TOKEN envvar')\n        if auth_test:\n            response = self.auth_test()\n            if not response['ok']:\n                raise ValueError('Authentication Failed with response: {}'.format(response))\n        self.verify = verify\n\n        \n        self._channels = []\n        self._users = []\n\n        if not lazy:\n            _ = self.channels\n            _ = self.users", "docstring": "Instantiation an instance of the Slack API\n\nArgs:\ntoken: {str} (required) API token, read from SLACK_TOKEN env var\nauth_test: {bool} verify this token\nverify: {bool} verify all API calls return with a True 'ok'\nlazy: {bool} Don't populate properties until called", "source": "juraj-google-style"}
{"code": "def _read_mode_tsopt(self, size, kind):\n    temp = struct.unpack('>II', self._read_fileng(size))\n    data = dict(kind=kind, length=size, val=temp[0], ecr=temp[1])\n    return data", "docstring": "Read Timestamps option.\n\nPositional arguments:\n* size - int, length of option\n* kind - int, 8 (Timestamps)\n\nReturns:\n* dict -- extracted Timestamps (TS) option\n\nStructure of TCP TSopt [RFC 7323]:\n+-------+-------+---------------------+---------------------+\n|Kind=8 |  10   |   TS Value (TSval)  |TS Echo Reply (TSecr)|\n+-------+-------+---------------------+---------------------+\n1       1              4                     4\n\nOctets      Bits        Name                    Description\n0           0     tcp.ts.kind             Kind (8)\n1           8     tcp.ts.length           Length (10)\n2          16     tcp.ts.val              Timestamp Value\n6          48     tcp.ts.ecr              Timestamps Echo Reply", "source": "codesearchnet"}
{"code": "def with_row_splits_dtype(self, dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if dtype not in (dtypes.int32, dtypes.int64):\n        raise ValueError(f'Argument `row_splits` dtype must be int32 or int64. Received {dtype}.')\n    if self._row_partition.dtype == dtype:\n        return self\n    current_values = self._values\n    if isinstance(current_values, RaggedTensor):\n        return RaggedTensor(values=current_values.with_row_splits_dtype(dtype), row_partition=self._row_partition.with_dtype(dtype), internal=True)\n    else:\n        return RaggedTensor(values=current_values, row_partition=self._row_partition.with_dtype(dtype), internal=True)", "docstring": "Returns a copy of this RaggedTensor with the given `row_splits` dtype.\n\nFor RaggedTensors with multiple ragged dimensions, the `row_splits` for all\nnested `RaggedTensor` objects are cast to the given dtype.\n\nArgs:\ndtype: The dtype for `row_splits`.  One of `tf.int32` or `tf.int64`.\n\nReturns:\nA copy of this RaggedTensor, with the `row_splits` cast to the given\ntype.", "source": "github-repos"}
{"code": "async def getTempCoreCmdr(mods=None, outp=None):\n    acm = genTempCoreProxy(mods)\n    prox = (await acm.__aenter__())\n    cmdrcore = (await CmdrCore.anit(prox, outp=outp))\n    cmdrcore.acm = acm\n    return cmdrcore", "docstring": "Get a CmdrCore instance which is backed by a temporary Cortex.\n\nArgs:\nmods (list): A list of additional CoreModules to load in the Cortex.\noutp: A output helper.  Will be used for the Cmdr instance.\n\nNotes:\nThe CmdrCore returned by this should be fini()'d to tear down the temporary Cortex.\n\nReturns:\nCmdrCore: A CmdrCore instance.", "source": "codesearchnet"}
{"code": "def GetLinkedFileEntry(self):\n    link = self._GetLink()\n    if (not link):\n        return None\n    path_spec = os_path_spec.OSPathSpec(location=link)\n    return OSFileEntry(self._resolver_context, self._file_system, path_spec)", "docstring": "Retrieves the linked file entry, for example for a symbolic link.\n\nReturns:\nOSFileEntry: linked file entry or None if not available.", "source": "codesearchnet"}
{"code": "def get_channel(self, channel_name, project_name, dataset_name):\n        \n        return self.resources.get_channel(channel_name, project_name,\n                                          dataset_name)", "docstring": "Gets info about a channel given its name, name of its project\n, and name of its dataset.\n\nArguments:\nchannel_name (str): Channel name\nproject_name (str): Project name\ndataset_name (str): Dataset name\n\nReturns:\ndict: Channel info", "source": "juraj-google-style"}
{"code": "def coldestmonth(self, value=None):\n        \n        if value is not None:\n            try:\n                value = int(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type int '\n                                 'for field `coldestmonth`'.format(value))\n            if value < 1:\n                raise ValueError('value need to be greater or equal 1 '\n                                 'for field `coldestmonth`')\n            if value > 12:\n                raise ValueError('value need to be smaller 12 '\n                                 'for field `coldestmonth`')\n\n        self._coldestmonth = value", "docstring": "Corresponds to IDD Field `coldestmonth`\n\nArgs:\nvalue (int): value for IDD Field `coldestmonth`\nvalue >= 1\nvalue <= 12\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def savefits(cube, fitsname, **kwargs):\n    dropdeg = kwargs.pop('dropdeg', False)\n    ndim = len(cube.dims)\n    FITSINFO = get_data('decode', 'data/fitsinfo.yaml')\n    hdrdata = yaml.load(FITSINFO, dc.utils.OrderedLoader)\n    if (ndim == 2):\n        header = fits.Header(hdrdata['dcube_2d'])\n        data = cube.values.T\n    elif (ndim == 3):\n        if dropdeg:\n            header = fits.Header(hdrdata['dcube_2d'])\n            data = cube.values[(:, :, 0)].T\n        else:\n            header = fits.Header(hdrdata['dcube_3d'])\n            kidfq = cube.kidfq.values\n            freqrange = (~ np.isnan(kidfq))\n            orderedfq = np.argsort(kidfq[freqrange])\n            newcube = cube[(:, :, orderedfq)]\n            data = newcube.values.T\n    else:\n        raise TypeError(ndim)\n    if (cube.coordsys == 'AZEL'):\n        header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'})\n    elif (cube.coordsys == 'RADEC'):\n        header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)})\n    else:\n        pass\n    header.update({'CRVAL1': float(cube.x[0]), 'CDELT1': float((cube.x[1] - cube.x[0])), 'CRVAL2': float(cube.y[0]), 'CDELT2': float((cube.y[1] - cube.y[0])), 'DATE': datetime.now(timezone('UTC')).isoformat()})\n    if ((ndim == 3) and (not dropdeg)):\n        header.update({'CRVAL3': float(newcube.kidfq[0]), 'CDELT3': float((newcube.kidfq[1] - newcube.kidfq[0]))})\n    fitsname = str(Path(fitsname).expanduser())\n    fits.writeto(fitsname, data, header, **kwargs)\n    logger.info('{} has been created.'.format(fitsname))", "docstring": "Save a cube to a 3D-cube FITS file.\n\nArgs:\ncube (xarray.DataArray): Cube to be saved.\nfitsname (str): Name of output FITS file.\nkwargs (optional): Other arguments common with astropy.io.fits.writeto().", "source": "codesearchnet"}
{"code": "def log_mel_spectrogram(data, audio_sample_rate=8000, log_offset=0.0, window_length_secs=0.025, hop_length_secs=0.01, **kwargs):\n    window_length_samples = int(round((audio_sample_rate * window_length_secs)))\n    hop_length_samples = int(round((audio_sample_rate * hop_length_secs)))\n    fft_length = (2 ** int(np.ceil((np.log(window_length_samples) / np.log(2.0)))))\n    spectrogram = stft_magnitude(data, fft_length=fft_length, hop_length=hop_length_samples, window_length=window_length_samples)\n    mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(num_spectrogram_bins=spectrogram.shape[1], audio_sample_rate=audio_sample_rate, **kwargs))\n    return np.log((mel_spectrogram + log_offset))", "docstring": "Convert waveform to a log magnitude mel-frequency spectrogram.\n\nArgs:\ndata: 1D np.array of waveform data.\naudio_sample_rate: The sampling rate of data.\nlog_offset: Add this to values when taking log to avoid -Infs.\nwindow_length_secs: Duration of each window to analyze.\nhop_length_secs: Advance between successive analysis windows.\n**kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.\n\nReturns:\n2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank\nmagnitudes for successive frames.", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = DeformableDetrFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DeformableDetrFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'):\n    with tf.variable_scope(scope):\n        if (stride > 1):\n            avg_pool = CONFIG[dim]['avg_pool']\n            x = avg_pool(x, pool_size=(stride, stride), strides=(stride, stride), padding='VALID')\n        input_channels = tf.shape(x)[3]\n        diff = (output_channels - input_channels)\n        x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [(diff \n        return x", "docstring": "Downsamples 'x' by `stride` using average pooling.\n\nArgs:\nx: input tensor of size [N, H, W, C]\noutput_channels: Desired number of output channels.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\nstride: What stride to use. Usually 1 or 2.\nscope: Optional variable scope.\n\nReturns:\nA downsampled tensor of size [N, H/2, W/2, output_channels] if stride\nis 2, else returns a tensor of size [N, H, W, output_channels] if\nstride is 1.", "source": "codesearchnet"}
{"code": "def writegroup(self, auth, entries, defer=False):\n        \n        return self._call('writegroup', auth, [entries], defer)", "docstring": "Writes the given values for the respective resources in the list, all writes have same\ntimestamp.\n\nArgs:\nauth: cik for authentication.\nentries: List of key, value lists. eg. [[key, value], [k,v],,,]", "source": "juraj-google-style"}
{"code": "def dump_property(self, name):\n        \n\n        if not hasattr(self, name):\n            raise ArgumentError(\"Unknown property %s\" % name)\n\n        value = getattr(self, name)\n        if name in self._complex_properties:\n            value = self._complex_properties[name][0](value)\n\n        return value", "docstring": "Serialize a property of this class by name.\n\nArgs:\nname (str): The name of the property to dump.\n\nReturns:\nobject: The serialized value of the property.", "source": "juraj-google-style"}
{"code": "def _tf_extension_type_fields(cls):\n    if '_tf_extension_type_cached_fields' in cls.__dict__:\n        return cls._tf_extension_type_cached_fields\n    try:\n        type_hints = typing_extensions.get_type_hints(cls, include_extras=False)\n        ok_to_cache = True\n    except (NameError, AttributeError):\n        type_hints = {}\n        for base in reversed(cls.__mro__):\n            type_hints.update(base.__dict__.get('__annotations__', {}))\n        ok_to_cache = False\n    fields = []\n    for name, value_type in type_hints.items():\n        default = getattr(cls, name, extension_type_field.ExtensionTypeField.NO_DEFAULT)\n        fields.append(extension_type_field.ExtensionTypeField(name, value_type, default))\n    fields = tuple(fields)\n    if ok_to_cache:\n        cls._tf_extension_type_cached_fields = fields\n    return fields", "docstring": "An ordered list describing the fields of this ExtensionType.\n\nReturns:\nA list of `ExtensionTypeField` objects.  Forward references are resolved\nif possible, or left unresolved otherwise.", "source": "github-repos"}
{"code": "def trainable(self, value):\n    value = bool(value)\n    self._trainable = value\n    for v in self._trainable_variables:\n        v.trainable = value\n    for layer in self._layers:\n        layer.trainable = value", "docstring": "Sets trainable attribute for the layer and its sublayers.\n\nWhen this value is changed during training (e.g. with a\n`Callback`) you need to call the parent\n`Model.make_train_function` with `force=True` in order to\nrecompile the training graph.\n\nArgs:\nvalue: Boolean with the desired state for the layer's trainable\nattribute.", "source": "github-repos"}
{"code": "def GetCacheValueByObject(self, vfs_object):\n    \n    for identifier, cache_value in iter(self._values.items()):\n      if not cache_value:\n        raise RuntimeError('Missing cache value.')\n\n      if cache_value.vfs_object == vfs_object:\n        return identifier, cache_value\n\n    return None, None", "docstring": "Retrieves the cache value for the cached object.\n\nArgs:\nvfs_object (object): VFS object that was cached.\n\nReturns:\ntuple[str, ObjectsCacheValue]: identifier and cache value object or\n(None, None) if not cached.\n\nRaises:\nRuntimeError: if the cache value is missing.", "source": "juraj-google-style"}
{"code": "def setProvisioningUrl(self, strURL='grl.com'):\n        \n        print '%s call setProvisioningUrl' % self.port\n        self.provisioningUrl = strURL\n        if self.deviceRole == Thread_Device_Role.Commissioner:\n            cmd = WPANCTL_CMD + 'setprop Commissioner:ProvisioningUrl %s' %(strURL)\n            print cmd\n            return self.__sendCommand(cmd)[0] != \"Fail\"\n        return True", "docstring": "set provisioning Url\n\nArgs:\nstrURL: Provisioning Url string\n\nReturns:\nTrue: successful to set provisioning Url\nFalse: fail to set provisioning Url", "source": "juraj-google-style"}
{"code": "def _results_tc_args(self):\n    results = []\n    if os.access(self.default_args.tc_out_path, os.W_OK):\n        result_file = '{}/results.tc'.format(self.default_args.tc_out_path)\n    else:\n        result_file = 'results.tc'\n    if os.path.isfile(result_file):\n        with open(result_file, 'r') as rh:\n            results = rh.read().strip().split('\\n')\n        os.remove(result_file)\n    for line in results:\n        if ((not line) or (' = ' not in line)):\n            continue\n        (key, value) = line.split(' = ')\n        if (value == 'true'):\n            value = True\n        elif (value == 'false'):\n            value = False\n        elif (not value):\n            value = None\n        setattr(self._default_args, key, value)", "docstring": "Read data from results_tc file from previous run of app.\n\nThis method is only required when not running from the with the\nTcEX platform and is only intended for testing apps locally.\n\nReturns:\n(dictionary): A dictionary of values written to results_tc.", "source": "codesearchnet"}
{"code": "def find_many(self, url, type, resource):\n    return [type(item) for item in RestClient.get(url)[resource]]", "docstring": "Get a list of resources\n\nArgs:\nurl (string): URL to invoke\ntype (class): Class type\nresource (string): The REST Resource\nReturns:\nlist of object: List of resource instances", "source": "codesearchnet"}
{"code": "def scoped_format(txt, **objects):\n    pretty = objects.pop('pretty', RecursiveAttribute.format_pretty)\n    expand = objects.pop('expand', RecursiveAttribute.format_expand)\n    attr = RecursiveAttribute(objects, read_only=True)\n    formatter = scoped_formatter(**objects)\n    return formatter.format(txt, pretty=pretty, expand=expand)", "docstring": "Format a string with respect to a set of objects' attributes.\n\nExample:\n\n>>> Class Foo(object):\n>>>     def __init__(self):\n>>>         self.name = \"Dave\"\n>>> print scoped_format(\"hello {foo.name}\", foo=Foo())\nhello Dave\n\nArgs:\nobjects (dict): Dict of objects to format with. If a value is a dict,\nits values, and any further neted dicts, will also format with dot\nnotation.\npretty (bool): See `ObjectStringFormatter`.\nexpand (bool): See `ObjectStringFormatter`.", "source": "codesearchnet"}
{"code": "def get_graphql_schema_from_schema_graph(schema_graph, class_to_field_type_overrides, hidden_classes):\n    _validate_overriden_fields_are_not_defined_in_superclasses(class_to_field_type_overrides, schema_graph)\n    inherited_field_type_overrides = _get_inherited_field_types(class_to_field_type_overrides, schema_graph)\n    if (not schema_graph.get_element_by_class_name(ORIENTDB_BASE_VERTEX_CLASS_NAME).properties):\n        hidden_classes.add(ORIENTDB_BASE_VERTEX_CLASS_NAME)\n    graphql_types = OrderedDict()\n    type_equivalence_hints = OrderedDict()\n    for vertex_cls_name in sorted(schema_graph.vertex_class_names):\n        vertex_cls = schema_graph.get_element_by_class_name(vertex_cls_name)\n        if (vertex_cls_name in hidden_classes):\n            continue\n        inherited_field_type_overrides.setdefault(vertex_cls_name, dict())\n        field_type_overrides = inherited_field_type_overrides[vertex_cls_name]\n        field_specification_lambda = _create_field_specification(schema_graph, graphql_types, field_type_overrides, hidden_classes, vertex_cls_name)\n        current_graphql_type = None\n        if vertex_cls.abstract:\n            current_graphql_type = GraphQLInterfaceType(vertex_cls_name, fields=field_specification_lambda)\n        else:\n            interface_specification_lambda = _create_interface_specification(schema_graph, graphql_types, hidden_classes, vertex_cls_name)\n            current_graphql_type = GraphQLObjectType(vertex_cls_name, field_specification_lambda, interfaces=interface_specification_lambda, is_type_of=(lambda : None))\n        graphql_types[vertex_cls_name] = current_graphql_type\n    for vertex_cls_name in sorted(schema_graph.vertex_class_names):\n        vertex_cls = schema_graph.get_element_by_class_name(vertex_cls_name)\n        if (vertex_cls_name in hidden_classes):\n            continue\n        vertex_cls_subclasses = schema_graph.get_subclass_set(vertex_cls_name)\n        if ((not vertex_cls.abstract) and (len(vertex_cls_subclasses) > 1)):\n            union_type_name = _get_union_type_name(vertex_cls_subclasses)\n            type_specification_lambda = _create_union_types_specification(schema_graph, graphql_types, hidden_classes, vertex_cls_name)\n            union_type = GraphQLUnionType(union_type_name, types=type_specification_lambda)\n            graphql_types[union_type_name] = union_type\n            type_equivalence_hints[graphql_types[vertex_cls_name]] = union_type\n    for non_graph_cls_name in sorted(schema_graph.non_graph_class_names):\n        if (non_graph_cls_name in hidden_classes):\n            continue\n        if (not schema_graph.get_element_by_class_name(non_graph_cls_name).abstract):\n            continue\n        cls_subclasses = schema_graph.get_subclass_set(non_graph_cls_name)\n        if (len(cls_subclasses) > 1):\n            all_non_abstract_subclasses_are_vertices = True\n            for subclass_name in cls_subclasses:\n                subclass = schema_graph.get_element_by_class_name(subclass_name)\n                if (subclass_name != non_graph_cls_name):\n                    if ((not subclass.abstract) and (not subclass.is_vertex)):\n                        all_non_abstract_subclasses_are_vertices = False\n                        break\n            if all_non_abstract_subclasses_are_vertices:\n                inherited_field_type_overrides.setdefault(non_graph_cls_name, dict())\n                field_type_overrides = inherited_field_type_overrides[non_graph_cls_name]\n                
field_specification_lambda = _create_field_specification(schema_graph, graphql_types, field_type_overrides, hidden_classes, non_graph_cls_name)\n                graphql_type = GraphQLInterfaceType(non_graph_cls_name, fields=field_specification_lambda)\n                graphql_types[non_graph_cls_name] = graphql_type\n    if (not graphql_types):\n        raise EmptySchemaError(u'After evaluating all subclasses of V, we were not able to find visible schema data to import into the GraphQL schema object')\n    RootSchemaQuery = GraphQLObjectType('RootSchemaQuery', OrderedDict([(name, GraphQLField(value)) for (name, value) in sorted(six.iteritems(graphql_types), key=(lambda x: x[0])) if (not isinstance(value, GraphQLUnionType))]))\n    schema = GraphQLSchema(RootSchemaQuery, directives=DIRECTIVES)\n    return (schema, _get_referenced_type_equivalences(graphql_types, type_equivalence_hints))", "docstring": "Return a GraphQL schema object corresponding to the schema of the given schema graph.\n\nArgs:\nschema_graph: SchemaGraph\nclass_to_field_type_overrides: dict, class name -> {field name -> field type},\n(string -> {string -> GraphQLType}). Used to override the\ntype of a field in the class where it's first defined and all\nthe class's subclasses.\nhidden_classes: set of strings, classes to not include in the GraphQL schema.\n\nReturns:\ntuple of (GraphQL schema object, GraphQL type equivalence hints dict).\nThe tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).", "source": "codesearchnet"}
{"code": "def maybe_saved_model_directory(export_dir):\n    txt_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)\n    pb_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)\n    cpb_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_CPB)\n    return file_io.file_exists(txt_path) or file_io.file_exists(pb_path) or file_io.file_exists(cpb_path)", "docstring": "Checks whether the provided export directory could contain a SavedModel.\n\nNote that the method does not load any data by itself. If the method returns\n`false`, the export directory definitely does not contain a SavedModel. If the\nmethod returns `true`, the export directory may contain a SavedModel but\nprovides no guarantee that it can be loaded.\n\nArgs:\nexport_dir: Absolute string path to possible export location. For example,\n'/my/foo/model'.\n\nReturns:\nTrue if the export directory contains SavedModel files, False otherwise.", "source": "github-repos"}
{"code": "def get_stable_entries(self, charge_to_discharge=True):\n        \n        list_copy = list(self._stable_entries)\n        return list_copy if charge_to_discharge else list_copy.reverse()", "docstring": "Get the stable entries.\n\nArgs:\ncharge_to_discharge: order from most charge to most discharged\nstate? Default to True.\n\nReturns:\nA list of stable entries in the electrode, ordered by amount of the\nworking ion.", "source": "juraj-google-style"}
{"code": "def _process_inputs(self, input_reader, shard_state, tstate, ctx):\n    processing_limit = self._processing_limit(tstate.mapreduce_spec)\n    if (processing_limit == 0):\n        return\n    finished_shard = True\n    iterator = iter(input_reader)\n    while True:\n        try:\n            entity = iterator.next()\n        except StopIteration:\n            break\n        if isinstance(entity, db.Model):\n            shard_state.last_work_item = repr(entity.key())\n        elif isinstance(entity, ndb.Model):\n            shard_state.last_work_item = repr(entity.key)\n        else:\n            shard_state.last_work_item = repr(entity)[:100]\n        processing_limit -= 1\n        if (not self._process_datum(entity, input_reader, ctx, tstate)):\n            finished_shard = False\n            break\n        elif (processing_limit == 0):\n            finished_shard = False\n            break\n    self.slice_context.incr(context.COUNTER_MAPPER_WALLTIME_MS, int(((self._time() - self._start_time) * 1000)))\n    return finished_shard", "docstring": "Read inputs, process them, and write out outputs.\n\nThis is the core logic of MapReduce. It reads inputs from input reader,\ninvokes user specified mapper function, and writes output with\noutput writer. It also updates shard_state accordingly.\ne.g. if shard processing is done, set shard_state.active to False.\n\nIf errors.FailJobError is caught, it will fail this MR job.\nAll other exceptions will be logged and raised to taskqueue for retry\nuntil the number of retries exceeds a limit.\n\nArgs:\ninput_reader: input reader.\nshard_state: shard state.\ntstate: transient shard state.\nctx: mapreduce context.\n\nReturns:\nWhether this shard has finished processing all its input split.", "source": "codesearchnet"}
{"code": "def detailed_log_handler(self, handler):\n    if (not self.opened()):\n        handler = (handler or util.noop)\n        self._detailed_log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n        self._dll.JLINKARM_EnableLogCom(self._detailed_log_handler)", "docstring": "Setter for the detailed log handler function.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def price(self, valuation_date, market, model=None, pricing_context=None, name=None):\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        valuation_date = dates.convert_to_date_tensor(valuation_date)\n        pay_cf = self._pay_leg.price(valuation_date, market, model, pricing_context)\n        receive_cf = self._receive_leg.price(valuation_date, market, model, pricing_context)\n        return receive_cf - pay_cf", "docstring": "Returns the present value of the instrument on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the interest rate swap.\nmodel: Reserved for future use.\npricing_context: Additional context relevant for pricing.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each IRS\ncontract based on the input market data.", "source": "github-repos"}
{"code": "def load_dataset(data_path: str, findex: typing.Dict[str, int]) -> Dataset:\n    Y = array.array('i')\n    X_rows = array.array('I')\n    X_cols = array.array('I')\n    with open(data_path) as f:\n        i = 0\n        for row in f:\n            cols = row.strip().split('\\t')\n            if len(cols) < 2:\n                continue\n            Y.append(int(cols[0]))\n            hit_indices = [findex[feat] for feat in cols[1:] if feat in findex]\n            X_rows.extend((i for _ in range(len(hit_indices))))\n            X_cols.extend(hit_indices)\n            i += 1\n    return Dataset(jnp.asarray(X_rows), jnp.asarray(X_cols), jnp.asarray(Y))", "docstring": "Loads a dataset from the given encoded data file.\n\nArgs:\ndata_path (str): A file path for the encoded data file.\nfindex (Dict[str, int]): A dictionary that maps a feature to its index.\n\nReturns:\nA dataset", "source": "github-repos"}
{"code": "def to_dict(mapreduce_yaml):\n    all_configs = []\n    for config in mapreduce_yaml.mapreduce:\n        out = {'name': config.name, 'mapper_input_reader': config.mapper.input_reader, 'mapper_handler': config.mapper.handler}\n        if config.mapper.params_validator:\n            out['mapper_params_validator'] = config.mapper.params_validator\n        if config.mapper.params:\n            param_defaults = {}\n            for param in config.mapper.params:\n                param_defaults[param.name] = (param.default or param.value)\n            out['mapper_params'] = param_defaults\n        if config.params:\n            param_defaults = {}\n            for param in config.params:\n                param_defaults[param.name] = (param.default or param.value)\n            out['params'] = param_defaults\n        if config.mapper.output_writer:\n            out['mapper_output_writer'] = config.mapper.output_writer\n        all_configs.append(out)\n    return all_configs", "docstring": "Converts a MapReduceYaml file into a JSON-encodable dictionary.\n\nFor use in user-visible UI and internal methods for interfacing with\nuser code (like param validation). as a list\n\nArgs:\nmapreduce_yaml: The Pyton representation of the mapreduce.yaml document.\n\nReturns:\nA list of configuration dictionaries.", "source": "codesearchnet"}
{"code": "def _get_entry_energy(pd, composition):\n        \n        candidate = [i.energy_per_atom for i in pd.qhull_entries if\n                     i.composition.fractional_composition ==\n                     composition.fractional_composition]\n\n        if not candidate:\n            warnings.warn(\"The reactant \" + composition.reduced_formula +\n                          \" has no matching entry with negative formation\"\n                          \" energy, instead convex hull energy for this\"\n                          \" composition will be used for reaction energy \"\n                          \"calculation. \")\n            return pd.get_hull_energy(composition)\n        else:\n            min_entry_energy = min(candidate)\n            return min_entry_energy * composition.num_atoms", "docstring": "Finds the lowest entry energy for entries matching the composition.\nEntries with non-negative formation energies are excluded. If no\nentry is found, use the convex hull energy for the composition.\n\nArgs:\npd (PhaseDiagram): PhaseDiagram object.\ncomposition (Composition): Composition object that the target\nentry should match.\n\nReturns:\nThe lowest entry energy among entries matching the composition.", "source": "juraj-google-style"}
{"code": "def json(self, branch='master', filename=''):\n        \n        file_contents = self.get(branch=branch, filename=filename)\n\n        try:\n            json_dict = json.loads(file_contents)\n        \n        except ValueError as error:\n            msg = ('\"{filename}\" appears to be invalid json. '\n                   'Please validate it with http:\n                   'JSON decoder error:\\n'\n                   '{error}').format(\n                       filename=filename, error=error)\n            raise SystemExit(msg)\n\n        LOG.debug('JSON object:\\n%s', json_dict)\n        return json_dict", "docstring": "Retrieve _filename_ from GitLab.\n\nArgs:\nbranch (str): Git Branch to find file.\nfilename (str): Name of file to retrieve.\n\nReturns:\ndict: Decoded JSON.\n\nRaises:\nSystemExit: Invalid JSON provided.", "source": "juraj-google-style"}
{"code": "def validate_metadata(train_config):\n    if (len(train_config['csv_header']) != len(train_config['csv_defaults'])):\n        raise ValueError('Unequal number of columns in input features file and schema file.')\n    sorted_columns = sorted((train_config['csv_header'] + [train_config['target_column']]))\n    sorted_columns2 = sorted((((train_config['categorical_columns'] + train_config['numerical_columns']) + [train_config['key_column']]) + [train_config['target_column']]))\n    if (sorted_columns2 != sorted_columns):\n        raise ValueError('Each csv header must be a numerical/categorical type, a  key, or a target.')", "docstring": "Perform some checks that the trainig config is correct.\n\nArgs:\ntrain_config: train config as produced by merge_metadata()\n\nRaises:\nValueError: if columns look wrong.", "source": "codesearchnet"}
{"code": "def delete_object(self, ref, delete_arguments=None):\n    opts = self._get_request_options()\n    if (not isinstance(delete_arguments, dict)):\n        delete_arguments = {}\n    url = self._construct_url(ref, query_params=delete_arguments)\n    self._log_request('delete', url, opts)\n    r = self.session.delete(url, **opts)\n    self._validate_authorized(r)\n    if (r.status_code != requests.codes.ok):\n        self._check_service_availability('delete', r, ref)\n        raise ib_ex.InfobloxCannotDeleteObject(response=jsonutils.loads(r.content), ref=ref, content=r.content, code=r.status_code)\n    return self._parse_reply(r)", "docstring": "Remove an Infoblox object\n\nArgs:\nref               (str): Object reference\ndelete_arguments (dict): Extra delete arguments\nReturns:\nThe object reference of the removed object\nRaises:\nInfobloxException", "source": "codesearchnet"}
{"code": "def enforce_periodic_boundary_conditions( self ):\n        \n        for s in self.sites:\n            for i in range(3):\n                if s.r[i] < 0.0:\n                    s.r[i] += self.cell_lengths[i]\n                if s.r[i] > self.cell_lengths[i]:\n                    s.r[i] -= self.cell_lengths[i]", "docstring": "Ensure that all lattice sites are within the central periodic image of the simulation cell.\nSites that are outside the central simulation cell are mapped back into this cell.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _GetNumericProjectId(self):\n    project_id = 'project/numeric-project-id'\n    return self.watcher.GetMetadata(metadata_key=project_id, recursive=False)", "docstring": "Get the numeric project ID for this VM.\n\nReturns:\nstring, the numeric project ID if one is found.", "source": "codesearchnet"}
{"code": "def generate_token(key, user_id, action_id='', when=None):\n    \n    digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))\n    digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))\n    digester.update(DELIMITER)\n    digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))\n    digester.update(DELIMITER)\n    when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')\n    digester.update(when)\n    digest = digester.digest()\n\n    token = base64.urlsafe_b64encode(digest + DELIMITER + when)\n    return token", "docstring": "Generates a URL-safe token for the given user, action, time tuple.\n\nArgs:\nkey: secret key to use.\nuser_id: the user ID of the authenticated user.\naction_id: a string identifier of the action they requested\nauthorization for.\nwhen: the time in seconds since the epoch at which the user was\nauthorized for this action. If not set the current time is used.\n\nReturns:\nA string XSRF protection token.", "source": "juraj-google-style"}
{"code": "def is_multiline_string(self):\n    return self.is_string and self.value.endswith(('\"\"\"', \"'''\"))", "docstring": "Test if this string is a multiline string.\n\nReturns:\nA multiline string always ends with triple quotes, so if it is a string\ntoken, inspect the last 3 characters and return True if it is a triple\ndouble or triple single quote mark.", "source": "github-repos"}
{"code": "def log_images(self, name, images, step=None):\n    if isinstance(images, six.string_types):\n        raise TypeError('\"images\" should be a list of ndarrays, got {}'.format(type(images)))\n    self._check_step(step)\n    tf_name = self._ensure_tf_name(name)\n    summary = self._image_summary(tf_name, images, step=step)\n    self._log_summary(tf_name, summary, images, step=step)", "docstring": "Log new images for given name on given step.\n\nArgs:\nname (str): name of the variable (it will be converted to a valid\ntensorflow summary name).\nimages (list): list of images to visualize\nstep (int): non-negative integer used for visualization", "source": "codesearchnet"}
{"code": "def graphviz_imshow(self, ax=None, figsize=None, dpi=300, fmt='png', **kwargs):\n    graph = self.get_graphviz(**kwargs)\n    graph.format = fmt\n    graph.attr(dpi=str(dpi))\n    (_, tmpname) = tempfile.mkstemp()\n    path = graph.render(tmpname, view=False, cleanup=True)\n    (ax, fig, _) = get_ax_fig_plt(ax=ax, figsize=figsize, dpi=dpi)\n    import matplotlib.image as mpimg\n    ax.imshow(mpimg.imread(path, format='png'))\n    ax.axis('off')\n    return fig", "docstring": "Generate flow graph in the DOT language and plot it with matplotlib.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nfigsize: matplotlib figure size (None to use default)\ndpi: DPI value.\nfmt: Select format for output image\n\nReturn: matplotlib Figure", "source": "codesearchnet"}
{"code": "def _encode_fhir_path_constraint(self, struct_def: _fhir_path_data_types.StructureDataType, fhir_path_expression: str, node_context: expressions.Builder) -> Optional[_BuilderSql]:\n    if node_context.get_root_builder().fhir_path == node_context.fhir_path:\n        node_context = None\n    try:\n        new_builder = expressions.from_fhir_path_expression(fhir_path_expression, self._context, struct_def, self._primitive_handler, node_context)\n    except Exception as e:\n        self._error_reporter.report_fhir_path_error(self._abs_path_invocation(node_context), f'{node_context}.{fhir_path_expression}', self._error_message_for_exception(e))\n        return None\n    return self._encode_fhir_path_builder_constraint(new_builder, node_context)", "docstring": "Returns a Standard SQL translation of the constraint `fhir_path_expression`.\n\nIf an error is encountered during encoding, the associated error reporter\nwill be notified, and this method will return `None`.\n\nArgs:\nstruct_def: The Structure definition that the fhir_path_expression\noriginates from.\nfhir_path_expression: The fluent-style dot-delimited ('.') FHIRPath\nexpression that encodes to Standard SQL.\nnode_context: The root builder of the fhir_path_expression. May be another\nFHIRPath expression.\n\nReturns:\nA Standard SQL encoding of the constraint `fhir_path_expression` upon\nsuccessful completion. The SQL will evaluate to a single boolean\nindicating whether the constraint is satisfied and the builder that\ncreated it. May be different from the input builder(s).", "source": "github-repos"}
{"code": "def MakeSuiteFromCdf(cdf, name=None):\n    \n    if name is None:\n        name = cdf.name\n\n    suite = Suite(name=name)\n\n    prev = 0.0\n    for val, prob in cdf.Items():\n        suite.Incr(val, prob - prev)\n        prev = prob\n\n    return suite", "docstring": "Makes a normalized Suite from a Cdf object.\n\nArgs:\ncdf: Cdf object\nname: string name for the new Suite\n\nReturns:\nSuite object", "source": "juraj-google-style"}
{"code": "def remove_liers(points):\n    \n    result = [points[0]]\n    for i in range(1, len(points) - 2):\n        prv = points[i-1]\n        crr = points[i]\n        nxt = points[i+1]\n        if prv.time <= crr.time and crr.time <= nxt.time:\n            result.append(crr)\n    result.append(points[-1])\n\n    return result", "docstring": "Removes obvious noise points\n\nChecks time consistency, removing points that appear out of order\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nReturns:\n:obj:`list` of :obj:`Point`", "source": "juraj-google-style"}
{"code": "def __init__(self, num_layers: int=1, in_channels: int=3, out_channels: int=64, use_batchnorm: bool=True):\n    super().__init__()\n    self.conv = Conv2dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False)\n    self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()\n    self.relu = nn.ReLU()\n    self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)", "docstring": "Constructs a Conv2DDownsample model.\n\nArgs:\nin_channels (`int`, *optional*, defaults to 3):\nThe number of input channels.\nout_channels (`int`, *optional*, defaults to 64):\nThe number of conv output channels.\nuse_batchnorm (`bool`, *optional*, defaults to `True`):\nWhether to use batchnorm.", "source": "github-repos"}
{"code": "def _expect(self, expected, times=50):\n        \n        logger.debug('[%s] Expecting [%s]', self.port, expected)\n        retry_times = 10\n        while times:\n            if not retry_times:\n                break\n\n            line = self._readline()\n\n            if line == expected:\n                return\n\n            if not line:\n                retry_times -= 1\n                time.sleep(0.1)\n\n            times -= 1\n\n        raise Exception('failed to find expected string[%s]' % expected)", "docstring": "Find the `expected` line within `times` trials.\n\nArgs:\nexpected    str: the expected string\ntimes       int: number of trials", "source": "juraj-google-style"}
{"code": "def get_image_tokens(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):\n    image_tokens_list = self.vqmodel.encode(pixel_values, image_sizes)\n    bpe_tokens_list = [self.vocabulary_mapping.convert_img2bpe(tokens).flatten() for tokens in image_tokens_list]\n    bpe_tokens = torch.cat(bpe_tokens_list)\n    return bpe_tokens", "docstring": "Tokenizes images into discrete tokens with VQGAN module. Converts\nobtained image tokens into BPE tokens and wraps with \"boi\" and \"eoi\"\nspecial tokens.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\nimage_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):\nThe sizes of the images in the batch, being (height, width) for each image.", "source": "github-repos"}
{"code": "def simple_vertex_array(self, program, buffer, *attributes,\n                            index_buffer=None, index_element_size=4) -> 'VertexArray':\n        \n\n        if type(buffer) is list:\n            raise SyntaxError('Change simple_vertex_array to vertex_array')\n\n        content = [(buffer, detect_format(program, attributes)) + attributes]\n        return self.vertex_array(program, content, index_buffer, index_element_size)", "docstring": "Create a :py:class:`VertexArray` object.\n\nArgs:\nprogram (Program): The program used when rendering.\nbuffer (Buffer): The buffer.\nattributes (list): A list of attribute names.\n\nKeyword Args:\nindex_element_size (int): byte size of each index element, 1, 2 or 4.\nindex_buffer (Buffer): An index buffer.\n\nReturns:\n:py:class:`VertexArray` object", "source": "juraj-google-style"}
{"code": "def _ReverseHostname(self, hostname):\n    \n    if not hostname:\n      return ''\n\n    if len(hostname) <= 1:\n      return hostname\n\n    if hostname[-1] == '.':\n      return hostname[::-1][1:]\n\n    return hostname[::-1][0:]", "docstring": "Reverses the hostname and strips the leading dot.\n\nThe hostname entry is reversed:\nmoc.elgoog.www.\nShould be:\nwww.google.com\n\nArgs:\nhostname (str): reversed hostname.\n\nReturns:\nstr: hostname without a leading dot.", "source": "juraj-google-style"}
{"code": "def get_meta_references(self, datas):\n    rule = datas.get(RULE_META_REFERENCES, {})\n    if (not rule):\n        msg = \"Manifest lacks of '.{}' or is empty\"\n        raise SerializerError(msg.format(RULE_META_REFERENCES))\n    elif rule.get('names', None):\n        names = rule.get('names').split(' ')\n    elif rule.get('auto', None):\n        names = self.get_available_references(datas)\n    else:\n        msg = \"'.{}' either require '--names' or '--auto' variable to be defined\"\n        raise SerializerError(msg.format(RULE_META_REFERENCES))\n    for item in names:\n        self.validate_rule_name(item)\n    return names", "docstring": "Get manifest enabled references declaration\n\nThis required declaration is readed from\n``styleguide-metas-references`` rule that require either a ``--names``\nor ``--auto`` variable, each one define the mode to enable reference:\n\nManually\nUsing ``--names`` which define a list of names to enable, every\nother non enabled rule will be ignored.\n\nSection name (and so Reference name also) must not contains special\ncharacter nor ``-`` so they still be valid variable name for almost\nany languages. For word separator inside name, use ``_``.\nAutomatic\nUsing ``--auto`` variable every reference rules will be enabled.\nThe value of this variable is not important since it is not empty.\n\nIf both of these variables are defined, the manual enable mode is used.\n\nArguments:\ndatas (dict): Data where to search for meta references declaration.\nThis is commonly the fully parsed manifest.\n\nReturns:\nlist: A list of reference names.", "source": "codesearchnet"}
{"code": "def search(self, queryType, query=None, vendorSpecific=None, **kwargs):\n        \n        response = self.searchResponse(queryType, query, vendorSpecific, **kwargs)\n        return self._read_dataone_type_response(response, 'ObjectList')", "docstring": "See Also: searchResponse()\n\nArgs:\nqueryType:\nquery:\nvendorSpecific:\n**kwargs:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get(cls, sha1=''):\n        \n        \n        with conf.within_proj_dir():\n            cmd = 'git show -s --format=\"%H||%an||%ae||%s||%b||%P\" {}'.format(\n                sha1\n            )\n            result = shell.run(cmd, capture=True, never_pretend=True).stdout\n\n        sha1, name, email, title, desc, parents = result.split('||')\n\n        return CommitDetails(\n            sha1=sha1,\n            author=Author(name, email),\n            title=title,\n            desc=desc,\n            parents_sha1=parents.split(),\n        )", "docstring": "Return details about a given commit.\n\nArgs:\nsha1 (str):\nThe sha1 of the commit to query. If not given, it will return\nthe details for the latest commit.\n\nReturns:\nCommitDetails: Commit details. You can use the instance of the\nclass to query git tree further.", "source": "juraj-google-style"}
{"code": "def options(self, options=None):\n\t\t\n\n\t\t\n\t\tif options is None:\n\t\t\treturn self._options\n\n\t\t\n\t\tif not isinstance(options, (list, tuple)):\n\t\t\traise ValueError('__options__')\n\n\t\t\n\t\tif self._type not in ['base64', 'date', 'datetime', 'decimal', 'float', \\\n\t\t\t\t\t\t\t\t'int', 'ip', 'md5', 'price', 'string', 'time', \\\n\t\t\t\t\t\t\t\t'timestamp', 'uint', 'uuid']:\n\t\t\traise TypeError('can not set __options__ for ' + self._type)\n\n\t\t\n\t\tlOpts = []\n\n\t\t\n\t\tfor i in range(len(options)):\n\n\t\t\t\n\t\t\t\n\t\t\tif self._type in ['base64', 'date', 'datetime', 'ip', 'md5', 'time', 'uuid']:\n\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif not isinstance(options[i], basestring) \\\n\t\t\t\t\tor not _typeToRegex[self._type].match(options[i]):\n\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\n\t\t\telif self._type == 'decimal':\n\n\t\t\t\t\n\t\t\t\tif isinstance(options[i], Decimal):\n\t\t\t\t\tpass\n\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\ttry: options[i] = Decimal(options[i])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\n\t\t\telif self._type == 'float':\n\n\t\t\t\ttry:\n\t\t\t\t\toptions[i] = float(options[i])\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\n\t\t\telif self._type in ['int', 'timestamp', 'uint']:\n\n\t\t\t\t\n\t\t\t\tif not isinstance(options[i], (int, long)):\n\n\t\t\t\t\t\n\t\t\t\t\tif not isinstance(options[i], basestring):\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\toptions[i] = int(options[i], 0)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\t\n\t\t\t\tif self._type in ['timestamp', 'uint'] and options[i] < 0:\n\t\t\t\t\traise ValueError('__options__[' + str(i) + ']')\n\n\t\t\t\n\t\t\telif self._type == 'price':\n\n\t\t\t\t\n\t\t\t\tif isinstance(options[i], Decimal):\n\t\t\t\t\tpass\n\n\t\t\t\t\n\t\t\t\telif not isinstance(options[i], basestring) or not _typeToRegex['price'].match(options[i]):\n\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\t\n\t\t\t\toptions[i] = Decimal(options[i])\n\n\t\t\t\n\t\t\telif self._type == 'string':\n\n\t\t\t\t\n\t\t\t\tif not isinstance(options[i], basestring):\n\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\toptions[i] = str(options[i])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\traise ValueError('__options__[%d]' % i)\n\n\t\t\t\n\t\t\telse:\n\t\t\t\traise TypeError('can not set __options__ for ' + self._type)\n\n\t\t\t\n\t\t\tif options[i] in lOpts:\n\t\t\t\tsys.stderr.write('__options__[' + str(i) + '] is a duplicate')\n\n\t\t\t\n\t\t\telse:\n\t\t\t\tlOpts.append(options[i])\n\n\t\t\n\t\tself._options = lOpts", "docstring": "Options\n\nSets or gets the list of acceptable values for the Node\n\nArguments:\noptions {list} -- A list of valid values\n\nRaises:\nTypeError, ValueError\n\nReturns:\nNone | list", "source": "juraj-google-style"}
{"code": "def logged(level=logging.DEBUG):\n    \n    def wrap(f):\n        _logger = logging.getLogger(\"{}.{}\".format(f.__module__, f.__name__))\n\n        def wrapped_f(*args, **kwargs):\n            _logger.log(level, \"Called at {} with args = {} and kwargs = {}\"\n                        .format(datetime.datetime.now(), args, kwargs))\n            data = f(*args, **kwargs)\n            _logger.log(level, \"Done at {} with args = {} and kwargs = {}\"\n                        .format(datetime.datetime.now(), args, kwargs))\n            return data\n\n        return wrapped_f\n    return wrap", "docstring": "Useful logging decorator. If a method is logged, the beginning and end of\nthe method call will be logged at a pre-specified level.\n\nArgs:\nlevel: Level to log method at. Defaults to DEBUG.", "source": "juraj-google-style"}
{"code": "def get_table_map() -> t.Dict:\n    fs = GCSFileSystem()\n    table_map = {}\n    with fs.open(METADATA_URI) as f:\n        table_map = json.load(f)\n    return table_map", "docstring": "Load and return the table map from dataset-meta.json file.\n\nReturns:\ndict: Dictionary containing table names as keys and their metadata as values.", "source": "github-repos"}
{"code": "def transfer(self, data):\n        \n        settings = self.transfer_settings\n        settings.spi_tx_size = len(data)\n        self.transfer_settings = settings\n\n        response = ''\n        for i in range(0, len(data), 60):\n            response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data\n            time.sleep(0.01)\n\n        while len(response) < len(data):\n            response += self.sendCommand(commands.SPITransferCommand('')).data\n\n        return ''.join(response)", "docstring": "Transfers data over SPI.\n\nArguments:\ndata: The data to transfer.\n\nReturns:\nThe data returned by the SPI device.", "source": "juraj-google-style"}
{"code": "def write(name, value):\n\n    def wrapped(func):\n\n        @functools.wraps(func)\n        def _decorator(*args, **kwargs):\n            existing_env = core.read(name, allow_none=True)\n            core.write(name, value)\n            func_val = func(*args, **kwargs)\n            core.write(name, existing_env)\n            return func_val\n        return _decorator\n    return wrapped", "docstring": "Temporarily change or set the environment variable during the execution of a function.\n\nArgs:\nname: The name of the environment variable\nvalue: A value to set for the environment variable\n\nReturns:\nThe function return value.", "source": "codesearchnet"}
{"code": "def _upsample_filters(filters, rate):\n    if rate == 1:\n        return filters\n    filters_up = np.transpose(filters, [2, 3, 0, 1])\n    ker = np.zeros([rate, rate], dtype=np.float32)\n    ker[0, 0] = 1\n    filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]\n    filters_up = np.transpose(filters_up, [2, 3, 0, 1])\n    return filters_up", "docstring": "Upsamples the filters by a factor of rate along the spatial dimensions.\n\nArgs:\nfilters: [h, w, in_depth, out_depth]. Original filters.\nrate: An int, specifying the upsampling rate.\n\nReturns:\nfilters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with\nh_up = h + (h - 1) * (rate - 1)\nw_up = w + (w - 1) * (rate - 1)\ncontaining (rate - 1) zeros between consecutive filter values along\nthe filters' spatial dimensions.", "source": "github-repos"}
{"code": "def __init__(self,\n            density_matrix: np.ndarray,\n            measurements: Dict[str, np.ndarray],\n            qubit_map: Dict[ops.Qid, int],\n            dtype: Type[np.number] = np.complex64):\n        \n        super().__init__(measurements)\n        self._density_matrix = density_matrix\n        self._qubit_map = qubit_map\n        self._dtype = dtype", "docstring": "DensityMatrixStepResult.\n\nArgs:\ndensity_matrix: The density matrix at this step. Can be mutated.\nmeasurements: The measurements for this step of the simulation.\nqubit_map: A map from qid to index used to define the\nordering of the basis in density_matrix.\ndtype: The numpy dtype for the density matrix.", "source": "juraj-google-style"}
{"code": "def GetKeyByScriptHash(self, script_hash):\n    contract = self.GetContract(script_hash)\n    if contract:\n        return self.GetKey(contract.PublicKeyHash)\n    return None", "docstring": "Get the KeyPair belonging to the script hash.\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20) representing the public key.\n\nReturns:\nKeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None", "source": "codesearchnet"}
{"code": "def ConvertValueForCsv(pql_value):\n  \n  if 'value' in pql_value:\n    field = pql_value['value']\n  elif 'values' in pql_value:\n    field = pql_value['values']\n  else:\n    field = None\n\n  if field:\n    if isinstance(field, list):\n      return ','.join(['\"%s\"' % str(ConvertValueForCsv(single_field))\n                       for single_field in field])\n    else:\n      class_type = ad_manager.AdManagerClassType(pql_value)\n\n      if class_type == 'TextValue':\n        return field.replace('\"', '\"\"').encode('UTF8')\n      elif class_type == 'NumberValue':\n        return float(field) if '.' in field else int(field)\n      elif class_type == 'DateTimeValue':\n        return ConvertDateTimeToOffset(field)\n      elif class_type == 'DateValue':\n        return date(int(field['date']['year']),\n                    int(field['date']['month']),\n                    int(field['date']['day'])).isoformat()\n      else:\n        return field\n  else:\n    return '-'", "docstring": "Sanitizes a field value from a Value object to a CSV suitable format.\n\nArgs:\npql_value: dict a dictionary containing the data for a single field of an\nentity.\n\nReturns:\nstr a CSV writer friendly value formatted by Value.Type.", "source": "juraj-google-style"}
{"code": "def __init__(self, prevHash=None, timestamp=None, index=None,\n                 consensusData=None, nextConsensus=None,\n                 script=None, transactions=None, build_root=False):\n        \n\n        super(Block, self).__init__()\n        self.Version = 0\n        self.PrevHash = prevHash\n        self.Timestamp = timestamp\n        self.Index = index\n        self.ConsensusData = consensusData\n        self.NextConsensus = nextConsensus\n        self.Script = script\n\n        if transactions:\n            self.Transactions = transactions\n        else:\n            self.Transactions = []\n\n        if build_root:\n            self.RebuildMerkleRoot()", "docstring": "Create an instance.\n\nArgs:\nprevHash (UInt160):\ntimestamp (int): seconds since Unix epoch.\nindex (int): block height.\nconsensusData (int): uint64.\nnextConsensus (UInt160):\nscript (neo.Core.Witness): script used to verify the block.\ntransactions (list): of neo.Core.TX.Transaction.Transaction objects.\nbuild_root (bool): flag indicating whether to rebuild the merkle root.", "source": "juraj-google-style"}
{"code": "def ReadFrom(self, byte_stream):\n    \n    try:\n      return self._struct.unpack_from(byte_stream)\n    except (TypeError, struct.error) as exception:\n      raise IOError('Unable to read byte stream with error: {0!s}'.format(\n          exception))", "docstring": "Read values from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\ntuple[object, ...]: values copied from the byte stream.\n\nRaises:\nIOError: if byte stream cannot be read.\nOSError: if byte stream cannot be read.", "source": "juraj-google-style"}
{"code": "def loads(s, single=False):\n    \n    corpus = etree.fromstring(s)\n    if single:\n        ds = _deserialize_mrs(next(corpus))\n    else:\n        ds = (_deserialize_mrs(mrs_elem) for mrs_elem in corpus)\n    return ds", "docstring": "Deserialize MRX string representations\n\nArgs:\ns (str): a MRX string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of Xmrs objects (unless *single* is `True`)", "source": "juraj-google-style"}
{"code": "def get_from(input_file, property_names):\n    \n\n    \n    with open(input_file) as f:\n        feature_collection = geojson.load(f)\n\n    features = feature_collection['features']\n    values = [tuple([feat['properties'].get(x)\n                     for x in property_names]) for feat in features]\n\n    return values", "docstring": "Reads a geojson and returns a list of value tuples, each value corresponding to a\nproperty in property_names.\n\nArgs:\ninput_file (str): File name.\nproperty_names: List of strings; each string is a property name.\n\nReturns:\nList of value tuples.", "source": "juraj-google-style"}
{"code": "def _deferred_dependencies(self):\n    return self._self_unconditional_deferred_dependencies", "docstring": "A dictionary with deferred dependencies.\n\nStores restorations for other Trackable objects on which this object\nmay eventually depend. May be overridden by sub-classes (e.g. Optimizers use\nconditional dependencies based the current graph, and so need separate\nmanagement of deferred dependencies too).\n\nReturns:\nA dictionary mapping from local name to a list of CheckpointPosition\nobjects.", "source": "github-repos"}
{"code": "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n        \n        X = df_features.values\n        y = df_target.values\n        regressor = DecisionTreeRegressor()\n        regressor.fit(X, y)\n\n        return regressor.feature_importances_", "docstring": "For one variable, predict its neighbouring nodes.\n\nArgs:\ndf_features (pandas.DataFrame):\ndf_target (pandas.Series):\nidx (int): (optional) for printing purposes\nkwargs (dict): additional options for algorithms\n\nReturns:\nlist: scores of each feature relative to the target", "source": "juraj-google-style"}
{"code": "def report_conversion_error(self, element_path: str, msg: str) -> None:", "docstring": "Reports the given error during FHIR conversion.\n\nThis indicates that the resource does not fully comply with the FHIR\nspecification or profile, and the field could not be converted to the target\nstructure. Data may have been lost during the conversion.\n\nArgs:\nelement_path: The path to the field where the issue occurred.\nmsg: The error message produced.", "source": "github-repos"}
{"code": "def package_in_memory(cls, workflow_name, workflow_files):\n        \n        s = StringIO()\n        p = cls(s, workflow_name, meta_data=[])\n        p.add_bpmn_files_by_glob(workflow_files)\n        p.create_package()\n        return s.getvalue()", "docstring": "Generates wf packages from workflow diagrams.\n\nArgs:\nworkflow_name: Name of wf\nworkflow_files:  Diagram  file.\n\nReturns:\nWorkflow package (file like) object", "source": "juraj-google-style"}
{"code": "def verify_mfa(self, mfa_token):\n    response = self.resource.verify_mfa({'mfa_token': mfa_token})\n    return ((response['valid'] == True) or (response['valid'] == 'true'))", "docstring": "Verify an SMS or TOTP MFA token for this user.\n\nArgs:\nmfa_token (str): An alphanumeric code from either a User's TOTP\napplication or sent to them via SMS.\n\nReturns:\nTrue if the mfa_token is valid, False otherwise.", "source": "codesearchnet"}
{"code": "def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern):\n    submissions = self._storage_client.list_blobs(prefix=os.path.join(self._round_name, dir_suffix))\n    return {id_pattern.format(idx): SubmissionDescriptor(path=s, participant_id=participant_from_submission_path(s)) for (idx, s) in enumerate(submissions)}", "docstring": "Loads list of submissions from the directory.\n\nArgs:\ndir_suffix: suffix of the directory where submissions are stored,\none of the following constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR\nor DEFENSE_SUBDIR.\nid_pattern: pattern which is used to generate (internal) IDs\nfor submissions. One of the following constants: ATTACK_ID_PATTERN,\nTARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN.\n\nReturns:\ndictionary with all found submissions", "source": "codesearchnet"}
{"code": "def split(x, split_dim, num_or_size_splits, name=None):\n    return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs", "docstring": "Like tf.split.\n\nArgs:\nx: a Tensor\nsplit_dim: a Dimension in x.shape.dims\nnum_or_size_splits: either an integer dividing split_dim.size\nor a list of integers adding up to split_dim.size\nname: an optional string\nReturns:\na list of Tensors.", "source": "codesearchnet"}
{"code": "def find_code_and_splits(object_name: str, base_path: str, buffer: Optional[dict]=None):\n    if buffer is None:\n        buffer = {}\n    if (object_name, base_path) in buffer:\n        lines, code, code_splits = buffer[object_name, base_path]\n    else:\n        code, (lines, target_start_index, target_end_index) = find_code_in_transformers(object_name, base_path=base_path, return_indices=True)\n        indent = get_indent(code)\n        code_splits = split_code_into_blocks(lines, target_start_index, target_end_index, len(indent) + 4, backtrace=True)\n        buffer[object_name, base_path] = (lines, code, code_splits)\n    return (lines, code, code_splits)", "docstring": "Find the code of an object (specified by `object_name`) and split it into blocks.\n\nArgs:\nobject_name (`str`):\nThe name of the object, e.g. `transformers.models.bert.modeling_bert.BertAttention` or\n`tests.models.llama.test_modeling_llama.LlamaModelTest.test_config`.\nbase_path (`str`):\nThe path to the base directory within which the search will be performed. It could be either\n`TRANSFORMERS_PATH` or `MODEL_TEST_PATH`.\nbuffer (`dict`, *optional*):\nThe buffer used to store the previous results in order to speed up the process.\n\nReturns:\nlines (`List[str]`):\nThe lines of the whole file where the object is defined.\ncode (`str`):\nThe object's code.\ncode_splits (`List[Tuple[str, int, int]]`):\n`code` splitted into blocks. See `split_code_into_blocks`.", "source": "github-repos"}
{"code": "def compute_mup_vector(config):\n    intermediate_size = config.mamba_d_ssm if config.mamba_d_ssm is not None else int(config.mamba_expand * config.hidden_size)\n    groups_time_state_size = config.mamba_n_groups * config.mamba_d_state\n    num_heads = config.mamba_n_heads\n    zxbcdt_multipliers = config.ssm_multipliers\n    vector_shape = 2 * intermediate_size + 2 * groups_time_state_size + num_heads\n    mup_vector = torch.ones(1, 1, vector_shape)\n    mup_vector[:, :, :intermediate_size] *= zxbcdt_multipliers[0]\n    mup_vector[:, :, intermediate_size:2 * intermediate_size] *= zxbcdt_multipliers[1]\n    mup_vector[:, :, 2 * intermediate_size:2 * intermediate_size + groups_time_state_size] *= zxbcdt_multipliers[2]\n    mup_vector[:, :, 2 * intermediate_size + groups_time_state_size:2 * intermediate_size + 2 * groups_time_state_size] *= zxbcdt_multipliers[3]\n    mup_vector[:, :, 2 * intermediate_size + 2 * groups_time_state_size:] *= zxbcdt_multipliers[4]\n    return mup_vector", "docstring": "Computes the MuP vector based on model configuration.\n\nFalconH1 applies different MuP multiplier for each dimension of the hidden states.\nThe MuP vector is partitioned into chunks, and each chunk is multiplied with its\ncorresponding projected dimension.\n\nArgs:\nconfig: FalconH1Config object\n\nReturns:\ntorch.Tensor: The computed MuP vector", "source": "github-repos"}
{"code": "def build_vocab(self, texts, verbose=1, **kwargs):\n    if self.has_vocab:\n        logger.warn('Tokenizer already has existing vocabulary. Overriding and building new vocabulary.')\n    progbar = Progbar(len(texts), verbose=verbose, interval=0.25)\n    count_tracker = utils._CountTracker()\n    self._token_counts.clear()\n    self._num_texts = len(texts)\n    for token_data in self.token_generator(texts, **kwargs):\n        (indices, token) = (token_data[:(- 1)], token_data[(- 1)])\n        count_tracker.update(indices)\n        self._token_counts[token] += 1\n        progbar.update(indices[0])\n    self.create_token_indices(self._token_counts.keys())\n    count_tracker.finalize()\n    self._counts = count_tracker.counts\n    progbar.update(len(texts))", "docstring": "Builds the internal vocabulary and computes various statistics.\n\nArgs:\ntexts: The list of text items to encode.\nverbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)\n**kwargs: The kwargs for `token_generator`.", "source": "codesearchnet"}
{"code": "def collect_human_trajectory(env, device):\n    \n\n    obs = env.reset()\n\n    \n    env.set_robot_joint_positions([0, -1.18, 0.00, 2.18, 0.00, 0.57, 1.5708])\n\n    env.viewer.set_camera(camera_id=2)\n    env.render()\n\n    is_first = True\n\n    \n    reset = False\n    task_completion_hold_count = -1 \n    device.start_control()\n    while not reset:\n        state = device.get_controller_state()\n        dpos, rotation, grasp, reset = (\n            state[\"dpos\"],\n            state[\"rotation\"],\n            state[\"grasp\"],\n            state[\"reset\"],\n        )\n\n        \n        current = env._right_hand_orn\n        drotation = current.T.dot(rotation)  \n        dquat = T.mat2quat(drotation)\n        grasp = grasp - 1.  \n        action = np.concatenate([dpos, dquat, [grasp]])\n\n        obs, reward, done, info = env.step(action)\n\n        if is_first:\n            is_first = False\n\n            \n            \n            \n            \n            \n            \n            initial_mjstate = env.sim.get_state().flatten()\n            xml_str = env.model.get_xml()\n            env.reset_from_xml_string(xml_str)\n            env.sim.reset()\n            env.sim.set_state_from_flattened(initial_mjstate)\n            env.sim.forward()\n            env.viewer.set_camera(camera_id=2)\n\n        env.render()\n\n        if task_completion_hold_count == 0:\n            break\n\n        \n        if env._check_success():\n            if task_completion_hold_count > 0:\n                task_completion_hold_count -= 1 \n            else:\n                task_completion_hold_count = 10 \n        else:\n            task_completion_hold_count = -1 \n\n    \n    env.close()", "docstring": "Use the device (keyboard or SpaceNav 3D mouse) to collect a demonstration.\nThe rollout trajectory is saved to files in npz format.\nModify the DataCollectionWrapper wrapper to add new fields or change data formats.\n\nArgs:\nenv: environment to control\ndevice (instance of Device class): to receive controls from the device", "source": "juraj-google-style"}
{"code": "def annotate_op(self, op):\n        \n\n        if isinstance(op, Label):\n            return op\n        else:\n            return AnnotatedOp(self, op.name, op.arg)", "docstring": "Takes a bytecode operation (:class:`Op`) and annotates it using the\ndata contained in this code object.\n\nArguments:\nop(Op): An :class:`Op` instance.\n\nReturns:\nAnnotatedOp: An annotated bytecode operation.", "source": "juraj-google-style"}
{"code": "def configure(self, cfg, handler, path=\"\"):\n\n        \n\n        \n        for name, attr in handler.attributes():\n            if cfg.get(name) is not None:\n                continue\n            if attr.expected_type not in [list, dict]:\n                cfg[name] = self.set(handler, attr, name, path, cfg)\n            elif attr.default is None and not hasattr(handler, \"configure_%s\" % name):\n                self.action_required.append((\"%s.%s: %s\" % (path, name, attr.help_text)).strip(\".\"))\n\n        \n        \n        \n        for name, attr in handler.attributes():\n            if cfg.get(name) is not None:\n                continue\n            if hasattr(handler, \"configure_%s\" % name):\n                fn = getattr(handler, \"configure_%s\" % name)\n                fn(self, cfg, \"%s.%s\"% (path, name))\n                if attr.expected_type in [list, dict] and not cfg.get(name):\n                    try:\n                        del cfg[name]\n                    except KeyError:\n                        pass", "docstring": "Start configuration process for the provided handler\n\nArgs:\ncfg (dict): config container\nhandler (config.Handler class): config handler to use\npath (str): current path in the configuration progress", "source": "juraj-google-style"}
{"code": "def ValidateDict(self, dict_value, outer_messages):\n    valid_dict = {}\n    for f in self.fields:\n        if f.name in dict_value:\n            valid_dict[f.name] = self._ValidateField(f, dict_value[f.name], outer_messages)\n        elif not f.optional:\n            raise NameError(\"Mandatory field missing in message '%s': %s\" % (self.name, f.name))\n    return valid_dict", "docstring": "Validate a dictionary value.\n\nIt checks whether all individual fields of |dict_value| are valid, i.e.\nall required fields exist and the values of fields correspond to their\ntypes.\n\nArgs:\ndict_value: Dictionary value to validate.\nouter_messages: Messages visible from the scope of |dict_value|.\nReturns:\nDictionary value validated.\nRaises:\nNameError: If any required fields are missing.", "source": "github-repos"}
{"code": "def non_slot_devices(self, var_list):\n    raise NotImplementedError('must be implemented in descendants')", "docstring": "Device(s) for non-slot variables.\n\nDEPRECATED: TF 1.x ONLY.\n\nThis method returns non-slot devices where non-slot variables are placed.\nUsers can create non-slot variables on these devices by using a block:\n\n```python\nwith tf.distribute.StrategyExtended.colocate_vars_with(tf.distribute.StrategyExtended.non_slot_devices(...)):\n...\n```\n\nArgs:\nvar_list: The list of variables being optimized, needed with the\ndefault `tf.distribute.Strategy`.\nReturns:\nA sequence of devices for non-slot variables.", "source": "github-repos"}
{"code": "def n_choose_k(n, k):\n    if (n == 0):\n        return 0\n    return reduce((lambda x, y: ((x * y[0]) / y[1])), zip(range(((n - k) + 1), (n + 1)), range(1, (k + 1))), 1)", "docstring": "Return the number of combinations for n choose k.\n\nArgs:\nn (int): the total number of options.\nk (int): The number of elements.\n\nReturns:\nint: returns the binomial coefficient", "source": "codesearchnet"}
{"code": "def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=0.001, max_depth=10):\n    super().__init__()\n    self.in_features = config.bottleneck_features\n    self.bin_centers_type = config.bin_centers_type\n    self.min_depth = min_depth\n    self.max_depth = max_depth\n    self.conv1 = nn.Conv2d(self.in_features, mlp_dim, 1, 1, 0)\n    self.act1 = nn.ReLU(inplace=True)\n    self.conv2 = nn.Conv2d(mlp_dim, n_bins, 1, 1, 0)\n    self.act2 = nn.ReLU(inplace=True) if self.bin_centers_type == 'normed' else nn.Softplus()", "docstring": "Bin center regressor network.\n\nCan be \"normed\" or \"unnormed\". If \"normed\", bin centers are bounded on the (min_depth, max_depth) interval.\n\nArgs:\nconfig (`int`):\nModel configuration.\nn_bins (`int`, *optional*, defaults to 16):\nNumber of bin centers.\nmlp_dim (`int`, *optional*, defaults to 256):\nHidden dimension.\nmin_depth (`float`, *optional*, defaults to 1e-3):\nMin depth value.\nmax_depth (`float`, *optional*, defaults to 10):\nMax depth value.", "source": "github-repos"}
{"code": "def _valuelistToBytestring(valuelist, numberOfRegisters):\n    \n    MINVALUE = 0\n    MAXVALUE = 65535\n\n    _checkInt(numberOfRegisters, minvalue=1, description='number of registers')\n\n    if not isinstance(valuelist, list):\n        raise TypeError('The valuelist parameter must be a list. Given {0!r}.'.format(valuelist))\n\n    for value in valuelist:\n        _checkInt(value, minvalue=MINVALUE, maxvalue=MAXVALUE, description='elements in the input value list')\n\n    _checkInt(len(valuelist), minvalue=numberOfRegisters, maxvalue=numberOfRegisters, \\\n        description='length of the list')\n\n    numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n\n    bytestring = ''\n    for value in valuelist:\n        bytestring += _numToTwoByteString(value, signed=False)\n\n    assert len(bytestring) == numberOfBytes\n    return bytestring", "docstring": "Convert a list of numerical values to a bytestring.\n\nEach element is 'unsigned INT16'.\n\nArgs:\n* valuelist (list of int): The input list. The elements should be in the range 0 to 65535.\n* numberOfRegisters (int): The number of registers. For error checking.\n\nReturns:\nA bytestring (str). Length = 2*numberOfRegisters\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListInstances = channel.unary_unary(\n            \"/google.cloud.redis.v1beta1.CloudRedis/ListInstances\",\n            request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.ListInstancesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.ListInstancesResponse.FromString,\n        )\n        self.GetInstance = channel.unary_unary(\n            \"/google.cloud.redis.v1beta1.CloudRedis/GetInstance\",\n            request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.GetInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.Instance.FromString,\n        )\n        self.CreateInstance = channel.unary_unary(\n            \"/google.cloud.redis.v1beta1.CloudRedis/CreateInstance\",\n            request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.CreateInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.UpdateInstance = channel.unary_unary(\n            \"/google.cloud.redis.v1beta1.CloudRedis/UpdateInstance\",\n            request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.UpdateInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.DeleteInstance = channel.unary_unary(\n            \"/google.cloud.redis.v1beta1.CloudRedis/DeleteInstance\",\n            request_serializer=google_dot_cloud_dot_redis__v1beta1_dot_proto_dot_cloud__redis__pb2.DeleteInstanceRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def start(self, channel):\n        \n\n        super(TileBasedVirtualDevice, self).start(channel)\n\n        for tile in self._tiles.values():\n            tile.start(channel=channel)", "docstring": "Start running this virtual device including any necessary worker threads.\n\nArgs:\nchannel (IOTilePushChannel): the channel with a stream and trace\nroutine for streaming and tracing data through a VirtualInterface", "source": "juraj-google-style"}
{"code": "def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):\n    \n    if get_dirs is None and get_files is None:\n        get_dirs = True\n        get_files = True\n\n    source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)\n\n    dirs = []\n\n    for dir_or_file_name in os.listdir(source_dir):\n        path = os.path.join(source_dir, dir_or_file_name)\n        if hide_ignored and dir_or_file_name.startswith('_'):\n            continue\n        is_dir = os.path.isdir(path)\n        if get_dirs and is_dir or get_files and not is_dir:\n            dirs.append(dir_or_file_name)\n\n    return dirs", "docstring": "Return list of all dirs and files inside given dir.\n\nAlso can filter contents to return only dirs or files.\n\nArgs:\n- dir_name: Which directory we need to scan (relative)\n- get_dirs: Return dirs list\n- get_files: Return files list\n- hide_ignored: Exclude files and dirs with initial underscore", "source": "juraj-google-style"}
{"code": "def delete(self, **options):\n    \n    fut = delete_async(self.key(), **options)\n    fut.get_result()", "docstring": "Permanently delete this blob from Blobstore.\n\nArgs:\n**options: Options for create_rpc().", "source": "juraj-google-style"}
{"code": "def __init__(self, particle_kind=\"bead\"):\n        \n        super(Bead, self).__init__()\n\n        self.add(mb.Particle(name=particle_kind), particle_kind)\n\n        self.add(mb.Port(anchor=self.labels[particle_kind]), 'up')\n        self['up'].translate(np.array([0, 0.7, 0]))\n\n        self.add(mb.Port(anchor=self.labels[particle_kind]), 'down')\n        self['down'].translate(np.array([0, -0.7, 0]))", "docstring": "Initialize a Bead object.\n\nArgs:\nparticle_kind (str): Descriptive name for the Bead.", "source": "juraj-google-style"}
{"code": "def has_member(self, device_object):\n        \n        if device_object.tag == \"computer\":\n            container_search = \"computers/computer\"\n        elif device_object.tag == \"mobile_device\":\n            container_search = \"mobile_devices/mobile_device\"\n        else:\n            raise ValueError\n\n        return len([device for device in self.findall(container_search) if\n                    device.findtext(\"id\") == device_object.id]) != 0", "docstring": "Return bool whether group has a device as a member.\n\nArgs:\ndevice_object (Computer or MobileDevice). Membership is\ndetermined by ID, as names can be shared amongst devices.", "source": "juraj-google-style"}
{"code": "def _generate_fieldnames_if_bai_query(self, node_value, bai_field_variation, query_bai_field_if_dots_in_name):\n    if (bai_field_variation not in (FieldVariations.search, FieldVariations.raw)):\n        raise ValueError('Non supported field variation \"{}\".'.format(bai_field_variation))\n    normalized_author_name = normalize_name(node_value).strip('.')\n    if (ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and ElasticSearchVisitor.BAI_REGEX.match(node_value)):\n        return [((ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.') + bai_field_variation)]\n    elif ((not whitespace.search(normalized_author_name)) and query_bai_field_if_dots_in_name and ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'] and ('.' in normalized_author_name)):\n        return ([((ElasticSearchVisitor.AUTHORS_BAI_FIELD + '.') + bai_field_variation)] + force_list(ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']))\n    else:\n        return None", "docstring": "Generates new fieldnames in case of BAI query.\n\nArgs:\nnode_value (six.text_type): The node's value (i.e. author name).\nbai_field_variation (six.text_type): Which field variation to query ('search' or 'raw').\nquery_bai_field_if_dots_in_name (bool): Whether to query BAI field (in addition to author's name field)\nif dots exist in the name and name contains no whitespace.\n\nReturns:\nlist: Fieldnames to query on, in case of BAI query or None, otherwise.\n\nRaises:\nValueError, if ``field_variation`` is not one of ('search', 'raw').", "source": "codesearchnet"}
{"code": "def assert_type(__x, __t) -> None:\n    del __x, __t", "docstring": "Prevent runtime errors from assert_type statements.\n\nassert_type is handled internally by pytype at type-checking time; it should\ndo nothing at runtime.\n\nUsage example:\n\n```\nimport pytype_extensions\nassert_type = pytype_extensions.assert_type\n\nx = 3\nassert_type(x, int)\n```\n\nArgs:\n__x: The object to make the type assertion about.\n__t: The type we want to assert.", "source": "github-repos"}
{"code": "def _tensor_product(t1, t2):\n    return tf.matmul(tf.expand_dims(t1, axis=(- 1)), tf.expand_dims(t2, axis=(- 2)))", "docstring": "Computes the outer product of two possibly batched vectors.\n\nArgs:\nt1: A `tf.Tensor` of shape `[..., n]`.\nt2: A `tf.Tensor` of shape `[..., m]`.\n\nReturns:\nA tensor of shape `[..., n, m]` with matching batch dimensions, let's call\nit `r`, whose components are:\n\n```None\nr[..., i, j] = t1[..., i] * t2[..., j]\n```", "source": "codesearchnet"}
{"code": "def find_matching(self) -> Dict[(TLeft, TRight)]:\n    directed_graph = {}\n    for (left, right) in self._edges:\n        tail = (LEFT, left)\n        head = (RIGHT, right)\n        if (tail not in directed_graph):\n            directed_graph[tail] = {head}\n        else:\n            directed_graph[tail].add(head)\n    matching = HopcroftKarp(directed_graph).maximum_matching()\n    return dict(((tail[1], head[1]) for (tail, head) in matching.items() if (tail[0] == LEFT)))", "docstring": "Finds a matching in the bipartite graph.\n\nThis is done using the Hopcroft-Karp algorithm with an implementation from the\n`hopcroftkarp` package.\n\nReturns:\nA dictionary where each edge of the matching is represented by a key-value pair\nwith the key being from the left part of the graph and the value from the right part.", "source": "codesearchnet"}
{"code": "def _GetMember(component, args):\n    members = dir(component)\n    arg = args[0]\n    arg_names = [arg, arg.replace('-', '_')]\n    for arg_name in arg_names:\n        if arg_name in members:\n            return (getattr(component, arg_name), [arg], args[1:])\n    raise FireError('Could not consume arg:', arg)", "docstring": "Returns a subcomponent of component by consuming an arg from args.\n\nGiven a starting component and args, this function gets a member from that\ncomponent, consuming one arg in the process.\n\nArgs:\ncomponent: The component from which to get a member.\nargs: Args from which to consume in the search for the next component.\nReturns:\ncomponent: The component that was found by consuming an arg.\nconsumed_args: The args that were consumed by getting this member.\nremaining_args: The remaining args that haven't been consumed yet.\nRaises:\nFireError: If we cannot consume an argument to get a member.", "source": "github-repos"}
{"code": "def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,\n                  max_window=3000, variant_type='snv'):\n    \n    if variant_type == 'snv':\n        nr_variants = case_obj['nr_variants']\n    else:\n        nr_variants = case_obj['nr_sv_variants']\n\n    nr_inserted = 0\n    case_id = case_obj['case_id']\n    if skip_case_id:\n        case_id = None\n    \n    with click.progressbar(vcf_obj, label=\"Inserting variants\",length=nr_variants) as bar:\n\n        variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)\n\n    if variant_type == 'sv':\n        for sv_variant in variants:\n            if not sv_variant:\n                continue\n            adapter.add_structural_variant(variant=sv_variant, max_window=max_window)\n            nr_inserted += 1\n\n    if variant_type == 'snv':\n        nr_inserted = adapter.add_variants(variants)\n\n    LOG.info(\"Inserted %s variants of type %s\", nr_inserted, variant_type)\n\n    return nr_inserted", "docstring": "Load variants for a family into the database.\n\nArgs:\nadapter (loqusdb.plugins.Adapter): initialized plugin\ncase_obj(Case): dict with case information\nnr_variants(int)\nskip_case_id (bool): whether to include the case id on variant level\nor not\ngq_treshold(int)\nmax_window(int): Specify the max size for sv windows\nvariant_type(str): 'sv' or 'snv'\n\nReturns:\nnr_inserted(int)", "source": "juraj-google-style"}
{"code": "def _process_arguments(arguments):\n    \n    if arguments is None: return \"\"\n    result = \"\"\n    for key, value in arguments.items():\n        if not key.startswith(\"bokeh-\"):\n            result += \"&{}={}\".format(quote_plus(str(key)), quote_plus(str(value)))\n    return result", "docstring": "Return user-supplied HTML arguments to add to a Bokeh server URL.\n\nArgs:\narguments (dict[str, object]) :\nKey/value pairs to add to the URL\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def getFingerprintForExpression(self, body, sparsity=1.0):\n        \n        return self._expressions.resolveExpression(self._retina, body, sparsity)", "docstring": "Resolve an expression\nArgs:\nbody, ExpressionOperation: The JSON encoded expression to be evaluated (required)\nsparsity, float: Sparsify the resulting expression to this percentage (optional)\nReturns:\nFingerprint\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def ExpandGlobs(path, opts=None):\n    precondition.AssertType(path, Text)\n    if (not path):\n        raise ValueError('Path is empty')\n    if (not _IsAbsolutePath(path, opts)):\n        raise ValueError((\"Path '%s' is not absolute\" % path))\n    if ((opts is not None) and (opts.pathtype == rdf_paths.PathSpec.PathType.REGISTRY)):\n        (root_dir, tail) = path.replace('\\\\', '/').lstrip('/').split('/', 1)\n        components = list(ParsePath(tail, opts=opts))\n    else:\n        (drive, tail) = os.path.splitdrive(path)\n        root_dir = os.path.join(drive, os.path.sep).upper()\n        components = list(ParsePath(tail[1:], opts=opts))\n    return _ExpandComponents(root_dir, components)", "docstring": "Performs glob expansion on a given path.\n\nPath can contain regular glob elements (such as `**`, `*`, `?`, `[a-z]`). For\nexample, having files `foo`, `bar`, `baz` glob expansion of `ba?` will yield\n`bar` and `baz`.\n\nArgs:\npath: A path to expand.\nopts: A `PathOpts` object.\n\nReturns:\nGenerator over all possible glob expansions of a given path.\n\nRaises:\nValueError: If given path is empty or relative.", "source": "codesearchnet"}
{"code": "def subdivide_with(self, branches, join_function, name='mixed'):\n    return _subdivide_context(self, branches, join_function, name)", "docstring": "Branches this pretty tensor and uses an explicit join function.\n\nThis should be used in a with statement, for example to fork and join with\na sum:\n\nwith pt.subdivide_with(2, tf.add_n) as [a, b]:\na...\nb...\n\nArgs:\nbranches: The number of branches.\njoin_function: A function to use when rejoining.\nname: A base name for this branch.\nReturns:\nA python context manager to use in a with statement that supplies a\nsequence of tensors with one per branch.\nRaises:\nValueError: if join_function is None.", "source": "codesearchnet"}
{"code": "def get_callback_task(self, *args, **kwargs):\n    \n    if not self.async:\n      raise UnexpectedPipelineError(\n          'May only call get_callback_task() method for asynchronous pipelines.')\n\n    params = kwargs.get('params', {})\n    kwargs['params'] = params\n    params['pipeline_id'] = self._pipeline_key.name()\n    kwargs['url'] = self.base_path + '/callback'\n    kwargs['method'] = 'POST'\n    return taskqueue.Task(*args, **kwargs)", "docstring": "Returns a task for calling back this Pipeline.\n\nArgs:\nparams: Keyword argument containing a dictionary of key/value pairs\nthat will be passed to the callback when it is executed.\nargs, kwargs: Passed to the taskqueue.Task constructor. Use these\narguments to set the task name (for idempotence), etc.\n\nReturns:\nA taskqueue.Task instance that must be enqueued by the caller.", "source": "juraj-google-style"}
{"code": "def do_dock6_flexible(self, ligand_path, force_rerun=False):\n        \n        log.debug('{}: running DOCK6...'.format(self.id))\n\n        ligand_name = os.path.basename(ligand_path).split('.')[0]\n        in_name = op.join(self.dock_dir, \"{}_{}_flexdock.in\".format(self.id, ligand_name))\n        out_name = op.join(self.dock_dir, \"{}_{}_flexdock.out\".format(self.id, ligand_name))\n\n        conformers_out = op.join(self.dock_dir, '{}_{}_flexdock_conformers.mol2'.format(self.id, ligand_name))\n        scored_out = op.join(self.dock_dir, '{}_{}_flexdock_scored.mol2'.format(self.id, ligand_name))\n        ranked_out = op.join(self.dock_dir, '{}_{}_flexdock_ranked.mol2'.format(self.id, ligand_name))\n\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=ranked_out):\n            with open(in_name, \"w\") as f:\n                dock_text = .format(ligand_path, op.basename(self.sphsel_path), op.splitext(op.basename(self.grid_path))[0],\n                   op.splitext(op.basename(self.grid_path))[0], self.amb_file, self.flex1_file, self.flex2_file,\n                   self.id, ligand_name)\n\n                f.write(dock_text)\n\n            os.chdir(self.dock_dir)\n            cmd = \"dock6 -i {} -o {} -v\".format(in_name, out_name)\n            os.system(cmd)\n\n        if ssbio.utils.is_non_zero_file(ranked_out):\n            self.dock_flexible_outfile = out_name\n            self.dock_flexible_conformers_result = conformers_out\n            self.dock_flexible_scored_result = scored_out\n            log.debug('{}: successful docking!'.format(self.dock_flexible_outfile))\n        else:\n            log.error('{}+{}: empty DOCK6 ranked file, execution error (or ligand failed to dock)'.format(self.id,\n                                                                                                          op.basename(ligand_path)))", "docstring": "Dock a ligand to the protein.\n\nArgs:\nligand_path (str): Path to ligand (mol2 format) to dock to protein\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"}
{"code": "def modify_object(self, modification, obj):\n        \n        d = obj.as_dict()\n        self.modify(modification, d)\n        return obj.from_dict(d)", "docstring": "Modify an object that supports pymatgen's as_dict() and from_dict API.\n\nArgs:\nmodification (dict): Modification must be {action_keyword :\nsettings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}\nobj (object): Object to modify", "source": "juraj-google-style"}
{"code": "def series_expand(self, param: Symbol, about, order: int):\n        \n        s = self.shape\n        emats = zip(*[o.series_expand(param, about, order)\n                      for o in self.matrix.ravel()])\n        return tuple((Matrix(np_array(em).reshape(s)) for em in emats))", "docstring": "Expand the matrix expression as a truncated power series in a scalar\nparameter.\n\nArgs:\nparam: Expansion parameter.\nabout (.Scalar): Point about which to expand.\norder: Maximum order of expansion >= 0\n\nReturns:\ntuple of length (order+1), where the entries are the expansion\ncoefficients.", "source": "juraj-google-style"}
{"code": "def load(tiff_filename):\n    \n    \n    tiff_filename = os.path.expanduser(tiff_filename)\n\n    try:\n        img = tiff.imread(tiff_filename)\n    except Exception as e:\n        raise ValueError(\"Could not load file {0} for conversion.\"\n                         .format(tiff_filename))\n        raise\n\n    return numpy.array(img)", "docstring": "Import a TIFF file into a numpy array.\n\nArguments:\ntiff_filename:  A string filename of a TIFF datafile\n\nReturns:\nA numpy array with data from the TIFF file", "source": "juraj-google-style"}
{"code": "def Dump(obj):\n  \n  text = yaml.safe_dump(obj, default_flow_style=False, allow_unicode=True)\n\n  if compatibility.PY2:\n    text = text.decode(\"utf-8\")\n\n  return text", "docstring": "Stringifies a Python object into its YAML representation.\n\nArgs:\nobj: A Python object to convert to YAML.\n\nReturns:\nA YAML representation of the given object.", "source": "juraj-google-style"}
{"code": "def Verify(self, public_key):\n    if (self.digest_type != self.HashType.SHA256):\n        raise rdfvalue.DecodeError('Unsupported digest.')\n    if (self.signature_type not in [self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS]):\n        raise rdfvalue.DecodeError('Unsupported signature type.')\n    try:\n        public_key.Verify(self.data, self.signature)\n    except InvalidSignature as e:\n        raise rdfvalue.DecodeError(('Could not verify blob. Error: %s' % e))\n    return True", "docstring": "Verify the data in this blob.\n\nArgs:\npublic_key: The public key to use for verification.\n\nReturns:\nTrue when verification succeeds.\n\nRaises:\nrdfvalue.DecodeError if the data cannot be verified.", "source": "codesearchnet"}
{"code": "def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):\n    if (not isinstance(scf_task, ScfTask)):\n        raise TypeError(('task `%s` does not inherit from ScfTask' % scf_task))\n    multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)\n    ddk_tasks = []\n    for ddk_inp in multi_ddk:\n        ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: 'WFK'})\n        ddk_tasks.append(ddk_task)\n    bec_deps = {ddk_task: 'DDK' for ddk_task in ddk_tasks}\n    bec_deps.update({scf_task: 'WFK'})\n    bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance)\n    bec_tasks = []\n    for bec_inp in bec_inputs:\n        bec_task = self.register_bec_task(bec_inp, deps=bec_deps)\n        bec_tasks.append(bec_task)\n    return (ddk_tasks, bec_tasks)", "docstring": "Build tasks for the computation of Born effective charges and add them to the work.\n\nArgs:\nscf_task: ScfTask object.\nddk_tolerance: dict {\"varname\": value} with the tolerance used in the DDK run.\nNone to use AbiPy default.\nph_tolerance: dict {\"varname\": value} with the tolerance used in the phonon run.\nNone to use AbiPy default.\n\nReturn:\n(ddk_tasks, bec_tasks)", "source": "codesearchnet"}
{"code": "def deconv_output_length(input_length, filter_size, padding, output_padding=None, stride=0, dilation=1):\n    assert padding in {'same', 'valid', 'full'}\n    if input_length is None:\n        return None\n    filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n    if output_padding is None:\n        if padding == 'valid':\n            length = input_length * stride + max(filter_size - stride, 0)\n        elif padding == 'full':\n            length = input_length * stride - (stride + filter_size - 2)\n        elif padding == 'same':\n            length = input_length * stride\n    else:\n        if padding == 'same':\n            pad = filter_size // 2\n        elif padding == 'valid':\n            pad = 0\n        elif padding == 'full':\n            pad = filter_size - 1\n        length = (input_length - 1) * stride + filter_size - 2 * pad + output_padding\n    return length", "docstring": "Determines output length of a transposed convolution given input length.\n\nArgs:\ninput_length: Integer.\nfilter_size: Integer.\npadding: one of `\"same\"`, `\"valid\"`, `\"full\"`.\noutput_padding: Integer, amount of padding along the output dimension. Can\nbe set to `None` in which case the output length is inferred.\nstride: Integer.\ndilation: Integer.\n\nReturns:\nThe output length (integer).", "source": "github-repos"}
{"code": "def SplitPatch(data):\n\t\n\tpatches = []\n\tfilename = None\n\tdiff = []\n\tfor line in data.splitlines(True):\n\t\tnew_filename = None\n\t\tif line.startswith('Index:'):\n\t\t\tunused, new_filename = line.split(':', 1)\n\t\t\tnew_filename = new_filename.strip()\n\t\telif line.startswith('Property changes on:'):\n\t\t\tunused, temp_filename = line.split(':', 1)\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\ttemp_filename = to_slash(temp_filename.strip())\n\t\t\tif temp_filename != filename:\n\t\t\t\t\n\t\t\t\tnew_filename = temp_filename\n\t\tif new_filename:\n\t\t\tif filename and diff:\n\t\t\t\tpatches.append((filename, ''.join(diff)))\n\t\t\tfilename = new_filename\n\t\t\tdiff = [line]\n\t\t\tcontinue\n\t\tif diff is not None:\n\t\t\tdiff.append(line)\n\tif filename and diff:\n\t\tpatches.append((filename, ''.join(diff)))\n\treturn patches", "docstring": "Splits a patch into separate pieces for each file.\n\nArgs:\ndata: A string containing the output of svn diff.\n\nReturns:\nA list of 2-tuple (filename, text) where text is the svn diff output\npertaining to filename.", "source": "juraj-google-style"}
{"code": "def validate(self):\n    if (self.value is not None):\n        if (not isinstance(self.value, six.integer_types)):\n            raise TypeError('expected (one of): {0}, observed: {1}'.format(six.integer_types, type(self.value)))", "docstring": "Verify that the value of the BigInteger is valid.\n\nRaises:\nTypeError: if the value is not of type int or long", "source": "codesearchnet"}
{"code": "def autosave(self, index):\n    finfo = self.stack.data[index]\n    document = finfo.editor.document()\n    if ((not document.changed_since_autosave) or finfo.newly_created):\n        return\n    autosave_filename = self.get_autosave_filename(finfo.filename)\n    logger.debug('Autosaving %s to %s', finfo.filename, autosave_filename)\n    try:\n        self.stack._write_to_file(finfo, autosave_filename)\n        document.changed_since_autosave = False\n    except EnvironmentError as error:\n        action = _('Error while autosaving {} to {}').format(finfo.filename, autosave_filename)\n        msgbox = AutosaveErrorDialog(action, error)\n        msgbox.exec_if_enabled()", "docstring": "Autosave a file.\n\nDo nothing if the `changed_since_autosave` flag is not set or the file\nis newly created (and thus not named by the user). Otherwise, save a\ncopy of the file with the name given by `self.get_autosave_filename()`\nand clear the `changed_since_autosave` flag. Errors raised when saving\nare silently ignored.\n\nArgs:\nindex (int): index into self.stack.data", "source": "codesearchnet"}
{"code": "class JanusVQVAEOutput(ModelOutput):\n    decoded_pixel_values: Optional[torch.FloatTensor] = None\n    embedding_loss: torch.FloatTensor = None", "docstring": "Base class for Janus VQ-VAE model outputs.\nArgs:\ndecoded_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nReconstructed pixel values after encoding and decoding the input.\nembedding_loss (`torch.FloatTensor`):\nEmbedding loss.", "source": "github-repos"}
{"code": "def __copy_extracted(self, path, destination):\n        \n\n        unpacked_dir = self.filename + '.unpacked'\n        if not os.path.isdir(unpacked_dir):\n            LOGGER.warn(\n                'Failed to copy extracted file %s, no extracted dir',\n                path\n            )\n\n            return\n\n        source_path = os.path.join(unpacked_dir, path)\n\n        if not os.path.exists(source_path):\n            LOGGER.warn(\n                'Failed to copy extracted file %s, does not exist',\n                path\n            )\n\n            return\n\n        destination_path = os.path.join(destination, path)\n        shutil.copyfile(source_path, destination_path)", "docstring": "Copies a file that was already extracted to the destination directory.\n\nArgs:\npath (str):\nRelative (to the root of the archive) of the file to copy.\n\ndestination (str):\nDirectory to extract the archive to.", "source": "juraj-google-style"}
{"code": "def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):\n    if self.feature_file:\n        raise ValueError('Feature file associated with sequence, please remove file association to append additional features.')\n    if (not feat_type):\n        feat_type = 'Manually added protein sequence single residue feature'\n    newfeat = SeqFeature(location=FeatureLocation(ExactPosition((resnum - 1)), ExactPosition(resnum)), type=feat_type, id=feat_id, qualifiers=qualifiers)\n    self.features.append(newfeat)", "docstring": "Add a feature to the features list describing a single residue.\n\nArgs:\nresnum (int): Protein sequence residue number\nfeat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')\nfeat_id (str, optional): Optional ID of the feature type (ie. 'TM1')", "source": "codesearchnet"}
{"code": "def matrix_worker(data):\n    \n    matrix = data['matrix']\n    Logger.get_logger(__name__ + '.worker').info(\n        \"Processing pipeline for matrix entry '%s'\", matrix['name'])\n\n    env = matrix['env'].copy()\n    env.update({'PIPELINE_MATRIX': matrix['name']})\n\n    pipeline = Pipeline(model=data['model'], env=env, options=data['options'])\n    pipeline.hooks = data['hooks']\n    return pipeline.process(data['pipeline'])", "docstring": "Run pipelines in parallel.\n\nArgs:\ndata(dict): parameters for the pipeline (model, options, ...).\nReturns:\ndict: with two fields: success True/False and captured output (list of str).", "source": "juraj-google-style"}
{"code": "def validate_user_name(self, user_name, timeout=(- 1)):\n    uri = ((self.URI + '/validateLoginName/') + user_name)\n    return self._client.create_with_zero_body(uri=uri, timeout=timeout)", "docstring": "Verifies if a userName is already in use.\n\nArgs:\nuser_name:\nThe userName to be verified.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns: True if user name is in use, False if it is not.", "source": "codesearchnet"}
{"code": "def add_status_parser(subparsers, parent_parser):\n    parser = subparsers.add_parser('status', help='Displays information about validator status', description=\"Provides a subcommand to show a validator's status\")\n    grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')\n    grand_parsers.required = True\n    add_status_show_parser(grand_parsers, parent_parser)", "docstring": "Adds argument parser for the status command\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "codesearchnet"}
{"code": "def swo_num_bytes(self):\n        \n        res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_NUM_BYTES,\n                                             0)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return res", "docstring": "Retrieves the number of bytes in the SWO buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nNumber of bytes in the SWO buffer.\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def trim_whitespace(self, text):\n    lines = text.split('\\n')\n    new_lines = [x.lstrip() for x in lines]\n    return '\\n'.join(new_lines)", "docstring": "Remove leading whitespace from each line of a multiline string\n\nArgs:\ntext (string): The text to be unindented\n\nReturns:\nstring: The unindented block of text", "source": "codesearchnet"}
{"code": "def to_json(self):\n    web_resp = collections.OrderedDict()\n    web_resp['status_code'] = self.status_code\n    web_resp['status_text'] = dict(HTTP_CODES).get(self.status_code)\n    web_resp['data'] = (self.data if (self.data is not None) else {})\n    web_resp['errors'] = (self.errors or [])\n    return web_resp", "docstring": "Short cut for JSON response service data.\n\nReturns:\nDict that implements JSON interface.", "source": "codesearchnet"}
{"code": "def persist_config(run, session, cfg):\n    from benchbuild.utils import schema as s\n    for cfg_elem in cfg:\n        session.add(s.Config(name=cfg_elem, value=cfg[cfg_elem], run_id=run.id))", "docstring": "Persist the configuration as key-value pairs.\n\nArgs:\nrun: The run we attach the config to.\nsession: The db transaction we belong to.\ncfg: The configuration we want to persist.", "source": "codesearchnet"}
{"code": "def abort(cls, mapreduce_id, **kwargs):\n    cls(key_name=('%s:%s' % (mapreduce_id, cls._KEY_NAME)), command=cls.ABORT).put(**kwargs)", "docstring": "Causes a job to abort.\n\nArgs:\nmapreduce_id: The job to abort. Not verified as a valid job.", "source": "codesearchnet"}
{"code": "def get_counter(self, counter_name, default=0):\n    \n    self.__update_state()\n    return self._state.counters_map.get(counter_name, default)", "docstring": "Get the value of the named counter from this job.\n\nWhen a job is running, counter values won't be very accurate.\n\nArgs:\ncounter_name: name of the counter in string.\ndefault: default value if the counter doesn't exist.\n\nReturns:\nValue in int of the named counter.", "source": "juraj-google-style"}
{"code": "def create_table_from(self, name, src):\n    query = self.execute(\"SELECT sql FROM sqlite_master WHERE type='table' and name=?\", (src,))\n    try:\n        cmd = query.fetchone()[0]\n    except TypeError:\n        raise sql.OperationalError(\"Cannot copy non-existent table '{0}'\".format(src))\n    new_cmd = re.sub('(CREATE TABLE) \\\\w+', ('\\\\1 ' + name), cmd, re.IGNORECASE)\n    self.execute(new_cmd)", "docstring": "Create a new table with same schema as the source.\n\nIf the named table already exists, nothing happens.\n\nArguments:\n\nname (str): The name of the table to create.\nsrc (str): The name of the source table to duplicate.\n\nRaises:\n\nsql.OperationalError: If source table does not exist.", "source": "codesearchnet"}
{"code": "def from_json(cls, json):\n    \n    if json[\"name\"] in _KEYRANGES_CLASSES:\n      return _KEYRANGES_CLASSES[json[\"name\"]].from_json(json)\n    raise ValueError(\"Invalid json %s\", json)", "docstring": "Deserialize from json.\n\nArgs:\njson: a dict of json compatible fields.\n\nReturns:\na KeyRanges object.\n\nRaises:\nValueError: if the json is invalid.", "source": "juraj-google-style"}
{"code": "def dockprep(self, force_rerun=False):\n    log.debug('{}: running dock preparation...'.format(self.id))\n    prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))\n    prep_py = op.join(self.dock_dir, 'prep.py')\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):\n        with open(prep_py, 'w') as f:\n            f.write('import chimera\\n')\n            f.write('from DockPrep import prep\\n')\n            f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\\n')\n            f.write('prep(models)\\n')\n            f.write('from WriteMol2 import writeMol2\\n')\n            f.write('writeMol2(models, \"{}\")\\n'.format(prep_mol2))\n        cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)\n        os.system(cmd)\n        os.remove(prep_py)\n        os.remove('{}c'.format(prep_py))\n    if ssbio.utils.is_non_zero_file(prep_mol2):\n        self.dockprep_path = prep_mol2\n        log.debug('{}: successful dockprep execution'.format(self.dockprep_path))\n    else:\n        log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path))", "docstring": "Prepare a PDB file for docking by first converting it to mol2 format.\n\nArgs:\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "codesearchnet"}
{"code": "def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):\n    \n    \n    if remove_all_metadata_fields:\n        for df in concated_meta_dfs:\n            df.drop(df.columns, axis=1, inplace=True)\n\n    all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0)\n\n    \n    \n    n_rows = all_concated_meta_df.shape[0]\n    logger.debug(\"all_concated_meta_df.shape[0]: {}\".format(n_rows))\n    n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs])\n    assert n_rows == n_rows_cumulative\n\n    \n    all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1)\n\n    return all_concated_meta_df_sorted", "docstring": "Assemble the concatenated metadata dfs together. For example,\nif horizontally concatenating, the concatenated metadata dfs are the\ncolumn metadata dfs. Both indices are sorted.\n\nArgs:\nconcated_meta_dfs (list of pandas dfs)\n\nReturns:\nall_concated_meta_df_sorted (pandas df)", "source": "juraj-google-style"}
{"code": "def stringify(self, string, phrases, parent=None):\n    last_tag = 0\n    beauty = ''\n    for phrase in phrases:\n        beauty += string[last_tag:phrase.opening]\n        if ((phrase.string in self.always) and (not phrase.override)):\n            phrase.style = self.always[phrase.string]\n        if phrase.arguments:\n            combination = 0\n            for i in phrase.arguments:\n                try:\n                    combination |= self.positional[i]\n                except IndexError:\n                    raise errors.ArgumentError(\"Positional argument '{0}' is out of range!\".format(i))\n            phrase.style |= combination\n        elif ((phrase.string not in self.always) or phrase.increment or phrase.override):\n            try:\n                combination = self.positional[self.counter]\n                if (phrase.increment or (not phrase.override)):\n                    self.counter += 1\n            except IndexError:\n                self.raise_not_enough_arguments(phrase.string)\n            phrase.style |= combination\n        phrase.style = flags.codify(phrase.style)\n        if phrase.nested:\n            phrase.string = self.stringify(phrase.string, phrase.nested, phrase)\n        reset = (parent.style if parent else '')\n        beauty += '\\x1b[{0}m{1}\\x1b[0;{2}m'.format(phrase.style, phrase, reset)\n        last_tag = (phrase.closing + 1)\n    beauty += string[last_tag:]\n    return beauty", "docstring": "Stringifies phrases.\n\nAfter parsing of the string via self.parse(), this method takes the\nescaped string and the list of phrases returned by self.parse() and\nreplaces the original phrases (with tags) with the Phrase-objects in\nthe list and adds the appropriate flag-combinations as determined by\nthe string or the position of the phrase (the string if it's in\nself.always, i.e. an 'always' argument). This method also works\nrecursively to handle nested phrases (and resetting of parent-phrase\nstyles).\n\nArguments:\nstring (str): The escaped string returned by self.parse().\nphrases (list): The list of Phrase-objects returned by self.parse().\nparent (Phrase): For recursive calls, the current parent Phrase.\n\nReturns:\nThe finished, beautifully beautified string.\n\nRaises:\nerrors.ArgumentError: If more positional arguments are requested\nthan were supplied.", "source": "codesearchnet"}
{"code": "def default_scan(self, region='mainland', expected_num=20, val_thr_num=4, queue_timeout=3, val_timeout=5, out_file='proxies.json', src_files=None):\n    if (expected_num > 30):\n        self.logger.warn('The more proxy you expect, the more time it will take. It is highly recommended to limit the expected num under 30.')\n    proxy_scanner = ProxyScanner()\n    if (src_files is None):\n        src_files = []\n    elif isinstance(src_files, str):\n        src_files = [src_files]\n    for filename in src_files:\n        proxy_scanner.register_func(proxy_scanner.scan_file, {'src_file': filename})\n    if (region == 'mainland'):\n        proxy_scanner.register_func(proxy_scanner.scan_cnproxy, {})\n    elif (region == 'overseas'):\n        proxy_scanner.register_func(proxy_scanner.scan_free_proxy_list, {})\n    proxy_scanner.register_func(proxy_scanner.scan_ip84, {'region': region, 'page': 5})\n    proxy_scanner.register_func(proxy_scanner.scan_mimiip, {'region': region, 'page': 5})\n    self.scan(proxy_scanner, expected_num, val_thr_num, queue_timeout, val_timeout, out_file)", "docstring": "Default scan method, to simplify the usage of `scan` method.\n\nIt will register following scan functions:\n1. scan_file\n2. scan_cnproxy (if region is mainland)\n3. scan_free_proxy_list (if region is overseas)\n4. scan_ip84\n5. scan_mimiip\nAfter scanning, all the proxy info will be saved in out_file.\n\nArgs:\nregion: Either 'mainland' or 'overseas'\nexpected_num: An integer indicating the expected number of proxies,\nif this argument is set too great, it may take long to\nfinish scanning process.\nval_thr_num: Number of threads used for validating proxies.\nqueue_timeout: An integer indicating the timeout for getting a\ncandidate proxy from the queue.\nval_timeout: An integer indicating the timeout when connecting the\ntest url using a candidate proxy.\nout_file: the file name of the output file saving all the proxy info\nsrc_files: A list of file names to scan", "source": "codesearchnet"}
{"code": "def stop(self, block: bool=False, timeout: Optional[float]=None):\n    if self._generation_thread is None:\n        logger.warning('Manager not started.')\n        return\n    if not self.stop_event.is_set():\n        self.stop_event.set()\n        logger.info('Stopping continuous batching manager...')\n    if block:\n        self.join(timeout)", "docstring": "Signal the background thread to stop.\n\nArgs:\nblock: Whether to wait for the thread to stop\ntimeout: Maximum time to wait for the thread to stop", "source": "github-repos"}
{"code": "def process(self, element, *args, **kwargs):\n    yield {'text': element.data.decode('utf-8'), 'id': element.attributes['id']}", "docstring": "For each element in the input PCollection, retrieve the id and decode the bytes into string\n\nArgs:\nelement: The element that is being processed.", "source": "github-repos"}
{"code": "def read(self, offset, size):\n    \n    self._file_object.seek(offset, os.SEEK_SET)\n    return self._file_object.read(size)", "docstring": "Reads a byte string from the image object at the specified offset.\n\nArgs:\noffset (int): offset where to start reading.\nsize (int): number of bytes to read.\n\nReturns:\nbytes: data read.", "source": "juraj-google-style"}
{"code": "def mix_over_posterior_draws(means, variances):\n    with tf.compat.v1.name_scope('mix_over_posterior_draws', values=[means, variances]):\n        num_posterior_draws = dist_util.prefer_static_value(tf.shape(input=means))[0]\n        component_observations = tfd.Independent(distribution=tfd.Normal(loc=dist_util.move_dimension(means, 0, (- 2)), scale=tf.sqrt(dist_util.move_dimension(variances, 0, (- 2)))), reinterpreted_batch_ndims=1)\n        return tfd.MixtureSameFamily(mixture_distribution=tfd.Categorical(logits=tf.zeros([num_posterior_draws], dtype=component_observations.dtype)), components_distribution=component_observations)", "docstring": "Construct a predictive normal distribution that mixes over posterior draws.\n\nArgs:\nmeans: float `Tensor` of shape\n`[num_posterior_draws, ..., num_timesteps]`.\nvariances: float `Tensor` of shape\n`[num_posterior_draws, ..., num_timesteps]`.\n\nReturns:\nmixture_dist: `tfd.MixtureSameFamily(tfd.Independent(tfd.Normal))` instance\nrepresenting a uniform mixture over the posterior samples, with\n`batch_shape = ...` and `event_shape = [num_timesteps]`.", "source": "codesearchnet"}
{"code": "def _ParseInsserv(self, data):\n    \n    p = config_file.FieldParser()\n    entries = p.ParseEntries(data)\n    raw = {e[0]: e[1:] for e in entries}\n    \n    facilities = {}\n    for k, v in iteritems(raw):\n      \n      k = k.replace(\"<\", \"\").replace(\">\", \"\")\n      facilities[k] = v\n    for k, vals in iteritems(facilities):\n      self.insserv[k] = []\n      for v in vals:\n        self.insserv[k].extend(self._InsservExpander(facilities, v))", "docstring": "/etc/insserv.conf* entries define system facilities.\n\nFull format details are in man 8 insserv, but the basic structure is:\n$variable          facility1 facility2\n$second_variable   facility3 $variable\n\nAny init script that specifies Required-Start: $second_variable needs to be\nexpanded to facility1 facility2 facility3.\n\nArgs:\ndata: A string of insserv definitions.", "source": "juraj-google-style"}
{"code": "def QA_fetch_user(user_cookie, db=DATABASE):\n    collection = DATABASE.account\n    return [res for res in collection.find({'user_cookie': user_cookie}, {'_id': 0})]", "docstring": "get the user\n\nArguments:\nuser_cookie : str the unique cookie_id for a user\nKeyword Arguments:\ndb: database for query\n\nReturns:\nlist ---  [ACCOUNT]", "source": "codesearchnet"}
{"code": "def __init__(self, prev_hash=None, prev_index=None):\n        \n        self.PrevHash = prev_hash\n        self.PrevIndex = prev_index", "docstring": "Create an instance.\n\nArgs:\nprev_hash (UInt256): hash of the previous output.\nprev_index (int):", "source": "juraj-google-style"}
{"code": "def _construct(configdict, prefix, ua):\n        \n        \n        if not ua:\n            raise UserAgentError(\"User_agent parameter missing. It can be your project's name for example.\")\n        preprefix = configdict.get('preprefix')\n        if preprefix:\n            user_agent = '%s:' % preprefix\n        else:\n            user_agent = ''\n        if prefix:\n            user_agent = '%s%s-' % (user_agent, prefix)\n        user_agent = '%s%s' % (user_agent, ua)\n        return user_agent", "docstring": "Construct user agent\n\nArgs:\nconfigdict (str): Additional configuration for user agent\nprefix (str): Text to put at start of user agent\nua (str): Custom user agent text\n\nReturns:\nstr: Full user agent string", "source": "juraj-google-style"}
{"code": "def load(self):\n    if self.loaded:\n        LOGGER.debug('Already loaded')\n        return\n    try:\n        (basepath, dirs, _) = os.walk(self.path).next()\n    except StopIteration:\n        raise MalformedWorkdir(('Empty dir %s' % self.path))\n    full_path = partial(os.path.join, basepath)\n    found_current = False\n    for dirname in dirs:\n        if ((dirname == 'current') and os.path.islink(full_path('current'))):\n            self.current = os.path.basename(os.readlink(full_path('current')))\n            found_current = True\n            continue\n        elif (dirname == 'current'):\n            raise MalformedWorkdir(('\"%s/current\" should be a soft link' % self.path))\n        self.prefixes[dirname] = self.prefix_class(prefix=self.join(dirname))\n    if (not found_current):\n        raise MalformedWorkdir(('\"%s/current\" should exist and be a soft link' % self.path))\n    self._update_current()", "docstring": "Loads the prefixes that are available is the workdir\n\nReturns:\nNone\n\nRaises:\nMalformedWorkdir: if the wordir is malformed", "source": "codesearchnet"}
{"code": "def init_app(self, app):\n    app.ldap3_login_manager = self\n    servers = list(self._server_pool)\n    for s in servers:\n        self._server_pool.remove(s)\n    self.init_config(app.config)\n    if hasattr(app, 'teardown_appcontext'):\n        app.teardown_appcontext(self.teardown)\n    else:\n        app.teardown_request(self.teardown)\n    self.app = app", "docstring": "Configures this extension with the given app. This registers an\n``teardown_appcontext`` call, and attaches this ``LDAP3LoginManager``\nto it as ``app.ldap3_login_manager``.\n\nArgs:\napp (flask.Flask): The flask app to initialise with", "source": "codesearchnet"}
{"code": "def _get_cached_time(self):\n    if (not self._cached_time):\n        self._cached_time = self._meta.datetime.utcnow()\n    return self._cached_time", "docstring": "Method that will allow for consistent modified and archived\ntimestamps.\n\nReturns:\nself.Meta.datetime: This method will return a datetime that is\ncompatible with the current class's datetime library.", "source": "codesearchnet"}
{"code": "def create_datastore_for_topline(self, delete_first=0, path=None):\n        \n        \n        data = load_yaml(script_dir_plus_file(join('..', 'hdx_datasource_topline.yml'), Resource))\n        self.create_datastore_from_dict_schema(data, delete_first, path=path)", "docstring": "For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the built in\nYAML definition for a topline. If path is not supplied, the file is first downloaded from HDX.\n\nArgs:\ndelete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.\npath (Optional[str]): Local path to file that was uploaded. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def registry_key(self, key_name, value_name, value_type, **kwargs):\n    indicator_obj = RegistryKey(key_name, value_name, value_type, **kwargs)\n    return self._indicator(indicator_obj)", "docstring": "Add Registry Key data to Batch object.\n\nArgs:\nkey_name (str): The key_name value for this Indicator.\nvalue_name (str): The value_name value for this Indicator.\nvalue_type (str): The value_type value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of Registry Key.", "source": "codesearchnet"}
{"code": "def step(self, input_stream, value):\n        \n\n        reading = IOTileReading(input_stream.encode(), self.tick_count, value)\n        self.sensor_graph.process_input(input_stream, reading, self.rpc_executor)", "docstring": "Step the sensor graph through one since input.\n\nThe internal tick count is not advanced so this function may\nbe called as many times as desired to input specific conditions\nwithout simulation time passing.\n\nArgs:\ninput_stream (DataStream): The input stream to push the\nvalue into\nvalue (int): The reading value to push as an integer", "source": "juraj-google-style"}
{"code": "def df(self):\n    import pandas as pd\n    return pd.concat([w.df(uwi=True) for w in self])", "docstring": "Makes a pandas DataFrame containing Curve data for all the wells\nin the Project. The DataFrame has a dual index of well UWI and\ncurve Depths. Requires `pandas`.\n\nArgs:\nNo arguments.\n\nReturns:\n`pandas.DataFrame`.", "source": "codesearchnet"}
{"code": "def stack_colormap(lower, upper, n=256):\n    \n    A = get_cmap(lower)\n    B = get_cmap(upper)\n    name = \"%s-%s\" % (A.name, B.name)\n    lin = np.linspace(0, 1, n)\n    return array_cmap(np.vstack((A(lin), B(lin))), name, n=n)", "docstring": "Stacks two colormaps (``lower`` and ``upper``) such that\nlow half -> ``lower`` colors, high half -> ``upper`` colors\n\nArgs:\n\nlower (colormap): colormap for the lower half of the stacked colormap.\n\nupper (colormap): colormap for the upper half of the stacked colormap.\n\nn (int): Number of colormap steps. Default is ``256``.", "source": "juraj-google-style"}
{"code": "def get_site_orbital_dos(self, site, orbital):\n    return Dos(self.efermi, self.energies, self.pdos[site][orbital])", "docstring": "Get the Dos for a particular orbital of a particular site.\n\nArgs:\nsite: Site in Structure associated with CompleteDos.\norbital: Orbital in the site.\n\nReturns:\nDos containing densities for orbital of site.", "source": "codesearchnet"}
{"code": "def get_symmetric_wallace_tensor(self, tau):\n        \n        wallace = self.get_wallace_tensor(tau)\n        return Tensor(0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1])))", "docstring": "Gets the symmetrized wallace tensor for determining\nyield strength criteria.\n\nArgs:\ntau (3x3 array-like): stress at which to evaluate\nthe wallace tensor.", "source": "juraj-google-style"}
{"code": "def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True):\n    if (end == 0):\n        ext = filename_tmpl.split('.')[(- 1)]\n        end = len([name for name in scandir(frame_dir, ext)])\n    first_file = osp.join(frame_dir, filename_tmpl.format(start))\n    check_file_exist(first_file, ('The start frame not found: ' + first_file))\n    img = cv2.imread(first_file)\n    (height, width) = img.shape[:2]\n    resolution = (width, height)\n    vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, resolution)\n\n    def write_frame(file_idx):\n        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))\n        img = cv2.imread(filename)\n        vwriter.write(img)\n    if show_progress:\n        track_progress(write_frame, range(start, end))\n    else:\n        for i in range(start, end):\n            filename = osp.join(frame_dir, filename_tmpl.format(i))\n            img = cv2.imread(filename)\n            vwriter.write(img)\n    vwriter.release()", "docstring": "Read the frame images from a directory and join them as a video\n\nArgs:\nframe_dir (str): The directory containing video frames.\nvideo_file (str): Output filename.\nfps (float): FPS of the output video.\nfourcc (str): Fourcc of the output video, this should be compatible\nwith the output file type.\nfilename_tmpl (str): Filename template with the index as the variable.\nstart (int): Starting frame index.\nend (int): Ending frame index.\nshow_progress (bool): Whether to show a progress bar.", "source": "codesearchnet"}
{"code": "def _FormatDate(self, event):\n    \n    \n    \n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=event.timestamp)\n\n    year, month, day_of_month = date_time.GetDate()\n    try:\n      return '{0:04d}-{1:02d}-{2:02d}'.format(year, month, day_of_month)\n    except (TypeError, ValueError):\n      self._ReportEventError(event, (\n          'unable to copy timestamp: {0!s} to a human readable date. '\n          'Defaulting to: \"0000-00-00\"').format(event.timestamp))\n\n      return '0000-00-00'", "docstring": "Formats the date.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: date field.", "source": "juraj-google-style"}
{"code": "def custom_line_color_map(self, values):\n    if (not isinstance(values, list)):\n        raise TypeError('custom_line_color_map must be a list')\n    self.options['custom_line_color_map'] = values", "docstring": "Set the custom line color map.\n\nArgs:\nvalues (list): list of colors.\n\nRaises:\nTypeError: Custom line color map must be a list.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A Lxmert sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def solve_sweep_wavelength(self, structure, wavelengths, filename='wavelength_n_effs.dat', plot=True):\n    n_effs = []\n    for w in tqdm.tqdm(wavelengths, ncols=70):\n        structure.change_wavelength(w)\n        self.solve(structure)\n        n_effs.append(np.real(self.n_effs))\n    if filename:\n        self._write_n_effs_to_file(n_effs, (self._modes_directory + filename), wavelengths)\n        if plot:\n            if MPL:\n                title = '$n_{eff}$ vs Wavelength'\n                y_label = '$n_{eff}$'\n            else:\n                title = ('n_{effs} vs Wavelength' % x_label)\n                y_label = 'n_{eff}'\n            self._plot_n_effs((self._modes_directory + filename), (self._modes_directory + 'fraction_te.dat'), 'Wavelength', 'n_{eff}', title)\n    return n_effs", "docstring": "Solve for the effective indices of a fixed structure at\ndifferent wavelengths.\n\nArgs:\nstructure (Slabs): The target structure to solve\nfor modes.\nwavelengths (list): A list of wavelengths to sweep\nover.\nfilename (str): The nominal filename to use when saving the\neffective indices.  Defaults to 'wavelength_n_effs.dat'.\nplot (bool): `True` if plots should be generates,\notherwise `False`.  Default is `True`.\n\nReturns:\nlist: A list of the effective indices found for each wavelength.", "source": "codesearchnet"}
{"code": "def index(self, value, start=0, end=None):\n\t\t\n\t\ttry:\n\t\t\tindex = self._dict[value]\n\t\texcept KeyError:\n\t\t\traise ValueError\n\t\telse:\n\t\t\tstart = self._fix_neg_index(start)\n\t\t\tend = self._fix_end_index(end)\n\t\t\tif start <= index and index < end:\n\t\t\t\treturn index\n\t\t\telse:\n\t\t\t\traise ValueError", "docstring": "Return the index of value between start and end.\n\nBy default, the entire setlist is searched.\n\nThis runs in O(1)\n\nArgs:\nvalue: The value to find the index of\nstart (int): The index to start searching at (defaults to 0)\nend (int): The index to stop searching at (defaults to the end of the list)\nReturns:\nint: The index of the value\nRaises:\nValueError: If the value is not in the list or outside of start - end\nIndexError: If start or end are out of range", "source": "juraj-google-style"}
{"code": "def GetAdGroups(self, client_customer_id, campaign_id):\n    self.client.SetClientCustomerId(client_customer_id)\n    selector = {'fields': ['Id', 'Name', 'Status'], 'predicates': [{'field': 'CampaignId', 'operator': 'EQUALS', 'values': [campaign_id]}, {'field': 'Status', 'operator': 'NOT_EQUALS', 'values': ['REMOVED']}]}\n    adgroups = self.client.GetService('AdGroupService').get(selector)\n    if (int(adgroups['totalNumEntries']) > 0):\n        return adgroups['entries']\n    else:\n        return None", "docstring": "Retrieves all AdGroups for the given campaign that haven't been removed.\n\nArgs:\nclient_customer_id: str Client Customer Id being used in API request.\ncampaign_id: str id of the campaign for which to fetch ad groups.\n\nReturns:\nlist List of AdGroup data objects.", "source": "codesearchnet"}
{"code": "def indicator(self, indicator_type, summary, **kwargs):\n    indicator_obj = Indicator(indicator_type, summary, **kwargs)\n    return self._indicator(indicator_obj)", "docstring": "Add Indicator data to Batch object.\n\nArgs:\nindicator_type (str): The ThreatConnect define Indicator type.\nsummary (str): The value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of Indicator.", "source": "codesearchnet"}
{"code": "def memory_read8(self, addr, num_bytes, zone=None):\n    return self.memory_read(addr, num_bytes, zone=zone, nbits=8)", "docstring": "Reads memory from the target system in units of bytes.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_bytes (int): number of bytes to read\nzone (str): memory zone to read from\n\nReturns:\nList of bytes read from the target system.\n\nRaises:\nJLinkException: if memory could not be read.", "source": "codesearchnet"}
{"code": "def compile_dependencies(self, sourcepath, include_self=False):\n    items = self.inspector.parents(sourcepath)\n    if include_self:\n        items.add(sourcepath)\n    return filter(None, [self.compile_source(item) for item in items])", "docstring": "Apply compile on all dependencies\n\nArgs:\nsourcepath (string): Sass source path to compile to its\ndestination using project settings.\n\nKeyword Arguments:\ninclude_self (bool): If ``True`` the given sourcepath is add to\nitems to compile, else only its dependencies are compiled.", "source": "codesearchnet"}
{"code": "def _ParseFileEntry(self, knowledge_base, file_entry):\n    \n    root_key = self._GetPlistRootKey(file_entry)\n    if not root_key:\n      location = getattr(file_entry.path_spec, 'location', '')\n      raise errors.PreProcessFail((\n          'Unable to read: {0:s} plist: {1:s} with error: missing root '\n          'key.').format(self.ARTIFACT_DEFINITION_NAME, location))\n\n    try:\n      match = self._GetKeysDefaultEmpty(root_key, self._KEYS)\n    except KeyError as exception:\n      location = getattr(file_entry.path_spec, 'location', '')\n      raise errors.PreProcessFail(\n          'Unable to read: {0:s} plist: {1:s} with error: {2!s}'.format(\n              self.ARTIFACT_DEFINITION_NAME, location, exception))\n\n    name = match.get('name', [None])[0]\n    uid = match.get('uid', [None])[0]\n\n    if not name or not uid:\n      \n      return\n\n    user_account = artifacts.UserAccountArtifact(\n        identifier=uid, username=name)\n    user_account.group_identifier = match.get('gid', [None])[0]\n    user_account.full_name = match.get('realname', [None])[0]\n    user_account.shell = match.get('shell', [None])[0]\n    user_account.user_directory = match.get('home', [None])[0]\n\n    try:\n      knowledge_base.AddUserAccount(user_account)\n    except KeyError:\n      \n      pass", "docstring": "Parses artifact file system data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_entry (dfvfs.FileEntry): file entry that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def to_api_repr(self):\n    resource = copy.deepcopy(self._properties)\n    query_parameters = resource['query'].get('queryParameters')\n    if query_parameters:\n        if (query_parameters[0].get('name') is None):\n            resource['query']['parameterMode'] = 'POSITIONAL'\n        else:\n            resource['query']['parameterMode'] = 'NAMED'\n    return resource", "docstring": "Build an API representation of the query job config.\n\nReturns:\ndict: A dictionary in the format used by the BigQuery API.", "source": "codesearchnet"}
{"code": "def imread(path, grayscale=False, size=None, interpolate='bilinear', channel_first=False, as_uint16=False, num_channels=(- 1)):\n    _imread_before(grayscale, num_channels)\n    r_mode = (cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED)\n    img = _imread_helper(path, r_mode)\n    if (as_uint16 and (img.dtype != np.uint16)):\n        if (img.dtype == np.uint8):\n            logger.warning('You want to read image as uint16, but the original bit-depth is 8 bit.All pixel values are simply increased by 256 times.')\n            img = (img.astype(np.uint16) * 256)\n        else:\n            raise ValueError('casting {} to uint16 is not safe.'.format(img.dtype))\n    img = _cvtColor_helper(img, num_channels)\n    img = _imread_after(img, size, interpolate, channel_first, imresize)\n    return img", "docstring": "Read image by cv2 module.\n\nArgs:\npath (str or 'file object'): File path or object to read.\ngrayscale (bool):\nsize (tupple of int):\n(width, height).\nIf None, output img shape depends on the files to read.\nchannel_first (bool):\nThis argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).\nDefault value is False, which means the img shape is (height, width, channel).\ninterpolate (str):\nmust be one of [\"nearest\", \"box\", \"bilinear\", \"hamming\", \"bicubic\", \"lanczos\"].\nas_uint16 (bool):\nIf True, this function reads image as uint16.\nnum_channels (int):\nchannel size of output array.\nDefault is -1 which preserves raw image shape.\n\nReturns:\nnumpy.ndarray", "source": "codesearchnet"}
{"code": "def _get_genes(self, variant):\n        \n        ensembl_ids = []\n        hgnc_symbols = []\n        \n        for transcript in variant.transcripts:\n            if transcript.ensembl_id:\n                ensembl_ids.append(transcript.ensembl_id)\n            if transcript.hgnc_symbol:\n                hgnc_symbols.append(transcript.hgnc_symbol)\n        \n        genes = get_gene_info(\n                        ensembl_ids=ensembl_ids, \n                        hgnc_symbols=hgnc_symbols\n                        )\n        return genes", "docstring": "Add the genes for a variant\n\nGet the hgnc symbols from all transcripts and add them\nto the variant\n\nArgs:\nvariant (dict): A variant dictionary\n\nReturns:\ngenes (list): A list of Genes", "source": "juraj-google-style"}
{"code": "def start(self, request: Request) -> Response:\n    if (self._session_state != SessionState.ready):\n        raise RuntimeError('Session not ready')\n    response = Response()\n    (yield from self._prepare_fetch(request, response))\n    response.file_transfer_size = (yield from self._fetch_size(request))\n    if request.restart_value:\n        try:\n            (yield from self._commander.restart(request.restart_value))\n            response.restart_value = request.restart_value\n        except FTPServerError:\n            _logger.debug('Could not restart file.', exc_info=1)\n    (yield from self._open_data_stream())\n    command = Command('RETR', request.file_path)\n    (yield from self._begin_stream(command))\n    self._session_state = SessionState.file_request_sent\n    return response", "docstring": "Start a file or directory listing download.\n\nArgs:\nrequest: Request.\n\nReturns:\nA Response populated with the initial data connection reply.\n\nOnce the response is received, call :meth:`download`.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def indicator(self, data):\n        \n        \n        data = self.get_first_hash(data)\n        super(File, self).indicator(data)", "docstring": "Update the request URI to include the Indicator for specific indicator retrieval.\n\nArgs:\ndata (string): The indicator value", "source": "juraj-google-style"}
{"code": "def audio(self, tag, audiodata, step=None, sample_rate=44100):\n    audiodata = onp.array(audiodata)\n    if (step is None):\n        step = self._step\n    else:\n        self._step = step\n    audiodata = onp.clip(onp.squeeze(audiodata), (- 1), 1)\n    if (audiodata.ndim != 1):\n        raise ValueError('Audio data must be 1D.')\n    sample_list = (32767.0 * audiodata).astype(int).tolist()\n    wio = io.BytesIO()\n    wav_buf = wave.open(wio, 'wb')\n    wav_buf.setnchannels(1)\n    wav_buf.setsampwidth(2)\n    wav_buf.setframerate(sample_rate)\n    enc = b''.join([struct.pack('<h', v) for v in sample_list])\n    wav_buf.writeframes(enc)\n    wav_buf.close()\n    encoded_audio_bytes = wio.getvalue()\n    wio.close()\n    audio = Summary.Audio(sample_rate=sample_rate, num_channels=1, length_frames=len(sample_list), encoded_audio_string=encoded_audio_bytes, content_type='audio/wav')\n    summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])\n    self.add_summary(summary, step)", "docstring": "Saves audio.\n\nNB: single channel only right now.\n\nArgs:\ntag: str: label for this data\naudiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave\nstep: int: training step\nsample_rate: sample rate of passed in audio buffer", "source": "codesearchnet"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(LocateRequestPayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.MAXIMUM_ITEMS, local_buffer):\n        self._maximum_items = primitives.Integer(tag=enums.Tags.MAXIMUM_ITEMS)\n        self._maximum_items.read(local_buffer, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.OFFSET_ITEMS, local_buffer):\n        self._offset_items = primitives.Integer(tag=enums.Tags.OFFSET_ITEMS)\n        self._offset_items.read(local_buffer, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.STORAGE_STATUS_MASK, local_buffer):\n        self._storage_status_mask = primitives.Integer(tag=enums.Tags.STORAGE_STATUS_MASK)\n        self._storage_status_mask.read(local_buffer, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.OBJECT_GROUP_MEMBER, local_buffer):\n        self._object_group_member = primitives.Enumeration(enums.ObjectGroupMember, tag=enums.Tags.OBJECT_GROUP_MEMBER)\n        self._object_group_member.read(local_buffer, kmip_version=kmip_version)\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        while self.is_tag_next(enums.Tags.ATTRIBUTE, local_buffer):\n            attribute = objects.Attribute()\n            attribute.read(local_buffer, kmip_version=kmip_version)\n            self._attributes.append(attribute)\n    elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):\n        attributes = objects.Attributes()\n        attributes.read(local_buffer, kmip_version=kmip_version)\n        temp_attr = objects.convert_attributes_to_template_attribute(attributes)\n        self._attributes = temp_attr.attributes\n    else:\n        raise exceptions.InvalidKmipEncoding('The Locate request payload encoding is missing the attributes structure.')", "docstring": "Read the data encoding the Locate request payload and decode it into\nits constituent parts.\n\nArgs:\ninput_buffer (stream): A data buffer containing encoded object\ndata, supporting a read method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the attributes structure is missing\nfrom the encoded payload for KMIP 2.0+ encodings.", "source": "codesearchnet"}
{"code": "def changes(self, **kwargs):\n        \n        path = '%s/%s/changes' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge request changes.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: List of changes", "source": "juraj-google-style"}
{"code": "def _default_static_lib(self, obj_files):\n        \n        c_compiler = self.F90_COMPILER.c_compiler\n        static_lib_dir = os.path.join(self.build_lib, \"bezier\", \"lib\")\n        if not os.path.exists(static_lib_dir):\n            os.makedirs(static_lib_dir)\n        c_compiler.create_static_lib(\n            obj_files, \"bezier\", output_dir=static_lib_dir\n        )\n        \n        \n        \n        for extension in self.extensions:\n            extension.extra_objects[:] = [\n                os.path.join(self.build_temp, rel_path)\n                for rel_path in extension.extra_objects\n            ]", "docstring": "Create a static library (i.e. a ``.a`` / ``.lib`` file).\n\nArgs:\nobj_files (List[str]): List of paths of compiled object files.", "source": "juraj-google-style"}
{"code": "def _get_accumulator(tensor):\n    assert isinstance(tensor.graph, func_graph_module.FuncGraph)\n\n    def get_func_graph_output(t):\n        \n        for output in tensor.graph.outputs:\n            if output is t:\n                return t\n        identity_op = t.consumers()[0]\n        if identity_op.type == 'Identity' and any((identity_op.outputs[0] is t for t in tensor.graph.outputs)):\n            return identity_op.outputs[0]\n        return None\n    for consumer in tensor.consumers():\n        if consumer.type != 'TensorListPushBack':\n            continue\n        accum_input_idx = -1\n        for accum_input_idx, inp in enumerate(tensor.graph.inputs):\n            if inp is consumer.inputs[0]:\n                break\n        else:\n            continue\n        output = get_func_graph_output(consumer.outputs[0])\n        if output is None:\n            continue\n        for accum_output_idx, out in enumerate(tensor.graph.outputs):\n            if out is output:\n                if accum_input_idx == accum_output_idx:\n                    return output\n                break\n    return None", "docstring": "Returns TensorList if any containing accumulated values of tensor.\n\nWe try to find a pattern of the form:\n\ninput_tl   tensor\n\\        /\n(TensorListPushBack)\n|\noutput_tl\n\nwhich satisfies the following conditions:\n\n1. input_tl must be in tensor.graph.inputs.\n2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.\n3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_t).\n\noutput_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is\nreturned if such a pattern is found else None is returned.\n\nArgs:\ntensor: The Tensor to be accumulated.\n\nReturns:\nA variant tensor in the same graph as `tensor` or None if no accumulator is\nfound.", "source": "github-repos"}
{"code": "def wait_until_finished(\n        self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD\n    ):\n        \n        return self.manager.wait_until_finished(\n            uuid=self.uuid, refresh_period=refresh_period\n        )", "docstring": "Wait until a task instance with the given UUID is finished.\n\nArgs:\nrefresh_period (int, optional): How many seconds to wait\nbefore checking the task's status. Defaults to 5\nseconds.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nThis task instance model after it finished.", "source": "juraj-google-style"}
{"code": "def _semicircle_integral(dist_bins, idx):\n        \n        r = 1\n\n        x1 = dist_bins[idx]\n        x2 = dist_bins[idx + 1]\n\n        if dist_bins[idx] == 1:\n            area1 = 0.25 * math.pi * r ** 2\n        else:\n            area1 = 0.5 * ((x1 * math.sqrt(r ** 2 - x1 ** 2)) + (\n                    r ** 2 * math.atan(x1 / math.sqrt(r ** 2 - x1 ** 2))))\n\n        area2 = 0.5 * ((x2 * math.sqrt(r ** 2 - x2 ** 2)) + (\n                r ** 2 * math.atan(x2 / math.sqrt(r ** 2 - x2 ** 2))))\n\n        return (area1 - area2) / (0.25 * math.pi * r ** 2)", "docstring": "An internal method to get an integral between two bounds of a unit\nsemicircle. Used in algorithm to determine bond probabilities.\nArgs:\ndist_bins: (float) list of all possible bond weights\nidx: (float) index of starting bond weight\n\nReturns:\n(float) integral of portion of unit semicircle", "source": "juraj-google-style"}
{"code": "def _check_edgemap_registers(self, edge_map, keyregs, valregs, valreg=True):\n    add_regs = set()\n    reg_frag_chk = {}\n    for v in keyregs.values():\n        reg_frag_chk[v] = {j: False for j in range(len(v))}\n    for k in edge_map.keys():\n        if (k[0].name in keyregs):\n            reg_frag_chk[k[0]][k[1]] = True\n    for (k, v) in reg_frag_chk.items():\n        s = set(v.values())\n        if (len(s) == 2):\n            raise DAGCircuitError(('edge_map fragments reg %s' % k))\n        elif (s == set([False])):\n            if ((k in self.qregs.values()) or (k in self.cregs.values())):\n                raise DAGCircuitError(('unmapped duplicate reg %s' % k))\n            else:\n                add_regs.add(k)\n        elif valreg:\n            if (not (edge_map[(k, 0)][0].name in valregs)):\n                size = max(map((lambda x: x[1]), filter((lambda x: (x[0] == edge_map[(k, 0)][0])), edge_map.values())))\n                qreg = QuantumRegister((size + 1), edge_map[(k, 0)][0].name)\n                add_regs.add(qreg)\n    return add_regs", "docstring": "Check that wiremap neither fragments nor leaves duplicate registers.\n\n1. There are no fragmented registers. A register in keyregs\nis fragmented if not all of its (qu)bits are renamed by edge_map.\n2. There are no duplicate registers. A register is duplicate if\nit appears in both self and keyregs but not in edge_map.\n\nArgs:\nedge_map (dict): map from (reg,idx) in keyregs to (reg,idx) in valregs\nkeyregs (dict): a map from register names to Register objects\nvalregs (dict): a map from register names to Register objects\nvalreg (bool): if False the method ignores valregs and does not\nadd regs for bits in the edge_map image that don't appear in valregs\n\nReturns:\nset(Register): the set of regs to add to self\n\nRaises:\nDAGCircuitError: if the wiremap fragments, or duplicates exist", "source": "codesearchnet"}
{"code": "def sin(duration: int, amp: complex, freq: float = None,\n        phase: float = 0, name: str = None) -> SamplePulse:\n    \n    if freq is None:\n        freq = 1/duration\n\n    return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name)", "docstring": "Generates sine wave `SamplePulse`.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude.\nfreq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def assert_no_new_pyobjects_executing_eagerly(warmup_iters: int=2) -> Callable[[Callable[..., Any]], Callable[..., None]]:\n\n    def wrap_f(f: Callable[..., Any]) -> Callable[..., None]:\n\n        def decorator(self: 'TensorFlowTestCase', *args, **kwargs) -> None:\n            \n            with context.eager_mode():\n                gc.disable()\n                test_errors = None\n                test_skipped = None\n                if hasattr(self._outcome, 'errors'):\n                    test_errors = self._outcome.errors\n                    test_skipped = self._outcome.skipped\n                else:\n                    test_errors = self._outcome.result.errors\n                    test_skipped = self._outcome.result.skipped\n                for _ in range(warmup_iters):\n                    f(self, *args, **kwargs)\n                self.doCleanups()\n                obj_count_by_type = _get_object_count_by_type()\n                gc.collect()\n                registered_function_names = context.context().list_function_names()\n                obj_count_by_type = _get_object_count_by_type(exclude=gc.get_referents(test_errors, test_skipped))\n                if ops.has_default_graph():\n                    collection_sizes_before = {collection: len(ops.get_collection(collection)) for collection in ops.get_default_graph().collections}\n                for _ in range(3):\n                    f(self, *args, **kwargs)\n                self.doCleanups()\n                if ops.has_default_graph():\n                    for collection_key in ops.get_default_graph().collections:\n                        collection = ops.get_collection(collection_key)\n                        size_before = collection_sizes_before.get(collection_key, 0)\n                        if len(collection) > size_before:\n                            raise AssertionError('Collection %s increased in size from %d to %d (current items %s).' % (collection_key, size_before, len(collection), collection))\n                        del collection\n                        del collection_key\n                        del size_before\n                    del collection_sizes_before\n                gc.collect()\n                obj_count_by_type = _get_object_count_by_type(exclude=gc.get_referents(test_errors, test_skipped)) - obj_count_by_type\n                leftover_functions = context.context().list_function_names() - registered_function_names\n                assert not leftover_functions, 'The following functions were newly created: %s' % leftover_functions\n                assert not obj_count_by_type, 'The following objects were newly created: %s' % str(obj_count_by_type)\n                gc.enable()\n        return tf_decorator.make_decorator(f, decorator)\n    return wrap_f", "docstring": "Decorator for asserting that no new Python objects persist after a test.\n\nReturns a decorator that runs the test multiple times executing eagerly,\nfirst as a warmup and then to let objects accumulate. The warmup helps ignore\ncaches which do not grow as the test is run repeatedly.\n\nUseful for checking that there are no missing Py_DECREFs in the C exercised by\na bit of Python.\n\nArgs:\nwarmup_iters: The number of warmup iterations, excluded from measuring.\n\nReturns:\nA decorator function which can be applied to the test function.", "source": "github-repos"}
{"code": "def _safe_initial_value_from_tensor(name, tensor, op_cache):\n    op = tensor.op\n    new_op = op_cache.get(op.name)\n    if new_op is None:\n        new_op = _safe_initial_value_from_op(name, op, op_cache)\n        op_cache[op.name] = new_op\n    return new_op.outputs[tensor.value_index]", "docstring": "Replace dependencies on variables with their initialized values.\n\nArgs:\nname: Variable name.\ntensor: A `Tensor`. The tensor to replace.\nop_cache: A dict mapping operation names to `Operation`s. Used to memoize\nthe results so as to avoid creating redundant operations.\n\nReturns:\nA `Tensor` compatible with `tensor`. Any inputs that lead to variable\nvalues will be replaced with a corresponding graph that uses the\nvariable's initialized values. This is done on a best-effort basis. If no\nmodifications need to be made then `tensor` will be returned unchanged.", "source": "github-repos"}
{"code": "async def get_messages(self, name):\n        \n\n        resp = await self.send_command(OPERATIONS.CMD_QUERY_MESSAGES, {'name': name},\n                                       MESSAGES.QueryMessagesResponse, timeout=5.0)\n\n        return [states.ServiceMessage.FromDictionary(x) for x in resp]", "docstring": "Get stored messages for a service.\n\nArgs:\nname (string): The name of the service to get messages from.\n\nReturns:\nlist(ServiceMessage): A list of the messages stored for this service", "source": "juraj-google-style"}
{"code": "def get_scalar_arg_dtypes(self):\n    dtypes = []\n    for (name, data) in self._kernel_data.items():\n        dtypes.extend(data.get_scalar_arg_dtypes())\n    return dtypes", "docstring": "Get the location and types of the input scalars.\n\nReturns:\nlist: for every kernel input element either None if the data is a buffer or the numpy data type if\nif is a scalar.", "source": "codesearchnet"}
{"code": "def get_estimator(output_dir, train_config, args):\n    target_name = train_config['target_column']\n    if (is_classification_model(args.model_type) and (target_name not in train_config['categorical_columns'])):\n        raise ValueError('When using a classification model, the target must be a categorical variable.')\n    if (is_regression_model(args.model_type) and (target_name not in train_config['numerical_columns'])):\n        raise ValueError('When using a regression model, the target must be a numerical variable.')\n    if (is_dnn_model(args.model_type) and (not args.layer_sizes)):\n        raise ValueError('--layer-size* must be used with DNN models')\n    if (is_linear_model(args.model_type) and args.layer_sizes):\n        raise ValueError('--layer-size* cannot be used with linear models')\n    feature_columns = _tflearn_features(train_config, args)\n    config = tf.contrib.learn.RunConfig(save_checkpoints_secs=args.save_checkpoints_secs)\n    train_dir = os.path.join(output_dir, 'train')\n    if (args.model_type == 'dnn_regression'):\n        estimator = tf.contrib.learn.DNNRegressor(feature_columns=feature_columns, hidden_units=args.layer_sizes, config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))\n    elif (args.model_type == 'linear_regression'):\n        estimator = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns, config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))\n    elif (args.model_type == 'dnn_classification'):\n        estimator = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=args.layer_sizes, n_classes=train_config['vocab_stats'][target_name]['n_classes'], config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))\n    elif (args.model_type == 'linear_classification'):\n        estimator = tf.contrib.learn.LinearClassifier(feature_columns=feature_columns, n_classes=train_config['vocab_stats'][target_name]['n_classes'], config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))\n    else:\n        raise ValueError('bad --model-type value')\n    return estimator", "docstring": "Returns a tf learn estimator.\n\nWe only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is\ncontrolled by the values of model_type in the args.\n\nArgs:\noutput_dir: Modes are saved into outputdir/train\ntrain_config: our training config\nargs: command line parameters\n\nReturns:\nTF lean estimator\n\nRaises:\nValueError: if config is wrong.", "source": "codesearchnet"}
{"code": "def add(self, row):\n        \n        if not self._types:\n            raise wandb.Error(\n                'TypedTable.set_columns must be called before add.')\n        mapped_row = {}\n        for key, val in row.items():\n            try:\n                typed_val = self._types[key](val)\n                if hasattr(typed_val, 'encode'):\n                    typed_val = typed_val.encode()\n                mapped_row[key] = typed_val\n            except KeyError:\n                raise wandb.Error(\n                    'TypedTable.add received key (\"%s\") which wasn\\'t provided to set_columns' % key)\n            except:\n                raise wandb.Error('TypedTable.add couldn\\'t convert and encode (\"{}\") provided for key (\"{}\") to type ({})'.format(\n                    val, key, self._types[key]))\n        self._output.add(mapped_row)\n        self._count += 1", "docstring": "Add a row to the table.\n\nArgs:\nrow: A dict whose keys match the keys added in set_columns, and whose\nvalues can be cast to the types added in set_columns.", "source": "juraj-google-style"}
{"code": "def download(cls, root, check=None):\n        \n        path = os.path.join(root, cls.name)\n        check = path if check is None else check\n        if not os.path.isdir(check):\n            for url in cls.urls:\n                if isinstance(url, tuple):\n                    url, filename = url\n                else:\n                    filename = os.path.basename(url)\n                zpath = os.path.join(path, filename)\n                if not os.path.isfile(zpath):\n                    if not os.path.exists(os.path.dirname(zpath)):\n                        os.makedirs(os.path.dirname(zpath))\n                    print('downloading {}'.format(filename))\n                    download_from_url(url, zpath)\n                zroot, ext = os.path.splitext(zpath)\n                _, ext_inner = os.path.splitext(zroot)\n                if ext == '.zip':\n                    with zipfile.ZipFile(zpath, 'r') as zfile:\n                        print('extracting')\n                        zfile.extractall(path)\n                \n                elif ext == '.tgz' or ext == '.gz' and ext_inner == '.tar':\n                    with tarfile.open(zpath, 'r:gz') as tar:\n                        dirs = [member for member in tar.getmembers()]\n                        tar.extractall(path=path, members=dirs)\n                elif ext == '.gz':\n                    with gzip.open(zpath, 'rb') as gz:\n                        with open(zroot, 'wb') as uncompressed:\n                            shutil.copyfileobj(gz, uncompressed)\n\n        return os.path.join(path, cls.dirname)", "docstring": "Download and unzip an online archive (.zip, .gz, or .tgz).\n\nArguments:\nroot (str): Folder to download data to.\ncheck (str or None): Folder whose existence indicates\nthat the dataset has already been downloaded, or\nNone to check the existence of root/{cls.name}.\n\nReturns:\nstr: Path to extracted dataset.", "source": "juraj-google-style"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes a snapshot.\n\nArgs:\nrequest: (DataflowProjectsLocationsSnapshotsDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(DeleteSnapshotResponse) The response message.", "source": "github-repos"}
{"code": "def _parse_networks(self, config):\n        \n\n        networks = list()\n        regexp = r'network (.+)/(\\d+) area (\\d+\\.\\d+\\.\\d+\\.\\d+)'\n        matches = re.findall(regexp, config)\n        for (network, netmask, area) in matches:\n            networks.append(dict(network=network, netmask=netmask, area=area))\n        return dict(networks=networks)", "docstring": "Parses config file for the networks advertised\nby the OSPF process\n\nArgs:\nconfig(str):  Running configuration\nReturns:\nlist: dict:\nkeys: network (str)\nnetmask (str)\narea (str)", "source": "juraj-google-style"}
{"code": "def regularizer(name, regularization_fn, name_filter='weights'):\n  \n  regex = re.compile(name_filter)\n  def fn(var_name, variable, phase):\n    if phase is pt.Phase.train and regex.search(var_name):\n      with tf.name_scope(None, name, [variable]):\n        loss = regularization_fn(variable)\n      if loss is not None:\n        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss)\n    return variable\n  return fn", "docstring": "Wraps a regularizer in a parameter-function.\n\nArgs:\nname: The name scope for this regularizer.\nregularization_fn: A function with signature:\nfn(variable) -> loss `Tensor` or `None`.\nname_filter: A regex that will be used to filter variables by name.\n\nReturns:\nA parameter modification function that adds the loss to the\nREGULARIZATION_LOSSES graph key.", "source": "juraj-google-style"}
{"code": "def __init__(self, comment=None):\n    \n    super(EventTag, self).__init__()\n    self._event_identifier = None\n    self.comment = comment\n    self.event_entry_index = None\n    self.event_row_identifier = None\n    self.event_stream_number = None\n    self.labels = []", "docstring": "Initializes an event tag attribute container.\n\nArgs:\ncomment (Optional[str]): comments.", "source": "juraj-google-style"}
{"code": "def set_input_embeddings(self, value: nn.Module):\n    base_model = getattr(self, self.base_model_prefix, self)\n    if base_model is not self:\n        base_model.set_input_embeddings(value)\n    else:\n        raise NotImplementedError", "docstring": "Set model's input embeddings.\n\nArgs:\nvalue (`nn.Module`): A module mapping vocabulary to hidden states.", "source": "github-repos"}
{"code": "def get_releasenotes(project_dir=os.curdir, bugtracker_url=''):\n    \n    releasenotes = ''\n    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')\n    releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES')\n    if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file):\n        with open(releasenotes_file) as releasenotes_fd:\n            releasenotes = releasenotes_fd.read()\n\n    else:\n        releasenotes = api.get_releasenotes(\n            repo_path=project_dir,\n            bugtracker_url=bugtracker_url,\n        )\n\n    return releasenotes", "docstring": "Retrieves the release notes, from the RELEASE_NOTES file (if in a package)\nor generates it from the git history.\n\nArgs:\nproject_dir(str): Path to the git repo of the project.\nbugtracker_url(str): Url to the bug tracker for the issues.\n\nReturns:\nstr: release notes\n\nRaises:\nRuntimeError: If the release notes could not be retrieved", "source": "juraj-google-style"}
{"code": "def embeddings(idx):\n    embed = []\n    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight'))\n    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias'))\n    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight'))\n    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias'))\n    return embed", "docstring": "The function helps in renaming embedding layer weights.\n\nArgs:\nidx: stage number in original model", "source": "github-repos"}
{"code": "def apply(self, func, axis, *args, **kwargs):\n        \n        if callable(func):\n            return self._callable_func(func, axis, *args, **kwargs)\n        elif isinstance(func, dict):\n            return self._dict_func(func, axis, *args, **kwargs)\n        elif is_list_like(func):\n            return self._list_like_func(func, axis, *args, **kwargs)\n        else:\n            pass", "docstring": "Apply func across given axis.\n\nArgs:\nfunc: The function to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "juraj-google-style"}
{"code": "def cast_if_floating_dtype(x, dtype=None):\n    return nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype), x)", "docstring": "Casts the given data tensors to the default floating point type.\n\nCasts only if the input is already a floating point type.\nArgs:\nx: tensor or list/tuple of tensors.\ndtype: The dtype to which Tensors should be cast.\n\nReturns:\nConverted input.", "source": "github-repos"}
{"code": "def evaluate_model(self, accuracy, num_steps, feed_vars=(), feed_data=None, summary_tag=None, print_every=0):\n    if (not hasattr(self, '_saver')):\n        raise ValueError('Before evaluating, you must initialize the model with load_from_checkpoint, prepare or saver.')\n    self._run_init_test_vars_op()\n    if ((not isinstance(accuracy, collections.Sequence)) or isinstance(accuracy, six.string_types)):\n        accuracy = (accuracy,)\n        if summary_tag:\n            summary_tag = (summary_tag,)\n    if (summary_tag and (len(summary_tag) != len(accuracy))):\n        raise ValueError('If summaries are requested, there must be a tag per accuracy node.')\n    result = self.run_model(accuracy, num_steps, feed_vars=feed_vars, feed_data=feed_data, print_every=print_every, allow_initialize=False)\n    assert (len(result) == (len(accuracy) + 1)), ('results is wrong length, was %s but should be 1 longer than %s' % (result, accuracy))\n    if summary_tag:\n        self.add_summaries(result[0], *zip(summary_tag, result[1:]))\n    return result[1:]", "docstring": "Evaluates the given model.\n\nArgs:\naccuracy: The metric that is being evaluated or a tuple of metrics.\nnum_steps: The number of steps to run in the evaluator.\nfeed_vars: A list or tuple of the variables that will be fed.\nfeed_data: A generator that produces tuples of the same length as\nfeed_vars.\nsummary_tag: If provided, the final result of running the model will be\npublished to this tag.\nprint_every: Print a summary every so many steps, use 0 to disable.\nReturns:\nThe accuracy.\nRaises:\nValueError: If the wrong number of summary tags are provided or previously\nrunning QueueRunners haven't been stopped.", "source": "codesearchnet"}
{"code": "def GetStandardAddress(self):\n    for contract in self._contracts.values():\n        if contract.IsStandard:\n            return contract.ScriptHash\n    raise Exception('Could not find a standard contract address')", "docstring": "Get the Wallet's default address.\n\nRaises:\nException: if no default contract address is set.\n\nReturns:\nUInt160: script hash.", "source": "codesearchnet"}
{"code": "def transform(self, value):\n    \n    with tf.name_scope(self._name + '/transform'):\n      no_batch_dim = value.shape.ndims == self._mean.shape.ndims\n      if no_batch_dim:\n        \n        value = value[None, ...]\n      if self._center:\n        value -= self._mean[None, ...]\n      if self._scale:\n        \n        value /= tf.cond(\n            self._count > 1, lambda: self._std() + 1e-8,\n            lambda: tf.ones_like(self._var_sum))[None]\n      if self._clip:\n        value = tf.clip_by_value(value, -self._clip, self._clip)\n      \n      if no_batch_dim:\n        value = value[0]\n      return tf.check_numerics(value, 'value')", "docstring": "Normalize a single or batch tensor.\n\nApplies the activated transformations in the constructor using current\nestimates of mean and variance.\n\nArgs:\nvalue: Batch or single value tensor.\n\nReturns:\nNormalized batch or single value tensor.", "source": "juraj-google-style"}
{"code": "def setData(self, data, setName=None):\n    if (not isinstance(data, DataFrame)):\n        if ((pd is not None) and isinstance(data, pd.DataFrame)):\n            data = DataFrame.fromPandas(data)\n    if (setName is None):\n        lock_and_call((lambda : self._impl.setData(data._impl)), self._lock)\n    else:\n        lock_and_call((lambda : self._impl.setData(data._impl, setName)), self._lock)", "docstring": "Assign the data in the dataframe to the AMPL entities with the names\ncorresponding to the column names.\n\nArgs:\ndata: The dataframe containing the data to be assigned.\n\nsetName: The name of the set to which the indices values of the\nDataFrame are to be assigned.\n\nRaises:\nAMPLException: if the data assignment procedure was not successful.", "source": "codesearchnet"}
{"code": "def get_name_scope() -> str:\n    if context.executing_eagerly():\n        return context.context().scope_name.rstrip('/')\n    return get_default_graph().get_name_scope()", "docstring": "Returns the current name scope in the default_graph.\n\nFor example:\n\n```python\nwith tf.name_scope('scope1'):\nwith tf.name_scope('scope2'):\nprint(tf.get_name_scope())\n```\nwould print the string `scope1/scope2`.\n\nReturns:\nA string representing the current name scope.", "source": "github-repos"}
{"code": "def register_flag_by_module(self, module_name, flag):\n    \n    flags_by_module = self.flags_by_module_dict()\n    flags_by_module.setdefault(module_name, []).append(flag)", "docstring": "Records the module that defines a specific flag.\n\nWe keep track of which flag is defined by which module so that we\ncan later sort the flags by module.\n\nArgs:\nmodule_name: str, the name of a Python module.\nflag: Flag, the Flag instance that is key to the module.", "source": "juraj-google-style"}
{"code": "def _is_variant(self, gemini_variant, ind_objs):\n    indexes = (ind.ind_index for ind in ind_objs)\n    for index in indexes:\n        gt_call = gemini_variant['gt_types'][index]\n        if ((gt_call == 1) or (gt_call == 3)):\n            return True\n    return False", "docstring": "Check if the variant is a variation in any of the individuals\n\nArgs:\ngemini_variant (GeminiQueryRow): The gemini variant\nind_objs (list(puzzle.models.individual)): A list of individuals to check\n\nReturns:\nbool : If any of the individuals has the variant", "source": "codesearchnet"}
{"code": "def run(stream_spec, cmd='ffmpeg', capture_stdout=False, capture_stderr=False, input=None, quiet=False, overwrite_output=False):\n    process = run_async(stream_spec, cmd, pipe_stdin=(input is not None), pipe_stdout=capture_stdout, pipe_stderr=capture_stderr, quiet=quiet, overwrite_output=overwrite_output)\n    (out, err) = process.communicate(input)\n    retcode = process.poll()\n    if retcode:\n        raise Error('ffmpeg', out, err)\n    return (out, err)", "docstring": "Invoke ffmpeg for the supplied node graph.\n\nArgs:\ncapture_stdout: if True, capture stdout (to be used with\n``pipe:`` ffmpeg outputs).\ncapture_stderr: if True, capture stderr.\nquiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.\ninput: text to be sent to stdin (to be used with ``pipe:``\nffmpeg inputs)\n**kwargs: keyword-arguments passed to ``get_args()`` (e.g.\n``overwrite_output=True``).\n\nReturns: (out, err) tuple containing captured stdout and stderr data.", "source": "codesearchnet"}
{"code": "def visit_Call(self, node):\n        \n        if self.depth == 0:\n            return node\n\n        if self.ignore_exceptions is None:\n            ignore_exceptions = ast.Name(\"None\", ast.Load())\n\n        else:\n            ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load())\n\n        catch_exception_type = self.catch_exception \\\n            if self.catch_exception else \"None\"\n\n        catch_exception = ast.Name(catch_exception_type, ast.Load())\n        depth = ast.Num(self.depth - 1 if self.depth > 0 else -1)\n\n        debug_node_name = ast.Name(\"debug\", ast.Load())\n        call_extra_parameters = [] if IS_PYTHON_3 else [None, None]\n        node.func = ast.Call(debug_node_name,\n                             [node.func, ignore_exceptions,\n                              catch_exception, depth],\n                             [], *call_extra_parameters)\n\n        return node", "docstring": "Propagate 'debug' wrapper into inner function calls if needed.\n\nArgs:\nnode (ast.AST): node statement to surround.", "source": "juraj-google-style"}
{"code": "def CreateRetryTask(self):\n    retry_task = Task(session_identifier=self.session_identifier)\n    retry_task.file_entry_type = self.file_entry_type\n    retry_task.merge_priority = self.merge_priority\n    retry_task.path_spec = self.path_spec\n    retry_task.storage_file_size = self.storage_file_size\n    self.has_retry = True\n    return retry_task", "docstring": "Creates a new task to retry a previously abandoned task.\n\nThe retry task will have a new identifier but most of the attributes\nwill be a copy of the previously abandoned task.\n\nReturns:\nTask: a task to retry a previously abandoned task.", "source": "codesearchnet"}
{"code": "def FromDateTimeToTimestamp(datetime_obj):\n    dt = datetime_obj.replace(tzinfo=None)\n    return int((dt - datetime.datetime(1970, 1, 1)).total_seconds())", "docstring": "Converts datetime object to internal nss_cache timestamp.\n\nArgs:\ndatetime object\nReturns:\nnumber of seconds since epoch", "source": "github-repos"}
{"code": "async def get_action_context_and_template(chain, parent_link, decision_link):\n    actions_path = decision_link.get_artifact_full_path('public/actions.json')\n    all_actions = load_json_or_yaml(actions_path, is_path=True)['actions']\n    action_name = get_action_callback_name(parent_link.task)\n    action_defn = _get_action_from_actions_json(all_actions, action_name)\n    jsone_context = (await populate_jsone_context(chain, parent_link, decision_link, 'action'))\n    if (('task' in action_defn) and (chain.context.config['min_cot_version'] <= 2)):\n        tmpl = {'tasks': [action_defn['task']]}\n    elif (action_defn.get('kind') == 'hook'):\n        in_tree_tmpl = (await get_in_tree_template(decision_link))\n        action_perm = _get_action_perm(action_defn)\n        tmpl = _wrap_action_hook_with_let(in_tree_tmpl, action_perm)\n        jsone_context = {'payload': _render_action_hook_payload(action_defn, jsone_context, parent_link), 'taskId': parent_link.task_id, 'now': jsone_context['now'], 'as_slugid': jsone_context['as_slugid'], 'clientId': jsone_context.get('clientId')}\n    elif (action_defn.get('kind') == 'task'):\n        tmpl = (await get_in_tree_template(decision_link))\n        for k in ('action', 'push', 'repository'):\n            jsone_context[k] = deepcopy(action_defn['hookPayload']['decision'].get(k, {}))\n        jsone_context['action']['repo_scope'] = get_repo_scope(parent_link.task, parent_link.name)\n    else:\n        raise CoTError('Unknown action kind `{kind}` for action `{name}`.'.format(kind=action_defn.get('kind', '<MISSING>'), name=action_defn.get('name', '<MISSING>')))\n    return (jsone_context, tmpl)", "docstring": "Get the appropriate json-e context and template for an action task.\n\nArgs:\nchain (ChainOfTrust): the chain of trust.\nparent_link (LinkOfTrust): the parent link to test.\ndecision_link (LinkOfTrust): the parent link's decision task link.\ntasks_for (str): the reason the parent link was created (cron,\nhg-push, action)\n\nReturns:\n(dict, dict): the json-e context and template.", "source": "codesearchnet"}
{"code": "def autocorrect(query, possibilities, delta=0.75):\n    possibilities = [possibility.lower() for possibility in possibilities]\n    if (query in possibilities):\n        return query\n    options = [word for word in possibilities if word.startswith(query)]\n    if (len(options) > 0):\n        possibilities = options\n        query = max_substring(options)\n    matches = get_close_matches(query, possibilities, cutoff=delta)\n    try:\n        assert (len(matches) > 0)\n    except AssertionError:\n        raise AssertionError('No matches for \"{0}\" found'.format(query))\n    return matches[0]", "docstring": "Attempts to figure out what possibility the query is\n\nThis autocorrect function is rather simple right now with plans for later\nimprovement. Right now, it just attempts to finish spelling a word as much\nas possible, and then determines which possibility is closest to said word.\n\nArgs:\n\nquery (unicode): query to attempt to complete\n\npossibilities (list): list of unicodes of possible answers for query\n\ndelta (float): Minimum delta similarity between query and\nany given possibility for possibility to be considered.\nDelta used by difflib.get_close_matches().\n\nReturns:\nunicode: best guess of correct answer\n\nRaises:\nAssertionError: raised if no matches found\n\nExample:\n.. code-block:: Python\n\n>>> autocorrect('bowtei', ['bowtie2', 'bot'])\n'bowtie2'", "source": "codesearchnet"}
{"code": "def get_device_name(self, cached=True):\n        \n        if cached and self.name is not None:\n            return self.name\n\n        device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)\n        if device_name is None:\n            logger.warn('Failed to find handle for device name')\n            return None\n\n        self.name = self.dongle._read_attribute(self.conn_handle, device_name)\n        return self.name", "docstring": "Returns the SK8 device BLE name.\n\nArgs:\ncached (bool): if True, returns the locally cached copy of the name. If this is\nset to False, or the name is not cached, it will read from the device instead.\n\nReturns:\nstr. The current device name. May be `None` if an error occurs.", "source": "juraj-google-style"}
{"code": "def union(cls, *mhs):\n    if (len(mhs) < 2):\n        raise ValueError('Cannot union less than 2 MinHash')\n    num_perm = len(mhs[0])\n    seed = mhs[0].seed\n    if any((((seed != m.seed) or (num_perm != len(m))) for m in mhs)):\n        raise ValueError('The unioning MinHash must have the                    same seed and number of permutation functions')\n    hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])\n    permutations = mhs[0].permutations\n    return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues, permutations=permutations)", "docstring": "Create a MinHash which is the union of the MinHash objects passed as arguments.\n\nArgs:\n*mhs: The MinHash objects to be united. The argument list length is variable,\nbut must be at least 2.\n\nReturns:\ndatasketch.MinHash: A new union MinHash.", "source": "codesearchnet"}
{"code": "def restart(self, **kwargs):\n        \n        return self.client.api.restart(self.id, **kwargs)", "docstring": "Restart this container. Similar to the ``docker restart`` command.\n\nArgs:\ntimeout (int): Number of seconds to try to stop for before killing\nthe container. Once killed it will then be restarted. Default\nis 10 seconds.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "async def _auth_cram_md5(self, username, password):\n    mechanism = 'CRAM-MD5'\n    (code, message) = (await self.do_cmd('AUTH', mechanism, success=(334,)))\n    decoded_challenge = base64.b64decode(message)\n    challenge_hash = hmac.new(key=password.encode('utf-8'), msg=decoded_challenge, digestmod='md5')\n    hex_hash = challenge_hash.hexdigest()\n    response = '{} {}'.format(username, hex_hash)\n    encoded_response = SMTP.b64enc(response)\n    try:\n        (code, message) = (await self.do_cmd(encoded_response, success=(235, 503)))\n    except SMTPCommandFailedError as e:\n        raise SMTPAuthenticationError(e.code, e.message, mechanism)\n    return (code, message)", "docstring": "Performs an authentication attemps using the CRAM-MD5 mechanism.\n\nProtocol:\n\n1. Send 'AUTH CRAM-MD5' to server ;\n2. If the server replies with a 334 return code, we can go on:\n\n1) The challenge (sent by the server) is base64-decoded ;\n2) The decoded challenge is hashed using HMAC-MD5 and the user\npassword as key (shared secret) ;\n3) The hashed challenge is converted to a string of lowercase\nhexadecimal digits ;\n4) The username and a space character are prepended to the hex\ndigits ;\n5) The concatenation is base64-encoded and sent to the server.\n6) If the server replies with a return code of 235, user is\nauthenticated.\n\nArgs:\nusername (str): Identifier of the user trying to authenticate.\npassword (str): Password for the user.\n\nRaises:\nConnectionResetError: If the connection with the server is\nunexpectedely lost.\nSMTPAuthenticationError: If the authentication attempt fails.\n\nReturns:\n(int, str): A (code, message) 2-tuple containing the server\nresponse.", "source": "codesearchnet"}
{"code": "def prefer_static_broadcast_shape(shape1, shape2, name='prefer_static_broadcast_shape'):\n    with tf.name_scope(name):\n\n        def make_shape_tensor(x):\n            return tf.convert_to_tensor(value=x, name='shape', dtype=tf.int32)\n\n        def get_tensor_shape(s):\n            if isinstance(s, tf.TensorShape):\n                return s\n            s_ = tf.get_static_value(make_shape_tensor(s))\n            if (s_ is not None):\n                return tf.TensorShape(s_)\n            return None\n\n        def get_shape_tensor(s):\n            if (not isinstance(s, tf.TensorShape)):\n                return make_shape_tensor(s)\n            if tensorshape_util.is_fully_defined(s):\n                return make_shape_tensor(tensorshape_util.as_list(s))\n            raise ValueError('Cannot broadcast from partially defined `TensorShape`.')\n        shape1_ = get_tensor_shape(shape1)\n        shape2_ = get_tensor_shape(shape2)\n        if ((shape1_ is not None) and (shape2_ is not None)):\n            return tf.broadcast_static_shape(shape1_, shape2_)\n        shape1_ = get_shape_tensor(shape1)\n        shape2_ = get_shape_tensor(shape2)\n        return tf.broadcast_dynamic_shape(shape1_, shape2_)", "docstring": "Convenience function which statically broadcasts shape when possible.\n\nArgs:\nshape1:  `1-D` integer `Tensor`.  Already converted to tensor!\nshape2:  `1-D` integer `Tensor`.  Already converted to tensor!\nname:  A string name to prepend to created ops.\n\nReturns:\nThe broadcast shape, either as `TensorShape` (if broadcast can be done\nstatically), or as a `Tensor`.", "source": "codesearchnet"}
{"code": "def validate_layout_display(self, table, display_condition):\n    display = False\n    if (display_condition is None):\n        display = True\n    else:\n        display_query = 'select count(*) from {} where {}'.format(table, display_condition)\n        try:\n            cur = self.db_conn.cursor()\n            cur.execute(display_query.replace('\"', ''))\n            rows = cur.fetchall()\n            if (rows[0][0] > 0):\n                display = True\n        except sqlite3.Error as e:\n            print('\"{}\" query returned an error: ({}).'.format(display_query, e))\n            sys.exit(1)\n    return display", "docstring": "Check to see if the display condition passes.\n\nArgs:\ntable (str): The name of the DB table which hold the App data.\ndisplay_condition (str): The \"where\" clause of the DB SQL statement.\n\nReturns:\nbool: True if the row count is greater than 0.", "source": "codesearchnet"}
{"code": "def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):\n    find_specs = []\n    for name in artifact_filter_names:\n        definition = self._artifacts_registry.GetDefinitionByName(name)\n        if (not definition):\n            logger.debug('undefined artifact definition: {0:s}'.format(name))\n            continue\n        logger.debug('building find spec from artifact definition: {0:s}'.format(name))\n        artifact_find_specs = self._BuildFindSpecsFromArtifact(definition, environment_variables)\n        find_specs.extend(artifact_find_specs)\n    for find_spec in find_specs:\n        if isinstance(find_spec, file_system_searcher.FindSpec):\n            self.file_system_find_specs.append(find_spec)\n        elif isinstance(find_spec, registry_searcher.FindSpec):\n            self.registry_find_specs.append(find_spec)\n        else:\n            logger.warning('Unsupported find specification type: {0:s}'.format(type(find_spec)))", "docstring": "Builds find specifications from artifact definitions.\n\nArgs:\nartifact_filter_names (list[str]): names of artifact definitions that are\nused for filtering file system and Windows Registry key paths.\nenvironment_variables (Optional[list[EnvironmentVariableArtifact]]):\nenvironment variables.", "source": "codesearchnet"}
{"code": "def factored_joint_mvn(distributions):\n    graph_parents = [tensor for distribution in distributions for tensor in distribution._graph_parents]\n    with tf.compat.v1.name_scope('factored_joint_mvn', values=graph_parents):\n        dtype = tf.debugging.assert_same_float_dtype(distributions)\n        broadcast_ones = tf.ones(broadcast_batch_shape(distributions), dtype=dtype)[(..., tf.newaxis)]\n        return MultivariateNormalLinearOperator(loc=tf.concat([(mvn.mean() * broadcast_ones) for mvn in distributions], axis=(- 1)), scale=tfl.LinearOperatorBlockDiag([mvn.scale for mvn in distributions], is_square=True))", "docstring": "Combine MultivariateNormals into a factored joint distribution.\n\nGiven a list of multivariate normal distributions\n`dist[i] = Normal(loc[i], scale[i])`, construct the joint\ndistribution given by concatenating independent samples from these\ndistributions. This is multivariate normal with mean vector given by the\nconcatenation of the component mean vectors, and block-diagonal covariance\nmatrix in which the blocks are the component covariances.\n\nNote that for computational efficiency, multivariate normals are represented\nby a 'scale' (factored covariance) linear operator rather than the full\ncovariance matrix.\n\nArgs:\ndistributions: Python `iterable` of MultivariateNormal distribution\ninstances (e.g., `tfd.MultivariateNormalDiag`,\n`tfd.MultivariateNormalTriL`, etc.). These must be broadcastable to a\nconsistent batch shape, but may have different event shapes\n(i.e., defined over spaces of different dimension).\n\nReturns:\njoint_distribution: An instance of `tfd.MultivariateNormalLinearOperator`\nrepresenting the joint distribution constructed by concatenating\nan independent sample from each input distributions.", "source": "codesearchnet"}
{"code": "def __frontend_limit_descriptor(self, api_info):\n    \n    if api_info.frontend_limits is None:\n      return None\n\n    descriptor = {}\n    for propname, descname in (('unregistered_user_qps', 'unregisteredUserQps'),\n                               ('unregistered_qps', 'unregisteredQps'),\n                               ('unregistered_daily', 'unregisteredDaily')):\n      if getattr(api_info.frontend_limits, propname) is not None:\n        descriptor[descname] = getattr(api_info.frontend_limits, propname)\n\n    rules = self.__frontend_limit_rules_descriptor(api_info)\n    if rules:\n      descriptor['rules'] = rules\n\n    return descriptor", "docstring": "Builds a frontend limit descriptor from API info.\n\nArgs:\napi_info: An _ApiInfo object.\n\nReturns:\nA dictionary with frontend limit information.", "source": "juraj-google-style"}
{"code": "def _process_string_token(self, token, start_row, start_col):\n    for (i, char) in enumerate(token):\n        if (char in QUOTES):\n            break\n    norm_quote = token[i:]\n    if ((len(norm_quote) >= 3) and (norm_quote[:3] in TRIPLE_QUOTE_OPTS.values())):\n        self._tokenized_triple_quotes[start_row] = (token, norm_quote[:3], start_row, start_col)\n        return\n    preferred_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)\n    if (self.config.string_quote in SMART_CONFIG_OPTS):\n        other_quote = next((q for q in QUOTES if (q != preferred_quote)))\n        if ((preferred_quote in token[(i + 1):(- 1)]) and (other_quote not in token[(i + 1):(- 1)])):\n            preferred_quote = other_quote\n    if (norm_quote[0] != preferred_quote):\n        self._invalid_string_quote(quote=norm_quote[0], row=start_row, correct_quote=preferred_quote, col=start_col)", "docstring": "Internal method for identifying and checking string tokens\nfrom the token stream.\n\nArgs:\ntoken: the token to check.\nstart_row: the line on which the token was found.\nstart_col: the column on which the token was found.", "source": "codesearchnet"}
{"code": "def list_values(hive, key=None, use_32bit_registry=False, include_default=True):\n    return __utils__['reg.list_values'](hive=hive, key=key, use_32bit_registry=use_32bit_registry, include_default=include_default)", "docstring": "r'''\nEnumerates the values in a registry key or hive.\n\nArgs:\n\nhive (str):\nThe name of the hive. Can be one of the following:\n\n- HKEY_LOCAL_MACHINE or HKLM\n- HKEY_CURRENT_USER or HKCU\n- HKEY_USER or HKU\n- HKEY_CLASSES_ROOT or HKCR\n- HKEY_CURRENT_CONFIG or HKCC\n\nkey (str):\nThe key (looks like a path) to the value name. If a key is not\npassed, the values under the hive will be returned.\n\nuse_32bit_registry (bool):\nAccesses the 32bit portion of the registry on 64 bit installations.\nOn 32bit machines this is ignored.\n\ninclude_default (bool):\nToggle whether to include the '(Default)' value.\n\nReturns:\nlist: A list of values under the hive or key.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' reg.list_values HKLM 'SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip'", "source": "codesearchnet"}
{"code": "def prepare_message(self, message):\n    message.meta.update(path=self.path)\n    for handler in self.handlers:\n        handler.outgoing(message, self)\n    return message", "docstring": "Prepares the message before sending it out\n\nReturns:\n- message.Message: the message", "source": "codesearchnet"}
{"code": "def fn(x: tuple[int, ...]):\n    return x", "docstring": "Test function\n\nArgs:\nx: The input\n\n\nReturns:\nThe output", "source": "github-repos"}
{"code": "def learn(self, grad_arr, fix_opt_flag=False):\n        \n        delta_arr = super().learn(grad_arr, fix_opt_flag)\n        if self.__add_channel_flag is True:\n            return delta_arr[:, 0]\n        else:\n            return delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\ngrad_arr:       `np.ndarray` of gradients.\nfix_opt_flag:   If `False`, no optimization in this model will be done.\n\nReturns:\n`np.ndarray` of delta or gradients.", "source": "juraj-google-style"}
{"code": "def prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size=None):\n    if isinstance(inputs, list):\n        for record in inputs:\n            if (isinstance(record, amazon_estimator.RecordSet) and (record.channel == 'train')):\n                estimator.feature_dim = record.feature_dim\n                break\n    elif isinstance(inputs, amazon_estimator.RecordSet):\n        estimator.feature_dim = inputs.feature_dim\n    else:\n        raise TypeError('Training data must be represented in RecordSet or list of RecordSets')\n    estimator.mini_batch_size = mini_batch_size", "docstring": "Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data.\n\nArgs:\nestimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase):\nAn estimator for a built-in Amazon algorithm to get information from and update.\ninputs: The training data.\n* (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of\nAmazon :class:~`Record` objects serialized and stored in S3.\nFor use with an estimator for an Amazon algorithm.\n* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of\n:class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is\na different channel of training data.", "source": "codesearchnet"}
{"code": "def handle_callback(self, callback_id, ret_value, rpc_func_name):", "docstring": "Creates a callback handler for the asynchronous RPC.\n\nArgs:\ncallback_id: str, the callback ID for creating a callback handler object.\nret_value: any, the result field of the RPC response.\nrpc_func_name: str, the name of the snippet function executed on the\nserver.\n\nReturns:\nThe callback handler object.", "source": "github-repos"}
{"code": "def __init__(self, field, lower_bound, upper_bound):\n        \n        super(BetweenClause, self).__init__(field, lower_bound, upper_bound)\n        self.field = field\n        self.lower_bound = lower_bound\n        self.upper_bound = upper_bound\n        self.validate()", "docstring": "Construct an expression that is true when the field value is within the given bounds.\n\nArgs:\nfield: LocalField Expression, denoting the field in consideration\nlower_bound: lower bound constraint for given field\nupper_bound: upper bound constraint for given field\n\nReturns:\na new BetweenClause object", "source": "juraj-google-style"}
{"code": "def add_all_exchange_reactions(model, compartment, allow_duplicates=False):\n    all_reactions = {}\n    if (not allow_duplicates):\n        for rxnid in model.database.reactions:\n            rx = model.database.get_reaction(rxnid)\n            all_reactions[rx] = rxnid\n    added = set()\n    added_compounds = set()\n    initial_compounds = set(model.compounds)\n    reactions = set(model.database.reactions)\n    for model_compound in initial_compounds:\n        compound = model_compound.in_compartment(compartment)\n        if (compound in added_compounds):\n            continue\n        rxnid_ex = create_exchange_id(reactions, compound)\n        reaction_ex = Reaction(Direction.Both, {compound: (- 1)})\n        if (reaction_ex not in all_reactions):\n            model.database.set_reaction(rxnid_ex, reaction_ex)\n            reactions.add(rxnid_ex)\n        else:\n            rxnid_ex = all_reactions[reaction_ex]\n        if (not model.has_reaction(rxnid_ex)):\n            added.add(rxnid_ex)\n        model.add_reaction(rxnid_ex)\n        added_compounds.add(compound)\n    return added", "docstring": "Add all exchange reactions to database and to model.\n\nArgs:\nmodel: :class:`psamm.metabolicmodel.MetabolicModel`.", "source": "codesearchnet"}
{"code": "def list(self, **kwargs):\n    request = Request('GET', '/v3/accounts')\n    response = self.ctx.request(request)\n    if (response.content_type is None):\n        return response\n    if (not response.content_type.startswith('application/json')):\n        return response\n    jbody = json.loads(response.raw_body)\n    parsed_body = {}\n    if (str(response.status) == '200'):\n        if (jbody.get('accounts') is not None):\n            parsed_body['accounts'] = [self.ctx.account.AccountProperties.from_dict(d, self.ctx) for d in jbody.get('accounts')]\n    elif (str(response.status) == '401'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '405'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    else:\n        parsed_body = jbody\n    response.body = parsed_body\n    return response", "docstring": "Get a list of all Accounts authorized for the provided token.\n\nArgs:\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def add_help_text(parent, filepath, prefix='!'):\n    import tkinter as tk\n    import tkinter.ttk as ttk\n    help_contents = get_help_data(filepath)\n    text = tk.Text(parent, wrap='word', font=('Helvetica', 10))\n    text.grid(row=0, column=0, sticky='W E N S')\n    text.tag_config('heading', font=('Helvetica', 14))\n    text.tag_config('command', font=('Courier', 10))\n    text.tag_config('param', font=('Courier', 10))\n    text.tag_config('description')\n    scrollbar = ttk.Scrollbar(parent, orient='vertical', command=text.yview)\n    scrollbar.grid(column=1, row=0, sticky='N S')\n    text['yscrollcommand'] = scrollbar.set\n    for d in help_contents:\n        text.insert('end', d, 'heading')\n        text.insert('end', '\\n')\n        if ('commands' in d.lower()):\n            for c in help_contents[d]:\n                if ('name' not in c):\n                    continue\n                command = (prefix + c['name'])\n                text.insert('end', command, ('command', 'description'))\n                if ('params' in c):\n                    for param in c['params']:\n                        text.insert('end', ' [{}]'.format(param), ('param', 'description'))\n                text.insert('end', ': ')\n                if ('description' in c):\n                    text.insert('end', c['description'], 'description')\n                text.insert('end', '\\n')\n            text.insert('end', '\\n')\n        else:\n            text.insert('end', help_contents[d], 'description')\n            text.insert('end', '\\n\\n')\n    text.config(state=tk.DISABLED)", "docstring": "Load help text from a file and adds it to the parent\n\nArgs:\nparent: A tk or ttk object\nfilepath (str): The file to load help text from\nprefix (str): The prefix to use for commands", "source": "codesearchnet"}
{"code": "def match_not_exists(self, field, new_group=False):\n    return self.exclude_field(field, '*', new_group=new_group)", "docstring": "Require a field to not exist in the results.\nMatches will not have ``field`` present.\n\nArguments:\nfield (str): The field to check.\nThe field must be namespaced according to Elasticsearch rules\nusing the dot syntax.\nFor example, ``\"mdf.source_name\"`` is the ``source_name`` field\nof the ``mdf`` dictionary.\nnew_group (bool): If ``True``, will separate the term into a new parenthetical group.\nIf ``False``, will not.\n**Default:** ``False``.\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def next_event_type(self):\n    type_ = self._libinput.libinput_next_event_type(self._li)\n    if (type_ == 0):\n        return None\n    else:\n        return EventType(type_)", "docstring": "Return the type of the next event in the internal queue.\n\nThis method does not pop the event off the queue and the next call\nto :attr:`events` returns that event.\n\nReturns:\n~libinput.constant.EventType: The event type of the next available\nevent or :obj:`None` if no event is available.", "source": "codesearchnet"}
{"code": "def chrome_tracing_object_transfer_dump(self, filename=None):\n    client_id_to_address = {}\n    for client_info in ray.global_state.client_table():\n        client_id_to_address[client_info['ClientID']] = '{}:{}'.format(client_info['NodeManagerAddress'], client_info['ObjectManagerPort'])\n    all_events = []\n    for (key, items) in self.profile_table().items():\n        if (items[0]['component_type'] != 'object_manager'):\n            continue\n        for event in items:\n            if (event['event_type'] == 'transfer_send'):\n                (object_id, remote_client_id, _, _) = event['extra_data']\n            elif (event['event_type'] == 'transfer_receive'):\n                (object_id, remote_client_id, _, _) = event['extra_data']\n            elif (event['event_type'] == 'receive_pull_request'):\n                (object_id, remote_client_id) = event['extra_data']\n            else:\n                assert False, 'This should be unreachable.'\n            object_id_int = int(object_id[:2], 16)\n            color = self._chrome_tracing_colors[(object_id_int % len(self._chrome_tracing_colors))]\n            new_event = {'cat': event['event_type'], 'name': event['event_type'], 'pid': client_id_to_address[key], 'tid': client_id_to_address[remote_client_id], 'ts': self._seconds_to_microseconds(event['start_time']), 'dur': self._seconds_to_microseconds((event['end_time'] - event['start_time'])), 'ph': 'X', 'cname': color, 'args': event['extra_data']}\n            all_events.append(new_event)\n            if (event['event_type'] == 'transfer_send'):\n                additional_event = new_event.copy()\n                additional_event['cname'] = 'black'\n                all_events.append(additional_event)\n            elif (event['event_type'] == 'transfer_receive'):\n                additional_event = new_event.copy()\n                additional_event['cname'] = 'grey'\n                all_events.append(additional_event)\n            else:\n                pass\n    if (filename is not None):\n        with open(filename, 'w') as outfile:\n            json.dump(all_events, outfile)\n    else:\n        return all_events", "docstring": "Return a list of transfer events that can viewed as a timeline.\n\nTo view this information as a timeline, simply dump it as a json file\nby passing in \"filename\" or using using json.dump, and then load go to\nchrome://tracing in the Chrome web browser and load the dumped file.\nMake sure to enable \"Flow events\" in the \"View Options\" menu.\n\nArgs:\nfilename: If a filename is provided, the timeline is dumped to that\nfile.\n\nReturns:\nIf filename is not provided, this returns a list of profiling\nevents. Each profile event is a dictionary.", "source": "codesearchnet"}
{"code": "def to_json(self):\n    return {'xblock_id': six.text_type(self.xblock_id), 'messages': [message.to_json() for message in self.messages], 'empty': self.empty}", "docstring": "Convert to a json-serializable representation.\n\nReturns:\ndict: A dict representation that is json-serializable.", "source": "codesearchnet"}
{"code": "def read_dftbp(filename):\n    infile = open(filename, 'r')\n    lines = infile.readlines()\n    for ss in lines:\n        if ss.strip().startswith('\n            lines.remove(ss)\n    natoms = int(lines[0].split()[0])\n    symbols = lines[1].split()\n    if (lines[0].split()[1].lower() == 'f'):\n        is_scaled = True\n        scale_pos = 1\n        scale_latvecs = dftbpToBohr\n    else:\n        is_scaled = False\n        scale_pos = dftbpToBohr\n        scale_latvecs = dftbpToBohr\n    positions = []\n    expaned_symbols = []\n    for ii in range(2, (natoms + 2)):\n        lsplit = lines[ii].split()\n        expaned_symbols.append(symbols[(int(lsplit[1]) - 1)])\n        positions.append([(float(ss) * scale_pos) for ss in lsplit[2:5]])\n    origin = [float(ss) for ss in lines[(natoms + 2)].split()]\n    cell = []\n    for ii in range((natoms + 3), (natoms + 6)):\n        lsplit = lines[ii].split()\n        cell.append([(float(ss) * scale_latvecs) for ss in lsplit[:3]])\n    cell = np.array(cell)\n    if is_scaled:\n        atoms = Atoms(symbols=expaned_symbols, cell=cell, scaled_positions=positions)\n    else:\n        atoms = Atoms(symbols=expaned_symbols, cell=cell, positions=positions)\n    return atoms", "docstring": "Reads DFTB+ structure files in gen format.\n\nArgs:\nfilename: name of the gen-file to be read\n\nReturns:\natoms: an object of the phonopy.Atoms class, representing the structure\nfound in filename", "source": "codesearchnet"}
{"code": "def get_course_runs_from_program(program):\n    \n    course_runs = set()\n    for course in program.get(\"courses\", []):\n        for run in course.get(\"course_runs\", []):\n            if \"key\" in run and run[\"key\"]:\n                course_runs.add(run[\"key\"])\n\n    return course_runs", "docstring": "Return course runs from program data.\n\nArguments:\nprogram(dict): Program data from Course Catalog API\n\nReturns:\nset: course runs in given program", "source": "juraj-google-style"}
{"code": "def put(self, filename):\n        \n        from . import LocalFile\n        target = get_target_path(filename, self.source)\n        with self.open('rb') as infile, open(target, 'wb') as outfile:\n            shutil.copyfileobj(infile, outfile)\n        return LocalFile(target)", "docstring": "Write the file to the given path\n\nArgs:\nfilename(str): path to write this file to\n\nReturns:\nLocalFile: reference to the copy of the file stored at ``filename``", "source": "juraj-google-style"}
{"code": "def headers(self, headers=None, **kw):\n    headers = (kw if kw else headers)\n    self._request.headers = headers\n    self.add_matcher(matcher('HeadersMatcher', headers))", "docstring": "Defines a dictionary of arguments.\n\nHeader keys are case insensitive.\n\nArguments:\nheaders (dict): headers to match.\n**headers (dict): headers to match as variadic keyword arguments.\n\nReturns:\nself: current Mock instance.", "source": "codesearchnet"}
{"code": "def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',\n        figsize=(18,10), **scatter_options):\n    \n    plt.figure(figsize=figsize)\n    for method in methods:\n        preproc= method[0]\n        if isinstance(preproc, Preprocess):\n            preprocessed, ll = preproc.run(data)\n            output_names = preproc.output_names\n        else:\n            \n            p1 = data\n            output_names = ['']\n            for p in preproc:\n                p1, ll = p.run(p1)\n                p1 = p1[0]\n                output_names[0] = output_names[0] + p.output_names[0]\n            preprocessed = [p1]\n        for r, name in zip(preprocessed, output_names):\n            \n            print(name)\n            \n            if r.shape[0]==2:\n                r_dim_red = r\n            else:\n                \n                if sparse.issparse(r) and r.shape[0] > 100:\n                    name = 'tsvd_' + name\n                    tsvd = TruncatedSVD(50)\n                    r_dim_red = tsvd.fit_transform(r.T)\n                    try:\n                        tsne = TSNE(2)\n                        r_dim_red = tsne.fit_transform(r_dim_red).T\n                        name = 'tsne_' + name\n                    except:\n                        tsvd2 = TruncatedSVD(2)\n                        r_dim_red = tsvd2.fit_transform(r_dim_red).T\n                else:\n                    name = 'tsne_' + name\n                    tsne = TSNE(2)\n                    r_dim_red = tsne.fit_transform(r.T).T\n            if isinstance(method[1], list):\n                for clustering_method in method[1]:\n                    try:\n                        cluster_labels = clustering_method.run(r)\n                    except:\n                        print('clustering failed')\n                        continue\n                    output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)\n                    visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)\n            else:\n                clustering_method = method[1]\n                try:\n                    cluster_labels = clustering_method.run(r)\n                except:\n                    print('clustering failed')\n                    continue\n                output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)\n                visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)\n            output_path = base_dir + '/{0}_true_labels.png'.format(name)\n            visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options)", "docstring": "Generates visualization scatters for all the methods.\n\nArgs:\nmethods: follows same format as run_experiments. List of tuples.\ndata: genes x cells\ntrue_labels: array of integers\nbase_dir: base directory to save all the plots\nfigsize: tuple of ints representing size of figure\nscatter_options: options for plt.scatter", "source": "juraj-google-style"}
{"code": "def get(self, element):\n        \n\n        if self['link']['item'][element]['class'] == 'dataset':\n            return Dataset.read(self['link']['item'][element]['href'])\n        elif self['link']['item'][element]['class'] == 'collection':\n            return Collection.read(self['link']['item'][element]['href'])\n        elif self['link']['item'][element]['class'] == 'dimension':\n            return Dimension.read(self['link']['item'][element]['href'])\n        else:\n            raise ValueError(\n                \"Class not allowed. Please use dataset, collection or \"\n                \"dimension'\")", "docstring": "Gets ith element of a collection in an object of the corresponding \\\nclass.\nArgs:\noutput(string): can accept 'jsonstat' or 'dataframe_list'\n\nReturns:\nSerialized JSONstat or a list of Pandas Dataframes,depending on \\\nthe 'output' parameter.", "source": "juraj-google-style"}
{"code": "def xldate_as_datetime(xldate, datemode=0, option='to_datetime'):\n    if (option == 'to_float'):\n        d = ((xldate - 25589) * 86400.0)\n    else:\n        try:\n            d = (datetime.datetime(1899, 12, 30) + datetime.timedelta(days=(xldate + (1462 * datemode))))\n            if (option == 'to_string'):\n                date_format = '%Y-%m-%d %H:%M:%S'\n                d = d.strftime(date_format)\n        except TypeError:\n            logging.info(f'The date is not of correct type [{xldate}]')\n            d = xldate\n    return d", "docstring": "Converts a xls date stamp to a more sensible format.\n\nArgs:\nxldate (str): date stamp in Excel format.\ndatemode (int): 0 for 1900-based, 1 for 1904-based.\noption (str): option in (\"to_datetime\", \"to_float\", \"to_string\"),\nreturn value\n\nReturns:\ndatetime (datetime object, float, or string).", "source": "codesearchnet"}
{"code": "def remove_handler(self, handler: Handler, group: int = 0):\n        \n        if isinstance(handler, DisconnectHandler):\n            self.disconnect_handler = None\n        else:\n            self.dispatcher.remove_handler(handler, group)", "docstring": "Removes a previously-added update handler.\n\nMake sure to provide the right group that the handler was added in. You can use\nthe return value of the :meth:`add_handler` method, a tuple of (handler, group), and\npass it directly.\n\nArgs:\nhandler (``Handler``):\nThe handler to be removed.\n\ngroup (``int``, *optional*):\nThe group identifier, defaults to 0.", "source": "juraj-google-style"}
{"code": "def set_license(self, license, **kwargs):\n        \n        data = {'license': license}\n        return self.http_post('/license', post_data=data, **kwargs)", "docstring": "Add a new license.\n\nArgs:\nlicense (str): The license string\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabPostError: If the server cannot perform the request\n\nReturns:\ndict: The new license information", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, config_object, category=None, names=None):\n    for (helper_name, helper_class) in cls._helper_classes.items():\n        if ((category and (helper_class.CATEGORY != category)) or (names and (helper_name not in names))):\n            continue\n        try:\n            helper_class.ParseOptions(options, config_object)\n        except errors.BadConfigObject:\n            pass", "docstring": "Parses and validates arguments using the appropriate helpers.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfig_object (object): object to be configured by an argument helper.\ncategory (Optional[str]): category of helpers to apply to\nthe group, such as storage, output, where None will apply the\narguments to all helpers. The category can be used to add arguments\nto a specific group of registered helpers.\nnames (Optional[list[str]]): names of argument helpers to apply,\nwhere None will apply the arguments to all helpers.", "source": "codesearchnet"}
{"code": "def sample(self, features):\n    (logits, losses) = self(features)\n    if self._target_modality_is_real:\n        return (logits, logits, losses)\n    if (self.hparams.sampling_method == 'argmax'):\n        samples = tf.argmax(logits, axis=(- 1))\n    else:\n        assert (self.hparams.sampling_method == 'random')\n\n        def multinomial_squeeze(logits, temperature=1.0):\n            logits_shape = common_layers.shape_list(logits)\n            reshaped_logits = (tf.reshape(logits, [(- 1), logits_shape[(- 1)]]) / temperature)\n            choices = tf.multinomial(reshaped_logits, 1)\n            choices = tf.reshape(choices, logits_shape[:(- 1)])\n            return choices\n        samples = multinomial_squeeze(logits, self.hparams.sampling_temp)\n    return (samples, logits, losses)", "docstring": "Run the model and extract samples.\n\nArgs:\nfeatures: an map of string to `Tensor`.\n\nReturns:\nsamples: an integer `Tensor`.\nlogits: a list of `Tensor`s, one per datashard.\nlosses: a dictionary: {loss-name (string): floating point `Scalar`}.", "source": "codesearchnet"}
{"code": "def ToCsv(self, columns_order=None, order_by=(), separator=','):\n    csv_buffer = six.StringIO()\n    writer = csv.writer(csv_buffer, delimiter=separator)\n    if (columns_order is None):\n        columns_order = [col['id'] for col in self.__columns]\n    col_dict = dict([(col['id'], col) for col in self.__columns])\n\n    def ensure_str(s):\n        'Compatibility function. Ensures using of str rather than unicode.'\n        if isinstance(s, str):\n            return s\n        return s.encode('utf-8')\n    writer.writerow([ensure_str(col_dict[col]['label']) for col in columns_order])\n    for (row, unused_cp) in self._PreparedData(order_by):\n        cells_list = []\n        for col in columns_order:\n            value = ''\n            if ((col in row) and (row[col] is not None)):\n                value = self.CoerceValue(row[col], col_dict[col]['type'])\n            if isinstance(value, tuple):\n                if (col_dict[col]['type'] in ['date', 'datetime', 'timeofday']):\n                    cells_list.append(ensure_str(self.ToString(value[1])))\n                else:\n                    cells_list.append(ensure_str(self.ToString(value[0])))\n            else:\n                cells_list.append(ensure_str(self.ToString(value)))\n        writer.writerow(cells_list)\n    return csv_buffer.getvalue()", "docstring": "Writes the data table as a CSV string.\n\nOutput is encoded in UTF-8 because the Python \"csv\" module can't handle\nUnicode properly according to its documentation.\n\nArgs:\ncolumns_order: Optional. Specifies the order of columns in the\noutput table. Specify a list of all column IDs in the order\nin which you want the table created.\nNote that you must list all column IDs in this parameter,\nif you use it.\norder_by: Optional. Specifies the name of the column(s) to sort by.\nPassed as is to _PreparedData.\nseparator: Optional. The separator to use between the values.\n\nReturns:\nA CSV string representing the table.\nExample result:\n'a','b','c'\n1,'z',2\n3,'w',''\n\nRaises:\nDataTableException: The data does not match the type.", "source": "codesearchnet"}
{"code": "def __get__(self, inst, cls):\n        \n\n        if inst is None:\n            return self._unbound_method\n        else:\n            if not hasattr(inst, INSTANCE_OBSERVER_ATTR):\n                d = {}\n                setattr(inst, INSTANCE_OBSERVER_ATTR, d)\n            else:\n                d = getattr(inst, INSTANCE_OBSERVER_ATTR)\n            observers = d.setdefault(self._func.__name__, {})\n        return ObservableBoundMethod(self._func, inst, observers)", "docstring": "Return an ObservableBoundMethod or ObservableUnboundMethod.\n\nIf accessed by instance, I return an ObservableBoundMethod which\nhandles that instance. If accessed by class I return an\nObservableUnboundMethod.\n\nArgs:\ninst: The instance through which I was accessed. This will be None\nif I was accessed through the class, i.e. as an unbound method.\ncls: The class through which I was accessed.", "source": "juraj-google-style"}
{"code": "def updateFeatureService(self, efs_config):\n        \n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        fsRes = None\n        fst = None\n        fURL = None\n        resItm= None\n        try:\n\n            fsRes = []\n            fst = featureservicetools.featureservicetools(securityinfo=self)\n\n\n            if isinstance(efs_config, list):\n                for ext_service in efs_config:\n                    fURL = None\n                    cs = 0\n                    try:\n                        if 'ChunkSize' in ext_service:\n                            if common.is_number(ext_service['ChunkSize']):\n                                cs = ext_service['ChunkSize']\n                    except Exception as e:\n                        pass\n                    resItm={\"DeleteDetails\": None,\"AddDetails\":None}\n                    if 'ItemId' in ext_service and 'LayerName' in ext_service:\n                        fs = fst.GetFeatureService(itemId=ext_service['ItemId'],returnURLOnly=False)\n                        if not fs is None:\n                            fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=ext_service['LayerName'],returnURLOnly=True)\n                    if fURL is None and 'URL' in ext_service:\n\n                        fURL = ext_service['URL']\n                    if fURL is None:\n                        print(\"Item and layer not found or URL not in config\")\n                        continue\n\n                    if 'DeleteInfo' in ext_service:\n                        if str(ext_service['DeleteInfo']['Delete']).upper() == \"TRUE\":\n                            resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=ext_service['DeleteInfo']['DeleteSQL'],chunksize=cs)\n                            if not 'error' in resItm['DeleteDetails'] :\n                                print (\"Delete Successful: %s\" % fURL)\n                            else:\n                                print (str(resItm['DeleteDetails']))\n\n                    resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = ext_service['FeatureClass'],chunksize=cs)\n\n                    fsRes.append(resItm)\n\n                    if not 'error' in resItm['AddDetails']:\n                        print (\"Add Successful: %s \" % fURL)\n                    else:\n                        print (str(resItm['AddDetails']))\n\n            else:\n                resItm={\"DeleteDetails\": None,\"AddDetails\":None}\n                fURL = efs_config['URL']\n                cs = 0\n                try:\n                    if 'ChunkSize' in efs_config:\n                        if common.is_number(efs_config['ChunkSize']):\n                            cs = efs_config['ChunkSize']\n                except Exception as e:\n                    pass\n                if 'ItemId' in efs_config and 'LayerName' in efs_config:\n                    fs = fst.GetFeatureService(itemId=efs_config['ItemId'],returnURLOnly=False)\n                    if not fs is None:\n                        fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=efs_config['LayerName'],returnURLOnly=True)\n                if fURL is None and 'URL' in efs_config:\n\n                    fURL = efs_config['URL']\n                if fURL is None:\n                    print(\"Item and layer not found or URL not in config\")\n                    return None\n                if 'DeleteInfo' in efs_config:\n    
                if str(efs_config['DeleteInfo']['Delete']).upper() == \"TRUE\":\n                        resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=efs_config['DeleteInfo']['DeleteSQL'],chunksize=cs)\n                        if not 'error' in resItm['DeleteDetails'] :\n                            print (\"            Delete Successful: %s\" % fURL)\n                        else:\n                            print (\"            \" + str(resItm['DeleteDetails']))\n\n                resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = efs_config['FeatureClass'],chunksize=cs)\n\n                fsRes.append(resItm)\n\n                if not 'error' in resItm['AddDetails']:\n                    print (\"            Add Successful: %s \" % fURL)\n                else:\n                    print (\"            \" + str(resItm['AddDetails']))\n\n            return fsRes\n\n        except common.ArcRestHelperError as e:\n            raise e\n        except Exception as e:\n\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                \"function\": \"updateFeatureService\",\n                \"line\": line,\n                \"filename\":  filename,\n                \"synerror\": synerror,\n            })\n        finally:\n            fst = None\n            fURL = None\n            resItm= None\n\n            del fst\n            del fURL\n            del resItm\n\n            gc.collect()", "docstring": "Updates a feature service.\n\nArgs:\nefs_config (list): A list of JSON configuration feature service details to update.\nReturns:\ndict: A dictionary of results objects.", "source": "juraj-google-style"}
{"code": "def __init__(self, tcex, name, description, data_type, interval, keyed=False):\n        \n        self.tcex = tcex\n        self._metric_data_type = data_type\n        self._metric_description = description\n        self._metric_id = None\n        self._metric_interval = interval\n        self._metric_keyed = keyed\n        self._metric_name = name\n\n        if not self.metric_find():\n            self.metric_create()", "docstring": "Initialize the Class properties.\n\nArgs:\nname (str): The name for the metric.\ndescription (str): The description of the metric.\ndata_type (str): The type of metric: Sum, Count, Min, Max, First, Last, and Average.\ninterval (str): The metric interval: Hourly, Daily, Weekly, Monthly, and Yearly.\nkeyed (bool, default:False): Indicates whether the data will have a keyed value.", "source": "juraj-google-style"}
{"code": "def lchmod(self, path, mode):\n    if self.filesystem.is_windows_fs:\n        raise (NameError, \"name 'lchmod' is not defined\")\n    self.filesystem.chmod(path, mode, follow_symlinks=False)", "docstring": "Change the permissions of a file as encoded in integer mode.\nIf the file is a link, the permissions of the link are changed.\n\nArgs:\npath: (str) Path to the file.\nmode: (int) Permissions.", "source": "codesearchnet"}
{"code": "def apply_inverse(self, y):\n    self._recompute()\n    return self.solver.solve(self._process_input(y))", "docstring": "Apply the inverse of the covariance matrix to a vector or matrix\n\nSolve ``K.x = y`` for ``x`` where ``K`` is the covariance matrix of\nthe GP with the white noise and ``yerr`` components included on the\ndiagonal.\n\nArgs:\ny (array[n] or array[n, nrhs]): The vector or matrix ``y``\ndescribed above.\n\nReturns:\narray[n] or array[n, nrhs]: The solution to the linear system.\nThis will have the same shape as ``y``.\n\nRaises:\nValueError: For mismatched dimensions.", "source": "codesearchnet"}
{"code": "def get_last_next(self, date):\n        \n        past, future = (None, None), (None, None)\n\n        for mjd, value in reversed(self.data):\n            if mjd <= date:\n                past = (mjd, value)\n                break\n            future = (mjd, value)\n\n        return past, future", "docstring": "Provide the last and next leap-second events relative to a date\n\nArgs:\ndate (float): Date in MJD\nReturn:\ntuple:", "source": "juraj-google-style"}
{"code": "def ExpandSignature(sig):\n    params = []\n    for param in sig.params:\n        if isinstance(param.type, pytd.UnionType):\n            params.append([param.Replace(type=t) for t in param.type.type_list])\n        else:\n            params.append([param])\n    new_signatures = [sig.Replace(params=tuple(combination)) for combination in itertools.product(*params)]\n    return new_signatures", "docstring": "Expand a single signature.\n\nFor argument lists that contain disjunctions, generates all combinations\nof arguments. The expansion will be done right to left.\nE.g., from (a or b, c or d), this will generate the signatures\n(a, c), (a, d), (b, c), (b, d). (In that order)\n\nArguments:\nsig: A pytd.Signature instance.\n\nReturns:\nA list. The visit function of the parent of this node (VisitFunction) will\nprocess this list further.", "source": "github-repos"}
{"code": "def CaptureFrameLocals(self, frame):\n    \n    \n    variables = {n: self.CaptureNamedVariable(n, v, 1,\n                                              self.default_capture_limits)\n                 for n, v in six.viewitems(frame.f_locals)}\n\n    \n    nargs = frame.f_code.co_argcount\n    if frame.f_code.co_flags & inspect.CO_VARARGS: nargs += 1\n    if frame.f_code.co_flags & inspect.CO_VARKEYWORDS: nargs += 1\n\n    frame_arguments = []\n    for argname in frame.f_code.co_varnames[:nargs]:\n      if argname in variables: frame_arguments.append(variables.pop(argname))\n\n    return (frame_arguments, list(six.viewvalues(variables)))", "docstring": "Captures local variables and arguments of the specified frame.\n\nArgs:\nframe: frame to capture locals and arguments.\n\nReturns:\n(arguments, locals) tuple.", "source": "juraj-google-style"}
{"code": "def _transform_filter_to_sql(filter_block, node, context):\n    expression = filter_block.predicate\n    return _expression_to_sql(expression, node, context)", "docstring": "Transform a Filter block to its corresponding SQLAlchemy expression.\n\nArgs:\nfilter_block: Filter, the Filter block to transform.\nnode: SqlNode, the node Filter block applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression equivalent to the Filter.predicate expression.", "source": "codesearchnet"}
{"code": "def _MaxPoolAlongCols(self, input_matrix, col_seq, overlapping):\n    input_matrix = input_matrix.transpose()\n    output_matrix = self._MaxPoolAlongRows(input_matrix, col_seq, overlapping)\n    return output_matrix.transpose()", "docstring": "Perform max pool along column of a 2-D matrix based on col_seq.\n\nArgs:\ninput_matrix: A 2-D matrix.\ncol_seq: Cumulative pooling sequence along column.\noverlapping: Whether or not use overlapping when pooling.\n\nReturns:\nA 2-D matrix, with\n* num_rows = input_matrix.num_rows\n* num_cols = len(col_seq)-1.", "source": "github-repos"}
{"code": "def HasColumn(self, table_name, column_name):\n    \n    if not self._connection:\n      raise IOError('Not opened.')\n\n    if not column_name:\n      return False\n\n    table_name = table_name.lower()\n    column_names = self._column_names_per_table.get(table_name, None)\n    if column_names is None:\n      column_names = []\n\n      self._cursor.execute(self._HAS_COLUMN_QUERY.format(table_name))\n      for row in self._cursor.fetchall():\n        if not row[1]:\n          continue\n\n        row_column_name = row[1]\n        if isinstance(row_column_name, bytes):\n          row_column_name = row_column_name.decode('utf-8')\n\n        column_names.append(row_column_name.lower())\n\n      self._column_names_per_table[table_name] = column_names\n\n    column_name = column_name.lower()\n    return column_name in column_names", "docstring": "Determines if a specific column exists.\n\nArgs:\ntable_name (str): name of the table.\ncolumn_name (str): name of the column.\n\nReturns:\nbool: True if the column exists.\n\nRaises:\nIOError: if the database file is not opened.\nOSError: if the database file is not opened.", "source": "juraj-google-style"}
{"code": "def perform(self, agent_indices, observ):\n    with tf.name_scope('perform/'):\n        observ = self._observ_filter.transform(observ)\n        if (self._last_state is None):\n            state = None\n        else:\n            state = tools.nested.map((lambda x: tf.gather(x, agent_indices)), self._last_state)\n        with tf.device(('/gpu:0' if self._use_gpu else '/cpu:0')):\n            output = self._network(observ[(:, None)], tf.ones(observ.shape[0]), state)\n        action = tf.cond(self._is_training, output.policy.sample, output.policy.mode)\n        logprob = output.policy.log_prob(action)[(:, 0)]\n        summary = tf.cond(self._should_log, (lambda : tf.summary.merge([tf.summary.histogram('mode', output.policy.mode()[(:, 0)]), tf.summary.histogram('action', action[(:, 0)]), tf.summary.histogram('logprob', logprob)])), str)\n        if (self._last_state is None):\n            assign_state = tf.no_op()\n        else:\n            assign_state = utility.assign_nested_vars(self._last_state, output.state, agent_indices)\n        remember_last_action = tf.scatter_update(self._last_action, agent_indices, action[(:, 0)])\n        policy_params = tools.nested.filter((lambda x: isinstance(x, tf.Tensor)), output.policy.parameters)\n        assert policy_params, 'Policy has no parameters to store.'\n        remember_last_policy = tools.nested.map((lambda var, val: tf.scatter_update(var, agent_indices, val[(:, 0)])), self._last_policy, policy_params, flatten=True)\n        with tf.control_dependencies(((assign_state, remember_last_action) + remember_last_policy)):\n            return (action[(:, 0)], tf.identity(summary))", "docstring": "Compute batch of actions and a summary for a batch of observation.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\nobserv: Tensor of a batch of observations for all agents.\n\nReturns:\nTuple of action batch tensor and summary tensor.", "source": "codesearchnet"}
{"code": "def AddEventTag(self, event_tag):\n    \n    self._RaiseIfNotWritable()\n\n    event_identifier = event_tag.GetEventIdentifier()\n    if not isinstance(event_identifier, identifiers.FakeIdentifier):\n      raise IOError('Unsupported event identifier type: {0:s}'.format(\n          type(event_identifier)))\n\n    event_tag = self._PrepareAttributeContainer(event_tag)\n\n    self._event_tags.append(event_tag)\n    self.number_of_event_tags += 1", "docstring": "Adds an event tag.\n\nArgs:\nevent_tag (EventTag): event tag.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def get_assignee(self, main_type, sub_type, unique_id, assignee_id, params=None):\n        \n        params = params or {}\n\n        return self.assignee(main_type, sub_type, unique_id, assignee_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nassignee_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def is_test_executed(self, test_name):\n    for record in self.executed:\n        if record.test_name == test_name:\n            return True\n    return False", "docstring": "Checks if a specific test has been executed.\n\nArgs:\ntest_name: string, the name of the test to check.\n\nReturns:\nTrue if the test has been executed according to the test result,\nFalse otherwise.", "source": "github-repos"}
{"code": "def check_time(timer_id):\n    if (timer_id not in _g_timers):\n        _g_timers[timer_id] = Timer()\n        return 0\n    else:\n        return _g_timers[timer_id].since_last_check()", "docstring": "Add check points in a single line.\n\nThis method is suitable for running a task on a list of items. A timer will\nbe registered when the method is called for the first time.\n\n:Example:\n\n>>> import time\n>>> import mmcv\n>>> for i in range(1, 6):\n>>>     # simulate a code block\n>>>     time.sleep(i)\n>>>     mmcv.check_time('task1')\n2.000\n3.000\n4.000\n5.000\n\nArgs:\ntimer_id (str): Timer identifier.", "source": "codesearchnet"}
{"code": "def broker_metadata(self, broker_id):\n        \n        return self._brokers.get(broker_id) or self._bootstrap_brokers.get(broker_id)", "docstring": "Get BrokerMetadata\n\nArguments:\nbroker_id (int): node_id for a broker to check\n\nReturns:\nBrokerMetadata or None if not found", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(CreateKeyPairRequestPayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self.is_tag_next(enums.Tags.COMMON_TEMPLATE_ATTRIBUTE, local_buffer):\n            self._common_template_attribute = objects.TemplateAttribute(tag=enums.Tags.COMMON_TEMPLATE_ATTRIBUTE)\n            self._common_template_attribute.read(local_buffer, kmip_version=kmip_version)\n    elif self.is_tag_next(enums.Tags.COMMON_ATTRIBUTES, local_buffer):\n        attributes = objects.Attributes(tag=enums.Tags.COMMON_ATTRIBUTES)\n        attributes.read(local_buffer, kmip_version=kmip_version)\n        self._common_template_attribute = objects.convert_attributes_to_template_attribute(attributes)\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self.is_tag_next(enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE, local_buffer):\n            self._private_key_template_attribute = objects.TemplateAttribute(tag=enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE)\n            self._private_key_template_attribute.read(local_buffer, kmip_version=kmip_version)\n    elif self.is_tag_next(enums.Tags.PRIVATE_KEY_ATTRIBUTES, local_buffer):\n        attributes = objects.Attributes(tag=enums.Tags.PRIVATE_KEY_ATTRIBUTES)\n        attributes.read(local_buffer, kmip_version=kmip_version)\n        self._private_key_template_attribute = objects.convert_attributes_to_template_attribute(attributes)\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        if self.is_tag_next(enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE, local_buffer):\n            self._public_key_template_attribute = objects.TemplateAttribute(tag=enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE)\n            self._public_key_template_attribute.read(local_buffer, kmip_version=kmip_version)\n    elif self.is_tag_next(enums.Tags.PUBLIC_KEY_ATTRIBUTES, local_buffer):\n        attributes = objects.Attributes(tag=enums.Tags.PUBLIC_KEY_ATTRIBUTES)\n        attributes.read(local_buffer, kmip_version=kmip_version)\n        self._public_key_template_attribute = objects.convert_attributes_to_template_attribute(attributes)\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the CreateKeyPair request payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data buffer containing encoded object\ndata, supporting a read method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True):\n    if _verify:\n        if (not isinstance(other, Surface)):\n            raise TypeError('Can only intersect with another surface', 'Received', other)\n        if ((self._dimension != 2) or (other._dimension != 2)):\n            raise NotImplementedError('Intersection only implemented in 2D')\n    if (strategy == _STRATEGY.GEOMETRIC):\n        do_intersect = _surface_intersection.geometric_intersect\n    elif (strategy == _STRATEGY.ALGEBRAIC):\n        do_intersect = _surface_intersection.algebraic_intersect\n    else:\n        raise ValueError('Unexpected strategy.', strategy)\n    (edge_infos, contained, all_edge_nodes) = do_intersect(self._nodes, self._degree, other._nodes, other._degree, _verify)\n    if (edge_infos is None):\n        if contained:\n            return [self]\n        else:\n            return [other]\n    else:\n        return [_make_intersection(edge_info, all_edge_nodes) for edge_info in edge_infos]", "docstring": "Find the common intersection with another surface.\n\nArgs:\nother (Surface): Other surface to intersect with.\nstrategy (Optional[~bezier.curve.IntersectionStrategy]): The\nintersection algorithm to use. Defaults to geometric.\n_verify (Optional[bool]): Indicates if extra caution should be\nused to verify assumptions about the algorithm as it\nproceeds. Can be disabled to speed up execution time.\nDefaults to :data:`True`.\n\nReturns:\nList[Union[~bezier.curved_polygon.CurvedPolygon, \\\n~bezier.surface.Surface]]: List of intersections (possibly empty).\n\nRaises:\nTypeError: If ``other`` is not a surface (and ``_verify=True``).\nNotImplementedError: If at least one of the surfaces\nisn't two-dimensional (and ``_verify=True``).\nValueError: If ``strategy`` is not a valid\n:class:`.IntersectionStrategy`.", "source": "codesearchnet"}
{"code": "def seat_button_count(self):\n    if (self.type != EventType.TABLET_TOOL_BUTTON):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_tablet_tool_get_seat_button_count(self._handle)", "docstring": "The total number of buttons pressed on all devices on\nthe associated seat after the the event was triggered.\n\nFor events that are not of type\n:attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`, this property\nraises :exc:`AttributeError`.\n\nReturns:\nint: The seat wide pressed button count for the key of this event.", "source": "codesearchnet"}
{"code": "def load_resource(path):\n    with open(get_path_to_datafile(path), 'rb') as f:\n        return f.read()", "docstring": "Load the resource at given path, where path is relative to tensorflow/.\n\nArgs:\npath: a string resource path relative to tensorflow/.\n\nReturns:\nThe contents of that resource.\n\nRaises:\nIOError: If the path is not found, or the resource can't be opened.", "source": "github-repos"}
{"code": "def get(self: 'Option[Mapping[K,V]]', key: K, default=None) -> 'Option[V]':\n    if self._is_some:\n        return self._type.maybe(self._val.get(key, default))\n    return self._type.maybe(default)", "docstring": "Gets a mapping value by key in the contained value or returns\n``default`` if the key doesn't exist.\n\nArgs:\nkey: The mapping key.\ndefault: The defauilt value.\n\nReturns:\n* ``Some`` variant of the mapping value if the key exists\nand the value is not None.\n* ``Some(default)`` if ``default`` is not None.\n* :py:data:`NONE` if ``default`` is None.\n\nExamples:\n>>> Some({'hi': 1}).get('hi')\nSome(1)\n>>> Some({}).get('hi', 12)\nSome(12)\n>>> NONE.get('hi', 12)\nSome(12)\n>>> NONE.get('hi')\nNONE", "source": "codesearchnet"}
{"code": "def default(self, obj):\n    from ..model import Model\n    from ..colors import Color\n    from .has_props import HasProps\n    if (pd and isinstance(obj, (pd.Series, pd.Index))):\n        return transform_series(obj, force_list=True)\n    elif isinstance(obj, np.ndarray):\n        return transform_array(obj, force_list=True)\n    elif isinstance(obj, collections.deque):\n        return list(map(self.default, obj))\n    elif isinstance(obj, Model):\n        return obj.ref\n    elif isinstance(obj, HasProps):\n        return obj.properties_with_values(include_defaults=False)\n    elif isinstance(obj, Color):\n        return obj.to_css()\n    else:\n        return self.transform_python_types(obj)", "docstring": "The required ``default`` method for ``JSONEncoder`` subclasses.\n\nArgs:\nobj (obj) :\n\nThe object to encode. Anything not specifically handled in\nthis method is passed on to the default system JSON encoder.", "source": "codesearchnet"}
{"code": "def choose_branch(exclude=None):\n    \n    \n    if exclude is None:\n        master = conf.get('git.master_branch', 'master')\n        develop = conf.get('git.devel_branch', 'develop')\n        exclude = {master, develop}\n\n    branches = list(set(git.branches()) - exclude)\n\n    \n    for i, branch_name in enumerate(branches):\n        shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))\n\n    \n    choice = 0\n    while choice < 1 or choice > len(branches):\n        prompt = \"Pick a base branch from the above [1-{}]\".format(\n            len(branches)\n        )\n        choice = click.prompt(prompt, value_proc=int)\n        if not (1 <= choice <= len(branches)):\n            fmt = \"Invalid choice {}, you must pick a number between {} and {}\"\n            log.err(fmt.format(choice, 1, len(branches)))\n\n    return branches[choice - 1]", "docstring": "Show the user a menu to pick a branch from the existing ones.\n\nArgs:\nexclude (list[str]):\nList of branch names to exclude from the menu. By default it will\nexclude master and develop branches. To show all branches pass an\nempty array here.\n\nReturns:\nstr: The name of the branch chosen by the user. If the user inputs an\ninvalid choice, he will be asked again (and again) until he picks a\na valid branch.", "source": "juraj-google-style"}
{"code": "def do_batch(args):\n    \n    if args.subcommand == 'list':\n        do_batch_list(args)\n\n    if args.subcommand == 'show':\n        do_batch_show(args)\n\n    if args.subcommand == 'status':\n        do_batch_status(args)\n\n    if args.subcommand == 'submit':\n        do_batch_submit(args)", "docstring": "Runs the batch list, batch show or batch status command, printing output\nto the console\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "juraj-google-style"}
{"code": "def _WriteFile(output_path, name, content):\n    path = os.path.join(output_path, name)\n    with open(path, 'wb') as f:\n        f.write(content)\n    return path", "docstring": "Write given content to a file in a given directory.\n\nArgs:\noutput_path: The directory to store the file in.\nname: The name of the file to store the content in.\ncontent: The content to write to the file.close\n\nReturns:\nThe full path to the written file.", "source": "codesearchnet"}
{"code": "def valueWritePreprocessor(valueString, replaceParamsFile=None):\n    \n    if type(valueString) is bool:\n        log.warning(\"Only numerical variable types can be handled by the valueReadPreprocessor function.\")\n        return valueString\n\n    \n    variableString = valueString\n\n    \n    if replaceParamsFile is not None:\n        \n        if variableString == REPLACE_NO_VALUE:\n            variableString = '[NO_VARIABLE]'\n        else:\n            try:\n                number = int(valueString)\n                if number < 0:\n                    parameterID = number * -1\n\n                    \n                    for targetParam in replaceParamsFile.targetParameters:\n                        if targetParam.id == parameterID:\n                            variableString = targetParam.targetVariable\n                            break\n            except:\n                pass\n\n    return variableString", "docstring": "Look up variable name in replace param file for the negative id given and return it.\n\nArgs:\nvalueString (str): String representing the value to be preprocessed.\nreplaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if\nreplacement variables are included in the project.\n\nReturns:\nstr: Processed value as a string", "source": "juraj-google-style"}
{"code": "def write_compounds(self, stream, compounds, properties=None):\n        \n        self._write_entries(\n            stream, compounds, self.convert_compound_entry, properties)", "docstring": "Write iterable of compounds as YAML object to stream.\n\nArgs:\nstream: File-like object.\ncompounds: Iterable of compound entries.\nproperties: Set of compound properties to output (or None to output\nall).", "source": "juraj-google-style"}
{"code": "def profile_graph(self, options):\n    opts = _build_options(options)\n    tfprof_node = tfprof_output_pb2.GraphNodeProto()\n    try:\n        tfprof_node.ParseFromString(print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))\n    except message.DecodeError as e:\n        sys.stderr.write('Cannot parse returned proto: %s.\\n' % e)\n    return tfprof_node", "docstring": "Profile the statistics of graph nodes, organized by dataflow graph.\n\nArgs:\noptions: A dict of options. See core/profiler/g3doc/options.md.\n\nReturns:\na GraphNodeProto that records the results.", "source": "github-repos"}
{"code": "def validate(self,\n                 proxy_scanner,\n                 expected_num=20,\n                 queue_timeout=3,\n                 val_timeout=5):\n        \n        while self.proxy_num() < expected_num:\n            try:\n                candidate_proxy = proxy_scanner.proxy_queue.get(\n                    timeout=queue_timeout)\n            except queue.Empty:\n                if proxy_scanner.is_scanning():\n                    continue\n                else:\n                    break\n            addr = candidate_proxy['addr']\n            protocol = candidate_proxy['protocol']\n            ret = self.is_valid(addr, protocol, val_timeout)\n            if self.proxy_num() >= expected_num:\n                self.logger.info('Enough valid proxies, thread {} exit.'\n                                 .format(threading.current_thread().name))\n                break\n            if ret['valid']:\n                self.add_proxy(Proxy(addr, protocol))\n                self.logger.info('{} ok, {:.2f}s'.format(addr, ret[\n                    'response_time']))\n            else:\n                self.logger.info('{} invalid, {}'.format(addr, ret['msg']))", "docstring": "Target function of validation threads\n\nArgs:\nproxy_scanner: A ProxyScanner object.\nexpected_num: Max number of valid proxies to be scanned.\nqueue_timeout: Timeout for getting a proxy from the queue.\nval_timeout: An integer passed to `is_valid` as argument `timeout`.", "source": "juraj-google-style"}
{"code": "def __make_request(self, url, method, data, auth, cookies, headers, proxies, timeout, verify):\n    request_by_method = getattr(requests, method)\n    return request_by_method(url=url, data=data, auth=auth, cookies=cookies, headers=headers, proxies=proxies, timeout=timeout, verify=verify, allow_redirects=True, stream=False)", "docstring": "Execute a request with the given data.\n\nArgs:\nurl (str): The URL to call.\nmethod (str): The method (e.g. `get` or `post`).\ndata (str): The data to call the URL with.\nauth (obj): The authentication class.\ncookies (obj): The cookie dict.\nheaders (obj): The header dict.\nproxies (obj): The proxies dict.\ntimeout (int): The request timeout in seconds.\nverify (mixed): SSL verification.\n\nReturns:\nobj: The response object.", "source": "codesearchnet"}
{"code": "def is_layouts_same(self, embedding_layouts) -> bool:\n    if self._checkpoint_layouts.keys() != embedding_layouts.keys():\n        raise ValueError('Layouts in checkpoint and embedding must have the same keys. found {} and {}'.format(self._checkpoint_layouts.keys(), embedding_layouts.keys()))\n    for key, layout in self._checkpoint_layouts.items():\n        if not compare.ProtoEq(layout, embedding_layouts[key]):\n            logging.info('Layouts do not match for %s this will require resharding; %s vs %s', key, layout, embedding_layouts[key])\n            return False\n    return True", "docstring": "Returns True if the all the embedding and checkpoint layouts are the same.\n\nArgs:\nembedding_layouts: dict of layouts for embedding tables.\n\nRaises: ValueError if the embedding layouts and checkpoint layouts do not\nhave the same keys.\nReturns: Bool representing if the embedding layouts match the layouts in\ncheckpoint.", "source": "github-repos"}
{"code": "def VerifyCipherSignature(self, remote_public_key):\n    if (self.cipher_metadata.signature and remote_public_key):\n        stats_collector_instance.Get().IncrementCounter('grr_rsa_operations')\n        remote_public_key.Verify(self.serialized_cipher, self.cipher_metadata.signature)\n        return True", "docstring": "Verifies the signature on the encrypted cipher block.\n\nThis method returns True if the signature verifies correctly with\nthe key given.\n\nArgs:\nremote_public_key: The remote public key.\n\nReturns:\nNone\nRaises:\nrdf_crypto.VerificationError: A signature and a key were both given but\nverification fails.", "source": "codesearchnet"}
{"code": "def emit_completion(self, completion_percent):\n        \n        completion_mode = XBlockCompletionMode.get_mode(self)\n        if not self.has_custom_completion or completion_mode != XBlockCompletionMode.COMPLETABLE:\n            raise AttributeError(\n                \"Using `emit_completion` requires `has_custom_completion == True` (was {}) \"\n                \"and `completion_mode == 'completable'` (was {})\".format(\n                    self.has_custom_completion, completion_mode,\n                )\n            )\n\n        if completion_percent is None or not 0.0 <= completion_percent <= 1.0:\n            raise ValueError(\"Completion percent must be in [0.0; 1.0] interval, {} given\".format(completion_percent))\n\n        self.runtime.publish(\n            self,\n            'completion',\n            {'completion': completion_percent},\n        )", "docstring": "Emits completion event through Completion API.\n\nUnlike grading API, calling this method allows completion to go down - i.e. emitting a value of 0.0 on\na previously completed block indicates that it is no longer considered complete.\n\nArguments:\ncompletion_percent (float): Completion in range [0.0; 1.0] (inclusive), where 0.0 means the block\nis not completed, 1.0 means the block is fully completed.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def filter(self, items=None, like=None, regex=None, axis=None):\n        \n        nkw = count_not_none(items, like, regex)\n        if nkw > 1:\n            raise TypeError(\n                \"Keyword arguments `items`, `like`, or `regex` are mutually exclusive\"\n            )\n        if nkw == 0:\n            raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n        if axis is None:\n            axis = \"columns\"  \n\n        axis = self._get_axis_number(axis)\n        labels = self.columns if axis else self.index\n\n        if items is not None:\n            bool_arr = labels.isin(items)\n        elif like is not None:\n\n            def f(x):\n                return like in to_str(x)\n\n            bool_arr = labels.map(f).tolist()\n        else:\n\n            def f(x):\n                return matcher.search(to_str(x)) is not None\n\n            matcher = re.compile(regex)\n            bool_arr = labels.map(f).tolist()\n        if not axis:\n            return self[bool_arr]\n        return self[self.columns[bool_arr]]", "docstring": "Subset rows or columns based on their labels\n\nArgs:\nitems (list): list of labels to subset\nlike (string): retain labels where `arg in label == True`\nregex (string): retain labels matching regex input\naxis: axis to filter on\n\nReturns:\nA new DataFrame with the filter applied.", "source": "juraj-google-style"}
{"code": "def from_dict(d: Dict[(str, Any)]) -> 'CoverageInstructions':\n    name_type = d['type']\n    cls = _NAME_TO_INSTRUCTIONS[name_type]\n    return cls.from_dict(d)", "docstring": "Loads a set of coverage instructions from a given dictionary.\n\nRaises:\nBadCoverageInstructions: if the given coverage instructions are\nillegal.", "source": "codesearchnet"}
{"code": "def set_python_graph(self, python_graph):\n    self._python_graph = python_graph\n    self._node_traceback = {}\n    if self._python_graph:\n        for op in self._python_graph.get_operations():\n            self._node_traceback[op.name] = tuple(map(tuple, op.traceback))", "docstring": "Provide Python `Graph` object to the wrapper.\n\nUnlike the partition graphs, which are protobuf `GraphDef` objects, `Graph`\nis a Python object and carries additional information such as the traceback\nof the construction of the nodes in the graph.\n\nArgs:\npython_graph: (ops.Graph) The Python Graph object.", "source": "github-repos"}
{"code": "def load_from_dict(self, conf_dict=None):\n        \n        self.set_to_default()\n        self._update_dict(self._config, conf_dict)\n        self._update_python_paths()", "docstring": "Load the configuration from a dictionary.\n\nArgs:\nconf_dict (dict): Dictionary with the configuration.", "source": "juraj-google-style"}
{"code": "def __init__(self, pb_id):\n        \n        SchedulingObject.__init__(self, PB_KEY, pb_id)\n        self._check_object_exists()", "docstring": "Create a PB object.\n\nArgs:\npb_id (str): Processing Block Identifier\n\nRaises:\nKeyError, if the specified PB does not exist", "source": "juraj-google-style"}
{"code": "def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:\n    \n    extensions = set()\n    count = 0\n    for root, dirs, files in os.walk(path):\n        count += 1\n        if count % reportevery == 0:\n            log.debug(\"Walking directory {}: {!r}\", count, root)\n        for file in files:\n            filename, ext = os.path.splitext(file)\n            extensions.add(ext)\n    return sorted(list(extensions))", "docstring": "Returns a sorted list of every file extension found in a directory\nand its subdirectories.\n\nArgs:\npath: path to scan\nreportevery: report directory progress after every *n* steps\n\nReturns:\nsorted list of every file extension found", "source": "juraj-google-style"}
{"code": "def populate_settings_dir(force: bool=False) -> bool:\n    res = False\n    if (_default_settings_path == _settings_path):\n        return res\n    for src in list(_default_settings_path.glob('**/*.json')):\n        dest = (_settings_path / src.relative_to(_default_settings_path))\n        if ((not force) and dest.exists()):\n            continue\n        res = True\n        dest.parent.mkdir(parents=True, exist_ok=True)\n        shutil.copy(src, dest)\n    return res", "docstring": "Populate settings directory with default settings files\n\nArgs:\nforce: if ``True``, replace existing settings files with default ones\n\nReturns:\n``True`` if any files were copied and ``False`` otherwise", "source": "codesearchnet"}
{"code": "def __call__(self, text):\n        \n        text = remove(text, string.punctuation)\n        words = text.split()\n\n        invalid_words = list(filter(lambda word: word and word.lower() not in self.words, words))\n        return len(invalid_words) * self.floor", "docstring": "Score based on number of words not in the corpus.\n\nExample:\n>>> fitness = Corpus([\"example\"])\n>>> fitness(\"example\")\n0\n\n>>> fitness(\"different\")\n-2.0\n\nArgs:\ntext (str): The text to score\n\nReturns:\nCorpus score for text", "source": "juraj-google-style"}
{"code": "def simulate_values(cls, num_events, lr_scheduler, **kwargs):\n        \n        \n        \n        \n        copy_lr_scheduler = LRScheduler._replicate_lr_scheduler(lr_scheduler)\n        values = []\n        scheduler = cls(save_history=False, lr_scheduler=copy_lr_scheduler)\n        for i in range(num_events):\n            scheduler(engine=None)\n            values.append([i, scheduler.optimizer_param_groups[0][scheduler.param_name]])\n\n        return values", "docstring": "Method to simulate scheduled values during num_events events.\n\nArgs:\nnum_events (int): number of events during the simulation.\nlr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap.\n\nReturns:\nlist of pairs: [event_index, value]", "source": "juraj-google-style"}
{"code": "def __render_config_block(self, config_block):\n        \n        config_block_str = ''\n        for line in config_block:\n            if isinstance(line, config.Option):\n                line_str = self.__render_option(line)\n            elif isinstance(line, config.Config):\n                line_str = self.__render_config(line)\n            elif isinstance(line, config.Server):\n                line_str = self.__render_server(line)\n            elif isinstance(line, config.Bind):\n                line_str = self.__render_bind(line)\n            elif isinstance(line, config.Acl):\n                line_str = self.__render_acl(line)\n            elif isinstance(line, config.UseBackend):\n                line_str = self.__render_usebackend(line)\n            elif isinstance(line, config.User):\n                line_str = self.__render_user(line)\n            elif isinstance(line, config.Group):\n                line_str = self.__render_group(line)\n            \n            config_block_str = config_block_str + line_str\n\n        return config_block_str", "docstring": "Summary\n\nArgs:\nconfig_block [config.Item, ...]: config lines\n\nReturns:\nstr: config block str", "source": "juraj-google-style"}
{"code": "def LoadConfig(config_obj, config_file=None, config_fd=None, secondary_configs=None, contexts=None, reset=False, parser=ConfigFileParser):\n    if ((config_obj is None) or reset):\n        config_obj = _CONFIG.MakeNewConfig()\n    if (config_file is not None):\n        config_obj.Initialize(filename=config_file, must_exist=True, parser=parser)\n    elif (config_fd is not None):\n        config_obj.Initialize(fd=config_fd, parser=parser)\n    if secondary_configs:\n        for config_file in secondary_configs:\n            config_obj.LoadSecondaryConfig(config_file)\n    if contexts:\n        for context in contexts:\n            config_obj.AddContext(context)\n    return config_obj", "docstring": "Initialize a ConfigManager with the specified options.\n\nArgs:\nconfig_obj: The ConfigManager object to use and update. If None, one will be\ncreated.\nconfig_file: Filename to read the config from.\nconfig_fd: A file-like object to read config data from.\nsecondary_configs: A list of secondary config URLs to load.\ncontexts: Add these contexts to the config object.\nreset: Completely wipe previous config before doing the load.\nparser: Specify which parser to use.\n\nReturns:\nThe resulting config object. The one passed in, unless None was specified.", "source": "codesearchnet"}
{"code": "def cancel(batch_fn, cancel_fn, ops):\n  \n\n  \n  \n  \n\n  canceled_ops = []\n  error_messages = []\n\n  max_batch = 256\n  total_ops = len(ops)\n  for first_op in range(0, total_ops, max_batch):\n    batch_canceled, batch_messages = _cancel_batch(\n        batch_fn, cancel_fn, ops[first_op:first_op + max_batch])\n    canceled_ops.extend(batch_canceled)\n    error_messages.extend(batch_messages)\n\n  return canceled_ops, error_messages", "docstring": "Cancel operations.\n\nArgs:\nbatch_fn: API-specific batch function.\ncancel_fn: API-specific cancel function.\nops: A list of operations to cancel.\n\nReturns:\nA list of operations canceled and a list of error messages.", "source": "juraj-google-style"}
{"code": "def add_range_headers(self, range_header):\n        \n        self['Accept-Ranges'] = 'bytes'\n        size = self.ranged_file.size\n        try:\n            ranges = self.ranged_file.parse_range_header(range_header, size)\n        except ValueError:\n            ranges = None\n        \n        \n        if ranges is not None and len(ranges) == 1:\n            start, stop = ranges[0]\n            if start >= size:\n                \n                self.status_code = 416\n                return\n\n            if stop >= size:\n                stop = size\n\n            self.ranged_file.start = start\n            self.ranged_file.stop = stop\n            self['Content-Range'] = 'bytes %d-%d/%d' % (start, stop - 1, size)\n            self['Content-Length'] = stop - start\n            self.status_code = 206", "docstring": "Adds several headers that are necessary for a streaming file\nresponse, in order for Safari to play audio files. Also\nsets the HTTP status_code to 206 (partial content).\n\nArgs:\nrange_header (str): Browser HTTP_RANGE request header.", "source": "juraj-google-style"}
{"code": "def clause(self, *args, **kwargs):\n    if (args and isinstance(args[0], Clause)):\n        clause = args[0]\n    else:\n        clause = Clause(*args, **kwargs)\n    if (not clause.fields):\n        clause.fields = self.all_fields\n    if ((clause.wildcard & Query.WILDCARD_LEADING) and (clause.term[0] != Query.WILDCARD)):\n        clause.term = (Query.WILDCARD + clause.term)\n    if ((clause.wildcard & Query.WILDCARD_TRAILING) and (clause.term[(- 1)] != Query.WILDCARD)):\n        clause.term = (clause.term + Query.WILDCARD)\n    self.clauses.append(clause)\n    return self", "docstring": "Adds a `lunr.Clause` to this query.\n\nUnless the clause contains the fields to be matched all fields will be\nmatched. In addition a default boost of 1 is applied to the clause.\n\nIf the first argument is a `lunr.Clause` it will be mutated and added,\notherwise args and kwargs will be used in the constructor.\n\nReturns:\nlunr.Query: The Query itself.", "source": "codesearchnet"}
{"code": "def serialize_to_nested(self, name, datas):\n        \n        keys = datas.get('keys', None)\n        splitter = datas.get('splitter', self._DEFAULT_SPLITTER)\n\n        if not keys:\n            msg = (\"Nested reference '{}' lacks of required 'keys' variable \"\n                   \"or is empty\")\n            raise SerializerError(msg.format(name))\n        else:\n            keys = self.value_splitter(name, 'keys', keys, mode=splitter)\n\n        \n        context = OrderedDict()\n        for k in keys:\n            context[k] = OrderedDict()\n\n        \n        for k, v in datas.items():\n            \n            if k not in ('keys', 'structure', 'splitter'):\n                values = self.value_splitter(name, 'values', v, mode=splitter)\n\n                if len(values) != len(keys):\n                    msg = (\"Nested reference '{}' has different length for \"\n                           \"values of '{}' and 'keys'\")\n                    raise SerializerError(msg.format(name, k))\n\n                \n                for i, item in enumerate(values):\n                    ref = keys[i]\n                    context[ref][k] = item\n\n        return context", "docstring": "Serialize given datas to a nested structure where each key create an\nitem and each other variable is stored as a subitem with corresponding\nvalue (according to key index position).\n\nArguments:\nname (string): Name only used inside possible exception message.\ndatas (dict): Datas to serialize.\n\nReturns:\ndict: Nested dictionnary of serialized reference datas.", "source": "juraj-google-style"}
{"code": "def merge_default_values(resource_list, default_values):\n    \n\n    def merge_item(resource):\n        return merge_resources(default_values, resource)\n\n    return lmap(merge_item, resource_list)", "docstring": "Generate a new list where each item of original resource_list will be merged with the default_values.\n\nArgs:\nresource_list: list with items to be merged\ndefault_values: properties to be merged with each item list. If the item already contains some property\nthe original value will be maintained.\n\nReturns:\nlist: list containing each item merged with default_values", "source": "juraj-google-style"}
{"code": "def get_iterator_spec_from_dataset(strategy, dataset):\n    output_element_spec = dataset.element_spec\n    if isinstance(dataset._type_spec, (DistributedDatasetSpec, DistributedDatasetsFromFunctionSpec)):\n        iterator_type_spec = DistributedIteratorSpec(strategy.extended._input_workers_with_options(), output_element_spec, strategy.extended._container_strategy(), options=None, cardinality=dataset.cardinality, enable_get_next_as_optional=True)\n    else:\n        if strategy.extended._num_gpus_per_worker:\n            logging.warning(f'{strategy.extended._num_gpus_per_worker} GPUs are allocated per worker. Please use DistributedDataset by calling strategy.experimental_distribute_dataset or strategy.distribute_datasets_from_function to make best use of GPU resources')\n        iterator_type_spec = iterator_ops.IteratorSpec(output_element_spec)\n    return iterator_type_spec", "docstring": "Returns an iterator spec from dataset function.\n\nThis function constructs type spec for iterator obtained from\niter(dataset).\n\nArgs:\nstrategy: a `tf.distribute.Strategy` object, used to run all-reduce to\nhandle last partial batch.\ndataset: A tf.data.Dataset instance. If using a function that returns a\ntf.data.Dataset instance, pass dataset_fn.structured_outputs.\n\nReturns:\nA type_spec for iterator for dataset instance.", "source": "github-repos"}
{"code": "def run(self, dag):\n        \n        coupling_map = self._coupling_map\n        ordered_virtual_gates = list(dag.serial_layers())\n\n        if self.initial_layout is None:\n            if self.property_set[\"layout\"]:\n                self.initial_layout = self.property_set[\"layout\"]\n            else:\n                self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())\n\n        if len(dag.qubits()) != len(self.initial_layout):\n            raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n\n        if len(self._coupling_map.physical_qubits) != len(self.initial_layout):\n            raise TranspilerError(\n                \"Mappers require to have the layout to be the same size as the coupling map\")\n\n        mapped_gates = []\n        layout = self.initial_layout.copy()\n        gates_remaining = ordered_virtual_gates.copy()\n\n        while gates_remaining:\n            best_step = _search_forward_n_swaps(layout, gates_remaining,\n                                                coupling_map)\n\n            layout = best_step['layout']\n            gates_mapped = best_step['gates_mapped']\n            gates_remaining = best_step['gates_remaining']\n\n            mapped_gates.extend(gates_mapped)\n\n        \n        mapped_dag = _copy_circuit_metadata(dag, coupling_map)\n\n        for node in mapped_gates:\n            mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs)\n\n        return mapped_dag", "docstring": "Run one pass of the lookahead mapper on the provided DAG.\n\nArgs:\ndag (DAGCircuit): the directed acyclic graph to be mapped\nReturns:\nDAGCircuit: A dag mapped to be compatible with the coupling_map in\nthe property_set.\nRaises:\nTranspilerError: if the coupling map or the layout are not\ncompatible with the DAG", "source": "juraj-google-style"}
{"code": "def merge_sketches(outdir, sketch_paths):\n    \n    merge_sketch_path = os.path.join(outdir, 'sistr.msh')\n    args = ['mash', 'paste', merge_sketch_path]\n    for x in sketch_paths:\n        args.append(x)\n    args.append(MASH_SKETCH_FILE)\n    logging.info('Running Mash paste with command: %s', ' '.join(args))\n    p = Popen(args)\n    p.wait()\n    assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)\n    return merge_sketch_path", "docstring": "Merge new Mash sketches with current Mash sketches\n\nArgs:\noutdir (str): output directory to write merged Mash sketch file\nsketch_paths (list of str): Mash sketch file paths for input fasta files\n\nReturns:\nstr: output path for Mash sketch file with new and old sketches", "source": "juraj-google-style"}
{"code": "def get_typed_value_descriptor(obj):\n    if isinstance(obj, (bytes, str)):\n        type_name = 'Text'\n    elif isinstance(obj, bool):\n        type_name = 'Boolean'\n    elif isinstance(obj, int):\n        type_name = 'Integer'\n    elif isinstance(obj, float):\n        type_name = 'Float'\n    else:\n        raise TypeError('Cannot get a type descriptor for %s.' % repr(obj))\n    return {'@type': 'http:", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nConverts a basic type into a @type/value dictionary.\n\nArgs:\nobj: A bytes, unicode, bool, int, or float to be converted.\n\nReturns:\nA dictionary containing the keys ``@type`` and ``value`` with the value for\nthe ``@type`` of appropriate type.\n\nRaises:\nTypeError: if the Python object has a type that is not\nsupported.", "source": "github-repos"}
{"code": "def _RegisterDebuggee(self, service):\n    \n    try:\n      request = {'debuggee': self._GetDebuggee()}\n\n      try:\n        response = service.debuggees().register(body=request).execute()\n\n        \n        \n        \n        \n        project_number = response['debuggee'].get('project')\n        self._project_number = project_number or self._project_number\n\n        self._debuggee_id = response['debuggee']['id']\n        native.LogInfo('Debuggee registered successfully, ID: %s' % (\n            self._debuggee_id))\n        self.register_backoff.Succeeded()\n        return (False, 0)  \n      except BaseException:\n        native.LogInfo('Failed to register debuggee: %s, %s' %\n                       (request, traceback.format_exc()))\n    except BaseException:\n      native.LogWarning('Debuggee information not available: ' +\n                        traceback.format_exc())\n\n    return (True, self.register_backoff.Failed())", "docstring": "Single attempt to register the debuggee.\n\nIf the registration succeeds, sets self._debuggee_id to the registered\ndebuggee ID.\n\nArgs:\nservice: client to use for API calls\n\nReturns:\n(registration_required, delay) tuple", "source": "juraj-google-style"}
{"code": "def var(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Var(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.var(x, axis=axis, keepdims=keepdims)", "docstring": "Compute the variance along the specified axes.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which the variance is computed. The default\nis to compute the variance of the flattened tensor.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one.\n\nReturns:\nOutput tensor containing the variance.", "source": "github-repos"}
{"code": "def route(self, dst=None, verbose=conf.verb):\n        \n        dst = dst or \"0.0.0.0\"  \n        if isinstance(dst, bytes):\n            try:\n                dst = plain_str(dst)\n            except UnicodeDecodeError:\n                raise TypeError(\"Unknown IP address input (bytes)\")\n        if dst in self.cache:\n            return self.cache[dst]\n        \n        _dst = dst.split(\"/\")[0].replace(\"*\", \"0\")\n        while True:\n            idx = _dst.find(\"-\")\n            if idx < 0:\n                break\n            m = (_dst[idx:] + \".\").find(\".\")\n            _dst = _dst[:idx] + _dst[idx + m:]\n\n        atol_dst = atol(_dst)\n        paths = []\n        for d, m, gw, i, a, me in self.routes:\n            if not a:  \n                continue\n            aa = atol(a)\n            if aa == atol_dst:\n                paths.append(\n                    (0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE, a, \"0.0.0.0\"))  \n                )\n            if (atol_dst & m) == (d & m):\n                paths.append((m, me, (i, a, gw)))\n\n        if not paths:\n            if verbose:\n                warning(\"No route found (no default route?)\")\n            return scapy.consts.LOOPBACK_INTERFACE, \"0.0.0.0\", \"0.0.0.0\"\n        \n        \n        paths.sort(key=lambda x: (-x[0], x[1]))\n        \n        ret = paths[0][2]\n        self.cache[dst] = ret\n        return ret", "docstring": "Returns the IPv4 routes to a host.\nparameters:\n- dst: the IPv4 of the destination host\n\nreturns: (iface, output_ip, gateway_ip)\n- iface: the interface used to connect to the host\n- output_ip: the outgoing IP that will be used\n- gateway_ip: the gateway IP that will be used", "source": "juraj-google-style"}
{"code": "def _time_delta_from_info(info):\n    delta_seconds = (int(time.time()) - info.start_time)\n    return str(datetime.timedelta(seconds=delta_seconds))", "docstring": "Format the elapsed time for the given TensorBoardInfo.\n\nArgs:\ninfo: A TensorBoardInfo value.\n\nReturns:\nA human-readable string describing the time since the server\ndescribed by `info` started: e.g., \"2 days, 0:48:58\".", "source": "codesearchnet"}
{"code": "def union(self, *others):\n    result = self.__copy__()\n    _elements = result._elements\n    _total = result._total\n    for other in map(self._as_mapping, others):\n        for (element, multiplicity) in other.items():\n            old_multiplicity = _elements.get(element, 0)\n            if (multiplicity > old_multiplicity):\n                _elements[element] = multiplicity\n                _total += (multiplicity - old_multiplicity)\n    result._total = _total\n    return result", "docstring": "r\"\"\"Return a new multiset with all elements from the multiset and the others with maximal multiplicities.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms.union('bc'))\n['a', 'a', 'b', 'c']\n\nYou can also use the ``|`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms | Multiset('aaa'))\n['a', 'a', 'a', 'b']\n\nFor a variant of the operation which modifies the multiset in place see\n:meth:`union_update`.\n\nArgs:\n*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].\n\nReturns:\nThe multiset resulting from the union.", "source": "codesearchnet"}
{"code": "def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]:\n    outputs = {}\n    index = 0\n    for modality in sorted(modality_sizes.keys()):\n        size = modality_sizes[modality]\n        inp = inputs[:, index:index + size]\n        index += size\n        outputs[modality] = inp\n    return outputs", "docstring": "Partitions a [B, N, C] tensor into tensors for each modality.\n\nArgs:\nmodality_sizes\ndict specifying the size of the modality\ninputs:\ninput tensor\n\nReturns:\ndict mapping name of modality to its associated tensor.", "source": "github-repos"}
{"code": "def test_error(self, e=None):\n    self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)", "docstring": "To mark the test as error in this record.\n\nArgs:\ne: An exception object.", "source": "github-repos"}
{"code": "def has_event_handler(self, handler, event_name=None):\n        \n        if event_name is not None:\n            if event_name not in self._event_handlers:\n                return False\n            events = [event_name]\n        else:\n            events = self._event_handlers\n        for e in events:\n            for h, _, _ in self._event_handlers[e]:\n                if h == handler:\n                    return True\n        return False", "docstring": "Check if the specified event has the specified handler.\n\nArgs:\nhandler (callable): the callable event handler.\nevent_name: The event the handler attached to. Set this\nto ``None`` to search all events.", "source": "juraj-google-style"}
{"code": "def get_value(self, field, quick):\n    if callable(field.default):\n        default = field.default(self)\n    else:\n        default = field.default\n    if (quick and (default is not None)):\n        return default\n    shell.cprint('<90>{}', field.help)\n    while True:\n        try:\n            answer = click.prompt(field.pretty_prompt, default=default)\n            return field.type(answer)\n        except ValueError:\n            shell.cprint('<31>Unsupported value')", "docstring": "Ask user the question represented by this instance.\n\nArgs:\nfield (Field):\nThe field we're asking the user to provide the value for.\nquick (bool):\nEnable quick mode. In quick mode, the form will reduce the\nnumber of question asked by using defaults wherever possible.\nThis can greatly reduce the number of interactions required on\nthe user part, but will obviously limit the user choices. This\nshould probably be enabled only by a specific user action\n(like passing a ``--quick`` flag etc.).\n\nReturns:\nThe user response converted to a python type using the\n:py:attr:`cliform.core.Field.type` converter.", "source": "codesearchnet"}
{"code": "def with_env_recursive(cmd, **envvars):\n    \n    from plumbum.commands.base import BoundCommand, BoundEnvCommand\n    if isinstance(cmd, BoundCommand):\n        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)\n    elif isinstance(cmd, BoundEnvCommand):\n        cmd.envvars.update(envvars)\n        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)\n    return cmd", "docstring": "Recursively updates the environment of cmd and all its subcommands.\n\nArgs:\ncmd - A plumbum command-like object\n**envvars - The environment variables to update\n\nReturns:\nThe updated command.", "source": "juraj-google-style"}
{"code": "def prepare_headers(headers: list[str], srcs_dir: str) -> None:\n    path_to_exclude = ['cuda_cccl/_virtual_includes', 'cuda_cublas/_virtual_includes', 'cuda_cudart/_virtual_includes', 'cuda_cudnn/_virtual_includes', 'cuda_cufft/_virtual_includes', 'cuda_cupti/_virtual_includes', 'cuda_curand/_virtual_includes', 'cuda_cusolver/_virtual_includes', 'cuda_cusparse/_virtual_includes', 'cuda_nccl/_virtual_includes', 'cuda_nvcc/_virtual_includes', 'cuda_nvjitlink/_virtual_includes', 'cuda_nvml/_virtual_includes', 'cuda_nvrtc/_virtual_includes', 'cuda_nvtx/_virtual_includes', 'external/pypi', 'external/jsoncpp_git/src', 'local_config_cuda/cuda/_virtual_includes', 'local_config_tensorrt', 'python_x86_64', 'python_aarch64', 'llvm-project/llvm/', 'external/cpuinfo', 'external/FXdiv', 'external/net_zstd', 'external/org_brotli/c', 'external/org_brotli/_virtual_includes', 'external/pthreadpool', 'external/riegeli/riegeli', 'external/XNNPACK/src/']\n    path_to_replace = {'external/com_google_absl/': '', 'external/eigen_archive/': '', 'external/jsoncpp_git/': '', 'external/com_google_protobuf/src/': '', 'external/local_xla/': 'tensorflow/compiler', 'external/local_tsl/': 'tensorflow'}\n    for file in headers:\n        if file.endswith('cc.inc'):\n            continue\n        if any((i in file for i in path_to_exclude)):\n            continue\n        for path, val in path_to_replace.items():\n            if path in file:\n                copy_file(file, os.path.join(srcs_dir, val), path)\n                break\n        else:\n            copy_file(file, srcs_dir)\n    create_local_config_python(os.path.join(srcs_dir, 'external/local_config_python'))\n    shutil.copytree(os.path.join(srcs_dir, 'external/local_config_cuda/cuda'), os.path.join(srcs_dir, 'third_party/gpus'))\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cccl', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cublas', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cudart', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cudnn', 'third_party/gpus/cudnn')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cufft', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cupti', 'third_party/gpus/cuda/extras/CUPTI')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_curand', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cusolver', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_cusparse', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_nvcc', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_nvjitlink', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_nvml', 'third_party/gpus/cuda/nvml')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_nvrtc', 'third_party/gpus/cuda')\n    _copy_cuda_tree(srcs_dir, 'external/cuda_nvtx', 'third_party/gpus/cuda')\n    shutil.copytree(os.path.join(srcs_dir, 'tensorflow/compiler/xla'), os.path.join(srcs_dir, 'xla'))\n    shutil.copytree(os.path.join(srcs_dir, 'tensorflow/tsl'), os.path.join(srcs_dir, 'tsl'))", "docstring": "Copy and rearrange header files in the target directory.\n\nFilter out headers by their path and replace paths for some of them.\n\nArgs:\nheaders: a list of paths to header files.\nsrcs_dir: target directory where headers are copied to.", "source": "github-repos"}
{"code": "def make_config_get(conf_path):\n    \n    project_root = _get_project_root_from_conf_path(conf_path)\n    config = load_config_in_dir(project_root)\n    return partial(config_get, config)", "docstring": "Return a function to get configuration options for a specific project\n\nArgs:\nconf_path (path-like): path to project's conf file (i.e. foo.conf\nmodule)", "source": "juraj-google-style"}
{"code": "def verify(self, obj):\n        \n\n        if not isinstance(obj, bool):\n            raise ValidationError(\"Object is not a bool\", reason='object is not a bool', object=obj)\n\n        if self._require_value is not None and obj != self._require_value:\n            raise ValidationError(\"Boolean is not equal to specified literal\", reason='boolean value %s should be %s'\n                                                                                      % (str(obj), str(self._require_value)))\n\n        return obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def neighborhood_probability(self, threshold, radius):\n    weights = disk(radius, dtype=np.uint8)\n    thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)\n    neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)\n    for t in np.arange(self.data.shape[0]):\n        thresh_data[(self.data[t] >= threshold)] = 1\n        maximized = fftconvolve(thresh_data, weights, mode='same')\n        maximized[(maximized > 1)] = 1\n        maximized[(maximized < 1)] = 0\n        neighbor_prob[t] = fftconvolve(maximized, weights, mode='same')\n        thresh_data[:] = 0\n    neighbor_prob[(neighbor_prob < 1)] = 0\n    neighbor_prob /= weights.sum()\n    return neighbor_prob", "docstring": "Calculate a probability based on the number of grid points in an area that exceed a threshold.\n\nArgs:\nthreshold:\nradius:\n\nReturns:", "source": "codesearchnet"}
{"code": "def remove_feature(feature, remove_payload=False, image=None, restart=False):\n    cmd = ['DISM', '/Quiet', ('/Image:{0}'.format(image) if image else '/Online'), '/Disable-Feature', '/FeatureName:{0}'.format(feature)]\n    if remove_payload:\n        cmd.append('/Remove')\n    if (not restart):\n        cmd.append('/NoRestart')\n    return __salt__['cmd.run_all'](cmd)", "docstring": "Disables the feature.\n\nArgs:\nfeature (str): The feature to uninstall\nremove_payload (Optional[bool]): Remove the feature's payload. Must\nsupply source when enabling in the future.\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\nrestart (Optional[bool]): Reboot the machine if required by the install\n\nReturns:\ndict: A dictionary containing the results of the command\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.remove_feature NetFx3", "source": "codesearchnet"}
{"code": "def log_estimator_evaluation_result(self, eval_results):\n    \n    if not isinstance(eval_results, dict):\n      tf.logging.warning(\"eval_results should be directory for logging. Got %s\",\n                         type(eval_results))\n      return\n    global_step = eval_results[tf.GraphKeys.GLOBAL_STEP]\n    for key in sorted(eval_results):\n      if key != tf.GraphKeys.GLOBAL_STEP:\n        self.log_metric(key, eval_results[key], global_step=global_step)", "docstring": "Log the evaluation result for a estimator.\n\nThe evaluate result is a directory that contains metrics defined in\nmodel_fn. It also contains a entry for global_step which contains the value\nof the global step when evaluation was performed.\n\nArgs:\neval_results: dict, the result of evaluate() from a estimator.", "source": "juraj-google-style"}
{"code": "def shift(self, time: int) -> 'Timeslot':\n        \n        return Timeslot(self.interval.shift(time), self.channel)", "docstring": "Return a new Timeslot shifted by `time`.\n\nArgs:\ntime: time to be shifted", "source": "juraj-google-style"}
{"code": "def _WriteData(self, target, entry):\n    sshkey_entry = '%s:%s' % (entry.name, entry.sshkey)\n    target.write(sshkey_entry.encode() + b'\\n')\n    return len(sshkey_entry) + 1", "docstring": "Write a SshekeyMapEntry to the target cache.\n\nArgs:\ntarget: A file-like object.\nentry: A SshkeyMapEntry.\n\nReturns:\nNumber of bytes written to the target.", "source": "github-repos"}
{"code": "def default_get_arg_names_from_class_name(class_name):\n    \n    parts = []\n    rest = class_name\n    if rest.startswith('_'):\n        rest = rest[1:]\n    while True:\n        m = re.match(r'([A-Z][a-z]+)(.*)', rest)\n        if m is None:\n            break\n        parts.append(m.group(1))\n        rest = m.group(2)\n    if not parts:\n        return []\n    return ['_'.join(part.lower() for part in parts)]", "docstring": "Converts normal class names into normal arg names.\n\nNormal class names are assumed to be CamelCase with an optional leading\nunderscore.  Normal arg names are assumed to be lower_with_underscores.\n\nArgs:\nclass_name: a class name, e.g., \"FooBar\" or \"_FooBar\"\nReturns:\nall likely corresponding arg names, e.g., [\"foo_bar\"]", "source": "juraj-google-style"}
{"code": "def remove(path, follow_symlink=False):\n    if os.path.isfile(path):\n        os.remove(path)\n    elif os.path.islink(path):\n        if follow_symlink:\n            remove(os.readlink(path))\n        os.unlink(path)\n    else:\n        shutil.rmtree(path)", "docstring": "Implements an remove function that will delete files, folder trees and symlink trees\n\n1.) Remove a file\n2.) Remove a symlink and follow into with a recursive rm if follow_symlink\n3.) Remove directory with rmtree\n\nArgs:\npath (str): path to remove\nfollow_symlink(bool): follow symlinks and removes whatever is in them", "source": "codesearchnet"}
{"code": "def convert(self, calibration_inputs=None, num_runs=1) -> None:\n    for trt_model in self._trt_models:\n        trt_model.convert(calibration_inputs, num_runs)", "docstring": "Converts models with TensorRT and calibrates if using INT8 precision mode.\n\nArgs:\ncalibration_inputs: Mapping from input names to ndarrays in TF1. Or a\nsequence of tensors in TF2. Used as calibration data.\nnum_runs: Number of calibration runs.", "source": "github-repos"}
{"code": "def segment(self, text):\n        \n\n        files = {'text': text}\n        res, status_code = self.post(self.segmentation_service, files=files)\n\n        if status_code != 200:\n            logger.debug('Segmentation failed.')\n\n        return self.decode(res), status_code", "docstring": "Call the segmenter in order to split text in sentences.\n\nArgs:\ntext (str): Text to be segmented.\n\nReturns:\ndict, int: A dict containing a list of dicts with the offsets of\neach sentence; an integer representing the response code.", "source": "juraj-google-style"}
{"code": "def DeregisterFormatter(cls, formatter_class):\n    formatter_data_type = formatter_class.DATA_TYPE.lower()\n    if (formatter_data_type not in cls._formatter_classes):\n        raise KeyError('Formatter class not set for data type: {0:s}.'.format(formatter_class.DATA_TYPE))\n    del cls._formatter_classes[formatter_data_type]", "docstring": "Deregisters a formatter class.\n\nThe formatter classes are identified based on their lower case data type.\n\nArgs:\nformatter_class (type): class of the formatter.\n\nRaises:\nKeyError: if formatter class is not set for the corresponding data type.", "source": "codesearchnet"}
{"code": "def __init__(self, sess):\n    self._sess = sess\n    self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)", "docstring": "Creates a `_WrappedSession`.\n\nArgs:\nsess: A `tf.compat.v1.Session` or `_WrappedSession` object.  The wrapped\nsession.", "source": "github-repos"}
{"code": "def _ass_refresh_attrs(self, cached_ass, file_ass):\n    loaded_ass = yaml_loader.YamlLoader.load_yaml_by_path(file_ass['source'], log_debug=True)\n    attrs = loaded_ass\n    yaml_checker.check(file_ass['source'], attrs)\n    cached_ass['source'] = file_ass['source']\n    cached_ass['ctime'] = os.path.getctime(file_ass['source'])\n    cached_ass['attrs'] = {}\n    cached_ass['snippets'] = {}\n    for a in ['fullname', 'description', 'icon_path']:\n        if (a in attrs):\n            cached_ass['attrs'][a] = attrs.get(a)\n    if ('args' in attrs):\n        cached_ass['attrs']['args'] = {}\n    for (argname, argparams) in attrs.get('args', {}).items():\n        if (('use' in argparams) or ('snippet' in argparams)):\n            snippet_name = (argparams.pop('use', None) or argparams.pop('snippet'))\n            snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snippet_name)\n            cached_ass['attrs']['args'][argname] = snippet.get_arg_by_name(argname)\n            cached_ass['attrs']['args'][argname].update(argparams)\n            cached_ass['snippets'][snippet.name] = self._get_snippet_ctime(snippet.name)\n        else:\n            cached_ass['attrs']['args'][argname] = argparams", "docstring": "Completely refreshes cached assistant from file.\n\nArgs:\ncached_ass: an assistant from cache hierarchy\n(for format see Cache class docstring)\nfile_ass: the respective assistant from filesystem hierarchy\n(for format see what refresh_role accepts)", "source": "codesearchnet"}
{"code": "def energies(self, samples_like, dtype=np.float):\n    (samples, labels) = as_samples(samples_like)\n    if labels:\n        (idx, label) = zip(*enumerate(labels))\n        labeldict = dict(zip(label, idx))\n    else:\n        labeldict = {}\n    num_samples = samples.shape[0]\n    energies = np.zeros(num_samples, dtype=dtype)\n    for (term, bias) in self.items():\n        if (len(term) == 0):\n            energies += bias\n        else:\n            energies += (np.prod([samples[(:, labeldict[v])] for v in term], axis=0) * bias)\n    return energies", "docstring": "The energies of the given samples.\n\nArgs:\nsamples_like (samples_like):\nA collection of raw samples. `samples_like` is an extension of\nNumPy's array_like structure. See :func:`.as_samples`.\n\ndtype (:class:`numpy.dtype`, optional):\nThe data type of the returned energies. Defaults to float.\n\nReturns:\n:obj:`numpy.ndarray`: The energies.", "source": "codesearchnet"}
{"code": "def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params):\n    \n    nmf = NMF(k)\n    if len(W_list)==0:\n        W_list = []\n        for i in range(n_runs):\n            W = nmf.fit_transform(data)\n            W_list.append(W)\n    W_stacked = np.hstack(W_list)\n    nmf_w = nmf.fit_transform(W_stacked)\n    nmf_h = nmf.components_\n    H_new = data.T.dot(nmf_w).T\n    nmf2 = NMF(k, init='custom')\n    nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)\n    H_new = nmf2.components_\n    \n    \n    return nmf_w, H_new", "docstring": "Runs an ensemble method on the list of NMF W matrices...\n\nArgs:\ndata: genes x cells array (should be log + cell-normalized)\nk: number of classes\nn_runs (optional): number of random initializations of state estimation\nM_list (optional): list of M arrays from state estimation\nse_params (optional): optional poisson_estimate_state params\n\nReturns:\nW_new\nH_new", "source": "juraj-google-style"}
{"code": "def delete(self, paths):\n    exceptions = {}\n    for path in paths:\n        if path.endswith('/'):\n            self._gcsIO().delete(path, recursive=True)\n            continue\n        else:\n            path_to_use = path\n        match_result = self.match([path_to_use])[0]\n        statuses = self._gcsIO().delete_batch([m.path for m in match_result.metadata_list])\n        for target, exception in statuses:\n            if exception:\n                exceptions[target] = exception\n    if exceptions:\n        raise BeamIOError('Delete operation failed', exceptions)", "docstring": "Deletes files or directories at the provided paths.\nDirectories will be deleted recursively.\n\nArgs:\npaths: list of paths that give the file objects to be deleted", "source": "github-repos"}
{"code": "def convert_idx_to_name(self, y, lens):\n    y = [[self.id2label[idx] for idx in row[:l]] for (row, l) in zip(y, lens)]\n    return y", "docstring": "Convert label index to name.\n\nArgs:\ny (list): label index list.\nlens (list): true length of y.\n\nReturns:\ny: label name list.\n\nExamples:\n>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}\n>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]\n>>> lens = [1, 2, 3]\n>>> self.convert_idx_to_name(y, lens)\n[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]", "source": "codesearchnet"}
{"code": "def _FormatSubjectExOrProcessExToken(self, token_data):\n    \n    if token_data.net_type == 4:\n      ip_address = self._FormatPackedIPv4Address(token_data.ip_address)\n    elif token_data.net_type == 16:\n      ip_address = self._FormatPackedIPv6Address(token_data.ip_address)\n    else:\n      ip_address = 'unknown'\n\n    return {\n        'aid': token_data.audit_user_identifier,\n        'euid': token_data.effective_user_identifier,\n        'egid': token_data.effective_group_identifier,\n        'uid': token_data.real_user_identifier,\n        'gid': token_data.real_group_identifier,\n        'pid': token_data.process_identifier,\n        'session_id': token_data.session_identifier,\n        'terminal_port': token_data.terminal_port,\n        'terminal_ip': ip_address}", "docstring": "Formats a subject or process token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_subject32_ex|bsm_token_data_subject64_ex):\nAUT_SUBJECT32_EX, AUT_PROCESS32_EX, AUT_SUBJECT64_EX or\nAUT_PROCESS64_EX token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def with_dependencies(dependencies, output_tensor, name=None):\n    if tf.executing_eagerly():\n        return output_tensor\n    with tf.name_scope((name or 'control_dependency')) as name:\n        with tf.control_dependencies((d for d in dependencies if (d is not None))):\n            output_tensor = tf.convert_to_tensor(value=output_tensor)\n            if isinstance(output_tensor, tf.Tensor):\n                return tf.identity(output_tensor, name=name)\n            else:\n                return tf.IndexedSlices(tf.identity(output_tensor.values, name=name), output_tensor.indices, output_tensor.dense_shape)", "docstring": "Produces the content of `output_tensor` only after `dependencies`.\n\nIn some cases, a user may want the output of an operation to be consumed\nexternally only after some other dependencies have run first. This function\nreturns `output_tensor`, but only after all operations in `dependencies` have\nrun. Note that this means that there is no guarantee that `output_tensor` will\nbe evaluated after any `dependencies` have run.\n\nSee also `tf.tuple` and `tf.group`.\n\nArgs:\ndependencies: Iterable of operations to run before this op finishes.\noutput_tensor: A `Tensor` or `IndexedSlices` that will be returned.\nname: (Optional) A name for this operation.\n\nReturns:\noutput_with_deps: Same as `output_tensor` but with embedded dependencies.\n\nRaises:\nTypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.", "source": "codesearchnet"}
{"code": "def parse_arguments(argv):\n  \n  parser = argparse.ArgumentParser(\n      description='Runs Prediction inside a beam or Dataflow job.')\n  \n  parser.add_argument('--project-id',\n                      help='The project to which the job will be submitted.')\n  parser.add_argument('--cloud',\n                      action='store_true',\n                      help='Run preprocessing on the cloud.')\n  parser.add_argument('--job-name',\n                      default=('mltoolbox-batch-prediction-' +\n                               datetime.datetime.now().strftime('%Y%m%d%H%M%S')),\n                      help='Dataflow job name. Must be unique over all jobs.')\n  parser.add_argument('--extra-package',\n                      default=[],\n                      action='append',\n                      help=('If using --cloud, also installs these packages on '\n                            'each dataflow worker'))\n\n  \n  parser.add_argument('--predict-data',\n                      required=True,\n                      help='Data to run prediction on')\n  parser.add_argument('--trained-model-dir',\n                      required=True,\n                      help='Usually train_output_path/model.')\n  parser.add_argument('--output-dir',\n                      required=True,\n                      help=('Location to save output.'))\n\n  \n  parser.add_argument('--batch-size',\n                      required=False,\n                      default=1000,\n                      type=int,\n                      help=('Batch size. Larger values consumes more memrory '\n                            'but takes less time to finish.'))\n  parser.add_argument('--shard-files',\n                      dest='shard_files',\n                      action='store_true',\n                      help='Shard files')\n  parser.add_argument('--no-shard-files',\n                      dest='shard_files',\n                      action='store_false',\n                      help='Don\\'t shard files')\n  parser.set_defaults(shard_files=True)\n\n  parser.add_argument('--output-format',\n                      choices=['csv', 'json'],\n                      default='csv',\n                      help=)\n\n  args, _ = parser.parse_known_args(args=argv[1:])\n\n  if args.cloud:\n    if not args.project_id:\n      raise ValueError('--project-id needed with --cloud')\n    if not args.trained_model_dir.startswith('gs:\n      raise ValueError('--trained-model-dir needs to be a GCS path,')\n    if not args.output_dir.startswith('gs:\n      raise ValueError('--output-dir needs to be a GCS path.')\n    if not args.predict_data.startswith('gs:\n      raise ValueError('--predict-data needs to be a GCS path.')\n\n  return args", "docstring": "Parse command line arguments.\n\nArgs:\nargv: includes the script's name.\n\nReturns:\nargparse object", "source": "juraj-google-style"}
{"code": "def _get_dict_of_block_index(self, axis, indices, ordered=False):\n    all_partitions_and_idx = [self._get_blocks_containing_index(axis, i) for i in indices]\n    if ordered:\n        partitions_dict = []\n        last_part = (- 1)\n        for (part_idx, internal_idx) in all_partitions_and_idx:\n            if (part_idx == last_part):\n                partitions_dict[(- 1)][(- 1)].append(internal_idx)\n            else:\n                partitions_dict.append((part_idx, [internal_idx]))\n            last_part = part_idx\n    else:\n        partitions_dict = {}\n        for (part_idx, internal_idx) in all_partitions_and_idx:\n            if (part_idx not in partitions_dict):\n                partitions_dict[part_idx] = [internal_idx]\n            else:\n                partitions_dict[part_idx].append(internal_idx)\n    return partitions_dict", "docstring": "Convert indices to a dict of block index to internal index mapping.\n\nNote: See `_get_blocks_containing_index` for primary usage. This method\naccepts a list of indices rather than just a single value, and uses\n`_get_blocks_containing_index`.\n\nArgs:\naxis: The axis along which to get the indices\n(0 - columns, 1 - rows)\nindices: A list of global indices to convert.\n\nReturns\nFor unordered: a dictionary of {block index: list of local indices}.\nFor ordered: a list of tuples mapping block index: list of local indices.", "source": "codesearchnet"}
{"code": "def _configure_common(self, prefix, fallback_level, fallback_format, handler_name, handler, custom_args=''):\n    log_level = self.config.get_option('LOGGING', (prefix + 'log_level'), None, fallback_level)\n    log_format_name = self.config.get_option('LOGGING', (prefix + 'log_format'), None, None)\n    log_format = (ReportingFormats[log_format_name].value if log_format_name else fallback_format)\n    log_format = log_format.format(custom_args=custom_args)\n    formatter = logging.Formatter(log_format)\n    handler.setFormatter(formatter)\n    handler.setLevel(log_level)\n    self.logger.addHandler(handler)\n    if (not self.logger.isEnabledFor(logging.getLevelName(log_level))):\n        self.logger.setLevel(log_level)\n    self.log_info.append(((handler_name + ' @ ') + str(log_level)))\n    self.log_handlers.append(handler)", "docstring": "commom configuration code\n\nArgs:\nprefix (str): A prefix for the `log_level` and `log_format` keys to use with the config. #FIXME: Hacky, add separate sections for each logger config?\nfallback_level (str): Fallback/minimum log level, for if config does not have one.\nfallback_format (str): Fallback format for if it's not in the config.\nhandler_name (str): Handler used in debug messages.\nhandler (str): The handler to configure and use.\ncustom_args (str): special ID to include in messages", "source": "codesearchnet"}
{"code": "def set_speech_ssml(self, ssml):\n    self.response.outputSpeech.type = 'SSML'\n    self.response.outputSpeech.ssml = ssml", "docstring": "Set response output speech as SSML type.\n\nArgs:\nssml: str. Response speech used when type is 'SSML', should be formatted\nwith Speech Synthesis Markup Language. Cannot exceed 8,000\ncharacters.", "source": "codesearchnet"}
{"code": "def chosen_probabs(probab_observations, actions):\n    (B, T) = actions.shape\n    assert ((B, (T + 1)) == probab_observations.shape[:2])\n    return probab_observations[(np.arange(B)[(:, None)], np.arange(T), actions)]", "docstring": "Picks out the probabilities of the actions along batch and time-steps.\n\nArgs:\nprobab_observations: ndarray of shape `[B, T+1, A]`, where\nprobab_observations[b, t, i] contains the log-probability of action = i at\nthe t^th time-step in the b^th trajectory.\nactions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which\naction was chosen in the b^th trajectory's t^th time-step.\n\nReturns:\n`[B, T]` ndarray with the log-probabilities of the chosen actions.", "source": "codesearchnet"}
{"code": "def sendto(self, transport, addr):\n        \n        msg = bytes(self) + b'\\r\\n'\n        logger.debug(\"%s:%s < %s\", *(addr + (self,)))\n        transport.sendto(msg, addr)", "docstring": "Send request to a given address via given transport.\n\nArgs:\ntransport (asyncio.DatagramTransport):\nWrite transport to send the message on.\naddr (Tuple[str, int]):\nIP address and port pair to send the message to.", "source": "juraj-google-style"}
{"code": "def zip(self, second_iterable, result_selector=(lambda x, y: (x, y))):\n    if self.closed():\n        raise ValueError('Attempt to call zip() on a closed Queryable.')\n    if (not is_iterable(second_iterable)):\n        raise TypeError('Cannot compute zip() with second_iterable of non-iterable {0}'.format(str(type(second_iterable))[7:(- 1)]))\n    if (not is_callable(result_selector)):\n        raise TypeError('zip() parameter result_selector={0} is not callable'.format(repr(result_selector)))\n    return self._create((result_selector(*t) for t in izip(self, second_iterable)))", "docstring": "Elementwise combination of two sequences.\n\nThe source sequence and the second iterable are merged element-by-\nelement using a function to combine them into the single corresponding\nelement of the result sequence. The length of the result sequence is\nequal to the length of the shorter of the two input sequences.\n\nNote: This method uses deferred execution.\n\nArgs:\nsecond_iterable: The second sequence to be combined with the source\nsequence.\n\nresult_selector: An optional binary function for combining\ncorresponding elements of the source sequences into an\nelement of the result sequence. The first and second positional\narguments are the elements from the source sequences. The\nresult should be the result sequence element. If omitted, the\nresult sequence will consist of 2-tuple pairs of corresponding\nelements from the source sequences.\n\nReturns:\nA Queryable over the merged elements.\n\nRaises:\nValueError: If the Queryable is closed.\nTypeError: If result_selector is not callable.", "source": "codesearchnet"}
{"code": "def _ReadSources(self, artifact_definition_values, artifact_definition, name):\n    \n    sources = artifact_definition_values.get('sources')\n    if not sources:\n      raise errors.FormatError(\n          'Invalid artifact definition: {0:s} missing sources.'.format(name))\n\n    for source in sources:\n      type_indicator = source.get('type', None)\n      if not type_indicator:\n        raise errors.FormatError(\n            'Invalid artifact definition: {0:s} source type.'.format(name))\n\n      attributes = source.get('attributes', None)\n\n      try:\n        source_type = artifact_definition.AppendSource(\n            type_indicator, attributes)\n      except errors.FormatError as exception:\n        raise errors.FormatError(\n            'Invalid artifact definition: {0:s}, with error: {1!s}'.format(\n                name, exception))\n\n      \n      if source_type:\n        if source.get('returned_types', None):\n          raise errors.FormatError((\n              'Invalid artifact definition: {0:s} returned_types no longer '\n              'supported.').format(name))\n\n        source_type.conditions = source.get('conditions', [])\n        self._ReadSupportedOS(source, source_type, name)\n        if set(source_type.supported_os) - set(\n            artifact_definition.supported_os):\n          raise errors.FormatError((\n              'Invalid artifact definition: {0:s} missing '\n              'supported_os.').format(name))", "docstring": "Reads the artifact definition sources.\n\nArgs:\nartifact_definition_values (dict[str, object]): artifact definition\nvalues.\nartifact_definition (ArtifactDefinition): an artifact definition.\nname (str): name of the artifact definition.\n\nRaises:\nFormatError: if the type indicator is not set or unsupported,\nor if required attributes are missing.", "source": "juraj-google-style"}
{"code": "def my_sum(x, y, *args, **kwargs):\n    del args, kwargs\n    return x + y", "docstring": "Returns the sum of two integers.\n\nThis function will return the sum of two integers.\n\nExamples:\n\n```\nret = sum(1, 2)\nprint(ret)\n```\n\nArgs:\nx: An integer.\ny: Another integer.\n*args: Variable positional args.\n**kwargs: Variable keyword args.\n\nReturns:\nThe sum of both.\n\nRaises:\nValueError: when either `x` and `y` is not an integer.", "source": "github-repos"}
{"code": "def __init__(self, options):\n        \n        self.event = Event.create(__name__)\n        self.options = options\n        self.logging_level = logging.DEBUG\n        self.setup_logging()\n        self.logger = Logger.get_logger(__name__)", "docstring": "Initialize application with command line options.\n\nArgs:\noptions (ApplicationOptions): given command line options.", "source": "juraj-google-style"}
{"code": "def sentencecase(string):\n    joiner = ' '\n    string = re.sub('[\\\\-_\\\\.\\\\s]', joiner, str(string))\n    if (not string):\n        return string\n    return capitalcase(trimcase(re.sub('[A-Z]', (lambda matched: (joiner + lowercase(matched.group(0)))), string)))", "docstring": "Convert string into sentence case.\nFirst letter capped and each punctuations are joined with space.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Sentence cased string.", "source": "codesearchnet"}
{"code": "def __init__(self, dsn, echo=False, foreign_keys=True, engine_kwargs=None, application_prefix='ambry'):\n        \n\n        self.dsn = dsn\n\n        d = parse_url_to_dict(self.dsn)\n        self.path = d['path'].replace('\n\n        self.driver = d['scheme']\n        self.engine_kwargs = engine_kwargs or {}\n\n        self.Session = None\n        self._session = None\n        self._engine = None\n        self._connection = None\n        self._echo = echo\n        self._foreign_keys = foreign_keys\n\n        self._raise_on_commit = False  \n\n        if self.driver in ['postgres', 'postgresql', 'postgresql+psycopg2', 'postgis']:\n            self.driver = 'postgres'\n            self._schema = POSTGRES_SCHEMA_NAME\n        else:\n            self._schema = None\n\n        self.logger = logger\n\n        self.library = None  \n\n        self._application_prefix = application_prefix", "docstring": "Initializes database.\n\nArgs:\ndsn (str): database connect string, 'sqlite://' for example.\necho (boolean): echo parameter of the create_engine.\nengine_kwargs (dict): parameters to pass to the create_engine method of the Sqlalchemy.", "source": "juraj-google-style"}
{"code": "def move(self, delta):\n    pos = self.pos\n    self.pos = ((pos[0] + delta[0]), (pos[1] + delta[1]), (pos[2] + delta[0]), (pos[3] + delta[1]))\n    for age in self.nodes:\n        for node in age:\n            node.move(delta)", "docstring": "Move the tree.\n\nArgs:\ndelta (tupel): The adjustment of the position.", "source": "codesearchnet"}
{"code": "def check_rank(player, platform=\"steam\"):\n    \n\n    \n    webpage = requests.get(\n        \"https:\n    ).text\n\n    try:\n        \n        playerid_index = webpage.index(\"/live?ids=\") + len(\"/live?ids=\")\n        playerid_end_index = webpage.index(, playerid_index)\n        playerid = webpage[playerid_index:playerid_end_index]\n\n        \n        name_index = webpage.index(\"Stats Profile : \") + len(\"Stats Profile : \")\n        name_end_index = webpage.index(, name_index)\n        name = webpage[name_index:name_end_index]\n    except (ValueError, IndexError):\n        return False, ()\n\n    \n    livedata = json.loads(\n        requests.post(\n            \"https:\n            json={\"playerIds\": [playerid]}\n        ).text\n    )\n\n    stats = []\n    try:\n        for statpack in livedata['players'][0]['Stats']:\n            field = statpack['Value']['Label']\n            value = str(statpack['Value']['DisplayValue'])\n            if statpack['Value']['Percentile']:\n                percentile = str(statpack['Value']['Percentile'])\n            else:\n                percentile = None\n            stats.append((field, value, percentile))\n    except (IndexError, KeyError):\n        return False, ()\n\n    dp = \"https:\n\n    platform_display = platform\n    if platform == \"steam\":\n        platform_display = \"Steam\"\n    elif platform == \"ps\":\n        platform_display = \"PlayStation\"\n    elif platform == \"xbox\":\n        platform_display = \"Xbox\"\n\n    return True, (stats, name, platform_display, dp)", "docstring": "Gets the Rocket League stats and name and dp of a UserID\n\nArgs:\nplayer (str): The UserID of the player we want to rank check\nplatform (str): The platform to check for, can be 'steam', 'ps', or 'xbox'\n\nReturns:\nsuccess (bool): Whether the rank check was successful\npackage (tuple): If successful, the retrieved stats, in order (stats, name, dp)", "source": "juraj-google-style"}
{"code": "def _CreateEventTag(self, event, comment, labels):\n    event_identifier = event.GetIdentifier()\n    event_tag = events.EventTag(comment=comment)\n    event_tag.SetEventIdentifier(event_identifier)\n    event_tag.AddLabels(labels)\n    event_identifier_string = event_identifier.CopyToString()\n    logger.debug('Created event tag: {0:s} for event: {1:s}'.format(comment, event_identifier_string))\n    return event_tag", "docstring": "Creates an event tag.\n\nArgs:\nevent (EventObject): event to tag.\ncomment (str): event tag comment.\nlabels (list[str]): event tag labels.\n\nReturns:\nEventTag: the event tag.", "source": "codesearchnet"}
{"code": "def add_droplets(self, droplet_ids):\n    return self.get_data(('load_balancers/%s/droplets/' % self.id), type=POST, params={'droplet_ids': droplet_ids})", "docstring": "Assign a LoadBalancer to a Droplet.\n\nArgs:\ndroplet_ids (obj:`list` of `int`): A list of Droplet IDs", "source": "codesearchnet"}
{"code": "def extend(self, records):\n        \n        fields = self.fields\n        for record in records:\n            record = _cast_record_to_str_tuple(record, fields)\n            self._records.append(record)", "docstring": "Add each record in *records* to the end of the table.\n\nArgs:\nrecord: an iterable of :class:`Record` or other iterables\ncontaining column values", "source": "juraj-google-style"}
{"code": "async def update_pairing_method(self, pairing: Pairing):\n        \n        do_sequential_pairing = pairing == Pairing.sequential\n        await self.update(sequential_pairings=do_sequential_pairing)", "docstring": "|methcoro|\n\nArgs:\npairing:\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def member_update(self, repl_id, member_id, params):\n        \n        repl = self[repl_id]\n        result = repl.member_update(member_id, params)\n        self[repl_id] = repl\n        return result", "docstring": "apply new params to replica set member\nArgs:\nrepl_id - replica set identity\nmember_id - member index\nparams - new member's params\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "def get_attribute(self, obj, attr):\n        \n        \n        \n        if attr == '*':\n            return obj\n\n        \n        if isinstance(obj, Mapping):\n            return obj.get(attr, None)\n\n        return getattr(obj, attr, None)", "docstring": "Get attribute of given object instance.\n\nReason for existence of this method is the fact that  'attribute' can\nbe also object's key from if is a dict or any other kind of mapping.\n\nNote: it will return None if attribute key does not exist\n\nArgs:\nobj (object): internal object to retrieve data from\n\nReturns:\ninternal object's key value or attribute", "source": "juraj-google-style"}
{"code": "def with_output_types(self, type_hint):\n    type_hint = native_type_compatibility.convert_to_beam_type(type_hint)\n    validate_composite_type_param(type_hint, 'Type hints for a PTransform')\n    return super().with_output_types(type_hint)", "docstring": "Annotates the output type of a :class:`PTransform` with a type-hint.\n\nArgs:\ntype_hint (type): An instance of an allowed built-in type, a custom class,\nor a :class:`~apache_beam.typehints.typehints.TypeConstraint`.\n\nRaises:\nTypeError: If **type_hint** is not a valid type-hint. See\n:obj:`~apache_beam.typehints.typehints.validate_composite_type_param()`\nfor further details.\n\nReturns:\nPTransform: A reference to the instance of this particular\n:class:`PTransform` object. This allows chaining type-hinting related\nmethods.", "source": "github-repos"}
{"code": "def write_uint8(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sB' % endian, value)", "docstring": "Pack the value as an unsigned byte and write 1 byte to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def init_algebra(*, default_hs_cls='LocalSpace'):\n    \n    from qnet.algebra.core.hilbert_space_algebra import LocalSpace\n    from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression\n    default_hs_cls = getattr(importlib.import_module('qnet'), default_hs_cls)\n    if issubclass(default_hs_cls, LocalSpace):\n        QuantumExpression._default_hs_cls = default_hs_cls\n    else:\n        raise TypeError(\"default_hs_cls must be a subclass of LocalSpace\")", "docstring": "Initialize the algebra system\n\nArgs:\ndefault_hs_cls (str): The name of the :class:`.LocalSpace` subclass\nthat should be used when implicitly creating Hilbert spaces, e.g.\nin :class:`.OperatorSymbol`", "source": "juraj-google-style"}
{"code": "def _InternalUnpackAny(msg):\n  \n  \n  \n  \n  \n  from google.protobuf import symbol_database\n  factory = symbol_database.Default()\n\n  type_url = msg.type_url\n\n  if not type_url:\n    return None\n\n  \n  \n  type_name = type_url.split('/')[-1]\n  descriptor = factory.pool.FindMessageTypeByName(type_name)\n\n  if descriptor is None:\n    return None\n\n  message_class = factory.GetPrototype(descriptor)\n  message = message_class()\n\n  message.ParseFromString(msg.value)\n  return message", "docstring": "Unpacks Any message and returns the unpacked message.\n\nThis internal method is different from public Any Unpack method which takes\nthe target message as argument. _InternalUnpackAny method does not have\ntarget message type and need to find the message type in descriptor pool.\n\nArgs:\nmsg: An Any message to be unpacked.\n\nReturns:\nThe unpacked message.", "source": "juraj-google-style"}
{"code": "def split(self):\n    assert (self.status == SolverStatus.exhausted)\n    scopes = []\n    next_scopes = []\n    split_i = None\n    for (i, scope) in enumerate(self.scopes):\n        if (split_i is None):\n            r = scope.split()\n            if (r is not None):\n                (scope_, next_scope) = r\n                scopes.append(scope_)\n                next_scopes.append(next_scope)\n                split_i = i\n                continue\n        scopes.append(scope)\n        next_scopes.append(scope)\n    assert (split_i is not None)\n    phase = copy.copy(self)\n    phase.scopes = scopes\n    phase.status = SolverStatus.pending\n    phase.changed_scopes_i = set([split_i])\n    next_phase = copy.copy(phase)\n    next_phase.scopes = next_scopes\n    return (phase, next_phase)", "docstring": "Split the phase.\n\nWhen a phase is exhausted, it gets split into a pair of phases to be\nfurther solved. The split happens like so:\n1) Select the first unsolved package scope.\n2) Find some common dependency in the first N variants of the scope.\n3) Split the scope into two: [:N] and [N:].\n4) Create two copies of the phase, containing each half of the split\nscope.\n\nThe result of this split is that we have a new phase (the first phase),\nwhich contains a package scope with a common dependency. This\ndependency can now be intersected with the current resolve, thus\nprogressing it.\n\nReturns:\nA 2-tuple of _ResolvePhase objects, where the first phase is the\nbest contender for resolving.", "source": "codesearchnet"}
{"code": "def add(self, X):\n    for each in X:\n        self.dpp_vector[each] = X[each]\n    self.fit(self.dpp_vector.reshape(1, (- 1)))", "docstring": "Add data about known pipeline and scores.\n\nUpdates ``dpp_vector`` and refits model with all data.\n\nArgs:\nX (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a\ncolumn in ``dpp_matrix`` and values are the corresponding score for pipeline on\nthe dataset.", "source": "codesearchnet"}
{"code": "def DEFINE_spaceseplist(name, default, help, comma_compat=False, flag_values=_flagvalues.FLAGS, **args):\n    parser = _argument_parser.WhitespaceSeparatedListParser(comma_compat=comma_compat)\n    serializer = _argument_parser.ListSerializer(' ')\n    DEFINE(parser, name, default, help, flag_values, serializer, **args)", "docstring": "Registers a flag whose value is a whitespace-separated list of strings.\n\nAny whitespace can be used as a separator.\n\nArgs:\nname: str, the flag name.\ndefault: list|str|None, the default value of the flag.\nhelp: str, the help message.\ncomma_compat: bool - Whether to support comma as an additional separator.\nIf false then only whitespace is supported.  This is intended only for\nbackwards compatibility with flags that used to be comma-separated.\nflag_values: FlagValues, the FlagValues instance with which the flag will\nbe registered. This should almost never need to be overridden.\n**args: Dictionary with extra keyword args that are passed to the\nFlag __init__.", "source": "codesearchnet"}
{"code": "def __init__(self, original_embedding: nn.Embedding, assistant_overlap_token_ids):\n    super().__init__()\n    self.original_embedding = original_embedding\n    self.weight = original_embedding.weight\n    self.assistant_overlap_token_ids = assistant_overlap_token_ids\n    self.map = False", "docstring": "Wraps an existing embedding layer and remaps token IDs before lookup.\n\nArgs:\noriginal_embedding (nn.Embedding): Pre-trained or existing embedding layer.\nassistant_overlap_token_ids (dict): Mapping from original token IDs to new token IDs.\nExample: {old_id: new_id}", "source": "github-repos"}
{"code": "def reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):\n    \n\n    \n    squared_phase = np.dot(reflection_matrix[:, 0],\n                           reflection_matrix[0, :])\n    phase = complex(np.sqrt(squared_phase))\n\n    \n    i = np.eye(reflection_matrix.shape[0]) * phase\n    pos_part = (i + reflection_matrix) * 0.5\n    neg_part = (i - reflection_matrix) * 0.5\n\n    \n    pos_factor = phase**(exponent - 1)\n    neg_factor = pos_factor * complex(-1)**exponent\n    pos_part_raised = pos_factor * pos_part\n    neg_part_raised = neg_part * neg_factor\n    return pos_part_raised + neg_part_raised", "docstring": "Raises a matrix with two opposing eigenvalues to a power.\n\nArgs:\nreflection_matrix: The matrix to raise to a power.\nexponent: The power to raise the matrix to.\n\nReturns:\nThe given matrix raised to the given power.", "source": "juraj-google-style"}
{"code": "def contains_call_signature(caller, key):\n    try:\n        args = inspect.signature(caller).parameters\n    except AttributeError:\n        args = inspect.getargspec(caller).args\n    return (key in args)", "docstring": "Check if a function or method call signature contains a specific\nargument.\n\nArgs:\ncaller (Callable):\nMethod or function to check if signature is contain in.\nkey (str):\nSignature to look for.\n\nReturns:\nTrue if ``key`` exits in ``caller`` call signature.\n\nExamples:\n>>> def foo(param): pass\n>>> contains_call_signature(foo, \"param\")\nTrue\n>>> contains_call_signature(foo, \"not_param\")\nFalse\n>>> class Bar:\n...     def baz(self, param): pass\n>>> bar = Bar()\n>>> contains_call_signature(bar.baz, \"param\")\nTrue\n>>> contains_call_signature(bar.baz, \"not_param\")\nFalse", "source": "codesearchnet"}
{"code": "def get_table_columns(metadata):\n    \n    cols = OrderedDict()\n    for col in metadata.c:\n        name = str(col).rpartition(\".\")[2]\n        cols[name] = col.type.python_type.__name__\n    return cols", "docstring": "Extract columns names and python typos from metadata\n\nArgs:\nmetadata: Table metadata\n\nReturns:\ndict with columns names and python types", "source": "juraj-google-style"}
{"code": "def accept_prompt(self, text=None, response=None, wait=None):\n    with self.driver.accept_modal('prompt', text=text, response=response, wait=wait):\n        (yield)", "docstring": "Execute the wrapped code, accepting a prompt, optionally responding to the prompt.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nresponse (str, optional): Response to provide to the prompt.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "codesearchnet"}
{"code": "def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None, is_training=True):\n    with tf.variable_scope((scope_prefix + 'encoder'), reuse=reuse):\n        x *= 256\n        x = (x - COLOR_NORMALIZATION_VECTOR)\n        with arg_scope(vgg.vgg_arg_scope()):\n            x = tf.pad(x, [[0, 0], [0, (VGG_IMAGE_SIZE - IMG_WIDTH)], [0, (VGG_IMAGE_SIZE - IMG_HEIGHT)], [0, 0]])\n            (_, end_points) = vgg.vgg_16(x, num_classes=enc_final_size, is_training=is_training)\n            pool5_key = [key for key in end_points.keys() if ('pool5' in key)]\n            assert (len(pool5_key) == 1)\n            enc = end_points[pool5_key[0]]\n            enc = tf.slice(enc, [0, 0, 0, 0], [(- 1), 2, 2, (- 1)])\n        enc_shape = enc.get_shape().as_list()\n        enc_shape[0] = (- 1)\n        enc_size = ((enc_shape[1] * enc_shape[2]) * enc_shape[3])\n        enc_flat = tf.reshape(enc, ((- 1), enc_size))\n        enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)\n        enc_flat = tf.layers.dense(enc_flat, enc_final_size, kernel_initializer=tf.truncated_normal_initializer(stddev=0.0001))\n        if hparams.enc_pred_use_l2norm:\n            enc_flat = tf.nn.l2_normalize(enc_flat, 1)\n    return enc_flat", "docstring": "VGG network to use as encoder without the top few layers.\n\nCan be pretrained.\n\nArgs:\nx: The image to encode. In the range 0 to 1.\nenc_final_size: The desired size of the encoding.\nreuse: To reuse in variable scope or not.\nscope_prefix: The prefix before the scope name.\nhparams: The python hparams.\nis_training: boolean value indicating if training is happening.\n\nReturns:\nThe generated image.", "source": "codesearchnet"}
{"code": "def get_effect_class(self, effect_name: str, package_name: str = None) -> Type['Effect']:\n        \n        return self._project.get_effect_class(effect_name, package_name=package_name)", "docstring": "Get an effect class by the class name\n\nArgs:\neffect_name (str): Name of the effect class\n\nKeyword Args:\npackage_name (str): The package the effect belongs to. This is optional and only\nneeded when effect class names are not unique.\n\nReturns:\n:py:class:`Effect` class", "source": "juraj-google-style"}
{"code": "def __is_function_action(self, action_function):\n    is_function_action = True\n    if (not hasattr(action_function, '__call__')):\n        return False\n    try:\n        for (end_string, context) in action_function():\n            if (not isinstance(end_string, basestring)):\n                self.log_error('Action function must return end of filename as a string as first argument')\n            if (not isinstance(context, dict)):\n                self.log_error('Action function must return context as a dict as second argument')\n            break\n    except Exception:\n        is_function_action = False\n    return is_function_action", "docstring": "Detect if given function is really an action function.\n\nArgs:\naction_function: Function to test.\n\nNote:\nWe don't care if the variable refer to a function but rather if it is callable or not.", "source": "codesearchnet"}
{"code": "def get_gan_loss(self, true_frames, gen_frames, name):\n    \n    \n    with tf.variable_scope(\"%s_discriminator\" % name, reuse=tf.AUTO_REUSE):\n      gan_d_loss, _, fake_logits_stop = self.d_step(\n          true_frames, gen_frames)\n\n    \n    with tf.variable_scope(\"%s_discriminator\" % name, reuse=True):\n      gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(\n          gen_frames, fake_logits_stop)\n    gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d\n    tf.summary.scalar(\"gan_loss_%s\" % name, gan_g_loss_pos_d + gan_d_loss)\n\n    if self.hparams.gan_optimization == \"joint\":\n      gan_loss = gan_g_loss + gan_d_loss\n    else:\n      curr_step = self.get_iteration_num()\n      gan_loss = tf.cond(\n          tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,\n          lambda: gan_d_loss)\n    return gan_loss", "docstring": "Get the discriminator + generator loss at every step.\n\nThis performs an 1:1 update of the discriminator and generator at every\nstep.\n\nArgs:\ntrue_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\nAssumed to be ground truth.\ngen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\nAssumed to be fake.\nname: discriminator scope.\nReturns:\nloss: 0-D Tensor, with d_loss + g_loss", "source": "juraj-google-style"}
{"code": "def __init__(self, subscription_path, deduplicate=None, expansion_service=None):\n    if deduplicate is None:\n        deduplicate = False\n    if expansion_service is None:\n        expansion_service = _default_io_expansion_service()\n    super().__init__('beam:transform:org.apache.beam:pubsublite_read:v1', NamedTupleBasedPayloadBuilder(_ReadSchema(subscription_path=subscription_path, deduplicate=deduplicate)), expansion_service)", "docstring": "Initializes a read operation from Pub/Sub Lite, returning the serialized\nbytes of SequencedMessage protos.\n\nArgs:\nsubscription_path: A Pub/Sub Lite Subscription path.\ndeduplicate: Whether to deduplicate messages based on the value of\nthe 'x-goog-pubsublite-dataflow-uuid' attribute.", "source": "github-repos"}
{"code": "def get_workunit(self, ignore_list=None):\n        \n        if ignore_list is None:\n            ignore_list = []\n\n        potential_files = self.get_potential_files(ignore_list)\n\n        while len(potential_files) > 0:\n            potential_file = self.select_potential_file(potential_files)\n            potential_files.remove(potential_file)\n            if self._filter(potential_file):\n                continue\n\n            if self.directory_context.get_file_size(potential_file) == 0:\n                continue\n\n            if self.progress_manager.is_done(potential_file):\n                self._done.append(potential_file)\n                continue\n            else:\n                try:\n                    self.progress_manager.lock(potential_file)\n                except FileLockedException:\n                    continue\n\n                self._already_fetched.append(potential_file)\n                return self.builder.build_workunit(\n                    self.directory_context.get_full_path(potential_file))\n\n        logger.info(\"No eligible workunits remain to be fetched.\")\n\n        raise NoAvailableWorkException()", "docstring": "Gets a new unit of work.\n\nArgs:\nignore_list: list(str)\nA list of filenames which should be ignored.  Defaults to None.\n\nReturns:\nnew_workunit: WorkUnit\nA new unit of work that has not yet been processed.  A lock on\nit has been acquired.\n\nRaises:\nNoAvailableWorkException\nThere is no more work available.", "source": "juraj-google-style"}
{"code": "def get_branch_length(self, age=None, pos=0):\n    if (age is None):\n        age = self.age\n    return (self.length * pow(self.branches[pos][0], age))", "docstring": "Get the length of a branch.\n\nThis method calculates the length of a branch in specific age.\nThe used formula: length * scale^age.\n\nArgs:\nage (int): The age, for which you want to know the branch length.\nReturns:\nfloat: The length of the branch", "source": "codesearchnet"}
{"code": "def get_shreds(self, feature_extractors, sheet_name):\n        \n        if self._shreds is None:\n            shreds = []\n            _, contours, _ = cv2.findContours(self._foreground_mask,\n                                           cv2.RETR_EXTERNAL,\n                                           cv2.CHAIN_APPROX_SIMPLE)\n            for i, contour in enumerate(contours):\n                shred = self._make_shred(contour, i, feature_extractors,\n                                         sheet_name)\n                if shred is not None:\n                    shreds.append(shred)\n            self._shreds = shreds\n        return self._shreds", "docstring": "Detects shreds in the current sheet and constructs Shred instances.\n\nCaches the results for further invocations.\n\nArgs:\nfeature_extractors: iterable of AbstractShredFeature instances to\nuse for shreds feature assignment.\nsheet_name: string, included in shred attributes.\n\nReturns:\nlist of Shred instances.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "class FlavaProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = 'FlavaImageProcessor'\n    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')\n\n    def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n        feature_extractor = None\n        if 'feature_extractor' in kwargs:\n            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)\n            feature_extractor = kwargs.pop('feature_extractor')\n        image_processor = image_processor if image_processor is not None else feature_extractor\n        if image_processor is None:\n            raise ValueError('You need to specify an `image_processor`.')\n        if tokenizer is None:\n            raise ValueError('You need to specify a `tokenizer`.')\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n\n    def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=False, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_image_mask: Optional[bool]=None, return_codebook_pixels: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs):\n        \n        if text is None and images is None:\n            raise ValueError('You have to specify either text or images. 
Both cannot be none.')\n        if text is not None:\n            encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)\n        if images is not None:\n            image_features = self.image_processor(images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs)\n        if text is not None and images is not None:\n            encoding.update(image_features)\n            return encoding\n        elif text is not None:\n            return encoding\n        else:\n            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        image_processor_input_names = self.image_processor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))\n\n    @property\n    def feature_extractor_class(self):\n        warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)\n        return self.image_processor_class\n\n    @property\n    def feature_extractor(self):\n        warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)\n        return self.image_processor", "docstring": "Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.\n\n[`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the\n[`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.\n\nArgs:\nimage_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.\ntokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.", "source": "github-repos"}
{"code": "def store_sample(self, input_bytes, filename, type_tag):\n        \n\n        \n        if type_tag == 'unknown':\n            print 'Info: Unknown File -- Trying to Determine Type...'\n            type_tag = self.guess_type_tag(input_bytes, filename)\n\n        \n        if type_tag == 'lz4':\n            input_bytes = lz4.loads(input_bytes)\n\n        \n        md5 = self.data_store.store_sample(input_bytes, filename, type_tag)\n\n        \n        if type_tag != 'lz4':\n            self.add_tags(md5, type_tag)\n\n        return md5", "docstring": "Store a sample into the DataStore.\nArgs:\ninput_bytes: the actual bytes of the sample e.g. f.read()\nfilename: name of the file (used purely as meta data not for lookup)\ntype_tag: ('exe','pcap','pdf','json','swf', or ...)\nReturns:\nthe md5 of the sample.", "source": "juraj-google-style"}
{"code": "def score_task(self, X, Y, t=0, metric=\"accuracy\", verbose=True, **kwargs):\n        \n        Y = self._to_numpy(Y)\n        Y_tp = self.predict_task(X, t=t, **kwargs)\n        probs = self.predict_proba(X)[t]\n        score = metric_score(\n            Y[t], Y_tp, metric, ignore_in_gold=[0], probs=probs, **kwargs\n        )\n        if verbose:\n            print(f\"[t={t}] {metric.capitalize()}: {score:.3f}\")\n        return score", "docstring": "Scores the predictive performance of the Classifier on task t\n\nArgs:\nX: The input for the predict_task method\nY: A [n] or [n, 1] np.ndarray or torch.Tensor of gold labels in\n{1,...,K_t}\nt: The task index to score\nmetric: The metric with which to score performance on this task\nReturns:\nThe (float) score of the Classifier for the specified task and\nmetric", "source": "juraj-google-style"}
{"code": "def _map_args(self, node: cfg.CFGNode, args: function.Args) -> tuple[list[tuple[str, _base.BaseValue]], dict[str, cfg.Variable]]:\n    formal_args: list[tuple[str, _base.BaseValue]] = [(p.name, self.signature.annotations[p.name]) for p in self.pytd_sig.params]\n    arg_dict: dict[str, cfg.Variable] = {}\n    for name, arg in zip(self.signature.param_names, args.posargs):\n        arg_dict[name] = arg\n    num_expected_posargs = len(self.signature.param_names)\n    if len(args.posargs) > num_expected_posargs and (not self.pytd_sig.starargs):\n        raise error_types.WrongArgCount(self.signature, args, self.ctx)\n    varargs_type = self.signature.annotations.get(self.signature.varargs_name)\n    if isinstance(varargs_type, _classes.ParameterizedClass):\n        for i, vararg in enumerate(args.posargs[num_expected_posargs:]):\n            name = function.argname(num_expected_posargs + i)\n            arg_dict[name] = vararg\n            formal_args.append((name, varargs_type.get_formal_type_parameter(abstract_utils.T)))\n    posonly_names = set(self.signature.posonly_params)\n    for name, arg in args.namedargs.items():\n        if name in posonly_names:\n            continue\n        elif name in arg_dict:\n            raise error_types.DuplicateKeyword(self.signature, args, self.ctx, name)\n        else:\n            arg_dict[name] = arg\n    kws = set(args.namedargs)\n    extra_kwargs = kws - {p.name for p in self.pytd_sig.params}\n    if extra_kwargs and (not self.pytd_sig.starstarargs):\n        if function.has_visible_namedarg(node, args, extra_kwargs):\n            raise error_types.WrongKeywordArgs(self.signature, args, self.ctx, extra_kwargs)\n    posonly_kwargs = kws & posonly_names\n    if posonly_kwargs and (not self.signature.kwargs_name):\n        raise error_types.WrongKeywordArgs(self.signature, args, self.ctx, posonly_kwargs)\n    kwargs_type = self.signature.annotations.get(self.signature.kwargs_name)\n    if isinstance(kwargs_type, _classes.ParameterizedClass):\n        for name in sorted(extra_kwargs):\n            formal_args.append((name, kwargs_type.get_formal_type_parameter(abstract_utils.V)))\n    packed_args = [('starargs', self.signature.varargs_name), ('starstarargs', self.signature.kwargs_name)]\n    for arg_type, name in packed_args:\n        actual = getattr(args, arg_type)\n        pytd_val = getattr(self.pytd_sig, arg_type)\n        if actual and pytd_val:\n            arg_dict[name] = actual\n            typ = self.ctx.convert.widen_type(self.signature.annotations[name])\n            formal_args.append((name, typ))\n    return (formal_args, arg_dict)", "docstring": "Map the passed arguments to a name->binding dictionary.\n\nArgs:\nnode: The current node.\nargs: The passed arguments.\n\nReturns:\nA tuple of:\na list of formal arguments, each a (name, abstract value) pair;\na name->variable dictionary of the passed arguments.\n\nRaises:\nInvalidParameters: If the passed arguments don't match this signature.", "source": "github-repos"}
{"code": "def attention_lm_small():\n    hparams = attention_lm_base()\n    hparams.num_hidden_layers = 4\n    hparams.hidden_size = 512\n    hparams.filter_size = 2048\n    hparams.layer_prepostprocess_dropout = 0.5\n    return hparams", "docstring": "Cheap model.\n\non lm1b_32k:\n45M params\n2 steps/sec on  [GeForce GTX TITAN X]\n\nReturns:\nan hparams object.", "source": "codesearchnet"}
{"code": "def HandleForwardedIps(self, interface, forwarded_ips, interface_ip=None):\n    \n    desired = self.ip_forwarding_utils.ParseForwardedIps(forwarded_ips)\n    configured = self.ip_forwarding_utils.GetForwardedIps(\n        interface, interface_ip)\n    to_add = sorted(set(desired) - set(configured))\n    to_remove = sorted(set(configured) - set(desired))\n    self._LogForwardedIpChanges(\n        configured, desired, to_add, to_remove, interface)\n    self._AddForwardedIps(to_add, interface)\n    self._RemoveForwardedIps(to_remove, interface)", "docstring": "Handle changes to the forwarded IPs on a network interface.\n\nArgs:\ninterface: string, the output device to configure.\nforwarded_ips: list, the forwarded IP address strings desired.\ninterface_ip: string, current interface ip address.", "source": "juraj-google-style"}
{"code": "def ready(self, cluster):\n    ready_nodes = set()\n    next_ready_check = 9999999.99\n    unknown_leaders_exist = False\n    now = time.time()\n    exhausted = bool((self._free.queued() > 0))\n    partitions = list(self._batches.keys())\n    for tp in partitions:\n        leader = cluster.leader_for_partition(tp)\n        if ((leader is None) or (leader == (- 1))):\n            unknown_leaders_exist = True\n            continue\n        elif (leader in ready_nodes):\n            continue\n        elif (tp in self.muted):\n            continue\n        with self._tp_locks[tp]:\n            dq = self._batches[tp]\n            if (not dq):\n                continue\n            batch = dq[0]\n            retry_backoff = (self.config['retry_backoff_ms'] / 1000.0)\n            linger = (self.config['linger_ms'] / 1000.0)\n            backing_off = bool(((batch.attempts > 0) and ((batch.last_attempt + retry_backoff) > now)))\n            waited_time = (now - batch.last_attempt)\n            time_to_wait = (retry_backoff if backing_off else linger)\n            time_left = max((time_to_wait - waited_time), 0)\n            full = bool(((len(dq) > 1) or batch.records.is_full()))\n            expired = bool((waited_time >= time_to_wait))\n            sendable = (full or expired or exhausted or self._closed or self._flush_in_progress())\n            if (sendable and (not backing_off)):\n                ready_nodes.add(leader)\n            else:\n                next_ready_check = min(time_left, next_ready_check)\n    return (ready_nodes, next_ready_check, unknown_leaders_exist)", "docstring": "Get a list of nodes whose partitions are ready to be sent, and the\nearliest time at which any non-sendable partition will be ready;\nAlso return the flag for whether there are any unknown leaders for the\naccumulated partition batches.\n\nA destination node is ready to send if:\n\n* There is at least one partition that is not backing off its send\n* and those partitions are not muted (to prevent reordering if\nmax_in_flight_requests_per_connection is set to 1)\n* and any of the following are true:\n\n* The record set is full\n* The record set has sat in the accumulator for at least linger_ms\nmilliseconds\n* The accumulator is out of memory and threads are blocking waiting\nfor data (in this case all partitions are immediately considered\nready).\n* The accumulator has been closed\n\nArguments:\ncluster (ClusterMetadata):\n\nReturns:\ntuple:\nready_nodes (set): node_ids that have ready batches\nnext_ready_check (float): secs until next ready after backoff\nunknown_leaders_exist (bool): True if metadata refresh needed", "source": "codesearchnet"}
{"code": "def get(self, public_key, spent=None, headers=None):\n    return self.transport.forward_request(method='GET', path=self.path, params={'public_key': public_key, 'spent': spent}, headers=headers)", "docstring": "Get transaction outputs by public key. The public_key parameter\nmust be a base58 encoded ed25519 public key associated with\ntransaction output ownership.\n\nArgs:\npublic_key (str): Public key for which unfulfilled\nconditions are sought.\nspent (bool): Indicate if the result set should include only spent\nor only unspent outputs. If not specified (``None``) the\nresult includes all the outputs (both spent and unspent)\nassociated with the public key.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\n:obj:`list` of :obj:`str`: List of unfulfilled conditions.\n\nExample:\nGiven a transaction with `id` ``da1b64a907ba54`` having an\n`ed25519` condition (at index ``0``) with alice's public\nkey::\n\n>>> bdb = BigchainDB()\n>>> bdb.outputs.get(alice_pubkey)\n... ['../transactions/da1b64a907ba54/conditions/0']", "source": "codesearchnet"}
{"code": "def _get_block_sizes(resnet_size):\n    choices = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}\n    try:\n        return choices[resnet_size]\n    except KeyError:\n        err = 'Could not find layers for selected Resnet size.\\nSize received: {}; sizes allowed: {}.'.format(resnet_size, choices.keys())\n        raise ValueError(err)", "docstring": "Retrieve the size of each block_layer in the ResNet model.\n\nThe number of block layers used for the Resnet model varies according\nto the size of the model. This helper grabs the layer set we want, throwing\nan error if a non-standard size has been selected.\n\nArgs:\nresnet_size: The number of convolutional layers needed in the model.\n\nReturns:\nA list of block sizes to use in building the model.\n\nRaises:\nKeyError: if invalid resnet_size is received.", "source": "codesearchnet"}
{"code": "def create_new(cls, mapreduce_id, shard_number):\n    shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)\n    state = cls(key_name=shard_id, mapreduce_id=mapreduce_id)\n    return state", "docstring": "Create new shard state.\n\nArgs:\nmapreduce_id: unique mapreduce id as string.\nshard_number: shard number for which to create shard state.\n\nReturns:\nnew instance of ShardState ready to put into datastore.", "source": "codesearchnet"}
{"code": "def ignore_path(path):\n        \n        ignore = False\n        for name in ['.tox', 'dist', 'build', 'node_modules', 'htmlcov']:\n            if path.find(name) >= 0:\n                ignore = True\n                break\n        return ignore", "docstring": "Verify whether to ignore a path.\n\nArgs:\npath (str): path to check.\n\nReturns:\nbool: True when to ignore given path.", "source": "juraj-google-style"}
{"code": "def all_near_zero(a: Union[float, complex, Iterable[float], np.ndarray],\n                  *,\n                  atol: float = 1e-8) -> bool:\n    \n    return np.all(np.less_equal(np.abs(a), atol))", "docstring": "Checks if the tensor's elements are all near zero.\n\nArgs:\na: Tensor of elements that could all be near zero.\natol: Absolute tolerance.", "source": "juraj-google-style"}
{"code": "def connect_all(state):\n    \n\n    hosts = [\n        host for host in state.inventory\n        if state.is_host_in_limit(host)\n    ]\n\n    greenlet_to_host = {\n        state.pool.spawn(host.connect, state): host\n        for host in hosts\n    }\n\n    with progress_spinner(greenlet_to_host.values()) as progress:\n        for greenlet in gevent.iwait(greenlet_to_host.keys()):\n            host = greenlet_to_host[greenlet]\n            progress(host)\n\n    \n    failed_hosts = set()\n\n    for greenlet, host in six.iteritems(greenlet_to_host):\n        \n        greenlet.get()\n\n        if host.connection:\n            state.activate_host(host)\n        else:\n            failed_hosts.add(host)\n\n    \n    state.fail_hosts(failed_hosts, activated_count=len(hosts))", "docstring": "Connect to all the configured servers in parallel. Reads/writes state.inventory.\n\nArgs:\nstate (``pyinfra.api.State`` obj): the state containing an inventory to connect to", "source": "juraj-google-style"}
{"code": "def load_audio(audio: Union[str, np.ndarray], sampling_rate=16000, timeout=None) -> np.ndarray:\n    requires_backends(load_audio, ['librosa'])\n    if isinstance(audio, str):\n        if audio.startswith('http:\n            audio = librosa.load(BytesIO(requests.get(audio, timeout=timeout).content), sr=sampling_rate)[0]\n        elif os.path.isfile(audio):\n            audio = librosa.load(audio, sr=sampling_rate)[0]\n    elif isinstance(audio, np.ndarray):\n        audio = audio\n    else:\n        raise TypeError('Incorrect format used for `audio`. Should be an url linking to an audio, a local path, or numpy array.')\n    return audio", "docstring": "Loads `audio` to an np.ndarray object.\n\nArgs:\naudio (`str` or `np.ndarray`):\nThe audio to be loaded to the numpy array format.\nsampling_rate (`int`, *optional*, defaults to 16000):\nThe sampling rate to be used when loading the audio. It should be same as the\nsampling rate the model you will be using further was trained with.\ntimeout (`float`, *optional*):\nThe timeout value in seconds for the URL request.\n\nReturns:\n`np.ndarray`: A numpy array representing the audio.", "source": "github-repos"}
{"code": "def spherical_to_cartesian(r,theta,phi):\n    \n    x = r * np.sin(phi) * np.cos(theta)\n    y = r * np.sin(phi) * np.sin(theta)\n    z = r * np.cos(phi)\n    return (x,y,z)", "docstring": "Simple conversion of spherical to cartesian coordinates\n\nArgs:\nr,theta,phi = scalar spherical coordinates\n\nReturns:\nx,y,z = scalar cartesian coordinates", "source": "juraj-google-style"}
{"code": "def update_network_asset(self, asset_id, name, asset_type):\n        \n        self.update_asset('NETWORK', asset_id, name, asset_type)", "docstring": "Updates a Network Asset\nArgs:\nname: The name provided to the network asset\nasset_type: The type provided to the network asset\nasset_id:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def set_seat_logical_name(self, seat):\n    rc = self._libinput.libinput_device_set_seat_logical_name(self._handle, seat.encode())\n    assert (rc == 0), 'Cannot assign device to {}'.format(seat)", "docstring": "Change the logical seat associated with this device by removing\nthe device and adding it to the new seat.\n\nThis command is identical to physically unplugging the device, then\nre-plugging it as a member of the new seat. libinput will generate\na :attr:`~libinput.constant.EventType.DEVICE_REMOVED` event and this\n:class:`Device` is considered removed from the context; it will not\ngenerate further events.\nA :attr:`~libinput.constant.EventType.DEVICE_ADDED` event is\ngenerated with a new :class:`Device`. It is the caller's\nresponsibility to update references to the new device accordingly.\n\nIf the logical seat name already exists in the device's physical seat,\nthe device is added to this seat. Otherwise, a new seat is created.\n\nNote:\nThis change applies to this device until removal or\n:meth:`~libinput.LibInput.suspend`, whichever happens earlier.\nArgs:\nseat (str): The new logical seat name.\nRaises:\nAssertionError", "source": "codesearchnet"}
{"code": "def get_unique_families(hkls):\n    \n    \n    def is_perm(hkl1, hkl2):\n        h1 = np.abs(hkl1)\n        h2 = np.abs(hkl2)\n        return all([i == j for i, j in zip(sorted(h1), sorted(h2))])\n\n    unique = collections.defaultdict(list)\n    for hkl1 in hkls:\n        found = False\n        for hkl2 in unique.keys():\n            if is_perm(hkl1, hkl2):\n                found = True\n                unique[hkl2].append(hkl1)\n                break\n        if not found:\n            unique[hkl1].append(hkl1)\n\n    pretty_unique = {}\n    for k, v in unique.items():\n        pretty_unique[sorted(v)[-1]] = len(v)\n\n    return pretty_unique", "docstring": "Returns unique families of Miller indices. Families must be permutations\nof each other.\n\nArgs:\nhkls ([h, k, l]): List of Miller indices.\n\nReturns:\n{hkl: multiplicity}: A dict with unique hkl and multiplicity.", "source": "juraj-google-style"}
{"code": "def __init__(self, start_at):\n        \n        super().__init__()\n\n        self._timeout = start_at\n        self._timeout_triggered = False", "docstring": "Creates a timeout behaviour, which is run at start_at\n\nArgs:\nstart_at (datetime.datetime): when to start the behaviour", "source": "juraj-google-style"}
{"code": "def fram_wave(waveform: np.array, hop_length: int=160, fft_window_size: int=400, center: bool=True):\n    warnings.warn('The function `fram_wave` is deprecated and will be removed in version 4.31.0 of Transformers', FutureWarning)\n    frames = []\n    for i in range(0, waveform.shape[0] + 1, hop_length):\n        if center:\n            half_window = (fft_window_size - 1) \n            start = i - half_window if i > half_window else 0\n            end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]\n            frame = waveform[start:end]\n            if start == 0:\n                padd_width = (-i + half_window, 0)\n                frame = np.pad(frame, pad_width=padd_width, mode='reflect')\n            elif end == waveform.shape[0]:\n                padd_width = (0, i - waveform.shape[0] + half_window)\n                frame = np.pad(frame, pad_width=padd_width, mode='reflect')\n        else:\n            frame = waveform[i:i + fft_window_size]\n            frame_width = frame.shape[0]\n            if frame_width < waveform.shape[0]:\n                frame = np.lib.pad(frame, pad_width=(0, fft_window_size - frame_width), mode='constant', constant_values=0)\n        frames.append(frame)\n    frames = np.stack(frames, 0)\n    return frames", "docstring": "In order to compute the short time fourier transform, the waveform needs to be split in overlapping windowed\nsegments called `frames`.\n\nThe window length (window_length) defines how much of the signal is contained in each frame, while the hop length\ndefines the step between the beginning of each new frame.\n\n\nArgs:\nwaveform (`np.array` of shape `(sample_length,)`):\nThe raw waveform which will be split into smaller chunks.\nhop_length (`int`, *optional*, defaults to 160):\nStep between each window of the waveform.\nfft_window_size (`int`, *optional*, defaults to 400):\nDefines the size of the window.\ncenter (`bool`, defaults to `True`):\nWhether or not to center each frame around the middle of the frame. Centering is done by reflecting the\nwaveform on the left and on the right.\n\nReturn:\nframed_waveform (`np.array` of shape `(waveform.shape // hop_length , fft_window_size)`):\nThe framed waveforms that can be fed to `np.fft`.", "source": "github-repos"}
{"code": "def get_appliance(self, id_or_uri, fields=''):\n        \n        uri = self.URI + '/image-streamer-appliances/' + extract_id_from_uri(id_or_uri)\n        if fields:\n            uri += '?fields=' + fields\n\n        return self._client.get(uri)", "docstring": "Gets the particular Image Streamer resource based on its ID or URI.\n\nArgs:\nid_or_uri:\nCan be either the Os Deployment Server ID or the URI\nfields:\nSpecifies which fields should be returned in the result.\n\nReturns:\ndict: Image Streamer resource.", "source": "juraj-google-style"}
{"code": "def processor_groups(mesh_shape, group_dims):\n    group_numbers = [pnum_to_group(mesh_shape, group_dims, pnum) for pnum in xrange(mesh_shape.size)]\n    ret = []\n    for (pnum, g) in enumerate(group_numbers):\n        while (len(ret) <= g):\n            ret.append([])\n        ret[g].append(pnum)\n    return ret", "docstring": "Groups of processors which differ only in the given dimensions.\n\nArgs:\nmesh_shape: a Shape\ngroup_dims: a list of integers\n\nReturns:\na list of lists of integers (processor numbers)", "source": "codesearchnet"}
{"code": "def get_unique_variable(name):\n    candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)\n    if (not candidates):\n        raise ValueError(('Couldnt find variable %s' % name))\n    for candidate in candidates:\n        if (candidate.op.name == name):\n            return candidate\n    raise ValueError('Variable %s does not uniquely identify a variable', name)", "docstring": "Gets the variable uniquely identified by that name.\n\nArgs:\nname: a name that uniquely identifies the variable.\n\nReturns:\na tensorflow variable.\n\nRaises:\nValueError: if no variable uniquely identified by the name exists.", "source": "codesearchnet"}
{"code": "def export_artifacts(self, processed_artifacts, sketch_id):\n    \n\n    \n    for timeline_name, artifact_path in processed_artifacts:\n      print('Uploading {0:s} to timeline {1:s}'.format(\n          artifact_path, timeline_name))\n      new_timeline_id = self.upload_timeline(timeline_name, artifact_path)\n      self.add_timeline_to_sketch(sketch_id, new_timeline_id)\n\n    return sketch_id", "docstring": "Upload provided artifacts to specified, or new if non-existent, sketch.\n\nArgs:\nprocessed_artifacts:  List of (timeline_name, artifact_path) tuples\nsketch_id: ID of sketch to append the timeline to\n\nReturns:\nint: ID of sketch.", "source": "juraj-google-style"}
{"code": "def set_match_statements(self, name, action, seqno, statements):\n    try:\n        current_statements = self.get(name)[action][seqno]['match']\n    except:\n        current_statements = []\n    commands = list()\n    for entry in set(current_statements).difference(statements):\n        commands.append(('route-map %s %s %s' % (name, action, seqno)))\n        commands.append(('no match %s' % entry))\n    for entry in set(statements).difference(current_statements):\n        commands.append(('route-map %s %s %s' % (name, action, seqno)))\n        commands.append(('match %s' % entry))\n    return (self.configure(commands) if commands else True)", "docstring": "Configures the match statements within the routemap clause.\nThe final configuration of match statements will reflect the list\nof statements passed into the statements attribute. This implies\nmatch statements found in the routemap that are not specified in the\nstatements attribute will be removed.\n\nArgs:\nname (string): The full name of the routemap.\naction (string): The action to take for this routemap clause.\nseqno (integer): The sequence number for the routemap clause.\nstatements (list): A list of the match-related statements. Note\nthat the statements should omit the leading\nmatch.\n\nReturns:\nTrue if the operation succeeds otherwise False", "source": "codesearchnet"}
{"code": "def idle(self, stop_signals: tuple = (SIGINT, SIGTERM, SIGABRT)):\n        \n\n        def signal_handler(*args):\n            self.is_idle = False\n\n        for s in stop_signals:\n            signal(s, signal_handler)\n\n        self.is_idle = True\n\n        while self.is_idle:\n            time.sleep(1)\n\n        self.stop()", "docstring": "Blocks the program execution until one of the signals are received,\nthen gently stop the Client by closing the underlying connection.\n\nArgs:\nstop_signals (``tuple``, *optional*):\nIterable containing signals the signal handler will listen to.\nDefaults to (SIGINT, SIGTERM, SIGABRT).", "source": "juraj-google-style"}
{"code": "def _publish_scan_response(self, client):\n        \n\n        devices = self._manager.scanned_devices\n\n        converted_devs = []\n        for uuid, info in devices.items():\n            slug = self._build_device_slug(uuid)\n\n            message = {}\n            message['uuid'] = uuid\n            if uuid in self._connections:\n                message['user_connected'] = True\n            elif 'user_connected' in info:\n                message['user_connected'] = info['user_connected']\n            else:\n                message['user_connected'] = False\n\n            message['connection_string'] = slug\n            message['signal_strength'] = info['signal_strength']\n\n            converted_devs.append({x: y for x, y in message.items()})\n            message['type'] = 'notification'\n            message['operation'] = 'advertisement'\n\n            self.client.publish(self.topics.gateway_topic(slug, 'data/advertisement'), message)\n\n        probe_message = {}\n        probe_message['type'] = 'response'\n        probe_message['client'] = client\n        probe_message['success'] = True\n        probe_message['devices'] = converted_devs\n\n        self.client.publish(self.topics.status, probe_message)", "docstring": "Publish a scan response message\n\nThe message contains all of the devices that are currently known\nto this agent.  Connection strings for direct connections are\ntranslated to what is appropriate for this agent.\n\nArgs:\nclient (string): A unique id for the client that made this request", "source": "juraj-google-style"}
{"code": "def get_saved_model_tag_sets(saved_model_dir):\n    saved_model = read_saved_model(saved_model_dir)\n    all_tags = []\n    for meta_graph_def in saved_model.meta_graphs:\n        all_tags.append(list(meta_graph_def.meta_info_def.tags))\n    return all_tags", "docstring": "Retrieves all the tag-sets available in the SavedModel.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel.\n\nReturns:\nList of all tag-sets in the SavedModel, where a tag-set is represented as a\nlist of strings.", "source": "github-repos"}
{"code": "def render_wrapper(self, region='us-east-1'):\n        \n        base = self.settings['pipeline']['base']\n\n        if self.base:\n            base = self.base\n\n        email = self.settings['pipeline']['notifications']['email']\n        slack = self.settings['pipeline']['notifications']['slack']\n        baking_process = self.settings['pipeline']['image']['builder']\n        provider = 'aws'\n        root_volume_size = self.settings['pipeline']['image']['root_volume_size']\n        bake_instance_type = self.settings['pipeline']['image']['bake_instance_type']\n\n        ami_id = ami_lookup(name=base, region=region)\n\n        ami_template_file = generate_packer_filename(provider, region, baking_process)\n\n        pipeline_id = self.compare_with_existing(region=region)\n\n        data = {\n            'app': {\n                'ami_id': ami_id,\n                'appname': self.app_name,\n                'group_name': self.group_name,\n                'repo_name': self.repo_name,\n                'base': base,\n                'environment': 'packaging',\n                'region': region,\n                'triggerjob': self.trigger_job,\n                'run_as_user': DEFAULT_RUN_AS_USER,\n                'email': email,\n                'slack': slack,\n                'root_volume_size': root_volume_size,\n                'bake_instance_type': bake_instance_type,\n                'ami_template_file': ami_template_file,\n                'pipeline': self.settings['pipeline']\n            },\n            'id': pipeline_id\n        }\n\n        self.log.debug('Wrapper app data:\\n%s', pformat(data))\n\n        wrapper = get_template(template_file='pipeline/pipeline_wrapper.json.j2', data=data, formats=self.generated)\n\n        return json.loads(wrapper)", "docstring": "Generate the base Pipeline wrapper.\n\nThis renders the non-repeatable stages in a pipeline, like jenkins, baking, tagging and notifications.\n\nArgs:\nregion (str): AWS Region.\n\nReturns:\ndict: Rendered Pipeline wrapper.", "source": "juraj-google-style"}
{"code": "def GetDataStream(self, name, case_sensitive=True):\n    \n    if not isinstance(name, py2to3.STRING_TYPES):\n      raise ValueError('Name is not a string.')\n\n    name_lower = name.lower()\n    matching_data_stream = None\n\n    for data_stream in self._GetDataStreams():\n      if data_stream.name == name:\n        return data_stream\n\n      if not case_sensitive and data_stream.name.lower() == name_lower:\n        if not matching_data_stream:\n          matching_data_stream = data_stream\n\n    return matching_data_stream", "docstring": "Retrieves a data stream by name.\n\nArgs:\nname (str): name of the data stream.\ncase_sensitive (Optional[bool]): True if the name is case sensitive.\n\nReturns:\nDataStream: a data stream or None if not available.\n\nRaises:\nValueError: if the name is not string.", "source": "juraj-google-style"}
{"code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n    if not self._built:\n        self.build()\n    nest.assert_same_structure(features, self._feature_config)\n    flat_inputs = nest.flatten(features)\n    flat_weights = [None] * len(flat_inputs)\n    if weights is not None:\n        nest.assert_same_structure(features, weights)\n        flat_weights = nest.flatten(weights)\n    flat_features = nest.flatten_with_joined_string_paths(self._feature_config)\n    outputs = []\n    for inp, weight, (path, feature) in zip(flat_inputs, flat_weights, flat_features):\n        table = self.embedding_tables[feature.table]\n        if weight is not None:\n            if isinstance(inp, tensor.Tensor):\n                raise ValueError('Weight specified for {}, but input is dense.'.format(path))\n            elif type(weight) is not type(inp):\n                raise ValueError('Weight for {} is of type {} but it does not match type of the input which is {}.'.format(path, type(weight), type(inp)))\n            elif feature.max_sequence_length > 0:\n                raise ValueError('Weight specified for {}, but this is a sequence feature.'.format(path))\n        if isinstance(inp, tensor.Tensor):\n            if feature.max_sequence_length > 0:\n                raise ValueError('Feature {} is a sequence feature but a dense tensor was passed.'.format(path))\n            outputs.append(embedding_ops.embedding_lookup_v2(table, inp))\n        elif isinstance(inp, sparse_tensor.SparseTensor):\n            outputs.append(self._embedding_lookup_for_sparse_tensor(inp, weight, table, feature))\n        elif isinstance(inp, ragged_tensor.RaggedTensor):\n            outputs.append(self._embedding_lookup_for_ragged_tensor(inp, weight, table, feature))\n        else:\n            raise ValueError('Input {} is type {}. Tensor, SparseTensor or RaggedTensor expected.'.format(path, type(inp)))\n    return nest.pack_sequence_as(self._feature_config, outputs)", "docstring": "Apply embedding lookup on TPUs using Tensorcore.\n\nNote that all the sparse and ragged tensors will be converted to dense\ntensors on CPU and then passed to the TPU to do embedding look up. Large\nembedding lookup is not supported by this API, use the TPUEmbedding mid\nlevel api instead.\n\nArgs:\nfeatures: a nested structure of Tensors, SparseTensors or RaggedTensors.\nweights: a nested structure of Tensors, SparseTensors or RaggedTensors or\nNone for no weights. If not None, structure must match that of inputs,\nbut entries are allowed to be None.\n\nReturns:\nA nested structure of Tensors with the same structure as inputs.", "source": "github-repos"}
{"code": "def update(self, domain, type_name, search_command, body):\n    return self._request(domain, type_name, search_command, 'PUT', body)", "docstring": "Update entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\nbody (str): JSON body", "source": "codesearchnet"}
{"code": "def get_token(self, text, start=0):\n        \n        best_class = best_match = None\n\n        for token_class, match in self.matching_tokens(text):\n            if best_match and best_match.end() >= match.end():\n                continue\n            best_match = match\n            best_class = token_class\n\n        return best_class, best_match", "docstring": "Retrieve the next token from some text.\n\nArgs:\ntext (str): the text from which tokens should be extracted\n\nReturns:\n(token_kind, token_text): the token kind and its content.", "source": "juraj-google-style"}
{"code": "def index_2d(seqs: List[List[Any]], target: Any) -> Tuple[(int, int)]:\n    for i in range(len(seqs)):\n        for j in range(len(seqs[i])):\n            if (seqs[i][j] == target):\n                return (i, j)\n    raise ValueError('Item not present.')", "docstring": "Finds the first index of a target item within a list of lists.\n\nArgs:\nseqs: The list of lists to search.\ntarget: The item to find.\n\nRaises:\nValueError: Item is not present.", "source": "codesearchnet"}
{"code": "def _ParseCommentRecord(self, structure):\n    comment = structure[1]\n    if comment.startswith('Version'):\n        (_, _, self._version) = comment.partition(':')\n    elif comment.startswith('Software'):\n        (_, _, self._software) = comment.partition(':')\n    elif comment.startswith('Time'):\n        (_, _, time_format) = comment.partition(':')\n        if ('local' in time_format.lower()):\n            self._use_local_timezone = True", "docstring": "Parse a comment and store appropriate attributes.\n\nArgs:\nstructure (pyparsing.ParseResults): parsed log line.", "source": "codesearchnet"}
{"code": "def mset(self, values):\n    for (key, value) in values.items():\n        self.set(key, value)", "docstring": "Set the value of several keys at once.\n\nArgs:\nvalues (dict): maps a key to its value.", "source": "codesearchnet"}
{"code": "def stream_sample(self, md5, kwargs=None):\n        \n\n        \n        max_rows = kwargs.get('max_rows', None) if kwargs else None\n\n        \n        sample = self.get_sample(md5)['sample']\n        raw_bytes = sample['raw_bytes']\n\n        \n        type_tag = sample['type_tag']\n        if type_tag == 'bro':\n            bro_log = bro_log_reader.BroLogReader(convert_datetimes=False)\n            mem_file = StringIO(raw_bytes)\n            generator = bro_log.read_log(mem_file)\n            return generator\n        elif type_tag == 'els_query':\n            els_log = json.loads(raw_bytes)\n            \n            if 'fields' in els_log['hits']['hits'][0]:\n                generator = (row['fields'] for row in els_log['hits']['hits'][:max_rows])\n            else:\n                generator = (row['_source'] for row in els_log['hits']['hits'][:max_rows])\n            return generator\n        elif type_tag == 'log':\n            generator = ({'row':row} for row in raw_bytes.split('\\n')[:max_rows])\n            return generator\n        elif type_tag == 'json':\n            generator = (row for row in json.loads(raw_bytes)[:max_rows])\n            return generator\n        else:\n            raise RuntimeError('Cannot stream file %s with type_tag:%s' % (md5, type_tag))", "docstring": "Stream the sample by giving back a generator, typically used on 'logs'.\nArgs:\nmd5: the md5 of the sample\nkwargs: a way of specifying subsets of samples (None for all)\nmax_rows: the maximum number of rows to return\nReturns:\nA generator that yields rows of the file/log", "source": "juraj-google-style"}
{"code": "def index_library_datasets(self, tick_f=None):\n        \n\n        dataset_n = 0\n        partition_n = 0\n\n        def tick(d, p):\n            if tick_f:\n                tick_f('datasets: {} partitions: {}'.format(d, p))\n\n        for dataset in self.library.datasets:\n\n            if self.backend.dataset_index.index_one(dataset):\n                \n                dataset_n += 1\n                tick(dataset_n, partition_n)\n                for partition in dataset.partitions:\n                    self.backend.partition_index.index_one(partition)\n                    partition_n += 1\n                    tick(dataset_n, partition_n)\n            else:\n                \n                pass", "docstring": "Indexes all datasets of the library.\n\nArgs:\ntick_f (callable, optional): callable of one argument. Gets string with index state.", "source": "juraj-google-style"}
{"code": "def fswap(p, q):\n    (yield (cirq.ISWAP(q, p), (cirq.Z(p) ** 1.5)))\n    (yield (cirq.Z(q) ** 1.5))", "docstring": "Decompose the Fermionic SWAP gate into two single-qubit gates and\none iSWAP gate.\n\nArgs:\np: the id of the first qubit\nq: the id of the second qubit", "source": "codesearchnet"}
{"code": "def inner_text(node):\n    \n\n    from lxml import etree\n\n    \n    parts = [node.text]\n\n    for child in node.getchildren():\n        \n        parts.append(etree.tostring(child, encoding=\"utf-8\", method=\"text\"))\n\n        \n        parts.append(child.tail)\n\n    \n    return \"\".join(map(decode_bytes, filter(None, parts)))", "docstring": "Returns the inner text of a given XML node, excluding tags.\n\nArgs:\nnode: (lxml.etree.Element): The node whose inner text is desired.\n\nReturns:\nstr: The inner text of the node.", "source": "juraj-google-style"}
{"code": "def _parse_domain_id(self, config):\n    match = re.search('domain-id (.+)$', config)\n    value = (match.group(1) if match else None)\n    return dict(domain_id=value)", "docstring": "Scans the config block and parses the domain-id value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "codesearchnet"}
{"code": "def get(self, key):\n    ''\n    value = self.child_datastore.get(key)\n    return self.deserializedValue(value)", "docstring": "Return the object named by key or None if it does not exist.\nRetrieves the value from the ``child_datastore``, and de-serializes\nit on the way out.\n\nArgs:\nkey: Key naming the object to retrieve\n\nReturns:\nobject or None", "source": "codesearchnet"}
{"code": "def datasets_update(self, dataset_name, dataset_info):\n    \n    url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)\n    return datalab.utils.Http.request(url, method='PUT', data=dataset_info,\n                                      credentials=self._credentials)", "docstring": "Updates the Dataset info.\n\nArgs:\ndataset_name: the name of the dataset to update as a tuple of components.\ndataset_info: the Dataset resource with updated fields.", "source": "juraj-google-style"}
{"code": "class AltCLIPEncoder(nn.Module):\n\n    def __init__(self, config: AltCLIPConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`AltCLIPEncoderLayer`].\n\nArgs:\nconfig: AltCLIPConfig", "source": "github-repos"}
{"code": "def _verify_watches(self, watch_opts, expected_output_slot, expected_debug_ops, expected_debug_urls):\n    node_names = []\n    for watch in watch_opts:\n        node_names.append(watch.node_name)\n        if watch.node_name == '*':\n            self.assertEqual(-1, watch.output_slot)\n            self.assertEqual(expected_debug_ops, watch.debug_ops)\n            self.assertEqual(expected_debug_urls, watch.debug_urls)\n        else:\n            self.assertEqual(expected_output_slot, watch.output_slot)\n            self.assertEqual(expected_debug_ops, watch.debug_ops)\n            self.assertEqual(expected_debug_urls, watch.debug_urls)\n    return node_names", "docstring": "Verify a list of debug tensor watches.\n\nThis requires all watches in the watch list have exactly the same\noutput_slot, debug_ops and debug_urls.\n\nArgs:\nwatch_opts: Repeated protobuf field of DebugTensorWatch.\nexpected_output_slot: Expected output slot index, as an integer.\nexpected_debug_ops: Expected debug ops, as a list of strings.\nexpected_debug_urls: Expected debug URLs, as a list of strings.\n\nReturns:\nList of node names from the list of debug tensor watches.", "source": "github-repos"}
{"code": "def create(self, vectors):\n    if (type(vectors) is dict):\n        vectors = [vectors]\n    for vector in vectors:\n        if (not ('properties' in list(vector.keys()))):\n            raise Exception('Vector does not contain \"properties\" field.')\n        if (not ('item_type' in list(vector['properties'].keys()))):\n            raise Exception('Vector does not contain \"item_type\".')\n        if (not ('ingest_source' in list(vector['properties'].keys()))):\n            raise Exception('Vector does not contain \"ingest_source\".')\n    r = self.gbdx_connection.post(self.create_url, data=json.dumps(vectors))\n    r.raise_for_status()\n    return r.json()", "docstring": "Create a vectors in the vector service.\n\nArgs:\nvectors: A single geojson vector or a list of geojson vectors. Item_type and ingest_source are required.\n\nReturns:\n(list): IDs of the vectors created\n\nExample:\n>>> vectors.create(\n...     {\n...         \"type\": \"Feature\",\n...         \"geometry\": {\n...             \"type\": \"Point\",\n...             \"coordinates\": [1.0,1.0]\n...         },\n...         \"properties\": {\n...             \"text\" : \"item text\",\n...             \"name\" : \"item name\",\n...             \"item_type\" : \"type\",\n...             \"ingest_source\" : \"source\",\n...             \"attributes\" : {\n...                 \"latitude\" : 1,\n...                 \"institute_founded\" : \"2015-07-17\",\n...                 \"mascot\" : \"moth\"\n...             }\n...         }\n...     }\n... )", "source": "codesearchnet"}
{"code": "def add_permissions(self, grp_name, resource, permissions):\n    self.service.add_permissions(grp_name, resource, permissions, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Add additional permissions for the group associated with the given resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.BossResource): Identifies which data model object to operate on.\npermissions (list): List of permissions to add to the given resource.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def GetHTTPHeaders(self):\n    http_headers = self._adwords_client.oauth2_client.CreateHttpHeader()\n    if self.enable_compression:\n        http_headers['accept-encoding'] = 'gzip'\n    http_headers.update(self.custom_http_headers)\n    return http_headers", "docstring": "Returns the HTTP headers required for request authorization.\n\nReturns:\nA dictionary containing the required headers.", "source": "codesearchnet"}
{"code": "def to_json_str(value: Any, *, json_indent=None, **kwargs) -> str:\n\n    def _encode_int_keys(v):\n        if isinstance(v, dict):\n            return {f'n_:{k}' if isinstance(k, int) else k: _encode_int_keys(v) for k, v in v.items()}\n        elif isinstance(v, list):\n            return [_encode_int_keys(v) for v in v]\n        return v\n    return json.dumps(_encode_int_keys(to_json(value, **kwargs)), indent=json_indent)", "docstring": "Serializes a (maybe) symbolic value into a JSON string.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Any())\n])\nclass A(pg.Object):\npass\n\na1 = A(1)\njson_str = a1.to_json_str()\na2 = pg.from_json_str(json_str)\nassert pg.eq(a1, a2)\n\nArgs:\nvalue: Value to serialize.\njson_indent: The size of indentation for JSON format.\n**kwargs: Additional keyword arguments that are passed to ``pg.to_json``.\n\nReturns:\nA JSON string.", "source": "github-repos"}
{"code": "def cross_section(verts, tris, plane_orig, plane_normal, **kwargs):\n    \n    mesh = TriangleMesh(verts, tris)\n    plane = Plane(plane_orig, plane_normal)\n    return cross_section_mesh(mesh, plane, **kwargs)", "docstring": "Compute the planar cross section of a mesh. This returns a set of\npolylines.\n\nArgs:\nverts: Nx3 array of the vertices position\nfaces: Nx3 array of the faces, containing vertex indices\nplane_orig: 3-vector indicating the plane origin\nplane_normal: 3-vector indicating the plane normal\n\nReturns:\nA list of Nx3 arrays, each representing a disconnected portion\nof the cross section as a polyline", "source": "juraj-google-style"}
{"code": "def get_guild_info(self, id: str) -> Dict[(str, Any)]:\n    return self._query(f'guilds/{id}', 'GET')", "docstring": "Get a guild's information by its id\n\nArgs:\nid: snowflake id of the guild\n\nReturns:\nDictionary data for the guild API object\n\nExample:\n{\n\"id\": \"41771983423143937\",\n\"name\": \"Discord Developers\",\n\"icon\": \"SEkgTU9NIElUUyBBTkRSRUkhISEhISEh\",\n\"splash\": null,\n\"owner_id\": \"80351110224678912\",\n\"region\": \"us-east\",\n\"afk_channel_id\": \"42072017402331136\",\n\"afk_timeout\": 300,\n\"embed_enabled\": true,\n\"embed_channel_id\": \"41771983444115456\",\n\"verification_level\": 1,\n\"roles\": [],\n\"emojis\": [],\n\"features\": [\"INVITE_SPLASH\"],\n\"unavailable\": false\n}", "source": "codesearchnet"}
{"code": "def prepare_soap_envelope(self, prepared_soap_header, prepared_soap_body):\n        \n\n        \n        soap_env_template = (\n            '<?xml version=\"1.0\"?>'\n            '<s:Envelope xmlns:s=\"http:\n            ' s:encodingStyle=\"http:\n                '{soap_header}'\n                    '<s:Body>'\n                        '{soap_body}'\n                    '</s:Body>'\n            '</s:Envelope>')  \n        return soap_env_template.format(\n            soap_header=prepared_soap_header,\n            soap_body=prepared_soap_body)", "docstring": "Prepare the SOAP Envelope for sending.\n\nArgs:\nprepared_soap_header (str): A SOAP Header prepared by\n`prepare_soap_header`\nprepared_soap_body (str): A SOAP Body prepared by\n`prepare_soap_body`\n\nReturns:\nstr: A prepared SOAP Envelope", "source": "juraj-google-style"}
{"code": "def create_and_fill_np_array(start_or_end_logits, dataset, max_len):\n    step = 0\n    logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)\n    for i, output_logit in enumerate(start_or_end_logits):\n        batch_size = output_logit.shape[0]\n        cols = output_logit.shape[1]\n        if step + batch_size < len(dataset):\n            logits_concat[step:step + batch_size, :cols] = output_logit\n        else:\n            logits_concat[step:, :cols] = output_logit[:len(dataset) - step]\n        step += batch_size\n    return logits_concat", "docstring": "Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n\nArgs:\nstart_or_end_logits(:obj:`tensor`):\nThis is the output predictions of the model. We can only enter either start or end logits.\neval_dataset: Evaluation dataset\nmax_len(:obj:`int`):\nThe maximum length of the output tensor. ( See the model.eval() part for more details )", "source": "github-repos"}
{"code": "def copy_remote_file(web_file, destination):\n    size = 0\n    dir_name = os.path.dirname(destination)\n    if (not os.path.exists(dir_name)):\n        os.makedirs(dir_name)\n    with open(destination, 'wb') as file_:\n        chunk_size = (8 * 1024)\n        for chunk in web_file.iter_content(chunk_size=chunk_size):\n            if chunk:\n                file_.write(chunk)\n                size += len(chunk)\n    return size", "docstring": "Check if exist the destination path, and copy the online resource\nfile to local.\n\nArgs:\n:web_file: reference to online file resource to take.\n:destination: path to store the file.", "source": "codesearchnet"}
{"code": "def cancel(self, job_ids):\n        \n        statuses = []\n        for job_id in job_ids:\n            try:\n                self.delete_instance(job_id)\n                statuses.append(True)\n                self.provisioned_blocks -= 1\n            except Exception:\n                statuses.append(False)\n        return statuses", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\nArgs:\n- job_ids (list): A list of job identifiers\n\nReturns:\n- A list of status from cancelling the job which can be True, False\n\nRaises:\n- ExecutionProviderException or its subclasses", "source": "juraj-google-style"}
{"code": "def validate_config_has_one_of(config, one_of_keys):\n    intersection = set(config).intersection(one_of_keys)\n    if (len(intersection) > 1):\n        raise Exception(('Only one of the values in \"%s\" is needed' % ', '.join(intersection)))\n    if (len(intersection) == 0):\n        raise Exception(('One of the values in \"%s\" is needed' % ', '.join(one_of_keys)))", "docstring": "Validate a config dictionary to make sure it has one and only one\nkey in one_of_keys.\n\nArgs:\nconfig: the config to validate.\none_of_keys: the list of possible keys that config can have one and only one.\n\nRaises:\nException if the config does not have any of them, or multiple of them.", "source": "codesearchnet"}
{"code": "def deserialize(config, custom_objects=None):\n    from tensorflow.python.keras.mixed_precision import loss_scale_optimizer\n    all_classes = {'adadelta': adadelta_v2.Adadelta, 'adagrad': adagrad_v2.Adagrad, 'adam': adam_v2.Adam, 'adamax': adamax_v2.Adamax, 'nadam': nadam_v2.Nadam, 'rmsprop': rmsprop_v2.RMSprop, 'sgd': gradient_descent_v2.SGD, 'ftrl': ftrl.Ftrl, 'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer, 'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer}\n    if config['class_name'].lower() in all_classes:\n        config['class_name'] = config['class_name'].lower()\n    return deserialize_keras_object(config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name='optimizer')", "docstring": "Inverse of the `serialize` function.\n\nArgs:\nconfig: Optimizer configuration dictionary.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during deserialization.\n\nReturns:\nA Keras Optimizer instance.", "source": "github-repos"}
{"code": "def ValidatePassword(self, password):\n    password = to_aes_key(password)\n    return (hashlib.sha256(password).digest() == self.LoadStoredData('PasswordHash'))", "docstring": "Validates if the provided password matches with the stored password.\n\nArgs:\npassword (string): a password.\n\nReturns:\nbool: the provided password matches with the stored password.", "source": "codesearchnet"}
{"code": "def _validate_alias_name(alias_name):\n    \n    if not alias_name:\n        raise CLIError(EMPTY_ALIAS_ERROR)\n\n    if not re.match('^[a-zA-Z]', alias_name):\n        raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))", "docstring": "Check if the alias name is valid.\n\nArgs:\nalias_name: The name of the alias to validate.", "source": "juraj-google-style"}
{"code": "def __gt__(self, other):\n    \n    if not isinstance(other, interface.DateTimeValues):\n      raise ValueError('Other not an instance of DateTimeValues')\n\n    return not isinstance(other, Never)", "docstring": "Determines if the date time values are greater than other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are greater than other.\n\nRaises:\nValueError: if other is not an instance of DateTimeValues.", "source": "juraj-google-style"}
{"code": "def GetSubNodeByLocation(self, location):\n    \n    for sub_node in self.sub_nodes:\n      sub_node_location = getattr(sub_node.path_spec, 'location', None)\n      if location == sub_node_location:\n        return sub_node\n\n    return None", "docstring": "Retrieves a sub scan node based on the location.\n\nArgs:\nlocation (str): location that should match the location of the path\nspecification of a sub scan node.\n\nReturns:\nSourceScanNode: sub scan node or None if not available.", "source": "juraj-google-style"}
{"code": "def files(self, request, id):\n    gist = self.send(request, id).json()\n    return gist['files']", "docstring": "Returns a list of files in the gist\n\nArguments:\nrequest: an initial request object\nid:      the gist identifier\n\nReturns:\nA list of the files", "source": "codesearchnet"}
{"code": "def detect_mbr(self, filename, offset, fs_id):\n        \n        self.logger.debug('Detecting MBR partition type')\n\n        if fs_id not in self.__mbr_plugins:\n            return None\n        else:\n            plugins = self.__mbr_plugins.get(fs_id)\n            for plugin in plugins:\n                if plugin.detect(filename, offset):\n                    return plugin.get_volume_object()\n        return None", "docstring": "Used by rawdisk.session.Session to match mbr partitions against\nfilesystem plugins.\n\nArgs:\nfilename: device or file that it will read in order to detect\nthe filesystem fs_id: filesystem id to match (ex. 0x07)\noffset: offset for the filesystem that is being matched\n\nReturns:\nVolume object supplied by matched plugin.\nIf there is no match, None is returned", "source": "juraj-google-style"}
{"code": "def _validate_write(self, address):\n    if (not any((address.startswith(ns) for ns in self._write_list))):\n        raise AuthorizationException(address=address)", "docstring": "Raises an exception if the address is not allowed to be set\nin this context, based on txn outputs.\n\nNotes:\nChecks that the address is either listed fully as one of the\noutputs, or some portion of the address is listed as a namespace\nin the outputs of the txn.\n\nArgs:\naddress (str): The address to be validated. The context manager\nvalidates the address correctness (70 hex characters).\nReturns:\nNone\n\nRaises:\nAuthorizationException", "source": "codesearchnet"}
{"code": "def get_vocabulary(self, include_special_tokens=True):\n    return self._lookup_layer.get_vocabulary(include_special_tokens)", "docstring": "Returns the current vocabulary of the layer.\n\nArgs:\ninclude_special_tokens: If `True`, the returned vocabulary\nwill include the padding and OOV tokens,\nand a term's index in the vocabulary will equal\nthe term's index when calling the layer. If `False`, the\nreturned vocabulary will not include any padding\nor OOV tokens.", "source": "github-repos"}
{"code": "def infer(self, **kwargs) -> Any:", "docstring": "Returns the inferred value.\n\nArgs:\n**kwargs: Optional keyword arguments for inference, which are usually\ninferential subclass specific.\n\nReturns:\nInferred value.\n\nRaises:\nAttributeError: If the value cannot be inferred.", "source": "github-repos"}
{"code": "def __init__(self, config: FastSpeech2ConformerConfig, num_layers=2, num_chans=384, kernel_size=3, dropout_rate=0.5):\n    super().__init__()\n    self.conv_layers = nn.ModuleList()\n    for idx in range(num_layers):\n        input_channels = config.hidden_size if idx == 0 else num_chans\n        layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate)\n        self.conv_layers.append(layer)\n    self.linear = nn.Linear(num_chans, 1)", "docstring": "Initialize variance predictor module.\n\nArgs:\ninput_dim (`int`): Input dimension.\nnum_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.\nnum_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.\nkernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.\ndropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate.", "source": "github-repos"}
{"code": "def NodeName(node):\n    if node.type < 256:\n        return token.tok_name[node.type]\n    else:\n        return pygram.python_grammar.number2symbol[node.type]", "docstring": "Produce a string name for a given node.\n\nFor a Leaf this is the token name, and for a Node this is the type.\n\nArguments:\nnode: a tree node\n\nReturns:\nName as a string.", "source": "github-repos"}
{"code": "def permute(self, ordering: np.ndarray, *, axis: int) -> None:\n    if (axis == 0):\n        self.values = self.values[(ordering, :)]\n    elif (axis == 1):\n        self.values = self.values[(:, ordering)]\n    else:\n        raise ValueError('axis must be 0 or 1')", "docstring": "Permute the layer along an axis\n\nArgs:\naxis: The axis to permute (0, permute the rows; 1, permute the columns)\nordering: The permutation vector", "source": "codesearchnet"}
{"code": "def _pipeline_cell(args, cell_body):\n    \n    name = args.get('name')\n    if name is None:\n        raise Exception('Pipeline name was not specified.')\n\n    import google.datalab.utils as utils\n    bq_pipeline_config = utils.commands.parse_config(\n      cell_body, utils.commands.notebook_environment())\n\n    try:\n      airflow_spec = \\\n        google.datalab.contrib.bigquery.commands.get_airflow_spec_from_config(name,\n                                                                              bq_pipeline_config)\n    except AttributeError:\n      return \"Perhaps you're missing: import google.datalab.contrib.bigquery.commands\"\n\n    \n    error_message = ''\n    gcs_dag_bucket = args.get('gcs_dag_bucket')\n    gcs_dag_file_path = args.get('gcs_dag_file_path')\n    if gcs_dag_bucket:\n      try:\n        airflow = google.datalab.contrib.pipeline.airflow.Airflow(gcs_dag_bucket, gcs_dag_file_path)\n        airflow.deploy(name, airflow_spec)\n        error_message += (\"Airflow pipeline successfully deployed! View dashboard for more \"\n                          \"details.\\n\")\n      except AttributeError:\n        return \"Perhaps you're missing: import google.datalab.contrib.pipeline.airflow\"\n\n    location = args.get('location')\n    environment = args.get('environment')\n\n    if location and environment:\n      try:\n        composer = google.datalab.contrib.pipeline.composer.Composer(location, environment)\n        composer.deploy(name, airflow_spec)\n        error_message += (\"Composer pipeline successfully deployed! View dashboard for more \"\n                          \"details.\\n\")\n      except AttributeError:\n        return \"Perhaps you're missing: import google.datalab.contrib.pipeline.composer\"\n\n    if args.get('debug'):\n      error_message += '\\n\\n' + airflow_spec\n\n    return error_message", "docstring": "Implements the pipeline subcommand in the %%bq magic.\nArgs:\nargs: the arguments following '%%bq pipeline'.\ncell_body: Cell contents.", "source": "juraj-google-style"}
{"code": "def DownloadDir(aff4_path, output_dir, bufsize=8192, preserve_path=True):\n    if (not os.path.isdir(output_dir)):\n        os.makedirs(output_dir)\n    fd = aff4.FACTORY.Open(aff4_path)\n    for child in fd.OpenChildren():\n        if preserve_path:\n            full_dir = utils.JoinPath(output_dir, child.urn.Path())\n            full_dir = os.path.dirname(full_dir)\n            if (not os.path.isdir(full_dir)):\n                os.makedirs(full_dir)\n            outfile = os.path.join(full_dir, child.urn.Basename())\n        else:\n            outfile = os.path.join(output_dir, child.urn.Basename())\n        logging.info(u'Downloading %s to %s', child.urn, outfile)\n        with open(outfile, 'wb') as out_fd:\n            try:\n                buf = child.Read(bufsize)\n                while buf:\n                    out_fd.write(buf)\n                    buf = child.Read(bufsize)\n            except IOError as e:\n                logging.error('Failed to read %s. Err: %s', child.urn, e)", "docstring": "Take an aff4 path and download all files in it to output_dir.\n\nArgs:\naff4_path: Any aff4 path as a string\noutput_dir: A local directory to write to, will be created if not there.\nbufsize: Buffer size to use.\npreserve_path: If set all paths will be created.  Note that this works for\ncollections as well. It will download all files in the collection.  This\nonly downloads files that are already in the datastore, it doesn't queue\nanything on the client.", "source": "codesearchnet"}
{"code": "def __init__(self, target_shape, **kwargs):\n    super(Reshape, self).__init__(**kwargs)\n    self.target_shape = tuple(target_shape)", "docstring": "Creates a `tf.keras.layers.Reshape`  layer instance.\n\nArgs:\ntarget_shape: Target shape. Tuple of integers, does not include the\nsamples dimension (batch size).\n**kwargs: Any additional layer keyword arguments.", "source": "github-repos"}
{"code": "def setup_build(self):\n    if not self.make_imports_dir():\n        return set()\n    default_output = self.write_default_pyi()\n    self.write_ninja_preamble()\n    files = set()\n    module_to_imports_map = {}\n    module_to_output = {}\n    for module, action, deps, stage in self.yield_sorted_modules():\n        if files >= self.filenames:\n            logging.info('skipped: %s %s (%s)', action, module.name, stage)\n            continue\n        if action == Action.GENERATE_DEFAULT:\n            module_to_output[module] = default_output\n            continue\n        if stage == Stage.SINGLE_PASS:\n            files.add(module.full_path)\n            suffix = ''\n        elif stage == Stage.FIRST_PASS:\n            suffix = FIRST_PASS_SUFFIX\n        else:\n            assert stage == Stage.SECOND_PASS\n            files.add(module.full_path)\n            suffix = ''\n        imports_map = module_to_imports_map[module] = get_imports_map(deps, module_to_imports_map, module_to_output)\n        imports = self.write_imports(module.name, imports_map, suffix)\n        deps = tuple((module_to_output[m] for m in deps if module_to_output[m] != default_output))\n        module_to_output[module] = self.write_build_statement(module, action, deps, imports, suffix)\n    return files", "docstring": "Write out the full build.ninja file.\n\nReturns:\nAll files with build statements.", "source": "github-repos"}
{"code": "def parse_problem_name(name):\n  \n  \n  if name.endswith(\"_rev\"):\n    base, was_reversed, was_copy = parse_problem_name(name[:-4])\n    if was_reversed:\n      \n      raise ValueError(\n          \"Invalid problem name %s: multiple '_rev' instances\" % name)\n    return ProblemSpec(base, True, was_copy)\n  elif name.endswith(\"_copy\"):\n    base, was_reversed, was_copy = parse_problem_name(name[:-5])\n    if was_copy:\n      raise ValueError(\n          \"Invalid problem_name %s: multiple '_copy' instances\" % name)\n    return ProblemSpec(base, was_reversed, True)\n  else:\n    return ProblemSpec(name, False, False)", "docstring": "Determines if problem_name specifies a copy and/or reversal.\n\nArgs:\nname: str, problem name, possibly with suffixes.\n\nReturns:\nProblemSpec: namedtuple with [\"base_name\", \"was_reversed\", \"was_copy\"]\n\nRaises:\nValueError if name contains multiple suffixes of the same type\n('_rev' or '_copy'). One of each is ok.", "source": "juraj-google-style"}
{"code": "def get_metadata(self, resource, keys):\n    self.metadata_service.set_auth(self._token_metadata)\n    return self.metadata_service.get(resource, keys)", "docstring": "Gets the values for given keys associated with the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys (list)\n\nReturns:\n(dictionary)\n\nRaises:\nHTTPErrorList on failure.", "source": "codesearchnet"}
{"code": "def forward_feed(self, amount):\n        \n        if amount <= 255 and amount >=0:\n            self.send(chr(27)+'J'+chr(amount))\n        else:\n            raise RuntimeError('Invalid foward feed, must be less than 255 and >= 0')", "docstring": "Calling this function finishes input of the current line, then moves the vertical\nprint position forward by x/300 inch.\n\nArgs:\namount: how far foward you want the position moved. Actual movement is calculated as\namount/300 inches.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid foward feed.", "source": "juraj-google-style"}
{"code": "async def rename(self, name):\n        \n        await self._client.rename_conversation(\n            hangouts_pb2.RenameConversationRequest(\n                request_header=self._client.get_request_header(),\n                new_name=name,\n                event_request_header=self._get_event_request_header(),\n            )\n        )", "docstring": "Rename this conversation.\n\nHangouts only officially supports renaming group conversations, so\ncustom names for one-to-one conversations may or may not appear in all\nfirst party clients.\n\nArgs:\nname (str): New name.\n\nRaises:\n.NetworkError: If conversation cannot be renamed.", "source": "juraj-google-style"}
{"code": "def isHostCert(self, name):\n        \n        crtpath = self._getPathJoin('hosts', '%s.crt' % name)\n        return os.path.isfile(crtpath)", "docstring": "Checks if a host certificate exists.\n\nArgs:\nname (str): The name of the host keypair.\n\nExamples:\nCheck if the host cert \"myhost\" exists:\n\nexists = cdir.isUserCert('myhost')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.GenerateAccessToken = channel.unary_unary(\n            \"/google.iam.credentials.v1.IAMCredentials/GenerateAccessToken\",\n            request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateAccessTokenRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateAccessTokenResponse.FromString,\n        )\n        self.GenerateIdToken = channel.unary_unary(\n            \"/google.iam.credentials.v1.IAMCredentials/GenerateIdToken\",\n            request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdTokenRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdTokenResponse.FromString,\n        )\n        self.SignBlob = channel.unary_unary(\n            \"/google.iam.credentials.v1.IAMCredentials/SignBlob\",\n            request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignBlobRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignBlobResponse.FromString,\n        )\n        self.SignJwt = channel.unary_unary(\n            \"/google.iam.credentials.v1.IAMCredentials/SignJwt\",\n            request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignJwtRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.SignJwtResponse.FromString,\n        )\n        self.GenerateIdentityBindingAccessToken = channel.unary_unary(\n            \"/google.iam.credentials.v1.IAMCredentials/GenerateIdentityBindingAccessToken\",\n            request_serializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdentityBindingAccessTokenRequest.SerializeToString,\n            response_deserializer=google_dot_iam_dot_credentials_dot_v1_dot_common__pb2.GenerateIdentityBindingAccessTokenResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def get_example_from_prop_spec(self, prop_spec, from_allof=False):\n    easy_keys = ['example', 'x-example', 'default']\n    for key in easy_keys:\n        if ((key in prop_spec.keys()) and self.use_example):\n            return prop_spec[key]\n    if ('enum' in prop_spec.keys()):\n        return prop_spec['enum'][0]\n    if ('$ref' in prop_spec.keys()):\n        return self._example_from_definition(prop_spec)\n    if ('allOf' in prop_spec.keys()):\n        return self._example_from_allof(prop_spec)\n    if ('type' not in prop_spec):\n        return self._example_from_complex_def(prop_spec)\n    if (prop_spec['type'] == 'object'):\n        (example, additional_properties) = self._get_example_from_properties(prop_spec)\n        if (additional_properties or from_allof):\n            return example\n        return [example]\n    if ((prop_spec['type'] == 'array') or (isinstance(prop_spec['type'], list) and (prop_spec['type'][0] == 'array'))):\n        return self._example_from_array_spec(prop_spec)\n    if (prop_spec['type'] == 'file'):\n        return (StringIO('my file contents'), 'hello world.txt')\n    if (('format' in prop_spec.keys()) and (prop_spec['format'] == 'date-time')):\n        return self._get_example_from_basic_type('datetime')[0]\n    if isinstance(prop_spec['type'], list):\n        return self._get_example_from_basic_type(prop_spec['type'][0])[0]\n    logging.info('falling back to basic type, no other match found')\n    return self._get_example_from_basic_type(prop_spec['type'])[0]", "docstring": "Return an example value from a property specification.\n\nArgs:\nprop_spec: the specification of the property.\nfrom_allof: whether these properties are part of an\nallOf section\n\nReturns:\nAn example value", "source": "codesearchnet"}
{"code": "def plot_term_kdes(self, words, **kwargs):\n\n        \n\n        stem = PorterStemmer().stem\n\n        for word in words:\n            kde = self.kde(stem(word), **kwargs)\n            plt.plot(kde)\n\n        plt.show()", "docstring": "Plot kernel density estimates for multiple words.\n\nArgs:\nwords (list): A list of unstemmed terms.", "source": "juraj-google-style"}
{"code": "def complies_with_scope(queue_item, new_request, scope):\n    if (not URLHelper.is_parsable(queue_item.request.url)):\n        return False\n    if (not URLHelper.is_parsable(new_request.url)):\n        return False\n    if scope.request_methods:\n        if (not (queue_item.request.method in scope.request_methods)):\n            return False\n    if scope.protocol_must_match:\n        if (URLHelper.get_protocol(queue_item.request.url) != URLHelper.get_protocol(new_request.url)):\n            return False\n    if scope.subdomain_must_match:\n        current_subdomain = URLHelper.get_subdomain(queue_item.request.url)\n        new_subdomain = URLHelper.get_subdomain(new_request.url)\n        www_matches = False\n        if ((current_subdomain == 'www') and (new_subdomain == '')):\n            www_matches = True\n        if ((new_subdomain == 'www') and (current_subdomain == '')):\n            www_matches = True\n        if ((not www_matches) and (current_subdomain != new_subdomain)):\n            return False\n    if scope.hostname_must_match:\n        if (URLHelper.get_hostname(queue_item.request.url) != URLHelper.get_hostname(new_request.url)):\n            return False\n    if scope.tld_must_match:\n        if (URLHelper.get_tld(queue_item.request.url) != URLHelper.get_tld(new_request.url)):\n            return False\n    return True", "docstring": "Check if the new request complies with the crawling scope.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.\nnew_request (:class:`nyawc.http.Request`): The request to check.\nscope (:class:`nyawc.Options.OptionsScope`): The scope to check.\n\nReturns:\nbool: True if it complies, False otherwise.", "source": "codesearchnet"}
{"code": "def coefficients(self):\n    vector = self.get_parameter_vector(include_frozen=True)\n    pars = self.get_all_coefficients(vector)\n    if (len(pars) != 6):\n        raise ValueError('there must be 6 coefficient blocks')\n    if any(((len(p.shape) != 1) for p in pars)):\n        raise ValueError('coefficient blocks must be 1D')\n    if (len(pars[0]) != len(pars[1])):\n        raise ValueError('coefficient blocks must have the same shape')\n    if any(((len(pars[2]) != len(p)) for p in pars[3:])):\n        raise ValueError('coefficient blocks must have the same shape')\n    return pars", "docstring": "All of the coefficient arrays\n\nThis property is the concatenation of the results from\n:func:`terms.Term.get_real_coefficients` and\n:func:`terms.Term.get_complex_coefficients` but it will always return\na tuple of length 6, even if ``alpha_complex_imag`` was omitted from\n``get_complex_coefficients``.\n\nReturns:\n(array[j_real], array[j_real], array[j_complex], array[j_complex],\narray[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``,\n``alpha_complex_real``, ``alpha_complex_imag``,\n``beta_complex_real``, and ``beta_complex_imag`` as described\nabove.\n\nRaises:\nValueError: For invalid dimensions for the coefficients.", "source": "codesearchnet"}
{"code": "def codemirror_script(self, inputid):\n    varname = '{}_codemirror'.format(inputid)\n    html = self.get_codemirror_field_js()\n    opts = self.codemirror_config()\n    return html.format(varname=varname, inputid=inputid, settings=json.dumps(opts, sort_keys=True))", "docstring": "Build CodeMirror HTML script tag which contains CodeMirror init.\n\nArguments:\ninputid (string): Input id.\n\nReturns:\nstring: HTML for field CodeMirror instance.", "source": "codesearchnet"}
{"code": "def propagate(self, date):\n    if (self.propagator.orbit is not self):\n        self.propagator.orbit = self\n    return self.propagator.propagate(date)", "docstring": "Propagate the orbit to a new date\n\nArgs:\ndate (Date)\nReturn:\nOrbit", "source": "codesearchnet"}
{"code": "def counts(self, *args, **kwargs):\n    n = Counts.read_cellframe(self, prune_neighbors=False)\n    if ('measured_regions' in kwargs):\n        n.measured_regions = kwargs['measured_regions']\n    else:\n        n.measured_regions = self.get_measured_regions()\n    if ('measured_phenotypes' in kwargs):\n        n.measured_phenotypes = kwargs['measured_phenotypes']\n    else:\n        n.measured_phenotypes = self.phenotypes\n    n.microns_per_pixel = self.microns_per_pixel\n    if ('minimum_region_size_pixels' in kwargs):\n        n.minimum_region_size_pixels = kwargs['minimum_region_size_pixels']\n    else:\n        n.minimum_region_size_pixels = 1\n    return n", "docstring": "Return a class that can be used to access count densities\n\nArgs:\nmeasured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions)\nmeasured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes)\nminimum_region_size_pixels (int): Minimum region size to calculate counts on in pixels (Default: 1)\n\nReturns:\nCounts: returns a class that holds the counts.", "source": "codesearchnet"}
{"code": "def _get_reaction(self, x):\n        \n        mix_comp = self.comp1 * x + self.comp2 * (1-x)\n        decomp = self.pd.get_decomposition(mix_comp)\n\n        \n        if np.isclose(x, 0):\n            reactant = [self.c2_original]\n        elif np.isclose(x, 1):\n            reactant = [self.c1_original]\n        else:\n            reactant = list(set([self.c1_original, self.c2_original]))\n\n        if self.grand:\n            reactant += [Composition(e.symbol)\n                         for e, v in self.pd.chempots.items()]\n\n        product = [Composition(k.name) for k, v in decomp.items()]\n        reaction = Reaction(reactant, product)\n\n        x_original = self._get_original_composition_ratio(reaction)\n        if np.isclose(x_original, 1):\n            reaction.normalize_to(self.c1_original, x_original)\n        else:\n            reaction.normalize_to(self.c2_original, 1-x_original)\n        return reaction", "docstring": "Generates balanced reaction at mixing ratio x : (1-x) for\nself.comp1 : self.comp2.\n\nArgs:\nx (float): Mixing ratio x of reactants, a float between 0 and 1.\n\nReturns:\nReaction object.", "source": "juraj-google-style"}
{"code": "def load_vasp_summary(filename):\n    with open(filename, 'r') as stream:\n        docs = yaml.load_all(stream, Loader=yaml.SafeLoader)\n        data = {d['title']: d for d in docs}\n    return data", "docstring": "Reads a `vasp_summary.yaml` format YAML file and returns\na dictionary of dictionaries. Each YAML document in the file\ncorresponds to one sub-dictionary, with the corresponding\ntop-level key given by the `title` value.\n\nExample:\nThe file:\n\n---\ntitle: foo\ndata: foo_data\n---\ntitle: bar\ndata: bar_data\n\nis converted to the dictionary\n\n{ 'foo': { 'title': 'foo', 'data': 'foo_data' },\n'bar': { 'title': 'bar', 'data': 'bar_data' } }\n\nArgs:\nfilename (str): File path for the `vasp_summary.yaml` file.\n\nReturns:\ndict(dict,dict,...): A dictionary of separate YAML documents,\neach as dictionaries.a", "source": "codesearchnet"}
{"code": "def start(logdir):\n    \n    if logdir.startswith('gs:\n      \n      \n      \n      datalab.storage._api.Api.verify_permitted_to_read(logdir)\n\n    port = datalab.utils.pick_unused_port()\n    args = ['tensorboard', '--logdir=' + logdir, '--port=' + str(port)]\n    p = subprocess.Popen(args)\n    retry = 10\n    while (retry > 0):\n      if datalab.utils.is_http_running_on(port):\n        basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')\n        url = '%s/_proxy/%d/' % (basepath.rstrip('/'), port)\n        html = '<p>TensorBoard was started successfully with pid %d. ' % p.pid\n        html += 'Click <a href=\"%s\" target=\"_blank\">here</a> to access it.</p>' % url\n        IPython.display.display_html(html, raw=True)\n        return p.pid\n      time.sleep(1)\n      retry -= 1\n\n    raise Exception('Cannot start TensorBoard.')", "docstring": "Start a TensorBoard instance.\n\nArgs:\nlogdir: the logdir to run TensorBoard on.\nRaises:\nException if the instance cannot be started.", "source": "juraj-google-style"}
{"code": "def _parse_metadata(self, message):\n    metadata = Metadata(source=self.actor_urn).__dict__\n    if ('author' in message['d']):\n        metadata['source_user'] = message['d']['author']['username']\n    else:\n        metadata['source_user'] = None\n    if ('channel_id' in message['d']):\n        metadata['source_channel'] = message['d']['channel_id']\n    else:\n        metadata['source_channel'] = None\n    metadata['user_id'] = metadata['source_user']\n    metadata['display_name'] = metadata['source_user']\n    metadata['source_connector'] = 'discord'\n    return metadata", "docstring": "Sets metadata in Legobot message\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"\n\nReturns:\nLegobot.Metadata", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    out = {}\n    out['reason'] = self.msg\n    out['type'] = self.__class__.__name__\n    out['params'] = self.params\n    return out", "docstring": "Convert this exception to a dictionary.\n\nReturns:\ndist: A dictionary of information about this exception,\nHas a 'reason' key, a 'type' key  and a dictionary of params", "source": "codesearchnet"}
{"code": "def inspect_virtual(self, stream_id):\n    stream = DataStream.FromEncoded(stream_id)\n    if stream.buffered:\n        return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]\n    try:\n        reading = self.storage.inspect_last(stream, only_allocated=True)\n        return [Error.NO_ERROR, reading.value]\n    except StreamEmptyError:\n        return [Error.NO_ERROR, 0]\n    except UnresolvedIdentifierError:\n        return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]", "docstring": "Inspect the last value written into a virtual stream.\n\nArgs:\nstream_id (int): The virtual stream was want to inspect.\n\nReturns:\n(int, int): An error code and the stream value.", "source": "codesearchnet"}
{"code": "def find_node_by_value(self, value):\n    try:\n        return next((n for n in self.node_list if (n.value == value)))\n    except StopIteration:\n        return None", "docstring": "Find and return a node in self.node_list with the value ``value``.\n\nIf multiple nodes exist with the value ``value``,\nreturn the first one found.\n\nIf no such node exists, this returns ``None``.\n\nArgs:\nvalue (Any): The value of the node to find\n\nReturns:\nNode: A node with value ``value`` if it was found\n\nNone: If no node exists with value ``value``\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> found_node = graph.find_node_by_value('One')\n>>> found_node == node_1\nTrue", "source": "codesearchnet"}
{"code": "def update_torch_dtype(self, torch_dtype: 'torch.dtype') -> 'torch.dtype':\n    return torch_dtype", "docstring": "Some quantization methods require to explicitly set the dtype of the model to a\ntarget dtype. You need to override this method in case you want to make sure that behavior is\npreserved\n\nArgs:\ntorch_dtype (`torch.dtype`):\nThe input dtype that is passed in `from_pretrained`", "source": "github-repos"}
{"code": "class BaseModelOutputWithNoAttention(ModelOutput):\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for model's outputs, with potential hidden states.\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\nSequence of hidden-states at the output of the last layer of the model.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\none for the output of each layer) of shape `(batch_size, num_channels, height, width)`.\n\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def get_pipeline_path(pipeline_name, working_directory):\n    logger.debug('starting')\n    logger.debug(f'current directory is {working_directory}')\n    pipeline_path = os.path.abspath(os.path.join(working_directory, 'pipelines', (pipeline_name + '.yaml')))\n    if os.path.isfile(pipeline_path):\n        logger.debug(f'Found {pipeline_path}')\n    else:\n        logger.debug(f'{pipeline_name} not found in current directory/pipelines folder. Looking in pypyr install directory instead.')\n        pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        logger.debug(f'pypyr installation directory is: {pypyr_dir}')\n        pipeline_path = os.path.abspath(os.path.join(pypyr_dir, 'pipelines', (pipeline_name + '.yaml')))\n        if os.path.isfile(pipeline_path):\n            logger.debug(f'Found {pipeline_path}')\n        else:\n            raise PipelineNotFoundError(f'{pipeline_name}.yaml not found in either {working_directory}/pipelines or {pypyr_dir}/pipelines')\n    logger.debug('done')\n    return pipeline_path", "docstring": "Look for the pipeline in the various places it could be.\n\nFirst checks the cwd. Then checks pypyr/pipelines dir.\n\nArgs:\npipeline_name: string. Name of pipeline to find\nworking_directory: string. Path in which to look for pipeline_name.yaml\n\nReturns:\nAbsolute path to the pipeline_name.yaml file\n\nRaises:\nPipelineNotFoundError: if pipeline_name.yaml not found in working_dir\nor in {pypyr install dir}/pipelines.", "source": "codesearchnet"}
{"code": "def save(self, recipe):\n    if (('id' in recipe) and (recipe['id'] is not None)):\n        self.logger.debug(('Updating existing recipe: ' + json.dumps(recipe)))\n        url = ('%(base_url)s/recipe/json/%(recipe_id)s' % {'base_url': self.base_url, 'recipe_id': recipe['id']})\n        r = self.gbdx_connection.put(url, json=recipe)\n        try:\n            r.raise_for_status()\n        except:\n            print(r.text)\n            raise\n        return recipe['id']\n    else:\n        self.logger.debug(('Creating new recipe: ' + json.dumps(recipe)))\n        url = ('%(base_url)s/recipe/json' % {'base_url': self.base_url})\n        r = self.gbdx_connection.post(url, json=recipe)\n        try:\n            r.raise_for_status()\n        except:\n            print(r.text)\n            raise\n        recipe_json = r.json()\n        return recipe_json['id']", "docstring": "Saves an AnswerFactory Recipe\n\nArgs:\nrecipe (dict): Dictionary specifying a recipe\n\nReturns:\nAnswerFactory Recipe id", "source": "codesearchnet"}
{"code": "def market_open(self, session, mins) -> Session:\n    if (session not in self.exch):\n        return SessNA\n    start_time = self.exch[session][0]\n    return Session(start_time, shift_time(start_time, int(mins)))", "docstring": "Time intervals for market open\n\nArgs:\nsession: [allday, day, am, pm, night]\nmins: mintues after open\n\nReturns:\nSession of start_time and end_time", "source": "codesearchnet"}
{"code": "def dummyctrl(self,r,ctrl):\n        \n        dv = DummyVertex(r)\n        dv.view.w,dv.view.h=self.dw,self.dh\n        self.grx[dv] = dv\n        dv.ctrl = ctrl\n        ctrl[r] = dv\n        self.layers[r].append(dv)\n        return dv", "docstring": "creates a DummyVertex at rank r inserted in the ctrl dict\nof the associated edge and layer.\n\nArguments:\nr (int): rank value\nctrl (dict): the edge's control vertices\n\nReturns:\nDummyVertex : the created DummyVertex.", "source": "juraj-google-style"}
{"code": "def split(self, desired_bundle_size: int, start_position: Union[int, str, bytes, ObjectId]=None, stop_position: Union[int, str, bytes, ObjectId]=None):\n    desired_bundle_size_in_mb = desired_bundle_size \n    desired_bundle_size_in_mb = max(desired_bundle_size_in_mb, 1)\n    is_initial_split = start_position is None and stop_position is None\n    start_position, stop_position = self._replace_none_positions(start_position, stop_position)\n    if self.bucket_auto:\n        split_keys = []\n        weights = []\n        for bucket in self._get_auto_buckets(desired_bundle_size_in_mb, start_position, stop_position, is_initial_split):\n            split_keys.append({'_id': bucket['_id']['max']})\n            weights.append(bucket['count'])\n    else:\n        split_keys = self._get_split_keys(desired_bundle_size_in_mb, start_position, stop_position)\n        weights = itertools.cycle((desired_bundle_size_in_mb,))\n    bundle_start = start_position\n    for split_key_id, weight in zip(split_keys, weights):\n        if bundle_start >= stop_position:\n            break\n        bundle_end = min(stop_position, split_key_id['_id'])\n        yield iobase.SourceBundle(weight=weight, source=self, start_position=bundle_start, stop_position=bundle_end)\n        bundle_start = bundle_end\n    if bundle_start < stop_position:\n        weight = 1 if self.bucket_auto else desired_bundle_size_in_mb\n        yield iobase.SourceBundle(weight=weight, source=self, start_position=bundle_start, stop_position=stop_position)", "docstring": "Splits the source into a set of bundles.\n\nBundles should be approximately of size ``desired_bundle_size`` bytes.\n\nArgs:\ndesired_bundle_size: the desired size (in bytes) of the bundles returned.\nstart_position: if specified the given position must be used as the\nstarting position of the first bundle.\nstop_position: if specified the given position must be used as the ending\nposition of the last bundle.\nReturns:\nan iterator of objects of type 'SourceBundle' that gives information about\nthe generated bundles.", "source": "github-repos"}
{"code": "def get(self, url):\n    self._driver.get(url)\n    if self.bot_diary:\n        self.bot_diary.add_auto_entry('I went on', target=url, take_screenshot=True)\n    if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:\n        self.init_javascript_error_interception()\n    return True", "docstring": "Navigate to a specific url\n\nThis specific implementation inject a javascript\nscript to intercept the javascript error\n\nConfigurable with the \"proxy_driver:intercept_javascript_error\" config\n\nArgs:\nurl (str): the url to navigate to\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def describe_field(field_definition):\n    \n    field_descriptor = FieldDescriptor()\n    field_descriptor.name = field_definition.name\n    field_descriptor.number = field_definition.number\n    field_descriptor.variant = field_definition.variant\n\n    if isinstance(field_definition, messages.EnumField):\n        field_descriptor.type_name = field_definition.type.definition_name()\n\n    if isinstance(field_definition, messages.MessageField):\n        field_descriptor.type_name = (\n            field_definition.message_type.definition_name())\n\n    if field_definition.default is not None:\n        field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[\n            type(field_definition)](field_definition.default)\n\n    \n    if field_definition.repeated:\n        field_descriptor.label = FieldDescriptor.Label.REPEATED\n    elif field_definition.required:\n        field_descriptor.label = FieldDescriptor.Label.REQUIRED\n    else:\n        field_descriptor.label = FieldDescriptor.Label.OPTIONAL\n\n    return field_descriptor", "docstring": "Build descriptor for Field instance.\n\nArgs:\nfield_definition: Field instance to provide descriptor for.\n\nReturns:\nInitialized FieldDescriptor instance describing the Field instance.", "source": "juraj-google-style"}
{"code": "def _IsWindowsDrivePathSegment(cls, path_segment):\n    if ((len(path_segment) == 2) and (path_segment[1] == ':') and path_segment[0].isalpha()):\n        return True\n    path_segment = path_segment.upper()\n    return (path_segment in ('%%ENVIRON_SYSTEMDRIVE%%', '%SYSTEMDRIVE%'))", "docstring": "Determines if the path segment contains a Windows Drive indicator.\n\nA drive indicator can be a drive letter or %SystemDrive%.\n\nArgs:\npath_segment (str): path segment.\n\nReturns:\nbool: True if the path segment contains a Windows Drive indicator.", "source": "codesearchnet"}
{"code": "def _CreateOutputModule(self, options):\n    formatter_mediator = formatters_mediator.FormatterMediator(data_location=self._data_location)\n    try:\n        formatter_mediator.SetPreferredLanguageIdentifier(self._preferred_language)\n    except (KeyError, TypeError) as exception:\n        raise RuntimeError(exception)\n    mediator = output_mediator.OutputMediator(self._knowledge_base, formatter_mediator, preferred_encoding=self.preferred_encoding)\n    mediator.SetTimezone(self._preferred_time_zone)\n    try:\n        output_module = output_manager.OutputManager.NewOutputModule(self._output_format, mediator)\n    except (KeyError, ValueError) as exception:\n        raise RuntimeError('Unable to create output module with error: {0!s}'.format(exception))\n    if output_manager.OutputManager.IsLinearOutputModule(self._output_format):\n        output_file_object = open(self._output_filename, 'wb')\n        output_writer = tools.FileObjectOutputWriter(output_file_object)\n        output_module.SetOutputWriter(output_writer)\n    helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)\n    missing_parameters = output_module.GetMissingArguments()\n    while missing_parameters:\n        for parameter in missing_parameters:\n            value = self._PromptUserForInput('Missing parameter {0:s} for output module'.format(parameter))\n            if (value is None):\n                logger.warning('Unable to set the missing parameter for: {0:s}'.format(parameter))\n                continue\n            setattr(options, parameter, value)\n        helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)\n        missing_parameters = output_module.GetMissingArguments()\n    return output_module", "docstring": "Creates the output module.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nReturns:\nOutputModule: output module.\n\nRaises:\nRuntimeError: if the output module cannot be created.", "source": "codesearchnet"}
{"code": "def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True):\n        \n        parts = read_tuple_name.split(\"__\")\n        parts[0] = self._fill_right(parts[0], \"-\", self.prefix_width)\n        if read_tuple_id is not None:\n            parts[1] = \"{:x}\".format(read_tuple_id)\n        parts[1] = self._fill_left(parts[1], \"0\", self.read_tuple_id_width)\n\n        if synchronize_widths:\n            new_segments = []\n            segments = parts[2][1:-1].split(\"),(\")\n            for segment in segments:\n                values = segment.split(\",\")\n                values[0] = values[0].zfill(self.genome_id_width)\n                values[1] = values[1].zfill(self.chr_id_width)\n                values[3] = values[3].zfill(self.coor_width)\n                values[4] = values[4].zfill(self.coor_width)\n                new_segments.append(\"(\" + \",\".join(values) + \")\")\n            parts[2] = \",\".join(new_segments)\n\n        return \"__\".join(parts)", "docstring": "Apply profile on a read tuple name and update read tuple ID.\n\nArgs:\nread_tuple_name (str): Read tuple name to be updated.\nread_tuple_id (id): New read tuple ID.\nsynchronize_widths (bool): Update widths (in accordance to this profile).", "source": "juraj-google-style"}
{"code": "def _make_sent_vector(self, sent: List, bucket_length: int =None) -> np.ndarray:\n        \n        bucket_length = bucket_length or len(sent)\n        answer = np.zeros(shape=(bucket_length, MAX_WORD_LENGTH+2), dtype=np.int32)\n        for i, word in enumerate(sent):\n            answer[i, 0] = self.tags.tok2idx(\"BEGIN\")\n            m = min(len(word), MAX_WORD_LENGTH)\n            for j, x in enumerate(word[-m:]):\n                answer[i, j+1] = self.symbols.tok2idx(x)\n            answer[i, m+1] = self.tags.tok2idx(\"END\")\n            answer[i, m+2:] = self.tags.tok2idx(\"PAD\")\n        return answer", "docstring": "Transforms a sentence to Numpy array, which will be the network input.\n\nArgs:\nsent: input sentence\nbucket_length: the width of the bucket\n\nReturns:\nA 3d array, answer[i][j][k] contains the index of k-th letter\nin j-th word of i-th input sentence.", "source": "juraj-google-style"}
{"code": "def nCr(n, r):\n    f = math.factorial\n    return int(((f(n) / f(r)) / f((n - r))))", "docstring": "Calculates nCr.\n\nArgs:\nn (int): total number of items.\nr (int): items to choose\n\nReturns:\nnCr.", "source": "codesearchnet"}
{"code": "def get_site_energy(self, site_index):\n        \n        if self._charged:\n            warn('Per atom energies for charged structures not supported in EwaldSummation')\n        return np.sum(self._recip[:,site_index]) + np.sum(self._real[:,site_index]) \\\n            + self._point[site_index]", "docstring": "Compute the energy for a single site in the structure\n\nArgs:\nsite_index (int): Index of site\nReturnS:\n(float) - Energy of that site", "source": "juraj-google-style"}
{"code": "def get_airports(self, country):\n    url = AIRPORT_BASE.format(country.replace(' ', '-'))\n    return self._fr24.get_airports_data(url)", "docstring": "Returns a list of all the airports\nFor a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc\n\nArgs:\ncountry (str): The country for which the airports will be fetched\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\nf.get_airports('India')", "source": "codesearchnet"}
{"code": "def alias_inplace_add(x, i, v):\n    return _inplace_helper(x, i, v, gen_array_ops.inplace_add)", "docstring": "Applies an inplace add on input x at index i with value v. Aliases x.\n\nIf i is None, x and v must be the same shape. Computes\nx += v;\nIf i is a scalar, x has a rank 1 higher than v's. Computes\nx[i, :] += v;\nOtherwise, x and v must have the same rank. Computes\nx[i, :] += v;\n\nArgs:\nx: A Tensor.\ni: None, a scalar or a vector.\nv: A Tensor.\n\nReturns:\nReturns x.", "source": "github-repos"}
{"code": "def log_variables(variables=None):\n  \n  if variables is None:\n    variables = tf.global_variables() + tf.local_variables()\n  for row in format_variables(variables, join_lines=False):\n    tf.logging.info(row)", "docstring": "Logs variable information.\n\nThis function logs the name, shape, type, collections, and device for either\nall variables or a given iterable of variables. In the \"Device\" columns,\nthe nature of the variable (legacy or resource (for ResourceVariables)) is\nalso specified in parenthesis.\n\nArgs:\nvariables: iterable of variables; if not provided, then all variables\n(in the default graph) are logged.", "source": "juraj-google-style"}
{"code": "def to_tensorflow_dataset(evset: EventSet, timestamps: str='timestamp') -> 'tensorflow.data.Dataset':\n    tf = import_tf()\n    if len(evset.schema.indexes) != 0:\n        evset = drop_index(evset)\n    data = evset.get_arbitrary_index_data()\n    dict_data = {timestamps: data.timestamps}\n    for feature_idx, feature in enumerate(evset.schema.features):\n        dict_data[feature.name] = data.features[feature_idx]\n    return tf.data.Dataset.from_tensor_slices(dict_data)", "docstring": "Converts an [`EventSet`][temporian.EventSet] to a tensorflow Dataset.\n\nUsage example:\n```python\nevset = event_set(\ntimestamps=[1, 2, 3, 4],\nfeatures={\n\"f1\": [10, 11, 12, 13],\n\"f2\": [b\"a\", b\"b\", b\"c\", b\"d\"],\n\"label\": [0, 1, 0, 1],\n},\n)\n\ntf_dataset = tp.to_tensorflow_dataset(evset)\n\ndef extract_label(example):\nlabel = example.pop(\"label\")\nreturn example, label\ntf_dataset = tf_dataset.map(extract_label).batch(100)\n\nmodel = ... # A Keras model\nmodel.fit(tf_dataset)\n```\n\nArgs:\nevset: Input event set.\ntimestamps: Output key containing the timestamps.\n\nReturns:\nTensorFlow dataset created from EventSet.", "source": "github-repos"}
{"code": "def min_sequence_length(self, dataset_split):\n    \n    return {\n        problem.DatasetSplit.TRAIN: 8,\n        problem.DatasetSplit.EVAL: 65,\n        problem.DatasetSplit.TEST: 65\n    }[dataset_split]", "docstring": "Determine the minimum sequence length given a dataset_split.\n\nArgs:\ndataset_split: A problem.DatasetSplit.\n\nReturns:\nThe minimum length that a sequence can be for this dataset_split.", "source": "juraj-google-style"}
{"code": "def __init__(self, data_location=None):\n    \n    super(FormatterMediator, self).__init__()\n    self._data_location = data_location\n    self._language_identifier = self.DEFAULT_LANGUAGE_IDENTIFIER\n    self._lcid = self.DEFAULT_LCID\n    self._winevt_database_reader = None", "docstring": "Initializes a formatter mediator object.\n\nArgs:\ndata_location (str): path of the formatter data files.", "source": "juraj-google-style"}
{"code": "def detail_poi(self, **kwargs):\n        \n        \n        params = {\n            'language': util.language_code(kwargs.get('lang')),\n            'family': kwargs.get('family')\n        }\n\n        if kwargs.get('id'):\n            params['id'] = kwargs['id']\n\n        \n        result = self.make_request('detail_poi', {}, **params)\n\n        if not util.check_result(result):\n            return False, result.get('message', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'Data')\n        return True, [emtype.PoiDetails(**a) for a in values]", "docstring": "Obtain detailed info of a given POI.\n\nArgs:\nfamily (str): Family code of the POI (3 chars).\nlang (str): Language code (*es* or *en*).\nid (int): Optional, ID of the POI to query. Passing value -1 will\nresult in information from all POIs.\n\nReturns:\nStatus boolean and parsed response (list[PoiDetails]), or\nmessage string in case of error.", "source": "juraj-google-style"}
{"code": "def serialize(self):\n    segment = hangouts_pb2.Segment(type=self.type_, text=self.text, formatting=hangouts_pb2.Formatting(bold=self.is_bold, italic=self.is_italic, strikethrough=self.is_strikethrough, underline=self.is_underline))\n    if (self.link_target is not None):\n        segment.link_data.link_target = self.link_target\n    return segment", "docstring": "Serialize this segment to a ``Segment`` message.\n\nReturns:\n``Segment`` message.", "source": "codesearchnet"}
{"code": "def unpack_grad_tuple(gv, gpt):\n    elt_widths = [x.num_elements() for x in gpt.shapes]\n    with tf.device(gv[0][0].device):\n        with tf.name_scope('unpack'):\n            splits = tf.split(gv[0], elt_widths)\n            unpacked_gv = []\n            for (idx, s) in enumerate(splits):\n                unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))\n    return unpacked_gv", "docstring": "Unpack a previously packed collection of gradient tensors.\n\nArgs:\ngv: A (grad, var) pair to be unpacked.\ngpt: A GradPackTuple describing the packing operation that produced gv.\n\nReturns:\nA list of (grad, var) pairs corresponding to the values that were\noriginally packed into gv, maybe following subsequent operations like\nreduction.", "source": "codesearchnet"}
{"code": "def days(value: Union[int, float]) -> Duration:\n    return float(value * 60 * 60 * 24)", "docstring": "Converts input value from number of days to a `Duration` in seconds.\n\nExample:\n```python\n>>> a = tp.event_set(\n...    # Dates are converted to unix timestamps\n...    timestamps=[\"2020-01-01\", \"2020-01-02\", \"2020-01-31\"],\n...    features={\"f1\": [1, 5, -5]}\n... )\n\n>>> a.moving_sum(window_length=tp.duration.days(2))\nindexes: ...\ntimestamps: ['2020-01-01T00:00:00' '2020-01-02T00:00:00'\n'2020-01-31T00:00:00']\n'f1': [ 1 6 -5]\n...\n\n```\n\nArgs:\nvalue: number of days.\n\nReturns:\nEquivalent number of seconds.", "source": "github-repos"}
{"code": "def addSearchers(self, *searchers):\n    self._searchers.extend(searchers)\n    ((debug.logger & debug.flagCompiler) and debug.logger(('current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers]))))\n    return self", "docstring": "Add more transformed MIBs repositories.\n\nMibCompiler.compile will invoke each of configured searcher objects\nin order of their addition asking each if already transformed MIB\nmodule already exists and is more recent than specified.\n\nArgs:\nsearchers: searcher object(s)\n\nReturns:\nreference to itself (can be used for call chaining)", "source": "codesearchnet"}
{"code": "def create_module_graph(module_spec):\n  \n  height, width = hub.get_expected_image_size(module_spec)\n  with tf.Graph().as_default() as graph:\n    resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])\n    m = hub.Module(module_spec)\n    bottleneck_tensor = m(resized_input_tensor)\n    wants_quantization = any(node.op in FAKE_QUANT_OPS\n                             for node in graph.as_graph_def().node)\n  return graph, bottleneck_tensor, resized_input_tensor, wants_quantization", "docstring": "Creates a graph and loads Hub Module into it.\n\nArgs:\nmodule_spec: the hub.ModuleSpec for the image module being used.\n\nReturns:\ngraph: the tf.Graph that was created.\nbottleneck_tensor: the bottleneck values output by the module.\nresized_input_tensor: the input images, resized as expected by the module.\nwants_quantization: a boolean, whether the module has been instrumented\nwith fake quantization ops.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    installation_value = None\n    string_values = {}\n    for registry_value in registry_key.GetValues():\n      \n      if not registry_value.name:\n        continue\n\n      if (registry_value.name == 'InstallDate' and\n          registry_value.DataIsInteger()):\n        installation_value = registry_value\n        continue\n\n      \n      if not registry_value.data or not registry_value.DataIsString():\n        continue\n\n      string_value_name = self._STRING_VALUE_NAME_STRINGS.get(\n          registry_value.name, None)\n      if not string_value_name:\n        continue\n\n      string_values[string_value_name] = registry_value.GetDataAsObject()\n\n    values_dict = {}\n    values_dict['Owner'] = string_values.get('owner', '')\n    values_dict['Product name'] = string_values.get('product_name', '')\n    values_dict['Service pack'] = string_values.get('service_pack', '')\n    values_dict['Windows Version Information'] = string_values.get(\n        'version', '')\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    \n    \n    if installation_value:\n      event_data = windows_events.WindowsRegistryInstallationEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = registry_key.offset\n      event_data.owner = string_values.get('owner', None)\n      event_data.product_name = string_values.get('product_name', None)\n      event_data.service_pack = string_values.get('service_pack', None)\n      event_data.version = string_values.get('version', None)\n\n      installation_time = installation_value.GetDataAsObject()\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=installation_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_INSTALLATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def packtext(text, width=80):\n    r\n    import utool as ut\n    import textwrap\n    new_text = '\\n'.join(textwrap.wrap(text, width))\n    new_text = ut.remove_doublspaces(new_text).strip()\n    return new_text", "docstring": "r\"\"\"\nArgs:\ntext (str):\n\nCommandLine:\npython -m utool.util_str --exec-pack_paragraph --show\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> import utool as ut\n>>> width = 80\n>>> text = lorium_ipsum()\n>>> result = packtext(text)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def merge_and_fit(self, track, pairings):\n        \n\n        for (self_seg_index, track_seg_index, _) in pairings:\n            self_s = self.segments[self_seg_index]\n            ss_start = self_s.points[0]\n            track_s = track.segments[track_seg_index]\n            tt_start = track_s.points[0]\n            tt_end = track_s.points[-1]\n\n            d_start = ss_start.distance(tt_start)\n            d_end = ss_start.distance(tt_end)\n\n            if d_start > d_end:\n                track_s = track_s.copy()\n                track_s.points = list(reversed(track_s.points))\n\n            self_s.merge_and_fit(track_s)\n        return self", "docstring": "Merges another track with this one, ordering the points based on a\ndistance heuristic\n\nArgs:\ntrack (:obj:`Track`): Track to merge with\npairings\nReturns:\n:obj:`Segment`: self", "source": "juraj-google-style"}
{"code": "def get_metar(\n            metar: typing.Union[str, 'CustomMetar']\n    ) -> typing.Tuple[typing.Union[str, None], typing.Union['CustomMetar', None]]:\n        \n        error: typing.Optional[str] = None\n        if isinstance(metar, CustomMetar):\n            return None, metar\n\n        if isinstance(metar, str):\n            LOGGER.debug('building CustomMetar from: %s', metar)\n            if len(metar) == 4:\n                LOGGER.debug('retrieving METAR from ICAO')\n                \n                \n                \n                metar = AWC.query_icao(metar).raw_metar\n        else:\n            error = f'expected a string or or a CustomMetar object, got: {type(metar)}'\n\n        if error:\n            return error, None\n\n        try:\n            return None, CustomMetar(metar_code=metar)\n        except ParserError:\n            return f'Unable to parse METAR: {metar}', None", "docstring": "Builds a CustomMetar object from a CustomMetar object (returns it), an ICAO code or a METAR string\n\nArgs:\nmetar: CustomMetar object, ICAO string or METAR string\n\nReturns: CustomMetar object", "source": "juraj-google-style"}
{"code": "def _anonymize_table(cls, table_data, pii_fields):\n    for pii_field in pii_fields:\n        field_name = pii_field['name']\n        transformer = cls.get_class(TRANSFORMERS['categorical'])(pii_field)\n        table_data[field_name] = transformer.anonymize_column(table_data)\n    return table_data", "docstring": "Anonymize in `table_data` the fields in `pii_fields`.\n\nArgs:\ntable_data (pandas.DataFrame): Original dataframe/table.\npii_fields (list[dict]): Metadata for the fields to transform.\n\nResult:\npandas.DataFrame: Anonymized table.", "source": "codesearchnet"}
{"code": "def _embedding_lookup_for_ragged_tensor(self, inp: ragged_tensor.RaggedTensor, weight: Optional[ragged_tensor.RaggedTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n    if inp.shape.rank != 2:\n        raise ValueError('Only rank 2 ragged tensor is supported, but got rank {}'.format(inp.shape.rank))\n    batch_size = inp.shape[0]\n\n    def ragged_to_dense_outside_compilation(inp, weight, batch_size, feature):\n        if weight is None:\n            weight = ragged_tensor.RaggedTensor.from_row_splits(array_ops.ones_like(inp.values, dtype=dtypes.float32), inp.row_splits)\n        if not feature.output_shape and feature.max_sequence_length > 0:\n            inp = inp.to_tensor(shape=(batch_size, feature.max_sequence_length))\n            weight = array_ops.ones_like(inp, dtype=dtypes.float32)\n        elif feature.output_shape:\n            with ops.init_scope():\n                output_batch_size = math_ops.reduce_prod(feature.output_shape).numpy()\n            if output_batch_size == batch_size:\n                inp, weight = (inp.to_tensor(), weight.to_tensor())\n            elif output_batch_size > batch_size and output_batch_size % batch_size == 0:\n                seq_length = output_batch_size \n                inp = inp.to_tensor(shape=(batch_size, seq_length))\n                weight = array_ops.ones_like(inp, dtype=dtypes.float32)\n            else:\n                raise ValueError('Output shape set in the FeatureConfig should be the factor of the input data batch size. But instead got output shape {}, input data batch size {}'.format(feature.output_shape, batch_size))\n        else:\n            inp, weight = (inp.to_tensor(), weight.to_tensor())\n        return (inp, weight)\n    inp, weight = tpu_replication.outside_compilation(ragged_to_dense_outside_compilation, inp=inp, weight=weight, batch_size=batch_size, feature=feature)\n    embeddings = embedding_ops.embedding_lookup_v2(table, inp)\n    weight = array_ops.expand_dims(weight, -1)\n    embeddings *= weight\n    if feature.output_shape:\n        with ops.init_scope():\n            output_batch_size = math_ops.reduce_prod(feature.output_shape).numpy()\n        if output_batch_size == batch_size:\n            embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)\n        embeddings = array_ops.reshape(embeddings, shape=feature.output_shape + [feature.table.dim])\n    elif feature.max_sequence_length == 0:\n        embeddings = self._apply_combiner_to_embeddings(embeddings, weight, feature.table.combiner)\n    return embeddings", "docstring": "Embedding lookup for ragged tensor based on its feature config.\n\nArgs:\ninp: a single rank 2 RaggedTensor input.\nweight: None or RaggedTensor which has the same shape of the input.\ntable: a table variable.\nfeature: a feature config.\n\nReturns:\nEmbedding lookup result.\n\nRaises:\nValueError: if input ragged tensor is not rank 2 or output shape set in\nthe feature config doesn't match with the first dim size of the input.", "source": "github-repos"}
{"code": "def settings(package, reload_=False):\n    \n    global packages\n    if package not in packages or reload_:\n        from os import path\n        result = CaseConfigParser()\n        if package != \"acorn\":\n            confpath = _package_path(package)\n            _read_single(result, confpath)\n        _read_single(result, _package_path(\"acorn\"))\n        packages[package] = result\n\n    return packages[package]", "docstring": "Returns the config settings for the specified package.\n\nArgs:\npackage (str): name of the python package to get settings for.", "source": "juraj-google-style"}
{"code": "def ReadSerializedDict(cls, json_dict):\n    if json_dict:\n        json_object = cls._ConvertDictToObject(json_dict)\n        if (not isinstance(json_object, containers_interface.AttributeContainer)):\n            raise TypeError('{0:s} is not an attribute container type.'.format(type(json_object)))\n        return json_object\n    return None", "docstring": "Reads an attribute container from serialized dictionary form.\n\nArgs:\njson_dict (dict[str, object]): JSON serialized objects.\n\nReturns:\nAttributeContainer: attribute container or None.\n\nRaises:\nTypeError: if the serialized dictionary does not contain an\nAttributeContainer.", "source": "codesearchnet"}
{"code": "def _to_live_trigger_log(self, **kwargs):\n        \n        field_names = (field.name for field in TriggerLogAbstract._meta.get_fields())\n        attributes = {name: getattr(self, name) for name in field_names}\n        del attributes['id']  \n        attributes.update(kwargs)\n        return TriggerLog(**attributes)", "docstring": "Make a new, non-archived :class:`.TriggerLog` instance with duplicate data.\n\nArgs:\n**kwargs: Set as attributes of the new instance, overriding what would otherwise be\ncopied from ``self``.\n\nReturns:\nThe new (unpersisted) :class:`TriggerLog` instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, step_name, transform_id=None):\n    self.step_name = step_name\n    self.transform_id = transform_id", "docstring": "Creates a new step NameContext.\n\nArgs:\nstep_name: The name of the step.", "source": "github-repos"}
{"code": "def datasets_list(self, project_id=None, max_results=0, page_token=None):\n    if (project_id is None):\n        project_id = self._project_id\n    url = (Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, '')))\n    args = {}\n    if (max_results != 0):\n        args['maxResults'] = max_results\n    if (page_token is not None):\n        args['pageToken'] = page_token\n    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Issues a request to list the datasets in the project.\n\nArgs:\nproject_id: the project id to use to fetch the results; use None for the default project.\nmax_results: an optional maximum number of tables to retrieve.\npage_token: an optional token to continue the retrieval.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def _get_descending_key(gettime=time.time):\n  \n  now_descending = int((_FUTURE_TIME - gettime()) * 100)\n  request_id_hash = os.environ.get(\"REQUEST_ID_HASH\")\n  if not request_id_hash:\n    request_id_hash = str(random.getrandbits(32))\n  return \"%d%s\" % (now_descending, request_id_hash)", "docstring": "Returns a key name lexically ordered by time descending.\n\nThis lets us have a key name for use with Datastore entities which returns\nrows in time descending order when it is scanned in lexically ascending order,\nallowing us to bypass index building for descending indexes.\n\nArgs:\ngettime: Used for testing.\n\nReturns:\nA string with a time descending key.", "source": "juraj-google-style"}
{"code": "def editline_with_regex(self, regex_tgtline, to_replace):\n        \n        for idx, line in enumerate(self._swp_lines):\n            mobj = re.match(regex_tgtline, line)\n\n            if mobj:\n                self._swp_lines[idx] = to_replace\n\n                return", "docstring": "find the first matched line, then replace\n\nArgs:\nregex_tgtline (str): regular expression used to match the target line\nto_replace    (str): line you wanna use to replace", "source": "juraj-google-style"}
{"code": "def emit_pid(self, name: str, pid: int) -> None:\n    event = {}\n    event['name'] = 'process_name'\n    event['ph'] = 'M'\n    event['pid'] = pid\n    event['args'] = {'name': name}\n    self._metadata.append(event)", "docstring": "Adds a process metadata event to the trace.\n\nArgs:\nname:  The process name as a string.\npid:  Identifier of the process as an integer.", "source": "github-repos"}
{"code": "def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting reshape ...')\n    if (names == 'short'):\n        tf_name = ('RESH' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    if (len(inputs) > 1):\n        if (layers[inputs[1]][0] == (- 1)):\n            print('Cannot deduct batch size! It will be omitted, but result may be wrong.')\n        reshape = keras.layers.Reshape(layers[(inputs[1] + '_np')], name=tf_name)\n        layers[scope_name] = reshape(layers[inputs[0]])\n    elif (inputs[0] in layers):\n        reshape = keras.layers.Reshape(params['shape'][1:], name=tf_name)\n        layers[scope_name] = reshape(layers[inputs[0]])\n    else:\n        print('Skip weight matrix transpose, but result may be wrong.')", "docstring": "Convert reshape layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def visit(self, visitor, visitor_arg):\n    visitor(self, visitor_arg)\n    for t in self._inner_types():\n        if isinstance(t, TypeConstraint):\n            t.visit(visitor, visitor_arg)\n        else:\n            visitor(t, visitor_arg)", "docstring": "Visitor method to visit all inner types of a composite type.\n\nArgs:\nvisitor: A callable invoked for all nodes in the type tree comprising\na composite type. The visitor will be called with the node visited\nand the visitor argument specified here.\nvisitor_arg: Visitor callback second argument.", "source": "github-repos"}
{"code": "def _align_monomer(self, monomer, mon_vector, move_direction):\n    axis = np.cross(mon_vector, move_direction)\n    origin = monomer[self.start].coords\n    angle = get_angle(mon_vector, move_direction)\n    op = SymmOp.from_origin_axis_angle(origin, axis, angle)\n    monomer.apply_operation(op)", "docstring": "rotate the monomer so that it is aligned along the move direction\n\nArgs:\nmonomer (Molecule)\nmon_vector (numpy.array): molecule vector that starts from the\nstart atom index to the end atom index\nmove_direction (numpy.array): the direction of the polymer chain\nextension", "source": "codesearchnet"}
{"code": "def copy_submission_to_destination(self, src_filename, dst_subdir, submission_id):\n    extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]\n    if (len(extension) != 1):\n        logging.error('Invalid submission extension: %s', src_filename)\n        return\n    dst_filename = os.path.join(self.target_dir, dst_subdir, (submission_id + extension[0]))\n    cmd = ['gsutil', 'cp', src_filename, dst_filename]\n    if (subprocess.call(cmd) != 0):\n        logging.error(\"Can't copy submission to destination\")\n    else:\n        logging.info('Submission copied to: %s', dst_filename)", "docstring": "Copies submission to target directory.\n\nArgs:\nsrc_filename: source filename of the submission\ndst_subdir: subdirectory of the target directory where submission should\nbe copied to\nsubmission_id: ID of the submission, will be used as a new\nsubmission filename (before extension)", "source": "codesearchnet"}
{"code": "def make_ndarray(tensor):\n    \n    shape = [d.size for d in tensor.tensor_shape.dim]\n    num_elements = np.prod(shape, dtype=np.int64)\n    tensor_dtype = dtypes.as_dtype(tensor.dtype)\n    dtype = tensor_dtype.as_numpy_dtype\n\n    if tensor.tensor_content:\n        return np.frombuffer(tensor.tensor_content, dtype=dtype).copy().reshape(shape)\n    elif tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:\n        \n        \n        if len(tensor.half_val) == 1:\n            tmp = np.array(tensor.half_val[0], dtype=np.uint16)\n            tmp.dtype = tensor_dtype.as_numpy_dtype\n            return np.repeat(tmp, num_elements).reshape(shape)\n        else:\n            tmp = np.fromiter(tensor.half_val, dtype=np.uint16)\n            tmp.dtype = tensor_dtype.as_numpy_dtype\n            return tmp.reshape(shape)\n    elif tensor_dtype == dtypes.float32:\n        if len(tensor.float_val) == 1:\n            return np.repeat(\n                np.array(tensor.float_val[0], dtype=dtype), num_elements\n            ).reshape(shape)\n        else:\n            return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)\n    elif tensor_dtype == dtypes.float64:\n        if len(tensor.double_val) == 1:\n            return np.repeat(\n                np.array(tensor.double_val[0], dtype=dtype), num_elements\n            ).reshape(shape)\n        else:\n            return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)\n    elif tensor_dtype in [\n        dtypes.int32,\n        dtypes.uint8,\n        dtypes.uint16,\n        dtypes.int16,\n        dtypes.int8,\n        dtypes.qint32,\n        dtypes.quint8,\n        dtypes.qint8,\n        dtypes.qint16,\n        dtypes.quint16,\n    ]:\n        if len(tensor.int_val) == 1:\n            return np.repeat(\n                np.array(tensor.int_val[0], dtype=dtype), num_elements\n            ).reshape(shape)\n        else:\n            return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)\n    elif tensor_dtype == dtypes.int64:\n        if len(tensor.int64_val) == 1:\n            return np.repeat(\n                np.array(tensor.int64_val[0], dtype=dtype), num_elements\n            ).reshape(shape)\n        else:\n            return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)\n    elif tensor_dtype == dtypes.string:\n        if len(tensor.string_val) == 1:\n            return np.repeat(\n                np.array(tensor.string_val[0], dtype=dtype), num_elements\n            ).reshape(shape)\n        else:\n            return np.array([x for x in tensor.string_val], dtype=dtype).reshape(shape)\n    elif tensor_dtype == dtypes.complex64:\n        it = iter(tensor.scomplex_val)\n        if len(tensor.scomplex_val) == 2:\n            return np.repeat(\n                np.array(\n                    complex(tensor.scomplex_val[0], tensor.scomplex_val[1]), dtype=dtype\n                ),\n                num_elements,\n            ).reshape(shape)\n        else:\n            return np.array(\n                [complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype\n            ).reshape(shape)\n    elif tensor_dtype == dtypes.complex128:\n        it = iter(tensor.dcomplex_val)\n        if len(tensor.dcomplex_val) == 2:\n            return np.repeat(\n                np.array(\n                    complex(tensor.dcomplex_val[0], tensor.dcomplex_val[1]), dtype=dtype\n                ),\n                num_elements,\n            ).reshape(shape)\n        else:\n            return np.array(\n            
    [complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype\n            ).reshape(shape)\n    elif tensor_dtype == dtypes.bool:\n        if len(tensor.bool_val) == 1:\n            return np.repeat(\n                np.array(tensor.bool_val[0], dtype=dtype), num_elements\n            ).reshape(shape)\n        else:\n            return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)\n    else:\n        raise TypeError(\"Unsupported tensor type: %s\" % tensor.dtype)", "docstring": "Create a numpy ndarray from a tensor.\n\nCreate a numpy ndarray with the same shape and data as the tensor.\n\nArgs:\ntensor: A TensorProto.\n\nReturns:\nA numpy array with the tensor contents.\n\nRaises:\nTypeError: if tensor has unsupported type.", "source": "juraj-google-style"}
{"code": "def _get_next_task_from_raylet(self):\n    with profiling.profile('worker_idle'):\n        task = self.raylet_client.get_task()\n    ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())\n    return task", "docstring": "Get the next task from the raylet.\n\nReturns:\nA task from the raylet.", "source": "codesearchnet"}
{"code": "def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None, sv_individuals=None, vcf_sv_path=None, nr_variants=None, nr_sv_variants=None, profiles=None, matches=None, profile_path=None):\n    individual_positions = get_individual_positions(vcf_individuals)\n    sv_individual_positions = get_individual_positions(sv_individuals)\n    family_id = None\n    if case:\n        if (not case.affected_individuals):\n            LOG.warning('No affected individuals could be found in ped file')\n        family_id = case.family_id\n    case_id = (case_id or family_id)\n    if (case_id is None):\n        raise CaseError\n    case_obj = Case(case_id=case_id)\n    if vcf_path:\n        case_obj['vcf_path'] = vcf_path\n        case_obj['nr_variants'] = nr_variants\n    if vcf_sv_path:\n        case_obj['vcf_sv_path'] = vcf_sv_path\n        case_obj['nr_sv_variants'] = nr_sv_variants\n    if profile_path:\n        case_obj['profile_path'] = profile_path\n    ind_objs = []\n    if case:\n        if individual_positions:\n            _ind_pos = individual_positions\n        else:\n            _ind_pos = sv_individual_positions\n        for ind_id in case.individuals:\n            individual = case.individuals[ind_id]\n            try:\n                profile = (profiles[ind_id] if profiles else None)\n                similar_samples = (matches[ind_id] if matches else None)\n                ind_obj = Individual(ind_id=ind_id, case_id=case_id, ind_index=_ind_pos[ind_id], sex=individual.sex, profile=profile, similar_samples=similar_samples)\n                ind_objs.append(dict(ind_obj))\n            except KeyError:\n                raise CaseError('Ind %s in ped file does not exist in VCF', ind_id)\n    else:\n        for ind_id in individual_positions:\n            profile = (profiles[ind_id] if profiles else None)\n            similar_samples = (matches[ind_id] if matches else None)\n            ind_obj = Individual(ind_id=ind_id, case_id=case_id, ind_index=individual_positions[ind_id], profile=profile, similar_samples=similar_samples)\n            ind_objs.append(dict(ind_obj))\n    for ind_obj in ind_objs:\n        if vcf_sv_path:\n            case_obj['sv_individuals'].append(dict(ind_obj))\n            case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)\n        if vcf_path:\n            case_obj['individuals'].append(dict(ind_obj))\n            case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)\n    return case_obj", "docstring": "Build a Case from the given information\n\nArgs:\ncase(ped_parser.Family): A family object\nvcf_individuals(list): Show the order of inds in vcf file\ncase_id(str): If another name than the one in family file should be used\nvcf_path(str)\nsv_individuals(list): Show the order of inds in vcf file\nvcf_sv_path(str)\nnr_variants(int)\nnr_sv_variants(int)\nprofiles(dict): The profiles for each sample in vcf\nmatches(dict(list)): list of similar samples for each sample in vcf.\n\nReturns:\ncase_obj(models.Case)", "source": "codesearchnet"}
{"code": "def any_sparse(classes):\n    return any((c is sparse_tensor.SparseTensor for c in nest.flatten(classes)))", "docstring": "Checks for sparse tensor.\n\nArgs:\nclasses: a structure of objects that identify the dataset item classes\n\nReturns:\n`True` if `classes` contains a sparse tensor type and `False` otherwise.", "source": "github-repos"}
{"code": "def __init__(self, unique_identifier=None, usage_limits_count=None):\n        \n        super(GetUsageAllocationRequestPayload, self).__init__(\n            enums.Tags.REQUEST_PAYLOAD\n        )\n\n        self._unique_identifier = None\n        self._usage_limits_count = None\n\n        self.unique_identifier = unique_identifier\n        self.usage_limits_count = usage_limits_count", "docstring": "Construct a GetUsageAllocation request payload struct.\n\nArgs:\nunique_identifier (string): The ID of the managed object (e.g.,\na public key) to obtain a usage allocation for. Optional,\ndefaults to None.\nusage_limits_count (int): The number of usage limits units that\nshould be reserved for the object. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def from_string(cls, cl_function, dependencies=()):\n        \n        return_type, function_name, parameter_list, body = split_cl_function(cl_function)\n        return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies)", "docstring": "Parse the given CL function into a SimpleCLFunction object.\n\nArgs:\ncl_function (str): the function we wish to turn into an object\ndependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on\n\nReturns:\nSimpleCLFunction: the CL data type for this parameter declaration", "source": "juraj-google-style"}
{"code": "def write_int16(self, value, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.pack(('%sh' % endian), value)", "docstring": "Pack the value as a signed integer and write 2 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def _load_audio_list(self, path):\n        \n\n        result = {}\n\n        for entry in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=4):\n            for i in range(len(entry)):\n                if entry[i] == '\\\\N':\n                    entry[i] = None\n\n            if len(entry) < 4:\n                entry.extend([None] * (4 - len(entry)))\n\n            if not self.include_empty_licence and entry[2] is None:\n                continue\n\n            if self.include_licenses is not None and entry[2] not in self.include_licenses:\n                continue\n\n            result[entry[0]] = entry[1:]\n\n        return result", "docstring": "Load and filter the audio list.\n\nArgs:\npath (str): Path to the audio list file.\n\nReturns:\ndict: Dictionary of filtered sentences (id : username, license, attribution-url)", "source": "juraj-google-style"}
{"code": "def commit_signature(vcs, user_config, signature):\n    \n    if signature not in get_staged_signatures(vcs):\n        raise NotStagedError\n    evidence_path = _get_committed_history_path(vcs)\n    committed_signatures = get_committed_signatures(vcs)\n    if signature in committed_signatures:\n        raise AlreadyCommittedError\n    committed_signatures.append(signature)\n    string = '\\n'.join(committed_signatures[-user_config['history_limit']:])\n    with open(evidence_path, 'w') as f:\n        f.write(string)\n    unstage_signature(vcs, signature)", "docstring": "Add `signature` to the list of committed signatures\n\nThe signature must already be staged\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nuser_config (dict)\nsignature (basestring)\n\nRaises:\nNotStagedError\nAlreadyCommittedError", "source": "juraj-google-style"}
{"code": "def get(self, entity, key):\n    if entity in self._store:\n        return self._store[entity].get(str(key))\n    return None", "docstring": "Gets and item from the cache.\n\nArgs:\nentity: The entity cache to use.\nkey: The key to use to lookup the cached item.", "source": "github-repos"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key not in ('logline', 'no_header_single_line'):\n      raise errors.ParseError(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n\n    if key == 'logline':\n      self._ParseLogline(parser_mediator, structure)\n\n    elif key == 'no_header_single_line':\n      self._ParseNoHeaderSingleLine(parser_mediator, structure)", "docstring": "Parse each record structure and return an EventObject if applicable.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): identifier of the structure of tokens.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "juraj-google-style"}
{"code": "def _verify_docker_image_size(self, image_name):\n    shell_call(['docker', 'pull', image_name])\n    try:\n        image_size = subprocess.check_output(['docker', 'inspect', '--format={{.Size}}', image_name]).strip()\n        image_size = int(image_size)\n    except (ValueError, subprocess.CalledProcessError) as e:\n        logging.error('Failed to determine docker image size: %s', e)\n        return False\n    logging.info('Size of docker image %s is %d', image_name, image_size)\n    if (image_size > MAX_DOCKER_IMAGE_SIZE):\n        logging.error('Image size exceeds limit %d', MAX_DOCKER_IMAGE_SIZE)\n    return (image_size <= MAX_DOCKER_IMAGE_SIZE)", "docstring": "Verifies size of Docker image.\n\nArgs:\nimage_name: name of the Docker image.\n\nReturns:\nTrue if image size is within the limits, False otherwise.", "source": "codesearchnet"}
{"code": "def createURL(self, word, mode=\"phonefy\"):\n        \n        try:\n            return self.modes[mode][\"url\"].format(placeholder=urllib.pathname2url(word))\n        except:\n            if mode == \"base\":\n                if word[0] == \"/\":\n                    return self.baseURL+word[1:], word\n                else:\n                    return self.baseURL+word\n            else:\n                try:\n                    return self.url[mode].replace(\"<\"+mode+\">\", urllib.pathname2url(word))\n                except:\n                    pass\n        return None", "docstring": "Method to create the URL replacing the word in the appropriate URL.\n\nArgs:\n-----\nword: Word to be searched.\nmode: Mode to be executed.\n\nReturn:\n-------\nThe URL to be queried.", "source": "juraj-google-style"}
{"code": "def encode(self, sequence):\n        \n        sequence = super().encode(sequence)\n        sequence = self.tokenize(sequence)\n        vector = [self.stoi.get(token, self.unknown_index) for token in sequence]\n        if self.append_eos:\n            vector.append(self.eos_index)\n        return torch.tensor(vector)", "docstring": "Encodes a ``sequence``.\n\nArgs:\nsequence (str): String ``sequence`` to encode.\n\nReturns:\ntorch.Tensor: Encoding of the ``sequence``.", "source": "juraj-google-style"}
{"code": "def delete(self, vid):\n        \n        command = 'no vlan %s' % vid\n        return self.configure(command) if isvlan(vid) else False", "docstring": "Deletes a VLAN from the running configuration\n\nArgs:\nvid (str): The VLAN ID to delete\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "juraj-google-style"}
{"code": "def get_device(ads, **kwargs):\n    filtered = get_devices(ads, **kwargs)\n    if len(filtered) == 1:\n        return filtered[0]\n    else:\n        serials = [ad.serial for ad in filtered]\n        raise Error('More than one device matched: %s' % serials)", "docstring": "Finds a unique AndroidDevice instance from a list that has specific\nattributes of certain values.\n\nExample:\nget_device(android_devices, label='foo', phone_number='1234567890')\nget_device(android_devices, model='angler')\n\nArgs:\nads: A list of AndroidDevice instances.\nkwargs: keyword arguments used to filter AndroidDevice instances.\n\nReturns:\nThe target AndroidDevice instance.\n\nRaises:\nError: None or more than one device is matched.", "source": "github-repos"}
{"code": "def __init__(self, in_features: int, lateral_widths: List[int], feature_size: int=256):\n    super().__init__()\n    self.stem = MaskFormerFPNConvLayer(in_features, feature_size)\n    self.layers = nn.Sequential(*[MaskFormerFPNLayer(feature_size, lateral_width) for lateral_width in lateral_widths[::-1]])", "docstring": "Feature Pyramid Network, given an input tensor and a set of feature map of different feature/spatial size, it\ncreates a list of feature maps with the same feature size.\n\nArgs:\nin_features (`int`):\nThe number of input features (channels).\nlateral_widths (`List[int]`):\nA list with the features (channels) size of each lateral connection.\nfeature_size (int, *optional*, defaults to 256):\nThe features (channels) of the resulting feature maps.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    self._last_charset_attribute = 'ascii'\n    self._ParseHeader(parser_mediator, file_object)\n    data_dict = {}\n    time_dict = {}\n    try:\n        for (name, value) in self._ParseAttributesGroup(file_object):\n            name = self._ATTRIBUTE_NAME_TRANSLATION.get(name, name)\n            if (name in self._DATE_TIME_VALUE_NAMES):\n                time_dict.setdefault(name, []).append(value)\n            else:\n                data_dict.setdefault(name, []).append(value)\n    except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning('unable to parse attributes with error: {0!s}'.format(exception))\n        return\n    event_data = CupsIppEventData()\n    event_data.application = self._GetStringValue(data_dict, 'application')\n    event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')\n    event_data.copies = data_dict.get('copies', [0])[0]\n    event_data.data_dict = data_dict\n    event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')\n    event_data.job_id = self._GetStringValue(data_dict, 'job_id')\n    event_data.job_name = self._GetStringValue(data_dict, 'job_name')\n    event_data.user = self._GetStringValue(data_dict, 'user')\n    event_data.owner = self._GetStringValue(data_dict, 'owner')\n    event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')\n    event_data.uri = self._GetStringValue(data_dict, 'uri')\n    for (name, usage) in iter(self._DATE_TIME_VALUES.items()):\n        for date_time in time_dict.get(name, []):\n            event = time_events.DateTimeValuesEvent(date_time, usage)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n    for (name, usage) in iter(self._POSIX_TIME_VALUES.items()):\n        for time_value in time_dict.get(name, []):\n            date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)\n            event = time_events.DateTimeValuesEvent(date_time, usage)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a CUPS IPP file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Args:\nCreate a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not:\nmake use of token type ids, therefore a list of zeros is returned.\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _build_map(inputs, outputs):\n    finished_nodes = set()\n    nodes_in_progress = set()\n    nodes_in_decreasing_depth = []\n    operation_indices = {}\n    for output in tree.flatten(outputs):\n        _build_map_helper(inputs, output, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, operation_indices)\n    return (nodes_in_decreasing_depth, operation_indices)", "docstring": "Topologically sort nodes in order from inputs to outputs.\n\nIt uses a depth-first search to topologically sort nodes that appear in the\n_keras_history connectivity metadata of `outputs`.\n\nArgs:\noutputs: the output tensors whose _keras_history metadata should be\nwalked. This may be an arbitrary nested structure.\n\nReturns:\nA tuple like (ordered_nodes, operation_to_first_traversal_index)\nordered_nodes: list of nodes appearing in the keras history,\ntopologically sorted from original inputs to the `outputs`.\n(If outputs have different sets of ancestors, the inputs to one\noutput may appear after a different output).\noperation_to_first_traversal_index:\nA dict mapping operation to the traversal index in the DFS where it\nis seen. Note: if a operation is shared by several nodes, the dict\nwill onlystore the index corresponding to the *first* time the\noperation seen.", "source": "github-repos"}
{"code": "def is_spontaneous(gene, custom_id=None):\n    \n\n    spont = re.compile(\"[Ss](_|)0001\")\n    if spont.match(gene.id):\n        return True\n    elif gene.id == custom_id:\n        return True\n    else:\n        return False", "docstring": "Input a COBRApy Gene object and check if the ID matches a spontaneous ID regex.\n\nArgs:\ngene (Gene): COBRApy Gene\ncustom_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nbool: If gene ID matches spontaneous ID", "source": "juraj-google-style"}
{"code": "def backups(self):\n    if (not self.__backups):\n        self.__backups = Backups(self.__connection)\n    return self.__backups", "docstring": "Gets the Backup API client.\n\nReturns:\nBackups:", "source": "codesearchnet"}
{"code": "def Pack(cls, obj, version):\n    if isinstance(obj, (datetime.datetime, datetime.date)):\n        return cls.AdManagerDateTimePacker(obj, version)\n    return obj", "docstring": "Pack the given object using Ad Manager-specific logic.\n\nArgs:\nobj: an object to be packed for SOAP using Ad Manager-specific logic, if\napplicable.\nversion: the version of the current API, e.g. 'v201811'\n\nReturns:\nThe given object packed with Ad Manager-specific logic for SOAP,\nif applicable. Otherwise, returns the given object unmodified.", "source": "codesearchnet"}
{"code": "def write_config_files(self, host, hyperparameters, input_data_config):\n        \n        config_path = os.path.join(self.container_root, host, 'input', 'config')\n\n        resource_config = {\n            'current_host': host,\n            'hosts': self.hosts\n        }\n\n        json_input_data_config = {}\n        for c in input_data_config:\n            channel_name = c['ChannelName']\n            json_input_data_config[channel_name] = {\n                'TrainingInputMode': 'File'\n            }\n            if 'ContentType' in c:\n                json_input_data_config[channel_name]['ContentType'] = c['ContentType']\n\n        _write_json_file(os.path.join(config_path, 'hyperparameters.json'), hyperparameters)\n        _write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config)\n        _write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)", "docstring": "Write the config files for the training containers.\n\nThis method writes the hyperparameters, resources and input data configuration files.\n\nArgs:\nhost (str): Host to write the configuration for\nhyperparameters (dict): Hyperparameters for training.\ninput_data_config (dict): Training input channels to be used for training.\n\nReturns: None", "source": "juraj-google-style"}
{"code": "def get_pluggable_module_information(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri) + '/pluggableModuleInformation')\n    return self._client.get(uri)", "docstring": "Gets all the pluggable module information.\n\nArgs:\nid_or_uri: Can be either the interconnect id or uri.\n\nReturns:\narray: dicts of the pluggable module information.", "source": "codesearchnet"}
{"code": "def _get_manager(cluster_info, host, executor_id):\n    for node in cluster_info:\n        if ((node['host'] == host) and (node['executor_id'] == executor_id)):\n            addr = node['addr']\n            authkey = node['authkey']\n            TFSparkNode.mgr = TFManager.connect(addr, authkey)\n            break\n    if (TFSparkNode.mgr is None):\n        msg = ((('No TFManager found on this node, please ensure that:\\n' + '1. Spark num_executors matches TensorFlow cluster_size\\n') + '2. Spark cores/tasks per executor is 1.\\n') + '3. Spark dynamic allocation is disabled.')\n        raise Exception(msg)\n    logging.info('Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}'.format(host, executor_id, str(TFSparkNode.mgr.get('state'))))\n    return TFSparkNode.mgr", "docstring": "Returns this executor's \"singleton\" instance of the multiprocessing.Manager, reconnecting per python-worker if needed.\n\nArgs:\n:cluster_info: cluster node reservations\n:host: host IP address\n:executor_id: unique id per executor (created during initial call to run())\n\nReturns:\nTFManager instance for this executor/python-worker", "source": "codesearchnet"}
{"code": "def get_prep_value(self, value: LocalizedValue) -> dict:\n    if isinstance(value, dict):\n        value = LocalizedValue(value)\n    if ((not isinstance(value, LocalizedValue)) and value):\n        value = None\n    if value:\n        cleaned_value = self.clean(value)\n        self.validate(cleaned_value)\n    else:\n        cleaned_value = value\n    return super(LocalizedField, self).get_prep_value((cleaned_value.__dict__ if cleaned_value else None))", "docstring": "Turns the specified value into something the database\ncan store.\n\nIf an illegal value (non-LocalizedValue instance) is\nspecified, we'll treat it as an empty :see:LocalizedValue\ninstance, on which the validation will fail.\n\nDictonaries are converted into :see:LocalizedValue instances.\n\nArguments:\nvalue:\nThe :see:LocalizedValue instance to serialize\ninto a data type that the database can understand.\n\nReturns:\nA dictionary containing a key for every language,\nextracted from the specified value.", "source": "codesearchnet"}
{"code": "def __init__(self, input_fn, input_workers, input_contexts, strategy):\n    assert isinstance(input_workers, input_lib.InputWorkers)\n    if input_workers.num_workers != len(input_contexts):\n        raise ValueError('Number of input workers (%d) is not same as number of input_contexts (%d)' % (input_workers.num_workers, len(input_contexts)))\n    iterators = []\n    for i, ctx in enumerate(input_contexts):\n        worker = input_workers.worker_devices[i]\n        with ops.device(worker):\n            result = input_fn(ctx)\n            devices = input_workers.compute_devices_for_worker(i)\n            if isinstance(result, data_types.DatasetV2):\n                iterator = _SingleWorkerDatasetIterator(result, worker, devices)\n            elif callable(result):\n                iterator = _SingleWorkerCallableIterator(result, worker, devices)\n            else:\n                raise ValueError('input_fn must return a tf.data.Dataset or a callable.')\n            iterators.append(iterator)\n    super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy, cardinality=cardinality_lib.UNKNOWN, enable_get_next_as_optional=False)\n    self._enable_get_next_as_optional = False", "docstring": "Make an iterator for input provided via an input function.\n\nCurrently implements PER_WORKER mode, in which the `input_fn` is called\nonce on each worker.\n\nTODO(priyag): Add other replication modes.\n\nArgs:\ninput_fn: Input function that returns a `tf.data.Dataset` object.\ninput_workers: an `InputWorkers` object.\ninput_contexts: A list of `InputContext` instances to be passed to call(s)\nto `input_fn`. Length and order should match worker order in\n`worker_device_pairs`.\nstrategy: a `tf.distribute.Strategy` object, used to run all-reduce to\nhandle last partial batch.", "source": "github-repos"}
{"code": "def _right_pad(x, final_rank):\n    padded_shape = tf.concat([tf.shape(input=x), tf.ones((final_rank - tf.rank(x)), dtype=tf.int32)], axis=0)\n    static_padded_shape = None\n    if (x.shape.is_fully_defined() and isinstance(final_rank, int)):\n        static_padded_shape = x.shape.as_list()\n        extra_dims = (final_rank - len(static_padded_shape))\n        static_padded_shape.extend(([1] * extra_dims))\n    padded_x = tf.reshape(x, (static_padded_shape or padded_shape))\n    return padded_x", "docstring": "Pads the shape of x to the right to be of rank final_rank.\n\nExpands the dims of `x` to the right such that its rank is equal to\nfinal_rank. For example, if `x` is of shape [1, 5, 7, 2] and `final_rank` is\n7, we return padded_x, which is of shape [1, 5, 7, 2, 1, 1, 1].\n\nArgs:\nx: The tensor whose shape is to be padded.\nfinal_rank: Scalar int32 `Tensor` or Python `int`. The desired rank of x.\n\nReturns:\npadded_x: A tensor of rank final_rank.", "source": "codesearchnet"}
{"code": "def from_bytes(value):\n    result = (value.decode('utf-8') if isinstance(value, six.binary_type) else value)\n    if isinstance(result, six.text_type):\n        return result\n    else:\n        raise ValueError('{0!r} could not be converted to unicode'.format(value))", "docstring": "Converts bytes to a string value, if necessary.\n\nArgs:\nvalue (Union[str, bytes]): The value to be converted.\n\nReturns:\nstr: The original value converted to unicode (if bytes) or as passed in\nif it started out as unicode.\n\nRaises:\nValueError: If the value could not be converted to unicode.", "source": "codesearchnet"}
{"code": "def set_servo_speed(self, goalspeed, led):\n        \n        if goalspeed>0 :\n            goalspeed_msb = (int(goalspeed)& 0xFF00) >> 8\n            goalspeed_lsb = int(goalspeed) & 0xff\n        elif goalspeed<0 :\n            goalspeed_msb = 64+(255- ((int(goalspeed)& 0xFF00) >> 8))\n            goalspeed_lsb = (abs(goalspeed) & 0xff)\n\n        \n        data = []\n        data.append(0x0C)\n        data.append(self.servoid)\n        data.append(I_JOG_REQ)\n        data.append(goalspeed_lsb)\n        data.append(goalspeed_msb)\n        data.append(0x02|led)\n        data.append(self.servoid)\n        data.append(0x00)\n        send_data(data)", "docstring": "Set the Herkulex in continuous rotation mode\n\nArgs:\n\ngoalspeed (int): the speed , range -1023 to 1023\nled (int): the LED color\n0x00 LED off\n0x04 GREEN\n0x08 BLUE\n0x10 RED", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    device_cache = match.get('DeviceCache', {})\n    for (device, value) in iter(device_cache.items()):\n        name = value.get('Name', '')\n        if name:\n            name = ''.join(('Name:', name))\n        event_data = plist_event.PlistTimeEventData()\n        event_data.root = '/DeviceCache'\n        datetime_value = value.get('LastInquiryUpdate', None)\n        if datetime_value:\n            event_data.desc = ' '.join(filter(None, ('Bluetooth Discovery', name)))\n            event_data.key = '{0:s}/LastInquiryUpdate'.format(device)\n            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n            if (device in match.get('PairedDevices', [])):\n                event_data.desc = 'Paired:True {0:s}'.format(name)\n                event_data.key = device\n                event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n                parser_mediator.ProduceEventWithEventData(event, event_data)\n        datetime_value = value.get('LastNameUpdate', None)\n        if datetime_value:\n            event_data.desc = ' '.join(filter(None, ('Device Name Set', name)))\n            event_data.key = '{0:s}/LastNameUpdate'.format(device)\n            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        datetime_value = value.get('LastServicesUpdate', None)\n        if datetime_value:\n            event_data.desc = ' '.join(filter(None, ('Services Updated', name)))\n            event_data.key = '{0:s}/LastServicesUpdate'.format(device)\n            event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant BT entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:\n    kwargs.update({'name': name})\n    return self.api_call('reactions.add', json=kwargs)", "docstring": "Adds a reaction to an item.\n\nArgs:\nname (str): Reaction (emoji) name. e.g. 'thumbsup'\nchannel (str): Channel where the message to add reaction to was posted.\ne.g. 'C1234567890'\ntimestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'", "source": "codesearchnet"}
{"code": "def unsplat(f: Callable[([Iterable], A)]) -> Callable[(..., A)]:\n\n    def unsplatted(*args):\n        return f(args)\n    return unsplatted", "docstring": "Convert a function taking a single iterable argument into a function taking multiple arguments.\n\nArgs:\nf: Any function taking a single iterable argument\n\nReturns:\nA function that accepts multiple arguments. Each argument of this function is passed as an element of an\niterable to ``f``.\n\nExample:\n$ def f(a):\n$     return a[0] + a[1] + a[2]\n$\n$ f([1, 2, 3])  # 6\n$ g = unsplat(f)\n$ g(1, 2, 3)  # 6", "source": "codesearchnet"}
{"code": "def get_full_filename_by_suffixes(dir_src, suffixes):\n        \n        \n        file_names = FileClass.get_filename_by_suffixes(dir_src, suffixes)\n        if file_names is None:\n            return None\n        return list(dir_src + os.sep + name for name in file_names)", "docstring": "get full file names with the given suffixes in the given directory\n\nArgs:\ndir_src: directory path\nsuffixes: wanted suffixes\n\nReturns:\nfull file names with the given suffixes as list", "source": "juraj-google-style"}
{"code": "def sign(check_request):\n    if (not isinstance(check_request, sc_messages.CheckRequest)):\n        raise ValueError(u'Invalid request')\n    op = check_request.operation\n    if ((op is None) or (op.operationName is None) or (op.consumerId is None)):\n        logging.error(u'Bad %s: not initialized => not signed', check_request)\n        raise ValueError(u'check request must be initialized with an operation')\n    md5 = hashlib.md5()\n    md5.update(op.operationName.encode('utf-8'))\n    md5.update(b'\\x00')\n    md5.update(op.consumerId.encode('utf-8'))\n    if op.labels:\n        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n    for value_set in op.metricValueSets:\n        md5.update(b'\\x00')\n        md5.update(value_set.metricName.encode('utf-8'))\n        for mv in value_set.metricValues:\n            metric_value.update_hash(md5, mv)\n    md5.update(b'\\x00')\n    if op.quotaProperties:\n        md5.update(repr(op.quotaProperties).encode('utf-8'))\n    md5.update(b'\\x00')\n    return md5.digest()", "docstring": "Obtains a signature for an operation in a `CheckRequest`\n\nArgs:\nop (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an\noperation used in a `CheckRequest`\n\nReturns:\nstring: a secure hash generated from the operation", "source": "codesearchnet"}
{"code": "def LineWrap(text, omit_sgr=False):\n  \n\n  def _SplitWithSgr(text_line):\n    \n    token_list = sgr_re.split(text_line)\n    text_line_list = []\n    line_length = 0\n    for (index, token) in enumerate(token_list):\n      \n      if token is '':\n        continue\n\n      if sgr_re.match(token):\n        \n        text_line_list.append(token)\n        text_line = ''.join(token_list[index +1:])\n      else:\n        if line_length + len(token) <= width:\n          \n          text_line_list.append(token)\n          line_length += len(token)\n          text_line = ''.join(token_list[index +1:])\n        else:\n          \n          \n          text_line_list.append(token[:width - line_length])\n          text_line = token[width - line_length:]\n          text_line += ''.join(token_list[index +1:])\n          break\n\n    return (''.join(text_line_list), text_line)\n\n  \n  \n  (_, width) = TerminalSize()\n  text = str(text)\n  text_multiline = []\n  for text_line in text.splitlines():\n    \n    while ((omit_sgr and (len(StripAnsiText(text_line)) > width)) or\n           (len(text_line) > width)):\n      \n      if not omit_sgr:\n        text_multiline.append(text_line[:width])\n        text_line = text_line[width:]\n      else:\n        (multiline_line, text_line) = _SplitWithSgr(text_line)\n        text_multiline.append(multiline_line)\n    if text_line:\n      text_multiline.append(text_line)\n  return '\\n'.join(text_multiline)", "docstring": "Break line to fit screen width, factoring in ANSI/SGR escape sequences.\n\nArgs:\ntext: String to line wrap.\nomit_sgr: Bool, to omit counting ANSI/SGR sequences in the length.\n\nReturns:\nText with additional line wraps inserted for lines grater than the width.", "source": "juraj-google-style"}
{"code": "async def _on_state_update(self, state_update):\n        \n        \n        notification_type = state_update.WhichOneof('state_update')\n\n        \n        \n        \n        if state_update.HasField('conversation'):\n            try:\n                await self._handle_conversation_delta(\n                    state_update.conversation\n                )\n            except exceptions.NetworkError:\n                logger.warning(\n                    'Discarding %s for %s: Failed to fetch conversation',\n                    notification_type.replace('_', ' '),\n                    state_update.conversation.conversation_id.id\n                )\n                return\n\n        if notification_type == 'typing_notification':\n            await self._handle_set_typing_notification(\n                state_update.typing_notification\n            )\n        elif notification_type == 'watermark_notification':\n            await self._handle_watermark_notification(\n                state_update.watermark_notification\n            )\n        elif notification_type == 'event_notification':\n            await self._on_event(\n                state_update.event_notification.event\n            )", "docstring": "Receive a StateUpdate and fan out to Conversations.\n\nArgs:\nstate_update: hangouts_pb2.StateUpdate instance", "source": "juraj-google-style"}
{"code": "def cmd_ssh_user(tar_aminame, inst_name):\n    \n    if tar_aminame == \"Unknown\":\n        tar_aminame = inst_name\n    \n    userlu = {\"ubunt\": \"ubuntu\", \"debia\": \"admin\", \"fedor\": \"root\",\n              \"cento\": \"centos\", \"openb\": \"root\"}\n    usertemp = ['name'] + [value for key, value in list(userlu.items())\n                           if key in tar_aminame.lower()]\n    usertemp = dict(zip(usertemp[::2], usertemp[1::2]))\n    username = usertemp.get('name', 'ec2-user')\n    debg.dprint(\"loginuser Calculated: \", username)\n    return username", "docstring": "Calculate instance login-username based on image-name.\n\nArgs:\ntar_aminame (str): name of the image instance created with.\ninst_name (str): name of the instance.\nReturns:\nusername (str): name for ssh based on AMI-name.", "source": "juraj-google-style"}
{"code": "def get_users(self, capacity=None):\n    users = list()\n    usersdicts = self.data.get('users')\n    if (usersdicts is not None):\n        for userdata in usersdicts:\n            if ((capacity is not None) and (userdata['capacity'] != capacity)):\n                continue\n            id = userdata.get('id')\n            if (id is None):\n                id = userdata['name']\n            user = hdx.data.user.User.read_from_hdx(id, configuration=self.configuration)\n            user['capacity'] = userdata['capacity']\n            users.append(user)\n    return users", "docstring": "Returns the organization's users.\n\nArgs:\ncapacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.\nReturns:\nList[User]: Organization's users.", "source": "codesearchnet"}
{"code": "def to_text_diagram(\n            self,\n            *,\n            use_unicode_characters: bool = True,\n            transpose: bool = False,\n            precision: Optional[int] = 3,\n            qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:\n        \n        diagram = self.to_text_diagram_drawer(\n            use_unicode_characters=use_unicode_characters,\n            precision=precision,\n            qubit_order=qubit_order,\n            transpose=transpose)\n\n        return diagram.render(\n            crossing_char=(None\n                           if use_unicode_characters\n                           else ('-' if transpose else '|')),\n            horizontal_spacing=1 if transpose else 3,\n            use_unicode_characters=use_unicode_characters)", "docstring": "Returns text containing a diagram describing the circuit.\n\nArgs:\nuse_unicode_characters: Determines if unicode characters are\nallowed (as opposed to ascii-only diagrams).\ntranspose: Arranges qubit wires vertically instead of horizontally.\nprecision: Number of digits to display in text diagram\nqubit_order: Determines how qubits are ordered in the diagram.\n\nReturns:\nThe text diagram.", "source": "juraj-google-style"}
{"code": "def _PrintStorageInformationAsText(self, storage_reader):\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Plaso Storage Information')\n    table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)])\n    table_view.AddRow(['Format version', storage_reader.format_version])\n    table_view.AddRow(['Serialization format', storage_reader.serialization_format])\n    table_view.Write(self._output_writer)\n    if (storage_reader.storage_type == definitions.STORAGE_TYPE_SESSION):\n        self._PrintSessionsOverview(storage_reader)\n        self._PrintSessionsDetails(storage_reader)\n        storage_counters = self._CalculateStorageCounters(storage_reader)\n        if ('parsers' not in storage_counters):\n            self._output_writer.Write('Unable to determine number of events generated per parser.\\n')\n        else:\n            self._PrintParsersCounter(storage_counters['parsers'])\n        if ('analysis_reports' not in storage_counters):\n            self._output_writer.Write('Unable to determine number of reports generated per plugin.\\n')\n        else:\n            self._PrintAnalysisReportCounter(storage_counters['analysis_reports'])\n        if ('event_labels' not in storage_counters):\n            self._output_writer.Write('Unable to determine number of event tags generated per label.\\n')\n        else:\n            self._PrintEventLabelsCounter(storage_counters['event_labels'])\n        self._PrintWarningCounters(storage_counters)\n        if self._verbose:\n            self._PrintWarningsDetails(storage_reader)\n        self._PrintAnalysisReportsDetails(storage_reader)\n    elif (storage_reader.storage_type == definitions.STORAGE_TYPE_TASK):\n        self._PrintTasksInformation(storage_reader)", "docstring": "Prints information about the store as human-readable text.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "codesearchnet"}
{"code": "def PyParseIntCast(string, location, tokens):\n    for (index, token) in enumerate(tokens):\n        try:\n            tokens[index] = int(token)\n        except ValueError:\n            logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(token))\n            tokens[index] = 0\n    for key in tokens.keys():\n        try:\n            tokens[key] = int(tokens[key], 10)\n        except ValueError:\n            logger.error('Unable to cast [{0:s} = {1:d}] to an int, setting to 0'.format(key, tokens[key]))\n            tokens[key] = 0", "docstring": "Return an integer from a string.\n\nThis is a pyparsing callback method that converts the matched\nstring into an integer.\n\nThe method modifies the content of the tokens list and converts\nthem all to an integer value.\n\nArgs:\nstring (str): original string.\nlocation (int): location in the string where the match was made.\ntokens (list[str]): extracted tokens, where the string to be converted\nis stored.", "source": "codesearchnet"}
{"code": "def create_header(cls, request_id=None):\n    header = {'msgid': bkserial.make_id(), 'msgtype': cls.msgtype}\n    if (request_id is not None):\n        header['reqid'] = request_id\n    return header", "docstring": "Return a message header fragment dict.\n\nArgs:\nrequest_id (str or None) :\nMessage ID of the message this message replies to\n\nReturns:\ndict : a message header", "source": "codesearchnet"}
{"code": "def recipe_bigquery_storage(config, auth_read, bucket, auth_write, path, dataset, table, schema):\n    bigquery(config, {'auth': auth_read, 'from': {'bucket': bucket, 'path': path}, 'to': {'auth': auth_write, 'dataset': dataset, 'table': table}, 'schema': schema})", "docstring": "Move using bucket and path prefix.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nbucket (string) - Google cloud bucket.\nauth_write (authentication) - Credentials used for writing data.\npath (string) - Path prefix to read from, no * required.\ndataset (string) - Existing BigQuery dataset.\ntable (string) - Table to create from this query.\nschema (json) - Schema provided in JSON list format or empty list.", "source": "github-repos"}
{"code": "def MeshViewers(\n        shape=(1, 1), titlebar=\"Mesh Viewers\", keepalive=False,\n        window_width=1280, window_height=960\n):\n    \n    if not test_for_opengl():\n        return Dummy()\n    mv = MeshViewerLocal(\n        shape=shape, titlebar=titlebar, uid=None, keepalive=keepalive,\n        window_width=window_width, window_height=window_height\n    )\n    return mv.get_subwindows()", "docstring": "Allows subplot-style inspection of primitives in multiple subwindows.\n\nArgs:\nshape: a tuple indicating the number of vertical and horizontal windows requested\n\nReturns: a list of lists of MeshViewer objects: one per window requested.", "source": "juraj-google-style"}
{"code": "def standard_to_absl(level):\n    if (not isinstance(level, int)):\n        raise TypeError('Expect an int level, found {}'.format(type(level)))\n    if (level < 0):\n        level = 0\n    if (level < STANDARD_DEBUG):\n        return ((STANDARD_DEBUG - level) + 1)\n    elif (level < STANDARD_INFO):\n        return ABSL_DEBUG\n    elif (level < STANDARD_WARNING):\n        return ABSL_INFO\n    elif (level < STANDARD_ERROR):\n        return ABSL_WARNING\n    elif (level < STANDARD_CRITICAL):\n        return ABSL_ERROR\n    else:\n        return ABSL_FATAL", "docstring": "Converts an integer level from the standard value to the absl value.\n\nArgs:\nlevel: int, a Python standard logging level.\n\nRaises:\nTypeError: Raised when level is not an integer.\n\nReturns:\nThe corresponding integer level for use in absl logging.", "source": "codesearchnet"}
{"code": "def get_help_datapacks(module_name, server_prefix):\n    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n    module_dir = '{}/../{}'.format(_dir, module_name, '_help.json')\n    if os.path.isdir(module_dir):\n        module_help_path = '{}/{}'.format(module_dir, '_help.json')\n        if os.path.isfile(module_help_path):\n            return helptools.get_help_datapacks(module_help_path, server_prefix)\n        else:\n            return [('Help', '{} does not have a help.json file'.format(module_name), False)]\n    else:\n        return [('Help', 'No module found called {}'.format(module_name), False)]", "docstring": "Get the help datapacks for a module\n\nArgs:\nmodule_name (str): The module to get help data for\nserver_prefix (str): The command prefix for this server\n\nReturns:\ndatapacks (list): The help datapacks for the module", "source": "codesearchnet"}
{"code": "def export(self, filename, offset=0, length=None):\n        \n        self.__validate_offset(filename=filename, offset=offset, length=length)\n\n        with open(filename, 'w') as f:\n            if length is None:\n                length = len(self.data) - offset\n\n            if offset > 0:\n                output = self.data[offset:length]\n            else:\n                output = self.data[:length]\n\n            f.write(output)", "docstring": "Exports byte array to specified destination\n\nArgs:\nfilename (str): destination to output file\noffset (int): byte offset (default: 0)", "source": "juraj-google-style"}
{"code": "def _compute_diff(left: pg.DNA, right: pg.DNA) -> Tuple[int, int, int]:\n    if left.value == right.value:\n        assert len(left.children) == len(right.children)\n        n = 0 if left.value is None else 1\n        w = 0\n        d = 0\n        for c1, c2 in zip(left.children, right.children):\n            cn, cw, cd = _compute_diff(c1, c2)\n            n += cn\n            w += cw\n            d += cd\n        return (n, w, d)\n    else:\n        nl = len(left.to_numbers())\n        nr = len(right.to_numbers())\n        n = max(nl, nr)\n        return (n, 1, n - 1)", "docstring": "Compute different positions in two DNAs.\n\nArgs:\nleft: the first DNA to compare.\nright: the right DNA to compare.\n\nReturns:\nA tuple of (N, W, D). 'N' is the total number of components in the larger\nDNA, 'W' is the number of matching genes with different values, and 'D' is\nthe number of disjoint\ngenes. PyGlove DNAs have no notion of 'E' (i.e. excess genes from the\noriginal paper), so we exclude them.", "source": "github-repos"}
{"code": "def vgg11_bn(pretrained=False, **kwargs):\n    \n    if pretrained:\n        kwargs['init_weights'] = False\n    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))\n    return model", "docstring": "VGG 11-layer model (configuration \"A\") with batch normalization\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"}
{"code": "def refresh(self, refresh_binary=True):\n\n\t\t\n\n\t\tupdated_self = self.repo.get_resource(self.uri)\n\n\t\t\n\t\tif not isinstance(self, type(updated_self)):\n\t\t\traise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self)) )\n\n\t\tif updated_self:\n\n\t\t\t\n\t\t\tself.status_code = updated_self.status_code\n\t\t\tself.rdf.data = updated_self.rdf.data\n\t\t\tself.headers = updated_self.headers\n\t\t\tself.exists = updated_self.exists\n\n\t\t\t\n\t\t\tif type(self) != NonRDFSource:\n\t\t\t\tself._parse_graph()\n\n\t\t\t\n\t\t\tself.versions = SimpleNamespace()\n\n\t\t\t\n\t\t\tif type(updated_self) == NonRDFSource and refresh_binary:\n\t\t\t\tself.binary.refresh(updated_self)\n\n\t\t\t\n\t\t\tif hasattr(self,'_post_refresh'):\n\t\t\t\tself._post_refresh()\n\n\t\t\t\n\t\t\tdel(updated_self)\n\n\t\telse:\n\t\t\tlogger.debug('resource %s not found, dumping values')\n\t\t\tself._empty_resource_attributes()", "docstring": "Performs GET request and refreshes RDF information for resource.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_data_node(self, path: DataPath) -> Optional[DataNode]:\n    addr = self.schema_data.path2route(path)\n    node = self.schema\n    for p in addr:\n        node = node.get_data_child(*p)\n        if (node is None):\n            return None\n    return node", "docstring": "Return the data node addressed by a data path.\n\nArgs:\npath: Data path.\n\nReturns:\nData node if found in the schema, or ``None``.\n\nRaises:\nInvalidSchemaPath: If the schema path is invalid.", "source": "codesearchnet"}
{"code": "def convert_to_numpy(x):\n    if any_symbolic_tensors((x,)):\n        return np.array(x)\n    return backend.convert_to_numpy(x)", "docstring": "Convert a tensor to a NumPy array.\n\nArgs:\nx: A tensor.\n\nReturns:\nA NumPy array.", "source": "github-repos"}
{"code": "def _process_arguments(arguments):\n    if (arguments is None):\n        return ''\n    result = ''\n    for (key, value) in arguments.items():\n        if (not key.startswith('bokeh-')):\n            result += '&{}={}'.format(quote_plus(str(key)), quote_plus(str(value)))\n    return result", "docstring": "Return user-supplied HTML arguments to add to a Bokeh server URL.\n\nArgs:\narguments (dict[str, object]) :\nKey/value pairs to add to the URL\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def serialize_many_sparse_v2(sp_input, out_type=dtypes.string, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    return gen_sparse_ops.serialize_many_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type)", "docstring": "Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.\n\nThe `SparseTensor` must have rank `R` greater than 1, and the first dimension\nis treated as the minibatch dimension.  Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension.  The serialized\n`SparseTensor` objects going into each row of the output `Tensor` will have\nrank `R-1`.\n\nThe minibatch size `N` is extracted from `sparse_shape[0]`.\n\nArgs:\nsp_input: The input rank `R` `SparseTensor`.\nout_type: The `dtype` to use for serialization.\nname: A name prefix for the returned tensors (optional).\n\nReturns:\nA matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column\nrepresents serialized `SparseTensor`'s indices, values, and shape\n(respectively).\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"}
{"code": "def __frontend_limit_rules_descriptor(self, api_info):\n    if (not api_info.frontend_limits.rules):\n        return None\n    rules = []\n    for rule in api_info.frontend_limits.rules:\n        descriptor = {}\n        for (propname, descname) in (('match', 'match'), ('qps', 'qps'), ('user_qps', 'userQps'), ('daily', 'daily'), ('analytics_id', 'analyticsId')):\n            if (getattr(rule, propname) is not None):\n                descriptor[descname] = getattr(rule, propname)\n        if descriptor:\n            rules.append(descriptor)\n    return rules", "docstring": "Builds a frontend limit rules descriptor from API info.\n\nArgs:\napi_info: An _ApiInfo object.\n\nReturns:\nA list of dictionaries with frontend limit rules information.", "source": "codesearchnet"}
{"code": "def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Laplace'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[loc, scale]) as name:\n        with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []):\n            self._loc = array_ops.identity(loc, name='loc')\n            self._scale = array_ops.identity(scale, name='scale')\n            check_ops.assert_same_float_dtype([self._loc, self._scale])\n        super(Laplace, self).__init__(dtype=self._loc.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name)", "docstring": "Construct Laplace distribution with parameters `loc` and `scale`.\n\nThe parameters `loc` and `scale` must be shaped in a way that supports\nbroadcasting (e.g., `loc / scale` is a valid operation).\n\nArgs:\nloc: Floating point tensor which characterizes the location (center)\nof the distribution.\nscale: Positive floating point tensor which characterizes the spread of\nthe distribution.\nvalidate_args: Python `bool`, default `False`. When `True` distribution\nparameters are checked for validity despite possibly degrading runtime\nperformance. When `False` invalid inputs may silently render incorrect\noutputs.\nallow_nan_stats: Python `bool`, default `True`. When `True`,\nstatistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\nindicate the result is undefined. When `False`, an exception is raised\nif one or more of the statistic's batch members are undefined.\nname: Python `str` name prefixed to Ops created by this class.\n\nRaises:\nTypeError: if `loc` and `scale` are of different dtype.", "source": "github-repos"}
{"code": "def get_filetypes_info(editor_quote='`', flag_leaf=True):\n    NONE_REPL = ''\n    import f311\n    data = []\n    for attr in f311.classes_file(flag_leaf):\n        description = a99.get_obj_doc0(attr)\n        def_ = (NONE_REPL if (attr.default_filename is None) else attr.default_filename)\n        ee = attr.editors\n        if (ee is None):\n            ee = NONE_REPL\n        else:\n            ee = ', '.join(['{0}{1}{0}'.format(editor_quote, x, editor_quote) for x in ee])\n        data.append({'description': description, 'default_filename': def_, 'classname': attr.__name__, 'editors': ee, 'class': attr, 'txtbin': ('text' if attr.flag_txt else 'binary')})\n    data.sort(key=(lambda x: x['description']))\n    return data", "docstring": "Reports available data types\n\nArgs:\neditor_quote: character to enclose the name of the editor script between.\nflag_leaf: see tabulate_filetypes_rest()\n\nReturns:\nlist: list of FileTypeInfo", "source": "codesearchnet"}
{"code": "def cancel_id(cls, id):\n        \n        conn = Qubole.agent()\n        data = {\"status\": \"kill\"}\n        return conn.put(cls.element_path(id), data)", "docstring": "Cancels command denoted by this id\n\nArgs:\n`id`: command id", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    cpio_archive_file = cpio.CPIOArchiveFile()\n    try:\n      cpio_archive_file.Open(file_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._cpio_archive_file = cpio_archive_file", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def find_resistance(record):\n    \n    for feature in record.features:\n        labels = set(feature.qualifiers.get(\"label\", []))\n        cassettes = labels.intersection(_ANTIBIOTICS)\n        if len(cassettes) > 1:\n            raise RuntimeError(\"multiple resistance cassettes detected\")\n        elif len(cassettes) == 1:\n            return _ANTIBIOTICS.get(cassettes.pop())\n    raise RuntimeError(\"could not find the resistance of '{}'\".format(record.id))", "docstring": "Infer the antibiotics resistance of the given record.\n\nArguments:\nrecord (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.\n\nRaises:\nRuntimeError: when there's not exactly one resistance cassette.", "source": "juraj-google-style"}
{"code": "def read_from_bigquery(*, table: Optional[str]=None, query: Optional[str]=None, row_restriction: Optional[str]=None, fields: Optional[Iterable[str]]=None):\n    if query is None:\n        assert table is not None\n    else:\n        assert table is None and row_restriction is None and (fields is None)\n    return ReadFromBigQuery(query=query, table=table, row_restriction=row_restriction, selected_fields=fields, method='DIRECT_READ', output_type='BEAM_ROW')", "docstring": "Reads data from BigQuery.\n\nExactly one of table or query must be set.\nIf query is set, neither row_restriction nor fields should be set.\n\nArgs:\ntable (str): The table to read from, specified as `DATASET.TABLE`\nor `PROJECT:DATASET.TABLE`.\nquery (str): A query to be used instead of the table argument.\nrow_restriction (str): Optional SQL text filtering statement, similar to a\nWHERE clause in a query. Aggregates are not supported. Restricted to a\nmaximum length for 1 MB.\nselected_fields (list[str]): Optional List of names of the fields in the\ntable that should be read. If empty, all fields will be read. If the\nspecified field is a nested field, all the sub-fields in the field will be\nselected. The output field order is unrelated to the order of fields\ngiven here.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    display_name = parser_mediator.GetDisplayName()\n    self.ParseFileLNKFile(parser_mediator, file_object, display_name)", "docstring": "Parses a Windows Shortcut (LNK) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def add(self, *value):\n    flattenedValueList = list(flatten(value))\n    return self._add(flattenedValueList, self.value)", "docstring": "convert value and add to self.value\n\nSubclass must overwrite this method.\nSubclass are responsible of creating whatever single instance it need from its ``add(*value)`` and call ``_add()`` to add them to ``self.value``\n\nArgs:\n*value: the value to be added", "source": "codesearchnet"}
{"code": "def _UploadChunk(self, chunk):\n    \n    blob = _CompressedDataBlob(chunk)\n\n    self._action.ChargeBytesToSession(len(chunk.data))\n    self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID)\n\n    return rdf_client_fs.BlobImageChunkDescriptor(\n        digest=hashlib.sha256(chunk.data).digest(),\n        offset=chunk.offset,\n        length=len(chunk.data))", "docstring": "Uploads a single chunk to the transfer store flow.\n\nArgs:\nchunk: A chunk to upload.\n\nReturns:\nA `BlobImageChunkDescriptor` object.", "source": "juraj-google-style"}
{"code": "def set_syslog_server(server=None, type=\"primary\"):\n    \n\n    if not server:\n        raise salt.exceptions.CommandExecutionError(\"The SYSLOG server must be specified.\")\n\n    if type == \"primary\":\n        dn = \"sys/svc-ext/syslog/client-primary\"\n        inconfig = .format(server)\n    elif type == \"secondary\":\n        dn = \"sys/svc-ext/syslog/client-secondary\"\n        inconfig = .format(server)\n    else:\n        raise salt.exceptions.CommandExecutionError(\"The SYSLOG type must be either primary or secondary.\")\n\n    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)\n\n    return ret", "docstring": "Set the SYSLOG server on the host.\n\nArgs:\nserver(str): The hostname or IP address of the SYSLOG server.\n\ntype(str): Specifies the type of SYSLOG server. This can either be primary (default) or secondary.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' cimc.set_syslog_server foo.bar.com\n\nsalt '*' cimc.set_syslog_server foo.bar.com primary\n\nsalt '*' cimc.set_syslog_server foo.bar.com secondary", "source": "juraj-google-style"}
{"code": "def _build_http_client(cls, session: AppSession):\n    stream_factory = functools.partial(HTTPStream, ignore_length=session.args.ignore_length, keep_alive=session.args.http_keep_alive)\n    return session.factory.new('HTTPClient', connection_pool=session.factory['ConnectionPool'], stream_factory=stream_factory)", "docstring": "Create the HTTP client.\n\nReturns:\nClient: An instance of :class:`.http.Client`.", "source": "codesearchnet"}
{"code": "def counter(urn: str, labels: Optional[Dict[str, str]]=None, process_wide: bool=False) -> UserMetrics.DelegatingCounter:\n    return UserMetrics.DelegatingCounter(MetricName(namespace=None, name=None, urn=urn, labels=labels), process_wide=process_wide)", "docstring": "Obtains or creates a Counter metric.\n\nArgs:\nnamespace: A class or string that gives the namespace to a metric\nname: A string that gives a unique name to a metric\nurn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.\nlabels: Labels to populate on a MonitoringInfo\nprocess_wide: Whether or not the metric is specific to the current bundle\nor should be calculated for the entire process.\n\nReturns:\nA Counter object.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n    hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)\n    gate_up = torch.bmm(hidden_states, self.gate_up_proj)\n    gate, up = gate_up.chunk(2, dim=-1)\n    next_states = torch.bmm(up * self.act_fn(gate), self.down_proj)\n    next_states = next_states.view(-1, self.hidden_size)\n    return next_states", "docstring": "This should really not be run on a single machine, as we are reaching compute bound:\n- the inputs are expected to be \"sorted\" per expert already.\n- the weights are viewed with another dim, to match num_expert, 1, shape * num_tokens, shape\n\nArgs:\nhidden_states (torch.Tensor): (batch_size * token_num, hidden_size)\nselected_experts (torch.Tensor): (batch_size * token_num, top_k)\nrouting_weights (torch.Tensor): (batch_size * token_num, top_k)\nReturns:\ntorch.Tensor", "source": "github-repos"}
{"code": "def _sym_inferred(self, key: str, **kwargs):\n    if key not in self._sym_attributes:\n        raise AttributeError(key)\n    v = pg_utils.contextual.get_scoped_value(self._contextual_overrides, key)\n    if v is not None:\n        return v.value\n    override = pg_utils.contextual.get_contextual_override(key)\n    if override and override.override_attrs:\n        return override.value\n    return super()._sym_inferred(key, context_override=override, **kwargs)", "docstring": "Override to allow attribute to access scoped value.\n\nArgs:\nkey: attribute name.\n**kwargs: Optional keyword arguments for value inference.\n\nReturns:\nThe value of the symbolic attribute. If not available, returns the\ndefault value.\n\nRaises:\nAttributeError: If the attribute does not exist or contextual attribute\nis not ready.", "source": "github-repos"}
{"code": "def list_container_services(access_token, subscription_id, resource_group):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', resource_group,\n                        '/providers/Microsoft.ContainerService/ContainerServices',\n                        '?api-version=', ACS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container services in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON model.", "source": "juraj-google-style"}
{"code": "def load_manual_sequence(self, seq, ident=None, write_fasta_file=False, outdir=None, set_as_representative=False, force_rewrite=False):\n    if write_fasta_file:\n        if (not outdir):\n            outdir = self.sequence_dir\n            if (not outdir):\n                raise ValueError('Output directory must be specified')\n        outfile = op.join(outdir, '{}.faa'.format(ident))\n    else:\n        outfile = None\n    if (isinstance(seq, str) or isinstance(seq, Seq)):\n        if (not ident):\n            raise ValueError('ID must be specified if sequence is a string or Seq object')\n        manual_sequence = SeqProp(id=ident, seq=seq)\n    else:\n        if (not ident):\n            ident = seq.id\n        else:\n            seq.id = ident\n        manual_sequence = SeqProp(id=ident, seq=seq, name=seq.name, description=seq.description)\n    if write_fasta_file:\n        manual_sequence.write_fasta_file(outfile=outfile, force_rerun=force_rewrite)\n    self.sequences.append(manual_sequence)\n    if set_as_representative:\n        self.representative_sequence = manual_sequence\n    return self.sequences.get_by_id(ident)", "docstring": "Load a manual sequence given as a string and optionally set it as the representative sequence.\nAlso store it in the sequences attribute.\n\nArgs:\nseq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object\nident (str): Optional identifier for the sequence, required if seq is a string. Also will override existing\nIDs in Seq or SeqRecord objects if set.\nwrite_fasta_file (bool): If this sequence should be written out to a FASTA file\noutdir (str): Path to output directory\nset_as_representative (bool): If this sequence should be set as the representative one\nforce_rewrite (bool): If the FASTA file should be overwritten if it already exists\n\nReturns:\nSeqProp: Sequence that was loaded into the ``sequences`` attribute", "source": "codesearchnet"}
{"code": "def is_finite(val_1, val_2=None):\n    val_1_finite = (tf.math.is_finite(val_1.f) & tf.math.is_finite(val_1.df))\n    if (val_2 is not None):\n        return ((val_1_finite & tf.math.is_finite(val_2.f)) & tf.math.is_finite(val_2.df))\n    return val_1_finite", "docstring": "Checks if the supplied values are finite.\n\nArgs:\nval_1: A namedtuple instance with the function value and derivative,\nas returned e.g. by value_and_gradients_function evaluations.\nval_2: (Optional) A namedtuple instance with the function value and\nderivative, as returned e.g. by value_and_gradients_function evaluations.\n\nReturns:\nis_finite: Scalar boolean `Tensor` indicating whether the function value\nand the derivative in `val_1` (and optionally in `val_2`) are all finite.", "source": "codesearchnet"}
{"code": "def load_variable(ckpt_dir_or_file, name):\n    if name.endswith(':0'):\n        name = name[:-2]\n    reader = load_checkpoint(ckpt_dir_or_file)\n    return reader.get_tensor(name)", "docstring": "Returns the tensor value of the given variable in the checkpoint.\n\nWhen the variable name is unknown, you can use `tf.train.list_variables` to\ninspect all the variable names.\n\nExample usage:\n\n```python\nimport tensorflow as tf\na = tf.Variable(1.0)\nb = tf.Variable(2.0)\nckpt = tf.train.Checkpoint(var_list={'a': a, 'b': b})\nckpt_path = ckpt.save('tmp-ckpt')\nvar= tf.train.load_variable(\nckpt_path, 'var_list/a/.ATTRIBUTES/VARIABLE_VALUE')\nprint(var)  # 1.0\n```\n\nArgs:\nckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\nname: Name of the variable to return.\n\nReturns:\nA numpy `ndarray` with a copy of the value of this variable.", "source": "github-repos"}
{"code": "def send(self, data_to_send):\n        \n        request_payload = json.dumps([ a.write() for a in data_to_send ])\n\n        request = HTTPClient.Request(self._service_endpoint_uri, bytearray(request_payload, 'utf-8'), { 'Accept': 'application/json', 'Content-Type' : 'application/json; charset=utf-8' })\n        try:\n            response = HTTPClient.urlopen(request, timeout=self._timeout)\n            status_code = response.getcode()\n            if 200 <= status_code < 300:\n                return\n        except HTTPError as e:\n            if e.getcode() == 400:\n                return\n        except Exception as e:\n            pass\n\n        \n        for data in data_to_send:\n            self._queue.put(data)", "docstring": "Immediately sends the data passed in to :func:`service_endpoint_uri`. If the service request fails, the\npassed in items are pushed back to the :func:`queue`.\n\nArgs:\ndata_to_send (Array): an array of :class:`contracts.Envelope` objects to send to the service.", "source": "juraj-google-style"}
{"code": "def get_text_config(self, decoder=False) -> 'PretrainedConfig':\n    decoder_possible_text_config_names = ('decoder', 'generator', 'text_config')\n    encoder_possible_text_config_names = ('text_encoder',)\n    if decoder:\n        possible_text_config_names = decoder_possible_text_config_names\n    else:\n        possible_text_config_names = encoder_possible_text_config_names + decoder_possible_text_config_names\n    valid_text_config_names = []\n    for text_config_name in possible_text_config_names:\n        if hasattr(self, text_config_name):\n            text_config = getattr(self, text_config_name, None)\n            if text_config is not None:\n                valid_text_config_names += [text_config_name]\n    if len(valid_text_config_names) > 1:\n        raise ValueError(f'Multiple valid text configs were found in the model config: {valid_text_config_names}. In this case, using `get_text_config()` would be ambiguous. Please specify the desied text config directly.')\n    elif len(valid_text_config_names) == 1:\n        config_to_return = getattr(self, valid_text_config_names[0])\n    else:\n        config_to_return = self\n    return config_to_return", "docstring": "Returns the config that is meant to be used with text IO. On most models, it is the original config instance\nitself. On specific composite models, it is under a set of valid names.\n\nArgs:\ndecoder (`Optional[bool]`, *optional*, defaults to `False`):\nIf set to `True`, then only search for decoder config names.", "source": "github-repos"}
{"code": "def create_list(self, list_json):\n    return trolly.list.List(trello_client=self, list_id=list_json['id'], name=list_json['name'], data=list_json)", "docstring": "Create List object from JSON object\n\nReturns:\nList: The list from the given `list_json`.", "source": "codesearchnet"}
{"code": "def view_edit(name=None):\n    response.set_header('Cache-control', 'no-cache')\n    response.set_header('Pragma', 'no-cache')\n    if (name is None):\n        return template('edit', type='edit', name=name, extended_name=None, is_repo=check_repo(), history=[], gitref=None, today=datetime.datetime.now().strftime('%Y%m%d'), content='')\n    else:\n        files = glob.glob('{0}.rst'.format(name))\n        if (len(files) > 0):\n            file_handle = open(files[0], 'r')\n            return template('edit', type='edit', name=name, extended_name=None, is_repo=check_repo(), history=[], gitref=None, today=datetime.datetime.now().strftime('%Y%m%d'), content=file_handle.read())\n        else:\n            return abort(404)", "docstring": "Edit or creates a new page.\n\n.. note:: this is a bottle view\n\nif no page name is given, creates a new page.\n\nKeyword Arguments:\n:name: (str) -- name of the page (OPTIONAL)\n\nReturns:\nbottle response object", "source": "codesearchnet"}
{"code": "def normalize_expression(self, expression_parts):\n        \n        \n        expression_parts[3] = expression_parts[3].replace(\"?\", \"*\")\n        expression_parts[5] = expression_parts[5].replace(\"?\", \"*\")\n\n        \n        if expression_parts[0].startswith(\"0/\"):\n            expression_parts[0] = expression_parts[\n                0].replace(\"0/\", \"*/\")  \n\n        if expression_parts[1].startswith(\"0/\"):\n            expression_parts[1] = expression_parts[\n                1].replace(\"0/\", \"*/\")  \n\n        if expression_parts[2].startswith(\"0/\"):\n            expression_parts[2] = expression_parts[\n                2].replace(\"0/\", \"*/\")  \n\n        if expression_parts[3].startswith(\"1/\"):\n            expression_parts[3] = expression_parts[3].replace(\"1/\", \"*/\")  \n\n        if expression_parts[4].startswith(\"1/\"):\n            expression_parts[4] = expression_parts[\n                4].replace(\"1/\", \"*/\")  \n\n        if expression_parts[5].startswith(\"1/\"):\n            expression_parts[5] = expression_parts[5].replace(\"1/\", \"*/\")  \n\n        if expression_parts[6].startswith(\"1/\"):\n            expression_parts[6] = expression_parts[6].replace(\"1/\", \"*/\")\n\n        \n        if self._options.day_of_week_start_index_zero is False:\n            expression_parts[5] = self.decrease_days_of_week(expression_parts[5])\n\n        if expression_parts[3] == \"?\":\n            expression_parts[3] = \"*\"\n\n        \n        for day_number in self._cron_days:\n            expression_parts[5] = expression_parts[5].upper().replace(self._cron_days[day_number], str(day_number))\n\n        \n        for month_number in self._cron_months:\n            expression_parts[4] = expression_parts[4].upper().replace(\n                self._cron_months[month_number], str(month_number))\n\n        \n        if expression_parts[0] == \"0\":\n            expression_parts[0] = ''\n\n        \n        length = len(expression_parts)\n        for i in range(length):\n\n            \n            if expression_parts[i] == \"*/1\":\n                expression_parts[i] = \"*\"\n\n            \n\n            if \"/\" in expression_parts[i] and any(exp in expression_parts[i] for exp in ['*', '-', ',']) is False:\n                choices = {\n                    4: \"12\",\n                    5: \"6\",\n                    6: \"9999\"\n                }\n\n                step_range_through = choices.get(i)\n\n                if step_range_through is not None:\n                    parts = expression_parts[i].split('/')\n                    expression_parts[i] = \"{0}-{1}/{2}\".format(parts[0], step_range_through, parts[1])", "docstring": "Converts cron expression components into consistent, predictable formats.\nArgs:\nexpression_parts: A 7 part string array, one part for each component of the cron expression\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def execute(command, cwd=os.path.curdir, **options):\n    \n    process = subprocess.Popen(shlex.split(command), cwd=cwd, **options)\n    stdout, stderr = process.communicate() \n    return process, stdout, stderr", "docstring": "Run the system command with optional options.\n\nArgs:\n* command: system command.\n* cwd: current working directory.\n* verbose: direct options for :func:`subprocess.Popen`.\n\nReturns:\nOpened process, standard output & error.", "source": "juraj-google-style"}
{"code": "async def upload_artifacts(context, files):\n\n    def to_upload_future(target_path):\n        path = os.path.join(context.config['artifact_dir'], target_path)\n        (content_type, content_encoding) = compress_artifact_if_supported(path)\n        return asyncio.ensure_future(retry_create_artifact(context, path, target_path=target_path, content_type=content_type, content_encoding=content_encoding))\n    tasks = list(map(to_upload_future, files))\n    (await raise_future_exceptions(tasks))", "docstring": "Compress and upload the requested files from ``artifact_dir``, preserving relative paths.\n\nCompression only occurs with files known to be supported.\n\nThis function expects the directory structure in ``artifact_dir`` to remain\nthe same.  So if we want the files in ``public/...``, create an\n``artifact_dir/public`` and put the files in there.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nfiles (list of str): files that should be uploaded as artifacts\n\nRaises:\nException: any exceptions the tasks raise.", "source": "codesearchnet"}
{"code": "def add_vtep(self, name, vtep, vlan=None):\n        \n        if not vlan:\n            cmd = 'vxlan flood vtep add {}'.format(vtep)\n        else:\n            cmd = 'vxlan vlan {} flood vtep add {}'.format(vlan, vtep)\n        return self.configure_interface(name, cmd)", "docstring": "Adds a new VTEP endpoint to the global or local flood list\n\nEosVersion:\n4.13.7M\n\nArgs:\nname (str): The name of the interface to configure\nvtep (str): The IP address of the remote VTEP endpoint to add\nvlan (str): The VLAN ID associated with this VTEP.  If the VLAN\nkeyword is used, then the VTEP is configured as a local flood\nendpoing\n\nReturns:\nTrue if the command completes successfully", "source": "juraj-google-style"}
{"code": "def _document_path(self):\n    if (self._document_path_internal is None):\n        if (self._client is None):\n            raise ValueError('A document reference requires a `client`.')\n        self._document_path_internal = _get_document_path(self._client, self._path)\n    return self._document_path_internal", "docstring": "Create and cache the full path for this document.\n\nOf the form:\n\n``projects/{project_id}/databases/{database_id}/...\ndocuments/{document_path}``\n\nReturns:\nstr: The full document path.\n\nRaises:\nValueError: If the current document reference has no ``client``.", "source": "codesearchnet"}
{"code": "def get(self, resource):\n        \n        return self.service.get(\n            resource, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Get attributes of the data model object named by the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed.\n\nReturns:\n(intern.resource.boss.BossResource): Returns resource of type requested on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def make_tables(grammar, precedence):\n    ACTION = {}\n    GOTO = {}\n    labels = {}\n\n    def get_label(closure):\n        if (closure not in labels):\n            labels[closure] = len(labels)\n        return labels[closure]\n\n    def resolve_shift_reduce(lookahead, s_action, r_action):\n        (s_assoc, s_level) = precedence[lookahead]\n        (r_assoc, r_level) = precedence[r_action[1]]\n        if (s_level < r_level):\n            return r_action\n        elif ((s_level == r_level) and (r_assoc == LEFT)):\n            return r_action\n        else:\n            return s_action\n    (initial, closures, goto) = grammar.closures()\n    for closure in closures:\n        label = get_label(closure)\n        for rule in closure:\n            (new_action, lookahead) = (None, rule.lookahead)\n            if (not rule.at_end):\n                symbol = rule.rhs[rule.pos]\n                is_terminal = (symbol in grammar.terminals)\n                has_goto = (symbol in goto[closure])\n                if (is_terminal and has_goto):\n                    next_state = get_label(goto[closure][symbol])\n                    (new_action, lookahead) = (('shift', next_state), symbol)\n            elif ((rule.production == grammar.start) and rule.at_end):\n                new_action = ('accept',)\n            elif rule.at_end:\n                new_action = ('reduce', rule.production)\n            if (new_action is None):\n                continue\n            prev_action = ACTION.get((label, lookahead))\n            if ((prev_action is None) or (prev_action == new_action)):\n                ACTION[(label, lookahead)] = new_action\n            else:\n                types = (prev_action[0], new_action[0])\n                if (types == ('shift', 'reduce')):\n                    chosen = resolve_shift_reduce(lookahead, prev_action, new_action)\n                elif (types == ('reduce', 'shift')):\n                    chosen = resolve_shift_reduce(lookahead, new_action, prev_action)\n                else:\n                    raise TableConflictError(prev_action, new_action)\n                ACTION[(label, lookahead)] = chosen\n        for symbol in grammar.nonterminals:\n            if (symbol in goto[closure]):\n                GOTO[(label, symbol)] = get_label(goto[closure][symbol])\n    return (get_label(initial), ACTION, GOTO)", "docstring": "Generates the ACTION and GOTO tables for the grammar.\n\nReturns:\naction - dict[state][lookahead] = (action, ...)\ngoto - dict[state][just_reduced] = new_state", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    try:\n      structure = self._LOG_LINE.parseString(line)\n    except pyparsing.ParseException:\n      logger.debug('Not a Sophos Anti-Virus log file')\n      return False\n\n    \n    if ' ' not in (line[8], line[15]):\n      logger.debug('Not a Sophos Anti-Virus log file')\n      return False\n\n    try:\n      dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=structure.date_time)\n    except ValueError:\n      logger.debug((\n          'Not a Sophos Anti-Virus log file, invalid date and time: '\n          '{0!s}').format(structure.date_time))\n      return False\n\n    return True", "docstring": "Verify that this file is a Sophos Anti-Virus log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfVFS.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"}
{"code": "def from_concrete_function(concrete_fn, specialized_flat_specs: Optional[List[tensor_spec.TensorSpec]]=None):\n    context.ensure_initialized()\n    fn_name = concrete_fn.name\n    filtered_flat_specs = specialized_flat_specs or list(nest.flatten(concrete_fn.structured_input_signature))\n    if not all((s.shape.is_fully_defined() for s in filtered_flat_specs)):\n        raise ValueError(f'Only support static input shape but got inputs = {concrete_fn.inputs}')\n\n    def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None):\n        \n        if device_name is not None:\n            if platform_name is not None:\n                raise ValueError('device_name and platform_name cannot be provided at the same time.')\n            warnings.warn('device_name is being deprecated. Use platform_name.')\n        device_name = maybe_get_device_name(device_name)\n        res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=filtered_flat_specs, captured_inputs=concrete_fn.captured_inputs, stage=stage)\n        if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'):\n            return res_bytes\n        else:\n            return res_bytes.decode('utf-8')\n    return compiler_ir_generator", "docstring": "Generate the Compiler Ir from tf concrete function with TensorSpec.\n\nArgs:\nconcrete_fn: returned by using get_concrete_function.\nspecialized_flat_specs: specialized flat tf.TensorSpecs for function args.\n\nReturns:\nFunction callable that generate the HLO text.\n\nRaises:\nValueError: if concrete_fn is not \"compilable\" without concrete\ninputs.", "source": "github-repos"}
{"code": "def artifact(self, counter, stage, job, stage_counter=1):\n        \n        return Artifact(self.server, self.name, counter, stage, job, stage_counter)", "docstring": "Helper to instantiate an :class:`gocd.api.artifact.Artifact` object\n\nArgs:\ncounter (int): The pipeline counter to get the artifact for\nstage: Stage name\njob: Job name\nstage_counter: Defaults to 1\n\nReturns:\nArtifact: :class:`gocd.api.artifact.Artifact` object", "source": "juraj-google-style"}
{"code": "def _GetUnifiedDiff(before, after, filename='code'):\n    before = before.splitlines()\n    after = after.splitlines()\n    return '\\n'.join(difflib.unified_diff(before, after, filename, filename, '(original)', '(reformatted)', lineterm='')) + '\\n'", "docstring": "Get a unified diff of the changes.\n\nArguments:\nbefore: (unicode) The original source code.\nafter: (unicode) The reformatted source code.\nfilename: (unicode) The code's filename.\n\nReturns:\nThe unified diff text.", "source": "github-repos"}
{"code": "def flatten(array: _ArrayT, pattern: str) -> tuple[_ArrayT, _Shape]:\n    array, (batch_shape,) = einops.pack([array], pattern.replace('...', '*'))\n    return (array, tuple(batch_shape))", "docstring": "Flatten an array along custom dimensions.\n\nUses `einops` syntax.\n\n```python\nflat_x, batch_shape = enp.flatten(x, '... h w c')\ny = enp.unflatten(y, batch_shape, '... h w c')\n```\n\n* `x.shape == (h, w, c)`       -> `flat_x.shape == (1, h, w, c)`\n* `x.shape == (b, h, w, c)`    -> `flat_x.shape == (b, h, w, c)`\n* `x.shape == (b, n, h, w, c)` -> `flat_x.shape == (b * n, h, w, c)`\n\nArgs:\narray: Array to flatten.\npattern: Einops pattern to flatten the array.\n\nReturns:\nTuple of (flattened array, batch shape).", "source": "github-repos"}
{"code": "def _update_flags(compiler_flags, remove_flags=()):\n    \n    for flag in GFORTRAN_SHARED_FLAGS:\n        if flag not in compiler_flags:\n            compiler_flags.append(flag)\n    if DEBUG_ENV in os.environ:\n        to_add = GFORTRAN_DEBUG_FLAGS\n        to_remove = GFORTRAN_OPTIMIZE_FLAGS\n    else:\n        to_add = GFORTRAN_OPTIMIZE_FLAGS\n        if os.environ.get(WHEEL_ENV) is None:\n            to_add += (GFORTRAN_NATIVE_FLAG,)\n        to_remove = GFORTRAN_DEBUG_FLAGS\n    for flag in to_add:\n        if flag not in compiler_flags:\n            compiler_flags.append(flag)\n    return [\n        flag\n        for flag in compiler_flags\n        if not (flag in to_remove or flag in remove_flags)\n    ]", "docstring": "Update a given set of compiler flags.\n\nArgs:\ncompiler_flags (List[str]): Existing flags associated with a compiler.\nremove_flags (Optional[Container[str]]): A container of flags to remove\nthat will override any of the defaults.\n\nReturns:\nList[str]: The modified list (i.e. some flags added and some removed).", "source": "juraj-google-style"}
{"code": "def target_code_to_name(code):\n    \n    TARGET_NAMES = {v: k for k, v in TARGET_CODES.items()}\n    return TARGET_NAMES[code]", "docstring": "Converts an int target code to a target name\n\nSince self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup\nto get the more readable name.\n\nArgs:\ncode: Value from self.TARGET_CODES\n\nReturns:\nString target name corresponding to the given code.", "source": "juraj-google-style"}
{"code": "def delete_customer(self, customer_id):\n    return self.client._delete((self.url + 'customers/{}'.format(customer_id)), headers=self.get_headers())", "docstring": "Removes a user from the system.\n\nArgs:\ncustomer_id: Identifier of the client to be deleted.\n\nReturns:", "source": "codesearchnet"}
{"code": "def _convert_schemas(mapping, schemas):\n    schemas = deepcopy(schemas)\n    for schema in schemas:\n        for fk in schema.get('foreignKeys', []):\n            resource = fk['reference']['resource']\n            if (resource != 'self'):\n                if (resource not in mapping):\n                    message = 'Not resource \"%s\" for foreign key \"%s\"'\n                    message = (message % (resource, fk))\n                    raise ValueError(message)\n                fk['reference']['resource'] = mapping[resource]\n    return schemas", "docstring": "Convert schemas to be compatible with storage schemas.\n\nForeign keys related operations.\n\nArgs:\nmapping (dict): mapping between resource name and table name\nschemas (list): schemas\n\nRaises:\nValueError: if there is no resource\nfor some foreign key in given mapping\n\nReturns:\nlist: converted schemas", "source": "codesearchnet"}
{"code": "def subscribe(self, subject, callback, queue=''):\n        \n        s = Subscription(\n            sid=self._next_sid,\n            subject=subject,\n            queue=queue,\n            callback=callback,\n            connetion=self\n        )\n\n        self._subscriptions[s.sid] = s\n        self._send('SUB %s %s %d' % (s.subject, s.queue, s.sid))\n        self._next_sid += 1\n\n        return s", "docstring": "Subscribe will express interest in the given subject. The subject can\nhave wildcards (partial:*, full:>). Messages will be delivered to the\nassociated callback.\n\nArgs:\nsubject (string): a string with the subject\ncallback (function): callback to be called", "source": "juraj-google-style"}
{"code": "def to_dict(cls):\n    return dict(((item.name, item.number) for item in iter(cls)))", "docstring": "Make dictionary version of enumerated class.\n\nDictionary created this way can be used with def_num.\n\nReturns:\nA dict (name) -> number", "source": "codesearchnet"}
{"code": "def __call__(self, *args: Union[str, 'Image.Image', List['Image.Image'], List[str]], **kwargs: Any) -> List[Any]:\n    return super().__call__(*args, **kwargs)", "docstring": "Extract the features of the input(s).\n\nArgs:\nimages (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):\nThe pipeline handles three types of images:\n\n- A string containing a http link pointing to an image\n- A string containing a local path to an image\n- An image loaded in PIL directly\n\nThe pipeline accepts either a single image or a batch of images, which must then be passed as a string.\nImages in a batch must all be in the same format: all as http links, all as local paths, or all as PIL\nimages.\ntimeout (`float`, *optional*, defaults to None):\nThe maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and\nthe call may block forever.\nReturn:\nA nested list of `float`: The features computed by the model.", "source": "github-repos"}
{"code": "def remove(path, dir_fd=None):\n    system = get_instance(path)\n    if (system.is_locator(path) or (path[(- 1)] == '/')):\n        raise is_a_directory_error((\"Is a directory: '%s'\" % path))\n    system.remove(path)", "docstring": "Remove a file.\n\nEquivalent to \"os.remove\" and \"os.unlink\".\n\nArgs:\npath (path-like object): Path or URL.\ndir_fd: directory descriptors;\nsee the os.remove() description for how it is interpreted.\nNot supported on cloud storage objects.", "source": "codesearchnet"}
{"code": "def pop(self, name, defval=None):\n        \n        valu = self.info.pop(name, defval)\n        lkey = self.pref + name.encode('utf8')\n        self.slab.pop(lkey, db=self.db)\n        return valu", "docstring": "Pop a name from the SlabDict.\n\nArgs:\nname (str): The name to remove.\ndefval (obj): The default value to return if the name is not present.\n\nReturns:\nobject: The object stored in the SlabDict, or defval if the object was not present.", "source": "juraj-google-style"}
{"code": "def validate_session(self, token, remote='127.0.0.1', proxy=None):\n    params = {'validationFactors': [{'name': 'remote_address', 'value': remote}]}\n    if proxy:\n        params['validation-factors']['validationFactors'].append({'name': 'X-Forwarded-For', 'value': proxy})\n    url = (self.rest_url + ('/session/%s' % token))\n    response = self._post(url, data=json.dumps(params), params={'expand': 'user'})\n    if (not response.ok):\n        return None\n    return response.json()", "docstring": "Validate a session token.\n\nValidate a previously acquired session token against the\nCrowd server. This may be a token provided by a user from\na http cookie or by some other means.\n\nArgs:\ntoken: The session token.\n\nremote: The remote address of the user.\n\nproxy: Value of X-Forwarded-For server header\n\nReturns:\ndict:\nA dict mapping of user attributes if the application\nauthentication was successful. See the Crowd\ndocumentation for the authoritative list of attributes.\n\nNone: If authentication failed.", "source": "codesearchnet"}
{"code": "def parse_relations(\n    belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors\n) -> Tuple[Parsed, Errors]:\n    \n    quotes = char_locs[\"quotes\"]\n    quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])\n\n    for match in relations_pattern_middle.finditer(belstr):\n        (start, end) = match.span(1)\n        \n        end = end - 1  \n        if start != end:\n            test_range = set(range(start, end))\n        else:\n            test_range = set(start)\n\n        \n        if test_range.intersection(quoted_range):\n            continue\n\n        span_key = (start, end)\n        parsed[span_key] = {\n            \"type\": \"Relation\",\n            \"name\": match.group(1),\n            \"span\": (start, end),\n        }\n\n    for match in relations_pattern_end.finditer(belstr):\n        (start, end) = match.span(1)\n        log.debug(f\"Relation-end {match}\")\n        end = end - 1  \n        if start != end:\n            test_range = set(range(start, end))\n        else:\n            test_range = set(start)\n\n        \n        if test_range.intersection(quoted_range):\n            continue\n\n        span_key = (start, end)\n        parsed[span_key] = {\n            \"type\": \"Relation\",\n            \"name\": match.group(1),\n            \"span\": (start, end),\n        }\n\n    return parsed, errors", "docstring": "Parse relations from BEL string\n\nArgs:\nbelstr: BEL string as one single string (not list of chars)\nchar_locs: paren, comma and quote char locations\nparsed: data structure for parsed functions, relations, nested\nerrors: error messages\n\nReturns:\n(parsed, errors):", "source": "juraj-google-style"}
{"code": "def as_session(name_or_func):  \n    \n    if callable(name_or_func):  \n        func = name_or_func\n        name = func.__name__\n        name = \"\".join([(' ' + x) if x.isupper() else x for x in name])\n        name = name.replace('_', ' ')\n        return as_session(name)(func)  \n    else:\n        name = name_or_func\n\n    def get_func(func):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            start()\n            title(name)\n            result = func(*args, **kwargs)\n            end()\n            return result\n        return wrapper\n    return get_func", "docstring": "print start/title/end info before and after the function call\n\nArgs:\ntitle: title will show after the start, if has any", "source": "juraj-google-style"}
{"code": "def _read_git_tags(default_version=DEFAULT_VERSION, git_command=('git', 'tag')):\n    try:\n        current_tags = check_output(git_command).splitlines()\n    except Exception:\n        raise\n    if (not current_tags[0]):\n        warnings.warn('Unable to resolve current version', exceptions.ProsperDefaultVersionWarning)\n        return default_version\n    latest_version = semantic_version.Version(default_version)\n    for tag in current_tags:\n        tag_str = decode(tag, 'utf-8').replace('v', '')\n        try:\n            tag_ver = semantic_version.Version(tag_str)\n        except Exception:\n            continue\n        if (tag_ver > latest_version):\n            latest_version = tag_ver\n    return str(latest_version)", "docstring": "tries to find current git tag\n\nNotes:\ngit_command exposed for testing null case\n\nArgs:\ndefault_version (str): what version to make\ngit_command (:obj:`list`): subprocess command\n\nRetruns:\nstr: latest version found, or default\n\nWarns:\nexceptions.ProsperDefaultVersionWarning: git version not found", "source": "codesearchnet"}
{"code": "def save_pkl(filename=None, times=None):\n    \n    if times is None:\n        if not f.root.stopped:\n            times = collapse.collapse_times()\n        else:\n            times = f.root.times\n    else:\n        if isinstance(times, (list, tuple)):\n            for t in times:\n                if not isinstance(t, Times):\n                    raise TypeError(\"Expected single Times instance or list/tuple of Times instances for param 'times'.\")\n        elif not isinstance(times, Times):\n            raise TypeError(\"Expected single Times instance or list/tuple of Times instances for param 'times'.\")\n    if filename is not None:\n        with open(str(filename), 'wb') as file:\n            pickle.dump(times, file)\n    else:\n        return pickle.dumps(times)", "docstring": "Serialize and / or save a Times data object using pickle (cPickle).\n\nArgs:\nfilename (None, optional): Filename to dump to. If not provided,\nreturns serialized object.\ntimes (None, optional): object to dump.  If non provided, uses\ncurrent root.\n\nReturns:\npkl: Pickled Times data object, only if no filename provided.\n\nRaises:\nTypeError: If 'times' is not a Times object or a list of tuple of\nthem.", "source": "juraj-google-style"}
{"code": "async def receive(self, timeout: float = None) -> Union[Message, None]:\n        \n        if timeout:\n            coro = self.queue.get()\n            try:\n                msg = await asyncio.wait_for(coro, timeout=timeout)\n            except asyncio.TimeoutError:\n                msg = None\n        else:\n            try:\n                msg = self.queue.get_nowait()\n            except asyncio.QueueEmpty:\n                msg = None\n        return msg", "docstring": "Receives a message for this behaviour.\nIf timeout is not None it returns the message or \"None\"\nafter timeout is done.\n\nArgs:\ntimeout (float): number of seconds until return\n\nReturns:\nspade.message.Message: a Message or None", "source": "juraj-google-style"}
{"code": "def transform(self, program: moderngl.Program, buffer: moderngl.Buffer, mode=None, vertices=(- 1), first=0, instances=1):\n    vao = self.instance(program)\n    if (mode is None):\n        mode = self.mode\n    vao.transform(buffer, mode=mode, vertices=vertices, first=first, instances=instances)", "docstring": "Transform vertices. Stores the output in a single buffer.\n\nArgs:\nprogram: The ``moderngl.Program``\nbuffer: The ``moderngl.buffer`` to store the output\n\nKeyword Args:\nmode: Draw mode (for example ``moderngl.POINTS``)\nvertices (int): The number of vertices to transform\nfirst (int): The index of the first vertex to start with\ninstances (int): The number of instances", "source": "codesearchnet"}
{"code": "def resolve_lookups(variable, context, provider):\n    \n    resolved_lookups = {}\n    for lookup in variable.lookups:\n        try:\n            handler = LOOKUP_HANDLERS[lookup.type]\n        except KeyError:\n            raise UnknownLookupType(lookup)\n        try:\n            resolved_lookups[lookup] = handler(\n                value=lookup.input,\n                context=context,\n                provider=provider,\n            )\n        except Exception as e:\n            raise FailedVariableLookup(variable.name, lookup, e)\n    return resolved_lookups", "docstring": "Resolve a set of lookups.\n\nArgs:\nvariable (:class:`stacker.variables.Variable`): The variable resolving\nit's lookups.\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of the\nbase provider\n\nReturns:\ndict: dict of Lookup -> resolved value", "source": "juraj-google-style"}
{"code": "def reduce_by(self, package_request):\n    self.solver.reduction_broad_tests_count += 1\n    if self.package_request.conflict:\n        return (self, [])\n    (new_slice, reductions) = self.variant_slice.reduce_by(package_request)\n    if (new_slice is None):\n        self.solver.reductions_count += 1\n        if self.pr:\n            reqstr = _short_req_str(package_request)\n            self.pr('%s was reduced to nothing by %s', self, reqstr)\n            self.pr.br()\n        return (None, reductions)\n    if (new_slice is not self.variant_slice):\n        self.solver.reductions_count += 1\n        scope = self._copy(new_slice)\n        if self.pr:\n            reqstr = _short_req_str(package_request)\n            self.pr('%s was reduced to %s by %s', self, scope, reqstr)\n            self.pr.br()\n        return (scope, reductions)\n    return (self, [])", "docstring": "Reduce this scope wrt a package request.\n\nReturns:\nA (_PackageScope, [Reduction]) tuple, where the scope is a new\nscope copy with reductions applied, or self if there were no\nreductions, or None if the scope was completely reduced.", "source": "codesearchnet"}
{"code": "def infer_graph(inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode]) -> Graph:\n    graph = Graph()\n    graph.outputs.update(outputs)\n    pending_nodes: Set[EventSetNode] = outputs.copy()\n    done_nodes: Set[EventSetNode] = set()\n    missing_nodes: Set[EventSetNode] = set()\n    while pending_nodes:\n        node = next(iter(pending_nodes))\n        pending_nodes.remove(node)\n        assert node not in done_nodes\n        graph.add_node(node)\n        if inputs is not None and node in inputs:\n            graph.inputs.add(node)\n            continue\n        if node.creator is None:\n            if inputs is not None:\n                missing_nodes.add(node)\n            else:\n                graph.inputs.add(node)\n            continue\n        graph.add_operator(node.creator)\n        for input_node in node.creator.inputs.values():\n            if input_node in done_nodes:\n                continue\n            pending_nodes.add(input_node)\n        for output_node in node.creator.outputs.values():\n            graph.add_node(output_node)\n    if missing_nodes:\n        raise ValueError(f'The following input nodes are required but not provided as input:\\n{missing_nodes}')\n    for e in graph.nodes:\n        graph.add_sampling(e.sampling_node)\n        for f in e.feature_nodes:\n            graph.add_feature(f)\n    return graph", "docstring": "Extracts the nodes in between the output and input nodes.\n\nIf inputs is set, fails if outputs cannot be computed from `inputs`.\nIf inputs is not set, infers the required set of inputs.\n\nArgs:\ninputs: Set of available input nodes. If None, inputs are inferred.\noutputs: Set of expected output nodes.\n\nReturns:\nThe inferred graph.\n\nRaises:\nValueError: If there are repeated nodes in the `inputs`; an\nunexpected type of input is provided; an unnamed node is inferred\nas input; or some nodes are required but not provided.", "source": "github-repos"}
{"code": "def CreateAdGroup(client, campaign_id):\n  \n  ad_group_service = client.GetService('AdGroupService', 'v201809')\n\n  adgroup = {\n      \n      'adGroupType': 'SHOPPING_SHOWCASE_ADS',\n      'campaignId': campaign_id,\n      'name': 'AdGroup \n      \n      'biddingStrategyConfiguration': {\n          \n          'biddingStrategyType': 'MANUAL_CPC',\n          \n          'bids': [{\n              'xsi_type': 'CpcBid',\n              'bid': {\n                  'microAmount': 100000\n              }\n          }]\n      }\n  }\n\n  adgroup_operations = {\n      'operator': 'ADD',\n      'operand': adgroup\n  }\n\n  \n  adgroup = ad_group_service.mutate(adgroup_operations)['value'][0]\n\n  print ('AdGroup with name \"%s\" and ID \"%s\" was added.'\n         % (adgroup['name'], adgroup['id']))\n\n  return adgroup", "docstring": "Creates an AdGroup for the given shopping campaign ID.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: the str ID of a shopping campaign.\n\nReturns:\nThe created AdGroup as a sudsobject.", "source": "juraj-google-style"}
{"code": "def handle_or_else(self, orelse, test):\n        \n        if isinstance(orelse[0], ast.If):\n            control_flow_node = self.visit(orelse[0])\n            \n            control_flow_node.test.label = 'el' + control_flow_node.test.label\n\n            test.connect(control_flow_node.test)\n            return control_flow_node.last_nodes\n        else:\n            else_connect_statements = self.stmt_star_handler(\n                orelse,\n                prev_node_to_avoid=self.nodes[-1]\n            )\n            test.connect(else_connect_statements.first_statement)\n            return else_connect_statements.last_statements", "docstring": "Handle the orelse part of an if or try node.\n\nArgs:\norelse(list[Node])\ntest(Node)\n\nReturns:\nThe last nodes of the orelse branch.", "source": "juraj-google-style"}
{"code": "def _save_model(self, epoch, batch, logs):\n    filepath = self._get_file_path(epoch, batch, logs)\n    try:\n        if self._should_save_model(epoch, batch, logs, filepath):\n            dirname = os.path.dirname(filepath)\n            if dirname and (not file_utils.exists(dirname)):\n                file_utils.makedirs(dirname)\n            if self.save_weights_only:\n                self.model.save_weights(filepath, overwrite=True)\n            else:\n                self.model.save(filepath, overwrite=True)\n    except IsADirectoryError:\n        raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {filepath}')\n    except IOError as e:\n        if 'is a directory' in str(e.args[0]).lower():\n            raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: f{filepath}')\n        raise e", "docstring": "Saves the model.\n\nArgs:\nepoch: the epoch this iteration is in.\nbatch: the batch this iteration is in. `None` if the `save_freq`\nis set to `\"epoch\"`.\nlogs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.", "source": "github-repos"}
{"code": "def delete(self):\n    cmd = self.command_builder('ntp source', disable=True)\n    return self.configure(cmd)", "docstring": "Delete the NTP source entry from the node.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. The special tokens depend on calling set_lang.\n\nAn MBART sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def interconnects(self):\n    if (not self.__interconnects):\n        self.__interconnects = Interconnects(self.__connection)\n    return self.__interconnects", "docstring": "Gets the Interconnects API client.\n\nReturns:\nInterconnects:", "source": "codesearchnet"}
{"code": "def __init__(self, rate, validate_args=False, allow_nan_stats=True, name='Exponential'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[rate]) as name:\n        self._rate = ops.convert_to_tensor(rate, name='rate')\n    super(Exponential, self).__init__(concentration=array_ops.ones([], dtype=self._rate.dtype), rate=self._rate, allow_nan_stats=allow_nan_stats, validate_args=validate_args, name=name)\n    self._parameters = parameters\n    self._graph_parents += [self._rate]", "docstring": "Construct Exponential distribution with parameter `rate`.\n\nArgs:\nrate: Floating point tensor, equivalent to `1 / mean`. Must contain only\npositive values.\nvalidate_args: Python `bool`, default `False`. When `True` distribution\nparameters are checked for validity despite possibly degrading runtime\nperformance. When `False` invalid inputs may silently render incorrect\noutputs.\nallow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n(e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\nresult is undefined. When `False`, an exception is raised if one or\nmore of the statistic's batch members are undefined.\nname: Python `str` name prefixed to Ops created by this class.", "source": "github-repos"}
{"code": "def max_neighbor(self, in_lon, in_lat, radius=0.05):\n    out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))\n    in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)\n    out_indices = np.indices(out_data.shape[1:])\n    out_rows = out_indices[0].ravel()\n    out_cols = out_indices[1].ravel()\n    for d in range(self.data.shape[0]):\n        nz_points = np.where((self.data[d] > 0))\n        if (len(nz_points[0]) > 0):\n            nz_vals = self.data[d][nz_points]\n            nz_rank = np.argsort(nz_vals)\n            original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T)\n            all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0)\n            for (n, neighbors) in enumerate(all_neighbors):\n                if (len(neighbors) > 0):\n                    out_data[(d, out_rows[neighbors], out_cols[neighbors])] = nz_vals[nz_rank][n]\n    return out_data", "docstring": "Finds the largest value within a given radius of a point on the interpolated grid.\n\nArgs:\nin_lon: 2D array of longitude values\nin_lat: 2D array of latitude values\nradius: radius of influence for largest neighbor search in degrees\n\nReturns:\nArray of interpolated data", "source": "codesearchnet"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)\n\n    \n    \n    if store_index is None:\n      location = getattr(path_spec, 'location', None)\n      return location is not None and location == self.LOCATION_ROOT\n\n    return 0 <= store_index < self._vshadow_volume.number_of_stores", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"}
{"code": "def _create_L_ind(self, L):\n    if issparse(L):\n        L = L.todense()\n    L_ind = np.zeros((self.n, (self.m * self.k)))\n    for y in range(1, (self.k + 1)):\n        L_ind[(:, (y - 1)::self.k)] = np.where((L == y), 1, 0)\n    return L_ind", "docstring": "Convert a label matrix with labels in 0...k to a one-hot format\n\nArgs:\nL: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}\n\nReturns:\nL_ind: An [n,m*k] dense np.ndarray with values in {0,1}\n\nNote that no column is required for 0 (abstain) labels.", "source": "codesearchnet"}
{"code": "def __init__(self, context):\n    \n    self.multiplexer = context.multiplexer\n    self.logdir = context.logdir\n    self._handlers = None\n    self.readers = {}\n    self.run_paths = None\n    self._configs = {}\n    self.old_num_run_paths = None\n    self.config_fpaths = None\n    self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)\n\n    \n    \n    \n    self._is_active = False\n\n    \n    \n    self._thread_for_determining_is_active = None\n\n    if self.multiplexer:\n      self.run_paths = self.multiplexer.RunPaths()", "docstring": "Instantiates ProjectorPlugin via TensorBoard core.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def _get(self, rec_id=None, upstream=None):\n    if rec_id:\n        self.record_url = self.__class__.get_record_url(rec_id)\n        self.debug_logger.debug('GET {} record with ID {}: {}'.format(self.__class__.__name__, rec_id, self.record_url))\n        response = requests.get(url=self.record_url, headers=HEADERS, verify=False)\n        if ((not response.ok) and (response.status_code == requests.codes.NOT_FOUND)):\n            raise RecordNotFound(\"Search for {} record with ID '{}' returned no results.\".format(self.__class__.__name__, rec_id))\n        self.write_response_html_to_file(response, 'get_bob.html')\n        response.raise_for_status()\n        return response.json()\n    elif upstream:\n        rec_json = self.__class__.find_by({'upstream_identifier': upstream}, require=True)\n        self.record_url = self.__class__.get_record_url(rec_json['id'])\n    return rec_json", "docstring": "Fetches a record by the record's ID or upstream_identifier.\n\nRaises:\n`pulsarpy.models.RecordNotFound`: A record could not be found.", "source": "codesearchnet"}
{"code": "def batch(self, timelimit=None):\n        \n        from .launcher import BatchLauncher\n        \n        prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])\n        prev_dir = os.path.join(os.path.sep, prev_dir)\n        workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + \"_batch\")\n\n        return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)", "docstring": "Run the flow in batch mode, return exit status of the job script.\nRequires a manager.yml file and a batch_adapter adapter.\n\nArgs:\ntimelimit: Time limit (int with seconds or string with time given with the slurm convention:\n\"days-hours:minutes:seconds\"). If timelimit is None, the default value specified in the\n`batch_adapter` entry of `manager.yml` is used.", "source": "juraj-google-style"}
{"code": "def UpdateTaskAsPendingMerge(self, task):\n    with self._lock:\n        is_abandoned = (task.identifier in self._tasks_abandoned)\n        is_processing = (task.identifier in self._tasks_processing)\n        is_queued = (task.identifier in self._tasks_queued)\n        if ((not is_queued) and (not is_processing) and (not is_abandoned)):\n            raise KeyError('Status of task {0:s} is unknown.'.format(task.identifier))\n        if (is_abandoned and task.has_retry):\n            raise KeyError('Will not merge a task {0:s} with retry task.'.format(task.identifier))\n        if is_queued:\n            logger.debug('Task {0:s} was queued, now merging.'.format(task.identifier))\n            del self._tasks_queued[task.identifier]\n        if is_processing:\n            logger.debug('Task {0:s} was processing, now merging.'.format(task.identifier))\n            del self._tasks_processing[task.identifier]\n        if is_abandoned:\n            logger.debug('Task {0:s} was abandoned, now merging.'.format(task.identifier))\n            del self._tasks_abandoned[task.identifier]\n        self._tasks_pending_merge.PushTask(task)\n        self.SampleTaskStatus(task, 'pending_merge')\n        task.UpdateProcessingTime()\n        self._UpdateLatestProcessingTime(task)", "docstring": "Updates the task manager to reflect the task is ready to be merged.\n\nArgs:\ntask (Task): task.\n\nRaises:\nKeyError: if the task was not queued, processing or abandoned, or\nthe task was abandoned and has a retry task.", "source": "codesearchnet"}
{"code": "def assertDTypeEqual(self, target, expected_dtype):\n    target = self._GetNdArray(target)\n    if not isinstance(target, list):\n        arrays = [target]\n    for arr in arrays:\n        self.assertEqual(arr.dtype, expected_dtype)", "docstring": "Assert ndarray data type is equal to expected.\n\nArgs:\ntarget: The numpy `ndarray`, or anything that can be converted into a\nnumpy `ndarray` (including Tensor).\nexpected_dtype: Expected data type.", "source": "github-repos"}
{"code": "def find(self, query=None, func=None, labels=None, colors=None, pinned=None, archived=None, trashed=False):\n    if (labels is not None):\n        labels = [(i.id if isinstance(i, _node.Label) else i) for i in labels]\n    return (node for node in self.all() if (((query is None) or ((isinstance(query, six.string_types) and ((query in node.title) or (query in node.text))) or (isinstance(query, Pattern) and (query.search(node.title) or query.search(node.text))))) and ((func is None) or func(node)) and ((labels is None) or ((not labels) and (not node.labels.all())) or any(((node.labels.get(i) is not None) for i in labels))) and ((colors is None) or (node.color in colors)) and ((pinned is None) or (node.pinned == pinned)) and ((archived is None) or (node.archived == archived)) and ((trashed is None) or (node.trashed == trashed))))", "docstring": "Find Notes based on the specified criteria.\n\nArgs:\nquery (Union[_sre.SRE_Pattern, str, None]): A str or regular expression to match against the title and text.\nfunc (Union[callable, None]): A filter function.\nlabels (Union[List[str], None]): A list of label ids or objects to match. An empty list matches notes with no labels.\ncolors (Union[List[str], None]): A list of colors to match.\npinned (Union[bool, None]): Whether to match pinned notes.\narchived (Union[bool, None]): Whether to match archived notes.\ntrashed (Union[bool, None]): Whether to match trashed notes.\n\nReturn:\nList[gkeepapi.node.TopLevelNode]: Results.", "source": "codesearchnet"}
{"code": "def Collect(self, knowledge_base, artifact_definition, searcher, file_system):\n    for source in artifact_definition.sources:\n        if (source.type_indicator not in (artifact_definitions.TYPE_INDICATOR_FILE, artifact_definitions.TYPE_INDICATOR_PATH)):\n            continue\n        for path in source.paths:\n            path_segments = path.split(source.separator)\n            find_spec = file_system_searcher.FindSpec(location_glob=path_segments[1:], case_sensitive=False)\n            for path_specification in searcher.Find(find_specs=[find_spec]):\n                self._ParsePathSpecification(knowledge_base, searcher, file_system, path_specification, source.separator)", "docstring": "Collects values using a file artifact definition.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nartifact_definition (artifacts.ArtifactDefinition): artifact definition.\nsearcher (dfvfs.FileSystemSearcher): file system searcher to preprocess\nthe file system.\nfile_system (dfvfs.FileSystem): file system to be preprocessed.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "codesearchnet"}
{"code": "def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str:\n    \n\n    try:\n        spec_dict = yaml.load(open(yaml_fn, \"r\").read(), Loader=yaml.SafeLoader)\n\n        \n        spec_dict[\"admin\"] = {}\n        spec_dict[\"admin\"][\"version_underscored\"] = spec_dict[\"version\"].replace(\".\", \"_\")\n        spec_dict[\"admin\"][\"parser_fn\"] = yaml_fn.replace(\".yaml\", \"_parser.py\")\n\n        \n        add_relations(spec_dict)\n        \n        add_functions(spec_dict)\n        \n        add_namespaces(spec_dict)\n\n        enhance_function_signatures(spec_dict)\n\n        add_function_signature_help(spec_dict)\n\n        with open(json_fn, \"w\") as f:\n            json.dump(spec_dict, f)\n\n    except Exception as e:\n        log.error(\n            \"Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.\".format(\n                yaml_fn\n            )\n        )\n        sys.exit()\n\n    return spec_dict[\"version\"]", "docstring": "Enhance BEL specification and save as JSON file\n\nLoad all BEL Specification YAML files and convert to JSON files\nafter enhancing them.  Also create a bel_versions.json file with\nall available BEL versions for fast loading.\n\nArgs:\nyaml_fn: original YAML version of BEL Spec\njson_fn: enhanced JSON version of BEL Spec\nReturns:\nstr: version of BEL Spec", "source": "juraj-google-style"}
{"code": "def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':\n        \n        return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self", "docstring": "Applies a function to the contained :meth:`Result.Ok` value.\n\nArgs:\nop: The function to apply to the :meth:`Result.Ok` value.\n\nReturns:\nA :class:`Result` with its success value as the function result\nif `self` is an :meth:`Result.Ok` value, otherwise returns\n`self`.\n\nExamples:\n>>> Ok(1).map(lambda x: x * 2)\nOk(2)\n>>> Err(1).map(lambda x: x * 2)\nErr(1)", "source": "juraj-google-style"}
{"code": "def _sparse_or_dense_matmul_onehot(sparse_or_dense_matrix, col_index):\n  \n  if isinstance(sparse_or_dense_matrix,\n                (tf.SparseTensor, tf.compat.v1.SparseTensorValue)):\n    \n    \n    num_rows = _get_shape(sparse_or_dense_matrix)[-2]\n    batch_shape = _get_shape(sparse_or_dense_matrix)[:-2]\n    slice_start = tf.concat([tf.zeros_like(batch_shape), [0, col_index]],\n                            axis=0)\n    slice_size = tf.concat([batch_shape, [num_rows, 1]], axis=0)\n    \n    \n    sparse_slice = tf.sparse.slice(sparse_or_dense_matrix,\n                                   tf.cast(slice_start, tf.int64),\n                                   tf.cast(slice_size, tf.int64))\n\n    output_shape = tf.concat([batch_shape, [num_rows]], axis=0)\n    return tf.reshape(tf.sparse.to_dense(sparse_slice), output_shape)\n  else:\n    return tf.gather(sparse_or_dense_matrix, col_index, axis=-1)", "docstring": "Returns a (dense) column of a Tensor or SparseTensor.\n\nArgs:\nsparse_or_dense_matrix: matrix-shaped, `float` `Tensor` or `SparseTensor`.\ncol_index: scalar, `int` `Tensor` representing the index of the desired\ncolumn.\n\nReturns:\ncolumn: vector-shaped, `float` `Tensor` with the same dtype as\n`sparse_or_dense_matrix`, representing the `col_index`th column of\n`sparse_or_dense_matrix`.", "source": "juraj-google-style"}
{"code": "def filter_with_theta(image, theta, sigma=1.0, filter_size=9):\n    x = np.arange((((- filter_size) \n    g = np.array([np.exp(((- (x ** 2)) / (2 * (sigma ** 2))))])\n    gp = np.array([((- (x / sigma)) * np.exp(((- (x ** 2)) / (2 * (sigma ** 2)))))])\n    ix = convolve2d(image, (- gp), mode='same', boundary='fill', fillvalue=0)\n    ix = convolve2d(ix, g.T, mode='same', boundary='fill', fillvalue=0)\n    iy = convolve2d(image, g, mode='same', boundary='fill', fillvalue=0)\n    iy = convolve2d(iy, (- gp.T), mode='same', boundary='fill', fillvalue=0)\n    output = ((np.cos(theta) * ix) + (np.sin(theta) * iy))\n    gt_filter = np.matmul(g.T, gp)\n    gt_filter = ((np.cos(theta) * gt_filter) + (np.sin(theta) * gt_filter.T))\n    return (output, gt_filter)", "docstring": "Implements a steerable Gaussian filter.\n\nThis function can be used to evaluate the first\ndirectional derivative of an image, using the\nmethod outlined in\n\nW. T. Freeman and E. H. Adelson, \"The Design\nand Use of Steerable Filters\", IEEE PAMI, 1991.\n\nIt evaluates the directional derivative of the input\nimage I, oriented at THETA degrees with respect to the\nimage rows. The standard deviation of the Gaussian kernel\nis given by SIGMA (assumed to be equal to unity by default).\n\nArgs:\nimage: any input image (only one channel)\ntheta: orientation of filter [0, 2 * pi]\nsigma (float, optional): standard derivation of Gaussian\nfilter_size (int, optional): filter support\n\nReturns:\nfiltered image and the filter", "source": "codesearchnet"}
{"code": "def random_line_data(chars_per_line=80):\n    \n    return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))", "docstring": "Function to create a line of a random string\nArgs:\nchars_per_line: An integer that says how many characters to return\n\nReturns:\nA String", "source": "juraj-google-style"}
{"code": "def resolve_image_exif(self, image_url):\n        \n        files = self.mets.find_files(url=image_url)\n        if files:\n            image_filename = self.download_file(files[0]).local_filename\n        else:\n            image_filename = self.download_url(image_url)\n\n        if image_url not in self.image_cache['exif']:\n            self.image_cache['exif'][image_url] = OcrdExif(Image.open(image_filename))\n        return self.image_cache['exif'][image_url]", "docstring": "Get the EXIF metadata about an image URL as :class:`OcrdExif`\n\nArgs:\nimage_url (string) : URL of image\n\nReturn\n:class:`OcrdExif`", "source": "juraj-google-style"}
{"code": "def _convert_tf2_model(flags):\n    if flags.saved_model_dir:\n        converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir, signature_keys=_parse_array(flags.saved_model_signature_key), tags=_parse_set(flags.saved_model_tag_set))\n    elif flags.keras_model_file:\n        model = keras_deps.get_load_model_function()(flags.keras_model_file)\n        converter = lite.TFLiteConverterV2.from_keras_model(model)\n    converter.experimental_new_converter = flags.experimental_new_converter\n    if flags.experimental_new_quantizer is not None:\n        converter.experimental_new_quantizer = flags.experimental_new_quantizer\n    tflite_model = converter.convert()\n    with gfile.GFile(flags.output_file, 'wb') as f:\n        f.write(tflite_model)", "docstring": "Calls function to convert the TensorFlow 2.0 model into a TFLite model.\n\nArgs:\nflags: argparse.Namespace object.\n\nRaises:\nValueError: Unsupported file format.", "source": "github-repos"}
{"code": "def parse_response(service, response, search_type):\n    \n    _LOG.debug('Parse response \"%s\" from service \"%s\" of type \"%s\"', response,\n               service, search_type)\n    items = []\n    \n    if 'searchResult' in response:\n        response = response['searchResult']\n    elif 'getMetadataResult' in response:\n        response = response['getMetadataResult']\n    else:\n        raise ValueError('\"response\" should contain either the key '\n                         '\"searchResult\" or \"getMetadataResult\"')\n\n    \n    search_metadata = {\n        'number_returned': response['count'],\n        'total_matches': None,\n        'search_type': search_type,\n        'update_id': None,\n    }\n\n    for result_type in ('mediaCollection', 'mediaMetadata'):\n        \n        result_type_proper = result_type[0].upper() + result_type[1:]\n        raw_items = response.get(result_type, [])\n        \n        if isinstance(raw_items, OrderedDict):\n            raw_items = [raw_items]\n\n        for raw_item in raw_items:\n            \n            \n            \n            class_key = result_type_proper + raw_item['itemType'].title()\n            cls = get_class(class_key)\n            items.append(cls.from_music_service(service, raw_item))\n    return SearchResult(items, **search_metadata)", "docstring": "Parse the response to a music service query and return a SearchResult\n\nArgs:\nservice (MusicService): The music service that produced the response\nresponse (OrderedDict): The response from the soap client call\nsearch_type (str): A string that indicates the search type that the\nresponse is from\n\nReturns:\nSearchResult: A SearchResult object", "source": "juraj-google-style"}
{"code": "def _has_next_page(self):\n    if (self.page_number == 0):\n        return True\n    if (self.max_results is not None):\n        if (self.num_results >= self.max_results):\n            return False\n    return (True if self.next_page_token else False)", "docstring": "Determines whether or not there are more pages with results.\n\nReturns:\nbool: Whether the iterator has more pages.", "source": "codesearchnet"}
{"code": "def _read_wrappers(self, name):\n        \n        io_attr = getattr(self._io, name)\n\n        def read_wrapper(*args, **kwargs):\n            \n            self._io.seek(self._read_seek, self._read_whence)\n            ret_value = io_attr(*args, **kwargs)\n            self._read_seek = self._io.tell()\n            self._read_whence = 0\n            self._io.seek(0, 2)\n            return ret_value\n\n        return read_wrapper", "docstring": "Wrap a stream attribute in a read wrapper.\n\nReturns a read_wrapper which tracks our own read pointer since the\nstream object has no concept of a different read and write pointer.\n\nArgs:\nname: The name of the attribute to wrap. Should be a read call.\n\nReturns:\nThe read_wrapper function.", "source": "juraj-google-style"}
{"code": "def run(self, include_reset=True, accelerated=True):\n    self._start_tick = self.tick_count\n    if self._check_stop_conditions(self.sensor_graph):\n        return\n    if include_reset:\n        pass\n    i = None\n    for (i, stim) in enumerate(self.stimuli):\n        if (stim.time != 0):\n            break\n        reading = IOTileReading(self.tick_count, stim.stream.encode(), stim.value)\n        self.sensor_graph.process_input(stim.stream, reading, self.rpc_executor)\n    if ((i is not None) and (i > 0)):\n        self.stimuli = self.stimuli[i:]\n    while (not self._check_stop_conditions(self.sensor_graph)):\n        now = monotonic()\n        next_tick = (now + 1.0)\n        self.tick_count += 1\n        i = None\n        for (i, stim) in enumerate(self.stimuli):\n            if (stim.time != self.tick_count):\n                break\n            reading = IOTileReading(self.tick_count, stim.stream.encode(), stim.value)\n            self.sensor_graph.process_input(stim.stream, reading, self.rpc_executor)\n        if ((i is not None) and (i > 0)):\n            self.stimuli = self.stimuli[i:]\n        self._check_additional_ticks(self.tick_count)\n        if ((self.tick_count % 10) == 0):\n            reading = IOTileReading(self.tick_count, system_tick.encode(), self.tick_count)\n            self.sensor_graph.process_input(system_tick, reading, self.rpc_executor)\n            reading = IOTileReading(self.tick_count, battery_voltage.encode(), int((self.voltage * 65536)))\n            self.sensor_graph.process_input(battery_voltage, reading, self.rpc_executor)\n        now = monotonic()\n        if ((not accelerated) and (now < next_tick)):\n            time.sleep((next_tick - now))", "docstring": "Run this sensor graph until a stop condition is hit.\n\nMultiple calls to this function are useful only if\nthere has been some change in the stop conditions that would\ncause the second call to not exit immediately.\n\nArgs:\ninclude_reset (bool): Start the sensor graph run with\na reset event to match what would happen when an\nactual device powers on.\naccelerated (bool): Whether to run this sensor graph as\nfast as possible or to delay tick events to simulate\nthe actual passage of wall clock time.", "source": "codesearchnet"}
{"code": "def disconnect_async(self, conn_id, callback):\n        \n\n        found_handle = None\n        \n        for handle, conn in self._connections.items():\n            if conn['connection_id'] == conn_id:\n                found_handle = handle\n\n        if found_handle is None:\n            callback(conn_id, self.id, False, 'Invalid connection_id')\n            return\n\n        self._command_task.async_command(['_disconnect', found_handle], self._on_disconnect,\n                                         {'connection_id': conn_id, 'handle': found_handle,\n                                          'callback': callback})", "docstring": "Asynchronously disconnect from a device that has previously been connected\n\nArgs:\nconn_id (int): a unique identifier for this connection on the DeviceManager\nthat owns this adapter.\ncallback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)\nwhen the disconnection finishes.  Disconnection can only either succeed or timeout.", "source": "juraj-google-style"}
{"code": "def set_dtype_conversion_mode(dtype_conversion_mode) -> None:\n    global _dtype_conversion_mode\n    _dtype_conversion_mode = _get_promo_mode_enum(dtype_conversion_mode)", "docstring": "Enables the specified dtype conversion mode.\n\nArgs:\ndtype_conversion_mode: a string that specifies dtype conversion mode. This\nstring corresponds to a PromoMode Enum and can be 'off', 'legacy', 'safe'\nor 'all'.", "source": "github-repos"}
{"code": "def register(self, username=\"\"):\n        \n        if not username:\n            username = utils.mxid2localpart(self.identity)\n        content = {\n            \"type\": \"m.login.application_service\",\n            \"username\": username,\n        }\n        return self._send(\"POST\", \"/register\", content,\n                          api_path=MATRIX_V2_API_PATH)", "docstring": "Performs /register with type: m.login.application_service\n\nArgs:\nusername(str): Username to register.", "source": "juraj-google-style"}
{"code": "def _read_ipv6_opts_options(self, length):\n    counter = 0\n    optkind = list()\n    options = dict()\n    while (counter < length):\n        code = self._read_unpack(1)\n        if (not code):\n            break\n        (abbr, desc) = _IPv6_Opts_OPT.get(code, ('None', 'Unassigned'))\n        data = _IPv6_Opts_PROC(abbr)(self, code, desc=desc)\n        enum = _OPT_TYPE.get(code)\n        counter += data['length']\n        if (enum in optkind):\n            if isinstance(options[abbr], tuple):\n                options[abbr] += (Info(data),)\n            else:\n                options[abbr] = (Info(options[abbr]), Info(data))\n        else:\n            optkind.append(enum)\n            options[abbr] = data\n    if (counter != length):\n        raise ProtocolError(f'{self.alias}: invalid format')\n    return (tuple(optkind), options)", "docstring": "Read IPv6_Opts options.\n\nPositional arguments:\n* length -- int, length of options\n\nReturns:\n* dict -- extracted IPv6_Opts options", "source": "codesearchnet"}
{"code": "def lists(self, **kwargs):\n    path = self._get_path('lists')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Gets the top-level lists available from the API.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def _get_client_by_id(self, client_id):\n    client = self.grr_api.Client(client_id)\n    print('Checking for client approval')\n    self._check_approval_wrapper(client, client.ListFlows)\n    print('{0:s}: Client approval is valid'.format(client_id))\n    return client.Get()", "docstring": "Get GRR client dictionary and make sure valid approvals exist.\n\nArgs:\nclient_id: GRR client ID.\n\nReturns:\nGRR API Client object", "source": "codesearchnet"}
{"code": "def _write_submit_script(self, template, script_filename, job_name, configs):\n    try:\n        submit_script = Template(template).substitute(jobname=job_name, **configs)\n        with open(script_filename, 'w') as f:\n            f.write(submit_script)\n    except KeyError as e:\n        logger.error('Missing keys for submit script : %s', e)\n        raise SchedulerMissingArgs(e.args, self.sitename)\n    except IOError as e:\n        logger.error('Failed writing to submit script: %s', script_filename)\n        raise ScriptPathError(script_filename, e)\n    except Exception as e:\n        print('Template : ', template)\n        print('Args : ', job_name)\n        print('Kwargs : ', configs)\n        logger.error('Uncategorized error: %s', e)\n        raise e\n    return True", "docstring": "Generate submit script and write it to a file.\n\nArgs:\n- template (string) : The template string to be used for the writing submit script\n- script_filename (string) : Name of the submit script\n- job_name (string) : job name\n- configs (dict) : configs that get pushed into the template\n\nReturns:\n- True: on success\n\nRaises:\nSchedulerMissingArgs : If template is missing args\nScriptPathError : Unable to write submit script out", "source": "codesearchnet"}
{"code": "def depth(self, local: bool=True) -> int:\n    G = self.graph\n    if (not local):\n\n        def remove_local(dagc: DAGCircuit) -> Generator[(Operation, None, None)]:\n            for elem in dagc:\n                if (dagc.graph.degree[elem] > 2):\n                    (yield elem)\n        G = DAGCircuit(remove_local(self)).graph\n    return (nx.dag_longest_path_length(G) - 1)", "docstring": "Return the circuit depth.\n\nArgs:\nlocal:  If True include local one-qubit gates in depth\ncalculation. Else return the multi-qubit gate depth.", "source": "codesearchnet"}
{"code": "def mkfile_uchroot(filepath, root=\".\"):\n    \n    from benchbuild.utils.uchroot import no_args, uretry\n\n    uchroot = no_args()\n    uchroot = uchroot[\"-E\", \"-A\", \"-C\", \"-w\", \"/\", \"-r\"]\n    uchroot = uchroot[os.path.abspath(root)]\n    uretry(uchroot[\"--\", \"/bin/touch\", filepath])", "docstring": "Create a file inside a uchroot env.\n\nYou will want to use this when you need to create a file with apropriate\nrights inside a uchroot container with subuid/subgid handling enabled.\n\nArgs:\nfilepath:\nThe filepath that should be created. Absolute inside the\nuchroot container.\nroot:\nThe root PATH of the container filesystem as seen outside of\nthe container.", "source": "juraj-google-style"}
{"code": "def locate_point(nodes, x_val, y_val):\n    zero1 = (_curve_helpers.full_reduce(nodes[([0], :)]) - x_val)\n    zero2 = (_curve_helpers.full_reduce(nodes[([1], :)]) - y_val)\n    if (zero1.shape[1] > zero2.shape[1]):\n        (zero1, zero2) = (zero2, zero1)\n    if (zero1.shape[1] == 1):\n        (zero1, zero2) = (zero2, zero1)\n    power_basis1 = poly_to_power_basis(zero1[(0, :)])\n    all_roots = roots_in_unit_interval(power_basis1)\n    if (all_roots.size == 0):\n        return None\n    power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[(0, :)]))\n    near_zero = np.abs(polynomial.polyval(all_roots, power_basis2))\n    index = np.argmin(near_zero)\n    if (near_zero[index] < _ZERO_THRESHOLD):\n        return all_roots[index]\n    return None", "docstring": "r\"\"\"Find the parameter corresponding to a point on a curve.\n\n.. note::\n\nThis assumes that the curve :math:`B(s, t)` defined by ``nodes``\nlives in :math:`\\mathbf{R}^2`.\n\nArgs:\nnodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.\nx_val (float): The :math:`x`-coordinate of the point.\ny_val (float): The :math:`y`-coordinate of the point.\n\nReturns:\nOptional[float]: The parameter on the curve (if it exists).", "source": "codesearchnet"}
{"code": "def ic45(msg):\n    \n    d = hex2bin(data(msg))\n    if d[9] == '0':\n        return None\n\n    ic = bin2int(d[10:12])\n    return ic", "docstring": "Icing.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: Icing level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj-google-style"}
{"code": "def reset_trial(self, trial, new_config, new_experiment_tag):\n        \n        trial.experiment_tag = new_experiment_tag\n        trial.config = new_config\n        trainable = trial.runner\n        with warn_if_slow(\"reset_config\"):\n            reset_val = ray.get(trainable.reset_config.remote(new_config))\n        return reset_val", "docstring": "Tries to invoke `Trainable.reset_config()` to reset trial.\n\nArgs:\ntrial (Trial): Trial to be reset.\nnew_config (dict): New configuration for Trial\ntrainable.\nnew_experiment_tag (str): New experiment name\nfor trial.\n\nReturns:\nTrue if `reset_config` is successful else False.", "source": "juraj-google-style"}
{"code": "def get_context_from_cmdln(args, desc='Run scriptworker'):\n    context = Context()\n    parser = argparse.ArgumentParser(description=desc)\n    parser.add_argument('config_path', type=str, nargs='?', default='scriptworker.yaml', help='the path to the config file')\n    parsed_args = parser.parse_args(args)\n    (context.config, credentials) = create_config(config_path=parsed_args.config_path)\n    update_logging_config(context)\n    return (context, credentials)", "docstring": "Create a Context object from args.\n\nArgs:\nargs (list): the commandline args.  Generally sys.argv\n\nReturns:\ntuple: ``scriptworker.context.Context`` with populated config, and\ncredentials frozendict", "source": "codesearchnet"}
{"code": "def usergroups_users_update(\n        self, *, usergroup: str, users: List[str], **kwargs\n    ) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"usergroup\": usergroup, \"users\": users})\n        return self.api_call(\"usergroups.users.update\", json=kwargs)", "docstring": "Update the list of users for a User Group\n\nArgs:\nusergroup (str): The encoded ID of the User Group to update.\ne.g. 'S0604QSJC'\nusers (list): A list user IDs that represent the entire list of\nusers for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']", "source": "juraj-google-style"}
{"code": "def video_augmentation(features, hue=False, saturate=False, contrast=False):\n  \n  inputs, targets = features[\"inputs\"], features[\"targets\"]\n  in_steps = common_layers.shape_list(inputs)[0]\n\n  \n  \n  video = tf.concat((inputs, targets), axis=0)\n  if hue:\n    video = tf.image.random_hue(video, max_delta=0.2)\n  if saturate:\n    video = tf.image.random_saturation(video, lower=0.5, upper=1.5)\n  if contrast:\n    video = tf.image.random_contrast(video, lower=0.5, upper=1.5)\n  features[\"inputs\"], features[\"targets\"] = video[:in_steps], video[in_steps:]\n  return features", "docstring": "Augments video with optional hue, saturation and constrast.\n\nArgs:\nfeatures: dict, with keys \"inputs\", \"targets\".\nfeatures[\"inputs\"], 4-D Tensor, shape=(THWC)\nfeatures[\"targets\"], 4-D Tensor, shape=(THWC)\nhue: bool, apply hue_transform.\nsaturate: bool, apply saturation transform.\ncontrast: bool, apply constrast transform.\nReturns:\naugment_features: dict with transformed \"inputs\" and \"targets\".", "source": "juraj-google-style"}
{"code": "def _find_workflows(mcs, attrs):\n    workflows = {}\n    for (attribute, value) in attrs.items():\n        if isinstance(value, Workflow):\n            workflows[attribute] = StateField(value)\n    return workflows", "docstring": "Finds all occurrences of a workflow in the attributes definitions.\n\nReturns:\ndict(str => StateField): maps an attribute name to a StateField\ndescribing the related Workflow.", "source": "codesearchnet"}
{"code": "def search_features(self, search):\n        \n        if isinstance(search, string_types):\n            search = [search]\n        search = [s.replace('*', '.*') for s in search]\n        cols = list(self.data.columns)\n        results = []\n        for s in search:\n            results.extend([f for f in cols if re.match(s + '$', f)])\n        return list(set(results))", "docstring": "Returns all features that match any of the elements in the input\nlist.\n\nArgs:\nsearch (str, list): A string or list of strings defining the query.\n\nReturns:\nA list of matching feature names.", "source": "juraj-google-style"}
{"code": "def _ParseTensorName(tensor_name):\n    components = tensor_name.split(':')\n    if len(components) == 2:\n        try:\n            output_index = int(components[1])\n        except ValueError:\n            raise ValueError(f'Cannot convert {tensor_name!r} to a tensor name. Second component of the name following the `:` should be an int. Got {components[1]}.')\n        return (components[0], output_index)\n    elif len(components) == 1:\n        return (components[0], 0)\n    else:\n        raise ValueError(f\"Cannot convert '{tensor_name}' to a tensor name. Tensor names should not contain more than 1 `:`. Obtained {len(components) - 1}\")", "docstring": "Parses a tensor name into an operation name and output index.\n\nThis function will canonicalize tensor names as follows:\n\n* \"foo:0\"       -> (\"foo\", 0)\n* \"foo:7\"       -> (\"foo\", 7)\n* \"foo\"         -> (\"foo\", 0)\n* \"foo:bar:baz\" -> ValueError\n\nArgs:\ntensor_name: The name of a tensor.\n\nReturns:\nA tuple containing the operation name, and the output index.\n\nRaises:\nValueError: If `tensor_name' cannot be interpreted as the name of a tensor.", "source": "github-repos"}
{"code": "def _run_single(self, thread_id, agent, environment, deterministic=False, max_episode_timesteps=(- 1), episode_finished=None, testing=False, sleep=None):\n    old_episode_finished = False\n    if ((episode_finished is not None) and (len(getargspec(episode_finished).args) == 1)):\n        old_episode_finished = True\n    episode = 0\n    while (not self.should_stop):\n        state = environment.reset()\n        agent.reset()\n        (self.global_timestep, self.global_episode) = (agent.timestep, agent.episode)\n        episode_reward = 0\n        time_step = 0\n        time_start = time.time()\n        while True:\n            (action, internals, states) = agent.act(states=state, deterministic=deterministic, buffered=False)\n            reward = 0\n            for repeat in xrange(self.repeat_actions):\n                (state, terminal, step_reward) = environment.execute(action=action)\n                reward += step_reward\n                if terminal:\n                    break\n            if (not testing):\n                agent.atomic_observe(states=state, actions=action, internals=internals, reward=reward, terminal=terminal)\n            if (sleep is not None):\n                time.sleep(sleep)\n            time_step += 1\n            episode_reward += reward\n            if (terminal or (time_step == max_episode_timesteps)):\n                break\n            if self.should_stop:\n                return\n        self.global_timestep += time_step\n        self.episode_list_lock.acquire()\n        self.episode_rewards.append(episode_reward)\n        self.episode_timesteps.append(time_step)\n        self.episode_times.append((time.time() - time_start))\n        self.episode_list_lock.release()\n        if (episode_finished is not None):\n            if old_episode_finished:\n                summary_data = {'thread_id': thread_id, 'episode': episode, 'timestep': time_step, 'episode_reward': episode_reward}\n                if (not episode_finished(summary_data)):\n                    return\n            elif (not episode_finished(self, thread_id)):\n                return\n        episode += 1", "docstring": "The target function for a thread, runs an agent and environment until signaled to stop.\nAdds rewards to shared episode rewards list.\n\nArgs:\nthread_id (int): The ID of the thread that's running this target function.\nagent (Agent): The Agent object that this particular thread uses.\nenvironment (Environment): The Environment object that this particular thread uses.\nmax_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.\nepisode_finished (callable): Function called after each episode that takes an episode summary spec and\nreturns False, if this single run should terminate after this episode.\nCan be used e.g. to set a particular mean reward threshold.", "source": "codesearchnet"}
{"code": "def _MergeIdentical(self, a, b):\n    \n    if a != b:\n      raise MergeError(\"values must be identical ('%s' vs '%s')\" %\n                       (transitfeed.EncodeUnicode(a),\n                        transitfeed.EncodeUnicode(b)))\n    return b", "docstring": "Tries to merge two values. The values are required to be identical.\n\nArgs:\na: The first value.\nb: The second value.\n\nReturns:\nThe trivially merged value.\n\nRaises:\nMergeError: The values were not identical.", "source": "juraj-google-style"}
{"code": "def element(self, using, value):\n        \n        return self._execute(Command.FIND_ELEMENT, {\n            'using': using,\n            'value': value\n        })", "docstring": "Find an element in the current context.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nWebElement Object.\n\nRaises:\nWebDriverException.", "source": "juraj-google-style"}
{"code": "def filter_by_analysis_period(self, analysis_period):\n    self._check_analysis_period(analysis_period)\n    _filtered_data = self.filter_by_moys(analysis_period.moys)\n    _filtered_data.header._analysis_period = analysis_period\n    return _filtered_data", "docstring": "Filter a Data Collection based on an analysis period.\n\nArgs:\nanalysis period: A Ladybug analysis period\n\nReturn:\nA new Data Collection with filtered data", "source": "codesearchnet"}
{"code": "def word_fts(self, word):\n        \n        return list(map(self.fts, self.segs(word)))", "docstring": "Return featural analysis of `word`\n\nArgs:\nword (unicode):  one or more IPA segments\n\nReturns:\nlist: list of lists (value, feature) tuples where each inner list\ncorresponds to a segment in `word`", "source": "juraj-google-style"}
{"code": "def path_walk(\n    p: tcod.path.AStar, recompute: bool\n) -> Union[Tuple[int, int], Tuple[None, None]]:\n    \n    x = ffi.new(\"int *\")\n    y = ffi.new(\"int *\")\n    if lib.TCOD_path_walk(p._path_c, x, y, recompute):\n        return x[0], y[0]\n    return None, None", "docstring": "Return the next (x, y) point in a path, or (None, None) if it's empty.\n\nWhen ``recompute`` is True and a previously valid path reaches a point\nwhere it is now blocked, a new path will automatically be found.\n\nArgs:\np (AStar): An AStar instance.\nrecompute (bool): Recompute the path automatically.\nReturns:\nUnion[Tuple[int, int], Tuple[None, None]]:\nA single (x, y) point, or (None, None)", "source": "juraj-google-style"}
{"code": "def apply_region_configs(env_config):\n    \n    new_config = env_config.copy()\n    for region in env_config.get('regions', REGIONS):\n        if isinstance(env_config.get('regions'), dict):\n            region_specific_config = env_config['regions'][region]\n            new_config[region] = dict(DeepChainMap(region_specific_config, env_config))\n        else:\n            new_config[region] = env_config.copy()\n    LOG.debug('Region Specific Config:\\n%s', new_config)\n    return new_config", "docstring": "Override default env configs with region specific configs and nest\nall values under a region\n\nArgs:\nenv_config (dict): The environment specific config.\n\nReturn:\ndict: Newly updated dictionary with region overrides applied.", "source": "juraj-google-style"}
{"code": "def read(self, face, *, alignment=1) -> bytes:\n        \n\n        return self.mglo.read(face, alignment)", "docstring": "Read a face from the cubemap texture.\n\nArgs:\nface (int): The face to read.\n\nKeyword Args:\nalignment (int): The byte alignment of the pixels.", "source": "juraj-google-style"}
{"code": "def get_merged_env(self, include_os=False):\n        \n        env = {}\n        if include_os:\n            env.update(os.environ.copy())\n        for level in range(3):\n            env.update(self.pipeline.data.env_list[level].copy())\n        return env", "docstring": "Copying and merging environment variables.\n\nArgs:\ninclude_os (bool): when true then include the environment variables (default: False)\n\nReturns:\ndict: environment variables as defined in the pipeline\n(optional including system environment variables).", "source": "juraj-google-style"}
{"code": "def set_permitted_ip(address=None, deploy=False):\n    \n\n    if not address:\n        raise CommandExecutionError(\"Address option must not be empty.\")\n\n    ret = {}\n\n    query = {'type': 'config',\n             'action': 'set',\n             'xpath': '/config/devices/entry[@name=\\'localhost.localdomain\\']/deviceconfig/system/permitted-ip',\n             'element': '<entry name=\\'{0}\\'></entry>'.format(address)}\n\n    ret.update(__proxy__['panos.call'](query))\n\n    if deploy is True:\n        ret.update(commit())\n\n    return ret", "docstring": "Add an IPv4 address or network to the permitted IP list.\n\nCLI Example:\n\nArgs:\naddress (str): The IPv4 address or network to allow access to add to the Palo Alto device.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_permitted_ip 10.0.0.1\nsalt '*' panos.set_permitted_ip 10.0.0.0/24\nsalt '*' panos.set_permitted_ip 10.0.0.1 deploy=True", "source": "juraj-google-style"}
{"code": "def StatFS(self, path=None):\n    \n    if platform.system() == \"Windows\":\n      raise RuntimeError(\"os.statvfs not available on Windows\")\n\n    local_path = client_utils.CanonicalPathToLocalPath(path or self.path)\n\n    return os.statvfs(local_path)", "docstring": "Call os.statvfs for a given list of rdf_paths.\n\nOS X and Linux only.\n\nNote that a statvfs call for a network filesystem (e.g. NFS) that is\nunavailable, e.g. due to no network, will result in the call blocking.\n\nArgs:\npath: a Unicode string containing the path or None. If path is None the\nvalue in self.path is used.\n\nReturns:\nposix.statvfs_result object\nRaises:\nRuntimeError: if called on windows", "source": "juraj-google-style"}
{"code": "def _PromptUserForPartitionIdentifiers(self, volume_system, volume_identifiers):\n    print_header = True\n    while True:\n        if print_header:\n            self._PrintTSKPartitionIdentifiersOverview(volume_system, volume_identifiers)\n            print_header = False\n        lines = self._textwrapper.wrap(self._USER_PROMPT_TSK)\n        self._output_writer.Write('\\n'.join(lines))\n        self._output_writer.Write('\\n\\nPartition identifiers: ')\n        try:\n            selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='p')\n            if (selected_volumes and (not set(selected_volumes).difference(volume_identifiers))):\n                break\n        except ValueError:\n            pass\n        self._output_writer.Write('\\n')\n        lines = self._textwrapper.wrap('Unsupported partition identifier(s), please try again or abort with Ctrl^C.')\n        self._output_writer.Write('\\n'.join(lines))\n        self._output_writer.Write('\\n\\n')\n    return selected_volumes", "docstring": "Prompts the user to provide partition identifiers.\n\nArgs:\nvolume_system (dfvfs.TSKVolumeSystem): volume system.\nvolume_identifiers (list[str]): volume identifiers including prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix or None.", "source": "codesearchnet"}
{"code": "def write_data(data):\n    sorted_dict = sort_recursive(data)\n    with open(_datafile, 'w') as file:\n        _json.dump(sorted_dict, file, indent=2)", "docstring": "Write the data to the data.json file\n\nArgs:\ndata (dict): The updated data dictionary for Modis", "source": "codesearchnet"}
{"code": "def get_tensor_device(self, tensor_name):\n    tensor = self._name_to_tensor(tensor_name)\n    if isinstance(tensor, tf.Tensor):\n        return tensor.device\n    else:\n        return None", "docstring": "The device of a tensor.\n\nNote that only tf tensors have device assignments.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na string or None, representing the device name.", "source": "codesearchnet"}
{"code": "def _add_op_node(self, op, qargs, cargs, condition=None):\n    node_properties = {'type': 'op', 'op': op, 'name': op.name, 'qargs': qargs, 'cargs': cargs, 'condition': condition}\n    self._max_node_id += 1\n    new_node = DAGNode(data_dict=node_properties, nid=self._max_node_id)\n    self._multi_graph.add_node(new_node)\n    self._id_to_node[self._max_node_id] = new_node", "docstring": "Add a new operation node to the graph and assign properties.\n\nArgs:\nop (Instruction): the operation associated with the DAG node\nqargs (list): list of quantum wires to attach to.\ncargs (list): list of classical wires to attach to.\ncondition (tuple or None): optional condition (ClassicalRegister, int)", "source": "codesearchnet"}
{"code": "def set_load_handler(load_handler: Optional[Callable[..., Any]]) -> Optional[Callable[..., Any]]:\n    if load_handler and (not callable(load_handler)):\n        raise ValueError('`load_handler` must be callable.')\n    global _LOAD_HANDLER\n    old_handler = _LOAD_HANDLER\n    _LOAD_HANDLER = load_handler\n    return old_handler", "docstring": "Sets global load handler.\n\nArgs:\nload_handler: A callable object that takes arbitrary arguments and returns\na value. `symbolic.load` method will pass through all arguments to this\nhandler and return its return value.\n\nReturns:\nPrevious global load handler.", "source": "github-repos"}
{"code": "def request(self, batch: Sequence[Any], model: aiplatform.Endpoint, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    prediction = model.predict(instances=list(batch), parameters=inference_args)\n    return utils._convert_to_result(batch, prediction.predictions, prediction.deployed_model_id)", "docstring": "Sends a prediction request to a Vertex AI endpoint containing batch\nof inputs and matches that input with the prediction response from\nthe endpoint as an iterable of PredictionResults.\n\nArgs:\nbatch: a sequence of any values to be passed to the Vertex AI endpoint.\nShould be encoded as the model expects.\nmodel: an aiplatform.Endpoint object configured to access the desired\nmodel.\ninference_args: any additional arguments to send as part of the\nprediction request.\n\nReturns:\nAn iterable of Predictions.", "source": "github-repos"}
{"code": "def commutator(A, B=None):\n    if B:\n        return ((A * B) - (B * A))\n    return (SPre(A) - SPost(A))", "docstring": "Commutator of `A` and `B`\n\nIf ``B != None``, return the commutator :math:`[A,B]`, otherwise return\nthe super-operator :math:`[A,\\cdot]`.  The super-operator :math:`[A,\\cdot]`\nmaps any other operator ``B`` to the commutator :math:`[A, B] = A B - B A`.\n\nArgs:\nA: The first operator to form the commutator of.\nB: The second operator to form the commutator of, or None.\n\nReturns:\nSuperOperator: The linear superoperator :math:`[A,\\cdot]`", "source": "codesearchnet"}
{"code": "def _locate_point(nodes, degree, x_val, y_val):\n    candidates = [(1.0, 1.0, 1.0, nodes)]\n    for _ in six.moves.xrange((MAX_LOCATE_SUBDIVISIONS + 1)):\n        next_candidates = []\n        for candidate in candidates:\n            update_locate_candidates(candidate, next_candidates, x_val, y_val, degree)\n        candidates = next_candidates\n    if (not candidates):\n        return None\n    (s_approx, t_approx) = mean_centroid(candidates)\n    (s, t) = newton_refine(nodes, degree, x_val, y_val, s_approx, t_approx)\n    actual = _surface_helpers.evaluate_barycentric(nodes, degree, ((1.0 - s) - t), s, t)\n    expected = np.asfortranarray([x_val, y_val])\n    if (not _helpers.vector_close(actual.ravel(order='F'), expected, eps=LOCATE_EPS)):\n        (s, t) = newton_refine(nodes, degree, x_val, y_val, s, t)\n    return (s, t)", "docstring": "r\"\"\"Locate a point on a surface.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nDoes so by recursively subdividing the surface and rejecting\nsub-surfaces with bounding boxes that don't contain the point.\nAfter the sub-surfaces are sufficiently small, uses Newton's\nmethod to narrow in on the pre-image of the point.\n\nArgs:\nnodes (numpy.ndarray): Control points for B |eacute| zier surface\n(assumed to be two-dimensional).\ndegree (int): The degree of the surface.\nx_val (float): The :math:`x`-coordinate of a point\non the surface.\ny_val (float): The :math:`y`-coordinate of a point\non the surface.\n\nReturns:\nOptional[Tuple[float, float]]: The :math:`s` and :math:`t`\nvalues corresponding to ``x_val`` and ``y_val`` or\n:data:`None` if the point is not on the ``surface``.", "source": "codesearchnet"}
{"code": "def get_structure_by_formula(self, formula, **kwargs):\n        \n        structures = []\n        sql = 'select file, sg from data where formula=\"- %s -\"' % \\\n              Composition(formula).hill_formula\n        text = self.query(sql).split(\"\\n\")\n        text.pop(0)\n        for l in text:\n            if l.strip():\n                cod_id, sg = l.split(\"\\t\")\n                r = requests.get(\"http://www.crystallography.net/cod/%s.cif\"\n                                 % cod_id.strip())\n                try:\n                    s = Structure.from_str(r.text, fmt=\"cif\", **kwargs)\n                    structures.append({\"structure\": s, \"cod_id\": int(cod_id),\n                                       \"sg\": sg})\n                except Exception:\n                    import warnings\n                    warnings.warn(\"\\nStructure.from_str failed while parsing CIF file:\\n%s\" % r.text)\n                    raise\n\n        return structures", "docstring": "Queries the COD for structures by formula. Requires mysql executable to\nbe in the path.\n\nArgs:\nformula (str): Chemical formula.\nkwargs: All kwargs supported by\n:func:`pymatgen.core.structure.Structure.from_str`.\n\nReturns:\nA list of dict of the format\n[{\"structure\": Structure, \"cod_id\": cod_id, \"sg\": \"P n m a\"}]", "source": "juraj-google-style"}
{"code": "def Substitute(self, pattern):\n    if isinstance(pattern, bytes):\n        substs = [re.escape(subst.encode('ascii')) for subst in self._substs]\n        regex = re.compile(b'|'.join(substs))\n\n        def Replacement(match):\n            key = match.group(0).decode('ascii')\n            return self._substs[key].encode('utf-8')\n    elif isinstance(pattern, Text):\n        substs = [re.escape(subst) for subst in self._substs]\n        regex = re.compile('|'.join(substs))\n\n        def Replacement(match):\n            key = match.group(0)\n            return self._substs[key]\n    else:\n        raise TypeError(\"Unexpected pattern type '{}'\".format(type(pattern)))\n    if (not substs):\n        return pattern\n    else:\n        return regex.sub(Replacement, pattern)", "docstring": "Formats given pattern with this substitution environment.\n\nA pattern can contain placeholders for variables (`%%foo%%`) and scopes\n(`%%bar.baz%%`) that are replaced with concrete values in this substiution\nenvironment (specified in the constructor).\n\nArgs:\npattern: A pattern with placeholders to substitute.\n\nReturns:\nA pattern with placeholders substituted with concrete values.", "source": "codesearchnet"}
{"code": "def assert_almost_equal(first, second, places=None, msg=None, delta=None, extras=None):\n    _call_unittest_assertion(_pyunit_proxy.assertAlmostEqual, first, second, places=places, msg=msg, delta=delta, extras=extras)", "docstring": "Asserts that first is almost equal to second.\n\nFails if the two objects are unequal as determined by their difference\nrounded to the given number of decimal places (default 7) and\ncomparing to zero, or by comparing that the difference between the two\nobjects is more than the given delta.\nIf the two objects compare equal then they automatically compare\nalmost equal.\n\nArgs:\nfirst: The first value to compare.\nsecond: The second value to compare.\nplaces: How many decimal places to take into account for comparison.\nNote that decimal places (from zero) are usually not the same\nas significant digits (measured from the most significant digit).\nmsg: A string that adds additional info about the failure.\ndelta: Delta to use for comparison instead of decimal places.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def document(obj, doc):\n    try:\n        obj.__doc__ = doc\n    except AttributeError:\n        _EXTRA_DOCS[id(obj)] = doc", "docstring": "Adds a docstring to typealias by overriding the `__doc__` attribute.\n\nNote: Overriding `__doc__` is only possible after python 3.7.\n\nArgs:\nobj: Typealias object that needs to be documented.\ndoc: Docstring of the typealias. It should follow the standard pystyle\ndocstring rules.", "source": "github-repos"}
{"code": "def process_buffer(buffer, n_channels):\n    samples = np.concatenate(buffer)\n    if (n_channels > 1):\n        samples = samples.reshape(((- 1), n_channels)).T\n        samples = librosa.to_mono(samples)\n    return samples", "docstring": "Merge the read blocks and resample if necessary.\n\nArgs:\nbuffer (list): A list of blocks of samples.\nn_channels (int): The number of channels of the input data.\n\nReturns:\nnp.array: The samples", "source": "codesearchnet"}
{"code": "def clip_action(action, space):\n    if isinstance(space, gym.spaces.Box):\n        return np.clip(action, space.low, space.high)\n    elif isinstance(space, gym.spaces.Tuple):\n        if (type(action) not in (tuple, list)):\n            raise ValueError('Expected tuple space for actions {}: {}'.format(action, space))\n        out = []\n        for (a, s) in zip(action, space.spaces):\n            out.append(clip_action(a, s))\n        return out\n    else:\n        return action", "docstring": "Called to clip actions to the specified range of this policy.\n\nArguments:\naction: Single action.\nspace: Action space the actions should be present in.\n\nReturns:\nClipped batch of actions.", "source": "codesearchnet"}
{"code": "def market(self, accountID, **kwargs):\n    return self.create(accountID, order=MarketOrderRequest(**kwargs))", "docstring": "Shortcut to create a Market Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a MarketOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def _parse_schema(schema, method):\n    \n    if method and schema.get('readOnly', False):\n        return _READONLY_PROPERTY\n\n    \n    if 'allOf' in schema:\n        schema_ = copy.deepcopy(schema['allOf'][0])\n        for x in schema['allOf'][1:]:\n            _dict_merge(schema_, x)\n\n        return _parse_schema(schema_, method)\n\n    \n    \n\n    \n    if 'oneOf' in schema:\n        \n        return _parse_schema(schema['oneOf'][0], method)\n\n    if 'enum' in schema:\n        \n        return schema['enum'][0]\n\n    schema_type = schema.get('type', 'object')\n\n    if schema_type == 'array':\n        \n        \n        if 'oneOf' in schema['items']:\n            return [\n                _parse_schema(x, method) for x in schema['items']['oneOf']]\n\n        return [_parse_schema(schema['items'], method)]\n\n    if schema_type == 'object':\n        if method and all(v.get('readOnly', False)\n                          for v in schema['properties'].values()):\n            return _READONLY_PROPERTY\n\n        results = []\n        for name, prop in schema.get('properties', {}).items():\n            result = _parse_schema(prop, method)\n            if result != _READONLY_PROPERTY:\n                results.append((name, result))\n\n        return collections.OrderedDict(results)\n\n    if (schema_type, schema.get('format')) in _TYPE_MAPPING:\n        return _TYPE_MAPPING[(schema_type, schema.get('format'))]\n\n    return _TYPE_MAPPING[(schema_type, None)]", "docstring": "Convert a Schema Object to a Python object.\n\nArgs:\nschema: An ``OrderedDict`` representing the schema object.", "source": "juraj-google-style"}
{"code": "def put(self, rid, data, raise_on_error=True):\n    response_data = None\n    headers = {'Content-Type': 'application/json', 'DB-Method': 'PUT'}\n    url = '/v2/exchange/db/{}/{}/{}'.format(self.domain, self.data_type, rid)\n    r = self.tcex.session.post(url, json=data, headers=headers)\n    self.tcex.log.debug('datastore put status code: {}'.format(r.status_code))\n    if (r.ok and ('application/json' in r.headers.get('content-type', ''))):\n        response_data = r.json()\n    else:\n        error = (r.text or r.reason)\n        self.tcex.handle_error(805, ['put', r.status_code, error], raise_on_error)\n    return response_data", "docstring": "Update the data for the provided Id.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): A search query\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "codesearchnet"}
{"code": "def __init__(self, uri='mongodb://localhost', database='workbench', worker_cap=0, samples_cap=0):\n        # NOTE: the default argument values and the connection string below were truncated\n        # in the extracted source; 'mongodb://localhost', 'workbench' and the zero caps are\n        # assumed placeholders reconstructed from the docstring.\n        self.sample_collection = 'samples'\n        self.worker_cap = worker_cap\n        self.samples_cap = samples_cap\n\n        # Connect to MongoDB\n        self.database_name = database\n        self.uri = uri + '/' + self.database_name  # assumed rebuild of the truncated 'mongodb://...' string\n        self.mongo = pymongo.MongoClient(self.uri, use_greenlets=True)\n        self.database = self.mongo.get_default_database()\n\n        # GridFS handle for storing sample binaries\n        self.gridfs_handle = gridfs.GridFS(self.database)\n\n        # Kick off periodic maintenance operations\n        self.last_ops_run = time.time()\n        self.periodic_ops()\n\n        print '\\t- WorkBench DataStore connected: %s:%s' % (self.uri, self.database_name)", "docstring": "Initialization for the Workbench data store class.\n\nArgs:\nuri: Connection String for DataStore backend.\ndatabase: Name of database.\nworker_cap: MBs in the capped collection.\nsamples_cap: MBs of sample to be stored.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_writer, tool_name):\n    \n    super(StatusView, self).__init__()\n    self._artifact_filters = None\n    self._filter_file = None\n    self._have_ansi_support = not win32console\n    self._mode = self.MODE_WINDOW\n    self._output_writer = output_writer\n    self._source_path = None\n    self._source_type = None\n    self._stdout_output_writer = isinstance(\n        output_writer, tools.StdoutOutputWriter)\n    self._storage_file_path = None\n    self._tool_name = tool_name\n\n    if win32console:\n      kernel32 = ctypes.windll.kernel32\n      stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)\n      result = kernel32.SetConsoleMode(\n          stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)\n      self._have_ansi_support = result != 0", "docstring": "Initializes a status view.\n\nArgs:\noutput_writer (OutputWriter): output writer.\ntool_name (str): namd of the tool.", "source": "juraj-google-style"}
{"code": "def _tzinfome(tzinfo):\n  \n  if not isinstance(tzinfo, datetime.tzinfo):\n    try:\n      tzinfo = pytz.timezone(tzinfo)\n      assert tzinfo.zone in pytz.all_timezones\n    except AttributeError:\n      raise pytz.UnknownTimeZoneError(\"Unknown timezone! %s\" % tzinfo)\n  return tzinfo", "docstring": "Gets a tzinfo object from a string.\n\nArgs:\ntzinfo: A string (or string like) object, or a datetime.tzinfo object.\n\nReturns:\nAn datetime.tzinfo object.\n\nRaises:\nUnknownTimeZoneError: If the timezone given can't be decoded.", "source": "juraj-google-style"}
{"code": "def add(self, name, value, bitmask=DEFMASK):\n    _add_enum_member(self._eid, name, value, bitmask)", "docstring": "Add an enum member\n\nArgs:\nname: Name of the member\nvalue: value of the member\nbitmask: bitmask. Only use if enum is a bitfield.", "source": "codesearchnet"}
{"code": "def _test_tensorflow_vs_numpy(self, x_np):\n    y_np = self._total_variation_np(x_np)\n    self._test(x_np, y_np)", "docstring": "Test the TensorFlow implementation against a numpy implementation.\n\nArgs:\nx_np: Numpy array with 3 or 4 dimensions.", "source": "github-repos"}
{"code": "def movies_in_theaters(self, **kwargs):\n    path = self._get_path('movies_in_theaters')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Gets the movies currently in theaters from the API.\n\nArgs:\npage_limit (optional): number of movies to show per page, default=16\npage (optional): results page number, default=1\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def compiler(name):\n    pinfo = __get_paths()\n    _compiler = local[name]\n    _compiler = _compiler.setenv(PATH=pinfo['path'], LD_LIBRARY_PATH=pinfo['ld_library_path'])\n    return _compiler", "docstring": "Get a usable clang++ plumbum command.\n\nThis searches for a usable clang++ in the llvm binary path\n\nReturns:\nplumbum Command that executes clang++", "source": "codesearchnet"}
{"code": "def AddMonths(start_date, months):\n  \n  current_date = start_date\n  i = 0\n  while i < months:\n    month_days = calendar.monthrange(current_date.year, current_date.month)[1]\n    current_date += timedelta(days=month_days)\n    i += 1\n  return current_date", "docstring": "A simple convenience utility for adding months to a given start date.\n\nThis increments the months by adding the number of days in the current month\nto the current month, for each month.\n\nArgs:\nstart_date: date The date months are being added to.\nmonths: int The number of months to add.\n\nReturns:\nA date equal to the start date incremented by the given number of months.", "source": "juraj-google-style"}
{"code": "def update_non_slot(self, colocate_with, fn, args=(), kwargs=None, group=True):\n    _require_cross_replica_or_default_context_extended(self)\n    if kwargs is None:\n        kwargs = {}\n    fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx(), convert_by_default=False)\n    with self._container_strategy().scope():\n        return self._update_non_slot(colocate_with, fn, args, kwargs, group)", "docstring": "Runs `fn(*args, **kwargs)` on `colocate_with` devices.\n\nUsed to update non-slot variables.\n\nDEPRECATED: TF 1.x ONLY.\n\nArgs:\ncolocate_with: Devices returned by `non_slot_devices()`.\nfn: Function to execute.\nargs: Tuple or list. Positional arguments to pass to `fn()`.\nkwargs: Dict with keyword arguments to pass to `fn()`.\ngroup: Boolean. Defaults to True. If False, the return value will be\nunwrapped.\n\nReturns:\nReturn value of `fn`, possibly merged across devices.", "source": "github-repos"}
{"code": "def generate_zip_data(M, L, n_cells, cluster_probs=None):\n    (genes, clusters) = M.shape\n    output = np.zeros((genes, n_cells))\n    if (cluster_probs is None):\n        cluster_probs = (np.ones(clusters) / clusters)\n    zip_p = np.random.random((genes, n_cells))\n    labels = []\n    for i in range(n_cells):\n        c = np.random.choice(range(clusters), p=cluster_probs)\n        labels.append(c)\n        output[:, i] = np.where((zip_p[:, i] < L[:, c]), 0, np.random.poisson(M[:, c]))\n    return (output, np.array(labels))", "docstring": "Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.\n\nArgs:\nM (array): genes x clusters matrix\nL (array): genes x clusters matrix - zero-inflation parameters\nn_cells (int): number of output cells\ncluster_probs (array): prior probability for each cluster.\nDefault: uniform.\n\nReturns:\noutput - array with shape genes x n_cells\nlabels - array of cluster labels", "source": "codesearchnet"}
{"code": "def checkout_commit(repo: Repo, commit_id: str):\n    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref\n    try:\n        repo.git.checkout(commit_id)\n        yield\n    finally:\n        repo.git.checkout(current_head)", "docstring": "Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.\nArgs:\nrepo (`git.Repo`): A git repository (for instance the Transformers repo).\ncommit_id (`str`): The commit reference to checkout inside the context manager.", "source": "github-repos"}
{"code": "def add_string_pairs_from_text_field_element(xib_file, results, text_field, special_ui_components_prefix):\n    \n    text_field_entry_comment = extract_element_internationalized_comment(text_field)\n    if text_field_entry_comment is None:\n        return\n\n    if text_field.hasAttribute('usesAttributedText') and text_field.attributes['usesAttributedText'].value == 'YES':\n        add_string_pairs_from_attributed_ui_element(results, text_field, text_field_entry_comment)\n    else:\n        try:\n            text_field_entry_key = text_field.attributes['text'].value\n            results.append((text_field_entry_key, text_field_entry_comment + ' default text value'))\n        except KeyError:\n            pass\n    try:\n        text_field_entry_key = text_field.attributes['placeholder'].value\n        results.append((text_field_entry_key, text_field_entry_comment + ' placeholder text value'))\n    except KeyError:\n        pass\n    warn_if_element_not_of_class(text_field, 'TextField', special_ui_components_prefix)", "docstring": "Adds string pairs from a textfield element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\ntext_field(element): The textfield element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix (str):\nIf not None, extraction will not warn about internationalized UI components with this class prefix.", "source": "juraj-google-style"}
{"code": "def has_title(self, title, **kwargs):\n    try:\n        self.assert_title(title, **kwargs)\n        return True\n    except ExpectationNotMet:\n        return False", "docstring": "Checks if the page has the given title.\n\nArgs:\ntitle (str | RegexObject): The string or regex that the title should match.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nbool: Whether it matches.", "source": "codesearchnet"}
{"code": "def create_folder_structure(project_name, batch_name):\n    out_data_dir = prms.Paths['outdatadir']\n    project_dir = os.path.join(out_data_dir, project_name)\n    batch_dir = os.path.join(project_dir, batch_name)\n    raw_dir = os.path.join(batch_dir, 'raw_data')\n    if (not os.path.isdir(project_dir)):\n        os.mkdir(project_dir)\n    if (not os.path.isdir(batch_dir)):\n        os.mkdir(batch_dir)\n    if (not os.path.isdir(raw_dir)):\n        os.mkdir(raw_dir)\n    info_file = ('cellpy_batch_%s.json' % batch_name)\n    info_file = os.path.join(project_dir, info_file)\n    return (info_file, (project_dir, batch_dir, raw_dir))", "docstring": "This function creates a folder structure for the batch project.\n\nThe folder structure consists of main working folder ``project_name`\nlocated in the ``outdatadir`` (as defined in the cellpy configuration file)\nwith a sub-folder named ``batch_name``. It also creates a folder\ninside the ``batch_name`` folder for storing the raw data.\nIf the folders does not exist, they will be made. The function also returns\nthe name of the info-df.\n\nArgs:\nproject_name: name of the project\nbatch_name: name of the batch\n\nReturns: (info_file, (project_dir, batch_dir, raw_dir))", "source": "codesearchnet"}
{"code": "def dump(self, format='ttl'):\n\t\treturn self.rdf.graph.serialize(format=format).decode('utf-8')", "docstring": "Convenience method to return RDF data for resource,\noptionally selecting serialization format.\nInspired by .dump from Samvera.\n\nArgs:\nformat (str): expecting serialization formats accepted by rdflib.serialization(format=)", "source": "juraj-google-style"}
{"code": "def _handle_request(self, request: dict) -> dict:\n    request_body: bytes = request['request_body']\n    signature_chain_url: str = request['signature_chain_url']\n    signature: str = request['signature']\n    alexa_request: dict = request['alexa_request']\n    if (not self._verify_request(signature_chain_url, signature, request_body)):\n        return {'error': 'failed certificate/signature check'}\n    timestamp_str = alexa_request['request']['timestamp']\n    timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ')\n    now = datetime.utcnow()\n    delta = ((now - timestamp_datetime) if (now >= timestamp_datetime) else (timestamp_datetime - now))\n    if (abs(delta.seconds) > REQUEST_TIMESTAMP_TOLERANCE_SECS):\n        log.error(f\"Failed timestamp check for request: {request_body.decode('utf-8', 'replace')}\")\n        return {'error': 'failed request timestamp check'}\n    conversation_key = alexa_request['session']['user']['userId']\n    if (conversation_key not in self.conversations.keys()):\n        if self.config['multi_instance']:\n            conv_agent = self._init_agent()\n            log.info('New conversation instance level agent initiated')\n        else:\n            conv_agent = self.agent\n        self.conversations[conversation_key] = Conversation(config=self.config, agent=conv_agent, conversation_key=conversation_key, self_destruct_callback=(lambda : self._del_conversation(conversation_key)))\n        log.info(f'Created new conversation, key: {conversation_key}')\n    conversation = self.conversations[conversation_key]\n    response = conversation.handle_request(alexa_request)\n    return response", "docstring": "Processes Alexa requests from skill server and returns responses to Alexa.\n\nArgs:\nrequest: Dict with Alexa request payload and metadata.\nReturns:\nresult: Alexa formatted or error response.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor) -> Tuple:\n    router_probs, router_logits = self._compute_router_probabilities(hidden_states)\n    expert_index = torch.argmax(router_probs, dim=-1)\n    expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)\n    token_priority = torch.cumsum(expert_index, dim=-2)\n    expert_capacity_mask = token_priority <= self.expert_capacity\n    expert_index = expert_index * expert_capacity_mask\n    router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)\n    return (expert_index, router_probs, router_logits)", "docstring": "Generic forward function for every Router class. Each Router expects to have the same input hidden states\n(`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the\nnumber of tokens the Router will send to each expert, some Routers can send up to few tokens to each expert.\n\nEach Router works as the following: it expects the hidden states for each token, gets the `router_probs` and\n`router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned\nto an expert. Then each Router class will have to define its own `_compute_routing_instructions`.\n\nArgs:\nhidden_states (`torch.Tensor`) :\n[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\nReturns:\nTuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs\nand the router logits. The router probabilities and logits are required to compute the loss.", "source": "github-repos"}
{"code": "def update_state(self, values, sample_weight=None):\n    [values], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([values], sample_weight)\n    try:\n        values = math_ops.cast(values, self._dtype)\n    except (ValueError, TypeError):\n        msg = 'The output of a metric function can only be a single Tensor. Got: %s' % (values,)\n        if isinstance(values, dict):\n            msg += '. To return a dict of values, implement a custom Metric subclass.'\n        raise RuntimeError(msg)\n    if sample_weight is not None:\n        sample_weight = math_ops.cast(sample_weight, self._dtype)\n        values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(values, sample_weight=sample_weight)\n        try:\n            sample_weight = weights_broadcast_ops.broadcast_weights(sample_weight, values)\n        except ValueError:\n            ndim = backend.ndim(values)\n            weight_ndim = backend.ndim(sample_weight)\n            if self.reduction == metrics_utils.Reduction.SUM:\n                values = math_ops.reduce_sum(values, axis=list(range(weight_ndim, ndim)))\n            else:\n                values = math_ops.reduce_mean(values, axis=list(range(weight_ndim, ndim)))\n        values = math_ops.multiply(values, sample_weight)\n    value_sum = math_ops.reduce_sum(values)\n    with ops.control_dependencies([value_sum]):\n        update_total_op = self.total.assign_add(value_sum)\n    if self.reduction == metrics_utils.Reduction.SUM:\n        return update_total_op\n    if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:\n        num_values = math_ops.cast(array_ops.size(values), self._dtype)\n    elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:\n        if sample_weight is None:\n            num_values = math_ops.cast(array_ops.size(values), self._dtype)\n        else:\n            num_values = math_ops.reduce_sum(sample_weight)\n    else:\n        raise NotImplementedError('reduction [%s] not implemented' % self.reduction)\n    with ops.control_dependencies([update_total_op]):\n        return self.count.assign_add(num_values)", "docstring": "Accumulates statistics for computing the metric.\n\nArgs:\nvalues: Per-example value.\nsample_weight: Optional weighting of each example. Defaults to 1.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def list_types_poi(self, **kwargs):\n    url_args = {'language': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('list_poi_types', url_args)\n    if (not util.check_result(result)):\n        return (False, result.get('message', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'Data')\n    return (True, [emtype.ParkingPoiType(**a) for a in values])", "docstring": "Obtain a list of families, types and categories of POI.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[ParkingPoiType]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def depth_soil_conductivity(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_soil_conductivity`'.format(value))\n\n        self._depth_soil_conductivity = value", "docstring": "Corresponds to IDD Field `depth_soil_conductivity`\n\nArgs:\nvalue (float): value for IDD Field `depth_soil_conductivity`\nUnit: W/m-K,\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def set_type(self, weather_type):\n    weather_type = weather_type.lower()\n    exists = self.has_type(weather_type)\n    if exists:\n        self.add_string_parameters(weather_type)", "docstring": "Set the weather type.\n\nArgs:\nweather_type (str): The weather type.", "source": "codesearchnet"}
{"code": "def on(self,\n           *qubits: raw_types.Qid) -> 'SingleQubitPauliStringGateOperation':\n        \n        if len(qubits) != 1:\n            raise ValueError(\n                'Expected a single qubit, got <{!r}>.'.format(qubits))\n        from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation\n        return SingleQubitPauliStringGateOperation(self, qubits[0])", "docstring": "Returns an application of this gate to the given qubits.\n\nArgs:\n*qubits: The collection of qubits to potentially apply the gate to.", "source": "juraj-google-style"}
{"code": "def bfloat16_activations_var_getter(getter, *args, **kwargs):\n  \n  requested_dtype = kwargs[\"dtype\"]\n  if requested_dtype == tf.bfloat16:\n    kwargs[\"dtype\"] = tf.float32\n  var = getter(*args, **kwargs)\n  \n  \n  \n  \n  \n  if var.dtype.base_dtype != requested_dtype:\n    var = tf.cast(var, requested_dtype)\n  return var", "docstring": "A custom getter function for float32 parameters and bfloat16 activations.\n\nArgs:\ngetter: custom getter\n*args: arguments\n**kwargs: keyword arguments\nReturns:\nvariables with the correct dtype.\nRaises:\nKeyError: if \"dtype\" is not provided as a kwarg.", "source": "juraj-google-style"}
{"code": "def warn_once(self, msg, msg_name=None):\n        \n        assert isinstance(msg, str)\n        msg_name = msg_name if msg_name else msg\n        if msg_name not in warnings_given:\n            warnings.warn(msg)\n        warnings_given.add(msg_name)", "docstring": "Prints a warning statement just once\n\nArgs:\nmsg: The warning message\nmsg_name: [optional] The name of the warning. If None, the msg_name\nwill be the msg itself.", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data):\n        \n        \n        self.arguments['{SCORE}'] = self.score\n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n        self.arguments['{BETA}'] = str(self.beta)\n        self.arguments['{OPTIM}'] = str(self.optim).upper()\n        self.arguments['{ALPHA}'] = str(self.alpha)\n\n        results = self._run_bnlearn(data, verbose=self.verbose)\n        graph = nx.DiGraph()\n        graph.add_edges_from(results)\n        return graph", "docstring": "Run the algorithm on data.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the algorithm.", "source": "juraj-google-style"}
{"code": "def business_days_between(self, from_dates, to_dates):\n    pass", "docstring": "Calculates number of business between pairs of dates.\n\nFor each pair, the initial date is included in the difference, and the final\ndate is excluded. If the final date is the same or earlier than the initial\ndate, zero is returned.\n\nArgs:\nfrom_dates: DateTensor of initial dates.\nto_dates: DateTensor of final dates, should be broadcastable to\n`from_dates`.\n\nReturns:\nAn int32 Tensor with the number of business days between the\ncorresponding pairs of dates.", "source": "github-repos"}
{"code": "def _or_join(self, close_group=False):\n    if (not self.initialized):\n        raise ValueError('You must add a search term before adding an operator.')\n    else:\n        self._operator('OR', close_group=close_group)\n    return self", "docstring": "Combine terms with OR.\nThere must be a term added before using this method.\n\nArguments:\nclose_group (bool): If ``True``, will end the current group and start a new one.\nIf ``False``, will continue current group.\n\nExample:\n\nIf the current query is \"(term1\"\n.or(close_group=True) => \"(term1) OR(\"\n.or(close_group=False) => \"(term1 OR \"\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def get_stim(self, type_, return_all=False):\n    if isinstance(type_, string_types):\n        type_ = _get_stim_class(type_)\n    matches = []\n    for s in self.elements:\n        if isinstance(s, type_):\n            if (not return_all):\n                return s\n            matches.append(s)\n    if (not matches):\n        return ([] if return_all else None)\n    return matches", "docstring": "Returns component elements of the specified type.\n\nArgs:\ntype_ (str or Stim class): the desired Stim subclass to return.\nreturn_all (bool): when True, returns all elements that matched the\nspecified type as a list. When False (default), returns only\nthe first matching Stim.\n\nReturns:\nIf return_all is True, a list of matching elements (or an empty\nlist if no elements match). If return_all is False, returns the\nfirst matching Stim, or None if no elements match.", "source": "codesearchnet"}
{"code": "def set_last_checkpoints(self, last_checkpoints):\n    assert isinstance(last_checkpoints, list)\n    self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]", "docstring": "DEPRECATED: Use set_last_checkpoints_with_time.\n\nSets the list of old checkpoint filenames.\n\nArgs:\nlast_checkpoints: A list of checkpoint filenames.\n\nRaises:\nAssertionError: If last_checkpoints is not a list.", "source": "github-repos"}
{"code": "def delete(self, membershipId):\n        \n        check_type(membershipId, basestring, may_be_none=False)\n\n        \n        self._session.delete(API_ENDPOINT + '/' + membershipId)", "docstring": "Delete a team membership, by ID.\n\nArgs:\nmembershipId(basestring): The team membership ID.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_filename=\"OSZICAR\", nionic_steps=10):\n        \n        self.output_filename = output_filename\n        self.nionic_steps = nionic_steps", "docstring": "Initializes the handler with the output file to check.\n\nArgs:\noutput_filename (str): This is the OSZICAR file. Change\nthis only if it is different from the default (unlikely).\nnionic_steps (int): The threshold number of ionic steps that\nneeds to hit the maximum number of electronic steps for the\nrun to be considered non-converging.", "source": "juraj-google-style"}
{"code": "def add_policy_statements(self, statements):\n        \n        if isinstance(statements, Statement):\n            statements = [statements]\n        self._policy_statements.extend(statements)", "docstring": "Adds statements to the policy.\n\nArgs:\nstatements (:class:`awacs.aws.Statement` or list): Either a single\nStatment, or a list of statements.", "source": "juraj-google-style"}
{"code": "def dict_head(d, N=5):\n    return {k: d[k] for k in list(d.keys())[:N]}", "docstring": "Return the head of a dictionary. It will be random!\n\nDefault is to return the first 5 key/value pairs in a dictionary.\n\nArgs:\nd: Dictionary to get head.\nN: Number of elements to display.\n\nReturns:\ndict: the first N items of the dictionary.", "source": "codesearchnet"}
{"code": "def napalm_cli(task: Task, commands: List[str]) -> Result:\n    device = task.host.get_connection('napalm', task.nornir.config)\n    result = device.cli(commands)\n    return Result(host=task.host, result=result)", "docstring": "Run commands on remote devices using napalm\n\nArguments:\ncommands: commands to execute\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): result of the commands execution", "source": "codesearchnet"}
{"code": "def make_row(row, fields):\n    \n    if not hasattr(row, 'get'):\n        row = {f.name: col for f, col in zip(fields, row)}\n\n    row_fields = []\n    for f in fields:\n        val = row.get(f.name, None)\n        if val is None:\n            val = str(f.default_value())\n        row_fields.append(val)\n    return encode_row(row_fields)", "docstring": "Encode a mapping of column name to values into a [incr tsdb()]\nprofile line. The *fields* parameter determines what columns are\nused, and default values are provided if a column is missing from\nthe mapping.\n\nArgs:\nrow: a mapping of column names to values\nfields: an iterable of :class:`Field` objects\nReturns:\nA [incr tsdb()]-encoded string", "source": "juraj-google-style"}
{"code": "def setall(self, key, values):\n    self.delall(key)\n    for tag in values:\n        self[tag.HashKey] = tag", "docstring": "Delete frames of the given type and add frames in 'values'.\n\nArgs:\nkey (text): key for frames to delete\nvalues (list[Frame]): frames to add", "source": "codesearchnet"}
{"code": "def html(self, data=None, template=None):\n    if (data is None):\n        data = {}\n    if template:\n        return render(self.request, template, data)\n    return HttpResponse(data)", "docstring": "Send html document to user.\n\nArgs:\n- data: Dict to render template, or string with rendered HTML.\n- template: Name of template to render HTML document with passed data.", "source": "codesearchnet"}
{"code": "def _get_language_modeling_inputs(filename, delimiter='\\n', repeat=1, append_space_to_final_punctionation=True):\n    with tf.gfile.Open(filename) as f:\n        text = f.read()\n    inputs = text.split(delimiter)\n    if (not inputs[(- 1)]):\n        inputs.pop()\n    inputs *= repeat\n    if append_space_to_final_punctionation:\n        inputs = [((s + ' ') if (s and (s[(- 1)] in string.punctuation)) else s) for s in inputs]\n    return inputs", "docstring": "Read a file of partial texts to continue.\n\nThe purpose of append_space_to_final_punctionation is that SubwordTokenizer\ngroups punctuation and the ensuing space in the same token.  Adding a space\ncauses the token to be completed.\n\nArgs:\nfilename: a string\ndelimiter: a string\nrepeat: an integer - we repeat the entire file that many times.\nappend_space_to_final_punctionation: a boolean\n\nReturns:\na list of strings", "source": "codesearchnet"}
{"code": "def check_for_dep_in_outputs(dep, verbose, G):\n    \n    if verbose:\n        print(\"checking dep {}\".format(dep))\n    ret_list = []\n    for node in G.nodes(data=True):\n        if \"output\" not in node[1]:\n            continue\n        for out in node[1]['output']:\n            if fnmatch.fnmatch(out, dep):\n                ret_list.append(node[0])\n                break\n    return ret_list", "docstring": "Function to help construct_graph() identify dependencies\n\nArgs:\nA dependency\nA flag indication verbosity\nA (populated) NetworkX DiGraph\n\nReturns:\nA list of targets that build given dependency", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self.object_type:\n        self._object_type.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Payload is missing the object type field.')\n    if self.unique_identifier:\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Payload is missing the unique identifier field.')\n    if self.secret:\n        self._secret.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Payload is missing the secret field.')\n    self.length = local_stream.length()\n    super(GetResponsePayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Get response payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the object type, unique identifier, or\nsecret attributes are missing from the payload struct.", "source": "codesearchnet"}
{"code": "def convert_nested_bidirectional(weights):\n    num_weights_per_layer = len(weights) // 2\n    forward_weights = preprocess_weights_for_loading(layer.forward_layer, weights[:num_weights_per_layer], original_keras_version, original_backend)\n    backward_weights = preprocess_weights_for_loading(layer.backward_layer, weights[num_weights_per_layer:], original_keras_version, original_backend)\n    return forward_weights + backward_weights", "docstring": "Converts layers nested in `Bidirectional` wrapper.\n\nThis function uses `preprocess_weights_for_loading()` for converting\nlayers.\n\nArgs:\nweights: List of weights values (Numpy arrays).\n\nReturns:\nA list of weights values (Numpy arrays).", "source": "github-repos"}
{"code": "def get_one_aminame(inst_img_id):\n    \n    try:\n        aminame = EC2R.Image(inst_img_id).name\n    except AttributeError:\n        aminame = \"Unknown\"\n    return aminame", "docstring": "Get Image_Name for the image_id specified.\n\nArgs:\ninst_img_id (str): image_id to get name value from.\nReturns:\naminame (str): name of the image.", "source": "juraj-google-style"}
{"code": "def _update_internal_nodes(self, index, delta):\n        \n        # Walk up from the leaf, adding the delta to each ancestor's stored sum.\n        while index > 0:\n            index = (index - 1) // 2\n            self._memory[index] += delta", "docstring": "Update internal priority sums when leaf priority has been changed.\nArgs:\nindex: leaf node index\ndelta: change in priority", "source": "juraj-google-style"}
{"code": "def parse_args(self, argv):\n    file_config_names = set(config.ITEMS) | set(self.pytype_single_args)\n    args = self.create_initial_args(file_config_names)\n    self._parser.parse_args(argv, args)\n    self.clean_args(args, file_config_names)\n    self.postprocess(args)\n    return args", "docstring": "Parses argv.\n\nCommandline-only args are parsed normally. File-configurable args appear in\nthe parsed args only if explicitly present in argv.\n\nArgs:\nargv: sys.argv[1:]\n\nReturns:\nAn argparse.Namespace.", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        if type(self) is type(other) and \\\n                self.phase == other.phase:\n            return True\n        return False", "docstring": "Two FrameChanges are the same if they are of the same type\nand have the same phase.\n\nArgs:\nother (FrameChange): other FrameChange\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):\n    date_parser = (date_parser or (lambda x: x))\n    (dotted_ext, dotted_output_ext) = (None, None)\n    if ((ext != None) and (output_ext != None)):\n        dotted_ext = (('' if ext.startswith('.') else '.') + ext)\n        dotted_output_ext = (('' if output_ext.startswith('.') else '.') + output_ext)\n    table = {}\n    for file_properties in util.find_files(path, ext=(ext or ''), verbosity=verbosity):\n        file_path = file_properties['path']\n        if (output_ext and ((dotted_output_ext + '.') in file_path)):\n            continue\n        df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)\n        df = flatten_dataframe(df, verbosity=verbosity)\n        if ((dotted_ext != None) and (dotted_output_ext != None)):\n            df.to_csv(((file_path[:(- len(dotted_ext))] + dotted_output_ext) + dotted_ext))\n    return table", "docstring": "Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict\n\nArguments:\npath (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from\next (str): file name extension (to filter files by)\ndate_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used\n\nReturns:\ndict of DataFrame: { file_path: flattened_data_frame }", "source": "codesearchnet"}
{"code": "def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):\n    if (shutterLevel is None):\n        shutterLevel = self.shutterLevel\n    data = {'channelIndex': 1, 'deviceId': self.id, 'slatsLevel': slatsLevel, 'shutterLevel': shutterLevel}\n    return self._restCall('device/control/setSlatsLevel', json.dumps(data))", "docstring": "sets the slats and shutter level\n\nArgs:\nslatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed,\nshutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value\nReturns:\nthe result of the _restCall", "source": "codesearchnet"}
{"code": "def _get_cached_response_from_django_cache(key):\n    if TieredCache._should_force_django_cache_miss():\n        return CachedResponse(is_found=False, key=key, value=None)\n    cached_value = django_cache.get(key, _CACHE_MISS)\n    is_found = (cached_value is not _CACHE_MISS)\n    return CachedResponse(is_found, key, cached_value)", "docstring": "Retrieves a CachedResponse for the given key from the django cache.\n\nIf the request was set to force cache misses, then this will always\nreturn a cache miss response.\n\nArgs:\nkey (string)\n\nReturns:\nA CachedResponse with is_found status and value.", "source": "codesearchnet"}
{"code": "def RunOnce(self):\n    if (not self._FetchServerCertificate()):\n        self.timer.Wait()\n        return HTTPObject(code=500)\n    if (self.http_manager.consecutive_connection_errors == 0):\n        message_list = self.client_worker.Drain(max_size=config.CONFIG['Client.max_post_size'])\n    else:\n        message_list = rdf_flows.MessageList()\n    for message in message_list.job:\n        if message.require_fastpoll:\n            self.timer.FastPoll()\n            break\n    payload = rdf_flows.ClientCommunication()\n    if self.client_worker.MemoryExceeded():\n        logging.info('Memory exceeded, will not retrieve jobs.')\n        payload.queue_size = 1000000\n    else:\n        payload.queue_size = self.client_worker.InQueueSize()\n    nonce = self.communicator.EncodeMessages(message_list, payload)\n    payload_data = payload.SerializeToString()\n    response = self.MakeRequest(payload_data)\n    if ((response.code != 200) or (response.messages is None)):\n        logging.info('%s: Could not connect to server at %s, status %s', self.communicator.common_name, self.http_manager.active_base_url, response.code)\n        self.server_certificate = None\n        messages = list(message_list.job)\n        for message in messages:\n            message.require_fastpoll = False\n            message.ttl -= 1\n            if (message.ttl > 0):\n                self.client_worker.QueueResponse(message)\n            else:\n                logging.info('Dropped message due to retransmissions.')\n        return response\n    if (response.nonce != nonce):\n        logging.info('Nonce not matched.')\n        response.code = 500\n        return response\n    if (response.source != self.communicator.server_name):\n        logging.info('Received a message not from the server %s, expected %s.', response.source, self.communicator.server_name)\n        response.code = 500\n        return response\n    for message in response.messages:\n        if message.require_fastpoll:\n            self.timer.FastPoll()\n            break\n    self.client_worker.QueueMessages(response.messages)\n    cn = self.communicator.common_name\n    logging.info('%s: Sending %s(%s), Received %s messages in %s sec. Sleeping for %s sec.', cn, len(message_list), len(payload_data), len(response.messages), response.duration, self.timer.sleep_time)\n    return response", "docstring": "Makes a single request to the GRR server.\n\nReturns:\nA Status() object indicating how the last POST went.", "source": "codesearchnet"}
{"code": "def scan_devices(self, subnet, timeout=None):\n    max_range = {16: 256, 24: 256, 25: 128, 27: 32, 28: 16, 29: 8, 30: 4, 31: 2}\n    if ('/' not in subnet):\n        mask = int(24)\n        network = subnet\n    else:\n        (network, mask) = subnet.split('/')\n        mask = int(mask)\n    if (mask not in max_range):\n        raise RuntimeError('Cannot determine the subnet mask!')\n    network = network.rpartition('.')[0]\n    if (mask == 16):\n        for i in range(0, 1):\n            network = network.rpartition('.')[0]\n    if (mask == 16):\n        for seq1 in range(0, max_range[mask]):\n            for seq2 in range(0, max_range[mask]):\n                ipaddr = '{0}.{1}.{2}'.format(network, seq1, seq2)\n                thd = threading.Thread(target=self.__raw_scan, args=(ipaddr, timeout))\n                thd.start()\n    else:\n        for seq1 in range(0, max_range[mask]):\n            ipaddr = '{0}.{1}'.format(network, seq1)\n            thd = threading.Thread(target=self.__raw_scan, args=(ipaddr, timeout))\n            thd.start()\n    return self.amcrest_ips", "docstring": "Scan cameras in a range of ips\n\nParams:\nsubnet - subnet, i.e: 192.168.1.0/24\nif mask not used, assuming mask 24\n\ntimeout_sec - timeout in sec\n\nReturns:", "source": "codesearchnet"}
{"code": "def endpoints(self):\n    children = [item.endpoints() for item in self.items]\n    return (self.name, self.endpoint, children)", "docstring": "Get all the endpoints under this node in a tree like structure.\n\nReturns:\n(tuple):\nname (str): This node's name.\nendpoint (str): Endpoint name relative to root.\nchildren (list): ``child.endpoints for each child", "source": "codesearchnet"}
{"code": "def item_at(self, row, column):\n        \n        return self.children[str(row)].children[str(column)]", "docstring": "Returns the TableItem instance at row, column cordinates\n\nArgs:\nrow (int): zero based index\ncolumn (int): zero based index", "source": "juraj-google-style"}
{"code": "def _get_remote(self, config, name):\n    from dvc.remote import Remote\n    remote = config.get(name)\n    if (not remote):\n        return None\n    settings = self.repo.config.get_remote_settings(remote)\n    return Remote(self.repo, settings)", "docstring": "The config file is stored in a way that allows you to have a\ncache for each remote.\n\nThis is needed when specifying external outputs\n(as they require you to have an external cache location).\n\nImagine a config file like the following:\n\n['remote \"dvc-storage\"']\nurl = ssh://localhost/tmp\nask_password = true\n\n[cache]\nssh = dvc-storage\n\nThis method resolves the name under the cache section into the\ncorrect Remote instance.\n\nArgs:\nconfig (dict): The cache section on the config file\nname (str): Name of the section we are interested in to retrieve\n\nReturns:\nremote (dvc.Remote): Remote instance that the section is referring.\nNone when there's no remote with that name.\n\nExample:\n>>> _get_remote(config={'ssh': 'dvc-storage'}, name='ssh')", "source": "codesearchnet"}
{"code": "def ReadFileObject(self, artifacts_reader, file_object):\n    \n    for artifact_definition in artifacts_reader.ReadFileObject(file_object):\n      self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from a file-like object.\n\nArgs:\nartifacts_reader (ArtifactsReader): an artifacts reader.\nfile_object (file): file-like object to read from.", "source": "juraj-google-style"}
{"code": "def exceptions_raised(self):\n    return self._exceptions_raised", "docstring": "Exceptions raised but not handled by the `QueueRunner` threads.\n\nExceptions raised in queue runner threads are handled in one of two ways\ndepending on whether or not a `Coordinator` was passed to\n`create_threads()`:\n\n* With a `Coordinator`, exceptions are reported to the coordinator and\nforgotten by the `QueueRunner`.\n* Without a `Coordinator`, exceptions are captured by the `QueueRunner` and\nmade available in this `exceptions_raised` property.\n\nReturns:\nA list of Python `Exception` objects.  The list is empty if no exception\nwas captured.  (No exceptions are captured when using a Coordinator.)", "source": "github-repos"}
{"code": "def add_link_to_self(self, source, weight):\n    if (not isinstance(source, list)):\n        source = [source]\n    for source_node in source:\n        source_node.add_link(self, weight=weight)", "docstring": "Create and add a ``Link`` from a source node to ``self``.\n\nArgs:\nsource (Node): The node that will own the new ``Link``\npointing to ``self``\nweight (int or float): The weight of the newly created ``Link``\n\nReturns: None\n\nExample:\n>>> node_1 = Node('One')\n>>> node_2 = Node('Two')\n>>> node_1.add_link_to_self(node_2, 5)\n>>> new_link = node_2.link_list[0]\n>>> print('{} {}'.format(new_link.target.value, new_link.weight))\nOne 5\n>>> print(new_link)\nnode.Link instance pointing to node with value \"One\" with weight 5", "source": "codesearchnet"}
{"code": "def get(identifier):\n    if identifier is None:\n        return None\n    elif isinstance(identifier, dict):\n        obj = deserialize(identifier)\n    elif isinstance(identifier, str):\n        config = {'class_name': identifier, 'config': {}}\n        obj = deserialize(config)\n    else:\n        obj = identifier\n    if isinstance(obj, Optimizer):\n        return obj\n    raise ValueError(f'Could not interpret optimizer identifier: {identifier}')", "docstring": "Retrieves a Keras Optimizer instance.\n\nArgs:\nidentifier: Optimizer identifier, one of:\n- String: name of an optimizer\n- Dictionary: configuration dictionary.\n- Keras Optimizer instance (it will be returned unchanged).\n\nReturns:\nA Keras Optimizer instance.", "source": "github-repos"}
{"code": "def global_step(device=''):\n    global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)\n    if global_step_ref:\n        return global_step_ref[0]\n    else:\n        collections = [VARIABLES_TO_RESTORE, tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP]\n        with tf.device(variable_device(device, 'global_step')):\n            return tf.get_variable('global_step', shape=[], dtype=tf.int64, initializer=tf.zeros_initializer(), trainable=False, collections=collections)", "docstring": "Returns the global step variable.\n\nArgs:\ndevice: Optional device to place the variable. It can be an string or a\nfunction that is called to get the device for the variable.\n\nReturns:\nthe tensor representing the global step variable.", "source": "codesearchnet"}
{"code": "def add_all_database_reactions(model, compartments):\n    added = set()\n    for rxnid in model.database.reactions:\n        reaction = model.database.get_reaction(rxnid)\n        if all(((compound.compartment in compartments) for (compound, _) in reaction.compounds)):\n            if (not model.has_reaction(rxnid)):\n                added.add(rxnid)\n            model.add_reaction(rxnid)\n    return added", "docstring": "Add all reactions from database that occur in given compartments.\n\nArgs:\nmodel: :class:`psamm.metabolicmodel.MetabolicModel`.", "source": "codesearchnet"}
{"code": "def _var_key(var):\n    if hasattr(var, '_distributed_container'):\n        var = var._distributed_container()\n    if var._in_graph_mode:\n        return var._shared_name\n    return var._unique_id", "docstring": "Key for representing a primary variable, for looking up slots.\n\nIn graph mode the name is derived from the var shared name.\nIn eager mode the name is derived from the var unique id.\nIf distribution strategy exists, get the primary variable first.\n\nArgs:\nvar: the variable.\n\nReturns:\nthe unique name of the variable.", "source": "github-repos"}
{"code": "def __init__(self, task_type=None, task_id=None, rpc_layer=None, environment=None):\n    self._task_type = task_type\n    self._task_id = task_id\n    self._rpc_layer = rpc_layer\n    self._environment = environment", "docstring": "Creates a new TFConfigClusterResolver.\n\nArgs:\ntask_type: (String, optional) Overrides the task type specified in the\nTF_CONFIG environment variable.\ntask_id: (Integer, optional) Overrides the task index specified in the\nTF_CONFIG environment variable.\nrpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.\nenvironment: (String, optional) Overrides the environment TensorFlow\noperates in.", "source": "github-repos"}
{"code": "def parametrize_xnp(*, with_none: bool=False, restrict: Optional[Iterable[str]]=None, skip: Optional[Iterable[str]]=None) -> Callable[[_FnT], _FnT]:\n    name_to_modules = {'np': lambda: np, 'jnp': lambda: lazy.jnp, 'tnp': lambda: lazy.tnp, 'torch': lambda: lazy.torch}\n    keep = _normalize_set(restrict, default=name_to_modules, valid=name_to_modules)\n    skip = _normalize_set(skip, default=[], valid=name_to_modules)\n    name_to_modules = {k: v() for k, v in name_to_modules.items() if k not in skip and k in keep}\n    if with_none:\n        name_to_modules['no_np'] = None\n    return pytest.mark.parametrize('xnp', list(name_to_modules.values()), ids=list(name_to_modules.keys()))", "docstring": "Parametrize over the numpy modules.\n\nArgs:\nwith_none: If `True`, also yield `None` among the values (to test `list`)\nrestrict: If given, only test the given module (e.g. `restrict=['jnp']`)\nskip: If given, skip the given module from test (e.g. `skip=['torch']`)\n\nReturns:\nThe fixture to apply to the `def test_xyz()` function", "source": "github-repos"}
{"code": "def __init__(self, kind, required=False, default_factory=None,\n               can_be_none=False):\n    \n    if required and default_factory is not None:\n      raise ValueError(\"No default_factory value when option is required.\")\n    self.kind = kind\n    self.required = required\n    self.default_factory = default_factory\n    self.can_be_none = can_be_none", "docstring": "Init.\n\nArgs:\nkind: type of the option.\nrequired: whether user is required to supply a value.\ndefault_factory: a factory, when called, returns the default value.\ncan_be_none: whether value can be None.\n\nRaises:\nValueError: if arguments aren't compatible.", "source": "juraj-google-style"}
{"code": "def __init__(self, existing_stack: Optional[list[TraceableObject[T]]]=None):\n    self._stack: list[TraceableObject[T]] = existing_stack[:] if existing_stack else []", "docstring": "Constructor.\n\nArgs:\nexisting_stack: [TraceableObject, ...] If provided, this object will\nset its new stack to a SHALLOW COPY of existing_stack.", "source": "github-repos"}
{"code": "def __init__(self, max_simultaneous_downloads=50, checksumer=None):\n    \n    self._executor = concurrent.futures.ThreadPoolExecutor(\n        max_workers=max_simultaneous_downloads)\n    self._checksumer = checksumer or hashlib.sha256\n    self._pbar_url = None\n    self._pbar_dl_size = None", "docstring": "Init _Downloader instance.\n\nArgs:\nmax_simultaneous_downloads: `int`, max number of simultaneous downloads.\nchecksumer: `hashlib.HASH`. Defaults to `hashlib.sha256`.", "source": "juraj-google-style"}
{"code": "def init_benchmarks(n_values=None):\n    if (n_values is None):\n        n_values = (0, 5, 50, 250, 1000, 5000, 10000)\n    string_tables = {n: gen_string_table(n) for n in n_values}\n    regexs = gen_regex_table()\n    data = []\n    for n in n_values:\n        for id in xrange(len(regexs)):\n            regex = regexs[id]\n            string = string_tables[n][id]\n            data.append((regex, string))\n    return data", "docstring": "Initialize the strings we'll run the regexes against.\n\nThe strings used in the benchmark are prefixed and suffixed by\nstrings that are repeated n times.\n\nThe sequence n_values contains the values for n.\nIf n_values is None the values of n from the original benchmark\nare used.\n\nThe generated list of strings is cached in the string_tables\nvariable, which is indexed by n.\n\nReturns:\nA list of string prefix/suffix lengths.", "source": "codesearchnet"}
{"code": "def diagonalize_real_symmetric_matrix(\n        matrix: np.ndarray,\n        *,\n        rtol: float = 1e-5,\n        atol: float = 1e-8) -> np.ndarray:\n    \n\n    \n    if np.any(np.imag(matrix) != 0) or not predicates.is_hermitian(matrix):\n        raise ValueError('Input must be real and symmetric.')\n\n    _, result = np.linalg.eigh(matrix)\n\n    return result", "docstring": "Returns an orthogonal matrix that diagonalizes the given matrix.\n\nArgs:\nmatrix: A real symmetric matrix to diagonalize.\nrtol: float = 1e-5,\natol: float = 1e-8\n\nReturns:\nAn orthogonal matrix P such that P.T @ matrix @ P is diagonal.\n\nRaises:\nValueError: Matrix isn't real symmetric.", "source": "juraj-google-style"}
{"code": "def _split_input_from_namespace(cls, app, namespace, entity_kind, shard_count):\n    raw_entity_kind = cls._get_raw_entity_kind(entity_kind)\n    if (shard_count == 1):\n        return [key_range.KeyRange(namespace=namespace, _app=app)]\n    ds_query = datastore.Query(kind=raw_entity_kind, namespace=namespace, _app=app, keys_only=True)\n    ds_query.Order('__scatter__')\n    random_keys = ds_query.Get((shard_count * cls._OVERSAMPLING_FACTOR))\n    if (not random_keys):\n        return ([key_range.KeyRange(namespace=namespace, _app=app)] + ([None] * (shard_count - 1)))\n    random_keys.sort()\n    if (len(random_keys) >= shard_count):\n        random_keys = cls._choose_split_points(random_keys, shard_count)\n    key_ranges = []\n    key_ranges.append(key_range.KeyRange(key_start=None, key_end=random_keys[0], direction=key_range.KeyRange.ASC, include_start=False, include_end=False, namespace=namespace, _app=app))\n    for i in range(0, (len(random_keys) - 1)):\n        key_ranges.append(key_range.KeyRange(key_start=random_keys[i], key_end=random_keys[(i + 1)], direction=key_range.KeyRange.ASC, include_start=True, include_end=False, namespace=namespace, _app=app))\n    key_ranges.append(key_range.KeyRange(key_start=random_keys[(- 1)], key_end=None, direction=key_range.KeyRange.ASC, include_start=True, include_end=False, namespace=namespace, _app=app))\n    if (len(key_ranges) < shard_count):\n        key_ranges += ([None] * (shard_count - len(key_ranges)))\n    return key_ranges", "docstring": "Helper for _split_input_from_params.\n\nIf there are not enough Entities to make all of the given shards, the\nreturned list of KeyRanges will include Nones. The returned list will\ncontain KeyRanges ordered lexographically with any Nones appearing at the\nend.\n\nArgs:\napp: the app.\nnamespace: the namespace.\nentity_kind: entity kind as string.\nshard_count: the number of shards.\n\nReturns:\nKeyRange objects.", "source": "codesearchnet"}
{"code": "def create_run_group(prj):\n    from benchbuild.utils import schema as s\n    session = s.Session()\n    experiment = prj.experiment\n    group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)\n    session.add(group)\n    session.commit()\n    return (group, session)", "docstring": "Create a new 'run_group' in the database.\n\nThis creates a new transaction in the database and creates a new run_group\nwithin this transaction. Afterwards we return both the transaction as well\nas the run_group itself. The user is responsible for committing it when the\ntime comes.\n\nArgs:\nprj - The project for which we open the run_group.\n\nReturns:\nA tuple (group, session) containing both the newly created run_group and\nthe transaction object.", "source": "codesearchnet"}
{"code": "def sort_sites_by_integrated_chg(self, r=0.4):\n        \n\n        if self.extrema_type is None:\n            self.get_local_extrema()\n        int_den = []\n        for isite in self.extrema_coords:\n            mask = self._dist_mat(isite) < r\n            vol_sphere = self.chgcar.structure.volume * (mask.sum()/self.chgcar.ngridpts)\n            chg_in_sphere = np.sum(self.chgcar.data['total'] * mask) / mask.size / vol_sphere\n            int_den.append(chg_in_sphere)\n        self._extrema_df['avg_charge_den'] = int_den\n        self._extrema_df.sort_values(by=['avg_charge_den'], inplace=True)\n        self._extrema_df.reset_index(drop=True, inplace=True)", "docstring": "Get the average charge density around each local minima in the charge density\nand store the result in _extrema_df\nArgs:\nr (float): radius of sphere around each site to evaluate the average", "source": "juraj-google-style"}
{"code": "def find_rt_jar(javahome=None):\n    if (not javahome):\n        if ('JAVA_HOME' in os.environ):\n            javahome = os.environ['JAVA_HOME']\n        elif (sys.platform == 'darwin'):\n            javahome = _find_osx_javahome()\n        else:\n            javahome = _get_javahome_from_java(_find_java_binary())\n    rtpath = os.path.join(javahome, 'jre', 'lib', 'rt.jar')\n    if (not os.path.isfile(rtpath)):\n        msg = 'Could not find rt.jar: {} is not a file'.format(rtpath)\n        raise ExtensionError(msg)\n    return rtpath", "docstring": "Find the path to the Java standard library jar.\n\nThe jar is expected to exist at the path 'jre/lib/rt.jar' inside a\nstandard Java installation directory. The directory is found using\nthe following procedure:\n\n1. If the javehome argument is provided, use the value as the\ndirectory.\n2. If the JAVA_HOME environment variable is set, use the value as\nthe directory.\n3. Find the location of the ``java`` binary in the current PATH and\ncompute the installation directory from this location.\n\nArgs:\njavahome: A path to a Java installation directory (optional).", "source": "codesearchnet"}
{"code": "def check_task(taskid, timeout=DEFAULT_TASK_TIMEOUT, wait=2):\n    max_attempts = int((timeout / wait))\n    try:\n        return retry_call(partial(_check_task, taskid), max_attempts=max_attempts, wait=wait, exceptions=(AssertionError, ValueError))\n    except ValueError:\n        raise SpinnakerTaskInconclusiveError('Task failed to complete in {0} seconds: {1}'.format(timeout, taskid))", "docstring": "Wrap check_task.\n\nArgs:\ntaskid (str): Existing Spinnaker Task ID.\ntimeout (int, optional): Consider Task failed after given seconds.\nwait (int, optional): Seconds to pause between polling attempts.\n\nReturns:\nstr: Task status.\n\nRaises:\nAssertionError: API did not respond with a 200 status code.\n:obj:`foremast.exceptions.SpinnakerTaskInconclusiveError`: Task did not\nreach a terminal state before the given time out.", "source": "codesearchnet"}
{"code": "def extract_tree_without(self, labels, suppress_unifurcations=True):\n    return self.extract_tree(labels, True, suppress_unifurcations)", "docstring": "Extract a copy of this ``Tree`` without the leaves labeled by the strings in ``labels``\n\nArgs:\n``labels`` (``set``): Set of leaf labels to exclude\n\n``suppress_unifurcations`` (``bool``): ``True`` to suppress unifurcations, otherwise ``False``\n\nReturns:\n``Tree``: Copy of this ``Tree``, exluding the leaves labeled by the strings in ``labels``", "source": "codesearchnet"}
{"code": "def path_fraction_point(points, fraction):\n    (seg_id, offset) = path_fraction_id_offset(points, fraction, relative_offset=True)\n    return linear_interpolate(points[seg_id], points[(seg_id + 1)], offset)", "docstring": "Computes the point which corresponds to the fraction\nof the path length along the piecewise linear curve which\nis constructed from the set of points.\n\nArgs:\npoints: an iterable of indexable objects with indices\n0, 1, 2 correspoding to 3D cartesian coordinates\nfraction: path length fraction (0 <= fraction <= 1)\n\nReturns:\nThe 3D coordinates of the aforementioned point", "source": "codesearchnet"}
{"code": "def escape_yaml(raw_str: str) -> str:\n    escape_list = [char for char in raw_str if (char in ['!', '{', '['])]\n    if (len(escape_list) == 0):\n        return raw_str\n    str_quotes = '\"'\n    i_str_quotes = \"'\"\n    if ((str_quotes in raw_str) and (str_quotes not in raw_str[1:(- 1)])):\n        return raw_str\n    if (str_quotes in raw_str[1:(- 1)]):\n        raw_str = ((i_str_quotes + raw_str) + i_str_quotes)\n    else:\n        raw_str = ((str_quotes + raw_str) + str_quotes)\n    return raw_str", "docstring": "Shell-Escape a yaml input string.\n\nArgs:\nraw_str: The unescaped string.", "source": "codesearchnet"}
{"code": "def calculate_parent_python_path(test_filepath):\n    split_path = test_filepath.rsplit(FLAGS.bazel_repo_root, 1)\n    if len(split_path) < 2:\n        raise ValueError(f'Filepath \"{test_filepath}\" does not contain repo root \"{FLAGS.bazel_repo_root}\"')\n    path = FLAGS.bazel_repo_root + split_path[1]\n    path = path.rsplit('/', 1)[0]\n    return path.replace('/', '.')", "docstring": "Returns the absolute import path for the containing directory.\n\nArgs:\ntest_filepath: The filepath which Bazel invoked\n(ex: /filesystem/path/tensorflow/tensorflow/python/tpu/tpu_test)\n\nReturns:\nAbsolute import path of parent (ex: tensorflow.python.tpu).\n\nRaises:\nValueError: if bazel_repo_root does not appear within test_filepath.", "source": "github-repos"}
{"code": "def get_by_hostname(self, hostname):\n    resources = self._client.get_all()\n    resources_filtered = [x for x in resources if (x['hostname'] == hostname)]\n    if resources_filtered:\n        return resources_filtered[0]\n    else:\n        return None", "docstring": "Retrieve a storage system by its hostname.\n\nWorks only in API500 onwards.\n\nArgs:\nhostname: Storage system hostname.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def pop_all(self, event_name):\n        \n        if not self.started:\n            raise IllegalStateError((\"Dispatcher needs to be started before \"\n                                     \"popping.\"))\n        results = []\n        try:\n            self.lock.acquire()\n            while True:\n                e = self.event_dict[event_name].get(block=False)\n                results.append(e)\n        except (queue.Empty, KeyError):\n            return results\n        finally:\n            self.lock.release()", "docstring": "Return and remove all stored events of a specified name.\n\nPops all events from their queue. May miss the latest ones.\nIf no event is available, return immediately.\n\nArgs:\nevent_name: Name of the events to be popped.\n\nReturns:\nList of the desired events.\n\nRaises:\nIllegalStateError: Raised if pop is called before the dispatcher\nstarts polling.", "source": "juraj-google-style"}
{"code": "def get_metric_fns(metric_names, labels, outputs):\n  \n  metric_fns = {}\n  for metric_name in metric_names:\n    metric_fn_name = metric_name.split(\"/\")[-1]\n    if hasattr(metrics, metric_fn_name):\n      metric_fn = getattr(metrics, metric_fn_name)\n      metric_fns[metric_name] = metric_fn(labels, outputs)\n    else:\n      raise ValueError(\"Metric {} is not implemented\".format(metric_fn_name))\n\n  return metric_fns", "docstring": "Generate a dictionary of metric name to metric function.\n\nArgs:\nmetric_names: list of strings in the format \"prefix/metric_function_name\".\nmetric_function_name should refer to a function name in metrics.py. The\nprefix will be included in the key in the returned dict.\nlabels: a tensor where batch is the first dimension.\noutputs: a tensor of model predictions, same dimensionality as labels.\n\nReturns:\nmetric_fns: dict of metric functions keyed by their name.", "source": "juraj-google-style"}
{"code": "def register_app(self, app):\n    app.route(self.uri, methods=self.methods)(self.callable_obj)\n    return self", "docstring": "Register the route object to a `bottle.Bottle` app instance.\n\nArgs:\napp (instance):\n\nReturns:\nRoute instance (for chaining purposes)", "source": "codesearchnet"}
{"code": "def fetch(clobber=False):\n    \n\n    dest_dir = fname_pattern = os.path.join(data_dir(), 'chen2014')\n    url = 'http:\n    dat_fname = os.path.join(dest_dir, 'chen2014.dat')\n    h5_fname = os.path.join(dest_dir, 'chen2014.h5')\n    md5 = 'f8a2bc46d411c57ca4c76dc344e291f1'\n\n    \n    if not clobber:\n        h5_size = 52768768 \n        h5_dsets = {\n            'dists': (30,),\n            'pix_lb': (557398, 2),\n            'A_r': (557398, 30),\n            'A_r_err': (557398, 30)\n        }\n        if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets):\n            print('File appears to exist already. Call `fetch(clobber=True)` '\n                  'to force overwriting of existing file.')\n            return\n\n    \n    print('Downloading {}'.format(url))\n    fetch_utils.download_and_verify(url, md5, fname=dat_fname)\n\n    \n    print('Repacking files...')\n    ascii2h5(dat_fname, h5_fname)\n\n    \n    print('Removing original file...')\n    os.remove(dat_fname)", "docstring": "Downloads the Chen et al. (2014) dust map.\n\nArgs:\nclobber (Optional[:obj:`bool`]): If ``True``, any existing file will be\noverwritten, even if it appears to match. If ``False`` (the\ndefault), :obj:`fetch()` will attempt to determine if the dataset\nalready exists. This determination is not 100\\% robust against data\ncorruption.", "source": "juraj-google-style"}
{"code": "async def process_check_ins(self):\n    params = {'include_participants': 1, 'include_matches': (1 if AUTO_GET_MATCHES else 0)}\n    res = (await self.connection('POST', 'tournaments/{}/process_check_ins'.format(self._id), **params))\n    self._refresh_from_json(res)", "docstring": "finalize the check in phase\n\n|methcoro|\n\nWarning:\n|unstable|\n\nNote:\n|from_api| This should be invoked after a tournament's check-in window closes before the tournament is started.\n1. Marks participants who have not checked in as inactive.\n2. Moves inactive participants to bottom seeds (ordered by original seed).\n3. Transitions the tournament state from 'checking_in' to 'checked_in'\nNOTE: Checked in participants on the waiting list will be promoted if slots become available.\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def get(cls, resource_type):\n    if isinstance(resource_type, str):\n        obj = getattr(db, cls.__name__).find_one((cls.resource_type == resource_type))\n    elif isinstance(resource_type, int):\n        obj = getattr(db, cls.__name__).find_one((cls.resource_type_id == resource_type))\n    elif isinstance(resource_type, cls):\n        return resource_type\n    else:\n        obj = None\n    if (not obj):\n        obj = cls()\n        obj.resource_type = resource_type\n        db.session.add(obj)\n        db.session.commit()\n        db.session.refresh(obj)\n    return obj", "docstring": "Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will\nbe created in the database and returned\n\nArgs:\nresource_type (str): Resource type name\n\nReturns:\n:obj:`ResourceType`", "source": "codesearchnet"}
{"code": "def write_new_config(self, updates):\n        \n        with open(self._new_config, 'w') as config_file:\n            for update in updates:\n                line = '{0}=={1}  \n                    update.name,\n                    update.new_version,\n                    update.current_version\n                )\n\n                config_file.write(line)", "docstring": "Given a list of updates, write the updates out to the provided\nconfiguartion file.\n\nArgs:\nupdates (list): List of Update objects.", "source": "juraj-google-style"}
{"code": "def post_op(self, id: str, path_data: Union[dict, None], post_data: Any) -> dict:\n        \n        path = self._get_path_for_op_id(id)\n        return self.post_path(path, path_data, post_data)", "docstring": "Modifies the ESI by looking up an operation id.\n\nArgs:\npath: raw ESI URL path\npath_data: data to format the path with (can be None)\npost_data: data to send to ESI\n\nReturns:\nESI data", "source": "juraj-google-style"}
{"code": "def notify_rollover(self, stream):\n    self.offset -= 1\n    if (not self.matches(stream)):\n        return\n    if (self._count == 0):\n        raise InternalError('BufferedStreamWalker out of sync with storage engine, count was wrong.')\n    self._count -= 1", "docstring": "Notify that a reading in the given stream was overwritten.\n\nArgs:\nstream (DataStream): The stream that had overwritten data.", "source": "codesearchnet"}
{"code": "def _list(self, dir_or_prefix):\n    try:\n        for path, (size, updated) in s3io.S3IO(options=self._options).list_files(dir_or_prefix, with_metadata=True):\n            yield FileMetadata(path, size, updated)\n    except Exception as e:\n        raise BeamIOError('List operation failed', {dir_or_prefix: e})", "docstring": "List files in a location.\n\nListing is non-recursive, for filesystems that support directories.\n\nArgs:\ndir_or_prefix: (string) A directory or location prefix (for filesystems\nthat don't have directories).\n\nReturns:\nGenerator of ``FileMetadata`` objects.\n\nRaises:\n``BeamIOError``: if listing fails, but not if no files were found.", "source": "github-repos"}
{"code": "def add_config_paths(**kwargs):\n    \n\n    for k, path in kwargs.items():\n        if not os.path.exists(path):\n            raise ValueError(\n                'Configuration file \"{}\" does not exist'.format(k))\n        if k in cf.get_option('config_paths'):\n            raise ValueError('Configuration {!r} already exists'.format(k))\n\n    kwargs.update(**cf.get_option('config_paths'))\n    cf.set_option('config_paths', kwargs)", "docstring": "Add to the pool of available configuration files for BIDSLayout.\n\nArgs:\nkwargs: dictionary specifying where to find additional config files.\nKeys are names, values are paths to the corresponding .json file.\n\nExample:\n> add_config_paths(my_config='/path/to/config')\n> layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])", "source": "juraj-google-style"}
{"code": "def __learn_labels(self, labels):\n    if (self.feature_length > 0):\n        result = list(self.labels.classes_)\n    else:\n        result = []\n    for label in labels:\n        result.append(label)\n    self.labels.fit(result)", "docstring": "Learns new labels, this method is intended for internal use\n\nArgs:\nlabels (:obj:`list` of :obj:`str`): Labels to learn", "source": "codesearchnet"}
{"code": "def main(conf_file, overwrite, logger):\n    uid = pwd.getpwnam(get_username()).pw_uid\n    logger.info('Stopping the daemon.')\n    sh.service(get_service_name(), 'stop')\n    logger.info('Creating config file.')\n    create_config(cnf_file=conf_file, uid=uid, overwrite=overwrite)\n    logger.info('Creating log file.')\n    create_log(log_file=REQUIRED_SETTINGS['LogFile'], uid=uid)\n    logger.info('Starting the daemon..')\n    sh.service(get_service_name(), 'start')", "docstring": "Create configuration and log file. Restart the daemon when configuration\nis done.\n\nArgs:\nconf_file (str): Path to the configuration file.\noverwrite (bool): Overwrite the configuration file with `clean` config?", "source": "codesearchnet"}
{"code": "def do_patch(endpoint, body, access_token):\n    headers = {'content-type': 'application/json', 'Authorization': ('Bearer ' + access_token)}\n    headers['User-Agent'] = get_user_agent()\n    return requests.patch(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP PATCH request and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\nbody (str): JSON body of information to patch.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def sg_summary_loss(tensor, prefix='losses', name=None):\n    prefix = ('' if (prefix is None) else (prefix + '/'))\n    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))\n    _scalar(name, tf.reduce_mean(tensor))\n    _histogram((name + '-h'), tensor)", "docstring": "r\"\"\"Register `tensor` to summary report as `loss`\n\nArgs:\ntensor: A `Tensor` to log as loss\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _add_arg_python(self, key, value=None, mask=False):\n        \n        self._data[key] = value\n        if not value:\n            \n            pass\n        elif value is True:\n            \n            self._args.append('--{}'.format(key))\n            self._args_quoted.append('--{}'.format(key))\n            self._args_masked.append('--{}'.format(key))\n        else:\n            self._args.append('--{}={}'.format(key, value))\n            if mask:\n                \n                value = 'x' * len(str(value))\n            else:\n                \n                value = self.quote(value)\n            self._args_quoted.append('--{}={}'.format(key, value))\n            self._args_masked.append('--{}={}'.format(key, value))", "docstring": "Add CLI Arg formatted specifically for Python.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).\nmask (boolean, default:False): Indicates whether no mask value.", "source": "juraj-google-style"}
{"code": "def is_admin(name):\n    \n    groups = get_user_groups(name, True)\n\n    for group in groups:\n        if group in ('S-1-5-32-544', 'S-1-5-18'):\n            return True\n\n    return False", "docstring": "Is the passed user a member of the Administrators group\n\nArgs:\nname (str): The name to check\n\nReturns:\nbool: True if user is a member of the Administrators group, False\notherwise", "source": "juraj-google-style"}
{"code": "def rgb_to_hex(cls, color):\n        \n        return '\n            cls._bound_color_value(color[0]),\n            cls._bound_color_value(color[1]),\n            cls._bound_color_value(color[2])).upper()", "docstring": "Convert an ``(r, g, b)`` color tuple to a hexadecimal string.\n\nAlphabetical characters in the output will be capitalized.\n\nArgs:\ncolor (tuple): An rgb color tuple of form: (int, int, int)\n\nReturns: string\n\nExample:\n>>> SoftColor.rgb_to_hex((0, 0, 0))\n'#000000'\n>>> SoftColor.rgb_to_hex((255, 255, 255))\n'#FFFFFF'", "source": "juraj-google-style"}
{"code": "def Lookup(self, name):\n        \n        if name == '@':\n            return self.stack[-1].context\n        \n        parts = name.split('.')\n        value = self._LookUpStack(parts[0])\n        \n        \n        for part in parts[1:]:\n            try:\n                value = value[part]\n            except (KeyError, TypeError):  \n                return self._Undefined(part)\n        \n        return value", "docstring": "Get the value associated with a name in the current context.\n\nThe current context could be an dictionary in a list, or a dictionary\noutside a list.\n\nArgs:\nname: name to lookup, e.g. 'foo' or 'foo.bar.baz'\n\nReturns:\nThe value, or self.undefined_str\n\nRaises:\nUndefinedVariable if self.undefined_str is not set", "source": "juraj-google-style"}
{"code": "def set_session(session):\n    global _SESSION\n    _SESSION.session = session", "docstring": "Sets the global TensorFlow session.\n\nArgs:\nsession: A TF Session.", "source": "github-repos"}
{"code": "def recover_cfg_all(self, entries, symbols=None, callback=None, arch_mode=None):\n        \n        \n        if arch_mode is None:\n            arch_mode = self.binary.architecture_mode\n\n        \n        self._load(arch_mode=arch_mode)\n\n        \n        symbols = {} if not symbols else symbols\n\n        \n        cfgs = []\n        addrs_processed = set()\n        calls = entries\n\n        while len(calls) > 0:\n            start, calls = calls[0], calls[1:]\n\n            cfg, calls_tmp = self._recover_cfg(start=start, symbols=symbols, callback=callback)\n\n            addrs_processed.add(start)\n\n            cfgs.append(cfg)\n\n            for addr in sorted(calls_tmp):\n                if addr not in addrs_processed and addr not in calls:\n                    calls.append(addr)\n\n        return cfgs", "docstring": "Recover CFG for all functions from an entry point and/or symbol table.\n\nArgs:\nentries (list): A list of function addresses' to start the CFG recovery process.\nsymbols (dict): Symbol table.\ncallback (function): A callback function which is called after each successfully recovered CFG.\narch_mode (int): Architecture mode.\n\nReturns:\nlist: A list of recovered CFGs.", "source": "juraj-google-style"}
{"code": "def _ParseFiletime(self, byte_stream):\n    \n    filetime_map = self._GetDataTypeMap('filetime')\n\n    try:\n      filetime = self._ReadStructureFromByteStream(\n          byte_stream, 0, filetime_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse FILETIME value with error: {0!s}'.format(\n              exception))\n\n    if filetime == 0:\n      return None\n\n    try:\n      return dfdatetime_filetime.Filetime(timestamp=filetime)\n    except ValueError:\n      raise errors.ParseError(\n          'Invalid FILETIME value: 0x{0:08x}'.format(filetime))", "docstring": "Parses a FILETIME date and time value from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\ndfdatetime.Filetime: FILETIME date and time value or None if no\nvalue is set.\n\nRaises:\nParseError: if the FILETIME could not be parsed.", "source": "juraj-google-style"}
{"code": "def duplicate_doc_file(doc_file: Union[str, os.PathLike], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[Union[str, os.PathLike]]=None, frameworks: Optional[List[str]]=None):\n    with open(doc_file, 'r', encoding='utf-8') as f:\n        content = f.read()\n    content = re.sub('<!--\\\\s*Copyright (\\\\d+)\\\\s', f'<!--Copyright {CURRENT_YEAR} ', content)\n    if frameworks is None:\n        frameworks = get_default_frameworks()\n    if dest_file is None:\n        dest_file = Path(doc_file).parent / f'{new_model_patterns.model_type}.md'\n    lines = content.split('\\n')\n    blocks = []\n    current_block = []\n    for line in lines:\n        if line.startswith('\n            blocks.append('\\n'.join(current_block))\n            current_block = [line]\n        else:\n            current_block.append(line)\n    blocks.append('\\n'.join(current_block))\n    new_blocks = []\n    in_classes = False\n    for block in blocks:\n        if not block.startswith('\n            new_blocks.append(block)\n        elif re.search('^\n            new_blocks.append(f'\n        elif not in_classes and old_model_patterns.config_class in block.split('\\n')[0]:\n            in_classes = True\n            new_blocks.append(DOC_OVERVIEW_TEMPLATE.format(model_name=new_model_patterns.model_name))\n            new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)\n            new_blocks.append(new_block)\n        elif in_classes:\n            in_classes = True\n            block_title = block.split('\\n')[0]\n            block_class = re.search('^\n            new_block, _ = replace_model_patterns(block, old_model_patterns, new_model_patterns)\n            if 'Tokenizer' in block_class:\n                if old_model_patterns.tokenizer_class != new_model_patterns.tokenizer_class:\n                    new_blocks.append(new_block)\n            elif 'ImageProcessor' in block_class:\n                if old_model_patterns.image_processor_class != new_model_patterns.image_processor_class:\n                    new_blocks.append(new_block)\n            elif 'ImageProcessorFast' in block_class:\n                if old_model_patterns.image_processor_fast_class != new_model_patterns.image_processor_fast_class:\n                    new_blocks.append(new_block)\n            elif 'FeatureExtractor' in block_class:\n                if old_model_patterns.feature_extractor_class != new_model_patterns.feature_extractor_class:\n                    new_blocks.append(new_block)\n            elif 'Processor' in block_class:\n                if old_model_patterns.processor_class != new_model_patterns.processor_class:\n                    new_blocks.append(new_block)\n            elif block_class.startswith('Flax'):\n                if 'flax' in frameworks:\n                    new_blocks.append(new_block)\n            elif block_class.startswith('TF'):\n                if 'tf' in frameworks:\n                    new_blocks.append(new_block)\n            elif len(block_class.split(' ')) == 1:\n                if 'pt' in frameworks:\n                    new_blocks.append(new_block)\n            else:\n                new_blocks.append(new_block)\n    with open(dest_file, 'w', encoding='utf-8') as f:\n        f.write('\\n'.join(new_blocks))", "docstring": "Duplicate a documentation file and adapts it for a new model.\n\nArgs:\nmodule_file (`str` or `os.PathLike`): Path to the doc file to duplicate.\nold_model_patterns (`ModelPatterns`): The patterns for the 
old model.\nnew_model_patterns (`ModelPatterns`): The patterns for the new model.\ndest_file (`str` or `os.PathLike`, *optional*): Path to the new doc file.\nWill default to the a file named `{new_model_patterns.model_type}.md` in the same folder as `module_file`.\nframeworks (`List[str]`, *optional*):\nIf passed, will only keep the model classes corresponding to this list of frameworks in the new doc file.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n        \n        self.ListLogMetrics = channel.unary_unary(\n            \"/google.logging.v2.MetricsServiceV2/ListLogMetrics\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.ListLogMetricsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.ListLogMetricsResponse.FromString,\n        )\n        self.GetLogMetric = channel.unary_unary(\n            \"/google.logging.v2.MetricsServiceV2/GetLogMetric\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.GetLogMetricRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.LogMetric.FromString,\n        )\n        self.CreateLogMetric = channel.unary_unary(\n            \"/google.logging.v2.MetricsServiceV2/CreateLogMetric\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.CreateLogMetricRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.LogMetric.FromString,\n        )\n        self.UpdateLogMetric = channel.unary_unary(\n            \"/google.logging.v2.MetricsServiceV2/UpdateLogMetric\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.UpdateLogMetricRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.LogMetric.FromString,\n        )\n        self.DeleteLogMetric = channel.unary_unary(\n            \"/google.logging.v2.MetricsServiceV2/DeleteLogMetric\",\n            request_serializer=google_dot_cloud_dot_logging__v2_dot_proto_dot_logging__metrics__pb2.DeleteLogMetricRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def from_dict(self, dictionary):\n        \n\n        for remote_name, remote_value in dictionary.items():\n            \n            \n            local_name = next((name for name, attribute in self._attributes.items() if attribute.remote_name == remote_name), None)\n\n            if local_name:\n                setattr(self, local_name, remote_value)\n            else:\n                \n                pass", "docstring": "Sets all the exposed ReST attribues from the given dictionary\n\nArgs:\ndictionary (dict): dictionnary containing the raw object attributes and their values.\n\nExample:\n>>> info = {\"name\": \"my group\", \"private\": False}\n>>> group = NUGroup()\n>>> group.from_dict(info)\n>>> print \"name: %s - private: %s\" % (group.name, group.private)\n\"name: my group - private: False\"", "source": "juraj-google-style"}
{"code": "def _convert_op_hints_to_stubs_helper(graph_def, write_callback=lambda sess, graph_def: None):\n    hints = _find_all_hints_in_nodes(graph_def.node)\n    hints_q = []\n    for hint in hints.values():\n        hints_q.append((hint.level, hint.uuid))\n    hints_q.sort(key=lambda tup: tup[0])\n    for i in range(len(hints_q) - 1, -1, -1):\n        level, hint_uuid = hints_q[i]\n    curr_graph_def = graph_def\n    del graph_def\n    for i in range(len(hints_q) - 1, -1, -1):\n        level, hint_uuid = hints_q[i]\n        if level >= 2:\n            children_hints, curr_graph_def, function_def_nodes = _find_children_hints(hints[hint_uuid], curr_graph_def)\n            assert len(children_hints) > 0\n            children_inputs_mappings = hints[hint_uuid].children_inputs_mappings\n            for j, child_hint in enumerate(children_hints):\n                if j == 0:\n                    for mapping in children_inputs_mappings['parent_first_child_input']:\n                        parent_input_index = _get_correct_mapping(mapping['parent_ophint_input_index'], hints[hint_uuid].inputs)\n                        child_input_index = _get_correct_mapping(mapping['first_child_ophint_input_index'], child_hint.inputs)\n                        child_hint.inputs[child_input_index] = hints[hint_uuid].inputs[parent_input_index]\n                else:\n                    for mapping in children_inputs_mappings['internal_children_input_output']:\n                        input_index = _get_correct_mapping(mapping['child_input_index'], child_hint.inputs)\n                        output_index = _get_correct_mapping(mapping['child_output_index'], children_hints[j - 1].outputs)\n                        child_hint.inputs[input_index] = children_hints[j - 1].outputs[output_index]\n                if j == len(children_hints) - 1:\n                    for mapping in children_inputs_mappings['parent_last_child_output']:\n                        parent_output_index = _get_correct_mapping(mapping['parent_output_index'], hints[hint_uuid].outputs)\n                        child_output_index = _get_correct_mapping(mapping['child_output_index'], child_hint.outputs)\n                        child_hint.outputs[child_output_index] = hints[hint_uuid].outputs[parent_output_index]\n            for j, child_hint in enumerate(children_hints):\n                curr_graph_def = _convert_single_op_hint_to_stub(child_hint, curr_graph_def, function_def_nodes, j == len(children_hints) - 1)\n        else:\n            curr_graph_def = _convert_single_op_hint_to_stub(hints[hint_uuid], curr_graph_def)\n            write_callback(curr_graph_def, 'initial')\n    curr_graph_def = _remove_redundant_stack_unstack(curr_graph_def)\n    return curr_graph_def", "docstring": "Converts a graph_def to a new graph_def where all op hints are stubbed.\n\nArgs:\ngraph_def: A graph def that we should convert.\nwrite_callback: A function pointer that can be used to write intermediate\nsteps of graph transformation (optional).\n\nReturns:\nA new stubbed graph_def.", "source": "github-repos"}
{"code": "def _awaitReset(self, utcTimeStamp, verbose=True):\n        \n        resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp))\n        _vPrint(verbose, \"--- Current Timestamp\")\n        _vPrint(verbose, \"      %s\" % (time.strftime('%c')))\n        now = pytz.utc.localize(datetime.utcnow())\n        waitTime = round((resetTime - now).total_seconds()) + 1\n        _vPrint(verbose, \"--- Current UTC Timestamp\")\n        _vPrint(verbose, \"      %s\" % (now.strftime('%c')))\n        _vPrint(verbose, \"--- GITHUB NEEDS A BREAK Until UTC Timestamp\")\n        _vPrint(verbose, \"      %s\" % (resetTime.strftime('%c')))\n        self._countdown(waitTime, printString=\"--- Waiting %*d seconds...\", verbose=verbose)\n        _vPrint(verbose, \"--- READY!\")", "docstring": "Wait until the given UTC timestamp.\n\nArgs:\nutcTimeStamp (int): A UTC format timestamp.\nverbose (Optional[bool]): If False, all extra printouts will be\nsuppressed. Defaults to True.", "source": "juraj-google-style"}
{"code": "def setAvatar(self, image):\n    self.conn('PUT', '{0}/users/{1}/profile/avatar'.format(SkypeConnection.API_USER, self.userId), auth=SkypeConnection.Auth.SkypeToken, data=image.read())", "docstring": "Update the profile picture for the current user.\n\nArgs:\nimage (file): a file-like object to read the image from", "source": "codesearchnet"}
{"code": "def from_lasio(cls, l, remap=None, funcs=None, data=True, req=None, alias=None, fname=None):\n    curve_params = {}\n    for (field, (sect, code)) in LAS_FIELDS['data'].items():\n        curve_params[field] = utils.lasio_get(l, sect, code, remap=remap, funcs=funcs)\n    if req:\n        reqs = utils.flatten_list([v for (k, v) in alias.items() if (k in req)])\n    if (l.depth_m[0] < l.depth_m[1]):\n        curve_params['depth'] = l.depth_m\n    else:\n        curve_params['depth'] = np.flipud(l.depth_m)\n    depth_curves = ['DEPT', 'TIME']\n    if (data and req):\n        curves = {c.mnemonic: Curve.from_lasio_curve(c, **curve_params) for c in l.curves if ((c.mnemonic[:4] not in depth_curves) and (c.mnemonic in reqs))}\n    elif (data and (not req)):\n        curves = {c.mnemonic: Curve.from_lasio_curve(c, **curve_params) for c in l.curves if (c.mnemonic[:4] not in depth_curves)}\n    elif ((not data) and req):\n        curves = {c.mnemonic: True for c in l.curves if ((c.mnemonic[:4] not in depth_curves) and (c.mnemonic in reqs))}\n    else:\n        curves = {c.mnemonic: True for c in l.curves if (c.mnemonic[:4] not in depth_curves)}\n    if req:\n        aliases = utils.flatten_list([c.get_alias(alias) for (m, c) in curves.items()])\n        if (len(set(aliases)) < len(req)):\n            return cls(params={})\n    params = {'las': l, 'header': Header.from_lasio(l, remap=remap, funcs=funcs), 'location': Location.from_lasio(l, remap=remap, funcs=funcs), 'data': curves, 'fname': fname}\n    for (field, (sect, code)) in LAS_FIELDS['well'].items():\n        params[field] = utils.lasio_get(l, sect, code, remap=remap, funcs=funcs)\n    return cls(params)", "docstring": "Constructor. If you already have the lasio object, then this makes a\nwell object from it.\n\nArgs:\nl (lasio object): a lasio object.\nremap (dict): Optional. A dict of 'old': 'new' LAS field names.\nfuncs (dict): Optional. A dict of 'las field': function() for\nimplementing a transform before loading. Can be a lambda.\ndata (bool): Whether to load curves or not.\nreq (dict): An alias list, giving all required curves. If not\nall of the aliases are present, the well is empty.\n\nReturns:\nwell. The well object.", "source": "codesearchnet"}
{"code": "def fetch(self, payment_id, data={}, **kwargs):\n    return super(Payment, self).fetch(payment_id, data, **kwargs)", "docstring": "Fetch Payment for given Id\n\nArgs:\npayment_id : Id for which payment object has to be retrieved\n\nReturns:\nPayment dict for given payment Id", "source": "codesearchnet"}
{"code": "def get_states(self, n):\n    \n    return self.states[len(self.new_states):len(self.new_states) + n]", "docstring": "Get the next n recurrent states.\n\nCalled by layers in \"incremental\" mode.\n\nArgs:\nn: an integer\nReturns:\na list of n Tensors", "source": "juraj-google-style"}
{"code": "def update_q(self, state_key, action_key, reward_value, next_max_q):\n    q = self.extract_q_df(state_key, action_key)\n    new_q = (q + (self.alpha_value * ((reward_value + (self.gamma_value * next_max_q)) - q)))\n    self.save_q_df(state_key, action_key, new_q)", "docstring": "Update Q-Value.\n\nArgs:\nstate_key:              The key of state.\naction_key:             The key of action.\nreward_value:           R-Value(Reward).\nnext_max_q:             Maximum Q-Value.", "source": "codesearchnet"}
{"code": "def _parse_peer_link(self, config):\n    match = re.search('peer-link (\\\\S+)', config)\n    value = (match.group(1) if match else None)\n    return dict(peer_link=value)", "docstring": "Scans the config block and parses the peer-link value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "codesearchnet"}
{"code": "def create_page(cls, webdriver=None, **kwargs):\n    if (not webdriver):\n        webdriver = WTF_WEBDRIVER_MANAGER.get_driver()\n    return PageFactory.create_page(cls, webdriver=webdriver, **kwargs)", "docstring": "Class method short cut to call PageFactory on itself.  Use it to instantiate\nthis PageObject using a webdriver.\n\nArgs:\nwebdriver (Webdriver): Instance of Selenium Webdriver.\n\nReturns:\nPageObject\n\nRaises:\nInvalidPageError", "source": "codesearchnet"}
{"code": "def upload(self, resource_id, data):\n        \n        self.body = data\n        self.content_type = 'application/octet-stream'\n        self.resource_id(str(resource_id))\n        self._request_uri = '{}/upload'.format(self._request_uri)", "docstring": "Update the request URI to upload the a document to this resource.\n\nArgs:\nresource_id (integer): The group id.\ndata (any): The raw data to upload.", "source": "juraj-google-style"}
{"code": "def import_class(classpath):\n    (modname, classname) = classpath.rsplit('.', 1)\n    module = importlib.import_module(modname)\n    klass = getattr(module, classname)\n    return klass", "docstring": "Import the class referred to by the fully qualified class path.\n\nArgs:\nclasspath: A full \"foo.bar.MyClass\" path to a class definition.\n\nReturns:\nThe class referred to by the classpath.\n\nRaises:\nImportError: If an error occurs while importing the module.\nAttributeError: IF the class does not exist in the imported module.", "source": "codesearchnet"}
{"code": "def add_period_and_roll(self, date_tensor, period_tensor, roll_convention=constants.BusinessDayConvention.NONE):\n    return self.roll_to_business_day(date_tensor + period_tensor, roll_convention)", "docstring": "Adds given periods to given dates and rolls to business days.\n\nThe original dates are not rolled prior to addition.\n\nArgs:\ndate_tensor: DateTensor of dates to add to.\nperiod_tensor: PeriodTensor broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def post_info(self, name, message):\n    self.post_command(OPERATIONS.CMD_POST_MESSAGE, _create_message(name, states.INFO_LEVEL, message))", "docstring": "Asynchronously post a user facing info message about a service.\n\nArgs:\nname (string): The name of the service\nmessage (string): The user facing info message that will be stored\nfor the service and can be queried later.", "source": "codesearchnet"}
{"code": "def dict_to_xml(spec, full_document=False):\n    \n\n    middle = xmltodict.unparse(spec, full_document=full_document, pretty=True)\n    return lxml.etree.fromstring(middle)", "docstring": "Convert dict to XML\n\nArgs:\nspec(dict): dict to convert\nfull_document(bool): whether to add XML headers\n\nReturns:\nlxml.etree.Element: XML tree", "source": "juraj-google-style"}
{"code": "def _assert_float_dtype(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_floating:\n        raise ValueError('Expected floating point type, got %s.' % dtype)\n    return dtype", "docstring": "Validate and return floating point type based on `dtype`.\n\n`dtype` must be a floating point type.\n\nArgs:\ndtype: The data type to validate.\n\nReturns:\nValidated type.\n\nRaises:\nValueError: if `dtype` is not a floating point type.", "source": "github-repos"}
{"code": "def join(self, *data: Iterable[MaybeBytes]) -> bytes:\n        \n        return self.how.join([bytes(item) for item in chain(*data)])", "docstring": "Iterable join on a delimiter.\n\nArgs:\ndata: Iterable of items to join.\n\nExamples:\n::\n\nBytesFormat(b' ').join([b'one', b'two', b'three'])", "source": "juraj-google-style"}
{"code": "def __init__(self, command, short_help, params: List[ParameterDesc] = None):\n        \n        self.command = command\n        self.short_help = short_help\n        self.params = params if params else []", "docstring": "Command descriptor\n\nArgs:\ncommand: 1 word command identifier\nshort_help: short description of the purpose of the command\nparams: list of parameter descriptions belonging to the command", "source": "juraj-google-style"}
{"code": "def GetBatchJob(client, batch_job_id):\n    batch_job_service = client.GetService('BatchJobService', 'v201809')\n    selector = {'fields': ['Id', 'Status', 'DownloadUrl'], 'predicates': [{'field': 'Id', 'operator': 'EQUALS', 'values': [batch_job_id]}]}\n    return batch_job_service.get(selector)['entries'][0]", "docstring": "Retrieves the BatchJob with the given id.\n\nArgs:\nclient: an instantiated AdWordsClient used to retrieve the BatchJob.\nbatch_job_id: a long identifying the BatchJob to be retrieved.\nReturns:\nThe BatchJob associated with the given id.", "source": "codesearchnet"}
{"code": "def __init__(self, datastore_client, storage_client, round_name):\n    \n    self._datastore_client = datastore_client\n    self._storage_client = storage_client\n    self._round_name = round_name\n    \n    self._data = {}", "docstring": "Initializes ClassificationBatches.\n\nArgs:\ndatastore_client: instance of CompetitionDatastoreClient\nstorage_client: instance of CompetitionStorageClient\nround_name: name of the round", "source": "juraj-google-style"}
{"code": "def add_completions(replace_list: list, belstr: str, replace_span: Span, completion_text: str) -> List[Mapping[(str, Any)]]:\n    completions = []\n    for r in replace_list:\n        if (len(belstr) > 0):\n            belstr_end = (len(belstr) - 1)\n        else:\n            belstr_end = 0\n        log.debug(f\"Replace list {r}  Replace_span {replace_span}  BELstr: {belstr} Len: {belstr_end} Test1 {(r['type'] == 'Function')}  Test2 {((replace_span[1] + 1) == len(belstr))}\")\n        if ((r['type'] == 'Function') and (replace_span[0] > 0) and (belstr[(replace_span[0] - 1)] == ',')):\n            log.debug('prior char is a comma')\n            replacement = (((belstr[0:replace_span[0]] + ' ') + f\"{r['replacement']}()\") + belstr[(replace_span[1] + 1):])\n            cursor_loc = len(((belstr[0:replace_span[0]] + ' ') + f\"{r['replacement']}()\"))\n        elif ((replace_span[0] > 0) and (belstr[(replace_span[0] - 1)] == ',')):\n            log.debug('prior char is a comma')\n            replacement = (((belstr[0:replace_span[0]] + ' ') + r['replacement']) + belstr[(replace_span[1] + 1):])\n            cursor_loc = len(((belstr[0:replace_span[0]] + ' ') + r['replacement']))\n        elif ((r['type'] == 'Function') and (replace_span[1] >= belstr_end)):\n            replacement = (belstr[0:replace_span[0]] + f\"{r['replacement']}()\")\n            cursor_loc = (len(replacement) - 1)\n            log.debug(f'Replacement: {replacement}')\n        else:\n            replacement = ((belstr[0:replace_span[0]] + r['replacement']) + belstr[(replace_span[1] + 1):])\n            cursor_loc = len((belstr[0:replace_span[0]] + r['replacement']))\n        completions.append({'replacement': replacement, 'cursor_loc': cursor_loc, 'highlight': r['highlight'], 'label': r['label']})\n    return completions", "docstring": "Create completions to return given replacement list\n\nArgs:\nreplace_list: list of completion replacement values\nbelstr: BEL String\nreplace_span: start, stop of belstr to replace\ncompletion_text: text to use for completion - used for creating highlight\nReturns:\n[{\n\"replacement\": replacement,\n\"cursor_loc\": cursor_loc,\n\"highlight\": highlight,\n\"label\": label,\n}]", "source": "codesearchnet"}
{"code": "def sg_input(shape=None, dtype=sg_floatx, name=None):\n    if (shape is None):\n        return tf.placeholder(dtype, shape=None, name=name)\n    else:\n        if (not isinstance(shape, (list, tuple))):\n            shape = [shape]\n        return tf.placeholder(dtype, shape=([None] + list(shape)), name=name)", "docstring": "r\"\"\"Creates a placeholder.\n\nArgs:\nshape: A tuple/list of integers. If an integers is given, it will turn to a list.\ndtype: A data type. Default is float32.\nname: A name for the placeholder.\n\nReturns:\nA wrapped placeholder `Tensor`.", "source": "codesearchnet"}
{"code": "def make_mapper(features):\n    \n    if not features:\n        features = Feature(input=[], transformer=NullTransformer())\n    if not iterable(features):\n        features = (features, )\n    return DataFrameMapper(\n        [t.as_input_transformer_tuple() for t in features],\n        input_df=True)", "docstring": "Make a DataFrameMapper from a feature or list of features\n\nArgs:\nfeatures (Union[Feature, List[Feature]]): feature or list of features\n\nReturns:\nDataFrameMapper: mapper made from features", "source": "juraj-google-style"}
{"code": "def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor:\n    text_features = self.clip.get_text_features(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    return text_features", "docstring": "Returns:\ntext_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\nthe projection layer to the pooled output of [`TFCLIPTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, TFCLIPModel\n\n>>> model = TFCLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-base-patch32\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"tf\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def get_anchor_labels(anchors, gt_boxes, crowd_boxes):\n    \n    \n    def filter_box_label(labels, value, max_num):\n        curr_inds = np.where(labels == value)[0]\n        if len(curr_inds) > max_num:\n            disable_inds = np.random.choice(\n                curr_inds, size=(len(curr_inds) - max_num),\n                replace=False)\n            labels[disable_inds] = -1    \n            curr_inds = np.where(labels == value)[0]\n        return curr_inds\n\n    NA, NB = len(anchors), len(gt_boxes)\n    assert NB > 0  \n    box_ious = np_iou(anchors, gt_boxes)  \n    ious_argmax_per_anchor = box_ious.argmax(axis=1)  \n    ious_max_per_anchor = box_ious.max(axis=1)\n    ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True)  \n    \n    anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0]\n\n    \n    anchor_labels = -np.ones((NA,), dtype='int32')   \n\n    \n    anchor_labels[anchors_with_max_iou_per_gt] = 1\n    anchor_labels[ious_max_per_anchor >= cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1\n    anchor_labels[ious_max_per_anchor < cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0\n\n    \n    if crowd_boxes.size > 0:\n        cand_inds = np.where(anchor_labels >= 0)[0]\n        cand_anchors = anchors[cand_inds]\n        ioas = np_ioa(crowd_boxes, cand_anchors)\n        overlap_with_crowd = cand_inds[ioas.max(axis=0) > cfg.RPN.CROWD_OVERLAP_THRESH]\n        anchor_labels[overlap_with_crowd] = -1\n\n    \n    target_num_fg = int(cfg.RPN.BATCH_PER_IM * cfg.RPN.FG_RATIO)\n    fg_inds = filter_box_label(anchor_labels, 1, target_num_fg)\n    \n    \n    \n\n    \n    old_num_bg = np.sum(anchor_labels == 0)\n    if old_num_bg == 0:\n        \n        raise MalformedData(\"No valid background for RPN!\")\n    target_num_bg = cfg.RPN.BATCH_PER_IM - len(fg_inds)\n    filter_box_label(anchor_labels, 0, target_num_bg)   \n\n    \n    anchor_boxes = np.zeros((NA, 4), dtype='float32')\n    fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :]\n    anchor_boxes[fg_inds, :] = fg_boxes\n    \n    return anchor_labels, anchor_boxes", "docstring": "Label each anchor as fg/bg/ignore.\nArgs:\nanchors: Ax4 float\ngt_boxes: Bx4 float, non-crowd\ncrowd_boxes: Cx4 float\n\nReturns:\nanchor_labels: (A,) int. Each element is {-1, 0, 1}\nanchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg.", "source": "juraj-google-style"}
{"code": "def from_entity(entity, self_user_id):\n        \n        user_id = UserID(chat_id=entity.id.chat_id,\n                         gaia_id=entity.id.gaia_id)\n        return User(user_id, entity.properties.display_name,\n                    entity.properties.first_name,\n                    entity.properties.photo_url,\n                    entity.properties.email,\n                    (self_user_id == user_id) or (self_user_id is None))", "docstring": "Construct user from ``Entity`` message.\n\nArgs:\nentity: ``Entity`` message.\nself_user_id (~hangups.user.UserID or None): The ID of the current\nuser. If ``None``, assume ``entity`` is the current user.\n\nReturns:\n:class:`~hangups.user.User` object.", "source": "juraj-google-style"}
{"code": "def expect_equal(first, second, msg=None, extras=None):\n    try:\n        asserts.assert_equal(first, second, msg, extras)\n    except signals.TestSignal as e:\n        logging.exception('Expected %s equals to %s, but they are not.', first, second)\n        recorder.add_error(e)", "docstring": "Expects the equality of objects, otherwise fail the test.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nError message is \"first != second\" by default. Additional explanation can\nbe supplied in the message.\n\nArgs:\nfirst: The first object to compare.\nsecond: The second object to compare.\nmsg: A string that adds additional info about the failure.\nextras: An optional field for extra information to be included in test\nresult.", "source": "github-repos"}
{"code": "def cleandata(inputlist):\n    \n    output = []\n    for e in inputlist:\n        new = []\n        for f in e:\n            if f == \"--\":\n                new.append(None)\n            else:\n                new.append(float(f))\n        output.append(new)\n    return output", "docstring": "Helper function for parse.getdata.\nRemove empty variables, convert strings to float\n\nargs:\ninputlist: list\nList of Variables\nReturns:\nouput:\nCleaned list", "source": "juraj-google-style"}
{"code": "def is_coord_subset_pbc(subset, superset, atol=1e-08, mask=None):\n    c1 = np.array(subset, dtype=np.float64)\n    c2 = np.array(superset, dtype=np.float64)\n    if (mask is not None):\n        m = np.array(mask, dtype=np.int)\n    else:\n        m = np.zeros((len(subset), len(superset)), dtype=np.int)\n    atol = (np.zeros(3, dtype=np.float64) + atol)\n    return cuc.is_coord_subset_pbc(c1, c2, atol, m)", "docstring": "Tests if all fractional coords in subset are contained in superset.\n\nArgs:\nsubset, superset: List of fractional coords\natol (float or size 3 array): Tolerance for matching\nmask (boolean array): Mask of matches that are not allowed.\ni.e. if mask[1,2] == True, then subset[1] cannot be matched\nto superset[2]\n\nReturns:\nTrue if all of subset is in superset.", "source": "codesearchnet"}
{"code": "def distance_matrix(self, leaf_labels=False):\n        \n        M = dict(); leaf_dists = dict()\n        for node in self.traverse_postorder():\n            if node.is_leaf():\n                leaf_dists[node] = [[node,0]]\n            else:\n                for c in node.children:\n                    if c.edge_length is not None:\n                        for i in range(len(leaf_dists[c])):\n                            leaf_dists[c][i][1] += c.edge_length\n                for c1 in range(0,len(node.children)-1):\n                    leaves_c1 = leaf_dists[node.children[c1]]\n                    for c2 in range(c1+1,len(node.children)):\n                        leaves_c2 = leaf_dists[node.children[c2]]\n                        for i in range(len(leaves_c1)):\n                            for j in range(len(leaves_c2)):\n                                u,ud = leaves_c1[i]; v,vd = leaves_c2[j]; d = ud+vd\n                                if leaf_labels:\n                                    u_key = u.label; v_key = v.label\n                                else:\n                                    u_key = u; v_key = v\n                                if u_key not in M:\n                                    M[u_key] = dict()\n                                M[u_key][v_key] = d\n                                if v_key not in M:\n                                    M[v_key] = dict()\n                                M[v_key][u_key] = d\n                leaf_dists[node] = leaf_dists[node.children[0]]; del leaf_dists[node.children[0]]\n                for i in range(1,len(node.children)):\n                    leaf_dists[node] += leaf_dists[node.children[i]]; del leaf_dists[node.children[i]]\n        return M", "docstring": "Return a distance matrix (2D dictionary) of the leaves of this ``Tree``\n\nArgs:\n``leaf_labels`` (``bool``): ``True`` to have keys be labels of leaf ``Node`` objects, otherwise ``False`` to have keys be ``Node`` objects\n\nReturns:\n``dict``: Distance matrix (2D dictionary) of the leaves of this ``Tree``, where keys are labels of leaves; ``M[u][v]`` = distance from ``u`` to ``v``", "source": "juraj-google-style"}
{"code": "def as_fn(self, *binding_order):\n    \n    if len(binding_order) != len(self.unbound_vars):\n      raise ValueError('All vars must be specified.')\n    for arg in binding_order:\n      if arg not in self.unbound_vars:\n        raise ValueError('Unknown binding: %s' % arg)\n\n    def func(*args, **kwargs):\n      \n      if len(binding_order) != len(args):\n        raise ValueError('Missing values, expects: %s' % binding_order)\n      values = dict(zip(binding_order, args))\n      values.update(kwargs)\n      return self.construct(**values)\n\n    func.__doc__ = _gen_ipython_string(func, binding_order, [], func.__doc__)\n    return func", "docstring": "Creates a function by binding the arguments in the given order.\n\nArgs:\n*binding_order: The unbound variables. This must include all values.\nReturns:\nA function that takes the arguments of binding_order.\nRaises:\nValueError: If the bindings are missing values or include unknown values.", "source": "juraj-google-style"}
{"code": "def get_dispatcher_event(self, name):\n    e = self.__property_events.get(name)\n    if (e is None):\n        e = self.__events[name]\n    return e", "docstring": "Retrieves an Event object by name\n\nArgs:\nname (str): The name of the :class:`Event` or\n:class:`~pydispatch.properties.Property` object to retrieve\n\nReturns:\nThe :class:`Event` instance for the event or property definition\n\n.. versionadded:: 0.1.0", "source": "codesearchnet"}
{"code": "def get_pkg_names(pkgs):\n    \n    result = set()\n    with open(join(\"mapping\"), \"r\") as f:\n        data = dict(x.strip().split(\":\") for x in f)\n    for pkg in pkgs:\n        \n        \n        result.add(data.get(pkg, pkg))\n    \n    return sorted(result, key=lambda s: s.lower())", "docstring": "Get PyPI package names from a list of imports.\n\nArgs:\npkgs (List[str]): List of import names.\n\nReturns:\nList[str]: The corresponding PyPI package names.", "source": "juraj-google-style"}
{"code": "def get_hash_of_dirs(directory):\n    \n    import hashlib\n    sha = hashlib.sha512()\n    if not os.path.exists(directory):\n        return -1\n\n    for root, _, files in os.walk(directory):\n        for name in files:\n            filepath = local.path(root) / name\n            if filepath.exists():\n                with open(filepath, 'rb') as next_file:\n                    for line in next_file:\n                        sha.update(line)\n    return sha.hexdigest()", "docstring": "Recursively hash the contents of the given directory.\n\nArgs:\ndirectory (str): The root directory we want to hash.\n\nReturns:\nA hash of all the contents in the directory.", "source": "juraj-google-style"}
{"code": "def _read_opm(string):\n    \n\n    maneuvers = []\n\n    data = {}\n    comments = {}\n    for i, line in enumerate(string.splitlines()):\n        if not line:\n            continue\n        if line.startswith(\"COMMENT\"):\n            comments[i] = line.split(\"COMMENT\")[-1].strip()\n            continue\n\n        key, _, value = line.partition(\"=\")\n\n        key = key.strip()\n        value = value.strip()\n\n        if key.startswith('MAN_'):\n            if key == \"MAN_EPOCH_IGNITION\":\n                maneuvers.append({})\n                man_idx = len(maneuvers) - 1\n                if i - 1 in comments:\n                    maneuvers[man_idx][\"comment\"] = comments[i - 1]\n            maneuvers[man_idx][key] = value\n        else:\n            data[key] = value\n\n    try:\n        name = data['OBJECT_NAME']\n        cospar_id = data['OBJECT_ID']\n        scale = data['TIME_SYSTEM']\n        frame = data['REF_FRAME']\n        date = Date.strptime(data['EPOCH'], \"%Y-%m-%dT%H:%M:%S.%f\", scale=scale)\n        vx = _float(data['X_DOT'])\n        vy = _float(data['Y_DOT'])\n        vz = _float(data['Z_DOT'])\n        x = _float(data['X'])\n        y = _float(data['Y'])\n        z = _float(data['Z'])\n    except KeyError as e:\n        raise ValueError('Missing mandatory parameter')\n\n    orb = Orbit(date, [x, y, z, vx, vy, vz], 'cartesian', frame, None)\n    orb.name = name\n    orb.cospar_id = cospar_id\n\n    for raw_man in maneuvers:\n\n        man = {}\n        man['date'] = Date.strptime(raw_man['MAN_EPOCH_IGNITION'], \"%Y-%m-%dT%H:%M:%S.%f\", scale=scale)\n        man['duration'] = timedelta(seconds=_float(raw_man['MAN_DURATION']))\n        man['frame'] = raw_man['MAN_REF_FRAME'] if raw_man['MAN_REF_FRAME'] != frame else None\n        man['delta_mass'] = raw_man['MAN_DELTA_MASS']\n        man['comment'] = raw_man.get('comment')\n\n        for i in range(1, 4):\n            man.setdefault('dv', []).append(_float(raw_man['MAN_DV_{}'.format(i)]))\n\n        if man['duration'].total_seconds() == 0:\n            orb.maneuvers.append(Maneuver(man['date'], man['dv'], frame=man['frame'], comment=man['comment']))\n\n    if 'CX_X' in data:\n\n        frame = data.get('COV_REF_FRAME', orb.cov.PARENT_FRAME)\n        if frame in ('RSW', 'RTN'):\n            frame = \"QSW\"\n\n        values = [\n            [data['CX_X'],     data['CY_X'],     data['CZ_X'],     data['CX_DOT_X'],     data['CY_DOT_X'],     data['CZ_DOT_X']],\n            [data['CY_X'],     data['CY_Y'],     data['CZ_Y'],     data['CX_DOT_Y'],     data['CY_DOT_Y'],     data['CZ_DOT_Y']],\n            [data['CZ_X'],     data['CZ_Y'],     data['CZ_Z'],     data['CX_DOT_Z'],     data['CY_DOT_Z'],     data['CZ_DOT_Z']],\n            [data['CX_DOT_X'], data['CX_DOT_Y'], data['CX_DOT_Z'], data['CX_DOT_X_DOT'], data['CY_DOT_X_DOT'], data['CZ_DOT_X_DOT']],\n            [data['CY_DOT_X'], data['CY_DOT_Y'], data['CY_DOT_Z'], data['CY_DOT_X_DOT'], data['CY_DOT_Y_DOT'], data['CZ_DOT_Y_DOT']],\n            [data['CZ_DOT_X'], data['CZ_DOT_Y'], data['CZ_DOT_Z'], data['CZ_DOT_X_DOT'], data['CZ_DOT_Y_DOT'], data['CZ_DOT_Z_DOT']]\n        ]\n\n        orb.cov = np.array(values).astype(np.float) * 1e6\n        orb.cov._frame = frame\n\n    return orb", "docstring": "Read of OPM string\n\nArgs:\nstring (str): Text containing the OPM\nReturn:\nOrbit:", "source": "juraj-google-style"}
{"code": "def normalize_date(tmy_date, year):\n    \n    month = tmy_date.month\n    day = tmy_date.day - 1\n    hour = tmy_date.hour\n    \n    if month is 1 and day is 0 and hour is 0:\n        year = year + 1\n    return datetime.datetime(year, month, 1) + \\\n        datetime.timedelta(days=day, hours=hour, minutes=0)", "docstring": "change TMY3 date to an arbitrary year.\n\nArgs:\ntmy_date (datetime): date to mangle.\nyear (int): desired year.\n\nReturns:\n(None)", "source": "juraj-google-style"}
{"code": "def describe_enum_value(enum_value):\n    enum_value_descriptor = EnumValueDescriptor()\n    enum_value_descriptor.name = six.text_type(enum_value.name)\n    enum_value_descriptor.number = enum_value.number\n    return enum_value_descriptor", "docstring": "Build descriptor for Enum instance.\n\nArgs:\nenum_value: Enum value to provide descriptor for.\n\nReturns:\nInitialized EnumValueDescriptor instance describing the Enum instance.", "source": "codesearchnet"}
{"code": "def var(series):\n    if np.issubdtype(series.dtype, np.number):\n        return series.var()\n    else:\n        return np.nan", "docstring": "Returns the variance of values in a series.\n\nArgs:\nseries (pandas.Series): column to summarize.", "source": "codesearchnet"}
{"code": "def bool(name, execute_bool=True, default=None):\n\n    def wrapped(func):\n\n        @functools.wraps(func)\n        def _decorator(*args, **kwargs):\n            if (core.isset(name) and (core.bool(name) == execute_bool)):\n                return func(*args, **kwargs)\n            elif ((default is not None) and (default == execute_bool)):\n                return func(*args, **kwargs)\n        return _decorator\n    return wrapped", "docstring": "Only execute the function if the boolean variable is set.\n\nArgs:\nname: The name of the environment variable\nexecute_bool: The boolean value to execute the function on\ndefault: The default value if the environment variable is not set (respects `execute_bool`)\n\nReturns:\nThe function return value or `None` if the function was skipped.", "source": "codesearchnet"}
{"code": "def get_suffixes():\n    names = []\n    if at_least_libvips(8, 8):\n        array = vips_lib.vips_foreign_get_suffixes()\n        i = 0\n        while (array[i] != ffi.NULL):\n            name = _to_string(array[i])\n            if (name not in names):\n                names.append(name)\n            glib_lib.g_free(array[i])\n            i += 1\n        glib_lib.g_free(array)\n    return names", "docstring": "Get a list of all the filename suffixes supported by libvips.\n\nReturns:\n[string]", "source": "codesearchnet"}
{"code": "def maybe_center_plot(result):\n    begin = re.search('(% .* matplotlib2tikz v.*)', result)\n    if begin:\n        result = (('\\\\begin{center}\\n' + result[begin.end():]) + '\\n\\\\end{center}')\n    return result", "docstring": "Embeds a possible tikz image inside a center environment.\n\nSearches for matplotlib2tikz last commend line to detect tikz images.\n\nArgs:\nresult: The code execution result\n\nReturns:\nThe input result if no tikzpicture was found, otherwise a centered\nversion.", "source": "codesearchnet"}
{"code": "def create_socket(self):\n        \n        socket_path = os.path.join(self.config_dir, 'pueue.sock')\n        \n        try:\n            if os.path.exists(socket_path):\n                os.remove(socket_path)\n            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n            self.socket.bind(socket_path)\n            self.socket.setblocking(0)\n            self.socket.listen(0)\n            \n            os.chmod(socket_path, stat.S_IRWXU)\n        except Exception:\n            self.logger.error(\"Daemon couldn't socket. Aborting\")\n            self.logger.exception()\n            sys.exit(1)\n\n        return self.socket", "docstring": "Create a socket for the daemon, depending on the directory location.\n\nArgs:\nconfig_dir (str): The absolute path to the config directory used by the daemon.\n\nReturns:\nsocket.socket: The daemon socket. Clients connect to this socket.", "source": "juraj-google-style"}
{"code": "async def basic_consume(self, queue_name='', consumer_tag='', no_local=False, no_ack=False, exclusive=False, no_wait=False, arguments=None, wait_message=True, timeout=0):\n    consumer_tag = (consumer_tag or ('ctag%i.%s' % (self.channel_id, uuid.uuid4().hex)))\n    if (arguments is None):\n        arguments = {}\n    frame = amqp_frame.AmqpRequest(self.protocol._stream_writer, amqp_constants.TYPE_METHOD, self.channel_id)\n    frame.declare_method(amqp_constants.CLASS_BASIC, amqp_constants.BASIC_CONSUME)\n    request = amqp_frame.AmqpEncoder()\n    request.write_short(0)\n    request.write_shortstr(queue_name)\n    request.write_shortstr(consumer_tag)\n    request.write_bits(no_local, no_ack, exclusive, no_wait)\n    request.write_table(arguments)\n    self.consumer_queues[consumer_tag] = asyncio.Queue(self.max_queue_size)\n    self.last_consumer_tag = consumer_tag\n    consumer = self.CONSUMER_CLASS(self, self.consumer_queues[consumer_tag], consumer_tag, nowait=(not wait_message), timeout=timeout)\n    (await self._write_frame_awaiting_response('basic_consume', frame, request, no_wait))\n    if (not no_wait):\n        self._ctag_events[consumer_tag].set()\n    return consumer", "docstring": "Starts the consumption of message into a queue.\nthe callback will be called each time we're receiving a message.\n\nArgs:\nqueue_name:     str, the queue to receive message from\nconsumer_tag:   str, optional consumer tag\nno_local:       bool, if set the server will not send messages\nto the connection that published them.\nno_ack:         bool, if set the server does not expect\nacknowledgements for messages\nexclusive:      bool, request exclusive consumer access,\nmeaning only this consumer can access the queue\nno_wait:        bool, if set, the server will not respond to\nthe method\narguments:      dict, AMQP arguments to be passed to the server\nwait_message:   Indicates if the consumer should wait for new\nmessages in the queue or simply return None if\nthe queue is empty.\ntimeout:        A timeout for waiting messages.\n``wait_message`` has precendence over timeout.", "source": "codesearchnet"}
{"code": "def _embedding_dim(vocab_size):\n  \n  if not vocab_size or (vocab_size <= 0):\n    raise ValueError(\"Invalid vocab_size %g.\" % vocab_size)\n  return int(round(6.0 * math.sqrt(math.sqrt(vocab_size))))", "docstring": "Calculate a reasonable embedding size for a vocabulary.\n\nRule of thumb is 6 * 4th root of vocab_size.\n\nArgs:\nvocab_size: Size of the input vocabulary.\nReturns:\nThe embedding size to use.\nRaises:\nValueError: if `vocab_size` is invalid.", "source": "juraj-google-style"}
{"code": "def learn_transportation_mode(track, clf):\n    \n    for segment in track.segments:\n        tmodes = segment.transportation_modes\n        points = segment.points\n        features = []\n        labels = []\n\n        for tmode in tmodes:\n            points_part = points[tmode['from']:tmode['to']]\n            if len(points_part) > 0:\n                features.append(extract_features_2(points_part))\n                labels.append(tmode['label'])\n\n        clf.learn(features, labels)", "docstring": "Inserts transportation modes of a track into a classifier\n\nArgs:\ntrack (:obj:`Track`)\nclf (:obj:`Classifier`)", "source": "juraj-google-style"}
{"code": "def _on_resource_closure_failure(self, e):\n    logging.info('[Worker %d] Clearing tagged queue after resource closure failure.', self.worker_index)\n    with self._resource_tracking_lock:\n        self._is_dead_with_error = e\n        self._cluster.closure_queue.clear_tag_unlocked(self.worker_index)\n        self._set_resources_aborted(e)", "docstring": "Clear tagged queue to ensure resource closures are rebuilt.\n\nArgs:\ne: The exception arisen from the resource closure.", "source": "github-repos"}
{"code": "def _update_dicts(name_scope, model_layer, input_to_in_layer, model_name_to_output, prev_node_name):\n    layer_config = model_layer.get('config')\n    if (not layer_config.get('layers')):\n        raise ValueError('layer is not a model.')\n    node_name = _scoped_name(name_scope, layer_config.get('name'))\n    input_layers = layer_config.get('input_layers')\n    output_layers = layer_config.get('output_layers')\n    inbound_nodes = model_layer.get('inbound_nodes')\n    is_functional_model = bool((input_layers and output_layers))\n    is_parent_functional_model = bool(inbound_nodes)\n    if (is_parent_functional_model and is_functional_model):\n        for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):\n            input_layer_name = _scoped_name(node_name, input_layer)\n            inbound_node_name = _scoped_name(name_scope, inbound_node[0])\n            input_to_in_layer[input_layer_name] = inbound_node_name\n    elif (is_parent_functional_model and (not is_functional_model)):\n        prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0][0])\n    elif ((not is_parent_functional_model) and prev_node_name and is_functional_model):\n        assert (len(input_layers) == 1), ('Cannot have multi-input Functional model when parent model is not Functional. Number of input layers: %d' % len(input_layer))\n        input_layer = input_layers[0]\n        input_layer_name = _scoped_name(node_name, input_layer)\n        input_to_in_layer[input_layer_name] = prev_node_name\n    if (is_functional_model and output_layers):\n        layers = _norm_to_list_of_layers(output_layers)\n        layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]\n        model_name_to_output[node_name] = layer_names\n    else:\n        last_layer = layer_config.get('layers')[(- 1)]\n        last_layer_name = last_layer.get('config').get('name')\n        output_node = _scoped_name(node_name, last_layer_name)\n        model_name_to_output[node_name] = [output_node]\n    return (input_to_in_layer, model_name_to_output, prev_node_name)", "docstring": "Updates input_to_in_layer, model_name_to_output, and prev_node_name\nbased on the model_layer.\n\nArgs:\nname_scope: a string representing a scope name, similar to that of tf.name_scope.\nmodel_layer: a dict representing a Keras model configuration.\ninput_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.\nmodel_name_to_output: a dict mapping Keras Model name to output layer of the model.\nprev_node_name: a string representing a previous, in sequential model layout,\nnode name.\n\nReturns:\nA tuple of (input_to_in_layer, model_name_to_output, prev_node_name).\ninput_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.\nmodel_name_to_output: a dict mapping Keras Model name to output layer of the model.\nprev_node_name: a string representing a previous, in sequential model layout,\nnode name.", "source": "codesearchnet"}
{"code": "def flatten_(structure):\n    if isinstance(structure, dict):\n        if structure:\n            structure = zip(*sorted(structure.items(), key=(lambda x: x[0])))[1]\n        else:\n            structure = ()\n    if isinstance(structure, (tuple, list)):\n        result = []\n        for element in structure:\n            result += flatten_(element)\n        return tuple(result)\n    return (structure,)", "docstring": "Combine all leaves of a nested structure into a tuple.\n\nThe nested structure can consist of any combination of tuples, lists, and\ndicts. Dictionary keys will be discarded but values will ordered by the\nsorting of the keys.\n\nArgs:\nstructure: Nested structure.\n\nReturns:\nFlat tuple.", "source": "codesearchnet"}
{"code": "def authenticate(self):\n    endpoint = '/authenticate'\n    payload = {'agent': {'name': 'Minecraft', 'version': self.ygg_version}, 'username': self.username, 'password': self.password, 'clientToken': self.client_token}\n    rep = self._ygg_req(endpoint, payload)\n    if ((not rep) or ('error' in rep)):\n        return False\n    self.access_token = rep['accessToken']\n    self.client_token = rep['clientToken']\n    self.available_profiles = rep['availableProfiles']\n    self.selected_profile = rep['selectedProfile']\n    return True", "docstring": "Generate an access token using an username and password. Any existing\nclient token is invalidated if not provided.\n\nReturns:\ndict: Response or error dict", "source": "codesearchnet"}
{"code": "def _MarkReachedOps(from_ops, reached_ops, func_graphs):\n    queue = collections.deque()\n    queue.extend(from_ops)\n    while queue:\n        op = queue.popleft()\n        if op not in reached_ops:\n            reached_ops.add(op)\n            for output in op.outputs:\n                if backprop_util.IsTrainable(output):\n                    queue.extend(_Consumers(output, func_graphs))", "docstring": "Mark all ops reached from \"from_ops\".\n\nArgs:\nfrom_ops: list of Operations.\nreached_ops: set of Operations.\nfunc_graphs: list of FuncGraphs. This method will traverse through\nthese functions if they capture from_ops or any reachable ops.", "source": "github-repos"}
{"code": "def most_recent_n(self, n):\n    return self._commands[-n:]", "docstring": "Look up the n most recent commands.\n\nArgs:\nn: Number of most recent commands to look up.\n\nReturns:\nA list of n most recent commands, or all available most recent commands,\nif n exceeds size of the command history, in chronological order.", "source": "github-repos"}
{"code": "def ResetSection(self, directive):\n    \n    \n    self._section = self._INITIAL_SECTION\n    \n    self._last_header = ''\n\n    \n    \n    if directive in ('if', 'ifdef', 'ifndef'):\n      self.include_list.append([])\n    elif directive in ('else', 'elif'):\n      self.include_list[-1] = []", "docstring": "Reset section checking for preprocessor directive.\n\nArgs:\ndirective: preprocessor directive (e.g. \"if\", \"else\").", "source": "juraj-google-style"}
{"code": "def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]:\n    untranscribed_prefix_fn = (target_directory / 'untranscribed_prefixes.txt')\n    if untranscribed_prefix_fn.exists():\n        with untranscribed_prefix_fn.open() as f:\n            prefixes = f.readlines()\n        return [prefix.strip() for prefix in prefixes]\n    else:\n        pass\n    return []", "docstring": "The file \"untranscribed_prefixes.txt\" will specify prefixes which\ndo not have an associated transcription file if placed in the target directory.\n\nThis will fetch those prefixes from that file and will return an empty\nlist if that file does not exist.\n\nSee find_untranscribed_wavs function for finding untranscribed prefixes in an\nexperiment directory.\n\nReturns:\nA list of all untranscribed prefixes as specified in the file", "source": "codesearchnet"}
{"code": "def tokenize(template, def_ldel='{{', def_rdel='}}'):\n    global _CURRENT_LINE, _LAST_TAG_LINE\n    _CURRENT_LINE = 1\n    _LAST_TAG_LINE = None\n    try:\n        template = template.read()\n    except AttributeError:\n        pass\n    is_standalone = True\n    open_sections = []\n    l_del = def_ldel\n    r_del = def_rdel\n    while template:\n        (literal, template) = grab_literal(template, l_del)\n        if (not template):\n            (yield ('literal', literal))\n            break\n        is_standalone = l_sa_check(template, literal, is_standalone)\n        (tag, template) = parse_tag(template, l_del, r_del)\n        (tag_type, tag_key) = tag\n        if (tag_type == 'set delimiter'):\n            dels = tag_key.strip().split(' ')\n            (l_del, r_del) = (dels[0], dels[(- 1)])\n        elif (tag_type in ['section', 'inverted section']):\n            open_sections.append(tag_key)\n            _LAST_TAG_LINE = _CURRENT_LINE\n        elif (tag_type == 'end'):\n            try:\n                last_section = open_sections.pop()\n            except IndexError:\n                raise ChevronError('Trying to close tag \"{0}\"\\nLooks like it was not opened.\\nline {1}'.format(tag_key, (_CURRENT_LINE + 1)))\n            if (tag_key != last_section):\n                raise ChevronError('Trying to close tag \"{0}\"\\nlast open tag is \"{1}\"\\nline {2}'.format(tag_key, last_section, (_CURRENT_LINE + 1)))\n        is_standalone = r_sa_check(template, tag_type, is_standalone)\n        if is_standalone:\n            template = template.split('\\n', 1)[(- 1)]\n            if (tag_type != 'partial'):\n                literal = literal.rstrip(' ')\n        if (literal != ''):\n            (yield ('literal', literal))\n        if (tag_type not in ['comment', 'set delimiter?']):\n            (yield (tag_type, tag_key))\n    if open_sections:\n        raise ChevronError('Unexpected EOF\\nthe tag \"{0}\" was never closed\\nwas opened at line {1}'.format(open_sections[(- 1)], _LAST_TAG_LINE))", "docstring": "Tokenize a mustache template\n\nTokenizes a mustache template in a generator fashion,\nusing file-like objects. It also accepts a string containing\nthe template.\n\n\nArguments:\n\ntemplate -- a file-like object, or a string of a mustache template\n\ndef_ldel -- The default left delimiter\n(\"{{\" by default, as in spec compliant mustache)\n\ndef_rdel -- The default right delimiter\n(\"}}\" by default, as in spec compliant mustache)\n\n\nReturns:\n\nA generator of mustache tags in the form of a tuple\n\n-- (tag_type, tag_key)\n\nWhere tag_type is one of:\n* literal\n* section\n* inverted section\n* end\n* partial\n* no escape\n\nAnd tag_key is either the key or in the case of a literal tag,\nthe literal itself.", "source": "codesearchnet"}
{"code": "def get_tag(self, tag_name, **kwargs):\n        \n        return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX,\n                                        tag_name,\n                                        **kwargs)", "docstring": "get a tag by name\n\nArgs:\ntag_name (string): name of tag to get\n\nReturns:\ndictionary of the response", "source": "juraj-google-style"}
{"code": "def update(self, roomId, title=None, **request_parameters):\n    check_type(roomId, basestring, may_be_none=False)\n    check_type(roomId, basestring)\n    put_data = dict_from_items_with_values(request_parameters, title=title)\n    json_data = self._session.put(((API_ENDPOINT + '/') + roomId), json=put_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Update details for a room, by ID.\n\nArgs:\nroomId(basestring): The room ID.\ntitle(basestring): A user-friendly name for the room.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nRoom: A Room object with the updated Webex Teams room details.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def get_op_traceback(self, op_name):\n    \n    if not self._graph_traceback:\n      raise ValueError('No graph traceback has been received yet.')\n    for op_log_entry in self._graph_traceback.log_entries:\n      if op_log_entry.name == op_name:\n        return self._code_def_to_traceback_list(op_log_entry.code_def)\n    raise ValueError(\n        'No op named \"%s\" can be found in the graph of the latest version '\n        ' (%d).' % (op_name, self._graph_version))", "docstring": "Get the traceback of an op in the latest version of the TF graph.\n\nArgs:\nop_name: Name of the op.\n\nReturns:\nCreation traceback of the op, in the form of a list of 2-tuples:\n(file_path, lineno)\n\nRaises:\nValueError: If the op with the given name cannot be found in the latest\nversion of the graph that this SourceManager instance has received, or\nif this SourceManager instance has not received any graph traceback yet.", "source": "juraj-google-style"}
{"code": "def before_run(self, run_context):\n    if not self._grpc_debug_wrapper_session:\n        self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(run_context.session, self._grpc_debug_server_addresses, watch_fn=self._watch_fn, thread_name_filter=self._thread_name_filter)\n    fetches = run_context.original_args.fetches\n    feed_dict = run_context.original_args.feed_dict\n    watch_options = self._watch_fn(fetches, feed_dict)\n    run_options = config_pb2.RunOptions()\n    debug_utils.watch_graph(run_options, run_context.session.graph, debug_urls=self._grpc_debug_wrapper_session.prepare_run_debug_urls(fetches, feed_dict), debug_ops=watch_options.debug_ops, node_name_regex_allowlist=watch_options.node_name_regex_allowlist, op_type_regex_allowlist=watch_options.op_type_regex_allowlist, tensor_dtype_regex_allowlist=watch_options.tensor_dtype_regex_allowlist, tolerate_debug_op_creation_failures=watch_options.tolerate_debug_op_creation_failures)\n    return session_run_hook.SessionRunArgs(None, feed_dict=None, options=run_options)", "docstring": "Called right before a session is run.\n\nArgs:\nrun_context: A session_run_hook.SessionRunContext. Encapsulates\ninformation on the run.\n\nReturns:\nA session_run_hook.SessionRunArgs object.", "source": "github-repos"}
{"code": "def __init__(self, env):\n    if 'NSSCACHE_CONFIG' in env:\n        self.config_file = env['NSSCACHE_CONFIG']\n    else:\n        self.config_file = self.NSSCACHE_CONFIG\n    self.command = None\n    self.help_command = None\n    self.maps = []\n    self.options = {}\n    self.lockfile = None\n    self.timestamp_dir = None\n    self.log = logging.getLogger(__name__)", "docstring": "Initialize defaults for data we hold.\n\nArgs:\nenv: dictionary of environment variables (typically os.environ)", "source": "github-repos"}
{"code": "def save(self, path, verbose=False):\n        \n        path = os.path.realpath(path)\n        if os.path.exists(path):\n            if self.load_path and self.load_path == path:\n                if verbose:\n                    print \"saving over previous suite...\"\n                for context_name in self.context_names:\n                    self.context(context_name)  \n                shutil.rmtree(path)\n            else:\n                raise SuiteError(\"Cannot save, path exists: %r\" % path)\n\n        contexts_path = os.path.join(path, \"contexts\")\n        os.makedirs(contexts_path)\n\n        \n        data = self.to_dict()\n        filepath = os.path.join(path, \"suite.yaml\")\n        with open(filepath, \"w\") as f:\n            f.write(dump_yaml(data))\n\n        \n        for context_name in self.context_names:\n            context = self.context(context_name)\n            context._set_parent_suite(path, context_name)\n            filepath = self._context_path(context_name, path)\n            if verbose:\n                print \"writing %r...\" % filepath\n            context.save(filepath)\n\n        \n        tools_path = os.path.join(path, \"bin\")\n        os.makedirs(tools_path)\n        if verbose:\n            print \"creating alias wrappers in %r...\" % tools_path\n\n        tools = self.get_tools()\n        for tool_alias, d in tools.iteritems():\n            tool_name = d[\"tool_name\"]\n            context_name = d[\"context_name\"]\n\n            data = self._context(context_name)\n            prefix_char = data.get(\"prefix_char\")\n\n            if verbose:\n                print (\"creating %r -> %r (%s context)...\"\n                       % (tool_alias, tool_name, context_name))\n            filepath = os.path.join(tools_path, tool_alias)\n\n            create_forwarding_script(filepath,\n                                     module=\"suite\",\n                                     func_name=\"_FWD__invoke_suite_tool_alias\",\n                                     context_name=context_name,\n                                     tool_name=tool_name,\n                                     prefix_char=prefix_char)", "docstring": "Save the suite to disk.\n\nArgs:\npath (str): Path to save the suite to. If a suite is already saved\nat `path`, then it will be overwritten. Otherwise, if `path`\nexists, an error is raised.", "source": "juraj-google-style"}
{"code": "def dead_code_elimination(node):\n    to_remove = set((def_[1] for def_ in annotate.unused(node) if (not isinstance(def_[1], (gast.arguments, gast.For)))))\n    for n in list(to_remove):\n        for succ in gast.walk(n):\n            if anno.getanno(succ, 'push', False):\n                to_remove.add(anno.getanno(succ, 'push'))\n    transformers.Remove(to_remove).visit(node)\n    anno.clearanno(node)\n    return node", "docstring": "Perform a simple form of dead code elimination on a Python AST.\n\nThis method performs reaching definitions analysis on all function\ndefinitions. It then looks for the definition of variables that are not used\nelsewhere and removes those definitions.\n\nThis function takes into consideration push and pop statements; if a pop\nstatement is removed, it will also try to remove the accompanying push\nstatement. Note that this *requires dead code elimination to be performed on\nthe primal and adjoint simultaneously*.\n\nArgs:\nnode: The AST to optimize.\n\nReturns:\nThe optimized AST.", "source": "codesearchnet"}
{"code": "def formula_balance(model):\n    \n\n    \n    compound_formula = {}\n    for compound in model.compounds:\n        if compound.formula is not None:\n            try:\n                f = Formula.parse(compound.formula).flattened()\n                compound_formula[compound.id] = f\n            except ParseError as e:\n                msg = 'Error parsing formula for compound {}:\\n{}\\n{}'.format(\n                    compound.id, e, compound.formula)\n                if e.indicator is not None:\n                    msg += '\\n{}'.format(e.indicator)\n                logger.warning(msg)\n\n    for reaction in model.reactions:\n        yield reaction, reaction_formula(reaction.equation, compound_formula)", "docstring": "Calculate formula compositions for each reaction.\n\nCall :func:`reaction_formula` for each reaction.\nYield (reaction, result) pairs, where result has two formula compositions\nor `None`.\n\nArgs:\nmodel: :class:`psamm.datasource.native.NativeModel`.", "source": "juraj-google-style"}
{"code": "def _check_domain(tokens) -> bool:\n        \n\n        idx = None\n        for e in tokens:\n            if e.text == \"@\":\n                idx = e.i\n                break\n        if not idx or tokens[idx+1].text in FILTER_PROVIDER:\n            return False\n        else:\n            return True", "docstring": "Check if the email provider should be filtered\nArgs:\ntokens:\n\nReturns: Bool", "source": "juraj-google-style"}
{"code": "def run(self, args):\n        \n        jlink = self.create_jlink(args)\n        if args.downgrade:\n            if not jlink.firmware_newer():\n                print('DLL firmware is not older than J-Link firmware.')\n            else:\n                jlink.invalidate_firmware()\n\n                try:\n                    \n                    jlink.update_firmware()\n                except pylink.JLinkException as e:\n                    \n                    \n                    jlink = self.create_jlink(args)\n\n                print('Firmware Downgraded: %s' % jlink.firmware_version)\n        elif args.upgrade:\n            if not jlink.firmware_outdated():\n                print('DLL firmware is not newer than J-Link firmware.')\n            else:\n                try:\n                    \n                    jlink.update_firmware()\n                except pylink.JLinkException as e:\n                    \n                    \n                    jlink = self.create_jlink(args)\n                print('Firmware Updated: %s' % jlink.firmware_version)\n\n        return None", "docstring": "Runs the firmware command.\n\nArgs:\nself (FirmwareCommand): the ``FirmwareCommand`` instance\nargs (Namespace): arguments to parse\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def event(self, name, owner=None, **kwargs):\n        \n        return Event(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Event TI object.\n\nArgs:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_column(column_name, node, context):\n    column = try_get_column(column_name, node, context)\n    if (column is None):\n        selectable = get_node_selectable(node, context)\n        raise AssertionError(u'Column \"{}\" not found in selectable \"{}\". Columns present are {}. Context is {}.'.format(column_name, selectable.original, [col.name for col in selectable.c], context))\n    return column", "docstring": "Get a column by name from the selectable.\n\nArgs:\ncolumn_name: str, name of the column to retrieve.\nnode: SqlNode, the node the column is being retrieved for.\ncontext: CompilationContext, compilation specific metadata.\n\nReturns:\ncolumn, the SQLAlchemy column if found. Raises an AssertionError otherwise.", "source": "codesearchnet"}
{"code": "def get_shifted_center_blocks(x, indices):\n  \n  center_x = gather_blocks_2d(x, indices)\n\n  \n  def shift_right_2d_blocks(x):\n    \n    shifted_targets = (\n        tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :])\n    return shifted_targets\n\n  x_shifted = shift_right_2d_blocks(center_x)\n  return x_shifted", "docstring": "Get right shifted blocks for masked local attention 2d.\n\nArgs:\nx: A tensor with shape [batch, heads, height, width, depth]\nindices: The indices to gather blocks\n\nReturns:\nx_shifted: a tensor of extracted blocks, each block right shifted along\nlength.", "source": "juraj-google-style"}
{"code": "def _get_id_token_user(token, issuers, audiences, allowed_client_ids, time_now, cache):\n  \n  \n  \n  for issuer_key, issuer in issuers.items():\n    issuer_cert_uri = convert_jwks_uri(issuer.jwks_uri)\n    try:\n      parsed_token = _verify_signed_jwt_with_certs(\n          token, time_now, cache, cert_uri=issuer_cert_uri)\n    except Exception:  \n      _logger.debug(\n          'id_token verification failed for issuer %s', issuer_key, exc_info=True)\n      continue\n\n    issuer_values = _listlike_guard(issuer.issuer, 'issuer', log_warning=False)\n    if isinstance(audiences, _Mapping):\n      audiences = audiences[issuer_key]\n    if _verify_parsed_token(\n        parsed_token, issuer_values, audiences, allowed_client_ids,\n        \n        \n        \n        is_legacy_google_auth=(issuer.issuer == _ISSUERS)):\n      email = parsed_token['email']\n      \n      \n      \n      \n      \n      \n      return users.User(email)", "docstring": "Get a User for the given id token, if the token is valid.\n\nArgs:\ntoken: The id_token to check.\nissuers: dict of Issuers\naudiences: List of audiences that are acceptable.\nallowed_client_ids: List of client IDs that are acceptable.\ntime_now: The current time as a long (eg. long(time.time())).\ncache: Cache to use (eg. the memcache module).\n\nReturns:\nA User if the token is valid, None otherwise.", "source": "juraj-google-style"}
{"code": "def create_asymmetric_key_pair(self, algorithm, length):\n    if (algorithm not in self._asymmetric_key_algorithms.keys()):\n        raise exceptions.InvalidField('The cryptographic algorithm ({0}) is not a supported asymmetric key algorithm.'.format(algorithm))\n    engine_method = self._asymmetric_key_algorithms.get(algorithm)\n    return engine_method(length)", "docstring": "Create an asymmetric key pair.\n\nArgs:\nalgorithm(CryptographicAlgorithm): An enumeration specifying the\nalgorithm for which the created keys will be compliant.\nlength(int): The length of the keys to be created. This value must\nbe compliant with the constraints of the provided algorithm.\n\nReturns:\ndict: A dictionary containing the public key data, with at least\nthe following key/value fields:\n* value - the bytes of the key\n* format - a KeyFormatType enumeration for the bytes format\ndict: A dictionary containing the private key data, identical in\nstructure to the one above.\n\nRaises:\nInvalidField: Raised when the algorithm is unsupported or the\nlength is incompatible with the algorithm.\nCryptographicFailure: Raised when the key generation process\nfails.\n\nExample:\n>>> engine = CryptographyEngine()\n>>> key = engine.create_asymmetric_key(\n...     CryptographicAlgorithm.RSA, 2048)", "source": "codesearchnet"}
{"code": "def CreateStyleFromConfig(style_config):\n\n    def GlobalStyles():\n        for style, _ in _DEFAULT_STYLE_TO_FACTORY:\n            yield style\n    def_style = False\n    if style_config is None:\n        for style in GlobalStyles():\n            if _style == style:\n                def_style = True\n                break\n        if not def_style:\n            return _style\n        return _GLOBAL_STYLE_FACTORY()\n    if isinstance(style_config, dict):\n        config = _CreateConfigParserFromConfigDict(style_config)\n    elif isinstance(style_config, str):\n        style_factory = _STYLE_NAME_TO_FACTORY.get(style_config.lower())\n        if style_factory is not None:\n            return style_factory()\n        if style_config.startswith('{'):\n            config = _CreateConfigParserFromConfigString(style_config)\n        else:\n            config = _CreateConfigParserFromConfigFile(style_config)\n    return _CreateStyleFromConfigParser(config)", "docstring": "Create a style dict from the given config.\n\nArguments:\nstyle_config: either a style name or a file name. The file is expected to\ncontain settings. It can have a special BASED_ON_STYLE setting naming the\nstyle which it derives from. If no such setting is found, it derives from\nthe default style. When style_config is None, the _GLOBAL_STYLE_FACTORY\nconfig is created.\n\nReturns:\nA style dict.\n\nRaises:\nStyleConfigError: if an unknown style option was encountered.", "source": "github-repos"}
{"code": "def mb45(msg):\n    \n    d = hex2bin(data(msg))\n    if d[6] == '0':\n        return None\n\n    mb = bin2int(d[7:9])\n    return mb", "docstring": "Microburst.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: Microburst level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj-google-style"}
{"code": "def find_kv(pcoll, regex, keyGroup, valueGroup=0):\n    regex = Regex._regex_compile(regex)\n\n    def _process(element):\n        matches = regex.finditer(element)\n        if matches:\n            for match in matches:\n                yield (match.group(keyGroup), match.group(valueGroup))\n    return pcoll | FlatMap(_process)", "docstring": "Returns the matches if a portion of the line matches the Regex. Returns the\nspecified groups as the key and value pair.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\nkeyGroup: The Regex group to use as the key. Can be int or str.\nvalueGroup: (optional) Regex group to use the value. Can be int or str.\nThe default value \"0\" returns entire matched string.", "source": "github-repos"}
{"code": "def _frame_advance(self, action):\n        \n        \n        self.controllers[0][:] = action\n        \n        _LIB.Step(self._env)", "docstring": "Advance a frame in the emulator with an action.\n\nArgs:\naction (byte): the action to press on the joy-pad\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _CreateClassTemplate(cls, data_type_definition):\n    type_name = data_type_definition.name\n    type_description = (data_type_definition.description or type_name)\n    while type_description.endswith('.'):\n        type_description = type_description[:(- 1)]\n    class_attributes_description = []\n    init_arguments = []\n    instance_attributes = []\n    for member_definition in data_type_definition.members:\n        attribute_name = member_definition.name\n        description = (member_definition.description or attribute_name)\n        while description.endswith('.'):\n            description = description[:(- 1)]\n        member_data_type = getattr(member_definition, 'member_data_type', '')\n        if isinstance(member_definition, data_types.MemberDataTypeDefinition):\n            member_definition = member_definition.member_data_type_definition\n        member_type_indicator = member_definition.TYPE_INDICATOR\n        if (member_type_indicator == definitions.TYPE_INDICATOR_SEQUENCE):\n            element_type_indicator = member_definition.element_data_type\n            member_type_indicator = 'tuple[{0:s}]'.format(element_type_indicator)\n        else:\n            member_type_indicator = cls._PYTHON_NATIVE_TYPES.get(member_type_indicator, member_data_type)\n        argument = '{0:s}=None'.format(attribute_name)\n        definition = '    self.{0:s} = {0:s}'.format(attribute_name)\n        description = '    {0:s} ({1:s}): {2:s}.'.format(attribute_name, member_type_indicator, description)\n        class_attributes_description.append(description)\n        init_arguments.append(argument)\n        instance_attributes.append(definition)\n    class_attributes_description = '\\n'.join(sorted(class_attributes_description))\n    init_arguments = ', '.join(init_arguments)\n    instance_attributes = '\\n'.join(sorted(instance_attributes))\n    template_values = {'class_attributes_description': class_attributes_description, 'init_arguments': init_arguments, 'instance_attributes': instance_attributes, 'type_description': type_description, 'type_name': type_name}\n    return cls._CLASS_TEMPLATE.format(**template_values)", "docstring": "Creates the class template.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nstr: class template.", "source": "codesearchnet"}
{"code": "def matches(pcoll, regex, group=0):\n    regex = Regex._regex_compile(regex)\n\n    def _process(element):\n        m = regex.match(element)\n        if m:\n            yield m.group(group)\n    return pcoll | FlatMap(_process)", "docstring": "Returns the matches (group 0 by default) if zero or more characters at the\nbeginning of string match the regular expression. To match the entire\nstring, add \"$\" sign at the end of regex expression.\n\nGroup can be integer value or a string value.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\ngroup: (optional) name/number of the group, it can be integer or a string\nvalue. Defaults to 0, meaning the entire matched string will be\nreturned.", "source": "github-repos"}
{"code": "def usufyToJsonExport(d, fPath):\n    oldData = []\n    try:\n        with open(fPath) as iF:\n            oldText = iF.read()\n            if (oldText != ''):\n                oldData = json.loads(oldText)\n    except:\n        pass\n    jsonText = json.dumps((oldData + d), indent=2, sort_keys=True)\n    with open(fPath, 'w') as oF:\n        oF.write(jsonText)", "docstring": "Workaround to export to a json file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"}
{"code": "def _make_nonce(self):\n    chars = (string.digits + string.ascii_letters)\n    nonce = ''.join((random.choice(chars) for i in range(25)))\n    if self._logging:\n        utils.log(('nonce created: %s' % nonce))\n    return nonce", "docstring": "Generate a unique ID for the request, 25 chars in length\n\nReturns:\n- str: Cryptographic nonce", "source": "codesearchnet"}
{"code": "def source_file_list(self):\n    return tuple(self._host_name_file_path_to_offset.keys())", "docstring": "Get a list of source files known to the debugger data reader.\n\nReturns:\nA tuple of `(host_name, file_path)` tuples.", "source": "github-repos"}
{"code": "def execute(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=-1):\n    \n    from crianza import compiler\n    code = compiler.compile(parser.parse(source), optimize=optimize)\n    machine = Machine(code, output=output, input=input)\n    return machine.run(steps)", "docstring": "Compiles and runs program, returning the machine used to execute the\ncode.\n\nArgs:\noptimize: Whether to optimize the code after parsing it.\noutput: Stream which program can write output to.\ninput: Stream which program can read input from.\nsteps: An optional maximum number of instructions to execute on the\nvirtual machine.  Set to -1 for no limit.\n\nReturns:\nA Machine instance.", "source": "juraj-google-style"}
{"code": "def ascii_tree(self, no_types: bool = False, val_count: bool = False) -> str:\n        \n        return self.schema._ascii_tree(\"\", no_types, val_count)", "docstring": "Generate ASCII art representation of the schema tree.\n\nArgs:\nno_types: Suppress output of data type info.\nval_count: Show accumulated validation counts.\n\nReturns:\nString with the ASCII tree.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs):\n    \n    batch_size = inputs.get_shape()[0]\n    output_sequence, _ = tf.nn.dynamic_rnn(\n        cell=self._core,\n        inputs=inputs,\n        time_major=False,\n        initial_state=self._core.initial_state(\n            batch_size, trainable=False)\n    )\n    outputs = snt.BatchFlatten()(output_sequence[:, -1, :])\n    outputs = self._final_mlp(outputs)\n    logits = snt.Linear(self._target_size)(outputs)\n    return logits", "docstring": "Dynamic unroll across input objects.\n\nArgs:\ninputs: tensor (batch x num_objects x feature). Objects to sort.\n\nReturns:\nTensor (batch x num_objects); logits indicating the reference objects.", "source": "juraj-google-style"}
{"code": "def group_molecules(self, mol_list):\n        \n        mol_hash = [(i, self._mapper.get_molecule_hash(m))\n                    for i, m in enumerate(mol_list)]\n        mol_hash.sort(key=lambda x: x[1])\n\n        \n        raw_groups = tuple([tuple([m[0] for m in g]) for k, g\n                            in itertools.groupby(mol_hash,\n                                                 key=lambda x: x[1])])\n\n        group_indices = []\n        for rg in raw_groups:\n            mol_eq_test = [(p[0], p[1], self.fit(mol_list[p[0]],\n                                                 mol_list[p[1]]))\n                           for p in itertools.combinations(sorted(rg), 2)]\n            mol_eq = set([(p[0], p[1]) for p in mol_eq_test if p[2]])\n            not_alone_mols = set(itertools.chain.from_iterable(mol_eq))\n            alone_mols = set(rg) - not_alone_mols\n            group_indices.extend([[m] for m in alone_mols])\n            while len(not_alone_mols) > 0:\n                current_group = {not_alone_mols.pop()}\n                while len(not_alone_mols) > 0:\n                    candidate_pairs = set(\n                        [tuple(sorted(p)) for p\n                         in itertools.product(current_group, not_alone_mols)])\n                    mutual_pairs = candidate_pairs & mol_eq\n                    if len(mutual_pairs) == 0:\n                        break\n                    mutual_mols = set(itertools.chain\n                                      .from_iterable(mutual_pairs))\n                    current_group |= mutual_mols\n                    not_alone_mols -= mutual_mols\n                group_indices.append(sorted(current_group))\n\n        group_indices.sort(key=lambda x: (len(x), -x[0]), reverse=True)\n        all_groups = [[mol_list[i] for i in g] for g in group_indices]\n        return all_groups", "docstring": "Group molecules by structural equality.\n\nArgs:\nmol_list: List of OpenBabel OBMol or pymatgen objects\n\nReturns:\nA list of lists of matched molecules\nAssumption: if s1=s2 and s2=s3, then s1=s3\nThis may not be true for small tolerances.", "source": "juraj-google-style"}
{"code": "def docx_table_from_xml_node(table_node: ElementTree.Element, level: int, config: TextProcessingConfig) -> str:\n    table = CustomDocxTable()\n    for row_node in table_node:\n        if (row_node.tag != DOCX_TABLE_ROW):\n            continue\n        table.new_row()\n        for cell_node in row_node:\n            if (cell_node.tag != DOCX_TABLE_CELL):\n                continue\n            table.new_cell()\n            for para_node in cell_node:\n                text = docx_text_from_xml_node(para_node, level, config)\n                if text:\n                    table.add_paragraph(text)\n    return docx_process_table(table, config)", "docstring": "Converts an XML node representing a DOCX table into a textual\nrepresentation.\n\nArgs:\ntable_node: XML node\nlevel: current level in XML hierarchy (used for recursion; start level\nis 0)\nconfig: :class:`TextProcessingConfig` control object\n\nReturns:\nstring representation", "source": "codesearchnet"}
{"code": "def section(self, section_title):\n    section = '== {0} =='.format(section_title)\n    try:\n        content = self.content\n        index = (content.index(section) + len(section))\n        while True:\n            if (content[(index + 1)] == '='):\n                index += 1\n            else:\n                break\n    except ValueError:\n        return None\n    except IndexError:\n        pass\n    try:\n        next_index = self.content.index('==', index)\n    except ValueError:\n        next_index = len(self.content)\n    return self.content[index:next_index].lstrip('=').strip()", "docstring": "Plain text section content\n\nArgs:\nsection_title (str): Name of the section to pull\nReturns:\nstr: The content of the section\nNote:\nReturns **None** if section title is not found; only text \\\nbetween title and next section or sub-section title is returned\nNote:\nSide effect is to also pull the content which can be slow\nNote:\nThis is a parsing operation and not part of the standard API", "source": "codesearchnet"}
{"code": "def atomic_write_string_to_file(filename, contents, overwrite=True):\n    if not has_atomic_move(filename):\n        write_string_to_file(filename, contents)\n    else:\n        temp_pathname = filename + '.tmp' + uuid.uuid4().hex\n        write_string_to_file(temp_pathname, contents)\n        try:\n            rename(temp_pathname, filename, overwrite)\n        except errors.OpError:\n            delete_file(temp_pathname)\n            raise", "docstring": "Writes to `filename` atomically.\n\nThis means that when `filename` appears in the filesystem, it will contain\nall of `contents`. With write_string_to_file, it is possible for the file\nto appear in the filesystem with `contents` only partially written.\n\nAccomplished by writing to a temp file and then renaming it.\n\nArgs:\nfilename: string, pathname for a file\ncontents: string, contents that need to be written to the file\noverwrite: boolean, if false it's an error for `filename` to be occupied by\nan existing file.", "source": "github-repos"}
{"code": "def baseline_optimizer_arguments(self, states, internals, reward):\n        \n        arguments = dict(\n            time=self.global_timestep,\n            variables=self.baseline.get_variables(),\n            arguments=dict(\n                states=states,\n                internals=internals,\n                reward=reward,\n                update=tf.constant(value=True),\n            ),\n            fn_reference=self.baseline.reference,\n            fn_loss=self.fn_baseline_loss,\n            \n        )\n        if self.global_model is not None:\n            arguments['global_variables'] = self.global_model.baseline.get_variables()\n        return arguments", "docstring": "Returns the baseline optimizer arguments including the time, the list of variables to\noptimize, and various functions which the optimizer might require to perform an update\nstep.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nreward: Reward tensor.\n\nReturns:\nBaseline optimizer arguments as dict.", "source": "juraj-google-style"}
{"code": "def resetAndRejoin(self, timeout):\n        \n        print '%s call resetAndRejoin' % self.port\n        print timeout\n        try:\n            self._sendline('reset')\n            self.isPowerDown = True\n            time.sleep(timeout)\n\n            if self.deviceRole == Thread_Device_Role.SED:\n                self.setPollingRate(self.sedPollingRate)\n\n            self.__startOpenThread()\n            time.sleep(3)\n\n            if self.__sendCommand('state')[0] == 'disabled':\n                print '[FAIL] reset and rejoin'\n                return False\n            return True\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"resetAndRejoin() Error: \" + str(e))", "docstring": "reset and join back Thread Network with a given timeout delay\n\nArgs:\ntimeout: a timeout interval before rejoin Thread Network\n\nReturns:\nTrue: successful to reset and rejoin Thread Network\nFalse: fail to reset and rejoin the Thread Network", "source": "juraj-google-style"}
{"code": "def get_torque_state(self):\n    data = []\n    data.append(9)\n    data.append(self.servoid)\n    data.append(RAM_READ_REQ)\n    data.append(TORQUE_CONTROL_RAM)\n    data.append(BYTE2)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(13)\n        return bool(ord(rxdata[9]))\n    except HerkulexError:\n        raise HerkulexError('could not communicate with motors')", "docstring": "get the torque state of motor\n\nReturns:\nbool: True if torque is enabled, else False", "source": "codesearchnet"}
{"code": "def teredo(self):\n    if ((self._ip >> 96) != 536936448):\n        return None\n    return (IPv4Address(((self._ip >> 64) & 4294967295)), IPv4Address(((~ self._ip) & 4294967295)))", "docstring": "Tuple of embedded teredo IPs.\n\nReturns:\nTuple of the (server, client) IPs or None if the address\ndoesn't appear to be a teredo address (doesn't start with\n2001::/32)", "source": "codesearchnet"}
{"code": "def set_presence(self, state=None, status=None, priority=None):\n        \n        state = state if state is not None else self.state\n        status = status if status is not None else self.status\n        priority = priority if priority is not None else self.priority\n        self.presenceserver.set_presence(state, status, priority)", "docstring": "Change the presence broadcast by the client.\nIf the client is currently connected, the new presence is broadcast immediately.\n\nArgs:\nstate(aioxmpp.PresenceState, optional): New presence state to broadcast (Default value = None)\nstatus(dict or str, optional): New status information to broadcast (Default value = None)\npriority (int, optional): New priority for the resource (Default value = None)", "source": "juraj-google-style"}
{"code": "def _mac(model, obs, h):\n    (B, n_agents) = (obs.size(0), obs.size(1))\n    obs_flat = obs.reshape([(B * n_agents), (- 1)])\n    h_flat = [s.reshape([(B * n_agents), (- 1)]) for s in h]\n    (q_flat, _, _, h_flat) = model.forward({'obs': obs_flat}, h_flat)\n    return (q_flat.reshape([B, n_agents, (- 1)]), [s.reshape([B, n_agents, (- 1)]) for s in h_flat])", "docstring": "Forward pass of the multi-agent controller.\n\nArguments:\nmodel: TorchModel class\nobs: Tensor of shape [B, n_agents, obs_size]\nh: List of tensors of shape [B, n_agents, h_size]\n\nReturns:\nq_vals: Tensor of shape [B, n_agents, n_actions]\nh: Tensor of shape [B, n_agents, h_size]", "source": "codesearchnet"}
{"code": "def authenticate(self, code: str) -> 'Preston':\n    headers = self._get_authorization_headers()\n    data = {'grant_type': 'authorization_code', 'code': code}\n    r = self.session.post(self.TOKEN_URL, headers=headers, data=data)\n    if (not (r.status_code == 200)):\n        raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')\n    new_kwargs = dict(self._kwargs)\n    response_data = r.json()\n    new_kwargs['access_token'] = response_data['access_token']\n    new_kwargs['access_expiration'] = (time.time() + float(response_data['expires_in']))\n    new_kwargs['refresh_token'] = response_data['refresh_token']\n    return Preston(**new_kwargs)", "docstring": "Authenticates using the code from the EVE SSO.\n\nA new Preston object is returned; this object is not modified.\n\nThe intended usage is:\n\nauth = preston.authenticate('some_code_here')\n\nArgs:\ncode: SSO code\n\nReturns:\nnew Preston, authenticated", "source": "codesearchnet"}
{"code": "def create(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:\n    raise NotImplementedError", "docstring": "Returns a write channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def sign(x):\n    if any_symbolic_tensors((x,)):\n        return Sign().symbolic_call(x)\n    return backend.numpy.sign(x)", "docstring": "Returns a tensor with the signs of the elements of `x`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"}
{"code": "def report_schema(headers):\n    schema = []\n    for header_name in headers:\n        header_sanitized = column_header_sanitize(header_name)\n        header_type = DCM_Field_Lookup.get(header_sanitized)\n        if header_type is None:\n            for field_name, field_type in DCM_Field_Lookup.items():\n                if header_sanitized.endswith('_' + field_name):\n                    header_type = field_type\n                    break\n        if header_type is None:\n            header_type = 'STRING'\n        schema.append({'name': header_sanitized, 'type': header_type, 'mode': 'NULLABLE'})\n    return schema", "docstring": "Helper to determine the schema of a given set of report headers.\n\nUsing a match table generated from the DCM proto, each report header is\nmatched\nto its type and a schema is assembled. If not found defaults to STRING.\n\nUsage example:\n\n```\nfilename, report = report_file(...)\nrows = report_to_rows(report)\nrows = report_clean(rows)\nschema = report_schema(next(rows))\n```\n\nArgs:\n* headers: (list) First row of a report.\n\nReturns:\n* JSON schema definition.", "source": "github-repos"}
{"code": "def uninstalled(name):\n    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}\n    old = __salt__['flatpak.is_installed'](name)\n    if (not old):\n        ret['comment'] = 'Package {0} is not installed'.format(name)\n        ret['result'] = True\n        return ret\n    else:\n        if __opts__['test']:\n            ret['comment'] = 'Package {0} would have been uninstalled'.format(name)\n            ret['changes']['old'] = old[0]['version']\n            ret['changes']['new'] = None\n            ret['result'] = None\n            return ret\n        __salt__['flatpak.uninstall'](name)\n        if (not __salt__['flatpak.is_installed'](name)):\n            ret['comment'] = 'Package {0} uninstalled'.format(name)\n            ret['changes']['old'] = old[0]['version']\n            ret['changes']['new'] = None\n            ret['result'] = True\n            return ret", "docstring": "Ensure that the named package is not installed.\n\nArgs:\nname (str): The flatpak package.\n\nReturns:\ndict: The ``result`` and ``output``.\n\nExample:\n\n.. code-block:: yaml\n\nuninstall_package:\nflatpack.uninstalled:\n- name: gimp", "source": "codesearchnet"}
{"code": "def save_state_regularly(self, fname, frequency=600):\n    self.save_state(fname)\n    loop = asyncio.get_event_loop()\n    self.save_state_loop = loop.call_later(frequency, self.save_state_regularly, fname, frequency)", "docstring": "Save the state of node with a given regularity to the given\nfilename.\n\nArgs:\nfname: File name to save retularly to\nfrequency: Frequency in seconds that the state should be saved.\nBy default, 10 minutes.", "source": "codesearchnet"}
{"code": "def __init__(self, context):\n    \n    self._histograms_plugin = histograms_plugin.HistogramsPlugin(context)\n    self._multiplexer = context.multiplexer", "docstring": "Instantiates DistributionsPlugin via TensorBoard core.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def _is_every_steps(self, phase_step, batch, every):\n    if (not every):\n        return False\n    covered_steps = range(phase_step, (phase_step + batch))\n    return any(((((step + 1) % every) == 0) for step in covered_steps))", "docstring": "Determine whether a periodic event should happen at this step.\n\nArgs:\nphase_step: The incrementing step.\nbatch: The number of steps progressed at once.\nevery: The interval of the period.\n\nReturns:\nBoolean of whether the event should happen.", "source": "codesearchnet"}
{"code": "def _response_good(self, respond):\n        \n        if respond.status_code != requests.codes.ok:\n            log.warning('Got a {} code response to {}: {}'.format(\n                respond.status_code,\n                respond.url,\n                respond.text))\n            if respond.status_code in self.errorsNotRetry:\n              raise ApiError(usr_msg='Got a {} code response to {}: {}'.format(\n                respond.status_code,\n                respond.url,\n                respond.text))\n            else:\n              return self._parse_response(respond)\n        try:\n            if (str(respond.headers['content-type']).startswith(\"text/html;\")):\n                self.result = respond.text\n                return True\n            else:\n                self.result = respond.json()\n        except (json.JSONDecodeError, ValueError):\n            usr_msg = 'device server returned unexpected http response'\n            dev_msg = usr_msg + ': ' + respond.text\n            raise ApiError(usr_msg=usr_msg, dev_msg=dev_msg)\n        if not isinstance(self.result, (list, dict)):\n            msg = ('JSON not a list or dict: url: {0},'\n                   'status: {1}, reason: {2}, text: {3}')\n            raise ApiError(\n                usr_msg=msg.format(respond.url,\n                                   respond.status_code,\n                                   respond.reason, respond.text))\n        if ('error' not in self.result or\n                ('status' not in self.result['error'] or\n                 self.result['error']['status'] != 400)):\n            return True\n        else:\n            log.warning(\"Got a 400 code JSON response to %s\", respond.url)\n            return False", "docstring": "check response\n\nArgs:\nrespond (str): HTTP response.\n\nReturns:\nbool: True if the response is good, else False.\n\nRaises:\nApiError: response isn't formatted properly.", "source": "juraj-google-style"}
{"code": "def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None):\n    if (cid is not None):\n        assert (type(cid) == list), 'cid must be a list. cid: {}'.format(cid)\n        cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if (gctoo_col in cid)]\n        num_missing_cids = (len(cid) - len(cols_to_keep))\n        if (num_missing_cids != 0):\n            logger.info('{} cids were not found in the GCT.'.format(num_missing_cids))\n    elif (col_bool is not None):\n        assert (len(col_bool) == gctoo.data_df.shape[1]), ('col_bool must have length equal to gctoo.data_df.shape[1]. ' + 'len(col_bool): {}, gctoo.data_df.shape[1]: {}'.format(len(col_bool), gctoo.data_df.shape[1]))\n        cols_to_keep = gctoo.data_df.columns[col_bool].values\n    elif (cidx is not None):\n        assert (type(cidx[0]) is int), ('cidx must be a list of integers. cidx[0]: {}, ' + 'type(cidx[0]): {}').format(cidx[0], type(cidx[0]))\n        assert (max(cidx) <= gctoo.data_df.shape[1]), ('cidx contains an integer larger than the number of columns in ' + 'the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}').format(max(cidx), gctoo.data_df.shape[1])\n        cols_to_keep = gctoo.data_df.columns[cidx].values\n    else:\n        cols_to_keep = gctoo.data_df.columns.values\n    if (exclude_cid is not None):\n        cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if (col_to_keep not in exclude_cid)]\n    return cols_to_keep", "docstring": "Figure out based on the possible columns inputs which columns to keep.\n\nArgs:\ngctoo (GCToo object):\ncid (list of strings):\ncol_bool (boolean array):\ncidx (list of integers):\nexclude_cid (list of strings):\n\nReturns:\ncols_to_keep (list of strings): col ids to be kept", "source": "codesearchnet"}
{"code": "def Environ(variable, default):\n  \n  precondition.AssertType(variable, Text)\n\n  value = os.environ.get(variable, default)\n  if value is None:\n    return default\n  if PY2:\n    \n    value = value.decode(\"utf-8\")  \n  return value", "docstring": "A wrapper for `os.environ.get` that works the same way in both Pythons.\n\nArgs:\nvariable: A name of the variable to get the value of.\ndefault: A default value to return in case no value for the given variable\nis set.\n\nReturns:\nAn environment value of the given variable.", "source": "juraj-google-style"}
{"code": "def update_state(self, state_arr, action_arr):\n        \n        x, y = np.where(action_arr[-1] == 1)\n        self.__agent_pos = (x[0], y[0])\n        self.__route_memory_list.append((x[0], y[0]))\n        self.__route_long_memory_list.append((x[0], y[0]))\n        self.__route_long_memory_list = list(set(self.__route_long_memory_list))\n        while len(self.__route_memory_list) > self.__memory_num:\n            self.__route_memory_list = self.__route_memory_list[1:]\n\n        return self.extract_now_state()", "docstring": "Update state.\n\nOverride.\n\nArgs:\nstate_arr:    `np.ndarray` of state in `self.t`.\naction_arr:   `np.ndarray` of action in `self.t`.\n\nReturns:\n`np.ndarray` of state in `self.t+1`.", "source": "juraj-google-style"}
{"code": "class NotebookTrainingTracker(NotebookProgressBar):\n\n    def __init__(self, num_steps, column_names=None):\n        super().__init__(num_steps)\n        self.inner_table = None if column_names is None else [column_names]\n        self.child_bar = None\n\n    def display(self):\n        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)\n        if self.inner_table is not None:\n            self.html_code += text_to_html_table(self.inner_table)\n        if self.child_bar is not None:\n            self.html_code += self.child_bar.html_code\n        if self.output is None:\n            self.output = disp.display(disp.HTML(self.html_code), display_id=True)\n        else:\n            self.output.update(disp.HTML(self.html_code))\n\n    def write_line(self, values):\n        \n        if self.inner_table is None:\n            self.inner_table = [list(values.keys()), list(values.values())]\n        else:\n            columns = self.inner_table[0]\n            for key in values.keys():\n                if key not in columns:\n                    columns.append(key)\n            self.inner_table[0] = columns\n            if len(self.inner_table) > 1:\n                last_values = self.inner_table[-1]\n                first_column = self.inner_table[0][0]\n                if last_values[0] != values[first_column]:\n                    self.inner_table.append([values[c] if c in values else 'No Log' for c in columns])\n                else:\n                    new_values = values\n                    for c in columns:\n                        if c not in new_values.keys():\n                            new_values[c] = last_values[columns.index(c)]\n                    self.inner_table[-1] = [new_values[c] for c in columns]\n            else:\n                self.inner_table.append([values[c] for c in columns])\n\n    def add_child(self, total, prefix=None, width=300):\n        \n        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)\n        return self.child_bar\n\n    def remove_child(self):\n        \n        self.child_bar = None\n        self.display()", "docstring": "An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.\n\nArgs:\nnum_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*):\nThe list of column names for the metrics table (will be inferred from the first call to\n[`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).", "source": "github-repos"}
{"code": "def setMood(self, mood):\n    self.conn('POST', '{0}/users/{1}/profile/partial'.format(SkypeConnection.API_USER, self.userId), auth=SkypeConnection.Auth.SkypeToken, json={'payload': {'mood': (mood or '')}})\n    self.user.mood = (SkypeUser.Mood(plain=mood) if mood else None)", "docstring": "Update the activity message for the current user.\n\nArgs:\nmood (str): new mood message", "source": "codesearchnet"}
{"code": "def node_filter(self, name, **kwargs):\n        \n\n        def decorator(func):\n            self.filters[name] = NodeFilter(name, func, **kwargs)\n\n        return decorator", "docstring": "Returns a decorator function for adding a node filter.\n\nArgs:\nname (str): The name of the filter.\n**kwargs: Variable keyword arguments for the filter.\n\nReturns:\nCallable[[Callable[[Element, Any], bool]]]: A decorator function for adding a node\nfilter.", "source": "juraj-google-style"}
{"code": "def forward(self, inputs, expert_size):\n    input_list = inputs.split(expert_size, dim=0)\n    output_list = []\n    for i in range(self.num_experts):\n        output_list.append(F.linear(input_list[i], self.weight[i]))\n    results = torch.cat(output_list, dim=0)\n    return results", "docstring": "Forward pass of the GraniteMoeSharedParallelExperts module.\n\nArgs:\ninputs (Tensor):\nInput tensor.\nexpert_size:\nExpert size information.\n\nReturns:\nTensor: Output tensor.", "source": "github-repos"}
{"code": "def _sync_call(self, func):\n        \n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            if self._start_msg:\n                self._start_print()\n            result = func(*args, **kwargs)\n            if self._end_msg:\n                print(self._end_msg)\n            return result\n\n        setattr(wrapper, ANNOTATED, True)\n        return wrapper", "docstring": "__call__ function for regular synchronous functions.\n\nArgs:\nfunc: The annotated function.\nargs: Arguments for func.\nkwargs: Keyword arguments for func.", "source": "juraj-google-style"}
{"code": "def get_data_excel_xml(file_name, file_contents=None, on_demand=False):\n    \n    \n    if file_contents:\n        xml_file = BytesIO(file_contents)\n    else:\n        xml_file = file_name\n    book = xmlparse.ParseExcelXMLFile(xml_file)\n    row_builder = lambda s, r: list(s.row_values(r))\n    return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]", "docstring": "Loads xml excel format files.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy (will be ignored).", "source": "juraj-google-style"}
{"code": "def GenerateLibSig(short_name):\n  \n  with _UTILITY_LOCK:\n    utilities_used = ', '.join([utility for utility\n                                in sorted(_utility_registry)])\n    _utility_registry.Clear()\n\n  if utilities_used:\n    return ' (%s, %s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION,\n                                  utilities_used)\n  else:\n    return ' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)", "docstring": "Generates a library signature suitable for a user agent field.\n\nArgs:\nshort_name: The short, product-specific string name for the library.\nReturns:\nA library signature string to append to user-supplied user-agent value.", "source": "juraj-google-style"}
{"code": "def merge(self, other_cluster):\n    new_cluster = Cluster((self.sites | other_cluster.sites))\n    new_cluster.neighbours = (self.neighbours | other_cluster.neighbours).difference(new_cluster.sites)\n    return new_cluster", "docstring": "Combine two clusters into a single cluster.\n\nArgs:\nother_cluster (Cluster): The second cluster to combine.\n\nReturns:\n(Cluster):   The combination of both clusters.", "source": "codesearchnet"}
{"code": "def ndtri(p, name='ndtri'):\n    with tf.name_scope(name):\n        p = tf.convert_to_tensor(value=p, name='p')\n        if (dtype_util.as_numpy_dtype(p.dtype) not in [np.float32, np.float64]):\n            raise TypeError(('p.dtype=%s is not handled, see docstring for supported types.' % p.dtype))\n        return _ndtri(p)", "docstring": "The inverse of the CDF of the Normal distribution function.\n\nReturns x such that the area under the pdf from minus infinity to x is equal\nto p.\n\nA piece-wise rational approximation is done for the function.\nThis is a port of the implementation in netlib.\n\nArgs:\np: `Tensor` of type `float32`, `float64`.\nname: Python string. A name for the operation (default=\"ndtri\").\n\nReturns:\nx: `Tensor` with `dtype=p.dtype`.\n\nRaises:\nTypeError: if `p` is not floating-type.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    filename = parser_mediator.GetFilename()\n\n    if (not self._CACHE_FILENAME_RE.match(filename) and\n        not filename.startswith('_CACHE_00')):\n      raise errors.UnableToParseFile('Not a Firefox cache1 file.')\n\n    display_name = parser_mediator.GetDisplayName()\n    firefox_config = self._GetFirefoxConfig(file_object, display_name)\n\n    file_object.seek(firefox_config.first_record_offset)\n\n    while file_object.get_offset() < file_object.get_size():\n      try:\n        self._ParseCacheEntry(\n            parser_mediator, file_object, display_name,\n            firefox_config.block_size)\n\n      except IOError:\n        file_offset = file_object.get_offset() - self._MINIMUM_BLOCK_SIZE\n        logger.debug((\n            '[{0:s}] Invalid cache record in file: {1:s} at offset: '\n            '{2:d}.').format(self.NAME, display_name, file_offset))", "docstring": "Parses a Firefox cache file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "async def set(self, name, valu, init=False):\n        \n        with s_editatom.EditAtom(self.snap.core.bldgbuids) as editatom:\n            retn = await self._setops(name, valu, editatom, init)\n            if not retn:\n                return False\n            await editatom.commit(self.snap)\n            return True", "docstring": "Set a property on the node.\n\nArgs:\nname (str): The name of the property.\nvalu (obj): The value of the property.\ninit (bool): Set to True to disable read-only enforcement\n\nReturns:\n(bool): True if the property was changed.", "source": "juraj-google-style"}
{"code": "def enable_napps(cls, napps):\n        \n        mgr = NAppsManager()\n        for napp in napps:\n            mgr.set_napp(*napp)\n            LOG.info('NApp %s:', mgr.napp_id)\n            cls.enable_napp(mgr)", "docstring": "Enable a list of NApps.\n\nArgs:\nnapps (list): List of NApps.", "source": "juraj-google-style"}
{"code": "def parse_longitude(longitude, hemisphere):\n    \n    longitude = int(longitude[:3]) + float(longitude[3:]) / 60\n    if hemisphere == 'W':\n        longitude = -longitude\n    elif not hemisphere == 'E':\n        raise ValueError('Incorrect North/South value %r' % hemisphere)\n    return longitude", "docstring": "Parse a NMEA-formatted longitude pair.\n\nArgs:\nlongitude (str): Longitude in DDDMM.MMMM\nhemisphere (str): East or West\n\nReturns:\nfloat: Decimal representation of longitude", "source": "juraj-google-style"}
{"code": "def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"usergroup\": usergroup})\n        return self.api_call(\"usergroups.disable\", json=kwargs)", "docstring": "Disable an existing User Group\n\nArgs:\nusergroup (str): The encoded ID of the User Group to disable.\ne.g. 'S0604QSJC'", "source": "juraj-google-style"}
{"code": "def write_fasta_file(seq_records, outname, outdir=None, outext='.faa', force_rerun=False):\n    \n\n    if not outdir:\n        outdir = ''\n    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        SeqIO.write(seq_records, outfile, \"fasta\")\n\n    return outfile", "docstring": "Write a FASTA file for a SeqRecord or a list of SeqRecord objects.\n\nArgs:\nseq_records (SeqRecord, list): SeqRecord or a list of SeqRecord objects\noutname: Name of the output file which will have outext appended to it\noutdir: Path to directory to output sequences to\noutext: Extension of FASTA file, default \".faa\"\nforce_rerun: If file should be overwritten if it exists\n\nReturns:\nstr: Path to output FASTA file.", "source": "juraj-google-style"}
{"code": "def _instruction_list(self, filters):\n    return '\\n\\n'.join(([self.INSTRUCTIONS.strip(), '*Supported methods:*', 'If you send \"@{}: help\" to me I reply with these instructions.'.format(self.user), 'If you send \"@{}: version\" to me I reply with my current version.'.format(self.user)] + [filter.description() for filter in filters]))", "docstring": "Generates the instructions for a bot and its filters.\n\nNote:\nThe guidance for each filter is generated by combining the\ndocstrings of the predicate filter and resulting dispatch\nfunction with a single space between. The class's\n:py:attr:`INSTRUCTIONS` and the default help command are\nadded.\n\nArguments:\nfilters (:py:class:`list`): The filters to apply to incoming\nmessages.\n\nReturns:\n:py:class:`str`: The bot's instructions.", "source": "codesearchnet"}
{"code": "def append_flag_values(self, flag_values):\n    \n    for flag_name, flag in six.iteritems(flag_values._flags()):  \n      \n      \n      \n      \n      \n      if flag_name == flag.name:\n        try:\n          self[flag_name] = flag\n        except _exceptions.DuplicateFlagError:\n          raise _exceptions.DuplicateFlagError.from_flag(\n              flag_name, self, other_flag_values=flag_values)", "docstring": "Appends flags registered in another FlagValues instance.\n\nArgs:\nflag_values: FlagValues, the FlagValues instance from which to copy flags.", "source": "juraj-google-style"}
{"code": "def listTemplates(data={}):\n    conn = Qubole.agent()\n    url_path = Template.rest_entity_path\n    page_attr = []\n    if (('page' in data) and (data['page'] is not None)):\n        page_attr.append(('page=%s' % data['page']))\n    if (('per_page' in data) and (data['per_page'] is not None)):\n        page_attr.append(('per_page=%s' % data['per_page']))\n    if page_attr:\n        url_path = ('%s?%s' % (url_path, '&'.join(page_attr)))\n    return conn.get(url_path)", "docstring": "Fetch existing Templates details.\n\nArgs:\n`data`: dictionary containing the value of page number and per-page value\nReturns:\nDictionary containing paging_info and command_templates details", "source": "codesearchnet"}
{"code": "def clear_errors():\n    data = []\n    data.append(11)\n    data.append(BROADCAST_ID)\n    data.append(RAM_WRITE_REQ)\n    data.append(STATUS_ERROR_RAM)\n    data.append(BYTE2)\n    data.append(0)\n    data.append(0)\n    send_data(data)", "docstring": "Clears the errors register of all Herkulex servos\n\nArgs:\nnone", "source": "codesearchnet"}
{"code": "def s3_download(url, dst):\n    url = parse.urlparse(url)\n    if (url.scheme != 's3'):\n        raise ValueError((\"Expecting 's3' scheme, got: %s in %s\" % (url.scheme, url)))\n    (bucket, key) = (url.netloc, url.path.lstrip('/'))\n    region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))\n    s3 = boto3.resource('s3', region_name=region)\n    s3.Bucket(bucket).download_file(key, dst)", "docstring": "Download a file from S3.\n\nArgs:\nurl (str): the s3 url of the file.\ndst (str): the destination where the file will be saved.", "source": "codesearchnet"}
{"code": "def __init__(self, scope, parent, name):\n        \n        CodeStatement.__init__(self, scope, parent)\n        self.name = name\n        self.condition = True\n        self.body = CodeBlock(scope, self, explicit=False)", "docstring": "Constructor for control flow structures.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the control flow statement in the program.", "source": "juraj-google-style"}
{"code": "def _update_libdata(self, line):\n        \n        \n        \n        \n        \n        \n        if re.match('^Comment.*$', line, re.IGNORECASE):\n            comments = re.findall('\"([^\"]*)\"', line)\n            for c in comments:\n                self._parse_meta_info(c)\n                self._parse_compound_info(c)\n\n        \n        \n        \n        \n        \n        self._parse_meta_info(line)\n        self._parse_compound_info(line)\n\n        \n        \n        \n        \n        \n        \n        if self.collect_meta and (re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\\$PEAK:(.*)', line,\n                re.IGNORECASE) or re.match('^PK\\$ANNOTATION(.*)', line, re.IGNORECASE)):\n\n            self._store_compound_info()\n\n            self._store_meta_info()\n\n            \n            self.meta_info = get_blank_dict(self.meta_regex)\n            self.compound_info = get_blank_dict(self.compound_regex)\n            self.other_names = []\n            self.collect_meta = False\n\n        \n        if re.match('^PK\\$PEAK: m/z int\\. rel\\.int\\.$', line, re.IGNORECASE):\n            self.ignore_additional_spectra_info = True\n\n        \n        if re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\\$PEAK:(.*)', line, re.IGNORECASE):\n            self.start_spectra = True\n            return\n        elif re.match('^PK\\$ANNOTATION(.*)', line, re.IGNORECASE):\n            self.start_spectra_annotation = True\n\n            match = re.match('^PK\\$ANNOTATION:(.*)', line, re.IGNORECASE)\n            columns = match.group(1)\n            cl = columns.split()\n\n            self.spectra_annotation_indexes = {i: cl.index(i) for i in cl}\n            return\n\n        \n        \n        \n        \n        if self.start_spectra_annotation:\n            self._parse_spectra_annotation(line)\n\n        \n        \n        \n        if self.start_spectra:\n            self._parse_spectra(line)", "docstring": "Update the library meta data from the current line being parsed\n\nArgs:\nline (str): The current line of the of the file being parsed", "source": "juraj-google-style"}
{"code": "def add_time(data):\n    \n    payload = data['data']\n    updated = data['updated'].date()\n    if updated == date.today():\n        payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')\n    elif updated >= (date.today() - timedelta(days=1)):\n        payload['last_updated'] = 'yesterday'\n    elif updated >= (date.today() - timedelta(days=7)):\n        payload['last_updated'] = updated.strftime('on %A')\n    else:\n        payload['last_updated'] = updated.strftime('%Y-%m-%d')\n    return payload", "docstring": "And a friendly update time to the supplied data.\n\nArguments:\ndata (:py:class:`dict`): The response data and its update time.\n\nReturns:\n:py:class:`dict`: The data with a friendly update time.", "source": "juraj-google-style"}
{"code": "def _UpdateUserGroups(self, user, groups):\n    groups = ','.join(groups)\n    self.logger.debug('Updating user %s with groups %s.', user, groups)\n    command = self.usermod_cmd.format(user=user, groups=groups)\n    try:\n        subprocess.check_call(command.split(' '))\n    except subprocess.CalledProcessError as e:\n        self.logger.warning('Could not update user %s. %s.', user, str(e))\n        return False\n    else:\n        self.logger.debug('Updated user account %s.', user)\n        return True", "docstring": "Update group membership for a Linux user.\n\nArgs:\nuser: string, the name of the Linux user account.\ngroups: list, the group names to add the user as a member.\n\nReturns:\nbool, True if user update succeeded.", "source": "codesearchnet"}
{"code": "def record(self, flat_outputs, inference_args, input_tangents):\n    backward_function, to_record = self._wrap_backward_function(self._forward_graph, self._backward, flat_outputs)\n    if self._forwardprop_output_indices:\n        record.record_operation_backprop_only(self._forward.cached_definition.signature.name, to_record, inference_args, backward_function)\n        record.record_operation_forwardprop_only(self._forward.cached_definition.signature.name, flat_outputs, inference_args + input_tangents, backward_function, self._forwardprop_output_indices)\n    else:\n        record.record_operation(self._forward.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)", "docstring": "Record the function call operation.\n\nFor backprop, indicates the backward function to use and which new Tensors\nmust be watched. For forwardprop from eager, the function call itself will\nhave produced tangents which need to be recorded.\n\nArgs:\nflat_outputs: The result of running `forward`.\ninference_args: A flat list of Tensors with inference inputs to the\noperation.\ninput_tangents: A flat list of Tensors with input tangents consumed by the\noperation.", "source": "github-repos"}
{"code": "def list_vm_images_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Compute/images',\n                        '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VM images in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of a list of VM images.", "source": "juraj-google-style"}
{"code": "def __init__(self, sid=None):\n        \n        self.final = False\n        self.initial = False\n        self.stateid = sid\n        self.arcs = []", "docstring": "Initialization function\nArgs:\nsid (int): The state identifier\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_first(self, status):\n        \n\n        items = self.get_all(status)\n\n        if items:\n            return list(items.items())[0][1]\n\n        return None", "docstring": "Get the first item in the queue that has the given status.\n\nArgs:\nstatus (str): return the first item with this status.\n\nReturns:\n:class:`nyawc.QueueItem`: The first queue item with the given status.", "source": "juraj-google-style"}
{"code": "def _maxSizeCheck(cls, obj):\n        \n        fail = False\n        size = 0\n\n        if isinstance(obj, numbers.Number):\n            if obj > constants.MAX_FRAME_SIZE:\n                fail = True\n                size = obj\n\n        elif hasattr(obj, '__len__'):\n            size = len(obj)\n            fail = size > constants.MAX_FRAME_SIZE\n\n        if fail:\n            raise MaxSizeException('Frame size %s > %s (MAX_FRAME_SIZE)' \\\n                    % (size, constants.MAX_FRAME_SIZE))", "docstring": "Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE\n\nArgs:\nobj (numbers.Number or collection):\n\nRaises:\n:class:`fileseq.exceptions.MaxSizeException`:", "source": "juraj-google-style"}
{"code": "def accountValues(self, account: str = '') -> List[AccountValue]:\n        \n        if account:\n            return [v for v in self.wrapper.accountValues.values()\n                    if v.account == account]\n        else:\n            return list(self.wrapper.accountValues.values())", "docstring": "List of account values for the given account,\nor of all accounts if account is left blank.\n\nArgs:\naccount: If specified, filter for this account name.", "source": "juraj-google-style"}
{"code": "def generate(self, model, outfolder, *, exclude=None):\n    with pythonic_names():\n        super().generate(model, outfolder)\n        check_dependency = (self.with_dependencies and model.eResource)\n        if check_dependency:\n            if (exclude is None):\n                exclude = set()\n            resource = model.eResource\n            exclude.add(resource)\n            rset = resource.resource_set\n            direct_resources = {r for r in rset.resources.values() if (r not in exclude)}\n            for resource in direct_resources:\n                self.generate(resource.contents[0], outfolder, exclude=exclude)", "docstring": "Generate model code.\n\nArgs:\nmodel: The meta-model to generate code for.\noutfolder: Path to the directoty that will contain the generated code.\nexclude: List of referenced resources for which code was already generated\n(to prevent regeneration).", "source": "codesearchnet"}
{"code": "def from_steps(step1, step2, normalization_els):\n        \n        working_ion_entry = step1[\"element_reference\"]\n        working_ion = working_ion_entry.composition.elements[0].symbol\n        working_ion_valence = max(Element(working_ion).oxidation_states)\n        voltage = (-step1[\"chempot\"] + working_ion_entry.energy_per_atom)/working_ion_valence\n        mAh = (step2[\"evolution\"] - step1[\"evolution\"]) \\\n            * Charge(1, \"e\").to(\"C\") * Time(1, \"s\").to(\"h\") * N_A * 1000*working_ion_valence\n        licomp = Composition(working_ion)\n        prev_rxn = step1[\"reaction\"]\n        reactants = {comp: abs(prev_rxn.get_coeff(comp))\n                     for comp in prev_rxn.products if comp != licomp}\n\n        curr_rxn = step2[\"reaction\"]\n        products = {comp: abs(curr_rxn.get_coeff(comp))\n                    for comp in curr_rxn.products if comp != licomp}\n\n        reactants[licomp] = (step2[\"evolution\"] - step1[\"evolution\"])\n\n        rxn = BalancedReaction(reactants, products)\n\n        for el, amt in normalization_els.items():\n            if rxn.get_el_amount(el) > 1e-6:\n                rxn.normalize_to_element(el, amt)\n                break\n\n        prev_mass_dischg = sum([prev_rxn.all_comp[i].weight\n                                * abs(prev_rxn.coeffs[i])\n                                for i in range(len(prev_rxn.all_comp))]) / 2\n        vol_charge = sum([abs(prev_rxn.get_coeff(e.composition))\n                          * e.structure.volume\n                          for e in step1[\"entries\"]\n                          if e.composition.reduced_formula != working_ion])\n        mass_discharge = sum([curr_rxn.all_comp[i].weight\n                              * abs(curr_rxn.coeffs[i])\n                              for i in range(len(curr_rxn.all_comp))]) / 2\n        mass_charge = prev_mass_dischg\n        mass_discharge = mass_discharge\n        vol_discharge = sum([abs(curr_rxn.get_coeff(e.composition))\n                             * e.structure.volume\n                             for e in step2[\"entries\"]\n                             if e.composition.reduced_formula != working_ion])\n\n        totalcomp = Composition({})\n        for comp in prev_rxn.products:\n            if comp.reduced_formula != working_ion:\n                totalcomp += comp * abs(prev_rxn.get_coeff(comp))\n        frac_charge = totalcomp.get_atomic_fraction(Element(working_ion))\n\n        totalcomp = Composition({})\n        for comp in curr_rxn.products:\n            if comp.reduced_formula != working_ion:\n                totalcomp += comp * abs(curr_rxn.get_coeff(comp))\n        frac_discharge = totalcomp.get_atomic_fraction(Element(working_ion))\n\n        rxn = rxn\n        entries_charge = step2[\"entries\"]\n        entries_discharge = step1[\"entries\"]\n\n        return ConversionVoltagePair(rxn, voltage, mAh, vol_charge,\n                                     vol_discharge, mass_charge,\n                                     mass_discharge,\n                                     frac_charge, frac_discharge,\n                                     entries_charge, entries_discharge,\n                                     working_ion_entry)", "docstring": "Creates a ConversionVoltagePair from two steps in the element profile\nfrom a PD analysis.\n\nArgs:\nstep1: Starting step\nstep2: Ending step\nnormalization_els: Elements to normalize the reaction by. To\nensure correct capacities.", "source": "juraj-google-style"}
{"code": "def download_data_impl(self, run, tag, response_format):\n    scalars_plugin_instance = self._get_scalars_plugin()\n    if (not scalars_plugin_instance):\n        raise ValueError('Failed to respond to request for /download_data. The scalars plugin is oddly not registered.')\n    (body, mime_type) = scalars_plugin_instance.scalars_impl(tag, run, None, response_format)\n    return (body, mime_type)", "docstring": "Provides a response for downloading scalars data for a data series.\n\nArgs:\nrun: The run.\ntag: The specific tag.\nresponse_format: A string. One of the values of the OutputFormat enum of\nthe scalar plugin.\n\nRaises:\nValueError: If the scalars plugin is not registered.\n\nReturns:\n2 entities:\n- A JSON object response body.\n- A mime type (string) for the response.", "source": "codesearchnet"}
{"code": "def search_stack_for_localvar(varname):\n    curr_frame = inspect.currentframe()\n    print((' * Searching parent frames for: ' + six.text_type(varname)))\n    frame_no = 0\n    while (curr_frame.f_back is not None):\n        if (varname in curr_frame.f_locals.keys()):\n            print((' * Found in frame: ' + six.text_type(frame_no)))\n            return curr_frame.f_locals[varname]\n        frame_no += 1\n        curr_frame = curr_frame.f_back\n    print((('... Found nothing in all ' + six.text_type(frame_no)) + ' frames.'))\n    return None", "docstring": "Finds a local varable somewhere in the stack and returns the value\n\nArgs:\nvarname (str): variable name\n\nReturns:\nNone if varname is not found else its value", "source": "codesearchnet"}
{"code": "def apply(self, parent_environ=None):\n        \n        interpreter = Python(target_environ=os.environ)\n        executor = self._create_executor(interpreter, parent_environ)\n        self._execute(executor)\n        interpreter.apply_environ()", "docstring": "Apply the context to the current python session.\n\nNote that this updates os.environ and possibly sys.path, if\n`parent_environ` is not provided.\n\nArgs:\nparent_environ: Environment to interpret the context within,\ndefaults to os.environ if None.", "source": "juraj-google-style"}
{"code": "def remove_triple(self, p, o, auto_refresh=True):\n\n\t\t\n\n\t\tself.rdf.graph.remove((self.uri, p, self._handle_object(o)))\n\n\t\t\n\t\tself._handle_triple_refresh(auto_refresh)", "docstring": "remove triple by supplying p,o\n\nArgs:\np (rdflib.term.URIRef): predicate\no (): object\nauto_refresh (bool): whether or not to update object-like self.rdf.triples\n\nReturns:\nNone: removes triple from self.rdf.graph", "source": "juraj-google-style"}
{"code": "class Pooling1D(Layer):\n\n    def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        super(Pooling1D, self).__init__(name=name, **kwargs)\n        if data_format is None:\n            data_format = backend.image_data_format()\n        if strides is None:\n            strides = pool_size\n        self.pool_function = pool_function\n        self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')\n        self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')\n        self.padding = conv_utils.normalize_padding(padding)\n        self.data_format = conv_utils.normalize_data_format(data_format)\n        self.input_spec = InputSpec(ndim=3)\n\n    def call(self, inputs):\n        pad_axis = 2 if self.data_format == 'channels_last' else 3\n        inputs = array_ops.expand_dims(inputs, pad_axis)\n        outputs = self.pool_function(inputs, self.pool_size + (1,), strides=self.strides + (1,), padding=self.padding, data_format=self.data_format)\n        return array_ops.squeeze(outputs, pad_axis)\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        if self.data_format == 'channels_first':\n            steps = input_shape[2]\n            features = input_shape[1]\n        else:\n            steps = input_shape[1]\n            features = input_shape[2]\n        length = conv_utils.conv_output_length(steps, self.pool_size[0], self.padding, self.strides[0])\n        if self.data_format == 'channels_first':\n            return tensor_shape.TensorShape([input_shape[0], features, length])\n        else:\n            return tensor_shape.TensorShape([input_shape[0], length, features])\n\n    def get_config(self):\n        config = {'strides': self.strides, 'pool_size': self.pool_size, 'padding': self.padding, 'data_format': self.data_format}\n        base_config = super(Pooling1D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "Pooling layer for arbitrary pooling functions, for 1D inputs.\n\nThis class only exists for code reuse. It will never be an exposed API.\n\nArgs:\npool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.\npool_size: An integer or tuple/list of a single integer,\nrepresenting the size of the pooling window.\nstrides: An integer or tuple/list of a single integer, specifying the\nstrides of the pooling operation.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string,\none of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, steps, features)` while `channels_first`\ncorresponds to inputs with shape\n`(batch, features, steps)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def _create_position_ids_from_inputs_embeds(inputs_embeds: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int]) -> tf.Tensor:\n    input_shape = shape_list(inputs_embeds)[:-1]\n    sequence_length = input_shape[1]\n    position_ids = tf.range(padding_idx + 1, sequence_length + padding_idx + 1, dtype=tf.int64)\n    return tf.broadcast_to(tf.expand_dims(position_ids, axis=0), input_shape) + past_key_values_length", "docstring": "Args:\nWe are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\ninputs_embeds: tf.Tensor\nReturns: tf.Tensor", "source": "github-repos"}
{"code": "def path_fraction_id_offset(points, fraction, relative_offset=False):\n    \n    if not (0. <= fraction <= 1.0):\n        raise ValueError(\"Invalid fraction: %.3f\" % fraction)\n    pts = np.array(points)[:, COLS.XYZ]\n    lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)\n    cum_lengths = np.cumsum(lengths)\n    offset = cum_lengths[-1] * fraction\n    seg_id = np.argmin(cum_lengths < offset)\n    if seg_id > 0:\n        offset -= cum_lengths[seg_id - 1]\n    if relative_offset:\n        offset /= lengths[seg_id]\n    return seg_id, offset", "docstring": "Find the segment which corresponds to the fraction\nof the path length along the piecewise linear curve which\nis constructed from the set of points.\n\nArgs:\npoints: an iterable of indexable objects with indices\n0, 1, 2 correspoding to 3D cartesian coordinates\nfraction: path length fraction (0.0 <= fraction <= 1.0)\nrelative_offset: return absolute or relative segment distance\n\nReturns:\n(segment ID, segment offset) pair.", "source": "juraj-google-style"}
{"code": "def __logical_source__(self, map_iri):\n        \n        \n        logical_source = SimpleNamespace()\n        logical_src_bnode = self.rml.value(\n            subject=map_iri,\n            predicate=NS_MGR.rml.logicalSource.rdflib)\n        if logical_src_bnode is None:\n            return\n        logical_source.source = self.rml.value(\n            subject=logical_src_bnode,\n\t        predicate=NS_MGR.rml.source.rdflib)\n        logical_source.reference_formulations = [r for r in self.rml.objects(\n            subject=logical_src_bnode,\n            predicate=NS_MGR.rml.referenceFormulation.rdflib)]\n        logical_source.iterator = self.rml.value(\n            subject=logical_src_bnode,\n            predicate=NS_MGR.rml.iterator.rdflib)\n        query = self.rml.value(\n            subject=logical_src_bnode,\n            predicate=NS_MGR.rml.query.rdflib)\n        json_query = self.rml.value(\n            subject=logical_src_bnode,\n            predicate=NS_MGR.rml.reference.rdflib)\n        json_key = self.rml.value(\n            subject=logical_src_bnode,\n            predicate=NS_MGR.rml.key.rdflib)\n        if query is not None:\n            logical_source.query = query\n        if json_query is not None:\n            self.use_json_qry = True\n            self.default_use_json_qry = True\n            logical_source.json_query = json_query\n            logical_source.json_key = json_key\n        return logical_source", "docstring": "Creates a SimpleNamespace for the TripelMap's logicalSource\n\nArgs:\n\n-----\nmap_iri: URIRef", "source": "juraj-google-style"}
{"code": "def run_inference_on_image(image):\n  \n  if not tf.gfile.Exists(image):\n    tf.logging.fatal('File does not exist %s', image)\n  image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n  \n  create_graph()\n\n  with tf.Session() as sess:\n    \n    \n    \n    \n    \n    \n    \n    \n    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n    predictions = sess.run(softmax_tensor,\n                           {'DecodeJpeg/contents:0': image_data})\n    predictions = np.squeeze(predictions)\n\n    \n    node_lookup = NodeLookup()\n\n    top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n    for node_id in top_k:\n      human_string = node_lookup.id_to_string(node_id)\n      score = predictions[node_id]\n      print('%s (score = %.5f)' % (human_string, score))", "docstring": "Runs inference on an image.\n\nArgs:\nimage: Image file name.\n\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def sum(x, axis=None, keepdims=False):\n    \n    from .function_bases import sum as sum_base\n    if axis is None:\n        axis = range(x.ndim)\n    elif not hasattr(axis, '__iter__'):\n        axis = [axis]\n    return sum_base(x, axis, keepdims)", "docstring": "Reduction along axes with sum operation.\n\nArgs:\nx (Variable): An input variable.\naxis (None, int or tuple of ints): Axis or axes along which the sum is\ncalculated. Passing the default value `None` will reduce all dimensions.\nkeepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.\n\nReturns:\n~nnabla.Variable: N-D array.", "source": "juraj-google-style"}
{"code": "def read_tree(input, schema):\n    schema_to_function = {'dendropy': read_tree_dendropy, 'newick': read_tree_newick, 'nexml': read_tree_nexml, 'nexus': read_tree_nexus}\n    if (schema.lower() not in schema_to_function):\n        raise ValueError(('Invalid schema: %s (valid options: %s)' % (schema, ', '.join(sorted(schema_to_function.keys())))))\n    return schema_to_function[schema.lower()](input)", "docstring": "Read a tree from a string or file\n\nArgs:\n``input`` (``str``): Either a tree string, a path to a tree file (plain-text or gzipped), or a DendroPy Tree object\n\n``schema`` (``str``): The schema of ``input`` (DendroPy, Newick, NeXML, or Nexus)\n\nReturns:\n* If the input is Newick, either a ``Tree`` object if ``input`` contains a single tree, or a ``list`` of ``Tree`` objects if ``input`` contains multiple trees (one per line)\n\n* If the input is NeXML or Nexus, a ``dict`` of trees represented by ``input``, where keys are tree names (``str``) and values are ``Tree`` objects", "source": "codesearchnet"}
{"code": "def __init__(self, instruments = None, scripts = None, name=None, settings=None, log_function = None, data_path = None):\n        \n        super(ScriptDummyWrapper, self).__init__(self, name, settings, log_function= log_function, data_path=data_path)", "docstring": "Example of a script\nArgs:\nname (optional): name of script, if empty same as class name\nsettings (optional): settings for this script, if empty same as default settings", "source": "juraj-google-style"}
{"code": "def add_key_value(self, key, value):\n        \n        if key == 'unique_id':\n            self._unique_id = str(value)\n        else:\n            self._data[key] = value", "docstring": "Converts the value and adds it as a data field.\n\nArgs:\nkey:\nvalue:", "source": "juraj-google-style"}
{"code": "def create_line_plot(df):\n    \n    fig = Figure(\"/mg/line_plot/\", \"mg_line_plot\")\n    fig.graphics.transition_on_update(True)\n    fig.graphics.animate_on_load()\n    fig.layout.set_size(width=450, height=200)\n    fig.layout.set_margin(left=40, right=40)\n    return LineChart(df, fig, \"Date\", [\"value\"],\n        init_params={\"Data\": \"Steps\"}, timeseries=True)", "docstring": "create a mg line plot\n\nArgs:\ndf (pandas.DataFrame): data to plot", "source": "juraj-google-style"}
{"code": "def _gen_save_and_restore_functions(checkpoint_factory_map: object_identity.ObjectIdentityDictionary) -> object_identity.ObjectIdentityDictionary:\n    saveable_fn_map = object_identity.ObjectIdentityDictionary()\n    for obj, factory_data_list in checkpoint_factory_map.items():\n        if resource_variable_ops.is_resource_variable(obj) or not factory_data_list:\n            continue\n        if factory_data_list[0].name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:\n            assert len(factory_data_list) == 1\n            saveable_fn_map[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: tracing_utils.trace_save_and_restore(obj)}\n        else:\n            saveable_fn_map[obj] = trace_saveable_util.trace_save_restore_function_map(obj, factory_data_list)\n    return saveable_fn_map", "docstring": "Generates global and individual save/restore concrete functions.\n\nThe global functions records the ops to save and restore the entire object to\na file prefix, while the individual functions save and restore value tensors\nfor resources.\n\nThis function is intended to run on the output of\n`save_util_v1.get_checkpoint_factories_and_keys(object_names)`,\nwhich returns the generated a map of `_CheckpointFactoryData`.\n\nArgs:\ncheckpoint_factory_map: A dictionary mapping trackable objects to\na list of `_CheckpointFactoryData`.\n\nReturns:\nTuple of (\nsaveable_fn_map: Maps obj -> factory name -> (concrete save, restore)\n)", "source": "github-repos"}
{"code": "def set_max_freq(self, max_freq=None):\n        \n        if max_freq:\n            self['max_freq'] = max_freq\n        else:\n            for frequency in self['frequencies']:\n                if self['max_freq']:\n                    if frequency['value'] > self['max_freq']:\n                        self['max_freq'] = frequency['value']\n                else:\n                    self['max_freq'] = frequency['value']\n        return", "docstring": "Set the max frequency for the variant\n\nIf max_freq use this, otherwise go through all frequencies and\nset the highest as self['max_freq']\n\nArgs:\nmax_freq (float): The max frequency", "source": "juraj-google-style"}
{"code": "def plot_hall_carriers(self, temp=300):\n        \n        import matplotlib.pyplot as plt\n        hall_carriers = [abs(i) for i in\n                         self._bz.get_hall_carrier_concentration()[temp]]\n        plt.semilogy(self._bz.mu_steps,\n                     hall_carriers,\n                     linewidth=3.0, color='r')\n        self._plot_bg_limits()\n        self._plot_doping(temp)\n        plt.xlim(-0.5, self._bz.gap + 0.5)\n        plt.ylim(1e14, 1e22)\n        plt.ylabel(\"Hall carrier concentration (cm-3)\", fontsize=30.0)\n        plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30)\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n        return plt", "docstring": "Plot the Hall carrier concentration in function of Fermi level\n\nArgs:\ntemp: the temperature\n\nReturns:\na matplotlib object", "source": "juraj-google-style"}
{"code": "def _stringify_path(path_or_buffer):\n    try:\n        import pathlib\n        _PATHLIB_INSTALLED = True\n    except ImportError:\n        _PATHLIB_INSTALLED = False\n    if hasattr(path_or_buffer, '__fspath__'):\n        return path_or_buffer.__fspath__()\n    if (_PATHLIB_INSTALLED and isinstance(path_or_buffer, pathlib.Path)):\n        return text_type(path_or_buffer)\n    return path_or_buffer", "docstring": "Convert path like object to string\n\nArgs:\npath_or_buffer: object to be converted\n\nReturns:\nstring_path_or_buffer: maybe string version of path_or_buffer", "source": "codesearchnet"}
{"code": "def trace_distance_bound(val: Any) -> float:\n    getter = getattr(val, '_trace_distance_bound_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if ((result is not NotImplemented) and (result < 1.0)):\n        return result\n    return 1.0", "docstring": "Returns a maximum on the trace distance between this effect's input\nand output.  This method makes use of the effect's `_trace_distance_bound_`\nmethod to determine the maximum bound on the trace difference between\nbefore and after the effect.\n\nArgs:\nval: The effect of which the bound should be calculated\n\nReturns:\nIf `val` has a _trace_distance_bound_ method and its result is not\nNotImplemented, that result is returned. Otherwise, 1 is returned.\nResult is capped at a maximum of 1, even if the underlying function\nproduces a result greater than 1.", "source": "codesearchnet"}
{"code": "def make(self, path, metadata=None):\n    self.current_shard_filenames = []\n    if self.h5_file is not None:\n        self.current_shard_filenames.append(pathlib.Path(self.h5_file.filename).name)\n    return super().make(path, metadata)", "docstring": "Make a new H5 entry group.\n\nThis method is only available in write mode. It defers the creation of\nthe H5 entry group until `__setitem__` is called, preventing the\ncreation of empty groups.\n\nThe information about the current shard is reset.\n\nArgs:\npath: `str`. The variable path.\nmetadata: Optional `dict`. The metadata to save with the H5 entry\ngroup. Defaults to `None`.", "source": "github-repos"}
{"code": "def freeze_script(script_path, cache=True, temp_path='_hadoopy_temp'):\n    script_abspath = os.path.abspath(script_path)\n    if (not os.path.exists(script_abspath)):\n        raise ValueError(('Script [%s] does not exist.' % script_abspath))\n    try:\n        if (not cache):\n            raise KeyError\n        (cmds, frozen_tar_path) = FREEZE_CACHE[script_abspath]\n    except KeyError:\n        tmp_frozen_tar_path = (temp_path + ('/%f.tar' % time.time()))\n        freeze_fp = tempfile.NamedTemporaryFile(suffix='.tar')\n        cmds = hadoopy._freeze.freeze_to_tar(os.path.abspath(script_path), freeze_fp.name)\n        md5 = _md5_file(freeze_fp.name)\n        frozen_tar_path = (temp_path + ('/%s.tar' % md5))\n        if (not hadoopy.exists(frozen_tar_path)):\n            if (not hadoopy.exists(temp_path)):\n                hadoopy.mkdir(temp_path)\n            hadoopy.put(freeze_fp.name, tmp_frozen_tar_path)\n            try:\n                hadoopy.mv(tmp_frozen_tar_path, frozen_tar_path)\n            except IOError:\n                if (not hadoopy.exists(frozen_tar_path)):\n                    raise\n    FREEZE_CACHE[script_abspath] = (cmds, frozen_tar_path)\n    return {'cmds': cmds, 'frozen_tar_path': frozen_tar_path}", "docstring": "Freezes a script, puts it on hdfs, and gives you the path\n\n'frozen_tar_path' can be given to launch_frozen and it will use that\ninstead of making its own, this is useful for repeated calls.  If a\nfile with the same md5 already exists in the temp_path, it is used\ninstead of putting a new copy there to avoid the file transfer.  The\nfiles are put into a temporary file based on the timestamp first, then\nmoved to a location that is only a function of their md5 to prevent partial\nfiles.\n\nArgs:\nscript_path: Path to a hadoopy script\ncache: If True (default) then use previously frozen scripts.  Cache is stored in memory (not persistent).\ntemp_path: HDFS temporary path (default is '_hadoopy_temp')\n\nReturns:\n{'cmds': commands_ran, 'frozen_tar_path': frozen_tar_path}\n\nRaises:\nValueError: Script cannot be found", "source": "codesearchnet"}
{"code": "def FromStream(cls, stream):\n        \n\n        if stream.system:\n            specifier = DataStreamSelector.MatchSystemOnly\n        else:\n            specifier = DataStreamSelector.MatchUserOnly\n\n        return DataStreamSelector(stream.stream_type, stream.stream_id, specifier)", "docstring": "Create a DataStreamSelector from a DataStream.\n\nArgs:\nstream (DataStream): The data stream that we want to convert.", "source": "juraj-google-style"}
{"code": "def _do_retrieve_scopes(self, http, token):\n        \n        logger.info('Refreshing scopes')\n        query_params = {'access_token': token, 'fields': 'scope'}\n        token_info_uri = _helpers.update_query_params(\n            self.token_info_uri, query_params)\n        resp, content = transport.request(http, token_info_uri)\n        content = _helpers._from_bytes(content)\n        if resp.status == http_client.OK:\n            d = json.loads(content)\n            self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))\n        else:\n            error_msg = 'Invalid response {0}.'.format(resp.status)\n            try:\n                d = json.loads(content)\n                if 'error_description' in d:\n                    error_msg = d['error_description']\n            except (TypeError, ValueError):\n                pass\n            raise Error(error_msg)", "docstring": "Retrieves the list of authorized scopes from the OAuth2 provider.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\ntoken: A string used as the token to identify the credentials to\nthe provider.\n\nRaises:\nError: When refresh fails, indicating the the access token is\ninvalid.", "source": "juraj-google-style"}
{"code": "def __init__(self, theta: types.RealTensor, mean_reversion: types.RealTensor, sigma: types.RealTensor, dtype: Optional[tf.DType]=None, name: Optional[str]=None):\n    dim = 1\n    dtype = dtype or tf.float32\n    name = name or 'cir_model'\n    with tf.name_scope(name):\n\n        def _convert_param_to_tensor(param):\n            \n            param_t = tf.convert_to_tensor(param, dtype=dtype)\n            return param_t * tf.ones(shape=dim, dtype=dtype)\n\n        def _get_batch_shape(param):\n            \n            param_shape = tff_utils.get_shape(param)\n            return param_shape[:-1]\n        self._theta = _convert_param_to_tensor(theta)\n        self._mean_reversion = _convert_param_to_tensor(mean_reversion)\n        self._sigma = _convert_param_to_tensor(sigma)\n        self._batch_shape = _get_batch_shape(self._theta)\n        self._batch_shape_rank = len(self._batch_shape)\n\n        def _drift_fn(t, x):\n            del t\n            expand_rank = tff_utils.get_shape(x).rank - self._batch_shape_rank - 1\n            theta_expand = self._expand_param_on_rank(self._theta, expand_rank, axis=-2)\n            mean_reversion_expand = self._expand_param_on_rank(self._mean_reversion, expand_rank, axis=-2)\n            return theta_expand - mean_reversion_expand * x\n\n        def _volatility_fn(t, x):\n            del t\n            expand_rank = len(tff_utils.get_shape(x)) - self._batch_shape_rank - 1\n            sigma_expand = self._expand_param_on_rank(self._sigma, expand_rank, axis=-2)\n            return tf.expand_dims(sigma_expand * tf.sqrt(x), axis=-1)\n    super(CirModel, self).__init__(dim, _drift_fn, _volatility_fn, dtype, name)", "docstring": "Initializes the CIR Model.\n\nArgs:\ntheta: A positive scalar `Tensor` with shape `batch_shape` + [1].\nmean_reversion: A positive scalar `Tensor` of the same dtype and shape as\n`theta`. Means speed of reversion.\nsigma: A scalar `Tensor` of the same dtype and shape as `theta`.Means\nvolatility.\ndtype: The default dtype to use when converting values to `Tensor`s.\nDefault value: `None` which maps to `tf.float32`.\nname: Python string. The name to give to the ops created by this class.\nDefault value: `None` which maps to the default name `cir_model`.", "source": "github-repos"}
{"code": "def __init__(self, backend_prop):\n        \n        super().__init__()\n        self.backend_prop = backend_prop\n        self.swap_graph = nx.DiGraph()\n        self.cx_errors = {}\n        self.readout_errors = {}\n        self.available_hw_qubits = []\n        self.gate_list = []\n        self.gate_cost = {}\n        self.swap_paths = {}\n        self.swap_costs = {}\n        self.prog_graph = nx.Graph()\n        self.qarg_to_id = {}\n        self.pending_program_edges = []\n        self.prog2hw = {}", "docstring": "Chooses a Noise Adaptive Layout\n\nArgs:\nbackend_prop (BackendProperties): backend properties object\n\nRaises:\nTranspilerError: if invalid options", "source": "juraj-google-style"}
{"code": "def power_devices(self):\n    if (not self.__power_devices):\n        self.__power_devices = PowerDevices(self.__connection)\n    return self.__power_devices", "docstring": "Gets the PowerDevices API client.\n\nReturns:\nPowerDevices:", "source": "codesearchnet"}
{"code": "def _ValidateFleetspeakServiceConfig(self, config_path):\n    \n    with open(config_path, \"rb\") as f:\n      pool = descriptor_pool.DescriptorPool()\n      pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR)\n      parsed_config = text_format.Parse(\n          f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool)\n      if parsed_config.factory != \"Daemon\":\n        raise BuildError(\n            \"Fleetspeak config does not have the expected factory type.\")\n      daemon_cfg = fs_config_pb2.Config()\n      parsed_config.config.Unpack(daemon_cfg)\n      if not daemon_cfg.argv:\n        raise BuildError(\n            \"Fleetspeak daemon service config does not specify command line \"\n            \"args.\")", "docstring": "Validates a Fleetspeak service config.\n\nChecks that the given file is a valid TextFormat representation of\na Fleetspeak service config proto.\n\nArgs:\nconfig_path: Path to the config file.\n\nRaises:\nBuildError: If the config is not valid.", "source": "juraj-google-style"}
{"code": "def round(x, name=None):\n    x = ops.convert_to_tensor(x, name='x')\n    if x.dtype.is_integer:\n        return x\n    else:\n        return gen_math_ops.round(x, name=name)", "docstring": "Rounds the values of a tensor to the nearest integer, element-wise.\n\nRounds half to even.  Also known as bankers rounding. If you want to round\naccording to the current system rounding mode use tf::cint.\nFor example:\n\n```python\nx = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])\ntf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]\n```\n\nArgs:\nx: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of same shape and type as `x`.", "source": "github-repos"}
{"code": "def test_config_to_dict(test_config_string):\n    \n\n    test_config = {}\n    if test_config_string:\n        for config in test_config_string.split(','):\n            key, value = config.split('=')\n            test_config[key] = value\n\n    return test_config", "docstring": "Parse the test config to a dictionary\n\nArgs:\ntest_config_string (str) this string come from the --test-config\nflag of the bro executable run command", "source": "juraj-google-style"}
{"code": "def clear(cls, fn):\n    if hasattr(fn, cls.CACHE_VAR):\n        delattr(fn, cls.CACHE_VAR)", "docstring": "Clear result cache on the given function.\n\nIf the function has no cached result, this call will do nothing.\n\nArgs:\nfn (FunctionType):\nThe function whose cache should be cleared.", "source": "codesearchnet"}
{"code": "def _and_join(self, close_group=False):\n    if (not self.initialized):\n        raise ValueError('You must add a search term before adding an operator.')\n    else:\n        self._operator('AND', close_group=close_group)\n    return self", "docstring": "Combine terms with AND.\nThere must be a term added before using this method.\n\nArguments:\nclose_group (bool): If ``True``, will end the current group and start a new one.\nIf ``False``, will continue current group.\n\nExample::\n\nIf the current query is \"(term1\"\n.and(close_group=True) => \"(term1) AND (\"\n.and(close_group=False) => \"(term1 AND \"\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def _variable_with_weight_decay(name, shape, stddev, wd):\n  \n  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n  var = _variable_on_cpu(\n      name,\n      shape,\n      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n  if wd is not None:\n    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n    tf.add_to_collection('losses', weight_decay)\n  return var", "docstring": "Helper to create an initialized Variable with weight decay.\n\nNote that the Variable is initialized with a truncated normal distribution.\nA weight decay is added only if one is specified.\n\nArgs:\nname: name of the variable\nshape: list of ints\nstddev: standard deviation of a truncated Gaussian\nwd: add L2Loss weight decay multiplied by this float. If None, weight\ndecay is not added for this Variable.\n\nReturns:\nVariable Tensor", "source": "juraj-google-style"}
{"code": "def _inputs_valid(self, output_condition_uris):\n        \n\n        if len(self.inputs) != len(output_condition_uris):\n            raise ValueError('Inputs and '\n                             'output_condition_uris must have the same count')\n\n        tx_dict = self.tx_dict if self.tx_dict else self.to_dict()\n        tx_dict = Transaction._remove_signatures(tx_dict)\n        tx_dict['id'] = None\n        tx_serialized = Transaction._to_str(tx_dict)\n\n        def validate(i, output_condition_uri=None):\n            \n            return self._input_valid(self.inputs[i], self.operation,\n                                     tx_serialized, output_condition_uri)\n\n        return all(validate(i, cond)\n                   for i, cond in enumerate(output_condition_uris))", "docstring": "Validates an Input against a given set of Outputs.\n\nNote:\nThe number of `output_condition_uris` must be equal to the\nnumber of Inputs a Transaction has.\n\nArgs:\noutput_condition_uris (:obj:`list` of :obj:`str`): A list of\nOutputs to check the Inputs against.\n\nReturns:\nbool: If all Outputs are valid.", "source": "juraj-google-style"}
{"code": "def set_control_mode(self, modevalue):\n        \n        minimalmodbus._checkInt(modevalue, minvalue=0, maxvalue=3, description='control mode') \n        self.write_register(4101, modevalue)", "docstring": "Set the control method using the corresponding integer value.\n\nArgs:\nmodevalue(int): 0-3\n\nThe modevalue is one of the keys in :data:`CONTROL_MODES`.", "source": "juraj-google-style"}
{"code": "def __init__(self, api_url='https:\n                 headers=None):\n        \n        self.API_BASE_URL = '{api_url}'.format(**locals())\n        self.headers = headers", "docstring": "Initialize Paystack Request object for browsing resource.\n\nArgs:\napi_url: str\nheaders: dict", "source": "juraj-google-style"}
{"code": "def _get_archive_filelist(filename):\n    \n    \n    names = []  \n    if tarfile.is_tarfile(filename):\n        with tarfile.open(filename) as tar_file:\n            names = sorted(tar_file.getnames())\n    elif zipfile.is_zipfile(filename):\n        with zipfile.ZipFile(filename) as zip_file:\n            names = sorted(zip_file.namelist())\n    else:\n        raise ValueError(\"Can not get filenames from '{!s}'. \"\n                         \"Not a tar or zip file\".format(filename))\n    if \"./\" in names:\n        names.remove(\"./\")\n    return names", "docstring": "Extract the list of files from a tar or zip archive.\n\nArgs:\nfilename: name of the archive\n\nReturns:\nSorted list of files in the archive, excluding './'\n\nRaises:\nValueError: when the file is neither a zip nor a tar archive\nFileNotFoundError: when the provided file does not exist (for Python 3)\nIOError: when the provided file does not exist (for Python 2)", "source": "juraj-google-style"}
{"code": "def getTraitCovarStdErrors(self,term_i):\n        \n        assert self.init,        'GP not initialised'\n        assert self.fast==False, 'Not supported for fast implementation'\n\n        if self.P==1:\n            out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i]\n        else:\n            C = self.vd.getTerm(term_i).getTraitCovar()\n            n_params = C.getNumberParams()\n            par_index = 0\n            for term in range(term_i-1):\n                par_index += self.vd.getTerm(term_i).getNumberScales()\n            Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)]\n            out = sp.zeros((self.P,self.P))\n            for param_i in range(n_params):\n                out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i]\n                for param_j in range(param_i):\n                    out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j]\n        out = sp.sqrt(out)\n        return out", "docstring": "Returns standard errors on trait covariances from term_i (for the covariance estimate \\see getTraitCovar)\n\nArgs:\nterm_i:     index of the term we are interested in", "source": "juraj-google-style"}
{"code": "def node_traceback(self, element_name):\n    if self._python_graph is None:\n        raise LookupError('Python graph is not available for traceback lookup')\n    node_name = debug_graphs.get_node_name(element_name)\n    if node_name not in self._node_traceback:\n        raise KeyError('Cannot find node \"%s\" in Python graph' % node_name)\n    return self._node_traceback[node_name]", "docstring": "Try to retrieve the Python traceback of node's construction.\n\nArgs:\nelement_name: (`str`) Name of a graph element (node or tensor).\n\nReturns:\n(list) The traceback list object as returned by the `extract_trace`\nmethod of Python's traceback module.\n\nRaises:\nLookupError: If Python graph is not available for traceback lookup.\nKeyError: If the node cannot be found in the Python graph loaded.", "source": "github-repos"}
{"code": "def with_row(self, row):\n    self = self.copy()\n    self.append(row)\n    return self", "docstring": "Return a table with an additional row.\n\nArgs:\n``row`` (sequence): A value for each column.\n\nRaises:\n``ValueError``: If the row length differs from the column count.\n\n>>> tiles = Table(make_array('letter', 'count', 'points'))\n>>> tiles.with_row(['c', 2, 3]).with_row(['d', 4, 2])\nletter | count | points\nc      | 2     | 3\nd      | 4     | 2", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    shortcuts = match.get('UserShortcuts', {})\n    for (search_text, data) in iter(shortcuts.items()):\n        datetime_value = data.get('LAST_USED', None)\n        if (not datetime_value):\n            continue\n        display_name = data.get('DISPLAY_NAME', '<DISPLAY_NAME>')\n        path = data.get('PATH', '<PATH>')\n        event_data = plist_event.PlistTimeEventData()\n        event_data.desc = 'Spotlight term searched \"{0:s}\" associate to {1:s} ({2:s})'.format(search_text, display_name, path)\n        event_data.key = search_text\n        event_data.root = '/UserShortcuts'\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Spotlight entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def get_distance(self, node):\n        \n        delta = (node.pos[0]-self.pos[0], node.pos[1]-self.pos[1])\n        return sqrt(delta[0]**2+delta[1]**2)", "docstring": "Get the distance beetween 2 nodes\n\nArgs:\nnode (object): The other node.", "source": "juraj-google-style"}
{"code": "def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'):\n        \n        \n        variants = self.variant_collection.find({\n            'case_id': case_obj['_id'],\n            'category': category,\n            'variant_type': variant_type,\n        }).sort('rank_score', pymongo.DESCENDING)\n\n        LOG.info(\"Updating variant_rank for all variants\")\n\n        requests = []\n\n        for index, var_obj in enumerate(variants):\n            if len(requests) > 5000:\n                try:\n                    self.variant_collection.bulk_write(requests, ordered=False)\n                    requests = []\n                except BulkWriteError as err:\n                    LOG.warning(\"Updating variant rank failed\")\n                    raise err\n\n            operation = pymongo.UpdateOne(\n                {'_id': var_obj['_id']},\n                {\n                    '$set': {\n                        'variant_rank': index + 1,\n                    }\n                })\n            requests.append(operation)\n\n        \n        try:\n            self.variant_collection.bulk_write(requests, ordered=False)\n        except BulkWriteError as err:\n            LOG.warning(\"Updating variant rank failed\")\n            raise err\n\n        LOG.info(\"Updating variant_rank done\")", "docstring": "Updates the manual rank for all variants in a case\n\nAdd a variant rank based on the rank score\nWhenever variants are added or removed from a case we need to update the variant rank\n\nArgs:\ncase_obj(Case)\nvariant_type(str)", "source": "juraj-google-style"}
{"code": "def lola_image(self, save=False, name='BaseLola.png'):\n    fig = plt.figure(figsize=(10, 8))\n    ax1 = fig.add_subplot(111)\n    (lon_m, lon_M, lat_m, lat_M) = self.lambert_window(self.size_window, self.lat0, self.lon0)\n    m = Basemap(llcrnrlon=lon_m, llcrnrlat=lat_m, urcrnrlon=lon_M, urcrnrlat=lat_M, resolution='i', projection='laea', rsphere=1734400, lat_0=self.lat0, lon_0=self.lon0)\n    (Xl, Yl, Zl) = self.get_arrays('Lola')\n    (Xl, Yl) = m(Xl, Yl)\n    CS = m.pcolormesh(Xl, Yl, Zl, cmap='gist_earth', alpha=0.5, ax=ax1, zorder=1)\n    (xc, yc) = m(self.lon0, self.lat0)\n    ax1.scatter(xc, yc, s=200, marker='v', zorder=2)\n    self._add_scale(m, ax1)\n    self._add_colorbar(m, CS, ax1, 'Topography')\n    if (save == True):\n        fig.savefig(name, rasterized=True, dpi=50, bbox_inches='tight', pad_inches=0.1)", "docstring": "Draw the topography of the region of interest\n\nArgs:\nsave (Optional[bool]): Weither or not to save the image.\nDefaults to False.\nname (Optional[str]): Absolut path to save the resulting\nimage. Default to 'BaseLola.png' in the working\ndirectory.\n\nReturns:\nAn image correponding to the region tography. Realized\nfrom the data taken by the LOLA instrument on board of LRO.\n\nNote:\nNice to use in a jupyter notebook with ``%matplotib inline``\nactivated.\n\nFeel free to modify this method to plot exactly what you need.", "source": "codesearchnet"}
{"code": "def _has_nchw_support():\n    explicitly_on_cpu = _is_current_explicit_device('CPU')\n    gpus_available = bool(_get_available_gpus())\n    return not explicitly_on_cpu and gpus_available", "docstring": "Check whether the current scope supports NCHW ops.\n\nTensorFlow does not support NCHW on CPU. Therefore we check if we are not\nexplicitly put on\nCPU, and have GPUs available. In this case there will be soft-placing on the\nGPU device.\n\nReturns:\nbool: if the current scope device placement would support nchw", "source": "github-repos"}
{"code": "def configure_vrf(self, vrf_name, commands):\n        \n        commands = make_iterable(commands)\n        commands.insert(0, 'vrf definition %s' % vrf_name)\n        return self.configure(commands)", "docstring": "Configures the specified VRF using commands\n\nArgs:\nvrf_name (str): The VRF name to configure\ncommands: The list of commands to configure\n\nReturns:\nTrue if the commands completed successfully", "source": "juraj-google-style"}
{"code": "def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor:\n    token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens]\n    dequantised_states = []\n    for i in range(bs_chunks):\n        music_tokens_i = [chunks[i] for chunks in token_chunks]\n        dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level)\n        dequantised_states.append(dequantised_state)\n    return torch.cat(dequantised_states, dim=0)", "docstring": "Transforms the input `music_tokens` to their `raw_audio` representation.\n\nArgs:\nmusic_tokens (`torch.LongTensor`):\nTensor of music tokens which will be decoded to raw audio by using the codebook. Each music token\nshould be an index to a corresponding `code` vector in the codebook.\nstart_level (`int`, *optional*):\nLevel at which the decoding process will start. Default to 0.\nend_level (`int`, *optional*):\nLevel at which the decoding process will start. Default to None.\nbs_chunks (int, *optional*):\nNumber of chunks to process at the same time.", "source": "github-repos"}
{"code": "def from_log(cls, log, cutoff=None, components=None, legend=None, legend_field=None, field=None, right=False, basis=None, source='Log'):\n    if ((components is None) and (legend is None) and (field is None)):\n        m = 'You must provide a list of components, and legend, or a field.'\n        raise StriplogError(m)\n    if ((legend is not None) and (legend_field is None)):\n        try:\n            components = [deepcopy(decor.component) for decor in legend]\n        except AttributeError:\n            pass\n    if (legend_field is not None):\n        field_values = [getattr(d, legend_field, 0) for d in legend]\n        components = [Component() for i in range(int((max(field_values) + 1)))]\n        for (i, decor) in enumerate(legend):\n            components[i] = deepcopy(decor.component)\n    if (cutoff is not None):\n        try:\n            n = len(cutoff)\n        except TypeError:\n            n = 1\n        if (len(components) < (n + 1)):\n            m = 'For n cutoffs, you need to provide at least'\n            m += 'n+1 components.'\n            raise StriplogError(m)\n        try:\n            a = np.digitize(log, cutoff, right)\n        except ValueError:\n            a = np.digitize(log, [cutoff], right)\n    else:\n        a = np.copy(log)\n    (tops, values) = utils.tops_from_loglike(a)\n    if (basis is None):\n        m = 'You must provide a depth or elevation basis.'\n        raise StriplogError(m)\n    list_of_Intervals = cls.__intervals_from_tops(tops, values, basis, components, field=field)\n    return cls(list_of_Intervals, source=source)", "docstring": "Turn a 1D array into a striplog, given a cutoff.\n\nArgs:\nlog (array-like): A 1D array or a list of integers.\ncutoff (number or array-like): The log value(s) at which to bin\nthe log. Optional.\ncomponents (array-like): A list of components. Use this or\n``legend``.\nlegend (``Legend``): A legend object. Use this or ``components``.\nlegend_field ('str'): If you're not trying to match against\ncomponents, then you can match the log values to this field in\nthe Decors.\nfield (str): The field in the Interval's ``data`` to store the log\nvalues as.\nright (bool): Which side of the cutoff to send things that are\nequal to, i.e. right on, the cutoff.\nbasis (array-like): A depth basis for the log, so striplog knows\nwhere to put the boundaries.\nsource (str): The source of the data. Default 'Log'.\n\nReturns:\nStriplog: The ``striplog`` object.", "source": "codesearchnet"}
{"code": "def Header(self):\n    if (not self._header):\n        self._header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp, self.Index, self.ConsensusData, self.NextConsensus, self.Script)\n    return self._header", "docstring": "Get the block header.\n\nReturns:\nneo.Core.Header:", "source": "codesearchnet"}
{"code": "class FlaxBaseModelOutputWithNoAttention(ModelOutput):\n    last_hidden_state: Optional[jnp.ndarray] = None\n    hidden_states: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for model's outputs, with potential hidden states.\n\nArgs:\nlast_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):\nSequence of hidden-states at the output of the last layer of the model.\nhidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one\nfor the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the\nmodel at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def get_corner(self, time):\n        \n        if self.start_time <= time <= self.end_time:\n            diff = time - self.start_time\n            return self.i[diff][0, 0], self.j[diff][0, 0]\n        else:\n            return -1, -1", "docstring": "Gets the corner array indices of the STObject at a given time that corresponds\nto the upper left corner of the bounding box for the STObject.\n\nArgs:\ntime: time at which the corner is being extracted.\n\nReturns:\ncorner index.", "source": "juraj-google-style"}
{"code": "def state_size(self) -> Sequence[Sequence[int]]:\n    fluents = self.domain.state_fluents\n    ordering = self.domain.state_fluent_ordering\n    return self._fluent_size(fluents, ordering)", "docstring": "The size of each state fluent in canonical order.\n\nReturns:\nSequence[Sequence[int]]: A tuple of tuple of integers\nrepresenting the shape and size of each fluent.", "source": "codesearchnet"}
{"code": "def check_decorator_order(filename: str) -> List[int]:\n    with open(filename, 'r', encoding='utf-8', newline='\\n') as f:\n        lines = f.readlines()\n    decorator_before = None\n    errors = []\n    for i, line in enumerate(lines):\n        search = _re_decorator.search(line)\n        if search is not None:\n            decorator_name = search.groups()[0]\n            if decorator_before is not None and decorator_name.startswith('parameterized'):\n                errors.append(i)\n            decorator_before = decorator_name\n        elif decorator_before is not None:\n            decorator_before = None\n    return errors", "docstring": "Check that in a given test file, the slow decorator is always last.\n\nArgs:\nfilename (`str`): The path to a test file to check.\n\nReturns:\n`List[int]`: The list of failures as a list of indices where there are problems.", "source": "github-repos"}
{"code": "def GetStoredHostname(self):\n    store_number = len(self._hostnames)\n    return self._hostnames.get(store_number, None)", "docstring": "Retrieves the stored hostname.\n\nThe hostname is determined based on the preprocessing information\nthat is stored inside the storage file.\n\nReturns:\nstr: hostname.", "source": "codesearchnet"}
{"code": "def __init__(self, reader_ref, supports_serialize=False):\n    if context.executing_eagerly():\n        raise RuntimeError('Readers are not supported when eager execution is enabled. Instead, please use tf.data to get data into your model.')\n    self._reader_ref = reader_ref\n    self._supports_serialize = supports_serialize", "docstring": "Creates a new ReaderBase.\n\nArgs:\nreader_ref: The operation that implements the reader.\nsupports_serialize: True if the reader implementation can\nserialize its state.\n\nRaises:\nRuntimeError: If eager execution is enabled.", "source": "github-repos"}
{"code": "def _verify_signature(message, signature, certs):\n    for pem in certs:\n        verifier = Verifier.from_string(pem, is_x509_cert=True)\n        if verifier.verify(message, signature):\n            return\n    raise AppIdentityError('Invalid token signature')", "docstring": "Verifies signed content using a list of certificates.\n\nArgs:\nmessage: string or bytes, The message to verify.\nsignature: string or bytes, The signature on the message.\ncerts: iterable, certificates in PEM format.\n\nRaises:\nAppIdentityError: If none of the certificates can verify the message\nagainst the signature.", "source": "codesearchnet"}
{"code": "def _print_tensor(tensor_name, num_elements, tensor, output_tensor):\n    if self._parameters.is_brief_mode():\n        if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:\n            raise ValueError('Tensor %s with name %s is not in the tensorname_to_cache_idx' % (tensor, tensor_name))\n        msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]\n    else:\n        msg = '\"%s\"' % tensor_name\n    if self._parameters.trace_dir:\n        output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME + self._get_outfile_suffix())\n        output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n    else:\n        output_stream = sys.stderr\n    return logging_ops.print_v2(msg, array_ops.shape(output_tensor), '@', self._replica_id, '\\n', output_tensor, '\\n', summarize=num_elements, output_stream=output_stream)", "docstring": "Prints a tensor value to a file.\n\nArgs:\ntensor_name: name of the tensor being traced.\nnum_elements: number of elements to print (-1 means print all).\ntensor: the tensor needs to be returned.\noutput_tensor: the tensor needs to be printed.\n\nReturns:\nThe same tensor passed via the \"tensor\" argument.\n\nRaises:\nValueError: If tensor_name is not already in\ntensor_trace_order.tensorname_to_cache_idx.", "source": "github-repos"}
{"code": "def get_customer(self, customer_id):\n        \n        return self.client._get(self.url + 'customers/{}'.format(customer_id), headers=self.get_headers())", "docstring": "Queries the information related to the customer.\n\nArgs:\ncustomer_id: Identifier of the client from which you want to find the associated information.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_asides(self, block):\n    aside_instances = [self.get_aside_of_type(block, aside_type) for aside_type in self.applicable_aside_types(block)]\n    return [aside_instance for aside_instance in aside_instances if aside_instance.should_apply_to_block(block)]", "docstring": "Return instances for all of the asides that will decorate this `block`.\n\nArguments:\nblock (:class:`.XBlock`): The block to render retrieve asides for.\n\nReturns:\nList of XBlockAside instances", "source": "codesearchnet"}
{"code": "def chdir(self, target_directory):\n    target_directory = self.filesystem.resolve_path(target_directory, allow_fd=True)\n    self.filesystem.confirmdir(target_directory)\n    directory = self.filesystem.resolve(target_directory)\n    if ((not is_root()) and (not (directory.st_mode | PERM_EXE))):\n        self.filesystem.raise_os_error(errno.EACCES, directory)\n    self.filesystem.cwd = target_directory", "docstring": "Change current working directory to target directory.\n\nArgs:\ntarget_directory: The path to new current working directory.\n\nRaises:\nOSError: if user lacks permission to enter the argument directory\nor if the target is not a directory.", "source": "codesearchnet"}
{"code": "def trees_by_path(self, path):\n        \n        return set(\n            self.path_db.get(path, OOSet()).keys()\n        )", "docstring": "Search trees by `path`.\n\nArgs:\npath (str): :attr:`.Tree.path` property of :class:`.Tree`.\n\nReturns:\nset: Set of matching :class:`Tree` instances.", "source": "juraj-google-style"}
{"code": "def get_bkg_qq_data(id=None, bkg_id=None):\n    bdata = ui.get_bkg(id=id, bkg_id=bkg_id)\n    kev = bdata.get_x()\n    obs_data = bdata.counts\n    model_data = ui.get_bkg_model(id=id, bkg_id=bkg_id)(kev)\n    return np.vstack((kev, obs_data, model_data))", "docstring": "Get data for a quantile-quantile plot of the background data and model.\n\n*id*\nThe dataset id for which to get the data; defaults if unspecified.\n*bkg_id*\nThe identifier of the background; defaults if unspecified.\nReturns:\nAn ndarray of shape ``(3, npts)``. The first slice is the energy axis in\nkeV; the second is the observed values in each bin (counts, or rate, or\nrate per keV, etc.); the third is the corresponding model value in each\nbin.\n\nThe inputs are implicit; the data are obtained from the current state of\nthe Sherpa ``ui`` module.", "source": "codesearchnet"}
{"code": "def _stop_trial(self, trial, error=False, error_msg=None,\n                    stop_logger=True):\n        \n\n        if stop_logger:\n            trial.close_logger()\n\n        if error:\n            self.set_status(trial, Trial.ERROR)\n        else:\n            self.set_status(trial, Trial.TERMINATED)\n\n        try:\n            trial.write_error_log(error_msg)\n            if hasattr(trial, \"runner\") and trial.runner:\n                if (not error and self._reuse_actors\n                        and self._cached_actor is None):\n                    logger.debug(\"Reusing actor for {}\".format(trial.runner))\n                    self._cached_actor = trial.runner\n                else:\n                    logger.info(\n                        \"Destroying actor for trial {}. If your trainable is \"\n                        \"slow to initialize, consider setting \"\n                        \"reuse_actors=True to reduce actor creation \"\n                        \"overheads.\".format(trial))\n                    trial.runner.stop.remote()\n                    trial.runner.__ray_terminate__.remote()\n        except Exception:\n            logger.exception(\"Error stopping runner for Trial %s\", str(trial))\n            self.set_status(trial, Trial.ERROR)\n        finally:\n            trial.runner = None", "docstring": "Stops this trial.\n\nStops this trial, releasing all allocating resources. If stopping the\ntrial fails, the run will be marked as terminated in error, but no\nexception will be thrown.\n\nArgs:\nerror (bool): Whether to mark this trial as terminated in error.\nerror_msg (str): Optional error message.\nstop_logger (bool): Whether to shut down the trial logger.", "source": "juraj-google-style"}
{"code": "def get_reversed_statuses(context):\n    _rev = {v: k for (k, v) in STATUSES.items()}\n    _rev.update(dict(context.config['reversed_statuses']))\n    return _rev", "docstring": "Return a mapping of exit codes to status strings.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context\n\nReturns:\ndict: the mapping of exit codes to status strings.", "source": "codesearchnet"}
{"code": "def mtr_lm_v1():\n    hparams = mtr_lm_dense(0)\n    hparams.layers = (['local_self_att', 'local_self_att', 'drd', 'self_att', 'drd', 'local_self_att', 'local_self_att', 'moe_2d'] * 4)[:(- 1)]\n    hparams.d_kv = 128\n    hparams.moe_expert_x = 8\n    hparams.moe_expert_y = 4\n    hparams.moe_hidden_size = 32768\n    hparams.d_ff = 2048\n    hparams.num_memory_heads = 0\n    hparams.mesh_shape = 'b0:4;b1:8'\n    hparams.layout = 'outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0'\n    hparams.outer_batch_size = 4\n    return hparams", "docstring": "Model incorporating mixture-of-experts, local and global attention.\n\n~6B parameters\n\n32 experts in 3 hierarchichal moe layers.\n\nReturns:\na hparams", "source": "codesearchnet"}
{"code": "def detect_response_encoding(response, is_html=False, peek=131072):\n    encoding = get_heading_encoding(response)\n    encoding = wpull.string.detect_encoding(wpull.util.peek_file(response.body, peek), encoding=encoding, is_html=is_html)\n    _logger.debug(__('Got encoding: {0}', encoding))\n    return encoding", "docstring": "Return the likely encoding of the response document.\n\nArgs:\nresponse (Response): An instance of :class:`.http.Response`.\nis_html (bool): See :func:`.util.detect_encoding`.\npeek (int): The maximum number of bytes of the document to be analyzed.\n\nReturns:\n``str``, ``None``: The codec name.", "source": "codesearchnet"}
{"code": "def default(self, interface, vrid):\n    vrrp_str = ('default vrrp %d' % vrid)\n    return self.configure_interface(interface, vrrp_str)", "docstring": "Defaults a vrrp instance from an interface\n\nNote:\nThis method will attempt to default the vrrp on the node's\noperational config. Default results in the deletion of the\nspecified vrrp . If the vrrp does not exist on the\ninterface then this method will not perform any changes\nbut still return True\n\nArgs:\ninterface (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be defaulted.\n\nReturns:\nTrue if the vrrp could be defaulted otherwise False (see Node)", "source": "codesearchnet"}
{"code": "def as_tensor(self):\n    with ops.control_dependencies(None):\n        return self._concat()", "docstring": "Returns the overall concatenated value as a `Tensor`.\n\nThe returned tensor will not inherit the control dependencies from the scope\nwhere the value is used, which is similar to getting the value of\n`Variable`.\n\nReturns:\n`Tensor` containing the concatenated value.", "source": "github-repos"}
{"code": "def check_panels(adapter, panels, default_panels=None):\n    default_panels = (default_panels or [])\n    panels_exist = True\n    for panel in default_panels:\n        if (panel not in panels):\n            log.warning('Default panels have to be defined in panels')\n            panels_exist = False\n    for panel in panels:\n        if (not adapter.gene_panel(panel)):\n            log.warning('Panel {} does not exist in database'.format(panel))\n            panels_exist = False\n    return panels_exist", "docstring": "Make sure that the gene panels exist in the database\nAlso check if the default panels are defined in gene panels\n\nArgs:\nadapter(MongoAdapter)\npanels(list(str)): A list with panel names\n\nReturns:\npanels_exists(bool)", "source": "codesearchnet"}
{"code": "def _ReformatMessageString(self, message_string):\n    \n    def _PlaceHolderSpecifierReplacer(match_object):\n      \n      expanded_groups = []\n      for group in match_object.groups():\n        try:\n          place_holder_number = int(group, 10) - 1\n          expanded_group = '{{{0:d}:s}}'.format(place_holder_number)\n        except ValueError:\n          expanded_group = group\n\n        expanded_groups.append(expanded_group)\n\n      return ''.join(expanded_groups)\n\n    if not message_string:\n      return None\n\n    message_string = self._WHITE_SPACE_SPECIFIER_RE.sub(r'', message_string)\n    message_string = self._TEXT_SPECIFIER_RE.sub(r'\\\\\\1', message_string)\n    message_string = self._CURLY_BRACKETS.sub(r'\\1\\1', message_string)\n    return self._PLACE_HOLDER_SPECIFIER_RE.sub(\n        _PlaceHolderSpecifierReplacer, message_string)", "docstring": "Reformats the message string.\n\nArgs:\nmessage_string (str): message string.\n\nReturns:\nstr: message string in Python format() (PEP 3101) style.", "source": "juraj-google-style"}
{"code": "def get(self, key):\n    \n    path = self.object_path(key)\n    return self._read_object(path)", "docstring": "Return the object named by key or None if it does not exist.\n\nArgs:\nkey: Key naming the object to retrieve\n\nReturns:\nobject or None", "source": "juraj-google-style"}
{"code": "def GetPublicCert(self):\n    cert_url = (self.google_api_url + 'publicKeys')\n    (resp, content) = self.http.request(cert_url)\n    if (resp.status == 200):\n        return simplejson.loads(content)\n    else:\n        raise errors.GitkitServerError(('Error response for cert url: %s' % content))", "docstring": "Download Gitkit public cert.\n\nReturns:\ndict of public certs.", "source": "codesearchnet"}
{"code": "def read32(self, offset):\n    if (not isinstance(offset, (int, long))):\n        raise TypeError('Invalid offset type, should be integer.')\n    offset = self._adjust_offset(offset)\n    self._validate_offset(offset, 4)\n    return struct.unpack('=L', self.mapping[offset:(offset + 4)])[0]", "docstring": "Read 32-bits from the specified `offset` in bytes, relative to the\nbase physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\n\nReturns:\nint: 32-bit value read.\n\nRaises:\nTypeError: if `offset` type is invalid.\nValueError: if `offset` is out of bounds.", "source": "codesearchnet"}
{"code": "def get_c_function(self, name):\n    self.ensure_initialized()\n    return c_api_util.ScopedTFFunction(pywrap_tfe.TFE_ContextGetFunction(self._handle, name), name)", "docstring": "Get a C API TF_Function from the context.\n\nArgs:\nname: Name of the function to get.\n\nReturns:\nA ScopedTFFunction wrapping the C API TF_Function.", "source": "github-repos"}
{"code": "def one_hot_encoding(labels, num_classes, scope=None):\n  \n  with tf.name_scope(scope, 'OneHotEncoding', [labels]):\n    batch_size = labels.get_shape()[0]\n    indices = tf.expand_dims(tf.range(0, batch_size), 1)\n    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)\n    concated = tf.concat(axis=1, values=[indices, labels])\n    onehot_labels = tf.sparse_to_dense(\n        concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)\n    onehot_labels.set_shape([batch_size, num_classes])\n    return onehot_labels", "docstring": "Transform numeric labels into onehot_labels.\n\nArgs:\nlabels: [batch_size] target labels.\nnum_classes: total number of classes.\nscope: Optional scope for name_scope.\nReturns:\none hot encoding of the labels.", "source": "juraj-google-style"}
{"code": "def score(text, *score_functions):\n    \n    if not score_functions:\n        raise ValueError(\"score_functions must not be empty\")\n\n    return statistics.mean(func(text) for func in score_functions)", "docstring": "Score ``text`` using ``score_functions``.\n\nExamples:\n>>> score(\"abc\", function_a)\n>>> score(\"abc\", function_a, function_b)\n\nArgs:\ntext (str): The text to score\n*score_functions (variable length argument list): functions to score with\n\nReturns:\nArithmetic mean of scores\n\nRaises:\nValueError: If score_functions is empty", "source": "juraj-google-style"}
{"code": "def split_line(what, indent='', cols=79):\n    if (len(indent) > cols):\n        raise ValueError(\"The indent can't be longer than cols.\")\n    if (cols < 2):\n        raise ValueError(\"The cols can't be smaller than 2 (a char plus a possible '-')\")\n    what = (indent + what.lstrip())\n    if (len(what) <= cols):\n        (what, new_line) = ('', what)\n    else:\n        try:\n            closest_space = what[:cols].rindex(' ')\n        except ValueError:\n            closest_space = (- 1)\n        if (closest_space > len(indent)):\n            (what, new_line) = (what[closest_space:], what[:closest_space])\n        elif (what[cols] == ' '):\n            (what, new_line) = (what[cols:], what[:cols])\n        else:\n            (what, new_line) = (what[(cols - 1):], (what[:(cols - 1)] + '-'))\n    return (what.lstrip(), new_line.rstrip())", "docstring": "Split a line on the closest space, or break the last word with '-'.\n\nArgs:\nwhat(str): text to spli one line of.\nindent(str): will prepend this indent to the split line, taking it into\naccount in the column count.\ncols(int): maximum length of the split line.\n\nReturns:\ntuple(str, str): rest of the text and split line in that order.\n\nRaises:\nValueError: when the indent is greater than the indent, or the cols\nparam is too small", "source": "codesearchnet"}
{"code": "def add_user(username, password):\n    \n    assert _is_valid_username(username), \\\n            \"Invalid format of username '%s'!\" % username\n\n    assert username not in passwd_reader.load_users(), \\\n            \"User '%s' is already registered!\" % username\n\n    assert password, \"Password is reqired!\"\n\n    \n    home_dir = settings.DATA_PATH + username\n    sh.ftpasswd(\n        passwd=True,                    \n        name=username,\n        home=home_dir,                  \n        shell=\"/bin/false\",\n        uid=settings.PROFTPD_USERS_GID, \n        gid=settings.PROFTPD_USERS_GID,\n        stdin=True,                 \n        file=settings.LOGIN_FILE,\n        _in=password\n    )\n\n    \n    if not os.path.exists(home_dir):\n        os.makedirs(home_dir, 0775)\n\n    \n    \n    passwd_reader.set_permissions(home_dir, gid=settings.PROFTPD_USERS_GID)\n    passwd_reader.set_permissions(settings.LOGIN_FILE, mode=0600)\n\n    create_lock_file(home_dir + \"/\" + settings.LOCK_FILENAME)\n\n    reload_configuration()", "docstring": "Adds record to passwd-like file for ProFTPD, creates home directory and\nsets permissions for important files.\n\nArgs:\nusername (str): User's name.\npassword (str): User's password.", "source": "juraj-google-style"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(PstealTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._artifacts_registry = None\n    self._command_line_arguments = None\n    self._deduplicate_events = True\n    self._enable_sigsegv_handler = False\n    self._knowledge_base = knowledge_base.KnowledgeBase()\n    self._number_of_analysis_reports = 0\n    self._number_of_extraction_workers = 0\n    self._output_format = None\n    self._parsers_manager = parsers_manager.ParsersManager\n    self._preferred_language = 'en-US'\n    self._preferred_year = None\n    self._status_view_mode = status_view.StatusView.MODE_WINDOW\n    self._status_view = status_view.StatusView(self._output_writer, self.NAME)\n    self._time_slice = None\n    self._use_time_slicer = False\n\n    self.list_hashers = False\n    self.list_language_identifiers = False\n    self.list_output_modules = False\n    self.list_parsers_and_plugins = False\n    self.list_timezones = False", "docstring": "Initializes the CLI tool object.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def remove_padding(sequence):\n    length = sequence.pop('length')\n    sequence = tools.nested.map((lambda tensor: tensor[:length]), sequence)\n    return sequence", "docstring": "Selects the used frames of a sequence, up to its length.\n\nThis function does not expect a batch of sequences, but a single sequence.\nThe sequence must be a dict with `length` key, which will removed from the\nresult.\n\nArgs:\nsequence: Nested dict of tensors with time dimension.\n\nReturns:\nNested dict of tensors with padding elements and `length` key removed.", "source": "codesearchnet"}
{"code": "def __init__(self, *args, root_path: Optional[utils.KeyPath]=None, override_args: bool=False, ignore_extra_args: bool=False, **kwargs):\n    _ = kwargs.pop('allow_partial', None)\n    varargs = None\n    signature = self.__signature__\n    if len(args) > len(signature.args):\n        if signature.varargs:\n            varargs = list(args[len(signature.args):])\n            args = args[:len(signature.args)]\n        else:\n            arg_phrase = utils.auto_plural(len(signature.args), 'argument')\n            was_phrase = utils.auto_plural(len(args), 'was', 'were')\n            raise TypeError(f'{signature.id}() takes {len(signature.args)} positional {arg_phrase} but {len(args)} {was_phrase} given.')\n    bound_kwargs = dict()\n    for i, v in enumerate(args):\n        if pg_typing.MISSING_VALUE != v:\n            bound_kwargs[signature.args[i].name] = v\n    if varargs is not None:\n        bound_kwargs[signature.varargs.name] = varargs\n    for k, v in kwargs.items():\n        if pg_typing.MISSING_VALUE != v:\n            if k in bound_kwargs:\n                raise TypeError(f'{signature.id}() got multiple values for keyword argument {k!r}.')\n            bound_kwargs[k] = v\n    default_args = set()\n    non_default_args = set(bound_kwargs)\n    for arg_spec in signature.named_args:\n        if not arg_spec.value_spec.has_default:\n            continue\n        arg_name = arg_spec.name\n        if arg_name not in non_default_args:\n            default_args.add(arg_name)\n        elif bound_kwargs[arg_name] == arg_spec.value_spec.default:\n            default_args.add(arg_name)\n            non_default_args.discard(arg_name)\n    if signature.varargs and (not varargs):\n        default_args.add(signature.varargs.name)\n    super().__init__(allow_partial=True, root_path=root_path, **bound_kwargs)\n    self._non_default_args = non_default_args\n    self._default_args = default_args\n    self._specified_args = set(bound_kwargs)\n    self._override_args = override_args\n    self._ignore_extra_args = ignore_extra_args\n    self._tls = threading.local() if self.is_subclassed_functor else None", "docstring": "Constructor.\n\nArgs:\n*args: prebound positional arguments.\nroot_path: The symbolic path for current object.\noverride_args: If True, allows arguments provided during `__call__` to\noverride existing bound arguments.\nignore_extra_args: If True, unsupported arguments can be passed in\nduring `__call__` without using them. Otherwise, calling with\nunsupported arguments will raise error.\n**kwargs: prebound keyword arguments.\n\nRaises:\nKeyError: constructor got unexpected arguments.", "source": "github-repos"}
{"code": "def closest_leaf_to_root(self):\n    best = (None, float('inf'))\n    d = dict()\n    for node in self.traverse_preorder():\n        if (node.edge_length is None):\n            d[node] = 0\n        else:\n            d[node] = node.edge_length\n        if (not node.is_root()):\n            d[node] += d[node.parent]\n        if (node.is_leaf() and (d[node] < best[1])):\n            best = (node, d[node])\n    return best", "docstring": "Return the leaf that is closest to the root and the corresponding distance. Edges with no length will be considered to have a length of 0\n\nReturns:\n``tuple``: First value is the closest leaf to the root, and second value is the corresponding distance", "source": "codesearchnet"}
{"code": "def binding_site_mol2(self, residues, force_rerun=False):\n    log.debug('{}: running binding site isolation...'.format(self.id))\n    if (not self.receptorpdb_path):\n        return ValueError('Please run protein_only_and_noH')\n    prefix = ((self.id + '_') + 'binding_residues')\n    mol2maker = op.join(self.dock_dir, '{}_make_mol2.py'.format(prefix))\n    outfile = op.join(self.dock_dir, '{}.mol2'.format(prefix))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        with open(mol2maker, 'w') as mol2_maker:\n            mol2_maker.write('\n            mol2_maker.write('from chimera import runCommand\\n')\n            mol2_maker.write('runCommand(\"open {}\")\\n'.format(self.receptorpdb_path))\n            mol2_maker.write('runCommand(\"delete ~:{}\")\\n'.format(residues))\n            mol2_maker.write('runCommand(\"write format mol2 resnum 0 {}\")\\n'.format(outfile))\n            mol2_maker.write('runCommand(\"close all\")')\n        cmd = 'chimera --nogui {}'.format(mol2maker)\n        os.system(cmd)\n        os.remove(mol2maker)\n        os.remove('{}c'.format(mol2maker))\n    if ssbio.utils.is_non_zero_file(outfile):\n        self.bindingsite_path = outfile\n        log.debug('{}: successful binding site isolation'.format(self.bindingsite_path))\n    else:\n        log.critical('{}: binding_site_mol2 failed to run on receptor file'.format(self.receptorpdb_path))", "docstring": "Create mol2 of only binding site residues from the receptor\n\nThis function will take in a .pdb file (preferably the _receptor_noH.pdb file)\nand a string of residues (eg: '144,170,199') and delete all other residues in the\n.pdb file. It then saves the coordinates of the selected residues as a .mol2 file.\nThis is necessary for Chimera to select spheres within the radius of the binding\nsite.\n\nArgs:\nresidues (str): Comma separated string of residues (eg: '144,170,199')\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "codesearchnet"}
{"code": "def assemble_data(data_dfs, concat_direction):\n    \n    if concat_direction == \"horiz\":\n        \n        all_data_df = pd.concat(data_dfs, axis=1)\n\n        \n        \n        n_cols = all_data_df.shape[1]\n        logger.debug(\"all_data_df.shape[1]: {}\".format(n_cols))\n        n_cols_cumulative = sum([df.shape[1] for df in data_dfs])\n        assert n_cols == n_cols_cumulative\n\n    elif concat_direction == \"vert\":\n\n        \n        all_data_df = pd.concat(data_dfs, axis=0)\n\n        \n        \n        n_rows = all_data_df.shape[0]\n        logger.debug(\"all_data_df.shape[0]: {}\".format(n_rows))\n        n_rows_cumulative = sum([df.shape[0] for df in data_dfs])\n        assert n_rows == n_rows_cumulative\n\n    \n    all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1)\n\n    return all_data_df_sorted", "docstring": "Assemble the data dfs together. Both indices are sorted.\n\nArgs:\ndata_dfs (list of pandas dfs)\nconcat_direction (string): 'horiz' or 'vert'\n\nReturns:\nall_data_df_sorted (pandas df)", "source": "juraj-google-style"}
{"code": "def optionally_with_plugs(phase, **subplugs):\n    if isinstance(phase, PhaseGroup):\n        return phase.with_plugs(**subplugs)\n    if isinstance(phase, collections.Iterable):\n        return [optionally_with_plugs(p, **subplugs) for p in phase]\n    if (not isinstance(phase, phase_descriptor.PhaseDescriptor)):\n        phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase)\n    return phase.with_known_plugs(**subplugs)", "docstring": "Apply only the with_plugs that the phase knows.\n\nThis will determine the subset of plug overrides for only plugs the phase\nactually has.\n\nArgs:\nphase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or\niterable of those, the phase or phase group (or iterable) to apply the\nplug changes to.\n**subplugs: mapping from plug name to derived plug class, the subplugs to\napply.\n\nRaises:\nopenhtf.plugs.InvalidPlugError: if a specified subplug class is not a valid\nreplacement for the specified plug name.\n\nReturns:\nphase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated\nplugs.", "source": "codesearchnet"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Instantiates an initializer from a configuration dictionary.\n\nExample:\n\n```python\ninitializer = RandomUniform(-1, 1)\nconfig = initializer.get_config()\ninitializer = RandomUniform.from_config(config)\n```\n\nArgs:\nconfig: A Python dictionary, the output of `get_config()`.\n\nReturns:\nAn `Initializer` instance.", "source": "github-repos"}
{"code": "def fetch(url: str, **kwargs) -> Selector:\n    kwargs.setdefault('headers', DEFAULT_HEADERS)\n    try:\n        res = requests.get(url, **kwargs)\n        res.raise_for_status()\n    except requests.RequestException as e:\n        print(e)\n    else:\n        html = res.text\n        tree = Selector(text=html)\n        return tree", "docstring": "Send HTTP request and parse it as a DOM tree.\n\nArgs:\nurl (str): The url of the site.\n\nReturns:\nSelector: allows you to select parts of HTML text using CSS or XPath expressions.", "source": "codesearchnet"}
{"code": "def read_submissions_from_directory(dirname, use_gpu):\n    result = []\n    for sub_dir in os.listdir(dirname):\n        submission_path = os.path.join(dirname, sub_dir)\n        try:\n            if (not os.path.isdir(submission_path)):\n                continue\n            if (not os.path.exists(os.path.join(submission_path, 'metadata.json'))):\n                continue\n            with open(os.path.join(submission_path, 'metadata.json')) as f:\n                metadata = json.load(f)\n            if (use_gpu and ('container_gpu' in metadata)):\n                container = metadata['container_gpu']\n            else:\n                container = metadata['container']\n            entry_point = metadata['entry_point']\n            submission_type = metadata['type']\n            if ((submission_type == 'attack') or (submission_type == 'targeted_attack')):\n                submission = Attack(submission_path, container, entry_point, use_gpu)\n            elif (submission_type == 'defense'):\n                submission = Defense(submission_path, container, entry_point, use_gpu)\n            else:\n                raise ValueError(('Invalid type of submission: %s' % submission_type))\n            result.append(submission)\n        except (IOError, KeyError, ValueError):\n            print('Failed to read submission from directory ', submission_path)\n    return result", "docstring": "Scans directory and read all submissions.\n\nArgs:\ndirname: directory to scan.\nuse_gpu: whether submissions should use GPU. This argument is\nused to pick proper Docker container for each submission and create\ninstance of Attack or Defense class.\n\nReturns:\nList with submissions (subclasses of Submission class).", "source": "codesearchnet"}
{"code": "def max_neighbor(self, in_lon, in_lat, radius=0.05):\n        \n        out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))\n        in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)\n        out_indices = np.indices(out_data.shape[1:])\n        out_rows = out_indices[0].ravel()\n        out_cols = out_indices[1].ravel()\n        for d in range(self.data.shape[0]):\n            nz_points = np.where(self.data[d] > 0)\n            if len(nz_points[0]) > 0:\n                nz_vals = self.data[d][nz_points]\n                nz_rank = np.argsort(nz_vals)\n                original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T)\n                all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0)\n                for n, neighbors in enumerate(all_neighbors):\n                    if len(neighbors) > 0:\n                        out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n]\n        return out_data", "docstring": "Finds the largest value within a given radius of a point on the interpolated grid.\n\nArgs:\nin_lon: 2D array of longitude values\nin_lat: 2D array of latitude values\nradius: radius of influence for largest neighbor search in degrees\n\nReturns:\nArray of interpolated data", "source": "juraj-google-style"}
{"code": "def output(self, value):\n        \n        return super(Map, self).output(self.stream, value)", "docstring": "SPL output port assignment expression.\n\nArguments:\nvalue(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.\n\nReturns:\nExpression: Output assignment expression that is valid as a the context of this operator.", "source": "juraj-google-style"}
{"code": "def visible_devices(self):\n    devs = {}\n    for (device_id, adapters) in self._devices.items():\n        dev = None\n        max_signal = None\n        best_adapter = None\n        for (adapter_id, devinfo) in adapters.items():\n            connstring = 'adapter/{0}/{1}'.format(adapter_id, devinfo['connection_string'])\n            if (dev is None):\n                dev = copy.deepcopy(devinfo)\n                del dev['connection_string']\n            if ('adapters' not in dev):\n                dev['adapters'] = []\n                best_adapter = adapter_id\n            dev['adapters'].append((adapter_id, devinfo['signal_strength'], connstring))\n            if (max_signal is None):\n                max_signal = devinfo['signal_strength']\n            elif (devinfo['signal_strength'] > max_signal):\n                max_signal = devinfo['signal_strength']\n                best_adapter = adapter_id\n        if (dev is None):\n            continue\n        dev['connection_string'] = ('device/%x' % dev['uuid'])\n        dev['adapters'] = sorted(dev['adapters'], key=(lambda x: x[1]), reverse=True)\n        dev['best_adapter'] = best_adapter\n        dev['signal_strength'] = max_signal\n        devs[device_id] = dev\n    return devs", "docstring": "Unify all visible devices across all connected adapters\n\nReturns:\ndict: A dictionary mapping UUIDs to device information dictionaries", "source": "codesearchnet"}
{"code": "def are_values_same_type(first_val, second_val):\n  \n\n  first_val_type = type(first_val)\n  second_val_type = type(second_val)\n\n  \n  if isinstance(first_val, string_types) and isinstance(second_val, string_types):\n    return True\n\n  \n  if isinstance(first_val, bool) or isinstance(second_val, bool):\n    return first_val_type == second_val_type\n\n  \n  if isinstance(first_val, (numbers.Integral, float)) and isinstance(second_val, (numbers.Integral, float)):\n    return True\n\n  return False", "docstring": "Method to verify that both values belong to same type. Float and integer are\nconsidered as same type.\n\nArgs:\nfirst_val: Value to validate.\nsecond_Val: Value to validate.\n\nReturns:\nBoolean: True if both values belong to same type. Otherwise False.", "source": "juraj-google-style"}
{"code": "def LockScanNode(self, path_spec):\n    scan_node = self._scan_nodes.get(path_spec, None)\n    if (not scan_node):\n        raise KeyError('Scan node does not exist.')\n    self._locked_scan_nodes[path_spec] = scan_node", "docstring": "Marks a scan node as locked.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nRaises:\nKeyError: if the scan node does not exists.", "source": "codesearchnet"}
{"code": "async def leave(self, *, force: bool = False) -> bool:\n        \n\n        params = {\"force\": force}\n\n        await self.docker._query(\"swarm/leave\", method=\"POST\", params=params)\n\n        return True", "docstring": "Leave a swarm.\n\nArgs:\nforce: force to leave the swarm even if the node is a master", "source": "juraj-google-style"}
{"code": "def __trim_grave_accent(self, href):\n        \n\n        if href.startswith(\"`\"):\n            href = href[1:]\n\n        if href.endswith(\"`\"):\n            href = href[:-1]\n\n        return href", "docstring": "Trim grave accents manually (because BeautifulSoup doesn\"t support it).\n\nArgs:\nhref (str): The BeautifulSoup href value.\n\nReturns:\nstr: The BeautifulSoup href value without grave accents.", "source": "juraj-google-style"}
{"code": "def is_allowed(request, level, pid):\n    if is_trusted_subject(request):\n        return True\n    return d1_gmn.app.models.Permission.objects.filter(sciobj__pid__did=pid, subject__subject__in=request.all_subjects_set, level__gte=level).exists()", "docstring": "Check if one or more subjects are allowed to perform action level on object.\n\nIf a subject holds permissions for one action level on object, all lower action\nlevels are also allowed. Any included subject that is unknown to this MN is treated\nas a subject without permissions.\n\nReturns:\nbool\nTrue:\n- The active subjects include one or more subjects that:\n- are fully trusted DataONE infrastructure subjects, causing all rights to be\ngranted regardless of requested access level and SciObj\n- OR are in the object's ACL for the requested access level. The ACL contains\nthe subjects from the object's allow rules and the object's rightsHolder,\nwhich has all rights.\n- OR object is public, which always yields a match on the \"public\" symbolic\nsubject.\nFalse:\n- None of the active subjects are in the object's ACL for the requested\naccess level or for lower levels.\n- OR PID does not exist\n- OR access level is invalid", "source": "codesearchnet"}
{"code": "def build(self, var_list):\n    if self.built:\n        return\n    super().build(var_list)\n    self._m, self._u = self.add_optimizer_variables(var_list, ['momentum', 'norm'])", "docstring": "Initialize optimizer variables.\n\nAdamax optimizer has 2 types of variables: momentums (denoted as m),\nexponentially weighted infinity norm (denoted as u).\n\nArgs:\nvar_list: list of model variables to build Adamax variables on.", "source": "github-repos"}
{"code": "def status(self):\n    line = next(self.__line_gen()).rstrip()\n    parts = line.split(None, 1)\n    try:\n        (code, message) = (int(parts[0]), '')\n    except ValueError:\n        raise NNTPProtocolError(line)\n    if ((code < 100) or (code >= 600)):\n        raise NNTPProtocolError(line)\n    if (len(parts) > 1):\n        message = parts[1]\n    if (400 <= code <= 499):\n        raise NNTPTemporaryError(code, message)\n    if (500 <= code <= 599):\n        raise NNTPPermanentError(code, message)\n    return (code, message)", "docstring": "Reads a command response status.\n\nIf there is no response message then the returned status message will\nbe an empty string.\n\nRaises:\nNNTPError: If data is required to be read from the socket and fails.\nNNTPProtocolError: If the status line can't be parsed.\nNNTPTemporaryError: For status code 400-499\nNNTPPermanentError: For status code 500-599\n\nReturns:\nA tuple of status code (as an integer) and status message.", "source": "codesearchnet"}
{"code": "def get_random_password():\n    password = []\n    password.append(RandomInputHelper.get_random_value(4, [string.ascii_lowercase]))\n    password.append(RandomInputHelper.get_random_value(2, [string.digits]))\n    password.append(RandomInputHelper.get_random_value(2, ['$&*@!']))\n    password.append(RandomInputHelper.get_random_value(4, [string.ascii_uppercase]))\n    return ''.join(password)", "docstring": "Get a random password that complies with most of the requirements.\n\nNote:\nThis random password is not strong and not \"really\" random, and should only be\nused for testing purposes.\n\nReturns:\nstr: The random password.", "source": "codesearchnet"}
{"code": "def transfer(self, address, messages):\n        \n        if not isinstance(messages, list):\n            raise TypeError(\"Invalid messages type, should be list of I2C.Message.\")\n        elif len(messages) == 0:\n            raise ValueError(\"Invalid messages data, should be non-zero length.\")\n\n        \n        cmessages = (_CI2CMessage * len(messages))()\n        for i in range(len(messages)):\n            \n            if isinstance(messages[i].data, bytes):\n                data = messages[i].data\n            elif isinstance(messages[i].data, bytearray):\n                data = bytes(messages[i].data)\n            elif isinstance(messages[i].data, list):\n                data = bytes(bytearray(messages[i].data))\n\n            cmessages[i].addr = address\n            cmessages[i].flags = messages[i].flags | (I2C._I2C_M_RD if messages[i].read else 0)\n            cmessages[i].len = len(data)\n            cmessages[i].buf = ctypes.cast(ctypes.create_string_buffer(data, len(data)), ctypes.POINTER(ctypes.c_ubyte))\n\n        \n        i2c_xfer = _CI2CIocTransfer()\n        i2c_xfer.nmsgs = len(cmessages)\n        i2c_xfer.msgs = cmessages\n\n        \n        try:\n            fcntl.ioctl(self._fd, I2C._I2C_IOC_RDWR, i2c_xfer, False)\n        except IOError as e:\n            raise I2CError(e.errno, \"I2C transfer: \" + e.strerror)\n\n        \n        for i in range(len(messages)):\n            if messages[i].read:\n                data = [cmessages[i].buf[j] for j in range(cmessages[i].len)]\n                \n                if isinstance(messages[i].data, list):\n                    messages[i].data = data\n                elif isinstance(messages[i].data, bytearray):\n                    messages[i].data = bytearray(data)\n                elif isinstance(messages[i].data, bytes):\n                    messages[i].data = bytes(bytearray(data))", "docstring": "Transfer `messages` to the specified I2C `address`. Modifies the\n`messages` array with the results of any read transactions.\n\nArgs:\naddress (int): I2C address.\nmessages (list): list of I2C.Message messages.\n\nRaises:\nI2CError: if an I/O or OS error occurs.\nTypeError: if `messages` type is not list.\nValueError: if `messages` length is zero, or if message data is not valid bytes.", "source": "juraj-google-style"}
{"code": "def fit_transform(self, col):\n    if self.anonymize:\n        col = self.anonymize_column(col)\n    self._fit(col)\n    return self.transform(col)", "docstring": "Prepare the transformer and return processed data.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "codesearchnet"}
{"code": "def changeset_info(changeset):\n    keys = [tag.attrib.get('k') for tag in changeset.getchildren()]\n    keys += ['id', 'user', 'uid', 'bbox', 'created_at']\n    values = [tag.attrib.get('v') for tag in changeset.getchildren()]\n    values += [changeset.get('id'), changeset.get('user'), changeset.get('uid'), get_bounds(changeset), changeset.get('created_at')]\n    return dict(zip(keys, values))", "docstring": "Return a dictionary with id, user, user_id, bounds, date of creation\nand all the tags of the changeset.\n\nArgs:\nchangeset: the XML string of the changeset.", "source": "codesearchnet"}
{"code": "def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False, remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True, remove_res_hetero=True, keep_chemicals=None, keep_res_only=None, add_chain_id_if_empty='X', keep_chains=None):\n    if (not self.structure_file):\n        log.error('{}: no structure file, unable to clean'.format(self.id))\n        return None\n    clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(self.structure_path, out_suffix=out_suffix, outdir=outdir, force_rerun=force_rerun, remove_atom_alt=remove_atom_alt, remove_atom_hydrogen=remove_atom_hydrogen, keep_atom_alt_id=keep_atom_alt_id, add_atom_occ=add_atom_occ, remove_res_hetero=remove_res_hetero, keep_chemicals=keep_chemicals, keep_res_only=keep_res_only, add_chain_id_if_empty=add_chain_id_if_empty, keep_chains=keep_chains)\n    return clean_pdb_file", "docstring": "Clean the structure file associated with this structure, and save it as a new file. Returns the file path.\n\nArgs:\nout_suffix (str): Suffix to append to original filename\noutdir (str): Path to output directory\nforce_rerun (bool): If structure should be re-cleaned if a clean file exists already\nremove_atom_alt (bool): Remove alternate positions\nkeep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep\nremove_atom_hydrogen (bool): Remove hydrogen atoms\nadd_atom_occ (bool): Add atom occupancy fields if not present\nremove_res_hetero (bool): Remove all HETATMs\nkeep_chemicals (str, list): If removing HETATMs, keep specified chemical names\nkeep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!\nadd_chain_id_if_empty (str): Add a chain ID if not present\nkeep_chains (str, list): Keep only these chains\n\nReturns:\nstr: Path to cleaned PDB file", "source": "codesearchnet"}
{"code": "def _RawGlobPathSpecWithNumericSchema(file_system, parent_path_spec, segment_format, location, segment_number):\n    segment_files = []\n    while True:\n        segment_location = segment_format.format(location, segment_number)\n        kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)\n        kwargs['location'] = segment_location\n        if (parent_path_spec.parent is not None):\n            kwargs['parent'] = parent_path_spec.parent\n        segment_path_spec = path_spec_factory.Factory.NewPathSpec(parent_path_spec.type_indicator, **kwargs)\n        if (not file_system.FileEntryExistsByPathSpec(segment_path_spec)):\n            break\n        segment_files.append(segment_path_spec)\n        segment_number += 1\n    return segment_files", "docstring": "Globs for path specifications according to a numeric naming schema.\n\nArgs:\nfile_system (FileSystem): file system.\nparent_path_spec (PathSpec): parent path specification.\nsegment_format (str): naming schema of the segment file location.\nlocation (str): the base segment file location string.\nsegment_number (int): first segment number.\n\nReturns:\nlist[PathSpec]: path specifications that match the glob.", "source": "codesearchnet"}
{"code": "def _sample_action_fluent(self, name: str, dtype: tf.DType, size: Sequence[int], constraints: Dict[(str, Constraints)], default_value: tf.Tensor, prob: float) -> tf.Tensor:\n    shape = ([self.batch_size] + list(size))\n    if (dtype == tf.float32):\n        bounds = constraints.get(name)\n        if (bounds is None):\n            (low, high) = ((- self.MAX_REAL_VALUE), self.MAX_REAL_VALUE)\n            dist = tf.distributions.Uniform(low=low, high=high)\n            sampled_fluent = dist.sample(shape)\n        else:\n            (low, high) = bounds\n            batch = (((low is not None) and low.batch) or ((high is not None) and high.batch))\n            low = (tf.cast(low.tensor, tf.float32) if (low is not None) else (- self.MAX_REAL_VALUE))\n            high = (tf.cast(high.tensor, tf.float32) if (high is not None) else self.MAX_REAL_VALUE)\n            dist = tf.distributions.Uniform(low=low, high=high)\n            if batch:\n                sampled_fluent = dist.sample()\n            elif (isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor)):\n                if ((low + high).shape.as_list() == list(size)):\n                    sampled_fluent = dist.sample([self.batch_size])\n                else:\n                    raise ValueError('bounds are not compatible with action fluent.')\n            else:\n                sampled_fluent = dist.sample(shape)\n    elif (dtype == tf.int32):\n        logits = ([1.0] * self.MAX_INT_VALUE)\n        dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)\n        sampled_fluent = dist.sample(shape)\n    elif (dtype == tf.bool):\n        probs = 0.5\n        dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)\n        sampled_fluent = dist.sample(shape)\n    select_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)\n    action_fluent = tf.where(select_default, default_value, sampled_fluent)\n    return action_fluent", "docstring": "Samples the action fluent with given `name`, `dtype`, and `size`.\n\nWith probability `prob` it chooses the action fluent `default_value`,\nwith probability 1-`prob` it samples the fluent w.r.t. its `constraints`.\n\nArgs:\nname (str): The name of the action fluent.\ndtype (tf.DType): The data type of the action fluent.\nsize (Sequence[int]): The size and shape of the action fluent.\nconstraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.\ndefault_value (tf.Tensor): The default value for the action fluent.\nprob (float): A probability measure.\n\nReturns:\ntf.Tensor: A tensor for sampling the action fluent.", "source": "codesearchnet"}
{"code": "def _color_level(str_, level):\n    (fore_color, back_color, styles) = _get_style_from_config(level)\n    return _color(str_, fore_color, back_color, styles)", "docstring": "Return the string wrapped with the appropriate styling for the message\nlevel.  The styling will be determined based on the rez configuration.\n\nArgs:\nstr_ (str): The string to be wrapped.\nlevel (str): The message level. Should be one of 'critical', 'error',\n'warning', 'info' or 'debug'.\n\nReturns:\nstr: The string styled with the appropriate escape sequences.", "source": "codesearchnet"}
{"code": "def _ParseCommon2003CachedEntry(self, value_data, cached_entry_offset):\n    data_type_map = self._GetDataTypeMap('appcompatcache_cached_entry_2003_common')\n    try:\n        cached_entry = self._ReadStructureFromByteStream(value_data[cached_entry_offset:], cached_entry_offset, data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse cached entry value with error: {0!s}'.format(exception))\n    if (cached_entry.path_size > cached_entry.maximum_path_size):\n        raise errors.ParseError('Path size value out of bounds.')\n    path_end_of_string_size = (cached_entry.maximum_path_size - cached_entry.path_size)\n    if ((cached_entry.path_size == 0) or (path_end_of_string_size != 2)):\n        raise errors.ParseError('Unsupported path size values.')\n    return cached_entry", "docstring": "Parses the cached entry structure common for Windows 2003, Vista and 7.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nappcompatcache_cached_entry_2003_common: cached entry structure common\nfor Windows 2003, Windows Vista and Windows 7.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "codesearchnet"}
{"code": "def __init__(self, project, sub_name, expected_msg=None, expected_msg_len=None, timeout=DEFAULT_TIMEOUT, with_attributes=False, strip_attributes=None, sleep_time=DEFAULT_SLEEP_TIME, max_messages_in_one_pull=DEFAULT_MAX_MESSAGES_IN_ONE_PULL, pull_timeout=DEFAULT_PULL_TIMEOUT):\n    if pubsub is None:\n        raise ImportError('PubSub dependencies are not installed.')\n    if not project:\n        raise ValueError('Invalid project %s.' % project)\n    if not sub_name:\n        raise ValueError('Invalid subscription %s.' % sub_name)\n    if not expected_msg_len and (not expected_msg):\n        raise ValueError('Required expected_msg: {} or expected_msg_len: {}.'.format(expected_msg, expected_msg_len))\n    if expected_msg and (not isinstance(expected_msg, list)):\n        raise ValueError('Invalid expected messages %s.' % expected_msg)\n    if expected_msg_len and (not isinstance(expected_msg_len, int)):\n        raise ValueError('Invalid expected messages %s.' % expected_msg_len)\n    self.project = project\n    self.sub_name = sub_name\n    self.expected_msg = expected_msg\n    self.expected_msg_len = expected_msg_len or len(self.expected_msg)\n    self.timeout = timeout\n    self.messages = None\n    self.messages_all_details = None\n    self.with_attributes = with_attributes\n    self.strip_attributes = strip_attributes\n    self.sleep_time = sleep_time\n    self.max_messages_in_one_pull = max_messages_in_one_pull\n    self.pull_timeout = pull_timeout", "docstring": "Initialize PubSubMessageMatcher object.\n\nArgs:\nproject: A name string of project.\nsub_name: A name string of subscription which is attached to output.\nexpected_msg: A string list that contains expected message data pulled\nfrom the subscription. See also: with_attributes.\nexpected_msg_len: Number of expected messages pulled from the\nsubscription.\ntimeout: Timeout in seconds to wait for all expected messages appears.\nwith_attributes: If True, will match against both message data and\nattributes. If True, expected_msg should be a list of ``PubsubMessage``\nobjects. Otherwise, it should be a list of ``bytes``.\nstrip_attributes: List of strings. If with_attributes==True, strip the\nattributes keyed by these values from incoming messages.\nIf a key is missing, will add an attribute with an error message as\nvalue to prevent a successful match.\nsleep_time: Time in seconds between which the pulls from pubsub are done.\nmax_messages_in_one_pull: Maximum number of messages pulled from pubsub\nat once.\npull_timeout: Time in seconds after which the pull from pubsub is repeated", "source": "github-repos"}
{"code": "def _WriteIfcfg(self, interfaces, logger):\n    \n    for interface in interfaces:\n      interface_config = os.path.join(\n          self.network_path, 'ifcfg-%s' % interface)\n      interface_content = [\n          '\n          'STARTMODE=hotplug',\n          'BOOTPROTO=dhcp',\n          'DHCLIENT_SET_DEFAULT_ROUTE=yes',\n          'DHCLIENT_ROUTE_PRIORITY=10%s00' % interface,\n          '',\n      ]\n      with open(interface_config, 'w') as interface_file:\n        interface_file.write('\\n'.join(interface_content))\n      logger.info('Created ifcfg file for interface %s.', interface)", "docstring": "Write ifcfg files for multi-NIC support.\n\nOverwrites the files. This allows us to update ifcfg-* in the future.\nDisable the network setup to override this behavior and customize the\nconfigurations.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"}
{"code": "def raster_binarization(given_value, rasterfilename):\n        \n        origin_raster = RasterUtilClass.read_raster(rasterfilename)\n        binary_raster = numpy.where(origin_raster.data == given_value, 1, 0)\n        return binary_raster", "docstring": "Make the raster into binarization.\n\nThe opening and closing are based on binary image. Therefore we need to\nmake the raster into binarization.\n\nArgs:\ngiven_value: The given value's pixels will be value in 1,\nother pixels will be value in 0.\nrasterfilename: The initial rasterfilena,e.\n\nReturns:\nbinary_raster: Raster after binarization.", "source": "juraj-google-style"}
{"code": "def CalculateForecastStats(matched, available, possible=None):\n  \n  if matched > 0:\n    available_percent = (float(available) / matched) * 100.\n  else:\n    available_percent = 0\n\n  if possible is not None:\n    if matched > 0:\n      possible_percent = (possible/float(matched)) * 100.\n    else:\n      possible_percent = 0\n  else:\n    possible_percent = None\n\n  return available_percent, possible_percent", "docstring": "Calculate forecast percentage stats.\n\nArgs:\nmatched: The number of matched impressions.\navailable: The number of available impressions.\npossible: The optional number of possible impressions.\n\nReturns:\nThe percentage of impressions that are available and possible.", "source": "juraj-google-style"}
{"code": "def check_validation_split_arg(validation_split, subset, shuffle, seed):\n    if validation_split and (not 0 < validation_split < 1):\n        raise ValueError(f'`validation_split` must be between 0 and 1, received: {validation_split}')\n    if (validation_split or subset) and (not (validation_split and subset)):\n        raise ValueError('If `subset` is set, `validation_split` must be set, and inversely.')\n    if subset not in ('training', 'validation', 'both', None):\n        raise ValueError(f'`subset` must be either \"training\", \"validation\" or \"both\", received: {subset}')\n    if validation_split and shuffle and (seed is None):\n        raise ValueError('If using `validation_split` and shuffling the data, you must provide a `seed` argument, to make sure that there is no overlap between the training and validation subset.')", "docstring": "Raise errors in case of invalid argument values.\n\nArgs:\nvalidation_split: float between 0 and 1, fraction of data to reserve for\nvalidation.\nsubset: One of `\"training\"`, `\"validation\"`, or `\"both\"`. Only used if\n`validation_split` is set.\nshuffle: Whether to shuffle the data. Either `True` or `False`.\nseed: random seed for shuffling and transformations.", "source": "github-repos"}
{"code": "def MultifactorSchedule(history=None, factors='constant * linear_warmup * rsqrt_decay', constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000):\n    del history\n    cache_args = (factors, constant, warmup_steps)\n    if (cache_args in _memoized_multifactor_schedules):\n        return _memoized_multifactor_schedules[cache_args]\n    factors = [n.strip() for n in factors.split('*')]\n\n    def learning_rate(step):\n        'Step to learning rate function.'\n        ret = 1.0\n        for name in factors:\n            if (name == 'constant'):\n                ret *= constant\n            elif (name == 'linear_warmup'):\n                ret *= np.minimum(1.0, (step / warmup_steps))\n            elif (name == 'rsqrt_decay'):\n                ret /= np.sqrt(np.maximum(step, warmup_steps))\n            elif (name == 'decay_every'):\n                ret *= (decay_factor ** (step \n            else:\n                raise ValueError(('Unknown factor %s.' % name))\n        return ret\n    _memoized_multifactor_schedules[cache_args] = learning_rate\n    return learning_rate", "docstring": "Factor-based learning rate schedule.\n\nInterprets factors in the factors string which can consist of:\n* constant: interpreted as the constant value,\n* linear_warmup: interpreted as linear warmup until warmup_steps,\n* rsqrt_decay: divide by square root of max(step, warmup_steps)\n* decay_every: Every k steps decay the learning rate by decay_factor.\n\nArgs:\nhistory: the history of training and evaluation (History object).\nfactors: a string with factors separated by \"*\" that defines the schedule.\nconstant: float, the starting constant for the learning rate schedule.\nwarmup_steps: how many steps to warm up for in the warmup schedule.\ndecay_factor: The amount to decay the learning rate by.\nsteps_per_decay: How often to decay the learning rate.\n\nReturns:\na function learning_rate(step): float -> float, the step-dependent lr.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A Lxmert sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def angle(self, deg=False):\n    if (self.dtype.str[1] != 'c'):\n        warnings.warn('angle() is intended for complex-valued timeseries', RuntimeWarning, 1)\n    return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels)", "docstring": "Return the angle of the complex argument.\n\nArgs:\ndeg (bool, optional):\nReturn angle in degrees if True, radians if False (default).\n\nReturns:\nangle (Timeseries):\nThe counterclockwise angle from the positive real axis on\nthe complex plane, with dtype as numpy.float64.", "source": "codesearchnet"}
{"code": "def load_plugins(config, plugin_kwargs):\n    installed_plugins = _gather_installed_plugins()\n    metrics_plugin = _get_metrics_plugin(config, installed_plugins)\n    if metrics_plugin:\n        plugin_kwargs['metrics'] = metrics_plugin\n    active_plugins = _get_activated_plugins(config, installed_plugins)\n    if (not active_plugins):\n        return ([], [], [], None)\n    plugin_namespaces = _get_plugin_config_keys(active_plugins)\n    plugin_configs = _load_plugin_configs(plugin_namespaces, config)\n    (plugin_names, plugins, errors) = _init_plugins(active_plugins, installed_plugins, plugin_configs, plugin_kwargs)\n    return (plugin_names, plugins, errors, plugin_kwargs)", "docstring": "Discover and instantiate plugins.\n\nArgs:\nconfig (dict): loaded configuration for the Gordon service.\nplugin_kwargs (dict): keyword arguments to give to plugins\nduring instantiation.\nReturns:\nTuple of 3 lists: list of names of plugins, list of\ninstantiated plugin objects, and any errors encountered while\nloading/instantiating plugins. A tuple of three empty lists is\nreturned if there are no plugins found or activated in gordon\nconfig.", "source": "codesearchnet"}
{"code": "def _build(self, input_, prev_state):\n    self._in_to_hidden_linear = basic.Linear(self._hidden_size, name='in_to_hidden', initializers=self._initializers.get('in_to_hidden'), partitioners=self._partitioners.get('in_to_hidden'), regularizers=self._regularizers.get('in_to_hidden'))\n    self._hidden_to_hidden_linear = basic.Linear(self._hidden_size, name='hidden_to_hidden', initializers=self._initializers.get('hidden_to_hidden'), partitioners=self._partitioners.get('hidden_to_hidden'), regularizers=self._regularizers.get('hidden_to_hidden'))\n    in_to_hidden = self._in_to_hidden_linear(input_)\n    hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)\n    output = self._activation((in_to_hidden + hidden_to_hidden))\n    return (output, output)", "docstring": "Connects the VanillaRNN module into the graph.\n\nIf this is not the first time the module has been connected to the graph,\nthe Tensors provided as input_ and state must have the same final\ndimension, in order for the existing variables to be the correct size for\ntheir corresponding multiplications. The batch size may differ for each\nconnection.\n\nArgs:\ninput_: a 2D Tensor of size [batch_size, input_size].\nprev_state: a 2D Tensor of size [batch_size, hidden_size].\n\nReturns:\noutput: a 2D Tensor of size [batch_size, hidden_size].\nnext_state: a Tensor of size [batch_size, hidden_size].\n\nRaises:\nValueError: if connecting the module into the graph any time after the\nfirst time, and the inferred size of the inputs does not match previous\ninvocations.", "source": "codesearchnet"}
{"code": "def CopyNoFail(src, root=None):\n    if (root is None):\n        root = str(CFG['tmp_dir'])\n    src_path = (local.path(root) / src)\n    if src_path.exists():\n        Copy(src_path, '.')\n        return True\n    return False", "docstring": "Just copy fName into the current working directory, if it exists.\n\nNo action is executed, if fName does not exist. No Hash is checked.\n\nArgs:\nsrc: The filename we want to copy to '.'.\nroot: The optional source dir we should pull fName from. Defaults\nto benchbuild.settings.CFG[\"tmpdir\"].\n\nReturns:\nTrue, if we copied something.", "source": "codesearchnet"}
{"code": "def set_icon_data(self, base64_data, mimetype=\"image/png\", rel=\"icon\"):\n        \n        self.add_child(\"favicon\", '<link rel=\"%s\" href=\"%s\" type=\"%s\" />'%(rel, base64_data, mimetype))", "docstring": "Allows to define an icon for the App\n\nArgs:\nbase64_data (str): base64 encoded image data  (ie. \"data:image/x-icon;base64,AAABAAEAEBA....\")\nmimetype (str): mimetype of the image (\"image/png\" or \"image/x-icon\"...)\nrel (str): leave it unchanged (standard \"icon\")", "source": "juraj-google-style"}
{"code": "def _OpenFileObject(self, path_spec):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    parent_path_spec = path_spec.parent\n\n    parent_location = getattr(parent_path_spec, 'location', None)\n    if not parent_location:\n      raise errors.PathSpecError(\n          'Unsupported parent path specification without location.')\n\n    \n    \n\n    file_system = resolver.Resolver.OpenFileSystem(\n        parent_path_spec, resolver_context=self._resolver_context)\n\n    file_object = resolver.Resolver.OpenFileObject(\n        parent_path_spec, resolver_context=self._resolver_context)\n\n    vmdk_handle = pyvmdk.handle()\n    vmdk_handle.open_file_object(file_object)\n\n    parent_location_path_segments = file_system.SplitPath(parent_location)\n\n    extent_data_files = []\n    for extent_descriptor in iter(vmdk_handle.extent_descriptors):\n      extent_data_filename = extent_descriptor.filename\n\n      _, path_separator, filename = extent_data_filename.rpartition('/')\n      if not path_separator:\n        _, path_separator, filename = extent_data_filename.rpartition('\\\\')\n\n      if not path_separator:\n        filename = extent_data_filename\n\n      \n      \n      \n      \n      \n      parent_location_path_segments.pop()\n      parent_location_path_segments.append(filename)\n      extent_data_file_location = file_system.JoinPath(\n          parent_location_path_segments)\n\n      \n      \n      \n      kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)\n\n      kwargs['location'] = extent_data_file_location\n      if parent_path_spec.parent is not None:\n        kwargs['parent'] = parent_path_spec.parent\n\n      extent_data_file_path_spec = path_spec_factory.Factory.NewPathSpec(\n          parent_path_spec.type_indicator, **kwargs)\n\n      if not file_system.FileEntryExistsByPathSpec(extent_data_file_path_spec):\n        break\n\n      extent_data_files.append(extent_data_file_path_spec)\n\n    if len(extent_data_files) != vmdk_handle.number_of_extents:\n      raise IOError('Unable to locate all extent data files.')\n\n    file_objects = []\n    for extent_data_file_path_spec in extent_data_files:\n      file_object = resolver.Resolver.OpenFileObject(\n          extent_data_file_path_spec, resolver_context=self._resolver_context)\n      file_objects.append(file_object)\n\n    \n    vmdk_handle.open_extent_data_files_file_objects(file_objects)\n\n    return vmdk_handle", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyvmdk.handle: a file-like object.\n\nRaises:\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "def _FractionalMaxPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):\n    return gen_nn_ops.fractional_max_pool_grad(op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2], op.get_attr('overlapping'))", "docstring": "Returns gradient for FractionalMaxPool.\n\nSince FractionalMaxPool has three outputs, there are three gradients passed in\nfor each of the outputs. Only the first one is useful, the other two gradients\nare empty.\n\nArgs:\nop: The FractionalMaxPoolOp.\ngrad_0: Gradient with respect to op.outputs[0]\nunused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.\nunused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.\n\nReturns:\nInput backprop for FractionalMaxPool op.", "source": "github-repos"}
{"code": "def send_magic_packet(*macs, **kwargs):\n    packets = []\n    ip = kwargs.pop('ip_address', BROADCAST_IP)\n    port = kwargs.pop('port', DEFAULT_PORT)\n    for k in kwargs:\n        raise TypeError('send_magic_packet() got an unexpected keyword argument {!r}'.format(k))\n    for mac in macs:\n        packet = create_magic_packet(mac)\n        packets.append(packet)\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n    sock.connect((ip, port))\n    for packet in packets:\n        sock.send(packet)\n    sock.close()", "docstring": "Wake up computers having any of the given mac addresses.\n\nWake on lan must be enabled on the host device.\n\nArgs:\nmacs (str): One or more macaddresses of machines to wake.\n\nKeyword Args:\nip_address (str): the ip address of the host to send the magic packet\nto (default \"255.255.255.255\")\nport (int): the port of the host to send the magic packet to\n(default 9)", "source": "codesearchnet"}
{"code": "def index_bgen(fn, legacy=False):\n    \n    logger.info(\"Indexing {} (BGEN) using 'bgenix'{}\".format(\n        fn, \" (legacy mode)\" if legacy else \"\",\n    ))\n    command = [\"bgenix\", \"-g\", fn, \"-index\"]\n    if legacy:\n        command.append(\"-with-rowid\")\n    try:\n        logger.info(\"Executing '{}'\".format(\" \".join(command)))\n        subprocess.Popen(command).communicate()\n    except FileNotFoundError:\n        logger.error(\"Cannot find 'bgenix', impossible to index {}\".format(fn))\n        sys.exit(1)\n    logger.info(\"Index generated\")", "docstring": "Indexes a BGEN file.\n\nArgs:\nfn (str): The name of the BGEN file.", "source": "juraj-google-style"}
{"code": "def _GetAPFSVolumeIdentifiers(self, scan_node):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.SourceScannerError('Invalid scan node.')\n\n    volume_system = apfs_volume_system.APFSVolumeSystem()\n    volume_system.Open(scan_node.path_spec)\n\n    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(\n        volume_system)\n    if not volume_identifiers:\n      return []\n\n    \n    if self._volumes:\n      if self._volumes == 'all':\n        volumes = range(1, volume_system.number_of_volumes + 1)\n      else:\n        volumes = self._volumes\n\n      selected_volume_identifiers = self._NormalizedVolumeIdentifiers(\n          volume_system, volumes, prefix='apfs')\n\n      if not set(selected_volume_identifiers).difference(volume_identifiers):\n        return selected_volume_identifiers\n\n    if len(volume_identifiers) > 1:\n      try:\n        volume_identifiers = self._PromptUserForAPFSVolumeIdentifiers(\n            volume_system, volume_identifiers)\n      except KeyboardInterrupt:\n        raise errors.UserAbort('File system scan aborted.')\n\n    return self._NormalizedVolumeIdentifiers(\n        volume_system, volume_identifiers, prefix='apfs')", "docstring": "Determines the APFS volume identifiers.\n\nArgs:\nscan_node (dfvfs.SourceScanNode): scan node.\n\nReturns:\nlist[str]: APFS volume identifiers.\n\nRaises:\nSourceScannerError: if the format of or within the source is not\nsupported or the the scan node is invalid.\nUserAbort: if the user requested to abort.", "source": "juraj-google-style"}
{"code": "class LayoutLMv3FastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n    apply_ocr: Optional[bool]\n    ocr_lang: Optional[str]\n    tesseract_config: Optional[str]", "docstring": "Args:\napply_ocr (`bool`, *optional*, defaults to `True`):\nWhether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by\nthe `apply_ocr` parameter in the `preprocess` method.\nocr_lang (`str`, *optional*):\nThe language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is\nused. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.\ntesseract_config (`str`, *optional*):\nAny additional custom configuration flags that are forwarded to the `config` parameter when calling\nTesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the\n`preprocess` method.", "source": "github-repos"}
{"code": "def tpu_devices(devices=None):\n    return find_devices('TPU', devices)", "docstring": "Gets TPU devices out of `devices`.\n\nArgs:\ndevices: A device list (as a list of strings). If None, the list of all\navailable devices will be used for it.\n\nReturns:\nThose in `devices` that are TPUs.", "source": "github-repos"}
{"code": "def _FormatArgToken(self, token_data):\n    return {'string': token_data.argument_value.rstrip('\\x00'), 'num_arg': token_data.argument_index, 'is': token_data.argument_name}", "docstring": "Formats an argument token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or\nAUT_ARG64 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def _get_args(typ):\n    try:\n        if typ.__args__ is None:\n            return ()\n        return typ.__args__\n    except AttributeError:\n        if isinstance(typ, typing.TypeVar):\n            return (typ.__name__,)\n        return ()", "docstring": "Returns a list of arguments to the given type.\n\nArgs:\ntyp: A typing module typing type.\n\nReturns:\nA tuple of args.", "source": "github-repos"}
{"code": "def headers_present(self, headers):\n    headers = {name: re.compile('(.*)') for name in headers}\n    self.add_matcher(matcher('HeadersMatcher', headers))", "docstring": "Defines a list of headers that must be present in the\noutgoing request in order to satisfy the matcher, no matter what value\nthe headers hosts.\n\nHeader keys are case insensitive.\n\nArguments:\nheaders (list|tuple): header keys to match.\n\nReturns:\nself: current Mock instance.\n\nExample::\n\n(pook.get('server.com/api')\n.headers_present(['content-type', 'Authorization']))", "source": "codesearchnet"}
{"code": "def date_proc(func):\n\t\n\t@wraps(func)\n\tdef wrapped(request, *args, **kwargs):\n\t\tif 'date' in request.GET and request.GET['date'] == '':\n\t\t\traise Http404(\"api does not exist\")\n\t\telif 'date' not in request.GET:\n\t\t\tdate = datetime.today()\n\t\t\treturn func(request, date)\n\t\telse:\n\t\t\tdate = tuple(int(intValue) for intValue in request.GET['date'].split('-'))\n\t\t\tif len(date) == 3:\n\t\t\t\tdate = datetime(*date)\n\t\t\telif len(date) == 2:\n\t\t\t\tdate = datetime(*date, day = 1)\n\t\t\telse:\n\t\t\t\tdate = datetime(*date, month = 1, day = 1)\n\t\t\treturn func(request, date)\n\treturn wrapped", "docstring": "An decorator checking whether date parameter is passing in or not. If not, default date value is all PTT data.\nElse, return PTT data with right date.\nArgs:\nfunc: function you want to decorate.\nrequest: WSGI request parameter getten from django.\n\nReturns:\ndate:\na datetime variable, you can only give year, year + month or year + month + day, three type.\nThe missing part would be assigned default value 1 (for month is Jan, for day is 1).", "source": "juraj-google-style"}
{"code": "def to_base_10_int(n, input_base):\n    \n    return sum(c * input_base ** i for i, c in enumerate(n[::-1]))", "docstring": "Converts an integer in any base into it's decimal representation.\n\nArgs:\nn - An integer represented as a tuple of digits in the specified base.\ninput_base - the base of the input number.\n\nReturns:\ninteger converted into base 10.\n\nExample:\n>>> to_base_10_int((8,1), 16)\n129", "source": "juraj-google-style"}
{"code": "def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':\n    return cls._binary_op(x, y, tf.minimum, tf.float32)", "docstring": "Returns a TensorFluent for the minimum function.\n\nArgs:\nx: The first operand.\ny: The second operand.\n\nReturns:\nA TensorFluent wrapping the minimum function.", "source": "codesearchnet"}
{"code": "def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):\n    if TensorTracer.while_loop_op(op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))\n        return True\n    if TensorTracer.control_flow_op(op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_CONTROLFLOW_OP))\n        return True\n    if TensorTracer.unsafe_op(op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))\n        return True\n    if TensorTracer.device_mismatch(self._tt_config.device_type, op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))\n        return True\n    if op not in ops_in_exec_path:\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))\n        return True\n    if self._is_in_control_flow(op) or not self._is_in_outmost_while_loop(op):\n        if not self._should_trace_in_control_flow():\n            report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_IN_CONTROL_FLOW))\n            return True\n    if self._is_user_included_op(op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))\n        if tensor_tracer_flags.TT_CHECK_FILTER.value:\n            logging.info('USER_INCLUDED op %s', op.name)\n        return False\n    if not self._inside_op_range(op_id):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))\n        return True\n    if not self._is_interesting_op(op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))\n        return True\n    if self._is_user_excluded_op(op):\n        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))\n        if tensor_tracer_flags.TT_CHECK_FILTER.value:\n            logging.info('USER_EXCLUDED op %s', op.name)\n        return True\n    return False", "docstring": "Returns True if we should not trace Op.\n\nArgs:\nop_id: Topological index of the op.\nop: tf.Operation\nops_in_exec_path: Set of operations that are in the execution path.\nreport_handler: An instance of tensor_tracer_report.TTReportHandle.\nReturns:\nTrue if the op should not be traced, false otherwise.", "source": "github-repos"}
{"code": "def query(self, rank):\n        \n        self._flush()\n\n        current = self._head\n        if not current:\n          return 0\n\n        mid_rank = math.floor(rank * self._observations)\n        max_rank = mid_rank + math.floor(\n            self._invariant(mid_rank, self._observations) / 2)\n\n        rank = 0.0\n        while current._successor:\n            rank += current._rank\n            if rank + current._successor._rank + current._successor._delta > max_rank:\n                return current._value\n\n            current = current._successor\n\n        return current._value", "docstring": "Retrieves the value estimate for the requested quantile rank.\n\nThe requested quantile rank must be registered in the estimator's\ninvariants a priori!\n\nArgs:\nrank: A floating point quantile rank along the interval [0, 1].\n\nReturns:\nA numeric value for the quantile estimate.", "source": "juraj-google-style"}
{"code": "def EscapeWildcards(string):\n    precondition.AssertType(string, Text)\n    return string.replace('%', '\\\\%').replace('_', '\\\\_')", "docstring": "Escapes wildcard characters for strings intended to be used with `LIKE`.\n\nDatabases don't automatically escape wildcard characters ('%', '_'), so any\nnon-literal string that is passed to `LIKE` and is expected to match literally\nhas to be manually escaped.\n\nArgs:\nstring: A string to escape.\n\nReturns:\nAn escaped string.", "source": "codesearchnet"}
{"code": "def _estimate_step_duration(self, current, now):\n    if current:\n        if self._time_after_first_step is not None and current > 1:\n            time_per_unit = (now - self._time_after_first_step) / (current - 1)\n        else:\n            time_per_unit = (now - self._start) / current\n        if current == 1:\n            self._time_after_first_step = now\n        return time_per_unit\n    else:\n        return 0", "docstring": "Estimate the duration of a single step.\n\nGiven the step number `current` and the corresponding time `now` this\nfunction returns an estimate for how long a single step takes. If this\nis called before one step has been completed (i.e. `current == 0`) then\nzero is given as an estimate. The duration estimate ignores the duration\nof the (assumed to be non-representative) first step for estimates when\nmore steps are available (i.e. `current>1`).\n\nArgs:\ncurrent: Index of current step.\nnow: The current time.\n\nReturns: Estimate of the duration of a single step.", "source": "github-repos"}
{"code": "def _get_input_to_checker_function(self, flag_values):\n    return dict(([key, flag_values[key].value] for key in self.flag_names))", "docstring": "Given flag values, returns the input to be given to checker.\n\nArgs:\nflag_values: flags.FlagValues, the FlagValues instance to get flags from.\nReturns:\ndict, with keys() being self.lag_names, and value for each key\nbeing the value of the corresponding flag (string, boolean, etc).", "source": "codesearchnet"}
{"code": "def open_pspsfile(self, ecut=20, pawecutdg=None):\n        \n        from pymatgen.io.abinit.tasks import AbinitTask\n        from abipy.core.structure import Structure\n        from abipy.abio.factories import gs_input\n        from abipy.electrons.psps import PspsFile\n\n        \n        lattice = 10 * np.eye(3)\n        structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])\n\n        if self.ispaw and pawecutdg is None: pawecutdg = ecut * 4\n        inp = gs_input(structure, pseudos=[self], ecut=ecut, pawecutdg=pawecutdg,\n                       spin_mode=\"unpolarized\", kppa=1)\n        \n        inp[\"prtpsps\"] = -1\n\n        \n        task = AbinitTask.temp_shell_task(inp)\n        task.start_and_wait()\n\n        filepath = task.outdir.has_abiext(\"_PSPS.nc\")\n        if not filepath:\n            logger.critical(\"Cannot find PSPS.nc file in %s\" % task.outdir)\n            return None\n\n        \n        try:\n            return PspsFile(filepath)\n        except Exception as exc:\n            logger.critical(\"Exception while reading PSPS file at %s:\\n%s\" % (filepath, str(exc)))\n            return None", "docstring": "Calls Abinit to compute the internal tables for the application of the\npseudopotential part. Returns :class:`PspsFile` object providing methods\nto plot and analyze the data or None if file is not found or it's not readable.\n\nArgs:\necut: Cutoff energy in Hartree.\npawecutdg: Cutoff energy for the PAW double grid.", "source": "juraj-google-style"}
{"code": "def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration):\n    full_tokens = full_tokens[0]\n    if len(full_tokens) < max_n_lyric_tokens:\n        tokens = torch.cat([torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens])\n        indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens)))\n    else:\n        midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length)\n        midpoint = min(max(midpoint, max_n_lyric_tokens \n        tokens = full_tokens[midpoint - max_n_lyric_tokens \n        indices = list(range(midpoint - max_n_lyric_tokens \n    return (tokens.unsqueeze(dim=0), indices)", "docstring": "Extract only the relevant tokens based on the character position. A total of `max_n_lyric_tokens` tokens will be\nreturned. If the provided token sequence is smaller, it will be padded, otherwise, only characters ranging from the\nmidpoint - `max_n_lyric_tokens//2` to the midpoint + `max_n_lyric_tokens//2` will be returned. This *focuses* on\nthe most relevant tokens (in time) for the sequence.\n\nArgs:\nfull_tokens (`List[int]`):\nList containing the token ids of the entire lyrics.\ntotal_length (`int`):\nTotal expected length of the music (not all of it is generated, see duration), in samples.\noffset (`int`):\nStarting sample in the music. If the offset is greater than 0, the lyrics will be shifted take that into\naccount\nduration (`int`):\nExpected duration of the generated music, in samples. The duration has to be smaller than the total length,\nwhich represent the overall length of the signal,", "source": "github-repos"}
{"code": "def get(self, config_id):\n    return self.prepare_model(self.client.api.inspect_config(config_id))", "docstring": "Get a config.\n\nArgs:\nconfig_id (str): Config ID.\n\nReturns:\n(:py:class:`Config`): The config.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the config does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def whois_nameservers(self, nameservers):\n        \n        api_name = 'opendns-whois-nameservers'\n        fmt_url_path = u'whois/nameservers/{0}'\n        return self._multi_get(api_name, fmt_url_path, nameservers)", "docstring": "Calls WHOIS Nameserver end point\n\nArgs:\nemails: An enumerable of nameservers\nReturns:\nA dict of {nameserver: domain_result}", "source": "juraj-google-style"}
{"code": "def _add_example(self, example):\n        \n        if self.has_enumerated_subtypes():\n            self._add_example_enumerated_subtypes_helper(example)\n        else:\n            self._add_example_helper(example)", "docstring": "Adds a \"raw example\" for this type.\n\nThis does basic sanity checking to ensure that the example is valid\n(required fields specified, no unknown fields, correct types, ...).\n\nThe example is not available via :meth:`get_examples` until\n:meth:`_compute_examples` is called.\n\nArgs:\nexample (stone.frontend.ast.AstExample): An example of this type.", "source": "juraj-google-style"}
{"code": "def __init__(self, feature_dict):\n    \n    super(FeaturesDict, self).__init__()\n    self._feature_dict = {k: to_feature(v) for k, v in feature_dict.items()}", "docstring": "Initialize the features.\n\nArgs:\nfeature_dict (dict): Dictionary containing the feature connectors of a\nexample. The keys should correspond to the data dict as returned by\ntf.data.Dataset(). Types (tf.int32,...) and dicts will automatically\nbe converted into FeatureConnector.\n\nRaises:\nValueError: If one of the given features is not recognized", "source": "juraj-google-style"}
{"code": "def find_all(pcoll, regex, group=0, outputEmpty=True):\n    regex = Regex._regex_compile(regex)\n\n    def _process(element):\n        matches = regex.finditer(element)\n        if group == Regex.ALL:\n            yield [(m.group(), m.groups()[0]) for m in matches if outputEmpty or m.groups()[0]]\n        else:\n            yield [m.group(group) for m in matches if outputEmpty or m.group(group)]\n    return pcoll | FlatMap(_process)", "docstring": "Returns the matches if a portion of the line matches the Regex. By default,\nlist of group 0 will return with empty items. To get all groups, pass the\n`Regex.ALL` flag in the `group` parameter which returns all the groups in\nthe tuple format.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\ngroup: (optional) name of the group, it can be integer or a string value.\noutputEmpty: (optional) Should empty be output. True to output empties\nand false if not.", "source": "github-repos"}
{"code": "def _ParseAndValidateRecord(self, parser_mediator, text_file_object):\n    try:\n        title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n        url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n        timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n        popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n    except UnicodeDecodeError:\n        return False\n    if ((len(title) == self._MAXIMUM_LINE_SIZE) and (title[(- 1)] != '\\n')):\n        return False\n    if ((len(url) == self._MAXIMUM_LINE_SIZE) and (url[(- 1)] != '\\n')):\n        return False\n    if ((len(timestamp) == self._MAXIMUM_LINE_SIZE) and (timestamp[(- 1)] != '\\n')):\n        return False\n    if ((len(popularity_index) == self._MAXIMUM_LINE_SIZE) and (popularity_index[(- 1)] != '\\n')):\n        return False\n    title = title.strip()\n    url = url.strip()\n    timestamp = timestamp.strip()\n    popularity_index = popularity_index.strip()\n    if ((not title) or (not url) or (not timestamp) or (not popularity_index)):\n        return False\n    event_data = OperaGlobalHistoryEventData()\n    if (not self._IsValidUrl(url)):\n        return False\n    event_data.url = url\n    if (title != url):\n        event_data.title = title\n    try:\n        event_data.popularity_index = int(popularity_index, 10)\n        timestamp = int(timestamp, 10)\n    except ValueError:\n        return False\n    if (event_data.popularity_index < 0):\n        event_data.description = 'First and Only Visit'\n    else:\n        event_data.description = 'Last Visit'\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    return True", "docstring": "Parses and validates an Opera global history record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntext_file_object (dfvfs.TextFile): text file.\n\nReturns:\nbool: True if the record was successfully parsed.", "source": "codesearchnet"}
{"code": "def unzip(x, split_dim, current_length, num_splits=2, name=None):\n    with tf.name_scope(name, 'unzip', [x]) as scope:\n        x = tf.convert_to_tensor(x, name='x')\n        all_splits = tf.split(value=x, num_or_size_splits=current_length, axis=split_dim, name=scope)\n        splits = [[] for _ in xrange(num_splits)]\n        for i in xrange(current_length):\n            splits[(i % num_splits)].append(all_splits[i])\n        return [tf.concat(s, split_dim) for s in splits]", "docstring": "Splits a tensor by unzipping along the split_dim.\n\nFor example the following array split into 2 would be:\n[1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]\nand by 3:\n[1, 2, 3, 4] -> [1, 4], [2], [3]\n\nArgs:\nx: The tensor to split.\nsplit_dim: The dimension to split along.\ncurrent_length: Current length along the split_dim.\nnum_splits: The number of splits.\nname: Optional name for this op.\nReturns:\nA length num_splits sequence.", "source": "codesearchnet"}
{"code": "def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int):\n    if num_hashes > len(_PRIMES):\n        raise ValueError(f'`num_hashes` must be <= {len(_PRIMES)}')\n    primes = _PRIMES[:num_hashes]\n    result_tensors = []\n    for prime in primes:\n        hashed = (input_ids + 1) * prime % num_buckets\n        result_tensors.append(hashed)\n    return result_tensors", "docstring": "Converts ids to hash bucket ids via multiple hashing.\n\nArgs:\ninput_ids: The codepoints or other IDs to be hashed.\nnum_hashes: The number of hash functions to use.\nnum_buckets: The number of hash buckets (i.e. embeddings in each table).\n\nReturns:\nA list of tensors, each of which is the hash bucket IDs from one hash function.", "source": "github-repos"}
{"code": "def from_service_account_info(cls, info, **kwargs):\n        \n        signer = _service_account_info.from_dict(\n            info, require=['client_email', 'token_uri'])\n        return cls._from_signer_and_info(signer, info, **kwargs)", "docstring": "Creates a Credentials instance from parsed service account info.\n\nArgs:\ninfo (Mapping[str, str]): The service account info in Google\nformat.\nkwargs: Additional arguments to pass to the constructor.\n\nReturns:\ngoogle.auth.service_account.Credentials: The constructed\ncredentials.\n\nRaises:\nValueError: If the info is not in the expected format.", "source": "juraj-google-style"}
{"code": "def inter_data_operation(self, axis, func, other):\n    if axis:\n        partitions = self.row_partitions\n        other_partitions = other.row_partitions\n    else:\n        partitions = self.column_partitions\n        other_partitions = other.column_partitions\n    func = self.preprocess_func(func)\n    result = np.array([partitions[i].apply(func, num_splits=self._compute_num_partitions(), other_axis_partition=other_partitions[i]) for i in range(len(partitions))])\n    return (self.__constructor__(result) if axis else self.__constructor__(result.T))", "docstring": "Apply a function that requires two BaseFrameManager objects.\n\nArgs:\naxis: The axis to apply the function over (0 - rows, 1 - columns)\nfunc: The function to apply\nother: The other BaseFrameManager object to apply func to.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "codesearchnet"}
{"code": "def __init__(self, shape, num_actions, probabilities=None, scope='categorical', summary_labels=()):\n        \n        self.num_actions = num_actions\n\n        action_size = util.prod(shape) * self.num_actions\n        if probabilities is None:\n            logits = 0.0\n        else:\n            logits = [log(prob) for _ in range(util.prod(shape)) for prob in probabilities]\n        self.logits = Linear(size=action_size, bias=logits, scope='logits', summary_labels=summary_labels)\n\n        super(Categorical, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Categorical distribution.\n\nArgs:\nshape: Action shape.\nnum_actions: Number of discrete action alternatives.\nprobabilities: Optional distribution bias.", "source": "juraj-google-style"}
{"code": "def _merge_call(self, fn, args, kwargs):\n    t = threading.current_thread()\n    assert isinstance(t, _MirroredReplicaThread)\n    t.merge_fn = fn\n    t.merge_args = args\n    t.merge_kwargs = kwargs\n    t.captured_name_scope = t.graph.get_name_scope()\n    if t.captured_name_scope:\n        t.captured_name_scope += '/'\n    t.captured_var_scope = variable_scope.get_variable_scope()\n    t.captured_control_deps = t.graph._current_control_dependencies()\n    t.merge_call_entered_in_eager = context.context().executing_eagerly()\n    if ops.get_default_graph() != t.graph:\n        raise RuntimeError('`merge_call` called while defining a new graph or a tf.function. This can often happen if the function `fn` passed to `strategy.run()` contains a nested `@tf.function`, and the nested `@tf.function` contains a synchronization point, such as aggregating gradients (e.g, optimizer.apply_gradients), or if the function `fn` uses a control flow statement which contains a synchronization point in the body. Such behaviors are not yet supported. Instead, please avoid nested `tf.function`s or control flow statements that may potentially cross a synchronization boundary, for example, wrap the `fn` passed to `strategy.run` or the entire `strategy.run` inside a `tf.function` or move the control flow out of `fn`. If you are subclassing a `tf.keras.Model`, please avoid decorating overridden methods `test_step` and `train_step` in `tf.function`.')\n    t.has_paused.set()\n    t.should_run.wait()\n    t.should_run.clear()\n    if t.coord.should_stop():\n        raise _RequestedStop()\n    t.merge_call_entered_in_eager = None\n    return t.merge_result", "docstring": "`merge_call()` implementation for synchronized replica.\n\nThis pauses the current replica thread and passes `fn` and its arguments to\nthe main thread. The main thread will wait until all replicas pause, then\ninvoke `fn` with grouped arguments. The current replica thread will continue\nafter `fn` completes.\n\nSee `_call_for_each_replica` for the logic in the main thread.\n\nArgs:\nfn: a function that is called in cross replica context with grouped\narguments from each replica. `fn` should returns grouped values.\nargs: positional arguments to `fn`.\nkwargs: keyword arguments to `fn`.\n\nReturns:\nReturn value of `fn` for the current replica.\n\nRaises:\nRuntimeError: when merge_call happens in a different graph, e.g. in a\ndifferent tf.function, which is not supported now.\n_RequestedStop: when stop is requested.", "source": "github-repos"}
{"code": "def get(self, attr: FetchAttribute) -> MaybeBytes:\n    attr_name = attr.value.decode('ascii')\n    method = getattr(self, ('_get_' + attr_name.replace('.', '_')))\n    return method(attr)", "docstring": "Return the bytes representation of the given message attribue.\n\nArgs:\nattr: The fetch attribute.\n\nRaises:\n:class:`NotFetchable`", "source": "codesearchnet"}
{"code": "def save(self, data):\n    if self.__nested:\n        raise ConfigLoaderException(\"Cannot save the config if the 'nested' paramter is True!\")\n    if (self.__loaded_config_file is None):\n        raise ConfigLoaderException('Load not called yet!')\n    try:\n        with open(self.__loaded_config_file, 'w') as f:\n            f.write(self.__formatter.encode(data))\n    except Exception as e:\n        raise ConfigLoaderException(('Config data is not serializable: %s' % e))", "docstring": "Save the config data\n\nArgs:\ndata: any serializable config data\n\nRaises:\nConfigLoaderException: if the ConfigLoader.load not called, so there is no config file name,\nor the data is not serializable or the loader is nested", "source": "codesearchnet"}
{"code": "def _snake_to_camel(name, strict=False):\n    \n    if strict:\n        name = name.lower()\n    terms = name.split('_')\n    return terms[0] + ''.join([term.capitalize() for term in terms[1:]])", "docstring": "Converts parameter names from snake_case to camelCase.\n\nArgs:\nname, str. Snake case.\nstrict: bool, default True. If True, will set name to lowercase before\nconverting, otherwise assumes original name is proper camel case.\nSet to False if name may already be in camelCase.\n\nReturns:\nstr: CamelCase.", "source": "juraj-google-style"}
{"code": "def get_users(self, sort=True):\n    self._load()\n    if sort:\n        self.users.sort(key=operator.itemgetter('name'))\n    return self.users", "docstring": "Get list of users in the room.\n\nKwargs:\nsort (bool): If True, sort rooms by name\n\nReturns:\narray. List of users", "source": "codesearchnet"}
{"code": "def configure(self, cfg, handler, path=''):\n    for (name, attr) in handler.attributes():\n        if (cfg.get(name) is not None):\n            continue\n        if (attr.expected_type not in [list, dict]):\n            cfg[name] = self.set(handler, attr, name, path, cfg)\n        elif ((attr.default is None) and (not hasattr(handler, ('configure_%s' % name)))):\n            self.action_required.append(('%s.%s: %s' % (path, name, attr.help_text)).strip('.'))\n    for (name, attr) in handler.attributes():\n        if (cfg.get(name) is not None):\n            continue\n        if hasattr(handler, ('configure_%s' % name)):\n            fn = getattr(handler, ('configure_%s' % name))\n            fn(self, cfg, ('%s.%s' % (path, name)))\n            if ((attr.expected_type in [list, dict]) and (not cfg.get(name))):\n                try:\n                    del cfg[name]\n                except KeyError:\n                    pass", "docstring": "Start configuration process for the provided handler\n\nArgs:\ncfg (dict): config container\nhandler (config.Handler class): config handler to use\npath (str): current path in the configuration progress", "source": "codesearchnet"}
{"code": "def start(self, **kwargs):\n    if (not self.is_running()):\n        self.websock_url = self.chrome.start(**kwargs)\n        self.websock = websocket.WebSocketApp(self.websock_url)\n        self.websock_thread = WebsockReceiverThread(self.websock, name=('WebsockThread:%s' % self.chrome.port))\n        self.websock_thread.start()\n        self._wait_for((lambda : self.websock_thread.is_open), timeout=30)\n        self.send_to_chrome(method='Network.enable')\n        self.send_to_chrome(method='Page.enable')\n        self.send_to_chrome(method='Console.enable')\n        self.send_to_chrome(method='Runtime.enable')\n        self.send_to_chrome(method='ServiceWorker.enable')\n        self.send_to_chrome(method='ServiceWorker.setForceUpdateOnPageLoad')\n        self.send_to_chrome(method='Network.setBlockedURLs', params={'urls': ['*google-analytics.com/analytics.js', '*google-analytics.com/ga.js']})", "docstring": "Starts chrome if it's not running.\n\nArgs:\n**kwargs: arguments for self.chrome.start(...)", "source": "codesearchnet"}
{"code": "def disassemble(self, annotate=False, blocks=False):\n        \n\n        ops = disassemble(self.co_code, self.internals)\n\n        if annotate:\n            ops = [self.annotate_op(op) for op in ops]\n\n        if blocks:\n            return blocks_from_ops(ops)\n        else:\n            return ops", "docstring": "Disassemble the bytecode of this code object into a series of\nopcodes and labels. Can also annotate the opcodes and group\nthe opcodes into blocks based on the labels.\n\nArguments:\nannotate(bool): Whether to annotate the operations.\nblocks(bool): Whether to group the operations into blocks.\n\nReturns:\nlist: A list of :class:`Op` (or :class:`AnnotatedOp`) instances\nand labels.", "source": "juraj-google-style"}
{"code": "def zone_compare(timezone):\n    if (timezone.lower() in mapper.win_to_unix):\n        check_zone = timezone\n    elif (timezone.lower() in mapper.unix_to_win):\n        check_zone = mapper.get_win(timezone)\n    else:\n        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))\n    return (get_zone() == mapper.get_unix(check_zone, 'Unknown'))", "docstring": "Compares the given timezone with the machine timezone. Mostly useful for\nrunning state checks.\n\nArgs:\ntimezone (str):\nThe timezone to compare. This can be in Windows or Unix format. Can\nbe any of the values returned by the ``timezone.list`` function\n\nReturns:\nbool: ``True`` if they match, otherwise ``False``\n\nExample:\n\n.. code-block:: bash\n\nsalt '*' timezone.zone_compare 'America/Denver'", "source": "codesearchnet"}
{"code": "def get_commits(self, since_sha=None):\n        \n        assert self.tempdir\n\n        cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]\n        if since_sha:\n            commits = [self.get_commit(since_sha)]\n            cmd.append('{}..HEAD'.format(since_sha))\n        else:\n            commits = []\n            cmd.append('HEAD')\n\n        output = cmd_output(*cmd, cwd=self.tempdir)\n\n        for sha, date in chunk_iter(output.splitlines(), 2):\n            commits.append(Commit(sha, int(date)))\n\n        return commits", "docstring": "Returns a list of Commit objects.\n\nArgs:\nsince_sha - (optional) A sha to search from", "source": "juraj-google-style"}
{"code": "def ChunkedCausalMultiHeadedAttention(feature_depth, num_heads=8, dropout=0.0, chunk_selector=None, mode='train'):\n    prepare_attention_input = combinators.Serial(combinators.Branch(), combinators.Parallel(combinators.Branch(num_branches=3), CausalMask(axis=(- 2))), combinators.Parallel(combinators.Parallel(core.Dense(feature_depth), core.Dense(feature_depth), core.Dense(feature_depth)), combinators.Identity()))\n    return combinators.Serial(combinators.Map(prepare_attention_input), ChunkedAttentionSelector(selector=chunk_selector), combinators.Map(PureMultiHeadedAttention(feature_depth=feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), check_shapes=False), combinators.Map(core.Dense(feature_depth)))", "docstring": "Transformer-style causal multi-headed attention operating on chunks.\n\nAccepts inputs that are a list of chunks and applies causal attention.\n\nArgs:\nfeature_depth: int:  depth of embedding\nnum_heads: int: number of attention heads\ndropout: float: dropout rate\nchunk_selector: a function from chunk number to list of chunks to attend.\nmode: str: 'train' or 'eval'\n\nReturns:\nMulti-headed self-attention layer.", "source": "codesearchnet"}
{"code": "def getAll(self, event_name):\n    raw_events = self._event_client.eventGetAll(self._id, event_name)\n    return [snippet_event.from_dict(msg) for msg in raw_events]", "docstring": "Gets all the events of a certain name that have been received so\nfar. This is a non-blocking call.\n\nArgs:\ncallback_id: The id of the callback.\nevent_name: string, the name of the event to get.\n\nReturns:\nA list of SnippetEvent, each representing an event from the Java\nside.", "source": "codesearchnet"}
{"code": "def delete_object_from_file(file_name, save_key, file_location):\n    \n    file = __os.path.join(file_location, file_name)\n    shelve_store = __shelve.open(file)\n    del shelve_store[save_key]\n    shelve_store.close()", "docstring": "Function to delete objects from a shelve\nArgs:\nfile_name: Shelve storage file name\nsave_key: The name of the key the item is stored in\nfile_location: The location of the file, derive from the os module\n\nReturns:", "source": "juraj-google-style"}
{"code": "def confirm_cw_log(self, account, region, vpcname):\n    try:\n        cw = self.session.client('logs', region)\n        token = None\n        log_groups = []\n        while True:\n            result = (cw.describe_log_groups() if (not token) else cw.describe_log_groups(nextToken=token))\n            token = result.get('nextToken')\n            log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])\n            if (not token):\n                break\n        if (vpcname not in log_groups):\n            cw.create_log_group(logGroupName=vpcname)\n            cw_vpc = VPC.get(vpcname)\n            cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)\n            self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))\n            auditlog(event='vpc_flow_logs.create_cw_log_group', actor=self.ns, data={'account': account.account_name, 'region': region, 'log_group_name': vpcname, 'vpc': vpcname})\n        return True\n    except Exception:\n        self.log.exception('Failed creating log group for {}/{}/{}.'.format(account, region, vpcname))", "docstring": "Create a new CloudWatch log group based on the VPC Name if none exists. Returns `True` if succesful\n\nArgs:\naccount (:obj:`Account`): Account to create the log group in\nregion (`str`): Region to create the log group in\nvpcname (`str`): Name of the VPC the log group is fow\n\nReturns:\n`bool`", "source": "codesearchnet"}
{"code": "def compute_dtype(self):\n    return self._compute_dtype", "docstring": "The compute dtype of this policy.\n\nThis is the dtype layers will do their computations in. Typically layers\noutput tensors with the compute dtype as well.\n\nNote that even if the compute dtype is float16 or bfloat16, hardware\ndevices may not do individual adds, multiplies, and other fundamental\noperations in float16 or bfloat16, but instead may do some of them in\nfloat32 for numeric stability. The compute dtype is the dtype of the\ninputs and outputs of the ops that the layer executes.\nInternally, many ops will do certain internal calculations in\nfloat32 or some other device-internal intermediate format with higher\nprecision than float16/bfloat16, to increase numeric stability.\n\nReturns:\nThe compute dtype of this policy, as a string.", "source": "github-repos"}
{"code": "def _find_current_phase(self, global_step):\n    \n    epoch_size = sum(phase.steps for phase in self._phases)\n    epoch = int(global_step \n    steps_in = global_step % epoch_size\n    for phase in self._phases:\n      if steps_in < phase.steps:\n        return phase, epoch, steps_in\n      steps_in -= phase.steps", "docstring": "Determine the current phase based on the global step.\n\nThis ensures continuing the correct phase after restoring checkoints.\n\nArgs:\nglobal_step: The global number of steps performed across all phases.\n\nReturns:\nTuple of phase object, epoch number, and phase steps within the epoch.", "source": "juraj-google-style"}
{"code": "def _get_document_path(client, path):\n    \n    parts = (client._database_string, \"documents\") + path\n    return _helpers.DOCUMENT_PATH_DELIMITER.join(parts)", "docstring": "Convert a path tuple into a full path string.\n\nOf the form:\n\n``projects/{project_id}/databases/{database_id}/...\ndocuments/{document_path}``\n\nArgs:\nclient (~.firestore_v1beta1.client.Client): The client that holds\nconfiguration details and a GAPIC client object.\npath (Tuple[str, ...]): The components in a document path.\n\nReturns:\nstr: The fully-qualified document path.", "source": "juraj-google-style"}
{"code": "def __init__(self, id, type=None, **kwargs):\n        \n        super(Catalog, self).__init__(id, type, **kwargs)", "docstring": "Create a catalog object (get a catalog by ID or get or create one given by name and type)\n\nArgs:\nid (str): A catalog id or name\n\nKwargs:\ntype (str): 'song' or 'artist', specifying the catalog type\n\nReturns:\nA catalog object\n\nExample:\n\n>>> c = catalog.Catalog('my_songs', type='song')\n>>> c.id\nu'CAVKUPC12BCA792120'\n>>> c.name\nu'my_songs'\n>>>", "source": "juraj-google-style"}
{"code": "def _convert_fields(fields, field_values, context):\n    converted = {}\n    if len(fields) != len(field_values):\n        _report_field_mismatches(fields, field_values)\n    for field in fields:\n        if field.name not in field_values:\n            _report_field_mismatches(fields, field_values)\n        field_value = field_values[field.name]\n        converted[field.name] = _convert_value(field_value, field.value_type, (field.name,), context)\n    field_values.update(converted)", "docstring": "Type-checks and converts each field in `field_values` (in place).\n\nArgs:\nfields: A list of `ExtensionTypeField` objects.\nfield_values: A `dict` mapping field names to values.  Must contain an entry\nfor each field.  I.e., `set(field_values.keys())` must be equal to\n`set([f.name for f in fields])`.\ncontext: _ConversionContext, indicates what kind of value we are converting.\n\nRaises:\nValueError: If the keys of `field_values` do not match the names of\nthe fields in `fields`.\nTypeError: If any value in `field_values` does not have the type indicated\nby the corresponding `ExtensionTypeField` object.", "source": "github-repos"}
{"code": "def calc_timestep_statistic(self, statistic, time):\n    ti = np.where((self.times == time))[0][0]\n    ma = np.where((self.masks[ti].ravel() == 1))\n    if (statistic in ['mean', 'max', 'min', 'std', 'ptp']):\n        stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()\n    elif (statistic == 'median'):\n        stat_val = np.median(self.timesteps[ti].ravel()[ma])\n    elif ('percentile' in statistic):\n        per = int(statistic.split('_')[1])\n        stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)\n    elif ('dt' in statistic):\n        stat_name = statistic[:(- 3)]\n        if (ti == 0):\n            stat_val = 0\n        else:\n            stat_val = (self.calc_timestep_statistic(stat_name, time) - self.calc_timestep_statistic(stat_name, (time - 1)))\n    else:\n        stat_val = np.nan\n    return stat_val", "docstring": "Calculate statistics from the primary attribute of the StObject.\n\nArgs:\nstatistic: statistic being calculated\ntime: Timestep being investigated\n\nReturns:\nValue of the statistic", "source": "codesearchnet"}
{"code": "def rpc_name(rpc_id):\n    name = _RPC_NAME_MAP.get(rpc_id)\n    if (name is None):\n        name = ('RPC 0x%04X' % rpc_id)\n    return name", "docstring": "Map an RPC id to a string name.\n\nThis function looks the RPC up in a map of all globally declared RPCs,\nand returns a nice name string.  if the RPC is not found in the global\nname map, returns a generic name string such as 'rpc 0x%04X'.\n\nArgs:\nrpc_id (int): The id of the RPC that we wish to look up.\n\nReturns:\nstr: The nice name of the RPC.", "source": "codesearchnet"}
{"code": "def json_to_params(fn=None, return_json=True):\n\n    def json_to_params_decorator(fn):\n\n        @handle_type_error\n        @wraps(fn)\n        def json_to_params_wrapper(*args, **kwargs):\n            data = decode_json_body()\n            if (type(data) in [tuple, list]):\n                args = (list(args) + data)\n            elif (type(data) == dict):\n                allowed_keys = (set(data.keys()) - set(kwargs.keys()))\n                for key in allowed_keys:\n                    kwargs[key] = data[key]\n            elif (type(data) in PRIMITIVE_TYPES):\n                args = list(args)\n                args.append(data)\n            if (not return_json):\n                return fn(*args, **kwargs)\n            return encode_json_body(fn(*args, **kwargs))\n        return json_to_params_wrapper\n    if fn:\n        return json_to_params_decorator(fn)\n    return json_to_params_decorator", "docstring": "Convert JSON in the body of the request to the parameters for the wrapped\nfunction.\n\nIf the JSON is list, add it to ``*args``.\n\nIf dict, add it to ``**kwargs`` in non-rewrite mode (no key in ``**kwargs``\nwill be overwritten).\n\nIf single value, add it to ``*args``.\n\nArgs:\nreturn_json (bool, default True): Should the decorator automatically\nconvert returned value to JSON?", "source": "codesearchnet"}
{"code": "def relative_to_contrib(diff, project):\n    \n    path = pathlib.Path(diff.b_path)\n    contrib_path = project.contrib_module_path\n    return path.relative_to(contrib_path)", "docstring": "Compute relative path of changed file to contrib dir\n\nArgs:\ndiff (git.diff.Diff): file diff\nproject (Project): project\n\nReturns:\nPath", "source": "juraj-google-style"}
{"code": "def info(name):\n    \n    try:\n        handle_scm = win32service.OpenSCManager(\n            None, None, win32service.SC_MANAGER_CONNECT)\n    except pywintypes.error as exc:\n        raise CommandExecutionError(\n            'Failed to connect to the SCM: {0}'.format(exc.strerror))\n\n    try:\n        handle_svc = win32service.OpenService(\n            handle_scm, name,\n            win32service.SERVICE_ENUMERATE_DEPENDENTS |\n            win32service.SERVICE_INTERROGATE |\n            win32service.SERVICE_QUERY_CONFIG |\n            win32service.SERVICE_QUERY_STATUS)\n    except pywintypes.error as exc:\n        raise CommandExecutionError(\n            'Failed To Open {0}: {1}'.format(name, exc.strerror))\n\n    try:\n        config_info = win32service.QueryServiceConfig(handle_svc)\n        status_info = win32service.QueryServiceStatusEx(handle_svc)\n\n        try:\n            description = win32service.QueryServiceConfig2(\n                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)\n        except pywintypes.error:\n            description = 'Failed to get description'\n\n        delayed_start = win32service.QueryServiceConfig2(\n            handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)\n    finally:\n        win32service.CloseServiceHandle(handle_scm)\n        win32service.CloseServiceHandle(handle_svc)\n\n    ret = dict()\n    try:\n        sid = win32security.LookupAccountName(\n            '', 'NT Service\\\\{0}'.format(name))[0]\n        ret['sid'] = win32security.ConvertSidToStringSid(sid)\n    except pywintypes.error:\n        ret['sid'] = 'Failed to get SID'\n\n    ret['BinaryPath'] = config_info[3]\n    ret['LoadOrderGroup'] = config_info[4]\n    ret['TagID'] = config_info[5]\n    ret['Dependencies'] = config_info[6]\n    ret['ServiceAccount'] = config_info[7]\n    ret['DisplayName'] = config_info[8]\n    ret['Description'] = description\n    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']\n    ret['Status_CheckPoint'] = status_info['CheckPoint']\n    ret['Status_WaitHint'] = status_info['WaitHint']\n    ret['StartTypeDelayed'] = delayed_start\n\n    flags = list()\n    for bit in SERVICE_TYPE:\n        if isinstance(bit, int):\n            if config_info[0] & bit:\n                flags.append(SERVICE_TYPE[bit])\n\n    ret['ServiceType'] = flags if flags else config_info[0]\n\n    flags = list()\n    for bit in SERVICE_CONTROLS:\n        if status_info['ControlsAccepted'] & bit:\n            flags.append(SERVICE_CONTROLS[bit])\n\n    ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']\n\n    try:\n        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]\n    except KeyError:\n        ret['Status_ExitCode'] = status_info['Win32ExitCode']\n\n    try:\n        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]\n    except KeyError:\n        ret['StartType'] = config_info[1]\n\n    try:\n        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]\n    except KeyError:\n        ret['ErrorControl'] = config_info[2]\n\n    try:\n        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]\n    except KeyError:\n        ret['Status'] = status_info['CurrentState']\n\n    return ret", "docstring": "Get information about a service on the system\n\nArgs:\nname (str): The name of the service. This is not the display name. 
Use\n``get_service_name`` to find the service name.\n\nReturns:\ndict: A dictionary containing information about the service.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' service.info spooler", "source": "juraj-google-style"}
{"code": "def _get_beamer_page(self):\n    PIL_limit = 40000\n    beamer_limit = 550\n    aspect_ratio = (self.sum_row_heights / self.sum_column_widths)\n    margin_factor = 1.5\n    height = min((self.sum_row_heights * margin_factor), beamer_limit)\n    width = min((self.sum_column_widths * margin_factor), beamer_limit)\n    if ((height * width) > PIL_limit):\n        height = min(np.sqrt((PIL_limit * aspect_ratio)), beamer_limit)\n        width = min(np.sqrt((PIL_limit / aspect_ratio)), beamer_limit)\n    height = max(height, 10)\n    width = max(width, 10)\n    return (height, width, self.scale)", "docstring": "Get height, width & scale attributes for the beamer page.\n\nReturns:\ntuple: (height, width, scale) desirable page attributes", "source": "codesearchnet"}
{"code": "def ScanForVolumeSystem(self, source_path_spec):\n    if (source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):\n        return None\n    if source_path_spec.IsVolumeSystemRoot():\n        return source_path_spec\n    if (source_path_spec.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):\n        return None\n    try:\n        type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators(source_path_spec, resolver_context=self._resolver_context)\n    except (IOError, RuntimeError) as exception:\n        raise errors.BackEndError('Unable to process source path specification with error: {0!s}'.format(exception))\n    if (not type_indicators):\n        return None\n    if (len(type_indicators) > 1):\n        raise errors.BackEndError('Unsupported source found more than one volume system types.')\n    if ((type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION) and (source_path_spec.type_indicator in [definitions.TYPE_INDICATOR_TSK_PARTITION])):\n        return None\n    if (type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS):\n        return path_spec_factory.Factory.NewPathSpec(type_indicators[0], location='/', parent=source_path_spec)\n    return path_spec_factory.Factory.NewPathSpec(type_indicators[0], parent=source_path_spec)", "docstring": "Scans the path specification for a supported volume system format.\n\nArgs:\nsource_path_spec (PathSpec): source path specification.\n\nReturns:\nPathSpec: volume system path specification or None if no supported volume\nsystem type was found.\n\nRaises:\nBackEndError: if the source cannot be scanned or more than one volume\nsystem type is found.", "source": "codesearchnet"}
{"code": "def filter(self, field_name, operand, value):\n        \n        if operand not in self._FILTER_OPERANDS:\n            raise ValueError('Operand must be one of {}'.format(', '.join(self._FILTER_OPERANDS)))\n\n        \n        record_stub = record_factory(self._app)\n        field = record_stub.get_field(field_name)\n\n        self._raw['filters'].append({\n            \"fieldId\": field.id,\n            \"filterType\": operand,\n            \"value\": field.get_report(value)\n        })", "docstring": "Adds a filter to report\n\nNotes:\nAll filters are currently AND'ed together\n\nArgs:\nfield_name (str): Target field name to filter on\noperand (str): Operand used in comparison. See `swimlane.core.search` for options\nvalue: Target value used in comparision", "source": "juraj-google-style"}
{"code": "def __init__(self, shape, probability=0.5, scope='bernoulli', summary_labels=()):\n        \n        self.shape = shape\n        action_size = util.prod(self.shape)\n\n        self.logit = Linear(size=action_size, bias=log(probability), scope='logit', summary_labels=summary_labels)\n\n        super(Bernoulli, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Bernoulli distribution.\n\nArgs:\nshape: Action shape.\nprobability: Optional distribution bias.", "source": "juraj-google-style"}
{"code": "def auth(self, token):\n        \n        t = self.sendToken(token)\n        return self.getToken(t)", "docstring": "Take an existing Skype token and refresh it, to extend the expiry time without other credentials.\n\nArgs:\ntoken (str): existing Skype token\n\nReturns:\n(str, datetime.datetime) tuple: Skype token, and associated expiry if known\n\nRaises:\n.SkypeAuthException: if the login request is rejected\n.SkypeApiException: if the login form can't be processed", "source": "juraj-google-style"}
{"code": "def get_simulated_data(nmr_problems):\n    nmr_observed_tanks = 10\n    nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint')\n    observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint')\n    return (observations, nmr_tanks_ground_truth)", "docstring": "Simulate some data.\n\nThis returns the simulated tank observations and the corresponding ground truth maximum number of tanks.\n\nArgs:\nnmr_problems (int): the number of problems\n\nReturns:\ntuple: (observations, nmr_tanks_ground_truth)", "source": "codesearchnet"}
{"code": "def create_inference_graph(wanted_words, sample_rate, clip_duration_ms, clip_stride_ms, window_size_ms, window_stride_ms, feature_bin_count, model_architecture, preprocess):\n    words_list = input_data.prepare_words_list(wanted_words.split(','))\n    model_settings = models.prepare_model_settings(len(words_list), sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, feature_bin_count, preprocess)\n    runtime_settings = {'clip_stride_ms': clip_stride_ms}\n    wav_data_placeholder = tf.compat.v1.placeholder(tf.string, [], name='wav_data')\n    decoded_sample_data = tf.audio.decode_wav(wav_data_placeholder, desired_channels=1, desired_samples=model_settings['desired_samples'], name='decoded_sample_data')\n    spectrogram = audio_ops.audio_spectrogram(decoded_sample_data.audio, window_size=model_settings['window_size_samples'], stride=model_settings['window_stride_samples'], magnitude_squared=True)\n    if preprocess == 'average':\n        fingerprint_input = tf.nn.pool(input=tf.expand_dims(spectrogram, -1), window_shape=[1, model_settings['average_window_width']], strides=[1, model_settings['average_window_width']], pooling_type='AVG', padding='SAME')\n    elif preprocess == 'mfcc':\n        fingerprint_input = audio_ops.mfcc(spectrogram, sample_rate, dct_coefficient_count=model_settings['fingerprint_width'])\n    elif preprocess == 'micro':\n        if not frontend_op:\n            raise Exception('Micro frontend op is currently not available when running TensorFlow directly from Python, you need to build and run through Bazel, for example `bazel run tensorflow/examples/speech_commands:freeze_graph`')\n        sample_rate = model_settings['sample_rate']\n        window_size_ms = model_settings['window_size_samples'] * 1000 / sample_rate\n        window_step_ms = model_settings['window_stride_samples'] * 1000 / sample_rate\n        int16_input = tf.cast(tf.multiply(decoded_sample_data.audio, 32767), tf.int16)\n        micro_frontend = frontend_op.audio_microfrontend(int16_input, sample_rate=sample_rate, window_size=window_size_ms, window_step=window_step_ms, num_channels=model_settings['fingerprint_width'], out_scale=1, out_type=tf.float32)\n        fingerprint_input = tf.multiply(micro_frontend, 10.0 / 256.0)\n    else:\n        raise Exception('Unknown preprocess mode \"%s\" (should be \"mfcc\", \"average\", or \"micro\")' % preprocess)\n    fingerprint_size = model_settings['fingerprint_size']\n    reshaped_input = tf.reshape(fingerprint_input, [-1, fingerprint_size])\n    logits = models.create_model(reshaped_input, model_settings, model_architecture, is_training=False, runtime_settings=runtime_settings)\n    softmax = tf.nn.softmax(logits, name='labels_softmax')\n    return (reshaped_input, softmax)", "docstring": "Creates an audio model with the nodes needed for inference.\n\nUses the supplied arguments to create a model, and inserts the input and\noutput nodes that are needed to use the graph for inference.\n\nArgs:\nwanted_words: Comma-separated list of the words we're trying to recognize.\nsample_rate: How many samples per second are in the input audio files.\nclip_duration_ms: How many samples to analyze for the audio pattern.\nclip_stride_ms: How often to run recognition. 
Useful for models with cache.\nwindow_size_ms: Time slice duration to estimate frequencies from.\nwindow_stride_ms: How far apart time slices should be.\nfeature_bin_count: Number of frequency bands to analyze.\nmodel_architecture: Name of the kind of model to generate.\npreprocess: How the spectrogram is processed to produce features, for\nexample 'mfcc', 'average', or 'micro'.\n\nReturns:\nInput and output tensor objects.\n\nRaises:\nException: If the preprocessing mode isn't recognized.", "source": "github-repos"}
{"code": "def counter(self, counter_name, default=0):\n    return self._state.counters_map.get(counter_name, default)", "docstring": "Get the current counter value.\n\nArgs:\ncounter_name: name of the counter in string.\ndefault: default value in int if one doesn't exist.\n\nReturns:\nCurrent value of the counter.", "source": "codesearchnet"}
{"code": "def create_statement_inspection_table(sts: List[Influence]):\n    columns = ['un_groundings', 'subj_polarity', 'obj_polarity', 'Sentence', 'Source API']\n    polarity_to_str = (lambda x: ('+' if (x == 1) else ('-' if (x == (- 1)) else 'None')))\n    l = []\n    for s in sts:\n        subj_un_grounding = s.subj.db_refs['UN'][0][0].split('/')[(- 1)]\n        obj_un_grounding = s.obj.db_refs['UN'][0][0].split('/')[(- 1)]\n        subj_polarity = s.subj_delta['polarity']\n        obj_polarity = s.obj_delta['polarity']\n        subj_adjectives = s.subj_delta['adjectives']\n        for e in s.evidence:\n            l.append(((subj_un_grounding, obj_un_grounding), subj_polarity, obj_polarity, e.text, e.source_api))\n    df = pd.DataFrame(l, columns=columns)\n    df = df.pivot_table(index=['un_groundings', 'Source API', 'Sentence'])\n\n    def hover(hover_color='\n        return dict(selector='tr:hover', props=[('background-color', ('%s' % hover_color))])\n    styles = [hover(), dict(props=[('font-size', '100%'), ('font-family', 'Gill Sans')])]\n    return df.style.set_table_styles(styles)", "docstring": "Display an HTML representation of a table with INDRA statements to\nmanually inspect for validity.\n\nArgs:\nsts: A list of INDRA statements to be manually inspected for validity.", "source": "codesearchnet"}
{"code": "def valid(self, value, level=[]):\n\t\t\n\n\t\t\n\t\tself.validation_failures = []\n\n\t\t\n\t\tif value is None and self._optional:\n\t\t\treturn True\n\n\t\t\n\t\tif not isinstance(value, list):\n\t\t\tself.validation_failures.append(('.'.join(level), str(value)))\n\t\t\treturn False\n\n\t\t\n\t\tbRet = True\n\n\t\t\n\t\tif self._type == 'unique':\n\t\t\tlItems\t= []\n\n\t\t\n\t\tfor i in range(len(value)):\n\n\t\t\t\n\t\t\tlLevel = level[:]\n\t\t\tlLevel.append('[%d]' % i)\n\n\t\t\t\n\t\t\tif not self._node.valid(value[i], lLevel):\n\t\t\t\tself.validation_failures.extend(self._node.validation_failures[:])\n\t\t\t\tbRet = False;\n\t\t\t\tcontinue;\n\n\t\t\t\n\t\t\tif self._type == 'unique':\n\n\t\t\t\t\n\t\t\t\ttry:\n\n\t\t\t\t\t\n\t\t\t\t\tiIndex = lItems.index(value[i])\n\n\t\t\t\t\t\n\t\t\t\t\tself.validation_failures.append(('.'.join(lLevel), 'duplicate of %s[%d]' % ('.'.join(level), iIndex)))\n\t\t\t\t\tbRet = False\n\t\t\t\t\tcontinue\n\n\t\t\t\t\n\t\t\t\t\n\t\t\t\texcept ValueError:\n\t\t\t\t\tlItems.append(value[i])\n\n\t\t\n\t\tif self._minimum is not None:\n\n\t\t\t\n\t\t\tif len(value) < self._minimum:\n\t\t\t\tself.validation_failures.append(('.'.join(level), 'did not meet minimum'))\n\t\t\t\tbRet = False\n\n\t\t\n\t\tif self._maximum is not None:\n\n\t\t\t\n\t\t\tif len(value) > self._maximum:\n\t\t\t\tself.validation_failures.append(('.'.join(level), 'exceeds maximum'))\n\t\t\t\tbRet = False\n\n\t\t\n\t\treturn bRet", "docstring": "Valid\n\nChecks if a value is valid based on the instance's values\n\nArguments:\nvalue {mixed} -- The value to validate\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def validate_sqs_policy(self, accounts):\n    sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns)\n    sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns)\n    sqs_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns))\n    session = get_aws_session(sqs_account)\n    sqs = session.client('sqs', region_name=sqs_queue_region)\n    sqs_queue_url = sqs.get_queue_url(QueueName=sqs_queue_name, QueueOwnerAWSAccountId=sqs_account.account_number)\n    sqs_attribs = sqs.get_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy'])\n    policy = json.loads(sqs_attribs['Attributes']['Policy'])\n    for account in accounts:\n        arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name)\n        if (arn not in policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']):\n            self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))\n            policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn'].append(arn)\n    sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], Attributes={'Policy': json.dumps(policy)})", "docstring": "Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue\n\nArgs:\naccounts (`list` of :obj:`Account`): List of accounts\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def close_log(log_file):\n    \n    sys.stdout = sys.__stdout__\n    if log_file is not None:\n        log_file.close()\n        del log_file", "docstring": "Closes the open file and returns :py:class:`sys.stdout` to the default (i.e., console output).\n\nArgs:\nlog_file (file): The file object to close.", "source": "juraj-google-style"}
{"code": "def validate_addr(self, address, id=None, endpoint=None):\n        \n        return self._call_endpoint(VALIDATE_ADDR, params=[address], id=id, endpoint=endpoint)", "docstring": "returns whether or not addr string is valid\n\nArgs:\naddress: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):\n    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n    self._dtype = dtype\n    self._ragged_rank = ragged_rank\n    self._row_splits_dtype = row_splits_dtype", "docstring": "Initializes a RaggedTensorType object.\n\nArgs:\ndtype: data type of the `RaggedTensor`'s inner values.\nragged_rank: ragged_rank of the declared `RaggedTensor`.\nrow_splits_dtype: data type for the `RaggedTensor`'s row splits.\nOne of: `tf.int32` or `tf.int64`.", "source": "github-repos"}
{"code": "def serialize_sparse_v2(sp_input, out_type=dtypes.string, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    return gen_sparse_ops.serialize_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type)", "docstring": "Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.\n\nArgs:\nsp_input: The input `SparseTensor`.\nout_type: The `dtype` to use for serialization.\nname: A name prefix for the returned tensors (optional).\n\nReturns:\nA 3-vector (1-D `Tensor`), with each column representing the serialized\n`SparseTensor`'s indices, values, and shape (respectively).\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"}
{"code": "def _GetSignatureScanner(cls, specification_store):\n    signature_scanner = pysigscan.scanner()\n    signature_scanner.set_scan_buffer_size(cls._SCAN_BUFFER_SIZE)\n    for format_specification in specification_store.specifications:\n        for signature in format_specification.signatures:\n            pattern_offset = signature.offset\n            if (pattern_offset is None):\n                signature_flags = pysigscan.signature_flags.NO_OFFSET\n            elif (pattern_offset < 0):\n                pattern_offset *= (- 1)\n                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END\n            else:\n                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START\n            signature_scanner.add_signature(signature.identifier, pattern_offset, signature.pattern, signature_flags)\n    return signature_scanner", "docstring": "Initializes a signature scanner based on a specification store.\n\nArgs:\nspecification_store (FormatSpecificationStore): specification store.\n\nReturns:\npysigscan.scanner: signature scanner.", "source": "codesearchnet"}
{"code": "def list_as_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/availabilitySets', '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List availability sets in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of the list of availability set properties.", "source": "codesearchnet"}
{"code": "def _build_kernel(self, kernel_source, compile_flags=()):\n    return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))", "docstring": "Convenience function for building the kernel for this worker.\n\nArgs:\nkernel_source (str): the kernel source to use for building the kernel\n\nReturns:\ncl.Program: a compiled CL kernel", "source": "codesearchnet"}
{"code": "def _apply_unary_to_chunks(f, chunks_by_dev):\n    output = []\n    for x in chunks_by_dev:\n        with ops.colocate_with(x[0]):\n            output.append([f(t) for t in x])\n    return output", "docstring": "Apply a unary op to each tensor in chunks_by_dev, on same device.\n\nArgs:\nf: a unary function over `tf.Tensor`.\nchunks_by_dev: list of lists of `tf.Tensor`.\n\nReturns:\nnew list of lists of `tf.Tensor` with the same structure as\nchunks_by_dev containing the derived tensors.", "source": "github-repos"}
{"code": "def raise_for_status(response):\n    \n    for err_name in web_exceptions.__all__:\n        err = getattr(web_exceptions, err_name)\n        if err.status_code == response.status:\n            payload = dict(\n                headers=response.headers,\n                reason=response.reason,\n            )\n            if issubclass(err, web_exceptions._HTTPMove):  \n                raise err(response.headers['Location'], **payload)\n            raise err(**payload)", "docstring": "Raise an appropriate error for a given response.\n\nArguments:\nresponse (:py:class:`aiohttp.ClientResponse`): The API response.\n\nRaises:\n:py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate\nerror for the response's status.", "source": "juraj-google-style"}
{"code": "def wait_for_vacancy(self, processor_type):\n    with self._condition:\n        self._condition.wait_for((lambda : (self._processor_available(processor_type) or self._cancelled_event.is_set())))\n        if self._cancelled_event.is_set():\n            raise WaitCancelledException()\n        processor = self[processor_type].next_processor()\n        return processor", "docstring": "Waits for a particular processor type to have the capacity to\nhandle additional transactions or until is_cancelled is True.\n\nArgs:\nprocessor_type (ProcessorType): The family, and version of\nthe transaction processor.\n\nReturns:\nProcessor", "source": "codesearchnet"}
{"code": "def make_x(self, operator, adjoint, with_batch=True):\n    raise NotImplementedError('make_x is not defined.')", "docstring": "Make an 'x' appropriate for calling operator.matmul(x).\n\nArgs:\noperator:  A `LinearOperator`\nadjoint:  Python `bool`.  If `True`, we are making an 'x' value for the\nadjoint operator.\nwith_batch: Python `bool`. If `True`, create `x` with the same batch shape\nas operator, and otherwise create a matrix without any batch shape.\n\nReturns:\nA `Tensor`", "source": "github-repos"}
{"code": "def get(self):\n    if isinstance(self.red, SoftInt):\n        red = self.red.get()\n    else:\n        red = self.red\n    if isinstance(self.green, SoftInt):\n        green = self.green.get()\n    else:\n        green = self.green\n    if isinstance(self.blue, SoftInt):\n        blue = self.blue.get()\n    else:\n        blue = self.blue\n    return (red, green, blue)", "docstring": "Get an rgb color tuple according to the probability distribution.\n\nReturns:\ntuple(int, int, int): A ``(red, green, blue)`` tuple.\n\nExample:\n>>> color = SoftColor(([(0, 1), (255, 10)],),\n...                   ([(0, 1), (255, 10)],),\n...                   ([(0, 1), (255, 10)],))\n>>> color.get()                                    # doctest: +SKIP\n(234, 201, 243)", "source": "codesearchnet"}
{"code": "def wv45(msg):\n    \n    d = hex2bin(data(msg))\n    if d[12] == '0':\n        return None\n\n    ws = bin2int(d[13:15])\n    return ws", "docstring": "Wake vortex.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: Wake vortex level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj-google-style"}
{"code": "def scanJoiner(self, xEUI='*', strPSKd='threadjpaketest'):\n        \n        print '%s call scanJoiner' % self.port\n\n        \n        timeout = 500\n\n        if not isinstance(xEUI, str):\n            eui64 = self.__convertLongToString(xEUI)\n\n            \n            if len(eui64) < 16:\n                eui64 = eui64.zfill(16)\n                print eui64\n        else:\n            eui64 = xEUI\n\n        cmd = 'commissioner joiner add %s %s %s' % (eui64, strPSKd, str(timeout))\n        print cmd\n        if self.__sendCommand(cmd)[0] == 'Done':\n            if self.logThreadStatus == self.logStatus['stop']:\n                self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(120,))\n            return True\n        else:\n            return False", "docstring": "scan Joiner\n\nArgs:\nxEUI: Joiner's EUI-64\nstrPSKd: Joiner's PSKd for commissioning\n\nReturns:\nTrue: successful to add Joiner's steering data\nFalse: fail to add Joiner's steering data", "source": "juraj-google-style"}
{"code": "def _count_op_with_name_and_attribute(self, nodes: Iterable[node_def_pb2.NodeDef], op_name: str, attr_name: str, attr_val: _AttrValType, get_op_name: bool=False) -> int:\n    if get_op_name:\n        return len([node.attr.get(attr_name) == attr_val for node in nodes if node.name == op_name])\n    else:\n        return len([node.attr.get(attr_name) == attr_val for node in nodes if node.op == op_name])", "docstring": "Determine the number of nodes whose operation name matches `op_name`.\n\nIf `attr_name` is given, additionally check if the `attr_val` matches with\nthe attribute value of the op.\n\nArgs:\nnodes: Iterable of NodeDefs.\nop_name: Name of the op to match.\nattr_name: Name of the attribute of the op to match.\nattr_val: Value of the attr_name to check.\nget_op_name: If set True, checks node.name rather than node.op.\n\nReturns:\nThe number of occurrences of nodes whose name match `op_name` and\n'attr_val' if 'attr_name' is given.", "source": "github-repos"}
{"code": "def timestamp_d_b_Y_H_M_S(value):\n    \n    d, b, Y, t, Z = value.split()\n    H, M, S = t.split(\":\")\n    return int(calendar.timegm((\n        int(Y), _months[b.lower()], int(d), int(H), int(M), int(S), 0, 0, 0\n    )))", "docstring": "Convert timestamp string to time in seconds since epoch.\n\nTimestamps strings like '18 Jun 2013 12:00:00 GMT' are able to be converted\nby this function.\n\nArgs:\nvalue: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.\n\nReturns:\nThe time in seconds since epoch as an integer.\n\nRaises:\nValueError: If timestamp is invalid.\nKeyError: If the abbrieviated month is invalid.\n\nNote: The timezone is ignored it is simply assumed to be UTC/GMT.", "source": "juraj-google-style"}
{"code": "def _get_control_flow_context(self):\n    return self._control_flow_context", "docstring": "Returns the control flow context of this op.\n\nReturns:\nA context object.", "source": "github-repos"}
{"code": "def run(self, args):\n        \n        kwargs = {}\n        kwargs['path'] = args.file[0]\n        kwargs['addr'] = args.addr\n        kwargs['on_progress'] = pylink.util.flash_progress_callback\n\n        jlink = self.create_jlink(args)\n        _ = jlink.flash_file(**kwargs)\n        print('Flashed device successfully.')", "docstring": "Flashes the device connected to the J-Link.\n\nArgs:\nself (FlashCommand): the ``FlashCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def inputs(num_devices, dataset_name, data_dir=None, input_name=None, num_chunks=0, append_targets=False):\n    assert data_dir, 'Must provide a data directory'\n    data_dir = os.path.expanduser(data_dir)\n    (train_batches, train_eval_batches, eval_batches, input_name, input_shape) = _train_and_eval_batches(dataset_name, data_dir, input_name, num_devices)\n\n    def numpy_stream(dataset):\n        return dataset_to_stream(dataset, input_name, num_chunks=num_chunks, append_targets=append_targets)\n    if (num_chunks > 0):\n        length = input_shape[0]\n        input_shape = tuple(([tuple(([(length // num_chunks)] + list(input_shape)[1:]))] * num_chunks))\n    return Inputs(train_stream=(lambda : numpy_stream(train_batches)), train_eval_stream=(lambda : numpy_stream(train_eval_batches)), eval_stream=(lambda : numpy_stream(eval_batches)), input_shape=input_shape)", "docstring": "Make Inputs for built-in datasets.\n\nArgs:\nnum_devices: how many devices to build the inputs for.\ndataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix\nwith \"t2t_\".\ndata_dir: data directory.\ninput_name: optional, name of the inputs from the dictionary.\nnum_chunks: optional, into how many pieces should we chunk (large inputs).\nappend_targets: optional, instead of inputs return a pair (inputs, targets)\nwhich is useful for autoregressive models.\n\nReturns:\ntrax.inputs.Inputs", "source": "codesearchnet"}
{"code": "def is_valid_package_name(name, raise_error=False):\n    \n    is_valid = PACKAGE_NAME_REGEX.match(name)\n    if raise_error and not is_valid:\n        raise PackageRequestError(\"Not a valid package name: %r\" % name)\n    return is_valid", "docstring": "Test the validity of a package name string.\n\nArgs:\nname (str): Name to test.\nraise_error (bool): If True, raise an exception on failure\n\nReturns:\nbool.", "source": "juraj-google-style"}
{"code": "def fulltypes_for_flat_tensors(element_spec):\n    specs = _specs_for_flat_tensors(element_spec)\n    full_types_lists = [_translate_to_fulltype_for_flat_tensors(s) for s in specs]\n    rval = nest.flatten(full_types_lists)\n    return rval", "docstring": "Convert the element_spec for a dataset to a list of FullType Def.\n\nNote that \"flat\" in this function and in `_flat_tensor_specs` is a nickname\nfor the \"batchable tensor list\" encoding used by datasets and map_fn.\nThe FullTypeDef created corresponds to this encoding (e.g. that uses variants\nand not the FullTypeDef corresponding to the default \"component\" encoding).\n\nThis is intended for temporary internal use and expected to be removed\nwhen type inference support is sufficient. See limitations of\n`_translate_to_fulltype_for_flat_tensors`.\n\nArgs:\nelement_spec: A nest of TypeSpec describing the elements of a dataset (or\nmap_fn).\n\nReturns:\nA list of FullTypeDef corresponding to ELEMENT_SPEC. The items\nin this list correspond to the items in `_flat_tensor_specs`.", "source": "github-repos"}
{"code": "def set_smartplug_state(self, device_label, state):\n        \n        response = None\n        try:\n            response = requests.post(\n                urls.smartplug(self._giid),\n                headers={\n                    'Content-Type': 'application/json',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                data=json.dumps([{\n                    \"deviceLabel\": device_label,\n                    \"state\": state}]))\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)", "docstring": "Turn on or off smartplug\n\nArgs:\ndevice_label (str): Smartplug device label\nstate (boolean): new status, 'True' or 'False'", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 html_id=None,\n                 name=None,\n                 content=None,\n                 template=None,\n                 classes=None,\n                 **kwargs):\n        \n        if html_id is not None:\n            try:\n                self.html_id = html_id\n            except AttributeError:\n                self._html_id = html_id\n        if name is not None:\n            try:\n                self.name = name\n            except AttributeError:\n                self._name = name\n        if content is not None:\n            try:\n                self.content = content\n            except AttributeError:\n                self._content = content\n        if template is not None:\n            try:\n                self.template = template\n            except AttributeError:\n                self._template = template\n        if classes is not None:\n            try:\n                self.classes = classes\n            except AttributeError:\n                self._classes = classes\n\n        if not hasattr(self, 'template'):\n            raise AttributeError('template is a required widget attribute')\n\n        for kw, arg in kwargs.items():\n            setattr(self, kw, arg)", "docstring": "Init method.\n\nArgs:\nhtml_id (str): an ID to set on the HTML item.\nname (str): the name of the item, displayed in HTML.\ncontent (): suitable content according to chosen display.\ntemplate (str): the template responsible for display.\nclasses (str): additional classes to pass to the HTML item.", "source": "juraj-google-style"}
{"code": "def expect_output(self, pattern, timeout=(- 1)):\n    logger.debug(\"Expecting output '{0}' from '{1}'\".format(pattern, self.name))\n    try:\n        return self._spawn.expect(pattern, timeout)\n    except pexpect.exceptions.EOF as e:\n        logger.debug('Raising termination exception.')\n        raise TerminationException(instance=self, real_exception=e, output=self.get_output())\n    except pexpect.exceptions.TIMEOUT as e:\n        logger.debug('Raising timeout exception.')\n        raise TimeoutException(instance=self, real_exception=e, output=self.get_output())\n    except Exception as e:\n        logger.debug(('Expecting output failed: ' + str(e)))\n        raise NestedException(instance=self, real_exception=e, output=self.get_output())", "docstring": "Wait until the running program produces some given output, or terminates.\n\nArgs:\npattern:  The pattern the output should be checked for.\ntimeout (int):  How many seconds should be waited for the output.\n\nThe pattern argument may be a string, a compiled regular expression,\nor a list of any of those types. Strings will be compiled into regular expressions.\n\nReturns:\nint: The index into the pattern list. If the pattern was not a list, it returns 0 on a successful match.\n\nRaises:\nTimeoutException: The output did not match within the given time frame.\nTerminationException: The program terminated before producing the output.\nNestedException: An internal problem occurred while waiting for the output.", "source": "codesearchnet"}
{"code": "def ReadFromFile(self, artifacts_reader, filename):\n    for artifact_definition in artifacts_reader.ReadFile(filename):\n        self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from a file.\n\nArgs:\nartifacts_reader (ArtifactsReader): an artifacts reader.\nfilename (str): name of the file to read from.", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    size = get_size_dict(size, default_to_square=True, param_name='size')\n    if 'height' not in size or 'width' not in size:\n        raise ValueError(f'The `size` argument must contain `height` and `width` keys. Got {size.keys()}')\n    return resize(image, size=(size['height'], size['width']), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image to (size[\"height\"], size[\"width\"]).\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):\nResampling filter to use when resizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def valid_config_exists(config_path=CONFIG_PATH):\n    if os.path.isfile(config_path):\n        try:\n            config = read_config(config_path)\n            check_config(config)\n        except (ConfigurationError, IOError):\n            return False\n    else:\n        return False\n    return True", "docstring": "Verify that a valid config file exists.\n\nArgs:\nconfig_path (str): Path to the config file.\n\nReturns:\nboolean: True if there is a valid config file, false if not.", "source": "codesearchnet"}
{"code": "def interactive_update_stack(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs):\n    logger.debug('Using interactive provider mode for %s.', fqn)\n    (changes, change_set_id) = create_change_set(self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=self.service_role, **kwargs)\n    old_parameters_as_dict = self.params_as_dict(old_parameters)\n    new_parameters_as_dict = self.params_as_dict([(x if ('ParameterValue' in x) else {'ParameterKey': x['ParameterKey'], 'ParameterValue': old_parameters_as_dict[x['ParameterKey']]}) for x in parameters])\n    params_diff = diff_parameters(old_parameters_as_dict, new_parameters_as_dict)\n    action = ('replacements' if self.replacements_only else 'changes')\n    full_changeset = changes\n    if self.replacements_only:\n        changes = requires_replacement(changes)\n    if (changes or params_diff):\n        ui.lock()\n        try:\n            output_summary(fqn, action, changes, params_diff, replacements_only=self.replacements_only)\n            ask_for_approval(full_changeset=full_changeset, params_diff=params_diff, include_verbose=True)\n        finally:\n            ui.unlock()\n    self.deal_with_changeset_stack_policy(fqn, stack_policy)\n    self.cloudformation.execute_change_set(ChangeSetName=change_set_id)", "docstring": "Update a Cloudformation stack in interactive mode.\n\nArgs:\nfqn (str): The fully qualified name of the Cloudformation stack.\ntemplate (:class:`stacker.providers.base.Template`): A Template\nobject to use when updating the stack.\nold_parameters (list): A list of dictionaries that defines the\nparameter list on the existing Cloudformation stack.\nparameters (list): A list of dictionaries that defines the\nparameter list to be applied to the Cloudformation stack.\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.\ntags (list): A list of dictionaries that defines the tags\nthat should be applied to the Cloudformation stack.", "source": "codesearchnet"}
{"code": "def blend_rgba(self, image: ImageInput) -> ImageInput:\n    if not isinstance(image, PIL.Image.Image):\n        return image\n    elif image.mode == 'RGB':\n        return image\n    img_rgba = np.array(image.convert('RGBA'))\n    if not (img_rgba[:, :, 3] < 255).any():\n        return image.convert('RGB')\n    alpha = img_rgba[:, :, 3] / 255.0\n    img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3]\n    return PIL.Image.fromarray(img_rgb.astype('uint8'), 'RGB')", "docstring": "Convert image to RGB by blending the transparency layer if it's in RGBA format.\nIf the image is not a `PIL.Image`, it is simply returned without modifications.\n\nArgs:\nimage (`ImageInput`):\nImage to convert.", "source": "github-repos"}
{"code": "def find_all(self, model_class, params={}):\n        \n        url = '{host}/{namespace}/{model}{params}'.format(\n            host=self._host,\n            namespace=self._namespace,\n            model=self._translate_name(model_class.__name__),\n            params=self._build_param_string(params)\n        )\n        data = self._get_json(url)['data']\n        fresh_models = []\n        for item in data:\n            fresh_model = model_class(item['attributes'])\n            fresh_model.id = item['id']\n            fresh_model.validate()\n            fresh_models.append(fresh_model)\n            if self._cache is not None:\n                self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)\n        return fresh_models", "docstring": "Return a list of models from the API and cache the result.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nparams (dict, optional): Query parameters appended to the request URL.\n\nReturns:\nlist: A list of instances of your model_class or an empty list.", "source": "juraj-google-style"}
{"code": "def reset(self, state):\n    state = _convert_to_state_tensor(state)\n    state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)])\n    self._state_var.assign(state)", "docstring": "Resets the generator by a new state.\n\nSee `__init__` for the meaning of \"state\".\n\nArgs:\nstate: the new state.", "source": "github-repos"}
{"code": "def pb_for_delete(document_path, option):\n    \n    write_pb = write_pb2.Write(delete=document_path)\n    if option is not None:\n        option.modify_write(write_pb)\n\n    return write_pb", "docstring": "Make a ``Write`` protobuf for ``delete()`` methods.\n\nArgs:\ndocument_path (str): A fully-qualified document path.\noption (optional[~.firestore_v1beta1.client.WriteOption]): A\nwrite option to make assertions / preconditions on the server\nstate of the document before applying changes.\n\nReturns:\ngoogle.cloud.firestore_v1beta1.types.Write: A\n``Write`` protobuf instance for the ``delete()``.", "source": "juraj-google-style"}
{"code": "def _ParseNoHeaderSingleLine(self, parser_mediator, structure):\n    if (not self._last_event_data):\n        logger.debug('SkyDrive, found isolated line with no previous events')\n        return\n    event_data = SkyDriveOldLogEventData()\n    event_data.offset = self._last_event_data.offset\n    event_data.text = structure.text\n    event = time_events.DateTimeValuesEvent(self._last_date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    self._last_date_time = None\n    self._last_event_data = None", "docstring": "Parse an isolated line without a header and store appropriate attributes.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "codesearchnet"}
{"code": "def __init__(self, page_iterator):\n        \n        self._page_iterator = page_iterator\n        self._current = None\n        self._index = -1", "docstring": "Constructor.\n\nArgs:\npage_iterator (PageIterator): the base iterator of getting pages.", "source": "juraj-google-style"}
{"code": "def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False, password=False):\n    input_value = ''\n    question = 'Insert the field using the pattern below:\\n{}\\n{}: '.format(pattern[0], field_name)\n    while (not input_value):\n        input_value = (getpass(question) if password else input(question))\n        if (not (input_value or is_required)):\n            break\n        if password:\n            confirm_password = getpass('Confirm your password: ')\n            if (confirm_password != input_value):\n                print('Password does not match')\n                input_value = ''\n        if (not self.valid_attribute(input_value, pattern[1])):\n            error_message = 'The content must fit the pattern: {}\\n'\n            print(error_message.format(pattern[0]))\n            input_value = ''\n    return input_value", "docstring": "Ask a question and get the input values.\n\nThis method will validate the input values.\nArgs:\nfield_name(string): Field name used to ask for input value.\npattern(tuple): Pattern to validate the input value.\nis_required(bool): Boolean value if the input value is required.\npassword(bool): Boolean value to get input password with mask.\nReturns:\ninput_value(string): Input value validated.", "source": "codesearchnet"}
{"code": "def new_message_from_message_type(message_type):\n    message_type = str(message_type)\n    if (message_type not in MESSAGE_TYPES):\n        raise ValueError('\"{}\" is not known.'.format(message_type))\n    message_class = MESSAGE_TYPES.get(message_type)\n    message_instance = message_class()\n    return message_instance", "docstring": "Given an OpenFlow Message Type, return an empty message of that type.\n\nArgs:\nmessage_type (:class:`~pyof.v0x01.common.header.Type`):\nPython-openflow message type.\n\nReturns:\nEmpty OpenFlow message of the requested message type.\n\nRaises:\nValueError: If the message type is unknown.", "source": "codesearchnet"}
{"code": "def _add_results(self, results, trial_id):\n    for result in results:\n        self.logger.debug(('Appending result: %s' % result))\n        result['trial_id'] = trial_id\n        result_record = ResultRecord.from_json(result)\n        result_record.save()", "docstring": "Add a list of results into db.\n\nArgs:\nresults (list): A list of json results.\ntrial_id (str): Id of the trial.", "source": "codesearchnet"}
{"code": "def on_predict_end(self, logs=None):", "docstring": "Called at the end of prediction.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this\nmethod but that may change in the future.", "source": "github-repos"}
{"code": "def pdf_to_text(pdf_filepath='', **kwargs):\n    result = []\n    try:\n        if (not os.path.exists(pdf_filepath)):\n            raise ValueError('No valid pdf filepath introduced..')\n        kwargs['outfp'] = kwargs.get('outfp', StringIO())\n        kwargs['laparams'] = kwargs.get('laparams', pdfminer.layout.LAParams())\n        kwargs['imagewriter'] = kwargs.get('imagewriter', None)\n        kwargs['output_type'] = kwargs.get('output_type', 'text')\n        kwargs['codec'] = kwargs.get('codec', 'utf-8')\n        kwargs['disable_caching'] = kwargs.get('disable_caching', False)\n        with open(pdf_filepath, 'rb') as f_pdf:\n            pdfminer.high_level.extract_text_to_fp(f_pdf, **kwargs)\n        result = kwargs.get('outfp').getvalue()\n    except Exception:\n        logger.error('fail pdf to text parsing')\n    return result", "docstring": "Extract the text of a PDF file using the pdfminer library.\n\nArgs:\npdf_filepath: Path to the PDF file to parse.\n**kwargs: Optional keyword arguments forwarded to\npdfminer.high_level.extract_text_to_fp, such as codec='utf-8',\nmaxpages=0, page_numbers=None, password=\"\", laparams,\noutput_type='text' or disable_caching=False.\n\nReturns:\nThe extracted text as a string, or an empty list if parsing failed.", "source": "codesearchnet"}
{"code": "def path_set_md5(url):\n    \n    scheme, netloc, path, query_string, fragment = urlsplit(url)\n    path += '.md5'\n\n    return urlunsplit((scheme, netloc, path, query_string, fragment))", "docstring": "Given a file URL, return a md5 query of the file\n\nArgs:\nurl: a given URL\nReturns:\nURL of the md5 file", "source": "juraj-google-style"}
{"code": "def singularize(plural):\n    if (plural in UNCOUNTABLES):\n        return plural\n    for i in IRREGULAR:\n        if (i[1] == plural):\n            return i[0]\n    for i in SINGULARIZE_PATTERNS:\n        if re.search(i[0], plural):\n            return re.sub(i[0], i[1], plural)\n    return plural", "docstring": "Convert plural word to its singular form.\n\nArgs:\nplural: A word in its plural form.\nReturns:\nThe word in its singular form.", "source": "codesearchnet"}
{"code": "def print_results(results):\n    \n    if not isinstance(results, list):\n        results = [results]\n\n    for r in results:\n        try:\n            r.log()\n        except AttributeError:\n            raise ValueError('Argument to print_results() must be a list of '\n                             'FileValidationResults or ObjectValidationResults.')", "docstring": "Print `results` (the results of validation) to stdout.\n\nArgs:\nresults: A list of FileValidationResults or ObjectValidationResults\ninstances.", "source": "juraj-google-style"}
{"code": "def maybe_merge_call(fn, strategy, *args, **kwargs):\n    if strategy_supports_no_merge_call():\n        return fn(strategy, *args, **kwargs)\n    else:\n        return distribute_lib.get_replica_context().merge_call(fn, args=args, kwargs=kwargs)", "docstring": "Maybe invoke `fn` via `merge_call` which may or may not be fulfilled.\n\nThe caller of this utility function requests to invoke `fn` via `merge_call`\nat `tf.distribute.Strategy`'s best efforts. It is `tf.distribute`'s internal\nwhether the request is honored, depending on the `Strategy`. See\n`tf.distribute.ReplicaContext.merge_call()` for more information.\n\nThis is an interim API which is subject to removal and does not guarantee\nbackward-compatibility.\n\nArgs:\nfn: the function to be invoked.\nstrategy: the `tf.distribute.Strategy` to call `fn` with.\n*args: the positional arguments to be passed in to `fn`.\n**kwargs: the keyword arguments to be passed in to `fn`.\n\nReturns:\nThe return value of the `fn` call.", "source": "github-repos"}
{"code": "def update(self, value):\n    \n    with tf.name_scope(self._name + '/update'):\n      if value.shape.ndims == self._mean.shape.ndims:\n        \n        value = value[None, ...]\n      count = tf.shape(value)[0]\n      with tf.control_dependencies([self._count.assign_add(count)]):\n        step = tf.cast(self._count, tf.float32)\n        mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)\n        new_mean = self._mean + mean_delta / step\n        new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])\n        var_delta = (\n            value - self._mean[None, ...]) * (value - new_mean[None, ...])\n        new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)\n      with tf.control_dependencies([new_mean, new_var_sum]):\n        update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)\n      with tf.control_dependencies(update):\n        if value.shape.ndims == 1:\n          value = tf.reduce_mean(value)\n        return self._summary('value', tf.reduce_mean(value))", "docstring": "Update the mean and variance estimates.\n\nArgs:\nvalue: Batch or single value tensor.\n\nReturns:\nSummary tensor.", "source": "juraj-google-style"}
{"code": "def add_state(self, name: str, state: State, initial: bool = False):\n        \n        if not issubclass(state.__class__, State):\n            raise AttributeError(\"state must be subclass of spade.behaviour.State\")\n        self._states[name] = state\n        if initial:\n            self.current_state = name", "docstring": "Adds a new state to the FSM.\n\nArgs:\nname (str): the name of the state, which is used as its identifier.\nstate (spade.behaviour.State): The state class\ninitial (bool, optional): whether the state is the initial state or not. (Only one initial state is allowed) (Default value = False)", "source": "juraj-google-style"}
{"code": "def tv_credits(self, **kwargs):\n        \n        path = self._get_id_path('tv_credits')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the TV credits for a specific person id.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any person method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def connect(self, chip_name, speed='auto', verbose=False):\n    if verbose:\n        self.exec_command('EnableRemarks = 1')\n    self.exec_command(('Device = %s' % chip_name))\n    if (speed == 'auto'):\n        self.set_speed(auto=True)\n    elif (speed == 'adaptive'):\n        self.set_speed(adaptive=True)\n    else:\n        self.set_speed(speed)\n    result = self._dll.JLINKARM_Connect()\n    if (result < 0):\n        raise errors.JLinkException(result)\n    try:\n        self.halted()\n    except errors.JLinkException:\n        pass\n    for index in range(self.num_supported_devices()):\n        device = self.supported_device(index)\n        if (device.name.lower() == chip_name.lower()):\n            self._device = device\n            break\n    else:\n        raise errors.JLinkException('Unsupported device was connected to.')\n    return None", "docstring": "Connects the J-Link to its target.\n\nArgs:\nself (JLink): the ``JLink`` instance\nchip_name (str): target chip name\nspeed (int): connection speed, one of ``{5-12000, 'auto', 'adaptive'}``\nverbose (bool): boolean indicating if connection should be verbose in logging\n\nReturns:\n``None``\n\nRaises:\nJLinkException: if connection fails to establish.\nTypeError: if given speed is invalid", "source": "codesearchnet"}
{"code": "def _location_infos_equal(left, right):\n    \n    if not isinstance(left, LocationInfo) or not isinstance(right, LocationInfo):\n        raise AssertionError(\n            u'Unsupported LocationInfo comparison between types {} and {} '\n            u'with values {}, {}'.format(type(left), type(right), left, right))\n    optional_scopes_depth_equal = (left.optional_scopes_depth == right.optional_scopes_depth)\n\n    parent_query_paths_equal = (\n        (left.parent_location is None and right.parent_location is None) or\n        (left.parent_location.query_path == right.parent_location.query_path))\n\n    recursive_scopes_depths_equal = (left.recursive_scopes_depth == right.recursive_scopes_depth)\n\n    types_equal = left.type == right.type\n\n    return all([\n        optional_scopes_depth_equal,\n        parent_query_paths_equal,\n        recursive_scopes_depths_equal,\n        types_equal,\n    ])", "docstring": "Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise.\n\nLocationInfo objects are considered equal for the SQL backend iff the optional scopes depth,\nrecursive scopes depth, types and parent query paths are equal.\n\nArgs:\nleft: LocationInfo, left location info object to compare.\nright: LocationInfo, right location info object to compare.\n\nReturns:\nbool, True if LocationInfo objects equivalent, False otherwise.", "source": "juraj-google-style"}
{"code": "def task_pivot(self, task_resource):\n    resource = self.copy()\n    resource._request_uri = '{}/{}'.format(task_resource.request_uri, resource._request_uri)\n    return resource", "docstring": "Pivot point on Tasks for this resource.\n\nThis method will return all *resources* (group, indicators, victims,\netc) for this resource that are associated with the provided task id.\n\n**Example Endpoints URI's**\n\n+--------------+-------------------------------------------------------------+\n| HTTP Method  | API Endpoint URI's                                          |\n+==============+=============================================================+\n| GET          | /v2/tasks/{resourceId}/groups/{resourceType}                |\n+--------------+-------------------------------------------------------------+\n| GET          | /v2/tasks/{resourceId}/groups/{resourceType}/{uniqueId}     |\n+--------------+-------------------------------------------------------------+\n| GET          | /v2/tasks/{resourceId}/indicators/{resourceType}            |\n+--------------+-------------------------------------------------------------+\n| GET          | /v2/tasks/{resourceId}/indicators/{resourceType}/{uniqueId} |\n+--------------+-------------------------------------------------------------+\n\nArgs:\nresource_id (integer): The resource pivot id (task id).", "source": "codesearchnet"}
{"code": "def forbidden(cls, errors=None):\n    if cls.expose_status:\n        cls.response.content_type = 'application/json'\n        cls.response._status_line = '403 Forbidden'\n    return cls(403, errors=errors).to_json", "docstring": "Shortcut API for HTTP 403 `Forbidden` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "codesearchnet"}
{"code": "def heartbeat(queue_name, task_id, owner, message, index):\n    task = _get_task_with_policy(queue_name, task_id, owner)\n    if (task.heartbeat_number > index):\n        return False\n    task.heartbeat = message\n    task.heartbeat_number = index\n    now = datetime.datetime.utcnow()\n    timeout_delta = (task.eta - task.last_lease)\n    task.eta = (now + timeout_delta)\n    task.last_lease = now\n    db.session.add(task)\n    signals.task_updated.send(app, task=task)\n    return True", "docstring": "Sets the heartbeat status of the task and extends its lease.\n\nThe task's lease is extended by the same amount as its last lease to\nensure that any operations following the heartbeat will still hold the\nlock for the original lock period.\n\nArgs:\nqueue_name: Name of the queue the work item is on.\ntask_id: ID of the task that is finished.\nowner: Who or what has the current lease on the task.\nmessage: Message to report as the task's current status.\nindex: Number of this message in the sequence of messages from the\ncurrent task owner, starting at zero. This lets the API receive\nheartbeats out of order, yet ensure that the most recent message\nis actually saved to the database. This requires the owner issuing\nheartbeat messages to issue heartbeat indexes sequentially.\n\nReturns:\nTrue if the heartbeat message was set, False if it is lower than the\ncurrent heartbeat index.\n\nRaises:\nTaskDoesNotExistError if the task does not exist.\nLeaseExpiredError if the lease is no longer active.\nNotOwnerError if the specified owner no longer owns the task.", "source": "codesearchnet"}
{"code": "def url_is_project(url, default='not_a_func'):\n    try:\n        u = resolve(url)\n        if (u and (u.func != default)):\n            return True\n    except Resolver404:\n        static_url = settings.STATIC_URL\n        static_url_wd = static_url.lstrip('/')\n        if url.startswith(static_url):\n            url = url[len(static_url):]\n        elif url.startswith(static_url_wd):\n            url = url[len(static_url_wd):]\n        else:\n            return False\n        if finders.find(url):\n            return True\n    return False", "docstring": "Check if URL is part of the current project's URLs.\n\nArgs:\nurl (str): URL to check.\ndefault (callable): used to filter out some URLs attached to function.\n\nReturns:\nbool: True if the URL belongs to the current project, False otherwise.", "source": "codesearchnet"}
{"code": "def _update_bird_conf_file(self, operation):\n    conf_updated = False\n    prefixes = []\n    ip_version = operation.ip_version\n    config_file = self.bird_configuration[ip_version]['config_file']\n    variable_name = self.bird_configuration[ip_version]['variable_name']\n    changes_counter = self.bird_configuration[ip_version]['changes_counter']\n    dummy_ip_prefix = self.bird_configuration[ip_version]['dummy_ip_prefix']\n    try:\n        prefixes = get_ip_prefixes_from_bird(config_file)\n    except OSError as error:\n        self.log.error('failed to open Bird configuration %s, this is a FATAL error, thus exiting main program', error)\n        sys.exit(1)\n    if (not prefixes):\n        self.log.error('found empty bird configuration %s, this is a FATAL error, thus exiting main program', config_file)\n        sys.exit(1)\n    if (dummy_ip_prefix not in prefixes):\n        self.log.warning(\"dummy IP prefix %s wasn't found in bird configuration, adding it. This shouldn't have happened!\", dummy_ip_prefix)\n        prefixes.insert(0, dummy_ip_prefix)\n        conf_updated = True\n    ip_prefixes_without_check = set(prefixes).difference(self.ip_prefixes[ip_version])\n    if ip_prefixes_without_check:\n        self.log.warning(\"found %s IP prefixes in Bird configuration but we aren't configured to run health checks on them. Either someone modified the configuration manually or something went horrible wrong. We remove them from Bird configuration\", ','.join(ip_prefixes_without_check))\n        prefixes[:] = (ip for ip in prefixes if (ip not in ip_prefixes_without_check))\n        conf_updated = True\n    if operation.update(prefixes):\n        conf_updated = True\n    if (not conf_updated):\n        self.log.info('no updates for bird configuration')\n        return conf_updated\n    if self.bird_configuration[ip_version]['keep_changes']:\n        archive_bird_conf(config_file, changes_counter)\n    tempname = write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name, prefixes)\n    try:\n        os.rename(tempname, config_file)\n    except OSError as error:\n        self.log.critical('failed to create Bird configuration %s, this is a FATAL error, thus exiting main program', error)\n        sys.exit(1)\n    else:\n        self.log.info('Bird configuration for IPv%s is updated', ip_version)\n    if (len(prefixes) == 1):\n        self.log.warning(\"Bird configuration doesn't have IP prefixes for any of the services we monitor! It means local node doesn't receive any traffic\")\n    return conf_updated", "docstring": "Update BIRD configuration.\n\nIt adds to or removes IP prefix from BIRD configuration. It also\nupdates generation time stamp in the configuration file.\n\nMain program will exit if configuration file cant be read/written.\n\nArguments:\noperation (obj): Either an AddOperation or DeleteOperation object\n\nReturns:\nTrue if BIRD configuration was updated otherwise False.", "source": "codesearchnet"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        config_dict = self.to_dict()\n        json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'\n        writer.write(json_string)", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (`str` or `os.PathLike`):\nPath to the JSON file in which this configuration instance's parameters will be saved.", "source": "github-repos"}
{"code": "def drop_incomplete_days(dataframe, shift=0):\n    dropped = 0\n    if ((shift > 23) or (shift < 0)):\n        print('Invalid shift parameter setting! Using defaults.')\n        shift = 0\n    first = shift\n    last = (first - 1)\n    if (last < 0):\n        last += 24\n    try:\n        n = len(dataframe.index)\n    except Exception:\n        print('Error: Invalid dataframe.')\n        return dataframe\n    delete = list()\n    for i in range(0, n):\n        if ((dataframe.index.hour[i] == first) and (dataframe.index.minute[i] == 0)):\n            break\n        else:\n            delete.append(i)\n            dropped += 1\n    for i in range((n - 1), 0, (- 1)):\n        if ((dataframe.index.hour[i] == last) and (dataframe.index.minute[i] == 0)):\n            break\n        else:\n            delete.append(i)\n            dropped += 1\n    return dataframe.drop(dataframe.index[delete])", "docstring": "truncates a given dataframe to full days only\n\nThis function truncates a given pandas dataframe (time series) to full days\nonly, thus dropping leading and trailing hours of incomplete days. Please\nnote that this methodology only applies to hourly time series.\n\nArgs:\ndataframe: A pandas dataframe object with index defined as datetime\nshift (unsigned int, opt): First hour of daily recordings. For daily\nrecordings of precipitation gages, 8 would be the first hour of\nthe subsequent day of recordings since daily totals are\nusually recorded at 7. Omit defining this parameter if you intend\nto pertain recordings to 0-23h.\n\nReturns:\nA dataframe with full days only.", "source": "codesearchnet"}
{"code": "def _reverse_transform_column(self, table, metadata, table_name):\n    column_name = metadata['name']\n    if (column_name not in table):\n        return\n    null_name = ('?' + column_name)\n    content = pd.DataFrame(columns=[column_name], index=table.index)\n    transformer = self.transformers[(table_name, column_name)]\n    content[column_name] = transformer.reverse_transform(table[column_name].to_frame())\n    if (self.missing and (null_name in table[column_name])):\n        content[null_name] = table.pop(null_name)\n        null_transformer = transformers.NullTransformer(metadata)\n        content[column_name] = null_transformer.reverse_transform(content)\n    return content", "docstring": "Reverses the transformation on a column from table using the given parameters.\n\nArgs:\ntable (pandas.DataFrame): Dataframe containing column to transform.\nmetadata (dict): Metadata for given column.\ntable_name (str): Name of table in original dataset.\n\nReturns:\npandas.DataFrame: Dataframe containing the transformed column. If self.missing=True,\nit will contain a second column containing 0 and 1 marking if that\nvalue was originally null or not.\nIt will return None in the case the column is not in the table.", "source": "codesearchnet"}
{"code": "def write_merged_bioassembly(inpath, outdir, outname, force_rerun=False):\n    \n    outpath = op.join(outdir, outname + '.pdb')\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):\n        s = StructProp('Model merging', structure_path=inpath, file_type='pdb')\n        ss = s.parse_structure()\n        merge_all_models_into_first_model(ss.structure)\n        outpath = ss.write_pdb(custom_name=outname, out_dir=outdir, force_rerun=force_rerun)\n    return outpath", "docstring": "Utility to take as input a bioassembly file and merge all its models into multiple chains in a single model.\n\nArgs:\ninpath (str): Path to input PDB file with multiple models that represent an oligomeric form of a structure.\noutdir (str): Path to output directory\noutname (str): New filename of structure file\nforce_rerun (bool): If a new PDB should be written if the file exists\n\nReturns:\nstr: Path to newly written PDB file.", "source": "juraj-google-style"}
{"code": "def _merge_type(t0: '_instance_base.SimpleValue', t1: '_instance_base.SimpleValue', name: str, cls: 'class_mixin.Class') -> '_instance_base.SimpleValue':\n    if t0 is None or isinstance(t0, _abstract.Unsolvable):\n        return t1\n    if t1 is None or isinstance(t1, _abstract.Unsolvable):\n        return t0\n    if t0 in t1.mro:\n        return t1\n    if t1 in t0.mro:\n        return t0\n    raise GenericTypeError(cls, f'Conflicting value for TypeVar {name}')", "docstring": "Merge two types.\n\nRules: Type `Any` can match any type, we will return the other type if one\nof them is `Any`. Return the sub-class if the types have inheritance\nrelationship.\n\nArgs:\nt0: The first type.\nt1: The second type.\nname: Type parameter name.\ncls: The class_mixin.Class on which any error should be reported.\n\nReturns:\nA type.\nRaises:\nGenericTypeError: if the types don't match.", "source": "github-repos"}
{"code": "def from_string(cls, table_id, default_project=None):\n    from google.cloud.bigquery.dataset import DatasetReference\n    (output_project_id, output_dataset_id, output_table_id) = _helpers._parse_3_part_id(table_id, default_project=default_project, property_name='table_id')\n    return cls(DatasetReference(output_project_id, output_dataset_id), output_table_id)", "docstring": "Construct a table reference from table ID string.\n\nArgs:\ntable_id (str):\nA table ID in standard SQL format. If ``default_project``\nis not specified, this must include a project ID, dataset\nID, and table ID, each separated by ``.``.\ndefault_project (str):\nOptional. The project ID to use when ``table_id`` does not\ninclude a project ID.\n\nReturns:\nTableReference: Table reference parsed from ``table_id``.\n\nExamples:\n>>> TableReference.from_string('my-project.mydataset.mytable')\nTableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')\n\nRaises:\nValueError:\nIf ``table_id`` is not a fully-qualified table ID in\nstandard SQL format.", "source": "codesearchnet"}
{"code": "def expire_key(self, key):\n        \n        value = self.base_dict[key]\n        del self[key]\n        if self.callback is not None:\n            self.callback(\n                key, value, *self.callback_args, **self.callback_kwargs)", "docstring": "Expire the key, delete the value, and call the callback function\nif one is specified.\n\nArgs:\nkey: The ``TimedDict`` key", "source": "juraj-google-style"}
{"code": "def from_file(cls, filename):\n        \n        yaml = YAML(typ=\"safe\")\n        with open(filename, \"r\") as f:\n            d = yaml.load(f)\n        return cls.from_dict(d)", "docstring": "Constructor that reads in a file in YAML format.\n\nArgs:\nfilename (str): Filename.", "source": "juraj-google-style"}
{"code": "def windows_from_blocksize(self, blocksize_xy=512):\n    meta = self._get_template_for_given_resolution(self.dst_res, 'meta')\n    width = meta['width']\n    height = meta['height']\n    blocksize_wins = windows_from_blocksize(blocksize_xy, width, height)\n    self.windows = np.array([win[1] for win in blocksize_wins])\n    self.windows_row = np.array([win[0][0] for win in blocksize_wins])\n    self.windows_col = np.array([win[0][1] for win in blocksize_wins])\n    return self", "docstring": "Create rasterio.windows.Window instances with given size which fully cover the raster.\n\nArguments:\nblocksize_xy {int or list of two int} -- Size of the window. If one integer is given it defines\nthe width and height of the window. If a list of two integers is given the first defines the\nwidth and the second the height.\n\nReturns:\nself -- The instance itself, with the attributes ``windows``, ``windows_row`` and ``windows_col`` updated.", "source": "codesearchnet"}
{"code": "def _wrap(text, columns=80):\n    \n    out = []\n    for cnt, char in enumerate(text):\n        out.append(char)\n\n        if (cnt + 1) % columns == 0:\n            out.append(\"\\n\")\n\n    return \"\".join(out)", "docstring": "Own \"dumb\" reimplementation of textwrap.wrap().\n\nThis is because calling .wrap() on bigger strings can take a LOT of\nprocessor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of\ntext without spaces.\n\nArgs:\ntext (str): Text to wrap.\ncolumns (int): Wrap after `columns` characters.\n\nReturns:\nstr: Wrapped text.", "source": "juraj-google-style"}
{"code": "def signCertAs(self, cert, signas):\n    cakey = self.getCaKey(signas)\n    if (cakey is None):\n        raise s_exc.NoCertKey(('Missing .key for %s' % signas))\n    cacert = self.getCaCert(signas)\n    if (cacert is None):\n        raise s_exc.NoCertKey(('Missing .crt for %s' % signas))\n    cert.set_issuer(cacert.get_subject())\n    cert.sign(cakey, self.signing_digest)", "docstring": "Signs a certificate with a CA keypair.\n\nArgs:\ncert (OpenSSL.crypto.X509): The certificate to sign.\nsignas (str): The CA keypair name to sign the new keypair with.\n\nExamples:\nSign a certificate with the CA \"myca\":\n\ncdir.signCertAs(mycert, 'myca')\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def push(stack, x, op_id):\n  \n  if isinstance(x, numpy.ndarray):\n    x = x.copy()\n  elif isinstance(x, list):\n    x = x[:]\n  if __debug__:\n    stack.append((x, op_id))\n  else:\n    stack.append(x)", "docstring": "Push a value onto the stack (i.e. record it on the tape).\n\nArgs:\nstack: The stack object, which must support appending values.\nx: The value to append. If it is a mutable object like an array or list, it\nwill be copied before being added onto the stack.\nop_id: A unique variable that is also passed into the corresponding pop.\nAllows optimization passes to track pairs of pushes and pops.", "source": "juraj-google-style"}
{"code": "def __call__(self, *binary_args):\n        \n        \n        if self.num_processors is None:\n            return self.snr_function(0, binary_args, self.wavegen,\n                                     self.signal_type,  self.noise_interpolants,\n                                     self.prefactor,  self.verbose)\n        other_args = (self.wavegen, self.signal_type,\n                      self.noise_interpolants, self.prefactor,  self.verbose)\n        self.prep_parallel(binary_args, other_args)\n        return self.run_parallel(self.snr_function)", "docstring": "Input binary parameters and calculate the SNR\n\nBinary parameters are read in and adjusted based on shapes. They are then\nfed into ``run`` for calculation of the snr.\n\nArgs:\n*args: Arguments for binary parameters (see `:meth:gwsnrcalc.utils.pyphenomd.__call__`)\n\nReturns:\n(dict): Dictionary with the SNR output from the calculation.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, encrypted_root_plist=None, password=None, parent=None,\n      recovery_password=None, **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(FVDEPathSpec, self).__init__(parent=parent, **kwargs)\n    self.encrypted_root_plist = encrypted_root_plist\n    self.password = password\n    self.recovery_password = recovery_password", "docstring": "Initializes a path specification.\n\nNote that the FVDE path specification must have a parent.\n\nArgs:\nencrypted_root_plist (Optional[str]): path to the\nEncryptedRoot.plist.wipekey file.\npassword (Optional[str]): password.\nparent (Optional[PathSpec]): parent path specification.\nrecovery_password (Optional[str]): recovery password.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def process(self, msg: str, kwargs: _KWARGS_TYPE) -> _PROCESS_RETURN_TYPE:\n    new_msg = f'{self.extra[PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX]} {msg}'\n    return (new_msg, kwargs)", "docstring": "Processes the logging call to insert contextual information.\n\nArgs:\nmsg: The logging message.\nkwargs: Keyword arguments passed in to a logging call.\n\nReturns:\nThe message and kwargs modified.", "source": "github-repos"}
{"code": "def f(self, y, t):\n        \n        coupling = self.coupling_function[0]\n        res = np.empty_like(self.y0)\n        for j, m in enumerate(self.submodels):\n            slicej = slice(self._si[j], self._si[j+1])\n            target_y = y[slicej] \n            res[slicej] = m.f(target_y, t) \n            \n            sources = np.nonzero(self.network[:,j])[0]\n            for i in sources:\n                weight = self.network[i, j]\n                source_y = y[slice(self._si[i], self._si[i+1])] \n                res[slicej] += coupling(source_y, target_y, weight)\n        return res", "docstring": "Deterministic term f of the complete network system\ndy = f(y, t)dt + G(y, t).dot(dW)\n\n(or for an ODE network system without noise, dy/dt = f(y, t))\n\nArgs:\ny (array of shape (d,)): where d is the dimension of the overall\nstate space of the complete network system.\n\nReturns:\nf (array of shape (d,)):  Defines the deterministic term of the\ncomplete network system", "source": "juraj-google-style"}
{"code": "class Flatten(keras_layers.Flatten, base.Layer):\n    pass", "docstring": "Flattens an input tensor while preserving the batch axis (axis 0).\n\nArgs:\ndata_format: A string, one of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, ..., channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, ...)`.\n\nExamples:\n\n```\nx = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')\ny = Flatten()(x)\n# now `y` has shape `(None, 16)`\n\nx = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')\ny = Flatten()(x)\n# now `y` has shape `(None, None)`\n```", "source": "github-repos"}
{"code": "def marcxml2record(marcxml):\n    marcjson = create_record(marcxml, keep_singletons=False)\n    collections = _get_collections(marcjson)\n    if ('conferences' in collections):\n        return conferences.do(marcjson)\n    elif ('data' in collections):\n        return data.do(marcjson)\n    elif ('experiment' in collections):\n        return experiments.do(marcjson)\n    elif ('hepnames' in collections):\n        return hepnames.do(marcjson)\n    elif ('institution' in collections):\n        return institutions.do(marcjson)\n    elif (('job' in collections) or ('jobhidden' in collections)):\n        return jobs.do(marcjson)\n    elif (('journals' in collections) or ('journalsnew' in collections)):\n        return journals.do(marcjson)\n    return hep.do(marcjson)", "docstring": "Convert a MARCXML string to a JSON record.\n\nTries to guess which set of rules to use by inspecting the contents\nof the ``980__a`` MARC field, but falls back to HEP in case nothing\nmatches, because records belonging to special collections logically\nbelong to the Literature collection but don't have ``980__a:HEP``.\n\nArgs:\nmarcxml(str): a string containing MARCXML.\n\nReturns:\ndict: a JSON record converted from the string.", "source": "codesearchnet"}
{"code": "def is_testcase_path(path):\n    \n    if not isinstance(path, (str, list)):\n        return False\n\n    if isinstance(path, list):\n        for p in path:\n            if not is_testcase_path(p):\n                return False\n\n    if isinstance(path, str):\n        if not os.path.exists(path):\n            return False\n\n    return True", "docstring": "check if path is testcase path or path list.\n\nArgs:\npath (str/list): file path or file path list.\n\nReturns:\nbool: True if path is valid file path or path list, otherwise False.", "source": "juraj-google-style"}
{"code": "def set_window_position(self, x, y, window_handle='current'):\n        \n        self._execute(Command.SET_WINDOW_POSITION, {\n            'x': int(x),\n            'y': int(y),\n            'window_handle': window_handle})", "docstring": "Sets the x,y position of the current window.\n\nSupport:\nWeb(WebView)\n\nArgs:\nx(int): the x-coordinate in pixels.\ny(int): the y-coordinate in pixels.\nwindow_handle(str): Identifier of window_handle,\ndefault to 'current'.\n\nReturns:\nWebDriver Object.", "source": "juraj-google-style"}
{"code": "def l1_normalize(x, dim, epsilon=1e-12, name=None):\n    with tf.name_scope(name, 'l1_normalize', [x]) as scope:\n        x = tf.convert_to_tensor(x, name='x')\n        x = tf.verify_tensor_all_finite(x, ('Error at input %s' % scope))\n        x_norm = tf.maximum(tf.reduce_sum(tf.abs(x), [dim], keep_dims=True), epsilon)\n        return tf.div(x, x_norm, name=scope)", "docstring": "l1 normalizes x.\n\nArgs:\nx: The tensor to normalize.\ndim: The dimension to normalize along.\nepsilon: Lower bound on the norm, used to avoid exploding gradients as the\nnorm approaches 0.\nname: Optional name for this op.\nReturns:\nx normalized along dim.", "source": "codesearchnet"}
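The `l1_normalize` record above clamps the L1 norm from below before dividing. As an illustration only (not part of the original library), here is a minimal NumPy sketch of the same arithmetic; the helper name `l1_normalize_np` is hypothetical:

```python
import numpy as np

def l1_normalize_np(x, axis, epsilon=1e-12):
    # Divide by the L1 norm along `axis`, clamped below by `epsilon`
    # so slices that sum to zero do not cause a division by zero.
    norm = np.maximum(np.sum(np.abs(x), axis=axis, keepdims=True), epsilon)
    return x / norm

x = np.array([[1.0, -3.0], [0.0, 0.0]])
print(l1_normalize_np(x, axis=1))  # [[0.25, -0.75], [0.0, 0.0]]
```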
{"code": "def __stripValue(self, value):\n    if isinstance(value, str):\n        if (((value[0] == '\"') and (value[(- 1)] == '\"')) or ((value[0] == '[') and (value[(- 1)] == ']'))):\n            return value[1:(- 1)]\n    return value", "docstring": "strip the special characters in the value\n\nArgs:\nvalue: value string\n\nReturns:\nvalue string without special characters", "source": "codesearchnet"}
{"code": "def validate_request_signature(body: str, headers: MutableMapping, signing_secret: str) -> None:\n    request_timestamp = int(headers['X-Slack-Request-Timestamp'])\n    if ((int(time.time()) - request_timestamp) > (60 * 5)):\n        raise exceptions.InvalidTimestamp(timestamp=request_timestamp)\n    slack_signature = headers['X-Slack-Signature']\n    calculated_signature = ('v0=' + hmac.new(signing_secret.encode('utf-8'), f\"v0:{headers['X-Slack-Request-Timestamp']}:{body}\".encode('utf-8'), digestmod=hashlib.sha256).hexdigest())\n    if (not hmac.compare_digest(slack_signature, calculated_signature)):\n        raise exceptions.InvalidSlackSignature(slack_signature, calculated_signature)", "docstring": "Validate incoming request signature using the application signing secret.\n\nContrary to the ``team_id`` and ``verification_token`` verification this method is not called by ``slack-sansio`` when creating object from incoming HTTP request. Because the body of the request needs to be provided as text and not decoded as json beforehand.\n\nArgs:\nbody: Raw request body\nheaders: Request headers\nsigning_secret: Application signing_secret\n\nRaise:\n:class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match\n:class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old", "source": "codesearchnet"}
{"code": "def transformer_prepare_encoder(inputs, target_space, hparams, features=None):\n    ishape_static = inputs.shape.as_list()\n    encoder_input = inputs\n    if (features and ('inputs_segmentation' in features)):\n        inputs_segmentation = features['inputs_segmentation']\n        inputs_position = features['inputs_position']\n        targets_segmentation = features['targets_segmentation']\n        if (hasattr(hparams, 'unidirectional_encoder') and hparams.unidirectional_encoder):\n            tf.logging.info('Using unidirectional encoder')\n            encoder_self_attention_bias = common_attention.attention_bias_lower_triangle(common_layers.shape_list(inputs)[1])\n        else:\n            encoder_self_attention_bias = common_attention.attention_bias_same_segment(inputs_segmentation, inputs_segmentation)\n        encoder_decoder_attention_bias = common_attention.attention_bias_same_segment(targets_segmentation, inputs_segmentation)\n    else:\n        encoder_padding = common_attention.embedding_to_padding(encoder_input)\n        ignore_padding = common_attention.attention_bias_ignore_padding(encoder_padding)\n        if (hasattr(hparams, 'unidirectional_encoder') and hparams.unidirectional_encoder):\n            tf.logging.info('Using unidirectional encoder')\n            encoder_self_attention_bias = common_attention.attention_bias_lower_triangle(common_layers.shape_list(inputs)[1])\n        else:\n            encoder_self_attention_bias = ignore_padding\n        encoder_decoder_attention_bias = ignore_padding\n        inputs_position = None\n    if hparams.proximity_bias:\n        encoder_self_attention_bias += common_attention.attention_bias_proximal(common_layers.shape_list(inputs)[1])\n    if ((target_space is not None) and hparams.get('use_target_space_embedding', True)):\n        emb_target_space = common_layers.embedding(target_space, 32, ishape_static[(- 1)], name='target_space_embedding', dtype=hparams.get('activation_dtype', 'float32'))\n        emb_target_space = tf.reshape(emb_target_space, [1, 1, (- 1)])\n        encoder_input += emb_target_space\n    if (hparams.pos == 'timing'):\n        if (inputs_position is not None):\n            encoder_input = common_attention.add_timing_signal_1d_given_position(encoder_input, inputs_position)\n        else:\n            encoder_input = common_attention.add_timing_signal_1d(encoder_input)\n    elif (hparams.pos == 'emb'):\n        encoder_input = common_attention.add_positional_embedding(encoder_input, hparams.max_length, 'inputs_positional_embedding', inputs_position)\n    encoder_self_attention_bias = common_layers.cast_like(encoder_self_attention_bias, encoder_input)\n    encoder_decoder_attention_bias = common_layers.cast_like(encoder_decoder_attention_bias, encoder_input)\n    return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias)", "docstring": "Prepare one shard of the model for the encoder.\n\nArgs:\ninputs: a Tensor.\ntarget_space: a Tensor.\nhparams: run hyperparameters\nfeatures: optionally pass the entire features dictionary as well.\nThis is needed now for \"packed\" datasets.\n\nReturns:\nencoder_input: a Tensor, bottom of encoder stack\nencoder_self_attention_bias: a bias tensor for use in encoder self-attention\nencoder_decoder_attention_bias: a bias tensor for use in encoder-decoder\nattention", "source": "codesearchnet"}
{"code": "def _get_input_target_path(self, local_file_path):\n    \n\n    path, filename = os.path.split(local_file_path)\n    if '*' in filename:\n      return path + '/'\n    else:\n      return local_file_path", "docstring": "Returns a directory or file path to be the target for \"gsutil cp\".\n\nIf the filename contains a wildcard, then the target path must\nbe a directory in order to ensure consistency whether the source pattern\ncontains one or multiple files.\n\n\nArgs:\nlocal_file_path: A full path terminating in a file or a file wildcard.\n\nReturns:\nThe path to use as the \"gsutil cp\" target.", "source": "juraj-google-style"}
{"code": "def get_int(self, min_int=_MIN_INT, max_int=_MAX_INT):\n    return self.fdp.ConsumeIntInRange(min_int, max_int)", "docstring": "Consume a signed integer with given constraints.\n\nArgs:\nmin_int: Minimum allowed integer.\nmax_int: Maximum allowed integer.\n\nReturns:\nConsumed integer based on input bytes and constraints.", "source": "github-repos"}
{"code": "def convert_to_tensor(value, dtype=None, dtype_hint=None):\n    if dtype is None and isinstance(value, int) and (value >= 2 ** 63):\n        dtype = dtypes.uint64\n    elif dtype is None and dtype_hint is None and isinstance(value, float):\n        dtype = np_dtypes.default_float_type()\n    return tensor_conversion.convert_to_tensor_v2_with_dispatch(value, dtype=dtype, dtype_hint=dtype_hint)", "docstring": "Wrapper over `tf.convert_to_tensor`.\n\nArgs:\nvalue: value to convert\ndtype: (optional) the type we would like it to be converted to.\ndtype_hint: (optional) soft preference for the type we would like it to be\nconverted to. `tf.convert_to_tensor` will attempt to convert value to this\ntype first, but will not fail if conversion is not possible falling back\nto inferring the type instead.\n\nReturns:\nValue converted to tf.Tensor.", "source": "github-repos"}
{"code": "def add_channel(channel: EFBChannel):\n    \n    global master, slaves\n    if isinstance(channel, EFBChannel):\n        if channel.channel_type == ChannelType.Slave:\n            slaves[channel.channel_id] = channel\n        else:\n            master = channel\n    else:\n        raise TypeError(\"Channel instance is expected\")", "docstring": "Register the channel with the coordinator.\n\nArgs:\nchannel (EFBChannel): Channel to register", "source": "juraj-google-style"}
{"code": "def regular_polygon_area(number_of_sides, length_of_sides):\n    \n    return (0.25 * number_of_sides * length_of_sides ** 2) / math.tan(\n        math.pi / number_of_sides\n    )", "docstring": "Calculates the area of a regular polygon (with sides of equal length).\n\nArgs:\nnumber_of_sides: Integer, the number of sides of the polygon\n\nlength_of_sides: Integer or floating point number, the length of the sides\n\nReturns:\nThe area of a regular polygon as an integer or floating point number\n\nRequires:\nThe math module", "source": "juraj-google-style"}
{"code": "def __init__(self, force=False):\n        \n        self.colorize = force or sys.stdout.isatty() or os.environ.get('JPY_PARENT_PID', None)", "docstring": "Initialize the class.\n\nArgs:\nforce (bool): If True, render colorizes output no matter where the output is (default: False).", "source": "juraj-google-style"}
{"code": "def snapshot(self, filename=\"tmp.png\"):\n        \n        if not filename:\n            filename = \"tmp.png\"\n        if self.handle:\n            try:\n                screenshot(filename, self.handle)\n            except win32gui.error:\n                self.handle = None\n                screenshot(filename)\n        else:\n            screenshot(filename)\n\n        img = aircv.imread(filename)\n        os.remove(filename)\n\n        return img", "docstring": "Take a screenshot and save it to `tmp.png` filename by default\n\nArgs:\nfilename: name of file where to store the screenshot\n\nReturns:\ndisplay the screenshot", "source": "juraj-google-style"}
{"code": "def format_var_name(variable, var_list):\n    z_index = None\n    if (variable in var_list):\n        var_name = variable\n    elif (variable.ljust(6, '_') in var_list):\n        var_name = variable.ljust(6, '_')\n    elif any([(variable in v_sub.split('_')) for v_sub in var_list]):\n        var_name = var_list[[(variable in v_sub.split('_')) for v_sub in var_list].index(True)]\n        z_index = var_name.split('_').index(variable)\n    else:\n        raise KeyError('{0} not found in {1}'.format(variable, var_list))\n    return (var_name, z_index)", "docstring": "Searches var list for variable name, checks other variable name format options.\n\nArgs:\nvariable (str): Variable being loaded\nvar_list (list): List of variables in file.\n\nReturns:\nName of variable in file containing relevant data, and index of variable z-level if multiple variables\ncontained in same array in file.", "source": "codesearchnet"}
{"code": "def is_compatible_with(self, other):\n    other = as_dtype(other)\n    return self._type_enum in (other.as_datatype_enum, other.base_dtype.as_datatype_enum)", "docstring": "Returns True if the `other` DType will be converted to this DType (TF1).\n\nPrograms written for TensorFlow 2.x do not need this function.\nInstead, they can do equality comparison on `DType` objects directly:\n`tf.as_dtype(this) == tf.as_dtype(other)`.\n\nThis function exists only for compatibility with TensorFlow 1.x, where it\nadditionally allows conversion from a reference type (used by\n`tf.compat.v1.Variable`) to its base type.\n\nArgs:\nother: A `DType` (or object that may be converted to a `DType`).\n\nReturns:\nTrue if a Tensor of the `other` `DType` will be implicitly converted to\nthis `DType`.", "source": "github-repos"}
{"code": "def is_original_format(tweet):\n    if ('created_at' in tweet):\n        original_format = True\n    elif ('postedTime' in tweet):\n        original_format = False\n    else:\n        raise NotATweetError(\"This dict has neither 'created_at' or 'postedTime' as keys\")\n    return original_format", "docstring": "Simple checker to flag the format of a tweet.\n\nArgs:\ntweet (Tweet): tweet in qustion\n\nReturns:\nBool\n\nExample:\n>>> import tweet_parser.tweet_checking as tc\n>>> tweet = {\"created_at\": 124125125125,\n...          \"text\": \"just setting up my twttr\",\n...          \"nested_field\": {\"nested_1\": \"field\", \"nested_2\": \"field2\"}}\n>>> tc.is_original_format(tweet)\nTrue", "source": "codesearchnet"}
{"code": "def cumulative_gain_curve(y_true, y_score, pos_label=None):\n    (y_true, y_score) = (np.asarray(y_true), np.asarray(y_score))\n    classes = np.unique(y_true)\n    if ((pos_label is None) and (not (np.array_equal(classes, [0, 1]) or np.array_equal(classes, [(- 1), 1]) or np.array_equal(classes, [0]) or np.array_equal(classes, [(- 1)]) or np.array_equal(classes, [1])))):\n        raise ValueError('Data is not binary and pos_label is not specified')\n    elif (pos_label is None):\n        pos_label = 1.0\n    y_true = (y_true == pos_label)\n    sorted_indices = np.argsort(y_score)[::(- 1)]\n    y_true = y_true[sorted_indices]\n    gains = np.cumsum(y_true)\n    percentages = np.arange(start=1, stop=(len(y_true) + 1))\n    gains = (gains / float(np.sum(y_true)))\n    percentages = (percentages / float(len(y_true)))\n    gains = np.insert(gains, 0, [0])\n    percentages = np.insert(percentages, 0, [0])\n    return (percentages, gains)", "docstring": "This function generates the points necessary to plot the Cumulative Gain\n\nNote: This implementation is restricted to the binary classification task.\n\nArgs:\ny_true (array-like, shape (n_samples)): True labels of the data.\n\ny_score (array-like, shape (n_samples)): Target scores, can either be\nprobability estimates of the positive class, confidence values, or\nnon-thresholded measure of decisions (as returned by\ndecision_function on some classifiers).\n\npos_label (int or str, default=None): Label considered as positive and\nothers are considered negative\n\nReturns:\npercentages (numpy.ndarray): An array containing the X-axis values for\nplotting the Cumulative Gains chart.\n\ngains (numpy.ndarray): An array containing the Y-axis values for one\ncurve of the Cumulative Gains chart.\n\nRaises:\nValueError: If `y_true` is not composed of 2 classes. The Cumulative\nGain Chart is only relevant in binary classification.", "source": "codesearchnet"}
{"code": "async def change_url(self, url: str, description: str=None):\n    (await self._change(url=url, description=description))", "docstring": "change the url of that attachment\n\n|methcoro|\n\nArgs:\nurl: url you want to change\ndescription: *optional* description for your attachment\n\nRaises:\nValueError: url must not be None\nAPIException", "source": "codesearchnet"}
{"code": "def endpoints(self):\n    if (not self.__endpoints):\n        self.__endpoints = Endpoints(self.__connection)\n    return self.__endpoints", "docstring": "Gets the Endpoints API client.\n\nReturns:\nEndpoints:", "source": "codesearchnet"}
{"code": "def _resource_context(fn):\n    \n    return os.path.join(\n        os.path.dirname(__file__),\n        DES_DIR,\n        fn\n    )", "docstring": "Compose path to the ``resources`` directory for given `fn`.\n\nArgs:\nfn (str): Filename of file in ``resources`` directory.\n\nReturns:\nstr: Absolute path to the file in resources directory.", "source": "juraj-google-style"}
{"code": "def FlatMapTuple(fn, *args, **kwargs):\n    if not callable(fn):\n        raise TypeError('FlatMapTuple can be used only with callable objects. Received %r instead.' % fn)\n    label = 'FlatMapTuple(%s)' % ptransform.label_from_callable(fn)\n    arg_names, defaults = get_function_args_defaults(fn)\n    num_defaults = len(defaults)\n    if num_defaults < len(args) + len(kwargs):\n        raise TypeError('Side inputs must have defaults for FlatMapTuple.')\n    if defaults or args or kwargs:\n        wrapper = lambda x, *args, **kwargs: fn(*tuple(x) + args, **kwargs)\n    else:\n        wrapper = lambda x: fn(*tuple(x))\n    type_hints = get_type_hints(fn).with_defaults(typehints.decorators.IOTypeHints.from_callable(fn))\n    if type_hints.input_types is not None:\n        pass\n    output_hint = type_hints.simple_output_type(label)\n    if output_hint:\n        wrapper = with_output_types(_strip_output_annotations(output_hint))(wrapper)\n    modified_arg_names = ['tuple_element'] + arg_names[-num_defaults:]\n    modified_argspec = (modified_arg_names, defaults)\n    pardo = ParDo(CallableWrapperDoFn(wrapper, fullargspec=modified_argspec), *args, **kwargs)\n    pardo.label = label\n    return pardo", "docstring": ":func:`FlatMapTuple` is like :func:`FlatMap` but expects tuple inputs and\nflattens them into multiple input arguments.\n\nIn other words\n\nbeam.FlatMap(lambda start_end: range(start_end[0], start_end[1]))\n\nis equivalent to\n\nbeam.FlatMapTuple(lambda start, end: range(start, end))\n\nThis can be useful when processing a PCollection of tuples\n(e.g. key-value pairs).\n\nArgs:\nfn (callable): a callable object.\n*args: positional arguments passed to the transform callable.\n**kwargs: keyword arguments passed to the transform callable.\n\nReturns:\n~apache_beam.pvalue.PCollection:\nA :class:`~apache_beam.pvalue.PCollection` containing the\n:func:`FlatMapTuple` outputs.\n\nRaises:\nTypeError: If the **fn** passed as argument is not a callable.\nTypical error is to pass a :class:`DoFn` instance which is supported only\nfor :class:`ParDo`.", "source": "github-repos"}
{"code": "def bin_hash160Bytes(bts):\n    \n    intermed = hashlib.sha256(bts).digest()\n    return hashlib.new('ripemd160', intermed).digest()", "docstring": "Get a hash of the provided message using the ripemd160 algorithm.\n\nArgs:\nbts (str): message to hash.\n\nReturns:\nbytes: hash.", "source": "juraj-google-style"}
{"code": "def __call__(self, decision_points: List[pg.geno.DecisionPoint], global_state: Optional[pg.geno.AttributeDict]=None, step: int=0) -> List[pg.geno.DecisionPoint]:\n    return self._call(decision_points, global_state=global_state, step=step)", "docstring": "Filtering decision points based on global state and current step.\n\nArgs:\ndecision_points: A list of decision points as candidates for filtering.\nglobal_state: An optional keyword argument as the global state.\nstep: An optional keyword argument as current step of evolution.\n\nReturns:\nA list of decision points that should be kept.", "source": "github-repos"}
{"code": "def save_cache(cache):\n    \n    with open(settings.DUP_FILTER_FILE, \"w\") as f:\n        f.write(\n            json.dumps(list(cache))\n        )", "docstring": "Save cahce to the disk.\n\nArgs:\ncache (set): Set with cached data.", "source": "juraj-google-style"}
{"code": "def port(self, container, private_port):\n    res = self._get(self._url('/containers/{0}/json', container))\n    self._raise_for_status(res)\n    json_ = res.json()\n    private_port = str(private_port)\n    h_ports = None\n    port_settings = json_.get('NetworkSettings', {}).get('Ports')\n    if (port_settings is None):\n        return None\n    if ('/' in private_port):\n        return port_settings.get(private_port)\n    for protocol in ['tcp', 'udp', 'sctp']:\n        h_ports = port_settings.get(((private_port + '/') + protocol))\n        if h_ports:\n            break\n    return h_ports", "docstring": "Lookup the public-facing port that is NAT-ed to ``private_port``.\nIdentical to the ``docker port`` command.\n\nArgs:\ncontainer (str): The container to look up\nprivate_port (int): The private port to inspect\n\nReturns:\n(list of dict): The mapping for the host ports\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n.. code-block:: bash\n\n$ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30\n7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b\n\n.. code-block:: python\n\n>>> cli.port('7174d6347063', 80)\n[{'HostIp': '0.0.0.0', 'HostPort': '80'}]", "source": "codesearchnet"}
{"code": "def passive_mode(self) -> Tuple[(str, int)]:\n    (yield from self._control_stream.write_command(Command('PASV')))\n    reply = (yield from self._control_stream.read_reply())\n    self.raise_if_not_match('Passive mode', ReplyCodes.entering_passive_mode, reply)\n    try:\n        return wpull.protocol.ftp.util.parse_address(reply.text)\n    except ValueError as error:\n        raise ProtocolError(str(error)) from error", "docstring": "Enable passive mode.\n\nReturns:\nThe address (IP address, port) of the passive port.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def resolve_widget(self, field):\n    if hasattr(field, 'field'):\n        widget = field.field.widget\n    else:\n        widget = field.widget\n    return widget", "docstring": "Given a Field or BoundField, return widget instance.\n\nTodo:\nRaise an exception if given field object does not have a\nwidget.\n\nArguments:\nfield (Field or BoundField): A field instance.\n\nReturns:\ndjango.forms.widgets.Widget: Retrieved widget from given field.", "source": "codesearchnet"}
{"code": "def _ParseValueData(self, knowledge_base, value_data):\n    \n    if not isinstance(value_data, py2to3.UNICODE_TYPE):\n      raise errors.PreProcessFail(\n          'Unsupported Windows Registry value type: {0:s} for '\n          'artifact: {1:s}.'.format(\n              type(value_data), self.ARTIFACT_DEFINITION_NAME))\n\n    \n    lookup_key = value_data.replace(' ', '')\n\n    time_zone = time_zones.TIME_ZONES.get(lookup_key, value_data)\n    \n    if time_zone:\n      try:\n        \n        knowledge_base.SetTimeZone(time_zone)\n      except ValueError:\n        \n        time_zone = value_data\n        logger.warning('Unable to map: \"{0:s}\" to time zone'.format(\n            value_data))", "docstring": "Parses Windows Registry value data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nvalue_data (object): Windows Registry value data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def __init__(self, scores=None, classes=None):\n    if scores is not None and (not (isinstance(scores, tensor.Tensor) and scores.dtype.is_floating)):\n        raise ValueError('Classification scores must be a float32 Tensor; got {}'.format(scores))\n    if classes is not None and (not (isinstance(classes, tensor.Tensor) and dtypes.as_dtype(classes.dtype) == dtypes.string)):\n        raise ValueError('Classification classes must be a string Tensor; got {}'.format(classes))\n    if scores is None and classes is None:\n        raise ValueError('At least one of scores and classes must be set.')\n    self._scores = scores\n    self._classes = classes", "docstring": "Constructor for `ClassificationOutput`.\n\nArgs:\nscores: A float `Tensor` giving scores (sometimes but not always\ninterpretable as probabilities) for each class.  May be `None`, but\nonly if `classes` is set.  Interpretation varies-- see class doc.\nclasses: A string `Tensor` giving predicted class labels.  May be `None`,\nbut only if `scores` is set.  Interpretation varies-- see class doc.\n\nRaises:\nValueError: if neither classes nor scores is set, or one of them is not a\n`Tensor` with the correct dtype.", "source": "github-repos"}
{"code": "def install_exception_handler(handler):\n    if (not isinstance(handler, ExceptionHandler)):\n        raise TypeError(('handler of type %s does not inherit from ExceptionHandler' % type(handler)))\n    EXCEPTION_HANDLERS.append(handler)", "docstring": "Installs an exception handler.\n\nArgs:\nhandler: ExceptionHandler, the exception handler to install.\n\nRaises:\nTypeError: Raised when the handler was not of the correct type.\n\nAll installed exception handlers will be called if main() exits via\nan abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,\nFlagsError or UsageError.", "source": "codesearchnet"}
{"code": "def delete(self, location):\n        \n        bucket = self.info['bucket']\n        prefix = self.info['prefix']\n\n        self.logger.debug('Connecting to S3')\n        s3conn = self.client \n\n        \n        if location[0] == '/':\n            location = location[1:]\n        if location[-1] == '/':\n            location = location[:-2]\n\n        self.logger.debug('Deleting contents')\n\n        for s3key in s3conn.list_objects(Bucket=bucket, Prefix=(prefix+'/'+location))['Contents']:\n            s3conn.delete_object(Bucket=bucket, Key=s3key['Key'])\n\n        self.logger.debug('Done!')", "docstring": "Delete content in bucket/prefix/location.\nLocation can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)\nIf location is a directory, all files in the directory are deleted.\nIf it is a file, then that file is deleted.\n\nArgs:\nlocation (str): S3 location within prefix. Can be a directory or\na file (e.g., my_dir or my_dir/my_image.tif).", "source": "juraj-google-style"}
{"code": "def SampleTaskStatus(self, task, status):\n    \n    if self._tasks_profiler:\n      self._tasks_profiler.Sample(task, status)", "docstring": "Takes a sample of the status of the task for profiling.\n\nArgs:\ntask (Task): a task.\nstatus (str): status.", "source": "juraj-google-style"}
{"code": "def Histograms(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.Histograms(tag)", "docstring": "Retrieve the histogram events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.HistogramEvents`.", "source": "codesearchnet"}
{"code": "def first(series, order_by=None):\n    \n\n    if order_by is not None:\n        series = order_series_by(series, order_by)\n    first_s = series.iloc[0]\n    return first_s", "docstring": "Returns the first value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.\n\nKwargs:\norder_by: a pandas.Series or list of series (can be symbolic) to order\nthe input series by before summarization.", "source": "juraj-google-style"}
{"code": "def set_pyftpsync_logger(logger=True):\n    \n    global _logger\n    prev_logger = _logger\n    if logger is True:\n        logging.basicConfig(level=logging.INFO)\n        _logger = logging.getLogger(\"pyftpsync\")\n        _logger.setLevel(logging.DEBUG)\n    else:\n        _logger = logger\n    return prev_logger", "docstring": "Define target for common output.\n\nArgs:\nlogger (bool | None | logging.Logger):\nPass None to use `print()` to stdout instead of logging.\nPass True to create a simple standard logger.", "source": "juraj-google-style"}
{"code": "def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None):\n    \n    if (cls._compressed_stream_remainder_list is None or\n        cls._compressed_stream_store is None):\n      specification_store, remainder_list = cls._GetSpecificationStore(\n          definitions.FORMAT_CATEGORY_COMPRESSED_STREAM)\n      cls._compressed_stream_remainder_list = remainder_list\n      cls._compressed_stream_store = specification_store\n\n    if cls._compressed_stream_scanner is None:\n      cls._compressed_stream_scanner = cls._GetSignatureScanner(\n          cls._compressed_stream_store)\n\n    return cls._GetTypeIndicators(\n        cls._compressed_stream_scanner, cls._compressed_stream_store,\n        cls._compressed_stream_remainder_list, path_spec,\n        resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported compressed stream types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "juraj-google-style"}
{"code": "def _md5_file(fn, block_size=1048576):\n    h = hashlib.md5()\n    with open(fn) as fp:\n        d = 1\n        while d:\n            d = fp.read(block_size)\n            h.update(d)\n    return h.hexdigest()", "docstring": "Builds the MD5 of a file block by block\n\nArgs:\nfn: File path\nblock_size: Size of the blocks to consider (default 1048576)\n\nReturns:\nFile MD5", "source": "codesearchnet"}
{"code": "def last_checkpoints(self):\n    return list((self._CheckpointFilename(p) for p in self._last_checkpoints))", "docstring": "List of not-yet-deleted checkpoint filenames.\n\nYou can pass any of the returned values to `restore()`.\n\nReturns:\nA list of checkpoint filenames, sorted from oldest to newest.", "source": "github-repos"}
{"code": "def __init__(self, columns: list[str], split_string_by_delimiter: Optional[str]=None, *, ngram_range: tuple[int, int]=(1, 1), ngrams_separator: Optional[str]=None, name: Optional[str]=None):\n    super().__init__(columns)\n    self.ngram_range = ngram_range\n    self.ngrams_separator = ngrams_separator\n    self.name = name\n    self.split_string_by_delimiter = split_string_by_delimiter\n    if ngram_range != (1, 1) and (not ngrams_separator):\n        raise ValueError('ngrams_separator must be specified when ngram_range is not (1, 1)')", "docstring": "An n-gram is a contiguous sequence of n items from a given sample of text\nor speech. This operation applies an n-gram transformation to\nspecified columns of incoming data, splitting the input data into a\nset of consecutive n-grams.\n\nArgs:\ncolumns: A list of column names to apply the transformation on.\nsplit_string_by_delimiter: (Optional) A string that specifies the\ndelimiter to split the input strings before computing ngrams.\nngram_range: A tuple of integers(inclusive) specifying the range of\nn-gram sizes.\nngrams_separator: A string that will be inserted between each ngram.\nname: A name for the operation (optional).", "source": "github-repos"}
{"code": "def allsplit(self, x, mesh_axis, split_axis, which=None):\n    if (which is None):\n        which = self.laid_out_pcoord(mesh_axis)\n    num_splits = self.shape[mesh_axis].size\n\n    def my_fn(x, which):\n        slice_begin = [(((dimsize \n        slice_size = [((dimsize \n        return tf.slice(x, slice_begin, slice_size)\n    return self.slicewise(my_fn, x, which)", "docstring": "Inverse of allconcat - split each slice and keep only one piece of it.\n\nThe number of ways to split is the number of processors in the group.\nThe part that is kept corresponds to the processor's index in the group.\n\nArgs:\nx: LaidOutTensor.\nmesh_axis: int, the mesh axis along which to split.\nsplit_axis: int, the Tensor axis along which to split.\nwhich: an optional LaidOutTensor of integer scalars. Selects the slice to\nto keep, instead of the coordinate.\n\nReturns:\nLaidOutTensor.", "source": "codesearchnet"}
{"code": "def emit_tree_format(tree, verbose=False):\n    if verbose:\n        print(('Converting: ' + repr(tree)))\n    ret_str = __recursive_formatter(tree)\n    return ret_str", "docstring": "Returns a tree representation of a parse tree.\n\nArguments:\ntree:           the parse tree whose tree representation is to be generated\nverbose (bool): if True prints the parse tree to be formatted\n\nReturns:\nstr:  tree-like representation of the parse tree", "source": "codesearchnet"}
{"code": "def _validate(self):\n    errors = []\n    for k in self._defaults.keys():\n        try:\n            validator = self._defaults[k]['validator']\n            if (validator is not None):\n                self[k] = validator(self[k])\n        except ValueError as e:\n            errors.append('\\t{}: {}'.format(k, six.text_type(e)))\n    if errors:\n        raise ValueError('Invalid configuration values were set: \\n{}'.format('\\n'.join(errors)))", "docstring": "Run the validators found in self._defaults on all the corresponding values.\n\nRaises:\nValueError: If the configuration contains an invalid configuration value.", "source": "codesearchnet"}
{"code": "def _load_chunk(dat_path, cat_path, info_path):\n  \n  dat_array = read_binary_matrix(dat_path)\n  \n  \n  dat_array = np.expand_dims(dat_array, -1)\n\n  cat_array = read_binary_matrix(cat_path)\n\n  info_array = read_binary_matrix(info_path)\n  info_array = np.copy(info_array)  \n  \n  info_array[:, 2] = info_array[:, 2] / 2\n\n  return dat_array, cat_array, info_array", "docstring": "Loads a data chunk as specified by the paths.\n\nArgs:\ndat_path: Path to dat file of the chunk.\ncat_path: Path to cat file of the chunk.\ninfo_path: Path to info file of the chunk.\n\nReturns:\nTuple with the dat, cat, info_arrays.", "source": "juraj-google-style"}
{"code": "def _CheckAttribute(self, attribute, value):\n    if (not isinstance(attribute, Attribute)):\n        raise AttributeError(('Attribute %s must be of type aff4.Attribute()' % attribute))\n    if (not isinstance(value, attribute.attribute_type)):\n        raise ValueError(('Value for attribute %s must be of type %s()' % (attribute, attribute.attribute_type.__name__)))", "docstring": "Check that the value is of the expected type.\n\nArgs:\nattribute: An instance of Attribute().\nvalue: An instance of RDFValue.\n\nRaises:\nValueError: when the value is not of the expected type.\nAttributeError: When the attribute is not of type Attribute().", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    names_key = registry_key.GetSubkeyByName('Names')\n    if not names_key:\n      parser_mediator.ProduceExtractionWarning('missing subkey: Names.')\n      return\n\n    last_written_time_per_username = {\n        registry_value.name: registry_value.last_written_time\n        for registry_value in names_key.GetSubkeys()}\n\n    for subkey in registry_key.GetSubkeys():\n      if subkey.name == 'Names':\n        continue\n\n      try:\n        f_value = self._ParseFValue(subkey)\n      except errors.ParseError as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse F value with error: {0!s}'.format(exception))\n        continue\n\n      registry_value = subkey.GetValueByName('V')\n      if not registry_value:\n        parser_mediator.ProduceExtractionWarning(\n            'missing Registry value: \"V\" in subkey: {0:s}.'.format(\n                subkey.name))\n        continue\n\n      v_value_map = self._GetDataTypeMap('v_value')\n\n      try:\n        v_value = self._ReadStructureFromByteStream(\n            registry_value.data, 0, v_value_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse V value with error: {0!s}'.format(exception))\n        continue\n\n      username = self._ParseVValueString(\n          parser_mediator, registry_value.data, v_value[1])\n\n      fullname = self._ParseVValueString(\n          parser_mediator, registry_value.data, v_value[2])\n\n      comments = self._ParseVValueString(\n          parser_mediator, registry_value.data, v_value[3])\n\n      last_written_time = last_written_time_per_username.get(username, None)\n\n      \n\n      if last_written_time:\n        values_dict = {\n            'account_rid': f_value.rid,\n            'login_count': f_value.number_of_logons}\n\n        if username:\n          values_dict['username'] = username\n        if fullname:\n          values_dict['full_name'] = fullname\n        if comments:\n          values_dict['comments'] = comments\n\n        event_data = windows_events.WindowsRegistryEventData()\n        event_data.key_path = registry_key.path\n        event_data.regvalue = values_dict\n        event_data.source_append = self._SOURCE_APPEND\n\n        event = time_events.DateTimeValuesEvent(\n            last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      event_data = SAMUsersWindowsRegistryEventData()\n      event_data.account_rid = f_value.rid\n      event_data.comments = comments\n      event_data.fullname = fullname\n      event_data.key_path = registry_key.path\n      event_data.login_count = f_value.number_of_logons\n      event_data.username = username\n\n      if f_value.last_login_time != 0:\n        date_time = dfdatetime_filetime.Filetime(\n            timestamp=f_value.last_login_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_LOGIN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      if f_value.last_password_set_time != 0:\n        date_time = dfdatetime_filetime.Filetime(\n            timestamp=f_value.last_password_set_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET)\n        parser_mediator.ProduceEventWithEventData(event, 
event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def get_factors_iterative2(n):\n    \n\n    ans, stack, x = [], [], 2\n    while True:\n        if x > n \n            if not stack:\n                return ans\n            ans.append(stack + [n])\n            x = stack.pop()\n            n *= x\n            x += 1\n        elif n % x == 0:\n            stack.append(x)\n            n \n        else:\n            x += 1", "docstring": "[summary]\nanalog as above\n\nArguments:\nn {[int]} -- [description]\n\nReturns:\n[list of lists] -- [all factors of n]", "source": "juraj-google-style"}
{"code": "def serialize_to_transport(self, doc_format=\"xml\", *args, **kwargs):\n        \n        return super(ResourceMap, self).serialize(\n            format=doc_format, encoding=\"utf-8\", *args, **kwargs\n        )", "docstring": "Serialize ResourceMap to UTF-8 encoded XML document.\n\nArgs:\ndoc_format: str\nOne of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,\n``trig`` and ``nquads``.\n\nargs and kwargs:\nOptional arguments forwarded to rdflib.ConjunctiveGraph.serialize().\n\nReturns:\nbytes: UTF-8 encoded XML doc.\n\nNote:\nOnly the default, \"xml\", is automatically indexed by DataONE.", "source": "juraj-google-style"}
{"code": "async def find_person(self, query):\n        \n        url = self.url_builder(\n            'search/person',\n            dict(),\n            url_params=OrderedDict([\n                ('query', query), ('include_adult', False)\n            ]),\n        )\n        data = await self.get_data(url)\n        if data is None:\n            return\n        return [\n            Person.from_json(item, self.config['data'].get('images'))\n            for item in data.get('results', [])\n        ]", "docstring": "Retrieve person data by search query.\n\nArguments:\nquery (:py:class:`str`): Query to search for.\n\nReturns:\n:py:class:`list`: Possible matches.", "source": "juraj-google-style"}
{"code": "def Unlock(fd, path):\n    try:\n        fcntl.flock(fd, (fcntl.LOCK_UN | fcntl.LOCK_NB))\n    except IOError as e:\n        if (e.errno == errno.EWOULDBLOCK):\n            raise IOError(('Exception unlocking %s. Locked by another process.' % path))\n        else:\n            raise IOError(('Exception unlocking %s. %s.' % (path, str(e))))", "docstring": "Release the lock on the file.\n\nArgs:\nfd: int, the file descriptor of the file to unlock.\npath: string, the name of the file to lock.\n\nRaises:\nIOError, raised from flock while attempting to release a file lock.", "source": "codesearchnet"}
{"code": "def merge_wells(self, right, keys=None):\n        \n        wells = []\n        for w in self:\n            rw = right.get_well(w.uwi)\n            if rw is not None:\n                if keys is None:\n                    keys = list(rw.data.keys())\n                for k in keys:\n                    try:\n                        w.data[k] = rw.data[k]\n                    except:\n                        pass\n            wells.append(w)\n        return Project(wells)", "docstring": "Returns a new Project object containing wells from self where\ncurves from the wells on the right have been added. Matching between\nwells in self and right is based on uwi match and ony wells in self\nare considered\n\nArgs:\nuwi (string): the UWI string for the well.\n\nReturns:\nproject", "source": "juraj-google-style"}
{"code": "def __type_to_tag(self, type_: Type) -> str:\n    if (type_ in scalar_type_to_tag):\n        return scalar_type_to_tag[type_]\n    if is_generic_list(type_):\n        return 'tag:yaml.org,2002:seq'\n    if is_generic_dict(type_):\n        return 'tag:yaml.org,2002:map'\n    if (type_ in self._registered_classes.values()):\n        return '!{}'.format(type_.__name__)\n    raise RuntimeError('Unknown type {} in type_to_tag, please report a YAtiML bug.'.format(type_))", "docstring": "Convert a type to the corresponding YAML tag.\n\nArgs:\ntype_: The type to convert\n\nReturns:\nA string containing the YAML tag.", "source": "codesearchnet"}
{"code": "def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')\n    if self.has_reshape():\n        input_shape = input_tensor.shape\n        if len(input_shape) == 3:\n            reshape_shape = (input_shape[0], -1, self.bias_size)\n        else:\n            reshape_shape = (-1, self.bias_size)\n        out = array_ops.reshape(out, reshape_shape)\n    if self.has_bias():\n        if self.use_biasadd:\n            out = nn_ops.bias_add(out, self.bias)\n        else:\n            out = math_ops.add_v2(out, self.bias)\n    if self.activation_fn is not None:\n        out = self.activation_fn(out)\n    return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nDepending on self.has_bias and self.activation_fn, it may add a bias\nterm or\ngo through the activaction function.\n\nArgs:\ninput_tensor: Input tensor to matmul with the filter.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def creating_schema_and_index(self, models, func):\n    waiting_models = []\n    self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads)\n    if waiting_models:\n        print('WAITING MODELS ARE CHECKING...')\n        self.creating_schema_and_index(waiting_models, func)", "docstring": "Executes given functions with given models.\n\nArgs:\nmodels: models to execute\nfunc: function name to execute\n\nReturns:", "source": "codesearchnet"}
{"code": "def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n        \n        \n        countriesdata = cls.countriesdata(use_live=use_live)\n        country = countriesdata['countries'].get(iso3.upper())\n        if country is not None:\n            return country\n\n        if exception is not None:\n            raise exception\n        return None", "docstring": "Get country information from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get country information\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[Dict[str]]: country information", "source": "juraj-google-style"}
{"code": "def image_needs_building(image):\n    \n    d = docker_client()\n\n    \n    try:\n        d.images.get(image)\n    except docker.errors.ImageNotFound:\n        \n        pass\n    else:\n        \n        return False\n\n    \n    return image_needs_pushing(image)", "docstring": "Return whether an image needs building\n\nChecks if the image exists (ignores commit range),\neither locally or on the registry.\n\nArgs:\n\nimage (str): the `repository:tag` image to be build.\n\nReturns:\n\nTrue: if image needs to be built\nFalse: if not (image already exists)", "source": "juraj-google-style"}
{"code": "def md2tvd(self, kind='linear'):\n        \n        if self.position is None:\n            return lambda x: x\n        return interp1d(self.md, self.tvd,\n                        kind=kind,\n                        assume_sorted=True,\n                        fill_value=\"extrapolate\",\n                        bounds_error=False)", "docstring": "Provides an transformation and interpolation function that converts\nMD to TVD.\n\nArgs:\nkind (str): The kind of interpolation to do, e.g. 'linear',\n'cubic', 'nearest'.\n\nReturns:\nfunction.", "source": "juraj-google-style"}
{"code": "def provider(func=None, *, singleton=False, injector=None):\n\n    def decorator(func):\n        wrapped = _wrap_provider_func(func, {'singleton': singleton})\n        if injector:\n            injector.register_provider(wrapped)\n        return wrapped\n    if func:\n        return decorator(func)\n    return decorator", "docstring": "Decorator to mark a function as a provider.\n\nArgs:\nsingleton (bool): The returned value should be a singleton or shared\ninstance. If False (the default) the provider function will be\ninvoked again for every time it's needed for injection.\ninjector (Injector): If provided, the function is immediately\nregistered as a provider with the injector instance.\n\nExample:\n@diay.provider(singleton=True)\ndef myfunc() -> MyClass:\nreturn MyClass(args)", "source": "codesearchnet"}
{"code": "class XLMSQuADHead(nn.Module):\n\n    def __init__(self, config: XLMConfig):\n        super().__init__()\n        self.start_n_top = config.start_n_top\n        self.end_n_top = config.end_n_top\n        self.start_logits = XLMPoolerStartLogits(config)\n        self.end_logits = XLMPoolerEndLogits(config)\n        self.answer_class = XLMPoolerAnswerClass(config)\n\n    @auto_docstring\n    def forward(self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None, is_impossible: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None, return_dict: bool=False) -> Union[XLMSquadHeadOutput, Tuple[torch.FloatTensor]]:\n        \n        start_logits = self.start_logits(hidden_states, p_mask=p_mask)\n        if start_positions is not None and end_positions is not None:\n            for x in (start_positions, end_positions, cls_index, is_impossible):\n                if x is not None and x.dim() > 1:\n                    x.squeeze_(-1)\n            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)\n            loss_fct = CrossEntropyLoss()\n            start_loss = loss_fct(start_logits, start_positions)\n            end_loss = loss_fct(end_logits, end_positions)\n            total_loss = (start_loss + end_loss) / 2\n            if cls_index is not None and is_impossible is not None:\n                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)\n                loss_fct_cls = nn.BCEWithLogitsLoss()\n                cls_loss = loss_fct_cls(cls_logits, is_impossible)\n                total_loss += cls_loss * 0.5\n            return XLMSquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)\n        else:\n            bsz, slen, hsz = hidden_states.size()\n            start_log_probs = nn.functional.softmax(start_logits, dim=-1)\n            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1)\n            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)\n            start_states = torch.gather(hidden_states, -2, start_top_index_exp)\n            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)\n            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states)\n            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None\n            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)\n            end_log_probs = nn.functional.softmax(end_logits, dim=1)\n            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1)\n            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)\n            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)\n            start_states = torch.einsum('blh,bl->bh', hidden_states, start_log_probs)\n            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)\n            if not return_dict:\n                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)\n            else:\n                return XLMSquadHeadOutput(start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits)", "docstring": "A 
SQuAD head inspired by XLNet.\n\nArgs:\nconfig ([`XLMConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`\nto use.", "source": "github-repos"}
{"code": "def create_local_copy(self, effects=None, store=None):\n        \n        effects = self._build_effects(effects)\n        store = store or ''\n        data = {\n            'source': self.cdn_path(effects)\n        }\n        if store:\n            data['store'] = store\n        return rest_request('POST', 'files/', data=data)", "docstring": "Creates a Local File Copy on Uploadcare Storage.\n\nArgs:\n- effects:\nAdds CDN image effects. If ``self.default_effects`` property\nis set effects will be combined with default effects.\n- store:\nIf ``store`` option is set to False the copy of your file will\nbe deleted in 24 hour period after the upload.\nWorks only if `autostore` is enabled in the project.", "source": "juraj-google-style"}
{"code": "def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False, **kwargs):\n    if ((size is None) and (ratio is None)):\n        raise ValueError('expected size or ratio must be specified')\n    elif ((size is not None) and (ratio is not None)):\n        raise ValueError('size and ratio cannot be specified at the same time')\n    options = {'log_level': log_level}\n    if size:\n        if (not keep_ar):\n            options['vf'] = 'scale={}:{}'.format(size[0], size[1])\n        else:\n            options['vf'] = 'scale=w={}:h={}:force_original_aspect_ratio=decrease'.format(size[0], size[1])\n    else:\n        if (not isinstance(ratio, tuple)):\n            ratio = (ratio, ratio)\n        options['vf'] = 'scale=\"trunc(iw*{}):trunc(ih*{})\"'.format(ratio[0], ratio[1])\n    convert_video(in_file, out_file, print_cmd, **options)", "docstring": "Resize a video.\n\nArgs:\nin_file (str): Input video filename.\nout_file (str): Output video filename.\nsize (tuple): Expected size (w, h), eg, (320, 240) or (320, -1).\nratio (tuple or float): Expected resize ratio, (2, 0.5) means\n(w*2, h*0.5).\nkeep_ar (bool): Whether to keep original aspect ratio.\nlog_level (str): Logging level of ffmpeg.\nprint_cmd (bool): Whether to print the final ffmpeg command.", "source": "codesearchnet"}
{"code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs)\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_numpy_compatible or dtype == dtypes.string:\n        raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return array_ops.zeros(shape, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\nsupported.\n**kwargs: Additional keyword arguments.\n\nRaises:\nValuesError: If the dtype is not numeric or boolean.", "source": "github-repos"}
{"code": "class SessionRunValues(collections.namedtuple('SessionRunValues', ['results', 'options', 'run_metadata'])):", "docstring": "Contains the results of `Session.run()`.\n\nIn the future we may use this object to add more information about result of\nrun without changing the Hook API.\n\nArgs:\nresults: The return values from `Session.run()` corresponding to the fetches\nattribute returned in the RunArgs. Note that this has the same shape as\nthe RunArgs fetches.  For example:\nfetches = global_step_tensor\n=> results = nparray(int)\nfetches = [train_op, summary_op, global_step_tensor]\n=> results = [None, nparray(string), nparray(int)]\nfetches = {'step': global_step_tensor, 'summ': summary_op}\n=> results = {'step': nparray(int), 'summ': nparray(string)}\noptions: `RunOptions` from the `Session.run()` call.\nrun_metadata: `RunMetadata` from the `Session.run()` call.", "source": "github-repos"}
{"code": "def write_payload(payload=None, objectInput=None):\n    \n\n    temp = tempfile.mkstemp()[1]\n    log.debug(\"Write payload in temp file {!r}\".format(temp))\n\n    with open(temp, 'wb') as f:\n        if payload:\n            payload = base64.b64decode(payload)\n        elif objectInput:\n            if six.PY3:\n                payload = objectInput.buffer.read()\n            elif six.PY2:\n                payload = objectInput.read()\n\n        f.write(payload)\n\n    return temp", "docstring": "This function writes a base64 payload or file object on disk.\n\nArgs:\npayload (string): payload in base64\nobjectInput (object): file object/standard input to analyze\n\nReturns:\nPath of file", "source": "juraj-google-style"}
{"code": "def _factored_dims(self, shape):\n    if ((not self._factored) or (shape.ndims < 2)):\n        return None\n    sorted_dims = sorted(shape.dims, key=(lambda d: (- d.size)))\n    if (sorted_dims[1].size < self._min_dim_size_to_factor):\n        return None\n    return sorted_dims[:2]", "docstring": "Should we use a factored second moment estimator.\n\nBased on the shape of the variable.\nIf we factor the accumulator, then this function returns a list of two\nmtf.Dimensions to reduce over.  We always pick the two largest dimensions.\nIf there are not two dimensions of size >= min_dim_size_to_factor, then we\ndo not factor.\n\nArgs:\nshape: a Shape\nReturns:\neither a list of 2 Dimensions or None", "source": "codesearchnet"}
{"code": "def _set_options_from_file(self, file_handle):\n        \n\n        \n        \n        \n\n        \n        options = []\n\n        \n        line_number = 0\n\n        \n        section = None\n        for line in file_handle.read().splitlines():\n            line_number += 1\n\n            \n            orig_line = line\n            line = line.strip()\n\n            \n            if not line or line.startswith('\n                continue\n\n            \n            \n            if line.startswith('[') and line.endswith(']'):\n                section = line.strip('[]')\n                continue\n\n            \n            if not section:\n                raise ValueError(\n                    'Unable to parse unit file; '\n                    'Unexpected line outside of a section: {0} (line: {1}'.format(\n                        line,\n                        line_number\n                    ))\n\n            \n            \n            \n            continuation = False\n            try:\n                    \n                    \n                    if options[-1]['value'].endswith('\\\\'):\n                        options[-1]['value'] = options[-1]['value'][:-1]\n                        continuation = True\n            except IndexError:\n                    pass\n\n            try:\n                \n                if continuation:\n                    options[-1]['value'] += orig_line\n                    continue\n\n                \n                name, value = line.split('=', 1)\n                options.append({\n                    'section': section,\n                    'name': name,\n                    'value': value\n                })\n            except ValueError:\n                raise ValueError(\n                    'Unable to parse unit file; '\n                    'Malformed line in section {0}: {1} (line: {2})'.format(\n                        section,\n                        line,\n                        line_number\n                    ))\n\n        \n        self._data['options'] = options\n\n        return True", "docstring": "Parses a unit file and updates self._data['options']\n\nArgs:\nfile_handle (file): a file-like object (supporting read()) containing a unit\n\nReturns:\nTrue: The file was successfuly parsed and options were updated\n\nRaises:\nIOError: from_file was specified and it does not exist\nValueError: The unit contents specified in from_string or from_file is not valid", "source": "juraj-google-style"}
{"code": "def short(cls, path):\n        \n        if not path:\n            return path\n\n        path = str(path)\n        if cls.paths:\n            for p in cls.paths:\n                if p:\n                    path = path.replace(p + \"/\", \"\")\n\n        path = path.replace(cls.home, \"~\")\n        return path", "docstring": "Example:\nshort(\"examined /Users/joe/foo\") => \"examined ~/foo\"\n\nArgs:\npath: Path to represent in its short form\n\nReturns:\n(str): Short form, using '~' if applicable", "source": "juraj-google-style"}
{"code": "def _previous_block_never_completed(self, current_block, previous_block, new_state):\n    if previous_block:\n        previously_timing_block = previous_block.status_code in _InstrumentationStatusCodeCategories.TIMING\n        currently_new_block = current_block.status_code == _InstrumentationStatusCodes.START or new_state == _InstrumentationBlockStates.RESULT\n        return all([previously_timing_block, currently_new_block])\n    else:\n        return False", "docstring": "Checks if the previous instrumentation method block completed.\n\nArgs:\ncurrent_block: _InstrumentationBlock, the current instrumentation\nblock to check for being a different instrumentation test\nmethod.\nprevious_block: _InstrumentationBlock, rhe previous\ninstrumentation block to check for an incomplete status.\nnew_state: _InstrumentationBlockStates, the next state for the\nparser, used to check for the instrumentation run ending\nwith an incomplete test.\n\nReturns:\nA boolean indicating whether the previous instrumentation block\ncompleted executing.", "source": "github-repos"}
{"code": "def binary_crossentropy(target, output, from_logits=False):\n    target = tf.convert_to_tensor(target)\n    output = tf.convert_to_tensor(output)\n    if len(target.shape) != len(output.shape):\n        raise ValueError(f'Arguments `target` and `output` must have the same rank (ndim). Received: target.shape={target.shape}, output.shape={output.shape}')\n    for e1, e2 in zip(target.shape, output.shape):\n        if e1 is not None and e2 is not None and (e1 != e2):\n            raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}')\n    output, from_logits = _get_logits(output, from_logits, 'Sigmoid', 'binary_crossentropy')\n    if from_logits:\n        return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)\n    output = tf.clip_by_value(output, backend.epsilon(), 1.0 - backend.epsilon())\n    bce = target * tf.math.log(output)\n    bce += (1 - target) * tf.math.log(1 - output)\n    return -bce", "docstring": "Binary crossentropy between an output tensor and a target tensor.\n\nArgs:\ntarget: A tensor with the same shape as `output`.\noutput: A tensor.\nfrom_logits: Whether `output` is expected to be a logits tensor.\nBy default, we consider that `output`\nencodes a probability distribution.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def from_api_repr(cls, resource):\n        \n        etag = resource.get(\"etag\")\n\n        if etag is not None:\n            resource = resource.copy()\n            resource[\"etag\"] = base64.b64decode(etag.encode(\"ascii\"))\n\n        return super(Policy, cls).from_api_repr(resource)", "docstring": "Factory: create a policy from a JSON resource.\n\nOverrides the base class version to store :attr:`etag` as bytes.\n\nArgs:\nresource (dict): JSON policy resource returned by the\n``getIamPolicy`` REST API.\n\nReturns:\n:class:`Policy`: the parsed policy", "source": "juraj-google-style"}
{"code": "def _get_log_file(self, handler):\n    if ('file_name_pattern' not in handler):\n        filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap'\n    else:\n        filename = handler['file_name_pattern']\n    log_file = handler['log_dir']\n    if ('path' in handler):\n        log_file = os.path.join(log_file, handler['path'], filename)\n    else:\n        log_file = os.path.join(log_file, filename)\n    log_file = time.strftime(log_file, time.gmtime())\n    log_file = log_file.format(**handler)\n    return log_file", "docstring": "Generate log file path for a given handler\n\nArgs:\nhandler:\nThe handler configuration dictionary for which a log file\npath should be generated.", "source": "codesearchnet"}
{"code": "def no_selenium_errors(func):\n\n    def _inner(*args, **kwargs):\n        try:\n            return_val = func(*args, **kwargs)\n        except WebDriverException:\n            LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True)\n            return False\n        else:\n            return return_val\n    return _inner", "docstring": "Decorator to create an `EmptyPromise` check function that is satisfied\nonly when `func` executes without a Selenium error.\n\nThis protects against many common test failures due to timing issues.\nFor example, accessing an element after it has been modified by JavaScript\nordinarily results in a `StaleElementException`.  Methods decorated\nwith `no_selenium_errors` will simply retry if that happens, which makes tests\nmore robust.\n\nArgs:\nfunc (callable): The function to execute, with retries if an error occurs.\n\nReturns:\nDecorated function", "source": "codesearchnet"}
{"code": "def from_file(feff_inp_file='feff.inp', ldos_file='ldos'):\n        \n        header_str = Header.header_string_from_file(feff_inp_file)\n        header = Header.from_string(header_str)\n        structure = header.struct\n        nsites = structure.num_sites\n        parameters = Tags.from_file(feff_inp_file)\n\n        if \"RECIPROCAL\" in parameters:\n            pot_dict = dict()\n            pot_readstart = re.compile('.*iz.*lmaxsc.*xnatph.*xion.*folp.*')\n            pot_readend = re.compile('.*ExternalPot.*switch.*')\n            pot_inp = re.sub(r'feff.inp', r'pot.inp', feff_inp_file)\n            dos_index = 1\n            begin = 0\n\n            with zopen(pot_inp, \"r\") as potfile:\n                for line in potfile:\n                    if len(pot_readend.findall(line)) > 0:\n                        break\n\n                    if begin == 1:\n                        begin += 1\n                        continue\n\n                    if begin == 2:\n                        z_number = int(line.strip().split()[0])\n                        ele_name = Element.from_Z(z_number).name\n                        if ele_name not in pot_dict:\n                            pot_dict[ele_name] = dos_index\n                        else:\n                            pot_dict[ele_name] = min(dos_index, pot_dict[ele_name])\n                        dos_index += 1\n\n                    if len(pot_readstart.findall(line)) > 0:\n                        begin = 1\n        else:\n            pot_string = Potential.pot_string_from_file(feff_inp_file)\n            dicts = Potential.pot_dict_from_string(pot_string)\n            pot_dict = dicts[0]\n\n        with zopen(ldos_file + \"00.dat\", \"r\") as fobject:\n            f = fobject.readlines()\n        efermi = float(f[0].split()[4])\n\n        dos_energies = []\n        ldos = {}\n\n        for i in range(1, len(pot_dict) + 1):\n            if len(str(i)) == 1:\n                ldos[i] = np.loadtxt(\"{}0{}.dat\".format(ldos_file, i))\n            else:\n                ldos[i] = np.loadtxt(\"{}{}.dat\".format(ldos_file, i))\n\n        for i in range(0, len(ldos[1])):\n            dos_energies.append(ldos[1][i][0])\n\n        all_pdos = []\n        vorb = {\"s\": Orbital.s, \"p\": Orbital.py, \"d\": Orbital.dxy,\n                \"f\": Orbital.f0}\n        forb = {\"s\": 0, \"p\": 1, \"d\": 2, \"f\": 3}\n\n        dlength = len(ldos[1])\n\n        for i in range(nsites):\n            pot_index = pot_dict[structure.species[i].symbol]\n            all_pdos.append(defaultdict(dict))\n            for k, v in vorb.items():\n                density = [ldos[pot_index][j][forb[k] + 1]\n                           for j in range(dlength)]\n                updos = density\n                downdos = None\n                if downdos:\n                    all_pdos[-1][v] = {Spin.up: updos, Spin.down: downdos}\n                else:\n                    all_pdos[-1][v] = {Spin.up: updos}\n\n        pdos = all_pdos\n        vorb2 = {0: Orbital.s, 1: Orbital.py, 2: Orbital.dxy, 3: Orbital.f0}\n        pdoss = {structure[i]: {v: pdos[i][v]\n                                for v in vorb2.values()}\n                 for i in range(len(pdos))}\n\n        forb = {\"s\": 0, \"p\": 1, \"d\": 2, \"f\": 3}\n\n        tdos = [0] * dlength\n        for i in range(nsites):\n            pot_index = pot_dict[structure.species[i].symbol]\n            for v in forb.values():\n                density = [ldos[pot_index][j][v + 1] for j in range(dlength)]\n                
for j in range(dlength):\n                    tdos[j] = tdos[j] + density[j]\n        tdos = {Spin.up: tdos}\n\n        dos = Dos(efermi, dos_energies, tdos)\n        complete_dos = CompleteDos(structure, dos, pdoss)\n        charge_transfer = LDos.charge_transfer_from_file(feff_inp_file,\n                                                         ldos_file)\n        return LDos(complete_dos, charge_transfer)", "docstring": "Creates LDos object from raw Feff ldos files by\nby assuming they are numbered consecutively, i.e. ldos01.dat\nldos02.dat...\n\nArgs:\nfeff_inp_file (str): input file of run to obtain structure\nldos_file (str): output ldos file of run to obtain dos info, etc.", "source": "juraj-google-style"}
{"code": "def flag(self, diagnostic, thresh=None):\n        \n        if thresh is None:\n            thresh = self.defaults[diagnostic]\n\n        result = self.results[diagnostic]\n        if isinstance(result, pd.DataFrame):\n            if diagnostic == 'CorrelationMatrix':\n                result = result.copy()\n                np.fill_diagonal(result.values, 0)\n            return result.applymap(thresh).sum().nonzero()[0]\n        else:\n            return result.apply(thresh).nonzero()[0]", "docstring": "Returns indices of diagnostic that satisfy (return True from) the\nthreshold predicate. Will use class-level default threshold if\nNone provided.\n\nArgs:\ndiagnostic (str): name of the diagnostic\nthresh (func): threshold function (boolean predicate) to apply to\neach element", "source": "juraj-google-style"}
{"code": "def __init__(self, learning_rate, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='ProximalGradientDescent'):\n    super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._l1_regularization_strength = l1_regularization_strength\n    self._l2_regularization_strength = l2_regularization_strength\n    self._l1_regularization_strength_tensor = None\n    self._l2_regularization_strength_tensor = None", "docstring": "Construct a new proximal gradient descent optimizer.\n\nArgs:\nlearning_rate: A Tensor or a floating point value.  The learning\nrate to use.\nl1_regularization_strength: A float value, must be greater than or\nequal to zero.\nl2_regularization_strength: A float value, must be greater than or\nequal to zero.\nuse_locking: If True use locks for update operations.\nname: Optional name prefix for the operations created when applying\ngradients. Defaults to \"GradientDescent\".", "source": "github-repos"}
{"code": "def is_ordered(cat_id):\n    \n    url = 'https:\n    auth = Auth()\n    r = _req_with_retries(auth.gbdx_connection, url)\n    if r is not None:\n        return r.status_code == 200\n    return False", "docstring": "Checks to see if a CatalogID has been ordered or not.\n\nArgs:\ncatalogID (str): The catalog ID from the platform catalog.\nReturns:\nordered (bool): Whether or not the image has been ordered", "source": "juraj-google-style"}
{"code": "def solid_named(self, name):\n        \n        check.str_param(name, 'name')\n        if name not in self._solid_dict:\n            raise DagsterInvariantViolationError(\n                'Pipeline {pipeline_name} has no solid named {name}.'.format(\n                    pipeline_name=self.name, name=name\n                )\n            )\n        return self._solid_dict[name]", "docstring": "Return the solid named \"name\". Throws if it does not exist.\n\nArgs:\nname (str): Name of solid\n\nReturns:\nSolidDefinition: SolidDefinition with correct name.", "source": "juraj-google-style"}
{"code": "def gpu_devices(devices=None):\n    return find_devices('GPU', devices)", "docstring": "Gets GPU devices out of `devices`.\n\nArgs:\ndevices: A device list (as a list of strings). If None, the list of all\navailable devices will be used for it.\n\nReturns:\nThose in `devices` that are GPUs.", "source": "github-repos"}
{"code": "def sys_save_screenshot(name: Optional[str] = None) -> None:\n    \n    lib.TCOD_sys_save_screenshot(\n        _bytes(name) if name is not None else ffi.NULL\n    )", "docstring": "Save a screenshot to a file.\n\nBy default this will automatically save screenshots in the working\ndirectory.\n\nThe automatic names are formatted as screenshotNNN.png.  For example:\nscreenshot000.png, screenshot001.png, etc.  Whichever is available first.\n\nArgs:\nfile Optional[AnyStr]: File path to save screenshot.", "source": "juraj-google-style"}
{"code": "def suggest(self, query):\n    (res, suggest) = self.search(query, results=1, suggestion=True)\n    try:\n        title = (suggest or res[0])\n    except IndexError:\n        title = None\n    return title", "docstring": "Gather suggestions based on the provided title or None if no\nsuggestions found\n\nArgs:\nquery (str): Page title\nReturns:\nString or None: Suggested page title or **None** if no \\\nsuggestion found", "source": "codesearchnet"}
{"code": "def change(script, layer_num=None):\n    if (layer_num is None):\n        if isinstance(script, mlx.FilterScript):\n            layer_num = script.last_layer()\n        else:\n            layer_num = 0\n    filter_xml = ''.join(['  <filter name=\"Change the current layer\">\\n', '    <Param name=\"mesh\" ', 'value=\"{:d}\" '.format(layer_num), 'description=\"Mesh\" ', 'type=\"RichMesh\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    if isinstance(script, mlx.FilterScript):\n        script.set_current_layer(layer_num)\n    return None", "docstring": "Change the current layer by specifying the new layer number.\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlayer_num (int): the number of the layer to change to. Default is the\nlast layer if script is a mlx.FilterScript object; if script is a\nfilename the default is the first layer.\n\nLayer stack:\nModifies current layer\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def from_file(cls, filename):\n    with zopen(filename) as f:\n        return cls.from_string(f.read())", "docstring": "Read an Fiesta input from a file. Currently tested to work with\nfiles generated from this class itself.\n\nArgs:\nfilename: Filename to parse.\n\nReturns:\nFiestaInput object", "source": "codesearchnet"}
{"code": "def _free_array(self, handle: int):\n    with self._lock:\n        if (self._arrays[handle] is not None):\n            self._arrays[handle] = None\n            self._count -= 1", "docstring": "Frees the memory for the array with the given handle.\n\nArgs:\nhandle: The handle of the array whose memory should be freed. This\nhandle must come from the _create_array method.", "source": "codesearchnet"}
{"code": "def _FormatSocketUnixToken(self, token_data):\n    \n    protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')\n    return {\n        'protocols': protocol,\n        'family': token_data.socket_family,\n        'path': token_data.socket_path}", "docstring": "Formats an Unix socket token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def convert_elementwise_add(\n    params, w_name, scope_name, inputs, layers, weights, names\n):\n    \n    print('Converting elementwise_add ...')\n    if 'broadcast' in params:\n        model0 = layers[inputs[0]]\n        model1 = layers[inputs[1]]\n\n        if names == 'short':\n            tf_name = 'A' + random_string(7)\n        elif names == 'keep':\n            tf_name = w_name\n        else:\n            tf_name = w_name + str(random.random())\n\n        def target_layer(x):\n            layer = tf.add(x[0], x[1])\n            return layer\n\n        lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n        layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])\n    else:\n        model0 = layers[inputs[0]]\n        model1 = layers[inputs[1]]\n\n        if names == 'short':\n            tf_name = 'A' + random_string(7)\n        elif names == 'keep':\n            tf_name = w_name\n        else:\n            tf_name = w_name + str(random.random())\n\n        add = keras.layers.Add(name=tf_name)\n        layers[scope_name] = add([model0, model1])", "docstring": "Convert elementwise addition.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def disconnect_async(self, conn_id, callback):\n        \n\n        try:\n            context = self.conns.get_context(conn_id)\n        except ArgumentError:\n            callback(conn_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))\n\n        topics = context['topics']\n        disconn_message = {'key': context['key'], 'client': self.name, 'type': 'command', 'operation': 'disconnect'}\n\n        self.client.publish(topics.action, disconn_message)", "docstring": "Asynchronously disconnect from a device that has previously been connected\n\nArgs:\nconn_id (int): a unique identifier for this connection on the DeviceManager\nthat owns this adapter.\ncallback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)\nwhen the disconnection finishes.  Disconnection can only either succeed or timeout.", "source": "juraj-google-style"}
{"code": "def create_document(self, doc: Dict, mime_type: str = None, url: str = \"http:\n                        doc_id=None, type_=None) -> Document:\n        \n        return Document(self, doc, mime_type, url, doc_id=doc_id).with_type(type_)", "docstring": "Factory method to wrap input JSON docs in an ETK Document object.\n\nArgs:\ndoc (object): a JSON object containing a document in CDR format.\nmime_type (str): if doc is a string, the mime_type tells what it is\nurl (str): if the doc came from the web, specifies the URL for it\ndoc_id\ntype_\n\nReturns: wrapped Document", "source": "juraj-google-style"}
{"code": "def round(self, decimals=0):\n        \n        return self.__class__(np.round(self, decimals=decimals))", "docstring": "Wrapper around numpy.round to ensure object\nof same type is returned\n\nArgs:\ndecimals :Number of decimal places to round to (default: 0).\nIf decimals is negative, it specifies the number of\npositions to the left of the decimal point.\n\nReturns (Tensor):\nrounded tensor of same type", "source": "juraj-google-style"}
{"code": "def _check_disabled(self):\n    if self.config['check_disabled']:\n        if (self.config['on_disabled'] == 'withdraw'):\n            self.log.info('Check is disabled and ip_prefix will be withdrawn')\n            self.log.info('adding %s in the queue', self.ip_with_prefixlen)\n            self.action.put(self.del_operation)\n            self.log.info('Check is now permanently disabled')\n        elif (self.config['on_disabled'] == 'advertise'):\n            self.log.info('check is disabled, ip_prefix wont be withdrawn')\n            self.log.info('adding %s in the queue', self.ip_with_prefixlen)\n            self.action.put(self.add_operation)\n            self.log.info('check is now permanently disabled')\n        return True\n    return False", "docstring": "Check if health check is disabled.\n\nIt logs a message if health check is disabled and it also adds an item\nto the action queue based on 'on_disabled' setting.\n\nReturns:\nTrue if check is disabled otherwise False.", "source": "codesearchnet"}
{"code": "def set_task(project_, task_):\n    \n    global project, task\n    project = project_\n    task = task_\n    msg.okay(\"Set project name to {}.{}\".format(project, task), 2)", "docstring": "Sets the active project and task. All subsequent logging will be saved to\nthe database with that project and task.\n\nArgs:\nproject_ (str): active project name; a project can have multiple tasks.\ntask_ (str): active task name. Logging is separated at the project and task\nlevel.", "source": "juraj-google-style"}
{"code": "def ch_start_time(self, *channels: List[Channel]) -> int:\n        \n        intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n                                           if chan in self._table)))\n        if intervals:\n            return min((interval.begin for interval in intervals))\n        return 0", "docstring": "Return earliest start time in this collection.\n\nArgs:\n*channels: Channels over which to obtain start_time.", "source": "juraj-google-style"}
{"code": "def _to_bfloat16_unbiased(x, noise):\n    x_sign = tf.sign(x)\n    x = ((x * x_sign) + 1e-30)\n    cand1 = tf.to_bfloat16(x)\n    cand1_f = tf.to_float(cand1)\n    cand2 = tf.to_bfloat16(tf.where(tf.greater(x, cand1_f), (cand1_f * 1.005), (cand1_f * 0.995)))\n    ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)\n    return (ret * tf.to_bfloat16(x_sign))", "docstring": "Convert a float32 to a bfloat16 using randomized roundoff.\n\nArgs:\nx: A float32 Tensor.\nnoise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)\nReturns:\nA float32 Tensor.", "source": "codesearchnet"}
{"code": "def get_layer_policy(layer):\n    if not isinstance(layer, base_layer.Layer):\n        raise ValueError('get_policy can only be called on a layer, but got: %s' % (layer,))\n    return layer.dtype_policy", "docstring": "Returns the dtype policy of a layer.\n\nWarning: This function is deprecated. Use\n`tf.keras.layers.Layer.dtype_policy` instead.\n\nArgs:\nlayer: A `tf.keras.layers.Layer`.\n\nReturns:\nThe `tf.keras.mixed_precision.Policy` of the layer.", "source": "github-repos"}
{"code": "def __init__(self, expression, options=None, **kwargs):\n        \n        if options is None:\n            options = Options()\n        self._expression = expression\n        self._options = options\n        self._expression_parts = []\n        self._parsed = False\n\n        \n        for kwarg in kwargs:\n            if hasattr(self._options, kwarg):\n                setattr(self._options, kwarg, kwargs[kwarg])\n            else:\n                raise WrongArgumentException(\n                    \"Unknow {} configuration argument\".format(kwarg))\n\n        \n        GetText(options.locale_code)", "docstring": "Initializes a new instance of the ExpressionDescriptorclass\n\nArgs:\nexpression: The cron expression string\noptions: Options to control the output description\nRaises:\nWrongArgumentException: if kwarg is unknow", "source": "juraj-google-style"}
{"code": "def put_async(self, path, value):\n    request = Put(self._get_next_id(), path, value)\n    request.set_callback(self._q.put)\n    future = self._dispatch_request(request)\n    return future", "docstring": "Puts a value to a path and returns immediately\n\nArgs:\npath (list): The path to put to\nvalue (object): The value to set\n\nReturns:\nFuture: A single Future which will resolve to the result", "source": "codesearchnet"}
{"code": "def trace_flush(self):\n    cmd = enums.JLinkTraceCommand.FLUSH\n    res = self._dll.JLINKARM_TRACE_Control(cmd, 0)\n    if (res == 1):\n        raise errors.JLinkException('Failed to flush the trace buffer.')\n    return None", "docstring": "Flushes the trace buffer.\n\nAfter this method is called, the trace buffer is empty.  This method is\nbest called when the device is reset.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def get_edgestore_handle(client: arango.client.ArangoClient, username=None, password=None, edgestore_db_name: str=edgestore_db_name, edgestore_edges_name: str=edgestore_edges_name, edgestore_nodes_name: str=edgestore_nodes_name, edgestore_pipeline_name: str=edgestore_pipeline_name, edgestore_pipeline_stats_name: str=edgestore_pipeline_stats_name, edgestore_pipeline_errors_name: str=edgestore_pipeline_errors_name) -> arango.database.StandardDatabase:\n    (username, password) = get_user_creds(username, password)\n    sys_db = client.db('_system', username=username, password=password)\n    try:\n        if (username and password):\n            edgestore_db = sys_db.create_database(name=edgestore_db_name, users=[{'username': username, 'password': password, 'active': True}])\n        else:\n            edgestore_db = sys_db.create_database(name=edgestore_db_name)\n    except arango.exceptions.DatabaseCreateError:\n        if (username and password):\n            edgestore_db = client.db(edgestore_db_name, username=username, password=password)\n        else:\n            edgestore_db = client.db(edgestore_db_name)\n    try:\n        nodes = edgestore_db.create_collection(edgestore_nodes_name, index_bucket_count=64)\n        nodes.add_hash_index(fields=['name'], unique=False)\n        nodes.add_hash_index(fields=['components'], unique=False)\n    except Exception:\n        pass\n    try:\n        edges = edgestore_db.create_collection(edgestore_edges_name, edge=True, index_bucket_count=64)\n        edges.add_hash_index(fields=['relation'], unique=False)\n        edges.add_hash_index(fields=['edge_types'], unique=False)\n        edges.add_hash_index(fields=['nanopub_id'], unique=False)\n        edges.add_hash_index(fields=['metadata.project'], unique=False)\n        edges.add_hash_index(fields=['annotations[*].id'], unique=False)\n    except Exception:\n        pass\n    try:\n        edgestore_db.create_collection(edgestore_pipeline_name)\n    except Exception:\n        pass\n    try:\n        edgestore_db.create_collection(edgestore_pipeline_errors_name)\n    except Exception:\n        pass\n    try:\n        edgestore_db.create_collection(edgestore_pipeline_stats_name)\n    except arango.exceptions.CollectionCreateError as e:\n        pass\n    return edgestore_db", "docstring": "Get Edgestore arangodb database handle\n\nArgs:\nclient (arango.client.ArangoClient): Description\nusername (None, optional): Description\npassword (None, optional): Description\nedgestore_db_name (str, optional): Description\nedgestore_edges_name (str, optional): Description\nedgestore_nodes_name (str, optional): Description\n\nReturns:\narango.database.StandardDatabase: Description", "source": "codesearchnet"}
{"code": "def __setRouterSelectionJitter(self, iRouterJitter):\n        \n        print 'call _setRouterSelectionJitter'\n        try:\n            cmd = 'routerselectionjitter %s' % str(iRouterJitter)\n            print cmd\n            return self.__sendCommand(cmd) == 'Done'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"setRouterSelectionJitter() Error: \" + str(e))", "docstring": "set ROUTER_SELECTION_JITTER parameter for REED to upgrade to Router\n\nArgs:\niRouterJitter: a random period prior to request Router ID for REED\n\nReturns:\nTrue: successful to set the ROUTER_SELECTION_JITTER\nFalse: fail to set ROUTER_SELECTION_JITTER", "source": "juraj-google-style"}
{"code": "def Skew(poly, dist=None, **kws):\n    if isinstance(poly, distributions.Dist):\n        x = polynomials.variable(len(poly))\n        (poly, dist) = (x, poly)\n    else:\n        poly = polynomials.Poly(poly)\n    if (poly.dim < len(dist)):\n        polynomials.setdim(poly, len(dist))\n    shape = poly.shape\n    poly = polynomials.flatten(poly)\n    m1 = E(poly, dist)\n    m2 = E((poly ** 2), dist)\n    m3 = E((poly ** 3), dist)\n    out = (((m3 - ((3 * m2) * m1)) + (2 * (m1 ** 3))) / ((m2 - (m1 ** 2)) ** 1.5))\n    out = numpy.reshape(out, shape)\n    return out", "docstring": "Skewness operator.\n\nElement by element 3rd order statistics of a distribution or polynomial.\n\nArgs:\npoly (Poly, Dist):\nInput to take skewness on.\ndist (Dist):\nDefines the space the skewness is taken on. It is ignored if\n``poly`` is a distribution.\n\nReturns:\n(numpy.ndarray):\nElement for element variance along ``poly``, where\n``skewness.shape == poly.shape``.\n\nExamples:\n>>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))\n>>> print(chaospy.Skew(dist))\n[2. 0.]\n>>> x, y = chaospy.variable(2)\n>>> poly = chaospy.Poly([1, x, y, 10*x*y])\n>>> print(chaospy.Skew(poly, dist))\n[nan  2.  0.  0.]", "source": "codesearchnet"}
{"code": "def fit_to_cols(what, indent='', cols=79):\n    lines = []\n    while what:\n        (what, next_line) = split_line(what=what, cols=cols, indent=indent)\n        lines.append(next_line)\n    return '\\n'.join(lines)", "docstring": "Wrap the given text to the columns, prepending the indent to each line.\n\nArgs:\nwhat(str): text to wrap.\nindent(str): indentation to use.\ncols(int): colt to wrap to.\n\nReturns:\nstr: Wrapped text", "source": "codesearchnet"}
{"code": "def load_pos_model(lang=\"en\", version=\"2\"):\n  \n  src_dir = \"pos{}\".format(version)\n  p = locate_resource(src_dir, lang)\n  fh = _open(p)\n  return dict(np.load(fh))", "docstring": "Return a part of speech tagger parameters for `lang` and of version `version`\n\nArgs:\nlang (string): language code.\nversion (string): version of the parameters to be used.", "source": "juraj-google-style"}
{"code": "def static_lengths(self, ragged_lengths=True):\n    if self.num_row_partitions == 0:\n        return self._static_inner_shape_as_list(False)\n    first_dim = self.row_partitions[0].static_nrows\n    if isinstance(first_dim, tensor_shape.Dimension):\n        first_dim = first_dim.value\n    rp_dims = [first_dim]\n    for rp in self.row_partitions:\n        if rp.is_uniform():\n            rp_dims.append(rp.static_uniform_row_length)\n        elif ragged_lengths:\n            const_vals = tensor_util.constant_value(rp.row_lengths())\n            if const_vals is None:\n                rp_dims.append(None)\n            else:\n                rp_dims.append(tuple(const_vals.tolist()))\n        else:\n            rp_dims.append(None)\n    return rp_dims + self._static_inner_shape_as_list(True)", "docstring": "Returns a list of statically known axis lengths.\n\nThis represents what values are known. For each row partition, it presents\neither the uniform row length (if statically known),\nthe list of row lengths, or none if it is not statically known.\nFor the inner shape, if the rank is known, then each dimension is reported\nif known, and None otherwise. If the rank of the inner shape is not known,\nthen the returned list ends with an ellipsis.\n\nArgs:\nragged_lengths: If false, returns None for all ragged dimensions.\n\nReturns:\nA Sequence[Union[Sequence[int],int, None]] of lengths, with a possible\nEllipsis at the end.", "source": "github-repos"}
{"code": "def set_result(self, result):\n    if self.done():\n        raise RuntimeError('set_result can only be called once.')\n    self._result = result\n    self._trigger()", "docstring": "Set the result of the future to the provided result.\n\nArgs:\nresult (Any): The result", "source": "codesearchnet"}
{"code": "def stream(self, report, callback=None):\n    conn_id = self._find_connection(self.conn_string)\n    if isinstance(report, BroadcastReport):\n        self.adapter.notify_event_nowait(self.conn_string, 'broadcast', report)\n    elif (conn_id is not None):\n        self.adapter.notify_event_nowait(self.conn_string, 'report', report)\n    if (callback is not None):\n        callback((isinstance(report, BroadcastReport) or (conn_id is not None)))", "docstring": "Queue data for streaming\n\nArgs:\nreport (IOTileReport): A report object to stream to a client\ncallback (callable): An optional callback that will be called with\na bool value of True when this report actually gets streamed.\nIf the client disconnects and the report is dropped instead,\ncallback will be called with False", "source": "codesearchnet"}
{"code": "def _unescape_token(token):\n  r\n\n  def match(m):\n    r\n    \n    if m.group(1) is None:\n      return u\"_\" if m.group(0) == u\"\\\\u\" else u\"\\\\\"\n\n    \n    try:\n      return six.unichr(int(m.group(1)))\n    except (ValueError, OverflowError) as _:\n      return _UNDEFINED_UNICODE\n\n  \n  return _UNESCAPE_REGEX.sub(match, token)", "docstring": "r\"\"\"Replaces escaped characters in the token with their unescaped versions.\n\nApplies inverse transformations as _escape_token():\n1. Replace \"\\u\" with \"_\", and \"\\\\\" with \"\\\".\n2. Replace \"\\###;\" with the unicode character the ### refers to.\n\nArgs:\ntoken: escaped string\n\nReturns:\nunescaped string", "source": "juraj-google-style"}
{"code": "def get_sendback(self, uuid, key):\n\n    def send_back_callback(data):\n        self.sendResponse(serializers.serialize(data), uuid, key)\n    return send_back_callback", "docstring": "Return function for sending progress messages back to original caller.\n\nArgs:\nuuid (str): UUID of the received message.\nkey (str): Routing key.\n\nReturns:\nfn reference: Reference to function which takes only one data \\\nargument.", "source": "codesearchnet"}
{"code": "def truncate_too_long_number(numobj):\n    \n    if is_valid_number(numobj):\n        return True\n    numobj_copy = PhoneNumber()\n    numobj_copy.merge_from(numobj)\n    national_number = numobj.national_number\n\n    while not is_valid_number(numobj_copy):\n        \n        national_number = national_number \n        numobj_copy.national_number = national_number\n        validation_result = is_possible_number_with_reason(numobj_copy)\n        if (validation_result == ValidationResult.TOO_SHORT or\n            national_number == 0):\n            return False\n    \n    numobj.national_number = national_number\n    return True", "docstring": "Truncate a number object that is too long.\n\nAttempts to extract a valid number from a phone number that is too long\nto be valid, and resets the PhoneNumber object passed in to that valid\nversion. If no valid number could be extracted, the PhoneNumber object\npassed in will not be modified.\n\nArguments:\nnumobj -- A PhoneNumber object which contains a number that is too long to\nbe valid.\n\nReturns True if a valid phone number can be successfully extracted.", "source": "juraj-google-style"}
{"code": "async def change_file(self, file_path: str, description: str = None):\n        \n        with open(file_path, 'rb') as f:\n            await self._change(asset=f.read())", "docstring": "change the file of that attachment\n\n|methcoro|\n\nWarning:\n|unstable|\n\nArgs:\nfile_path: path to the file you want to add / modify\ndescription: *optional* description for your attachment\n\nRaises:\nValueError: file_path must not be None\nAPIException", "source": "juraj-google-style"}
{"code": "def str_delimited(results, header=None, delimiter=\"\\t\"):\n    \n    returnstr = \"\"\n    if header is not None:\n        returnstr += delimiter.join(header) + \"\\n\"\n    return returnstr + \"\\n\".join([delimiter.join([str(m) for m in result])\n                                  for result in results])", "docstring": "Given a tuple of tuples, generate a delimited string form.\n>>> results = [[\"a\",\"b\",\"c\"],[\"d\",\"e\",\"f\"],[1,2,3]]\n>>> print(str_delimited(results,delimiter=\",\"))\na,b,c\nd,e,f\n1,2,3\n\nArgs:\nresult: 2d sequence of arbitrary types.\nheader: optional header\n\nReturns:\nAligned string output in a table-like format.", "source": "juraj-google-style"}
{"code": "def __init__(self, current):\n        import sys\n        from pyoko.modelmeta import model_registry\n        \n        out = []\n        for mdl_name in sys.PYOKO_LOGS.copy():\n            try:\n                mdl = model_registry.get_model(mdl_name)\n            except KeyError:\n                continue\n            bucket_name = mdl.objects.adapter.bucket.name\n            mdl.objects.adapter.bucket.set_decoder('application/json', lambda a: bytes_to_str(a))\n            for k in set(sys.PYOKO_LOGS[mdl_name]):\n                if k not in sys.PYOKO_LOGS['new']:\n                    obj = mdl.objects.data().get(k)\n                    print(obj)\n                    out.append(\"{}/|{}/|{}\".format(\n                        bucket_name, k, obj[0]))\n                    \n            sys.PYOKO_LOGS[mdl_name] = []\n            mdl.objects.adapter.bucket.set_decoder('application/json', binary_json_decoder)\n        sys.PYOKO_LOGS['new'] = []\n        current.output = {\n            'response': \"\\n\".join(out),\n            'http_headers': (('Content-Type', 'text/plain; charset=utf-8'),\n                             ),\n        }", "docstring": "GET method handler\nArgs:\nreq: Request object.\nresp: Response object.", "source": "juraj-google-style"}
{"code": "def get_asn_origin_whois(self, asn_registry='radb', asn=None, retry_count=3, server=None, port=43):\n    try:\n        if (server is None):\n            server = ASN_ORIGIN_WHOIS[asn_registry]['server']\n        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        conn.settimeout(self.timeout)\n        log.debug('ASN origin WHOIS query for {0} at {1}:{2}'.format(asn, server, port))\n        conn.connect((server, port))\n        query = ' -i origin {0}{1}'.format(asn, '\\r\\n')\n        conn.send(query.encode())\n        response = ''\n        while True:\n            d = conn.recv(4096).decode()\n            response += d\n            if (not d):\n                break\n        conn.close()\n        if ('Query rate limit exceeded' in response):\n            if (retry_count > 0):\n                log.debug('ASN origin WHOIS query rate limit exceeded. Waiting...')\n                sleep(1)\n                return self.get_asn_origin_whois(asn_registry=asn_registry, asn=asn, retry_count=(retry_count - 1), server=server, port=port)\n            else:\n                raise WhoisRateLimitError('ASN origin Whois lookup failed for {0}. Rate limit exceeded, wait and try again (possibly a temporary block).'.format(asn))\n        elif (('error 501' in response) or ('error 230' in response)):\n            log.debug('ASN origin WHOIS query error: {0}'.format(response))\n            raise ValueError\n        return str(response)\n    except (socket.timeout, socket.error) as e:\n        log.debug('ASN origin WHOIS query socket error: {0}'.format(e))\n        if (retry_count > 0):\n            log.debug('ASN origin WHOIS query retrying (count: {0})'.format(str(retry_count)))\n            return self.get_asn_origin_whois(asn_registry=asn_registry, asn=asn, retry_count=(retry_count - 1), server=server, port=port)\n        else:\n            raise WhoisLookupError('ASN origin WHOIS lookup failed for {0}.'.format(asn))\n    except WhoisRateLimitError:\n        raise\n    except:\n        raise WhoisLookupError('ASN origin WHOIS lookup failed for {0}.'.format(asn))", "docstring": "The function for retrieving CIDR info for an ASN via whois.\n\nArgs:\nasn_registry (:obj:`str`): The source to run the query against\n(asn.ASN_ORIGIN_WHOIS).\nasn (:obj:`str`): The AS number (required).\nretry_count (:obj:`int`): The number of times to retry in case\nsocket errors, timeouts, connection resets, etc. are\nencountered. Defaults to 3.\nserver (:obj:`str`): An optional server to connect to.\nport (:obj:`int`): The network port to connect on. Defaults to 43.\n\nReturns:\nstr: The raw ASN origin whois data.\n\nRaises:\nWhoisLookupError: The ASN origin whois lookup failed.\nWhoisRateLimitError: The ASN origin Whois request rate limited and\nretries were exhausted.", "source": "codesearchnet"}
{"code": "def db_wb004(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_wb004`'.format(value))\n    self._db_wb004 = value", "docstring": "Corresponds to IDD Field `db_wb004`\nmean coincident dry-bulb temperature to\nWet-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_wb004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def collect_results(rule, max_results=500, result_stream_args=None):\n    if (result_stream_args is None):\n        logger.error('This function requires a configuration dict for the inner ResultStream object.')\n        raise KeyError\n    rs = ResultStream(rule_payload=rule, max_results=max_results, **result_stream_args)\n    return list(rs.stream())", "docstring": "Utility function to quickly get a list of tweets from a ``ResultStream``\nwithout keeping the object around. Requires your args to be configured\nprior to using.\n\nArgs:\nrule (str): valid powertrack rule for your account, preferably\ngenerated by the `gen_rule_payload` function.\nmax_results (int): maximum number of tweets or counts to return from\nthe API / underlying ``ResultStream`` object.\nresult_stream_args (dict): configuration dict that has connection\ninformation for a ``ResultStream`` object.\n\nReturns:\nlist of results\n\nExample:\n>>> from searchtweets import collect_results\n>>> tweets = collect_results(rule,\nmax_results=500,\nresult_stream_args=search_args)", "source": "codesearchnet"}
{"code": "def audio_bottom(x, model_hparams, vocab_size):\n  \n  del vocab_size  \n  inputs = x\n  with tf.variable_scope(\"audio_modality\"):\n    \n    def xnet_resblock(x, filters, res_relu, name):\n      \n      with tf.variable_scope(name):\n        \n        \n        y = common_layers.separable_conv_block(\n            x,\n            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],\n            first_relu=True,\n            padding=\"SAME\",\n            force2d=True,\n            name=\"sep_conv_block\")\n        y = common_layers.pool(y, (3, 3), \"MAX\", \"SAME\", strides=(2, 2))\n        return y + common_layers.conv_block(\n            x,\n            filters, [((1, 1), (1, 1))],\n            padding=\"SAME\",\n            strides=(2, 2),\n            first_relu=res_relu,\n            force2d=True,\n            name=\"res_conv0\")\n\n    x = tf.to_float(inputs) / 255.\n    x.set_shape([None, None, None, 1])\n    for i in range(model_hparams.audio_compression):\n      x = xnet_resblock(x, 2**(i + 1), True, \"compress_block_%d\" % i)\n    return xnet_resblock(x,\n                         model_hparams.hidden_size,\n                         False,\n                         \"compress_block_final\")", "docstring": "Transform input from data space to model space.\n\nArgs:\nx: A Tensor with shape [batch, ...]\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nbody_input: A Tensor with shape [batch, ?, ?,\nmodel_hparams.hidden_size].", "source": "juraj-google-style"}
{"code": "def _copy_fn(fn):\n    if (not callable(fn)):\n        raise TypeError('fn is not callable: {}'.format(fn))\n    return types.FunctionType(code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__)", "docstring": "Create a deep copy of fn.\n\nArgs:\nfn: a callable\n\nReturns:\nA `FunctionType`: a deep copy of fn.\n\nRaises:\nTypeError: if `fn` is not a callable.", "source": "codesearchnet"}
{"code": "def to_json_string(self):\n    return json.dumps(self.__dict__, indent=2) + '\\n'", "docstring": "Serializes this instance to a JSON formatted string.\n\nReturns:\nstr: JSON formatted string representing the configuration instance.", "source": "github-repos"}
{"code": "def send_html(self, html, body=None, msgtype=\"m.text\"):\n        \n        return self.client.api.send_message_event(\n            self.room_id, \"m.room.message\", self.get_html_content(html, body, msgtype))", "docstring": "Send an html formatted message.\n\nArgs:\nhtml (str): The html formatted message to be sent.\nbody (str): The unformatted body of the message to be sent.", "source": "juraj-google-style"}
{"code": "def add_signature_block(src_fileobj, dest_fileobj, signing_algorithm, signature=None):\n    \n    algo_id = {'sha1': 1, 'sha384': 2}[signing_algorithm]\n    if not signature:\n        signature = make_dummy_signature(algo_id)\n\n    src_fileobj.seek(0)\n    mardata = mar.parse_stream(src_fileobj)\n\n    \n    header = mardata.header\n    dest_fileobj.write(mar_header.build(header))\n\n    \n    sig = dict(algorithm_id=algo_id,\n               size=len(signature),\n               signature=signature,\n               )\n\n    \n    filesize = 0\n    sigs_offset = dest_fileobj.tell()\n    sigs = sigs_header.build(dict(\n        filesize=filesize,\n        count=1,\n        sigs=[sig],\n    ))\n    dest_fileobj.write(sigs)\n\n    \n    dest_fileobj.write(extras_header.build(mardata.additional))\n\n    \n    data_offset = dest_fileobj.tell()\n    src_fileobj.seek(mardata.data_offset)\n    write_to_file(takeexactly(src_fileobj, mardata.data_length), dest_fileobj)\n\n    \n    index_offset = dest_fileobj.tell()\n\n    index = mardata.index\n\n    \n    data_offset_delta = data_offset - mardata.data_offset\n\n    for e in index.entries:\n        e.offset += data_offset_delta\n\n    dest_fileobj.write(index_header.build(index))\n    filesize = dest_fileobj.tell()\n\n    \n    dest_fileobj.seek(0)\n    header.index_offset = index_offset\n    dest_fileobj.write(mar_header.build(header))\n\n    dest_fileobj.seek(sigs_offset)\n    sigs = sigs_header.build(dict(\n        filesize=filesize,\n        count=1,\n        sigs=[sig],\n    ))\n    dest_fileobj.write(sigs)", "docstring": "Add a signature block to marfile, a MarReader object.\n\nProductversion and channel are preserved, but any existing signatures are overwritten.\n\nArgs:\nsrc_fileobj (file object): The input MAR file to add a signature to\ndest_fileobj (file object): File object to write new MAR file to. Must be open in w+b mode.\nsigning_algorithm (str): One of 'sha1', or 'sha384'\nsignature (bytes): Signature to write, or None to use a dummy signature", "source": "juraj-google-style"}
{"code": "def from_value(cls, ion_type, value, annotations=()):\n        \n        if value is None:\n            value = IonPyNull()\n        else:\n            args, kwargs = cls._to_constructor_args(value)\n            value = cls(*args, **kwargs)\n        value.ion_event = None\n        value.ion_type = ion_type\n        value.ion_annotations = annotations\n        return value", "docstring": "Constructs a value as a copy with an associated Ion type and annotations.\n\nArgs:\nion_type (IonType): The associated Ion type.\nvalue (Any): The value to construct from, generally of type ``cls``.\nannotations (Sequence[unicode]):  The sequence Unicode strings decorating this value.", "source": "juraj-google-style"}
{"code": "def out_file_name(out_dir, fname, ext=None):\n    if (ext is None):\n        return os.path.join(out_dir, os.path.basename(fname))\n    fname = remove_ext(fname)\n    return os.path.join(out_dir, '{}.{}'.format(fname, ext))", "docstring": "Return path of output file, given a directory, file name and extension.\n\nIf fname is a path, it is converted to its basename.\n\nArgs:\nout_dir (str): path to the directory where output should be written.\nfname (str): path to the input file.\next (str): file extension of the output file (defaults to None).\n\nReturns:\nstr: out_dir + fname with extension replaced. If `ext` is `None`, the\noriginal extension is kept.", "source": "codesearchnet"}
{"code": "def from_string(string):\n        \n        lines = string.split(\"\\n\")\n        toks = lines[0].split()\n        lengths = [float(i) for i in toks]\n        toks = lines[1].split()\n        angles = [float(i) for i in toks[0:3]]\n        \n        a = lengths.pop(-1)\n        lengths.insert(0, a)\n        alpha = angles.pop(-1)\n        angles.insert(0, alpha)\n        latt = Lattice.from_lengths_and_angles(lengths, angles)\n        sp = []\n        coords = []\n        chrg = []\n        for l in lines[4:]:\n            m = re.match(r'\\d+\\s+(\\w+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\\s+' +\n                         r'([0-9\\-\\.]+)\\s+(?:0\\s+){8}([0-9\\-\\.]+)', l.strip())\n            if m:\n                sp.append(m.group(1))\n                \n                \n                coords.append([float(m.group(i)) for i in [3, 4, 2]])\n                chrg.append(m.group(5))\n        return ZeoCssr(\n            Structure(latt, sp, coords, site_properties={'charge': chrg})\n        )", "docstring": "Reads a string representation to a ZeoCssr object.\n\nArgs:\nstring: A string representation of a ZeoCSSR.\n\nReturns:\nZeoCssr object.", "source": "juraj-google-style"}
{"code": "def is_registered(self, cuuid, host):\n        \n        \n        \n        if (cuuid in self.registry) and (self.registry[cuuid][\"host\"] == host):\n            return True\n        else:\n            return False", "docstring": "This function will check to see if a given host with client uuid is\ncurrently registered.\n\nArgs:\ncuuid (string): The client uuid that wishes to register.\nhost (tuple): The (address, port) tuple of the client that is\nregistering.\n\nReturns:\nWill return True if the client is registered and will return False if\nit is not.", "source": "juraj-google-style"}
{"code": "def remove_interceptor(self, name):\n        \n        for index, interceptor in enumerate(self.interceptors):\n            matches = (\n                type(interceptor).__name__ == name or\n                getattr(interceptor, 'name') == name\n            )\n            if matches:\n                self.interceptors.pop(index)\n                return True\n        return False", "docstring": "Removes a specific interceptor by name.\n\nArguments:\nname (str): interceptor name to disable.\n\nReturns:\nbool: `True` if the interceptor was disabled, otherwise `False`.", "source": "juraj-google-style"}
{"code": "def pause():\n    t = timer()\n    if f.t.stopped:\n        raise StoppedError('Cannot pause stopped timer.')\n    if f.t.paused:\n        raise PausedError('Timer already paused.')\n    f.t.paused = True\n    f.t.tmp_total += (t - f.t.start_t)\n    f.t.start_t = None\n    f.t.last_t = None\n    return t", "docstring": "Pause the timer, preventing subsequent time from accumulating in the\ntotal.  Renders the timer inactive, disabling other timing commands.\n\nReturns:\nfloat: The current time.\n\nRaises:\nPausedError: If timer already paused.\nStoppedError: If timer already stopped.", "source": "codesearchnet"}
{"code": "def write_label_list(path, label_list):\n    entries = []\n    for label in label_list:\n        entries.append([label.start, label.end, label.value])\n    textfile.write_separated_lines(path, entries, separator='\\t')", "docstring": "Writes the given `label_list` to an audacity label file.\n\nArgs:\npath (str): Path to write the file to.\nlabel_list (audiomate.annotations.LabelList): Label list", "source": "codesearchnet"}
{"code": "def make_fixture(model_class, **kwargs):\n    \n    all_fields = get_fields(model_class)\n\n    fields_for_random_generation = map(\n        lambda x: getattr(model_class, x), all_fields\n    )\n\n    overrides = {}\n\n    for kwarg, value in kwargs.items():\n        if kwarg in all_fields:\n            kwarg_field = getattr(model_class, kwarg)\n            fields_for_random_generation.remove(kwarg_field)\n            overrides.update({kwarg_field: value})\n\n    random_values = get_random_values(fields_for_random_generation)\n\n    values = dict(overrides, **random_values)\n\n    assert len(all_fields) == len(values), (\n        \"Mismatch in values, {} != {}\".format(\n            len(all_fields), len(values)\n        )\n    )\n    data = {k.name: v for k, v in values.items()}\n    return model_class(**data)", "docstring": "Take the model_klass and generate a fixure for it\n\nArgs:\nmodel_class (MongoEngine Document): model for which a fixture\nis needed\nkwargs (dict): any overrides instead of random values\n\nReturns:\ndict for now, other fixture types are not implemented yet", "source": "juraj-google-style"}
{"code": "def _scale_size(size, scale):\n    (w, h) = size\n    return (int(((w * float(scale)) + 0.5)), int(((h * float(scale)) + 0.5)))", "docstring": "Rescale a size by a ratio.\n\nArgs:\nsize (tuple): w, h.\nscale (float): Scaling factor.\n\nReturns:\ntuple[int]: scaled size.", "source": "codesearchnet"}
{"code": "def lookup(self, obj):\n    for registered in self._registry:\n        if isinstance(obj, registered):\n            return self._registry[registered]\n    raise LookupError(f'{type(obj)} has not been registered.')", "docstring": "Looks up 'obj'.\n\nArgs:\nobj: The object to lookup within the registry.\n\nReturns:\nValue for 'obj' in the registry if found.\nRaises:\nLookupError: if 'obj' has not been registered.", "source": "github-repos"}
{"code": "def myRank(grade, badFormat, year, length):\n    \n    return int(sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1)", "docstring": "rank of candidateNumber in year\n\nArguments:\ngrade {int} -- a weighted average for a specific candidate number and year\nbadFormat {dict} -- candNumber : [results for candidate]\nyear {int} -- year you are in\nlength {int} -- length of each row in badFormat divided by 2\n\n\n\nReturns:\nint -- rank of candidateNumber in year", "source": "juraj-google-style"}
{"code": "def get_version_details(self, version_name):\n    \n    name = ('%s/versions/%s' % (self._full_model_name, version_name))\n    return self._api.projects().models().versions().get(name=name).execute()", "docstring": "Get details of a version.\n\nArgs:\nversion: the name of the version in short form, such as \"v1\".\nReturns: a dictionary containing the version details.", "source": "juraj-google-style"}
{"code": "def create_temp(node, namer):\n  \n  if isinstance(node, gast.Name):\n    name = node.id\n  elif isinstance(node, (gast.Attribute, gast.Subscript)):\n    name = node.value.id\n  else:\n    raise TypeError\n  temp_node = gast.Name(id=namer.temp(name), annotation=None, ctx=None)\n  anno.setanno(temp_node, 'temp_var', node)\n  return temp_node", "docstring": "Create a temporary variable.\n\nArgs:\nnode: Create a temporary variable to store this variable in.\nnamer: A naming object that guarantees the names are unique.\n\nReturns:\nnode: See `create_grad`. Returns a temporary variable, which is always a\nsimple variable annotated with `temp_var`.", "source": "juraj-google-style"}
{"code": "def render_wrapper(self, region='us-east-1'):\n        \n        base = self.settings['pipeline']['base']\n\n        if self.base:\n            base = self.base\n\n        email = self.settings['pipeline']['notifications']['email']\n        slack = self.settings['pipeline']['notifications']['slack']\n        deploy_type = self.settings['pipeline']['type']\n        pipeline_id = self.compare_with_existing(region=region)\n\n        data = {\n            'app': {\n                'appname': self.app_name,\n                'group_name': self.group_name,\n                'repo_name': self.repo_name,\n                'base': base,\n                'deploy_type': deploy_type,\n                'environment': 'packaging',\n                'region': region,\n                'triggerjob': self.trigger_job,\n                'run_as_user': DEFAULT_RUN_AS_USER,\n                'email': email,\n                'slack': slack,\n                'pipeline': self.settings['pipeline']\n            },\n            'id': pipeline_id\n        }\n\n        self.log.debug('Wrapper app data:\\n%s', pformat(data))\n\n        wrapper = get_template(template_file='pipeline/pipeline_wrapper.json.j2', data=data, formats=self.generated)\n\n        return json.loads(wrapper)", "docstring": "Generate the base Pipeline wrapper.\n\nThis renders the non-repeatable stages in a pipeline, like jenkins, baking, tagging and notifications.\n\nArgs:\nregion (str): AWS Region.\n\nReturns:\ndict: Rendered Pipeline wrapper.", "source": "juraj-google-style"}
{"code": "def __init__(self, inputs, num_clusters, initial_clusters, distance_metric, random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length, cluster_centers, cluster_centers_updated, cluster_centers_initialized):\n    self._inputs = inputs\n    self._num_clusters = num_clusters\n    self._initial_clusters = initial_clusters\n    self._distance_metric = distance_metric\n    self._seed = random_seed\n    self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries\n    self._kmc2_chain_length = kmc2_chain_length\n    self._cluster_centers = cluster_centers\n    self._cluster_centers_updated = cluster_centers_updated\n    self._cluster_centers_initialized = cluster_centers_initialized\n    self._num_selected = array_ops.shape(self._cluster_centers)[0]\n    self._num_remaining = self._num_clusters - self._num_selected\n    self._num_data = math_ops.add_n([array_ops.shape(i)[0] for i in self._inputs])", "docstring": "Creates an op factory.\n\nArgs:\ninputs: See KMeans constructor.\nnum_clusters: An integer Tensor providing the number of clusters.\ninitial_clusters: See KMeans constructor.\ndistance_metric: See KMeans constructor.\nrandom_seed: See KMeans constructor.\nkmeans_plus_plus_num_retries: See KMeans constructor.\nkmc2_chain_length: See KMeans constructor.\ncluster_centers: The TF variable holding the initial centers. It may\nalready contain some centers when the op is executed.\ncluster_centers_updated: A second TF variable to hold a copy of the\ninitial centers, used for full-batch mode. In mini-batch mode,\ncluster_centers_updated is the same variable as cluster_centers.\ncluster_centers_initialized: A boolean TF variable that will be set to\ntrue when all the initial centers have been chosen.", "source": "github-repos"}
{"code": "def formula_balance(model):\n    compound_formula = {}\n    for compound in model.compounds:\n        if (compound.formula is not None):\n            try:\n                f = Formula.parse(compound.formula).flattened()\n                compound_formula[compound.id] = f\n            except ParseError as e:\n                msg = 'Error parsing formula for compound {}:\\n{}\\n{}'.format(compound.id, e, compound.formula)\n                if (e.indicator is not None):\n                    msg += '\\n{}'.format(e.indicator)\n                logger.warning(msg)\n    for reaction in model.reactions:\n        (yield (reaction, reaction_formula(reaction.equation, compound_formula)))", "docstring": "Calculate formula compositions for each reaction.\n\nCall :func:`reaction_formula` for each reaction.\nYield (reaction, result) pairs, where result has two formula compositions\nor `None`.\n\nArgs:\nmodel: :class:`psamm.datasource.native.NativeModel`.", "source": "codesearchnet"}
{"code": "def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None):\n    if (query is not None):\n        dist_forecasts = self.matched_forecasts['dist'][dist_model_name].query(query)\n        dist_forecasts = dist_forecasts.reset_index(drop=True)\n        condition_forecasts = self.matched_forecasts['condition'][condition_model_name].query(query)\n        condition_forecasts = condition_forecasts.reset_index(drop=True)\n    else:\n        dist_forecasts = self.matched_forecasts['dist'][dist_model_name]\n        condition_forecasts = self.matched_forecasts['condition'][condition_model_name]\n    max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))\n    areas = dist_forecasts['Area'].values\n    for f in np.arange(dist_forecasts.shape[0]):\n        condition_prob = condition_forecasts.loc[(f, self.forecast_bins['condition'][0])]\n        if (condition_prob >= condition_threshold):\n            max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[(f, self.forecast_bins['dist'])].values, size=(num_samples, areas[f])).max(axis=1))\n    return max_hail_samples", "docstring": "Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.\n\nHail sizes are sampled from each predicted gamma distribution. The total number of samples equals\nnum_samples * area of the hail object. To get the maximum hail size for each realization, the maximum\nvalue within each area sample is used.\n\nArgs:\ndist_model_name: Name of the distribution machine learning model being evaluated\ncondition_model_name: Name of the hail/no-hail model being evaluated\nnum_samples: Number of maximum hail samples to draw\ncondition_threshold: Threshold for drawing hail samples\nquery: A str that selects a subset of the data for evaluation\n\nReturns:\nA numpy array containing maximum hail samples for each forecast object.", "source": "codesearchnet"}
{"code": "def __init__(self, http_error):\n    \n    error_details = None\n    error_response = None\n    if http_error.fp:\n      try:\n        error_response = http_error.fp.read()\n        error_body = json.loads(error_response)\n        error_details = ['%s: %s' % (detail['message'], detail['debug_info'])\n                         for detail in error_body['error']['errors']]\n      except (ValueError, TypeError, KeyError):\n        pass\n    if error_details:\n      error_details_str = ', '.join(error_details)\n      error_message = ('HTTP %s (%s) error when communicating with URL: %s.  '\n                       'Details: %s' % (http_error.code, http_error.reason,\n                                        http_error.filename, error_details_str))\n    else:\n      error_message = ('HTTP %s (%s) error when communicating with URL: %s. '\n                       'Response: %s' % (http_error.code, http_error.reason,\n                                         http_error.filename,\n                                         error_response))\n    super(ServerRequestException, self).__init__(error_message)", "docstring": "Create a ServerRequestException from a given urllib2.HTTPError.\n\nArgs:\nhttp_error: The HTTPError that the ServerRequestException will be\nbased on.", "source": "juraj-google-style"}
{"code": "def generate_block_graph(block_graph: blocks.BlockGraph, loader: jinja2.BaseLoader) -> str:\n    return _generate_visualization(template_file=_BLOCKGRAPH_TEMPLATE_NAME, loader=loader, graph_data=block_serializer.encode_merged_graph(block_graph))", "docstring": "Generate the visualization webpage.\n\nArgs:\nblock_graph: blocks.BlockGraph. The block graph of the code.\nloader: A jinja22 loader\n\nReturns:\nstr. The rendered visualization page.", "source": "github-repos"}
{"code": "def setPadding(self, padding):\n        \n        self._pad = padding\n        self._zfill = self.__class__.getPaddingNum(self._pad)", "docstring": "Set new padding characters for the sequence.\ni.e. \"#\" or \"@@@\" or '%04d', or an empty string to disable range formatting.\n\nArgs:\npadding (str): sequence padding to set", "source": "juraj-google-style"}
{"code": "def squad_v1_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:\n    \n    EM_total = 0\n    count = 0\n    for ground_truth, prediction in zip(y_true, y_predicted):\n        if len(ground_truth[0]) == 0:\n            \n            continue\n        count += 1\n        EMs = [int(normalize_answer(gt) == normalize_answer(prediction)) for gt in ground_truth]\n        EM_total += max(EMs)\n    return 100 * EM_total / count if count > 0 else 0", "docstring": "Calculates Exact Match score between y_true and y_predicted\nEM score uses the best matching y_true answer:\nif y_pred equal at least to one answer in y_true then EM = 1, else EM = 0\nSkips examples without an answer.\nArgs:\ny_true: list of correct answers (correct answers are represented by list of strings)\ny_predicted: list of predicted answers\nReturns:\nexact match score : float", "source": "juraj-google-style"}
{"code": "def _RemoveAuthorizedKeys(self, user):\n    \n    pw_entry = self._GetUser(user)\n    if not pw_entry:\n      return\n\n    home_dir = pw_entry.pw_dir\n    authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')\n    if os.path.exists(authorized_keys_file):\n      try:\n        os.remove(authorized_keys_file)\n      except OSError as e:\n        message = 'Could not remove authorized keys for user %s. %s.'\n        self.logger.warning(message, user, str(e))", "docstring": "Remove a Linux user account's authorized keys file to prevent login.\n\nArgs:\nuser: string, the Linux user account to remove access.", "source": "juraj-google-style"}
{"code": "async def disconnect(self, conn_id):\n        \n\n        self._ensure_connection(conn_id, True)\n\n        dev = self._get_property(conn_id, 'device')\n        dev.connected = False\n\n        self._teardown_connection(conn_id)", "docstring": "Asynchronously disconnect from a connected device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\ncallback (callback): A callback that will be called as\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def __init__(self, vlan_id=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_SET_VLAN_VID, length=8)\n        self.vlan_id = vlan_id", "docstring": "Create an ActionVlanVid with the optional parameters below.\n\nArgs:\nvlan_id (int): VLAN priority.", "source": "juraj-google-style"}
{"code": "def transform_feature(self, transformation_cache, state_manager):\n    input_tensor = transformation_cache.get(self.key, state_manager)\n    if self.normalizer_fn is not None:\n        input_tensor = self.normalizer_fn(input_tensor)\n    return input_tensor", "docstring": "See `FeatureColumn` base class.\n\nIn this case, we apply the `normalizer_fn` to the input tensor.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.\n\nReturns:\nNormalized input tensor.", "source": "github-repos"}
{"code": "def create_authors(project_dir=os.curdir):\n    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')\n    authors_file = os.path.join(project_dir, 'AUTHORS')\n    if os.path.exists(pkg_info_file):\n        return\n    authors = get_authors(project_dir=project_dir)\n    with open(authors_file, 'wb') as authors_fd:\n        authors_fd.write((b'\\n'.join((a.encode('utf-8') for a in authors)) + b'\\n'))", "docstring": "Creates the authors file, if not in a package.\n\nReturns:\nNone\n\nRaises:\nRuntimeError: If the authors could not be retrieved", "source": "codesearchnet"}
{"code": "async def get_records_for_zone(self, dns_zone, params=None):\n        \n        managed_zone = self.get_managed_zone(dns_zone)\n        url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'\n\n        if not params:\n            params = {}\n\n        if 'fields' not in params:\n            \n            params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,'\n                                'rrsets/type,rrsets/ttl,nextPageToken')\n        next_page_token = None\n\n        records = []\n        while True:\n            if next_page_token:\n                params['pageToken'] = next_page_token\n            response = await self.get_json(url, params=params)\n            records.extend(response['rrsets'])\n            next_page_token = response.get('nextPageToken')\n            if not next_page_token:\n                break\n\n        logging.info(f'Found {len(records)} rrsets for zone \"{dns_zone}\".')\n        return records", "docstring": "Get all resource record sets for a managed zone, using the DNS zone.\n\nArgs:\ndns_zone (str): Desired DNS zone to query.\nparams (dict): (optional) Additional query parameters for HTTP\nrequests to the GDNS API.\nReturns:\nlist of dicts representing rrsets.", "source": "juraj-google-style"}
{"code": "class TFDebertaXSoftmax(keras.layers.Layer):\n\n    def __init__(self, axis=-1, **kwargs):\n        super().__init__(**kwargs)\n        self.axis = axis\n\n    def call(self, inputs: tf.Tensor, mask: tf.Tensor):\n        rmask = tf.logical_not(tf.cast(mask, tf.bool))\n        output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)\n        output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)\n        output = tf.where(rmask, 0.0, output)\n        return output", "docstring": "Masked Softmax which is optimized for saving memory\n\nArgs:\ninput (`tf.Tensor`): The input tensor that will apply softmax.\nmask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\ndim (int): The dimension that will apply softmax", "source": "github-repos"}
{"code": "def AddValue(self, registry_value):\n    name = registry_value.name.upper()\n    if (name in self._values):\n        raise KeyError('Value: {0:s} already exists.'.format(registry_value.name))\n    self._values[name] = registry_value", "docstring": "Adds a value.\n\nArgs:\nregistry_value (WinRegistryValue): Windows Registry value.\n\nRaises:\nKeyError: if the value already exists.", "source": "codesearchnet"}
{"code": "def add_profile_variants(self, profile_variants):\n    results = self.db.profile_variant.insert_many(profile_variants)\n    return results", "docstring": "Add several variants to the profile_variant collection in the\ndatabase\n\nArgs:\n\nprofile_variants(list(models.ProfileVariant))", "source": "codesearchnet"}
{"code": "def valueReadPreprocessor(valueString, replaceParamsFile=None):\n    if (type(valueString) is bool):\n        log.warning('Only numerical variable types can be handled by the valueReadPreprocessor function.')\n        return valueString\n    processedValue = valueString\n    if ((replaceParamsFile is not None) and (valueString is not None)):\n        if (('[' in valueString) or (']' in valueString)):\n            processedValue = '{0}'.format(REPLACE_NO_VALUE)\n            for targetParam in replaceParamsFile.targetParameters:\n                if (targetParam.targetVariable == valueString):\n                    processedValue = '{0}'.format(((- 1) * targetParam.id))\n                    break\n    return processedValue", "docstring": "Apply global pre-processing to values during reading throughout the project.\n\nArgs:\nvalueString (str): String representing the value to be preprocessed.\nreplaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if\nreplacement variables are included in the project.\n\nReturns:\nstr: Processed value as a string", "source": "codesearchnet"}
{"code": "def index_min(x, idx, y):\n    return _index_update_helper(tf_np.ndarray._with_index_min, x, idx, y)", "docstring": "Pure equivalent of `x[idx] = minimum(x[idx], y)`.\n\nReturns the value of x that would result from the NumPy-style indexed\nassignment `x[idx] = minimum(x[idx], y)`. Because it's a pure function, `x`\nitself won't be changed.\n\nArgs:\nx: an array with the values to be updated.\nidx: a Numpy-style index, consisting of `None`, integers, slice objects,\nellipses, ndarrays with integer dtypes, or a tuple of the above.\ny: the array of updates. `y` must be broadcastable to the shape of the array\nthat would be returned by `x[idx]`.\n\nReturns:\nThe updated version of `x`.", "source": "github-repos"}
{"code": "def queuify_logger(logger, queue_handler, queue_listener):\n    if isinstance(logger, str):\n        logger = logging.getLogger(logger)\n    handlers = [handler for handler in logger.handlers if (handler not in queue_listener.handlers)]\n    if handlers:\n        queue_listener.handlers = tuple((list(queue_listener.handlers) + handlers))\n    del logger.handlers[:]\n    logger.addHandler(queue_handler)", "docstring": "Replace logger's handlers with a queue handler while adding existing\nhandlers to a queue listener.\n\nThis is useful when you want to use a default logging config but then\noptionally add a logger's handlers to a queue during runtime.\n\nArgs:\nlogger (mixed): Logger instance or string name of logger to queue-ify\nhandlers.\nqueue_handler (QueueHandler): Instance of a ``QueueHandler``.\nqueue_listener (QueueListener): Instance of a ``QueueListener``.", "source": "codesearchnet"}
{"code": "def process_user_info_response(self, response):\n    mapping = (('username', 'preferred_username'), ('email', 'email'), ('last_name', 'family_name'), ('first_name', 'given_name'))\n    return {dest: response[source] for (dest, source) in mapping}", "docstring": "Process the user info response data.\n\nBy default, this simply maps the edX user info key-values (example below) to Django-friendly names. If your\nprovider returns different fields, you should sub-class this class and override this method.\n\n.. code-block:: python\n\n{\n\"username\": \"jdoe\",\n\"email\": \"jdoe@example.com\",\n\"first_name\": \"Jane\",\n\"last_name\": \"Doe\"\n}\n\nArguments:\nresponse (dict): User info data\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def yaml(modules_to_register: Iterable[Any] = None, classes_to_register: Iterable[Any] = None) -> ruamel.yaml.YAML:\n    \n    \n    \n    yaml = ruamel.yaml.YAML(typ = \"rt\")\n\n    \n    \n    yaml.representer.add_representer(np.ndarray, numpy_to_yaml)\n    yaml.constructor.add_constructor(\"!numpy_array\", numpy_from_yaml)\n    \n    yaml = register_module_classes(yaml = yaml, modules = modules_to_register)\n    yaml = register_classes(yaml = yaml, classes = classes_to_register)\n\n    return yaml", "docstring": "Create a YAML object for loading a YAML configuration.\n\nArgs:\nmodules_to_register: Modules containing classes to be registered with the YAML object. Default: None.\nclasses_to_register: Classes to be registered with the YAML object. Default: None.\nReturns:\nA newly creating YAML object, configured as apporpirate.", "source": "juraj-google-style"}
{"code": "def _compute_transitions(self, corpus, order=1):\n        \n        self.transitions = defaultdict(lambda: defaultdict(int))\n\n        for corpus_entry in corpus:\n            tokens = self.tokenize(corpus_entry)\n\n            last_tokens = utils.prefilled_buffer(\n                self._start_symbol, length=self.order)\n            \n            for token_value in chain(tokens, self._end_symbol):\n                for suffix in utils.get_suffixes(last_tokens):\n                    self.transitions[suffix][token_value] += 1\n\n                last_tokens.append(token_value)\n\n        self._compute_relative_probs(self.transitions)", "docstring": "Computes the transition probabilities of a corpus\nArgs:\ncorpus: the given corpus (a corpus_entry needs to be iterable)\norder: the maximal Markov chain order", "source": "juraj-google-style"}
{"code": "def eval_image(image, height, width, scope=None):\n    with tf.name_scope(values=[image, height, width], name=scope, default_name='eval_image'):\n        image = tf.image.central_crop(image, central_fraction=0.875)\n        image = tf.expand_dims(image, 0)\n        image = tf.image.resize_bilinear(image, [height, width], align_corners=False)\n        image = tf.squeeze(image, [0])\n        return image", "docstring": "Prepare one image for evaluation.\n\nArgs:\nimage: 3-D float Tensor\nheight: integer\nwidth: integer\nscope: Optional scope for name_scope.\nReturns:\n3-D float Tensor of prepared image.", "source": "codesearchnet"}
{"code": "def check_upload_status(self, video_id):\n    if (not self.authenticated):\n        raise ApiError(_('Authentication is required'))\n    entry = self.fetch_video(video_id)\n    upload_status = Api.yt_service.CheckUploadStatus(entry)\n    if (upload_status is not None):\n        video_upload_state = upload_status[0]\n        detailed_message = upload_status[1]\n        return {'upload_state': video_upload_state, 'detailed_message': detailed_message}\n    else:\n        return True", "docstring": "Checks the video upload status\nNewly uploaded videos may be in the processing state\n\nAuthentication is required\n\nReturns:\nTrue if video is available\notherwise a dict containes upload_state and detailed message\ni.e. {\"upload_state\": \"processing\", \"detailed_message\": \"\"}", "source": "codesearchnet"}
{"code": "def check_par(chrom, pos):\n    \n    par = False\n    \n    for interval in PAR.get(chrom,[]):\n        if (pos >= interval[0] and pos <= interval[1]):\n            par = True\n    \n    return par", "docstring": "Check if a coordinate is in the PAR region\n\nArgs:\nchrom(str)\npos(int)\n\nReturns:\npar(bool)", "source": "juraj-google-style"}
{"code": "async def _on_event(self, event_):\n        \n        conv_id = event_.conversation_id.id\n        try:\n            conv = await self._get_or_fetch_conversation(conv_id)\n        except exceptions.NetworkError:\n            logger.warning(\n                'Failed to fetch conversation for event notification: %s',\n                conv_id\n            )\n        else:\n            self._sync_timestamp = parsers.from_timestamp(event_.timestamp)\n            conv_event = conv.add_event(event_)\n            \n            if conv_event is not None:\n                await self.on_event.fire(conv_event)\n                await conv.on_event.fire(conv_event)", "docstring": "Receive a hangouts_pb2.Event and fan out to Conversations.\n\nArgs:\nevent_: hangouts_pb2.Event instance", "source": "juraj-google-style"}
{"code": "def _multiple_field(cls):\n    klassdict = cls.__dict__\n    try:\n        return klassdict['_entitylist_multifield'][0]\n    except (KeyError, IndexError, TypeError):\n        from . import fields\n        multifield_tuple = tuple(fields.find(cls, multiple=True))\n        assert (len(multifield_tuple) == 1)\n        multifield = multifield_tuple[0]\n        assert issubclass(multifield.type_, Entity)\n        cls._entitylist_multifield = multifield_tuple\n        return multifield_tuple[0]", "docstring": "Return the \"multiple\" TypedField associated with this EntityList.\n\nThis also lazily sets the ``_entitylist_multiplefield`` value if it\nhasn't been set yet. This is set to a tuple containing one item because\nif we set the class attribute to the TypedField, we would effectively\nadd a TypedField descriptor to the class, which we don't want.\n\nRaises:\nAssertionError: If there is more than one multiple TypedField\nor the the TypedField type_ is not a subclass of Entity.", "source": "codesearchnet"}
{"code": "def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:\n    if encoder_attention_mask.dim() == 3:\n        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n    if encoder_attention_mask.dim() == 2:\n        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n    encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)\n    encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min\n    return encoder_extended_attention_mask", "docstring": "Invert an attention mask (e.g., switches 0. and 1.).\n\nArgs:\nencoder_attention_mask (`torch.Tensor`): An attention mask.\n\nReturns:\n`torch.Tensor`: The inverted attention mask.", "source": "github-repos"}
{"code": "def _getScalesDiag(self, termx=0):\n    assert (self.P > 1), 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'\n    assert (self.noisPos is not None), 'VarianceDecomposition:: noise term has to be set'\n    assert (termx < (self.n_randEffs - 1)), 'VarianceDecomposition:: termx>=n_randEffs-1'\n    assert (self.trait_covar_type[self.noisPos] not in ['lowrank', 'block', 'fixed']), 'VarianceDecomposition:: diagonal initializaiton not posible for such a parametrization'\n    assert (self.trait_covar_type[termx] not in ['lowrank', 'block', 'fixed']), 'VarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'\n    scales = []\n    res = self._getH2singleTrait(self.vd.getTerm(termx).getK())\n    scaleg = sp.sqrt(res['varg'].mean())\n    scalen = sp.sqrt(res['varn'].mean())\n    for term_i in range(self.n_randEffs):\n        if (term_i == termx):\n            _scales = (scaleg * self.diag[term_i])\n        elif (term_i == self.noisPos):\n            _scales = (scalen * self.diag[term_i])\n        else:\n            _scales = (0.0 * self.diag[term_i])\n        if (self.jitter[term_i] > 0):\n            _scales = sp.concatenate((_scales, sp.array([sp.sqrt(self.jitter[term_i])])))\n        scales.append(_scales)\n    return sp.concatenate(scales)", "docstring": "Internal function for parameter initialization\nUses 2 term single trait model to get covar params for initialization\n\nArgs:\ntermx:      non-noise term terms that is used for initialization", "source": "codesearchnet"}
{"code": "def __init__(self, device_name, node_exec_stats, file_path, line_number, func_name, op_type):\n    self.device_name = device_name\n    self.node_exec_stats = node_exec_stats\n    self.file_path = file_path\n    self.line_number = line_number\n    self.func_name = func_name\n    if self.file_path:\n        self.file_line_func = '%s:%d(%s)' % (os.path.basename(self.file_path), self.line_number, self.func_name)\n    else:\n        self.file_line_func = ''\n    self.op_type = op_type\n    self.start_time = self.node_exec_stats.all_start_micros\n    self.op_time = self.node_exec_stats.op_end_rel_micros - self.node_exec_stats.op_start_rel_micros", "docstring": "Constructor.\n\nArgs:\ndevice_name: (string) name of the device.\nnode_exec_stats: `NodeExecStats` proto.\nfile_path: path to the source file involved in creating the op.\nline_number: line number in the file involved in creating the op.\nfunc_name: name of the function that the line belongs to.\nop_type: (string) Operation type.", "source": "github-repos"}
{"code": "def deepnn(x):\n    \n    \n    \n    \n    with tf.name_scope(\"reshape\"):\n        x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n    \n    with tf.name_scope(\"conv1\"):\n        W_conv1 = weight_variable([5, 5, 1, 32])\n        b_conv1 = bias_variable([32])\n        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n    \n    with tf.name_scope(\"pool1\"):\n        h_pool1 = max_pool_2x2(h_conv1)\n\n    \n    with tf.name_scope(\"conv2\"):\n        W_conv2 = weight_variable([5, 5, 32, 64])\n        b_conv2 = bias_variable([64])\n        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n    \n    with tf.name_scope(\"pool2\"):\n        h_pool2 = max_pool_2x2(h_conv2)\n\n    \n    \n    with tf.name_scope(\"fc1\"):\n        W_fc1 = weight_variable([7 * 7 * 64, 1024])\n        b_fc1 = bias_variable([1024])\n\n        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n    \n    \n    with tf.name_scope(\"dropout\"):\n        keep_prob = tf.placeholder(tf.float32)\n        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n    \n    with tf.name_scope(\"fc2\"):\n        W_fc2 = weight_variable([1024, 10])\n        b_fc2 = bias_variable([10])\n\n        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n    return y_conv, keep_prob", "docstring": "deepnn builds the graph for a deep net for classifying digits.\n\nArgs:\nx: an input tensor with the dimensions (N_examples, 784), where 784 is\nthe number of pixels in a standard MNIST image.\n\nReturns:\nA tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with\nvalues equal to the logits of classifying the digit into one of 10\nclasses (the digits 0-9). keep_prob is a scalar placeholder for the\nprobability of dropout.", "source": "juraj-google-style"}
{"code": "def set_default(self, name, value):\n    \n    fl = self._flags()\n    if name not in fl:\n      self._set_unknown_flag(name, value)\n      return\n    fl[name]._set_default(value)  \n    self._assert_validators(fl[name].validators)", "docstring": "Changes the default value of the named flag object.\n\nThe flag's current value is also updated if the flag is currently using\nthe default value, i.e. not specified in the command line, and not set\nby FLAGS.name = value.\n\nArgs:\nname: str, the name of the flag to modify.\nvalue: The new default value.\n\nRaises:\nUnrecognizedFlagError: Raised when there is no registered flag named name.\nIllegalFlagValueError: Raised when value is not valid.", "source": "juraj-google-style"}
{"code": "def copy(self) -> 'TraceableStack[T]':\n    return TraceableStack(self._stack)", "docstring": "Return a copy of self referencing the same objects but in a new list.\n\nThis method is implemented to support thread-local stacks.\n\nReturns:\nTraceableStack with a new list that holds existing objects.", "source": "github-repos"}
{"code": "def analyze(model_path=None, model_content=None, gpu_compatibility=False, **kwargs):\n    if not model_path and (not model_content):\n        raise ValueError('neither `model_path` nor `model_content` is provided')\n    if model_path:\n        print(f'=== {model_path} ===\\n')\n        tflite_model = model_path\n        input_is_filepath = True\n    else:\n        print('=== TFLite ModelAnalyzer ===\\n')\n        tflite_model = model_content\n        input_is_filepath = False\n    if kwargs.get('experimental_use_mlir', False):\n        print(wrap_converter.wrapped_flat_buffer_file_to_mlir(tflite_model, input_is_filepath))\n    else:\n        print(_analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath, gpu_compatibility))", "docstring": "Analyzes the given tflite_model with dumping model structure.\n\nThis tool provides a way to understand users' TFLite flatbuffer model by\ndumping internal graph structure. It also provides additional features\nlike checking GPU delegate compatibility.\n\nWARNING: Experimental interface, subject to change.\nThe output format is not guaranteed to stay stable, so don't\nwrite scripts to this.\n\nArgs:\nmodel_path: TFLite flatbuffer model path.\nmodel_content: TFLite flatbuffer model object.\ngpu_compatibility: Whether to check GPU delegate compatibility.\n**kwargs: Experimental keyword arguments to analyze API.\n\nReturns:\nPrint analyzed report via console output.", "source": "github-repos"}
{"code": "def flush_all(self, delay=0, noreply=None):\n    if (noreply is None):\n        noreply = self.default_noreply\n    cmd = (b'flush_all ' + six.text_type(delay).encode('ascii'))\n    if noreply:\n        cmd += b' noreply'\n    cmd += b'\\r\\n'\n    results = self._misc_cmd([cmd], b'flush_all', noreply)\n    if noreply:\n        return True\n    return (results[0] == b'OK')", "docstring": "The memcached \"flush_all\" command.\n\nArgs:\ndelay: optional int, the number of seconds to wait before flushing,\nor zero to flush immediately (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue.", "source": "codesearchnet"}
{"code": "def _location_infos_equal(left, right):\n    if ((not isinstance(left, LocationInfo)) or (not isinstance(right, LocationInfo))):\n        raise AssertionError(u'Unsupported LocationInfo comparison between types {} and {} with values {}, {}'.format(type(left), type(right), left, right))\n    optional_scopes_depth_equal = (left.optional_scopes_depth == right.optional_scopes_depth)\n    parent_query_paths_equal = (((left.parent_location is None) and (right.parent_location is None)) or (left.parent_location.query_path == right.parent_location.query_path))\n    recursive_scopes_depths_equal = (left.recursive_scopes_depth == right.recursive_scopes_depth)\n    types_equal = (left.type == right.type)\n    return all([optional_scopes_depth_equal, parent_query_paths_equal, recursive_scopes_depths_equal, types_equal])", "docstring": "Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise.\n\nLocationInfo objects are considered equal for the SQL backend iff the optional scopes depth,\nrecursive scopes depth, types and parent query paths are equal.\n\nArgs:\nleft: LocationInfo, left location info object to compare.\nright: LocationInfo, right location info object to compare.\n\nReturns:\nbool, True if LocationInfo objects equivalent, False otherwise.", "source": "codesearchnet"}
{"code": "def get_tag(self, tag_name, **kwargs):\n    return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX, tag_name, **kwargs)", "docstring": "get a tag by name\n\nArgs:\ntag_name (string): name of tag to get\n\nReturns:\ndictionary of the response", "source": "codesearchnet"}
{"code": "def get(cls, ns, key):\n        \n        return getattr(db, cls.__name__).find_one(\n            ConfigItem.namespace_prefix == ns,\n            ConfigItem.key == key\n        )", "docstring": "Fetch an item by namespace and key\n\nArgs:\nns (str): Namespace prefix\nkey (str): Item key\n\nReturns:\n:obj:`Configitem`: Returns config item object if found, else `None`", "source": "juraj-google-style"}
{"code": "def _check_wires_list(self, wires, node):\n    if (len(set(wires)) != len(wires)):\n        raise DAGCircuitError('duplicate wires')\n    wire_tot = (len(node.qargs) + len(node.cargs))\n    if (node.condition is not None):\n        wire_tot += node.condition[0].size\n    if (len(wires) != wire_tot):\n        raise DAGCircuitError(('expected %d wires, got %d' % (wire_tot, len(wires))))", "docstring": "Check that a list of wires is compatible with a node to be replaced.\n\n- no duplicate names\n- correct length for operation\nRaise an exception otherwise.\n\nArgs:\nwires (list[register, index]): gives an order for (qu)bits\nin the input circuit that is replacing the node.\nnode (DAGNode): a node in the dag\n\nRaises:\nDAGCircuitError: if check doesn't pass.", "source": "codesearchnet"}
{"code": "def move(self, delta):\n    self.pos = ((self.pos[0] + delta[0]), (self.pos[1] + delta[1]))", "docstring": "Move the node.\n\nArgs:\ndelta (tupel): A tupel, holding the adjustment of the position.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor]=None) -> Tuple:\n    self.input_dtype = hidden_states.dtype\n    batch_size, sequence_length, hidden_dim = hidden_states.shape\n    hidden_states = hidden_states.reshape(batch_size * sequence_length, hidden_dim)\n    hidden_states = hidden_states.to(self.dtype)\n    self._cast_classifier()\n    router_logits = self.classifier(hidden_states)\n    top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)\n    return (top_1_mask, router_probs)", "docstring": "The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for\neach experts.)\n\nArgs:\nhidden_states (`torch.Tensor`):\n(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.\nReturns:\ntop_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)):\nIndex tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token\nusing the top1 probabilities of the router.\nrouter_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, nump_experts)):\nTensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each\ntoken and expert. Used for routing tokens to experts.\nrouter_logits (`torch.Tensor` of shape (batch_size, sequence_length))):\nLogits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.\nThis is used later for computing router z-loss.", "source": "github-repos"}
{"code": "def DeserializeFromBufer(buffer, offset=0):\n    mstream = StreamManager.GetStream(buffer)\n    reader = BinaryReader(mstream)\n    tx = Transaction.DeserializeFrom(reader)\n    StreamManager.ReleaseStream(mstream)\n    return tx", "docstring": "Deserialize object instance from the specified buffer.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\noffset: UNUSED\n\nReturns:\nTransaction:", "source": "codesearchnet"}
{"code": "def _validate_query(query):\n    \n    query = deepcopy(query)\n    \n    if query[\"q\"] == BLANK_QUERY[\"q\"]:\n        raise ValueError(\"No query specified.\")\n\n    query[\"q\"] = _clean_query_string(query[\"q\"])\n\n    \n    if query[\"limit\"] is None:\n        query[\"limit\"] = SEARCH_LIMIT if query[\"advanced\"] else NONADVANCED_LIMIT\n    \n    elif query[\"limit\"] > SEARCH_LIMIT:\n        warnings.warn('Reduced result limit from {} to the Search maximum: {}'\n                      .format(query[\"limit\"], SEARCH_LIMIT), RuntimeWarning)\n        query[\"limit\"] = SEARCH_LIMIT\n\n    \n    for key, val in BLANK_QUERY.items():\n        \n        if query.get(key, float('nan')) == val:\n            query.pop(key)\n\n    \n    to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()]\n    [query.pop(field) for field in to_remove]\n\n    return query", "docstring": "Validate and clean up a query to be sent to Search.\nCleans the query string, removes unneeded parameters, and validates for correctness.\nDoes not modify the original argument.\nRaises an Exception on invalid input.\n\nArguments:\nquery (dict): The query to validate.\n\nReturns:\ndict: The validated query.", "source": "juraj-google-style"}
{"code": "def OpenFileEntry(cls, path_spec_object, resolver_context=None):\n    \n    file_system = cls.OpenFileSystem(\n        path_spec_object, resolver_context=resolver_context)\n\n    if resolver_context is None:\n      resolver_context = cls._resolver_context\n\n    file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)\n\n    \n    \n    resolver_context.ReleaseFileSystem(file_system)\n\n    return file_entry", "docstring": "Opens a file entry object defined by path specification.\n\nArgs:\npath_spec_object (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built in context which is not multi process safe.\n\nReturns:\nFileEntry: file entry or None if the path specification could not be\nresolved.", "source": "juraj-google-style"}
{"code": "def pie(self, key=\"wall_time\", minfract=0.05, ax=None, **kwargs):\n        \n        ax, fig, plt = get_ax_fig_plt(ax=ax)\n        \n        ax.axis(\"equal\")\n        \n        labels, vals = self.names_and_values(key, minfract=minfract)\n        ax.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)\n        return fig", "docstring": "Plot pie chart for this timer.\n\nArgs:\nkey: Keyword used to extract data from the timer.\nminfract: Don't show sections whose relative weight is less that minfract.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n`matplotlib` figure", "source": "juraj-google-style"}
{"code": "def read_handle(url, cache=None, mode='rb'):\n    scheme = urlparse(url).scheme\n    if (cache == 'purge'):\n        _purge_cached(url)\n        cache = None\n    if (_is_remote(scheme) and (cache is None)):\n        cache = True\n        log.debug('Cache not specified, enabling because resource is remote.')\n    if cache:\n        handle = _read_and_cache(url, mode=mode)\n    elif (scheme in ('http', 'https')):\n        handle = _handle_web_url(url, mode=mode)\n    elif (scheme in 'gs'):\n        handle = _handle_gfile(url, mode=mode)\n    else:\n        handle = open(url, mode=mode)\n    (yield handle)\n    handle.close()", "docstring": "Read from any URL with a file handle.\n\nUse this to get a handle to a file rather than eagerly load the data:\n\n```\nwith read_handle(url) as handle:\nresult = something.load(handle)\n\nresult.do_something()\n\n```\n\nWhen program execution leaves this `with` block, the handle will be closed\nautomatically.\n\nArgs:\nurl: a URL including scheme or a local path\nReturns:\nA file handle to the specified resource if it could be reached.\nThe handle will be closed automatically once execution leaves this context.", "source": "codesearchnet"}
{"code": "def delete_interconnect(self, enclosure_uri, bay, timeout=(- 1)):\n    uri = '{path}?location=Enclosure:{enclosure_uri},Bay:{bay}'.format(path=self.LOCATIONS_PATH, enclosure_uri=enclosure_uri, bay=bay)\n    return self._helper.delete(uri, timeout=timeout)", "docstring": "Deletes an interconnect from a location.\n\nWarning:\nThis won't delete the LOGICAL INTERCONNECT itself and might cause inconsistency between the enclosure\nand Logical Interconnect Group.\n\nArgs:\nenclosure_uri: URI of the Enclosure\nbay: Bay\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nbool: Indicating if the interconnect was successfully deleted.", "source": "codesearchnet"}
{"code": "def get_subgraph_for_concept_pairs(\n        self, concepts: List[str], cutoff: Optional[int] = None\n    ):\n        \n        path_generator = (\n            nx.all_simple_paths(self, source, target, cutoff=cutoff)\n            for source, target in permutations(concepts, 2)\n        )\n        paths = chain.from_iterable(path_generator)\n        return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths))))", "docstring": "Get subgraph comprised of simple paths between the source and the\ntarget.\n\nArgs:\nconcepts\ncutoff", "source": "juraj-google-style"}
{"code": "def get_hist(self, observable: Any, **kwargs: Dict[(str, Any)]) -> Any:\n    return observable", "docstring": "Get the histogram that may be stored in some object.\n\nThis histogram is used to project from.\n\nNote:\nThe output object could just be the raw ROOT histogram.\n\nNote:\nThis function is just a basic placeholder and likely should be overridden.\n\nArgs:\nobservable (object): The input object. It could be a histogram or something more complex\nkwargs: Additional arguments passed to the projection function\nReturn:\nROOT.TH1 or ROOT.THnBase histogram which should be projected. By default, it returns the\nobservable (input object).", "source": "codesearchnet"}
{"code": "def slice_begin(self, tensor_shape, pnum):\n    tensor_layout = self.tensor_layout(tensor_shape)\n    coordinates = pnum_to_processor_coordinates(self.shape, pnum)\n    ret = []\n    for (dim_size, mesh_axis) in zip(tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):\n        if (mesh_axis is None):\n            ret.append(0)\n        else:\n            ret.append(((dim_size \n    return ret", "docstring": "Begin position for the tensor slice for the given processor.\n\nArgs:\ntensor_shape: Shape.\npnum: int <= self.size.\n\nReturns:\nlist of integers with length tensor_shape.ndims.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.Converse = channel.stream_stream(\n        '/google.assistant.embedded.v1alpha1.EmbeddedAssistant/Converse',\n        request_serializer=google_dot_assistant_dot_embedded_dot_v1alpha1_dot_embedded__assistant__pb2.ConverseRequest.SerializeToString,\n        response_deserializer=google_dot_assistant_dot_embedded_dot_v1alpha1_dot_embedded__assistant__pb2.ConverseResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def isexe(*components):\n    _path = path(*components)\n    return (isfile(_path) and os.access(_path, os.X_OK))", "docstring": "Return whether a path is an executable file.\n\nArguments:\n\npath (str): Path of the file to check.\n\nExamples:\n\n>>> fs.isexe(\"/bin/ls\")\nTrue\n\n>>> fs.isexe(\"/home\")\nFalse\n\n>>> fs.isexe(\"/not/a/real/path\")\nFalse\n\nReturns:\n\nbool: True if file is executable, else false.", "source": "codesearchnet"}
{"code": "def log_jwt_dict_info(log, msg_str, jwt_dict):\n    d = ts_to_str(jwt_dict)\n    log_list = ([(b, d.pop(a)) for (a, b, c) in CLAIM_LIST if (a in d)] + [(k, d[k]) for k in sorted(d)])\n    list(map(log, (['{}:'.format(msg_str)] + ['  {}: {}'.format(k, v) for (k, v) in log_list])))", "docstring": "Dump JWT to log.\n\nArgs:\nlog: Logger\nLogger to which to write the message.\n\nmsg_str: str\nA message to write to the log before the JWT values.\n\njwt_dict: dict\nJWT containing values to log.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def format_delta(__timedelta: datetime.timedelta) -> str:\n    if (__timedelta == datetime.timedelta(0)):\n        return ''\n    days_s = ('{}D'.format(__timedelta.days) if __timedelta.days else '')\n    (hours, minutes) = divmod(__timedelta.seconds, 3600)\n    (minutes, seconds) = divmod(minutes, 60)\n    hours_s = ('{:02d}H'.format(hours) if hours else '')\n    minutes_s = ('{:02d}M'.format(minutes) if minutes else '')\n    seconds_s = ('{:02d}S'.format(seconds) if seconds else '')\n    return 'P{}{}{}{}{}'.format(days_s, ('T' if (hours or minutes or seconds) else ''), hours_s, minutes_s, seconds_s)", "docstring": "Format ISO-8601 duration string.\n\nArgs:\n__timedelta: Duration to process\nReturns:\nISO-8601 representation of duration", "source": "codesearchnet"}
{"code": "def _Write(self, data, output_file):\n    _, extension = os.path.splitext(output_file)\n    with TemporaryDirectoryResource() as tempdir:\n        if extension == '.json':\n            json.dump(data, open(output_file, 'w'), sort_keys=True, indent=2)\n        elif extension in ['.tflite', '.bin']:\n            input_json = os.path.join(tempdir, 'temp.json')\n            with open(input_json, 'w') as fp:\n                json.dump(data, fp, sort_keys=True, indent=2)\n            returncode = subprocess.call([self._flatc_path, '-b', '--defaults-json', '--strict-json', '-o', tempdir, self._new_schema, input_json])\n            if returncode != 0:\n                raise RuntimeError('flatc failed to convert upgraded json to binary.')\n            shutil.copy(os.path.join(tempdir, 'temp.tflite'), output_file)\n        else:\n            raise ValueError('Invalid extension on output file %r' % output_file)", "docstring": "Output a json or bin version of the flatbuffer model.\n\nArgs:\ndata: Dict representing the TensorFlow Lite model to write.\noutput_file: filename to write the converted flatbuffer to. (json,\ntflite, or bin extension is required).\nRaises:\nValueError: When the extension is not json or bin\nRuntimeError: When flatc fails to convert json data to binary.", "source": "github-repos"}
{"code": "def easeInOutBack(n, s=1.70158):\n    \n    _checkRange(n)\n    n = n * 2\n    if n < 1:\n        s *= 1.525\n        return 0.5 * (n * n * ((s + 1) * n - s))\n    else:\n        n -= 2\n        s *= 1.525\n        return 0.5 * (n * n * ((s + 1) * n + s) + 2)", "docstring": "A \"back-in\" tween function that overshoots both the start and destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def closure(self, rules):\n        \n        closure = set()\n\n        todo = set(rules)\n        while todo:\n            rule = todo.pop()\n            closure.add(rule)\n\n            \n            if rule.at_end:\n                continue\n\n            symbol = rule.rhs[rule.pos]\n            for production in self.nonterminals[symbol]:\n                for first in self.first(rule.rest):\n                    if EPSILON in production.rhs:\n                        \n                        \n                        new_rule = DottedRule(production, 1, first)\n                    else:\n                        new_rule = DottedRule(production, 0, first)\n\n                    if new_rule not in closure:\n                        todo.add(new_rule)\n\n        return frozenset(closure)", "docstring": "Fills out the entire closure based on some initial dotted rules.\n\nArgs:\nrules - an iterable of DottedRules\n\nReturns: frozenset of DottedRules", "source": "juraj-google-style"}
{"code": "def aggregate(self, index):\n        \n\n        \n        if isinstance(index, string_types):\n            col_df_grouped = self.col_df.groupby(self.df[index])\n        else:\n            self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])\n            col_df_grouped = self.col_df.groupby(level=index)\n            self.col_df.index = self.df.index\n\n        \n        self.reduced_df = pd.DataFrame({\n            colred: col_df_grouped[colred.column].agg(colred.agg_func)\n            for colred in self.column_reductions\n            })\n\n        \n        reduced_dfs = []\n        for cf in self.column_functions:\n            \n            reduced_dfs.append(cf.apply_and_name(self))\n\n        return pd.concat(reduced_dfs, axis=1)", "docstring": "Performs a groupby of the unique Columns by index, as constructed from self.df.\n\nArgs:\nindex (str, or pd.Index): Index or column name of self.df.\n\nReturns:\npd.DataFrame: A dataframe, aggregated by index, that contains the result\nof the various ColumnFunctions, and named accordingly.", "source": "juraj-google-style"}
{"code": "def __tomo_linear_inv(freqs, ops, weights=None, trace=None):\n    \n    \n    if weights is not None:\n        W = np.array(weights)\n        if W.ndim == 1:\n            W = np.diag(W)\n\n    \n    S = np.array([vectorize(m).conj()\n                  for m in ops]).reshape(len(ops), ops[0].size)\n    if weights is not None:\n        S = np.dot(W, S)  \n\n    \n    v = np.array(freqs)  \n    if weights is not None:\n        v = np.dot(W, freqs)  \n    Sdg = S.T.conj()  \n    inv = np.linalg.pinv(np.dot(Sdg, S))  \n\n    \n    ret = devectorize(np.dot(inv, np.dot(Sdg, v)))\n    \n    if trace is not None:\n        ret = trace * ret / np.trace(ret)\n    return ret", "docstring": "Reconstruct a matrix through linear inversion.\n\nArgs:\nfreqs (list[float]): list of observed frequences.\nops (list[np.array]): list of corresponding projectors.\nweights (list[float] or array_like):\nweights to be used for weighted fitting.\ntrace (float or None): trace of returned operator.\n\nReturns:\nnumpy.array: A numpy array of the reconstructed operator.", "source": "juraj-google-style"}
{"code": "def stage(self, startimage, newimage):\n        \n        client = utils.get_client()\n        cprint('  Copying file from \"%s:/%s\" \\n                 to \"%s:\n               % (self.sourceimage, self.sourcepath, startimage, self.destpath),\n               'blue')\n\n        \n        cachedir = self._setcache(client)\n        cacherelpath = os.path.relpath(cachedir, TMPDIR)\n\n        \n        if os.path.exists(cachedir) and not os.path.exists(os.path.join(cachedir, 'content.tar')):\n            shutil.rmtree(cachedir)\n\n        if not os.path.exists(cachedir):\n            print(' * Creating cache at %s' % cacherelpath)\n            container = client.containers.create(self.sourceimage)\n            try:\n                tarfile_stream, tarfile_stats = container.get_archive(self.sourcepath)\n            except docker.errors.NotFound:\n                raise errors.MissingFileError(\n                        'Cannot copy file \"%s\" from image \"%s\" - it does not exist!' %\n                        (self.sourcepath, self.sourceimage))\n\n            \n            tempdir = tempfile.mkdtemp(dir=BUILD_TEMPDIR)\n            with open(os.path.join(tempdir, 'content.tar'), 'wb') as localfile:\n                for chunk in tarfile_stream:\n                    localfile.write(chunk)\n            os.mkdir(cachedir)\n            os.rename(tempdir, cachedir)\n        else:\n            print('  Using cached files from %s' % cacherelpath)\n\n        \n        dockerfile = 'FROM %s\\nADD content.tar %s' % (startimage, self.destpath)\n        with open(os.path.join(cachedir, 'Dockerfile'), 'w') as df:\n            df.write(dockerfile)\n\n        buildargs = dict(path=cachedir,\n                         tag=newimage,\n                         decode=True)\n        utils.set_build_cachefrom(self.cache_from, buildargs, client)\n\n        \n        stream = client.api.build(**buildargs)\n        try:\n            utils.stream_docker_logs(stream, newimage)\n        except ValueError as e:\n            raise errors.BuildError(dockerfile, e.args[0], build_args=buildargs)", "docstring": "Copies the file from source to target\n\nArgs:\nstartimage (str): name of the image to stage these files into\nnewimage (str): name of the created image", "source": "juraj-google-style"}
{"code": "def get_all_counters(obj, instance_list=None):\n    \n    counters, instances_avail = win32pdh.EnumObjectItems(None, None, obj, -1, 0)\n\n    if instance_list is None:\n        instance_list = instances_avail\n\n    if not isinstance(instance_list, list):\n        instance_list = [instance_list]\n\n    counter_list = []\n    for counter in counters:\n        for instance in instance_list:\n            instance = '*' if instance.lower() == '_total' else instance\n            counter_list.append((obj, instance, counter))\n        else:  \n            counter_list.append((obj, None, counter))\n\n    return get_counters(counter_list) if counter_list else {}", "docstring": "Get the values for all counters available to a Counter object\n\nArgs:\n\nobj (str):\nThe name of the counter object. You can get a list of valid names\nusing the ``list_objects`` function\n\ninstance_list (list):\nA list of instances to return. Use this to narrow down the counters\nthat are returned.\n\n.. note::\n``_Total`` is returned as ``*``", "source": "juraj-google-style"}
{"code": "def __init__(self, min_score=DEFAULT_MIN_SCORE,\n                 user_attributes=DEFAULT_USER_ATTRIBUTES):\n        \n        if min_score < 1:\n            min_score = 1\n        elif min_score > 4:\n            min_score = 4\n        self.min_score = min_score\n        self.user_attributes = user_attributes", "docstring": "Init method.\n\nArgs:\nmin_score (int): minimum score to accept (between 0 and 4).\nuser_attributes (tuple): list of user attributes to check.", "source": "juraj-google-style"}
{"code": "def notch_filter(data: FLOATS_TYPE,\n                 sampling_freq_hz: float,\n                 notch_freq_hz: float,\n                 quality_factor: float) -> FLOATS_TYPE:\n    \n    b, a = iirnotch(\n        w0=normalized_frequency(notch_freq_hz, sampling_freq_hz),\n        Q=quality_factor\n    )\n    filtered_data = lfilter(b=b, a=a, x=data)\n    return filtered_data", "docstring": "Design and use a notch (band reject) filter to filter the data.\n\nArgs:\ndata: time series of the data\nsampling_freq_hz: sampling frequency :math:`f_s`, in Hz\n(or other consistent units)\nnotch_freq_hz: notch frequency, in Hz\n(or other consistent units)\nquality_factor: notch filter quality factor, :math:`Q`\n\nReturns:\nfiltered data", "source": "juraj-google-style"}
{"code": "def write(self, x, access_logits):\n    gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name='gamma')\n    write_logits = (access_logits - (gamma * tf.expand_dims(self.mean_logits, 1)))\n    candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name='candidate_value')\n    erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name='erase')\n    write_weights = tf.nn.softmax(write_logits)\n    erase_weights = tf.expand_dims((1 - (erase_gates * write_weights)), 3)\n    erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1))\n    addition = tf.multiply(tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2))\n    update_value_op = self.mem_vals.assign(tf.reduce_mean((erase + addition), axis=1))\n    with tf.control_dependencies([update_value_op]):\n        write_op = self.mean_logits.assign(((self.mean_logits * 0.1) + tf.reduce_mean((write_logits * 0.9), axis=1)))\n        return write_op", "docstring": "Write to the memory based on a combination of similarity and least used.\n\nBased on arXiv:1607.00036v2 [cs.LG].\n\nArgs:\nx: a tensor in the shape of [batch_size, length, depth].\naccess_logits: the logits for accessing the memory.\nReturns:\nthe update op.", "source": "codesearchnet"}
{"code": "def dump_in_memory_result(self, result, output_path):\n    file_count = 0\n    logger.debug('Dumping in-memory processing results to output folder: %s', output_path)\n    for (k, v) in iteritems(result):\n        cur_output_path = os.path.join(output_path, k)\n        if isinstance(v, dict):\n            file_count += self.dump_in_memory_result(v, cur_output_path)\n        else:\n            if (not os.path.isdir(output_path)):\n                os.makedirs(output_path)\n            filename = os.path.join(output_path, k)\n            logger.debug('Writing output file: %s', filename)\n            with open(filename, 'wt', encoding=self.config.encoding) as f:\n                f.write(v)\n            file_count += 1\n    return file_count", "docstring": "Recursively dumps the result of our processing into files within the\ngiven output path.\n\nArgs:\nresult: The in-memory result of our processing.\noutput_path: Full path to the folder into which to dump the files.\n\nReturns:\nThe number of files generated (integer).", "source": "codesearchnet"}
{"code": "def ContainsNone(self, *values):\n    \n    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_NONE')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains none\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def depricated_name(newmethod):\n\n    def decorator(func):\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            warnings.simplefilter('always', DeprecationWarning)\n            warnings.warn('Function {} is depricated, please use {} instead.'.format(func.__name__, newmethod), category=DeprecationWarning, stacklevel=2)\n            warnings.simplefilter('default', DeprecationWarning)\n            return func(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "Decorator for warning user of depricated functions before use.\n\nArgs:\nnewmethod (str): Name of method to use instead.", "source": "codesearchnet"}
{"code": "def distance(self, other):\n        \n        return distance(self.lat, self.lon, None, other.lat, other.lon, None)", "docstring": "Distance between points\n\nArgs:\nother (:obj:`Point`)\nReturns:\nfloat: Distance in km", "source": "juraj-google-style"}
{"code": "def _get_enrollments_list_page(self, params=None):\n    req_url = urljoin(self.base_url, self.enrollment_list_url)\n    resp = self.requester.get(req_url, params=params)\n    resp.raise_for_status()\n    resp_json = resp.json()\n    results = resp_json['results']\n    next_url_str = resp_json.get('next')\n    cursor = None\n    qstr_cursor = None\n    if next_url_str:\n        next_url = urlparse(next_url_str)\n        qstr = parse_qs(next_url.query)\n        qstr_cursor = qstr.get('cursor')\n    if (qstr_cursor and isinstance(qstr_cursor, list)):\n        cursor = qstr_cursor[0]\n    return (results, cursor)", "docstring": "Submit request to retrieve enrollments list.\n\nArgs:\nparams (dict): Query parameters to use in the request. Valid parameters are:\n* course_id: Filters the result to course enrollments for the course\ncorresponding to the given course ID. The value must be URL encoded.\nOptional.\n* username: username: List of comma-separated usernames. Filters the result to the\ncourse enrollments of the given users. Optional.", "source": "codesearchnet"}
{"code": "def map_creative_click_tag_feeds(self, creative_feed, click_tag_feed):\n    for creative in creative_feed:\n        creative['click_tags'] = [click_tag for click_tag in click_tag_feed if self._assignment_matches(creative, click_tag)]", "docstring": "Maps click tag feed to the corresponding creative.\n\nClick Tag is a child object to the creative, and there is a 1 creative\nto many click tags relationship. In Bulkdozer they are represented by\ntwo separate tab in the feed, and this method maps the creatives to their\nrespective click tags based on the creative ID.\n\nArgs:\ncreative_feed: Creative feed.\nclick_tag_feed: Click tag feed.", "source": "github-repos"}
{"code": "def _get_fbeta_score(true_positives, selected, relevant, beta=1):\n    precision = 1\n    if (selected > 0):\n        precision = (true_positives / selected)\n    if (beta == 0):\n        return precision\n    recall = 1\n    if (relevant > 0):\n        recall = (true_positives / relevant)\n    if ((precision > 0) and (recall > 0)):\n        beta2 = (beta * beta)\n        return ((((1 + beta2) * precision) * recall) / ((beta2 * precision) + recall))\n    else:\n        return 0", "docstring": "Compute Fbeta score.\n\nArgs:\ntrue_positives: Number of true positive ngrams.\nselected: Number of selected ngrams.\nrelevant: Number of relevant ngrams.\nbeta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.\n\nReturns:\nFbeta score.", "source": "codesearchnet"}
{"code": "def GetCloudPath(self, resource_id, cache, database):\n    cloud_path = cache.GetResults('cloud_path')\n    if (not cloud_path):\n        results = database.Query(self.CLOUD_PATH_CACHE_QUERY)\n        cache.CacheQueryResults(results, 'cloud_path', 'resource_id', ('filename', 'parent'))\n        cloud_path = cache.GetResults('cloud_path')\n    if (resource_id == 'folder:root'):\n        return '/'\n    paths = []\n    (parent_path, parent_id) = cloud_path.get(resource_id, ['', ''])\n    while parent_path:\n        if (parent_path == 'folder:root'):\n            break\n        paths.append(parent_path)\n        (parent_path, parent_id) = cloud_path.get(parent_id, ['', ''])\n    if (not paths):\n        return '/'\n    paths.reverse()\n    return '/{0:s}/'.format('/'.join(paths))", "docstring": "Return cloud path given a resource id.\n\nArgs:\nresource_id (str): resource identifier for the file.\ncache (SQLiteCache): cache.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nstr: full path to the resource value.", "source": "codesearchnet"}
{"code": "def _convert_template_option(template):\n    option = {}\n    extraction_method = template.get('extraction_method')\n    if (extraction_method == 'guess'):\n        option['guess'] = True\n    elif (extraction_method == 'lattice'):\n        option['lattice'] = True\n    elif (extraction_method == 'stream'):\n        option['stream'] = True\n    option['pages'] = template.get('page')\n    option['area'] = [round(template['y1'], 3), round(template['x1'], 3), round(template['y2'], 3), round(template['x2'], 3)]\n    return option", "docstring": "Convert Tabula app template to tabula-py option\n\nArgs:\ntemplate (dict): Tabula app template\n\nReturns:\n`obj`:dict: tabula-py option", "source": "codesearchnet"}
{"code": "def add_sample_meta(self, source, reference, method='', filename='', md5='', sha1='', sha256='', size='', mimetype='', campaign='', confidence='', description='', bucket_list=[]):\n    data = {'api_key': self.api_key, 'username': self.username, 'source': source, 'reference': reference, 'method': method, 'filename': filename, 'md5': md5, 'sha1': sha1, 'sha256': sha256, 'size': size, 'mimetype': mimetype, 'upload_type': 'meta', 'campaign': campaign, 'confidence': confidence, 'bucket_list': ','.join(bucket_list)}\n    r = requests.post('{0}/samples/'.format(self.url), data=data, verify=self.verify, proxies=self.proxies)\n    if (r.status_code == 200):\n        result_data = json.loads(r.text)\n        return result_data\n    else:\n        log.error('Error with status code {0} and message {1}'.format(r.status_code, r.text))\n    return None", "docstring": "Adds a metadata sample. To add an actual file, use add_sample_file.\n\nArgs:\nsource: Source of the information\nreference: A reference where more information can be found\nmethod: The method for obtaining the sample.\nfilename: The name of the file.\nmd5: An MD5 hash of the file.\nsha1: SHA1 hash of the file.\nsha256: SHA256 hash of the file.\nsize: size of the file.\nmimetype: The mimetype of the file.\ncampaign: An associated campaign\nconfidence: The campaign confidence\nbucket_list: A list of bucket list items to add\nupload_type: Either 'file' or 'meta'\nReturns:\nA JSON sample object or None if there was an error.", "source": "codesearchnet"}
{"code": "def api_class(self, resource_name=None, path=None, audiences=None, scopes=None, allowed_client_ids=None, auth_level=None, api_key_required=None):\n    if (auth_level is not None):\n        _logger.warn(_AUTH_LEVEL_WARNING)\n\n    def apiserving_api_decorator(api_class):\n        \"Decorator for ProtoRPC class that configures Google's API server.\\n\\n      Args:\\n        api_class: remote.Service class, ProtoRPC service class being wrapped.\\n\\n      Returns:\\n        Same class with API attributes assigned in api_info.\\n      \"\n        self.__classes.append(api_class)\n        api_class.api_info = _ApiInfo(self.__common_info, resource_name=resource_name, path=path, audiences=audiences, scopes=scopes, allowed_client_ids=allowed_client_ids, auth_level=auth_level, api_key_required=api_key_required)\n        return api_class\n    return apiserving_api_decorator", "docstring": "Get a decorator for a class that implements an API.\n\nThis can be used for single-class or multi-class implementations.  It's\nused implicitly in simple single-class APIs that only use @api directly.\n\nArgs:\nresource_name: string, Resource name for the class this decorates.\n(Default: None)\npath: string, Base path prepended to any method paths in the class this\ndecorates. (Default: None)\naudiences: list of strings, Acceptable audiences for authentication.\n(Default: None)\nscopes: list of strings, Acceptable scopes for authentication.\n(Default: None)\nallowed_client_ids: list of strings, Acceptable client IDs for auth.\n(Default: None)\nauth_level: enum from AUTH_LEVEL, Frontend authentication level.\n(Default: None)\napi_key_required: bool, Whether a key is required to call into this API.\n(Default: None)\n\nReturns:\nA decorator function to decorate a class that implements an API.", "source": "codesearchnet"}
{"code": "def add_imports_for_symbol(module_code_builder, symbol, source_module_name, source_name, api_name, api_version, output_module_prefix=''):\n    if api_version == 1:\n        names_attr = API_ATTRS_V1[api_name].names\n        constants_attr = API_ATTRS_V1[api_name].constants\n    else:\n        names_attr = API_ATTRS[api_name].names\n        constants_attr = API_ATTRS[api_name].constants\n    if source_name == constants_attr:\n        for exports, name in symbol:\n            for export in exports:\n                dest_module, dest_name = _get_name_and_module(export)\n                dest_module = _join_modules(output_module_prefix, dest_module)\n                module_code_builder.add_import(None, source_module_name, name, dest_module, dest_name)\n    if hasattr(symbol, '__dict__') and names_attr in symbol.__dict__:\n        for export in getattr(symbol, names_attr):\n            dest_module, dest_name = _get_name_and_module(export)\n            dest_module = _join_modules(output_module_prefix, dest_module)\n            module_code_builder.add_import(symbol, source_module_name, source_name, dest_module, dest_name)", "docstring": "Add imports for the given symbol to `module_code_builder`.\n\nArgs:\nmodule_code_builder: `_ModuleInitCodeBuilder` instance.\nsymbol: A symbol.\nsource_module_name: Module that we can import the symbol from.\nsource_name: Name we can import the symbol with.\napi_name: API name. Currently, must be `tensorflow`.\napi_version: API version.\noutput_module_prefix: Prefix to prepend to destination module.", "source": "github-repos"}
{"code": "def value_from_message(self, message):\n        \n        if not isinstance(message, self.message_type):\n            raise DecodeError('Expected type %s, got %s: %r' %\n                              (self.message_type.__name__,\n                               type(message).__name__,\n                               message))\n        return message", "docstring": "Convert a message to a value instance.\n\nUsed by deserializers to convert from underlying messages to\nvalue of expected user type.\n\nArgs:\nmessage: A message instance of type self.message_type.\n\nReturns:\nValue of self.message_type.", "source": "juraj-google-style"}
{"code": "def __init__(cls, name, bases, dictionary):\n    \n    \n    \n    if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:\n      return\n    descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]\n    service_builder = _ServiceBuilder(descriptor)\n    service_builder.BuildService(cls)", "docstring": "Creates a message service class.\n\nArgs:\nname: Name of the class (ignored, but required by the metaclass\nprotocol).\nbases: Base classes of the class being constructed.\ndictionary: The class dictionary of the class being constructed.\ndictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object\ndescribing this protocol service type.", "source": "juraj-google-style"}
{"code": "def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int:\n    if exclude_embeddings:\n        embedding_param_names = [f'{name}.weight' for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)]\n        total_parameters = [parameter for name, parameter in self.named_parameters() if name not in embedding_param_names]\n    else:\n        total_parameters = list(self.parameters())\n    total_numel = []\n    is_loaded_in_4bit = getattr(self, 'is_loaded_in_4bit', False)\n    if is_loaded_in_4bit:\n        if is_bitsandbytes_available():\n            import bitsandbytes as bnb\n        else:\n            raise ValueError('bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. ')\n    for param in total_parameters:\n        if param.requires_grad or not only_trainable:\n            if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit):\n                if hasattr(param, 'element_size'):\n                    num_bytes = param.element_size()\n                elif hasattr(param, 'quant_storage'):\n                    num_bytes = param.quant_storage.itemsize\n                else:\n                    num_bytes = 1\n                total_numel.append(param.numel() * 2 * num_bytes)\n            else:\n                total_numel.append(param.numel())\n    return sum(total_numel)", "docstring": "Get number of (optionally, trainable or non-embeddings) parameters in the module.\n\nArgs:\nonly_trainable (`bool`, *optional*, defaults to `False`):\nWhether or not to return only the number of trainable parameters\n\nexclude_embeddings (`bool`, *optional*, defaults to `False`):\nWhether or not to return only the number of non-embeddings parameters\n\nReturns:\n`int`: The number of parameters.", "source": "github-repos"}
{"code": "def WriteHashes(self, arr):\n        \n        length = len(arr)\n        self.WriteVarInt(length)\n        for item in arr:\n            ba = bytearray(binascii.unhexlify(item))\n            ba.reverse()\n            \n            self.WriteBytes(ba)", "docstring": "Write an array of hashes to the stream.\n\nArgs:\narr (list): a list of 32 byte hashes.", "source": "juraj-google-style"}
{"code": "def __getattr__(self, name):\n        \n        if self.has_service_by_name(name):\n            return self._service_objects[name]\n        return self.__getattribute__(name)", "docstring": "Syntactic sugar to enable direct access of service objects by alias.\n\nArgs:\nname: string, the alias a service object was registered under.", "source": "juraj-google-style"}
{"code": "def _create_test(self, testcase, function_name, sdkobject, attribute=None):\n        \n        func = getattr(testcase, function_name)\n        object_name = sdkobject.rest_name\n\n        \n        test_name = \"\"\n        rep = dict()\n        rep[\"object\"] = object_name\n\n        if attribute:\n            rep[\"attribute\"] = attribute.local_name\n\n        rep = dict((re.escape(k), v) for k, v in rep.items())\n        pattern = re.compile(\"|\".join(list(rep.keys())))\n\n        if function_name.startswith(\"_\"):\n            function_name = function_name[1:]\n\n        test_name = pattern.sub(lambda m: rep[re.escape(m.group(0))], function_name)\n\n        \n        test_func = None\n\n        if attribute:\n            test_func = lambda self, attribute=attribute: func(self, attribute)\n        else:\n            test_func = lambda self: func(self)\n\n        test_func.__name__ = str(test_name)\n\n        return (test_name, test_func)", "docstring": "Create a test method for the sdkoject\n\nArgs:\ntestcase: the testcase to that should manage the method\nfunction_name: the name of the method in the testcase\nsdkobject: the object that should be tested\nattribute: the attribute information if necessary\n\nReturns:\nIt returns a tuple (name, method) that represents the test method", "source": "juraj-google-style"}
{"code": "def consume(self, tokens):\n        \n        wait_time = 0.\n        self.tokens -= tokens\n        if self.tokens < 0:\n            self._get_tokens()\n        if self.tokens < 0:\n            wait_time = -self.tokens / self.fill_rate\n        return wait_time", "docstring": "Consume tokens.\nArgs:\ntokens (float): number of transport tokens to consume\nReturns:\nwait_time (float): waiting time for the consumer", "source": "juraj-google-style"}
{"code": "def _setup(self):\n        \n        if isinstance(self.module, torch.nn.RNNBase): self.module.flatten_parameters = noop\n        for name_w in self.weights:\n            w = getattr(self.module, name_w)\n            del self.module._parameters[name_w]\n            self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))", "docstring": "for each string defined in self.weights, the corresponding\nattribute in the wrapped module is referenced, then deleted, and subsequently\nregistered as a new parameter with a slightly modified name.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, name):\n        \n        super(ProgressWorker, self).__init__()\n        self.name = name", "docstring": "Worker object that will be passed to the thread.\n\nArgs:\nname (str): name shown in progress ui.", "source": "juraj-google-style"}
{"code": "def parse_formal_type_parameters(base: '_classes.InterpreterClass | _classes.PyTDClass | _classes.ParameterizedClass', prefix: str | None, formal_type_parameters: 'datatypes.AliasingDict[str, _instance_base.SimpleValue]', container: '_instance_base.SimpleValue | DummyContainer | None'=None) -> None:\n\n    def merge(t0: '_instance_base.SimpleValue', t1: '_instance_base.SimpleValue', name: str) -> '_instance_base.SimpleValue':\n        return _merge_type(t0, t1, name, base)\n    if isinstance(base, _abstract.ParameterizedClass):\n        if base.full_name == 'typing.Generic':\n            return\n        if isinstance(base.base_cls, (_abstract.InterpreterClass, _abstract.PyTDClass)):\n            formal_type_parameters.merge_from(base.base_cls.all_formal_type_parameters, merge)\n        params = base.get_formal_type_parameters()\n        if hasattr(container, 'cls'):\n            container_template = container.cls.template\n        else:\n            container_template = ()\n        for name, param in params.items():\n            if isinstance(param, _abstract.TypeParameter):\n                if prefix:\n                    formal_type_parameters.add_alias(name, prefix + '.' + param.name, merge)\n                elif param in container_template:\n                    formal_type_parameters[name] = param\n            elif name not in formal_type_parameters:\n                formal_type_parameters[name] = param\n            else:\n                last_type = formal_type_parameters[name]\n                formal_type_parameters[name] = merge(last_type, param, name)\n    else:\n        if isinstance(base, (_abstract.InterpreterClass, _abstract.PyTDClass)):\n            formal_type_parameters.merge_from(base.all_formal_type_parameters, merge)\n        if base.template:\n            for item in base.template:\n                if isinstance(item, _abstract.TypeParameter):\n                    name = full_type_name(base, item.name)\n                    if name not in formal_type_parameters:\n                        formal_type_parameters[name] = None", "docstring": "Parse type parameters from base class.\n\nArgs:\nbase: base class.\nprefix: the full name of subclass of base class.\nformal_type_parameters: the mapping of type parameter name to its type.\ncontainer: An abstract value whose class template is used when prefix=None\nto decide how to handle type parameters that are aliased to other type\nparameters. Values that are in the class template are kept, while all\nothers are ignored.\n\nRaises:\nGenericTypeError: If the lazy types of type parameter don't match", "source": "github-repos"}
{"code": "def put(self, item: T, context: PipelineContext = None) -> None:\n        \n        LOGGER.info(\"Converting item \\\"{item}\\\" for sink \\\"{sink}\\\"\".format(item=item, sink=self._sink))\n        item = self._transform(data=item, context=context)\n        LOGGER.info(\"Puting item \\\"{item}\\\" into sink \\\"{sink}\\\"\".format(item=item, sink=self._sink))\n        self._sink.put(self._store_type, item, context)", "docstring": "Puts an objects into the data sink. The objects may be transformed into a new type for insertion if necessary.\n\nArgs:\nitem: The objects to be inserted into the data sink.\ncontext: The context of the insertion (mutable).", "source": "juraj-google-style"}
{"code": "def from_tuple(cls, query):\n    (field, query) = (query[0], query[1:])\n    try:\n        cls = TYPES[type(query[0])]\n    except KeyError:\n        pass\n    return cls(field, *query)", "docstring": "Create a condition from a query tuple.\n\nArgs:\nquery (tuple or list): Tuple or list that contains a query domain\nin the format of ``(field_name, field_value,\nfield_value_to)``. ``field_value_to`` is only applicable in\nthe case of a date search.\n\nReturns:\nDomainCondition: An instance of a domain condition. The specific\ntype will depend on the data type of the first value provided\nin ``query``.", "source": "codesearchnet"}
{"code": "def waitAndGet(self, event_name, timeout=None):\n    if timeout is None:\n        timeout = self.default_timeout_sec\n    if timeout:\n        if timeout > self.rpc_max_timeout_sec:\n            raise errors.CallbackHandlerBaseError(self._device, f'Specified timeout {timeout} is longer than max timeout {self.rpc_max_timeout_sec}.')\n    raw_event = self.callEventWaitAndGetRpc(self._id, event_name, timeout)\n    return callback_event.from_dict(raw_event)", "docstring": "Waits and gets a CallbackEvent with the specified identifier.\n\nIt will raise a timeout error if the expected event does not occur within\nthe time limit.\n\nArgs:\nevent_name: str, the name of the event to get.\ntimeout: float, the number of seconds to wait before giving up. If None,\nit will be set to self.default_timeout_sec.\n\nReturns:\nCallbackEvent, the oldest entry of the specified event.\n\nRaises:\nerrors.CallbackHandlerBaseError: If the specified timeout is longer than\nthe max timeout supported.\nerrors.CallbackHandlerTimeoutError: The expected event does not occur\nwithin the time limit.", "source": "github-repos"}
{"code": "def transcripts_by_gene(self, build='37'):\n        \n        hgnc_transcripts = {}\n        LOG.info(\"Fetching all transcripts\")\n        for transcript in self.transcript_collection.find({'build':build}):\n            hgnc_id = transcript['hgnc_id']\n            if not hgnc_id in hgnc_transcripts:\n                hgnc_transcripts[hgnc_id] = []\n            \n            hgnc_transcripts[hgnc_id].append(transcript)\n        \n        return hgnc_transcripts", "docstring": "Return a dictionary with hgnc_id as keys and a list of transcripts as value\n\nArgs:\nbuild(str)\n\nReturns:\nhgnc_transcripts(dict)", "source": "juraj-google-style"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Creates a regularizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same regularizer from the config\ndictionary.\n\nThis method is used by Keras `model_to_estimator`, saving and\nloading models to HDF5 formats, Keras model cloning, some visualization\nutilities, and exporting models to and from JSON.\n\nArgs:\nconfig: A Python dictionary, typically the output of get_config.\n\nReturns:\nA regularizer instance.", "source": "github-repos"}
{"code": "def timeout_thread_handler(timeout, stop_event):\n    stop_happened = stop_event.wait(timeout)\n    if (stop_happened is False):\n        print(('Killing program due to %f second timeout' % timeout))\n    os._exit(2)", "docstring": "A background thread to kill the process if it takes too long.\n\nArgs:\ntimeout (float): The number of seconds to wait before killing\nthe process.\nstop_event (Event): An optional event to cleanly stop the background\nthread if required during testing.", "source": "codesearchnet"}
{"code": "def get_config_dir(program='', system_wide=False):\n\t\n\n\tconfig_homes = []\n\n\tif system_wide:\n\t\tif os.name == 'nt':\n\t\t\tconfig_homes.append(\n\t\t\t\twinreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))\n\n\t\telse:\n\t\t\tconfig_homes.append('/etc')\n\t\t\tconfig_homes.append('/etc/xdg')\n\n\t\t\tif os.name == 'darwin':\n\t\t\t\tconfig_homes.append('/Library')\n\n\telse:\n\t\tif os.name == 'nt':\n\t\t\timport winreg\n\t\t\tconfig_homes.append(\n\t\t\t\twinreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))\n\t\t\tconfig_homes.append(\n\t\t\t\tos.path.join(\n\t\t\t\t\twinreg.ExpandEnvironmentStrings('%APPDATA%'),\n\t\t\t\t\t'Roaming'))\n\t\telse:\n\t\t\tif os.getenv('XDG_CONFIG_HOME'):\n\t\t\t\tconfig_homes.append(os.getenv('XDG_CONFIG_HOME'))\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tfrom xdg import BaseDirectory\n\t\t\t\t\tconfig_homes.append(BaseDirectory.xdg_config_home)\n\t\t\t\texcept ImportError:\n\t\t\t\t\tconfig_homes.append(os.path.expanduser('~/.config'))\n\n\t\t\t\tconfig_homes.append(os.path.expanduser('~'))\n\n\t\t\t\tif os.name == 'darwin':\n\t\t\t\t\tconfig_homes.append(os.path.expanduser('~/Library'))\n\n\tif program:\n\t\tdef __find_homes(app, dirs):\n\n\t\t\thomes = []\n\n\t\t\tfor home in dirs:\n\t\t\t\tif os.path.isdir(os.path.join(home, app)):\n\t\t\t\t\thomes.append(os.path.join(home, app))\n\n\t\t\t\tif os.path.isdir(os.path.join(home, '.' + app)):\n\t\t\t\t\thomes.append(os.path.join(home, '.' + app))\n\n\t\t\t\tif os.path.isdir(os.path.join(home, app + '.d')):\n\t\t\t\t\thomes.append(os.path.join(home, app + '.d'))\n\n\t\t\treturn homes\n\n\t\tapp_homes = __find_homes(program, config_homes)\n\n\t\t\n\n\t\tif program == 'vim':\n\t\t\tapp_homes.extend(__find_homes('vimfiles', config_homes))\n\n\t\telif program == 'chrome':\n\t\t\tapp_homes.extend(__find_homes('google-chrome', config_homes))\n\n\t\telif program in ['firefox', 'thunderbird']:\n\t\t\tapp_homes.extend(\n\t\t\t\t__find_homes(\n\t\t\t\t\tprogram, [\n\t\t\t\t\t\tos.path.expanduser('~/.mozilla')]))\n\n\t\treturn app_homes\n\n\treturn config_homes", "docstring": "Get the configuration directory.\n\nGet the configuration directories, optionally for a specific program.\n\nArgs:\nprogram\t(str) : The name of the program whose configuration directories have to be found.\nsystem_wide (bool): Gets the system-wide configuration directories.\n\nReturns:\nlist: A list of all matching configuration directories found.", "source": "juraj-google-style"}
{"code": "def decompress(content, encoding, filename='N/A'):\n    try:\n        encoding = (encoding or '').lower()\n        if (encoding == ''):\n            return content\n        elif (encoding == 'gzip'):\n            return gunzip(content)\n    except DecompressionError as err:\n        print(('Filename: ' + str(filename)))\n        raise\n    raise NotImplementedError((str(encoding) + ' is not currently supported. Supported Options: None, gzip'))", "docstring": "Decompress file content.\n\nRequired:\ncontent (bytes): a file to be compressed\nencoding: None (no compression) or 'gzip'\nOptional:\nfilename (str:default:'N/A'): Used for debugging messages\nRaises:\nNotImplementedError if an unsupported codec is specified.\ncompression.EncodeError if the encoder has an issue\n\nReturn: decompressed content", "source": "codesearchnet"}
{"code": "def compile_files(raw_dir, raw_files, tag):\n  \n  tf.logging.info(\"Compiling files with tag %s.\" % tag)\n  filename = \"%s-%s\" % (_PREFIX, tag)\n  input_compiled_file = os.path.join(raw_dir, filename + \".lang1\")\n  target_compiled_file = os.path.join(raw_dir, filename + \".lang2\")\n\n  with tf.gfile.Open(input_compiled_file, mode=\"w\") as input_writer:\n    with tf.gfile.Open(target_compiled_file, mode=\"w\") as target_writer:\n      for i in range(len(raw_files[\"inputs\"])):\n        input_file = raw_files[\"inputs\"][i]\n        target_file = raw_files[\"targets\"][i]\n\n        tf.logging.info(\"Reading files %s and %s.\" % (input_file, target_file))\n        write_file(input_writer, input_file)\n        write_file(target_writer, target_file)\n  return input_compiled_file, target_compiled_file", "docstring": "Compile raw files into a single file for each language.\n\nArgs:\nraw_dir: Directory containing downloaded raw files.\nraw_files: Dict containing filenames of input and target data.\n{\"inputs\": list of files containing data in input language\n\"targets\": list of files containing corresponding data in target language\n}\ntag: String to append to the compiled filename.\n\nReturns:\nFull path of compiled input and target files.", "source": "juraj-google-style"}
{"code": "def register(self, node, vendorSpecific=None):\n        \n        response = self.registerResponse(node, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: registerResponse()\n\nArgs:\nnode:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def check_error_response(self, body, status):\n    \n    status_code = int(status.split(' ', 1)[0])\n    if status_code >= 300:\n      raise errors.BackendError(body, status)", "docstring": "Raise an exception if the response from the backend was an error.\n\nArgs:\nbody: A string containing the backend response body.\nstatus: A string containing the backend response status.\n\nRaises:\nBackendError if the response is an error.", "source": "juraj-google-style"}
{"code": "def set_last_step_output(self, name, output, reduce_op=None):\n    if distribute_lib.in_cross_replica_context():\n        self._last_step_outputs_reduce_ops[name] = reduce_op\n        if reduce_op is None:\n            self._last_step_outputs[name] = output\n        else:\n            distribution = distribute_lib.get_strategy()\n            self._last_step_outputs[name] = distribution.reduce(reduce_op, output, axis=None)\n    else:\n        assert reduce_op is not None\n\n        def merge_fn(distribution, value):\n            self._last_step_outputs[name] = distribution.reduce(reduce_op, value, axis=None)\n            self._last_step_outputs_reduce_ops[name] = reduce_op\n        distribute_lib.get_replica_context().merge_call(merge_fn, args=(output,))", "docstring": "Set `output` with `name` to be outputted from the last step.\n\nArgs:\nname: String, name to identify the output. Doesn't need to match tensor\nname.\noutput: The tensors that should be outputted with `name`. See below for\nactual types supported.\nreduce_op: Reduction method to use to reduce outputs from multiple\nreplicas. Required if `set_last_step_output` is called in a replica\ncontext. Optional in cross_replica_context.\nWhen present, the outputs from all the replicas are reduced using the\ncurrent distribution strategy's `reduce` method. Hence, the type of\n`output` must be what's supported by the corresponding `reduce` method.\nFor e.g. if using MirroredStrategy and reduction is set, output\nmust be a `PerReplica` value.\nThe reduce method is also recorded in a dictionary\n`_last_step_outputs_reduce_ops` for later interpreting of the\noutputs as already reduced or not.", "source": "github-repos"}
{"code": "def __init__(self, quantity, period_type):\n    self._quantity = tf.convert_to_tensor(quantity, dtype=tf.int32, name='pt_quantity')\n    self._period_type = period_type", "docstring": "Initializer.\n\nArgs:\nquantity: A Tensor of type tf.int32, representing the quantities\nof period types (e.g. how many months). Can be both positive and\nnegative.\nperiod_type: A PeriodType (a day, a month, etc). Currently only one\nPeriodType per instance of PeriodTensor is supported.\n\nExample:\n```python\ntwo_weeks = PeriodTensor(2, PeriodType.WEEK)\n\nmonths = [3, 6, 9, 12]\nperiods = PeriodTensor(months, PeriodType.MONTH)\n```", "source": "github-repos"}
{"code": "def create_conversion_event(self, event_key, user_id, attributes, event_tags):\n    params = self._get_common_params(user_id, attributes)\n    conversion_params = self._get_required_params_for_conversion(event_key, event_tags)\n    params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(conversion_params)\n    return Event(self.EVENTS_URL, params, http_verb=self.HTTP_VERB, headers=self.HTTP_HEADERS)", "docstring": "Create conversion Event to be sent to the logging endpoint.\n\nArgs:\nevent_key: Key representing the event which needs to be recorded.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values.\nevent_tags: Dict representing metadata associated with the event.\n\nReturns:\nEvent object encapsulating the conversion event.", "source": "codesearchnet"}
{"code": "def decompose(miz_file: Path, output_folder: Path):\n    (mission_folder, assets_folder) = NewMiz._get_subfolders(output_folder)\n    NewMiz._wipe_folders(mission_folder, assets_folder)\n    LOGGER.info('unzipping mission file')\n    with Miz(miz_file) as miz:\n        version = miz.mission.d['version']\n        LOGGER.debug(f'mission version: \"%s\"', version)\n        LOGGER.info('copying assets to: \"%s\"', assets_folder)\n        ignore = shutil.ignore_patterns('mission')\n        shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore)\n        NewMiz._reorder_warehouses(assets_folder)\n        LOGGER.info('decomposing mission table into: \"%s\" (this will take a while)', mission_folder)\n        NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)", "docstring": "Decompose this Miz into json\n\nArgs:\noutput_folder: folder to output the json structure as a Path\nmiz_file: MIZ file path as a Path", "source": "codesearchnet"}
{"code": "def tanh_shrink(x):\n    return ops.tanh_shrink(x)", "docstring": "Tanh shrink activation function.\n\nIt is defined as:\n\n`f(x) = x - tanh(x)`.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def add_pagination_meta(self, params, meta):\n        \n        meta['page_size'] = params['page_size']\n        meta['page'] = params['page']\n\n        meta['prev'] = \"page={0}&page_size={1}\".format(\n            params['page'] - 1, params['page_size']\n        ) if meta['page'] > 0 else None\n\n        meta['next'] = \"page={0}&page_size={1}\".format(\n            params['page'] + 1, params['page_size']\n        ) if meta.get('has_more', True) else None", "docstring": "Extend default meta dictionary value with pagination hints.\n\nNote:\nThis method handler attaches values to ``meta`` dictionary without\nchanging it's reference. This means that you should never replace\n``meta`` dictionary with any other dict instance but simply modify\nits content.\n\nArgs:\nparams (dict): dictionary of decoded parameter values\nmeta (dict): dictionary of meta values attached to response", "source": "juraj-google-style"}
{"code": "def _create_variable(self, *args, **kwargs):\n    with ops.name_scope('random_generator'):\n        kwargs['name'] = 'StateVar'\n        v = variables.Variable(*args, **kwargs)\n    if isinstance(v, sharded_variable.ShardedVariable):\n        raise ValueError(\"tf.random.Generator state is sharded, which is not allowed. When creating a tf.distribute.experimental.ParameterServerStrategy, please make sure that the `variable_partitioner` argument won't shard a small variable of shape [2] or [3]. Ways to avoid sharding small variables include setting `variable_partitioner` to None or to tf.distribute.experimental.partitioners.MinSizePartitioner with a large enough `min_shard_bytes`.\")\n    return v", "docstring": "Creates a variable.\n\nArgs:\n*args: positional arguments passed along to `variables.Variable.\n**kwargs: keyword arguments passed along to `variables.Variable.\n\nReturns:\nThe created variable.", "source": "github-repos"}
{"code": "def write_other_members(self, f, catch_all=False):\n    if catch_all:\n        names = self._members.items()\n    else:\n        names = inspect.getmembers(self._module)\n    leftovers = []\n    for (name, _) in names:\n        if ((name in self._members) and (name not in self._documented)):\n            leftovers.append(name)\n    if leftovers:\n        print(('%s: undocumented members: %d' % (self._title, len(leftovers))))\n        print('\\n\n        for name in sorted(leftovers):\n            print(('  %s' % name))\n            self._documented.add(name)\n            self._mentioned.add(name)\n            self._write_member_markdown_to_file(f, '", "docstring": "Writes the leftover members to `f`.\n\nArgs:\nf: File to write to.\ncatch_all: If true, document all missing symbols from any module.\nOtherwise, document missing symbols from just this module.", "source": "codesearchnet"}
{"code": "def get_function_id(sig):\n    \n    s = sha3.keccak_256()\n    s.update(sig.encode('utf-8'))\n    return int(\"0x\" + s.hexdigest()[:8], 16)", "docstring": "Return the function id of the given signature\nArgs:\nsig (str)\nReturn:\n(int)", "source": "juraj-google-style"}
{"code": "def __get_merged_api_info(self, services):\n    \n    base_paths = sorted(set(s.api_info.base_path for s in services))\n    if len(base_paths) != 1:\n      raise api_exceptions.ApiConfigurationError(\n          'Multiple base_paths found: {!r}'.format(base_paths))\n    names_versions = sorted(set(\n        (s.api_info.name, s.api_info.api_version) for s in services))\n    if len(names_versions) != 1:\n      raise api_exceptions.ApiConfigurationError(\n          'Multiple apis/versions found: {!r}'.format(names_versions))\n    return services[0].api_info", "docstring": "Builds a description of an API.\n\nArgs:\nservices: List of protorpc.remote.Service instances implementing an\napi/version.\n\nReturns:\nThe _ApiInfo object to use for the API that the given services implement.", "source": "juraj-google-style"}
{"code": "def keras_mode_combinations(mode=None, run_eagerly=None):\n    if mode is None:\n        mode = ['eager'] if tf2.enabled() else ['graph', 'eager']\n    if run_eagerly is None:\n        run_eagerly = [True, False]\n    result = []\n    if 'eager' in mode:\n        result += combinations.combine(mode=['eager'], run_eagerly=run_eagerly)\n    if 'graph' in mode:\n        result += combinations.combine(mode=['graph'], run_eagerly=[False])\n    return result", "docstring": "Returns the default test combinations for tf.keras tests.\n\nNote that if tf2 is enabled, then v1 session test will be skipped.\n\nArgs:\nmode: List of modes to run the tests. The valid options are 'graph' and\n'eager'. Default to ['graph', 'eager'] if not specified. If a empty list\nis provide, then the test will run under the context based on tf's\nversion, eg graph for v1 and eager for v2.\nrun_eagerly: List of `run_eagerly` value to be run with the tests.\nDefault to [True, False] if not specified. Note that for `graph` mode,\nrun_eagerly value will only be False.\n\nReturns:\nA list contains all the combinations to be used to generate test cases.", "source": "github-repos"}
{"code": "def get_session(op_input_list=()):\n    session = _get_session(op_input_list)\n    if not _MANUAL_VAR_INIT:\n        with session.graph.as_default():\n            _initialize_variables(session)\n    return session", "docstring": "Returns the TF session to be used by the backend.\n\nIf a default TensorFlow session is available, we will return it.\n\nElse, we will return the global Keras session assuming it matches\nthe current graph.\n\nIf no global Keras session exists at this point:\nwe will create a new global session.\n\nNote that you can manually set the global session\nvia `K.set_session(sess)`.\n\nArgs:\nop_input_list: An option sequence of tensors or ops, which will be used\nto determine the current graph. Otherwise the default graph will be\nused.\n\nReturns:\nA TensorFlow session.", "source": "github-repos"}
{"code": "def batch_decode(self, waveforms, waveform_lengths=None) -> List[np.ndarray]:\n    waveforms = [waveform.detach().to(device='cpu', copy=True).numpy() for waveform in waveforms]\n    if waveform_lengths is not None:\n        waveforms = [waveform[:waveform_lengths[i]] for i, waveform in enumerate(waveforms)]\n    return waveforms", "docstring": "Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D\naudio waveform arrays and not a single tensor/array because in general the waveforms will have different\nlengths after removing padding.\n\nArgs:\nwaveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nThe batched output waveforms from the [`UnivNetModel`].\nwaveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):\nThe batched lengths of each waveform before padding.\n\nReturns:\n`List[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed.", "source": "github-repos"}
{"code": "def kill(self, procname):\n    for proc in psutil.process_iter():\n        if (proc.name() == procname):\n            self.info_log(('[pid:%s][name:%s] killed' % (proc.pid, proc.name())))\n            proc.kill()", "docstring": "Kill by process name\n\nArgs:\nprocname (str)", "source": "codesearchnet"}
{"code": "def append(self, event, help=\"\"):\n        \n        if isinstance(event, str):\n            self._events[event] = HookList(is_waterfall=self.is_waterfall)\n            self._help[event] = (help, getframeinfo(stack()[1][0]))\n\n            if not help:\n                logger.warning(\"Great, don't say anything about your hooks and \\\n                wait for plugin creators to figure it out.\")\n        elif isinstance(event, Iterable):\n            \n            \n            for name in event:\n                self.append(name)\n        else:\n            raise TypeError(\"Invalid event name!\")", "docstring": "Creates a new event. `event` may be iterable or string\n\nArgs:\nevent (str): Name of event to declare\n\nKwrgs:\nhelp (str): Help string for the event\n\nRaises:\nTypeError\n\n**Please** describe the event and its calling arguments in the help\nstring.", "source": "juraj-google-style"}
{"code": "def sort_resources(cls, request, resources, fail_enum, header_proto=None):\n    if (not request.sorting):\n        return resources\n    value_handlers = cls._get_handler_set(request, fail_enum, header_proto)\n\n    def sorter(resource_a, resource_b):\n        for handler in value_handlers:\n            (val_a, val_b) = handler.get_sort_values(resource_a, resource_b)\n            if (val_a < val_b):\n                return handler.xform_result((- 1))\n            if (val_a > val_b):\n                return handler.xform_result(1)\n        return 0\n    return sorted(resources, key=cmp_to_key(sorter))", "docstring": "Sorts a list of resources based on a list of sort controls\n\nArgs:\nrequest (object): The parsed protobuf request object\nresources (list of objects): The resources to be sorted\nfail_enum (int, enum): The enum status to raise with invalid keys\nheader_proto(class): Class to decode a resources header\n\nReturns:\nlist: The sorted list of resources", "source": "codesearchnet"}
{"code": "def set_category(self, category):\n    if isinstance(category, Category):\n        name = category.name\n    else:\n        name = category\n    self.find('category').text = name", "docstring": "Set package category\n\nArgs:\ncategory: String of an existing category's name, or a\nCategory object.", "source": "codesearchnet"}
{"code": "def logloss(y, p):\n    p[(p < EPS)] = EPS\n    p[(p > (1 - EPS))] = (1 - EPS)\n    return log_loss(y, p)", "docstring": "Bounded log loss error.\n\nArgs:\ny (numpy.array): target\np (numpy.array): prediction\n\nReturns:\nbounded log loss error", "source": "codesearchnet"}
{"code": "def serialize_feature_columns(feature_columns):\n    return [serialize_feature_column(fc) for fc in feature_columns]", "docstring": "Serializes a list of FeatureColumns.\n\nReturns a list of Keras-style config dicts that represent the input\nFeatureColumns and can be used with `deserialize_feature_columns` for\nreconstructing the original columns.\n\nArgs:\nfeature_columns: A list of FeatureColumns.\n\nReturns:\nKeras serialization for the list of FeatureColumns.\n\nRaises:\nValueError if called with input that is not a list of FeatureColumns.", "source": "github-repos"}
{"code": "def load_validator(schema_path, schema):\n    \n    \n    if os.name == 'nt':\n        file_prefix = 'file:\n    else:\n        file_prefix = 'file:'\n\n    resolver = RefResolver(file_prefix + schema_path.replace(\"\\\\\", \"/\"), schema)\n    validator = Draft4Validator(schema, resolver=resolver)\n\n    return validator", "docstring": "Create a JSON schema validator for the given schema.\n\nArgs:\nschema_path: The filename of the JSON schema.\nschema: A Python object representation of the same schema.\n\nReturns:\nAn instance of Draft4Validator.", "source": "juraj-google-style"}
{"code": "def get_mutation_rates(transcripts, mut_dict, ensembl):\n    \n    \n    rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,\n        'splice_region': 0, 'synonymous': 0}\n    combined = None\n    \n    for tx_id in transcripts:\n        try:\n            tx = construct_gene_object(ensembl, tx_id)\n        except ValueError:\n            continue\n        \n        if len(tx.get_cds_sequence()) % 3 != 0:\n            raise ValueError(\"anomalous_coding_sequence\")\n        \n        \n        if tx.get_chrom() == \"MT\":\n            continue\n        \n        sites = SiteRates(tx, mut_dict, masked_sites=combined)\n        combined = tx + combined\n        \n        for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']:\n            rates[cq] += sites[cq].get_summed_rate()\n    \n    if combined is None:\n        raise ValueError('no tx found')\n    \n    length = combined.get_coding_distance(combined.get_cds_end())['pos']\n    \n    return rates, combined, length", "docstring": "determines mutation rates per functional category for transcripts\n\nArgs:\ntranscripts: list of transcript IDs for a gene\nmut_dict: dictionary of local sequence context mutation rates\nensembl: EnsemblRequest object, to retrieve information from Ensembl.\n\nReturns:\ntuple of (rates, merged transcript, and transcript CDS length)", "source": "juraj-google-style"}
{"code": "def register(self, name, asymmetric=False):\n\n    def register_func(func):\n        if asymmetric:\n            self._asymmetric.append(name)\n        self.store[name] = func\n        return func\n    return register_func", "docstring": "Decorator for registering a measure with PyPhi.\n\nArgs:\nname (string): The name of the measure.\n\nKeyword Args:\nasymmetric (boolean): ``True`` if the measure is asymmetric.", "source": "codesearchnet"}
{"code": "def post_error(self, name, message):\n        \n\n        self.post_command(OPERATIONS.CMD_POST_MESSAGE,\n                          _create_message(name, states.ERROR_LEVEL, message))", "docstring": "Asynchronously post a user facing error message about a service.\n\nArgs:\nname (string): The name of the service\nmessage (string): The user facing error message that will be stored\nfor the service and can be queried later.", "source": "juraj-google-style"}
{"code": "def from_axis_angle_and_translation(axis, angle, angle_in_radians=False, translation_vec=(0, 0, 0)):\n    if isinstance(axis, (tuple, list)):\n        axis = np.array(axis)\n    if isinstance(translation_vec, (tuple, list)):\n        vec = np.array(translation_vec)\n    else:\n        vec = translation_vec\n    a = (angle if angle_in_radians else ((angle * pi) / 180))\n    cosa = cos(a)\n    sina = sin(a)\n    u = (axis / np.linalg.norm(axis))\n    r = np.zeros((3, 3))\n    r[(0, 0)] = (cosa + ((u[0] ** 2) * (1 - cosa)))\n    r[(0, 1)] = (((u[0] * u[1]) * (1 - cosa)) - (u[2] * sina))\n    r[(0, 2)] = (((u[0] * u[2]) * (1 - cosa)) + (u[1] * sina))\n    r[(1, 0)] = (((u[0] * u[1]) * (1 - cosa)) + (u[2] * sina))\n    r[(1, 1)] = (cosa + ((u[1] ** 2) * (1 - cosa)))\n    r[(1, 2)] = (((u[1] * u[2]) * (1 - cosa)) - (u[0] * sina))\n    r[(2, 0)] = (((u[0] * u[2]) * (1 - cosa)) - (u[1] * sina))\n    r[(2, 1)] = (((u[1] * u[2]) * (1 - cosa)) + (u[0] * sina))\n    r[(2, 2)] = (cosa + ((u[2] ** 2) * (1 - cosa)))\n    return SymmOp.from_rotation_and_translation(r, vec)", "docstring": "Generates a SymmOp for a rotation about a given axis plus translation.\n\nArgs:\naxis: The axis of rotation in cartesian space. For example,\n[1, 0, 0]indicates rotation about x-axis.\nangle (float): Angle of rotation.\nangle_in_radians (bool): Set to True if angles are given in\nradians. Or else, units of degrees are assumed.\ntranslation_vec: A translation vector. Defaults to zero.\n\nReturns:\nSymmOp for a rotation about given axis and translation.", "source": "codesearchnet"}
{"code": "def viewTemplate(id):\n        \n        conn = Qubole.agent()\n        return conn.get(Template.element_path(id))", "docstring": "View an existing Template details.\n\nArgs:\n`id`: ID of the template to fetch\n\nReturns:\nDictionary containing the details of the template.", "source": "juraj-google-style"}
{"code": "def AddStop(self, lat, lng, name, stop_id=None):\n    if (stop_id is None):\n        stop_id = util.FindUniqueId(self.stops)\n    stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng, name=name)\n    self.AddStopObject(stop)\n    return stop", "docstring": "Add a stop to this schedule.\n\nArgs:\nlat: Latitude of the stop as a float or string\nlng: Longitude of the stop as a float or string\nname: Name of the stop, which will appear in the feed\nstop_id: stop_id of the stop or None, in which case a unique id is picked\n\nReturns:\nA new Stop object", "source": "codesearchnet"}
{"code": "def _apply_user_agent(headers, user_agent):\n    \n    if user_agent is not None:\n        if 'user-agent' in headers:\n            headers['user-agent'] = (user_agent + ' ' + headers['user-agent'])\n        else:\n            headers['user-agent'] = user_agent\n\n    return headers", "docstring": "Adds a user-agent to the headers.\n\nArgs:\nheaders: dict, request headers to add / modify user\nagent within.\nuser_agent: str, the user agent to add.\n\nReturns:\ndict, the original headers passed in, but modified if the\nuser agent is not None.", "source": "juraj-google-style"}
{"code": "def get_airport_weather(self, iata, page=1, limit=100):\n    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)\n    weather = self._fr24.get_airport_weather(url)\n    mi = weather['sky']['visibility']['mi']\n    if ((mi is not None) and (mi != 'None')):\n        mi = float(mi)\n        km = (mi * 1.6094)\n        weather['sky']['visibility']['km'] = km\n    return weather", "docstring": "Retrieve the weather at an airport\n\nGiven the IATA code of an airport, this method returns the weather information.\n\nArgs:\niata (str): The IATA code for an airport, e.g. HYD\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA list of dicts with the data; one dict for each row of data from flightradar24\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_airport_weather('HYD')\nf.get_airport_weather('HYD',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def mkdirs(self, path):\n    raise NotImplementedError", "docstring": "Recursively create directories for the provided path.\n\nArgs:\npath: string path of the directory structure that should be created\n\nRaises:\nIOError: if leaf directory already exists.", "source": "github-repos"}
{"code": "def get_conversion_metadata(model_buffer):\n    model_object = flatbuffer_utils.convert_bytearray_to_object(model_buffer)\n    if not model_object or not model_object.metadata:\n        return None\n    for meta in model_object.metadata:\n        if meta.name.decode('utf-8') == CONVERSION_METADATA_FIELD_NAME:\n            metadata_buf = model_object.buffers[meta.buffer].data.tobytes()\n            return conversion_metadata_fb.ConversionMetadataT.InitFromObj(conversion_metadata_fb.ConversionMetadata.GetRootAsConversionMetadata(metadata_buf, 0))\n    return None", "docstring": "Read conversion metadata from a tflite model.\n\nArgs:\nmodel_buffer: A tflite model.\n\nReturns:\nThe conversion metadata or None if it is not populated.", "source": "github-repos"}
{"code": "def make_query(self, ns):\n    if issubclass(self.model_class, db.Model):\n        query = db.Query(self.model_class, namespace=ns)\n        for f in self.filters:\n            query.filter(('%s %s' % (f[0], f[1])), f[2])\n    else:\n        query = self.model_class.query(namespace=ns)\n        for f in self.filters:\n            query = query.filter(ndb.FilterNode(*f))\n    return query", "docstring": "Make a query of entities within this range.\n\nQuery options are not supported. They should be specified when the query\nis run.\n\nArgs:\nns: namespace of this query.\n\nReturns:\na db.Query or ndb.Query, depends on the model class's type.", "source": "codesearchnet"}
{"code": "def _namespace_to_ord(namespace):\n  \n  n = 0\n  for i, c in enumerate(namespace):\n    n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *\n          NAMESPACE_CHARACTERS.index(c)\n          + 1)\n  return n", "docstring": "Converts a namespace string into an int representing its lexographic order.\n\n>>> _namespace_to_ord('')\n''\n>>> _namespace_to_ord('_')\n1\n>>> _namespace_to_ord('__')\n2\n\nArgs:\nnamespace: A namespace string.\n\nReturns:\nAn int representing the lexographical order of the given namespace string.", "source": "juraj-google-style"}
{"code": "def search(self, search_phrase, limit=None):\n        \n\n        \n        \n        \n        search_phrase = search_phrase.replace('-', '_')\n        terms = SearchTermParser().parse(search_phrase)\n        from_year = terms.pop('from', None)\n        to_year = terms.pop('to', None)\n\n        query, query_params = self._make_query_from_terms(terms)\n\n        self._parsed_query = (query, query_params)\n\n        connection = self.backend.library.database.connection\n\n        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))\n\n        \n        \n        \n        \n        \n\n        results = connection.execute(query, query_params).fetchall()\n\n        for result in results:\n            vid, dataset_vid, score, db_from_year, db_to_year = result\n            if from_year and from_year < db_from_year:\n                continue\n            if to_year and to_year > db_to_year:\n                continue\n            yield PartitionSearchResult(\n                vid=vid, dataset_vid=dataset_vid, score=score)", "docstring": "Finds partitions by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to generate. None means without limit.\n\nGenerates:\nPartitionSearchResult instances.", "source": "juraj-google-style"}
{"code": "def sg_lookup(tensor, opt):\n    r\n    assert opt.emb is not None, 'emb is mandatory.'\n    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)", "docstring": "r\"\"\"Looks up the `tensor`, which is the embedding matrix.\n\nArgs:\ntensor: A tensor ( automatically given by chain )\nopt:\nemb: A 2-D `Tensor`. An embedding matrix.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def __driver_completer(self, toks, text, state):\n    if (state != 0):\n        return self.__completion_candidates[state]\n    if ((not toks) or ((len(toks) == 1) and (text == toks[0]))):\n        try:\n            self.__completion_candidates = self.__complete_cmds(text)\n        except:\n            self.stderr.write('\\n')\n            self.stderr.write(traceback.format_exc())\n            self.__completion_candidates = []\n        return self.__completion_candidates[state]\n    cmd = toks[0]\n    args = (toks[1:] if (len(toks) > 1) else None)\n    if (text and args):\n        del args[(- 1)]\n    if (cmd in self._completer_map.keys()):\n        completer_name = self._completer_map[cmd]\n        completer_method = getattr(self, completer_name)\n        try:\n            self.__completion_candidates = completer_method(cmd, args, text)\n        except:\n            self.stderr.write('\\n')\n            self.stderr.write(traceback.format_exc())\n            self.__completion_candidates = []\n    else:\n        self.__completion_candidates = []\n    return self.__completion_candidates[state]", "docstring": "Driver level completer.\n\nArguments:\ntoks: A list of tokens, tokenized from the original input line.\ntext: A string, the text to be replaced if a completion candidate is\nchosen.\nstate: An integer, the index of the candidate out of the list of\ncandidates.\n\nReturns:\nA string, the candidate.", "source": "codesearchnet"}
{"code": "def get_version(package_name):\n    \n    module = 'prosper.' + package_name + '._version'\n    package = importlib.import_module(module)\n\n    version = package.__version__\n\n    return version", "docstring": "find __version__ for making package\n\nArgs:\npackage_name (str): path to _version.py folder (abspath > relpath)\n\nReturns:\nstr: __version__ value", "source": "juraj-google-style"}
{"code": "def delaunay_reduce(lattice, eps=1e-05):\n    _set_no_error()\n    delaunay_lattice = np.array(np.transpose(lattice), dtype='double', order='C')\n    result = spg.delaunay_reduce(delaunay_lattice, float(eps))\n    _set_error_message()\n    if (result == 0):\n        return None\n    else:\n        return np.array(np.transpose(delaunay_lattice), dtype='double', order='C')", "docstring": "Run Delaunay reduction\n\nArgs:\nlattice: Lattice parameters in the form of\n[[a_x, a_y, a_z],\n[b_x, b_y, b_z],\n[c_x, c_y, c_z]]\nsymprec:\nfloat: Tolerance to check if volume is close to zero or not and\nif two basis vectors are orthogonal by the value of dot\nproduct being close to zero or not.\n\nReturns:\nif the Delaunay reduction succeeded:\nReduced lattice parameters are given as a numpy 'double' array:\n[[a_x, a_y, a_z],\n[b_x, b_y, b_z],\n[c_x, c_y, c_z]]\notherwise None is returned.", "source": "codesearchnet"}
{"code": "def clear_worker_output(self):\n        \n        self.data_store.clear_worker_output()\n\n        \n        self.plugin_manager.load_all_plugins()\n\n        \n        self._store_information()", "docstring": "Drops all of the worker output collections\nArgs:\nNone\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def port(alias_name, default=None, allow_none=False):\n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    try:\n        return int(_split_docker_link(alias_name)[2])\n    except KeyError as err:\n        if (default or allow_none):\n            return default\n        else:\n            raise err", "docstring": "Get the port from the docker link alias or return the default.\n\nArgs:\nalias_name: The docker link alias\ndefault: The default value if the link isn't available\nallow_none: If the return value can be `None` (i.e. optional)\n\nExamples:\nAssuming a Docker link was created with ``docker --link postgres:db``\nand the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.\n\n>>> envitro.docker.port('DB')\n5432", "source": "codesearchnet"}
{"code": "def decode_list_oov(self, ids, source_oov_id_to_token):\n    \n    seq = reversed(ids) if self._reverse else ids\n    tokens = []\n    for cur_id in seq:\n      if cur_id in self._id_to_token:\n        tokens.append(self._id_to_token[cur_id])\n      else:\n        tokens.append(source_oov_id_to_token[cur_id - self.vocab_size])\n    return tokens", "docstring": "decode ids back to tokens, considering OOVs temporary IDs.\n\nArgs:\nids: vocab ids. Could possibly include source temporary OOV ID starting\nfrom vocab_size.\nsource_oov_id_to_token: a list of source OOV tokens, with the order the\nsame as they appear in the source.\n\nReturns:\ndecoded tokens, possibly including source OOV tokens.", "source": "juraj-google-style"}
{"code": "def add_loss(self, loss, name=None, regularization=False, add_summaries=True):\n    _ = name\n    if regularization:\n        self._g.add_to_collection(GraphKeys.REGULARIZATION_LOSSES, loss)\n    tf.add_to_collection(GraphKeys.LOSSES, loss)\n    if add_summaries:\n        self.add_scalar_summary(loss, 'loss')\n        self.add_average_summary(loss, 'loss_average')", "docstring": "Append a loss to the total loss for the network.\n\nArgs:\nloss: append this loss operation\nname: The name for this loss, defaults to loss.op.name\nregularization: Set to True if this is a regularization loss.\nadd_summaries: Set to True if you want to see scalar and average summary.", "source": "codesearchnet"}
{"code": "def import_subview(self, idx, subview):\n        \n\n        subview.corpus = self\n        self._subviews[idx] = subview", "docstring": "Add the given subview to the corpus.\n\nArgs:\nidx (str): An idx that is unique in the corpus for identifying the subview.\nIf already a subview exists with the given id it will be overridden.\nsubview (Subview): The subview to add.", "source": "juraj-google-style"}
{"code": "def _set_verbosity_from(posarg):\n\n    def decorator(f):\n\n        def wrapper(*args, **kwargs):\n            options = kwargs.get('options', args[posarg])\n            with config.verbosity_from(options):\n                return f(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "Decorator to set the verbosity for a function that takes an options arg.\n\nAssumes that the function has an argument named `options` that is a\nconfig.Options object.\n\nArguments:\nposarg: The index of `options` in the positional arguments.\n\nReturns:\nThe decorator.", "source": "github-repos"}
{"code": "def default_search_space():\n    matrix = [[pg.oneof([0, 1], hints=EDGE_HINTS) if y > x else 0 for y in range(NUM_VERTICES)] for x in range(NUM_VERTICES)]\n    return model_spec(pg.manyof(NUM_VERTICES - 2, ALLOWED_OPS, choices_distinct=False, hints=OP_HINTS), matrix)", "docstring": "The default search space in NAS-Bench.\n\nThis equals to the default search space of NAS-Bench, which mutate candidate\nops and their connections.\n\nReturns:\nA hyper model object that repesents a search space.", "source": "github-repos"}
{"code": "def _is_definition_section(source):\n    try:\n        definitions = textwrap.dedent(source).split('\\n', 1)[1].splitlines()\n        return all((re.match('\\\\s\\\\s+((?!\\\\s\\\\s).+)\\\\s\\\\s+.+', s) for s in definitions))\n    except IndexError:\n        return False", "docstring": "Determine if the source is a definition section.\n\nArgs:\nsource: The usage string source that may be a section.\n\nReturns:\nTrue if the source describes a definition section; otherwise, False.", "source": "codesearchnet"}
{"code": "def pkg_config(pkg_libraries):\n    \n    libraries=[]\n    library_dirs=[]\n    include_dirs=[]\n\n    \n    for pkg in pkg_libraries:\n        if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0:\n            pass\n        else:\n            print(\"Could not find library {0}\".format(pkg))\n            sys.exit(1)\n\n    \n    if len(pkg_libraries)>0 :\n        \n        \n        for token in getoutput(\"PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s\" % ' '.join(pkg_libraries)).split():\n            if token.startswith(\"-l\"):\n                libraries.append(token[2:])\n            elif token.startswith(\"-L\"):\n                library_dirs.append(token[2:])\n            elif token.startswith(\"-I\"):\n                include_dirs.append(token[2:])\n\n    return libraries, library_dirs, include_dirs", "docstring": "Use pkg-config to query for the location of libraries, library directories,\nand header directories\n\nArguments:\npkg_libries(list): A list of packages as strings\n\nReturns:\nlibraries(list), library_dirs(list), include_dirs(list)", "source": "juraj-google-style"}
{"code": "def add_columns(tree_view, df_py_dtypes, list_store):\n    tree_view.set_model(list_store)\n    for (column_i, (i, dtype_i)) in df_py_dtypes[['i', 'dtype']].iterrows():\n        tree_column_i = gtk.TreeViewColumn(column_i)\n        tree_column_i.set_name(column_i)\n        if (dtype_i in (int, long)):\n            property_name = 'text'\n            cell_renderer_i = gtk.CellRendererSpin()\n        elif (dtype_i == float):\n            property_name = 'text'\n            cell_renderer_i = gtk.CellRendererSpin()\n        elif (dtype_i in (bool,)):\n            property_name = 'active'\n            cell_renderer_i = gtk.CellRendererToggle()\n        elif (dtype_i in (str,)):\n            property_name = 'text'\n            cell_renderer_i = gtk.CellRendererText()\n        else:\n            raise ValueError(('No cell renderer for dtype: %s' % dtype_i))\n        cell_renderer_i.set_data('column_i', i)\n        cell_renderer_i.set_data('column', tree_column_i)\n        tree_column_i.pack_start(cell_renderer_i, True)\n        tree_column_i.add_attribute(cell_renderer_i, property_name, i)\n        tree_view.append_column(tree_column_i)", "docstring": "Add columns to a `gtk.TreeView` for the types listed in `df_py_dtypes`.\n\nArgs:\n\ntree_view (gtk.TreeView) : Tree view to append columns to.\ndf_py_dtypes (pandas.DataFrame) : Data frame containing type\ninformation for one or more columns in `list_store`.\nlist_store (gtk.ListStore) : Model data.\n\nReturns:\n\nNone", "source": "codesearchnet"}
{"code": "def _bfs_path_states(self, graph, start):\n        \n        pathstates = {}\n        \n        queue = []\n        visited = []\n        \n        queue.append([['', start]])\n        while queue:\n            \n            path = queue.pop(0)\n            \n            node = path[-1][1]\n            \n            if node.stateid not in pathstates and node.stateid != len(list(graph.states)):\n                pathstates[node.stateid] = ''.join(\n                    [mnode[0] for mnode in path])\n            visited.append(node.stateid)\n            \n            \n            for arc in node.arcs:\n                char = graph.isyms.find(arc.ilabel)\n                next_state = graph[arc.nextstate]\n                if next_state.stateid not in visited:\n                    new_path = list(path)\n                    new_path.append([char, next_state])\n                    queue.append(new_path)\n        return pathstates", "docstring": "Find state access strings (DFA shortest paths for every state)\nusing BFS\nArgs:\ngraph (DFA): The DFA states\nstart (int): The DFA initial state\nReturn:\nlist: A list of all the DFA shortest paths for every state", "source": "juraj-google-style"}
{"code": "def dataframe(self, force_refresh=False):\n        \n        if force_refresh:\n            self.clear_cache()\n        if self._dataframe is None:\n            self._dataframe = self._fetch_dataframe()\n        return self._dataframe", "docstring": "A pandas dataframe with lots of interesting results about this object.\nCreated by calling SageMaker List and Describe APIs and converting them into\na convenient tabular summary.\n\nArgs:\nforce_refresh (bool): Set to True to fetch the latest data from SageMaker API.", "source": "juraj-google-style"}
{"code": "def wind_direction(self, value=999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `wind_direction`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `wind_direction`')\n        if (value > 360.0):\n            raise ValueError('value need to be smaller 360.0 for field `wind_direction`')\n    self._wind_direction = value", "docstring": "Corresponds to IDD Field `wind_direction`\n\nArgs:\nvalue (float): value for IDD Field `wind_direction`\nUnit: degrees\nvalue >= 0.0\nvalue <= 360.0\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def size(self, name=None):\n    with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]):\n        with ops.colocate_with(self.resource_handle):\n            return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)", "docstring": "Compute the number of elements in this table.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA scalar tensor containing the number of elements in this table.", "source": "github-repos"}
{"code": "def _add_dispatcher(self, path_regex, dispatch_function):\n    self._dispatchers.append((re.compile(path_regex), dispatch_function))", "docstring": "Add a request path and dispatch handler.\n\nArgs:\npath_regex: A string regex, the path to match against incoming requests.\ndispatch_function: The function to call for these requests.  The function\nshould take (request, start_response) as arguments and\nreturn the contents of the response body.", "source": "codesearchnet"}
{"code": "def add_group_maintainer(self, name, user):\n        \n        self.service.add_group_maintainer(\n            name, user, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Add the given user to the named group.\n\nBoth group and user must already exist for this to succeed.\n\nArgs:\nname (string): Name of group.\nuser (string): User to add to group.\nversion (optional[string]): Version of the Boss API to use.  Defaults to the latest supported version.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def check(self, check_req):\n    self.start()\n    res = self._check_aggregator.check(check_req)\n    if res:\n        _logger.debug(u'using cached check response for %s: %s', check_req, res)\n        return res\n    try:\n        transport = self._create_transport()\n        resp = transport.services.Check(check_req)\n        self._check_aggregator.add_response(check_req, resp)\n        return resp\n    except exceptions.Error:\n        _logger.error(u'direct send of check request failed %s', check_req, exc_info=True)\n        return None", "docstring": "Process a check_request.\n\nThe req is first passed to the check_aggregator.  If there is a valid\ncached response, that is returned, otherwise a response is obtained from\nthe transport.\n\nArgs:\ncheck_req (``ServicecontrolServicesCheckRequest``): to be sent to\nthe service control service\n\nReturns:\n``CheckResponse``: either the cached response if one is applicable\nor a response from making a transport request, or None if\nthe request to the transport fails", "source": "codesearchnet"}
{"code": "def with_context(cls, setup_phases, teardown_phases):\n    setup = flatten_phases_and_groups(setup_phases)\n    teardown = flatten_phases_and_groups(teardown_phases)\n\n    def _context_wrapper(*phases):\n        return cls(setup=setup, main=flatten_phases_and_groups(phases), teardown=teardown)\n    return _context_wrapper", "docstring": "Create PhaseGroup creator function with setup and teardown phases.\n\nArgs:\nsetup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/\ncallables/iterables, phases to run during the setup for the PhaseGroup\nreturned from the created function.\nteardown_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/\ncallables/iterables, phases to run during the teardown for the\nPhaseGroup returned from the created function.\n\nReturns:\nFunction that takes *phases and returns a PhaseGroup with the predefined\nsetup and teardown phases, with *phases as the main phases.", "source": "codesearchnet"}
{"code": "def period(self, value: float):\n        \n        if value < 0:\n            raise ValueError(\"Period must be greater or equal than zero.\")\n        self._period = timedelta(seconds=value)", "docstring": "Set the period.\n\nArgs:\nvalue (float): seconds", "source": "juraj-google-style"}
{"code": "def ReverseCloseExpression(clean_lines, linenum, pos):\n    line = clean_lines.elided[linenum]\n    if (line[pos] not in ')}]>'):\n        return (line, 0, (- 1))\n    (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])\n    if (start_pos > (- 1)):\n        return (line, linenum, start_pos)\n    while (stack and (linenum > 0)):\n        linenum -= 1\n        line = clean_lines.elided[linenum]\n        (start_pos, stack) = FindStartOfExpressionInLine(line, (len(line) - 1), stack)\n        if (start_pos > (- 1)):\n            return (line, linenum, start_pos)\n    return (line, 0, (- 1))", "docstring": "If input points to ) or } or ] or >, finds the position that opens it.\n\nIf lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the\nlinenum/pos that correspond to the opening of the expression.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\npos: A position on the line.\n\nReturns:\nA tuple (line, linenum, pos) pointer *at* the opening brace, or\n(line, 0, -1) if we never find the matching opening brace.  Note\nwe ignore strings and comments when matching; and the line we\nreturn is the 'cleansed' line at linenum.", "source": "codesearchnet"}
{"code": "def __init__(self, package, ad):\n    super().__init__(app_name=package, ad=ad)\n    self.package = package\n    self._ad = ad\n    self._adb = ad.adb\n    self._proc = None\n    self._user_id = None", "docstring": "Initializes a SnippetClient.\n\nArgs:\npackage: (str) The package name of the apk where the snippets are\ndefined.\nad: (AndroidDevice) the device object associated with this client.", "source": "github-repos"}
{"code": "def nb_ll_row(params, data_row):\n    p = params[0]\n    r = params[1]\n    n = len(data_row)\n    ll = (np.sum(gammaln((data_row + r))) - np.sum(gammaln((data_row + 1))))\n    ll -= (n * gammaln(r))\n    ll += (np.sum(data_row) * np.log(p))\n    ll += ((n * r) * np.log((1 - p)))\n    return (- ll)", "docstring": "returns the negative LL of a single row.\n\nArgs:\nparams (array) - [p, r]\ndata_row (array) - 1d array of data\n\nReturns:\nLL of row", "source": "codesearchnet"}
{"code": "def decrypt(key, ciphertext):\n    index = 0\n    decrypted = ''\n    for char in ciphertext:\n        if (char in ((string.punctuation + string.whitespace) + string.digits)):\n            decrypted += char\n            continue\n        alphabet = (string.ascii_uppercase if key[index].isupper() else string.ascii_lowercase)\n        decrypted += ''.join(shift.decrypt(int(alphabet.index(key[index])), char))\n        index = ((index + 1) % len(key))\n    return decrypted", "docstring": "Decrypt Vigenere encrypted ``ciphertext`` using ``key``.\n\nExample:\n>>> decrypt(\"KEY\", \"RIJVS\")\nHELLO\n\nArgs:\nkey (iterable): The key to use\nciphertext (str): The text to decrypt\n\nReturns:\nDecrypted ciphertext", "source": "codesearchnet"}
{"code": "def add_send_last_message(self, connection, send_last_message):\n    self._send_last_message[connection] = send_last_message\n    LOGGER.debug('Added send_last_message function for connection %s', connection)", "docstring": "Adds a send_last_message function to the Dispatcher's\ndictionary of functions indexed by connection.\n\nArgs:\nconnection (str): A locally unique identifier\nprovided by the receiver of messages.\nsend_last_message (fn): The method that should be called\nby the dispatcher to respond to messages which\narrive via connection, when the connection should be closed\nafter the message has been sent.", "source": "codesearchnet"}
{"code": "def __setitem__(self, key, item):\n        \n        if isinstance(key, str):\n            column = item\n            self.columns.add(key)\n            if len(column) > len(self.rows):\n                for i, value in enumerate(column):\n                    if i < len(self.rows):\n                        self.rows[i][key] = value\n                    else:\n                        self.rows.append({key: value})\n            else:\n                for i, row in enumerate(self.rows):\n                    if i < len(column):\n                        self.rows[i][key] = column[i]\n                    else:\n                        self.rows[i][key] = None\n        elif isinstance(key, slice):\n            rows = item\n            for row in rows:\n                if not isinstance(row, dict):\n                    raise ValueError('Row must be a dict.')\n                self.columns.update(row.keys())\n            self.rows[key] = rows\n        elif isinstance(key, int):\n            row = item\n            if not isinstance(row, dict):\n                raise ValueError('Row must be a dict.')\n            self.columns.update(row.keys())\n            self.rows[key] = row\n        else:\n            raise TypeError('Invalid argument type.')", "docstring": "Set a column or row for a dataset.\n\nArgs:\nkey (str or int): String referencing a column or integer referencing a row\nitem (list or dict): Column or rows to set in the dataset.", "source": "juraj-google-style"}
{"code": "def create_in_hdx(self):\n    self.check_required_fields()\n    id = self.data.get('id')\n    if (id and self._load_from_hdx('resource', id)):\n        logger.warning(('%s exists. Updating %s' % ('resource', id)))\n        if (self.file_to_upload and ('url' in self.data)):\n            del self.data['url']\n        self._merge_hdx_update('resource', 'id', self.file_to_upload)\n    else:\n        self._save_to_hdx('create', 'name', self.file_to_upload)", "docstring": "Check if resource exists in HDX and if so, update it, otherwise create it\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def molecule(lines):\n    count_line = lines[3]\n    num_atoms = int(count_line[0:3])\n    num_bonds = int(count_line[3:6])\n    compound = Compound()\n    compound.graph._node = atoms(lines[4:(num_atoms + 4)])\n    compound.graph._adj = bonds(lines[(num_atoms + 4):((num_atoms + num_bonds) + 4)], compound.graph._node.keys())\n    props = properties(lines[((num_atoms + num_bonds) + 4):])\n    add_properties(props, compound)\n    return compound", "docstring": "Parse molfile part into molecule object\n\nArgs:\nlines (list): lines of molfile part\n\nRaises:\nValueError: Symbol not defined in periodictable.yaml\n(Polymer expression not supported yet)", "source": "codesearchnet"}
{"code": "def most_frequent_terms(self, depth):\n    counts = self.term_counts()\n    top_terms = set(list(counts.keys())[:depth])\n    end_count = list(counts.values())[:depth][(- 1)]\n    bucket = self.term_count_buckets()[end_count]\n    return top_terms.union(set(bucket))", "docstring": "Get the X most frequent terms in the text, and then probe down to get\nany other terms that have the same count as the last term.\n\nArgs:\ndepth (int): The number of terms.\n\nReturns:\nset: The set of frequent terms.", "source": "codesearchnet"}
{"code": "def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue):\n    must_split = previous_node.state.MustSplit()\n    if newline and (not previous_node.state.CanSplit(must_split)):\n        return count\n    if not newline and must_split:\n        return count\n    node = _StateNode(previous_node.state, newline, previous_node)\n    penalty += node.state.AddTokenToState(newline=newline, dry_run=True, must_split=must_split)\n    heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(penalty, count), node))\n    return count + 1", "docstring": "Add the following state to the analysis queue.\n\nAssume the current state is 'previous_node' and has been reached with a\npenalty of 'penalty'. Insert a line break if 'newline' is True.\n\nArguments:\npenalty: (int) The penalty associated with the path up to this point.\nprevious_node: (_StateNode) The last _StateNode inserted into the priority\nqueue.\nnewline: (bool) Add a newline if True.\ncount: (int) The number of elements in the queue.\np_queue: (heapq) The priority queue representing the solution space.\n\nReturns:\nThe updated number of elements in the queue.", "source": "github-repos"}
{"code": "def _save_model(self):\n    if not file_utils.exists(self.backup_dir):\n        file_utils.makedirs(self.backup_dir)\n    if self.double_checkpoint and file_utils.exists(self._weights_path):\n        file_utils.copy(self._weights_path, self._prev_weights_path)\n    if self.double_checkpoint and file_utils.exists(self._training_metadata_path):\n        file_utils.copy(self._training_metadata_path, self._prev_training_metadata_path)\n    self.model.save_weights(filepath=self._weights_path, overwrite=True)\n    with file_utils.File(self._training_metadata_path, 'w') as f:\n        training_metadata = {'epoch': self._current_epoch, 'batch': self._last_batch_seen}\n        f.write(json.dumps(training_metadata))", "docstring": "Saves the model.\n\nArgs:\nepoch: the epoch this iteration is in.\nbatch: the batch this iteration is in. `None` if the `save_freq`\nis set to `\"epoch\"`.\nlogs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.", "source": "github-repos"}
{"code": "def ensure_tf_install():\n    try:\n        import tensorflow as tf\n    except ImportError:\n        print('\\n\\nFailed to import TensorFlow. Please note that TensorFlow is not installed by default when you install TensorFlow Datasets. This is so that users can decide whether to install the GPU-enabled TensorFlow package. To use TensorFlow Datasets, please install the most recent version of TensorFlow, by following instructions at https://www.tensorflow.org/install.\\n\\n')\n        raise\n    tf_version = distutils.version.LooseVersion(tf.__version__)\n    v_1_12 = distutils.version.LooseVersion('1.12.0')\n    if (tf_version < v_1_12):\n        raise ImportError('This version of TensorFlow Datasets requires TensorFlow version >= {required}; Detected an installation of version {present}. Please upgrade TensorFlow to proceed.'.format(required='1.12.0', present=tf.__version__))\n    _patch_tf(tf)", "docstring": "Attempt to import tensorflow, and ensure its version is sufficient.\n\nRaises:\nImportError: if either tensorflow is not importable or its version is\ninadequate.", "source": "codesearchnet"}
{"code": "def __init__(self,\n               name=\"\",\n               default=None,\n               description=\"\",\n               friendly_name=\"\",\n               hidden=False):\n    \n    self.name = name\n    self.default = default\n    self.description = description\n    self.hidden = hidden\n    if not friendly_name:\n      friendly_name = name.replace(\"_\", \" \").capitalize()\n\n    self.friendly_name = friendly_name", "docstring": "Build a TypeInfo type descriptor.\n\nArgs:\nname: The name of the parameter that this Type info corresponds to.\ndefault: The default value that should be specified if the parameter was\nnot set.\ndescription: A string describing this flow argument.\nfriendly_name: A human readable name which may be provided.\nhidden: Should the argument be hidden from the UI.", "source": "juraj-google-style"}
{"code": "def CreateDataTypeMap(self, definition_name):\n    \n    data_type_definition = self._definitions_registry.GetDefinitionByName(\n        definition_name)\n    if not data_type_definition:\n      return None\n\n    return DataTypeMapFactory.CreateDataTypeMapByType(data_type_definition)", "docstring": "Creates a specific data type map by name.\n\nArgs:\ndefinition_name (str): name of the data type definition.\n\nReturns:\nDataTypeMap: data type map or None if the date type definition\nis not available.", "source": "juraj-google-style"}
{"code": "def memory_write64(self, addr, data, zone=None):\n        \n        words = []\n        bitmask = 0xFFFFFFFF\n        for long_word in data:\n            words.append(long_word & bitmask)          \n            words.append((long_word >> 32) & bitmask)  \n        return self.memory_write32(addr, words, zone=zone)", "docstring": "Writes long words to memory of a target system.\n\nNote:\nThis is little-endian.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of long words to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of long words written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "juraj-google-style"}
{"code": "def gripper_factory(name):\n    \n    if name == \"TwoFingerGripper\":\n        return TwoFingerGripper()\n    if name == \"LeftTwoFingerGripper\":\n        return LeftTwoFingerGripper()\n    if name == \"PR2Gripper\":\n        return PR2Gripper()\n    if name == \"RobotiqGripper\":\n        return RobotiqGripper()\n    if name == \"PushingGripper\":\n        return PushingGripper()\n    if name == \"RobotiqThreeFingerGripper\":\n        return RobotiqThreeFingerGripper()\n    raise ValueError(\"Unknown gripper name {}\".format(name))", "docstring": "Generator for grippers\n\nCreates a Gripper instance with the provided name.\n\nArgs:\nname: the name of the gripper class\n\nReturns:\ngripper: Gripper instance\n\nRaises:\nValueError: if the gripper name is not recognized", "source": "juraj-google-style"}
{"code": "def plot_kdes(self, mnemonic, alias=None, uwi_regex=None, return_fig=False):\n    wells = self.find_wells_with_curve(mnemonic, alias=alias)\n    (fig, axs) = plt.subplots(len(self), 1, figsize=(10, (1.5 * len(self))))\n    curves = [w.get_curve(mnemonic, alias=alias) for w in wells]\n    all_data = np.hstack(curves)\n    all_data = all_data[(~ np.isnan(all_data))]\n    amax = np.percentile(all_data, 99)\n    amin = np.percentile(all_data, 1)\n    for (i, w) in enumerate(self):\n        c = w.get_curve(mnemonic, alias=alias)\n        if (uwi_regex is not None):\n            label = re.sub(uwi_regex, '\\\\1', w.uwi)\n        else:\n            label = w.uwi\n        if (c is not None):\n            axs[i] = c.plot_kde(ax=axs[i], amax=amax, amin=amin, label=((label + '-') + str(c.mnemonic)))\n        else:\n            continue\n    if return_fig:\n        return fig\n    else:\n        return", "docstring": "Plot KDEs for all curves with the given name.\n\nArgs:\nmnemonic (str): the name of the curve to look for.\nalias (dict): a welly alias dictionary.\nuwi_regex (str): a regex pattern. Only this part of the UWI will be displayed\non the plot of KDEs.\nreturn_fig (bool): whether to return the matplotlib figure object.\n\nReturns:\nNone or figure.", "source": "codesearchnet"}
{"code": "def GetAnalyzerInstances(cls, analyzer_names):\n    analyzer_instances = []\n    for (analyzer_name, analyzer_class) in iter(cls.GetAnalyzers()):\n        if (analyzer_name in analyzer_names):\n            analyzer_instances.append(analyzer_class())\n    return analyzer_instances", "docstring": "Retrieves instances for all the specified analyzers.\n\nArgs:\nanalyzer_names (list[str]): names of the analyzers to retrieve.\n\nReturns:\nlist[BaseAnalyzer]: analyzer instances.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 solution_size,\n                 population_size=20):\n        \n        super(ExhaustiveBinary, self).__init__(solution_size, population_size)\n        self._next_int = 0", "docstring": "Create an object that optimizes a given fitness function.\n\nArgs:\nsolution_size: The number of bits in every solution.\npopulation_size: The number of solutions in every iteration.", "source": "juraj-google-style"}
{"code": "def delete_existing_cname(env, zone_id, dns_name):\n    \n    client = boto3.Session(profile_name=env).client('route53')\n    startrecord = None\n    newrecord_name = dns_name\n    startrecord = find_existing_record(env, zone_id, newrecord_name, check_key='Type', check_value='CNAME')\n    if startrecord:\n        LOG.info(\"Deleting old record: %s\", newrecord_name)\n        _response = client.change_resource_record_sets(\n            HostedZoneId=zone_id, ChangeBatch={'Changes': [{\n                'Action': 'DELETE',\n                'ResourceRecordSet': startrecord\n            }]})\n        LOG.debug('Response from deleting %s: %s', dns_name, _response)", "docstring": "Delete an existing CNAME record.\n\nThis is used when updating to multi-region for deleting old records. The\nrecord can not just be upserted since it changes types.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\ndns_name (str): FQDN of application's dns entry to add/update.", "source": "juraj-google-style"}
{"code": "def to_qsw(orbit):\n    (pos, vel) = _split(orbit)\n    q = (pos / norm(pos))\n    w = (np.cross(pos, vel) / (norm(pos) * norm(vel)))\n    s = np.cross(w, q)\n    return np.array([q, s, w])", "docstring": "In the QSW Local Orbital Reference Frame, x is oriented along the position vector,\nz along the angular momentum, and y complete the frame.\n\nThe frame is sometimes also called RSW (where R stands for radial) or LVLH (Local\nVertical Local Horizontal).\n\nArgs:\norbit (list): Array of length 6\nReturn:\nnumpy.ndarray: matrix to convert from inertial frame to QSW\n\n>>> delta_qsw = [1, 0, 0]\n>>> p = [-6142438.668, 3492467.560, -25767.25680]\n>>> v = [505.8479685, 942.7809215, 7435.922231]\n>>> pv = p + v\n>>> mat = to_qsw(pv).T\n>>> delta_inert = mat @ delta_qsw\n>>> all(delta_inert == p / norm(p))\nTrue", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        if token_ids_1 is not None:\n            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n    return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*, defaults to `None`):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nSet to True if the token list is already formatted with special tokens for the model\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def TSKVolumeGetBytesPerSector(tsk_volume):\n    if (hasattr(tsk_volume, 'info') and (tsk_volume.info is not None)):\n        block_size = getattr(tsk_volume.info, 'block_size', 512)\n    else:\n        block_size = 512\n    return block_size", "docstring": "Retrieves the number of bytes per sector from a TSK volume object.\n\nArgs:\ntsk_volume (pytsk3.Volume_Info): TSK volume information.\n\nReturns:\nint: number of bytes per sector or 512 by default.", "source": "codesearchnet"}
{"code": "def mock(self, slot, rpc_id, value):\n        \n\n        address = slot.address\n\n        if address not in self.mock_rpcs:\n            self.mock_rpcs[address] = {}\n\n        self.mock_rpcs[address][rpc_id] = value", "docstring": "Store a mock return value for an RPC\n\nArgs:\nslot (SlotIdentifier): The slot we are mocking\nrpc_id (int): The rpc we are mocking\nvalue (int): The value that should be returned\nwhen the RPC is called.", "source": "juraj-google-style"}
{"code": "class PerKey(PTransform):\n\n    def __init__(self, num_quantiles, key=None, reverse=False, weighted=False, input_batched=False):\n        self._num_quantiles = num_quantiles\n        self._key = key\n        self._reverse = reverse\n        self._weighted = weighted\n        self._input_batched = input_batched\n\n    def expand(self, pcoll):\n        return pcoll | CombinePerKey(ApproximateQuantilesCombineFn.create(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched))\n\n    def display_data(self):\n        return ApproximateQuantiles._display_data(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched)", "docstring": "PTransform takes PCollection of KV and returns a list based on each key\nwhose single value is list of approximate N-tiles of the input element of\nthe key.\n\nArgs:\nnum_quantiles: number of elements in the resulting quantiles values list.\nkey: (optional) Key is  a mapping of elements to a comparable key, similar\nto the key argument of Python's sorting methods.\nreverse: (optional) whether to order things smallest to largest, rather\nthan largest to smallest.\nweighted: (optional) if set to True, the transform returns weighted\nquantiles. The input PCollection is then expected to contain tuples of\ninput values with the corresponding weight.\ninput_batched: (optional) if set to True, the transform expects each\nelement of input PCollection to be a batch, which is a list of elements\nfor non-weighted case and a tuple of lists of elements and weights for\nweighted. Provides a way to accumulate multiple elements at a time more\nefficiently.", "source": "github-repos"}
{"code": "def CheckNextIncludeOrder(self, header_type):\n    error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section]))\n    last_section = self._section\n    if (header_type == _C_SYS_HEADER):\n        if (self._section <= self._C_SECTION):\n            self._section = self._C_SECTION\n        else:\n            self._last_header = ''\n            return error_message\n    elif (header_type == _CPP_SYS_HEADER):\n        if (self._section <= self._CPP_SECTION):\n            self._section = self._CPP_SECTION\n        else:\n            self._last_header = ''\n            return error_message\n    elif (header_type == _LIKELY_MY_HEADER):\n        if (self._section <= self._MY_H_SECTION):\n            self._section = self._MY_H_SECTION\n        else:\n            self._section = self._OTHER_H_SECTION\n    elif (header_type == _POSSIBLE_MY_HEADER):\n        if (self._section <= self._MY_H_SECTION):\n            self._section = self._MY_H_SECTION\n        else:\n            self._section = self._OTHER_H_SECTION\n    else:\n        assert (header_type == _OTHER_HEADER)\n        self._section = self._OTHER_H_SECTION\n    if (last_section != self._section):\n        self._last_header = ''\n    return ''", "docstring": "Returns a non-empty error message if the next header is out of order.\n\nThis function also updates the internal state to be ready to check\nthe next include.\n\nArgs:\nheader_type: One of the _XXX_HEADER constants defined above.\n\nReturns:\nThe empty string if the header is in the right order, or an\nerror message describing what's wrong.", "source": "codesearchnet"}
{"code": "def get_as(access_token, subscription_id, resource_group, as_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/availabilitySets/', as_name,\n                        '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get availability set details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nas_name (str): Name of the new availability set.\n\nReturns:\nHTTP response. JSON body of the availability set properties.", "source": "juraj-google-style"}
{"code": "def list_matching(self, ref_name: str, filter_: str) -> Iterable[ListEntry]:\n    (canonical, canonical_i) = self._get_pattern((ref_name + filter_))\n    for entry in self.list():\n        if (entry.name == 'INBOX'):\n            if canonical_i.match('INBOX'):\n                (yield entry)\n        elif canonical.match(entry.name):\n            (yield entry)", "docstring": "Return all the entries in the list tree that match the given query.\n\nArgs:\nref_name: Mailbox reference name.\nfilter_: Mailbox name with possible wildcards.", "source": "codesearchnet"}
{"code": "def call(self, inputs_embeds, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutput]:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    encoder_states = () if output_hidden_states else None\n    all_attentions = () if output_attentions else None\n    hidden_states = inputs_embeds\n    for idx, encoder_layer in enumerate(self.layers):\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions, training=training)\n        hidden_states = layer_outputs[0]\n        if output_attentions:\n            all_attentions = all_attentions + (layer_outputs[1],)\n    if output_hidden_states:\n        encoder_states = encoder_states + (hidden_states,)\n    if not return_dict:\n        return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n    return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\ninputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\nEmbedded representation of the inputs. Should be float, not int tokens.\nattention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n- 1 for tokens that are **not masked**,\n- 0 for tokens that are **masked**.\n\n[What are attention masks?](../glossary#attention-mask)\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\nfor more detail.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def _batched_mask_to_box(masks: 'torch.Tensor'):\n    if torch.numel(masks) == 0:\n        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n    shape = masks.shape\n    height, width = shape[-2:]\n    in_height, _ = torch.max(masks, dim=-1)\n    in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]\n    bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n    in_height_coords = in_height_coords + height * ~in_height\n    top_edges, _ = torch.min(in_height_coords, dim=-1)\n    in_width, _ = torch.max(masks, dim=-2)\n    in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]\n    right_edges, _ = torch.max(in_width_coords, dim=-1)\n    in_width_coords = in_width_coords + width * ~in_width\n    left_edges, _ = torch.min(in_width_coords, dim=-1)\n    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n    out = out * (~empty_filter).unsqueeze(-1)\n    out = out.reshape(*shape[:-2], 4)\n    return out", "docstring": "Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which\ncorresponds the following required indices:\n- LEFT: left hand side of the bounding box\n- TOP: top of the bounding box\n- RIGHT: right of the bounding box\n- BOTTOM: bottom of the bounding box\n\nReturn [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape\nis channel_1 x channel_2 x ... x 4.\n\nArgs:\n- masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)", "source": "github-repos"}
{"code": "def accountSummary(self, account: str = '') -> List[AccountValue]:\n        \n        if not self.wrapper.acctSummary:\n            \n            self.reqAccountSummary()\n        if account:\n            return [v for v in self.wrapper.acctSummary.values()\n                    if v.account == account]\n        else:\n            return list(self.wrapper.acctSummary.values())", "docstring": "List of account values for the given account,\nor of all accounts if account is left blank.\n\nThis method is blocking on first run, non-blocking after that.\n\nArgs:\naccount: If specified, filter for this account name.", "source": "juraj-google-style"}
{"code": "def stop_standing_subprocess(proc):\n    \n    \n    \n    \n    import psutil\n    pid = proc.pid\n    logging.debug('Stopping standing subprocess %d', pid)\n    process = psutil.Process(pid)\n    failed = []\n    try:\n        children = process.children(recursive=True)\n    except AttributeError:\n        \n        children = process.get_children(recursive=True)\n    for child in children:\n        try:\n            child.kill()\n            child.wait(timeout=10)\n        except psutil.NoSuchProcess:\n            \n            pass\n        except:\n            failed.append(child.pid)\n            logging.exception('Failed to kill standing subprocess %d',\n                              child.pid)\n    try:\n        process.kill()\n        process.wait(timeout=10)\n    except psutil.NoSuchProcess:\n        \n        pass\n    except:\n        failed.append(pid)\n        logging.exception('Failed to kill standing subprocess %d', pid)\n    if failed:\n        raise Error('Failed to kill standing subprocesses: %s' % failed)\n    \n    \n    if proc.stdout:\n        proc.stdout.close()\n    if proc.stderr:\n        proc.stderr.close()\n    proc.wait()\n    logging.debug('Stopped standing subprocess %d', pid)", "docstring": "Stops a subprocess started by start_standing_subprocess.\n\nBefore killing the process, we check if the process is running, if it has\nterminated, Error is raised.\n\nCatches and ignores the PermissionError which only happens on Macs.\n\nArgs:\nproc: Subprocess to terminate.\n\nRaises:\nError: if the subprocess could not be stopped.", "source": "juraj-google-style"}
{"code": "def compute_nats_and_bits_per_dim(data_dim, latent_dim, average_reconstruction, average_prior):\n    with tf.name_scope(None, default_name='compute_nats_per_dim'):\n        data_dim = tf.cast(data_dim, average_reconstruction.dtype)\n        latent_dim = tf.cast(latent_dim, average_prior.dtype)\n        negative_log_likelihood = (data_dim * average_reconstruction)\n        negative_log_prior = (latent_dim * average_prior)\n        negative_elbo = (negative_log_likelihood + negative_log_prior)\n        nats_per_dim = tf.divide(negative_elbo, data_dim, name='nats_per_dim')\n        bits_per_dim = tf.divide(nats_per_dim, tf.log(2.0), name='bits_per_dim')\n        return (nats_per_dim, bits_per_dim)", "docstring": "Computes negative ELBO, which is an upper bound on the negative likelihood.\n\nArgs:\ndata_dim: int-like indicating data dimensionality.\nlatent_dim: int-like indicating latent dimensionality.\naverage_reconstruction: Scalar Tensor indicating the reconstruction cost\naveraged over all data dimensions and any data batches.\naverage_prior: Scalar Tensor indicating the negative log-prior probability\naveraged over all latent dimensions and any data batches.\n\nReturns:\nTuple of scalar Tensors, representing the nats and bits per data dimension\n(e.g., subpixels) respectively.", "source": "codesearchnet"}
{"code": "def CreateCampaignWithBiddingStrategy(client, bidding_strategy_id, budget_id):\n  \n  \n  campaign_service = client.GetService('CampaignService', version='v201809')\n\n  \n  campaign = {\n      'name': 'Interplanetary Cruise',\n      'budget': {\n          'budgetId': budget_id\n      },\n      'biddingStrategyConfiguration': {\n          'biddingStrategyId': bidding_strategy_id\n      },\n      'advertisingChannelType': 'SEARCH',\n      'networkSetting': {\n          'targetGoogleSearch': 'true',\n          'targetSearchNetwork': 'true',\n          'targetContentNetwork': 'true'\n      }\n  }\n\n  \n  operation = {\n      'operator': 'ADD',\n      'operand': campaign\n  }\n\n  response = campaign_service.mutate([operation])\n  new_campaign = response['value'][0]\n\n  print ('Campaign with name \"%s\", ID \"%s\" and bidding scheme ID \"%s\" '\n         'was created.' %\n         (new_campaign['name'], new_campaign['id'],\n          new_campaign['biddingStrategyConfiguration']['biddingStrategyId']))\n\n  return new_campaign", "docstring": "Create a Campaign with a Shared Bidding Strategy.\n\nArgs:\nclient: AdWordsClient the client to run the example with.\nbidding_strategy_id: string the bidding strategy ID to use.\nbudget_id: string the shared budget ID to use.\n\nReturns:\ndict An object representing a campaign.", "source": "juraj-google-style"}
{"code": "def register_array_types_from_sources(self, source_files):\n    \n    for fname in source_files:\n      if is_vhdl(fname):\n        self._register_array_types(self.extract_objects(fname))", "docstring": "Add array type definitions from a file list to internal registry\n\nArgs:\nsource_files (list of str): Files to parse for array definitions", "source": "juraj-google-style"}
{"code": "def _generate_legacy_type_checks(types=()):\n    \n    types = dict(types)\n\n    def gen_type_check(pytypes):\n        pytypes = _utils.flatten(pytypes)\n\n        def type_check(checker, instance):\n            if isinstance(instance, bool):\n                if bool not in pytypes:\n                    return False\n            return isinstance(instance, pytypes)\n\n        return type_check\n\n    definitions = {}\n    for typename, pytypes in iteritems(types):\n        definitions[typename] = gen_type_check(pytypes)\n\n    return definitions", "docstring": "Generate newer-style type checks out of JSON-type-name-to-type mappings.\n\nArguments:\n\ntypes (dict):\n\nA mapping of type names to their Python types\n\nReturns:\n\nA dictionary of definitions to pass to `TypeChecker`", "source": "juraj-google-style"}
{"code": "def around(A, decimals=0):\n    if isinstance(A, Poly):\n        B = A.A.copy()\n        for key in A.keys:\n            B[key] = around(B[key], decimals)\n        return Poly(B, A.dim, A.shape, A.dtype)\n    return numpy.around(A, decimals)", "docstring": "Evenly round to the given number of decimals.\n\nArgs:\nA (Poly, numpy.ndarray):\nInput data.\ndecimals (int):\nNumber of decimal places to round to (default: 0).  If decimals is\nnegative, it specifies the number of positions to the left of the\ndecimal point.\n\nReturns:\n(Poly, numpy.ndarray):\nSame type as A.\n\nExamples:\n>>> P = chaospy.prange(3)*2**-numpy.arange(0, 6, 2, float)\n>>> print(P)\n[1.0, 0.25q0, 0.0625q0^2]\n>>> print(chaospy.around(P))\n[1.0, 0.0, 0.0]\n>>> print(chaospy.around(P, 2))\n[1.0, 0.25q0, 0.06q0^2]", "source": "codesearchnet"}
{"code": "def _ParseRecord(self, parser_mediator, file_object, record_offset):\n    record_strings_data_offset = file_object.tell()\n    record_strings_data_size = (record_offset - record_strings_data_offset)\n    record_strings_data = self._ReadData(file_object, record_strings_data_offset, record_strings_data_size)\n    record_map = self._GetDataTypeMap('asl_record')\n    try:\n        (record, record_data_size) = self._ReadStructureFromFileObject(file_object, record_offset, record_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse record at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))\n    hostname = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.hostname_string_offset)\n    sender = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.sender_string_offset)\n    facility = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.facility_string_offset)\n    message = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.message_string_offset)\n    file_offset = (record_offset + record_data_size)\n    additional_data_size = ((record.data_size + 6) - record_data_size)\n    if ((additional_data_size % 8) != 0):\n        raise errors.ParseError('Invalid record additional data size: {0:d}.'.format(additional_data_size))\n    additional_data = self._ReadData(file_object, file_offset, additional_data_size)\n    extra_fields = {}\n    for additional_data_offset in range(0, (additional_data_size - 8), 16):\n        record_extra_field = self._ParseRecordExtraField(additional_data[additional_data_offset:], file_offset)\n        file_offset += 16\n        name = self._ParseRecordString(record_strings_data, record_strings_data_offset, record_extra_field.name_string_offset)\n        value = self._ParseRecordString(record_strings_data, record_strings_data_offset, record_extra_field.value_string_offset)\n        if (name is not None):\n            extra_fields[name] = value\n    event_data = ASLEventData()\n    event_data.computer_name = hostname\n    event_data.extra_information = ', '.join(['{0:s}: {1:s}'.format(name, value) for (name, value) in sorted(extra_fields.items())])\n    event_data.facility = facility\n    event_data.group_id = record.group_identifier\n    event_data.level = record.alert_level\n    event_data.message_id = record.message_identifier\n    event_data.message = message\n    event_data.pid = record.process_identifier\n    event_data.read_gid = record.real_group_identifier\n    event_data.read_uid = record.real_user_identifier\n    event_data.record_position = record_offset\n    event_data.sender = sender\n    event_data.user_sid = '{0:d}'.format(record.user_identifier)\n    (microseconds, _) = divmod(record.written_time_nanoseconds, 1000)\n    timestamp = ((record.written_time * 1000000) + microseconds)\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    return record.next_record_offset", "docstring": "Parses a record and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (file): file-like object.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\n\nReturns:\nint: next record offset.\n\nRaises:\nParseError: if the record cannot be parsed.", "source": "codesearchnet"}
{"code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n    (self._grad, self._vars) = zip(*[(g, t) for (g, t) in grads_and_vars if (g is not None)])\n    with tf.variable_scope('apply_updates'):\n        if (self._clip_thresh_var is not None):\n            (self._grad, _) = tf.clip_by_global_norm(self._grad, self._clip_thresh_var)\n            apply_grad_op = self._momentum_optimizer.apply_gradients(zip(self._grad, self._vars), global_step=global_step, name=name)\n        else:\n            apply_grad_op = self._momentum_optimizer.apply_gradients(zip(self._grad, self._vars), global_step=global_step, name=name)\n    with tf.variable_scope('prepare_yellowFin_variables'):\n        with tf.control_dependencies([apply_grad_op]):\n            prepare_variables_op = self._prepare_variables()\n    with tf.variable_scope('yellowfin'):\n        with tf.control_dependencies([prepare_variables_op]):\n            yellowfin_op = self._yellowfin()\n    with tf.control_dependencies([yellowfin_op]):\n        self._increment_step_op = tf.assign_add(self._step, 1).op\n    return tf.group(apply_grad_op, prepare_variables_op, yellowfin_op, self._increment_step_op)", "docstring": "Applying gradients and tune hyperparams with YellowFin.\n\nArgs:\ngrads_and_vars: List of (gradient, variable) pairs as returned by\ncompute_gradients().\nglobal_step: Optional Variable to increment by one after the\nvariables have been updated.\nname:  Optional name for the returned operation. Default to the\nname passed to the Optimizer constructor.\n\nReturns:\n(A group of operations)\nVariable Update with Momentum ops,\nYellowFin ops(Curvature, Variance, Distance) ops,\nSingleStep and lr_mu tuning ops,\nStep increment ops.", "source": "codesearchnet"}
{"code": "def _retrieve_info(self, http):\n    if self.invalid:\n        info = _metadata.get_service_account_info(http, service_account=(self.service_account_email or 'default'))\n        self.invalid = False\n        self.service_account_email = info['email']\n        self.scopes = info['scopes']", "docstring": "Retrieves service account info for invalid credentials.\n\nArgs:\nhttp: an object to be used to make HTTP requests.", "source": "codesearchnet"}
{"code": "def attachment_to_multidim_measurement(attachment, name=None):\n  \n  data = json.loads(attachment.data)\n\n  name = name or data.get('name')\n  \n  attachment_dims = data.get('dimensions', [])\n  \n  attachment_values = data.get('value')\n\n  attachment_outcome_str = data.get('outcome')\n  if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:\n    \n    try:\n      attachment_outcome_str = test_runs_pb2.Status.Name(\n          int(attachment_outcome_str))\n    except ValueError:\n      attachment_outcome_str = None\n\n  \n  outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(\n      attachment_outcome_str)\n\n  \n  _lazy_load_units_by_code()\n  dims = []\n  for d in attachment_dims:\n    \n    unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)\n    description = d.get('name', '')\n    dims.append(measurements.Dimension(description=description, unit=unit))\n\n  \n  if attachment_values and len(dims) == len(attachment_values[0]):\n    \n    units_ = dims[-1].unit\n    dimensions = dims[:-1]\n  else:\n    units_ = None\n    dimensions = dims\n\n  \n  measured_value = measurements.DimensionedMeasuredValue(\n      name=name,\n      num_dimensions=len(dimensions)\n  )\n  for row in attachment_values:\n    coordinates = tuple(row[:-1])\n    val = row[-1]\n    measured_value[coordinates] = val\n\n  measurement = measurements.Measurement(\n      name=name,\n      units=units_,\n      dimensions=tuple(dimensions),\n      measured_value=measured_value,\n      outcome=outcome\n  )\n  return measurement", "docstring": "Convert an OpenHTF test record attachment to a multi-dim measurement.\n\nThis is a best effort attempt to reverse, as some data is lost in converting\nfrom a multidim to an attachment.\n\nArgs:\nattachment: an `openhtf.test_record.Attachment` from a multi-dim.\nname: an optional name for the measurement.  If not provided will use the\nname included in the attachment.\n\nReturns:\nAn multi-dim `openhtf.Measurement`.", "source": "juraj-google-style"}
{"code": "def _hash_sequence(self, sighash_type, anyone_can_pay):\n    if (anyone_can_pay or (sighash_type == shared.SIGHASH_SINGLE)):\n        return (b'\\x00' * 32)\n    else:\n        sequences = ByteData()\n        for tx_in in self.tx_ins:\n            sequences += tx_in.sequence\n        return utils.hash256(sequences.to_bytes())", "docstring": "BIP143 hashSequence implementation\n\nArgs:\nsighash_type    (int): SIGHASH_SINGLE or SIGHASH_ALL\nanyone_can_pay (bool): true if ANYONECANPAY should be set\nReturns:\n(bytes): the hashSequence, a 32 byte hash", "source": "codesearchnet"}
{"code": "def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed):\n    num_entries = ((num_rows * (num_rows + 1)) / 2)\n    ones = tf.ones(shape=[num_entries], dtype=dtype)\n    unifs = uniform.Uniform((- ones), ones).sample(batch_shape, seed=seed)\n    tril = util.fill_triangular(unifs)\n    symmetric = (tril + tf.linalg.matrix_transpose(tril))\n    diagonal_ones = tf.ones(shape=util.pad(batch_shape, axis=0, back=True, value=num_rows), dtype=dtype)\n    return tf.linalg.set_diag(symmetric, diagonal_ones)", "docstring": "Returns a uniformly random `Tensor` of \"correlation-like\" matrices.\n\nA \"correlation-like\" matrix is a symmetric square matrix with all entries\nbetween -1 and 1 (inclusive) and 1s on the main diagonal.  Of these,\nthe ones that are positive semi-definite are exactly the correlation\nmatrices.\n\nArgs:\nnum_rows: Python `int` dimension of the correlation-like matrices.\nbatch_shape: `Tensor` or Python `tuple` of `int` shape of the\nbatch to return.\ndtype: `dtype` of the `Tensor` to return.\nseed: Random seed.\n\nReturns:\nmatrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]`\nand dtype `dtype`.  Each entry is in [-1, 1], and each matrix\nalong the bottom two dimensions is symmetric and has 1s on the\nmain diagonal.", "source": "codesearchnet"}
{"code": "def __setitem__(self, key, value):\n        \n        \n        if key == 'resources':\n            self.add_update_resources(value, ignore_datasetid=True)\n            return\n        super(Dataset, self).__setitem__(key, value)", "docstring": "Set dictionary items but do not allow setting of resources\n\nArgs:\nkey (Any): Key in dictionary\nvalue (Any): Value to put in dictionary\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def cmap_from_color(color, dark=False):\n    \n    if dark:\n        return sns.dark_palette(color, as_cmap=True)\n    else:\n        return sns.light_palette(color, as_cmap=True)", "docstring": "Generates a matplotlib colormap from a single color.\n\nColormap will be built, by default, from white to ``color``.\n\nArgs:\n\ncolor: Can be one of several things:\n\n1. Hex code\n2. HTML color name\n3. RGB tuple\n\ndark (bool): If ``True``, colormap will be built from ``color`` to\nblack. Default is ``False``, which builds a colormap from\nwhite to ``color``.\n\nReturns:\n\ncolormap: A matplotlib colormap", "source": "juraj-google-style"}
{"code": "async def get_participants(self, force_update=False) -> list:\n    if (force_update or (self.participants is None)):\n        res = (await self.connection('GET', 'tournaments/{}/participants'.format(self._id)))\n        self._refresh_participants_from_json(res)\n    return (self.participants or [])", "docstring": "get all participants\n\n|methcoro|\n\nArgs:\nforce_update (default=False): True to force an update to the Challonge API\n\nReturns:\nlist[Participant]:\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Subtracts `tf.IndexedSlices` from this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be subtracted from this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def asdict(self):\n    timestamp_str = None\n    if (self.reading_time is not None):\n        timestamp_str = self.reading_time.isoformat()\n    return {'stream': self.stream, 'device_timestamp': self.raw_time, 'streamer_local_id': self.reading_id, 'timestamp': timestamp_str, 'value': self.value}", "docstring": "Encode the data in this reading into a dictionary.\n\nReturns:\ndict: A dictionary containing the information from this reading.", "source": "codesearchnet"}
{"code": "def flownet2_sd(self, x):\n        \n        with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),\n                      padding='valid', strides=2, kernel_size=3,\n                      data_format='channels_first'), \\\n            argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,\n                     data_format='channels_first', strides=2, kernel_size=4):\n            x = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1)\n\n            x = tf.layers.conv2d(pad(x, 1), 64, name='conv1')\n            conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1)\n            x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2')\n            conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1)\n\n            x = tf.layers.conv2d(pad(conv2, 1), 256, name='conv3')\n            conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)\n            x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')\n            conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)\n            x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')\n            conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)\n            x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')\n            conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)\n\n            flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)\n            flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5')\n            x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')\n            interconv5 = tf.layers.conv2d(pad(concat5, 1), 512, strides=1, name='inter_conv5', activation=tf.identity)\n            flow5 = tf.layers.conv2d(pad(interconv5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)\n            flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4')\n            x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')\n            interconv4 = tf.layers.conv2d(pad(concat4, 1), 256, strides=1, name='inter_conv4', activation=tf.identity)\n            flow4 = tf.layers.conv2d(pad(interconv4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)\n            flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3')\n            x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')\n            interconv3 = tf.layers.conv2d(pad(concat3, 1), 128, strides=1, name='inter_conv3', activation=tf.identity)\n            flow3 = tf.layers.conv2d(pad(interconv3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)\n            flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2')\n            x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')\n            interconv2 = tf.layers.conv2d(pad(concat2, 1), 64, strides=1, name='inter_conv2', activation=tf.identity)\n            flow2 = tf.layers.conv2d(pad(interconv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)\n\n            return resize(flow2 / DISP_SCALE, mode='nearest')", "docstring": "Architecture in Table 3 of FlowNet 2.0.\n\nArgs:\nx: concatenation of two inputs, of shape [1, 2xC, H, W]", "source": "juraj-google-style"}
{"code": "def _add_namespace(marc_xml):\n    \n    dom = marc_xml\n\n    if isinstance(dom, basestring):\n        dom = dhtmlparser.parseString(marc_xml)\n\n    root = dom.find(\"root\")\n    if root:\n        root[0].params = {}\n\n    for record in dom.find(\"record\"):\n        record.params = {}\n\n    collections = dom.find(\"collection\")\n    if not collections:\n        record = dom.find(\"record\")[0]\n        return XML_TEMPLATE.replace(\"$CONTENT\", str(record))\n\n    for col in collections:\n        col.params[\"xmlns\"] = \"http:\n        col.params[\"xmlns:xsi\"] = \"http:\n        col.params[\"xsi:schemaLocation\"] = \"http:\n                   \"http:\n\n    return str(dom)", "docstring": "Add proper XML namespace to the `marc_xml` record.\n\nArgs:\nmarc_xml (str): String representation of the XML record.\n\nReturns:\nstr: XML with namespace.", "source": "juraj-google-style"}
{"code": "def iterate_ngrams(text, n):\n    \n    if n <= 0:\n        raise ValueError(\"n must be a positive integer\")\n\n    return [text[i: i + n] for i in range(len(text) - n + 1)]", "docstring": "Generator to yield ngrams in ``text``.\n\nExample:\n>>> for ngram in iterate_ngrams(\"example\", 4):\n...     print(ngram)\nexam\nxamp\nampl\nmple\n\nArgs:\ntext (str): text to iterate over\nn (int): size of window for iteration\n\nReturns:\nGenerator expression to yield the next ngram in the text\n\nRaises:\nValueError: If n is non positive", "source": "juraj-google-style"}
{"code": "def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):\n    (_, ext) = os.path.splitext(filename)\n    if (ext != '.png'):\n        filename += '.png'\n    gnuplot_cmds = '\\n    set datafile separator \",\"\\n    set term pngcairo size 30cm,25cm\\n    set out filename\\n\\n    unset key\\n    set border lw 1.5\\n    set grid lt -1 lc rgb \"gray80\"\\n\\n    set title title\\n    set xlabel x_label\\n    set ylabel y_label\\n\\n    plot filename_data u 1:2 w lp pt 6 ps 0.5\\n    '\n    scr = _GnuplotScriptTemp(gnuplot_cmds)\n    data = _GnuplotDataTemp(x, y)\n    args_dict = {'filename': filename, 'filename_data': data.name, 'title': title, 'x_label': x_label, 'y_label': y_label}\n    gnuplot(scr.name, args_dict)", "docstring": "Function to produce a general 2D plot.\n\nArgs:\nx (list): x points.\ny (list): y points.\nfilename (str): Filename of the output image.\ntitle (str): Title of the plot.  Default is '' (no title).\nx_label (str): x-axis label.\ny_label (str): y-axis label.", "source": "codesearchnet"}
{"code": "def _CreateClassTemplate(cls, data_type_definition):\n    \n    type_name = data_type_definition.name\n\n    type_description = data_type_definition.description or type_name\n    while type_description.endswith('.'):\n      type_description = type_description[:-1]\n\n    class_attributes_description = []\n    init_arguments = []\n    instance_attributes = []\n\n    for member_definition in data_type_definition.members:\n      attribute_name = member_definition.name\n\n      description = member_definition.description or attribute_name\n      while description.endswith('.'):\n        description = description[:-1]\n\n      member_data_type = getattr(member_definition, 'member_data_type', '')\n      if isinstance(member_definition, data_types.MemberDataTypeDefinition):\n        member_definition = member_definition.member_data_type_definition\n\n      member_type_indicator = member_definition.TYPE_INDICATOR\n      if member_type_indicator == definitions.TYPE_INDICATOR_SEQUENCE:\n        element_type_indicator = member_definition.element_data_type\n        member_type_indicator = 'tuple[{0:s}]'.format(element_type_indicator)\n      else:\n        member_type_indicator = cls._PYTHON_NATIVE_TYPES.get(\n            member_type_indicator, member_data_type)\n\n      argument = '{0:s}=None'.format(attribute_name)\n\n      definition = '    self.{0:s} = {0:s}'.format(attribute_name)\n\n      description = '    {0:s} ({1:s}): {2:s}.'.format(\n          attribute_name, member_type_indicator, description)\n\n      class_attributes_description.append(description)\n      init_arguments.append(argument)\n      instance_attributes.append(definition)\n\n    class_attributes_description = '\\n'.join(\n        sorted(class_attributes_description))\n    init_arguments = ', '.join(init_arguments)\n    instance_attributes = '\\n'.join(sorted(instance_attributes))\n\n    template_values = {\n        'class_attributes_description': class_attributes_description,\n        'init_arguments': init_arguments,\n        'instance_attributes': instance_attributes,\n        'type_description': type_description,\n        'type_name': type_name}\n\n    return cls._CLASS_TEMPLATE.format(**template_values)", "docstring": "Creates the class template.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nstr: class template.", "source": "juraj-google-style"}
{"code": "def conditionally_create_security_groups(env, service_name, service_type):\n  \n  if service_type not in SG_SERVICE_TYPES:\n    print_if_verbose(\"not eligible for security group(s); service type: {}\".format(service_type))\n    return\n\n  target_name = \"{}-{}\".format(env, service_name)\n  if service_type == \"aws_ec2\":\n    sg_names = [\"{}-ec2\".format(target_name)]\n  elif service_type == \"aws_lambda\":\n    sg_names = [\"{}-lambda\".format(target_name)]\n  elif service_type == \"http_service\":\n    sg_names = [\n      \"{}-ec2\".format(target_name),\n      \"{}-elb\".format(target_name)\n    ]\n  elif service_type == \"aws_security_group\":\n    sg_names = [target_name]\n  else:\n    fail(\"Unexpected service_type: {} when creating security group for: {}\".format(service_type, target_name))\n\n  for sg_name in sg_names:\n    if not AWS_RESOLVER.ec2_security_group_security_group_id(sg_name):\n      vpc_name = \"vpc-{}\".format(env)\n      print(\"Create security group: {} in vpc: {}\".format(sg_name, vpc_name))\n      vpc = AWS_RESOLVER.ec2_vpc_vpc_id(vpc_name)\n      if not vpc:\n        fail(\"Error: could not get VPC by name: {}\".format(vpc_name))\n      \n      if CONTEXT.commit:\n        try:\n          new_sg = CLIENTS[\"ec2\"].create_security_group(GroupName=sg_name, VpcId=vpc, Description=sg_name)\n        except:\n          fail(\"Exception creating security group named: {} in VpcId: {}\".format(sg_name, vpc_name), sys.exc_info())\n        print(new_sg[\"GroupId\"])\n    else:\n      print_if_verbose(\"security group already exists: {}\".format(sg_name))", "docstring": "Create security groups as needed; name and number created depend on service_type\nArgs:\nenv: the environment the SG will be created in\nservice_name: name of the service in service registry\nservice_type: service registry service type: 'aws_ec2', 'aws_lambda', 'aws_security_group', or 'http_service'", "source": "juraj-google-style"}
{"code": "def get_current_human_time():\n    return time.strftime('%m-%d-%Y %H:%M:%S ')", "docstring": "Returns the current time in human readable format.\n\nReturns:\nThe current time stamp in Month-Day-Year Hour:Min:Sec format.", "source": "github-repos"}
{"code": "def split_key(key, max_keys=0):\n    parts = [x for x in re.split(SPLIT_REGEX, key) if (x != '.')]\n    result = []\n    while (len(parts) > 0):\n        if ((max_keys > 0) and (len(result) == max_keys)):\n            break\n        result.append(parts.pop(0))\n    if (len(parts) > 0):\n        result.append('.'.join(parts))\n    return result", "docstring": "Splits a key but allows dots in the key name if they're scaped properly.\n\nSplitting this complex key:\n\ncomplex_key = \".dont\\.splitme.d\\.o\\. origen.splitme\\.dontsplit.splitme.\"\nsplit_key(complex_key)\n\nresults in:\n\n['', 'dont\\.splitme', 'd\\.o\\. origen', 'splitme\\.dontsplit', 'splitme', '']\n\n\nArgs:\nkey (basestring): The key to be splitted.\nmax_keys (int): The maximum number of keys to be extracted. 0 means no\nlimits.\n\nReturns:\nA list of keys", "source": "codesearchnet"}
{"code": "def __init__(self, func, type):\n    self.func = func\n    self.type = type", "docstring": "Instantiates a bound method object.\n\nArgs:\nfunc (types.FunctionType): The method's underlying function\ntype (type): The class of the method.", "source": "github-repos"}
{"code": "def minimize_peak_memory(graph, scheduler_alg):\n  \n  if scheduler_alg == 'NAIVE':\n    return _minimize_peak_memory_naive(graph)\n  elif scheduler_alg == 'LIST':\n    return _minimize_peak_memory_list(graph)\n  else:\n    raise NotImplementedError('{} is not a scheduler algorithm. It should be '\n                              'one of NAIVE or LIST.'\n                              .format(scheduler_alg))", "docstring": "Computes a schedule to minimize peak memory.\n\nArgs:\ngraph: an mtf.auto_mtf.graph_interface.GraphInterface.\nscheduler_alg: a string, one of 'NAIVE' or 'LIST'\n\nReturns:\nan iterable of integers representing the schedule.", "source": "juraj-google-style"}
{"code": "def set_nsxcontroller_port(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        port = str(kwargs.pop('port'))\n        port_args = dict(name=name, port=port)\n        method_name = 'nsx_controller_connection_addr_port'\n        method_class = self._brocade_tunnels\n        nsxcontroller_attr = getattr(method_class, method_name)\n        config = nsxcontroller_attr(**port_args)\n        output = self._callback(config)\n        return output", "docstring": "Set Nsx Controller pot on the switch\n\nArgs:\nport (int): 1 to 65535.\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def _print_contained_resource(self, contained_resource: message.Message) -> None:\n    for _, set_field_value in contained_resource.ListFields():\n        if self.json_format == _FhirJsonFormat.ANALYTIC:\n            structure_definition_url = annotation_utils.get_structure_definition_url(set_field_value)\n            self.generator.push(f'\"{structure_definition_url}\"')\n        else:\n            self._print(set_field_value)", "docstring": "Prints the set fields of the contained resource.\n\nIf the _FhirJsonFormat is set to ANALYTIC, this method only prints the url.\n\nArgs:\ncontained_resource: The contained resource to iterate over and print.", "source": "github-repos"}
{"code": "def market_if_touched_replace(self, accountID, orderID, **kwargs):\n    return self.replace(accountID, orderID, order=MarketIfTouchedOrderRequest(**kwargs))", "docstring": "Shortcut to replace a pending MarketIfTouched Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the MarketIfTouched Order to replace\nkwargs : The arguments to create a MarketIfTouchedOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def is_significant(sample1, sample2):\n    deg_freedom = ((len(sample1) + len(sample2)) - 2)\n    critical_value = tdist95conf_level(deg_freedom)\n    t_score = tscore(sample1, sample2)\n    return ((abs(t_score) >= critical_value), t_score)", "docstring": "Determine whether two samples differ significantly.\n\nThis uses a Student's two-sample, two-tailed t-test with alpha=0.95.\n\nArgs:\nsample1: one sample.\nsample2: the other sample.\n\nReturns:\n(significant, t_score) where significant is a bool indicating whether\nthe two samples differ significantly; t_score is the score from the\ntwo-sample T test.", "source": "codesearchnet"}
{"code": "def covariance_to_correlations(covariance):\n    \n    diagonal_ind = np.arange(covariance.shape[1])\n    diagonal_els = covariance[:, diagonal_ind, diagonal_ind]\n    result = covariance / np.sqrt(diagonal_els[:, :, None] * diagonal_els[:, None, :])\n    result[np.isinf(result)] = 0\n    return np.clip(np.nan_to_num(result), -1, 1)", "docstring": "Transform a covariance matrix into a correlations matrix.\n\nThis can be seen as dividing a covariance matrix by the outer product of the diagonal.\n\nAs post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1].\n\nArgs:\ncovariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p).\n\nReturns:\nndarray: the correlations matrix", "source": "juraj-google-style"}
{"code": "def test_antithetic_sample_paths_mean_2d(self, random_type, seed):\n    mu = np.array([0.2, 0.7])\n    a = np.array([[0.4, 0.1], [0.3, 0.2]])\n    b = np.array([[0.33, -0.03], [0.21, 0.5]])\n\n    def drift_fn(t, x):\n        del x\n        return mu * tf.sqrt(t)\n\n    def vol_fn(t, x):\n        del x\n        return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)\n    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])\n    num_samples = 5000\n    x0 = np.array([0.1, -1.1])\n    paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, random_type=random_type, time_step=0.01, seed=seed))\n    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)\n    means = np.mean(paths, axis=0)\n    times = np.reshape(times, [-1, 1])\n    expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)\n    self.assertAllClose(means, expected_means, rtol=0.005, atol=0.005)", "docstring": "Tests path properties for 2-dimentional anthithetic variates method.\n\nThe same test as above but with `PSEUDO_ANTITHETIC` random type.\nWe construct the following Ito processes.\n\ndX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2\ndX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2\n\nmu_1, mu_2 are constants.\ns_ij = a_ij t + b_ij\n\nFor this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.\n\nArgs:\nrandom_type: Random number type defined by tff.math.random.RandomType\nenum.\nseed: Random seed.", "source": "github-repos"}
{"code": "def get_metrics_namespace(self) -> str:\n    return 'BeamML_HuggingFaceModelHandler_KeyedTensor'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"}
{"code": "def get_value(value_proto):\n    field = value_proto.WhichOneof('value_type')\n    if (field in __native_value_types):\n        return getattr(value_proto, field)\n    if (field == 'timestamp_value'):\n        return from_timestamp(value_proto.timestamp_value)\n    if (field == 'array_value'):\n        return [get_value(sub_value) for sub_value in value_proto.array_value.values]\n    return None", "docstring": "Gets the python object equivalent for the given value proto.\n\nArgs:\nvalue_proto: datastore.Value proto message.\n\nReturns:\nthe corresponding python object value. timestamps are converted to\ndatetime, and datastore.Value is returned for blob_key_value.", "source": "codesearchnet"}
{"code": "def get_time_series(sdat, var, tstart, tend):\n    tseries = sdat.tseries_between(tstart, tend)\n    if (var in tseries.columns):\n        series = tseries[var]\n        time = None\n        if (var in phyvars.TIME):\n            meta = phyvars.TIME[var]\n        else:\n            meta = phyvars.Vart(var, None, '1')\n    elif (var in phyvars.TIME_EXTRA):\n        meta = phyvars.TIME_EXTRA[var]\n        (series, time) = meta.description(sdat, tstart, tend)\n        meta = phyvars.Vart(misc.baredoc(meta.description), meta.kind, meta.dim)\n    else:\n        raise UnknownTimeVarError(var)\n    (series, _) = sdat.scale(series, meta.dim)\n    if (time is not None):\n        (time, _) = sdat.scale(time, 's')\n    return (series, time, meta)", "docstring": "Extract or compute and rescale a time series.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\nvar (str): time series name, a key of :data:`stagpy.phyvars.TIME`\nor :data:`stagpy.phyvars.TIME_EXTRA`.\ntstart (float): starting time of desired series. Set to None to start\nat the beginning of available data.\ntend (float): ending time of desired series. Set to None to stop at the\nend of available data.\nReturns:\ntuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`:\nseries, time, meta\nseries is the requested time series, time the time at which it\nis evaluated (set to None if it is the one of time series output\nby StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance\nholding metadata of the requested variable.", "source": "codesearchnet"}
{"code": "def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):\n    \n    params = _get_params(mapper_spec)\n    blob_key = params[cls.BLOB_KEY_PARAM]\n    zip_input = zipfile.ZipFile(_reader(blob_key))\n    zfiles = zip_input.infolist()\n    total_size = sum(x.file_size for x in zfiles)\n    num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)\n    size_per_shard = total_size \n\n    \n    \n    shard_start_indexes = [0]\n    current_shard_size = 0\n    for i, fileinfo in enumerate(zfiles):\n      current_shard_size += fileinfo.file_size\n      if current_shard_size >= size_per_shard:\n        shard_start_indexes.append(i + 1)\n        current_shard_size = 0\n\n    if shard_start_indexes[-1] != len(zfiles):\n      shard_start_indexes.append(len(zfiles))\n\n    return [cls(blob_key, start_index, end_index, _reader)\n            for start_index, end_index\n            in zip(shard_start_indexes, shard_start_indexes[1:])]", "docstring": "Returns a list of input shard states for the input spec.\n\nArgs:\nmapper_spec: The MapperSpec for this InputReader. Must contain\n'blob_key' parameter with one blob key.\n_reader: a callable that returns a file-like object for reading blobs.\nUsed for dependency injection.\n\nReturns:\nA list of InputReaders spanning files within the zip.", "source": "juraj-google-style"}
{"code": "def remove_temp_dirpath(dirpath, strategy):\n    if strategy is None:\n        strategy = distribute_lib.get_strategy()\n    if strategy is None:\n        return\n    if strategy.extended._in_multi_worker_mode() and (not strategy.extended.should_checkpoint):\n        file_io.delete_recursively(_get_temp_dir(dirpath, strategy))", "docstring": "Removes the temp path after writing is finished.\n\nArgs:\ndirpath: Original dirpath that would be used without distribution.\nstrategy: The tf.distribute strategy object currently used.", "source": "github-repos"}
{"code": "def _GenerateUniqueRandomInputTensor(self, shape):\n    num_elements = 1\n    for size in shape:\n        num_elements *= size\n    x = np.arange(num_elements, dtype=np.float32)\n    self._PRNG.shuffle(x)\n    return x.reshape(shape)", "docstring": "Generate 'unique' random input tensor.\n\n'Unique' means there's no collision values in the tensor, all elements are\ndifferent. This is done by generating sequence of integers with step of 1\nand then randomly shuffle these integers.\n\nArgs:\nshape: Shape of the tensor desired.\n\nReturns:\nA numpy ndarray with size = shape and dtype = numpy.float32.", "source": "github-repos"}
{"code": "def add_other_location(self, location, exact=True, alterror=None, locations=None):\n    (hdx_code, match) = Locations.get_HDX_code_from_location_partial(location, locations=locations, configuration=self.configuration)\n    if ((hdx_code is None) or ((exact is True) and (match is False))):\n        if (alterror is None):\n            raise HDXError(('Location: %s - cannot find in HDX!' % location))\n        else:\n            raise HDXError(alterror)\n    groups = self.data.get('groups', None)\n    hdx_code = hdx_code.lower()\n    if groups:\n        if (hdx_code in [x['name'] for x in groups]):\n            return False\n    else:\n        groups = list()\n    groups.append({'name': hdx_code})\n    self.data['groups'] = groups\n    return True", "docstring": "Add a location which is not a country or region. Value is parsed and compared to existing locations in\nHDX. If the location is already added, it is ignored.\n\nArgs:\nlocation (str): Location to add\nexact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True.\nalterror (Optional[str]): Alternative error message to builtin if location not found. Defaults to None.\nlocations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.\n\nReturns:\nbool: True if location added or False if location already present", "source": "codesearchnet"}
{"code": "def feat(self, subset):\n    r = None\n    for f in self:\n        if (isinstance(f, Feature) and (f.subset == subset)):\n            if r:\n                if isinstance(r, list):\n                    r.append(f.cls)\n                else:\n                    r = [r, f.cls]\n            else:\n                r = f.cls\n    if (r is None):\n        raise NoSuchAnnotation\n    else:\n        return r", "docstring": "Obtain the feature class value of the specific subset.\n\nIf a feature occurs multiple times, the values will be returned in a list.\n\nExample::\n\nsense = word.annotation(folia.Sense)\nsynset = sense.feat('synset')\n\nReturns:\nstr or list", "source": "codesearchnet"}
{"code": "def execute_before(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n        alloc = parent.allocator\n\n        stream_a, trigger_a = self._convert_trigger(self.trigger_a, parent)\n\n        if self.trigger_b is None:\n            new_scope = TriggerScope(sensor_graph, scope_stack, (stream_a, trigger_a))\n        else:\n            stream_b, trigger_b = self._convert_trigger(self.trigger_b, parent)\n            trigger_stream = alloc.allocate_stream(DataStream.UnbufferedType)\n\n            if self.combiner == u'and':\n                combiner = '&&'\n            else:\n                combiner = '||'\n\n            if stream_a.input and not stream_b.input:\n                unbuffered_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)\n                sensor_graph.add_node(u\"({} always) => {} using copy_latest_a\".format(stream_a, unbuffered_stream))\n                sensor_graph.add_node(u\"({} {} {} {} {}) => {} using copy_latest_a\".format(unbuffered_stream, trigger_a, combiner, stream_b, trigger_b, trigger_stream))\n            elif stream_b.input and not stream_a.input:\n                unbuffered_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)\n                sensor_graph.add_node(u\"({} always) => {} using copy_latest_a\".format(stream_b, unbuffered_stream))\n                sensor_graph.add_node(u\"({} {} {} {} {}) => {} using copy_latest_a\".format(stream_a, trigger_a, combiner, unbuffered_stream, trigger_b, trigger_stream))\n            else:\n                sensor_graph.add_node(u\"({} {} {} {} {}) => {} using copy_latest_a\".format(stream_a, trigger_a, combiner, stream_b, trigger_b, trigger_stream))\n            new_scope = TriggerScope(sensor_graph, scope_stack, (trigger_stream, TrueTrigger()))\n\n        scope_stack.append(new_scope)", "docstring": "Execute statement before children are executed.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def list_profile(self, args, screen_info=None):\n    screen_cols = 80\n    if screen_info and 'cols' in screen_info:\n        screen_cols = screen_info['cols']\n    parsed = self._arg_parsers['list_profile'].parse_args(args)\n    op_time_interval = command_parser.parse_time_interval(parsed.op_time) if parsed.op_time else None\n    exec_time_interval = command_parser.parse_time_interval(parsed.execution_time) if parsed.execution_time else None\n    node_name_regex = re.compile(parsed.node_name_filter) if parsed.node_name_filter else None\n    file_path_regex = re.compile(parsed.file_path_filter) if parsed.file_path_filter else None\n    op_type_regex = re.compile(parsed.op_type_filter) if parsed.op_type_filter else None\n    output = debugger_cli_common.RichTextLines([''])\n    device_name_regex = re.compile(parsed.device_name_filter) if parsed.device_name_filter else None\n    data_generator = self._get_profile_data_generator()\n    device_count = len(self._run_metadata.step_stats.dev_stats)\n    for index in range(device_count):\n        device_stats = self._run_metadata.step_stats.dev_stats[index]\n        if not device_name_regex or device_name_regex.match(device_stats.device):\n            profile_data = [datum for datum in data_generator(device_stats) if _list_profile_filter(datum, node_name_regex, file_path_regex, op_type_regex, op_time_interval, exec_time_interval, min_lineno=parsed.min_lineno, max_lineno=parsed.max_lineno)]\n            profile_data = sorted(profile_data, key=lambda datum: _list_profile_sort_key(datum, parsed.sort_by), reverse=parsed.reverse)\n            output.extend(self._get_list_profile_lines(device_stats.device, index, device_count, profile_data, parsed.sort_by, parsed.reverse, parsed.time_unit, device_name_filter=parsed.device_name_filter, node_name_filter=parsed.node_name_filter, op_type_filter=parsed.op_type_filter, screen_cols=screen_cols))\n    return output", "docstring": "Command handler for list_profile.\n\nList per-operation profile information.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines object.", "source": "github-repos"}
{"code": "def _broadcast_and_set_attrs(self, local_dict):\n    del local_dict['self']\n    self.remove_axis = False\n    max_length = 0\n    for key in local_dict:\n        try:\n            length = len(local_dict[key])\n            if (length > max_length):\n                max_length = length\n        except TypeError:\n            pass\n    if (max_length == 0):\n        self.remove_axis = True\n        for key in local_dict:\n            setattr(self, key, np.array([local_dict[key]]))\n    else:\n        for key in local_dict:\n            try:\n                if ((len(local_dict[key]) < max_length) and (len(local_dict[key]) > 1)):\n                    raise ValueError((('Casting parameters not correct.' + ' Need all at a maximum shape and the rest being') + 'len-1 arrays or scalars'))\n            except TypeError:\n                pass\n        for key in local_dict:\n            try:\n                if (len(local_dict[key]) == max_length):\n                    setattr(self, key, local_dict[key])\n                elif (len(local_dict[key]) == 1):\n                    setattr(self, key, np.full((max_length,), local_dict[key][0]))\n            except TypeError:\n                setattr(self, key, np.full((max_length,), local_dict[key]))\n    return", "docstring": "Cast all inputs to correct dimensions.\n\nThis method fixes inputs who have different lengths. Namely one input as\nan array and others that are scalara or of len-1.\n\nRaises:\nValue Error: Multiple length arrays of len>1", "source": "codesearchnet"}
{"code": "def do_state(args):\n    rest_client = RestClient(args.url, args.user)\n    if (args.subcommand == 'list'):\n        response = rest_client.list_state(args.subtree, args.head)\n        leaves = response['data']\n        head = response['head']\n        keys = ('address', 'size', 'data')\n        headers = tuple((k.upper() for k in keys))\n\n        def parse_leaf_row(leaf, decode=True):\n            decoded = b64decode(leaf['data'])\n            return (leaf['address'], len(decoded), (str(decoded) if decode else leaf['data']))\n        if (args.format == 'default'):\n            fmt.print_terminal_table(headers, leaves, parse_leaf_row)\n            print('HEAD BLOCK: \"{}\"'.format(head))\n        elif (args.format == 'csv'):\n            fmt.print_csv(headers, leaves, parse_leaf_row)\n            print('(data for head block: \"{}\")'.format(head))\n        elif ((args.format == 'json') or (args.format == 'yaml')):\n            state_data = {'head': head, 'data': [{k: d for (k, d) in zip(keys, parse_leaf_row(l, False))} for l in leaves]}\n            if (args.format == 'yaml'):\n                fmt.print_yaml(state_data)\n            elif (args.format == 'json'):\n                fmt.print_json(state_data)\n            else:\n                raise AssertionError('Missing handler: {}'.format(args.format))\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))\n    if (args.subcommand == 'show'):\n        output = rest_client.get_leaf(args.address, args.head)\n        if (output is not None):\n            print('DATA: \"{}\"'.format(b64decode(output['data'])))\n            print('HEAD: \"{}\"'.format(output['head']))\n        else:\n            raise CliException('No data available at {}'.format(args.address))", "docstring": "Runs the batch list or batch show command, printing output to the\nconsole\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "codesearchnet"}
{"code": "def _scale_size(size, scale):\n    \n    w, h = size\n    return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)", "docstring": "Rescale a size by a ratio.\n\nArgs:\nsize (tuple): w, h.\nscale (float): Scaling factor.\n\nReturns:\ntuple[int]: scaled size.", "source": "juraj-google-style"}
{"code": "def interpolations_to_summary(sample_ind, interpolations, first_frame, last_frame, hparams, decode_hp):\n    parent_tag = ('sample_%d' % sample_ind)\n    frame_shape = hparams.problem.frame_shape\n    interp_shape = ([hparams.batch_size, decode_hp.num_interp] + frame_shape)\n    interpolations = np.reshape(interpolations, interp_shape)\n    interp_tag = ('%s/interp/%s' % (parent_tag, decode_hp.channel_interp))\n    if (decode_hp.channel_interp == 'ranked'):\n        interp_tag = ('%s/rank_%d' % (interp_tag, decode_hp.rank_interp))\n    (summaries, _) = common_video.py_gif_summary(interp_tag, interpolations, return_summary_value=True, max_outputs=decode_hp.max_display_outputs, fps=decode_hp.frames_per_second)\n    if decode_hp.save_frames:\n        first_frame_summ = image_utils.image_to_tf_summary_value(first_frame, ('%s/first' % parent_tag))\n        last_frame_summ = image_utils.image_to_tf_summary_value(last_frame, ('%s/last' % parent_tag))\n        summaries.append(first_frame_summ)\n        summaries.append(last_frame_summ)\n    return summaries", "docstring": "Converts interpolated frames into tf summaries.\n\nThe summaries consists of:\n1. Image summary corresponding to the first frame.\n2. Image summary corresponding to the last frame.\n3. The interpolated frames as a gif summary.\n\nArgs:\nsample_ind: int\ninterpolations: Numpy array, shape=(num_interp, H, W, 3)\nfirst_frame: Numpy array, shape=(HWC)\nlast_frame: Numpy array, shape=(HWC)\nhparams: HParams, train hparams\ndecode_hp: HParams, decode hparams\nReturns:\nsummaries: list of tf Summary Values.", "source": "codesearchnet"}
{"code": "def merge_sketches(outdir, sketch_paths):\n    merge_sketch_path = os.path.join(outdir, 'sistr.msh')\n    args = ['mash', 'paste', merge_sketch_path]\n    for x in sketch_paths:\n        args.append(x)\n    args.append(MASH_SKETCH_FILE)\n    logging.info('Running Mash paste with command: %s', ' '.join(args))\n    p = Popen(args)\n    p.wait()\n    assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)\n    return merge_sketch_path", "docstring": "Merge new Mash sketches with current Mash sketches\n\nArgs:\noutdir (str): output directory to write merged Mash sketch file\nsketch_paths (list of str): Mash sketch file paths for input fasta files\n\nReturns:\nstr: output path for Mash sketch file with new and old sketches", "source": "codesearchnet"}
{"code": "def traverse_by(self, fixers, traversal):\n    if (not fixers):\n        return\n    for node in traversal:\n        for fixer in fixers[node.type]:\n            results = fixer.match(node)\n            if results:\n                new = fixer.transform(node, results)\n                if (new is not None):\n                    node.replace(new)\n                    node = new", "docstring": "Traverse an AST, applying a set of fixers to each node.\n\nThis is a helper method for refactor_tree().\n\nArgs:\nfixers: a list of fixer instances.\ntraversal: a generator that yields AST nodes.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_report_zip(results):\n    \n    def add_subdir(root_path, subdir):\n        subdir_path = os.path.join(root_path, subdir)\n        for subdir_root, subdir_dirs, subdir_files in os.walk(subdir_path):\n            for subdir_file in subdir_files:\n                subdir_file_path = os.path.join(root_path, subdir, subdir_file)\n                if os.path.isfile(subdir_file_path):\n                    rel_path = os.path.relpath(subdir_root, subdir_file_path)\n                    subdir_arc_name = os.path.join(rel_path, subdir_file)\n                    zip_file.write(subdir_file_path, subdir_arc_name)\n            for subdir in subdir_dirs:\n                add_subdir(subdir_path, subdir)\n\n    storage = BytesIO()\n    tmp_dir = tempfile.mkdtemp()\n    try:\n        save_output(results, tmp_dir)\n        with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n            for root, dirs, files in os.walk(tmp_dir):\n                for file in files:\n                    file_path = os.path.join(root, file)\n                    if os.path.isfile(file_path):\n                        arcname = os.path.join(os.path.relpath(root, tmp_dir),\n                                               file)\n                        zip_file.write(file_path, arcname)\n                for directory in dirs:\n                    dir_path = os.path.join(root, directory)\n                    if os.path.isdir(dir_path):\n                        zip_file.write(dir_path, directory)\n                        add_subdir(root, directory)\n    finally:\n        shutil.rmtree(tmp_dir)\n\n    return storage.getvalue()", "docstring": "Creates a zip file of parsed report output\n\nArgs:\nresults (OrderedDict): The parsed results\n\nReturns:\nbytes: zip file bytes", "source": "juraj-google-style"}
{"code": "def set_floatx(value):\n    global _FLOATX\n    accepted_dtypes = {'bfloat16', 'float16', 'float32', 'float64'}\n    if value not in accepted_dtypes:\n        raise ValueError(f'Unknown `floatx` value: {value}. Expected one of {accepted_dtypes}')\n    _FLOATX = str(value)", "docstring": "Set the default float dtype.\n\nNote: It is not recommended to set this to `\"float16\"` for training,\nas this will likely cause numeric stability issues.\nInstead, mixed precision, which leverages\na mix of `float16` and `float32`. It can be configured by calling\n`keras.mixed_precision.set_dtype_policy('mixed_float16')`.\n\nArgs:\nvalue: String; `'bfloat16'`, `'float16'`, `'float32'`, or `'float64'`.\n\nExamples:\n>>> keras.config.floatx()\n'float32'\n\n>>> keras.config.set_floatx('float64')\n>>> keras.config.floatx()\n'float64'\n\n>>> # Set it back to float32\n>>> keras.config.set_floatx('float32')\n\nRaises:\nValueError: In case of invalid value.", "source": "github-repos"}
{"code": "def get_model_schema_and_features(model_dir):\n    schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')\n    schema = json.loads(file_io.read_file_to_string(schema_file))\n    features_file = os.path.join(model_dir, 'assets.extra', 'features.json')\n    features_config = json.loads(file_io.read_file_to_string(features_file))\n    return (schema, features_config)", "docstring": "Get a local model's schema and features config.\n\nArgs:\nmodel_dir: local or GCS path of a model.\nReturns:\nA tuple of schema (list) and features config (dict).", "source": "codesearchnet"}
{"code": "def _get_static_ndims(x, expect_static=False, expect_ndims=None, expect_ndims_no_more_than=None, expect_ndims_at_least=None):\n    ndims = x.shape.ndims\n    if (ndims is None):\n        shape_const = tf.get_static_value(tf.shape(input=x))\n        if (shape_const is not None):\n            ndims = shape_const.ndim\n    if (ndims is None):\n        if expect_static:\n            raise ValueError(('Expected argument `x` to have statically defined `ndims`.  Found: ' % x))\n        return\n    if (expect_ndims is not None):\n        ndims_message = ('Expected argument `x` to have ndims %s.  Found tensor %s' % (expect_ndims, x))\n        if (ndims != expect_ndims):\n            raise ValueError(ndims_message)\n    if (expect_ndims_at_least is not None):\n        ndims_at_least_message = ('Expected argument `x` to have ndims >= %d.  Found tensor %s' % (expect_ndims_at_least, x))\n        if (ndims < expect_ndims_at_least):\n            raise ValueError(ndims_at_least_message)\n    if (expect_ndims_no_more_than is not None):\n        ndims_no_more_than_message = ('Expected argument `x` to have ndims <= %d.  Found tensor %s' % (expect_ndims_no_more_than, x))\n        if (ndims > expect_ndims_no_more_than):\n            raise ValueError(ndims_no_more_than_message)\n    return ndims", "docstring": "Get static number of dimensions and assert that some expectations are met.\n\nThis function returns the number of dimensions 'ndims' of x, as a Python int.\n\nThe optional expect arguments are used to check the ndims of x, but this is\nonly done if the static ndims of x is not None.\n\nArgs:\nx:  A Tensor.\nexpect_static:  Expect `x` to have statically defined `ndims`.\nexpect_ndims:  Optional Python integer.  If provided, assert that x has\nnumber of dimensions equal to this.\nexpect_ndims_no_more_than:  Optional Python integer.  If provided, assert\nthat x has no more than this many dimensions.\nexpect_ndims_at_least:  Optional Python integer.  If provided, assert that x\nhas at least this many dimensions.\n\nReturns:\nndims:  A Python integer.\n\nRaises:\nValueError:  If any of the expectations above are violated.", "source": "codesearchnet"}
{"code": "def get_proposed_feature(project):\n    change_collector = ChangeCollector(project)\n    collected_changes = change_collector.collect_changes()\n    try:\n        new_feature_info = one_or_raise(collected_changes.new_feature_info)\n        (importer, _, _) = new_feature_info\n    except ValueError:\n        raise BalletError('Too many features collected')\n    module = importer()\n    feature = _get_contrib_feature_from_module(module)\n    return feature", "docstring": "Get the proposed feature\n\nThe path of the proposed feature is determined by diffing the project\nagainst a comparison branch, such as master. The feature is then imported\nfrom that path and returned.\n\nArgs:\nproject (ballet.project.Project): project info\n\nRaises:\nballet.exc.BalletError: more than one feature collected", "source": "codesearchnet"}
{"code": "def WrapCFTypeInPython(self, obj):\n    obj_type = self.dll.CFGetTypeID(obj)\n    if (obj_type == self.dll.CFBooleanGetTypeID()):\n        return CFBoolean(obj)\n    elif (obj_type == self.dll.CFNumberGetTypeID()):\n        return CFNumber(obj)\n    elif (obj_type == self.dll.CFStringGetTypeID()):\n        return CFString(obj)\n    elif (obj_type == self.dll.CFDictionaryGetTypeID()):\n        return CFDictionary(obj)\n    elif (obj_type == self.dll.CFArrayGetTypeID()):\n        return CFArray(obj)\n    else:\n        raise TypeError('Unknown type for object: {0}'.format(obj))", "docstring": "Package a CoreFoundation object in a Python wrapper.\n\nArgs:\nobj: The CoreFoundation object.\nReturns:\nOne of CFBoolean, CFNumber, CFString, CFDictionary, CFArray.\nRaises:\nTypeError: If the type is not supported.", "source": "codesearchnet"}
{"code": "def get_structures(self, primitive=True):\n        \n        structures = []\n        for d in self._cif.data.values():\n            try:\n                s = self._get_structure(d, primitive)\n                if s:\n                    structures.append(s)\n            except (KeyError, ValueError) as exc:\n                \n                \n                \n                self.errors.append(str(exc))\n                warnings.warn(str(exc))\n        if self.errors:\n            warnings.warn(\"Issues encountered while parsing CIF:\")\n            for error in self.errors:\n                warnings.warn(error)\n        if len(structures) == 0:\n            raise ValueError(\"Invalid cif file with no structures!\")\n        return structures", "docstring": "Return list of structures in CIF file. primitive boolean sets whether a\nconventional cell structure or primitive cell structure is returned.\n\nArgs:\nprimitive (bool): Set to False to return conventional unit cells.\nDefaults to True. With magnetic CIF files, will return primitive\nmagnetic cell which may be larger than nuclear primitive cell.\n\nReturns:\nList of Structures.", "source": "juraj-google-style"}
{"code": "def _GetOutputModulesInformation(self):\n    output_modules_information = []\n    for (name, output_class) in output_manager.OutputManager.GetOutputClasses():\n        output_modules_information.append((name, output_class.DESCRIPTION))\n    return output_modules_information", "docstring": "Retrieves the output modules information.\n\nReturns:\nlist[tuple[str, str]]: pairs of output module names and descriptions.", "source": "codesearchnet"}
{"code": "def _convert_schemas(mapping, schemas):\n    \n    schemas = deepcopy(schemas)\n    for schema in schemas:\n        for fk in schema.get('foreignKeys', []):\n            resource = fk['reference']['resource']\n            if resource != 'self':\n                if resource not in mapping:\n                    message = 'Not resource \"%s\" for foreign key \"%s\"'\n                    message = message % (resource, fk)\n                    raise ValueError(message)\n                fk['reference']['resource'] = mapping[resource]\n    return schemas", "docstring": "Convert schemas to be compatible with storage schemas.\n\nForeign keys related operations.\n\nArgs:\nmapping (dict): mapping between resource name and table name\nschemas (list): schemas\n\nRaises:\nValueError: if there is no resource\nfor some foreign key in given mapping\n\nReturns:\nlist: converted schemas", "source": "juraj-google-style"}
{"code": "def get_child(self, injection_site_fn, binding):\n        \n        child_scope_id = binding.scope_id\n        new_binding_stack = self._binding_stack + [binding]\n        if binding in self._binding_stack:\n            raise errors.CyclicInjectionError(new_binding_stack)\n        if not self._is_scope_usable_from_scope_fn(\n                child_scope_id, self._scope_id):\n            raise errors.BadDependencyScopeError(\n                self.get_injection_site_desc(),\n                self._scope_id, child_scope_id, binding.binding_key)\n        return _InjectionContext(\n            injection_site_fn, new_binding_stack, child_scope_id,\n            self._is_scope_usable_from_scope_fn)", "docstring": "Creates a child injection context.\n\nA \"child\" injection context is a context for a binding used to\ninject something into the current binding's provided value.\n\nArgs:\ninjection_site_fn: the child function being injected into\nbinding: a Binding\nReturns:\na new _InjectionContext", "source": "juraj-google-style"}
{"code": "def get_user(self, user):\n        \n        return self.service.get_user(\n            user, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Get user's data (first and last name, email, etc).\n\nArgs:\nuser (string): User name.\n\nReturns:\n(dictionary): User's data encoded in a dictionary.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def GetMap(self, cache_filename=None):\n    data = self.data\n    if cache_filename is None:\n        cache_filename = self.GetCacheFilename()\n    self.log.debug('Opening %r for reading existing cache', cache_filename)\n    if not os.path.exists(cache_filename):\n        self.log.warning('Cache file does not exist, using an empty map instead')\n    else:\n        cache_file = open(cache_filename)\n        data = self.map_parser.GetMap(cache_file, data)\n    return data", "docstring": "Returns the map from the cache.\n\nArgs:\ncache_filename: alternative file to read, optional.\n\nReturns:\nA child of Map containing the cache data.\n\nRaises:\nCacheNotFound: The cache file we expected to read from does not exist.", "source": "github-repos"}
{"code": "def make_dict_observable(matrix_observable):\n    dict_observable = {}\n    observable = np.array(matrix_observable)\n    observable_size = len(observable)\n    observable_bits = int(np.ceil(np.log2(observable_size)))\n    binary_formater = '0{}b'.format(observable_bits)\n    if (observable.ndim == 2):\n        observable = observable.diagonal()\n    for state_no in range(observable_size):\n        state_str = format(state_no, binary_formater)\n        dict_observable[state_str] = observable[state_no]\n    return dict_observable", "docstring": "Convert an observable in matrix form to dictionary form.\n\nTakes in a diagonal observable as a matrix and converts it to a dictionary\nform. Can also handle a list sorted of the diagonal elements.\n\nArgs:\nmatrix_observable (list): The observable to be converted to dictionary\nform. Can be a matrix or just an ordered list of observed values\n\nReturns:\nDict: A dictionary with all observable states as keys, and corresponding\nvalues being the observed value for that state", "source": "codesearchnet"}
{"code": "def run_without_tensor_float_32(description):\n\n    def decorator(f):\n\n        @functools.wraps(f)\n        def decorated(self, *args, **kwargs):\n            allowed = config.tensor_float_32_execution_enabled()\n            try:\n                config.enable_tensor_float_32_execution(False)\n                f(self, *args, **kwargs)\n            finally:\n                config.enable_tensor_float_32_execution(allowed)\n        return decorated\n    return decorator", "docstring": "Execute test with TensorFloat-32 disabled.\n\nWhile almost every real-world deep learning model runs fine with\nTensorFloat-32, many tests use assertAllClose or similar methods.\nTensorFloat-32 matmuls typically will cause such methods to fail with the\ndefault tolerances.\n\nArgs:\ndescription: A description used for documentation purposes, describing why\nthe test requires TensorFloat-32 to be disabled.\n\nReturns:\nDecorator which runs a test with TensorFloat-32 disabled.", "source": "github-repos"}
{"code": "def make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text):\n\n    def module_fn():\n        'Spec function for a token embedding module.'\n        tokens = tf.placeholder(shape=[None], dtype=tf.string, name='tokens')\n        embeddings_var = tf.get_variable(initializer=tf.zeros([(vocab_size + num_oov_buckets), embeddings_dim]), name=EMBEDDINGS_VAR_NAME, dtype=tf.float32)\n        lookup_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets)\n        ids = lookup_table.lookup(tokens)\n        combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)\n        hub.add_signature('default', {'tokens': tokens}, {'default': combined_embedding})\n\n    def module_fn_with_preprocessing():\n        'Spec function for a full-text embedding module with preprocessing.'\n        sentences = tf.placeholder(shape=[None], dtype=tf.string, name='sentences')\n        normalized_sentences = tf.regex_replace(input=sentences, pattern='\\\\pP', rewrite='')\n        tokens = tf.string_split(normalized_sentences, ' ')\n        embeddings_var = tf.get_variable(initializer=tf.zeros([(vocab_size + num_oov_buckets), embeddings_dim]), name=EMBEDDINGS_VAR_NAME, dtype=tf.float32)\n        lookup_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets)\n        sparse_ids = tf.SparseTensor(indices=tokens.indices, values=lookup_table.lookup(tokens.values), dense_shape=tokens.dense_shape)\n        (sparse_ids, _) = tf.sparse_fill_empty_rows(sparse_ids, lookup_table.lookup(tf.constant('')))\n        sparse_ids = tf.sparse_reset_shape(sparse_ids)\n        combined_embedding = tf.nn.embedding_lookup_sparse(params=embeddings_var, sp_ids=sparse_ids, sp_weights=None, combiner='sqrtn')\n        hub.add_signature('default', {'sentences': sentences}, {'default': combined_embedding})\n    if preprocess_text:\n        return hub.create_module_spec(module_fn_with_preprocessing)\n    else:\n        return hub.create_module_spec(module_fn)", "docstring": "Makes a module spec to simply perform token to embedding lookups.\n\nInput of this module is a 1-D list of string tokens. For T tokens input and\nan M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.\n\nArgs:\nvocabulary_file: Text file where each line is a key in the vocabulary.\nvocab_size: The number of tokens contained in the vocabulary.\nembeddings_dim: The embedding dimension.\nnum_oov_buckets: The number of out-of-vocabulary buckets.\npreprocess_text: Whether to preprocess the input tensor by removing\npunctuation and splitting on spaces.\n\nReturns:\nA module spec object used for constructing a TF-Hub module.", "source": "codesearchnet"}
{"code": "def get_nets_jpnic(self, response):\n    nets = []\n    for match in re.finditer('^.*?(\\\\[Network Number\\\\])[^\\\\S\\\\n]+.+?>(?P<val>.+?)</A>$', response, re.MULTILINE):\n        try:\n            net = copy.deepcopy(BASE_NET)\n            tmp = ip_network(match.group(2))\n            try:\n                network_address = tmp.network_address\n            except AttributeError:\n                network_address = tmp.ip\n                pass\n            try:\n                broadcast_address = tmp.broadcast_address\n            except AttributeError:\n                broadcast_address = tmp.broadcast\n                pass\n            net['range'] = '{0} - {1}'.format((network_address + 1), broadcast_address)\n            cidr = ip_network(match.group(2).strip()).__str__()\n            net['cidr'] = cidr\n            net['start'] = match.start()\n            net['end'] = match.end()\n            nets.append(net)\n        except (ValueError, TypeError):\n            pass\n    return nets", "docstring": "The function for parsing network blocks from jpnic whois data.\n\nArgs:\nresponse (:obj:`str`): The response from the jpnic server.\n\nReturns:\nlist of dict: Mapping of networks with start and end positions.\n\n::\n\n[{\n'cidr' (str) - The network routing block\n'start' (int) - The starting point of the network\n'end' (int) - The endpoint point of the network\n}]", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n    eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n    output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n    if token_ids_1 is not None:\n        output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n    return output", "docstring": "Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\nsequence pair mask has the following format:\n\n```\n0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n| first sequence    | second sequence |\n```\n\nif token_ids_1 is None, only returns the first portion of the mask (0s).\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).", "source": "github-repos"}
{"code": "def upload_from_url_sync(cls, url, timeout=30, interval=0.3, until_ready=False, store=None, filename=None):\n    ffu = cls.upload_from_url(url, store, filename)\n    return ffu.wait(timeout=timeout, interval=interval, until_ready=until_ready)", "docstring": "Uploads file from given url and returns ``File`` instance.\n\nArgs:\n- url (str): URL of file to upload to\n- store (Optional[bool]): Should the file be automatically stored\nupon upload. Defaults to None.\n- False - do not store file\n- True - store file (can result in error if autostore\nis disabled for project)\n- None - use project settings\n- filename (Optional[str]): Name of the uploaded file. If this not\nspecified the filename will be obtained from response headers\nor source URL. Defaults to None.\n- timeout (Optional[int]): seconds to wait for successful upload.\nDefaults to 30.\n- interval (Optional[float]): interval between upload status checks.\nDefaults to 0.3.\n- until_ready (Optional[bool]): should we wait until file is\navailable via CDN. Defaults to False.\n\nReturns:\n``File`` instance\n\nRaises:\n``TimeoutError`` if file wasn't uploaded in time", "source": "codesearchnet"}
{"code": "def get_fragment(self, list_of_indextuples, give_only_index=False, use_lookup=None):\n    if (use_lookup is None):\n        use_lookup = settings['defaults']['use_lookup']\n    exclude = [tuple[0] for tuple in list_of_indextuples]\n    index_of_atom = list_of_indextuples[0][1]\n    fragment_index = self.get_coordination_sphere(index_of_atom, exclude=set(exclude), n_sphere=float('inf'), only_surface=False, give_only_index=True, use_lookup=use_lookup)\n    if give_only_index:\n        return fragment_index\n    else:\n        return self.loc[(fragment_index, :)]", "docstring": "Get the indices of the atoms in a fragment.\n\nThe list_of_indextuples contains all bondings from the\nmolecule to the fragment. ``[(1,3), (2,4)]`` means for example that the\nfragment is connected over two bonds. The first bond is from atom 1 in\nthe molecule to atom 3 in the fragment. The second bond is from atom\n2 in the molecule to atom 4 in the fragment.\n\nArgs:\nlist_of_indextuples (list):\ngive_only_index (bool): If ``True`` a set of indices\nis returned. Otherwise a new Cartesian instance.\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\nA set of indices or a new Cartesian instance.", "source": "codesearchnet"}
{"code": "def absent(name, bridge=None):\n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n    bridge_exists = False\n    if bridge:\n        bridge_exists = __salt__['openvswitch.bridge_exists'](bridge)\n        if bridge_exists:\n            port_list = __salt__['openvswitch.port_list'](bridge)\n        else:\n            port_list = ()\n    else:\n        port_list = [name]\n    comments = {}\n    comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge)\n    comments['comment_port_notexists'] = 'Port {0} does not exist on bridge {1}.'.format(name, bridge)\n    comments['comment_port_deleted'] = 'Port {0} deleted.'.format(name)\n    comments['comment_port_notdeleted'] = 'Unable to delete port {0}.'.format(name)\n    comments['changes_port_deleted'] = {name: {'old': 'Port named {0} may exist.'.format(name), 'new': 'Deleted port {0}.'.format(name)}}\n    if __opts__['test']:\n        if (bridge and (not bridge_exists)):\n            ret['result'] = None\n            ret['comment'] = comments['comment_bridge_notexists']\n        elif (name not in port_list):\n            ret['result'] = True\n            ret['comment'] = comments['comment_port_notexists']\n        else:\n            ret['result'] = None\n            ret['comment'] = comments['comment_port_deleted']\n        return ret\n    if (bridge and (not bridge_exists)):\n        ret['result'] = False\n        ret['comment'] = comments['comment_bridge_notexists']\n    elif (name not in port_list):\n        ret['result'] = True\n        ret['comment'] = comments['comment_port_notexists']\n    else:\n        if bridge:\n            port_remove = __salt__['openvswitch.port_remove'](br=bridge, port=name)\n        else:\n            port_remove = __salt__['openvswitch.port_remove'](br=None, port=name)\n        if port_remove:\n            ret['result'] = True\n            ret['comment'] = comments['comment_port_deleted']\n            ret['changes'] = comments['changes_port_deleted']\n        else:\n            ret['result'] = False\n            ret['comment'] = comments['comment_port_notdeleted']\n    return ret", "docstring": "Ensures that the named port exists on bridge, eventually deletes it.\nIf bridge is not set, port is removed from  whatever bridge contains it.\n\nArgs:\nname: The name of the port.\nbridge: The name of the bridge.", "source": "codesearchnet"}
{"code": "def highlight(__text: str, *, lexer: str='diff', formatter: str='terminal') -> str:\n    if sys.stdout.isatty():\n        lexer = get_lexer_by_name(lexer)\n        formatter = get_formatter_by_name(formatter)\n        __text = pyg_highlight(__text, lexer, formatter)\n    return __text", "docstring": "Highlight text highlighted using ``pygments``.\n\nReturns text untouched if colour output is not enabled.\n\nSee also: :pypi:`Pygments`\n\nArgs:\n__text: Text to highlight\nlexer: Jinja lexer to use\nformatter: Jinja formatter to use\nReturns:\nSyntax highlighted output, when possible", "source": "codesearchnet"}
{"code": "def build_backward_pass_step(get_transition_matrix_for_timestep):\n\n    def backward_pass_step(state, filtered_parameters):\n        'Run a single step of backward smoothing.'\n        (filtered_mean, filtered_cov, predicted_mean, predicted_cov) = filtered_parameters\n        transition_matrix = get_transition_matrix_for_timestep(state.timestep)\n        next_posterior_mean = state.backward_mean\n        next_posterior_cov = state.backward_cov\n        (posterior_mean, posterior_cov) = backward_smoothing_update(filtered_mean, filtered_cov, predicted_mean, predicted_cov, next_posterior_mean, next_posterior_cov, transition_matrix)\n        return BackwardPassState(backward_mean=posterior_mean, backward_cov=posterior_cov, timestep=(state.timestep - 1))\n    return backward_pass_step", "docstring": "Build a callable that perform one step for backward smoothing.\n\nArgs:\nget_transition_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[latent_size, latent_size]`.\n\nReturns:\nbackward_pass_step: a callable that updates a BackwardPassState\nfrom timestep `t` to `t-1`.", "source": "codesearchnet"}
{"code": "def create(self, resource, timeout=-1):\n        \n        data = self.__default_values.copy()\n        data.update(resource)\n        return self._client.create(data, timeout=timeout)", "docstring": "Creates a Golden Image resource from the deployed OS Volume as per the attributes specified.\n\nArgs:\nresource (dict): Object to create.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation\nin OneView, it just stops waiting for its completion.\n\nReturns:\ndict: Golden Image created.", "source": "juraj-google-style"}
{"code": "def get_urls_for_profiles(edx_video_id, profiles):\n    profiles_to_urls = {profile: None for profile in profiles}\n    try:\n        video_info = get_video_info(edx_video_id)\n    except ValVideoNotFoundError:\n        return profiles_to_urls\n    for encoded_video in video_info['encoded_videos']:\n        if (encoded_video['profile'] in profiles):\n            profiles_to_urls[encoded_video['profile']] = encoded_video['url']\n    return profiles_to_urls", "docstring": "Returns a dict mapping profiles to URLs.\n\nIf the profiles or video is not found, urls will be blank.\n\nArgs:\nedx_video_id (str): id of the video\nprofiles (list): list of profiles we want to search for\n\nReturns:\n(dict): A dict containing the profile to url pair", "source": "codesearchnet"}
{"code": "def wrap_or_copy(cls, func, **options):\n    if isinstance(func, openhtf.PhaseGroup):\n        raise PhaseWrapError(('Cannot wrap PhaseGroup <%s> as a phase.' % (func.name or 'Unnamed')))\n    if isinstance(func, cls):\n        retval = mutablerecords.CopyRecord(func)\n    else:\n        retval = cls(func)\n    retval.options.update(**options)\n    return retval", "docstring": "Return a new PhaseDescriptor from the given function or instance.\n\nWe want to return a new copy so that you can reuse a phase with different\noptions, plugs, measurements, etc.\n\nArgs:\nfunc: A phase function or PhaseDescriptor instance.\n**options: Options to update on the result.\n\nRaises:\nPhaseWrapError: if func is a openhtf.PhaseGroup.\n\nReturns:\nA new PhaseDescriptor object.", "source": "codesearchnet"}
{"code": "def csv_row_to_transaction(index, row, source_encoding='latin1', date_format='%d-%m-%Y', thousand_sep='.', decimal_sep=','):\n    (xfer, posted, message, amount, total) = row\n    xfer = Parse.date(xfer)\n    posted = Parse.date(posted)\n    message = Parse.to_utf8(message, source_encoding)\n    amount = Parse.money(amount)\n    total = Parse.money(total)\n    return Transaction(index, xfer, posted, message, amount, total)", "docstring": "Parses a row of strings to a ``Transaction`` object.\n\nArgs:\nindex: The index of this row in the original CSV file. Used for\nsorting ``Transaction``s by their order of appearance.\n\nrow: The row containing strings for [transfer_date, posted_date,\nmessage, money_amount, money_total].\n\nsource_encoding: The encoding that will be used to decode strings\nto UTF-8.\n\ndate_format: The format of dates in this row.\n\nthousand_sep: The thousand separator in money amounts.\n\ndecimal_sep: The decimal separator in money amounts.\n\nReturns:\nA ``Transaction`` object.", "source": "codesearchnet"}
{"code": "def from_bytes(b):\n        \n        if len(b) != 64:\n            raise ValueError(\"from_bytes: Signature length != 64.\")\n        r = int.from_bytes(b[0:32], 'big')\n        s = int.from_bytes(b[32:64], 'big')\n        return Signature(r, s)", "docstring": "Extracts the r and s components from a byte string.\n\nArgs:\nb (bytes): A 64-byte long string. The first 32 bytes are\nextracted as the r component and the second 32 bytes\nare extracted as the s component.\n\nReturns:\nSignature: A Signature object.\n\nRaises:\nValueError: If signature is incorrect length", "source": "juraj-google-style"}
{"code": "def decode_offset_response(cls, response):\n        \n        return [\n            kafka.structs.OffsetResponsePayload(topic, partition, error, tuple(offsets))\n            for topic, partitions in response.topics\n            for partition, error, offsets in partitions\n        ]", "docstring": "Decode OffsetResponse into OffsetResponsePayloads\n\nArguments:\nresponse: OffsetResponse\n\nReturns: list of OffsetResponsePayloads", "source": "juraj-google-style"}
{"code": "def resolve(node, source, context_filepath, context_lineno, context_col_offset):\n    code_reader = io.StringIO(source)\n    comments_map = {}\n    try:\n        for token in tokenize.generate_tokens(code_reader.readline):\n            tok_type, tok_string, loc, _, _ = token\n            srow, _ = loc\n            if tok_type == tokenize.COMMENT:\n                comments_map[srow] = tok_string.strip()[1:].strip()\n    except tokenize.TokenError:\n        if isinstance(node, gast.Lambda):\n            pass\n        else:\n            raise\n    source_lines = source.split('\\n')\n    visitor = OriginResolver(node, source_lines, comments_map, context_lineno, context_col_offset, context_filepath)\n    visitor.visit(node)", "docstring": "Adds origin information to an AST, based on the source it was loaded from.\n\nThis allows us to map the original source code line numbers to generated\nsource code.\n\nNote: the AST may be a part of a larger context (e.g. a function is part of\na module that may contain other things). However, this function does not\nassume the source argument contains the entire context, nor that it contains\nonly code corresponding to node itself. However, it assumes that node was\nparsed from the given source code.\nFor this reason, two extra arguments are required, and they indicate the\nlocation of the node in the original context.\n\nArgs:\nnode: gast.AST, the AST to annotate.\nsource: Text, the source code representing node.\ncontext_filepath: Text\ncontext_lineno: int\ncontext_col_offset: int", "source": "github-repos"}
{"code": "def default_update_stack(self, fqn, template, old_parameters, parameters, tags, stack_policy=None, **kwargs):\n    logger.debug('Using default provider mode for %s.', fqn)\n    args = generate_cloudformation_args(fqn, parameters, tags, template, service_role=self.service_role, stack_policy=stack_policy)\n    try:\n        self.cloudformation.update_stack(**args)\n    except botocore.exceptions.ClientError as e:\n        if ('No updates are to be performed.' in str(e)):\n            logger.debug('Stack %s did not change, not updating.', fqn)\n            raise exceptions.StackDidNotChange\n        elif (e.response['Error']['Message'] == 'TemplateURL must reference a valid S3 object to which you have access.'):\n            s3_fallback(fqn, template, parameters, tags, self.cloudformation.update_stack, self.service_role)\n        else:\n            raise", "docstring": "Update a Cloudformation stack in default mode.\n\nArgs:\nfqn (str): The fully qualified name of the Cloudformation stack.\ntemplate (:class:`stacker.providers.base.Template`): A Template\nobject to use when updating the stack.\nold_parameters (list): A list of dictionaries that defines the\nparameter list on the existing Cloudformation stack.\nparameters (list): A list of dictionaries that defines the\nparameter list to be applied to the Cloudformation stack.\ntags (list): A list of dictionaries that defines the tags\nthat should be applied to the Cloudformation stack.\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.", "source": "codesearchnet"}
{"code": "def add_filter(self, table, cols, condition):\n        \n        if table is not None and table not in self.relations:\n            raise ItsdbError('Cannot add filter; table \"{}\" is not defined '\n                             'by the relations file.'\n                             .format(table))\n        \n        if cols is None:\n            cols = [None]\n        self.filters[table].append((cols, condition))", "docstring": "Add a filter. When reading *table*, rows in *table* will be\nfiltered by filter_rows().\n\nArgs:\ntable: The table the filter applies to.\ncols: The columns in *table* to filter on.\ncondition: The filter function.", "source": "juraj-google-style"}
{"code": "def _GetGradReduced(output_grad, output_subs, input_subs, input_shape, reduced_label_set):\n    reduced_subs, reduced_dims, reduced_axes = _GetReducedSubscripts(reduced_label_set, input_shape, input_subs)\n    has_repeated_labels = len(set(input_subs)) + len(set(output_subs)) < len(input_subs) + len(output_subs)\n    input_subs_without_reduced_labels = ''.join([s for s in input_subs if s not in reduced_label_set])\n    if not has_repeated_labels and input_subs_without_reduced_labels == output_subs:\n        reduced_shape = math_ops.reduced_shape(input_shape, ops.convert_to_tensor(reduced_axes))\n        return array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), input_shape)\n    grad_shape_with_reduced_labels = array_ops.concat([reduced_dims, array_ops.shape(output_grad)], axis=0)\n    reduced_shape = array_ops.concat([array_ops.ones(len(reduced_label_set), dtype=dtypes.int32), array_ops.shape(output_grad)], axis=0)\n    broadcasted_grad = array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), grad_shape_with_reduced_labels)\n    return gen_linalg_ops.einsum([broadcasted_grad], '{}->{}'.format(reduced_subs + output_subs, input_subs))", "docstring": "Returns the gradient wrt input for a unary einsum with reductions.\n\nArgs:\noutput_grad: The gradient wrt the output of a unary einsum operation.\noutput_subs: The output subscript. (E.g. `ac` for equation `abc->ac`).\ninput_subs: The input subscript. (E.g. `abc` for equation `abc->ac`).\ninput_shape: A `Tensor` representing the shape of the input operand.\nreduced_label_set: The set of axis labels appearing in `input_subs` but\nnot in `output_subs`.", "source": "github-repos"}
{"code": "def _parse_exchange_token_response(content):\n    resp = {}\n    content = _helpers._from_bytes(content)\n    try:\n        resp = json.loads(content)\n    except Exception:\n        resp = _helpers.parse_unique_urlencoded(content)\n    if (resp and ('expires' in resp)):\n        resp['expires_in'] = resp.pop('expires')\n    return resp", "docstring": "Parses response of an exchange token request.\n\nMost providers return JSON but some (e.g. Facebook) return a\nurl-encoded string.\n\nArgs:\ncontent: The body of a response\n\nReturns:\nContent as a dictionary object. Note that the dict could be empty,\ni.e. {}. That basically indicates a failure.", "source": "codesearchnet"}
{"code": "def add(self, index, value):\n        \n        self.buf.append(value)\n        if (index - self.flush_at) < self.interval:\n            return\n        value = np.mean(self.buf)\n        if self.verbose:\n            logger.info(\"iter={} {{{}}}={}\".format(index, self.name, value))\n        if self.fd is not None:\n            print(\"{} {:g}\".format(index, value), file=self.fd)\n        self.flush_at = index\n        self.buf = []", "docstring": "Add a value to the series.\n\nArgs:\nindex (int): Index.\nvalue (float): Value.", "source": "juraj-google-style"}
{"code": "def download(branch=None, build=True, installdir=\"MalmoPlatform\"):\n    \n\n    if branch is None:\n        branch = malmo_version\n\n    subprocess.check_call([\"git\", \"clone\", \"-b\", branch, \"https:\n\n    return setup(build=build, installdir=installdir)", "docstring": "Download Malmo from github and build (by default) the Minecraft Mod.\nExample usage: import malmoenv.bootstrap; malmoenv.bootstrap.download()\nArgs:\nbranch: optional branch to clone. TODO Default is release version.\nbuild: build the Mod unless build arg is given as False.\ninstalldir: the install dir name. Defaults to MalmoPlatform.\nReturns:\nThe path for the Malmo Minecraft mod.", "source": "juraj-google-style"}
{"code": "def bandpass_filter(data: FLOATS_TYPE, sampling_freq_hz: float, lower_freq_hz: float, upper_freq_hz: float, numtaps: int) -> FLOATS_TYPE:\n    f1 = normalized_frequency(lower_freq_hz, sampling_freq_hz)\n    f2 = normalized_frequency(upper_freq_hz, sampling_freq_hz)\n    coeffs = firwin(numtaps=numtaps, cutoff=[f1, f2], pass_zero=False)\n    filtered_data = lfilter(b=coeffs, a=1.0, x=data)\n    return filtered_data", "docstring": "Apply a band-pass filter to the data.\n\nArgs:\ndata: time series of the data\nsampling_freq_hz: sampling frequency :math:`f_s`, in Hz\n(or other consistent units)\nlower_freq_hz: filter cutoff lower frequency in Hz\n(or other consistent units)\nupper_freq_hz: filter cutoff upper frequency in Hz\n(or other consistent units)\nnumtaps: number of filter taps\n\nReturns:\nfiltered data\n\nNote: number of filter taps = filter order + 1", "source": "codesearchnet"}
{"code": "def image_load(filename: str) -> tcod.image.Image:\n    return tcod.image.Image._from_cdata(ffi.gc(lib.TCOD_image_load(_bytes(filename)), lib.TCOD_image_delete))", "docstring": "Load an image file into an Image instance and return it.\n\nArgs:\nfilename (AnyStr): Path to a .bmp or .png image file.", "source": "codesearchnet"}
{"code": "def read_worker_creds(key='credentials'):\n    for path in CREDS_FILES:\n        if (not os.path.exists(path)):\n            continue\n        contents = load_json_or_yaml(path, is_path=True, exception=None)\n        if contents.get(key):\n            return contents[key]\n    else:\n        if ((key == 'credentials') and os.environ.get('TASKCLUSTER_ACCESS_TOKEN') and os.environ.get('TASKCLUSTER_CLIENT_ID')):\n            credentials = {'accessToken': os.environ['TASKCLUSTER_ACCESS_TOKEN'], 'clientId': os.environ['TASKCLUSTER_CLIENT_ID']}\n            if os.environ.get('TASKCLUSTER_CERTIFICATE'):\n                credentials['certificate'] = os.environ['TASKCLUSTER_CERTIFICATE']\n            return credentials", "docstring": "Get credentials from CREDS_FILES or the environment.\n\nThis looks at the CREDS_FILES in order, and falls back to the environment.\n\nArgs:\nkey (str, optional): each CREDS_FILE is a json dict.  This key's value\ncontains the credentials.  Defaults to 'credentials'.\n\nReturns:\ndict: the credentials found. None if no credentials found.", "source": "codesearchnet"}
{"code": "def encrypt_block(self, plainText):\n    if (not self.initialized):\n        raise TypeError('CamCrypt object has not been initialized')\n    if (len(plainText) != BLOCK_SIZE):\n        raise ValueError(('plainText must be %d bytes long (received %d bytes)' % (BLOCK_SIZE, len(plainText))))\n    cipher = ctypes.create_string_buffer(BLOCK_SIZE)\n    self.encblock(self.bitlen, plainText, self.keytable, cipher)\n    return cipher.raw", "docstring": "Encrypt a 16-byte block of data.\n\nNOTE: This function was formerly called `encrypt`, but was changed when\nsupport for encrypting arbitrary-length strings was added.\n\nArgs:\nplainText (str): 16-byte data.\n\nReturns:\n16-byte str.\n\nRaises:\nTypeError if CamCrypt object has not been initialized.\nValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.", "source": "codesearchnet"}
{"code": "def on_change(self, attr, *callbacks):\n        \n        if len(callbacks) == 0:\n            raise ValueError(\"on_change takes an attribute name and one or more callbacks, got only one parameter\")\n\n        _callbacks = self._callbacks.setdefault(attr, [])\n        for callback in callbacks:\n\n            if callback in _callbacks:\n                continue\n\n            _check_callback(callback, ('attr', 'old', 'new'))\n\n            _callbacks.append(callback)", "docstring": "Add a callback on this object to trigger when ``attr`` changes.\n\nArgs:\nattr (str) : an attribute name on this object\ncallback (callable) : a callback function to register\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def push_datapackage(descriptor, backend, **backend_options):\n    \n\n    \n    warnings.warn(\n        'Functions \"push/pull_datapackage\" are deprecated. '\n        'Please use \"Package\" class',\n        UserWarning)\n\n    \n    tables = []\n    schemas = []\n    datamap = {}\n    mapping = {}\n\n    \n    model = Package(descriptor)\n\n    \n    plugin = import_module('jsontableschema.plugins.%s' % backend)\n    storage = plugin.Storage(**backend_options)\n\n    \n    for resource in model.resources:\n        if not resource.tabular:\n            continue\n        name = resource.descriptor.get('name', None)\n        table = _convert_path(resource.descriptor['path'], name)\n        schema = resource.descriptor['schema']\n        data = resource.table.iter(keyed=True)\n        \n        def values(schema, data):\n            for item in data:\n                row = []\n                for field in schema['fields']:\n                    row.append(item.get(field['name'], None))\n                yield tuple(row)\n        tables.append(table)\n        schemas.append(schema)\n        datamap[table] = values(schema, data)\n        if name is not None:\n            mapping[name] = table\n    schemas = _convert_schemas(mapping, schemas)\n\n    \n    for table in tables:\n        if table in storage.buckets:\n            storage.delete(table)\n    storage.create(tables, schemas)\n\n    \n    for table in storage.buckets:\n        if table in datamap:\n            storage.write(table, datamap[table])\n    return storage", "docstring": "Push Data Package to storage.\n\nAll parameters should be used as keyword arguments.\n\nArgs:\ndescriptor (str): path to descriptor\nbackend (str): backend name like `sql` or `bigquery`\nbackend_options (dict): backend options mentioned in backend docs", "source": "juraj-google-style"}
{"code": "def List(device, device_path):\n    files = device.List(device_path)\n    files.sort(key=(lambda x: x.filename))\n    maxname = max((len(f.filename) for f in files))\n    maxsize = max((len(str(f.size)) for f in files))\n    for f in files:\n        mode = (((((((((('d' if stat.S_ISDIR(f.mode) else '-') + ('r' if (f.mode & stat.S_IRUSR) else '-')) + ('w' if (f.mode & stat.S_IWUSR) else '-')) + ('x' if (f.mode & stat.S_IXUSR) else '-')) + ('r' if (f.mode & stat.S_IRGRP) else '-')) + ('w' if (f.mode & stat.S_IWGRP) else '-')) + ('x' if (f.mode & stat.S_IXGRP) else '-')) + ('r' if (f.mode & stat.S_IROTH) else '-')) + ('w' if (f.mode & stat.S_IWOTH) else '-')) + ('x' if (f.mode & stat.S_IXOTH) else '-'))\n        t = time.gmtime(f.mtime)\n        (yield ('%s %*d %04d-%02d-%02d %02d:%02d:%02d %-*s\\n' % (mode, maxsize, f.size, t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, maxname, f.filename)))", "docstring": "Prints a directory listing.\n\nArgs:\ndevice_path: Directory to list.", "source": "codesearchnet"}
{"code": "def shuffle(self, func, lengths, **kwargs):\n        \n        num_splits = len(lengths)\n        \n        kwargs[\"manual_partition\"] = True\n        kwargs[\"_lengths\"] = lengths\n        args = [self.axis, func, num_splits, kwargs, False]\n        args.extend(self.list_of_blocks)\n        return self._wrap_partitions(self.deploy_axis_func(*args))", "docstring": "Shuffle the order of the data in this axis based on the `lengths`.\n\nExtends `BaseFrameAxisPartition.shuffle`.\n\nArgs:\nfunc: The function to apply before splitting.\nlengths: The list of partition lengths to split the result into.\n\nReturns:\nA list of RemotePartition objects split by `lengths`.", "source": "juraj-google-style"}
{"code": "def create_config(config_path=\"scriptworker.yaml\"):\n    \n    if not os.path.exists(config_path):\n        print(\"{} doesn't exist! Exiting...\".format(config_path), file=sys.stderr)\n        sys.exit(1)\n    with open(config_path, \"r\", encoding=\"utf-8\") as fh:\n        secrets = safe_load(fh)\n    config = dict(deepcopy(DEFAULT_CONFIG))\n    if not secrets.get(\"credentials\"):\n        secrets['credentials'] = read_worker_creds()\n    config.update(secrets)\n    apply_product_config(config)\n    messages = check_config(config, config_path)\n    if messages:\n        print('\\n'.join(messages), file=sys.stderr)\n        print(\"Exiting...\", file=sys.stderr)\n        sys.exit(1)\n    credentials = get_frozen_copy(secrets['credentials'])\n    del(config['credentials'])\n    config = get_frozen_copy(config)\n    return config, credentials", "docstring": "Create a config from DEFAULT_CONFIG, arguments, and config file.\n\nThen validate it and freeze it.\n\nArgs:\nconfig_path (str, optional): the path to the config file.  Defaults to\n\"scriptworker.yaml\"\n\nReturns:\ntuple: (config frozendict, credentials dict)\n\nRaises:\nSystemExit: on failure", "source": "juraj-google-style"}
{"code": "def FormatProblem(self, d=None):\n    \n    if not d:\n      d = self.GetDictToFormat()\n\n    output_error_text = self.__class__.ERROR_TEXT % d\n    if ('reason' in d) and d['reason']:\n      return '%s\\n%s' % (output_error_text, d['reason'])\n    else:\n      return output_error_text", "docstring": "Return a text string describing the problem.\n\nArgs:\nd: map returned by GetDictToFormat with  with formatting added", "source": "juraj-google-style"}
{"code": "def get_electron_number(self, charge=0):\n        \n        atomic_number = constants.elements['atomic_number'].to_dict()\n        return sum([atomic_number[atom] for atom in self['atom']]) - charge", "docstring": "Return the number of electrons.\n\nArgs:\ncharge (int): Charge of the molecule.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def _build(self, *args):\n    \n    net = args\n\n    if not self._layers:\n      \n      \n      \n      \n      if len(args) == 1:\n        return args[0]\n      else:\n        return args\n\n    for layer in self._layers:\n      if isinstance(net, tuple):\n        net = layer(*net)\n      else:\n        net = layer(net)\n\n    return net", "docstring": "Connects the Sequential module into the graph.\n\nArgs:\n*args: A tuple of inputs, to be unpacked as the arguments to the first\nlayer.\n\nReturns:\nThe output value of the last layer.", "source": "juraj-google-style"}
{"code": "def IsErrorSuppressedByNolint(category, linenum):\n    return (_global_error_suppressions.get(category, False) or (linenum in _error_suppressions.get(category, set())) or (linenum in _error_suppressions.get(None, set())))", "docstring": "Returns true if the specified error category is suppressed on this line.\n\nConsults the global error_suppressions map populated by\nParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.\n\nArgs:\ncategory: str, the category of the error.\nlinenum: int, the current line number.\nReturns:\nbool, True iff the error should be suppressed due to a NOLINT comment or\nglobal suppression.", "source": "codesearchnet"}
{"code": "def load_hpo_bulk(self, hpo_bulk):\n        \n        LOG.debug(\"Loading hpo bulk\")\n\n        try:\n            result = self.hpo_term_collection.insert_many(hpo_bulk)\n        except (DuplicateKeyError, BulkWriteError) as err:\n            raise IntegrityError(err)\n        return result", "docstring": "Add a hpo object\n\nArguments:\nhpo_bulk(list(scout.models.HpoTerm))\n\nReturns:\nresult: pymongo bulkwrite result", "source": "juraj-google-style"}
{"code": "def get_h_product(self, vector, dtype=None):\n    \n    \n    \n    if dtype is None:\n      dtype = self.nn_dtype\n    beta = tf.cast(vector, self.nn_dtype)\n    h_beta_rows = []\n    for i in range(self.nn_params.num_hidden_layers):\n      \n      gamma = beta[self.dual_index[i]:self.dual_index[i + 1]]\n      delta = beta[self.dual_index[i + 1]:self.dual_index[i + 2]]\n\n      \n      if i == 0:\n        h_beta_rows.append(\n            tf.multiply(2 * self.lambda_lu[i], gamma) -\n            self.nn_params.forward_pass(\n                tf.multiply(self.lambda_quad[i + 1], delta),\n                i,\n                is_transpose=True))\n      else:\n        h_beta_rows[i] = (h_beta_rows[i] +\n                          tf.multiply(self.lambda_quad[i] +\n                                      self.lambda_lu[i], gamma) -\n                          self.nn_params.forward_pass(\n                              tf.multiply(self.lambda_quad[i+1], delta),\n                              i, is_transpose=True))\n\n      new_row = (\n          tf.multiply(self.lambda_quad[i + 1] + self.lambda_lu[i + 1], delta) -\n          tf.multiply(self.lambda_quad[i + 1],\n                      self.nn_params.forward_pass(gamma, i)))\n      h_beta_rows.append(new_row)\n\n    \n    h_beta_rows[self.nn_params.num_hidden_layers] = (\n        h_beta_rows[self.nn_params.num_hidden_layers] +\n        tf.multiply((self.lambda_quad[self.nn_params.num_hidden_layers] +\n                     self.lambda_lu[self.nn_params.num_hidden_layers]),\n                    delta))\n\n    h_beta = tf.concat(h_beta_rows, axis=0)\n    return tf.cast(h_beta, dtype)", "docstring": "Function that provides matrix product interface with PSD matrix.\n\nArgs:\nvector: the vector to be multiplied with matrix H\n\nReturns:\nresult_product: Matrix product of H and vector", "source": "juraj-google-style"}
{"code": "def exists(self, url: str) -> bool:\n    _, path = self._parse_url(url)\n    return self._exists(path)", "docstring": "Checks existence of url in HDFS.\n\nArgs:\nurl: String in the form hdfs://...\n\nReturns:\nTrue if url exists as a file or directory in HDFS.", "source": "github-repos"}
{"code": "def animation(frame_function: types.FrameFunction) -> types.Animation:\n    \n    animation_ = core.Animation(frame_function)\n\n    @functools.wraps(frame_function)\n    def wrapper(*args, **kwargs):\n        return animation_(*args, **kwargs)\n\n    return wrapper", "docstring": "Turn a FrameFunction into an Animation.\n\nArgs:\nframe_function: A function that returns a FrameGenerator.\n\nReturns:\nan Animation decorator function.", "source": "juraj-google-style"}
{"code": "def save_dataframes(self, outdir, prefix='df_'):\n        \n        \n        dfs = list(filter(lambda x: x.startswith(prefix), dir(self)))\n\n        counter = 0\n        for df in dfs:\n            outpath = ssbio.utils.outfile_maker(inname=df, outext='.csv', outdir=outdir)\n            my_df = getattr(self, df)\n            if not isinstance(my_df, pd.DataFrame):\n                raise TypeError('{}: object is not a Pandas DataFrame'.format(df))\n\n            if my_df.empty:\n                log.debug('{}: empty dataframe, not saving'.format(df))\n            else:\n                my_df.to_csv(outpath)\n                log.debug('{}: saved dataframe'.format(outpath))\n                counter += 1\n\n        log.debug('Saved {} dataframes at {}'.format(counter, outdir))", "docstring": "Save all attributes that start with \"df\" into a specified directory.\n\nArgs:\noutdir (str): Path to output directory\nprefix (str): Prefix that dataframe attributes start with", "source": "juraj-google-style"}
{"code": "def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):\n    creds = oauth2.get_credentials(scopes, secrets, storage, no_webserver)\n    return cls(creds)", "docstring": "Return a spreadsheet collection making OAauth 2.0 credentials.\n\nArgs:\nsecrets (str): location of secrets file (default: ``%r``)\nstorage (str): location of storage file (default: ``%r``)\nscopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)\nno_webserver (bool): URL/code prompt instead of webbrowser auth\nReturns:\nSheets: new Sheets instance with OAauth 2.0 credentials", "source": "codesearchnet"}
{"code": "def camelcase(string):\n    \n\n    string = re.sub(r\"^[\\-_\\.]\", '', str(string))\n    if not string:\n        return string\n    return lowercase(string[0]) + re.sub(r\"[\\-_\\.\\s]([a-z])\", lambda matched: uppercase(matched.group(1)), string[1:])", "docstring": "Convert string into camel case.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Camel case string.", "source": "juraj-google-style"}
{"code": "def comments_2(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `comments_2`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `comments_2`')\n    self._comments_2 = value", "docstring": "Corresponds to IDD Field `comments_2`\n\nArgs:\nvalue (str): value for IDD Field `comments_2`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 job_config: JobConfig,\n                 job: Dict,\n                 engine: Engine) -> None:\n        \n        self.job_config = job_config\n        self._job = job\n        self._engine = engine\n        self.job_resource_name = job['name']\n        self.program_resource_name = self.job_resource_name.split('/jobs')[0]\n        self._results = None", "docstring": "A job submitted to the engine.\n\nArgs:\njob_config: The JobConfig used to create the job.\njob: A full Job Dict.\nengine: Engine connected to the job.", "source": "juraj-google-style"}
{"code": "def image_to_tf_summary_value(image, tag):\n    curr_image = np.asarray(image, dtype=np.uint8)\n    (height, width, n_channels) = curr_image.shape\n    if (n_channels == 1):\n        curr_image = np.reshape(curr_image, [height, width])\n    s = io.BytesIO()\n    matplotlib_pyplot().imsave(s, curr_image, format='png')\n    img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=height, width=width, colorspace=n_channels)\n    return tf.Summary.Value(tag=tag, image=img_sum)", "docstring": "Converts a NumPy image to a tf.Summary.Value object.\n\nArgs:\nimage: 3-D NumPy array.\ntag: name for tf.Summary.Value for display in tensorboard.\nReturns:\nimage_summary: A tf.Summary.Value object.", "source": "codesearchnet"}
{"code": "def compute_covariance(L_aug, Y, k, p):\n    \n    n, d = L_aug.shape\n    assert Y.shape[0] == n\n    mu = compute_mu(L_aug, Y, k, p)\n    return (L_aug.T @ L_aug) / n - mu @ np.diag(p) @ mu.T", "docstring": "Given label matrix L_aug and labels Y, compute the covariance.\n\nArgs:\nL: (np.array {0,1}) [n, d] The augmented (indicator) label matrix\nY: (np.array int) [n] The true labels in {1,...,k}\nk: (int) Cardinality\np: (np.array float) [k] The class balance", "source": "juraj-google-style"}
{"code": "def kill(self, exit_code: Any=None):\n    self._force_kill.set()\n    if (exit_code is not None):\n        self._exit_code = exit_code\n    logger.info('Killing behavior {0} with exit code: {1}'.format(self, exit_code))", "docstring": "Stops the behaviour\n\nArgs:\nexit_code (object, optional): the exit code of the behaviour (Default value = None)", "source": "codesearchnet"}
{"code": "def title_of_design_condition(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `title_of_design_condition`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `title_of_design_condition`')\n    self._title_of_design_condition = value", "docstring": "Corresponds to IDD Field `title_of_design_condition`\n\nArgs:\nvalue (str): value for IDD Field `title_of_design_condition`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _sync_content_metadata(self, serialized_data, http_method):\n        \n        try:\n            status_code, response_body = getattr(self, '_' + http_method)(\n                urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),\n                serialized_data,\n                self.CONTENT_PROVIDER_SCOPE\n            )\n        except requests.exceptions.RequestException as exc:\n            raise ClientError(\n                'DegreedAPIClient request failed: {error} {message}'.format(\n                    error=exc.__class__.__name__,\n                    message=str(exc)\n                )\n            )\n\n        if status_code >= 400:\n            raise ClientError(\n                'DegreedAPIClient request failed with status {status_code}: {message}'.format(\n                    status_code=status_code,\n                    message=response_body\n                )\n            )", "docstring": "Synchronize content metadata using the Degreed course content API.\n\nArgs:\nserialized_data: JSON-encoded object containing content metadata.\nhttp_method: The HTTP method to use for the API request.\n\nRaises:\nClientError: If Degreed API request fails.", "source": "juraj-google-style"}
{"code": "def _generate_legacy_type_checks(types=()):\n    types = dict(types)\n\n    def gen_type_check(pytypes):\n        pytypes = _utils.flatten(pytypes)\n\n        def type_check(checker, instance):\n            if isinstance(instance, bool):\n                if (bool not in pytypes):\n                    return False\n            return isinstance(instance, pytypes)\n        return type_check\n    definitions = {}\n    for (typename, pytypes) in iteritems(types):\n        definitions[typename] = gen_type_check(pytypes)\n    return definitions", "docstring": "Generate newer-style type checks out of JSON-type-name-to-type mappings.\n\nArguments:\n\ntypes (dict):\n\nA mapping of type names to their Python types\n\nReturns:\n\nA dictionary of definitions to pass to `TypeChecker`", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _call_method_from_namespace(obj, method_name, namespace):\n    \n    method = getattr(obj, method_name)\n    method_parser = method.parser\n    arg_names = _get_args_name_from_parser(method_parser)\n    if method_name == \"__init__\":\n        return _call(obj, arg_names, namespace)\n    return _call(method, arg_names, namespace)", "docstring": "Call the method, retrieved from obj, with the correct arguments via\nthe namespace\n\nArgs:\nobj: any kind of object\nmethod_name: method to be called\nnamespace: an argparse.Namespace object containing parsed command\nline arguments", "source": "juraj-google-style"}
{"code": "def keras_model_summary(name, data, step=None):\n    summary_metadata = summary_pb2.SummaryMetadata()\n    summary_metadata.plugin_data.plugin_name = 'graph_keras_model'\n    summary_metadata.plugin_data.content = b'1'\n    try:\n        json_string = data.to_json()\n    except Exception as exc:\n        logging.warning('Model failed to serialize as JSON. Ignoring... %s', exc)\n        return False\n    with summary_ops_v2.summary_scope(name, 'graph_keras_model', [data, step]) as (tag, _):\n        with ops.device('cpu:0'):\n            tensor = constant_op.constant(json_string, dtype=dtypes.string)\n        return summary_ops_v2.write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "docstring": "Writes a Keras model as JSON to as a Summary.\n\nWriting the Keras model configuration allows the TensorBoard graph plugin to\nrender a conceptual graph, as opposed to graph of ops. In case the model fails\nto serialize as JSON, it ignores and returns False.\n\nArgs:\nname: A name for this summary. The summary tag used for TensorBoard will be\nthis name prefixed by any active name scopes.\ndata: A Keras Model to write.\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\n\nReturns:\nTrue on success, or False if no summary was written because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "github-repos"}
{"code": "def _create_handler(self, config):\n        \n        if config is None:\n            raise ValueError('No handler config to create handler from.')\n\n        if 'name' not in config:\n            raise ValueError('Handler name is required.')\n\n        handler_name = config['name']\n        \n        module_name = handler_name.rsplit('.', 1)[0]\n        class_name = handler_name.rsplit('.', 1)[-1]\n        module = import_module(module_name)\n        handler_class = getattr(module, class_name)\n        instance = handler_class(**config)\n\n        return instance", "docstring": "Creates a handler from its config.\n\nParams:\nconfig:      handler config\nReturns:\nhandler instance", "source": "juraj-google-style"}
{"code": "def create_object(self, obj_type, payload, return_fields=None):\n    self._validate_obj_type_or_die(obj_type)\n    query_params = self._build_query_params(return_fields=return_fields)\n    url = self._construct_url(obj_type, query_params)\n    opts = self._get_request_options(data=payload)\n    self._log_request('post', url, opts)\n    if self.session.cookies:\n        self.session.auth = None\n    r = self.session.post(url, **opts)\n    self._validate_authorized(r)\n    if (r.status_code != requests.codes.CREATED):\n        response = utils.safe_json_load(r.content)\n        already_assigned = 'is assigned to another network view'\n        if (response and (already_assigned in response.get('text'))):\n            exception = ib_ex.InfobloxMemberAlreadyAssigned\n        else:\n            exception = ib_ex.InfobloxCannotCreateObject\n        raise exception(response=response, obj_type=obj_type, content=r.content, args=payload, code=r.status_code)\n    return self._parse_reply(r)", "docstring": "Create an Infoblox object of type 'obj_type'\n\nArgs:\nobj_type        (str): Infoblox object type,\ne.g. 'network', 'range', etc.\npayload       (dict): Payload with data to send\nreturn_fields (list): List of fields to be returned\nReturns:\nThe object reference of the newly create object\nRaises:\nInfobloxException", "source": "codesearchnet"}
{"code": "def add(clss, func, deprecated_name):\n        \n\n        @Deprecator(func.__name__, deprecated_name)\n        def _old_function(*args, **kwargs):\n            return func(*args, **kwargs)\n\n        setattr(clss, deprecated_name, _old_function)", "docstring": "Add the deprecated version of a member function to the given class.\nGives a deprecation warning on usage.\n\nArgs:\nclss: the class where the deprecated function is to be added\nfunc: the actual function that is called by the deprecated version\ndeprecated_name: the deprecated name of the function", "source": "juraj-google-style"}
{"code": "def _serve_image_metadata(self, request):\n    tag = request.args.get('tag')\n    run = request.args.get('run')\n    sample = int(request.args.get('sample', 0))\n    response = self._image_response_for_run(run, tag, sample)\n    return http_util.Respond(request, response, 'application/json')", "docstring": "Given a tag and list of runs, serve a list of metadata for images.\n\nNote that the images themselves are not sent; instead, we respond with URLs\nto the images. The frontend should treat these URLs as opaque and should not\ntry to parse information about them or generate them itself, as the format\nmay change.\n\nArgs:\nrequest: A werkzeug.wrappers.Request object.\n\nReturns:\nA werkzeug.Response application.", "source": "codesearchnet"}
{"code": "def learn(self, initial_state_key, limit=1000, game_n=1):\n        \n        end_flag_list = [False] * len(self.q_learning_list)\n        for game in range(game_n):\n            state_key = copy.copy(initial_state_key)\n            self.t = 1\n            while self.t <= limit:\n                for i in range(len(self.q_learning_list)):\n                    if game + 1 == game_n:\n                        self.state_key_list.append((i, copy.copy(state_key)))\n                    self.q_learning_list[i].t = self.t\n                    next_action_list = self.q_learning_list[i].extract_possible_actions(state_key)\n\n                    if len(next_action_list):\n                        action_key = self.q_learning_list[i].select_action(\n                            state_key=state_key,\n                            next_action_list=next_action_list\n                        )\n                        reward_value = self.q_learning_list[i].observe_reward_value(state_key, action_key)\n\n                        \n                        if self.q_learning_list[i].check_the_end_flag(state_key) is True:\n                            end_flag_list[i] = True\n\n                        \n                        next_state_key = self.q_learning_list[i].update_state(\n                            state_key=state_key,\n                            action_key=action_key\n                        )\n                        next_next_action_list = self.q_learning_list[i].extract_possible_actions(next_state_key)\n                        if len(next_next_action_list):\n                            next_action_key = self.q_learning_list[i].predict_next_action(\n                                next_state_key,\n                                next_next_action_list\n                            )\n                            next_max_q = self.q_learning_list[i].extract_q_df(next_state_key, next_action_key)\n\n                            \n                            self.q_learning_list[i].update_q(\n                                state_key=state_key,\n                                action_key=action_key,\n                                reward_value=reward_value,\n                                next_max_q=next_max_q\n                            )\n\n                        \n                        state_key = next_state_key\n\n                    \n                    self.t += 1\n                    self.q_learning_list[i].t = self.t\n                    if False not in end_flag_list:\n                        break", "docstring": "Multi-Agent Learning.\n\nOverride.\n\nArgs:\ninitial_state_key:  Initial state.\nlimit:              Limit of the number of learning.\ngame_n:             The number of games.", "source": "juraj-google-style"}
{"code": "def color_scale_HSV(c: Color, scoef: float, vcoef: float) -> None:\n    \n    color_p = ffi.new(\"TCOD_color_t*\")\n    color_p.r, color_p.g, color_p.b = c.r, c.g, c.b\n    lib.TCOD_color_scale_HSV(color_p, scoef, vcoef)\n    c[:] = color_p.r, color_p.g, color_p.b", "docstring": "Scale a color's saturation and value.\n\nDoes not return a new Color.  ``c`` is modified inplace.\n\nArgs:\nc (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.\nscoef (float): Saturation multiplier, from 0 to 1.\nUse 1 to keep current saturation.\nvcoef (float): Value multiplier, from 0 to 1.\nUse 1 to keep current value.", "source": "juraj-google-style"}
{"code": "def download_to_tempfile(url, file_name=None, extension=None):\n    \n\n    if not file_name:\n        file_name = generate_timestamped_string(\"wtf_temp_file\")\n\n    if extension:\n        file_path = temp_path(file_name + extension)\n    else:\n        ext = \"\"\n        try:\n            ext = re.search(u\"\\\\.\\\\w+$\", file_name).group(0)\n        except:\n            pass\n        file_path = temp_path(file_name + ext)\n\n    webFile = urllib.urlopen(url)\n    localFile = open(file_path, 'w')\n    localFile.write(webFile.read())\n    webFile.close()\n    localFile.close()\n\n    return file_path", "docstring": "Downloads a URL contents to a tempfile.  This is useful for testing downloads.\nIt will download the contents of a URL to a tempfile, which you then can\nopen and use to validate the downloaded contents.\n\nArgs:\nurl (str) : URL of the contents to download.\n\nKwargs:\nfile_name (str): Name of file.\nextension (str): Extension to use.\n\nReturn:\nstr - Returns path to the temp file.", "source": "juraj-google-style"}
{"code": "def _hdu_on_disk(self, hdulist_index):\n    if (self._tempfile is None):\n        self._tempfile = tempfile.NamedTemporaryFile(mode='r+b', suffix='.fits')\n        self.hdulist[hdulist_index].writeto(self._tempfile.name)\n    return self._tempfile.name", "docstring": "IRAF routines such as daophot need input on disk.\n\nReturns:\nfilename: str\nThe name of the file containing the FITS data.", "source": "codesearchnet"}
{"code": "def _CTCLossGrad(op, grad_loss, _):\n    return _CTCLossGradImpl(op, grad_loss, _)", "docstring": "The derivative provided by CTC Loss.\n\nArgs:\nop: the CTCLoss op.\ngrad_loss: The backprop for cost.\n\nReturns:\nThe CTC Loss gradient.", "source": "github-repos"}
{"code": "def detect_framebuffer(self, glo=None) -> 'Framebuffer':\n        \n\n        res = Framebuffer.__new__(Framebuffer)\n        res.mglo, res._size, res._samples, res._glo = self.mglo.detect_framebuffer(glo)\n        res._color_attachments = None\n        res._depth_attachment = None\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Detect framebuffer.\n\nArgs:\nglo (int): Framebuffer object.\n\nReturns:\n:py:class:`Framebuffer` object", "source": "juraj-google-style"}
{"code": "def area_of_a_triangle_in_cartesian_space( a, b, c ):\n    \n    return 0.5 * np.linalg.norm( np.cross( b-a, c-a ) )", "docstring": "Returns the area of a triangle defined by three points in Cartesian space.\n\nArgs:\na (np.array): Cartesian coordinates of point A.\nb (np.array): Cartesian coordinates of point B.\nc (np.array): Cartesian coordinates of point C.\n\nReturns:\n(float): the area of the triangle.", "source": "juraj-google-style"}
{"code": "def sleep(sleep_microseconds):\n\n    def _apply_fn(dataset):\n        return _SleepDataset(dataset, sleep_microseconds)\n    return _apply_fn", "docstring": "Sleeps for `sleep_microseconds` before producing each input element.\n\nArgs:\nsleep_microseconds: The number of microseconds to sleep before producing an\ninput element.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def make_ar_transition_matrix(coefficients):\n    top_row = tf.expand_dims(coefficients, (- 2))\n    coef_shape = dist_util.prefer_static_shape(coefficients)\n    (batch_shape, order) = (coef_shape[:(- 1)], coef_shape[(- 1)])\n    remaining_rows = tf.concat([tf.eye((order - 1), dtype=coefficients.dtype, batch_shape=batch_shape), tf.zeros(tf.concat([batch_shape, ((order - 1), 1)], axis=0), dtype=coefficients.dtype)], axis=(- 1))\n    ar_matrix = tf.concat([top_row, remaining_rows], axis=(- 2))\n    return ar_matrix", "docstring": "Build transition matrix for an autoregressive StateSpaceModel.\n\nWhen applied to a vector of previous values, this matrix computes\nthe expected new value (summing the previous states according to the\nautoregressive coefficients) in the top dimension of the state space,\nand moves all previous values down by one dimension, 'forgetting' the\nfinal (least recent) value. That is, it looks like this:\n\n```\nar_matrix = [ coefs[0], coefs[1], ..., coefs[order]\n1.,       0 ,       ..., 0.\n0.,       1.,       ..., 0.\n...\n0.,       0.,  ..., 1.,  0.            ]\n```\n\nArgs:\ncoefficients: float `Tensor` of shape `concat([batch_shape, [order]])`.\n\nReturns:\nar_matrix: float `Tensor` with shape `concat([batch_shape,\n[order, order]])`.", "source": "codesearchnet"}
{"code": "def remove(self, email):\n        \n        if email in self._collaborators:\n            if self._collaborators[email] == ShareRequestValue.Add:\n                del self._collaborators[email]\n            else:\n                self._collaborators[email] = ShareRequestValue.Remove\n        self._dirty = True", "docstring": "Remove a Collaborator.\n\nArgs:\nstr : Collaborator email address.", "source": "juraj-google-style"}
{"code": "def _transform_col(self, x, i):\n        \n\n        labels = self.label_encoder._transform_col(x, i)\n        label_max = self.label_encoder.label_maxes[i]\n\n        \n        index = np.array(range(len(labels)))\n        i = index[labels > 0]\n        j = labels[labels > 0] - 1  \n\n        if len(i) > 0:\n            return sparse.coo_matrix((np.ones_like(i), (i, j)),\n                                     shape=(x.shape[0], label_max))\n        else:\n            \n            return None", "docstring": "Encode one categorical column into sparse matrix with one-hot-encoding.\n\nArgs:\nx (pandas.Series): a categorical column to encode\ni (int): column index\n\nReturns:\nX (scipy.sparse.coo_matrix): sparse matrix encoding a categorical\nvariable into dummy variables", "source": "juraj-google-style"}
{"code": "def get_etexts(feature_name, value):\n    matching_etexts = MetadataExtractor.get(feature_name).get_etexts(value)\n    return frozenset(matching_etexts)", "docstring": "Looks up all the texts that have meta-data matching some criterion.\n\nArguments:\nfeature_name (str): The meta-data on which to select the texts.\nvalue (str): The value of the meta-data on which to filter the texts.\n\nReturns:\nfrozenset: The set of all the Project Gutenberg text identifiers that\nmatch the provided query.\n\nRaises:\nUnsupportedFeature: If there is no MetadataExtractor registered that\ncan extract meta-data for the given feature name.", "source": "codesearchnet"}
{"code": "def put(self, name, base):\n        \n        async def fini():\n            if self.base_by_name.get(name) is base:\n                self.base_by_name.pop(name, None)\n\n        \n        base.onfini(fini)\n        self.base_by_name[name] = base", "docstring": "Add a Base (or sub-class) to the BaseRef by name.\n\nArgs:\nname (str): The name/iden of the Base\nbase (Base): The Base instance\n\nReturns:\n(None)", "source": "juraj-google-style"}
{"code": "def __init__(self, atomic_fn: atomic_function.AtomicFunction, shared_func_graph=True):\n    self._arg_keywords = None\n    self._num_positional_args = None\n    self._func_graph = atomic_fn.graph\n    self._captured_inputs = self._func_graph.external_captures + self._func_graph.deferred_external_captures\n    self._function_type = atomic_fn.function_type\n    self._output_shapes = tuple((output.shape for output in self._func_graph.outputs))\n    self._attrs = attributes_lib.parse_func_attrs(atomic_fn.attributes or {})\n    if shared_func_graph:\n        self._garbage_collector = None\n    else:\n        self._garbage_collector = ConcreteFunctionGarbageCollector(atomic_fn.graph)\n    self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions(atomic_fn, self._garbage_collector)\n    self._first_order_tape_functions = {}\n    self._higher_order_tape_functions = {}\n    self._inference_function = self._delayed_rewrite_functions.forward()", "docstring": "Initialize a `ConcreteFunction`.\n\nArgs:\natomic_fn: Inference atomic function to form basis of forward pass.\nshared_func_graph: If False, the ConcreteFunction takes ownership of\n`func_graph` and will break reference cycles when it is deleted. This\nmakes the FuncGraph inoperable.\n\nRaises:\nValueError: If number of input_placeholders is not equal to the number\nof function inputs.", "source": "github-repos"}
{"code": "def trainable_variables(self):\n    return tuple(self._flatten(predicate=_is_trainable_variable, expand_composites=True))", "docstring": "Sequence of trainable variables owned by this module and its submodules.\n\nNote: this method uses reflection to find variables on the current instance\nand submodules. For performance reasons you may wish to cache the result\nof calling this method if you don't expect the return value to change.\n\nReturns:\nA sequence of variables for the current module (sorted by attribute\nname) followed by variables from all submodules recursively (breadth\nfirst).", "source": "github-repos"}
{"code": "def get_config(self):\n    return {}", "docstring": "Returns the configuration of the initializer as a JSON-serializable dict.\n\nReturns:\nA JSON-serializable Python dict.", "source": "github-repos"}
{"code": "def __init__(self):\n        \n        super(JLinkBreakpointInfo, self).__init__()\n        self.SizeOfStruct = ctypes.sizeof(self)", "docstring": "Initializes the ``JLinkBreakpointInfo`` instance.\n\nSets the size of the structure.\n\nArgs:\nself (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instnace\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def cardinal(self, to):\n    return sum((1 for _ in filter((lambda d: ((not d.external) and (d.target in to))), self.dependencies)))", "docstring": "Return the number of dependencies of this module to the given node.\n\nArgs:\nto (Package/Module): the target node.\n\nReturns:\nint: number of dependencies.", "source": "codesearchnet"}
{"code": "def check(self, digest):\n        \n        path = self.get_file_path(digest)\n        if self._calc_digest(path) != digest:\n            self.logger.warning(\"found corrupted file: '{0}'\".format(path))\n            return False\n        return True", "docstring": "Check the integrity of the file with the given digest\n\nArgs:\ndigest -- digest of the file to check\nReturns:\nTrue if the file is not corrupted", "source": "juraj-google-style"}
{"code": "def label_sequential_regions(inlist):\n    \n    import more_itertools as mit\n\n    df = pd.DataFrame(inlist).set_index(0)\n\n    labeled = {}\n    for label in df[1].unique():\n        iterable = df[df[1] == label].index.tolist()\n        labeled.update({'{}{}'.format(label, i + 1): items for i, items in\n                        enumerate([list(group) for group in mit.consecutive_groups(iterable)])})\n\n    return labeled", "docstring": "Input a list of labeled tuples and return a dictionary of sequentially labeled regions.\n\nArgs:\ninlist (list): A list of tuples with the first number representing the index and the second the index label.\n\nReturns:\ndict: Dictionary of labeled regions.\n\nExamples:\n\n>>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])\n{'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}", "source": "juraj-google-style"}
{"code": "def autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose):\n  \n  node = annotate.resolve_calls(func)\n  node = desugar.explicit_loop_indexes(node)\n  fence.validate(node, inspect.getsource(func))\n  node = anf_.anf(node)\n  if verbose >= 2:\n    print('ANF')\n    print(quoting.to_source(node))\n  if mode == 'reverse':\n    node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt,\n                                                  preserve_result, check_dims)\n    if verbose >= 2:\n      print('RAW')\n      print(quoting.to_source(node))\n    if motion == 'split':\n      node = reverse_ad.split(node, stack)\n    else:\n      node = reverse_ad.joint(node)\n    if verbose >= 2:\n      print('MOTION')\n      print(quoting.to_source(node))\n  elif mode == 'forward':\n    node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result,\n                                           check_dims)\n  return node, required", "docstring": "Perform AD on a single function and return the AST.\n\nArgs:\nSee `grad`.\n\nReturns:\nnode: The AST of a module containing the adjoint and primal function\ndefinitions.\nrequired: A list of non-built in functions that this function called, and\nof which the primals and adjoints need to be made available in order\nfor the returned function to run.", "source": "juraj-google-style"}
{"code": "def _initialize(self, args, kwds, add_initializers_to=None):\n    created_variables = []\n\n    def variable_capturing_scope(next_creator, **kwds):\n        \n        enable_variable_lifting = kwds.get('experimental_enable_variable_lifting')\n        if enable_variable_lifting is None:\n            enable_variable_lifting = True\n        if not enable_variable_lifting:\n            return next_creator(**kwds)\n        v = UnliftedInitializerVariable(add_initializers_to=add_initializers_to, **kwds)\n        created_variables.append(weakref.ref(v))\n        return v\n    self._created_variables = created_variables\n    self._variable_creation_config = self._generate_scoped_tracing_options(variable_capturing_scope, tracing_compilation.ScopeType.VARIABLE_CREATION)\n    self._concrete_variable_creation_fn = tracing_compilation.trace_function(args, kwds, self._variable_creation_config)\n\n    def invalid_creator_scope(*unused_args, **unused_kwds):\n        \n        raise ValueError('tf.function only supports singleton tf.Variables created on the first call. Make sure the tf.Variable is only created once or created outside tf.function. See https:\n    self._no_variable_creation_config = self._generate_scoped_tracing_options(invalid_creator_scope, tracing_compilation.ScopeType.NO_VARIABLE_CREATION)", "docstring": "Initializes, on the first call.\n\nCreates two `Function`s, one that will allow creation of variables\nand one that won't.\n\nAdditionally runs a trace for the `Function` that allows creation\nof variables.\n\nArgs:\nargs: Arguments to the underlying python callable.\nkwds: Keyword arguments to the python callable.\nadd_initializers_to: Where to collect variable initializers, if not None.", "source": "github-repos"}
{"code": "def generate(self, batch_size, length, samples=1, fix_static=False, fix_dynamic=False):\n    (static_sample, _) = self.sample_static_prior(samples, batch_size, fix_static)\n    (dynamic_sample, _) = self.sample_dynamic_prior(samples, batch_size, length, fix_dynamic)\n    likelihood = self.decoder((dynamic_sample, static_sample))\n    return likelihood", "docstring": "Generate new sequences.\n\nArgs:\nbatch_size: Number of sequences to generate.\nlength: Number of timesteps to generate for each sequence.\nsamples: Number of samples to draw from the latent distributions.\nfix_static: Boolean for whether or not to share the same random\nsample of the static latent variable `f` from its prior across\nall examples.\nfix_dynamic: Boolean for whether or not to share the same random\nsample of the dynamic latent variable `z_{1:T}` from its prior\nacross all examples.\n\nReturns:\nA batched Independent distribution wrapping a set of Normal\ndistributions over the pixels of the generated sequences, where\nthe Independent distribution has event shape [height, width,\nchannels], batch shape [samples, batch_size, timesteps], and\nsample shape [sample_shape, samples, batch_size, timesteps,\nheight, width, channels].", "source": "codesearchnet"}
{"code": "def atol_for_validation(self) -> float:\n    return 1e-05", "docstring": "What absolute tolerance value to use during model conversion validation.\n\nReturns:\nFloat absolute tolerance value.", "source": "github-repos"}
{"code": "def parse_statement(self):\n    self._skip_whitespace_and_comments()\n    if (self._current_token.kind == tokenize.ENDMARKER):\n        return None\n    stmt_loc = self._current_location(ignore_char_num=True)\n    binding_key_or_keyword = self._parse_selector()\n    statement = None\n    if (self._current_token.value != '='):\n        if (binding_key_or_keyword == 'import'):\n            module = self._parse_selector(scoped=False)\n            statement = ImportStatement(module, stmt_loc)\n        elif (binding_key_or_keyword == 'include'):\n            str_loc = self._current_location()\n            (success, filename) = self._maybe_parse_basic_type()\n            if ((not success) or (not isinstance(filename, str))):\n                self._raise_syntax_error('Expected file path as string.', str_loc)\n            statement = IncludeStatement(filename, stmt_loc)\n        else:\n            self._raise_syntax_error(\"Expected '='.\")\n    else:\n        self._advance_one_token()\n        value = self.parse_value()\n        (scope, selector, arg_name) = parse_binding_key(binding_key_or_keyword)\n        statement = BindingStatement(scope, selector, arg_name, value, stmt_loc)\n    assert statement, 'Internal parsing error.'\n    if ((self._current_token.kind != tokenize.NEWLINE) and (self._current_token.kind != tokenize.ENDMARKER)):\n        self._raise_syntax_error('Expected newline.')\n    elif (self._current_token.kind == tokenize.NEWLINE):\n        self._advance_one_token()\n    return statement", "docstring": "Parse a single statement.\n\nReturns:\nEither a `BindingStatement`, `ImportStatement`, `IncludeStatement`, or\n`None` if no more statements can be parsed (EOF reached).", "source": "codesearchnet"}
{"code": "def predict(self, x, *args, **kwargs):\n    if (len(args) > 0):\n        if ((type(args[0]) == nx.Graph) or (type(args[0]) == nx.DiGraph)):\n            return self.orient_graph(x, *args, **kwargs)\n        else:\n            return self.predict_proba(x, *args, **kwargs)\n    elif (type(x) == DataFrame):\n        return self.predict_dataset(x, *args, **kwargs)\n    elif (type(x) == Series):\n        return self.predict_proba(x.iloc[0], x.iloc[1], *args, **kwargs)", "docstring": "Generic predict method, chooses which subfunction to use for a more\nsuited.\n\nDepending on the type of `x` and of `*args`, this function process to execute\ndifferent functions in the priority order:\n\n1. If ``args[0]`` is a ``networkx.(Di)Graph``, then ``self.orient_graph`` is executed.\n2. If ``args[0]`` exists, then ``self.predict_proba`` is executed.\n3. If ``x`` is a ``pandas.DataFrame``, then ``self.predict_dataset`` is executed.\n4. If ``x`` is a ``pandas.Series``, then ``self.predict_proba`` is executed.\n\nArgs:\nx (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.\nargs (numpy.array or networkx.Graph): graph or second variable.\n\nReturns:\npandas.Dataframe or networkx.Digraph: predictions output", "source": "codesearchnet"}
{"code": "def compute_trigonometric_terms(self, thetas, phis):\n    if (len(thetas) != len(phis)):\n        raise ValueError('List of polar and azimuthal angles have to be equal!')\n    self._pow_sin_t.clear()\n    self._pow_cos_t.clear()\n    self._sin_n_p.clear()\n    self._cos_n_p.clear()\n    self._pow_sin_t[1] = [sin(float(t)) for t in thetas]\n    self._pow_cos_t[1] = [cos(float(t)) for t in thetas]\n    self._sin_n_p[1] = [sin(float(p)) for p in phis]\n    self._cos_n_p[1] = [cos(float(p)) for p in phis]\n    for i in range(2, (self._max_trig_order + 1)):\n        self._pow_sin_t[i] = [(e[0] * e[1]) for e in zip(self._pow_sin_t[(i - 1)], self._pow_sin_t[1])]\n        self._pow_cos_t[i] = [(e[0] * e[1]) for e in zip(self._pow_cos_t[(i - 1)], self._pow_cos_t[1])]\n        self._sin_n_p[i] = [sin((float(i) * float(p))) for p in phis]\n        self._cos_n_p[i] = [cos((float(i) * float(p))) for p in phis]", "docstring": "Computes trigonometric terms that are required to\ncalculate bond orientational order parameters using\ninternal variables.\n\nArgs:\nthetas ([float]): polar angles of all neighbors in radians.\nphis ([float]): azimuth angles of all neighbors in radians.\nThe list of\nazimuth angles of all neighbors in radians.  The list of\nazimuth angles is expected to have the same size as the\nlist of polar angles; otherwise, a ValueError is raised.\nAlso, the two lists of angles have to be coherent in\norder. That is, it is expected that the order in the list\nof azimuth angles corresponds to a distinct sequence of\nneighbors. And, this sequence has to equal the sequence\nof neighbors in the list of polar angles.", "source": "codesearchnet"}
{"code": "def main(args):\n    \n    args = parse_args(args)\n    setup_logging(args.loglevel)\n    _logger.info(\"Starting GramVaani importer...\")\n    _logger.info(\"Starting loading GramVaani csv...\")\n    csv = GramVaaniCSV(args.csv_filename)\n    _logger.info(\"Starting downloading GramVaani mp3's...\")\n    downloader = GramVaaniDownloader(csv, args.target_dir)\n    mp3_directory = downloader.download()\n    _logger.info(\"Starting converting GramVaani mp3's to wav's...\")\n    converter = GramVaaniConverter(args.target_dir, mp3_directory)\n    wav_directory = converter.convert()\n    datasets = GramVaaniDataSets(args.target_dir, wav_directory, csv)\n    datasets.create()\n    datasets.save()\n    _logger.info(\"Finished GramVaani importer...\")", "docstring": "Main entry point allowing external calls\nArgs:\nargs ([str]): command line parameter list", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)", "docstring": "Accumulates the metric statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to `1`.\nCan be a tensor whose rank is either 0, or the same rank as\n`y_true`, and must be broadcastable to `y_true`.", "source": "github-repos"}
{"code": "async def end_takeout(self, success):\n    try:\n        async with _TakeoutClient(True, self, None) as takeout:\n            takeout.success = success\n    except ValueError:\n        return False\n    return True", "docstring": "Finishes a takeout, with specified result sent back to Telegram.\n\nReturns:\n``True`` if the operation was successful, ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def overload(fn):\n    if (not isfunction(fn)):\n        raise TypeError('paco: fn must be a callable object')\n    spec = getargspec(fn)\n    args = spec.args\n    if ((not spec.varargs) and ((len(args) < 2) or (args[1] != 'iterable'))):\n        raise ValueError('paco: invalid function signature or arity')\n\n    @functools.wraps(fn)\n    def decorator(*args, **kw):\n        if (len(args) < 2):\n            return PipeOverloader(fn, args, kw)\n        return fn(*args, **kw)\n    return decorator", "docstring": "Overload a given callable object to be used with ``|`` operator\noverloading.\n\nThis is especially used for composing a pipeline of\ntransformation over a single data set.\n\nArguments:\nfn (function): target function to decorate.\n\nRaises:\nTypeError: if function or coroutine function is not provided.\n\nReturns:\nfunction: decorated function", "source": "codesearchnet"}
{"code": "def url(self, value):\n        \n        if value == self._defaults['url'] and 'url' in self._values:\n            del self._values['url']\n        else:\n            self._values['url'] = value", "docstring": "The url property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def nic_s(msg):\n    \n    tc = typecode(msg)\n\n    if tc != 31:\n        raise RuntimeError(\"%s: Not a status operation message, expecting TC = 31\" % msg)\n\n    msgbin = common.hex2bin(msg)\n    nic_s = int(msgbin[75])\n\n    return nic_s", "docstring": "Obtain NIC supplement bit, TC=31 message\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: NICs number (0 or 1)", "source": "juraj-google-style"}
{"code": "def _full_axis_reduce_along_select_indices(self, func, axis, index):\n    old_index = (self.index if axis else self.columns)\n    numeric_indices = [i for (i, name) in enumerate(old_index) if (name in index)]\n    result = self.data.apply_func_to_select_indices_along_full_axis(axis, func, numeric_indices)\n    return result", "docstring": "Reduce Manger along select indices using function that needs full axis.\n\nArgs:\nfunc: Callable that reduces the dimension of the object and requires full\nknowledge of the entire axis.\naxis: 0 for columns and 1 for rows. Defaults to 0.\nindex: Index of the resulting QueryCompiler.\n\nReturns:\nA new QueryCompiler object with index or BaseFrameManager object.", "source": "codesearchnet"}
{"code": "def _CheckIsDevice(self, file_entry):\n    if (definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types):\n        return False\n    return file_entry.IsDevice()", "docstring": "Checks the is_device find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"}
{"code": "def GetTemplateArgs(clean_lines, linenum):\n    func_line = linenum\n    while (func_line > 0):\n        line = clean_lines.elided[func_line]\n        if Match('^\\\\s*$', line):\n            return set()\n        if (line.find('(') >= 0):\n            break\n        func_line -= 1\n    if (func_line == 0):\n        return set()\n    argument_list = ''\n    match = Match('^(\\\\s*template\\\\s*)<', clean_lines.elided[func_line])\n    if match:\n        start_col = len(match.group(1))\n        (_, end_line, end_col) = CloseExpression(clean_lines, func_line, start_col)\n        if ((end_col > (- 1)) and (end_line == func_line)):\n            start_col += 1\n            argument_list = clean_lines.elided[func_line][start_col:end_col]\n    elif (func_line > 1):\n        match = Match('^(.*)>\\\\s*$', clean_lines.elided[(func_line - 1)])\n        if match:\n            end_col = len(match.group(1))\n            (_, start_line, start_col) = ReverseCloseExpression(clean_lines, (func_line - 1), end_col)\n            if (start_col > (- 1)):\n                start_col += 1\n                while (start_line < (func_line - 1)):\n                    argument_list += clean_lines.elided[start_line][start_col:]\n                    start_col = 0\n                    start_line += 1\n                argument_list += clean_lines.elided[(func_line - 1)][start_col:end_col]\n    if (not argument_list):\n        return set()\n    typenames = set()\n    while True:\n        match = Match('^[,\\\\s]*(?:typename|class)(?:\\\\.\\\\.\\\\.)?\\\\s+(\\\\w+)(.*)$', argument_list)\n        if (not match):\n            break\n        typenames.add(match.group(1))\n        argument_list = match.group(2)\n    return typenames", "docstring": "Find list of template arguments associated with this function declaration.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: Line number containing the start of the function declaration,\nusually one line after the end of the template-argument-list.\nReturns:\nSet of type names, or empty set if this does not appear to have\nany template parameters.", "source": "codesearchnet"}
{"code": "def observe(self, terminal, reward, index=0):\n        \n        fetches = self.episode_output\n        feed_dict = self.get_feed_dict(terminal=terminal, reward=reward, index=index)\n\n        episode = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)\n\n        return episode", "docstring": "Adds an observation (reward and is-terminal) to the model without updating its trainable variables.\n\nArgs:\nterminal (List[bool]): List of is-terminal signals.\nreward (List[float]): List of reward signals.\nindex: (int) parallel episode you want to observe\n\nReturns:\nThe value of the model-internal episode counter.", "source": "juraj-google-style"}
{"code": "def is_alias_command(subcommands, args):\n    if (not args):\n        return False\n    for subcommand in subcommands:\n        if (args[:2] == ['alias', subcommand]):\n            return True\n    return False", "docstring": "Check if the user is invoking one of the comments in 'subcommands' in the  from az alias .\n\nArgs:\nsubcommands: The list of subcommands to check through.\nargs: The CLI arguments to process.\n\nReturns:\nTrue if the user is invoking 'az alias {command}'.", "source": "codesearchnet"}
{"code": "def percent_of(percent, whole):\n    \n    percent = float(percent)\n    whole = float(whole)\n    return (percent * whole) / 100", "docstring": "Calculates the value of a percent of a number\nie: 5% of 20 is what --> 1\n\nArgs:\npercent (float): The percent of a number\nwhole (float): The whole of the number\n\nReturns:\nfloat: The value of a percent\n\nExample:\n>>> percent_of(25, 100)\n25.0\n>>> percent_of(5, 20)\n1.0", "source": "juraj-google-style"}
{"code": "def _debug(message, color=None, attrs=None):\n\t\t\n\t\tif attrs is None:\n\t\t\tattrs = []\n\t\tif color is not None:\n\t\t\tprint colored(message, color, attrs=attrs)\n\t\telse:\n\t\t\tif len(attrs) > 0:\n\t\t\t\tprint colored(message, \"white\", attrs=attrs)\n\t\t\telse:\n\t\t\t\tprint message", "docstring": "Print a message if the class attribute 'verbose' is enabled\n\nArgs:\nmessage (str): Message to print", "source": "juraj-google-style"}
{"code": "def remove_profile(self, profile=None):\n    with self.db:\n        return self.db.remove((self.query.profile == profile))", "docstring": "Remove profile from credentials file.\n\nArgs:\nprofile (str): Credentials profile to remove.\n\nReturns:\nlist: List of affected document IDs.", "source": "codesearchnet"}
{"code": "def get_block_size(self, token, resolution=None):\n    cdims = self.get_metadata(token)['dataset']['cube_dimension']\n    if (resolution is None):\n        resolution = min(cdims.keys())\n    return cdims[str(resolution)]", "docstring": "Gets the block-size for a given token at a given resolution.\n\nArguments:\ntoken (str): The token to inspect\nresolution (int : None): The resolution at which to inspect data.\nIf none is specified, uses the minimum available.\n\nReturns:\nint[3]: The xyz blocksize.", "source": "codesearchnet"}
{"code": "def _state_to_task(cls, tstate, shard_state, eta=None, countdown=None):\n    base_path = tstate.base_path\n    task_name = MapperWorkerCallbackHandler.get_task_name(tstate.shard_id, tstate.slice_id, tstate.retries)\n    headers = util._get_task_headers(tstate.mapreduce_spec.mapreduce_id)\n    headers[util._MR_SHARD_ID_TASK_HEADER] = tstate.shard_id\n    worker_task = model.HugeTask(url=((base_path + '/worker_callback/') + tstate.shard_id), params=tstate.to_dict(), name=task_name, eta=eta, countdown=countdown, parent=shard_state, headers=headers)\n    return worker_task", "docstring": "Generate task for slice according to current states.\n\nArgs:\ntstate: An instance of TransientShardState.\nshard_state: An instance of ShardState.\neta: Absolute time when the MR should execute. May not be specified\nif 'countdown' is also supplied. This may be timezone-aware or\ntimezone-naive.\ncountdown: Time in seconds into the future that this MR should execute.\nDefaults to zero.\n\nReturns:\nA model.HugeTask instance for the slice specified by current states.", "source": "codesearchnet"}
{"code": "def send_update(url_id, dataset):\n    \n    data = _convert_to_seeder_format(dataset)\n\n    if not data:\n        return\n\n    try:\n        _send_request(url_id, json=data, req_type=requests.patch)\n    except Exception as e:\n        sys.stderr.write(\"Seeder PATCH error: \")  \n        sys.stderr.write(str(e.message))\n        return None", "docstring": "Send request to Seeder's API with data changed by user.\n\nArgs:\nurl_id (str): ID used as identification in Seeder.\ndataset (dict): WA-KAT dataset sent from frontend.", "source": "juraj-google-style"}
{"code": "def _get_what_to_read_next(fp, previously_read_position, chunk_size):\n    seek_position = max((previously_read_position - chunk_size), 0)\n    read_size = chunk_size\n    while (seek_position > 0):\n        fp.seek(seek_position)\n        if _is_partially_read_new_line(fp.read(1)):\n            seek_position -= 1\n            read_size += 1\n        else:\n            break\n    read_size = min((previously_read_position - seek_position), read_size)\n    return (seek_position, read_size)", "docstring": "Return information on which file pointer position to read from and how many bytes.\n\nArgs:\nfp\npast_read_positon (int): The file pointer position that has been read previously\nchunk_size(int): ideal io chunk_size\n\nReturns:\n(int, int): The next seek position, how many bytes to read next", "source": "codesearchnet"}
{"code": "def parse_multiple_json(json_file, offset=None):\n    json_info_list = []\n    if (not os.path.exists(json_file)):\n        return json_info_list\n    try:\n        with open(json_file, 'r') as f:\n            if offset:\n                f.seek(offset)\n            for line in f:\n                if (line[(- 1)] != '\\n'):\n                    break\n                json_info = json.loads(line)\n                json_info_list.append(json_info)\n                offset += len(line)\n    except BaseException as e:\n        logging.error(e.message)\n    return (json_info_list, offset)", "docstring": "Parse multiple json records from the given file.\n\nSeek to the offset as the start point before parsing\nif offset set. return empty list if the json file does\nnot exists or exception occurs.\n\nArgs:\njson_file (str): File path to be parsed.\noffset (int): Initial seek position of the file.\n\nReturns:\nA dict of json info.\nNew offset after parsing.", "source": "codesearchnet"}
{"code": "def encode(g, top=None, cls=PENMANCodec, **kwargs):\n    \n    codec = cls(**kwargs)\n    return codec.encode(g, top=top)", "docstring": "Serialize the graph *g* from *top* to PENMAN notation.\n\nArgs:\ng: the Graph object\ntop: the node identifier for the top of the serialized graph; if\nunset, the original top of *g* is used\ncls: serialization codec class\nkwargs: keyword arguments passed to the constructor of *cls*\nReturns:\nthe PENMAN-serialized string of the Graph *g*\nExample:\n\n>>> encode(Graph([('h', 'instance', 'hi')]))\n(h / hi)", "source": "juraj-google-style"}
{"code": "def __init__(self, concentration, validate_args=False, allow_nan_stats=True, name='Dirichlet'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[concentration]) as name:\n        self._concentration = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration, name='concentration'), validate_args)\n        self._total_concentration = math_ops.reduce_sum(self._concentration, -1)\n    super(Dirichlet, self).__init__(dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration, self._total_concentration], name=name)", "docstring": "Initialize a batch of Dirichlet distributions.\n\nArgs:\nconcentration: Positive floating-point `Tensor` indicating mean number\nof class occurrences; aka \"alpha\". Implies `self.dtype`, and\n`self.batch_shape`, `self.event_shape`, i.e., if\n`concentration.shape = [N1, N2, ..., Nm, k]` then\n`batch_shape = [N1, N2, ..., Nm]` and\n`event_shape = [k]`.\nvalidate_args: Python `bool`, default `False`. When `True` distribution\nparameters are checked for validity despite possibly degrading runtime\nperformance. When `False` invalid inputs may silently render incorrect\noutputs.\nallow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n(e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\nresult is undefined. When `False`, an exception is raised if one or\nmore of the statistic's batch members are undefined.\nname: Python `str` name prefixed to Ops created by this class.", "source": "github-repos"}
{"code": "def furnish(app: web.Application):\n    \n    app_name = app['config']['name']\n    prefix = '/' + app_name.lstrip('/')\n    app.router.add_routes(routes)\n    cors_middleware.enable_cors(app)\n\n    \n    known_resources = set()\n    for route in list(app.router.routes()):\n        if route.resource in known_resources:\n            continue\n        known_resources.add(route.resource)\n        route.resource.add_prefix(prefix)\n\n    \n    \n    aiohttp_swagger.setup_swagger(app,\n                                  swagger_url=prefix + '/api/doc',\n                                  description='',\n                                  title=f'Brewblox Service \"{app_name}\"',\n                                  api_version='0.0',\n                                  contact='development@brewpi.com')\n\n    LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))\n\n    for route in app.router.routes():\n        LOGGER.info(f'Endpoint [{route.method}] {route.resource}')\n\n    for name, impl in app.get(features.FEATURES_KEY, {}).items():\n        LOGGER.info(f'Feature [{name}] {impl}')", "docstring": "Configures Application routes, readying it for running.\n\nThis function modifies routes and resources that were added by calling code,\nand must be called immediately prior to `run(app)`.\n\nArgs:\napp (web.Application):\nThe Aiohttp Application as created by `create_app()`", "source": "juraj-google-style"}
{"code": "def get_ui(ui_type, on_ui_exit=None, available_ui_types=None, config=None):\n    if available_ui_types is None:\n        available_ui_types = copy.deepcopy(SUPPORTED_UI_TYPES)\n    if ui_type and ui_type not in available_ui_types:\n        raise ValueError(\"Invalid ui_type: '%s'\" % ui_type)\n    try:\n        if ui_type == 'readline':\n            from tensorflow.python.debug.cli import readline_ui\n            return readline_ui.ReadlineUI(on_ui_exit=on_ui_exit, config=config)\n    except ImportError:\n        available_ui_types.remove(ui_type)\n        if not available_ui_types:\n            raise ValueError('Exhausted all fallback ui_types.')\n        return get_ui(available_ui_types[0], available_ui_types=available_ui_types)", "docstring": "Create a `base_ui.BaseUI` subtype.\n\nThis factory method attempts to fallback to other available ui_types on\nImportError.\n\nArgs:\nui_type: (`str`) requested UI type. Currently supported:\n( readline)\non_ui_exit: (`Callable`) the callback to be called when the UI exits.\navailable_ui_types: (`None` or `list` of `str`) Manually-set available\nui_types.\nconfig: An instance of `cli_config.CLIConfig()` carrying user-facing\nconfigurations.\n\nReturns:\nA `base_ui.BaseUI` subtype object.\n\nRaises:\nValueError: on invalid ui_type or on exhausting or fallback ui_types.", "source": "github-repos"}
{"code": "def trace_export(name, step=None, profiler_outdir=None):\n    global _current_trace_context\n    if ops.inside_function():\n        logging.warn('Cannot export trace inside a tf.function.')\n        return\n    if not context.executing_eagerly():\n        logging.warn('Can only export trace while executing eagerly.')\n        return\n    with _current_trace_context_lock:\n        if _current_trace_context is None:\n            raise ValueError('Must enable trace before export through tf.summary.trace_on.')\n        graph, profiler = _current_trace_context\n    run_meta = context.context().export_run_metadata()\n    if graph and (not profiler):\n        run_metadata_graphs(name, run_meta, step)\n    else:\n        run_metadata(name, run_meta, step)\n    if profiler:\n        if profiler_outdir:\n            logging.warn('Ignoring `profiler_outdir` passed to trace_export(). Please pass it to trace_on() instead.')\n        _profiler.stop()\n    trace_off()", "docstring": "Stops and exports the active trace as a Summary and/or profile file.\n\nStops the trace and exports all metadata collected during the trace to the\ndefault SummaryWriter, if one has been set.\n\nArgs:\nname: A name for the summary to be written.\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\nprofiler_outdir: This arg is a no-op. Please set this in trace_on().\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "github-repos"}
{"code": "def delete(self, branch, commit_message, **kwargs):\n    file_path = self.get_id().replace('/', '%2F')\n    self.manager.delete(file_path, branch, commit_message, **kwargs)", "docstring": "Delete the file from the server.\n\nArgs:\nbranch (str): Branch from which the file will be removed\ncommit_message (str): Commit message for the deletion\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def analyze_structures(self, structures, step_freq=10, most_frequent_polyhedra=15):\n    voro_dict = {}\n    step = 0\n    for structure in structures:\n        step += 1\n        if ((step % step_freq) != 0):\n            continue\n        v = []\n        for n in range(len(structure)):\n            v.append(str(self.analyze(structure, n=n).view()))\n        for voro in v:\n            if (voro in voro_dict):\n                voro_dict[voro] += 1\n            else:\n                voro_dict[voro] = 1\n    return sorted(voro_dict.items(), key=(lambda x: (x[1], x[0])), reverse=True)[:most_frequent_polyhedra]", "docstring": "Perform Voronoi analysis on a list of Structures.\nNote that this might take a significant amount of time depending on the\nsize and number of structures.\n\nArgs:\nstructures (list): list of Structures\ncutoff (float: cutoff distance around an atom to search for\nneighbors\nstep_freq (int): perform analysis every step_freq steps\nqhull_options (str): options to pass to qhull\nmost_frequent_polyhedra (int): this many unique polyhedra with\nhighest frequences is stored.\n\nReturns:\nA list of tuples in the form (voronoi_index,frequency)", "source": "codesearchnet"}
{"code": "def chop(array, epsilon=1e-10):\n    \n    ret = np.array(array)\n\n    if np.isrealobj(ret):\n        ret[abs(ret) < epsilon] = 0.0\n    else:\n        ret.real[abs(ret.real) < epsilon] = 0.0\n        ret.imag[abs(ret.imag) < epsilon] = 0.0\n    return ret", "docstring": "Truncate small values of a complex array.\n\nArgs:\narray (array_like): array to truncte small values.\nepsilon (float): threshold.\n\nReturns:\nnp.array: A new operator with small values set to zero.", "source": "juraj-google-style"}
{"code": "def _CreateLineStringForShape(self, parent, shape):\n    coordinate_list = [(longitude, latitude) for (latitude, longitude, distance) in shape.points]\n    return self._CreateLineString(parent, coordinate_list)", "docstring": "Create a KML LineString using coordinates from a shape.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nshape: The transitfeed.Shape instance.\n\nReturns:\nThe LineString ElementTree.Element instance or None if coordinate_list is\nempty.", "source": "codesearchnet"}
{"code": "def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options, update_ckpt_state=False):\n    named_saveable_objects, graph_proto, feed_additions, unused_registered_savers = self._gather_saveables(object_graph_tensor=object_graph_tensor)\n    if self._last_save_object_graph != graph_proto or context.executing_eagerly() or ops.inside_function():\n        saver = _DSaver(self._mesh, named_saveable_objects)\n        save_op = saver.save(file_prefix, options=options)\n        with ops.device('/cpu:0'):\n            with ops.control_dependencies([save_op]):\n                self._cached_save_operation = array_ops.identity(file_prefix)\n        self._last_save_object_graph = graph_proto\n    return (self._cached_save_operation, feed_additions)", "docstring": "Create or retrieve save ops, overrides parents's private method.\n\nArgs:\nfile_prefix: The prefix for saved checkpoint files.\nobject_graph_tensor: A `Tensor` to which the current object graph will be\nfed.\noptions: `CheckpointOptions` object.\nupdate_ckpt_state: Optional bool flag. Indiciate whether the internal\ncheckpoint state needs to be updated. This is used for async checkpoint,\nwhich DTrackableSaver currently does not support.\nTODO(chienchunh): Implement async checkpoint for DTrackableSaver.\n\nReturns:\nA two-element tuple with a filename tensor and a feed_dict of tensors to\nfeed when running it (if graph building). The feed dict contains the\ncurrent object graph and any Python state to be saved in the\ncheckpoint. When executing eagerly only the first argument is meaningful.", "source": "github-repos"}
{"code": "def _batch_examples(dataset, batch_size, max_length):\n    (buckets_min, buckets_max) = _create_min_max_boundaries(max_length)\n    bucket_batch_sizes = [(batch_size \n    bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)\n\n    def example_to_bucket_id(example_input, example_target):\n        'Return int64 bucket id for this example, calculated based on length.'\n        seq_length = _get_example_length((example_input, example_target))\n        conditions_c = tf.logical_and(tf.less_equal(buckets_min, seq_length), tf.less(seq_length, buckets_max))\n        bucket_id = tf.reduce_min(tf.where(conditions_c))\n        return bucket_id\n\n    def window_size_fn(bucket_id):\n        'Return number of examples to be grouped when given a bucket id.'\n        return bucket_batch_sizes[bucket_id]\n\n    def batching_fn(bucket_id, grouped_dataset):\n        'Batch and add padding to a dataset of elements with similar lengths.'\n        bucket_batch_size = window_size_fn(bucket_id)\n        return grouped_dataset.padded_batch(bucket_batch_size, ([None], [None]))\n    return dataset.apply(tf.contrib.data.group_by_window(key_func=example_to_bucket_id, reduce_func=batching_fn, window_size=None, window_size_func=window_size_fn))", "docstring": "Group examples by similar lengths, and return batched dataset.\n\nEach batch of similar-length examples are padded to the same length, and may\nhave different number of elements in each batch, such that:\ngroup_batch_size * padded_length <= batch_size.\n\nThis decreases the number of padding tokens per batch, which improves the\ntraining speed.\n\nArgs:\ndataset: Dataset of unbatched examples.\nbatch_size: Max number of tokens per batch of examples.\nmax_length: Max number of tokens in an example input or target sequence.\n\nReturns:\nDataset of batched examples with similar lengths.", "source": "codesearchnet"}
{"code": "def _get_dir_size(self, path: str='.'):\n    total = 0\n    for root, _, files in os.walk(path):\n        for filename in files:\n            total += os.path.getsize(os.path.join(root, filename))\n    return total", "docstring": "Get the total size of files and sub-directories under the path.\n\nArgs:\npath: Path of a directory or a file to calculate the total size.\n\nReturns:\nTotal size of the directory or a file.", "source": "github-repos"}
{"code": "def add_to_buffer(self, content, read_position):\n    self.read_position = read_position\n    if (self.read_buffer is None):\n        self.read_buffer = content\n    else:\n        self.read_buffer = (content + self.read_buffer)", "docstring": "Add additional bytes content as read from the read_position.\n\nArgs:\ncontent (bytes): data to be added to buffer working BufferWorkSpac.\nread_position (int): where in the file pointer the data was read from.", "source": "codesearchnet"}
{"code": "def RestrictFeedItemToGeoTarget(client, feed_item, location_id):\n    feed_item_target_service = client.GetService('FeedItemTargetService', version='v201809')\n    criterion_target = {'xsi_type': 'FeedItemCriterionTarget', 'feedId': feed_item['feedId'], 'feedItemId': feed_item['feedItemId'], 'criterion': {'xsi_type': 'Location', 'id': location_id}}\n    operation = {'operator': 'ADD', 'operand': criterion_target}\n    response = feed_item_target_service.mutate([operation])\n    new_location_target = response['value'][0]\n    print(('Feed item target for feed ID %d and feed item ID %d was created to restrict serving to location ID %d.' % (new_location_target['feedId'], new_location_target['feedItemId'], new_location_target['criterion']['id'])))", "docstring": "Restrict a feed item to a geo target location.\n\nArgs:\nclient: An AdWordsClient instance.\nfeed_item: A FeedItem.\nlocation_id: The Id of the location to restrict to.", "source": "codesearchnet"}
{"code": "def create_selected_summaries_dict(summaries_list):\n    headers_summary = cellpy.parameters.internal_settings.get_headers_summary()\n    selected_summaries = dict()\n    for h in summaries_list:\n        selected_summaries[h] = headers_summary[h]\n    return selected_summaries", "docstring": "Creates a dictionary with summary column headers.\n\nExamples:\n>>> summaries_to_output = [\"discharge_capacity\", \"charge_capacity\"]\n>>> summaries_to_output_dict = create_selected_summaries_dict(\n>>>    summaries_to_output\n>>> )\n>>> print(summaries_to_output_dict)\n{'discharge_capacity': \"Discharge_Capacity(mAh/g)\",\n'charge_capacity': \"Charge_Capacity(mAh/g)}\n\nArgs:\nsummaries_list: list containing cellpy summary column id names\n\nReturns: dictionary of the form {cellpy id name: cellpy summary\nheader name,}", "source": "codesearchnet"}
{"code": "def do_hook_actions(self, actions, hook_type):\n        \n        logger.log_debug(\"call {} hook actions.\".format(hook_type))\n        for action in actions:\n\n            if isinstance(action, dict) and len(action) == 1:\n                \n                \n                var_name, hook_content = list(action.items())[0]\n                hook_content_eval = self.session_context.eval_content(hook_content)\n                logger.log_debug(\n                    \"assignment with hook: {} = {} => {}\".format(\n                        var_name, hook_content, hook_content_eval\n                    )\n                )\n                self.session_context.update_test_variables(\n                    var_name, hook_content_eval\n                )\n            else:\n                \n                logger.log_debug(\"call hook function: {}\".format(action))\n                \n                self.session_context.eval_content(action)", "docstring": "call hook actions.\n\nArgs:\nactions (list): each action in actions list maybe in two format.\n\nformat1 (dict): assignment, the value returned by hook function will be assigned to variable.\n{\"var\": \"${func()}\"}\nformat2 (str): only call hook functions.\n${func()}\n\nhook_type (enum): setup/teardown", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    residual = hidden_states\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def meta_features_path(self, path):\n        \n        return os.path.join(\n                path,\n                app.config['XCESSIV_META_FEATURES_FOLDER'],\n                str(self.id)\n            ) + '.npy'", "docstring": "Returns path for meta-features\n\nArgs:\npath (str): Absolute/local path of xcessiv folder", "source": "juraj-google-style"}
{"code": "def close_children_tasks(self, parent_task_name):\n    if (parent_task_name not in self.tasks):\n        return\n    while self.tasks:\n        next_task = reversed(self.tasks.keys()).next()\n        if (next_task == parent_task_name):\n            break\n        del self.tasks[next_task]", "docstring": "Closes all the children tasks that were open\n\nArgs:\nparent_task_name (str): Name of the parent task\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def do_command_line(infile: typing.IO[str]) -> int:\n    lines = infile.readlines()\n    tree = ast.parse(''.join(lines))\n    checker = Checker(tree, lines, infile.name)\n    checker.load()\n    errors = []\n    for func in checker.all_funcs(skip_noqa=True):\n        try:\n            errors = list(func.check_all())\n        except ValidationError as error:\n            errors = [error.to_aaa()]\n        print(func.__str__(errors), end='')\n    return len(errors)", "docstring": "Currently a small stub to create an instance of Checker for the passed\n``infile`` and run its test functions through linting.\n\nArgs:\ninfile\n\nReturns:\nint: Number of flake8 errors raised.", "source": "codesearchnet"}
{"code": "def do_get_next(endpoint, access_token):\n    \n    headers = {\"Authorization\": 'Bearer ' + access_token}\n    headers['User-Agent'] = get_user_agent()\n    looping = True\n    value_list = []\n    vm_dict = {}\n    while looping:\n        get_return = requests.get(endpoint, headers=headers).json()\n        if not 'value' in get_return:\n            return get_return\n        if not 'nextLink' in get_return:\n            looping = False\n        else:\n            endpoint = get_return['nextLink']\n        value_list += get_return['value']\n    vm_dict['value'] = value_list\n    return vm_dict", "docstring": "Do an HTTP GET request, follow the nextLink chain and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _getScalesDiag(self,termx=0):\n        \n        assert self.P>1, 'CVarianceDecomposition:: diagonal init_method allowed only for multi trait models' \n        assert self.noisPos!=None, 'CVarianceDecomposition:: noise term has to be set'\n        assert termx<self.n_terms-1, 'CVarianceDecomposition:: termx>=n_terms-1'\n        assert self.covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'CVarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'\n        assert self.covar_type[termx] not in ['lowrank','block','fixed'], 'CVarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'\n        scales = []\n        res = self.estimateHeritabilities(self.vd.getTerm(termx).getK())\n        scaleg = SP.sqrt(res['varg'].mean())\n        scalen = SP.sqrt(res['varn'].mean())\n        for term_i in range(self.n_terms):\n            if term_i==termx:\n                _scales = scaleg*self.diag[term_i]\n            elif term_i==self.noisPos:\n                _scales = scalen*self.diag[term_i]\n            else:\n                _scales = 0.*self.diag[term_i]\n            if self.offset[term_i]>0:\n                _scales = SP.concatenate((_scales,SP.array([SP.sqrt(self.offset[term_i])])))\n            scales.append(_scales)\n        return SP.concatenate(scales)", "docstring": "Uses 2 term single trait model to get covar params for initialization\n\nArgs:\ntermx:      non-noise term terms that is used for initialization", "source": "juraj-google-style"}
{"code": "def waitOnUpdate(self, timeout: float=0) -> bool:\n    if timeout:\n        with suppress(asyncio.TimeoutError):\n            util.run(asyncio.wait_for(self.updateEvent, timeout))\n    else:\n        util.run(self.updateEvent)\n    return True", "docstring": "Wait on any new update to arrive from the network.\n\nArgs:\ntimeout: Maximum time in seconds to wait.\nIf 0 then no timeout is used.\n\n.. note::\nA loop with ``waitOnUpdate`` should not be used to harvest\ntick data from tickers, since some ticks can go missing.\nThis happens when multiple updates occur almost simultaneously;\nThe ticks from the first update are then cleared.\nUse events instead to prevent this.", "source": "codesearchnet"}
{"code": "def GetFeedItems(client, feed):\n    feed_item_service = client.GetService('FeedItemService', 'v201809')\n    feed_items = []\n    more_pages = True\n    selector = {'fields': ['FeedItemId', 'AttributeValues'], 'predicates': [{'field': 'Status', 'operator': 'EQUALS', 'values': ['ENABLED']}, {'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}\n    while more_pages:\n        page = feed_item_service.get(selector)\n        if ('entries' in page):\n            feed_items.extend(page['entries'])\n        selector['paging']['startIndex'] += PAGE_SIZE\n        more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))\n    return feed_items", "docstring": "Returns the Feed Items for a given Feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed: the Feed we are retrieving Feed Items from.\n\nReturns:\nThe Feed Items associated with the given Feed.", "source": "codesearchnet"}
{"code": "def flux_up(self, fluxUpBottom, emission=None):\n    if (emission is None):\n        emission = np.zeros_like(self.absorptivity)\n    E = np.concatenate((emission, np.atleast_1d(fluxUpBottom)), axis=(- 1))\n    return np.squeeze(matrix_multiply(self.Tup, E[(..., np.newaxis)]))", "docstring": "Compute downwelling radiative flux at interfaces between layers.\n\nInputs:\n\n* fluxDownTop: flux down at top\n* emission: emission from atmospheric levels (N)\ndefaults to zero if not given\n\nReturns:\n\n* vector of downwelling radiative flux between levels (N+1)\nelement 0 is the flux down to the surface.", "source": "codesearchnet"}
{"code": "def sg_queue_context(sess=None):\n    sess = (tf.get_default_session() if (sess is None) else sess)\n    coord = tf.train.Coordinator()\n    try:\n        threads = tf.train.start_queue_runners(sess, coord)\n        (yield)\n    finally:\n        coord.request_stop()\n        coord.join(threads)", "docstring": "r\"\"\"Context helper for queue routines.\n\nArgs:\nsess: A session to open queues. If not specified, a new session is created.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def run_attack_work(self, work_id):\n    adv_batch_id = self.attack_work.work[work_id]['output_adversarial_batch_id']\n    adv_batch = self.adv_batches[adv_batch_id]\n    dataset_batch_id = adv_batch['dataset_batch_id']\n    submission_id = adv_batch['submission_id']\n    epsilon = self.dataset_batches[dataset_batch_id]['epsilon']\n    logging.info('Attack work piece: dataset_batch_id=\"%s\" submission_id=\"%s\" epsilon=%d', dataset_batch_id, submission_id, epsilon)\n    if (submission_id in self.blacklisted_submissions):\n        raise WorkerError('Blacklisted submission')\n    attack = AttackSubmission(submission_id, self.submissions, self.storage_bucket)\n    attack.download()\n    input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id)\n    if (attack.type == TYPE_TARGETED):\n        target_class_filename = os.path.join(input_dir, 'target_class.csv')\n        self.dataset_meta.save_target_classes_for_batch(target_class_filename, self.dataset_batches, dataset_batch_id)\n    if os.path.exists(LOCAL_OUTPUT_DIR):\n        sudo_remove_dirtree(LOCAL_OUTPUT_DIR)\n    os.mkdir(LOCAL_OUTPUT_DIR)\n    if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR):\n        shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR)\n    os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR)\n    if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR):\n        shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR)\n    os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR)\n    elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon)\n    if (attack.type == TYPE_TARGETED):\n        os.remove(target_class_filename)\n    image_hashes = eval_lib.enforce_epsilon_and_compute_hash(input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon)\n    if (not image_hashes):\n        logging.warning('No images saved by the attack.')\n        return (elapsed_time_sec, submission_id)\n    for (clean_image_id, hash_val) in iteritems(image_hashes):\n        adv_img_id = ((adv_batch_id + '_') + clean_image_id)\n        os.rename(os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, (clean_image_id + '.png')), os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, (adv_img_id + '.png')))\n        image_path = '{0}/adversarial_images/{1}/{1}.zip/{2}.png'.format(self.round_name, adv_batch_id, adv_img_id)\n        adv_batch['images'][adv_img_id] = {'clean_image_id': (u'' + str(clean_image_id)), 'image_path': (u'' + str(image_path)), 'image_hash': (u'' + str(hash_val))}\n    zipped_images_filename = os.path.join(LOCAL_ZIPPED_OUTPUT_DIR, (adv_batch_id + '.zip'))\n    try:\n        logging.debug('Compressing adversarial images to %s', zipped_images_filename)\n        shell_call(['zip', '-j', '-r', zipped_images_filename, LOCAL_PROCESSED_OUTPUT_DIR])\n    except subprocess.CalledProcessError as e:\n        raise WorkerError('Cant make archive from adversarial iamges', e)\n    dst_filename = '{0}/adversarial_images/{1}/{1}.zip'.format(self.round_name, adv_batch_id)\n    logging.debug('Copying archive with adversarial images to %s', dst_filename)\n    self.storage_client.new_blob(dst_filename).upload_from_filename(zipped_images_filename)\n    logging.debug('Writing adversarial batch to datastore')\n    self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id)\n    return (elapsed_time_sec, submission_id)", "docstring": "Runs one attack work.\n\nArgs:\nwork_id: ID of the piece of work to run\n\nReturns:\nelapsed_time_sec, submission_id - elapsed time and id of the submission\n\nRaises:\nWorkerError: if error occurred during execution.", "source": "codesearchnet"}
{"code": "def norm(self, valu):\n        \n        func = self._type_norms.get(type(valu))\n        if func is None:\n            raise s_exc.NoSuchFunc(name=self.name, mesg='no norm for type: %r' % (type(valu),))\n\n        return func(valu)", "docstring": "Normalize the value for a given type.\n\nArgs:\nvalu (obj): The value to normalize.\n\nReturns:\n((obj,dict)): The normalized valu, info tuple.\n\nNotes:\nThe info dictionary uses the following key conventions:\nsubs (dict): The normalized sub-fields as name: valu entries.", "source": "juraj-google-style"}
{"code": "def __init__(self, package, device):\n    self.package = package\n    self.log = device.log\n    self.verbose_logging = True\n    self._device = device\n    self._counter = None\n    self._lock = threading.Lock()\n    self._event_client = None", "docstring": "Initializes the instance of ClientBase.\n\nArgs:\npackage: str, the user-visible name of the snippet library being\ncommunicated with.\ndevice: DeviceController, the device object associated with a client.", "source": "github-repos"}
{"code": "def pop_all(self, event_name):\n    if (not self.started):\n        raise IllegalStateError('Dispatcher needs to be started before popping.')\n    results = []\n    try:\n        self.lock.acquire()\n        while True:\n            e = self.event_dict[event_name].get(block=False)\n            results.append(e)\n    except (queue.Empty, KeyError):\n        return results\n    finally:\n        self.lock.release()", "docstring": "Return and remove all stored events of a specified name.\n\nPops all events from their queue. May miss the latest ones.\nIf no event is available, return immediately.\n\nArgs:\nevent_name: Name of the events to be popped.\n\nReturns:\nList of the desired events.\n\nRaises:\nIllegalStateError: Raised if pop is called before the dispatcher\nstarts polling.", "source": "codesearchnet"}
{"code": "def _enroll_users(cls, request, enterprise_customer, emails, mode, course_id=None, program_details=None, notify=True):\n    pending_messages = []\n    if course_id:\n        (succeeded, pending, failed) = cls.enroll_users_in_course(enterprise_customer=enterprise_customer, course_id=course_id, course_mode=mode, emails=emails)\n        all_successes = (succeeded + pending)\n        if notify:\n            enterprise_customer.notify_enrolled_learners(catalog_api_user=request.user, course_id=course_id, users=all_successes)\n        if succeeded:\n            pending_messages.append(cls.get_success_enrollment_message(succeeded, course_id))\n        if failed:\n            pending_messages.append(cls.get_failed_enrollment_message(failed, course_id))\n        if pending:\n            pending_messages.append(cls.get_pending_enrollment_message(pending, course_id))\n    if program_details:\n        (succeeded, pending, failed) = cls.enroll_users_in_program(enterprise_customer=enterprise_customer, program_details=program_details, course_mode=mode, emails=emails)\n        all_successes = (succeeded + pending)\n        if notify:\n            cls.notify_program_learners(enterprise_customer=enterprise_customer, program_details=program_details, users=all_successes)\n        program_identifier = program_details.get('title', program_details.get('uuid', _('the program')))\n        if succeeded:\n            pending_messages.append(cls.get_success_enrollment_message(succeeded, program_identifier))\n        if failed:\n            pending_messages.append(cls.get_failed_enrollment_message(failed, program_identifier))\n        if pending:\n            pending_messages.append(cls.get_pending_enrollment_message(pending, program_identifier))\n    cls.send_messages(request, pending_messages)", "docstring": "Enroll the users with the given email addresses to the courses specified, either specifically or by program.\n\nArgs:\ncls (type): The EnterpriseCustomerManageLearnersView class itself\nrequest: The HTTP request the enrollment is being created by\nenterprise_customer: The instance of EnterpriseCustomer whose attached users we're enrolling\nemails: An iterable of strings containing email addresses to enroll in a course\nmode: The enrollment mode the users will be enrolled in the course with\ncourse_id: The ID of the course in which we want to enroll\nprogram_details: Details about a program in which we want to enroll\nnotify: Whether to notify (by email) the users that have been enrolled", "source": "codesearchnet"}
{"code": "def variable(self, var_name, shape, init, dt=tf.float32, train=None):\n    dt = tf.as_dtype(dt).base_dtype\n    if (var_name in self.vars):\n        v = self.vars[var_name]\n        if (v.get_shape() != shape):\n            raise ValueError(('Shape mismatch: %s vs %s. Perhaps a UnboundVariable had incompatible values within a graph.' % (v.get_shape(), shape)))\n        return v\n    elif callable(init):\n        if (train is None):\n            train = _defaults.get('trainable_variables', True)\n        variable_collections = _defaults.get('variable_collections', ())\n        if (tf.GraphKeys.GLOBAL_VARIABLES not in variable_collections):\n            variable_collections = (list(variable_collections) + [tf.GraphKeys.GLOBAL_VARIABLES])\n        v = tf.get_variable(var_name, shape=shape, dtype=dt, initializer=init, trainable=train, collections=variable_collections)\n        self.vars[var_name] = v\n        return v\n    else:\n        v = tf.convert_to_tensor(init, name=var_name, dtype=dt)\n        v.get_shape().assert_is_compatible_with(shape)\n        self.vars[var_name] = v\n        return v", "docstring": "Adds a named variable to this bookkeeper or returns an existing one.\n\nVariables marked train are returned by the training_variables method. If\nthe requested name already exists and it is compatible (same shape, dt and\ntrain) then it is returned. In case of an incompatible type, an exception is\nthrown.\n\nArgs:\nvar_name: The unique name of this variable.  If a variable with the same\nname exists, then it is returned.\nshape: The shape of the variable.\ninit: The init function to use or a Tensor to copy.\ndt: The datatype, defaults to float.  This will automatically extract the\nbase dtype.\ntrain: Whether or not the variable should be trained; defaults to\nTrue unless a default_scope has overridden it.\nReturns:\nA TensorFlow tensor.\nRaises:\nValueError: if reuse is False (or unspecified and allow_reuse is False)\nand the variable already exists or if the specification of a reused\nvariable does not match the original.", "source": "codesearchnet"}
{"code": "def parse_GSM(filepath, entry_name=None):\n    \n    if isinstance(filepath, str):\n        with utils.smart_open(filepath) as f:\n            soft = []\n            has_table = False\n            for line in f:\n                if \"_table_begin\" in line or (not line.startswith((\"^\", \"!\", \"\n                    has_table = True\n                soft.append(line.rstrip())\n    else:\n        soft = []\n        has_table = False\n        for line in filepath:\n            if \"_table_begin\" in line or (not line.startswith((\"^\", \"!\", \"\n                has_table = True\n            soft.append(line.rstrip())\n\n    if entry_name is None:\n        sets = [i for i in soft if i.startswith(\"^\")]\n        if len(sets) > 1:\n            raise Exception(\"More than one entry in GPL\")\n        if len(sets) == 0:\n            raise NoEntriesException(\n                \"No entries found. Check the if accession is correct!\")\n        entry_name = parse_entry_name(sets[0])\n\n    columns = parse_columns(soft)\n    metadata = parse_metadata(soft)\n    if has_table:\n        table_data = parse_table_data(soft)\n    else:\n        table_data = DataFrame()\n\n    gsm = GSM(name=entry_name,\n              table=table_data,\n              metadata=metadata,\n              columns=columns)\n\n    return gsm", "docstring": "Parse GSM entry from SOFT file.\n\nArgs:\nfilepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry\nor list of lines representing GSM from GSE file.\nentry_name (:obj:`str`, optional): Name of the entry. By default it is\ninferred from the data.\n\nReturns:\n:obj:`GEOparse.GSM`: A GSM object.", "source": "juraj-google-style"}
{"code": "def _setup_transitions(tdef, states, prev=()):\n    \n    trs = list(prev)\n    for transition in tdef:\n        if len(transition) == 3:\n            (name, source, target) = transition\n            if is_string(source) or isinstance(source, State):\n                source = [source]\n            source = [states[src] for src in source]\n            target = states[target]\n            tr = Transition(name, source, target)\n        else:\n            raise TypeError(\n                \"Elements of the 'transition' attribute of a \"\n                \"workflow should be three-tuples; got %r instead.\" % (transition,)\n            )\n\n        if any(prev_tr.name == tr.name for prev_tr in trs):\n            \n            trs = [tr if prev_tr.name == tr.name else prev_tr for prev_tr in trs]\n        else:\n            trs.append(tr)\n    return TransitionList(trs)", "docstring": "Create a TransitionList object from a 'transitions' Workflow attribute.\n\nArgs:\ntdef: list of transition definitions\nstates (StateList): already parsed state definitions.\nprev (TransitionList): transition definitions from a parent.\n\nReturns:\nTransitionList: the list of transitions defined in the 'tdef' argument.", "source": "juraj-google-style"}
{"code": "def sg_summary_activation(tensor, prefix=None, name=None):\n    prefix = ('' if (prefix is None) else (prefix + '/'))\n    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))\n    _scalar((name + '/ratio'), tf.reduce_mean(tf.cast(tf.greater(tensor, 0), tf.sg_floatx)))\n    _histogram((name + '/ratio-h'), tensor)", "docstring": "r\"\"\"Register `tensor` to summary report as `activation`\n\nArgs:\ntensor: A `Tensor` to log as activation\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, img, gaussian_kernel_1d=None, size=None):\n        \n        \n        self.img = img if not isinstance(img, compat.basestring) \\\n            else compat.Image.open(img)\n\n        \n        \n        if size and size != self.img.size:\n            self.img = self.img.resize(size, Image.ANTIALIAS)\n\n        \n        self.size = self.img.size\n\n        \n        \n        if gaussian_kernel_1d is not None:\n\n            self.gaussian_kernel_1d = gaussian_kernel_1d\n\n            \n            self.img_gray, self.img_alpha = to_grayscale(self.img)\n            if self.img_alpha is not None:\n                self.img_gray[self.img_alpha == 255] = 0\n\n            \n            self.img_gray_squared = self.img_gray ** 2\n\n            \n            self.img_gray_mu = convolve_gaussian_2d(\n                self.img_gray, self.gaussian_kernel_1d)\n\n            \n            self.img_gray_mu_squared = self.img_gray_mu ** 2\n\n            \n            self.img_gray_sigma_squared = convolve_gaussian_2d(\n                self.img_gray_squared, self.gaussian_kernel_1d)\n\n            \n            self.img_gray_sigma_squared -= self.img_gray_mu_squared\n\n        \n        \n        else:\n            \n            self.img_gray = ImageOps.grayscale(self.img)", "docstring": "Create an SSIMImage.\n\nArgs:\nimg (str or PIL.Image): PIL Image object or file name.\ngaussian_kernel_1d (np.ndarray, optional): Gaussian kernel\nthat was generated with utils.get_gaussian_kernel is used\nto precompute common objects for SSIM computation\nsize (tuple, optional): New image size to resize image to.", "source": "juraj-google-style"}
{"code": "def IsDataVisible(self, path):\n    \n    if path is None:\n      return (False, RESPONSES['UNKNOWN_TYPE'])\n\n    if _Matches(path, self.blacklist_patterns):\n      return (False, RESPONSES['BLACKLISTED'])\n\n    if not _Matches(path, self.whitelist_patterns):\n      return (False, RESPONSES['NOT_WHITELISTED'])\n\n    return (True, RESPONSES['VISIBLE'])", "docstring": "Returns a tuple (visible, reason) stating if the data should be visible.\n\nArgs:\npath: A dot separated path that represents a package, class, method or\nvariable.  The format is identical to pythons \"import\" statement.\n\nReturns:\n(visible, reason) where visible is a boolean that is True if the data\nshould be visible.  Reason is a string reason that can be displayed\nto the user and indicates why data is visible or not visible.", "source": "juraj-google-style"}
{"code": "def split_range(self):\n    if self.is_single_namespace:\n        return [self]\n    mid_point = ((_namespace_to_ord(self.namespace_start) + _namespace_to_ord(self.namespace_end)) \n    return [NamespaceRange(self.namespace_start, _ord_to_namespace(mid_point), _app=self.app), NamespaceRange(_ord_to_namespace((mid_point + 1)), self.namespace_end, _app=self.app)]", "docstring": "Splits the NamespaceRange into two nearly equal-sized ranges.\n\nReturns:\nIf this NamespaceRange contains a single namespace then a list containing\nthis NamespaceRange is returned. Otherwise a two-element list containing\ntwo NamespaceRanges whose total range is identical to this\nNamespaceRange's is returned.", "source": "codesearchnet"}
{"code": "def ParseFromUnicode(self, value):\n    precondition.AssertType(value, Text)\n    value = value.strip()\n    super(ClientURN, self).ParseFromUnicode(value)\n    match = self.CLIENT_ID_RE.match(self._string_urn)\n    if (not match):\n        raise type_info.TypeValueError(('Client urn malformed: %s' % value))\n    clientid = match.group('clientid')\n    clientid_correctcase = ''.join((clientid[0].upper(), clientid[1:].lower()))\n    self._string_urn = self._string_urn.replace(clientid, clientid_correctcase, 1)", "docstring": "Parse a string into a client URN.\n\nConvert case so that all URNs are of the form C.[0-9a-f].\n\nArgs:\nvalue: string value to parse", "source": "codesearchnet"}
{"code": "def strip_quotes(self, content):\n    error_msg = 'Following rule is badly quoted: {}'\n    if ((content.startswith('\"') and content.endswith('\"')) or (content.startswith(\"'\") and content.endswith(\"'\"))):\n        return content[1:(- 1)]\n    elif ((content.startswith('\"') and (not content.endswith('\"'))) or (content.startswith(\"'\") and (not content.endswith(\"'\")))):\n        raise InvalidImportRule(error_msg.format(content))\n    elif (((not content.startswith('\"')) and content.endswith('\"')) or ((not content.startswith(\"'\")) and content.endswith(\"'\"))):\n        raise InvalidImportRule(error_msg.format(content))\n    return content", "docstring": "Unquote given rule.\n\nArgs:\ncontent (str): An import rule.\n\nRaises:\nInvalidImportRule: Raise exception if the rule is badly quoted\n(not started or not ended quotes).\n\nReturns:\nstring: The given rule unquoted.", "source": "codesearchnet"}
{"code": "def find(self, name):\n        \n        collectors = self.get_collectors()\n\n        for collector in collectors:\n            if name.lower() == collector['name'].lower():\n                self.collector_id = collector['id']\n                return collector\n\n        return {'status': 'No results found.'}", "docstring": "Returns a dict of collector's details if found.\n\nArgs:\nname (str): name of collector searching for", "source": "juraj-google-style"}
{"code": "def verify_edge_segments(edge_infos):\n    if (edge_infos is None):\n        return\n    for edge_info in edge_infos:\n        num_segments = len(edge_info)\n        for index in six.moves.xrange((- 1), (num_segments - 1)):\n            (index1, start1, end1) = edge_info[index]\n            if (not (0.0 <= start1 < end1 <= 1.0)):\n                raise ValueError(BAD_SEGMENT_PARAMS, edge_info[index])\n            (index2, _, _) = edge_info[(index + 1)]\n            if (index1 == index2):\n                raise ValueError(SEGMENTS_SAME_EDGE, edge_info[index], edge_info[(index + 1)])", "docstring": "Verify that the edge segments in an intersection are valid.\n\n.. note::\n\nThis is a helper used only by :func:`generic_intersect`.\n\nArgs:\nedge_infos (Optional[list]): List of \"edge info\" lists. Each list\nrepresents a curved polygon and contains 3-tuples of edge index,\nstart and end (see the output of :func:`ends_to_curve`).\n\nRaises:\nValueError: If two consecutive edge segments lie on the same edge\nindex.\nValueError: If the start and end parameter are \"invalid\" (they should\nbe between 0 and 1 and start should be strictly less than end).", "source": "codesearchnet"}
{"code": "def put_image(self, name, val):\n        \n        assert isinstance(val, np.ndarray)\n        arr = image_to_nhwc(val)\n        self._dispatch(lambda m: m.process_image(name, arr))\n        s = create_image_summary(name, arr)\n        self._dispatch(lambda m: m.process_summary(s))", "docstring": "Put an image.\n\nArgs:\nname (str):\nval (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images\nin range [0,255]. If channel is 3, assumed to be RGB.", "source": "juraj-google-style"}
{"code": "def convert_sum(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting Sum ...')\n\n    def target_layer(x):\n        import keras.backend as K\n        return K.sum(x)\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert sum.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def not_storable(_type):\n    return Storable(_type, handlers=StorableHandler(poke=fake_poke, peek=fail_peek(_type)))", "docstring": "Helper for tagging unserializable types.\n\nArguments:\n\n_type (type): type to be ignored.\n\nReturns:\n\nStorable: storable instance that does not poke.", "source": "codesearchnet"}
{"code": "def ed25519_public_key_from_string(string):\n    try:\n        return Ed25519PublicKey.from_public_bytes(base64.b64decode(string))\n    except (UnsupportedAlgorithm, Base64Error) as exc:\n        raise ScriptWorkerEd25519Error(\"Can't create Ed25519PublicKey: {}!\".format(str(exc)))", "docstring": "Create an ed25519 public key from ``string``, which is a seed.\n\nArgs:\nstring (str): the string to use as a seed.\n\nReturns:\nEd25519PublicKey: the public key", "source": "codesearchnet"}
{"code": "def QA_fetch_get_sh_margin(date):\n    if (date in trade_date_sse):\n        data = pd.read_excel(_sh_url.format(QA_util_date_str2int(date)), 1).assign(date=date).assign(sse='sh')\n        data.columns = ['code', 'name', 'leveraged_balance', 'leveraged_buyout', 'leveraged_payoff', 'margin_left', 'margin_sell', 'margin_repay', 'date', 'sse']\n        return data\n    else:\n        pass", "docstring": "return shanghai margin data\n\nArguments:\ndate {str YYYY-MM-DD} -- date format\n\nReturns:\npandas.DataFrame -- res for margin data", "source": "codesearchnet"}
{"code": "def _Build(self, storage_file):\n    self._index = {}\n    for event_tag in storage_file.GetEventTags():\n        self.SetEventTag(event_tag)", "docstring": "Builds the event tag index.\n\nArgs:\nstorage_file (BaseStorageFile): storage file.", "source": "codesearchnet"}
{"code": "def included(self, start, stop):\n        \n        for event in self:\n            if (start <= event.begin <= stop \n            and start <= event.end <= stop): \n                yield event", "docstring": "Iterates (in chronological order) over every event that is included\nin the timespan between `start` and `stop`\n\nArgs:\nstart : (Arrow object)\nstop : (Arrow object)", "source": "juraj-google-style"}
{"code": "def add_keywords_from_dict(self, keyword_dict):\n    for (clean_name, keywords) in keyword_dict.items():\n        if (not isinstance(keywords, list)):\n            raise AttributeError('Value of key {} should be a list'.format(clean_name))\n        for keyword in keywords:\n            self.add_keyword(keyword, clean_name)", "docstring": "To add keywords from a dictionary\n\nArgs:\nkeyword_dict (dict): A dictionary with `str` key and (list `str`) as value\n\nExamples:\n>>> keyword_dict = {\n\"java\": [\"java_2e\", \"java programing\"],\n\"product management\": [\"PM\", \"product manager\"]\n}\n>>> keyword_processor.add_keywords_from_dict(keyword_dict)\n\nRaises:\nAttributeError: If value for a key in `keyword_dict` is not a list.", "source": "codesearchnet"}
{"code": "def encode_all_features(dataset, vocabulary):\n\n    def my_fn(features):\n        ret = {}\n        for (k, v) in features.items():\n            v = vocabulary.encode_tf(v)\n            v = tf.concat([tf.to_int64(v), [1]], 0)\n            ret[k] = v\n        return ret\n    return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "docstring": "Encode all features.\n\nArgs:\ndataset: a tf.data.Dataset\nvocabulary: a vocabulary.Vocabulary\nReturns:\na tf.data.Dataset", "source": "codesearchnet"}
{"code": "def word_ids(self, batch_index: int=0) -> List[Optional[int]]:\n    if not self._encodings:\n        raise ValueError('word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast` class).')\n    return self._encodings[batch_index].word_ids", "docstring": "Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.\n\nArgs:\nbatch_index (`int`, *optional*, defaults to 0): The index to access in the batch.\n\nReturns:\n`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the\ntokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word\n(several tokens will be mapped to the same word index if they are parts of that word).", "source": "github-repos"}
{"code": "def should_execute_serially(self, applied_ptransform):\n    if isinstance(applied_ptransform.transform, (_GroupByKeyOnly, _StreamingGroupByKeyOnly, _StreamingGroupAlsoByWindow)):\n        return True\n    elif isinstance(applied_ptransform.transform, core.ParDo) and is_stateful_dofn(applied_ptransform.transform.dofn):\n        return True\n    return False", "docstring": "Returns True if this applied_ptransform should run one bundle at a time.\n\nSome TransformEvaluators use a global state object to keep track of their\nglobal execution state. For example evaluator for _GroupByKeyOnly uses this\nstate as an in memory dictionary to buffer keys.\n\nSerially executed evaluators will act as syncing point in the graph and\nexecution will not move forward until they receive all of their inputs. Once\nthey receive all of their input, they will release the combined output.\nTheir output may consist of multiple bundles as they may divide their output\ninto pieces before releasing.\n\nArgs:\napplied_ptransform: Transform to be used for execution.\n\nReturns:\nTrue if executor should execute applied_ptransform serially.", "source": "github-repos"}
{"code": "def get_cards(self, **query_params):\n    cards = self.get_cards_json(self.base_uri, query_params=query_params)\n    cards_list = []\n    for card_json in cards:\n        cards_list.append(self.create_card(card_json))\n    return cards_list", "docstring": "Get all cards this member is attached to. Return a list of Card\nobjects.\n\nReturns:\nlist(Card): Return all cards this member is attached to", "source": "codesearchnet"}
{"code": "def _partitions_list(N):\n    \n    if N < (_NUM_PRECOMPUTED_PARTITION_LISTS):\n        return list(_partition_lists[N])\n    else:\n        raise ValueError(\n            'Partition lists not yet available for system with {} '\n            'nodes or more'.format(_NUM_PRECOMPUTED_PARTITION_LISTS))", "docstring": "Return a list of partitions of the |N| binary nodes.\n\nArgs:\nN (int): The number of nodes under consideration.\n\nReturns:\nlist[list]: A list of lists, where each inner list is the set of\nmicro-elements corresponding to a macro-element.\n\nExample:\n>>> _partitions_list(3)\n[[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]], [[0], [1], [2]]]", "source": "juraj-google-style"}
{"code": "def calculate_embedding(self, batch_image_bytes):\n    return self.tf_session.run(self.embedding, feed_dict={self.input_jpeg: batch_image_bytes})", "docstring": "Get the embeddings for a given JPEG image.\n\nArgs:\nbatch_image_bytes: As if returned from [ff.read() for ff in file_list].\n\nReturns:\nThe Inception embeddings (bottleneck layer output)", "source": "codesearchnet"}
{"code": "def respond(self, prompt_id, response):\n    \n    _LOG.debug('Responding to prompt (%s): \"%s\"', prompt_id, response)\n    with self._cond:\n      if not (self._prompt and self._prompt.id == prompt_id):\n        return False\n      self._response = response\n      self.last_response = (prompt_id, response)\n      self.remove_prompt()\n      self._cond.notifyAll()\n    return True", "docstring": "Respond to the prompt with the given ID.\n\nIf there is no active prompt or the given ID doesn't match the active\nprompt, do nothing.\n\nArgs:\nprompt_id: A string uniquely identifying the prompt.\nresponse: A string response to the given prompt.\n\nReturns:\nTrue if the prompt with the given ID was active, otherwise False.", "source": "juraj-google-style"}
{"code": "def _FlushExportBuffer(self, output_module, deduplicate_events=True):\n    last_macb_group_identifier = None\n    last_content_identifier = None\n    macb_group = []\n    generator = self._export_event_heap.PopEvents()\n    for (macb_group_identifier, content_identifier, event) in generator:\n        if (deduplicate_events and (last_content_identifier == content_identifier)):\n            self._events_status.number_of_duplicate_events += 1\n            continue\n        if (macb_group_identifier is None):\n            if macb_group:\n                output_module.WriteEventMACBGroup(macb_group)\n                macb_group = []\n            output_module.WriteEvent(event)\n        else:\n            if ((last_macb_group_identifier == macb_group_identifier) or (not macb_group)):\n                macb_group.append(event)\n            else:\n                output_module.WriteEventMACBGroup(macb_group)\n                macb_group = [event]\n            self._events_status.number_of_macb_grouped_events += 1\n        last_macb_group_identifier = macb_group_identifier\n        last_content_identifier = content_identifier\n    if macb_group:\n        output_module.WriteEventMACBGroup(macb_group)", "docstring": "Flushes buffered events and writes them to the output module.\n\nArgs:\noutput_module (OutputModule): output module.\ndeduplicate_events (Optional[bool]): True if events should be\ndeduplicated.", "source": "codesearchnet"}
{"code": "def __init__(self, latitude, longitude, name, units='km'):\n        \n        super(NumberedPoint, self).__init__(latitude, longitude, units)\n\n        self.name = name", "docstring": "Initialise a new ``NumberedPoint`` object.\n\nArgs:\nlatitude (float): Location's latitude\nlongitude (float): Location's longitude\nname (str): Location's name or command line position\nunits (str): Unit type to be used for distances", "source": "juraj-google-style"}
{"code": "def fail_request(self, orig_request, message, start_response):\n    cors_handler = self._create_cors_handler(orig_request)\n    return util.send_wsgi_error_response(message, start_response, cors_handler=cors_handler)", "docstring": "Write an immediate failure response to outfile, no redirect.\n\nThis calls start_response and returns the error body.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nmessage: A string containing the error message to be displayed to user.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the body of the error response.", "source": "codesearchnet"}
{"code": "def passthrough_context_definition(context_params):\n    check.inst_param(context_params, 'context', ExecutionContext)\n    context_definition = PipelineContextDefinition(context_fn=(lambda *_args: context_params))\n    return {DEFAULT_CONTEXT_NAME: context_definition}", "docstring": "Create a context definition from a pre-existing context. This can be useful\nin testing contexts where you may want to create a context manually and then\npass it into a one-off PipelineDefinition\n\nArgs:\ncontext (ExecutionContext): The context that will provided to the pipeline.\nReturns:\nPipelineContextDefinition: The passthrough context definition.", "source": "codesearchnet"}
{"code": "def MakeStatResponse(self, tsk_file, tsk_attribute=None, append_name=None):\n    precondition.AssertOptionalType(append_name, Text)\n    info = tsk_file.info\n    response = rdf_client_fs.StatEntry()\n    meta = info.meta\n    if meta:\n        response.st_ino = meta.addr\n        for attribute in ['mode', 'nlink', 'uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime']:\n            try:\n                value = int(getattr(meta, attribute))\n                if (value < 0):\n                    value &= 4294967295\n                setattr(response, ('st_%s' % attribute), value)\n            except AttributeError:\n                pass\n    name = info.name\n    child_pathspec = self.pathspec.Copy()\n    if (append_name is not None):\n        child_pathspec.last.path = utils.JoinPath(child_pathspec.last.path, append_name)\n    child_pathspec.last.inode = meta.addr\n    if (tsk_attribute is not None):\n        child_pathspec.last.ntfs_type = int(tsk_attribute.info.type)\n        child_pathspec.last.ntfs_id = int(tsk_attribute.info.id)\n        child_pathspec.last.stream_name = tsk_attribute.info.name\n        response.st_size = tsk_attribute.info.size\n        default = rdf_paths.PathSpec.tsk_fs_attr_type.TSK_FS_ATTR_TYPE_DEFAULT\n        last = child_pathspec.last\n        if ((last.ntfs_type != default) or last.ntfs_id):\n            response.st_mode &= (~ self.stat_type_mask)\n            response.st_mode |= stat.S_IFREG\n    else:\n        child_pathspec.last.ntfs_type = None\n        child_pathspec.last.ntfs_id = None\n        child_pathspec.last.stream_name = None\n    if name:\n        response.st_mode |= self.FILE_TYPE_LOOKUP.get(int(name.type), 0)\n    if meta:\n        response.st_mode |= self.META_TYPE_LOOKUP.get(int(meta.type), 0)\n    response.pathspec = child_pathspec\n    return response", "docstring": "Given a TSK info object make a StatEntry.\n\nNote that tsk uses two things to uniquely identify a data stream - the inode\nobject given in tsk_file and the attribute object which may correspond to an\nADS of this file for filesystems which support ADS. We store both of these\nin the stat response.\n\nArgs:\ntsk_file: A TSK File object for the specified inode.\ntsk_attribute: A TSK Attribute object for the ADS. If None we use the main\nstream.\nappend_name: If specified we append this name to the last element of the\npathspec.\n\nReturns:\nA StatEntry which can be used to re-open this exact VFS node.", "source": "codesearchnet"}
{"code": "def parse_ids(chrom, pos, ref, alt, case_id, variant_type):\n    ids = {}\n    pos = str(pos)\n    ids['simple_id'] = parse_simple_id(chrom, pos, ref, alt)\n    ids['variant_id'] = parse_variant_id(chrom, pos, ref, alt, variant_type)\n    ids['display_name'] = parse_display_name(chrom, pos, ref, alt, variant_type)\n    ids['document_id'] = parse_document_id(chrom, pos, ref, alt, variant_type, case_id)\n    return ids", "docstring": "Construct the necessary ids for a variant\n\nArgs:\nchrom(str): Variant chromosome\npos(int): Variant position\nref(str): Variant reference\nalt(str): Variant alternative\ncase_id(str): Unique case id\nvariant_type(str): 'clinical' or 'research'\n\nReturns:\nids(dict): Dictionary with the relevant ids", "source": "codesearchnet"}
{"code": "def __init__(self, num_points):\n    \n    self.num_points = num_points\n    self.column_names = []\n    self.name_to_values = {}", "docstring": "Constructs a metadata for an embedding of the specified size.\n\nArgs:\nnum_points: Number of points in the embedding.", "source": "juraj-google-style"}
{"code": "def _prep_binary_content(self):\n\n\t\t\n\n\t\t\n\t\tif not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():\n\t\t\traise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')\n\n\t\telif 'Content-Location' in self.resource.headers.keys():\n\t\t\tlogger.debug('Content-Location header found, using')\n\t\t\tself.delivery = 'header'\n\n\t\t\n\t\telif 'Content-Location' not in self.resource.headers.keys():\n\n\t\t\t\n\t\t\tif self.location:\n\t\t\t\t\n\t\t\t\tself.resource.headers['Content-Location'] = self.location\n\t\t\t\tself.delivery = 'header'\n\n\t\t\t\n\t\t\telif self.data:\n\n\t\t\t\t\n\t\t\t\tif isinstance(self.data, io.BufferedIOBase):\n\t\t\t\t\tlogger.debug('detected file-like object')\n\t\t\t\t\tself.delivery = 'payload'\n\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug('detected bytes')\n\t\t\t\t\tself.delivery = 'payload'", "docstring": "Sets delivery method of either payload or header\nFavors Content-Location header if set\n\nArgs:\nNone\n\nReturns:\nNone: sets attributes in self.binary and headers", "source": "juraj-google-style"}
{"code": "def individuals(self, ind_ids=None):\n        \n        if ind_ids:\n            for ind_id in ind_ids:\n                for ind in self.individual_objs:\n                    if ind.ind_id == ind_id:\n                        yield ind\n        else:\n            for ind in self.individual_objs:\n                yield ind", "docstring": "Return information about individuals\n\nArgs:\nind_ids (list(str)): List of individual ids\n\nReturns:\nindividuals (Iterable): Iterable with Individuals", "source": "juraj-google-style"}
{"code": "def is_array(self, data_type):\n    \n\n    \n    data_type = data_type.split('[')[0].strip()\n\n    return data_type.lower() in self.array_types", "docstring": "Check if a type is a known array type\n\nArgs:\ndata_type (str): Name of type to check\nReturns:\nTrue if ``data_type`` is a known array type.", "source": "juraj-google-style"}
{"code": "def ParseDownloadsRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = FirefoxDownloadEventData()\n    event_data.full_path = self._GetRowValue(query_hash, row, 'target')\n    event_data.mime_type = self._GetRowValue(query_hash, row, 'mimeType')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.received_bytes = self._GetRowValue(query_hash, row, 'currBytes')\n    event_data.referrer = self._GetRowValue(query_hash, row, 'referrer')\n    event_data.temporary_location = self._GetRowValue(\n        query_hash, row, 'tempPath')\n    event_data.total_bytes = self._GetRowValue(query_hash, row, 'maxBytes')\n    event_data.url = self._GetRowValue(query_hash, row, 'source')\n\n    timestamp = self._GetRowValue(query_hash, row, 'startTime')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_START)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'endTime')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_END)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a downloads row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _parse_username(self, config):\n    (username, priv, role, nopass, fmt, secret, sshkey) = config\n    resource = dict()\n    resource['privilege'] = priv\n    resource['role'] = role\n    resource['nopassword'] = (nopass == 'nopassword')\n    resource['format'] = fmt\n    resource['secret'] = secret\n    resource['sshkey'] = sshkey\n    return {username: resource}", "docstring": "Scans the config block and returns the username as a dict\n\nArgs:\nconfig (str): The config block to parse\n\nReturns:\ndict: A resource dict that is intended to be merged into the\nuser resource", "source": "codesearchnet"}
{"code": "def block(self, **kwargs):\n        \n        path = '/users/%s/block' % self.id\n        server_data = self.manager.gitlab.http_post(path, **kwargs)\n        if server_data is True:\n            self._attrs['state'] = 'blocked'\n        return server_data", "docstring": "Block the user.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabBlockError: If the user could not be blocked\n\nReturns:\nbool: Whether the user status has been changed", "source": "juraj-google-style"}
{"code": "def _set_spawn_exe_path():\n    if sys.argv[0].endswith('.py'):\n\n        def guess_path(package_root):\n            if 'bazel-out' in sys.argv[0] and package_root in sys.argv[0]:\n                package_root_base = sys.argv[0][:sys.argv[0].rfind(package_root)]\n                binary = os.environ['TEST_TARGET'][2:].replace(':', '/', 1)\n                possible_path = os.path.join(package_root_base, package_root, binary)\n                logging.info('Guessed test binary path: %s', possible_path)\n                if os.access(possible_path, os.X_OK):\n                    return possible_path\n                return None\n        path = guess_path('org_tensorflow')\n        if not path:\n            path = guess_path('org_keras')\n        if path is None:\n            logging.error('Cannot determine binary path. sys.argv[0]=%s os.environ=%s', sys.argv[0], os.environ)\n            raise RuntimeError('Cannot determine binary path')\n        sys.argv[0] = path\n    multiprocessing.get_context().set_executable(sys.argv[0])", "docstring": "Set the path to the executable for spawned processes.\n\nThis utility searches for the binary the parent process is using, and sets\nthe executable of multiprocessing's context accordingly.\n\nRaises:\nRuntimeError: If the binary path cannot be determined.", "source": "github-repos"}
{"code": "def gumbel_sample(shape):\n  \n  uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998)\n  return -tf.log(-tf.log(uniform_samples))", "docstring": "Sample from the Gumbel distribution, protect from overflows.\n\nArgs:\nshape: Shape of Gumbel samples.\n\nReturns:\nNoise drawn from Gumbel distribution.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.Invoke = channel.unary_unary(\n        '/pulumirpc.ResourceMonitor/Invoke',\n        request_serializer=provider__pb2.InvokeRequest.SerializeToString,\n        response_deserializer=provider__pb2.InvokeResponse.FromString,\n        )\n    self.ReadResource = channel.unary_unary(\n        '/pulumirpc.ResourceMonitor/ReadResource',\n        request_serializer=resource__pb2.ReadResourceRequest.SerializeToString,\n        response_deserializer=resource__pb2.ReadResourceResponse.FromString,\n        )\n    self.RegisterResource = channel.unary_unary(\n        '/pulumirpc.ResourceMonitor/RegisterResource',\n        request_serializer=resource__pb2.RegisterResourceRequest.SerializeToString,\n        response_deserializer=resource__pb2.RegisterResourceResponse.FromString,\n        )\n    self.RegisterResourceOutputs = channel.unary_unary(\n        '/pulumirpc.ResourceMonitor/RegisterResourceOutputs',\n        request_serializer=resource__pb2.RegisterResourceOutputsRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _add_sv_coordinates(self, variant):\n        \n        variant.stop_chrom = variant.CHROM\n        variant.start = int(variant.POS)\n        \n        \n        if ':' in variant.ALT:\n            other_coordinates = variant.ALT.strip('ACGTN[]').split(':')\n            variant.stop_chrom = other_coordinates[0].lstrip('chrCHR')\n            other_position = other_coordinates[1]\n            \n\n            \n            variant.sv_len = float('inf')\n            variant.sv_type = 'BND'\n        else:\n            variant.sv_len = variant.stop - variant.start\n\n        variant['cytoband_start'] = get_cytoband_coord(\n                                        chrom=variant.CHROM,\n                                        pos=variant.start\n                                        )\n\n        variant['cytoband_stop'] = get_cytoband_coord(\n                                    chrom=variant.stop_chrom,\n                                    pos=variant.stop\n                                    )", "docstring": "Add the neccesary sv coordinates for a variant\n\nArgs:\nvariant (puzzle.models.variant)", "source": "juraj-google-style"}
{"code": "def __call__(self, fn):\n        \n\n        def fail(app, *args, **kwargs):\n            \n            data = fn(app, *args, **kwargs)\n            \n            if isinstance(self.enable, bool):\n                enabled = self.enable\n                app.tcex.log.debug('Fail on output is ({}).'.format(self.enable))\n            else:\n                enabled = getattr(app.args, self.enable)\n                app.tcex.log.debug('Fail on output is ({}) for ({}).'.format(enabled, self.enable))\n                if not isinstance(enabled, bool):\n                    app.tcex.playbook.exit(\n                        1, 'The enable value must be a boolean for fail on output.'\n                    )\n\n            failed = False\n            if enabled is True:\n                if isinstance(data, list):\n                    \n                    for d in data:\n                        if d in self.values:\n                            failed = True\n                else:\n                    if data in self.values:\n                        failed = True\n\n                if failed:\n                    app.tcex.exit(1, self.msg)\n            return data\n\n        return fail", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):\n    \n    def yield_csv(csv_contents, csv_file):\n        try:\n            for line in csv_contents:\n                yield line\n        finally:\n            try:\n                csv_file.close()\n            except:\n                pass\n\n    def process_csv(csv_contents, csv_file):\n        return [line for line in yield_csv(csv_contents, csv_file)]\n\n    if file_contents:\n        csv_file = BytesIO(file_contents)\n    else:\n        \n        csv_file = open(file_name, 'rb')\n    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)\n\n    if on_demand:\n        table = yield_csv(reader, csv_file)\n    else:\n        table = process_csv(reader, csv_file)\n\n    return [table]", "docstring": "Gets good old csv data from a file.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nencoding: Loads the file with the specified cell encoding.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy.", "source": "juraj-google-style"}
{"code": "def __init__(self, direct_subclasses=None, any_also_is_bottom=True):\n    self.direct_subclasses = direct_subclasses or {}\n    self.any_also_is_bottom = any_also_is_bottom\n    self.solver = booleq.Solver()\n    self._implications = {}", "docstring": "Construct.\n\nArgs:\ndirect_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.\nany_also_is_bottom: Whether we should, (if True) consider\npytd.AnythingType() to also be at the bottom of the type hierarchy, thus\nmaking it a subclass of everything, or (if False) to be only at the top.", "source": "github-repos"}
{"code": "def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):\n    self._remote = RemoteClient(host, username, password, private_key, private_key_pass)\n    self._remote_id = uuid.uuid4().hex", "docstring": "Defines the remote scheduler\n\nArgs:\nhost (str): the hostname or ip address of the remote scheduler\nusername (str, optional): the username used to connect to the remote scheduler. Default is 'root'\npassword (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.\nprivate_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.\nprivate_key_pass (str, optional): the passphrase for the private_key. Default is None.\n\nReturns:\nAn RemoteClient representing the remote scheduler.", "source": "codesearchnet"}
{"code": "def stage_tc_associations(self, entity1, entity2):\n        \n        \n        entity1 = self.tcex.playbook.read(entity1)\n        entity1_id = entity1.get('id')\n        entity1_owner = entity1.get('ownerName')\n        entity1_type = entity1.get('type')\n        if entity1.get('type') in self.tcex.indicator_types:\n            entity1_id = entity1.get('value')\n\n        \n        entity2 = self.tcex.playbook.read(entity2)\n        entity2_id = entity2.get('id')\n        entity2_owner = entity1.get('ownerName')\n        entity2_type = entity2.get('type')\n        if entity2.get('type') in self.tcex.indicator_types:\n            entity2_id = entity2.get('value')\n\n        if entity1_owner != entity2_owner:\n            self.log.error('[stage] Can not associate resource across owners.')\n            return\n\n        resource1 = self.tcex.resource(entity1_type)\n        resource1.http_method = 'POST'\n        resource1.owner = entity1_owner\n        resource1.resource_id(entity1_id)\n\n        resource2 = self.tcex.resource(entity2_type)\n        resource2.resource_id(entity2_id)\n\n        a_resource = resource1.associations(resource2)\n        response = a_resource.request()\n        if response.get('status') != 'Success':\n            self.log.warning(\n                '[stage] Failed associating \"{}:{}\" with \"{}:{}\" ({}).'.format(\n                    entity1_type,\n                    entity1_id,\n                    entity2_type,\n                    entity2_id,\n                    response.get('response').text,\n                )\n            )", "docstring": "Add an attribute to a resource.\n\nArgs:\nentity1 (str): A Redis variable containing a TCEntity.\nentity2 (str): A Redis variable containing a TCEntity.", "source": "juraj-google-style"}
{"code": "def RegisterDefinition(self, data_type_definition):\n    \n    name_lower = data_type_definition.name.lower()\n    if name_lower in self._definitions:\n      raise KeyError('Definition already set for name: {0:s}.'.format(\n          data_type_definition.name))\n\n    if data_type_definition.name in self._aliases:\n      raise KeyError('Alias already set for name: {0:s}.'.format(\n          data_type_definition.name))\n\n    for alias in data_type_definition.aliases:\n      if alias in self._aliases:\n        raise KeyError('Alias already set for name: {0:s}.'.format(alias))\n\n    self._definitions[name_lower] = data_type_definition\n\n    for alias in data_type_definition.aliases:\n      self._aliases[alias] = name_lower\n\n    if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:\n      self._format_definitions.append(name_lower)", "docstring": "Registers a data type definition.\n\nThe data type definitions are identified based on their lower case name.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definitions.\n\nRaises:\nKeyError: if data type definition is already set for the corresponding\nname.", "source": "juraj-google-style"}
{"code": "def RunPlugins(\n      cls, artifacts_registry, file_system, mount_point, knowledge_base):\n    \n    searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)\n\n    cls.CollectFromFileSystem(\n        artifacts_registry, knowledge_base, searcher, file_system)\n\n    \n    \n\n    environment_variables = None\n    if knowledge_base:\n      environment_variables = knowledge_base.GetEnvironmentVariables()\n\n    registry_file_reader = FileSystemWinRegistryFileReader(\n        file_system, mount_point, environment_variables=environment_variables)\n    win_registry = dfwinreg_registry.WinRegistry(\n        registry_file_reader=registry_file_reader)\n\n    searcher = registry_searcher.WinRegistrySearcher(win_registry)\n\n    cls.CollectFromWindowsRegistry(\n        artifacts_registry, knowledge_base, searcher)\n\n    cls.CollectFromKnowledgeBase(knowledge_base)\n\n    if not knowledge_base.HasUserAccounts():\n      logger.warning('Unable to find any user accounts on the system.')", "docstring": "Runs the preprocessing plugins.\n\nArgs:\nartifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts\ndefinitions registry.\nfile_system (dfvfs.FileSystem): file system to be preprocessed.\nmount_point (dfvfs.PathSpec): mount point path specification that refers\nto the base location of the file system.\nknowledge_base (KnowledgeBase): to fill with preprocessing information.", "source": "juraj-google-style"}
{"code": "def get_channel_id(turn_context: TurnContext) -> str:\n    if (turn_context.activity.channel_id is None):\n        return ''\n    else:\n        return turn_context.activity.channel_id", "docstring": "Get the Channel Id from the current Activity on the Turn Context.\n\nArgs:\nturn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from.\n\nReturns:\nstr: The Channel Id from the Turn Context's Activity.", "source": "codesearchnet"}
{"code": "def convert_snapshot(self, shift, instruction):\n        \n        command_dict = {\n            'name': 'snapshot',\n            't0': shift+instruction.start_time,\n            'label': instruction.name,\n            'type': instruction.type\n        }\n        return self._qobj_model(**command_dict)", "docstring": "Return converted `Snapshot`.\n\nArgs:\nshift(int): Offset time.\ninstruction (Snapshot): snapshot instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "juraj-google-style"}
{"code": "def __format_error(self, error_list_tag):\n    \n    error = {'domain': self.domain(),\n             'reason': self.reason(),\n             'message': self.message()}\n    error.update(self.extra_fields() or {})\n    return {'error': {error_list_tag: [error],\n                      'code': self.status_code(),\n                      'message': self.message()}}", "docstring": "Format this error into a JSON response.\n\nArgs:\nerror_list_tag: A string specifying the name of the tag to use for the\nerror list.\n\nReturns:\nA dict containing the reformatted JSON error response.", "source": "juraj-google-style"}
{"code": "def _ParseCmdItem(self, cmd_input, template_file=None):\n    fsm = textfsm.TextFSM(template_file)\n    if (not self._keys):\n        self._keys = set(fsm.GetValuesByAttrib('Key'))\n    table = texttable.TextTable()\n    table.header = fsm.header\n    for record in fsm.ParseText(cmd_input):\n        table.Append(record)\n    return table", "docstring": "Creates Texttable with output of command.\n\nArgs:\ncmd_input: String, Device response.\ntemplate_file: File object, template to parse with.\n\nReturns:\nTextTable containing command output.\n\nRaises:\nCliTableError: A template was not found for the given command.", "source": "codesearchnet"}
{"code": "def _ParseShellItemPathSegment(self, shell_item):\n    path_segment = None\n    if isinstance(shell_item, pyfwsi.root_folder):\n        description = shell_folder_ids.DESCRIPTIONS.get(shell_item.shell_folder_identifier, None)\n        if description:\n            path_segment = description\n        else:\n            path_segment = '{{{0:s}}}'.format(shell_item.shell_folder_identifier)\n        path_segment = '<{0:s}>'.format(path_segment)\n    elif isinstance(shell_item, pyfwsi.volume):\n        if shell_item.name:\n            path_segment = shell_item.name\n        elif shell_item.identifier:\n            path_segment = '{{{0:s}}}'.format(shell_item.identifier)\n    elif isinstance(shell_item, pyfwsi.file_entry):\n        long_name = ''\n        for extension_block in shell_item.extension_blocks:\n            if isinstance(extension_block, pyfwsi.file_entry_extension):\n                long_name = extension_block.long_name\n        if long_name:\n            path_segment = long_name\n        elif shell_item.name:\n            path_segment = shell_item.name\n    elif isinstance(shell_item, pyfwsi.network_location):\n        if shell_item.location:\n            path_segment = shell_item.location\n    if ((path_segment is None) and (shell_item.class_type == 0)):\n        pass\n    if (path_segment is None):\n        path_segment = '<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)\n    return path_segment", "docstring": "Parses a shell item path segment.\n\nArgs:\nshell_item (pyfwsi.item): shell item.\n\nReturns:\nstr: shell item path segment.", "source": "codesearchnet"}
{"code": "def GetRequestXML(self, method, *args):\n    \n    packed_args = self._PackArguments(method, args, set_type_attrs=True)\n    headers = self._GetZeepFormattedSOAPHeaders()\n\n    return self.zeep_client.create_message(\n        self.zeep_client.service, method, *packed_args, _soapheaders=headers)", "docstring": "Get the raw SOAP XML for a request.\n\nArgs:\nmethod: The method name.\n*args: A list of arguments to be passed to the method.\n\nReturns:\nAn element containing the raw XML that would be sent as the request.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent_xid, relationship):\n        \n        self.xid = str(uuid.uuid4())\n        self._action_data = {\n            'indicatorXid': self.xid,\n            'relationship': relationship,\n            'parentIndicatorXid': parent_xid,\n        }\n        self._children = []", "docstring": "Initialize Class Properties.\n\n.. warning:: This code is not complete and may require some update to the API.\n\nArgs:\nparent_xid (str): The external id of the parent Indicator.\nrelationship: ???", "source": "juraj-google-style"}
{"code": "def start_at(self, document_fields):\n    query = query_mod.Query(self)\n    return query.start_at(document_fields)", "docstring": "Start query at a cursor with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.start_at` for\nmore information on this method.\n\nArgs:\ndocument_fields (Union[~.firestore_v1beta1.\\\ndocument.DocumentSnapshot, dict, list, tuple]): a document\nsnapshot or a dictionary/list/tuple of fields representing a\nquery results cursor. A cursor is a collection of values that\nrepresent a position in a query result set.\n\nReturns:\n~.firestore_v1beta1.query.Query: A query with cursor.", "source": "codesearchnet"}
{"code": "def _AddCampaignsToGroup(client, campaign_group_id, campaign_ids):\n    campaign_service = client.GetService('CampaignService', version='v201809')\n    operations = [{'operator': 'SET', 'operand': {'id': campaign_id, 'campaignGroupId': campaign_group_id}} for campaign_id in campaign_ids]\n    campaign_service.mutate(operations)\n    print(('The following campaign IDs were added to the campaign group with ID \"%d\":\\n\\t%s' % (campaign_group_id, campaign_ids)))", "docstring": "Adds multiple campaigns to a campaign group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_group_id: an integer ID for the campaign group.\ncampaign_ids: a list of integer IDs for campaigns.", "source": "codesearchnet"}
{"code": "def check(self, digest):\n    path = self.get_file_path(digest)\n    if (self._calc_digest(path) != digest):\n        self.logger.warning(\"found corrupted file: '{0}'\".format(path))\n        return False\n    return True", "docstring": "Check the integrity of the file with the given digest\n\nArgs:\ndigest -- digest of the file to check\nReturns:\nTrue if the file is not corrupted", "source": "codesearchnet"}
{"code": "def children(self, sourcepath, recursive=True):\n    return self._get_recursive_dependancies(self._CHILDREN_MAP, sourcepath, recursive=True)", "docstring": "Recursively find all children that are imported from the given source\npath.\n\nArgs:\nsourcepath (str): Source file path to search for.\n\nKeyword Arguments:\nrecursive (bool): Switch to enabled recursive finding (if True).\nDefault to True.\n\nReturns:\nset: List of finded parents path.", "source": "codesearchnet"}
{"code": "def create_constructor_args(cls, proto_list: List[fra.ForwardRateAgreement], config: ForwardRateAgreementConfig=None) -> Dict[str, Any]:\n    fra_data = proto_utils.from_protos_v2(proto_list, config)\n    res = {}\n    for key in fra_data:\n        tensor_repr = proto_utils.tensor_repr(fra_data[key])\n        res[key] = tensor_repr\n    return res", "docstring": "Creates a dictionary to initialize ForwardRateAgreement.\n\nThe output dictionary is such that the instruments can be initialized\nas follows:\n```\ninitializer = create_constructor_args(proto_list, config)\nfras = [ForwardRateAgreement(**data) for data in initializer.values()]\n```\n\nThe keys of the output dictionary are unique identifiers of the batched\ninstruments. This is useful for identifying an existing graph that could be\nreused for the instruments without the need of rebuilding the graph.\n\nArgs:\nproto_list: A list of protos for which the initialization arguments are\nconstructed.\nconfig: An instance of `ForwardRateAgreementConfig`.\n\nReturns:\nA possibly nested dictionary such that each value provides initialization\narguments for the ForwardRateAgreement.", "source": "github-repos"}
{"code": "def pull_release(self, name, version, destfolder='.', force=False):\n    unique_id = name.replace('/', '_')\n    depdict = {'name': name, 'unique_id': unique_id, 'required_version': version, 'required_version_string': str(version)}\n    destdir = os.path.join(destfolder, unique_id)\n    if os.path.exists(destdir):\n        if (not force):\n            raise ExternalError('Output directory exists and force was not specified, aborting', output_directory=destdir)\n        shutil.rmtree(destdir)\n    result = self.update_dependency(None, depdict, destdir)\n    if (result != 'installed'):\n        raise ArgumentError('Could not find component to satisfy name/version combination')", "docstring": "Download and unpack a released iotile component by name and version range\n\nIf the folder that would be created already exists, this command fails unless\nyou pass force=True\n\nArgs:\nname (string): The name of the component to download\nversion (SemanticVersionRange): The valid versions of the component to fetch\ndestfolder (string): The folder into which to unpack the result, defaults to\nthe current working directory\nforce (bool): Forcibly overwrite whatever is currently in the folder that would\nbe fetched.\n\nRaises:\nExternalError: If the destination folder exists and force is not specified\nArgumentError: If the specified component could not be found with the required version", "source": "codesearchnet"}
{"code": "def run(self, args):\n    jlink = self.create_jlink(args)\n    erased = jlink.erase()\n    print(('Bytes Erased: %d' % erased))", "docstring": "Erases the device connected to the J-Link.\n\nArgs:\nself (EraseCommand): the ``EraseCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def write_float(self, registeraddress, value, numberOfRegisters=2):\n    _checkNumerical(value, description='input value')\n    _checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')\n    self._genericCommand(16, registeraddress, value, numberOfRegisters=numberOfRegisters, payloadformat='float')", "docstring": "Write a floating point number to the slave.\n\nFloats are stored in two or more consecutive 16-bit registers in the slave.\n\nUses Modbus function code 16.\n\nFor discussion on precision, number of registers and on byte order, see :meth:`.read_float`.\n\nArgs:\n* registeraddress (int): The slave register start address (use decimal numbers, not hex).\n* value (float or int): The value to store in the slave\n* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.\n\nReturns:\nNone\n\nRaises:\nValueError, TypeError, IOError", "source": "codesearchnet"}
{"code": "def get_membership(self, uuid=None):\n        \n        group_id = self.get_group_id(uuid=uuid)\n        uri = 'group/{group_id}/member'\n        mbr_data = self.get(uri.format(group_id=group_id), params=None)\n        return mbr_data", "docstring": "Get membership data based on uuid.\n\nArgs:\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: No data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\ndict: membership json", "source": "juraj-google-style"}
{"code": "def _execute_and_process_stdout(self, args, shell, handler) -> bytes:\n    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, bufsize=1)\n    out = '[elided, processed via handler]'\n    try:\n        while True:\n            line = proc.stdout.readline()\n            if line:\n                handler(line)\n            else:\n                break\n    finally:\n        unexpected_out, err = proc.communicate()\n        if unexpected_out:\n            out = '[unexpected stdout] %s' % unexpected_out\n            for line in unexpected_out.splitlines():\n                handler(line)\n    ret = proc.returncode\n    logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(args), out, err, ret)\n    if ret == 0:\n        return err\n    else:\n        raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)", "docstring": "Executes adb commands and processes the stdout with a handler.\n\nArgs:\nargs: string or list of strings, program arguments.\nSee subprocess.Popen() documentation.\nshell: bool, True to run this command through the system shell,\nFalse to invoke it directly. See subprocess.Popen() docs.\nhandler: func, a function to handle adb stdout line by line.\n\nReturns:\nThe stderr of the adb command run if exit code is 0.\n\nRaises:\nAdbError: The adb command exit code is not 0.", "source": "github-repos"}
{"code": "def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):\n    \n    if not isinstance(rdf_value, cls.rdf_type):\n      raise ValueError(\"This collection only accepts values of type %s.\" %\n                       cls.rdf_type.__name__)\n    if mutation_pool is None:\n      raise ValueError(\"Mutation pool can't be none.\")\n\n    timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()\n\n    if not isinstance(queue_urn, rdfvalue.RDFURN):\n      queue_urn = rdfvalue.RDFURN(queue_urn)\n\n    mutation_pool.QueueAddItem(queue_urn, rdf_value, timestamp)", "docstring": "Adds an rdf value the queue.\n\nAdds an rdf value to a queue. Does not require that the queue be locked, or\neven open. NOTE: The caller is responsible for ensuring that the queue\nexists and is of the correct type.\n\nArgs:\nqueue_urn: The urn of the queue to add to.\n\nrdf_value: The rdf value to add to the queue.\n\nmutation_pool: A MutationPool object to write to.\n\nRaises:\nValueError: rdf_value has unexpected type.", "source": "juraj-google-style"}
{"code": "def __init__(self, hash_queue, hash_analysis_queue, **kwargs):\n    \n    super(NsrlsvrAnalyzer, self).__init__(\n        hash_queue, hash_analysis_queue, **kwargs)\n    self._host = None\n    self._port = None\n    self.hashes_per_batch = 100", "docstring": "Initializes an nsrlsvr analyzer thread.\n\nArgs:\nhash_queue (Queue.queue): contains hashes to be analyzed.\nhash_analysis_queue (Queue.queue): that the analyzer will append\nHashAnalysis objects this queue.", "source": "juraj-google-style"}
{"code": "def search(self, search_phrase, limit=None):\n        \n\n        query, query_params = self._make_query_from_terms(search_phrase, limit=limit)\n\n        self._parsed_query = (str(query), query_params)\n\n        assert isinstance(query, TextClause)\n\n        datasets = {}\n\n        def make_result(vid=None, b_score=0, p_score=0):\n            res = DatasetSearchResult()\n            res.b_score = b_score\n            res.p_score = p_score\n            res.partitions = set()\n            res.vid = vid\n            return res\n\n        if query_params:\n            results = self.execute(query, **query_params)\n\n            for result in results:\n                vid, dataset_score = result\n\n                datasets[vid] = make_result(vid, b_score=dataset_score)\n\n\n        logger.debug('Extending datasets with partitions.')\n\n        for partition in self.backend.partition_index.search(search_phrase):\n\n            if partition.dataset_vid not in datasets:\n                datasets[partition.dataset_vid] = make_result(partition.dataset_vid)\n\n            datasets[partition.dataset_vid].p_score += partition.score\n            datasets[partition.dataset_vid].partitions.add(partition)\n\n        return list(datasets.values())", "docstring": "Finds datasets by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to return. None means without limit.\n\nReturns:\nlist of DatasetSearchResult instances.", "source": "juraj-google-style"}
{"code": "def set(self, time):\n        \n        self._time = time\n        self._pb.sec = int(self._time)\n        self._pb.nsec = int((self._time - self._pb.sec) * 10 ** 9)", "docstring": "Sets time in seconds since Epoch\n\nArgs:\ntime (:obj:`float`): time in seconds since Epoch (see time.time())\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def IsLinearOutputModule(cls, name):\n    name = name.lower()\n    output_class = cls._output_classes.get(name, None)\n    if (not output_class):\n        output_class = cls._disabled_output_classes.get(name, None)\n    if output_class:\n        return issubclass(output_class, interface.LinearOutputModule)\n    return False", "docstring": "Determines if a specific output class is a linear output module.\n\nArgs:\nname (str): name of the output module.\n\nReturns:\nTrue: if the output module is linear.", "source": "codesearchnet"}
{"code": "def parse_func_attrs(attributes, allowlist=None):\n    if not allowlist:\n        allowlist = MONOMORPHIC_FUNCTION_ALLOWLIST\n    attrs = {}\n    for key, value in attributes.items():\n        if key not in allowlist:\n            raise ValueError(f'Allowlist does not support `{key}` as an attribute.')\n        attrs[key] = _parse_func_attr_value(key, value)\n    return attrs", "docstring": "Convert the keyword arguments into function_def attributes.\n\nCurrently only support primitive types: bool, int, float and string.\n\nArgs:\nattributes: the dictionary of attributes.\nallowlist: set of attribute names allowed.\nReturns:\nA dict of attributes where the key is the name of attribute and the value\nis the AttrValue proto.\nRaises:\nValueError: If the kwargs contains unallowlisted name or unsupported value\ntypes.", "source": "github-repos"}
{"code": "def remove(package_name):\n    \n    if package_name not in packages:\n        raise HolodeckException(\"Unknown package name \" + package_name)\n    for config, path in _iter_packages():\n        if config[\"name\"] == package_name:\n            shutil.rmtree(path)", "docstring": "Removes a holodeck package.\n\nArgs:\npackage_name (str): the name of the package to remove", "source": "juraj-google-style"}
{"code": "def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TangoAndroidMessageEventData()\n    event_data.message_identifier = self._GetRowValue(\n        query_hash, row, 'msg_id')\n\n    \n    \n    \n\n    event_data.direction = self._GetRowValue(query_hash, row, 'direction')\n\n    timestamp = self._GetRowValue(query_hash, row, 'create_time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'send_time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_SENT)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a message row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def write_edges(edges: Mapping[(str, Any)], filename: str, jsonlines: bool=False, gzipflag: bool=False, yaml: bool=False):\n    pass", "docstring": "Write edges to file\n\nArgs:\nedges (Mapping[str, Any]): in edges JSON Schema format\nfilename (str): filename to write\njsonlines (bool): output in JSONLines format?\ngzipflag (bool): create gzipped file?\nyaml (bool): create yaml file?", "source": "codesearchnet"}
{"code": "def isdir(self, path, follow_symlinks=True):\n    return self._is_of_type(path, S_IFDIR, follow_symlinks)", "docstring": "Determine if path identifies a directory.\n\nArgs:\npath: Path to filesystem object.\n\nReturns:\n`True` if path points to a directory (following symlinks).\n\nRaises:\nTypeError: if path is None.", "source": "codesearchnet"}
{"code": "def patch_fromText(self, textline):\n    if (type(textline) == unicode):\n        textline = textline.encode('ascii')\n    patches = []\n    if (not textline):\n        return patches\n    text = textline.split('\\n')\n    while (len(text) != 0):\n        m = re.match('^@@ -(\\\\d+),?(\\\\d*) \\\\+(\\\\d+),?(\\\\d*) @@$', text[0])\n        if (not m):\n            raise ValueError(('Invalid patch string: ' + text[0]))\n        patch = patch_obj()\n        patches.append(patch)\n        patch.start1 = int(m.group(1))\n        if (m.group(2) == ''):\n            patch.start1 -= 1\n            patch.length1 = 1\n        elif (m.group(2) == '0'):\n            patch.length1 = 0\n        else:\n            patch.start1 -= 1\n            patch.length1 = int(m.group(2))\n        patch.start2 = int(m.group(3))\n        if (m.group(4) == ''):\n            patch.start2 -= 1\n            patch.length2 = 1\n        elif (m.group(4) == '0'):\n            patch.length2 = 0\n        else:\n            patch.start2 -= 1\n            patch.length2 = int(m.group(4))\n        del text[0]\n        while (len(text) != 0):\n            if text[0]:\n                sign = text[0][0]\n            else:\n                sign = ''\n            line = urllib.unquote(text[0][1:])\n            line = line.decode('utf-8')\n            if (sign == '+'):\n                patch.diffs.append((self.DIFF_INSERT, line))\n            elif (sign == '-'):\n                patch.diffs.append((self.DIFF_DELETE, line))\n            elif (sign == ' '):\n                patch.diffs.append((self.DIFF_EQUAL, line))\n            elif (sign == '@'):\n                break\n            elif (sign == ''):\n                pass\n            else:\n                raise ValueError((\"Invalid patch mode: '%s'\\n%s\" % (sign, line)))\n            del text[0]\n    return patches", "docstring": "Parse a textual representation of patches and return a list of patch\nobjects.\n\nArgs:\ntextline: Text representation of patches.\n\nReturns:\nArray of Patch objects.\n\nRaises:\nValueError: If invalid input.", "source": "codesearchnet"}
{"code": "def _build(self):\n    if ('w' not in self._initializers):\n        stddev = (1 / math.sqrt(np.prod(self._shape)))\n        self._initializers['w'] = tf.truncated_normal_initializer(stddev=stddev)\n    self._w = tf.get_variable('w', shape=self._shape, dtype=self._dtype, initializer=self._initializers['w'], partitioner=self._partitioners.get('w', None), regularizer=self._regularizers.get('w', None))\n    return self._w", "docstring": "Connects the TrainableTensor module into the graph.\n\nReturns:\nA Tensor of shape as determined in the constructor.", "source": "codesearchnet"}
{"code": "def _update_token(self, request):\n        \n\n        \n        self._source_credentials.refresh(request)\n\n        body = {\n            \"delegates\": self._delegates,\n            \"scope\": self._target_scopes,\n            \"lifetime\": str(self._lifetime) + \"s\"\n        }\n\n        headers = {\n            'Content-Type': 'application/json',\n        }\n\n        \n        self._source_credentials.apply(headers)\n\n        self.token, self.expiry = _make_iam_token_request(\n            request=request,\n            principal=self._target_principal,\n            headers=headers,\n            body=body)", "docstring": "Updates credentials with a new access_token representing\nthe impersonated account.\n\nArgs:\nrequest (google.auth.transport.requests.Request): Request object\nto use for refreshing credentials.", "source": "juraj-google-style"}
{"code": "def __init__(self, full_shape, var_offset):\n    if not isinstance(full_shape, (list, tuple)):\n        raise TypeError('`full_shape` must be a sequence (like tuple or list) instead of ' + type(full_shape).__name__)\n    if not isinstance(var_offset, (list, tuple)):\n        raise TypeError('`var_offset` must be a sequence (like tuple or list) instead of ' + type(var_offset).__name__)\n    if len(var_offset) != len(full_shape):\n        raise ValueError('Expected equal length, but `var_offset` is of length {} while full_shape is of length {}.'.format(len(var_offset), len(full_shape)))\n    for offset, shape in zip(var_offset, full_shape):\n        if offset < 0 or offset >= shape:\n            raise ValueError('Expected 0 <= offset < shape but found offset={}, shape={} for var_offset={}, full_shape={}'.format(offset, shape, var_offset, full_shape))\n    self._full_shape = full_shape\n    self._var_offset = var_offset", "docstring": "Constructor.\n\nArgs:\nfull_shape: Tuple or list of `int` indicating the full combined shape of\nthe partitioned variables.\nvar_offset: Tuple or list of `int` specifying offset of this partition\nwith respect to the full variable for each dimension.\n\nRaises:\nTypeError: If `full_shape` or `var_offset` is not a sequence.\nValueError: If `full_shape` or `var_offset` differ in length. If\n`var_offset` exceeds `full_shape` in any dimension.", "source": "github-repos"}
{"code": "def label(self):\n    with self.selenium.context(self.selenium.CONTEXT_CHROME):\n        return self.root.get_attribute('label')", "docstring": "Provide access to the notification label.\n\nReturns:\nstr: The notification label", "source": "codesearchnet"}
{"code": "def get_imagery(cls, lat, lon, date=None, dim=None, cloud_score=False):\n        \n        instance = cls('planetary/earth/imagery')\n\n        filters = {\n            'lat': lat,\n            'lon': lon,\n            'date': date,\n            'dim': dim,\n            'cloud_score': cloud_score\n        }\n\n        return instance.get_resource(**filters)", "docstring": "Returns satellite image\n\nArgs:\nlat: latitude float\nlon: longitude float\ndate: date instance of available date from `get_assets`\ndim: width and height of image in degrees as float\ncloud_score: boolean to calculate the percentage of the image covered by clouds\n\nReturns:\njson", "source": "juraj-google-style"}
{"code": "def get_extra_managed_storage_volume_paths(self, start=0, count=(- 1), filter='', sort=''):\n    uri = (self.URI + '/repair?alertFixType=ExtraManagedStorageVolumePaths')\n    return self._client.get_all(start, count, filter=filter, sort=sort, uri=uri)", "docstring": "Gets the list of extra managed storage volume paths.\n\nArgs:\nstart:\nThe first item to return, using 0-based indexing.\nIf not specified, the default is 0 - start with the first available item.\ncount:\nThe number of resources to return. A count of -1 requests all items.\nThe actual number of items in the response might differ from the requested\ncount if the sum of start and count exceeds the total number of items.\nfilter (list or str):\nA general filter/query string to narrow the list of items returned. The\ndefault is no filter; all resources are returned.\nsort:\nThe sort order of the returned data set. By default, the sort order is based\non create time with the oldest entry first.\n\nReturns:\nlist: A list of extra managed storage volume paths.", "source": "codesearchnet"}
{"code": "def build(self, var_list):\n    if self.built:\n        return\n    if var_list:\n        dtype = var_list[0].dtype\n    else:\n        dtype = backend.floatx()\n    super().build(var_list)\n    self._momentums, self._velocities = self.add_optimizer_variables(var_list, ['momentum', 'velocity'])\n    self._u_product = backend.Variable(1.0, dtype=dtype)", "docstring": "Initialize optimizer variables.\n\nNadam optimizer has 2 types of variables: momentums and velocities.\n\nArgs:\nvar_list: list of model variables to build Nadam variables on.", "source": "github-repos"}
{"code": "def make_basket_put_payoff(strikes: types.RealTensor, dtype: tf.DType=None, name: str=None) -> Callable[[types.RealTensor], types.RealTensor]:\n    name = name or 'put_valuer'\n    with tf.name_scope(name):\n        strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')\n        dtype = dtype or strikes.dtype\n        put_valuer = functools.partial(_put_valuer, strikes=strikes, dtype=dtype)\n    return put_valuer", "docstring": "Produces a callable from samples to payoff of a simple basket put option.\n\nArgs:\nstrikes: A `Tensor` of `dtype` consistent with `samples` and shape\n`[num_samples, batch_size]`.\ndtype: Optional `dtype`. Either `tf.float32` or `tf.float64`. If supplied,\nrepresents the `dtype` for the 'strikes' as well as for the input\nargument of the output payoff callable.\nDefault value: `None`, which means that the `dtype` inferred from\n`strikes` is used.\nname: Python `str` name prefixed to Ops created by the callable created\nby this function.\nDefault value: `None` which is mapped to the default name 'put_valuer'\n\nReturns:\nA callable from `Tensor` of shape\n`[batch_size, num_samples, num_exercise_times, dim]`\nand a scalar `Tensor` representing current time to a `Tensor` of shape\n`[num_samples, batch_size]`.", "source": "github-repos"}
{"code": "def wait_for_import(self, connection_id, wait_interval):\n    self.stdout.write(self.style.NOTICE('Waiting for import'), ending='')\n    state = utils.ConnectionStates.IMPORT_CONFIGURATION\n    while (state == utils.ConnectionStates.IMPORT_CONFIGURATION):\n        self.stdout.write(self.style.NOTICE('.'), ending='')\n        time.sleep(wait_interval)\n        try:\n            connection = utils.get_connection(connection_id)\n        except requests.HTTPError as e:\n            raise CommandError('Failed to fetch connection information.') from e\n        else:\n            state = connection['state']\n    self.stdout.write(self.style.NOTICE(' Done!'))", "docstring": "Wait until connection state is no longer ``IMPORT_CONFIGURATION``.\n\nArgs:\nconnection_id (str): Heroku Connect connection to monitor.\nwait_interval (int): How frequently to poll in seconds.\n\nRaises:\nCommandError: If fetch connection information fails.", "source": "codesearchnet"}
{"code": "def can_process_matrix(entry, matrix_tags):\n    if (len(matrix_tags) == 0):\n        return True\n    count = 0\n    if ('tags' in entry):\n        for tag in matrix_tags:\n            if (tag in entry['tags']):\n                count += 1\n    return (count > 0)", "docstring": "Check given matrix tags to be in the given list of matric tags.\n\nArgs:\nentry (dict): matrix item (in yaml).\nmatrix_tags (list): represents --matrix-tags defined by user in command line.\nReturns:\nbool: True when matrix entry can be processed.", "source": "codesearchnet"}
{"code": "def _ParseHeader(self, parser_mediator, structure):\n    \n    _, month, day, hours, minutes, seconds, year = structure.date_time\n\n    month = timelib.MONTH_DICT.get(month.lower(), 0)\n\n    time_elements_tuple = (year, month, day, hours, minutes, seconds)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n      date_time.is_local_time = True\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    self._last_month = month\n\n    event_data = XChatLogEventData()\n\n    if structure.log_action[0] == 'BEGIN':\n      self._xchat_year = year\n      event_data.text = 'XChat start logging'\n\n    elif structure.log_action[0] == 'END':\n      self._xchat_year = None\n      event_data.text = 'XChat end logging'\n\n    else:\n      logger.debug('Unknown log action: {0:s}.'.format(\n          ' '.join(structure.log_action)))\n      return\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED,\n        time_zone=parser_mediator.timezone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log header.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def softplus(x):\n    if any_symbolic_tensors((x,)):\n        return Softplus().symbolic_call(x)\n    return backend.nn.softplus(x)", "docstring": "Softplus activation function.\n\nIt is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural\nlogarithm and `exp` is the exponential function.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555])\n>>> keras.ops.softplus(x)\narray([0.45366603, 0.6931472, 1.008666], dtype=float32)", "source": "github-repos"}
{"code": "def get_data_dirs(__pkg: str) -> List[str]:\n    \n    dirs = [user_data(__pkg), ]\n    dirs.extend(path.expanduser(path.sep.join([d, __pkg]))\n                for d in getenv('XDG_DATA_DIRS',\n                                '/usr/local/share/:/usr/share/').split(':'))\n    return [d for d in dirs if path.isdir(d)]", "docstring": "Return all data directories for given package.\n\nArgs:\n__pkg: Package name", "source": "juraj-google-style"}
{"code": "def get_object(self, dn, filter, attributes, _connection=None):\n    connection = _connection\n    if (not connection):\n        connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'))\n        connection.bind()\n    connection.search(search_base=dn, search_filter=filter, attributes=attributes)\n    data = None\n    if (len(connection.response) > 0):\n        data = connection.response[0]['attributes']\n        data['dn'] = connection.response[0]['dn']\n    if (not _connection):\n        self.destroy_connection(connection)\n    return data", "docstring": "Gets an object at the specified dn and returns it.\n\nArgs:\ndn (str): The dn of the object to find.\nfilter (str): The LDAP syntax search filter.\nattributes (list): A list of LDAP attributes to get when searching.\n_connection (ldap3.Connection): A connection object to use when\nsearching. If not given, a temporary connection will be created,\nand destroyed after use.\n\nReturns:\ndict: A dictionary of the object info from LDAP", "source": "codesearchnet"}
{"code": "def output_csv(filehandle: TextIO, values: Iterable[str]) -> None:\n    line = ','.join(values)\n    filehandle.write((line + '\\n'))", "docstring": "Write a line of CSV. POOR; does not escape things properly. DEPRECATED.\n\nArgs:\nfilehandle: file to write to\nvalues: values", "source": "codesearchnet"}
{"code": "def squeeze(input: ragged_tensor.Ragged, axis=None, name=None):\n    with ops.name_scope(name, 'RaggedSqueeze', [input]):\n        input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)\n        if isinstance(input, tensor.Tensor):\n            return array_ops.squeeze(input, axis, name)\n        if axis is None:\n            raise ValueError('Ragged.squeeze must have an axis argument.')\n        if isinstance(axis, int):\n            axis = [axis]\n        elif not isinstance(axis, (list, tuple)) or not all((isinstance(d, int) for d in axis)):\n            raise TypeError('Axis must be a list or tuple of integers.')\n        dense_dims = []\n        ragged_dims = []\n        axis = [array_ops.get_positive_axis(d, input.shape.ndims, 'axis[%d]' % i, 'rank(input)') for i, d in enumerate(axis)]\n        for dim in axis:\n            if dim > input.ragged_rank:\n                dense_dims.append(dim - input.ragged_rank)\n            else:\n                ragged_dims.append(dim)\n        assertion_list = []\n        scalar_tensor_one = constant_op.constant(1, dtype=input.row_splits.dtype)\n        for i, r in enumerate(input.nested_row_lengths()):\n            if i + 1 in ragged_dims:\n                assertion_list.append(control_flow_assert.Assert(math_ops.reduce_all(math_ops.equal(r, scalar_tensor_one)), ['the given axis (axis = %d) is not squeezable!' % (i + 1)]))\n        if 0 in ragged_dims:\n            scalar_tensor_two = constant_op.constant(2, dtype=dtypes.int32)\n            assertion_list.append(control_flow_assert.Assert(math_ops.equal(array_ops.size(input.row_splits), scalar_tensor_two), ['the given axis (axis = 0) is not squeezable!']))\n        squeezed_rt = None\n        squeezed_rt = control_flow_ops.with_dependencies(assertion_list, input.flat_values)\n        if dense_dims:\n            squeezed_rt = array_ops.squeeze(squeezed_rt, dense_dims)\n        remaining_row_splits = []\n        remaining_row_splits = list()\n        for i, row_split in enumerate(input.nested_row_splits):\n            if i + 1 not in ragged_dims:\n                remaining_row_splits.append(row_split)\n        if remaining_row_splits and 0 in ragged_dims:\n            remaining_row_splits.pop(0)\n        squeezed_rt = RaggedTensor.from_nested_row_splits(squeezed_rt, remaining_row_splits)\n        if set(range(0, input.ragged_rank + 1)).issubset(set(ragged_dims)):\n            squeezed_rt = array_ops.squeeze(squeezed_rt, [0], name)\n        return squeezed_rt", "docstring": "Ragged compatible squeeze.\n\nIf `input` is a `tf.Tensor`, then this calls `tf.squeeze`.\n\nIf `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time,\nwhere `N` is the number of elements in the squeezed dimensions.\n\nArgs:\ninput: A potentially ragged tensor. The input to squeeze.\naxis: An optional list of ints. Defaults to `None`. If the `input` is\nragged, it only squeezes the dimensions listed. It fails if `input` is\nragged and axis is []. If `input` is not ragged it calls tf.squeeze. Note\nthat it is an error to squeeze a dimension that is not 1. It must be in\nthe range of [-rank(input), rank(input)).\nname: A name for the operation (optional).\n\nReturns:\nA potentially ragged tensor. Contains the same data as input,\nbut has one or more dimensions of size 1 removed.", "source": "github-repos"}
{"code": "def sample(self, qubits: List[ops.Qid], repetitions: int=1):\n    return self._stepper.sample_measurements(indices=[self.qubit_map[q] for q in qubits], repetitions=repetitions)", "docstring": "Samples from the wave function at this point in the computation.\n\nNote that this does not collapse the wave function.\n\nReturns:\nMeasurement results with True corresponding to the `|1>` state.\nThe outer list is for repetitions, and the inner corresponds to\nmeasurements ordered by the supplied qubits.", "source": "codesearchnet"}
{"code": "def fill(self, background_shape, img):\n        \n        background_shape = tuple(background_shape)\n        return self._fill(background_shape, img)", "docstring": "Return a proper background image of background_shape, given img.\n\nArgs:\nbackground_shape (tuple): a shape (h, w)\nimg: an image\nReturns:\na background image", "source": "juraj-google-style"}
{"code": "def _get_lp_matrix(spin_states, nodes, edges, offset_weight, gap_weight):\n    \n    if len(spin_states) == 0:\n        return None\n\n    \n    n_states = len(spin_states)\n    m_linear = len(nodes)\n    m_quadratic = len(edges)\n    matrix = np.empty((n_states, m_linear + m_quadratic + 2))   \n\n    \n    if spin_states.ndim == 1:\n        spin_states = np.expand_dims(spin_states, 1)\n    matrix[:, :m_linear] = spin_states\n\n    \n    node_indices = dict(zip(nodes, range(m_linear)))\n    for j, (u, v) in enumerate(edges):\n        u_ind = node_indices[u]\n        v_ind = node_indices[v]\n        matrix[:, j + m_linear] = np.multiply(matrix[:, u_ind], matrix[:, v_ind])\n\n    \n    matrix[:, -2] = offset_weight\n    matrix[:, -1] = gap_weight\n    return matrix", "docstring": "Creates an linear programming matrix based on the spin states, graph, and scalars provided.\nLP matrix:\n[spin_states, corresponding states of edges, offset_weight, gap_weight]\n\nArgs:\nspin_states: Numpy array of spin states\nnodes: Iterable\nedges: Iterable of tuples\noffset_weight: Numpy 1-D array or number\ngap_weight: Numpy 1-D array or a number", "source": "juraj-google-style"}
{"code": "def __init__(self, parameters, confirms=True):\n        \n        self.confirms = confirms\n        self.protocol = FedoraMessagingProtocolV2\n        self._parameters = parameters\n        \n        self._client_deferred = defer.Deferred()\n        self._client = None\n        self._consumers = {}", "docstring": "Create a new factory for protocol objects.\n\nAny exchanges, queues, or bindings provided here will be declared and\nset up each time a new protocol instance is created. In other words,\neach time a new connection is set up to the broker, it will start with\nthe declaration of these objects.\n\nArgs:\nparameters (pika.ConnectionParameters): The connection parameters.\nconfirms (bool): If true, attempt to turn on publish confirms extension.", "source": "juraj-google-style"}
{"code": "def LSTMCell(weights, m_prev, c_prev, x, pad):\n    xm = array_ops.concat([x, m_prev], 1)\n    xmw = math_ops.matmul(xm, weights)\n    in_value, in_gate, forget_gate, out_gate = array_ops.split(value=xmw, num_or_size_splits=4, axis=1)\n    in_value = math_ops.tanh(in_value)\n    in_gate = math_ops.sigmoid(in_gate)\n    forget_gate = math_ops.sigmoid(forget_gate)\n    out_gate = math_ops.sigmoid(out_gate)\n    c_next = Clip(Clip(forget_gate * c_prev) + Clip(in_gate * in_value))\n    m_next = Clip(out_gate * c_next)\n    c_next = c_prev * pad + c_next * (1.0 - pad)\n    m_next = m_prev * pad + m_next * (1.0 - pad)\n    return (m_next, c_next)", "docstring": "Unrolls a single LSTM cell with clipped activations forward by one step.\n\nArgs:\nweights: Weight matrix with shape LSTMCellWeightsShape.\nm_prev: Previous m states with shape [batch_size, num_nodes].\nc_prev: Previous c states with shape [batch_size, num_nodes].\nx: Input with shape [batch_size, num_inputs].\npad: Padding with shape [batch_size, 1].  Each padding value is either\n0 or 1, where 1 indicates padding; i.e. the input is shorter than the\nsequence length, and the (m, c) states should simply be passed through\nfrom the previous states.\nReturns:\nThe next (m, c) states, each with shape [batch_size, num_nodes].", "source": "github-repos"}
{"code": "def mark_as_done(self, **kwargs):\n    path = ('%s/%s/mark_as_done' % (self.manager.path, self.id))\n    server_data = self.manager.gitlab.http_post(path, **kwargs)\n    self._update_attrs(server_data)", "docstring": "Mark the todo as done.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTodoError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def get_all_configs():\n    all_functions = collections.OrderedDict([('Platform', get_platform()), ('CPU', get_cpu_type()), ('CPU arch', get_cpu_arch()), ('Distribution', get_distrib()), ('Distribution version', get_distrib_version()), ('GPU', get_gpu_type()[1]), ('GPU count', get_gpu_count()), ('CUDA version (default)', get_cuda_version_default()), ('CUDA versions (all)', get_cuda_version_all()), ('CUDA compute capability', get_cuda_compute_capability(get_gpu_type()[1])), ('cuDNN version', get_cudnn_version()), ('GCC version', get_gcc_version()), ('Python version (default)', get_python_version()), ('GNU C Lib (glibc) version', get_glibc_version()), ('libstdc++ version', get_libstdcpp_version()), ('CPU ISA (min requirement)', get_cpu_isa_version())])\n    configs_found = []\n    json_data = {}\n    missing = []\n    warning = []\n    for config, call_func in all_functions.items():\n        ret_val = call_func\n        if not ret_val:\n            configs_found.append([config, '\\x1b[91m\\x1b[1mMissing\\x1b[0m'])\n            missing.append([config])\n            json_data[config] = ''\n        elif ret_val == 'unknown':\n            configs_found.append([config, '\\x1b[93m\\x1b[1mUnknown type\\x1b[0m'])\n            warning.append([config, ret_val])\n            json_data[config] = 'unknown'\n        elif 'ISA' in config:\n            if not ret_val[1]:\n                configs_found.append([config, ret_val[0]])\n                json_data[config] = ret_val[0]\n            else:\n                configs_found.append([config, '\\x1b[91m\\x1b[1mMissing ' + str(ret_val[1][1:-1]) + '\\x1b[0m'])\n                missing.append([config, '\\n\\t=> Found %s but missing %s' % (str(ret_val[0]), str(ret_val[1]))])\n                json_data[config] = ret_val[0]\n        else:\n            configs_found.append([config, ret_val])\n            json_data[config] = ret_val\n    return (configs_found, missing, warning, json_data)", "docstring": "Runs all functions for detecting user machine configurations.\n\nReturns:\nTuple\n(List of all configurations found,\nList of all missing configurations,\nList of all configurations found with warnings,\nDict of all configurations)", "source": "github-repos"}
{"code": "def read_message(self, timeout):\n    with self._reader_lock:\n        raw_header = self._transport.read(struct.calcsize(AdbMessage.HEADER_STRUCT_FORMAT), timeout.remaining_ms)\n        if (not raw_header):\n            raise usb_exceptions.AdbProtocolError('Adb connection lost')\n        try:\n            raw_message = RawAdbMessage(*struct.unpack(AdbMessage.HEADER_STRUCT_FORMAT, raw_header))\n        except struct.error as exception:\n            raise usb_exceptions.AdbProtocolError('Unable to unpack ADB command (%s): %s (%s)', AdbMessage.HEADER_STRUCT_FORMAT, raw_header, exception)\n        if (raw_message.data_length > 0):\n            if timeout.has_expired():\n                _LOG.warning('Timed out between AdbMessage header and data, reading data anyway with 10ms timeout')\n                timeout = timeouts.PolledTimeout.from_millis(10)\n            data = self._transport.read(raw_message.data_length, timeout.remaining_ms)\n        else:\n            data = ''\n        return raw_message.to_adb_message(data)", "docstring": "Read an AdbMessage from this transport.\n\nArgs:\ntimeout: Timeout for the entire read operation, in the form of a\ntimeouts.PolledTimeout instance.  Note that for packets with a data\npayload, two USB reads are performed.\n\nReturns:\nThe ADB message read from the device.\n\nRaises:\nUsbReadFailedError: There's an error during read, including timeout.\nAdbProtocolError: A message is incorrectly formatted.\nAdbTimeoutError: timeout is already expired, or expires before we read the\nentire message, specifically between reading header and data packets.", "source": "codesearchnet"}
{"code": "def _to_tensor(x, dtype):\n    return tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)", "docstring": "Convert the input `x` to a tensor of type `dtype`.\n\nArgs:\nx: An object to be converted (numpy array, list, tensors).\ndtype: The destination type.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def format_error_message(exception_message, task_exception=False):\n    \n    lines = exception_message.split(\"\\n\")\n    if task_exception:\n        \n        \n        lines = lines[0:1] + lines[3:]\n        pass\n    return \"\\n\".join(lines)", "docstring": "Improve the formatting of an exception thrown by a remote function.\n\nThis method takes a traceback from an exception and makes it nicer by\nremoving a few uninformative lines and adding some space to indent the\nremaining lines nicely.\n\nArgs:\nexception_message (str): A message generated by traceback.format_exc().\n\nReturns:\nA string of the formatted exception message.", "source": "juraj-google-style"}
{"code": "def robust_zscore(mat, ctrl_mat=None, min_mad=0.1):\n    \n\n    \n    if ctrl_mat is not None:\n        medians = ctrl_mat.median(axis=1)\n        median_devs = abs(ctrl_mat.subtract(medians, axis=0))\n\n    \n    else:\n        medians = mat.median(axis=1)\n        median_devs = abs(mat.subtract(medians, axis=0))\n\n    sub = mat.subtract(medians, axis='index')\n    mads = median_devs.median(axis=1)\n\n    \n    mads = mads.clip(lower=min_mad)\n\n    \n    \n    zscore_df = sub.divide(mads * 1.4826, axis='index')\n\n    return zscore_df.round(rounding_precision)", "docstring": "Robustly z-score a pandas df along the rows.\n\nArgs:\nmat (pandas df): Matrix of data that z-scoring will be applied to\nctrl_mat (pandas df): Optional matrix from which to compute medians and MADs\n(e.g. vehicle control)\nmin_mad (float): Minimum MAD to threshold to; tiny MAD values will cause\nz-scores to blow up\n\nReturns:\nzscore_df (pandas_df): z-scored data", "source": "juraj-google-style"}
{"code": "def Equals(self, other):\n    if (other is None):\n        return False\n    if ((other.PrevHash.ToBytes() == self.PrevHash.ToBytes()) and (other.PrevIndex == self.PrevIndex)):\n        return True\n    return False", "docstring": "Test for equality.\n\nArgs:\nother (obj):\n\nReturns:\nbool: True `other` equals self.", "source": "codesearchnet"}
{"code": "def decode(self, ids):\n    (_, tmp_file_path) = tempfile.mkstemp()\n    wavfile.write(tmp_file_path, self._sample_rate, np.asarray(ids))\n    return tmp_file_path", "docstring": "Transform a sequence of float32 into a waveform.\n\nArgs:\nids: list of integers to be converted.\n\nReturns:\nPath to the temporary file where the waveform was saved.\n\nRaises:\nValueError: if the ids are not of the appropriate size.", "source": "codesearchnet"}
{"code": "def grid_destroy_from_ids(oargrid_jobids):\n    jobs = grid_reload_from_ids(oargrid_jobids)\n    for job in jobs:\n        job.delete()\n        logger.info(('Killing the jobs %s' % oargrid_jobids))", "docstring": "Destroy all the jobs with corresponding ids\n\nArgs:\noargrid_jobids (list): the ``(site, oar_job_id)`` list of tuple\nidentifying the jobs for each site.", "source": "codesearchnet"}
{"code": "def _parallel_part_functions(fns: Sequence[PartWithMatchFn], part: _T, with_default_output: bool=False, with_always_output: bool=False) -> AsyncIterable[_T]:\n    c_iters = [_eager_run_fn(fn, part) for fn, match_fn in fns if match_fn(part)]\n\n    async def result_iter():\n        has_output = False\n        for c_iter in c_iters:\n            async for c in c_iter:\n                has_output = True\n                yield c\n        if with_always_output or (not has_output and with_default_output):\n            yield part\n    return result_iter()", "docstring": "Executes each part function in a sequence of part functions concurrently.\n\nThis method is similar to `_chain_part_functions` except that all of the\nPartFns are exectued on exactly `part` instead of being chained together.\nThe resulting AsyncIterables returned by call each fn are concatenated\ntogether in the provided fns order.\n\nThis must be called called in an async context. It immediately schedules tasks\non the event loop to execute each fn in fns on on the part.\n\nArgs:\nfns: the part functions to execute on the part.\npart: the part to execute the function on.\nwith_default_output: When True if the resulting Iterable is empty `part`\nwill be yielded.\nwith_always_output: When True the input part will be yielded regardless of\nthe output of the fns. This is a stronger condition than\n`with_default_output`. When `with_always_output` is True,\n`with_default_output` is basically ignored.\n\nReturns:\nAn AsyncIterable that can be used to retrieve the results.\n\nNOTE: this method is non-blocking.", "source": "github-repos"}
{"code": "def init(images, num_channels, dim='2d', stride=2, kernel_size=7, maxpool=True, training=True, scope='init'):\n    conv = CONFIG[dim]['conv']\n    pool = CONFIG[dim]['max_pool']\n    with tf.variable_scope(scope):\n        net = conv(images, num_channels, kernel_size, strides=stride, padding='SAME', activation=None)\n        net = tf.layers.batch_normalization(net, training=training)\n        net = tf.nn.relu(net)\n        if maxpool:\n            net = pool(net, pool_size=3, strides=stride)\n        (x1, x2) = tf.split(net, 2, axis=CONFIG[dim]['split_axis'])\n        return (x1, x2)", "docstring": "Standard ResNet initial block used as first RevNet block.\n\nArgs:\nimages: [N, H, W, 3] tensor of input images to the model.\nnum_channels: Output depth of convolutional layer in initial block.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\nstride: stride for the convolution and pool layer.\nkernel_size: Size of the initial convolution filter\nmaxpool: If true, apply a maxpool after the convolution\ntraining: True for train phase, False for eval phase.\nscope: Optional scope for the init block.\n\nReturns:\nTwo [N, H, W, C] output activations from input images.", "source": "codesearchnet"}
{"code": "def physical_name(self):\n    pchar = self._libinput.libinput_seat_get_physical_name(self._handle)\n    return string_at(pchar).decode()", "docstring": "The physical name of the seat.\n\nFor libinput contexts created from udev, this is always the same value\nas passed into :meth:`~libinput.LibInputUdev.assign_seat` and all\nseats from that context will have the same physical name.\n\nThe physical name of the seat is one that is usually set by the system\nor lower levels of the stack. In most cases, this is the base filter\nfor devices - devices assigned to seats outside the current seat will\nnot be available to the caller.\n\nReturns:\nstr: The physical name of this seat.", "source": "codesearchnet"}
{"code": "def load_many(self, fobjs=None):\n        \n        if fobjs is not None:\n            \n            if not hasattr(fobjs, \"__iter__\"):\n                fobjs = [fobjs]\n\n            for index, (fobj, page) in enumerate(zip(fobjs, self.pages)):\n                if fobj is None:\n                    continue\n                elif isinstance(fobj, ft.DataFile):\n                    self.load(fobj, index)\n                elif isinstance(fobj, str):\n                    self.load_filename(fobj, index)\n                else:\n                    raise TypeError(\"Invalid object of class '{}'\".format(fobj.__class__.__name__))", "docstring": "Loads as many files as the number of pages\n\nArgs:\nfobjs: [filename or DataFile obj, ...]", "source": "juraj-google-style"}
{"code": "def extract_attribute_array(self, data_array, var_name):\n    if (var_name not in self.attributes.keys()):\n        self.attributes[var_name] = []\n    for t in range(self.times.size):\n        self.attributes[var_name].append(data_array[(self.i[t], self.j[t])])", "docstring": "Extracts data from a 2D array that has the same dimensions as the grid used to identify the object.\n\nArgs:\ndata_array: 2D numpy array", "source": "codesearchnet"}
{"code": "def make_iaf_stack(total_event_size, num_hidden_layers=2, seed=None, dtype=tf.float32):\n    seed = tfd.SeedStream(seed, 'make_iaf_stack')\n\n    def make_iaf():\n        'Create an IAF.'\n        initializer = tf.compat.v2.keras.initializers.VarianceScaling((2 * 0.01), seed=(seed() % ((2 ** 31) - 1)))\n        made = tfb.AutoregressiveLayer(params=2, event_shape=[total_event_size], hidden_units=([total_event_size] * num_hidden_layers), activation=tf.nn.elu, kernel_initializer=initializer, dtype=dtype)\n\n        def shift_and_scale(x):\n            x.set_shape(x.shape.merge_with((([None] * (x.shape.ndims - 1)) + [total_event_size])))\n            return tf.unstack(made(x), num=2, axis=(- 1))\n        return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))\n\n    def make_swap():\n        'Create an swap.'\n        permutation = list(reversed(range(total_event_size)))\n        return tfb.Permute(permutation)\n    bijector = make_iaf()\n    bijector = make_swap()(bijector)\n    bijector = make_iaf()(bijector)\n    bijector = make_swap()(bijector)\n    bijector = make_iaf()(bijector)\n    bijector = make_swap()(bijector)\n    return bijector", "docstring": "Creates an stacked IAF bijector.\n\nThis bijector operates on vector-valued events.\n\nArgs:\ntotal_event_size: Number of dimensions to operate over.\nnum_hidden_layers: How many hidden layers to use in each IAF.\nseed: Random seed for the initializers.\ndtype: DType for the variables.\n\nReturns:\nbijector: The created bijector.", "source": "codesearchnet"}
{"code": "def context_managers(self, kwargs):\n    del kwargs\n    return []", "docstring": "Return context managers for running the test combination.\n\nThe test combination will run under all context managers that all\n`TestCombination` instances return.\n\nArgs:\nkwargs:  Arguments and their values that are passed to the test\ncombination.\n\nReturns:\nA list of instantiated context managers.", "source": "github-repos"}
{"code": "def synthetic_source(self, value):\n        \n        if value == self._defaults['ai.operation.syntheticSource'] and 'ai.operation.syntheticSource' in self._values:\n            del self._values['ai.operation.syntheticSource']\n        else:\n            self._values['ai.operation.syntheticSource'] = value", "docstring": "The synthetic_source property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def Dump(self, output):\n    data = {'current_content_length': self._current_content_length, 'is_last': self._is_last, 'server': self._request_builder.GetServer(), 'upload_url': self._upload_url, 'version': self._request_builder.GetVersion()}\n    try:\n        yaml.dump(data, output)\n    except yaml.YAMLError as e:\n        raise googleads.errors.GoogleAdsError(('Error dumping IncrementalUploadHelper to file: %s' % str(e)))", "docstring": "Serialize the IncrementalUploadHelper and store in file-like object.\n\nArgs:\noutput: a file-like object where the status of the IncrementalUploadHelper\nwill be written.\n\nRaises:\nGoogleAdsError: If a YAMLError occurs while writing to the file.", "source": "codesearchnet"}
{"code": "def _DeserializeAttributeContainer(self, container_type, serialized_data):\n    if (not serialized_data):\n        return None\n    if self._serializers_profiler:\n        self._serializers_profiler.StartTiming(container_type)\n    try:\n        serialized_string = serialized_data.decode('utf-8')\n    except UnicodeDecodeError as exception:\n        raise IOError('Unable to decode serialized data: {0!s}'.format(exception))\n    attribute_container = self._serializer.ReadSerialized(serialized_string)\n    if self._serializers_profiler:\n        self._serializers_profiler.StopTiming(container_type)\n    return attribute_container", "docstring": "Deserializes an attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nserialized_data (bytes): serialized attribute container data.\n\nReturns:\nAttributeContainer: attribute container or None.\n\nRaises:\nIOError: if the serialized data cannot be decoded.\nOSError: if the serialized data cannot be decoded.", "source": "codesearchnet"}
{"code": "def is_github_repo_owner_the_official_one(context, repo_owner):\n    \n    official_repo_owner = context.config['official_github_repos_owner']\n    if not official_repo_owner:\n        raise ConfigError(\n            'This worker does not have a defined owner for official GitHub repositories. '\n            'Given \"official_github_repos_owner\": {}'.format(official_repo_owner)\n        )\n\n    return official_repo_owner == repo_owner", "docstring": "Given a repo_owner, check if it matches the one configured to be the official one.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nrepo_owner (str): the repo_owner to verify\n\nRaises:\nscriptworker.exceptions.ConfigError: when no official owner was defined\n\nReturns:\nbool: True when ``repo_owner`` matches the one configured to be the official one", "source": "juraj-google-style"}
{"code": "def _ExpectedKeysForEntry(self, entry):\n    return [entry.name]", "docstring": "Generate a list of expected cache keys for this type of map.\n\nArgs:\nentry: A SshkeyMapEntry\n\nReturns:\nA list of strings", "source": "github-repos"}
{"code": "def FindHeader(self, header):\n    \n    for section_list in self.include_list:\n      for f in section_list:\n        if f[0] == header:\n          return f[1]\n    return -1", "docstring": "Check if a header has already been included.\n\nArgs:\nheader: header to check.\nReturns:\nLine number of previous occurrence, or -1 if the header has not\nbeen seen before.", "source": "juraj-google-style"}
{"code": "def power_spectral_density(x, time_step, freq_range = None):\n    \n    N = len(x)\n    P = 2 * np.abs(np.fft.rfft(x))**2 / N * time_step\n    F = np.fft.rfftfreq(len(x), time_step)\n\n    if freq_range is not None:\n        brange = np.all([F >= freq_range[0], F <= freq_range[1]], axis=0)\n        P = P[brange]\n        F = F[brange]\n\n    return F, P", "docstring": "returns the *single sided* power spectral density of the time trace x which is sampled at intervals time_step\nArgs:\nx (array):  timetrace\ntime_step (float): sampling interval of x\nfreq_range (array or tuple): frequency range in the form [f_min, f_max] to return only the spectrum within this range\n\nReturns:", "source": "juraj-google-style"}
{"code": "def set_flat(self, new_weights):\n    self._check_sess()\n    shapes = [v.get_shape().as_list() for v in self.variables.values()]\n    arrays = unflatten(new_weights, shapes)\n    placeholders = [self.placeholders[k] for (k, v) in self.variables.items()]\n    self.sess.run(list(self.assignment_nodes.values()), feed_dict=dict(zip(placeholders, arrays)))", "docstring": "Sets the weights to new_weights, converting from a flat array.\n\nNote:\nYou can only set all weights in the network using this function,\ni.e., the length of the array must match get_flat_size.\n\nArgs:\nnew_weights (np.ndarray): Flat array containing weights.", "source": "codesearchnet"}
{"code": "def getStreamNetworkAsWkt(self, session, withNodes=True):\n        \n        wkt_list = []\n\n        for link in self.streamLinks:\n            wkt_link = link.getAsWkt(session)\n\n            if wkt_link:\n                wkt_list.append(wkt_link)\n\n            if withNodes:\n                for node in link.nodes:\n                    wkt_node = node.getAsWkt(session)\n\n                    if wkt_node:\n                        wkt_list.append(wkt_node)\n\n        return 'GEOMCOLLECTION ({0})'.format(', '.join(wkt_list))", "docstring": "Retrieve the stream network geometry in Well Known Text format.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nwithNodes (bool, optional): Include nodes. Defaults to False.\n\nReturns:\nstr: Well Known Text string.", "source": "juraj-google-style"}
{"code": "def refresh(self, refresh_binary=True):\n    updated_self = self.repo.get_resource(self.uri)\n    if (not isinstance(self, type(updated_self))):\n        raise Exception(('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self))))\n    if updated_self:\n        self.status_code = updated_self.status_code\n        self.rdf.data = updated_self.rdf.data\n        self.headers = updated_self.headers\n        self.exists = updated_self.exists\n        if (type(self) != NonRDFSource):\n            self._parse_graph()\n        self.versions = SimpleNamespace()\n        if ((type(updated_self) == NonRDFSource) and refresh_binary):\n            self.binary.refresh(updated_self)\n        if hasattr(self, '_post_refresh'):\n            self._post_refresh()\n        del updated_self\n    else:\n        logger.debug('resource %s not found, dumping values')\n        self._empty_resource_attributes()", "docstring": "Performs GET request and refreshes RDF information for resource.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def object_hook(obj):\n        \n        try:\n            if '__type' in obj:\n                obj_type = obj['__type']\n                cls = getattr(cloud_inquisitor.schema, obj_type)\n                if hasattr(cls, 'from_json'):\n                    return cls.from_json(obj)\n\n            key, value = next(iter(obj.items()))\n            if key == ' t':\n                return tuple(value)\n            elif key == ' u':\n                return uuid.UUID(value)\n            elif key == ' b':\n                return b64decode(value)\n            elif key == ' m':\n                return Markup(value)\n            elif key == ' d':\n                return parse_date(value)\n\n            return obj\n        except Exception:\n            log.exception('Error during data deserialization')", "docstring": "Checks to see if the `__type`-hinting field is available in the object being de-serialized. If present, and\nthe class referenced has a `from_json` function it will return the generated object, else a standard dic\nwill be returned\n\nArgs:\nobj: Object to be deserialized\n\nReturns:\nDeserialized object or regular python objec", "source": "juraj-google-style"}
{"code": "def read(self, size=-1):\n        \n        if not self._readable:\n            raise UnsupportedOperation('read')\n\n        \n        if self._seek == self._size:\n            return b''\n\n        \n        if size == self._buffer_size:\n            queue_index = self._seek\n\n            \n            if queue_index == 0:\n                self._preload_range()\n\n            \n            with handle_os_exceptions():\n                buffer = self._read_queue.pop(queue_index).result()\n\n            \n            buffer_size = self._buffer_size\n            index = queue_index + buffer_size * self._max_buffers\n            if index < self._size:\n                self._read_queue[index] = self._workers.submit(\n                    self._read_range, index, index + buffer_size)\n\n            \n                self._seek += buffer_size\n            else:\n                self._seek = self._size\n\n            return buffer\n\n        \n        if size != -1:\n            buffer = bytearray(size)\n\n        \n        else:\n            buffer = bytearray()\n\n        read_size = self.readinto(buffer)\n        return memoryview(buffer)[:read_size].tobytes()", "docstring": "Read and return up to size bytes,\nwith at most one call to the underlying raw stream’s.\n\nUse at most one call to the underlying raw stream’s read method.\n\nArgs:\nsize (int): Number of bytes to read. -1 to read the\nstream until end.\n\nReturns:\nbytes: Object content", "source": "juraj-google-style"}
{"code": "def local_batch_predict(model_dir, csv_file_pattern, output_dir, output_format, batch_size=100):\n    file_io.recursive_create_dir(output_dir)\n    csv_files = file_io.get_matching_files(csv_file_pattern)\n    if (len(csv_files) == 0):\n        raise ValueError(('No files found given ' + csv_file_pattern))\n    with tf.Graph().as_default(), tf.Session() as sess:\n        (input_alias_map, output_alias_map) = _tf_load_model(sess, model_dir)\n        csv_tensor_name = list(input_alias_map.values())[0]\n        output_schema = _get_output_schema(sess, output_alias_map)\n        for csv_file in csv_files:\n            output_file = os.path.join(output_dir, ((('predict_results_' + os.path.splitext(os.path.basename(csv_file))[0]) + '.') + output_format))\n            with file_io.FileIO(output_file, 'w') as f:\n                prediction_source = _batch_csv_reader(csv_file, batch_size)\n                for batch in prediction_source:\n                    batch = [l.rstrip() for l in batch if l]\n                    predict_results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: batch})\n                    formatted_results = _format_results(output_format, output_schema, predict_results)\n                    f.write(('\\n'.join(formatted_results) + '\\n'))\n    file_io.write_string_to_file(os.path.join(output_dir, 'predict_results_schema.json'), json.dumps(output_schema, indent=2))", "docstring": "Batch Predict with a specified model.\n\nIt does batch prediction, saves results to output files and also creates an output\nschema file. The output file names are input file names prepended by 'predict_results_'.\n\nArgs:\nmodel_dir: The model directory containing a SavedModel (usually saved_model.pb).\ncsv_file_pattern: a pattern of csv files as batch prediction source.\noutput_dir: the path of the output directory.\noutput_format: csv or json.\nbatch_size: Larger batch_size improves performance but may\ncause more memory usage.", "source": "codesearchnet"}
{"code": "def concatenate_context_input(context_input, sequence_input):\n    seq_rank_check = check_ops.assert_rank(sequence_input, 3, message='sequence_input must have rank 3', data=[array_ops.shape(sequence_input)])\n    seq_type_check = check_ops.assert_type(sequence_input, dtypes.float32, message='sequence_input must have dtype float32; got {}.'.format(sequence_input.dtype))\n    ctx_rank_check = check_ops.assert_rank(context_input, 2, message='context_input must have rank 2', data=[array_ops.shape(context_input)])\n    ctx_type_check = check_ops.assert_type(context_input, dtypes.float32, message='context_input must have dtype float32; got {}.'.format(context_input.dtype))\n    with ops.control_dependencies([seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):\n        padded_length = array_ops.shape(sequence_input)[1]\n        tiled_context_input = array_ops.tile(array_ops.expand_dims(context_input, 1), array_ops.concat([[1], [padded_length], [1]], 0))\n    return array_ops.concat([sequence_input, tiled_context_input], 2)", "docstring": "Replicates `context_input` across all timesteps of `sequence_input`.\n\nExpands dimension 1 of `context_input` then tiles it `sequence_length` times.\nThis value is appended to `sequence_input` on dimension 2 and the result is\nreturned.\n\nArgs:\ncontext_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.\nsequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,\npadded_length, d0]`.\n\nReturns:\nA `Tensor` of dtype `float32` and shape `[batch_size, padded_length,\nd0 + d1]`.\n\nRaises:\nValueError: If `sequence_input` does not have rank 3 or `context_input` does\nnot have rank 2.", "source": "github-repos"}
{"code": "def mean_area_distance(item_a, item_b, max_value):\n    mean_area_a = np.mean([item_a.size(t) for t in item_a.times])\n    mean_area_b = np.mean([item_b.size(t) for t in item_b.times])\n    return (np.abs((mean_area_a - mean_area_b)) / float(max_value))", "docstring": "Absolute difference in the means of the areas of each track over time.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def corrcoef(x):\n    if any_symbolic_tensors((x,)):\n        return Corrcoef().symbolic_call(x)\n    return backend.numpy.corrcoef(x)", "docstring": "Compute the Pearson correlation coefficient matrix.\n\nArgs:\nx: A 2D tensor of shape `(N, D)`, where N is the number of variables\nand D is the number of observations.\n\nReturns:\nA tensor of shape `(N, N)` representing the correlation matrix.", "source": "github-repos"}
{"code": "def rasterize(layer, rast):\n    \n    driver = ImageDriver('MEM')\n    r2 = driver.raster(driver.ShortName, rast.size)\n    r2.affine = rast.affine\n    sref = rast.sref\n    if not sref.srid:\n        sref = SpatialReference(4326)\n    r2.sref = sref\n    ml = MemoryLayer(sref, layer.GetGeomType())\n    ml.load(layer)\n    status = gdal.RasterizeLayer(\n        r2.ds, (1,), ml.layer, options=['ATTRIBUTE=%s' % ml.id])\n    ml.close()\n    return r2", "docstring": "Returns a Raster from layer features.\n\nArguments:\nlayer -- Layer to rasterize\nrast -- Raster with target affine, size, and sref", "source": "juraj-google-style"}
{"code": "def get_local_filter_directives(ast, current_schema_type, inner_vertex_fields):\n    result = []\n    if ast.directives:\n        for directive_obj in ast.directives:\n            if (directive_obj.name.value == 'filter'):\n                filtered_field_name = get_ast_field_name_or_none(ast)\n                if is_filter_with_outer_scope_vertex_field_operator(directive_obj):\n                    if (not is_vertex_field_type(current_schema_type)):\n                        raise GraphQLCompilationError(u'Found disallowed filter on a property field: {} {} {}'.format(directive_obj, current_schema_type, filtered_field_name))\n                    elif isinstance(ast, InlineFragment):\n                        raise GraphQLCompilationError(u'Found disallowed filter on a type coercion: {} {}'.format(directive_obj, current_schema_type))\n                    else:\n                        pass\n                else:\n                    operation = FilterOperationInfo(directive=directive_obj, field_name=filtered_field_name, field_type=current_schema_type, field_ast=ast)\n                    result.append(operation)\n    if inner_vertex_fields:\n        for inner_ast in inner_vertex_fields:\n            for directive_obj in inner_ast.directives:\n                if is_filter_with_outer_scope_vertex_field_operator(directive_obj):\n                    filtered_field_name = get_ast_field_name(inner_ast)\n                    filtered_field_type = get_vertex_field_type(current_schema_type, filtered_field_name)\n                    operation = FilterOperationInfo(directive=directive_obj, field_name=filtered_field_name, field_type=filtered_field_type, field_ast=inner_ast)\n                    result.append(operation)\n    return result", "docstring": "Get all filter directives that apply to the current field.\n\nThis helper abstracts away the fact that some vertex field filtering operators apply on the\ninner scope (the scope of the inner vertex field on which they are applied), whereas some apply\non the outer scope (the scope that contains the inner vertex field).\nSee filters.py for more information.\n\nArgs:\nast: a GraphQL AST object for which to load local filters, from the graphql library\ncurrent_schema_type: GraphQLType, the schema type at the current AST location\ninner_vertex_fields: a list of inner AST objects representing vertex fields that are within\nthe current field. If currently processing a property field (i.e.\nthere are no inner vertex fields), this argument may be set to None.\n\nReturns:\nlist of FilterOperationInfo objects.\nIf the field_ast field is of type InlineFragment, the field_name field is set to None.", "source": "codesearchnet"}
{"code": "def get_cursor(self):\n    (x, y) = self._cursor\n    (width, height) = self.parent.get_size()\n    while (x >= width):\n        x -= width\n        y += 1\n    if ((y >= height) and (self.scrollMode == 'scroll')):\n        y = (height - 1)\n    return (x, y)", "docstring": "Return the virtual cursor position.\n\nThe cursor can be moved with the :any:`move` method.\n\nReturns:\nTuple[int, int]: The (x, y) coordinate of where :any:`print_str`\nwill continue from.\n\n.. seealso:: :any:move`", "source": "codesearchnet"}
{"code": "def scan_file(path):\n    path = os.path.abspath(path)\n    assert os.path.exists(path), (\"Unreachable file '%s'.\" % path)\n    result = sh.clamscan(path, no_summary=True, infected=True, _ok_code=[0, 1])\n    return _parse_result(result)", "docstring": "Scan `path` for viruses using ``clamscan`` program.\n\nArgs:\npath (str): Relative or absolute path of file/directory you need to\nscan.\n\nReturns:\ndict: ``{filename: (\"FOUND\", \"virus type\")}`` or blank dict.\n\nRaises:\nAssertionError: When the internal file doesn't exists.", "source": "codesearchnet"}
{"code": "def ParseBookmarkAnnotationRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = FirefoxPlacesBookmarkAnnotationEventData()\n    event_data.content = self._GetRowValue(query_hash, row, 'content')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'lastModified')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a bookmark annotation row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def lstat(self, entry_path, dir_fd=None):\n    entry_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)\n    return self.filesystem.stat(entry_path, follow_symlinks=False)", "docstring": "Return the os.stat-like tuple for entry_path, not following symlinks.\n\nArgs:\nentry_path:  path to filesystem object to retrieve.\ndir_fd: If not `None`, the file descriptor of a directory, with\n`entry_path` being relative to this directory.\nNew in Python 3.3.\n\nReturns:\nthe FakeStatResult object corresponding to `entry_path`.\n\nRaises:\nOSError: if the filesystem object doesn't exist.", "source": "codesearchnet"}
{"code": "def write_config(config, filename=None):\n    \n    if not filename:\n        filename = CONFIG_DEFAULT_PATH\n\n    with open(filename, 'w') as f:\n        json.dump(config, f, indent=4)", "docstring": "Write the provided configuration to a specific location.\n\nArgs:\nconfig (dict): a dictionary with the configuration to load.\nfilename (str): the name of the file that will store the new configuration. Defaults to ``None``.\nIf ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.", "source": "juraj-google-style"}
{"code": "def value_text(tensor, is_repr=False) -> AnyStr:\n    if tensor._prefer_custom_summarizer():\n        text = tensor._summarize_value()\n        if is_repr:\n            text = 'value=' + text\n    else:\n        text = numpy_text(tensor, is_repr=is_repr)\n        if is_repr:\n            text = 'numpy=' + text\n    return text", "docstring": "Either the NumPy value or a custom TensorFlow formatting of `tensor`.\n\nCustom formatting is used for custom device tensors, e.g. parallel tensors\nwith multiple components on different devices.\n\nArgs:\ntensor: The tensor to format.\nis_repr: Controls the style/verbosity of formatting.\n\nReturns:\nThe formatted tensor.", "source": "github-repos"}
{"code": "def set_fore(self, x: int, y: int, r: int, g: int, b: int, char: str) -> None:\n    i = ((self.width * y) + x)\n    self.fore_r[i] = r\n    self.fore_g[i] = g\n    self.fore_b[i] = b\n    self.char[i] = ord(char)", "docstring": "Set the character and foreground color of one cell.\n\nArgs:\nx (int): X position to change.\ny (int): Y position to change.\nr (int): Red foreground color, from 0 to 255.\ng (int): Green foreground color, from 0 to 255.\nb (int): Blue foreground color, from 0 to 255.\nchar (AnyStr): A single character str or bytes object.", "source": "codesearchnet"}
{"code": "def lowercase_term_id(term_id: str) -> str:\n    \n    (ns, val) = term_id.split(\":\", maxsplit=1)\n    term_id = f\"{ns}:{val.lower()}\"\n\n    return term_id", "docstring": "Lowercase the term value (not the namespace prefix)\n\nArgs:\nterm_id (str): term identifier with namespace prefix, e.g. MESH:Atherosclerosis\n\nReturns:\nstr: lowercased, e.g. MESH:atherosclerosis", "source": "juraj-google-style"}
{"code": "def then_by(self, key_selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call then_by() on a closed OrderedQueryable.')\n    if (not is_callable(key_selector)):\n        raise TypeError('then_by() parameter key_selector={key_selector} is not callable'.format(key_selector=repr(key_selector)))\n    self._funcs.append(((- 1), key_selector))\n    return self", "docstring": "Introduce subsequent ordering to the sequence with an optional key.\n\nThe returned sequence will be sorted in ascending order by the\nselected key.\n\nNote: This method uses deferred execution.\n\nArgs:\nkey_selector: A unary function the only positional argument to\nwhich is the element value from which the key will be\nselected.  The return value should be the key from that\nelement.\n\nReturns:\nAn OrderedQueryable over the sorted items.\n\nRaises:\nValueError: If the OrderedQueryable is closed().\nTypeError: If key_selector is not callable.", "source": "codesearchnet"}
{"code": "def verify(token, key, algorithms, verify=True):\n    (header, payload, signing_input, signature) = _load(token)\n    if verify:\n        _verify_signature(signing_input, header, signature, key, algorithms)\n    return payload", "docstring": "Verifies a JWS string's signature.\n\nArgs:\ntoken (str): A signed JWS to be verified.\nkey (str or dict): A key to attempt to verify the payload with. Can be\nindividual JWK or JWK set.\nalgorithms (str or list): Valid algorithms that should be used to verify the JWS.\n\nReturns:\nstr: The str representation of the payload, assuming the signature is valid.\n\nRaises:\nJWSError: If there is an exception verifying a token.\n\nExamples:\n\n>>> token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'\n>>> jws.verify(token, 'secret', algorithms='HS256')", "source": "codesearchnet"}
{"code": "def _prompt_split_image(self, aspect_ratio, num_patches_per_chunk):\n    img_string = '<|image_start|>'\n    ratio_h, ratio_w = aspect_ratio\n    if ratio_h * ratio_w > 1:\n        for yy in range(ratio_h):\n            for xx in range(ratio_w):\n                img_string += '<|patch|>' * num_patches_per_chunk\n                if xx < ratio_w - 1:\n                    img_string += '<|tile_x_separator|>'\n            img_string += '<|tile_y_separator|>'\n    img_string += '<|image|>'\n    img_string += '<|patch|>' * num_patches_per_chunk\n    img_string += '<|image_end|>'\n    return img_string", "docstring": "Create a structured string representation of image tokens\n\nArgs:\nnum_patches: Number of patches in the image\n\nReturns:\nString with appropriate image tokens", "source": "github-repos"}
{"code": "def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):\n    partial = dataset._build_resource(fields)\n    if (dataset.etag is not None):\n        headers = {'If-Match': dataset.etag}\n    else:\n        headers = None\n    api_response = self._call_api(retry, method='PATCH', path=dataset.path, data=partial, headers=headers)\n    return Dataset.from_api_repr(api_response)", "docstring": "Change some fields of a dataset.\n\nUse ``fields`` to specify which fields to update. At least one field\nmust be provided. If a field is listed in ``fields`` and is ``None`` in\n``dataset``, it will be deleted.\n\nIf ``dataset.etag`` is not ``None``, the update will only\nsucceed if the dataset on the server has the same ETag. Thus\nreading a dataset with ``get_dataset``, changing its fields,\nand then passing it to ``update_dataset`` will ensure that the changes\nwill only be saved if no modifications to the dataset occurred\nsince the read.\n\nArgs:\ndataset (google.cloud.bigquery.dataset.Dataset):\nThe dataset to update.\nfields (Sequence[str]):\nThe properties of ``dataset`` to change (e.g. \"friendly_name\").\nretry (google.api_core.retry.Retry, optional):\nHow to retry the RPC.\n\nReturns:\ngoogle.cloud.bigquery.dataset.Dataset:\nThe modified ``Dataset`` instance.", "source": "codesearchnet"}
{"code": "async def _send(self, request_bytes, body_bytes, h11_connection):\n        \n        await self.sock.send_all(h11_connection.send(request_bytes))\n        if body_bytes is not None:\n            await self.sock.send_all(h11_connection.send(body_bytes))\n        await self.sock.send_all(h11_connection.send(h11.EndOfMessage()))", "docstring": "Takes a package and body, combines then, then shoots 'em off in to\nthe ether.\n\nArgs:\npackage (list of str): The header package.\nbody (str): The str representation of the body.", "source": "juraj-google-style"}
{"code": "def set_secondary_ips(self, name, vrid, secondary_ips, run=True):\n    cmds = []\n    curr_sec_ips = []\n    vrrps = self.get(name)\n    if (vrrps and (vrid in vrrps)):\n        curr_sec_ips = vrrps[vrid]['secondary_ip']\n    for sec_ip in secondary_ips:\n        if ((type(sec_ip) is not str) or (not re.match('^\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$', sec_ip))):\n            raise ValueError(\"vrrp property 'secondary_ip' must be a list of properly formatted ip address strings\")\n    intersection = list((set(curr_sec_ips) & set(secondary_ips)))\n    remove = list((set(curr_sec_ips) - set(intersection)))\n    add = list((set(secondary_ips) - set(intersection)))\n    for sec_ip in remove:\n        cmds.append(('no vrrp %d ip %s secondary' % (vrid, sec_ip)))\n    for sec_ip in add:\n        cmds.append(('vrrp %d ip %s secondary' % (vrid, sec_ip)))\n    cmds = sorted(cmds)\n    if run:\n        result = self.configure_interface(name, cmds)\n        if (result is False):\n            return self.error\n        return result\n    return cmds", "docstring": "Configure the secondary_ip property of the vrrp\n\nNotes:\nset_secondary_ips takes a list of secondary ip addresses\nwhich are to be set on the virtal router. An empty list will\nremove any existing secondary ip addresses from the vrrp.\nA list containing addresses will configure the virtual router\nwith only the addresses specified in the list - any existing\naddresses not included in the list will be removed.\n\nArgs:\nname (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be managed.\nsecondary_ips (list): A list of secondary ip addresses to\nbe assigned to the virtual router.\nrun (boolean): Set to True to execute the command, False to\nreturn a string with the formatted command.\n\nReturns:\nIf run is True, returns True if the command executed successfully,\nerror if failure.\n\nIf run is False, returns the formatted command string which can\nbe passed to the node", "source": "codesearchnet"}
{"code": "def _compute_edges(self):\n    (nodes1, nodes2, nodes3) = _surface_helpers.compute_edge_nodes(self._nodes, self._degree)\n    edge1 = _curve_mod.Curve(nodes1, self._degree, _copy=False)\n    edge2 = _curve_mod.Curve(nodes2, self._degree, _copy=False)\n    edge3 = _curve_mod.Curve(nodes3, self._degree, _copy=False)\n    return (edge1, edge2, edge3)", "docstring": "Compute the edges of the current surface.\n\nReturns:\nTuple[~curve.Curve, ~curve.Curve, ~curve.Curve]: The edges of\nthe surface.", "source": "codesearchnet"}
{"code": "def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):\n    assert ('noise_shape' not in kwargs)\n    if broadcast_dims:\n        shape = tf.shape(x)\n        ndims = len(x.get_shape())\n        broadcast_dims = [((dim + ndims) if (dim < 0) else dim) for dim in broadcast_dims]\n        kwargs['noise_shape'] = [(1 if (i in broadcast_dims) else shape[i]) for i in range(ndims)]\n    return tf.nn.dropout(x, keep_prob, **kwargs)", "docstring": "Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.\n\nInstead of specifying noise_shape, this function takes broadcast_dims -\na list of dimension numbers in which noise_shape should be 1.  The random\nkeep/drop tensor has dimensionality 1 along these dimensions.\n\nArgs:\nx: a floating point tensor.\nkeep_prob: A scalar Tensor with the same type as x.\nThe probability that each element is kept.\nbroadcast_dims: an optional list of integers\nthe dimensions along which to broadcast the keep/drop flags.\n**kwargs: keyword arguments to tf.nn.dropout other than \"noise_shape\".\n\nReturns:\nTensor of the same shape as x.", "source": "codesearchnet"}
{"code": "def __decode_dictionary(self, message_type, dictionary):\n        \n        message = message_type()\n        for key, value in six.iteritems(dictionary):\n            if value is None:\n                try:\n                    message.reset(key)\n                except AttributeError:\n                    pass  \n                continue\n\n            try:\n                field = message.field_by_name(key)\n            except KeyError:\n                \n                variant = self.__find_variant(value)\n                if variant:\n                    message.set_unrecognized_field(key, value, variant)\n                continue\n\n            if field.repeated:\n                \n                if not isinstance(value, list):\n                    value = [value]\n                valid_value = [self.decode_field(field, item)\n                               for item in value]\n                setattr(message, field.name, valid_value)\n                continue\n            \n            if value == []:\n                continue\n            try:\n                setattr(message, field.name, self.decode_field(field, value))\n            except messages.DecodeError:\n                \n                if not isinstance(field, messages.EnumField):\n                    raise\n                variant = self.__find_variant(value)\n                if variant:\n                    message.set_unrecognized_field(key, value, variant)\n\n        return message", "docstring": "Merge dictionary in to message.\n\nArgs:\nmessage: Message to merge dictionary in to.\ndictionary: Dictionary to extract information from.  Dictionary\nis as parsed from JSON.  Nested objects will also be dictionaries.", "source": "juraj-google-style"}
{"code": "def sin(x):\n    return math_ops.sin(x)", "docstring": "Computes sin of x element-wise.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def Decrement(self, key):\n    with self._lock:\n        if _IsHashable(key):\n            if key in self._d:\n                if self._d[key] > 1:\n                    self._d[key] -= 1\n                else:\n                    del self._d[key]\n        else:\n            try:\n                i = self._unhashable_items.index(key)\n                if self._unhashable_counts[i] > 1:\n                    self._unhashable_counts[i] -= 1\n                else:\n                    del self._unhashable_counts[i]\n                    del self._unhashable_items[i]\n            except ValueError:\n                pass", "docstring": "Atomically decrement a count by 1. Expunge the item if the count is 0.\n\nIf the item is not present, has no effect.\n\nArgs:\nkey: the key being counted.", "source": "github-repos"}
{"code": "def random(self, shape, tf_fn, kwargs):\n    \n    \n    slice_shape = self.slice_shape(shape)\n    x = tf_fn(slice_shape, **kwargs)\n    \n    \n    \n    layout = self.tensor_layout(shape)\n    \n    mesh_axes = [i for i in xrange(self.ndims)\n                 if i not in layout.tensor_axis_to_mesh_axis]\n    multiplier = 1.0\n    for axis in mesh_axes:\n      multiplier *= tf.cast(\n          tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)\n    x *= multiplier\n    x = self.LaidOutTensor([x])\n    x = self.allreduce(x, mesh_axes, \"SUM\")\n    return x", "docstring": "Call a random tf operation (e.g. random_uniform).\n\nArgs:\nshape: a Shape\ntf_fn: a function such as tf.random.uniform\nkwargs: kwargs to pass to tf_fn, except for seed\n\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def plot(self, data):\n    import IPython\n    if ((not isinstance(data, dict)) or (not all((isinstance(v, pd.DataFrame) for v in data.values())))):\n        raise ValueError('Expect a dictionary where the values are all dataframes.')\n    gfsg = GenericFeatureStatisticsGenerator()\n    data = [{'name': k, 'table': self._remove_nonascii(v)} for (k, v) in six.iteritems(data)]\n    data_proto = gfsg.ProtoFromDataFrames(data)\n    protostr = base64.b64encode(data_proto.SerializeToString()).decode('utf-8')\n    html_id = ('f' + datalab.utils.commands.Html.next_id())\n    HTML_TEMPLATE = '<link rel=\"import\" href=\"/nbextensions/gcpdatalab/extern/facets-jupyter.html\" >\\n        <facets-overview id=\"{html_id}\"></facets-overview>\\n        <script>\\n          document.querySelector(\"\n    html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)\n    return IPython.core.display.HTML(html)", "docstring": "Plots an overview in a list of dataframes\n\nArgs:\ndata: a dictionary with key the name, and value the dataframe.", "source": "codesearchnet"}
{"code": "def __call__(self, request: beam.Row, *args, **kwargs):\n    try:\n        entity_id = request._asdict()[self.row_key]\n    except KeyError:\n        raise KeyError('Enrichment requests to Vertex AI Feature Store should contain a field: %s in the input `beam.Row` to join the input with fetched response. This is used as the `FeatureViewDataKey` to fetch feature values corresponding to this key.' % self.row_key)\n    try:\n        response = self.client.fetch_feature_values(request=aiplatform.gapic.FetchFeatureValuesRequest(data_key=aiplatform.gapic.FeatureViewDataKey(key=entity_id), feature_view=self.feature_view_path, data_format=aiplatform.gapic.FeatureViewDataFormat.PROTO_STRUCT))\n    except NotFound:\n        if self.exception_level == ExceptionLevel.WARN:\n            _LOGGER.warning(_not_found_err_message(self.feature_store_name, self.feature_view_name, entity_id))\n            return (request, beam.Row())\n        elif self.exception_level == ExceptionLevel.RAISE:\n            raise ValueError(_not_found_err_message(self.feature_store_name, self.feature_view_name, entity_id))\n    response_dict = dict(response.proto_struct)\n    return (request, beam.Row(**response_dict))", "docstring": "Fetches feature value for an entity-id from Vertex AI Feature Store.\n\nArgs:\nrequest: the input `beam.Row` to enrich.", "source": "github-repos"}
{"code": "def get_cached_response(cls, key):\n        \n        request_cached_response = DEFAULT_REQUEST_CACHE.get_cached_response(key)\n        if not request_cached_response.is_found:\n            django_cached_response = cls._get_cached_response_from_django_cache(key)\n            cls._set_request_cache_if_django_cache_hit(key, django_cached_response)\n            return django_cached_response\n\n        return request_cached_response", "docstring": "Retrieves a CachedResponse for the provided key.\n\nArgs:\nkey (string)\n\nReturns:\nA CachedResponse with is_found status and value.", "source": "juraj-google-style"}
{"code": "def print_dict(py_dict):\n    for gpu, cc in py_dict.items():\n        print('{:<25}{:<25}'.format(gpu, cc))", "docstring": "Prints dictionary with formatting (2 column table).\n\nArgs:\npy_dict: Dictionary that is to be printed out in a table format.", "source": "github-repos"}
{"code": "def __init__(self, project_key, conf_path=settings.ZEO_CLIENT_PATH):\n        \n        super(self.__class__, self).__init__(\n            conf_path=conf_path,\n            project_key=project_key\n        )", "docstring": "Constructor.\n\nArgs:\nproject_key (str): Project key which is used for the root of DB.\nconf_path (str): Path to the client zeo configuration file. Default\n:attr:`.settings.ZEO_CLIENT_PATH`.", "source": "juraj-google-style"}
{"code": "def load(self, filepath, file_encoding=None):\n        \n        with open(filepath, encoding=file_encoding) as inf:\n            for line in inf:\n                current_line = str(line).strip()\n                if current_line.startswith(\"@prefix\"):\n                    self._add_ttl_ns(current_line.replace(\"\\n\",\"\"))\n                elif len(current_line) > 10:\n                    break\n        self.__make_dicts__", "docstring": "Reads the the beginning of a turtle file and sets the prefix's used\nin that file and sets the prefix attribute\n\nArgs:\nfilepath: the path to the turtle file\nfile_encoding: specify a specific encoding if necessary", "source": "juraj-google-style"}
{"code": "def delete(self, timeout=-1, custom_headers=None, force=False):\n        \n        uri = self.data['uri']\n\n        logger.debug(\"Delete resource (uri = %s)\" % (str(uri)))\n\n        return self._helper.delete(uri, timeout=timeout,\n                                   custom_headers=custom_headers, force=force)", "docstring": "Deletes current resource.\n\nArgs:\ntimeout: Timeout in seconds.\ncustom_headers: Allows to set custom http headers.\nforce: Flag to force the operation.", "source": "juraj-google-style"}
{"code": "def add_tags(self, ID3=None):\n        \n\n        if ID3 is None:\n            ID3 = self.ID3\n        if self.tags is None:\n            self.ID3 = ID3\n            self.tags = ID3()\n        else:\n            raise error(\"an ID3 tag already exists\")", "docstring": "Add an empty ID3 tag to the file.\n\nArgs:\nID3 (ID3): An ID3 subclass to use or `None` to use the one\nthat used when loading.\n\nA custom tag reader may be used in instead of the default\n`ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.", "source": "juraj-google-style"}
{"code": "def assert_inbounds(num, low, high, msg='', eq=False, verbose=not util_arg.QUIET):\n    r\n    from utool import util_str\n    if util_arg.NO_ASSERTS:\n        return\n    passed = util_alg.inbounds(num, low, high, eq=eq)\n    if isinstance(passed, np.ndarray):\n        passflag = np.all(passed)\n    else:\n        passflag = passed\n    if not passflag:\n        failednum = num.compress(~passed) if isinstance(num, np.ndarray) else num\n        failedlow = low.compress(~passed) if isinstance(low, np.ndarray) else low\n        failedhigh = high.compress(~passed) if isinstance(high, np.ndarray) else high\n        msg_ = 'num=%r is out of bounds=(%r, %r)' % (failednum, failedlow, failedhigh)\n        raise AssertionError(msg_ + '\\n' + msg)\n    else:\n        if verbose:\n            op = '<=' if eq else '<'\n            fmtstr = 'Passed assert_inbounds: {low} {op} {num} {op} {high}'\n            print(fmtstr.format(low=low, op=op, num=util_str.truncate_str(str(num)), high=high))", "docstring": "r\"\"\"\nArgs:\nnum (scalar):\nlow (scalar):\nhigh (scalar):\nmsg (str):", "source": "juraj-google-style"}
{"code": "def data_received(self, data):\n        \n        try:\n            self.responders[-1].on_data(data)\n        except Exception as error:\n            self.handle_error(error)", "docstring": "(asyncio.Protocol member)\n\nCalled upon when there is new data to be passed to the\nprotocol.\nThe data is forwarded to the top of the responder stack (via\nthe on_data method).\nIf an excpetion occurs while this is going on, the Exception\nis forwarded to the protocol's handle_error method.\n\nArgs:\ndata (bytes): Bytes from the latest data transmission", "source": "juraj-google-style"}
{"code": "def get(self, *args, **kwargs):\n    if (not self.enabled):\n        return None\n    cache_key = self.make_key(args, kwargs)\n    with self._cache_lock:\n        if (cache_key in self._cache):\n            (expirytime, item) = self._cache[cache_key]\n            if (expirytime >= time()):\n                return item\n            else:\n                del self._cache[cache_key]\n    return None", "docstring": "Get an item from the cache for this combination of args and kwargs.\n\nArgs:\n*args: any arguments.\n**kwargs: any keyword arguments.\n\nReturns:\nobject: The object which has been found in the cache, or `None` if\nno unexpired item is found. This means that there is no point\nstoring an item in the cache if it is `None`.", "source": "codesearchnet"}
{"code": "def _publish_status(self, slug, data):\n        \n\n        status_topic = self.topics.prefix + 'devices/{}/data/status'.format(slug)\n\n        self._logger.debug(\"Publishing status message: (topic=%s) (message=%s)\", status_topic, str(data))\n        self.client.publish(status_topic, data)", "docstring": "Publish a status message for a device\n\nArgs:\nslug (string): The device slug that we are publishing on behalf of\ndata (dict): The status message data to be sent back to the caller", "source": "juraj-google-style"}
{"code": "def get_unverified_claims(token):\n    try:\n        claims = jws.get_unverified_claims(token)\n    except:\n        raise JWTError('Error decoding token claims.')\n    try:\n        claims = json.loads(claims.decode('utf-8'))\n    except ValueError as e:\n        raise JWTError(('Invalid claims string: %s' % e))\n    if (not isinstance(claims, Mapping)):\n        raise JWTError('Invalid claims string: must be a json object')\n    return claims", "docstring": "Returns the decoded claims without verification of any kind.\n\nArgs:\ntoken (str): A signed JWT to decode the headers from.\n\nReturns:\ndict: The dict representation of the token claims.\n\nRaises:\nJWTError: If there is an exception decoding the token.", "source": "codesearchnet"}
{"code": "def convert_to_eager_tensor(value, ctx, dtype=None) -> ops._EagerTensorBase:\n    if isinstance(value, np.ndarray):\n        value = value.copy()\n    if isinstance(value, ops.EagerTensor):\n        if dtype is not None and value.dtype != dtype:\n            raise TypeError(f'Expected tensor {value} with dtype {dtype!r}, but got dtype {value.dtype!r}.')\n        return value\n    if dtype is not None:\n        try:\n            dtype = dtype.as_datatype_enum\n        except AttributeError:\n            dtype = dtypes.as_dtype(dtype).as_datatype_enum\n    ctx.ensure_initialized()\n    return ops.EagerTensor(value, ctx.device_name, dtype)", "docstring": "Converts the given `value` to an `EagerTensor`.\n\nNote that this function could return cached copies of created constants for\nperformance reasons.\n\nArgs:\nvalue: value to convert to EagerTensor.\nctx: value of context.context().\ndtype: optional desired dtype of the converted EagerTensor.\n\nReturns:\nEagerTensor created from value.\n\nRaises:\nTypeError: if `dtype` is not compatible with the type of t.", "source": "github-repos"}
{"code": "def get_config(self, key_name):\n    if (key_name in self.config):\n        return self.config.get(key_name)\n    return self.Configuration.default(key_name, inst=self)", "docstring": "Return configuration value\n\nArgs:\nkey_name (str): configuration key\n\nReturns:\nThe value for the specified configuration key, or if not found\nin the config the default value specified in the Configuration Handler\nclass specified inside this component", "source": "codesearchnet"}
{"code": "def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):\n  \n  datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)\n  return datetime_object.year", "docstring": "Gets the year from a POSIX timestamp\n\nThe POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.\n\nArgs:\nposix_time: An integer containing the number of seconds since\n1970-01-01 00:00:00 UTC.\ntimezone: Optional timezone of the POSIX timestamp.\n\nReturns:\nThe year of the POSIX timestamp.\n\nRaises:\nValueError: If the posix timestamp is out of the range of supported values.", "source": "juraj-google-style"}
{"code": "def coordination_number_delta_E( self ):\n        \n        initial_site_neighbours = [ s for s in self.initial_site.p_neighbours if s.is_occupied ] \n        final_site_neighbours = [ s for s in self.final_site.p_neighbours if s.is_occupied and s is not self.initial_site ] \n        initial_cn_occupation_energy = ( self.initial_site.cn_occupation_energy() + \n            sum( [ site.cn_occupation_energy() for site in initial_site_neighbours ] ) +\n            sum( [ site.cn_occupation_energy() for site in final_site_neighbours ] ) )\n        final_cn_occupation_energy = ( self.final_site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) +\n            sum( [ site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) for site in initial_site_neighbours ] ) +\n            sum( [ site.cn_occupation_energy( delta_occupation = { self.final_site.label : +1 } ) for site in final_site_neighbours ] ) )\n        return ( final_cn_occupation_energy - initial_cn_occupation_energy )", "docstring": "Coordination-number dependent energy conrtibution to the change in system energy if this jump were accepted.\n\nArgs:\nNone\n\nReturns:\n(Float): delta E (coordination-number)", "source": "juraj-google-style"}
{"code": "def get_custom_modules_path() -> Path:\n    channel_path = (get_base_path() / 'modules')\n    if (not channel_path.exists()):\n        channel_path.mkdir(parents=True)\n    return channel_path", "docstring": "Get the path to custom channels\n\nReturns:\nThe path for custom channels.", "source": "codesearchnet"}
{"code": "def check_absolute_refs(self, construction_table):\n    c_table = construction_table\n    problem_index = [i for i in c_table.index[:3] if (not self._has_valid_abs_ref(i, c_table))]\n    return problem_index", "docstring": "Checks first three rows of ``construction_table`` for linear references\n\nChecks for each index from first to third row of the\n``construction_table``, if the references are colinear.\nThis case has to be specially treated, because the references\nare not only atoms (to fix internal degrees of freedom) but also points\nin cartesian space called absolute references.\n(to fix translational and rotational degrees of freedom)\n\nArgs:\nconstruction_table (pd.DataFrame):\n\nReturns:\nlist: A list of problematic indices.", "source": "codesearchnet"}
{"code": "def holiday_day(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `holiday_day`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `holiday_day`')\n    self._holiday_day = value", "docstring": "Corresponds to IDD Field `holiday_day`\n\nArgs:\nvalue (str): value for IDD Field `holiday_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def run_job(self, section_id, session=None):\n    if (not self.parser.has_section(section_id)):\n        raise KeyError('section not found: {}'.format(section_id))\n    session = (session or Session())\n    for (name, looter_cls) in six.iteritems(self._CLS_MAP):\n        targets = self.get_targets(self._get(section_id, name))\n        quiet = self._getboolean(section_id, 'quiet', self.args.get('--quiet', False))\n        if targets:\n            logger.info('Launching {} job for section {}'.format(name, section_id))\n        for (target, directory) in six.iteritems(targets):\n            try:\n                logger.info('Downloading {} to {}'.format(target, directory))\n                looter = looter_cls(target, add_metadata=self._getboolean(section_id, 'add-metadata', False), get_videos=self._getboolean(section_id, 'get-videos', False), videos_only=self._getboolean(section_id, 'videos-only', False), jobs=self._getint(section_id, 'jobs', 16), template=self._get(section_id, 'template', '{id}'), dump_json=self._getboolean(section_id, 'dump-json', False), dump_only=self._getboolean(section_id, 'dump-only', False), extended_dump=self._getboolean(section_id, 'extended-dump', False), session=session)\n                if self.parser.has_option(section_id, 'username'):\n                    looter.logout()\n                    username = self._get(section_id, 'username')\n                    password = (self._get(section_id, 'password') or getpass.getpass('Password for \"{}\": '.format(username)))\n                    looter.login(username, password)\n                n = looter.download(directory, media_count=self._getint(section_id, 'num-to-dl'), new_only=self._getboolean(section_id, 'new', False), pgpbar_cls=(None if quiet else TqdmProgressBar), dlpbar_cls=(None if quiet else TqdmProgressBar))\n                logger.success('Downloaded %i medias !', n)\n            except Exception as exception:\n                logger.error(six.text_type(exception))", "docstring": "Run a job as described in the section named ``section_id``.\n\nRaises:\nKeyError: when the section could not be found.", "source": "codesearchnet"}
{"code": "def skipForDeviceType(self, device_type: typing.List[str], reason: str, unless_device_count_equals_to=None):\n    physical_device_types = set([d.device_type for d in tf_config.list_physical_devices()])\n    for device in device_type:\n        if device == 'TPU' and is_tpu_present():\n            if unless_device_count_equals_to is None:\n                self.skipTest(reason)\n            elif len(list_local_logical_devices(device)) != unless_device_count_equals_to:\n                self.skipTest(reason)\n        if device == 'CPU' and len(physical_device_types) == 1 and ('CPU' in physical_device_types):\n            self.skipTest(reason)\n        if device == 'GPU' and 'GPU' in physical_device_types:\n            self.skipTest(reason)", "docstring": "Skip the test for the specific device_type.\n\nArgs:\ndevice_type: list of device types, one of \"CPU\", \"GPU\", or \"TPU\".\nreason: string that describe the reason for skipping the test.\nunless_device_count_equals_to: Optional int. This parameter only works if\ndevice_type is \"TPU\". If set, the test will be skipped unless the number\nof TPUs equals to the specified count.", "source": "github-repos"}
{"code": "def volume(self):\n    return np.dot(self.matrix[0], np.cross(self.matrix[1], self.matrix[2]))", "docstring": "The cell volume.\n\nArgs:\nNone\n\nReturns:\n(float): The cell volume.", "source": "codesearchnet"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    \n    if server is not None and author != channel.server.me:\n        \n        prefix = data[\"discord\"][\"servers\"][server.id][\"prefix\"]\n        if content.startswith(prefix):\n            \n            package = content.split(\" \")\n            command = package[0][len(prefix):]\n            args = package[1:]\n            arg = ' '.join(args)\n\n            \n            if command == 'help':\n                if args:\n                    \n                    datapacks = api_help.get_help_datapacks(arg, prefix)\n                    \n                    if datapacks:\n                        await client.send_typing(channel)\n                        embed = ui_embed.success(channel, arg, datapacks)\n                        try:\n                            await embed.send()\n                        except discord.errors.HTTPException:\n                            embed = ui_embed.http_exception(channel, arg)\n                            await embed.send()\n                else:\n                    \n                    datapacks = api_help.get_help_commands(prefix)\n                    \n                    if datapacks:\n                        await client.send_typing(channel)\n                        embed = ui_embed.success(channel, arg, datapacks)\n                        try:\n                            await embed.send()\n                        except discord.errors.HTTPException:\n                            embed = ui_embed.http_exception(channel, arg)\n                            await embed.send()", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def validate_default_element(self, value):\n        \n        if isinstance(value, (six.string_types, six.integer_types)):\n            \n            \n            if self.__type:\n                self.__type(value)\n            return value\n\n        return super(EnumField, self).validate_default_element(value)", "docstring": "Validate default element of Enum field.\n\nEnum fields allow for delayed resolution of default values\nwhen the type of the field has not been resolved. The default\nvalue of a field may be a string or an integer. If the Enum\ntype of the field has been resolved, the default value is\nvalidated against that type.\n\nArgs:\nvalue: Value to validate.\n\nRaises:\nValidationError if value is not expected message type.", "source": "juraj-google-style"}
{"code": "class GraniteMoeHybridMLP(nn.Module):\n\n    def __init__(self, config: GraniteMoeHybridConfig):\n        super(GraniteMoeHybridMLP, self).__init__()\n        self.input_size = config.hidden_size\n        self.hidden_size = config.shared_intermediate_size\n        self.activation = ACT2FN[config.hidden_act]\n        self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False)\n        self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False)\n\n    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n        hidden_states = self.input_linear(hidden_states)\n        chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n        hidden_states = self.output_linear(hidden_states)\n        return hidden_states", "docstring": "MLP layer for shared experts\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def delete_volume(self, volume_name: str):\n        \n        \n        if not self._manager:\n            raise RuntimeError('Volumes can only be deleted '\n                               'on swarm manager nodes')\n\n        \n        self._api_client.remove_volume(volume_name)", "docstring": "Removes/stops a docker volume.\n\nOnly the manager nodes can delete a volume\n\nArgs:\nvolume_name (string): Name of the volume", "source": "juraj-google-style"}
{"code": "def unionfs(rw='rw', ro=None, union='union'):\n    from functools import wraps\n\n    def wrap_in_union_fs(func):\n        '\\n        Function that wraps a given function inside the file system.\\n\\n        Args:\\n            func: The function that needs to be wrapped inside the unions fs.\\n        Return:\\n            The file system with the function wrapped inside.\\n        '\n\n        @wraps(func)\n        def wrap_in_union_fs_func(project, *args, **kwargs):\n            '\\n            Wrap the func in the UnionFS mount stack.\\n\\n            We make sure that the mount points all exist and stack up the\\n            directories for the unionfs. All directories outside of the default\\n            build environment are tracked for deletion.\\n            '\n            container = project.container\n            if ((container is None) or in_container()):\n                return func(project, *args, **kwargs)\n            build_dir = local.path(project.builddir)\n            LOG.debug('UnionFS - Project builddir: %s', project.builddir)\n            if __unionfs_is_active(root=build_dir):\n                LOG.debug('UnionFS already active in %s, nesting not supported.', build_dir)\n                return func(project, *args, **kwargs)\n            ro_dir = local.path(container.local)\n            rw_dir = (build_dir / rw)\n            un_dir = (build_dir / union)\n            LOG.debug('UnionFS - RW: %s', rw_dir)\n            unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir)\n            project_builddir_bak = project.builddir\n            project.builddir = un_dir\n            proc = unionfs_cmd.popen()\n            while ((not __unionfs_is_active(root=un_dir)) and (proc.poll() is None)):\n                pass\n            ret = None\n            if (proc.poll() is None):\n                try:\n                    with local.cwd(un_dir):\n                        ret = func(project, *args, **kwargs)\n                finally:\n                    project.builddir = project_builddir_bak\n                    from signal import SIGINT\n                    is_running = (proc.poll() is None)\n                    while (__unionfs_is_active(root=un_dir) and is_running):\n                        try:\n                            proc.send_signal(SIGINT)\n                            proc.wait(timeout=3)\n                        except subprocess.TimeoutExpired:\n                            proc.kill()\n                            is_running = False\n                    LOG.debug('Unionfs shut down.')\n            if __unionfs_is_active(root=un_dir):\n                raise UnmountError()\n            return ret\n        return wrap_in_union_fs_func\n    return wrap_in_union_fs", "docstring": "Decorator for the UnionFS feature.\n\nThis configures a unionfs for projects. The given base_dir and/or image_dir\nare layered as follows:\nimage_dir=RW:base_dir=RO\nAll writes go to the image_dir, while base_dir delivers the (read-only)\nversions of the rest of the filesystem.\n\nThe unified version will be provided in the project's builddir. Unmouting\nis done as soon as the function completes.\n\nArgs:\nrw: writeable storage area for the unified fuse filesystem.\nro: read-only storage area for the unified fuse filesystem.\nunion: mountpoint of the unified fuse filesystem.", "source": "codesearchnet"}
{"code": "class AutoContrast(BaseImagePreprocessingLayer):\n    _USE_BASE_FACTOR = False\n    _VALUE_RANGE_VALIDATION_ERROR = 'The `value_range` argument should be a list of two numbers. '\n\n    def __init__(self, value_range=(0, 255), **kwargs):\n        super().__init__(**kwargs)\n        self._set_value_range(value_range)\n\n    def _set_value_range(self, value_range):\n        if not isinstance(value_range, (tuple, list)):\n            raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}')\n        if len(value_range) != 2:\n            raise ValueError(self._VALUE_RANGE_VALIDATION_ERROR + f'Received: value_range={value_range}')\n        self.value_range = sorted(value_range)\n\n    def transform_images(self, images, transformation=None, training=True):\n        original_images = images\n        images = self._transform_value_range(images, original_range=self.value_range, target_range=(0, 255), dtype=self.compute_dtype)\n        images = self.backend.cast(images, self.compute_dtype)\n        low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)\n        high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)\n        scale = 255.0 / (high - low)\n        offset = -low * scale\n        images = images * scale + offset\n        results = self.backend.numpy.clip(images, 0.0, 255.0)\n        results = self._transform_value_range(results, original_range=(0, 255), target_range=self.value_range, dtype=self.compute_dtype)\n        results = self.backend.numpy.where(self.backend.numpy.isnan(results), original_images, results)\n        if results.dtype == images.dtype:\n            return results\n        if backend.is_int_dtype(images.dtype):\n            results = self.backend.numpy.round(results)\n        return _saturate_cast(results, images.dtype, self.backend)\n\n    def transform_labels(self, labels, transformation, training=True):\n        return labels\n\n    def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):\n        return bounding_boxes\n\n    def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):\n        return segmentation_masks\n\n    def get_config(self):\n        config = super().get_config()\n        config.update({'value_range': self.value_range})\n        return config\n\n    def compute_output_shape(self, input_shape):\n        return input_shape", "docstring": "Performs the auto-contrast operation on an image.\n\nAuto contrast stretches the values of an image across the entire available\n`value_range`. This makes differences between pixels more obvious. An\nexample of this is if an image only has values `[0, 1]` out of the range\n`[0, 255]`, auto contrast will change the `1` values to be `255`.\n\nThis layer is active at both training and inference time.\n\nArgs:\nvalue_range: Range of values the incoming images will have.\nRepresented as a two number tuple written `(low, high)`.\nThis is typically either `(0, 1)` or `(0, 255)` depending\non how your preprocessing pipeline is set up.\nDefaults to `(0, 255)`.", "source": "github-repos"}
{"code": "def get_atomic_python_constant(variable: cfg.Variable, constant_type=None):\n    atomic = get_atomic_value(variable)\n    return atomic.ctx.convert.value_to_constant(atomic, constant_type)", "docstring": "Get the concrete atomic Python value stored in this variable.\n\nThis is used for things that are stored in cfg.Variable, but we\nneed the actual data in order to proceed. E.g. function / class definitions.\n\nArgs:\nvariable: A cfg.Variable. It can only have one possible value.\nconstant_type: Optionally, the required type of the constant.\n\nReturns:\nA Python constant. (Typically, a string, a tuple, or a code object.)\nRaises:\nConversionError: If the value in this Variable is purely abstract, i.e.\ndoesn't store a Python value, or if it has more than one possible value.", "source": "github-repos"}
{"code": "def _ParseOrMerge(self, lines, message):\n    \n    tokenizer = _Tokenizer(lines)\n    while not tokenizer.AtEnd():\n      self._MergeField(tokenizer, message)", "docstring": "Converts an text representation of a protocol message into a message.\n\nArgs:\nlines: Lines of a message's text representation.\nmessage: A protocol buffer message to merge into.\n\nRaises:\nParseError: On text parsing problems.", "source": "juraj-google-style"}
{"code": "def save(self, outfname):\n        \n        f = BZ2File(outfname, 'w')\n        self.doc.writexml(f, addindent='  ', newl='\\n')\n        f.close()", "docstring": "Save the environment of a sv file to be used with soniv visualiser\n\nArgs:\noutfname(str): full path to the file storing the environment", "source": "juraj-google-style"}
{"code": "def wrap(tensor, books=None, tensor_shape=None):\n    if (books is None):\n        books = bookkeeper.for_default_graph()\n    if isinstance(tensor, PrettyTensor):\n        return tensor.as_layer()\n    elif isinstance(tensor, UnboundVariable):\n\n        def set_input_from_unbound_var(data):\n            'Sets the input from the given unbound_var.'\n            if (data is not None):\n                return wrap(data, books)\n            else:\n                return None\n        return _DeferredLayer(books, set_input_from_unbound_var, [tensor], {})\n    else:\n        tensor = tf.convert_to_tensor(tensor, name='input')\n        if tensor_shape:\n            _set_shape_on_tensor(tensor, tensor_shape)\n        return Layer(books, tensor=tensor, name=tensor.name)", "docstring": "Creates an input layer representing the given tensor.\n\nArgs:\ntensor: The tensor.\nbooks: The bookkeeper; this is usually not required unless you are building\nmultiple `tf.Graphs.`\ntensor_shape: An optional shape that will be set on the Tensor or verified\nto match the tensor.\nReturns:\nA layer.", "source": "codesearchnet"}
{"code": "def __init__(\n            self,\n            cert,\n            urlbase='https:\n    ):\n        \n        \n        self.cert = cert\n\n        self.urlbase = urlbase\n        if not urlbase.endswith('/'):\n            self.urlbase += '/'\n        self._session = requests.Session()\n        self._session.cert = cert\n        self._session.timeout = self.TIMEOUT  \n        self._session.verify = True  \n        \n        self._session.mount(urlbase, HTTPAdapter(max_retries=self.RETRIES))\n\n        log.debug(\"------------------------------------------------------\")\n        log.info(\"[PyLmod] init urlbase=%s\", urlbase)", "docstring": "Initialize Base instance.\n\nArgs:\ncert (unicode): File path to the certificate used to\nauthenticate access to LMod Web service\nurlbase (str): The URL of the LMod Web service. i.e.\n``learning-modules.mit.edu`` or\n``learning-modules-test.mit.edu``", "source": "juraj-google-style"}
{"code": "def obtain(self, dest):\n    (url, rev_options) = self.get_url_rev_options(self.url)\n    if (not os.path.exists(dest)):\n        self.fetch_new(dest, url, rev_options)\n        return\n    rev_display = rev_options.to_display()\n    if self.is_repository_directory(dest):\n        existing_url = self.get_remote_url(dest)\n        if self.compare_urls(existing_url, url):\n            logger.debug('%s in %s exists, and has correct URL (%s)', self.repo_name.title(), display_path(dest), url)\n            if (not self.is_commit_id_equal(dest, rev_options.rev)):\n                logger.info('Updating %s %s%s', display_path(dest), self.repo_name, rev_display)\n                self.update(dest, url, rev_options)\n            else:\n                logger.info('Skipping because already up-to-date.')\n            return\n        logger.warning('%s %s in %s exists with URL %s', self.name, self.repo_name, display_path(dest), existing_url)\n        prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b'))\n    else:\n        logger.warning('Directory %s already exists, and is not a %s %s.', dest, self.name, self.repo_name)\n        prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))\n    logger.warning('The plan is to install the %s repository %s', self.name, url)\n    response = ask_path_exists(('What to do?  %s' % prompt[0]), prompt[1])\n    if (response == 'a'):\n        sys.exit((- 1))\n    if (response == 'w'):\n        logger.warning('Deleting %s', display_path(dest))\n        rmtree(dest)\n        self.fetch_new(dest, url, rev_options)\n        return\n    if (response == 'b'):\n        dest_dir = backup_dir(dest)\n        logger.warning('Backing up %s to %s', display_path(dest), dest_dir)\n        shutil.move(dest, dest_dir)\n        self.fetch_new(dest, url, rev_options)\n        return\n    if (response == 's'):\n        logger.info('Switching %s %s to %s%s', self.repo_name, display_path(dest), url, rev_display)\n        self.switch(dest, url, rev_options)", "docstring": "Install or update in editable mode the package represented by this\nVersionControl object.\n\nArgs:\ndest: the repository directory in which to install or update.", "source": "codesearchnet"}
{"code": "def _GetFormatErrorLocation(self, yaml_definition, last_definition_object):\n    name = yaml_definition.get('name', None)\n    if name:\n        error_location = 'in: {0:s}'.format((name or '<NAMELESS>'))\n    elif last_definition_object:\n        error_location = 'after: {0:s}'.format(last_definition_object.name)\n    else:\n        error_location = 'at start'\n    return error_location", "docstring": "Retrieves a format error location.\n\nArgs:\nyaml_definition (dict[str, object]): current YAML definition.\nlast_definition_object (DataTypeDefinition): previous data type\ndefinition.\n\nReturns:\nstr: format error location.", "source": "codesearchnet"}
{"code": "def asarray(self, array_like, *, xnp: numpy_utils.NpModule, casting: Union[Casting, str]=Casting.ALL):\n    casting = Casting(casting)\n    from_dtype = numpy_utils.lazy.dtype_from_array(array_like, strict=False)\n    to_dtype = self._get_target_dtype(from_dtype)\n    if casting == casting.NONE:\n        if to_dtype is None:\n            pass\n        elif from_dtype is None:\n            pass\n        elif from_dtype != to_dtype:\n            raise ValueError(f'Cannot cast {from_dtype} to {to_dtype} (casting={casting}).')\n    elif casting == casting.ALL:\n        pass\n    else:\n        raise NotImplementedError(f'Unsupported casting {casting}')\n    if to_dtype is None:\n        dtype_kwargs = {}\n    else:\n        dtype_kwargs = {'dtype': numpy_utils.lazy.as_dtype(to_dtype, xnp=xnp)}\n    if isinstance(array_like, np.ndarray) and array_like.shape == ():\n        if not dtype_kwargs:\n            dtype_kwargs = {'dtype': numpy_utils.lazy.as_dtype(array_like.dtype, xnp=xnp)}\n        array_like = array_like.item()\n    arr = xnp.asarray(array_like, **dtype_kwargs)\n    return arr", "docstring": "Creates an `xnp.ndarray` from the `array_like`.\n\nArgs:\narray_like: Any array-like\nxnp: Target numpy module\ncasting: If `NONE`, prevent casting.\n\nReturns:\narray: The xnp array.", "source": "github-repos"}
{"code": "def depth_july_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_july_average_ground_temperature`'.format(value))\n\n        self._depth_july_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_july_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_july_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def LoadGDAL(filename, no_data=None):\n    if (not GDAL_AVAILABLE):\n        raise Exception('richdem.LoadGDAL() requires GDAL.')\n    allowed_types = {gdal.GDT_Byte, gdal.GDT_Int16, gdal.GDT_Int32, gdal.GDT_UInt16, gdal.GDT_UInt32, gdal.GDT_Float32, gdal.GDT_Float64}\n    src_ds = gdal.Open(filename)\n    srcband = src_ds.GetRasterBand(1)\n    if (no_data is None):\n        no_data = srcband.GetNoDataValue()\n        if (no_data is None):\n            raise Exception('The source data did not have a NoData value. Please use the no_data argument to specify one. If should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.')\n    srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)\n    if (not (srcband.DataType in allowed_types)):\n        raise Exception('This datatype is not supported. Please file a bug report on RichDEM.')\n    srcdata.projection = src_ds.GetProjectionRef()\n    srcdata.geotransform = src_ds.GetGeoTransform()\n    srcdata.metadata = dict()\n    for (k, v) in src_ds.GetMetadata().items():\n        srcdata.metadata[k] = v\n    _AddAnalysis(srcdata, 'LoadGDAL(filename={0}, no_data={1})'.format(filename, no_data))\n    return srcdata", "docstring": "Read a GDAL file.\n\nOpens any file GDAL can read, selects the first raster band, and loads it\nand its metadata into a RichDEM array of the appropriate data type.\n\nIf you need to do something more complicated, look at the source of this\nfunction.\n\nArgs:\nfilename (str):    Name of the raster file to open\nno_data  (float):  Optionally, set the no_data value to this.\n\nReturns:\nA RichDEM array", "source": "codesearchnet"}
{"code": "def is_full(cm, nodes1, nodes2):\n    if ((not nodes1) or (not nodes2)):\n        return True\n    cm = cm[np.ix_(nodes1, nodes2)]\n    return (cm.sum(0).all() and cm.sum(1).all())", "docstring": "Test connectivity of one set of nodes to another.\n\nArgs:\ncm (``np.ndarrray``): The connectivity matrix\nnodes1 (tuple[int]): The nodes whose outputs to ``nodes2`` will be\ntested.\nnodes2 (tuple[int]): The nodes whose inputs from ``nodes1`` will\nbe tested.\n\nReturns:\nbool: ``True`` if all elements in ``nodes1`` output to some element in\n``nodes2`` and all elements in ``nodes2`` have an input from some\nelement in ``nodes1``, or if either set of nodes is empty; ``False``\notherwise.", "source": "codesearchnet"}
{"code": "def download_and_extract(uri, name, path):\n    if (not os.path.exists(path)):\n        os.makedirs(path)\n    if (not os.listdir(path)):\n        with tmpdir() as tmp:\n            if uri.startswith('s3:\n                dst = os.path.join(tmp, 'tar_file')\n                s3_download(uri, dst)\n                with tarfile.open(name=dst, mode='r:gz') as t:\n                    t.extractall(path=path)\n            elif os.path.isdir(uri):\n                if (uri == path):\n                    return\n                if os.path.exists(path):\n                    shutil.rmtree(path)\n                shutil.move(uri, path)\n            else:\n                shutil.copy2(uri, os.path.join(path, name))", "docstring": "Download, prepare and install a compressed tar file from S3 or local directory as an entry point.\n\nSageMaker Python SDK saves the user provided entry points as compressed tar files in S3\n\nArgs:\nname (str): name of the entry point.\nuri (str): the location of the entry point.\npath (bool): The path where the script will be installed. It will not download and install the\nif the path already has the user entry point.", "source": "codesearchnet"}
{"code": "def Collect(\n      self, knowledge_base, artifact_definition, searcher):\n    \n    for source in artifact_definition.sources:\n      if source.type_indicator not in (\n          artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY,\n          artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):\n        continue\n\n      if source.type_indicator == (\n          artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n        key_value_pairs = [{'key': key} for key in source.keys]\n      else:\n        key_value_pairs = source.key_value_pairs\n\n      for key_value_pair in key_value_pairs:\n        key_path = key_value_pair['key']\n\n        \n        \n        \n        key_path_upper = key_path.upper()\n        if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):\n          key_path = '{0:s}{1:s}'.format(\n              'HKEY_LOCAL_MACHINE\\\\System\\\\CurrentControlSet', key_path[23:])\n\n        find_spec = registry_searcher.FindSpec(key_path_glob=key_path)\n\n        for key_path in searcher.Find(find_specs=[find_spec]):\n          try:\n            registry_key = searcher.GetKeyByPath(key_path)\n          except IOError as exception:\n            raise errors.PreProcessFail((\n                'Unable to retrieve Windows Registry key: {0:s} with error: '\n                '{1!s}').format(key_path, exception))\n\n          if registry_key:\n            value_name = key_value_pair.get('value', None)\n            self._ParseKey(knowledge_base, registry_key, value_name)", "docstring": "Collects values using a Windows Registry value artifact definition.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nartifact_definition (artifacts.ArtifactDefinition): artifact definition.\nsearcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to\npreprocess the Windows Registry.\n\nRaises:\nPreProcessFail: if the Windows Registry key or value cannot be read.", "source": "juraj-google-style"}
{"code": "def join(self, basepath, *paths):\n    if not basepath.startswith(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX):\n        raise ValueError('Basepath %r must be an Azure Blob Storage path.' % basepath)\n    path = basepath\n    for p in paths:\n        path = path.rstrip('/') + '/' + p.lstrip('/')\n    return path", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\nbasepath: string path of the first component of the path\npaths: path components to be added\n\nReturns: full path after combining all the passed components", "source": "github-repos"}
{"code": "def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n    exp_blocked_to_pad = jnp.concatenate([to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], axis=2)\n    band_mask = jnp.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad)\n    band_mask = jnp.expand_dims(band_mask, 1)\n    return band_mask", "docstring": "Create 3D attention mask from a 2D tensor mask.\n\nArgs:\nfrom_blocked_mask: 2D Tensor of shape [batch_size,\nfrom_seq_length//from_block_size, from_block_size].\nto_blocked_mask: int32 Tensor of shape [batch_size,\nto_seq_length//to_block_size, to_block_size].\n\nReturns:\nfloat Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,\n3*to_block_size].", "source": "github-repos"}
{"code": "def get_experiment_from_id(self, experiment_id):\n    \n\n    experiment = self.experiment_id_map.get(experiment_id)\n\n    if experiment:\n      return experiment\n\n    self.logger.error('Experiment ID \"%s\" is not in datafile.' % experiment_id)\n    self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n    return None", "docstring": "Get experiment for the provided experiment ID.\n\nArgs:\nexperiment_id: Experiment ID for which experiment is to be determined.\n\nReturns:\nExperiment corresponding to the provided experiment ID.", "source": "juraj-google-style"}
{"code": "def export_json_object(dict_obj, filename=None):\n    \n    try:\n        if filename:\n            try:\n                with open(filename, 'w') as handle:\n                    handle.write(json.dumps(dict_obj, indent=4, sort_keys=True))\n                    logger.info(\n                        '%s: Wrote %s to local filesystem location' %\n                        (inspect.stack()[0][3], filename))\n                handle.close()\n            except TypeError as e:\n                logger.warning(\n                    '%s: object in dict not serializable: %s' %\n                    (inspect.stack()[0][3], str(e)))\n        else:\n            json_str = json.dumps(dict_obj, indent=4, sort_keys=True)\n            print(highlight(json_str, lexers.JsonLexer(), formatters.TerminalFormatter()))\n            logger.info('%s: successful export to stdout' % inspect.stack()[0][3])\n            return True\n    except IOError as e:\n        logger.critical(\n            '%s: export_file_object: error writing to %s to filesystem. Error: %s' %\n            (inspect.stack()[0][3], filename, str(e)))\n        return False\n    else:\n        logger.info('export_file_object: successful export to %s' % filename)\n        return True", "docstring": "Summary:\nexports object to block filesystem object\n\nArgs:\n:dict_obj (dict): dictionary object\n:filename (str):  name of file to be exported (optional)\n\nReturns:\nTrue | False Boolean export status", "source": "juraj-google-style"}
{"code": "def _append_expectation(self, expectation_config):\n    expectation_type = expectation_config['expectation_type']\n    json.dumps(expectation_config)\n    if ('column' in expectation_config['kwargs']):\n        column = expectation_config['kwargs']['column']\n        self._expectations_config.expectations = [f for f in filter((lambda exp: ((exp['expectation_type'] != expectation_type) or (('column' in exp['kwargs']) and (exp['kwargs']['column'] != column)))), self._expectations_config.expectations)]\n    else:\n        self._expectations_config.expectations = [f for f in filter((lambda exp: (exp['expectation_type'] != expectation_type)), self._expectations_config.expectations)]\n    self._expectations_config.expectations.append(expectation_config)", "docstring": "Appends an expectation to `DataAsset._expectations_config` and drops existing expectations of the same type.\n\nIf `expectation_config` is a column expectation, this drops existing expectations that are specific to \\\nthat column and only if it is the same expectation type as `expectation_config`. Otherwise, if it's not a \\\ncolumn expectation, this drops existing expectations of the same type as `expectation config`. \\\nAfter expectations of the same type are dropped, `expectation_config` is appended to `DataAsset._expectations_config`.\n\nArgs:\nexpectation_config (json): \\\nThe JSON-serializable expectation to be added to the DataAsset expectations in `_expectations_config`.\n\nNotes:\nMay raise future errors once json-serializable tests are implemented to check for correct arg formatting", "source": "codesearchnet"}
{"code": "def _testExportImportAcrossScopes(self, graph_fn, use_resource):\n    with ops.Graph().as_default() as original_graph:\n        with variable_scope.variable_scope('dropA/dropB/keepA'):\n            graph_fn(use_resource=use_resource)\n    exported_meta_graph_def = meta_graph.export_scoped_meta_graph(graph=original_graph, export_scope='dropA/dropB')[0]\n    with ops.Graph().as_default() as imported_graph:\n        meta_graph.import_scoped_meta_graph(exported_meta_graph_def, import_scope='importA')\n    with ops.Graph().as_default() as expected_graph:\n        with variable_scope.variable_scope('importA/keepA'):\n            graph_fn(use_resource=use_resource)\n    result = meta_graph.export_scoped_meta_graph(graph=imported_graph)[0]\n    expected = meta_graph.export_scoped_meta_graph(graph=expected_graph)[0]\n    if use_resource:\n        for meta_graph_def in [result, expected]:\n            for node in meta_graph_def.graph_def.node:\n                for attr_to_remove in ['shared_name', 'debug_name']:\n                    attr_value = node.attr.get(attr_to_remove, None)\n                    if attr_value and attr_value.HasField('s'):\n                        if attr_value.s:\n                            node.attr[attr_to_remove].s = b''\n    test_util.assert_meta_graph_protos_equal(self, expected, result)", "docstring": "Tests export and importing a graph across scopes.\n\nArgs:\ngraph_fn: A closure that creates a graph on the current scope.\nuse_resource: A bool indicating whether or not to use ResourceVariables.", "source": "github-repos"}
{"code": "def log_variables(variables=None):\n    if (variables is None):\n        variables = (tf.global_variables() + tf.local_variables())\n    for row in format_variables(variables, join_lines=False):\n        tf.logging.info(row)", "docstring": "Logs variable information.\n\nThis function logs the name, shape, type, collections, and device for either\nall variables or a given iterable of variables. In the \"Device\" columns,\nthe nature of the variable (legacy or resource (for ResourceVariables)) is\nalso specified in parenthesis.\n\nArgs:\nvariables: iterable of variables; if not provided, then all variables\n(in the default graph) are logged.", "source": "codesearchnet"}
{"code": "def mean_min_time_distance(item_a, item_b, max_value):\n    \n    times_a = item_a.times.reshape((item_a.times.size, 1))\n    times_b = item_b.times.reshape((1, item_b.times.size))\n    distance_matrix = (times_a - times_b) ** 2\n    mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())\n    return np.minimum(mean_min_distances, max_value) / float(max_value)", "docstring": "Calculate the mean time difference among the time steps in each object.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"}
{"code": "def reset_logformat_timestamped(logger: logging.Logger, extraname: str='', level: int=logging.INFO) -> None:\n    namebit = ((extraname + ':') if extraname else '')\n    fmt = (('%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:' + namebit) + '%(message)s')\n    reset_logformat(logger, fmt=fmt)\n    logger.setLevel(level)", "docstring": "Apply a simple time-stamped log format to an existing logger, and set\nits loglevel to either ``logging.DEBUG`` or ``logging.INFO``.\n\nArgs:\nlogger: logger to modify\nextraname: additional name to append to the logger's name\nlevel: log level to set", "source": "codesearchnet"}
{"code": "def GetFrequencyStartTimes(self):\n    start_times = []\n    for freq_tuple in self.GetFrequencyTuples():\n        (start_secs, end_secs, headway_secs) = freq_tuple[0:3]\n        run_secs = start_secs\n        while (run_secs < end_secs):\n            start_times.append(run_secs)\n            run_secs += headway_secs\n    return start_times", "docstring": "Return a list of start time for each headway-based run.\n\nReturns:\na sorted list of seconds since midnight, the start time of each run. If\nthis trip doesn't have headways returns an empty list.", "source": "codesearchnet"}
{"code": "def adafactor_decay_rate_adam(beta2):\n  \n  t = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + 1.0\n  decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t))\n  return decay", "docstring": "Second-moment decay rate like Adam, subsuming the correction factor.\n\nArgs:\nbeta2: a float between 0 and 1\nReturns:\na scalar", "source": "juraj-google-style"}
{"code": "def unroll_state_saver(input_layer, name, state_shapes, template, lengths=None):\n    state_saver = input_layer.bookkeeper.recurrent_state\n    state_names = [((STATE_NAME % name) + ('_%d' % i)) for i in xrange(len(state_shapes))]\n    if hasattr(state_saver, 'add_state'):\n        for (state_name, state_shape) in zip(state_names, state_shapes):\n            initial_state = tf.zeros(state_shape[1:], dtype=input_layer.dtype)\n            state_saver.add_state(state_name, initial_state=initial_state, batch_size=state_shape[0])\n    if (lengths is not None):\n        max_length = tf.reduce_max(lengths)\n    else:\n        max_length = None\n    results = []\n    prev_states = []\n    for (state_name, state_shape) in zip(state_names, state_shapes):\n        my_shape = list(state_shape)\n        my_shape[0] = (- 1)\n        prev_states.append(tf.reshape(state_saver.state(state_name), my_shape))\n    my_parameters = None\n    for (i, layer) in enumerate(input_layer.sequence):\n        with input_layer.g.name_scope(('unroll_%00d' % i)):\n            if ((i > 0) and (max_length is not None)):\n                result = control_flow_ops.cond((i < max_length), (lambda : unwrap_all(*template(layer, *prev_states).flatten())), (lambda : unwrap_all(out, *prev_states)))\n                out = result[0]\n                prev_states = result[1:]\n            else:\n                (out, prev_states) = template(layer, *prev_states)\n        if (my_parameters is None):\n            my_parameters = out.layer_parameters\n        results.append(prettytensor.unwrap(out))\n    updates = [state_saver.save_state(state_name, prettytensor.unwrap(prev_state)) for (state_name, prev_state) in zip(state_names, prev_states)]\n    with tf.control_dependencies(updates):\n        results[0] = tf.identity(results[0])\n    return input_layer.with_sequence(results, parameters=my_parameters)", "docstring": "Unrolls the given function with state taken from the state saver.\n\nArgs:\ninput_layer: The input sequence.\nname: The name of this layer.\nstate_shapes: A list of shapes, one for each state variable.\ntemplate: A template with unbound variables for input and states that\nreturns a RecurrentResult.\nlengths: The length of each item in the batch.  If provided, use this to\ntruncate computation.\nReturns:\nA sequence from applying the given template to each item in the input\nsequence.", "source": "codesearchnet"}
{"code": "def to_value(original_string, corenlp_value=None):\n    \n    if isinstance(original_string, Value):\n        \n        return original_string\n    if not corenlp_value:\n        corenlp_value = original_string\n    \n    amount = NumberValue.parse(corenlp_value)\n    if amount is not None:\n        return NumberValue(amount, original_string)\n    \n    ymd = DateValue.parse(corenlp_value)\n    if ymd is not None:\n        if ymd[1] == ymd[2] == -1:\n            return NumberValue(ymd[0], original_string)\n        else:\n            return DateValue(ymd[0], ymd[1], ymd[2], original_string)\n    \n    return StringValue(original_string)", "docstring": "Convert the string to Value object.\n\nArgs:\noriginal_string (basestring): Original string\ncorenlp_value (basestring): Optional value returned from CoreNLP\nReturns:\nValue", "source": "juraj-google-style"}
{"code": "def precipitable_water(self, value=999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `precipitable_water`'.format(value))\n    self._precipitable_water = value", "docstring": "Corresponds to IDD Field `precipitable_water`\n\nArgs:\nvalue (float): value for IDD Field `precipitable_water`\nUnit: mm\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def read_bytes(self, length) -> bytes:\n    value = self.stream.read(length)\n    return value", "docstring": "Read the specified number of bytes from the stream.\n\nArgs:\nlength (int): number of bytes to read.\n\nReturns:\nbytes: `length` number of bytes.", "source": "codesearchnet"}
{"code": "def hash_file(path, block_size=65536):\n    sha256 = hashlib.sha256()\n    with open(path, 'rb') as f:\n        for block in iter((lambda : f.read(block_size)), b''):\n            sha256.update(block)\n    return sha256.hexdigest()", "docstring": "Returns SHA256 checksum of a file\n\nArgs:\npath (string): Absolute file path of file to hash\n\nblock_size (int, optional): Number of bytes to read per block", "source": "codesearchnet"}
{"code": "def search_by_user(self, screen_name, count=100):\n        \n        results = self._api.user_timeline(screen_name=screen_name, count=count)\n\n        return results", "docstring": "Search tweets by user.\n\nArgs:\nscreen_name: screen name\ncount: the number of tweets\n\nReturns:\nlist: tweet list", "source": "juraj-google-style"}
{"code": "def RegisterCredentials(cls, credentials):\n    \n    if credentials.type_indicator in cls._credentials:\n      raise KeyError(\n          'Credentials object already set for type indicator: {0:s}.'.format(\n              credentials.type_indicator))\n\n    cls._credentials[credentials.type_indicator] = credentials", "docstring": "Registers a path specification credentials.\n\nArgs:\ncredentials (Credentials): credentials.\n\nRaises:\nKeyError: if credentials object is already set for the corresponding\ntype indicator.", "source": "juraj-google-style"}
{"code": "def build_one_definition_example(self, def_name):\n        \n        if def_name in self.definitions_example.keys():  \n            return True\n        elif def_name not in self.specification['definitions'].keys():  \n            return False\n\n        self.definitions_example[def_name] = {}\n        def_spec = self.specification['definitions'][def_name]\n\n        if def_spec.get('type') == 'array' and 'items' in def_spec:\n            item = self.get_example_from_prop_spec(def_spec['items'])\n            self.definitions_example[def_name] = [item]\n            return True\n\n        if 'properties' not in def_spec:\n            self.definitions_example[def_name] = self.get_example_from_prop_spec(def_spec)\n            return True\n\n        \n        for prop_name, prop_spec in def_spec['properties'].items():\n            example = self.get_example_from_prop_spec(prop_spec)\n            if example is None:\n                return False\n            self.definitions_example[def_name][prop_name] = example\n\n        return True", "docstring": "Build the example for the given definition.\n\nArgs:\ndef_name: Name of the definition.\n\nReturns:\nTrue if the example has been created, False if an error occured.", "source": "juraj-google-style"}
{"code": "class AnomalyDetector(abc.ABC):\n\n    def __init__(self, model_id: Optional[str]=None, features: Optional[Iterable[str]]=None, target: Optional[str]=None, threshold_criterion: Optional[ThresholdFn]=None, **kwargs):\n        self._model_id = model_id if model_id is not None else getattr(self, 'spec_type', lambda: 'unknown')()\n        self._features = features\n        self._target = target\n        self._threshold_criterion = threshold_criterion\n\n    @abc.abstractmethod\n    def learn_one(self, x: beam.Row) -> None:\n        \n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def score_one(self, x: beam.Row) -> Optional[float]:\n        \n        raise NotImplementedError", "docstring": "An abstract base class for anomaly detectors.\n\nArgs:\nmodel_id: The ID of detector (model). Defaults to the value of the\n`spec_type` attribute, or 'unknown' if not set.\nfeatures: An Iterable of strings representing the names of the input\nfeatures in the `beam.Row`\ntarget: The name of the target field in the `beam.Row`.\nthreshold_criterion: An optional `ThresholdFn` to apply to the outlier score\nand yield a label.", "source": "github-repos"}
{"code": "def make_grid_texture(num_h_lines=10, num_v_lines=10, resolution=50):\n    \n    x_h, y_h = make_lines_texture(num_h_lines, resolution)\n    y_v, x_v = make_lines_texture(num_v_lines, resolution)\n    return np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v])", "docstring": "Makes a texture consisting of a grid of vertical and horizontal lines.\n\nArgs:\nnum_h_lines (int): the number of horizontal lines to draw\nnum_v_lines (int): the number of vertical lines to draw\nresolution (int): the number of midpoints to draw on each line\n\nReturns:\nA texture.", "source": "juraj-google-style"}
{"code": "def RemoveTransaction(self, tx):\n        \n        if BC.Default() is None:\n            return False\n\n        if not BC.Default().ContainsTransaction(tx.Hash):\n            return False\n\n        if tx.Hash.ToBytes() in self.MemPool:\n            del self.MemPool[tx.Hash.ToBytes()]\n            return True\n\n        return False", "docstring": "Remove a transaction from the memory pool if it is found on the blockchain.\n\nArgs:\ntx (neo.Core.TX.Transaction): instance.\n\nReturns:\nbool: True if successfully removed. False otherwise.", "source": "juraj-google-style"}
{"code": "def _cumprod(l):\n    ret = [1]\n    for item in l:\n        ret.append((ret[(- 1)] * item))\n    return ret", "docstring": "Cumulative product of a list.\n\nArgs:\nl: a list of integers\nReturns:\na list with one more element (starting with 1)", "source": "codesearchnet"}
{"code": "def __init__(self, filename, damethod, date, ensize):\n        \n        \n        self.filename = filename\n        self.damethod = damethod\n        self.date = date\n        self.ensize = ensize\n\n        \n        self.dafile = h5py.File(self.filename, \"a\")\n\n        \n        self.dafile.attrs['damethod'] = self.damethod\n        self.dafile.attrs['date'] = self.date\n        self.dafile.attrs['ensize'] = self.ensize\n\n        \n        self.dafile.create_group(\"Observation\")\n        self.dafile.create_group(\"Parameter\")\n        self.dafile.create_group(\"State\")\n        self.dafile.create_group(\"StateObservation\")\n        self.dafile.create_group(\"Simulation\")\n        self.dafile.create_group(\"Inflation\")", "docstring": "Initialize darun attributes\n\nArgs:\nfilename (str): Absolute path of file name as a string with `hdf5` extension\ndamethod (str): Name of the assimilation method used, i.e. `enkf`.\ndate (str): Date of the experiment `MM-DD-YYYY:HHHH`\nensize (int): ensemble size", "source": "juraj-google-style"}
{"code": "def _GetUserTypeAndPassword(username, password=None, is_admin=False):\n  \n  if is_admin:\n    user_type = api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN\n  else:\n    user_type = api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD\n  if password is None:\n    \n    \n    password = getpass.getpass(prompt=\"Please enter password for user '%s':\" %\n                               username)\n    \n  return user_type, password", "docstring": "Returns the user-type and password for a user.\n\nArgs:\nusername: Username for the user.\npassword: Password for the user. If None, or not provided, we will prompt\nfor one via the terminal.\nis_admin: Indicates whether the user should have admin privileges.", "source": "juraj-google-style"}
{"code": "def __init__(self, configs):\n    self.tests = []\n    class_identifier = self.__class__.__name__\n    if configs.test_class_name_suffix:\n        class_identifier = '%s_%s' % (class_identifier, configs.test_class_name_suffix)\n    if self.TAG is None:\n        self.TAG = class_identifier\n    self.root_output_path = configs.log_path\n    self.log_path = os.path.join(self.root_output_path, class_identifier)\n    utils.create_dir(self.log_path)\n    self.test_bed_name = configs.test_bed_name\n    self.testbed_name = configs.testbed_name\n    self.user_params = configs.user_params\n    self.results = records.TestResult()\n    self.summary_writer = configs.summary_writer\n    self._generated_test_table = collections.OrderedDict()\n    self._controller_manager = controller_manager.ControllerManager(class_name=self.TAG, controller_configs=configs.controller_configs)\n    self.controller_configs = self._controller_manager.controller_configs", "docstring": "Constructor of BaseTestClass.\n\nThe constructor takes a config_parser.TestRunConfig object and which has\nall the information needed to execute this test class, like log_path\nand controller configurations. For details, see the definition of class\nconfig_parser.TestRunConfig.\n\nArgs:\nconfigs: A config_parser.TestRunConfig object.", "source": "github-repos"}
{"code": "def read_lines(self, max_lines=None):\n    \n    if max_lines is None:\n      return self.read_stream().split('\\n')\n\n    max_to_read = self.metadata.size\n    bytes_to_read = min(100 * max_lines, self.metadata.size)\n    while True:\n      content = self.read_stream(byte_count=bytes_to_read)\n\n      lines = content.split('\\n')\n      if len(lines) > max_lines or bytes_to_read >= max_to_read:\n        break\n      \n      bytes_to_read = min(bytes_to_read * 10, max_to_read)\n\n    \n    del lines[-1]\n    return lines[0:max_lines]", "docstring": "Reads the content of this object as text, and return a list of lines up to some max.\n\nArgs:\nmax_lines: max number of lines to return. If None, return all lines.\nReturns:\nThe text content of the object as a list of lines.\nRaises:\nException if there was an error requesting the object's content.", "source": "juraj-google-style"}
{"code": "def edit_miz(  \n        infile: str,\n        outfile: str = None,\n        metar: typing.Union[str, Metar] = None,\n        time: str = None,\n        min_wind: int = 0,\n        max_wind: int = 40\n) -> str:\n    \n    \n    if outfile is None:\n        LOGGER.debug('editing in place: %s', infile)\n        outfile = infile\n    else:\n        LOGGER.debug('editing miz file: %s -> %s', infile, outfile)\n\n    mission_weather = mission_time = None\n\n    if metar:\n        error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar)\n        if error:\n            return error\n\n        mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind)\n\n    if time:\n        try:\n            mission_time = MissionTime.from_string(time)\n        except ValueError:\n            return f'badly formatted time string: {time}'\n\n    if not mission_weather and not mission_time:\n        return 'nothing to do!'\n\n    with Miz(infile) as miz:\n        if mission_weather:\n            LOGGER.debug('applying MissionWeather')\n            if not mission_weather.apply_to_miz(miz):\n                return 'error while applying METAR to mission'\n        if mission_time:\n            LOGGER.debug('applying MissionTime')\n            if not mission_time.apply_to_miz(miz):\n                return 'error while setting time on mission'\n\n        try:\n            miz.zip(outfile)\n            return ''\n        except OSError:\n            return f'permission error: cannot edit \"{outfile}\"; maybe it is in use ?'", "docstring": "Edit an opened MIZ file and sets the time and date and the weather\n\nArgs:\ninfile: source file\noutfile: output file (will default to source file)\nmetar: metar string, ICAO or object to apply\ntime: time string to apply (YYYYMMDDHHMMSS)\nmin_wind: minimum wind\nmax_wind: maximum wind\n\nReturns:\nString containing error", "source": "juraj-google-style"}
{"code": "class FlaubertPoolerEndLogits(nn.Module):\n\n    def __init__(self, config: FlaubertConfig):\n        super().__init__()\n        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n        self.activation = nn.Tanh()\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dense_1 = nn.Linear(config.hidden_size, 1)\n\n    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n        \n        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n        if start_positions is not None:\n            slen, hsz = hidden_states.shape[-2:]\n            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n            start_states = hidden_states.gather(-2, start_positions)\n            start_states = start_states.expand(-1, slen, -1)\n        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))\n        x = self.activation(x)\n        x = self.LayerNorm(x)\n        x = self.dense_1(x).squeeze(-1)\n        if p_mask is not None:\n            if p_mask.dtype == torch.float16:\n                x = x * (1 - p_mask) - 65500 * p_mask\n            else:\n                x = x * (1 - p_mask) - 1e+30 * p_mask\n        return x", "docstring": "Compute SQuAD end logits from sequence hidden states.\n\nArgs:\nconfig ([`FlaubertConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`\nto use.", "source": "github-repos"}
{"code": "def get_installed_version(vcs):\n    \n    version_path = _get_version_path(vcs)\n    if not os.path.exists(version_path):\n        raise VersionNotInstalledError\n    with open(version_path, 'r') as f:\n        return f.read().strip()", "docstring": "Get the installed version for this project.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nstr - version number\n\nRaises:\nVersionNotInstalledError", "source": "juraj-google-style"}
{"code": "def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):\n    keep_by_nms = tf.image.combined_non_max_suppression(boxes=mask_boxes.float(), scores=iou_scores, idxs=torch.zeros(mask_boxes.shape[0]), iou_threshold=amg_crops_nms_thresh)\n    iou_scores = iou_scores[keep_by_nms]\n    rle_masks = [rle_masks[i] for i in keep_by_nms]\n    mask_boxes = mask_boxes[keep_by_nms]\n    masks = [_rle_to_mask(rle) for rle in rle_masks]\n    return (masks, iou_scores, rle_masks, mask_boxes)", "docstring": "Perform NMS (Non Maximum Suppression) on the outputs.\n\nArgs:\nrle_masks (`tf.Tensor`):\nbinary masks in the RLE format\niou_scores (`tf.Tensor` of shape (nb_masks, 1)):\niou_scores predicted by the model\nmask_boxes (`tf.Tensor`):\nThe bounding boxes corresponding to segmentation masks\namg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):\nNMS threshold.", "source": "github-repos"}
{"code": "def validate(self, corpus):\n        \n        invalid_utterances = {}\n\n        for utterance in corpus.utterances.values():\n            duration = utterance.duration\n            ll = utterance.label_lists[self.label_list_idx]\n\n            \n            transcription = ' '.join([l.value for l in ll])\n            num_chars = len(transcription.replace(' ', ''))\n\n            char_per_sec = num_chars / duration\n\n            if char_per_sec > self.max_characters_per_second:\n                invalid_utterances[utterance.idx] = char_per_sec\n\n        passed = len(invalid_utterances) <= 0\n        info = {\n            'Threshold max. characters per second': str(self.max_characters_per_second),\n            'Label-List ID': self.label_list_idx\n        }\n\n        return base.InvalidUtterancesResult(passed, invalid_utterances, name=self.name(), info=info)", "docstring": "Perform the validation on the given corpus.\n\nArgs:\ncorpus (Corpus): The corpus to test/validate.\n\nReturns:\nInvalidUtterancesResult: Validation result.", "source": "juraj-google-style"}
{"code": "def get_events_for_block_ids(self, block_ids, subscriptions):\n    blocks = [self._block_store[block_id] for block_id in block_ids]\n    return self.get_events_for_blocks(blocks, subscriptions)", "docstring": "Get a list of events associated with all the block ids.\n\nArgs:\nblock_ids (list of str): The block ids to search for events that\nmatch each subscription.\nsubscriptions (list of EventSubscriptions): EventFilter and\nevent type to filter events.\n\nReturns (list of Events): The Events associated which each block id.\n\nRaises:\nKeyError\nA block id isn't found within the block store or a transaction\nis missing from the receipt store.", "source": "codesearchnet"}
{"code": "def add_evolved_transformer_hparams(hparams):\n    hparams.num_encoder_layers = 3\n    hparams.num_decoder_layers = 4\n    hparams.learning_rate_constant /= (hparams.learning_rate_warmup_steps ** 0.5)\n    hparams.learning_rate_schedule = 'constant*linear_warmup*single_cycle_cos_decay*rsqrt_hidden_size'\n    hparams.learning_rate_decay_steps = 250000\n    return hparams", "docstring": "Add Evolved Transformer hparams.\n\nNote: These are for the Adam optimizer, not the Adafactor optimizer used in\nthe paper.\n\nArgs:\nhparams: Current hparams.\n\nReturns:\nhparams updated with Evolved Transformer values.", "source": "codesearchnet"}
{"code": "def device(device_name_or_function) -> ContextManager[None]:\n    if context.executing_eagerly():\n        if callable(device_name_or_function):\n            raise RuntimeError('tf.device does not support functions when eager execution is enabled.')\n        return context.device(device_name_or_function)\n    elif executing_eagerly_outside_functions():\n\n        @tf_contextlib.contextmanager\n        def combined(device_name_or_function):\n            with get_default_graph().device(device_name_or_function):\n                if not callable(device_name_or_function):\n                    with context.device(device_name_or_function):\n                        yield\n                else:\n                    yield\n        return combined(device_name_or_function)\n    else:\n        return get_default_graph().device(device_name_or_function)", "docstring": "Wrapper for `Graph.device()` using the default graph.\n\nSee `tf.Graph.device` for more details.\n\nArgs:\ndevice_name_or_function: The device name or function to use in the context.\n\nReturns:\nA context manager that specifies the default device to use for newly\ncreated ops.\n\nRaises:\nRuntimeError: If eager execution is enabled and a function is passed in.", "source": "github-repos"}
{"code": "def _parse_parameters(val_type, val):\n    \n    if val_type == \"logical\":\n        return val == \"T\"\n    elif val_type == \"int\":\n        return int(val)\n    elif val_type == \"string\":\n        return val.strip()\n    else:\n        return float(val)", "docstring": "Helper function to convert a Vasprun parameter into the proper type.\nBoolean, int and float types are converted.\n\nArgs:\nval_type: Value type parsed from vasprun.xml.\nval: Actual string value parsed for vasprun.xml.", "source": "juraj-google-style"}
{"code": "def get_resource_group(access_token, subscription_id, rgname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', rgname, '?api-version=', RESOURCE_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def end_parallel(self):\n    outport = self.oport\n    if isinstance(self.oport.operator, streamsx.topology.graph.Marker):\n        if (self.oport.operator.kind == '$Union$'):\n            pto = self.topology.graph.addPassThruOperator()\n            pto.addInputPort(outputPort=self.oport)\n            outport = pto.addOutputPort(schema=self.oport.schema)\n    op = self.topology.graph.addOperator('$EndParallel$')\n    op.addInputPort(outputPort=outport)\n    oport = op.addOutputPort(schema=self.oport.schema)\n    endP = Stream(self.topology, oport)\n    return endP", "docstring": "Ends a parallel region by merging the channels into a single stream.\n\nReturns:\nStream: Stream for which subsequent transformations are no longer parallelized.\n\n.. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel`", "source": "codesearchnet"}
{"code": "def unindent(lines):\n    unindented_lines = []\n    for line in lines:\n        unindented_line = line.lstrip()\n        indent = (len(line) - len(unindented_line))\n        unindented_lines.append((indent, unindented_line))\n    return unindented_lines", "docstring": "Convert an iterable of indented lines into a sequence of tuples.\n\nThe first element of each tuple is the indent in number of characters, and\nthe second element is the unindented string.\n\nArgs:\nlines: A sequence of strings representing the lines of text in a docstring.\n\nReturns:\nA list of tuples where each tuple corresponds to one line of the input\nlist. Each tuple has two entries - the first is an integer giving the\nsize of the indent in characters, the second is the unindented text.", "source": "codesearchnet"}
{"code": "def _set_c_attrs(self, attrs):\n    for name, attr_value in attrs.items():\n        serialized = attr_value.SerializeToString()\n        with self._c_func.get() as func:\n            c_api.TF_FunctionSetAttrValueProto(func, compat.as_str(name), serialized)", "docstring": "Sets `attrs` as attributes of self._c_func.\n\nRequires that self._c_func is not None.\n\nArgs:\nattrs: a dictionary from attribute name to attribute proto value", "source": "github-repos"}
{"code": "def body(self, body):\n    if isinstance(body, bytes):\n        body = body.decode('utf-8')\n    self._body = body", "docstring": "Defines response body data.\n\nArguments:\nbody (str|bytes): response body to use.\n\nReturns:\nself: ``pook.Response`` current instance.", "source": "codesearchnet"}
{"code": "def add_rec_new(self, k, val):\n        \n        self.rec_new(val)\n        self[k] = val\n        return val", "docstring": "Recursively add a new value and its children to me, and assign a\nvariable to it.\n\nArgs:\nk (str): The name of the variable to assign.\nval (LispVal): The value to be added and assigned.\n\nReturns:\nLispVal: The added value.", "source": "juraj-google-style"}
{"code": "def add_subscriber(self, connection_id, subscriptions, last_known_block_id):\n    with self._subscribers_cv:\n        self._subscribers[connection_id] = EventSubscriber(connection_id, subscriptions, last_known_block_id)\n    LOGGER.debug('Added Subscriber %s for %s', connection_id, subscriptions)", "docstring": "Register the subscriber for the given event subscriptions.\n\nRaises:\nInvalidFilterError\nOne of the filters in the subscriptions is invalid.", "source": "codesearchnet"}
{"code": "def download_file(self, url):\n    response = requests.get(url, stream=True)\n    response.raise_for_status()\n    return (int(response.headers.get('content-length', 0)), response)", "docstring": "Initiate a streaming download\n\nArgs:\nurl (str): The url to download\n\nReturns:\nA tuple of the content length and the streaming response", "source": "codesearchnet"}
{"code": "def _resolve_if_choice_type(fhir_message: message.Message) -> Optional[message.Message]:\n    if annotation_utils.is_choice_type(fhir_message):\n        choice_field = fhir_message.WhichOneof('choice')\n        if choice_field is None:\n            return None\n        return cast(message.Message, proto_utils.get_value_at_field(fhir_message, choice_field))\n    return fhir_message", "docstring": "Resolve to the proper field if given a choice type, return as-is if not.\n\nEach value in a FHIR choice type is a different field on the protobuf\nrepresentation wrapped under a proto onoeof field.  Therefore, if\nan expression points to a choice type, we should return the populated\nfield -- while just returning the field as-is for non-choice types. This\nway we can simply pass nested messages through this class, and return the\npopulated item when appropriate.\n\nArgs:\nfhir_message: the evaluation result which may or may not be a choice type\n\nReturns:\nThe result value, resolved to the sub-field if it is a choice type.", "source": "github-repos"}
{"code": "def sget_voltage(self, cycle, step, set_number=None):\n    time_00 = time.time()\n    set_number = self._validate_dataset_number(set_number)\n    if (set_number is None):\n        self._report_empty_dataset()\n        return\n    cycle_index_header = self.headers_normal.cycle_index_txt\n    voltage_header = self.headers_normal.voltage_txt\n    step_index_header = self.headers_normal.step_index_txt\n    test = self.datasets[set_number].dfdata\n    if isinstance(step, (list, tuple)):\n        warnings.warn(f'The varialbe step is a list.Should be an integer.{step}')\n        step = step[0]\n    c = test[((test[cycle_index_header] == cycle) & (test[step_index_header] == step))]\n    self.logger.debug(f'(dt: {(time.time() - time_00):4.2f}s)')\n    if (not self.is_empty(c)):\n        v = c[voltage_header]\n        return v\n    else:\n        return None", "docstring": "Returns voltage for cycle, step.\n\nConvinience function; same as issuing\ndfdata[(dfdata[cycle_index_header] == cycle) &\n(dfdata[step_index_header] == step)][voltage_header]\n\nArgs:\ncycle: cycle number\nstep: step number\nset_number: the dataset number (automatic selection if None)\n\nReturns:\npandas.Series or None if empty", "source": "codesearchnet"}
{"code": "def _evalDecodeJpeg(self, image_name, parallelism, num_iters, crop_during_decode=None, crop_window=None, tile=None):\n    ops.reset_default_graph()\n    image_file_path = resource_loader.get_path_to_datafile(os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))\n    if not os.path.exists(image_file_path):\n        image_file_path = resource_loader.get_path_to_datafile(os.path.join('..', '..', 'core', 'lib', 'jpeg', 'testdata', image_name))\n    if tile is None:\n        image_content = variable_scope.get_variable('image_%s' % image_name, initializer=io_ops.read_file(image_file_path))\n    else:\n        single_image = image_ops.decode_jpeg(io_ops.read_file(image_file_path), channels=3, name='single_image')\n        tiled_image = array_ops.tile(single_image, tile)\n        image_content = variable_scope.get_variable('tiled_image_%s' % image_name, initializer=image_ops.encode_jpeg(tiled_image))\n    with session.Session() as sess:\n        self.evaluate(variables.global_variables_initializer())\n        images = []\n        for _ in range(parallelism):\n            if crop_window is None:\n                image = image_ops.decode_jpeg(image_content, channels=3)\n            elif crop_during_decode:\n                image = image_ops.decode_and_crop_jpeg(image_content, crop_window, channels=3)\n            else:\n                image = image_ops.decode_jpeg(image_content, channels=3)\n                image = image_ops.crop_to_bounding_box(image, offset_height=crop_window[0], offset_width=crop_window[1], target_height=crop_window[2], target_width=crop_window[3])\n            images.append(image)\n        r = control_flow_ops.group(*images)\n        for _ in range(3):\n            self.evaluate(r)\n        start_time = time.time()\n        for _ in range(num_iters):\n            self.evaluate(r)\n        end_time = time.time()\n    return end_time - start_time", "docstring": "Evaluate DecodeJpegOp for the given image.\n\nTODO(tanmingxing): add decoding+cropping as well.\n\nArgs:\nimage_name: a string of image file name (without suffix).\nparallelism: the number of concurrent decode_jpeg ops to be run.\nnum_iters: number of iterations for evaluation.\ncrop_during_decode: If true, use fused DecodeAndCropJpeg instead of\nseparate decode and crop ops. It is ignored if crop_window is None.\ncrop_window: if not None, crop the decoded image. Depending on\ncrop_during_decode, cropping could happen during or after decoding.\ntile: if not None, tile the image to composite a larger fake image.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def write_to(self, content, content_type):\n    try:\n        self._api.object_upload(self._bucket, self._key, content, content_type)\n    except Exception as e:\n        raise e", "docstring": "Writes text content to this item.\n\nArgs:\ncontent: the text content to be written.\ncontent_type: the type of text content.\nRaises:\nException if there was an error requesting the item's content.", "source": "codesearchnet"}
{"code": "def stage_signature(vcs, signature):\n    \n    evidence_path = _get_staged_history_path(vcs)\n    staged = get_staged_signatures(vcs)\n    if signature in staged:\n        raise AlreadyStagedError\n    staged.append(signature)\n    string = '\\n'.join(staged)\n    with open(evidence_path, 'w') as f:\n        f.write(string)", "docstring": "Add `signature` to the list of staged signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (basestring)\n\nRaises:\nAlreadyStagedError", "source": "juraj-google-style"}
{"code": "def _to_reader_home(self):\n    self.switch_to_default_content()\n    self.get(_KindleCloudReaderBrowser._CLOUD_READER_URL)\n    if (self.title == u'Problem loading page'):\n        raise ConnectionError\n    login_or_reader_loaded = (lambda br: (br.find_elements_by_id('amzn_kcr') or br.find_elements_by_id('KindleLibraryIFrame')))\n    self._wait(5).until(login_or_reader_loaded)\n    try:\n        self._wait(5).until((lambda br: (br.title == u'Amazon.com Sign In')))\n    except TimeoutException:\n        raise BrowserError('Failed to load Kindle Cloud Reader.')\n    else:\n        self._login()", "docstring": "Navigate to the Cloud Reader library page.\n\nRaises:\nBrowserError: If the KCR homepage could not be loaded.\nConnectionError: If there was a connection error.", "source": "codesearchnet"}
{"code": "def run(self,\n            env: env_tools.PreparedEnv,\n            verbose: bool,\n            previous_failures: Set['Check']) -> CheckResult:\n        \n\n        \n        if previous_failures.intersection(self.dependencies):\n            print(shell_tools.highlight(\n                'Skipped ' + self.command_line_switch(),\n                shell_tools.YELLOW))\n            return CheckResult(\n                self, False, 'Skipped due to dependency failing.', None)\n\n        print(shell_tools.highlight(\n            'Running ' + self.command_line_switch(),\n            shell_tools.GREEN))\n        try:\n            success, message = self.perform_check(env, verbose=verbose)\n            result = CheckResult(self, success, message, None)\n        except Exception as ex:\n            result = CheckResult(self, False, 'Unexpected error.', ex)\n\n        print(shell_tools.highlight(\n            'Finished ' + self.command_line_switch(),\n            shell_tools.GREEN if result.success else shell_tools.RED))\n        if verbose:\n            print(result)\n\n        return result", "docstring": "Evaluates this check.\n\nArgs:\nenv: The prepared python environment to run the check in.\nverbose: When set, more progress output is produced.\nprevious_failures: Checks that have already run and failed.\n\nReturns:\nA CheckResult instance.", "source": "juraj-google-style"}
{"code": "def recipe_cm360_to_dv360(config, auth_dv, auth_cm, auth_sheet, auth_bigquery, recipe_name, recipe_slug, command):\n    dataset(config, {'__comment__': 'Ensure dataset exists.', 'auth': auth_bigquery, 'dataset': recipe_slug})\n    drive(config, {'__comment__': 'Copy the default template to sheet with the recipe name', 'auth': auth_sheet, 'copy': {'source': 'https:\n    cm_to_dv(config, {'__comment': 'Depending on users choice, execute a different part of the solution.', 'auth_dv': auth_dv, 'auth_cm': auth_cm, 'auth_sheets': auth_sheet, 'auth_bigquery': auth_bigquery, 'sheet': recipe_name, 'dataset': recipe_slug, 'command': command})", "docstring": "Allows bulk creating DV360 Insertion Orders and Line Items from CM360.\n\nArgs:\nauth_dv (authentication) - Credentials used for dv.\nauth_cm (authentication) - Credentials used for dv.\nauth_sheet (authentication) - Credentials used for sheet.\nauth_bigquery (authentication) - Credentials used for bigquery.\nrecipe_name (string) - Name of Google Sheet to create.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.\ncommand (choice) - Action to take.", "source": "github-repos"}
{"code": "def plot_script_validate(self, script):\n        \n\n        script.plot_validate([self.matplotlibwidget_1.figure, self.matplotlibwidget_2.figure])\n        self.matplotlibwidget_1.draw()\n        self.matplotlibwidget_2.draw()", "docstring": "checks the plottype of the script and plots it accordingly\nArgs:\nscript: script to be plotted", "source": "juraj-google-style"}
{"code": "def request(self, method_name: str, *args: Any, trim_log_values: bool=False, validate_against_schema: bool=True, id_generator: Optional[Iterator]=None, **kwargs: Any) -> Response:\n    return self.send(Request(method_name, *args, id_generator=id_generator, **kwargs), trim_log_values=trim_log_values, validate_against_schema=validate_against_schema)", "docstring": "Send a request by passing the method and arguments.\n\n>>> client.request(\"cat\", name=\"Yoko\")\n<Response[1]\n\nArgs:\nmethod_name: The remote procedure's method name.\nargs: Positional arguments passed to the remote procedure.\nkwargs: Keyword arguments passed to the remote procedure.\ntrim_log_values: Abbreviate the log entries of requests and responses.\nvalidate_against_schema: Validate response against the JSON-RPC schema.\nid_generator: Iterable of values to use as the \"id\" part of the request.", "source": "codesearchnet"}
{"code": "def recommendations(self, **kwargs):\n        \n        path = self._get_id_path('recommendations')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get a list of recommended movies for a movie.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\npage: (optional) Minimum value of 1.  Expected value is an integer.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def calculate_stress(self, strain):\n    strain = np.array(strain)\n    if (strain.shape == (6,)):\n        strain = Strain.from_voigt(strain)\n    assert (strain.shape == (3, 3)), 'Strain must be 3x3 or voigt-notation'\n    stress_matrix = (self.einsum_sequence(([strain] * (self.order - 1))) / factorial((self.order - 1)))\n    return Stress(stress_matrix)", "docstring": "Calculate's a given elastic tensor's contribution to the\nstress using Einstein summation\n\nArgs:\nstrain (3x3 array-like): matrix corresponding to strain", "source": "codesearchnet"}
{"code": "def _get_music_services_data_xml(soco=None):\n    device = (soco or discovery.any_soco())\n    log.debug('Fetching music services data from %s', device)\n    available_services = device.musicServices.ListAvailableServices()\n    descriptor_list_xml = available_services['AvailableServiceDescriptorList']\n    log.debug('Services descriptor list: %s', descriptor_list_xml)\n    return descriptor_list_xml", "docstring": "Fetch the music services data xml from a Sonos device.\n\nArgs:\nsoco (SoCo): a SoCo instance to query. If none is specified, a\nrandom device will be used. Defaults to `None`.\n\nReturns:\nstr: a string containing the music services data xml", "source": "codesearchnet"}
{"code": "def update_defaults(self, new_defaults, respect_none=False):\n    for (key, value) in six.iteritems(new_defaults):\n        item = self.get_item(key)\n        if (item is None):\n            raise YapconfItemNotFound('Cannot update default for {0}, there is no config item by the name of {1}'.format(key, key), None)\n        item.update_default(value, respect_none)", "docstring": "Update items defaults to the values in the new_defaults dict.\n\nArgs:\nnew_defaults (dict): A key-value pair of new defaults to be\napplied.\nrespect_none (bool): Flag to indicate if ``None`` values should\nconstitute an update to the default.", "source": "codesearchnet"}
{"code": "def _convert_to_wakat_format(seeder_struct):\n\n    def pick_active(seeder_struct, what):\n        '\\n        From the list of dicts, choose only first of such, that contains\\n        ``\"active\": True`` item.\\n\\n        If not found, just pick the first.\\n\\n        Args:\\n            seeder_struct (dict): Dict with bunch of data.\\n            what (str): What key to use in `seeder_struct` to identify the\\n                list of dicts.\\n\\n        Returns:\\n            dict: Active or first dict.\\n        '\n        items = seeder_struct.get(what)\n        if (not items):\n            return None\n        if (not (isinstance(items, list) or isinstance(items, tuple))):\n            items = [items]\n        active_items = [item for item in items if item.get('active')]\n        if (not active_items):\n            return items[0]\n        return active_items[0]\n    if (not seeder_struct):\n        return None\n    active_seed = pick_active(seeder_struct, 'seeds')\n    publisher_contact = pick_active(seeder_struct.get('publisher', {}), 'contacts')\n    if (not active_seed):\n        active_seed = pick_active(seeder_struct, 'seed')\n        if (not active_seed):\n            return None\n    model = Model()\n    model.url = active_seed['url']\n    model.issn = seeder_struct.get('issn')\n    model.title_tags = seeder_struct.get('name')\n    model.publisher_tags = seeder_struct.get('publisher', {}).get('name')\n    model.annotation_tags = seeder_struct.get('comment')\n    if publisher_contact:\n        model.place_tags = publisher_contact.get('address')\n    rules = {}\n    rules['frequency'] = str(seeder_struct.get('frequency'))\n    _add_if_set(rules, 'budget', active_seed.get('budget'))\n    _add_if_set(rules, 'youtube', active_seed.get('youtube'))\n    _add_if_set(rules, 'calendars', active_seed.get('calendars'))\n    _add_if_set(rules, 'javascript', active_seed.get('javascript'))\n    _add_if_set(rules, 'local_traps', active_seed.get('local_traps'))\n    _add_if_set(rules, 'gentle_fetch', active_seed.get('gentle_fetch'))\n    _add_if_set(rules, 'global_reject', active_seed.get('global_reject'))\n    model.rules = rules\n    for key in model.keys():\n        val = getattr(model, key)\n        if (val and ('tags' in key)):\n            setattr(model, key, [{'val': val, 'source': 'Seeder'}])\n    return model.get_mapping()", "docstring": "Convert Seeder's structure to the internal structure used at frontend.\n\nArgs:,\nseeder_struct (dict): Dictionary with Seeder data.\n\nReturns:\nobj: :class:`Model`.", "source": "codesearchnet"}
{"code": "def mutate_list(self, dna_list: List[pg.DNA], global_state: pg.geno.AttributeDict, step: int=0) -> List[pg.DNA]:\n    results = []\n    for dna in dna_list:\n        output = self._mutate(dna, global_state=global_state, step=step)\n        if isinstance(output, list):\n            results.extend(output)\n        else:\n            results.append(output)\n    return results", "docstring": "Mutate the DNA in the input one by one and concatenate their outputs.\n\nUser should override this method instead of `mutate` if mutation depends on\nthe list-wise information. Keyword arguments `global_state` and `step` are\noptional when override.\n\nArgs:\ndna_list: a list of DNA to mutate.\nglobal_state: An `AttributeDict` object as the container of global states.\nstep: Number of examples historically proposed, which can be used for\ndetermining a mutation schedule.\n\nReturns:\na list of DNA as the result of the mutation.", "source": "github-repos"}
{"code": "def _modeIsValid(self, mode):\n        \n        try:\n            \n            return mode in self.modes.keys()\n        except AttributeError as e:\n            \n            if mode in self.isValidMode.keys():\n                if mode in self.isValidMode.keys():\n                    return True\n        return False", "docstring": "Verification of whether the mode is a correct option to be used.\n\nArgs:\n-----\nmode: Mode to be executed.\n\nReturn:\n-------\nTrue if the mode exists in the three main folders.", "source": "juraj-google-style"}
{"code": "class NougatProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = 'AutoImageProcessor'\n    tokenizer_class = 'AutoTokenizer'\n\n    def __init__(self, image_processor, tokenizer):\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n\n    def __call__(self, images=None, text=None, do_crop_margin: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: 'PILImageResampling'=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, data_format: Optional['ChannelDimension']='channels_first', input_data_format: Optional[Union[str, 'ChannelDimension']]=None, text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, text_pair_target: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True):\n        if images is None and text is None:\n            raise ValueError('You need to specify either an `images` or `text` input to process.')\n        if images is not None:\n            inputs = self.image_processor(images, do_crop_margin=do_crop_margin, do_resize=do_resize, size=size, resample=resample, do_thumbnail=do_thumbnail, do_align_long_axis=do_align_long_axis, do_pad=do_pad, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, return_tensors=return_tensors, data_format=data_format, input_data_format=input_data_format)\n        if text is not None:\n            encodings = self.tokenizer(text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose)\n        if text is None:\n            return inputs\n        elif images is None:\n            return encodings\n        else:\n            inputs['labels'] = encodings['input_ids']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        
return self.tokenizer.decode(*args, **kwargs)\n\n    def post_process_generation(self, *args, **kwargs):\n        \n        return self.tokenizer.post_process_generation(*args, **kwargs)", "docstring": "Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor.\n\n[`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the\n[`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information.\n\nArgs:\nimage_processor ([`NougatImageProcessor`]):\nAn instance of [`NougatImageProcessor`]. The image processor is a required input.\ntokenizer ([`NougatTokenizerFast`]):\nAn instance of [`NougatTokenizerFast`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor:\n    criterion = nn.BCEWithLogitsLoss(reduction='none')\n    cross_entropy_loss = criterion(inputs, labels)\n    loss = cross_entropy_loss.mean(1).sum() / num_masks\n    return loss", "docstring": "Args:\ninputs (`torch.Tensor`):\nA float tensor of arbitrary shape.\nlabels (`torch.Tensor`):\nA tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs\n(0 for the negative class and 1 for the positive class).\n\nReturns:\nloss (`torch.Tensor`): The computed loss.", "source": "github-repos"}
{"code": "def stage_tc_indicator_entity(self, indicator_data):\n        \n        path = '@.{value: summary, '\n        path += 'type: type, '\n        path += 'ownerName: ownerName, '\n        path += 'confidence: confidence || `0`, '\n        path += 'rating: rating || `0`}'\n        return self.path_data(indicator_data, path)", "docstring": "Convert JSON data to TCEntity.\n\nArgs:\nindicator_data (str): [description]\n\nReturns:\n[type]: [description]", "source": "juraj-google-style"}
{"code": "def _transform_col(self, x, i):\n        \n        return x.fillna(NAN_INT).map(self.target_encoders[i]).fillna(self.target_mean)", "docstring": "Encode one categorical column into average target values.\nArgs:\nx (pandas.Series): a categorical column to encode\ni (int): column index\nReturns:\nx (pandas.Series): a column with labels.", "source": "juraj-google-style"}
{"code": "def allows_latest(self, version_key_name):\n    \n    if not self.version_keys().has_key(version_key_name):\n      raise RuntimeError(\"service registry doesn't have a version key entry for: {}\".format(version_key_name))\n    if not self.version_keys()[version_key_name].has_key(\"allow_latest\"):\n      raise RuntimeError(\"service registry key {} doesn't have an 'allow_latest' value\".format(\n        version_key_name))\n    return self.version_keys()[version_key_name][\"allow_latest\"]", "docstring": "Does this version key allow 'latest' as an option (e.g. \"latest AMI\" makes sense and is allowed)\nArgs:\nversion_key_name: the version key to check for \"allow_latest\"\nReturns:\nTrue if the version key allows latest, False if it does not\nRaises:\nValueError if the key was not found", "source": "juraj-google-style"}
{"code": "def bounds(self, thr=0, lower_index=0, upper_index=(- 1)):\n    points = self.points[lower_index:upper_index]\n    min_lat = float('inf')\n    min_lon = float('inf')\n    max_lat = (- float('inf'))\n    max_lon = (- float('inf'))\n    for point in points:\n        min_lat = min(min_lat, point.lat)\n        min_lon = min(min_lon, point.lon)\n        max_lat = max(max_lat, point.lat)\n        max_lon = max(max_lon, point.lon)\n    return ((min_lat - thr), (min_lon - thr), (max_lat + thr), (max_lon + thr))", "docstring": "Computes the bounds of the segment, or part of it\n\nArgs:\nlower_index (int, optional): Start index. Defaults to 0\nupper_index (int, optional): End index. Defaults to 0\nReturns:\n:obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that\n(min_lat, min_lon, max_lat, max_lon)", "source": "codesearchnet"}
{"code": "def create_indexes(names, settings=None):\n    \n    for name in names:\n        index = Index(name)\n        try:\n            if not index.exists():\n                logger.debug(\"Creating Elasticsearch index: {0}\".format(name))\n                if settings is None:\n                    index.settings(number_of_shards=1,\n                                   number_of_replicas=1)\n                else:\n                    index.settings(**settings)\n                index.create()\n        except Exception as e:\n            raise ElasticsearchError(\n                \"Elasticsearch error: {0}\".format(e.__str__()))", "docstring": "Create Elasticsearch indexes\n\nArgs:\nnames (list): A list of index names\nsettings (dict): Index settings", "source": "juraj-google-style"}
{"code": "def install_json_output_variables(self, ij=None):\n        \n        if self._install_json_output_variables is None or ij is not None:\n            self._install_json_output_variables = {}\n            \n            if ij is None:\n                ij = self.install_json\n            for p in ij.get('playbook', {}).get('outputVariables') or []:\n                self._install_json_output_variables.setdefault(p.get('name'), []).append(p)\n        return self._install_json_output_variables", "docstring": "Return install.json output variables in a dict with name param as key.\n\nArgs:\nij (dict, optional): Defaults to None. The install.json contents.\n\nReturns:\ndict: A dictionary containing the install.json output variables with name as key.", "source": "juraj-google-style"}
{"code": "def save_image(byteio, imgfmt):\n    \n    from os import path, mkdir\n    ptdir = \"{}.{}\".format(project, task)\n    uuid = str(uuid4())\n\n    \n    idir = path.join(dbdir, ptdir)\n    if not path.isdir(idir):\n        mkdir(idir)\n        \n    ipath = path.join(idir, \"{}.{}\".format(uuid, imgfmt))\n    with open(ipath, 'wb') as f:\n        f.write(byteio)\n\n    return uuid", "docstring": "Saves the specified image to disk.\n\nArgs:\nbyteio (bytes): image bytes to save to disk.\nimgfmt (str): used as the extension of the saved file.\n\nReturns:\nstr: a uuid for the saved image that can be added to the database entry.", "source": "juraj-google-style"}
{"code": "def mean_squared_error(true, pred):\n  \n  result = tf.reduce_sum(\n      tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred))\n  return result", "docstring": "L2 distance between tensors true and pred.\n\nArgs:\ntrue: the ground truth image.\npred: the predicted image.\nReturns:\nmean squared error between ground truth and predicted image.", "source": "juraj-google-style"}
{"code": "def check_oversized_pickle(pickled, name, obj_type, worker):\n    length = len(pickled)\n    if (length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE):\n        return\n    warning_message = 'Warning: The {} {} has size {} when pickled. It will be stored in Redis, which could cause memory issues. This may mean that its definition uses a large array or other object.'.format(obj_type, name, length)\n    push_error_to_driver(worker, ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, warning_message, driver_id=worker.task_driver_id)", "docstring": "Send a warning message if the pickled object is too large.\n\nArgs:\npickled: the pickled object.\nname: name of the pickled object.\nobj_type: type of the pickled object, can be 'function',\n'remote function', 'actor', or 'object'.\nworker: the worker used to send warning message.", "source": "codesearchnet"}
{"code": "def _detect(self):\n    results = []\n    for c in self.contracts:\n        functions = IncorrectERC20InterfaceDetection.detect_incorrect_erc20_interface(c)\n        if functions:\n            info = '{} ({}) has incorrect ERC20 function interface(s):\\n'\n            info = info.format(c.name, c.source_mapping_str)\n            for function in functions:\n                info += '\\t-{} ({})\\n'.format(function.name, function.source_mapping_str)\n            json = self.generate_json_result(info)\n            self.add_functions_to_json(functions, json)\n            results.append(json)\n    return results", "docstring": "Detect incorrect erc20 interface\n\nReturns:\ndict: [contrat name] = set(str)  events", "source": "codesearchnet"}
{"code": "def parse_args(test: ArgList=None) -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument('source_data', help='File path of the source training data to extract features.')\n    parser.add_argument('-o', '--outfile', help='Output file path for the encoded training data.\\n            (default: encoded_data.txt)', default=DEFAULT_OUTPUT_FILENAME)\n    parser.add_argument('--processes', type=int, help='Number of processes to use.\\n          (default: the number of CPUs in the system)', default=None)\n    parser.add_argument('--scale', type=int, help='Weight scale for the entries. The value should be a unsigned\\n         integer. (default: 1)', default=1)\n    if test is None:\n        return parser.parse_args()\n    else:\n        return parser.parse_args(test)", "docstring": "Parses commandline arguments.\n\nArgs:\ntest (typing.Optional[typing.List[str]], optional): Commandline args for testing. Defaults to None.\n\nReturns:\nargparse.Namespace: Parsed data of args.", "source": "github-repos"}
{"code": "def set_np_doc_form(value):\n    global _np_doc_form\n    _np_doc_form = value", "docstring": "Selects the form of the original numpy docstrings.\n\nThis function sets a global variable that controls how a tf-numpy symbol's\ndocstring should refer to the original numpy docstring. If `value` is\n`'inlined'`, the numpy docstring will be verbatim copied into the tf-numpy\ndocstring. Otherwise, a link to the original numpy docstring will be\nadded. Which numpy version the link points to depends on `value`:\n* `'stable'`: the current stable version;\n* `'dev'`: the current development version;\n* pattern `\\d+(\\.\\d+(\\.\\d+)?)?`: `value` will be treated as a version number,\ne.g. '1.16'.\n\nArgs:\nvalue: the value to set the global variable to.", "source": "github-repos"}
{"code": "async def _get_socket_url(self):\n    data = (await self.api.execute_method(self.RTM_START_ENDPOINT, simple_latest=True, no_unreads=True))\n    return data['url']", "docstring": "Get the WebSocket URL for the RTM session.\n\nWarning:\nThe URL expires if the session is not joined within 30\nseconds of the API call to the start endpoint.\n\nReturns:\n:py:class:`str`: The socket URL.", "source": "codesearchnet"}
{"code": "def put(self, dash_id=0):\n        \n        data = request.get_json()\n        updated = self._update_dash(dash_id, data)\n        return build_response(dict(data=updated, code=200))", "docstring": "Update a dash meta and content, return updated dash content.\n\nArgs:\ndash_id: dashboard id.\n\nReturns:\nA dict containing the updated content of that dashboard, not include the meta info.", "source": "juraj-google-style"}
{"code": "def apply(self, *args, **kwargs):\n    arglist = list(args)\n    arglist.insert(1, self)\n    return self.pipeline.apply(*arglist, **kwargs)", "docstring": "Applies a transform or callable to a PValue.\n\nArgs:\n*args: positional arguments.\n**kwargs: keyword arguments.\n\nThe method will insert the pvalue as the next argument following an\noptional first label and a transform/callable object. It will call the\npipeline.apply() method with this modified argument list.", "source": "github-repos"}
{"code": "def ListThreads(self):\n    if self.inferior.is_running:\n        return self.inferior.threads\n    logging.error('Not attached to any process.')\n    return []", "docstring": "List the currently running python threads.\n\nReturns:\nA list of the inferior's thread idents, or None if the debugger is not\nattached to any process.", "source": "codesearchnet"}
{"code": "def elmo_loss2ppl(losses: List[np.ndarray]) -> float:\n    \n    avg_loss = np.mean(losses)\n    return float(np.exp(avg_loss))", "docstring": "Calculates perplexity by loss\n\nArgs:\nlosses: list of numpy arrays of model losses\n\nReturns:\nperplexity : float", "source": "juraj-google-style"}
{"code": "def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):\n    (response, _) = self.music_library._music_lib_search(sonos_playlist.item_id, 0, 1)\n    update_id = response['UpdateID']\n    metadata = to_didl_string(queueable_item)\n    self.avTransport.AddURIToSavedQueue([('InstanceID', 0), ('UpdateID', update_id), ('ObjectID', sonos_playlist.item_id), ('EnqueuedURI', queueable_item.resources[0].uri), ('EnqueuedURIMetaData', metadata), ('AddAtIndex', 4294967295)])", "docstring": "Adds a queueable item to a Sonos' playlist.\n\nArgs:\nqueueable_item (DidlObject): the item to add to the Sonos' playlist\nsonos_playlist (DidlPlaylistContainer): the Sonos' playlist to\nwhich the item should be added", "source": "codesearchnet"}
{"code": "def map_structprop_resnums_to_seqprop_resnums(self, resnums, structprop=None, chain_id=None, seqprop=None, use_representatives=False):\n    resnums = ssbio.utils.force_list(resnums)\n    if use_representatives:\n        seqprop = self.representative_sequence\n        structprop = self.representative_structure\n        chain_id = self.representative_chain\n        if (not structprop):\n            raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')\n    elif ((not seqprop) or (not structprop) or (not chain_id)):\n        raise ValueError('Please specify sequence, structure, and chain ID')\n    if (structprop.id == self.representative_structure.id):\n        full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')\n    else:\n        full_structure_id = '{}-{}'.format(structprop.id, chain_id)\n    aln_id = '{}_{}'.format(seqprop.id, full_structure_id)\n    access_key = '{}_chain_index'.format(aln_id)\n    if (access_key not in seqprop.letter_annotations):\n        raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))\n    chain = structprop.chains.get_by_id(chain_id)\n    chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']\n    final_mapping = {}\n    for resnum in resnums:\n        resnum = int(resnum)\n        resnum_index = chain_structure_resnum_mapping.index(resnum)\n        struct_res_singleaa = structprop.chains.get_by_id(chain_id).seq_record[resnum_index]\n        what = seqprop.letter_annotations[access_key].index((resnum_index + 1))\n        seq_res_singleaa = seqprop[what]\n        sp_resnum = (what + 1)\n        final_mapping[resnum] = sp_resnum\n        format_data = {'seqprop_id': seqprop.id, 'seqprop_resid': seq_res_singleaa, 'seqprop_resnum': sp_resnum, 'structprop_id': structprop.id, 'structprop_chid': chain_id, 'structprop_resid': struct_res_singleaa, 'structprop_resnum': resnum}\n        if (struct_res_singleaa != seq_res_singleaa):\n            log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to structure {structprop_id}-{structprop_chid} residue {structprop_resid}{structprop_resnum}. NOTE: this may be due to structural differences'.format(**format_data))\n        else:\n            log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to structure {structprop_id}-{structprop_chid} residue {structprop_resid}{structprop_resnum}'.format(**format_data))\n    return final_mapping", "docstring": "Map a residue number in any StructProp + chain ID to any SeqProp's residue number.\n\nArgs:\nresnums (int, list): Residue numbers in the structure\nstructprop (StructProp): StructProp object\nchain_id (str): Chain ID to map from\nseqprop (SeqProp): SeqProp object\nuse_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,\nstructprop, and chain_id do not need to be defined.\n\nReturns:\ndict: Mapping of structure residue numbers to sequence residue numbers", "source": "codesearchnet"}
{"code": "def sg_summary_loss(tensor, prefix='losses', name=None):\n    r\n    \n    prefix = '' if prefix is None else prefix + '/'\n    \n    name = prefix + _pretty_name(tensor) if name is None else prefix + name\n    \n    _scalar(name, tf.reduce_mean(tensor))\n    _histogram(name + '-h', tensor)", "docstring": "r\"\"\"Register `tensor` to summary report as `loss`\n\nArgs:\ntensor: A `Tensor` to log as loss\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def send_client_cmd(self, data, cmd=None, via_queue=None):\n    mq_channel = self._connect_mq()\n    if cmd:\n        data['cmd'] = cmd\n    if via_queue:\n        mq_channel.basic_publish(exchange='', routing_key=via_queue, body=json.dumps(data))\n    else:\n        mq_channel.basic_publish(exchange=self.prv_exchange, routing_key='', body=json.dumps(data))", "docstring": "Send arbitrary cmd and data to client\n\nif queue name passed by \"via_queue\" parameter,\nthat queue will be used instead of users private exchange.\nArgs:\ndata: dict\ncmd: string\nvia_queue: queue name,", "source": "codesearchnet"}
{"code": "def extract_pivots(self, assignments):\n    raise NotImplementedError()", "docstring": "Find values for every variable that appears in this term.\n\nThis finds all variables that appear in this term and limits them to the\nvalues they appear together with. For example, consider the equation\nt = v1 | (t = v2 & (t = v2 | t = v3))\nHere, t can be limited to [v1, v2]. (v3 is impossible.)\n\nArgs:\nassignments: The current \"upper bound\", i.e. all values that are still\npossible for variables. Used for extracting pivots out of Eq(var, var).\n\nReturns:\nA dictionary mapping strings (variable names) to sets of strings (value\nor variable names).", "source": "github-repos"}
{"code": "def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True):\n    flow = cls(workdir, manager=manager)\n    flow.register_scf_task(scf_input)\n    scf_task = flow[0][0]\n    (scf_ngkpt, ph_ngqpt) = (np.array(scf_input['ngkpt']), np.array(ph_ngqpt))\n    if any(((scf_ngkpt % ph_ngqpt) != 0)):\n        raise ValueError(('ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s' % (ph_ngqpt, scf_ngkpt)))\n    qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0, 0, 0), kptopt=1).points\n    for qpt in qpoints:\n        if (np.allclose(qpt, 0) and with_becs):\n            ph_work = BecWork.from_scf_task(scf_task)\n        else:\n            ph_work = PhononWork.from_scf_task(scf_task, qpoints=qpt)\n        flow.register_work(ph_work)\n    if allocate:\n        flow.allocate()\n    return flow", "docstring": "Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run.\n\nArgs:\nworkdir: Working directory of the flow.\nscf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.\nph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for\nelectrons. e.g if ngkpt = (8, 8, 8). ph_ngqpt = (4, 4, 4) is a valid choice\nwhereas ph_ngqpt = (3, 3, 3) is not!\nwith_becs: True if Born effective charges are wanted.\nmanager: :class:`TaskManager` object. Read from `manager.yml` if None.\nallocate: True if the flow should be allocated before returning.\n\nReturn:\n:class:`PhononFlow` object.", "source": "codesearchnet"}
{"code": "def get_lowest_decomposition(self, composition):\n        \n\n        entries_list = []\n        elements = [e.symbol for e in composition.elements]\n        for i in range(len(elements)):\n            for combi in itertools.combinations(elements, i + 1):\n                chemsys = [Element(e) for e in combi]\n                x = self.costdb.get_entries(chemsys)\n                entries_list.extend(x)\n        try:\n            pd = PhaseDiagram(entries_list)\n            return pd.get_decomposition(composition)\n        except IndexError:\n            raise ValueError(\"Error during PD building; most likely, \"\n                             \"cost data does not exist!\")", "docstring": "Get the decomposition leading to lowest cost\n\nArgs:\ncomposition:\nComposition as a pymatgen.core.structure.Composition\nReturns:\nDecomposition as a dict of {Entry: amount}", "source": "juraj-google-style"}
{"code": "def _GetResponseClass(self, method_descriptor):\n    if (method_descriptor.containing_service != self.descriptor):\n        raise RuntimeError('GetResponseClass() given method descriptor for wrong service type.')\n    return method_descriptor.output_type._concrete_class", "docstring": "Returns the class of the response protocol message.\n\nArgs:\nmethod_descriptor: Descriptor of the method for which to return the\nresponse protocol message class.\n\nReturns:\nA class that represents the output protocol message of the specified\nmethod.", "source": "codesearchnet"}
{"code": "def linkToChannelInputFile(self, session, channelInputFile, force=False):\n    if ((self.channelInputFile is not None) and (not force)):\n        return\n    self.channelInputFile = channelInputFile\n    orderedLinks = channelInputFile.getOrderedLinks(session)\n    timeSteps = self.timeSteps\n    for timeStep in timeSteps:\n        linkDatasets = timeStep.linkDatasets\n        for (l, linkDataset) in enumerate(linkDatasets):\n            streamLink = orderedLinks[l]\n            streamNodes = streamLink.nodes\n            linkDataset.link = streamLink\n            nodeDatasets = linkDataset.nodeDatasets\n            if ((len(nodeDatasets) > 0) and (len(streamNodes) > 0)):\n                for (n, nodeDataset) in enumerate(nodeDatasets):\n                    nodeDataset.node = streamNodes[n]\n    session.add(self)\n    session.commit()", "docstring": "Create database relationships between the link node dataset and the channel input file.\n\nThe link node dataset only stores references to the links and nodes--not the geometry. The link and node\ngeometries are stored in the channel input file. The two files must be linked with database relationships to\nallow the creation of link node dataset visualizations.\n\nThis process is not performed automatically during reading, because it can be very costly in terms of read time.\nThis operation can only be performed after both files have been read into the database.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\nchannelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with\nthis link node dataset file.\nforce (bool, optional): Force channel input file reassignment. When false (default), channel input file\nassignment is skipped if it has already been performed.", "source": "codesearchnet"}
{"code": "def parse(path):\n        \n\n        def paired(iterable):\n            \n            cursor = iter(iterable)\n            return zip(cursor, cursor)\n\n        def unwrap_if_sexp_symbol(datum):\n            \n            return datum.value() if isinstance(datum, sexpdata.Symbol) else datum\n\n        def sexp2dict(sexps):\n            \n            newdict = {}\n\n            \n            for key, value in paired(sexps):\n                key = str(unwrap_if_sexp_symbol(key)).lstrip(':')\n\n                \n                if isinstance(value, list) and value:\n                    if isinstance(value[0], list):\n                        newdict[key] = [sexp2dict(val) for val in value]\n                    elif isinstance(value[0], sexpdata.Symbol):\n                        newdict[key] = sexp2dict(value)\n                    else:\n                        newdict[key] = value\n                else:\n                    newdict[key] = value\n\n            return newdict\n\n        conf = sexpdata.loads(Util.read_file(path))\n        return sexp2dict(conf)", "docstring": "Parse an ``.ensime`` config file from S-expressions.\n\nArgs:\npath (str): Path of an ``.ensime`` file to parse.\n\nReturns:\ndict: Configuration values with string keys.", "source": "juraj-google-style"}
{"code": "def load(fin, dtype=np.float32, max_vocab=None):\n    \n    vocab = {}\n    arr = None\n    i = 0\n    for line in fin:\n        if max_vocab is not None and i >= max_vocab:\n            break\n        try:\n            token, v = _parse_line(line, dtype)\n        except (ValueError, IndexError):\n            raise ParseError(b'Parsing error in line: ' + line)\n        if token in vocab:\n            parse_warn(b'Duplicated vocabulary ' + token)\n            continue\n        if arr is None:\n            arr = np.array(v, dtype=dtype).reshape(1, -1)\n        else:\n            if arr.shape[1] != len(v):\n                raise ParseError(b'Vector size did not match in line: ' + line)\n            arr = np.append(arr, [v], axis=0)\n        vocab[token] = i\n        i += 1\n    return arr, vocab", "docstring": "Load word embedding file.\n\nArgs:\nfin (File): File object to read. File should be open for reading ascii.\ndtype (numpy.dtype): Element data type to use for the array.\nmax_vocab (int): Number of vocabulary to read.\n\nReturns:\nnumpy.ndarray: Word embedding representation vectors\ndict: Mapping from words to vector indices.", "source": "juraj-google-style"}
{"code": "def _get_upload_cmd(self, mirror=False):\n        \n        if mirror:\n            dest_uri = self.s3_mirror_uri\n        else:\n            dest_uri = self.s3_version_uri\n\n        cmd = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(self.artifact_path,\n                                                                                  dest_uri, self.env)\n        return cmd", "docstring": "Generate the S3 CLI upload command\n\nArgs:\nmirror (bool): If true, uses a flat directory structure instead of nesting under a version.\n\nReturns:\nstr: The full CLI command to run.", "source": "juraj-google-style"}
{"code": "def transformer_decoder_attention_unit(x, hparams, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True):\n    with tf.variable_scope('self_attention'):\n        y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, (hparams.attention_key_channels or hparams.hidden_size), (hparams.attention_value_channels or hparams.hidden_size), hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, cache=None, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k)\n        x = common_layers.layer_postprocess(x, y, hparams)\n    if (encoder_output is not None):\n        with tf.variable_scope('encdec_attention'):\n            y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, (hparams.attention_key_channels or hparams.hidden_size), (hparams.attention_value_channels or hparams.hidden_size), hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k)\n            x = common_layers.layer_postprocess(x, y, hparams)\n    return x", "docstring": "Applies multihead attention function which is parametrised for decoding.\n\nArgs:\nx: input (decoder input)\nhparams: model hyper-parameters\nencoder_output: Encoder representation. [batch_size, input_length,\nhidden_dim]\ndecoder_self_attention_bias: Bias and mask weights for decoder\nself-attention. [batch_size, decoder_length]\nencoder_decoder_attention_bias: Bias and mask weights for encoder-decoder\nattention. [batch_size, input_length]\nattention_dropout_broadcast_dims: Fpr noise broadcasting in the dropout\nlayers to save memory during training\nsave_weights_to: an optional dictionary to capture attention weights for\nvisualization; the weights tensor will be appended there under a string\nkey created from the variable scope (including name).\nmake_image_summary: Whether to make an attention image summary.\n\nReturns:\nThe output tensor", "source": "codesearchnet"}
{"code": "def _get_proxy_info(self, _=None):\n        \n        \n        (target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint)\n\n        \n        \n        \n        \n        \n        \n        sock = None\n\n        if target_path:\n            sock = self._ssh_tunnel.forward_unix(path=target_path)\n        else:\n            sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port)\n\n        \n        return SSHTunnelProxyInfo(sock=sock)", "docstring": "Generate a ProxyInfo class from a connected SSH transport\n\nArgs:\n_ (None): Ignored.  This is just here as the ProxyInfo spec requires it.\n\n\nReturns:\nSSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH", "source": "juraj-google-style"}
{"code": "def setupArgparse():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('callsign', help='Callsign of radio')\n    parser.add_argument('id', type=int, help='ID number radio')\n    parser.add_argument('-l', '--loopback', action='store_true', help='Use software loopback serial port')\n    parser.add_argument('-p', '--port', default='/dev/ttyUSB0', help='Physical serial port of radio')\n    return parser.parse_args()", "docstring": "Sets up argparse module to create command line options and parse them.\n\nUses the argparse module to add arguments to the command line for\nfaradayio-cli. Once the arguments are added and parsed the arguments are\nreturned\n\nReturns:\nargparse.Namespace: Populated namespace of arguments", "source": "codesearchnet"}
{"code": "def __call__(self, request: beam.Row, *args, **kwargs):\n    embedded_query = request['text']\n    base_query = f'{self.hybrid_fields}=>[KNN {self.k} @{self.vector_field} $vector AS vector_score]'\n    query = Query(base_query).return_fields(*self.return_fields).paging(0, self.k).dialect(2)\n    params_dict = {'vector': np.array(embedded_query).astype(dtype=np.float32).tobytes()}\n    results = self.client.ft(self.index_name).search(query, params_dict)\n    return (beam.Row(text=embedded_query), beam.Row(docs=results.docs))", "docstring": "Reads a row from the redis Vector DB and returns\na `Tuple` of request and response.\n\nArgs:\nrequest: the input `beam.Row` to enrich.", "source": "github-repos"}
{"code": "def merge_collections(collections, force_dense=False, sampling_rate='auto'):\n    \n    if len(listify(collections)) == 1:\n        return collections\n\n    levels = set([c.level for c in collections])\n    if len(levels) > 1:\n        raise ValueError(\"At the moment, it's only possible to merge \"\n                         \"Collections at the same level of analysis. You \"\n                         \"passed collections at levels: %s.\" % levels)\n\n    variables = list(chain(*[c.variables.values() for c in collections]))\n    cls = collections[0].__class__\n\n    variables = cls.merge_variables(variables, sampling_rate=sampling_rate)\n\n    if isinstance(collections[0], BIDSRunVariableCollection):\n        if sampling_rate == 'auto':\n            rates = [var.sampling_rate for var in variables\n                     if isinstance(var, DenseRunVariable)]\n\n            sampling_rate = rates[0] if rates else None\n\n        return cls(variables, sampling_rate)\n\n    return cls(variables)", "docstring": "Merge two or more collections at the same level of analysis.\n\nArgs:\ncollections (list): List of Collections to merge.\nsampling_rate (int, str): Sampling rate to use if it becomes necessary\nto resample DenseRunVariables. Either an integer or 'auto' (see\nmerge_variables docstring for further explanation).\n\nReturns:\nA BIDSVariableCollection or BIDSRunVariableCollection, depending\non the type of the input collections.", "source": "juraj-google-style"}
{"code": "def get_block_sysfee(self, height, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_BLOCK_SYS_FEE, params=[height], id=id, endpoint=endpoint)", "docstring": "Get the system fee of a block by height.  This is used in calculating gas claims\nArgs:\nheight: (int) height of the block to lookup\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def Mean(self):\n    mu = 0.0\n    for (x, p) in self.d.iteritems():\n        mu += (p * x)\n    return mu", "docstring": "Computes the mean of a PMF.\n\nReturns:\nfloat mean", "source": "codesearchnet"}
{"code": "def process_data(key, data_list, result_info_key, identifier_keys):\n    master_data = []\n    for item_data in data_list:\n        data = item_data[key]\n        if (data is None):\n            current_item_data = {}\n        elif (key == 'property/value'):\n            current_item_data = data['value']\n        elif (key == 'property/details'):\n            top_level_keys = ['property', 'assessment']\n            current_item_data = flatten_top_level_keys(data, top_level_keys)\n        elif (key == 'property/school'):\n            current_item_data = data['school']\n            school_list = []\n            for school_type_key in current_item_data:\n                schools = current_item_data[school_type_key]\n                for school in schools:\n                    school['school_type'] = school_type_key\n                    school['school_address'] = school['address']\n                    school['school_zipcode'] = school['zipcode']\n                    school_list.append(school)\n            current_item_data = school_list\n        elif (key == 'property/value_forecast'):\n            current_item_data = {}\n            for month_key in data:\n                current_item_data[month_key] = data[month_key]['value']\n        elif (key in ['property/value_within_block', 'property/rental_value_within_block']):\n            current_item_data = flatten_top_level_keys(data, ['housecanary_value_percentile_range', 'housecanary_value_sqft_percentile_range', 'client_value_percentile_range', 'client_value_sqft_percentile_range'])\n        elif (key in ['property/zip_details', 'zip/details']):\n            top_level_keys = ['multi_family', 'single_family']\n            current_item_data = flatten_top_level_keys(data, top_level_keys)\n        else:\n            current_item_data = data\n        if isinstance(current_item_data, dict):\n            _set_identifier_fields(current_item_data, item_data, result_info_key, identifier_keys)\n            master_data.append(current_item_data)\n        else:\n            for item in current_item_data:\n                _set_identifier_fields(item, item_data, result_info_key, identifier_keys)\n            master_data.extend(current_item_data)\n    return master_data", "docstring": "Given a key as the endpoint name, pulls the data for that endpoint out\nof the data_list for each address, processes the data into a more\nexcel-friendly format and returns that data.\n\nArgs:\nkey: the endpoint name of the data to process\ndata_list: the main data list to take the data from\nresult_info_key: the key in api_data dicts that contains the data results\nidentifier_keys: the list of keys used as requested identifiers\n(address, zipcode, block_id, etc)\n\nReturns:\nA list of dicts (rows) to be written to a worksheet", "source": "codesearchnet"}
{"code": "def heightmap_get_slope(hm: np.ndarray, x: int, y: int) -> float:\n    \n    return float(lib.TCOD_heightmap_get_slope(_heightmap_cdata(hm), x, y))", "docstring": "Return the slope between 0 and (pi / 2) at given coordinates.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nx (int): The x coordinate.\ny (int): The y coordinate.\n\nReturns:\nfloat: The steepness at ``x``, ``y``.  From 0 to (pi / 2)", "source": "juraj-google-style"}
{"code": "def run(self, source, **kwargs):\n        \n        kwargs['output'] = self.__graph__()\n        if isinstance(source, str):\n            import json\n            source = json.loads(source)\n        self.source = source\n        super(JSONProcessor, self).run(**kwargs)\n        self.output = kwargs['output']\n        return output", "docstring": "Method takes a JSON source and any keywords and transforms from\nJSON to Lean BIBFRAME 2.0 triples\n\nArgs:\n\n----\nsource: str, dict", "source": "juraj-google-style"}
{"code": "def lint(self, targets):\n    LinterRunner.targets = targets\n    linters = self._config.get_linter_classes()\n    with Pool() as pool:\n        out_err_none = pool.map(LinterRunner.run, linters)\n    out_err = [item for item in out_err_none if (item is not None)]\n    (stdout, stderr) = zip(*out_err)\n    return (sorted(chain.from_iterable(stdout)), chain.from_iterable(stderr))", "docstring": "Run linters in parallel and sort all results.\n\nArgs:\ntargets (list): List of files and folders to lint.", "source": "codesearchnet"}
{"code": "def save_cache(cache):\n    with open(settings.DUP_FILTER_FILE, 'w') as f:\n        f.write(json.dumps(list(cache)))", "docstring": "Save cahce to the disk.\n\nArgs:\ncache (set): Set with cached data.", "source": "codesearchnet"}
{"code": "def Scalars(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.Scalars(tag)", "docstring": "Retrieve the scalar events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.ScalarEvents`.", "source": "codesearchnet"}
{"code": "def _update_workflow_stages(stage_data: dict, workflow_stage: WorkflowStage, docker: DockerSwarmClient):\n    service_status_complete = []\n    if (stage_data['status'] != 'complete'):\n        for (service_id, service_dict) in stage_data['services'].items():\n            service_state = docker.get_service_state(service_id)\n            if (service_state == 'shutdown'):\n                docker.delete_service(service_id)\n            service_dict['status'] = service_state\n            service_dict['complete'] = (service_state == 'shutdown')\n            service_status_complete.append(service_dict['complete'])\n            if all(service_status_complete):\n                LOG.info('Workflow stage service %s complete!', workflow_stage.id)\n                stage_data['status'] = 'complete'", "docstring": "Check and update the status of a workflow stage.\n\nThis function checks and updates the status of a workflow stage\nspecified by the parameters in the specified stage_data dictionary.\n\nIf the workflow stage is not marked as complete, this function will\ncheck with the Docker Swarm API on the status of Docker services\ndefined for the stage. If **all** services are found to be complete\n(based on their service state being reported as 'shutdown',\nthe workflow stage is marked complete.\n\nThis function is used by `execute_processing_block`.\n\nTODO(BMo) This function will need refactoring at some point as part\nof an update to the way workflow state metadata is stored in the\nconfiguration database. Currently the stage_data dictionary\nis a bit of a hack for a badly specified Configuration Database\nbacked WorkflowStage object.\n\nArgs:\nstage_data (dict): Dictionary holding workflow stage metadata.\nworkflow_stage (WorkflowStage): Workflow stage data object.\ndocker (DockerClient): Docker Swarm Client object.", "source": "codesearchnet"}
{"code": "def predict(self, features, verbose=False):\n    probs = self.clf.predict_proba(features)\n    if verbose:\n        labels = self.labels.classes_\n        res = []\n        for prob in probs:\n            vals = {}\n            for (i, val) in enumerate(prob):\n                label = labels[i]\n                vals[label] = val\n            res.append(vals)\n        return res\n    else:\n        return probs", "docstring": "Probability estimates of each feature\n\nSee sklearn's SGDClassifier predict and predict_proba methods.\n\nArgs:\nfeatures (:obj:`list` of :obj:`list` of :obj:`float`)\nverbose: Boolean, optional. If true returns an array where each\nelement is a dictionary, where keys are labels and values are\nthe respective probabilities. Defaults to False.\nReturns:\nArray of array of numbers, or array of dictionaries if verbose i\nTrue", "source": "codesearchnet"}
{"code": "def run(self, tasklet, **kwds):\n    start_time = time.time()\n    n = 1\n    while True:\n        e = None\n        result = None\n        got_result = False\n        try:\n            result = (yield tasklet(**kwds))\n            got_result = True\n            if (not self.should_retry(result)):\n                raise ndb.Return(result)\n        except runtime.DeadlineExceededError:\n            logging.debug('Tasklet has exceeded request deadline after %s seconds total', (time.time() - start_time))\n            raise\n        except self.retriable_exceptions as e:\n            pass\n        if (n == 1):\n            logging.debug('Tasklet is %r', tasklet)\n        delay = self.retry_params.delay(n, start_time)\n        if (delay <= 0):\n            logging.debug('Tasklet failed after %s attempts and %s seconds in total', n, (time.time() - start_time))\n            if got_result:\n                raise ndb.Return(result)\n            elif (e is not None):\n                raise e\n            else:\n                assert False, 'Should never reach here.'\n        if got_result:\n            logging.debug('Got result %r from tasklet.', result)\n        else:\n            logging.debug('Got exception \"%r\" from tasklet.', e)\n        logging.debug('Retry in %s seconds.', delay)\n        n += 1\n        (yield tasklets.sleep(delay))", "docstring": "Run a tasklet with retry.\n\nThe retry should be transparent to the caller: if no results\nare successful, the exception or result from the last retry is returned\nto the caller.\n\nArgs:\ntasklet: the tasklet to run.\n**kwds: keywords arguments to run the tasklet.\n\nRaises:\nThe exception from running the tasklet.\n\nReturns:\nThe result from running the tasklet.", "source": "codesearchnet"}
{"code": "def get_idiomatic_name_in_language(cls, name, language):\n        \n        if language in cls.idiomatic_methods_cache:\n            m = cls.idiomatic_methods_cache[language]\n            if not m:\n                return name\n            return m(name)\n\n        found, method = load_language_plugins(language, 'get_idiomatic_name')\n        if found:\n            cls.idiomatic_methods_cache[language] = method\n            if method:\n                return method(name)\n            else:\n                return name\n\n        module = importlib.import_module('.lang.%s' % language, package=\"monolithe.generators\")\n\n        if not hasattr(module, 'get_idiomatic_name'):\n            cls.idiomatic_methods_cache[language] = None\n            return name\n\n        method = getattr(module, 'get_idiomatic_name')\n        cls.idiomatic_methods_cache[language] = method\n        return method(name)", "docstring": "Get the name for the given language\n\nArgs:\nname (str): the name to convert\nlanguage (str): the language to use\n\nReturns:\na name in the given language\n\nExample:\nget_idiomatic_name_in_language(\"EnterpriseNetwork\", \"python\")\n>>> enterprise_network", "source": "juraj-google-style"}
{"code": "def signed_to_twos_comp(val: int, n_bits: int) -> int:\n    \n    assert n_bits % 8 == 0, \"Must specify a whole number of bytes\"\n    n_bytes = n_bits \n    b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=True)\n    return int.from_bytes(b, byteorder=sys.byteorder, signed=False)", "docstring": "Convert a signed integer to its \"two's complement\" representation.\n\nArgs:\nval: signed integer\nn_bits: number of bits (which must reflect a whole number of bytes)\n\nReturns:\nunsigned integer: two's complement version", "source": "juraj-google-style"}
{"code": "def _tensor_product(self, other, reverse=False):\n        \n        \n        if not isinstance(other, Choi):\n            other = Choi(other)\n\n        if reverse:\n            input_dims = self.input_dims() + other.input_dims()\n            output_dims = self.output_dims() + other.output_dims()\n            data = _bipartite_tensor(\n                other.data,\n                self._data,\n                shape1=other._bipartite_shape,\n                shape2=self._bipartite_shape)\n        else:\n            input_dims = other.input_dims() + self.input_dims()\n            output_dims = other.output_dims() + self.output_dims()\n            data = _bipartite_tensor(\n                self._data,\n                other.data,\n                shape1=self._bipartite_shape,\n                shape2=other._bipartite_shape)\n        return Choi(data, input_dims, output_dims)", "docstring": "Return the tensor product channel.\n\nArgs:\nother (QuantumChannel): a quantum channel.\nreverse (bool): If False return self ⊗ other, if True return\nif True return (other ⊗ self) [Default: False\nReturns:\nChoi: the tensor product channel as a Choi object.\n\nRaises:\nQiskitError: if other is not a QuantumChannel subclass.", "source": "juraj-google-style"}
{"code": "def _cancel_batch(batch_fn, cancel_fn, ops):\n    canceled = []\n    failed = []\n\n    def handle_cancel_response(request_id, response, exception):\n        'Callback for the cancel response.'\n        del response\n        if exception:\n            msg = ('error %s: %s' % (exception.resp.status, exception.resp.reason))\n            if (exception.resp.status == FAILED_PRECONDITION_CODE):\n                detail = json.loads(exception.content)\n                status = detail.get('error', {}).get('status')\n                if (status == FAILED_PRECONDITION_STATUS):\n                    msg = 'Not running'\n            failed.append({'name': request_id, 'msg': msg})\n        else:\n            canceled.append({'name': request_id})\n        return\n    batch = batch_fn(callback=handle_cancel_response)\n    ops_by_name = {}\n    for op in ops:\n        op_name = op.get_field('internal-id')\n        ops_by_name[op_name] = op\n        batch.add(cancel_fn(name=op_name, body={}), request_id=op_name)\n    batch.execute()\n    canceled_ops = [ops_by_name[op['name']] for op in canceled]\n    error_messages = []\n    for fail in failed:\n        op = ops_by_name[fail['name']]\n        error_messages.append((\"Error canceling '%s': %s\" % (get_operation_full_job_id(op), fail['msg'])))\n    return (canceled_ops, error_messages)", "docstring": "Cancel a batch of operations.\n\nArgs:\nbatch_fn: API-specific batch function.\ncancel_fn: API-specific cancel function.\nops: A list of operations to cancel.\n\nReturns:\nA list of operations canceled and a list of error messages.", "source": "codesearchnet"}
{"code": "def _to_enos_roles(roles):\n    \n\n    def to_host(h):\n        extra = {}\n        \n        \n        for nic, roles in h[\"nics\"]:\n            for role in roles:\n                extra[role] = nic\n\n        return Host(h[\"host\"], user=\"root\", extra=extra)\n\n    enos_roles = {}\n    for role, hosts in roles.items():\n        enos_roles[role] = [to_host(h) for h in hosts]\n    logger.debug(enos_roles)\n    return enos_roles", "docstring": "Transform the roles to use enoslib.host.Host hosts.\n\nArgs:\nroles (dict): roles returned by\n:py:func:`enoslib.infra.provider.Provider.init`", "source": "juraj-google-style"}
{"code": "def MakeParser(prog):\n  \n\n  def AddStandardOptions(parser, *args):\n    \n    if 'application' in args:\n      parser.add_argument('-a', '--application', default='.',\n                          help='The path to the Python App Engine App')\n    if 'format' in args:\n      \n      \n      \n      parser.add_argument('-f', '--format', default='rest',\n                          choices=['rest'],\n                          help='The requested API protocol type (ignored)')\n    if 'hostname' in args:\n      help_text = ('Default application hostname, if none is specified '\n                   'for API service.')\n      parser.add_argument('--hostname', help=help_text)\n    if 'output' in args:\n      parser.add_argument('-o', '--output', default='.',\n                          help='The directory to store output files')\n    if 'language' in args:\n      parser.add_argument('language',\n                          help='The target output programming language')\n    if 'service' in args:\n      parser.add_argument('service', nargs='+',\n                          help='Fully qualified service class name')\n    if 'discovery_doc' in args:\n      parser.add_argument('discovery_doc', nargs=1,\n                          help='Path to the discovery document')\n    if 'build_system' in args:\n      parser.add_argument('-bs', '--build_system', default='default',\n                          help='The target build system')\n\n  parser = _EndpointsParser(prog=prog)\n  subparsers = parser.add_subparsers(\n      title='subcommands', metavar='{%s}' % ', '.join(_VISIBLE_COMMANDS))\n\n  get_client_lib = subparsers.add_parser(\n      'get_client_lib', help=('Generates discovery documents and client '\n                              'libraries from service classes'))\n  get_client_lib.set_defaults(callback=_GetClientLibCallback)\n  AddStandardOptions(get_client_lib, 'application', 'hostname', 'output',\n                     'language', 'service', 'build_system')\n\n  get_discovery_doc = subparsers.add_parser(\n      'get_discovery_doc',\n      help='Generates discovery documents from service classes')\n  get_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)\n  AddStandardOptions(get_discovery_doc, 'application', 'format', 'hostname',\n                     'output', 'service')\n\n  get_openapi_spec = subparsers.add_parser(\n      'get_openapi_spec',\n      help='Generates OpenAPI (Swagger) specs from service classes')\n  get_openapi_spec.set_defaults(callback=_GenOpenApiSpecCallback)\n  AddStandardOptions(get_openapi_spec, 'application', 'hostname', 'output',\n                     'service')\n  get_openapi_spec.add_argument('--x-google-api-name', action='store_true',\n                                help=\"Add the 'x-google-api-name' field to the generated spec\")\n\n  \n  \n  \n  get_swagger_spec = subparsers.add_parser(\n      'get_swagger_spec',\n      help='Generates OpenAPI (Swagger) specs from service classes')\n  get_swagger_spec.set_defaults(callback=_GenOpenApiSpecCallback)\n  AddStandardOptions(get_swagger_spec, 'application', 'hostname', 'output',\n                     'service')\n\n  \n  \n  gen_api_config = subparsers.add_parser('gen_api_config')\n  gen_api_config.set_defaults(callback=_GenApiConfigCallback)\n  AddStandardOptions(gen_api_config, 'application', 'hostname', 'output',\n                     'service')\n\n  gen_discovery_doc = subparsers.add_parser('gen_discovery_doc')\n  gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)\n  
AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname',\n                     'output', 'service')\n\n  gen_client_lib = subparsers.add_parser('gen_client_lib')\n  gen_client_lib.set_defaults(callback=_GenClientLibCallback)\n  AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc',\n                     'build_system')\n\n  return parser", "docstring": "Create an argument parser.\n\nArgs:\nprog: The name of the program to use when outputting help text.\n\nReturns:\nAn argparse.ArgumentParser built to specification.", "source": "juraj-google-style"}
{"code": "def tanh_shrink(x):\n    if any_symbolic_tensors((x,)):\n        return TanhShrink().symbolic_call(x)\n    return backend.nn.tanh_shrink(x)", "docstring": "Applies the tanh shrink function element-wise.\n\nIt is defined as:\n\n`f(x) = x - tanh(x)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor of the same shape as `x`, where each element is\ntransformed according to the tanh shrink operation.\n\nExample:\n\n>>> x = np.array([ -1., 0., 1.])\n>>> x_tanh_shrink = keras.ops.tanh_shrink(x)\n>>> print(x_tanh_shrink)\narray([-0.23840584  0.  0.23840584], shape=(3,), dtype=float64)", "source": "github-repos"}
{"code": "def get_matcher(patterns, case_sensitive):\n    if (not patterns):\n        return (lambda name: True)\n    if case_sensitive:\n        return partial(match_any, patterns)\n    else:\n        return partial(imatch_any, patterns)", "docstring": "Get a callable that matches names against the given patterns.\n\nArguments:\npatterns (list): A list of wildcard pattern. e.g. ``[\"*.py\",\n\"*.pyc\"]``\ncase_sensitive (bool): If ``True``, then the callable will be case\nsensitive, otherwise it will be case insensitive.\n\nReturns:\ncallable: a matcher that will return `True` if the name given as\nan argument matches any of the given patterns.\n\nExample:\n>>> from fs import wildcard\n>>> is_python = wildcard.get_matcher(['*.py'], True)\n>>> is_python('__init__.py')\nTrue\n>>> is_python('foo.txt')\nFalse", "source": "codesearchnet"}
{"code": "def plot_conductivity_mu(self, temp=600, output='eig', relaxation_time=1e-14, xlim=None):\n    import matplotlib.pyplot as plt\n    cond = self._bz.get_conductivity(relaxation_time=relaxation_time, output=output, doping_levels=False)[temp]\n    plt.figure(figsize=(9, 7))\n    plt.semilogy(self._bz.mu_steps, cond, linewidth=3.0)\n    self._plot_bg_limits()\n    self._plot_doping(temp)\n    if (output == 'eig'):\n        plt.legend(['$\\\\Sigma_1$', '$\\\\Sigma_2$', '$\\\\Sigma_3$'])\n    if (xlim is None):\n        plt.xlim((- 0.5), (self._bz.gap + 0.5))\n    else:\n        plt.xlim(xlim)\n    plt.ylim([(10000000000000.0 * relaxation_time), (1e+20 * relaxation_time)])\n    plt.ylabel('conductivity,\\n $\\\\Sigma$ (1/($\\\\Omega$ m))', fontsize=30.0)\n    plt.xlabel('E-E$_f$ (eV)', fontsize=30.0)\n    plt.xticks(fontsize=25)\n    plt.yticks(fontsize=25)\n    plt.tight_layout()\n    return plt", "docstring": "Plot the conductivity in function of Fermi level. Semi-log plot\n\nArgs:\ntemp: the temperature\nxlim: a list of min and max fermi energy by default (0, and band\ngap)\ntau: A relaxation time in s. By default none and the plot is by\nunits of relaxation time\n\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def _get_task_with_policy(queue_name, task_id, owner):\n    now = datetime.datetime.utcnow()\n    task = WorkQueue.query.filter_by(queue_name=queue_name, task_id=task_id).with_lockmode('update').first()\n    if (not task):\n        raise TaskDoesNotExistError(('task_id=%r' % task_id))\n    lease_delta = (now - task.eta)\n    if (lease_delta > datetime.timedelta(0)):\n        db.session.rollback()\n        raise LeaseExpiredError(('queue=%r, task_id=%r expired %s' % (task.queue_name, task_id, lease_delta)))\n    if (task.last_owner != owner):\n        db.session.rollback()\n        raise NotOwnerError(('queue=%r, task_id=%r, owner=%r' % (task.queue_name, task_id, task.last_owner)))\n    return task", "docstring": "Fetches the specified task and enforces ownership policy.\n\nArgs:\nqueue_name: Name of the queue the work item is on.\ntask_id: ID of the task that is finished.\nowner: Who or what has the current lease on the task.\n\nReturns:\nThe valid WorkQueue task that is currently owned.\n\nRaises:\nTaskDoesNotExistError if the task does not exist.\nLeaseExpiredError if the lease is no longer active.\nNotOwnerError if the specified owner no longer owns the task.", "source": "codesearchnet"}
{"code": "def update(self, resource, timeout=-1):\n        \n        return self._client.update(resource, timeout=timeout, default_values=self.DEFAULT_VALUES)", "docstring": "Updates only name for the Artifact Bundle.\n\nArgs:\nresource (dict): Object to update.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation\nin OneView, it just stops waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "juraj-google-style"}
{"code": "def _CountStoredAttributeContainers(self, container_type):\n    if (not (container_type in self._CONTAINER_TYPES)):\n        raise ValueError('Attribute container type {0:s} is not supported'.format(container_type))\n    if (not self._HasTable(container_type)):\n        return 0\n    query = 'SELECT MAX(_ROWID_) FROM {0:s} LIMIT 1'.format(container_type)\n    self._cursor.execute(query)\n    row = self._cursor.fetchone()\n    if (not row):\n        return 0\n    return (row[0] or 0)", "docstring": "Counts the number of attribute containers of the given type.\n\nArgs:\ncontainer_type (str): attribute container type.\n\nReturns:\nint: number of attribute containers of the given type.\n\nRaises:\nValueError: if an unsupported container_type is provided.", "source": "codesearchnet"}
{"code": "def _autopacking_helper(list_or_tuple, dtype, name):\n    if context.executing_eagerly():\n        if all((isinstance(elem, core.Tensor) for elem in list_or_tuple)):\n            return gen_array_ops.pack(list_or_tuple, name=name)\n    must_pack = False\n    converted_elems = []\n    with ops.name_scope(name) as scope:\n        for i, elem in enumerate(list_or_tuple):\n            if isinstance(elem, core.Tensor):\n                if dtype is not None and elem.dtype.base_dtype != dtype:\n                    raise TypeError(f'Cannot convert a list containing a tensor of dtype {elem.dtype} to {dtype} (Tensor is: {elem!r})')\n                converted_elems.append(elem)\n                must_pack = True\n            elif isinstance(elem, (list, tuple)):\n                converted_elem = _autopacking_helper(elem, dtype, str(i))\n                if isinstance(converted_elem, core.Tensor):\n                    must_pack = True\n                converted_elems.append(converted_elem)\n            else:\n                converted_elems.append(elem)\n        if must_pack:\n            elems_as_tensors = []\n            for i, elem in enumerate(converted_elems):\n                if isinstance(elem, core.Tensor):\n                    elems_as_tensors.append(elem)\n                else:\n                    elems_as_tensors.append(constant_op.constant(elem, dtype=dtype, name=str(i)))\n            return gen_array_ops.pack(elems_as_tensors, name=scope)\n        else:\n            return converted_elems", "docstring": "Converts the given list or tuple to a tensor by packing.\n\nArgs:\nlist_or_tuple: A (possibly nested) list or tuple containing a tensor.\ndtype: The element type of the returned tensor.\nname: A name for the returned tensor.\n\nReturns:\nA `tf.Tensor` with value equivalent to `list_or_tuple`.", "source": "github-repos"}
{"code": "def _call_rpc(self, header):\n        \n\n        length, _, cmd, feature, address = struct.unpack(\"<BBBBB\", bytes(header))\n        rpc_id = (feature << 8) | cmd\n\n        payload = self.rpc_payload[:length]\n\n        status = (1 << 6)\n        try:\n            response = self.device.call_rpc(address, rpc_id, bytes(payload))\n            if len(response) > 0:\n                status |= (1 << 7)\n        except (RPCInvalidIDError, RPCNotFoundError):\n            status = 2  \n            response = b''\n        except TileNotFoundError:\n            status = 0xFF\n            response = b''\n        except Exception:\n            status = 3\n            response = b''\n            self._logger.exception(\"Exception raise while calling rpc, header=%s, payload=%s\", header, payload)\n\n        self._audit(\n            \"RPCReceived\",\n            rpc_id=rpc_id,\n            address=address,\n            payload=binascii.hexlify(payload),\n            status=status,\n            response=binascii.hexlify(response)\n        )\n\n        resp_header = struct.pack(\"<BBBB\", status, 0, 0, len(response))\n\n        if len(response) > 0:\n            self._send_rpc_response(\n                (ReceiveHeaderChar.value_handle, resp_header),\n                (ReceivePayloadChar.value_handle, response)\n            )\n        else:\n            self._send_rpc_response((ReceiveHeaderChar.value_handle, resp_header))", "docstring": "Call an RPC given a header and possibly a previously sent payload\nIt is executed in the baBLE working thread: should not be blocking.\n\nArgs:\nheader (bytearray): The RPC header we should call", "source": "juraj-google-style"}
{"code": "def repeat(coro, times=1, step=1, limit=1, loop=None):\n    assert_corofunction(coro=coro)\n    times = max(int(times), 1)\n    iterable = range(1, (times + 1), step)\n    return (yield from map(coro, iterable, limit=limit, loop=loop))", "docstring": "Executes the coroutine function ``x`` number of  times,\nand accumulates results in order as you would use with ``map``.\n\nExecution concurrency is configurable using ``limit`` param.\n\nThis function is a coroutine.\n\nArguments:\ncoro (coroutinefunction): coroutine function to schedule.\ntimes (int): number of times to execute the coroutine.\nstep (int): increment iteration step, as with ``range()``.\nlimit (int): concurrency execution limit. Defaults to 10.\nloop (asyncio.BaseEventLoop): optional event loop to use.\n\nRaises:\nTypeError: if coro is not a coroutine function.\n\nReturns:\nlist: accumulated yielded values returned by coroutine.\n\nUsage::\n\nasync def mul_2(num):\nreturn num * 2\n\nawait paco.repeat(mul_2, times=5)\n# => [2, 4, 6, 8, 10]", "source": "codesearchnet"}
{"code": "def handle_response(self, item_session: ItemSession) -> Actions:\n    action = self.consult_response_hook(item_session)\n    if (action == Actions.RETRY):\n        item_session.set_status(Status.error)\n    elif (action == Actions.FINISH):\n        item_session.set_status(Status.done)\n    elif (action == Actions.STOP):\n        raise HookStop('Script requested immediate stop.')\n    return action", "docstring": "Generic handler for a response.\n\nReturns:\nA value from :class:`.hook.Actions`.", "source": "codesearchnet"}
{"code": "def on_train_batch_end(self, batch, logs=None):\n    self.on_batch_end(batch, logs=logs)", "docstring": "Called at the end of a training batch in `fit` methods.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def setOption(self, name, value):\n    if isinstance(value, bool):\n        lock_and_call((lambda : self._impl.setBoolOption(name, value)), self._lock)\n    elif isinstance(value, int):\n        lock_and_call((lambda : self._impl.setIntOption(name, value)), self._lock)\n    elif isinstance(value, float):\n        lock_and_call((lambda : self._impl.setDblOption(name, value)), self._lock)\n    elif isinstance(value, basestring):\n        lock_and_call((lambda : self._impl.setOption(name, value)), self._lock)\n    else:\n        raise TypeError", "docstring": "Set an AMPL option to a specified value.\n\nArgs:\nname: Name of the option to be set (alphanumeric without spaces).\n\nvalue: The value the option must be set to.\n\nRaises:\nInvalidArgumet: if the option name is not valid.\n\nTypeError: if the value has an invalid type.", "source": "codesearchnet"}
{"code": "def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):\n    if timeout is None:\n        timeout = int(os.environ.get('PYTEST_TIMEOUT', 600))\n    start_methohd = 'spawn'\n    ctx = multiprocessing.get_context(start_methohd)\n    input_queue = ctx.Queue(1)\n    output_queue = ctx.JoinableQueue(1)\n    input_queue.put(inputs, timeout=timeout)\n    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))\n    process.start()\n    try:\n        results = output_queue.get(timeout=timeout)\n        output_queue.task_done()\n    except Exception as e:\n        process.terminate()\n        test_case.fail(e)\n    process.join(timeout=timeout)\n    if results['error'] is not None:\n        test_case.fail(f'{results['error']}')", "docstring": "To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.\n\nArgs:\ntest_case (`unittest.TestCase`):\nThe test that will run `target_func`.\ntarget_func (`Callable`):\nThe function implementing the actual testing logic.\ninputs (`dict`, *optional*, defaults to `None`):\nThe inputs that will be passed to `target_func` through an (input) queue.\ntimeout (`int`, *optional*, defaults to `None`):\nThe timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.\nvariable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.", "source": "github-repos"}
{"code": "def trace_format(self):\n    cmd = enums.JLinkTraceCommand.GET_FORMAT\n    data = ctypes.c_uint32(0)\n    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n    if (res == 1):\n        raise errors.JLinkException('Failed to get trace format.')\n    return data.value", "docstring": "Retrieves the current format the trace buffer is using.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe current format the trace buffer is using.  This is one of the\nattributes of ``JLinkTraceFormat``.", "source": "codesearchnet"}
{"code": "def dumps(xs, model=None, properties=False, indent=True, **kwargs):\n    \n    xs = list(xs)\n\n    if not xs:\n        return ''\n\n    given_class = xs[0].__class__  \n    if model is None:\n        model = xs[0].__class__\n\n    if not hasattr(model, 'to_triples'):\n        raise TypeError(\n            '{} class does not implement to_triples()'.format(model.__name__)\n        )\n\n    \n    if given_class.__name__ in ('Mrs', 'Xmrs'):\n        xs = [model.from_xmrs(x, **kwargs) for x in xs]\n    elif given_class.__name__ == 'Eds' and model.__name__ != 'Eds':\n        raise ValueError('Cannot convert EDS to non-EDS')\n\n    codec = XMRSCodec()\n    graphs = [\n        codec.triples_to_graph(model.to_triples(x, properties=properties))\n        for x in xs\n    ]\n\n    if 'pretty_print' in kwargs:\n        indent = kwargs['pretty_print']\n\n    return penman.dumps(graphs, cls=XMRSCodec, indent=indent)", "docstring": "Serialize Xmrs (or subclass) objects to PENMAN notation\n\nArgs:\nxs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to\nserialize\nmodel: Xmrs subclass used to get triples\nproperties: if `True`, encode variable properties\nindent: if `True`, adaptively indent; if `False` or `None`,\ndon't indent; if a non-negative integer N, indent N spaces\nper level\nReturns:\nthe PENMAN serialization of *xs*", "source": "juraj-google-style"}
{"code": "def dispatch_callback(self, items):\n    if (not self._manager.is_active):\n        return\n    batched_commands = collections.defaultdict(list)\n    for item in items:\n        batched_commands[item.__class__].append(item)\n    _LOGGER.debug('Handling %d batched requests', len(items))\n    if batched_commands[requests.LeaseRequest]:\n        self.lease(batched_commands.pop(requests.LeaseRequest))\n    if batched_commands[requests.ModAckRequest]:\n        self.modify_ack_deadline(batched_commands.pop(requests.ModAckRequest))\n    if batched_commands[requests.AckRequest]:\n        self.ack(batched_commands.pop(requests.AckRequest))\n    if batched_commands[requests.NackRequest]:\n        self.nack(batched_commands.pop(requests.NackRequest))\n    if batched_commands[requests.DropRequest]:\n        self.drop(batched_commands.pop(requests.DropRequest))", "docstring": "Map the callback request to the appropriate gRPC request.\n\nArgs:\naction (str): The method to be invoked.\nkwargs (Dict[str, Any]): The keyword arguments for the method\nspecified by ``action``.\n\nRaises:\nValueError: If ``action`` isn't one of the expected actions\n\"ack\", \"drop\", \"lease\", \"modify_ack_deadline\" or \"nack\".", "source": "codesearchnet"}
{"code": "def from_hoy(cls, hoy, leap_year=False):\n        \n        return cls.from_moy(round(hoy * 60), leap_year)", "docstring": "Create Ladybug Datetime from an hour of the year.\n\nArgs:\nhoy: A float value 0 <= and < 8760", "source": "juraj-google-style"}
{"code": "def log(self, level, msg, *args, **kwargs):\n    if (level >= logging.FATAL):\n        extra = kwargs.setdefault('extra', {})\n        extra[_ABSL_LOG_FATAL] = True\n    super(ABSLLogger, self).log(level, msg, *args, **kwargs)", "docstring": "Logs a message at a cetain level substituting in the supplied arguments.\n\nThis method behaves differently in python and c++ modes.\n\nArgs:\nlevel: int, the standard logging level at which to log the message.\nmsg: str, the text of the message to log.\n*args: The arguments to substitute in the message.\n**kwargs: The keyword arguments to substitute in the message.", "source": "codesearchnet"}
{"code": "class PatchTSMixerEncoderOutput(ModelOutput):\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for `PatchTSMixerEncoderOutput`, with potential hidden states.\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):\nHidden-state at the output of the last layer of the model.\nhidden_states (`tuple(torch.FloatTensor)`, *optional*):\nHidden-states of the model at the output of each layer.", "source": "github-repos"}
{"code": "def _from_compatible_tensor_list(self, tensor_list: List['core_types.Symbol']) -> Any:\n    return self._from_components(nest.pack_sequence_as(self._component_specs, tensor_list, expand_composites=True))", "docstring": "Reconstructs a value from a compatible flat list of `tf.Tensor`.\n\nArgs:\ntensor_list: A flat list of `tf.Tensor`, compatible with\n`self._flat_tensor_specs`.  (Caller is responsible for ensuring\ncompatibility.)\n\nReturns:\nA value that is compatible with this `TypeSpec`.", "source": "github-repos"}
{"code": "def median(series):\n    \n\n    if np.issubdtype(series.dtype, np.number):\n        return series.median()\n    else:\n        return np.nan", "docstring": "Returns the median value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.", "source": "juraj-google-style"}
{"code": "def _AddAttribute(self, attribute):\n    \n    if attribute.identifier in self._attributes:\n      raise KeyError((\n          'Volume attribute object already set for volume attribute '\n          'identifier: {0:s}.').format(attribute.identifier))\n\n    self._attributes[attribute.identifier] = attribute", "docstring": "Adds an attribute.\n\nArgs:\nattribute (VolumeAttribute): a volume attribute.\n\nRaises:\nKeyError: if volume attribute is already set for the corresponding volume\nattribute identifier.", "source": "juraj-google-style"}
{"code": "def Close(self):\n    if self._connection:\n        self._cursor = None\n        self._connection.close()\n        self._connection = None\n    try:\n        os.remove(self._temp_file_path)\n    except (IOError, OSError):\n        pass\n    self._temp_file_path = ''", "docstring": "Closes the database file object.\n\nRaises:\nIOError: if the close failed.\nOSError: if the close failed.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 lang='en',\n                 lower=True,\n                 charset=None):\n        \n        super(CharTokenizer, self).__init__(lang, lower)\n        self.charset = charset", "docstring": "Encodes text into `(samples, characters)`\n\nArgs:\nlang: The spacy language to use. (Default value: 'en')\nlower: Lower cases the tokens if True. (Default value: True)\ncharset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.\n(Default value: None)", "source": "juraj-google-style"}
{"code": "def libravatar_url(email=None, openid=None, size=64, default='retro'):\n    params = collections.OrderedDict([('s', size), ('d', default)])\n    query = parse.urlencode(params)\n    if email:\n        value = email\n    elif openid:\n        value = openid\n    else:\n        raise ValueError('You must provide either the email or the openid.')\n    idhash = sha256(value.encode('utf-8')).hexdigest()\n    return ('https:", "docstring": "Get the URL to an avatar from libravatar.\n\nEither the user's email or openid must be provided.\n\nIf you want to use Libravatar federation (through DNS), you should install\nand use the ``libravatar`` library instead. Check out the\n``libravatar.libravatar_url()`` function.\n\nArgs:\nemail (str): The user's email\nopenid (str): The user's OpenID\nsize (int): Size of the avatar in pixels (it's a square).\ndefault (str): Default avatar to return if not found.\nReturns:\nstr: The URL to the avatar image.\nRaises:\nValueError: If neither email nor openid are provided.", "source": "codesearchnet"}
{"code": "def os_deployment_servers(self):\n    if (not self.__os_deployment_servers):\n        self.__os_deployment_servers = OsDeploymentServers(self.__connection)\n    return self.__os_deployment_servers", "docstring": "Gets the Os Deployment Servers API client.\n\nReturns:\nOsDeploymentServers:", "source": "codesearchnet"}
{"code": "class StandardInputStep(Step):\n\n    def __init__(self, dataset_fn, distribution):\n        super(StandardInputStep, self).__init__(distribution)\n        self._iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())\n\n    def initialize(self):\n        return self._iterator.initializer", "docstring": "Step with a standard implementation of input handling.\n\nArgs:\ndataset_fn: a function that returns a tf.data Dataset that produces the\ninput for the model.", "source": "github-repos"}
{"code": "def create_webdriver(self, testname=None):\n    try:\n        driver_type = self._config_reader.get(self.DRIVER_TYPE_CONFIG)\n    except:\n        driver_type = self.DRIVER_TYPE_LOCAL\n        _wtflog.warn('%s setting is missing from config. Using default setting, %s', self.DRIVER_TYPE_CONFIG, driver_type)\n    if (driver_type == self.DRIVER_TYPE_REMOTE):\n        self.webdriver = self.__create_remote_webdriver_from_config(testname=testname)\n    else:\n        self.webdriver = self.__create_driver_from_browser_config()\n    try:\n        self.webdriver.maximize_window()\n    except:\n        time.sleep(self._timeout_mgr.BRIEF)\n        try:\n            self.webdriver.maximize_window()\n        except Exception as e:\n            if (isinstance(e, WebDriverException) and ('implemented' in e.msg.lower())):\n                pass\n            else:\n                _wtflog.warn(('Unable to maxmize browser window. ' + 'It may be possible the browser did not instantiate correctly. % s'), e)\n    return self.webdriver", "docstring": "Creates an instance of Selenium webdriver based on config settings.\nThis should only be called by a shutdown hook.  Do not call directly within\na test.\n\nKwargs:\ntestname: Optional test name to pass, this gets appended to the test name\nsent to selenium grid.\n\nReturns:\nWebDriver - Selenium Webdriver instance.", "source": "codesearchnet"}
{"code": "def remove_empty_text(utterances: List[Utterance]) -> List[Utterance]:\n    \n    return [utter for utter in utterances if utter.text.strip() != \"\"]", "docstring": "Remove empty utterances from a list of utterances\nArgs:\nutterances: The list of utterance we are processing", "source": "juraj-google-style"}
{"code": "def enable_inheritance(path, objectType, clear=False):\n    \n    dc = daclConstants()\n    objectType = dc.getObjectTypeBit(objectType)\n    path = dc.processPath(path, objectType)\n\n    return _set_dacl_inheritance(path, objectType, True, None, clear)", "docstring": "enable/disable inheritance on an object\n\nArgs:\npath: The path to the object\nobjectType: The type of object (FILE, DIRECTORY, REGISTRY)\nclear: True will remove non-Inherited ACEs from the ACL\n\nReturns (dict): A dictionary containing the results\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' win_dacl.enable_inheritance c:\\temp directory", "source": "juraj-google-style"}
{"code": "def add_signature(name=None, inputs=None, outputs=None):\n    if (not name):\n        name = 'default'\n    if (inputs is None):\n        inputs = {}\n    if (outputs is None):\n        outputs = {}\n    if (not isinstance(inputs, dict)):\n        inputs = {'default': inputs}\n    if (not isinstance(outputs, dict)):\n        outputs = {'default': outputs}\n    message = find_signature_inputs_from_multivalued_ops(inputs)\n    if message:\n        logging.error(message)\n    message = find_signature_input_colocation_error(name, inputs)\n    if message:\n        raise ValueError(message)\n    saved_model_lib.add_signature(name, inputs, outputs)", "docstring": "Adds a signature to the module definition.\n\nNOTE: This must be called within a `module_fn` that is defining a Module.\n\nArgs:\nname: Signature name as a string. If omitted, it is interpreted as 'default'\nand is the signature used when `Module.__call__` `signature` is not\nspecified.\ninputs: A dict from input name to Tensor or SparseTensor to feed when\napplying the signature. If a single tensor is passed, it is interpreted\nas a dict with a single 'default' entry.\noutputs: A dict from output name to Tensor or SparseTensor to return from\napplying the signature. If a single tensor is passed, it is interpreted\nas a dict with a single 'default' entry.\n\nRaises:\nValueError: if the arguments are invalid.", "source": "codesearchnet"}
{"code": "class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):\n\n    def __init__(self, begin_suppress_tokens, begin_index):\n        self.begin_suppress_tokens = list(begin_suppress_tokens)\n        self.begin_index = begin_index\n\n    def __call__(self, input_ids, scores, cur_len: int):\n        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)\n        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)\n        return scores", "docstring": "[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using\n`begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the\nbeginning of the generation.\n\nArgs:\nbegin_suppress_tokens (`List[int]`):\nTokens to not sample.\nbegin_index (`int`):\nIndex where the tokens are suppressed.", "source": "github-repos"}
{"code": "def Scripts(unicode_dir=_UNICODE_DIR):\n  \n\n  scripts = {}\n\n  def DoLine(codes, fields):\n    \n    (_, name) = fields\n    scripts.setdefault(name, []).extend(codes)\n\n  ReadUnicodeTable(unicode_dir+\"/Scripts.txt\", 2, DoLine)\n  return scripts", "docstring": "Returns dict mapping script names to code lists.\n\nArgs:\nunicode_dir: Unicode data directory\n\nReturns:\ndict mapping script names to code lists", "source": "juraj-google-style"}
{"code": "def with_accounted_types(self, account_type_regexes):\n    self._options['account_type_regexes'] = copy.copy(account_type_regexes)\n    return self", "docstring": "Selectively counting statistics based on node types.\n\nHere, 'types' means the profiler nodes' properties. Profiler by default\nconsider device name (e.g. /job:xx/.../device:GPU:0) and operation type\n(e.g. MatMul) as profiler nodes' properties. User can also associate\ncustomized 'types' to profiler nodes through OpLogProto proto.\n\nFor example, user can select profiler nodes placed on gpu:0 with:\n`account_type_regexes=['.*gpu:0.*']`\n\nIf none of a node's properties match the specified regexes, the node is\nnot displayed nor accounted.\n\nArgs:\naccount_type_regexes: A list of regexes specifying the types.\nReturns:\nself.", "source": "github-repos"}
{"code": "def list_container_instance_groups(access_token, subscription_id, resource_group):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups', '?api-version=', CONTAINER_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container groups in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON list of container groups and their properties.", "source": "codesearchnet"}
{"code": "def data_vectors(self):\n    return {field: self.record[field] for field in self.record.dtype.names if (field != 'sample')}", "docstring": "The per-sample data in a vector.\n\nReturns:\ndict: A dict where the keys are the fields in the record and the\nvalues are the corresponding arrays.\n\nExamples:\n>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,\nenergy=[-1, 1])\n>>> sampleset.data_vectors['energy']\narray([-1,  1])\n\nNote that this is equivalent to, and less performant than:\n\n>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,\nenergy=[-1, 1])\n>>> sampleset.record['energy']\narray([-1,  1])", "source": "codesearchnet"}
{"code": "def download_data_impl(self, run, tag, response_format):\n    \n    scalars_plugin_instance = self._get_scalars_plugin()\n    if not scalars_plugin_instance:\n      raise ValueError(('Failed to respond to request for /download_data. '\n                        'The scalars plugin is oddly not registered.'))\n\n    body, mime_type = scalars_plugin_instance.scalars_impl(\n        tag, run, None, response_format)\n    return body, mime_type", "docstring": "Provides a response for downloading scalars data for a data series.\n\nArgs:\nrun: The run.\ntag: The specific tag.\nresponse_format: A string. One of the values of the OutputFormat enum of\nthe scalar plugin.\n\nRaises:\nValueError: If the scalars plugin is not registered.\n\nReturns:\n2 entities:\n- A JSON object response body.\n- A mime type (string) for the response.", "source": "juraj-google-style"}
{"code": "def log(msg, level=0):\n    \n\n    red = '\\033[91m'\n    endc = '\\033[0m'\n\n    \n    cfg = {\n        'version': 1,\n        'disable_existing_loggers': False,\n        'formatters': {\n            'stdout': {\n                'format': '[%(levelname)s]: %(asctime)s - %(message)s',\n                'datefmt': '%x %X'\n            },\n            'stderr': {\n                'format': red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc,\n                'datefmt': '%x %X'\n            }\n        },\n        'handlers': {\n            'stdout': {\n                'class': 'logging.StreamHandler',\n                'level': 'DEBUG',\n                'formatter': 'stdout'\n            },\n            'stderr': {\n                'class': 'logging.StreamHandler',\n                'level': 'ERROR',\n                'formatter': 'stderr'\n            }\n        },\n        'loggers': {\n            'info': {\n                'handlers': ['stdout'],\n                'level': 'INFO',\n                'propagate': True\n            },\n            'error': {\n                'handlers': ['stderr'],\n                'level': 'ERROR',\n                'propagate': False\n            }\n        }\n    }\n\n    dictConfig(cfg)\n\n    lg = 'info' if level == 0 else 'error'\n    lvl = 20 if level == 0 else 40\n\n    logger = logging.getLogger(lg)\n    logger.log(lvl, msg)", "docstring": "Logs a message to the console, with optional level paramater\n\nArgs:\n- msg (str): message to send to console\n- level (int): log level; 0 for info, 1 for error (default = 0)", "source": "juraj-google-style"}
{"code": "def _get_ngrams(segment, max_order):\n  \n  ngram_counts = collections.Counter()\n  for order in range(1, max_order + 1):\n    for i in range(0, len(segment) - order + 1):\n      ngram = tuple(segment[i:i + order])\n      ngram_counts[ngram] += 1\n  return ngram_counts", "docstring": "Extracts all n-grams up to a given maximum order from an input segment.\n\nArgs:\nsegment: text segment from which n-grams will be extracted.\nmax_order: maximum length in tokens of the n-grams returned by this\nmethods.\n\nReturns:\nThe Counter containing all n-grams up to max_order in segment\nwith a count of how many times each n-gram occurred.", "source": "juraj-google-style"}
{"code": "def append(self, species, coords, validate_proximity=True, properties=None):\n    return self.insert(len(self), species, coords, validate_proximity=validate_proximity, properties=properties)", "docstring": "Appends a site to the molecule.\n\nArgs:\nspecies: Species of inserted site\ncoords: Coordinates of inserted site\nvalidate_proximity (bool): Whether to check if inserted site is\ntoo close to an existing site. Defaults to True.\nproperties (dict): A dict of properties for the Site.\n\nReturns:\nNew molecule with inserted site.", "source": "codesearchnet"}
{"code": "def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:\n    input_length = input_ids.size(1)\n    if self.max_length == input_length + 1:\n        return (input_ids, None)\n    chosen_ids = None\n    match_found = False\n    for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):\n        windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)\n        ngram_tensor = input_ids[0, -ngram_size:]\n        matches = (windows == ngram_tensor).all(dim=2)\n        match_indices = matches.nonzero(as_tuple=True)[1]\n        for idx in match_indices:\n            start_idx = idx + ngram_size\n            end_idx = start_idx + self.num_output_tokens\n            end_idx = min(end_idx, input_length, self.max_length)\n            if start_idx < end_idx:\n                chosen_ids = input_ids[0, start_idx:end_idx]\n                match_found = True\n                mask = isin_mps_friendly(chosen_ids, self.eos_token_id)\n                match_indices_eos = torch.nonzero(mask)\n                if match_indices_eos.numel() > 0:\n                    first_eos_index = match_indices_eos[0].item()\n                    chosen_ids = chosen_ids[:first_eos_index]\n                break\n        if match_found:\n            break\n    if chosen_ids is None or len(chosen_ids) == 0:\n        return (input_ids, None)\n    chosen_ids = chosen_ids.unsqueeze(0)\n    candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)\n    return (candidate_input_ids, None)", "docstring": "Fetches the candidates to be tried for the current input.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\n\nReturn:\n`torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.", "source": "github-repos"}
{"code": "def _BuildEventData(self, record):\n    event_data = FseventsdEventData()\n    event_data.path = record.path\n    event_data.flags = record.event_flags\n    event_data.event_identifier = record.event_identifier\n    event_data.node_identifier = getattr(record, 'node_identifier', None)\n    return event_data", "docstring": "Builds an FseventsdData object from a parsed structure.\n\nArgs:\nrecord (dls_record_v1|dls_record_v2): parsed record structure.\n\nReturns:\nFseventsdEventData: event data attribute container.", "source": "codesearchnet"}
{"code": "def delete_note(self, note_id):\n        \n        \n        note, status = self.trash_note(note_id)\n        if (status == -1):\n            return note, status\n\n        params = '/i/%s' % (str(note_id))\n        request = Request(url=DATA_URL+params, method='DELETE')\n        request.add_header(self.header, self.get_token())\n        try:\n            response = urllib2.urlopen(request)\n        except IOError as e:\n            return e, -1\n        except HTTPError as e:\n            if e.code == 401:\n                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n            else:\n                return e, -1\n        return {}, 0", "docstring": "Method to permanently delete a note\n\nArguments:\n- note_id (string): key of the note to trash\n\nReturns:\nA tuple `(note, status)`\n\n- note (dict): an empty dict or an error message\n- status (int): 0 on success and -1 otherwise", "source": "juraj-google-style"}
{"code": "def __init__(self, name):\n        \n        self.name = name\n        self.edges_in = set()\n        self.edges_out = set()", "docstring": "Initialization method.\n\nArgs:\nname (str): name of the vertex.", "source": "juraj-google-style"}
{"code": "def RegisterCredentials(cls, credentials):\n    if (credentials.type_indicator in cls._credentials):\n        raise KeyError('Credentials object already set for type indicator: {0:s}.'.format(credentials.type_indicator))\n    cls._credentials[credentials.type_indicator] = credentials", "docstring": "Registers a path specification credentials.\n\nArgs:\ncredentials (Credentials): credentials.\n\nRaises:\nKeyError: if credentials object is already set for the corresponding\ntype indicator.", "source": "codesearchnet"}
{"code": "def _get_value(self, scalar_data_blob, dtype_enum):\n    tensorflow_dtype = tf.DType(dtype_enum)\n    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)\n    return np.asscalar(buf)", "docstring": "Obtains value for scalar event given blob and dtype enum.\n\nArgs:\nscalar_data_blob: The blob obtained from the database.\ndtype_enum: The enum representing the dtype.\n\nReturns:\nThe scalar value.", "source": "codesearchnet"}
{"code": "def _remove_subsequent_result_because_of_batch_failure(self, sig):\n    batch = self._batches_by_txn_id[sig]\n    seen = []\n    for txn in batch.transactions:\n        txn_id = txn.header_signature\n        for poss_successor in self._scheduled.copy():\n            if (not self.is_transaction_in_schedule(poss_successor)):\n                continue\n            if self._is_txn_to_replay(txn_id, poss_successor, seen):\n                if self._txn_has_result(poss_successor):\n                    del self._txn_results[poss_successor]\n                    self._scheduled.remove(poss_successor)\n                    self._txns_available[poss_successor] = self._transactions[poss_successor]\n                else:\n                    self._outstanding.add(poss_successor)\n                seen.append(poss_successor)", "docstring": "Remove transactions from scheduled and txn_results for\nsuccessors of txns in a failed batch. These transactions will now,\nor in the future be rescheduled in next_transaction; giving a\nreplay ability.\n\nArgs:\nsig (str): Transaction header signature", "source": "codesearchnet"}
{"code": "def repeat(sequence):\n    N = len(sequence)\n\n    def f(i):\n        return sequence[(i % N)]\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a repeated of values.\n\n.. code-block:: none\n\nseq = [0, 1, 2, 3]\n\n# repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...]\n\nArgs:\nsequence (seq) : a sequence of values for the driver to bounce", "source": "codesearchnet"}
{"code": "def unflatten(guide, falttened_input):\n    \n    return [unflatten(sub_list, falttened_input) if isinstance(sub_list, list)\n            else next(falttened_input) for sub_list in guide]", "docstring": "Unflatten a falttened generator.\n\nArgs:\nguide: A guide list to follow the structure\nfalttened_input: A flattened iterator object\n\nUsage:\n\nguide = [[\"a\"], [\"b\",\"c\",\"d\"], [[\"e\"]], [\"f\"]]\ninput_list = [0, 1, 2, 3, 4, 5, 6, 7]\nunflatten(guide, iter(input_list))\n>> [[0], [1, 2, 3], [[4]], [5]]", "source": "juraj-google-style"}
{"code": "def _get_access_token():\n    access_token = os.environ.get(ACCESS_TOKEN_ENVIRONMENT_VARIABLE)\n    if access_token:\n        return access_token\n    else:\n        for access_token_variable in LEGACY_ACCESS_TOKEN_ENVIRONMENT_VARIABLES:\n            access_token = os.environ.get(access_token_variable)\n            if access_token:\n                env_var_deprecation_warning = PendingDeprecationWarning('Use of the `{legacy}` environment variable will be deprecated in the future.  Please update your environment(s) to use the new `{new}` environment variable.'.format(legacy=access_token, new=ACCESS_TOKEN_ENVIRONMENT_VARIABLE))\n                warnings.warn(env_var_deprecation_warning)\n                return access_token", "docstring": "Attempt to get the access token from the environment.\n\nTry using the current and legacy environment variables. If the access token\nis found in a legacy environment variable, raise a deprecation warning.\n\nReturns:\nThe access token found in the environment (str), or None.", "source": "codesearchnet"}
{"code": "def SetModifyTimestamp(self, value):\n    if value is None or isinstance(value, int):\n        self._last_modification_timestamp = value\n    else:\n        raise TypeError('timestamp can only be int or None, not %r' % value)", "docstring": "Set the last modify timestamp of this map.\n\nArgs:\nvalue: An integer containing the number of seconds since epoch, or None.\n\nRaises:\nTypeError: The argument is not an int or None.", "source": "github-repos"}
{"code": "def _dropout(x, rate, noise_shape, uniform_sampler, dummy_rng_step, name, default_name):\n    with ops.name_scope(name, default_name, [x]) as name:\n        is_rate_number = isinstance(rate, numbers.Real)\n        if is_rate_number and (rate < 0 or rate >= 1):\n            raise ValueError(f'`rate` must be a scalar tensor or a float in the range [0, 1). Received: rate={rate}')\n        x = ops.convert_to_tensor(x, name='x')\n        x_dtype = x.dtype\n        if not x_dtype.is_floating:\n            raise ValueError(f'`x.dtype` must be a floating point tensor as `x` will be scaled. Received: x_dtype={x_dtype}')\n        if is_rate_number and rate == 0:\n            dummy_rng_step()\n            return x\n        is_executing_eagerly = context.executing_eagerly()\n        if not tensor_util.is_tf_type(rate):\n            if is_rate_number:\n                keep_prob = 1 - rate\n                scale = 1 / keep_prob\n                scale = ops.convert_to_tensor(scale, dtype=x_dtype)\n                ret = gen_math_ops.mul(x, scale)\n            else:\n                raise ValueError(f'`rate` must be a scalar or scalar tensor. Received: rate={rate}')\n        else:\n            rate.get_shape().assert_has_rank(0)\n            rate_dtype = rate.dtype\n            if rate_dtype != x_dtype:\n                if not rate_dtype.is_compatible_with(x_dtype):\n                    raise ValueError(f'`x.dtype` must be compatible with `rate.dtype`. Received: x.dtype={x_dtype} and rate.dtype={rate_dtype}')\n                rate = gen_math_ops.cast(rate, x_dtype, name='rate')\n            one_tensor = constant_op.constant(1, dtype=x_dtype)\n            ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))\n        noise_shape = _get_noise_shape(x, noise_shape)\n        random_tensor = uniform_sampler(shape=noise_shape, dtype=x_dtype)\n        keep_mask = random_tensor >= rate\n        zero_tensor = constant_op.constant(0, dtype=x_dtype)\n        ret = array_ops.where_v2(keep_mask, ret, zero_tensor)\n        if not is_executing_eagerly:\n            ret.set_shape(x.get_shape())\n        return ret", "docstring": "Shared implementation of the various dropout functions.\n\nArgs:\nx: same as the namesake in `dropout_v2`.\nrate: same as the namesake in `dropout_v2`.\nnoise_shape: same as the namesake in `dropout_v2`.\nuniform_sampler: a callable of signature `(shape, dtype) ->\nTensor`, used to generate a tensor of uniformly-distributed\nrandom numbers in the range `[0, 1)`, of the given shape and dtype.\ndummy_rng_step: a callable of signature `() -> None`, to make a\ndummy RNG call in the fast path. In the fast path where rate is\n0, we don't need to generate random numbers, but some samplers\nstill require you to make an RNG call, to make sure that RNG\nstates won't depend on whether the fast path is taken.\nname: same as the namesake in `dropout_v2`.\ndefault_name: a default name in case `name` is `None`.\n\nReturns:\nA Tensor of the same shape and dtype of `x`.", "source": "github-repos"}
{"code": "def encode_boxes(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:\n    encoded_inputs = self.encode_plus_boxes(text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, return_tensors=return_tensors, **kwargs)\n    return encoded_inputs['input_ids']", "docstring": "Args:\nConverts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing\n`self.convert_tokens_to_ids(self.tokenize(text))`.\ntext (`str`, `List[str]` or `List[int]`):\nThe first sequence to be encoded. This can be a string, a list of strings (tokenized string using the\n`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`\nmethod).\ntext_pair (`str`, `List[str]` or `List[int]`, *optional*):\nOptional second sequence to be encoded. This can be a string, a list of strings (tokenized string using\nthe `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`\nmethod).", "source": "github-repos"}
{"code": "def noisy_moment(self, moment: 'cirq.Moment',\n                     system_qubits: Sequence['cirq.Qid']) -> 'cirq.OP_TREE':\n        \n        if not hasattr(self.noisy_moments, '_not_overridden'):\n            return self.noisy_moments([moment], system_qubits)\n\n        if not hasattr(self.noisy_operation, '_not_overridden'):\n            return [self.noisy_operation(op) for op in moment]\n\n        assert False, 'Should be unreachable.'", "docstring": "Adds noise to the operations from a moment.\n\nArgs:\nmoment: The moment to add noise to.\nsystem_qubits: A list of all qubits in the system.\n\nReturns:\nAn OP_TREE corresponding to the noisy operations for the moment.", "source": "juraj-google-style"}
{"code": "def __create_and_save_state(cls, job_config, mapreduce_spec):\n    \n    state = model.MapreduceState.create_new(job_config.job_id)\n    state.mapreduce_spec = mapreduce_spec\n    state.active = True\n    state.active_shards = 0\n    state.app_id = job_config._app\n    config = datastore_rpc.Configuration(force_writes=job_config._force_writes)\n    state.put(config=config)\n    return state", "docstring": "Save map job state to datastore.\n\nSave state to datastore so that UI can see it immediately.\n\nArgs:\njob_config: map_job.JobConfig.\nmapreduce_spec: model.MapreduceSpec.\n\nReturns:\nmodel.MapreduceState for this job.", "source": "juraj-google-style"}
{"code": "def __init__(self, shape, scope='distribution', summary_labels=None):\n        \n        self.shape = shape\n\n        self.scope = scope\n        self.summary_labels = set(summary_labels or ())\n\n        self.variables = dict()\n        self.all_variables = dict()\n\n        def custom_getter(getter, name, registered=False, **kwargs):\n            variable = getter(name=name, registered=True, **kwargs)\n            if registered:\n                pass\n            elif name in self.all_variables:\n                assert variable is self.all_variables[name]\n                if kwargs.get('trainable', True):\n                    assert variable is self.variables[name]\n                    if 'variables' in self.summary_labels:\n                        tf.contrib.summary.histogram(name=name, tensor=variable)\n            else:\n                self.all_variables[name] = variable\n                if kwargs.get('trainable', True):\n                    self.variables[name] = variable\n                    if 'variables' in self.summary_labels:\n                        tf.contrib.summary.histogram(name=name, tensor=variable)\n            return variable\n\n        self.parameterize = tf.make_template(\n            name_=(scope + '/parameterize'),\n            func_=self.tf_parameterize,\n            custom_getter_=custom_getter\n        )\n        self.sample = tf.make_template(\n            name_=(scope + '/sample'),\n            func_=self.tf_sample,\n            custom_getter_=custom_getter\n        )\n        self.log_probability = tf.make_template(\n            name_=(scope + '/log-probability'),\n            func_=self.tf_log_probability,\n            custom_getter_=custom_getter\n        )\n        self.entropy = tf.make_template(\n            name_=(scope + '/entropy'),\n            func_=self.tf_entropy,\n            custom_getter_=custom_getter\n        )\n        self.kl_divergence = tf.make_template(\n            name_=(scope + '/kl-divergence'),\n            func_=self.tf_kl_divergence,\n            custom_getter_=custom_getter\n        )\n        self.regularization_loss = tf.make_template(\n            name_=(scope + '/regularization-loss'),\n            func_=self.tf_regularization_loss,\n            custom_getter_=custom_getter\n        )", "docstring": "Distribution.\n\nArgs:\nshape: Action shape.", "source": "juraj-google-style"}
{"code": "def set_xml(self, diagram, force=False):\n        \n        no_of_running = WFInstance.objects.filter(wf=self, finished=False, started=True).count()\n        if no_of_running and not force:\n            raise RunningInstancesExist(\n                \"Can't update WF diagram! Running %s WF instances exists for %s\" % (\n                    no_of_running, self.name\n                ))\n        else:\n            self.xml = diagram\n            parser = BPMNParser(diagram.body)\n            self.description = parser.get_description()\n            self.title = parser.get_name() or self.name.replace('_', ' ').title()\n            extensions = dict(parser.get_wf_extensions())\n            self.programmable = extensions.get('programmable', False)\n            self.task_type = extensions.get('task_type', None)\n            self.menu_category = extensions.get('menu_category', settings.DEFAULT_WF_CATEGORY_NAME)\n            self.save()", "docstring": "updates xml link if there aren't any running instances of this wf\nArgs:\ndiagram: XMLDiagram object", "source": "juraj-google-style"}
{"code": "def __init__(self, sbn):\n        \n        isbn = '0' + sbn\n        super(Sbn, self).__init__(isbn)", "docstring": "Initialise a new ``Sbn`` object.\n\nArgs:\nsbn (str): SBN string", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:\n    \n\n    \n    \n    \n    import matplotlib.pyplot as plt\n\n    num_qubits = len(result.measurements.keys())\n    states = 2**num_qubits\n    values = np.zeros(states)\n\n    \n    \n    \n    \n    \n    measurement_by_result = np.array([\n        v.transpose()[0] for k, v in result.measurements.items()]).transpose()\n\n    for meas in measurement_by_result:\n        \n        \n        state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)\n        values[state_ind] += 1\n\n    plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]\n    plt.bar(np.arange(states), values, tick_label=plot_labels)\n    plt.xlabel('qubit state')\n    plt.ylabel('result count')\n    plt.show()\n\n    return values", "docstring": "Plot the state histogram from a single result with repetitions.\n\nStates is a bitstring representation of all the qubit states in a single\nresult.\nCurrently this function assumes each measurement gate applies to only\na single qubit.\n\nArgs:\nresult: The trial results to plot.\n\nReturns:\nThe histogram. A list of values plotted on the y-axis.", "source": "juraj-google-style"}
{"code": "def get_enterprise_customer_user(user_id, enterprise_uuid):\n    \n    EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser')  \n    try:\n        return EnterpriseCustomerUser.objects.get(  \n            enterprise_customer__uuid=enterprise_uuid,\n            user_id=user_id\n        )\n    except EnterpriseCustomerUser.DoesNotExist:\n        return None", "docstring": "Return the object for EnterpriseCustomerUser.\n\nArguments:\nuser_id (str): user identifier\nenterprise_uuid (UUID): Universally unique identifier for the enterprise customer.\n\nReturns:\n(EnterpriseCustomerUser): enterprise customer user record", "source": "juraj-google-style"}
{"code": "def get(self):\n    parser = reqparse.RequestParser()\n    parser.add_argument('public_key', type=parameters.valid_ed25519, required=True)\n    parser.add_argument('spent', type=parameters.valid_bool)\n    args = parser.parse_args(strict=True)\n    pool = current_app.config['bigchain_pool']\n    with pool() as bigchain:\n        outputs = bigchain.get_outputs_filtered(args['public_key'], args['spent'])\n        return [{'transaction_id': output.txid, 'output_index': output.output} for output in outputs]", "docstring": "API endpoint to retrieve a list of links to transaction\noutputs.\n\nReturns:\nA :obj:`list` of :cls:`str` of links to outputs.", "source": "codesearchnet"}
{"code": "def path_is_empty(p: tcod.path.AStar) -> bool:\n    return bool(lib.TCOD_path_is_empty(p._path_c))", "docstring": "Return True if a path is empty.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nbool: True if a path is empty.  Otherwise False.", "source": "codesearchnet"}
{"code": "def ToParameter(item: StackItem):\n    if (isinstance(item, Array) or isinstance(item, Struct)):\n        items = item.GetArray()\n        output = [ContractParameter.ToParameter(subitem) for subitem in items]\n        return ContractParameter(type=ContractParameterType.Array, value=output)\n    elif isinstance(item, Boolean):\n        return ContractParameter(type=ContractParameterType.Boolean, value=item.GetBoolean())\n    elif isinstance(item, ByteArray):\n        return ContractParameter(type=ContractParameterType.ByteArray, value=item.GetByteArray())\n    elif isinstance(item, Integer):\n        return ContractParameter(type=ContractParameterType.Integer, value=str(item.GetBigInteger()))\n    elif isinstance(item, InteropInterface):\n        return ContractParameter(type=ContractParameterType.InteropInterface, value=item.GetInterface())", "docstring": "Convert a StackItem to a ContractParameter object\n\nArgs:\nitem (neo.VM.InteropService.StackItem) The item to convert to a ContractParameter object\n\nReturns:\nContractParameter", "source": "codesearchnet"}
{"code": "def mixins(self, name):\n        \n        m = self._smixins(name)\n        if m:\n            return m\n        return self._smixins(name.replace('?>?', ' '))", "docstring": "Search mixins for name.\nAllow '>' to be ignored. '.a .b()' == '.a > .b()'\nArgs:\nname (string): Search term\nReturns:\nMixin object list OR False", "source": "juraj-google-style"}
{"code": "def _wrap_result(self, response):\n    if isinstance(response, int):\n        response = self._wrap_response(response)\n    return HandlerResult(status=HandlerStatus.RETURN, message_out=self._response_proto(**response), message_type=self._response_type)", "docstring": "Wraps child's response in a HandlerResult to be sent back to client.\n\nArgs:\nresponse (enum or dict): Either an integer status enum, or a dict\nof attributes to be added to the protobuf response.", "source": "codesearchnet"}
{"code": "def output_reference(self, name):\n        \n        if name not in self.output_names:\n            raise ValueError('Invalid output \"{}\"'.format(name))\n        return Reference(step_name=self.name_in_workflow, output_name=name)", "docstring": "Return a reference to the given output for use in an input\nof a next Step.\n\nFor a Step named `echo` that has an output called `echoed`, the\nreference `echo/echoed` is returned.\n\nArgs:\nname (str): the name of the Step output\nRaises:\nValueError: The name provided is not a valid output name for this\nStep.", "source": "juraj-google-style"}
{"code": "def register_many(self, *args):\n    params = []\n    for name in args:\n        params.append(self.register(name))\n    return params", "docstring": "Register many configuration names.\n\nArguments:\n*args: Config names as strings.\n\nReturns:\nlist: List of registered configs.", "source": "codesearchnet"}
{"code": "def call(self, inputs):\n    image_shape = tf.shape(input=inputs)[(- 3):]\n    collapsed_shape = tf.concat(([(- 1)], image_shape), axis=0)\n    out = tf.reshape(inputs, collapsed_shape)\n    out = self.conv1(out)\n    out = self.conv2(out)\n    out = self.conv3(out)\n    out = self.conv4(out)\n    expanded_shape = tf.concat((tf.shape(input=inputs)[:(- 3)], [(- 1)]), axis=0)\n    return tf.reshape(out, expanded_shape)", "docstring": "Runs the model to generate an intermediate representation of x_t.\n\nArgs:\ninputs: A batch of image sequences `x_{1:T}` of shape\n`[sample_shape, batch_size, timesteps, height, width,\nchannels]`.\n\nReturns:\nA batch of intermediate representations of shape [sample_shape,\nbatch_size, timesteps, hidden_size].", "source": "codesearchnet"}
{"code": "def prepare_namespace(self, func):\n    if self.is_imethod:\n        to_run = getattr(self.obj, self.imethod_name)\n    else:\n        to_run = func\n    for (varname, modulename) in self.global_modules.items():\n        to_run.__globals__[varname] = __import__(modulename)\n    if self.global_closure:\n        to_run.__globals__.update(self.global_closure)\n    if self.global_functions:\n        to_run.__globals__.update(self.global_functions)\n    return to_run", "docstring": "Prepares the function to be run after deserializing it.\nRe-associates any previously bound variables and modules from the closure\n\nReturns:\ncallable: ready-to-call function", "source": "codesearchnet"}
{"code": "def rot90(array, k=1, axes=(0, 1)):\n    if any_symbolic_tensors((array,)):\n        return Rot90(k=k, axes=axes).symbolic_call(array)\n    return backend.numpy.rot90(array, k=k, axes=axes)", "docstring": "Rotate an array by 90 degrees in the plane specified by axes.\n\nThis function rotates an array counterclockwise\nby 90 degrees `k` times in the plane specified by `axes`.\nSupports arrays of two or more dimensions.\n\nArgs:\narray: Input array to rotate.\nk: Number of times the array is rotated by 90 degrees.\naxes: A tuple of two integers specifying the\nplane of rotation (defaults to `(0, 1)`).\n\nReturns:\nRotated array.\n\nExamples:\n\n>>> import numpy as np\n>>> from keras import ops\n>>> m = np.array([[1, 2], [3, 4]])\n>>> rotated = ops.rot90(m)\n>>> rotated\narray([[2, 4],\n[1, 3]])\n\n>>> m = np.arange(8).reshape((2, 2, 2))\n>>> rotated = ops.rot90(m, k=1, axes=(1, 2))\n>>> rotated\narray([[[1, 3],\n[0, 2]],\n[[5, 7],\n[4, 6]]])", "source": "github-repos"}
{"code": "def unravel_staff(staff_data):\n    staff_list = []\n    for (role, staff_members) in staff_data['data'].items():\n        for member in staff_members:\n            member['role'] = role\n            staff_list.append(member)\n    return staff_list", "docstring": "Unravels staff role dictionary into flat list of staff\nmembers with ``role`` set as an attribute.\n\nArgs:\nstaff_data(dict): Data return from py:method::get_staff\n\nReturns:\nlist: Flat list of staff members with ``role`` set to\nrole type (i.e. course_admin, instructor, TA, etc)", "source": "codesearchnet"}
{"code": "def forecast(self, throughputs, backlog_size, num_simulations=10000, max_periods=10000, seed=None):\n        \n        self._check_throughputs(throughputs)\n        results = []\n\n        if seed is not None:\n            random.seed(seed)\n\n        for i in range(0, num_simulations):\n            simulated_backlog = backlog_size\n            time_unit_count = 0\n            while simulated_backlog > 0:\n                simulated_backlog -= random.choice(throughputs)\n                time_unit_count += 1\n                if time_unit_count > max_periods:\n                    raise ValueError(\"More than {} periods calculated\".format(max_periods))\n            results.append(time_unit_count)\n\n        return Results(results)", "docstring": "Forecasts how long a backlog will take to complete given the historical values provided.\nArguments:\nthroughputs(List[int]): Number of units completed per unit of time (stories per week, story points per month, etc.)\nbacklog_size(int): Units in the backlog (stories, points, etc.)\nReturns:\nresults\nExceptions:\nValueError: If there aren't any positive throughputs, or the simulation takes too long.", "source": "juraj-google-style"}
{"code": "def _predictResponseSize(mode, functioncode, payloadToSlave):\n    MIN_PAYLOAD_LENGTH = 4\n    BYTERANGE_FOR_GIVEN_SIZE = slice(2, 4)\n    NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4\n    NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1\n    RTU_TO_ASCII_PAYLOAD_FACTOR = 2\n    NUMBER_OF_RTU_RESPONSE_STARTBYTES = 2\n    NUMBER_OF_RTU_RESPONSE_ENDBYTES = 2\n    NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5\n    NUMBER_OF_ASCII_RESPONSE_ENDBYTES = 4\n    _checkMode(mode)\n    _checkFunctioncode(functioncode, None)\n    _checkString(payloadToSlave, description='payload', minlength=MIN_PAYLOAD_LENGTH)\n    if (functioncode in [5, 6, 15, 16]):\n        response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION\n    elif (functioncode in [1, 2, 3, 4]):\n        given_size = _twoByteStringToNum(payloadToSlave[BYTERANGE_FOR_GIVEN_SIZE])\n        if ((functioncode == 1) or (functioncode == 2)):\n            number_of_inputs = given_size\n            response_payload_size = ((NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + (number_of_inputs \n        elif ((functioncode == 3) or (functioncode == 4)):\n            number_of_registers = given_size\n            response_payload_size = (NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + (number_of_registers * _NUMBER_OF_BYTES_PER_REGISTER))\n    else:\n        raise ValueError('Wrong functioncode: {}. The payload is: {!r}'.format(functioncode, payloadToSlave))\n    if (mode == MODE_ASCII):\n        return ((NUMBER_OF_ASCII_RESPONSE_STARTBYTES + (response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR)) + NUMBER_OF_ASCII_RESPONSE_ENDBYTES)\n    else:\n        return ((NUMBER_OF_RTU_RESPONSE_STARTBYTES + response_payload_size) + NUMBER_OF_RTU_RESPONSE_ENDBYTES)", "docstring": "Calculate the number of bytes that should be received from the slave.\n\nArgs:\n* mode (str): The modbus protcol mode (MODE_RTU or MODE_ASCII)\n* functioncode (int): Modbus function code.\n* payloadToSlave (str): The raw request that is to be sent to the slave (not hex encoded string)\n\nReturns:\nThe preducted number of bytes (int) in the response.\n\nRaises:\nValueError, TypeError.", "source": "codesearchnet"}
{"code": "def create_detector(self, detector):\n    resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX), data=detector)\n    resp.raise_for_status()\n    return resp.json()", "docstring": "Creates a new detector.\n\nArgs:\ndetector (object): the detector model object. Will be serialized as\nJSON.\nReturns:\ndictionary of the response (created detector model).", "source": "codesearchnet"}
{"code": "def get_application_configurations(self, name=None):\n        \n        if hasattr(self, 'applicationConfigurations'):\n           return self._get_elements(self.applicationConfigurations, 'applicationConfigurations', ApplicationConfiguration, None, name)", "docstring": "Retrieves application configurations for this instance.\n\nArgs:\nname (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a\nregular expression. If `name` is not supplied, then all application configurations are returned.\n\nReturns:\nlist(ApplicationConfiguration): A list of application configurations matching the given `name`.\n\n.. versionadded 1.12", "source": "juraj-google-style"}
{"code": "def apply_cut(self, cut):\n    return MacroSubsystem(self.network, self.network_state, self.micro_node_indices, cut=cut, time_scale=self.time_scale, blackbox=self.blackbox, coarse_grain=self.coarse_grain)", "docstring": "Return a cut version of this |MacroSubsystem|.\n\nArgs:\ncut (Cut): The cut to apply to this |MacroSubsystem|.\n\nReturns:\nMacroSubsystem: The cut version of this |MacroSubsystem|.", "source": "codesearchnet"}
{"code": "def parse_user_data(variables, raw_user_data, blueprint_name):\n    variable_values = {}\n    for (key, value) in variables.items():\n        if (type(value) is CFNParameter):\n            variable_values[key] = value.to_parameter_value()\n        else:\n            variable_values[key] = value\n    template = string.Template(raw_user_data)\n    res = ''\n    try:\n        res = template.substitute(variable_values)\n    except ValueError as exp:\n        raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0])\n    except KeyError as key:\n        raise MissingVariable(blueprint_name, key)\n    return res", "docstring": "Parse the given user data and renders it as a template\n\nIt supports referencing template variables to create userdata\nthat's supplemented with information from the stack, as commonly\nrequired when creating EC2 userdata files.\n\nFor example:\nGiven a raw_user_data string: 'open file ${file}'\nAnd a variables dictionary with: {'file': 'test.txt'}\nparse_user_data would output: open file test.txt\n\nArgs:\nvariables (dict): variables available to the template\nraw_user_data (str): the user_data to be parsed\nblueprint_name (str): the name of the blueprint\n\nReturns:\nstr: The parsed user data, with all the variables values and\nrefs replaced with their resolved values.\n\nRaises:\nInvalidUserdataPlaceholder: Raised when a placeholder name in\nraw_user_data is not valid.\nE.g ${100} would raise this.\nMissingVariable: Raised when a variable is in the raw_user_data that\nis not given in the blueprint", "source": "codesearchnet"}
{"code": "def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None):\n        \n        vSpecific = vTot-vCommon\n\n        SP.random.seed(0)\n        if c==None: c = SP.randn(self.P)\n        XX += 1e-3 * SP.eye(XX.shape[0])\n        L = LA.cholesky(XX,lower=True)\n\n        \n        R = self.genWeights(self.N,self.P)\n        A = self.genTraitEffect()\n        if a is not None: A[0,:] = a\n        Yc = SP.dot(L,SP.dot(R,A))\n        Yc*= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())\n\n        \n        R = SP.randn(self.N,self.P)\n        Yi = SP.dot(L,SP.dot(R,SP.diag(c)))\n        Yi*= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())\n\n        return Yc, Yi", "docstring": "generate background term from SNPs\n\nArgs:\nvTot: variance of Yc+Yi\nvCommon: variance of Yc\nXX: kinship matrix\na: common scales, it can be set for debugging purposes\nc: indipendent scales, it can be set for debugging purposes", "source": "juraj-google-style"}
{"code": "def EncodeEnv(env, encoding=None):\n    encoding = encoding or _GetEncoding()\n    return {Encode(k, encoding=encoding): Encode(v, encoding=encoding) for k, v in env.items()}", "docstring": "Encodes all the key value pairs in env in preparation for subprocess.\n\nArgs:\nenv: {str: str}, The environment you are going to pass to subprocess.\nencoding: str, The encoding to use or None to use the default.\n\nReturns:\n{bytes: bytes}, The environment to pass to subprocess.", "source": "github-repos"}
{"code": "def _keyDown(key):\n    \n    if key not in keyboardMapping or keyboardMapping[key] is None:\n        return\n\n    if type(key) == int:\n        fake_input(_display, X.KeyPress, key)\n        _display.sync()\n        return\n\n    needsShift = pyautogui.isShiftCharacter(key)\n    if needsShift:\n        fake_input(_display, X.KeyPress, keyboardMapping['shift'])\n\n    fake_input(_display, X.KeyPress, keyboardMapping[key])\n\n    if needsShift:\n        fake_input(_display, X.KeyRelease, keyboardMapping['shift'])\n    _display.sync()", "docstring": "Performs a keyboard key press without the release. This will put that\nkey in a held down state.\n\nNOTE: For some reason, this does not seem to cause key repeats like would\nhappen if a keyboard key was held down on a text field.\n\nArgs:\nkey (str): The key to be pressed down. The valid names are listed in\npyautogui.KEY_NAMES.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, learning_rate, initial_accumulator_value=0.1, use_locking=False, name='Adagrad'):\n    if initial_accumulator_value <= 0.0:\n        raise ValueError('initial_accumulator_value must be positive: %s' % initial_accumulator_value)\n    super(AdagradOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._initial_accumulator_value = initial_accumulator_value\n    self._learning_rate_tensor = None", "docstring": "Construct a new Adagrad optimizer.\n\nArgs:\nlearning_rate: A `Tensor` or a floating point value.  The learning rate.\ninitial_accumulator_value: A floating point value.\nStarting value for the accumulators, must be positive.\nuse_locking: If `True` use locks for update operations.\nname: Optional name prefix for the operations created when applying\ngradients.  Defaults to \"Adagrad\".\n\nRaises:\nValueError: If the `initial_accumulator_value` is invalid.", "source": "github-repos"}
{"code": "def symbol_top(body_output, targets, model_hparams, vocab_size):\n    del targets\n    if model_hparams.shared_embedding_and_softmax_weights:\n        scope_name = 'shared'\n        reuse = tf.AUTO_REUSE\n    else:\n        scope_name = 'softmax'\n        reuse = False\n    with tf.variable_scope(scope_name, reuse=reuse):\n        body_output_shape = common_layers.shape_list(body_output)\n        var = get_weights(model_hparams, vocab_size, body_output_shape[(- 1)])\n        if (model_hparams.factored_logits and (model_hparams.mode == tf.estimator.ModeKeys.TRAIN)):\n            body_output = tf.expand_dims(body_output, 3)\n            return common_layers.FactoredTensor(body_output, var)\n        else:\n            body_output = tf.reshape(body_output, [(- 1), body_output_shape[(- 1)]])\n            logits = tf.matmul(body_output, var, transpose_b=True)\n            return tf.reshape(logits, (body_output_shape[:(- 1)] + [1, vocab_size]))", "docstring": "Generate logits.\n\nArgs:\nbody_output: A Tensor with shape\n[batch, p0, p1, model_hparams.hidden_size].\ntargets: Unused.\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nlogits: A Tensor with shape  [batch, p0, p1, ?, vocab_size].", "source": "codesearchnet"}
{"code": "def concurrence(state):\n    rho = np.array(state)\n    if (rho.ndim == 1):\n        rho = outer(state)\n    if (len(state) != 4):\n        raise Exception('Concurrence is only defined for more than two qubits')\n    YY = np.fliplr(np.diag([(- 1), 1, 1, (- 1)]))\n    A = rho.dot(YY).dot(rho.conj()).dot(YY)\n    w = la.eigh(A, eigvals_only=True)\n    w = np.sqrt(np.maximum(w, 0))\n    return max(0.0, (w[(- 1)] - np.sum(w[0:(- 1)])))", "docstring": "Calculate the concurrence.\n\nArgs:\nstate (np.array): a quantum state (1x4 array) or a density matrix (4x4\narray)\nReturns:\nfloat: concurrence.\nRaises:\nException: if attempted on more than two qubits.", "source": "codesearchnet"}
{"code": "def create_course_completion(self, user_id, payload):\n    return self._post(urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.completion_status_api_path), payload, self.COMPLETION_PROVIDER_SCOPE)", "docstring": "Send a completion status payload to the Degreed Completion Status endpoint\n\nArgs:\nuser_id: Unused.\npayload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)\ncontaining completion status fields per Degreed documentation.\n\nReturns:\nA tuple containing the status code and the body of the response.\nRaises:\nHTTPError: if we received a failure response code from Degreed", "source": "codesearchnet"}
{"code": "def add_nodes(self, nodes):\n    if (not isinstance(nodes, list)):\n        add_list = [nodes]\n    else:\n        add_list = nodes\n    self.node_list.extend(add_list)", "docstring": "Add a given node or list of nodes to self.node_list.\n\nArgs:\nnode (Node or list[Node]): the node or list of nodes to add\nto the graph\n\nReturns: None\n\nExamples:\n\nAdding one node: ::\n\n>>> from blur.markov.node import Node\n>>> graph = Graph()\n>>> node_1 = Node('One')\n>>> graph.add_nodes(node_1)\n>>> print([node.value for node in graph.node_list])\n['One']\n\nAdding multiple nodes at a time in a list: ::\n\n>>> from blur.markov.node import Node\n>>> graph = Graph()\n>>> node_1 = Node('One')\n>>> node_2 = Node('Two')\n>>> graph.add_nodes([node_1, node_2])\n>>> print([node.value for node in graph.node_list])\n['One', 'Two']", "source": "codesearchnet"}
{"code": "def initialize(self, map_arr, start_point_label=\"S\", end_point_label=\"G\", wall_label=\"\n        \n        np.set_printoptions(threshold=np.inf)\n\n        self.__agent_label = agent_label\n        self.__map_arr = map_arr\n        self.__start_point_label = start_point_label\n        start_arr_tuple = np.where(self.__map_arr == self.__start_point_label)\n        x_arr, y_arr = start_arr_tuple\n        self.__start_point_tuple = (x_arr[0], y_arr[0])\n        end_arr_tuple = np.where(self.__map_arr == self.__end_point_label)\n        x_arr, y_arr = end_arr_tuple\n        self.__end_point_tuple = (x_arr[0], y_arr[0])\n        self.__wall_label = wall_label\n\n        for x in range(self.__map_arr.shape[1]):\n            for y in range(self.__map_arr.shape[0]):\n                if (x, y) == self.__start_point_tuple or (x, y) == self.__end_point_tuple:\n                    continue\n                arr_value = self.__map_arr[y][x]\n                if arr_value == self.__wall_label:\n                    continue\n                    \n                self.save_r_df((x, y), float(arr_value))", "docstring": "Initialize map of maze and setup reward value.\n\nArgs:\nmap_arr:              Map. the 2d- `np.ndarray`.\nstart_point_label:    Label of start point.\nend_point_label:      Label of end point.\nwall_label:           Label of wall.\nagent_label:          Label of agent.", "source": "juraj-google-style"}
{"code": "def expand_value_set_url_using_service(self, value_set_url: str, terminology_service_url: str) -> value_set_pb2.ValueSet:\n    value_set_url, value_set_version = url_utils.parse_url_version(value_set_url)\n    auth = self.auth_per_terminology_server.get(terminology_service_url)\n    return self._expand_value_set_url_using_service(value_set_url=value_set_url, value_set_version=value_set_version, terminology_service_url=terminology_service_url, auth=auth)", "docstring": "Expands the value set using the requested terminology service.\n\nRequests an expansion of the value set from the terminology\nserver at `terminology_service_url` for the given URL and version if present\non the URL.\n\nIf the terminology service requires credentials to access,\n`terminology_service_url` must have an entry in the\n`auth_per_terminology_server` given to this class' constructor.\n\nRetrieves the current definition of the value set from the terminology\nservice as well as its expansion.\n\nArgs:\nvalue_set_url: The url of the value set to expand.\nterminology_service_url: The url of the terminology service to use when\nexpanding `value_set_url`.\n\nReturns:\nThe current definition of the value set from the server with its expanded\ncodes present.", "source": "github-repos"}
{"code": "async def teardown_client(self, client_id):\n    client_info = self._client_info(client_id)\n    self.adapter.remove_monitor(client_info['monitor'])\n    conns = client_info['connections']\n    for (conn_string, conn_id) in conns.items():\n        try:\n            self._logger.debug('Disconnecting client %s from conn %s at teardown', client_id, conn_string)\n            (await self.adapter.disconnect(conn_id))\n        except:\n            self._logger.exception('Error disconnecting device during teardown_client: conn_string=%s', conn_string)\n    del self._clients[client_id]", "docstring": "Release all resources held by a client.\n\nThis method must be called and awaited whenever a client is\ndisconnected.  It ensures that all of the client's resources are\nproperly released and any devices they have connected to are\ndisconnected cleanly.\n\nArgs:\nclient_id (str): The client that we should tear down.\n\nRaises:\nArgumentError: The client_id is unknown.", "source": "codesearchnet"}
{"code": "def CreateAd(client, opener, ad_group_id):\n    ad_group_ad_service = client.GetService('AdGroupAdService', 'v201809')\n    media_service = client.GetService('MediaService', 'v201809')\n    marketing_image_id = _CreateImage(media_service, opener, 'https:\n    logo_image_id = _CreateImage(media_service, opener, 'https:\n    ad = {'xsi_type': 'ResponsiveDisplayAd', 'marketingImage': {'xsi_type': 'Image', 'mediaId': marketing_image_id}, 'shortHeadline': 'Travel', 'longHeadline': 'Travel the World', 'description': 'Take to the air!', 'businessName': 'Interplanetary Cruises', 'finalUrls': ['http:\n    ad_group_ad = {'ad': ad, 'adGroupId': ad_group_id}\n    operations = [{'operation': 'ADD', 'operand': ad_group_ad}]\n    return ad_group_ad_service.mutate(operations)['value'][0]", "docstring": "Creates a ResponsiveDisplayAd.\n\nArgs:\nclient: an AdWordsClient instance.\nopener: an OpenerDirector instance.\nad_group_id: an int ad group ID.\n\nReturns:\nThe ad group ad that was successfully created.", "source": "codesearchnet"}
{"code": "def build_aspect_ratio_mask(aspect_ratios: List[List[Tuple[int, int]]], max_image_tiles: int) -> np.ndarray:\n    batch_size = len(aspect_ratios)\n    max_num_images = max([len(row) for row in aspect_ratios])\n    aspect_ratio_mask = np.zeros((batch_size, max_num_images, max_image_tiles), dtype=np.int64)\n    aspect_ratio_mask[:, :, 0] = 1\n    for i, sample_aspect_ratios in enumerate(aspect_ratios):\n        for j, (num_tiles_w, num_tiles_h) in enumerate(sample_aspect_ratios):\n            aspect_ratio_mask[i, j, :num_tiles_w * num_tiles_h] = 1\n    return aspect_ratio_mask", "docstring": "Builds a mask for the aspect ratios of the images.\n\nArgs:\naspect_ratios (`List[List[Tuple[int, int]]]`):\nA list of lists containing aspect ratios for each image in the batch.\nEach aspect ratio is represented as a tuple of (width, height) in terms of number of tiles.\nmax_image_tiles (`int`):\nThe maximum number of tiles any image can be split into.\n\nReturns:\n`np.ndarray`: A 3D numpy array of shape (batch_size, max_num_images, max_image_tiles).\nThe mask contains 1s for valid tiles and 0s for padding.", "source": "github-repos"}
{"code": "def simplify_countryname(cls, country):\n    countryupper = country.upper()\n    words = get_words_in_sentence(countryupper)\n    index = countryupper.find(',')\n    if (index != (- 1)):\n        countryupper = countryupper[:index]\n    index = countryupper.find(':')\n    if (index != (- 1)):\n        countryupper = countryupper[:index]\n    regex = re.compile('\\\\(.+?\\\\)')\n    countryupper = regex.sub('', countryupper)\n    remove = copy.deepcopy(cls.simplifications)\n    for (simplification1, simplification2) in cls.abbreviations.items():\n        countryupper = countryupper.replace(simplification1, '')\n        remove.append(simplification2)\n    for (simplification1, simplifications) in cls.multiple_abbreviations.items():\n        countryupper = countryupper.replace(simplification1, '')\n        for simplification2 in simplifications:\n            remove.append(simplification2)\n    remove = '|'.join(remove)\n    regex = re.compile((('\\\\b(' + remove) + ')\\\\b'), flags=re.IGNORECASE)\n    countryupper = regex.sub('', countryupper)\n    countryupper = countryupper.strip()\n    countryupper_words = get_words_in_sentence(countryupper)\n    if (len(countryupper_words) > 1):\n        countryupper = countryupper_words[0]\n    if countryupper:\n        words.remove(countryupper)\n    return (countryupper, words)", "docstring": "Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.\n\nArgs:\ncountry (str): Country name to simplify\n\nReturns:\nTuple[str, List[str]]: Uppercase simplified country name and list of removed words", "source": "codesearchnet"}
{"code": "def parse_verilog_file(fname):\n  \n  with open(fname, 'rt') as fh:\n    text = fh.read()\n  return parse_verilog(text)", "docstring": "Parse a named Verilog file\n\nArgs:\nfname (str): File to parse.\nReturns:\nList of parsed objects.", "source": "juraj-google-style"}
{"code": "def load_readers(filenames=None, reader=None, reader_kwargs=None, ppp_config_dir=None):\n    reader_instances = {}\n    reader_kwargs = (reader_kwargs or {})\n    reader_kwargs_without_filter = reader_kwargs.copy()\n    reader_kwargs_without_filter.pop('filter_parameters', None)\n    if (ppp_config_dir is None):\n        ppp_config_dir = get_environ_config_dir()\n    if ((not filenames) and (not reader)):\n        return {}\n    elif (reader and (filenames is not None) and (not filenames)):\n        raise ValueError(\"'filenames' was provided but is empty.\")\n    elif (not filenames):\n        LOG.warning(\"'filenames' required to create readers and load data\")\n        return {}\n    elif ((reader is None) and isinstance(filenames, dict)):\n        reader = list(filenames.keys())\n        remaining_filenames = set((f for fl in filenames.values() for f in fl))\n    elif (reader and isinstance(filenames, dict)):\n        filenames = filenames[reader]\n        remaining_filenames = set((filenames or []))\n    else:\n        remaining_filenames = set((filenames or []))\n    for (idx, reader_configs) in enumerate(configs_for_reader(reader, ppp_config_dir)):\n        if isinstance(filenames, dict):\n            readers_files = set(filenames[reader[idx]])\n        else:\n            readers_files = remaining_filenames\n        try:\n            reader_instance = load_reader(reader_configs, **reader_kwargs)\n        except (KeyError, IOError, yaml.YAMLError) as err:\n            LOG.info('Cannot use %s', str(reader_configs))\n            LOG.debug(str(err))\n            continue\n        if readers_files:\n            loadables = reader_instance.select_files_from_pathnames(readers_files)\n        if loadables:\n            reader_instance.create_filehandlers(loadables, fh_kwargs=reader_kwargs_without_filter)\n            reader_instances[reader_instance.name] = reader_instance\n            remaining_filenames -= set(loadables)\n        if (not remaining_filenames):\n            break\n    if remaining_filenames:\n        LOG.warning(\"Don't know how to open the following files: {}\".format(str(remaining_filenames)))\n    if (not reader_instances):\n        raise ValueError('No supported files found')\n    elif (not any((list(r.available_dataset_ids) for r in reader_instances.values()))):\n        raise ValueError('No dataset could be loaded. Either missing requirements (such as Epilog, Prolog) or none of the provided files match the filter parameters.')\n    return reader_instances", "docstring": "Create specified readers and assign files to them.\n\nArgs:\nfilenames (iterable or dict): A sequence of files that will be used to load data from. A ``dict`` object\nshould map reader names to a list of filenames for that reader.\nreader (str or list): The name of the reader to use for loading the data or a list of names.\nreader_kwargs (dict): Keyword arguments to pass to specific reader instances.\nppp_config_dir (str): The directory containing the configuration files for satpy.\n\nReturns: Dictionary mapping reader name to reader instance", "source": "codesearchnet"}
{"code": "def _find_root_dir(path, spor_dir):\n    start_path = pathlib.Path((os.getcwd() if (path is None) else path))\n    paths = ([start_path] + list(start_path.parents))\n    for path in paths:\n        data_dir = (path / spor_dir)\n        if (data_dir.exists() and data_dir.is_dir()):\n            return path\n    raise ValueError('No spor repository found')", "docstring": "Search for a spor repo containing `path`.\n\nThis searches for `spor_dir` in directories dominating `path`. If a\ndirectory containing `spor_dir` is found, then that directory is returned\nas a `pathlib.Path`.\n\nReturns: The dominating directory containing `spor_dir` as a\n`pathlib.Path`.\n\nRaises:\nValueError: No repository is found.", "source": "codesearchnet"}
{"code": "def last_updated(path):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.last_updated(path)", "docstring": "Get UNIX Epoch time in seconds on the FileSystem.\n\nArgs:\npath: string path of file.\n\nReturns: float UNIX Epoch time\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def log_histogram(self, name, value, step=None):\n    if isinstance(value, six.string_types):\n        raise TypeError('\"value\" should be a number, got {}'.format(type(value)))\n    self._check_step(step)\n    tf_name = self._ensure_tf_name(name)\n    summary = self._histogram_summary(tf_name, value, step=step)\n    self._log_summary(tf_name, summary, value, step=step)", "docstring": "Log a histogram for given name on given step.\n\nArgs:\nname (str): name of the variable (it will be converted to a valid\ntensorflow summary name).\nvalue (tuple or list): either list of numbers\nto be summarized as a histogram, or a tuple of bin_edges and\nbincounts that directly define a histogram.\nstep (int): non-negative integer used for visualization", "source": "codesearchnet"}
{"code": "def map_to_pdf(map_source, zoom, x, y, width, height):\n    \n    map_source = app.config[\"mapsources\"][map_source]\n    pdf_file = print_map(map_source, x=float(x), y=float(y),\n                         zoom=int(zoom), width=float(width), height=float(height), format='pdf')\n    return send_file(pdf_file,\n                     attachment_filename=\"map.pdf\",\n                     as_attachment=True)", "docstring": "Generate a PDF at the given position.\n\nArgs:\nmap_source (str): id of the map source to print.\nzoom (int): zoom-level to print\nx (float): Center of the Map in mercator projection (EPSG:4326), x-coordinate\ny (float): Center of the Map in mercator projection (EPSG:4326), y-coordinate\nwidth (float): width of the pdf in mm\nheight (float): height of the pdf in mm\n\nReturns:", "source": "juraj-google-style"}
{"code": "def maybe(cls, val: Optional[T]) -> 'Option[T]':\n        \n        return cast('Option[T]', NONE) if val is None else cls.Some(val)", "docstring": "Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.\n\nArgs:\nval: Some value.\n\nReturns:\n``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.\n\nExamples:\n>>> Option.maybe(0)\nSome(0)\n>>> Option.maybe(None)\nNONE", "source": "juraj-google-style"}
{"code": "def compress_encoder_2d(x, hparams, name=None):\n  \n  return compress_encoder(\n      x,\n      hparams,\n      strides=(2, 2),\n      kernel_size=(hparams.kernel_size, hparams.kernel_size),\n      name=name)", "docstring": "Encoder that compresses 2-D inputs by 2**num_compress_steps.\n\nArgs:\nx: Tensor of shape [batch, height, width, channels].\nhparams: HParams.\nname: string, variable scope.\n\nReturns:\nTensor of shape [batch, latent_length, hparams.hidden_size], where\nlatent_length is\nhparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).", "source": "juraj-google-style"}
{"code": "def write_to_hdf5(self, filename_out, *args, **kwargs):\n        \n\n        \n        t0 = time.time()\n\n        \n        self.__update_header()\n\n        if self.container.isheavy():\n            self.__write_to_hdf5_heavy(filename_out)\n        else:\n            self.__write_to_hdf5_light(filename_out)\n\n        t1 = time.time()\n        logger.info('Conversion time: %2.2fsec' % (t1- t0))", "docstring": "Write data to HDF5 file.\nIt check the file size then decides how to write the file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "juraj-google-style"}
{"code": "def keep_doc_examples_only(content: str) -> str:\n    splits = content.split('```')\n    content = '```' + '```'.join(splits[1::2]) + '```'\n    lines_to_keep = []\n    for line in content.split('\\n'):\n        line = re.sub('\n        if len(line) != 0 and (not line.isspace()):\n            lines_to_keep.append(line)\n    return '\\n'.join(lines_to_keep)", "docstring": "Remove everything from the code content except the doc examples (used to determined if a diff should trigger doc\ntests or not).\n\nArgs:\ncontent (`str`): The code to clean\n\nReturns:\n`str`: The cleaned code.", "source": "github-repos"}
{"code": "def subscriber(address,topics,callback,message_type):\n    \n    return Subscriber(address,topics,callback,message_type)", "docstring": "Creates a subscriber binding to the given address and\nsubscribe the given topics.\nThe callback is invoked for every message received.\n\nArgs:\n- address: the address to bind the PUB socket to.\n- topics: the topics to subscribe\n- callback: the callback to invoke for every message. Must accept 2 variables - topic and message\n- message_type: the type of message to receive", "source": "juraj-google-style"}
{"code": "def set_dataset_year_range(self, dataset_year, dataset_end_year=None):\n        \n        \n        if isinstance(dataset_year, int):\n            dataset_date = '01/01/%d' % dataset_year\n        elif isinstance(dataset_year, str):\n            dataset_date = '01/01/%s' % dataset_year\n        else:\n            raise hdx.data.hdxobject.HDXError('dataset_year has type %s which is not supported!' % type(dataset_year).__name__)\n        if dataset_end_year is None:\n            dataset_end_year = dataset_year\n        if isinstance(dataset_end_year, int):\n            dataset_end_date = '31/12/%d' % dataset_end_year\n        elif isinstance(dataset_end_year, str):\n            dataset_end_date = '31/12/%s' % dataset_end_year\n        else:\n            raise hdx.data.hdxobject.HDXError('dataset_end_year has type %s which is not supported!' % type(dataset_end_year).__name__)\n        self.set_dataset_date(dataset_date, dataset_end_date)", "docstring": "Set dataset date as a range from year or start and end year.\n\nArgs:\ndataset_year (Union[str, int]): Dataset year given as string or int\ndataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def dot(matrix, vector):\n    \n    matrix_weld_type = None\n    vector_weld_type = None\n\n    if isinstance(matrix, LazyOpResult):\n        matrix_weld_type = matrix.weld_type\n        matrix = matrix.expr\n    elif isinstance(matrix, np.ndarray):\n        matrix_weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[\n            str(matrix.dtype)]\n\n    if isinstance(vector, LazyOpResult):\n        vector_weld_type = vector.weld_type\n        vector = vector.expr\n    elif isinstance(vector, np.ndarray):\n        vector_weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[\n            str(vector.dtype)]\n\n    return NumpyArrayWeld(\n        numpy_weld_impl.dot(\n            matrix,\n            vector,\n            matrix_weld_type,\n            vector_weld_type),\n        WeldDouble())", "docstring": "Computes the dot product between a matrix and a vector.\nTODO: Make this more generic\n\nArgs:\nmatrix (TYPE): Description\nvector (TYPE): Description", "source": "juraj-google-style"}
{"code": "def trailing_stop_loss(self, accountID, **kwargs):\n        \n        return self.create(\n            accountID,\n            order=TrailingStopLossOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to create a Trailing Stop Loss Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a TrailingStopLossOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def CrowdsaleRegister(self, wallet, register_addresses, from_addr=None):\n    invoke_args = [self.ScriptHash.ToString(), 'crowdsale_register', [PromptUtils.parse_param(p, wallet) for p in register_addresses]]\n    (tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True, from_addr)\n    return (tx, fee, results)", "docstring": "Register for a crowd sale.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\nregister_addresses (list): list of public addresses to register for the sale.\n\nReturns:\ntuple:\nInvocationTransaction: the transaction.\nint: the transaction fee.\nlist: the neo VM evaluation stack results.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def decode(self, ids, strip_extraneous=False):\n    if strip_extraneous:\n        ids = strip_ids(ids, list(range((self._num_reserved_ids or 0))))\n    return ' '.join(self.decode_list(ids))", "docstring": "Transform a sequence of int ids into a human-readable string.\n\nEOS is not expected in ids.\n\nArgs:\nids: list of integers to be converted.\nstrip_extraneous: bool, whether to strip off extraneous tokens\n(EOS and PAD).\n\nReturns:\ns: human-readable string.", "source": "codesearchnet"}
{"code": "def from_dict(event_dict):\n    return CallbackEvent(callback_id=event_dict['callbackId'], name=event_dict['name'], creation_time=event_dict['time'], data=event_dict['data'])", "docstring": "Creates a CallbackEvent object from a dictionary.\n\nArgs:\nevent_dict: dict, a dictionary representing an event.\n\nReturns:\nA CallbackEvent object.", "source": "github-repos"}
{"code": "def entry_dict_from_list(all_slab_entries):\n    entry_dict = {}\n    for entry in all_slab_entries:\n        hkl = tuple(entry.miller_index)\n        if (hkl not in entry_dict.keys()):\n            entry_dict[hkl] = {}\n        if entry.clean_entry:\n            clean = entry.clean_entry\n        else:\n            clean = entry\n        if (clean not in entry_dict[hkl].keys()):\n            entry_dict[hkl][clean] = []\n        if entry.adsorbates:\n            entry_dict[hkl][clean].append(entry)\n    return entry_dict", "docstring": "Converts a list of SlabEntry to an appropriate dictionary. It is\nassumed that if there is no adsorbate, then it is a clean SlabEntry\nand that adsorbed SlabEntry has the clean_entry parameter set.\n\nArgs:\nall_slab_entries (list): List of SlabEntry objects\n\nReturns:\n(dict): Dictionary of SlabEntry with the Miller index as the main\nkey to a dictionary with a clean SlabEntry as the key to a\nlist of adsorbed SlabEntry.", "source": "codesearchnet"}
{"code": "def _build_key_wrapping_specification(self, value):\n        \n        if value is None:\n            return None\n        if not isinstance(value, dict):\n            raise TypeError(\"Key wrapping specification must be a dictionary.\")\n\n        encryption_key_info = self._build_encryption_key_information(\n            value.get('encryption_key_information')\n        )\n        mac_signature_key_info = self._build_mac_signature_key_information(\n            value.get('mac_signature_key_information')\n        )\n\n        key_wrapping_specification = cobjects.KeyWrappingSpecification(\n            wrapping_method=value.get('wrapping_method'),\n            encryption_key_information=encryption_key_info,\n            mac_signature_key_information=mac_signature_key_info,\n            attribute_names=value.get('attribute_names'),\n            encoding_option=value.get('encoding_option')\n        )\n        return key_wrapping_specification", "docstring": "Build a KeyWrappingSpecification struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nKeyWrappingSpecification struct.\n\nReturns:\nKeyWrappingSpecification: a KeyWrappingSpecification struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "juraj-google-style"}
{"code": "def _DiscoverElementTypeFromLocalname(self, type_localname):\n    elem_type = None\n    last_exception = None\n    for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values():\n        try:\n            elem_type = self.zeep_client.get_type(('{%s}%s' % (ns_prefix, type_localname)))\n        except zeep.exceptions.LookupError as e:\n            last_exception = e\n            continue\n        break\n    if (not elem_type):\n        raise last_exception\n    return elem_type", "docstring": "Searches all namespaces for a type by name.\n\nArgs:\ntype_localname: The name of the type.\n\nReturns:\nA fully qualified SOAP type with the specified name.\n\nRaises:\nA zeep.exceptions.LookupError if the type cannot be found in any\nnamespace.", "source": "codesearchnet"}
{"code": "def decorate(self, record):\n        \n        color = 'gray'\n        if record.levelno == logging.WARNING:\n            color = 'yellow'\n        if record.levelno == logging.INFO:\n            color = 'green'\n        if record.levelno == logging.DEBUG:\n            color = 'gray'\n        if record.levelno >= logging.ERROR:\n            color = 'red'\n\n        notify = False\n        if record.levelno >= logging.ERROR:\n            nofiy = True\n\n        payload = {\n            'color': color,\n            'notify': notify,\n            'message_format': 'text'\n        }\n\n        return payload", "docstring": "Build up HipChat specific values for log record\n\nArgs:\nrecord (:obj:`logging.record`): log message object\n\nReturns:\ndict: params for POST request", "source": "juraj-google-style"}
{"code": "def call(self, inputs, states):\n    raise NotImplementedError('Abstract method')", "docstring": "The function that contains the logic for one RNN step calculation.\n\nArgs:\ninputs: the input tensor, which is a slide from the overall RNN input by\nthe time dimension (usually the second dimension).\nstates: the state tensor from previous step, which has the same shape\nas `(batch, state_size)`. In the case of timestep 0, it will be the\ninitial state user specified, or zero filled tensor otherwise.\n\nReturns:\nA tuple of two tensors:\n1. output tensor for the current timestep, with size `output_size`.\n2. state tensor for next step, which has the shape of `state_size`.", "source": "github-repos"}
{"code": "def factor_hatch(field_name, patterns, factors, start=0, end=None):\n    return field(field_name, CategoricalPatternMapper(patterns=patterns, factors=factors, start=start, end=end))", "docstring": "Create a ``DataSpec`` dict that applies a client-side\n``CategoricalPatternMapper`` transformation to a ``ColumnDataSource``\ncolumn.\n\nArgs:\nfield_name (str) : a field name to configure ``DataSpec`` with\n\npatterns (seq[string]) : a list of hatch patterns to use to map to\n\nfactors (seq) : a sequences of categorical factors corresponding to\nthe palette\n\nstart (int, optional) : a start slice index to apply when the column\ndata has factors with multiple levels. (default: 0)\n\nend (int, optional) : an end slice index to apply when the column\ndata has factors with multiple levels. (default: None)\n\nReturns:\ndict\n\nAdded in version 1.1.1", "source": "codesearchnet"}
{"code": "def check_schema_transforms_match(schema, inverted_features):\n  \n  num_target_transforms = 0\n\n  for col_schema in schema:\n    col_name = col_schema['name']\n    col_type = col_schema['type'].lower()\n\n    \n    if col_name in inverted_features:\n      for transform in inverted_features[col_name]:\n        transform_name = transform['transform']\n        if transform_name == constant.TARGET_TRANSFORM:\n          num_target_transforms += 1\n          continue\n\n        elif col_type in constant.NUMERIC_SCHEMA:\n          if transform_name not in constant.NUMERIC_TRANSFORMS:\n            raise ValueError(\n                'Transform %s not supported by schema %s' % (transform_name, col_type))\n        elif col_type == constant.STRING_SCHEMA:\n          if (transform_name not in constant.CATEGORICAL_TRANSFORMS + constant.TEXT_TRANSFORMS and\n             transform_name != constant.IMAGE_TRANSFORM):\n            raise ValueError(\n                'Transform %s not supported by schema %s' % (transform_name, col_type))\n        else:\n          raise ValueError('Unsupported schema type %s' % col_type)\n\n    \n    \n    if col_name in inverted_features:\n      transform_set = {x['transform'] for x in inverted_features[col_name]}\n      if 1 != sum([transform_set.issubset(set(constant.NUMERIC_TRANSFORMS)),\n                   transform_set.issubset(set(constant.CATEGORICAL_TRANSFORMS)),\n                   transform_set.issubset(set(constant.TEXT_TRANSFORMS)),\n                   transform_set.issubset(set([constant.IMAGE_TRANSFORM])),\n                   transform_set.issubset(set([constant.TARGET_TRANSFORM]))]):\n        message =  % (str(constant.TEXT_TRANSFORMS),\n                 str(constant.CATEGORICAL_TRANSFORMS),\n                 str(constant.NUMERIC_TRANSFORMS),\n                 constant.IMAGE_TRANSFORM,\n                 constant.TARGET_TRANSFORM,\n                 col_name,\n                 str(transform_set))\n        raise ValueError(message)\n\n  if num_target_transforms != 1:\n    raise ValueError('Must have exactly one target transform')", "docstring": "Checks that the transform and schema do not conflict.\n\nArgs:\nschema: schema list\ninverted_features: inverted_features dict\n\nRaises:\nValueError if transform cannot be applied given schema type.", "source": "juraj-google-style"}
{"code": "def get_appliance_by_name(self, appliance_name):\n    appliances = self.get_appliances()\n    if appliances:\n        for appliance in appliances:\n            if (appliance['name'] == appliance_name):\n                return appliance\n    return None", "docstring": "Gets the particular Image Streamer resource based on its name.\n\nArgs:\nappliance_name:\nThe Image Streamer resource name.\n\nReturns:\ndict: Image Streamer resource.", "source": "codesearchnet"}
{"code": "def register_frame_to_skip(method: Union[Callable[..., Any], List[Callable[..., Any]]]) -> bool:\n    register_fn = getattr(_DEFAULT_LOGGER.__class__, 'register_frame_to_skip', None)\n    if register_fn is None:\n        return False\n    methods = [method] if not isinstance(method, list) else method\n    for m in methods:\n        register_fn(inspect.getsourcefile(m), m.__name__)\n    return True", "docstring": "Skips the source of the given method when logging.\n\nArgs:\nmethod: The method to skip. Can be a single method or a list of methods.\n\nReturns:\nTrue if the method is registered to skip.\n\nRaises:\nTypeError: The source file of the method cannot be inspected.", "source": "github-repos"}
{"code": "def CmdRegister(self, challenge_param, app_param):\n    self.logger.debug('CmdRegister')\n    if ((len(challenge_param) != 32) or (len(app_param) != 32)):\n        raise errors.InvalidRequestError()\n    body = bytearray((challenge_param + app_param))\n    response = self.InternalSendApdu(apdu.CommandApdu(0, apdu.CMD_REGISTER, 3, 0, body))\n    response.CheckSuccessOrRaise()\n    return response.body", "docstring": "Register security key.\n\nAsk the security key to register with a particular origin & client.\n\nArgs:\nchallenge_param: Arbitrary 32 byte challenge string.\napp_param: Arbitrary 32 byte applciation parameter.\n\nReturns:\nA binary structure containing the key handle, attestation, and a\nsignature over that by the attestation key.  The precise format\nis dictated by the FIDO U2F specs.\n\nRaises:\nTUPRequiredError: A Test of User Precense is required to proceed.\nApduError: Something went wrong on the device.", "source": "codesearchnet"}
{"code": "def __init__(self, location, field_type):\n        \n        super(GlobalContextField, self).__init__(location, field_type)\n        self.location = location\n        self.field_type = field_type\n        self.validate()", "docstring": "Construct a new GlobalContextField object that references a field at a given location.\n\nArgs:\nlocation: Location, specifying where the field was declared.\n\nReturns:\nnew GlobalContextField object", "source": "juraj-google-style"}
{"code": "def _Check3DImage(image, require_static=True):\n    try:\n        image_shape = image.get_shape().with_rank(3)\n    except ValueError:\n        raise ValueError(\"'image' (shape %s) must be three-dimensional.\" % image.shape)\n    if require_static and (not image_shape.is_fully_defined()):\n        raise ValueError(\"'image' (shape %s) must be fully defined.\" % image_shape)\n    if any((x == 0 for x in image_shape)):\n        raise ValueError(\"all dims of 'image.shape' must be > 0: %s\" % image_shape)\n    if not image_shape.is_fully_defined():\n        return [check_ops.assert_positive(array_ops.shape(image), [\"all dims of 'image.shape' must be > 0.\"])]\n    else:\n        return []", "docstring": "Assert that we are working with a properly shaped image.\n\nArgs:\nimage: 3-D Tensor of shape [height, width, channels]\nrequire_static: If `True`, requires that all dimensions of `image` are known\nand non-zero.\n\nRaises:\nValueError: if `image.shape` is not a 3-vector.\n\nReturns:\nAn empty list, if `image` has fully defined dimensions. Otherwise, a list\ncontaining an assert op is returned.", "source": "github-repos"}
{"code": "def on_moved(self, event):\n    if (not self._event_error):\n        pathtools_options = {'included_patterns': self.patterns, 'excluded_patterns': self.ignore_patterns, 'case_sensitive': self.case_sensitive}\n        if match_path(event.dest_path, **pathtools_options):\n            self.logger.info(u'Change detected from a move on: %s', event.dest_path)\n            self.compile_dependencies(event.dest_path)", "docstring": "Called when a file or a directory is moved or renamed.\n\nMany editors don't directly change a file, instead they make a\ntransitional file like ``*.part`` then move it to the final filename.\n\nArgs:\nevent: Watchdog event, either ``watchdog.events.DirMovedEvent`` or\n``watchdog.events.FileModifiedEvent``.", "source": "codesearchnet"}
{"code": "def add_tensor_filter(self, filter_name, tensor_filter):\n    self._tensor_filters[filter_name] = tensor_filter", "docstring": "Add a tensor filter.\n\nArgs:\nfilter_name: (`str`) name of the filter.\ntensor_filter: (`callable`) the filter callable. See the doc string of\n`DebugDumpDir.find()` for more details about its signature.", "source": "github-repos"}
{"code": "def generate_key(action, path_or_id, settings=None, default=\" (default)\"):\n    \n    settings = \" {}\".format(str(sorted(settings.items()))) if settings else default\n    return \"{}: {}{}\".format(action.upper(), path_or_id, settings)", "docstring": "generate_key: generate key used for caching\nArgs:\naction (str): how video is being processed (e.g. COMPRESSED or DOWNLOADED)\npath_or_id (str): path to video or youtube_id\nsettings (dict): settings for compression or downloading passed in by user\ndefault (str): if settings are None, default to this extension (avoid overwriting keys)\nReturns: filename", "source": "juraj-google-style"}
{"code": "def predict_proba(self, a, b, nb_runs=6, nb_jobs=None, gpu=None, idx=0, verbose=None, ttest_threshold=0.01, nb_max_runs=16, train_epochs=1000, test_epochs=1000):\n    (Nb_jobs, verbose, gpu) = SETTINGS.get_default(('nb_jobs', nb_jobs), ('verbose', verbose), ('gpu', gpu))\n    x = np.stack([a.ravel(), b.ravel()], 1)\n    ttest_criterion = TTestCriterion(max_iter=nb_max_runs, runs_per_iter=nb_runs, threshold=ttest_threshold)\n    AB = []\n    BA = []\n    while ttest_criterion.loop(AB, BA):\n        if (nb_jobs != 1):\n            result_pair = Parallel(n_jobs=nb_jobs)((delayed(GNN_instance)(x, idx=idx, device=('cuda:{}'.format((run % gpu)) if gpu else 'cpu'), verbose=verbose, train_epochs=train_epochs, test_epochs=test_epochs) for run in range(ttest_criterion.iter, (ttest_criterion.iter + nb_runs))))\n        else:\n            result_pair = [GNN_instance(x, idx=idx, device=('cuda:0' if gpu else 'cpu'), verbose=verbose, train_epochs=train_epochs, test_epochs=test_epochs) for run in range(ttest_criterion.iter, (ttest_criterion.iter + nb_runs))]\n        AB.extend([runpair[0] for runpair in result_pair])\n        BA.extend([runpair[1] for runpair in result_pair])\n    if verbose:\n        print('P-value after {} runs : {}'.format(ttest_criterion.iter, ttest_criterion.p_value))\n    score_AB = np.mean(AB)\n    score_BA = np.mean(BA)\n    return ((score_BA - score_AB) / (score_BA + score_AB))", "docstring": "Run multiple times GNN to estimate the causal direction.\n\nArgs:\na (np.ndarray): Variable 1\nb (np.ndarray): Variable 2\nnb_runs (int): number of runs to execute per batch (before testing for significance with t-test).\nnb_jobs (int): number of runs to execute in parallel. (Initialized with ``cdt.SETTINGS.NB_JOBS``)\ngpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``)\nidx (int): (optional) index of the pair, for printing purposes\nverbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``)\nttest_threshold (float): threshold to stop the boostraps before ``nb_max_runs`` if the difference is significant\nnb_max_runs (int): Max number of bootstraps\ntrain_epochs (int): Number of epochs during which the model is going to be trained\ntest_epochs (int): Number of epochs during which the model is going to be tested\n\nReturns:\nfloat: Causal score of the pair (Value : 1 if a->b and -1 if b->a)", "source": "codesearchnet"}
{"code": "def compute_sub_structure(self, sub_structure, tol=0.001):\n    total_energy_matrix = self.total_energy_matrix.copy()\n\n    def find_match(site):\n        for test_site in sub_structure:\n            frac_diff = (abs((np.array(site.frac_coords) - np.array(test_site.frac_coords))) % 1)\n            frac_diff = [((abs(a) < tol) or (abs(a) > (1 - tol))) for a in frac_diff]\n            if all(frac_diff):\n                return test_site\n        return None\n    matches = []\n    for (i, site) in enumerate(self._s):\n        matching_site = find_match(site)\n        if matching_site:\n            new_charge = compute_average_oxidation_state(matching_site)\n            old_charge = self._oxi_states[i]\n            scaling_factor = (new_charge / old_charge)\n            matches.append(matching_site)\n        else:\n            scaling_factor = 0\n        total_energy_matrix[(i, :)] *= scaling_factor\n        total_energy_matrix[(:, i)] *= scaling_factor\n    if (len(matches) != len(sub_structure)):\n        output = ['Missing sites.']\n        for site in sub_structure:\n            if (site not in matches):\n                output.append('unmatched = {}'.format(site))\n        raise ValueError('\\n'.join(output))\n    return sum(sum(total_energy_matrix))", "docstring": "Gives total ewald energy for an sub structure in the same\nlattice. The sub_structure must be a subset of the original\nstructure, with possible different charges.\n\nArgs:\nsubstructure (Structure): Substructure to compute Ewald sum for.\ntol (float): Tolerance for site matching in fractional coordinates.\n\nReturns:\nEwald sum of substructure.", "source": "codesearchnet"}
{"code": "def from_conv_part_data(conv_part_data, self_user_id):\n        \n        user_id = UserID(chat_id=conv_part_data.id.chat_id,\n                         gaia_id=conv_part_data.id.gaia_id)\n        return User(user_id, conv_part_data.fallback_name, None, None, [],\n                    (self_user_id == user_id) or (self_user_id is None))", "docstring": "Construct user from ``ConversationParticipantData`` message.\n\nArgs:\nconv_part_id: ``ConversationParticipantData`` message.\nself_user_id (~hangups.user.UserID or None): The ID of the current\nuser. If ``None``, assume ``conv_part_id`` is the current user.\n\nReturns:\n:class:`~hangups.user.User` object.", "source": "juraj-google-style"}
{"code": "def record_value(self, value, count=1):\n        \n        if value < 0:\n            return False\n        counts_index = self._counts_index_for(value)\n        if (counts_index < 0) or (self.counts_len <= counts_index):\n            return False\n        self.counts[counts_index] += count\n        self.total_count += count\n        self.min_value = min(self.min_value, value)\n        self.max_value = max(self.max_value, value)\n        return True", "docstring": "Record a new value into the histogram\n\nArgs:\nvalue: the value to record (must be in the valid range)\ncount: incremental count (defaults to 1)", "source": "juraj-google-style"}
{"code": "def serialize_dtype(o):\n    \n    if len(o) == 0:\n        return dict(\n            _type='np.dtype',\n            descr=str(o))\n    return dict(\n        _type='np.dtype',\n        descr=o.descr)", "docstring": "Serializes a :obj:`numpy.dtype`.\n\nArgs:\no (:obj:`numpy.dtype`): :obj:`dtype` to be serialized.\n\nReturns:\nA dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj-google-style"}
{"code": "def post(self, path, body, headers=None):\n    response = requests.post(self._url_for(path), data=json.dumps(body), headers=self._headers(headers))\n    self._handle_errors(response)\n    return response", "docstring": "Perform a POST request, providing a body, which will be JSON-encoded.\n\nArgs:\npath (str): A path that gets appended to ``base_url``.\nbody (dict): Dictionary that will be JSON-encoded and sent as the body.\n\nExample:\napi_client.post('/users', body={'name': 'Billy Jean'})\n\nReturns:\nA requests ``Response`` object.", "source": "codesearchnet"}
{"code": "def process_node(layer, node_data):\n    args, kwargs = deserialize_node(node_data, created_layers)\n    layer(*args, **kwargs)", "docstring": "Reconstruct node by linking to inbound layers\n\nArgs:\nlayer: Layer to process\nnode_data: List of layer configs", "source": "github-repos"}
{"code": "def with_rank_at_most(self, rank):\n    if self.rank is not None and self.rank > rank:\n        raise ValueError('Shape %s must have rank at most %d' % (self, rank))\n    else:\n        return self", "docstring": "Returns a shape based on `self` with at most the given rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with at most the given\nrank.\n\nRaises:\nValueError: If `self` does not represent a shape with at most the given\n`rank`.", "source": "github-repos"}
{"code": "def calculate_oobatake_dG(seq, temp):\n    \n\n    dH = calculate_oobatake_dH(seq, temp)\n    dS = calculate_oobatake_dS(seq, temp)\n    dG = dH - (temp + 273.15) * dS\n\n    \n    return dG - 563.552", "docstring": "Get free energy of unfolding (dG) using Oobatake method in units cal/mol.\n\nArgs:\nseq (str, Seq, SeqRecord): Amino acid sequence\ntemp (float): Temperature in degrees C\n\nReturns:\nfloat: Free energy of unfolding dG (J/mol)", "source": "juraj-google-style"}
{"code": "def dump_stats(filename):\n    \n    \n    res = _dump_impl()\n    f = open(filename, 'w')\n    json.dump(res, f, indent=4)\n    f.close()", "docstring": "Write collected information to file.\n\nArgs:\nfilename: absolute filename", "source": "juraj-google-style"}
{"code": "def __init__(self, zoom):\n        \n        self.zoom = zoom\n        super().__init__('Zoom angle should be in [0,360] (received {})'\n                         .format(zoom))", "docstring": "Initialization of instances:\n\nArgs:\nzoom (int): the invalid zoom level.\n\nAttributes:\nzoom (int): the invalid zoom level.", "source": "juraj-google-style"}
{"code": "def featurize_row(self, x, y):\n    x = x.ravel()\n    y = y.ravel()\n    b = np.ones(x.shape)\n    dx = np.cos(np.dot(self.W2, np.vstack((x, b)))).mean(1)\n    dy = np.cos(np.dot(self.W2, np.vstack((y, b)))).mean(1)\n    if (sum(dx) > sum(dy)):\n        return np.hstack((dx, dy, np.cos(np.dot(self.W, np.vstack((x, y, b)))).mean(1)))\n    else:\n        return np.hstack((dx, dy, np.cos(np.dot(self.W, np.vstack((y, x, b)))).mean(1)))", "docstring": "Projects the causal pair to the RKHS using the sampled kernel approximation.\n\nArgs:\nx (np.ndarray): Variable 1\ny (np.ndarray): Variable 2\n\nReturns:\nnp.ndarray: projected empirical distributions into a single fixed-size vector.", "source": "codesearchnet"}
{"code": "def write_genotypes(self, genotypes):\n        \n        if self._mode != \"w\":\n            raise UnsupportedOperation(\"not available in 'r' mode\")\n\n        \n        if self._nb_values is None:\n            self._nb_values = len(genotypes)\n\n        \n        if self._nb_values != len(genotypes):\n            raise ValueError(\"{:,d} samples expected, got {:,d}\".format(\n                self._nb_values,\n                len(genotypes),\n            ))\n\n        \n        byte_array = [\n            g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6) for g in\n            self._grouper((_byte_recode[geno] for geno in genotypes), 4)\n        ]\n        self._bed.write(bytearray(byte_array))", "docstring": "Write genotypes to binary file.\n\nArgs:\ngenotypes (numpy.ndarray): The genotypes to write in the BED file.", "source": "juraj-google-style"}
{"code": "def brake_on(self):\n    data = []\n    data.append(10)\n    data.append(self.servoid)\n    data.append(RAM_WRITE_REQ)\n    data.append(TORQUE_CONTROL_RAM)\n    data.append(1)\n    data.append(64)\n    send_data(data)", "docstring": "Set the Brakes of Herkulex\n\nIn braked mode, position control and velocity control\nwill not work, enable torque before that\n\nArgs:\nnone", "source": "codesearchnet"}
{"code": "def from_json(cls, data):\n        \n        assert 'header' in data, 'Required keyword \"header\" is missing!'\n        assert 'values' in data, 'Required keyword \"values\" is missing!'\n        return cls(Header.from_json(data['header']), data['values'])", "docstring": "Create a Data Collection from a dictionary.\n\nArgs:\n{\n\"header\": A Ladybug Header,\n\"values\": An array of values,\n}", "source": "juraj-google-style"}
{"code": "def GetGroupMap(self, since=None):\n    return GroupUpdateGetter().GetUpdates(self._GetClient(), self.conf['bucket'], self.conf['group_object'], since)", "docstring": "Return the group map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of group.GroupMap", "source": "github-repos"}
{"code": "def code_challenge(verifier):\n    digest = hashlib.sha256(verifier).digest()\n    return base64.urlsafe_b64encode(digest).rstrip(b'=')", "docstring": "Creates a 'code_challenge' as described in section 4.2 of RFC 7636\nby taking the sha256 hash of the verifier and then urlsafe\nbase64-encoding it.\n\nArgs:\nverifier: bytestring, representing a code_verifier as generated by\ncode_verifier().\n\nReturns:\nBytestring, representing a urlsafe base64-encoded sha256 hash digest,\nwithout '=' padding.", "source": "codesearchnet"}
{"code": "def sparse_dense_cwise_add(sp_t, dense_t):\n    result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values, sp_t.dense_shape, dense_t)\n    return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)", "docstring": "Adds up a SparseTensor and a dense Tensor, using these special rules:\n\n(1) Broadcasts the dense side to have the same shape as the sparse side, if\neligible;\n(2) Then, only the dense values pointed to by the indices of the SparseTensor\nparticipate in the cwise addition.\n\nBy the rules, the result is a logical SparseTensor with exactly the same\nindices and shape, but possibly with different non-zero values.  The output of\nthis Op is the resultant non-zero values.\n\nArgs:\nsp_t: the SparseTensor operand.\ndense_t: the dense Tensor operand; must have the same dtype and a\nbroadcast-compatible shape as `sp_t`.\n\nReturns:\noutput: the SparseTensor output.", "source": "github-repos"}
{"code": "def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if token_type_ids is None:\n        token_type_ids = tf.cast(tf.fill(dims=input_shape, value=0), tf.int64)\n    if position_ids is None:\n        if input_ids is not None:\n            position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids, past_key_values_length=past_key_values_length)\n        else:\n            position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1, dtype=tf.int64), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n    final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def get_cartesian(self):\n        \n        def create_cartesian(positions, row):\n            xyz_frame = pd.DataFrame(columns=['atom', 'x', 'y', 'z'],\n                                     index=self.index[:row], dtype='f8')\n            xyz_frame['atom'] = self.loc[xyz_frame.index, 'atom']\n            xyz_frame.loc[:, ['x', 'y', 'z']] = positions[:row]\n            from chemcoord.cartesian_coordinates.cartesian_class_main \\\n                import Cartesian\n            cartesian = Cartesian(xyz_frame, metadata=self.metadata)\n            return cartesian\n\n        c_table = self.loc[:, ['b', 'a', 'd']]\n        c_table = c_table.replace(constants.int_label)\n        c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)})\n        c_table = c_table.values.astype('i8').T\n\n        C = self.loc[:, ['bond', 'angle', 'dihedral']].values.T\n        C[[1, 2], :] = np.radians(C[[1, 2], :])\n\n        err, row, positions = transformation.get_X(C, c_table)\n        positions = positions.T\n\n        if err == ERR_CODE_InvalidReference:\n            rename = dict(enumerate(self.index))\n            i = rename[row]\n            b, a, d = self.loc[i, ['b', 'a', 'd']]\n            cartesian = create_cartesian(positions, row)\n            raise InvalidReference(i=i, b=b, a=a, d=d,\n                                   already_built_cartesian=cartesian)\n        elif err == ERR_CODE_OK:\n            return create_cartesian(positions, row + 1)", "docstring": "Return the molecule in cartesian coordinates.\n\nRaises an :class:`~exceptions.InvalidReference` exception,\nif the reference of the i-th atom is undefined.\n\nArgs:\nNone\n\nReturns:\nCartesian: Reindexed version of the zmatrix.", "source": "juraj-google-style"}
{"code": "def account_id(self, value):\n    if (type(value) is not str):\n        raise TypeError('commit value must be string')\n    self._account_id = value", "docstring": "Sets the current account id\n\nArgs:\nvalue: current account id (string)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def format_variant(variant, variant_type='snv'):\n    \n    chrom = variant.get('chrom')\n    pos = variant.get('start')\n    \n    ref = variant.get('ref')\n    alt = variant.get('alt')\n    \n    if variant_type == 'sv':\n        pos = int((variant['pos_left'] + variant['pos_right'])/2)\n        ref = 'N'\n        alt = f\"<{variant['sv_type']}>\"\n\n    info = None\n    \n    info = format_info(variant, variant_type=variant_type)\n\n    variant_line = f\"{chrom}\\t{pos}\\t.\\t{ref}\\t{alt}\\t.\\t.\\t{info}\"\n    \n    return variant_line", "docstring": "Convert variant information to a VCF formated string\n\nArgs:\nvariant(dict)\nvariant_type(str)\n\nReturns:\nvcf_variant(str)", "source": "juraj-google-style"}
{"code": "def connect_tcp(cls, host, port, echo=False):\n        \n\n        return cls(TCPClientSocketChannel(host, port), echo=echo)", "docstring": "Set up a :class:`TCPClientSocketChannel` and create a :class:`Flow`\ninstance for it.\n\nArgs:\nhost(str): The hostname or IP address to connect to.\nport(int): The port number to connect to.\necho(bool): Whether to echo read/written data to stdout by default.\n\nReturns:\n:class:`Flow`: A Flow instance initialised with the TCP socket\nchannel.", "source": "juraj-google-style"}
{"code": "def to_tensor(self):\n    return tf.stack((self.year(), self.month(), self.day()), axis=-1)", "docstring": "Packs the dates into a single Tensor.\n\nThe Tensor has shape `date_tensor.shape() + (3,)`, where the last dimension\nrepresents years, months and days, in this order.\n\nThis can be convenient when the dates are the final result of a computation\nin the graph mode: a `tf.function` can return `date_tensor.to_tensor()`, or,\nif one uses `tf.compat.v1.Session`, they can call\n`session.run(date_tensor.to_tensor())`.\n\nReturns:\nA Tensor of shape `date_tensor.shape() + (3,)`.\n\n#### Example\n\n```python\ndates = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)])\ndates.to_tensor()  # tf.Tensor with contents [[2019, 1, 25], [2020, 3, 2]].\n```", "source": "github-repos"}
{"code": "def validate(self, data):\n    user = self._confirmation.email.user\n    if (app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED and (not user.check_password(data['password']))):\n        raise serializers.ValidationError(_('The provided password is invalid.'))\n    data['email'] = self._confirmation.email.email\n    return data", "docstring": "Validate the provided data.\n\nReturns:\ndict:\nThe validated data.\n\nRaises:\nserializers.ValidationError:\nIf the provided password is invalid.", "source": "codesearchnet"}
{"code": "def _keys(self, pattern):\n        \n        result = []\n        for client in self.redis_clients:\n            result.extend(list(client.scan_iter(match=pattern)))\n        return result", "docstring": "Execute the KEYS command on all Redis shards.\n\nArgs:\npattern: The KEYS pattern to query.\n\nReturns:\nThe concatenated list of results from all shards.", "source": "juraj-google-style"}
{"code": "def index(self, name=None):  \n    \n    try:\n      return self.header.index(name)\n    except ValueError:\n      raise TableError('Unknown index name %s.' % name)", "docstring": "Returns index number of supplied column name.\n\nArgs:\nname: string of column name.\n\nRaises:\nTableError: If name not found.\n\nReturns:\nIndex of the specified header entry.", "source": "juraj-google-style"}
{"code": "def load_map_coordinates(map_file):\n    if (map_file[(- 4):] == '.pkl'):\n        map_data = pickle.load(open(map_file))\n        lon = map_data['lon']\n        lat = map_data['lat']\n    else:\n        map_data = Dataset(map_file)\n        if ('lon' in map_data.variables.keys()):\n            lon = map_data.variables['lon'][:]\n            lat = map_data.variables['lat'][:]\n        else:\n            lon = map_data.variables['XLONG'][0]\n            lat = map_data.variables['XLAT'][0]\n    return (lon, lat)", "docstring": "Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.\n\nArgs:\nmap_file: Filename for the file containing coordinate information.\n\nReturns:\nLatitude and longitude grids as numpy arrays.", "source": "codesearchnet"}
{"code": "def Export(self):\n    data = bytearray(38)\n    data[0] = 128\n    data[1:33] = self.PrivateKey[0:32]\n    data[33] = 1\n    checksum = Crypto.Default().Hash256(data[0:34])\n    data[34:38] = checksum[0:4]\n    b58 = base58.b58encode(bytes(data))\n    return b58.decode('utf-8')", "docstring": "Export this KeyPair's private key in WIF format.\n\nReturns:\nstr: The key in wif format", "source": "codesearchnet"}
{"code": "def multi(self, **kwargs):\n    path = self._get_path('multi')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Search the movie, tv show and person collections with a single query.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\ninclude_adult: (optional) Toggle the inclusion of adult titles.\nExpected value is True or False.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def compare_checkpoints(self, attr_mean):\n    if (self._cmp_greater and (attr_mean > self.best_checkpoint_attr_value)):\n        return True\n    elif ((not self._cmp_greater) and (attr_mean < self.best_checkpoint_attr_value)):\n        return True\n    return False", "docstring": "Compares two checkpoints based on the attribute attr_mean param.\nGreater than is used by default. If  command-line parameter\ncheckpoint_score_attr starts with \"min-\" less than is used.\n\nArguments:\nattr_mean: mean of attribute value for the current checkpoint\n\nReturns:\nTrue: when attr_mean is greater than previous checkpoint attr_mean\nand greater than function is selected\nwhen attr_mean is less than previous checkpoint attr_mean and\nless than function is selected\nFalse: when attr_mean is not in alignment with selected cmp fn", "source": "codesearchnet"}
{"code": "def get_username(self, userid):\n    username = self.user_map.get(userid)\n    if (not username):\n        users = self.get_users()\n        if users:\n            members = {m['id']: m['name'] for m in users.get('members', [{}]) if (m.get('id') and m.get('name'))}\n            if members:\n                self.user_map.update(members)\n            username = self.user_map.get(userid, userid)\n    return username", "docstring": "Perform a lookup of users to resolve a userid to a username\n\nArgs:\nuserid (string): Slack userid to lookup.\n\nReturns:\nstring: Human-friendly name of the user", "source": "codesearchnet"}
{"code": "def remove_species(self, species):\n        \n        new_sites = []\n        species = [get_el_sp(sp) for sp in species]\n        for site in self._sites:\n            new_sp_occu = {sp: amt for sp, amt in site.species.items()\n                           if sp not in species}\n            if len(new_sp_occu) > 0:\n                new_sites.append(Site(new_sp_occu, site.coords,\n                                      properties=site.properties))\n        self._sites = new_sites", "docstring": "Remove all occurrences of a species from a molecule.\n\nArgs:\nspecies: Species to remove.", "source": "juraj-google-style"}
{"code": "def probabilistic_collocation(order, dist, subset=.1):\n    \n    abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist)\n\n    likelihood = dist.pdf(abscissas)\n\n    alpha = numpy.random.random(len(weights))\n    alpha = likelihood > alpha*subset*numpy.max(likelihood)\n\n    abscissas = abscissas.T[alpha].T\n    weights = weights[alpha]\n    return abscissas, weights", "docstring": "Probabilistic collocation method.\n\nArgs:\norder (int, numpy.ndarray) : Quadrature order along each axis.\ndist (Dist) : Distribution to generate samples from.\nsubset (float) : Rate of which to removed samples.", "source": "juraj-google-style"}
{"code": "def create(cls, name, config=None, kind=\"spark\"):\n        \n        conn = Qubole.agent()\n        return conn.post(cls.rest_entity_path,\n                         data={'name': name, 'config': config, 'kind': kind})", "docstring": "Create a new app.\n\nArgs:\n`name`: the name of the app\n\n`config`: a dictionary of key-value pairs\n\n`kind`: kind of the app (default=spark)", "source": "juraj-google-style"}
{"code": "def get_col_info(table_name, col_name, meta_file):\n    \n\n    with open(meta_file, 'r') as f:\n        meta = json.load(f)\n\n    data_table, table = load_data_table(table_name, meta_file, meta)\n\n    for field in table['fields']:\n        if field['name'] == col_name:\n            col_meta = field\n\n    col = data_table[col_name]\n\n    return (col, col_meta)", "docstring": "Return the content and metadata of a fiven column.\n\nArgs:\ntable_name(str): Name of the table.\ncol_name(str): Name of the column.\nmeta_file(str): Path to the meta.json file.\n\nReturns:\ntuple(pandas.Series, dict)", "source": "juraj-google-style"}
{"code": "def _kl_categorical_categorical(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_categorical_categorical\"):\n    \n    return tf.reduce_sum(\n        input_tensor=tf.nn.softmax(a.logits) *\n        (tf.nn.log_softmax(a.logits) - tf.nn.log_softmax(b.logits)),\n        axis=-1)", "docstring": "Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.\n\nArgs:\na: instance of a OneHotCategorical distribution object.\nb: instance of a OneHotCategorical distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_categorical_categorical\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "def _init_project_service(self, version):\n        \n        project_cfg = self._load_config_section(CONFIG_PROJECT_SECTION)\n        self._token_project = project_cfg[CONFIG_TOKEN]\n        proto = project_cfg[CONFIG_PROTOCOL]\n        host = project_cfg[CONFIG_HOST]\n\n        self._project = ProjectService(host, version)\n        self._project.base_protocol = proto\n        self._project.set_auth(self._token_project)", "docstring": "Method to initialize the Project Service from the config data\n\nArgs:\nversion (string): Version of Boss API to use.\n\nReturns:\nNone\n\nRaises:\n(KeyError): if given invalid version.", "source": "juraj-google-style"}
{"code": "def _get_test_methods(self, test_names):\n    test_methods = []\n    for test_name in test_names:\n        if test_name.startswith(TEST_SELECTOR_REGEX_PREFIX):\n            regex_matching_methods = self._get_regex_matching_test_methods(test_name.removeprefix(TEST_SELECTOR_REGEX_PREFIX))\n            test_methods += regex_matching_methods\n            continue\n        self._assert_valid_test_name(test_name)\n        if test_name not in self.get_existing_test_names():\n            raise Error(f'{self.TAG} does not have test method {test_name}.')\n        if hasattr(self, test_name):\n            test_method = getattr(self, test_name)\n        elif test_name in self._generated_test_table:\n            test_method = self._generated_test_table[test_name]\n        test_methods.append((test_name, test_method))\n    return test_methods", "docstring": "Resolves test method names to bound test methods.\n\nArgs:\ntest_names: A list of strings, each string is a test method name or a\nregex for matching test names.\n\nReturns:\nA list of tuples of (string, function). String is the test method\nname, function is the actual python method implementing its logic.\n\nRaises:\nError: The test name does not follow naming convention 'test_*'.\nThis can only be caused by user input.", "source": "github-repos"}
{"code": "class TvltProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'feature_extractor']\n    image_processor_class = 'TvltImageProcessor'\n    feature_extractor_class = 'TvltFeatureExtractor'\n\n    def __init__(self, image_processor, feature_extractor):\n        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)\n        self.image_processor = image_processor\n        self.feature_extractor = feature_extractor\n\n    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):\n        \n        if images is None and audio is None:\n            raise ValueError('You need to specify either an `images` or `audio` input to process.')\n        images_mixed_dict = None\n        if images is not None:\n            images_dict = self.image_processor(images, *args, mask_pixel=mask_pixel, **kwargs)\n        if images_mixed is not None:\n            images_mixed_dict = self.image_processor(images_mixed, *args, is_mixed=True, **kwargs)\n        if audio is not None:\n            audio_dict = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)\n        output_dict = {}\n        if audio is not None:\n            output_dict.update(audio_dict)\n        if images is not None:\n            output_dict.update(images_dict)\n        if images_mixed_dict is not None:\n            output_dict.update(images_mixed_dict)\n        return output_dict\n\n    @property\n    def model_input_names(self):\n        image_processor_input_names = self.image_processor.model_input_names\n        feature_extractor_input_names = self.feature_extractor.model_input_names\n        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))", "docstring": "Constructs a TVLT processor which wraps a TVLT image processor and TVLT feature extractor into a single processor.\n\n[`TvltProcessor`] offers all the functionalities of [`TvltImageProcessor`] and [`TvltFeatureExtractor`]. See the\ndocstring of [`~TvltProcessor.__call__`] for more information.\n\nArgs:\nimage_processor (`TvltImageProcessor`):\nAn instance of [`TvltImageProcessor`]. The image processor is a required input.\nfeature_extractor (`TvltFeatureExtractor`):\nAn instance of [`TvltFeatureExtractor`]. The feature extractor is a required input.", "source": "github-repos"}
{"code": "def item_at(self, row, column):\n    return self.children[str(row)].children[str(column)]", "docstring": "Returns the TableItem instance at row, column cordinates\n\nArgs:\nrow (int): zero based index\ncolumn (int): zero based index", "source": "codesearchnet"}
{"code": "def upload_benchmark_run(self, dataset_name, table_name, run_id):\n    expected_file = os.path.join(self._logging_dir, logger.BENCHMARK_RUN_LOG_FILE_NAME)\n    with tf.gfile.GFile(expected_file) as f:\n        benchmark_json = json.load(f)\n        benchmark_json['model_id'] = run_id\n        table_ref = self._bq_client.dataset(dataset_name).table(table_name)\n        errors = self._bq_client.insert_rows_json(table_ref, [benchmark_json])\n        if errors:\n            tf.logging.error('Failed to upload benchmark info to bigquery: {}'.format(errors))", "docstring": "Upload benchmark run information to Bigquery.\n\nArgs:\ndataset_name: string, the name of bigquery dataset where the data will be\nuploaded.\ntable_name: string, the name of bigquery table under the dataset where\nthe data will be uploaded.\nrun_id: string, a unique ID that will be attached to the data, usually\nthis is a UUID4 format.", "source": "codesearchnet"}
{"code": "def get_module_object_and_name(globals_dict):\n    name = globals_dict.get('__name__', None)\n    module = sys.modules.get(name, None)\n    return _ModuleObjectAndName(module, (sys.argv[0] if (name == '__main__') else name))", "docstring": "Returns the module that defines a global environment, and its name.\n\nArgs:\nglobals_dict: A dictionary that should correspond to an environment\nproviding the values of the globals.\n\nReturns:\n_ModuleObjectAndName - pair of module object & module name.\nReturns (None, None) if the module could not be identified.", "source": "codesearchnet"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Creates TFGPT2Tokenizer from configurations\n\nArgs:\nconfig (Dict): Dictionary with keys such as stated in `get_config`.", "source": "github-repos"}
{"code": "def data_to_unicode(self, data):\n    if isinstance(data, dict):\n        return {self.to_unicode(k): self.to_unicode(v) for (k, v) in data.iteritems()}\n    if isinstance(data, list):\n        return [self.to_unicode(l) for l in data]\n    else:\n        return self.to_unicode(data)", "docstring": "Recursively convert a list or dictionary to unicode.\n\nArgs:\ndata: The data to be unicoded.\n\nReturns:\nUnicoded data.", "source": "codesearchnet"}
{"code": "def add_case(self, case_obj):\n        \n        for ind_obj in case_obj.individuals:\n            self._add_individual(ind_obj)\n        logger.debug(\"Adding case {0} to plugin\".format(case_obj.case_id))\n        self.case_objs.append(case_obj)\n        if case_obj.tabix_index:\n            logger.debug(\"Setting filters.can_filter_range to True\")\n            self.filters.can_filter_range = True", "docstring": "Add a case obj with individuals to adapter\n\nArgs:\ncase_obj (puzzle.models.Case)", "source": "juraj-google-style"}
{"code": "def __init__(self, agent, environment, repeat_actions=1, history=None, id_=0):\n        \n        super(Runner, self).__init__(agent, environment, repeat_actions, history)\n\n        self.id = id_  \n        self.current_timestep = None", "docstring": "Initialize a single Runner object (one Agent/one Environment).\n\nArgs:\nid_ (int): The ID of this Runner (for distributed TF runs).", "source": "juraj-google-style"}
{"code": "def __parse_hgvs_syntax(self, aa_hgvs):\n        \n        self.is_valid = True  \n        self.is_synonymous = False  \n        if self.unknown_effect or self.is_no_protein:\n            \n            self.pos = None\n            pass\n        elif self.is_lost_stop:\n            self.initial = aa_hgvs[0]\n            self.mutated = re.findall('([A-Z?*]+)$', aa_hgvs)[0]\n            self.pos = int(re.findall('^\\*(\\d+)', aa_hgvs)[0])\n            self.stop_pos = None\n        elif self.is_lost_start:\n            self.initial = aa_hgvs[0]\n            self.mutated = aa_hgvs[-1]\n            self.pos = int(aa_hgvs[1:-1])\n        elif self.is_missense:\n            self.initial = aa_hgvs[0]\n            self.mutated = aa_hgvs[-1]\n            self.pos = int(aa_hgvs[1:-1])\n            self.stop_pos = None  \n            if self.initial == self.mutated:\n                self.is_synonymous = True\n                self.is_non_silent = False\n            elif self.mutated == '*':\n                self.is_nonsense_mutation = True\n        elif self.is_indel:\n            if self.is_insertion:\n                if not self.is_missing_info:\n                    self.initial = re.findall('([A-Z])\\d+', aa_hgvs)[:2]  \n                    self.pos = tuple(map(int, re.findall('[A-Z](\\d+)', aa_hgvs)[:2]))  \n                    self.mutated = re.findall('(?<=INS)[A-Z0-9?*]+', aa_hgvs)[0]\n                    self.mutated = self.mutated.strip('?')  \n                else:\n                    self.initial = ''\n                    self.pos = tuple()\n                    self.mutated = ''\n            elif self.is_deletion:\n                if not self.is_missing_info:\n                    self.initial = re.findall('([A-Z])\\d+', aa_hgvs)\n                    self.pos = tuple(map(int, re.findall('[A-Z](\\d+)', aa_hgvs)))\n                    self.mutated = re.findall('(?<=DEL)[A-Z]*', aa_hgvs)[0]\n                else:\n                    self.initial = ''\n                    self.pos = tuple()\n                    self.mutated = ''\n        elif self.is_frame_shift:\n            self.initial = aa_hgvs[0]\n            self.mutated = ''\n            try:\n                self.pos = int(re.findall('[A-Z*](\\d+)', aa_hgvs)[0])\n                if self.is_premature_stop_codon:\n                    self.stop_pos = int(re.findall('\\*>?(\\d+)$', aa_hgvs)[0])\n                else:\n                    self.stop_pos = None\n            except IndexError:\n                \n                \n                \n                \n                self.logger.debug('(Parsing-Problem) frame shift hgvs string: \"%s\"' % aa_hgvs)\n                self.pos = None\n                self.stop_pos = None\n                self.is_missing_info = True\n        elif self.is_nonsense_mutation:\n            self.initial = aa_hgvs[0]\n            self.mutated = '*'  \n            self.stop_pos = 0  \n            try:\n                self.pos = int(aa_hgvs[1:-1])\n            except ValueError:\n                \n                self.is_valid = False\n                self.pos = None\n                self.logger.debug('(Parsing-Problem) Invalid HGVS Amino Acid '\n                                  'syntax: ' + aa_hgvs)\n            if self.initial == self.mutated:\n                \n                self.is_synonymous = True\n                self.is_non_silent = False\n        else:\n            self.is_valid = False  \n            self.logger.debug('(Parsing-Problem) Invalid HGVS Amino Acid '\n                          
    'syntax: ' + aa_hgvs)", "docstring": "Convert HGVS syntax for amino acid change into attributes.\n\nSpecific details of the mutation are stored in attributes like\nself.intial (prior to mutation), sel.pos (mutation position),\nself.mutated (mutation), and self.stop_pos (position of stop codon,\nif any).\n\nArgs:\naa_hgvs (str): amino acid string following HGVS syntax", "source": "juraj-google-style"}
{"code": "def command(task: Task, command: str) -> Result:\n    cmd = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n    (stdout, stderr) = cmd.communicate()\n    stdout = stdout.decode()\n    stderr = stderr.decode()\n    if cmd.poll():\n        raise CommandError(command, cmd.returncode, stdout, stderr)\n    result = (stderr if stderr else stdout)\n    return Result(result=result, host=task.host, stderr=stderr, stdout=stdout)", "docstring": "Executes a command locally\n\nArguments:\ncommand: command to execute\n\nReturns:\nResult object with the following attributes set:\n* result (``str``): stderr or stdout\n* stdout (``str``): stdout\n* stderr (``str``): stderr\n\nRaises:\n:obj:`nornir.core.exceptions.CommandError`: when there is a command error", "source": "codesearchnet"}
{"code": "def universal_transformer_depthwise_attention(layer_inputs, step, hparams, ffn_unit, attention_unit):\n    (_, inputs, memory) = layer_inputs\n    all_states = memory\n    if hparams.depth_embedding:\n        all_states = add_depth_embedding(all_states)\n    states_so_far = all_states[(:step, :, :, :)]\n    states_so_far_weights = tf.nn.softmax(common_layers.dense(states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1), activation=None, use_bias=True), axis=(- 1))\n    state_to_be_transformed = tf.reduce_sum((states_so_far * states_so_far_weights), axis=0)\n    new_state = step_preprocess(state_to_be_transformed, step, hparams)\n    for i in range(hparams.num_inrecurrence_layers):\n        with tf.variable_scope(('rec_layer_%d' % i)):\n            new_state = ffn_unit(attention_unit(new_state))\n    memory = fill_memory_slot(memory, new_state, (step + 1))\n    return (new_state, inputs, memory)", "docstring": "universal_transformer with depth-wise attention.\n\nIt uses an attention mechanism-flipped vertically-\nover all the states from previous steps to generate the new_state.\n\nArgs:\nlayer_inputs:\n- state: state\n- memory: contains states from all the previous steps.\nstep: indicating number of steps take so far\nhparams: model hyper-parameters.\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\n\n\nReturns:\nlayer_output:\nnew_state: new state\nmemory: contains states from all the previous steps.", "source": "codesearchnet"}
{"code": "def get_reversed_statuses(context):\n    \n    _rev = {v: k for k, v in STATUSES.items()}\n    _rev.update(dict(context.config['reversed_statuses']))\n    return _rev", "docstring": "Return a mapping of exit codes to status strings.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context\n\nReturns:\ndict: the mapping of exit codes to status strings.", "source": "juraj-google-style"}
{"code": "def build_mount_env(source, mounts):\n  \n  return '\\n'.join([\n      'export {0}={1}/{2}'.format(var.name, source.rstrip('/'),\n                                  var.docker_path.rstrip('/')) for var in mounts\n  ])", "docstring": "Return a multi-line string with export statements for the variables.\n\nArguments:\nsource: Folder with the data. For example /mnt/data\nmounts: a list of MountParam\n\nReturns:\na multi-line string with a shell script that sets environment variables\ncorresponding to the mounts.", "source": "juraj-google-style"}
{"code": "def parse(lines, root=None):\n    doc = {}\n    entries = []\n    name = None\n    total = None\n    for line in lines:\n        line = line.strip()\n        if (not line):\n            continue\n        if (line and (line[0] == '/') and (line[(- 1)] == ':')):\n            if (name is None):\n                name = line[:(- 1)]\n                if entries:\n                    d = Directory(name, (total or len(entries)), entries)\n                    doc[root] = d\n                    total = None\n                    entries = []\n            else:\n                d = Directory(name, (total or len(entries)), entries)\n                doc[(name or root)] = d\n                total = None\n                entries = []\n                name = line[:(- 1)]\n            continue\n        if line.startswith('total'):\n            total = int(line.split(None, 1)[1])\n            continue\n        entries.append(line)\n    name = (name or root)\n    doc[name] = Directory(name, (total or len(entries)), entries)\n    return doc", "docstring": "Parses a list of lines from ls into dictionaries representing their\ncomponents.\n\nArgs:\nlines (list): A list of lines generated by ls.\nroot (str): The directory name to be used for ls output stanzas that\ndon't have a name.\n\nReturns:\nA dictionary representing the ls output. It's keyed by the path\ncontaining each ls stanza.", "source": "codesearchnet"}
{"code": "def recipe_dbm(config, auth_read, report, delete):\n    dbm(config, {'auth': auth_read, 'report': report, 'delete': delete})", "docstring": "Create a DV360 report.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nreport (json) - Report body and filters.\ndelete (boolean) - If report exists, delete it before creating a new one.", "source": "github-repos"}
{"code": "def output(self, _filename):\n        \n\n        for contract in self.slither.contracts_derived:\n            txt = \"\\nContract %s\"%contract.name\n            table = PrettyTable([\"Function\",\n                                 \"Modifiers\"])\n            for function in contract.functions:\n                modifiers = function.modifiers\n                for call in function.all_internal_calls():\n                    if isinstance(call, Function):\n                        modifiers += call.modifiers\n                for (_, call) in function.all_library_calls():\n                    if isinstance(call, Function):\n                        modifiers += call.modifiers\n                table.add_row([function.name, [m.name for m in set(modifiers)]])\n            txt += \"\\n\"+str(table)\n            self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):\n    try:\n        all = mod[1].__all__\n    except AttributeError:\n        raise RuntimeError(('Module (%s) MUST have `__all__` defined.' % mod[1].__name__))\n    try:\n        name = mod[1].__displayname__\n    except AttributeError:\n        name = mod[0]\n    try:\n        category = mod[1].__category__\n        self.__categories.setdefault(category, 0)\n        self.__categories[category] += 1\n    except AttributeError:\n        pass\n    feats = inspect.getmembers(mod[1])\n    fname = (('content/' + mod[1].__name__.replace('.', '/').replace(' ', '-')) + '.rst')\n    feats = [f for f in feats if ((f[0] in all) and (showprivate or (not (f[0][0:1] == '_'))))]\n    with open(fname, 'w') as fid:\n        fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))\n        for f in feats:\n            if (inspect.isclass(f[1]) or inspect.isfunction(f[1])):\n                try:\n                    featname = f[1].__displayname__\n                except AttributeError:\n                    featname = f[1].__name__\n                try:\n                    category = f[1].__category__\n                    self.__categories.setdefault(category, 0)\n                    self.__categories[category] += 1\n                except AttributeError:\n                    pass\n                if inspect.isclass(f[1]):\n                    fid.write(Classifier.GetClassText(featname, ('%s.%s' % (mod[1].__name__, f[1].__name__)), showprivate=showprivate, showinh=showinh))\n                elif inspect.isfunction(f[1]):\n                    fid.write(Classifier.GetFunctionText(featname, ('%s.%s' % (mod[1].__name__, f[1].__name__))))\n        fid.close()\n    return ('\\n   %s' % fname.split('/')[(- 1)])", "docstring": "An internal helper to create a page for a single module. This will\nautomatically generate the needed RSF to document the module\nand save the module to its own page in its appropriate location.\n\nArgs:\nmod (module): The single module to document as its own page\nshowprivate (bool): A flag for whether or not to display private members\n\nReturns:\nstr: The file name ready to be appended to a toctree", "source": "codesearchnet"}
{"code": "def open_street_map_geoloc_link(data):\n    if isinstance(data, str):\n        lat_lon = ip_geoloc(data)\n        if (lat_lon is None):\n            return ''\n        (lat, lon) = lat_lon\n    else:\n        (lat, lon) = data\n    return ('https:", "docstring": "Get a link to open street map pointing on this IP's geolocation.\n\nArgs:\ndata (str/tuple): IP address or (latitude, longitude).\n\nReturns:\nstr: a link to open street map pointing on this IP's geolocation.", "source": "codesearchnet"}
{"code": "def image(array, domain=None, width=None, format='png', **kwargs):\n  \n\n  image_data = serialize_array(array, fmt=format, domain=domain)\n  image = IPython.display.Image(data=image_data, format=format, width=width)\n  IPython.display.display(image)", "docstring": "Display an image.\n\nArgs:\narray: NumPy array representing the image\nfmt: Image format e.g. png, jpeg\ndomain: Domain of pixel values, inferred from min & max values if None\nw: width of output image, scaled using nearest neighbor interpolation.\nsize unchanged if None", "source": "juraj-google-style"}
{"code": "def uniquelines(q):\n    setoflines = set()\n    for facets in q:\n        for line in itertools.combinations(facets, 2):\n            setoflines.add(tuple(sorted(line)))\n    return setoflines", "docstring": "Given all the facets, convert it into a set of unique lines.  Specifically\nused for converting convex hull facets into line pairs of coordinates.\n\nArgs:\nq: A 2-dim sequence, where each row represents a facet. E.g.,\n[[1,2,3],[3,6,7],...]\n\nReturns:\nsetoflines:\nA set of tuple of lines.  E.g., ((1,2), (1,3), (2,3), ....)", "source": "codesearchnet"}
{"code": "def import_image_from_url(self, url, repository=None, tag=None,\n                              changes=None):\n        \n        return self.import_image(\n            src=url, repository=repository, tag=tag, changes=changes\n        )", "docstring": "Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only\nsupports importing from a URL.\n\nArgs:\nurl (str): A URL pointing to a tar file.\nrepository (str): The repository to create\ntag (str): The tag to apply", "source": "juraj-google-style"}
{"code": "def __init__(self, threshold=1e-3, symprec=0.1, **kwargs):\n        \n        self._kwargs = kwargs\n        self._sp = SubstitutionProbability(**kwargs)\n        self._threshold = threshold\n        self._symprec = symprec", "docstring": "This substitutor uses the substitution probability class to\nfind good substitutions for a given chemistry or structure.\n\nArgs:\nthreshold:\nprobability threshold for predictions\nsymprec:\nsymmetry precision to determine if two structures\nare duplicates\nkwargs:\nkwargs for the SubstitutionProbability object\nlambda_table, alpha", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, input_sizes=None, filter_sizes=None, out_backprop_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NHWC', data_format_dst='NHWC', expected=None):\n    total_size_1 = np.prod(filter_sizes)\n    total_size_2 = np.prod(out_backprop_sizes)\n    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)\n    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)\n    strides = [1] + strides + [1]\n    if dilations is not None:\n        dilations = [1] + dilations + [1]\n    expected = np.reshape(expected, input_sizes)\n    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src, data_format_dst)\n    x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)\n    input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst)\n    out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(out_backprop_sizes, data_format_src, data_format_dst)\n    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst)\n    if dilations is not None:\n        dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst)\n    with self.session() as sess:\n        t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)\n        t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)\n        with self.test_scope():\n            out = gen_nn_ops.conv2d_backprop_input(input_sizes=input_sizes, filter=t1, out_backprop=t2, strides=strides, dilations=dilations, padding=padding, data_format=data_format_dst)\n        value = sess.run(out, {t1: x1, t2: x2})\n        self.assertAllEqual(input_sizes, value.shape)\n        self.assertAllClose(expected, value, 0.001)", "docstring": "Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.\n\nArgs:\ninput_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_sizes: Filter tensor dimensions in\n[kernel_rows, kernel_cols, input_depth, output_depth].\nout_backprop_sizes: Output gradients tensor dimensions.\nstrides: Strides.\ndilations: Dilations.\npadding: Padding type.\ndata_format_src: Data format input is in.\ndata_format_dst: Data format verification will run and input is converted\nto.\nexpected: Expected output.", "source": "github-repos"}
{"code": "def key_changes(self, from_token, to_token):\n    params = {'from': from_token, 'to': to_token}\n    return self._send('GET', '/keys/changes', query_params=params)", "docstring": "Gets a list of users who have updated their device identity keys.\n\nArgs:\nfrom_token (str): The desired start point of the list. Should be the\nnext_batch field from a response to an earlier call to /sync.\nto_token (str): The desired end point of the list. Should be the next_batch\nfield from a recent call to /sync - typically the most recent such call.", "source": "codesearchnet"}
{"code": "def get_unit_by_id(self, unit_id: str) -> typing.Optional['BaseUnit']:\n    VALID_POSITIVE_INT.validate(unit_id, 'get_unit_by_id')\n    for unit in self.units:\n        if (unit.unit_id == unit_id):\n            return unit\n    return None", "docstring": "Gets a unit from its ID\n\nArgs:\nunit_id: unit id\n\nReturns: Unit", "source": "codesearchnet"}
{"code": "def _Check3DImage(image, require_static=True):\n  \n  try:\n    image_shape = image.get_shape().with_rank(3)\n  except ValueError:\n    raise ValueError('\\'image\\' must be three-dimensional.')\n  if require_static and not image_shape.is_fully_defined():\n    raise ValueError('\\'image\\' must be fully defined.')\n  if any(x == 0 for x in image_shape):\n    raise ValueError('all dims of \\'image.shape\\' must be > 0: %s' %\n                     image_shape)", "docstring": "Assert that we are working with properly shaped image.\nArgs:\nimage: 3-D Tensor of shape [height, width, channels]\nrequire_static: If `True`, requires that all dimensions of `image` are\nknown and non-zero.\n\nRaises:\nValueError: if image.shape is not a [3] vector.", "source": "juraj-google-style"}
{"code": "def nice_join(seq, sep=', ', conjunction='or'):\n    seq = [str(x) for x in seq]\n    if ((len(seq) <= 1) or (conjunction is None)):\n        return sep.join(seq)\n    else:\n        return ('%s %s %s' % (sep.join(seq[:(- 1)]), conjunction, seq[(- 1)]))", "docstring": "Join together sequences of strings into English-friendly phrases using\na conjunction when appropriate.\n\nArgs:\nseq (seq[str]) : a sequence of strings to nicely join\n\nsep (str, optional) : a sequence delimiter to use (default: \", \")\n\nconjunction (str or None, optional) : a conjunction to use for the last\ntwo items, or None to reproduce basic join behavior (default: \"or\")\n\nReturns:\na joined string\n\nExamples:\n>>> nice_join([\"a\", \"b\", \"c\"])\n'a, b or c'", "source": "codesearchnet"}
{"code": "def _compute_nfps_real(counts, sizes):\n    nfps = np.zeros((len(sizes), len(sizes)))\n    for l in range(len(sizes)):\n        for u in range(l, len(sizes)):\n            nfps[(l, u)] = _compute_nfp_real(l, u, counts, sizes)\n    return nfps", "docstring": "Computes the matrix of expected false positives for all possible\nsub-intervals of the complete domain of set sizes.\n\nArgs:\ncounts: the complete distribution of set sizes.\nsizes: the complete domain of set sizes.\n\nReturn (np.array): the 2-D array of expected number of false positives\nfor every pair of [l, u] interval, where l is axis-0 and u is\naxis-1.", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    preferred_language = cls._ParseStringOption(\n        options, 'preferred_language', default_value='en-US')\n\n    setattr(configuration_object, '_preferred_language', preferred_language)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def _sequence_like(instance, args):\n    return nest_util.sequence_like(instance, args)", "docstring": "Converts the sequence `args` to the same type as `instance`.\n\nArgs:\ninstance: an instance of `tuple`, `list`, `namedtuple`, `dict`,\n`collections.OrderedDict`, or `composite_tensor.Composite_Tensor`\nor `type_spec.TypeSpec`.\nargs: items to be converted to the `instance` type.\n\nReturns:\n`args` with the type of `instance`.", "source": "github-repos"}
{"code": "def assertAllGreaterEqual(self, a, comparison_target):\n    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)\n    a = self._GetNdArray(a)\n    self.assertGreaterEqual(np.min(a), comparison_target)", "docstring": "Assert element values are all greater than or equal to a target value.\n\nArgs:\na: The numpy `ndarray`, or anything that can be converted into a numpy\n`ndarray` (including Tensor).\ncomparison_target: The target value of comparison.", "source": "github-repos"}
{"code": "def l1_regularizer(weight=1.0, scope=None):\n  \n  def regularizer(tensor):\n    with tf.name_scope(scope, 'L1Regularizer', [tensor]):\n      l1_weight = tf.convert_to_tensor(weight,\n                                       dtype=tensor.dtype.base_dtype,\n                                       name='weight')\n      return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')\n  return regularizer", "docstring": "Define a L1 regularizer.\n\nArgs:\nweight: scale the loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\na regularizer function.", "source": "juraj-google-style"}
{"code": "def get_registered_object(name, custom_objects=None, module_objects=None):\n    if name in _GLOBAL_CUSTOM_OBJECTS:\n        return _GLOBAL_CUSTOM_OBJECTS[name]\n    elif custom_objects and name in custom_objects:\n        return custom_objects[name]\n    elif module_objects and name in module_objects:\n        return module_objects[name]\n    return None", "docstring": "Returns the class associated with `name` if it is registered with Keras.\n\nThis function is part of the Keras serialization and deserialization\nframework. It maps strings to the objects associated with them for\nserialization/deserialization.\n\nExample:\n```\ndef from_config(cls, config, custom_objects=None):\nif 'my_custom_object_name' in config:\nconfig['hidden_cls'] = tf.keras.utils.get_registered_object(\nconfig['my_custom_object_name'], custom_objects=custom_objects)\n```\n\nArgs:\nname: The name to look up.\ncustom_objects: A dictionary of custom objects to look the name up in.\nGenerally, custom_objects is provided by the user.\nmodule_objects: A dictionary of custom objects to look the name up in.\nGenerally, module_objects is provided by midlevel library implementers.\n\nReturns:\nAn instantiable class associated with 'name', or None if no such class\nexists.", "source": "github-repos"}
{"code": "def run_attack_work(self, work_id):\n    \n    adv_batch_id = (\n        self.attack_work.work[work_id]['output_adversarial_batch_id'])\n    adv_batch = self.adv_batches[adv_batch_id]\n    dataset_batch_id = adv_batch['dataset_batch_id']\n    submission_id = adv_batch['submission_id']\n    epsilon = self.dataset_batches[dataset_batch_id]['epsilon']\n    logging.info('Attack work piece: '\n                 'dataset_batch_id=\"%s\" submission_id=\"%s\" '\n                 'epsilon=%d', dataset_batch_id, submission_id, epsilon)\n    if submission_id in self.blacklisted_submissions:\n      raise WorkerError('Blacklisted submission')\n    \n    attack = AttackSubmission(submission_id, self.submissions,\n                              self.storage_bucket)\n    attack.download()\n    \n    input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id)\n    if attack.type == TYPE_TARGETED:\n      \n      target_class_filename = os.path.join(input_dir, 'target_class.csv')\n      self.dataset_meta.save_target_classes_for_batch(target_class_filename,\n                                                      self.dataset_batches,\n                                                      dataset_batch_id)\n    \n    if os.path.exists(LOCAL_OUTPUT_DIR):\n      sudo_remove_dirtree(LOCAL_OUTPUT_DIR)\n    os.mkdir(LOCAL_OUTPUT_DIR)\n    if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR):\n      shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR)\n    os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR)\n    if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR):\n      shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR)\n    os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR)\n    \n    elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon)\n    if attack.type == TYPE_TARGETED:\n      \n      os.remove(target_class_filename)\n    \n    image_hashes = eval_lib.enforce_epsilon_and_compute_hash(\n        input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon)\n    if not image_hashes:\n      logging.warning('No images saved by the attack.')\n      return elapsed_time_sec, submission_id\n    \n    \n    for clean_image_id, hash_val in iteritems(image_hashes):\n      \n      \n      adv_img_id = adv_batch_id + '_' + clean_image_id\n      \n      os.rename(\n          os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, clean_image_id + '.png'),\n          os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, adv_img_id + '.png'))\n      \n      image_path = '{0}/adversarial_images/{1}/{1}.zip/{2}.png'.format(\n          self.round_name, adv_batch_id, adv_img_id)\n      \n      adv_batch['images'][adv_img_id] = {\n          'clean_image_id': u'' + str(clean_image_id),\n          'image_path': u'' + str(image_path),\n          'image_hash': u'' + str(hash_val),\n      }\n    \n    zipped_images_filename = os.path.join(LOCAL_ZIPPED_OUTPUT_DIR,\n                                          adv_batch_id + '.zip')\n    try:\n      logging.debug('Compressing adversarial images to %s',\n                    zipped_images_filename)\n      shell_call([\n          'zip', '-j', '-r', zipped_images_filename,\n          LOCAL_PROCESSED_OUTPUT_DIR])\n    except subprocess.CalledProcessError as e:\n      raise WorkerError('Can''t make archive from adversarial iamges', e)\n    \n    dst_filename = '{0}/adversarial_images/{1}/{1}.zip'.format(\n        self.round_name, adv_batch_id)\n    logging.debug(\n        'Copying archive with adversarial images to %s', dst_filename)\n    self.storage_client.new_blob(dst_filename).upload_from_filename(\n        zipped_images_filename)\n    \n    
logging.debug('Writing adversarial batch to datastore')\n    self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id)\n    return elapsed_time_sec, submission_id", "docstring": "Runs one attack work.\n\nArgs:\nwork_id: ID of the piece of work to run\n\nReturns:\nelapsed_time_sec, submission_id - elapsed time and id of the submission\n\nRaises:\nWorkerError: if error occurred during execution.", "source": "juraj-google-style"}
{"code": "def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):\n\n    def yield_csv(csv_contents, csv_file):\n        try:\n            for line in csv_contents:\n                (yield line)\n        finally:\n            try:\n                csv_file.close()\n            except:\n                pass\n\n    def process_csv(csv_contents, csv_file):\n        return [line for line in yield_csv(csv_contents, csv_file)]\n    if file_contents:\n        csv_file = BytesIO(file_contents)\n    else:\n        csv_file = open(file_name, 'rb')\n    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)\n    if on_demand:\n        table = yield_csv(reader, csv_file)\n    else:\n        table = process_csv(reader, csv_file)\n    return [table]", "docstring": "Gets good old csv data from a file.\n\nArgs:\nfile_name: The name of the local file, or the holder for the\nextension type when the file_contents are supplied.\nencoding: Loads the file with the specified cell encoding.\nfile_contents: The file-like object holding contents of file_name.\nIf left as None, then file_name is directly loaded.\non_demand: Requests that a yielder be used in place of a full data\ncopy.", "source": "codesearchnet"}
{"code": "def calc_attribute_statistics(self, statistic_name):\n        \n        stats = {}\n        for var, grids in self.attributes.items():\n            if len(grids) > 1:\n                stats[var] = getattr(np.array([getattr(np.ma.array(x, mask=self.masks[t] == 0), statistic_name)()\n                                               for t, x in enumerate(grids)]), statistic_name)()\n            else:\n                stats[var] = getattr(np.ma.array(grids[0], mask=self.masks[0] == 0), statistic_name)()\n        return stats", "docstring": "Calculates summary statistics over the domains of each attribute.\n\nArgs:\nstatistic_name (string): numpy statistic, such as mean, std, max, min\n\nReturns:\ndict of statistics from each attribute grid.", "source": "juraj-google-style"}
{"code": "def apply_transformations(collection, transformations, select=None):\n    for t in transformations:\n        kwargs = dict(t)\n        func = kwargs.pop('name')\n        cols = kwargs.pop('input', None)\n        if isinstance(func, string_types):\n            if (func in ('and', 'or')):\n                func += '_'\n            if (not hasattr(transform, func)):\n                raise ValueError((\"No transformation '%s' found!\" % func))\n            func = getattr(transform, func)\n            func(collection, cols, **kwargs)\n    if (select is not None):\n        transform.Select(collection, select)\n    return collection", "docstring": "Apply all transformations to the variables in the collection.\n\nArgs:\ntransformations (list): List of transformations to apply.\nselect (list): Optional list of names of variables to retain after all\ntransformations are applied.", "source": "codesearchnet"}
{"code": "def __call__(self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_input_ids: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, freeze_feature_encoder: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(inputs, dtype='i4')\n    if decoder_input_ids is None:\n        raise ValueError('`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must be specified as an input argument.')\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n    if decoder_position_ids is None:\n        batch_size, sequence_length = decoder_input_ids.shape\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {'dropout': dropout_rng} if dropout_rng is not None else {}\n    return self.module.apply({'params': params or self.params}, inputs=jnp.array(inputs, dtype='f4'), attention_mask=jnp.array(attention_mask, dtype='i4'), decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import FlaxSpeechEncoderDecoderModel, AutoTokenizer\n\n>>> # load a fine-tuned wav2vec2-2-bart model\n>>> model = FlaxSpeechEncoderDecoderModel.from_pretrained(\"patrickvonplaten/wav2vec2-2-bart-large\")\n>>> # load output tokenizer\n>>> tokenizer_output = AutoTokenizer.from_pretrained(\"facebook/bart-large\")\n\n>>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)\n\n>>> # use bart's special bos, pad and eos tokens\n>>> model.config.decoder_start_token_id = model.decoder.config.bos_token_id\n>>> model.config.pad_token_id = model.decoder.config.pad_token_id\n>>> model.config.eos_token_id = model.decoder.config.eos_token_id\n\n>>> outputs = model.generate(inputs)\n# Assert something? More interesting input? dtype correct?\n```", "source": "github-repos"}
{"code": "def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):\n    super(ExponentialDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.decay_rate = decay_rate\n    self.staircase = staircase\n    self.name = name", "docstring": "Applies exponential decay to the learning rate.\n\nArgs:\ninitial_learning_rate: A scalar `float32` or `float64` `Tensor` or a\nPython number.  The initial learning rate.\ndecay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\nMust be positive.  See the decay computation above.\ndecay_rate: A scalar `float32` or `float64` `Tensor` or a\nPython number.  The decay rate.\nstaircase: Boolean.  If `True` decay the learning rate at discrete\nintervals\nname: String.  Optional name of the operation.  Defaults to\n'ExponentialDecay'.", "source": "github-repos"}
{"code": "def parse_config(data: dict) -> dict:\n    \n    return {\n        'email': data.get('email'),\n        'family': data['family_id'],\n        'samples': [{\n            'id': sample_id,\n            'type': analysis_type,\n        } for sample_id, analysis_type in data['analysis_type'].items()],\n        'config_path': data['config_file_analysis'],\n        'is_dryrun': True if 'dry_run_all' in data else False,\n        'log_path': data['log_file'],\n        'out_dir': data['outdata_dir'],\n        'priority': data['slurm_quality_of_service'],\n        'sampleinfo_path': data['sample_info_file'],\n    }", "docstring": "Parse MIP config file.\n\nArgs:\ndata (dict): raw YAML input from MIP analysis config file\n\nReturns:\ndict: parsed data", "source": "juraj-google-style"}
{"code": "def disable_eager_op_as_function(unused_msg: str) -> Callable[[_F], _F]:\n    return _disable_test(execute_func=False)", "docstring": "Decorator for a function in a with_eager_op_as_function enabled test class.\n\nBlocks the function from being run with eager_op_as_function enabled.\n\nArgs:\nunused_msg: Reason for disabling.\n\nReturns:\nThe wrapped function with _disable_eager_op_as_function attr set to True.", "source": "github-repos"}
{"code": "def collection(self, **kwargs):\n        \n        path = self._get_path('collection')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Search for collections by name.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _process_origin(self, req, resp, origin):\n    if self._cors_config['allow_all_origins']:\n        if self.supports_credentials:\n            self._set_allow_origin(resp, origin)\n        else:\n            self._set_allow_origin(resp, '*')\n        return True\n    if (origin in self._cors_config['allow_origins_list']):\n        self._set_allow_origin(resp, origin)\n        return True\n    regex = self._cors_config['allow_origins_regex']\n    if (regex is not None):\n        if regex.match(origin):\n            self._set_allow_origin(resp, origin)\n            return True\n    return False", "docstring": "Inspects the request and adds the Access-Control-Allow-Origin\nheader if the requested origin is allowed.\n\nReturns:\n``True`` if the header was added and the requested origin\nis allowed, ``False`` if the origin is not allowed and the\nheader has not been added.", "source": "codesearchnet"}
{"code": "def get_asn_whois(self, retry_count=3):\n        \n\n        try:\n\n            \n            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            conn.settimeout(self.timeout)\n            log.debug('ASN query for {0}'.format(self.address_str))\n            conn.connect((CYMRU_WHOIS, 43))\n\n            \n            conn.send((\n                ' -r -a -c -p -f {0}{1}'.format(\n                    self.address_str, '\\r\\n')\n            ).encode())\n\n            data = ''\n            while True:\n\n                d = conn.recv(4096).decode()\n                data += d\n\n                if not d:\n\n                    break\n\n            conn.close()\n\n            return str(data)\n\n        except (socket.timeout, socket.error) as e:  \n\n            log.debug('ASN query socket error: {0}'.format(e))\n            if retry_count > 0:\n\n                log.debug('ASN query retrying (count: {0})'.format(\n                    str(retry_count)))\n                return self.get_asn_whois(retry_count - 1)\n\n            else:\n\n                raise ASNLookupError(\n                    'ASN lookup failed for {0}.'.format(self.address_str)\n                )\n\n        except:  \n\n            raise ASNLookupError(\n                'ASN lookup failed for {0}.'.format(self.address_str)\n            )", "docstring": "The function for retrieving ASN information for an IP address from\nCymru via port 43/tcp (WHOIS).\n\nArgs:\nretry_count (:obj:`int`): The number of times to retry in case\nsocket errors, timeouts, connection resets, etc. are\nencountered. Defaults to 3.\n\nReturns:\nstr: The raw ASN data.\n\nRaises:\nASNLookupError: The ASN lookup failed.", "source": "juraj-google-style"}
{"code": "def _flat_types(self):\n    return structure.get_flat_tensor_types(self.element_spec)", "docstring": "Returns a list `tf.DType`s for the element tensor representation.\n\nReturns:\nA list `tf.DType`s for the element tensor representation.", "source": "github-repos"}
{"code": "def default_peek(python_type, exposes):\n    with_args = False\n    make = python_type\n    try:\n        make()\n    except (SystemExit, KeyboardInterrupt):\n        raise\n    except:\n        make = (lambda : python_type.__new__(python_type))\n        try:\n            make()\n        except (SystemExit, KeyboardInterrupt):\n            raise\n        except:\n            make = (lambda args: python_type.__new__(python_type, *args))\n            with_args = True\n\n    def missing(attr):\n        return AttributeError(\"can't set attribute '{}' ({})\".format(attr, python_type))\n    if with_args:\n\n        def peek(store, container, _stack=None):\n            state = []\n            for attr in exposes:\n                if (attr in container):\n                    state.append(store.peek(attr, container, _stack=_stack))\n                else:\n                    state.append(None)\n            return make(state)\n    elif ('__dict__' in exposes):\n\n        def peek(store, container, _stack=None):\n            obj = make()\n            for attr in container:\n                val = store.peek(attr, container, _stack=_stack)\n                try:\n                    setattr(obj, attr, val)\n                except AttributeError:\n                    raise missing(attr)\n            return obj\n    else:\n\n        def peek(store, container, _stack=None):\n            obj = make()\n            for attr in exposes:\n                if (attr in container):\n                    val = store.peek(attr, container, _stack=_stack)\n                else:\n                    val = None\n                try:\n                    setattr(obj, attr, val)\n                except AttributeError:\n                    raise missing(attr)\n            return obj\n    return peek", "docstring": "Autoserializer factory.\n\nWorks best in Python 3.\n\nArguments:\n\npython_type (type): type constructor.\n\nexposes (iterable): sequence of attributes.\n\nReturns:\n\ncallable: deserializer (`peek` routine).", "source": "codesearchnet"}
{"code": "def peek_step(self, val: ArrayValue,\n                  sn: \"DataNode\") -> Tuple[ObjectValue, \"DataNode\"]:\n        \n        keys = self.parse_keys(sn)\n        for en in val:\n            flag = True\n            try:\n                for k in keys:\n                    if en[k] != keys[k]:\n                        flag = False\n                        break\n            except KeyError:\n                continue\n            if flag:\n                return (en, sn)\n        return (None, sn)", "docstring": "Return the entry addressed by the receiver + its schema node.\n\nArgs:\nval: Current value (array).\nsn:  Current schema node.", "source": "juraj-google-style"}
{"code": "def long_id(self, sample):\n    if (self.grid == 'WAC'):\n        lon = (self.CENTER_LONGITUDE + (((((sample - self.SAMPLE_PROJECTION_OFFSET) - 1) * self.MAP_SCALE) * 0.001) / (self.A_AXIS_RADIUS * np.cos(((self.CENTER_LATITUDE * np.pi) / 180.0)))))\n        return ((lon * 180) / np.pi)\n    else:\n        lon = (float(self.CENTER_LONGITUDE) + (((sample - float(self.SAMPLE_PROJECTION_OFFSET)) - 1) / float(self.MAP_RESOLUTION)))\n        return lon", "docstring": "Return the corresponding longitude\n\nArgs:\nsample (int): sample number on a line\n\nReturns:\nCorreponding longidude in degree", "source": "codesearchnet"}
{"code": "def _forward_and_backward_functions(self, inference_args, input_tangents):\n    outputs = self._func_graph.outputs[:self._num_inference_outputs]\n    return self._build_functions_for_outputs(outputs, inference_args, input_tangents)", "docstring": "Shortcut for when only first-order gradients are required.\n\nThe returned backward function does not accept gradients with respect to\nside output of forward_function. This is fine as long as the user can't\npossibly request second order tape gradients, as when they've used a single\nnon-persistent GradientTape. Since we don't need the backward function to\ntake gradients with respect to side outputs, we can skip some potentially\nslow graph building.\n\nArgs:\ninference_args: A flat list of Tensors, arguments to the inference\nfunction.\ninput_tangents: A flat list of Tensors, jvps associated with\n`inference_args`.\n\nReturns:\nA tuple of (forward_function, backward_function):\nforward_function: Takes the same inputs as the inference function, but\nreturns side outputs used by backward_function in addition to the\ninference function's outputs.\nbackward_function: Takes side outputs from forward_function and\ngradients with respect to the \"real\" outputs of forward_function and\nreturns gradients with respect to the inputs.", "source": "github-repos"}
{"code": "def _write_json_blob(encoded_value, pipeline_id=None):\n    default_bucket = app_identity.get_default_gcs_bucket_name()\n    if (default_bucket is None):\n        raise Exception('No default cloud storage bucket has been set for this application. This app was likely created before v1.9.0, please see: https:\n    path_components = ['/', default_bucket, 'appengine_pipeline']\n    if pipeline_id:\n        path_components.append(pipeline_id)\n    path_components.append(uuid.uuid4().hex)\n    file_name = posixpath.join(*path_components)\n    with cloudstorage.open(file_name, 'w', content_type='application/json') as f:\n        for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):\n            end_index = (start_index + _MAX_JSON_SIZE)\n            f.write(encoded_value[start_index:end_index])\n    key_str = blobstore.create_gs_key(('/gs' + file_name))\n    logging.debug('Created blob for filename = %s gs_key = %s', file_name, key_str)\n    return blobstore.BlobKey(key_str)", "docstring": "Writes a JSON encoded value to a Cloud Storage File.\n\nThis function will store the blob in a GCS file in the default bucket under\nthe appengine_pipeline directory. Optionally using another directory level\nspecified by pipeline_id\nArgs:\nencoded_value: The encoded JSON string.\npipeline_id: A pipeline id to segment files in Cloud Storage, if none,\nthe file will be created under appengine_pipeline\n\nReturns:\nThe blobstore.BlobKey for the file that was created.", "source": "codesearchnet"}
{"code": "def gmove(pattern, destination):\n    \n    for item in glob.glob(pattern):\n        if not move(item, destination):\n            return False\n    return True", "docstring": "Move all file found by glob.glob(pattern) to destination directory.\n\nArgs:\npattern (str): Glob pattern\ndestination (str): Path to the destination directory.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def add_direct(self, target, var_id, var_type, data):\n    data = (struct.pack('<H', var_id) + _convert_to_bytes(var_type, data))\n    if ((self.data_size - self.data_index) < len(data)):\n        raise DataError('Not enough space for data in new conig entry', needed_space=len(data), actual_space=(self.data_size - self.data_index))\n    new_entry = ConfigEntry(target, var_id, data)\n    for entry in self.entries:\n        if ((entry.target == new_entry.target) and (entry.var_id == new_entry.var_id)):\n            entry.valid = False\n    self.entries.append(new_entry)\n    self.data_index += new_entry.data_space()", "docstring": "Directly add a config variable.\n\nThis method is meant to be called from emulation scenarios that\nwant to directly set config database entries from python.\n\nArgs:\ntarget (SlotIdentifer): The target slot for this config variable.\nvar_id (int): The config variable ID\nvar_type (str): The config variable type\ndata (bytes or int or str): The data that will be encoded according\nto var_type.", "source": "codesearchnet"}
{"code": "def setErrorHandler(self, errorhandler):\n        \n        class ErrorHandlerWrapper(ErrorHandler):\n            def __init__(self, errorhandler):\n                self.errorhandler = errorhandler\n                self.last_exception = None\n\n            def error(self, exception):\n                if isinstance(exception, amplpython.AMPLException):\n                    exception = AMPLException(exception)\n                try:\n                    self.errorhandler.error(exception)\n                except Exception as e:\n                    self.last_exception = e\n\n            def warning(self, exception):\n                if isinstance(exception, amplpython.AMPLException):\n                    exception = AMPLException(exception)\n                try:\n                    self.errorhandler.warning(exception)\n                except Exception as e:\n                    self.last_exception = e\n\n            def check(self):\n                if self.last_exception is not None:\n                    e, self.last_exception = self.last_exception, None\n                    raise e\n\n        errorhandler_wrapper = ErrorHandlerWrapper(errorhandler)\n\n        class InnerErrorHandler(amplpython.ErrorHandler):\n            def error(self, exception):\n                errorhandler_wrapper.error(exception)\n\n            def warning(self, exception):\n                errorhandler_wrapper.warning(exception)\n\n        self._errorhandler = errorhandler\n        self._errorhandler_inner = InnerErrorHandler()\n        self._errorhandler_wrapper = errorhandler_wrapper\n        lock_and_call(\n            lambda: self._impl.setErrorHandler(self._errorhandler_inner),\n            self._lock\n        )", "docstring": "Sets a new error handler.\n\nArgs:\nerrorhandler: The object handling AMPL errors and warnings.", "source": "juraj-google-style"}
{"code": "def verify_binary(flag_name, process_args=None):\n    \n    if process_args is None:\n        process_args = []\n\n    path = getattr(FLAGS, flag_name)\n    if not path:\n        logging.error('Flag %r not set' % flag_name)\n        sys.exit(1)\n\n    with open(os.devnull, 'w') as dev_null:\n        try:\n            subprocess.check_call(\n                [path] + process_args,\n                stdout=dev_null,\n                stderr=subprocess.STDOUT)\n        except:\n            logging.exception('--%s binary at path %r does not work',\n                              flag_name, path)\n            sys.exit(1)", "docstring": "Exits the program if the binary from the given flag doesn't run.\n\nArgs:\nflag_name: Name of the flag that should be the path to the binary.\nprocess_args: Args to pass to the binary to do nothing but verify\nthat it's working correctly (something like \"--version\") is good.\nOptional. Defaults to no args.\n\nRaises:\nSystemExit with error if the process did not work.", "source": "juraj-google-style"}
{"code": "def malloc(self, key, shape, dtype):\n    if ((key not in self._memory) or (self._memory[key].shape != shape) or (self._memory[key].dtype != dtype)):\n        self._memory[key] = Shmem(key, shape, dtype, self._uuid)\n    return self._memory[key].np_array", "docstring": "Allocates a block of shared memory, and returns a numpy array whose data corresponds with that block.\n\nArgs:\nkey (str): The key to identify the block.\nshape (list of int): The shape of the numpy array to allocate.\ndtype (type): The numpy data type (e.g. np.float32).\n\nReturns:\nnp.ndarray: The numpy array that is positioned on the shared memory.", "source": "codesearchnet"}
{"code": "def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"usergroup\": usergroup})\n        return self.api_call(\"usergroups.users.list\", http_verb=\"GET\", params=kwargs)", "docstring": "List all users in a User Group\n\nArgs:\nusergroup (str): The encoded ID of the User Group to update.\ne.g. 'S0604QSJC'", "source": "juraj-google-style"}
{"code": "def cross_product_compare(start, candidate1, candidate2):\n    delta1 = (candidate1 - start)\n    delta2 = (candidate2 - start)\n    return cross_product(delta1, delta2)", "docstring": "Compare two relative changes by their cross-product.\n\nThis is meant to be a way to determine which vector is more \"inside\"\nrelative to ``start``.\n\n.. note::\n\nThis is a helper for :func:`_simple_convex_hull`.\n\nArgs:\nstart (numpy.ndarray): The start vector (as 1D NumPy array with\n2 elements).\ncandidate1 (numpy.ndarray): The first candidate vector (as 1D\nNumPy array with 2 elements).\ncandidate2 (numpy.ndarray): The second candidate vector (as 1D\nNumPy array with 2 elements).\n\nReturns:\nfloat: The cross product of the two differences.", "source": "codesearchnet"}
{"code": "def _find_methods(cls, *names, **kwds):\n    \n    reverse = kwds.pop('reverse', False)\n    assert not kwds, repr(kwds)\n    cache = cls.__dict__.get('_find_methods_cache')\n    if cache:\n      hit = cache.get(names)\n      if hit is not None:\n        return hit\n    else:\n      cls._find_methods_cache = cache = {}\n    methods = []\n    for c in cls.__mro__:\n      for name in names:\n        method = c.__dict__.get(name)\n        if method is not None:\n          methods.append(method)\n    if reverse:\n      methods.reverse()\n    cache[names] = methods\n    return methods", "docstring": "Compute a list of composable methods.\n\nBecause this is a common operation and the class hierarchy is\nstatic, the outcome is cached (assuming that for a particular list\nof names the reversed flag is either always on, or always off).\n\nArgs:\n*names: One or more method names.\nreverse: Optional flag, default False; if True, the list is\nreversed.\n\nReturns:\nA list of callable class method objects.", "source": "juraj-google-style"}
{"code": "def get_parameter_vector(self, include_frozen=False):\n        \n        if include_frozen:\n            return self.parameter_vector\n        return self.parameter_vector[self.unfrozen_mask]", "docstring": "Get an array of the parameter values in the correct order\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "juraj-google-style"}
{"code": "def __init__(self, title='sdl2', x=lib.SDL_WINDOWPOS_CENTERED, y=lib.SDL_WINDOWPOS_CENTERED,\n                 w=640, h=480, flags=frozenset()):\n        \n        self._ptr = check_ptr_err(lib.SDL_CreateWindow(title.encode('utf-8'), x, y, w, h, enumtools.get_mask(flags)))", "docstring": "Create a window with the specified position, dimensions, and flags.\n\nArgs:\ntitle (str): The title of the window.\nx (int): The x postion of the window.\ny (int): The y position of the window.\nw (int): The width of the window.\nh (int): The height of the window.\nflags (Set[WindowFlags]): The flags for the window.\nRaises:\nSDLError: If the window could not be created.", "source": "juraj-google-style"}
{"code": "def softmax(input_, labels=None, name=PROVIDED, loss_weight=None, per_example_weights=None):\n    if (labels is not None):\n        full = input_.as_layer()\n        return SoftmaxResult(input_.softmax_activation(), full.cross_entropy(labels, name=name, loss_weight=loss_weight, per_example_weights=per_example_weights))\n    else:\n        return SoftmaxResult(input_.softmax_activation(), None)", "docstring": "Applies softmax and if labels is not None, then it also adds a loss.\n\nArgs:\ninput_: A rank 2 Tensor or a Pretty Tensor holding the logits.\nlabels: The target labels to learn as a float tensor.  Use None to not\ninclude a training loss.\nname: The optional name.\nloss_weight: A scalar multiplier for the loss.\nper_example_weights: A Tensor with a weight per example.\nReturns:\nA tuple of the a handle to softmax and a handle to the loss tensor.\nRaises:\nValueError: If the datatype is wrong.", "source": "codesearchnet"}
{"code": "def _block_orth(self, projection_matrix):\n    n = projection_matrix.shape.as_list()[0]\n    kernel = {}\n    eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n    kernel[0] = projection_matrix\n    kernel[1] = eye - projection_matrix\n    return kernel", "docstring": "Construct a kernel.\n\nUsed to construct orthgonal kernel.\n\nArgs:\nprojection_matrix: A symmetric projection matrix of size n x n.\n\nReturns:\n[projection_matrix, (1 - projection_matrix)].", "source": "github-repos"}
{"code": "def stream_realtime(self, stream, value):\n    if (not self.stream_iface_open):\n        return\n    reading = IOTileReading(0, stream, value)\n    report = IndividualReadingReport.FromReadings(self.iotile_id, [reading])\n    self.stream(report)", "docstring": "Stream a realtime value as an IndividualReadingReport.\n\nIf the streaming interface of the VirtualInterface this\nVirtualDevice is attached to is not opened, the realtime\nreading may be dropped.\n\nArgs:\nstream (int): The stream id to send\nvalue (int): The stream value to send", "source": "codesearchnet"}
{"code": "def idxmin(self, **kwargs):\n    if self._is_transposed:\n        kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)\n        return self.transpose().idxmin(**kwargs)\n    axis = kwargs.get('axis', 0)\n    index = (self.index if (axis == 0) else self.columns)\n\n    def idxmin_builder(df, **kwargs):\n        if (axis == 0):\n            df.index = index\n        else:\n            df.columns = index\n        return df.idxmin(**kwargs)\n    func = self._build_mapreduce_func(idxmin_builder, **kwargs)\n    return self._full_axis_reduce(axis, func)", "docstring": "Returns the first occurrence of the minimum over requested axis.\n\nReturns:\nA new QueryCompiler object containing the minimum of each column or axis.", "source": "codesearchnet"}
{"code": "def make_initializable_iterator(self, shared_name=None) -> iterator_ops.Iterator:\n    return self._make_initializable_iterator(shared_name)", "docstring": "Creates an iterator for elements of this dataset.\n\nNote: The returned iterator will be in an uninitialized state,\nand you must run the `iterator.initializer` operation before using it:\n\n```python\n# Building graph ...\ndataset = ...\niterator = dataset.make_initializable_iterator()\nnext_value = iterator.get_next()  # This is a Tensor.\n\n# ... from within a session ...\nsess.run(iterator.initializer)\ntry:\nwhile True:\nvalue = sess.run(next_value)\n...\nexcept tf.errors.OutOfRangeError:\npass\n```\n\nArgs:\nshared_name: (Optional.) If non-empty, the returned iterator will be\nshared under the given name across multiple sessions that share the same\ndevices (e.g. when using a remote server).\n\nReturns:\nA `tf.data.Iterator` for elements of this dataset.\n\nRaises:\nRuntimeError: If eager execution is enabled.", "source": "github-repos"}
{"code": "def _ReadLine(self, file_object):\n    \n    if len(self._buffer) < self._buffer_size:\n      content = file_object.read(self._buffer_size)\n      content = content.decode(self._encoding)\n      self._buffer = ''.join([self._buffer, content])\n\n    line, new_line, self._buffer = self._buffer.partition('\\n')\n    if not line and not new_line:\n      line = self._buffer\n      self._buffer = ''\n\n    self._current_offset += len(line)\n\n    \n    if line.endswith('\\r'):\n      line = line[:-len('\\r')]\n\n    if new_line:\n      line = ''.join([line, '\\n'])\n      self._current_offset += len('\\n')\n\n    return line", "docstring": "Reads a line from the file object.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nstr: line read from the file-like object.", "source": "juraj-google-style"}
{"code": "def filter_by_moys(self, moys):\n        \n        _filt_values, _filt_datetimes = self._filter_by_moys_slow(moys)\n        collection = HourlyDiscontinuousCollection(\n            self.header.duplicate(), _filt_values, _filt_datetimes)\n        collection._validated_a_period = self._validated_a_period\n        return collection", "docstring": "Filter the Data Collection based on a list of minutes of the year.\n\nArgs:\nmoys: A List of minutes of the year [0..8759 * 60]\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def _count_righthand_zero_bits(number, bits):\n    \n    if number == 0:\n        return bits\n    return min(bits, _compat_bit_length(~number & (number - 1)))", "docstring": "Count the number of zero bits on the right hand side.\n\nArgs:\nnumber: an integer.\nbits: maximum number of bits to count.\n\nReturns:\nThe number of zero bits on the right hand side of the number.", "source": "juraj-google-style"}
{"code": "def convert_elementwise_mul(\n    params, w_name, scope_name, inputs, layers, weights, names\n):\n    \n    print('Converting elementwise_mul ...')\n    model0 = layers[inputs[0]]\n    model1 = layers[inputs[1]]\n\n    if names == 'short':\n        tf_name = 'M' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    def target_layer(x):\n        layer = tf.multiply(\n            x[0],\n            x[1]\n        )\n        return layer\n\n    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n    layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])", "docstring": "Convert elementwise multiplication.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def wait_for_other_workers(self):\n    if not self._worker_barrier:\n        return\n    self._worker_barrier.wait()", "docstring": "Waits for other workers to reach the same call to this method.\n\nRaises:\nValueError: if `worker_barrier` is not passed to the __init__ method.", "source": "github-repos"}
{"code": "def _LoadDataIntoCache(self, file_object, minimum_offset, read_all_data=False):\n    if (minimum_offset < self._decompressor_state.uncompressed_offset):\n        self._ResetDecompressorState()\n    while ((not self.IsCacheFull()) or read_all_data):\n        decompressed_data = self._decompressor_state.Read(file_object)\n        if (not decompressed_data):\n            break\n        decompressed_data_length = len(decompressed_data)\n        decompressed_end_offset = self._decompressor_state.uncompressed_offset\n        decompressed_start_offset = (decompressed_end_offset - decompressed_data_length)\n        data_to_add = decompressed_data\n        added_data_start_offset = decompressed_start_offset\n        if (decompressed_start_offset < minimum_offset):\n            data_to_add = None\n        if (decompressed_start_offset < minimum_offset < decompressed_end_offset):\n            data_add_offset = (decompressed_end_offset - minimum_offset)\n            data_to_add = decompressed_data[(- data_add_offset)]\n            added_data_start_offset = (decompressed_end_offset - data_add_offset)\n        if ((not self.IsCacheFull()) and data_to_add):\n            self._cache = b''.join([self._cache, data_to_add])\n            if (self._cache_start_offset is None):\n                self._cache_start_offset = added_data_start_offset\n            if (self._cache_end_offset is None):\n                self._cache_end_offset = (self._cache_start_offset + len(data_to_add))\n            else:\n                self._cache_end_offset += len(data_to_add)\n        unused_data = self._decompressor_state.GetUnusedData()\n        if unused_data:\n            seek_offset = (- len(unused_data))\n            file_object.seek(seek_offset, os.SEEK_CUR)\n            self._ResetDecompressorState()\n            break", "docstring": "Reads and decompresses the data in the member.\n\nThis function already loads as much data as possible in the cache, up to\nUNCOMPRESSED_DATA_CACHE_SIZE bytes.\n\nArgs:\nfile_object (FileIO): file-like object.\nminimum_offset (int): offset into this member's uncompressed data at\nwhich the cache should start.\nread_all_data (bool): True if all the compressed data should be read\nfrom the member.", "source": "codesearchnet"}
{"code": "def ExpandRecursiveGlobs(cls, path, path_separator):\n    glob_regex = '(.*)?{0:s}\\\\*\\\\*(\\\\d{{1,2}})?({0:s})?$'.format(re.escape(path_separator))\n    match = re.search(glob_regex, path)\n    if (not match):\n        return [path]\n    skip_first = False\n    if match.group(3):\n        skip_first = True\n    if match.group(2):\n        iterations = int(match.group(2))\n    else:\n        iterations = cls._RECURSIVE_GLOB_LIMIT\n        logger.warning('Path \"{0:s}\" contains fully recursive glob, limiting to 10 levels'.format(path))\n    return cls.AppendPathEntries(match.group(1), path_separator, iterations, skip_first)", "docstring": "Expands recursive like globs present in an artifact path.\n\nIf a path ends in '**', with up to two optional digits such as '**10',\nthe '**' will recursively match all files and zero or more directories\nfrom the specified path. The optional digits indicate the recursion depth.\nBy default recursion depth is 10 directories.\n\nIf the glob is followed by the specified path segment separator, only\ndirectories and subdirectories will be matched.\n\nArgs:\npath (str): path to be expanded.\npath_separator (str): path segment separator.\n\nReturns:\nlist[str]: String path expanded for each glob.", "source": "codesearchnet"}
{"code": "class custom_gradient:\n\n    def __init__(self, forward_fn):\n        self.forward_fn = forward_fn\n\n    def __call__(self, *args, **kwargs):\n        return CustomGradientFunction.apply(self.forward_fn, *args, **kwargs)", "docstring": "Decorator for custom gradients.\n\nArgs:\nforward_fn: Forward pass function.", "source": "github-repos"}
{"code": "def _set_control_flow_context(self, ctx) -> None:\n    self._control_flow_context = ctx", "docstring": "Sets the current control flow context.\n\nArgs:\nctx: a context object.", "source": "github-repos"}
{"code": "def Acf(poly, dist, N=None, **kws):\n    if (N is None):\n        N = ((len(poly) / 2) + 1)\n    corr = Corr(poly, dist, **kws)\n    out = numpy.empty(N)\n    for n in range(N):\n        out[n] = numpy.mean(corr.diagonal(n), 0)\n    return out", "docstring": "Auto-correlation function.\n\nArgs:\npoly (Poly):\nPolynomial of interest. Must have ``len(poly) > N``.\ndist (Dist):\nDefines the space the correlation is taken on.\nN (int):\nThe number of time steps appart included. If omited set to\n``len(poly)/2+1``.\n\nReturns:\n(numpy.ndarray) :\nAuto-correlation of ``poly`` with shape ``(N,)``. Note that by\ndefinition ``Q[0]=1``.\n\nExamples:\n>>> poly = chaospy.prange(10)[1:]\n>>> Z = chaospy.Uniform()\n>>> print(numpy.around(chaospy.Acf(poly, Z, 5), 4))\n[1.     0.9915 0.9722 0.9457 0.9127]", "source": "codesearchnet"}
{"code": "def save_q_df(self, state_key, action_key, q_value):\n    if (isinstance(q_value, float) is False):\n        raise TypeError('The type of q_value must be float.')\n    new_q_df = pd.DataFrame([(state_key, action_key, q_value)], columns=['state_key', 'action_key', 'q_value'])\n    if (self.q_df is not None):\n        self.q_df = pd.concat([new_q_df, self.q_df])\n        self.q_df = self.q_df.drop_duplicates(['state_key', 'action_key'])\n    else:\n        self.q_df = new_q_df", "docstring": "Insert or update Q-Value in `self.q_df`.\n\nArgs:\nstate_key:      State.\naction_key:     Action.\nq_value:        Q-Value.\n\nExceptions:\nTypeError:      If the type of `q_value` is not float.", "source": "codesearchnet"}
{"code": "def _read_from_hdx(self, object_type, value, fieldname='id', action=None, **kwargs):\n    if (not fieldname):\n        raise HDXError(('Empty %s field name!' % object_type))\n    if (action is None):\n        action = self.actions()['show']\n    data = {fieldname: value}\n    data.update(kwargs)\n    try:\n        result = self.configuration.call_remoteckan(action, data)\n        return (True, result)\n    except NotFound:\n        return (False, ('%s=%s: not found!' % (fieldname, value)))\n    except Exception as e:\n        raisefrom(HDXError, ('Failed when trying to read: %s=%s! (POST)' % (fieldname, value)), e)", "docstring": "Makes a read call to HDX passing in given parameter.\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nvalue (str): Value of HDX field\nfieldname (str): HDX field name. Defaults to id.\naction (Optional[str]): Replacement CKAN action url to use. Defaults to None.\n**kwargs: Other fields to pass to CKAN.\n\nReturns:\nTuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)", "source": "codesearchnet"}
{"code": "def pprint_table(table, out=sys.stdout, rstrip=False):\n\n    def max_width_col(table, col_idx):\n        '\\n        Get the maximum width of the given column index\\n        '\n        return max([len(row[col_idx]) for row in table])\n    if rstrip:\n        for (row_idx, row) in enumerate(table):\n            table[row_idx] = [c.rstrip() for c in row]\n    col_paddings = []\n    ncols = len(table[0])\n    for i in range(ncols):\n        col_paddings.append(max_width_col(table, i))\n    for row in table:\n        out.write(row[0].ljust((col_paddings[0] + 1)))\n        for i in range(1, len(row)):\n            col = row[i].rjust((col_paddings[i] + 2))\n            out.write(col)\n        out.write('\\n')", "docstring": "Prints out a table of data, padded for alignment\nEach row must have the same number of columns.\n\nArgs:\ntable: The table to print. A list of lists.\nout: Output stream (file-like object)\nrstrip: if True, trailing withespaces are removed from the entries.", "source": "codesearchnet"}
{"code": "def confirm(prompt='Really?', color='warning', yes_values=('y', 'yes'), abort_on_unconfirmed=False, abort_options=None):\n    if isinstance(yes_values, str):\n        yes_values = (yes_values,)\n    prompt = '{prompt} [{yes_value}/N] '.format(prompt=prompt, yes_value=yes_values[0])\n    if color:\n        prompt = printer.colorize(prompt, color=color)\n    try:\n        answer = input(prompt)\n    except KeyboardInterrupt:\n        print()\n        confirmed = False\n    else:\n        answer = answer.strip().lower()\n        confirmed = (answer in yes_values)\n    do_abort_on_unconfirmed = ((not confirmed) and (bool(abort_on_unconfirmed) or ((abort_on_unconfirmed == 0) and (abort_on_unconfirmed is not False))))\n    if do_abort_on_unconfirmed:\n        if (abort_options is None):\n            abort_options = {}\n        if (abort_on_unconfirmed is True):\n            abort_options.setdefault('return_code', 0)\n        elif isinstance(abort_on_unconfirmed, int):\n            abort_options.setdefault('return_code', abort_on_unconfirmed)\n        elif isinstance(abort_on_unconfirmed, str):\n            abort_options.setdefault('message', abort_on_unconfirmed)\n        else:\n            abort_options.setdefault('return_code', 0)\n        abort(**abort_options)\n    return confirmed", "docstring": "Prompt for confirmation.\n\nConfirmation can be aborted by typing in a no value instead of one\nof the yes values or with Ctrl-C.\n\nArgs:\nprompt (str): Prompt to present user [\"Really?\"]\ncolor (string|Color|bool) Color to print prompt string; can be\n``False`` or ``None`` to print without color [\"yellow\"]\nyes_values (list[str]): Values user must type in to confirm\n[(\"y\", \"yes\")]\nabort_on_unconfirmed (bool|int|str): When user does *not*\nconfirm:\n\n- If this is an integer, print \"Aborted\" to stdout if\nit's 0 or to stderr if it's not 0 and then exit with\nthis code\n- If this is a string, print it to stdout and exit with\ncode 0\n- If this is ``True`` (or any other truthy value), print\n\"Aborted\" to stdout and exit with code 0\n\nabort_options (dict): Options to pass to :func:`abort` when not\nconfirmed (these options will override any options set via\n``abort_on_unconfirmed``)", "source": "codesearchnet"}
{"code": "def run_processes(self, procdetails: List[ProcessDetails], subproc_run_timeout_sec: float=1, stop_event_timeout_ms: int=1000, kill_timeout_sec: float=5) -> None:\n\n    def cleanup():\n        self.debug('atexit function called: cleaning up')\n        for pmgr_ in self.process_managers:\n            pmgr_.stop()\n    atexit.register(cleanup)\n    self.process_managers = []\n    n = len(procdetails)\n    for (i, details) in enumerate(procdetails):\n        pmgr = ProcessManager(details, (i + 1), n, kill_timeout_sec=kill_timeout_sec, debugging=self.debugging)\n        self.process_managers.append(pmgr)\n    for pmgr in self.process_managers:\n        pmgr.start()\n    self.info('All started')\n    something_running = True\n    stop_requested = False\n    subproc_failed = False\n    while (something_running and (not stop_requested) and (not subproc_failed)):\n        if (win32event.WaitForSingleObject(self.h_stop_event, stop_event_timeout_ms) == win32event.WAIT_OBJECT_0):\n            stop_requested = True\n            self.info('Stop requested; stopping')\n        else:\n            something_running = False\n            for pmgr in self.process_managers:\n                if subproc_failed:\n                    break\n                try:\n                    retcode = pmgr.wait(timeout_s=subproc_run_timeout_sec)\n                    if (retcode != 0):\n                        subproc_failed = True\n                except subprocess.TimeoutExpired:\n                    something_running = True\n    for pmgr in self.process_managers:\n        pmgr.stop()\n    self.info('All stopped')", "docstring": "Run multiple child processes.\n\nArgs:\n\nprocdetails: list of :class:`ProcessDetails` objects (q.v.)\nsubproc_run_timeout_sec: time (in seconds) to wait for each process\nwhen polling child processes to see how they're getting on\n(default ``1``)\nstop_event_timeout_ms: time to wait (in ms) while checking the\nWindows stop event for this service (default ``1000``)\nkill_timeout_sec: how long (in seconds) will we wait for the\nsubprocesses to end peacefully, before we try to kill them?\n\n.. todo::\ncardinal_pythonlib.winservice.WindowsService: NOT YET IMPLEMENTED:\nWindows service autorestart", "source": "codesearchnet"}
{"code": "def CheckTestDependencies(self, verbose_output=True):\n    \n    if not self.CheckDependencies(verbose_output=verbose_output):\n      return False\n\n    print('Checking availability and versions of test dependencies.')\n    check_result = True\n\n    for dependency in sorted(\n        self._test_dependencies.values(),\n        key=lambda dependency: dependency.name):\n      result, status_message = self._CheckPythonModule(dependency)\n      if not result:\n        check_result = False\n\n      self._PrintCheckDependencyStatus(\n          dependency, result, status_message, verbose_output=verbose_output)\n\n    if check_result and not verbose_output:\n      print('[OK]')\n\n    print('')\n    return check_result", "docstring": "Checks the availability of the dependencies when running tests.\n\nArgs:\nverbose_output (Optional[bool]): True if output should be verbose.\n\nReturns:\nbool: True if the dependencies are available, False otherwise.", "source": "juraj-google-style"}
{"code": "def test_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):\n    inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)\n    with backend.eager_learning_phase_scope(0):\n        outs, total_loss, output_losses, masks = _model_loss(model, inputs, targets, sample_weights=sample_weights, training=False, output_loss_metrics=output_loss_metrics)\n    if not isinstance(outs, list):\n        outs = [outs]\n    metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights=sample_weights, masks=masks)\n    total_loss = nest.flatten(total_loss)\n    return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}", "docstring": "Calculates the loss for one input batch.\n\nArgs:\nmodel: Model whose loss has to be calculated.\ninputs: Input batch data.\ntargets: Target batch data.\nsample_weights: Sample weight batch data.\noutput_loss_metrics: List of metrics that are used to aggregated output\nloss values.\n\nReturns:\nDict with three items:\n'total_loss': single tensor for overall loss,\n'output_losses': list of tensors for loss corresponding to each of the\nmodel output. Could be a empty list when model has only one output.\n'metrics': list of tensors for metric specified.", "source": "github-repos"}
{"code": "def init(scope):\n    \n    class SinonGlobals(object): \n        \n        pass\n\n    global CPSCOPE \n    CPSCOPE = SinonGlobals()\n    funcs = [obj for obj in scope.values() if isinstance(obj, FunctionType)]\n    for func in funcs:\n        setattr(CPSCOPE, func.__name__, func)\n    return CPSCOPE", "docstring": "Copy all values of scope into the class SinonGlobals\nArgs:\nscope (eg. locals() or globals())\nReturn:\nSinonGlobals instance", "source": "juraj-google-style"}
{"code": "def find_id_in_folder(self, name, parent_folder_id=0):\n    if ((name is None) or (len(name) == 0)):\n        return parent_folder_id\n    offset = 0\n    resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name'])\n    total = int(resp['total_count'])\n    while (offset < total):\n        found = self.__find_name(resp, name)\n        if (found is not None):\n            return found\n        offset += int(len(resp['entries']))\n        resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name'])\n    return None", "docstring": "Find a folder or a file ID from its name, inside a given folder.\n\nArgs:\nname (str): Name of the folder or the file to find.\n\nparent_folder_id (int): ID of the folder where to search.\n\nReturns:\nint. ID of the file or folder found. None if not found.\n\nRaises:\nBoxError: An error response is returned from Box (status_code >= 400).\n\nBoxHttpResponseError: Response from Box is malformed.\n\nrequests.exceptions.*: Any connection related problem.", "source": "codesearchnet"}
{"code": "def _render_text(self, text, preformatted=False):\n    \n    tag = 'pre' if preformatted else 'div'\n    self._segments.append('<%s>%s</%s>' % (tag, HtmlBuilder._format(text), tag))", "docstring": "Renders an HTML formatted text block with the specified text.\n\nArgs:\ntext: the text to render\npreformatted: whether the text should be rendered as preformatted", "source": "juraj-google-style"}
{"code": "def FindByName(cls, name):\n    if name.endswith('.py'):\n        return cls.LoadFromFile(name)\n    reg = ComponentRegistry()\n    for (_name, tile) in reg.load_extensions('iotile.virtual_tile', name_filter=name, class_filter=VirtualTile):\n        return tile\n    raise ArgumentError('VirtualTile could not be found by name', name=name)", "docstring": "Find an installed VirtualTile by name.\n\nThis function searches for installed virtual tiles\nusing the pkg_resources entry_point `iotile.virtual_tile`.\n\nIf name is a path ending in .py, it is assumed to point to\na module on disk and loaded directly rather than using\npkg_resources.\n\nArgs:\nname (str): The name of the tile to search\nfor.\n\nReturns:\nVirtualTile class: A virtual tile subclass that can be\ninstantiated to create a virtual tile.", "source": "codesearchnet"}
{"code": "def _find_path_between(self, p: GridQubit, q: GridQubit, used: Set[GridQubit]) -> Optional[List[GridQubit]]:\n\n    def assemble_path(n: GridQubit, parent: Dict[(GridQubit, GridQubit)]):\n        path = [n]\n        while (n in parent):\n            n = parent[n]\n            path.append(n)\n        return path\n    other = {p: q, q: p}\n    parents = {p: dict(), q: dict()}\n    visited = {p: set(), q: set()}\n    queue = collections.deque([(p, p), (q, q)])\n    while queue:\n        (n, s) = queue.popleft()\n        for n_adj in self._c_adj[n]:\n            if (n_adj in visited[other[s]]):\n                path_s = assemble_path(n, parents[s])[(- 2)::(- 1)]\n                path_other = assemble_path(n_adj, parents[other[s]])[:(- 1)]\n                path = (path_s + path_other)\n                if (s == q):\n                    path.reverse()\n                return path\n            elif ((n_adj not in used) and (n_adj not in visited[s])):\n                queue.append((n_adj, s))\n                visited[s].add(n_adj)\n                parents[s][n_adj] = n\n    return None", "docstring": "Searches for continuous sequence between two qubits.\n\nThis method runs two BFS algorithms in parallel (alternating variable s\nin each iteration); the first one starting from qubit p, and the second\none starting from qubit q. If at some point a qubit reachable from p is\nfound to be on the set of qubits already reached from q (or vice versa),\nthe search is stopped and new path returned.\n\nArgs:\np: The first qubit, start of the sequence.\nq: The second qubit, end of the sequence.\nused: Set of forbidden qubits which cannot appear on the sequence.\n\nReturns:\nContinues sequence of qubits with new path between p and q, or None\nif no path was found.", "source": "codesearchnet"}
{"code": "def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):\n        \n        num_joints = len(joint_positions)\n        if not sync_last:\n            num_joints -= 1\n        for i in range(num_joints):\n            if simulate:\n                p.setJointMotorControl2(\n                    self.ik_robot,\n                    self.actual[i],\n                    p.POSITION_CONTROL,\n                    targetVelocity=0,\n                    targetPosition=joint_positions[i],\n                    force=500,\n                    positionGain=0.5,\n                    velocityGain=1.,\n                )\n            else:\n                \n                p.resetJointState(self.ik_robot, self.actual[i], joint_positions[i])", "docstring": "Force the internal robot model to match the provided joint angles.\n\nArgs:\njoint_positions (list): a list or flat numpy array of joint positions.\nsimulate (bool): If True, actually use physics simulation, else\nwrite to physics state directly.\nsync_last (bool): If False, don't sync the last joint angle. This\nis useful for directly controlling the roll at the end effector.", "source": "juraj-google-style"}
{"code": "def Calls(self, conditions=None):\n    \n    results = set()\n    if conditions is None:\n      conditions = [None]\n    for condition in conditions:\n      for c in self.Match(*condition):\n        results.update(self._registry.get(c, []))\n    return results", "docstring": "Find the methods that evaluate data that meets this condition.\n\nArgs:\nconditions: A tuple of (artifact, os_name, cpe, label)\n\nReturns:\nA list of methods that evaluate the data.", "source": "juraj-google-style"}
{"code": "def render_text(text, preformatted=False):\n    \n    builder = HtmlBuilder()\n    builder._render_text(text, preformatted=preformatted)\n    return builder._to_html()", "docstring": "Renders an HTML formatted text block with the specified text.\n\nArgs:\ntext: the text to render\npreformatted: whether the text should be rendered as preformatted\nReturns:\nThe formatted HTML.", "source": "juraj-google-style"}
{"code": "def describe_enum(enum_definition):\n    \n    enum_descriptor = EnumDescriptor()\n    enum_descriptor.name = enum_definition.definition_name().split('.')[-1]\n\n    values = []\n    for number in enum_definition.numbers():\n        value = enum_definition.lookup_by_number(number)\n        values.append(describe_enum_value(value))\n\n    if values:\n        enum_descriptor.values = values\n\n    return enum_descriptor", "docstring": "Build descriptor for Enum class.\n\nArgs:\nenum_definition: Enum class to provide descriptor for.\n\nReturns:\nInitialized EnumDescriptor instance describing the Enum class.", "source": "juraj-google-style"}
{"code": "def are_you_sure(flag_changed, evt, parent=None, title=\"File has been changed\",\n                 msg=\"Are you sure you want to exit?\"):\n    \n    if flag_changed:\n        r = QMessageBox.question(parent, title, msg,\n             QMessageBox.Yes|QMessageBox.No, QMessageBox.Yes)\n        if r != QMessageBox.Yes:\n            evt.ignore()", "docstring": "\"Are you sure you want to exit\" question dialog.\n\nIf flag_changed, shows question dialog. If answer is not yes, calls evt.ignore()\n\nArguments:\nflag_changed\nevt -- QCloseEvent instance\nparent=None -- parent form, used to centralize the question dialog at\ntitle -- title for question dialog\nmsg -- text of question dialog\n\nReturns True or False. True means: \"yes, I want to exit\"", "source": "juraj-google-style"}
{"code": "def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):\n    with warnings.catch_warnings():\n        warnings.simplefilter('ignore')\n        if (cloud is None):\n            return _local.Local.train(input_dir, batch_size, max_steps, output_dir, checkpoint)\n        return _cloud.Cloud.train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)", "docstring": "Train model. The output can be used for batch prediction or online deployment.\n\nArgs:\ninput_dir: A directory path containing preprocessed results. Can be local or GCS path.\nbatch_size: size of batch used for training.\nmax_steps: number of steps to train.\noutput_dir: The output directory to use. Can be local or GCS path.\ncheckpoint: the Inception checkpoint to use. If None, a default checkpoint is used.\ncloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud.\nIf None, it runs locally.\n\nReturns:\nA google.datalab.utils.Job object that can be used to query state from or wait.", "source": "codesearchnet"}
{"code": "def _process_from_queue(self, queue):\n    now = time.time()\n    log = self.log.bind(queue=queue)\n    batch_size = self._get_queue_batch_size(queue)\n    (queue_lock, failed_to_acquire) = self._get_queue_lock(queue, log)\n    if failed_to_acquire:\n        return ([], (- 1))\n    later = (time.time() + self.config['LOCK_RETRY'])\n    task_ids = self.scripts.zpoppush(self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)))\n    log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids))\n    processed_count = 0\n    if task_ids:\n        processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log)\n    if queue_lock:\n        queue_lock.release()\n        log.debug('released swq lock')\n    return (task_ids, processed_count)", "docstring": "Internal method to process a task batch from the given queue.\n\nArgs:\nqueue: Queue name to be processed\n\nReturns:\nTask IDs:   List of tasks that were processed (even if there was an\nerror so that client code can assume the queue is empty\nif nothing was returned)\nCount:      The number of tasks that were attempted to be executed or\n-1 if the queue lock couldn't be acquired.", "source": "codesearchnet"}
{"code": "def create_domain(provider, context, **kwargs):\n    session = get_session(provider.region)\n    client = session.client('route53')\n    domain = kwargs.get('domain')\n    if (not domain):\n        logger.error('domain argument or BaseDomain variable not provided.')\n        return False\n    zone_id = create_route53_zone(client, domain)\n    return {'domain': domain, 'zone_id': zone_id}", "docstring": "Create a domain within route53.\n\nArgs:\nprovider (:class:`stacker.providers.base.BaseProvider`): provider\ninstance\ncontext (:class:`stacker.context.Context`): context instance\n\nReturns: boolean for whether or not the hook succeeded.", "source": "codesearchnet"}
{"code": "def get_actions(self, issues):\n    actions = []\n    try:\n        for issue in issues:\n            action_item = self.determine_action(issue)\n            if (action_item['action'] != AuditActions.IGNORE):\n                action_item['owners'] = self.get_contacts(issue)\n                actions.append(action_item)\n    finally:\n        db.session.rollback()\n    return actions", "docstring": "Returns a list of actions to executed\n\nArgs:\nissues (`list` of :obj:`RequiredTagsIssue`): List of issues\n\nReturns:\n`list` of `dict`", "source": "codesearchnet"}
{"code": "def __init__(self, key, attributes):\n        \n        \n        self._attributes_normalized = {}\n        self._set_attributes(attributes if attributes else {})\n        self._key_normalized = ''\n        self._set_key(key)", "docstring": "Object initialization\n\nArgs:\nkey: String name of an attributes key that represents the unique identify of the request\nattributes: Dictionary whose keys match the string values of the request attribute's names and values correspond the the request attribute values", "source": "juraj-google-style"}
{"code": "def FillDeviceAttributes(device, descriptor):\n    attributes = HidAttributes()\n    result = hid.HidD_GetAttributes(device, ctypes.byref(attributes))\n    if (not result):\n        raise ctypes.WinError()\n    buf = ctypes.create_string_buffer(1024)\n    result = hid.HidD_GetProductString(device, buf, 1024)\n    if (not result):\n        raise ctypes.WinError()\n    descriptor.vendor_id = attributes.VendorID\n    descriptor.product_id = attributes.ProductID\n    descriptor.product_string = ctypes.wstring_at(buf)", "docstring": "Fill out the attributes of the device.\n\nFills the devices HidAttributes and product string\ninto the descriptor.\n\nArgs:\ndevice: A handle to the open device\ndescriptor: The DeviceDescriptor to populate with the\nattributes.\n\nReturns:\nNone\n\nRaises:\nWindowsError when unable to obtain attributes or product\nstring.", "source": "codesearchnet"}
{"code": "def FromJsonString(self, value):\n    if ((len(value) < 1) or (value[(- 1)] != 's')):\n        raise ParseError('Duration must end with letter \"s\": {0}.'.format(value))\n    try:\n        pos = value.find('.')\n        if (pos == (- 1)):\n            self.seconds = int(value[:(- 1)])\n            self.nanos = 0\n        else:\n            self.seconds = int(value[:pos])\n            if (value[0] == '-'):\n                self.nanos = int(round((float('-0{0}'.format(value[pos:(- 1)])) * 1000000000.0)))\n            else:\n                self.nanos = int(round((float('0{0}'.format(value[pos:(- 1)])) * 1000000000.0)))\n    except ValueError:\n        raise ParseError(\"Couldn't parse duration: {0}.\".format(value))", "docstring": "Converts a string to Duration.\n\nArgs:\nvalue: A string to be converted. The string must end with 's'. Any\nfractional digits (or none) are accepted as long as they fit into\nprecision. For example: \"1s\", \"1.01s\", \"1.0000001s\", \"-3.100s\n\nRaises:\nParseError: On parsing problems.", "source": "codesearchnet"}
{"code": "def update_score_summary(sender, **kwargs):\n        \n        score = kwargs['instance']\n        try:\n            score_summary = ScoreSummary.objects.get(\n                student_item=score.student_item\n            )\n            score_summary.latest = score\n\n            \n            if score.reset:\n                score_summary.highest = score\n            \n            \n            \n            elif score.to_float() > score_summary.highest.to_float():\n                score_summary.highest = score\n            score_summary.save()\n        except ScoreSummary.DoesNotExist:\n            ScoreSummary.objects.create(\n                student_item=score.student_item,\n                highest=score,\n                latest=score,\n            )\n        except DatabaseError as err:\n            logger.exception(\n                u\"Error while updating score summary for student item {}\"\n                .format(score.student_item)\n            )", "docstring": "Listen for new Scores and update the relevant ScoreSummary.\n\nArgs:\nsender: not used\n\nKwargs:\ninstance (Score): The score model whose save triggered this receiver.", "source": "juraj-google-style"}
{"code": "def tseries_between(self, tstart=None, tend=None):\n    if (self.tseries is None):\n        return None\n    ndat = self.tseries.shape[0]\n    if (tstart is None):\n        istart = 0\n    else:\n        igm = 0\n        igp = (ndat - 1)\n        while ((igp - igm) > 1):\n            istart = (igm + ((igp - igm) \n            if (self.tseries.iloc[istart]['t'] >= tstart):\n                igp = istart\n            else:\n                igm = istart\n        istart = igp\n    if (tend is None):\n        iend = None\n    else:\n        igm = 0\n        igp = (ndat - 1)\n        while ((igp - igm) > 1):\n            iend = (igm + ((igp - igm) \n            if (self.tseries.iloc[iend]['t'] > tend):\n                igp = iend\n            else:\n                igm = iend\n        iend = (igm + 1)\n    return self.tseries.iloc[istart:iend]", "docstring": "Return time series data between requested times.\n\nArgs:\ntstart (float): starting time. Set to None to start at the\nbeginning of available data.\ntend (float): ending time. Set to None to stop at the end of\navailable data.\nReturns:\n:class:`pandas.DataFrame`: slice of :attr:`tseries`.", "source": "codesearchnet"}
{"code": "def _get_val_list(obj, path_list, reverse=False):\n    try:\n        y = getattr(obj, path_list[0])\n    except AttributeError:\n        return []\n    if (len(path_list) == 1):\n        return [y]\n    else:\n        val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)]\n        if reverse:\n            val_list.reverse()\n        return val_list", "docstring": "Extract values from nested objects by attribute names.\n\nObjects contain attributes which are named references to objects. This will descend\ndown a tree of nested objects, starting at the given object, following the given\npath.\n\nArgs:\nobj: object\nAny type of object\n\npath_list: list\nAttribute names\n\nreverse: bool\nReverse the list of values before concatenation.\n\nReturns:\nlist of objects", "source": "codesearchnet"}
{"code": "def print_object_results(obj_result):\n    print_results_header(obj_result.object_id, obj_result.is_valid)\n    if obj_result.warnings:\n        print_warning_results(obj_result, 1)\n    if obj_result.errors:\n        print_schema_results(obj_result, 1)", "docstring": "Print the results of validating an object.\n\nArgs:\nobj_result: An ObjectValidationResults instance.", "source": "codesearchnet"}
{"code": "def SignFile(self, in_filename, out_filename=None):\n    if (out_filename is None):\n        out_filename = ('%s.signed' % in_filename)\n    args = ['-certs', self.cert, '-key', self.key, '-n', self.application, '-t', 'http:\n    try:\n        output_log = io.StringIO()\n        ossl = pexpect.spawn('osslsigncode', args)\n        ossl.logfile_read = output_log\n        ossl.expect('Enter PEM pass phrase')\n        ossl.sendline(self.password)\n        ossl.wait()\n    except pexpect.ExceptionPexpect:\n        output_log.seek(0)\n        logging.exception(output_log.read())\n        raise\n    if (not os.path.exists(out_filename)):\n        raise SigningError(('Expected output %s not created' % out_filename))\n    try:\n        subprocess.check_call(['osslsigncode', 'verify', '-in', out_filename])\n    except subprocess.CalledProcessError:\n        logging.exception('Bad signature verification on %s', out_filename)\n        raise SigningError(('Bad signature verification on %s' % out_filename))\n    return out_filename", "docstring": "Sign a file using osslsigncode.\n\nArgs:\nin_filename: file to read from\nout_filename: file to output to, if none we output to the same filename as\nthe input with a .signed suffix.\n\nReturns:\noutput filename string\nRaises:\npexpect.ExceptionPexpect: if the expect invocation of osslsigncode fails.\nSigningError: for signing failures.", "source": "codesearchnet"}
{"code": "def get_imap_capabilities(server):\n    \n\n    capabilities = list(map(str, list(server.capabilities())))\n    for i in range(len(capabilities)):\n        capabilities[i] = str(capabilities[i]).replace(\"b'\",\n                                                       \"\").replace(\"'\",\n                                                                   \"\")\n    logger.debug(\"IMAP server supports: {0}\".format(capabilities))\n\n    return capabilities", "docstring": "Returns a list of an IMAP server's capabilities\n\nArgs:\nserver (imapclient.IMAPClient): An instance of imapclient.IMAPClient\n\nReturns (list): A list of capabilities", "source": "juraj-google-style"}
{"code": "def scheduler(self, sleep_time=0.2):\n        \n\n        while self.listening:\n            \n            \n            if self.scheduled_calls:\n                timestamp = time.time()\n                self.scheduled_calls[:] = [item for item in self.scheduled_calls\n                                           if not self.time_reached(timestamp, item)]\n            time.sleep(sleep_time)\n\n        logger.info(\"Shutting down the call scheduler...\")", "docstring": "Starts the scheduler to check for scheduled calls and execute them\nat the correct time.\n\nArgs:\nsleep_time (float): The amount of time to wait in seconds between\neach loop iteration. This prevents the scheduler from consuming\n100% of the host's CPU. Defaults to 0.2 seconds.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def clone_with_git(repo_uri, dest_path):\n    log.info('Cloning git repo %s to %s', repo_uri, dest_path)\n    git.Repo.clone_from(repo_uri, dest_path, depth=1)", "docstring": "Create a clone by cloning a git repository.\n\nArgs:\nrepo_uri: The URI of the git repository to clone.\ndest_path: The location to clone to.", "source": "codesearchnet"}
{"code": "def incident(self, name, owner=None, **kwargs):\n        \n        return Incident(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Incident TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_component(self, component_name):\n        \n        mapping = self.get_components()\n        return mapping[component_name] if component_name in mapping else None", "docstring": "Looks up a component by its name.\n\nArgs:\ncomponent_name: The name of the component to look up.\nReturns:\nThe component for the provided name or None if there is no such component.", "source": "juraj-google-style"}
{"code": "def on_message(self, event):\n    metadata = self._parse_metadata(event)\n    message = Message(text=metadata['text'], metadata=metadata).__dict__\n    if message.get('text'):\n        message['text'] = self.find_and_replace_userids(message['text'])\n        message['text'] = self.find_and_replace_channel_refs(message['text'])\n    return message", "docstring": "Runs when a message event is received\n\nArgs:\nevent: RTM API event.\n\nReturns:\nLegobot.messge", "source": "codesearchnet"}
{"code": "def _CheckAndCreateNewGroup(self, group_name, group_class):\n    \n    group = self.GetPossibleGroup()\n\n    \n    if isinstance(group, group_class) and group.group_name() == group_name:\n      group.AddMethod(self)\n      return self\n\n    \n    new_group = group_class(group_name)\n    new_group.AddMethod(self)\n    self._call_queue.append(new_group)\n    return self", "docstring": "Checks if the last method (a possible group) is an instance of our\ngroup_class. Adds the current method to this group or creates a new one.\n\nArgs:\n\ngroup_name: the name of the group.\ngroup_class: the class used to create instance of this new group", "source": "juraj-google-style"}
{"code": "def __schema_descriptor(self, services):\n    methods_desc = {}\n    for service in services:\n        protorpc_methods = service.all_remote_methods()\n        for protorpc_method_name in protorpc_methods.iterkeys():\n            rosy_method = ('%s.%s' % (service.__name__, protorpc_method_name))\n            method_id = self.__id_from_name[rosy_method]\n            request_response = {}\n            request_schema_id = self.__request_schema.get(method_id)\n            if request_schema_id:\n                request_response['request'] = {'$ref': request_schema_id}\n            response_schema_id = self.__response_schema.get(method_id)\n            if response_schema_id:\n                request_response['response'] = {'$ref': response_schema_id}\n            methods_desc[rosy_method] = request_response\n    descriptor = {'methods': methods_desc, 'schemas': self.__parser.schemas()}\n    return descriptor", "docstring": "Descriptor for the all the JSON Schema used.\n\nArgs:\nservices: List of protorpc.remote.Service instances implementing an\napi/version.\n\nReturns:\nDictionary containing all the JSON Schema used in the service.", "source": "codesearchnet"}
{"code": "def register(cls, type_name: str, subclass: Type['JSONConvertible'], override_existing: bool=False) -> None:\n    cls._TYPE_REGISTRY.register(type_name, subclass, override_existing)", "docstring": "Registers a class with a type name.\n\nThe type name will be used as the key for class lookup during\ndeserialization. A class can be registered with multiple type names, but\na type name should be uesd only for one class.\n\nArgs:\ntype_name: A global unique string identifier for subclass.\nsubclass: A subclass of JSONConvertible.\noverride_existing: If True, override the class if the type name is\nalready present in the registry. Otherwise an error will be raised.", "source": "github-repos"}
{"code": "def _tf_sess(self):\n    return self._coordinated_creator.tf_sess", "docstring": "Return underlying tf.compat.v1.Session object.\n\nWarning: accessing the returned object in user code is likely to cause races\nor \"flaky tests\".\n\nReturns:\nA tf.compat.v1.Session object.", "source": "github-repos"}
{"code": "def exception(self, timeout=None):\n        \n        \n        if not self._completed.wait(timeout=timeout):\n            raise exceptions.TimeoutError(\"Timed out waiting for result.\")\n\n        \n        if self._result != self._SENTINEL:\n            return None\n\n        \n        return self._exception", "docstring": "Return the exception raised by the call, if any.\n\nThis blocks until the message has successfully been published, and\nreturns the exception. If the call succeeded, return None.\n\nArgs:\ntimeout (Union[int, float]): The number of seconds before this call\ntimes out and raises TimeoutError.\n\nRaises:\nTimeoutError: If the request times out.\n\nReturns:\nException: The exception raised by the call, if any.", "source": "juraj-google-style"}
{"code": "def add(self, pattern: Pattern) -> int:\n        \n        inner = pattern.expression\n        if self.operation is None:\n            if not isinstance(inner, Operation) or isinstance(inner, CommutativeOperation):\n                raise TypeError(\"Pattern must be a non-commutative operation.\")\n            self.operation = type(inner)\n        elif not isinstance(inner, self.operation):\n            raise TypeError(\n                \"All patterns must be the same operation, expected {} but got {}\".format(self.operation, type(inner))\n            )\n\n        if op_len(inner) < 3:\n            raise ValueError(\"Pattern has not enough operands.\")\n\n        operands = list(op_iter(inner))\n\n        first_name = self._check_wildcard_and_get_name(operands[0])\n        last_name = self._check_wildcard_and_get_name(operands[-1])\n\n        index = len(self._patterns)\n        self._patterns.append((pattern, first_name, last_name))\n\n        flatterm = FlatTerm.merged(*(FlatTerm(o) for o in operands[1:-1]))\n        self._net.add(flatterm, index)\n\n        return index", "docstring": "Add a pattern that will be recognized by the matcher.\n\nArgs:\npattern:\nThe pattern to add.\n\nReturns:\nAn internal index for the pattern.\n\nRaises:\nValueError:\nIf the pattern does not have the correct form.\nTypeError:\nIf the pattern is not a non-commutative operation.", "source": "juraj-google-style"}
{"code": "def DeregisterPathSpec(cls, path_spec_type):\n    \n    type_indicator = path_spec_type.TYPE_INDICATOR\n    if type_indicator not in cls._path_spec_types:\n      raise KeyError(\n          'Path specification type: {0:s} not set.'.format(type_indicator))\n\n    del cls._path_spec_types[type_indicator]\n\n    if type_indicator in cls._system_level_type_indicators:\n      del cls._system_level_type_indicators[type_indicator]", "docstring": "Deregisters a path specification.\n\nArgs:\npath_spec_type (type): path specification type.\n\nRaises:\nKeyError: if path specification is not registered.", "source": "juraj-google-style"}
{"code": "def _is_closed(self):\n    return self._coordinated_creator.tf_sess is None", "docstring": "Return True if the monitored session is closed.\n\nFor tests only.\n\nReturns:\nA boolean.", "source": "github-repos"}
{"code": "async def msetup(self, text_channel):\n        \n\n        if self.mready:\n            logger.warning(\"Attempt to init music when already initialised\")\n            return\n\n        if self.state != 'starting':\n            logger.error(\"Attempt to init from wrong state ('{}'), must be 'starting'.\".format(self.state))\n            return\n\n        self.logger.debug(\"Setting up gui\")\n\n        \n        self.mchannel = text_channel\n        self.new_embed_ui()\n        await self.embed.send()\n        await self.embed.usend()\n        await self.add_reactions()\n\n        self.mready = True", "docstring": "Creates the gui\n\nArgs:\ntext_channel (discord.Channel): The channel for the embed ui to run in", "source": "juraj-google-style"}
{"code": "def _create_make_unique(inputs):\n    if (inputs.shape.ndims != 2):\n        raise ValueError(('Input of top_k_with_unique must be rank-2 but got: %s' % inputs.shape))\n    height = inputs.shape[0]\n    width = inputs.shape[1]\n    zeros = tf.zeros([height, width], dtype=tf.int32)\n    log2_ceiling = int(math.ceil(math.log(int(width), 2)))\n    next_power_of_two = (1 << log2_ceiling)\n    count_mask = (~ (next_power_of_two - 1))\n    count_mask_r0 = tf.constant(count_mask)\n    count_mask_r2 = tf.fill([height, width], count_mask_r0)\n    smallest_normal = (1 << 23)\n    smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)\n    smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)\n    low_bit_mask = (~ (1 << 31))\n    low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)\n    low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)\n    iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0), [height, 1])\n    input_r2 = tf.bitcast(inputs, tf.int32)\n    abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)\n    if_zero_r2 = tf.equal(abs_r2, zeros)\n    smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(input_r2, smallest_normal_r2)\n    input_no_zeros_r2 = tf.where(if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)\n    and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)\n    or_r2 = tf.bitwise.bitwise_or(and_r2, iota)\n    return tf.bitcast(or_r2, tf.float32)", "docstring": "Replaces the lower bits of each element with iota.\n\nThe iota is used to derive the index, and also serves the purpose to\nmake each element unique to break ties.\n\nArgs:\ninputs: A tensor with rank of 2 and dtype of tf.float32.\n[batch_size, original_size].\n\nReturns:\nA tensor after element wise transformation, with dtype the same as inputs.\n[batch_size, original_size].\n\nRaises:\nValueError: If the rank of the input tensor does not equal 2.", "source": "codesearchnet"}
{"code": "def measure_each(*qubits: raw_types.Qid, key_func: Callable[([raw_types.Qid], str)]=str) -> List[gate_operation.GateOperation]:\n    return [MeasurementGate(1, key_func(q)).on(q) for q in qubits]", "docstring": "Returns a list of operations individually measuring the given qubits.\n\nThe qubits are measured in the computational basis.\n\nArgs:\n*qubits: The qubits to measure.\nkey_func: Determines the key of the measurements of each qubit. Takes\nthe qubit and returns the key for that qubit. Defaults to str.\n\nReturns:\nA list of operations individually measuring the given qubits.", "source": "codesearchnet"}
{"code": "def switches(self):\n    if (not self.__switches):\n        self.__switches = Switches(self.__connection)\n    return self.__switches", "docstring": "Gets the Switches API client.\n\nReturns:\nSwitches:", "source": "codesearchnet"}
{"code": "def detach(self) -> Rotation:\n    if self._rot_mats is not None:\n        return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n    elif self._quats is not None:\n        return Rotation(rot_mats=None, quats=self._quats.detach(), normalize_quats=False)\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.\n\nReturns:\nA copy of the Rotation whose underlying Tensor has been detached from its torch graph", "source": "github-repos"}
{"code": "def load_disease_terms(adapter, genemap_lines, genes=None, hpo_disease_lines=None):\n    \n    \n    if not genes:\n        genes = adapter.genes_by_alias()\n\n    \n    disease_terms = get_mim_phenotypes(genemap_lines=genemap_lines)\n\n    if not hpo_disease_lines:\n        hpo_disease_lines = fetch_hpo_phenotype_to_terms()\n    hpo_diseases = parse_hpo_diseases(hpo_disease_lines)\n\n    start_time = datetime.now()\n    nr_diseases = None\n\n    LOG.info(\"Loading the hpo disease...\")\n    for nr_diseases, disease_number in enumerate(disease_terms):\n        disease_info = disease_terms[disease_number]\n        disease_id = \"OMIM:{0}\".format(disease_number)\n        \n        if disease_id in hpo_diseases:\n            hpo_terms = hpo_diseases[disease_id]['hpo_terms']\n            if hpo_terms:\n                disease_info['hpo_terms'] = hpo_terms\n        disease_obj = build_disease_term(disease_info, genes)\n\n        adapter.load_disease_term(disease_obj)\n\n    LOG.info(\"Loading done. Nr of diseases loaded {0}\".format(nr_diseases))\n    LOG.info(\"Time to load diseases: {0}\".format(datetime.now() - start_time))", "docstring": "Load the omim phenotypes into the database\n\nParse the phenotypes from genemap2.txt and find the associated hpo terms\nfrom ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt.\n\nArgs:\nadapter(MongoAdapter)\ngenemap_lines(iterable(str))\ngenes(dict): Dictionary with all genes found in database\nhpo_disease_lines(iterable(str))", "source": "juraj-google-style"}
{"code": "def __init__(self, initial_op, kinds=None):\n        \n        assert isinstance(initial_op, sc_messages.Operation)\n        if kinds is None:\n            kinds = {}\n        self._kinds = kinds\n        self._metric_values_by_name_then_sign = collections.defaultdict(dict)\n        our_op = encoding.CopyProtoMessage(initial_op)\n        self._merge_metric_values(our_op)\n        our_op.metricValueSets = []\n        self._op = our_op", "docstring": "Constructor.\n\nIf kinds is not specifed, all operations will be merged assuming\nthey are of Kind ``DEFAULT_KIND``\n\nArgs:\ninitial_op (\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): the\ninitial version of the operation\nkinds (dict[string,[string]]): specifies the metric kind for\neach metric name", "source": "juraj-google-style"}
{"code": "def diff_cleanupSemantic(self, diffs):\n    \n    changes = False\n    equalities = []  \n    lastEquality = None  \n    pointer = 0  \n    \n    length_insertions1, length_deletions1 = 0, 0\n    \n    length_insertions2, length_deletions2 = 0, 0\n    while pointer < len(diffs):\n      if diffs[pointer][0] == self.DIFF_EQUAL:  \n        equalities.append(pointer)\n        length_insertions1, length_insertions2 = length_insertions2, 0\n        length_deletions1, length_deletions2 = length_deletions2, 0\n        lastEquality = diffs[pointer][1]\n      else:  \n        if diffs[pointer][0] == self.DIFF_INSERT:\n          length_insertions2 += len(diffs[pointer][1])\n        else:\n          length_deletions2 += len(diffs[pointer][1])\n        \n        \n        if (lastEquality and (len(lastEquality) <=\n            max(length_insertions1, length_deletions1)) and\n            (len(lastEquality) <= max(length_insertions2, length_deletions2))):\n          \n          diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))\n          \n          diffs[equalities[-1] + 1] = (self.DIFF_INSERT,\n              diffs[equalities[-1] + 1][1])\n          \n          equalities.pop()\n          \n          if len(equalities):\n            equalities.pop()\n          if len(equalities):\n            pointer = equalities[-1]\n          else:\n            pointer = -1\n          \n          length_insertions1, length_deletions1 = 0, 0\n          length_insertions2, length_deletions2 = 0, 0\n          lastEquality = None\n          changes = True\n      pointer += 1\n\n    \n    if changes:\n      self.diff_cleanupMerge(diffs)\n    self.diff_cleanupSemanticLossless(diffs)\n\n    \n    \n    \n    \n    \n    \n    pointer = 1\n    while pointer < len(diffs):\n      if (diffs[pointer - 1][0] == self.DIFF_DELETE and\n          diffs[pointer][0] == self.DIFF_INSERT):\n        deletion = diffs[pointer - 1][1]\n        insertion = diffs[pointer][1]\n        overlap_length1 = self.diff_commonOverlap(deletion, insertion)\n        overlap_length2 = self.diff_commonOverlap(insertion, deletion)\n        if overlap_length1 >= overlap_length2:\n          if (overlap_length1 >= len(deletion) / 2.0 or\n              overlap_length1 >= len(insertion) / 2.0):\n            \n            diffs.insert(pointer, (self.DIFF_EQUAL,\n                                   insertion[:overlap_length1]))\n            diffs[pointer - 1] = (self.DIFF_DELETE,\n                                  deletion[:len(deletion) - overlap_length1])\n            diffs[pointer + 1] = (self.DIFF_INSERT,\n                                  insertion[overlap_length1:])\n            pointer += 1\n        else:\n          if (overlap_length2 >= len(deletion) / 2.0 or\n              overlap_length2 >= len(insertion) / 2.0):\n            \n            \n            diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))\n            diffs[pointer - 1] = (self.DIFF_INSERT,\n                                  insertion[:len(insertion) - overlap_length2])\n            diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])\n            pointer += 1\n        pointer += 1\n      pointer += 1", "docstring": "Reduce the number of edits by eliminating semantically trivial\nequalities.\n\nArgs:\ndiffs: Array of diff tuples.", "source": "juraj-google-style"}
{"code": "def parse_view(query):\n    \n\n    try:\n        idx = query.lower().index('where')\n        query = query[:idx]\n    except ValueError:\n        pass\n\n    if not query.endswith(';'):\n        query = query.strip()\n        query += ';'\n\n    result = _view_stmt.parseString(query)\n\n    return View(result)", "docstring": "Parses asql query to view object.\n\nArgs:\nquery (str): asql query\n\nReturns:\nView instance: parsed view.", "source": "juraj-google-style"}
{"code": "def CheckCommaSpacing(filename, clean_lines, linenum, error):\n  \n  raw = clean_lines.lines_without_raw_strings\n  line = clean_lines.elided[linenum]\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  if (Search(r',[^,\\s]', ReplaceAll(r'\\boperator\\s*,\\s*\\(', 'F(', line)) and\n      Search(r',[^,\\s]', raw[linenum])):\n    error(filename, linenum, 'whitespace/comma', 3,\n          'Missing space after ,')\n\n  \n  \n  \n  \n  if Search(r';[^\\s};\\\\)/]', line):\n    error(filename, linenum, 'whitespace/semicolon', 3,\n          'Missing space after ;')", "docstring": "Checks for horizontal spacing near commas and semicolons.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def _FindAncestorAtIndent(node, indent):\n    if node.parent.parent is None:\n        return node\n    parent_indent = pytree_utils.GetNodeAnnotation(node.parent, pytree_utils.Annotation.CHILD_INDENT)\n    if parent_indent is not None and indent.startswith(parent_indent):\n        return node\n    else:\n        return _FindAncestorAtIndent(node.parent, indent)", "docstring": "Find an ancestor of node with the given indentation.\n\nArguments:\nnode: node to start from. This must not be the tree root.\nindent: indentation string for the ancestor we're looking for.\nSee _AnnotateIndents for more details.\n\nReturns:\nAn ancestor node with suitable indentation. If no suitable ancestor is\nfound, the closest ancestor to the tree root is returned.", "source": "github-repos"}
{"code": "def on_modified(self, event):\n        \n        if not self._event_error:\n            self.logger.info(u\"Change detected from an edit on: %s\",\n                             event.src_path)\n\n            self.compile_dependencies(event.src_path)", "docstring": "Called when a file or directory is modified.\n\nArgs:\nevent: Watchdog event, ``watchdog.events.DirModifiedEvent`` or\n``watchdog.events.FileModifiedEvent``.", "source": "juraj-google-style"}
{"code": "def _solve(self, sense=None):\n        \n        \n        while len(self._remove_constr) > 0:\n            self._remove_constr.pop().delete()\n\n        try:\n            return self._prob.solve(sense=sense)\n        except lp.SolverError as e:\n            raise_from(MOMAError(text_type(e)), e)\n        finally:\n            self._remove_constr = []", "docstring": "Remove old constraints and then solve the current problem.\n\nArgs:\nsense: Minimize or maximize the objective.\n(:class:`.lp.ObjectiveSense)\n\nReturns:\nThe Result object for the solved LP problem", "source": "juraj-google-style"}
{"code": "def read_dimvalue(self, dimname, path='/', default=NO_DEFAULT):\n    try:\n        dim = self._read_dimensions(dimname, path=path)[0]\n        return len(dim)\n    except self.Error:\n        if (default is NO_DEFAULT):\n            raise\n        return default", "docstring": "Returns the value of a dimension.\n\nArgs:\ndimname: Name of the variable\npath: path to the group.\ndefault: return `default` if `dimname` is not present and\n`default` is not `NO_DEFAULT` else raise self.Error.", "source": "codesearchnet"}
{"code": "def _CreateFeed(client):\n    feed_service = client.GetService('FeedService', version='v201809')\n    operation = {'operand': {'name': ('DSA Feed %s' % uuid.uuid4()), 'attributes': [{'type': 'URL_LIST', 'name': 'Page URL'}, {'type': 'STRING_LIST', 'name': 'Label'}], 'origin': 'USER'}, 'operator': 'ADD'}\n    feed = feed_service.mutate([operation])['value'][0]\n    return _DSAFeedDetails(feed['id'], feed['attributes'][0]['id'], feed['attributes'][1]['id'])", "docstring": "Creates the feed for DSA page URLs.\n\nArgs:\nclient: an AdWordsClient instance.\n\nReturns:\nA _DSAFeedDetails instance containing details about the created feed.", "source": "codesearchnet"}
{"code": "def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,), context=None, **kwargs):\n    variable_map = {}\n    for collection in collections:\n        variable_map.update(get_normalized_variable_map(scope, collection, context))\n    return tf.train.Saver(var_list=variable_map, **kwargs)", "docstring": "Builds a `tf.train.Saver` for the scope or module, with normalized names.\n\nThe names of the variables are normalized to remove the scope prefix.\nThis allows the same variables to be restored into another similar scope or\nmodule using a complementary `tf.train.Saver` object.\n\nArgs:\nscope: Scope or module. Variables within will be saved or restored.\ncollections: Sequence of collections of variables to restrict\n`tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`\nwhich includes moving averages variables as well as trainable variables.\ncontext: Scope or module, identical to or parent of `scope`. If given, this\nwill be used as the stripped prefix.\n**kwargs: Extra keyword arguments to pass to tf.train.Saver.\n\nReturns:\nA `tf.train.Saver` object for Variables in the scope or module.", "source": "codesearchnet"}
{"code": "def set_ylim(self, ylim):\n    if (len(ylim) != 2):\n        raise ValueError('ylim must contain two elements')\n    if (ylim[1] < ylim[0]):\n        raise ValueError('Min must be less than Max')\n    self.options['min_y'] = ylim[0]\n    self.options['max_y'] = ylim[1]", "docstring": "Set y-axis limits.\n\nAccepts a two-element list to set the y-axis limits.\n\nArgs:\nylim (list): lower and upper bounds\n\nRaises:\nValueError: ylim must contain two elements\nValueError: Min must be less than max", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(batch, seq_len, embed_dim)`.\nRepresents the hidden states from the previous layer or the input embeddings.\nposition_embeddings (`Tuple[torch.Tensor, torch.Tensor]`):\nA tuple of two tensors, each of shape `(batch, seq_len, embed_dim)`.\nRepresents absolute positional embeddings for the query and key in the attention mechanism.\nattention_mask (`torch.FloatTensor`):\nAttention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*, defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def get_container_service(access_token, subscription_id, resource_group, service_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', resource_group,\n                        '/providers/Microsoft.ContainerService/ContainerServices/', service_name,\n                        '?api-version=', ACS_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about an Azure Container Server\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nservice_name (str): Name of container service.\n\nReturns:\nHTTP response. JSON model.", "source": "juraj-google-style"}
{"code": "def csv_to_matrix(csv_file_path):\n    mtx = []\n    with open(csv_file_path) as csv_data_file:\n        for row in csv_data_file:\n            mtx.append(row.split(','))\n    return mtx", "docstring": "Load a CSV file into a Python matrix of strings.\n\nArgs:\ncsv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists previously requested builds. Previously requested builds may still be in-progress, or may have finished successfully or unsuccessfully.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBuildsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListBuildsResponse) The response message.", "source": "github-repos"}
{"code": "def value_to_string(self, obj):\n        \n        value = self._get_val_from_obj(obj)\n        return self.get_prep_value(value)", "docstring": "Convert the field value from the provided model to a string.\n\nUsed during model serialization.\n\nArgs:\nobj: db.Model, model object\n\nReturns:\nstring, the serialized field value", "source": "juraj-google-style"}
{"code": "def download_image(self, handle, dest):\n    shutil.copyfile(self._prefixed(handle), dest)", "docstring": "Copies over the handl to the destination\n\nArgs:\nhandle (str): path to copy over\ndest (str): path to copy to\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def create_sketch(self, name, description):\n    \n    resource_url = '{0:s}/sketches/'.format(self.api_base_url)\n    form_data = {'name': name, 'description': description}\n    response = self.session.post(resource_url, json=form_data)\n    response_dict = response.json()\n    sketch_id = response_dict['objects'][0]['id']\n    return sketch_id", "docstring": "Create a new sketch with the specified name and description.\n\nArgs:\nname (str): Title of sketch\ndescription (str): Description of sketch\n\nReturns:\nint: ID of created sketch", "source": "juraj-google-style"}
{"code": "def dismantle_func_graph(func_graph):\n    func_graph._function_captures.clear()\n    ops.dismantle_graph(func_graph)", "docstring": "Removes reference cycles in `func_graph` FuncGraph.\n\nHelpful for making sure the garbage collector doesn't need to run when\nthe FuncGraph goes out of scope, e.g. in tests using defun with\n@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).\n\nArgs:\nfunc_graph: A `FuncGraph` object to destroy. `func_graph` is unusable after\nthis function.", "source": "github-repos"}
{"code": "def wait_for_interrupt(self, check_interval=1.0, max_time=None):\n    self.start()\n    wait = max(check_interval, 0.01)\n    accum = 0\n    try:\n        while ((max_time is None) or (accum < max_time)):\n            try:\n                time.sleep(wait)\n            except IOError:\n                pass\n            accum += wait\n    except KeyboardInterrupt:\n        pass", "docstring": "Run the event loop until we receive a ctrl-c interrupt or max_time passes.\n\nThis method will wake up every 1 second by default to check for any\ninterrupt signals or if the maximum runtime has expired.  This can be\nset lower for testing purpose to reduce latency but in production\nsettings, this can cause increased CPU usage so 1 second is an\nappropriate value.\n\nArgs:\ncheck_interval (float): How often to wake up and check for\na SIGTERM. Defaults to 1s.  Setting this faster is useful\nfor unit testing.  Cannot be < 0.01 s.\nmax_time (float): Stop the event loop after max_time seconds.\nThis is useful for testing purposes.  Defaults to None,\nwhich means run forever until interrupt.", "source": "codesearchnet"}
{"code": "def broadcast_impl(self, old_slices, old_shape, new_shape):\n    new_slice_shape = self.slice_shape(new_shape)\n\n    def tf_fn(x):\n        return (tf.zeros(new_slice_shape, dtype=x.dtype) + _expand_dims(x, old_shape, new_shape))\n    return self.slicewise(tf_fn, old_slices)", "docstring": "Implementation of a broadcast operation.\n\nArgs:\nold_slices: LaidOutTensor.\nold_shape: Shape.\nnew_shape: Shape.\n\nReturns:\nLaidOutTensor.", "source": "codesearchnet"}
{"code": "def get_complete_ph_dos(partial_dos_path, phonopy_yaml_path):\n    \n    a = np.loadtxt(partial_dos_path).transpose()\n    d = loadfn(phonopy_yaml_path)\n\n    structure = get_structure_from_dict(d['primitive_cell'])\n\n    total_dos = PhononDos(a[0], a[1:].sum(axis=0))\n\n    pdoss = {}\n    for site, pdos in zip(structure, a[1:]):\n        pdoss[site] = pdos.tolist()\n\n    return CompletePhononDos(structure, total_dos, pdoss)", "docstring": "Creates a pymatgen CompletePhononDos from a partial_dos.dat and\nphonopy.yaml files.\nThe second is produced when generating a Dos and is needed to extract\nthe structure.\n\nArgs:\npartial_dos_path: path to the partial_dos.dat file.\nphonopy_yaml_path: path to the phonopy.yaml file.", "source": "juraj-google-style"}
{"code": "def do_check_pep8(files, status):\n    \n    for file_name in files:\n\n        args = ['flake8', '--max-line-length=120', '{0}'.format(file_name)]\n        output = run(*args)\n\n        if output:\n            status.append(\"Python PEP8/Flake8: {0}: {1}\".format(file_name,\n                                                                output))\n\n    return status", "docstring": "Run the python pep8 tool against the filst of supplied files.\nAppend any linting errors to the returned status list\n\nArgs:\nfiles (str): list of files to run pep8 against\nstatus (list): list of pre-receive check failures to eventually print\nto the user\n\nReturns:\nstatus list of current pre-redeive check failures. Might be an empty\nlist.", "source": "juraj-google-style"}
{"code": "def activate_nsxcontroller(self, **kwargs):\n    name = kwargs.pop('name')\n    name_args = dict(name=name)\n    method_name = 'nsx_controller_activate'\n    method_class = self._brocade_tunnels\n    nsxcontroller_attr = getattr(method_class, method_name)\n    config = nsxcontroller_attr(**name_args)\n    output = self._callback(config)\n    return output", "docstring": "Activate NSX Controller\n\nArgs:\nname (str): nsxcontroller name\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def extract_header_comment_key_value_tuples_from_file(file_descriptor):\n    file_data = file_descriptor.read()\n    findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data, (re.MULTILINE | re.DOTALL))\n    returned_list = []\n    for (header_comment, _ignored, raw_comments, key, value) in findall_result:\n        comments = re.findall('/\\\\* (.*?) \\\\*/', raw_comments)\n        if (len(comments) == 0):\n            comments = [u'']\n        returned_list.append((header_comment, comments, key, value))\n    return returned_list", "docstring": "Extracts tuples representing comments and localization entries from strings file.\n\nArgs:\nfile_descriptor (file): The file to read the tuples from\n\nReturns:\nlist : List of tuples representing the headers and localization entries.", "source": "codesearchnet"}
{"code": "def make_per_replica_value(value, devices):\n    values = []\n    for device_idx, device in enumerate(devices):\n        if callable(value):\n            v = value(device_idx)\n        elif isinstance(value, list):\n            v = value[device_idx]\n        else:\n            v = value\n        if isinstance(v, IndexedSlicesValue):\n            with ops.device(device):\n                values.append(IndexedSlices(values=array_ops.identity(v.values), indices=array_ops.identity(v.indices), dense_shape=array_ops.identity(v.dense_shape)))\n        else:\n            with ops.device(device):\n                values.append(array_ops.identity(v))\n    return value_lib.PerReplica(values)", "docstring": "Creates a `PerReplica` object whose values reside in `devices`.\n\nArgs:\nvalue: a tensor-convertible value or a `IndexedSlicesValue`, or a callable\nthat takes one argument (`device_idx`) and should return the value that is\ngoing to be created on devices[device_idx].\ndevices: a list of device strings to create `PerReplica` values on.\n\nReturns:\nA `PerReplica` object.", "source": "github-repos"}
{"code": "def _VerifyValues(self, pool_func, pool_grad_func, input_sizes, ksize, strides, padding, pool_grad_grad_func=None):\n    for data_format in GetTestConfigs():\n        self._VerifyOneTest(pool_func, pool_grad_func, input_sizes, ksize, strides, padding, data_format, pool_grad_grad_func=pool_grad_grad_func)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Pooling function to be called, e.g., tf.nn.max_pool2d\npool_grad_func: Corresponding pooling gradient function.\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\npool_grad_grad_func: Second-order gradient function, if available.", "source": "github-repos"}
{"code": "def _value_and_batch_jacobian(f, x):\n  \n  if tf.executing_eagerly():\n    with tf.GradientTape() as tape:\n      tape.watch(x)\n      value = f(x)\n    batch_jacobian = tape.batch_jacobian(value, x)\n  else:\n    value = f(x)\n    batch_jacobian = gradients.batch_jacobian(value, x)\n  return value, batch_jacobian", "docstring": "Enables uniform interface to value and batch jacobian calculation.\n\nWorks in both eager and graph modes.\n\nArguments:\nf: The scalar function to evaluate.\nx: The value at which to compute the value and the batch jacobian.\n\nReturns:\nA tuple (f(x), J(x)), where J(x) is the batch jacobian.", "source": "juraj-google-style"}
{"code": "def read_frames(file_path, frame_size, hop_size, start=0.0, end=float('inf'), buffer_size=5760000):\n    rest_samples = np.array([], dtype=np.float32)\n    for block in read_blocks(file_path, start=start, end=end, buffer_size=buffer_size):\n        block = np.concatenate([rest_samples, block])\n        current_sample = 0\n        while ((current_sample + frame_size) < block.size):\n            frame = block[current_sample:(current_sample + frame_size)]\n            (yield (frame, False))\n            current_sample += hop_size\n        rest_samples = block[current_sample:]\n    if (rest_samples.size > 0):\n        rest_samples = np.pad(rest_samples, (0, (frame_size - rest_samples.size)), mode='constant', constant_values=0)\n        (yield (rest_samples, True))", "docstring": "Read an audio file frame by frame. The frames are yielded one after another.\n\nArgs:\nfile_path (str): Path to the file to read.\nframe_size (int): The number of samples per frame.\nhop_size (int): The number of samples between two frames.\nstart (float): Start in seconds to read from.\nend (float): End in seconds to read to.\n``inf`` means to the end of the file.\nbuffer_size (int): Number of samples to load into memory at once\nand return as a single block.\nThe exact number of loaded samples depends on the\nblock-size of the audioread library. So it can be\nof x higher, where the x is typically 1024 or 4096.\n\nReturns:\nGenerator: A generator yielding a tuple for every frame.\nThe first item is the frame and\nthe second a boolean indicating if it is the last frame.", "source": "codesearchnet"}
{"code": "class RealmForOpenQAOutput(ModelOutput):\n    reader_output: dict = None\n    predicted_answer_ids: Optional[torch.LongTensor] = None", "docstring": "Outputs of [`RealmForOpenQA`] models.\n\nArgs:\nreader_output (`dict`):\nReader output.\npredicted_answer_ids (`torch.LongTensor` of shape `(answer_sequence_length)`):\nPredicted answer ids.", "source": "github-repos"}
{"code": "def gcd_float(numbers, tol=1e-8):\n    \n\n    def pair_gcd_tol(a, b):\n        \n        while b > tol:\n            a, b = b, a % b\n        return a\n\n    n = numbers[0]\n    for i in numbers:\n        n = pair_gcd_tol(n, i)\n    return n", "docstring": "Returns the greatest common divisor for a sequence of numbers.\nUses a numerical tolerance, so can be used on floats\n\nArgs:\nnumbers: Sequence of numbers.\ntol: Numerical tolerance\n\nReturns:\n(int) Greatest common divisor of numbers.", "source": "juraj-google-style"}
{"code": "def copy_pkg(self, filename, id_=-1):\n        \n        self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE)", "docstring": "Copy a package to the distribution server.\n\nBundle-style packages must be zipped prior to copying.\n\nArgs:\nfilename: Full path to file to upload.\nid_: ID of Package object to associate with, or -1 for new\npackages (default).", "source": "juraj-google-style"}
{"code": "def noise_get_turbulence(n: tcod.noise.Noise, f: Sequence[float], oc: float, typ: int=NOISE_DEFAULT) -> float:\n    return float(lib.TCOD_noise_get_turbulence_ex(n.noise_c, ffi.new('float[4]', f), oc, typ))", "docstring": "Return the turbulence noise sampled from the ``f`` coordinate.\n\nArgs:\nn (Noise): A Noise instance.\nf (Sequence[float]): The point to sample the noise from.\ntyp (int): The noise algorithm to use.\noctaves (float): The level of level.  Should be more than 1.\n\nReturns:\nfloat: The sampled noise value.", "source": "codesearchnet"}
{"code": "def wait_until_finish(self, duration=None):\n    if not PipelineState.is_terminal(self._state):\n        raise NotImplementedError()", "docstring": "Waits until the pipeline finishes and returns the final status.\n\nArgs:\nduration (int): The time to wait (in milliseconds) for job to finish.\nIf it is set to :data:`None`, it will wait indefinitely until the job\nis finished.\n\nRaises:\nIOError: If there is a persistent problem getting job\ninformation.\nNotImplementedError: If the runner does not support this\noperation.\n\nReturns:\nThe final state of the pipeline, or :data:`None` on timeout.", "source": "github-repos"}
{"code": "def normalize_docroot(app, root):\n    \n\n    srcdir = app.env.srcdir\n    default_version = app.config.javalink_default_version\n\n    if isinstance(root, basestring):\n        (url, base) = _parse_docroot_str(srcdir, root)\n        return {'root': url, 'base': base, 'version': default_version}\n    else:\n        normalized = {}\n        normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]\n\n        if 'base' in root:\n            normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]\n        else:\n            normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]\n\n        if 'version' in root:\n            normalized['version'] = root['version']\n        else:\n            normalized['version'] = default_version\n\n        return normalized", "docstring": "Creates a package-list URL and a link base from a docroot element.\n\nArgs:\napp: the global app object\nroot: the docroot element [string or dictionary]", "source": "juraj-google-style"}
{"code": "def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:\n    import matplotlib.pyplot as plt\n    num_qubits = len(result.measurements.keys())\n    states = (2 ** num_qubits)\n    values = np.zeros(states)\n    measurement_by_result = np.array([v.transpose()[0] for (k, v) in result.measurements.items()]).transpose()\n    for meas in measurement_by_result:\n        state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)\n        values[state_ind] += 1\n    plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]\n    plt.bar(np.arange(states), values, tick_label=plot_labels)\n    plt.xlabel('qubit state')\n    plt.ylabel('result count')\n    plt.show()\n    return values", "docstring": "Plot the state histogram from a single result with repetitions.\n\nStates is a bitstring representation of all the qubit states in a single\nresult.\nCurrently this function assumes each measurement gate applies to only\na single qubit.\n\nArgs:\nresult: The trial results to plot.\n\nReturns:\nThe histogram. A list of values plotted on the y-axis.", "source": "codesearchnet"}
{"code": "def _maybe_download_corpus(tmp_dir):\n  \n  corpus_url = (\"http:\n                \"1-billion-word-language-modeling-benchmark-r13output.tar.gz\")\n  corpus_filename = os.path.basename(corpus_url)\n  corpus_filepath = os.path.join(tmp_dir, corpus_filename)\n  if not os.path.exists(corpus_filepath):\n    generator_utils.maybe_download(tmp_dir, corpus_filename, corpus_url)\n    with tarfile.open(corpus_filepath, \"r:gz\") as corpus_tar:\n      corpus_tar.extractall(tmp_dir)", "docstring": "Download and unpack the corpus.\n\nArgs:\ntmp_dir: directory containing dataset.", "source": "juraj-google-style"}
{"code": "def __getattr__(self, name: str) -> np.ndarray:\n\t\t\n\t\ttry:\n\t\t\tvals = self.__dict__[\"storage\"][name]\n\t\t\tif vals is None:\n\t\t\t\t\n\t\t\t\ta = [\"/row_attrs/\", \"/col_attrs/\"][self.axis]\n\t\t\t\tvals = loompy.materialize_attr_values(self.ds._file[a][name][:])\n\t\t\t\tself.__dict__[\"storage\"][name] = vals\n\t\t\treturn vals\n\t\texcept KeyError:\n\t\t\traise AttributeError(f\"'{type(self)}' object has no attribute '{name}'\")", "docstring": "Return the named attribute\n\nArgs:\nname (str) \tName of the attribute\n\nRemarks:\nThe values will be loaded from file, and properly HTML unescaped", "source": "juraj-google-style"}
{"code": "def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):\n    \n    if trailing_slash:\n        base_path = base_path.rsplit('/', 1)[0] + '/'\n        test_path = test_path.rsplit('/', 1)[0] + '/'\n    else:\n        if not base_path.endswith('/'):\n            base_path += '/'\n\n        if not test_path.endswith('/'):\n            test_path += '/'\n\n    if wildcards:\n        return fnmatch.fnmatchcase(test_path, base_path)\n    else:\n        return test_path.startswith(base_path)", "docstring": "Return whether the a path is a subpath of another.\n\nArgs:\nbase_path: The base path\ntest_path: The path which we are testing\ntrailing_slash: If True, the trailing slash is treated with importance.\nFor example, ``/images/`` is a directory while ``/images`` is a\nfile.\nwildcards: If True, globbing wildcards are matched against paths", "source": "juraj-google-style"}
{"code": "def load_kbs(kbs_files):\n    \n\n    return {\n        'journals_re': build_journals_re_kb(kbs_files['journals-re']),\n        'journals': load_kb(kbs_files['journals'], build_journals_kb),\n        'report-numbers': build_reportnum_kb(kbs_files['report-numbers']),\n        'authors': build_authors_kb(kbs_files['authors']),\n        'books': build_books_kb(kbs_files['books']),\n        'publishers': load_kb(kbs_files['publishers'], build_publishers_kb),\n        'special_journals': build_special_journals_kb(kbs_files['special-journals']),\n        'collaborations': load_kb(kbs_files['collaborations'], build_collaborations_kb),\n    }", "docstring": "Load kbs (without caching)\n\nArgs:\n- kb_files: list of custom paths you can specify to override the\ndefault values\nIf path starts with \"kb:\", the kb will be loaded from the database", "source": "juraj-google-style"}
{"code": "def disable_switchport(self, inter_type, inter):\n        \n        config = ET.Element('config')\n        interface = ET.SubElement(config, 'interface',\n                                  xmlns=(\"urn:brocade.com:mgmt:\"\n                                         \"brocade-interface\"))\n        int_type = ET.SubElement(interface, inter_type)\n        name = ET.SubElement(int_type, 'name')\n        name.text = inter\n        ET.SubElement(int_type, 'switchport-basic', operation='delete')\n        try:\n            self._callback(config)\n            return True\n        \n        except Exception as error:\n            logging.error(error)\n            return False", "docstring": "Change an interface's operation to L3.\n\nArgs:\ninter_type: The type of interface you want to configure. Ex.\ntengigabitethernet, gigabitethernet, fortygigabitethernet.\ninter: The ID for the interface you want to configure. Ex. 1/0/1\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):\n    if (varnames is not None):\n        varnames = [s.strip() for s in list_strings(varnames)]\n        dlist = collections.defaultdict(list)\n        for task in self.select_tasks(nids=nids, wslice=wslice):\n            dstruct = task.input.structure.as_dict(fmt='abivars')\n            for vname in varnames:\n                value = task.input.get(vname, None)\n                if (value is None):\n                    value = dstruct.get(vname, None)\n                if (value is not None):\n                    dlist[vname].append((task, value))\n        for vname in varnames:\n            tv_list = dlist[vname]\n            if (not tv_list):\n                stream.write(('[%s]: Found 0 tasks with this variable\\n' % vname))\n            else:\n                stream.write(('[%s]: Found %s tasks with this variable\\n' % (vname, len(tv_list))))\n                for (i, (task, value)) in enumerate(tv_list):\n                    stream.write(('   %s --> %s\\n' % (str(value), task)))\n            stream.write('\\n')\n    else:\n        lines = []\n        for task in self.select_tasks(nids=nids, wslice=wslice):\n            s = task.make_input(with_header=True)\n            if task.deps:\n                s += ('\\n\\nDependencies:\\n' + '\\n'.join((str(dep) for dep in task.deps)))\n            else:\n                s += '\\n\\nDependencies: None'\n            lines.append((((((2 * '\\n') + (80 * '=')) + '\\n') + s) + (2 * '\\n')))\n        stream.writelines(lines)", "docstring": "Print the input of the tasks to the given stream.\n\nArgs:\nvarnames:\nList of Abinit variables. If not None, only the variable in varnames\nare selected and printed.\nnids:\nList of node identifiers. By defaults all nodes are shown\nwslice:\nSlice object used to select works.\nstream:\nFile-like object, Default: sys.stdout", "source": "codesearchnet"}
{"code": "def hist(hist_function, *, options={}, **interact_params):\n    params = {'marks': [{'sample': _array_or_placeholder(hist_function), 'bins': _get_option('bins'), 'normalized': _get_option('normalized'), 'scales': (lambda opts: {'sample': opts['x_sc'], 'count': opts['y_sc']})}]}\n    fig = (options.get('_fig', False) or _create_fig(options=options))\n    [hist] = _create_marks(fig=fig, marks=[bq.Hist], options=options, params=params)\n    _add_marks(fig, [hist])\n\n    def wrapped(**interact_params):\n        hist.sample = util.maybe_call(hist_function, interact_params)\n    controls = widgets.interactive(wrapped, **interact_params)\n    return widgets.VBox([controls, fig])", "docstring": "Generates an interactive histogram that allows users to change the\nparameters of the input hist_function.\n\nArgs:\nhist_function (Array | (*args -> Array int | Array float)):\nFunction that takes in parameters to interact with and returns an\narray of numbers. These numbers will be plotted in the resulting\nhistogram.\n\nKwargs:\n{options}\n\ninteract_params (dict): Keyword arguments in the same format as\n`ipywidgets.interact`. One argument is required for each argument\nof `hist_function`.\n\nReturns:\nVBox with two children: the interactive controls and the figure.\n\n>>> def gen_random(n_points):\n...     return np.random.normal(size=n_points)\n>>> hist(gen_random, n_points=(0, 1000, 10))\nVBox(...)", "source": "codesearchnet"}
{"code": "def WriteExecution(self, execution):\n    debug_event = debug_event_pb2.DebugEvent(execution=execution)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteExecution(self._dump_root, debug_event)", "docstring": "Write a Execution proto with the writer.\n\nArgs:\nexecution: An Execution proto, describing a TensorFlow op or graph\nexecution event.", "source": "github-repos"}
{"code": "def process_obj(self, obj: Union[URIRef, Literal, str]) -> Union[URIRef, Literal]:\n        \n        if isinstance(obj, dict) or isinstance(obj, list):\n            exit(str(obj) + ': should be str or intended to be a URIRef or Literal.')\n\n        if isinstance(obj, Literal) or isinstance(obj, URIRef):\n            prefix = self.find_prefix(obj)\n            if prefix: self.process_prefix(prefix)\n            return obj\n\n        if len(obj) > 8:\n            if 'http' == obj[:4] and ':\n                prefix = self.find_prefix(obj)\n                if prefix: self.process_prefix(prefix)\n                return URIRef(obj)\n\n        if ':' in str(obj):\n            presumed_prefix, info = obj.split(':', 1)\n            namespace: Union[Namespace, None] = self.process_prefix(presumed_prefix)\n            if namespace: return namespace[info]\n\n        return Literal(obj)", "docstring": "Gives component the proper node type\n\nArgs:\nobj: Entity object to be converted to its appropriate node type\n\nReturns:\nURIRef or Literal type of the object provided.\n\nRaises:\nSystemExit: If object is a dict or list it becomes str with broken data. Needs to\ncome in one object at a time.", "source": "juraj-google-style"}
{"code": "def translate(self, entity, identifier):\n    if entity in self._id_map and identifier in self._id_map[entity]:\n        return self._id_map[entity][identifier]\n    return None", "docstring": "Given an id, returns its counterpart.\n\next id to cm id and vice versa.\n\nArgs:\nentity: The name of the entity for which the ID relates.\nidentifier: Ext id or actual CM id to map.", "source": "github-repos"}
{"code": "def decode_der(cert_der):\n    \n    return cryptography.x509.load_der_x509_certificate(\n        data=cert_der, backend=cryptography.hazmat.backends.default_backend()\n    )", "docstring": "Decode cert DER string to Certificate object.\n\nArgs:\ncert_der : Certificate as a DER encoded string\n\nReturns:\ncryptography.Certificate()", "source": "juraj-google-style"}
{"code": "def build_srpm(specfile, save_dir):\n    logger.info('Starting rpmbuild to build: {0} SRPM.'.format(specfile))\n    if (save_dir != get_default_save_path()):\n        try:\n            msg = subprocess.Popen(['rpmbuild', '--define', '_sourcedir {0}'.format(save_dir), '--define', '_builddir {0}'.format(save_dir), '--define', '_srcrpmdir {0}'.format(save_dir), '--define', '_rpmdir {0}'.format(save_dir), '-bs', specfile], stdout=subprocess.PIPE).communicate()[0].strip()\n        except OSError:\n            logger.error('Rpmbuild failed for specfile: {0} and save_dir: {1}'.format(specfile, save_dir), exc_info=True)\n            msg = 'Rpmbuild failed. See log for more info.'\n        return msg\n    else:\n        if (not os.path.exists(save_dir)):\n            raise IOError('Specify folder to store a file (SAVE_DIR) or install rpmdevtools.')\n        try:\n            msg = subprocess.Popen(['rpmbuild', '--define', '_sourcedir {0}'.format((save_dir + '/SOURCES')), '--define', '_builddir {0}'.format((save_dir + '/BUILD')), '--define', '_srcrpmdir {0}'.format((save_dir + '/SRPMS')), '--define', '_rpmdir {0}'.format((save_dir + '/RPMS')), '-bs', specfile], stdout=subprocess.PIPE).communicate()[0].strip()\n        except OSError:\n            logger.error('Rpmbuild failed for specfile: {0} and save_dir: {1}'.format(specfile, save_dir), exc_info=True)\n            msg = 'Rpmbuild failed. See log for more info.'\n        return msg", "docstring": "Builds a srpm from given specfile using rpmbuild.\nGenerated srpm is stored in directory specified by save_dir.\n\nArgs:\nspecfile: path to a specfile\nsave_dir: path to source and build tree", "source": "codesearchnet"}
{"code": "def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10, address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL):\n    interval_fractions = (interval_ms / MS_FRACTION_DIVIDER)\n    if ((interval_fractions < 4) or (interval_fractions > 16384)):\n        raise ValueError('Invalid interval given {}, must be in range of 2.5ms to 10240ms!'.format(interval_fractions))\n    window_fractions = (window_ms / MS_FRACTION_DIVIDER)\n    if ((window_fractions < 4) or (window_fractions > 16384)):\n        raise ValueError('Invalid window given {}, must be in range of 2.5ms to 10240ms!'.format(window_fractions))\n    (interval_fractions, window_fractions) = (int(interval_fractions), int(window_fractions))\n    scan_parameter_pkg = struct.pack('>BHHBB', scan_type, interval_fractions, window_fractions, address_type, filter_type)\n    self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS, scan_parameter_pkg)", "docstring": "sets the le scan parameters\n\nArgs:\nscan_type: ScanType.(PASSIVE|ACTIVE)\ninterval: ms (as float) between scans (valid range 2.5ms - 10240ms)\n..note:: when interval and window are equal, the scan\nruns continuos\nwindow: ms (as float) scan duration (valid range 2.5ms - 10240ms)\naddress_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM)\n* PUBLIC = use device MAC address\n* RANDOM = generate a random MAC address and use that\nfilter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will\nreturn all fetched bluetooth packets (WHITELIST_ONLY is not supported,\nbecause OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented)\n\nRaises:\nValueError: A value had an unexpected format or was not in range", "source": "codesearchnet"}
{"code": "def make_parser(parser_creator=None, **kwargs):\n    \n\n    if parser_creator:\n        parser = parser_creator(**kwargs)\n    else:\n        parser = argparse.ArgumentParser(**kwargs)\n\n    \n    parser.add_argument(\n        \"--run\",\n        default=None,\n        type=str,\n        help=\"The algorithm or model to train. This may refer to the name \"\n        \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n        \"user-defined trainable function or class registered in the \"\n        \"tune registry.\")\n    parser.add_argument(\n        \"--stop\",\n        default=\"{}\",\n        type=json.loads,\n        help=\"The stopping criteria, specified in JSON. The keys may be any \"\n        \"field returned by 'train()' e.g. \"\n        \"'{\\\"time_total_s\\\": 600, \\\"training_iteration\\\": 100000}' to stop \"\n        \"after 600 seconds or 100k iterations, whichever is reached first.\")\n    parser.add_argument(\n        \"--config\",\n        default=\"{}\",\n        type=json.loads,\n        help=\"Algorithm-specific configuration (e.g. env, hyperparams), \"\n        \"specified in JSON.\")\n    parser.add_argument(\n        \"--resources-per-trial\",\n        default=None,\n        type=json_to_resources,\n        help=\"Override the machine resources to allocate per trial, e.g. \"\n        \"'{\\\"cpu\\\": 64, \\\"gpu\\\": 8}'. Note that GPUs will not be assigned \"\n        \"unless you specify them here. For RLlib, you probably want to \"\n        \"leave this alone and use RLlib configs to control parallelism.\")\n    parser.add_argument(\n        \"--num-samples\",\n        default=1,\n        type=int,\n        help=\"Number of times to repeat each trial.\")\n    parser.add_argument(\n        \"--local-dir\",\n        default=DEFAULT_RESULTS_DIR,\n        type=str,\n        help=\"Local dir to save training results to. Defaults to '{}'.\".format(\n            DEFAULT_RESULTS_DIR))\n    parser.add_argument(\n        \"--upload-dir\",\n        default=\"\",\n        type=str,\n        help=\"Optional URI to sync training results to (e.g. s3:\n    parser.add_argument(\n        \"--trial-name-creator\",\n        default=None,\n        help=\"Optional creator function for the trial string, used in \"\n        \"generating a trial directory.\")\n    parser.add_argument(\n        \"--sync-function\",\n        default=None,\n        help=\"Function for syncing the local_dir to upload_dir. If string, \"\n        \"then it must be a string template for syncer to run and needs to \"\n        \"include replacement fields '{local_dir}' and '{remote_dir}'.\")\n    parser.add_argument(\n        \"--loggers\",\n        default=None,\n        help=\"List of logger creators to be used with each Trial. \"\n        \"Defaults to ray.tune.logger.DEFAULT_LOGGERS.\")\n    parser.add_argument(\n        \"--checkpoint-freq\",\n        default=0,\n        type=int,\n        help=\"How many training iterations between checkpoints. \"\n        \"A value of 0 (default) disables checkpointing.\")\n    parser.add_argument(\n        \"--checkpoint-at-end\",\n        action=\"store_true\",\n        help=\"Whether to checkpoint at the end of the experiment. \"\n        \"Default is False.\")\n    parser.add_argument(\n        \"--keep-checkpoints-num\",\n        default=None,\n        type=int,\n        help=\"Number of last checkpoints to keep. Others get \"\n        \"deleted. 
Default (None) keeps all checkpoints.\")\n    parser.add_argument(\n        \"--checkpoint-score-attr\",\n        default=\"training_iteration\",\n        type=str,\n        help=\"Specifies by which attribute to rank the best checkpoint. \"\n        \"Default is increasing order. If attribute starts with min- it \"\n        \"will rank attribute in decreasing order. Example: \"\n        \"min-validation_loss\")\n    parser.add_argument(\n        \"--export-formats\",\n        default=None,\n        help=\"List of formats that exported at the end of the experiment. \"\n        \"Default is None. For RLlib, 'checkpoint' and 'model' are \"\n        \"supported for TensorFlow policy graphs.\")\n    parser.add_argument(\n        \"--max-failures\",\n        default=3,\n        type=int,\n        help=\"Try to recover a trial from its last checkpoint at least this \"\n        \"many times. Only applies if checkpointing is enabled.\")\n    parser.add_argument(\n        \"--scheduler\",\n        default=\"FIFO\",\n        type=str,\n        help=\"FIFO (default), MedianStopping, AsyncHyperBand, \"\n        \"HyperBand, or HyperOpt.\")\n    parser.add_argument(\n        \"--scheduler-config\",\n        default=\"{}\",\n        type=json.loads,\n        help=\"Config options to pass to the scheduler.\")\n\n    \n    parser.add_argument(\n        \"--restore\",\n        default=None,\n        type=str,\n        help=\"If specified, restore from this checkpoint.\")\n\n    return parser", "docstring": "Returns a base argument parser for the ray.tune tool.\n\nArgs:\nparser_creator: A constructor for the parser class.\nkwargs: Non-positional args to be passed into the\nparser class constructor.", "source": "juraj-google-style"}
{"code": "def sin(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unary import sin\n    return sin(self)", "docstring": "Calculates the sine of an [`EventSet`][temporian.EventSet]'s features.\n\nCan only be used on floating point features.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 3, 4, 5],\n...     features={\"M\": [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]},\n... )\n>>> a.sin()\nindexes: ...\ntimestamps: [1. 2. 3. 4. 5.]\n'M': [ 0.0000e+00  1.0000e+00  1.2246e-16 -1.0000e+00 -2.4493e-16]\n...\n\n```\n\nReturns:\nEventSetOrNode with sine of input features.", "source": "github-repos"}
{"code": "def _get_argspec_for_partial(obj):\n    n_prune_args = len(obj.args)\n    partial_keywords = obj.keywords or {}\n    args, varargs, keywords, defaults = getargspec(obj.func)\n    args = args[n_prune_args:]\n    no_default = object()\n    all_defaults = [no_default] * len(args)\n    if defaults:\n        all_defaults[-len(defaults):] = defaults\n    for kw, default in partial_keywords.items():\n        if kw in args:\n            idx = args.index(kw)\n            all_defaults[idx] = default\n        elif not keywords:\n            raise ValueError('Function does not have **kwargs parameter, but contains an unknown partial keyword.')\n    first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)\n    if first_default is None:\n        return ArgSpec(args, varargs, keywords, None)\n    invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]\n    if invalid_default_values:\n        raise ValueError('Some arguments %s do not have default value, but they are positioned after those with default values. This can not be expressed with ArgSpec.' % invalid_default_values)\n    return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))", "docstring": "Implements `getargspec` for `functools.partial` objects.\n\nArgs:\nobj: The `functools.partial` object\nReturns:\nAn `inspect.ArgSpec`\nRaises:\nValueError: When callable's signature can not be expressed with\nArgSpec.", "source": "github-repos"}
{"code": "def plot(self, ax=None, legend=None, return_fig=False, **kwargs):\n    if (ax is None):\n        fig = plt.figure(figsize=(2, 10))\n        ax = fig.add_subplot(111)\n        return_ax = False\n    else:\n        return_ax = True\n    d = None\n    if (legend is not None):\n        try:\n            d = legend.get_decor(self)\n        except:\n            pass\n    if (d is not None):\n        kwargs['color'] = d.colour\n        kwargs['lw'] = (getattr(d, 'lineweight', None) or getattr(d, 'lw', 1))\n        kwargs['ls'] = (getattr(d, 'linestyle', None) or getattr(d, 'ls', '-'))\n        axkwargs = {}\n        xlim = getattr(d, 'xlim', None)\n        if (xlim is not None):\n            axkwargs['xlim'] = list(map(float, xlim.split(',')))\n        xticks = getattr(d, 'xticks', None)\n        if (xticks is not None):\n            axkwargs['xticks'] = list(map(float, xticks.split(',')))\n        xscale = getattr(d, 'xscale', None)\n        if (xscale is not None):\n            axkwargs['xscale'] = xscale\n        ax.set(**axkwargs)\n    ax.plot(self, self.basis, **kwargs)\n    ax.set_title(self.mnemonic)\n    ax.set_xlabel(self.units)\n    if False:\n        ax.xaxis.tick_top()\n    if True:\n        labels = ax.get_xticklabels()\n        for label in labels:\n            label.set_rotation(90)\n    ax.set_ylim([self.stop, self.start])\n    ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')\n    if return_ax:\n        return ax\n    elif return_fig:\n        return fig\n    else:\n        return None", "docstring": "Plot a curve.\n\nArgs:\nax (ax): A matplotlib axis.\nlegend (striplog.legend): A legend. Optional.\nreturn_fig (bool): whether to return the matplotlib figure.\nDefault False.\nkwargs: Arguments for ``ax.set()``\n\nReturns:\nax. If you passed in an ax, otherwise None.", "source": "codesearchnet"}
{"code": "def close(self, reason=None):\n        \n        with self._closing:\n            if self._closed:\n                return\n\n            \n            if self.is_active:\n                _LOGGER.debug(\"Stopping consumer.\")\n                self._consumer.stop()\n            self._consumer = None\n\n            \n            _LOGGER.debug(\"Stopping scheduler.\")\n            self._scheduler.shutdown()\n            self._scheduler = None\n            _LOGGER.debug(\"Stopping leaser.\")\n            self._leaser.stop()\n            self._leaser = None\n            _LOGGER.debug(\"Stopping dispatcher.\")\n            self._dispatcher.stop()\n            self._dispatcher = None\n            _LOGGER.debug(\"Stopping heartbeater.\")\n            self._heartbeater.stop()\n            self._heartbeater = None\n\n            self._rpc = None\n            self._closed = True\n            _LOGGER.debug(\"Finished stopping manager.\")\n\n            for callback in self._close_callbacks:\n                callback(self, reason)", "docstring": "Stop consuming messages and shutdown all helper threads.\n\nThis method is idempotent. Additional calls will have no effect.\n\nArgs:\nreason (Any): The reason to close this. If None, this is considered\nan \"intentional\" shutdown. This is passed to the callbacks\nspecified via :meth:`add_close_callback`.", "source": "juraj-google-style"}
{"code": "def set_custom_predict_fn(self, predict_fn):\n    self.delete('estimator_and_spec')\n    self.store('custom_predict_fn', predict_fn)\n    self.set_inference_address('custom_predict_fn')\n    if (not self.has_model_name()):\n        self.set_model_name('1')\n    return self", "docstring": "Sets a custom function for inference.\n\nInstead of using TF Serving to host a model for WIT to query, WIT can\ndirectly use a custom function as the model to query. In this case, the\nprovided function should accept example protos and return:\n- For classification: A 2D list of numbers. The first dimension is for\neach example being predicted. The second dimension are the probabilities\nfor each class ID in the prediction.\n- For regression: A 1D list of numbers, with a regression score for each\nexample being predicted.\n\nArgs:\npredict_fn: The custom python function which will be used for model\ninference.\n\nReturns:\nself, in order to enabled method chaining.", "source": "codesearchnet"}
{"code": "def get_flat_size(self):\n    return sum((np.prod(v.get_shape().as_list()) for v in self.variables.values()))", "docstring": "Returns the total length of all of the flattened variables.\n\nReturns:\nThe length of all flattened variables concatenated.", "source": "codesearchnet"}
{"code": "def delay(self, n, start_time):\n    if ((n > self.max_retries) or ((n > self.min_retries) and ((time.time() - start_time) > self.max_retry_period))):\n        return (- 1)\n    return min((math.pow(self.backoff_factor, (n - 1)) * self.initial_delay), self.max_delay)", "docstring": "Calculate delay before the next retry.\n\nArgs:\nn: the number of current attempt. The first attempt should be 1.\nstart_time: the time when retry started in unix time.\n\nReturns:\nNumber of seconds to wait before next retry. -1 if retry should give up.", "source": "codesearchnet"}
{"code": "def build_input_fns(data_dir, batch_size):\n    with open(download(data_dir, 'vocab.pkl'), 'r') as f:\n        words_to_idx = pickle.load(f)\n    num_words = len(words_to_idx)\n    vocabulary = ([None] * num_words)\n    for (word, idx) in words_to_idx.items():\n        vocabulary[idx] = word\n\n    def train_input_fn():\n        dataset = newsgroups_dataset(data_dir, 'train', num_words, shuffle_and_repeat=True)\n        dataset = dataset.batch(batch_size).prefetch(32)\n        return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()\n\n    def eval_input_fn():\n        dataset = newsgroups_dataset(data_dir, 'test', num_words, shuffle_and_repeat=False)\n        dataset = dataset.batch(batch_size)\n        return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()\n    return (train_input_fn, eval_input_fn, vocabulary)", "docstring": "Builds iterators for train and evaluation data.\n\nEach object is represented as a bag-of-words vector.\n\nArguments:\ndata_dir: Folder in which to store the data.\nbatch_size: Batch size for both train and evaluation.\n\nReturns:\ntrain_input_fn: A function that returns an iterator over the training data.\neval_input_fn: A function that returns an iterator over the evaluation data.\nvocabulary: A mapping of word's integer index to the corresponding string.", "source": "codesearchnet"}
{"code": "def _GetByteStreamOperation(self):\n    byte_order_string = self.GetStructByteOrderString()\n    format_string = self.GetStructFormatString()\n    if (not format_string):\n        return None\n    format_string = ''.join([byte_order_string, format_string])\n    return byte_operations.StructOperation(format_string)", "docstring": "Retrieves the byte stream operation.\n\nReturns:\nByteStreamOperation: byte stream operation or None if unable to determine.", "source": "codesearchnet"}
{"code": "def cosmic_link(variant_obj):\n    \n\n    cosmic_ids = variant_obj.get('cosmic_ids')\n\n    if not cosmic_ids:\n        return None\n    else:\n        cosmic_id = cosmic_ids[0]\n        url_template = (\"https:\n\n\n    return url_template.format(cosmic_id)", "docstring": "Compose link to COSMIC Database.\n\nArgs:\nvariant_obj(scout.models.Variant)\n\nReturns:\nurl_template(str): Link to COSMIIC database if cosmic id is present", "source": "juraj-google-style"}
{"code": "def rgb_to_yuv(images):\n    images = ops.convert_to_tensor(images, name='images')\n    kernel = ops.convert_to_tensor(_rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')\n    ndims = images.get_shape().ndims\n    return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])", "docstring": "Converts one or more images from RGB to YUV.\n\nOutputs a tensor of the same shape as the `images` tensor, containing the YUV\nvalue of the pixels.\nThe output is only well defined if the value in images are in [0, 1].\nThere are two ways of representing an image: [0, 255] pixel values range or\n[0, 1] (as float) pixel values range. Users need to convert the input image\ninto a float [0, 1] range.\n\nArgs:\nimages: 2-D or higher rank. Image data to convert. Last dimension must be\nsize 3.\n\nReturns:\nimages: tensor with the same shape as `images`.", "source": "github-repos"}
{"code": "def predict_raw(self, X):\n        \n        \n        b = np.ones((X.shape[0], 1))\n        w2 = self.w[-(self.h + 1):].reshape(self.h + 1, 1)\n        w1 = self.w[:-(self.h + 1)].reshape(self.i + 1, self.h)\n\n        \n        \n        \n        if X.shape[1] > self.i:\n            \n            X = X[:, :self.i]\n        elif X.shape[1] < self.i:\n            \n            \n            \n            idx = range(X.shape[1])\n            idx.append(self.i)        \n            w1 = w1[idx, :]\n\n        if sparse.issparse(X):\n            return np.hstack((sigm(sparse.hstack((X, b)).dot(w1)), b)).dot(w2)\n        else:\n            return np.hstack((sigm(np.hstack((X, b)).dot(w1)), b)).dot(w2)", "docstring": "Predict targets for a feature matrix.\n\nArgs:\nX (np.array of float): feature matrix for prediction", "source": "juraj-google-style"}
{"code": "def _parse_options(self, options):\n    for key in ('username', 'client_name', 'client_id', 'client_secret', 'trusted', 'logout_uri'):\n        value = options.get(key)\n        if (value is not None):\n            self.fields[key] = value\n    username = self.fields.pop('username', None)\n    if (username is not None):\n        try:\n            user_model = get_user_model()\n            self.fields['user'] = user_model.objects.get(username=username)\n        except user_model.DoesNotExist:\n            raise CommandError('User matching the provided username does not exist.')\n    client_name = self.fields.pop('client_name', None)\n    if (client_name is not None):\n        self.fields['name'] = client_name\n    logout_uri = self.fields.get('logout_uri')\n    if logout_uri:\n        try:\n            URLValidator()(logout_uri)\n        except ValidationError:\n            raise CommandError('The logout_uri is invalid.')", "docstring": "Parse the command's options.\n\nArguments:\noptions (dict): Options with which the command was called.\n\nRaises:\nCommandError, if a user matching the provided username does not exist.", "source": "codesearchnet"}
{"code": "def parse_non_selinux(parts):\n    (links, owner, group, last) = parts\n    result = {'links': int(links), 'owner': owner, 'group': group}\n    if (',' in last[:4]):\n        (major, minor, rest) = last.split(None, 2)\n        result['major'] = int(major.rstrip(','))\n        result['minor'] = int(minor)\n    else:\n        (size, rest) = last.split(None, 1)\n        result['size'] = int(size)\n    result['date'] = rest[:12]\n    (path, link) = parse_path(rest[13:])\n    result['name'] = path\n    if link:\n        result['link'] = link\n    return result", "docstring": "Parse part of an ls output line that isn't selinux.\n\nArgs:\nparts (list): A four element list of strings representing the initial\nparts of an ls line after the permission bits. The parts are link\ncount, owner, group, and everything else.\n\nReturns:\nA dict containing links, owner, group, date, and name. If the line\nrepresented a device, major and minor numbers are included.  Otherwise,\nsize is included. If the raw name was a symbolic link, link is\nincluded.", "source": "codesearchnet"}
{"code": "def expand_dims(self, image):\n    self._ensure_format_supported(image)\n    if isinstance(image, PIL.Image.Image):\n        return image\n    if is_torch_tensor(image):\n        image = image.unsqueeze(0)\n    else:\n        image = np.expand_dims(image, axis=0)\n    return image", "docstring": "Expands 2-dimensional `image` to 3 dimensions.\n\nArgs:\nimage (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\nThe image to expand.", "source": "github-repos"}
{"code": "def dbmax_stddev(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmax_stddev`'.format(value))\n\n        self._dbmax_stddev = value", "docstring": "Corresponds to IDD Field `dbmax_stddev`\nStandard deviation of extreme annual maximum dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax_stddev`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def rename_next_state_fluent(name: str) -> str:\n    \n    i = name.index('/')\n    functor = name[:i-1]\n    arity = name[i+1:]\n    return \"{}/{}\".format(functor, arity)", "docstring": "Returns next state fluent canonical name.\n\nArgs:\nname (str): The current state fluent name.\n\nReturns:\nstr: The next state fluent name.", "source": "juraj-google-style"}
{"code": "def __init__(self, context):\n    \n    self._logdir = context.logdir\n    self._db_uri = context.db_uri\n    self._window_title = context.window_title\n    self._multiplexer = context.multiplexer\n    self._db_connection_provider = context.db_connection_provider\n    self._assets_zip_provider = context.assets_zip_provider", "docstring": "Instantiates CorePlugin.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, flags=ConfigFlag.OFPC_FRAG_NORMAL,\n                 miss_send_len=ControllerMaxLen.OFPCML_NO_BUFFER):\n        \n        super().__init__(xid, flags, miss_send_len)\n        self.header.message_type = Type.OFPT_SET_CONFIG", "docstring": "Create a SetConfig with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\nflags (:class:`~pyof.v0x01.controller2switch.common.ConfigFlag`):\nOFPC_* flags.\nmiss_send_len (int): UBInt16 max bytes of new flow that the\ndatapath should send to the controller.", "source": "juraj-google-style"}
{"code": "def query(self, terms=None, negated_terms=None):\n        \n\n        if terms is None:\n            terms = []\n        matches_all = 'owl:Thing' in terms\n        if negated_terms is None:\n            negated_terms = []\n        termset = set(terms)\n        negated_termset = set(negated_terms)\n        matches = []\n        n_terms = len(termset)\n        for subj in self.subjects:\n            if matches_all or len(termset.intersection(self.inferred_types(subj))) == n_terms:\n                if len(negated_termset.intersection(self.inferred_types(subj))) == 0:\n                    matches.append(subj)\n        return matches", "docstring": "Basic boolean query, using inference.\n\nArguments:\n\n- terms: list\n\nlist of class ids. Returns the set of subjects that have at least one inferred annotation to each of the specified classes.\n\n- negated_terms: list\n\nlist of class ids. Filters the set of subjects so that there are no inferred annotations to any of the specified classes", "source": "juraj-google-style"}
{"code": "def check_schema_equal(left: Union['bigquery.TableSchema', 'bigquery.TableFieldSchema'], right: Union['bigquery.TableSchema', 'bigquery.TableFieldSchema'], *, ignore_descriptions: bool=False, ignore_field_order: bool=False) -> bool:\n    if type(left) != type(right) or not isinstance(left, (bigquery.TableSchema, bigquery.TableFieldSchema)):\n        return False\n    if isinstance(left, bigquery.TableFieldSchema):\n        if left.name != right.name:\n            return False\n        if left.type != right.type:\n            if sorted((left.type, right.type)) not in (['BOOL', 'BOOLEAN'], ['FLOAT', 'FLOAT64'], ['INT64', 'INTEGER'], ['RECORD', 'STRUCT']):\n                return False\n        if left.mode != right.mode:\n            return False\n        if not ignore_descriptions and left.description != right.description:\n            return False\n    if isinstance(left, bigquery.TableSchema) or left.type in ('RECORD', 'STRUCT'):\n        if len(left.fields) != len(right.fields):\n            return False\n        if ignore_field_order:\n            left_fields = sorted(left.fields, key=lambda field: field.name)\n            right_fields = sorted(right.fields, key=lambda field: field.name)\n        else:\n            left_fields = left.fields\n            right_fields = right.fields\n        for left_field, right_field in zip(left_fields, right_fields):\n            if not check_schema_equal(left_field, right_field, ignore_descriptions=ignore_descriptions, ignore_field_order=ignore_field_order):\n                return False\n    return True", "docstring": "Check whether schemas are equivalent.\n\nThis comparison function differs from using == to compare TableSchema\nbecause it ignores categories, policy tags, descriptions (optionally), and\nfield ordering (optionally).\n\nArgs:\nleft (~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableFieldSchema):\nOne schema to compare.\nright (~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableFieldSchema):\nThe other schema to compare.\nignore_descriptions (bool): (optional) Whether or not to ignore field\ndescriptions when comparing. Defaults to False.\nignore_field_order (bool): (optional) Whether or not to ignore struct field\norder when comparing. Defaults to False.\n\nReturns:\nbool: True if the schemas are equivalent, False otherwise.", "source": "github-repos"}
{"code": "def extrapolate_points(points, n_points):\n    \n    points = points[:n_points]\n    lat = []\n    lon = []\n    last = None\n    for point in points:\n        if last is not None:\n            lat.append(last.lat-point.lat)\n            lon.append(last.lon-point.lon)\n        last = point\n\n    dts = np.mean([p.dt for p in points])\n    lons = np.mean(lon)\n    lats = np.mean(lat)\n\n    gen_sample = []\n    last = points[0]\n    for _ in range(n_points):\n        point = Point(last.lat+lats, last.lon+lons, None)\n        point.dt = dts\n        \n        gen_sample.append(point)\n        last = point\n\n    return gen_sample", "docstring": "Extrapolate a number of points, based on the first ones\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nn_points (int): number of points to extrapolate\nReturns:\n:obj:`list` of :obj:`Point`", "source": "juraj-google-style"}
{"code": "def __lt__(self, other):\n        \n        if other.__class__ is not self.__class__:\n            return NotImplemented\n        return (\n            self._tp__get_typed_properties()\n            < other._tp__get_typed_properties()\n        )", "docstring": "Test if self is less than an object of the same class.\n\nArgs:\nother: The object to compare against.\n\nReturns:\nTrue if self is less than other; else False.\n\nRaises:\nTypeError: Raised if the objects are not of the same class.", "source": "juraj-google-style"}
{"code": "def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None):\n    variable_type = entities.Variable.Type.DOUBLE\n    return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)", "docstring": "Returns value for a certain double variable attached to a feature flag.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nDouble value of the variable. None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "codesearchnet"}
{"code": "def symbol(name: str=None, symbol_type: Type[Symbol]=Symbol) -> 'SymbolWildcard':\n        \n        if isinstance(name, type) and issubclass(name, Symbol) and symbol_type is Symbol:\n            return SymbolWildcard(name)\n        return SymbolWildcard(symbol_type, variable_name=name)", "docstring": "Create a `SymbolWildcard` that matches a single `Symbol` argument.\n\nArgs:\nname:\nOptional variable name for the wildcard.\nsymbol_type:\nAn optional subclass of `Symbol` to further limit which kind of symbols are\nmatched by the wildcard.\n\nReturns:\nA `SymbolWildcard` that matches the *symbol_type*.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    self._ParseLogonApplications(parser_mediator, registry_key)\n    self._ParseRegisteredDLLs(parser_mediator, registry_key)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def HandleMessage(self, message):\n    \n    self._is_active = True\n    try:\n      action_cls = actions.ActionPlugin.classes.get(message.name)\n      if action_cls is None:\n        raise RuntimeError(\"Client action %r not known\" % message.name)\n\n      action = action_cls(grr_worker=self)\n\n      \n      self.transaction_log.Write(message)\n\n      \n      action.Progress()\n      action.Execute(message)\n\n      \n      self.transaction_log.Clear()\n    finally:\n      self._is_active = False\n      \n      self.stats_collector.RequestSend()", "docstring": "Entry point for processing jobs.\n\nArgs:\nmessage: The GrrMessage that was delivered from the server.\n\nRaises:\nRuntimeError: The client action requested was not found.", "source": "juraj-google-style"}
{"code": "def licenses(self):\n    buf_size = self.MAX_BUF_SIZE\n    buf = (ctypes.c_char * buf_size)()\n    res = self._dll.JLINK_GetAvailableLicense(buf, buf_size)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return ctypes.string_at(buf).decode()", "docstring": "Returns a string of the built-in licenses the J-Link has.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nString of the contents of the built-in licenses the J-Link has.", "source": "codesearchnet"}
{"code": "def dump_next(self):\n    if (self.dump_walker is None):\n        return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.STREAM_WALKER_NOT_INITIALIZED)\n    try:\n        return self.dump_walker.pop()\n    except StreamEmptyError:\n        return None", "docstring": "Dump the next reading from the stream.\n\nReturns:\nIOTileReading: The next reading or None if there isn't one", "source": "codesearchnet"}
{"code": "def encrypt_block(self, plainText):\n    \n    if not self.initialized:\n      raise TypeError(\"CamCrypt object has not been initialized\")\n    if len(plainText) != BLOCK_SIZE:\n      raise ValueError(\"plainText must be %d bytes long (received %d bytes)\" %\n                       (BLOCK_SIZE, len(plainText)))\n    cipher = ctypes.create_string_buffer(BLOCK_SIZE)\n    self.encblock(self.bitlen, plainText, self.keytable, cipher)\n    return cipher.raw", "docstring": "Encrypt a 16-byte block of data.\n\nNOTE: This function was formerly called `encrypt`, but was changed when\nsupport for encrypting arbitrary-length strings was added.\n\nArgs:\nplainText (str): 16-byte data.\n\nReturns:\n16-byte str.\n\nRaises:\nTypeError if CamCrypt object has not been initialized.\nValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.", "source": "juraj-google-style"}
{"code": "def _new(self, name, **kwargs):\n    if self._name_path:\n        parent = self\n        for path_element in self._name_path.split('/'):\n            self._set_xml_from_keys(parent, (path_element, None))\n            parent = parent.find(path_element)\n        parent.text = name\n    else:\n        ElementTree.SubElement(self, 'name').text = name\n    for item in self.data_keys.items():\n        self._set_xml_from_keys(self, item, **kwargs)", "docstring": "Create a new JSSObject with name and \"keys\".\n\nGenerate a default XML template for this object, based on\nthe class attribute \"keys\".\n\nArgs:\nname: String name of the object to use as the\nobject's name property.\nkwargs:\nAccepted keyword args can be viewed by checking the\n\"data_keys\" class attribute. Typically, they include all\ntop-level keys, and non-duplicated keys used elsewhere.\n\nValues will be cast to string. (Int 10, bool False\nbecome string values \"10\" and \"false\").\n\nIgnores kwargs that aren't in object's keys attribute.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.ListSessionEntityTypes = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/ListSessionEntityTypes',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.ListSessionEntityTypesRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.ListSessionEntityTypesResponse.FromString,\n        )\n    self.GetSessionEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/GetSessionEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.GetSessionEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString,\n        )\n    self.CreateSessionEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/CreateSessionEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.CreateSessionEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString,\n        )\n    self.UpdateSessionEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/UpdateSessionEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.UpdateSessionEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString,\n        )\n    self.DeleteSessionEntityType = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/DeleteSessionEntityType',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.DeleteSessionEntityTypeRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def upgrade(**kwargs):\n    log.warning('pkg.upgrade not implemented on Windows yet')\n    refresh = salt.utils.data.is_true(kwargs.get('refresh', True))\n    saltenv = kwargs.get('saltenv', 'base')\n    log.warning('pkg.upgrade not implemented on Windows yet refresh:%s saltenv:%s', refresh, saltenv)\n    return {}", "docstring": "Upgrade all software. Currently not implemented\n\nKwargs:\nsaltenv (str): The salt environment to use. Default ``base``.\nrefresh (bool): Refresh package metadata. Default ``True``.\n\n.. note::\nThis feature is not yet implemented for Windows.\n\nReturns:\ndict: Empty dict, until implemented\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' pkg.upgrade", "source": "codesearchnet"}
{"code": "def __init__(self, parser, codegen, writer):\n        \n        self._parser = parser\n        self._codegen = codegen\n        self._symbolgen = SymtableCodeGen()\n        self._writer = writer\n        self._sources = []\n        self._searchers = []\n        self._borrowers = []", "docstring": "Creates an instance of *MibCompiler* class.\n\nArgs:\nparser: ASN.1 MIB parser object\ncodegen: MIB transformation object\nwriter: transformed MIB storing object", "source": "juraj-google-style"}
{"code": "def from_signature(message, signature):\n    if (signature.recovery_id is None):\n        raise ValueError('The signature must have a recovery_id.')\n    msg = get_bytes(message)\n    pub_keys = bitcoin_curve.recover_public_key(msg, signature, signature.recovery_id)\n    for (k, recid) in pub_keys:\n        if ((signature.recovery_id is not None) and (recid == signature.recovery_id)):\n            return PublicKey(k.x, k.y)\n    return None", "docstring": "Attempts to create PublicKey object by deriving it\nfrom the message and signature.\n\nArgs:\nmessage (bytes): The message to be verified.\nsignature (Signature): The signature for message.\nThe recovery_id must not be None!\n\nReturns:\nPublicKey:\nA PublicKey object derived from the\nsignature, it it exists. None otherwise.", "source": "codesearchnet"}
{"code": "def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims):\n  \n  if isinstance(tensor_or_shape, Tensor):\n    return reshape(tensor_or_shape, replace_dimensions(\n        tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims))\n  if not isinstance(tensor_or_shape, Shape):\n    raise ValueError(\n        \"tensor_or_shape must be a Tensor or Shape got %s\" % (tensor_or_shape,))\n  in_dims = tensor_or_shape.dims\n  if isinstance(old_dim_or_dims, Dimension):\n    old_dim_or_dims = [old_dim_or_dims]\n  if isinstance(new_dim_or_dims, Dimension):\n    new_dim_or_dims = [new_dim_or_dims]\n  if not isinstance(old_dim_or_dims, list) or not old_dim_or_dims:\n    raise ValueError(\n        \"old_dim_or_dims must be a Dimension or a list of Dimension got %s\"\n        % (old_dim_or_dims,))\n  if not isinstance(new_dim_or_dims, list) or not new_dim_or_dims:\n    raise ValueError(\n        \"new_dim_or_dims must be a Dimension or a list of Dimension got %s\"\n        % (new_dim_or_dims,))\n  try:\n    positions = [in_dims.index(d) for d in old_dim_or_dims]\n    pos = positions[0]\n    if positions != list(range(pos, pos + len(positions))):\n      raise ValueError()\n  except ValueError:\n    raise ValueError(\n        \"old_dim_or_dims must be a subsequence of the input's dimensions\"\n        \" old_dim_or_dims=%s input's dimensions=%s\" %\n        (old_dim_or_dims, in_dims))\n  return Shape(in_dims[:pos] + new_dim_or_dims +\n               in_dims[pos + len(old_dim_or_dims):])", "docstring": "Replace dimensions in a Tensor or Shape.\n\nold_dim_or_dims consists of a single dimension or a list of dimensions\nthat must occur consecutively in the input shape.  They are replaced\nby the dimensions in new_dim_or_dims.\n\nArgs:\ntensor_or_shape: a Tensor or a Shape\nold_dim_or_dims: a Dimension or a list of Dimensions\nnew_dim_or_dims: a Dimensions or a list of Dimensions\nReturns:\na new Tensor or a Shape", "source": "juraj-google-style"}
{"code": "def _process_single_batch(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False):\n    with backend.eager_learning_phase_scope(1 if training else 0), training_utils.RespectCompiledTrainableState(model):\n        with GradientTape() as tape:\n            outs, total_loss, output_losses, masks = _model_loss(model, inputs, targets, output_loss_metrics=output_loss_metrics, sample_weights=sample_weights, training=training)\n            if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n                scaled_total_loss = model.optimizer.get_scaled_loss(total_loss)\n            else:\n                scaled_total_loss = total_loss\n        if training:\n            trainable_weights = model.trainable_weights\n            if trainable_weights:\n                if hasattr(model, '_backwards'):\n                    model._backwards(tape, scaled_total_loss)\n                else:\n                    grads = tape.gradient(scaled_total_loss, trainable_weights)\n                    if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n                        grads = model.optimizer.get_unscaled_gradients(grads)\n                    model.optimizer.apply_gradients(zip(grads, trainable_weights))\n            else:\n                logging.warning('The list of trainable weights is empty. Make sure that you are not setting model.trainable to False before compiling the model.')\n        return (outs, total_loss, output_losses, masks)", "docstring": "Calculate the loss and gradient for one input batch.\n\nThe model weights are updated if training is set to True.\n\nArgs:\nmodel: Model whose loss has to be calculated.\ninputs: List of input arrays.\ntargets: List of target arrays.\noutput_loss_metrics: List of metrics that are used to aggregated output\nloss values.\nsample_weights: Optional list of sample weight arrays.\ntraining: The boolean represents if the weights of the model are updated.\n'fit' methods will set this to True while 'evaluate' methods will\nset this to False.\n\nReturns:\noutput of the model, total loss, the loss and the mask\nassociated with each output.\n\nRaises:\nValueError: If the model has no loss to optimize.", "source": "github-repos"}
{"code": "def __init__(self, value=None, tag=enums.Tags.DEFAULT):\n        \n        if value is None:\n            value = int(time.time())\n        super(DateTime, self).__init__(value, tag)\n        self.type = enums.Types.DATE_TIME", "docstring": "Create a DateTime.\n\nArgs:\nvalue (int): The value of the DateTime in number of seconds since\nthe Epoch. See the time package for additional information.\nOptional, defaults to the current time.\ntag (Tags): An enumeration defining the tag of the LongInteger.\nOptional, defaults to Tags.DEFAULT.", "source": "juraj-google-style"}
{"code": "def UploadFile(self, fd, offset=0, amount=None):\n    \n    return self._UploadChunkStream(\n        self._streamer.StreamFile(fd, offset=offset, amount=amount))", "docstring": "Uploads chunks of a given file descriptor to the transfer store flow.\n\nArgs:\nfd: A file descriptor to upload.\noffset: An integer offset at which the file upload should start on.\namount: An upper bound on number of bytes to stream. If it is `None` then\nthe whole file is uploaded.\n\nReturns:\nA `BlobImageDescriptor` object.", "source": "juraj-google-style"}
{"code": "def __one_equals_true(value):\n    if (isinstance(value, six.integer_types) and (value == 1)):\n        return True\n    elif (isinstance(value, six.string_types) and (re.match('\\\\d+', value, flags=(re.IGNORECASE + re.UNICODE)) is not None) and (six.text_type(value) == '1')):\n        return True\n    return False", "docstring": "Test for ``1`` as a number or a string and return ``True`` if it is.\n\nArgs:\nvalue: string or number or None.\n\nReturns:\nbool: ``True`` if 1 otherwise ``False``.", "source": "codesearchnet"}
{"code": "def get_module(dir_path: str, relative_to_dir: str) -> str:\n    dir_path = dir_path[len(relative_to_dir):]\n    dir_path = dir_path.replace(os.sep, '/')\n    return dir_path.replace('/', '.').strip('.')", "docstring": "Get module that corresponds to path relative to relative_to_dir.\n\nArgs:\ndir_path: Path to directory.\nrelative_to_dir: Get module relative to this directory.\n\nReturns:\nName of module that corresponds to the given directory.", "source": "github-repos"}
{"code": "def export_model(model, model_type, export_dir, model_column_fn):\n    (wide_columns, deep_columns) = model_column_fn()\n    if (model_type == 'wide'):\n        columns = wide_columns\n    elif (model_type == 'deep'):\n        columns = deep_columns\n    else:\n        columns = (wide_columns + deep_columns)\n    feature_spec = tf.feature_column.make_parse_example_spec(columns)\n    example_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)\n    model.export_savedmodel(export_dir, example_input_fn, strip_default_attrs=True)", "docstring": "Export to SavedModel format.\n\nArgs:\nmodel: Estimator object\nmodel_type: string indicating model type. \"wide\", \"deep\" or \"wide_deep\"\nexport_dir: directory to export the model.\nmodel_column_fn: Function to generate model feature columns.", "source": "codesearchnet"}
{"code": "def latents_to_observations(self, latent_means, latent_covs):\n    with tf.name_scope('latents_to_observations'):\n        pushforward_latents_step = build_pushforward_latents_step(self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep)\n        latent_means = distribution_util.move_dimension(latent_means, source_idx=(- 2), dest_idx=0)\n        latent_means = latent_means[(..., tf.newaxis)]\n        latent_covs = distribution_util.move_dimension(latent_covs, source_idx=(- 3), dest_idx=0)\n        (initial_observation_mean, initial_observation_cov) = pushforward_latents_step(_=None, latent_t_mean_cov=(self.initial_step, latent_means[self.initial_step], latent_covs[self.initial_step]))\n        timesteps = tf.range(self.initial_step, (self.initial_step + self.num_timesteps))\n        (observation_means, observation_covs) = tf.scan(pushforward_latents_step, elems=(timesteps, latent_means, latent_covs), initializer=(initial_observation_mean, initial_observation_cov), parallel_iterations=10000)\n        observation_means = distribution_util.move_dimension(observation_means[(..., 0)], source_idx=0, dest_idx=(- 2))\n        observation_covs = distribution_util.move_dimension(observation_covs, source_idx=0, dest_idx=(- 3))\n        return (observation_means, observation_covs)", "docstring": "Push latent means and covariances forward through the observation model.\n\nArgs:\nlatent_means: float `Tensor` of shape `[..., num_timesteps, latent_size]`\nlatent_covs: float `Tensor` of shape\n`[..., num_timesteps, latent_size, latent_size]`.\n\nReturns:\nobservation_means: float `Tensor` of shape\n`[..., num_timesteps, observation_size]`\nobservation_covs: float `Tensor` of shape\n`[..., num_timesteps, observation_size, observation_size]`", "source": "codesearchnet"}
{"code": "def _CreateWindowsPathResolver(self, file_system, mount_point, environment_variables):\n    if (environment_variables is None):\n        environment_variables = []\n    path_resolver = windows_path_resolver.WindowsPathResolver(file_system, mount_point)\n    for environment_variable in environment_variables:\n        name = environment_variable.name.lower()\n        if (name not in ('systemroot', 'userprofile')):\n            continue\n        path_resolver.SetEnvironmentVariable(environment_variable.name, environment_variable.value)\n    return path_resolver", "docstring": "Create a Windows path resolver and sets the environment variables.\n\nArgs:\nfile_system (dfvfs.FileSystem): file system.\nmount_point (dfvfs.PathSpec): mount point path specification.\nenvironment_variables (list[EnvironmentVariableArtifact]): environment\nvariables.\n\nReturns:\ndfvfs.WindowsPathResolver: Windows path resolver.", "source": "codesearchnet"}
{"code": "def flatten(vari):\n    \n    if isinstance(vari, Poly):\n        shape = int(numpy.prod(vari.shape))\n        return reshape(vari, (shape,))\n\n    return numpy.array(vari).flatten()", "docstring": "Flatten a shapeable quantity.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nShapeable input quantity.\n\nReturns:\n(chaospy.poly.base.Poly, numpy.ndarray):\nSame type as ``vari`` with `len(Q.shape)==1`.\n\nExamples:\n>>> P = chaospy.reshape(chaospy.prange(4), (2,2))\n>>> print(P)\n[[1, q0], [q0^2, q0^3]]\n>>> print(chaospy.flatten(P))\n[1, q0, q0^2, q0^3]", "source": "juraj-google-style"}
{"code": "def subnet_range(ip_net, cidr):\n    \n    subnets_dict = dict()\n    subnet = whole_subnet_maker(ip_net, cidr)\n    subnets_dict['IP'] = ip_net\n    subnets_dict['NET'] = subnet\n    subnets_dict['CIDR'] = '%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)\n    if int(cidr) >= 24:\n        subnet_split = subnet.split('.')\n        first_ip = int(subnet_split[3]) + 1\n        last_ip = (int(subnet_split[3]) + 1) + (253 - int(__mask_conversion[int(cidr)]['OCT4']))\n        bcast_ip = (int(subnet_split[3]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT4']))\n        temp = '%s.%s.%s.' % (subnet_split[0], subnet_split[1], subnet_split[2])\n        subnets_dict['RANGE'] = '%s%i to %s%i' % (temp, first_ip, temp, last_ip)\n        subnets_dict['BCAST'] = '%s%i' % (temp, bcast_ip)\n        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']\n        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']\n        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']\n    elif int(cidr) >= 16:\n        subnet_split = subnet.split('.')\n        first_ip = int(subnet_split[2])\n        last_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))\n        bcast_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))\n        temp = '%s.%s.' % (subnet_split[0], subnet_split[1])\n        subnets_dict['RANGE'] = '%s%i.1 to %s%i.254' % (temp, first_ip, temp, last_ip)\n        subnets_dict['BCAST'] = '%s%i.255' % (temp, bcast_ip)\n        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']\n        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']\n        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']\n    elif int(cidr) >= 8:\n        subnet_split = subnet.split('.')\n        first_ip = int(subnet_split[1])\n        last_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))\n        bcast_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))\n        temp = '%s.' % (subnet_split[0],)\n        subnets_dict['RANGE'] = '%s%i.0.1 to %s%i.255.254' % (temp, first_ip, temp, last_ip)\n        subnets_dict['BCAST'] = '%s%i.255.255' % (temp, bcast_ip)\n        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']\n        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']\n        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']\n    elif int(cidr) >= 1:\n        subnet_split = subnet.split('.')\n        first_ip = int(subnet_split[0])\n        last_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))\n        bcast_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))\n        subnets_dict['RANGE'] = '%i.0.0.1 to %i.255.255.254' % (first_ip, last_ip)\n        subnets_dict['BCAST'] = '%i.255.255.255' % (bcast_ip,)\n        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']\n        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']\n        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']\n    return subnets_dict", "docstring": "Function to return a subnet range value from a IP address and CIDR pair\nArgs:\nip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1\ncidr: CIDR value of 1 to 32\n\nReturns: returns a dictionary of info", "source": "juraj-google-style"}
{"code": "def wait_for(port_num, timeout):\n    \n    logger.debug(\"wait for {port_num}\".format(**locals()))\n    t_start = time.time()\n    sleeps = 0.1\n    while time.time() - t_start < timeout:\n        try:\n            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            try:\n                s.connect((_host(), port_num))\n                return True\n            except (IOError, socket.error):\n                time.sleep(sleeps)\n        finally:\n            s.close()\n    return False", "docstring": "waits while process starts.\nArgs:\nport_num    - port number\ntimeout     - specify how long, in seconds, a command can take before times out.\nreturn True if process started, return False if not", "source": "juraj-google-style"}
{"code": "def str_to_inet(address):\n    try:\n        return socket.inet_pton(socket.AF_INET, address)\n    except socket.error:\n        return socket.inet_pton(socket.AF_INET6, address)", "docstring": "Convert an a string IP address to a inet struct\n\nArgs:\naddress (str): String representation of address\nReturns:\ninet: Inet network address", "source": "codesearchnet"}
{"code": "def add(x1, x2, output_shape=None, name=None):\n    output_shape = convert_to_shape(output_shape)\n    if (not isinstance(x2, Tensor)):\n        return ScalarAddOperation(x1, x2).outputs[0]\n    with tf.name_scope(name, default_name='add'):\n        (x1, x2) = binary_arguments_to_tensors(x1, x2)\n        return AddOperation(x1, x2, output_shape=_infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape)).outputs[0]", "docstring": "Binary addition with broadcsting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def resize(self, image: 'torch.Tensor', size: SizeDict, size_divisor: int=32, interpolation: 'F.InterpolationMode'=None, antialias: bool=True, **kwargs) -> 'torch.Tensor':\n    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR\n    if not size.shortest_edge:\n        raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')\n    shorter = size.shortest_edge\n    longer = int(1333 / 800 * shorter)\n    output_size = get_resize_output_image_size(image, shorter=shorter, longer=longer, size_divisor=size_divisor)\n    return F.resize(image, output_size, interpolation=interpolation, antialias=antialias)", "docstring": "Resize an image.\n\nResizes the shorter side of the image to `size[\"shortest_edge\"]` while preserving the aspect ratio. If the\nlonger side is larger than the max size `(int(`size[\"shortest_edge\"]` * 1333 / 800))`, the longer side is then\nresized to the max size while preserving the aspect ratio.\n\nArgs:\nimage (`torch.Tensor`):\nImage to resize.\nsize (`SizeDict`):\nDictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\nsize_divisor (`int`, *optional*, defaults to 32):\nThe image is resized to a size that is a multiple of this value.\nresample (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):\n`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.\n\nReturns:\n`torch.Tensor`: The resized image.", "source": "github-repos"}
{"code": "def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None):\n    LOG.info('Create custom IAM Policy for %s.', app)\n    services = pipeline_settings.get('services', {})\n    LOG.debug('Found requested services: %s', services)\n    services = auto_service(pipeline_settings=pipeline_settings, services=services)\n    if services:\n        credential = get_env_credential(env=env)\n        account_number = credential['accountId']\n    statements = []\n    for (service, value) in services.items():\n        if (value is True):\n            items = []\n        elif isinstance(value, str):\n            items = [value]\n        else:\n            items = value\n        rendered_statements = render_policy_template(account_number=account_number, app=app, env=env, group=group, items=items, pipeline_settings=pipeline_settings, region=region, service=service)\n        statements.extend(rendered_statements)\n    if statements:\n        policy_json = get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements))\n    else:\n        LOG.info('No services defined for %s.', app)\n        policy_json = None\n    return policy_json", "docstring": "Assemble IAM Policy for _app_.\n\nArgs:\napp (str): Name of Spinnaker Application.\nenv (str): Environment/Account in AWS\ngroup (str):A Application group/namespace\nregion (str): AWS region\npipeline_settings (dict): Settings from *pipeline.json*.\n\nReturns:\njson: Custom IAM Policy for _app_.\nNone: When no *services* have been defined in *pipeline.json*.", "source": "codesearchnet"}
{"code": "def is_packet_trace(path):\n    path = os.path.abspath(path)\n    if (not os.path.isfile(path)):\n        return False\n    try:\n        f = open(path, 'rb')\n    except:\n        return False\n    magic = f.read(4)\n    f.close()\n    return (magic in FILE_TYPE_HANDLER)", "docstring": "Determine if a file is a packet trace that is supported by this module.\n\nArgs:\npath (str): path to the trace file.\n\nReturns:\nbool: True if the file is a valid packet trace.", "source": "codesearchnet"}
{"code": "def blit(self, source, x=0, y=0, width=None, height=None, srcX=0, srcY=0, fg_alpha=1.0, bg_alpha=1.0):\n    assert isinstance(source, (Console, Window)), 'source muse be a Window or Console instance'\n    (x, y, width, height) = self._normalizeRect(x, y, width, height)\n    (srcX, srcY, width, height) = source._normalizeRect(srcX, srcY, width, height)\n    (srcX, srcY) = source._translate(srcX, srcY)\n    source = source.console\n    (x, y) = self._translate(x, y)\n    self = self.console\n    if (self == source):\n        tmp = Console(width, height)\n        _lib.TCOD_console_blit(source.console_c, srcX, srcY, width, height, tmp.console_c, 0, 0, fg_alpha, bg_alpha)\n        _lib.TCOD_console_blit(tmp.console_c, 0, 0, width, height, self.console_c, x, y, fg_alpha, bg_alpha)\n    else:\n        _lib.TCOD_console_blit(source.console_c, srcX, srcY, width, height, self.console_c, x, y, fg_alpha, bg_alpha)", "docstring": "Blit another console or Window onto the current console.\n\nBy default it blits the entire source to the topleft corner.\n\nArgs:\nsource (Union[tdl.Console, tdl.Window]): The blitting source.\nA console can blit to itself without any problems.\nx (int): x-coordinate of this console to blit on.\ny (int): y-coordinate of this console to blit on.\nwidth (Optional[int]): Width of the rectangle.\n\nCan be None to extend as far as possible to the\nbottom right corner of the blit area or can be a negative\nnumber to be sized reltive to the total size of the\nB{destination} console.\nheight (Optional[int]): Height of the rectangle.\nsrcX (int):  x-coordinate of the source region to blit.\nsrcY (int):  y-coordinate of the source region to blit.\nfg_alpha (float): The foreground alpha.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if self._credential_type:\n        self._credential_type.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Credential struct missing the credential type.')\n    if self._credential_value:\n        self._credential_value.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Credential struct missing the credential value.')\n    self.length = local_stream.length()\n    super(Credential, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Credential struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if either the credential type or value are not\ndefined.", "source": "codesearchnet"}
{"code": "def _get_string_match(self, key):\n    expression = '(?:\\\\s*)'.join(['^', 'define', '\\\\(', \"'{}'\".format(key), ',', \"\\\\'(.*)\\\\'\", '\\\\)', ';'])\n    pattern = re.compile(expression, re.MULTILINE)\n    return pattern.search(self._content)", "docstring": "Gets a MatchObject for the given key, assuming a string value.\n\nArgs:\nkey (str): Key of the property to look-up.\n\nReturn:\nMatchObject: The discovered match.", "source": "codesearchnet"}
{"code": "def plan_scripts(self):\n    if (not self.__plan_scripts):\n        self.__plan_scripts = PlanScripts(self.__connection)\n    return self.__plan_scripts", "docstring": "Gets the Plan Scripts API client.\n\nReturns:\nPlanScripts:", "source": "codesearchnet"}
{"code": "def on_test_batch_begin(self, batch, logs=None):", "docstring": "Called at the beginning of a batch in `evaluate` methods.\n\nAlso called at the beginning of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`Model` is set to `N`, this method will only be called every\n`N` batches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Currently no data is passed to this argument for this\nmethod but that may change in the future.", "source": "github-repos"}
{"code": "def _find(self, index):\n        \n        match = _PATTERN.search(self.text, index)\n        while self._max_tries > 0 and match is not None:\n            start = match.start()\n            candidate = self.text[start:match.end()]\n\n            \n            \n            \n            \n            candidate = self._trim_after_first_match(_SECOND_NUMBER_START_PATTERN,\n                                                     candidate)\n\n            match = self._extract_match(candidate, start)\n            if match is not None:\n                return match\n            \n            index = start + len(candidate)\n            self._max_tries -= 1\n            match = _PATTERN.search(self.text, index)\n        return None", "docstring": "Attempts to find the next subsequence in the searched sequence on or after index\nthat represents a phone number. Returns the next match, None if none was found.\n\nArguments:\nindex -- The search index to start searching at.\nReturns the phone number match found, None if none can be found.", "source": "juraj-google-style"}
{"code": "def get_tabular_stream(self, url, **kwargs):\n        \n        \n        self.close_response()\n        file_type = kwargs.get('file_type')\n        if file_type is not None:\n            kwargs['format'] = file_type\n            del kwargs['file_type']\n        try:\n            self.response = tabulator.Stream(url, **kwargs)\n            self.response.open()\n            return self.response\n        except TabulatorException as e:\n            raisefrom(DownloadError, 'Getting tabular stream for %s failed!' % url, e)", "docstring": "Get Tabulator stream.\n\nArgs:\nurl (str): URL to download\n**kwargs:\nheaders (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers\nfile_type (Optional[str]): Type of file. Defaults to inferring.\ndelimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.\n\nReturns:\ntabulator.Stream: Tabulator Stream object", "source": "juraj-google-style"}
{"code": "def _GetTitleFromChromeWebStore(self, extension_identifier):\n    \n    \n    if extension_identifier in self._extensions:\n      return self._extensions.get(extension_identifier)\n\n    page_content = self._GetChromeWebStorePage(extension_identifier)\n    if not page_content:\n      logger.warning(\n          '[{0:s}] no data returned for extension identifier: {1:s}'.format(\n              self.NAME, extension_identifier))\n      return None\n\n    first_line, _, _ = page_content.partition('\\n')\n    match = self._TITLE_RE.search(first_line)\n    name = None\n    if match:\n      title = match.group(1)\n      if title.startswith('Chrome Web Store - '):\n        name = title[19:]\n      elif title.endswith('- Chrome Web Store'):\n        name = title[:-19]\n\n    if not name:\n      self._extensions[extension_identifier] = 'UNKNOWN'\n      return None\n\n    self._extensions[extension_identifier] = name\n    return name", "docstring": "Retrieves the name of the extension from the Chrome store website.\n\nArgs:\nextension_identifier (str): Chrome extension identifier.\n\nReturns:\nstr: name of the extension or None.", "source": "juraj-google-style"}
{"code": "def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):\n    return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs)", "docstring": "Post-process the output of a vlm to decode the text.\n\nArgs:\ngenerated_outputs (`torch.Tensor` or `np.ndarray`):\nThe output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`\nor `(sequence_length,)`.\nskip_special_tokens (`bool`, *optional*, defaults to `True`):\nWhether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.\n**kwargs:\nAdditional arguments to be passed to the tokenizer's `batch_decode method`.\n\nReturns:\n`List[str]`: The decoded text.", "source": "github-repos"}
{"code": "def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):\n    countriesdata = cls.countriesdata(use_live=use_live)\n    iso3 = cls.get_iso3_country_code(country, use_live=use_live)\n    if (iso3 is not None):\n        return (iso3, True)\n\n    def remove_matching_from_list(wordlist, word_or_part):\n        for word in wordlist:\n            if (word_or_part in word):\n                wordlist.remove(word)\n    expanded_country_candidates = cls.expand_countryname_abbrevs(country)\n    match_strength = 0\n    matches = set()\n    for countryname in sorted(countriesdata['countrynames2iso3']):\n        for candidate in expanded_country_candidates:\n            (simplified_country, removed_words) = cls.simplify_countryname(candidate)\n            if (simplified_country in countryname):\n                words = get_words_in_sentence(countryname)\n                new_match_strength = 0\n                if simplified_country:\n                    remove_matching_from_list(words, simplified_country)\n                    new_match_strength += 32\n                for word in removed_words:\n                    if (word in countryname):\n                        remove_matching_from_list(words, word)\n                        new_match_strength += 4\n                    elif (word in cls.major_differentiators):\n                        new_match_strength -= 16\n                    else:\n                        new_match_strength -= 1\n                for word in words:\n                    if (word in cls.major_differentiators):\n                        new_match_strength -= 16\n                    else:\n                        new_match_strength -= 1\n                iso3 = countriesdata['countrynames2iso3'][countryname]\n                if (new_match_strength > match_strength):\n                    match_strength = new_match_strength\n                    matches = set()\n                if (new_match_strength == match_strength):\n                    matches.add(iso3)\n    if ((len(matches) == 1) and (match_strength > 16)):\n        return (matches.pop(), False)\n    for (iso3, regex) in countriesdata['aliases'].items():\n        index = re.search(regex, country.upper())\n        if (index is not None):\n            return (iso3, False)\n    if (exception is not None):\n        raise exception\n    return (None, False)", "docstring": "Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second\nshowing if the match is exact or not.\n\nArgs:\ncountry (str): Country for which to get ISO3 code\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nTuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).", "source": "codesearchnet"}
{"code": "def convert_coco_poly_to_mask(segmentations, height: int, width: int, device: torch.device) -> torch.Tensor:\n    try:\n        from pycocotools import mask as coco_mask\n    except ImportError:\n        raise ImportError('Pycocotools is not installed in your environment.')\n    masks = []\n    for polygons in segmentations:\n        rles = coco_mask.frPyObjects(polygons, height, width)\n        mask = coco_mask.decode(rles)\n        if len(mask.shape) < 3:\n            mask = mask[..., None]\n        mask = torch.as_tensor(mask, dtype=torch.uint8, device=device)\n        mask = torch.any(mask, axis=2)\n        masks.append(mask)\n    if masks:\n        masks = torch.stack(masks, axis=0)\n    else:\n        masks = torch.zeros((0, height, width), dtype=torch.uint8, device=device)\n    return masks", "docstring": "Convert a COCO polygon annotation to a mask.\n\nArgs:\nsegmentations (`List[List[float]]`):\nList of polygons, each polygon represented by a list of x-y coordinates.\nheight (`int`):\nHeight of the mask.\nwidth (`int`):\nWidth of the mask.", "source": "github-repos"}
{"code": "def concat(input_layer, concat_dim, other_tensors=None):\n    if input_layer.is_sequence():\n        all_tensors = input_layer.sequence\n        all_tensors.extend((other_tensors or []))\n    else:\n        all_tensors = [input_layer]\n        if (other_tensors is None):\n            raise ValueError('Other Tensors must be supplied.')\n        all_tensors.extend(other_tensors)\n    if (not all_tensors):\n        return prettytensor.wrap_sequence([])\n    else:\n        return tf.concat(all_tensors, concat_dim)", "docstring": "Concatenates input PrettyTensor with other_tensors along the specified dim.\n\nThis adds the Pretty Tensor passed via input_layer to the front of the list of\ntensors to concat.\n\nArgs:\ninput_layer: The input layer.\nconcat_dim: The dimension along which to concat.\nother_tensors: The tensors to concatenate with as an iterable or None if\nthis is called on a sequence.\nReturns:\nA new PrettyTensor.\nRaises:\nValueError: If other_tensors is None and this is not a sequence.", "source": "codesearchnet"}
{"code": "def add_oxidation_state_by_guess(self, **kwargs):\n        \n        oxid_guess = self.composition.oxi_state_guesses(**kwargs)\n        oxid_guess = oxid_guess or \\\n                     [dict([(e.symbol, 0) for e in self.composition])]\n        self.add_oxidation_state_by_element(oxid_guess[0])", "docstring": "Decorates the structure with oxidation state, guessing\nusing Composition.oxi_state_guesses()\n\nArgs:\n**kwargs: parameters to pass into oxi_state_guesses()", "source": "juraj-google-style"}
{"code": "def _acquire_given_subnet(self, uuid_path, subnet):\n    lease = self.create_lease_object_from_subnet(subnet)\n    self._take_lease(lease, uuid_path)\n    return lease.to_ip_network()", "docstring": "Try to create a lease for subnet\n\nArgs:\nuuid_path (str): Path to the uuid file of a :class:`lago.Prefix`\nsubnet (str): dotted ipv4 subnet\n(for example ```192.168.200.0```)\n\nReturns:\nnetaddr.IPNetwork: Which represents the selected subnet\n\nRaises:\nLagoSubnetLeaseException: If the requested subnet is not in the\nrange of this store or its already been taken", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists snapshots.\n\nArgs:\nrequest: (DataflowProjectsLocationsSnapshotsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListSnapshotsResponse) The response message.", "source": "github-repos"}
{"code": "def _convert_dict_inputs(inputs, tensor_info_map):\n    dict_inputs = _prepare_dict_inputs(inputs, tensor_info_map)\n    return tensor_info.convert_dict_to_compatible_tensor(dict_inputs, tensor_info_map)", "docstring": "Converts from inputs into dict of input tensors.\n\nThis handles:\n- putting inputs into a dict, per _prepare_dict_inputs(),\n- converting all input values into tensors compatible with the\nexpected input tensor (dtype, shape).\n- check sparse/non-sparse tensor types.\n\nArgs:\ninputs: inputs fed to Module.__call__().\ntensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`\ndescribing the signature inputs.\n\nReturns:\nA dict of tensors to feed to the signature instantiation.\n\nRaises:\nTypeError: If it fails to convert the input values into a dict of tensors\nto feed to the signature instantiation.", "source": "codesearchnet"}
{"code": "def __init__(self, seed, salt):\n    \n    self._seed = seed.original_seed if isinstance(seed, SeedStream) else seed\n    self._salt = salt\n    self._counter = 0", "docstring": "Initializes a `SeedStream`.\n\nArgs:\nseed: Any Python object convertible to string, supplying the\ninitial entropy.  If `None`, operations seeded with seeds\ndrawn from this `SeedStream` will follow TensorFlow semantics\nfor not being seeded.\nsalt: Any Python object convertible to string, supplying\nauxiliary entropy.  Must be unique across the Distributions\nand TensorFlow Probability code base.  See class docstring for\nrationale.", "source": "juraj-google-style"}
{"code": "def CreateAdGroup(client, campaign_id):\n    ad_group_service = client.GetService('AdGroupService', 'v201809')\n    ad_group = {'name': 'Dynamic remarketing ad group', 'campaignId': campaign_id, 'status': 'ENABLED'}\n    operations = [{'operator': 'ADD', 'operand': ad_group}]\n    return ad_group_service.mutate(operations)['value'][0]", "docstring": "Creates a dynamic remarketing campaign.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: an int campaign ID.\n\nReturns:\nThe ad group that was successfully created.", "source": "codesearchnet"}
{"code": "def get_rbounds(step):\n    if (step.geom is not None):\n        rcmb = step.geom.rcmb\n    else:\n        rcmb = step.sdat.par['geometry']['r_cmb']\n        if (step.sdat.par['geometry']['shape'].lower() == 'cartesian'):\n            rcmb = 0\n    rcmb = max(rcmb, 0)\n    return (rcmb, (rcmb + 1))", "docstring": "Radial or vertical position of boundaries.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of floats: radial or vertical positions of boundaries of the\ndomain.", "source": "codesearchnet"}
{"code": "def create_reader_of_type(type_name):\n    \n    readers = available_readers()\n\n    if type_name not in readers.keys():\n        raise UnknownReaderException('Unknown reader: %s' % (type_name,))\n\n    return readers[type_name]()", "docstring": "Create an instance of the reader with the given name.\n\nArgs:\ntype_name: The name of a reader.\n\nReturns:\nAn instance of the reader with the given type.", "source": "juraj-google-style"}
{"code": "def uninstalled(name):\n    \n    ret = {'name': name,\n           'changes': {},\n           'result': False,\n           'comment': ''}\n\n    \n    if not __salt__['wusa.is_installed'](name):\n        ret['result'] = True\n        ret['comment'] = '{0} already uninstalled'.format(name)\n        return ret\n\n    \n    if __opts__['test'] is True:\n        ret['result'] = None\n        ret['comment'] = '{0} would be uninstalled'.format(name)\n        ret['result'] = None\n        return ret\n\n    \n    __salt__['wusa.uninstall'](name)\n\n    \n    if not __salt__['wusa.is_installed'](name):\n        ret['comment'] = '{0} was uninstalled'.format(name)\n        ret['changes'] = {'old': True, 'new': False}\n        ret['result'] = True\n    else:\n        ret['comment'] = '{0} failed to uninstall'.format(name)\n\n    return ret", "docstring": "Ensure an update is uninstalled from the minion\n\nArgs:\n\nname(str):\nName of the Windows KB (\"KB123456\")\n\nExample:\n\n.. code-block:: yaml\n\nKB123456:\nwusa.uninstalled", "source": "juraj-google-style"}
{"code": "def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):\n  \n\n  batch_size = shape_list(mu)[0]\n  prior_distribution = tfp.distributions.Normal(\n      mu_p, tf.exp(tf.multiply(0.5, log_var_p)))\n  posterior_distribution = tfp.distributions.Normal(\n      mu, tf.exp(tf.multiply(0.5, log_var)))\n  kld = tfp.distributions.kl_divergence(posterior_distribution,\n                                        prior_distribution)\n  return tf.reduce_sum(kld) / to_float(batch_size)", "docstring": "KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).\n\nArgs:\nmu: mu parameter of the distribution.\nlog_var: log(var) parameter of the distribution.\nmu_p: optional mu from a learned prior distribution\nlog_var_p: optional log(var) from a learned prior distribution\nReturns:\nthe KL loss.", "source": "juraj-google-style"}
{"code": "def __parameter_enum(self, param):\n    \n    if isinstance(param, messages.EnumField):\n      return [enum_entry[0] for enum_entry in sorted(\n          param.type.to_dict().items(), key=lambda v: v[1])]", "docstring": "Returns enum descriptor of a parameter if it is an enum.\n\nAn enum descriptor is a list of keys.\n\nArgs:\nparam: A simple field.\n\nReturns:\nThe enum descriptor for the field, if it's an enum descriptor, else\nreturns None.", "source": "juraj-google-style"}
{"code": "def _get_qubit_index(self, qubit):\n        \n        for i, bit in enumerate(self.qubit_list):\n            if qubit == bit:\n                qindex = i\n                break\n        else:\n            raise exceptions.VisualizationError(\"unable to find bit for operation\")\n        return qindex", "docstring": "Get the index number for a quantum bit\nArgs:\nqubit (tuple): The tuple of the bit of the form\n(register_name, bit_number)\nReturns:\nint: The index in the bit list\nRaises:\nVisualizationError: If the bit isn't found", "source": "juraj-google-style"}
{"code": "def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100):\n        \n\n        url = nurls['setProperty']\n\n        data = {'userid': self.user_id,\n                'useridx': self.useridx,\n                'tagtype': tagtype,\n                'startnum': startnum,\n                'pagingrow': pagingrow,\n                }\n\n        r = self.session.post(url = url, data = data)\n\n        return resultManager(r.text)", "docstring": "GetMusicAlbumList\n\nArgs:\ntagtype = ???\nstartnum\npagingrow\n\nReturns:\n???\nFalse: Failed to get property", "source": "juraj-google-style"}
{"code": "def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):\n  \n  thrown_out_count = 0\n  source_target_output = []\n\n  if not max_equal_to_diff_ratio:\n    return source_target_input, thrown_out_count\n\n  for src_tgt in source_target_input:\n    opcodes = fast_match_sequences(*src_tgt)\n    diff_char_count = 0\n    equal_char_count = 0\n    for tag, i1, i2, j1, j2 in opcodes:\n      if tag == \"diff\":\n        \n        diff_char_count += max(i2 - i1, j2 - j1)\n      else:\n        equal_char_count += i2 - i1\n    if diff_char_count <= max_equal_to_diff_ratio * equal_char_count:\n      source_target_output.append(src_tgt)\n    else:\n      thrown_out_count += 1\n  return source_target_output, thrown_out_count", "docstring": "Filter out examples that exceed max_edit_ratio between source and target.\n\nArgs:\nsource_target_input:     a list of [source, target] pairs\nmax_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars\nbetween source and target\n\nReturns:\nsource_target_output:    filtered subset of [source, target] input pairs\nthrown_out_count:        number of examples filtered out", "source": "juraj-google-style"}
{"code": "def open(self, filename):\n    if filename:\n        self.binary = BinaryFile(filename)\n        self.text_section = self.binary.text_section\n        self._load(arch_mode=self.binary.architecture_mode)", "docstring": "Open a file for analysis.\n\nArgs:\nfilename (str): Name of an executable file.", "source": "codesearchnet"}
{"code": "def _init_vocab_from_file(self, filename):\n    \n    with tf.gfile.Open(filename) as f:\n      tokens = [token.strip() for token in f.readlines()]\n\n    def token_gen():\n      for token in tokens:\n        yield token\n\n    self._init_vocab(token_gen(), add_reserved_tokens=False)", "docstring": "Load vocab from a file.\n\nArgs:\nfilename: The file to load vocabulary from.", "source": "juraj-google-style"}
{"code": "def from_file_obj(cls, fp):\n        \n        log.debug(\"Parsing email from file object\")\n        try:\n            fp.seek(0)\n        except IOError:\n            \n            \n            pass\n        finally:\n            s = fp.read()\n\n        return cls.from_string(s)", "docstring": "Init a new object from a file-like object.\nNot for Outlook msg.\n\nArgs:\nfp (file-like object): file-like object of raw email\n\nReturns:\nInstance of MailParser", "source": "juraj-google-style"}
{"code": "def check_tweet(tweet, validation_checking=False):\n    if ('id' not in tweet):\n        raise NotATweetError(\"This text has no 'id' key\")\n    original_format = is_original_format(tweet)\n    if original_format:\n        _check_original_format_tweet(tweet, validation_checking=validation_checking)\n    else:\n        _check_activity_streams_tweet(tweet, validation_checking=validation_checking)\n    return original_format", "docstring": "Ensures a tweet is valid and determines the type of format for the tweet.\n\nArgs:\ntweet (dict/Tweet): the tweet payload\nvalidation_checking (bool): check for valid key structure in a tweet.", "source": "codesearchnet"}
{"code": "def not_modified(cls, errors=None):\n        \n        if cls.expose_status:  \n            cls.response.content_type = 'application/json'\n            cls.response._status_line = '304 Not Modified'\n\n        return cls(304, None, errors).to_json", "docstring": "Shortcut API for HTTP 304 `Not Modified` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "juraj-google-style"}
{"code": "def add_candidate_peer_endpoints(self, peer_endpoints):\n        \n        with self._lock:\n            for endpoint in peer_endpoints:\n                if endpoint not in self._candidate_peer_endpoints:\n                    self._candidate_peer_endpoints.append(endpoint)", "docstring": "Adds candidate endpoints to the list of endpoints to\nattempt to peer with.\n\nArgs:\npeer_endpoints ([str]): A list of public uri's which the\nvalidator can attempt to peer with.", "source": "juraj-google-style"}
{"code": "def capture(self, payment_id, amount, data={}, **kwargs):\n        \n        url = \"{}/{}/capture\".format(self.base_url, payment_id)\n        data['amount'] = amount\n        return self.post_url(url, data, **kwargs)", "docstring": "Capture Payment for given Id\n\nArgs:\npayment_id : Id for which payment object has to be retrieved\nAmount : Amount for which the payment has to be retrieved\n\nReturns:\nPayment dict after getting captured", "source": "juraj-google-style"}
{"code": "def writegroup(self, auth, entries, defer=False):\n    return self._call('writegroup', auth, [entries], defer)", "docstring": "Writes the given values for the respective resources in the list, all writes have same\ntimestamp.\n\nArgs:\nauth: cik for authentication.\nentries: List of key, value lists. eg. [[key, value], [k,v],,,]", "source": "codesearchnet"}
{"code": "def mesh_element(script, sample_num=1000, element='VERT'):\n    if (element.lower() == 'vert'):\n        element_num = 0\n    elif (element.lower() == 'edge'):\n        element_num = 1\n    elif (element.lower() == 'face'):\n        element_num = 2\n    filter_xml = ''.join(['  <filter name=\"Mesh Element Subsampling\">\\n', '    <Param name=\"Sampling\" ', 'value=\"{:d}\" '.format(element_num), 'description=\"Element to sample:\" ', 'enum_val0=\"Vertex\" ', 'enum_val1=\"Edge\" ', 'enum_val2=\"Face\" ', 'enum_cardinality=\"3\" ', 'type=\"RichEnum\" ', '/>\\n', '    <Param name=\"SampleNum\" ', 'value=\"{:d}\" '.format(sample_num), 'description=\"Number of samples\" ', 'type=\"RichInt\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    if isinstance(script, FilterScript):\n        script.add_layer('Sampled Mesh')\n    return None", "docstring": "Create a new layer populated with a point sampling of the current mesh,\nat most one sample for each element of the mesh is created.\n\nSamples are taking in a uniform way, one for each element\n(vertex/edge/face); all the elements have the same probabilty of being\nchoosen.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nsample_num (int): The desired number of elements that must be chosen.\nBeing a subsampling of the original elements if this number should\nnot be larger than the number of elements of the original mesh.\nelement (enum in ['VERT', 'EDGE', 'FACE']): Choose what mesh element\nwill be used for the subsampling. At most one point sample will\nbe added for each one of the chosen elements\n\nLayer stack:\nCreates new layer 'Sampled Mesh'. Current layer is changed to the new\nlayer.\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def get_updates_for(self, inputs):\n    if inputs is None:\n        return [u for u in self.updates if u._unconditional_update]\n    updates = [u for u in self.updates if not u._unconditional_update]\n    inputs = nest.flatten(inputs)\n    reachable = tf_utils.get_reachable_from_inputs(inputs, updates)\n    return [u for u in updates if u in reachable]", "docstring": "Retrieves updates relevant to a specific set of inputs.\n\nArgs:\ninputs: Input tensor or list/tuple of input tensors.\n\nReturns:\nList of update ops of the layer that depend on `inputs`.", "source": "github-repos"}
{"code": "def all_tokens(self, delimiter=' '):\n        \n        tokens = set()\n\n        for label in self:\n            tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))\n\n        return tokens", "docstring": "Return a list of all tokens occurring in the label-list.\n\nArgs:\ndelimiter (str): The delimiter used to split labels into tokens\n(see :meth:`audiomate.annotations.Label.tokenized`).\n\nReturns:\n:class:`set`: A set of distinct tokens.", "source": "juraj-google-style"}
{"code": "def remove_attribute(self, attribute: str) -> None:\n    attr_index = self.__attr_index(attribute)\n    if (attr_index is not None):\n        self.yaml_node.value.pop(attr_index)", "docstring": "Remove an attribute from the node.\n\nUse only if is_mapping() returns True.\n\nArgs:\nattribute: The name of the attribute to remove.", "source": "codesearchnet"}
{"code": "def get_distance(self, i, j, jimage=None):\n    return self[i].distance(self[j], jimage)", "docstring": "Get distance between site i and j assuming periodic boundary\nconditions. If the index jimage of two sites atom j is not specified it\nselects the jimage nearest to the i atom and returns the distance and\njimage indices in terms of lattice vector translations if the index\njimage of atom j is specified it returns the distance between the i\natom and the specified jimage atom.\n\nArgs:\ni (int): Index of first site\nj (int): Index of second site\njimage: Number of lattice translations in each lattice direction.\nDefault is None for nearest image.\n\nReturns:\ndistance", "source": "codesearchnet"}
{"code": "def _RegisterProcess(self, process):\n    \n    if process is None:\n      raise ValueError('Missing process.')\n\n    if process.pid in self._processes_per_pid:\n      raise KeyError(\n          'Already managing process: {0!s} (PID: {1:d})'.format(\n              process.name, process.pid))\n\n    self._processes_per_pid[process.pid] = process", "docstring": "Registers a process with the engine.\n\nArgs:\nprocess (MultiProcessBaseProcess): process.\n\nRaises:\nKeyError: if the process is already registered with the engine.\nValueError: if the process is missing.", "source": "juraj-google-style"}
{"code": "def _initialize_tensor_name_to_ids(self):\n    tensor_name_to_ids = {}\n    for (i, operation) in enumerate(self._operations):\n        for (j, tensor) in enumerate(operation.outputs):\n            tensor_name_to_ids[tensor.name] = (i, j)\n    return tensor_name_to_ids", "docstring": "Initializer for _tensor_name_to_ids.\n\nReturns:\na {string: (int, int)}, mapping the name of tensor T to the index of T's\noperation in _operations and T's index in T's operation's outputs.", "source": "codesearchnet"}
{"code": "def Verify(self):\n    if (not (self.Hash.ToBytes() == GetGenesis().Hash.ToBytes())):\n        return False\n    bc = GetBlockchain()\n    if (not bc.ContainsBlock(self.Index)):\n        return False\n    if (self.Index > 0):\n        prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())\n        if (prev_header is None):\n            return False\n        if ((prev_header.Index + 1) != self.Index):\n            return False\n        if (prev_header.Timestamp >= self.Timestamp):\n            return False\n    if (not Helper.VerifyScripts(self)):\n        return False\n    return True", "docstring": "Verify block using the verification script.\n\nReturns:\nbool: True if valid. False otherwise.", "source": "codesearchnet"}
{"code": "def usufyToTextExport(d, fPath=None):\n    if (d == []):\n        return '+------------------+\\n| No data found... |\\n+------------------+'\n    import pyexcel as pe\n    import pyexcel.ext.text as text\n    if (fPath == None):\n        isTerminal = True\n    else:\n        isTerminal = False\n    try:\n        oldData = get_data(fPath)\n    except:\n        oldData = {'OSRFramework': []}\n    tabularData = _generateTabularData(d, {'OSRFramework': [[]]}, True, canUnicode=False)\n    sheet = pe.Sheet(tabularData['OSRFramework'])\n    sheet.name = (('Profiles recovered (' + getCurrentStrDatetime()) + ').')\n    sheet.name_columns_by_row(0)\n    text.TABLEFMT = 'grid'\n    try:\n        with open(fPath, 'w') as oF:\n            oF.write(str(sheet))\n    except Exception as e:\n        return unicode(sheet)", "docstring": "Workaround to export to a .txt file or to show the information.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file. If None was provided, it will\nassume that it has to print it.\n\nReturns:\n--------\nunicode: It sometimes returns a unicode representation of the Sheet\nreceived.", "source": "codesearchnet"}
{"code": "def _populate_from_repo(self, example: Example):\n    path = Path(example.filepath)\n    example_folder = path.parent\n    log_file_path = example_folder / self.LOGS_FILENAME\n    if log_file_path.exists():\n        example.logs = log_file_path.read_text()\n    graph_file_path = example_folder / self.GRAPH_FILENAME\n    if graph_file_path.exists():\n        example.graph = graph_file_path.read_text()\n    output_file_path = example_folder / self.OUTPUT_FILENAME\n    if output_file_path.exists():\n        example.output = output_file_path.read_text()\n    compile_output_file_path = example_folder / self.COMPILE_OUTPUT_FILENAME\n    if compile_output_file_path.exists():\n        example.compile_output = compile_output_file_path.read_text()", "docstring": "Populate fields of the example reading them from the repository.\nArgs:\nexample: beam example that should be verified", "source": "github-repos"}
{"code": "def ParseFileDownloadedRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = ChromeHistoryFileDownloadedEventData()\n    event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.received_bytes = self._GetRowValue(\n        query_hash, row, 'received_bytes')\n    event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n\n    timestamp = self._GetRowValue(query_hash, row, 'start_time')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a file downloaded row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def _torch_extract_fbank_features(self, waveform: 'torch.FloatTensor', audio_lengths: 'torch.Tensor', device: str='cpu') -> 'torch.FloatTensor':\n    fft_window = torch.hamming_window(self.win_length, periodic=False, device=device, dtype=torch.float64)\n    batch_size = waveform.shape[0]\n    frames = waveform.unfold(-1, self.win_length, self.hop_length)\n    if batch_size > 1:\n        frames = frames.clone()\n        to_mask_batch_idxs = torch.arange(batch_size)[audio_lengths != audio_lengths.max()]\n        if to_mask_batch_idxs.numel() > 0:\n            batch_idxs_down = (audio_lengths[to_mask_batch_idxs] - self.win_length) \n            batch_idxs_up = audio_lengths[to_mask_batch_idxs] \n            offset_idx = batch_idxs_down.min()\n            max_idx = batch_idxs_up.max()\n            mask = torch.arange(max_idx - offset_idx, device=device).expand(to_mask_batch_idxs.shape[0], -1)\n            mask = ((batch_idxs_down - offset_idx).unsqueeze(1) <= mask) & (mask < (batch_idxs_up - offset_idx).unsqueeze(1))\n            mask = mask.unsqueeze(-1).expand(-1, -1, self.win_length)\n            masked_frames = frames[to_mask_batch_idxs, offset_idx:max_idx].masked_fill_(mask, 0)\n            frames[to_mask_batch_idxs, offset_idx:max_idx] = masked_frames\n    frames_prev = torch.roll(frames, 1, dims=-1)\n    frames_prev[:, :, 0] = frames_prev[:, :, 1]\n    frames = (frames - self.preemphasis * frames_prev) * 32768\n    S = torch.fft.rfft(fft_window * frames.view(-1, self.win_length), n=self.n_fft, dim=1)\n    S = S.view(frames.shape[0], -1, S.shape[-1])\n    S = S.to(torch.complex64)\n    spec = torch.abs(S)\n    spec_power = spec ** 2\n    mel_filters = torch.from_numpy(self.mel_filters).to(device, torch.float32)\n    log_spec = torch.clamp(spec_power @ mel_filters, min=1.0)\n    log_spec = torch.log(log_spec)\n    return log_spec", "docstring": "Compute the log mel-scaled spectrogram of batched waveforms using PyTorch's FFT implementation.\n\nArgs:\nwaveform (torch.FloatTensor` of shape `(batch_size, max_audio_length)`):\nThe batched waveforms.\naudio_lengths (`torch.Tensor` of shape `(batch_size,)`):\nThe lengths of the waveforms along the max_audio_length dimension.\ndevice (`str`, *optional*, defaults to \"cpu\"):\nThe device to run the computation on. (e.g., \"cpu\", \"cuda\")\n\nReturns:\n`torch.FloatTensor` of shape `(batch_size, max_feature_length, feature_size)`:\nThe log mel-scaled spectrogram of the batched waveforms.", "source": "github-repos"}
{"code": "def detect_alias_config_change(self):\n    if self.parse_error():\n        return False\n    alias_config_sha1 = hashlib.sha1(self.alias_config_str.encode('utf-8')).hexdigest()\n    if (alias_config_sha1 != self.alias_config_hash):\n        self.alias_config_hash = alias_config_sha1\n        return True\n    return False", "docstring": "Change if the alias configuration has changed since the last run.\n\nReturns:\nFalse if the alias configuration file has not been changed since the last run.\nOtherwise, return True.", "source": "codesearchnet"}
{"code": "def while_loop(cond_fn, body_fn, inputs, num_loop_vars=None, has_accumulators=False, **kwargs):\n    if (num_loop_vars is None):\n        return WhileLoopOperation(cond_fn, body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs\n    assert (num_loop_vars > 0)\n    extra_inputs = inputs[num_loop_vars:]\n    my_vars = []\n    for (i, x) in enumerate(extra_inputs):\n        my_vars.append(get_variable(x.mesh, ('loop_var_%d' % i), x.shape, initializer=tf.zeros_initializer(), dtype=x.dtype, collections=[tf.GraphKeys.LOCAL_VARIABLES]))\n    my_vars = tuple(my_vars)\n    first_input = depend(inputs[0], [assign(var, x) for (var, x) in zip(my_vars, extra_inputs)])\n    inputs = ([first_input] + inputs[1:num_loop_vars])\n\n    def my_cond_fn(*inputs):\n        return cond_fn(*(inputs + my_vars))\n\n    def my_body_fn(*inputs):\n        outputs = tuple(body_fn(*(inputs + my_vars)))\n        extra_outputs = outputs[num_loop_vars:]\n        first_output = depend(outputs[0], [assign(var, x) for (var, x) in zip(my_vars, extra_outputs)])\n        outputs = ((first_output,) + outputs[1:num_loop_vars])\n        return outputs\n    return WhileLoopOperation(my_cond_fn, my_body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs", "docstring": "While Loop.\n\nSee comments above for WhileLoopOperation\n\nnum_loop_vars is a hack for the multi-gpu setup.  In this case, loops\nare generally slow, as all loop variables are placed on device.  By setting\nnum_loop_vars=k, then all of the loop variables except for the first k\nare handled as mtf Variables instead of loop variables, using explicit\nupdates and control dependencies.  In this case, we only return the\nfirst num_loop_vars outputs.  Do not use this option on TPU, since it\nis unnecessary and also produces incorrect results, since xla does not\nrespect control dependencies.\n\nArgs:\ncond_fn: a function from n Tensors to scalar boolean Tensor\nbody_fn: a function from n Tensors to list of n Tensors\ninputs: a list of n Tensors\nnum_loop_vars: an optional integer.\nhas_accumulators: a boolean\n**kwargs: additional kwargs passed to tf.while_loop\n\nReturns:\na list of n Tensors.", "source": "codesearchnet"}
{"code": "def is_distributed(partition_column, lower_bound, upper_bound):\n    \n    if (\n        (partition_column is not None)\n        and (lower_bound is not None)\n        and (upper_bound is not None)\n    ):\n        if upper_bound > lower_bound:\n            return True\n        else:\n            raise InvalidArguments(\"upper_bound must be greater than lower_bound.\")\n    elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):\n        return False\n    else:\n        raise InvalidArguments(\n            \"Invalid combination of partition_column, lower_bound, upper_bound.\"\n            \"All these arguments should be passed (distributed) or none of them (standard pandas).\"\n        )", "docstring": "Check if is possible distribute a query given that args\n\nArgs:\npartition_column: column used to share the data between the workers\nlower_bound: the minimum value to be requested from the partition_column\nupper_bound: the maximum value to be requested from the partition_column\n\nReturns:\nTrue for distributed or False if not", "source": "juraj-google-style"}
{"code": "def experimental_write_bytecode(filename, mlir_txt):\n    pywrap_mlir.experimental_write_bytecode(filename, mlir_txt)", "docstring": "Writes an MLIR module out as bytecode.\n\nArgs:\nfilename: The filename to write to.\nmlir_txt: The MLIR module in textual format.", "source": "github-repos"}
{"code": "def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False):\n    fname = os.path.join(data_dir, GOLD_STANDARD_DIRNAME, (fileroot + GOLD_STANDARD_EXT))\n    encodings = ((encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1'))\n    for encoding in encodings:\n        try:\n            with io.open(fname, mode='rt', encoding=encoding) as f:\n                gold_standard = f.read()\n            break\n        except (UnicodeDecodeError, UnicodeError):\n            gold_standard = None\n    if (not gold_standard):\n        return [u'', u'']\n    if (not cetr):\n        content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1)\n        if (len(content_comments) == 1):\n            content_comments = [content_comments[0], u'']\n    else:\n        tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())\n        content_comments = [u' '.join(text_from_subtree(tree)), u'']\n    content_comments = [ftfy.fix_encoding(content_comments[0]).strip(), ftfy.fix_encoding(content_comments[1]).strip()]\n    return content_comments", "docstring": "Read the gold standard content file corresponding to identifier ``fileroot``\nin the gold standard directory below the root ``data_dir``.\n\nArgs:\ndata_dir (str)\nfileroot (str)\nencoding (str)\ncetr (bool): if True, assume no comments and parse the gold standard\nto remove tags\n\nReturns:\nList[str, str]: contents string and comments string, respectively", "source": "codesearchnet"}
{"code": "def AsJsonString(self):\n    return json.dumps(self.AsDict(dt=False), sort_keys=True)", "docstring": "A JSON string representation of this User instance.\n\nReturns:\nA JSON string representation of this User instance", "source": "codesearchnet"}
{"code": "def local_symbol_table(imports=None, symbols=()):\n    \n    return SymbolTable(\n        table_type=LOCAL_TABLE_TYPE,\n        symbols=symbols,\n        imports=imports\n    )", "docstring": "Constructs a local symbol table.\n\nArgs:\nimports (Optional[SymbolTable]): Shared symbol tables to import.\nsymbols (Optional[Iterable[Unicode]]): Initial local symbols to add.\n\nReturns:\nSymbolTable: A mutable local symbol table with the seeded local symbols.", "source": "juraj-google-style"}
{"code": "def print_tools(self, buf=sys.stdout, verbose=False, context_name=None):\n        \n        def _get_row(entry):\n            context_name_ = entry[\"context_name\"]\n            tool_alias = entry[\"tool_alias\"]\n            tool_name = entry[\"tool_name\"]\n            properties = []\n            col = None\n\n            variant = entry[\"variant\"]\n            if isinstance(variant, set):\n                properties.append(\"(in conflict)\")\n                col = critical\n                if verbose:\n                    package = \", \".join(x.qualified_package_name for x in variant)\n                else:\n                    v = iter(variant).next()\n                    package = \"%s (+%d more)\" % (v.qualified_package_name,\n                                                 len(variant) - 1)\n            else:\n                package = variant.qualified_package_name\n\n            if tool_name == tool_alias:\n                tool_name = \"-\"\n            else:\n                properties.append(\"(aliased)\")\n                if col is None:\n                    col = alias_col\n\n            msg = \" \".join(properties)\n            row = [tool_alias, tool_name, package, context_name_, msg]\n            return row, col\n\n        if context_name:\n            self._context(context_name)  \n            context_names = [context_name]\n        else:\n            context_names = sorted(self.contexts.iterkeys())\n\n        rows = [[\"TOOL\", \"ALIASING\", \"PACKAGE\", \"CONTEXT\", \"\"],\n                [\"----\", \"--------\", \"-------\", \"-------\", \"\"]]\n        colors = [None, None]\n\n        entries_dict = defaultdict(list)\n        for d in self.get_tools().itervalues():\n            entries_dict[d[\"context_name\"]].append(d)\n\n        if verbose:\n            \n            for d in self.hidden_tools:\n                d_ = d.copy()\n                d_[\"hidden\"] = True\n                entries_dict[d[\"context_name\"]].append(d_)\n\n            \n            for docs in self.tool_conflicts.itervalues():\n                for d in docs:\n                    d_ = d.copy()\n                    d_[\"conflicting\"] = True\n                    entries_dict[d[\"context_name\"]].append(d_)\n\n        for i, context_name in enumerate(context_names):\n            entries = entries_dict.get(context_name, [])\n            if entries:\n                if i:\n                    rows.append(('', '', '', '', ''))\n                    colors.append(None)\n\n                entries = sorted(entries, key=lambda x: x[\"tool_alias\"].lower())\n                for entry in entries:\n                    row, col = _get_row(entry)\n                    if \"hidden\" in entry:\n                        row[-1] = \"(hidden)\"\n                        rows.append(row)\n                        colors.append(warning)\n                    elif \"conflicting\" in entry:\n                        row[-1] = \"(not visible)\"\n                        rows.append(row)\n                        colors.append(warning)\n                    else:\n                        rows.append(row)\n                        colors.append(col)\n\n        if rows:\n            _pr = Printer(buf)\n            for col, line in zip(colors, columnise(rows)):\n                _pr(line, col)\n        else:\n            _pr(\"No tools available.\")", "docstring": "Print table of tools available in the suite.\n\nArgs:\ncontext_name (str): If provided, only print the tools from this\ncontext.", "source": 
"juraj-google-style"}
{"code": "def parse_section_links(self, section_title):\n    soup = BeautifulSoup(self.html, 'html.parser')\n    headlines = soup.find_all('span', {'class': 'mw-headline'})\n    tmp_soup = BeautifulSoup(section_title, 'html.parser')\n    tmp_sec_title = tmp_soup.get_text().lower()\n    id_tag = None\n    for headline in headlines:\n        tmp_id = headline.text\n        if (tmp_id.lower() == tmp_sec_title):\n            id_tag = headline.get('id')\n            break\n    if (id_tag is not None):\n        return self._parse_section_links(id_tag)\n    return None", "docstring": "Parse all links within a section\n\nArgs:\nsection_title (str): Name of the section to pull\nReturns:\nlist: List of (title, url) tuples\nNote:\nReturns **None** if section title is not found\nNote:\nSide effect is to also pull the html which can be slow\nNote:\nThis is a parsing operation and not part of the standard API", "source": "codesearchnet"}
{"code": "def _filter_returned_ops(fn):\n    returned_ops = {}\n\n    def wrap_and_filter_returned_ops(*args, **kwargs):\n        outputs = fn(*args, **kwargs)\n        flat_outputs = nest.flatten(outputs)\n        for n in range(len(flat_outputs)):\n            output = flat_outputs[n]\n            if isinstance(output, ops.Operation):\n                returned_ops[n] = output\n                flat_outputs[n] = None\n        return nest.pack_sequence_as(outputs, flat_outputs)\n    return (wrap_and_filter_returned_ops, returned_ops)", "docstring": "Filtering out any ops returned by function.\n\nArgs:\nfn: a function\n\nReturns:\nA tuple of (\nWrapped function that returns `None` in place of any ops,\ndict that maps the index in the flat output structure to the returned op\n)", "source": "github-repos"}
{"code": "def seek(self, n):\n    if (self._mode != 'r'):\n        raise UnsupportedOperation(\"not available in 'w' mode\")\n    if (0 <= n < self._nb_markers):\n        self._n = n\n        self._bed.seek(self._get_seek_position(n))\n    else:\n        raise ValueError('invalid position in BED: {}'.format(n))", "docstring": "Gets to a certain marker position in the BED file.\n\nArgs:\nn (int): The index of the marker to seek to.", "source": "codesearchnet"}
{"code": "def receive(self, sequence, args):\n        \n\n        \n        if not self._reorder:\n            self._callback(*args)\n            return\n\n        \n        if self._next_expected is not None and sequence < self._next_expected:\n            print(\"Dropping out of order packet, seq=%d\" % sequence)\n            return\n\n        self._out_of_order.append((sequence, args))\n        self._out_of_order.sort(key=lambda x: x[0])\n\n        \n        while len(self._out_of_order) > 0:\n            seq, args = self._out_of_order[0]\n\n            if self._next_expected is not None and seq != self._next_expected:\n                return\n\n            self._callback(*args)\n            self._out_of_order.pop(0)\n            self._next_expected = seq+1", "docstring": "Receive one packet\n\nIf the sequence number is one we've already seen before, it is dropped.\n\nIf it is not the next expected sequence number, it is put into the\n_out_of_order queue to be processed once the holes in sequence number\nare filled in.\n\nArgs:\nsequence (int): The sequence number of the received packet\nargs (list): The list of packet contents that will be passed to callback\nas callback(*args)", "source": "juraj-google-style"}
{"code": "def depth(self):\n    if (self._depth_cache is not None):\n        return self._depth_cache\n    (depth, node) = (1, self)\n    while (node.package is not None):\n        depth += 1\n        node = node.package\n    self._depth_cache = depth\n    return depth", "docstring": "Property to tell the depth of the node in the tree.\n\nReturns:\nint: the node's depth in the tree.", "source": "codesearchnet"}
{"code": "def _update_exit_code_from_error(self, error):\n        \n        for error_type, exit_code in self.ERROR_CODE_MAP.items():\n            if isinstance(error, error_type):\n                self.update_exit_code(exit_code)\n                break\n        else:\n            self.update_exit_code(ExitStatus.generic_error)", "docstring": "Set the exit code based on the error type.\n\nArgs:\nerror (:class:`Exception`): An exception instance.", "source": "juraj-google-style"}
{"code": "async def send_heartbeat(self, name):\n        \n\n        await self.send_command(OPERATIONS.CMD_HEARTBEAT, {'name': name},\n                                MESSAGES.HeartbeatResponse, timeout=5.0)", "docstring": "Send a heartbeat for a service.\n\nArgs:\nname (string): The name of the service to send a heartbeat for", "source": "juraj-google-style"}
{"code": "def _checkMode(mode):\n    \n\n    if not isinstance(mode, str):\n        raise TypeError('The {0} should be a string. Given: {1!r}'.format(\"mode\", mode))\n\n    if mode not in [MODE_RTU, MODE_ASCII]:\n        raise ValueError(\"Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.\".format(mode))", "docstring": "Check that the Modbus mode is valie.\n\nArgs:\nmode (string): The Modbus mode (MODE_RTU or MODE_ASCII)\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def _get_bonds(self, mol):\n    num_atoms = len(mol)\n    if self.ignore_ionic_bond:\n        covalent_atoms = [i for i in range(num_atoms) if (mol.species[i].symbol not in self.ionic_element_list)]\n    else:\n        covalent_atoms = list(range(num_atoms))\n    all_pairs = list(itertools.combinations(covalent_atoms, 2))\n    pair_dists = [mol.get_distance(*p) for p in all_pairs]\n    elements = mol.composition.as_dict().keys()\n    unavailable_elements = list((set(elements) - set(self.covalent_radius.keys())))\n    if (len(unavailable_elements) > 0):\n        raise ValueError('The covalent radius for element {} is not available'.format(unavailable_elements))\n    bond_13 = self.get_13_bonds(self.priority_bonds)\n    max_length = [(((self.covalent_radius[mol.sites[p[0]].specie.symbol] + self.covalent_radius[mol.sites[p[1]].specie.symbol]) * (1 + (self.priority_cap if (p in self.priority_bonds) else (self.bond_length_cap if (p not in bond_13) else self.bond_13_cap)))) * (0.1 if (self.ignore_halogen_self_bond and (p not in self.priority_bonds) and (mol.sites[p[0]].specie.symbol in self.halogen_list) and (mol.sites[p[1]].specie.symbol in self.halogen_list)) else 1.0)) for p in all_pairs]\n    bonds = [bond for (bond, dist, cap) in zip(all_pairs, pair_dists, max_length) if (dist <= cap)]\n    return bonds", "docstring": "Find all the bond in a molcule\n\nArgs:\nmol: the molecule. pymatgen Molecule object\n\nReturns:\nList of tuple. Each tuple correspond to a bond represented by the\nid of the two end atoms.", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    \n    \n    self._line_structures = self.LINE_STRUCTURES\n\n    self._day_of_month = None\n    self._month = None\n    self._year = None\n\n    \n    \n    if self._SIGNATURE in line:\n      return True\n\n    return False", "docstring": "Verify that this file is an IIS log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between\nparsers and other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line was successfully parsed.", "source": "juraj-google-style"}
{"code": "def _powerset(iterable):\n    s = list(iterable)\n    return itertools.chain.from_iterable((itertools.combinations(s, r) for r in range(len(s) + 1)))", "docstring": "Helper for generating all possible reduction_axes arguments.\n\nExample: powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)\n\nArgs:\niterable: An iterable of items to generate the powerset of.\n\nReturns:\nThe powerset of all items in iterable.", "source": "github-repos"}
{"code": "def get_hash(self):\n    if (self._hash is None):\n        self._hash = self._source.get_hash(self._handle).strip()\n    return self._hash", "docstring": "Returns the associated hash for this template version\n\nReturns:\nstr: Hash for this version", "source": "codesearchnet"}
{"code": "def relu_layer(x, weights, biases, name=None):\n    with ops.name_scope(name, 'relu_layer', [x, weights, biases]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        weights = ops.convert_to_tensor(weights, name='weights')\n        biases = ops.convert_to_tensor(biases, name='biases')\n        xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)\n        return nn_ops.relu(xw_plus_b, name=name)", "docstring": "Computes Relu(x * weight + biases).\n\nArgs:\nx: a 2D tensor.  Dimensions typically: batch, in_units\nweights: a 2D tensor.  Dimensions typically: in_units, out_units\nbiases: a 1D tensor.  Dimensions: out_units\nname: A name for the operation (optional).  If not specified\n\"nn_relu_layer\" is used.\n\nReturns:\nA 2-D Tensor computing relu(matmul(x, weights) + biases).\nDimensions typically: batch, out_units.", "source": "github-repos"}
{"code": "def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):\n    if context.executing_eagerly():\n        return self._run_eager_benchmark(iterable=op, iters=iters, warmup=warmup)\n    return self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup, session_config=session_config)", "docstring": "Benchmarks the op.\n\nRuns the op `iters` times. In each iteration, the benchmark measures\nthe time it takes to go execute the op.\n\nArgs:\nop: The tf op to benchmark.\niters: Number of times to repeat the timing.\nwarmup: If true, warms up the session caches by running an untimed run.\nsession_config: A ConfigProto protocol buffer with configuration options\nfor the session. Applicable only for benchmarking in graph mode.\n\nReturns:\nA float, representing the per-execution wall time of the op in seconds.\nThis is the median time (with respect to `iters`) it takes for the op\nto be executed `iters` num of times.", "source": "github-repos"}
{"code": "def _create_variables(self, num_clusters):\n    init_value = array_ops.placeholder_with_default([], shape=None)\n    cluster_centers = variable_v1.VariableV1(init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)\n    cluster_centers_initialized = variable_v1.VariableV1(False, dtype=dtypes.bool, name='initialized')\n    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:\n        cluster_centers_updated = variable_v1.VariableV1(init_value, name='clusters_updated', validate_shape=False)\n        update_in_steps = variable_v1.VariableV1(self._mini_batch_steps_per_iteration, dtype=dtypes.int64, name='update_in_steps')\n        cluster_counts = variable_v1.VariableV1(array_ops.zeros([num_clusters], dtype=dtypes.int64))\n    else:\n        cluster_centers_updated = cluster_centers\n        update_in_steps = None\n        cluster_counts = variable_v1.VariableV1(array_ops.ones([num_clusters], dtype=dtypes.int64)) if self._use_mini_batch else None\n    return (cluster_centers, cluster_centers_initialized, cluster_counts, cluster_centers_updated, update_in_steps)", "docstring": "Creates variables.\n\nArgs:\nnum_clusters: an integer Tensor providing the number of clusters.\n\nReturns:\nTuple with following elements:\n- cluster_centers: a Tensor for storing cluster centers\n- cluster_centers_initialized: bool Variable indicating whether clusters\nare initialized.\n- cluster_counts: a Tensor for storing counts of points assigned to this\ncluster. This is used by mini-batch training.\n- cluster_centers_updated: Tensor representing copy of cluster centers\nthat are updated every step.\n- update_in_steps: numbers of steps left before we sync\ncluster_centers_updated back to cluster_centers.", "source": "github-repos"}
{"code": "def _source_is_newer(src_fs, src_path, dst_fs, dst_path):\n    try:\n        if dst_fs.exists(dst_path):\n            namespace = ('details', 'modified')\n            src_modified = src_fs.getinfo(src_path, namespace).modified\n            if (src_modified is not None):\n                dst_modified = dst_fs.getinfo(dst_path, namespace).modified\n                return ((dst_modified is None) or (src_modified > dst_modified))\n        return True\n    except FSError:\n        return True", "docstring": "Determine if source file is newer than destination file.\n\nArguments:\nsrc_fs (FS): Source filesystem (instance or URL).\nsrc_path (str): Path to a file on the source filesystem.\ndst_fs (FS): Destination filesystem (instance or URL).\ndst_path (str): Path to a file on the destination filesystem.\n\nReturns:\nbool: `True` if the source file is newer than the destination\nfile or file modification time cannot be determined, `False`\notherwise.", "source": "codesearchnet"}
{"code": "def get_config(self):\n    all_args = tf_inspect.getfullargspec(self.__init__).args\n    config = {'name': self.name, 'trainable': self.trainable}\n    if hasattr(self, '_batch_input_shape'):\n        config['batch_input_shape'] = self._batch_input_shape\n    config['dtype'] = policy.serialize(self._dtype_policy)\n    if hasattr(self, 'dynamic'):\n        if self.dynamic:\n            config['dynamic'] = self.dynamic\n        elif 'dynamic' in all_args:\n            all_args.remove('dynamic')\n    expected_args = config.keys()\n    extra_args = [arg for arg in all_args if arg not in expected_args]\n    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n        raise NotImplementedError('Layer %s has arguments in `__init__` and therefore must override `get_config`.' % self.__class__.__name__)\n    return config", "docstring": "Returns the config of the layer.\n\nA layer config is a Python dictionary (serializable)\ncontaining the configuration of a layer.\nThe same layer can be reinstantiated later\n(without its trained weights) from this configuration.\n\nThe config of a layer does not include connectivity\ninformation, nor the layer class name. These are handled\nby `Network` (one layer of abstraction above).\n\nNote that `get_config()` does not guarantee to return a fresh copy of dict\nevery time it is called. The callers should make a copy of the returned dict\nif they want to modify it.\n\nReturns:\nPython dictionary.", "source": "github-repos"}
{"code": "async def import_image(self, data, stream: bool=False):\n    headers = {'Content-Type': 'application/x-tar'}\n    response = (await self.docker._query_chunked_post('images/load', 'POST', data=data, headers=headers))\n    return (await json_stream_result(response, stream=stream))", "docstring": "Import tarball of image to docker.\n\nArgs:\ndata: tarball data of image to be imported\n\nReturns:\nTarball of the image", "source": "codesearchnet"}
{"code": "def initialize_or_restore(self, session=None):\n    if context.executing_eagerly():\n        return\n    if session is None:\n        session = get_session()\n    trackable_objects = util.list_objects(self._object_graph_view)\n    initializers = [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None and (getattr(c, '_update_uid', self._restore_uid - 1) < self._restore_uid)]\n    session.run(initializers)", "docstring": "Runs initialization ops for variables.\n\nObjects which would be saved by `Saver.save` will be initialized, unless\nthose variables are being restored by a later call to\n`tf.train.Checkpoint.restore()`.\n\nThis method does nothing when executing eagerly (initializers get run\neagerly).\n\nArgs:\nsession: The session to run initialization ops in. If `None`, uses the\ndefault session.", "source": "github-repos"}
{"code": "def handle_run_exception(self, pipeline_key, pipeline_func, e):\n    \n    if isinstance(e, Retry):\n      retry_message = str(e)\n      logging.warning('User forced retry for pipeline ID \"%s\" of %r: %s',\n                      pipeline_key.name(), pipeline_func, retry_message)\n      self.transition_retry(pipeline_key, retry_message)\n    elif isinstance(e, Abort):\n      abort_message = str(e)\n      logging.warning('User forced abort for pipeline ID \"%s\" of %r: %s',\n                      pipeline_key.name(), pipeline_func, abort_message)\n      pipeline_func.abort(abort_message)\n    else:\n      retry_message = '%s: %s' % (e.__class__.__name__, str(e))\n      logging.exception('Generator %r\n                        pipeline_func, pipeline_key.name(), retry_message)\n      self.transition_retry(pipeline_key, retry_message)\n\n    return pipeline_func.task_retry", "docstring": "Handles an exception raised by a Pipeline's user code.\n\nArgs:\npipeline_key: The pipeline that raised the error.\npipeline_func: The class path name of the Pipeline that was running.\ne: The exception that was raised.\n\nReturns:\nTrue if the exception should be re-raised up through the calling stack\nby the caller of this method.", "source": "juraj-google-style"}
{"code": "def create_dir(path):\n    \n    full_path = abs_path(path)\n    if not os.path.exists(full_path):\n        try:\n            os.makedirs(full_path)\n        except OSError as e:\n            \n            if e.errno != os.errno.EEXIST:\n                raise", "docstring": "Creates a directory if it does not exist already.\n\nArgs:\npath: The path of the directory to create.", "source": "juraj-google-style"}
{"code": "def process_event(self, event_name: str, data: dict) -> None:\n        \n        if event_name == \"after_epoch\":\n            self.epochs_done = data[\"epochs_done\"]\n            self.batches_seen = data[\"batches_seen\"]\n            self.train_examples_seen = data[\"train_examples_seen\"]\n        return", "docstring": "Process event after epoch\nArgs:\nevent_name: whether event is send after epoch or batch.\nSet of values: ``\"after_epoch\", \"after_batch\"``\ndata: event data (dictionary)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def parse_transcripts(transcript_lines):\n    LOG.info('Parsing transcripts')\n    if isinstance(transcript_lines, DataFrame):\n        transcripts = parse_ensembl_transcript_request(transcript_lines)\n    else:\n        transcripts = parse_ensembl_transcripts(transcript_lines)\n    parsed_transcripts = {}\n    for tx in transcripts:\n        tx_id = tx['ensembl_transcript_id']\n        ens_gene_id = tx['ensembl_gene_id']\n        if (not (tx_id in parsed_transcripts)):\n            tx_info = {'chrom': tx['chrom'], 'transcript_start': tx['transcript_start'], 'transcript_end': tx['transcript_end'], 'mrna': set(), 'mrna_predicted': set(), 'nc_rna': set(), 'ensembl_gene_id': ens_gene_id, 'ensembl_transcript_id': tx_id}\n            parsed_transcripts[tx_id] = tx_info\n        tx_info = parsed_transcripts[tx_id]\n        if tx.get('refseq_mrna_predicted'):\n            tx_info['mrna_predicted'].add(tx['refseq_mrna_predicted'])\n        if tx.get('refseq_mrna'):\n            tx_info['mrna'].add(tx['refseq_mrna'])\n        if tx.get('refseq_ncrna'):\n            tx_info['nc_rna'].add(tx['refseq_ncrna'])\n    return parsed_transcripts", "docstring": "Parse and massage the transcript information\n\nThere could be multiple lines with information about the same transcript.\nThis is why it is necessary to parse the transcripts first and then return a dictionary\nwhere all information has been merged.\n\nArgs:\ntranscript_lines(): This could be an iterable with strings or a pandas.DataFrame\n\nReturns:\nparsed_transcripts(dict): Map from enstid -> transcript info", "source": "codesearchnet"}
{"code": "def compose_path(pub, uuid_url=False):\n    if uuid_url:\n        return join('/', UUID_DOWNLOAD_KEY, str(pub.uuid))\n    return join('/', DOWNLOAD_KEY, basename(pub.file_pointer), basename(pub.filename))", "docstring": "Compose absolute path for given `pub`.\n\nArgs:\npub (obj): :class:`.DBPublication` instance.\nuuid_url (bool, default False): Compose URL using UUID.\n\nReturns:\nstr: Absolute url-path of the publication, without server's address \\\nand protocol.\n\nRaises:\nPrivatePublicationError: When the `pub` is private publication.", "source": "codesearchnet"}
{"code": "def feedforward(inputs, num_units, scope='multihead_attention'):\n    with tf.variable_scope(scope):\n        params = {'inputs': inputs, 'filters': num_units[0], 'kernel_size': 1, 'activation': tf.nn.relu, 'use_bias': True}\n        outputs = tf.layers.conv1d(**params)\n        params = {'inputs': outputs, 'filters': num_units[1], 'kernel_size': 1, 'activation': None, 'use_bias': True}\n        outputs = tf.layers.conv1d(**params)\n        outputs += inputs\n        outputs = normalize(outputs)\n    return outputs", "docstring": "Point-wise feed forward net.\n\nArgs:\ninputs: A 3d tensor with shape of [N, T, C].\nnum_units: A list of two integers.\nscope: Optional scope for `variable_scope`.\nreuse: Boolean, whether to reuse the weights of a previous layer\nby the same name.\n\nReturns:\nA 3d tensor with the same shape and dtype as inputs", "source": "codesearchnet"}
{"code": "def create_local_copy(self, effects=None, store=None):\n    effects = self._build_effects(effects)\n    store = (store or '')\n    data = {'source': self.cdn_path(effects)}\n    if store:\n        data['store'] = store\n    return rest_request('POST', 'files/', data=data)", "docstring": "Creates a Local File Copy on Uploadcare Storage.\n\nArgs:\n- effects:\nAdds CDN image effects. If ``self.default_effects`` property\nis set effects will be combined with default effects.\n- store:\nIf ``store`` option is set to False the copy of your file will\nbe deleted in 24 hour period after the upload.\nWorks only if `autostore` is enabled in the project.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_size = file_object.get_size()\n\n    file_header_map = self._GetDataTypeMap('rp_log_file_header')\n\n    try:\n      file_header, _ = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse file header with error: {0!s}'.format(\n              exception))\n\n    file_footer_map = self._GetDataTypeMap('rp_log_file_footer')\n\n    file_footer_offset = file_size - file_footer_map.GetByteSize()\n\n    try:\n      file_footer, _ = self._ReadStructureFromFileObject(\n          file_object, file_footer_offset, file_footer_map)\n    except (ValueError, errors.ParseError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse file footer with error: {0!s}'.format(exception))\n      return\n\n    \n    \n    description = file_header.description.rstrip('\\0')\n\n    if file_footer.creation_time == 0:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n      date_time = dfdatetime_filetime.Filetime(\n          timestamp=file_footer.creation_time)\n\n    event_data = RestorePointEventData()\n    event_data.description = description\n    event_data.restore_point_event_type = file_header.event_type\n    event_data.restore_point_type = file_header.restore_point_type\n    event_data.sequence_number = file_header.sequence_number\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Windows Restore Point (rp.log) log file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def _maybe_name(obj) -> str:\n    if obj is None:\n        return 'None'\n    elif hasattr(obj, 'name'):\n        return obj.name\n    else:\n        return '<no name for %s>' % type(obj)", "docstring": "Returns object name if it has one, or a message otherwise.\n\nThis is useful for names that apper in error messages.\nArgs:\nobj: Object to get the name of.\nReturns:\nname, \"None\", or a \"no name\" message.", "source": "github-repos"}
{"code": "def count(self) -> 'Builder':\n    return self._to_builder(_evaluation.CountFunction(self.node.context, self.node, []))", "docstring": "The FHIRPath count() function.\n\nReturns:\nAn expression that evaluates to the count of items in the parent.", "source": "github-repos"}
{"code": "def _generate_schedule(self, cpn_frequency, roll_convention):\n    if self._first_coupon_date is None and self._penultimate_coupon_date is None:\n        cpn_dates = dates.PeriodicSchedule(start_date=self._start_date, end_date=self._end_date, tenor=cpn_frequency, roll_convention=roll_convention).dates()\n        is_regular_cpn = tf.constant(True, dtype=bool, shape=cpn_dates[:, :-1].shape)\n    elif self._first_coupon_date is not None:\n        cpn_dates = dates.PeriodicSchedule(start_date=self._first_coupon_date, end_date=self._end_date, tenor=cpn_frequency, roll_convention=roll_convention).dates()\n        cpn_dates = dates.DateTensor.concat([self._start_date.expand_dims(-1), cpn_dates], axis=1)\n        is_irregular_cpn = tf.constant(False, dtype=bool, shape=self._start_date.shape)\n        is_regular_cpn = tf.concat([tf.expand_dims(is_irregular_cpn, axis=-1), tf.constant(True, dtype=bool, shape=cpn_dates[:, :-2].shape)], axis=1)\n    else:\n        cpn_dates = dates.PeriodicSchedule(start_date=self._start_date, end_date=self._penultimate_coupon_date, backward=True, tenor=cpn_frequency, roll_convention=roll_convention).dates()\n        cpn_dates = dates.DateTensor.concat([cpn_dates, self._end_date.expand_dims(-1)], axis=1)\n        is_irregular_cpn = tf.constant(False, dtype=bool, shape=self._end_date.shape)\n        is_regular_cpn = tf.concat([tf.constant(True, dtype=bool, shape=cpn_dates[:, :-2].shape), tf.expand_dims(is_irregular_cpn, axis=-1)], axis=1)\n    return (cpn_dates, is_regular_cpn)", "docstring": "Method to generate coupon dates.\n\nArgs:\ncpn_frequency: A `PeriodTensor` specifying the frequency of coupon\npayments.\nroll_convention: Scalar of type `BusinessDayConvention` specifying how\ndates are rolled if they fall on holidays.\n\nReturns:\nA tuple containing the generated date schedule and a boolean `Tensor`\nof the same shape as the schedule specifying whether the coupons are\nregular coupons.", "source": "github-repos"}
{"code": "def __init__(self, resolver_context):\n    \n    super(CPIOFile, self).__init__(resolver_context)\n    self._cpio_archive_file = None\n    self._cpio_archive_file_entry = None\n    self._current_offset = 0\n    self._file_system = None\n    self._size = 0", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def verify_profile_name(msg, cfg):\n    if (msg.profile not in cfg.data):\n        raise UnknownProfileError(msg.profile)", "docstring": "Verifies the profile name exists in the config.json file.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def compute_bleu_summaries(hook_args):\n    decode_hparams = hook_args.decode_hparams\n    if (not (decode_hparams.decode_reference and decode_hparams.decode_to_file)):\n        return None\n    values = []\n    bleu = (100 * bleu_hook.bleu_wrapper(decode_hparams.decode_reference, decode_hparams.decode_to_file))\n    values.append(tf.Summary.Value(tag='BLEU', simple_value=bleu))\n    tf.logging.info(('%s: BLEU = %6.2f' % (decode_hparams.decode_to_file, bleu)))\n    if hook_args.hparams.mlperf_mode:\n        current_step = decode_hparams.mlperf_decode_step\n        mlperf_log.transformer_print(key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)\n        mlperf_log.transformer_print(key=mlperf_log.EVAL_ACCURACY, value={'epoch': max(((current_step \n        mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)\n    if (bleu >= decode_hparams.mlperf_threshold):\n        decode_hparams.set_hparam('mlperf_success', True)\n    return values", "docstring": "Compute BLEU core summaries using the decoder output.\n\nArgs:\nhook_args: DecodeHookArgs namedtuple\nReturns:\nA list of tf.Summary values if hook_args.hparams contains the\nreference file and the translated file.", "source": "codesearchnet"}
{"code": "def from_api_repr(cls, resource):\n        \n        ref = cls()\n        ref._proto = json_format.ParseDict(resource, types.ModelReference())\n        return ref", "docstring": "Factory:  construct a model reference given its API representation\n\nArgs:\nresource (Dict[str, object]):\nModel reference representation returned from the API\n\nReturns:\ngoogle.cloud.bigquery.model.ModelReference:\nModel reference parsed from ``resource``.", "source": "juraj-google-style"}
{"code": "def single_qubit_matrix_to_phased_x_z(mat: np.ndarray, atol: float=0) -> List[ops.SingleQubitGate]:\n    (xy_turn, xy_phase_turn, total_z_turn) = _deconstruct_single_qubit_matrix_into_gate_turns(mat)\n    result = [ops.PhasedXPowGate(exponent=(2 * xy_turn), phase_exponent=(2 * xy_phase_turn)), (ops.Z ** (2 * total_z_turn))]\n    result = [g for g in result if (protocols.trace_distance_bound(g) > atol)]\n    if ((len(result) == 2) and (abs(xy_turn) >= (0.5 - atol))):\n        return [ops.PhasedXPowGate(phase_exponent=((2 * xy_phase_turn) + total_z_turn))]\n    return result", "docstring": "Implements a single-qubit operation with a PhasedX and Z gate.\n\nIf one of the gates isn't needed, it will be omitted.\n\nArgs:\nmat: The 2x2 unitary matrix of the operation to implement.\natol: A limit on the amount of error introduced by the\nconstruction.\n\nReturns:\nA list of gates that, when applied in order, perform the desired\noperation.", "source": "codesearchnet"}
{"code": "def interface_required(interface):\n\n    def _interface_required(func):\n        'Internal decorator that wraps around the decorated function.\\n\\n            Args:\\n              func (function): function being decorated\\n\\n            Returns:\\n              The wrapper function.\\n            '\n\n        @functools.wraps(func)\n        def wrapper(self, *args, **kwargs):\n            'Wrapper function to check that the given ``JLink`` has the\\n                same interface as the one specified by the decorator.\\n\\n                Args:\\n                  self (JLink): the ``JLink`` instance\\n                  args: list of arguments to pass to ``func``\\n                  kwargs: key-word arguments dict to pass to ``func``\\n\\n                Returns:\\n                  The return value of the wrapped function.\\n\\n                Raises:\\n                  JLinkException: if the current interface is not supported by\\n                      the wrapped method.\\n                '\n            if (self.tif != interface):\n                raise errors.JLinkException('Unsupported for current interface.')\n            return func(self, *args, **kwargs)\n        return wrapper\n    return _interface_required", "docstring": "Decorator to specify that a particular interface type is required\nfor the given method to be used.\n\nArgs:\ninterface (int): attribute of ``JLinkInterfaces``\n\nReturns:\nA decorator function.", "source": "codesearchnet"}
{"code": "def write(self):\n    none_type = type(None)\n    attrs = self.attribute_string()\n    for attr in self.__dict__.keys():\n        if (type(attr) == none_type):\n            setattr(self, attr, '.')\n    fstr = '{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}{9}'.format(self.seqid, self.source, self.type, str(self.start), str(self.end), self._score_str, self.strand, self.phase, attrs, os.linesep)\n    return fstr", "docstring": "Restore GFF3 entry to original format\n\nReturns:\nstr: properly formatted string containing the GFF3 entry", "source": "codesearchnet"}
{"code": "def find_template_filename(self, template_name):\n        \n\n        def next_file():\n            filename = self.path / template_name\n            yield filename\n            try:\n                exts = self.default_file_extensions\n            except AttributeError:\n                return\n\n            strfilename = str(filename)\n            for ext in exts:\n                yield Path(strfilename + ext)\n\n        for filename in next_file():\n            if filename.is_file():\n                return filename", "docstring": "Searches for a file matching the given template name.\n\nIf found, this method returns the pathlib.Path object of the found\ntemplate file.\n\nArgs:\ntemplate_name (str): Name of the template, with or without a file\nextension.\n\nReturns:\npathlib.Path: Path to the matching filename.", "source": "juraj-google-style"}
{"code": "def configs_for_writer(writer=None, ppp_config_dir=None):\n    search_paths = ((ppp_config_dir,) if ppp_config_dir else tuple())\n    if (writer is not None):\n        if (not isinstance(writer, (list, tuple))):\n            writer = [writer]\n        config_files = [(w if w.endswith('.yaml') else (w + '.yaml')) for w in writer]\n    else:\n        writer_configs = glob_config(os.path.join('writers', '*.yaml'), *search_paths)\n        config_files = set(writer_configs)\n    for config_file in config_files:\n        config_basename = os.path.basename(config_file)\n        writer_configs = config_search_paths(os.path.join('writers', config_basename), *search_paths)\n        if (not writer_configs):\n            LOG.warning(\"No writer configs found for '%s'\", writer)\n            continue\n        (yield writer_configs)", "docstring": "Generator of writer configuration files for one or more writers\n\nArgs:\nwriter (Optional[str]): Yield configs only for this writer\nppp_config_dir (Optional[str]): Additional configuration directory\nto search for writer configuration files.\n\nReturns: Generator of lists of configuration files", "source": "codesearchnet"}
{"code": "def read_byte(self, do_ord=True) -> int:\n        \n        try:\n            if do_ord:\n                return ord(self.stream.read(1))\n            else:\n                return self.stream.read(1)\n        except Exception as e:\n            raise SDKException(ErrorCode.read_byte_error(e.args[0]))", "docstring": "Read a single byte.\nArgs:\ndo_ord (bool): (default True) convert the byte to an ordinal first.\nReturns:\nbytes: a single byte if successful. 0 (int) if an exception occurred.", "source": "juraj-google-style"}
{"code": "def maybe_center_plot(result):\n    \n    begin = re.search('(% .* matplotlib2tikz v.*)', result)\n    if begin:\n        result = ('\\\\begin{center}\\n' + result[begin.end():] +\n                  '\\n\\\\end{center}')\n    return result", "docstring": "Embeds a possible tikz image inside a center environment.\n\nSearches for matplotlib2tikz last commend line to detect tikz images.\n\nArgs:\nresult: The code execution result\n\nReturns:\nThe input result if no tikzpicture was found, otherwise a centered\nversion.", "source": "juraj-google-style"}
{"code": "def contains(self, x: int, y: int) -> bool:\n    return ((self.x <= x < (self.x + self.width)) and (self.y <= y < (self.y + self.height)))", "docstring": "Returns True if this node contains these coordinates.\n\nArgs:\nx (int): X position to check.\ny (int): Y position to check.\n\nReturns:\nbool: True if this node contains these coordinates.\nOtherwise False.", "source": "codesearchnet"}
{"code": "def add(self, selected: 'SelectedMailbox', *,\n            replace: 'SelectedMailbox' = None) -> None:\n        \n        if replace is not None:\n            self._set.discard(replace)\n        self._set.add(selected)", "docstring": "Add a new selected mailbox object to the set, which may then be\nreturned by :meth:`.any_selected`.\n\nArgs:\nselected: The new selected mailbox object.\nreplace: An existing selected mailbox object that should be removed\nfrom the weak set.", "source": "juraj-google-style"}
{"code": "def pkg_version_list(self, pkg_id):\n        \n        pkg_data = self.__reg_software.get(pkg_id, None)\n        if not pkg_data:\n            return []\n\n        if isinstance(pkg_data, list):\n            \n            return pkg_data  \n\n        \n        installed_versions = list(pkg_data.get('version').keys())\n        return sorted(installed_versions, key=cmp_to_key(self.__oldest_to_latest_version))", "docstring": "Returns information on a package.\n\nArgs:\npkg_id (str): Package Id of the software/component.\n\nReturns:\nlist: List of version numbers installed.", "source": "juraj-google-style"}
{"code": "def __init__(self, dtypes, shapes, names, queue_ref):\n    self._dtypes = dtypes\n    if shapes is not None:\n        if len(shapes) != len(dtypes):\n            raise ValueError(f'Queue shapes must have the same length as dtypes, received len(shapes)={len(shapes)}, len(dtypes)={len(dtypes)}')\n        self._shapes = [tensor_shape.TensorShape(s) for s in shapes]\n    else:\n        self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]\n    if names is not None:\n        if len(names) != len(dtypes):\n            raise ValueError(f'Queue names must have the same length as dtypes,received len(names)={len(names)},len {len(dtypes)}')\n        self._names = names\n    else:\n        self._names = None\n    self._queue_ref = queue_ref\n    if isinstance(queue_ref, ops.EagerTensor):\n        if context.context().scope_name:\n            self._name = context.context().scope_name\n        else:\n            self._name = 'Empty'\n        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(queue_ref, None)\n    else:\n        self._name = self._queue_ref.op.name.split('/')[-1]", "docstring": "Constructs a queue object from a queue reference.\n\nThe two optional lists, `shapes` and `names`, must be of the same length\nas `dtypes` if provided.  The values at a given index `i` indicate the\nshape and name to use for the corresponding queue component in `dtypes`.\n\nArgs:\ndtypes:  A list of types.  The length of dtypes must equal the number\nof tensors in each element.\nshapes: Constraints on the shapes of tensors in an element:\nA list of shape tuples or None. This list is the same length\nas dtypes.  If the shape of any tensors in the element are constrained,\nall must be; shapes can be None if the shapes should not be constrained.\nnames: Optional list of names.  If provided, the `enqueue()` and\n`dequeue()` methods will use dictionaries with these names as keys.\nMust be None or a list or tuple of the same length as `dtypes`.\nqueue_ref: The queue reference, i.e. the output of the queue op.\n\nRaises:\nValueError: If one of the arguments is invalid.", "source": "github-repos"}
{"code": "def waiting_config_state(self, timeout=300):\n        \n        t_start = time.time()\n        while not self.check_config_state():\n            if time.time() - t_start > timeout:\n                return False\n            time.sleep(0.1)\n        return True", "docstring": "waiting while real state equal config state\nArgs:\ntimeout - specify how long, in seconds, a command can take before server times out.\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "def inputs_valid(self, outputs=None):\n    if (self.operation == Transaction.CREATE):\n        return self._inputs_valid(['dummyvalue' for _ in self.inputs])\n    elif (self.operation == Transaction.TRANSFER):\n        return self._inputs_valid([output.fulfillment.condition_uri for output in outputs])\n    else:\n        allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)\n        raise TypeError('`operation` must be one of {}'.format(allowed_ops))", "docstring": "Validates the Inputs in the Transaction against given\nOutputs.\n\nNote:\nGiven a `CREATE` Transaction is passed,\ndummy values for Outputs are submitted for validation that\nevaluate parts of the validation-checks to `True`.\n\nArgs:\noutputs (:obj:`list` of :class:`~bigchaindb.common.\ntransaction.Output`): A list of Outputs to check the\nInputs against.\n\nReturns:\nbool: If all Inputs are valid.", "source": "codesearchnet"}
{"code": "def set_iterator_element_layouts(self, iterator_resource_dtensor, layouts: List[layout_lib.Layout]):\n    _pywrap_dtensor_device.SetIteratorElementLayouts(context.context()._handle, iterator_resource_dtensor, [layout.to_string() for layout in layouts], self._device_info)", "docstring": "Sets the element layouts on an iterator resource tensor.\n\nArgs:\niterator_resource_dtensor: a DTensor created by packing the individiual\niterator resource tensors.\nlayouts: the flattened list of layouts to be applied to the elements\nemitted by the iterator resource DTensor.", "source": "github-repos"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    if position_ids is None:\n        batch_size, sequence_length = input_ids.shape\n        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxMarianMTModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n>>> model = FlaxMarianMTModel.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=64, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"}
{"code": "def construct_channel(self, **kwargs):\n        \n        if self.compatibility_mode:\n            \n            config.LOGGER.info(\"Populating channel... \")\n            channel = self.chef_module.construct_channel(**kwargs)\n            return channel\n        else:\n            raise NotImplementedError('Your chef class must overrride the construct_channel method')", "docstring": "Calls chef script's construct_channel method. Used only in compatibility mode.\nArgs:\nkwargs (dict): additional keyword arguments that `uploadchannel` received\nReturns: channel populated from construct_channel method", "source": "juraj-google-style"}
{"code": "def _SetAllFieldTypes(self, package, desc_proto, scope):\n    \n\n    package = _PrefixWithDot(package)\n\n    main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)\n\n    if package == '.':\n      nested_package = _PrefixWithDot(desc_proto.name)\n    else:\n      nested_package = '.'.join([package, desc_proto.name])\n\n    for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):\n      self._SetFieldType(field_proto, field_desc, nested_package, scope)\n\n    for extension_proto, extension_desc in (\n        zip(desc_proto.extension, main_desc.extensions)):\n      extension_desc.containing_type = self._GetTypeFromScope(\n          nested_package, extension_proto.extendee, scope)\n      self._SetFieldType(extension_proto, extension_desc, nested_package, scope)\n\n    for nested_type in desc_proto.nested_type:\n      self._SetAllFieldTypes(nested_package, nested_type, scope)", "docstring": "Sets all the descriptor's fields's types.\n\nThis method also sets the containing types on any extensions.\n\nArgs:\npackage: The current package of desc_proto.\ndesc_proto: The message descriptor to update.\nscope: Enclosing scope of available types.", "source": "juraj-google-style"}
{"code": "def stop_batch_gradient(cls, x: 'TensorFluent', stop_batch: tf.Tensor) -> 'TensorFluent':\n        \n        scope = x.scope.as_list()\n        batch = x.batch\n        tensor = tf.where(stop_batch, tf.stop_gradient(x.tensor), x.tensor)\n        return TensorFluent(tensor, scope, batch)", "docstring": "Returns a copy of the inputs fluent with stop_gradient applied at batch level.\n\nArgs:\nx: The input fluent.\nstop_batch: A boolean tf.Tensor with shape=(batch_size, ...)\n\nReturns:\nA TensorFluent that conditionally stops backpropagation of gradient computations.", "source": "juraj-google-style"}
{"code": "def last_updated(self, path):\n    if not self.exists(path):\n        raise BeamIOError('Path does not exist: %s' % path)\n    return os.path.getmtime(path)", "docstring": "Get UNIX Epoch time in seconds on the FileSystem.\n\nArgs:\npath: string path of file.\n\nReturns: float UNIX Epoch time\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def from_config(cls, config: dict):\n    timestamp = config.get('timestamp', None)\n    return cls(config.get('id'), config.get('type'), config.get('data', dict()), config.get('origin', None), timestamp, config.get('object_type', None), config.get('object_id', None), config.get('object_key', None))", "docstring": "Create an event object from an event dictionary object.\n\nArgs:\nconfig (dict): Event Configuration dictionary.", "source": "codesearchnet"}
{"code": "def get_input_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')", "docstring": "Retrieves the input tensor(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom which to retrieve the attribute.\nE.g. `node_index=0` will correspond to the\nfirst input node of the layer.\n\nReturns:\nA tensor (or list of tensors if the layer has multiple inputs).\n\nRaises:\nRuntimeError: If called in Eager mode.", "source": "github-repos"}
{"code": "def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):\n    errors = np.abs((y - y_hat))[(:, 0)]\n    if (not smooth):\n        return errors\n    smoothing_window = int((smoothing_window * len(y)))\n    return pd.Series(errors).ewm(span=smoothing_window).mean().values", "docstring": "Compute an array of absolute errors comparing predictions and expected output.\n\nIf smooth is True, apply EWMA to the resulting array of errors.\n\nArgs:\ny (array): Ground truth.\ny_hat (array): Predictions array.\nsmoothing_window (float): Size of the smoothing window, expressed as a proportion\nof the total length of y.\nsmooth (bool): whether the returned errors should be smoothed with EWMA.\n\nReturns:\n(array): errors", "source": "codesearchnet"}
{"code": "def __init__(self, sess, thread_name_filter=None, pass_through_operrors=False):\n    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n    self._sess = sess\n    self._thread_name_filter_pattern = re.compile(thread_name_filter) if thread_name_filter else None\n    self._pass_through_operrors = pass_through_operrors\n    self._run_call_count = 0\n    response = self.on_session_init(OnSessionInitRequest(self._sess))\n    _check_type(response, OnSessionInitResponse)\n    if response.action == OnSessionInitAction.PROCEED:\n        pass\n    elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:\n        raise NotImplementedError('OnSessionInitAction REMOTE_INSTR_LOOP has not been implemented.')\n    else:\n        raise ValueError('Invalid OnSessionInitAction value: %s' % response.action)\n    self._default_session_context_manager = None\n    self._cached_callables_from_options = {}", "docstring": "Constructor of `BaseDebugWrapperSession`.\n\nArgs:\nsess: An (unwrapped) TensorFlow session instance. It should be a subtype\nof `BaseSession` or `tf.MonitoredSession`.\nthread_name_filter: Regular-expression filter (allowlist) for name(s) of\nthread(s) on which the wrapper session will be active. This regular\nexpression is used in a start-anchored fashion on the thread name, i.e.,\nby applying the `match` method of the compiled pattern. The default\n`None` means that the wrapper session will be active on all threads.\nE.g., r\"MainThread$\", r\"QueueRunnerThread.*\".\npass_through_operrors: If True, all captured OpErrors will be\npropagated.  By default this captures all OpErrors.\n\nRaises:\nValueError: On invalid `OnSessionInitAction` value.\nNotImplementedError: If a non-DirectSession sess object is received.", "source": "github-repos"}
{"code": "def GetPathInfo(self, timestamp=None):\n    \n    path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)\n    try:\n      result = self._path_infos[path_info_timestamp].Copy()\n    except KeyError:\n      result = rdf_objects.PathInfo(\n          path_type=self._path_type, components=self._components)\n\n    stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries,\n                                                    timestamp)\n    result.last_stat_entry_timestamp = stat_entry_timestamp\n    result.stat_entry = self._stat_entries.get(stat_entry_timestamp)\n\n    hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries,\n                                                    timestamp)\n    result.last_hash_entry_timestamp = hash_entry_timestamp\n    result.hash_entry = self._hash_entries.get(hash_entry_timestamp)\n\n    return result", "docstring": "Generates a summary about the path record.\n\nArgs:\ntimestamp: A point in time from which the data should be retrieved.\n\nReturns:\nA `rdf_objects.PathInfo` instance.", "source": "juraj-google-style"}
{"code": "def _ReadSupportedOS(self, definition_values, definition_object, name):\n    \n    supported_os = definition_values.get('supported_os', [])\n    if not isinstance(supported_os, list):\n      raise errors.FormatError(\n          'Invalid supported_os type: {0!s}'.format(type(supported_os)))\n\n    undefined_supported_os = set(supported_os).difference(self.supported_os)\n    if undefined_supported_os:\n      error_string = (\n          'Artifact definition: {0:s} undefined supported operating system: '\n          '{1:s}.').format(name, ', '.join(undefined_supported_os))\n      raise errors.FormatError(error_string)\n\n    definition_object.supported_os = supported_os", "docstring": "Reads the optional artifact or source type supported OS.\n\nArgs:\ndefinition_values (dict[str, object]): artifact definition values.\ndefinition_object (ArtifactDefinition|SourceType): the definition object.\nname (str): name of the artifact definition.\n\nRaises:\nFormatError: if there are undefined supported operating systems.", "source": "juraj-google-style"}
{"code": "def page(title, description, element_list=None, tab_list=None):\n    _page = {'Type': 'Page', 'Title': title, 'Description': description, 'Data': {}}\n    if (element_list is not None):\n        if isinstance(element_list, list):\n            _page['Data']['Elements'] = element_list\n        else:\n            _page['Data']['Elements'] = [element_list]\n    if (tab_list is not None):\n        if isinstance(tab_list, list):\n            _page['Data']['Tabs'] = tab_list\n        else:\n            _page['Data']['Tabs'] = [tab_list]\n    return _page", "docstring": "Returns a dictionary representing a new page to display elements.\nThis can be thought of as a simple container for displaying multiple\ntypes of information. The ``section`` method can be used to create\nseparate tabs.\n\nArgs:\ntitle: The title to display\ndescription: A description of the section\nelement_list: The list of elements to display. If a single element is\ngiven it will be wrapped in a list.\ntab_list: A list of tabs to display.\n\nReturns:\nA dictionary with metadata specifying that it is to be rendered\nas a page containing multiple elements and/or tabs.", "source": "codesearchnet"}
{"code": "def reindex(self, kdims=None, vdims=None):\n    gridded = self.interface.gridded\n    scalars = []\n    if gridded:\n        coords = [(d, self.interface.coords(self, d.name)) for d in self.kdims]\n        scalars = [d for (d, vs) in coords if (len(vs) == 1)]\n    if (kdims is None):\n        key_dims = [d for d in self.kdims if (((not vdims) or (d not in vdims)) and (not (d in scalars)))]\n    elif (not isinstance(kdims, list)):\n        key_dims = [self.get_dimension(kdims, strict=True)]\n    else:\n        key_dims = [self.get_dimension(k, strict=True) for k in kdims]\n    dropped = [d for d in self.kdims if ((not (d in key_dims)) and (not (d in scalars)))]\n    new_type = None\n    if (vdims is None):\n        val_dims = [d for d in self.vdims if ((not kdims) or (d not in kdims))]\n    else:\n        val_dims = [self.get_dimension(v, strict=True) for v in vdims]\n        new_type = self._vdim_reductions.get(len(val_dims), type(self))\n    data = self.interface.reindex(self, key_dims, val_dims)\n    datatype = self.datatype\n    if (gridded and dropped):\n        interfaces = self.interface.interfaces\n        datatype = [dt for dt in datatype if (not getattr(interfaces.get(dt, None), 'gridded', True))]\n    return self.clone(data, kdims=key_dims, vdims=val_dims, new_type=new_type, datatype=datatype)", "docstring": "Reindexes Dataset dropping static or supplied kdims\n\nCreates a new object with a reordered or reduced set of key\ndimensions. By default drops all non-varying key dimensions.x\n\nArgs:\nkdims (optional): New list of key dimensionsx\nvdims (optional): New list of value dimensions\n\nReturns:\nReindexed object", "source": "codesearchnet"}
{"code": "def AddInformationalOptions(self, argument_group):\n    \n    argument_group.add_argument(\n        '-d', '--debug', dest='debug', action='store_true', default=False,\n        help='Enable debug output.')\n\n    argument_group.add_argument(\n        '-q', '--quiet', dest='quiet', action='store_true', default=False,\n        help='Disable informational output.')", "docstring": "Adds the informational options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def get_category(self, column):\n        \n        result = pd.Series(index=column.index)\n\n        for category, stats in self.probability_map.items():\n            start, end = stats[0]\n            result[(start < column) & (column < end)] = category\n\n        return result", "docstring": "Returns categories for the specified numeric values\n\nArgs:\ncolumn(pandas.Series): Values to transform into categories\n\nReturns:\npandas.Series", "source": "juraj-google-style"}
{"code": "def dump(self, file, payload):\n        \n        json.dump(payload, file, indent=2, ensure_ascii=False)", "docstring": "Dump json oject to open file output.\n\nWrites json with 2 spaces indentation.\n\nArgs:\nfile: Open file-like object. Must be open for writing.\npayload: The Json object to write to file.\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def ignore(wrapped):\n\n    @functools.wraps(wrapped)\n    def _ignore(*args, **kwargs):\n        return wrapped(*args, **kwargs)\n    _ignore._splpy_optype = _OperatorType.Ignore\n    _ignore._splpy_file = inspect.getsourcefile(wrapped)\n    return _ignore", "docstring": "Decorator to ignore a Python function.\n\nIf a Python callable is decorated with ``@spl.ignore``\nthen function is ignored by ``spl-python-extract.py``.\n\nArgs:\nwrapped: Function that will be ignored.", "source": "codesearchnet"}
{"code": "def parse_newsgroup(line):\n    \n    parts = line.split()\n    try:\n        group = parts[0]\n        low = int(parts[1])\n        high = int(parts[2])\n        status = parts[3]\n    except (IndexError, ValueError):\n        raise ValueError(\"Invalid newsgroup info\")\n    return group, low, high, status", "docstring": "Parse a newsgroup info line to python types.\n\nArgs:\nline: An info response line containing newsgroup info.\n\nReturns:\nA tuple of group name, low-water as integer, high-water as integer and\nposting status.\n\nRaises:\nValueError: If the newsgroup info cannot be parsed.\n\nNote:\nPosting status is a character is one of (but not limited to):\n\"y\" posting allowed\n\"n\" posting not allowed\n\"m\" posting is moderated", "source": "juraj-google-style"}
{"code": "def get_by_name(self, name):\n        \n        try:\n            spec = self._dom.get('templates', {})[name]\n        except KeyError:\n            raise LagoMissingTemplateError(name, self._path)\n\n        return Template(\n            name=name,\n            versions={\n                ver_name: TemplateVersion(\n                    name='%s:%s:%s' % (self.name, name, ver_name),\n                    source=self._providers[ver_spec['source']],\n                    handle=ver_spec['handle'],\n                    timestamp=ver_spec['timestamp'],\n                )\n                for ver_name, ver_spec in spec['versions'].items()\n            },\n        )", "docstring": "Retrieve a template by it's name\n\nArgs:\nname (str): Name of the template to retrieve\n\nRaises:\nLagoMissingTemplateError: if no template is found", "source": "juraj-google-style"}
{"code": "def extend(self, name, opts, info):\n    tifo = self.info.copy()\n    tifo.update(info)\n    topt = self.opts.copy()\n    topt.update(opts)\n    tobj = self.__class__(self.modl, name, tifo, topt)\n    tobj.subof = self.name\n    return tobj", "docstring": "Extend this type to construct a sub-type.\n\nArgs:\nname (str): The name of the new sub-type.\nopts (dict): The type options for the sub-type.\ninfo (dict): The type info for the sub-type.\n\nReturns:\n(synapse.types.Type): A new sub-type instance.", "source": "codesearchnet"}
{"code": "def plot_to_svg(plot, width, height, unit=''):\n    \n    flipped_plot = [(x, -y) for x, y in plot]\n    aspect_ratio = height / width\n    view_box = calculate_view_box(flipped_plot, aspect_ratio=aspect_ratio)\n    view_box_str = '{} {} {} {}'.format(*view_box)\n    stroke_thickness = STROKE_THICKNESS * (view_box[2])\n\n    svg = ET.Element('svg', attrib={\n        'xmlns': 'http:\n        'xmlns:inkscape': 'http:\n        'width': '{}{}'.format(width, unit),\n        'height': '{}{}'.format(height, unit),\n        'viewBox': view_box_str})\n\n    for i, layer in enumerate(flipped_plot):\n        group = ET.SubElement(svg, 'g', attrib={\n            'inkscape:label': '{}-layer'.format(i),\n            'inkscape:groupmode': 'layer',\n        })\n\n        color = PLOT_COLORS[i % len(PLOT_COLORS)]\n        ET.SubElement(group, 'path', attrib={\n            'style': 'stroke-width: {}; stroke: {};'.format(stroke_thickness, color),\n            'fill': 'none',\n            'd': layer_to_path(layer)\n        })\n\n    try:\n        return ET.tostring(svg, encoding='unicode')\n    except LookupError:\n        \n        return ET.tostring(svg)", "docstring": "Converts a plot (list of layers) into an SVG document.\n\nArgs:\nplot (list): list of layers that make up the plot\nwidth (float): the width of the resulting image\nheight (float): the height of the resulting image\nunit (str): the units of the resulting image if not pixels\n\nReturns:\nstr: A stringified XML document representing the image", "source": "juraj-google-style"}
{"code": "def _locate_element(dom, el_content, transformer=None):\n    return dom.find(None, fn=utils.content_matchs(el_content, transformer))", "docstring": "Find element containing `el_content` in `dom`. Use `transformer` function\nto content of all elements in `dom` in order to correctly transforming them\nto match them with `el_content`.\n\nArgs:\ndom (obj): HTMLElement tree.\nel_content (str): Content of element will be picked from `dom`.\ntransformer (fn, default None): Transforming function.\n\nNote:\n`transformer` parameter can be for example simple lambda::\n\nlambda x: x.strip()\n\nReturns:\nlist: Matching HTMLElements.", "source": "codesearchnet"}
{"code": "def _parse_alt_title(html_chunk):\n    \n    title = html_chunk.find(\"img\", fn=has_param(\"alt\"))\n\n    if not title:\n        raise UserWarning(\"Can't find alternative title source!\")\n\n    return title[0].params[\"alt\"].strip()", "docstring": "Parse title from alternative location if not found where it should be.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr: Book's title.", "source": "juraj-google-style"}
{"code": "def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype:\n    if not dtype.is_floating_point:\n        raise ValueError(f\"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype\")\n    logger.info(f'Instantiating {cls.__name__} model under default dtype {dtype}.')\n    dtype_orig = torch.get_default_dtype()\n    torch.set_default_dtype(dtype)\n    return dtype_orig", "docstring": "Change the default dtype and return the previous one. This is needed when wanting to instantiate the model\nunder specific dtype.\n\nArgs:\ndtype (`torch.dtype`):\na floating dtype to set to.\n\nReturns:\n`torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was\nmodified. If it wasn't, returns `None`.\n\nNote `set_default_dtype` currently only works with floating-point types and asserts if for example,\n`torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception.", "source": "github-repos"}
{"code": "def _ParseRecord(self, parser_mediator, file_object):\n    header_record_offset = file_object.tell()\n    token_type = self._ParseTokenType(file_object, header_record_offset)\n    if (token_type not in self._HEADER_TOKEN_TYPES):\n        raise errors.ParseError('Unsupported header token type: 0x{0:02x}'.format(token_type))\n    (token_type, token_data) = self._ParseToken(file_object, header_record_offset)\n    if (token_data.format_version != 11):\n        raise errors.ParseError('Unsupported format version type: {0:d}'.format(token_data.format_version))\n    timestamp = (token_data.microseconds + (token_data.timestamp * definitions.MICROSECONDS_PER_SECOND))\n    event_type = token_data.event_type\n    header_record_size = token_data.record_size\n    record_end_offset = (header_record_offset + header_record_size)\n    event_tokens = []\n    return_token_values = None\n    file_offset = file_object.tell()\n    while (file_offset < record_end_offset):\n        (token_type, token_data) = self._ParseToken(file_object, file_offset)\n        if (not token_data):\n            raise errors.ParseError('Unsupported token type: 0x{0:02x}'.format(token_type))\n        file_offset = file_object.tell()\n        if (token_type == self._TOKEN_TYPE_AUT_TRAILER):\n            break\n        token_type_string = self._TOKEN_TYPES.get(token_type, 'UNKNOWN')\n        token_values = self._FormatTokenData(token_type, token_data)\n        event_tokens.append({token_type_string: token_values})\n        if (token_type in (self._TOKEN_TYPE_AUT_RETURN32, self._TOKEN_TYPE_AUT_RETURN64)):\n            return_token_values = token_values\n    if (token_data.signature != self._TRAILER_TOKEN_SIGNATURE):\n        raise errors.ParseError('Unsupported signature in trailer token.')\n    if (token_data.record_size != header_record_size):\n        raise errors.ParseError('Mismatch of event record size between header and trailer token.')\n    event_data = BSMEventData()\n    event_data.event_type = event_type\n    event_data.extra_tokens = event_tokens\n    event_data.offset = header_record_offset\n    event_data.record_length = header_record_size\n    event_data.return_value = return_token_values\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an event record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nParseError: if the event record cannot be read.", "source": "codesearchnet"}
{"code": "def AllTypes():\n    return [AssetType.CreditFlag, AssetType.DutyFlag, AssetType.GoverningToken, AssetType.UtilityToken, AssetType.Currency, AssetType.Share, AssetType.Invoice, AssetType.Token]", "docstring": "Get a list of all available asset types.\n\nReturns:\nlist: of AssetType items.", "source": "codesearchnet"}
{"code": "def IsCompatible(self, allow_py3=False, raise_exception=False):\n    error = None\n    if not self.version:\n        error = 'ERROR: Your current version of Python is not compatible with the Google Cloud SDK. {0}\\n'.format(self.SupportedVersionMessage(allow_py3))\n    elif self.version[0] < 3:\n        if self.version < PythonVersion.MIN_REQUIRED_PY2_VERSION:\n            error = 'ERROR: Python {0}.{1} is not compatible with the Google Cloud SDK. {2}\\n'.format(self.version[0], self.version[1], self.SupportedVersionMessage(allow_py3))\n    elif not allow_py3:\n        error = 'ERROR: Python 3 and later is not compatible with the Google Cloud SDK. {0}\\n'.format(self.SupportedVersionMessage(allow_py3))\n    elif self.version < PythonVersion.MIN_SUPPORTED_PY3_VERSION:\n        error = 'ERROR: Python {0}.{1} is not compatible with the Google Cloud SDK. {2}\\n'.format(self.version[0], self.version[1], self.SupportedVersionMessage(allow_py3))\n    if error:\n        if raise_exception:\n            raise Error(error)\n        sys.stderr.write(error)\n        sys.stderr.write(PythonVersion.ENV_VAR_MESSAGE)\n        return False\n    if self.version >= self.MIN_REQUIRED_PY2_VERSION and self.version < self.MIN_SUPPORTED_PY2_VERSION:\n        sys.stderr.write('WARNING:  Python 2.6.x is no longer officially supported by the Google Cloud SDK\\nand may not function correctly.  {0}\\n{1}'.format(self.SupportedVersionMessage(allow_py3), PythonVersion.ENV_VAR_MESSAGE))\n    return True", "docstring": "Ensure that the Python version we are using is compatible.\n\nThis will print an error message if not compatible.\n\nCompatible versions are 2.6 and 2.7 and > 3.4 if allow_py3 is True.\nWe don't guarantee support for 2.6 so we want to warn about it.\n\nArgs:\nallow_py3: bool, True if we should allow a Python 3 interpreter to run\ngcloud. If False, this returns an error for Python 3.\nraise_exception: bool, True to raise an exception rather than printing\nthe error and exiting.\n\nRaises:\nError: If not compatible and raise_exception is True.\n\nReturns:\nbool, True if the version is valid, False otherwise.", "source": "github-repos"}
{"code": "def __init__(self, sbi_id: str):\n        \n        SchedulingObject.__init__(self, SBI_KEY, sbi_id)\n        self._check_object_exists()", "docstring": "Create a SBI object.\n\nArgs:\nsbi_id (str): SBI Identifier\n\nRaises:\nKeyError, if the specified SBI does not exist.", "source": "juraj-google-style"}
{"code": "def reduce_sum(x,\n               disable_positional_args=None,\n               output_shape=None,\n               reduced_dim=None,\n               name=None):\n  \n  output_shape = convert_to_shape(output_shape)\n  reduced_dim = convert_to_dimension(reduced_dim)\n  assert disable_positional_args is None\n  output_shape = _reduction_output_shape(x, output_shape, reduced_dim)\n  if output_shape == x.shape:\n    return x\n  return ReduceOperation(x, output_shape, \"SUM\", name=name).outputs[0]", "docstring": "Reduction on 1 or more axes.\n\nIf reduced_dim is present, then only that dimension is reduced out.\nAlternatively, specify output_shape.\nDo not specify both reduced_dim and output_shape.\nIf neither is specified, then all dimensions are reduced out.\n\nArgs:\nx: a Tensor\ndisable_positional_args: None\noutput_shape: an optional Shape.  Must be a subsequence of x.shape.\nreduced_dim: a mtf.Dimension\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def inspect(logdir='', event_file='', tag=''):\n    print(((PRINT_SEPARATOR + 'Processing event files... (this can take a few minutes)\\n') + PRINT_SEPARATOR))\n    inspection_units = get_inspection_units(logdir, event_file, tag)\n    for unit in inspection_units:\n        if tag:\n            print('Event statistics for tag {} in {}:'.format(tag, unit.name))\n        else:\n            print('These tags are in {}:'.format(unit.name))\n            print_dict(get_unique_tags(unit.field_to_obs))\n            print(PRINT_SEPARATOR)\n            print('Event statistics for {}:'.format(unit.name))\n        print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))\n        print(PRINT_SEPARATOR)", "docstring": "Main function for inspector that prints out a digest of event files.\n\nArgs:\nlogdir: A log directory that contains event files.\nevent_file: Or, a particular event file path.\ntag: An optional tag name to query for.\n\nRaises:\nValueError: If neither logdir and event_file are given, or both are given.", "source": "codesearchnet"}
{"code": "def generate_test_run_log_path(self):\n    self._logger_start_time = logger.get_log_file_timestamp()\n    self.root_output_path = os.path.join(self._log_dir, self._testbed_name, self._logger_start_time)\n    return self.root_output_path", "docstring": "Geneartes the log path for a test run.\n\nThe log path includes a timestamp that is set in this call.\n\nThere is usually a minor difference between this timestamp and the actual\nstarting point of the test run. This is because the log path must be set\nup *before* the test run actually starts, so all information of a test\nrun can be properly captured.\n\nThe generated value can be accessed via `self.root_output_path`.\n\nReturns:\nString, the generated log path.", "source": "github-repos"}
{"code": "def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False):\n    dp_arg_names = inspect.getargspec(data_parallelism).args\n    blacklist = ['daisy_chain_variables', 'all_workers']\n    kwargs = {}\n    for arg in dp_arg_names:\n        if (arg in blacklist):\n            continue\n        kwargs[arg] = getattr(tf.flags.FLAGS, arg)\n    return data_parallelism(daisy_chain_variables=daisy_chain_variables, all_workers=all_workers, **kwargs)", "docstring": "Over which devices do we split each training batch.\n\nIn old-fashioned async mode, we split the batch over all GPUs on the\ncurrent worker.\n\nIn sync mode, we split the batch over all the parameter server GPUs.\n\nThis function returns an expert_utils.Parallelism object, which can be used\nto build the model.  It is configured in a way that any variables created\nby `tf.get_variable` will be assigned to the parameter servers and shared\nbetween datashards.\n\nArgs:\ndaisy_chain_variables: whether to copy variables in a daisy chain on GPUs.\nall_workers: whether the devices are all async workers or just this one.\n\nReturns:\na expert_utils.Parallelism.", "source": "codesearchnet"}
{"code": "def _parse_apps_to_ignore(self):\n    apps_to_ignore = set()\n    section_title = 'applications_to_ignore'\n    if self._parser.has_section(section_title):\n        apps_to_ignore = set(self._parser.options(section_title))\n    return apps_to_ignore", "docstring": "Parse the applications to ignore in the config.\n\nReturns:\nset", "source": "codesearchnet"}
{"code": "def generate_workflow_description(self):\n        \n        if not self.tasks:\n            raise WorkflowError('Workflow contains no tasks, and cannot be executed.')\n\n        self.definition = self.workflow_skeleton()\n\n        if self.batch_values:\n            self.definition[\"batch_values\"] = self.batch_values\n\n        all_input_port_values = [t.inputs.__getattribute__(input_port_name).value for t in self.tasks for\n                                 input_port_name in t.inputs._portnames]\n        for task in self.tasks:\n            \n            \n            \n            \n            output_multiplex_ports_to_exclude = []\n            multiplex_output_port_names = [portname for portname in task.outputs._portnames if\n                                           task.outputs.__getattribute__(portname).is_multiplex]\n            for p in multiplex_output_port_names:\n                output_port_reference = 'source:' + task.name + ':' + p\n                if output_port_reference not in all_input_port_values:\n                    output_multiplex_ports_to_exclude.append(p)\n\n            task_def = task.generate_task_workflow_json(\n                output_multiplex_ports_to_exclude=output_multiplex_ports_to_exclude)\n            self.definition['tasks'].append(task_def)\n\n        if self.callback:\n            self.definition['callback'] = self.callback\n\n        return self.definition", "docstring": "Generate workflow json for launching the workflow against the gbdx api\n\nArgs:\nNone\n\nReturns:\njson string", "source": "juraj-google-style"}
{"code": "def MakePartialStat(self, fd):\n    is_dir = ('Container' in fd.behaviours)\n    return {'pathspec': fd.Get(fd.Schema.PATHSPEC, ''), 'st_atime': fd.Get(fd.Schema.LAST, 0), 'st_blksize': 0, 'st_blocks': 0, 'st_ctime': 0, 'st_dev': 0, 'st_gid': 0, 'st_ino': 0, 'st_mode': (self.default_dir_mode if is_dir else self.default_file_mode), 'st_mtime': 0, 'st_nlink': 0, 'st_rdev': 0, 'st_size': fd.Get(fd.Schema.SIZE, 0), 'st_uid': 0}", "docstring": "Try and give a 'stat' for something not in the data store.\n\nArgs:\nfd: The object with no stat.\n\nReturns:\nA dictionary corresponding to what we'll say the 'stat' is\nfor objects which are not actually files, so have no OS level stat.", "source": "codesearchnet"}
{"code": "def db996(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db996`'.format(value))\n\n        self._db996 = value", "docstring": "Corresponds to IDD Field `db996`\nDry-bulb temperature corresponding to 99.6% annual cumulative\nfrequency of occurrence (cold conditions)\n\nArgs:\nvalue (float): value for IDD Field `db996`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def assert_integer(x, message=None, name=None):\n    with ops.name_scope(name, 'assert_integer', [x]):\n        x = ops.convert_to_tensor(x, name='x')\n        if not x.dtype.is_integer:\n            if context.executing_eagerly():\n                name = 'tensor'\n            else:\n                name = x.name\n            err_msg = '%sExpected \"x\" to be integer type.  Found: %s of dtype %s' % (_message_prefix(message), name, x.dtype)\n            raise TypeError(err_msg)\n        return control_flow_ops.no_op('statically_determined_was_integer')", "docstring": "Assert that `x` is of integer dtype.\n\nExample of adding a dependency to an operation:\n\n```python\nwith tf.control_dependencies([tf.compat.v1.assert_integer(x)]):\noutput = tf.reduce_sum(x)\n```\n\nArgs:\nx: `Tensor` whose basetype is integer and is not quantized.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).  Defaults to \"assert_integer\".\n\nRaises:\nTypeError:  If `x.dtype` is anything other than non-quantized integer.\n\nReturns:\nA `no_op` that does nothing.  Type can be determined statically.", "source": "github-repos"}
{"code": "def delete(self, paths):\n    results = self._blobstorageIO().delete_paths(paths)\n    exceptions = {path: error for path, error in results.items() if error is not None}\n    if exceptions:\n        raise BeamIOError('Delete operation failed', exceptions)", "docstring": "Deletes files or directories at the provided paths.\nDirectories will be deleted recursively.\n\nArgs:\npaths: list of paths that give the file objects to be deleted\n\nRaises:\n``BeamIOError``: if any of the delete operations fail", "source": "github-repos"}
{"code": "def _make_token_async(scopes, service_account_id):\n    rpc = app_identity.create_rpc()\n    app_identity.make_get_access_token_call(rpc, scopes, service_account_id)\n    (token, expires_at) = (yield rpc)\n    raise ndb.Return((token, expires_at))", "docstring": "Get a fresh authentication token.\n\nArgs:\nscopes: A list of scopes.\nservice_account_id: Internal-use only.\n\nRaises:\nAn ndb.Return with a tuple (token, expiration_time) where expiration_time is\nseconds since the epoch.", "source": "codesearchnet"}
{"code": "def FindCheckMacro(line):\n    for macro in _CHECK_MACROS:\n        i = line.find(macro)\n        if (i >= 0):\n            matched = Match((('^(.*\\\\b' + macro) + '\\\\s*)\\\\('), line)\n            if (not matched):\n                continue\n            return (macro, len(matched.group(1)))\n    return (None, (- 1))", "docstring": "Find a replaceable CHECK-like macro.\n\nArgs:\nline: line to search on.\nReturns:\n(macro name, start position), or (None, -1) if no replaceable\nmacro is found.", "source": "codesearchnet"}
{"code": "def set_approvers(self, approver_ids=[], approver_group_ids=[], **kwargs):\n    path = ('%s/%s/approvers' % (self._parent.manager.path, self._parent.get_id()))\n    data = {'approver_ids': approver_ids, 'approver_group_ids': approver_group_ids}\n    self.gitlab.http_put(path, post_data=data, **kwargs)", "docstring": "Change MR-level allowed approvers and approver groups.\n\nArgs:\napprover_ids (list): User IDs that can approve MRs\napprover_group_ids (list): Group IDs whose members can approve MRs\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def is_feature_enabled(self, feature_key, user_id, attributes=None):\n    if (not self.is_valid):\n        self.logger.error(enums.Errors.INVALID_DATAFILE.format('is_feature_enabled'))\n        return False\n    if (not validator.is_non_empty_string(feature_key)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))\n        return False\n    if (not isinstance(user_id, string_types)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n        return False\n    if (not self._validate_user_inputs(attributes)):\n        return False\n    feature = self.config.get_feature_from_key(feature_key)\n    if (not feature):\n        return False\n    feature_enabled = False\n    source_info = {}\n    decision = self.decision_service.get_variation_for_feature(feature, user_id, attributes)\n    is_source_experiment = (decision.source == enums.DecisionSources.FEATURE_TEST)\n    if decision.variation:\n        if (decision.variation.featureEnabled is True):\n            feature_enabled = True\n        if is_source_experiment:\n            source_info = {'experiment_key': decision.experiment.key, 'variation_key': decision.variation.key}\n            self._send_impression_event(decision.experiment, decision.variation, user_id, attributes)\n    if feature_enabled:\n        self.logger.info(('Feature \"%s\" is enabled for user \"%s\".' % (feature_key, user_id)))\n    else:\n        self.logger.info(('Feature \"%s\" is not enabled for user \"%s\".' % (feature_key, user_id)))\n    self.notification_center.send_notifications(enums.NotificationTypes.DECISION, enums.DecisionNotificationTypes.FEATURE, user_id, (attributes or {}), {'feature_key': feature_key, 'feature_enabled': feature_enabled, 'source': decision.source, 'source_info': source_info})\n    return feature_enabled", "docstring": "Returns true if the feature is enabled for the given user.\n\nArgs:\nfeature_key: The key of the feature for which we are determining if it is enabled or not for the given user.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nTrue if the feature is enabled for the user. False otherwise.", "source": "codesearchnet"}
{"code": "def add_c_function(self, c_func):\n    self.ensure_initialized()\n    pywrap_tfe.TFE_ContextAddFunction(self._handle, c_func)", "docstring": "Add a C API TF_Function to the context.\n\nOnce added, the function (identified by its name) can be executed like any\nother operation.\n\nArgs:\nc_func: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).", "source": "github-repos"}
{"code": "def _handle_emailauth(maildomain='', message=''): \n        \n        print('SteamGuard requires email authentication...')\n        emailauth = input('Please enter the code sent to your mail address at \"%s\": ' % maildomain)\n        emailauth.upper()\n        return emailauth", "docstring": "Called when SteamGuard requires authentication via e-mail.\nAsks the user to enter the code.\n\nArgs:\nmaildomain: Optional. The mail domain of the e-mail address the SteamGuard\ncode is send to.\nmessage: Optional. A message from Steam service.\n\nReturns:\nA string containing the code.", "source": "juraj-google-style"}
{"code": "def plot_tcoords(array, coords, scantypes=None, ax=None, **kwargs):\n    \n    if ax is None:\n        ax = plt.gca()\n\n    if scantypes is None:\n        ax.plot(array[coords[0]], array[coords[1]], label='ALL', **kwargs)\n    else:\n        for scantype in scantypes:\n            ax.plot(array[coords[0]][array.scantype == scantype],\n                    array[coords[1]][array.scantype == scantype], label=scantype, **kwargs)\n    ax.set_xlabel(coords[0])\n    ax.set_ylabel(coords[1])\n    ax.set_title('{} vs {}'.format(coords[1], coords[0]))\n    ax.legend()\n\n    logger.info('{} vs {} has been plotted.'.format(coords[1], coords[0]))", "docstring": "Plot coordinates related to the time axis.\n\nArgs:\narray (xarray.DataArray): Array which the coodinate information is included.\ncoords (list): Name of x axis and y axis.\nscantypes (list): Scantypes. If None, all scantypes are used.\nax (matplotlib.axes): Axis you want to plot on.\nkwargs (optional): Plot options passed to ax.plot().", "source": "juraj-google-style"}
{"code": "def cctop_submit(seq_str):\n    \n    url = 'http:\n    r = requests.post(url)\n    jobid = r.text.split('ID: ')[1]\n\n    return jobid", "docstring": "Submit a protein sequence string to CCTOP and return the job ID.\n\nArgs:\nseq_str (str): Protein sequence as a string\n\nReturns:\ndict: Job ID on the CCTOP server", "source": "juraj-google-style"}
{"code": "def Uninstall(self, package_name, keep_data=False, timeout_ms=None):\n        \n        cmd = ['pm uninstall']\n        if keep_data:\n            cmd.append('-k')\n        cmd.append('\"%s\"' % package_name)\n\n        return self.Shell(' '.join(cmd), timeout_ms=timeout_ms)", "docstring": "Removes a package from the device.\n\nArgs:\npackage_name: Package name of target package.\nkeep_data: whether to keep the data and cache directories\ntimeout_ms: Expected timeout for pushing and installing.\n\nReturns:\nThe pm uninstall output.", "source": "juraj-google-style"}
{"code": "def latest_vcf_filename(build):\n    \n    ftp = FTP('ftp.ncbi.nlm.nih.gov')\n    ftp.login()\n    nav_to_vcf_dir(ftp, build=build)\n    clinvar_datestamped = [f for f in ftp.nlst() if\n                           re.match('^clinvar_[0-9]{8}.vcf.gz$', f)]\n    if len(clinvar_datestamped) == 1:\n        return clinvar_datestamped[0]\n    raise IOError(\"Unable to determine the most recent ClinVar VCF file on \" +\n                  \"NCBI's FTP site.\")", "docstring": "Determine the filename for the most recent comprehensive ClinVar VCF.\n\nArgs:\nbuild: (type: string) genome build, either 'b37' or 'b38'\n\nReturns:\n(type: string) Filename of the most recent comprehensive ClinVar VCF.", "source": "juraj-google-style"}
{"code": "def __init__(self, report_job_id):\n    \n    super(AdManagerReportError, self).__init__(\n        'Ad Manager report job failed. The ID of the failed report is: %s'\n        % report_job_id)\n    self.report_job_id = report_job_id", "docstring": "Initializes a AdManagerReportError.\n\nArgs:\nreport_job_id: The ID of the report job which failed.", "source": "juraj-google-style"}
{"code": "def summarize_dist_params(dist, name, name_scope='dist_params'):\n    with tf.compat.v1.name_scope(name_scope):\n        tf.compat.v2.summary.histogram(name='{}/{}'.format(name, 'mean'), data=dist.mean(), step=tf.compat.v1.train.get_or_create_global_step())\n        tf.compat.v2.summary.histogram(name='{}/{}'.format(name, 'stddev'), data=dist.stddev(), step=tf.compat.v1.train.get_or_create_global_step())", "docstring": "Summarize the parameters of a distribution.\n\nArgs:\ndist: A Distribution object with mean and standard deviation\nparameters.\nname: The name of the distribution.\nname_scope: The name scope of this summary.", "source": "codesearchnet"}
{"code": "def LateBind(self, target=None):\n    \n    if not issubclass(target, RDFProtoStruct):\n      raise TypeError(\"Field %s expects a protobuf, but target is %s\" %\n                      (self, target))\n\n    self.late_bound = False\n\n    \n    self.type = target\n\n    \n    self.owner.AddDescriptor(self)", "docstring": "Late binding callback.\n\nThis method is called on this field descriptor when the target RDFValue\nclass is finally defined. It gives the field descriptor an opportunity to\ninitialize after the point of definition.\n\nArgs:\ntarget: The target nested class.\n\nRaises:\nTypeError: If the target class is not of the expected type.", "source": "juraj-google-style"}
{"code": "def _original_path(self, path):\n        \n\n        def components_to_path():\n            if len(path_components) > len(normalized_components):\n                normalized_components.extend(\n                    path_components[len(normalized_components):])\n            sep = self._path_separator(path)\n            normalized_path = sep.join(normalized_components)\n            if path.startswith(sep) and not normalized_path.startswith(sep):\n                normalized_path = sep + normalized_path\n            return normalized_path\n\n        if self.is_case_sensitive or not path:\n            return path\n        path_components = self._path_components(path)\n        normalized_components = []\n        current_dir = self.root\n        for component in path_components:\n            if not isinstance(current_dir, FakeDirectory):\n                return components_to_path()\n            dir_name, current_dir = self._directory_content(\n                current_dir, component)\n            if current_dir is None or (\n                            isinstance(current_dir, FakeDirectory) and\n                            current_dir._byte_contents is None and\n                            current_dir.st_size == 0):\n                return components_to_path()\n            normalized_components.append(dir_name)\n        return components_to_path()", "docstring": "Return a normalized case version of the given path for\ncase-insensitive file systems. For case-sensitive file systems,\nreturn path unchanged.\n\nArgs:\npath: the file path to be transformed\n\nReturns:\nA version of path matching the case of existing path elements.", "source": "juraj-google-style"}
{"code": "def _read_marcxml(xml):\n    \n    \n    marc_xml = _read_content_or_path(xml)\n\n\n    \n    \n    marc_xml = _oai_to_xml(marc_xml)\n    marc_xml = _add_namespace(marc_xml)\n\n    file_obj = StringIO.StringIO(marc_xml)\n\n    return ET.parse(file_obj)", "docstring": "Read MARC XML or OAI file, convert, add namespace and return XML in\nrequired format with all necessities.\n\nArgs:\nxml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\n\nReturns:\nobj: Required XML parsed with ``lxml.etree``.", "source": "juraj-google-style"}
{"code": "def sg_max(tensor, opt):\n    return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the maximum of elements across axis of a tensor.\n\nSee `tf.reduce_max()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "codesearchnet"}
{"code": "def get_permissions(self, namespace, explicit=False):\n        \n\n        if not isinstance(namespace, Namespace):\n            namespace = Namespace(namespace)\n        keys = namespace.keys\n        p, _ = self._check(keys, self.index, explicit=explicit)\n        return p", "docstring": "Returns the permissions level for the specified namespace\n\nArguments:\n\nnamespace -- permissioning namespace (str)\nexplicit -- require explicitly set permissions to the provided namespace\n\nReturns:\n\nint -- permissioning flags", "source": "juraj-google-style"}
{"code": "def entry_point(__func: Callable) -> Callable:\n    if (__func.__module__ == '__main__'):\n        import sys\n        sys.exit(__func())\n    else:\n        return __func", "docstring": "Execute function when module is run directly.\n\nNote:\nThis allows fall through for importing modules that use it.\n\nArgs:\n__func: Function to run", "source": "codesearchnet"}
{"code": "def _format_value(value):\n  \n  literal = repr(value)\n  try:\n    if parse_value(literal) == value:\n      return literal\n  except SyntaxError:\n    pass\n  return None", "docstring": "Returns `value` in a format parseable by `parse_value`, or `None`.\n\nSimply put, This function ensures that when it returns a string value, the\nfollowing will hold:\n\nparse_value(_format_value(value)) == value\n\nArgs:\nvalue: The value to format.\n\nReturns:\nA string representation of `value` when `value` is literally representable,\nor `None`.", "source": "juraj-google-style"}
{"code": "def json(self, ondemand=False):\n        \n        self._request_entity = 'indicator'\n        self._request_uri = '{}/{}'.format(self._api_uri, 'json')\n        self._stream = True\n        if ondemand:\n            self._request.add_payload('runNow', True)", "docstring": "Update request URI to return JSON data.\n\nFor onDemand bulk generation to work it must first be enabled in the\nThreatConnect platform under System settings.\n\nArgs:\nondemand (boolean): Enable on demand bulk generation.", "source": "juraj-google-style"}
{"code": "def get(self, accountID, **kwargs):\n    request = Request('GET', '/v3/accounts/{accountID}/pricing')\n    request.set_path_param('accountID', accountID)\n    request.set_param('instruments', kwargs.get('instruments'))\n    request.set_param('since', kwargs.get('since'))\n    request.set_param('includeUnitsAvailable', kwargs.get('includeUnitsAvailable'))\n    request.set_param('includeHomeConversions', kwargs.get('includeHomeConversions'))\n    response = self.ctx.request(request)\n    if (response.content_type is None):\n        return response\n    if (not response.content_type.startswith('application/json')):\n        return response\n    jbody = json.loads(response.raw_body)\n    parsed_body = {}\n    if (str(response.status) == '200'):\n        if (jbody.get('prices') is not None):\n            parsed_body['prices'] = [self.ctx.pricing.ClientPrice.from_dict(d, self.ctx) for d in jbody.get('prices')]\n        if (jbody.get('homeConversions') is not None):\n            parsed_body['homeConversions'] = [self.ctx.pricing.HomeConversions.from_dict(d, self.ctx) for d in jbody.get('homeConversions')]\n        if (jbody.get('time') is not None):\n            parsed_body['time'] = jbody.get('time')\n    elif (str(response.status) == '400'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '401'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '404'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '405'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    else:\n        parsed_body = jbody\n    response.body = parsed_body\n    return response", "docstring": "Get pricing information for a specified list of Instruments within an\nAccount.\n\nArgs:\naccountID:\nAccount Identifier\ninstruments:\nList of Instruments to get pricing for.\nsince:\nDate/Time filter to apply to the response. Only prices and home\nconversions (if requested) with a time later than this filter\n(i.e. the price has changed after the since time) will be\nprovided, and are filtered independently.\nincludeUnitsAvailable:\nFlag that enables the inclusion of the unitsAvailable field in\nthe returned Price objects.\nincludeHomeConversions:\nFlag that enables the inclusion of the homeConversions field in\nthe returned response. An entry will be returned for each\ncurrency in the set of all base and quote currencies present in\nthe requested instruments list.\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def recursepath(path, reverse=False):\n    if (path in '/'):\n        return ['/']\n    path = (abspath(normpath(path)) + '/')\n    paths = ['/']\n    find = path.find\n    append = paths.append\n    pos = 1\n    len_path = len(path)\n    while (pos < len_path):\n        pos = find('/', pos)\n        append(path[:pos])\n        pos += 1\n    if reverse:\n        return paths[::(- 1)]\n    return paths", "docstring": "Get intermediate paths from the root to the given path.\n\nArguments:\npath (str): A PyFilesystem path\nreverse (bool): Reverses the order of the paths\n(default `False`).\n\nReturns:\nlist: A list of paths.\n\nExample:\n>>> recursepath('a/b/c')\n['/', '/a', '/a/b', '/a/b/c']", "source": "codesearchnet"}
{"code": "def dispatch(op, args, kwargs):\n    for dispatcher in getattr(op, FALLBACK_DISPATCH_ATTR):\n        result = dispatcher.handle(args, kwargs)\n        if result is not OpDispatcher.NOT_SUPPORTED:\n            return result\n    for dispatcher in _GLOBAL_DISPATCHERS:\n        result = dispatcher.handle(op, args, kwargs)\n        if result is not OpDispatcher.NOT_SUPPORTED:\n            return result\n    return OpDispatcher.NOT_SUPPORTED", "docstring": "Returns the result from the first successful dispatcher for a given op.\n\nCalls the `handle` method of each `OpDispatcher` that has been registered\nto handle `op`, and returns the value from the first successful handler.\n\nArgs:\nop: Python function: the operation to dispatch for.\nargs: The arguments to the operation.\nkwargs: They keyword arguments to the operation.\n\nReturns:\nThe result of the operation, or `NOT_SUPPORTED` if no registered\ndispatcher can handle the given arguments.", "source": "github-repos"}
{"code": "def DeregisterHelper(cls, analyzer_helper):\n    if (analyzer_helper.type_indicator not in cls._analyzer_helpers):\n        raise KeyError('Analyzer helper object not set for type indicator: {0:s}.'.format(analyzer_helper.type_indicator))\n    analyzer_helper = cls._analyzer_helpers[analyzer_helper.type_indicator]\n    cls._FlushCache(analyzer_helper.format_categories)\n    del cls._analyzer_helpers[analyzer_helper.type_indicator]", "docstring": "Deregisters a format analyzer helper.\n\nArgs:\nanalyzer_helper (AnalyzerHelper): analyzer helper.\n\nRaises:\nKeyError: if analyzer helper object is not set for the corresponding\ntype indicator.", "source": "codesearchnet"}
{"code": "def Set(self, name, value):\n    if (self.writeback is None):\n        logging.warning('Attempting to modify a read only config object for %s.', name)\n    if (name in self.constants):\n        raise ConstModificationError(('Attempting to modify constant value %s' % name))\n    writeback_data = self.writeback_data\n    if (value is not None):\n        if isinstance(value, Text):\n            value = self.EscapeString(value)\n    writeback_data[name] = value\n    self.FlushCache()", "docstring": "Update the configuration option with a new value.\n\nNote that this forces the value to be set for all contexts. The value is\nwritten to the writeback location if Save() is later called.\n\nArgs:\nname: The name of the parameter to set.\nvalue: The value to set it to. The value will be validated against the\noption's type descriptor.\n\nRaises:\nConstModificationError: When attempting to change a constant option.", "source": "codesearchnet"}
{"code": "def set_log_prefix(self, prefix: str) -> None:\n    self.debug('Setting the log prefix to \"%s\".', prefix)\n    self.extra[PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX] = prefix", "docstring": "Sets the log prefix to the given string.\n\nArgs:\nprefix: The new log prefix.", "source": "github-repos"}
{"code": "def move_to_destination(source, destination, job_name, sagemaker_session):\n    \n    parsed_uri = urlparse(destination)\n    if parsed_uri.scheme == 'file':\n        recursive_copy(source, parsed_uri.path)\n        final_uri = destination\n    elif parsed_uri.scheme == 's3':\n        bucket = parsed_uri.netloc\n        path = \"%s%s\" % (parsed_uri.path.lstrip('/'), job_name)\n        final_uri = 's3:\n        sagemaker_session.upload_data(source, bucket, path)\n    else:\n        raise ValueError('Invalid destination URI, must be s3:\n\n    shutil.rmtree(source)\n    return final_uri", "docstring": "move source to destination. Can handle uploading to S3\n\nArgs:\nsource (str): root directory to move\ndestination (str): file:// or s3:// URI that source will be moved to.\njob_name (str): SageMaker job name.\nsagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed\n\nReturns:\n(str): destination URI", "source": "juraj-google-style"}
{"code": "def _has_connection(hostname, port):\n    \n    try:\n        host = socket.gethostbyname(hostname)\n        socket.create_connection((host, port), 2)\n        return True\n    except Exception:  \n        return False", "docstring": "Checks if internet connection exists to host via specified port.\n\nIf any exception is raised while trying to open a socket this will return\nfalse.\n\nArgs:\nhostname (str): Hostname to connect to.\nport (int): Port to connect to\n\nReturns:\nbool: Has connection or not", "source": "juraj-google-style"}
{"code": "def output(self, original_filename):\n        \n\n        for contract in self.contracts:\n            for function in contract.functions + contract.modifiers:\n                filename = \"{}-{}-{}.dot\".format(original_filename, contract.name, function.full_name)\n                self.info('Export {}'.format(filename))\n                function.slithir_cfg_to_dot(filename)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def save_yaml(dictionary, path, pretty=False, sortkeys=False):\n    if sortkeys:\n        dictionary = dict(dictionary)\n    with open(path, 'w') as f:\n        if pretty:\n            pyaml.dump(dictionary, f)\n        else:\n            yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)", "docstring": "Save dictionary to YAML file preserving order if it is an OrderedDict\n\nArgs:\ndictionary (Dict): Python dictionary to save\npath (str): Path to YAML file\npretty (bool): Whether to pretty print. Defaults to False.\nsortkeys (bool): Whether to sort dictionary keys. Defaults to False.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def relative_probability( self, l1, l2, c1, c2 ):\n        \n        if self.site_energies:\n            site_delta_E = self.site_energies[ l2 ] - self.site_energies[ l1 ]\n        else:\n            site_delta_E = 0.0\n        if self.nn_energy:\n            delta_nn = c2 - c1 - 1 \n            site_delta_E += delta_nn * self.nn_energy\n        return metropolis( site_delta_E )", "docstring": "The relative probability for a jump between two sites with specific site types and coordination numbers.\n\nArgs:\nl1 (Str): Site label for the initial site.\nl2 (Str): Site label for the final site.\nc1 (Int): Coordination number for the initial site.\nc2 (Int): Coordination number for the final site.\n\nReturns:\n(Float): The relative probability of this jump occurring.", "source": "juraj-google-style"}
{"code": "def from_numbers(cls, dna_values: List[Union[int, float, str]], dna_spec: DNASpec) -> 'DNA':\n    context = dict(index=0)\n\n    def _next_decision():\n        if context['index'] >= len(dna_values):\n            raise ValueError(f'The input {dna_values!r} is too short for {dna_spec!r}.')\n        decision = dna_values[context['index']]\n        context['index'] += 1\n        return decision\n\n    def _bind_decisions(dna_spec):\n        value = None\n        children = None\n        if dna_spec.is_space:\n            children = [_bind_decisions(elem) for elem in dna_spec.elements]\n        elif dna_spec.is_categorical:\n            if dna_spec.num_choices == 1:\n                value = _next_decision()\n                if value < 0 or value >= len(dna_spec.candidates):\n                    raise ValueError(f\"Candidate index out of range at choice '{dna_spec.name or dna_spec.id}'. Index={value}, Number of candidates={len(dna_spec.candidates)}.\")\n                children = [_bind_decisions(dna_spec.candidates[value])]\n            else:\n                children = [_bind_decisions(spec) for spec in dna_spec.choice_specs]\n        else:\n            value = _next_decision()\n        return DNA(value, children, spec=dna_spec)\n    dna = _bind_decisions(dna_spec)\n    if context['index'] != len(dna_values):\n        end_pos = context['index']\n        raise ValueError(f'The input {dna_values!r} is too long for {dna_spec!r}. Remaining: {dna_values[end_pos:]!r}.')\n    return dna", "docstring": "Create a DNA from a flattened list of dna values.\n\nArgs:\ndna_values: A list of DNA values.\ndna_spec: DNASpec that interprets the dna values.\n\nReturns:\nA DNA object.", "source": "github-repos"}
{"code": "def set_control_scheme(self, agent_name, control_scheme):\n        \n        if agent_name not in self.agents:\n            print(\"No such agent %s\" % agent_name)\n        else:\n            self.agents[agent_name].set_control_scheme(control_scheme)", "docstring": "Set the control scheme for a specific agent.\n\nArgs:\nagent_name (str): The name of the agent to set the control scheme for.\ncontrol_scheme (int): A control scheme value (see :obj:`holodeck.agents.ControlSchemes`)", "source": "juraj-google-style"}
{"code": "def _get_flags_defined_by_module(self, module):\n    \n    if not isinstance(module, str):\n      module = module.__name__\n\n    return list(self.flags_by_module_dict().get(module, []))", "docstring": "Returns the list of flags defined by a module.\n\nArgs:\nmodule: module|str, the module to get flags from.\n\nReturns:\n[Flag], a new list of Flag instances.  Caller may update this list as\ndesired: none of those changes will affect the internals of this\nFlagValue instance.", "source": "juraj-google-style"}
{"code": "def create_alias(target_path, alias_path):\n    if platform.system() == 'Windows' and (not alias_path.endswith('.lnk')):\n        alias_path += '.lnk'\n    if os.path.lexists(alias_path):\n        os.remove(alias_path)\n    if platform.system() == 'Windows':\n        from win32com import client\n        shell = client.Dispatch('WScript.Shell')\n        shortcut = shell.CreateShortCut(alias_path)\n        shortcut.Targetpath = target_path\n        shortcut.save()\n    else:\n        os.symlink(target_path, alias_path)", "docstring": "Creates an alias at 'alias_path' pointing to the file 'target_path'.\n\nOn Unix, this is implemented via symlink. On Windows, this is done by\ncreating a Windows shortcut file.\n\nArgs:\ntarget_path: Destination path that the alias should point to.\nalias_path: Path at which to create the new alias.", "source": "github-repos"}
{"code": "def point_probability(self, threshold):\n    point_prob = np.zeros(self.data.shape[1:])\n    for t in range(self.data.shape[1]):\n        point_prob[t] = np.where((self.data[(:, t)] >= threshold), 1.0, 0.0).mean(axis=0)\n    return EnsembleConsensus(point_prob, 'point_probability', self.ensemble_name, self.run_date, (self.variable + '_{0:0.2f}_{1}'.format(threshold, self.units.replace(' ', '_'))), self.start_date, self.end_date, '')", "docstring": "Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at\nthat point.\n\nArgs:\nthreshold: If >= threshold assigns a 1 to member, otherwise 0.\n\nReturns:\nEnsembleConsensus", "source": "codesearchnet"}
{"code": "def get_forced_variation(self, experiment, user_id):\n    forced_variations = experiment.forcedVariations\n    if (forced_variations and (user_id in forced_variations)):\n        variation_key = forced_variations.get(user_id)\n        variation = self.config.get_variation_from_key(experiment.key, variation_key)\n        if variation:\n            self.logger.info(('User \"%s\" is forced in variation \"%s\".' % (user_id, variation_key)))\n        return variation\n    return None", "docstring": "Determine if a user is forced into a variation for the given experiment and return that variation.\n\nArgs:\nexperiment: Object representing the experiment for which user is to be bucketed.\nuser_id: ID for the user.\n\nReturns:\nVariation in which the user with ID user_id is forced into. None if no variation.", "source": "codesearchnet"}
{"code": "def run(self, fn, args=(), kwargs=None, options=None):\n    return super(CentralStorageStrategy, self).run(fn, args, kwargs, options)", "docstring": "Run `fn` on each replica, with the given arguments.\n\nIn `CentralStorageStrategy`, `fn` is  called on each of the compute\nreplicas, with the provided \"per replica\" arguments specific to that device.\n\nArgs:\nfn: The function to run. The output must be a `tf.nest` of `Tensor`s.\nargs: (Optional) Positional arguments to `fn`.\nkwargs: (Optional) Keyword arguments to `fn`.\noptions: (Optional) An instance of `tf.distribute.RunOptions` specifying\nthe options to run `fn`.\n\nReturns:\nReturn value from running `fn`.", "source": "github-repos"}
{"code": "def get_if_not_set(self, addresses):\n        \n\n        with self._lock:\n            results = []\n            for add in addresses:\n                results.append(self._get_if_not_set(add))\n            return results", "docstring": "Returns the value at an address if it was an input to the txn but\nnever set. It returns None if that address was never set in the\nmerkle database, or if the address is not within the context.\n\nArgs:\naddresses (list of str): The full 70 character addresses.\n\nReturns:\n(list): bytes at that address but not set within the context", "source": "juraj-google-style"}
{"code": "def _OpenFileObject(self, path_spec):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n    bde_volume = pybde.volume()\n\n    bde.BDEVolumeOpen(\n        bde_volume, path_spec, file_object, resolver.Resolver.key_chain)\n    return bde_volume", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyvde.volume: BDE volume file-like object.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "def generate_output_newline(self, line='0', colorize=True):\n        \n\n        return generate_output(\n            line=line,\n            is_parent=True,\n            colorize=colorize\n        )", "docstring": "The function for generating a CLI output new line.\n\nArgs:\nline (:obj:`str`): The line number (0-4). Determines indentation.\nDefaults to '0'.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "juraj-google-style"}
{"code": "def _fix(node):\n    pri_cfg = cfg.CFG.build_cfg(node.body[0])\n    defined = cfg.Defined()\n    defined.visit(pri_cfg.entry)\n    reaching = cfg.ReachingDefinitions()\n    reaching.visit(pri_cfg.entry)\n    cfg.forward(node.body[1], cfg.Defined())\n    cfg.forward(node.body[1], cfg.ReachingDefinitions())\n    fixes.CleanStack().visit(node)\n    fixes.FixStack().visit(node.body[0])\n    fixes.CleanGrad().visit(node.body[1])\n    fixes.FixGrad().visit(node.body[1])\n    return (node, defined.exit, reaching.exit)", "docstring": "Fix the naive construction of the adjont.\n\nSee `fixes.py` for details.\n\nThis function also returns the result of reaching definitions analysis so\nthat `split` mode can use this to carry over the state from primal to\nadjoint.\n\nArgs:\nnode: A module with the primal and adjoint function definitions as returned\nby `reverse_ad`.\n\nReturns:\nnode: A module with the primal and adjoint function with additional\nvariable definitions and such added so that pushes onto the stack and\ngradient accumulations are all valid.\ndefined: The variables defined at the end of the primal.\nreaching: The variable definitions that reach the end of the primal.", "source": "codesearchnet"}
{"code": "def dump(self):\n    reading = self.reading\n    if (reading is not None):\n        reading = reading.asdict()\n    return {u'selector': str(self.selector), u'reading': reading}", "docstring": "Serialize the state of this stream walker.\n\nReturns:\ndict: The serialized state.", "source": "codesearchnet"}
{"code": "def can_fetch(self, request: Request, file=None) -> bool:\n        \n        try:\n            return self.can_fetch_pool(request)\n        except NotInPoolError:\n            pass\n\n        yield from self.fetch_robots_txt(request, file=file)\n\n        return self.can_fetch_pool(request)", "docstring": "Return whether the request can fetched.\n\nArgs:\nrequest: Request.\nfile: A file object to where the robots.txt contents are written.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def __init__(self, code, error, content, message=None):\n    \n    super(AdWordsReportError, self).__init__(\n        message if message else ('AdWords report download failed with HTTP '\n                                 'status code: %s' % code))\n    self.code = code\n    self.error = error\n    self.content = content", "docstring": "Initializes an AdWordsReportError.\n\nArgs:\ncode: The HTTP status code number that was returned.\nerror: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError\n(Python 3) describing the failure.\ncontent: The HTTP response body as a string.\n[optional]\nmessage: A user-friendly error message string. If one is not provided, a\ndefault message will be used.", "source": "juraj-google-style"}
{"code": "def wait_for_notification(self, notification_class=BaseNotification):\n    if notification_class:\n        if (notification_class is BaseNotification):\n            message = 'No notification was shown.'\n        else:\n            message = '{0} was not shown.'.format(notification_class.__name__)\n        self.wait.until((lambda _: isinstance(self.notification, notification_class)), message=message)\n        return self.notification\n    else:\n        self.wait.until((lambda _: (self.notification is None)), message='Unexpected notification shown.')", "docstring": "Wait for the specified notification to be displayed.\n\nArgs:\nnotification_class (:py:class:`BaseNotification`, optional):\nThe notification class to wait for. If `None` is specified it\nwill wait for any notification to be closed. Defaults to\n`BaseNotification`.\n\nReturns:\n:py:class:`BaseNotification`: Firefox notification.", "source": "codesearchnet"}
{"code": "def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):\n    return ((((out_max - out_min) / (data_max - data_min)) * (data - data_min)) + out_min)", "docstring": "Rescale your input data so that is ranges over integer values, which will perform better in the watershed.\n\nArgs:\ndata: 2D or 3D ndarray being rescaled\ndata_min: minimum value of input data for scaling purposes\ndata_max: maximum value of input data for scaling purposes\nout_min: minimum value of scaled data\nout_max: maximum value of scaled data\n\nReturns:\nLinearly scaled ndarray", "source": "codesearchnet"}
{"code": "def run(run_type, module, config):\n    print(' -----------------------------------------------------------------')\n    print((('   Beginning ' + run_type.lower()) + ' test suite '))\n    print(' -----------------------------------------------------------------')\n    print('')\n    summary = run_quiet(module, config)\n    print(' -----------------------------------------------------------------')\n    print((('   ' + run_type.capitalize()) + ' test suite complete '))\n    print(' -----------------------------------------------------------------')\n    print('')\n    return summary", "docstring": "Collects the analyses cases to be run and launches processes for each of\nthem.\n\nArgs:\nrun_type: A string representation of the run type (eg. verification)\nmodule: The module corresponding to the run.  Must have a run_suite function\nconfig: The configuration for the module", "source": "codesearchnet"}
{"code": "def _feed_to_dict(self, parse=True):\n    if not self.raw_feed:\n        return None\n    result = []\n    header = self.raw_feed[0]\n    for line in self.raw_feed[1:]:\n        if line and ''.join(line):\n            i = 0\n            item = {}\n            for index, column_header in enumerate(header):\n                if index < len(line):\n                    item[column_header] = self._parse_value(line[index].strip()) if parse else line[index].strip()\n                else:\n                    item[column_header] = ''\n            result.append(item)\n        else:\n            break\n    return result", "docstring": "Turns a raw feed from Google Sheets into a list of dictionaries.\n\nArgs:\nraw_feed: List of list of strings representing the feed from Google\nSheets.\n\nReturns:\nList of dictionaries with the data from the feed", "source": "github-repos"}
{"code": "def _add_to_schema(self, field_name, schema):\n        \n        super(ForeignKeyField, self)._add_to_schema(field_name, schema)\n\n        if self.get_field_value('convert_fks', default=True):\n            self.attribute = field_name.replace('_id', '')", "docstring": "Set the ``attribute`` attr to the field in question so this always\ngets deserialzed into the field name without ``_id``.\n\nArgs:\nfield_name (str): The name of the field (the attribute name being\nset in the schema).\nschema (marshmallow.Schema): The actual parent schema this field\nbelongs to.", "source": "juraj-google-style"}
{"code": "def write_entry_to_file(file_descriptor, entry_comment, entry_key):\n    escaped_key = re.sub('([^\\\\\\\\])\"', '\\\\1\\\\\"', entry_key)\n    file_descriptor.write((u'\\n' % entry_comment))\n    file_descriptor.write((u'\"%s\" = \"%s\";\\n' % (escaped_key, escaped_key)))", "docstring": "Writes a localization entry to the file\n\nArgs:\nfile_descriptor (file, instance): The file to write the entry to.\nentry_comment (str): The entry's comment.\nentry_key (str): The entry's key.", "source": "codesearchnet"}
{"code": "def _validate_config(config):\n    required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]\n    for key in required_keys:\n        if key not in config:\n            raise Error('Required key %s missing from config %s', (key, config))", "docstring": "Verifies that a config dict for an attenuator device is valid.\n\nArgs:\nconfig: A dict that is the configuration for an attenuator device.\n\nRaises:\nattenuator.Error: A config is not valid.", "source": "github-repos"}
{"code": "def checkCoordinatedReadGroups(self, results, num_consumers):\n    groups = [results[start:start + num_consumers] for start in range(0, len(results), num_consumers)]\n    incorrect_groups = []\n    for group in groups:\n        for offset in range(1, len(group)):\n            if group[0] + offset != group[offset]:\n                incorrect_groups.append(group)\n                break\n    self.assertEmpty(incorrect_groups, 'Incorrect groups: {}.\\nAll groups: {}'.format(incorrect_groups, groups))", "docstring": "Validates results from a `make_coordinted_read_dataset` dataset.\n\nEach group of `num_consumers` results should be consecutive, indicating that\nthey were produced by the same worker.\n\nArgs:\nresults: The elements produced by the dataset.\nnum_consumers: The number of consumers.", "source": "github-repos"}
{"code": "def task_ids(self):\n        \n        if not self.id:\n            raise WorkflowError('Workflow is not running.  Cannot get task IDs.')\n\n        if self.batch_values:\n            raise NotImplementedError(\"Query Each Workflow Id within the Batch Workflow for task IDs.\")\n\n        wf = self.workflow.get(self.id)\n\n        return [task['id'] for task in wf['tasks']]", "docstring": "Get the task IDs of a running workflow\n\nArgs:\nNone\n\nReturns:\nList of task IDs", "source": "juraj-google-style"}
{"code": "def module_help(self, module):\n    \n    helplist = []\n    self._render_our_module_key_flags(module, helplist)\n    return '\\n'.join(helplist)", "docstring": "Describes the key flags of a module.\n\nArgs:\nmodule: module|str, the module to describe the key flags for.\n\nReturns:\nstr, describing the key flags of a module.", "source": "juraj-google-style"}
{"code": "def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False):\n    \n    global _NAN_BLOCKS\n    if transpose:\n        n_row, n_col = n_col, n_row\n    shape = (n_row, n_col)\n    if shape not in _NAN_BLOCKS:\n        arr = np.tile(np.array(np.NaN), shape)\n        \n        _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr))\n    return _NAN_BLOCKS[shape]", "docstring": "A memory efficient way to get a block of NaNs.\n\nArgs:\npartition_class (BaseFramePartition): The class to use to put the object\nin the remote format.\nn_row(int): The number of rows.\nn_col(int): The number of columns.\ntranspose(bool): If true, swap rows and columns.\nReturns:\nObjectID of the NaN block.", "source": "juraj-google-style"}
{"code": "def forward(self, input):\n        \n        sl,bs = input.size()\n        if bs!=self.bs:\n            self.bs=bs\n            self.reset()\n        with set_grad_enabled(self.training):\n            emb = self.encoder_with_dropout(input, dropout=self.dropoute if self.training else 0)\n            emb = self.dropouti(emb)\n            raw_output = emb\n            new_hidden,raw_outputs,outputs = [],[],[]\n            for l, (rnn,drop) in enumerate(zip(self.rnns, self.dropouths)):\n                current_input = raw_output\n                with warnings.catch_warnings():\n                    warnings.simplefilter(\"ignore\")\n                    raw_output, new_h = rnn(raw_output, self.hidden[l])\n                new_hidden.append(new_h)\n                raw_outputs.append(raw_output)\n                if l != self.n_layers - 1: raw_output = drop(raw_output)\n                outputs.append(raw_output)\n\n            self.hidden = repackage_var(new_hidden)\n        return raw_outputs, outputs", "docstring": "Invoked during the forward propagation of the RNN_Encoder module.\nArgs:\ninput (Tensor): input of shape (sentence length x batch_size)\n\nReturns:\nraw_outputs (tuple(list (Tensor), list(Tensor)): list of tensors evaluated from each RNN layer without using\ndropouth, list of tensors evaluated from each RNN layer using dropouth,", "source": "juraj-google-style"}
{"code": "def _get_value(self, scalar_data_blob, dtype_enum):\n    \n    tensorflow_dtype = tf.DType(dtype_enum)\n    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)\n    return np.asscalar(buf)", "docstring": "Obtains value for scalar event given blob and dtype enum.\n\nArgs:\nscalar_data_blob: The blob obtained from the database.\ndtype_enum: The enum representing the dtype.\n\nReturns:\nThe scalar value.", "source": "juraj-google-style"}
{"code": "def cache_json(filename):\n    \n    def cache_decorator(cacheable_function):\n        @wraps(cacheable_function)\n        def cache_wrapper(*args, **kwargs):\n            path = CACHE_DIRECTORY + filename\n            check_create_folder(path)\n            if os.path.exists(path):\n                with open(path) as infile:\n                    return json.load(infile)\n            else:\n                function_output = cacheable_function(*args, **kwargs)\n                with open(path, 'w') as outfile:\n                    json.dump(function_output, outfile)\n                return function_output\n        return cache_wrapper\n    return cache_decorator", "docstring": "Caches the JSON-serializable output of the function to a given file\n\nArgs:\nfilename (str) The filename (sans directory) to store the output\n\nReturns: decorator, applicable to a function that produces JSON-serializable output", "source": "juraj-google-style"}
{"code": "def imread(img_or_path, flag='color'):\n    \n    if isinstance(img_or_path, np.ndarray):\n        return img_or_path\n    elif is_str(img_or_path):\n        flag = imread_flags[flag] if is_str(flag) else flag\n        check_file_exist(img_or_path,\n                         'img file does not exist: {}'.format(img_or_path))\n        return cv2.imread(img_or_path, flag)\n    else:\n        raise TypeError('\"img\" must be a numpy array or a filename')", "docstring": "Read an image.\n\nArgs:\nimg_or_path (ndarray or str): Either a numpy array or image path.\nIf it is a numpy array (loaded image), then it will be returned\nas is.\nflag (str): Flags specifying the color type of a loaded image,\ncandidates are `color`, `grayscale` and `unchanged`.\n\nReturns:\nndarray: Loaded image array.", "source": "juraj-google-style"}
{"code": "def get_unique_directives(ast):\n    \n    if not ast.directives:\n        return dict()\n\n    result = dict()\n    for directive_obj in ast.directives:\n        directive_name = directive_obj.name.value\n        if directive_name in ALLOWED_DUPLICATED_DIRECTIVES:\n            pass  \n        elif directive_name in result:\n            raise GraphQLCompilationError(u'Directive was unexpectedly applied twice in the same '\n                                          u'location: {} {}'.format(directive_name, ast.directives))\n        else:\n            result[directive_name] = directive_obj\n\n    return result", "docstring": "Return a dict of directive name to directive object for the given AST node.\n\nAny directives that are allowed to exist more than once on any AST node are ignored.\nFor any directives that can only exist up to once, we verify that they are not duplicated\nraising GraphQLCompilationError in case we find them more than once on the AST node.\n\nArgs:\nast: GraphQL AST node, obtained from the graphql library\n\nReturns:\ndict of string to directive object", "source": "juraj-google-style"}
{"code": "def do_put(endpoint, body, access_token):\n    \n    headers = {\"content-type\": \"application/json\", \"Authorization\": 'Bearer ' + access_token}\n    headers['User-Agent'] = get_user_agent()\n    return requests.put(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP PUT request and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\nbody (str): JSON body of information to put.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def get_quats(self) -> torch.Tensor:\n    if self._rot_mats is not None:\n        return rot_to_quat(self._rot_mats)\n    elif self._quats is not None:\n        return self._quats\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Returns the underlying rotation as a quaternion tensor.\n\nDepending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.\n\nReturns:\nThe rotation as a quaternion tensor.", "source": "github-repos"}
{"code": "def __call__(self, context):\n    \n    context._counters.increment(self.counter_name, self.delta)", "docstring": "Execute operation.\n\nArgs:\ncontext: mapreduce context as context.Context.", "source": "juraj-google-style"}
{"code": "def instance_default(self, obj):\n    return self.property.themed_default(obj.__class__, self.name, obj.themed_values())", "docstring": "Get the default value that will be used for a specific instance.\n\nArgs:\nobj (HasProps) : The instance to get the default value for.\n\nReturns:\nobject", "source": "codesearchnet"}
{"code": "def create(self, subject, displayName, issuerToken, expiration, secret):\n    check_type(subject, basestring)\n    check_type(displayName, basestring)\n    check_type(issuerToken, basestring)\n    check_type(expiration, basestring)\n    check_type(secret, basestring)\n    payload = {'sub': subject, 'name': displayName, 'iss': issuerToken, 'exp': expiration}\n    key = base64.b64decode(secret)\n    jwt_token = jwt.encode(payload, key, algorithm='HS256')\n    url = (((self._session.base_url + API_ENDPOINT) + '/') + 'login')\n    headers = {'Authorization': ('Bearer ' + jwt_token.decode('utf-8'))}\n    response = requests.post(url, headers=headers)\n    check_response_code(response, EXPECTED_RESPONSE_CODE['GET'])\n    return self._object_factory(OBJECT_TYPE, response.json())", "docstring": "Create a new guest issuer using the provided issuer token.\n\nThis function returns a guest issuer with an api access token.\n\nArgs:\nsubject(basestring): Unique and public identifier\ndisplayName(basestring): Display Name of the guest user\nissuerToken(basestring): Issuer token from developer hub\nexpiration(basestring): Expiration time as a unix timestamp\nsecret(basestring): The secret used to sign your guest issuers\n\nReturns:\nGuestIssuerToken: A Guest Issuer with a valid access token.\n\nRaises:\nTypeError: If the parameter types are incorrect\nApiError: If the webex teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def register_read_multiple(self, register_indices):\n        \n        num_regs = len(register_indices)\n        buf = (ctypes.c_uint32 * num_regs)(*register_indices)\n        data = (ctypes.c_uint32 * num_regs)(0)\n\n        \n        \n        statuses = (ctypes.c_uint8 * num_regs)(0)\n\n        res = self._dll.JLINKARM_ReadRegs(buf, data, statuses, num_regs)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return list(data)", "docstring": "Retrieves the values from the registers specified.\n\nArgs:\nself (JLink): the ``JLink`` instance\nregister_indices (list): list of registers to read\n\nReturns:\nA list of values corresponding one-to-one for each of the given\nregister indices.  The returned list of values are the values in\norder of which the indices were specified.\n\nRaises:\nJLinkException: if a given register is invalid or an error occurs.", "source": "juraj-google-style"}
{"code": "def _VerifyGradient(self, pool_func, pool_grad_func, input_sizes, ksize, strides, padding, pool_grad_grad_func=None):\n    ksize = [1] + ksize + [1]\n    strides = [1] + strides + [1]\n    total_size = np.prod(input_sizes)\n    x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)\n    with self.session() as sess:\n        with ops.device('CPU'):\n            inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n            outputs = pool_func(inputs, ksize=ksize, strides=strides, padding=padding)\n        output_vals = np.array(sess.run(outputs, {inputs: x}))\n        output_gradient_vals = np.arange(1, output_vals.size + 1, dtype=np.float32)\n        output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)\n        output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)\n        output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)\n        with ops.device('CPU'):\n            output_gradients = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)\n            expected_input_gradients = pool_grad_func(inputs, outputs, output_gradients, ksize=ksize, strides=strides, padding=padding)\n            expected_input_gradient_vals = sess.run(expected_input_gradients, {inputs: x, output_gradients: output_gradient_vals})\n            output_grad_gradients = array_ops.placeholder(dtypes.float32, shape=expected_input_gradient_vals.shape)\n            if pool_grad_grad_func is not None:\n                expected_grad_gradients = pool_grad_grad_func(inputs, outputs, output_grad_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NDHWC')\n                expected_grad_gradients_vals = sess.run(expected_grad_gradients, {inputs: x, output_grad_gradients: output_grad_grad_vals})\n        with self.test_scope():\n            outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)\n            actual_input_gradients = pool_grad_func(inputs, outputs, output_gradients, ksize=ksize, strides=strides, padding=padding)\n            if pool_grad_grad_func is not None:\n                actual_grad_gradients = pool_grad_grad_func(inputs, outputs, output_grad_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NDHWC')\n        actual = sess.run(actual_input_gradients, {inputs: x, outputs: output_vals, output_gradients: output_gradient_vals})\n        self.assertAllClose(expected_input_gradient_vals.flatten(), actual.flatten(), rtol=1e-05, atol=1e-06)\n        self.assertShapeEqual(actual, inputs)\n        if pool_grad_grad_func is not None:\n            actual_grad_gradients_vals = sess.run(actual_grad_gradients, {inputs: x, outputs: output_vals, output_grad_gradients: output_grad_grad_vals})\n            self.assertAllClose(expected_grad_gradients_vals, actual_grad_gradients_vals, rtol=0.0001, atol=1e-06)\n            self.assertShapeEqual(actual_grad_gradients_vals, outputs)", "docstring": "Verifies the output values of the pooling gradient function.\n\nArgs:\npool_func: Forward pooling function\npool_grad_func: Pooling gradient function for pool_grad_func\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\npool_grad_grad_func: Second-order gradient function, if available.", "source": "github-repos"}
{"code": "def render(self, time: float, frame_time: float):\n        \n        self.example.render(time, frame_time)", "docstring": "Renders the assigned example\n\nArgs:\ntime (float): Current time in seconds\nframe_time (float): Delta time from last frame in seconds", "source": "juraj-google-style"}
{"code": "def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        if numeric_only is not None and not numeric_only:\n            self._validate_dtypes(numeric_only=True)\n        return self._reduce_dimension(\n            self._query_compiler.median(\n                axis=axis,\n                skipna=skipna,\n                level=level,\n                numeric_only=numeric_only,\n                **kwargs\n            )\n        )", "docstring": "Computes median across the DataFrame.\n\nArgs:\naxis (int): The axis to take the median on.\nskipna (bool): True to skip NA values, false otherwise.\n\nReturns:\nThe median of the DataFrame. (Pandas series)", "source": "juraj-google-style"}
{"code": "def parse_row(schema, data):\n    \n    def parse_value(data_type, value):\n      \n      if value is not None:\n        if value == 'null':\n          value = None\n        elif data_type == 'INTEGER':\n          value = int(value)\n        elif data_type == 'FLOAT':\n          value = float(value)\n        elif data_type == 'TIMESTAMP':\n          value = datetime.datetime.utcfromtimestamp(float(value))\n        elif data_type == 'BOOLEAN':\n          value = value == 'true'\n        elif (type(value) != str):\n          \n          value = str(value)\n      return value\n\n    row = {}\n    if data is None:\n      return row\n\n    for i, (field, schema_field) in enumerate(zip(data['f'], schema)):\n      val = field['v']\n      name = schema_field['name']\n      data_type = schema_field['type']\n      repeated = True if 'mode' in schema_field and schema_field['mode'] == 'REPEATED' else False\n\n      if repeated and val is None:\n        row[name] = []\n      elif data_type == 'RECORD':\n        sub_schema = schema_field['fields']\n        if repeated:\n          row[name] = [Parser.parse_row(sub_schema, v['v']) for v in val]\n        else:\n          row[name] = Parser.parse_row(sub_schema, val)\n      elif repeated:\n        row[name] = [parse_value(data_type, v['v']) for v in val]\n      else:\n        row[name] = parse_value(data_type, val)\n\n    return row", "docstring": "Parses a row from query results into an equivalent object.\n\nArgs:\nschema: the array of fields defining the schema of the data.\ndata: the JSON row from a query result.\nReturns:\nThe parsed row object.", "source": "juraj-google-style"}
{"code": "def PrintExtractionStatusHeader(self, processing_status):\n    \n    self._output_writer.Write(\n        'Source path\\t\\t: {0:s}\\n'.format(self._source_path))\n    self._output_writer.Write(\n        'Source type\\t\\t: {0:s}\\n'.format(self._source_type))\n\n    if self._artifact_filters:\n      artifacts_string = ', '.join(self._artifact_filters)\n      self._output_writer.Write('Artifact filters\\t: {0:s}\\n'.format(\n          artifacts_string))\n    if self._filter_file:\n      self._output_writer.Write('Filter file\\t\\t: {0:s}\\n'.format(\n          self._filter_file))\n\n    self._PrintProcessingTime(processing_status)\n    self._PrintTasksStatus(processing_status)\n    self._output_writer.Write('\\n')", "docstring": "Prints the extraction status header.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"}
{"code": "def populate_native_libraries(version):\n    \n    with open(BINARY_EXT_TEMPLATE, \"r\") as file_obj:\n        template = file_obj.read()\n    contents = template.format(revision=version)\n    with open(BINARY_EXT_FILE, \"w\") as file_obj:\n        file_obj.write(contents)", "docstring": "Populates ``binary-extension.rst`` with release-specific data.\n\nArgs:\nversion (str): The current version.", "source": "juraj-google-style"}
{"code": "def _wait_for_and_process_task(self, task):\n    function_descriptor = FunctionDescriptor.from_bytes_list(task.function_descriptor_list())\n    driver_id = task.driver_id()\n    if (not task.actor_creation_id().is_nil()):\n        assert self.actor_id.is_nil()\n        self.actor_id = task.actor_creation_id()\n        self.actor_creation_task_id = task.task_id()\n        actor_class = self.function_actor_manager.load_actor_class(driver_id, function_descriptor)\n        self.actors[self.actor_id] = actor_class.__new__(actor_class)\n        self.actor_checkpoint_info[self.actor_id] = ActorCheckpointInfo(num_tasks_since_last_checkpoint=0, last_checkpoint_timestamp=int((1000 * time.time())), checkpoint_ids=[])\n    execution_info = self.function_actor_manager.get_execution_info(driver_id, function_descriptor)\n    function_name = execution_info.function_name\n    extra_data = {'name': function_name, 'task_id': task.task_id().hex()}\n    if task.actor_id().is_nil():\n        if task.actor_creation_id().is_nil():\n            title = 'ray_worker:{}()'.format(function_name)\n            next_title = 'ray_worker'\n        else:\n            actor = self.actors[task.actor_creation_id()]\n            title = 'ray_{}:{}()'.format(actor.__class__.__name__, function_name)\n            next_title = 'ray_{}'.format(actor.__class__.__name__)\n    else:\n        actor = self.actors[task.actor_id()]\n        title = 'ray_{}:{}()'.format(actor.__class__.__name__, function_name)\n        next_title = 'ray_{}'.format(actor.__class__.__name__)\n    with profiling.profile('task', extra_data=extra_data):\n        with _changeproctitle(title, next_title):\n            self._process_task(task, execution_info)\n        self.task_context.current_task_id = TaskID.nil()\n        self.task_context.task_index = 0\n        self.task_context.put_index = 1\n        if self.actor_id.is_nil():\n            self.task_driver_id = DriverID.nil()\n            ray_signal.reset()\n    self.function_actor_manager.increase_task_counter(driver_id, function_descriptor)\n    reached_max_executions = (self.function_actor_manager.get_task_counter(driver_id, function_descriptor) == execution_info.max_calls)\n    if reached_max_executions:\n        self.raylet_client.disconnect()\n        sys.exit(0)", "docstring": "Wait for a task to be ready and process the task.\n\nArgs:\ntask: The task to execute.", "source": "codesearchnet"}
{"code": "def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1):\n    block_sizes = [dim.value for dim in block_dims]\n    if any((d is None for d in block_sizes)):\n        block_sizes = block_dims_fn()\n    return array_ops.split(arg, block_sizes, axis=axis)", "docstring": "Split `x` into blocks matching `operators`'s `domain_dimension`.\n\nSpecifically, if we have a blockwise lower-triangular matrix, with block\nsizes along the diagonal `[M_j, M_j] j = 0,1,2..J`,  this method splits `arg`\non `axis` into `J` tensors, whose shape at `axis` is `M_j`.\n\nArgs:\nblock_dims: Iterable of `TensorShapes`.\nblock_dims_fn: Callable returning an iterable of `Tensor`s.\narg: `Tensor`. `arg` is split into `J` tensors.\naxis: Python `Integer` representing the axis to split `arg` on.\n\nReturns:\nA list of `Tensor`s.", "source": "github-repos"}
{"code": "def __get__(self, instance, owner):\n        \n        if not instance and owner:  \n            return self\n\n        value = self._cache.get(instance) if self._cache.get(instance) is not None else self.default\n\n        if hasattr(instance, 'prepare_' + self.alias):\n            return getattr(instance, 'prepare_' + self.alias)(value)\n\n        return value", "docstring": "Python descriptor protocol `__get__` magic method.\n\nArgs:\ninstance(object): The instance with descriptor attribute.\nowner(object): Instance class.\n\nReturns:\nThe cached value for the class instance or None.", "source": "juraj-google-style"}
{"code": "def annotate(self, records, **kwargs):\n    self.annotator_params.update(**kwargs)\n    chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)\n    chunk = []\n    for (i, record) in enumerate(records):\n        chunk.append(record)\n        if (((i + 1) % chunk_size) == 0):\n            for r in self._execute(chunk):\n                (yield r)\n            chunk = []\n    if chunk:\n        for r in self._execute(chunk):\n            (yield r)\n        chunk = []", "docstring": "Annotate a set of records with stored fields.\n\nArgs:\nrecords: A list or iterator (can be a Query object)\nchunk_size: The number of records to annotate at once (max 500).\n\nReturns:\nA generator that yields one annotated record at a time.", "source": "codesearchnet"}
{"code": "def get_jwt_dict(jwt_bu64):\n    \n    jwt_tup = get_jwt_tup(jwt_bu64)\n    try:\n        jwt_dict = json.loads(jwt_tup[0].decode('utf-8'))\n        jwt_dict.update(json.loads(jwt_tup[1].decode('utf-8')))\n        jwt_dict['_sig_sha1'] = hashlib.sha1(jwt_tup[2]).hexdigest()\n    except TypeError as e:\n        raise JwtException('Decode failed. error=\"{}\"'.format(e))\n    return jwt_dict", "docstring": "Parse Base64 encoded JWT and return as a dict.\n\n- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and\nreturns it as a dict containing Unicode strings.\n- In addition, a SHA1 hash is added to the dict for convenience.\n\nArgs:\njwt_bu64: bytes\nJWT, encoded using a a URL safe flavor of Base64.\n\nReturns:\ndict: Values embedded in and derived from the JWT.", "source": "juraj-google-style"}
{"code": "def _release(self, lease):\n    if lease.exist:\n        os.unlink(lease.path)\n        LOGGER.debug('Removed subnet lease {}'.format(lease.path))", "docstring": "Free the given lease\n\nArgs:\nlease (lago.subnet_lease.Lease): The lease to free", "source": "codesearchnet"}
{"code": "def _MergeSameId(self):\n    a_not_merged = []\n    b_not_merged = []\n    for a in self._GetIter(self.feed_merger.a_schedule):\n        try:\n            b = self._GetById(self.feed_merger.b_schedule, self._GetId(a))\n        except KeyError:\n            a_not_merged.append(a)\n            continue\n        try:\n            self._Add(a, b, self._MergeEntities(a, b))\n            self._num_merged += 1\n        except MergeError as merge_error:\n            a_not_merged.append(a)\n            b_not_merged.append(b)\n            self._ReportSameIdButNotMerged(self._GetId(a), merge_error)\n    for b in self._GetIter(self.feed_merger.b_schedule):\n        try:\n            a = self._GetById(self.feed_merger.a_schedule, self._GetId(b))\n        except KeyError:\n            b_not_merged.append(b)\n    for a in a_not_merged:\n        newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))\n        self._Add(a, None, self._Migrate(a, self.feed_merger.a_schedule, newid))\n    for b in b_not_merged:\n        newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))\n        self._Add(None, b, self._Migrate(b, self.feed_merger.b_schedule, newid))\n    self._num_not_merged_a = len(a_not_merged)\n    self._num_not_merged_b = len(b_not_merged)\n    return self._num_merged", "docstring": "Tries to merge entities based on their ids.\n\nThis tries to merge only the entities from the old and new schedules which\nhave the same id. These are added into the merged schedule. Entities which\ndo not merge or do not have the same id as another entity in the other\nschedule are simply migrated into the merged schedule.\n\nThis method is less flexible than _MergeDifferentId since it only tries\nto merge entities which have the same id while _MergeDifferentId tries to\nmerge everything. However, it is faster and so should be used whenever\npossible.\n\nThis method makes use of various methods like _Merge and _Migrate which\nare not implemented in the abstract DataSetMerger class. These method\nshould be overwritten in a subclass to allow _MergeSameId to work with\ndifferent entity types.\n\nReturns:\nThe number of merged entities.", "source": "codesearchnet"}
{"code": "def op(scalars_layout, collections=None):\n  \n  \n  import tensorflow.compat.v1 as tf\n\n  assert isinstance(scalars_layout, layout_pb2.Layout)\n  summary_metadata = metadata.create_summary_metadata()\n  return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG,\n                                   tensor=tf.constant(\n                                       scalars_layout.SerializeToString(),\n                                       dtype=tf.string),\n                                   collections=collections,\n                                   summary_metadata=summary_metadata)", "docstring": "Creates a summary that contains a layout.\n\nWhen users navigate to the custom scalars dashboard, they will see a layout\nbased on the proto provided to this function.\n\nArgs:\nscalars_layout: The scalars_layout_pb2.Layout proto that specifies the\nlayout.\ncollections: Optional list of graph collections keys. The new\nsummary op is added to these collections. Defaults to\n`[Graph Keys.SUMMARIES]`.\n\nReturns:\nA tensor summary op that writes the layout to disk.", "source": "juraj-google-style"}
{"code": "def RegisterDecompressor(cls, decompressor):\n    compression_method = decompressor.COMPRESSION_METHOD.lower()\n    if (compression_method in cls._decompressors):\n        raise KeyError('Decompressor for compression method: {0:s} already set.'.format(decompressor.COMPRESSION_METHOD))\n    cls._decompressors[compression_method] = decompressor", "docstring": "Registers a decompressor for a specific compression method.\n\nArgs:\ndecompressor (type): decompressor class.\n\nRaises:\nKeyError: if the corresponding decompressor is already set.", "source": "codesearchnet"}
{"code": "def add(reader, writer, column, start, stop, value):\n  \n  for i, row in enumerate(reader):\n    if i >= start and i <= stop:\n      row[column] = type(value)(row[column]) + value\n    writer.appendRecord(row)", "docstring": "Adds a value over a range of rows.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream object to write output data to.\ncolumn: The column of data to modify.\nstart: The first row in the range to modify.\nend: The last row in the range to modify.\nvalue: The value to add.", "source": "juraj-google-style"}
{"code": "def setValues(self, values):\n    if isinstance(values, (list, set)):\n        if any((isinstance(value, basestring) for value in values)):\n            values = list(map(str, values))\n            self._impl.setValuesStr(values, len(values))\n        elif all((isinstance(value, Real) for value in values)):\n            values = list(map(float, values))\n            self._impl.setValuesDbl(values, len(values))\n        elif all((isinstance(value, tuple) for value in values)):\n            self._impl.setValues(Utils.toTupleArray(values), len(values))\n        else:\n            raise TypeError\n    else:\n        if ((np is not None) and isinstance(values, np.ndarray)):\n            self.setValues(DataFrame.fromNumpy(values).toList())\n            return\n        Entity.setValues(self, values)", "docstring": "Set the tuples in this set. Valid only for non-indexed sets.\n\nArgs:\nvalues: A list of tuples or a :class:`~amplpy.DataFrame`.\n\nIn the case of a :class:`~amplpy.DataFrame`, the number of indexing\ncolumns of the must be equal to the arity of the set. In the case of\na list of tuples, the arity of each tuple must be equal to the arity\nof the set.\n\nFor example, considering the following AMPL entities and corresponding\nPython objects:\n\n.. code-block:: ampl\n\nset A := 1..2;\nparam p{i in A} := i+10;\nset AA;\n\nThe following is valid:\n\n.. code-block:: python\n\nA, AA = ampl.getSet('A'), ampl.getSet('AA')\nAA.setValues(A.getValues())  # AA has now the members {1, 2}", "source": "codesearchnet"}
{"code": "def _rename_if_arg_found_and_add_loss_reduction_transformer(parent, node, full_name, name, logs, arg_names=None, arg_ok_predicate=None, remove_if_ok=False, message=None):\n    for arg_name in arg_names:\n        rename_node = _rename_if_arg_found_transformer(parent, node, full_name, name, logs, arg_name, arg_ok_predicate, remove_if_ok, message)\n        node = rename_node if rename_node else node\n    return node", "docstring": "Combination of _rename_if_arg_found and _add_loss_reduction transformers.\n\nArgs:\nparent: Parent of node.\nnode: ast.Call node to maybe modify.\nfull_name: full name of function to modify\nname: name of function to modify\nlogs: list of logs to append to\narg_names: list of names of the argument to look for\narg_ok_predicate: predicate callable with the ast of the argument value,\nreturns whether the argument value is allowed.\nremove_if_ok: remove the argument if present and ok as determined by\narg_ok_predicate.\nmessage: message to print if a non-ok arg is found (and hence, the function\nis renamed to its compat.v1 version).\n\nReturns:\nnode, if it was modified, else None.", "source": "github-repos"}
{"code": "def locked_put(self, credentials):\n    (entity, _) = self.model_class.objects.get_or_create(**{self.key_name: self.key_value})\n    setattr(entity, self.property_name, credentials)\n    entity.save()", "docstring": "Write a Credentials to the Django datastore.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "codesearchnet"}
{"code": "def tool(self):\n    htablettool = self._libinput.libinput_event_tablet_tool_get_tool(self._handle)\n    return TabletTool(htablettool, self._libinput)", "docstring": "The tool that was in use during this event.\n\nIf the caller keeps a reference to a tool, the tool object will\ncompare equal to the previously obtained tool object.\n\nNote:\nPhysical tool tracking requires hardware support. If unavailable,\nlibinput creates one tool per type per tablet. See\n`Tracking unique tools`_ for more details.\nReturns:\n~libinput.define.TabletTool: The new tool triggering this event.", "source": "codesearchnet"}
{"code": "def __init__(self, parent=None, range_offset=None, range_size=None, **kwargs):\n    \n    if not range_offset or not range_size or not parent:\n      raise ValueError('Missing range offset, range size or parent value.')\n\n    super(DataRangePathSpec, self).__init__(parent=parent, **kwargs)\n    self.range_offset = range_offset\n    self.range_size = range_size", "docstring": "Initializes a path specification.\n\nNote that the data range path specification must have a parent.\n\nArgs:\nparent (Optional[PathSpec]): parent path specification.\nrange_offset (Optional[int]): start offset of the data range.\nrange_size (Optional[int]): size of the data range.\n\nRaises:\nValueError: when range offset, range offset or parent are not set.", "source": "juraj-google-style"}
{"code": "def get_chunk_size(path):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.CHUNK_SIZE", "docstring": "Get the correct chunk size for the FileSystem.\n\nArgs:\npath: string path that needs to be checked.\n\nReturns: integer size for parallelization in the FS operations.", "source": "github-repos"}
{"code": "def CreateBudget(client):\n    budget_service = client.GetService('BudgetService', version='v201809')\n    budget = {'name': ('Interplanetary Cruise App Budget \n    budget_operations = [{'operator': 'ADD', 'operand': budget}]\n    budget_id = budget_service.mutate(budget_operations)['value'][0]['budgetId']\n    return budget_id", "docstring": "Creates a budget and returns its budgetId.\n\nArgs:\nclient: An AdWordsClient instance.\n\nReturns:\nAn int budgetId for the created Budget.", "source": "codesearchnet"}
{"code": "def parse_document(text, options=0):\n    \n    encoded_text = text.encode('utf-8')\n    return _cmark.lib.cmark_parse_document(\n        encoded_text, len(encoded_text), options)", "docstring": "Parse a document and return the root node.\n\nArgs:\ntext (str): The text to parse.\noptions (int): The cmark options.\n\nReturns:\nAny: Opaque reference to the root node of the parsed syntax tree.", "source": "juraj-google-style"}
{"code": "def _extractBoldNumbers(self, out, start_line):\n    floats = []\n    for i in range(start_line, len(out.lines)):\n        if i not in out.font_attr_segs:\n            continue\n        line_attrs = out.font_attr_segs[i]\n        for begin, end, attr_value in line_attrs:\n            if attr_value == 'bold':\n                floats.append(float(out.lines[i][begin:end]))\n    return floats", "docstring": "Extract all numbers that have the bold font attribute.\n\nArgs:\nout: An instance of RichTextLines.\nstart_line: 0-based index to start from.\n\nReturns:\nA list of floats.", "source": "github-repos"}
{"code": "def __init__(self, conf_path, project_key=None, run_asyncore_thread=True):\n        \n        self.conf_path = conf_path\n\n        super(ZEOConfWrapper, self).__init__(\n            project_key=project_key,\n            run_asyncore_thread=run_asyncore_thread,\n        )", "docstring": "Initialize the object.\n\nArgs:\nconf_path (str): See :attr:`conf_path`.\nproject_key (str, default None): See :attr:`project_key`. If not\nset, the root of the database is used (this may cause\nperformace issues).\nrun_asyncore_thread (bool, default True): Run external asyncore\nthread, which handles connections to database? Default True.", "source": "juraj-google-style"}
{"code": "def initialise_site_lookup_table( self ):\n        \n        self.site_lookup = {}\n        for site in self.sites:\n            self.site_lookup[ site.number ] = site", "docstring": "Create a lookup table allowing sites in this lattice to be queried using `self.site_lookup[n]` where `n` is the identifying site numbe.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def start_raylet(self, use_valgrind=False, use_profiler=False):\n    (stdout_file, stderr_file) = self.new_log_files('raylet')\n    process_info = ray.services.start_raylet(self._redis_address, self._node_ip_address, self._raylet_socket_name, self._plasma_store_socket_name, self._ray_params.worker_path, self._temp_dir, self._ray_params.num_cpus, self._ray_params.num_gpus, self._ray_params.resources, self._ray_params.object_manager_port, self._ray_params.node_manager_port, self._ray_params.redis_password, use_valgrind=use_valgrind, use_profiler=use_profiler, stdout_file=stdout_file, stderr_file=stderr_file, config=self._config, include_java=self._ray_params.include_java, java_worker_options=self._ray_params.java_worker_options, load_code_from_local=self._ray_params.load_code_from_local)\n    assert (ray_constants.PROCESS_TYPE_RAYLET not in self.all_processes)\n    self.all_processes[ray_constants.PROCESS_TYPE_RAYLET] = [process_info]", "docstring": "Start the raylet.\n\nArgs:\nuse_valgrind (bool): True if we should start the process in\nvalgrind.\nuse_profiler (bool): True if we should start the process in the\nvalgrind profiler.", "source": "codesearchnet"}
{"code": "def check_onnxruntime_requirements(minimum_version: Version):\n    try:\n        import onnxruntime\n        ort_version = parse(onnxruntime.__version__)\n        if ort_version < ORT_QUANTIZE_MINIMUM_VERSION:\n            raise ImportError(f'We found an older version of onnxruntime ({onnxruntime.__version__}) but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\\nPlease update onnxruntime by running `pip install --upgrade onnxruntime`')\n    except ImportError:\n        raise ImportError(\"onnxruntime doesn't seem to be currently installed. Please install the onnxruntime by running `pip install onnxruntime` and relaunch the conversion.\")", "docstring": "Check onnxruntime is installed and if the installed version match is recent enough\n\nRaises:\nImportError: If onnxruntime is not installed or too old version is found", "source": "github-repos"}
{"code": "def dispatch_non_api_requests(self, request, start_response):\n    for (path_regex, dispatch_function) in self._dispatchers:\n        if path_regex.match(request.relative_url):\n            return dispatch_function(request, start_response)\n    if (request.http_method == 'OPTIONS'):\n        cors_handler = self._create_cors_handler(request)\n        if cors_handler.allow_cors_request:\n            return util.send_wsgi_response('200', [], '', start_response, cors_handler)\n    return None", "docstring": "Dispatch this request if this is a request to a reserved URL.\n\nIf the request matches one of our reserved URLs, this calls\nstart_response and returns the response body.  This also handles OPTIONS\nCORS requests.\n\nArgs:\nrequest: An ApiRequest, the request from the user.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nNone if the request doesn't match one of the reserved URLs this\nhandles.  Otherwise, returns the response body.", "source": "codesearchnet"}
{"code": "def join(self, basepath: str, *paths: str) -> str:\n    raise NotImplementedError", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\nbasepath: string path of the first component of the path\npaths: path components to be added\n\nReturns: full path after combining all the passed components", "source": "github-repos"}
{"code": "def DecryptMessage(self, encrypted_response):\n    \n    try:\n      response_comms = rdf_flows.ClientCommunication.FromSerializedString(\n          encrypted_response)\n      return self.DecodeMessages(response_comms)\n    except (rdfvalue.DecodeError, type_info.TypeValueError, ValueError,\n            AttributeError) as e:\n      raise DecodingError(\"Error while decrypting messages: %s\" % e)", "docstring": "Decrypt the serialized, encrypted string.\n\nArgs:\nencrypted_response: A serialized and encrypted string.\n\nReturns:\na Packed_Message_List rdfvalue", "source": "juraj-google-style"}
{"code": "class _Call(beam.PTransform[beam.PCollection[RequestT], beam.PCollection[ResponseT]]):\n\n    def __init__(self, caller: Caller[RequestT, ResponseT], timeout: Optional[float]=DEFAULT_TIMEOUT_SECS, should_backoff: Optional[ShouldBackOff]=None, repeater: Repeater=None, throttler: PreCallThrottler=None):\n        self._caller = caller\n        self._timeout = timeout\n        self._should_backoff = should_backoff\n        self._repeater = repeater\n        self._throttler = throttler\n\n    def expand(self, requests: beam.PCollection[RequestT]) -> beam.PCollection[ResponseT]:\n        return requests | beam.ParDo(_CallDoFn(self._caller, self._timeout, self._repeater, self._throttler))", "docstring": "(Internal-only) PTransform that invokes a remote function on each element\nof the input PCollection.\n\nThis PTransform uses a `Caller` object to invoke the actual API calls,\nand uses ``__enter__`` and ``__exit__`` to manage setup and teardown of\nclients when applicable. Additionally, a timeout value is specified to\nregulate the duration of each call, defaults to 30 seconds.\n\nArgs:\ncaller: a `Caller` object that invokes API call.\ntimeout (float): timeout value in seconds to wait for response from API.\nshould_backoff: (Optional) provides methods for backoff.\nrepeater: (Optional) provides methods to repeat requests to API.\nthrottler: (Optional) provides methods to pre-throttle a request.", "source": "github-repos"}
{"code": "def convert_timestamps_to_datetimes(ts: Iterable[Timestamp]) -> List[datetime.datetime]:\n    return [convert_timestamp_to_datetime(t) for t in ts]", "docstring": "Converts unix timestamps in seconds to a list of datetimes (UTC).\n\nExample:\n```python\n> convert_timestamps_to_datetimes([0, 1689791856])\n[datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),\ndatetime.datetime(2023, 7, 19, 18, 37, 36, tzinfo=datetime.timezone.utc)]\n\nArgs:\nts: Iterable of timestamps, in seconds.\n\nReturns:\nList of UTC datetimes.", "source": "github-repos"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    for subkey in registry_key.GetSubkeys():\n      name = subkey.name\n      if not name:\n        continue\n\n      values_dict = {}\n      values_dict['Volume'] = name\n\n      label_value = subkey.GetValueByName('_LabelFromReg')\n      if label_value:\n        values_dict['Label'] = label_value.GetDataAsObject()\n\n      if name.startswith('{'):\n        values_dict['Type'] = 'Volume'\n\n      elif name.startswith('\n        \n        values_dict['Type'] = 'Remote Drive'\n        server_name, _, share_name = name[2:].partition('\n        values_dict['Remote_Server'] = server_name\n        values_dict['Share_Name'] = '\\\\{0:s}'.format(\n            share_name.replace('\n\n      else:\n        values_dict['Type'] = 'Drive'\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = subkey.offset\n      event_data.regvalue = values_dict\n      event_data.urls = self.URLS\n\n      event = time_events.DateTimeValuesEvent(\n          subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "class FlaxTopKLogitsWarper(FlaxLogitsWarper):\n\n    def __init__(self, top_k: int, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):\n        if not isinstance(top_k, int) or top_k <= 0:\n            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')\n        self.top_k = max(top_k, min_tokens_to_keep)\n        self.filter_value = filter_value\n\n    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n        batch_size, vocab_size = scores.shape\n        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)\n        topk = min(self.top_k, scores.shape[-1])\n        topk_scores, topk_indices = lax.top_k(scores, topk)\n        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()\n        topk_scores_flat = topk_scores.flatten()\n        topk_indices_flat = topk_indices.flatten() + shift\n        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)\n        next_scores = next_scores_flat.reshape(batch_size, vocab_size)\n        return next_scores", "docstring": "[`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.\n\nArgs:\ntop_k (`int`):\nThe number of highest probability vocabulary tokens to keep for top-k-filtering.\nfilter_value (`float`, *optional*, defaults to -inf):\nAll filtered values will be set to this float value.\nmin_tokens_to_keep (`int`, *optional*, defaults to 1):\nMinimum number of tokens that cannot be filtered.", "source": "github-repos"}
{"code": "def _create_controller_info_record(self, controller_module_name):\n        \n        module = self._controller_modules[controller_module_name]\n        controller_info = None\n        try:\n            controller_info = module.get_info(\n                copy.copy(self._controller_objects[controller_module_name]))\n        except AttributeError:\n            logging.warning('No optional debug info found for controller '\n                            '%s. To provide it, implement `get_info`.',\n                            controller_module_name)\n        try:\n            yaml.dump(controller_info)\n        except TypeError:\n            logging.warning('The info of controller %s in class \"%s\" is not '\n                            'YAML serializable! Coercing it to string.',\n                            controller_module_name, self._class_name)\n            controller_info = str(controller_info)\n        return records.ControllerInfoRecord(\n            self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,\n            controller_info)", "docstring": "Creates controller info record for a particular controller type.\n\nInfo is retrieved from all the controller objects spawned from the\nspecified module, using the controller module's `get_info` function.\n\nArgs:\ncontroller_module_name: string, the name of the controller module\nto retrieve info from.\n\nReturns:\nA records.ControllerInfoRecord object.", "source": "juraj-google-style"}
{"code": "class DataCollatorSpeechSeq2SeqWithPadding:\n    processor: Any\n    decoder_start_token_id: int\n    forward_attention_mask: bool\n\n    def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:\n        model_input_name = self.processor.model_input_names[0]\n        input_features = [{model_input_name: feature[model_input_name]} for feature in features]\n        label_features = [{'input_ids': feature['labels']} for feature in features]\n        batch = self.processor.feature_extractor.pad(input_features, return_tensors='pt')\n        if self.forward_attention_mask:\n            batch['attention_mask'] = torch.LongTensor([feature['attention_mask'] for feature in features])\n        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors='pt')\n        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)\n        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():\n            labels = labels[:, 1:]\n        batch['labels'] = labels\n        return batch", "docstring": "Data collator that will dynamically pad the inputs received.\nArgs:\nprocessor ([`WhisperProcessor`])\nThe processor used for processing the data.\ndecoder_start_token_id (`int`)\nThe begin-of-sentence of the decoder.\nforward_attention_mask (`bool`)\nWhether to return attention_mask.", "source": "github-repos"}
{"code": "def query_google(point, max_distance, key):\n    \n    if not key:\n        return []\n\n    if from_cache(GG_CACHE, point, max_distance):\n        return from_cache(GG_CACHE, point, max_distance)\n\n    req = requests.get(GOOGLE_PLACES_URL % (\n        point.lat,\n        point.lon,\n        max_distance,\n        key\n    ))\n\n    if req.status_code != 200:\n        return []\n    response = req.json()\n    results = response['results']\n    \n    final_results = []\n    for local in results:\n        final_results.append({\n            'label': local['name'],\n            'distance': Point(local['geometry']['location']['lat'], local['geometry']['location']['lng'], None).distance(point),\n            \n            'types': local['types'],\n            'suggestion_type': 'GOOGLE'\n            })\n\n    google_insert_cache(point, final_results)\n    return final_results", "docstring": "Queries google maps API for a location\n\nArgs:\npoint (:obj:`Point`): Point location to query\nmax_distance (float): Search radius, in meters\nkey (str): Valid google maps api key\nReturns:\n:obj:`list` of :obj:`dict`: List of locations with the following format:\n{\n'label': 'Coffee house',\n'types': 'Commerce',\n'suggestion_type': 'GOOGLE'\n}", "source": "juraj-google-style"}
{"code": "def _ni(field, filter_value):\n        \n        valid = False\n        if field not in filter_value:\n            valid = True\n        return valid", "docstring": "Validate field **NOT IN** string or list.\n\nArgs:\nfilter_value (string | list): A string or list of values.\n\nReturns:\n(boolean): Results of validation", "source": "juraj-google-style"}
{"code": "def del_instance(self, obj):\n        \n        to_remove = set()\n        for wrkey, _obj in self.iter_instances():\n            if obj is _obj:\n                to_remove.add(wrkey)\n        for wrkey in to_remove:\n            del self[wrkey]", "docstring": "Remove any stored instance methods that belong to an object\n\nArgs:\nobj: The instance object to remove", "source": "juraj-google-style"}
{"code": "def _AddExtractionProcessStatusTableRow(self, process_status, table_view):\n    \n    used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)\n\n    sources = ''\n    if (process_status.number_of_produced_sources is not None and\n        process_status.number_of_produced_sources_delta is not None):\n      sources = '{0:d} ({1:d})'.format(\n          process_status.number_of_produced_sources,\n          process_status.number_of_produced_sources_delta)\n\n    events = ''\n    if (process_status.number_of_produced_events is not None and\n        process_status.number_of_produced_events_delta is not None):\n      events = '{0:d} ({1:d})'.format(\n          process_status.number_of_produced_events,\n          process_status.number_of_produced_events_delta)\n\n    \n\n    table_view.AddRow([\n        process_status.identifier, process_status.pid, process_status.status,\n        used_memory, sources, events, process_status.display_name])", "docstring": "Adds an extraction process status table row.\n\nArgs:\nprocess_status (ProcessStatus): processing status.\ntable_view (CLITabularTableView): table view.", "source": "juraj-google-style"}
{"code": "def _timesfm_shift_padded_seq(mask: torch.Tensor, seq: torch.Tensor) -> torch.Tensor:\n    batch_size, num_seq, feature_dim = seq.shape\n    new_mask: torch.BoolTensor = mask == 0\n    indices = new_mask.to(torch.int32).argmax(dim=1)\n    indices[~new_mask.any(dim=1)] = -1\n    idx_range = torch.arange(num_seq, device=seq.device).view(1, -1, 1).expand(batch_size, -1, feature_dim)\n    shifted_idx = (idx_range - indices[:, None, None]) % num_seq\n    shifted_seq = seq.gather(1, shifted_idx)\n    return shifted_seq", "docstring": "Shifts rows of seq based on the first 0 in each row of the mask.\n\nArgs:\nmask: mask tensor of shape [B, N]\nseq: seq tensor of shape [B, N, P]\n\nReturns:\nThe shifted sequence.", "source": "github-repos"}
{"code": "def binary_n(total_N, min_n=50):\n  \n  max_exp = np.log2(1.0 * total_N / min_n)\n  max_exp = int(np.floor(max_exp))\n  return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)]", "docstring": "Creates a list of values by successively halving the total length total_N\nuntil the resulting value is less than min_n.\n\nNon-integer results are rounded down.\n\nArgs:\ntotal_N (int):\ntotal length\nKwargs:\nmin_n (int):\nminimal length after division\n\nReturns:\nlist of integers:\ntotal_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n", "source": "juraj-google-style"}
{"code": "def getMAC(self, bType=MacType.RandomMac):\n        \n        print '%s call getMAC' % self.port\n\n        \n        if self.isPowerDown:\n            macAddr64 = self.mac\n        else:\n            if bType == MacType.FactoryMac:\n                macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:HardwareAddress')[0])\n            elif bType == MacType.HashMac:\n                macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:MACAddress')[0])\n            else:\n                macAddr64 = self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:ExtendedAddress')[0])\n\n        return int(macAddr64, 16)", "docstring": "get one specific type of MAC address\ncurrently OpenThreadWpan only supports Random MAC address\n\nArgs:\nbType: indicate which kind of MAC address is required\n\nReturns:\nspecific type of MAC address", "source": "juraj-google-style"}
{"code": "def attach_tracer(tracer_name_template=None):\n    if not _has_opentelemetry:\n        return lambda cls: cls\n\n    def decorator(cls):\n        original_init = cls.__init__\n\n        @functools.wraps(original_init)\n        def init_with_tracer(self, *args, **kwargs):\n            original_init(self, *args, **kwargs)\n            module_name = cls.__module__\n            class_name = cls.__qualname__\n            if tracer_name_template is None:\n                if module_name.startswith('transformers.'):\n                    tracer_name = f'{module_name}.{class_name}'\n                else:\n                    tracer_name = f'transformers.{module_name}.{class_name}'\n            else:\n                tracer_name = tracer_name_template.format(module=module_name, class_name=class_name)\n            self.tracer = get_tracer(tracer_name)\n        cls.__init__ = init_with_tracer\n        return cls\n    return decorator", "docstring": "Decorator that attaches a tracer to a class.\n\nThis decorator should be applied to classes that need OpenTelemetry tracing.\nIt adds a tracer attribute to the class instance that can be used by the traced decorator.\n\nArgs:\ntracer_name_template: Optional template string for the tracer name.\nIf provided, it should contain {module} which will be replaced with the class's full module path\nand {class_name} for the class name.\nIf None, a default naming scheme will be used where:\n- If the module already starts with \"transformers.\", it will use that directly\n- Otherwise, it will prepend \"transformers.\" to the module name\n\nReturns:\nClass decorator function", "source": "github-repos"}
{"code": "def parse(src, preamble_len=0, single_node=True):\n    module_node = gast.parse(src)\n    nodes = module_node.body\n    if preamble_len:\n        nodes = nodes[preamble_len:]\n    if single_node:\n        if len(nodes) != 1:\n            raise ValueError('expected exactly one node, got {}'.format(nodes))\n        return nodes[0]\n    return nodes", "docstring": "Returns the AST of given piece of code.\n\nArgs:\nsrc: Text\npreamble_len: Int, indicates leading nodes in the parsed AST which should be\ndropped.\nsingle_node: Bool, whether `src` is assumed to be represented by exactly one\nAST node.\n\nReturns:\nast.AST", "source": "github-repos"}
{"code": "def intersect(self, other):\n    intersection = Rect()\n    if lib.SDL_IntersectRect(self._ptr, self._ptr, intersection._ptr):\n        return intersection\n    else:\n        return None", "docstring": "Calculate the intersection of this rectangle and another rectangle.\n\nArgs:\nother (Rect): The other rectangle.\n\nReturns:\nRect: The intersection of this rectangle and the given other rectangle, or None if there is no such\nintersection.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if self.is_training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nInput to the layer.\nattention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nAttention mask.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings, to be added to `hidden_states`.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes of the backbone feature maps.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def __repr__(self):\n        \n        return \"ndio.remote.neuroRemote('{}', '{}')\".format(\n            self.hostname,\n            self.protocol,\n            self.meta_url,\n            self.meta_protocol\n        )", "docstring": "Return a string representation that can be used to reproduce this\ninstance. `eval(repr(this))` should return an identical copy.\n\nArguments:\nNone\n\nReturns:\nstr: Representation of reproducible instance.", "source": "juraj-google-style"}
{"code": "def validate_inference_rewrite_for_variables(graph: ops.Graph):\n    if not any((x.type == 'GuaranteeConst' for x in graph.get_operations())):\n        raise RuntimeError('No GuaranteeConst ops found in the graph after running tpu.rewrite_for_inference(...). Please check that you are using tf.get_variable() to create and access variables in your tpu computation.')", "docstring": "Validates whether rewrite_for_inference() 'worked' for variables.\n\nThe rewrite_for_inference() method is supposed to append GuaranteeConstOps\nafter ReadVariableOps, but this mechanism works only if you are using\ntf.compat.v1.get_variable() to create and access variables in your tpu\ncomputation. This validation method can be called immediately after calling\ntpu.rewrite_for_inference() to check whether GuaranteeConstOps where added\nto the graph.\n\nTypical usages:\ntpu.validate_inference_rewrite_for_variables(\ntf.compat.v1.get_default_graph())\n\ntpu.validate_inference_rewrite_for_variables(sess.graph)\n\nArgs:\ngraph: The graph which needs to be validated.\nRaises:\nRuntimeError: if validation failed.", "source": "github-repos"}
{"code": "def classes_in_module(module) -> List:\n        \n        md = module.__dict__\n        return [\n            md[c] for c in md if (\n                    isinstance(md[c], type) and\n                    issubclass(md[c], ETKModule\n                               ) and\n                    md[c].__module__ == module.__name__)\n        ]", "docstring": "Return all classes with super class ExtractionModule\n\nArgs:\nmodule:\n\nReturns: List of classes", "source": "juraj-google-style"}
{"code": "def WriteModifyTimestamp(self, timestamp):\n    if timestamp is None:\n        return True\n    self.modify_time = None\n    return self._WriteTimestamp(timestamp, self.modify_file)", "docstring": "Convenience method for writing the last modify timestamp.\n\nArgs:\ntimestamp:  An int with the number of seconds since epoch.\nIf timestamp is None, performs no action.\n\nReturns:\nA boolean indicating success of the write.", "source": "github-repos"}
{"code": "def plugins_all(self):\n    if (not self.loaded):\n        self.load_modules()\n    return get_plugins()[self.group]._filter(blacklist=self.blacklist, type_filter=self.type_filter)", "docstring": "All resulting versions of all plugins in the group filtered by ``blacklist``\n\nReturns:\ndict: Nested dictionary of plugins accessible through dot-notation.\n\nSimilar to :py:attr:`plugins`, but lowest level is a regular dictionary of\nall unfiltered plugin versions for the given plugin type and name.\n\nParent types are always included.\nChild plugins will only be included if at least one valid, non-blacklisted plugin\nis available.", "source": "codesearchnet"}
{"code": "def _powerset(iterable):\n    s = list(iterable)\n    return itertools.chain.from_iterable((itertools.combinations(s, r) for r in range(len(s) + 1)))", "docstring": "Helper for generating all possible reduction_axes arguments.\n\nExample:\npowerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)\n\nArgs:\niterable: An iterable of items to generate the powerset of.\n\nReturns:\nThe powerset of all items in iterable.", "source": "github-repos"}
{"code": "def get(self, timeout=None, tag=None):\n    with self._queue_lock:\n        while self._should_process_closures and self._queue.empty() and (tag is None or self._tagged_queue[tag].empty()):\n            if not self._closures_queued_condition.wait(timeout=timeout):\n                return None\n        if not self._should_process_closures:\n            return None\n        if tag is not None and (not self._tagged_queue[tag].empty()):\n            closure = self._tagged_queue[tag].get(block=False)\n            return closure\n        closure = self._queue.get(block=False)\n        metric_utils.monitor_int('queued_closures', self._queue.qsize())\n        assert closure.tag is None\n        assert tag is None or self._tagged_queue[tag].empty()\n        self._queue_free_slot_condition.notify()\n        self.inflight_closure_count += 1\n        return closure", "docstring": "Return a closure from the queue to be executed.\n\nIt will try to fetch an item from the queue with the given tag. If this\nqueue is empty, it will then check the global queue.\n\nArgs:\ntimeout: timeout when waiting for a closure to be put.\ntag: optional tag to specify which queue to query first before querying\nthe global queue.\n\nReturns:\na closure or None after timeout.", "source": "github-repos"}
{"code": "def _sample_action(self, constraints: Dict[(str, Constraints)], default: Sequence[tf.Tensor], prob: float=0.3) -> Sequence[tf.Tensor]:\n    ordering = self.compiler.rddl.domain.action_fluent_ordering\n    dtypes = map(rddl2tf.utils.range_type_to_dtype, self.compiler.rddl.action_range_type)\n    size = self.compiler.rddl.action_size\n    action = []\n    for (name, dtype, size, default_value) in zip(ordering, dtypes, size, default):\n        action_fluent = self._sample_action_fluent(name, dtype, size, constraints, default_value, prob)\n        action.append(action_fluent)\n    return tuple(action)", "docstring": "Samples action fluents respecting the given bound `constraints`.\n\nWith probability `prob` it chooses the action fluent default value,\nwith probability 1-`prob` it samples the fluent w.r.t. its bounds.\n\nArgs:\nconstraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.\ndefault (Sequence[tf.Tensor]): The default action fluents.\nprob (float): A probability measure.\n\nReturns:\nSequence[tf.Tensor]: A tuple of action fluents.", "source": "codesearchnet"}
{"code": "def unsubscribe(self, peer_jid):\n    self.roster.unsubscribe(aioxmpp.JID.fromstr(peer_jid).bare())", "docstring": "Asks for unsubscription\n\nArgs:\npeer_jid (str): the JID you ask for unsubscriptiion", "source": "codesearchnet"}
{"code": "def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str='htk') -> Union[float, np.ndarray]:\n    if mel_scale not in ['slaney', 'htk', 'kaldi']:\n        raise ValueError('mel_scale should be one of \"htk\", \"slaney\" or \"kaldi\".')\n    if mel_scale == 'htk':\n        return 700.0 * (np.power(10, mels / 2595.0) - 1.0)\n    elif mel_scale == 'kaldi':\n        return 700.0 * (np.exp(mels / 1127.0) - 1.0)\n    min_log_hertz = 1000.0\n    min_log_mel = 15.0\n    logstep = np.log(6.4) / 27.0\n    freq = 200.0 * mels / 3.0\n    if isinstance(mels, np.ndarray):\n        log_region = mels >= min_log_mel\n        freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel))\n    elif mels >= min_log_mel:\n        freq = min_log_hertz * np.exp(logstep * (mels - min_log_mel))\n    return freq", "docstring": "Convert frequency from mels to hertz.\n\nArgs:\nmels (`float` or `np.ndarray`):\nThe frequency, or multiple frequencies, in mels.\nmel_scale (`str`, *optional*, `\"htk\"`):\nThe mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n\nReturns:\n`float` or `np.ndarray`: The frequencies in hertz.", "source": "github-repos"}
{"code": "def _decode_doubles(message):\n    \n    binary = base64.b64decode(message)\n    return struct.unpack('<' + ('d' * (len(binary)", "docstring": "Helper for decode_qp, decodes a double array.\n\nThe double array is stored as little endian 64 bit doubles.\nThe array has then been base64 encoded. Since we are decoding we do these\nsteps in reverse.\n\nArgs:\nmessage: the double array\n\nReturns:\ndecoded double array", "source": "juraj-google-style"}
{"code": "def compile_action_preconditions_checking(self,\n            state: Sequence[tf.Tensor],\n            action: Sequence[tf.Tensor]) -> tf.Tensor:\n        \n        with self.graph.as_default():\n            with tf.name_scope('action_preconditions_checking'):\n                preconds = self.compile_action_preconditions(state, action)\n                all_preconds = tf.stack([p.tensor for p in preconds], axis=1)\n                checking = tf.reduce_all(all_preconds, axis=1)\n                return checking", "docstring": "Combines the action preconditions into an applicability checking op.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA boolean tensor for checking if `action` is application in `state`.", "source": "juraj-google-style"}
{"code": "def __get_request(self, host, soup):\n    url = (URLHelper.make_absolute(host, self.__trim_grave_accent(soup['action'])) if soup.has_attr('action') else host)\n    method_original = (soup['method'] if soup.has_attr('method') else 'get')\n    method = ('post' if (method_original.lower() == 'post') else 'get')\n    data = self.__get_form_data(soup)\n    return Request(url, method, data)", "docstring": "Build a request from the given soup form.\n\nArgs:\nhost str: The URL of the current queue item.\nsoup (obj): The BeautifulSoup form.\n\nReturns:\n:class:`nyawc.http.Request`: The new Request.", "source": "codesearchnet"}
{"code": "def close(self, cancel_pending_enqueues=False, name=None):\n    if name is None:\n        name = '%s_BarrierClose' % self._name\n    return gen_data_flow_ops.barrier_close(self._barrier_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name)", "docstring": "Closes this barrier.\n\nThis operation signals that no more new key values will be inserted in the\ngiven barrier. Subsequent InsertMany operations with new keys will fail.\nInsertMany operations that just complement already existing keys with other\ncomponents, will continue to succeed. Subsequent TakeMany operations will\ncontinue to succeed if sufficient elements remain in the barrier. Subsequent\nTakeMany operations that would block will fail immediately.\n\nIf `cancel_pending_enqueues` is `True`, all pending requests to the\nunderlying queue will also be canceled, and completing of already\nstarted values is also not acceptable anymore.\n\nArgs:\ncancel_pending_enqueues: (Optional.) A boolean, defaulting to\n`False` (described above).\nname: Optional name for the op.\n\nReturns:\nThe operation that closes the barrier.", "source": "github-repos"}
{"code": "def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_func, seed, name=None) -> DatasetV2:\n\n    def maybe_warn_on_large_rejection(accept_dist, initial_dist):\n        proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)\n        return cond.cond(math_ops.less(proportion_rejected, 0.5), lambda: accept_dist, lambda: logging_ops.Print(accept_dist, [proportion_rejected, initial_dist, accept_dist], message='Proportion of examples rejected by sampler is high: ', summarize=100, first_n=10))\n    acceptance_dist_ds = DatasetV2.zip((acceptance_dist_ds, initial_dist_ds), name=name).map(maybe_warn_on_large_rejection, name=name)\n\n    def _gather_and_copy(acceptance_prob, data):\n        if isinstance(data, tuple):\n            class_val = class_func(*data)\n        else:\n            class_val = class_func(data)\n        return (class_val, array_ops.gather(acceptance_prob, class_val), data)\n    current_probabilities_and_class_and_data_ds = DatasetV2.zip((acceptance_dist_ds, dataset), name=name).map(_gather_and_copy, name=name)\n\n    def _reject(unused_class_val, p, unused_data):\n        return random_ops.random_uniform([], seed=seed, dtype=p.dtype) < p\n    filtered_ds = current_probabilities_and_class_and_data_ds.filter(_reject, name=name)\n    return filtered_ds.map(lambda class_value, _, data: (class_value, data), name=name)", "docstring": "Filters a dataset based on per-class acceptance probabilities.\n\nArgs:\ndataset: The dataset to be filtered.\nacceptance_dist_ds: A dataset of acceptance probabilities.\ninitial_dist_ds: A dataset of the initial probability distribution, given or\nestimated.\nclass_func: A function mapping an element of the input dataset to a scalar\n`tf.int32` tensor. Values should be in `[0, num_classes)`.\nseed: (Optional.) Python integer seed for the resampler.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA dataset of (class value, data) after filtering.", "source": "github-repos"}
{"code": "def _get_unique_function_name(function_type, functions):\n    \n    function_name = function_name_base = function_type\n    count = 2\n    while function_name in functions:\n        function_name = '{}_{}'.format(function_name_base, count)\n        count += 1\n    return function_name", "docstring": "Get a unique function name.\n\nArgs:\nfunction_type(str): Name of Function. Ex) Convolution, Affine\nfunctions(OrderedDict of (str, Function)\n\nReturns: str\nA unique function name", "source": "juraj-google-style"}
{"code": "def pnum_to_processor_coordinates(mesh_shape, pnum):\n  \n  ret = []\n  for dimsize in mesh_shape.to_integer_list[::-1]:\n    ret.append(pnum % dimsize)\n    pnum \n  return ret[::-1]", "docstring": "Coordinates of a processor in the mesh.\n\nArgs:\nmesh_shape: a Shape\npnum: an integer less than len(mesh_shape)\n\nReturns:\na list of integers with length len(mesh_shape)", "source": "juraj-google-style"}
{"code": "def triangle(times: np.ndarray, amp: complex, period: float, phase: float=0) -> np.ndarray:\n    return (amp * (((- 2) * np.abs(sawtooth(times, 1, period, ((phase - (np.pi / 2)) / 2)))) + 1).astype(np.complex_))", "docstring": "Continuous triangle wave.\n\nArgs:\ntimes: Times to output wave for.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt.\nphase: Pulse phase.", "source": "codesearchnet"}
{"code": "def from_tensor(cls, tensor, name=None):\n    if isinstance(tensor, core_tf_types.Value):\n        return TensorSpec(tensor.shape, tensor.dtype, name)\n    elif isinstance(tensor, core_tf_types.Symbol):\n        return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)\n    else:\n        raise ValueError(f'`tensor` should be a tf.Tensor, but got type {type(tensor)}.')", "docstring": "Returns a `TensorSpec` that describes `tensor`.\n\n>>> tf.TensorSpec.from_tensor(tf.constant([1, 2, 3]))\nTensorSpec(shape=(3,), dtype=tf.int32, name=None)\n\nArgs:\ntensor: The `tf.Tensor` that should be described.\nname: A name for the `TensorSpec`.  Defaults to `tensor.op.name`.\n\nReturns:\nA `TensorSpec` that describes `tensor`.", "source": "github-repos"}
{"code": "def get_devices(ads, **kwargs):\n    \n\n    def _get_device_filter(ad):\n        for k, v in kwargs.items():\n            if not hasattr(ad, k):\n                return False\n            elif getattr(ad, k) != v:\n                return False\n        return True\n\n    filtered = filter_devices(ads, _get_device_filter)\n    if not filtered:\n        raise Error(\n            'Could not find a target device that matches condition: %s.' %\n            kwargs)\n    else:\n        return filtered", "docstring": "Finds a list of AndroidDevice instance from a list that has specific\nattributes of certain values.\n\nExample:\nget_devices(android_devices, label='foo', phone_number='1234567890')\nget_devices(android_devices, model='angler')\n\nArgs:\nads: A list of AndroidDevice instances.\nkwargs: keyword arguments used to filter AndroidDevice instances.\n\nReturns:\nA list of target AndroidDevice instances.\n\nRaises:\nError: No devices are matched.", "source": "juraj-google-style"}
{"code": "def _or_join(self, terms):\n        \n        from six import text_type\n\n        if isinstance(terms, (tuple, list)):\n            if len(terms) > 1:\n                return ' | '.join(text_type(t) for t in terms)\n            else:\n                return terms[0]\n        else:\n            return terms", "docstring": "Joins terms using OR operator.\n\nArgs:\nterms (list): terms to join\n\nExamples:\nself._or_join(['term1', 'term2']) -> 'term1 | term2'\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def create_pane(widgets, horizontal, parent_widget=None, compact=False, compact_spacing=2):\n    pane = (parent_widget or QtGui.QWidget())\n    type_ = (QtGui.QHBoxLayout if horizontal else QtGui.QVBoxLayout)\n    layout = type_()\n    if compact:\n        layout.setSpacing(compact_spacing)\n        layout.setContentsMargins(compact_spacing, compact_spacing, compact_spacing, compact_spacing)\n    for widget in widgets:\n        stretch = 0\n        if isinstance(widget, tuple):\n            (widget, stretch) = widget\n        if isinstance(widget, int):\n            layout.addSpacing(widget)\n        elif widget:\n            layout.addWidget(widget, stretch)\n        else:\n            layout.addStretch()\n    pane.setLayout(layout)\n    return pane", "docstring": "Create a widget containing an aligned set of widgets.\n\nArgs:\nwidgets (list of `QWidget`).\nhorizontal (bool).\nalign (str): One of:\n- 'left', 'right' (horizontal);\n- 'top', 'bottom' (vertical)\nparent_widget (`QWidget`): Owner widget, QWidget is created if this\nis not provided.\n\nReturns:\n`QWidget`", "source": "codesearchnet"}
{"code": "def AddEventAttribute(self, attribute_name, attribute_value):\n    \n    if attribute_name in self._extra_event_attributes:\n      raise KeyError('Event attribute {0:s} already set'.format(\n          attribute_name))\n\n    self._extra_event_attributes[attribute_name] = attribute_value", "docstring": "Adds an attribute that will be set on all events produced.\n\nSetting attributes using this method will cause events produced via this\nmediator to have an attribute with the provided name set with the\nprovided value.\n\nArgs:\nattribute_name (str): name of the attribute to add.\nattribute_value (str): value of the attribute to add.\n\nRaises:\nKeyError: if the event attribute is already set.", "source": "juraj-google-style"}
{"code": "def send(query, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, ttl=DEFAULT_TTL, local_only=False, timeout_s=2):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)\n    if local_only:\n        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, struct.pack('!L', LOCALHOST_ADDRESS))\n    sock.settimeout(timeout_s)\n    sock.sendto(query.encode('utf-8'), (address, port))\n    recv_queue = queue.Queue()\n\n    def _handle_responses():\n        while True:\n            try:\n                (data, address) = sock.recvfrom(MAX_MESSAGE_BYTES)\n                data = data.decode('utf-8')\n            except socket.timeout:\n                recv_queue.put(None)\n                break\n            else:\n                _LOG.debug('Multicast response to query \"%s\": %s:%s', query, address[0], data)\n                recv_queue.put((address[0], str(data)))\n    response_thread = threading.Thread(target=_handle_responses)\n    response_thread.start()\n    while response_thread.is_alive():\n        recv_tuple = recv_queue.get()\n        if (not recv_tuple):\n            break\n        (yield recv_tuple)\n    response_thread.join()", "docstring": "Sends a query to the given multicast socket and returns responses.\n\nArgs:\nquery: The string query to send.\naddress: Multicast IP address component of the socket to send to.\nport: Multicast UDP port component of the socket to send to.\nttl: TTL for multicast messages. 1 to keep traffic in-network.\ntimeout_s: Seconds to wait for responses.\n\nReturns: A set of all responses that arrived before the timeout expired.\nResponses are tuples of (sender_address, message).", "source": "codesearchnet"}
{"code": "def get_value(self, name=None):\n    raise NotImplementedError('Optional.get_value()')", "docstring": "Returns the value wrapped by this optional.\n\nIf this optional does not have a value (i.e. `self.has_value()` evaluates to\n`False`), this operation will raise `tf.errors.InvalidArgumentError` at\nruntime.\n\n>>> optional = tf.experimental.Optional.from_value(42)\n>>> print(optional.get_value())\ntf.Tensor(42, shape=(), dtype=int32)\n\nArgs:\nname: (Optional.) A name for the created operation.\n\nReturns:\nThe wrapped value.", "source": "github-repos"}
{"code": "def QA_data_tick_resample(tick, type_='1min'):\n    \n    tick = tick.assign(amount=tick.price * tick.vol)\n    resx = pd.DataFrame()\n    _temp = set(tick.index.date)\n\n    for item in _temp:\n        _data = tick.loc[str(item)]\n        _data1 = _data[time(9,\n                            31):time(11,\n                                     30)].resample(\n                                         type_,\n                                         closed='right',\n                                         base=30,\n                                         loffset=type_\n                                     ).apply(\n                                         {\n                                             'price': 'ohlc',\n                                             'vol': 'sum',\n                                             'code': 'last',\n                                             'amount': 'sum'\n                                         }\n                                     )\n\n        _data2 = _data[time(13,\n                            1):time(15,\n                                    0)].resample(\n                                        type_,\n                                        closed='right',\n                                        loffset=type_\n                                    ).apply(\n                                        {\n                                            'price': 'ohlc',\n                                            'vol': 'sum',\n                                            'code': 'last',\n                                            'amount': 'sum'\n                                        }\n                                    )\n\n        resx = resx.append(_data1).append(_data2)\n    resx.columns = resx.columns.droplevel(0)\n    return resx.reset_index().drop_duplicates().set_index(['datetime', 'code'])", "docstring": "tick采样成任意级别分钟线\n\nArguments:\ntick {[type]} -- transaction\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def _message_received(self, msg):\n        \n\n        msg = Message.from_node(msg)\n        return self.dispatch(msg)", "docstring": "Callback run when an XMPP Message is reveived.\nThis callback delivers the message to every behaviour\nthat is waiting for it. First, the aioxmpp.Message is\nconverted to spade.message.Message\n\nArgs:\nmsg (aioxmpp.Messagge): the message just received.\n\nReturns:\nlist(asyncio.Future): a list of futures of the append of the message at each matched behaviour.", "source": "juraj-google-style"}
{"code": "def visualize(\n    logdir, outdir, num_agents, num_episodes, checkpoint=None,\n    env_processes=True):\n  \n  config = utility.load_config(logdir)\n  with tf.device('/cpu:0'):\n    batch_env = utility.define_batch_env(\n        lambda: _create_environment(config, outdir),\n        num_agents, env_processes)\n    graph = utility.define_simulation_graph(\n        batch_env, config.algorithm, config)\n    total_steps = num_episodes * config.max_length\n    loop = _define_loop(graph, total_steps)\n  saver = utility.define_saver(\n      exclude=(r'.*_temporary.*', r'global_step'))\n  sess_config = tf.ConfigProto(allow_soft_placement=True)\n  sess_config.gpu_options.allow_growth = True\n  with tf.Session(config=sess_config) as sess:\n    utility.initialize_variables(\n        sess, saver, config.logdir, checkpoint, resume=True)\n    for unused_score in loop.run(sess, saver, total_steps):\n      pass\n  batch_env.close()", "docstring": "Recover checkpoint and render videos from it.\n\nArgs:\nlogdir: Logging directory of the trained algorithm.\noutdir: Directory to store rendered videos in.\nnum_agents: Number of environments to simulate in parallel.\nnum_episodes: Total number of episodes to simulate.\ncheckpoint: Checkpoint name to load; defaults to most recent.\nenv_processes: Whether to step environments in separate processes.", "source": "juraj-google-style"}
{"code": "def combine_last_two_dimensions(x):\n  \n  x_shape = common_layers.shape_list(x)\n  a, b = x_shape[-2:]\n  return tf.reshape(x, x_shape[:-2] + [a * b])", "docstring": "Reshape x so that the last two dimension become one.\n\nArgs:\nx: a Tensor with shape [..., a, b]\n\nReturns:\na Tensor with shape [..., ab]", "source": "juraj-google-style"}
{"code": "def with_contest_type(self, contest_type):\n        \n        self._validate_contest_type(contest_type)\n        if contest_type.lower() in ('by', 'by election', 'by-election'):\n            self.contest_type = 'by'\n        return self", "docstring": "Add a contest_type segment\n\nArgs:\ncontest_type (str): Invoke with ``contest_type='by'`` or\n``contest_type='by-election'`` to add a 'by' segment to the\nballot_id. Invoking with ``contest_type='election'`` is valid\nsyntax but has no effect.\n\nReturns:\nIdBuilder\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def _ParseValue(value, index, arg, metadata):\n    parse_fn = parser.DefaultParseValue\n    parse_fns = metadata.get(decorators.FIRE_PARSE_FNS)\n    if parse_fns:\n        default = parse_fns['default']\n        positional = parse_fns['positional']\n        named = parse_fns['named']\n        if index is not None and 0 <= index < len(positional):\n            parse_fn = positional[index]\n        elif arg in named:\n            parse_fn = named[arg]\n        elif default is not None:\n            parse_fn = default\n    return parse_fn(value)", "docstring": "Parses value, a string, into the appropriate type.\n\nThe function used to parse value is determined by the remaining arguments.\n\nArgs:\nvalue: The string value to be parsed, typically a command line argument.\nindex: The index of the value in the function's argspec.\narg: The name of the argument the value is being parsed for.\nmetadata: Metadata about the function, typically from Fire decorators.\nReturns:\nvalue, parsed into the appropriate type for calling a function.", "source": "github-repos"}
{"code": "def ParseSmsRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    sms_read = self._GetRowValue(query_hash, row, 'read')\n    sms_type = self._GetRowValue(query_hash, row, 'type')\n\n    event_data = AndroidSMSEventData()\n    event_data.address = self._GetRowValue(query_hash, row, 'address')\n    event_data.body = self._GetRowValue(query_hash, row, 'body')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.sms_read = self.SMS_READ.get(sms_read, 'UNKNOWN')\n    event_data.sms_type = self.SMS_TYPE.get(sms_type, 'UNKNOWN')\n\n    timestamp = self._GetRowValue(query_hash, row, 'date')\n    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an SMS row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent, discord_token, discord_client_id):\n        \n        super(Frame, self).__init__(parent)\n\n        logger.debug(\"Initialising frame\")\n\n        \n        statusbar = StatusBar(self)\n        statusbar.grid(column=0, row=1, sticky=\"W E S\")\n\n        \n        nav = ttk.Notebook(self)\n        module_frame = ModuleFrame(nav)\n        nav.add(GlobalFrame(nav, discord_token, discord_client_id, module_frame, statusbar), text=\"Global\")\n        nav.add(module_frame, text=\"Modules\")\n        nav.grid(column=0, row=0, sticky=\"W E N S\")\n\n        def on_closing():\n            \n            try:\n                from ._client import client\n                if client.loop:\n                    asyncio.run_coroutine_threadsafe(client.logout(), client.loop)\n            except RuntimeError:\n                pass\n            except Exception as e:\n                logger.exception(e)\n\n            parent.destroy()\n            import sys\n            sys.exit(0)\n\n        parent.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n        \n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(0, weight=1)\n\n        \n        logger.info(\"Welcome to Modis v{} ({})\".format(datatools.version, datatools.version_nickname))\n        \n        state, response = datatools.get_compare_version()\n        logger.info(\"{}\\n\".format(response))", "docstring": "Create a new main window frame.\n\nArgs:\nparent: A tk or ttk object", "source": "juraj-google-style"}
{"code": "def save(self, new_export_dir=None):\n    is_input_text_proto = file_io.file_exists(file_io.join(compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)))\n    if not new_export_dir:\n        new_export_dir = self._export_dir\n    if is_input_text_proto:\n        path = file_io.join(compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))\n        file_io.write_string_to_file(path, str(self._saved_model))\n    else:\n        path = file_io.join(compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n        file_io.write_string_to_file(path, self._saved_model.SerializeToString(deterministic=True))\n    tf_logging.info('SavedModel written to: %s', compat.as_text(path))", "docstring": "Saves the updated `SavedModel`.\n\nArgs:\nnew_export_dir: Path where the updated `SavedModel` will be saved. If\nNone, the input `SavedModel` will be overriden with the updates.\n\nRaises:\nerrors.OpError: If there are errors during the file save operation.", "source": "github-repos"}
{"code": "async def remove(self, *, node_id: str, force: bool = False) -> Mapping[str, Any]:\n        \n\n        params = {\"force\": force}\n\n        response = await self.docker._query_json(\n            \"nodes/{node_id}\".format(node_id=node_id), method=\"DELETE\", params=params\n        )\n        return response", "docstring": "Remove a node from a swarm.\n\nArgs:\nnode_id: The ID or name of the node", "source": "juraj-google-style"}
{"code": "def prepare_image_transforms(element, image_columns):\n    import base64\n    import cStringIO\n    from PIL import Image\n    from tensorflow.python.lib.io import file_io as tf_file_io\n    from apache_beam.metrics import Metrics\n    img_error_count = Metrics.counter('main', 'ImgErrorCount')\n    img_missing_count = Metrics.counter('main', 'ImgMissingCount')\n    for name in image_columns:\n        uri = element[name]\n        if (not uri):\n            img_missing_count.inc()\n            continue\n        try:\n            with tf_file_io.FileIO(uri, 'r') as f:\n                img = Image.open(f).convert('RGB')\n        except Exception as e:\n            logging.exception('Error processing image %s: %s', uri, str(e))\n            img_error_count.inc()\n            return\n        output = cStringIO.StringIO()\n        img.save(output, 'jpeg')\n        element[name] = base64.urlsafe_b64encode(output.getvalue())\n    return element", "docstring": "Replace an images url with its jpeg bytes.\n\nArgs:\nelement: one input row, as a dict\nimage_columns: list of columns that are image paths\n\nReturn:\nelement, where each image file path has been replaced by a base64 image.", "source": "codesearchnet"}
{"code": "def get_function_def(self, name):\n    if is_oss:\n        with c_api_util.tf_buffer() as buffer_:\n            pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)\n            proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n        function_def = function_pb2.FunctionDef()\n        function_def.ParseFromString(proto_data)\n    else:\n        function_def = pywrap_tfe.TFE_ContextGetFunctionDefNoSerialization(self._handle, name)\n    return function_def", "docstring": "Get a function definition from the context.\n\nArgs:\nname: function signature name.\n\nReturns:\nThe requested FunctionDef.\n\nRaises:\ntf.errors.NotFoundError: if name is not the name of a registered function.", "source": "github-repos"}
{"code": "def _NthElementGrad(op: ops.Operation, grad):\n    input = op.inputs[0]\n    output = op.outputs[0]\n    indicators = math_ops.cast(math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)\n    grad = array_ops.expand_dims(grad, -1)\n    num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)\n    return [math_ops.divide(indicators, num_selected) * grad, None]", "docstring": "Return the gradients for NthElement.\n\nArgs:\nop: The NthElementOp for which we need to generate gradients.\ngrad: Tensor. The gradients passed to the NthElementOp\n\nReturns:\nA list of two tensors, the first being the gradient w.r.t. the input,\nthe second being the gradient w.r.t. the N (None).", "source": "github-repos"}
{"code": "def get_updated(node):\n  \n  if isinstance(node, gast.Assign):\n    return set.union(*(_get_target(target)\n                       for target in node.targets))\n  elif isinstance(node, (gast.For, gast.AugAssign)):\n    return _get_target(node.target)\n  elif isinstance(node, gast.arguments):\n    targets = set(arg.id for arg in node.args + node.kwonlyargs)\n    if node.vararg:\n      targets.add(node.vararg.id)\n    if node.kwarg:\n      targets.add(node.kwarg.id)\n    return targets\n  else:\n    return set()", "docstring": "Return the variable names created or mutated by this statement.\n\nThis function considers assign statements, augmented assign statements, and\nthe targets of for loops, as well as function arguments.\n\nFor example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and\n`y`, `for i in range(x)` will return `i`, etc.\n\nArgs:\nnode: An AST node\n\nReturns:\nA set of variable names (strings) of all the variables created or mutated.", "source": "juraj-google-style"}
{"code": "def get_collections(self, unit, names=None, merge=False, sampling_rate=None, **entities):\n    nodes = self.get_nodes(unit, entities)\n    var_sets = []\n    for n in nodes:\n        var_set = list(n.variables.values())\n        var_set = [v for v in var_set if v.matches_entities(entities)]\n        if (names is not None):\n            var_set = [v for v in var_set if (v.name in names)]\n        if (unit != 'run'):\n            var_set = [v.filter(entities) for v in var_set]\n        var_sets.append(var_set)\n    if merge:\n        var_sets = [list(chain(*var_sets))]\n    results = []\n    for vs in var_sets:\n        if (not vs):\n            continue\n        if (unit == 'run'):\n            vs = clc.BIDSRunVariableCollection(vs, sampling_rate)\n        else:\n            vs = clc.BIDSVariableCollection(vs)\n        results.append(vs)\n    if merge:\n        return (results[0] if results else None)\n    return results", "docstring": "Retrieve variable data for a specified level in the Dataset.\n\nArgs:\nunit (str): The unit of analysis to return variables for. Must be\none of 'run', 'session', 'subject', or 'dataset'.\nnames (list): Optional list of variables names to return. If\nNone, all available variables are returned.\nmerge (bool): If True, variables are merged across all observations\nof the current unit. E.g., if unit='subject' and return_type=\n'collection', variablesfrom all subjects will be merged into a\nsingle collection. If False, each observation is handled\nseparately, and the result is returned as a list.\nsampling_rate (int, str): If unit='run', the sampling rate to\npass onto the returned BIDSRunVariableCollection.\nentities: Optional constraints used to limit what gets returned.\n\nReturns:", "source": "codesearchnet"}
{"code": "def coords(self):\n    if (self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}):\n        raise AttributeError(_wrong_prop.format(self.type))\n    x = self._libinput.libinput_event_touch_get_x(self._handle)\n    y = self._libinput.libinput_event_touch_get_y(self._handle)\n    return (x, y)", "docstring": "The current absolute coordinates of the touch event,\nin mm from the top left corner of the device.\n\nTo get the corresponding output screen coordinates, use\n:meth:`transform_coords`.\n\nFor events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,\n:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this property\nraises :exc:`AttributeError`.\n\nReturns:\n(float, float): The current absolute (x, y) coordinates.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def get_intra_op_parallelism_threads():\n    return context.context().intra_op_parallelism_threads", "docstring": "Get number of threads used within an individual op for parallelism.\n\nCertain operations like matrix multiplication and reductions can utilize\nparallel threads for speed ups. A value of 0 means the system picks an\nappropriate number.\n\nReturns:\nNumber of parallel threads", "source": "github-repos"}
{"code": "def limit_string_length(string, max_len=50):\n    if max_len is None or len(string) <= max_len:\n        return string\n    else:\n        return '...' + string[len(string) - max_len:]", "docstring": "Limit the length of input string.\n\nArgs:\nstring: Input string.\nmax_len: (int or None) If int, the length limit. If None, no limit.\n\nReturns:\nPossibly length-limited string.", "source": "github-repos"}
{"code": "def __init__(self, argv_or_options, command_line=False):\n    argument_parser = make_parser()\n    if command_line:\n        assert isinstance(argv_or_options, list)\n        options = argument_parser.parse_args(argv_or_options)\n    else:\n        if isinstance(argv_or_options, list):\n            raise TypeError('Do not construct an Options object directly; call Options.create() instead.')\n        options = argv_or_options\n    for name, default in _LIBRARY_ONLY_OPTIONS.items():\n        if not hasattr(options, name):\n            setattr(options, name, default)\n    names = set(vars(options))\n    opt_map = {k: v.option_strings[-1] for k, v in argument_parser.actions.items() if v.option_strings}\n    try:\n        Postprocessor(names, opt_map, options, self).process()\n    except PostprocessingError as e:\n        if command_line:\n            argument_parser.error(str(e))\n        else:\n            raise", "docstring": "Parse and encapsulate the configuration options.\n\nAlso sets up some basic logger configuration.\n\nIMPORTANT: If creating an Options object from code, do not construct it\ndirectly! Call Options.create() instead.\n\nArgs:\nargv_or_options: Either sys.argv[1:] (sys.argv[0] is the main script), or\nalready parsed options object returned by ArgumentParser.parse_args.\ncommand_line: Set this to true when argv_or_options == sys.argv[1:].\n\nRaises:\nsys.exit(2): bad option or input filenames.", "source": "github-repos"}
{"code": "def query_band(self, value):\n        \n\n        self._query_band = value\n        \n        if value is None:\n            try:\n                del self._connectionXML.attrib['query-band-spec']\n            except KeyError:\n                pass\n        else:\n            self._connectionXML.set('query-band-spec', value)", "docstring": "Set the connection's query_band property.\n\nArgs:\nvalue:  New query_band value. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def _ParseRecord(self, parser_mediator, page_data, record_offset):\n    record_header_map = self._GetDataTypeMap('binarycookies_record_header')\n    try:\n        record_header = self._ReadStructureFromByteStream(page_data[record_offset:], record_offset, record_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map record header data at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))\n    event_data = SafariBinaryCookieEventData()\n    event_data.flags = record_header.flags\n    if record_header.url_offset:\n        data_offset = (record_offset + record_header.url_offset)\n        event_data.url = self._ParseCString(page_data, data_offset)\n    if record_header.name_offset:\n        data_offset = (record_offset + record_header.name_offset)\n        event_data.cookie_name = self._ParseCString(page_data, data_offset)\n    if record_header.path_offset:\n        data_offset = (record_offset + record_header.path_offset)\n        event_data.path = self._ParseCString(page_data, data_offset)\n    if record_header.value_offset:\n        data_offset = (record_offset + record_header.value_offset)\n        event_data.cookie_value = self._ParseCString(page_data, data_offset)\n    if record_header.creation_time:\n        date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=record_header.creation_time)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    if record_header.expiration_time:\n        date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=record_header.expiration_time)\n    else:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    for plugin in self._cookie_plugins:\n        if parser_mediator.abort:\n            break\n        if (event_data.cookie_name != plugin.COOKIE_NAME):\n            continue\n        try:\n            plugin.UpdateChainAndProcess(parser_mediator, cookie_name=event_data.cookie_name, cookie_data=event_data.cookie_value, url=event_data.url)\n        except Exception as exception:\n            parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse cookie with error: {1!s}'.format(plugin.NAME, exception))", "docstring": "Parses a record from the page data.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\npage_data (bytes): page data.\nrecord_offset (int): offset of the record relative to the start\nof the page.\n\nRaises:\nParseError: when the record cannot be parsed.", "source": "codesearchnet"}
{"code": "def createThread(parent, worker, deleteWorkerLater=False):\n    \n    thread = QtCore.QThread(parent)\n    thread.started.connect(worker.doWork)\n    worker.finished.connect(thread.quit)\n    if deleteWorkerLater:\n        thread.finished.connect(worker.deleteLater)\n\n    worker.moveToThread(thread)\n    worker.setParent(parent)\n    return thread", "docstring": "Create a new thread for given worker.\n\nArgs:\nparent (QObject): parent of thread and worker.\nworker (ProgressWorker): worker to use in thread.\ndeleteWorkerLater (bool, optional): delete the worker if thread finishes.\n\nReturns:\nQThread", "source": "juraj-google-style"}
{"code": "def download(url, fname=None):\n    \n    \n    if fname is None:\n        fname = url.split('/')[-1]\n\n    \n    with contextlib.closing(requests.get(url, stream=True)) as r:\n        try:\n            r.raise_for_status()\n        except requests.exceptions.HTTPError as error:\n            print('Error connecting to URL: \"{}\"'.format(url))\n            print(r.text)\n            raise error\n\n        with open(fname, 'wb') as f:\n            shutil.copyfileobj(r.raw, f)\n\n    return fname", "docstring": "Downloads a file.\n\nArgs:\nurl (str): The URL to download.\nfname (Optional[str]): The filename to store the downloaded file in. If\n`None`, take the filename from the URL. Defaults to `None`.\n\nReturns:\nThe filename the URL was downloaded to.\n\nRaises:\nrequests.exceptions.HTTPError: There was a problem connecting to the\nURL.", "source": "juraj-google-style"}
{"code": "def set_logical_cpu_devices(self, num_cpus, prefix=''):\n    server_def = self._server_def or self._collective_ops_server_def\n    local_prefix = ['/device']\n    if server_def is not None:\n        local_prefix.append('/job:%s/replica:0/task:%d' % (server_def.job_name, server_def.task_index))\n    logical_local_devices = [d for d in self.list_logical_devices('CPU') if d.name.startswith(tuple(local_prefix))]\n    self.ensure_initialized()\n    if len(logical_local_devices) > 1:\n        raise RuntimeError('Virtual CPUs already set, cannot modify again.')\n    pywrap_tfe.TFE_SetLogicalCpuDevices(self._context_handle, num_cpus, prefix)\n    self._initialize_logical_devices()", "docstring": "Set virtual CPU devices in context.\n\nIf virtual CPU devices are already configured at context initialization\nby tf.config.set_logical_device_configuration(), this method should not be\ncalled.\n\nArgs:\nnum_cpus: Number of virtual CPUs.\nprefix: Device name prefix.\n\nRaises:\nRuntimeError: If virtual CPUs are already configured at context\ninitialization.", "source": "github-repos"}
{"code": "def from_string(string):\n        \n        lines = string.split(\"\\n\")\n        toks = lines[0].split()\n        lengths = [float(i) for i in toks]\n        toks = lines[1].split()\n        angles = [float(i) for i in toks[0:3]]\n        latt = Lattice.from_lengths_and_angles(lengths, angles)\n        sp = []\n        coords = []\n        for l in lines[4:]:\n            m = re.match(\n                r\"\\d+\\s+(\\w+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\\s+([0-9\\-\\.]+)\",\n                l.strip())\n            if m:\n                sp.append(m.group(1))\n                coords.append([float(m.group(i)) for i in range(2, 5)])\n        return Cssr(Structure(latt, sp, coords))", "docstring": "Reads a string representation to a Cssr object.\n\nArgs:\nstring (str): A string representation of a CSSR.\n\nReturns:\nCssr object.", "source": "juraj-google-style"}
{"code": "def distance(self, other):\n    return np.linalg.norm((other.coords - self.coords))", "docstring": "Get distance between two sites.\n\nArgs:\nother: Other site.\n\nReturns:\nDistance (float)", "source": "codesearchnet"}
{"code": "def template_file(task: Task, template: str, path: str, jinja_filters: FiltersDict=None, **kwargs: Any) -> Result:\n    jinja_filters = (jinja_filters or {} or task.nornir.config.jinja2.filters)\n    text = jinja_helper.render_from_file(template=template, path=path, host=task.host, jinja_filters=jinja_filters, **kwargs)\n    return Result(host=task.host, result=text)", "docstring": "Renders contants of a file with jinja2. All the host data is available in the template\n\nArguments:\ntemplate: filename\npath: path to dir with templates\njinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters\n**kwargs: additional data to pass to the template\n\nReturns:\nResult object with the following attributes set:\n* result (``string``): rendered string", "source": "codesearchnet"}
{"code": "def _VerifyHMAC(self, comms=None):\n    \n    \n    if self.hmac_type == \"SIMPLE_HMAC\":\n      msg = comms.encrypted\n      digest = comms.hmac\n    elif self.hmac_type == \"FULL_HMAC\":\n      msg = b\"\".join([\n          comms.encrypted, comms.encrypted_cipher,\n          comms.encrypted_cipher_metadata,\n          comms.packet_iv.SerializeToString(),\n          struct.pack(\"<I\", comms.api_version)\n      ])\n      digest = comms.full_hmac\n    else:\n      raise DecryptionError(\"HMAC type no supported.\")\n\n    try:\n      rdf_crypto.HMAC(self.cipher.hmac_key).Verify(msg, digest)\n    except rdf_crypto.VerificationError as e:\n      raise DecryptionError(\"HMAC verification failed: %s\" % e)\n\n    return True", "docstring": "Verifies the HMAC.\n\nThis method raises a DecryptionError if the received HMAC does not\nverify. If the HMAC verifies correctly, True is returned.\n\nArgs:\ncomms: The comms RdfValue to verify.\n\nRaises:\nDecryptionError: The HMAC did not verify.\n\nReturns:\nTrue", "source": "juraj-google-style"}
{"code": "def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):\n    \n    if isinstance(value, (int, float)):\n        return unitized(value, default_unit, base)\n\n    if value is None:\n        return None\n\n    try:\n        if value[-1].lower() == \"b\":\n            \n            value = value[:-1]\n\n        unit = value[-1:].lower()\n        if unit.isdigit():\n            unit = default_unit\n\n        else:\n            value = value[:-1]\n\n        return unitized(to_number(float, value), unit, base)\n\n    except (IndexError, TypeError, ValueError):\n        return None", "docstring": "Convert `value` to bytes, accepts notations such as \"4k\" to mean 4096 bytes\n\nArgs:\nvalue (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS\ndefault_unit (str | unicode | None): Default unit to use for unqualified values\nbase (int): Base to use (usually 1024)\n\nReturns:\n(int | None): Deduced bytesize value, if possible", "source": "juraj-google-style"}
{"code": "def unbatch():\n\n    def _apply_fn(dataset):\n        return dataset.unbatch()\n    return _apply_fn", "docstring": "Splits elements of a dataset into multiple elements on the batch dimension.\n\nFor example, if elements of the dataset are shaped `[B, a0, a1, ...]`,\nwhere `B` may vary for each input element, then for each element in the\ndataset, the unbatched dataset will contain `B` consecutive elements\nof shape `[a0, a1, ...]`.\n\n```python\n# NOTE: The following example uses `{ ... }` to represent the contents\n# of a dataset.\na = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }\n\na.unbatch() == {\n'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}\n```\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def get_variable_scope_name(value):\n    value = getattr(value, 'variable_scope', value)\n    if isinstance(value, tf.VariableScope):\n        return value.name\n    elif isinstance(value, six.string_types):\n        return value\n    else:\n        raise ValueError('Not a variable scope: {}'.format(value))", "docstring": "Returns the name of the variable scope indicated by the given value.\n\nArgs:\nvalue: String, variable scope, or object with `variable_scope` attribute\n(e.g., Sonnet module).\n\nReturns:\nThe name (a string) of the corresponding variable scope.\n\nRaises:\nValueError: If `value` does not identify a variable scope.", "source": "codesearchnet"}
{"code": "def vstack(tup):\n    \n    \n    arrays = list(tup)\n    for i in range(len(arrays)):\n        if arrays[i].ndim is 1:\n            arrays[i] = arrays[i][np.newaxis, :]\n    return concatenate(tup, axis=0)", "docstring": "Stack arrays in sequence vertically (row wise),\nhandling ``RemoteArray`` and ``DistArray`` without moving data.\n\nArgs:\ntup (sequence of array_like)\n\nReturns:\nres: `ndarray`, if inputs were all local\n`RemoteArray`, if inputs were all on the same remote engine\n`DistArray`, if inputs were already scattered on different engines", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    else:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks. A sequence has the\nfollowing format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def add_review(self, reviewer, product, review, date=None):\n    if (not isinstance(reviewer, self._reviewer_cls)):\n        raise TypeError(\"Type of given reviewer isn't acceptable:\", reviewer, ', expected:', self._reviewer_cls)\n    elif (not isinstance(product, self._product_cls)):\n        raise TypeError(\"Type of given product isn't acceptable:\", product, ', expected:', self._product_cls)\n    r = self._review_cls(review, date=date)\n    self.graph.add_edge(reviewer, product, review=r)\n    return r", "docstring": "Add a new review from a given reviewer to a given product.\n\nArgs:\nreviewer: an instance of Reviewer.\nproduct: an instance of Product.\nreview: a float value.\ndate: date the review issued.\n\nReturns:\nthe added new review object.\n\nRaises:\nTypeError: when given reviewer and product aren't instance of\nspecified reviewer and product class when this graph is constructed.", "source": "codesearchnet"}
{"code": "def session_end_pb(status, end_time_secs=None):\n    if (end_time_secs is None):\n        end_time_secs = time.time()\n    session_end_info = plugin_data_pb2.SessionEndInfo(status=status, end_time_secs=end_time_secs)\n    return _summary(metadata.SESSION_END_INFO_TAG, plugin_data_pb2.HParamsPluginData(session_end_info=session_end_info))", "docstring": "Constructs a SessionEndInfo protobuffer.\n\nCreates a summary that contains status information for a completed\ntraining session. Should be exported after the training session is completed.\nOne such summary per training session should be created. Each should have\na different run.\n\nArgs:\nstatus: A tensorboard.hparams.Status enumeration value denoting the\nstatus of the session.\nend_time_secs: float. The time to use as the session end time. Represented\nas seconds since the unix epoch. If None uses the current time.\n\nReturns:\nThe summary protobuffer mentioned above.", "source": "codesearchnet"}
{"code": "def calc_shape_step(self, stat_names, time):\n        \n        ti = np.where(self.times == time)[0][0]\n        props = regionprops(self.masks[ti], self.timesteps[ti])[0]\n        shape_stats = []\n        for stat_name in stat_names:\n            if \"moments_hu\" in stat_name:\n                hu_index = int(stat_name.split(\"_\")[-1])\n                hu_name = \"_\".join(stat_name.split(\"_\")[:-1])\n                hu_val = np.log(props[hu_name][hu_index])\n                if np.isnan(hu_val):\n                    shape_stats.append(0)\n                else:\n                    shape_stats.append(hu_val)\n            else:\n                shape_stats.append(props[stat_name])\n        return shape_stats", "docstring": "Calculate shape statistics for a single time step\n\nArgs:\nstat_names: List of shape statistics calculated from region props\ntime: Time being investigated\n\nReturns:\nList of shape statistics", "source": "juraj-google-style"}
{"code": "def callEventWaitAndGetRpc(self, callback_id, event_name, timeout_sec):\n    timeout_ms = int(timeout_sec * 1000)\n    try:\n        return self._event_client.eventWaitAndGet(callback_id, event_name, timeout_ms)\n    except Exception as e:\n        if TIMEOUT_ERROR_MESSAGE in str(e):\n            raise errors.CallbackHandlerTimeoutError(self._device, f'Timed out after waiting {timeout_sec}s for event \"{event_name}\" triggered by {self._method_name} ({self.callback_id}).') from e\n        raise", "docstring": "Waits and returns an existing CallbackEvent for the specified identifier.\n\nThis function calls snippet lib's eventWaitAndGet RPC.\n\nArgs:\ncallback_id: str, the callback identifier.\nevent_name: str, the callback name.\ntimeout_sec: float, the number of seconds to wait for the event.\n\nReturns:\nThe event dictionary.\n\nRaises:\nerrors.CallbackHandlerTimeoutError: The expected event does not occur\nwithin the time limit.", "source": "github-repos"}
{"code": "def GetDefaultToken(token):\n    if (token is None):\n        token = default_token\n    if (not isinstance(token, access_control.ACLToken)):\n        raise access_control.UnauthorizedAccess('Token is not properly specified. It should be an instance of grr.lib.access_control.ACLToken()')\n    return token", "docstring": "Returns the provided token or the default token.\n\nArgs:\ntoken: A token or None.\n\nRaises:\naccess_control.UnauthorizedAccess: no token was provided.", "source": "codesearchnet"}
{"code": "def _wrap_and_check_metrics(self, metrics):\n    if not isinstance(metrics, dict):\n        metrics = {self.METRICS_NAME: metrics}\n    outputs = {}\n    for key, value in metrics.items():\n        if isinstance(value, tuple):\n            metric_val, metric_op = value\n        else:\n            metric_val = value.result()\n            assert len(value.updates) == 1\n            metric_op = value.updates[0]\n        key = self._check_output_key(key, self.METRICS_NAME)\n        key = self._prefix_key(key, self.METRICS_NAME)\n        val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX\n        op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX\n        if not isinstance(metric_val, tensor.Tensor):\n            raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))\n        if not (tensor_util.is_tensor(metric_op) or isinstance(metric_op, ops.Operation)):\n            raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))\n        metric_op_tensor = metric_op\n        if not isinstance(metric_op, tensor.Tensor):\n            with ops.control_dependencies([metric_op]):\n                metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')\n        outputs[val_name] = metric_val\n        outputs[op_name] = metric_op_tensor\n    return outputs", "docstring": "Handle the saving of metrics.\n\nMetrics is either a tuple of (value, update_op), or a dict of such tuples.\nHere, we separate out the tuples and create a dict with names to tensors.\n\nArgs:\nmetrics: Dict of metric results keyed by name.\nThe values of the dict can be one of the following:\n(1) instance of `Metric` class.\n(2) (metric_value, update_op) tuples, or a single tuple.\nmetric_value must be a Tensor, and update_op must be a Tensor or Op.\n\nReturns:\ndict of output_names to tensors\n\nRaises:\nValueError: if the dict key is not a string, or the metric values or ops\nare not tensors.", "source": "github-repos"}
{"code": "def file(self, owner=None, **kwargs):\n    return File(self.tcex, owner=owner, **kwargs)", "docstring": "Create the File TI object.\n\nArgs:\nowner:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def _to_ascii(s):\n    \n    \n    from six import text_type, binary_type\n    if isinstance(s, text_type):\n        ascii_ = s.encode('ascii', 'ignore')\n    elif isinstance(s, binary_type):\n        ascii_ = s.decode('utf-8').encode('ascii', 'ignore')\n    else:\n        raise Exception('Unknown text type - {}'.format(type(s)))\n    return ascii_", "docstring": "Converts given string to ascii ignoring non ascii.\nArgs:\ns (text or binary):\n\nReturns:\nstr:", "source": "juraj-google-style"}
{"code": "def get_property(self, name):\n    for prop in self.resource.properties:\n        if (prop.name == name):\n            return prop\n    raise AttributeError(name)", "docstring": "Return a named property for a resource, if available. Will raise an `AttributeError` if the property\ndoes not exist\n\nArgs:\nname (str): Name of the property to return\n\nReturns:\n`ResourceProperty`", "source": "codesearchnet"}
{"code": "def fit_transform(self, X, y):\n        \n        self.target_encoders = [None] * X.shape[1]\n        self.target_mean = y.mean()\n\n        for i, col in enumerate(X.columns):\n            self.target_encoders[i] = self._get_target_encoder(X[col], y)\n\n            X.loc[:, col] = X[col].fillna(NAN_INT).map(self.target_encoders[i]).fillna(self.target_mean)\n\n        return X", "docstring": "Encode categorical columns into average target values.\nArgs:\nX (pandas.DataFrame): categorical columns to encode\ny (pandas.Series): the target column\nReturns:\nX (pandas.DataFrame): encoded columns", "source": "juraj-google-style"}
{"code": "def retrieve_object_from_file(file_name, save_key, file_location):\n    \n    shelve_store = None\n    file = __os.path.join(file_location, file_name)\n    try:\n        shelve_store = __shelve.open(file)\n    except Exception as e:\n        LOGGER.critical('Function retrieve_object_from_file Error {error} ignoring any errors'.format(error=e))\n        __sys.exit('Storage dB is not readable, closing App!!')\n    stored_object = shelve_store.get(save_key)\n    shelve_store.close()\n    return stored_object", "docstring": "Function to retrieve objects from a shelve\nArgs:\nfile_name: Shelve storage file name\nsave_key: The name of the key the item is stored in\nfile_location: The location of the file, derive from the os module\n\nReturns: Returns the stored object", "source": "juraj-google-style"}
{"code": "def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):\n    datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)\n    return datetime_object.year", "docstring": "Gets the year from a POSIX timestamp\n\nThe POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.\n\nArgs:\nposix_time: An integer containing the number of seconds since\n1970-01-01 00:00:00 UTC.\ntimezone: Optional timezone of the POSIX timestamp.\n\nReturns:\nThe year of the POSIX timestamp.\n\nRaises:\nValueError: If the posix timestamp is out of the range of supported values.", "source": "codesearchnet"}
{"code": "def seek(self, relative_position):\n        \n        self._player_interface.Seek(Int64(1000.0 * 1000 * relative_position))\n        self.seekEvent(self, relative_position)", "docstring": "Seek the video by `relative_position` seconds\n\nArgs:\nrelative_position (float): The position in seconds to seek to.", "source": "juraj-google-style"}
{"code": "def get_completions(prefix, paths=None, family_only=False):\n    op = None\n    if prefix:\n        if (prefix[0] in ('!', '~')):\n            if family_only:\n                return set()\n            op = prefix[0]\n            prefix = prefix[1:]\n    fam = None\n    for ch in ('-', '@', '\n        if (ch in prefix):\n            if family_only:\n                return set()\n            fam = prefix.split(ch)[0]\n            break\n    words = set()\n    if (not fam):\n        words = set((x.name for x in iter_package_families(paths=paths) if x.name.startswith(prefix)))\n        if (len(words) == 1):\n            fam = iter(words).next()\n    if family_only:\n        return words\n    if fam:\n        it = iter_packages(fam, paths=paths)\n        words.update((x.qualified_name for x in it if x.qualified_name.startswith(prefix)))\n    if op:\n        words = set(((op + x) for x in words))\n    return words", "docstring": "Get autocompletion options given a prefix string.\n\nExample:\n\n>>> get_completions(\"may\")\nset([\"maya\", \"maya_utils\"])\n>>> get_completions(\"maya-\")\nset([\"maya-2013.1\", \"maya-2015.0.sp1\"])\n\nArgs:\nprefix (str): Prefix to match.\npaths (list of str): paths to search for packages, defaults to\n`config.packages_path`.\nfamily_only (bool): If True, only match package names, do not include\nversion component.\n\nReturns:\nSet of strings, may be empty.", "source": "codesearchnet"}
{"code": "async def _check_resolver_ans(\n            self, dns_answer_list, record_name,\n            record_data_list, record_ttl, record_type_code):\n        \n        type_filtered_list = [\n            ans for ans in dns_answer_list if ans.qtype == record_type_code\n        ]\n\n        \n        \n        if len(type_filtered_list) != len(record_data_list):\n            return False\n\n        \n        for rec in type_filtered_list:\n            conditions = [rec.name == record_name,\n                          rec.ttl == record_ttl,\n                          rec.data in record_data_list]\n\n            \n            \n            if not all(conditions):\n                return False\n\n        return True", "docstring": "Check if resolver answer is equal to record data.\n\nArgs:\ndns_answer_list (list): DNS answer list contains record objects.\nrecord_name (str): Record name.\nrecord_data_list (list): List of data values for the record.\nrecord_ttl (int): Record time-to-live info.\nrecord_type_code (int): Record type code.\n\nReturns:\nboolean indicating if DNS answer data is equal to record data.", "source": "juraj-google-style"}
{"code": "def start(self, timeout=None):\n        \n        assert self.state == STOPPED, \"Process already started\"\n        self.state = STARTING\n        should_publish = self._start_controllers(\n            self._controllers.values(), timeout)\n        if should_publish:\n            self._publish_controllers(timeout)\n        self.state = STARTED", "docstring": "Start the process going\n\nArgs:\ntimeout (float): Maximum amount of time to wait for each spawned\nprocess. None means forever", "source": "juraj-google-style"}
{"code": "def extract_element_internationalized_comment(element):\n    \n    element_entry_comment = get_element_attribute_or_empty(element, 'userLabel')\n    if element_entry_comment == \"\":\n        try:\n            element_entry_comment = element.getElementsByTagName('string')[0].firstChild.nodeValue\n        except Exception:\n            element_entry_comment = \"\"\n    if not element_entry_comment.lower().startswith(JT_INTERNATIONALIZED_COMMENT_PREFIX):\n        return None\n    else:\n        return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]", "docstring": "Extracts the xib element's comment, if the element has been internationalized.\n\nArgs:\nelement (element): The element from which to extract the comment.\n\nReturns:\nThe element's internationalized comment, None if it does not exist, or hasn't been internationalized (according\nto the JTLocalize definitions).", "source": "juraj-google-style"}
{"code": "def get_output_file_info(filename: str, input_base_dir: str='', out_pattern: t.Optional[str]=None, out_dir: t.Optional[str]=None, formatting: str='') -> OutFileInfo:\n    split_name, ending = os.path.splitext(filename)\n    if ending in GRIB_FILE_ENDINGS or ending in NETCDF_FILE_ENDINGS:\n        filename = split_name\n    else:\n        ending = ''\n    if out_dir and (not formatting):\n        raise ValueError('No formatting specified when using --output-dir.')\n    if out_dir:\n        return OutFileInfo(f'{filename.replace(input_base_dir, out_dir)}', formatting, ending, [])\n    if out_pattern:\n        in_sections = []\n        path = filename\n        while path:\n            path, tail = os.path.split(path)\n            in_sections.append(tail)\n        return OutFileInfo(out_pattern, '', '', in_sections)\n    raise ValueError('no output specified.')", "docstring": "Construct the base output file name by applying the out_pattern to the\nfilename.\n\nExample:\nfilename = 'gs://my_bucket/data_to_split/2020/01/21.nc'\nout_pattern = 'gs://my_bucket/splits/{2}-{1}-{0}_old_data.'\nresulting output base = 'gs://my_bucket/splits/2020-01-21_old_data.'\nresulting file ending = '.nc'\n\nArgs:\nfilename: input file to be split\nout_pattern: pattern to apply when creating output file\nout_dir: directory to replace input base directory\nformatting: output formatting of split fields. Required when using\nout_dir, ignored when using out_pattern.\ninput_base_dir: used if out_pattern does not contain any '{}'\nsubstitutions.\nThe output file is then created by replacing this part of the input\nname with the output pattern.", "source": "github-repos"}
{"code": "def process(self, element):\n    text_line = element.strip()\n    return re.findall(\"[\\\\w\\\\']+\", text_line)", "docstring": "Returns an iterator over the words of this element.\n\nThe element is a line of text.  If the line is blank, note that, too.\n\nArgs:\nelement: the element being processed\n\nReturns:\nThe processed element.", "source": "github-repos"}
{"code": "def create_and_tag_model_card(repo_id: str, tags: Optional[list[str]]=None, token: Optional[str]=None, ignore_metadata_errors: bool=False):\n    try:\n        model_card = ModelCard.load(repo_id, token=token, ignore_metadata_errors=ignore_metadata_errors)\n    except EntryNotFoundError:\n        model_description = 'This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.'\n        card_data = ModelCardData(tags=[] if tags is None else tags, library_name='transformers')\n        model_card = ModelCard.from_template(card_data, model_description=model_description)\n    if tags is not None:\n        if model_card.data.tags is None:\n            model_card.data.tags = []\n        for model_tag in tags:\n            if model_tag not in model_card.data.tags:\n                model_card.data.tags.append(model_tag)\n    return model_card", "docstring": "Creates or loads an existing model card and tags it.\n\nArgs:\nrepo_id (`str`):\nThe repo_id where to look for the model card.\ntags (`List[str]`, *optional*):\nThe list of tags to add in the model card\ntoken (`str`, *optional*):\nAuthentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token.\nignore_metadata_errors (`bool`, *optional*, defaults to `False`):\nIf True, errors while parsing the metadata section will be ignored. Some information might be lost during\nthe process. Use it at your own risk.", "source": "github-repos"}
{"code": "def WriteArtifactsFile(self, artifacts, filename):\n    with open(filename, 'w') as file_object:\n        file_object.write(self.FormatArtifacts(artifacts))", "docstring": "Writes artifact definitions to a file.\n\nArgs:\nartifacts (list[ArtifactDefinition]): artifact definitions to be written.\nfilename (str): name of the file to write artifacts to.", "source": "codesearchnet"}
{"code": "def meminfo():\n    (vm_total, vm_available, vm_percent, vm_used, vm_free) = psutil.virtual_memory()\n    (swp_total, swp_used, swp_free, swp_percent, _, _) = psutil.swap_memory()\n\n    def get_unit_value(memory):\n        symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n        prefix = {}\n        for (i, s) in enumerate(symbols):\n            prefix[s] = (1 << ((i + 1) * 10))\n        for s in reversed(symbols):\n            if (memory >= prefix[s]):\n                value = (float(memory) / prefix[s])\n                return {'unit': s, 'value': value}\n        return {'unit': 'B', 'value': memory}\n    return {'VmallocTotal': get_unit_value(vm_total), 'VmallocUsed': get_unit_value(vm_used), 'VmallocFree': get_unit_value(vm_free), 'VmallocAvail': get_unit_value(vm_available), 'SwapTotal': get_unit_value(swp_total), 'SwapUsed': get_unit_value(swp_used), 'SwapFree': get_unit_value(swp_free)}", "docstring": "Return information about physical and virtual memory on the system\n\nReturns:\ndict: A dictionary of information about memory on the system\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt * status.meminfo", "source": "codesearchnet"}
{"code": "def get_servo_status_detail(self):\n    data = []\n    data.append(9)\n    data.append(self.servoid)\n    data.append(RAM_READ_REQ)\n    data.append(STATUS_DETAIL_RAM)\n    data.append(BYTE1)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(12)\n        return (ord(rxdata[9]) & 255)\n    except HerkulexError:\n        raise HerkulexError('could not communicate with motors')", "docstring": "Get the  detailed error status of servo\n\nThis function gets the  detailed error status (if any) of the servo\n\nArgs:\nnone\n\nReturns:\nint:  an integer corresponding to the servo status\n* refer datasheet", "source": "codesearchnet"}
{"code": "def swd_read8(self, offset):\n        \n        value = self._dll.JLINK_SWD_GetU8(offset)\n        return ctypes.c_uint8(value).value", "docstring": "Gets a unit of ``8`` bits from the input buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): the offset (in bits) from which to start reading\n\nReturns:\nThe integer read from the input buffer.", "source": "juraj-google-style"}
{"code": "def setitem(self, axis, key, value):\n\n    def setitem(df, internal_indices=[]):\n\n        def _setitem():\n            if (len(internal_indices) == 1):\n                if (axis == 0):\n                    df[df.columns[internal_indices[0]]] = value\n                else:\n                    df.iloc[internal_indices[0]] = value\n            elif (axis == 0):\n                df[df.columns[internal_indices]] = value\n            else:\n                df.iloc[internal_indices] = value\n        try:\n            _setitem()\n        except ValueError:\n            df = df.copy()\n            _setitem()\n        return df\n    if (axis == 0):\n        numeric_indices = list(self.columns.get_indexer_for([key]))\n    else:\n        numeric_indices = list(self.index.get_indexer_for([key]))\n    prepared_func = self._prepare_method(setitem)\n    if is_list_like(value):\n        new_data = self.data.apply_func_to_select_indices_along_full_axis(axis, prepared_func, numeric_indices, keep_remaining=True)\n    else:\n        new_data = self.data.apply_func_to_select_indices(axis, prepared_func, numeric_indices, keep_remaining=True)\n    return self.__constructor__(new_data, self.index, self.columns)", "docstring": "Set the column defined by `key` to the `value` provided.\n\nArgs:\nkey: The column name to set.\nvalue: The value to set the column to.\n\nReturns:\nA new QueryCompiler", "source": "codesearchnet"}
{"code": "def set_file_idx_offset(self, file_idx_offset=0):\n        \n        if isinstance(file_idx_offset, int):\n            self.file_idx_offset = file_idx_offset\n        elif file_idx_offset == 'auto':\n            self.file_idx_offset = self.storage.max_file_idx()\n        else:\n            raise ValueError('\"file_idx_offset\" must be an integer or `auto`')", "docstring": "Set offset of file index.\n\nArgs:\nfile_idx_offset: It can be either an integer or 'auto'. If set\nto an integer, the filename will start from\n``file_idx_offset`` + 1. If set to ``'auto'``, the filename\nwill start from existing max file index plus 1.", "source": "juraj-google-style"}
{"code": "def Collect(self, knowledge_base):\n    \n    environment_variable = knowledge_base.GetEnvironmentVariable(\n        'programdata')\n    allusersprofile = getattr(environment_variable, 'value', None)\n\n    if not allusersprofile:\n      environment_variable = knowledge_base.GetEnvironmentVariable(\n          'allusersprofile')\n      allusersprofile = getattr(environment_variable, 'value', None)\n\n      if allusersprofile:\n        environment_variable = artifacts.EnvironmentVariableArtifact(\n            case_sensitive=False, name='programdata', value=allusersprofile)\n\n        try:\n          logger.debug('setting environment variable: {0:s} to: \"{1:s}\"'.format(\n              'programdata', allusersprofile))\n          knowledge_base.AddEnvironmentVariable(environment_variable)\n        except KeyError:\n          \n          pass", "docstring": "Collects values from the knowledge base.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def op_or(self, *elements):\n        \n        expression = self.add_operator(Operator(','))\n        for element in elements:\n            expression.add_element(element)\n        return expression", "docstring": "Update the ``Expression`` by joining the specified additional\n``elements`` using an \"OR\" ``Operator``\n\nArgs:\n*elements (BaseExpression): The ``Expression`` and/or\n``Constraint`` elements which the \"OR\" ``Operator`` applies\nto.\n\nReturns:\nExpression: ``self`` or related ``Expression``.", "source": "juraj-google-style"}
{"code": "def get_proj_info(self, token):\n        \n        r = self.remote_utils.get_url(self.url() + \"{}/info/\".format(token))\n        return r.json()", "docstring": "Return the project info for a given token.\n\nArguments:\ntoken (str): Token to return information for\n\nReturns:\nJSON: representation of proj_info", "source": "juraj-google-style"}
{"code": "def get_stored_metadata(self, temp_ver):\n    with open(self._prefixed(('%s.metadata' % temp_ver.name))) as f:\n        return json.load(f)", "docstring": "Retrieves the metadata for the given template version from the store\n\nArgs:\ntemp_ver (TemplateVersion): template version to retrieve the\nmetadata for\n\nReturns:\ndict: the metadata of the given template version", "source": "codesearchnet"}
{"code": "def find_documents(self, sentence, limit=None, must_sort=True,\n                       search_type='fuzzy'):\n        \n        sentence = sentence.strip()\n        sentence = strip_accents(sentence)\n\n        if sentence == u\"\":\n            return self.get_all_docs()\n\n        result_list_list = []\n        total_results = 0\n\n        for query_parser in self.search_param_list[search_type]:\n            query = query_parser[\"query_parser\"].parse(sentence)\n\n            sortedby = None\n            if must_sort and \"sortedby\" in query_parser:\n                sortedby = query_parser['sortedby']\n            if sortedby:\n                results = self.__searcher.search(\n                    query, limit=limit, sortedby=sortedby\n                )\n            else:\n                results = self.__searcher.search(\n                    query, limit=limit\n                )\n            results = [\n                (result['docid'], result['doctype'])\n                for result in results\n            ]\n\n            result_list_list.append(results)\n            total_results += len(results)\n\n            if not must_sort and total_results >= limit:\n                break\n\n        \n        docs = set()\n        for result_intermediate in result_list_list:\n            for result in result_intermediate:\n                doc = self._docs_by_id.get(result[0])\n                if doc is None:\n                    continue\n                docs.add(doc)\n\n        docs = [d for d in docs]\n\n        if not must_sort and limit is not None:\n            docs = docs[:limit]\n\n        return docs", "docstring": "Returns all the documents matching the given keywords\n\nArguments:\nsentence --- a sentenced query\nReturns:\nAn array of document (doc objects)", "source": "juraj-google-style"}
{"code": "def _apply_mask(self):\n    w = self._w\n    w_shape = w.get_shape()\n    mask_shape = self._mask.get_shape()\n    if (mask_shape.ndims > w_shape.ndims):\n        raise base.IncompatibleShapeError('Invalid mask shape: {}. Max shape: {}'.format(mask_shape.ndims, len(self._data_format)))\n    if (mask_shape != w_shape[:mask_shape.ndims]):\n        raise base.IncompatibleShapeError('Invalid mask shape: {}. Weight shape: {}'.format(mask_shape, w_shape))\n    while (self._mask.get_shape().ndims < w_shape.ndims):\n        self._mask = tf.expand_dims(self._mask, (- 1))\n    w = (w * self._mask)\n    return w", "docstring": "Applies the passed-in mask to the convolution matrix.\n\nReturns:\nw: A copy of the convolution matrix that has had the mask applied.\n\nRaises:\nbase.IncompatibleShapeError: If the mask shape has more dimensions than\nthe weight matrix.\nbase.IncompatibleShapeError: If the mask and the weight matrix don't\nmatch on shape.", "source": "codesearchnet"}
{"code": "def get_path_spec(self, path, action=None):\n        \n        \n        path_spec = None\n        path_name = None\n        for base_path in self.paths.keys():\n            if path == base_path:\n                path_spec = self.paths[base_path]\n                path_name = base_path\n\n        \n        if path_spec is None:\n            for base_path in self.paths.keys():\n                regex_from_path = re.compile(re.sub('{[^/]*}', '([^/]*)', base_path) + r'$')\n                if re.match(regex_from_path, path):\n                    path_spec = self.paths[base_path]\n                    path_name = base_path\n\n        \n        if path_spec is not None and action is not None:\n            if action not in path_spec.keys():\n                return (None, None)\n            else:\n                path_spec = path_spec[action]\n\n        return (path_name, path_spec)", "docstring": "Get the specification matching with the given path.\n\nArgs:\npath: path we want the specification.\naction: get the specification for the given action.\n\nReturns:\nA tuple with the base name of the path and the specification.\nOr (None, None) if no specification is found.", "source": "juraj-google-style"}
{"code": "def search(self, term):\n        \n        return self._result(\n            self._get(self._url(\"/images/search\"), params={'term': term}),\n            True\n        )", "docstring": "Search for images on Docker Hub. Similar to the ``docker search``\ncommand.\n\nArgs:\nterm (str): A term to search for.\n\nReturns:\n(list of dicts): The response of the search.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def GetProperties(cls, path_spec):\n    properties = {}\n    for property_name in cls.PROPERTY_NAMES:\n        if hasattr(path_spec, property_name):\n            properties[property_name] = getattr(path_spec, property_name)\n    return properties", "docstring": "Retrieves a dictionary containing the path specification properties.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\ndict[str, str]: path specification properties.\n\nRaises:\ndict: path specification properties.", "source": "codesearchnet"}
{"code": "def etherscan_verify_contract(chain_id: int, apikey: str, source_module: DeploymentModule, contract_name: str):\n    etherscan_api = api_of_chain_id[chain_id]\n    deployment_info = get_contracts_deployment_info(chain_id=chain_id, module=source_module)\n    if (deployment_info is None):\n        raise FileNotFoundError(f'Deployment file not found for chain_id={chain_id} and module={source_module}')\n    contract_manager = ContractManager(contracts_precompiled_path())\n    data = post_data_for_etherscan_verification(apikey=apikey, deployment_info=deployment_info['contracts'][contract_name], source=join_sources(source_module=source_module, contract_name=contract_name), contract_name=contract_name, metadata=json.loads(contract_manager.contracts[contract_name]['metadata']), constructor_args=get_constructor_args(deployment_info=deployment_info, contract_name=contract_name, contract_manager=contract_manager))\n    response = requests.post(etherscan_api, data=data)\n    content = json.loads(response.content.decode())\n    print(content)\n    print(f\"Status: {content['status']}; {content['message']} ; GUID = {content['result']}\")\n    etherscan_url = etherscan_api.replace('api-', '').replace('api', '')\n    etherscan_url += ('/verifyContract2?a=' + data['contractaddress'])\n    manual_submission_guide = f\n    if (content['status'] != '1'):\n        if (content['result'] == 'Contract source code already verified'):\n            return\n        else:\n            raise ValueError(('Etherscan submission failed for an unknown reason\\n' + manual_submission_guide))\n    guid = content['result']\n    status = '0'\n    retries = 10\n    while ((status == '0') and (retries > 0)):\n        retries -= 1\n        r = guid_status(etherscan_api=etherscan_api, guid=guid)\n        status = r['status']\n        if (r['result'] == 'Fail - Unable to verify'):\n            raise ValueError(manual_submission_guide)\n        if (r['result'] == 'Pass - Verified'):\n            return\n        print('Retrying...')\n        sleep(5)\n    raise TimeoutError(manual_submission_guide)", "docstring": "Calls Etherscan API for verifying the Solidity source of a contract.\n\nArgs:\nchain_id: EIP-155 chain id of the Ethereum chain\napikey: key for calling Etherscan API\nsource_module: a module name to look up contracts_source_path()\ncontract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.", "source": "codesearchnet"}
{"code": "def close_position(self, repay_only):\n        \n        params = {'repay_only': repay_only}\n        return self._send_message('post', '/position/close',\n                                  data=json.dumps(params))", "docstring": "Close position.\n\nArgs:\nrepay_only (bool): Undocumented by cbpro.\n\nReturns:\nUndocumented", "source": "juraj-google-style"}
{"code": "def colored(text: str, color: Optional[str]=None, background: Optional[str]=None, styles: Optional[List[str]]=None) -> str:\n    if not termcolor:\n        return text\n    return termcolor.colored(text, color=color, on_color='on_' + background if background else None, attrs=styles)", "docstring": "Returns the colored text with ANSI color characters.\n\nArgs:\ntext: A string that may or may not already has ANSI color characters.\ncolor: A string for text colors. Applicable values are:\n'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'.\nbackground: A string for background colors. Applicable values are:\n'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'.\nstyles: A list of strings for applying styles on the text.\nApplicable values are:\n'bold', 'dark', 'underline', 'blink', 'reverse', 'concealed'.\n\nReturns:\nA string with ANSI color characters embracing the entire text.", "source": "github-repos"}
{"code": "def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0):\n    sys_formatters = {'systemroot': 'c:\\\\Windows'}\n    if path_args:\n        sys_formatters.update(path_args)\n    if users:\n        results = []\n        for user in users:\n            user = GetUserInfo(knowledge_base, user)\n            if user:\n                formatters = dict(((x.name, y) for (x, y) in user.ListSetFields()))\n                formatters.update(sys_formatters)\n                try:\n                    results.append(path.format(**formatters))\n                except KeyError:\n                    pass\n        return results\n    else:\n        try:\n            path = path.format(**sys_formatters)\n        except KeyError:\n            logging.warning('Failed path interpolation on %s', path)\n            return ''\n        if (('{' in path) and (depth < 10)):\n            path = InterpolatePath(path, knowledge_base=knowledge_base, users=users, path_args=path_args, depth=(depth + 1))\n        return path", "docstring": "Take a string as a path on a client and interpolate with client data.\n\nArgs:\npath: A single string/unicode to be interpolated.\nknowledge_base: An rdf_client.KnowledgeBase object.\nusers: A list of string usernames, or None.\npath_args: A dict of additional args to use in interpolation. These take\nprecedence over any system provided variables.\ndepth: A counter for recursion depth.\n\nReturns:\nA single string if users is None, otherwise a list of strings.", "source": "codesearchnet"}
{"code": "def get_tensor_shape(self, tensor_name):\n    \n    tensor = self._name_to_tensor(tensor_name)\n\n    if isinstance(tensor, mtf.Tensor):\n      return tf.TensorShape(tensor.shape.to_integer_list)\n    else:  \n      return tensor.shape", "docstring": "The tf.TensorShape of a tensor.\n\nArgs:\ntensor_name: string, the name of a tensor in the graph.\n\nReturns:\na tf.TensorShape", "source": "juraj-google-style"}
{"code": "def create(self, data=None, uri=None, timeout=-1, custom_headers=None, force=False):\n        \n        if not uri:\n            uri = self._base_uri\n\n        if force:\n            uri += '?force={}'.format(force)\n\n        logger.debug('Create (uri = %s, resource = %s)' % (uri, str(data)))\n\n        return self.do_post(uri, data, timeout, custom_headers)", "docstring": "Makes a POST request to create a resource when a request body is required.\n\nArgs:\ndata: Additional fields can be passed to create the resource.\nuri: Resouce uri\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\ncustom_headers: Allows set specific HTTP headers.\nReturns:\nCreated resource.", "source": "juraj-google-style"}
{"code": "def ConvertCloudMetadataResponsesToCloudInstance(metadata_responses):\n    if (metadata_responses.instance_type == 'GOOGLE'):\n        cloud_instance = GoogleCloudInstance()\n        result = CloudInstance(cloud_type='GOOGLE', google=cloud_instance)\n    elif (metadata_responses.instance_type == 'AMAZON'):\n        cloud_instance = AmazonCloudInstance()\n        result = CloudInstance(cloud_type='AMAZON', amazon=cloud_instance)\n    else:\n        raise ValueError(('Unknown cloud instance type: %s' % metadata_responses.instance_type))\n    for cloud_metadata in metadata_responses.responses:\n        setattr(cloud_instance, cloud_metadata.label, cloud_metadata.text)\n    if (result.cloud_type == 'GOOGLE'):\n        cloud_instance.unique_id = MakeGoogleUniqueID(cloud_instance)\n    return result", "docstring": "Convert CloudMetadataResponses to CloudInstance proto.\n\nIdeally we'd just get the client to fill out a CloudInstance proto, but we\nneed to keep the flexibility of collecting new metadata and creating new\nfields without a client push. So instead we bring back essentially a dict of\nresults and fill the proto on the server side.\n\nArgs:\nmetadata_responses: CloudMetadataResponses object from the client.\nReturns:\nCloudInstance object\nRaises:\nValueError: if client passes bad or unset cloud type.", "source": "codesearchnet"}
{"code": "def set_room_alias(self, room_id, room_alias):\n    data = {'room_id': room_id}\n    return self._send('PUT', '/directory/room/{}'.format(quote(room_alias)), content=data)", "docstring": "Set alias to room id\n\nArgs:\nroom_id (str): The room id.\nroom_alias (str): The room wanted alias name.", "source": "codesearchnet"}
{"code": "def filter_distributed_callbacks(callbacks_list, model):\n    if not model._in_multi_worker_mode():\n        raise ValueError('filter_distributed_callbacks() should only be called when Keras is in multi worker mode.')\n    callbacks_list = callbacks_list or []\n    if not [c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)]:\n        logging.warning('ModelCheckpoint callback is not provided. Workers will need to restart training if any fails.')\n    if callbacks_list is None or is_current_worker_chief():\n        return callbacks_list\n    return [callback for callback in callbacks_list if not callback._chief_worker_only]", "docstring": "Filter Callbacks based on the worker context when running multi-worker.\n\nArgs:\ncallbacks_list: A list of `Callback` instances.\nmodel: Keras model instance.\n\nReturns:\nThe list of `Callback` instances that should be run on this worker.", "source": "github-repos"}
{"code": "def _validate_aud(claims, audience=None):\n    if ('aud' not in claims):\n        return\n    audience_claims = claims['aud']\n    if isinstance(audience_claims, string_types):\n        audience_claims = [audience_claims]\n    if (not isinstance(audience_claims, list)):\n        raise JWTClaimsError('Invalid claim format in token')\n    if any(((not isinstance(c, string_types)) for c in audience_claims)):\n        raise JWTClaimsError('Invalid claim format in token')\n    if (audience not in audience_claims):\n        raise JWTClaimsError('Invalid audience')", "docstring": "Validates that the 'aud' claim is valid.\n\nThe \"aud\" (audience) claim identifies the recipients that the JWT is\nintended for.  Each principal intended to process the JWT MUST\nidentify itself with a value in the audience claim.  If the principal\nprocessing the claim does not identify itself with a value in the\n\"aud\" claim when this claim is present, then the JWT MUST be\nrejected.  In the general case, the \"aud\" value is an array of case-\nsensitive strings, each containing a StringOrURI value.  In the\nspecial case when the JWT has one audience, the \"aud\" value MAY be a\nsingle case-sensitive string containing a StringOrURI value.  The\ninterpretation of audience values is generally application specific.\nUse of this claim is OPTIONAL.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\naudience (str): The audience that is verifying the token.", "source": "codesearchnet"}
{"code": "def from_string(cls, public_key):\n        \n        public_key_data = _helpers.to_bytes(public_key)\n\n        if _CERTIFICATE_MARKER in public_key_data:\n            cert = cryptography.x509.load_pem_x509_certificate(\n                public_key_data, _BACKEND)\n            pubkey = cert.public_key()\n\n        else:\n            pubkey = serialization.load_pem_public_key(\n                public_key_data, _BACKEND)\n\n        return cls(pubkey)", "docstring": "Construct an Verifier instance from a public key or public\ncertificate string.\n\nArgs:\npublic_key (Union[str, bytes]): The public key in PEM format or the\nx509 public key certificate.\n\nReturns:\nVerifier: The constructed verifier.\n\nRaises:\nValueError: If the public key can't be parsed.", "source": "juraj-google-style"}
{"code": "def output(ret, **kwargs):\n    if ('opts' in kwargs):\n        global __opts__\n        __opts__ = kwargs.pop('opts')\n    base_indent = (kwargs.get('nested_indent', 0) or __opts__.get('out.table.nested_indent', 0))\n    rows_key = (kwargs.get('rows_key') or __opts__.get('out.table.rows_key'))\n    labels_key = (kwargs.get('labels_key') or __opts__.get('out.table.labels_key'))\n    title = (kwargs.get('title') or __opts__.get('out.table.title'))\n    class_kvargs = {}\n    argks = ('has_header', 'row_delimiter', 'delim', 'justify', 'separate_rows', 'prefix', 'suffix', 'width')\n    for argk in argks:\n        argv = (kwargs.get(argk) or __opts__.get('out.table.{key}'.format(key=argk)))\n        if (argv is not None):\n            class_kvargs[argk] = argv\n    table = TableDisplay(**class_kvargs)\n    out = []\n    if (title and rows_key):\n        out.append(table.ustring(base_indent, title, table.WHITE, suffix='\\n'))\n    return '\\n'.join(table.display(salt.utils.data.decode(ret), base_indent, out, rows_key=rows_key, labels_key=labels_key))", "docstring": "Display the output as table.\n\nArgs:\n\n* nested_indent: integer, specify the left alignment.\n* has_header: boolean specifying if header should be displayed. Default: True.\n* row_delimiter: character to separate rows. Default: ``_``.\n* delim: character to separate columns. Default: ``\" | \"``.\n* justify: text alignment. Default: ``center``.\n* separate_rows: boolean specifying if row separator will be displayed between consecutive rows. Default: True.\n* prefix: character at the beginning of the row. Default: ``\"| \"``.\n* suffix: character at the end of the row. Default: ``\" |\"``.\n* width: column max width. Default: ``50``.\n* rows_key: display the rows under a specific key.\n* labels_key: use the labels under a certain key. Otherwise will try to use the dictionary keys (if any).\n* title: display title when only one table is selected (using the ``rows_key`` argument).", "source": "codesearchnet"}
{"code": "def to_matrix(self):\n    (w, x, y, z) = self.normalize().data\n    mat = np.array([[((1 - (2 * (y ** 2))) - (2 * (z ** 2))), (((2 * x) * y) - ((2 * z) * w)), (((2 * x) * z) + ((2 * y) * w))], [(((2 * x) * y) + ((2 * z) * w)), ((1 - (2 * (x ** 2))) - (2 * (z ** 2))), (((2 * y) * z) - ((2 * x) * w))], [(((2 * x) * z) - ((2 * y) * w)), (((2 * y) * z) + ((2 * x) * w)), ((1 - (2 * (x ** 2))) - (2 * (y ** 2)))]], dtype=float)\n    return mat", "docstring": "Converts a unit-length quaternion to a rotation matrix.\n\nReturns:\nndarray: Rotation matrix.", "source": "codesearchnet"}
{"code": "def terminate_ec2_instance(client, resource):\n    \n    \n    instance = EC2Instance.get(resource.id)\n    if instance.state == 'terminated':\n        return ActionStatus.IGNORED, {}\n    client.terminate_instances(InstanceIds=[resource.id])\n    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}", "docstring": "Terminate an EC2 Instance\n\nThis function will terminate an EC2 Instance.\n\nArgs:\nclient (:obj:`boto3.session.Session.client`): A boto3 client object\nresource (:obj:`Resource`): The resource object to terminate\n\nReturns:\n`ActionStatus`", "source": "juraj-google-style"}
{"code": "def _copy_and_clean_up_expectation(self, expectation, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True):\n    new_expectation = copy.deepcopy(expectation)\n    if ('success_on_last_run' in new_expectation):\n        del new_expectation['success_on_last_run']\n    if discard_result_format_kwargs:\n        if ('result_format' in new_expectation['kwargs']):\n            del new_expectation['kwargs']['result_format']\n    if discard_include_configs_kwargs:\n        if ('include_configs' in new_expectation['kwargs']):\n            del new_expectation['kwargs']['include_configs']\n    if discard_catch_exceptions_kwargs:\n        if ('catch_exceptions' in new_expectation['kwargs']):\n            del new_expectation['kwargs']['catch_exceptions']\n    return new_expectation", "docstring": "Returns copy of `expectation` without `success_on_last_run` and other specified key-value pairs removed\n\nReturns a copy of specified expectation will not have `success_on_last_run` key-value. The other key-value \\\npairs will be removed by default but will remain in the copy if specified.\n\nArgs:\nexpectation (json): \\\nThe expectation to copy and clean.\ndiscard_result_format_kwargs (boolean): \\\nif True, will remove the kwarg `output_format` key-value pair from the copied expectation.\ndiscard_include_configs_kwargs (boolean):\nif True, will remove the kwarg `include_configs` key-value pair from the copied expectation.\ndiscard_catch_exceptions_kwargs (boolean):\nif True, will remove the kwarg `catch_exceptions` key-value pair from the copied expectation.\n\nReturns:\nA copy of the provided expectation with `success_on_last_run` and other specified key-value pairs removed", "source": "codesearchnet"}
{"code": "def __init__(self, subject_hash, hash_information):\n    \n    self.hash_information = hash_information\n    self.subject_hash = subject_hash", "docstring": "Initializes analysis information about a hash.\n\nArgs:\nsubject_hash (str): hash that the hash_information relates to.\nhash_information (object): information about the hash. This object will be\nused by the GenerateLabels method in the HashTaggingAnalysisPlugin\nto tag events that relate to the hash.", "source": "juraj-google-style"}
{"code": "def _should_unpack(arg):\n    return type(arg) is tuple", "docstring": "Determines whether the caller needs to unpack the argument from a tuple.\n\nArgs:\narg: argument to check\n\nReturns:\nIndication of whether the caller needs to unpack the argument from a tuple.", "source": "github-repos"}
{"code": "def get_dim_index(js_dict, dim):\n    try:\n        dim_index = js_dict['dimension'][dim]['category']['index']\n    except KeyError:\n        dim_label = get_dim_label(js_dict, dim)\n        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])), index=[0], columns=['id', 'index'])\n    else:\n        if (type(dim_index) is list):\n            dim_index = pd.DataFrame(list(zip(dim_index, range(0, len(dim_index)))), index=dim_index, columns=['id', 'index'])\n        else:\n            dim_index = pd.DataFrame(list(zip(dim_index.keys(), dim_index.values())), index=dim_index.keys(), columns=['id', 'index'])\n    dim_index = dim_index.sort_index(by='index')\n    return dim_index", "docstring": "Get index from a given dimension.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\ndim (string): dimension name obtained from JSON file.\n\nReturns:\ndim_index (pandas.DataFrame): DataFrame with index-based dimension data.", "source": "codesearchnet"}
{"code": "def get_opt_attr(obj_pyxb, attr_str, default_val=None):\n    v = getattr(obj_pyxb, attr_str, default_val)\n    return (v if (v is not None) else default_val)", "docstring": "Get an optional attribute value from a PyXB element.\n\nThe attributes for elements that are optional according to the schema and\nnot set in the PyXB object are present and set to None.\n\nPyXB validation will fail if required elements are missing.\n\nArgs:\nobj_pyxb: PyXB object\nattr_str: str\nName of an attribute that the PyXB object may contain.\n\ndefault_val: any object\nValue to return if the attribute is not present.\n\nReturns:\nstr : Value of the attribute if present, else ``default_val``.", "source": "codesearchnet"}
{"code": "def parse_fs_url(fs_url):\n    match = _RE_FS_URL.match(fs_url)\n    if (match is None):\n        raise ParseError('{!r} is not a fs2 url'.format(fs_url))\n    (fs_name, credentials, url1, url2, path) = match.groups()\n    if (not credentials):\n        username = None\n        password = None\n        url = url2\n    else:\n        (username, _, password) = credentials.partition(':')\n        username = unquote(username)\n        password = unquote(password)\n        url = url1\n    (url, has_qs, qs) = url.partition('?')\n    resource = unquote(url)\n    if has_qs:\n        _params = parse_qs(qs, keep_blank_values=True)\n        params = {k: unquote(v[0]) for (k, v) in six.iteritems(_params)}\n    else:\n        params = {}\n    return ParseResult(fs_name, username, password, resource, params, path)", "docstring": "Parse a Filesystem URL and return a `ParseResult`.\n\nArguments:\nfs_url (str): A filesystem URL.\n\nReturns:\n~fs.opener.parse.ParseResult: a parse result instance.\n\nRaises:\n~fs.errors.ParseError: if the FS URL is not valid.", "source": "codesearchnet"}
{"code": "def _compute_sequence_length_from_mask(mask, time_major):\n    timestep_index = 0 if time_major else 1\n    return tf.reduce_sum(tf.cast(mask, tf.int32), axis=timestep_index)", "docstring": "Calculate the sequence length tensor (1-D) based on the masking tensor.\n\nThe masking tensor is a 2D boolean tensor with shape [batch, timestep]. For\nany timestep that should be masked, the corresponding field will be False.\nConsider the following example:\na = [[True, True, False, False],\n[True, True, True, False]]\nIt is a (2, 4) tensor, and the corresponding sequence length result should\nbe 1D tensor with value [2, 3]. Note that the masking tensor must be right\npadded that could be checked by, e.g., `is_sequence_right_padded()`.\n\nArgs:\nmask: Boolean tensor with shape [batch, timestep] or [timestep, batch]\nif time_major=True.\ntime_major: Boolean, which indicates whether the mask is time major or\nbatch major.\n\nReturns:\nsequence_length: 1D int32 tensor.", "source": "github-repos"}
{"code": "def _checkResponseWriteData(payload, writedata):\n    _checkString(payload, minlength=4, description='payload')\n    _checkString(writedata, minlength=2, maxlength=2, description='writedata')\n    BYTERANGE_FOR_WRITEDATA = slice(2, 4)\n    receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]\n    if (receivedWritedata != writedata):\n        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format(receivedWritedata, writedata, payload))", "docstring": "Check that the write data as given in the response is correct.\n\nThe bytes 2 and 3 (zero based counting) in the payload holds the write data.\n\nArgs:\n* payload (string): The payload\n* writedata (string): The data to write, length should be 2 bytes.\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def AddWatchOnly(self, script_hash):\n    if (script_hash in self._contracts):\n        logger.error('Address already in contracts')\n        return\n    self._watch_only.append(script_hash)", "docstring": "Add a watch only address to the wallet.\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20) representing the public key.\n\nNote:\nPrints a warning to the console if the address already exists in the wallet.", "source": "codesearchnet"}
{"code": "def view(location, browser=None, new='same', autoraise=True):\n    try:\n        new = {'same': 0, 'window': 1, 'tab': 2}[new]\n    except KeyError:\n        raise RuntimeError((\"invalid 'new' value passed to view: %r, valid values are: 'same', 'window', or 'tab'\" % new))\n    if location.startswith('http'):\n        url = location\n    else:\n        url = ('file:\n    try:\n        controller = get_browser_controller(browser)\n        controller.open(url, new=new, autoraise=autoraise)\n    except (SystemExit, KeyboardInterrupt):\n        raise\n    except:\n        pass", "docstring": "Open a browser to view the specified location.\n\nArgs:\nlocation (str) : Location to open\nIf location does not begin with \"http:\" it is assumed\nto be a file path on the local filesystem.\nbrowser (str or None) : what browser to use (default: None)\nIf ``None``, use the system default browser.\nnew (str) : How to open the location. Valid values are:\n\n``'same'`` - open in the current tab\n\n``'tab'`` - open a new tab in the current window\n\n``'window'`` - open in a new window\nautoraise (bool) : Whether to automatically raise the location\nin a new browser window (default: True)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_key_delivery_url(access_token, ck_id, key_type):\n    path = '/ContentKeys'\n    full_path = ''.join([path, \"('\", ck_id, \"')\", '/GetKeyDeliveryUrl'])\n    endpoint = ''.join([ams_rest_endpoint, full_path])\n    body = (('{\"keyDeliveryType\": \"' + key_type) + '\"}')\n    return do_ams_post(endpoint, full_path, body, access_token)", "docstring": "Get Media Services Key Delivery URL.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nck_id (str): A Media Service Content Key ID.\nkey_type (str): A Media Service key Type.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def in_same_dir(as_file, target_file):\n    return os.path.abspath(os.path.join(os.path.dirname(as_file), target_file))", "docstring": "Return an absolute path to a target file that is located in the same directory as as_file\n\nArgs:\nas_file: File name (including __file__)\nUse the directory path of this file\ntarget_file: Name of the target file", "source": "codesearchnet"}
{"code": "def add_report(self, specification_name, report):\n    self._reports[specification_name] = report\n    self._total = (self._total + report.testsRun)\n    self._failures = (self._failures + len(report.failures))\n    self._errors = (self._errors + len(report.errors))\n    self._success = ((self._total - self._failures) - self._errors)", "docstring": "Adds a given report with the given specification_name as key\nto the reports list and computes the number of success, failures\nand errors\n\nArgs:\nspecification_name: string representing the specification (with \".spec\")\nreport: The", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    try:\n      structure = self._HEADER.parseString(line)\n    except pyparsing.ParseException:\n      logger.debug('Not a XChat log file')\n      return False\n\n    _, month, day, hours, minutes, seconds, year = structure.date_time\n\n    month = timelib.MONTH_DICT.get(month.lower(), 0)\n\n    time_elements_tuple = (year, month, day, hours, minutes, seconds)\n\n    try:\n      dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(\n          structure.date_time))\n      return False\n\n    return True", "docstring": "Verify that this file is a XChat log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"}
{"code": "def scatter_max(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Updates this variable with the max of `tf.IndexedSlices` and itself.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to use as an argument of max with this\nvariable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def update_restore_inputs(self, checkpoint_key, shape_and_slice_spec) -> tuple[List[str], List[str]]:\n    return ([checkpoint_key], [shape_and_slice_spec])", "docstring": "Updates the specs to restore op.\n\nOverride this method if the arguments to restore op need to be updated as\nper the resharding required.\nArgs:\ncheckpoint_key: The checkpoint key as requested by the caller\nshape_and_slice_spec: The shape and slice spec as requested by caller\n\nReturns:\nTuple of list of checkpoint_keys and specs that the restore op should fetch\nas per the resharding requirement. The length of checkpoint keys returned by\nthis method will match the length of checkpoint_values that are input to\n`reshard`.", "source": "github-repos"}
{"code": "def gzip_uncompress(data, truncated=False):\n    decompressor = SimpleGzipDecompressor()\n    inflated_data = decompressor.decompress(data)\n    if (not truncated):\n        inflated_data += decompressor.flush()\n    return inflated_data", "docstring": "Uncompress gzip data.\n\nArgs:\ndata (bytes): The gzip data.\ntruncated (bool): If True, the decompressor is not flushed.\n\nThis is a convenience function.\n\nReturns:\nbytes: The inflated data.\n\nRaises:\nzlib.error", "source": "codesearchnet"}
{"code": "def get_slot_names(self):\n    return sorted(self._slots.keys())", "docstring": "Return a list of the names of slots created by the `Optimizer`.\n\nSee `get_slot()`.\n\nReturns:\nA list of strings.", "source": "github-repos"}
{"code": "def __init__(self, build_tree=True):\n        \n        self._target_cache = {}\n        self._item_cache = {}\n        self._contains_cache = {}\n        self._matrix_cache = {}\n        self._graph_cache = {}\n        self._treemap_cache = None\n        self.modules = []\n        self.packages = []\n\n        if build_tree:\n            self.build_tree()", "docstring": "Initialization method.\n\nArgs:\nbuild_tree (bool): whether to immediately build the tree or not.", "source": "juraj-google-style"}
{"code": "def _update_seek(self, offset, whence):\n    with self._seek_lock:\n        if (whence == SEEK_SET):\n            self._seek = offset\n        elif (whence == SEEK_CUR):\n            self._seek += offset\n        elif (whence == SEEK_END):\n            self._seek = (offset + self._size)\n        else:\n            raise ValueError(('whence value %s unsupported' % whence))\n    return self._seek", "docstring": "Update seek value.\n\nArgs:\noffset (int): Offset.\nwhence (int): Whence.\n\nReturns:\nint: Seek position.", "source": "codesearchnet"}
{"code": "def get_image_data(self, ids=None, voxels=None, dense=True):\n    if (dense and (ids is None) and (voxels is None)):\n        logger.warning('Warning: get_image_data() is being called without specifying a subset of studies or voxels to retrieve. This may result in a very large amount of data (several GB) being read into memory. If you experience any problems, consider returning a sparse matrix by passing dense=False, or pass in a list of ids of voxels to retrieve only a portion of the data.')\n    result = self.data\n    if (ids is not None):\n        idxs = np.where(np.in1d(np.array(self.ids), np.array(ids)))[0]\n        result = result[(:, idxs)]\n    if (voxels is not None):\n        result = result[(voxels, :)]\n    return (result.toarray() if dense else result)", "docstring": "Slices and returns a subset of image data.\n\nArgs:\nids (list, array): A list or 1D numpy array of study ids to\nreturn. If None, returns data for all studies.\nvoxels (list, array): A list or 1D numpy array of voxel indices\n(i.e., rows) to return. If None, returns data for all voxels.\ndense (bool): Optional boolean. When True (default), convert the\nresult to a dense array before returning. When False, keep as\nsparse matrix.\n\nReturns:\nA 2D numpy array with voxels in rows and studies in columns.", "source": "codesearchnet"}
{"code": "def plot_scatter_matrix(self, freq=None, title=None, figsize=(10, 10), **kwargs):\n    if (title is None):\n        title = self._get_default_plot_title(freq, 'Return Scatter Matrix')\n    plt.figure()\n    ser = self._get_series(freq).to_returns().dropna()\n    pd.scatter_matrix(ser, figsize=figsize, **kwargs)\n    return plt.suptitle(title)", "docstring": "Wrapper around pandas' scatter_matrix.\n\nArgs:\n* freq (str): Data frequency used for display purposes.\nRefer to pandas docs for valid freq strings.\n* figsize ((x,y)): figure size\n* title (str): Title if default not appropriate\n* kwargs: passed to pandas' scatter_matrix method", "source": "codesearchnet"}
{"code": "def setup(pin, mode, pullup=None, initial=False):\n    \n    if pullup is not None:\n        raise ValueError(\"sysfs does not support pullups\")\n\n    if mode not in (IN, OUT, LOW, HIGH):\n        raise ValueError(mode)\n\n    log.debug(\"Setup {0}: {1}\".format(pin, mode))\n    f = _open[pin].direction\n    _write(f, mode)\n    if mode == OUT:\n        if initial:\n            set(pin, 1)\n        else:\n            set(pin, 0)", "docstring": "Setup pin with mode IN or OUT.\n\nArgs:\npin (int):\nmode (str): use either gpio.OUT or gpio.IN\npullup (None): rpio compatibility. If anything but None, raises\nvalue Error\npullup (bool, optional): Initial pin value. Default is False", "source": "juraj-google-style"}
{"code": "def _ParseComment(self, structure):\n    if (structure[1] == 'Date:'):\n        (self._year, self._month, self._day_of_month, _, _, _) = structure.date_time\n    elif (structure[1] == 'Fields:'):\n        self._ParseFieldsMetadata(structure)", "docstring": "Parses a comment.\n\nArgs:\nstructure (pyparsing.ParseResults): structure parsed from the log file.", "source": "codesearchnet"}
{"code": "def merge_lines(top, bot, icod=\"top\"):\n        \n        ret = \"\"\n        for topc, botc in zip(top, bot):\n            if topc == botc:\n                ret += topc\n            elif topc in '┼╪' and botc == \" \":\n                ret += \"│\"\n            elif topc == \" \":\n                ret += botc\n            elif topc in '┬╥' and botc in \" ║│\" and icod == \"top\":\n                ret += topc\n            elif topc in '┬' and botc == \" \" and icod == \"bot\":\n                ret += '│'\n            elif topc in '╥' and botc == \" \" and icod == \"bot\":\n                ret += '║'\n            elif topc in '┬│' and botc == \"═\":\n                ret += '╪'\n            elif topc in '┬│' and botc == \"─\":\n                ret += '┼'\n            elif topc in '└┘║│░' and botc == \" \" and icod == \"top\":\n                ret += topc\n            elif topc in '─═' and botc == \" \" and icod == \"top\":\n                ret += topc\n            elif topc in '─═' and botc == \" \" and icod == \"bot\":\n                ret += botc\n            elif topc in \"║╥\" and botc in \"═\":\n                ret += \"╬\"\n            elif topc in \"║╥\" and botc in \"─\":\n                ret += \"╫\"\n            elif topc in '╫╬' and botc in \" \":\n                ret += \"║\"\n            elif topc == '└' and botc == \"┌\":\n                ret += \"├\"\n            elif topc == '┘' and botc == \"┐\":\n                ret += \"┤\"\n            elif botc in \"┐┌\" and icod == 'top':\n                ret += \"┬\"\n            elif topc in \"┘└\" and botc in \"─\" and icod == 'top':\n                ret += \"┴\"\n            else:\n                ret += botc\n        return ret", "docstring": "Merges two lines (top and bot) in the way that the overlapping make senses.\nArgs:\ntop (str): the top line\nbot (str): the bottom line\nicod (top or bot): in case of doubt, which line should have priority? Default: \"top\".\nReturns:\nstr: The merge of both lines.", "source": "juraj-google-style"}
{"code": "def get_room_id(self, room_alias):\n        \n        content = self._send(\"GET\", \"/directory/room/{}\".format(quote(room_alias)))\n        return content.get(\"room_id\", None)", "docstring": "Get room id from its alias.\n\nArgs:\nroom_alias (str): The room alias name.\n\nReturns:\nWanted room's id.", "source": "juraj-google-style"}
{"code": "def list(self):\n    request = requests.Request('GET', 'https:\n    pattern = re.compile('<([^>]*)>; rel=\"([^\"]*)\"')\n    gists = []\n    while True:\n        try:\n            response = self.send(request).json()\n        except Exception:\n            break\n        for gist in response:\n            try:\n                gists.append(GistInfo(gist['id'], gist['public'], gist['description']))\n            except KeyError:\n                continue\n        try:\n            link = response.headers['link']\n            for result in pattern.finditer(link):\n                url = result.group(1)\n                rel = result.group(2)\n                if (rel == 'next'):\n                    request.url = url\n                    break\n            else:\n                return gists\n        except Exception:\n            break\n    return gists", "docstring": "Returns a list of the users gists as GistInfo objects\n\nReturns:\na list of GistInfo objects", "source": "codesearchnet"}
{"code": "def _ParseUpdateKeyValue(self, parser_mediator, registry_value, key_path):\n    if (not registry_value.DataIsString()):\n        parser_mediator.ProduceExtractionWarning('unsupported UpdateKey value data type: {0:s}'.format(registry_value.data_type_string))\n        return\n    date_time_string = registry_value.GetDataAsObject()\n    if (not date_time_string):\n        parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')\n        return\n    re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)\n    if (not re_match):\n        parser_mediator.ProduceExtractionWarning('unsupported UpdateKey value data: {0!s}'.format(date_time_string))\n        return\n    (month, day_of_month, year, hours, minutes, seconds, part_of_day) = re_match.groups()\n    try:\n        year = int(year, 10)\n        month = int(month, 10)\n        day_of_month = int(day_of_month, 10)\n        hours = int(hours, 10)\n        minutes = int(minutes, 10)\n        seconds = int(seconds, 10)\n    except (TypeError, ValueError):\n        parser_mediator.ProduceExtractionWarning('invalid UpdateKey date time value: {0!s}'.format(date_time_string))\n        return\n    if (part_of_day == 'PM'):\n        hours += 12\n    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n    try:\n        date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n        date_time.is_local_time = True\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('invalid UpdateKey date time value: {0!s}'.format(time_elements_tuple))\n        return\n    event_data = CCleanerUpdateEventData()\n    event_data.key_path = key_path\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UPDATE, time_zone=parser_mediator.timezone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the UpdateKey value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_value (dfwinreg.WinRegistryValue): Windows Registry value.\nkey_path (str): Windows Registry key path.", "source": "codesearchnet"}
{"code": "def check_docstrings(overwrite: bool=False, check_all: bool=False):\n    module_diff_files = None\n    if not check_all:\n        module_diff_files = set()\n        repo = Repo(PATH_TO_REPO)\n        for modified_file_diff in repo.index.diff(None):\n            if modified_file_diff.a_path.startswith('src/transformers'):\n                module_diff_files.add(modified_file_diff.a_path)\n        for modified_file_diff in repo.index.diff(repo.refs.main.commit):\n            if modified_file_diff.a_path.startswith('src/transformers'):\n                module_diff_files.add(modified_file_diff.a_path)\n        if len(module_diff_files) == 0:\n            return\n        print('    Checking docstrings in the following files:' + '\\n    - ' + '\\n    - '.join(module_diff_files))\n    failures = []\n    hard_failures = []\n    to_clean = []\n    for name in dir(transformers):\n        if name.startswith('_') or ignore_undocumented(name) or name in OBJECTS_TO_IGNORE:\n            continue\n        obj = getattr(transformers, name)\n        if not callable(obj) or not isinstance(obj, type) or getattr(obj, '__doc__', None) is None:\n            continue\n        if module_diff_files is not None:\n            object_file = find_source_file(getattr(transformers, name))\n            object_file_relative_path = 'src/' + str(object_file).split('/src/')[1]\n            if object_file_relative_path not in module_diff_files:\n                continue\n        try:\n            result = match_docstring_with_signature(obj)\n            if result is not None:\n                old_doc, new_doc = result\n            else:\n                old_doc, new_doc = (None, None)\n        except Exception as e:\n            print(e)\n            hard_failures.append(name)\n            continue\n        if old_doc != new_doc:\n            print('name', name)\n            print('old_doc', old_doc)\n            print('new_doc', new_doc)\n            if overwrite:\n                fix_docstring(obj, old_doc, new_doc)\n            else:\n                failures.append(name)\n        elif not overwrite and new_doc is not None and ('<fill_type>' in new_doc or '<fill_docstring>' in new_doc):\n            to_clean.append(name)\n    error_message = ''\n    if len(hard_failures) > 0:\n        error_message += 'The argument part of the docstrings of the following objects could not be processed, check they are properly formatted.'\n        error_message += '\\n' + '\\n'.join([f'- {name}' for name in hard_failures])\n    if len(failures) > 0:\n        error_message += 'The following objects docstrings do not match their signature. Run `make fix-copies` to fix this. In some cases, this error may be raised incorrectly by the docstring checker. If you think this is the case, you can manually check the docstrings and then add the object name to `OBJECTS_TO_IGNORE` in `utils/check_docstrings.py`.'\n        error_message += '\\n' + '\\n'.join([f'- {name}' for name in failures])\n    if len(to_clean) > 0:\n        error_message += 'The following objects docstrings contain templates you need to fix: search for `<fill_type>` or `<fill_docstring>`.'\n        error_message += '\\n' + '\\n'.join([f'- {name}' for name in to_clean])\n    if len(error_message) > 0:\n        error_message = 'There was at least one problem when checking docstrings of public objects.\\n' + error_message\n        raise ValueError(error_message)", "docstring": "Check docstrings of all public objects that are callables and are documented. 
By default, only checks the diff.\n\nArgs:\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether to fix inconsistencies or not.\ncheck_all (`bool`, *optional*, defaults to `False`):\nWhether to check all files.", "source": "github-repos"}
{"code": "def update_table(self, table, fields, retry=DEFAULT_RETRY):\n    partial = table._build_resource(fields)\n    if (table.etag is not None):\n        headers = {'If-Match': table.etag}\n    else:\n        headers = None\n    api_response = self._call_api(retry, method='PATCH', path=table.path, data=partial, headers=headers)\n    return Table.from_api_repr(api_response)", "docstring": "Change some fields of a table.\n\nUse ``fields`` to specify which fields to update. At least one field\nmust be provided. If a field is listed in ``fields`` and is ``None``\nin ``table``, it will be deleted.\n\nIf ``table.etag`` is not ``None``, the update will only succeed if\nthe table on the server has the same ETag. Thus reading a table with\n``get_table``, changing its fields, and then passing it to\n``update_table`` will ensure that the changes will only be saved if\nno modifications to the table occurred since the read.\n\nArgs:\ntable (google.cloud.bigquery.table.Table): The table to update.\nfields (Sequence[str]):\nThe fields of ``table`` to change, spelled as the Table\nproperties (e.g. \"friendly_name\").\nretry (google.api_core.retry.Retry):\n(Optional) A description of how to retry the API call.\n\nReturns:\ngoogle.cloud.bigquery.table.Table:\nThe table resource returned from the API call.", "source": "codesearchnet"}
{"code": "def cache_penalty_model(penalty_model, database=None):\n    if (not _is_index_labelled(penalty_model.graph)):\n        (mapping, __) = _graph_canonicalization(penalty_model.graph)\n        penalty_model = penalty_model.relabel_variables(mapping, inplace=False)\n    if (database is None):\n        conn = cache_connect()\n    else:\n        conn = cache_connect(database)\n    with conn as cur:\n        insert_penalty_model(cur, penalty_model)\n    conn.close()", "docstring": "Caching function for penaltymodel_cache.\n\nArgs:\npenalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to\nbe cached.\ndatabase (str, optional): The path to the desired sqlite database\nfile. If None, will use the default.", "source": "codesearchnet"}
{"code": "def do_load(self, design, init=False):\n    if design:\n        filename = self._validated_config_filename(design)\n        with open(filename, 'r') as f:\n            text = f.read()\n        structure = json_decode(text)\n    else:\n        structure = {}\n    attributes = structure.get('attributes', structure)\n    children = structure.get('children', structure)\n    (name, mri, x, y, visible) = ([], [], [], [], [])\n    for (part_name, d) in attributes.get('layout', {}).items():\n        name.append(part_name)\n        mri.append('')\n        x.append(d['x'])\n        y.append(d['y'])\n        visible.append(d['visible'])\n    self.set_layout(LayoutTable(name, mri, x, y, visible))\n    (source, export) = ([], [])\n    for (source_name, export_name) in attributes.get('exports', {}).items():\n        source.append(source_name)\n        export.append(export_name)\n    self.exports.set_value(ExportTable(source, export))\n    our_values = {k: v for (k, v) in attributes.items() if (k in self.our_config_attributes)}\n    block = self.block_view()\n    block.put_attribute_values(our_values)\n    self.run_hooks((LoadHook(p, c, children.get(p.name, {}), init) for (p, c) in self.create_part_contexts(only_visible=False).items()))\n    self._mark_clean(design, init)", "docstring": "Load a design name, running the child LoadHooks.\n\nArgs:\ndesign: Name of the design json file, without extension\ninit: Passed to the LoadHook to tell the children if this is being\nrun at Init or not", "source": "codesearchnet"}
{"code": "def _imputeMissing(X, center=True, unit=True, betaNotUnitVariance=False, betaA=1.0, betaB=1.0):\n    typeX = X.dtype\n    if (typeX != SP.int8):\n        iNanX = (X != X)\n    else:\n        iNanX = (X == (- 9))\n    if (iNanX.any() or betaNotUnitVariance):\n        if cparser:\n            print('using C-based imputer')\n            if (X.flags['C_CONTIGUOUS'] or (typeX != SP.float32)):\n                X = SP.array(X, order='F', dtype=SP.float32)\n                if (typeX == SP.int8):\n                    X[iNanX] = SP.nan\n                parser.standardize(X, betaNotUnitVariance=betaNotUnitVariance, betaA=betaA, betaB=betaB)\n                X = SP.array(X, dtype=SP.float64)\n            else:\n                parser.standardize(X, betaNotUnitVariance=betaNotUnitVariance, betaA=betaA, betaB=betaB)\n            X = SP.array(X, dtype=SP.float64)\n        else:\n            if betaNotUnitVariance:\n                raise NotImplementedError('Beta(betaA,betaB) standardization only in C-based parser, but not found')\n            nObsX = (~ iNanX).sum(0)\n            if (typeX != SP.float64):\n                X = SP.array(X, dtype=SP.float64)\n            X[iNanX] = 0.0\n            sumX = X.sum(0)\n            meanX = (sumX / nObsX)\n            if center:\n                X -= meanX\n                X[iNanX] = 0.0\n                X_ = X\n            else:\n                mean = SP.tile(meanX, (X.shape[0], 1))\n                X[iNanX] = mean[iNanX]\n                X_ = (X - mean)\n            if unit:\n                stdX = SP.sqrt(((X_ * X_).sum(0) / nObsX))\n                stdX[(stdX == 0.0)] = 1.0\n                X /= stdX\n    else:\n        if (X.dtype != SP.float64):\n            X = SP.array(X, dtype=SP.float64)\n        if center:\n            X -= X.mean(axis=0)\n        if unit:\n            stdX = X.std(axis=0)\n            stdX[(stdX == 0.0)] = 1.0\n            X /= stdX\n    return X", "docstring": "fill in missing values in the SNP matrix by the mean value\noptionally center the data and unit-variance it\n\nArgs:\nX:      scipy.array of SNP values. If dtype=='int8' the missing values are -9,\notherwise the missing values are scipy.nan\ncenter: Boolean indicator if data should be mean centered\nNot supported in C-based parser\nunit:   Boolean indicator if data should be normalized to have unit variance\nNot supported in C-based parser\nbetaNotUnitVariance:    use Beta(betaA,betaB) standardization instead of unit variance\n(only with C-based parser) (default: False)\nbetaA:  shape parameter for Beta(betaA,betaB) standardization (only with C-based parser)\nbetaB:  scale parameter for Beta(betaA,betaB) standardization (only with C-based parser)\n\nReturns:\nX:      scipy.array of standardized SNPs with scipy.float64 values", "source": "codesearchnet"}
{"code": "def dot_distance(t1, t2, name=None):\n    with tf.name_scope(name, 'dot_distance', [t1, t2]) as scope:\n        return (- dot_product(t1, t2, name=scope))", "docstring": "dot \"distance\" between t1 and t2.\n\nArgs:\nt1: A tensor.\nt2: A tensor that is the same size as t1.\nname: Optional name for this op.\nReturns:\nThe dot distance between t1 and t2.", "source": "codesearchnet"}
{"code": "def _get_base_converter_args(self):\n    args = {'input_format': constants.TENSORFLOW_GRAPHDEF, 'allow_custom_ops': self.allow_custom_ops, 'debug_info': self._debug_info, 'target_ops': self.target_spec.supported_ops, 'select_user_tf_ops': self.target_spec.experimental_select_user_tf_ops, 'supported_backends': self.target_spec.experimental_supported_backends, 'unfold_batchmatmul': self.unfold_batchmatmul, 'legalize_custom_tensor_list_ops': self.legalize_custom_tensor_list_ops, 'lower_tensor_list_ops': self._experimental_lower_tensor_list_ops, 'unfold_large_splat_constant': self._experimental_unfold_large_splat_constant, 'default_to_single_batch_in_tensor_list_ops': self._experimental_default_to_single_batch_in_tensor_list_ops, 'tf_quantization_mode': self._experimental_tf_quantization_mode, 'experimental_enable_resource_variables': self.experimental_enable_resource_variables, 'enable_dynamic_update_slice': self._experimental_enable_dynamic_update_slice, 'preserve_assert_op': self._experimental_preserve_assert_op, 'guarantee_all_funcs_one_use': self._experimental_guarantee_all_funcs_one_use, 'allow_all_select_tf_ops': self._experimental_allow_all_select_tf_ops, 'disable_fuse_mul_and_fc': self._experimental_disable_fuse_mul_and_fc, 'quantization_options': self._experimental_quantization_options, 'ir_dump_dir': self.ir_dump_dir, 'ir_dump_pass_regex': self.ir_dump_pass_regex, 'ir_dump_func_regex': self.ir_dump_func_regex, 'enable_timing': self.enable_timing, 'print_ir_before': self.print_ir_before, 'print_ir_after': self.print_ir_after, 'print_ir_module_scope': self.print_ir_module_scope, 'elide_elementsattrs_if_larger': self.elide_elementsattrs_if_larger, 'use_buffer_offset': self._experimental_use_buffer_offset, 'reduce_type_precision': self._experimental_reduce_type_precision, 'use_stablehlo_quantizer': self.experimental_use_stablehlo_quantizer, 'stablehlo_quantizer_config': self.experimental_stablehlo_quantizer_config, 'qdq_conversion_mode': self._experimental_qdq_conversion_mode, 'strict_qdq_mode': self._experimental_strict_qdq, 'disable_per_channel_quantization_for_dense_layers': self._experimental_disable_per_channel_quantization_for_dense_layers, 'enable_composite_direct_lowering': self._experimental_enable_composite_direct_lowering, 'model_origin_framework': self.model_origin_framework, 'canonicalizing_inf_as_min_max_float': self.canonicalizing_inf_as_min_max_float, 'serialize_debug_metadata': self.serialize_debug_metadata, 'unsafe_fuse_dynamic_shaped_broadcast': self._experimental_unsafe_fuse_dynamic_shaped_broadcast}\n    if self.saved_model_dir:\n        args.update({'saved_model_dir': self.saved_model_dir, 'saved_model_version': self._saved_model_version, 'saved_model_tags': self._saved_model_tags, 'saved_model_exported_names': self._saved_model_exported_names})\n    if self._experimental_quantization_options:\n        logging.warning('Configs from custom methods in experimental_quantization_options may not produce a valid tflite model. Note that currently this option only supports StableHLO path. 
Setting this option in TFLite path will be a no-op.')\n    if self.experimental_use_stablehlo_quantizer:\n        self._assign_stablehlo_quantization_config_or_populate_default(args)\n    elif self.experimental_stablehlo_quantizer_config is not None:\n        raise ValueError('QuantizationConfig should be provided only when experimental_use_stablehlo_quantizer is set to true.')\n    return args", "docstring": "Returns the base converter args.\n\nReturns:\n{key str: val}", "source": "github-repos"}
{"code": "def NewRow(self, value=\"\"):\n        \n        newrow = self.row_class()\n        newrow.row = self.size + 1\n        newrow.table = self\n        headers = self._Header()\n        for header in headers:\n            newrow[header] = value\n        return newrow", "docstring": "Fetches a new, empty row, with headers populated.\n\nArgs:\nvalue: Initial value to set each row entry to.\n\nReturns:\nA Row() object.", "source": "juraj-google-style"}
{"code": "def fit3d(samples, e_x, e_y, e_z, remove_zeros=False, **kw):\n    (height, width, depth) = ((len(e_y) - 1), (len(e_x) - 1), (len(e_z) - 1))\n    (p_est, _) = np.histogramdd(samples, (e_x, e_y, e_z))\n    p_est = (p_est / sum(p_est.flat))\n    p_est = p_est.flatten()\n    if remove_zeros:\n        non_zero = (~ (p_est == 0))\n    else:\n        non_zero = (p_est >= 0)\n    basis = spline_base3d(width, height, depth, **kw)\n    model = linear_model.BayesianRidge()\n    model.fit(basis[(:, non_zero)].T, p_est[(:, np.newaxis)][(non_zero, :)])\n    return (model.predict(basis.T).reshape((width, height, depth)), p_est.reshape((width, height, depth)))", "docstring": "Fits a 3D distribution with splines.\n\nInput:\nsamples: Array\nArray of samples from a probability distribution\ne_x: Array\nEdges that define the events in the probability\ndistribution along the x direction. For example,\ne_x[0] < samples[0] <= e_x[1] picks out all\nsamples that are associated with the first event.\ne_y: Array\nSee e_x, but for the y direction.\nremove_zeros: Bool\nIf True, events that are not observed will not\nbe part of the fitting process. If False, those\nevents will be modelled as finfo('float').eps\n**kw: Arguments that are passed on to spline_bse1d.\n\nReturns:\ndistribution: Array\nAn array that gives an estimate of probability for\nevents defined by e.\nknots: Tuple of arrays\nSequence of knots that were used for the spline basis (x,y)", "source": "codesearchnet"}
{"code": "def construct_error_message(driver_id, error_type, message, timestamp):\n    \n    builder = flatbuffers.Builder(0)\n    driver_offset = builder.CreateString(driver_id.binary())\n    error_type_offset = builder.CreateString(error_type)\n    message_offset = builder.CreateString(message)\n\n    ray.core.generated.ErrorTableData.ErrorTableDataStart(builder)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddDriverId(\n        builder, driver_offset)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddType(\n        builder, error_type_offset)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddErrorMessage(\n        builder, message_offset)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddTimestamp(\n        builder, timestamp)\n    error_data_offset = ray.core.generated.ErrorTableData.ErrorTableDataEnd(\n        builder)\n    builder.Finish(error_data_offset)\n\n    return bytes(builder.Output())", "docstring": "Construct a serialized ErrorTableData object.\n\nArgs:\ndriver_id: The ID of the driver that the error should go to. If this is\nnil, then the error will go to all drivers.\nerror_type: The type of the error.\nmessage: The error message.\ntimestamp: The time of the error.\n\nReturns:\nThe serialized object.", "source": "juraj-google-style"}
{"code": "def from_year_month_day(year, month, day, validate=True):\n    year = tf.convert_to_tensor(year, tf.int32)\n    month = tf.convert_to_tensor(month, tf.int32)\n    day = tf.convert_to_tensor(day, tf.int32)\n    control_deps = []\n    if validate:\n        control_deps.append(tf.debugging.assert_positive(year, message='Year must be positive.'))\n        control_deps.append(tf.debugging.assert_greater_equal(month, constants.Month.JANUARY.value, message=f'Month must be >= {constants.Month.JANUARY.value}'))\n        control_deps.append(tf.debugging.assert_less_equal(month, constants.Month.DECEMBER.value, message='Month must be <= {constants.Month.JANUARY.value}'))\n        control_deps.append(tf.debugging.assert_positive(day, message='Day must be positive.'))\n        is_leap = date_utils.is_leap_year(year)\n        days_in_months = tf.constant(_DAYS_IN_MONTHS_COMBINED, tf.int32)\n        max_days = tf.gather(days_in_months, month + 12 * tf.dtypes.cast(is_leap, np.int32))\n        control_deps.append(tf.debugging.assert_less_equal(day, max_days, message='Invalid day-month pairing.'))\n        with tf.compat.v1.control_dependencies(control_deps):\n            year = tf.identity(year)\n            month = tf.identity(month)\n            day = tf.identity(day)\n    with tf.compat.v1.control_dependencies(control_deps):\n        ordinal = date_utils.year_month_day_to_ordinal(year, month, day)\n        return DateTensor(ordinal, year, month, day)", "docstring": "Creates DateTensor from tensors of years, months and days.\n\nArgs:\nyear: Tensor of int32 type. Elements should be positive.\nmonth: Tensor of int32 type of same shape as `year`. Elements should be in\nrange `[1, 12]`.\nday: Tensor of int32 type of same shape as `year`. Elements should be in\nrange `[1, 31]` and represent valid dates together with corresponding\nelements of `month` and `year` Tensors.\nvalidate: Whether to validate the dates.\n\nReturns:\nDateTensor object.\n\n#### Example\n\n```python\nyear = tf.constant([2015, 2017], dtype=tf.int32)\nmonth = tf.constant([4, 12], dtype=tf.int32)\nday = tf.constant([15, 30], dtype=tf.int32)\ndate_tensor = tff.datetime.dates_from_year_month_day(year, month, day)\n```", "source": "github-repos"}
{"code": "def write_to_text(pcoll, path: str):\n    try:\n        field_names = [name for name, _ in schemas.named_fields_from_element_type(pcoll.element_type)]\n    except Exception as exn:\n        raise ValueError('WriteToText requires an input schema with exactly one field.') from exn\n    if len(field_names) != 1:\n        raise ValueError('WriteToText requires an input schema with exactly one field, got %s' % field_names)\n    sole_field_name, = field_names\n    return pcoll | beam.Map(lambda x: str(getattr(x, sole_field_name))) | beam.io.WriteToText(path)", "docstring": "Writes a PCollection to a (set of) text files(s).\n\nThe input must be a PCollection whose schema has exactly one field.\n\nArgs:\npath (str): The file path to write to. The files written will\nbegin with this prefix, followed by a shard identifier.", "source": "github-repos"}
{"code": "def json(cls, message):\n        \n\n        if type(message) is OrderedDict:\n            pprint(dict(message))\n        else:\n            pprint(message)", "docstring": "Print a nice JSON output\n\nArgs:\nmessage: the message to print", "source": "juraj-google-style"}
{"code": "def SetCTypesForLibrary(libname, fn_table):\n    libpath = ctypes.util.find_library(libname)\n    if (not libpath):\n        raise ErrorLibNotFound(('Library %s not found' % libname))\n    lib = ctypes.cdll.LoadLibrary(libpath)\n    for (function, args, result) in fn_table:\n        f = getattr(lib, function)\n        f.argtypes = args\n        f.restype = result\n    return lib", "docstring": "Set function argument types and return types for an ObjC library.\n\nArgs:\nlibname: Library name string\nfn_table: List of (function, [arg types], return types) tuples\nReturns:\nctypes.CDLL with types set according to fn_table\nRaises:\nErrorLibNotFound: Can't find specified lib", "source": "codesearchnet"}
{"code": "def from_dictionary(cls, options):\n    flags = []\n    for k, v in options.items():\n        if isinstance(v, bool):\n            if v:\n                flags.append('--%s' % k)\n            elif k in _FLAG_THAT_SETS_FALSE_VALUE:\n                flag_that_disables_the_option = _FLAG_THAT_SETS_FALSE_VALUE[k]\n                flags.append('--%s' % flag_that_disables_the_option)\n        elif isinstance(v, list):\n            for i in v:\n                flags.append('--%s=%s' % (k, i))\n        elif isinstance(v, dict):\n            flags.append('--%s=%s' % (k, json.dumps(v)))\n        elif v is None:\n            logging.warning('Not setting flag with value None: %s', k)\n        else:\n            flags.append('--%s=%s' % (k, v))\n    return cls(flags)", "docstring": "Returns a PipelineOptions from a dictionary of arguments.\n\nArgs:\noptions: Dictionary of argument value pairs.\n\nReturns:\nA PipelineOptions object representing the given arguments.", "source": "github-repos"}
{"code": "def abspath(self, path):\n    if ((not path.startswith(os.path.sep)) or path.startswith('~')):\n        path = os.path.expanduser(os.path.join(self.base_path, path))\n    return path", "docstring": "Transform the path to an absolute path\n\nArgs:\npath (string): The path to transform to an absolute path\n\nReturns:\nstring: The absolute path to the file", "source": "codesearchnet"}
{"code": "def DeregisterHelper(cls, resolver_helper):\n    \n    if resolver_helper.type_indicator not in cls._resolver_helpers:\n      raise KeyError(\n          'Resolver helper object not set for type indicator: {0:s}.'.format(\n              resolver_helper.type_indicator))\n\n    del cls._resolver_helpers[resolver_helper.type_indicator]", "docstring": "Deregisters a path specification resolver helper.\n\nArgs:\nresolver_helper (ResolverHelper): resolver helper.\n\nRaises:\nKeyError: if resolver helper object is not set for the corresponding\ntype indicator.", "source": "juraj-google-style"}
{"code": "def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type, name='restore_slice', preferred_shard=-1):\n    base_type = dtypes.as_dtype(tensor_type).base_dtype\n    return gen_io_ops.restore_slice(file_pattern, tensor_name, shape_and_slice, base_type, preferred_shard, name=name)", "docstring": "Restore a tensor slice from a set of files with a given pattern.\n\nExample usage:\nRestoreSlice(\"/foo/bar-?????-of-?????\", \"w\", \"10 10 0,2:-\", DT_FLOAT)\n\nArgs:\nfile_pattern: the file pattern used to match a set of checkpoint files.\ntensor_name: the name of the tensor to restore.\nshape_and_slice: the shape-and-slice spec of the slice.\ntensor_type: the type of the tensor to restore.\nname: string.  Optional name for the op.\npreferred_shard: Int. Optional shard to open first in the checkpoint file.\n\nReturns:\nA tensor of type \"tensor_type\".", "source": "github-repos"}
{"code": "def process(self, element):\n    \n    import collections\n    import apache_beam as beam\n\n    num_in_batch = 0\n    try:\n      assert self._session is not None\n\n      feed_dict = collections.defaultdict(list)\n      for line in element:\n\n        \n        if line.endswith('\\n'):\n          line = line[:-1]\n\n        feed_dict[self._input_alias_map.values()[0]].append(line)\n        num_in_batch += 1\n\n      \n      batch_result = self._session.run(fetches=self._tensor_names,\n                                       feed_dict=feed_dict)\n\n      \n      \n      \n      \n      \n      \n      \n      \n\n      \n      if num_in_batch > 1:\n        for result in zip(*batch_result):\n          predictions = {}\n          for name, value in zip(self._aliases, result):\n            predictions[name] = (value.tolist() if getattr(value, 'tolist', None) else value)\n          yield predictions\n      else:\n        predictions = {}\n        for i in range(len(self._aliases)):\n          value = batch_result[i]\n          value = (value.tolist() if getattr(value, 'tolist', None)\n                   else value)\n          predictions[self._aliases[i]] = value\n        yield predictions\n\n    except Exception as e:  \n      yield beam.pvalue.TaggedOutput('errors', (str(e), element))", "docstring": "Run batch prediciton on a TF graph.\n\nArgs:\nelement: list of strings, representing one batch input to the TF graph.", "source": "juraj-google-style"}
{"code": "def num_lineages_at(self, distance):\n        \n        if not isinstance(distance, float) and not isinstance(distance, int):\n            raise TypeError(\"distance must be an int or a float\")\n        if distance < 0:\n            raise RuntimeError(\"distance cannot be negative\")\n        d = dict(); q = deque(); q.append(self.root); count = 0\n        while len(q) != 0:\n            node = q.popleft()\n            if node.is_root():\n                d[node] = 0\n            else:\n                d[node] = d[node.parent]\n            if node.edge_length is not None:\n                d[node] += node.edge_length\n            if d[node] < distance:\n                q.extend(node.children)\n            elif node.parent is None or d[node.parent] < distance:\n                count += 1\n        return count", "docstring": "Returns the number of lineages of this ``Tree`` that exist ``distance`` away from the root\n\nArgs:\n``distance`` (``float``): The distance away from the root\n\nReturns:\n``int``: The number of lineages that exist ``distance`` away from the root", "source": "juraj-google-style"}
{"code": "def cellsiter_to_dataframe(cellsiter, args, drop_allna=True):\n    \n    from modelx.core.cells import shareable_parameters\n\n    if len(args):\n        indexes = shareable_parameters(cellsiter)\n    else:\n        indexes = get_all_params(cellsiter.values())\n\n    result = None\n\n    for cells in cellsiter.values():\n        df = cells_to_dataframe(cells, args)\n\n        if drop_allna and df.isnull().all().all():\n            continue  \n\n        if df.index.names != [None]:\n            if isinstance(df.index, pd.MultiIndex):\n                if _pd_ver < (0, 20):\n                    df = _reset_naindex(df)\n\n            df = df.reset_index()\n\n        missing_params = set(indexes) - set(df)\n\n        for params in missing_params:\n            df[params] = np.nan\n\n        if result is None:\n            result = df\n        else:\n            try:\n                result = pd.merge(result, df, how=\"outer\")\n            except MergeError:\n                \n                result = pd.concat([result, df], axis=1)\n            except ValueError:\n                \n                \n                cols = set(result.columns) & set(df.columns)\n                for col in cols:\n\n                    \n                    if (\n                        len(\n                            [\n                                str(frame[col].dtype)\n                                for frame in (result, df)\n                                if str(frame[col].dtype) == \"object\"\n                            ]\n                        )\n                        == 1\n                    ):\n\n                        if str(result[col].dtype) == \"object\":\n                            frame = df\n                        else:\n                            frame = result\n                        frame[[col]] = frame[col].astype(\"object\")\n\n                \n                result = pd.merge(result, df, how=\"outer\")\n\n    if result is None:\n        return pd.DataFrame()\n    else:\n        return result.set_index(indexes) if indexes else result", "docstring": "Convert multiple cells to a frame.\n\nIf args is an empty sequence, all values are included.\nIf args is specified, cellsiter must have shareable parameters.\n\nArgs:\ncellsiter: A mapping from cells names to CellsImpl objects.\nargs: A sequence of arguments", "source": "juraj-google-style"}
{"code": "def __init__(self, range_str='', make_token=AlphanumericVersionToken,\n                 invalid_bound_error=True):\n        \n        self._str = None\n        self.bounds = []  \n        if range_str is None:\n            return\n\n        try:\n            parser = _VersionRangeParser(range_str, make_token,\n                                         invalid_bound_error=invalid_bound_error)\n            bounds = parser.bounds\n        except ParseException as e:\n            raise VersionError(\"Syntax error in version range '%s': %s\"\n                               % (range_str, str(e)))\n        except VersionError as e:\n            raise VersionError(\"Invalid version range '%s': %s\"\n                               % (range_str, str(e)))\n\n        if bounds:\n            self.bounds = self._union(bounds)\n        else:\n            self.bounds.append(_Bound.any)", "docstring": "Create a VersionRange object.\n\nArgs:\nrange_str: Range string, such as \"3\", \"3+<4.5\", \"2|6+\". The range\nwill be optimised, so the string representation of this instance\nmay not match range_str. For example, \"3+<6|4+<8\" == \"3+<8\".\nmake_token: Version token class to use.\ninvalid_bound_error (bool): If True, raise an exception if an\nimpossible range is given, such as '3+<2'.", "source": "juraj-google-style"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        if self.Version > 1:\n            logger.error(\"format exception...\")\n\n        self.Code = FunctionCode()\n        self.Code.Deserialize(reader)\n\n        if self.Version >= 1:\n            self.NeedStorage = reader.ReadBool()\n        else:\n            self.NeedStorage = False\n\n        self.Name = reader.ReadVarString()\n        self.CodeVersion = reader.ReadVarString()\n        self.Author = reader.ReadVarString()\n        self.Email = reader.ReadVarString()\n        self.Description = reader.ReadVarString()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def is_unitary(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:\n    return ((matrix.shape[0] == matrix.shape[1]) and np.allclose(matrix.dot(np.conj(matrix.T)), np.eye(matrix.shape[0]), rtol=rtol, atol=atol))", "docstring": "Determines if a matrix is approximately unitary.\n\nA matrix is unitary if it's square and its adjoint is its inverse.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is unitary within the given tolerance.", "source": "codesearchnet"}
{"code": "def _batch_prepare_for_model(self, batch_ids_pairs: list[Union[PreTokenizedInputPair, tuple[list[int], None]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True, split_special_tokens: bool=False) -> BatchEncoding:\n    batch_outputs = {}\n    for first_ids, second_ids in batch_ids_pairs:\n        outputs = self.prepare_for_model(first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose, split_special_tokens=split_special_tokens)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs", "source": "github-repos"}
{"code": "def stop(self, block=True):\n        \n        self._stop = True\n\n        \n        self.empty_queue()\n\n        \n        \n        \n        \n        for _ in range(self.threads_active()):\n            self._queue.put(SetPrio(target=DoNothing))\n\n        if block:\n            \n            self.join()\n\n            \n            self.empty_queue()", "docstring": "Stops all active threads and rejects new tasks to be added\nArgs:\nblock (bool): If True, block until all threads are closed", "source": "juraj-google-style"}
{"code": "def notify(self, method, params=None):\n    log.debug('Sending notification: %s %s', method, params)\n    message = {'jsonrpc': JSONRPC_VERSION, 'method': method}\n    if (params is not None):\n        message['params'] = params\n    self._consumer(message)", "docstring": "Send a JSON RPC notification to the client.\n\nArgs:\nmethod (str): The method name of the notification to send\nparams (any): The payload of the notification", "source": "codesearchnet"}
{"code": "def _process_req_body(self, body):\n    \n    try:\n      return json.loads(body)\n    except ValueError:\n      return urlparse.parse_qs(body, keep_blank_values=True)", "docstring": "Process the body of the HTTP request.\n\nIf the body is valid JSON, return the JSON as a dict.\nElse, convert the key=value format to a dict and return that.\n\nArgs:\nbody: The body of the HTTP request.", "source": "juraj-google-style"}
{"code": "def process_document_events(events, use_buffers=True):\n    json_events = []\n    references = set()\n    buffers = ([] if use_buffers else None)\n    for event in events:\n        json_events.append(event.generate(references, buffers))\n    json = {'events': json_events, 'references': references_json(references)}\n    return (serialize_json(json), (buffers if use_buffers else []))", "docstring": "Create a JSON string describing a patch to be applied as well as\nany optional buffers.\n\nArgs:\nevents : list of events to be translated into patches\n\nReturns:\nstr, list :\nJSON string which can be applied to make the given updates to obj\nas well as any optional buffers", "source": "codesearchnet"}
{"code": "def update(self, iterable):\n    for pair in pairwise_longest(iterable, fillvalue=_FILL):\n        self._edges.append(pair)\n        self._results = None", "docstring": "Update with an ordered iterable of items.\n\nArgs:\niterable: An ordered iterable of items. The relative\norder of the items in this iterable will be respected\nin the TopoSet (in the absence of cycles).", "source": "codesearchnet"}
{"code": "def __init__(self,\n               max_entity_count=MAX_ENTITY_COUNT,\n               mapreduce_spec=None):\n    \n    self.max_entity_count = max_entity_count\n    params = mapreduce_spec.params if mapreduce_spec is not None else {}\n    self.force_writes = bool(params.get(\"force_ops_writes\", False))\n    self.puts = _ItemList(max_entity_count,\n                          self._flush_puts,\n                          repr_function=self._db_repr)\n    self.deletes = _ItemList(max_entity_count,\n                             self._flush_deletes)\n    self.ndb_puts = _ItemList(max_entity_count,\n                              self._flush_ndb_puts,\n                              repr_function=self._ndb_repr)\n    self.ndb_deletes = _ItemList(max_entity_count,\n                                 self._flush_ndb_deletes)", "docstring": "Constructor.\n\nArgs:\nmax_entity_count: maximum number of entities before flushing it to db.\nmapreduce_spec: An optional instance of MapperSpec.", "source": "juraj-google-style"}
{"code": "def delete_project(self, project):\n        \n        if not is_valid_uuid(project):\n            raise StorageArgumentException(\n                'Invalid UUID for project: {0}'.format(project))\n        self._authenticated_request \\\n            .to_endpoint('project/{}/'.format(project)) \\\n            .delete()", "docstring": "Delete a project. It will recursively delete all the content.\n\nArgs:\nproject (str): The UUID of the project to be deleted.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: 403\nStorageNotFoundException: 404\nHTTPError: other non-20x error codes", "source": "juraj-google-style"}
{"code": "def buckets_delete(self, bucket):\n    url = (Api._ENDPOINT + (Api._BUCKET_PATH % bucket))\n    google.datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials, raw_response=True)", "docstring": "Issues a request to delete a bucket.\n\nArgs:\nbucket: the name of the bucket.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def _bisect(self, begin, end, listener):\n        \n\n        step = (end.date - begin.date) / 2\n\n        while abs(step) >= self._eps_bisect:\n            date = begin.date + step\n            if self.SPEAKER_MODE == \"global\":\n                orb = self.propagate(date)\n            else:\n                orb = begin.propagate(date)\n            if listener(begin) * listener(orb) > 0:\n                begin = orb\n            else:\n                end = orb\n            step = (end.date - begin.date) / 2\n        else:\n            end.event = listener.info(end)\n            return end", "docstring": "This method search for the zero-crossing of the watched parameter\n\nArgs:\nbegin (Orbit):\nend (Orbit)\nlistener (Listener)\nReturn\nReturn", "source": "juraj-google-style"}
{"code": "def index(self, ref, columns):\n        \n        from ambry.orm.exc import NotFoundError\n\n        logger.debug('Creating index for partition.\\n    ref: {}, columns: {}'.format(ref, columns))\n\n        connection = self._backend._get_connection()\n\n        try:\n            table_or_partition = self._library.partition(ref)\n        except NotFoundError:\n            table_or_partition = ref\n\n\n        self._backend.index(connection, table_or_partition, columns)", "docstring": "Create an index on the columns.\n\nArgs:\nref (str): id, vid, name or versioned name of the partition.\ncolumns (list of str): names of the columns needed indexes.", "source": "juraj-google-style"}
{"code": "def update(self, item):\n        \n        if item.matrix not in self.data:\n            self.data[item.matrix] = []\n\n        result = Select(self.data[item.matrix]).where(\n            lambda entry: entry.stage == item.stage).build()\n\n        if len(result) > 0:\n            stage = result[0]\n            stage.status = item.status\n            stage.add(item.timestamp, item.information)\n        else:\n            stage = CollectorStage(stage=item.stage, status=item.status)\n            stage.add(item.timestamp, item.information)\n            self.data[item.matrix].append(stage)", "docstring": "Add a collector item.\n\nArgs:\nitem (CollectorUpdate): event data like stage, timestampe and status.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_dtype, value_dtype):\n    self._key_dtype = key_dtype\n    self._value_dtype = value_dtype", "docstring": "Construct a table initializer object.\n\nArgs:\nkey_dtype: Type of the table keys.\nvalue_dtype: Type of the table values.", "source": "github-repos"}
{"code": "def set(self, key, val):\n    \n    self._create_file_if_none_exists()\n    with open(self.filename, 'r+b') as file_object:\n      cache_pickle = pickle.load(file_object)\n      cache_pickle[key] = val\n      file_object.seek(0)\n      pickle.dump(cache_pickle, file_object)", "docstring": "Sets a value in a key.\n\nArgs:\nkey (str): Key for the value.\nval: Value to set.\n\nReturns:\nRetrieved value.", "source": "juraj-google-style"}
{"code": "def _FindStmtParent(node):\n    if pytree_utils.NodeName(node) in _STATEMENT_NODES:\n        return node\n    else:\n        return _FindStmtParent(node.parent)", "docstring": "Find the nearest parent of node that is a statement node.\n\nArguments:\nnode: node to start from\n\nReturns:\nNearest parent (or node itself, if suitable).", "source": "github-repos"}
{"code": "def add_mutex_switch(parser, dest, arguments=set(), default=None,\n                         single_arg=False, required=False):\n        \n\n        if default is not None:\n            assert default in arguments\n\n        if isinstance(arguments, set):\n            arguments = {k: None for k in arguments}\n\n        if not single_arg:\n            mg = parser.add_mutually_exclusive_group(required=required)\n\n            for name, help_text in arguments.items():\n                kwargs = {\n                    \"action\": \"store_const\",\n                    \"dest\": dest,\n                    \"const\": name,\n                    \"help\": help_text\n                }\n\n                if default == name:\n                    kwargs[\"default\"] = name\n\n                mg.add_argument(\"--{}\".format(name), **kwargs)\n\n            return mg\n        else:\n            kwargs = {\n                \"dest\": dest,\n                \"type\": str,\n                \"default\": default,\n                \"help\": \"\\n\".join(\"{}: {}\".format(k, v)\n                                  for k, v in arguments.items()),\n                \"choices\": list(arguments.keys())\n            }\n\n            return parser.add_argument(\"--{}\".format(dest), **kwargs)", "docstring": "Adds mutually exclusive switch arguments.\n\nArgs:\narguments: a dictionary that maps switch name to helper text. Use\nsets to skip help texts.", "source": "juraj-google-style"}
{"code": "def save_def_args_in_temp(self, call_args, def_args, line_number, saved_function_call_index, first_node):\n    args_mapping = dict()\n    last_return_value_of_nested_call = None\n    for (i, call_arg) in enumerate(call_args):\n        def_arg_temp_name = ((('temp_' + str(saved_function_call_index)) + '_') + def_args[i])\n        return_value_of_nested_call = None\n        if isinstance(call_arg, ast.Call):\n            return_value_of_nested_call = self.visit(call_arg)\n            restore_node = RestoreNode(((def_arg_temp_name + ' = ') + return_value_of_nested_call.left_hand_side), def_arg_temp_name, [return_value_of_nested_call.left_hand_side], line_number=line_number, path=self.filenames[(- 1)])\n            if (return_value_of_nested_call in self.blackbox_assignments):\n                self.blackbox_assignments.add(restore_node)\n        else:\n            call_arg_label_visitor = LabelVisitor()\n            call_arg_label_visitor.visit(call_arg)\n            call_arg_rhs_visitor = RHSVisitor()\n            call_arg_rhs_visitor.visit(call_arg)\n            restore_node = RestoreNode(((def_arg_temp_name + ' = ') + call_arg_label_visitor.result), def_arg_temp_name, call_arg_rhs_visitor.result, line_number=line_number, path=self.filenames[(- 1)])\n        if (not first_node):\n            first_node = restore_node\n        if isinstance(call_arg, ast.Call):\n            if last_return_value_of_nested_call:\n                if isinstance(return_value_of_nested_call, BBorBInode):\n                    last_return_value_of_nested_call.connect(return_value_of_nested_call)\n                else:\n                    last_return_value_of_nested_call.connect(return_value_of_nested_call.first_node)\n            elif isinstance(return_value_of_nested_call, BBorBInode):\n                first_node.inner_most_call = return_value_of_nested_call\n            else:\n                first_node.inner_most_call = return_value_of_nested_call.first_node\n            last_return_value_of_nested_call = return_value_of_nested_call\n        self.connect_if_allowed(self.nodes[(- 1)], restore_node)\n        self.nodes.append(restore_node)\n        if isinstance(call_arg, ast.Call):\n            args_mapping[return_value_of_nested_call.left_hand_side] = def_args[i]\n        else:\n            args_mapping[def_args[i]] = call_arg_label_visitor.result\n    return (args_mapping, first_node)", "docstring": "Save the arguments of the definition being called. Visit the arguments if they're calls.\n\nArgs:\ncall_args(list[ast.Name]): Of the call being made.\ndef_args(ast_helper.Arguments): Of the definition being called.\nline_number(int): Of the call being made.\nsaved_function_call_index(int): Unique number for each call.\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.\n\nReturns:\nargs_mapping(dict): A mapping of call argument to definition argument.\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.", "source": "codesearchnet"}
{"code": "def add_batch_parser(subparsers, parent_parser):\n    parser = subparsers.add_parser('batch', help='Displays information about batches and submit new batches', description='Provides subcommands to display Batch information and submit Batches to the validator via the REST API.')\n    grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')\n    grand_parsers.required = True\n    add_batch_list_parser(grand_parsers, parent_parser)\n    add_batch_show_parser(grand_parsers, parent_parser)\n    add_batch_status_parser(grand_parsers, parent_parser)\n    add_batch_submit_parser(grand_parsers, parent_parser)", "docstring": "Adds arguments parsers for the batch list, batch show and batch status\ncommands\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "codesearchnet"}
{"code": "def applicable_decision_points(self, dna_spec: pg.geno.DNASpec, global_state: pg.geno.AttributeDict, step: int) -> List[pg.geno.DecisionPoint]:\n    applicable_points = []\n    for dp in dna_spec.decision_points:\n        if isinstance(dp, pg.geno.Choices) and dp.is_subchoice:\n            if dp.subchoice_index == 0:\n                applicable_points.append(dp.parent_spec)\n        else:\n            applicable_points.append(dp)\n    return applicable_points", "docstring": "Returns applicable decision points for this recombinator.\n\nThe default behavior is to return all decision points in the search space,\nwith multi-choice subchoices folded into a single decision point. Subclasses\ncan override this method to select applicable points according to their\nsemantics.\n\nArgs:\ndna_spec: The root DNASpec.\nglobal_state: An optional keyword argument as the global state. Subclass\ncan omit.\nstep: An optional keyword argument as current step. Subclass can omit.\n\nReturns:\nA list of targeted decision points for point-wise recombination, which\nwill be further filtered by the `where` statement later.", "source": "github-repos"}
{"code": "def calculate_focus(self, reading):\n    middle_index = (len(self.source.get_readings()) \n    middle_reading = self.source.get_reading(middle_index)\n    return self.convert_source_location(middle_reading, reading)", "docstring": "Determines what the focal point of the downloaded image should be.\n\nReturns:\nfocal_point: (x, y)\nThe location of the source in the middle observation, in the\ncoordinate system of the current source reading.", "source": "codesearchnet"}
{"code": "def ZerosLikeForExit(self, val):\n    val_shape = val.get_shape()\n    forward_ctxt = val.op._get_control_flow_context()\n    outer_forward_ctxt = forward_ctxt.outer_context\n    if outer_forward_ctxt:\n        outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()\n    outer_grad_state = None\n    if outer_forward_ctxt:\n        outer_grad_state = self._map.get(outer_forward_ctxt)\n    if outer_grad_state:\n        if val_shape.is_fully_defined():\n            outer_grad_state.grad_context.Enter()\n            result = array_ops.zeros(val_shape.dims, val.dtype)\n            outer_grad_state.grad_context.Exit()\n        else:\n            forward_ctxt.outer_context.Enter()\n            shape = array_ops.shape_internal(val, optimize=False)\n            forward_ctxt.outer_context.Exit()\n            history_shape = outer_grad_state.AddForwardAccumulator(shape)\n            outer_grad_ctxt = outer_grad_state.grad_context\n            outer_grad_ctxt.Enter()\n            real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_shape, shape)\n            result = array_ops.zeros(real_shape, val.dtype)\n            outer_grad_ctxt.Exit()\n    elif val_shape.is_fully_defined():\n        result = array_ops.zeros(val_shape.dims, val.dtype)\n    else:\n        result = array_ops.zeros_like(val, optimize=False)\n    return result", "docstring": "Create zeros_like gradient for a loop exit.\n\nIf the result of a loop variable is not used but is involved in\ncomputing the result of some needed loop variable, we create a\nzero-valued tensor that is fed as gradient for the Exit node of that\nloop variable. Note that val.op is an Exit, and this method must be\ncalled in the control flow context where gradients() is called.\n\nArgs:\nval: The output tensor of an Exit op.\n\nReturns:\nA zero tensor of the same shape of val.", "source": "github-repos"}
{"code": "def stop(self, accountID, **kwargs):\n        \n        return self.create(\n            accountID,\n            order=StopOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to create a Stop Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a StopOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def _piecewise_learning_rate(step, boundaries, values):\n    values = ([1.0] + values)\n    boundaries = [float(x) for x in boundaries]\n    return tf.train.piecewise_constant(step, boundaries, values, name='piecewise_lr')", "docstring": "Scale learning rate according to the given schedule.\n\nMultipliers are not cumulative.\n\nArgs:\nstep: global step\nboundaries: List of steps to transition on.\nvalues: Multiplier to apply at each boundary transition.\n\nReturns:\nScaled value for the learning rate.", "source": "codesearchnet"}
{"code": "def get_json_type(obj):\n    if hasattr(obj, 'get_config'):\n        return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}\n    if type(obj).__module__ == np.__name__:\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        else:\n            return obj.item()\n    if callable(obj):\n        return obj.__name__\n    if type(obj).__name__ == type.__name__:\n        return obj.__name__\n    if isinstance(obj, tensor_shape.Dimension):\n        return obj.value\n    if isinstance(obj, tensor_shape.TensorShape):\n        return obj.as_list()\n    if isinstance(obj, dtypes.DType):\n        return obj.name\n    if isinstance(obj, collections_abc.Mapping):\n        return dict(obj)\n    if obj is Ellipsis:\n        return {'class_name': '__ellipsis__'}\n    if isinstance(obj, wrapt.ObjectProxy):\n        return obj.__wrapped__\n    raise TypeError(f'Object {obj} is not JSON-serializable. You may implement a `get_config()` method on the class (returning a JSON-serializable dictionary) to make it serializable.')", "docstring": "Serializes any object to a JSON-serializable structure.\n\nArgs:\nobj: the object to serialize\n\nReturns:\nJSON-serializable structure representing `obj`.\n\nRaises:\nTypeError: if `obj` cannot be serialized.", "source": "github-repos"}
{"code": "def db_get(table, record, column, if_exists=False):\n    \n    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]\n    if if_exists:\n        cmd += ['--if-exists']\n    cmd += ['list', table, record]\n    result = __salt__['cmd.run_all'](cmd)\n    if result['retcode'] != 0:\n        raise CommandExecutionError(result['stderr'])\n    output = _stdout_parse_json(result['stdout'])\n    if output['data'] and output['data'][0]:\n        return output['data'][0][0]\n    else:\n        return None", "docstring": "Gets a column's value for a specific record.\n\nArgs:\ntable: A string - name of the database table.\nrecord: A string - identifier of the record.\ncolumn: A string - name of the column.\nif_exists: A boolean - if True, it is not an error if the record does\nnot exist.\n\nReturns:\nThe column's value.\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.db_get Port br0 vlan_mode", "source": "juraj-google-style"}
{"code": "def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary):\n    try:\n        parsed_value = parse_fn(m_dict['val'])\n    except ValueError:\n        _parse_fail(name, var_type, m_dict['val'], values)\n    if (not m_dict['index']):\n        if (name in results_dictionary):\n            _reuse_fail(name, values)\n        results_dictionary[name] = parsed_value\n    else:\n        if (name in results_dictionary):\n            if (not isinstance(results_dictionary.get(name), dict)):\n                _reuse_fail(name, values)\n        else:\n            results_dictionary[name] = {}\n        index = int(m_dict['index'])\n        if (index in results_dictionary[name]):\n            _reuse_fail('{}[{}]'.format(name, index), values)\n        results_dictionary[name][index] = parsed_value", "docstring": "Update results_dictionary with a scalar value.\n\nUsed to update the results_dictionary to be returned by parse_values when\nencountering a clause with a scalar RHS (e.g.  \"s=5\" or \"arr[0]=5\".)\n\nMutates results_dictionary.\n\nArgs:\nname: Name of variable in assignment (\"s\" or \"arr\").\nparse_fn: Function for parsing the actual value.\nvar_type: Type of named variable.\nm_dict: Dictionary constructed from regex parsing.\nm_dict['val']: RHS value (scalar)\nm_dict['index']: List index value (or None)\nvalues: Full expression being parsed\nresults_dictionary: The dictionary being updated for return by the parsing\nfunction.\n\nRaises:\nValueError: If the name has already been used.", "source": "codesearchnet"}
{"code": "def _item_to_document_ref(iterator, item):\n    \n    document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1]\n    return iterator.collection.document(document_id)", "docstring": "Convert Document resource to document ref.\n\nArgs:\niterator (google.api_core.page_iterator.GRPCIterator):\niterator response\nitem (dict): document resource", "source": "juraj-google-style"}
{"code": "def get_cuda_visible_devices():\n    gpu_ids_str = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n    if (gpu_ids_str is None):\n        return None\n    if (gpu_ids_str == ''):\n        return []\n    return [int(i) for i in gpu_ids_str.split(',')]", "docstring": "Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.\n\nReturns:\nif CUDA_VISIBLE_DEVICES is set, this returns a list of integers with\nthe IDs of the GPUs. If it is not set, this returns None.", "source": "codesearchnet"}
{"code": "def __call__(self, w):\n    return w", "docstring": "Applies the constraint to the input weight variable.\n\nBy default, the inputs weight variable is not modified.\nUsers should override this method to implement their own projection\nfunction.\n\nArgs:\nw: Input weight variable.\n\nReturns:\nProjected variable (by default, returns unmodified inputs).", "source": "github-repos"}
{"code": "def market_exact(self, session, start_time: str, end_time: str) -> Session:\n    if (session not in self.exch):\n        return SessNA\n    ss = self.exch[session]\n    same_day = (ss[0] < ss[(- 1)])\n    if (not start_time):\n        s_time = ss[0]\n    else:\n        s_time = param.to_hour(start_time)\n        if same_day:\n            s_time = max(s_time, ss[0])\n    if (not end_time):\n        e_time = ss[(- 1)]\n    else:\n        e_time = param.to_hour(end_time)\n        if same_day:\n            e_time = min(e_time, ss[(- 1)])\n    if (same_day and (s_time > e_time)):\n        return SessNA\n    return Session(start_time=s_time, end_time=e_time)", "docstring": "Explicitly specify start time and end time\n\nArgs:\nsession: predefined session\nstart_time: start time in terms of HHMM string\nend_time: end time in terms of HHMM string\n\nReturns:\nSession of start_time and end_time", "source": "codesearchnet"}
{"code": "def acc_difference(points):\n    \n    data = [0]\n    for before, after in pairwise(points):\n        data.append(before.acc - after.acc)\n    return data", "docstring": "Computes the accelaration difference between each adjacent point\n\nArgs:\npoints (:obj:`Point`)\nReturns:\n:obj:`list` of int: Indexes of changepoints", "source": "juraj-google-style"}
{"code": "def list_street_poi_parking(self, **kwargs):\n    url_args = {'language': util.language_code(kwargs.get('lang')), 'address': kwargs.get('address', '')}\n    result = self.make_request('list_street_poi_parking', url_args)\n    if (not util.check_result(result)):\n        return (False, result.get('message', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'Data')\n    return (True, [emtype.ParkingPoi(**a) for a in values])", "docstring": "Obtain a list of addresses and POIs.\n\nThis endpoint uses an address to perform the search\n\nArgs:\nlang (str): Language code (*es* or *en*).\naddress (str): Address in which to perform the search.\n\nReturns:\nStatus boolean and parsed response (list[ParkingPoi]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def extract_value_from_output(canary, split_offset, kal_out):\n    retval = ''\n    while (retval == ''):\n        for line in kal_out.splitlines():\n            if (canary in line):\n                retval = str(line.split()[split_offset])\n        if (retval == ''):\n            retval = None\n    return retval", "docstring": "Return value parsed from output.\n\nArgs:\ncanary(str): This string must exist in the target line.\nsplit_offset(int): Split offset for target value in string.\nkal_out(int): Output from kal.", "source": "codesearchnet"}
{"code": "def transform(self, transform, desc=None):\n        \n        if desc is None:\n            desc = u'transform({})'.format(getattr(transform, '__name__', ''))\n\n        return self.replace(\n            transforms=self.transforms + [transform],\n            desc_stack=self.desc_stack + [desc]\n        )", "docstring": "Create a copy of this query, transformed by `transform`.\n\nArgs:\ntransform (callable): Callable that takes an iterable of values and\nreturns an iterable of transformed values.\n\nKeyword Args:\ndesc (str): A description of the transform, to use in log messages.\nDefaults to the name of the `transform` function.\n\nReturns:\nQuery", "source": "juraj-google-style"}
{"code": "def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n    return [self.on(target) for target in targets]", "docstring": "Returns a list of operations apply this gate to each of the targets.\n\nArgs:\n*targets: The qubits to apply this gate to.\n\nReturns:\nOperations applying this gate to the target qubits.\n\nRaises:\nValueError if targets are not instances of Qid.", "source": "codesearchnet"}
{"code": "def find_layer_idx(model, layer_name):\n    layer_idx = None\n    for (idx, layer) in enumerate(model.layers):\n        if (layer.name == layer_name):\n            layer_idx = idx\n            break\n    if (layer_idx is None):\n        raise ValueError(\"No layer with name '{}' within the model\".format(layer_name))\n    return layer_idx", "docstring": "Looks up the layer index corresponding to `layer_name` from `model`.\n\nArgs:\nmodel: The `keras.models.Model` instance.\nlayer_name: The name of the layer to lookup.\n\nReturns:\nThe layer index if found. Raises an exception otherwise.", "source": "codesearchnet"}
{"code": "def _build(self, inputs):\n    input_shape = tf.shape(inputs)\n    input_dtype = inputs.dtype.as_numpy_dtype\n    batch_size = tf.expand_dims(input_shape[0], 0)\n    number_of_params = inputs.get_shape()[1]\n    if (number_of_params != self._constraints.num_free_params):\n        raise base.Error('Input size is not consistent with constraint definition: {} parameters expected, {} provided.'.format(self._constraints.num_free_params, number_of_params))\n    num_output_dimensions = (len(self._psi) \n\n    def get_input_slice(start, size):\n        'Extracts a subset of columns from the input 2D Tensor.'\n        return basic.SliceByDim([1], [start], [size])(inputs)\n    warped_grid = []\n    var_index_offset = 0\n    number_of_points = np.prod(self._output_shape)\n    for i in xrange(num_output_dimensions):\n        if (self._psi[i] is not None):\n            grid_coord = self._psi[i].astype(input_dtype)\n            num_active_vars = self._psi[i].shape[0]\n            active_vars = get_input_slice(var_index_offset, num_active_vars)\n            warped_coord = tf.matmul(active_vars, grid_coord)\n            warped_coord = tf.expand_dims(warped_coord, 1)\n            var_index_offset += num_active_vars\n            offset = self._psi[(num_output_dimensions + i)]\n            if (offset is not None):\n                offset = offset.astype(input_dtype)\n                tiling_params = tf.concat([batch_size, tf.constant(1, shape=(1,)), tf.ones_like(offset.shape)], 0)\n                offset = offset.reshape(((1, 1) + offset.shape))\n                warped_coord += tf.tile(offset, tiling_params)\n        else:\n            warped_coord = self._psi[(num_output_dimensions + i)].astype(input_dtype)\n            tiling_params = tf.concat([batch_size, tf.constant(1, shape=(1,)), tf.ones_like(warped_coord.shape)], 0)\n            warped_coord = warped_coord.reshape(((1, 1) + warped_coord.shape))\n            warped_coord = tf.tile(warped_coord, tiling_params)\n        warped_coord += self._psi[(i + (2 * num_output_dimensions))]\n        warped_coord.set_shape([None, 1, number_of_points])\n        warped_grid.append(warped_coord)\n    grid_shape = (self._output_shape + (1,))\n    warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]\n    return tf.concat(warped_grid, len(grid_shape))", "docstring": "Assembles the module network and adds it to the graph.\n\nThe internal computation graph is assembled according to the set of\nconstraints provided at construction time.\n\nArgs:\ninputs: Tensor containing a batch of transformation parameters.\n\nReturns:\nA batch of warped grids.\n\nRaises:\nError: If the input tensor size is not consistent with the constraints\npassed at construction time.", "source": "codesearchnet"}
{"code": "def _is_framework_filename(filename):\n    for pattern in _EXTERNAL_FILENAME_PATTERNS:\n        if pattern.search(filename):\n            return False\n    for pattern in _FRAMEWORK_FILENAME_PATTERNS:\n        if pattern.search(filename):\n            return True\n    for prefix in _FRAMEWORK_PATH_PREFIXES:\n        if filename.startswith(prefix):\n            return True\n    return False", "docstring": "Returns whether a filename should be considered a part of the framework.\n\nA file is part of the framework if it does not match a pattern in\n_EXTERNAL_FILENAME_PATTERNS and it either matches a pattern in\n_FRAMEWORK_FILENAME_PATTERNS or starts with a _FRAMEWORK_PATH_PREFIXES prefix.\n\nArgs:\nfilename: A filename string.\n\nReturns:\nWhether the filename should be considered to be internal to the\nTensorFlow framework for the purposes of reporting errors.", "source": "github-repos"}
{"code": "def get(self, *args, **kwargs):\n        \n        if not self.enabled:\n            return None\n        \n        \n        cache_key = self.make_key(args, kwargs)\n        \n        with self._cache_lock:\n            if cache_key in self._cache:\n                expirytime, item = self._cache[cache_key]\n\n                if expirytime >= time():\n                    return item\n                else:\n                    \n                    del self._cache[cache_key]\n        \n        return None", "docstring": "Get an item from the cache for this combination of args and kwargs.\n\nArgs:\n*args: any arguments.\n**kwargs: any keyword arguments.\n\nReturns:\nobject: The object which has been found in the cache, or `None` if\nno unexpired item is found. This means that there is no point\nstoring an item in the cache if it is `None`.", "source": "juraj-google-style"}
{"code": "def universal_transformer_highway(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):\n    (state, inputs, memory) = layer_inputs\n    new_state = step_preprocess(state, step, hparams)\n    for i in range(hparams.num_inrecurrence_layers):\n        with tf.variable_scope(('rec_layer_%d' % i)):\n            new_state = ffn_unit(attention_unit(new_state))\n    transformed_state = new_state\n    gate_inputs = []\n    if ('s' in hparams.gates_inputs):\n        gate_inputs.append(state)\n    if ('t' in hparams.gates_inputs):\n        gate_inputs.append(transformed_state)\n    if ('i' in hparams.gates_inputs):\n        gate_inputs.append(inputs)\n    gate_ffn_layer = hparams.gate_ffn_layer\n    transform_gate = _ffn_layer_multi_inputs(gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name='transform', bias_initializer=tf.constant_initializer(hparams.transform_bias_init), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=True, postprocess=True)\n    if hparams.couple_carry_transform_gates:\n        carry_gate = tf.subtract(1.0, transform_gate, name='carry')\n    else:\n        carry_gate = _ffn_layer_multi_inputs(gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name='carry', bias_initializer=tf.constant_initializer((- hparams.transform_bias_init)), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=True, postprocess=True)\n    new_state = ((state * carry_gate) + (transformed_state * transform_gate))\n    tf.contrib.summary.scalar('highway_transform_gate_layer', tf.reduce_mean(transform_gate))\n    tf.contrib.summary.scalar('highway_carry_gate_layer', tf.reduce_mean(carry_gate))\n    return (new_state, inputs, memory)", "docstring": "Universal Transformer with highway connection.\n\n\nIt transforms the state using a block contaaining sel-attention and transition\nfunction  and wrap the whole block with a highway connection.\n(the new state is a combination of the state and the transformed-state\nbased on cary/transform gates.)\n\nInteresting observation:\nControlling the cary/transform gate with the original inputs works usually\nbetter (i.e. hparams.gates_inputs=\"i\")\n\nArgs:\nlayer_inputs:\n- state: state\n- inputs: the original embedded inputs (= inputs to the first step)\nstep: indicates number of steps taken so far\nhparams: model hyper-parameters.\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\n\nReturns:\nlayer_output:\nnew_state: new state\ninputs: the original embedded inputs (= inputs to the first step)", "source": "codesearchnet"}
{"code": "def DeregisterHelper(cls, analyzer_helper):\n    \n    if analyzer_helper.type_indicator not in cls._analyzer_helpers:\n      raise KeyError(\n          'Analyzer helper object not set for type indicator: {0:s}.'.format(\n              analyzer_helper.type_indicator))\n\n    analyzer_helper = cls._analyzer_helpers[analyzer_helper.type_indicator]\n\n    cls._FlushCache(analyzer_helper.format_categories)\n\n    del cls._analyzer_helpers[analyzer_helper.type_indicator]", "docstring": "Deregisters a format analyzer helper.\n\nArgs:\nanalyzer_helper (AnalyzerHelper): analyzer helper.\n\nRaises:\nKeyError: if analyzer helper object is not set for the corresponding\ntype indicator.", "source": "juraj-google-style"}
{"code": "def get_server_ipaddress(self, trust):\n    log.debug('Trust string is {!r}'.format(trust))\n    if (not trust.strip()):\n        return\n    received = self.message.get_all('received', [])\n    for i in received:\n        i = ported_string(i)\n        if (trust in i):\n            log.debug('Trust string {!r} is in {!r}'.format(trust, i))\n            check = REGXIP.findall(i[0:i.find('by')])\n            if check:\n                try:\n                    ip_str = six.text_type(check[(- 1)])\n                    log.debug('Found sender IP {!r} in {!r}'.format(ip_str, i))\n                    ip = ipaddress.ip_address(ip_str)\n                except ValueError:\n                    return\n                else:\n                    if (not ip.is_private):\n                        log.debug('IP {!r} not private'.format(ip_str))\n                        return ip_str", "docstring": "Return the ip address of sender\n\nOverview:\nExtract a reliable sender IP address heuristically for each message.\nAlthough the message format dictates a chain of relaying IP\naddresses in each message, a malicious relay can easily alter that.\nTherefore we cannot simply take the first IP in\nthe chain. Instead, our method is as follows.\nFirst we trust the sender IP reported by our mail server in the\nReceived headers, and if the previous relay IP address is on our trust\nlist (e.g. other well-known mail services), we continue to\nfollow the previous Received line, till we reach the first unrecognized\nIP address in the email header.\n\nFrom article Characterizing Botnets from Email Spam Records:\nLi Zhuang, J. D. Tygar\n\nIn our case we trust only our mail server with the trust string.\n\nArgs:\ntrust (string): String that identify our mail server\n\nReturns:\nstring with the ip address", "source": "codesearchnet"}
{"code": "def risk_score(self, domains):\n        \n        api_name = 'opendns-risk_score'\n        fmt_url_path = u'domains/risk-score/{0}'\n        return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Performs Umbrella risk score analysis on the input domains\n\nArgs:\ndomains: an enumerable of domains\nReturns:\nAn enumerable of associated domain risk scores", "source": "juraj-google-style"}
{"code": "def load_actor_class(self, driver_id, function_descriptor):\n        \n        function_id = function_descriptor.function_id\n        \n        actor_class = self._loaded_actor_classes.get(function_id, None)\n        if actor_class is None:\n            \n            if self._worker.load_code_from_local:\n                driver_id = ray.DriverID.nil()\n                \n                actor_class = self._load_actor_from_local(\n                    driver_id, function_descriptor)\n            else:\n                \n                actor_class = self._load_actor_class_from_gcs(\n                    driver_id, function_descriptor)\n            \n            self._loaded_actor_classes[function_id] = actor_class\n\n            \n            module_name = function_descriptor.module_name\n            actor_class_name = function_descriptor.class_name\n            actor_methods = inspect.getmembers(\n                actor_class, predicate=is_function_or_method)\n            for actor_method_name, actor_method in actor_methods:\n                method_descriptor = FunctionDescriptor(\n                    module_name, actor_method_name, actor_class_name)\n                method_id = method_descriptor.function_id\n                executor = self._make_actor_method_executor(\n                    actor_method_name,\n                    actor_method,\n                    actor_imported=True,\n                )\n                self._function_execution_info[driver_id][method_id] = (\n                    FunctionExecutionInfo(\n                        function=executor,\n                        function_name=actor_method_name,\n                        max_calls=0,\n                    ))\n                self._num_task_executions[driver_id][method_id] = 0\n            self._num_task_executions[driver_id][function_id] = 0\n        return actor_class", "docstring": "Load the actor class.\n\nArgs:\ndriver_id: Driver ID of the actor.\nfunction_descriptor: Function descriptor of the actor constructor.\n\nReturns:\nThe actor class.", "source": "juraj-google-style"}
{"code": "def _format_field_value(self, field_name) -> str:\n        \n\n        field_name = self._normalize_field_name(field_name)\n        field = self._get_model_field(field_name)\n\n        return SQLInsertCompiler.prepare_value(\n            self,\n            field,\n            \n            \n            \n            \n            \n            getattr(self.query.objs[0], field.attname)\n        )", "docstring": "Formats a field's value for usage in SQL.\n\nArguments:\nfield_name:\nThe name of the field to format\nthe value of.\n\nReturns:\nThe field's value formatted for usage\nin SQL.", "source": "juraj-google-style"}
{"code": "def to_representation(self, instance):\n        \n        updated_program = copy.deepcopy(instance)\n        enterprise_customer_catalog = self.context['enterprise_customer_catalog']\n        updated_program['enrollment_url'] = enterprise_customer_catalog.get_program_enrollment_url(\n            updated_program['uuid']\n        )\n        for course in updated_program['courses']:\n            course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(course['key'])\n            for course_run in course['course_runs']:\n                course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(\n                    course_run['key']\n                )\n        return updated_program", "docstring": "Return the updated program data dictionary.\n\nArguments:\ninstance (dict): The program data.\n\nReturns:\ndict: The updated program data.", "source": "juraj-google-style"}
{"code": "def bind(self, devices_to_bind):\n    if (self.entity_api_key == ''):\n        return {'status': 'failure', 'response': 'No API key found in request'}\n    url = (self.base_url + 'api/0.1.0/subscribe/bind')\n    headers = {'apikey': self.entity_api_key}\n    data = {'exchange': 'amq.topic', 'keys': devices_to_bind, 'queue': self.entity_id}\n    with self.no_ssl_verification():\n        r = requests.post(url, json=data, headers=headers)\n    response = dict()\n    if ('No API key' in str(r.content.decode('utf-8'))):\n        response['status'] = 'failure'\n        r = json.loads(r.content.decode('utf-8'))['message']\n    elif ('bind queue ok' in str(r.content.decode('utf-8'))):\n        response['status'] = 'success'\n        r = r.content.decode('utf-8')\n    else:\n        response['status'] = 'failure'\n        r = r.content.decode('utf-8')\n    response['response'] = str(r)\n    return response", "docstring": "This function allows an entity to list the devices to subscribe for data. This function must be called\nat least once, before doing a subscribe. Subscribe function will listen to devices that are bound here.\n\nArgs:\ndevices_to_bind  (list): an array of devices to listen to.\nExample bind([\"test100\",\"testDemo\"])", "source": "codesearchnet"}
{"code": "def add_data(self, data):\n        \n        if not self._data:\n            self._data = {}\n        self._data.update(data)", "docstring": "Add POST data.\n\nArgs:\ndata (dict): key => value dictionary", "source": "juraj-google-style"}
{"code": "def tunnel(container, local_port, remote_port=None, gateway_port=None):\n    \n    if remote_port is None:\n        remote_port = local_port\n    if gateway_port is None:\n        gateway_port = remote_port\n\n    remote_host = get_ip(container)\n\n    command =  % {\n        'key_filename': env.key_filename,\n        'local_port': local_port,\n        'gateway_port': gateway_port,\n        'gateway_user': env.user,\n        'gateway_host': env.host,\n        'remote_port': remote_port,\n        'remote_host': remote_host,\n    }\n\n    command = command.replace('\\n', '')\n\n    local(command)", "docstring": "Set up an SSH tunnel into the container, using the host as a gateway host.\n\nArgs:\n* container: Container name or ID\n* local_port: Local port\n* remote_port=None: Port on the Docker container (defaults to local_port)\n* gateway_port=None: Port on the gateway host (defaults to remote_port)", "source": "juraj-google-style"}
{"code": "def value(self):\n    binary = (UBInt8(self.sub_type).pack() + self.sub_value.pack())\n    return BinaryData(binary)", "docstring": "Return sub type and sub value as binary data.\n\nReturns:\n:class:`~pyof.foundation.basic_types.BinaryData`:\nBinaryData calculated.", "source": "codesearchnet"}
{"code": "def cluster_nodes(self, tol=0.2):\n        \n        lattice = self.structure.lattice\n\n        vfcoords = [v.frac_coords for v in self.vnodes]\n\n        \n        \n        dist_matrix = np.array(lattice.get_all_distances(vfcoords, vfcoords))\n        dist_matrix = (dist_matrix + dist_matrix.T) / 2\n        for i in range(len(dist_matrix)):\n            dist_matrix[i, i] = 0\n        condensed_m = squareform(dist_matrix)\n        z = linkage(condensed_m)\n        cn = fcluster(z, tol, criterion=\"distance\")\n        merged_vnodes = []\n        for n in set(cn):\n            poly_indices = set()\n            frac_coords = []\n            for i, j in enumerate(np.where(cn == n)[0]):\n                poly_indices.update(self.vnodes[j].polyhedron_indices)\n                if i == 0:\n                    frac_coords.append(self.vnodes[j].frac_coords)\n                else:\n                    fcoords = self.vnodes[j].frac_coords\n                    \n                    d, image = lattice.get_distance_and_image(frac_coords[0],\n                                                              fcoords)\n                    frac_coords.append(fcoords + image)\n            merged_vnodes.append(\n                VoronoiPolyhedron(lattice, np.average(frac_coords, axis=0),\n                                  poly_indices, self.coords))\n        self.vnodes = merged_vnodes\n        logger.debug(\"%d vertices after combination.\" % len(self.vnodes))", "docstring": "Cluster nodes that are too close together using a tol.\n\nArgs:\ntol (float): A distance tolerance. PBC is taken into account.", "source": "juraj-google-style"}
{"code": "def _GetFieldPathElementIndex(api_error, field):\n  \n  field_path_elements = api_error['fieldPathElements']\n\n  if field_path_elements:\n    found_index = [field_path_element['index']\n                   for field_path_element in field_path_elements\n                   if field_path_element['field'] == field]\n    if found_index:\n      return found_index\n\n  return None", "docstring": "Retrieve the index of a given field in the api_error's fieldPathElements.\n\nArgs:\napi_error: a dict containing a partialFailureError returned from the AdWords\nAPI.\nfield: a str field for which this determines the index in the api_error's\nfieldPathElements.\n\nReturns:\nAn int index of the field path element, or None if the specified field can't\nbe found in the api_error.", "source": "juraj-google-style"}
{"code": "def _GetRecordValue(self, record, value_entry):\n    \n    column_type = record.get_column_type(value_entry)\n    long_value = None\n\n    if record.is_long_value(value_entry):\n      long_value = record.get_value_data_as_long_value(value_entry)\n\n    if record.is_multi_value(value_entry):\n      \n      raise ValueError('Multi value support not implemented yet.')\n\n    if column_type == pyesedb.column_types.NULL:\n      return None\n\n    if column_type == pyesedb.column_types.BOOLEAN:\n      \n      raise ValueError('Boolean value support not implemented yet.')\n\n    if column_type in self.INTEGER_COLUMN_TYPES:\n      if long_value:\n        raise ValueError('Long integer value not supported.')\n      return record.get_value_data_as_integer(value_entry)\n\n    if column_type in self.FLOATING_POINT_COLUMN_TYPES:\n      if long_value:\n        raise ValueError('Long floating point value not supported.')\n      return record.get_value_data_as_floating_point(value_entry)\n\n    if column_type in self.STRING_COLUMN_TYPES:\n      if long_value:\n        return long_value.get_data_as_string()\n      return record.get_value_data_as_string(value_entry)\n\n    if column_type == pyesedb.column_types.GUID:\n      \n      raise ValueError('GUID value support not implemented yet.')\n\n    if long_value:\n      return long_value.get_data()\n    return record.get_value_data(value_entry)", "docstring": "Retrieves a specific value from the record.\n\nArgs:\nrecord (pyesedb.record): ESE record.\nvalue_entry (int): value entry.\n\nReturns:\nobject: value.\n\nRaises:\nValueError: if the value is not supported.", "source": "juraj-google-style"}
{"code": "def _emit_op(self, nodestats: step_stats_pb2.NodeExecStats, pid: int, is_gputrace: bool) -> None:\n    node_name = nodestats.node_name\n    start = nodestats.all_start_micros\n    duration = nodestats.all_end_rel_micros\n    tid = nodestats.thread_id\n    inputs = []\n    if is_gputrace:\n        node_name, op = self._parse_kernel_label(nodestats.timeline_label, node_name)\n    elif node_name == 'RecvTensor':\n        op = 'RecvTensor'\n    else:\n        _, op, inputs = self._parse_op_label(nodestats.timeline_label)\n    args = {'name': node_name, 'op': op}\n    if build_info.build_info['is_rocm_build']:\n        args['kernel'] = nodestats.timeline_label.split('@@')[0]\n    for i, iname in enumerate(inputs):\n        args['input%d' % i] = iname\n    self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)", "docstring": "Generates a Chrome Trace event to show Op execution.\n\nArgs:\nnodestats: The 'step_stats_pb2.NodeExecStats' proto recording op\nexecution.\npid: The pid assigned for the device where this op ran.\nis_gputrace: If True then this op came from the GPUTracer.", "source": "github-repos"}
{"code": "def coordination_leader(cluster_spec):\n    cluster_spec = normalize_cluster_spec(cluster_spec)\n    if not cluster_spec.as_dict():\n        return ''\n    if 'ps' in cluster_spec.jobs:\n        return '/job:ps/replica:0/task:0'\n    if 'chief' in cluster_spec.jobs:\n        return '/job:chief/replica:0/task:0'\n    assert 'worker' in cluster_spec.jobs\n    return '/job:worker/replica:0/task:0'", "docstring": "Return the task name of the coordination service leader.\n\nArgs:\ncluster_spec: a dict, `ClusterDef` or `ClusterSpec` object sxpecifying the\ncluster configurations.\n\nReturns:\na string indicating the task name of the coordination service leader.", "source": "github-repos"}
{"code": "def _init_vocab_from_list(self, vocab_list):\n    \n    def token_gen():\n      for token in vocab_list:\n        if token not in RESERVED_TOKENS:\n          yield token\n\n    self._init_vocab(token_gen())", "docstring": "Initialize tokens from a list of tokens.\n\nIt is ok if reserved tokens appear in the vocab list. They will be\nremoved. The set of tokens in vocab_list should be unique.\n\nArgs:\nvocab_list: A list of tokens.", "source": "juraj-google-style"}
{"code": "def switch_to_window(self, window_name):\n        \n        data = {\n            'name': window_name\n        }\n        self._execute(Command.SWITCH_TO_WINDOW, data)", "docstring": "Switch to the given window.\n\nSupport:\nWeb(WebView)\n\nArgs:\nwindow_name(str): The window to change focus to.\n\nReturns:\nWebDriver Object.", "source": "juraj-google-style"}
{"code": "def delete_ldap_group_link(self, cn, provider=None, **kwargs):\n        \n        path = '/groups/%s/ldap_group_links' % self.get_id()\n        if provider is not None:\n            path += '/%s' % provider\n        path += '/%s' % cn\n        self.manager.gitlab.http_delete(path)", "docstring": "Delete an LDAP group link.\n\nArgs:\ncn (str): CN of the LDAP group\nprovider (str): LDAP provider for the LDAP group\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def detect_unused_return_values(self, f):\n        \n        values_returned = []\n        nodes_origin = {}\n        for n in f.nodes:\n            for ir in n.irs:\n                if isinstance(ir, HighLevelCall):\n                    \n                    if ir.lvalue and not isinstance(ir.lvalue, StateVariable):\n                        values_returned.append(ir.lvalue)\n                        nodes_origin[ir.lvalue] = ir\n                for read in ir.read:\n                    if read in values_returned:\n                        values_returned.remove(read)\n\n        return [nodes_origin[value].node for value in values_returned]", "docstring": "Return the nodes where the return value of a call is unused\nArgs:\nf (Function)\nReturns:\nlist(Node)", "source": "juraj-google-style"}
{"code": "def load_parent_implems(self, parent_implems):\n    for (trname, attr, implem) in parent_implems.get_custom_implementations():\n        self.implementations[trname] = implem.copy()\n        self.transitions_at[trname] = attr\n        self.custom_implems.add(trname)", "docstring": "Import previously defined implementations.\n\nArgs:\nparent_implems (ImplementationList): List of implementations defined\nin a parent class.", "source": "codesearchnet"}
{"code": "def encode(data, scheme=None, size=None):\n    size = (size if size else 'ShapeAuto')\n    size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size)\n    if (not hasattr(DmtxSymbolSize, size_name)):\n        raise PyLibDMTXError('Invalid size [{0}]: should be one of {1}'.format(size, ENCODING_SIZE_NAMES))\n    size = getattr(DmtxSymbolSize, size_name)\n    scheme = (scheme if scheme else 'Ascii')\n    scheme_name = '{0}{1}'.format(ENCODING_SCHEME_PREFIX, scheme.capitalize())\n    if (not hasattr(DmtxScheme, scheme_name)):\n        raise PyLibDMTXError('Invalid scheme [{0}]: should be one of {1}'.format(scheme, ENCODING_SCHEME_NAMES))\n    scheme = getattr(DmtxScheme, scheme_name)\n    with _encoder() as encoder:\n        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme)\n        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size)\n        if (dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0):\n            raise PyLibDMTXError('Could not encode data, possibly because the image is not large enough to contain the data')\n        (w, h, bpp) = map(partial(dmtxImageGetProp, encoder[0].image), (DmtxProperty.DmtxPropWidth, DmtxProperty.DmtxPropHeight, DmtxProperty.DmtxPropBitsPerPixel))\n        size = (((w * h) * bpp) \n        pixels = cast(encoder[0].image[0].pxl, ctypes.POINTER((ctypes.c_ubyte * size)))\n        return Encoded(width=w, height=h, bpp=bpp, pixels=ctypes.string_at(pixels, size))", "docstring": "Encodes `data` in a DataMatrix image.\n\nFor now bpp is the libdmtx default which is 24\n\nArgs:\ndata: bytes instance\nscheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`.\nIf `None`, defaults to 'Ascii'.\nsize: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`.\nIf `None`, defaults to 'ShapeAuto'.\n\nReturns:\nEncoded: with properties `(width, height, bpp, pixels)`.\nYou can use that result to build a PIL image:\n\nImage.frombytes('RGB', (width, height), pixels)", "source": "codesearchnet"}
{"code": "def voronoi(points, buffer_percent=100):\n    seen = set()\n    uniqpoints = [p for p in points if ((str(p[:2]) not in seen) and (not seen.add(str(p[:2]))))]\n    classpoints = [_Point(*point[:2]) for point in uniqpoints]\n    (xs, ys) = list(zip(*uniqpoints))[:2]\n    pointswidth = (max(xs) - min(xs))\n    pointsheight = (max(ys) - min(ys))\n    (xbuff, ybuff) = (((pointswidth / 100.0) * buffer_percent), ((pointsheight / 100.0) * buffer_percent))\n    (midx, midy) = ((sum(xs) / float(len(xs))), (sum(ys) / float(len(ys))))\n    bufferbox = [((midx - xbuff), midy), ((midx + xbuff), midy), (midx, (midy + ybuff)), (midx, (midy - ybuff))]\n    classpoints.extend([_Point(*corner) for corner in bufferbox])\n    (vertices, edges, poly_dict) = tesselator.computeVoronoiDiagram(classpoints)\n    polygons = list()\n    for (sitepoint, polyedges) in list(poly_dict.items()):\n        polyedges = [edge[1:] for edge in polyedges]\n        poly = list()\n        (firststart, firstend) = polyedges.pop(0)\n        poly.append(firstend)\n        while polyedges:\n            curend = poly[(- 1)]\n            for (i, other) in enumerate(polyedges):\n                (otherstart, otherend) = other\n                if (otherstart == curend):\n                    poly.append(otherend)\n                    polyedges.pop(i)\n                    break\n                elif (otherend == curend):\n                    poly.append(otherstart)\n                    polyedges.pop(i)\n                    break\n        try:\n            sitepoint = uniqpoints[sitepoint]\n        except IndexError:\n            sitepoint = None\n        poly = [vertices[vi] for vi in poly if (vi != (- 1))]\n        polygons.append((sitepoint, poly))\n    return polygons", "docstring": "Surrounds each point in an input list of xy tuples with a\nunique Voronoi polygon.\n\nArguments:\n\n- **points**: A list of xy or xyz point tuples to triangulate.\n- **buffer_percent** (optional): Controls how much bigger than\nthe original bbox of the input points to set the bbox of fake points,\nused to account for lacking values around the edges (default is 100 percent).\n\nReturns:\n\n- Returns a list of 2-tuples, with the first item in each tuple being the\noriginal input point (or None for each corner of the bounding box buffer),\nand the second item being the point's corressponding Voronoi polygon.", "source": "codesearchnet"}
{"code": "def repair(self, volume_id_or_uri, timeout=-1):\n        \n        data = {\n            \"type\": \"ExtraManagedStorageVolumePaths\",\n            \"resourceUri\": self._client.build_uri(volume_id_or_uri)\n        }\n        custom_headers = {'Accept-Language': 'en_US'}\n        uri = self.URI + '/repair'\n        return self._client.create(data, uri=uri, timeout=timeout, custom_headers=custom_headers)", "docstring": "Removes extra presentations from a specified volume on the storage system.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume id or the volume uri.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Storage volume.", "source": "juraj-google-style"}
{"code": "def generate_ast(path):\n    if os.path.isfile(path):\n        with open(path, 'r') as f:\n            try:\n                tree = ast.parse(f.read())\n                return PytTransformer().visit(tree)\n            except SyntaxError:\n                global recursive\n                if (not recursive):\n                    _convert_to_3(path)\n                    recursive = True\n                    return generate_ast(path)\n                else:\n                    raise SyntaxError('The ast module can not parse the file and the python 2 to 3 conversion also failed.')\n    raise IOError(('Input needs to be a file. Path: ' + path))", "docstring": "Generate an Abstract Syntax Tree using the ast module.\n\nArgs:\npath(str): The path to the file e.g. example/foo/bar.py", "source": "codesearchnet"}
{"code": "def _should_fetch_reason(self) -> Tuple[(bool, str)]:\n    is_redirect = False\n    if self._strong_redirects:\n        try:\n            is_redirect = self._web_client_session.redirect_tracker.is_redirect()\n        except AttributeError:\n            pass\n    return self._fetch_rule.check_subsequent_web_request(self._item_session, is_redirect=is_redirect)", "docstring": "Return info about whether the URL should be fetched.\n\nReturns:\ntuple: A two item tuple:\n\n1. bool: If True, the URL should be fetched.\n2. str: A short reason string explaining the verdict.", "source": "codesearchnet"}
{"code": "def db_en010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_en010`'.format(value))\n\n        self._db_en010 = value", "docstring": "Corresponds to IDD Field `db_en010`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_en010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def iter_processed_text(self, file, encoding=None, base_url=None):\n        \n        for text, is_link in self.iter_text(file, encoding):\n            if is_link and base_url:\n                new_link = urljoin_safe(base_url, text, allow_fragments=False)\n\n                if new_link:\n                    yield (new_link, is_link)\n                else:\n                    yield (new_link, False)\n            else:\n                yield (text, is_link)", "docstring": "Return the file text and processed absolute links.\n\nArgs:\nfile: A file object containing the document.\nencoding (str): The encoding of the document.\nbase_url (str): The URL at which the document is located.\n\nReturns:\niterator: Each item is a tuple:\n\n1. str: The text\n2. bool: Whether the text a link", "source": "juraj-google-style"}
{"code": "def _load_plugins(namespace, instantiate=True):\n    mgr = ExtensionManager(namespace=namespace, on_load_failure_callback=(lambda _, ep, err: LOGGER.warning('Could not load plugin {}: {}'.format(ep.name, err))))\n    if instantiate:\n        plugins = dict(((ext.name, (ext.plugin if isinstance(ext.plugin, Plugin) else ext.plugin())) for ext in mgr))\n    else:\n        plugins = dict(((ext.name, ext.plugin) for ext in mgr))\n    return plugins", "docstring": "Loads all the plugins for the given namespace\n\nArgs:\nnamespace(str): Namespace string, as in the setuptools entry_points\ninstantiate(bool): If true, will instantiate the plugins too\n\nReturns:\ndict of str, object: Returns the list of loaded plugins", "source": "codesearchnet"}
{"code": "def master_key_from_seed(seed):\n    S = get_bytes(seed)\n    I = hmac.new(b'Bitcoin seed', S, hashlib.sha512).digest()\n    (Il, Ir) = (I[:32], I[32:])\n    parse_Il = int.from_bytes(Il, 'big')\n    if ((parse_Il == 0) or (parse_Il >= bitcoin_curve.n)):\n        raise ValueError('Bad seed, resulting in invalid key!')\n    return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0)", "docstring": "Generates a master key from a provided seed.\n\nArgs:\nseed (bytes or str): a string of bytes or a hex string\n\nReturns:\nHDPrivateKey: the master private key.", "source": "codesearchnet"}
{"code": "def _as_document(self, identifier):\n    return {'identifier': u('{}').format(identifier['identifier']), 'type': u('{}').format(identifier['type']), 'name': u('{}').format(identifier['name'])}", "docstring": "Converts given identifier to the document indexed by FTS backend.\n\nArgs:\nidentifier (dict): identifier to convert. Dict contains at\nleast 'identifier', 'type' and 'name' keys.\n\nReturns:\ndict with structure matches to BaseIdentifierIndex._schema.", "source": "codesearchnet"}
{"code": "def InitPathInfos(self, client_id, path_infos):\n    self.ClearPathHistory(client_id, path_infos)\n    self.WritePathInfos(client_id, path_infos)", "docstring": "Initializes a collection of path info records for a client.\n\nUnlike `WritePathInfo`, this method clears stat and hash histories of paths\nassociated with path info records. This method is intended to be used only\nin the data migration scripts.\n\nArgs:\nclient_id: A client identifier for which the paths are to be initialized.\npath_infos: A list of `rdf_objects.PathInfo` objects to write.", "source": "codesearchnet"}
{"code": "async def delCronJob(self, iden):\n    cron = self.cell.agenda.appts.get(iden)\n    if (cron is None):\n        raise s_exc.NoSuchIden()\n    self._trig_auth_check(cron.useriden)\n    (await self.cell.agenda.delete(iden))", "docstring": "Delete a cron job\n\nArgs:\niden (bytes):  The iden of the cron job to be deleted", "source": "codesearchnet"}
{"code": "def orient_undirected_graph(self, data, graph):\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    self.arguments['{SCORE}'] = self.scores[self.score]\n    fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())\n    fg = DataFrame((1 - fe.values))\n    results = self._run_gies(data, fixedGaps=fg, verbose=self.verbose)\n    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Run GIES on an undirected graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.Graph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution given by the GIES algorithm.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.Ping = channel.unary_unary(\n        '/processor.Processor/Ping',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=processor__pb2.Status.FromString,\n        )\n    self.Process = channel.unary_unary(\n        '/processor.Processor/Process',\n        request_serializer=message__pb2.Message.SerializeToString,\n        response_deserializer=message__pb2.Message.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def update_video(video_data):\n    try:\n        video = _get_video(video_data.get('edx_video_id'))\n    except Video.DoesNotExist:\n        error_message = u'Video not found when trying to update video with edx_video_id: {0}'.format(video_data.get('edx_video_id'))\n        raise ValVideoNotFoundError(error_message)\n    serializer = VideoSerializer(video, data=video_data)\n    if serializer.is_valid():\n        serializer.save()\n        return video_data.get('edx_video_id')\n    else:\n        raise ValCannotUpdateError(serializer.errors)", "docstring": "Called on to update Video objects in the database\n\nupdate_video is used to update Video objects by the given edx_video_id in the video_data.\n\nArgs:\nvideo_data (dict):\n{\nurl: api url to the video\nedx_video_id: ID of the video\nduration: Length of video in seconds\nclient_video_id: client ID of video\nencoded_video: a list of EncodedVideo dicts\nurl: url of the video\nfile_size: size of the video in bytes\nprofile: ID of the profile\ncourses: Courses associated with this video\n}\n\nRaises:\nRaises ValVideoNotFoundError if the video cannot be retrieved.\nRaises ValCannotUpdateError if the video cannot be updated.\n\nReturns the successfully updated Video object", "source": "codesearchnet"}
{"code": "def line_on_device(device: 'cirq.google.XmonDevice', length: int, method: LinePlacementStrategy=greedy.GreedySequenceSearchStrategy()) -> GridQubitLineTuple:\n    return method.place_line(device, length)", "docstring": "Searches for linear sequence of qubits on device.\n\nArgs:\ndevice: Google Xmon device instance.\nlength: Desired number of qubits making up the line.\nmethod: Line placement method. Defaults to\ncirq.greedy.GreedySequenceSearchMethod.\n\nReturns:\nLine sequences search results.", "source": "codesearchnet"}
{"code": "def update(self, value: int, force_update: bool=False, comment: Optional[str]=None):\n    self.value = value\n    if comment is not None:\n        self.comment = comment\n    if self.last_value is None:\n        self.start_time = self.last_time = time.time()\n        self.start_value = self.last_value = value\n        self.elapsed_time = self.predicted_remaining = None\n        self.first_calls = self.warmup\n        self.wait_for = 1\n        self.update_bar(value)\n    elif value <= self.last_value and (not force_update):\n        return\n    elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):\n        if self.first_calls > 0:\n            self.first_calls -= 1\n        current_time = time.time()\n        self.elapsed_time = current_time - self.start_time\n        if value > self.start_value:\n            self.average_time_per_item = self.elapsed_time / (value - self.start_value)\n        else:\n            self.average_time_per_item = None\n        if value >= self.total:\n            value = self.total\n            self.predicted_remaining = None\n            if not self.leave:\n                self.close()\n        elif self.average_time_per_item is not None:\n            self.predicted_remaining = self.average_time_per_item * (self.total - value)\n        self.update_bar(value)\n        self.last_value = value\n        self.last_time = current_time\n        if self.average_time_per_item is None or self.average_time_per_item == 0:\n            self.wait_for = 1\n        else:\n            self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)", "docstring": "The main method to update the progress bar to `value`.\n\nArgs:\nvalue (`int`):\nThe value to use. Must be between 0 and `total`.\nforce_update (`bool`, *optional*, defaults to `False`):\nWhether or not to force and update of the internal state and display (by default, the bar will wait for\n`value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute\nsince the last update to avoid adding boilerplate).\ncomment (`str`, *optional*):\nA comment to add on the left of the progress bar.", "source": "github-repos"}
{"code": "def entry_dict_from_list(all_slab_entries):\n    \n\n    entry_dict = {}\n\n    for entry in all_slab_entries:\n        hkl = tuple(entry.miller_index)\n        if hkl not in entry_dict.keys():\n            entry_dict[hkl] = {}\n        if entry.clean_entry:\n            clean = entry.clean_entry\n        else:\n            clean = entry\n        if clean not in entry_dict[hkl].keys():\n            entry_dict[hkl][clean] = []\n        if entry.adsorbates:\n            entry_dict[hkl][clean].append(entry)\n\n    return entry_dict", "docstring": "Converts a list of SlabEntry to an appropriate dictionary. It is\nassumed that if there is no adsorbate, then it is a clean SlabEntry\nand that adsorbed SlabEntry has the clean_entry parameter set.\n\nArgs:\nall_slab_entries (list): List of SlabEntry objects\n\nReturns:\n(dict): Dictionary of SlabEntry with the Miller index as the main\nkey to a dictionary with a clean SlabEntry as the key to a\nlist of adsorbed SlabEntry.", "source": "juraj-google-style"}
{"code": "def calc_timestep_statistic(self, statistic, time):\n        \n        ti = np.where(self.times == time)[0][0]\n        ma = np.where(self.masks[ti].ravel() == 1)\n        if statistic in ['mean', 'max', 'min', 'std', 'ptp']:\n            stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()\n        elif statistic == 'median':\n            stat_val = np.median(self.timesteps[ti].ravel()[ma])\n        elif 'percentile' in statistic:\n            per = int(statistic.split(\"_\")[1])\n            stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)\n        elif 'dt' in statistic:\n            stat_name = statistic[:-3]\n            if ti == 0:\n                stat_val = 0\n            else:\n                stat_val = self.calc_timestep_statistic(stat_name, time) -\\\n                    self.calc_timestep_statistic(stat_name, time - 1)\n        else:\n            stat_val = np.nan\n        return stat_val", "docstring": "Calculate statistics from the primary attribute of the StObject.\n\nArgs:\nstatistic: statistic being calculated\ntime: Timestep being investigated\n\nReturns:\nValue of the statistic", "source": "juraj-google-style"}
{"code": "def remove_trunk_group(self, intf, value):\n        \n        string = 'no switchport trunk group {}'.format(value)\n        return self.configure_interface(intf, string)", "docstring": "Removes a specified trunk group to the interface\n\nArgs:\nintf (str): The interface name to remove the trunk group from\nvalue (str): The trunk group value\n\nReturns:\nTrue if the operation as successfully applied otherwise false", "source": "juraj-google-style"}
{"code": "def RegisterHasher(cls, hasher_class):\n    \n    hasher_name = hasher_class.NAME.lower()\n    if hasher_name in cls._hasher_classes:\n      raise KeyError((\n          'hasher class already set for name: {0:s}.').format(\n              hasher_class.NAME))\n\n    cls._hasher_classes[hasher_name] = hasher_class", "docstring": "Registers a hasher class.\n\nThe hasher classes are identified based on their lower case name.\n\nArgs:\nhasher_class (type): class object of the hasher.\n\nRaises:\nKeyError: if hasher class is already set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def __init__(self, project, throttle_rampup=True, hint_num_workers=_Mutate._DEFAULT_HINT_NUM_WORKERS):\n    mutate_fn = DeleteFromDatastore._DatastoreDeleteFn(project)\n    super().__init__(mutate_fn, throttle_rampup, hint_num_workers)", "docstring": "Initialize the `DeleteFromDatastore` transform.\n\nArgs:\nproject: (:class:`str`) The ID of the project from which the entities will\nbe deleted.\nthrottle_rampup: Whether to enforce a gradual ramp-up.\nhint_num_workers: A hint for the expected number of workers, used to\nestimate appropriate limits during ramp-up throttling.", "source": "github-repos"}
{"code": "def requestedFormat(request, acceptedFormat):\n    if ('format' in request.args):\n        fieldFormat = request.args.get('format')\n        if (fieldFormat not in acceptedFormat):\n            raise ValueError(('requested format not supported: ' + fieldFormat))\n        return fieldFormat\n    else:\n        return request.accept_mimetypes.best_match(acceptedFormat)", "docstring": "Return the response format requested by client\n\nClient could specify requested format using:\n(options are processed in this order)\n- `format` field in http request\n- `Accept` header in http request\nExample:\nchooseFormat(request, ['text/html','application/json'])\nArgs:\nacceptedFormat: list containing all the accepted format\nReturns:\nstring: the user requested mime-type (if supported)\nRaises:\nValueError: if user request a mime-type not supported", "source": "codesearchnet"}
{"code": "def create(self, python=None, system_site=False, always_copy=False):\n    command = 'virtualenv'\n    if python:\n        command = '{0} --python={1}'.format(command, python)\n    if system_site:\n        command = '{0} --system-site-packages'.format(command)\n    if always_copy:\n        command = '{0} --always-copy'.format(command)\n    command = '{0} {1}'.format(command, self.path)\n    self._execute(command)", "docstring": "Create a new virtual environment.\n\nArgs:\npython (str): The name or path of a python interpreter to use while\ncreating the virtual environment.\nsystem_site (bool): Whether or not use use the system site packages\nwithin the virtual environment. Default is False.\nalways_copy (bool): Whether or not to force copying instead of\nsymlinking in the virtual environment. Default is False.", "source": "codesearchnet"}
{"code": "def update_metric_by_name(self, metric_name, metric_type, description=None, custom_properties=None, tags=None, **kwargs):\n    data = {'type': metric_type.upper(), 'description': (description or ''), 'customProperties': (custom_properties or {}), 'tags': (tags or [])}\n    resp = self._put(self._u(self._METRIC_ENDPOINT_SUFFIX, str(metric_name)), data=data, **kwargs)\n    resp.raise_for_status()\n    return resp.json()", "docstring": "Create or update a metric object\n\nArgs:\nmetric_name (string): name of metric\ntype (string): metric type, must be one of 'gauge', 'counter',\n'cumulative_counter'\ndescription (optional[string]): a description\ncustom_properties (optional[dict]): dictionary of custom properties\ntags (optional[list of strings]): list of tags associated with\nmetric", "source": "codesearchnet"}
{"code": "def _print_extension(self, extension: message.Message) -> None:\n    if not fhir_types.is_type_or_profile_of_extension(extension):\n        raise ValueError(f'Message of type: {extension.DESCRIPTOR.full_name} is not a FHIR Extension.')\n    if self.json_format == _FhirJsonFormat.ANALYTIC:\n        self.generator.push(f'\"{cast(Any, extension).url.value}\"')\n    else:\n        self._print_message(extension)", "docstring": "Pushes the Extension into the JSON text generator.\n\nIf the _FhirJsonFormat is set to ANALYTIC, this method only prints the url.\n\nArgs:\nextension: The Extension to print.", "source": "github-repos"}
{"code": "def oauth_required(self, method):\n\n    def check_oauth(request_handler, *args, **kwargs):\n        if self._in_error:\n            self._display_error_message(request_handler)\n            return\n        user = users.get_current_user()\n        if (not user):\n            request_handler.redirect(users.create_login_url(request_handler.request.uri))\n            return\n        self._create_flow(request_handler)\n        self.flow.params['state'] = _build_state_value(request_handler, user)\n        self.credentials = self._storage_class(self._credentials_class, None, self._credentials_property_name, user=user).get()\n        if (not self.has_credentials()):\n            return request_handler.redirect(self.authorize_url())\n        try:\n            resp = method(request_handler, *args, **kwargs)\n        except client.AccessTokenRefreshError:\n            return request_handler.redirect(self.authorize_url())\n        finally:\n            self.credentials = None\n        return resp\n    return check_oauth", "docstring": "Decorator that starts the OAuth 2.0 dance.\n\nStarts the OAuth dance for the logged in user if they haven't already\ngranted access for this application.\n\nArgs:\nmethod: callable, to be decorated method of a webapp.RequestHandler\ninstance.", "source": "codesearchnet"}
{"code": "def two_point_effective_mass(cartesian_k_points, eigenvalues):\n    assert (cartesian_k_points.shape[0] == 2)\n    assert (eigenvalues.size == 2)\n    dk = (cartesian_k_points[1] - cartesian_k_points[0])\n    mod_dk = np.sqrt(np.dot(dk, dk))\n    delta_e = (((eigenvalues[1] - eigenvalues[0]) * ev_to_hartree) * 2.0)\n    effective_mass = ((mod_dk * mod_dk) / delta_e)\n    return effective_mass", "docstring": "Calculate the effective mass given eigenvalues at two k-points.\nReimplemented from Aron Walsh's original effective mass Fortran code.\n\nArgs:\ncartesian_k_points (np.array): 2D numpy array containing the k-points in (reciprocal) Cartesian coordinates.\neigenvalues (np.array):        numpy array containing the eigenvalues at each k-point.\n\nReturns:\n(float): The effective mass", "source": "codesearchnet"}
{"code": "def is_function_pipelined(self, tf_function, *args):\n    attr_name = tpu_embedding_v3._PIPELINE_ATTRIBUTE\n    func_graph = tf_function.get_concrete_function(*args).graph\n    while_op = None\n    for op in func_graph.get_operations():\n        if op.name == 'while':\n            while_op = op\n            break\n    self.assertIsNotNone(while_op, 'while op not found')\n    body_name = while_op.get_attr('body').name\n    while_body_func = None\n    try:\n        while_body_func = func_graph.get_concrete_function(body_name)\n    except AttributeError as exc:\n        for func in while_op.graph._functions.values():\n            if func.name.decode() == body_name:\n                while_body_func = func\n                break\n        if while_body_func is None:\n            raise ValueError('body not found') from exc\n    while_body_graph = while_body_func.graph\n    attr_value = None\n    for op in while_body_graph.get_operations():\n        try:\n            attr = op.get_attr(attr_name)\n            logging.info('Op \"%s\" has pipelining attr: %s : %s', op.name, attr_name, attr)\n            attr_value = attr.decode('utf-8')\n            break\n        except ValueError:\n            pass\n    has_pipelining_attr = attr_value in [tpu_embedding_v3._PIPELINE_MODE_FORWARD, tpu_embedding_v3._PIPELINE_MODE_BACKWARD]\n    return has_pipelining_attr", "docstring": "Returns whether the tf_function is flagged for embedding pipelining.\n\nArgs:\ntf_function: a tf.function.\n*args: the arguments to the tf_function.\n\nReturns:\nWhether the tf_function is (will be) pipelined.\n\nThis helper looks for a while loop in the provided function. It then looks\nfor any op that has the pipelining attribute (e.g.,\nXlaSparseDenseMatmulWithCsrInput). The presence of the attribute indicates\nthat the function is to be pipelined during compilation.\n\nExample usge:\n\nwith summary_ops_v2.record_if(False):\nis_pipelined = self.is_function_pipelined(tpu_test_fn, tpu_iter)\nself.assertTrue(is_pipelined)\nwith summary_ops_v2.record_if(True):\nis_pipelined = self.is_function_pipelined(tpu_test_fn, tpu_iter)\nself.assertFalse(is_pipelined)", "source": "github-repos"}
{"code": "def _pyval_field_major_to_node_major(keys, values, depth):\n    assert keys\n    if depth == 0:\n        return dict(zip(keys, values))\n    nvals = len(values[0])\n    assert all((nvals == len(values[i]) for i in range(1, len(values))))\n    return [_pyval_field_major_to_node_major(keys, value_slice, depth - 1) for value_slice in zip(*values)]", "docstring": "Regroup each field (k, v) from dict-of-list to list-of-dict.\n\nGiven a \"field-major\" encoding of the StructuredTensor (which maps each key to\na single nested list containing the values for all structs), return a\ncorresponding \"node-major\" encoding, consisting of a nested list of dicts.\n\nArgs:\nkeys: The field names (list of string).  Must not be empty.\nvalues: The field values (list of python values).  Must have the same length\nas `keys`.\ndepth: The list depth at which dictionaries should be created.\n\nReturns:\nA nested list of dict, with depth `depth`.", "source": "github-repos"}
{"code": "def __init__(self, problems, schedule, **kwargs):\n    \n    super(MultiProblemV2, self).__init__(**kwargs)\n    self.problems = problems\n    self.schedule = schedule", "docstring": "Creates a MultiProblem object.\n\nArgs:\nproblems: A list of problem.Problem objects.\nschedule: A schedule tuple, see encode_schedule for details.\n**kwargs: Keywords for problem.Problem.__init__.", "source": "juraj-google-style"}
{"code": "def videos(self, **kwargs):\n        \n        path = self._get_series_id_season_number_path('videos')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the videos that have been added to a TV season (trailers, teasers,\netc...).\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def init_cache(self, batch_size, max_length):\n    input_ids = jnp.ones((batch_size, max_length))\n    attention_mask = jnp.ones_like(input_ids)\n    position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)\n    init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True)\n    return init_variables['cache']", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.", "source": "github-repos"}
{"code": "def get_if_deleted(self, addresses):\n    with self._lock:\n        results = []\n        for add in addresses:\n            results.append(self._get_if_deleted(add))\n        return results", "docstring": "Returns a list of addresses that have been deleted, or None if it\nhasn't been deleted.\n\nArgs:\naddresses (list of str): The addresses to check if deleted.\n\nReturns:\n(list of str): The addresses, if deleted, or None.", "source": "codesearchnet"}
{"code": "def _pad_image(self, images: 'torch.tensor', size_divisibility: int=32) -> 'torch.tensor':\n    height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)\n    pad_height = 0 if height % size_divisibility == 0 else size_divisibility - height % size_divisibility\n    pad_width = 0 if width % size_divisibility == 0 else size_divisibility - width % size_divisibility\n    if pad_width + pad_height > 0:\n        padding = (0, 0, pad_width, pad_height)\n        images = F.pad(images, padding)\n    return images", "docstring": "Pads an image or batched images constantly so that width and height are divisible by size_divisibility\n\nArgs:\nimage (`torch,tensor`):\nImage to pad.\nsize_divisibility (`int`, *optional*, defaults to 32):\nThe width and height of the image will be padded to be divisible by this number.", "source": "github-repos"}
{"code": "def __init__(self, parent):\n        \n\n        super(ChatFrame, self).__init__(parent, padding=8, text=\"Chat\")\n\n        self.channel = tk.StringVar()\n        self.message = tk.StringVar()\n\n        self.channel_frame = ttk.Frame(self)\n        self.channel_frame.grid(column=0, row=0, sticky=\"W E\")\n        self.channel_label = ttk.Label(self.channel_frame, text=\"Channel ID:\")\n        self.channel_label.grid(column=0, row=0, sticky=\"W E\")\n        self.channel_box = ttk.Entry(self.channel_frame, textvariable=self.channel)\n        self.channel_box.grid(column=0, row=1, sticky=\"W E\")\n        self.channel_frame.columnconfigure(0, weight=1)\n\n        self.message_frame = ttk.Frame(self)\n        self.message_frame.grid(column=0, row=1, pady=8, sticky=\"W E\")\n        self.message_label = ttk.Label(self.message_frame, text=\"Message:\")\n        self.message_label.grid(column=0, row=0, sticky=\"W E\")\n        self.message_box = ttk.Entry(self.message_frame, textvariable=self.message)\n        self.message_box.grid(column=0, row=1, sticky=\"W E\")\n        self.message_frame.columnconfigure(0, weight=1)\n\n        self.send_button = ttk.Button(self, command=lambda: self.add_current_message(), text=\"Send\")\n        self.send_button.grid(column=0, row=2, sticky=\"W\")\n\n        self.columnconfigure(0, weight=1)", "docstring": "Send messages from the bot\n\nArgs:\nparent:", "source": "juraj-google-style"}
{"code": "def get_process_exit_code(self, task_type, task_id):\n    with self._process_lock:\n        p = self._processes[task_type, task_id]\n    return p.exitcode if p else None", "docstring": "Returns the subprocess exit code given the task type and task id.\n\nArgs:\ntask_type: The task type.\ntask_id: The task id.\n\nReturns:\nThe subprocess exit code; `None` if the subprocess has not exited yet.\n\nRaises:\nKeyError: If the corresponding subprocess is not found with `task_type`\nand `task_id`.", "source": "github-repos"}
{"code": "def normalize_tuple(value, n, name):\n    if isinstance(value, int):\n        return (value,) * n\n    else:\n        try:\n            value_tuple = tuple(value)\n        except TypeError:\n            raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)}')\n        if len(value_tuple) != n:\n            raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)}')\n        for single_value in value_tuple:\n            try:\n                int(single_value)\n            except (ValueError, TypeError):\n                raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)} including element {str(single_value)} of type {str(type(single_value))}')\n        return value_tuple", "docstring": "Transforms a single integer or iterable of integers into an integer tuple.\n\nArgs:\nvalue: The value to validate and convert. Could an int, or any iterable\nof ints.\nn: The size of the tuple to be returned.\nname: The name of the argument being validated, e.g. \"strides\" or\n\"kernel_size\". This is only used to format error messages.\n\nReturns:\nA tuple of n integers.\n\nRaises:\nValueError: If something else than an int/long or iterable thereof was\npassed.", "source": "github-repos"}
{"code": "def __init__(self, examples):\n    \n    self.config = {}\n    self.set_examples(examples)\n    self.set_model_type('classification')\n    self.set_label_vocab([])", "docstring": "Constructs the WitConfigBuilder object.\n\nArgs:\nexamples: A list of tf.Example or tf.SequenceExample proto objects.\nThese are the examples that will be displayed in WIT. If not model to\ninfer these examples with is specified through the methods on this class,\nthen WIT will display the examples for exploration, but no model inference\nwill be performed by the tool.", "source": "juraj-google-style"}
{"code": "def apply_scissor(self, new_band_gap):\n        \n        if self.is_metal():\n            \n            \n            max_index = -1000\n            \n            for i in range(self.nb_bands):\n                below = False\n                above = False\n                for j in range(len(self.kpoints)):\n                    if self.bands[Spin.up][i][j] < self.efermi:\n                        below = True\n                    if self.bands[Spin.up][i][j] > self.efermi:\n                        above = True\n                if above and below:\n                    if i > max_index:\n                        max_index = i\n                        \n                if self.is_spin_polarized:\n                    below = False\n                    above = False\n                    for j in range(len(self.kpoints)):\n                        if self.bands[Spin.down][i][j] < self.efermi:\n                            below = True\n                        if self.bands[Spin.down][i][j] > self.efermi:\n                            above = True\n                    if above and below:\n                        if i > max_index:\n                            max_index = i\n                            \n            old_dict = self.as_dict()\n            shift = new_band_gap\n            for spin in old_dict['bands']:\n                for k in range(len(old_dict['bands'][spin])):\n                    for v in range(len(old_dict['bands'][spin][k])):\n                        if k >= max_index:\n                            old_dict['bands'][spin][k][v] = \\\n                                old_dict['bands'][spin][k][v] + shift\n        else:\n\n            shift = new_band_gap - self.get_band_gap()['energy']\n            old_dict = self.as_dict()\n            for spin in old_dict['bands']:\n                for k in range(len(old_dict['bands'][spin])):\n                    for v in range(len(old_dict['bands'][spin][k])):\n                        if old_dict['bands'][spin][k][v] >= \\\n                                old_dict['cbm']['energy']:\n                            old_dict['bands'][spin][k][v] = \\\n                                old_dict['bands'][spin][k][v] + shift\n            old_dict['efermi'] = old_dict['efermi'] + shift\n\n            return LobsterBandStructureSymmLine.from_dict(old_dict)", "docstring": "Apply a scissor operator (shift of the CBM) to fit the given band gap.\nIf it's a metal. We look for the band crossing the fermi level\nand shift this one up. This will not work all the time for metals!\n\nArgs:\nnew_band_gap: the band gap the scissor band structure need to have.\n\nReturns:\na BandStructureSymmLine object with the applied scissor shift", "source": "juraj-google-style"}
{"code": "def SetConfiguredUsers(self, users):\n    prefix = (self.logger.name + '-')\n    with tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=True) as updated_users:\n        updated_users_file = updated_users.name\n        for user in users:\n            updated_users.write((user + '\\n'))\n        updated_users.flush()\n        if (not os.path.exists(self.google_users_dir)):\n            os.makedirs(self.google_users_dir)\n        shutil.copy(updated_users_file, self.google_users_file)\n    file_utils.SetPermissions(self.google_users_file, mode=384, uid=0, gid=0)", "docstring": "Set the list of configured Google user accounts.\n\nArgs:\nusers: list, the username strings of the Linux accounts.", "source": "codesearchnet"}
{"code": "def get_model():\n    if (not hasattr(g, 'model')):\n        g.model = load_model(current_app.config['MODEL_CLS_PATH'], current_app.config['MODEL_CLS_NAME'], current_app.config['MODEL_LOAD_ARGS'])\n    return g.model", "docstring": "Get the NN model that's being analyzed from the request context.  Put\nthe model in the request context if it is not yet there.\n\nReturns:\ninstance of :class:`.models.model.Model` or derived\nclass", "source": "codesearchnet"}
{"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None):\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        discount_curve = get_discount_curve(self._discount_curve_type, market, self._mask)\n        discount_factors = discount_curve.discount_factor(self._payment_dates)\n        _, cashflows = self.cashflows(market)\n        cashflow_pvs = cashflows * discount_factors\n        return tf.math.reduce_sum(cashflow_pvs, axis=1)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA `Tensor` of shape `batch_shape`  containing the modeled price of each\nstream based on the input market data.", "source": "github-repos"}
{"code": "def get_servo_torque(self):\n        \n        data = []\n        data.append(0x09)\n        data.append(self.servoid)\n        data.append(RAM_READ_REQ)\n        data.append(PWM_RAM)\n        data.append(BYTE2)\n        send_data(data)\n        rxdata = []\n        try:\n            rxdata = SERPORT.read(13)\n            if ord(rxdata[10])<=127:\n                return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)\n            else:\n                return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF\n        except HerkulexError:\n            raise HerkulexError(\"could not communicate with motors\")", "docstring": "Gets the current torque of Herkulex\n\nGives the current load on the servo shaft.\nIt is actually the PWM value to the motors\n\nArgs:\nnone\n\nReturns:\nint: the torque on servo shaft. range from -1023 to 1023\n\nRaises:\nSerialException: Error occured while opening serial port", "source": "juraj-google-style"}
{"code": "def _handle_client_exception():\n    try:\n        (yield)\n    except _ClientException as exception:\n        if (exception.http_status in _ERROR_CODES):\n            raise _ERROR_CODES[exception.http_status](exception.http_reason)\n        raise", "docstring": "Handle Swift exception and convert to class\nIO exceptions\n\nRaises:\nOSError subclasses: IO error.", "source": "codesearchnet"}
{"code": "def replace_species(self, species_mapping):\n    species_mapping = {get_el_sp(k): v for (k, v) in species_mapping.items()}\n    sp_to_replace = set(species_mapping.keys())\n    sp_in_structure = set(self.composition.keys())\n    if (not sp_in_structure.issuperset(sp_to_replace)):\n        warnings.warn(('Some species to be substituted are not present in structure. Pls check your input. Species to be substituted = %s; Species in structure = %s' % (sp_to_replace, sp_in_structure)))\n    for site in self._sites:\n        if sp_to_replace.intersection(site.species):\n            c = Composition()\n            for (sp, amt) in site.species.items():\n                new_sp = species_mapping.get(sp, sp)\n                try:\n                    c += (Composition(new_sp) * amt)\n                except Exception:\n                    c += {new_sp: amt}\n            site.species = c", "docstring": "Swap species.\n\nArgs:\nspecies_mapping (dict): dict of species to swap. Species can be\nelements too. E.g., {Element(\"Li\"): Element(\"Na\")} performs\na Li for Na substitution. The second species can be a\nsp_and_occu dict. For example, a site with 0.5 Si that is\npassed the mapping {Element('Si): {Element('Ge'):0.75,\nElement('C'):0.25} } will have .375 Ge and .125 C.", "source": "codesearchnet"}
{"code": "def _VerifyValues(self, image, ksizes, strides, padding, patches):\n    ksizes = [1] + ksizes + [1]\n    strides = [1] + strides + [1]\n    for dtype in [np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype]:\n        out_tensor = array_ops.extract_volume_patches(constant_op.constant(image.astype(dtype)), ksizes=ksizes, strides=strides, padding=padding, name='im2col_3d')\n        self.assertAllClose(patches.astype(dtype), self.evaluate(out_tensor))", "docstring": "Tests input-output pairs for the ExtractVolumePatches op.\n\nArgs:\nimage: Input tensor with shape:\n[batch, in_planes, in_rows, in_cols, depth].\nksizes: Patch size specified as: [ksize_planes, ksize_rows, ksize_cols].\nstrides: Output strides, specified as:\n[stride_planes, stride_rows, stride_cols].\npadding: Padding type.\npatches: Expected output.\n\nNote:\nrates are not supported as of now.", "source": "github-repos"}
{"code": "def __init__(self, window_size=zlib.MAX_WBITS):\n    \n    super(ZlibDecompressor, self).__init__()\n    self._zlib_decompressor = zlib.decompressobj(window_size)", "docstring": "Initializes a decompressor.\n\nArgs:\nwindow_size (Optional[int]): base two logarithm of the size of\nthe compression history buffer (aka window size). When the value\nis negative, the standard zlib data header is suppressed.", "source": "juraj-google-style"}
{"code": "def load_data(path, verbose=False, raise_errors = False):\n        \n\n\n        \n        if not os.path.exists(path):\n            if raise_errors:\n                raise AttributeError('Path given does not exist!')\n            else:\n                print('Path given does not exist!')\n                return\n\n        \n        \n        \n        path = Script.check_filename(path)\n\n        if verbose:\n            print('script path', path)\n\n\n        \n        \n        data = {}\n        \n        \n        \n        \n        \n        if 'raw_data' in os.listdir(path):  \n\n            if verbose:\n                print('raw_data subfolder found')\n            data_files = os.listdir(os.path.join(path, 'raw_data' + '/'))\n            path = os.path.join(path, 'raw_data' + '/')\n\n        else:\n            data_files = glob.glob(os.path.join(path, '*.csv'))\n\n        if verbose:\n            print('data_files found', data_files)\n\n        \n        if not data_files:\n\n            if raise_errors:\n                raise AttributeError('Could not find data files in {:s}'.format(path))\n            else:\n                print('Could not find data files in {:s}'.format(path))\n                return\n\n        \n        for data_file in data_files:\n            \n            data_name = data_file.split('-')[-1][0:-4] \n\n            try:\n                imported_data_df = pd.read_csv(os.path.join(path, data_file))\n\n                \n                \n                column_headers = list(imported_data_df.columns.values)\n                if sum([int(x.isdigit()) for x in column_headers]) != len(column_headers):\n                    data[data_name] = {h: imported_data_df[h].values for h in column_headers}\n                else:\n                    \n                    data[data_name] = np.squeeze(imported_data_df.values)\n            except pd.errors.EmptyDataError as err:\n\n                if raise_errors:\n                    raise err('data file ' + data_file + ' is empty: did not load!')\n                else:\n                    print('data file ' + data_file + ' is empty: did not load!')\n\n\n        return data", "docstring": "loads the data that has been save with Script.save.\nArgs:\npath: path to folder saved by Script.save or raw_data folder within\nverbose: if true print additional information\nraise_errors: if true raise errors if false just print to std out\nReturns:\na dictionary with the data of form\ndata = {param_1_name: param_1_data, ...}", "source": "juraj-google-style"}
{"code": "def received_response(self, value):\n        \n        if value == self._defaults['receivedResponse'] and 'receivedResponse' in self._values:\n            del self._values['receivedResponse']\n        else:\n            self._values['receivedResponse'] = value", "docstring": "The received_response property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def write_fasta_file_from_dict(indict, outname, outdir=None, outext='.faa', force_rerun=False):\n    \n\n    if not outdir:\n        outdir = ''\n    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        seqs = []\n        for i, s in indict.items():\n            seq = ssbio.protein.sequence.utils.cast_to_seq_record(s, id=i)\n            seqs.append(seq)\n        SeqIO.write(seqs, outfile, \"fasta\")\n\n    return outfile", "docstring": "Write a FASTA file for a dictionary of IDs and their sequence strings.\n\nArgs:\nindict: Input dictionary with keys as IDs and values as sequence strings\noutname: Name of the output file which will have outext appended to it\noutdir: Path to directory to output sequences to\noutext: Extension of FASTA file, default \".faa\"\nforce_rerun: If file should be overwritten if it exists\n\nReturns:\nstr: Path to output FASTA file.", "source": "juraj-google-style"}
{"code": "def write_summaries(self, tagged_data, experiment_name, run_name):\n    \n    logger.debug('Writing summaries for %s tags', len(tagged_data))\n    \n    \n    \n    \n    with self._db:\n      self._db.execute('BEGIN TRANSACTION')\n      run_id = self._maybe_init_run(experiment_name, run_name)\n      tag_to_metadata = {\n          tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)\n      }\n      tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)\n      tensor_values = []\n      for tag, tagdata in six.iteritems(tagged_data):\n        tag_id = tag_to_id[tag]\n        for step, wall_time, tensor_proto in tagdata.values:\n          dtype = tensor_proto.dtype\n          shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim)\n          \n          \n          data = self._make_blob(\n              tensor_proto.tensor_content or\n              tensor_util.make_ndarray(tensor_proto).tobytes())\n          tensor_values.append((tag_id, step, wall_time, dtype, shape, data))\n      self._db.executemany(\n          ,\n          tensor_values)", "docstring": "Transactionally writes the given tagged summary data to the DB.\n\nArgs:\ntagged_data: map from tag to TagData instances.\nexperiment_name: name of experiment.\nrun_name: name of run.", "source": "juraj-google-style"}
{"code": "def find_all(container):\n    \n\n    if isinstance(container, dict):\n        names = container.keys()\n    else:\n        names = dir(container)\n\n    built_context = BasicContext()\n\n    for name in names:\n        \n        if name.startswith('_'):\n            continue\n\n        if isinstance(container, dict):\n            obj = container[name]\n        else:\n            obj = getattr(container, name)\n\n        \n        \n        \n\n        \n        if isinstance(container, dict) and isinstance(obj, str):\n            built_context[name] = obj\n        elif hasattr(obj, 'metadata') and isinstance(getattr(obj, 'metadata'), AnnotatedMetadata):\n            built_context[name] = obj\n\n    return built_context", "docstring": "Find all annotated function inside of a container.\n\nAnnotated functions are identified as those that:\n- do not start with a _ character\n- are either annotated with metadata\n- or strings that point to lazily loaded modules\n\nArgs:\ncontainer (object): The container to search for annotated functions.\n\nReturns:\ndict: A dict with all of the found functions in it.", "source": "juraj-google-style"}
{"code": "class MCTCTProcessor(ProcessorMixin):\n    feature_extractor_class = 'MCTCTFeatureExtractor'\n    tokenizer_class = 'AutoTokenizer'\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False\n\n    def __call__(self, *args, **kwargs):\n        \n        if self._in_target_context_manager:\n            return self.current_processor(*args, **kwargs)\n        if 'raw_speech' in kwargs:\n            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')\n            audio = kwargs.pop('raw_speech')\n        else:\n            audio = kwargs.pop('audio', None)\n        sampling_rate = kwargs.pop('sampling_rate', None)\n        text = kwargs.pop('text', None)\n        if len(args) > 0:\n            audio = args[0]\n            args = args[1:]\n        if audio is None and text is None:\n            raise ValueError('You need to specify either an `audio` or `text` input to process.')\n        if audio is not None:\n            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)\n        if text is not None:\n            encodings = self.tokenizer(text, **kwargs)\n        if text is None:\n            return inputs\n        elif audio is None:\n            return encodings\n        else:\n            inputs['labels'] = encodings['input_ids']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def pad(self, *args, **kwargs):\n        \n        if self._in_target_context_manager:\n            return self.current_processor.pad(*args, **kwargs)\n        input_features = kwargs.pop('input_features', None)\n        labels = kwargs.pop('labels', None)\n        if len(args) > 0:\n            input_features = args[0]\n            args = args[1:]\n        if input_features is not None:\n            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)\n        if labels is not None:\n            labels = self.tokenizer.pad(labels, **kwargs)\n        if labels is None:\n            return input_features\n        elif input_features is None:\n            return labels\n        else:\n            input_features['labels'] = labels['input_ids']\n            return input_features\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @contextmanager\n    def as_target_processor(self):\n        \n        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')\n        self._in_target_context_manager = True\n        self.current_processor = self.tokenizer\n        yield\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False", "docstring": "Constructs a MCTCT processor which wraps a MCTCT feature extractor and a MCTCT tokenizer into a single processor.\n\n[`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the\n[`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.\n\nArgs:\nfeature_extractor (`MCTCTFeatureExtractor`):\nAn instance of [`MCTCTFeatureExtractor`]. 
The feature extractor is a required input.\ntokenizer (`AutoTokenizer`):\nAn instance of [`AutoTokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def run(self, dag):\n    for node in dag.op_nodes():\n        basic_insts = ['measure', 'reset', 'barrier', 'snapshot']\n        if (node.name in basic_insts):\n            continue\n        if (node.name in self.basis):\n            continue\n        rule = node.op.definition\n        if (not rule):\n            raise QiskitError(('Cannot unroll the circuit to the given basis, %s. No rule to expand instruction %s.' % (str(self.basis), node.op.name)))\n        decomposition = DAGCircuit()\n        decomposition.add_qreg(rule[0][1][0][0])\n        for inst in rule:\n            decomposition.apply_operation_back(*inst)\n        unrolled_dag = self.run(decomposition)\n        dag.substitute_node_with_dag(node, unrolled_dag)\n    return dag", "docstring": "Expand all op nodes to the given basis.\n\nArgs:\ndag(DAGCircuit): input dag\n\nRaises:\nQiskitError: if unable to unroll given the basis due to undefined\ndecomposition rules (such as a bad basis) or excessive recursion.\n\nReturns:\nDAGCircuit: output unrolled dag", "source": "codesearchnet"}
{"code": "def extract_build_info(exe_path, elf_section=ELF_SECTION):\n    build_info = {}\n    with mkdtemp() as tempd, pushd(tempd):\n        proc = subprocess.Popen([OBJCOPY, DUMP_SECTION, '{secn}={ofile}'.format(secn=elf_section, ofile=BUILDINFO_FILE), exe_path], stderr=subprocess.PIPE)\n        proc.wait()\n        errno = proc.returncode\n        stderr = proc.stderr.read()\n        if (errno or len(stderr)):\n            LOGGER.warning('objcopy failed with errno %s.', errno)\n            if len(stderr):\n                LOGGER.warning('objcopy failed with following msg:\\n%s', stderr)\n            return build_info\n        with open(BUILDINFO_FILE) as build_info_f:\n            try:\n                build_info = json.load(build_info_f, object_hook=byteify)\n            except JSONDcdError as jsde:\n                LOGGER.warning('benchmark executable build is not valid json:')\n                LOGGER.warning(jsde.msg)\n                LOGGER.warning('build info section content:')\n                LOGGER.warning(jsde.doc)\n    return build_info", "docstring": "Extracts the build information from a given executable.\n\nThe build information is expected to be in json format, which is parsed\nand returned as a dictionary.\nIf no build information is found an empty dictionary is returned.\n\nThis assumes binutils 2.25 to work.\n\nArgs:\nexe_path (str): The full path to the executable to be examined\n\nReturns:\ndict: A dictionary of the extracted information.", "source": "codesearchnet"}
{"code": "def rsqrt(x):\n    if any_symbolic_tensors((x,)):\n        return Rsqrt().symbolic_call(x)\n    x = backend.convert_to_tensor(x)\n    return backend.math.rsqrt(x)", "docstring": "Computes reciprocal of square root of x element-wise.\n\nArgs:\nx: input tensor\n\nReturns:\nA tensor with the same dtype as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([1.0, 10.0, 100.0])\n>>> keras.ops.rsqrt(x)\narray([1.0, 0.31622776, 0.1], dtype=float32)", "source": "github-repos"}
{"code": "def async_decorator(func):\n\n    @functools.wraps(func)\n    def async_wrapper(*args, **kwargs):\n        'Wraps up the call to ``func``, so that it is called from a separate\\n        thread.\\n\\n        The callback, if given, will be called with two parameters,\\n        ``exception`` and ``result`` as ``callback(exception, result)``.  If\\n        the thread ran to completion without error, ``exception`` will be\\n        ``None``, otherwise ``exception`` will be the generated exception that\\n        stopped the thread.  Result is the result of the exected function.\\n\\n        Args:\\n          callback (function): the callback to ultimately be called\\n          args: list of arguments to pass to ``func``\\n          kwargs: key-word arguments dictionary to pass to ``func``\\n\\n        Returns:\\n          A thread if the call is asynchronous, otherwise the the return value\\n          of the wrapped function.\\n\\n        Raises:\\n          TypeError: if ``callback`` is not callable or is missing\\n        '\n        if (('callback' not in kwargs) or (not kwargs['callback'])):\n            return func(*args, **kwargs)\n        callback = kwargs.pop('callback')\n        if (not callable(callback)):\n            raise TypeError(\"Expected 'callback' is not callable.\")\n\n        def thread_func(*args, **kwargs):\n            'Thread function on which the given ``func`` and ``callback``\\n            are executed.\\n\\n            Args:\\n              args: list of arguments to pass to ``func``\\n              kwargs: key-word arguments dictionary to pass to ``func``\\n\\n            Returns:\\n              Return value of the wrapped function.\\n            '\n            (exception, res) = (None, None)\n            try:\n                res = func(*args, **kwargs)\n            except Exception as e:\n                exception = e\n            return callback(exception, res)\n        thread = threads.ThreadReturn(target=thread_func, args=args, kwargs=kwargs)\n        thread.daemon = True\n        thread.start()\n        return thread\n    return async_wrapper", "docstring": "Asynchronous function decorator.  Interprets the function as being\nasynchronous, so returns a function that will handle calling the\nFunction asynchronously.\n\nArgs:\nfunc (function): function to be called asynchronously\n\nReturns:\nThe wrapped function.\n\nRaises:\nAttributeError: if ``func`` is not callable", "source": "codesearchnet"}
{"code": "def get_python_executable(version: tuple[int, ...]) -> list[str] | None:\n    if can_compile_bytecode_natively(version):\n        return None\n    for exe in _get_python_exes(version):\n        exe_version = _get_python_exe_version(exe)\n        if exe_version == version:\n            return exe\n    raise PythonNotFoundError()", "docstring": "Get a python executable corresponding to version.\n\nArgs:\nversion: The required python version\n\nReturns:\n- None: The current host interpreter can compile `version`\n- [path-to-exe, args]: A valid python-`version` interpreter\n\nRaises:\nPythonNotFoundError: if no suitable interpreter is found.", "source": "github-repos"}
{"code": "def connect():\n    upnp = miniupnpc.UPnP()\n    upnp.discoverdelay = 200\n    providers = upnp.discover()\n    if (providers > 1):\n        log.debug('multiple upnp providers found', num_providers=providers)\n    elif (providers < 1):\n        log.error('no upnp providers found')\n        return None\n    try:\n        location = upnp.selectigd()\n        log.debug('connected', upnp=upnp)\n    except Exception as e:\n        log.error('Error when connecting to uPnP provider', exception_info=e)\n        return None\n    if (not valid_mappable_ipv4(upnp.lanaddr)):\n        log.error('could not query your lanaddr', reported=upnp.lanaddr)\n        return None\n    try:\n        if (not valid_mappable_ipv4(upnp.externalipaddress())):\n            log.error('could not query your externalipaddress', reported=upnp.externalipaddress())\n            return None\n        return (upnp, location)\n    except Exception:\n        log.error('error when connecting with uPnP provider', location=location)\n        return None", "docstring": "Try to connect to the router.\n\nReturns:\nu (miniupnc.UPnP): the connected upnp-instance\nrouter (string): the connection information", "source": "codesearchnet"}
{"code": "def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n    fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)\n    return (fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :])", "docstring": "Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory\nstorage as `fused_qkv`\n\nArgs:\nfused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]\n\nReturns:\nquery: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]\nvalue: [batch_size, seq_length, num_heads, head_dim]", "source": "github-repos"}
{"code": "def indent(text, n=2, ch=' '):\n    padding = (ch * n)\n    return '\\n'.join(((padding + line) for line in text.split('\\n')))", "docstring": "Indent all the lines in a given block of text by a specified amount.\n\nArgs:\ntext (str) :\nThe text to indent\n\nn (int, optional) :\nThe amount to indent each line by (default: 2)\n\nch (char, optional) :\nWhat character to fill the indentation with (default: \" \")", "source": "codesearchnet"}
{"code": "def index_in_block(self, channel_index: int) -> int:\n    if ((channel_index < 0) or (channel_index >= self.cdim)):\n        raise ValueError()\n    struct = self.block_structure\n    if (len(struct) == 1):\n        return (channel_index, 0)\n    i = 1\n    while ((sum(struct[:i]) <= channel_index) and (i < self.cdim)):\n        i += 1\n    block_index = (i - 1)\n    index_in_block = (channel_index - sum(struct[:block_index]))\n    return (index_in_block, block_index)", "docstring": "Return the index a channel has within the subblock it belongs to\n\nI.e., only for reducible circuits, this gives a result different from\nthe argument itself.\n\nArgs:\nchannel_index (int): The index of the external channel\n\nRaises:\nValueError: for an invalid `channel_index`", "source": "codesearchnet"}
{"code": "def decode_response(data):\n    \n    res = CaseInsensitiveDict()\n    for dataline in data.decode('utf-8').splitlines()[1:]:\n        dataline = dataline.strip()\n        if not dataline:\n            continue\n        line_parts = dataline.split(':', 1)\n        \n        if len(line_parts) < 2:\n            line_parts = (line_parts[0], '')\n        res[line_parts[0].strip()] = line_parts[1].strip()\n    return res", "docstring": "Decodes the data from a SSDP response.\n\nArgs:\ndata (bytes): The encoded response.\n\nReturns:\ndict of string -> string: Case-insensitive dictionary of header name to\nheader value pairs extracted from the response.", "source": "juraj-google-style"}
{"code": "def leapfrog_step(leapfrog_step_state: LeapFrogStepState,\n                  step_size: FloatTensor, target_log_prob_fn: PotentialFn,\n                  kinetic_energy_fn: PotentialFn\n                 ) -> Tuple[LeapFrogStepState, LeapFrogStepExtras]:\n  \n  state = leapfrog_step_state.state\n  state_grads = leapfrog_step_state.state_grads\n  momentum = leapfrog_step_state.momentum\n  step_size = maybe_broadcast_structure(step_size, state)\n\n  state = tf.nest.map_structure(tf.convert_to_tensor, state)\n  momentum = tf.nest.map_structure(tf.convert_to_tensor, momentum)\n  state = tf.nest.map_structure(tf.convert_to_tensor, state)\n\n  if state_grads is None:\n    _, _, state_grads = call_and_grads(target_log_prob_fn, state)\n  else:\n    state_grads = tf.nest.map_structure(tf.convert_to_tensor, state_grads)\n\n  momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,\n                                   state_grads, step_size)\n\n  kinetic_energy, kinetic_energy_extra, momentum_grads = call_and_grads(\n      kinetic_energy_fn, momentum)\n\n  state = tf.nest.map_structure(lambda x, mg, s: x + mg * s, state,\n                                momentum_grads, step_size)\n\n  target_log_prob, state_extra, state_grads = call_and_grads(\n      target_log_prob_fn, state)\n\n  momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s, momentum,\n                                   state_grads, step_size)\n\n  return LeapFrogStepState(state, state_grads, momentum), LeapFrogStepExtras(\n      target_log_prob, state_extra, kinetic_energy, kinetic_energy_extra)", "docstring": "Leapfrog `TransitionOperator`.\n\nArgs:\nleapfrog_step_state: LeapFrogStepState.\nstep_size: Step size, structure broadcastable to the `target_log_prob_fn`\nstate.\ntarget_log_prob_fn: Target log prob fn.\nkinetic_energy_fn: Kinetic energy fn.\n\nReturns:\nleapfrog_step_state: LeapFrogStepState.\nleapfrog_step_extras: LeapFrogStepExtras.", "source": "juraj-google-style"}
{"code": "def after(self):\n    d = Deferred()\n    self._after_deferreds.append(d)\n    return d.chain", "docstring": "Return a deferred that will fire after the request is finished.\n\nReturns:\n\nDeferred: a new deferred that will fire appropriately", "source": "codesearchnet"}
{"code": "def val_to_mrc(code, val):\n    code = str(code)\n    if (len(code) < 3):\n        code += ((3 - len(code)) * ' ')\n    return ('%s   L %s' % (code, val))", "docstring": "Convert one single `val` to MRC.\n\nThis function may be used for control fields in MARC records.\n\nArgs:,\ncode (str): Code of the field.\nval (str): Value of the field.\n\nReturns:\nstr: Correctly padded MRC line with field.", "source": "codesearchnet"}
{"code": "def marshal_json(\n    obj,\n    types=JSON_TYPES,\n    fields=None,\n):\n    \n    return marshal_dict(\n        obj,\n        types,\n        fields=fields,\n    )", "docstring": "Recursively marshal a Python object to a JSON-compatible dict\nthat can be passed to json.{dump,dumps}, a web client,\nor a web server, etc...\n\nArgs:\nobj:    object, It's members can be nested Python\nobjects which will be converted to dictionaries\ntypes:  tuple-of-types, The JSON primitive types, typically\nyou would not change this\nfields: None-list-of-str, Explicitly marshal only these fields\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def _render_timestep(self,\n            t: int,\n            s: Fluents, a: Fluents, f: Fluents,\n            r: np.float32) -> None:\n        \n        print(\"============================\")\n        print(\"TIME = {}\".format(t))\n        print(\"============================\")\n        fluent_variables = self._compiler.rddl.action_fluent_variables\n        self._render_fluent_timestep('action', a, fluent_variables)\n        fluent_variables = self._compiler.rddl.interm_fluent_variables\n        self._render_fluent_timestep('interms', f, fluent_variables)\n        fluent_variables = self._compiler.rddl.state_fluent_variables\n        self._render_fluent_timestep('states', s, fluent_variables)\n        self._render_reward(r)", "docstring": "Prints fluents and rewards for the given timestep `t`.\n\nArgs:\nt (int): timestep\ns (Sequence[Tuple[str], np.array]: State fluents.\na (Sequence[Tuple[str], np.array]: Action fluents.\nf (Sequence[Tuple[str], np.array]: Interm state fluents.\nr (np.float32): Reward.", "source": "juraj-google-style"}
{"code": "def __init__(self, json_data):\n        \n        super(ImmutableData, self).__init__()\n        self._json_data = json_dict(json_data)", "docstring": "Init a new ImmutableData object from a dictionary or JSON string.\n\nArgs:\njson_data(dict, basestring): Input JSON string or dictionary.\n\nRaises:\nTypeError: If the input object is not a dictionary or string.", "source": "juraj-google-style"}
{"code": "def assign(self, institute, case, user, link):\n        \n        LOG.info(\"Creating event for assigning {0} to {1}\"\n                    .format(user['name'].encode('utf-8'), case['display_name']))\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='case',\n            verb='assign',\n            subject=case['display_name']\n        )\n        LOG.info(\"Updating {0} to be assigned with {1}\"\n                    .format(case['display_name'], user['name']))\n\n        updated_case = self.case_collection.find_one_and_update(\n            {'_id': case['_id']},\n            {'$addToSet': {'assignees': user['_id']}},\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n        return updated_case", "docstring": "Assign a user to a case.\n\nThis function will create an Event to log that a person has been assigned\nto a case. Also the user will be added to case \"assignees\".\n\nArguments:\ninstitute (dict): A institute\ncase (dict): A case\nuser (dict): A User object\nlink (str): The url to be used in the event\n\nReturns:\nupdated_case(dict)", "source": "juraj-google-style"}
{"code": "def get_keys_to_action(self):\n    keyword_to_key = {'UP': ord('w'), 'DOWN': ord('s'), 'LEFT': ord('a'), 'RIGHT': ord('d'), 'FIRE': ord(' ')}\n    keys_to_action = {}\n    for (action_id, action_meaning) in enumerate(self.action_meanings):\n        keys_tuple = tuple(sorted([key for (keyword, key) in keyword_to_key.items() if (keyword in action_meaning)]))\n        assert (keys_tuple not in keys_to_action)\n        keys_to_action[keys_tuple] = action_id\n    keys_to_action[(ord('r'),)] = self.RETURN_DONE_ACTION\n    keys_to_action[(ord('c'),)] = self.TOGGLE_WAIT_ACTION\n    keys_to_action[(ord('n'),)] = self.WAIT_MODE_NOOP_ACTION\n    return keys_to_action", "docstring": "Get mapping from keyboard keys to actions.\n\nRequired by gym.utils.play in environment or top level wrapper.\n\nReturns:\n{\nUnicode code point for keyboard key: action (formatted for step()),\n...\n}", "source": "codesearchnet"}
{"code": "def _hash_file(fpath, algorithm='sha256', chunk_size=65535):\n    if isinstance(algorithm, str):\n        hasher = _resolve_hasher(algorithm)\n    else:\n        hasher = algorithm\n    with open(fpath, 'rb') as fpath_file:\n        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):\n            hasher.update(chunk)\n    return hasher.hexdigest()", "docstring": "Calculates a file sha256 or md5 hash.\n\nExample:\n\n```python\n_hash_file('/path/to/file.zip')\n'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'\n```\n\nArgs:\nfpath: path to the file being validated\nalgorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.\nThe default `'auto'` detects the hash algorithm in use.\nchunk_size: Bytes to read at a time, important for large files.\n\nReturns:\nThe file hash", "source": "github-repos"}
{"code": "def acquire_resources(self, source):\n        \n        if source not in self.consulted:\n            self.consulted.add(source)\n            if isinstance(source, Tag):\n                res = source\n            else:\n                res = source(self.H)\n            if res is None:\n                res = set()\n            elif isinstance(res, (list, tuple)):\n                res = set(res)\n            elif isinstance(res, Tag):\n                res = {res}\n            self.resources |= res", "docstring": "Store the resources returned by ``source()``. If ``source`` has\nbeen acquired before, it will not be called a second time.\n\nArgs:\nsource (callable): A function that returns a resource or a\nlist of resources.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def register_frame_to_skip(cls, file_name, function_name, line_number=None):\n    if (line_number is not None):\n        cls._frames_to_skip.add((file_name, function_name, line_number))\n    else:\n        cls._frames_to_skip.add((file_name, function_name))", "docstring": "Registers a function name to skip when walking the stack.\n\nThe ABSLLogger sometimes skips method calls on the stack\nto make the log messages meaningful in their appropriate context.\nThis method registers a function from a particular file as one\nwhich should be skipped.\n\nArgs:\nfile_name: str, the name of the file that contains the function.\nfunction_name: str, the name of the function to skip.\nline_number: int, if provided, only the function with this starting line\nnumber will be skipped. Otherwise, all functions with the same name\nin the file will be skipped.", "source": "codesearchnet"}
{"code": "def read(self, bands=None, **kwargs):\n        \n        arr = self\n        if bands is not None:\n            arr = self[bands, ...]\n        return arr.compute(scheduler=threaded_get)", "docstring": "Reads data from a dask array and returns the computed ndarray matching the given bands\n\nArgs:\nbands (list): band indices to read from the image. Returns bands in the order specified in the list of bands.\n\nReturns:\nndarray: a numpy array of image data", "source": "juraj-google-style"}
{"code": "def get_ISBNs(self):\n    invalid_isbns = set(self.get_invalid_ISBNs())\n    valid_isbns = [self._clean_isbn(isbn) for isbn in self['020a'] if (self._clean_isbn(isbn) not in invalid_isbns)]\n    if valid_isbns:\n        return valid_isbns\n    return [self._clean_isbn(isbn) for isbn in self['901i']]", "docstring": "Get list of VALID ISBN.\n\nReturns:\nlist: List with *valid* ISBN strings.", "source": "codesearchnet"}
{"code": "def is_flat(outputs):\n    if isinstance(outputs, collections_abc.Sequence):\n        for o in outputs:\n            if isinstance(o, collections_abc.Sequence) or isinstance(o, collections_abc.Mapping) or hasattr(o.__class__, '__attrs_attrs__'):\n                return False\n    if isinstance(outputs, collections_abc.Mapping):\n        return False\n    if hasattr(outputs.__class__, '__attrs_attrs__'):\n        return False\n    return True", "docstring": "Checks if outputs is a flat structure.\n\nFollowing structures and values are considered flat:\n1) None\n2) A single object\n3) A list or tuple of Tensors/Operations\n\nThe only structures that this function understands are sequences,\ndictionaries and types defined using the attrs library.  E.g. this means\nthat if outputs contains a single user-defined Object, it is considered to\nbe flat. Errors are raised later on if that Object cannot be converted to a\nTensor.\n\nArgs:\noutputs: Output from `computation` inside `xla.compile`.\n\nReturns:\nA boolean indicates whether outputs is flat.", "source": "github-repos"}
{"code": "def cracked(self):\n    logging.info('Cracking message: {0}'.format(self.message))\n    entropy_values = {}\n    attempt_cache = {}\n    message = self.message\n    for i in range(25):\n        self.message = message\n        self.offset = (i * (- 1))\n        logging.debug('Attempting crack with offset: {0}'.format(self.offset))\n        test_cipher = self.cipher()\n        logging.debug('Attempting plaintext: {0}'.format(test_cipher))\n        entropy_values[i] = self.calculate_entropy(test_cipher)\n        attempt_cache[i] = test_cipher\n    sorted_by_entropy = sorted(entropy_values, key=entropy_values.get)\n    self.offset = (sorted_by_entropy[0] * (- 1))\n    cracked_text = attempt_cache[sorted_by_entropy[0]]\n    self.message = cracked_text\n    logging.debug('Entropy scores: {0}'.format(entropy_values))\n    logging.debug('Lowest entropy score: {0}'.format(str(entropy_values[sorted_by_entropy[0]])))\n    logging.debug('Most likely offset: {0}'.format(self.offset))\n    logging.debug('Most likely message: {0}'.format(cracked_text))\n    return cracked_text", "docstring": "Attempts to crack ciphertext using frequency of letters in English.\n\nReturns:\nString of most likely message.", "source": "codesearchnet"}
{"code": "def from_task(cls, task):\n        \n        target = cls(name=task.get_name(),\n                     params=task.get_param_string())\n\n        return target", "docstring": "Create a new target representing a task and its parameters\n\nArgs:\ntask: Task instance to create target for; the task class has to inherit\nfrom :class:`ozelot.tasks.TaskBase`.\n\nReturns:\nozelot.tasks.ORMTarget: a new target instance", "source": "juraj-google-style"}
{"code": "def parse_case(config):\n    if ('owner' not in config):\n        raise ConfigError('A case has to have a owner')\n    if ('family' not in config):\n        raise ConfigError(\"A case has to have a 'family'\")\n    individuals = parse_individuals(config['samples'])\n    case_data = {'owner': config['owner'], 'collaborators': [config['owner']], 'case_id': config['family'], 'display_name': config.get('family_name', config['family']), 'genome_build': config.get('human_genome_build'), 'rank_model_version': config.get('rank_model_version'), 'rank_score_threshold': config.get('rank_score_threshold', 0), 'analysis_date': config['analysis_date'], 'individuals': individuals, 'vcf_files': {'vcf_snv': config.get('vcf_snv'), 'vcf_sv': config.get('vcf_sv'), 'vcf_str': config.get('vcf_str'), 'vcf_cancer': config.get('vcf_cancer'), 'vcf_snv_research': config.get('vcf_snv_research'), 'vcf_sv_research': config.get('vcf_sv_research'), 'vcf_cancer_research': config.get('vcf_cancer_research')}, 'default_panels': config.get('default_gene_panels', []), 'gene_panels': config.get('gene_panels', []), 'assignee': config.get('assignee'), 'peddy_ped': config.get('peddy_ped'), 'peddy_sex': config.get('peddy_sex'), 'peddy_check': config.get('peddy_check'), 'delivery_report': config.get('delivery_report'), 'multiqc': config.get('multiqc'), 'track': config.get('track', 'rare')}\n    if ('madeline' in config):\n        mad_path = Path(config['madeline'])\n        if (not mad_path.exists()):\n            raise ValueError('madeline path not found: {}'.format(mad_path))\n        with mad_path.open('r') as in_handle:\n            case_data['madeline_info'] = in_handle.read()\n    if (case_data['vcf_files']['vcf_cancer'] or case_data['vcf_files']['vcf_cancer_research']):\n        case_data['track'] = 'cancer'\n    return case_data", "docstring": "Parse case information from config or PED files.\n\nArgs:\nconfig (dict): case config with detailed information\n\nReturns:\ndict: parsed case data", "source": "codesearchnet"}
{"code": "def GetApprovalForObject(object_urn, token=None, username=''):\n    if (token is None):\n        raise access_control.UnauthorizedAccess('No token given, cannot authenticate.')\n    if (not username):\n        username = token.username\n    approvals_root_urn = aff4.ROOT_URN.Add('ACL').Add(object_urn.Path()).Add(username)\n    children_urns = list(aff4.FACTORY.ListChildren(approvals_root_urn))\n    if (not children_urns):\n        raise access_control.UnauthorizedAccess(('No approval found for user %s' % utils.SmartStr(username)), subject=object_urn)\n    last_error = None\n    approvals = aff4.FACTORY.MultiOpen(children_urns, mode='r', aff4_type=Approval, age=aff4.ALL_TIMES, token=token)\n    for approval in approvals:\n        try:\n            test_token = access_control.ACLToken(username=username, reason=approval.Get(approval.Schema.REASON))\n            approval.CheckAccess(test_token)\n            return test_token\n        except access_control.UnauthorizedAccess as e:\n            last_error = e\n    if last_error:\n        raise access_control.UnauthorizedAccess(last_error, subject=object_urn)\n    else:\n        raise access_control.UnauthorizedAccess((\"Couldn't open any of %d approvals for user %s\" % (len(children_urns), utils.SmartStr(username))), subject=object_urn)", "docstring": "Looks for approvals for an object and returns available valid tokens.\n\nArgs:\nobject_urn: Urn of the object we want access to.\n\ntoken: The token to use to lookup the ACLs.\n\nusername: The user to get the approval for, if \"\" we get it from the\ntoken.\n\nReturns:\nA token for access to the object on success, otherwise raises.\n\nRaises:\nUnauthorizedAccess: If there are no valid approvals available.", "source": "codesearchnet"}
{"code": "def get_bytes(obj):\n    \n    try:\n        obj = obj.read(_NUM_SIGNATURE_BYTES)\n    except AttributeError:\n        \n        pass\n\n    kind = type(obj)\n\n    if kind is bytearray:\n        return signature(obj)\n\n    if kind is str:\n        return get_signature_bytes(obj)\n\n    if kind is bytes:\n        return signature(obj)\n\n    if kind is memoryview:\n        return signature(obj).tolist()\n\n    raise TypeError('Unsupported type as file input: %s' % kind)", "docstring": "Infers the input type and reads the first 262 bytes,\nreturning a sliced bytearray.\n\nArgs:\nobj: path to readable, file, bytes or bytearray.\n\nReturns:\nFirst 262 bytes of the file content as bytearray type.\n\nRaises:\nTypeError: if obj is not a supported type.", "source": "juraj-google-style"}
{"code": "def image(title, desc, image_name, group=None, height=None):\n    ie = {'Type': 'Image', 'Title': title, 'Description': desc, 'Plot File': image_name}\n    if group:\n        ie['Group'] = group\n    if height:\n        ie['Height'] = height\n    return ie", "docstring": "Builds an image element.  Image elements are primarily created\nand then wrapped into an image gallery element.  This is not required\nbehavior, however and it's independent usage should be allowed depending\non the behavior required.\n\nThe Javascript will search for the `image_name` in the component's\n`imgs` directory when rendering.  For example, all verification images\nare output to `vv_xxxx-xx-xx/verification/imgs` and then the verification\ncase's output page will search for `image_name` within that directory.\n\nArgs:\ntitle: The title to display\ndesc: A description of the image or plot\nimage_name: The filename of the image\ngroup: (optional) Title of lightbox group to join\nheight: (optional) Height of image thumbnail to draw\n\nReturns:\nA dictionary with the metadata specifying that it is to be\nrendered as an image element", "source": "codesearchnet"}
{"code": "def _set_optimizer(self, optimizer):\n    if isinstance(optimizer, (list, tuple)):\n        self.optimizer = [optimizers.get(opt) for opt in optimizer]\n    else:\n        self.optimizer = optimizers.get(optimizer)\n    if isinstance(self._dtype_policy, policy.PolicyV1):\n        loss_scale = self._dtype_policy.loss_scale\n    elif self._dtype_policy.name == 'mixed_float16':\n        loss_scale = 'dynamic'\n    else:\n        loss_scale = None\n    if loss_scale is not None and (not isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer)):\n        if isinstance(self.optimizer, list):\n            raise ValueError('When a dtype policy with a loss scale is used, you can only pass a single optimizer. Using policy %s and got optimizers: %s' % self._dtype_policy, self.optimizer)\n        if not isinstance(self.optimizer, optimizer_v2.OptimizerV2):\n            raise ValueError('\"optimizer\" must be an instance of tf.keras.optimizers.Optimizer when a dype policy with a loss scale  used, but got: %s. Using policy: %s' % (self.optimizer, self._dtype_policy))\n        if loss_scale == 'dynamic':\n            self.optimizer = loss_scale_optimizer.LossScaleOptimizer(self.optimizer)\n        else:\n            self.optimizer = loss_scale_optimizer.LossScaleOptimizerV1(self.optimizer, loss_scale)", "docstring": "Sets self.optimizer.\n\nSets self.optimizer to `optimizer`, potentially wrapping it with a\nLossScaleOptimizer.\n\nArgs:\noptimizer: The optimizer(s) to assign to self.optimizer.", "source": "github-repos"}
{"code": "def impute(X, value=None, train=None, dropna=True, inplace=True):\n    \n    if value is None:\n        Xfit = X[train] if train is not None else X\n        value = Xfit.mean()\n    else:\n        if train is not None:\n            raise ValueError(\"Cannot pass both train and value arguments\")\n\n    if dropna:\n        null_columns = value.index[value.isnull()]\n        if len(null_columns) > 0:\n            logging.info('Dropping null columns: \\n\\t%s' % null_columns)\n            if inplace:\n                X.drop(null_columns, axis=1, inplace=True)\n            else:\n                X = X.drop(null_columns, axis=1, inplace=False)\n\n    if inplace:\n        X.fillna(value.dropna(), inplace=True)\n    else:\n        X = X.fillna(value.dropna(), inplace=False)\n\n    return X", "docstring": "Performs mean imputation on a pandas dataframe.\nArgs:\ntrain: an optional training mask with which to compute the mean\nvalue: instead of computing the mean, use this as the value argument to fillna\ndropna: whether to drop all null columns\ninplace: whether to perform the imputation inplace\nReturns: the imputed DataFrame", "source": "juraj-google-style"}
{"code": "def OpenFile(self, filepath):\n    \n    archive = False\n    if '.zip/' in filepath:\n      archive = True\n      archive_type = '.zip'\n    if '.par/' in filepath:\n      archive = True\n      archive_type = '.par'\n    if archive:\n      path, archived_file = filepath.split(archive_type)\n      path += archive_type\n      zip_file = zipfile.ZipFile(path)\n      return zip_file.open(archived_file.strip('/'))\n    return open(filepath)", "docstring": "open()-replacement that automatically handles zip files.\n\nThis assumes there is at most one .zip in the file path.\nArgs:\nfilepath: the path to the file to open.\nReturns:\nAn open file-like object.", "source": "juraj-google-style"}
{"code": "def FormatArtifacts(self, artifacts):\n    \n    \n    artifact_definitions = [artifact.AsDict() for artifact in artifacts]\n    yaml_data = yaml.safe_dump_all(artifact_definitions)\n    return yaml_data", "docstring": "Formats artifacts to desired output format.\n\nArgs:\nartifacts (list[ArtifactDefinition]): artifact definitions.\n\nReturns:\nstr: formatted string of artifact definition.", "source": "juraj-google-style"}
{"code": "def runtime_deps(self):\n    install_requires = self.metadata['install_requires']\n    if (self.metadata['entry_points'] and ('setuptools' not in install_requires)):\n        install_requires.append('setuptools')\n    return sorted(self.name_convert_deps_list(deps_from_pyp_format(install_requires, runtime=True)))", "docstring": "Returns list of runtime dependencies of the package specified in\nsetup.py.\n\nDependencies are in RPM SPECFILE format - see dependency_to_rpm()\nfor details, but names are already transformed according to\ncurrent distro.\n\nReturns:\nlist of runtime dependencies of the package", "source": "codesearchnet"}
{"code": "def _flush_range(self, buffer, start, end):\n    with self._size_lock:\n        if (not self._size_synched):\n            self._size_synched = True\n            try:\n                self._size = self.raw._size\n            except (ObjectNotFoundError, UnsupportedOperation):\n                self._size = 0\n    while (start > self._size):\n        sleep(self._FLUSH_WAIT)\n    self._raw_flush(buffer, start, end)", "docstring": "Flush a buffer to a range of the file.\n\nMeant to be used asynchronously, used to provides parallel flushing of\nfile parts when applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.\nstart (int): Start of buffer position to flush.\nend (int): End of buffer position to flush.", "source": "codesearchnet"}
{"code": "def _apply_sparse(self, grad, var):\n    raise NotImplementedError()", "docstring": "Add ops to apply sparse gradients to `var`.\n\nThe IndexedSlices object passed to `grad` in this function is by default\npre-processed in `_apply_sparse_duplicate_indices` to remove duplicate\nindices (see its docstring for details). Optimizers which can tolerate or\nhave correct special cases for duplicate sparse indices may override\n`_apply_sparse_duplicate_indices` instead of this function, avoiding that\noverhead.\n\nArgs:\ngrad: `IndexedSlices`, with no repeated indices.\nvar: A `Variable` object.\n\nReturns:\nAn `Operation`.", "source": "github-repos"}
{"code": "def delete(self, uri):\n        \n        try:\n            self.connect(uri, method='DELETE')\n            return True\n        except urllib.error.HTTPError:\n            return False", "docstring": "Method deletes a Fedora Object in the repository\n\nArgs:\nuri(str): URI of Fedora Object", "source": "juraj-google-style"}
{"code": "def render(self, mode='human'):\n        \n        if mode == 'human':\n            \n            if self.viewer is None:\n                from ._image_viewer import ImageViewer\n                \n                if self.spec is None:\n                    \n                    caption = self._rom_path.split('/')[-1]\n                else:\n                    \n                    caption = self.spec.id\n                \n                self.viewer = ImageViewer(\n                    caption=caption,\n                    height=SCREEN_HEIGHT,\n                    width=SCREEN_WIDTH,\n                )\n            \n            self.viewer.show(self.screen)\n        elif mode == 'rgb_array':\n            return self.screen\n        else:\n            \n            render_modes = [repr(x) for x in self.metadata['render.modes']]\n            msg = 'valid render modes are: {}'.format(', '.join(render_modes))\n            raise NotImplementedError(msg)", "docstring": "Render the environment.\n\nArgs:\nmode (str): the mode to render with:\n- human: render to the current display\n- rgb_array: Return an numpy.ndarray with shape (x, y, 3),\nrepresenting RGB values for an x-by-y pixel image\n\nReturns:\na numpy array if mode is 'rgb_array', None otherwise", "source": "juraj-google-style"}
{"code": "def _build_mask_ds(mask, mask_offset):\n    mask_ds = tf.data.Dataset.from_tensor_slices(mask)\n    mask_ds = mask_ds.repeat()\n    mask_ds = mask_ds.skip(mask_offset)\n    return mask_ds", "docstring": "Build the mask dataset to indicate which element to skip.\n\nArgs:\nmask: `tf.Tensor`, binary mask to apply to all following elements. This\nmask should have a length 100.\nmask_offset: `tf.Tensor`, Integer specifying from how much the mask\nshould be shifted for the first element.\n\nReturns:\nmask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip\nand True for examples to keep.", "source": "codesearchnet"}
{"code": "def receive_datagram(self, data, address):\n    if (not self.app):\n        logger.debug('Packet received', address, data)\n        return False\n    try:\n        response = self.app.handle_message(data, address)\n    except Exception as err:\n        logger.error(((('Error processing message from ' + str(address)) + ':') + str(data)))\n        logger.error(traceback.format_exc())\n        return False\n    if response:\n        self.send_datagram(response, address)", "docstring": "Executes when UDP data has been received and sends the packet data\nto our app to process the request.\n\nArgs:\ndata (str): The raw serialized packet data received.\naddress (tuple): The address and port of the origin of the received\npacket. E.g. (address, port).\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def find(self, key, dynamic_default_value=None, name=None):\n    with tf.name_scope(name or '%s_lookup_table_find' % self._name):\n        key = tf.convert_to_tensor(key, dtype=self._key_dtype, name='key')\n        if dynamic_default_value is not None:\n            dynamic_default_value = tf.convert_to_tensor(dynamic_default_value, dtype=self._value_dtype, name='default_value')\n        value = gen_simple_hash_table_op.examples_simple_hash_table_find(self.resource_handle, key, dynamic_default_value if dynamic_default_value is not None else self._default_value)\n    return value", "docstring": "Looks up `key` in a table, outputs the corresponding value.\n\nThe `default_value` is used if key not present in the table.\n\nArgs:\nkey: Key to look up. Must match the table's key_dtype.\ndynamic_default_value: The value to use if the key is missing in the\ntable. If None (by default), the `table.default_value` will be used.\nname: A name for the operation (optional).\n\nReturns:\nA tensor containing the value in the same shape as `key` using the\ntable's value type.\n\nRaises:\nTypeError: when `key` do not match the table data types.", "source": "github-repos"}
{"code": "def _strip_debug_nodes(meta_graph_def: meta_graph_pb2.MetaGraphDef) -> None:\n\n    def erase_regular_node_attributes(node: node_def_pb2.NodeDef) -> None:\n        \n        attributes_to_remove = [attribute for attribute in node.attr.keys() if not attribute.startswith('_')]\n        for attribute in attributes_to_remove:\n            node.attr.pop(attribute)\n\n    def prune_all_non_t_attributes(node: node_def_pb2.NodeDef) -> None:\n        \n        if 'T' in node.attr:\n            t_value = node.attr['T']\n            node.ClearField('attr')\n            node.attr['T'].CopyFrom(t_value)\n        else:\n            node.ClearField('attr')\n\n    def is_control_input(name: str) -> str:\n        \n        return name and name[0] == '^'\n\n    def as_control_dep(name: str) -> str:\n        \n        return '^' + name.split(':')[0]\n\n    def maybe_do_strip(node: node_def_pb2.NodeDef) -> None:\n        \n        if node.op == 'Assert' or node.op == 'PrintV2':\n            node.op = 'NoOp'\n            erase_regular_node_attributes(node)\n            new_inputs = []\n            for inp in node.input:\n                if not is_control_input(inp):\n                    new_inputs.append(as_control_dep(inp))\n                else:\n                    new_inputs.append(inp)\n            node.ClearField('input')\n            node.input.extend(new_inputs)\n        elif node.op == 'CheckNumerics' or node.op == 'Print':\n            node.op = 'Identity'\n            prune_all_non_t_attributes(node)\n            for i in range(1, len(node.input)):\n                if not is_control_input(node.input[i]):\n                    node.input[i] = as_control_dep(node.input[i])\n    for node in meta_graph_def.graph_def.node:\n        maybe_do_strip(node)\n    for func in meta_graph_def.graph_def.library.function:\n        for node in func.node_def:\n            maybe_do_strip(node)", "docstring": "An experimental function to remove debug nodes from the final graph.\n\nThis function removes all Assert and CheckNumerics nodes from the meta_graph.\nIt strips the operators in both the nodes and in all of the function defs,\nwith the Assert ops being replaced by `NoOp`s and the CheckNumerics ops being\ntransformed into `Identity` ops. In addition to this, it creates control\ninputs for the nodes that are not relevant for the op. For more information\nabout control inputs please see go/how-tensors-flow#control-dependencies.\n\nArgs:\nmeta_graph_def: The meta_graph that will be exported.", "source": "github-repos"}
{"code": "def getall(self):\n    vrfs_re = re.compile('(?<=^vrf definition\\\\s)(\\\\w+)', re.M)\n    response = dict()\n    for vrf in vrfs_re.findall(self.config):\n        response[vrf] = self.get(vrf)\n    return response", "docstring": "Returns a dict object of all VRFs in the running-config\n\nReturns:\nA dict object of VRF attributes", "source": "codesearchnet"}
{"code": "def body(self, features):\n    \n    features[\"targets\"] = features[\"inputs\"]\n    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN\n\n    \n    inputs = tf.to_float(features[\"targets_raw\"])\n\n    \n    z = tf.random_uniform([self.hparams.batch_size,\n                           self.hparams.bottleneck_bits],\n                          minval=-1, maxval=1, name=\"z\")\n\n    \n    out_shape = common_layers.shape_list(inputs)[1:4]\n    g = self.generator(z, is_training, out_shape)\n\n    losses = self.losses(inputs, g)  \n\n    summary_g_image = tf.reshape(\n        g[0, :], [1] + common_layers.shape_list(inputs)[1:])\n    tf.summary.image(\"generated\", summary_g_image, max_outputs=1)\n\n    if is_training:  \n      return tf.zeros_like(inputs), losses\n    return tf.reshape(g, tf.shape(inputs)), losses", "docstring": "Body of the model.\n\nArgs:\nfeatures: a dictionary with the tensors.\n\nReturns:\nA pair (predictions, losses) where predictions is the generated image\nand losses is a dictionary of losses (that get added for the final loss).", "source": "juraj-google-style"}
{"code": "def delete(self, alias_name, timeout=(- 1)):\n    uri = ((self.URI + '/') + alias_name)\n    return self._client.delete(uri, timeout=timeout)", "docstring": "Revokes a certificate signed by the internal CA. If client certificate to be revoked is RabbitMQ_readonly,\nthen the internal CA root certificate, RabbitMQ client certificate and RabbitMQ server certificate will be\nregenerated. This will invalidate the previous version of RabbitMQ client certificate and the RabbitMQ server\nwill be restarted to read the latest certificates.\n\nArgs:\nalias_name (str): Alias name.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.", "source": "codesearchnet"}
{"code": "def PushEvent(self, event):\n    \n    macb_group_identifier, content_identifier = self._GetEventIdentifiers(event)\n\n    \n    \n    heap_values = (macb_group_identifier or '', content_identifier, event)\n    heapq.heappush(self._heap, heap_values)", "docstring": "Pushes an event onto the heap.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def export_to_dir(network, export_dir):\n    package_path = ding0.__path__[0]\n    network.export_to_csv_folder(os.path.join(package_path, 'output', 'debug', 'grid', export_dir))", "docstring": "Exports PyPSA network as CSV files to directory\n\nArgs:\nnetwork: pypsa.Network\nexport_dir: str\nSub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.", "source": "codesearchnet"}
{"code": "def outer_id(self, value):\n        \n        if value == self._defaults['outerId'] and 'outerId' in self._values:\n            del self._values['outerId']\n        else:\n            self._values['outerId'] = value", "docstring": "The outer_id property.\n\nArgs:\nvalue (int). the property value.", "source": "juraj-google-style"}
{"code": "def api_info(self, headers=None):\n        \n        return self.transport.forward_request(\n            method='GET',\n            path=self.api_prefix,\n            headers=headers,\n        )", "docstring": "Retrieves information provided by the API root endpoint\n``'/api/v1'``.\n\nArgs:\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: Details of the HTTP API provided by the BigchainDB\nserver.", "source": "juraj-google-style"}
{"code": "def _ParseIndexTable(self, file_object):\n    cache_address_map = self._GetDataTypeMap('uint32le')\n    file_offset = file_object.get_offset()\n    cache_address_data = file_object.read(4)\n    while (len(cache_address_data) == 4):\n        try:\n            value = self._ReadStructureFromByteStream(cache_address_data, file_offset, cache_address_map)\n        except (ValueError, errors.ParseError) as exception:\n            raise errors.ParseError('Unable to map cache address at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n        if value:\n            cache_address = CacheAddress(value)\n            self.index_table.append(cache_address)\n        file_offset += 4\n        cache_address_data = file_object.read(4)", "docstring": "Parses the index table.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\nParseError: if the index table cannot be read.", "source": "codesearchnet"}
{"code": "def ToJsonString(self):\n    if ((self.seconds < 0) or (self.nanos < 0)):\n        result = '-'\n        seconds = ((- self.seconds) + int(((0 - self.nanos) \n        nanos = ((0 - self.nanos) % 1000000000.0)\n    else:\n        result = ''\n        seconds = (self.seconds + int((self.nanos \n        nanos = (self.nanos % 1000000000.0)\n    result += ('%d' % seconds)\n    if ((nanos % 1000000000.0) == 0):\n        return (result + 's')\n    if ((nanos % 1000000.0) == 0):\n        return (result + ('.%03ds' % (nanos / 1000000.0)))\n    if ((nanos % 1000.0) == 0):\n        return (result + ('.%06ds' % (nanos / 1000.0)))\n    return (result + ('.%09ds' % nanos))", "docstring": "Converts Duration to string format.\n\nReturns:\nA string converted from self. The string format will contains\n3, 6, or 9 fractional digits depending on the precision required to\nrepresent the exact Duration value. For example: \"1s\", \"1.010s\",\n\"1.000000100s\", \"-3.100s\"", "source": "codesearchnet"}
{"code": "def SetOption(self, section, option, value, overwrite=True):\n    \n    if not overwrite and self.config.has_option(section, option):\n      return\n    if not self.config.has_section(section):\n      self.config.add_section(section)\n    self.config.set(section, option, str(value))", "docstring": "Set the value of an option in the config file.\n\nArgs:\nsection: string, the section of the config file to check.\noption: string, the option to set the value of.\nvalue: string, the value to set the option.\noverwrite: bool, True to overwrite an existing value in the config file.", "source": "juraj-google-style"}
{"code": "def simulate(self, action):\n    \n    with tf.name_scope('environment/simulate'):\n      if action.dtype in (tf.float16, tf.float32, tf.float64):\n        action = tf.check_numerics(action, 'action')\n      observ_dtype = self._parse_dtype(self._batch_env.observation_space)\n      observ, reward, done = tf.py_func(\n          lambda a: self._batch_env.step(a)[:3], [action],\n          [observ_dtype, tf.float32, tf.bool], name='step')\n      observ = tf.check_numerics(observ, 'observ')\n      reward = tf.check_numerics(reward, 'reward')\n      return tf.group(\n          self._observ.assign(observ),\n          self._action.assign(action),\n          self._reward.assign(reward),\n          self._done.assign(done))", "docstring": "Step the batch of environments.\n\nThe results of the step can be accessed from the variables defined below.\n\nArgs:\naction: Tensor holding the batch of actions to apply.\n\nReturns:\nOperation.", "source": "juraj-google-style"}
{"code": "def last(series, order_by=None):\n    \n\n    if order_by is not None:\n        series = order_series_by(series, order_by)\n    last_s = series.iloc[series.size - 1]\n    return last_s", "docstring": "Returns the last value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.\n\nKwargs:\norder_by: a pandas.Series or list of series (can be symbolic) to order\nthe input series by before summarization.", "source": "juraj-google-style"}
{"code": "def plot(self, **plot_kwargs: Any) -> None:\n        \n        fig = plt.figure()\n        plt.plot(self._num_cfds_seq, self._gnd_state_probs, 'ro-',\n                 figure=fig, **plot_kwargs)\n        plt.xlabel(r\"Number of Cliffords\", figure=fig)\n        plt.ylabel('Ground State Probability', figure=fig)\n        fig.show()", "docstring": "Plots the average ground state probability vs the number of\nCliffords in the RB study.\n\nArgs:\n**plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.", "source": "juraj-google-style"}
{"code": "def batch_slice(dist, params_event_ndims, params_overrides, slices):\n    if (not isinstance(slices, collections.Sequence)):\n        slices = (slices,)\n    (orig_dist, slice_overrides_seq) = getattr(dist, PROVENANCE_ATTR, (dist, []))\n    slice_overrides_seq += [(slices, params_overrides)]\n    dist = _apply_slice_sequence(orig_dist, params_event_ndims, slice_overrides_seq)\n    setattr(dist, PROVENANCE_ATTR, (orig_dist, slice_overrides_seq))\n    return dist", "docstring": "Slices `dist` along its batch dimensions. Helper for tfd.Distribution.\n\nArgs:\ndist: A `tfd.Distribution` instance.\nparams_event_ndims: A `dict` of `str->int` indicating the number of\ndimensions of a given parameter required to parameterize a single event.\nparams_overrides: A `dict` of parameter overrides. (e.g. from\n`Distribution.copy`).\nslices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple`\nthereof. (e.g. the argument of a `__getitem__` method).\n\nReturns:\nnew_dist: A batch-sliced `tfd.Distribution`.", "source": "codesearchnet"}
{"code": "def sg_lookup(tensor, opt):\n    assert (opt.emb is not None), 'emb is mandatory.'\n    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)", "docstring": "r\"\"\"Looks up the `tensor`, which is the embedding matrix.\n\nArgs:\ntensor: A tensor ( automatically given by chain )\nopt:\nemb: A 2-D `Tensor`. An embedding matrix.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "codesearchnet"}
{"code": "def take_ownership(self, **kwargs):\n    path = ('%s/%s/take_ownership' % (self.manager.path, self.get_id()))\n    server_data = self.manager.gitlab.http_post(path, **kwargs)\n    self._update_attrs(server_data)", "docstring": "Update the owner of a pipeline schedule.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabOwnershipError: If the request failed", "source": "codesearchnet"}
{"code": "def gene_by_alias(self, symbol, build='37'):\n    res = self.hgnc_collection.find({'hgnc_symbol': symbol, 'build': build})\n    if (res.count() == 0):\n        res = self.hgnc_collection.find({'aliases': symbol, 'build': build})\n    return res", "docstring": "Return a iterable with hgnc_genes.\n\nIf the gene symbol is listed as primary the iterable will only have\none result. If not the iterable will include all hgnc genes that have\nthe symbol as an alias.\n\nArgs:\nsymbol(str)\nbuild(str)\n\nReturns:\nres(pymongo.Cursor(dict))", "source": "codesearchnet"}
{"code": "def delete(self, model_name):\n    \n    full_name = model_name\n    if not model_name.startswith('projects/'):\n      full_name = ('projects/%s/models/%s' % (self._project_id, model_name))\n    response = self._api.projects().models().delete(name=full_name).execute()\n    if 'name' not in response:\n      raise Exception('Invalid response from service. \"name\" is not found.')\n    _util.wait_for_long_running_operation(response['name'])", "docstring": "Delete a model.\n\nArgs:\nmodel_name: the name of the model. It can be a model full name\n(\"projects/[project_id]/models/[model_name]\") or just [model_name].", "source": "juraj-google-style"}
{"code": "def _slice_ragged_row_dimension(rt_input, row_key):\n    if row_key.start is None and row_key.stop is None and (row_key.step is None):\n        return rt_input\n    new_starts = rt_input.row_splits[:-1][row_key]\n    new_limits = rt_input.row_splits[1:][row_key]\n    zero_pad = array_ops.zeros([1], rt_input.row_splits.dtype)\n    if row_key.step is None or row_key.step == 1:\n        new_splits = array_ops.concat([zero_pad[array_ops.size(new_starts):], new_starts[:1], new_limits], axis=0)\n        values_start = new_splits[0]\n        values_limit = new_splits[-1]\n        return ragged_tensor.RaggedTensor.from_row_splits(rt_input.values[values_start:values_limit], new_splits - values_start, validate=False)\n    else:\n        return _build_ragged_tensor_from_value_ranges(new_starts, new_limits, 1, rt_input.values)", "docstring": "Slice the outer dimension of `rt_input` according to the given `slice`.\n\nArgs:\nrt_input: The `RaggedTensor` to slice.\nrow_key: The `slice` object that should be used to slice `rt_input`.\n\nReturns:\nA `RaggedTensor` containing the indicated slice of `rt_input`.", "source": "github-repos"}
{"code": "def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):\n    line = clean_lines.elided[linenum]\n    match = Search(pattern, line)\n    if (not match):\n        return False\n    context = line[0:(match.start(1) - 1)]\n    if Match('.*\\\\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\\\\s*$', context):\n        return False\n    if (linenum > 0):\n        for i in xrange((linenum - 1), max(0, (linenum - 5)), (- 1)):\n            context = (clean_lines.elided[i] + context)\n    if Match('.*\\\\b[_A-Z][_A-Z0-9]*\\\\s*\\\\((?:\\\\([^()]*\\\\)|[^()])*$', context):\n        return False\n    if (context.endswith(' operator++') or context.endswith(' operator--')):\n        return False\n    remainder = line[match.end(0):]\n    if Match('^\\\\s*(?:;|const\\\\b|throw\\\\b|final\\\\b|override\\\\b|[=>{),]|->)', remainder):\n        return False\n    error(filename, linenum, 'readability/casting', 4, ('Using C-style cast.  Use %s<%s>(...) instead' % (cast_type, match.group(1))))\n    return True", "docstring": "Checks for a C-style cast by looking for the pattern.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\ncast_type: The string for the C++ cast to recommend.  This is either\nreinterpret_cast, static_cast, or const_cast, depending.\npattern: The regular expression used to find C-style casts.\nerror: The function to call with any errors found.\n\nReturns:\nTrue if an error was emitted.\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def in_labelset(xmrs, nodeids, label=None):\n    \n    nodeids = set(nodeids)\n    if label is None:\n        label = xmrs.ep(next(iter(nodeids))).label\n    return nodeids.issubset(xmrs._vars[label]['refs']['LBL'])", "docstring": "Test if all nodeids share a label.\n\nArgs:\nnodeids: iterable of nodeids\nlabel (str, optional): the label that all nodeids must share\nReturns:\nbool: `True` if all nodeids share a label, otherwise `False`", "source": "juraj-google-style"}
{"code": "def to_schema(self, entry: dict, parents: dict={}) -> list:\n    bigquery_schema = []\n    for key, value in entry.items():\n        if not isinstance(value, dict):\n            continue\n        if '$ref' in value:\n            parents.setdefault(value['$ref'], 0)\n            if parents[value['$ref']] < self.recursion_depth:\n                parents[value['$ref']] += 1\n                bigquery_schema.append({'name': key, 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': self.to_schema(self.api_document['schemas'][value['$ref']]['properties'], parents)})\n            parents[value['$ref']] -= 1\n        elif 'items' in value:\n            if '$ref' in value['items']:\n                parents.setdefault(value['items']['$ref'], 0)\n                if parents[value['items']['$ref']] < self.recursion_depth:\n                    parents[value['items']['$ref']] += 1\n                    bigquery_schema.append({'name': key, 'type': 'RECORD', 'mode': 'REPEATED', 'fields': self.to_schema(self.api_document['schemas'][value['items']['$ref']]['properties'], parents)})\n                    parents[value['items']['$ref']] -= 1\n            elif value['items']['type'] == 'object':\n                bigquery_schema.append({'name': key, 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': self.to_schema(value['items'], parents)})\n            else:\n                bigquery_schema.append({'description': ','.join(value['items'].get('enum', []))[:DESCRIPTION_LENGTH], 'name': key, 'type': self.to_type(value['items']), 'mode': 'REPEATED'})\n        else:\n            bigquery_schema.append({'description': ','.join(value.get('enum', []))[:DESCRIPTION_LENGTH], 'name': key, 'type': self.to_type(value), 'mode': 'NULLABLE'})\n    return bigquery_schema", "docstring": "Convert a Discovery API Document schema to a BigQuery schema.\n\nRecursively crawls the discovery document reference tree to build schema.\nLeverages recursion depth passed in constructor to stop if necessary.\n\nArgs:\nentry: a discovery document schema definition.\nparents: used to track recursion depth for a specific schema branch\n\nReturns:\nA BigQuery schema object.", "source": "github-repos"}
{"code": "def _update_hasher(hasher, data, types=True):\n    if isinstance(data, (tuple, list, zip)):\n        needs_iteration = True\n    else:\n        needs_iteration = any((check(data) for check in _HASHABLE_EXTENSIONS.iterable_checks))\n    if needs_iteration:\n        SEP = b'_,_'\n        ITER_PREFIX = b'_[_'\n        ITER_SUFFIX = b'_]_'\n        iter_ = iter(data)\n        hasher.update(ITER_PREFIX)\n        try:\n            for item in iter_:\n                (prefix, hashable) = _convert_to_hashable(item, types)\n                binary_data = ((prefix + hashable) + SEP)\n                hasher.update(binary_data)\n        except TypeError:\n            _update_hasher(hasher, item, types)\n            for item in iter_:\n                _update_hasher(hasher, item, types)\n                hasher.update(SEP)\n        hasher.update(ITER_SUFFIX)\n    else:\n        (prefix, hashable) = _convert_to_hashable(data, types)\n        binary_data = (prefix + hashable)\n        hasher.update(binary_data)", "docstring": "Converts `data` into a byte representation and calls update on the hasher\n`hashlib.HASH` algorithm.\n\nArgs:\nhasher (HASH): instance of a hashlib algorithm\ndata (object): ordered data with structure\ntypes (bool): include type prefixes in the hash\n\nExample:\n>>> hasher = hashlib.sha512()\n>>> data = [1, 2, ['a', 2, 'c']]\n>>> _update_hasher(hasher, data)\n>>> print(hasher.hexdigest()[0:8])\ne2c67675\n\n2ba8d82b", "source": "codesearchnet"}
{"code": "def _get_syslog_format(event_type):\n    syslog_format_template = get_template('syslog_format.json')\n    fmt = syslog_format_template.render(event_type=event_type, host=dbconfig.get('instance_name', default='local'))\n    return json.dumps(json.loads(fmt))", "docstring": "Take an event type argument and return a python logging format\n\nIn order to properly format the syslog messages to current standard, load the template and perform necessary\nreplacements and return the string.\n\nArgs:\nevent_type (str): Event type name\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "def _check_status(cls, response_json):\n    status = response_json['status']\n    msg = response_json['msg']\n    if (status == 400):\n        raise BadRequestException(msg)\n    elif (status == 403):\n        raise PermissionDeniedException(msg)\n    elif (status == 404):\n        raise FileNotFoundException(msg)\n    elif (status == 451):\n        raise UnavailableForLegalReasonsException(msg)\n    elif (status == 509):\n        raise BandwidthUsageExceeded(msg)\n    elif (status >= 500):\n        raise ServerErrorException(msg)", "docstring": "Check the status of the incoming response, raise exception if status is not 200.\n\nArgs:\nresponse_json (dict): results of the response of the GET request.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _subscribe(tensor, side_effects, control_cache):\n    if not tensor.dtype.is_numpy_compatible:\n        logging.debug('Tensor {} has an un-supported {} type and cannot be subscribed.'.format(tensor.name, tensor.dtype))\n        return tensor\n    if _is_subscribed_identity(tensor):\n        return _subscribe_extend(tensor, side_effects)\n    name_scope = tensor.op.name + '/subscription/Identity'\n    consumers = tensor.consumers()\n    matching_ops = [op for op in consumers if op.name.startswith(name_scope)]\n    assert len(matching_ops) <= 1, 'Op {} must only have one subscription op connected to it'.format(tensor.op.name)\n    if len(matching_ops) == 1:\n        candidate_tensor = matching_ops[0].outputs[0]\n        if _is_subscribed_identity(candidate_tensor):\n            return _subscribe_extend(candidate_tensor, side_effects)\n    return _subscribe_new(tensor, side_effects, control_cache)", "docstring": "Helper method that subscribes a single tensor to a list of side_effects.\n\nThis method will check if the given tensor has already been subscribed or if\nit's a tensor returned by a previous call to `subscribe()` and, if so, will\nreuse the existing identity op, appending the given side effects to the list\nof existing ones.\n\nArgs:\ntensor: The `tf.Tensor` to be subscribed.\nside_effects: List of side_effect functions, see subscribe for details.\ncontrol_cache: `_ControlOutputCache` helper to get control_outputs faster.\n\nReturns:\nThe modified replacement to the passed in tensor which triggers the side\neffects or the given tensor, if it was already been subscribed.", "source": "github-repos"}
{"code": "def load_model(\n            self, the_metamodel, filename, is_main_model, encoding='utf-8',\n            add_to_local_models=True):\n        \n\n        if not self.local_models.has_model(filename):\n            if self.all_models.has_model(filename):\n                new_model = self.all_models.filename_to_model[filename]\n            else:\n                \n                \n                \n                new_model = the_metamodel.internal_model_from_file(\n                    filename, pre_ref_resolution_callback=lambda\n                    other_model: self.pre_ref_resolution_callback(other_model),\n                    is_main_model=is_main_model, encoding=encoding)\n                self.all_models.filename_to_model[filename] = new_model\n            \n            if add_to_local_models:\n                self.local_models.filename_to_model[filename] = new_model\n        assert self.all_models.has_model(filename)  \n        return self.all_models.filename_to_model[filename]", "docstring": "load a single model\n\nArgs:\nthe_metamodel: the metamodel used to load the model\nfilename: the model to be loaded (if not cached)\n\nReturns:\nthe loaded/cached model", "source": "juraj-google-style"}
{"code": "def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):\n    symbolic_weights = getattr(optimizer, 'weights')\n    if symbolic_weights:\n        weights_group = hdf5_group.create_group('optimizer_weights')\n        weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]\n        save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)\n        weight_values = backend.batch_get_value(symbolic_weights)\n        for name, val in zip(weight_names, weight_values):\n            param_dset = weights_group.create_dataset(name, val.shape, dtype=val.dtype)\n            if not val.shape:\n                param_dset[()] = val\n            else:\n                param_dset[:] = val", "docstring": "Saves optimizer weights of a optimizer to a HDF5 group.\n\nArgs:\nhdf5_group: HDF5 group.\noptimizer: optimizer instance.", "source": "github-repos"}
{"code": "def link_asset_content_key(access_token, asset_id, encryptionkey_id, ams_redirected_rest_endpoint):\n    \n    path = '/Assets'\n    full_path = ''.join([path, \"('\", asset_id, \"')\", \"/$links/ContentKeys\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    uri = ''.join([ams_redirected_rest_endpoint, 'ContentKeys', \"('\", encryptionkey_id, \"')\"])\n    body = '{\"uri\": \"' + uri + '\"}'\n    return do_ams_post(endpoint, full_path_encoded, body, access_token)", "docstring": "Link Media Service Asset and Content Key.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nasset_id (str): A Media Service Asset ID.\nencryption_id (str): A Media Service Encryption ID.\nams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _run_in_hypothesis(self, mma, w_string, index):\n        \n        state = mma[0]\n        for i in range(index):\n            for arc in state:\n                if mma.isyms.find(arc.ilabel) == w_string[i]:\n                    state = mma[arc.nextstate]\n                    s_index = arc.nextstate\n\n        \n        access_string = self.observation_table.sm_vector[s_index]\n        logging.debug(\n            'Access string for %d: %s - %d ',\n            index,\n            access_string,\n            s_index)\n\n        return access_string", "docstring": "Run the string in the hypothesis automaton for index steps and then\nreturn the access string for the state reached concatanated with the\nrest of the string w.\nArgs:\nmma (DFA): The hypothesis automaton\nw_string (str): The examined string to be consumed\nindex (int): The index value for selecting the prefix of w\nReturn:\nstr: The access string", "source": "juraj-google-style"}
{"code": "def projection(self, variables):\n    variables = set(variables)\n    if (not variables.issubset(self.variables)):\n        raise ValueError('Cannot project to variables not in the constraint.')\n    idxs = [i for (i, v) in enumerate(self.variables) if (v in variables)]\n    configurations = frozenset((tuple((config[i] for i in idxs)) for config in self.configurations))\n    variables = tuple((self.variables[i] for i in idxs))\n    return self.from_configurations(configurations, variables, self.vartype)", "docstring": "Create a new constraint that is the projection onto a subset of the variables.\n\nArgs:\nvariables (iterable):\nSubset of the constraint's variables.\n\nReturns:\n:obj:`.Constraint`: A new constraint over a subset of the variables.\n\nExamples:\n\n>>> import dwavebinarycsp\n...\n>>> const = dwavebinarycsp.Constraint.from_configurations([(0, 0), (0, 1)],\n...                                                       ['a', 'b'],\n...                                                       dwavebinarycsp.BINARY)\n>>> proj = const.projection(['a'])\n>>> proj.variables\n['a']\n>>> proj.configurations\n{(0,)}", "source": "codesearchnet"}
{"code": "def top_and_tail(a):\n    \n    if np.all(np.isnan(a)):\n        return np.array([])\n    nans = np.where(~np.isnan(a))[0]\n    last = None if nans[-1]+1 == a.size else nans[-1]+1\n    return a[nans[0]:last]", "docstring": "Remove the NaNs from the top and tail (only) of a well log.\n\nArgs:\na (ndarray): An array.\nReturns:\nndarray: The top and tailed array.", "source": "juraj-google-style"}
{"code": "def restore_op(self, filename_tensor, saveable, preferred_shard):\n    tensors = []\n    for spec in saveable.specs:\n        tensors.append(io_ops.restore_v2(filename_tensor, [spec.name], [spec.slice_spec], [spec.dtype])[0])\n    return tensors", "docstring": "Create ops to restore 'saveable'.\n\nThis is intended to be overridden by subclasses that want to generate\ndifferent Ops.\n\nArgs:\nfilename_tensor: String Tensor.\nsaveable: A BaseSaverBuilder.SaveableObject object.\npreferred_shard: Int.  Shard to open first when loading a sharded file.\n\nReturns:\nA list of Tensors resulting from reading 'saveable' from\n'filename'.", "source": "github-repos"}
{"code": "def transform(self, X, y=None):\n        \n        word_ids = [self._word_vocab.doc2id(doc) for doc in X]\n        word_ids = pad_sequences(word_ids, padding='post')\n\n        char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]\n        char_ids = pad_nested_sequences(char_ids)\n\n        character_ids = batch_to_ids(X)\n        elmo_embeddings = self._elmo(character_ids)['elmo_representations'][1]\n        elmo_embeddings = elmo_embeddings.detach().numpy()\n\n        features = [word_ids, char_ids, elmo_embeddings]\n\n        if y is not None:\n            y = [self._label_vocab.doc2id(doc) for doc in y]\n            y = pad_sequences(y, padding='post')\n            y = to_categorical(y, self.label_size).astype(int)\n            \n            \n            \n            \n            \n            \n            y = y if len(y.shape) == 3 else np.expand_dims(y, axis=0)\n            return features, y\n        else:\n            return features", "docstring": "Transform documents to document ids.\n\nUses the vocabulary learned by fit.\n\nArgs:\nX : iterable\nan iterable which yields either str, unicode or file objects.\ny : iterabl, label strings.\n\nReturns:\nfeatures: document id matrix.\ny: label id matrix.", "source": "juraj-google-style"}
{"code": "def add_to_screen(self, screen_width, screen):\n        \n        for lineno, fields in enumerate(self.line_fields):\n            for left, field in self.compute_positions(screen_width, fields):\n                logger.debug(\n                    \"Adding field %s to screen %s at x=%d->%d, y=%d\",\n                    field, screen.ref, left, left + field.width - 1, 1 + lineno,\n                )\n\n                self.widgets[field] = field.add_to_screen(screen, left, 1 + lineno)\n                self.register_hooks(field)", "docstring": "Add the pattern to a screen.\n\nAlso fills self.widgets.\n\nArgs:\nscreen_width (int): the width of the screen\nscreen (lcdprod.Screen): the screen to fill.", "source": "juraj-google-style"}
{"code": "def _ParseInfo2Record(\n      self, parser_mediator, file_object, record_offset, record_size):\n    \n    record_data = self._ReadData(file_object, record_offset, record_size)\n\n    record_map = self._GetDataTypeMap('recycler_info2_file_entry')\n\n    try:\n      record = self._ReadStructureFromByteStream(\n          record_data, record_offset, record_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to map record data at offset: 0x{0:08x} with error: '\n          '{1!s}').format(record_offset, exception))\n\n    codepage = parser_mediator.codepage or 'ascii'\n\n    \n    \n    ascii_filename = record.original_filename.split(b'\\x00')[0]\n\n    try:\n      ascii_filename = ascii_filename.decode(codepage)\n    except UnicodeDecodeError:\n      ascii_filename = ascii_filename.decode(codepage, errors='replace')\n\n      parser_mediator.ProduceExtractionWarning(\n          'unable to decode original filename.')\n\n    unicode_filename = None\n    if record_size > 280:\n      record_offset += 280\n      utf16_string_map = self._GetDataTypeMap(\n          'recycler_info2_file_entry_utf16le_string')\n\n      try:\n        unicode_filename = self._ReadStructureFromByteStream(\n            record_data[280:], record_offset, utf16_string_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError((\n            'Unable to map record data at offset: 0x{0:08x} with error: '\n            '{1!s}').format(record_offset, exception))\n\n      unicode_filename = unicode_filename.rstrip('\\x00')\n\n    if record.deletion_time == 0:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n      date_time = dfdatetime_filetime.Filetime(\n          timestamp=record.deletion_time)\n\n    event_data = WinRecycleBinEventData()\n    event_data.drive_number = record.drive_number\n    event_data.original_filename = unicode_filename or ascii_filename\n    event_data.file_size = record.original_file_size\n    event_data.offset = record_offset\n    event_data.record_index = record.index\n\n    if ascii_filename != unicode_filename:\n      event_data.short_filename = ascii_filename\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_DELETED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an INFO-2 record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\nrecord_offset (int): record offset.\nrecord_size (int): record size.\n\nRaises:\nParseError: if the record cannot be read.", "source": "juraj-google-style"}
{"code": "def merge_styles(inline_style, new_styles, classes, remove_unset_properties=False):\n    styles = OrderedDict([('', OrderedDict())])\n    for pc in set(classes):\n        styles[pc] = OrderedDict()\n    for (i, style) in enumerate(new_styles):\n        for (k, v) in style:\n            styles[classes[i]][k] = v\n    if inline_style:\n        for (k, v) in csstext_to_pairs(inline_style):\n            styles[''][k] = v\n    normal_styles = []\n    pseudo_styles = []\n    for (pseudoclass, kv) in styles.items():\n        if remove_unset_properties:\n            kv = OrderedDict(((k, v) for (k, v) in kv.items() if (not (v.lower() == 'unset'))))\n        if (not kv):\n            continue\n        if pseudoclass:\n            pseudo_styles.append(('%s{%s}' % (pseudoclass, '; '.join((('%s:%s' % (k, v)) for (k, v) in kv.items())))))\n        else:\n            normal_styles.append('; '.join((('%s:%s' % (k, v)) for (k, v) in kv.items())))\n    if pseudo_styles:\n        all_styles = (([('{%s}' % ''.join(normal_styles))] + pseudo_styles) if normal_styles else pseudo_styles)\n    else:\n        all_styles = normal_styles\n    return ' '.join(all_styles).strip()", "docstring": "This will merge all new styles where the order is important\nThe last one will override the first\nWhen that is done it will apply old inline style again\nThe old inline style is always important and override\nall new ones. The inline style must be valid.\n\nArgs:\ninline_style(str): the old inline style of the element if there\nis one\nnew_styles: a list of new styles, each element should be\na list of tuple\nclasses: a list of classes which maps new_styles, important!\nremove_unset_properties(bool): Allow us to remove certain CSS\nproperties with rules that set their value to 'unset'\n\nReturns:\nstr: the final style", "source": "codesearchnet"}
{"code": "def create_run(cmd, project, exp, grp):\n    from benchbuild.utils import schema as s\n    session = s.Session()\n    run = s.Run(command=str(cmd), project_name=project.name, project_group=project.group, experiment_name=exp, run_group=str(grp), experiment_group=project.experiment.id)\n    session.add(run)\n    session.commit()\n    return (run, session)", "docstring": "Create a new 'run' in the database.\n\nThis creates a new transaction in the database and creates a new\nrun in this transaction. Afterwards we return both the transaction as\nwell as the run itself. The user is responsible for committing it when\nthe time comes.\n\nArgs:\ncmd: The command that has been executed.\nprj: The project this run belongs to.\nexp: The experiment this run belongs to.\ngrp: The run_group (uuid) we blong to.\n\nReturns:\nThe inserted tuple representing the run and the session opened with\nthe new run. Don't forget to commit it at some point.", "source": "codesearchnet"}
{"code": "def mount(self, app=None):\n        \n        for endpoint in self._routes:\n            endpoint.register_app(app)\n\n        return self", "docstring": "Mounts all registered routes to a bottle.py application instance.\n\nArgs:\napp (instance): A `bottle.Bottle()` application instance.\n\nReturns:\nThe Router instance (for chaining purposes).", "source": "juraj-google-style"}
{"code": "def __init__(self, text: str, sn: \"DataNode\"):\n        \n        super().__init__(text)\n        self.schema_node = sn", "docstring": "Extend the superclass method.\n\nArgs:\nsn: Schema node from which the path starts.", "source": "juraj-google-style"}
{"code": "def plot_chmap(cube, kidid, ax=None, **kwargs):\n    if (ax is None):\n        ax = plt.gca()\n    index = np.where((cube.kidid == kidid))[0]\n    if (len(index) == 0):\n        raise KeyError('Such a kidid does not exist.')\n    index = int(index)\n    im = ax.pcolormesh(cube.x, cube.y, cube[(:, :, index)].T, **kwargs)\n    ax.set_xlabel('x')\n    ax.set_ylabel('y')\n    ax.set_title('intensity map ch \n    return im", "docstring": "Plot an intensity map.\n\nArgs:\ncube (xarray.DataArray): Cube which the spectrum information is included.\nkidid (int): Kidid.\nax (matplotlib.axes): Axis the figure is plotted on.\nkwargs (optional): Plot options passed to ax.imshow().", "source": "codesearchnet"}
{"code": "def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):\n    summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)\n    return self._resource_apply_sparse(summed_grad, handle, unique_indices)", "docstring": "Add ops to apply sparse gradients to `handle`, with repeated indices.\n\nOptimizers which override this method must deal with repeated indices. See\nthe docstring of `_apply_sparse_duplicate_indices` for details. By default\nthe correct behavior, to sum non-unique indices and their associated\ngradients, is enforced by first pre-processing `grad` and `indices` and\npassing them on to `_resource_apply_sparse`. Optimizers which deal correctly\nwith duplicate indices may instead override this method to avoid the\noverhead of summing.\n\nArgs:\ngrad: a `Tensor` representing the gradient for the affected indices.\nhandle: a `Tensor` of dtype `resource` which points to the variable\nto be updated.\nindices: a `Tensor` of integral type representing the indices for\nwhich the gradient is nonzero. Indices may be repeated.\n\nReturns:\nAn `Operation` which updates the value of the variable.", "source": "github-repos"}
{"code": "def calculate_embedding(self, batch_image_bytes):\n    \n    return self.tf_session.run(\n        self.embedding, feed_dict={self.input_jpeg: batch_image_bytes})", "docstring": "Get the embeddings for a given JPEG image.\n\nArgs:\nbatch_image_bytes: As if returned from [ff.read() for ff in file_list].\n\nReturns:\nThe Inception embeddings (bottleneck layer output)", "source": "juraj-google-style"}
{"code": "def _convert_concrete_functions_to_saved_model(self, output_dir):\n    if len(self._funcs) == 0:\n        raise ValueError('No ConcreteFunction is specified.')\n    if not self.experimental_lower_to_saved_model:\n        return (None, None, None)\n    if not self._trackable_obj or isinstance(self._trackable_obj, (_function.ConcreteFunction, _def_function.Function)):\n        return (None, None, None)\n    signatures = {}\n    signature_keys = []\n    try:\n        if len(self._funcs) == 1:\n            signatures[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = self._funcs[0]\n            signature_keys = [_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n        else:\n            for func in self._funcs:\n                signatures[func.graph.name] = func\n                signature_keys.append(func.graph.name)\n        _save.save(self._trackable_obj, output_dir, signatures=signatures, options=_save_options.SaveOptions(save_debug_info=True))\n    except Exception:\n        return (None, None, None)\n    self.saved_model_dir = output_dir\n    self._saved_model_tags = set([_tag_constants.SERVING])\n    self._saved_model_exported_names = signature_keys\n    self._parse_saved_model_args(always_enable_saved_model_import=True)\n    if self.saved_model_dir:\n        graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags)\n        self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)\n        return (graph_def, input_tensors, output_tensors)\n    return (None, None, None)", "docstring": "Save concrete functions to the SavedModel format.\n\nArgs:\noutput_dir: The output directory to save the SavedModel.\n\nReturns:\ngraph_def: The frozen GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.", "source": "github-repos"}
{"code": "def series_with_permutation(self, other):\n    combined_permutation = tuple([self.permutation[p] for p in other.permutation])\n    return CPermutation.create(combined_permutation)", "docstring": "Compute the series product with another channel permutation circuit\n\nArgs:\nother (CPermutation):\n\nReturns:\nCircuit: The composite permutation circuit (could also be the\nidentity circuit for n channels)", "source": "codesearchnet"}
{"code": "def sheets_clear(config, auth, sheet_url_or_name, sheet_tab, sheet_range):\n    if config.verbose:\n        print('SHEETS CLEAR', sheet_url_or_name, sheet_tab, sheet_range)\n    sheet_id = sheets_id(config, auth, sheet_url_or_name)\n    if sheet_id:\n        API_Sheets(config, auth).spreadsheets().values().clear(spreadsheetId=sheet_id, range=sheets_tab_range(sheet_tab, sheet_range), body={}).execute()\n    else:\n        raise ValueError('Sheet does not exist for %s: %s' % (config, auth, sheet_url_or_name))", "docstring": "Clear a sheet in the specified range.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nurl_or_name - one of: URL, document title, or id\nsheet_tab - name of tab to get id for\nsheet_range - A1 notation or blank if whole sheet\n\nNo Return", "source": "github-repos"}
{"code": "def site_specific_nn_occupation( self ):\n        \n        to_return = { l : 0 for l in set( ( site.label for site in self.p_neighbours ) ) }\n        for site in self.p_neighbours:\n            if site.is_occupied:\n             to_return[ site.label ] += 1\n        return to_return", "docstring": "Returns the number of occupied nearest neighbour sites, classified by site type.\n\nArgs:\nNone\n\nReturns:\n(Dict(Str:Int)): Dictionary of nearest-neighbour occupied site numbers, classified by site label, e.g. { 'A' : 2, 'B' : 1 }.", "source": "juraj-google-style"}
{"code": "def _parse_method_block_line(self, instrumentation_block, line):\n    if line.startswith(_InstrumentationStructurePrefixes.STATUS):\n        instrumentation_block.set_key(_InstrumentationStructurePrefixes.STATUS, line)\n        return instrumentation_block\n    elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):\n        instrumentation_block.set_status_code(line)\n        return self._transition_instrumentation_block(instrumentation_block)\n    elif line.startswith(_InstrumentationStructurePrefixes.RESULT):\n        instrumentation_block.set_key(_InstrumentationStructurePrefixes.RESULT, line)\n        return self._parse_result_line(self._transition_instrumentation_block(instrumentation_block, new_state=_InstrumentationBlockStates.RESULT), line)\n    else:\n        instrumentation_block.add_value(line)\n        return instrumentation_block", "docstring": "Parses the instrumnetation method block's line.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, the current\ninstrumentation method block.\nline: string, the raw instrumentation output line to parse.\n\nReturns:\nThe next instrumentation block, which should be used to continue\nparsing instrumentation output.", "source": "github-repos"}
{"code": "def pie(self, key='wall_time', minfract=0.05, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax=ax)\n    ax.axis('equal')\n    (labels, vals) = self.names_and_values(key, minfract=minfract)\n    ax.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)\n    return fig", "docstring": "Plot pie chart for this timer.\n\nArgs:\nkey: Keyword used to extract data from the timer.\nminfract: Don't show sections whose relative weight is less that minfract.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n`matplotlib` figure", "source": "codesearchnet"}
{"code": "def _process_event(self, event):\n        \n        if (not event.is_directory and\n                not event.src_path.endswith(BATCH_EXTENSION)):\n            self._logger.info('Detected file change: %s', event.src_path)\n            self._batch.process_file(event.src_path)", "docstring": "Process received events.\n\nProcess events received, applying normalization for those\nevents referencing a new or changed file and only if it's\nnot the result of a previous normalization.\n\nArgs:\nevent: Event to process.", "source": "juraj-google-style"}
{"code": "def get_nac_eigendisplacements_along_dir(self, direction):\n        \n        versor = [i / np.linalg.norm(direction) for i in direction]\n        for d, e in self.nac_eigendisplacements:\n            if np.allclose(versor, d):\n                return e\n\n        return None", "docstring": "Returns the nac_eigendisplacements for the given direction (not necessarily a versor).\nNone if the direction is not present or nac_eigendisplacements has not been calculated.\n\nArgs:\ndirection: the direction as a list of 3 elements\nReturns:\nthe eigendisplacements as a numpy array of complex numbers with shape\n(3*len(structure), len(structure), 3). None if not found.", "source": "juraj-google-style"}
{"code": "def csv_to_num_matrix(csv_file_path):\n    \n    mtx = []\n    with open(csv_file_path) as csv_data_file:\n        for row in csv_data_file:\n            mtx.append([float(val) for val in row.split(',')])\n    return mtx", "docstring": "Load a CSV file consisting only of numbers into a Python matrix of floats.\n\nArgs:\ncsv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)", "source": "juraj-google-style"}
{"code": "def get_storage(request):\n    storage_model = oauth2_settings.storage_model\n    user_property = oauth2_settings.storage_model_user_property\n    credentials_property = oauth2_settings.storage_model_credentials_property\n    if storage_model:\n        (module_name, class_name) = storage_model.rsplit('.', 1)\n        module = importlib.import_module(module_name)\n        storage_model_class = getattr(module, class_name)\n        return storage.DjangoORMStorage(storage_model_class, user_property, request.user, credentials_property)\n    else:\n        return dictionary_storage.DictionaryStorage(request.session, key=_CREDENTIALS_KEY)", "docstring": "Gets a Credentials storage object provided by the Django OAuth2 Helper\nobject.\n\nArgs:\nrequest: Reference to the current request object.\n\nReturns:\nAn :class:`oauth2.client.Storage` object.", "source": "codesearchnet"}
{"code": "def bitwise_left_shift(x, y):\n    if any_symbolic_tensors((x, y)):\n        return BitwiseLeftShift().symbolic_call(x, y)\n    return backend.numpy.bitwise_left_shift(x, y)", "docstring": "Shift the bits of an integer to the left.\n\nBits are shifted to the left by appending `y` 0s at the right of `x`.\nSince the internal representation of numbers is in binary format, this\noperation is equivalent to multiplying `x` by `2**y`.\n\nArgs:\nx: Input integer tensor.\ny: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"}
{"code": "def showAddColumnDialog(self, triggered):\n        \n        if triggered:\n            dialog = AddAttributesDialog(self)\n            dialog.accepted.connect(self.addColumn)\n            dialog.rejected.connect(self.uncheckButton)\n            dialog.show()", "docstring": "Display the dialog to add a column to the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the dialog will be created and shown.", "source": "juraj-google-style"}
{"code": "async def populate_jsone_context(chain, parent_link, decision_link, tasks_for):\n    task_ids = {'default': parent_link.task_id, 'decision': decision_link.task_id}\n    source_url = get_source_url(decision_link)\n    project = get_and_check_project(chain.context.config['valid_vcs_rules'], source_url)\n    log.debug('task_ids: {}'.format(task_ids))\n    jsone_context = {'now': parent_link.task['created'], 'as_slugid': (lambda x: task_ids.get(x, task_ids['default'])), 'tasks_for': tasks_for, 'repository': {'url': get_repo(decision_link.task, decision_link.context.config['source_env_prefix']), 'project': project}, 'ownTaskId': parent_link.task_id, 'taskId': None}\n    if (chain.context.config['cot_product'] in ('mobile', 'application-services')):\n        if (tasks_for == 'github-release'):\n            jsone_context.update((await _get_additional_github_releases_jsone_context(decision_link)))\n        elif (tasks_for == 'cron'):\n            jsone_context.update(_get_additional_git_cron_jsone_context(decision_link))\n        elif (tasks_for == 'github-pull-request'):\n            jsone_context.update((await _get_additional_github_pull_request_jsone_context(decision_link)))\n        elif (tasks_for == 'github-push'):\n            jsone_context.update((await _get_additional_github_push_jsone_context(decision_link)))\n        else:\n            raise CoTError('Unknown tasks_for \"{}\" for cot_product \"mobile\"!'.format(tasks_for))\n    else:\n        jsone_context['repository']['level'] = (await get_scm_level(chain.context, project))\n        if (tasks_for == 'action'):\n            jsone_context.update((await _get_additional_hg_action_jsone_context(parent_link, decision_link)))\n        elif (tasks_for == 'hg-push'):\n            jsone_context.update((await _get_additional_hg_push_jsone_context(parent_link, decision_link)))\n        elif (tasks_for == 'cron'):\n            jsone_context.update((await _get_additional_hg_cron_jsone_context(parent_link, decision_link)))\n        else:\n            raise CoTError('Unknown tasks_for {}!'.format(tasks_for))\n    log.debug('{} json-e context:'.format(parent_link.name))\n    log.debug(pprint.pformat(jsone_context))\n    return jsone_context", "docstring": "Populate the json-e context to rebuild ``parent_link``'s task definition.\n\nThis defines the context that `.taskcluster.yml` expects to be rendered\nwith.  See comments at the top of that file for details.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\nparent_link (LinkOfTrust): the parent link to test.\ndecision_link (LinkOfTrust): the parent link's decision task link.\ntasks_for (str): the reason the parent link was created (cron,\nhg-push, action)\n\nRaises:\nCoTError, KeyError, ValueError: on failure.\n\nReturns:\ndict: the json-e context.", "source": "codesearchnet"}
{"code": "def add_state_sensors(self, agent_name, sensors):\n    if isinstance(sensors, list):\n        for sensor in sensors:\n            self.add_state_sensors(agent_name, sensor)\n    else:\n        if (agent_name not in self._sensor_map):\n            self._sensor_map[agent_name] = dict()\n        self._sensor_map[agent_name][sensors] = self._client.malloc(((agent_name + '_') + Sensors.name(sensors)), Sensors.shape(sensors), Sensors.dtype(sensors))", "docstring": "Adds a sensor to a particular agent. This only works if the world you are running also includes\nthat particular sensor on the agent.\n\nArgs:\nagent_name (str): The name of the agent to add the sensor to.\nsensors (:obj:`HolodeckSensor` or list of :obj:`HolodeckSensor`): Sensors to add to the agent.\nShould be objects that inherit from :obj:`HolodeckSensor`.", "source": "codesearchnet"}
{"code": "def __init__(self, server):\n        \n        self.ready = False\n        self.server = server\n\n        self.requests_seen = 0\n        self.bytes_read = 0\n        self.bytes_written = 0\n        self.start_time = None\n        self.work_time = 0\n        self.stats = {\n            'Requests': lambda s: self.requests_seen + (\n                self.start_time is None\n                and trueyzero\n                or self.conn.requests_seen\n            ),\n            'Bytes Read': lambda s: self.bytes_read + (\n                self.start_time is None\n                and trueyzero\n                or self.conn.rfile.bytes_read\n            ),\n            'Bytes Written': lambda s: self.bytes_written + (\n                self.start_time is None\n                and trueyzero\n                or self.conn.wfile.bytes_written\n            ),\n            'Work Time': lambda s: self.work_time + (\n                self.start_time is None\n                and trueyzero\n                or time.time() - self.start_time\n            ),\n            'Read Throughput': lambda s: s['Bytes Read'](s) / (\n                s['Work Time'](s) or 1e-6\n            ),\n            'Write Throughput': lambda s: s['Bytes Written'](s) / (\n                s['Work Time'](s) or 1e-6\n            ),\n        }\n        threading.Thread.__init__(self)", "docstring": "Initialize WorkerThread instance.\n\nArgs:\nserver (cheroot.server.HTTPServer): web server object\nreceiving this request", "source": "juraj-google-style"}
{"code": "def wait_for_registration(self, processor_type):\n    with self._condition:\n        self._condition.wait_for((lambda : ((processor_type in self) or self._cancelled_event.is_set())))\n        if self._cancelled_event.is_set():\n            raise WaitCancelledException()", "docstring": "Waits for a particular processor type to register or until\nis_cancelled is True. is_cancelled cannot be part of this class\nsince we aren't cancelling all waiting for a processor_type,\nbut just this particular wait.\n\nArgs:\nprocessor_type (ProcessorType): The family, and version of\nthe transaction processor.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def set_member_roles(self, guild_id: int, member_id: int, roles: List[int]):\n    self._query(f'guilds/{guild_id}/members/{member_id}', 'PATCH', {'roles': roles}, expected_status=204)", "docstring": "Set the member's roles\n\nThis method takes a list of **role ids** that you want the user to have. This\nmethod will **overwrite** all of the user's current roles with the roles in\nthe passed list of roles.\n\nWhen calling this method, be sure that the list of roles that you're setting\nfor this user is complete, not just the roles that you want to add or remove.\nFor assistance in just adding or just removing roles, set the ``add_member_roles``\nand ``remove_member_roles`` methods.\n\nArgs:\nguild_id: snowflake id of the guild\nmember_id: snowflake id of the member\nroles: list of snowflake ids of roles to set", "source": "codesearchnet"}
{"code": "def _finalize_job(cls, mapreduce_spec, mapreduce_state):\n    config = util.create_datastore_write_config(mapreduce_spec)\n    queue_name = util.get_queue_name(mapreduce_spec.params.get(model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))\n    done_callback = mapreduce_spec.params.get(model.MapreduceSpec.PARAM_DONE_CALLBACK)\n    done_task = None\n    if done_callback:\n        done_task = taskqueue.Task(url=done_callback, headers=util._get_task_headers(mapreduce_spec.mapreduce_id, util.CALLBACK_MR_ID_TASK_HEADER), method=mapreduce_spec.params.get('done_callback_method', 'POST'))\n\n    @db.transactional(retries=5)\n    def _put_state():\n        'Helper to store state.'\n        fresh_state = model.MapreduceState.get_by_job_id(mapreduce_spec.mapreduce_id)\n        if (not fresh_state.active):\n            logging.warning('Job %s is not active. Looks like spurious task execution. Dropping task.', mapreduce_spec.mapreduce_id)\n            return\n        mapreduce_state.put(config=config)\n        if (done_task and (not _run_task_hook(mapreduce_spec.get_hooks(), 'enqueue_done_task', done_task, queue_name))):\n            done_task.add(queue_name, transactional=True)\n    _put_state()\n    logging.info(\"Final result for job '%s' is '%s'\", mapreduce_spec.mapreduce_id, mapreduce_state.result_status)\n    cls._clean_up_mr(mapreduce_spec)", "docstring": "Finalize job execution.\n\nInvokes done callback and save mapreduce state in a transaction,\nand schedule necessary clean ups. This method is idempotent.\n\nArgs:\nmapreduce_spec: an instance of MapreduceSpec\nmapreduce_state: an instance of MapreduceState", "source": "codesearchnet"}
{"code": "def absolute_name(self, depth=0):\n    (node, node_depth) = (self, self.depth)\n    if (depth < 1):\n        depth = node_depth\n    while ((node_depth > depth) and (node.package is not None)):\n        node = node.package\n        node_depth -= 1\n    names = []\n    while (node is not None):\n        names.append(node.name)\n        node = node.package\n    return '.'.join(reversed(names))", "docstring": "Return the absolute name of the node.\n\nConcatenate names from root to self within depth.\n\nArgs:\ndepth (int): maximum depth to go to.\n\nReturns:\nstr: absolute name of the node (until given depth is reached).", "source": "codesearchnet"}
{"code": "def build(cls: Type[AN], node: ast.stmt) -> List[AN]:\n    if node_is_result_assignment(node):\n        return [cls(node, ActNodeType.result_assignment)]\n    if node_is_pytest_raises(node):\n        return [cls(node, ActNodeType.pytest_raises)]\n    if node_is_unittest_raises(node):\n        return [cls(node, ActNodeType.unittest_raises)]\n    token = node.first_token\n    if token.line.strip().endswith('\n        return [cls(node, ActNodeType.marked_act)]\n    if isinstance(node, ast.With):\n        return cls.build_body(node.body)\n    return []", "docstring": "Starting at this ``node``, check if it's an act node. If it's a context\nmanager, recurse into child nodes.\n\nReturns:\nList of all act nodes found.", "source": "codesearchnet"}
{"code": "def _set_read_only_resource_inputs_attr(op: ops.Operation, branch_graphs):\n    read_only_indices = set(range(len(op.inputs)))\n    for branch_graph in branch_graphs:\n        if not read_only_indices:\n            break\n        branch_read_only_indices = acd.get_read_only_resource_input_indices_graph(branch_graph)\n        read_only_indices = read_only_indices.intersection(branch_read_only_indices)\n    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, sorted(read_only_indices))", "docstring": "Sets the list of resource inputs which are read-only.\n\nThis is used by AutomaticControlDependencies.\n\nArgs:\nop: While Operation.\nbranch_graphs: List of branch FuncGraphs.", "source": "github-repos"}
{"code": "def create_task(*args, **kwargs) -> asyncio.Task:\n    tg = task_group()\n    if tg is None:\n        task = asyncio.create_task(*args, **kwargs)\n        _without_context_background_tasks.add(task)\n        task.add_done_callback(_without_context_background_tasks.discard)\n        return task\n    return tg.create_task(*args, **kwargs)", "docstring": "Creates a task that uses the context TaskGroup.\n\nIf no context is available then `asyncio.create_task` will be used.\n\nArgs:\n*args: Positional arguments to pass to `asyncio.create_task`.\n**kwargs: Keyword arguments to pass to `asyncio.create_task`.\n\nReturns:\nAn asyncio task.", "source": "github-repos"}
{"code": "def divide_to_patches(image: Union[np.array, 'torch.Tensor'], patch_size: int) -> list[Union[np.array, 'torch.Tensor']]:\n    patches = []\n    height, width = get_image_size(image, channel_dim=ChannelDimension.FIRST)\n    for i in range(0, height, patch_size):\n        for j in range(0, width, patch_size):\n            patch = image[:, i:i + patch_size, j:j + patch_size]\n            patches.append(patch)\n    return patches", "docstring": "Divides an image into patches of a specified size.\n\nArgs:\nimage (`Union[np.array, \"torch.Tensor\"]`):\nThe input image.\npatch_size (`int`):\nThe size of each patch.\nReturns:\nlist: A list of Union[np.array, \"torch.Tensor\"] representing the patches.", "source": "github-repos"}
{"code": "def _AddProvidesEdges(self, rdf_artifact):\n    for attribute in rdf_artifact.provides:\n        self._AddEdge(rdf_artifact.name, attribute)", "docstring": "Add an edge for every attribute the given artifact provides.\n\nThis method adds a directed edge from the artifact node to every attribute\nthis artifact provides.\n\nArgs:\nrdf_artifact: The artifact object.", "source": "codesearchnet"}
{"code": "def cos(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.cos, tf.float32)", "docstring": "Returns a TensorFluent for the cos function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the cos function.", "source": "codesearchnet"}
{"code": "def find_sorted_task_dependencies(task, task_name, task_id):\n    \n    log.info(\"find_sorted_task_dependencies {} {}\".format(task_name, task_id))\n\n    cot_input_dependencies = [\n        _craft_dependency_tuple(task_name, task_type, task_id)\n        for task_type, task_id in task['extra'].get('chainOfTrust', {}).get('inputs', {}).items()\n    ]\n\n    upstream_artifacts_dependencies = [\n        _craft_dependency_tuple(task_name, artifact_dict['taskType'], artifact_dict['taskId'])\n        for artifact_dict in task.get('payload', {}).get('upstreamArtifacts', [])\n    ]\n\n    dependencies = [*cot_input_dependencies, *upstream_artifacts_dependencies]\n    dependencies = _sort_dependencies_by_name_then_task_id(dependencies)\n\n    parent_task_id = get_parent_task_id(task) or get_decision_task_id(task)\n    parent_task_type = 'parent'\n    \n    \n    parent_tuple = _craft_dependency_tuple(task_name, parent_task_type, parent_task_id)\n    dependencies.insert(0, parent_tuple)\n\n    log.info('found dependencies: {}'.format(dependencies))\n    return dependencies", "docstring": "Find the taskIds of the chain of trust dependencies of a given task.\n\nArgs:\ntask (dict): the task definition to inspect.\ntask_name (str): the name of the task, for logging and naming children.\ntask_id (str): the taskId of the task.\n\nReturns:\nlist: tuples associating dependent task ``name`` to dependent task ``taskId``.", "source": "juraj-google-style"}
{"code": "def resolve_class(classref):\n    \n    if classref is None:\n        return None\n    elif isinstance(classref, six.class_types):\n        return classref\n    elif isinstance(classref, six.string_types):\n        return import_class(classref)\n    else:\n        raise ValueError(\"Unable to resolve class for '%s'\" % classref)", "docstring": "Attempt to return a Python class for the input class reference.\n\nIf `classref` is a class or None, return it. If `classref` is a\npython classpath (e.g., \"foo.bar.MyClass\") import the class and return\nit.\n\nArgs:\nclassref: A fully-qualified Python path to class, or a Python class.\n\nReturns:\nA class.", "source": "juraj-google-style"}
{"code": "def handle_one_of(schema, field, validator, parent_schema):\n    if validator.choices:\n        schema['enum'] = list(validator.choices)\n        schema['enumNames'] = list(validator.labels)\n    return schema", "docstring": "Adds the validation logic for ``marshmallow.validate.OneOf`` by setting\nthe JSONSchema `enum` property to the allowed choices in the validator.\n\nArgs:\nschema (dict): The original JSON schema we generated. This is what we\nwant to post-process.\nfield (fields.Field): The field that generated the original schema and\nwho this post-processor belongs to.\nvalidator (marshmallow.validate.OneOf): The validator attached to the\npassed in field.\nparent_schema (marshmallow.Schema): The Schema instance that the field\nbelongs to.\n\nReturns:\ndict: A, possibly, new JSON Schema that has been post processed and\naltered.", "source": "codesearchnet"}
{"code": "def get_card(self, card_id, **query_params):\n    card_json = self.fetch_json(uri_path=((self.base_uri + '/cards/') + card_id))\n    return self.create_card(card_json)", "docstring": "Get a Card for a given card id. Returns a Card object.\n\nReturns:\nCard: The card with the given card_id", "source": "codesearchnet"}
{"code": "def report_list(config, auth):\n    for query in API_DBM(config, auth, iterate=True).queries().list().execute():\n        yield query", "docstring": "Lists all the DBM report configurations for the current credentials.\n\nArgs:\n* auth: (string) Either user or service.\n\nReturns:\n* Iterator of JSONs.", "source": "github-repos"}
{"code": "def check_data_type(self):\n        \n        metadata_type = self.column_metadata.get('type')\n        if self.type != metadata_type and metadata_type not in self.type:\n            raise ValueError('Types of transformer don\\'t match')", "docstring": "Check the type of the transformer and column match.\n\nArgs:\ncolumn_metadata(dict): Metadata of the column.\n\nRaises a ValueError if the types don't match", "source": "juraj-google-style"}
{"code": "def deserialize(self, stamp_token, serialized_proto):\n    return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token, serialized_proto)", "docstring": "Deserialize the input proto and resets the ensemble from it.\n\nArgs:\nstamp_token: int64 scalar Tensor to denote the stamp of the resource.\nserialized_proto: string scalar Tensor of the serialized proto.\n\nReturns:\nOperation (for dependencies).", "source": "github-repos"}
{"code": "def from_pb(cls, pb):\n    obj = cls._from_pb(pb)\n    obj._pb = pb\n    return obj", "docstring": "Instantiate the object from a protocol buffer.\n\nArgs:\npb (protobuf)\n\nSave a reference to the protocol buffer on the object.", "source": "codesearchnet"}
{"code": "def get_regularization_losses(scope=None):\n    return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)", "docstring": "Gets the list of regularization losses.\n\nArgs:\nscope: An optional scope name for filtering the losses to return.\n\nReturns:\nA list of regularization losses as Tensors.", "source": "github-repos"}
{"code": "def view_page(name=None):\n    if (request.method == 'POST'):\n        if (name is None):\n            if (len(request.forms.filename) > 0):\n                name = request.forms.filename\n        if (name is not None):\n            filename = '{0}.rst'.format(name)\n            file_handle = open(filename, 'w')\n            file_handle.write(request.forms.content.encode('utf-8'))\n            file_handle.close()\n            add_file_to_repo(filename)\n            commit(filename)\n    response.set_header('Cache-control', 'no-cache')\n    response.set_header('Pragma', 'no-cache')\n    if (name is None):\n        index_files = glob.glob('./[Ii][Nn][Dd][Ee][Xx].rst')\n        if (len(index_files) == 0):\n            return view_meta_index()\n        else:\n            name = index_files[0][2:(- 4)]\n    files = glob.glob('{0}.rst'.format(name))\n    if (len(files) > 0):\n        file_handle = open(files[0], 'r')\n        html_body = publish_parts(file_handle.read(), writer=AttowikiWriter(), settings=None, settings_overrides=None)['html_body']\n        history = commit_history('{0}.rst'.format(name))\n        return template('page', type='view', name=name, extended_name=None, is_repo=check_repo(), history=history, gitref=None, content=html_body)\n    else:\n        return static_file(name, '')", "docstring": "Serve a page name.\n\n.. note:: this is a bottle view\n\n* if the view is called with the POST method, write the new page\ncontent to the file, commit the modification and then display the\nhtml rendering of the restructured text file\n\n* if the view is called with the GET method, directly display the html\nrendering of the restructured text file\n\nKeyword Arguments:\n:name: (str) -- name of the rest file (without the .rst extension)\nOPTIONAL\n\nif no filename is given, first try to find a \"index.rst\" file in the\ndirectory and serve it. If not found, serve the meta page __index__\n\nReturns:\nbottle response object", "source": "codesearchnet"}
{"code": "def get_room_by_name(self, name):\n    rooms = self.get_rooms()\n    for room in (rooms or []):\n        if (room['name'] == name):\n            return self.get_room(room['id'])\n    raise RoomNotFoundException(('Room %s not found' % name))", "docstring": "Get a room by name.\n\nReturns:\n:class:`Room`. Room\n\nRaises:\nRoomNotFoundException", "source": "codesearchnet"}
{"code": "def compatible_firmware_version(self):\n    identifier = self.firmware_version.split('compiled')[0]\n    buf_size = self.MAX_BUF_SIZE\n    buf = (ctypes.c_char * buf_size)()\n    res = self._dll.JLINKARM_GetEmbeddedFWString(identifier.encode(), buf, buf_size)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return ctypes.string_at(buf).decode()", "docstring": "Returns the DLL's compatible J-Link firmware version.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe firmware version of the J-Link that the DLL is compatible\nwith.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = hidden_states + residual\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = hidden_states + residual\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    _, _, _ = (object_map, tensor_map, options)\n    del kwargs\n    return []", "docstring": "Creates a copy of this object's tensors onto SavedModel graph.\n\nNeeds to be overridden if the class contains tensors that must be saved\ninto the graph. This method should update the `object_map` and `tensor_map`\ndictionaries.\n\nThis method is called on all nodes in the Trackable Graph (generated by\n`_trackable_children`). The nodes are traversed in the order defined by\n`_deserialization_dependencies`\n\nAll usages of _map_resources should be migrated to this method.\n\nArgs:\nobject_map: A dictionary that maps original Trackables to the copied\nTrackables. This only needs to be updated if the object is a\ntf.function, or if the copied tensors are necessary for checkpointing\nthis object.\ntensor_map: Dictionary mapping original tensors to copied tensors.\noptions: A `tf.saved_model.SaveOptions` object.\n**kwargs: Additional kwargs that may be added at a later time.\n\nReturns:\nFlat list of original tensors that have been copied.", "source": "github-repos"}
{"code": "def __init__(self, counters=None, distributions=None, gauges=None, string_sets=None, bounded_tries=None):\n    self.counters = counters or {}\n    self.distributions = distributions or {}\n    self.gauges = gauges or {}\n    self.string_sets = string_sets or {}\n    self.bounded_tries = bounded_tries or {}", "docstring": "Create a MetricUpdates object.\n\nArgs:\ncounters: Dictionary of MetricKey:MetricUpdate updates.\ndistributions: Dictionary of MetricKey:MetricUpdate objects.\ngauges: Dictionary of MetricKey:MetricUpdate objects.\nstring_sets: Dictionary of MetricKey:MetricUpdate objects.\nbounded_tries: Dictionary of MetricKey:MetricUpdate objects.", "source": "github-repos"}
{"code": "def update(self, **kwargs):\n    for key, value in kwargs.items():\n        if hasattr(self, key):\n            setattr(self, key, value)", "docstring": "Update the configuration attributes with new values.\n\nArgs:\n**kwargs: Keyword arguments representing configuration attributes and their new values.", "source": "github-repos"}
{"code": "def StringEscape(self, string, match, **unused_kwargs):\n    \n    if match.group(1) in '\\\\\\'\"rnbt\\\\.ws':\n      self.string += codecs.decode(string, 'unicode_escape')\n    else:\n      raise errors.ParseError('Invalid escape character {0:s}.'.format(string))", "docstring": "Escape backslashes found inside a string quote.\n\nBackslashes followed by anything other than [\\'\"rnbt.ws] will raise\nan Error.\n\nArgs:\nstring: The string that matched.\nmatch: the match object (instance of re.MatchObject).\nWhere match.group(1) contains the escaped code.\n\nRaises:\nParseError: When the escaped string is not one of [\\'\"rnbt]", "source": "juraj-google-style"}
{"code": "def get(self, key, mem_map=True):\n    self.raise_error_if_not_open()\n    if (key in self._file):\n        data = self._file[key]\n        sampling_rate = data.attrs[SAMPLING_RATE_ATTR]\n        if (not mem_map):\n            data = data[()]\n        data = (np.float32(data) / MAX_INT16_VALUE)\n        return (data, sampling_rate)", "docstring": "Return the samples for the given key and the sampling-rate.\n\nArgs:\nkey (str): The key to read the data from.\nmem_map (bool): If ``True`` returns the data as\nmemory-mapped array, otherwise a copy is returned.\n\nNote:\nThe container has to be opened in advance.\n\nReturns:\ntuple: A tuple containing the samples as numpy array\nwith ``np.float32`` [-1.0,1.0] and the sampling-rate.", "source": "codesearchnet"}
{"code": "def expected_exercise_fn(design, continuation_value, exercise_value):\n    batch_design = tf.broadcast_to(tf.expand_dims(design, -1), design.shape + [continuation_value.shape[-1]])\n    mask = tf.cast(exercise_value > 0, design.dtype)\n    masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))\n    lhs = tf.matmul(masked, masked, transpose_a=True)\n    lhs_pinv = tf.linalg.pinv(lhs)\n    rhs = tf.matmul(masked, tf.expand_dims(tf.transpose(continuation_value), -1), transpose_a=True)\n    beta = tf.linalg.matmul(lhs_pinv, rhs)\n    continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)\n    return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0)", "docstring": "Returns the expected continuation value for each path.\n\nArgs:\ndesign: A real `Tensor` of shape `[basis_size, num_samples]`.\ncontinuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of\nthe same dtype as `design`. The optimal value of the option conditional on\nnot exercising now or earlier, taking future information into account.\nexercise_value: A `Tensor` of the same shape and dtype as\n`continuation_value`. Value of the option if exercised immideately at\nthe current time\n\nReturns:\nA `Tensor` of the same shape and dtype as `continuation_value` whose\n`(n, v)`-th entry represents the expected continuation value of sample path\n`n` under the `v`-th payoff scheme.", "source": "github-repos"}
{"code": "def match_partial_against_complete(self, matcher, solver, partial, complete):\n    assert is_partial(partial)\n    assert is_complete(complete)\n    subst = {p.type_param: pytd.AnythingType() for p in complete.template}\n    formula = matcher.match_Class_against_Class(partial, complete, subst)\n    if formula is booleq.FALSE:\n        raise FlawedQuery(f'{partial.name} can never be {complete.name}')\n    solver.always_true(formula)", "docstring": "Match a partial class (call record) against a complete class.\n\nArgs:\nmatcher: An instance of pytd.type_match.TypeMatch.\nsolver: An instance of pytd.booleq.Solver.\npartial: The partial class to match. The class name needs to be prefixed\nwith \"~\" - the rest of the name is typically the same as complete.name.\ncomplete: A complete class to match against. (E.g. a built-in or a user\ndefined class)\n\nReturns:\nAn instance of pytd.booleq.BooleanTerm.\nRaises:\nFlawedQuery: If this call record is incompatible with the builtin.", "source": "github-repos"}
{"code": "def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):\n    if init_op is Supervisor.USE_DEFAULT:\n        init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)\n        if init_op is None:\n            init_op = variables.global_variables_initializer()\n            ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)\n    self._init_op = init_op\n    self._init_feed_dict = init_feed_dict", "docstring": "Initializes init_op.\n\nArgs:\ninit_op: `Operation` to initialize the variables. If set to USE_DEFAULT,\ncreate an op that initializes all variables and tables.\ninit_feed_dict: A dictionary that maps `Tensor` objects to feed values.\nThis feed dictionary will be used when `init_op` is evaluated.", "source": "github-repos"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    \n    try:\n      structure = self._SDF_HEADER.parseString(lines)\n    except pyparsing.ParseException:\n      logger.debug('Not a SkyDrive log file')\n      return False\n\n    try:\n      dfdatetime_time_elements.TimeElementsInMilliseconds(\n          time_elements_tuple=structure.header_date_time)\n    except ValueError:\n      logger.debug(\n          'Not a SkyDrive log file, invalid date and time: {0!s}'.format(\n              structure.header_date_time))\n      return False\n\n    return True", "docstring": "Verify that this file is a SkyDrive log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def convert_sbml_model(model):\n    biomass_reactions = set()\n    for reaction in model.reactions:\n        if (reaction.id not in model.limits):\n            (lower, upper) = parse_flux_bounds(reaction)\n            if ((lower is not None) or (upper is not None)):\n                model.limits[reaction.id] = (reaction.id, lower, upper)\n        objective = parse_objective_coefficient(reaction)\n        if ((objective is not None) and (objective != 0)):\n            biomass_reactions.add(reaction.id)\n    if (len(biomass_reactions) == 1):\n        model.biomass_reaction = next(iter(biomass_reactions))\n    convert_model_entries(model)\n    if (model.extracellular_compartment is None):\n        extracellular = detect_extracellular_compartment(model)\n        model.extracellular_compartment = extracellular\n    convert_exchange_to_compounds(model)", "docstring": "Convert raw SBML model to extended model.\n\nArgs:\nmodel: :class:`NativeModel` obtained from :class:`SBMLReader`.", "source": "codesearchnet"}
{"code": "def ifilterfalse_items(item_iter, flag_iter):\n    \n    false_items = (item for (item, flag) in zip(item_iter, flag_iter) if not flag)\n    return false_items", "docstring": "ifilterfalse_items\n\nArgs:\nitem_iter (list):\nflag_iter (list): of bools\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_iter import *  # NOQA\n>>> item_iter = [1, 2, 3, 4, 5]\n>>> flag_iter = [False, True, True, False, True]\n>>> false_items = ifilterfalse_items(item_iter, flag_iter)\n>>> result = list(false_items)\n>>> print(result)\n[1, 4]", "source": "juraj-google-style"}
{"code": "def GetGtfsClassByFileName(self, filename):\n    if (filename not in self._file_mapping):\n        return None\n    mapping = self._file_mapping[filename]\n    class_list = mapping['classes']\n    if (len(class_list) > 1):\n        raise problems.NonStandardMapping(filename)\n    else:\n        return self._class_mapping[class_list[0]]", "docstring": "Returns the transitfeed class corresponding to a GTFS file.\n\nArgs:\nfilename: The filename whose class is to be returned\n\nRaises:\nNonStandardMapping if the specified filename has more than one\ncorresponding class", "source": "codesearchnet"}
{"code": "def to_dict(self) -> Dict[str, Any]:\n    output = copy.deepcopy(self.__dict__)\n    output['feature_extractor_type'] = self.__class__.__name__\n    if 'mel_filters' in output:\n        del output['mel_filters']\n    if 'mel_filters_slaney' in output:\n        del output['mel_filters_slaney']\n    return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the\nmel filter banks, which do not need to be saved or printed as they are too long.", "source": "github-repos"}
{"code": "def _parse_name(self, config):\n        \n        value = NAME_RE.search(config).group('value')\n        return dict(name=value)", "docstring": "_parse_name scans the provided configuration block and extracts\nthe vlan name.  The config block is expected to always return the\nvlan name.  The return dict is intended to be merged into the response\ndict.\n\nArgs:\nconfig (str): The vlan configuration block from the nodes running\nconfiguration\n\nReturns:\ndict: resource dict attribute", "source": "juraj-google-style"}
{"code": "def executions(self, digest=False, begin=None, end=None):\n    digests = self._execution_digests\n    if begin is not None or end is not None:\n        begin = begin or 0\n        end = end or len(digests)\n        digests = digests[begin:end]\n    if digest:\n        return digests\n    else:\n        return [self.read_execution(digest) for digest in digests]", "docstring": "Get `Execution`s or `ExecutionDigest`s this reader has read so far.\n\nArgs:\ndigest: Whether the results are returned in a digest form, i.e.,\n`ExecutionDigest` format, instead of the more detailed `Execution`\nformat.\nbegin: Optional beginning index for the requested execution data objects\nor their digests. Python-style negative indices are supported.\nend: Optional ending index for the requested execution data objects or\ntheir digests. Python-style negative indices are supported.\n\nReturns:\nIf `digest`: a `list` of `ExecutionDigest` objects.\nElse: a `list` of `Execution` objects.", "source": "github-repos"}
{"code": "def children_after_parents(self, piper1, piper2):\n        \n        if piper1 in self[piper2].deep_nodes():\n            return 1\n        elif piper2 in self[piper1].deep_nodes():\n            return - 1\n        else:\n            return 0", "docstring": "Custom compare function. Returns ``1`` if the first ``Piper`` instance\nis upstream of the second ``Piper`` instance, ``-1`` if the first\n``Piper`` is downstream of the second ``Piper`` and ``0`` if the two\n``Pipers`` are independent.\n\nArguments:\n\n- piper1(``Piper``) ``Piper`` instance.\n- piper2(``Piper``) ``Piper`` instance.", "source": "juraj-google-style"}
{"code": "def _convert_dataset_to_list(dataset, dataset_type_spec, data_size_warning_flag=True, ensure_shape_similarity=True):\n    dataset_iterator = _get_data_iterator_from_dataset(dataset, dataset_type_spec)\n    dataset_as_list = []\n    start_time = time.time()\n    for sample in _get_next_sample(dataset_iterator, ensure_shape_similarity, data_size_warning_flag, start_time):\n        dataset_as_list.append(sample)\n    return dataset_as_list", "docstring": "Convert `dataset` object to a list of samples.\n\nArgs:\ndataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,\nor a list/tuple of arrays.\ndataset_type_spec: the type of the dataset.\ndata_size_warning_flag: If set to `True`, a warning will\nbe issued if the dataset takes longer than 10 seconds to iterate.\nDefaults to `True`.\nensure_shape_similarity: If set to `True`, the shape of\nthe first sample will be used to validate the shape of rest of the\nsamples. Defaults to `True`.\n\nReturns:\nList: A list of samples.", "source": "github-repos"}
{"code": "def commit(self, synchronized_processing_time):\n    assert not self._committed\n    self._committed = True\n    self._elements = tuple(self._elements)\n    self._synchronized_processing_time = synchronized_processing_time", "docstring": "Commits this bundle.\n\nUncommitted bundle will become committed (immutable) after this call.\n\nArgs:\nsynchronized_processing_time: the synchronized processing time at which\nthis bundle was committed", "source": "github-repos"}
{"code": "def save_dataset(self, out_file_name):\n        \n\n        for time_tag, script in self.data_sets.items():\n            script.save(os.path.join(out_file_name, '{:s}.b26s'.format(time_tag)))", "docstring": "saves current dataset to out_file_name\nArgs:\nout_file_name: name of file", "source": "juraj-google-style"}
{"code": "def is_valid(self, field_name, value) -> (bool, object):\n    if self.has_field(field_name):\n        if (self.fields_dict[field_name] == FieldType.KG_ID):\n            return (True, value)\n        if (self.fields_dict[field_name] == FieldType.NUMBER):\n            if isinstance(value, numbers.Number):\n                return (True, value)\n            else:\n                converted_number = self.parse_number(value)\n                return ((False, value) if (not converted_number) else (True, value))\n        if (self.fields_dict[field_name] == FieldType.STRING):\n            if isinstance(value, str):\n                return (True, value.strip())\n            else:\n                return (True, str(value).strip())\n        if (self.fields_dict[field_name] == FieldType.DATE):\n            (valid, d) = self.is_date(value)\n            if valid:\n                return (True, d.isoformat())\n            else:\n                return (False, value)\n        if (self.fields_dict[field_name] == FieldType.LOCATION):\n            (valid, l) = self.is_location(value)\n            if valid:\n                return (True, l)\n            else:\n                return (False, value)\n    else:\n        print('{} not found in KG Schema'.format(field_name))\n        return (False, value)", "docstring": "Return true if the value type matches or can be coerced to the defined type in schema, otherwise false.\nIf field not defined, return none\n\nArgs:\nfield_name: str\nvalue:\n\nReturns: bool, value, where the value may have been coerced to the required type.", "source": "codesearchnet"}
{"code": "def __init__(self, data_type_definition):\n    \n    super(UUIDMap, self).__init__(data_type_definition)\n    self._byte_order = data_type_definition.byte_order", "docstring": "Initializes an UUID (or GUID) data type map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.", "source": "juraj-google-style"}
{"code": "def _lookup_namespace(self, symbol, namespace):\n    for namespace_part in symbol.parts:\n        namespace = namespace.get(namespace_part)\n        if (namespace is None):\n            break\n        if (not isinstance(namespace, dict)):\n            return namespace\n    raise Error(('%s not found' % symbol.name))", "docstring": "Helper for lookup_symbol that only looks up variables in a\nnamespace.\n\nArgs:\nsymbol: Symbol\nnamespace: pointer into self.namespaces", "source": "codesearchnet"}
{"code": "def chunk_sequence(sequence, chunk_length=200, padding_value=0):\n    if ('length' in sequence):\n        length = sequence.pop('length')\n    else:\n        length = tf.shape(tools.nested.flatten(sequence)[0])[0]\n    num_chunks = (((length - 1) \n    padding_length = ((chunk_length * num_chunks) - length)\n    padded = tools.nested.map((lambda tensor: tf.concat([tensor, ((0 * tensor[:padding_length]) + padding_value)], 0)), sequence)\n    chunks = tools.nested.map((lambda tensor: tf.reshape(tensor, ([num_chunks, chunk_length] + tensor.shape[1:].as_list()))), padded)\n    chunks['length'] = tf.concat([(chunk_length * tf.ones(((num_chunks - 1),), dtype=tf.int32)), [(chunk_length - padding_length)]], 0)\n    return chunks", "docstring": "Split a nested dict of sequence tensors into a batch of chunks.\n\nThis function does not expect a batch of sequences, but a single sequence. A\n`length` key is added if it did not exist already.\n\nArgs:\nsequence: Nested dict of tensors with time dimension.\nchunk_length: Size of chunks the sequence will be split into.\npadding_value: Value used for padding the last chunk after the sequence.\n\nReturns:\nNested dict of sequence tensors with chunk dimension.", "source": "codesearchnet"}
{"code": "def get_or_create_direct_channel(cls, initiator_key, receiver_key):\n        \n        existing = cls.objects.OR().filter(\n            code_name='%s_%s' % (initiator_key, receiver_key)).filter(\n            code_name='%s_%s' % (receiver_key, initiator_key))\n        receiver_name = UserModel.objects.get(receiver_key).full_name\n        if existing:\n            channel = existing[0]\n        else:\n            channel_name = '%s_%s' % (initiator_key, receiver_key)\n            channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()\n        with BlockSave(Subscriber):\n            Subscriber.objects.get_or_create(channel=channel,\n                                             user_id=initiator_key,\n                                             name=receiver_name)\n            Subscriber.objects.get_or_create(channel=channel,\n                                             user_id=receiver_key,\n                                             name=UserModel.objects.get(initiator_key).full_name)\n        return channel, receiver_name", "docstring": "Creates a  direct messaging channel between two user\n\nArgs:\ninitiator: User, who want's to make first contact\nreceiver: User, other party\n\nReturns:\n(Channel, receiver_name)", "source": "juraj-google-style"}
{"code": "def ParseMessage(descriptor, byte_str):\n  \n  result_class = MakeClass(descriptor)\n  new_msg = result_class()\n  new_msg.ParseFromString(byte_str)\n  return new_msg", "docstring": "Generate a new Message instance from this Descriptor and a byte string.\n\nArgs:\ndescriptor: Protobuf Descriptor object\nbyte_str: Serialized protocol buffer byte string\n\nReturns:\nNewly created protobuf Message object.", "source": "juraj-google-style"}
{"code": "def match(self, request):\n        \n        errors = []\n\n        def match(matcher):\n            try:\n                return matcher.match(request)\n            except Exception as err:\n                err = '{}: {}'.format(type(matcher).__name__, err)\n                errors.append(err)\n                return False\n\n        return all([match(matcher) for matcher in self]), errors", "docstring": "Match the given HTTP request instance against the registered\nmatcher functions in the current engine.\n\nArguments:\nrequest (pook.Request): outgoing request to match.\n\nReturns:\ntuple(bool, list[Exception]): ``True`` if all matcher tests\npasses, otherwise ``False``. Also returns an optional list\nof error exceptions.", "source": "juraj-google-style"}
{"code": "def _process_image_files_batch(coder, thread_index, ranges, name, filenames, texts, labels, num_shards):\n    num_threads = len(ranges)\n    assert (not (num_shards % num_threads))\n    num_shards_per_batch = int((num_shards / num_threads))\n    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], (num_shards_per_batch + 1)).astype(int)\n    num_files_in_thread = (ranges[thread_index][1] - ranges[thread_index][0])\n    counter = 0\n    for s in range(num_shards_per_batch):\n        shard = ((thread_index * num_shards_per_batch) + s)\n        output_filename = ('%s-%.5d-of-%.5d' % (name, shard, num_shards))\n        output_file = os.path.join(FLAGS.output_directory, output_filename)\n        writer = tf.python_io.TFRecordWriter(output_file)\n        shard_counter = 0\n        files_in_shard = np.arange(shard_ranges[s], shard_ranges[(s + 1)], dtype=int)\n        for i in files_in_shard:\n            filename = filenames[i]\n            label = labels[i]\n            text = texts[i]\n            (image_buffer, height, width) = _process_image(filename, coder)\n            example = _convert_to_example(filename, image_buffer, label, text, height, width)\n            writer.write(example.SerializeToString())\n            shard_counter += 1\n            counter += 1\n            if (not (counter % 1000)):\n                print(('%s [thread %d]: Processed %d of %d images in thread batch.' % (datetime.now(), thread_index, counter, num_files_in_thread)))\n                sys.stdout.flush()\n        writer.close()\n        print(('%s [thread %d]: Wrote %d images to %s' % (datetime.now(), thread_index, shard_counter, output_file)))\n        sys.stdout.flush()\n        shard_counter = 0\n    print(('%s [thread %d]: Wrote %d images to %d shards.' % (datetime.now(), thread_index, counter, num_files_in_thread)))\n    sys.stdout.flush()", "docstring": "Processes and saves list of images as TFRecord in 1 thread.\n\nArgs:\ncoder: instance of ImageCoder to provide TensorFlow image coding utils.\nthread_index: integer, unique batch to run index is within [0, len(ranges)).\nranges: list of pairs of integers specifying ranges of each batches to\nanalyze in parallel.\nname: string, unique identifier specifying the data set\nfilenames: list of strings; each string is a path to an image file\ntexts: list of strings; each string is human readable, e.g. 'dog'\nlabels: list of integer; each integer identifies the ground truth\nnum_shards: integer number of shards for this data set.", "source": "codesearchnet"}
{"code": "def _rewrite_output_as_tensor(body_grad_graph, grad_output_slices):\n    with body_grad_graph.as_default():\n        new_output = tensor_conversion.convert_to_tensor_v2(grad_output_slices)\n    idx = _get_tensor_index_in_iterable(body_grad_graph.structured_outputs, grad_output_slices)\n    body_grad_graph.structured_outputs[idx] = new_output\n    body_grad_graph.outputs = func_graph.flatten(body_grad_graph.structured_outputs)", "docstring": "Rewrites grad_output_slices to be a Tensor output.\n\nArgs:\nbody_grad_graph: _WhileBodyGradFuncGraph.\ngrad_output_slices: IndexedSlices output of body_grad_graph.", "source": "github-repos"}
{"code": "def _path_to_str(self, path):\n        \n        inp = ''\n        for arc in path:\n            i = self.isyms.find(arc.ilabel)\n            \n            if i != fst.EPSILON:\n                inp += i\n        return inp", "docstring": "Convert a path to the string representing the path\nArgs:\npath (tuple): A tuple of arcs\nReturns:\ninp (str): The path concatenated as as string", "source": "juraj-google-style"}
{"code": "def _get_table(name):\n    item = google.datalab.utils.commands.get_notebook_item(name)\n    if isinstance(item, bigquery.Table):\n        return item\n    try:\n        return _existing_table_cache[name]\n    except KeyError:\n        table = bigquery.Table(name)\n        if table.exists():\n            _existing_table_cache[name] = table\n            return table\n    return None", "docstring": "Given a variable or table name, get a Table if it exists.\n\nArgs:\nname: the name of the Table or a variable referencing the Table.\nReturns:\nThe Table, if found.", "source": "codesearchnet"}
{"code": "def _SetSshHostKeys(self, host_key_types=None):\n    \n    section = 'Instance'\n    instance_id = self._GetInstanceId()\n    if instance_id != self.instance_config.GetOptionString(\n        section, 'instance_id'):\n      self.logger.info('Generating SSH host keys for instance %s.', instance_id)\n      file_regex = re.compile(r'ssh_host_(?P<type>[a-z0-9]*)_key\\Z')\n      key_dir = '/etc/ssh'\n      key_files = [f for f in os.listdir(key_dir) if file_regex.match(f)]\n      key_types = host_key_types.split(',') if host_key_types else []\n      key_types_files = ['ssh_host_%s_key' % key_type for key_type in key_types]\n      for key_file in set(key_files) | set(key_types_files):\n        key_type = file_regex.match(key_file).group('type')\n        key_dest = os.path.join(key_dir, key_file)\n        self._GenerateSshKey(key_type, key_dest)\n      self._StartSshd()\n      self.instance_config.SetOption(section, 'instance_id', str(instance_id))", "docstring": "Regenerates SSH host keys when the VM is restarted with a new IP address.\n\nBooting a VM from an image with a known SSH key allows a number of attacks.\nThis function will regenerating the host key whenever the IP address\nchanges. This applies the first time the instance is booted, and each time\nthe disk is used to boot a new instance.\n\nArgs:\nhost_key_types: string, a comma separated list of host key types.", "source": "juraj-google-style"}
{"code": "def on_message(self, message):\n        \n\n        if 'content' in message['d']:\n            metadata = self._parse_metadata(message)\n            message = Message(text=message['d']['content'],\n                              metadata=metadata).__dict__\n            logger.debug(message)\n            self.baseplate.tell(message)", "docstring": "Runs on a create_message event from websocket connection\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"", "source": "juraj-google-style"}
{"code": "def append_paulis(self, paulis=None, pauli_labels=None):\n    return self.insert_paulis(None, paulis=paulis, pauli_labels=pauli_labels)", "docstring": "Append pauli at the end.\n\nArgs:\npaulis (Pauli): the to-be-inserted or appended pauli\npauli_labels (list[str]): the to-be-inserted or appended pauli label\n\nReturns:\nPauli: self", "source": "codesearchnet"}
{"code": "def __recognize_union(self, node: yaml.Node,\n                          expected_type: Type) -> RecResult:\n        \n        logger.debug('Recognizing as a union')\n        recognized_types = []\n        message = ''\n        union_types = generic_type_args(expected_type)\n        logger.debug('Union types {}'.format(union_types))\n        for possible_type in union_types:\n            recognized_type, msg = self.recognize(node, possible_type)\n            if len(recognized_type) == 0:\n                message += msg\n            recognized_types.extend(recognized_type)\n        recognized_types = list(set(recognized_types))\n        if bool in recognized_types and bool_union_fix in recognized_types:\n            recognized_types.remove(bool_union_fix)\n\n        if len(recognized_types) == 0:\n            return recognized_types, message\n        elif len(recognized_types) > 1:\n            message = ('{}{}Could not determine which of the following types'\n                       ' this is: {}').format(node.start_mark, os.linesep,\n                                              recognized_types)\n            return recognized_types, message\n\n        return recognized_types, ''", "docstring": "Recognize a node that we expect to be one of a union of types.\n\nArgs:\nnode: The node to recognize.\nexpected_type: Union[...something...]\n\nReturns:\nThe specific type that was recognized, multiple, or none.", "source": "juraj-google-style"}
{"code": "def _validate_chain_strength(sampler, chain_strength):\n    properties = sampler.properties\n    if ('extended_j_range' in properties):\n        max_chain_strength = (- min(properties['extended_j_range']))\n    elif ('j_range' in properties):\n        max_chain_strength = (- min(properties['j_range']))\n    else:\n        raise ValueError(\"input sampler should have 'j_range' and/or 'extended_j_range' property.\")\n    if (chain_strength is None):\n        chain_strength = max_chain_strength\n    elif (chain_strength > max_chain_strength):\n        raise ValueError('Provided chain strength exceedds the allowed range.')\n    return chain_strength", "docstring": "Validate the provided chain strength, checking J-ranges of the sampler's children.\n\nArgs:\nchain_strength (float) The provided chain strength.  Use None to use J-range.\n\nReturns (float):\nA valid chain strength, either provided or based on available J-range.  Positive finite float.", "source": "codesearchnet"}
{"code": "def _relative_position_to_absolute_position_masked(x):\n    (batch, heads, length, _) = common_layers.shape_list(x)\n    x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])\n    x = tf.reshape(x, [batch, heads, (1 + length), length])\n    x = tf.slice(x, [0, 0, 1, 0], [(- 1), (- 1), (- 1), (- 1)])\n    return x", "docstring": "Helper to dot_product_self_attention_relative_v2.\n\nRearrange an attention logits or weights Tensor.\n\nThe dimensions of the input represent:\n[batch, heads, query_position, memory_position - query_position + length - 1]\n\nThe dimensions of the output represent:\n[batch, heads, query_position, memory_position]\n\nOnly works with masked_attention.  Undefined behavior for regions of the\ninput where memory_position > query_position.\n\nArgs:\nx: a Tensor with shape [batch, heads, length, length]\n\nReturns:\na Tensor with shape [batch, heads, length, length]", "source": "codesearchnet"}
{"code": "def phone_text_subs():\n    Small = {'zero': 0, 'zer0': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'fuor': 4, 'five': 5, 'fith': 5, 'six': 6, 'seven': 7, 'sven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12, 'thirteen': 13, 'fourteen': 14, 'fifteen': 15, 'sixteen': 16, 'seventeen': 17, 'eighteen': 18, 'nineteen': 19, 'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90, 'oh': 0}\n    Magnitude = {'thousand': 0, 'million': 0}\n    Others = {'!': 1, 'o': 0, 'l': 1, 'i': 1}\n    output = {}\n    output['Small'] = Small\n    output['Magnitude'] = Magnitude\n    output['Others'] = Others\n    return output", "docstring": "Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual\nNumber value.\n\nReturns:\ndictionary of dictionaries containing Strings mapped to Numbers", "source": "codesearchnet"}
{"code": "def get_internal_modules(key='exa'):\n    key += '.'\n    return [v for (k, v) in sys.modules.items() if k.startswith(key)]", "docstring": "Get a list of modules belonging to the given package.\n\nArgs:\nkey (str): Package or library name (e.g. \"exa\")", "source": "codesearchnet"}
{"code": "def mach60(msg):\n    d = hex2bin(data(msg))\n    if (d[23] == '0'):\n        return None\n    mach = ((bin2int(d[24:34]) * 2.048) / 512.0)\n    return round(mach, 3)", "docstring": "Aircraft MACH number\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nfloat: MACH number", "source": "codesearchnet"}
{"code": "def get_v2_names(symbol: Any) -> Sequence[str]:\n    names_v2 = []\n    tensorflow_api_attr = API_ATTRS[TENSORFLOW_API_NAME].names\n    keras_api_attr = API_ATTRS[KERAS_API_NAME].names\n    if not hasattr(symbol, '__dict__'):\n        return names_v2\n    if tensorflow_api_attr in symbol.__dict__:\n        names_v2.extend(getattr(symbol, tensorflow_api_attr))\n    if keras_api_attr in symbol.__dict__:\n        names_v2.extend(getattr(symbol, keras_api_attr))\n    return names_v2", "docstring": "Get a list of TF 2.0 names for this symbol.\n\nArgs:\nsymbol: symbol to get API names for.\n\nReturns:\nList of all API names for this symbol.", "source": "github-repos"}
{"code": "def get_reference_points(spatial_shapes, valid_ratios, device):\n    reference_points_list = []\n    for level, (height, width) in enumerate(spatial_shapes):\n        ref_y, ref_x = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), indexing='ij')\n        ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)\n        ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)\n        ref = torch.stack((ref_x, ref_y), -1)\n        reference_points_list.append(ref)\n    reference_points = torch.cat(reference_points_list, 1)\n    reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n    return reference_points", "docstring": "Get reference points for each feature map. Used in decoder.\n\nArgs:\nspatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):\nSpatial shapes of each feature map.\nvalid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):\nValid ratios of each feature map.\ndevice (`torch.device`):\nDevice on which to create the tensors.\nReturns:\n`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`", "source": "github-repos"}
{"code": "async def invoke(self, context):\n        \n        try:\n            \n            \n            tasks = await self._run_cancellable(claim_work(context))\n            if not tasks or not tasks.get('tasks', []):\n                await self._run_cancellable(asyncio.sleep(context.config['poll_interval']))\n                return None\n\n            \n            \n            \n            status = None\n            for task_defn in tasks.get('tasks', []):\n                prepare_to_run_task(context, task_defn)\n                reclaim_fut = context.event_loop.create_task(reclaim_task(context, context.task))\n                try:\n                    status = await do_run_task(context, self._run_cancellable, self._to_cancellable_process)\n                    artifacts_paths = filepaths_in_dir(context.config['artifact_dir'])\n                except WorkerShutdownDuringTask:\n                    shutdown_artifact_paths = [os.path.join('public', 'logs', log_file)\n                                               for log_file in ['chain_of_trust.log', 'live_backing.log']]\n                    artifacts_paths = [path for path in shutdown_artifact_paths\n                                       if os.path.isfile(os.path.join(context.config['artifact_dir'], path))]\n                    status = STATUSES['worker-shutdown']\n                status = worst_level(status, await do_upload(context, artifacts_paths))\n                await complete_task(context, status)\n                reclaim_fut.cancel()\n                cleanup(context)\n\n            return status\n\n        except asyncio.CancelledError:\n            return None", "docstring": "Claims and processes Taskcluster work.\n\nArgs:\ncontext (scriptworker.context.Context): context of worker\n\nReturns: status code of build", "source": "juraj-google-style"}
{"code": "def _sendline(self, line):\n        \n        self.lines = []\n        try:\n            self._read()\n        except socket.error:\n            logging.debug('Nothing cleared')\n\n        logger.debug('sending [%s]', line)\n        self._write(line + '\\r\\n')\n\n        \n        time.sleep(0.5)", "docstring": "Send exactly one line to the device\n\nArgs:\nline str: data send to device", "source": "juraj-google-style"}
{"code": "def forward(ctx, scores: torch.Tensor, multiplier: torch.Tensor, selected_experts: torch.Tensor, masked_gates: torch.Tensor, mask_for_one: torch.Tensor):\n    ctx.save_for_backward(multiplier, selected_experts, masked_gates)\n    return multiplier * mask_for_one", "docstring": "Forward pass for the custom autograd function.\n\nArgs:\nctx: Context object to save information for backward computation.\nscores (torch.Tensor): Input scores tensor.\nmultiplier (torch.Tensor): Multiplier tensor.\nselected_experts (torch.Tensor): Tensor of selected experts.\nmasked_gates (torch.Tensor): Masked gates tensor.\nmask_for_one (torch.Tensor): Mask for one tensor.\n\nReturns:\ntorch.Tensor: Result of the forward pass.", "source": "github-repos"}
{"code": "def center_crop(self, image: np.ndarray, crop_size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    crop_size = get_size_dict(crop_size, default_to_square=True)\n    if 'height' not in crop_size or 'width' not in crop_size:\n        raise ValueError('crop_size dictionary must contain height and width keys')\n    return center_crop(image, (crop_size['height'], crop_size['width']), data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Center crop an image to a certain size.\n\nArgs:\nimage (`np.ndarray`):\nImage to center crop.\ncrop_size (`Dict[str, int]`):\nThe size to center crop the image to. Must contain height and width keys.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def populate_request_data(self, request_args):\n        \n        request_args['auth'] = HTTPBasicAuth(\n            self._username, self._password)\n        return request_args", "docstring": "Add the authentication info to the supplied dictionary.\n\nWe use the `requests.HTTPBasicAuth` class as the `auth` param.\n\nArgs:\n`request_args`: The arguments that will be passed to the request.\nReturns:\nThe updated arguments for the request.", "source": "juraj-google-style"}
{"code": "def from_object(cls, o, base_uri):\n        \n        if isinstance(o, list):\n            if len(o) == 1:\n                return cls.from_object(o[0], base_uri)\n\n            return [cls.from_object(x, base_uri) for x in o]\n\n        return cls(o, base_uri)", "docstring": "Returns a new ``Link`` based on a JSON object or array.\n\nArguments:\n\n- ``o``: a dictionary holding the deserializated JSON for the new\n``Link``, or a ``list`` of such documents.\n- ``base_uri``: optional URL used as the basis when expanding\nrelative URLs in the link.", "source": "juraj-google-style"}
{"code": "def __init__(self, html_template_path, export_report_path):\n    if not _file_io.file_exists(html_template_path):\n        raise IOError(\"File '{0}' does not exist.\".format(html_template_path))\n    with _file_io.FileIO(html_template_path, 'r') as f:\n        self.html_template = f.read()\n    _file_io.recursive_create_dir(os.path.dirname(export_report_path))\n    self.export_report_path = export_report_path", "docstring": "Reads the HTML template content.\n\nArgs:\nhtml_template_path: A string, path to the template HTML file.\nexport_report_path: A string, path to the generated HTML report. This path\nshould point to a '.html' file with date and time in its name.\ne.g. 2019-01-01-10:05.toco_report.html.\n\nRaises:\nIOError: File doesn't exist.", "source": "github-repos"}
{"code": "def credits(self, **kwargs):\n    path = self._get_series_id_season_number_path('credits')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the cast & crew credits for a TV season by season number.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def CoinFromRef(coin_ref, tx_output, state=CoinState.Unconfirmed, transaction=None):\n    coin = Coin(coin_reference=coin_ref, tx_output=tx_output, state=state)\n    coin._transaction = transaction\n    return coin", "docstring": "Get a Coin object using a CoinReference.\n\nArgs:\ncoin_ref (neo.Core.CoinReference): an object representing a single UTXO / transaction input.\ntx_output (neo.Core.Transaction.TransactionOutput): an object representing a transaction output.\nstate (neo.Core.State.CoinState):\n\nReturns:\nCoin: self.", "source": "codesearchnet"}
{"code": "def success(channel, stats, name, platform, dp):\n    datapacks = [('Platform', platform, False)]\n    for stat in stats:\n        if (stat[0] in ('Duel 1v1', 'Doubles 2v2', 'Solo Standard 3v3', 'Standard 3v3')):\n            stat_name = (('__' + stat[0]) + '__')\n            stat_value = (('**' + stat[1]) + '**')\n        else:\n            stat_name = stat[0]\n            stat_value = stat[1]\n        if stat[2]:\n            stat_value += ((' *(Top ' + stat[2]) + '%)*')\n        datapacks.append((stat_name, stat_value, True))\n    gui = ui_embed.UI(channel, 'Rocket League Stats: {}'.format(name), '*Stats obtained from [Rocket League Tracker Network](https:\n    return gui", "docstring": "Creates an embed UI containing the Rocket League stats\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nstats (tuple): Tuples of (field, value, percentile)\nname (str): The name of the player\nplatform (str): The playfor to search on, can be 'steam', 'ps', or 'xbox'\ndp (str): URL to the player's dp\n\nReturns:\n(discord.Embed): The created embed", "source": "codesearchnet"}
{"code": "def get_version(self, timestamp):\n    if timestamp != 0 and timestamp != self.current_timestamp:\n        assert timestamp > self.current_timestamp\n        self.current_version = self.current_version + 1\n        self.current_timestamp = timestamp\n    return self.current_version", "docstring": "Updates version if necessary and returns the version number.\n\nArgs:\ntimestamp: (int) unix timestamp when the cache is updated. This value is\nzero if the cache has been evicted or doesn't exist.", "source": "github-repos"}
{"code": "def _AddSerializedEvent(self, event):\n    identifier = identifiers.SQLTableIdentifier(self._CONTAINER_TYPE_EVENT, (self._serialized_event_heap.number_of_events + 1))\n    event.SetIdentifier(identifier)\n    serialized_data = self._SerializeAttributeContainer(event)\n    self._serialized_event_heap.PushEvent(event.timestamp, serialized_data)\n    if (self._serialized_event_heap.data_size > self._maximum_buffer_size):\n        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)", "docstring": "Adds an serialized event.\n\nArgs:\nevent (EventObject): event.\n\nRaises:\nIOError: if the event cannot be serialized.\nOSError: if the event cannot be serialized.", "source": "codesearchnet"}
{"code": "def has_overlap(self, interval: 'Interval') -> bool:\n    if ((self.begin < interval.end) and (interval.begin < self.end)):\n        return True\n    return False", "docstring": "Check if self has overlap with `interval`.\n\nArgs:\ninterval: interval to be examined\n\nReturns:\nbool: True if self has overlap with `interval` otherwise False", "source": "codesearchnet"}
{"code": "def ragged_rank(self):\n    return self._ragged_rank", "docstring": "The number of times the RaggedTensor's flat_values is partitioned.\n\nDefaults to `shape.ndims - 1`.\n\nExamples:\n\n>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n>>> tf.type_spec_from_value(values).ragged_rank\n1\n\n>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)\n>>> tf.type_spec_from_value(rt1).ragged_rank\n2\n\nReturns:\nA Python `int` indicating the number of times the underlying `flat_values`\nTensor has been partitioned to add a new dimension.\nI.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.", "source": "github-repos"}
{"code": "def __init__(self, performed_action, run_metadata=None, client_graph_def=None, tf_error=None):\n    _check_type(performed_action, str)\n    self.performed_action = performed_action\n    if run_metadata is not None:\n        _check_type(run_metadata, config_pb2.RunMetadata)\n    self.run_metadata = run_metadata\n    self.client_graph_def = client_graph_def\n    self.tf_error = tf_error", "docstring": "Constructor for `OnRunEndRequest`.\n\nArgs:\nperformed_action: (`OnRunStartAction`) Actually-performed action by the\ndebug-wrapper session.\nrun_metadata: run_metadata output from the run() call (if any).\nclient_graph_def: (GraphDef) GraphDef from the client side, i.e., from\nthe python front end of TensorFlow. Can be obtained with\nsession.graph.as_graph_def().\ntf_error: (errors.OpError subtypes) TensorFlow OpError that occurred\nduring the run (if any).", "source": "github-repos"}
{"code": "def get_groups(self, **kwargs):\n    params = {'cultureInfo': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('geo', 'get_groups', **params)\n    if (not util.check_result(result)):\n        return (False, result.get('resultDescription', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'resultValues')\n    return (True, [emtype.GeoGroupItem(**a) for a in values])", "docstring": "Obtain line types and details.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[GeoGroupItem]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def Selector(fields):\n    \n\n    check_user_facing_fields_dict(fields, 'Selector')\n\n    class _Selector(_ConfigSelector):\n        def __init__(self):\n            key = 'Selector.' + str(DictCounter.get_next_count())\n            super(_Selector, self).__init__(\n                key=key,\n                name=None,\n                fields=fields,\n                \n                type_attributes=ConfigTypeAttributes(is_builtin=True),\n            )\n\n    return _Selector", "docstring": "Selectors are used when you want to be able present several different options to the user but\nforce them to select one. For example, it would not make much sense to allow them\nto say that a single input should be sourced from a csv and a parquet file: They must choose.\n\nNote that in other type systems this might be called an \"input union.\"\n\nArgs:\nfields (Dict[str, Field]):", "source": "juraj-google-style"}
{"code": "def sanity_check_states(states_spec):\n    states = copy.deepcopy(states_spec)\n    is_unique = ('shape' in states)\n    if is_unique:\n        states = dict(state=states)\n    for (name, state) in states.items():\n        if isinstance(state['shape'], int):\n            state['shape'] = (state['shape'],)\n        if ('type' not in state):\n            state['type'] = 'float'\n    return (states, is_unique)", "docstring": "Sanity checks a states dict, used to define the state space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\nstates_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.", "source": "codesearchnet"}
{"code": "def _set_current(self, new_current):\n    new_cur_full_path = self.join(new_current)\n    if (not os.path.exists(new_cur_full_path)):\n        raise PrefixNotFound(('Prefix \"%s\" does not exist in workdir %s' % (new_current, self.path)))\n    if os.path.lexists(self.join('current')):\n        os.unlink(self.join('current'))\n    os.symlink(new_current, self.join('current'))\n    self.current = new_current", "docstring": "Change the current default prefix, for internal usage\n\nArgs:\nnew_current(str): Name of the new current prefix, it must already\nexist\n\nReturns:\nNone\n\nRaises:\nPrefixNotFound: if the given prefix name does not exist in the\nworkdir", "source": "codesearchnet"}
{"code": "def RotateServerKey(cn=u'grr', keylength=4096):\n    ca_certificate = config.CONFIG['CA.certificate']\n    ca_private_key = config.CONFIG['PrivateKeys.ca_key']\n    if ((not ca_certificate) or (not ca_private_key)):\n        raise ValueError('No existing CA certificate found.')\n    existing_cert = config.CONFIG['Frontend.certificate']\n    serial_number = (existing_cert.GetSerialNumber() + 1)\n    EPrint((\"Generating new server key (%d bits, cn '%s', serial \n    server_private_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=keylength)\n    server_cert = key_utils.MakeCASignedCert(str(cn), server_private_key, ca_certificate, ca_private_key, serial_number=serial_number)\n    EPrint('Updating configuration.')\n    config.CONFIG.Set('Frontend.certificate', server_cert.AsPEM())\n    config.CONFIG.Set('PrivateKeys.server_key', server_private_key.AsPEM())\n    config.CONFIG.Write()\n    EPrint('Server key rotated, please restart the GRR Frontends.')", "docstring": "This function creates and installs a new server key.\n\nNote that\n\n- Clients might experience intermittent connection problems after\nthe server keys rotated.\n\n- It's not possible to go back to an earlier key. Clients that see a\nnew certificate will remember the cert's serial number and refuse\nto accept any certificate with a smaller serial number from that\npoint on.\n\nArgs:\ncn: The common name for the server to use.\nkeylength: Length in bits for the new server key.\n\nRaises:\nValueError: There is no CA cert in the config. Probably the server\nstill needs to be initialized.", "source": "codesearchnet"}
{"code": "def _HasSelf(self, sig):\n    return sig.params and sig.params[0].name == 'self'", "docstring": "True if a signature has a self parameter.\n\nThis only checks for the name, since the type can be too many different\nthings (type of the method, type of the base class, object, unknown etc.)\nand doesn't carry over to the simplified version, anyway.\n\nArguments:\nsig: Function signature (instance of pytd.Signature)\n\nReturns:\nTrue if the signature has \"self\".", "source": "github-repos"}
{"code": "def add_method(self, m, **kwargs):\n    if isinstance(m, types.FunctionType):\n        self[('function', id(m))] = m\n    else:\n        (f, obj) = get_method_vars(m)\n        wrkey = (f, id(obj))\n        self[wrkey] = obj", "docstring": "Add an instance method or function\n\nArgs:\nm: The instance method or function to store", "source": "codesearchnet"}
{"code": "def set_number_of_shards(self, number_of_shards):\n    if self._frozen:\n        if self._number_of_shards != number_of_shards:\n            raise ValueError(f\"Can't set sharding policy to use {number_of_shards} shards since it has been frozen to use {self._number_of_shards}\")\n    elif number_of_shards > 0:\n        self._number_of_shards = number_of_shards\n    else:\n        raise ValueError(f\"Can't set sharding policy to use {number_of_shards} shards; value must be > 0\")", "docstring": "Sets the number of shards for the current policy.\n\nIf the policy has been frozen then number_of_shards must match the\nexisting setting.\n\nArgs:\nnumber_of_shards: The number of shards to use in the policy.\n\nRaises:\nValueError: If the policy has been frozen and number_of_shards\ndiffers from the frozen value; or number_of_shards <= 0.", "source": "github-repos"}
{"code": "def delete_variant(self, variant):\n        \n        mongo_variant = self.get_variant(variant)\n        \n        if mongo_variant:\n            \n            if mongo_variant['observations'] == 1:\n                LOG.debug(\"Removing variant {0}\".format(\n                    mongo_variant.get('_id')\n                ))\n                message = self.db.variant.delete_one({'_id': variant['_id']})\n            else:\n                LOG.debug(\"Decreasing observations for {0}\".format(\n                    mongo_variant.get('_id')\n                ))\n                message = self.db.variant.update_one({\n                    '_id': mongo_variant['_id']\n                    },{\n                        '$inc': {\n                            'observations': -1,\n                            'homozygote': - (variant.get('homozygote', 0)),\n                            'hemizygote': - (variant.get('hemizygote', 0)),\n                        },\n                        '$pull': {\n                            'families': variant.get('case_id')\n                        }\n                    }, upsert=False)\n        return", "docstring": "Delete observation in database\n\nThis means that we take down the observations variable with one.\nIf 'observations' == 1 we remove the variant. If variant was homozygote\nwe decrease 'homozygote' with one.\nAlso remove the family from array 'families'.\n\nArgs:\nvariant (dict): A variant dictionary", "source": "juraj-google-style"}
{"code": "def _map_across_full_axis_select_indices(\n        self, axis, func, indices, keep_remaining=False\n    ):\n        \n        return self.data.apply_func_to_select_indices_along_full_axis(\n            axis, func, indices, keep_remaining\n        )", "docstring": "Maps function to select indices along full axis.\n\nArgs:\naxis: 0 for columns and 1 for rows.\nfunc: Callable mapping function over the BlockParitions.\nindices: indices along axis to map over.\nkeep_remaining: True if keep indices where function was not applied.\n\nReturns:\nBaseFrameManager containing the result of mapping func over axis on indices.", "source": "juraj-google-style"}
{"code": "def _FormatAttrToken(self, token_data):\n    \n    return {\n        'mode': token_data.file_mode,\n        'uid': token_data.user_identifier,\n        'gid': token_data.group_identifier,\n        'system_id': token_data.file_system_identifier,\n        'node_id': token_data.file_identifier,\n        'device': token_data.device}", "docstring": "Formats an attribute token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_attr32|bsm_token_data_attr64): AUT_ATTR32 or\nAUT_ATTR64 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):\n    input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(input_tensor)\n    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n        return input_tensor\n    with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value)):\n        if ignore_value is None:\n            if input_tensor.dtype == dtypes.string:\n                ignore_value = ''\n            elif input_tensor.dtype.is_integer:\n                ignore_value = -1\n            else:\n                ignore_value = input_tensor.dtype.as_numpy_dtype()\n        ignore_value = math_ops.cast(ignore_value, input_tensor.dtype, name='ignore_value')\n        indices = array_ops.where_v2(math_ops.not_equal(input_tensor, ignore_value), name='indices')\n        return sparse_tensor_lib.SparseTensor(indices=indices, values=array_ops.gather_nd(input_tensor, indices, name='values'), dense_shape=array_ops.shape(input_tensor, out_type=dtypes.int64, name='dense_shape'))", "docstring": "Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.\n\nIf `input_tensor` is already a `SparseTensor`, just return it.\n\nArgs:\ninput_tensor: A string or integer `Tensor`.\nignore_value: Entries in `dense_tensor` equal to this value will be absent\nfrom the resulting `SparseTensor`. If `None`, default value of\n`dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).\n\nReturns:\nA `SparseTensor` with the same shape as `input_tensor`.\n\nRaises:\nValueError: when `input_tensor`'s rank is `None`.", "source": "github-repos"}
{"code": "def find_untranscribed_wavs(wav_path: Path, transcription_path: Path, label_type: str) -> List[str]:\n    \n    audio_files = wav_path.glob(\"***.{}\".format(label_type))\n\n    transcription_file_prefixes = [t_file.stem for t_file in transcription_files]\n\n    untranscribed_prefixes = [] \n    for a_file in audio_files:\n        if a_file.stem not in transcription_file_prefixes:\n            untranscribed_prefixes.append(a_file.stem)\n    return untranscribed_prefixes", "docstring": "Find the prefixes for all the wav files that do not have an associated transcription\nArgs:\nwav_path: Path to search for wav files in\ntranscription_path: Path to search for transcriptions in\nlabel_type: The type of labels for transcriptions. Eg \"phonemes\" \"phonemes_and_tones\"\nReturns:\nA list of all untranscribed prefixes", "source": "juraj-google-style"}
{"code": "def AddNewSignature(self, pattern, offset=None):\n    self.signatures.append(Signature(pattern, offset=offset))", "docstring": "Adds a signature.\n\nArgs:\npattern (bytes): pattern of the signature.\noffset (int): offset of the signature. None is used to indicate\nthe signature has no offset. A positive offset is relative from\nthe start of the data a negative offset is relative from the end\nof the data.", "source": "codesearchnet"}
{"code": "def round(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.round, tf.float32)", "docstring": "Returns a TensorFluent for the round function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the round function.", "source": "codesearchnet"}
{"code": "def header(self, sheet, name):\n        \n\n        header = sheet.row(0)\n        for i, column in enumerate(self.headers[name]):\n            header.write(i, self.headers[name][i])", "docstring": "Write sheet header.\nArgs:\nsheet: (xlwt.Worksheet.Worksheet) instance of xlwt sheet.\nname: (unicode) name of sheet.", "source": "juraj-google-style"}
{"code": "def write_signatures(self, signatures):\n    self.fileobj.seek(self.signature_offset)\n    sig_entries = [dict(algorithm_id=id_, size=len(sig), signature=sig) for (id_, sig) in signatures]\n    sigs = sigs_header.build(dict(filesize=self.filesize, count=len(signatures), sigs=sig_entries))\n    self.fileobj.write(sigs)\n    signatures_len = len(sigs)\n    self.additional_offset = (self.signature_offset + signatures_len)\n    if (not (self.additional_offset == self.fileobj.tell())):\n        raise IOError('ended up at unexpected offset')", "docstring": "Write signature data to the MAR file.\n\nArgs:\nsignatures (list): list of signature tuples of the form\n(algorithm_id, signature_data)", "source": "codesearchnet"}
{"code": "def is_null_merge(self):\n    return not bool(self._spec.to_string())", "docstring": "Indicate whether the wrapped spec is empty.\n\nIn the degenerate case where self._spec is an empty specification, a caller\nmay wish to skip a merge step entirely. (However this class does not have\nenough information to make that determination.)\n\nReturns:\nA boolean indicating whether a device merge will be trivial.", "source": "github-repos"}
{"code": "def read_tracers_h5(xdmf_file, infoname, snapshot, position):\n    \n    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n    tra = {}\n    tra[infoname] = [{}, {}]  \n    if position:\n        for axis in 'xyz':\n            tra[axis] = [{}, {}]\n    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):\n        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))\n        if position:\n            for data_attr in elt_subdomain.findall('Geometry'):\n                for data_item, axis in zip(data_attr.findall('DataItem'),\n                                           'xyz'):\n                    icore, data = _get_field(xdmf_file, data_item)\n                    tra[axis][ibk][icore] = data\n        for data_attr in elt_subdomain.findall('Attribute'):\n            if data_attr.get('Name') != infoname:\n                continue\n            icore, data = _get_field(xdmf_file, data_attr.find('DataItem'))\n            tra[infoname][ibk][icore] = data\n    for info in tra:\n        tra[info] = [trab for trab in tra[info] if trab]  \n        for iblk, trab in enumerate(tra[info]):\n            tra[info][iblk] = np.concatenate([trab[icore]\n                                              for icore in range(len(trab))])\n    return tra", "docstring": "Extract tracers data from hdf5 files.\n\nArgs:\nxdmf_file (:class:`pathlib.Path`): path of the xdmf file.\ninfoname (str): name of information to extract.\nsnapshot (int): snapshot number.\nposition (bool): whether to extract position of tracers.\nReturns:\ndict of list of numpy.array:\nTracers data organized by attribute and block.", "source": "juraj-google-style"}
{"code": "def copyglob(src: str, dest: str, allow_nothing: bool=False, allow_nonfiles: bool=False) -> None:\n    something = False\n    for filename in glob.glob(src):\n        if (allow_nonfiles or os.path.isfile(filename)):\n            shutil.copy(filename, dest)\n            something = True\n    if (something or allow_nothing):\n        return\n    raise ValueError('No files found matching: {}'.format(src))", "docstring": "Copies files whose filenames match the glob src\" into the directory\n\"dest\". Raises an error if no files are copied, unless allow_nothing is\nTrue.\n\nArgs:\nsrc: source glob (e.g. ``/somewhere/*.txt``)\ndest: destination directory\nallow_nothing: don't raise an exception if no files are found\nallow_nonfiles: copy things that are not files too (as judged by\n:func:`os.path.isfile`).\n\nRaises:\nValueError: if no files are found and ``allow_nothing`` is not set", "source": "codesearchnet"}
{"code": "async def _call_rpc(self, header):\n    (length, _, cmd, feature, address) = struct.unpack('<BBBBB', bytes(header))\n    rpc_id = ((feature << 8) | cmd)\n    payload = self.rpc_payload[:length]\n    self._logger.debug('Calling RPC %d:%04X with %s', address, rpc_id, binascii.hexlify(payload))\n    exception = None\n    response = None\n    try:\n        response = (await self.send_rpc(self.CLIENT_ID, str(self.device.iotile_id), address, rpc_id, bytes(payload), timeout=30.0))\n    except VALID_RPC_EXCEPTIONS as err:\n        exception = err\n    except Exception as err:\n        self._logger.exception('Error calling RPC %d:%04X', address, rpc_id)\n        exception = err\n    (status, response) = pack_rpc_response(response, exception)\n    resp_header = struct.pack('<BBBB', status, 0, 0, len(response))\n    (await self._send_notification(self.ReceiveHeaderHandle, resp_header))\n    if (len(response) > 0):\n        (await self._send_notification(self.ReceivePayloadHandle, response))", "docstring": "Call an RPC given a header and possibly a previously sent payload\n\nArgs:\nheader (bytearray): The RPC header we should call", "source": "codesearchnet"}
{"code": "def create(filename: str, layers: Union[(np.ndarray, Dict[(str, np.ndarray)], loompy.LayerManager)], row_attrs: Union[(loompy.AttributeManager, Dict[(str, np.ndarray)])], col_attrs: Union[(loompy.AttributeManager, Dict[(str, np.ndarray)])], *, file_attrs: Dict[(str, str)]=None) -> None:\n    if isinstance(row_attrs, loompy.AttributeManager):\n        row_attrs = {k: v[:] for (k, v) in row_attrs.items()}\n    if isinstance(col_attrs, loompy.AttributeManager):\n        col_attrs = {k: v[:] for (k, v) in col_attrs.items()}\n    if (isinstance(layers, np.ndarray) or scipy.sparse.issparse(layers)):\n        layers = {'': layers}\n    elif isinstance(layers, loompy.LayerManager):\n        layers = {k: v[(:, :)] for (k, v) in layers.items()}\n    if ('' not in layers):\n        raise ValueError('Data for default layer must be provided')\n    shape = layers[''].shape\n    if ((shape[0] == 0) or (shape[1] == 0)):\n        raise ValueError('Main matrix cannot be empty')\n    for (name, layer) in layers.items():\n        if (layer.shape != shape):\n            raise ValueError(f\"Layer '{name}' is not the same shape as the main matrix\")\n    for (name, ra) in row_attrs.items():\n        if (ra.shape[0] != shape[0]):\n            raise ValueError(f\"Row attribute '{name}' is not the same length ({ra.shape[0]}) as number of rows in main matrix ({shape[0]})\")\n    for (name, ca) in col_attrs.items():\n        if (ca.shape[0] != shape[1]):\n            raise ValueError(f\"Column attribute '{name}' is not the same length ({ca.shape[0]}) as number of columns in main matrix ({shape[1]})\")\n    try:\n        with new(filename, file_attrs=file_attrs) as ds:\n            for (key, vals) in layers.items():\n                ds.layer[key] = vals\n            for (key, vals) in row_attrs.items():\n                ds.ra[key] = vals\n            for (key, vals) in col_attrs.items():\n                ds.ca[key] = vals\n    except ValueError as ve:\n        if os.path.exists(filename):\n            os.remove(filename)\n        raise ve", "docstring": "Create a new Loom file from the given data.\n\nArgs:\nfilename (str):         The filename (typically using a ``.loom`` file extension)\nlayers:\t\t\t\t\tOne of the following:\n\n* Two-dimensional (N-by-M) numpy ndarray of float values\n* Sparse matrix (e.g. :class:`scipy.sparse.csr_matrix`)\n* Dictionary of named layers, each an N-by-M ndarray or sparse matrix\n* A :class:`.LayerManager`, with each layer an N-by-M ndarray\nrow_attrs (dict):       Row attributes, where keys are attribute names and values\nare numpy arrays (float or string) of length N\ncol_attrs (dict):       Column attributes, where keys are attribute names and\nvalues are numpy arrays (float or string) of length M\nfile_attrs (dict):      Global attributes, where keys are attribute names and\nvalues are strings\nReturns:\nNothing\n\nRemarks:\nIf the file exists, it will be overwritten.", "source": "codesearchnet"}
{"code": "def Tensors(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.Tensors(tag)", "docstring": "Retrieve the tensor events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.TensorEvent`s.", "source": "codesearchnet"}
{"code": "def create_from_json(cls, json_data):\n        \n        prop = Property()\n        address_info = json_data[\"address_info\"]\n        prop.address = address_info[\"address\"]\n        prop.block_id = address_info[\"block_id\"]\n        prop.zipcode = address_info[\"zipcode\"]\n        prop.zipcode_plus4 = address_info[\"zipcode_plus4\"]\n        prop.address_full = address_info[\"address_full\"]\n        prop.city = address_info[\"city\"]\n        prop.county_fips = address_info[\"county_fips\"]\n        prop.geo_precision = address_info[\"geo_precision\"]\n        prop.lat = address_info[\"lat\"]\n        prop.lng = address_info[\"lng\"]\n        prop.slug = address_info[\"slug\"]\n        prop.state = address_info[\"state\"]\n        prop.unit = address_info[\"unit\"]\n\n        prop.meta = None\n        if \"meta\" in json_data:\n            prop.meta = json_data[\"meta\"]\n\n        prop.component_results = _create_component_results(json_data, \"address_info\")\n\n        return prop", "docstring": "Deserialize property json data into a Property object\n\nArgs:\njson_data (dict): The json data for this property\n\nReturns:\nProperty object", "source": "juraj-google-style"}
{"code": "def AddNEP5Token(self, token):\n        \n        if token.ScriptHash.ToBytes() in self._tokens.keys():\n            logger.error(\"Token already in wallet\")\n            return\n        self._tokens[token.ScriptHash.ToBytes()] = token", "docstring": "Add a NEP-5 compliant token to the wallet.\n\nArgs:\ntoken (NEP5Token): an instance of type neo.Wallets.NEP5Token.\n\nNote:\nPrints a warning to the console if the token already exists in the wallet.", "source": "juraj-google-style"}
{"code": "def __init__(self, storage_writer):\n    \n    super(StorageMergeReader, self).__init__()\n    self._storage_writer = storage_writer", "docstring": "Initializes a storage merge reader.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.", "source": "juraj-google-style"}
{"code": "def publish(self, object_id: str, event_type: str, event_data: dict=None):\n    object_key = SchedulingObject.get_key(self.type, object_id)\n    publish(event_type=event_type, event_data=event_data, object_type=self.type, object_id=object_id, object_key=object_key, origin=None)", "docstring": "Publish a scheduling object event.\n\nArgs:\nobject_id (str): ID of the scheduling object\nevent_type (str): Type of event.\nevent_data (dict, optional): Event data.", "source": "codesearchnet"}
{"code": "def indent_css(f, output):\n    \n    line_count = get_line_count(f)\n    f = open(f, 'r+')\n    output = open(output, 'r+')\n    for line in range(line_count):\n        string = f.readline().rstrip()\n        if len(string) > 0:\n            if string[-1] == \";\":\n                output.write(\"    \" + string + \"\\n\")\n            else:\n                output.write(string + \"\\n\")\n    output.close()\n    f.close()", "docstring": "Indentes css that has not been indented and saves it to a new file.\nA new file is created if the output destination does not already exist.\n\nArgs:\nf: string, path to file.\n\noutput: string, path/name of the output file (e.g. /directory/output.css).\nprint type(response.read())\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def list_channels(self, collection_name, experiment_name):\n        \n        dont_care = 'image'\n        chan = ChannelResource(\n            name='', collection_name=collection_name,\n            experiment_name=experiment_name, type=dont_care)\n        return self._list_resource(chan)", "docstring": "List all channels belonging to the named experiment that is part\nof the named collection.\n\nArgs:\ncollection_name (string): Name of the parent collection.\nexperiment_name (string): Name of the parent experiment.\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "class TFCvtStage(keras.layers.Layer):\n\n    def __init__(self, config: CvtConfig, stage: int, **kwargs):\n        super().__init__(**kwargs)\n        self.config = config\n        self.stage = stage\n        if self.config.cls_token[self.stage]:\n            self.cls_token = self.add_weight(shape=(1, 1, self.config.embed_dim[-1]), initializer=get_initializer(self.config.initializer_range), trainable=True, name='cvt.encoder.stages.2.cls_token')\n        self.embedding = TFCvtEmbeddings(self.config, patch_size=config.patch_sizes[self.stage], num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1], stride=config.patch_stride[self.stage], embed_dim=config.embed_dim[self.stage], padding=config.patch_padding[self.stage], dropout_rate=config.drop_rate[self.stage], name='embedding')\n        drop_path_rates = tf.linspace(0.0, config.drop_path_rate[self.stage], config.depth[stage])\n        drop_path_rates = [x.numpy().item() for x in drop_path_rates]\n        self.layers = [TFCvtLayer(config, num_heads=config.num_heads[self.stage], embed_dim=config.embed_dim[self.stage], kernel_size=config.kernel_qkv[self.stage], stride_q=config.stride_q[self.stage], stride_kv=config.stride_kv[self.stage], padding_q=config.padding_q[self.stage], padding_kv=config.padding_kv[self.stage], qkv_projection_method=config.qkv_projection_method[self.stage], qkv_bias=config.qkv_bias[self.stage], attention_drop_rate=config.attention_drop_rate[self.stage], drop_rate=config.drop_rate[self.stage], mlp_ratio=config.mlp_ratio[self.stage], drop_path_rate=drop_path_rates[self.stage], with_cls_token=config.cls_token[self.stage], name=f'layers.{j}') for j in range(config.depth[self.stage])]\n\n    def call(self, hidden_state: tf.Tensor, training: bool=False):\n        cls_token = None\n        hidden_state = self.embedding(hidden_state, training)\n        batch_size, height, width, num_channels = shape_list(hidden_state)\n        hidden_size = height * width\n        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))\n        if self.config.cls_token[self.stage]:\n            cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)\n            hidden_state = tf.concat((cls_token, hidden_state), axis=1)\n        for layer in self.layers:\n            layer_outputs = layer(hidden_state, height, width, training=training)\n            hidden_state = layer_outputs\n        if self.config.cls_token[self.stage]:\n            cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)\n        hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))\n        return (hidden_state, cls_token)\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'embedding', None) is not None:\n            with tf.name_scope(self.embedding.name):\n                self.embedding.build(None)\n        if getattr(self, 'layers', None) is not None:\n            for layer in self.layers:\n                with tf.name_scope(layer.name):\n                    layer.build(None)", "docstring": "Cvt stage (encoder block). Each stage has 2 parts :\n- (1) A Convolutional Token Embedding layer\n- (2) A Convolutional Transformer Block (layer).\nThe classification token is added only in the last stage.\n\nArgs:\nconfig ([`CvtConfig`]): Model configuration class.\nstage (`int`): Stage number.", "source": "github-repos"}
{"code": "def by_geopoint(self, lat, long):\n    (header, content) = self._http_request(self.BASE_URL, lat=lat, long=long)\n    return json.loads(content)", "docstring": "Perform a Yelp Neighborhood API Search based on a geopoint.\n\nArgs:\nlat      - geopoint latitude\nlong     - geopoint longitude", "source": "codesearchnet"}
{"code": "def init(self, force_deploy=False, client=None):\n    _force_deploy = self.provider_conf.force_deploy\n    self.provider_conf.force_deploy = (_force_deploy or force_deploy)\n    self._provider_conf = self.provider_conf.to_dict()\n    r = api.Resources(self._provider_conf, client=client)\n    r.launch()\n    roles = r.get_roles()\n    networks = r.get_networks()\n    return (_to_enos_roles(roles), _to_enos_networks(networks))", "docstring": "Reserve and deploys the nodes according to the resources section\n\nIn comparison to the vagrant provider, networks must be characterized\nas in the networks key.\n\nArgs:\nforce_deploy (bool): True iff the environment must be redeployed\nRaises:\nMissingNetworkError: If one network is missing in comparison to\nwhat is claimed.\nNotEnoughNodesError: If the `min` constraints can't be met.", "source": "codesearchnet"}
{"code": "def delete(filething):\n    \n\n    dsf_file = DSFFile(filething.fileobj)\n\n    if dsf_file.dsd_chunk.offset_metdata_chunk != 0:\n        id3_location = dsf_file.dsd_chunk.offset_metdata_chunk\n        dsf_file.dsd_chunk.offset_metdata_chunk = 0\n        dsf_file.dsd_chunk.write()\n\n        filething.fileobj.seek(id3_location)\n        filething.fileobj.truncate()", "docstring": "Remove tags from a file.\n\nArgs:\nfilething (filething)\nRaises:\nmutagen.MutagenError", "source": "juraj-google-style"}
{"code": "def timeout(seconds=0, minutes=0, hours=0):\n    \n\n    limit = seconds + 60 * minutes + 3600 * hours\n\n    def handler(signum, frame):  \n        raise TimeoutError('timed out after {} seconds'.format(limit))\n\n    try:\n        signal.signal(signal.SIGALRM, handler)\n        signal.setitimer(signal.ITIMER_REAL, limit)\n        yield\n    finally:\n        signal.alarm(0)", "docstring": "Add a signal-based timeout to any block of code.\nIf multiple time units are specified, they will be added together to determine time limit.\nUsage:\nwith timeout(seconds=5):\nmy_slow_function(...)\nArgs:\n- seconds: The time limit, in seconds.\n- minutes: The time limit, in minutes.\n- hours: The time limit, in hours.", "source": "juraj-google-style"}
{"code": "def _static_check(self):\n    my_dtype = self.dtype\n    if self._uniform_row_length is not None:\n        if self._uniform_row_length.dtype != my_dtype:\n            raise ValueError('_uniform_row_length.dtype=' + str(self._uniform_row_length.dtype) + ', not ' + str(my_dtype))\n    if self._row_lengths is not None and self._row_lengths.dtype != my_dtype:\n        raise ValueError('_row_lengths.dtype=' + str(self._row_lengths.dtype) + ', not ' + str(my_dtype))\n    if self._value_rowids is not None and self._value_rowids.dtype != my_dtype:\n        raise ValueError('_value_rowids.dtype=' + str(self._value_rowids.dtype) + ', not ' + str(my_dtype))\n    if self._nrows is not None and self._nrows.dtype != my_dtype:\n        raise ValueError('_nrows.dtype=' + str(self._nrows.dtype) + ', not ' + str(my_dtype))", "docstring": "Checks if the object is internally consistent.\n\nRaises:\nValueError if inconsistent.", "source": "github-repos"}
{"code": "def compile_function(node, globals_=None):\n    if (not isinstance(node, gast.AST)):\n        if (not isinstance(node, six.string_types)):\n            raise TypeError\n        node = gast.parse(node)\n    if isinstance(node, gast.Module):\n        for succ in node.body:\n            if isinstance(succ, gast.FunctionDef):\n                name = succ.name\n                break\n        else:\n            raise ValueError('no function found')\n    elif isinstance(node, gast.FunctionDef):\n        name = node.name\n    else:\n        raise TypeError\n    module = compile_file(node, globals_)\n    return getattr(module, name)", "docstring": "Convert an AST or string into a function with inspectable source.\n\nThis function uses `compile_file` internally, but instead of returning the\nentire module it will return the function only.\n\nArgs:\nnode: A `FunctionDef` node or a `Module` node which contains at least one\n`FunctionDef` node. If a module contains multiple functions, a handle\nto the first one will be returned.\nglobals_: See `compile_file`\n\nReturns:\nA handle to the compiled function.\n\nRaises:\nTypeError: If the input is not a string or AST.\nValueError: If no function can be found.", "source": "codesearchnet"}
{"code": "def get_effect(self, label: str) -> Effect:\n        \n        return self._get_resource(label, self._effects, \"effect\")", "docstring": "Get an effect instance by label\n\nArgs:\nlabel (str): The label for the effect instance\n\nReturns:\nEffect class instance", "source": "juraj-google-style"}
{"code": "def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, xpaths: Optional[List[List[int]]]=None, node_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n    batch_outputs = {}\n    for idx, example in enumerate(zip(batch_text_or_text_pairs, xpaths)):\n        batch_text_or_text_pair, xpaths_example = example\n        outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, xpaths_example, node_labels=node_labels[idx] if node_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens.\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs", "source": "github-repos"}
{"code": "async def get_pushlog_info(decision_link):\n    source_env_prefix = decision_link.context.config['source_env_prefix']\n    repo = get_repo(decision_link.task, source_env_prefix)\n    rev = get_revision(decision_link.task, source_env_prefix)\n    context = decision_link.context\n    pushlog_url = context.config['pushlog_url'].format(repo=repo, revision=rev)\n    log.info('Pushlog url {}'.format(pushlog_url))\n    file_path = os.path.join(context.config['work_dir'], '{}_push_log.json'.format(decision_link.name))\n    pushlog_info = (await load_json_or_yaml_from_url(context, pushlog_url, file_path, overwrite=False))\n    if (len(pushlog_info['pushes']) != 1):\n        log.warning('Pushlog error: expected a single push at {} but got {}!'.format(pushlog_url, pushlog_info['pushes']))\n    return pushlog_info", "docstring": "Get pushlog info for a decision LinkOfTrust.\n\nArgs:\ndecision_link (LinkOfTrust): the decision link to get pushlog info about.\n\nReturns:\ndict: pushlog info.", "source": "codesearchnet"}
{"code": "def wb020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `wb020`'.format(value))\n    self._wb020 = value", "docstring": "Corresponds to IDD Field `wb020`\nWet-bulb temperature corresponding to 02.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `wb020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def prune_graph(graph_str, package_name):\n    g = read_dot(graph_str)\n    nodes = set()\n    for (node, attrs) in g.node_attr.iteritems():\n        attr = [x for x in attrs if (x[0] == 'label')]\n        if attr:\n            label = attr[0][1]\n            try:\n                req_str = _request_from_label(label)\n                request = PackageRequest(req_str)\n            except PackageRequestError:\n                continue\n            if (request.name == package_name):\n                nodes.add(node)\n    if (not nodes):\n        raise ValueError(('The package %r does not appear in the graph.' % package_name))\n    g_rev = g.reverse()\n    accessible_nodes = set()\n    access = accessibility(g_rev)\n    for node in nodes:\n        nodes_ = access.get(node, [])\n        accessible_nodes |= set(nodes_)\n    inaccessible_nodes = (set(g.nodes()) - accessible_nodes)\n    for node in inaccessible_nodes:\n        g.del_node(node)\n    return write_dot(g)", "docstring": "Prune a package graph so it only contains nodes accessible from the\ngiven package.\n\nArgs:\ngraph_str (str): Dot-language graph string.\npackage_name (str): Name of package of interest.\n\nReturns:\nPruned graph, as a string.", "source": "codesearchnet"}
{"code": "def add(self, layer):\n    if hasattr(layer, '_keras_history'):\n        origin_layer = layer._keras_history[0]\n        if isinstance(origin_layer, input_layer.InputLayer):\n            layer = origin_layer\n            logging.warning('Please add `keras.layers.InputLayer` instead of `keras.Input` to Sequential model. `keras.Input` is intended to be used by Functional model.')\n    if isinstance(layer, module.Module):\n        if not isinstance(layer, base_layer.Layer):\n            layer = functional.ModuleWrapper(layer)\n    else:\n        raise TypeError('The added layer must be an instance of class Layer. Found: ' + str(layer))\n    tf_utils.assert_no_legacy_layers([layer])\n    if not self._is_layer_name_unique(layer):\n        raise ValueError('All layers added to a Sequential model should have unique names. Name \"%s\" is already the name of a layer in this model. Update the `name` argument to pass a unique name.' % (layer.name,))\n    self.built = False\n    set_inputs = False\n    self._maybe_create_attribute('_self_tracked_trackables', [])\n    if not self._self_tracked_trackables:\n        if isinstance(layer, input_layer.InputLayer):\n            set_inputs = True\n        else:\n            batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)\n            if batch_shape:\n                x = input_layer.Input(batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')\n                layer(x)\n                set_inputs = True\n        if set_inputs:\n            outputs = nest.flatten(layer._inbound_nodes[-1].outputs)\n            if len(outputs) != 1:\n                raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n            self.outputs = outputs\n            self.inputs = layer_utils.get_source_inputs(self.outputs[0])\n            self.built = True\n            self._has_explicit_input_shape = True\n    elif self.outputs:\n        output_tensor = layer(self.outputs[0])\n        if len(nest.flatten(output_tensor)) != 1:\n            raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n        self.outputs = [output_tensor]\n        self.built = True\n    if set_inputs or self._graph_initialized:\n        self._init_graph_network(self.inputs, self.outputs)\n        self._graph_initialized = True\n    else:\n        self._self_tracked_trackables.append(layer)\n        self._handle_deferred_layer_dependencies([layer])\n    self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)", "docstring": "Adds a layer instance on top of the layer stack.\n\nArgs:\nlayer: layer instance.\n\nRaises:\nTypeError: If `layer` is not a layer instance.\nValueError: In case the `layer` argument does not\nknow its input shape.\nValueError: In case the `layer` argument has\nmultiple output tensors, or is already connected\nsomewhere else (forbidden in `Sequential` models).", "source": "github-repos"}
{"code": "def _InfoBackup(component):\n    info = {}\n    info['type_name'] = type(component).__name__\n    info['string_form'] = str(component)\n    filename, lineno = GetFileAndLine(component)\n    info['file'] = filename\n    info['line'] = lineno\n    info['docstring'] = inspect.getdoc(component)\n    try:\n        info['length'] = str(len(component))\n    except (TypeError, AttributeError):\n        pass\n    return info", "docstring": "Returns a dict with information about the given component.\n\nThis function is to be called only in the case that IPython's\noinspect module is not available. The info dict it produces may\ncontain less information that contained in the info dict produced\nby oinspect.\n\nArgs:\ncomponent: The component to analyze.\nReturns:\nA dict with information about the component.", "source": "github-repos"}
{"code": "def selfSignCert(self, cert, pkey):\n        \n        cert.set_issuer(cert.get_subject())\n        cert.sign(pkey, self.signing_digest)", "docstring": "Self-sign a certificate.\n\nArgs:\ncert (OpenSSL.crypto.X509): The certificate to sign.\npkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.\n\nExamples:\nSign a given certificate with a given private key:\n\ncdir.selfSignCert(mycert, myotherprivatekey)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def data(self, rows=None):\n    \n    rows = tf.range(self._capacity) if rows is None else rows\n    assert rows.shape.ndims == 1\n    episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)\n    length = tf.gather(self._length, rows)\n    return episode, length", "docstring": "Access a batch of episodes from the memory.\n\nPadding elements after the length of each episode are unspecified and might\ncontain old data.\n\nArgs:\nrows: Episodes to select, defaults to all.\n\nReturns:\nTuple containing a tuple of transition quantities with batch and time\ndimensions, and a batch of sequence lengths.", "source": "juraj-google-style"}
{"code": "def bind_parameters(self, value_dict):\n    new_circuit = self.copy()\n    if (value_dict.keys() > self.parameters):\n        raise QiskitError('Cannot bind parameters ({}) not present in the circuit.'.format([str(p) for p in (value_dict.keys() - self.parameters)]))\n    for (parameter, value) in value_dict.items():\n        new_circuit._bind_parameter(parameter, value)\n    for parameter in value_dict:\n        del new_circuit._parameter_table[parameter]\n    return new_circuit", "docstring": "Assign parameters to values yielding a new circuit.\n\nArgs:\nvalue_dict (dict): {parameter: value, ...}\n\nRaises:\nQiskitError: If value_dict contains parameters not present in the circuit\n\nReturns:\nQuantumCircuit: copy of self with assignment substitution.", "source": "codesearchnet"}
{"code": "def acos(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.acos, tf.float32)", "docstring": "Returns a TensorFluent for the arccos function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the arccos function.", "source": "juraj-google-style"}
{"code": "def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:\n    for (name, observable) in observables.items():\n        if ((observable.track_pt_bin == track_pt_bin) and (observable.jet_pt_bin == jet_pt_bin)):\n            return histogram.Histogram1D.from_existing_hist(observable.hist)\n    raise ValueError('Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}')", "docstring": "Get a Histogram1D associated with the selected jet and track pt bins.\n\nThis is often used to retrieve data for fitting.\n\nArgs:\nobservables (dict): The observables from which the hist should be retrieved.\ntrack_pt_bin (int): Track pt bin of the desired hist.\njet_ptbin (int): Jet pt bin of the desired hist.\nReturns:\nHistogram1D: Converted TH1 or uproot histogram.\nRaises:\nValueError: If the requested observable couldn't be found.", "source": "codesearchnet"}
{"code": "def GetNumberOfEventSources(self):\n    number_of_event_sources = self._CountStoredAttributeContainers(self._CONTAINER_TYPE_EVENT_SOURCE)\n    number_of_event_sources += self._GetNumberOfSerializedAttributeContainers(self._CONTAINER_TYPE_EVENT_SOURCE)\n    return number_of_event_sources", "docstring": "Retrieves the number event sources.\n\nReturns:\nint: number of event sources.", "source": "codesearchnet"}
{"code": "def index_buffer(self, buffer, index_element_size=4):\n    if (not (type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes])):\n        raise VAOError('buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance')\n    if isinstance(buffer, numpy.ndarray):\n        buffer = self.ctx.buffer(buffer.tobytes())\n    if isinstance(buffer, bytes):\n        buffer = self.ctx.buffer(data=buffer)\n    self._index_buffer = buffer\n    self._index_element_size = index_element_size", "docstring": "Set the index buffer for this VAO\n\nArgs:\nbuffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``\n\nKeyword Args:\nindex_element_size (int): Byte size of each element. 1, 2 or 4", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    if vision_feature_select_strategy not in ['default', 'full']:\n        raise ValueError(f'Unexpected select feature strategy: {self.config.vision_feature_select_strategy}')\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n    image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs)\n    if isinstance(vision_feature_layer, int):\n        selected_image_feature = image_outputs.hidden_states[vision_feature_layer]\n        if vision_feature_select_strategy == 'default':\n            selected_image_feature = selected_image_feature[:, 1:]\n    else:\n        hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        if vision_feature_select_strategy == 'default':\n            hs_pool = [hs[:, 1:] for hs in hs_pool]\n        selected_image_feature = torch.cat(hs_pool, dim=-1)\n    image_features = self.multi_modal_projector(selected_image_feature)\n    if 'image_sizes' in kwargs:\n        split_sizes = [height \n        image_features = torch.split(image_features.squeeze(0), split_sizes)\n    else:\n        image_features = list(image_features)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):\nThe tensors corresponding to the input images.\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`, *optional*):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def get_changeset(changeset):\n    url = 'https:\n    return ET.fromstring(requests.get(url).content)", "docstring": "Get the changeset using the OSM API and return the content as a XML\nElementTree.\n\nArgs:\nchangeset: the id of the changeset.", "source": "codesearchnet"}
{"code": "def stitch_values(values_and_indices_list):\n    length = 0\n    for values_and_indices in values_and_indices_list:\n        length += len(values_and_indices[0])\n    result = [None] * length\n    for values_and_indices in values_and_indices_list:\n        if values_and_indices and values_and_indices[0]:\n            for v, i in zip(*values_and_indices):\n                assert result[i] is None\n                result[i] = v\n    return result", "docstring": "Stitch values together according to their indices.\n\nArgs:\nvalues_and_indices_list: a list of tuples of values and indices indicating\nthe values and positions in the returned list.\n\nReturns:\na stitched list of values.", "source": "github-repos"}
{"code": "def _ref(self):\n    return self._variable", "docstring": "Returns a reference to this variable.\n\nYou usually do not need to call this method as all ops that need a reference\nto the variable call it automatically.\n\nReturns is a `Tensor` which holds a reference to the variable.  You can\nassign a new value to the variable by passing the tensor to an assign op.\nSee `tf.Variable.value` if you want to get the value of the\nvariable.\n\nReturns:\nA `Tensor` that is a reference to the variable.", "source": "github-repos"}
{"code": "def peek(self) -> str:\n    try:\n        return self.input[self.offset]\n    except IndexError:\n        raise EndOfInput(self)", "docstring": "Return the next character without advancing offset.\n\nRaises:\nEndOfInput: If past the end of `self.input`.", "source": "codesearchnet"}
{"code": "def device_id_to_slug(did):\n    try:\n        device_slug = IOTileDeviceSlug(did, allow_64bits=False)\n    except ValueError:\n        raise ArgumentError('Unable to recognize {} as a device id'.format(did))\n    return str(device_slug)", "docstring": "Converts a device id into a correct device slug.\n\nArgs:\ndid (long) : A device id\ndid (string) : A device slug in the form of XXXX, XXXX-XXXX-XXXX, d--XXXX, d--XXXX-XXXX-XXXX-XXXX\nReturns:\nstr: The device slug in the d--XXXX-XXXX-XXXX-XXXX format\nRaises:\nArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string", "source": "codesearchnet"}
{"code": "def _detect(self):\n    results = []\n    for c in self.contracts:\n        for f in c.functions:\n            if (f.contract != c):\n                continue\n            if (f.view or f.pure):\n                if f.contains_assembly:\n                    attr = ('view' if f.view else 'pure')\n                    info = '{}.{} ({}) is declared {} but contains assembly code\\n'\n                    info = info.format(f.contract.name, f.name, f.source_mapping_str, attr)\n                    json = self.generate_json_result(info)\n                    self.add_function_to_json(f, json)\n                    json['elements'].append({'type': 'info', 'contains_assembly': True})\n                    results.append(json)\n                variables_written = f.all_state_variables_written()\n                if variables_written:\n                    attr = ('view' if f.view else 'pure')\n                    info = '{}.{} ({}) is declared {} but changes state variables:\\n'\n                    info = info.format(f.contract.name, f.name, f.source_mapping_str, attr)\n                    for variable_written in variables_written:\n                        info += '\\t- {}.{}\\n'.format(variable_written.contract.name, variable_written.name)\n                    json = self.generate_json_result(info)\n                    self.add_function_to_json(f, json)\n                    self.add_variables_to_json(variables_written, json)\n                    json['elements'].append({'type': 'info', 'contains_assembly': False})\n                    results.append(json)\n    return results", "docstring": "Detect the constant function changing the state\n\nRecursively visit the calls\nReturns:\nlist: {'vuln', 'filename,'contract','func','#varsWritten'}", "source": "codesearchnet"}
{"code": "def extract_cookies(self, response, request, referrer_host=None):\n    new_response = HTTPResponseInfoWrapper(response)\n    new_request = convert_http_request(request, referrer_host)\n    self._cookie_jar.extract_cookies(new_response, new_request)", "docstring": "Wrapped ``extract_cookies``.\n\nArgs:\nresponse: An instance of :class:`.http.request.Response`.\nrequest: An instance of :class:`.http.request.Request`.\nreferrer_host (str): An hostname or IP address of the referrer\nURL.", "source": "codesearchnet"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteHashes(self.HashStart)\n        if self.HashStop is not None:\n            writer.WriteUInt256(self.HashStop)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def save_checkpoint(model, filename, optimizer=None, meta=None):\n    \n    if meta is None:\n        meta = {}\n    elif not isinstance(meta, dict):\n        raise TypeError('meta must be a dict or None, but got {}'.format(\n            type(meta)))\n    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())\n\n    mmcv.mkdir_or_exist(osp.dirname(filename))\n    if hasattr(model, 'module'):\n        model = model.module\n\n    checkpoint = {\n        'meta': meta,\n        'state_dict': weights_to_cpu(model.state_dict())\n    }\n    if optimizer is not None:\n        checkpoint['optimizer'] = optimizer.state_dict()\n\n    torch.save(checkpoint, filename)", "docstring": "Save checkpoint to file.\n\nThe checkpoint will have 3 fields: ``meta``, ``state_dict`` and\n``optimizer``. By default ``meta`` will contain version and time info.\n\nArgs:\nmodel (Module): Module whose params are to be saved.\nfilename (str): Checkpoint filename.\noptimizer (:obj:`Optimizer`, optional): Optimizer to be saved.\nmeta (dict, optional): Metadata to be saved in checkpoint.", "source": "juraj-google-style"}
{"code": "def create_alias(alias_name, alias_command):\n    (alias_name, alias_command) = (alias_name.strip(), alias_command.strip())\n    alias_table = get_alias_table()\n    if (alias_name not in alias_table.sections()):\n        alias_table.add_section(alias_name)\n    alias_table.set(alias_name, 'command', alias_command)\n    _commit_change(alias_table)", "docstring": "Create an alias.\n\nArgs:\nalias_name: The name of the alias.\nalias_command: The command that the alias points to.", "source": "codesearchnet"}
{"code": "def sg_transpose(tensor, opt):\n    assert (opt.perm is not None), 'perm is mandatory'\n    return tf.transpose(tensor, opt.perm, name=opt.name)", "docstring": "r\"\"\"Permutes the dimensions according to `opt.perm`.\n\nSee `tf.transpose()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\nperm: A permutation of the dimensions of `tensor`. The target shape.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "codesearchnet"}
{"code": "def _get_format(format, fname, inp=None):\n    fmt = None\n    err = True\n    if (format is not None):\n        if (format in fmt_to_exts):\n            fmt = format\n            err = False\n    elif fname:\n        file_ext = os.path.splitext(fname)[1][len(os.path.extsep):]\n        for (fmt_name, exts) in fmt_to_exts.items():\n            if (file_ext in exts):\n                fmt = fmt_name\n                err = False\n    if (fmt is None):\n        if (inp is not None):\n            fmt = _guess_fmt_from_bytes(inp)\n            err = False\n    if err:\n        err_string = 'Failed to guess markup format based on: '\n        what = []\n        for (k, v) in {format: 'specified format argument', fname: 'filename', inp: 'input string'}.items():\n            if k:\n                what.append(v)\n        if (not what):\n            what.append('nothing to guess format from!')\n        err_string += ', '.join(what)\n        raise AnyMarkupError(err_string)\n    return fmt", "docstring": "Try to guess markup format of given input.\n\nArgs:\nformat: explicit format override to use\nfname: name of file, if a file was used to read `inp`\ninp: optional bytestring to guess format of (can be None, if markup\nformat is to be guessed only from `format` and `fname`)\nReturns:\nguessed format (a key of fmt_to_exts dict)\nRaises:\nAnyMarkupError if explicit format override has unsupported value\nor if it's impossible to guess the format", "source": "codesearchnet"}
{"code": "def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:\n    examples_len = len(y_true)\n    correct = sum([(y1 == y2) for (y1, y2) in zip(y_true, y_predicted)])\n    return ((correct / examples_len) if examples_len else 0)", "docstring": "Calculate accuracy in terms of absolute coincidence\n\nArgs:\ny_true: array of true values\ny_predicted: array of predicted values\n\nReturns:\nportion of absolutely coincidental samples", "source": "codesearchnet"}
{"code": "def AddForwardedIp(self, address, interface):\n    \n    address = address if IP_ALIAS_REGEX.match(address) else '%s/32' % address\n    args = ['add', 'to', 'local', address]\n    options = self._CreateRouteOptions(dev=interface)\n    self._RunIpRoute(args=args, options=options)", "docstring": "Configure a new IP address on the network interface.\n\nArgs:\naddress: string, the IP address to configure.\ninterface: string, the output device to use.", "source": "juraj-google-style"}
{"code": "def convert_drive(self, shift, instruction):\n    command_dict = {'name': instruction.command.name, 't0': (shift + instruction.start_time), 'ch': instruction.channels[0].name}\n    return self._qobj_model(**command_dict)", "docstring": "Return converted `PulseInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (PulseInstruction): drive instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "codesearchnet"}
{"code": "def __directory_list_descriptor(self, configs):\n    descriptor = {'kind': 'discovery\n    items = []\n    for config in configs:\n        item_descriptor = self.__item_descriptor(config)\n        if item_descriptor:\n            items.append(item_descriptor)\n    if items:\n        descriptor['items'] = items\n    return descriptor", "docstring": "Builds a directory list for an API.\n\nArgs:\nconfigs: List of dicts containing the service configurations to list.\n\nReturns:\nA dictionary that can be deserialized into JSON in discovery list format.\n\nRaises:\nApiConfigurationError: If there's something wrong with the API\nconfiguration, such as a multiclass API decorated with different API\ndescriptors (see the docstring for api()), or a repeated method\nsignature.", "source": "codesearchnet"}
{"code": "def _LastEntryTimestamp(dct, upper_bound_timestamp):\n    \n    if upper_bound_timestamp is None:\n      upper_bound = lambda _: True\n    else:\n      upper_bound = lambda key: key <= upper_bound_timestamp\n\n    try:\n      return max(filter(upper_bound, iterkeys(dct)))\n    except ValueError:  \n      return None", "docstring": "Searches for greatest timestamp lower than the specified one.\n\nArgs:\ndct: A dictionary from timestamps to some items.\nupper_bound_timestamp: An upper bound for timestamp to be returned.\n\nReturns:\nGreatest timestamp that is lower than the specified one. If no such value\nexists, `None` is returned.", "source": "juraj-google-style"}
{"code": "def AddSerializedFile(self, serialized_file_desc_proto):\n    from google.protobuf import descriptor_pb2\n    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(serialized_file_desc_proto)\n    self.Add(file_desc_proto)", "docstring": "Adds the FileDescriptorProto and its types to this pool.\n\nArgs:\nserialized_file_desc_proto: A bytes string, serialization of the\nFileDescriptorProto to add.", "source": "codesearchnet"}
{"code": "def _parse_description(details):\n    \n    description = details.find(\"div\", {\"class\": \"detailPopis\"})\n\n    \n    if not description:\n        return None\n\n    \n    ekniha = description[0].find(\"div\", {\"class\": \"ekniha\"})\n    if ekniha:\n        ekniha[0].replaceWith(dhtmlparser.HTMLElement(\"\"))\n\n    \n    detail = description[0].find(\"p\", {\"class\": \"detailKat\"})\n    if detail:\n        detail[0].replaceWith(dhtmlparser.HTMLElement(\"\"))\n\n    \n    description = dhtmlparser.removeTags(description[0]).strip()\n\n    \n    if not description:\n        return None\n\n    return description", "docstring": "Parse description of the book.\n\nArgs:\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr/None: Details as string with currency or None if not found.", "source": "juraj-google-style"}
{"code": "def encode(self):\n    blob = bytearray()\n    for record in self.records:\n        blob += record.encode()\n    header = struct.pack('<LL', self.SCRIPT_MAGIC, (len(blob) + self.SCRIPT_HEADER_LENGTH))\n    blob = (header + blob)\n    sha = hashlib.sha256()\n    sha.update(blob)\n    hash_value = sha.digest()[:16]\n    return (bytearray(hash_value) + blob)", "docstring": "Encode this record into a binary blob.\n\nThis binary blob could be parsed via a call to FromBinary().\n\nReturns:\nbytearray: The binary encoded script.", "source": "codesearchnet"}
{"code": "def EncodeMessages(self, message_list, result, destination=None, timestamp=None, api_version=3):\n    if (api_version not in [3]):\n        raise RuntimeError(('Unsupported api version: %s, expected 3.' % api_version))\n    if (destination is None):\n        destination = self.server_name\n        cipher = self._GetServerCipher()\n    else:\n        remote_public_key = self._GetRemotePublicKey(destination)\n        cipher = Cipher(self.common_name, self.private_key, remote_public_key)\n    if (timestamp is None):\n        self.timestamp = timestamp = int((time.time() * 1000000))\n    packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)\n    self.EncodeMessageList(message_list, packed_message_list)\n    result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata\n    result.encrypted_cipher = cipher.encrypted_cipher\n    serialized_message_list = packed_message_list.SerializeToString()\n    (result.packet_iv, result.encrypted) = cipher.Encrypt(serialized_message_list)\n    result.hmac = cipher.HMAC(result.encrypted)\n    result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher, result.encrypted_cipher_metadata, result.packet_iv.SerializeToString(), struct.pack('<I', api_version))\n    result.api_version = api_version\n    if isinstance(result, rdfvalue.RDFValue):\n        result.num_messages = len(message_list)\n    return timestamp", "docstring": "Accepts a list of messages and encodes for transmission.\n\nThis function signs and then encrypts the payload.\n\nArgs:\nmessage_list: A MessageList rdfvalue containing a list of GrrMessages.\nresult: A ClientCommunication rdfvalue which will be filled in.\ndestination: The CN of the remote system this should go to.\ntimestamp: A timestamp to use for the signed messages. If None - use the\ncurrent time.\napi_version: The api version which this should be encoded in.\n\nReturns:\nA nonce (based on time) which is inserted to the encrypted payload. The\nclient can verify that the server is able to decrypt the message and\nreturn the nonce.\n\nRaises:\nRuntimeError: If we do not support this api version.", "source": "codesearchnet"}
{"code": "def get_poi_types(self, **kwargs):\n        \n        \n        params = {\n            'cultureInfo': util.language_code(kwargs.get('lang'))\n        }\n\n        \n        result = self.make_request('geo', 'get_poi_types', **params)\n\n        \n        values = result.get('types', [])\n        return True, [emtype.PoiType(**a) for a in values]", "docstring": "Obtain POI types.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[PoiType]), or message string\nin case of error.", "source": "juraj-google-style"}
{"code": "def lookup_symbol(self, name, namespace_stack):\n    symbol = Symbol(name, name.split('::'), namespace_stack)\n    assert symbol.parts\n    if (symbol.parts[0] == ''):\n        symbol.parts = symbol.parts[1:]\n    elif (namespace_stack is not None):\n        result = self._lookup_in_all_namespaces(symbol)\n        if result:\n            return result\n    return self._lookup_global(symbol)", "docstring": "Returns AST node and module for symbol if found.\n\nArgs:\nname: 'name of the symbol to lookup'\nnamespace_stack: None or ['namespaces', 'in', 'current', 'scope']\n\nReturns:\n(ast.Node, module (ie, any object stored with symbol)) if found\n\nRaises:\nError if the symbol cannot be found.", "source": "codesearchnet"}
{"code": "def get_summary(self):\n    func_summaries = [f.get_summary() for f in self.functions]\n    modif_summaries = [f.get_summary() for f in self.modifiers]\n    return (self.name, [str(x) for x in self.inheritance], [str(x) for x in self.variables], func_summaries, modif_summaries)", "docstring": "Return the function summary\n\nReturns:\n(str, list, list, list, list): (name, inheritance, variables, fuction summaries, modifier summaries)", "source": "codesearchnet"}
{"code": "def join_dags(self, names=None):\n    return self._client.send(Request(action='join_dags', payload={'names': names})).success", "docstring": "Wait for the specified dags to terminate.\n\nThis function blocks until the specified dags terminate. If no dags are specified\nwait for all dags of the workflow, except the dag of the task calling this signal,\nto terminate.\n\nArgs:\nnames (list): The names of the dags that have to terminate.\n\nReturns:\nbool: True if all the signal was sent successfully.", "source": "codesearchnet"}
{"code": "def ParseLines(lines, message, allow_unknown_extension=False,\n               allow_field_number=False):\n  \n  parser = _Parser(allow_unknown_extension, allow_field_number)\n  return parser.ParseLines(lines, message)", "docstring": "Parses an text representation of a protocol message into a message.\n\nArgs:\nlines: An iterable of lines of a message's text representation.\nmessage: A protocol buffer message to merge into.\nallow_unknown_extension: if True, skip over missing extensions and keep\nparsing\nallow_field_number: if True, both field number and field name are allowed.\n\nReturns:\nThe same message passed as argument.\n\nRaises:\nParseError: On text parsing problems.", "source": "juraj-google-style"}
{"code": "def getUserForHost(self, user, host):\n        \n        for name in iterFqdnUp(host):\n            usercert = '%s@%s' % (user, name)\n            if self.isUserCert(usercert):\n                return usercert", "docstring": "Gets the name of the first existing user cert for a given user and host.\n\nArgs:\nuser (str): The name of the user.\nhost (str): The name of the host.\n\nExamples:\nGet the name for the \"myuser\" user cert at \"cool.vertex.link\":\n\nusercertname = cdir.getUserForHost('myuser', 'cool.vertex.link')\n\nReturns:\nstr: The cert name, if exists.", "source": "juraj-google-style"}
{"code": "def postprocess(self, args: argparse.Namespace):\n    names = {k for k in self.pytype_single_args if hasattr(args, k)}\n    opt_map = {k: self._pytype_arg_map[k].long_opt for k in names}\n    pytype_config.Postprocessor(names, opt_map, args).process()", "docstring": "Postprocesses the subset of pytype_single_args that appear in args.\n\nArgs:\nargs: an argparse.Namespace.", "source": "github-repos"}
{"code": "def get_controller(self, path):\n    path_info = path.lstrip('/').split('/', 2)\n    try:\n        return self._routes.get(((path_info[0] + '/') + path_info[1]))\n    except (IndexError, KeyError):\n        return self._routes.get((path_info[0] or 'index'))", "docstring": "Return controller that handle given path.\n\nArgs:\n- path: requested path, like: /blog/post_view/15", "source": "codesearchnet"}
{"code": "def plot_title(ax, pretitle='', title='Figure', posttitle='', title_fontsize=14, title_arg=None):\n    current_title = ax.get_title()\n    if (not current_title):\n        current_title = ((pretitle + title) + posttitle)\n    title_arg = dict_if_none(title_arg)\n    ax.set_title(current_title, fontsize=title_fontsize, **title_arg)", "docstring": "Set title options of a matplotlib plot\n\nArgs:\nax: matplotlib axes\npretitle(str): String to include before the general title of the figure\nposttitle (str): String to include after the general title of the figure\ntitle (str): Set the title for the figure\ntitle_fontsize (int): Defines the size of the title's font\ntitle_arg (dict): Addition arguments for matplotlib.title() call", "source": "codesearchnet"}
{"code": "def get_available_palettes(chosen_palette):\n    result = None\n    try:\n        result = ALL_PALETTES[:(ALL_PALETTES.index(chosen_palette) + 1)]\n    except ValueError:\n        pass\n    return result", "docstring": "Given a chosen palette, returns tuple of those available,\nor None when not found.\n\nBecause palette support of a particular level is almost always a\nsuperset of lower levels, this should return all available palettes.\n\nReturns:\nBoolean, None: is tty or None if not found.", "source": "codesearchnet"}
{"code": "def load(cls, fh):\n        \n        dat = fh.read()\n        try:\n            ret = cls.from_json(dat)\n        except:\n            ret = cls.from_yaml(dat)\n        return ret", "docstring": "Load json or yaml data from file handle.\n\nArgs:\nfh (file): File handle to load from.\n\nExamlple:\n>>> with open('data.json', 'r') as json:\n>>>    jsdata = composite.load(json)\n>>>\n>>> with open('data.yml', 'r') as yml:\n>>>    ymldata = composite.load(yml)", "source": "juraj-google-style"}
{"code": "def sample(self, num_rows=1):\n        \n        self.check_fit()\n\n        res = {}\n        means = np.zeros(self.covariance.shape[0])\n        size = (num_rows,)\n\n        clean_cov = np.nan_to_num(self.covariance)\n        samples = np.random.multivariate_normal(means, clean_cov, size=size)\n\n        for i, (label, distrib) in enumerate(self.distribs.items()):\n            cdf = stats.norm.cdf(samples[:, i])\n            res[label] = distrib.percent_point(cdf)\n\n        return pd.DataFrame(data=res)", "docstring": "Creates sintentic values stadistically similar to the original dataset.\n\nArgs:\nnum_rows: `int` amount of samples to generate.\n\nReturns:\nnp.ndarray: Sampled data.", "source": "juraj-google-style"}
{"code": "def image_summary(predictions, targets, hparams):\n  \n  del hparams\n  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)\n  gold = tf.cast(targets, tf.uint8)\n  summary1 = tf.summary.image(\"prediction\", results, max_outputs=2)\n  summary2 = tf.summary.image(\"data\", gold, max_outputs=2)\n  summary = tf.summary.merge([summary1, summary2])\n  return summary, tf.zeros_like(predictions)", "docstring": "Reshapes predictions and passes it to tensorboard.\n\nArgs:\npredictions : The predicted image (logits).\ntargets : The ground truth.\nhparams: model hparams.\n\nReturns:\nsummary_proto: containing the summary images.\nweights: A Tensor of zeros of the same shape as predictions.", "source": "juraj-google-style"}
{"code": "def get_status(self, batch_id):\n    with self._lock:\n        if self._batch_committed(batch_id):\n            return ClientBatchStatus.COMMITTED\n        if (batch_id in self._invalid):\n            return ClientBatchStatus.INVALID\n        if (batch_id in self._pending):\n            return ClientBatchStatus.PENDING\n        return ClientBatchStatus.UNKNOWN", "docstring": "Returns the status enum for a batch.\n\nArgs:\nbatch_id (str): The id of the batch to get the status for\n\nReturns:\nint: The status enum", "source": "codesearchnet"}
{"code": "class IntSoftmax(nn.Module):\n\n    def __init__(self, output_bit, quant_mode=False, force_dequant='none'):\n        super().__init__()\n        self.output_bit = output_bit\n        self.max_bit = 32\n        self.quant_mode = quant_mode\n        if force_dequant in ['nonlinear', 'softmax']:\n            logger.info('Force dequantize softmax')\n            self.quant_mode = False\n        self.act = QuantAct(16, quant_mode=self.quant_mode)\n        self.x0 = -0.6931\n        self.const = 30\n        self.coef = [0.35815147, 0.96963238, 1.0]\n        self.coef[1] /= self.coef[0]\n        self.coef[2] /= self.coef[0]\n\n    def int_polynomial(self, x_int, scaling_factor):\n        with torch.no_grad():\n            b_int = torch.floor(self.coef[1] / scaling_factor)\n            c_int = torch.floor(self.coef[2] / scaling_factor ** 2)\n        z = (x_int + b_int) * x_int + c_int\n        scaling_factor = self.coef[0] * scaling_factor ** 2\n        return (z, scaling_factor)\n\n    def int_exp(self, x_int, scaling_factor):\n        with torch.no_grad():\n            x0_int = torch.floor(self.x0 / scaling_factor)\n        x_int = torch.max(x_int, self.const * x0_int)\n        q = floor_ste.apply(x_int / x0_int)\n        r = x_int - x0_int * q\n        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)\n        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)\n        scaling_factor = exp_scaling_factor / 2 ** self.const\n        return (exp_int, scaling_factor)\n\n    def forward(self, x, scaling_factor):\n        if not self.quant_mode:\n            return (nn.functional.softmax(x, dim=-1), None)\n        x_int = x / scaling_factor\n        x_int_max, _ = x_int.max(dim=-1, keepdim=True)\n        x_int = x_int - x_int_max\n        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)\n        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)\n        exp_int = exp / exp_scaling_factor\n        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)\n        factor = floor_ste.apply(2 ** self.max_bit / exp_int_sum)\n        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))\n        scaling_factor = 1 / 2 ** self.output_bit\n        return (exp_int * scaling_factor, scaling_factor)", "docstring": "Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`.\n\nArgs:\noutput_bit (`int`):\nBitwidth for the layer output activation.\nquant_mode (`bool`, *optional*, defaults to `False`):\nWhether or not the layer is quantized.\nforce_dequant (`str`, *optional*, defaults to `\"none\"`):\nForce dequantize the layer if either \"softmax\" or \"nonlinear\" is given.", "source": "github-repos"}
{"code": "def _merge_doc(original, to_merge):\n    \n    \n    if not original:\n        return to_merge or ''\n    if not to_merge:\n        return original or ''\n    sections = []\n    for name in ('usage', 'arguments', 'options'):\n        sections.append(_merge_section(\n            _get_section(name, original),\n            _get_section(name, to_merge)\n        ))\n    return format_usage('\\n\\n'.join(s for s in sections).rstrip())", "docstring": "Merge two usage strings together.\n\nArgs:\noriginal: The source of headers and initial section lines.\nto_merge: The source for the additional section lines to append.\n\nReturns:\nA new usage string that contains information from both usage strings.", "source": "juraj-google-style"}
{"code": "def recipe_dynamic_costs(config, dcm_account, auth_read, configuration_sheet_url, auth_write, bigquery_dataset):\n    dynamic_costs(config, {'auth': auth_read, 'account': dcm_account, 'sheet': {'template': {'url': 'https:", "docstring": "Calculate DV360 cost at the dynamic creative combination level.\n\nArgs:\ndcm_account (string) - NA\nauth_read (authentication) - Credentials used for reading data.\nconfiguration_sheet_url (string) - NA\nauth_write (authentication) - Credentials used for writing data.\nbigquery_dataset (string) - NA", "source": "github-repos"}
{"code": "def sample_with_temperature(logits, temperature, sampling_keep_top_k=-1):\n  \n  if temperature == 0.0:\n    \n    logits_shape = shape_list(logits)\n    argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1)\n    return tf.reshape(argmax, logits_shape[:-1])\n  else:\n    assert temperature > 0.0\n\n    if sampling_keep_top_k != -1:\n      if sampling_keep_top_k <= 0:\n        raise ValueError(\"sampling_keep_top_k must either be -1 or positive.\")\n\n      vocab_size = shape_list(logits)[1]\n\n      k_largest = tf.contrib.nn.nth_element(\n          logits, n=sampling_keep_top_k, reverse=True)\n      k_largest = tf.tile(tf.reshape(k_largest, [-1, 1]), [1, vocab_size])\n\n      \n      \n      logits = tf.where(tf.less_equal(logits, k_largest),\n                        tf.ones_like(logits)*-1e6, logits)\n\n    reshaped_logits = (\n        tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature)\n    choices = tf.multinomial(reshaped_logits, 1)\n    choices = tf.reshape(choices,\n                         shape_list(logits)[:logits.get_shape().ndims - 1])\n    return choices", "docstring": "Either argmax or random sampling.\n\nArgs:\nlogits: a Tensor.\ntemperature: a float  0.0=argmax 1.0=random\nsampling_keep_top_k: If not -1, only sample from the top k logits.\nReturns:\na Tensor with one fewer dimension than logits.", "source": "juraj-google-style"}
{"code": "def unzip_file(source_file, dest_dir=None, mkdir=False):\n    \n    \n    if dest_dir is None:\n        dest_dir, fname = os.path.split(source_file)\n    elif not os.path.isdir(dest_dir):\n        if mkdir:\n            preparedir(dest_dir)\n        else:\n            created = preparedir(dest_dir, False)\n            if not created:\n                raise ValueError(\"Failed to find %s.\" % dest_dir)\n\n    \n    with zipfile.ZipFile(source_file) as zf:\n        for member in zf.infolist():\n            words = member.filename.split('\\\\')\n            for word in words[:-1]:\n                drive, word = os.path.splitdrive(word)\n                head, word = os.path.split(word)\n                if word in (os.curdir, os.pardir, ''):\n                    continue\n                dest_dir = os.path.join(dest_dir, word)\n            zf.extract(member, dest_dir)", "docstring": "Unzip a compressed file.\n\nArgs:\nsource_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)\ndest_dir: Target folder to extract to (e.g. c:/ladybug).\nDefault is set to the same directory as the source file.\nmkdir: Set to True to create the directory if doesn't exist (Default: False)", "source": "juraj-google-style"}
{"code": "def best_case(self, matrix, m_list, indices_left):\n        \n        m_indices = []\n        fraction_list = []\n        for m in m_list:\n            m_indices.extend(m[2])\n            fraction_list.extend([m[0]] * m[1])\n\n        indices = list(indices_left.intersection(m_indices))\n\n        interaction_matrix = matrix[indices, :][:, indices]\n\n        fractions = np.zeros(len(interaction_matrix)) + 1\n        fractions[:len(fraction_list)] = fraction_list\n        fractions = np.sort(fractions)\n\n        \n        \n        sums = 2 * np.sum(matrix[indices], axis=1)\n        sums = np.sort(sums)\n\n        \n        \n        \n        \n        step1 = np.sort(interaction_matrix) * (1 - fractions)\n        step2 = np.sort(np.sum(step1, axis=1))\n        step3 = step2 * (1 - fractions)\n        interaction_correction = np.sum(step3)\n\n        if self._algo == self.ALGO_TIME_LIMIT:\n            elapsed_time = datetime.utcnow() - self._start_time\n            speedup_parameter = elapsed_time.total_seconds() / 1800\n            avg_int = np.sum(interaction_matrix, axis=None)\n            avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))\n            average_correction = avg_int * avg_frac\n\n            interaction_correction = average_correction * speedup_parameter \\\n                + interaction_correction * (1 - speedup_parameter)\n\n        best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) \\\n            + interaction_correction\n\n        return best_case", "docstring": "Computes a best case given a matrix and manipulation list.\n\nArgs:\nmatrix: the current matrix (with some permutations already\nperformed)\nm_list: [(multiplication fraction, number_of_indices, indices,\nspecies)] describing the manipulation\nindices: Set of indices which haven't had a permutation\nperformed on them.", "source": "juraj-google-style"}
{"code": "def get_tag(self, main_type, sub_type, unique_id, tag, owner=None, params=None):\n        \n        params = params or {}\n\n        return self.tag(main_type, sub_type, unique_id, tag, owner=owner, params=params)", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\ntag:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_component(self, colour, tolerance=0, default=None):\n        \n        if not (0 <= tolerance <= np.sqrt(195075)):\n            raise LegendError('Tolerance must be between 0 and 441.67')\n\n        for decor in self.__list:\n            if colour.lower() == decor.colour:\n                return decor.component\n\n        \n        r1, g1, b1 = utils.hex_to_rgb(colour)\n\n        \n        best_match = '\n        best_match_dist = np.sqrt(r1**2. + g1**2. + b1**2.)\n\n        \n        for decor in self.__list:\n            r2, g2, b2 = decor.rgb\n            distance = np.sqrt((r2-r1)**2. + (g2-g1)**2. + (b2-b1)**2.)\n            if distance < best_match_dist:\n                best_match = decor.component\n                best_match_dist = distance\n                best_match_colour = decor.colour\n\n        if best_match_dist <= tolerance:\n            return best_match\n        else:\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"always\")\n                w = \"No match found for {0} \".format(colour.lower())\n                w += \"with tolerance of {0}. Best match is \".format(tolerance)\n                w += \"{0}, {1}\".format(best_match.summary(), best_match_colour)\n                w += \", d={0}\".format(best_match_dist)\n                warnings.warn(w)\n\n            return default", "docstring": "Get the component corresponding to a display colour. This is for\ngenerating a Striplog object from a colour image of a striplog.\n\nArgs:\ncolour (str): The hex colour string to look up.\ntolerance (float): The colourspace distance within which to match.\ndefault (component or None): The component to return in the event\nof no match.\n\nReturns:\ncomponent. The component best matching the provided colour.", "source": "juraj-google-style"}
{"code": "def load(hdf5_filename):\n    \n    \n    hdf5_filename = os.path.expanduser(hdf5_filename)\n\n    try:\n        f = h5py.File(hdf5_filename, \"r\")\n        \n        data_layers = f.get('image').get('CUTOUT')\n    except Exception as e:\n        raise ValueError(\"Could not load file {0} for conversion. {}\".format(\n                         hdf5_filename, e))\n        raise\n\n    return numpy.array(data_layers)", "docstring": "Import a HDF5 file into a numpy array.\n\nArguments:\nhdf5_filename:  A string filename of a HDF5 datafile\n\nReturns:\nA numpy array with data from the HDF5 file", "source": "juraj-google-style"}
{"code": "def search(self, resources_request=None):\n        \n\n        \n        name_pattern, version_range = self._parse_request(resources_request)\n\n        family_names = set(\n            x.name for x in iter_package_families(paths=self.package_paths)\n            if fnmatch.fnmatch(x.name, name_pattern)\n        )\n\n        family_names = sorted(family_names)\n\n        \n        if self.resource_type:\n            resource_type = self.resource_type\n        elif version_range or len(family_names) == 1:\n            resource_type = \"package\"\n        else:\n            resource_type = \"family\"\n\n        if not family_names:\n            return resource_type, []\n\n        \n        if resource_type == \"family\":\n            results = [ResourceSearchResult(x, \"family\") for x in family_names]\n            return \"family\", results\n\n        results = []\n\n        \n        for name in family_names:\n            it = iter_packages(name, version_range, paths=self.package_paths)\n            packages = sorted(it, key=lambda x: x.version)\n\n            if self.latest and packages:\n                packages = [packages[-1]]\n\n            for package in packages:\n                \n                \n                try:\n                    if package.timestamp:\n                        if self.after_time and package.timestamp < self.after_time:\n                            continue\n                        if self.before_time and package.timestamp >= self.before_time:\n                            continue\n\n                    if self.validate:\n                        package.validate_data()\n\n                except ResourceContentError as e:\n                    if resource_type == \"package\":\n                        result = ResourceSearchResult(package, \"package\", str(e))\n                        results.append(result)\n\n                    continue\n\n                if resource_type == \"package\":\n                    result = ResourceSearchResult(package, \"package\")\n                    results.append(result)\n                    continue\n\n                \n                try:\n                    for variant in package.iter_variants():\n                        if self.validate:\n                            try:\n                                variant.validate_data()\n                            except ResourceContentError as e:\n                                result = ResourceSearchResult(\n                                    variant, \"variant\", str(e))\n                                results.append(result)\n                                continue\n\n                        result = ResourceSearchResult(variant, \"variant\")\n                        results.append(result)\n\n                except ResourceContentError:\n                    \n                    continue\n\n        return resource_type, results", "docstring": "Search for resources.\n\nArgs:\nresources_request (str): Resource to search, glob-style patterns\nare supported. If None, returns all matching resource types.\n\nReturns:\n2-tuple:\n- str: resource type (family, package, variant);\n- List of `ResourceSearchResult`: Matching resources. Will be in\nalphabetical order if families, and version ascending for\npackages or variants.", "source": "juraj-google-style"}
{"code": "def not_storable(_type):\n    \n    return Storable(_type, handlers=StorableHandler(poke=fake_poke, peek=fail_peek(_type)))", "docstring": "Helper for tagging unserializable types.\n\nArguments:\n\n_type (type): type to be ignored.\n\nReturns:\n\nStorable: storable instance that does not poke.", "source": "juraj-google-style"}
{"code": "def read_from_file(self, filename, negative_occupancies='warn'):\n    valid_negative_occupancies = ['warn', 'raise', 'ignore', 'zero']\n    if (negative_occupancies not in valid_negative_occupancies):\n        raise ValueError('\"{}\" is not a valid value for the keyword `negative_occupancies`.'.format(negative_occupancies))\n    with open(filename, 'r') as file_in:\n        file_in.readline()\n        (self.number_of_k_points, self.number_of_bands, self.number_of_ions) = [int(f) for f in get_numbers_from_string(file_in.readline())]\n        self.read_in = file_in.read()\n    self.parse_k_points()\n    self.parse_bands()\n    self.parse_occupancy()\n    if np.any((self.occupancy[(:, 1)] < 0)):\n        if (negative_occupancies == 'warn'):\n            warnings.warn('One or more occupancies in your PROCAR file are negative.')\n        elif (negative_occupancies == 'raise'):\n            raise ValueError('One or more occupancies in your PROCAR file are negative.')\n        elif (negative_occupancies == 'zero'):\n            self.occupancy[(self.occupancy < 0)] = 0.0\n    self.parse_projections()\n    self.sanity_check()\n    self.read_in = None\n    if self.calculation['spin_polarised']:\n        self.data = self.projection_data.reshape(self.spin_channels, self.number_of_k_points, self.number_of_bands, (self.number_of_ions + 1), self.number_of_projections)[(:, :, :, :, 1:)].swapaxes(0, 1).swapaxes(1, 2)\n    else:\n        self.data = self.projection_data.reshape(self.number_of_k_points, self.number_of_bands, self.spin_channels, (self.number_of_ions + 1), self.number_of_projections)[(:, :, :, :, 1:)]", "docstring": "Reads the projected wavefunction character of each band from a VASP PROCAR file.\n\nArgs:\nfilename (str): Filename of the PROCAR file.\nnegative_occupancies (:obj:Str, optional): Sets the behaviour for handling\nnegative occupancies. Default is `warn`.\n\nReturns:\nNone\n\nNote:\nValid options for `negative_occupancies` are:\n`warn` (default): Warn that some partial occupancies are negative,\nbut do not alter any values.\n`raise`:          Raise an AttributeError.\n`ignore`:         Do nothing.\n`zero`:           Negative partial occupancies will be set to zero.", "source": "codesearchnet"}
{"code": "def regroup_if_changed(group, op_list, name=None):\n  \n  has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)\n  if (group is None or len(group.control_inputs) != len(op_list) or\n      (has_deltas and op_list.has_changed())):\n    if has_deltas:\n      op_list.mark()\n    if op_list:\n      return tf.group(*op_list, name=name)\n    else:\n      return tf.no_op(name=name)\n  else:\n    return group", "docstring": "Creates a new group for op_list if it has changed.\n\nArgs:\ngroup: The current group. It is returned if op_list is unchanged.\nop_list: The list of operations to check.\nname: The name to use if a new group is created.\nReturns:\nEither group or a new group (or if op_list is empty then no_op).", "source": "juraj-google-style"}
{"code": "def Scalars(self, run, tag):\n    \n    accumulator = self.GetAccumulator(run)\n    return accumulator.Scalars(tag)", "docstring": "Retrieve the scalar events associated with a run and tag.\n\nArgs:\nrun: A string name of the run for which values are retrieved.\ntag: A string name of the tag for which values are retrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nAn array of `event_accumulator.ScalarEvents`.", "source": "juraj-google-style"}
{"code": "def period_neighborhood_probability(self, radius, smoothing, threshold, stride,start_time,end_time):\n        \n        neighbor_x = self.x[::stride, ::stride]\n        neighbor_y = self.y[::stride, ::stride]\n        neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)\n        neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))\n        print('Forecast Hours: {0}-{1}'.format(start_time, end_time))\n        for m in range(len(self.members)):\n            period_max = self.data[m,start_time:end_time,:,:].max(axis=0)\n            valid_i, valid_j = np.where(period_max >= threshold)\n            print(self.members[m], len(valid_i))\n            if len(valid_i) > 0:\n                var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T)\n                exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)\n                exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape)\n                neighbor_prob[m][exceed_i, exceed_j] = 1\n                if smoothing > 0:\n                    neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing,mode='constant')\n        return neighbor_prob", "docstring": "Calculate the neighborhood probability over the full period of the forecast\n\nArgs:\nradius: circular radius from each point in km\nsmoothing: width of Gaussian smoother in km\nthreshold: intensity of exceedance\nstride: number of grid points to skip for reduced neighborhood grid\n\nReturns:\n(neighborhood probabilities)", "source": "juraj-google-style"}
{"code": "def _convert_to_sparse_tensors(sp_inputs):\n    if isinstance(sp_inputs, list):\n        return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]\n    if isinstance(sp_inputs, tuple):\n        return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)\n    raise TypeError('Inputs must be a list or tuple.')", "docstring": "Convert `sp_inputs` to `SparseTensor` objects and return them.\n\nArgs:\nsp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`\nobjects.\n\nReturns:\n`sp_inputs` converted to `SparseTensor` objects.\n\nRaises:\nValueError: if any item in `sp_inputs` is neither `SparseTensor` nor\n`SparseTensorValue`.", "source": "github-repos"}
{"code": "def depth_june_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_june_average_ground_temperature`'.format(value))\n\n        self._depth_june_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_june_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_june_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def write(self, brightness):\n        \n        if not isinstance(brightness, (bool, int)):\n            raise TypeError(\"Invalid brightness type, should be bool or int.\")\n\n        if isinstance(brightness, bool):\n            brightness = self._max_brightness if brightness else 0\n        else:\n            if not 0 <= brightness <= self._max_brightness:\n                raise ValueError(\"Invalid brightness value, should be between 0 and %d.\" % self._max_brightness)\n\n        \n        try:\n            os.write(self._fd, b\"%d\\n\" % brightness)\n        except OSError as e:\n            raise LEDError(e.errno, \"Writing LED brightness: \" + e.strerror)\n\n        \n        try:\n            os.lseek(self._fd, 0, os.SEEK_SET)\n        except OSError as e:\n            raise LEDError(e.errno, \"Rewinding LED brightness: \" + e.strerror)", "docstring": "Set the brightness of the LED to `brightness`.\n\n`brightness` can be a boolean for on/off, or integer value for a\nspecific brightness.\n\nArgs:\nbrightness (bool, int): Brightness value to set.\n\nRaises:\nLEDError: if an I/O or OS error occurs.\nTypeError: if `brightness` type is not bool or int.", "source": "juraj-google-style"}
{"code": "def __contains__(self, nurest_object):\n        \n        for obj in self:\n            if obj.equals(nurest_object):\n                return True\n\n        return False", "docstring": "Verify if the fetcher contains the given NURESTObject\n\nArgs:\nnurest_object (bambou.NURESTObject): the NURESTObject object to verify\n\nReturns:\nReturns True if the object has been found. False otherwise", "source": "juraj-google-style"}
{"code": "def fuzzy_match(self, proc):\n    return any(((proc in row[self.command_name]) for row in self.data))", "docstring": "Are there any commands that contain the given text?\n\nReturns:\nboolean: ``True`` if the word ``proc`` appears in the command column.\n\n.. note::\n'proc' can match anywhere in the command path, name or arguments.", "source": "codesearchnet"}
{"code": "def _linear(self, inputs):\n    first_dims = shape_list(inputs)[:-1]\n    x = tf.reshape(inputs, [-1, self.hidden_size])\n    logits = tf.matmul(x, self.weight, transpose_b=True)\n    return tf.reshape(logits, first_dims + [self.vocab_size])", "docstring": "Computes logits by running inputs through a linear layer.\n\nArgs:\ninputs: A float32 tensor with shape [..., hidden_size]\n\nReturns:\nfloat32 tensor with shape [..., vocab_size].", "source": "github-repos"}
{"code": "def write_compartments(self, stream, compartments, adjacencies, properties=None):\n\n    def convert(entry):\n        return self.convert_compartment_entry(entry, adjacencies.get(entry.id))\n    self._write_entries(stream, compartments, convert, properties)", "docstring": "Write iterable of compartments as YAML object to stream.\n\nArgs:\nstream: File-like object.\ncompartments: Iterable of compartment entries.\nadjacencies: Dictionary mapping IDs to adjacent compartment IDs.\nproperties: Set of compartment properties to output (or None to\noutput all).", "source": "codesearchnet"}
{"code": "def get():\n    result = runCommand('facter --json', raise_error_on_fail=True)\n    json_facts = result[1]\n    facts = json.loads(json_facts)\n    return facts", "docstring": "Get local facts about this machine.\n\nReturns:\njson-compatible dict with all facts of this host", "source": "codesearchnet"}
{"code": "def take_profit_replace(self, accountID, orderID, **kwargs):\n        \n        return self.replace(\n            accountID,\n            orderID,\n            order=TakeProfitOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to replace a pending Take Profit Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Take Profit Order to replace\nkwargs : The arguments to create a TakeProfitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(Credential, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.CREDENTIAL_TYPE, local_stream):\n        self._credential_type = primitives.Enumeration(enum=enums.CredentialType, tag=enums.Tags.CREDENTIAL_TYPE)\n        self._credential_type.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Credential encoding missing the credential type.')\n    if self.is_tag_next(enums.Tags.CREDENTIAL_VALUE, local_stream):\n        if (self.credential_type == enums.CredentialType.USERNAME_AND_PASSWORD):\n            self._credential_value = UsernamePasswordCredential()\n        elif (self.credential_type == enums.CredentialType.DEVICE):\n            self._credential_value = DeviceCredential()\n        elif (self.credential_type == enums.CredentialType.ATTESTATION):\n            self._credential_value = AttestationCredential()\n        else:\n            raise ValueError('Credential encoding includes unrecognized credential type.')\n        self._credential_value.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Credential encoding missing the credential value.')\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Credential struct and decode it into its\nconstituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if either the credential type or value are\nmissing from the encoding.", "source": "codesearchnet"}
{"code": "def get_graph(self, item_ids, language=None):\n        \n        def _related(item_ids):\n            if item_ids is None:\n                items = Item.objects.filter(active=True).prefetch_related('parents', 'children')\n            else:\n                item_ids = [ii for iis in item_ids.values() for ii in iis]\n                items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children')\n            return {item.id: sorted([_item.id for rel in [item.parents.all(), item.children.all()] for _item in rel]) for item in items}\n        if item_ids is None:\n            return self._reachable_graph(None, _related, language=language)\n        else:\n            graph = self.get_graph(None, language)\n            return self._subset_graph(graph, item_ids)", "docstring": "Get a subgraph of items reachable from the given set of items through\nany relation.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\ndict: item id -> list of items (parent items), root items are\nreferenced by None key", "source": "juraj-google-style"}
{"code": "def register_mbr_plugin(self, fs_id, plugin):\n    self.logger.debug('MBR: {}, FS ID: {}'.format(self.__get_plugin_name(plugin), fs_id))\n    self.__mbr_plugins[fs_id].append(plugin)", "docstring": "Used in plugin's registration routine,\nto associate it's detection method with given filesystem id\n\nArgs:\nfs_id: filesystem id that is read from MBR partition entry\nplugin: plugin that supports this filesystem", "source": "codesearchnet"}
{"code": "def hwvtep_add_loopback_interface(self, **kwargs):\n    name = kwargs.pop('name')\n    id = kwargs.pop('int_id')\n    ip_args = dict(name=name, loopback_id=id)\n    method_name = 'overlay_gateway_ip_interface_loopback_loopback_id'\n    method_class = self._brocade_tunnels\n    gw_attr = getattr(method_class, method_name)\n    config = gw_attr(**ip_args)\n    output = self._callback(config)\n    return output", "docstring": "Add loopback interface to the overlay-gateway\n\nArgs:\nname  (str): gateway-name\nint_id (int): loopback inteface id\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def audio(self, audio, sample_rate, name=None, subdir=''):\n        \n\n        from chainerui.report.audio_report import check_available\n        if not check_available():\n            return\n        from chainerui.report.audio_report import report as _audio\n\n        col_name = self.get_col_name(name, 'audio')\n        out_dir, rel_out_dir = self.get_subdir(subdir)\n        filename, _ = _audio(audio, sample_rate, out_dir, col_name)\n        self.audios[col_name] = os.path.join(rel_out_dir, filename)\n\n        self.count += 1", "docstring": "Summary audio to listen on web browser.\n\nArgs:\naudio (:class:`numpy.ndarray` or :class:`cupy.ndarray` or \\\n:class:`chainer.Variable`): sampled wave array.\nsample_rate (int): sampling rate.\nname (str): name of image. set as column name. when not setting,\nassigned ``'audio'`` + sequential number.\nsubdir (str): sub-directory path of output.", "source": "juraj-google-style"}
{"code": "def do_post(endpoint, body, access_token):\n    headers = {'content-type': 'application/json', 'Authorization': ('Bearer ' + access_token)}\n    headers['User-Agent'] = get_user_agent()\n    return requests.post(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP POST request and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\nbody (str): JSON body of information to post.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def difference(self, *other):\n        \n        from_frozenset = self.items.difference(*map(set, other))\n        return self.from_iterable(from_frozenset, sort=True)", "docstring": "Returns a new :class:`FrameSet` with elements in `self` but not in\n`other`.\n\nArgs:\nother (:class:`FrameSet`): or objects that can cast to :class:`FrameSet`\n\nReturns:\n:class:`FrameSet`:", "source": "juraj-google-style"}
{"code": "def onScreen(x, y=None):\n    \n    x, y = _unpackXY(x, y)\n    x = int(x)\n    y = int(y)\n\n    width, height = platformModule._size()\n    return 0 <= x < width and 0 <= y < height", "docstring": "Returns whether the given xy coordinates are on the screen or not.\n\nArgs:\nEither the arguments are two separate values, first arg for x and second\nfor y, or there is a single argument of a sequence with two values, the\nfirst x and the second y.\nExample: onScreen(x, y) or onScreen([x, y])\n\nReturns:\nbool: True if the xy coordinates are on the screen at its current\nresolution, otherwise False.", "source": "juraj-google-style"}
{"code": "def merge_variables(variables, **kwargs):\n        \n        var_dict = OrderedDict()\n        for v in variables:\n            if v.name not in var_dict:\n                var_dict[v.name] = []\n            var_dict[v.name].append(v)\n        return [merge_variables(vars_, **kwargs)\n                for vars_ in list(var_dict.values())]", "docstring": "Concatenates Variables along row axis.\n\nArgs:\nvariables (list): List of Variables to merge. Variables can have\ndifferent names (and all Variables that share a name will be\nconcatenated together).\n\nReturns:\nA list of Variables.", "source": "juraj-google-style"}
{"code": "def _longToBytestring(value, signed=False, numberOfRegisters=2):\n    \n    _checkInt(value, description='inputvalue')\n    _checkBool(signed, description='signed parameter')\n    _checkInt(numberOfRegisters, minvalue=2, maxvalue=2, description='number of registers')\n\n    formatcode = '>'  \n    if signed:\n        formatcode += 'l'  \n    else:\n        formatcode += 'L'  \n\n    outstring = _pack(formatcode, value)\n    assert len(outstring) == 4\n    return outstring", "docstring": "Convert a long integer to a bytestring.\n\nLong integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave.\n\nArgs:\n* value (int): The numerical value to be converted.\n* signed (bool): Whether large positive values should be interpreted as negative values.\n* numberOfRegisters (int): Should be 2. For error checking only.\n\nReturns:\nA bytestring (4 bytes).\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n    \n    if not isinstance(other, SemanticTime):\n      return False\n\n    return self._SORT_ORDER == other._SORT_ORDER", "docstring": "Determines if the date time values are equal to other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are equal to other.", "source": "juraj-google-style"}
{"code": "def get_schema_descendant(\n            self, route: SchemaRoute) -> Optional[SchemaNode]:\n        \n        node = self\n        for p in route:\n            node = node.get_child(*p)\n            if node is None:\n                return None\n        return node", "docstring": "Return descendant schema node or ``None`` if not found.\n\nArgs:\nroute: Schema route to the descendant node\n(relative to the receiver).", "source": "juraj-google-style"}
{"code": "def eval_rs(gains, losses):\n    count = (len(gains) + len(losses))\n    avg_gains = (stats.avg(gains, count=count) if gains else 1)\n    avg_losses = (stats.avg(losses, count=count) if losses else 1)\n    if (avg_losses == 0):\n        return avg_gains\n    else:\n        return (avg_gains / avg_losses)", "docstring": "Evaluates the RS variable in RSI algorithm\n\nArgs:\ngains: List of price gains.\nlosses: List of prices losses.\n\nReturns:\nFloat of average gains over average losses.", "source": "codesearchnet"}
{"code": "def ValidateSyntax(rdf_artifact):\n    if (not rdf_artifact.doc):\n        raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, 'missing doc')\n    for supp_os in rdf_artifact.supported_os:\n        valid_os = rdf_artifact.SUPPORTED_OS_LIST\n        if (supp_os not in valid_os):\n            detail = (\"invalid `supported_os` ('%s' not in %s)\" % (supp_os, valid_os))\n            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)\n    for condition in rdf_artifact.conditions:\n        try:\n            of = objectfilter.Parser(condition).Parse()\n            of.Compile(objectfilter.BaseFilterImplementation)\n        except rdf_artifacts.ConditionError as e:\n            detail = (\"invalid condition '%s'\" % condition)\n            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail, e)\n    for label in rdf_artifact.labels:\n        if (label not in rdf_artifact.ARTIFACT_LABELS):\n            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, (\"invalid label '%s'\" % label))\n    valid_provides = rdf_client.KnowledgeBase().GetKbFieldNames()\n    for kb_var in rdf_artifact.provides:\n        if (kb_var not in valid_provides):\n            detail = (\"broken `provides` ('%s' not in %s)\" % (kb_var, valid_provides))\n            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)\n    for dep in GetArtifactPathDependencies(rdf_artifact):\n        if (dep not in valid_provides):\n            detail = (\"broken path dependencies ('%s' not in %s)\" % (dep, valid_provides))\n            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)\n    for source in rdf_artifact.sources:\n        try:\n            source.Validate()\n        except rdf_artifacts.ArtifactSourceSyntaxError as e:\n            raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, 'bad source', e)", "docstring": "Validates artifact syntax.\n\nThis method can be used to validate individual artifacts as they are loaded,\nwithout needing all artifacts to be loaded first, as for Validate().\n\nArgs:\nrdf_artifact: RDF object artifact.\n\nRaises:\nArtifactSyntaxError: If artifact syntax is invalid.", "source": "codesearchnet"}
{"code": "def tuplize(nested):\n    if isinstance(nested, str):\n        return nested\n    try:\n        return tuple(map(tuplize, nested))\n    except TypeError:\n        return nested", "docstring": "Recursively converts iterables into tuples.\n\nArgs:\nnested: A nested structure of items and iterables.\n\nReturns:\nA nested structure of items and tuples.", "source": "codesearchnet"}
{"code": "def _get_ground_truth_detections(instances_file, allowlist_file=None, num_images=None):\n    with open(instances_file, 'r') as annotation_dump:\n        data_dict = ast.literal_eval(annotation_dump.readline())\n    image_data = collections.OrderedDict()\n    if allowlist_file is not None:\n        with open(allowlist_file, 'r') as allowlist:\n            image_id_allowlist = set([int(x) for x in allowlist.readlines()])\n    else:\n        image_id_allowlist = [image['id'] for image in data_dict['images']]\n    for image_dict in data_dict['images']:\n        image_id = image_dict['id']\n        if image_id not in image_id_allowlist:\n            continue\n        image_data_dict = {}\n        image_data_dict['id'] = image_dict['id']\n        image_data_dict['file_name'] = image_dict['file_name']\n        image_data_dict['height'] = image_dict['height']\n        image_data_dict['width'] = image_dict['width']\n        image_data_dict['detections'] = []\n        image_data[image_id] = image_data_dict\n    shared_image_ids = set()\n    for annotation_dict in data_dict['annotations']:\n        image_id = annotation_dict['image_id']\n        if image_id in image_data:\n            shared_image_ids.add(image_id)\n    output_image_ids = sorted(shared_image_ids)\n    if num_images:\n        if num_images <= 0:\n            logging.warning('--num_images is %d, hence outputing all annotated images.', num_images)\n        elif num_images > len(shared_image_ids):\n            logging.warning('--num_images (%d) is larger than the number of annotated images.', num_images)\n        else:\n            output_image_ids = output_image_ids[:num_images]\n    for image_id in list(image_data):\n        if image_id not in output_image_ids:\n            del image_data[image_id]\n    for annotation_dict in data_dict['annotations']:\n        image_id = annotation_dict['image_id']\n        if image_id not in output_image_ids:\n            continue\n        image_data_dict = image_data[image_id]\n        bbox = annotation_dict['bbox']\n        top = bbox[1]\n        left = bbox[0]\n        bottom = top + bbox[3]\n        right = left + bbox[2]\n        if top > image_data_dict['height'] or left > image_data_dict['width'] or bottom > image_data_dict['height'] or (right > image_data_dict['width']):\n            continue\n        object_d = {}\n        object_d['bbox'] = [top / image_data_dict['height'], left / image_data_dict['width'], bottom / image_data_dict['height'], right / image_data_dict['width']]\n        object_d['category_id'] = annotation_dict['category_id']\n        image_data_dict['detections'].append(object_d)\n    return image_data", "docstring": "Processes the annotations JSON file and returns ground truth data corresponding to allowlisted image IDs.\n\nArgs:\ninstances_file: COCO instances JSON file, usually named as\ninstances_val20xx.json.\nallowlist_file: File containing COCO minival image IDs to allowlist for\nevaluation, one per line.\nnum_images: Number of allowlisted images to pre-process. First num_images\nare chosen based on sorted list of filenames. 
If None, all allowlisted\nfiles are preprocessed.\n\nReturns:\nA dict mapping image id (int) to a per-image dict that contains:\n'filename', 'image' & 'height' mapped to filename & image dimensions\nrespectively\nAND\n'detections' to a list of detection dicts, with each mapping:\n'category_id' to COCO category id (starting with 1) &\n'bbox' to a list of dimension-normalized [top, left, bottom, right]\nbounding-box values.", "source": "github-repos"}
{"code": "def accept_confirm(self, text=None, wait=None):\n    with self.driver.accept_modal('confirm', text=text, wait=wait):\n        (yield)", "docstring": "Execute the wrapped code, accepting a confirm.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "codesearchnet"}
{"code": "def receive(self):\n    pickled_request = self._connection.connection.lpop(self._request_key)\n    return (pickle.loads(pickled_request) if (pickled_request is not None) else None)", "docstring": "Returns a single request.\n\nTakes the first request from the list of requests and returns it. If the list\nis empty, None is returned.\n\nReturns:\nResponse: If a new request is available a Request object is returned,\notherwise None is returned.", "source": "codesearchnet"}
{"code": "def on_item_changed(self, item, new_value, row, column):\n    return (item, new_value, row, column)", "docstring": "Event for the item change.\n\nArgs:\nemitter (TableWidget): The emitter of the event.\nitem (TableItem): The TableItem instance.\nnew_value (str): New text content.\nrow (int): row index.\ncolumn (int): column index.", "source": "codesearchnet"}
{"code": "def bsp_new_with_size(x: int, y: int, w: int, h: int) -> tcod.bsp.BSP:\n    \n    return Bsp(x, y, w, h)", "docstring": "Create a new BSP instance with the given rectangle.\n\nArgs:\nx (int): Rectangle left coordinate.\ny (int): Rectangle top coordinate.\nw (int): Rectangle width.\nh (int): Rectangle height.\n\nReturns:\nBSP: A new BSP instance.\n\n.. deprecated:: 2.0\nCall the :any:`BSP` class instead.", "source": "juraj-google-style"}
{"code": "async def get_action_context_and_template(chain, parent_link, decision_link):\n    \n    actions_path = decision_link.get_artifact_full_path('public/actions.json')\n    all_actions = load_json_or_yaml(actions_path, is_path=True)['actions']\n    action_name = get_action_callback_name(parent_link.task)\n    action_defn = _get_action_from_actions_json(all_actions, action_name)\n    jsone_context = await populate_jsone_context(chain, parent_link, decision_link, \"action\")\n    if 'task' in action_defn and chain.context.config['min_cot_version'] <= 2:\n        tmpl = {'tasks': [action_defn['task']]}\n    elif action_defn.get('kind') == 'hook':\n        \n        in_tree_tmpl = await get_in_tree_template(decision_link)\n        action_perm = _get_action_perm(action_defn)\n        tmpl = _wrap_action_hook_with_let(in_tree_tmpl, action_perm)\n\n        \n        \n        \n        \n        \n        jsone_context = {\n            'payload': _render_action_hook_payload(\n                action_defn, jsone_context, parent_link\n            ),\n            'taskId': parent_link.task_id,\n            'now': jsone_context['now'],\n            'as_slugid': jsone_context['as_slugid'],\n            'clientId': jsone_context.get('clientId'),\n        }\n    elif action_defn.get('kind') == 'task':\n        \n        tmpl = await get_in_tree_template(decision_link)\n        for k in ('action', 'push', 'repository'):\n            jsone_context[k] = deepcopy(action_defn['hookPayload']['decision'].get(k, {}))\n        jsone_context['action']['repo_scope'] = get_repo_scope(parent_link.task, parent_link.name)\n    else:\n        raise CoTError('Unknown action kind `{kind}` for action `{name}`.'.format(\n            kind=action_defn.get('kind', '<MISSING>'),\n            name=action_defn.get('name', '<MISSING>'),\n        ))\n\n    return jsone_context, tmpl", "docstring": "Get the appropriate json-e context and template for an action task.\n\nArgs:\nchain (ChainOfTrust): the chain of trust.\nparent_link (LinkOfTrust): the parent link to test.\ndecision_link (LinkOfTrust): the parent link's decision task link.\ntasks_for (str): the reason the parent link was created (cron,\nhg-push, action)\n\nReturns:\n(dict, dict): the json-e context and template.", "source": "juraj-google-style"}
{"code": "def _build_js(inputs, outputs, name, implementation, support_code):\n    \n    \n    \n    input_fields = json.dumps([f[0] for f in inputs])\n\n    \n    \n    output_fields = [{'name': f[0], 'type': f[1]} for f in outputs]\n    output_fields = json.dumps(output_fields, sort_keys=True)\n\n    \n    if support_code is None:\n      support_code = ''\n    return ('{code}\\n{name}={implementation};\\nbigquery.defineFunction(\\'{name}\\', {inputs}, '\n            '{outputs}, {name});').format(code=support_code, name=name,\n                                          implementation=implementation, inputs=str(input_fields),\n                                          outputs=str(output_fields))", "docstring": "Creates a BigQuery SQL UDF javascript object.\n\nArgs:\ninputs: a list of (name, type) tuples representing the schema of input.\noutputs: a list of (name, type) tuples representing the schema of the output.\nname: the name of the function\nimplementation: a javascript function defining the UDF logic.\nsupport_code: additional javascript code that the function can use.", "source": "juraj-google-style"}
{"code": "def is_object_new(self, func):\n    self.load_lazy_attribute('__new__')\n    self.load_lazy_attribute('__new__extra_args')\n    return [func] == self.members['__new__'].data or [func] == self.members['__new__extra_args'].data", "docstring": "Whether the given function is object.__new__.\n\nArgs:\nfunc: A function.\n\nReturns:\nTrue if func equals either of the pytd definitions for object.__new__,\nFalse otherwise.", "source": "github-repos"}
{"code": "def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:\n    if (not org_wav_fn.exists()):\n        raise FileNotFoundError\n    args = [config.FFMPEG_PATH, '-i', str(org_wav_fn), '-ac', '1', '-ar', '16000', str(tgt_wav_fn)]\n    subprocess.run(args)", "docstring": "Converts the wav into a 16bit mono 16000Hz wav.\n\nArgs:\norg_wav_fn: A `Path` to the original wave file\ntgt_wav_fn: The `Path` to output the processed wave file", "source": "codesearchnet"}
{"code": "class CustomObjectScope:\n\n    def __init__(self, custom_objects):\n        self.custom_objects = custom_objects or {}\n        self.backup = None\n\n    def __enter__(self):\n        self.backup = global_state.get_global_attribute('custom_objects_scope_dict', {}).copy()\n        global_state.set_global_attribute('custom_objects_scope_dict', self.custom_objects.copy())\n        return self\n\n    def __exit__(self, *args, **kwargs):\n        global_state.set_global_attribute('custom_objects_scope_dict', self.backup.copy())", "docstring": "Exposes custom classes/functions to Keras deserialization internals.\n\nUnder a scope `with custom_object_scope(objects_dict)`, Keras methods such\nas `keras.models.load_model()` or\n`keras.models.model_from_config()` will be able to deserialize any\ncustom object referenced by a saved config (e.g. a custom layer or metric).\n\nExample:\n\nConsider a custom regularizer `my_regularizer`:\n\n```python\nlayer = Dense(3, kernel_regularizer=my_regularizer)\n# Config contains a reference to `my_regularizer`\nconfig = layer.get_config()\n...\n# Later:\nwith custom_object_scope({'my_regularizer': my_regularizer}):\nlayer = Dense.from_config(config)\n```\n\nArgs:\ncustom_objects: Dictionary of `{str: object}` pairs,\nwhere the `str` key is the object name.", "source": "github-repos"}
{"code": "def _worker(self, constructor, conn):\n    try:\n        env = constructor()\n        while True:\n            try:\n                if (not conn.poll(0.1)):\n                    continue\n                (message, payload) = conn.recv()\n            except (EOFError, KeyboardInterrupt):\n                break\n            if (message == self._ACCESS):\n                name = payload\n                result = getattr(env, name)\n                conn.send((self._RESULT, result))\n                continue\n            if (message == self._CALL):\n                (name, args, kwargs) = payload\n                result = getattr(env, name)(*args, **kwargs)\n                conn.send((self._RESULT, result))\n                continue\n            if (message == self._CLOSE):\n                assert (payload is None)\n                break\n            raise KeyError('Received message of unknown type {}'.format(message))\n    except Exception:\n        stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))\n        tf.logging.error('Error in environment process: {}'.format(stacktrace))\n        conn.send((self._EXCEPTION, stacktrace))\n    conn.close()", "docstring": "The process waits for actions and sends back environment results.\n\nArgs:\nconstructor: Constructor for the OpenAI Gym environment.\nconn: Connection for communication to the main process.\n\nRaises:\nKeyError: When receiving a message of unknown type.", "source": "codesearchnet"}
{"code": "def add_ref(self, timestamp: int) -> None:\n    self._ref_times.append(timestamp)", "docstring": "Adds a reference to this tensor with the specified timestamp.\n\nArgs:\ntimestamp:  Timestamp of object reference as an integer.", "source": "github-repos"}
{"code": "def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  \n  \n  \n  \n  \n  match = Match(r'^(.*[^ ({>]){', line)\n\n  if match:\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    leading_text = match.group(1)\n    (endline, endlinenum, endpos) = CloseExpression(\n        clean_lines, linenum, len(match.group(1)))\n    trailing_text = ''\n    if endpos > -1:\n      trailing_text = endline[endpos:]\n    for offset in xrange(endlinenum + 1,\n                         min(endlinenum + 3, clean_lines.NumLines() - 1)):\n      trailing_text += clean_lines.elided[offset]\n    \n    \n    \n    if (not Match(r'^[\\s}]*[{.;,)<>\\]:]', trailing_text)\n        and not _IsType(clean_lines, nesting_state, leading_text)):\n      error(filename, linenum, 'whitespace/braces', 5,\n            'Missing space before {')\n\n  \n  if Search(r'}else', line):\n    error(filename, linenum, 'whitespace/braces', 5,\n          'Missing space before else')\n\n  \n  \n  \n  if Search(r':\\s*;\\s*$', line):\n    error(filename, linenum, 'whitespace/semicolon', 5,\n          'Semicolon defining empty statement. Use {} instead.')\n  elif Search(r'^\\s*;\\s*$', line):\n    error(filename, linenum, 'whitespace/semicolon', 5,\n          'Line contains only semicolon. If this should be an empty statement, '\n          'use {} instead.')\n  elif (Search(r'\\s+;\\s*$', line) and\n        not Search(r'\\bfor\\b', line)):\n    error(filename, linenum, 'whitespace/semicolon', 5,\n          'Extra space before last semicolon. If this should be an empty '\n          'statement, use {} instead.')", "docstring": "Checks for horizontal spacing near commas.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def build_graph(device, input_shape, output_sizes, axis):\n    with ops.device('/%s:0' % device):\n        inp = array_ops.zeros(input_shape)\n        outputs = []\n        for _ in range(100):\n            outputs.extend(array_ops.split(inp, output_sizes, axis))\n        return control_flow_ops.group(*outputs)", "docstring": "Build a graph containing a sequence of split operations.\n\nArgs:\ndevice: string, the device to run on.\ninput_shape: shape of the input tensor.\noutput_sizes: size of each output along axis.\naxis: axis to be split along.\n\nReturns:\nAn array of tensors to run()", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        raise ValueError('With TAPAS, you must provide both question IDs and table IDs.')\n    return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1", "docstring": "Build model inputs from a question and flattened table for question answering or sequence classification tasks\nby concatenating and adding special tokens.\n\nArgs:\ntoken_ids_0 (`List[int]`): The ids of the question.\ntoken_ids_1 (`List[int]`, *optional*): The ids of the flattened table.\n\nReturns:\n`List[int]`: The model input with special tokens.", "source": "github-repos"}
{"code": "def shell(cmd, *args, **kwargs):\n    if (kwargs.get('rel_path') and (not cmd.startswith('/'))):\n        cmd = os.path.join(kwargs['rel_path'], cmd)\n    status = 0\n    try:\n        output = subprocess.check_output(((cmd,) + args), stderr=kwargs.get('stderr'))\n    except subprocess.CalledProcessError as e:\n        if kwargs.get('raise_on_status', True):\n            raise e\n        output = e.output\n        status = e.returncode\n    except OSError as e:\n        if kwargs.get('raise_on_status', True):\n            raise e\n        if ('stderr' in kwargs):\n            kwargs['stderr'].write(e.message)\n        return ((- 1), '')\n    if six.PY3:\n        output = output.decode('utf8')\n    return (status, output)", "docstring": "Execute shell command and return output\n\nArgs:\ncmd (str): the command itself, i.e. part until the first space\n*args: positional arguments, i.e. other space-separated parts\nrel_path (bool): execute relative to the path (default: `False`)\nraise_on_status(bool): bool, raise exception if command\nexited with non-zero status (default: `True`)\nstderr (file-like): file-like object to collect stderr output,\nNone by default\n\nReturns:\nTuple[int, str]: status, shell output", "source": "codesearchnet"}
{"code": "def _AddAttributeContainer(self, container_type, attribute_container):\n    container_list = self._GetSerializedAttributeContainerList(container_type)\n    identifier = identifiers.SQLTableIdentifier(container_type, (container_list.next_sequence_number + 1))\n    attribute_container.SetIdentifier(identifier)\n    serialized_data = self._SerializeAttributeContainer(attribute_container)\n    container_list.PushAttributeContainer(serialized_data)\n    if (container_list.data_size > self._maximum_buffer_size):\n        self._WriteSerializedAttributeContainerList(container_type)", "docstring": "Adds an attribute container.\n\nArgs:\ncontainer_type (str): attribute container type.\nattribute_container (AttributeContainer): attribute container.\n\nRaises:\nIOError: if the attribute container cannot be serialized.\nOSError: if the attribute container cannot be serialized.", "source": "codesearchnet"}
{"code": "def loads(text):\n    \n    if text.startswith(\"CCSDS_OEM_VERS\"):\n        func = _read_oem\n    elif text.startswith(\"CCSDS_OPM_VERS\"):\n        func = _read_opm\n    else:\n        raise ValueError(\"Unknown CCSDS type\")\n    return func(text)", "docstring": "Read CCSDS from a string, and provide the beyond class corresponding;\nOrbit or list of Orbit if it's an OPM, Ephem if it's an OEM.\n\nArgs:\ntext (str):\nReturn:\nOrbit or Ephem\nRaise:\nValueError: when the text is not a recognizable CCSDS format", "source": "juraj-google-style"}
{"code": "def round_f1_macro(y_true, y_predicted):\n    \n    try:\n        predictions = [np.round(x) for x in y_predicted]\n    except TypeError:\n        predictions = y_predicted\n\n    return f1_score(np.array(y_true), np.array(predictions), average=\"macro\")", "docstring": "Calculates F1 macro measure.\n\nArgs:\ny_true: list of true values\ny_predicted: list of predicted values\n\nReturns:\nF1 score", "source": "juraj-google-style"}
{"code": "def set_extana_callback(self, callback, data=None):\n    self.extana_callback = callback\n    self.extana_callback_data = data", "docstring": "Register a callback for incoming data packets from the SK8-ExtAna board.\n\nThis method allows you to pass in a callable which will be called on\nreceipt of each packet sent from the SK8-ExtAna board. Set to `None` to\ndisable it again.\n\nArgs:\ncallback: a callable with the following signature:\n(ana1, ana2, temp, seq, timestamp, data)\nwhere:\nana1, ana2 = current values of the two analogue inputs\ntemp = temperature sensor reading\nseq = packet sequence number (int, 0-255)\ntimestamp = value of time.time() when packet received\ndata = value of `data` parameter passed to this method\ndata: an optional arbitrary object that will be passed as a\nparameter to the callback", "source": "codesearchnet"}
{"code": "def list_keyvaults(access_token, subscription_id, rgname):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.KeyVault/vaults',\n                        '?api-version=', KEYVAULT_API])\n    return do_get_next(endpoint, access_token)", "docstring": "Lists key vaults in the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. 200 OK.", "source": "juraj-google-style"}
{"code": "def candidates(self, word):\n    if self.known([word]):\n        return {word}\n    res = [x for x in self.edit_distance_1(word)]\n    tmp = self.known(res)\n    if tmp:\n        return tmp\n    if (self._distance == 2):\n        tmp = self.known([x for x in self.__edit_distance_alt(res)])\n        if tmp:\n            return tmp\n    return {word}", "docstring": "Generate possible spelling corrections for the provided word up to\nan edit distance of two, if and only when needed\n\nArgs:\nword (str): The word for which to calculate candidate spellings\nReturns:\nset: The set of words that are possible candidates", "source": "codesearchnet"}
{"code": "def en004(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `en004`'.format(value))\n    self._en004 = value", "docstring": "Corresponds to IDD Field `en004`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `en004`\nUnit: kJ/kg\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def compute_discriminator_reward(\n        self,\n        true_posterior_arr,\n        generated_posterior_arr\n    ):\n        \n        grad_arr = np.log(true_posterior_arr + 1e-08) + np.log(1 - generated_posterior_arr + 1e-08)\n        return grad_arr", "docstring": "Compute discriminator's reward.\n\nArgs:\ntrue_posterior_arr:         `np.ndarray` of `true` posterior inferenced by the discriminator.\ngenerated_posterior_arr:    `np.ndarray` of `fake` posterior inferenced by the discriminator.\n\nReturns:\n`np.ndarray` of Gradients.", "source": "juraj-google-style"}
{"code": "def _randomize_direction(base_heading, sigma) -> int:\n        \n        val = MissionWeather._gauss(base_heading, sigma)\n        val = MissionWeather._normalize_direction(val)\n        return val", "docstring": "Creates a variation in direction\n\nArgs:\nbase_heading: base direction\nsigma: sigma value for gaussian variation\n\nReturns: random direction", "source": "juraj-google-style"}
{"code": "def print_treemap(self, format=None, output=sys.stdout, **kwargs):\n        \n        treemap = self.as_treemap()\n        treemap.print(format=format, output=output, **kwargs)", "docstring": "Print the matrix for self's nodes.\n\nArgs:\nformat (str): output format (csv, json or text).\noutput (file): file descriptor on which to write.", "source": "juraj-google-style"}
{"code": "def add_ordinary_node(self, ast_node):\n    node = self._add_new_node(ast_node)\n    self.leaves = set((node,))\n    return node", "docstring": "Grows the graph by adding an ordinary CFG node.\n\nOrdinary nodes are followed by the next node, in lexical order, that is,\nthey become the new leaf set.\n\nArgs:\nast_node: ast.AST\n\nReturns:\nNode", "source": "github-repos"}
{"code": "def send_rpc(self, address, rpc_id, call_payload, timeout=3.0):\n    if (not self.connected):\n        raise HardwareError('Cannot send an RPC if we are not in a connected state')\n    if (timeout is None):\n        timeout = 3.0\n    status = (- 1)\n    payload = b''\n    recording = None\n    if self.connection_interrupted:\n        self._try_reconnect()\n    if (self._record is not None):\n        recording = _RecordedRPC(self.connection_string, address, rpc_id, call_payload)\n        recording.start()\n    try:\n        payload = self._loop.run_coroutine(self.adapter.send_rpc(0, address, rpc_id, call_payload, timeout))\n        (status, payload) = pack_rpc_response(payload, None)\n    except VALID_RPC_EXCEPTIONS as exc:\n        (status, payload) = pack_rpc_response(payload, exc)\n    if (self._record is not None):\n        recording.finish(status, payload)\n        self._recording.append(recording)\n    if self.connection_interrupted:\n        self._try_reconnect()\n    return unpack_rpc_response(status, payload, rpc_id, address)", "docstring": "Send an rpc to our connected device.\n\nThe device must already be connected and the rpc interface open.  This\nmethod will synchronously send an RPC and wait for the response.  Any\nRPC errors will be raised as exceptions and if there were no errors, the\nRPC's response payload will be returned as a binary bytearray.\n\nSee :meth:`AbstractDeviceAdapter.send_rpc` for documentation of the possible\nexceptions that can be raised here.\n\nArgs:\naddress (int): The tile address containing the RPC\nrpc_id (int): The ID of the RPC that we wish to call.\ncall_payload (bytes): The payload containing encoded arguments for the\nRPC.\ntimeout (float): The maximum number of seconds to wait for the RPC to\nfinish.  Defaults to 3s.\n\nReturns:\nbytearray: The RPC's response payload.", "source": "codesearchnet"}
{"code": "def get_exception_handlers(\n    node: astroid.node_classes.NodeNG, exception=Exception\n) -> List[astroid.ExceptHandler]:\n    \n    context = find_try_except_wrapper_node(node)\n    if isinstance(context, astroid.TryExcept):\n        return [\n            handler for handler in context.handlers if error_of_type(handler, exception)\n        ]\n    return None", "docstring": "Return the collections of handlers handling the exception in arguments.\n\nArgs:\nnode (astroid.NodeNG): A node that is potentially wrapped in a try except.\nexception (builtin.Exception or str): exception or name of the exception.\n\nReturns:\nlist: the collection of handlers that are handling the exception or None.", "source": "juraj-google-style"}
{"code": "def __add__(self, other):\n        \n        sum_ct = ContingencyTable(*(self.table + other.table).tolist())\n        return sum_ct", "docstring": "Add two contingency tables together and return a combined one.\n\nArgs:\nother: Another contingency table\n\nReturns:\nSum of contingency tables", "source": "juraj-google-style"}
{"code": "def verify(self, obj):\n        \n\n        if len(self._options) == 0:\n            raise ValidationError(\"No options\", reason='no options given in options verifier, matching not possible',\n                                  object=obj)\n\n        exceptions = {}\n\n        for i, option in enumerate(self._options):\n            try:\n                obj = option.verify(obj)\n                return obj\n            except ValidationError as exc:\n                exceptions['option_%d' % (i+1)] = exc.params['reason']\n\n        raise ValidationError(\"Object did not match any of a set of options\",\n                              reason=\"object did not match any given option (first failure = '%s')\"\n                                     % exceptions['option_1'], **exceptions)", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def module_entry(yfile):\n    \n    ytxt = yfile.read()\n    mp = ModuleParser(ytxt)\n    mst = mp.statement()\n    submod = mst.keyword == \"submodule\"\n    import_only = True\n    rev = \"\"\n    features = []\n    includes = []\n    rec = {}\n    for sst in mst.substatements:\n        if not rev and sst.keyword == \"revision\":\n            rev = sst.argument\n        elif import_only and sst.keyword in data_kws:\n            import_only = False\n        elif sst.keyword == \"feature\":\n            features.append(sst.argument)\n        elif submod:\n            continue\n        elif sst.keyword == \"namespace\":\n            rec[\"namespace\"] = sst.argument\n        elif sst.keyword == \"include\":\n            rd = sst.find1(\"revision-date\")\n            includes.append((sst.argument, rd.argument if rd else None))\n    rec[\"import-only\"] = import_only\n    rec[\"features\"] = features\n    if submod:\n        rec[\"revision\"] = rev\n        submodmap[mst.argument] = rec\n    else:\n        rec[\"includes\"] = includes\n        modmap[(mst.argument, rev)] = rec", "docstring": "Add entry for one file containing YANG module text.\n\nArgs:\nyfile (file): File containing a YANG module or submodule.", "source": "juraj-google-style"}
{"code": "def git_branch_delete(branch_name):\n    \n    \n    if branch_name not in git.protected_branches():\n        log.info(\"Deleting branch <33>{}\", branch_name)\n        shell.run('git branch -d {}'.format(branch_name))", "docstring": "Delete the given branch.\n\nArgs:\nbranch_name (str):\nName of the branch to delete.", "source": "juraj-google-style"}
{"code": "def _without_tensor_names(self) -> 'TypeSpec':\n\n    def rename(value):\n        if isinstance(value, TypeSpec):\n            return value._without_tensor_names()\n        return value\n    return self._deserialize(nest.map_structure(rename, self._serialize()))", "docstring": "Returns a TypeSpec compatible with `self`, with tensor names removed.\n\nReturns:\nA `TypeSpec` that is compatible with `self`, where the name of any\n`TensorSpec` is set to `None`.", "source": "github-repos"}
{"code": "def RegisterParser(cls, parser_class):\n    \n    parser_name = parser_class.NAME.lower()\n    if parser_name in cls._parser_classes:\n      raise KeyError('Parser class already set for name: {0:s}.'.format(\n          parser_class.NAME))\n\n    cls._parser_classes[parser_name] = parser_class", "docstring": "Registers a parser class.\n\nThe parser classes are identified based on their lower case name.\n\nArgs:\nparser_class (type): parser class (subclass of BaseParser).\n\nRaises:\nKeyError: if parser class is already set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def __init__(self, sess, hooks):\n    _WrappedSession.__init__(self, sess)\n    self._hooks = hooks\n    self._should_stop = False", "docstring": "Initializes a _HookedSession object.\n\nArgs:\nsess: A `tf.compat.v1.Session` or a `_WrappedSession` object.\nhooks: An iterable of `SessionRunHook' objects.", "source": "github-repos"}
{"code": "def _compute_hparam_info_from_values(self, name, values):\n    result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET)\n    distinct_values = set((_protobuf_value_to_string(v) for v in values if _protobuf_value_type(v)))\n    for v in values:\n        v_type = _protobuf_value_type(v)\n        if (not v_type):\n            continue\n        if (result.type == api_pb2.DATA_TYPE_UNSET):\n            result.type = v_type\n        elif (result.type != v_type):\n            result.type = api_pb2.DATA_TYPE_STRING\n        if (result.type == api_pb2.DATA_TYPE_STRING):\n            break\n    if (result.type == api_pb2.DATA_TYPE_UNSET):\n        return None\n    if ((result.type == api_pb2.DATA_TYPE_STRING) and (len(distinct_values) <= self._max_domain_discrete_len)):\n        result.domain_discrete.extend(distinct_values)\n    return result", "docstring": "Builds an HParamInfo message from the hparam name and list of values.\n\nArgs:\nname: string. The hparam name.\nvalues: list of google.protobuf.Value messages. The list of values for the\nhparam.\n\nReturns:\nAn api_pb2.HParamInfo message.", "source": "codesearchnet"}
{"code": "def configure_ospf(self, cmd):\n        \n        config = self.get()\n        cmds = ['router ospf {}'.format(config['ospf_process_id'])]\n        cmds.extend(make_iterable(cmd))\n        return super(Ospf, self).configure(cmds)", "docstring": "Allows for a list of OSPF subcommands to be configured\"\n\nArgs:\ncmd: (list or str): Subcommand to be entered\nReturns:\nbool: True if all the commands completed successfully", "source": "juraj-google-style"}
{"code": "def publish(self, channel, message, pipeline=False):\n    if pipeline:\n        self._pipeline.publish(channel, message)\n    else:\n        self._db.publish(channel, message)", "docstring": "Post a message to a given channel.\n\nArgs:\nchannel (str): Channel where the message will be published\nmessage (str): Message to publish\npipeline (bool): True, start a transaction block. Default false.", "source": "codesearchnet"}
{"code": "def WaitForReport(self, report_job):\n    \n    service = self._GetReportService()\n    report_job_id = service.runReportJob(report_job)['id']\n\n    if self._version > 'v201502':\n      status = service.getReportJobStatus(report_job_id)\n    else:\n      status = service.getReportJob(report_job_id)['reportJobStatus']\n\n    while status != 'COMPLETED' and status != 'FAILED':\n      _data_downloader_logger.debug('Report job status: %s', status)\n      time.sleep(30)\n      if self._version > 'v201502':\n        status = service.getReportJobStatus(report_job_id)\n      else:\n        status = service.getReportJob(report_job_id)['reportJobStatus']\n\n    if status == 'FAILED':\n      raise googleads.errors.AdManagerReportError(report_job_id)\n    else:\n      _data_downloader_logger.debug('Report has completed successfully')\n      return report_job_id", "docstring": "Runs a report, then waits (blocks) for the report to finish generating.\n\nArgs:\nreport_job: The report job to wait for. This may be a dictionary or an\ninstance of the SOAP ReportJob class.\n\nReturns:\nThe completed report job's ID as a string.\n\nRaises:\nAn AdManagerReportError if the report job fails to complete.", "source": "juraj-google-style"}
{"code": "def SkipAhead(self, file_object, number_of_characters):\n    lines_size = len(self.lines)\n    while (number_of_characters >= lines_size):\n        number_of_characters -= lines_size\n        self.lines = ''\n        self.ReadLines(file_object)\n        lines_size = len(self.lines)\n        if (lines_size == 0):\n            return\n    self.lines = self.lines[number_of_characters:]", "docstring": "Skips ahead a number of characters.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\nnumber_of_characters (int): number of characters.", "source": "codesearchnet"}
{"code": "def sample_frames(self, video: 'torch.Tensor', metadata: Union[VideoMetadata, dict], num_frames: Optional[int]=None, fps: Optional[int]=None, skip_secs: Optional[int]=1):\n    num_frames = num_frames if num_frames is not None else self.num_frames\n    fps = fps if fps is not None else self.fps\n    total_num_frames = video.shape[0]\n    estimated_frames = int(round(fps * metadata['duration']))\n    desired_frames = min(estimated_frames, num_frames)\n    if desired_frames < 1:\n        desired_frames = 1\n    start_idx = 0\n    end_idx = total_num_frames - 1\n    if skip_secs > 0 and metadata['duration'] - 2 * skip_secs > num_frames * fps:\n        start_idx = int(skip_secs * metadata['fps'])\n        end_idx = int(total_num_frames - skip_secs * metadata['fps'])\n    start_idx = max(0, start_idx)\n    end_idx = min(end_idx, total_num_frames - 1)\n    if start_idx >= end_idx:\n        start_idx, end_idx = (0, total_num_frames - 1)\n    indices = np.linspace(start_idx, end_idx, desired_frames, dtype=int)\n    indices = np.unique(indices)\n    video = video[indices].contiguous()\n    timestamps = []\n    for idx in indices:\n        sec = idx / metadata['fps']\n        mm = int(sec \n        ss = int(sec % 60)\n        timestamps.append([mm, ss])\n    return (video, timestamps, int(metadata['duration']))", "docstring": "Video sampling function which:\n- Uses `num_frames` (if provided) or calculates it from `fps` and metadata.\n- Applies a basic center-skip if fewer frames than available, otherwise\noptionally skips `skip_secs` from both the start and end.\n- Uniformly samples the desired number of frames between the start and end indices.\n\nArgs:\nvideo (`torch.Tensor`):\nVideo that need to be sampled.\nmetadata (`VideoMetadata`):\nMetadata of the video containing information about total duration, fps and total number of frames.\nnum_frames (`int`, *optional*):\nMaximum number of frames to sample. Defaults to `self.num_frames`.\nfps (`int`, *optional*):\nTarget frames to sample per second. Defaults to `self.fps`.\nskip_secs (`float`, *optional*, defaults to `1`):\nNumber of seconds to skip from the start and end if the video is long enough.\n\nReturns:\ntorch.Tensor:\nSampled video frames.", "source": "github-repos"}
{"code": "def parse(self, text):\n        \n        tokens = self.lex(text)\n        parser = Parser(tokens)\n        return parser.parse()", "docstring": "Parse self.text.\n\nArgs:\ntext (str): the text to lex\n\nReturns:\nobject: a node representing the current rule.", "source": "juraj-google-style"}
{"code": "def safe_datetime_cast(self, col):\n    casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce')\n    if len(casted_dates[casted_dates.isnull()]):\n        slice_ = (casted_dates.isnull() & (~ col[self.col_name].isnull()))\n        col[slice_][self.col_name].apply(self.strptime_format)\n    return casted_dates", "docstring": "Parses string values into datetime.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.Series", "source": "codesearchnet"}
{"code": "def create(self, uri=None, graph=None, data=None):\n    if (uri is not None):\n        existing_entity = self.__dedup__(rdflib.URIRef(uri), graph)\n        if (existing_entity is not None):\n            return\n    else:\n        default_request = urllib.request.Request('/'.join([self.base_url, 'rest']), method='POST')\n        uri = urllib.request.urlopen(default_request).read().decode()\n    if (graph is not None):\n        new_graph = copy_graph(rdflib.URIRef(uri), graph)\n        create_response = self.connect(uri, data=new_graph.serialize(format='turtle'), method='PUT')\n        raw_response = create_response.read()\n    return uri", "docstring": "Method takes an optional URI and graph, first checking if the URL is already\npresent in Fedora, if not, creates a Fedora Object with the graph as\nproperties. If URI is None, uses Fedora 4 default PID minter to create\nthe object's URI.\n\nArgs:\nuri(string): String of URI, default is None\ngraph(rdflib.Graph): RDF Graph of subject, default is None\ndata(object): Binary datastream that will be saved as fcr:content\n\nReturns:\nURI(string): New Fedora URI or None if uri already exists", "source": "codesearchnet"}
{"code": "def inner_shape(self):\n    return self._inner_shape", "docstring": "The inner dimension sizes for this shape.\n\nReturns:\nA 1-D integer `Tensor`.", "source": "github-repos"}
{"code": "def convert_new_publication_info_to_old(publication_infos):\n\n    def _needs_a_hidden_pubnote(journal_title, journal_volume):\n        return ((journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE) and (journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]))\n    result = []\n    for publication_info in publication_infos:\n        _publication_info = copy.deepcopy(publication_info)\n        journal_title = _publication_info.get('journal_title')\n        try:\n            journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]\n            _publication_info['journal_title'] = journal_title\n            result.append(_publication_info)\n            continue\n        except KeyError:\n            pass\n        journal_volume = _publication_info.get('journal_volume')\n        year = _publication_info.get('year')\n        if ((journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME) and year and journal_volume and (len(journal_volume) == 2)):\n            two_digit_year = str(year)[2:]\n            _publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])\n            result.append(_publication_info)\n            continue\n        if (journal_title and journal_volume):\n            match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)\n            if (match and _needs_a_hidden_pubnote(journal_title, journal_volume)):\n                _publication_info['journal_title'] = match.group('title')\n                _publication_info['journal_volume'] = (journal_volume + match.group('letter'))\n                result.append(_publication_info)\n                _publication_info = copy.deepcopy(publication_info)\n                _publication_info['hidden'] = True\n                _publication_info['journal_title'] = match.group('title')\n                _publication_info['journal_volume'] = (match.group('letter') + journal_volume)\n            elif (match and (journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER)):\n                _publication_info['journal_title'] = match.group('title')\n                _publication_info['journal_volume'] = (match.group('letter') + journal_volume)\n        result.append(_publication_info)\n    return result", "docstring": "Convert back a ``publication_info`` value from the new format to the old.\n\nDoes the inverse transformation of :func:`convert_old_publication_info_to_new`,\nto be used whenever we are sending back records from Labs to Legacy.\n\nArgs:\npublication_infos: a ``publication_info`` in the new format.\n\nReturns:\nlist(dict): a ``publication_info`` in the old format.", "source": "codesearchnet"}
{"code": "def new_netting_channel(self, partner: Address, settle_timeout: int, given_block_identifier: BlockSpecification) -> ChannelID:\n    checking_block = self.client.get_checking_block()\n    self._new_channel_preconditions(partner=partner, settle_timeout=settle_timeout, block_identifier=given_block_identifier)\n    log_details = {'peer1': pex(self.node_address), 'peer2': pex(partner)}\n    gas_limit = self.proxy.estimate_gas(checking_block, 'openChannel', participant1=self.node_address, participant2=partner, settle_timeout=settle_timeout)\n    if (not gas_limit):\n        self.proxy.jsonrpc_client.check_for_insufficient_eth(transaction_name='openChannel', transaction_executed=False, required_gas=GAS_REQUIRED_FOR_OPEN_CHANNEL, block_identifier=checking_block)\n        self._new_channel_postconditions(partner=partner, block=checking_block)\n        log.critical('new_netting_channel call will fail', **log_details)\n        raise RaidenUnrecoverableError('Creating a new channel will fail')\n    log.debug('new_netting_channel called', **log_details)\n    if (gas_limit and (partner not in self.open_channel_transactions)):\n        new_open_channel_transaction = AsyncResult()\n        self.open_channel_transactions[partner] = new_open_channel_transaction\n        gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_OPEN_CHANNEL)\n        try:\n            transaction_hash = self.proxy.transact('openChannel', gas_limit, participant1=self.node_address, participant2=partner, settle_timeout=settle_timeout)\n            self.client.poll(transaction_hash)\n            receipt_or_none = check_transaction_threw(self.client, transaction_hash)\n            if receipt_or_none:\n                self._new_channel_postconditions(partner=partner, block=receipt_or_none['blockNumber'])\n                log.critical('new_netting_channel failed', **log_details)\n                raise RaidenUnrecoverableError('creating new channel failed')\n        except Exception as e:\n            log.critical('new_netting_channel failed', **log_details)\n            new_open_channel_transaction.set_exception(e)\n            raise\n        else:\n            new_open_channel_transaction.set(transaction_hash)\n        finally:\n            self.open_channel_transactions.pop(partner, None)\n    else:\n        self.open_channel_transactions[partner].get()\n    channel_identifier: ChannelID = self._detail_channel(participant1=self.node_address, participant2=partner, block_identifier='latest').channel_identifier\n    log_details['channel_identifier'] = str(channel_identifier)\n    log.info('new_netting_channel successful', **log_details)\n    return channel_identifier", "docstring": "Creates a new channel in the TokenNetwork contract.\n\nArgs:\npartner: The peer to open the channel with.\nsettle_timeout: The settle timeout to use for this channel.\ngiven_block_identifier: The block identifier of the state change that\nprompted this proxy action\n\nReturns:\nThe ChannelID of the new netting channel.", "source": "codesearchnet"}
{"code": "def set_many(self, values, expire=0, noreply=None):\n    if (noreply is None):\n        noreply = self.default_noreply\n    result = self._store_cmd(b'set', values, expire, noreply)\n    return [k for (k, v) in six.iteritems(result) if (not v)]", "docstring": "A convenience function for setting multiple values.\n\nArgs:\nvalues: dict(str, str), a dict of keys and values, see class docs\nfor details.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nReturns a list of keys that failed to be inserted.\nIf noreply is True, always returns empty list.", "source": "codesearchnet"}
{"code": "def GetFeeds(client):\n  \n  feed_service = client.GetService('FeedService', 'v201809')\n\n  feeds = []\n  more_pages = True\n\n  selector = {\n      'fields': ['Id', 'Name', 'Attributes'],\n      'predicates': [\n          {\n              'field': 'Origin',\n              'operator': 'EQUALS',\n              'values': ['USER']\n          },\n          {\n              'field': 'FeedStatus',\n              'operator': 'EQUALS',\n              'values': ['ENABLED']\n          }\n      ],\n      'paging': {\n          'startIndex': 0,\n          'numberResults': PAGE_SIZE\n      }\n  }\n\n  while more_pages:\n    page = feed_service.get(selector)\n\n    if 'entries' in page:\n      feeds.extend(page['entries'])\n\n    selector['paging']['startIndex'] += PAGE_SIZE\n    more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries'])\n\n  return feeds", "docstring": "Returns a list of all enabled Feeds.\n\nArgs:\nclient: an AdWordsClient instance.\n\nReturns:\nA list containing all enabled Feeds.", "source": "juraj-google-style"}
{"code": "def __init__(self, min_value: int=0, max_value: Optional[int]=None):\n    super().__init__()\n    self._min_value = min_value\n    self._max_value = max_value", "docstring": "Constructor.\n\nArgs:\nmin_value: Min value that is acceptable for the list index.\nmax_value: Max value that is acceptable for the list index. If None, there\nis no upper bound for list index.", "source": "github-repos"}
{"code": "def op(self):\n    return self._op", "docstring": "The operation that failed, if known.\n\n*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`\nor `Recv` op, there will be no corresponding\n`tf.Operation`\nobject.  In that case, this will return `None`, and you should\ninstead use the `tf.errors.OpError.node_def` to\ndiscover information about the op.\n\nReturns:\nThe `Operation` that failed, or None.", "source": "github-repos"}
{"code": "def thread(self, value: str):\n        \n        if value is not None and not isinstance(value, str):\n            raise TypeError(\"'thread' MUST be a string\")\n        self._thread = value", "docstring": "Set thread id of the message\n\nArgs:\nvalue (str): the thread id", "source": "juraj-google-style"}
{"code": "def sort_servers_closest(servers: Sequence[str]) -> Sequence[Tuple[(str, float)]]:\n    if (not {urlparse(url).scheme for url in servers}.issubset({'http', 'https'})):\n        raise TransportError('Invalid server urls')\n    get_rtt_jobs = set((gevent.spawn((lambda url: (url, get_http_rtt(url))), server_url) for server_url in servers))\n    gevent.joinall(get_rtt_jobs, raise_error=False)\n    sorted_servers: List[Tuple[(str, float)]] = sorted((job.value for job in get_rtt_jobs if (job.value[1] is not None)), key=itemgetter(1))\n    log.debug('Matrix homeserver RTT times', rtt_times=sorted_servers)\n    return sorted_servers", "docstring": "Sorts a list of servers by http round-trip time\n\nParams:\nservers: sequence of http server urls\nReturns:\nsequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers\n(possibly empty)", "source": "codesearchnet"}
{"code": "def readData(self, fileName):\n        \n        lock_and_call(\n            lambda: self._impl.readData(fileName),\n            self._lock\n        )\n        self._errorhandler_wrapper.check()", "docstring": "Interprets the specified file as an AMPL data file. As a side effect,\nit invalidates all entities (as the passed file can contain any\narbitrary command); the lists of entities will be re-populated lazily\n(at first access). After reading the file, the interpreter is put back\nto \"model\" mode.\n\nArgs:\nfileName: Full path to the file.\n\nRaises:\nRuntimeError: in case the file does not exist.", "source": "juraj-google-style"}
{"code": "def build_phenotype(phenotype_id, adapter):\n    phenotype_obj = {}\n    phenotype = adapter.hpo_term(phenotype_id)\n    if phenotype:\n        phenotype_obj['phenotype_id'] = phenotype['hpo_id']\n        phenotype_obj['feature'] = phenotype['description']\n    return phenotype", "docstring": "Build a small phenotype object\n\nBuild a dictionary with phenotype_id and description\n\nArgs:\nphenotype_id (str): The phenotype id\nadapter (scout.adapter.MongoAdapter)\n\nReturns:\nphenotype_obj (dict):\n\ndict(\nphenotype_id = str,\nfeature = str, # description of phenotype\n)", "source": "codesearchnet"}
{"code": "def is_insert_grad_of_statement(node):\n    tangent_calls = [(anno.getanno(item.context_expr, 'func', None) is utils.insert_grad_of) for item in node.items]\n    if all(tangent_calls):\n        return True\n    elif any(tangent_calls):\n        raise ValueError\n    else:\n        return False", "docstring": "Check whether a context manager calls `insert_grad_of`.\n\nArgs:\nnode: The context manager node.\n\nReturns:\nWhether or not this node contains `insert_grad_of` calls.\n\nRaises:\nValueError: If the `insert_grad_of` calls are mixed with other calls.", "source": "codesearchnet"}
{"code": "def get_id(page):\n  \n  start_pos = page.find(\"<id>\")\n  end_pos = page.find(\"</id>\")\n  assert start_pos != -1\n  assert end_pos != -1\n  start_pos += len(\"<id>\")\n  return int(page[start_pos:end_pos])", "docstring": "Extract the id from a page.\n\nArgs:\npage: a string\nReturns:\nan integer", "source": "juraj-google-style"}
{"code": "def _FormatPackedIPv6Address(self, packed_ip_address):\n    octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])\n    octet_pairs = [((octet1 << 8) | octet2) for (octet1, octet2) in octet_pairs]\n    return ':'.join(['{0:04x}'.format(octet_pair) for octet_pair in octet_pairs])", "docstring": "Formats a packed IPv6 address as a human readable string.\n\nArgs:\npacked_ip_address (list[int]): packed IPv6 address.\n\nReturns:\nstr: human readable IPv6 address.", "source": "codesearchnet"}
{"code": "def encode_categorical_inputs(inputs, output_mode, depth, dtype, sparse=False, count_weights=None, backend_module=None):\n    backend_module = backend_module or backend\n    if output_mode == 'int':\n        return backend_module.cast(inputs, dtype=dtype)\n    rank_of_inputs = len(backend_module.shape(inputs))\n    if rank_of_inputs == 0:\n        inputs = backend_module.numpy.expand_dims(inputs, -1)\n        rank_of_inputs = 1\n    if backend_module.__name__.endswith('tensorflow') and rank_of_inputs <= 2 and (output_mode in ('multi_hot', 'count')):\n        try:\n            return tf_utils.tf_encode_categorical_inputs(inputs, output_mode, depth, dtype=dtype, sparse=sparse, count_weights=count_weights)\n        except ValueError:\n            pass\n    if output_mode == 'multi_hot':\n        return backend_module.nn.multi_hot(inputs, depth, dtype=dtype, sparse=sparse)\n    elif output_mode == 'one_hot':\n        input_shape = backend_module.core.shape(inputs)\n        if input_shape is not None and len(input_shape) > 1 and (input_shape[-1] == 1):\n            newshape = tuple(input_shape[:-1])\n            inputs = backend_module.numpy.reshape(inputs, newshape)\n        return backend_module.nn.one_hot(inputs, depth, dtype=dtype, sparse=sparse)\n    elif output_mode == 'count':\n        reduction_axis = 1 if len(inputs.shape) > 1 else 0\n        if count_weights is not None:\n            dtype = count_weights.dtype\n        one_hot_encoding = backend_module.nn.one_hot(inputs, depth, dtype=dtype, sparse=sparse)\n        if count_weights is not None:\n            count_weights = backend_module.numpy.expand_dims(count_weights, -1)\n            one_hot_encoding = one_hot_encoding * count_weights\n        outputs = backend_module.numpy.sum(one_hot_encoding, axis=reduction_axis)\n        return outputs", "docstring": "Encodes categorical inputs according to output_mode.\n\nArgs:\ninputs: the inputs to encode.\noutput_mode: one of `\"int\"`, `\"one_hot\"`, `\"multi_hot\"`, or `\"count\"`.\ndepth: number of classes, this will be the last dimension of the output.\ndtype: the dtype of the output, unless `count_weights` is not `None`.\nsparse: whether the output should be sparse for backends supporting it.\ncount_weights: weights to apply if `output_mode` is `\"count\"`.\nbackend_module: the backend to use instead of the current one.\n\nReturns: the encoded inputs.", "source": "github-repos"}
{"code": "def check(self, version):\n    for disjunct in self._disjuncts:\n        if self._check_insersection(version, disjunct):\n            return True\n    return False", "docstring": "Check that a version is inside this SemanticVersionRange\n\nArgs:\nversion (SemanticVersion): The version to check\n\nReturns:\nbool: True if the version is included in the range, False if not", "source": "codesearchnet"}
{"code": "def set_fore(\n        self, x: int, y: int, r: int, g: int, b: int, char: str\n    ) -> None:\n        \n        i = self.width * y + x\n        self.fore_r[i] = r\n        self.fore_g[i] = g\n        self.fore_b[i] = b\n        self.char[i] = ord(char)", "docstring": "Set the character and foreground color of one cell.\n\nArgs:\nx (int): X position to change.\ny (int): Y position to change.\nr (int): Red foreground color, from 0 to 255.\ng (int): Green foreground color, from 0 to 255.\nb (int): Blue foreground color, from 0 to 255.\nchar (AnyStr): A single character str or bytes object.", "source": "juraj-google-style"}
{"code": "def verify(self, verify_key):\n    if ((not self.mardata.signatures) or (not self.mardata.signatures.sigs)):\n        return False\n    hashers = []\n    for sig in self.mardata.signatures.sigs:\n        hashers.append((sig.algorithm_id, sig.signature, make_hasher(sig.algorithm_id)))\n    assert (len(hashers) == len(self.mardata.signatures.sigs))\n    for block in get_signature_data(self.fileobj, self.mardata.signatures.filesize):\n        [h.update(block) for (_, _, h) in hashers]\n    for (algo_id, sig, h) in hashers:\n        if (not verify_signature(verify_key, sig, h.finalize(), h.algorithm.name)):\n            return False\n    else:\n        return True", "docstring": "Verify that this MAR file has a valid signature.\n\nArgs:\nverify_key (str): PEM formatted public key\n\nReturns:\nTrue if the MAR file's signature matches its contents\nFalse otherwise; this includes cases where there is no signature.", "source": "codesearchnet"}
{"code": "def __init__(self, dataset, coordinator):\n    if isinstance(dataset, input_lib.DistributedDataset):\n        original_dataset = dataset._original_dataset\n        serialized = serialize_dataset_to_graph(original_dataset)\n\n        def dataset_fn():\n            deserialized = deserialize_dataset_from_graph(serialized, original_dataset.element_spec)\n            dataset.build(dataset_to_replace=deserialized)\n            return dataset\n    elif isinstance(dataset, input_lib.DistributedDatasetsFromFunction):\n\n        def dataset_fn():\n            dataset.build()\n            return dataset\n    elif isinstance(dataset, dataset_ops.Dataset):\n        serialized = serialize_dataset_to_graph(dataset)\n\n        def dataset_fn():\n            return deserialize_dataset_from_graph(serialized, dataset.element_spec)\n    else:\n        raise ValueError('Unexpected dataset type!')\n    super(PerWorkerDatasetFromDataset, self).__init__(dataset_fn, coordinator)", "docstring": "Makes an iterable from datasets created by the given dataset.\n\nIt creates a dataset_fn which deserializes a dataset from a graph under the\nhood.\n\nArgs:\ndataset: A tf.data.Dataset, a DistributedDataset or a\nDistributedDatasetsFromFunction\ncoordinator: a `ClusterCoordinator` object, used to create dataset\nresources.", "source": "github-repos"}
{"code": "def store_inputs(self, line_num, source, source_raw=None):\n        \n        self.old.store_inputs(line_num, source, source_raw)\n        \n        \n        self.decorator.pre_run_cell(line_num, source)", "docstring": "Store source and raw input in history and create input cache\nvariables ``_i*``.\n\nArgs:\nline_num (int): The prompt number of this input.\nsource (str): Python input.\nsource_raw (str): If given, this is the raw input without any\nIPython transformations applied to it.  If not given, ``source``\nis used.", "source": "juraj-google-style"}
{"code": "def from_iterables(ig_info: fhir_package.IgInfo, structure_definitions: Iterable[structure_definition_pb2.StructureDefinition], search_parameters: Iterable[search_parameter_pb2.SearchParameter], code_systems: Iterable[code_system_pb2.CodeSystem], value_sets: Iterable[value_set_pb2.ValueSet], resource_time_zone: str='Z') -> fhir_package.FhirPackage[structure_definition_pb2.StructureDefinition, search_parameter_pb2.SearchParameter, code_system_pb2.CodeSystem, value_set_pb2.ValueSet]:\n    return fhir_package.FhirPackage(ig_info=ig_info, structure_definitions=fhir_package.ResourceCollection.from_iterable(structure_definitions, structure_definition_pb2.StructureDefinition, _PRIMITIVE_HANDLER, resource_time_zone), search_parameters=fhir_package.ResourceCollection.from_iterable(search_parameters, search_parameter_pb2.SearchParameter, _PRIMITIVE_HANDLER, resource_time_zone), code_systems=fhir_package.ResourceCollection.from_iterable(code_systems, code_system_pb2.CodeSystem, _PRIMITIVE_HANDLER, resource_time_zone), value_sets=fhir_package.ResourceCollection.from_iterable(value_sets, value_set_pb2.ValueSet, _PRIMITIVE_HANDLER, resource_time_zone))", "docstring": "Builds a FHIR R4 `FhirPackage` containing the given resources.\n\nArgs:\nig_info: The metadata to associate with the `FhirPackage`.\nstructure_definitions: The structure definitions to include in the\n`FhirPackage`.\nsearch_parameters: The search parameters to include in the `FhirPackage`.\ncode_systems: The code systems to include in the `FhirPackage`.\nvalue_sets: The value sets to include in the `FhirPackage`.\nresource_time_zone: If additional JSON resources are added to the\n`FhirPackage`, the time zone code to parse resource dates into when adding\nthose JSON resources.\n\nReturns:\nA `FhirPackage` instance with the requested resources.", "source": "github-repos"}
{"code": "def authenticate(json_path=None):\n  \n  msg = ('budou.authentication() is deprecated. '\n         'Please use budou.get_parser() to obtain a parser instead.')\n  warnings.warn(msg, DeprecationWarning)\n  parser = get_parser('nlapi', credentials_path=json_path)\n  return parser", "docstring": "Gets a Natural Language API parser by authenticating the API.\n\n**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a\nparser instead.\n\nArgs:\njson_path (:obj:`str`, optional): The file path to the service account's\ncredentials.\n\nReturns:\nParser. (:obj:`budou.parser.NLAPIParser`)", "source": "juraj-google-style"}
{"code": "def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    if self.new_decoder_architecture:\n        batch, seq_len, _ = fused_qkv.shape\n        qkv = fused_qkv.view(batch, seq_len, -1, self.num_heads \n        query = qkv[:, :, :, :-2]\n        key = qkv[:, :, :, [-2]]\n        value = qkv[:, :, :, [-1]]\n        key = torch.broadcast_to(key, query.shape)\n        value = torch.broadcast_to(value, query.shape)\n        query, key, value = [x.flatten(2, 3) for x in (query, key, value)]\n        return (query, key, value)\n    elif not self.multi_query:\n        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)\n        return (fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :])\n    else:\n        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads + 2, self.head_dim)\n        return (fused_qkv[..., :-2, :], fused_qkv[..., [-2], :], fused_qkv[..., [-1], :])", "docstring": "Split the last dimension into (num_heads, head_dim), results share same memory storage as `fused_qkv`\n\nArgs:\nfused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]\n\nReturns:\nquery: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]\nvalue: [batch_size, seq_length, num_heads, head_dim]", "source": "github-repos"}
{"code": "def get_beta(self, kl_loss=0.0):\n    if self.hparams.latent_loss_multiplier_dynamic:\n        beta = tf.Variable(self.hparams.latent_loss_multiplier, trainable=False, dtype=tf.float32)\n        alpha = self.hparams.latent_loss_multiplier_alpha\n        epsilon = self.hparams.latent_loss_multiplier_epsilon\n        shadow_beta = (beta + (alpha * (kl_loss - epsilon)))\n        shadow_beta = tf.maximum(shadow_beta, 0.0)\n        shadow_beta = tf.minimum(shadow_beta, 1.0)\n        update_op = tf.assign(beta, shadow_beta)\n    else:\n        beta = common_video.beta_schedule(schedule=self.hparams.latent_loss_multiplier_schedule, global_step=self.get_iteration_num(), final_beta=self.hparams.latent_loss_multiplier, decay_start=(self.hparams.num_iterations_1st_stage + self.hparams.num_iterations_2nd_stage), decay_end=self.hparams.anneal_end)\n        update_op = tf.identity(beta)\n    with tf.control_dependencies([update_op]):\n        tf.summary.scalar('beta', beta)\n        return beta", "docstring": "Get the KL multiplier, either dynamically or schedule based.\n\nif hparams.latent_loss_multiplier_dynamic is set to true, then beta\nis being adjusted to keep KL under hparams.latent_loss_multiplier_epsilon.\nIn order to do so, the beta is being updated at each iteration\nby taking steps of size hparams.latent_loss_multiplier_alpha.\nThe same formulation can be retrieved by solving the Lagrangian\nwith KL < epsilon as a constraint.\n\nArgs:\nkl_loss: KL loss. Only used for dynamic adjustment.\n\nReturns:\nbeta: the final value of beta.", "source": "codesearchnet"}
{"code": "def preprocess_for_train(image, image_size=224, normalize=True):\n  \n  if normalize: image = tf.to_float(image) / 255.0\n  image = _random_crop(image, image_size)\n  if normalize: image = _normalize(image)\n  image = _flip(image)\n  image = tf.reshape(image, [image_size, image_size, 3])\n  return image", "docstring": "Preprocesses the given image for evaluation.\n\nArgs:\nimage: `Tensor` representing an image of arbitrary size.\nimage_size: int, how large the output image should be.\nnormalize: bool, if True the image is normalized.\n\nReturns:\nA preprocessed image `Tensor`.", "source": "juraj-google-style"}
{"code": "def _normalize_pattern(pattern):\n    if pattern.startswith('regex:'):\n        pattern_type = 'regex'\n        pattern = pattern[len('regex:'):]\n    elif pattern.startswith('wildcard:'):\n        pattern_type = 'wildcard'\n        pattern = pattern[len('wildcard:'):]\n    elif pattern.startswith('literal:'):\n        pattern_type = 'literal'\n        pattern = pattern[len('literal:'):]\n    elif RegexRoute.like(pattern):\n        pattern_type = 'regex'\n    elif WildcardRoute.like(pattern):\n        pattern_type = 'wildcard'\n    else:\n        pattern_type = 'literal'\n    return (pattern_type, pattern)", "docstring": "Return a normalized form of the pattern.\n\nNormalize the pattern by removing pattern type prefix if it\nexists in the pattern. Then return the pattern type and the\npattern as a tuple of two strings.\n\nArguments:\npattern (str): Route pattern to match request paths\n\nReturns:\ntuple: Ruple of pattern type (str) and pattern (str)", "source": "codesearchnet"}
{"code": "def run_missing_simulations(self, param_list, runs=None):\n    if isinstance(param_list, dict):\n        param_list = list_param_combinations(param_list)\n    self.run_simulations(self.get_missing_simulations(param_list, runs))", "docstring": "Run the simulations from the parameter list that are not yet available\nin the database.\n\nThis function also makes sure that we have at least runs replications\nfor each parameter combination.\n\nAdditionally, param_list can either be a list containing the desired\nparameter combinations or a dictionary containing multiple values for\neach parameter, to be expanded into a list.\n\nArgs:\nparam_list (list, dict): either a list of parameter combinations or\na dictionary to be expanded into a list through the\nlist_param_combinations function.\nruns (int): the number of runs to perform for each parameter\ncombination. This parameter is only allowed if the param_list\nspecification doesn't feature an 'RngRun' key already.", "source": "codesearchnet"}
{"code": "def db_dp010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_dp010`'.format(value))\n\n        self._db_dp010 = value", "docstring": "Corresponds to IDD Field `db_dp010`\nmean coincident dry-bulb temperature to\nDew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_dp010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _SerializeRequest(self, request):\n        \n        \n        parsed = urllib_parse.urlsplit(request.url)\n        request_line = urllib_parse.urlunsplit(\n            ('', '', parsed.path, parsed.query, ''))\n        if not isinstance(request_line, six.text_type):\n            request_line = request_line.decode('utf-8')\n        status_line = u' '.join((\n            request.http_method,\n            request_line,\n            u'HTTP/1.1\\n'\n        ))\n        major, minor = request.headers.get(\n            'content-type', 'application/json').split('/')\n        msg = mime_nonmultipart.MIMENonMultipart(major, minor)\n\n        \n        \n        for key, value in request.headers.items():\n            if key == 'content-type':\n                continue\n            msg[key] = value\n\n        msg['Host'] = parsed.netloc\n        msg.set_unixfrom(None)\n\n        if request.body is not None:\n            msg.set_payload(request.body)\n\n        \n        str_io = six.StringIO()\n        \n        gen = generator.Generator(str_io, maxheaderlen=0)\n        gen.flatten(msg, unixfrom=False)\n        body = str_io.getvalue()\n\n        return status_line + body", "docstring": "Convert a http_wrapper.Request object into a string.\n\nArgs:\nrequest: A http_wrapper.Request to serialize.\n\nReturns:\nThe request as a string in application/http format.", "source": "juraj-google-style"}
{"code": "def validate_detector(self, detector):\n    resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX, 'validate'), data=detector)\n    resp.raise_for_status()", "docstring": "Validate a detector.\n\nValidates the given detector; throws a 400 Bad Request HTTP error if\nthe detector is invalid; otherwise doesn't return or throw anything.\n\nArgs:\ndetector (object): the detector model object. Will be serialized as\nJSON.", "source": "codesearchnet"}
{"code": "def _write_class_markdown_to_file(self, f, name, cls):\n    methods = dict(self.get_class_members(name, cls))\n    num_methods = len(methods)\n    try:\n        self._write_docstring_markdown_to_file(f, '\n    except ValueError as e:\n        raise ValueError((str(e) + (' in class `%s`' % cls.__name__)))\n    any_method_called_out = (len(methods) != num_methods)\n    if any_method_called_out:\n        other_methods = {n: m for (n, m) in methods.items() if (n in cls.__dict__)}\n        if other_methods:\n            print('\\n\n    else:\n        other_methods = methods\n    for name in sorted(other_methods):\n        self._write_member_markdown_to_file(f, '", "docstring": "Write the class doc to `f`.\n\nArgs:\nf: File to write to.\nprefix: Prefix for names.\ncls: class object.\nname: name to use.", "source": "codesearchnet"}
{"code": "def indent(lines, amount=2, char=' '):\n    r\n    lines = str(lines)\n    padding = amount * char\n    return padding + ('\\n' + padding).join(lines.split('\\n'))", "docstring": "r\"\"\"Indent a string.\n\nPrepends whitespace to every line in the passed string. (Lines are\nseparated by newline characters.)\n\nArgs:\nlines (str): The string to indent.\n\nKeyword Args:\namount (int): The number of columns to indent by.\nchar (str): The character to to use as the indentation.\n\nReturns:\nstr: The indented string.\n\nExample:\n>>> print(indent('line1\\nline2', char='*'))\n**line1\n**line2", "source": "juraj-google-style"}
{"code": "def get_gradients(self, loss, params):\n    grads = backend.gradients(loss, params)\n    if any((g is None for g in grads)):\n        raise ValueError('An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: backend.argmax, backend.round, backend.eval.')\n    if hasattr(self, 'clipnorm'):\n        grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n    if hasattr(self, 'clipvalue'):\n        grads = [clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads]\n    return grads", "docstring": "Returns gradients of `loss` with respect to `params`.\n\nArgs:\nloss: Loss tensor.\nparams: List of variables.\n\nReturns:\nList of gradient tensors.\n\nRaises:\nValueError: In case any gradient cannot be computed (e.g. if gradient\nfunction not implemented).", "source": "github-repos"}
{"code": "def train(self, X):\n    _trainer = bob.learn.linear.CGLogRegTrainer(**{'lambda': self.regularizer})\n    if (len(X) == 2):\n        return _trainer.train(add_bias(X[0]), add_bias(X[1]))\n    else:\n        machines = []\n        for k in range(len(X)):\n            NC_range = (list(range(0, k)) + list(range((k + 1), len(X))))\n            machines.append(_trainer.train(add_bias(numpy.vstack(X[NC_range])), add_bias(X[k])))\n        return MultiClassMachine(machines)", "docstring": "Trains multiple logistic regression classifiers to handle the multiclass\nproblem posed by ``X``\n\nX (numpy.ndarray): The input data matrix. This must be a numpy.ndarray\nwith 3 dimensions or an iterable containing 2 numpy.ndarrays with 2\ndimensions each. Each correspond to the data for one of the input\nclasses, every row corresponds to one example of the data set, every\ncolumn, one different feature.\n\n\nReturns:\n\nMachine: A trained multiclass machine.", "source": "codesearchnet"}
{"code": "def __init__(self, sparse, map_op, rank):\n    self._sparse = sparse\n    self._map_op = map_op\n    self._rank = tensor_shape.as_dimension(rank)", "docstring": "Create the metadata.\n\nArgs:\nsparse: Python boolean.\nmap_op: The `Operation` that created the `SparseTensorsMap` in question.\nThis Op contains information about the underlying Map object and the\ndtype of the original data.\nrank: The statically known rank of the `SparseTensor`.", "source": "github-repos"}
{"code": "def is_interactive_logging_enabled():\n    return global_state.get_global_attribute('interactive_logging', True)", "docstring": "Check if interactive logging is enabled.\n\nTo switch between writing logs to stdout and `absl.logging`, you may use\n`keras.config.enable_interactive_logging()` and\n`keras.config.disable_interactive_logging()`.\n\nReturns:\nBoolean, `True` if interactive logging is enabled,\nand `False` otherwise.", "source": "github-repos"}
{"code": "def get_initialization_function(self, *args, **kwargs):\n    with self._lock:\n        if self._variable_creation_config is not None:\n            raise RuntimeError('get_initialization_function cannot be called after the function has been used')\n        initializers = []\n        self._initialize(args, kwargs, add_initializers_to=initializers)\n\n    def initialize_variables():\n        for v, init in initializers:\n            v.assign(lift_to_graph.lift_to_graph([init], ops.get_default_graph())[init], read_value=False)\n    options = tracing_compilation.TracingOptions(initialize_variables, 'initialize_variables')\n    return tracing_compilation.trace_function(tracing_options=options)", "docstring": "Returns a `ConcreteFunction` which initializes this function's variables.\n\nRequires that this function hasn't been accessed yet through either calling\nit or calling get_concrete_function. Fails if we cannot build an initializer\nfunction which does not depend on the concrete values of the inputs to this\nfunction.\n\nNote that running this function will overwrite any values currently assigned\nto variables, for example restores from a checkpoint.\n\nArgs:\n*args: arguments to the underlying python callable.\n**kwargs: keyword arguments to the python callable.\n\nReturns:\nA `ConcreteFunction` object which initializes the variables of this\nfunction.\n\nRaises:\nRuntimeError: if called after the variables have been initialized.", "source": "github-repos"}
{"code": "def _replace_row_partitions(value, new_partitions):\n    if isinstance(value, tensor.Tensor) or not new_partitions:\n        return value\n    elif isinstance(value, ragged_tensor.RaggedTensor):\n        return ragged_tensor.RaggedTensor._from_row_partition(values=_replace_row_partitions(value.values, new_partitions[1:]), row_partition=new_partitions[0])\n    else:\n        assert isinstance(value, StructuredTensor)\n        new_fields = dict(((k, _replace_row_partitions(v, new_partitions)) for k, v in value._fields.items()))\n        return StructuredTensor._old_init(fields=new_fields, shape=value.shape, nrows=value.nrows(), row_partitions=tuple(new_partitions) + tuple(value.row_partitions[len(new_partitions):]))", "docstring": "Updates `value` to use `new_partitions` as its (outer) row partitions.\n\nThis is used to ensure that all fields in a `StructuredTensor` use identical\n`RowPartition` objects for the shared dimensions.  In particular,\n`StructuredTensor.from_fields` first merges all of the row partitions from\nany fields, and then replaces the outer row partitions of all fields with\nthe merged row partitions (using this function).\n\nArgs:\nvalue: A `Tensor`, `RaggedTensor`, or `StructuredTensor`.\nnew_partitions: A list of row-partitions that should be used by `value`.\nMust be equivalent to `value`'s current row partitions.\n\nReturns:\nA value that is equivalent to `value`, where outer row partitions have been\nreplaced by `new_partitions`.", "source": "github-repos"}
{"code": "def parsed_aggregate_reports_to_csv(reports):\n    \n\n    def to_str(obj):\n        return str(obj).lower()\n\n    fields = [\"xml_schema\", \"org_name\", \"org_email\",\n              \"org_extra_contact_info\", \"report_id\", \"begin_date\", \"end_date\",\n              \"errors\", \"domain\", \"adkim\", \"aspf\", \"p\", \"sp\", \"pct\", \"fo\",\n              \"source_ip_address\", \"source_country\", \"source_reverse_dns\",\n              \"source_base_domain\", \"count\", \"disposition\", \"dkim_alignment\",\n              \"spf_alignment\", \"policy_override_reasons\",\n              \"policy_override_comments\", \"envelope_from\", \"header_from\",\n              \"envelope_to\", \"dkim_domains\", \"dkim_selectors\", \"dkim_results\",\n              \"spf_domains\", \"spf_scopes\", \"spf_results\"]\n\n    csv_file_object = StringIO(newline=\"\\n\")\n    writer = DictWriter(csv_file_object, fields)\n    writer.writeheader()\n\n    if type(reports) == OrderedDict:\n        reports = [reports]\n\n    for report in reports:\n        xml_schema = report[\"xml_schema\"]\n        org_name = report[\"report_metadata\"][\"org_name\"]\n        org_email = report[\"report_metadata\"][\"org_email\"]\n        org_extra_contact = report[\"report_metadata\"][\"org_extra_contact_info\"]\n        report_id = report[\"report_metadata\"][\"report_id\"]\n        begin_date = report[\"report_metadata\"][\"begin_date\"]\n        end_date = report[\"report_metadata\"][\"end_date\"]\n        errors = \"|\".join(report[\"report_metadata\"][\"errors\"])\n        domain = report[\"policy_published\"][\"domain\"]\n        adkim = report[\"policy_published\"][\"adkim\"]\n        aspf = report[\"policy_published\"][\"aspf\"]\n        p = report[\"policy_published\"][\"p\"]\n        sp = report[\"policy_published\"][\"sp\"]\n        pct = report[\"policy_published\"][\"pct\"]\n        fo = report[\"policy_published\"][\"fo\"]\n\n        report_dict = dict(xml_schema=xml_schema, org_name=org_name,\n                           org_email=org_email,\n                           org_extra_contact_info=org_extra_contact,\n                           report_id=report_id, begin_date=begin_date,\n                           end_date=end_date, errors=errors, domain=domain,\n                           adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo)\n\n        for record in report[\"records\"]:\n            row = report_dict\n            row[\"source_ip_address\"] = record[\"source\"][\"ip_address\"]\n            row[\"source_country\"] = record[\"source\"][\"country\"]\n            row[\"source_reverse_dns\"] = record[\"source\"][\"reverse_dns\"]\n            row[\"source_base_domain\"] = record[\"source\"][\"base_domain\"]\n            row[\"count\"] = record[\"count\"]\n            row[\"disposition\"] = record[\"policy_evaluated\"][\"disposition\"]\n            row[\"spf_alignment\"] = record[\"policy_evaluated\"][\"spf\"]\n            row[\"dkim_alignment\"] = record[\"policy_evaluated\"][\"dkim\"]\n            policy_override_reasons = list(map(\n                lambda r: r[\"type\"],\n                record[\"policy_evaluated\"]\n                [\"policy_override_reasons\"]))\n            policy_override_comments = list(map(\n                lambda r: r[\"comment\"] or \"none\",\n                record[\"policy_evaluated\"]\n                [\"policy_override_reasons\"]))\n            row[\"policy_override_reasons\"] = \",\".join(\n                policy_override_reasons)\n            
row[\"policy_override_comments\"] = \"|\".join(\n                policy_override_comments)\n            row[\"envelope_from\"] = record[\"identifiers\"][\"envelope_from\"]\n            row[\"header_from\"] = record[\"identifiers\"][\"header_from\"]\n            envelope_to = record[\"identifiers\"][\"envelope_to\"]\n            row[\"envelope_to\"] = envelope_to\n            dkim_domains = []\n            dkim_selectors = []\n            dkim_results = []\n            for dkim_result in record[\"auth_results\"][\"dkim\"]:\n                dkim_domains.append(dkim_result[\"domain\"])\n                if \"selector\" in dkim_result:\n                    dkim_selectors.append(dkim_result[\"selector\"])\n                dkim_results.append(dkim_result[\"result\"])\n            row[\"dkim_domains\"] = \",\".join(map(to_str, dkim_domains))\n            row[\"dkim_selectors\"] = \",\".join(map(to_str, dkim_selectors))\n            row[\"dkim_results\"] = \",\".join(map(to_str, dkim_results))\n            spf_domains = []\n            spf_scopes = []\n            spf_results = []\n            for spf_result in record[\"auth_results\"][\"spf\"]:\n                spf_domains.append(spf_result[\"domain\"])\n                spf_scopes.append(spf_result[\"scope\"])\n                spf_results.append(spf_result[\"result\"])\n            row[\"spf_domains\"] = \",\".join(map(to_str, spf_domains))\n            row[\"spf_scopes\"] = \",\".join(map(to_str, spf_scopes))\n            row[\"spf_results\"] = \",\".join(map(to_str, dkim_results))\n\n            writer.writerow(row)\n            csv_file_object.flush()\n\n    return csv_file_object.getvalue()", "docstring": "Converts one or more parsed aggregate reports to flat CSV format, including\nheaders\n\nArgs:\nreports: A parsed aggregate report or list of parsed aggregate reports\n\nReturns:\nstr: Parsed aggregate report data in flat CSV format, including headers", "source": "juraj-google-style"}
{"code": "def _PageThroughPqlSet(self, pql_query, output_function, values):\n    \n    if isinstance(values, dict):\n      values = PQLHelper.GetQueryValuesFromDict(values, self._version)\n\n    pql_service = self._GetPqlService()\n    current_offset = 0\n\n    while True:\n      query_w_limit_offset = '%s LIMIT %d OFFSET %d' % (pql_query,\n                                                        SUGGESTED_PAGE_LIMIT,\n                                                        current_offset)\n      response = pql_service.select({'query': query_w_limit_offset,\n                                     'values': values})\n\n      if 'rows' in response:\n        \n        if current_offset == 0:\n          header = response['columnTypes']\n          output_function([label['labelName'] for label in header])\n\n        entities = response['rows']\n        result_set_size = len(entities)\n\n        for entity in entities:\n          output_function([self._ConvertValueForCsv(value) for value\n                           in entity['values']])\n\n        current_offset += result_set_size\n        if result_set_size != SUGGESTED_PAGE_LIMIT:\n          break\n      else:\n        break", "docstring": "Pages through a pql_query and performs an action (output_function).\n\nArgs:\npql_query: str a statement filter to apply (the query should not include\nthe limit or the offset)\noutput_function: the function to call to output the results (csv or in\nmemory)\nvalues: A dict of python objects or a list of raw SOAP values to bind\nto the pql_query.", "source": "juraj-google-style"}
{"code": "def validate_test_result(result):\n    buckets = [(result.passed, records.TestResultEnums.TEST_RESULT_PASS), (result.failed, records.TestResultEnums.TEST_RESULT_FAIL), (result.error, records.TestResultEnums.TEST_RESULT_ERROR), (result.skipped, records.TestResultEnums.TEST_RESULT_SKIP)]\n    for bucket_list, expected_enum in buckets:\n        for record in bucket_list:\n            if record.result != expected_enum:\n                raise AssertionError('Expected result %s, got %s.' % (expected_enum, record.result))", "docstring": "Validate basic properties of a test result.\n\nThe records in each bucket of the test result should have the corresponding\nresult enum.\n\nArgs:\nresult: The `records.TestResult` object to validate.", "source": "github-repos"}
{"code": "def And(exprs):\n    return simplify_exprs(exprs, _And, FALSE, TRUE)", "docstring": "Create a conjunction or its simplified equivalent.\n\nThis will ensure that, when an _And is returned, none of its immediate\nsubterms is TRUE, FALSE, or another conjunction.\n\nArgs:\nexprs: An iterable. The subterms.\n\nReturns:\nA BooleanTerm.", "source": "github-repos"}
{"code": "async def _populate_fields(self, example: Example, client: GRPCClient):\n    if example.tag.never_run:\n        logging.info('populating example fields from provided files %s', example.filepath)\n        self._populate_from_repo(example)\n    else:\n        await self._populate_from_runner(example, client)", "docstring": "Populate fields of the example reading them from the backend or from the repository.\nArgs:\nexample: beam example that should be verified", "source": "github-repos"}
{"code": "def fts_count(self, fts, inv):\n        \n        return len(list(filter(lambda s: self.fts_match(fts, s), inv)))", "docstring": "Return the count of segments in an inventory matching a given\nfeature mask.\n\nArgs:\nfts (set): feature mask given as a set of (value, feature) tuples\ninv (set): inventory of segments (as Unicode IPA strings)\n\nReturns:\nint: number of segments in `inv` that match feature mask `fts`", "source": "juraj-google-style"}
{"code": "async def has_commit_landed_on_repository(self, context, revision):\n        \n        \n        if not _is_git_full_hash(revision):\n            revision = self.get_tag_hash(tag_name=revision)\n\n        repo = self._github_repository.html_url\n\n        url = '/'.join([repo.rstrip('/'), 'branch_commits', revision])\n        html_data = await retry_request(context, url)\n        html_text = html_data.strip()\n        \n        \n        \n        return html_text != ''", "docstring": "Tell if a commit was landed on the repository or if it just comes from a pull request.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nrevision (str): the commit hash or the tag name.\n\nReturns:\nbool: True if the commit is present in one of the branches of the main repository", "source": "juraj-google-style"}
{"code": "def add_checkpoint_values_check(object_graph_proto):\n    parents = {}\n    checkpointed_trackables = object_identity.ObjectIdentitySet()\n    checkpointed_trackables = set()\n    for node_id, object_proto in enumerate(object_graph_proto.nodes):\n        if object_proto.attributes or object_proto.slot_variables or object_proto.HasField('registered_saver'):\n            checkpointed_trackables.add(node_id)\n        for child_proto in object_proto.children:\n            child = child_proto.node_id\n            if child not in parents:\n                parents[child] = set()\n            parents[child].add(node_id)\n    to_visit = set()\n    to_visit.update(checkpointed_trackables)\n    while to_visit:\n        trackable = to_visit.pop()\n        if trackable not in parents:\n            continue\n        current_parents = parents.pop(trackable)\n        checkpointed_trackables.update(current_parents)\n        for parent in current_parents:\n            if parent in parents:\n                to_visit.add(parent)\n    for node_id, object_proto in enumerate(object_graph_proto.nodes):\n        object_proto.has_checkpoint_values.value = bool(node_id in checkpointed_trackables)", "docstring": "Determines which objects have checkpoint values and save this to the proto.\n\nArgs:\nobject_graph_proto: A `TrackableObjectGraph` proto.", "source": "github-repos"}
{"code": "def ignore_path(path):\n    ignore = False\n    for name in ['.tox', 'dist', 'build', 'node_modules', 'htmlcov']:\n        if (path.find(name) >= 0):\n            ignore = True\n            break\n    return ignore", "docstring": "Verify whether to ignore a path.\n\nArgs:\npath (str): path to check.\n\nReturns:\nbool: True when to ignore given path.", "source": "codesearchnet"}
{"code": "def is_struct(declaration):\n    \n    if not is_class(declaration):\n        return False\n    decl = class_traits.get_declaration(declaration)\n    return decl.class_type == class_declaration.CLASS_TYPES.STRUCT", "docstring": "Returns True if declaration represents a C++ struct\n\nArgs:\ndeclaration (declaration_t): the declaration to be checked.\n\nReturns:\nbool: True if declaration represents a C++ struct", "source": "juraj-google-style"}
{"code": "def dimension_name(dimension):\n    if isinstance(dimension, Dimension):\n        return dimension.name\n    elif isinstance(dimension, basestring):\n        return dimension\n    elif isinstance(dimension, tuple):\n        return dimension[0]\n    elif isinstance(dimension, dict):\n        return dimension['name']\n    elif (dimension is None):\n        return None\n    else:\n        raise ValueError(('%s type could not be interpreted as Dimension. Dimensions must be declared as a string, tuple, dictionary or Dimension type.' % type(dimension).__name__))", "docstring": "Return the Dimension.name for a dimension-like object.\n\nArgs:\ndimension: Dimension or dimension string, tuple or dict\n\nReturns:\nThe name of the Dimension or what would be the name if the\ninput as converted to a Dimension.", "source": "codesearchnet"}
{"code": "def observe(self, success, failure):\n    if (isinstance(success, int) is False):\n        if (isinstance(success, float) is False):\n            raise TypeError()\n    if (isinstance(failure, int) is False):\n        if (isinstance(failure, float) is False):\n            raise TypeError()\n    if (success <= 0):\n        raise ValueError()\n    if (failure <= 0):\n        raise ValueError()\n    self.__success += success\n    self.__failure += failure", "docstring": "Observation data.\n\nArgs:\nsuccess:      The number of success.\nfailure:      The number of failure.", "source": "codesearchnet"}
{"code": "def Read(self, file_object):\n    \n    file_object.seek(self.last_read, os.SEEK_SET)\n    read_data = file_object.read(self._MAXIMUM_READ_SIZE)\n    self.last_read = file_object.get_offset()\n    compressed_data = b''.join([self._compressed_data, read_data])\n    decompressed, extra_compressed = self._decompressor.Decompress(\n        compressed_data)\n    self._compressed_data = extra_compressed\n    self.uncompressed_offset += len(decompressed)\n    return decompressed", "docstring": "Reads the next uncompressed data from the gzip stream.\n\nArgs:\nfile_object (FileIO): file object that contains the compressed stream.\n\nReturns:\nbytes: next uncompressed data from the compressed stream.", "source": "juraj-google-style"}
{"code": "def imread(path, grayscale=False, size=None, interpolate='bilinear', channel_first=False, as_uint16=False, num_channels=(- 1)):\n    _imread_before(grayscale, num_channels)\n    f = (path if hasattr(path, 'read') else open(path, 'rb'))\n    r = png.Reader(file=f)\n    (width, height, pixels, metadata) = r.asDirect()\n    bit_depth = metadata.get('bitdepth')\n    if (bit_depth not in [8, 16]):\n        raise ValueError('The bit-depth of the image you want to read is unsupported ({}bit).Currently, pypng backend`s imread supports only [8, 16] bit-depth.the path for this image is {}'.format(bit_depth, path))\n    img = read_result_to_ndarray(pixels, width, height, metadata, grayscale, as_uint16, num_channels)\n    return _imread_after(img, size, interpolate, channel_first, imresize)", "docstring": "Read image by pypng module.\n\nArgs:\npath (str or 'file object'): File path or object to read.\ngrayscale (bool):\nsize (tupple of int):\n(width, height).\nIf None, output img shape depends on the files to read.\nchannel_first (bool):\nThis argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).\nDefault value is False, which means the img shape is (height, width, channel).\ninterpolate (str):\nmust be one of [\"nearest\", \"box\", \"bilinear\", \"hamming\", \"bicubic\", \"lanczos\"].\nas_uint16 (bool):\nIf True, this function reads image as uint16.\nnum_channels (int):\nchannel size of output array.\nDefault is -1 which preserves raw image shape.\n\nReturns:\nnumpy.ndarray", "source": "codesearchnet"}
{"code": "def init_grad(obj, allow_lazy_initializer=False):\n  \n  if obj is None:\n    \n    return 0.0\n\n  initializer, supports_lazy_initializer = grad_initializers[type(obj)]\n  if supports_lazy_initializer:\n    if isinstance(obj, ZeroGradient):\n      if allow_lazy_initializer:\n        return ZeroGradient(obj.like)\n      else:\n        \n        return obj.instantiate()\n    else:\n      if allow_lazy_initializer:\n        return ZeroGradient(obj)\n  else:\n    assert not isinstance(obj, ZeroGradient)\n  return initializer(obj)", "docstring": "Initialize the gradient for an object.\n\nArgs:\nobj: The object to initialize the gradient for, can be either a number,\narray, tuple, list, or dictionary.\nallow_lazy_initializer: Whether to allow using the ZeroGradient wrapper,\nfor efficiency.\n\nReturns:\nAn object of the same type, shape, etc. but with all numeric values set to\nzero. If the type is unknown, a zero is returned.", "source": "juraj-google-style"}
{"code": "def load_partition_data(self, index):\n    info = self.partitions[index]\n    data = PartitionData(info)\n    for utt_id in info.utt_ids:\n        utt_data = [c._file[utt_id][:] for c in self.containers]\n        data.utt_data.append(utt_data)\n    return data", "docstring": "Load and return the partition with the given index.\n\nArgs:\nindex (int): The index of partition, that refers to the index in ``self.partitions``.\n\nReturns:\nPartitionData: A PartitionData object containing the data for the partition with the given index.", "source": "codesearchnet"}
{"code": "def set_conf_str(conf, optstrs):\n    \n    falsy = ['0', 'no', 'n', 'off', 'false', 'f']\n    bool_actions = ['store_true', 'store_false', internal.Switch]\n    for optstr in optstrs:\n        opt, val = optstr.split('=', 1)\n        sec, opt = opt.split('.', 1)\n        if sec not in conf:\n            raise error.SectionError(sec)\n        if opt not in conf[sec]:\n            raise error.OptionError(opt)\n        meta = conf[sec].def_[opt]\n        if meta.default is None:\n            if 'type' in meta.cmd_kwargs:\n                cast = meta.cmd_kwargs['type']\n            else:\n                act = meta.cmd_kwargs.get('action')\n                cast = bool if act in bool_actions else str\n        else:\n            cast = type(meta.default)\n        if cast is bool and val.lower() in falsy:\n            val = ''\n        conf[sec][opt] = cast(val)", "docstring": "Set options from a list of section.option=value string.\n\nArgs:\nconf (:class:`~loam.manager.ConfigurationManager`): the conf to update.\noptstrs (list of str): the list of 'section.option=value' formatted\nstring.", "source": "juraj-google-style"}
{"code": "def _RawGlobPathSpecWithAlphabeticalSchema(file_system, parent_path_spec, segment_format, location, segment_length, upper_case=False):\n    segment_number = 0\n    segment_files = []\n    while True:\n        segment_index = segment_number\n        segment_letters = []\n        while (len(segment_letters) < segment_length):\n            (segment_index, remainder) = divmod(segment_index, 26)\n            if upper_case:\n                segment_letters.append(chr((ord('A') + remainder)))\n            else:\n                segment_letters.append(chr((ord('a') + remainder)))\n        segment_letters = ''.join(segment_letters[::(- 1)])\n        segment_location = segment_format.format(location, segment_letters)\n        kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)\n        kwargs['location'] = segment_location\n        if (parent_path_spec.parent is not None):\n            kwargs['parent'] = parent_path_spec.parent\n        segment_path_spec = path_spec_factory.Factory.NewPathSpec(parent_path_spec.type_indicator, **kwargs)\n        if (not file_system.FileEntryExistsByPathSpec(segment_path_spec)):\n            break\n        segment_files.append(segment_path_spec)\n        segment_number += 1\n    return segment_files", "docstring": "Globs for path specifications according to an alphabetical naming schema.\n\nArgs:\nfile_system (FileSystem): file system.\nparent_path_spec (PathSpec): parent path specification.\nsegment_format (str): naming schema of the segment file location.\nlocation (str): the base segment file location string.\nsegment_length (int): length (number of characters) of the segment\nindicator.\nupper_case (Optional[bool]): True if the segment name is in upper case.\n\nReturns:\nlist[PathSpec]: path specifications that match the glob.", "source": "codesearchnet"}
{"code": "def Open(self, hostname, port):\n    server_url = 'http:\n    try:\n        self._xmlrpc_proxy = xmlrpclib.ServerProxy(server_url, allow_none=True)\n    except SocketServer.socket.error as exception:\n        logger.warning('Unable to connect to RPC server on {0:s}:{1:d} with error: {2!s}'.format(hostname, port, exception))\n        return False\n    return True", "docstring": "Opens a RPC communication channel to the server.\n\nArgs:\nhostname (str): hostname or IP address to connect to for requests.\nport (int): port to connect to for requests.\n\nReturns:\nbool: True if the communication channel was established.", "source": "codesearchnet"}
{"code": "def extract(self, html_text: str, strategy: Strategy=Strategy.ALL_TEXT) -> List[Extraction]:\n    if html_text:\n        if (strategy == Strategy.ALL_TEXT):\n            soup = BeautifulSoup(html_text, 'html.parser')\n            texts = soup.findAll(text=True)\n            visible_texts = filter(self._tag_visible, texts)\n            all_text = u' '.join((t.strip() for t in visible_texts))\n            return [Extraction(all_text, self.name)]\n        else:\n            relax = (strategy == Strategy.MAIN_CONTENT_RELAXED)\n            readable = Document(html_text, recallPriority=relax).summary(html_partial=False)\n            clean_text = BeautifulSoup(readable.encode('utf-8'), 'lxml').strings\n            readability_text = ' '.join(clean_text)\n            return [Extraction(readability_text, self.name)]\n    else:\n        return []", "docstring": "Extracts text from an HTML page using a variety of strategies\n\nArgs:\nhtml_text (str): html page in string\nstrategy (enum[Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_RELAXED, Strategy.MAIN_CONTENT_STRICT]): one of\nStrategy.ALL_TEXT, Strategy.MAIN_CONTENT_STRICT and Strategy.MAIN_CONTENT_RELAXED\n\nReturns:\nList[Extraction]: typically a singleton list with the extracted text", "source": "codesearchnet"}
{"code": "def copy_docstring(source_class):\n    \n    def decorator(method):\n        \n        if method.__doc__:\n            raise ValueError('Method already has a docstring.')\n\n        source_method = getattr(source_class, method.__name__)\n        method.__doc__ = source_method.__doc__\n\n        return method\n    return decorator", "docstring": "Decorator that copies a method's docstring from another class.\n\nArgs:\nsource_class (type): The class that has the documented method.\n\nReturns:\nCallable: A decorator that will copy the docstring of the same\nnamed method in the source class to the decorated method.", "source": "juraj-google-style"}
{"code": "def save_source(driver, name):\n    \n    source = driver.page_source\n    file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'),\n                             '{name}.html'.format(name=name))\n\n    try:\n        with open(file_name, 'wb') as output_file:\n            output_file.write(source.encode('utf-8'))\n    except Exception:  \n        msg = u\"Could not save the browser page source to {}.\".format(file_name)\n        LOGGER.warning(msg)", "docstring": "Save the rendered HTML of the browser.\n\nThe location of the source can be configured\nby the environment variable `SAVED_SOURCE_DIR`.  If not set,\nthis defaults to the current working directory.\n\nArgs:\ndriver (selenium.webdriver): The Selenium-controlled browser.\nname (str): A name to use in the output file name.\nNote that \".html\" is appended automatically\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def steps(self, goal):\n    path = self.path(goal)\n    for i in range((len(path) - 1)):\n        (yield (path[i], path[(i + 1)]))", "docstring": "Get the list of individual relations leading to the targeted node\n\nArgs:\ngoal (str): Name of the targeted node\nReturn:\nlist of tuple of Node", "source": "codesearchnet"}
{"code": "def pseudo_with_symbol(self, symbol, allow_multi=False):\n        \n        pseudos = self.select_symbols(symbol, ret_list=True)\n        if not pseudos or (len(pseudos) > 1 and not allow_multi):\n            raise ValueError(\"Found %d occurrences of symbol %s\" % (len(pseudos), symbol))\n\n        if not allow_multi:\n            return pseudos[0]\n        else:\n            return pseudos", "docstring": "Return the pseudo with the given chemical symbol.\n\nArgs:\nsymbols: String with the chemical symbol of the element\nallow_multi: By default, the method raises ValueError\nif multiple occurrences are found. Use allow_multi to prevent this.\n\nRaises:\nValueError if symbol is not found or multiple occurences are present and not allow_multi", "source": "juraj-google-style"}
{"code": "def eval_single(self, key, data, data_store):\n    if (key in self):\n        value = self[key]\n        if ((value is not None) and callable(value)):\n            return value(data, data_store)\n        else:\n            return value\n    else:\n        raise AttributeError()", "docstring": "Evaluate the value of a single parameter taking into account callables .\n\nNative types are not touched and simply returned, while callable methods are\nexecuted and their return value is returned.\n\nArgs:\nkey (str): The name of the parameter that should be evaluated.\ndata (MultiTaskData): The data object that has been passed from the\npredecessor task.\ndata_store (DataStore): The persistent data store object that allows the task\nto store data for access across the current workflow\nrun.", "source": "codesearchnet"}
{"code": "def ion_or_solid_comp_object(formula):\n    \n    m = re.search(r\"\\[([^\\[\\]]+)\\]|\\(aq\\)\", formula)\n    if m:\n        comp_obj = Ion.from_formula(formula)\n    elif re.search(r\"\\(s\\)\", formula):\n        comp_obj = Composition(formula[:-3])\n    else:\n        comp_obj = Composition(formula)\n    return comp_obj", "docstring": "Returns either an ion object or composition object given\na formula.\n\nArgs:\nformula: String formula. Eg. of ion: NaOH(aq), Na[+];\nEg. of solid: Fe2O3(s), Fe(s), Na2O\n\nReturns:\nComposition/Ion object", "source": "juraj-google-style"}
{"code": "def _check_params(window_length, dtype):\n    if not dtype.is_floating:\n        raise ValueError('dtype must be a floating point type. Found %s' % dtype)\n    window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32)\n    window_length.shape.assert_has_rank(0)\n    return window_length", "docstring": "Check window_length and dtype params.\n\nArgs:\nwindow_length: A scalar value or `Tensor`.\ndtype: The data type to produce. Must be a floating point type.\n\nReturns:\nwindow_length converted to a tensor of type int32.\n\nRaises:\nValueError: If `dtype` is not a floating point type or window_length is not\na scalar.", "source": "github-repos"}
{"code": "def html_job_status(job_name, job_type, refresh_interval, html_on_running, html_on_success):\n  \n  _HTML_TEMPLATE = \n  div_id = _html.Html.next_id()\n  return IPython.core.display.HTML(_HTML_TEMPLATE % (div_id, div_id, job_name, job_type,\n                                   refresh_interval, html_on_running, html_on_success))", "docstring": "create html representation of status of a job (long running operation).\n\nArgs:\njob_name: the full name of the job.\njob_type: type of job. Can be 'local' or 'cloud'.\nrefresh_interval: how often should the client refresh status.\nhtml_on_running: additional html that the job view needs to include on job running.\nhtml_on_success: additional html that the job view needs to include on job success.", "source": "juraj-google-style"}
{"code": "def _time_step(time, output_ta_t, state):\n    if in_graph_mode:\n        input_t = tuple((ta.read(time) for ta in input_ta))\n        for input_, shape in zip(input_t, inputs_got_shape):\n            input_.set_shape(shape[1:])\n    else:\n        input_t = tuple((ta[time.numpy()] for ta in input_ta))\n    input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)\n    call_cell = lambda: cell(input_t, state)\n    if sequence_length is not None:\n        output, new_state = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)\n    else:\n        output, new_state = call_cell()\n    output = nest.flatten(output)\n    if in_graph_mode:\n        output_ta_t = tuple((ta.write(time, out) for ta, out in zip(output_ta_t, output)))\n    else:\n        for ta, out in zip(output_ta_t, output):\n            ta[time.numpy()] = out\n    return (time + 1, output_ta_t, new_state)", "docstring": "Take a time step of the dynamic RNN.\n\nArgs:\ntime: int32 scalar Tensor.\noutput_ta_t: List of `TensorArray`s that represent the output.\nstate: nested tuple of vector tensors that represent the state.\n\nReturns:\nThe tuple (time + 1, output_ta_t with updated flow, new_state).", "source": "github-repos"}
{"code": "def patch_request(self, id_or_uri, body, timeout=-1, custom_headers=None):\n        \n        uri = self.build_uri(id_or_uri)\n\n        logger.debug('Patch resource (uri = %s, data = %s)' % (uri, body))\n\n        custom_headers_copy = custom_headers.copy() if custom_headers else {}\n        if self._connection._apiVersion >= 300 and 'Content-Type' not in custom_headers_copy:\n            custom_headers_copy['Content-Type'] = 'application/json-patch+json'\n\n        task, entity = self._connection.patch(uri, body, custom_headers=custom_headers_copy)\n\n        if not task:\n            return entity\n\n        return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Uses the PATCH to update a resource.\n\nOnly one operation can be performed in each PATCH call.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\nbody: Patch request body\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nUpdated resource.", "source": "juraj-google-style"}
{"code": "def annotate_source(dump, source_file_path, do_dumped_tensors=False, file_stack_top=False, min_line=None, max_line=None):\n    py_graph = dump.python_graph\n    if not py_graph:\n        raise ValueError('Cannot perform source annotation due to a lack of set Python graph in the dump object')\n    source_file_path = _norm_abs_path(source_file_path)\n    line_to_op_names = {}\n    for op in py_graph.get_operations():\n        for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):\n            if min_line is not None and line_number < min_line or (max_line is not None and line_number >= max_line):\n                continue\n            if _norm_abs_path(file_path) != source_file_path:\n                continue\n            if do_dumped_tensors:\n                watch_keys = dump.debug_watch_keys(op.name)\n                items_to_append = list(set(map(_convert_watch_key_to_tensor_name, watch_keys)))\n            else:\n                items_to_append = [op.name]\n            if line_number in line_to_op_names:\n                line_to_op_names[line_number].extend(items_to_append)\n            else:\n                line_to_op_names[line_number] = items_to_append\n            if file_stack_top:\n                break\n    return line_to_op_names", "docstring": "Annotate a Python source file with a list of ops created at each line.\n\n(The annotation doesn't change the source file itself.)\n\nArgs:\ndump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph\nhas been loaded.\nsource_file_path: (`str`) Path to the source file being annotated.\ndo_dumped_tensors: (`str`) Whether dumped Tensors, instead of ops are to be\nused to annotate the source file.\nfile_stack_top: (`bool`) Whether only the top stack trace in the\nspecified source file is to be annotated.\nmin_line: (`None` or `int`) The 1-based line to start annotate the source\nfile from (inclusive).\nmax_line: (`None` or `int`) The 1-based line number to end the annotation\nat (exclusive).\n\nReturns:\nA `dict` mapping 1-based line number to a list of op name(s) created at\nthat line, or tensor names if `do_dumped_tensors` is True.\n\nRaises:\nValueError: If the dump object does not have a Python graph set.", "source": "github-repos"}
{"code": "def __item_descriptor(self, config):\n    descriptor = {'kind': 'discovery\n    description = config.get('description')\n    root_url = config.get('root')\n    name = config.get('name')\n    version = config.get('api_version')\n    relative_path = '/apis/{0}/{1}/rest'.format(name, version)\n    if description:\n        descriptor['description'] = description\n    descriptor['name'] = name\n    descriptor['version'] = version\n    descriptor['discoveryLink'] = '.{0}'.format(relative_path)\n    root_url_port = urlparse.urlparse(root_url).port\n    original_path = self.__request.reconstruct_full_url(port_override=root_url_port)\n    descriptor['discoveryRestUrl'] = '{0}/{1}/{2}/rest'.format(original_path, name, version)\n    if (name and version):\n        descriptor['id'] = '{0}:{1}'.format(name, version)\n    return descriptor", "docstring": "Builds an item descriptor for a service configuration.\n\nArgs:\nconfig: A dictionary containing the service configuration to describe.\n\nReturns:\nA dictionary that describes the service configuration.", "source": "codesearchnet"}
{"code": "def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:\n    try:\n        return expr_or_pattern.match(expr)\n    except AttributeError:\n        if (expr_or_pattern == expr):\n            return MatchDict()\n        else:\n            res = MatchDict()\n            res.success = False\n            res.reason = (\"Expressions '%s' and '%s' are not the same\" % (repr(expr_or_pattern), repr(expr)))\n            return res", "docstring": "Recursively match `expr` with the given `expr_or_pattern`\n\nArgs:\nexpr_or_pattern: either a direct expression (equal to `expr` for a\nsuccessful match), or an instance of :class:`Pattern`.\nexpr: the expression to be matched", "source": "codesearchnet"}
{"code": "def pop_parameter(key):\n    names = key.split('/')\n    if (len(names) > 1):\n        with parameter_scope(names[0]):\n            return pop_parameter('/'.join(names[1:]))\n    global current_scope\n    param = current_scope.get(key, None)\n    if (param is not None):\n        del current_scope[key]\n    return param", "docstring": "Remove and get parameter by key.\n\nArgs:\nkey(str): Key of parameter.\n\nReturns: ~nnabla.Variable\nParameter if key found, otherwise None.", "source": "codesearchnet"}
{"code": "def RetrieveAsset(logdir, plugin_name, asset_name):\n  \n\n  asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)\n  try:\n    with tf.io.gfile.GFile(asset_path, \"r\") as f:\n      return f.read()\n  except tf.errors.NotFoundError:\n    raise KeyError(\"Asset path %s not found\" % asset_path)\n  except tf.errors.OpError as e:\n    raise KeyError(\"Couldn't read asset path: %s, OpError %s\" % (asset_path, e))", "docstring": "Retrieve a particular plugin asset from a logdir.\n\nArgs:\nlogdir: A directory that was created by a TensorFlow summary.FileWriter.\nplugin_name: The plugin we want an asset from.\nasset_name: The name of the requested asset.\n\nReturns:\nstring contents of the plugin asset.\n\nRaises:\nKeyError: if the asset does not exist.", "source": "juraj-google-style"}
{"code": "def directed_tripartition_indices(N):\n    result = []\n    if (N <= 0):\n        return result\n    base = [0, 1, 2]\n    for key in product(base, repeat=N):\n        part = [[], [], []]\n        for (i, location) in enumerate(key):\n            part[location].append(i)\n        result.append(tuple((tuple(p) for p in part)))\n    return result", "docstring": "Return indices for directed tripartitions of a sequence.\n\nArgs:\nN (int): The length of the sequence.\n\nReturns:\nlist[tuple]: A list of tuples containing the indices for each\npartition.\n\nExample:\n>>> N = 1\n>>> directed_tripartition_indices(N)\n[((0,), (), ()), ((), (0,), ()), ((), (), (0,))]", "source": "codesearchnet"}
{"code": "def _PrunedDenseMatrixMultiplication(a, b, indices, transpose_a=False, adjoint_a=False, transpose_b=False, adjoint_b=False):\n    transpose_a = transpose_a or adjoint_a\n    transpose_b = transpose_b or adjoint_b\n    a = math_ops.conj(a) if adjoint_a else a\n    b = math_ops.conj(b) if adjoint_b else b\n    rank = len(a.shape)\n    dense_shape = (a.shape[-1] if transpose_a else a.shape[-2], b.shape[-2] if transpose_b else b.shape[-1])\n    if rank == 2:\n        rows = indices[:, 0]\n        cols = indices[:, 1]\n        transpose = array_ops.transpose\n        gather_op = array_ops.gather\n    elif rank == 3:\n        dense_shape = (a.shape[0],) + dense_shape\n        rows = indices[:, :2]\n        cols = array_ops_stack.stack([indices[:, 0], indices[:, 2]], axis=1)\n        transpose = lambda x: array_ops.transpose(x, perm=[0, 2, 1])\n        gather_op = array_ops.gather_nd\n    a_rows = gather_op(transpose(a) if transpose_a else a, indices=rows)\n    b_cols = gather_op(b if transpose_b else transpose(b), indices=cols)\n    values = math_ops.reduce_sum(a_rows * b_cols, axis=1)\n    return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(indices=indices, values=values, dense_shape=dense_shape)", "docstring": "Multiplies two dense matrices at selected indices.\n\nThe two inputs `a` and `b` must have matching rank (2 or 3). If using rank 3,\nthe first rank is used for the batch number. The last two dimensions should\nalso be compatible for matrix multiplication.\n\nTODO(tabakg): Consider C++ implementation. There is also a more efficient way\nto handle transposes here.\n\nArgs:\na: The left dense matrix (or batched matrices).\nb: The right dense matrix (or batched matrices).\nindices: The selected output indices where values should be produced. Other\nindices will be pruned (not computed in the first place). Indices are\nspecified as a tensor of shape (length, rank), where length is the number\nof entries and rank is the rank of the dense inputs (2 or 3).\ntranspose_a: Whether to transpose a.\nadjoint_a: Whether to take the conjugate transpose of a.\ntranspose_b: Whether to transpose b.\nadjoint_b: Whether to take the conjugate transpose of b.\n\nReturns:\nA CSR matrix.", "source": "github-repos"}
{"code": "def make_data(self, message):\n    if (not isinstance(message, Message)):\n        return message\n    return message.export(self.transport_content_type)", "docstring": "make data string from message according to transport_content_type\n\nReturns:\n\nstr: message data", "source": "codesearchnet"}
{"code": "def download(self,\n                 task,\n                 default_ext,\n                 timeout=5,\n                 max_retry=3,\n                 overwrite=False,\n                 **kwargs):\n        \n        file_url = task['file_url']\n        task['success'] = False\n        task['filename'] = None\n        retry = max_retry\n\n        if not overwrite:\n            with self.lock:\n                self.fetched_num += 1\n                filename = self.get_filename(task, default_ext)\n                if self.storage.exists(filename):\n                    self.logger.info('skip downloading file %s', filename)\n                    return\n                self.fetched_num -= 1\n\n        while retry > 0 and not self.signal.get('reach_max_num'):\n            try:\n                response = self.session.get(file_url, timeout=timeout)\n            except Exception as e:\n                self.logger.error('Exception caught when downloading file %s, '\n                                  'error: %s, remaining retry times: %d',\n                                  file_url, e, retry - 1)\n            else:\n                if self.reach_max_num():\n                    self.signal.set(reach_max_num=True)\n                    break\n                elif response.status_code != 200:\n                    self.logger.error('Response status code %d, file %s',\n                                      response.status_code, file_url)\n                    break\n                elif not self.keep_file(task, response, **kwargs):\n                    break\n                with self.lock:\n                    self.fetched_num += 1\n                    filename = self.get_filename(task, default_ext)\n                self.logger.info('image \n                self.storage.write(filename, response.content)\n                task['success'] = True\n                task['filename'] = filename\n                break\n            finally:\n                retry -= 1", "docstring": "Download the image and save it to the corresponding path.\n\nArgs:\ntask (dict): The task dict got from ``task_queue``.\ntimeout (int): Timeout of making requests for downloading images.\nmax_retry (int): the max retry times if the request fails.\n**kwargs: reserved arguments for overriding.", "source": "juraj-google-style"}
{"code": "def _execute(self, connection, query, fetch=True):\n        \n        cursor = connection.cursor()\n\n        try:\n            cursor.execute(query)\n        except Exception as e:\n            from ambry.mprlib.exceptions import BadSQLError\n            raise BadSQLError(\"Failed to execute query: {}; {}\".format(query, e))\n\n        if fetch:\n            return cursor.fetchall()\n        else:\n            return cursor", "docstring": "Executes given query using given connection.\n\nArgs:\nconnection (apsw.Connection): connection to the sqlite db who stores mpr data.\nquery (str): sql query\nfetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.\n\nReturns:\niterable with query result.", "source": "juraj-google-style"}
{"code": "def from_rfc3339_nanos(value):\n    \n    with_nanos = _RFC3339_NANOS.match(value)\n\n    if with_nanos is None:\n        raise ValueError(\n            \"Timestamp: {!r}, does not match pattern: {!r}\".format(\n                value, _RFC3339_NANOS.pattern\n            )\n        )\n\n    bare_seconds = datetime.datetime.strptime(\n        with_nanos.group(\"no_fraction\"), _RFC3339_NO_FRACTION\n    )\n    fraction = with_nanos.group(\"nanos\")\n\n    if fraction is None:\n        micros = 0\n    else:\n        scale = 9 - len(fraction)\n        nanos = int(fraction) * (10 ** scale)\n        micros = nanos \n\n    return bare_seconds.replace(microsecond=micros, tzinfo=pytz.utc)", "docstring": "Convert a nanosecond-precision timestamp to a native datetime.\n\n.. note::\nPython datetimes do not support nanosecond precision; this function\ntherefore truncates such values to microseconds.\n\nArgs:\nvalue (str): The RFC3339 string to convert.\n\nReturns:\ndatetime.datetime: The datetime object equivalent to the timestamp in\nUTC.\n\nRaises:\nValueError: If the timestamp does not match the RFC 3339\nregular expression.", "source": "juraj-google-style"}
{"code": "def send_log_message(self, message: LogMessage) -> None:\n    pass", "docstring": "Sends a log message to be handled.\n\nArgs:\n* message: LogMessage dictionary\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def setNetworkName(self, networkName='GRL'):\n        \n        print '%s call setNetworkName' % self.port\n        print networkName\n        try:\n            cmd = 'networkname %s' % networkName\n            datasetCmd = 'dataset networkname %s' % networkName\n            self.hasActiveDatasetToCommit = True\n            return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"setNetworkName() Error: \" + str(e))", "docstring": "set Thread Network name\n\nArgs:\nnetworkName: the networkname string to be set\n\nReturns:\nTrue: successful to set the Thread Networkname\nFalse: fail to set the Thread Networkname", "source": "juraj-google-style"}
{"code": "def save_q_df(self, state_key, action_key, q_value):\n        \n        if isinstance(q_value, float) is False:\n            raise TypeError(\"The type of q_value must be float.\")\n\n        new_q_df = pd.DataFrame([(state_key, action_key, q_value)], columns=[\"state_key\", \"action_key\", \"q_value\"])\n        if self.q_df is not None:\n            self.q_df = pd.concat([new_q_df, self.q_df])\n            self.q_df = self.q_df.drop_duplicates([\"state_key\", \"action_key\"])\n        else:\n            self.q_df = new_q_df", "docstring": "Insert or update Q-Value in `self.q_df`.\n\nArgs:\nstate_key:      State.\naction_key:     Action.\nq_value:        Q-Value.\n\nExceptions:\nTypeError:      If the type of `q_value` is not float.", "source": "juraj-google-style"}
{"code": "def word_list(sowpods=False, start='', end=''):\n    location = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'wordlists')\n    if sowpods:\n        filename = 'sowpods.txt'\n    else:\n        filename = 'twl.txt'\n    filepath = os.path.join(location, filename)\n    with open(filepath) as wordfile:\n        for word in wordfile.readlines():\n            word = word.strip()\n            if (start and end and word.startswith(start) and word.endswith(end)):\n                (yield word)\n            elif (start and word.startswith(start) and (not end)):\n                (yield word)\n            elif (end and word.endswith(end) and (not start)):\n                (yield word)\n            elif ((not start) and (not end)):\n                (yield word)", "docstring": "Opens the word list file.\n\nArgs:\nsowpods: a boolean to declare using the sowpods list or TWL (default)\nstart: a string of starting characters to find anagrams based on\nend: a string of ending characters to find anagrams based on\n\nYeilds:\na word at a time out of 178691 words for TWL, 267751 for sowpods. Much\nless if either start or end are used (filtering is applied here)", "source": "codesearchnet"}
{"code": "def _RunIpRoute(self, args=None, options=None):\n    args = (args or [])\n    options = (options or {})\n    command = ['ip', 'route']\n    command.extend(args)\n    for item in options.items():\n        command.extend(item)\n    try:\n        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        (stdout, stderr) = process.communicate()\n    except OSError as e:\n        self.logger.warning('Exception running %s. %s.', command, str(e))\n    else:\n        if process.returncode:\n            message = 'Non-zero exit status running %s. %s.'\n            self.logger.warning(message, command, stderr.strip())\n        else:\n            return stdout.decode('utf-8', 'replace')\n    return ''", "docstring": "Run a command with ip route and return the response.\n\nArgs:\nargs: list, the string ip route command args to execute.\noptions: dict, the string parameters to append to the ip route command.\n\nReturns:\nstring, the standard output from the ip route command execution.", "source": "codesearchnet"}
{"code": "def Close(self):\n    if (not self._connection):\n        raise RuntimeError('Cannot close database not opened.')\n    self._connection.commit()\n    self._connection.close()\n    self._connection = None\n    self._cursor = None\n    self.filename = None\n    self.read_only = None", "docstring": "Closes the database file.\n\nRaises:\nRuntimeError: if the database is not opened.", "source": "codesearchnet"}
{"code": "def find_all_sift(im_source, im_search, min_match_count=4, maxcnt=0):\n    \n    sift = _sift_instance()\n    flann = cv2.FlannBasedMatcher({'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50))\n\n    kp_sch, des_sch = sift.detectAndCompute(im_search, None)\n    if len(kp_sch) < min_match_count:\n        return None\n\n    kp_src, des_src = sift.detectAndCompute(im_source, None)\n    if len(kp_src) < min_match_count:\n        return None\n\n    h, w = im_search.shape[1:]\n\n    result = []\n    while True:\n        \n        matches = flann.knnMatch(des_sch, des_src, k=2)\n        good = []\n        for m, n in matches:\n            \n            if m.distance < 0.9 * n.distance:\n                good.append(m)\n\n        if len(good) < min_match_count:\n            break\n\n        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n        img_pts = np.float32([kp_src[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) \n\n        \n        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)\n        matches_mask = mask.ravel().tolist()\n\n        \n        h, w = im_search.shape[:2]\n        pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)\n        dst = cv2.perspectiveTransform(pts, M)\n\n        \n        \n        pypts = []\n        for npt in dst.astype(int).tolist():\n            pypts.append(tuple(npt[0]))\n\n        lt, br = pypts[0], pypts[2]\n        middle_point = (lt[0] + br[0]) / 2, (lt[1] + br[1]) / 2\n\n        result.append(dict(\n            result=middle_point,\n            rectangle=pypts,\n            confidence=(matches_mask.count(1), len(good)) \n        ))\n\n        if maxcnt and len(result) >= maxcnt:\n            break\n        \n        \n        qindexes, tindexes = [], []\n        for m in good:\n            qindexes.append(m.queryIdx) \n            tindexes.append(m.trainIdx) \n\n        def filter_index(indexes, arr):\n            r = np.ndarray(0, np.float32)\n            for i, item in enumerate(arr):\n                if i not in qindexes:\n                    r = np.append(r, item)\n            return r\n        kp_src = filter_index(tindexes, kp_src)\n        des_src = filter_index(tindexes, des_src)\n\n    return result", "docstring": "使用sift算法进行多个相同元素的查找\nArgs:\nim_source(string): 图像、素材\nim_search(string): 需要查找的图片\nthreshold: 阈值，当相识度小于该阈值的时候，就忽略掉\nmaxcnt: 限制匹配的数量\n\nReturns:\nA tuple of found [(point, rectangle), ...]\nA tuple of found [{\"point\": point, \"rectangle\": rectangle, \"confidence\": 0.76}, ...]\nrectangle is a 4 points list", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListGroupStats = channel.unary_unary(\n            \"/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListGroupStats\",\n            request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListGroupStatsRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListGroupStatsResponse.FromString,\n        )\n        self.ListEvents = channel.unary_unary(\n            \"/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListEvents\",\n            request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListEventsRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.ListEventsResponse.FromString,\n        )\n        self.DeleteEvents = channel.unary_unary(\n            \"/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/DeleteEvents\",\n            request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.DeleteEventsRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__stats__service__pb2.DeleteEventsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def __init__(self, package_name, version_range=None, paths=None, verbose=False):\n        \n        self.package = None\n        self._verbose = verbose\n        self._sections = []\n\n        \n        package = None\n        it = iter_packages(package_name, range_=version_range)\n        packages = sorted(it, key=lambda x: x.version, reverse=True)\n        for package_ in packages:\n            if self._verbose:\n                print \"searching for help in %s...\" % package_.uri\n            if package_.help:\n                package = package_\n                break\n\n        if package:\n            help_ = package.help\n            if isinstance(help_, basestring):\n                sections = [[\"Help\", help_]]\n            elif isinstance(help_, list):\n                sections = help_\n            if self._verbose:\n                print \"found %d help entries in %s.\" % (len(sections), package.uri)\n\n            \n            if package.num_variants == 0:\n                base = package.base\n                root = base\n            else:\n                variant = package.get_variant(0)\n                base = variant.base\n                root = variant.root\n\n            formatter = scoped_formatter(\n                base=base,\n                root=root,\n                config=config,\n                version=VersionBinding(package.version),\n                system=system)\n\n            \n            for section in sections:\n                uri = section[1]\n                uri = convert_old_command_expansions(uri)\n                uri = uri.replace(\"$BROWSER\", \"\").strip()\n                uri = formatter.format(uri)\n                section[1] = uri\n\n            self.package = package\n            self._sections = sections", "docstring": "Create a PackageHelp object.\n\nArgs:\npackage_name (str): Package to search.\nversion_range (`VersionRange`): Versions to search.", "source": "juraj-google-style"}
{"code": "def offset(self, mjd, new_scale, eop):\n        \n\n        delta = 0\n        for one, two in self.steps(new_scale):\n            one = one.name.lower()\n            two = two.name.lower()\n            \n            oper = \"_scale_{}_minus_{}\".format(two, one)\n            \n            roper = \"_scale_{}_minus_{}\".format(one, two)\n            if hasattr(self, oper):\n                delta += getattr(self, oper)(mjd, eop)\n            elif hasattr(self, roper):\n                delta -= getattr(self, roper)(mjd, eop)\n            else:  \n                raise DateError(\"Unknown convertion {} => {}\".format(one, two))\n\n        return delta", "docstring": "Compute the offset necessary in order to convert from one time-scale to another\n\nArgs:\nmjd (float):\nnew_scale (str): Name of the desired scale\nReturn:\nfloat: offset to apply in seconds", "source": "juraj-google-style"}
{"code": "def make_slices(self, tf_tensor, tensor_shape):\n    \n    tensor_layout = self.tensor_layout(tensor_shape)\n    slice_shape = self.slice_shape(tensor_shape)\n    def my_fn(pnum):\n      if tensor_layout.is_fully_replicated:\n        return tf_tensor\n      else:\n        slice_begin = self.slice_begin(tensor_shape, pnum)\n        return tf.slice(tf_tensor, slice_begin, slice_shape)\n\n    return parallel([tf_tensor.device] * self.size, my_fn,\n                    list(xrange(self.size)))", "docstring": "Turns a single tf.Tensor into a list of slices, one for each processor.\n\nArgs:\ntf_tensor: tf.Tensor.\ntensor_shape: Shape.\n\nReturns:\nlist of tf.tensor with length self.size.", "source": "juraj-google-style"}
{"code": "def _has_old_request_ended(self, shard_state):\n    \n    assert shard_state.slice_start_time is not None\n    assert shard_state.slice_request_id is not None\n    request_ids = [shard_state.slice_request_id]\n    logs = None\n    try:\n      logs = list(logservice.fetch(request_ids=request_ids))\n    except (apiproxy_errors.FeatureNotEnabledError,\n        apiproxy_errors.CapabilityDisabledError) as e:\n      \n      \n      logging.warning(\"Ignoring exception: %s\", e)\n\n    if not logs or not logs[0].finished:\n      return False\n    return True", "docstring": "Whether previous slice retry has ended according to Logs API.\n\nArgs:\nshard_state: shard state.\n\nReturns:\nTrue if the request of previous slice retry has ended. False if it has\nnot or unknown.", "source": "juraj-google-style"}
{"code": "def _take_screenshot(self):\n        \n        raw_png = self._wda.screenshot()\n        img = Image.open(BytesIO(raw_png))\n        return img", "docstring": "Take a screenshot, also called by Mixin\nArgs:\n- filename(string): file name to save\n\nReturns:\nPIL Image object", "source": "juraj-google-style"}
{"code": "def symmetric_difference(self, other):\n        \n        operation = bool.__xor__\n        self.cross_product(other, operation)\n        return  self", "docstring": "Constructs an unminimized DFA recognizing\nthe symmetric difference of the languages of two given DFAs.\nArgs:\nother (DFA): The other DFA that will be used\nfor the symmetric difference operation\nReturns:\nDFA: The resulting DFA", "source": "juraj-google-style"}
{"code": "def backup_value(self, value, up_to):\n        \n        self.N += 1\n        self.W += value\n        if self.parent is None or self is up_to:\n            return\n        self.parent.backup_value(value, up_to)", "docstring": "Propagates a value estimation up to the root node.\n\nArgs:\nvalue: the value to be propagated (1 = black wins, -1 = white wins)\nup_to: the node to propagate until.", "source": "juraj-google-style"}
{"code": "def copy_assets_to_destination_dir(asset_filename_map, destination_dir, saved_files=None):\n    if saved_files is None:\n        saved_files = set()\n    assets_destination_dir = path_helpers.get_or_create_assets_dir(destination_dir)\n    for asset_basename, asset_source_filepath in asset_filename_map.items():\n        asset_destination_filepath = file_io.join(compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_basename))\n        if file_io.file_exists(asset_source_filepath) and asset_source_filepath != asset_destination_filepath and (asset_destination_filepath not in saved_files):\n            file_io.copy(asset_source_filepath, asset_destination_filepath, overwrite=True)\n            saved_files.add(asset_destination_filepath)\n    tf_logging.info('Assets written to: %s', compat.as_text(assets_destination_dir))", "docstring": "Copy all assets from source path to destination path.\n\nArgs:\nasset_filename_map: a dict of filenames used for saving the asset in\nthe SavedModel to full paths from which the filenames were derived.\ndestination_dir: the destination directory that assets are stored in.\nsaved_files: a set of destination filepaths that have already been copied\nand will be skipped", "source": "github-repos"}
{"code": "def check_output_variable(self, variable):\n    match = False\n    if (variable in self.out_variables):\n        match = True\n    return match", "docstring": "Check to see if output variable was requested by downstream app.\n\nUsing the auto generated dictionary of output variables check to see if provided\nvariable was requested by downstream app.\n\nArgs:\nvariable (string): The variable name, not the full variable.\n\nReturns:\n(boolean): Boolean value indicator whether a match was found.", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    dynamic_info_size_error_reported = False\n\n    tasks_key = registry_key.GetSubkeyByName('Tasks')\n    tree_key = registry_key.GetSubkeyByName('Tree')\n\n    if not tasks_key or not tree_key:\n      parser_mediator.ProduceExtractionWarning(\n          'Task Cache is missing a Tasks or Tree sub key.')\n      return\n\n    task_guids = {}\n    for sub_key in tree_key.GetSubkeys():\n      for value_key, id_value in self._GetIdValue(sub_key):\n        \n        \n        \n        id_value_data_size = len(id_value.data)\n        if id_value_data_size != 78:\n          parser_mediator.ProduceExtractionWarning(\n              'unsupported Id value data size: {0:d}.'.format(\n                  id_value_data_size))\n          continue\n\n        guid_string = id_value.GetDataAsObject()\n        task_guids[guid_string] = value_key.name\n\n    dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')\n    dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')\n\n    dynamic_info_size = dynamic_info_map.GetByteSize()\n    dynamic_info2_size = dynamic_info2_map.GetByteSize()\n\n    for sub_key in tasks_key.GetSubkeys():\n      dynamic_info_value = sub_key.GetValueByName('DynamicInfo')\n      if not dynamic_info_value:\n        continue\n\n      dynamic_info_record_map = None\n      dynamic_info_value_data_size = len(dynamic_info_value.data)\n      if dynamic_info_value_data_size == dynamic_info_size:\n        dynamic_info_record_map = dynamic_info_map\n      elif dynamic_info_value_data_size == dynamic_info2_size:\n        dynamic_info_record_map = dynamic_info2_map\n      else:\n        if not dynamic_info_size_error_reported:\n          parser_mediator.ProduceExtractionWarning(\n              'unsupported DynamicInfo value data size: {0:d}.'.format(\n                  dynamic_info_value_data_size))\n          dynamic_info_size_error_reported = True\n        continue\n\n      try:\n        dynamic_info_record = self._ReadStructureFromByteStream(\n            dynamic_info_value.data, 0, dynamic_info_record_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse DynamicInfo record with error: {0!s}.'.format(\n                exception))\n\n      name = task_guids.get(sub_key.name, sub_key.name)\n\n      values_dict = {}\n      values_dict['Task: {0:s}'.format(name)] = '[ID: {0:s}]'.format(\n          sub_key.name)\n\n      event_data = windows_events.WindowsRegistryEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = registry_key.offset\n      event_data.regvalue = values_dict\n\n      event = time_events.DateTimeValuesEvent(\n          registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      event_data = TaskCacheEventData()\n      event_data.task_name = name\n      event_data.task_identifier = sub_key.name\n\n      last_registered_time = dynamic_info_record.last_registered_time\n      if last_registered_time:\n        \n        \n        date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'Last registered time')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      launch_time = dynamic_info_record.launch_time\n      if launch_time:\n        \n        date_time = 
dfdatetime_filetime.Filetime(timestamp=launch_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'Launch time')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      unknown_time = getattr(dynamic_info_record, 'unknown_time', None)\n      if unknown_time:\n        date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_UNKNOWN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def create_app(*, debug=False, threads=1, bigchaindb_factory=None):\n    if (not bigchaindb_factory):\n        bigchaindb_factory = BigchainDB\n    app = Flask(__name__)\n    app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)\n    CORS(app)\n    app.debug = debug\n    app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)\n    add_routes(app)\n    return app", "docstring": "Return an instance of the Flask application.\n\nArgs:\ndebug (bool): a flag to activate the debug mode for the app\n(default: False).\nthreads (int): number of threads to use\nReturn:\nan instance of the Flask application.", "source": "codesearchnet"}
{"code": "def is_storage(url, storage=None):\n    if storage:\n        return True\n    split_url = url.split(':\n    if ((len(split_url) == 2) and (split_url[0].lower() != 'file')):\n        return True\n    return False", "docstring": "Check if file is a local file or a storage file.\n\nFile is considered local if:\n- URL is a local path.\n- URL starts by \"file://\"\n- a \"storage\" is provided.\n\nArgs:\nurl (str): file path or URL\nstorage (str): Storage name.\n\nReturns:\nbool: return True if file is local.", "source": "codesearchnet"}
{"code": "def laid_out_slice_num(self, tensor_shape):\n    \n    ret = self.slicewise(lambda: tf.to_int32(0))\n    tensor_layout = self.tensor_layout(tensor_shape)\n    for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis:\n      if mesh_axis is not None:\n        def my_fn(x, pcoord, mesh_dim_size):\n          return x * mesh_dim_size + pcoord\n        ret = self.slicewise(\n            my_fn, ret, self.laid_out_pcoord(mesh_axis),\n            self.shape[mesh_axis].size)\n    return ret", "docstring": "A LaidOutTensor with an int32 scalar, identical for identical slices.\n\nThis is useful for synchronizing random operations.\n\nArgs:\ntensor_shape: a TensorShape\nReturns:\na LaidOutTensor where each slice is an integer scalar.", "source": "juraj-google-style"}
{"code": "def get_configuration(variable, site_code=None):\n    name = os.environ.get(CONFIGURATION_MODULE)\n    __import__(name)\n    module = sys.modules[name]\n    setting_value = getattr(module, variable, None)\n    site_overrides = getattr(module, 'SITE_OVERRIDES', None)\n    if (site_overrides and (site_code is not None)):\n        site_specific_overrides = site_overrides.get(site_code)\n        if site_specific_overrides:\n            override_value = site_specific_overrides.get(variable)\n            if override_value:\n                setting_value = override_value\n    if (setting_value is None):\n        raise RuntimeError('Worker is improperly configured: {} is unset in {}.'.format(variable, module))\n    return setting_value", "docstring": "Get a value from configuration.\n\nRetrieves the value corresponding to the given variable from the configuration module\ncurrently in use by the app.  Specify a site_code value to check for a site-specific override.\n\nArguments:\nvariable (str): The name of a variable from the configuration module.\n\nKeyword Arguments:\nsite_code (str): The SITE_OVERRIDES key to inspect for site-specific values\n\nReturns:\nThe value corresponding to the variable, or None if the variable is not found.", "source": "codesearchnet"}
{"code": "def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim):\n    layout_rules = convert_to_layout_rules(layout)\n    mesh_shape = convert_to_shape(mesh_shape)\n    mesh_axis = layout_rules.tensor_dimension_to_mesh_axis(tensor_dim, mesh_shape)\n    if (mesh_axis is None):\n        return 1\n    else:\n        return mesh_shape.dims[mesh_axis].size", "docstring": "How many ways does a tensor dimension get split.\n\nThis is used to \"cheat\" when building the mtf graph and peek at how a\ntensor dimension will be split.  Returns 1 if the tensor dimension is not\nsplit.\n\nArgs:\nlayout: an input to convert_to_layout_rules\nmesh_shape: an input to convert_to_shape\ntensor_dim: a Dimension\n\nReturns:\nan integer", "source": "codesearchnet"}
{"code": "def fts_contrast2(self, fs, ft_name, inv):\n        \n        inv_fts = [self.fts(x) for x in inv if set(fs) <= self.fts(x)]\n        for a in inv_fts:\n            for b in inv_fts:\n                if a != b:\n                    diff = a ^ b\n                    if len(diff) == 2:\n                        if all([nm == ft_name for (_, nm) in diff]):\n                            return True\n        return False", "docstring": "Return `True` if there is a segment in `inv` that contrasts in feature\n`ft_name`.\n\nArgs:\nfs (list): feature specifications used to filter `inv`.\nft_name (str): name of the feature where contrast must be present.\ninv (list): collection of segments represented as Unicode segments.\n\nReturns:\nbool: `True` if two segments in `inv` are identical in features except\nfor feature `ft_name`", "source": "juraj-google-style"}
{"code": "def extract_xml(input_):\n    \n    if type(input_) == str:\n        file_object = open(input_, \"rb\")\n    elif type(input_) == bytes:\n        file_object = BytesIO(input_)\n    else:\n        file_object = input_\n    try:\n        header = file_object.read(6)\n        file_object.seek(0)\n        if header.startswith(MAGIC_ZIP):\n            _zip = zipfile.ZipFile(file_object)\n            xml = _zip.open(_zip.namelist()[0]).read().decode()\n        elif header.startswith(MAGIC_GZIP):\n            xml = GzipFile(fileobj=file_object).read().decode()\n        elif header.startswith(MAGIC_XML):\n            xml = file_object.read().decode()\n        else:\n            file_object.close()\n            raise InvalidAggregateReport(\"Not a valid zip, gzip, or xml file\")\n\n        file_object.close()\n\n    except UnicodeDecodeError:\n        raise InvalidAggregateReport(\"File objects must be opened in binary \"\n                                     \"(rb) mode\")\n    except Exception as error:\n        raise InvalidAggregateReport(\n            \"Invalid archive file: {0}\".format(error.__str__()))\n\n    return xml", "docstring": "Extracts xml from a zip or gzip file at the given path, file-like object,\nor bytes.\n\nArgs:\ninput_: A path to a file, a file like object, or bytes\n\nReturns:\nstr: The extracted XML", "source": "juraj-google-style"}
{"code": "def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding, data_format, data_type, expected, use_gpu, v2, use_negative_input=False, bfloat16_rtol=0.01):\n    if use_gpu and (not test.is_gpu_available()):\n        self.skipTest('No GPU is available.')\n    if use_gpu and data_type == dtypes.float64 and test.is_built_with_rocm():\n        self.skipTest(\"ROCm pooling ops don't support float64.\")\n    if use_gpu and data_format == 'NCHW_VECT_C' and (not test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=(6, 1))):\n        self.skipTest('NCHW_VECT_C requires sm61+.')\n    if v2 and data_format != 'NHWC':\n        self.skipTest('v2 not supported for %s' % data_format)\n    if v2 and (not isinstance(padding, str)):\n        self.skipTest('non-constant ksize/strides requires nonexplicit padding')\n    if data_format == 'NCHW_VECT_C':\n        if data_type != dtypes.float32:\n            self.skipTest('quantization to qint8 not implemented for %r' % data_type)\n        if input_sizes[-1] % 4 != 0:\n            self.skipTest('Skipping test for depth %d' % input_sizes[-1])\n    total_size = 1\n    for s in input_sizes:\n        total_size *= s\n    tf_logging.info('Running %s test. %r %r %d %r %r %r %s', data_format, v2, input_sizes, total_size, pool_func, ksize, strides, data_type)\n    y = -1 if use_negative_input else 1\n    x = [((f + 128) % 255 - 127) * y for f in range(total_size)]\n    with self.cached_session(use_gpu=use_gpu):\n        t = constant_op.constant(x, shape=input_sizes, dtype=data_type)\n        if data_format in ('NCHW', 'NCHW_VECT_C', 'NCW'):\n            if data_format == 'NCHW_VECT_C':\n                t = test_util.NHWCToNCHW_VECT_C(t)\n                t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)\n            else:\n                t = test_util.NHWCToNCHW(t)\n            ksize = test_util.NHWCToNCHW(ksize)\n            strides = test_util.NHWCToNCHW(strides)\n            if isinstance(padding, list):\n                padding = test_util.NHWCToNCHW(padding)\n        ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])\n        strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])\n        if v2:\n            t = pool_func(t, ksize=ksize_placeholder, strides=strides_placeholder, padding=padding, data_format=data_format)\n        else:\n            t = pool_func(t, ksize=ksize, strides=strides, padding=padding, data_format=data_format)\n        if data_format == 'NCHW_VECT_C':\n            t = gen_array_ops.dequantize(t, -128, 127)\n            t = test_util.NCHW_VECT_CToNHWC(t)\n        elif data_format == 'NCHW':\n            t = test_util.NCHWToNHWC(t)\n        if v2:\n            actual = t.eval(feed_dict={ksize_placeholder: ksize, strides_placeholder: strides})\n        else:\n            actual = self.evaluate(t)\n            self.assertShapeEqual(actual, t)\n        self.assertAllCloseAccordingToType(expected, actual.flatten(), bfloat16_rtol=bfloat16_rtol)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua\nversion.\ninput_sizes: Input tensor dimensions.\nksize: The kernel size dimensions\nstrides: The stride dimensions\npadding: Padding type.\ndata_format: The data format we use to run the pooling operation.\ndata_type: The data type to use to run the pooling operation.\nexpected: An array containing the expected operation outputs.\nuse_gpu: Whether we are running on GPU.\nv2: 
Whether to use v2 version.\nuse_negative_input: If the input values should be negative.\nbfloat16_rtol: relative tolerance for bfloat16.", "source": "github-repos"}
{"code": "def compile_date(self):\n    result = self._dll.JLINKARM_GetCompileDateTime()\n    return ctypes.cast(result, ctypes.c_char_p).value.decode()", "docstring": "Returns a string specifying the date and time at which the DLL was\ntranslated.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nDatetime string.", "source": "codesearchnet"}
{"code": "def thread_exists(self, thread_id):\n    return self._requests_session.head(self._url.thread_api_url(thread_id=thread_id)).ok", "docstring": "Check if a thread exists or has 404'd.\n\nArgs:\nthread_id (int): Thread ID\n\nReturns:\nbool: Whether the given thread exists on this board.", "source": "codesearchnet"}
{"code": "def resolve_attr(obj, path):\n    if (not path):\n        return obj\n    (head, _, tail) = path.partition('.')\n    head_obj = getattr(obj, head)\n    return resolve_attr(head_obj, tail)", "docstring": "A recursive version of getattr for navigating dotted paths.\n\nArgs:\nobj: An object for which we want to retrieve a nested attribute.\npath: A dot separated string containing zero or more attribute names.\n\nReturns:\nThe attribute referred to by obj.a1.a2.a3...\n\nRaises:\nAttributeError: If there is no such attribute.", "source": "codesearchnet"}
{"code": "def setup(self, universe):\n    try:\n        prices = universe[self.name]\n    except KeyError:\n        prices = None\n    if (prices is not None):\n        self._prices = prices\n        self.data = pd.DataFrame(index=universe.index, columns=['value', 'position'], data=0.0)\n        self._prices_set = True\n    else:\n        self.data = pd.DataFrame(index=universe.index, columns=['price', 'value', 'position'])\n        self._prices = self.data['price']\n        self._prices_set = False\n    self._values = self.data['value']\n    self._positions = self.data['position']\n    self.data['outlay'] = 0.0\n    self._outlays = self.data['outlay']", "docstring": "Setup Security with universe. Speeds up future runs.\n\nArgs:\n* universe (DataFrame): DataFrame of prices with security's name as\none of the columns.", "source": "codesearchnet"}
{"code": "def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):\n    kwargs.update({'shell': shell, 'cwd': (cwd or self.fpath), 'stderr': subprocess.STDOUT, 'stdout': subprocess.PIPE, 'ignore_error': ignore_error})\n    log.debug((('cmd', cmd), ('kwargs', kwargs)))\n    return sh(cmd, **kwargs)", "docstring": "Run a command with the current working directory set to self.fpath\n\nArgs:\ncmd (str or tuple): cmdstring or listlike\n\nKeyword Arguments:\nignore_error (bool): if False, raise an Exception if p.returncode is\nnot 0\ncwd (str): current working dir to run cmd with\nshell (bool): subprocess.Popen ``shell`` kwarg\n\nReturns:\nstr: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)", "source": "codesearchnet"}
{"code": "def filter_single_value(cls, part_info, error_msg=None):\n        \n        \n        filtered = cls.filter_values(part_info)\n        if len(filtered) != 1:\n            if error_msg is None:\n                error_msg = \"Expected a single %s, got %s of them\" % \\\n                            (cls.__name__, len(filtered))\n            raise BadValueError(error_msg)\n        return filtered[0]", "docstring": "Filter the part_info dict list looking for a single instance of our\nclass\n\nArgs:\npart_info (dict): {part_name: [Info] or None} as returned from\nController.run_hook()\nerror_msg (str, optional): Specific error message to show if\nthere isn't a single value\n\nReturns:\ninfo subclass of cls", "source": "juraj-google-style"}
{"code": "def port(alias_name, default=None, allow_none=False):\n    \n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    try:\n        return int(_split_docker_link(alias_name)[2])\n    except KeyError as err:\n        if default or allow_none:\n            return default\n        else:\n            raise err", "docstring": "Get the port from the docker link alias or return the default.\n\nArgs:\nalias_name: The docker link alias\ndefault: The default value if the link isn't available\nallow_none: If the return value can be `None` (i.e. optional)\n\nExamples:\nAssuming a Docker link was created with ``docker --link postgres:db``\nand the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.\n\n>>> envitro.docker.port('DB')\n5432", "source": "juraj-google-style"}
{"code": "def datetimeobj(value, fmt=None):\n    if fmt:\n        return _datetimeobj_formats.get(fmt, (lambda v: datetimeobj_fmt(v, fmt)))(value)\n    l = len(value)\n    if ((19 <= l <= 24) and (value[3] == ' ')):\n        try:\n            return datetimeobj_d_b_Y_H_M_S(value)\n        except (KeyError, ValueError):\n            pass\n    if (30 <= l <= 31):\n        try:\n            return datetimeobj_a__d_b_Y_H_M_S_z(value)\n        except (KeyError, ValueError):\n            pass\n    if (l == 14):\n        try:\n            return datetimeobj_YmdHMS(value)\n        except ValueError:\n            pass\n    try:\n        return datetimeobj_epoch(value)\n    except ValueError:\n        pass\n    return datetimeobj_any(value)", "docstring": "Parse a datetime to a datetime object.\n\nUses fast custom parsing for common datetime formats or the slow dateutil\nparser for other formats. This is a trade off between ease of use and speed\nand is very useful for fast parsing of timestamp strings whose format may\nstandard but varied or unknown prior to parsing.\n\nCommon formats include:\n1 Feb 2010 12:00:00 GMT\nMon, 1 Feb 2010 22:00:00 +1000\n20100201120000\n1383470155 (seconds since epoch)\n\nSee the other datetimeobj_*() functions for more details.\n\nArgs:\nvalue: A string representing a datetime.\n\nReturns:\nA datetime object.", "source": "codesearchnet"}
{"code": "def RegisterDefinition(self, artifact_definition):\n    artifact_definition_name = artifact_definition.name.lower()\n    if (artifact_definition_name in self._artifact_definitions):\n        raise KeyError('Artifact definition already set for name: {0:s}.'.format(artifact_definition.name))\n    self._artifact_definitions[artifact_definition_name] = artifact_definition\n    self._defined_artifact_names.add(artifact_definition.name)\n    for source in artifact_definition.sources:\n        if (source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP):\n            self._artifact_name_references.update(source.names)", "docstring": "Registers an artifact definition.\n\nArtifact definitions are identified based on their lower case name.\n\nArgs:\nartifact_definition (ArtifactDefinition): an artifact definition.\n\nRaises:\nKeyError: if artifact definition is already set for the corresponding\nname.", "source": "codesearchnet"}
{"code": "def _psd_mask(x):\n    (eigenvalues, _) = tf.linalg.eigh(x)\n    return tf.cast((tf.reduce_min(input_tensor=eigenvalues, axis=(- 1)) >= 0), dtype=x.dtype)", "docstring": "Computes whether each square matrix in the input is positive semi-definite.\n\nArgs:\nx: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.\n\nReturns:\nmask: A floating-point `Tensor` of shape `[B1, ... Bn]`.  Each\nscalar is 1 if the corresponding matrix was PSD, otherwise 0.", "source": "codesearchnet"}
{"code": "def _GetTable(self):\n    result = []\n    lstr = str\n    for row in self._table:\n        result.append(('%s\\n' % self.separator.join((lstr(v) for v in row))))\n    return ''.join(result)", "docstring": "Returns table, with column headers and separators.\n\nReturns:\nThe whole table including headers as a string. Each row is\njoined by a newline and each entry by self.separator.", "source": "codesearchnet"}
{"code": "def load_case(adapter, case_obj, update=False):\n    \n    \n    existing_case = adapter.case(case_obj)\n    if existing_case:\n        if not update:\n            raise CaseError(\"Case {0} already exists in database\".format(case_obj['case_id']))\n        case_obj = update_case(case_obj, existing_case)\n\n    \n    try:\n        adapter.add_case(case_obj, update=update)\n    except CaseError as err:\n        raise err\n\n    return case_obj", "docstring": "Load a case to the database\n\nArgs:\nadapter: Connection to database\ncase_obj: dict\nupdate(bool): If existing case should be updated\n\nReturns:\ncase_obj(models.Case)", "source": "juraj-google-style"}
{"code": "def __init__(self, context, request):\n    \n    self._context = context\n    self._request = request\n    self._extractors = _create_extractors(request.col_params)\n    self._filters = _create_filters(request.col_params, self._extractors)\n    \n    \n    self._experiment = context.experiment()", "docstring": "Constructor.\n\nArgs:\ncontext: A backend_context.Context instance.\nrequest: A ListSessionGroupsRequest protobuf.", "source": "juraj-google-style"}
{"code": "def __init__(self, source_urn=None, token=None):\n    \n    super(InstantOutputPlugin, self).__init__()\n\n    if not source_urn:\n      raise ValueError(\"source_urn can't be empty.\")\n\n    if not token:\n      raise ValueError(\"token can't be empty.\")\n\n    self.source_urn = source_urn\n    self.token = token", "docstring": "OutputPlugin constructor.\n\nArgs:\nsource_urn: URN identifying source of the data (hunt or flow).\ntoken: Security token.\n\nRaises:\nValueError: If one of the keyword arguments is empty.", "source": "juraj-google-style"}
{"code": "def inspect_node(self, node_id):\n        \n        url = self._url('/nodes/{0}', node_id)\n        return self._result(self._get(url), True)", "docstring": "Retrieve low-level information about a swarm node\n\nArgs:\nnode_id (string): ID of the node to be inspected.\n\nReturns:\nA dictionary containing data about this node.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _GetIntegerValue(self, row, value_name):\n    value = row.get(value_name, None)\n    try:\n        return int(value, 10)\n    except (TypeError, ValueError):\n        return None", "docstring": "Converts a specific value of the row to an integer.\n\nArgs:\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\nvalue_name (str): name of the value within the row.\n\nReturns:\nint: value or None if the value cannot be converted.", "source": "codesearchnet"}
{"code": "def _maybe_extract(compressed_filename, directory, extension=None):\n    \n    logger.info('Extracting {}'.format(compressed_filename))\n\n    if extension is None:\n        basename = os.path.basename(compressed_filename)\n        extension = basename.split('.', 1)[1]\n\n    if 'zip' in extension:\n        with zipfile.ZipFile(compressed_filename, \"r\") as zip_:\n            zip_.extractall(directory)\n    elif 'tar' in extension or 'tgz' in extension:\n        with tarfile.open(compressed_filename, mode='r') as tar:\n            tar.extractall(path=directory)\n\n    logger.info('Extracted {}'.format(compressed_filename))", "docstring": "Extract a compressed file to ``directory``.\n\nArgs:\ncompressed_filename (str): Compressed file.\ndirectory (str): Extract to directory.\nextension (str, optional): Extension of the file; Otherwise, attempts to extract extension\nfrom the filename.", "source": "juraj-google-style"}
{"code": "def get(logdir):\n    with FileWriterCache._lock:\n        if logdir not in FileWriterCache._cache:\n            FileWriterCache._cache[logdir] = FileWriter(logdir, graph=ops.get_default_graph())\n        return FileWriterCache._cache[logdir]", "docstring": "Returns the FileWriter for the specified directory.\n\nArgs:\nlogdir: str, name of the directory.\n\nReturns:\nA `FileWriter`.", "source": "github-repos"}
{"code": "def also_run_as_tf_function(f: Callable[..., Any]) -> Callable[..., None]:\n\n    def decorated(*args, **kwds) -> None:\n\n        def bound_f() -> None:\n            f(*args, **kwds)\n        with context.eager_mode():\n            bound_f()\n            def_function.function(bound_f, autograph=False)()\n    return decorated", "docstring": "Runs the decorated test twice--once as is, once inside a tf.function.\n\nThis allows you to run a test both in eager execution and inside a\ntf.function, exercising the two execution modes supported in tf 2.0. The test\nassertions are automatically done inside tf.py_funcs, and tf.function ensures\nthat they run in the proper order and with the proper side effects.\n\nCurrently variable creation is not supported in tests annotated with this\ndecorator since it's tricky to ensure the variable doesn't get repeatedly\ncreated when retracing the tf.function.\n\nArgs:\nf: the test method to be decorated\n\nReturns:\nThe decorated test method, which will run both in eager and inside a\ntf.function.", "source": "github-repos"}
{"code": "def Add(self, path, age=None):\n    if (not isinstance(path, string_types)):\n        raise ValueError('Only strings should be added to a URN.')\n    result = rdfvalue.RDFURN(self.Copy(age))\n    result.Update(path=utils.JoinPath(self._string_urn, path))\n    return result", "docstring": "Add a relative stem to the current value and return a new RDFURN.\n\nNote that this returns an RDFURN, not a ClientURN since the resulting object\nwould not pass validation.\n\nArgs:\npath: A string containing a relative path.\nage: The age of the object. If None set to current time.\n\nReturns:\nA new RDFURN that can be chained.\n\nRaises:\nValueError: if the path component is not a string.", "source": "codesearchnet"}
{"code": "def add_sched_block_instance(self, config_dict):\n    schema = self._get_schema()\n    LOG.debug('Adding SBI with config: %s', config_dict)\n    validate(config_dict, schema)\n    updated_block = self._add_status(config_dict)\n    (scheduling_block_data, processing_block_data) = self._split_sched_block_instance(updated_block)\n    name = ('scheduling_block:' + updated_block['id'])\n    self._db.set_specified_values(name, scheduling_block_data)\n    self._db.push_event(self.scheduling_event_name, updated_block['status'], updated_block['id'])\n    for value in processing_block_data:\n        name = ((('scheduling_block:' + updated_block['id']) + ':processing_block:') + value['id'])\n        self._db.set_specified_values(name, value)\n        self._db.push_event(self.processing_event_name, value['status'], value['id'])", "docstring": "Add Scheduling Block to the database.\n\nArgs:\nconfig_dict (dict): SBI configuration", "source": "codesearchnet"}
{"code": "def from_chars(chars):\n        \n        paulis = [pauli_from_char(c, n) for n, c in enumerate(chars) if c != \"I\"]\n        if not paulis:\n            return 1.0 * I\n        if len(paulis) == 1:\n            return 1.0 * paulis[0]\n        return reduce(lambda a, b: a * b, paulis)", "docstring": "Make Pauli's Term from chars which is written by \"X\", \"Y\", \"Z\" or \"I\".\ne.g. \"XZIY\" => X(0) * Z(1) * Y(3)\n\nArgs:\nchars (str): Written in \"X\", \"Y\", \"Z\" or \"I\".\n\nReturns:\nTerm: A `Term` object.\n\nRaises:\nValueError: When chars conteins the character which is \"X\", \"Y\", \"Z\" nor \"I\".", "source": "juraj-google-style"}
{"code": "def Search(self, artifact=None, os_name=None, cpe=None, label=None):\n    return [c for c in self.conditions if c.Search(artifact, os_name, cpe, label)]", "docstring": "Find the host attributes that trigger data collection.\n\nArgs:\nartifact: An artifact name.\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nA list of conditions that contain the specified attributes.", "source": "codesearchnet"}
{"code": "def add_data(self, data):\n        \n\n        if self.state == self.ErrorState:\n            return\n\n        self.raw_data += bytearray(data)\n\n        still_processing = True\n        while still_processing:\n            still_processing = self.process_data()", "docstring": "Add data to our stream, emitting reports as each new one is seen\n\nArgs:\ndata (bytearray): A chunk of new data to add", "source": "juraj-google-style"}
{"code": "def get_directory_list_doc(self, configs):\n    if (not isinstance(configs, (tuple, list))):\n        configs = [configs]\n    util.check_list_type(configs, dict, 'configs', allow_none=False)\n    return self.__directory_list_descriptor(configs)", "docstring": "JSON dict description of a protorpc.remote.Service in list format.\n\nArgs:\nconfigs: Either a single dict or a list of dicts containing the service\nconfigurations to list.\n\nReturns:\ndict, The directory list document as a JSON dict.", "source": "codesearchnet"}
{"code": "def local_hardware_info():\n    results = {'os': platform.system(), 'memory': (psutil.virtual_memory().total / (1024 ** 3)), 'cpus': (psutil.cpu_count(logical=False) or 1)}\n    return results", "docstring": "Basic hardware information about the local machine.\n\nGives actual number of CPU's in the machine, even when hyperthreading is\nturned on. CPU count defaults to 1 when true count can't be determined.\n\nReturns:\ndict: The hardware information.", "source": "codesearchnet"}
{"code": "def to_json_str(self):\n    _json = self.to_json()\n    try:\n        return json.dumps(_json, sort_keys=True, cls=JsonEncoder)\n    except:\n        logging.exception('Could not serialize JSON: %r', _json)\n        raise", "docstring": "Convert data to json string representation.\n\nReturns:\njson representation as string.", "source": "codesearchnet"}
{"code": "def latest_db_file(paths: List[str]) -> Optional[str]:\n    \n    dbs = {}\n    for db_path in paths:\n        matches = VERSION_RE.match(os.path.basename(db_path))\n        assert matches, f'Invalid path name {db_path}'\n\n        try:\n            version = int(matches.group(1))\n        except ValueError:\n            continue\n\n        dbs[version] = db_path\n\n    if dbs:\n        highest_version = sorted(dbs)[-1]\n        return dbs[highest_version]\n\n    return None", "docstring": "Returns the path with the highest `version` number.\n\nRaises:\nAssertionError: If any of the `paths` in the list is an invalid name.\n\nArgs:\npaths: A list of file names.", "source": "juraj-google-style"}
{"code": "def to_api_repr(self):\n    config = copy.deepcopy(self._properties)\n    if (self.options is not None):\n        r = self.options.to_api_repr()\n        if (r != {}):\n            config[self.options._RESOURCE_NAME] = r\n    return config", "docstring": "Build an API representation of this object.\n\nReturns:\nDict[str, Any]:\nA dictionary in the format used by the BigQuery API.", "source": "codesearchnet"}
{"code": "def get(self, path):\n    if not path:\n        parsed_path = '/vars'\n    else:\n        parsed_path = path\n    weight_map = self.sharding_config['weight_map']\n    filenames = weight_map.get(parsed_path) or weight_map.get('/' + parsed_path + '/vars')\n    if filenames is not None:\n        if not isinstance(filenames, list):\n            filenames = [filenames]\n        self.current_shard_filenames = filenames\n        filename = filenames[0]\n    else:\n        self.current_shard_filenames = []\n        filename = None\n    if filename is not None and filename != self.current_shard_path.name:\n        self.close()\n        self.h5_file = self._get_h5_file(self.path.with_name(filename))\n    return super().get(path)", "docstring": "Get the H5 entry group.\n\nThis method is only available in read mode. If the path is not found in\nthe current shard, it will switch to the correct shard.\n\nArgs:\npath: `str`. The variable path.", "source": "github-repos"}
{"code": "def mount(dmg):\n    temp_dir = __salt__['temp.dir'](prefix='dmg-')\n    cmd = 'hdiutil attach -readonly -nobrowse -mountpoint {0} \"{1}\"'.format(temp_dir, dmg)\n    return (__salt__['cmd.run'](cmd), temp_dir)", "docstring": "Attempt to mount a dmg file to a temporary location and return the\nlocation of the pkg file inside\n\nArgs:\ndmg (str): The location of the dmg file to mount\n\nReturns:\ntuple: Tuple containing the results of the command along with the mount\npoint\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' macpackage.mount /tmp/software.dmg", "source": "codesearchnet"}
{"code": "def interpolate_to_timestep(self, timestep, cumulative=None):\n    assert ((timestep % self.header.analysis_period.timestep) == 0), 'Target timestep({}) must be divisable by current timestep({})'.format(timestep, self.header.analysis_period.timestep)\n    if (cumulative is not None):\n        assert isinstance(cumulative, bool), 'Expected Boolean. Got {}'.format(type(cumulative))\n    _new_values = []\n    _data_length = len(self._values)\n    for d in xrange(_data_length):\n        for _v in self._xxrange(self[d], self[((d + 1) % _data_length)], timestep):\n            _new_values.append(_v)\n    native_cumulative = self.header.data_type.cumulative\n    if ((cumulative is True) or ((cumulative is None) and native_cumulative)):\n        for (i, d) in enumerate(_new_values):\n            _new_values[i] = (d / timestep)\n    if (self.header.data_type.point_in_time is False):\n        shift_dist = int((timestep / 2))\n        _new_values = (_new_values[(- shift_dist):] + _new_values[:(- shift_dist)])\n    a_per = self.header.analysis_period\n    _new_a_per = AnalysisPeriod(a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month, a_per.end_day, a_per.end_hour, timestep, a_per.is_leap_year)\n    _new_header = self.header.duplicate()\n    _new_header._analysis_period = _new_a_per\n    return HourlyContinuousCollection(_new_header, _new_values)", "docstring": "Interpolate data for a finer timestep using a linear interpolation.\n\nArgs:\ntimestep: Target timestep as an integer. Target timestep must be\ndivisable by current timestep.\ncumulative: A boolean that sets whether the interpolation\nshould treat the data colection values as cumulative, in\nwhich case the value at each timestep is the value over\nthat timestep (instead of over the hour). The default will\ncheck the DataType to see if this type of data is typically\ncumulative over time.\n\nReturn:\nA continuous hourly data collection with data interpolated to\nthe input timestep.", "source": "codesearchnet"}
{"code": "def __init__( self, matrix ):\n        \n        assert type( matrix ) is np.ndarray\n        assert matrix.shape == ( 3, 3 )\n        self.matrix = matrix \n        self.inv_matrix = np.linalg.inv( matrix )", "docstring": "Initialise a Cell object.\n\nArgs:\nmatrix (np.array): 3x3 numpy array containing the cell matrix.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def timed_operation(msg, log_start=False):\n    \n    assert len(msg)\n    if log_start:\n        logger.info('Start {} ...'.format(msg))\n    start = timer()\n    yield\n    msg = msg[0].upper() + msg[1:]\n    logger.info('{} finished, time:{:.4f} sec.'.format(\n        msg, timer() - start))", "docstring": "Surround a context with a timer.\n\nArgs:\nmsg(str): the log to print.\nlog_start(bool): whether to print also at the beginning.\n\nExample:\n.. code-block:: python\n\nwith timed_operation('Good Stuff'):\ntime.sleep(1)\n\nWill print:\n\n.. code-block:: python\n\nGood stuff finished, time:1sec.", "source": "juraj-google-style"}
{"code": "def format_returnvalue(self, value):\n    self._ensure_loaded()\n    if (not self.return_info.is_data):\n        return None\n    if (self.return_info.type_name is not None):\n        return typeinfo.type_system.format_value(value, self.return_info.type_name, self.return_info.formatter)\n    return self.return_info.formatter(value)", "docstring": "Format the return value of this function as a string.\n\nArgs:\nvalue (object): The return value that we are supposed to format.\n\nReturns:\nstr: The formatted return value, or None if this function indicates\nthat it does not return data", "source": "codesearchnet"}
{"code": "def calc_sha(self, checksum):\n        \n        with LogTask('Calculating {}'.format(checksum)):\n            with open(self.dst + '.hash', 'wt') as f:\n                sha = utils.get_hash(self.dst, checksum)\n                f.write(sha)\n            self.exported_metadata[checksum] = sha", "docstring": "Calculate the checksum of the new exported disk, write it to\na file, and update this managers 'exported_metadata'.\n\nArgs:\nchecksum(str): The type of the checksum", "source": "juraj-google-style"}
{"code": "def evaluate(condition):\n    success = False\n    if (len(condition) > 0):\n        try:\n            (rule_name, ast_tokens, evaluate_function) = Condition.find_rule(condition)\n            if (not (rule_name == 'undefined')):\n                success = evaluate_function(ast_tokens)\n        except AttributeError as exception:\n            Logger.get_logger(__name__).error('Attribute error: %s', exception)\n    else:\n        success = True\n    return success", "docstring": "Evaluate simple condition.\n\n>>> Condition.evaluate('  2  ==  2  ')\nTrue\n>>> Condition.evaluate('  not  2  ==  2  ')\nFalse\n>>> Condition.evaluate('  not  \"abc\"  ==  \"xyz\"  ')\nTrue\n>>> Condition.evaluate('2 in [2, 4, 6, 8, 10]')\nTrue\n>>> Condition.evaluate('5 in [2, 4, 6, 8, 10]')\nFalse\n>>> Condition.evaluate('\"apple\" in [\"apple\", \"kiwi\", \"orange\"]')\nTrue\n>>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]')\nTrue\n>>> Condition.evaluate('\"apple\" not in [\"kiwi\", \"orange\"]')\nTrue\n\nArgs:\ncondition (str): Python condition as string.\n\nReturns:\nbool: True when condition evaluates to True.", "source": "codesearchnet"}
{"code": "def _IsComparable(target):\n    if _IsNumeric(target):\n        return True\n    for attr in _COMPARABLE_ATTRS:\n        if not hasattr(target, attr):\n            return False\n    return True", "docstring": "Returns True if the target is comparable.\n\nMany things are considered comparable. An important exception is None, which\nin Python 2 compares less than anything besides None. None is a special case\nhandled by _NoneSubject, so it's irrelevant what this returns for None.\n\nArgs:\ntarget: any object whatsoever.\n\nReturns:\nTrue if the target is comparable, otherwise False.", "source": "github-repos"}
{"code": "def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):\n    \n    source_paths = _get_pdf_filenames_at(source_directory)\n    yield len(source_paths)\n    for source_path in source_paths:\n        output = os.path.join(output_directory, os.path.basename(source_path))\n        compress_pdf(source_path, output, ghostscript_binary)\n        yield output", "docstring": "Compress all PDF files in the current directory and place the output in the\ngiven output directory. This is a generator function that first yields the amount\nof files to be compressed, and then yields the output path of each file.\n\nArgs:\nsource_directory (str): Filepath to the source directory.\noutput_directory (str): Filepath to the output directory.\nghostscript_binary (str): Name of the Ghostscript binary.\n\nReturns:\nlist(str): paths to outputs.", "source": "juraj-google-style"}
{"code": "def _get_log_file(self, handler):\n        \n        if 'file_name_pattern' not in handler:\n            filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap'\n        else:\n            filename = handler['file_name_pattern']\n\n        log_file = handler['log_dir']\n        if 'path' in handler:\n            log_file = os.path.join(log_file, handler['path'], filename)\n        else:\n            log_file = os.path.join(log_file, filename)\n\n        log_file = time.strftime(log_file, time.gmtime())\n        log_file = log_file.format(**handler)\n\n        return log_file", "docstring": "Generate log file path for a given handler\n\nArgs:\nhandler:\nThe handler configuration dictionary for which a log file\npath should be generated.", "source": "juraj-google-style"}
{"code": "def run(self, test_names=None):\n    logging.log_path = self.log_path\n    if not self._pre_run():\n        return self.results\n    logging.info('==========> %s <==========', self.TAG)\n    if not test_names:\n        if self.tests:\n            test_names = list(self.tests)\n        else:\n            test_names = self.get_existing_test_names()\n    self.results.requested = test_names\n    self.summary_writer.dump(self.results.requested_test_names_dict(), records.TestSummaryEntryType.TEST_NAME_LIST)\n    tests = self._get_test_methods(test_names)\n    try:\n        setup_class_result = self._setup_class()\n        if setup_class_result:\n            return setup_class_result\n        for test_name, test_method in tests:\n            max_consecutive_error = getattr(test_method, ATTR_MAX_CONSEC_ERROR, 0)\n            repeat_count = getattr(test_method, ATTR_REPEAT_CNT, 0)\n            max_retry_count = getattr(test_method, ATTR_MAX_RETRY_CNT, 0)\n            if max_retry_count:\n                self._exec_one_test_with_retry(test_name, test_method, max_retry_count)\n            elif repeat_count:\n                self._exec_one_test_with_repeat(test_name, test_method, repeat_count, max_consecutive_error)\n            else:\n                self.exec_one_test(test_name, test_method)\n        return self.results\n    except signals.TestAbortClass as e:\n        e.details = 'Test class aborted due to: %s' % e.details\n        self._skip_remaining_tests(e)\n        return self.results\n    except signals.TestAbortAll as e:\n        e.details = 'All remaining tests aborted due to: %s' % e.details\n        self._skip_remaining_tests(e)\n        setattr(e, 'results', self.results)\n        raise e\n    finally:\n        self._teardown_class()\n        logging.info('Summary for test class %s: %s', self.TAG, self.results.summary_str())", "docstring": "Runs tests within a test class.\n\nOne of these test method lists will be executed, shown here in priority\norder:\n\n1. The test_names list, which is passed from cmd line. Invalid names\nare guarded by cmd line arg parsing.\n2. The self.tests list defined in test class. Invalid names are\nignored.\n3. All function that matches test method naming convention in the test\nclass.\n\nArgs:\ntest_names: A list of string that are test method names requested in\ncmd line.\n\nReturns:\nThe test results object of this class.", "source": "github-repos"}
{"code": "def hstack(tup):\n    \n    \n    if all(ar.ndim is 1 for ar in tup):\n        return concatenate(tup, axis=0)\n    else:\n        return concatenate(tup, axis=1)", "docstring": "Stack arrays in sequence horizontally (column wise),\nhandling ``RemoteArray`` and ``DistArray`` without moving data.\n\nArgs:\ntup (sequence of array_like)\n\nReturns:\nres: `ndarray`, if inputs were all local\n`RemoteArray`, if inputs were all on the same remote engine\n`DistArray`, if inputs were already scattered on different engines", "source": "juraj-google-style"}
{"code": "def set_work_request(self, worker_name, sample_set, subkeys=None):\n        \n\n        \n        if self.plugin_meta[worker_name]['sample_set_input']:\n            yield self.work_request(worker_name, sample_set, subkeys)\n \n        \n        else:\n            md5_list = self.get_sample_set(sample_set)\n            for md5 in md5_list:\n                if subkeys:\n                    yield self.work_request(worker_name, md5, subkeys)\n                else:\n                    yield self.work_request(worker_name, md5)[worker_name]", "docstring": "Make a work request for an existing stored sample (or sample_set).\nArgs:\nworker_name: 'strings', 'pe_features', whatever\nsample_set: the md5 of a sample_set in the Workbench data store\nsubkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all)\nReturns:\nThe output is a generator of the results of the worker output for the sample_set", "source": "juraj-google-style"}
{"code": "def cycle_find(key, width=4):\n    \n\n    key_len = len(key)\n    buf = ''\n\n    it = deBruijn(width, 26)\n\n    for i in range(key_len):\n        buf += chr(ord('A') + next(it))\n\n    if buf == key:\n        return 0\n\n    for i, c in enumerate(it):\n        buf = buf[1:] + chr(ord('A') + c)\n        if buf == key:\n            return i + 1\n\n    return -1", "docstring": "Given an element of a de Bruijn sequence, find its index in that sequence.\n\nArgs:\nkey(str): The piece of the de Bruijn sequence to find.\nwidth(int): The width of each element in the sequence.\n\nReturns:\nint: The index of ``key`` in the de Bruijn sequence.", "source": "juraj-google-style"}
{"code": "def tokeninfo(self, jwt):\n\n        \n        warnings.warn(\"/tokeninfo will be deprecated in future releases\", DeprecationWarning)\n        return self.post(\n            url='https:\n            data={'id_token': jwt},\n            headers={'Content-Type': 'application/json'}\n        )", "docstring": "Returns user profile based on the user's jwt\n\nValidates a JSON Web Token (signature and expiration) and returns the\nuser information associated with the user id (sub property) of\nthe token.\n\nArgs:\njwt (str): User's jwt\n\nReturns:\nThe user profile.", "source": "juraj-google-style"}
{"code": "def grid(self, dimensions=None, **kwargs):\n        \n        return self.groupby(dimensions, container_type=GridSpace, **kwargs)", "docstring": "Groups data by supplied dimension(s) laying the groups along\nthe dimension(s) out in a GridSpace.\n\nArgs:\ndimensions: Dimension/str or list\nDimension or list of dimensions to group by\n\nReturns:\ngrid: GridSpace\nGridSpace with supplied dimensions", "source": "juraj-google-style"}
{"code": "def _rowwise_unsorted_segment_sum(values, indices, n):\n  \n  batch, k = tf.unstack(tf.shape(indices), num=2)\n  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n\n  ret_flat = tf.unsorted_segment_sum(\n      tf.reshape(values, [-1]), indices_flat, batch * n)\n  return tf.reshape(ret_flat, [batch, n])", "docstring": "UnsortedSegmentSum on each row.\n\nArgs:\nvalues: a `Tensor` with shape `[batch_size, k]`.\nindices: an integer `Tensor` with shape `[batch_size, k]`.\nn: an integer.\nReturns:\nA `Tensor` with the same type as `values` and shape `[batch_size, n]`.", "source": "juraj-google-style"}
{"code": "def write_plot(plot, filename, width=DEFAULT_PAGE_WIDTH, height=DEFAULT_PAGE_HEIGHT, unit=DEFAULT_PAGE_UNIT):\n    svg = plot_to_svg(plot, width, height, unit)\n    with open(filename, 'w') as outfile:\n        outfile.write(svg)", "docstring": "Writes a plot SVG to a file.\n\nArgs:\nplot (list): a list of layers to plot\nfilename (str): the name of the file to write\nwidth (float): the width of the output SVG\nheight (float): the height of the output SVG\nunit (str): the unit of the height and width", "source": "codesearchnet"}
{"code": "def chunk_embedding_fn(chunk: Chunk) -> str:\n    if chunk.embedding is None or chunk.embedding.dense_embedding is None:\n        raise ValueError(f'Expected chunk to contain embedding. {chunk}')\n    return '{' + ','.join((str(x) for x in chunk.embedding.dense_embedding)) + '}'", "docstring": "Convert embedding to PostgreSQL array string.\n\nFormats dense embedding as a PostgreSQL-compatible array string.\nExample: [1.0, 2.0] -> '{1.0,2.0}'\n\nArgs:\nchunk: Input Chunk object.\n\nReturns:\nstr: PostgreSQL array string representation of the embedding.\n\nRaises:\nValueError: If chunk has no dense embedding.", "source": "github-repos"}
{"code": "def take_shas_of_all_files(G, settings):\n    \n    global ERROR_FN\n    sprint = settings[\"sprint\"]\n    error = settings[\"error\"]\n    ERROR_FN = error\n    sha_dict = {}\n    all_files = []\n    for target in G.nodes(data=True):\n        sprint(\"About to take shas of files in target '{}'\".format(target[0]),\n               level=\"verbose\")\n        if 'dependencies' in target[1]:\n            sprint(\"It has dependencies\", level=\"verbose\")\n            deplist = []\n            for dep in target[1]['dependencies']:\n                glist = glob.glob(dep)\n                if glist:\n                    for oneglob in glist:\n                        deplist.append(oneglob)\n                else:\n                    deplist.append(dep)\n            target[1]['dependencies'] = list(deplist)\n            for dep in target[1]['dependencies']:\n                sprint(\"  - {}\".format(dep), level=\"verbose\")\n                all_files.append(dep)\n        if 'output' in target[1]:\n            sprint(\"It has outputs\", level=\"verbose\")\n            for out in acts.get_all_outputs(target[1]):\n                sprint(\"  - {}\".format(out), level=\"verbose\")\n                all_files.append(out)\n    if len(all_files):\n        sha_dict['files'] = {}\n        \n        extant_files = []\n        for item in all_files:\n            if item not in extant_files and os.path.isfile(item):\n                extant_files.append(item)\n        pool = Pool()\n        results = pool.map(get_sha, extant_files)\n        pool.close()\n        pool.join()\n        for fn, sha in zip(extant_files, results):\n            sha_dict['files'][fn] = {'sha': sha}\n        return sha_dict\n    sprint(\"No dependencies\", level=\"verbose\")", "docstring": "Takes sha1 hash of all dependencies and outputs of all targets\n\nArgs:\nThe graph we are going to build\nThe settings dictionary\n\nReturns:\nA dictionary where the keys are the filenames and the\nvalue is the sha1 hash", "source": "juraj-google-style"}
{"code": "def _update(self, item, feed_item):\n    self._api().update(profileId=self.profile_id, body=item).execute()", "docstring": "Updates a new item in CM.\n\nArgs:\nitem: The CM object to update.\nfeed_item: The feed item from the Bulkdozer feed representing the item to\nupdate.", "source": "github-repos"}
{"code": "def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=1e-2):\n        \n        if not self.structure:\n            return None\n        sg = SpacegroupAnalyzer(self.structure)\n        symmops = sg.get_point_group_operations(cartesian=cartesian)\n        points = np.dot(kpoint, [m.rotation_matrix for m in symmops])\n        rm_list = []\n        \n        for i in range(len(points) - 1):\n            for j in range(i + 1, len(points)):\n                if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):\n                    rm_list.append(i)\n                    break\n        return np.delete(points, rm_list, axis=0)", "docstring": "Returns a list of unique symmetrically equivalent k-points.\n\nArgs:\nkpoint (1x3 array): coordinate of the k-point\ncartesian (bool): kpoint is in cartesian or fractional coordinates\ntol (float): tolerance below which coordinates are considered equal\n\nReturns:\n([1x3 array] or None): if structure is not available returns None", "source": "juraj-google-style"}
{"code": "def read_vocab(args, column_name):\n    vocab_path = os.path.join(args.analysis, (feature_transforms.VOCAB_ANALYSIS_FILE % column_name))\n    if (not file_io.file_exists(vocab_path)):\n        return []\n    (vocab, _) = feature_transforms.read_vocab_file(vocab_path)\n    return vocab", "docstring": "Reads a vocab file if it exists.\n\nArgs:\nargs: command line flags\ncolumn_name: name of column to that has a vocab file.\n\nReturns:\nList of vocab words or [] if the vocab file is not found.", "source": "codesearchnet"}
{"code": "def GetTypeChecker(field):\n  \n  if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and\n      field.type == _FieldDescriptor.TYPE_STRING):\n    return UnicodeValueChecker()\n  if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:\n    if SupportsOpenEnums(field):\n      \n      return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]\n    else:\n      return EnumValueChecker(field.enum_type)\n  return _VALUE_CHECKERS[field.cpp_type]", "docstring": "Returns a type checker for a message field of the specified types.\n\nArgs:\nfield: FieldDescriptor object for this field.\n\nReturns:\nAn instance of TypeChecker which can be used to verify the types\nof values assigned to a field of the specified type.", "source": "juraj-google-style"}
{"code": "def preface_inference(f):\n\n    @functools.wraps(f)\n    def wrapper(self, *args, **kwargs):\n        self._preface_inference()\n        return f(self, *args, **kwargs)\n    return wrapper", "docstring": "Wraps given function with things to run before every inference call.\n\nArgs:\nf: The method of `EnergyInference` to wrap.\n\nReturns:\nwrapper: The wrapped function.", "source": "github-repos"}
{"code": "def get_drives(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri=id_or_uri) + self.DRIVES_PATH\n        return self._client.get(id_or_uri=uri)", "docstring": "Gets the list of drives allocated to this SAS logical JBOD.\n\nArgs:\nid_or_uri: Can be either the SAS logical JBOD ID or the SAS logical JBOD URI.\n\nReturns:\nlist: A list of Drives", "source": "juraj-google-style"}
{"code": "def register_for_auto_class(cls, auto_class='AutoConfig'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom configurations as the ones in\nthe library are already mapped with `AutoConfig`.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"AutoConfig\"`):\nThe auto class to register this new configuration with.", "source": "github-repos"}
{"code": "def _get_token(\n    request=None, allowed_auth_schemes=('OAuth', 'Bearer'),\n    allowed_query_keys=('bearer_token', 'access_token')):\n  \n  allowed_auth_schemes = _listlike_guard(\n      allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True)\n  \n  auth_header = os.environ.get('HTTP_AUTHORIZATION')\n  if auth_header:\n    for auth_scheme in allowed_auth_schemes:\n      if auth_header.startswith(auth_scheme):\n        return auth_header[len(auth_scheme) + 1:]\n    \n    \n    return None\n\n  \n  if request:\n    allowed_query_keys = _listlike_guard(\n        allowed_query_keys, 'allowed_query_keys', iterable_only=True)\n    for key in allowed_query_keys:\n      token, _ = request.get_unrecognized_field_info(key)\n      if token:\n        return token", "docstring": "Get the auth token for this request.\n\nAuth token may be specified in either the Authorization header or\nas a query param (either access_token or bearer_token).  We'll check in\nthis order:\n1. Authorization header.\n2. bearer_token query param.\n3. access_token query param.\n\nArgs:\nrequest: The current request, or None.\n\nReturns:\nThe token in the request or None.", "source": "juraj-google-style"}
{"code": "def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None):\n    x = layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, name=name)(x)\n    if not use_bias:\n        bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3\n        bn_name = None if name is None else name + '_bn'\n        x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)\n    if activation is not None:\n        ac_name = None if name is None else name + '_ac'\n        x = layers.Activation(activation, name=ac_name)(x)\n    return x", "docstring": "Utility function to apply conv + BN.\n\nArgs:\nx: input tensor.\nfilters: filters in `Conv2D`.\nkernel_size: kernel size as in `Conv2D`.\nstrides: strides in `Conv2D`.\npadding: padding mode in `Conv2D`.\nactivation: activation in `Conv2D`.\nuse_bias: whether to use a bias in `Conv2D`.\nname: name of the ops; will become `name + '_ac'`\nfor the activation and `name + '_bn'` for the batch norm layer.\n\nReturns:\nOutput tensor after applying `Conv2D` and `BatchNormalization`.", "source": "github-repos"}
{"code": "def return_selected_form_items(form_info):\n    selected_keys = []\n    selected_names = []\n    for chosen in form_info:\n        if chosen['choice']:\n            selected_keys.append(chosen['key'])\n            selected_names.append(chosen['name'])\n    return (selected_keys, selected_names)", "docstring": "It returns chosen keys list from a given form.\n\nArgs:\nform_info: serialized list of dict form data\nReturns:\nselected_keys(list): Chosen keys list\nselected_names(list): Chosen channels' or subscribers' names.", "source": "codesearchnet"}
{"code": "def string_to_scopes(scopes):\n    if (not scopes):\n        return []\n    elif isinstance(scopes, six.string_types):\n        return scopes.split(' ')\n    else:\n        return scopes", "docstring": "Converts stringifed scope value to a list.\n\nIf scopes is a list then it is simply passed through. If scopes is an\nstring then a list of each individual scope is returned.\n\nArgs:\nscopes: a string or iterable of strings, the scopes.\n\nReturns:\nThe scopes in a list.", "source": "codesearchnet"}
{"code": "def WriteToPath(obj, filepath):\n  \n  with io.open(filepath, mode=\"w\", encoding=\"utf-8\") as filedesc:\n    WriteToFile(obj, filedesc)", "docstring": "Serializes and writes given Python object to the specified YAML file.\n\nArgs:\nobj: A Python object to serialize.\nfilepath: A path to the file into which the object is to be written.", "source": "juraj-google-style"}
{"code": "def isplaybook(obj):\n    return (isinstance(obj, Iterable) and ((not isinstance(obj, string_types)) and (not isinstance(obj, Mapping))))", "docstring": "Inspects the object and returns if it is a playbook\n\nArgs:\nobj (object): The object to be inspected by this function\n\nReturns:\nboolean: True if the object is a list and False if it is not", "source": "codesearchnet"}
{"code": "def pil_image(self, fill_value=None, compute=True):\n        \n        channels, mode = self.finalize(fill_value)\n        res = channels.transpose('y', 'x', 'bands')\n        img = dask.delayed(PILImage.fromarray)(np.squeeze(res.data), mode)\n        if compute:\n            img = img.compute()\n        return img", "docstring": "Return a PIL image from the current image.\n\nArgs:\nfill_value (int or float): Value to use for NaN null values.\nSee :meth:`~trollimage.xrimage.XRImage.finalize` for more\ninfo.\ncompute (bool): Whether to return a fully computed PIL.Image\nobject (True) or return a dask Delayed object representing\nthe Image (False). This is True by default.", "source": "juraj-google-style"}
{"code": "def simple_value(self, value: Any, *, name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, css_classes: Optional[Sequence[str]]=None, max_summary_len_for_str: int=80) -> Html:\n    del name, parent, root_path\n\n    def value_repr() -> str:\n        if isinstance(value, str):\n            if len(value) < max_summary_len_for_str:\n                return repr(value)\n            else:\n                return value\n        return utils.format(value, compact=False, verbose=False, hide_default_values=True, python_format=True, use_inferred=True, max_bytes_len=64)\n    return Html.element('span', [Html.escape(value_repr)], css_classes=['simple-value', self.css_class_name(value), css_classes]).add_style('\\n        \\n        .simple-value {\\n          color: blue;\\n          display: inline-block;\\n          white-space: pre-wrap;\\n          padding: 0.2em;\\n          margin-top: 0.15em;\\n        }\\n        .simple-value.str {\\n          color: darkred;\\n          font-style: italic;\\n        }\\n        .simple-value.int, .simple-value.float {\\n          color: darkblue;\\n        }\\n        ')", "docstring": "Renders a simple value.\n\nArgs:\nvalue: The value to render.\nname: The name of the value.\nparent: The parent of the value.\nroot_path: The root path of the value.\ncss_classes: CSS classes to add to the HTML element.\nmax_summary_len_for_str: The maximum length of the string to display.\n\nReturns:\nThe rendered HTML as the simple value.", "source": "github-repos"}
{"code": "def delete_edge(self, ind_node, dep_node):\n        \n        graph = self.graph\n        if dep_node not in graph.get(ind_node, []):\n            raise KeyError(\n                \"No edge exists between %s and %s.\" % (ind_node, dep_node)\n            )\n        graph[ind_node].remove(dep_node)", "docstring": "Delete an edge from the graph.\n\nArgs:\nind_node (str): The independent node to delete an edge from.\ndep_node (str): The dependent node that has a dependency on the\nind_node.\n\nRaises:\nKeyError: Raised when the edge doesn't already exist.", "source": "juraj-google-style"}
{"code": "def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0,\n                             mode='train', **kwargs):\n  \n  del params\n  rng = kwargs.get('rng', None)\n  (q, k, v), mask = x\n  feature_depth = q.shape[-1]\n  assert feature_depth % num_heads == 0\n  head_depth = feature_depth \n  nbatch = np.shape(q)[0]\n  \n  def SplitHeads(x):\n    return np.transpose(\n        np.reshape(x, (nbatch, -1, num_heads, head_depth)), (0, 2, 1, 3))\n  \n  def JoinHeads(x):  \n    return np.reshape(\n        np.transpose(x, (0, 2, 1, 3)), (nbatch, -1, num_heads*head_depth))\n  \n  return JoinHeads(\n      DotProductAttention(\n          SplitHeads(q), SplitHeads(k), SplitHeads(v), mask,\n          dropout=dropout, mode=mode, rng=rng))", "docstring": "Pure transformer-style multi-headed attention.\n\nArgs:\nx: inputs ((q, k, v), mask)\nparams: parameters (none)\nnum_heads: int: number of attention heads\ndropout: float: dropout rate\nmode: str: 'train' or 'eval'\n**kwargs: other arguments including the rng\n\nReturns:\nPure Multi-headed attention layer (no Dense transforms on input).", "source": "juraj-google-style"}
{"code": "def return_item_count_on_subpage(self, subpage=1, total_items=1):\n        \n        up_to_subpage = ((subpage - 1) * self.subpage_items)\n        \n\n        if total_items > up_to_subpage:\n            \n            \n            count = total_items - up_to_subpage\n        else:\n            count = total_items\n\n        if count >= self.subpage_items:\n            \n            \n            return self.subpage_items\n        else:\n            \n            \n            return count", "docstring": "Return the number of items on page.\n\nArgs:\n* page = The Page to test for\n* total_items = the total item count\n\nReturns:\n* Integer - Which represents the calculated number of items on page.", "source": "juraj-google-style"}
{"code": "def _tokens_to_subtoken(self, tokens):\n    ret = []\n    for token in tokens:\n        ret.extend(self._escaped_token_to_subtoken_strings(_escape_token(token, self._alphabet)))\n    return ret", "docstring": "Converts a list of tokens to a list of subtoken.\n\nArgs:\ntokens: a list of strings.\nReturns:\na list of integers in the range [0, vocab_size)", "source": "codesearchnet"}
{"code": "def get_file_metadata(root):\n    \n    properties = {}\n\n    file_author = getattr(root.find('fileAuthor'), 'text', False)\n    \n    if not file_author:\n        raise MissingElementError('fileAuthor')\n    else:\n        properties['file-authors'] = [{'name': file_author}]\n\n    \n    properties['file-version'] = 0\n\n    \n    properties['chemked-version'] = __version__\n\n    return properties", "docstring": "Read and parse ReSpecTh XML file metadata (file author, version, etc.)\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with file metadata", "source": "juraj-google-style"}
{"code": "def _retrieve_info(self, http):\n        \n        if self.invalid:\n            info = _metadata.get_service_account_info(\n                http,\n                service_account=self.service_account_email or 'default')\n            self.invalid = False\n            self.service_account_email = info['email']\n            self.scopes = info['scopes']", "docstring": "Retrieves service account info for invalid credentials.\n\nArgs:\nhttp: an object to be used to make HTTP requests.", "source": "juraj-google-style"}
{"code": "def with_past(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfigWithPast':\n    return cls(config, task=task, use_past=True)", "docstring": "Instantiate a OnnxConfig with `use_past` attribute set to True\n\nArgs:\nconfig: The underlying model's config to use when exporting to ONNX\n\nReturns:\nOnnxConfig with `.use_past = True`", "source": "github-repos"}
{"code": "def print_fhir_to_json_string(fhir_proto: message.Message) -> str:\n    printer = _json_printer.JsonPrinter.compact_printer(_PRIMITIVE_HANDLER)\n    return printer.print(fhir_proto)", "docstring": "Returns a FHIR JSON representation with no spaces or newlines.\n\nArgs:\nfhir_proto: The proto to serialize into a JSON string.\n\nReturns:\nA FHIR JSON representation with no spaces or newlines.", "source": "github-repos"}
{"code": "def _cmd(self, command, uid=None):\n    if not uid:\n        uid = self.uid\n    self._client_send(json.dumps({'cmd': command, 'uid': uid}))\n    return self._client_receive()", "docstring": "Send a command to the server.\n\nArgs:\ncommand: str, The name of the command to execute.\nuid: int, the uid of the session to send the command to.\n\nReturns:\nThe line that was written back.", "source": "github-repos"}
{"code": "def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = IMessageEventData()\n    event_data.attachment_location = self._GetRowValue(\n        query_hash, row, 'attachment_location')\n    event_data.imessage_id = self._GetRowValue(query_hash, row, 'imessage_id')\n    event_data.message_type = self._GetRowValue(query_hash, row, 'message_type')\n    event_data.offset = self._GetRowValue(query_hash, row, 'ROWID')\n    event_data.query = query\n    event_data.read_receipt = self._GetRowValue(query_hash, row, 'read_receipt')\n    event_data.service = self._GetRowValue(query_hash, row, 'service')\n    event_data.text = self._GetRowValue(query_hash, row, 'text')\n\n    timestamp = self._GetRowValue(query_hash, row, 'date')\n    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a message row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def topological_sort(g):\n\n    def _is_loop_edge(op):\n        \n        return op.type in ['NextIteration']\n\n    def _in_op_degree(op):\n        \n        count = 0\n        for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:\n            if not _is_loop_edge(op):\n                count += 1\n        return count\n    sorted_ops = []\n    op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()}\n    frontier = [op for op, degree in op_in_degree.items() if degree == 0]\n    frontier.sort(key=lambda op: op.name)\n    while frontier:\n        op = frontier.pop()\n        sorted_ops.append(op)\n        if _is_loop_edge(op):\n            continue\n        consumers = list(op._control_outputs)\n        for out_tensor in op.outputs:\n            consumers += [consumer_op for consumer_op in out_tensor.consumers()]\n        consumers.sort(key=lambda op: op.name)\n        for consumer in consumers:\n            op_in_degree[consumer] -= 1\n            if op_in_degree[consumer] == 0:\n                frontier.append(consumer)\n            if op_in_degree[consumer] < 0:\n                raise ValueError('consumer:%s degree mismatch' % consumer.name)\n    left_ops = set((op for op, degree in op_in_degree.items() if degree > 0))\n    if left_ops:\n        return (True, left_ops)\n    else:\n        assert len(g.get_operations()) == len(sorted_ops)\n        return (False, sorted_ops)", "docstring": "Performs topological sort on the given graph.\n\nArgs:\ng: the graph.\n\nReturns:\nA pair where the first element indicates if the topological\nsort succeeded (True if there is no cycle found; False if a\ncycle is found) and the second element is either the sorted\nlist of nodes or the cycle of nodes found.", "source": "github-repos"}
{"code": "def frozen(cls: _Cls) -> _Cls:\n    if not isinstance(cls, type):\n        raise TypeError(f'{cls.__name__} is not')\n    cls.__init__ = _wrap_init(cls.__init__)\n    cls.__setattr__ = _wrap_setattr(cls.__setattr__)\n    return cls", "docstring": "Class decorator which prevent mutating attributes after `__init__`.\n\nExample:\n\n```python\n@epy.frozen\nclass A:\n\ndef __init__(self):\nself.x = 123\n\na = A()\na.x = 456  # AttributeError\n```\n\nSupports inheritance, child classes should explicitly be marked as\n`@epy.frozen` if they mutate additional attributes in `__init__`.\n\nArgs:\ncls: The class to freeze.\n\nReturns:\ncls: The class object", "source": "github-repos"}
{"code": "def _TrimNode(node, index, depth, flags):\n        \n        if depth == 1 or node.LeftChild is None:\n            return\n\n        if depth == 2:\n            if not flags[index * 2] and not flags[index * 2 + 1]:\n                node.LeftChild = None\n                node.RightChild = None\n\n        else:\n\n            MerkleTree._TrimNode(node.LeftChild, index * 2, depth - 1, flags)\n            MerkleTree._TrimNode(node.RightChild, index * 2, depth - 1, flags)\n\n            if node.LeftChild.LeftChild is None and node.RightChild.RightChild is None:\n                node.LeftChild = None\n                node.RightChild = None", "docstring": "Internal helper method to trim a node.\n\nArgs:\nnode (MerkleTreeNode):\nindex (int): flag index.\ndepth (int): node tree depth to start trim from.\nflags (bytearray): of left/right pairs. 1 byte for the left node, 1 byte for the right node.\n00 to erase, 11 to keep. Will keep the node if either left or right is not-0", "source": "juraj-google-style"}
{"code": "def build(self, query_shape, value_shape, key_shape=None):\n    key_shape = value_shape if key_shape is None else key_shape\n    if value_shape[1:-1] != key_shape[1:-1]:\n        raise ValueError(f'All dimensions of `value` and `key`, except the last one, must be equal. Received: value_shape={value_shape} and key_shape={key_shape}')\n    query_rank = len(query_shape)\n    value_rank = len(value_shape)\n    key_rank = len(key_shape)\n    einsum_equation, bias_axes, output_rank = _build_proj_equation(query_rank - 1, bound_dims=1, output_dims=2)\n    self._query_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._key_dim]), bias_axes=bias_axes if self._use_bias else None, name='query', **self._get_common_kwargs_for_sublayer())\n    self._query_dense.build(query_shape)\n    einsum_equation, bias_axes, output_rank = _build_proj_equation(key_rank - 1, bound_dims=1, output_dims=2)\n    self._key_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._key_dim]), bias_axes=bias_axes if self._use_bias else None, name='key', **self._get_common_kwargs_for_sublayer())\n    self._key_dense.build(key_shape)\n    einsum_equation, bias_axes, output_rank = _build_proj_equation(value_rank - 1, bound_dims=1, output_dims=2)\n    self._value_dense = EinsumDense(einsum_equation, output_shape=_get_output_shape(output_rank - 1, [self._num_heads, self._value_dim]), bias_axes=bias_axes if self._use_bias else None, name='value', **self._get_common_kwargs_for_sublayer())\n    self._value_dense.build(value_shape)\n    self._build_attention(output_rank)\n    self._output_dense = self._make_output_dense(query_shape, self._get_common_kwargs_for_sublayer(), 'attention_output')\n    output_dense_input_shape = list(self._query_dense.compute_output_shape(query_shape))\n    output_dense_input_shape[-1] = self._value_dim\n    self._output_dense.build(tuple(output_dense_input_shape))", "docstring": "Builds layers and variables.\n\nArgs:\nquery_shape: Shape of the `query` tensor.\nvalue_shape: Shape of the `value` tensor.\nkey: Optional shape of the `key` tensor.", "source": "github-repos"}
{"code": "def _WaitForStartup(self, deadline):\n    start = time.time()\n    sleep = 0.05\n\n    def Elapsed():\n        return (time.time() - start)\n    while True:\n        try:\n            (response, _) = self._http.request(self._host)\n            if (response.status == 200):\n                logging.info('emulator responded after %f seconds', Elapsed())\n                return True\n        except (socket.error, httplib.ResponseNotReady):\n            pass\n        if (Elapsed() >= deadline):\n            return False\n        else:\n            time.sleep(sleep)\n            sleep *= 2", "docstring": "Waits for the emulator to start.\n\nArgs:\ndeadline: deadline in seconds\n\nReturns:\nTrue if the emulator responds within the deadline, False otherwise.", "source": "codesearchnet"}
{"code": "def _build_statistics(self, input_batch, use_batch_stats, stat_dtype):\n    \n    \n    if self.MOVING_MEAN not in self._initializers:\n      self._initializers[self.MOVING_MEAN] = create_mean_initializer()\n    self._moving_mean = tf.get_variable(\n        \"moving_mean\",\n        dtype=stat_dtype,\n        shape=(self._num_channels,),\n        collections=[\n            tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n            tf.GraphKeys.GLOBAL_VARIABLES,\n        ],\n        initializer=self._initializers[self.MOVING_MEAN],\n        trainable=False)\n\n    if self.MOVING_VARIANCE not in self._initializers:\n      self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()\n    self._moving_variance = tf.get_variable(\n        \"moving_variance\",\n        dtype=stat_dtype,\n        shape=(self._num_channels,),\n        collections=[\n            tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n            tf.GraphKeys.GLOBAL_VARIABLES,\n        ],\n        initializer=self._initializers[self.MOVING_VARIANCE],\n        trainable=False)\n\n    def build_batch_stats():\n      \n      mean, variance = tf.nn.moments(input_batch, self._axis,\n                                     keep_dims=True, name=\"normalize_moments\")\n\n      return mean, variance\n\n    def build_moving_stats():\n      \n      \n      \n      input_dtype = input_batch.dtype.base_dtype\n      if stat_dtype == input_dtype:\n        return (\n            tf.identity(self._moving_mean),\n            tf.identity(self._moving_variance),\n        )\n      else:\n        return (\n            tf.cast(self._moving_mean, input_dtype),\n            tf.cast(self._moving_variance, input_dtype),\n        )\n\n    mean, variance = utils.smart_cond(\n        use_batch_stats,\n        build_batch_stats,\n        build_moving_stats,\n    )\n\n    return mean, variance", "docstring": "Builds the statistics part of the graph when using moving variance.\n\nArgs:\ninput_batch: Input batch Tensor.\nuse_batch_stats: Boolean to indicate if batch statistics should be\ncalculated, otherwise moving averages are returned.\nstat_dtype: TensorFlow datatype to use for the moving mean and variance.\n\nReturns:\nTuple of (mean, variance), each of the same datatype as `input_batch`.", "source": "juraj-google-style"}
{"code": "def tf_step(self, time, variables, **kwargs):\n        \n        deltas = self.optimizer.step(time=time, variables=variables, **kwargs)\n\n        with tf.control_dependencies(control_inputs=deltas):\n            clipped_deltas = list()\n            exceeding_deltas = list()\n            for delta in deltas:\n                clipped_delta = tf.clip_by_value(\n                    t=delta,\n                    clip_value_min=-self.clipping_value,\n                    clip_value_max=self.clipping_value\n                )\n                clipped_deltas.append(clipped_delta)\n                exceeding_deltas.append(clipped_delta - delta)\n\n        applied = self.apply_step(variables=variables, deltas=exceeding_deltas)\n\n        with tf.control_dependencies(control_inputs=(applied,)):\n            return [delta + 0.0 for delta in clipped_deltas]", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\ntime: Time tensor.\nvariables: List of variables to optimize.\n**kwargs: Additional arguments passed on to the internal optimizer.\n\nReturns:\nList of delta tensors corresponding to the updates for each optimized variable.", "source": "juraj-google-style"}
{"code": "def add(self, X):\n        \n        for each in X:\n            self.dpp_vector[each] = X[each]\n        self.fit(self.dpp_vector.reshape(1, -1))", "docstring": "Add data about known pipeline and scores.\n\nUpdates ``dpp_vector`` and refits model with all data.\n\nArgs:\nX (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a\ncolumn in ``dpp_matrix`` and values are the corresponding score for pipeline on\nthe dataset.", "source": "juraj-google-style"}
{"code": "def encoder(self, inputs, n_layers=3):\n    latent_dims = self.hparams.z_dim\n    shape_as_list = inputs.shape.as_list()\n    if (len(shape_as_list) != 5):\n        raise ValueError(('Expected inputs to be a 5-D, got %d' % len(shape_as_list)))\n    if (inputs.dtype != tf.float32):\n        raise ValueError(('Expected dtype tf.float32, got %s' % inputs.dtype))\n    (batch_size, _) = shape_as_list[:2]\n    inputs = tf.reshape(inputs, ([(- 1)] + list(inputs.shape)[2:]))\n    n_filters = 64\n    rectified = None\n    padding = [[0, 0], [1, 1], [1, 1], [0, 0]]\n    for i in range(n_layers):\n        with tf.variable_scope(('layer_%d' % (i + 1))):\n            n_filters *= (2 ** i)\n            if i:\n                padded = tf.pad(rectified, padding)\n            else:\n                padded = tf.pad(inputs, padding)\n            convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4, strides=2, padding='VALID')\n            normalized = tf.contrib.layers.instance_norm(convolved)\n            rectified = tf.nn.leaky_relu(normalized, alpha=0.2)\n    pooled = tf.nn.avg_pool(rectified, (([1] + rectified.shape[1:3].as_list()) + [1]), strides=[1, 1, 1, 1], padding='VALID')\n    squeezed = tf.squeeze(pooled, [1, 2])\n    with tf.variable_scope('z_mu'):\n        z_mu = tf.layers.dense(squeezed, latent_dims)\n    with tf.variable_scope('z_log_sigma_sq'):\n        z_log_var = tf.layers.dense(squeezed, latent_dims)\n        z_log_var = tf.clip_by_value(z_log_var, (- 10), 10)\n    z_mu = tf.reshape(z_mu, (batch_size, (- 1), latent_dims))\n    z_log_var = tf.reshape(z_log_var, (batch_size, (- 1), latent_dims))\n    return (z_mu, z_log_var)", "docstring": "Convnet that encodes inputs into mean and std of a gaussian.\n\nArgs:\ninputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)\nn_layers: Number of layers.\n\nReturns:\nz_mu: Mean of the latent gaussians.\nz_log_var: log(var) of the latent gaussians.\n\nRaises:\nValueError: If inputs is not a 5-D tensor or not float32.", "source": "codesearchnet"}
{"code": "def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):\n    new_resource.check_required_fields(ignore_fields=ignore_fields)\n    self.resources.append(new_resource)\n    if new_resource.get_file_to_upload():\n        filestore_resources.append(new_resource)\n        new_resource['url'] = Dataset.temporary_url", "docstring": "Helper method to add new resource from dataset including filestore.\n\nArgs:\nnew_resource (hdx.data.Resource): New resource from dataset\nignore_fields (List[str]): List of fields to ignore when checking resource\nfilestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __new__(cls, *args, **kwargs):\n        \n        self = super(cls, FrameSet).__new__(cls, *args, **kwargs)\n        return self", "docstring": "Initialize the :class:`FrameSet` object.\n\nArgs:\nfrange (str or :class:`FrameSet`): the frame range as a string (ie \"1-100x5\")\n\nRaises:\n:class:`.ParseException`: if the frame range\n(or a portion of it) could not be parsed.\n:class:`fileseq.exceptions.MaxSizeException`: if the range exceeds\n`fileseq.constants.MAX_FRAME_SIZE`", "source": "juraj-google-style"}
{"code": "def scale(self, new_volume: float) -> 'Lattice':\n    versors = (self.matrix / self.abc)\n    geo_factor = abs(dot(np.cross(versors[0], versors[1]), versors[2]))\n    ratios = (np.array(self.abc) / self.c)\n    new_c = ((new_volume / (geo_factor * np.prod(ratios))) ** (1 / 3.0))\n    return Lattice((versors * (new_c * ratios)))", "docstring": "Return a new Lattice with volume new_volume by performing a\nscaling of the lattice vectors so that length proportions and angles\nare preserved.\n\nArgs:\nnew_volume:\nNew volume to scale to.\n\nReturns:\nNew lattice with desired volume.", "source": "codesearchnet"}
{"code": "def More(contents, out, prompt=None, check_pager=True):\n    if not IsInteractive(output=True):\n        out.write(contents)\n        return\n    if check_pager:\n        pager = encoding.GetEncodedValue(os.environ, 'PAGER', None)\n        if pager == '-':\n            pager = None\n        elif not pager:\n            for command in ('less', 'pager'):\n                if files.FindExecutableOnPath(command):\n                    pager = command\n                    break\n        if pager:\n            less_orig = encoding.GetEncodedValue(os.environ, 'LESS', None)\n            less = '-R' + (less_orig or '')\n            encoding.SetEncodedValue(os.environ, 'LESS', less)\n            signal.signal(signal.SIGINT, signal.SIG_IGN)\n            p = subprocess.Popen(pager, stdin=subprocess.PIPE, shell=True)\n            enc = console_attr.GetConsoleAttr().GetEncoding()\n            p.communicate(input=contents.encode(enc))\n            p.wait()\n            signal.signal(signal.SIGINT, signal.SIG_DFL)\n            if less_orig is None:\n                encoding.SetEncodedValue(os.environ, 'LESS', None)\n            return\n    console_pager.Pager(contents, out, prompt).Run()", "docstring": "Run a user specified pager or fall back to the internal pager.\n\nArgs:\ncontents: The entire contents of the text lines to page.\nout: The output stream.\nprompt: The page break prompt.\ncheck_pager: Checks the PAGER env var and uses it if True.", "source": "github-repos"}
{"code": "def __init__(self, filesystem, os_module=None):\n        \n        self.filesystem = filesystem\n        self._os_path = self._OS_PATH_COPY\n        if os_module is None:\n            warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,\n                          stacklevel=2)\n        self._os_path.os = self.os = os_module\n        self.sep = self.filesystem.path_separator\n        self.altsep = self.filesystem.alternative_path_separator", "docstring": "Init.\n\nArgs:\nfilesystem: FakeFilesystem used to provide file system information\nos_module: (deprecated) FakeOsModule to assign to self.os", "source": "juraj-google-style"}
{"code": "def _deduplicate_indexed_slices(values, indices):\n    \n    unique_indices, new_index_positions = tf.unique(indices)\n    summed_values = tf.unsorted_segment_sum(values,\n                                            new_index_positions,\n                                            tf.shape(unique_indices)[0])\n    return (summed_values, unique_indices)", "docstring": "Sums `values` associated with any non-unique `indices`.\nArgs:\nvalues: A `Tensor` with rank >= 1.\nindices: A one-dimensional integer `Tensor`, indexing into the first\ndimension of `values` (as in an IndexedSlices object).\nReturns:\nA tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a\nde-duplicated version of `indices` and `summed_values` contains the sum of\n`values` slices associated with each unique index.", "source": "juraj-google-style"}
{"code": "def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):\n    if (rid is not None):\n        assert (type(rid) == list), 'rid must be a list. rid: {}'.format(rid)\n        rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if (gctoo_row in rid)]\n        num_missing_rids = (len(rid) - len(rows_to_keep))\n        if (num_missing_rids != 0):\n            logger.info('{} rids were not found in the GCT.'.format(num_missing_rids))\n    elif (row_bool is not None):\n        assert (len(row_bool) == gctoo.data_df.shape[0]), ('row_bool must have length equal to gctoo.data_df.shape[0]. ' + 'len(row_bool): {}, gctoo.data_df.shape[0]: {}'.format(len(row_bool), gctoo.data_df.shape[0]))\n        rows_to_keep = gctoo.data_df.index[row_bool].values\n    elif (ridx is not None):\n        assert (type(ridx[0]) is int), ('ridx must be a list of integers. ridx[0]: {}, ' + 'type(ridx[0]): {}').format(ridx[0], type(ridx[0]))\n        assert (max(ridx) <= gctoo.data_df.shape[0]), ('ridx contains an integer larger than the number of rows in ' + 'the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}').format(max(ridx), gctoo.data_df.shape[0])\n        rows_to_keep = gctoo.data_df.index[ridx].values\n    else:\n        rows_to_keep = gctoo.data_df.index.values\n    if (exclude_rid is not None):\n        rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if (row_to_keep not in exclude_rid)]\n    return rows_to_keep", "docstring": "Figure out based on the possible row inputs which rows to keep.\n\nArgs:\ngctoo (GCToo object):\nrid (list of strings):\nrow_bool (boolean array):\nridx (list of integers):\nexclude_rid (list of strings):\n\nReturns:\nrows_to_keep (list of strings): row ids to be kept", "source": "codesearchnet"}
{"code": "def attention_lm_attention_moe_tiny():\n    hparams = attention_lm_moe_small()\n    hparams.moe_layers = ''\n    hparams.attention_num_experts = 128\n    hparams.filter_size = 8192\n    hparams.attention_type = AttentionType.LOCAL_EXPERTS\n    return hparams", "docstring": "Cheap model for debugging.\n\nReturns:\nan hparams object.", "source": "codesearchnet"}
{"code": "def _parse(json_str: str, primitive_cls: Type[DateTime], *, default_timezone: str) -> DateTime:\n    try:\n        dt = datetime.datetime.strptime(json_str, '%Y')\n        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.YEAR, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        dt = datetime.datetime.strptime(json_str, '%Y-%m')\n        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.MONTH, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        dt = datetime.datetime.strptime(json_str, '%Y-%m-%d')\n        return _primitive_time_utils.build_date_like(dt, default_timezone, _primitive_time_utils.DateTimePrecision.DAY, primitive_cls)\n    except ValueError:\n        pass\n    datetime_str, timezone_str = _primitive_time_utils.split_timezone(json_str)\n    try:\n        dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S')\n        return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.DateTimePrecision.SECOND, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S.%f')\n        if _primitive_time_utils.PRECISION_PATTERN_MILLISECOND.search(datetime_str) is not None:\n            return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.DateTimePrecision.MILLISECOND, primitive_cls)\n        elif _primitive_time_utils.PRECISION_PATTERN_MICROSECOND.search(datetime_str) is not None:\n            return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.DateTimePrecision.MICROSECOND, primitive_cls)\n    except ValueError:\n        pass\n    raise fhir_errors.InvalidFhirError('Invalid DateTime.')", "docstring": "Parses the json_str into a DateTime FHIR primitive.\n\nArgs:\njson_str: The raw JSON string to parse.\nprimitive_cls: The FHIR primitive to parse into.\ndefault_timezone: The default timezone to use when parsing in the event that\nno timezone information is present.\n\nReturns:\nA FHIR primitive DateTime.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that no FHIR primitive DateTime\nformat was able to properly parse the json_str.", "source": "github-repos"}
{"code": "def merge_level_and_latent_dist(level_dist, latent_dist,\n                                merge_std=\"prev_level\"):\n  \n  level_mean, level_std = level_dist.loc, level_dist.scale\n  latent_mean, latent_std = latent_dist.loc, latent_dist.scale\n  new_mean = level_mean + latent_mean\n  if merge_std == \"normal\":\n    z_shape = common_layers.shape_list(latent_mean)\n    log_scale = tf.get_variable(\n        \"merge_std\", shape=z_shape, dtype=tf.float32,\n        initializer=tf.zeros_initializer(), trainable=False)\n    scale = tf.exp(log_scale * 3.0)\n  elif merge_std == \"prev_level\":\n    scale = level_std\n  elif merge_std == \"prev_step\":\n    scale = latent_std\n  return tfp.distributions.Normal(loc=new_mean, scale=scale)", "docstring": "Merge level_dist and latent_dist.\n\nnew_dist ~ N(level_dist.mean + latent_dis.mean, std) where std is determined\naccording to merge_std.\n\nArgs:\nlevel_dist: instance of tfp.distributions.Normal\nlatent_dist: instance of tfp.distributions.Normal\nmerge_std: can be \"prev_level\", \"prev_step\" or \"normal\".\nReturns:\nmerged_dist: instance of tfp.distributions.Normal", "source": "juraj-google-style"}
{"code": "def create_ingress_rule(self, app, rule):\n        \n        if isinstance(rule, dict):\n            \n            start_port = rule.get('start_port')\n            end_port = rule.get('end_port')\n            protocol = rule.get('protocol', 'tcp')\n\n            requested_cross_account = rule.get('env', self.env)\n            if self.env == requested_cross_account:\n                \n                \n                cross_account_env = None\n                cross_account_vpc_id = None\n            else:\n                cross_account_env = requested_cross_account\n                cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)\n\n        else:\n            start_port = rule\n            end_port = rule\n            protocol = 'tcp'\n            cross_account_env = None\n            cross_account_vpc_id = None\n\n        created_rule = {\n            'app': app,\n            'start_port': start_port,\n            'end_port': end_port,\n            'protocol': protocol,\n            'cross_account_env': cross_account_env,\n            'cross_account_vpc_id': cross_account_vpc_id\n        }\n        self.log.debug('Normalized ingress rule: %s', created_rule)\n        return created_rule", "docstring": "Create a normalized ingress rule.\n\nArgs:\napp (str): Application name\nrule (dict or int): Allowed Security Group ports and protocols.\n\nReturns:\ndict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id", "source": "juraj-google-style"}
{"code": "def get_course_id(self, course_uuid):\n        \n        course_data = self.get(\n            'courseguide/course?uuid={uuid}'.format(\n                uuid=course_uuid or self.course_id\n            ),\n            params=None\n        )\n        try:\n            return course_data['response']['docs'][0]['id']\n        except KeyError:\n            failure_message = ('KeyError in get_course_id - '\n                               'got {0}'.format(course_data))\n            log.exception(failure_message)\n            raise PyLmodUnexpectedData(failure_message)\n        except TypeError:\n            failure_message = ('TypeError in get_course_id - '\n                               'got {0}'.format(course_data))\n            log.exception(failure_message)\n            raise PyLmodUnexpectedData(failure_message)", "docstring": "Get course id based on uuid.\n\nArgs:\nuuid (str): course uuid, i.e. /project/mitxdemosite\n\nRaises:\nPyLmodUnexpectedData: No course data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\nint: numeric course id", "source": "juraj-google-style"}
{"code": "def backfill_previous_messages(self, reverse=False, limit=10):\n        \n        res = self.client.api.get_room_messages(self.room_id, self.prev_batch,\n                                                direction=\"b\", limit=limit)\n        events = res[\"chunk\"]\n        if not reverse:\n            events = reversed(events)\n        for event in events:\n            self._put_event(event)", "docstring": "Backfill handling of previous messages.\n\nArgs:\nreverse (bool): When false messages will be backfilled in their original\norder (old to new), otherwise the order will be reversed (new to old).\nlimit (int): Number of messages to go back.", "source": "juraj-google-style"}
{"code": "class AqlmConfig(QuantizationConfigMixin):\n\n    def __init__(self, in_group_size: int=8, out_group_size: int=1, num_codebooks: int=1, nbits_per_codebook: int=16, linear_weights_not_to_quantize: Optional[List[str]]=None, **kwargs):\n        self.quant_method = QuantizationMethod.AQLM\n        self.in_group_size = in_group_size\n        self.out_group_size = out_group_size\n        self.num_codebooks = num_codebooks\n        self.nbits_per_codebook = nbits_per_codebook\n        self.linear_weights_not_to_quantize = linear_weights_not_to_quantize\n        self.post_init()\n\n    def post_init(self):\n        \n        if not isinstance(self.in_group_size, int):\n            raise TypeError('in_group_size must be a float')\n        if not isinstance(self.out_group_size, int):\n            raise TypeError('out_group_size must be a float')\n        if not isinstance(self.num_codebooks, int):\n            raise TypeError('num_codebooks must be a float')\n        if not isinstance(self.nbits_per_codebook, int):\n            raise TypeError('nbits_per_codebook must be a float')\n        if self.linear_weights_not_to_quantize is not None and (not isinstance(self.linear_weights_not_to_quantize, list)):\n            raise ValueError('linear_weights_not_to_quantize must be a list of strings')\n        if self.linear_weights_not_to_quantize is None:\n            self.linear_weights_not_to_quantize = []", "docstring": "This is a wrapper class about `aqlm` parameters.\n\nArgs:\nin_group_size (`int`, *optional*, defaults to 8):\nThe group size along the input dimension.\nout_group_size (`int`, *optional*, defaults to 1):\nThe group size along the output dimension. It's recommended to always use 1.\nnum_codebooks (`int`, *optional*, defaults to 1):\nNumber of codebooks for the Additive Quantization procedure.\nnbits_per_codebook (`int`, *optional*, defaults to 16):\nNumber of bits encoding a single codebook vector. Codebooks size is 2**nbits_per_codebook.\nlinear_weights_not_to_quantize (`Optional[List[str]]`, *optional*):\nList of full paths of `nn.Linear` weight parameters that shall not be quantized.\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional parameters from which to initialize the configuration object.", "source": "github-repos"}
{"code": "def load_attributes_from_hdf5_group(group, name):\n    if name in group.attrs:\n        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]\n    else:\n        data = []\n        chunk_id = 0\n        while '%s%d' % (name, chunk_id) in group.attrs:\n            data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])\n            chunk_id += 1\n    return data", "docstring": "Loads attributes of the specified name from the HDF5 group.\n\nThis method deals with an inherent problem\nof HDF5 file which is not able to store\ndata larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\ngroup: A pointer to a HDF5 group.\nname: A name of the attributes to load.\n\nReturns:\ndata: Attributes data.", "source": "github-repos"}
{"code": "def replace_dimensions(cls, dimensions, overrides):\n        \n        from .dimension import Dimension\n        \n        replaced = []\n        for d in dimensions:\n            if d.name in overrides:\n                override = overrides[d.name]\n            elif d.label in overrides:\n                override = overrides[d.label]\n            else:\n                override = None\n\n            if override is None:\n                replaced.append(d)\n            elif isinstance(override, (util.basestring, tuple)):\n                replaced.append(d.clone(override))\n            elif isinstance(override, Dimension):\n                replaced.append(override)\n            elif isinstance(override, dict):\n                replaced.append(d.clone(override.get('name',None),\n                                        **{k:v for k,v in override.items() if k != 'name'}))\n            else:\n                raise ValueError('Dimension can only be overridden '\n                                 'with another dimension or a dictionary '\n                                 'of attributes')\n        return replaced", "docstring": "Replaces dimensions in list with dictionary of overrides.\n\nArgs:\ndimensions: List of dimensions\noverrides: Dictionary of dimension specs indexed by name\n\nReturns:\nlist: List of dimensions with replacements applied", "source": "juraj-google-style"}
{"code": "def load_dict(self, data, overwrite=False, auto_load_model=True):\n    for (k, v) in data.items():\n        if ((k not in self._elements.keys()) and (not auto_load_model)):\n            raise AttributeError('Model {} is not loaded'.format(k))\n        elif ((k not in self._elements.keys()) and auto_load_model):\n            self._load_model(k)\n        attr = getattr(self, k)\n        _load_dict(attr, v)", "docstring": "Load a dictionary into the model.\n\nArgs:\ndata(dict): Dictionary to load\noverwrite(bool): Whether the data present in the model should be overwritten by the\ndata in the dict or not.\nauto_load_model(bool): If set to true models will be loaded as they are needed\n\nExamples:\n\n>>> vlans_dict = {\n>>>     \"vlans\": { \"vlan\": { 100: {\n>>>                             \"config\": {\n>>>                                 \"vlan_id\": 100, \"name\": \"production\"}},\n>>>                          200: {\n>>>                             \"config\": {\n>>>                                 \"vlan_id\": 200, \"name\": \"dev\"}}}}}\n>>> config.load_dict(vlans_dict)\n>>> print(config.vlans.vlan.keys())\n... [200, 100]\n>>> print(100, config.vlans.vlan[100].config.name)\n... (100, u'production')\n>>> print(200, config.vlans.vlan[200].config.name)\n... (200, u'dev')", "source": "codesearchnet"}
{"code": "def plot_clicked(self, mouse_event):\n        \n        if isinstance(self.current_script, SelectPoints) and self.current_script.is_running:\n            if (not (mouse_event.xdata == None)):\n                if (mouse_event.button == 1):\n                    pt = np.array([mouse_event.xdata, mouse_event.ydata])\n                    self.current_script.toggle_NV(pt)\n                    self.current_script.plot([self.matplotlibwidget_1.figure])\n                    self.matplotlibwidget_1.draw()\n\n        item = self.tree_scripts.currentItem()\n\n        if item is not None:\n            if item.is_point():\n                item_x = item.child(1)\n                if mouse_event.xdata is not None:\n                    self.tree_scripts.setCurrentItem(item_x)\n                    item_x.value = float(mouse_event.xdata)\n                    item_x.setText(1, '{:0.3f}'.format(float(mouse_event.xdata)))\n                item_y = item.child(0)\n                if mouse_event.ydata is not None:\n                    self.tree_scripts.setCurrentItem(item_y)\n                    item_y.value = float(mouse_event.ydata)\n                    item_y.setText(1, '{:0.3f}'.format(float(mouse_event.ydata)))\n\n                \n                self.tree_scripts.setCurrentItem(item)\n            else:\n                if item.parent() is not None:\n                    if item.parent().is_point():\n                        if item == item.parent().child(1):\n                            if mouse_event.xdata is not None:\n                                item.setData(1, 2, float(mouse_event.xdata))\n                        if item == item.parent().child(0):\n                            if mouse_event.ydata is not None:\n                                item.setData(1, 2, float(mouse_event.ydata))", "docstring": "gets activated when the user clicks on a plot\nArgs:\nmouse_event:", "source": "juraj-google-style"}
{"code": "def _GetTimeElementsTuple(self, structure):\n    \n    month, day, hours, minutes, seconds = structure.date_time\n\n    \n    \n    month = timelib.MONTH_DICT.get(month.lower(), 0)\n\n    if month != 0 and month < self._last_month:\n      \n      self._year_use += 1\n\n    return (self._year_use, month, day, hours, minutes, seconds)", "docstring": "Retrieves a time elements tuple from the structure.\n\nArgs:\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nReturns:\ntuple: containing:\nyear (int): year.\nmonth (int): month, where 1 represents January.\nday_of_month (int): day of month, where 1 is the first day of the month.\nhours (int): hours.\nminutes (int): minutes.\nseconds (int): seconds.", "source": "juraj-google-style"}
{"code": "def _run_dnb_normalization(self, dnb_data, sza_data):\n    dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))\n    sza_data = xr.DataArray(sza_data, dims=('y', 'x'))\n    good_mask = (~ (dnb_data.isnull() | sza_data.isnull()))\n    output_dataset = dnb_data.where(good_mask)\n    output_dataset = output_dataset.values.copy()\n    dnb_data = dnb_data.values\n    sza_data = sza_data.values\n    (day_mask, mixed_mask, night_mask) = make_day_night_masks(sza_data, good_mask.values, self.high_angle_cutoff, self.low_angle_cutoff, stepsDegrees=self.mixed_degree_step)\n    did_equalize = False\n    if day_mask.any():\n        LOG.debug('Histogram equalizing DNB day data...')\n        histogram_equalization(dnb_data, day_mask, out=output_dataset)\n        did_equalize = True\n    if mixed_mask:\n        for mask in mixed_mask:\n            if mask.any():\n                LOG.debug('Histogram equalizing DNB mixed data...')\n                histogram_equalization(dnb_data, mask, out=output_dataset)\n                did_equalize = True\n    if night_mask.any():\n        LOG.debug('Histogram equalizing DNB night data...')\n        histogram_equalization(dnb_data, night_mask, out=output_dataset)\n        did_equalize = True\n    if (not did_equalize):\n        raise RuntimeError('No valid data found to histogram equalize')\n    return output_dataset", "docstring": "Scale the DNB data using a histogram equalization method.\n\nArgs:\ndnb_data (ndarray): Day/Night Band data array\nsza_data (ndarray): Solar Zenith Angle data array", "source": "codesearchnet"}
{"code": "def run(cmd: str,\n        *paths: str,\n        cwd: str = '.',\n        mute: bool = False,\n        filters: typing.Optional[typing.Union[typing.Iterable[str], str]] = None,\n        failure_ok: bool = False,\n        timeout: float = _DEFAULT_PROCESS_TIMEOUT,\n        ) -> typing.Tuple[str, int]:\n    \n\n    filters = _sanitize_filters(filters)\n\n    exe_path, args_list = _parse_cmd(cmd, *paths)\n\n    context = RunContext(  \n        exe_path=exe_path,\n        capture=sarge.Capture(),\n        failure_ok=failure_ok,\n        mute=mute,\n        args_list=args_list,\n        paths=paths,\n        cwd=cwd,\n        timeout=timeout,\n        filters=filters,\n    )\n\n    if mute:\n        context.result_buffer += f'{context.cmd_as_string}'\n    else:\n        _LOGGER_PROCESS.info('%s: running', context.cmd_as_string)\n\n    context.start_process()\n    monitor_running_process(context)\n    check_error(context)\n\n    return context.process_output_as_str, context.return_code", "docstring": "Executes a command and returns the result\n\nArgs:\ncmd: command to execute\npaths: paths to search executable in\ncwd: working directory (defaults to \".\")\nmute: if true, output will not be printed\nfilters: gives a list of partial strings to filter out from the output (stdout or stderr)\nfailure_ok: if False (default), a return code different than 0 will exit the application\ntimeout: sub-process timeout\n\nReturns: command output", "source": "juraj-google-style"}
{"code": "def load(filename, instruments = None):\n        \n        with open(filename, 'r') as infile:\n            dataPickle = infile.read()\n\n        script_as_dict = pickle.loads(dataPickle)\n        script_class = script_as_dict['_script_class']\n\n        script_instance, _, updated_instruments = Script.load_and_append({'script': script_class}, instruments = instruments)\n        script_instance = script_instance['script']\n\n        \n        instruments = script_instance._instruments\n\n        \n        script_instance.__dict__ = script_as_dict\n\n        \n        script_instance._instruments = instruments\n\n        return script_instance, updated_instruments", "docstring": "loads an script instance using pickle\nArgs:\nfilename: source filename\ninstruments:\noptional - only needed if script requires instruments\ndictionary of form\n\ninstruments = {\nname_of_instrument_1 : instance_of_instrument_1,\nname_of_instrument_2 : instance_of_instrument_2,\n...\n}\nReturns:\nscript_instance\nupdated_instruments", "source": "juraj-google-style"}
{"code": "def gff3_verifier(entries, line=None):\n    regex = ('^[a-zA-Z0-9.:^*$@!+_?-|]+\\\\t.+\\\\t.+\\\\t\\\\d+\\\\t\\\\d+\\\\t' + '\\\\d*\\\\.?\\\\d*\\\\t[+-.]\\\\t[.0-2]\\\\t.+{0}$'.format(os.linesep))\n    delimiter = '\\\\t'\n    for entry in entries:\n        try:\n            entry_verifier([entry.write()], regex, delimiter)\n        except FormatError as error:\n            if line:\n                intro = 'Line {0}'.format(str(line))\n            elif (error.part == 0):\n                intro = 'Entry with source {0}'.format(entry.source)\n            else:\n                intro = 'Entry with Sequence ID {0}'.format(entry.seqid)\n            if (error.part == 0):\n                msg = '{0} has no Sequence ID'.format(intro)\n            elif (error.part == 1):\n                msg = '{0} has no source'.format(intro)\n            elif (error.part == 2):\n                msg = '{0} has non-numerical characters in type'.format(intro)\n            elif (error.part == 3):\n                msg = '{0} has non-numerical characters in start position'.format(intro)\n            elif (error.part == 4):\n                msg = '{0} has non-numerical characters in end position'.format(intro)\n            elif (error.part == 5):\n                msg = '{0} has non-numerical characters in score'.format(intro)\n            elif (error.part == 6):\n                msg = '{0} strand not in [+-.]'.format(intro)\n            elif (error.part == 7):\n                msg = '{0} phase not in [.0-2]'.format(intro)\n            elif (error.part == 8):\n                msg = '{0} has no attributes'.format(intro)\n            else:\n                msg = 'Unknown Error: Likely a Bug'\n            raise FormatError(message=msg)\n        if line:\n            line += 1", "docstring": "Raises error if invalid GFF3 format detected\n\nArgs:\nentries (list): A list of GFF3Entry instances\n\nline (int): Line number of first entry\n\nRaises:\nFormatError: Error when GFF3 format incorrect with descriptive message", "source": "codesearchnet"}
{"code": "def __init__(self, *,  \n                 picos: Union[int, float] = 0,\n                 nanos: Union[int, float] = 0) -> None:\n        \n\n        if picos and nanos:\n            self._picos = picos + nanos * 1000\n        else:\n            \n            self._picos = nanos * 1000 if nanos else picos", "docstring": "Initializes a Timestamp with a time specified in ns and/or ps.\n\nThe time is relative to some unspecified \"time zero\". If both picos and\nnanos are specified, their contributions away from zero are added.\n\nArgs:\npicos: How many picoseconds away from time zero?\nnanos: How many nanoseconds away from time zero?", "source": "juraj-google-style"}
{"code": "def _kl_chi_chi(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_chi_chi\"):\n    \n    \n    \n    \n    return (0.5 * tf.math.digamma(0.5 * a.df) * (a.df - b.df) +\n            tf.math.lgamma(0.5 * b.df) - tf.math.lgamma(0.5 * a.df))", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Chi.\n\nArgs:\na: instance of a Chi distribution object.\nb: instance of a Chi distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_chi_chi\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "def parse_header(line):\n    \n    if not line or line == \"\\r\\n\":\n        return None\n    if line[0] in \" \\t\":\n        return line[1:].rstrip()\n    name, value = line.split(\":\", 1)\n    return (name.strip(), value.strip())", "docstring": "Parse a header line.\n\nArgs:\nline: A header line as a string.\n\nReturns:\nNone if end of headers is found. A string giving the continuation line\nif a continuation is found. A tuple of name, value when a header line is\nfound.\n\nRaises:\nValueError: If the line cannot be parsed as a header.", "source": "juraj-google-style"}
{"code": "def _build_trial_meta(cls, expr_dir):\n    meta_file = os.path.join(expr_dir, EXPR_META_FILE)\n    meta = parse_json(meta_file)\n    if (not meta):\n        job_id = expr_dir.split('/')[(- 2)]\n        trial_id = expr_dir[(- 8):]\n        params = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))\n        meta = {'trial_id': trial_id, 'job_id': job_id, 'status': 'RUNNING', 'type': 'TUNE', 'start_time': os.path.getctime(expr_dir), 'end_time': None, 'progress_offset': 0, 'result_offset': 0, 'params': params}\n    if (not meta.get('start_time', None)):\n        meta['start_time'] = os.path.getctime(expr_dir)\n    if isinstance(meta['start_time'], float):\n        meta['start_time'] = timestamp2date(meta['start_time'])\n    if meta.get('end_time', None):\n        meta['end_time'] = timestamp2date(meta['end_time'])\n    meta['params'] = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))\n    return meta", "docstring": "Build meta file for trial.\n\nArgs:\nexpr_dir (str): Directory path of the experiment.\n\nReturn:\nA dict of trial meta info.", "source": "codesearchnet"}
{"code": "def checksum(self, path):\n    try:\n        return s3io.S3IO(options=self._options).checksum(path)\n    except Exception as e:\n        raise BeamIOError('Checksum operation failed', {path: e})", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nArgs:\npath: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def append(self, item):\n        \n        if isinstance(item, list):\n            self.extend(item)\n        elif issubclass(item.__class__, self._pyof_class):\n            list.append(self, item)\n        else:\n            raise exceptions.WrongListItemType(item.__class__.__name__,\n                                               self._pyof_class.__name__)", "docstring": "Append one item to the list.\n\nArgs:\nitem: Item to be appended. Its type must match the one defined in\nthe constructor.\n\nRaises:\n:exc:`~.exceptions.WrongListItemType`: If the item has a different\ntype than the one specified in the constructor.", "source": "juraj-google-style"}
{"code": "def with_min_memory(self, min_bytes=0, min_peak_bytes=0, min_residual_bytes=0, min_output_bytes=0):\n    self._options['min_bytes'] = min_bytes\n    self._options['min_peak_bytes'] = min_peak_bytes\n    self._options['min_residual_bytes'] = min_residual_bytes\n    self._options['min_output_bytes'] = min_output_bytes\n    return self", "docstring": "Only show profiler nodes consuming no less than 'min_bytes'.\n\nArgs:\nmin_bytes: Only show profiler nodes requested to allocate no less bytes\nthan this.\nmin_peak_bytes: Only show profiler nodes using no less than this bytes\nat peak (high watermark). For profiler nodes consist of multiple\ngraph nodes, it sums the graph nodes' peak_bytes.\nmin_residual_bytes: Only show profiler nodes have no less than\nthis bytes not being de-allocated after Compute() ends. For\nprofiler nodes consist of multiple graph nodes, it sums the\ngraph nodes' residual_bytes.\nmin_output_bytes: Only show profiler nodes have no less than this bytes\noutput. The output are not necessarily allocated by this profiler\nnodes.\nReturns:\nself", "source": "github-repos"}
{"code": "def generate(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:\n    if hasattr(self, 'hf_device_map'):\n        self._preprocess_accelerate()\n    batch_size = pixel_values.shape[0]\n    image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state\n    image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n    query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n    query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)\n    query_output = query_outputs.last_hidden_state\n    if query_output.dtype != image_embeds.dtype:\n        query_output = query_output.to(image_embeds.dtype)\n    language_model_inputs = self.language_projection(query_output)\n    language_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device)\n    if input_ids is None:\n        start_tokens = [self.config.text_config.bos_token_id]\n        if getattr(self.config, 'image_token_id', None) is not None:\n            start_tokens = [self.config.image_token_id] * self.config.num_query_tokens + start_tokens\n        input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device)\n        input_ids = input_ids.repeat(batch_size, 1)\n    inputs_embeds = self.get_input_embeddings()(input_ids)\n    if attention_mask is None:\n        attention_mask = torch.ones_like(input_ids)\n    if getattr(self.config, 'image_token_id', None) is not None:\n        special_image_mask = (input_ids == self.config.image_token_id).unsqueeze(-1).expand_as(inputs_embeds)\n        inputs_embeds[special_image_mask] = language_model_inputs.flatten()\n    else:\n        logger.warning_once('Expanding inputs for image tokens in BLIP-2 should be done in processing. 
Please follow instruction here (https:\n        inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)\n        attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)\n        if not self.language_model.config.is_encoder_decoder:\n            generate_kwargs['max_length'] = generate_kwargs.get('max_length', 20) + language_model_inputs.shape[1] - 1\n            generate_kwargs['min_length'] = generate_kwargs.get('min_length', 0) + language_model_inputs.shape[1]\n    inputs = {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask}\n    if not self.language_model.config.is_encoder_decoder:\n        inputs['input_ids'] = input_ids\n    outputs = self.language_model.generate(**inputs, **generate_kwargs)\n    return outputs", "docstring": "Overrides `generate` function to be able to use the model as a conditional generator.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\nInput images to be processed.\ninput_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\nThe sequence used as a prompt for the generation.\nattention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\nMask to avoid performing attention on padding token indices\n\nReturns:\ncaptions (list): A list of strings of length batch_size * num_captions.", "source": "github-repos"}
{"code": "def _get_parseable_methods(cls):\n    \n    _LOG.debug(\"Retrieving parseable methods for '%s'\", cls.__name__)\n    init_parser = None\n    methods_to_parse = {}\n    for name, obj in vars(cls).items():\n        \n        \n        \n        \n        if callable(obj) and hasattr(obj, \"parser\"):\n            _LOG.debug(\"Found method '%s'\", name)\n            if name == \"__init__\":\n                \n                \n                init_parser = obj.parser\n            else:\n                methods_to_parse[obj.__name__] = obj.parser\n    return (init_parser, methods_to_parse)", "docstring": "Return all methods of cls that are parseable i.e. have been decorated\nby '@create_parser'.\n\nArgs:\ncls: the class currently being decorated\n\nNote:\nclassmethods will not be included as they can only be referenced once\nthe class has been defined\nReturns:\na 2-tuple with the parser of the __init__ method if any and a dict\nof the form {'method_name': associated_parser}", "source": "juraj-google-style"}
{"code": "def RunStateMethod(self, method_name, request=None, responses=None):\n    \n    if self._TerminationPending():\n      return\n\n    client_id = None\n    try:\n      self.context.current_state = method_name\n      if request and responses:\n        client_id = request.client_id or self.runner_args.client_id\n        logging.debug(\"%s Running %s with %d responses from %s\",\n                      self.session_id, method_name, len(responses), client_id)\n\n      else:\n        logging.debug(\"%s Running state method %s\", self.session_id,\n                      method_name)\n\n      \n      self.flow_obj.HeartBeat()\n      try:\n        method = getattr(self.flow_obj, method_name)\n      except AttributeError:\n        raise FlowRunnerError(\"Flow %s has no state method %s\" %\n                              (self.flow_obj.__class__.__name__, method_name))\n\n      \n      responses = flow_responses.Responses.FromLegacyResponses(\n          request=request, responses=responses)\n\n      self.SaveResourceUsage(responses.status)\n\n      stats_collector_instance.Get().IncrementCounter(\"grr_worker_states_run\")\n\n      if method_name == \"Start\":\n        stats_collector_instance.Get().IncrementCounter(\n            \"flow_starts\", fields=[self.flow_obj.Name()])\n        method()\n      else:\n        method(responses)\n\n      if self.sent_replies:\n        self.ProcessRepliesWithOutputPlugins(self.sent_replies)\n        self.sent_replies = []\n\n    \n    \n    except Exception as e:  \n      \n\n      \n      stats_collector_instance.Get().IncrementCounter(\"grr_flow_errors\")\n\n      stats_collector_instance.Get().IncrementCounter(\n          \"flow_errors\", fields=[self.flow_obj.Name()])\n      logging.exception(\"Flow %s raised %s.\", self.session_id, e)\n\n      self.Error(traceback.format_exc(), client_id=client_id)", "docstring": "Completes the request by calling the state method.\n\nArgs:\nmethod_name: The name of the state method to call.\nrequest: A RequestState protobuf.\nresponses: A list of GrrMessages responding to the request.", "source": "juraj-google-style"}
{"code": "def validate(filename=None, ocrd_page=None, ocrd_file=None, strictness='strict', strategy='index1'):\n    if ocrd_page:\n        validator = PageValidator(ocrd_page, strictness, strategy)\n    elif ocrd_file:\n        validator = PageValidator(page_from_file(ocrd_file), strictness, strategy)\n    elif filename:\n        validator = PageValidator(parse(filename, silence=True), strictness, strategy)\n    else:\n        raise Exception('At least one of ocrd_page, ocrd_file or filename must be set')\n    return validator._validate()", "docstring": "Validates a PAGE file for consistency by filename, OcrdFile or passing OcrdPage directly.\n\nArguments:\nfilename (string): Path to PAGE\nocrd_page (OcrdPage): OcrdPage instance\nocrd_file (OcrdFile): OcrdFile instance wrapping OcrdPage\nstrictness (string): 'strict', 'lax', 'fix' or 'off'\nstrategy (string): Currently only 'index1'\n\nReturns:\nreport (:class:`ValidationReport`) Report on the validity", "source": "codesearchnet"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_2_0):\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the Attributes object.'.format(kmip_version.value))\n    super(Attributes, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    while True:\n        if (len(local_stream) < 3):\n            break\n        tag = struct.unpack('!I', (b'\\x00' + local_stream.peek(3)))[0]\n        if enums.is_enum_value(enums.Tags, tag):\n            tag = enums.Tags(tag)\n            if (not enums.is_attribute(tag, kmip_version=kmip_version)):\n                raise exceptions.AttributeNotSupported('Attribute {} is not supported by KMIP {}.'.format(tag.name, kmip_version.value))\n            value = self._factory.create_attribute_value_by_enum(tag, None)\n            value.read(local_stream, kmip_version=kmip_version)\n            self._attributes.append(value)\n        else:\n            break\n    self.is_oversized(local_stream)", "docstring": "Read the data stream and decode the Attributes structure into its\nparts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nAttributeNotSupported: Raised if an unsupported attribute is\nencountered while decoding.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the Attributes object.", "source": "codesearchnet"}
{"code": "def conversations_setTopic(self, *, channel: str, topic: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'topic': topic})\n    return self.api_call('conversations.setTopic', json=kwargs)", "docstring": "Sets the topic for a conversation.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\ntopic (str): The new topic for the channel. e.g. 'My Topic'", "source": "codesearchnet"}
{"code": "def _testZeroDensity(self, alpha):\n    try:\n        from scipy import stats\n    except ImportError as e:\n        tf_logging.warn('Cannot test zero density proportions: %s' % e)\n        return\n    allowable_zeros = {dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny), dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny), dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)}\n    failures = []\n    for dt in (dtypes.float16, dtypes.float32, dtypes.float64):\n        sampler = self._Sampler(10000, alpha, 1.0, dt, seed=12345)\n        x = sampler()\n        allowable = allowable_zeros[dt] * x.size\n        allowable = allowable * 2 if allowable < 10 else allowable * 1.05\n        if np.sum(x <= 0) > allowable:\n            failures += [dt]\n    self.assertEqual([], failures)", "docstring": "Zero isn't in the support of the gamma distribution.\n\nBut quantized floating point math has its limits.\nTODO(bjp): Implement log-gamma sampler for small-shape distributions.\n\nArgs:\nalpha: float shape value to test", "source": "github-repos"}
{"code": "def _ParseCacheEntries(self, parser_mediator, index_table, data_block_files):\n    for cache_address in index_table:\n        cache_address_chain_length = 0\n        while (cache_address.value != 0):\n            if (cache_address_chain_length >= 64):\n                parser_mediator.ProduceExtractionWarning('Maximum allowed cache address chain length reached.')\n                break\n            data_block_file_object = data_block_files.get(cache_address.filename, None)\n            if (not data_block_file_object):\n                message = 'Cache address: 0x{0:08x} missing data file.'.format(cache_address.value)\n                parser_mediator.ProduceExtractionWarning(message)\n                break\n            try:\n                cache_entry = self._data_block_file_parser.ParseCacheEntry(data_block_file_object, cache_address.block_offset)\n            except (IOError, errors.ParseError) as exception:\n                parser_mediator.ProduceExtractionWarning('Unable to parse cache entry with error: {0!s}'.format(exception))\n                break\n            event_data = ChromeCacheEntryEventData()\n            event_data.original_url = cache_entry.original_url\n            date_time = dfdatetime_webkit_time.WebKitTime(timestamp=cache_entry.creation_time)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n            cache_address = cache_entry.next\n            cache_address_chain_length += 1", "docstring": "Parses Chrome Cache file entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nindex_table (list[CacheAddress]): the cache addresses which are stored in\nthe index file.\ndata_block_files (dict[str: file]): look up table for the data block\nfile-like object handles.", "source": "codesearchnet"}
{"code": "def check_valid(money):\n    if (not isinstance(money, sc_messages.Money)):\n        raise ValueError((u'Inputs should be of type %s' % (sc_messages.Money,)))\n    currency = money.currencyCode\n    if ((not currency) or (len(currency) != 3)):\n        raise ValueError(_MSG_3_LETTERS_LONG)\n    units = money.units\n    nanos = money.nanos\n    if (((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0))):\n        raise ValueError(_MSG_UNITS_NANOS_MISMATCH)\n    if (abs(nanos) > MAX_NANOS):\n        raise ValueError(_MSG_NANOS_OOB)", "docstring": "Determine if an instance of `Money` is valid.\n\nArgs:\nmoney (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the\ninstance to test\n\nRaises:\nValueError: if the money instance is invalid", "source": "codesearchnet"}
{"code": "def BindScope(self, scope_id, values):\n    \n    if scope_id not in self._scopes:\n      raise KeyError(scope_id)\n\n    keys = set(iterkeys(values))\n    if keys != self._scopes[scope_id]:\n      raise KeyError(keys ^ self._scopes[scope_id])\n\n    self._scope_bindings[scope_id].append(values)", "docstring": "Associates given values with given scope.\n\nThis can be called multiple times to associate multiple values.\n\nArgs:\nscope_id: A scope id to bind the values to.\nvalues: A mapping from scope variable ids to values to bind in scope.\n\nRaises:\nKeyError: If given scope or scope variable is not specified in the\npattern.", "source": "juraj-google-style"}
{"code": "def lap(self):\n    now = time.time()\n    lap_time = (now - self.lap_time)\n    total_time = (now - self.start)\n    self.lap_time = now\n    return (lap_time, total_time)", "docstring": "Calculate lap time.\n\nReturns:\nfloat: Lap time. The duration from the previous call of ``lap()``\nor initialization at first call.\nfloat: Total time. The duration from initialization.", "source": "codesearchnet"}
{"code": "def MessageToJson(message, including_default_value_fields=False):\n    js = _MessageToJsonObject(message, including_default_value_fields)\n    return json.dumps(js, indent=2)", "docstring": "Converts protobuf message to JSON format.\n\nArgs:\nmessage: The protocol buffers message instance to serialize.\nincluding_default_value_fields: If True, singular primitive fields,\nrepeated fields, and map fields will always be serialized.  If\nFalse, only serialize non-empty fields.  Singular message fields\nand oneof fields are not affected by this option.\n\nReturns:\nA string containing the JSON formatted protocol buffer message.", "source": "codesearchnet"}
{"code": "def range_dimension(self):\n    if self.shape.dims:\n        return self.shape.dims[-2]\n    else:\n        return tensor_shape.Dimension(None)", "docstring": "Dimension (in the sense of vector spaces) of the range of this operator.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\nReturns:\n`Dimension` object.", "source": "github-repos"}
{"code": "def get_loss_func(self, C=1.0, k=1):\n\n    def lf(x):\n        (mu, ln_var) = self.encode(x)\n        batchsize = len(mu.data)\n        rec_loss = 0\n        for l in six.moves.range(k):\n            z = F.gaussian(mu, ln_var)\n            rec_loss += (F.bernoulli_nll(x, self.decode(z, sigmoid=False)) / (k * batchsize))\n        self.rec_loss = rec_loss\n        self.loss = (self.rec_loss + ((C * gaussian_kl_divergence(mu, ln_var)) / batchsize))\n        return self.loss\n    return lf", "docstring": "Get loss function of VAE.\n\nThe loss value is equal to ELBO (Evidence Lower Bound)\nmultiplied by -1.\n\nArgs:\nC (int): Usually this is 1.0. Can be changed to control the\nsecond term of ELBO bound, which works as regularization.\nk (int): Number of Monte Carlo samples used in encoded vector.", "source": "codesearchnet"}
{"code": "def check_value(config, section, option, jinja_pattern=JINJA_PATTERN):\n    value = config[section][option]\n    if re.match(jinja_pattern, value):\n        return None\n    return value", "docstring": "try to figure out if value is valid or jinja2 template value\n\nArgs:\nconfig (:obj:`configparser.ConfigParser`): config object to read key from\nsection (str): name of section in configparser\noption (str): name of option in configparser\njinja_pattern (:obj:`_sre.SRE_Pattern`): a `re.compile()` pattern to match on\n\nReturns:\nstr: value if value, else None\n\nRaises:\nKeyError:\nconfigparser.NoOptionError:\nconfigparser.NoSectionError:", "source": "codesearchnet"}
{"code": "def _do_retrieve_scopes(self, http, token):\n    logger.info('Refreshing scopes')\n    query_params = {'access_token': token, 'fields': 'scope'}\n    token_info_uri = _helpers.update_query_params(self.token_info_uri, query_params)\n    (resp, content) = transport.request(http, token_info_uri)\n    content = _helpers._from_bytes(content)\n    if (resp.status == http_client.OK):\n        d = json.loads(content)\n        self.scopes = set(_helpers.string_to_scopes(d.get('scope', '')))\n    else:\n        error_msg = 'Invalid response {0}.'.format(resp.status)\n        try:\n            d = json.loads(content)\n            if ('error_description' in d):\n                error_msg = d['error_description']\n        except (TypeError, ValueError):\n            pass\n        raise Error(error_msg)", "docstring": "Retrieves the list of authorized scopes from the OAuth2 provider.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\ntoken: A string used as the token to identify the credentials to\nthe provider.\n\nRaises:\nError: When refresh fails, indicating the the access token is\ninvalid.", "source": "codesearchnet"}
{"code": "def update_state(self, values, sample_weight=None):\n    values = math_ops.cast(values, self._dtype)\n    if not self._built:\n        self._build(values.shape)\n    elif values.shape != self._shape:\n        raise ValueError('MeanTensor input values must always have the same shape. Expected shape (set during the first call): {}. Got: {}'.format(self._shape, values.shape))\n    num_values = array_ops.ones_like(values)\n    if sample_weight is not None:\n        sample_weight = math_ops.cast(sample_weight, self._dtype)\n        values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(values, sample_weight=sample_weight)\n        try:\n            sample_weight = weights_broadcast_ops.broadcast_weights(sample_weight, values)\n        except ValueError:\n            ndim = backend.ndim(values)\n            weight_ndim = backend.ndim(sample_weight)\n            values = math_ops.reduce_mean(values, axis=list(range(weight_ndim, ndim)))\n        num_values = math_ops.multiply(num_values, sample_weight)\n        values = math_ops.multiply(values, sample_weight)\n    update_total_op = self._total.assign_add(values)\n    with ops.control_dependencies([update_total_op]):\n        return self._count.assign_add(num_values)", "docstring": "Accumulates statistics for computing the element-wise mean.\n\nArgs:\nvalues: Per-example value.\nsample_weight: Optional weighting of each example. Defaults to 1.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def add(self, key, value):\n    if isinstance(value, list):\n        for val in value:\n            self._add_arg_python(key, val)\n    elif isinstance(value, dict):\n        err = 'Dictionary types are not currently supported for field.'\n        print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))\n    else:\n        mask = False\n        env_var = re.compile('^\\\\$env\\\\.(.*)$')\n        envs_var = re.compile('^\\\\$envs\\\\.(.*)$')\n        if env_var.match(str(value)):\n            env_key = env_var.match(str(value)).groups()[0]\n            value = os.environ.get(env_key, value)\n        elif envs_var.match(str(value)):\n            env_key = envs_var.match(str(value)).groups()[0]\n            value = os.environ.get(env_key, value)\n            mask = True\n        self._add_arg(key, value, mask)", "docstring": "Add CLI Arg to lists value.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).", "source": "codesearchnet"}
{"code": "def set(pb_or_dict, key, value):\n    \n    \n    \n    \n\n    \n    if not isinstance(pb_or_dict, (collections.MutableMapping, Message)):\n        raise TypeError('Tried to set a key %s on an invalid object; '\n                        'expected a dict or protobuf message.' % key)\n\n    \n    key, subkey = _resolve_subkeys(key)\n\n    \n    \n    if subkey is not None:\n        if isinstance(pb_or_dict, collections.MutableMapping):\n            pb_or_dict.setdefault(key, {})\n        set(get(pb_or_dict, key), subkey, value)\n        return\n\n    \n    \n    if isinstance(pb_or_dict, collections.MutableMapping):\n        pb_or_dict[key] = value\n    elif isinstance(value, (collections.MutableSequence, tuple)):\n        \n        \n        while getattr(pb_or_dict, key):\n            getattr(pb_or_dict, key).pop()\n\n        \n        for item in value:\n            if isinstance(item, collections.Mapping):\n                getattr(pb_or_dict, key).add(**item)\n            else:\n                getattr(pb_or_dict, key).extend([item])\n    elif isinstance(value, collections.Mapping):\n        \n        for item_key, item_value in value.items():\n            set(getattr(pb_or_dict, key), item_key, item_value)\n    elif isinstance(value, Message):\n        \n        for item_key, item_value in value.ListFields():\n            set(getattr(pb_or_dict, key), item_key.name, item_value)\n    else:\n        setattr(pb_or_dict, key, value)", "docstring": "Set the given key on the object.\n\nArgs:\npb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the\nobject.\nkey (str): The key on the object in question.\nvalue (Any): The value to set.\n\nRaises:\nTypeError: If pb_or_dict is not a Message or Mapping.", "source": "juraj-google-style"}
{"code": "def write(self, output, mode='w', keep_rc=False):\n    if isinstance(output, six.string_types):\n        already_exists = os.path.exists(output)\n        try:\n            with open(output, mode) as f:\n                p = self._build_pipes(f)\n                rc = p.wait()\n                if keep_rc:\n                    return rc\n                if rc:\n                    raise CalledProcessError(rc, self.cmds[0], '')\n        except BaseException as be:\n            if ((not already_exists) and os.path.exists(output)):\n                os.remove(output)\n            six.reraise(be.__class__, be, sys.exc_info()[2])\n    else:\n        p = self._build_pipes(output)\n        rc = p.wait()\n        if keep_rc:\n            return rc\n        if rc:\n            raise CalledProcessError(rc, self.cmds[0], '')", "docstring": "Executes the pipeline and writes the results to the supplied output.\nIf output is a filename and the file didn't already exist before trying\nto write, the file will be removed if an exception is raised.\n\nArgs:\noutput (str or file like object): will create a new file of this\nname or overwrite an existing file. If output is already a file\nlike object, it is used.\nmode (str): mode to use when creating or opening the provided file\nname if it is a string. Ignored if output is a file like object.\n\nReturns:\nThe final output of the pipeline.\nRaises:\nCalledProcessError if any return code in the pipeline is nonzero.", "source": "codesearchnet"}
{"code": "def __build_completer_map(cls):\n    ret = {}\n    for name in dir(cls):\n        obj = getattr(cls, name)\n        if iscompleter(obj):\n            for cmd in obj.__complete_targets__:\n                if (cmd in ret.keys()):\n                    raise PyShellError(\"The command '{}' already has complter method '{}', cannot register a second method '{}'.\".format(cmd, ret[cmd], obj.__name__))\n                ret[cmd] = obj.__name__\n    return ret", "docstring": "Build a mapping from command names to completer names.\n\nOne command name maps to at most one completer method.\nMultiple command names can map to the same completer method.\n\nOnly used by __init__() to initialize self._cmd_map. MUST NOT be used\nelsewhere.\n\nRaises:\nPyShellError: A command maps to multiple helper methods.", "source": "codesearchnet"}
{"code": "def sysname(self):\n    pchar = self._libinput.libinput_device_get_sysname(self._handle)\n    return string_at(pchar).decode()", "docstring": "The system name of the device.\n\nTo get the descriptive device name, use :attr:`name`.\n\nReturns:\nstr: System name of the device.", "source": "codesearchnet"}
{"code": "def diff_halfMatch(self, text1, text2):\n    \n    if self.Diff_Timeout <= 0:\n      \n      return None\n    if len(text1) > len(text2):\n      (longtext, shorttext) = (text1, text2)\n    else:\n      (shorttext, longtext) = (text1, text2)\n    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):\n      return None  \n\n    def diff_halfMatchI(longtext, shorttext, i):\n      \n      seed = longtext[i:i + len(longtext) \n      best_common = ''\n      j = shorttext.find(seed)\n      while j != -1:\n        prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])\n        suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])\n        if len(best_common) < suffixLength + prefixLength:\n          best_common = (shorttext[j - suffixLength:j] +\n              shorttext[j:j + prefixLength])\n          best_longtext_a = longtext[:i - suffixLength]\n          best_longtext_b = longtext[i + prefixLength:]\n          best_shorttext_a = shorttext[:j - suffixLength]\n          best_shorttext_b = shorttext[j + prefixLength:]\n        j = shorttext.find(seed, j + 1)\n\n      if len(best_common) * 2 >= len(longtext):\n        return (best_longtext_a, best_longtext_b,\n                best_shorttext_a, best_shorttext_b, best_common)\n      else:\n        return None\n\n    \n    hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) \n    \n    hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) \n    if not hm1 and not hm2:\n      return None\n    elif not hm2:\n      hm = hm1\n    elif not hm1:\n      hm = hm2\n    else:\n      \n      if len(hm1[4]) > len(hm2[4]):\n        hm = hm1\n      else:\n        hm = hm2\n\n    \n    if len(text1) > len(text2):\n      (text1_a, text1_b, text2_a, text2_b, mid_common) = hm\n    else:\n      (text2_a, text2_b, text1_a, text1_b, mid_common) = hm\n    return (text1_a, text1_b, text2_a, text2_b, mid_common)", "docstring": "Do the two texts share a substring which is at least half the length of\nthe longer text?\nThis speedup can produce non-minimal diffs.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nFive element Array, containing the prefix of text1, the suffix of text1,\nthe prefix of text2, the suffix of text2 and the common middle.  Or None\nif there was no match.", "source": "juraj-google-style"}
{"code": "def auth(self, skypeToken):\n    token = expiry = endpoint = None\n    msgsHost = SkypeConnection.API_MSGSHOST\n    while (not token):\n        secs = int(time.time())\n        hash = self.getMac256Hash(str(secs))\n        headers = {'LockAndKey': 'appId=msmsgs@msnmsgr.com; time={0}; lockAndKeyResponse={1}'.format(secs, hash), 'Authentication': ('skypetoken=' + skypeToken), 'BehaviorOverride': 'redirectAs404'}\n        endpointResp = self.conn('POST', '{0}/users/ME/endpoints'.format(msgsHost), codes=(200, 201, 404), headers=headers, json={'endpointFeatures': 'Agent'})\n        regTokenHead = endpointResp.headers.get('Set-RegistrationToken')\n        locHead = endpointResp.headers.get('Location')\n        if locHead:\n            locParts = re.search('(https:\n            if locParts[2]:\n                endpoint = SkypeEndpoint(self.conn, locParts[2].replace('%7B', '{').replace('%7D', '}'))\n            if (not (locParts[0] == msgsHost)):\n                msgsHost = locHead.rsplit('/', (4 if locParts[2] else 3))[0]\n                continue\n        if regTokenHead:\n            token = re.search('(registrationToken=[a-z0-9\\\\+/=]+)', regTokenHead, re.I).group(1)\n            regExpiry = re.search('expires=(\\\\d+)', regTokenHead).group(1)\n            expiry = datetime.fromtimestamp(int(regExpiry))\n            regEndMatch = re.search('endpointId=({[a-z0-9\\\\-]+})', regTokenHead)\n            if regEndMatch:\n                endpoint = SkypeEndpoint(self.conn, regEndMatch.group(1))\n        if ((not endpoint) and (endpointResp.status_code == 200) and endpointResp.json()):\n            endpoint = SkypeEndpoint(self.conn, endpointResp.json()[0]['id'])\n    return (token, expiry, msgsHost, endpoint)", "docstring": "Request a new registration token using a current Skype token.\n\nArgs:\nskypeToken (str): existing Skype token\n\nReturns:\n(str, datetime.datetime, str, SkypeEndpoint) tuple: registration token, associated expiry if known,\nresulting endpoint hostname, endpoint if provided\n\nRaises:\n.SkypeAuthException: if the login request is rejected\n.SkypeApiException: if the login form can't be processed", "source": "codesearchnet"}
{"code": "def failure_packages(self, failure_index=None):\n        \n        phase, _ = self._get_failed_phase(failure_index)\n        fr = phase.failure_reason\n        return fr.involved_requirements() if fr else None", "docstring": "Get packages involved in a failure.\n\nArgs:\nfailure_index: See `failure_reason`.\n\nReturns:\nA list of Requirement objects.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      tsk_image_object = tsk_image.TSKFileSystemImage(file_object)\n      tsk_volume = pytsk3.Volume_Info(tsk_image_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._tsk_volume = tsk_volume", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def _GetSanitizedEventValues(self, event):\n    event_values = {}\n    for (attribute_name, attribute_value) in event.GetAttributes():\n        if (attribute_name == 'regvalue'):\n            continue\n        if (attribute_name == 'pathspec'):\n            try:\n                attribute_value = JsonPathSpecSerializer.WriteSerialized(attribute_value)\n            except TypeError:\n                continue\n        event_values[attribute_name] = attribute_value\n    try:\n        attribute_value = timelib.Timestamp.RoundToSeconds(event.timestamp)\n    except TypeError as exception:\n        logger.warning('Unable to round timestamp {0!s}. error: {1!s}. Defaulting to 0'.format(event.timestamp, exception))\n        attribute_value = 0\n    attribute_value = timelib.Timestamp.CopyToIsoFormat(attribute_value, timezone=self._output_mediator.timezone)\n    event_values['datetime'] = attribute_value\n    (message, _) = self._output_mediator.GetFormattedMessages(event)\n    if (message is None):\n        data_type = getattr(event, 'data_type', 'UNKNOWN')\n        raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n    event_values['message'] = message\n    try:\n        labels = list(event_values['tag'].labels)\n    except (KeyError, AttributeError):\n        labels = []\n    event_values['tag'] = labels\n    (source_short, source) = self._output_mediator.GetFormattedSources(event)\n    if ((source is None) or (source_short is None)):\n        data_type = getattr(event, 'data_type', 'UNKNOWN')\n        raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n    event_values['source_short'] = source_short\n    event_values['source_long'] = source\n    return event_values", "docstring": "Sanitizes the event for use in Elasticsearch.\n\nThe event values need to be sanitized to prevent certain values from\ncausing problems when indexing with Elasticsearch. For example the path\nspecification is a nested dictionary which will cause problems for\nElasticsearch automatic indexing.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ndict[str, object]: sanitized event values.\n\nRaises:\nNoFormatterFound: if no event formatter can be found to match the data\ntype in the event.", "source": "codesearchnet"}
{"code": "def add(queue_name, payload=None, content_type=None, source=None, task_id=None, build_id=None, release_id=None, run_id=None):\n    if task_id:\n        task = WorkQueue.query.filter_by(task_id=task_id).first()\n        if task:\n            return task.task_id\n    else:\n        task_id = uuid.uuid4().hex\n    if (payload and (not content_type) and (not isinstance(payload, basestring))):\n        payload = json.dumps(payload)\n        content_type = 'application/json'\n    now = datetime.datetime.utcnow()\n    task = WorkQueue(task_id=task_id, queue_name=queue_name, eta=now, source=source, build_id=build_id, release_id=release_id, run_id=run_id, payload=payload, content_type=content_type)\n    db.session.add(task)\n    return task.task_id", "docstring": "Adds a work item to a queue.\n\nArgs:\nqueue_name: Name of the queue to add the work item to.\npayload: Optional. Payload that describes the work to do as a string.\nIf not a string and content_type is not provided, then this\nfunction assumes the payload is a JSON-able Python object.\ncontent_type: Optional. Content type of the payload.\nsource: Optional. Who or what originally created the task.\ntask_id: Optional. When supplied, only enqueue this task if a task\nwith this ID does not already exist. If a task with this ID already\nexists, then this function will do nothing.\nbuild_id: Build ID to associate with this task. May be None.\nrelease_id: Release ID to associate with this task. May be None.\nrun_id: Run ID to associate with this task. May be None.\n\nReturns:\nID of the task that was added.", "source": "codesearchnet"}
{"code": "def __init__(self, _args):\n        \n        super(TcExPackage, self).__init__(_args)\n\n        \n        self.features = ['aotExecutionEnabled', 'secureParams']\n\n        \n        self._app_packages = []\n        self.package_data = {'errors': [], 'updates': [], 'bundle': [], 'package': []}\n        self.validation_data = {}", "docstring": "Initialize Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def connection_lost(self, exc):\n        \n        if exc:\n            log.error(\"{:d} connection_lost {}\", id(self), exc)\n        else:\n            log.info(\"{:d} connection_lost\", id(self))", "docstring": "(asyncio.Protocol member)\n\nCalled upon when a socket closes.\nThis class simply logs the disconnection\n\nArgs:\nexc (Exception or None): Error if connection closed\nunexpectedly, None if closed cleanly.", "source": "juraj-google-style"}
{"code": "def _transform_feature(self, inputs):\n    pass", "docstring": "Returns intermediate representation (usually a `Tensor`).\n\nUses `inputs` to create an intermediate representation (usually a `Tensor`)\nthat other feature columns can use.\n\nExample usage of `inputs`:\nLet's say a Feature column depends on raw feature ('raw') and another\n`_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will\nbe used as follows:\n\n```python\nraw_tensor = inputs.get('raw')\nfc_tensor = inputs.get(input_fc)\n```\n\nArgs:\ninputs: A `_LazyBuilder` object to access inputs.\n\nReturns:\nTransformed feature `Tensor`.", "source": "github-repos"}
{"code": "def make_batched_images(images) -> List[List[ImageInput]]:\n    if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):\n        return [img for img_list in images for img in img_list]\n    elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):\n        return images\n    elif is_valid_image(images):\n        return [images]\n    raise ValueError(f'Could not make batched images from {images}')", "docstring": "Accepts images in list or nested list format, and makes a list of images for preprocessing.\n\nArgs:\nimages (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):\nThe input image.\n\nReturns:\nlist: A list of images.", "source": "github-repos"}
{"code": "def create(self, reference, document_data):\n    write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)\n    self._add_write_pbs(write_pbs)", "docstring": "Add a \"change\" to this batch to create a document.\n\nIf the document given by ``reference`` already exists, then this\nbatch will fail when :meth:`commit`-ed.\n\nArgs:\nreference (~.firestore_v1beta1.document.DocumentReference): A\ndocument reference to be created in this batch.\ndocument_data (dict): Property names and values to use for\ncreating a document.", "source": "codesearchnet"}
{"code": "def with_doc(fn_with_doc_to_copy):\n  \n\n  def decorator(wrapper_init):\n    \n    \n    @wrapt.decorator\n    def wrapping_fn(unused_wrapped, instance, args, kwargs):\n      wrapper_init(instance, *args, **kwargs)\n    return wrapping_fn(fn_with_doc_to_copy)  \n\n  return decorator", "docstring": "Returns a decorator to copy documentation from the given function.\n\nDocstring is copied, including *args and **kwargs documentation.\n\nArgs:\nfn_with_doc_to_copy: Function whose docstring, including *args and\n**kwargs documentation, is to be copied.\n\nReturns:\nDecorated version of `wrapper_init` with documentation copied from\n`fn_with_doc_to_copy`.", "source": "juraj-google-style"}
{"code": "def top(self, container, ps_args=None):\n    u = self._url('/containers/{0}/top', container)\n    params = {}\n    if (ps_args is not None):\n        params['ps_args'] = ps_args\n    return self._result(self._get(u, params=params), True)", "docstring": "Display the running processes of a container.\n\nArgs:\ncontainer (str): The container to inspect\nps_args (str): An optional arguments passed to ps (e.g. ``aux``)\n\nReturns:\n(str): The output of the top\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def listen(self):\n    self.listening = True\n    if self.threading:\n        from threading import Thread\n        self.listen_thread = Thread(target=self.listen_loop)\n        self.listen_thread.daemon = True\n        self.listen_thread.start()\n        self.scheduler_thread = Thread(target=self.scheduler)\n        self.scheduler_thread.daemon = True\n        self.scheduler_thread.start()\n    else:\n        self.listen_loop()", "docstring": "Starts the listen loop. If threading is enabled, then the loop will\nbe started in its own thread.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _subdivide_nodes(nodes):\n    \n    _, num_nodes = np.shape(nodes)\n    if num_nodes == 2:\n        left_nodes = _helpers.matrix_product(nodes, _LINEAR_SUBDIVIDE_LEFT)\n        right_nodes = _helpers.matrix_product(nodes, _LINEAR_SUBDIVIDE_RIGHT)\n    elif num_nodes == 3:\n        left_nodes = _helpers.matrix_product(nodes, _QUADRATIC_SUBDIVIDE_LEFT)\n        right_nodes = _helpers.matrix_product(\n            nodes, _QUADRATIC_SUBDIVIDE_RIGHT\n        )\n    elif num_nodes == 4:\n        left_nodes = _helpers.matrix_product(nodes, _CUBIC_SUBDIVIDE_LEFT)\n        right_nodes = _helpers.matrix_product(nodes, _CUBIC_SUBDIVIDE_RIGHT)\n    else:\n        left_mat, right_mat = make_subdivision_matrices(num_nodes - 1)\n        left_nodes = _helpers.matrix_product(nodes, left_mat)\n        right_nodes = _helpers.matrix_product(nodes, right_mat)\n    return left_nodes, right_nodes", "docstring": "Subdivide a curve into two sub-curves.\n\nDoes so by taking the unit interval (i.e. the domain of the surface) and\nsplitting it into two sub-intervals by splitting down the middle.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.\n\nReturns:\nTuple[numpy.ndarray, numpy.ndarray]: The nodes for the two sub-curves.", "source": "juraj-google-style"}
{"code": "def truncate(text, max_len=350, end='...'):\n    \n    if len(text) <= max_len:\n        return text\n    return text[:max_len].rsplit(' ', maxsplit=1)[0] + end", "docstring": "Truncate the supplied text for display.\n\nArguments:\ntext (:py:class:`str`): The text to truncate.\nmax_len (:py:class:`int`, optional): The maximum length of the\ntext before truncation (defaults to 350 characters).\nend (:py:class:`str`, optional): The ending to use to show that\nthe text was truncated (defaults to ``'...'``).\n\nReturns:\n:py:class:`str`: The truncated text.", "source": "juraj-google-style"}
{"code": "def link(target, link_to):\n    \n    assert isinstance(target, str)\n    assert os.path.exists(target)\n    assert isinstance(link_to, str)\n\n    \n    abs_path = os.path.dirname(os.path.abspath(link_to))\n    if not os.path.isdir(abs_path):\n        os.makedirs(abs_path)\n\n    \n    chmod(target)\n\n    \n    os.symlink(target, link_to)", "docstring": "Create a link to a target file or a folder.\n\nFor simplicity sake, both target and link_to must be absolute path and must\ninclude the filename of the file or folder.\nAlso do not include any trailing slash.\n\ne.g. link('/path/to/file', '/path/to/link')\n\nBut not: link('/path/to/file', 'path/to/')\nor link('/path/to/folder/', '/path/to/link')\n\nArgs:\ntarget (str): file or folder the link will point to\nlink_to (str): Link to create", "source": "juraj-google-style"}
{"code": "def import_tf_tensor(self, x, tf_x):\n    \n    return self.LaidOutTensor(self.make_slices(tf_x, x.shape))", "docstring": "Import a tf.Tensor, producing a LaidOutTensor.\n\nArgs:\nx: a Tensor\ntf_x: a tf.Tensor\nReturns:\na LaidOutTensor", "source": "juraj-google-style"}
{"code": "def ValidatePassword(self, password):\n        \n        password = to_aes_key(password)\n        return hashlib.sha256(password).digest() == self.LoadStoredData('PasswordHash')", "docstring": "Validates if the provided password matches with the stored password.\n\nArgs:\npassword (string): a password.\n\nReturns:\nbool: the provided password matches with the stored password.", "source": "juraj-google-style"}
{"code": "def get_num_days_required(offset, period='d', perc_required=0.9):\n    x = pd.to_datetime('2010-01-01')\n    delta = (x - (x - offset))\n    days = (delta.days * 0.69)\n    if (period == 'd'):\n        req = (days * perc_required)\n    elif (period == 'm'):\n        req = ((days / 20) * perc_required)\n    elif (period == 'y'):\n        req = ((days / 252) * perc_required)\n    else:\n        raise NotImplementedError('period not supported. Supported periods are d, m, y')\n    return req", "docstring": "Estimates the number of days required to assume that data is OK.\n\nHelper function used to determine if there are enough \"good\" data\ndays over a given period.\n\nArgs:\n* offset (DateOffset): Offset (lookback) period.\n* period (str): Period string.\n* perc_required (float): percentage of number of days\nexpected required.", "source": "codesearchnet"}
{"code": "def update_firmware(self, device, id_override=None, type_override=None):\n    object_id = (id_override or device.object_id())\n    object_type = (type_override or device.object_type())\n    url_string = '{}/{}s/{}/update_firmware'.format(self.BASE_URL, object_type, object_id)\n    try:\n        arequest = requests.post(url_string, headers=API_HEADERS)\n        response_json = arequest.json()\n        return response_json\n    except requests.exceptions.RequestException:\n        return None", "docstring": "Make a call to the update_firmware endpoint. As far as I know this\nis only valid for Wink hubs.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\nresponse_json (Dict): The API's response in dictionary format", "source": "codesearchnet"}
{"code": "def get_predicted_structure(self, structure, ref_structure):\n        \n        new_structure = structure.copy()\n        new_structure.scale_lattice(self.predict(structure, ref_structure))\n\n        return new_structure", "docstring": "Given a structure, returns back the structure scaled to predicted\nvolume.\nArgs:\nstructure (Structure): structure w/unknown volume\nref_structure (Structure): A reference structure with a similar\nstructure but different species.\nReturns:\na Structure object with predicted volume", "source": "juraj-google-style"}
{"code": "def _TestGradient(self, nccl_reduce, numpy_fn):\n\n    def _Gradient(tensors, devices):\n        inputs = [array_ops.placeholder(t.dtype, t.shape) for t in tensors]\n        reduce_tensors = nccl_reduce(inputs, devices)\n        losses = _DeviceTensors(tensors, [t.device for t in reduce_tensors])\n        grads = gradients.gradients(reduce_tensors, inputs, losses, colocate_gradients_with_ops=True)\n        return [g for g in grads if g is not None]\n    self._Test(_Gradient, numpy_fn)", "docstring": "Tests the gradient of nccl_reduce.\n\nArgs:\nnccl_reduce: A function taking a list of tensors and a list of devices,\nand returns a list of reduced tensors and a list of ops to perform the\nreduction.\nnumpy_fn: A function taking two tensors and returning the gradient of the\nreduction of the two.", "source": "github-repos"}
{"code": "def DistFitDataset(Dat):\n    (r, c) = Dat.shape\n    Poiss = np.zeros(r)\n    Norm = np.zeros(r)\n    LogNorm = np.zeros(r)\n    for i in range(r):\n        temp = GetDistFitError(Dat[i])\n        Poiss[i] = temp['poiss']\n        Norm[i] = temp['norm']\n        LogNorm[i] = temp['lognorm']\n    d = {}\n    d['poiss'] = Poiss\n    d['norm'] = Norm\n    d['lognorm'] = LogNorm\n    return d", "docstring": "Given a data matrix, this returns the per-gene fit error for the\nPoisson, Normal, and Log-Normal distributions.\n\nArgs:\nDat (array): numpy array with shape (genes, cells)\n\nReturns:\nd (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.", "source": "codesearchnet"}
{"code": "def relativefrom(base, path):\n    base_parts = list(iteratepath(base))\n    path_parts = list(iteratepath(path))\n    common = 0\n    for (component_a, component_b) in zip(base_parts, path_parts):\n        if (component_a != component_b):\n            break\n        common += 1\n    return '/'.join(((['..'] * (len(base_parts) - common)) + path_parts[common:]))", "docstring": "Return a path relative from a given base path.\n\nInsert backrefs as appropriate to reach the path from the base.\n\nArguments:\nbase (str): Path to a directory.\npath (str): Path to make relative.\n\nReturns:\nstr: the path to ``base`` from ``path``.\n\n>>> relativefrom(\"foo/bar\", \"baz/index.html\")\n'../../baz/index.html'", "source": "codesearchnet"}
{"code": "def stop(self) -> float:\n    self.stop_time = time.time()\n    return ((self.stop_time - self.start_time) - self.offset)", "docstring": "Stop the timer\n\nReturns:\nThe time the timer was stopped", "source": "codesearchnet"}
{"code": "def inference(self, state_arr, limit=1000):\n        \n        self.__inferencing_flag = True\n\n        agent_x, agent_y = np.where(state_arr[0] == 1)\n        agent_x, agent_y = agent_x[0], agent_y[0]\n        self.__create_enemy(self.__map_arr)\n        result_list = [(agent_x, agent_y, 0.0)]\n        result_val_list = [agent_x, agent_y]\n        for e in range(self.__enemy_num):\n            result_val_list.append(self.__enemy_pos_list[e][0])\n            result_val_list.append(self.__enemy_pos_list[e][1])\n        result_val_list.append(0.0)\n        result_list.append(tuple(result_val_list))\n\n        self.t = 0\n        while self.t < limit:\n            next_action_arr = self.extract_possible_actions(state_arr)\n            next_q_arr = self.function_approximator.inference_q(next_action_arr)\n            action_arr, q = self.select_action(next_action_arr, next_q_arr)\n            self.__move_enemy(action_arr)\n\n            agent_x, agent_y = np.where(action_arr[0] == 1)\n            agent_x, agent_y = agent_x[0], agent_y[0]\n            \n            result_val_list = [agent_x, agent_y]\n            for e in range(self.__enemy_num):\n                result_val_list.append(self.__enemy_pos_list[e][0])\n                result_val_list.append(self.__enemy_pos_list[e][1])\n            try:\n                result_val_list.append(q[0])\n            except IndexError:\n                result_val_list.append(q)\n\n            result_list.append(tuple(result_val_list))\n\n            \n            state_arr = self.update_state(state_arr, action_arr)\n\n            \n            self.t += 1\n            \n            end_flag = self.check_the_end_flag(state_arr)\n            if end_flag is True:\n                break\n\n        return result_list", "docstring": "Infernce.\n\nArgs:\nstate_arr:    `np.ndarray` of state.\nlimit:        The number of inferencing.\n\nReturns:\n`list of `np.ndarray` of an optimal route.", "source": "juraj-google-style"}
{"code": "def _CheckCompositeMap(self, data_type_definition):\n    \n    if not data_type_definition:\n      raise errors.FormatError('Missing data type definition')\n\n    members = getattr(data_type_definition, 'members', None)\n    if not members:\n      raise errors.FormatError('Invalid data type definition missing members')\n\n    is_composite_map = False\n    last_member_byte_order = data_type_definition.byte_order\n\n    for member_definition in members:\n      if member_definition.IsComposite():\n        is_composite_map = True\n        break\n\n      \n      \n      if (last_member_byte_order != definitions.BYTE_ORDER_NATIVE and\n          member_definition.byte_order != definitions.BYTE_ORDER_NATIVE and\n          last_member_byte_order != member_definition.byte_order):\n        is_composite_map = True\n        break\n\n      last_member_byte_order = member_definition.byte_order\n\n    return is_composite_map", "docstring": "Determines if the data type definition needs a composite map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): structure data type definition.\n\nReturns:\nbool: True if a composite map is needed, False otherwise.\n\nRaises:\nFormatError: if a composite map is needed cannot be determined from the\ndata type definition.", "source": "juraj-google-style"}
{"code": "def tomof(self, maxline=MAX_MOF_LINE):\n    mof = []\n    mof.append(u'Qualifier ')\n    mof.append(self.name)\n    mof.append(u' : ')\n    mof.append(self.type)\n    if self.is_array:\n        mof.append(u'[')\n        if (self.array_size is not None):\n            mof.append(six.text_type(self.array_size))\n        mof.append(u']')\n    if (self.value is not None):\n        mof.append(u' = ')\n        if isinstance(self.value, list):\n            mof.append(u'{ ')\n        mof_str = u''.join(mof)\n        line_pos = ((len(mof_str) - mof_str.rfind('\\n')) - 1)\n        (val_str, line_pos) = _value_tomof(self.value, self.type, MOF_INDENT, maxline, line_pos, 3, False)\n        mof.append(val_str)\n        if isinstance(self.value, list):\n            mof.append(u' }')\n    mof.append(u',\\n')\n    mof.append(_indent_str((MOF_INDENT + 1)))\n    mof.append(u'Scope(')\n    mof_scopes = []\n    for scope in self._ordered_scopes:\n        if self.scopes.get(scope, False):\n            mof_scopes.append(scope.lower())\n    mof.append(u', '.join(mof_scopes))\n    mof.append(u')')\n    mof_flavors = []\n    if (self.overridable is True):\n        mof_flavors.append('EnableOverride')\n    elif (self.overridable is False):\n        mof_flavors.append('DisableOverride')\n    if (self.tosubclass is True):\n        mof_flavors.append('ToSubclass')\n    elif (self.tosubclass is False):\n        mof_flavors.append('Restricted')\n    if self.translatable:\n        mof_flavors.append('Translatable')\n    if mof_flavors:\n        mof.append(u',\\n')\n        mof.append(_indent_str((MOF_INDENT + 1)))\n        mof.append(u'Flavor(')\n        mof.append(u', '.join(mof_flavors))\n        mof.append(u')')\n    mof.append(u';\\n')\n    return u''.join(mof)", "docstring": "Return a MOF string with the declaration of this CIM qualifier type.\n\nThe returned MOF string conforms to the ``qualifierDeclaration``\nABNF rule defined in :term:`DSP0004`.\n\nQualifier flavors are included in the returned MOF string only when\nthe information is available (i.e. the value of the corresponding\nattribute is not `None`).\n\nBecause :term:`DSP0004` does not support instance qualifiers, and thus\ndoes not define a flavor keyword for the\n:attr:`~pywbem.CIMQualifierDeclaration.toinstance` attribute, that\nflavor is not included in the returned MOF string.\n\nReturns:\n\n:term:`unicode string`: MOF string.", "source": "codesearchnet"}
{"code": "def _approx_eq_iterables(val: Any, other: Any, *, atol: Union[(int, float)]) -> bool:\n\n    def get_iter(iterable):\n        try:\n            return iter(iterable)\n        except TypeError:\n            return None\n    val_it = get_iter(val)\n    other_it = get_iter(other)\n    if ((val_it is not None) and (other_it is not None)):\n        while True:\n            try:\n                val_next = next(val_it)\n            except StopIteration:\n                try:\n                    next(other_it)\n                    return False\n                except StopIteration:\n                    return True\n            try:\n                other_next = next(other_it)\n            except StopIteration:\n                return False\n            result = approx_eq(val_next, other_next, atol=atol)\n            if (result is not True):\n                return result\n    return NotImplemented", "docstring": "Iterates over arguments and calls approx_eq recursively.\n\nTypes of `val` and `other` does not necessarily needs to match each other.\nThey just need to be iterable of the same length and have the same\nstructure, approx_eq() will be called on each consecutive element of `val`\nand `other`.\n\nArgs:\nval: Source for approximate comparison.\nother: Target for approximate comparison.\natol: The minimum absolute tolerance. See np.isclose() documentation for\ndetails.\n\nReturns:\nTrue if objects are approximately equal, False otherwise. Returns\nNotImplemented when approximate equality is not implemented for given\ntypes.", "source": "codesearchnet"}
{"code": "def _create_interval_filter(interval):\n  \n  def filter_fn(value):\n    if (not isinstance(value, six.integer_types) and\n        not isinstance(value, float)):\n      raise error.HParamsError(\n          'Cannot use an interval filter for a value of type: %s, Value: %s' %\n          (type(value), value))\n    return interval.min_value <= value and value <= interval.max_value\n\n  return filter_fn", "docstring": "Returns a function that checkes whether a number belongs to an interval.\n\nArgs:\ninterval: A tensorboard.hparams.Interval protobuf describing the interval.\nReturns:\nA function taking a number (a float or an object of a type in\nsix.integer_types) that returns True if the number belongs to (the closed)\n'interval'.", "source": "juraj-google-style"}
{"code": "def poly_energies(samples_like, poly):\n    msg = 'poly_energies is deprecated and will be removed in dimod 0.9.0.In the future, use BinaryPolynomial.energies'\n    warnings.warn(msg, DeprecationWarning)\n    return BinaryPolynomial(poly, 'SPIN').energies(samples_like)", "docstring": "Calculates energy of samples from a higher order polynomial.\n\nArgs:\nsample (samples_like):\nA collection of raw samples. `samples_like` is an extension of\nNumPy's array_like structure. See :func:`.as_samples`.\n\npoly (dict):\nPolynomial as a dict of form {term: bias, ...}, where `term` is a\ntuple of variables and `bias` the associated bias. Variable\nlabeling/indexing of terms in poly dict must match that of the\nsample(s).\n\nReturns:\nlist/:obj:`numpy.ndarray`: The energy of the sample(s).", "source": "codesearchnet"}
{"code": "def patch(self, payload, append_to_arrays=True):\n        \n        if not isinstance(payload, dict):\n            raise ValueError(\"The 'payload' parameter must be provided a dictionary object.\")\n        payload = self.__class__.set_id_in_fkeys(payload)\n        if append_to_arrays:\n            for key in payload:\n                val = payload[key]\n                if type(val) == list:\n                    val.extend(getattr(self, key))\n                    payload[key] = list(set(val))\n        payload = self.check_boolean_fields(payload)\n        payload = self.__class__.add_model_name_to_payload(payload)\n        self.debug_logger.debug(\"PATCHING payload {}\".format(json.dumps(payload, indent=4)))\n        res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)\n        self.write_response_html_to_file(res,\"bob.html\")\n        res.raise_for_status()\n        json_res = res.json()\n        self.debug_logger.debug(\"Success\")\n        self.attrs = json_res\n        return json_res", "docstring": "Patches current record and udpates the current instance's 'attrs'\nattribute to reflect the new changes.\n\nArgs:\npayload - hash. This will be JSON-formatted prior to sending the request.\n\nReturns:\n`dict`. The JSON formatted response.\n\nRaises:\n`requests.exceptions.HTTPError`: The status code is not ok.", "source": "juraj-google-style"}
{"code": "def suggest_charges(self, tolerance=0.1):\n        \n        recommendations = {}\n\n        for def_type in self.defect_types:\n            test_charges = np.arange(\n                np.min(self.stable_charges[def_type]) - 1,\n                np.max(self.stable_charges[def_type]) + 2)\n            test_charges = [charge for charge in test_charges if charge not in self.finished_charges[def_type]]\n\n            if len(self.transition_level_map[def_type].keys()):\n                \n                \n                min_tl = min(self.transition_level_map[def_type].keys())\n                if min_tl < tolerance:\n                    max_charge = max(self.transition_level_map[def_type][min_tl])\n                    test_charges = [charge for charge in test_charges if charge < max_charge]\n\n                \n                \n                max_tl = max(self.transition_level_map[def_type].keys())\n                if max_tl > (self.band_gap - tolerance):\n                    min_charge = min(self.transition_level_map[def_type][max_tl])\n                    test_charges = [charge for charge in test_charges if charge > min_charge]\n            else:\n                test_charges = [charge for charge in test_charges if charge not in self.stable_charges[def_type]]\n\n            recommendations[def_type] = test_charges\n\n        return recommendations", "docstring": "Suggest possible charges for defects to computee based on proximity\nof known transitions from entires to VBM and CBM\n\nArgs:\ntolerance (float): tolerance with respect to the VBM and CBM to\n`          continue to compute new charges", "source": "juraj-google-style"}
{"code": "def top_k(x, k, sorted=True):\n    if any_symbolic_tensors((x,)):\n        return TopK(k, sorted).symbolic_call(x)\n    return backend.math.top_k(x, k, sorted)", "docstring": "Finds the top-k values and their indices in a tensor.\n\nArgs:\nx: Input tensor.\nk: An integer representing the number of top elements to retrieve.\nsorted: A boolean indicating whether to sort the output in\ndescending order. Defaults to `True`.\n\nReturns:\nA tuple containing two tensors. The first tensor contains the\ntop-k values, and the second tensor contains the indices of the\ntop-k values in the input tensor.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([5, 2, 7, 1, 9, 3])\n>>> values, indices = top_k(x, k=3)\n>>> print(values)\narray([9 7 5], shape=(3,), dtype=int32)\n>>> print(indices)\narray([4 2 0], shape=(3,), dtype=int32)", "source": "github-repos"}
{"code": "def make_hex_texture(grid_size=2, resolution=1):\n    (grid_x, grid_y) = np.meshgrid(np.arange(grid_size), np.arange(grid_size))\n    ROOT_3_OVER_2 = (np.sqrt(3) / 2)\n    ONE_HALF = 0.5\n    grid_x = ((grid_x * np.sqrt(3)) + ((grid_y % 2) * ROOT_3_OVER_2)).flatten()\n    grid_y = (grid_y.flatten() * 1.5)\n    grid_points = grid_x.shape[0]\n    x_offsets = np.interp(np.arange((4 * resolution)), (np.arange(4) * resolution), [ROOT_3_OVER_2, 0.0, (- ROOT_3_OVER_2), (- ROOT_3_OVER_2)])\n    y_offsets = np.interp(np.arange((4 * resolution)), (np.arange(4) * resolution), [(- ONE_HALF), (- 1.0), (- ONE_HALF), ONE_HALF])\n    tmx = (4 * resolution)\n    x_t = (np.tile(grid_x, (tmx, 1)) + x_offsets.reshape((tmx, 1)))\n    y_t = (np.tile(grid_y, (tmx, 1)) + y_offsets.reshape((tmx, 1)))\n    x_t = np.vstack([x_t, np.tile(np.nan, (1, grid_x.size))])\n    y_t = np.vstack([y_t, np.tile(np.nan, (1, grid_y.size))])\n    return fit_texture((x_t.flatten('F'), y_t.flatten('F')))", "docstring": "Makes a texture consisting on a grid of hexagons.\n\nArgs:\ngrid_size (int): the number of hexagons along each dimension of the grid\nresolution (int): the number of midpoints along the line of each hexagon\n\nReturns:\nA texture.", "source": "codesearchnet"}
{"code": "def run_ppm_server(pdb_file, outfile, force_rerun=False):\n    if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):\n        url = 'http:\n        files = {'userfile': open(pdb_file, 'rb')}\n        r = requests.post(url, files=files)\n        info = r.text\n        with open(outfile, 'w') as f:\n            f.write(info)\n    else:\n        with open(outfile, 'r') as f:\n            info = f.read()\n    t = info.replace('\\n', '')\n    tt = t.replace('\\r', '')\n    ttt = tt.replace('\\t', '')\n    soup = BeautifulSoup(ttt, 'lxml')\n    tables = soup.find_all('table', attrs={'class': 'data'})\n    info_dict = {}\n    table_index = 0\n    for t in tables:\n        data_index = 0\n        for data in t.find_all('tr', attrs={'class': 'row1'}):\n            data_list = list(data.strings)\n            if (table_index == 0):\n                info_dict['Depth/Hydrophobic Thickness'] = data_list[0]\n                info_dict['deltaG_transfer'] = data_list[2]\n                info_dict['Tilt Angle'] = data_list[3]\n            if ((table_index == 1) and (data_index == 0)):\n                info_dict['Embedded_residues_Tilt'] = data_list[0]\n                info_dict['Embedded_residues'] = data_list[1]\n            if ((table_index == 1) and (data_index == 1)):\n                info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]\n                info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]\n            if (table_index == 2):\n                info_dict['Output Messages'] = data_list[1]\n            if (table_index == 3):\n                baseurl = 'http:\n                a = data.find('a', href=True)\n                download_url = (baseurl + a['href'].replace('./', ''))\n                info_dict['Output file download link'] = download_url\n            data_index += 1\n        table_index += 1\n    return info_dict", "docstring": "Run the PPM server from OPM to predict transmembrane residues.\n\nArgs:\npdb_file (str): Path to PDB file\noutfile (str): Path to output HTML results file\nforce_rerun (bool): Flag to rerun PPM if HTML results file already exists\n\nReturns:\ndict: Dictionary of information from the PPM run, including a link to download the membrane protein file", "source": "codesearchnet"}
{"code": "def s_add(self, path, function, method=None, type_cast=None):\n    with self._lock:\n        try:\n            path = '^/{}'.format(path.lstrip('/'))\n            path = '{}/$'.format(path.rstrip('/'))\n            path = path.replace('<', '(?P<')\n            path = path.replace('>', '>[^/]*)')\n            self.add(path, function, method, type_cast)\n        except Exception:\n            pass", "docstring": "Function for registering a simple path.\n\nArgs:\npath (str): Path to be matched.\nfunction (function): Function to associate with this path.\nmethod (str, optional): Usually used to define one of GET, POST,\nPUT, DELETE. You may use whatever fits your situation though.\nDefaults to None.\ntype_cast (dict, optional): Mapping between the param name and\none of `int`, `float` or `bool`. The value reflected by the\nprovided param name will than be casted to the given type.\nDefaults to None.", "source": "codesearchnet"}
{"code": "def parse_GSM(filepath, entry_name=None):\n    if isinstance(filepath, str):\n        with utils.smart_open(filepath) as f:\n            soft = []\n            has_table = False\n            for line in f:\n                if (('_table_begin' in line) or (not line.startswith(('^', '!', '\n                    has_table = True\n                soft.append(line.rstrip())\n    else:\n        soft = []\n        has_table = False\n        for line in filepath:\n            if (('_table_begin' in line) or (not line.startswith(('^', '!', '\n                has_table = True\n            soft.append(line.rstrip())\n    if (entry_name is None):\n        sets = [i for i in soft if i.startswith('^')]\n        if (len(sets) > 1):\n            raise Exception('More than one entry in GPL')\n        if (len(sets) == 0):\n            raise NoEntriesException('No entries found. Check the if accession is correct!')\n        entry_name = parse_entry_name(sets[0])\n    columns = parse_columns(soft)\n    metadata = parse_metadata(soft)\n    if has_table:\n        table_data = parse_table_data(soft)\n    else:\n        table_data = DataFrame()\n    gsm = GSM(name=entry_name, table=table_data, metadata=metadata, columns=columns)\n    return gsm", "docstring": "Parse GSM entry from SOFT file.\n\nArgs:\nfilepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry\nor list of lines representing GSM from GSE file.\nentry_name (:obj:`str`, optional): Name of the entry. By default it is\ninferred from the data.\n\nReturns:\n:obj:`GEOparse.GSM`: A GSM object.", "source": "codesearchnet"}
{"code": "def _should_catch_error(self, error, errors=()):\n    caught_errors = (errors or (self.session.driver.invalid_element_errors + (ElementNotFound,)))\n    return isinstance(error, caught_errors)", "docstring": "Returns whether to catch the given error.\n\nArgs:\nerror (Exception): The error to consider.\nerrors (Tuple[Type[Exception], ...], optional): The exception types that should be\ncaught. Defaults to :class:`ElementNotFound` plus any driver-specific invalid\nelement errors.\n\nReturns:\nbool: Whether to catch the given error.", "source": "codesearchnet"}
{"code": "def has_no_narrow_neurite_section(neuron,\n                                  neurite_filter,\n                                  radius_threshold=0.05,\n                                  considered_section_min_length=50):\n    \n\n    considered_sections = (sec for sec in iter_sections(neuron, neurite_filter=neurite_filter)\n                           if sec.length > considered_section_min_length)\n\n    def narrow_section(section):\n        \n        return section.points[:, COLS.R].mean() < radius_threshold\n\n    bad_ids = [(section.id, section.points[1])\n               for section in considered_sections if narrow_section(section)]\n    return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check if the neuron has dendrites with narrow sections\n\nArguments:\nneuron(Neuron): The neuron object to test\nneurite_filter(callable): filter the neurites by this callable\nradius_threshold(float): radii below this are considered narro\nconsidered_section_min_length(float): sections with length below\nthis are not taken into account\n\nReturns:\nCheckResult with result. result.info contains the narrow section ids and their\nfirst point", "source": "juraj-google-style"}
{"code": "def get_gitlab_project(self):\n    self.server = gitlab.Gitlab(GIT_URL, private_token=GITLAB_TOKEN, api_version=4)\n    project = self.server.projects.get(self.git_short)\n    if (not project):\n        raise GitLabApiError('Could not get Project \"{0}\" from GitLab API.'.format(self.git_short))\n    self.project = project\n    return self.project", "docstring": "Get numerical GitLab Project ID.\n\nReturns:\nint: Project ID number.\n\nRaises:\nforemast.exceptions.GitLabApiError: GitLab responded with bad status\ncode.", "source": "codesearchnet"}
{"code": "def _create_sagemaker_model(self, *args):  \n        \n        if self.algorithm_arn:\n            \n            \n            if self._created_model_package_name is None:\n                model_package_name = self._create_sagemaker_model_package()\n                self.sagemaker_session.wait_for_model_package(model_package_name)\n                self._created_model_package_name = model_package_name\n            model_package_name = self._created_model_package_name\n        else:\n            \n            model_package_name = self.model_package_arn\n\n        container_def = {\n            'ModelPackageName': model_package_name,\n        }\n\n        if self.env != {}:\n            container_def['Environment'] = self.env\n\n        model_package_short_name = model_package_name.split('/')[-1]\n        enable_network_isolation = self.enable_network_isolation()\n        self.name = self.name or utils.name_from_base(model_package_short_name)\n        self.sagemaker_session.create_model(self.name, self.role, container_def,\n                                            vpc_config=self.vpc_config,\n                                            enable_network_isolation=enable_network_isolation)", "docstring": "Create a SageMaker Model Entity\n\nArgs:\n*args: Arguments coming from the caller. This class\ndoes not require any so they are ignored.", "source": "juraj-google-style"}
{"code": "def removeColumns(self, columnNames):\n        \n        model = self.tableView.model()\n\n        if model is not None:\n            model.removeDataFrameColumns(columnNames)\n\n        self.removeColumnButton.setChecked(False)", "docstring": "Removes one or multiple columns from the model.\n\nThis method is also a slot.\n\nArgs:\ncolumnNames (list): A list of columns, which shall\nbe removed from the model.", "source": "juraj-google-style"}
{"code": "def query_op_traceback(self, op_name):\n    for op_log_proto in self._graph_tracebacks:\n        for log_entry in op_log_proto.log_entries:\n            if log_entry.name == op_name:\n                return self._code_def_to_traceback(log_entry.code_def, op_log_proto.id_to_string)\n    raise ValueError(\"Op '%s' does not exist in the tracebacks received by the debug server.\" % op_name)", "docstring": "Query the traceback of an op.\n\nArgs:\nop_name: Name of the op to query.\n\nReturns:\nThe traceback of the op, as a list of 3-tuples:\n(filename, lineno, function_name)\n\nRaises:\nValueError: If the op cannot be found in the tracebacks received by the\nserver so far.", "source": "github-repos"}
{"code": "def CopyToDateTimeString(self):\n    if (self._number_of_seconds is None):\n        return None\n    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:01d}'.format(self.year, self.month, self.day_of_month, self.hours, self.minutes, self.seconds, self.deciseconds)", "docstring": "Copies the RFC2579 date-time to a date and time string.\n\nReturns:\nstr: date and time value formatted as: \"YYYY-MM-DD hh:mm:ss.#\" or\nNone if the number of seconds is missing.", "source": "codesearchnet"}
{"code": "def directional_emd(direction, d1, d2):\n    if (direction == Direction.CAUSE):\n        func = hamming_emd\n    elif (direction == Direction.EFFECT):\n        func = effect_emd\n    else:\n        validate.direction(direction)\n    return round(func(d1, d2), config.PRECISION)", "docstring": "Compute the EMD between two repertoires for a given direction.\n\nThe full EMD computation is used for cause repertoires. A fast analytic\nsolution is used for effect repertoires.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nd1 (np.ndarray): The first repertoire.\nd2 (np.ndarray): The second repertoire.\n\nReturns:\nfloat: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|.\n\nRaises:\nValueError: If ``direction`` is invalid.", "source": "codesearchnet"}
{"code": "def psnr_and_ssim(output, target):\n  \n  output = tf.cast(output, dtype=tf.int32)\n  target = tf.cast(target, dtype=tf.int32)\n  psnr = tf.image.psnr(output, target, max_val=255)\n  ssim = tf.image.ssim(output, target, max_val=255)\n  return psnr, ssim", "docstring": "Compute the PSNR and SSIM.\n\nArgs:\noutput: 4-D Tensor, shape=(num_frames, height, width, num_channels)\ntarget: 4-D Tensor, shape=(num_frames, height, width, num_channels)\nReturns:\npsnr: 1-D Tensor, shape=(num_frames,)\nssim: 1-D Tensor, shape=(num_frames,)", "source": "juraj-google-style"}
{"code": "def get_parameter_dict(self, include_frozen=False):\n    return OrderedDict(zip(self.get_parameter_names(include_frozen=include_frozen), self.get_parameter_vector(include_frozen=include_frozen)))", "docstring": "Get an ordered dictionary of the parameters\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "codesearchnet"}
{"code": "def explicit_pass(msg, extras=None):\n    raise signals.TestPass(msg, extras)", "docstring": "Explicitly pass a test.\n\nThis will pass the test explicitly regardless of any other error happened\nin the test body. E.g. even if errors have been recorded with `expects`,\nthe test will still be marked pass if this is called.\n\nA test without uncaught exception will pass implicitly so this should be\nused scarcely.\n\nArgs:\nmsg: A string explaining the details of the passed test.\nextras: An optional field for extra information to be included in\ntest result.\n\nRaises:\nsignals.TestPass: Mark a test as passed.", "source": "github-repos"}
{"code": "def constant_to_value(self, pyval, subst=None, node=None):\n    node = node or self.ctx.root_node\n    if pyval.__class__ is tuple:\n        type_key = tuple((type(v) for v in pyval))\n    else:\n        type_key = type(pyval)\n    key = ('constant', pyval, type_key)\n    if key in self._convert_cache:\n        if self._convert_cache[key] is None:\n            self._convert_cache[key] = self.unsolvable\n            if not self.ctx.recursion_allowed:\n                name = getattr(pyval, 'name', None) or pyval.__class__.__name__\n                self.ctx.errorlog.recursion_error(self.ctx.vm.frames, name)\n        return self._convert_cache[key]\n    else:\n        self._convert_cache[key] = None\n        need_node = [False]\n\n        def get_node():\n            need_node[0] = True\n            return node\n        recursive = isinstance(pyval, pytd.LateType) and pyval.recursive\n        if recursive:\n            context = self.ctx.allow_recursive_convert()\n        else:\n            context = contextlib.nullcontext()\n        with context:\n            try:\n                value = self._constant_to_value(pyval, subst, get_node)\n            except NotImplementedError:\n                del self._convert_cache[key]\n                raise\n        if not need_node[0] or node is self.ctx.root_node:\n            if recursive:\n                annot = abstract.LateAnnotation(pyval.name, self.ctx.vm.frames, self.ctx)\n                annot.set_type(value)\n                value = annot\n            self._convert_cache[key] = value\n        return value", "docstring": "Like constant_to_var, but convert to an abstract.BaseValue.\n\nThis also memoizes the results.  We don't memoize on name, as builtin types\nlike str or list might be reinitialized under different names (e.g. \"param\n1\"), but we want the canonical name and type.  We *do* memoize on the type\nas well, to make sure that e.g. \"1.0\" and \"1\" get converted to different\nconstants.  Memoization is an optimization, but an important one - mapping\nconstants like \"None\" to the same AbstractValue greatly simplifies the\ncfg structures we're building.\n\nArgs:\npyval: The constant to convert.\nsubst: The current type parameters.\nnode: The current CFG node. (For instances)\n\nReturns:\nThe converted constant. (Instance of BaseValue)", "source": "github-repos"}
{"code": "def get_atten(self, idx=0):\n    if not self.is_open:\n        raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host)\n    if idx + 1 > self.path_count or idx < 0:\n        raise IndexError('Attenuator index out of range!', self.path_count, idx)\n    telnet_cmd = ':ATT?' if self.path_count == 1 else 'CHAN:%s:ATT?' % (idx + 1)\n    atten_val_str = self._telnet_client.cmd(telnet_cmd)\n    atten_val = float(atten_val_str)\n    return atten_val", "docstring": "This function returns the current attenuation from an attenuator at a\ngiven index in the instrument.\n\nArgs:\nidx: This zero-based index is the identifier for a particular\nattenuator in an instrument.\n\nRaises:\nError: The underlying telnet connection to the instrument is not\nopen.\n\nReturns:\nA float that is the current attenuation value.", "source": "github-repos"}
{"code": "def set_room_name(self, room_id, name, timestamp=None):\n        \n        body = {\n            \"name\": name\n        }\n        return self.send_state_event(room_id, \"m.room.name\", body, timestamp=timestamp)", "docstring": "Perform PUT /rooms/$room_id/state/m.room.name\nArgs:\nroom_id (str): The room ID\nname (str): The new room name\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def get_local_config_filepath(\n        config_filepath,\n        force_local=False,\n):\n    \n    local_config_name = path.basename(config_filepath).split('.')[0] + '_local.cfg'\n    local_config_filepath = path.join(path.split(config_filepath)[0], local_config_name)\n\n    real_config_filepath = ''\n    if path.isfile(local_config_filepath) or force_local:\n        \n        real_config_filepath = local_config_filepath\n    else:\n        \n        real_config_filepath = config_filepath\n\n    return real_config_filepath", "docstring": "helper for finding local filepath for config\n\nArgs:\nconfig_filepath (str): path to local config abspath > relpath\nforce_local (bool): force return of _local.cfg version\n\nReturns:\nstr: Path to local config, or global if path DNE", "source": "juraj-google-style"}
{"code": "def split(self, bitindex):\n    if (bitindex < 0):\n        raise ValueError('bitindex must be larger or equal to 0.')\n    if (bitindex == 0):\n        return (None, self)\n    lastend = 0\n    split_promise = False\n    for (splitindex, p) in enumerate(self._promises):\n        if (bitindex in range(lastend, p._bitstart)):\n            split_promise = False\n            break\n        if (bitindex in range(p._bitstart, p._bitend)):\n            if ((bitindex - p._bitstart) == 0):\n                split_promise = False\n            else:\n                split_promise = True\n            break\n        lastend = p._bitend\n    else:\n        raise Exception('Should be impossible')\n    processed_left = TDOPromiseCollection(self._chain)\n    processed_right = TDOPromiseCollection(self._chain)\n    if split_promise:\n        (left, right) = p.split((bitindex - p._bitstart))\n        for i in range(splitindex):\n            processed_left.add(self._promises[i], 0)\n        processed_left.add(left, 0)\n        processed_right.add(right, 0)\n        for tmpprim in self._promises[(splitindex + 1):]:\n            processed_right.add(tmpprim, (- bitindex))\n        return (processed_left, processed_right)\n    else:\n        for i in range(splitindex):\n            processed_left.add(self._promises[i], 0)\n        for i in range(splitindex, len(self._promises)):\n            processed_right.add(self._promises[i], (- bitindex))\n        return (processed_left, processed_right)", "docstring": "Split a promise into two promises. A tail bit, and the 'rest'.\n\nSame operation as the one on TDOPromise, except this works\nwith a collection of promises and splits the appropriate one.\n\nReturns:\nThe 'Rest' and the 'Tail'.\nThe 'Rest' is TDOPromiseCollection containing the first\nchunk of the original TDOPromiseCollection.\nThe 'Tail' is a single bit sub promise for the final bit\nin the operation\n\nIf the 'Rest' would have a length of 0, None is returned", "source": "codesearchnet"}
{"code": "def add_scales_bar(img, bbox):\n    \n    tc = TileCoordinate(bbox.min.zoom, bbox.min.x, bbox.min.y)\n    meters_per_pixel = tc.resolution()\n    one_km_bar = int(1000 * (1 / meters_per_pixel))\n    col_black = (0, 0, 0)\n\n    line_start = (100, img.size[1] - 100)  \n    line_end = (line_start[0] + one_km_bar, line_start[1])\n    whiskers_left = [line_start[0], line_start[1] - 15, line_start[0], line_start[1] + 15]\n    whiskers_right = [line_end[0], line_end[1] - 15, line_end[0], line_end[1] + 15]\n\n    draw = ImageDraw.Draw(img)\n    draw.line([line_start, line_end], fill=col_black, width=5)\n    draw.line(whiskers_left, fill=col_black, width=2)\n    draw.line(whiskers_right, fill=col_black, width=2)\n    draw.text((line_start[0] + 10, line_start[1] + 10), fill=col_black, text=\"1 km\")\n    del draw", "docstring": "Add a scales bar to the  map.\n\nCalculates the resolution at the current latitude and\ninserts the corresponding scales bar on the map.\n\nArgs:\nimg (Image): Image object to which the scales bar will be added.\nbbox (TileBB): boundaries of the map", "source": "juraj-google-style"}
{"code": "def _ParsePage(self, parser_mediator, file_offset, page_data):\n    \n    page_header_map = self._GetDataTypeMap('binarycookies_page_header')\n\n    try:\n      page_header = self._ReadStructureFromByteStream(\n          page_data, file_offset, page_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to map page header data at offset: 0x{0:08x} with error: '\n          '{1!s}').format(file_offset, exception))\n\n    for record_offset in page_header.offsets:\n      if parser_mediator.abort:\n        break\n\n      self._ParseRecord(parser_mediator, page_data, record_offset)", "docstring": "Parses a page.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_offset (int): offset of the data relative from the start of\nthe file-like object.\npage_data (bytes): page data.\n\nRaises:\nParseError: when the page cannot be parsed.", "source": "juraj-google-style"}
{"code": "def ReadArtifactDefinitionValues(self, artifact_definition_values):\n    if (not artifact_definition_values):\n        raise errors.FormatError('Missing artifact definition values.')\n    different_keys = (set(artifact_definition_values) - definitions.TOP_LEVEL_KEYS)\n    if different_keys:\n        different_keys = ', '.join(different_keys)\n        raise errors.FormatError('Undefined keys: {0:s}'.format(different_keys))\n    name = artifact_definition_values.get('name', None)\n    if (not name):\n        raise errors.FormatError('Invalid artifact definition missing name.')\n    description = artifact_definition_values.get('doc', None)\n    if (not description):\n        raise errors.FormatError('Invalid artifact definition: {0:s} missing description.'.format(name))\n    artifact_definition = artifact.ArtifactDefinition(name, description=description)\n    if artifact_definition_values.get('collectors', []):\n        raise errors.FormatError('Invalid artifact definition: {0:s} still uses collectors.'.format(name))\n    urls = artifact_definition_values.get('urls', [])\n    if (not isinstance(urls, list)):\n        raise errors.FormatError('Invalid artifact definition: {0:s} urls is not a list.'.format(name))\n    artifact_definition.conditions = artifact_definition_values.get('conditions', [])\n    artifact_definition.provides = artifact_definition_values.get('provides', [])\n    self._ReadLabels(artifact_definition_values, artifact_definition, name)\n    self._ReadSupportedOS(artifact_definition_values, artifact_definition, name)\n    artifact_definition.urls = urls\n    self._ReadSources(artifact_definition_values, artifact_definition, name)\n    return artifact_definition", "docstring": "Reads an artifact definition from a dictionary.\n\nArgs:\nartifact_definition_values (dict[str, object]): artifact definition\nvalues.\n\nReturns:\nArtifactDefinition: an artifact definition.\n\nRaises:\nFormatError: if the format of the artifact definition is not set\nor incorrect.", "source": "codesearchnet"}
{"code": "def get_operation_mtf_dimension_names(self, operation_name):\n    \n    mtf_dimension_names = set()\n    for tensor_name in self.get_operation_input_names(operation_name):\n      mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(\n          tensor_name))\n    for tensor_name in self.get_operation_output_names(operation_name):\n      mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(\n          tensor_name))\n    return mtf_dimension_names", "docstring": "The Mesh TensorFlow dimensions associated with an operation.\n\nArgs:\noperation_name: a string, name of an operation in the graph.\n\nReturns:\na set(string), the names of Mesh TensorFlow dimensions.", "source": "juraj-google-style"}
{"code": "def forward(self, grid, interpolate_pos_encoding: bool=False):\n    batch_size, num_frames, height, width, num_channels = grid.shape\n    grid = grid.mean(1)\n    grid = self.add_2d_positional_embeddings(grid, interpolate_pos_encoding=interpolate_pos_encoding)\n    visual_tokens = grid.view(batch_size, -1, num_channels)\n    visual_tokens_shape = visual_tokens.shape[:-1]\n    device = visual_tokens.device\n    token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device)\n    token_type_embeddings = self.token_type_embeddings(token_type_ids)\n    embeddings = visual_tokens + token_type_embeddings\n    embeddings = self.layer_norm(embeddings)\n    embeddings = self.dropout(embeddings)\n    return embeddings", "docstring": "Args:\ngrid: Array of shape (batch_size, num_frames, height, width, num_channels).\nIt contains processed frames extracted from videos, and is generated by Tvp image preprocessor. Note,\nnum_frames can be 1\ninterpolate_pos_encoding: (bool, *optional*, defaults to `False`):\nWhether to interpolate the pre-trained position encodings.\n\nReturns:\nembeddings: The embedding of grid with size (batch_size, height*width, num_channels)", "source": "github-repos"}
{"code": "def _event_size(event_shape, name=None):\n    with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):\n        event_shape = tf.convert_to_tensor(value=event_shape, dtype=tf.int32, name='event_shape')\n        event_shape_const = tf.get_static_value(event_shape)\n        if (event_shape_const is not None):\n            return np.prod(event_shape_const)\n        else:\n            return tf.reduce_prod(input_tensor=event_shape)", "docstring": "Computes the number of elements in a tensor with shape `event_shape`.\n\nArgs:\nevent_shape: A tensor shape.\nname: The name to use for the tensor op to compute the number of elements\n(if such an op needs to be created).\n\nReturns:\nevent_size: The number of elements in `tensor_shape`.  Returns a numpy int\nwhen the number of elements can be computed immediately.  Otherwise, returns\na scalar tensor.", "source": "codesearchnet"}
{"code": "def _describe_bitmask(\n    bits: int, table: Dict[Any, str], default: str = \"0\"\n) -> str:\n    \n    result = []\n    for bit, name in table.items():\n        if bit & bits:\n            result.append(name)\n    if not result:\n        return default\n    return \"|\".join(result)", "docstring": "Returns a bitmask in human readable form.\n\nThis is a private function, used internally.\n\nArgs:\nbits (int): The bitmask to be represented.\ntable (Dict[Any,str]): A reverse lookup table.\ndefault (Any): A default return value when bits is 0.\n\nReturns: str: A printable version of the bits variable.", "source": "juraj-google-style"}
{"code": "def download_from_url(path, url):\n    filename = url.split('/')[(- 1)]\n    found_file = find_file(path, filename, max_depth=0)\n    if (found_file is None):\n        filename = os.path.join(path, filename)\n        tf.logging.info(('Downloading from %s to %s.' % (url, filename)))\n        inprogress_filepath = (filename + '.incomplete')\n        (inprogress_filepath, _) = urllib.request.urlretrieve(url, inprogress_filepath, reporthook=download_report_hook)\n        print()\n        tf.gfile.Rename(inprogress_filepath, filename)\n        return filename\n    else:\n        tf.logging.info(('Already downloaded: %s (at %s).' % (url, found_file)))\n        return found_file", "docstring": "Download content from a url.\n\nArgs:\npath: string directory where file will be downloaded\nurl: string url\n\nReturns:\nFull path to downloaded file", "source": "codesearchnet"}
{"code": "def from_fortran_src(cls, fortran_src: str, dir: str='.'):\n    import tempfile\n    fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)\n    fp.writelines(fortran_src)\n    fp.close()\n    G = cls.from_fortran_file(fp.name, dir)\n    os.remove(fp.name)\n    return G", "docstring": "Create a GroundedFunctionNetwork instance from a string with raw\nFortran code.\n\nArgs:\nfortran_src: A string with Fortran source code.\ndir: (Optional) - the directory in which the temporary Fortran file\nwill be created (make sure you have write permission!) Defaults to\nthe current directory.\nReturns:\nA GroundedFunctionNetwork instance", "source": "codesearchnet"}
{"code": "def read(self, offset, size):\n    self._file_object.seek(offset, os.SEEK_SET)\n    return self._file_object.read(size)", "docstring": "Reads a byte string from the image object at the specified offset.\n\nArgs:\noffset (int): offset where to start reading.\nsize (int): number of bytes to read.\n\nReturns:\nbytes: data read.", "source": "codesearchnet"}
{"code": "def tz_convert(dt, to_tz, from_tz=None) -> str:\n    logger = logs.get_logger(tz_convert, level='info')\n    (f_tz, t_tz) = (get_tz(from_tz), get_tz(to_tz))\n    from_dt = pd.Timestamp(str(dt), tz=f_tz)\n    logger.debug(f'converting {str(from_dt)} from {f_tz} to {t_tz} ...')\n    return str(pd.Timestamp(str(from_dt), tz=t_tz))", "docstring": "Convert to tz\n\nArgs:\ndt: date time\nto_tz: to tz\nfrom_tz: from tz - will be ignored if tz from dt is given\n\nReturns:\nstr: date & time\n\nExamples:\n>>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')\n>>> tz_convert(dt_1, to_tz='NY')\n'2018-09-10 04:00:00-04:00'\n>>> dt_2 = pd.Timestamp('2018-01-10 16:00')\n>>> tz_convert(dt_2, to_tz='HK', from_tz='NY')\n'2018-01-11 05:00:00+08:00'\n>>> dt_3 = '2018-09-10 15:00'\n>>> tz_convert(dt_3, to_tz='NY', from_tz='JP')\n'2018-09-10 02:00:00-04:00'", "source": "codesearchnet"}
{"code": "def _dqdv_split_frames(cell, tidy=False, **kwargs):\n    (charge_dfs, cycles, minimum_v, maximum_v) = _collect_capacity_curves(cell, direction='charge')\n    ica_charge_dfs = _make_ica_charge_curves(charge_dfs, cycles, minimum_v, maximum_v, **kwargs)\n    ica_charge_df = pd.concat(ica_charge_dfs, axis=1, keys=[k.name for k in ica_charge_dfs])\n    (dcharge_dfs, cycles, minimum_v, maximum_v) = _collect_capacity_curves(cell, direction='discharge')\n    ica_dcharge_dfs = _make_ica_charge_curves(dcharge_dfs, cycles, minimum_v, maximum_v, **kwargs)\n    ica_discharge_df = pd.concat(ica_dcharge_dfs, axis=1, keys=[k.name for k in ica_dcharge_dfs])\n    ica_charge_df.columns.names = ['cycle', 'value']\n    ica_discharge_df.columns.names = ['cycle', 'value']\n    if tidy:\n        ica_charge_df = ica_charge_df.melt('voltage', var_name='cycle', value_name='dq', col_level=0)\n        ica_discharge_df = ica_discharge_df.melt('voltage', var_name='cycle', value_name='dq', col_level=0)\n    return (ica_charge_df, ica_discharge_df)", "docstring": "Returns dqdv data as pandas.DataFrames for all cycles.\n\nArgs:\ncell (CellpyData-object).\ntidy (bool): return in wide format if False (default),\nlong (tidy) format if True.\n\nReturns:\n(charge_ica_frame, discharge_ica_frame) where the frames are\npandas.DataFrames where the first column is voltage ('v') and\nthe following columns are the incremental capcaity for each\ncycle (multi-indexed, where cycle number is on the top level).\n\nExample:\n>>> from cellpy.utils import ica\n>>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell)\n>>> charge_ica_df.plot(x=(\"voltage\", \"v\"))", "source": "codesearchnet"}
{"code": "def load_intent(self, name, file_name, reload_cache=False):\n    self.intents.load(name, file_name, reload_cache)\n    with open(file_name) as f:\n        self.padaos.add_intent(name, f.read().split('\\n'))\n    self.must_train = True", "docstring": "Loads an intent, optionally checking the cache first\n\nArgs:\nname (str): The associated name of the intent\nfile_name (str): The location of the intent file\nreload_cache (bool): Whether to refresh all of cache", "source": "codesearchnet"}
{"code": "def copy(self, name=None):\n        \n        cpy = copy.copy(self)\n        if name:\n            cpy.name = name\n        return cpy", "docstring": "shallow copy of the instruction.\n\nArgs:\nname (str): name to be given to the copied circuit,\nif None then the name stays the same\n\nReturns:\nInstruction: a shallow copy of the current instruction, with the name\nupdated if it was provided", "source": "juraj-google-style"}
{"code": "def add_connection(self, connection_id, internal_id, context):\n        \n        \n        if self._get_connection_state(connection_id) != self.Disconnected:\n            return\n        if self._get_connection_state(internal_id) != self.Disconnected:\n            return\n\n        conn_data = {\n            'state': self.Idle,\n            'microstate': None,\n            'connection_id': connection_id,\n            'internal_id': internal_id,\n            'context': context\n        }\n\n        self._connections[connection_id] = conn_data\n        self._int_connections[internal_id] = conn_data", "docstring": "Add an already created connection. Used to register devices connected before starting the device adapter.\n\nArgs:\nconnection_id (int): The external connection id\ninternal_id (string): An internal identifier for the connection\ncontext (dict): Additional information to associate with this context", "source": "juraj-google-style"}
{"code": "def lint(exclude, skip_untracked, commit_only):\n    \n    \n    exclude = list(exclude) + conf.get('lint.exclude', [])\n    runner = LintRunner(exclude, skip_untracked, commit_only)\n\n    if not runner.run():\n        exit(1)", "docstring": "Lint python files.\n\nArgs:\nexclude (list[str]):\nA list of glob string patterns to test against. If the file/path\nmatches any of those patters, it will be filtered out.\nskip_untracked (bool):\nIf set to **True** it will skip all files not tracked by git.\ncommit_only (bool):\nOnly lint files that are staged for commit.", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(GetRequestPayload, self).read(\n            input_stream,\n            kmip_version=kmip_version\n        )\n        local_stream = utils.BytearrayStream(input_stream.read(self.length))\n\n        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n            self._unique_identifier = primitives.TextString(\n                tag=enums.Tags.UNIQUE_IDENTIFIER\n            )\n            self._unique_identifier.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.KEY_FORMAT_TYPE, local_stream):\n            self._key_format_type = primitives.Enumeration(\n                enum=enums.KeyFormatType,\n                tag=enums.Tags.KEY_FORMAT_TYPE\n            )\n            self._key_format_type.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.KEY_COMPRESSION_TYPE, local_stream):\n            self._key_compression_type = primitives.Enumeration(\n                enum=enums.KeyCompressionType,\n                tag=enums.Tags.KEY_COMPRESSION_TYPE\n            )\n            self._key_compression_type.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(\n                enums.Tags.KEY_WRAPPING_SPECIFICATION,\n                local_stream\n        ):\n            self._key_wrapping_specification = \\\n                objects.KeyWrappingSpecification()\n            self._key_wrapping_specification.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.is_oversized(local_stream)", "docstring": "Read the data encoding the Get request payload and decode it into its\nconstituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def modify_ack_deadline(self, items):\n        \n        ack_ids = [item.ack_id for item in items]\n        seconds = [item.seconds for item in items]\n\n        request = types.StreamingPullRequest(\n            modify_deadline_ack_ids=ack_ids, modify_deadline_seconds=seconds\n        )\n        self._manager.send(request)", "docstring": "Modify the ack deadline for the given messages.\n\nArgs:\nitems(Sequence[ModAckRequest]): The items to modify.", "source": "juraj-google-style"}
{"code": "def run_graph_optimizations(graph_def, input_arrays, output_arrays, config, graph=None):\n    meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)\n    signature = _meta_graph_pb2.SignatureDef()\n    for array in input_arrays:\n        signature.inputs[array.name].name = array.name\n        signature.inputs[array.name].dtype = array.dtype.as_datatype_enum\n        signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n    for array in output_arrays:\n        signature.outputs[array.name].name = array.name\n        signature.outputs[array.name].dtype = array.dtype.as_datatype_enum\n        signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n    meta_graph.signature_def['not_used_key'].CopyFrom(signature)\n    fetch_collection = _meta_graph_pb2.CollectionDef()\n    for array in input_arrays + output_arrays:\n        fetch_collection.node_list.value.append(array.name)\n    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)\n    return tf_optimizer.OptimizeGraph(config, meta_graph)", "docstring": "Apply standard TensorFlow optimizations to the graph_def.\n\nArgs:\ngraph_def: Frozen GraphDef to be optimized.\ninput_arrays: List of arrays that are considered inputs of the graph.\noutput_arrays: List of arrays that are considered outputs of the graph.\nconfig: tf.ConfigProto.\ngraph: TensorFlow Graph. Required when Eager mode is enabled. (default None)\n\nReturns:\nA new, optimized GraphDef.", "source": "github-repos"}
{"code": "def add_to_buffer(self, content, read_position):\n        \n        self.read_position = read_position\n        if self.read_buffer is None:\n            self.read_buffer = content\n        else:\n            self.read_buffer = content + self.read_buffer", "docstring": "Add additional bytes content as read from the read_position.\n\nArgs:\ncontent (bytes): data to be added to buffer working BufferWorkSpac.\nread_position (int): where in the file pointer the data was read from.", "source": "juraj-google-style"}
{"code": "def run_and_gather_logs(name, test_name, test_args, benchmark_type, skip_processing_logs=False):\n    if not (test_name and test_name.startswith('\n        raise ValueError('Expected test_name parameter with a unique test, e.g.: --test_name=\n    test_executable = test_name.rstrip().strip('/').replace(':', '/')\n    if gfile.Exists(os.path.join('bazel-bin', test_executable)):\n        test_executable = os.path.join('bazel-bin', test_executable)\n    else:\n        test_executable = os.path.join('.', test_executable)\n    test_adjusted_name = name\n    gpu_config = gpu_info_lib.gather_gpu_devices()\n    if gpu_config:\n        gpu_name = gpu_config[0].model\n        gpu_short_name_match = re.search('(Tesla|NVIDIA) (K40|K80|P100|V100|A100)', gpu_name)\n        if gpu_short_name_match:\n            gpu_short_name = gpu_short_name_match.group(0)\n            test_adjusted_name = name + '|' + gpu_short_name.replace(' ', '_')\n    temp_directory = tempfile.mkdtemp(prefix='run_and_gather_logs')\n    mangled_test_name = test_adjusted_name.strip('/').replace('|', '_').replace('/', '_').replace(':', '_')\n    test_file_prefix = os.path.join(temp_directory, mangled_test_name)\n    test_file_prefix = '%s.' % test_file_prefix\n    try:\n        if not gfile.Exists(test_executable):\n            test_executable_py3 = test_executable + '.python3'\n            if not gfile.Exists(test_executable_py3):\n                raise ValueError('Executable does not exist: %s' % test_executable)\n            test_executable = test_executable_py3\n        test_args = shlex.split(test_args)\n        os.environ['TEST_REPORT_FILE_PREFIX'] = test_file_prefix\n        start_time = time.time()\n        subprocess.check_call([test_executable] + test_args)\n        if skip_processing_logs:\n            return (None, test_adjusted_name)\n        run_time = time.time() - start_time\n        log_files = gfile.Glob('{}*'.format(test_file_prefix))\n        if not log_files:\n            raise MissingLogsError('No log files found at %s.' % test_file_prefix)\n        return (process_test_logs(test_adjusted_name, test_name=test_name, test_args=test_args, benchmark_type=benchmark_type, start_time=int(start_time), run_time=run_time, log_files=log_files), test_adjusted_name)\n    finally:\n        try:\n            gfile.DeleteRecursively(temp_directory)\n        except OSError:\n            pass", "docstring": "Run the bazel test given by test_name.  Gather and return the logs.\n\nArgs:\nname: Benchmark target identifier.\ntest_name: A unique bazel target, e.g. \"//path/to:test\"\ntest_args: A string containing all arguments to run the target with.\nbenchmark_type: A string representing the BenchmarkType enum; the\nbenchmark type for this target.\nskip_processing_logs: Whether to skip processing test results from log\nfiles.\n\nReturns:\nA tuple (test_results, mangled_test_name), where\ntest_results: A test_log_pb2.TestResults proto, or None if log processing\nis skipped.\ntest_adjusted_name: Unique benchmark name that consists of\nbenchmark name optionally followed by GPU type.\n\nRaises:\nValueError: If the test_name is not a valid target.\nsubprocess.CalledProcessError: If the target itself fails.\nIOError: If there are problems gathering test log output from the test.\nMissingLogsError: If we couldn't find benchmark logs.", "source": "github-repos"}
{"code": "def Parse(text, message):\n    if (not isinstance(text, six.text_type)):\n        text = text.decode('utf-8')\n    try:\n        if (sys.version_info < (2, 7)):\n            js = json.loads(text)\n        else:\n            js = json.loads(text, object_pairs_hook=_DuplicateChecker)\n    except ValueError as e:\n        raise ParseError('Failed to load JSON: {0}.'.format(str(e)))\n    _ConvertMessage(js, message)\n    return message", "docstring": "Parses a JSON representation of a protocol message into a message.\n\nArgs:\ntext: Message JSON representation.\nmessage: A protocol beffer message to merge into.\n\nReturns:\nThe same message passed as argument.\n\nRaises::\nParseError: On JSON parsing problems.", "source": "codesearchnet"}
{"code": "def unregister(self, alias):\n    if alias not in self._service_objects:\n        raise Error(self._device, 'No service is registered with alias \"%s\".' % alias)\n    service_obj = self._service_objects.pop(alias)\n    if service_obj.is_alive:\n        with expects.expect_no_raises('Failed to stop service instance \"%s\".' % alias):\n            service_obj.stop()", "docstring": "Unregisters a service instance.\n\nStops a service and removes it from the manager.\n\nArgs:\nalias: string, the alias of the service instance to unregister.", "source": "github-repos"}
{"code": "def _get_config_files():\n    config_paths = []\n    if os.environ.get('FEDMSG_CONFIG'):\n        config_location = os.environ['FEDMSG_CONFIG']\n    else:\n        config_location = '/etc/fedmsg.d'\n    if os.path.isfile(config_location):\n        config_paths.append(config_location)\n    elif os.path.isdir(config_location):\n        possible_config_files = [os.path.join(config_location, p) for p in os.listdir(config_location) if p.endswith('.py')]\n        for p in possible_config_files:\n            if os.path.isfile(p):\n                config_paths.append(p)\n    if (not config_paths):\n        _log.info('No configuration files found in %s', config_location)\n    return config_paths", "docstring": "Load the list of file paths for fedmsg configuration files.\n\nReturns:\nlist: List of files containing fedmsg configuration.", "source": "codesearchnet"}
{"code": "def sphere(radius=0.5, sectors=32, rings=16) -> VAO:\n    R = (1.0 / (rings - 1))\n    S = (1.0 / (sectors - 1))\n    vertices = ([0] * ((rings * sectors) * 3))\n    normals = ([0] * ((rings * sectors) * 3))\n    uvs = ([0] * ((rings * sectors) * 2))\n    (v, n, t) = (0, 0, 0)\n    for r in range(rings):\n        for s in range(sectors):\n            y = math.sin((((- math.pi) / 2) + ((math.pi * r) * R)))\n            x = (math.cos((((2 * math.pi) * s) * S)) * math.sin(((math.pi * r) * R)))\n            z = (math.sin((((2 * math.pi) * s) * S)) * math.sin(((math.pi * r) * R)))\n            uvs[t] = (s * S)\n            uvs[(t + 1)] = (r * R)\n            vertices[v] = (x * radius)\n            vertices[(v + 1)] = (y * radius)\n            vertices[(v + 2)] = (z * radius)\n            normals[n] = x\n            normals[(n + 1)] = y\n            normals[(n + 2)] = z\n            t += 2\n            v += 3\n            n += 3\n    indices = ((([0] * rings) * sectors) * 6)\n    i = 0\n    for r in range((rings - 1)):\n        for s in range((sectors - 1)):\n            indices[i] = ((r * sectors) + s)\n            indices[(i + 1)] = (((r + 1) * sectors) + (s + 1))\n            indices[(i + 2)] = ((r * sectors) + (s + 1))\n            indices[(i + 3)] = ((r * sectors) + s)\n            indices[(i + 4)] = (((r + 1) * sectors) + s)\n            indices[(i + 5)] = (((r + 1) * sectors) + (s + 1))\n            i += 6\n    vbo_vertices = numpy.array(vertices, dtype=numpy.float32)\n    vbo_normals = numpy.array(normals, dtype=numpy.float32)\n    vbo_uvs = numpy.array(uvs, dtype=numpy.float32)\n    vbo_elements = numpy.array(indices, dtype=numpy.uint32)\n    vao = VAO('sphere', mode=mlg.TRIANGLES)\n    vao.buffer(vbo_vertices, '3f', ['in_position'])\n    vao.buffer(vbo_normals, '3f', ['in_normal'])\n    vao.buffer(vbo_uvs, '2f', ['in_uv'])\n    vao.index_buffer(vbo_elements, index_element_size=4)\n    return vao", "docstring": "Creates a sphere.\n\nKeyword Args:\nradius (float): Radius or the sphere\nrings (int): number or horizontal rings\nsectors (int): number of vertical segments\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance", "source": "codesearchnet"}
{"code": "def cmap_from_color(color, dark=False):\n    if dark:\n        return sns.dark_palette(color, as_cmap=True)\n    else:\n        return sns.light_palette(color, as_cmap=True)", "docstring": "Generates a matplotlib colormap from a single color.\n\nColormap will be built, by default, from white to ``color``.\n\nArgs:\n\ncolor: Can be one of several things:\n\n1. Hex code\n2. HTML color name\n3. RGB tuple\n\ndark (bool): If ``True``, colormap will be built from ``color`` to\nblack. Default is ``False``, which builds a colormap from\nwhite to ``color``.\n\nReturns:\n\ncolormap: A matplotlib colormap", "source": "codesearchnet"}
{"code": "def _getFuncArgs(func):\n  r\n  code = func.func_code\n  Defaults = func.func_defaults\n\n  nargs = code.co_argcount\n  ArgNames = code.co_varnames[:nargs]\n\n  Args = OrderedDict()\n  argCount = len(ArgNames)\n  defCount = len(Defaults) if Defaults else 0\n  diff = argCount - defCount\n\n  for i in range(0, diff):\n    Args[ArgNames[i]] = {}\n\n  for i in range(diff, argCount):\n    Args[ArgNames[i]] = {'default': Defaults[i - diff]}\n\n  return Args", "docstring": "r\"\"\"Gives the details on the args of the given func.\n\nArgs:\nfunc (function): The function to get details on.", "source": "juraj-google-style"}
{"code": "def _extract_response_xml(self, domain, response):\n    attributes = {}\n    alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}\n    try:\n        xml_root = ET.fromstring(response._content)\n        for xml_child in xml_root.findall('SD\n            if ((xml_child.tag in alexa_keys) and (alexa_keys[xml_child.tag] in xml_child.attrib)):\n                attributes[xml_child.tag.lower()] = xml_child.attrib[alexa_keys[xml_child.tag]]\n    except ParseError:\n        pass\n    attributes['domain'] = domain\n    return {'attributes': attributes}", "docstring": "Extract XML content of an HTTP response into dictionary format.\n\nArgs:\nresponse: HTML Response objects\nReturns:\nA dictionary: {alexa-ranking key : alexa-ranking value}.", "source": "codesearchnet"}
{"code": "def functional_pulse(func):\n    \n    @functools.wraps(func)\n    def to_pulse(duration, *args, name=None, **kwargs):\n        \n        if isinstance(duration, int) and duration > 0:\n            samples = func(duration, *args, **kwargs)\n            samples = np.asarray(samples, dtype=np.complex128)\n            return SamplePulse(samples=samples, name=name)\n        raise PulseError('The first argument must be an integer value representing duration.')\n\n    return to_pulse", "docstring": "A decorator for generating SamplePulse from python callable.\nArgs:\nfunc (callable): A function describing pulse envelope.\nRaises:\nPulseError: when invalid function is specified.", "source": "juraj-google-style"}
{"code": "def get_rebind_dict(rebinder: Callable, target: Symbolic) -> Dict[str, Any]:\n    signature = pg_typing.signature(rebinder, auto_typing=False, auto_doc=False)\n    if len(signature.args) == 2:\n        select_fn = lambda k, v, p: rebinder(k, v)\n    elif len(signature.args) == 3:\n        select_fn = rebinder\n    else:\n        raise TypeError(f\"Rebinder function '{signature.id}' should accept 2 or 3 arguments (key_path, value, [parent]). Encountered: {signature.args}.\")\n    path_value_pairs = dict()\n\n    def _fill_rebind_dict(path, value, parent):\n        new_value = select_fn(path, value, parent)\n        if new_value is not value:\n            path_value_pairs[str(path)] = new_value\n            return TraverseAction.CONTINUE\n        return TraverseAction.ENTER\n    traverse(target, _fill_rebind_dict)\n    return path_value_pairs", "docstring": "Generate rebind dict using rebinder on target value.\n\nArgs:\nrebinder: A callable object with signature: (key_path: utils.KeyPath, value:\nAny) -> Any or (key_path: utils.KeyPath, value: Any, parent: Any) -> Any.\nIf rebinder returns the same value from input, the value is considered\nunchanged. Otherwise it will be put into the returning rebind dict. See\n`Symbolic.rebind` for more details.\ntarget: Upon which value the rebind dict is computed.\n\nReturns:\nAn ordered dict of key path string to updated value.", "source": "github-repos"}
{"code": "def attribute_label(self, attribute_id, label, action='GET', params=None):\n        \n        if params is None:\n            params = {}\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if action == 'GET':\n            return self.tc_requests.get_attribute_label(\n                self.api_type,\n                self.api_sub_type,\n                self.unique_id,\n                attribute_id,\n                label,\n                owner=self.owner,\n                params=params,\n            )\n        if action == 'DELETE':\n            return self.tc_requests.delete_attribute_label(\n                self.api_type,\n                self.api_sub_type,\n                self.unique_id,\n                attribute_id,\n                label,\n                owner=self.owner,\n            )\n\n        self._tcex.handle_error(925, ['action', 'attribute_label', 'action', 'action', action])\n        return None", "docstring": "Gets a security labels from a attribute\n\nArgs:\nattribute_id:\nlabel:\naction:\nparams:\n\nReturns: Security label json", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        super(AssetState, self).Serialize(writer)\n        writer.WriteUInt256(self.AssetId)\n        writer.WriteByte(self.AssetType)\n        writer.WriteVarString(self.Name)\n\n        if self.Amount.value > -1:\n            writer.WriteFixed8(self.Amount, unsigned=True)\n        else:\n            writer.WriteFixed8(self.Amount)\n\n        if type(self.Available) is not Fixed8:\n            raise Exception(\"AVAILABLE IS NOT FIXED 8!\")\n        writer.WriteFixed8(self.Available, unsigned=True)\n        writer.WriteByte(self.Precision)\n        writer.WriteByte(b'\\x00')\n        writer.WriteFixed8(self.Fee)\n        writer.WriteUInt160(self.FeeAddress)\n        self.Owner.Serialize(writer)\n        writer.WriteUInt160(self.Admin)\n        writer.WriteUInt160(self.Issuer)\n        writer.WriteUInt32(self.Expiration)\n        writer.WriteBool(self.IsFrozen)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def _measure_tensor_list_column_widths(self, data):\n    max_timestamp_width = 0\n    if data:\n        max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0\n        max_timestamp_width = len('[%.3f] ' % max_rel_time_ms) + 1\n    max_timestamp_width = max(max_timestamp_width, len(self._TIMESTAMP_COLUMN_HEAD) + 1)\n    max_dump_size_width = 0\n    for dump in data:\n        dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n        if len(dump_size_str) + 1 > max_dump_size_width:\n            max_dump_size_width = len(dump_size_str) + 1\n    max_dump_size_width = max(max_dump_size_width, len(self._DUMP_SIZE_COLUMN_HEAD) + 1)\n    max_op_type_width = 0\n    for dump in data:\n        op_type = self._debug_dump.node_op_type(dump.node_name)\n        if len(op_type) + 1 > max_op_type_width:\n            max_op_type_width = len(op_type) + 1\n    max_op_type_width = max(max_op_type_width, len(self._OP_TYPE_COLUMN_HEAD) + 1)\n    return (max_timestamp_width, max_dump_size_width, max_op_type_width)", "docstring": "Determine the maximum widths of the timestamp and op-type column.\n\nThis method assumes that data is sorted in the default order, i.e.,\nby ascending timestamps.\n\nArgs:\ndata: (list of DebugTensorDaum) the data based on which the maximum\ncolumn widths will be determined.\n\nReturns:\n(int) maximum width of the timestamp column. 0 if data is empty.\n(int) maximum width of the dump size column. 0 if data is empty.\n(int) maximum width of the op type column. 0 if data is empty.", "source": "github-repos"}
{"code": "def _check_not_finalized(self) -> None:\n    if self._finalized:\n        raise RuntimeError('Graph is finalized and cannot be modified.')", "docstring": "Check if the graph is finalized.\n\nRaises:\nRuntimeError: If the graph finalized.", "source": "github-repos"}
{"code": "def iter_predict(self, X, include_init=False):\n    for probas in self.iter_predict_proba(X, include_init=include_init):\n        (yield self.encoder_.inverse_transform(np.argmax(probas, axis=1)))", "docstring": "Returns the predicted classes for ``X`` at every stage of the boosting procedure.\n\nArguments:\nX (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.\nSparse matrices are accepted only if they are supported by the weak model.\ninclude_init (bool, default=False): If ``True`` then the prediction from\n``init_estimator`` will also be returned.\n\nReturns:\niterator of arrays of shape (n_samples, n_classes) containing the predicted classes at\neach stage.", "source": "codesearchnet"}
{"code": "def set_video_pos(self, x1, y1, x2, y2):\n    position = ('%s %s %s %s' % (str(x1), str(y1), str(x2), str(y2)))\n    self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))", "docstring": "Set the video position on the screen\n\nArgs:\nx1 (int): Top left x coordinate (px)\ny1 (int): Top left y coordinate (px)\nx2 (int): Bottom right x coordinate (px)\ny2 (int): Bottom right y coordinate (px)", "source": "codesearchnet"}
{"code": "def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]:\n        \n        return self._run(self.qualifyContractsAsync(*contracts))", "docstring": "Fully qualify the given contracts in-place. This will fill in\nthe missing fields in the contract, especially the conId.\n\nReturns a list of contracts that have been successfully qualified.\n\nThis method is blocking.\n\nArgs:\ncontracts: Contracts to qualify.", "source": "juraj-google-style"}
{"code": "def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenMelodyDecoderConfig, **kwargs):\n    return cls(text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder\nconfigurations.\n\nReturns:\n[`MusicgenMelodyConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def apply(self, pts: torch.Tensor) -> torch.Tensor:\n    rot_mats = self.get_rot_mats()\n    return rot_vec_mul(rot_mats, pts)", "docstring": "Apply the current Rotation as a rotation matrix to a set of 3D coordinates.\n\nArgs:\npts:\nA [*, 3] set of points\nReturns:\n[*, 3] rotated points", "source": "github-repos"}
{"code": "def init_logger(self, log_dir=None, level=logging.INFO):\n        \n        logging.basicConfig(\n            format='%(asctime)s - %(levelname)s - %(message)s', level=level)\n        logger = logging.getLogger(__name__)\n        if log_dir and self.rank == 0:\n            filename = '{}.log'.format(self.timestamp)\n            log_file = osp.join(log_dir, filename)\n            self._add_file_handler(logger, log_file, level=level)\n        return logger", "docstring": "Init the logger.\n\nArgs:\nlog_dir(str, optional): Log file directory. If not specified, no\nlog file will be used.\nlevel (int or str): See the built-in python logging module.\n\nReturns:\n:obj:`~logging.Logger`: Python logger.", "source": "juraj-google-style"}
{"code": "def trimpath(attributes):\n    \n    if 'pathdepth' in attributes:\n        if attributes['pathdepth'] != 'full':\n            pathelements = []\n            remainder = attributes['file']\n            limit = int(attributes['pathdepth'])\n            while len(pathelements) < limit and remainder:\n                remainder, pe = os.path.split(remainder)\n                pathelements.insert(0, pe)\n            return os.path.join(*pathelements)\n        return attributes['file']\n    return os.path.basename(attributes['file'])", "docstring": "Simplifies the given path.\n\nIf pathdepth is in attributes, the last pathdepth elements will be\nreturned. If pathdepth is \"full\", the full path will be returned.\nOtherwise the filename only will be returned.\n\nArgs:\nattributes: The element attributes.\n\nReturns:\nThe trimmed path.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs, is_training=True, dropout_keep_prob=0.5):\n    \n    self._input_shape = tuple(inputs.get_shape().as_list())\n    net = inputs\n\n    final_index = self._num_layers - 1\n    for layer_id in xrange(self._num_layers):\n      net = self._layers[layer_id](net)\n\n      if final_index != layer_id or self._activate_final:\n        \n        if self._use_dropout:\n          keep_prob = utils.smart_cond(\n              is_training, true_fn=lambda: dropout_keep_prob,\n              false_fn=lambda: tf.constant(1.0)\n          )\n          net = tf.nn.dropout(net, keep_prob=keep_prob)\n        net = self._activation(net)\n\n    return net", "docstring": "Assembles the `MLP` and connects it to the graph.\n\nArgs:\ninputs: A 2D Tensor of size `[batch_size, input_size]`.\nis_training: A bool or tf.Bool Tensor. Indicates whether we are\ncurrently training. Defaults to `True`.\ndropout_keep_prob: The probability that each element is kept when\nboth `use_dropout` and `is_training` are True. Defaults to 0.5.\nReturns:\nA 2D Tensor of size `[batch_size, output_sizes[-1]]`.", "source": "juraj-google-style"}
{"code": "def content_type(self):\n    if ((self.media_type is not None) and self.media_type.startswith('text/') and (self.charset is not None)):\n        return ((self.media_type + '; charset=') + self.charset)\n    else:\n        return self.media_type", "docstring": "Return the value of Content-Type header field.\n\nThe value for the Content-Type header field is determined from\nthe :attr:`media_type` and :attr:`charset` data attributes.\n\nReturns:\nstr: Value of Content-Type header field", "source": "codesearchnet"}
{"code": "def union(self, other):\n        \n        operation = bool.__or__\n        self.cross_product(other, operation)\n        return self", "docstring": "Constructs an unminimized DFA recognizing the union of the languages of two given DFAs.\nArgs:\nother (DFA): The other DFA that will be used\nfor the union operation\nReturns:\nDFA: The resulting DFA", "source": "juraj-google-style"}
{"code": "def __call__(self, *args, **kwargs):", "docstring": "Executes this callable.\n\nThis behaves like a regular op - in eager mode, it immediately starts\nexecution, returning results. In graph mode, it creates ops which return\nsymbolic TensorFlow values (like `tf.Tensor`, `tf.data.Dataset`,\netc.). For example, `tf.function` callables typically generate a\n`tf.raw_ops.PartitionedCall` op, but not always - the\nexact operations being generated are an internal implementation detail.\n\nArgs:\n*args: positional argument for this call\n**kwargs: keyword arguments for this call\nReturns:\nThe execution results.", "source": "github-repos"}
{"code": "def _SetExtractionParsersAndPlugins(self, configuration, session):\n    names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(parser_filter_expression=configuration.parser_filter_expression)\n    session.enabled_parser_names = list(names_generator)\n    session.parser_filter_expression = configuration.parser_filter_expression", "docstring": "Sets the parsers and plugins before extraction.\n\nArgs:\nconfiguration (ProcessingConfiguration): processing configuration.\nsession (Session): session.", "source": "codesearchnet"}
{"code": "def commit_offsets_sync(self, offsets):\n        \n        assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'\n        assert all(map(lambda k: isinstance(k, TopicPartition), offsets))\n        assert all(map(lambda v: isinstance(v, OffsetAndMetadata),\n                       offsets.values()))\n        self._invoke_completed_offset_commit_callbacks()\n        if not offsets:\n            return\n\n        while True:\n            self.ensure_coordinator_ready()\n\n            future = self._send_offset_commit_request(offsets)\n            self._client.poll(future=future)\n\n            if future.succeeded():\n                return future.value\n\n            if not future.retriable():\n                raise future.exception \n\n            time.sleep(self.config['retry_backoff_ms'] / 1000)", "docstring": "Commit specific offsets synchronously.\n\nThis method will retry until the commit completes successfully or an\nunrecoverable error is encountered.\n\nArguments:\noffsets (dict {TopicPartition: OffsetAndMetadata}): what to commit\n\nRaises error on failure", "source": "juraj-google-style"}
{"code": "def random_weights(n, bounds=(0.0, 1.0), total=1.0):\n    low = bounds[0]\n    high = bounds[1]\n    if (high < low):\n        raise ValueError('Higher bound must be greater or equal to lower bound')\n    if (((n * high) < total) or ((n * low) > total)):\n        raise ValueError('solution not possible with given n and bounds')\n    w = ([0] * n)\n    tgt = (- float(total))\n    for i in range(n):\n        rn = ((n - i) - 1)\n        rhigh = (rn * high)\n        rlow = (rn * low)\n        lowb = max(((- rhigh) - tgt), low)\n        highb = min(((- rlow) - tgt), high)\n        rw = random.uniform(lowb, highb)\n        w[i] = rw\n        tgt += rw\n    random.shuffle(w)\n    return w", "docstring": "Generate pseudo-random weights.\n\nReturns a list of random weights that is of length\nn, where each weight is in the range bounds, and\nwhere the weights sum up to total.\n\nUseful for creating random portfolios when benchmarking.\n\nArgs:\n* n (int): number of random weights\n* bounds ((low, high)): bounds for each weight\n* total (float): total sum of the weights", "source": "codesearchnet"}
{"code": "def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True):\n    parts = read_tuple_name.split('__')\n    parts[0] = self._fill_right(parts[0], '-', self.prefix_width)\n    if (read_tuple_id is not None):\n        parts[1] = '{:x}'.format(read_tuple_id)\n    parts[1] = self._fill_left(parts[1], '0', self.read_tuple_id_width)\n    if synchronize_widths:\n        new_segments = []\n        segments = parts[2][1:(- 1)].split('),(')\n        for segment in segments:\n            values = segment.split(',')\n            values[0] = values[0].zfill(self.genome_id_width)\n            values[1] = values[1].zfill(self.chr_id_width)\n            values[3] = values[3].zfill(self.coor_width)\n            values[4] = values[4].zfill(self.coor_width)\n            new_segments.append((('(' + ','.join(values)) + ')'))\n        parts[2] = ','.join(new_segments)\n    return '__'.join(parts)", "docstring": "Apply profile on a read tuple name and update read tuple ID.\n\nArgs:\nread_tuple_name (str): Read tuple name to be updated.\nread_tuple_id (id): New read tuple ID.\nsynchronize_widths (bool): Update widths (in accordance to this profile).", "source": "codesearchnet"}
{"code": "def scandir(path='.'):\n    scandir_path = fsdecode(path).replace('\\\\', '/')\n    if (not is_storage(scandir_path)):\n        return os_scandir(scandir_path)\n    return _scandir_generator(is_bytes=isinstance(fspath(path), (bytes, bytearray)), scandir_path=scandir_path, system=get_instance(scandir_path))", "docstring": "Return an iterator of os.DirEntry objects corresponding to the entries in\nthe directory given by path. The entries are yielded in arbitrary order,\nand the special entries '.' and '..' are not included.\n\nEquivalent to \"os.scandir\".\n\nArgs:\npath (path-like object): Path or URL.\nIf path is of type bytes (directly or indirectly through the\nPathLike interface), the type of the name and path attributes\nof each os.DirEntry will be bytes; in all other circumstances,\nthey will be of type str.\n\nReturns:\nGenerator of os.DirEntry: Entries information.", "source": "codesearchnet"}
{"code": "def _free_up_space(self, size, this_rel_path=None):\n    space = ((self.size + size) - self.maxsize)\n    if (space <= 0):\n        return\n    removes = []\n    for row in self.database.execute('SELECT path, size, time FROM files ORDER BY time ASC'):\n        if (space > 0):\n            removes.append(row[0])\n            space -= row[1]\n        else:\n            break\n    for rel_path in removes:\n        if (rel_path != this_rel_path):\n            global_logger.debug('Deleting {}'.format(rel_path))\n            self.remove(rel_path)", "docstring": "If there are not size bytes of space left, delete files\nuntil there is\n\nArgs:\nsize: size of the current file\nthis_rel_path: rel_pat to the current file, so we don't delete it.", "source": "codesearchnet"}
{"code": "def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):\n    (X_train, X_val, X_test, y_train, y_val, y_test) = split_data(X, y)\n    tokenizer.build_vocab(X_train)\n    process_save(X_train, y_train, tokenizer, path.join(proc_data_dir, 'train.bin'), train=True, **kwargs)\n    process_save(X_val, y_val, tokenizer, path.join(proc_data_dir, 'val.bin'), **kwargs)\n    process_save(X_test, y_test, tokenizer, path.join(proc_data_dir, 'test.bin'), **kwargs)", "docstring": "Setup data while splitting into a training, validation, and test set.\n\nArgs:\nX: text data,\ny: data labels,\ntokenizer: A Tokenizer instance\nproc_data_dir: Directory for the split and processed data", "source": "codesearchnet"}
{"code": "def batch_dot(x0: FloatArray['... n'], x1: FloatArray['... n'], *, keepdims: bool=False, xnp: numpy_utils.NpModule=...) -> FloatArray['... 1?']:\n    y = xnp.einsum('...m,...m->...', x0, x1)\n    return y[..., None] if keepdims else y", "docstring": "Dot product on the last dimension, with broadcasting support.\n\nContrary to `np.dot`, the behavior is consistent for 1-dim vs n-dim (while\ndot act as matmul).\nFirst dimensions are always broadcasted.\n\nArgs:\nx0: Vector array\nx1: Vector array\nkeepdims: If True, returns `FloatArray['... 1']`\nxnp: Numpy module to use\n\nReturns:\nThe dot product along the last axis.", "source": "github-repos"}
{"code": "def _get_mutation(self, node: cfg.CFGNode, arg_dict: dict[str, cfg.Variable], subst: datatypes.AliasingDict[str, cfg.Variable], retvar: cfg.Variable) -> list[function.Mutation]:\n    mutations = []\n    if any((f.mutated_type for f in self.pytd_sig.params)):\n        subst = abstract_utils.with_empty_substitutions(subst, self.pytd_sig, node, self.ctx)\n    for formal in self.pytd_sig.params:\n        actual = arg_dict[formal.name]\n        if formal.mutated_type is None:\n            continue\n        args = actual.data\n        for arg in args:\n            if isinstance(arg, _instance_base.SimpleValue):\n                for names_actuals in self.mutated_type_parameters[formal]:\n                    for tparam, type_actual in names_actuals:\n                        log.info('Mutating %s to %s', tparam.name, pytd_utils.Print(type_actual))\n                        type_actual_val = self.ctx.convert.pytd_cls_to_instance_var(type_actual, subst, node, discard_concrete_values=True)\n                        mutations.append(function.Mutation(arg, tparam.full_name, type_actual_val))\n    if self.name == '__new__':\n        for ret in retvar.data:\n            if ret.cls.full_name != 'builtins.type':\n                for t in ret.cls.template:\n                    if t.full_name in subst:\n                        mutations.append(function.Mutation(ret, t.full_name, subst[t.full_name]))\n    return mutations", "docstring": "Mutation for changing the type parameters of mutable arguments.\n\nThis will adjust the type parameters as needed for pytd functions like:\ndef append_float(x: list[int]):\nx = list[int or float]\nThis is called after all the signature matching has succeeded, and we\nknow we're actually calling this function.\n\nArgs:\nnode: The current CFG node.\narg_dict: A map of strings to cfg.Variable instances.\nsubst: Current type parameters.\nretvar: A variable of the return value.\n\nReturns:\nA list of Mutation instances.\nRaises:\nValueError: If the pytd contains invalid information for mutated params.", "source": "github-repos"}
{"code": "def is_legal_object(self, c: OntologyClass) -> bool:\n        \n        ranges = self.included_ranges()\n        return not ranges or c in ranges or c.super_classes_closure() & ranges", "docstring": "is_legal_object(c) = true if\n- c in included_ranges(self) or\n- super_classes_closure(c) intersection included_ranges(self) is not empty\n\nArgs:\nc:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _create_variables(self, values, trainable):\n\n    def create_variable(value):\n        if backend.is_tensor(value) or isinstance(value, (np.ndarray, np.generic)):\n            dtype = value.dtype\n            if is_float_dtype(dtype):\n                dtype = None\n            return self.add_weight(value.shape, initializer=value, dtype=dtype, trainable=trainable)\n        elif isinstance(value, (bool, int, float)):\n            dtype = standardize_dtype(type(value))\n            if is_float_dtype(dtype):\n                dtype = None\n            return self.add_weight((), initializer=backend.convert_to_tensor(value), dtype=dtype, trainable=trainable)\n        else:\n            return value\n    variables = jax.tree_util.tree_map(create_variable, values)\n    if trainable:\n        self.params = variables\n    else:\n        self.state = variables\n    flat_variables, _ = jax.tree_util.tree_flatten(variables)\n    return flat_variables", "docstring": "Create a structure of variables from a structure of JAX arrays.\n\n`values` is traversed via JAX's `tree_map`. When a leaf is a JAX array\nor a tensor-like object, a corresponding variable is created with it as\nthe initial value. The resulting structure of variables is assigned to\n`self.params` or `self.state` depending on `trainable`. Then, a\nflattened version of the variables is returned for tracking.\n`self.params` or `self.state` are intentionally not tracked because\nstructures like `TrackedList` interfere with `jax.tree_utils`.\nNote that leaf objects that are not JAX arrays and not tensor-like are\nleft intact as they are assumed to be configuration used by the model.\n\nArgs:\nvalues: the structure of values to traverse.\ntrainable: whether to create trainable variables.\n\nReturns:\nflat list of variables initialized with `values` for tracking.", "source": "github-repos"}
{"code": "def tracers(tracersfile):\n    \n    if not tracersfile.is_file():\n        return None\n    tra = {}\n    with tracersfile.open('rb') as fid:\n        readbin = partial(_readbin, fid)\n        magic = readbin()\n        if magic > 8000:  \n            magic -= 8000\n            readbin()\n            readbin = partial(readbin, file64=True)\n        if magic < 100:\n            raise ParsingError(tracersfile,\n                               'magic > 100 expected to get tracervar info')\n        nblk = magic % 100\n        readbin('f', 2)  \n        readbin()  \n        readbin('f')  \n        ninfo = readbin()\n        ntra = readbin(nwords=nblk, unpack=False)\n        readbin('f')  \n        curv = readbin()\n        if curv:\n            readbin('f')  \n        infos = []  \n        for _ in range(ninfo):\n            infos.append(b''.join(readbin('b', 16)).strip().decode())\n            tra[infos[-1]] = []\n        if magic > 200:\n            ntrace_elt = readbin()\n            if ntrace_elt > 0:\n                readbin('f', ntrace_elt)  \n        for ntrab in ntra:  \n            data = readbin('f', ntrab * ninfo)\n            for idx, info in enumerate(infos):\n                tra[info].append(data[idx::ninfo])\n    return tra", "docstring": "Extract tracers data.\n\nArgs:\ntracersfile (:class:`pathlib.Path`): path of the binary tracers file.\n\nReturns:\ndict of list of numpy.array:\nTracers data organized by attribute and block.", "source": "juraj-google-style"}
{"code": "def _int_to_pos(self, flat_position):\n        \n        return flat_position % self.env.action_space.screen_shape[0],\\\n            flat_position % self.env.action_space.screen_shape[1]", "docstring": "Returns x, y from flat_position integer.\n\nArgs:\nflat_position: flattened position integer\n\nReturns: x, y", "source": "juraj-google-style"}
{"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n        \n        init_params = super(Chainer, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)\n\n        for argument in [Chainer._use_mpi, Chainer._num_processes, Chainer._process_slots_per_host,\n                         Chainer._additional_mpi_options]:\n\n            value = init_params['hyperparameters'].pop(argument, None)\n            if value:\n                init_params[argument[len('sagemaker_'):]] = value\n\n        image_name = init_params.pop('image')\n        framework, py_version, tag, _ = framework_name_from_image(image_name)\n\n        if not framework:\n            \n            \n            init_params['image_name'] = image_name\n            return init_params\n\n        init_params['py_version'] = py_version\n        init_params['framework_version'] = framework_version_from_tag(tag)\n\n        training_job_name = init_params['base_job_name']\n\n        if framework != cls.__framework_name__:\n            raise ValueError(\"Training job: {} didn't use image for requested framework\".format(training_job_name))\n        return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be downloaded.\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(RevokeRequestPayload, self).read(\n            istream,\n            kmip_version=kmip_version\n        )\n        tstream = BytearrayStream(istream.read(self.length))\n\n        self.unique_identifier = attributes.UniqueIdentifier()\n        self.unique_identifier.read(tstream, kmip_version=kmip_version)\n\n        self.revocation_reason = objects.RevocationReason()\n        self.revocation_reason.read(tstream, kmip_version=kmip_version)\n\n        if self.is_tag_next(enums.Tags.COMPROMISE_OCCURRENCE_DATE, tstream):\n            self.compromise_occurrence_date = primitives.DateTime(\n                tag=enums.Tags.COMPROMISE_OCCURRENCE_DATE)\n            self.compromise_occurrence_date.read(\n                tstream,\n                kmip_version=kmip_version\n            )\n\n        self.is_oversized(tstream)\n        self.validate()", "docstring": "Read the data encoding the RevokeRequestPayload object and decode it\ninto its constituent parts.\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def clean_for_serialization(self, data):\n    if isinstance(data, dict):\n        for k in data.keys():\n            if k.startswith('__'):\n                del data[k]\n            elif isinstance(data[k], bson.objectid.ObjectId):\n                del data[k]\n            elif isinstance(data[k], datetime.datetime):\n                data[k] = (data[k].isoformat() + 'Z')\n            elif isinstance(data[k], dict):\n                data[k] = self.clean_for_serialization(data[k])\n            elif isinstance(data[k], list):\n                data[k] = [self.clean_for_serialization(item) for item in data[k]]\n    return data", "docstring": "Clean data in preparation for serialization.\n\nDeletes items having key either a BSON, datetime, dict or a list instance, or\nstarting with __.\n\nArgs:\ndata: Sample data to be serialized.\n\nReturns:\nCleaned data dictionary.", "source": "codesearchnet"}
{"code": "def add_scales_bar(img, bbox):\n    tc = TileCoordinate(bbox.min.zoom, bbox.min.x, bbox.min.y)\n    meters_per_pixel = tc.resolution()\n    one_km_bar = int((1000 * (1 / meters_per_pixel)))\n    col_black = (0, 0, 0)\n    line_start = (100, (img.size[1] - 100))\n    line_end = ((line_start[0] + one_km_bar), line_start[1])\n    whiskers_left = [line_start[0], (line_start[1] - 15), line_start[0], (line_start[1] + 15)]\n    whiskers_right = [line_end[0], (line_end[1] - 15), line_end[0], (line_end[1] + 15)]\n    draw = ImageDraw.Draw(img)\n    draw.line([line_start, line_end], fill=col_black, width=5)\n    draw.line(whiskers_left, fill=col_black, width=2)\n    draw.line(whiskers_right, fill=col_black, width=2)\n    draw.text(((line_start[0] + 10), (line_start[1] + 10)), fill=col_black, text='1 km')\n    del draw", "docstring": "Add a scales bar to the  map.\n\nCalculates the resolution at the current latitude and\ninserts the corresponding scales bar on the map.\n\nArgs:\nimg (Image): Image object to which the scales bar will be added.\nbbox (TileBB): boundaries of the map", "source": "codesearchnet"}
{"code": "def remove(path, follow_symlink=False):\n    \n    if os.path.isfile(path):\n        os.remove(path)\n    elif os.path.islink(path):\n        if follow_symlink:\n            remove(os.readlink(path))\n        os.unlink(path)\n    else:\n        shutil.rmtree(path)", "docstring": "Implements an remove function that will delete files, folder trees and symlink trees\n\n1.) Remove a file\n2.) Remove a symlink and follow into with a recursive rm if follow_symlink\n3.) Remove directory with rmtree\n\nArgs:\npath (str): path to remove\nfollow_symlink(bool): follow symlinks and removes whatever is in them", "source": "juraj-google-style"}
{"code": "def _make_query_from_terms(self, terms):\n        \n\n        expanded_terms = self._expand_terms(terms)\n\n        cterms = ''\n\n        if expanded_terms['doc']:\n            cterms = self.backend._and_join(expanded_terms['doc'])\n\n        if expanded_terms['keywords']:\n            if cterms:\n                cterms = self.backend._and_join(\n                    cterms, self.backend._join_keywords(expanded_terms['keywords']))\n            else:\n                cterms = self.backend._join_keywords(expanded_terms['keywords'])\n\n        logger.debug('Dataset terms conversion: `{}` terms converted to `{}` query.'.format(terms, cterms))\n        return cterms", "docstring": "Creates a query for dataset from decomposed search terms.\n\nArgs:\nterms (dict or unicode or string):\n\nReturns:\ntuple: First element is str with FTS query, second is parameters of the query.", "source": "juraj-google-style"}
{"code": "def sca_xsect(scatterer, h_pol=True):\n    \n\n    if scatterer.psd_integrator is not None:\n        return scatterer.psd_integrator.get_angular_integrated(\n            scatterer.psd, scatterer.get_geometry(), \"sca_xsect\")\n\n    old_geom = scatterer.get_geometry()    \n\n    def d_xsect(thet, phi):\n        (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg)        \n        Z = scatterer.get_Z()        \n        I = sca_intensity(scatterer, h_pol)\n        return I * np.sin(thet)\n\n    try:\n        xsect = dblquad(d_xsect, 0.0, 2*np.pi, lambda x: 0.0, \n            lambda x: np.pi)[0]\n    finally:\n        scatterer.set_geometry(old_geom)\n\n    return xsect", "docstring": "Scattering cross section for the current setup, with polarization.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe scattering cross section.", "source": "juraj-google-style"}
{"code": "def default_metric_definitions(cls, toolkit):\n        \n        if toolkit is RLToolkit.COACH:\n            return [\n                {'Name': 'reward-training',\n                 'Regex': '^Training>.*Total reward=(.*?),'},\n                {'Name': 'reward-testing',\n                 'Regex': '^Testing>.*Total reward=(.*?),'}\n            ]\n        elif toolkit is RLToolkit.RAY:\n            float_regex = \"[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?\"  \n            return [\n                {'Name': 'episode_reward_mean',\n                 'Regex': 'episode_reward_mean: (%s)' % float_regex},\n                {'Name': 'episode_reward_max',\n                 'Regex': 'episode_reward_max: (%s)' % float_regex}\n            ]", "docstring": "Provides default metric definitions based on provided toolkit.\n\nArgs:\ntoolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.\n\nReturns:\nlist: metric definitions", "source": "juraj-google-style"}
{"code": "def starting_wall_time(self):\n    return self._starting_wall_time", "docstring": "Get the starting timestamp of the instrumented TensorFlow program.\n\nWhen there are multiple hosts (i.e., multiple tfdbg file sets), the earliest\ntimestamp among the file sets is returned. It is assumed to be the job that\nstarts first (e.g., the coordinator).\n\nReturns:\nStarting timestamp in seconds since the epoch, as a float.", "source": "github-repos"}
{"code": "def _load_from_hdx(self, object_type, id_field):\n    (success, result) = self._read_from_hdx(object_type, id_field)\n    if success:\n        self.old_data = self.data\n        self.data = result\n        return True\n    logger.debug(result)\n    return False", "docstring": "Helper method to load the HDX object given by identifier from HDX\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field (str): HDX object identifier\n\nReturns:\nbool: True if loaded, False if not", "source": "codesearchnet"}
{"code": "def get_latest_package_from_string(txt, paths=None, error=False):\n    \n    from rez.utils.formatting import PackageRequest\n\n    req = PackageRequest(txt)\n    return get_latest_package(name=req.name,\n                              range_=req.range_,\n                              paths=paths,\n                              error=error)", "docstring": "Get the latest package found within the given request string.\n\nArgs:\ntxt (str): Request, eg 'foo-1.2+'\npaths (list of str, optional): paths to search for package families,\ndefaults to `config.packages_path`.\nerror (bool): If True, raise an error if no package is found.\n\nReturns:\n`Package` object, or None if no package is found.", "source": "juraj-google-style"}
{"code": "def get_token(wallet: 'Wallet', token_str: str) -> 'NEP5Token.NEP5Token':\n    \n    if token_str.startswith('0x'):\n        token_str = token_str[2:]\n\n    token = None\n    for t in wallet.GetTokens().values():\n        if token_str in [t.symbol, t.ScriptHash.ToString()]:\n            token = t\n            break\n\n    if not isinstance(token, NEP5Token.NEP5Token):\n        raise ValueError(\"The given token argument does not represent a known NEP5 token\")\n    return token", "docstring": "Try to get a NEP-5 token based on the symbol or script_hash\n\nArgs:\nwallet: wallet instance\ntoken_str: symbol or script_hash (accepts script hash with or without 0x prefix)\nRaises:\nValueError: if token is not found\n\nReturns:\nNEP5Token instance if found.", "source": "juraj-google-style"}
{"code": "def _read_marcxml(xml):\n    marc_xml = _read_content_or_path(xml)\n    marc_xml = _oai_to_xml(marc_xml)\n    marc_xml = _add_namespace(marc_xml)\n    file_obj = StringIO.StringIO(marc_xml)\n    return ET.parse(file_obj)", "docstring": "Read MARC XML or OAI file, convert, add namespace and return XML in\nrequired format with all necessities.\n\nArgs:\nxml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\n\nReturns:\nobj: Required XML parsed with ``lxml.etree``.", "source": "codesearchnet"}
{"code": "def multiplicative_jitter(x, epsilon=1e-2):\n  \n  if epsilon == 0:\n    return x\n  return x * mtf.random_uniform(\n      x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype)", "docstring": "Multiply values by a random number between 1-epsilon and 1+epsilon.\n\nMakes models more resilient to rounding errors introduced by bfloat16.\nThis seems particularly important for logits.\n\nArgs:\nx: a mtf.Tensor\nepsilon: a floating point value\n\nReturns:\na mtf.Tensor with the same type and shape as x.", "source": "juraj-google-style"}
{"code": "def tinsel(to_patch, module_name, decorator=mock_decorator):\n    \n    def fn_decorator(function):\n        def wrapper(*args, **kwargs):\n            with patch(to_patch, decorator):\n                m = importlib.import_module(module_name)\n                reload(m)\n                function(*args, **kwargs)\n\n            reload(m)\n        return wrapper\n    return fn_decorator", "docstring": "Decorator for simple in-place decorator mocking for tests\n\nArgs:\nto_patch: the string path of the function to patch\nmodule_name: complete string path of the module to reload\ndecorator (optional): replacement decorator. By default a pass-through\nwill be used.\n\nReturns:\nA wrapped test function, during the context of execution the specified\npath is patched.", "source": "juraj-google-style"}
{"code": "def create_config_profile(msg_type):\n    \n    msg_type = msg_type.lower()\n\n    if msg_type not in CONFIG.keys():\n        raise UnsupportedMessageTypeError(msg_type)\n\n    display_required_items(msg_type)\n\n    if get_user_ack():\n        profile_name = input(\"Profile Name: \")\n        data = get_data_from_user(msg_type)\n        auth = get_auth_from_user(msg_type)\n        configure_profile(msg_type, profile_name, data, auth)", "docstring": "Create a profile for the given message type.\n\nArgs:\n:msg_type: (str) message type to create config entry.", "source": "juraj-google-style"}
{"code": "def OptimizeGraph(config_proto, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None, strip_default_attributes=False):\n    if not isinstance(config_proto, config_pb2.ConfigProto):\n        raise TypeError(f'Argument `config_proto` should be a tf.ConfigProto, received type: {type(config_proto).__name__}')\n    if is_oss:\n        optimize_method = tf_opt.TF_OptimizeGraphSerialized\n        metagraph = metagraph.SerializeToString()\n    else:\n        optimize_method = tf_opt.TF_OptimizeGraph\n    if cluster is not None:\n        out_graph = optimize_method(cluster.tf_cluster, config_proto.SerializeToString(), metagraph, verbose, graph_id, strip_default_attributes)\n    else:\n        with _OPTIMIZE_GRAPH_CLUSTER_LOCK:\n            cluster = gcluster.Cluster()\n            try:\n                out_graph = optimize_method(cluster.tf_cluster, config_proto.SerializeToString(), metagraph, verbose, graph_id, strip_default_attributes)\n            finally:\n                cluster.Shutdown()\n    if is_oss:\n        out_graph = graph_pb2.GraphDef.FromString(out_graph)\n    return out_graph", "docstring": "Optimize the provided metagraph.\n\nFor best results, the signature_def field in `metagraph` should be populated\nwith information about input (feed) and output (fetch) tensors.\n\nArgs:\nconfig_proto: a ConfigProto protobuf.\nmetagraph: a MetagraphDef protobuf.\nverbose: whether to log optimization results.\ngraph_id: a string identifying this graph.\ncluster: a grappler cluster object representing hardware resources\navailable to run this graph.\nstrip_default_attributes: whether graph node attributes having default\nvalues should be removed after all the optimization passes. This\noption is useful if the resulting graph will be executed by an older\nprocess that might not know some of the recently added attributes.", "source": "github-repos"}
{"code": "def entitlements(self, request, pk=None):\n    enterprise_customer_user = self.get_object()\n    instance = {'entitlements': enterprise_customer_user.entitlements}\n    serializer = serializers.EnterpriseCustomerUserEntitlementSerializer(instance, context={'request': request})\n    return Response(serializer.data)", "docstring": "Retrieve the list of entitlements available to this learner.\n\nOnly those entitlements are returned that satisfy enterprise customer's data sharing setting.\n\nArguments:\nrequest (HttpRequest): Reference to in-progress request instance.\npk (Int): Primary key value of the selected enterprise learner.\n\nReturns:\n(HttpResponse): Response object containing a list of learner's entitlements.", "source": "codesearchnet"}
{"code": "def set_peer_link(self, value=None, default=False, disable=False):\n        \n        return self._configure_mlag('peer-link', value, default, disable)", "docstring": "Configures the mlag peer-link value\n\nArgs:\nvalue (str): The value to configure the peer-link\ndefault (bool): Configures the peer-link using the\ndefault keyword\ndisable (bool): Negates the peer-link using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "juraj-google-style"}
{"code": "def make_datastore_query(self, cursor=None):\n    \n    filters = {}\n    filters['__key__ >= '] = _key_for_namespace(\n        self.namespace_start, self.app)\n    filters['__key__ <= '] = _key_for_namespace(\n        self.namespace_end, self.app)\n\n    return datastore.Query('__namespace__',\n                           filters=filters,\n                           keys_only=True,\n                           cursor=cursor,\n                           _app=self.app)", "docstring": "Returns a datastore.Query that generates all namespaces in the range.\n\nArgs:\ncursor: start cursor for the query.\n\nReturns:\nA datastore.Query instance that generates db.Keys for each namespace in\nthe NamespaceRange.", "source": "juraj-google-style"}
{"code": "def run_eagerly(self):\n    if self.dynamic and self._run_eagerly is False:\n        raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You cannot set `run_eagerly=False`.')\n    if self._cluster_coordinator and self._run_eagerly:\n        raise ValueError('When using `Model` with `ParameterServerStrategy`, `run_eagerly` is not supported.')\n    return self.dynamic or self._run_eagerly or (def_function.functions_run_eagerly() and self._run_eagerly is None)", "docstring": "Settable attribute indicating whether the model should run eagerly.\n\nRunning eagerly means that your model will be run step by step,\nlike Python code. Your model might run slower, but it should become easier\nfor you to debug it by stepping into individual layer calls.\n\nBy default, we will attempt to compile your model to a static graph to\ndeliver the best execution performance.\n\nReturns:\nBoolean, whether the model should run eagerly.", "source": "github-repos"}
{"code": "def _update_listing_client_kwargs(client_kwargs, max_request_entries):\n        \n        client_kwargs = client_kwargs.copy()\n        if max_request_entries:\n            client_kwargs['num_results'] = max_request_entries\n        return client_kwargs", "docstring": "Updates client kwargs for listing functions.\n\nArgs:\nclient_kwargs (dict): Client arguments.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ndict: Updated client_kwargs", "source": "juraj-google-style"}
{"code": "def convert_exchange_to_compounds(model):\n    \n    \n    exchanges = set()\n    for reaction in model.reactions:\n        equation = reaction.properties.get('equation')\n        if equation is None:\n            continue\n\n        if len(equation.compounds) != 1:\n            \n            \n            if (len(equation.left) == 0) != (len(equation.right) == 0):\n                logger.warning('Exchange reaction {} has more than one'\n                               ' compound, it was not converted to'\n                               ' exchange compound'.format(reaction.id))\n            continue\n\n        exchanges.add(reaction.id)\n\n    \n    for reaction_id in exchanges:\n        equation = model.reactions[reaction_id].equation\n        compound, value = equation.compounds[0]\n        if compound.compartment != model.extracellular_compartment:\n            continue\n\n        if compound in model.exchange:\n            logger.warning(\n                'Compound {} is already defined in the exchange'\n                ' definition'.format(compound))\n            continue\n\n        \n        \n        \n        \n        lower_flux, upper_flux = None, None\n        if reaction_id in model.limits:\n            _, lower, upper = model.limits[reaction_id]\n            if lower is not None:\n                lower_flux = lower * abs(value)\n            if upper is not None:\n                upper_flux = upper * abs(value)\n\n        if lower_flux is None and equation.direction == Direction.Forward:\n            lower_flux = 0\n        if upper_flux is None and equation.direction == Direction.Reverse:\n            upper_flux = 0\n\n        \n        \n        if value > 0:\n            lower_flux, upper_flux = (\n                -upper_flux if upper_flux is not None else None,\n                -lower_flux if lower_flux is not None else None)\n\n        model.exchange[compound] = (\n            compound, reaction_id, lower_flux, upper_flux)\n\n        model.reactions.discard(reaction_id)\n        model.limits.pop(reaction_id, None)", "docstring": "Convert exchange reactions in model to exchange compounds.\n\nOnly exchange reactions in the extracellular compartment are converted.\nThe extracelluar compartment must be defined for the model.\n\nArgs:\nmodel: :class:`NativeModel`.", "source": "juraj-google-style"}
{"code": "def trailing_stop_loss(self, accountID, **kwargs):\n    return self.create(accountID, order=TrailingStopLossOrderRequest(**kwargs))", "docstring": "Shortcut to create a Trailing Stop Loss Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a TrailingStopLossOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def delete_service(self, service: str):\n        \n        \n        if not self._manager:\n            raise RuntimeError('Services can only be deleted '\n                               'on swarm manager nodes')\n\n        \n        self._api_client.remove_service(service)", "docstring": "Removes/stops a docker service.\n\nOnly the manager nodes can delete a service\n\nArgs:\nservice (string): Service name or ID", "source": "juraj-google-style"}
{"code": "def _apply_op(self, op_fn):\n    raise NotImplementedError()", "docstring": "Applies given tensor-to-tensor op.\n\nThis method is used for implementing ops that take a tensor and return a new\ntensor, such as tf.expand_dims or tf.transpose. Implementing wrappers\nshould apply `op_fn` to the backing tensor(s) and return an new wrapper\ninstance with the updated backing tensor.\n\nArgs:\nop_fn: Callable that applies tensor-to-tensor op to the given Tensor.\nE.g. applies tf.expand_dims.\n\nReturns:\nA TensorWrapper instance with updated backing tensor(s).", "source": "github-repos"}
{"code": "def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,\n                         gdal_type=GDT_Float32):\n        \n        UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))\n        driver = gdal_GetDriverByName(str('GTiff'))\n        try:\n            ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)\n        except Exception:\n            print('Cannot create output file %s' % f_name)\n            return\n        ds.SetGeoTransform(geotransform)\n        try:\n            ds.SetProjection(srs.ExportToWkt())\n        except AttributeError or Exception:\n            ds.SetProjection(srs)\n        ds.GetRasterBand(1).SetNoDataValue(nodata_value)\n        \n        if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),\n                                                              numpy.dtype('float')]:\n            data = numpy.where(numpy.isnan(data), nodata_value, data)\n        ds.GetRasterBand(1).WriteArray(data)\n        ds = None", "docstring": "Output Raster to GeoTiff format file.\n\nArgs:\nf_name: output gtiff file name.\nn_rows: Row count.\nn_cols: Col count.\ndata: 2D array data.\ngeotransform: geographic transformation.\nsrs: coordinate system.\nnodata_value: nodata value.\ngdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,\nGDT_Float32 as default.", "source": "juraj-google-style"}
{"code": "def _get_attribute(self, node, obj, cls, name, valself):\n    if cls:\n        node, attr = self._get_attribute_computed(node, cls, name, valself, compute_function='__getattribute__')\n    else:\n        attr = None\n    if attr is None:\n        if isinstance(obj, abstract.Class):\n            node, attr = self._lookup_from_mro_and_handle_descriptors(node, obj, name, valself, skip=())\n        else:\n            node, attr = self._get_member(node, obj, name, valself)\n    is_unknown_instance_attribute = attr is None and obj.maybe_missing_members\n    if attr is None and cls:\n        node, attr = self.get_attribute(node, cls, name, valself)\n        if attr:\n            if is_unknown_instance_attribute:\n                attr2 = self._lookup_from_mro(node, cls, name, valself, ())\n                if any((isinstance(v, abstract.FUNCTION_TYPES) for v in attr2.data)):\n                    is_unknown_instance_attribute = False\n        elif not is_unknown_instance_attribute:\n            node, attr = self._get_attribute_computed(node, cls, name, valself, compute_function='__getattr__')\n    if is_unknown_instance_attribute:\n        attr = self.ctx.new_unsolvable(node)\n    if attr is not None:\n        attr = self._filter_var(node, attr)\n    return (node, attr)", "docstring": "Get an attribute from an object or its class.\n\nThe underlying method called by all of the (_)get_(x_)attribute methods.\nAttempts to resolve an attribute first with __getattribute__, then by\nfetching it from the object, then by fetching it from the class, and\nfinally with __getattr__.\n\nArguments:\nnode: The current node.\nobj: The object.\ncls: The object's class, may be None.\nname: The attribute name.\nvalself: The binding to the self reference.\n\nReturns:\nA tuple of the node and the attribute, or None if it was not found.", "source": "github-repos"}
{"code": "def run_pipeline_steps(steps, context):\n    \n    logger.debug(\"starting\")\n    assert isinstance(\n        context, dict), \"context must be a dictionary, even if empty {}.\"\n\n    if steps is None:\n        logger.debug(\"No steps found to execute.\")\n    else:\n        step_count = 0\n\n        for step in steps:\n            step_instance = Step(step)\n            step_instance.run_step(context)\n            step_count += 1\n\n        logger.debug(f\"executed {step_count} steps\")\n\n    logger.debug(\"done\")", "docstring": "Run the run_step(context) method of each step in steps.\n\nArgs:\nsteps: list. Sequence of Steps to execute\ncontext: pypyr.context.Context. The pypyr context. Will mutate.", "source": "juraj-google-style"}
{"code": "def create(self, resource, timeout=-1):\n        \n        return self._client.create(resource, timeout=timeout, default_values=self.DEFAULT_VALUES)", "docstring": "Creates a scope.\n\nArgs:\nresource (dict): Object to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Created scope.", "source": "juraj-google-style"}
{"code": "def partitioned_call_op(name: str, args: Sequence[core.Tensor], is_stateful: bool, tout: Sequence[Any], config: Any=None, executor_type: Optional[str]=None, xla_compile_attr: Any=None) -> ops.Operation:\n    if config is None:\n        config = function_utils.get_disabled_rewriter_config()\n    if executor_type is None:\n        executor_type = ''\n    args = [ops.convert_to_tensor(x) for x in args]\n    tin_attr = attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(type=[x.dtype.as_datatype_enum for x in args]))\n    tout_attr = attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(type=tout))\n    func_attr = attr_value_pb2.AttrValue(func=attr_value_pb2.NameAttrList(name=name))\n    executor_type_attr = attr_value_pb2.AttrValue(s=compat.as_bytes(executor_type))\n    config_proto = attr_value_pb2.AttrValue(s=config)\n    op_name = 'StatefulPartitionedCall' if is_stateful else 'PartitionedCall'\n    op_attrs = {'Tin': tin_attr, 'Tout': tout_attr, 'f': func_attr, 'config_proto': config_proto, 'executor_type': executor_type_attr}\n    if xla_compile_attr is not None:\n        op_attrs[attributes_lib.XLA_COMPILE] = xla_compile_attr\n    op = ops.get_default_graph().create_op(op_name, args, tout, name=op_name, attrs=op_attrs)\n    return op", "docstring": "Generates a function call op respecting device annotations.\n\nArgs:\nname: Name of the function to call.\nargs: The arguments of the function, including captured inputs.\nis_stateful: If the function is stateful.\ntout: a list containing the output dtypes enums\nconfig: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`,\nall optimizations are disabled. Currently only handled for eager defined\nfunctions.\nexecutor_type: (Optional) A string for the name of the executor to be used\nin the function call. If not set, or set to an empty string, the default\ntensorflow executor will be used.\nxla_compile_attr: (Optional) value of the XLA compilation attribute.\n\nReturns:\nReturns the operation.", "source": "github-repos"}
{"code": "def update_aliases(self):\n    changed = False\n    try:\n        response = self.client.api.get_room_state(self.room_id)\n    except MatrixRequestError:\n        return False\n    for chunk in response:\n        content = chunk.get('content')\n        if content:\n            if ('aliases' in content):\n                aliases = content['aliases']\n                if (aliases != self.aliases):\n                    self.aliases = aliases\n                    changed = True\n            if (chunk.get('type') == 'm.room.canonical_alias'):\n                canonical_alias = content['alias']\n                if (self.canonical_alias != canonical_alias):\n                    self.canonical_alias = canonical_alias\n                    changed = True\n    if (changed and self.aliases and (not self.canonical_alias)):\n        self.canonical_alias = self.aliases[0]\n    return changed", "docstring": "Get aliases information from room state\n\nReturns:\nboolean: True if the aliases changed, False if not", "source": "codesearchnet"}
{"code": "def GetUsernameByIdentifier(self, user_identifier, session_identifier=CURRENT_SESSION):\n    user_accounts = self._user_accounts.get(session_identifier, {})\n    user_account = user_accounts.get(user_identifier, None)\n    if (not user_account):\n        return ''\n    return (user_account.username or '')", "docstring": "Retrieves the username based on an user identifier.\n\nArgs:\nuser_identifier (str): user identifier, either a UID or SID.\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nReturns:\nstr: username.", "source": "codesearchnet"}
{"code": "def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n    if context.executing_eagerly() and (not hasattr(input, 'graph')) and (not isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):\n        input = ops.convert_to_tensor(input)\n        np_out_type = out_type.as_numpy_dtype\n        num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)\n        return ops.convert_to_tensor(num_elements, dtype=out_type)\n    with ops.name_scope(name, 'Size', [input]) as name:\n        if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n            return gen_math_ops.prod(gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)\n        else:\n            input = ops.convert_to_tensor(input)\n            input_shape = input.get_shape()\n            if optimize:\n                if input_shape.is_fully_defined():\n                    return constant(input_shape.num_elements(), out_type, name=name)\n                if input_shape.dims and any((dim == 0 for dim in input_shape.dims)):\n                    return constant(0, out_type, name=name)\n            return gen_array_ops.size(input, name=name, out_type=out_type)", "docstring": "Returns the size of a tensor.\n\nArgs:\ninput: A `Tensor` or `SparseTensor`.\nname: A name for the operation (optional).\noptimize: if true, encode the size as a constant when possible.\nout_type: (Optional) The specified non-quantized numeric output type of the\noperation. Defaults to `tf.int32`.\n\nReturns:\nA `Tensor` of type `out_type`. Defaults to `tf.int32`.", "source": "github-repos"}
{"code": "def query(self, queryEngine, query=None, vendorSpecific=None, **kwargs):\n        \n        response = self.queryResponse(queryEngine, query, vendorSpecific, **kwargs)\n        return self._read_stream_response(response)", "docstring": "See Also: queryResponse()\n\nArgs:\nqueryEngine:\nquery:\nvendorSpecific:\n**kwargs:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def subtract(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Subtract().symbolic_call(x1, x2)\n    return backend.numpy.subtract(x1, x2)", "docstring": "Subtract arguments element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, element-wise difference of `x1` and `x2`.", "source": "github-repos"}
{"code": "def write_command(self, command: Command):\n    _logger.debug('Write command.')\n    data = command.to_bytes()\n    (yield from self._connection.write(data))\n    self._data_event_dispatcher.notify_write(data)", "docstring": "Write a command to the stream.\n\nArgs:\ncommand: The command.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def sql_column_like_drug(self, column_name: str) -> str:\n        \n        clauses = [\n            \"{col} LIKE {fragment}\".format(\n                col=column_name,\n                fragment=sql_string_literal(f))\n            for f in self.sql_like_fragments\n        ]\n        return \"({})\".format(\" OR \".join(clauses))", "docstring": "Returns SQL like\n\n.. code-block:: sql\n\n(column_name LIKE '%drugname1%' OR\ncolumn_name LIKE '%drugname2%')\n\nfor the drug names that this Drug object knows about.\n\nArgs:\ncolumn_name: column name, pre-escaped if necessary\n\nReturns:\nSQL fragment as above", "source": "juraj-google-style"}
{"code": "def export_outputs_for_mode(mode, serving_export_outputs=None, predictions=None, loss=None, metrics=None):\n    if mode not in SIGNATURE_KEY_MAP:\n        raise ValueError(f'Export output type not found for `mode`: {mode}. Expected one of: {list(SIGNATURE_KEY_MAP.keys())}.')\n    signature_key = SIGNATURE_KEY_MAP[mode]\n    if mode_keys.is_predict(mode):\n        return get_export_outputs(serving_export_outputs, predictions)\n    elif mode_keys.is_train(mode):\n        return {signature_key: export_output_lib.TrainOutput(loss=loss, predictions=predictions, metrics=metrics)}\n    else:\n        return {signature_key: export_output_lib.EvalOutput(loss=loss, predictions=predictions, metrics=metrics)}", "docstring": "Util function for constructing a `ExportOutput` dict given a mode.\n\nThe returned dict can be directly passed to `build_all_signature_defs` helper\nfunction as the `export_outputs` argument, used for generating a SignatureDef\nmap.\n\nArgs:\nmode: A `ModeKeys` specifying the mode.\nserving_export_outputs: Describes the output signatures to be exported to\n`SavedModel` and used during serving. Should be a dict or None.\npredictions: A dict of Tensors or single Tensor representing model\npredictions. This argument is only used if serving_export_outputs is not\nset.\nloss: A dict of Tensors or single Tensor representing calculated loss.\nmetrics: A dict of (metric_value, update_op) tuples, or a single tuple.\nmetric_value must be a Tensor, and update_op must be a Tensor or Op\n\nReturns:\nDictionary mapping the key to an `ExportOutput` object.\nThe key is the expected SignatureDef key for the mode.\n\nRaises:\nValueError: if an appropriate ExportOutput cannot be found for the mode.", "source": "github-repos"}
{"code": "def finalize_variable_values(self, var_list):\n    if self.use_ema:\n        self._overwrite_model_variables_with_average_value(var_list)", "docstring": "Set the final value of model's trainable variables.\n\nSometimes there are some extra steps before ending the variable updates,\nsuch as overriding the model variables with its average value.\n\nArgs:\nvar_list: list of model variables.", "source": "github-repos"}
{"code": "def download(self, resource_id):\n    self.resource_id(str(resource_id))\n    self._request_uri = '{}/download'.format(self._request_uri)", "docstring": "Update the request URI to download the document for this resource.\n\nArgs:\nresource_id (integer): The group id.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    if (file_object.read(1) != b'{'):\n        raise errors.UnableToParseFile('is not a valid JSON file, missing opening brace.')\n    file_object.seek(0, os.SEEK_SET)\n    file_entry = parser_mediator.GetFileEntry()\n    file_system = file_entry.GetFileSystem()\n    json_file_path = parser_mediator.GetDisplayName()\n    split_path = file_system.SplitPath(json_file_path)\n    try:\n        if ('containers' in split_path):\n            if ('config.json' in split_path):\n                self._ParseContainerConfigJSON(parser_mediator, file_object)\n            if json_file_path.endswith('-json.log'):\n                self._ParseContainerLogJSON(parser_mediator, file_object)\n        elif ('graph' in split_path):\n            if ('json' in split_path):\n                self._ParseLayerConfigJSON(parser_mediator, file_object)\n    except ValueError as exception:\n        if (exception == 'No JSON object could be decoded'):\n            raise errors.UnableToParseFile(exception)\n        else:\n            raise", "docstring": "Parses various Docker configuration and log files in JSON format.\n\nThis methods checks whether the file_object points to a docker JSON config\nor log file, and calls the corresponding _Parse* function to generate\nEvents.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.\nValueError: if the JSON file cannot be decoded.", "source": "codesearchnet"}
{"code": "def added_tokens_decoder(self) -> dict[int, AddedToken]:\n    return self._tokenizer.get_added_tokens_decoder()", "docstring": "Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.\n\nReturns:\n`Dict[str, int]`: The added tokens.", "source": "github-repos"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    pathspec = getattr(event, 'pathspec', None)\n    if pathspec is None:\n      return\n    if self._paths_with_hashes.get(pathspec, None):\n      \n      \n      return\n    hash_attributes = {}\n    for attribute_name, attribute_value in event.GetAttributes():\n      if attribute_name.endswith('_hash'):\n        hash_attributes[attribute_name] = attribute_value\n    self._paths_with_hashes[pathspec] = hash_attributes", "docstring": "Analyzes an event and creates extracts hashes as required.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"}
{"code": "async def get_json(self, url, json_callback=None, **kwargs):\n    if (not json_callback):\n        json_callback = json.loads\n    response = (await self.request(method='get', url=url, **kwargs))\n    return json_callback(response)", "docstring": "Get a URL and return its JSON response.\n\nArgs:\nurl (str): URL to be requested.\njson_callback (func): Custom JSON loader function. Defaults\nto :meth:`json.loads`.\nkwargs (dict): Additional arguments to pass through to the\nrequest.\nReturns:\nresponse body returned by :func:`json_callback` function.", "source": "codesearchnet"}
{"code": "def set_headline(self, level, message, timestamp=None, now_reference=None):\n    if ((self.headline is not None) and (self.headline.message == message)):\n        self.headline.created = monotonic()\n        self.headline.count += 1\n        return\n    msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)\n    self.headline = msg_object\n    self._last_message_id += 1", "docstring": "Set the persistent headline message for this service.\n\nArgs:\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents\ntimestamp (float): An optional monotonic value in seconds for when the message was created\nnow_reference (float): If timestamp is not relative to monotonic() as called from this\nmodule then this should be now() as seen by whoever created the timestamp.", "source": "codesearchnet"}
{"code": "def clear_agent(self, short_name, client_id):\n    if (short_name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=short_name)\n    if (short_name not in self.agents):\n        raise ArgumentError('No agent registered for service', short_name=short_name)\n    if (client_id != self.agents[short_name]):\n        raise ArgumentError('Client was not registered for service', short_name=short_name, client_id=client_id, current_client=self.agents[short_name])\n    del self.agents[short_name]", "docstring": "Remove a client id from being the command handler for a service.\n\nArgs:\nshort_name (str): The name of the service to set an agent\nfor.\nclient_id (str): A globally unique id for the client that\nshould no longer receive commands for this service.", "source": "codesearchnet"}
{"code": "def print(self, tag=None, name=None):\n    _name = name\n    if (_name is None):\n        _name = 'print'\n    fn = streamsx.topology.functions.print_flush\n    if (tag is not None):\n        tag = (str(tag) + ': ')\n        fn = (lambda v: streamsx.topology.functions.print_flush((tag + str(v))))\n    sp = self.for_each(fn, name=_name)\n    sp._op().sl = _SourceLocation(_source_info(), 'print')\n    return sp", "docstring": "Prints each tuple to stdout flushing after each tuple.\n\nIf `tag` is not `None` then each tuple has \"tag: \" prepended\nto it before printing.\n\nArgs:\ntag: A tag to prepend to each tuple.\nname(str): Name of the resulting stream.\nWhen `None` defaults to a generated name.\nReturns:\nstreamsx.topology.topology.Sink: Stream termination.\n\n.. versionadded:: 1.6.1 `tag`, `name` parameters.\n\n.. versionchanged:: 1.7\nNow returns a :py:class:`Sink` instance.", "source": "codesearchnet"}
{"code": "def _add_monomer(self, monomer, mon_vector, move_direction):\n    translate_by = (self.molecule.cart_coords[self.end] + (self.link_distance * move_direction))\n    monomer.translate_sites(range(len(monomer)), translate_by)\n    if (not self.linear_chain):\n        self._align_monomer(monomer, mon_vector, move_direction)\n    does_cross = False\n    for (i, site) in enumerate(monomer):\n        try:\n            self.molecule.append(site.specie, site.coords, properties=site.properties)\n        except:\n            does_cross = True\n            polymer_length = len(self.molecule)\n            self.molecule.remove_sites(range((polymer_length - i), polymer_length))\n            break\n    if (not does_cross):\n        self.length += 1\n        self.end += len(self.monomer)", "docstring": "extend the polymer molecule by adding a monomer along mon_vector direction\n\nArgs:\nmonomer (Molecule): monomer molecule\nmon_vector (numpy.array): monomer vector that points from head to tail.\nmove_direction (numpy.array): direction along which the monomer\nwill be positioned", "source": "codesearchnet"}
{"code": "def verify_dataset_shuffled(x):\n    assert isinstance(x, data_types.DatasetV2)\n    graph_def = get_dataset_graph_def(x)\n    for node in graph_def.node:\n        if node.op.startswith('ShuffleDataset'):\n            return True\n    for function in graph_def.library.function:\n        for node in function.node_def:\n            if node.op.startswith('ShuffleDataset'):\n                return True\n    logging.warning('Expected a shuffled dataset but input dataset `x` is not shuffled. Please invoke `shuffle()` on input dataset.')\n    return False", "docstring": "Verifies that the dataset is shuffled.\n\nArgs:\nx: Dataset passed as an input to the model.\n\nReturns:\nboolean, whether the input dataset is shuffled or not.", "source": "github-repos"}
{"code": "def set_license(self, license, **kwargs):\n    data = {'license': license}\n    return self.http_post('/license', post_data=data, **kwargs)", "docstring": "Add a new license.\n\nArgs:\nlicense (str): The license string\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabPostError: If the server cannot perform the request\n\nReturns:\ndict: The new license information", "source": "codesearchnet"}
{"code": "def dense_labels_to_sparse(dense, length):\n    flat_values = array_ops.reshape(dense, [-1])\n    flat_indices = math_ops.range(array_ops.shape(flat_values, out_type=dtypes.int64)[0])\n    mask = array_ops.sequence_mask(length, maxlen=array_ops.shape(dense)[1])\n    flat_mask = array_ops.reshape(mask, [-1])\n    indices = array_ops.expand_dims(array_ops.boolean_mask(flat_indices, flat_mask), 1)\n    values = array_ops.boolean_mask(flat_values, flat_mask)\n    sparse = sparse_tensor.SparseTensor(indices=indices, values=math_ops.cast(values, dtypes.int32), dense_shape=array_ops.shape(flat_values, out_type=dtypes.int64))\n    reshaped = sparse_ops.sparse_reshape(sparse, array_ops.shape(dense))\n    max_length = math_ops.reduce_max(length)\n    return sparse_tensor.SparseTensor(indices=reshaped.indices, values=reshaped.values, dense_shape=[math_ops.cast(reshaped.dense_shape[0], dtypes.int64), math_ops.cast(max_length, dtypes.int64)])", "docstring": "Convert dense labels with sequence lengths to sparse tensor.\n\nArgs:\ndense: tensor of shape [batch, max_length]\nlength: int tensor of shape [batch] The length of each sequence in dense.\n\nReturns:\ntf.sparse.SparseTensor with values only for the valid elements of sequences.", "source": "github-repos"}
{"code": "def _send_to_consumer(self, block):\n        \n        self._consumer.write(block)\n        self._sent += len(block)\n        if self._callback:\n            self._callback(self._sent, self.length)", "docstring": "Send a block of bytes to the consumer.\n\nArgs:\nblock (str): Block of bytes", "source": "juraj-google-style"}
{"code": "def as_string(self) -> str:\n    if len(self._messages) != 1:\n        raise ValueError('FHIRPath did not evaluate to a single string.')\n    if fhir_types.is_type_or_profile_of_code(self._messages[0]):\n        return codes.get_code_as_string(self._messages[0])\n    return proto_utils.get_value_at_field(self._messages[0], 'value')", "docstring": "Returns the result as a string.\n\nRaises:\nValueError if the `EvaluationResult` is not a single string.", "source": "github-repos"}
{"code": "def _generate_bucket_value(self, bucketing_id):\n    \n\n    ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE\n    return math.floor(ratio * MAX_TRAFFIC_VALUE)", "docstring": "Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).\n\nArgs:\nbucketing_id: ID for bucketing.\n\nReturns:\nBucket value corresponding to the provided bucketing ID.", "source": "juraj-google-style"}
{"code": "def export(self, name=None):\n    with ops.name_scope(name, '%s_lookup_table_export_values' % self.name, [self.resource_handle]):\n        with ops.colocate_with(self.resource_handle):\n            exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n    return (exported_keys, exported_values)", "docstring": "Returns tensors of all keys and values in the table.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA pair of tensors with the first tensor containing all keys and the\nsecond tensors containing all values in the table.", "source": "github-repos"}
{"code": "def read_elastic_tensor(self):\n    header_pattern = 'TOTAL ELASTIC MODULI \\\\(kBar\\\\)\\\\s+Direction\\\\s+([X-Z][X-Z]\\\\s+)+\\\\-+'\n    row_pattern = ('[X-Z][X-Z]\\\\s+' + '\\\\s+'.join((['(\\\\-*[\\\\.\\\\d]+)'] * 6)))\n    footer_pattern = '\\\\-+'\n    et_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)\n    self.data['elastic_tensor'] = et_table", "docstring": "Parse the elastic tensor data.\n\nReturns:\n6x6 array corresponding to the elastic tensor from the OUTCAR.", "source": "codesearchnet"}
{"code": "def post(self, url=None, post_data={}, parse_data=False, key=None, parameters=None, listener=None):\n    return self._fetch('POST', url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, listener=listener, full_return=True)", "docstring": "Issue a POST request.\n\nKwargs:\nurl (str): Destination URL\npost_data (dict): Dictionary of parameter and values\nparse_data (bool): If true, parse response data\nkey (string): If parse_data==True, look for this key when parsing data\nparameters (dict): Additional GET parameters to append to the URL\nlistener (func): callback called when uploading a file\n\nReturns:\ndict. Response (a dict with keys: success, data, info, body)\n\nRaises:\nAuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception", "source": "codesearchnet"}
{"code": "def convert_to_experiment_list(experiments):\n    \n    exp_list = experiments\n\n    \n    if experiments is None:\n        exp_list = []\n    elif isinstance(experiments, Experiment):\n        exp_list = [experiments]\n    elif type(experiments) is dict:\n        exp_list = [\n            Experiment.from_json(name, spec)\n            for name, spec in experiments.items()\n        ]\n\n    \n    if (type(exp_list) is list\n            and all(isinstance(exp, Experiment) for exp in exp_list)):\n        if len(exp_list) > 1:\n            logger.warning(\"All experiments will be \"\n                           \"using the same SearchAlgorithm.\")\n    else:\n        raise TuneError(\"Invalid argument: {}\".format(experiments))\n\n    return exp_list", "docstring": "Produces a list of Experiment objects.\n\nConverts input from dict, single experiment, or list of\nexperiments to list of experiments. If input is None,\nwill return an empty list.\n\nArguments:\nexperiments (Experiment | list | dict): Experiments to run.\n\nReturns:\nList of experiments.", "source": "juraj-google-style"}
{"code": "def serialize_ndarray_npy(o):\n    \n    with io.BytesIO() as f:\n        np.save(f, o)\n        f.seek(0)\n        serialized = json.dumps(f.read().decode('latin-1'))\n    return dict(\n        _type='np.ndarray',\n        npy=serialized)", "docstring": "Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save` function.\nThis produces totally unreadable (and very un-JSON-like) results (in \"npy\"\nformat), but it's basically guaranteed to work in 100% of cases.\n\nArgs:\no (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.\n\nReturns:\nA dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj-google-style"}
{"code": "def get_point_group_symbol(self):\n    rotations = self._space_group_data['rotations']\n    if (len(rotations) == 0):\n        return '1'\n    return spglib.get_pointgroup(rotations)[0].strip()", "docstring": "Get the point group associated with the structure.\n\nReturns:\n(Pointgroup): Point group for structure.", "source": "codesearchnet"}
{"code": "def _destructively_move(self, dest_doc):\n    if (dest_doc is self):\n        raise RuntimeError('Attempted to overwrite a document with itself')\n    dest_doc.clear()\n    roots = []\n    self._push_all_models_freeze()\n    try:\n        while self.roots:\n            r = next(iter(self.roots))\n            self.remove_root(r)\n            roots.append(r)\n    finally:\n        self._pop_all_models_freeze()\n    for r in roots:\n        if (r.document is not None):\n            raise RuntimeError((\"Somehow we didn't detach %r\" % r))\n    if (len(self._all_models) != 0):\n        raise RuntimeError(('_all_models still had stuff in it: %r' % self._all_models))\n    for r in roots:\n        dest_doc.add_root(r)\n    dest_doc.title = self.title", "docstring": "Move all data in this doc to the dest_doc, leaving this doc empty.\n\nArgs:\ndest_doc (Document) :\nThe Bokeh document to populate with data from this one\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Draw(self, stoplist=None, triplist=None, height=520):\n    output = str()\n    if (not triplist):\n        triplist = []\n    if (not stoplist):\n        stoplist = []\n    if ((not self._cache) or triplist or stoplist):\n        self._gheight = height\n        self._tlist = triplist\n        self._slist = stoplist\n        self._decorators = []\n        self._stations = self._BuildStations(stoplist)\n        self._cache = ('%s %s %s %s' % (self._DrawBox(), self._DrawHours(), self._DrawStations(), self._DrawTrips(triplist)))\n    output = ('%s %s %s %s' % (self._DrawHeader(), self._cache, self._DrawDecorators(), self._DrawFooter()))\n    return output", "docstring": "Main interface for drawing the marey graph.\n\nIf called without arguments, the data generated in the previous call\nwill be used. New decorators can be added between calls.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstoplist: [Stop, Stop, ...]\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n\nReturns:\n# A string that contain a svg/xml web-page with a marey graph.\n\" <svg  width=\"1440\" height=\"520\" version=\"1.1\" ... \"", "source": "codesearchnet"}
{"code": "def to_representation(self, value):\n    if (not value):\n        return None\n    image = get_thumbnail(value, self.geometry_string, **self.options)\n    try:\n        request = self.context.get('request', None)\n        return request.build_absolute_uri(image.url)\n    except:\n        try:\n            return super(HyperlinkedSorlImageField, self).to_representation(image)\n        except AttributeError:\n            return super(HyperlinkedSorlImageField, self).to_native(image.url)", "docstring": "Perform the actual serialization.\n\nArgs:\nvalue: the image to transform\nReturns:\na url pointing at a scaled and cached image", "source": "codesearchnet"}
{"code": "def objects_patch(self, bucket, key, info):\n    \n    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))\n    return google.datalab.utils.Http.request(url, method='PATCH', data=info,\n                                             credentials=self._credentials)", "docstring": "Updates the metadata associated with an object.\n\nArgs:\nbucket: the name of the bucket containing the object.\nkey: the key of the object being updated.\ninfo: the metadata to update.\nReturns:\nA parsed object information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def create_jlink(self, args):\n        \n        jlink = pylink.JLink()\n        jlink.open(args.serial_no, args.ip_addr)\n\n        if hasattr(args, 'tif') and args.tif is not None:\n            if args.tif.lower() == 'swd':\n                jlink.set_tif(pylink.JLinkInterfaces.SWD)\n            else:\n                jlink.set_tif(pylink.JLinkInterfaces.JTAG)\n\n        if hasattr(args, 'device') and args.device is not None:\n            jlink.connect(args.device)\n\n        return jlink", "docstring": "Creates an instance of a J-Link from the given arguments.\n\nArgs:\nself (Command): the ``Command`` instance\nargs (Namespace): arguments to construct the ``JLink`` instance from\n\nReturns:\nAn instance of a ``JLink``.", "source": "juraj-google-style"}
{"code": "def nrows(self):\n    if self.rank == 0:\n        return None\n    return self._ragged_shape[0]", "docstring": "The number of rows in this StructuredTensor (if rank>0).\n\nThis means the length of the outer-most dimension of the StructuredTensor.\n\nNotice that if `self.rank > 1`, then this equals the number of rows\nof the first row partition. That is,\n`self.nrows() == self.row_partitions[0].nrows()`.\n\nOtherwise `self.nrows()` will be the first dimension of the field values.\n\nReturns:\nA scalar integer `Tensor` (or `None` if `self.rank == 0`).", "source": "github-repos"}
{"code": "def _get_source_chunks(self, input_text, language=None):\n    chunks = ChunkList()\n    seek = 0\n    result = self._get_annotations(input_text, language=language)\n    tokens = result['tokens']\n    language = result['language']\n    for (i, token) in enumerate(tokens):\n        word = token['text']['content']\n        begin_offset = token['text']['beginOffset']\n        label = token['dependencyEdge']['label']\n        pos = token['partOfSpeech']['tag']\n        if (begin_offset > seek):\n            chunks.append(Chunk.space())\n            seek = begin_offset\n        chunk = Chunk(word, pos, label)\n        if (chunk.label in _DEPENDENT_LABEL):\n            chunk.dependency = (i < token['dependencyEdge']['headTokenIndex'])\n        if chunk.is_punct():\n            chunk.dependency = chunk.is_open_punct()\n        chunks.append(chunk)\n        seek += len(word)\n    return (chunks, language)", "docstring": "Returns a chunk list retrieved from Syntax Analysis results.\n\nArgs:\ninput_text (str): Text to annotate.\nlanguage (:obj:`str`, optional): Language of the text.\n\nReturns:\nA chunk list. (:obj:`budou.chunk.ChunkList`)", "source": "codesearchnet"}
{"code": "def prepare_csv_read(data, field_names, *args, **kwargs):\n    \n    if hasattr(data, 'readlines') or isinstance(data, list):\n        pass\n    elif isinstance(data, basestring):\n        data = open(data)\n    else:\n        raise TypeError('Unable to handle data of type %r' % type(data))\n    return csv.DictReader(data, field_names, *args, **kwargs)", "docstring": "Prepare various input types for CSV parsing.\n\nArgs:\ndata (iter): Data to read\nfield_names (tuple of str): Ordered names to assign to fields\n\nReturns:\ncsv.DictReader: CSV reader suitable for parsing\n\nRaises:\nTypeError: Invalid value for data", "source": "juraj-google-style"}
{"code": "def get_botcust2():\n    logger.debug('Getting new botcust2')\n    params = {'botid': 'f6a012073e345a08', 'amp;skin': 'chat'}\n    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive', 'DNT': '1', 'Host': 'kakko.pandorabots.com', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}\n    logger.debug('Sending POST request')\n    response = requests.post(url, params=params, headers=headers)\n    logger.debug('POST response {}'.format(response))\n    try:\n        result = response.headers['set-cookie'][9:25]\n        logger.debug('Getting botcust2 successful')\n    except IndexError:\n        result = False\n        logger.critical('Getting botcust2 from html failed')\n    return result", "docstring": "Gets a botcust2, used to identify a speaker with Mitsuku\n\nReturns:\nbotcust2 (str): The botcust2 identifier", "source": "codesearchnet"}
{"code": "def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False):\n    row_meta_dfs = []\n    col_meta_dfs = []\n    data_dfs = []\n    srcs = []\n    for g in gctoos:\n        row_meta_dfs.append(g.row_metadata_df)\n        col_meta_dfs.append(g.col_metadata_df)\n        data_dfs.append(g.data_df)\n        srcs.append(g.src)\n    logger.debug('shapes of row_meta_dfs:  {}'.format([x.shape for x in row_meta_dfs]))\n    all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file)\n    all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields)\n    all_data_df = assemble_data(data_dfs, 'horiz')\n    assert (all_data_df.shape[0] == all_row_metadata_df.shape[0]), 'Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]:  {}  all_row_metadata_df.shape[0]:  {}'.format(all_data_df.shape[0], all_row_metadata_df.shape[0])\n    assert (all_data_df.shape[1] == all_col_metadata_df.shape[0]), 'Number of columns in data does not match number of columns metadata - all_data_df.shape[1]:  {}  all_col_metadata_df.shape[0]:  {}'.format(all_data_df.shape[1], all_col_metadata_df.shape[0])\n    if reset_ids:\n        do_reset_ids(all_col_metadata_df, all_data_df, 'horiz')\n    logger.info('Build GCToo of all...')\n    concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df, col_metadata_df=all_col_metadata_df, data_df=all_data_df)\n    return concated", "docstring": "Horizontally concatenate gctoos.\n\nArgs:\ngctoos (list of gctoo objects)\nremove_all_metadata_fields (bool):  ignore/strip all common metadata when combining gctoos\nerror_report_file (string):  path to write file containing error report indicating\nproblems that occurred during hstack, mainly for inconsistencies in common metadata\nfields_to_remove (list of strings): fields to be removed from the\ncommon metadata because they don't agree across files\nreset_ids (bool): set to True if sample ids are not unique\n\nReturn:\nconcated (gctoo object)", "source": "codesearchnet"}
{"code": "def __init__(self, file_system, path_spec):\n    \n    super(SQLiteBlobDirectory, self).__init__(file_system, path_spec)\n    self._number_of_entries = None", "docstring": "Initializes a directory.\n\nArgs:\nfile_system (SQLiteBlobFileSystem): file system.\npath_spec (SQLiteBlobPathSpec): path specification.", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None):\n    raise NotImplementedError('Initializer subclasses must implement the `__call__()` method.')", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor.", "source": "github-repos"}
{"code": "def build_institute(internal_id, display_name, sanger_recipients=None, coverage_cutoff=None, frequency_cutoff=None):\n    LOG.info('Building institute %s with display name %s', internal_id, display_name)\n    institute_obj = Institute(internal_id=internal_id, display_name=display_name, sanger_recipients=sanger_recipients, coverage_cutoff=coverage_cutoff, frequency_cutoff=frequency_cutoff)\n    for key in list(institute_obj):\n        if (institute_obj[key] is None):\n            institute_obj.pop(key)\n    return institute_obj", "docstring": "Build a institute object\n\nArgs:\ninternal_id(str)\ndisplay_name(str)\nsanger_recipients(list(str)): List with email addresses\n\nReturns:\ninstitute_obj(scout.models.Institute)", "source": "codesearchnet"}
{"code": "def default_memcache_timeout_policy(key):\n    \n    timeout = None\n    if key is not None and isinstance(key, model.Key):\n      modelclass = model.Model._kind_map.get(key.kind())\n      if modelclass is not None:\n        policy = getattr(modelclass, '_memcache_timeout', None)\n        if policy is not None:\n          if isinstance(policy, (int, long)):\n            timeout = policy\n          else:\n            timeout = policy(key)\n    return timeout", "docstring": "Default memcache timeout policy.\n\nThis defers to _memcache_timeout on the Model class.\n\nArgs:\nkey: Key instance.\n\nReturns:\nMemcache timeout to use (integer), or None.", "source": "juraj-google-style"}
{"code": "def pmap(f, axis_name=None, devices=None):\n    if devices is None:\n        devices = accelerators()\n    if not isinstance(devices, (list, tuple)):\n        raise ValueError('Must pass a list or tuple of devices')\n    num_devices = len(devices)\n    if not num_devices:\n        raise ValueError('There must be at least 1 device')\n    has_tpu = bool(tpu_devices(devices))\n    pmap_fn = _get_pmap_impl(f, devices, has_tpu)\n\n    def wrapper(*args):\n        \n        if _pmap_config.devices() is not None:\n            raise ValueError('Found a surrounding pmap. Nested pmap is not supported yet.')\n        flattened_input_args = nest.flatten(args)\n        flattened_per_device_args = [[] for _ in devices]\n        for arg in flattened_input_args:\n            if isinstance(arg, tensor_lib.Tensor):\n                if not arg.shape.rank or arg.shape[0] != len(devices):\n                    raise ValueError('Input tensors need to have a first dimension equal to the number of devices; got tensor of shape %s and %s devices' % (arg.shape, len(devices)))\n                for j, device in enumerate(devices):\n                    updated_arg = array_ops.gather_v2(arg, j)\n                    if not has_tpu:\n                        with ops.device(device):\n                            updated_arg = array_ops.identity(updated_arg)\n                    flattened_per_device_args[j].append(updated_arg)\n            elif isinstance(arg, ShardedNdArray):\n                for device_args, tensor in zip(flattened_per_device_args, arg.tensors):\n                    device_args.append(tensor)\n            else:\n                for device_args in flattened_per_device_args:\n                    device_args.append(arg)\n        all_per_device_args = [nest.pack_sequence_as(args, device_args) for device_args in flattened_per_device_args]\n        with pmap_config(axis_name, devices):\n            results = pmap_fn(all_per_device_args)\n        flattened_results = [nest.flatten(result) for result in results]\n        final_tree = []\n        for i in range(len(flattened_results[0])):\n            tensors = []\n            for j, device in enumerate(devices):\n                assert isinstance(flattened_results[j][i], tensor_lib.Tensor), 'currently only tensor return items are supported'\n                tensors.append(flattened_results[j][i])\n            final_tree.append(ShardedNdArray(tensors))\n        return nest.pack_sequence_as(results[0], final_tree)\n    return wrapper", "docstring": "Transforms a function into a multi-device function.\n\nThe semantics are similar to JAX's pmap.\n\nArgs:\nf: The function to be converted.\naxis_name: Used for nested pmap, which is not supported yet.\ndevices: The devices over which the returned function will run.\n\nReturns:\nA function that runs the underlying function `f` on `devices`. Its arguments\ncan be `ShardedNdArray`s, tensors or other Python objects, and its return\nvalues are all `ShardedNdArray`s. If an input is a tensor, the length of its\nfirst dimension must equal the number of devices, and the tensor will be\nsplitted along its first dimension among the devices. If an input is an\nunknown Python object, it will be replicated among the devices.", "source": "github-repos"}
{"code": "def redo(self):\n    trigger_log = self._to_live_trigger_log(state=TRIGGER_LOG_STATE['NEW'])\n    trigger_log.save(force_insert=True)\n    self.state = TRIGGER_LOG_STATE['REQUEUED']\n    self.save(update_fields=['state'])\n    return trigger_log", "docstring": "Re-sync the change recorded in this trigger log.\n\nCreates a ``NEW`` live trigger log from the data in this archived trigger log and sets\nthe state of this archived instance to ``REQUEUED``.\n\n.. seealso:: :meth:`.TriggerLog.redo`\n\nReturns:\nThe :class:`.TriggerLog` instance that was created from the data of this archived log.", "source": "codesearchnet"}
{"code": "def latent_dirichlet_allocation(concentration, topics_words):\n    topics = ed.Dirichlet(concentration=concentration, name='topics')\n    word_probs = tf.matmul(topics, topics_words)\n    bag_of_words = ed.OneHotCategorical(probs=word_probs, name='bag_of_words')\n    return bag_of_words", "docstring": "Latent Dirichlet Allocation in terms of its generative process.\n\nThe model posits a distribution over bags of words and is parameterized by\na concentration and the topic-word probabilities. It collapses per-word\ntopic assignments.\n\nArgs:\nconcentration: A Tensor of shape [1, num_topics], which parameterizes the\nDirichlet prior over topics.\ntopics_words: A Tensor of shape [num_topics, num_words], where each row\n(topic) denotes the probability of each word being in that topic.\n\nReturns:\nbag_of_words: A random variable capturing a sample from the model, of shape\n[1, num_words]. It represents one generated document as a bag of words.", "source": "codesearchnet"}
{"code": "def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):\n    if (not self.has_vocab):\n        raise ValueError('You need to build the vocabulary using `build_vocab` before using `apply_encoding_options`')\n    if (min_token_count < 1):\n        raise ValueError('`min_token_count` should atleast be 1')\n    token_counts = list(self._token_counts.items())\n    token_counts = [x for x in token_counts if (x[1] >= min_token_count)]\n    if (limit_top_tokens is not None):\n        token_counts.sort(key=(lambda x: x[1]), reverse=True)\n        filtered_tokens = list(zip(*token_counts))[0]\n        filtered_tokens = filtered_tokens[:limit_top_tokens]\n    else:\n        filtered_tokens = zip(*token_counts)[0]\n    self.create_token_indices(filtered_tokens)", "docstring": "Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to\nplay with different settings without having to re-run tokenization on the entire corpus.\n\nArgs:\nmin_token_count: The minimum token count (frequency) in order to include during encoding. All tokens\nbelow this frequency will be encoded to `0` which corresponds to unknown token. (Default value = 1)\nlimit_top_tokens: The maximum number of tokens to keep, based their frequency. Only the most common `limit_top_tokens`\ntokens will be kept. Set to None to keep everything. (Default value: None)", "source": "codesearchnet"}
{"code": "def __new__(cls, x=None, y=None, ildj=None, kwargs=None):\n    \n    return super(_Mapping, cls).__new__(cls, x, y, ildj, kwargs)", "docstring": "Custom __new__ so namedtuple items have defaults.\n\nArgs:\nx: `Tensor` or None. Input to forward; output of inverse.\ny: `Tensor` or None. Input to inverse; output of forward.\nildj: `Tensor`. This is the (un-reduce_sum'ed) inverse log det jacobian.\nkwargs: Python dictionary. Extra args supplied to forward/inverse/etc\nfunctions.\n\nReturns:\nmapping: New instance of _Mapping.", "source": "juraj-google-style"}
{"code": "def vae(x, z_size, name=None):\n  \n  with tf.variable_scope(name, default_name=\"vae\"):\n    mu = tf.layers.dense(x, z_size, name=\"mu\")\n    log_sigma = tf.layers.dense(x, z_size, name=\"log_sigma\")\n    shape = common_layers.shape_list(x)\n    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])\n    z = mu + tf.exp(log_sigma / 2) * epsilon\n    kl = 0.5 * tf.reduce_mean(\n        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)\n    free_bits = z_size \n    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))\n    return z, kl_loss, mu, log_sigma", "docstring": "Simple variational autoencoder without discretization.\n\nArgs:\nx: Input to the discretization bottleneck.\nz_size: Number of bits, where discrete codes range from 1 to 2**z_size.\nname: Name for the bottleneck scope.\n\nReturns:\nEmbedding function, latent, loss, mu and log_simga.", "source": "juraj-google-style"}
{"code": "def from_json(cls, json):\n    if (json['name'] in _KEYRANGES_CLASSES):\n        return _KEYRANGES_CLASSES[json['name']].from_json(json)\n    raise ValueError('Invalid json %s', json)", "docstring": "Deserialize from json.\n\nArgs:\njson: a dict of json compatible fields.\n\nReturns:\na KeyRanges object.\n\nRaises:\nValueError: if the json is invalid.", "source": "codesearchnet"}
{"code": "def contrast(x, severity=1):\n    c = [0.4, 0.3, 0.2, 0.1, 0.05][(severity - 1)]\n    x = (np.array(x) / 255.0)\n    means = np.mean(x, axis=(0, 1), keepdims=True)\n    x_clip = (np.clip((((x - means) * c) + means), 0, 1) * 255)\n    return around_and_astype(x_clip)", "docstring": "Change contrast of images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Changed contrast.", "source": "codesearchnet"}
{"code": "def _update_album_art_to_full_uri(self, item):\n    if getattr(item, 'album_art_uri', False):\n        item.album_art_uri = self.build_album_art_full_uri(item.album_art_uri)", "docstring": "Update an item's Album Art URI to be an absolute URI.\n\nArgs:\nitem: The item to update the URI for", "source": "codesearchnet"}
{"code": "async def _on_state_update(self, state_update):\n    notification_type = state_update.WhichOneof('state_update')\n    if state_update.HasField('conversation'):\n        try:\n            (await self._handle_conversation_delta(state_update.conversation))\n        except exceptions.NetworkError:\n            logger.warning('Discarding %s for %s: Failed to fetch conversation', notification_type.replace('_', ' '), state_update.conversation.conversation_id.id)\n            return\n    if (notification_type == 'typing_notification'):\n        (await self._handle_set_typing_notification(state_update.typing_notification))\n    elif (notification_type == 'watermark_notification'):\n        (await self._handle_watermark_notification(state_update.watermark_notification))\n    elif (notification_type == 'event_notification'):\n        (await self._on_event(state_update.event_notification.event))", "docstring": "Receive a StateUpdate and fan out to Conversations.\n\nArgs:\nstate_update: hangouts_pb2.StateUpdate instance", "source": "codesearchnet"}
{"code": "def speed(self):\n    if self._stalled:\n        return 0\n    time_sum = 0\n    data_len_sum = 0\n    for (time_diff, data_len) in self._samples:\n        time_sum += time_diff\n        data_len_sum += data_len\n    if time_sum:\n        return (data_len_sum / time_sum)\n    else:\n        return 0", "docstring": "Return the current transfer speed.\n\nReturns:\nint: The speed in bytes per second.", "source": "codesearchnet"}
{"code": "def _measure_list_profile_column_widths(self, profile_data):\n    num_columns = len(profile_data.column_names())\n    widths = [len(column_name) for column_name in profile_data.column_names()]\n    for row in range(profile_data.row_count()):\n        for col in range(num_columns):\n            widths[col] = max(widths[col], len(str(profile_data.row_values(row)[col])) + 2)\n    return widths", "docstring": "Determine the maximum column widths for each data list.\n\nArgs:\nprofile_data: list of ProfileDatum objects.\n\nReturns:\nList of column widths in the same order as columns in data.", "source": "github-repos"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    if position_ids is None:\n        batch_size, sequence_length = input_ids.shape\n        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration\n\n>>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(\"facebook/blenderbot_small-90M\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot_small-90M\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"}
{"code": "def plot(self, data, height=1000, render_large_data=False):\n    \n\n    import IPython\n\n    if not isinstance(data, pd.DataFrame):\n      raise ValueError('Expect a DataFrame.')\n\n    if (len(data) > 10000 and not render_large_data):\n      raise ValueError('Facets dive may not work well with more than 10000 rows. ' +\n                       'Reduce data or set \"render_large_data\" to True.')\n\n    jsonstr = data.to_json(orient='records')\n    html_id = 'f' + datalab.utils.commands.Html.next_id()\n    HTML_TEMPLATE = \n    html = HTML_TEMPLATE.format(html_id=html_id, jsonstr=jsonstr, height=height)\n    return IPython.core.display.HTML(html)", "docstring": "Plots a detail view of data.\n\nArgs:\ndata: a Pandas dataframe.\nheight: the height of the output.", "source": "juraj-google-style"}
{"code": "def brightness(im):\n    \n    im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n    h, s, v = cv2.split(im_hsv) \n    height, weight = v.shape[:2]\n    total_bright = 0\n    for i in v:\n        total_bright = total_bright+sum(i)\n    return float(total_bright)/(height*weight)", "docstring": "Return the brightness of an image\nArgs:\nim(numpy): image\n\nReturns:\nfloat, average brightness of an image", "source": "juraj-google-style"}
{"code": "def format(sql, args=None):\n    \n    resolved_vars = {}\n    code = []\n    SqlStatement._find_recursive_dependencies(sql, args, code=code,\n                                              resolved_vars=resolved_vars)\n\n    \n    \n    \n    parts = []\n    for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):\n      if escape:\n        parts.append('$')\n      elif placeholder:\n        variable = placeholder[1:]\n        try:\n          value = resolved_vars[variable]\n        except KeyError as e:\n          raise Exception('Invalid sql. Unable to substitute $%s.' % e.args[0])\n\n        if isinstance(value, types.ModuleType):\n          value = _utils.get_default_query_from_module(value)\n\n        if isinstance(value, SqlStatement):\n          sql = value.format(value._sql, resolved_vars)\n          value = '(%s)' % sql\n        elif '_repr_sql_' in dir(value):\n          \n          value = value._repr_sql_()\n        elif isinstance(value, basestring):\n          value = SqlStatement._escape_string(value)\n        elif isinstance(value, list) or isinstance(value, tuple):\n          if isinstance(value, tuple):\n            value = list(value)\n          expansion = '('\n          for v in value:\n            if len(expansion) > 1:\n              expansion += ', '\n            if isinstance(v, basestring):\n              expansion += SqlStatement._escape_string(v)\n            else:\n              expansion += str(v)\n          expansion += ')'\n          value = expansion\n        else:\n          value = str(value)\n        parts.append(value)\n      elif literal:\n        parts.append(literal)\n\n    expanded = ''.join(parts)\n    return expanded", "docstring": "Resolve variable references in a query within an environment.\n\nThis computes and resolves the transitive dependencies in the query and raises an\nexception if that fails due to either undefined or circular references.\n\nArgs:\nsql: query to format.\nargs: a dictionary of values to use in variable expansion.\n\nReturns:\nThe resolved SQL text with variables expanded.\n\nRaises:\nException on failure.", "source": "juraj-google-style"}
{"code": "def _FormatHumanReadableSize(self, size):\n    \n    magnitude_1000 = 0\n    size_1000 = float(size)\n    while size_1000 >= 1000:\n      size_1000 /= 1000\n      magnitude_1000 += 1\n\n    magnitude_1024 = 0\n    size_1024 = float(size)\n    while size_1024 >= 1024:\n      size_1024 /= 1024\n      magnitude_1024 += 1\n\n    size_string_1000 = None\n    if 0 < magnitude_1000 <= 7:\n      size_string_1000 = '{0:.1f}{1:s}'.format(\n          size_1000, self._UNITS_1000[magnitude_1000])\n\n    size_string_1024 = None\n    if 0 < magnitude_1024 <= 7:\n      size_string_1024 = '{0:.1f}{1:s}'.format(\n          size_1024, self._UNITS_1024[magnitude_1024])\n\n    if not size_string_1000 or not size_string_1024:\n      return '{0:d} B'.format(size)\n\n    return '{0:s} / {1:s} ({2:d} B)'.format(\n        size_string_1024, size_string_1000, size)", "docstring": "Represents a number of bytes as a human readable string.\n\nArgs:\nsize (int): size in bytes.\n\nReturns:\nstr: human readable string of the size.", "source": "juraj-google-style"}
{"code": "def chmod(target):\n    \n    assert isinstance(target, str)\n    assert os.path.exists(target)\n\n    file_mode = stat.S_IRUSR | stat.S_IWUSR\n    folder_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR\n\n    \n    remove_immutable_attribute(target)\n\n    if os.path.isfile(target):\n        os.chmod(target, file_mode)\n\n    elif os.path.isdir(target):\n        \n        os.chmod(target, folder_mode)\n\n        \n        for root, dirs, files in os.walk(target):\n            for cur_dir in dirs:\n                os.chmod(os.path.join(root, cur_dir), folder_mode)\n            for cur_file in files:\n                os.chmod(os.path.join(root, cur_file), file_mode)\n\n    else:\n        raise ValueError(\"Unsupported file type: {}\".format(target))", "docstring": "Recursively set the chmod for files to 0600 and 0700 for folders.\n\nIt's ok unless we need something more specific.\n\nArgs:\ntarget (str): Root file or folder", "source": "juraj-google-style"}
{"code": "def get_crate(version, crate_root=None):\n    if (not crate_root):\n        crate_root = _crates_cache()\n        _remove_old_crates(crate_root)\n    if _is_project_repo(version):\n        return _extract_tarball(_build_tarball(version))\n    m = BRANCH_VERSION_RE.match(version)\n    if m:\n        return _build_from_release_branch(m.group(0), crate_root)\n    uri = _lookup_uri(version)\n    crate_dir = _download_and_extract(uri, crate_root)\n    return crate_dir", "docstring": "Retrieve a Crate tarball, extract it and return the path.\n\nArgs:\nversion: The Crate version to get.\nCan be specified in different ways:\n\n- A concrete version like '0.55.0'\n- A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'.\nThis will use the latest version that matches.\n- Release branch, like `3.1`\n- An alias: 'latest-stable' or 'latest-testing'\n- A URI pointing to a crate tarball\ncrate_root: Where to extract the tarball to.\nIf this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates``\nwill be used.", "source": "codesearchnet"}
{"code": "def is_scalar_batch(self, name='is_scalar_batch'):\n    with self._name_scope(name):\n        return ops.convert_to_tensor(self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), name='is_scalar_batch')", "docstring": "Indicates that `batch_shape == []`.\n\nArgs:\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nis_scalar_batch: `bool` scalar `Tensor`.", "source": "github-repos"}
{"code": "def download_extract_tar(tar_url, folder, tar_filename=''):\n    \n    try:\n        makedirs(folder)\n    except OSError:\n        if not isdir(folder):\n            raise\n    data_file = tar_filename\n    if not data_file:\n        fd, data_file = mkstemp('.tar.gz')\n        download(tar_url, os.fdopen(fd, 'wb'))\n    else:\n        download(tar_url, data_file)\n\n    with tarfile.open(data_file) as tar:\n        tar.extractall(path=folder)", "docstring": "Download and extract the tar at the url to the given folder\n\nArgs:\ntar_url (str): URL of tar file to download\nfolder (str): Location of parent directory to extract to. Doesn't have to exist\ntar_filename (str): Location to download tar. Default is to a temp file", "source": "juraj-google-style"}
{"code": "def btemp_threshold(img, min_in, max_in, threshold, threshold_out=None, **kwargs):\n    threshold_out = (threshold_out if (threshold_out is not None) else (176 / 255.0))\n    low_factor = ((threshold_out - 1.0) / (min_in - threshold))\n    low_offset = (1.0 + (low_factor * min_in))\n    high_factor = (threshold_out / (max_in - threshold))\n    high_offset = (high_factor * max_in)\n\n    def _bt_threshold(band_data):\n        return da.where((band_data >= threshold), (high_offset - (high_factor * band_data)), (low_offset - (low_factor * band_data)))\n    return apply_enhancement(img.data, _bt_threshold, pass_dask=True)", "docstring": "Scale data linearly in two separate regions.\n\nThis enhancement scales the input data linearly by splitting the data\ninto two regions; min_in to threshold and threshold to max_in. These\nregions are mapped to 1 to threshold_out and threshold_out to 0\nrespectively, resulting in the data being \"flipped\" around the\nthreshold. A default threshold_out is set to `176.0 / 255.0` to\nmatch the behavior of the US National Weather Service's forecasting\ntool called AWIPS.\n\nArgs:\nimg (XRImage): Image object to be scaled\nmin_in (float): Minimum input value to scale\nmax_in (float): Maximum input value to scale\nthreshold (float): Input value where to split data in to two regions\nthreshold_out (float): Output value to map the input `threshold`\nto. Optional, defaults to 176.0 / 255.0.", "source": "codesearchnet"}
{"code": "def _create_hparam_extractor(hparam_name):\n\n    def extractor_fn(session_group):\n        if (hparam_name in session_group.hparams):\n            return _value_to_python(session_group.hparams[hparam_name])\n        return None\n    return extractor_fn", "docstring": "Returns an extractor function that extracts an hparam from a session group.\n\nArgs:\nhparam_name: str. Identies the hparam to extract from the session group.\nReturns:\nA function that takes a tensorboard.hparams.SessionGroup protobuffer and\nreturns the value, as a native Python object, of the hparam identified by\n'hparam_name'.", "source": "codesearchnet"}
{"code": "def save_page(self, path=None):\n        \n\n        path = _prepare_path(path, \"html\")\n\n        with open(path, \"wb\") as f:\n            f.write(encode_string(self.body))\n\n        return path", "docstring": "Save a snapshot of the page.\n\nIf invoked without arguments, it will save a file to :data:`capybara.save_path` and the\nfile will be given a randomly generated filename. If invoked with a relative path, the path\nwill be relative to :data:`capybara.save_path`.\n\nArgs:\npath (str, optional): The path to where it should be saved.\n\nReturns:\nstr: The path to which the file was saved.", "source": "juraj-google-style"}
{"code": "def nodeid(self, iv, quantifier=False):\n        \n        return next(iter(self.nodeids(ivs=[iv], quantifier=quantifier)), None)", "docstring": "Return the nodeid of the predication selected by *iv*.\n\nArgs:\niv: the intrinsic variable of the predication to select\nquantifier: if `True`, treat *iv* as a bound variable and\nfind its quantifier; otherwise the non-quantifier will\nbe returned", "source": "juraj-google-style"}
{"code": "def execute_command(self, command):\n        \n\n        self.info_log(\"executing command: %s\" % command)\n\n        try:\n            ssh = paramiko.SSHClient()\n            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n            k = paramiko.RSAKey.from_private_key_file(\n                self.browser_config.get('ssh_key_path')\n            )\n            ssh.connect(\n                self.private_ip,\n                username=self.browser_config.get('username'),\n                pkey=k\n            )\n\n            sleep_time = 0.1\n            stdout = []\n            stderr = []\n\n            ssh_transport = ssh.get_transport()\n            channel = ssh_transport.open_session()\n            channel.setblocking(0)\n            channel.exec_command(command)\n\n            while True:\n\n                while channel.recv_ready():\n                    stdout.append(channel.recv(1000))\n\n                while channel.recv_stderr_ready():\n                    stderr.append(channel.recv_stderr(1000))\n\n                if channel.exit_status_ready():\n                    break\n\n                sleep(sleep_time)\n\n            \n            ssh_transport.close()\n\n            ssh.close()\n\n            return b''.join(stdout), b''.join(stderr)\n\n        except Exception as e:\n            msg = \"Execute_command exception: %s\" % str(e)\n            self.error_log(msg)\n            raise Exception(msg)", "docstring": "Execute a command on the node\n\nArgs:\ncommand (str)", "source": "juraj-google-style"}
{"code": "def FromTrimmedData(data, index):\n        \n        header = Header()\n\n        ms = StreamManager.GetStream(data)\n\n        reader = BinaryReader(ms)\n        header.DeserializeUnsigned(reader)\n        reader.ReadByte()\n\n        witness = Witness()\n        witness.Deserialize(reader)\n        header.Script = witness\n\n        StreamManager.ReleaseStream(ms)\n\n        return header", "docstring": "Deserialize into a Header object from the provided data.\n\nArgs:\ndata (bytes):\nindex: UNUSED\n\nReturns:\nHeader:", "source": "juraj-google-style"}
{"code": "def get_by_uri(self, uri):\n    self._helper.validate_resource_uri(uri)\n    data = self._helper.do_get(uri)\n    if data:\n        new_resource = self.new(self._connection, data)\n    else:\n        new_resource = None\n    return new_resource", "docstring": "Retrieves a resource by its URI\n\nArgs:\nuri: URI of the resource\n\nReturns:\nResource object", "source": "codesearchnet"}
{"code": "def xmoe_tr_dense_2k():\n    hparams = mtf_transformer2.mtf_bitransformer_base()\n    hparams.encoder_layers = (['self_att', 'drd'] * 4)\n    hparams.decoder_layers = (['self_att', 'enc_att', 'drd'] * 4)\n    hparams.batch_size = 64\n    hparams.shared_embedding_and_softmax_weights = True\n    hparams.mesh_shape = 'batch:8'\n    return hparams", "docstring": "Series of architectural experiments on Translation.\n\n# run on 8-core setup\n\n119M params, einsum=0.95e13\n\nReturns:\na hparams", "source": "codesearchnet"}
{"code": "def UploadUsers(self, hash_algorithm, hash_key, accounts):\n    return self.rpc_helper.UploadAccount(hash_algorithm, base64.urlsafe_b64encode(hash_key), [GitkitUser.ToRequest(i) for i in accounts])", "docstring": "Uploads multiple users to Gitkit server.\n\nArgs:\nhash_algorithm: string, the hash algorithm.\nhash_key: array, raw key of the hash algorithm.\naccounts: list of GitkitUser.\n\nReturns:\nA dict of failed accounts. The key is the index of the 'accounts' list,\nstarting from 0.", "source": "codesearchnet"}
{"code": "def create(self, callback_url):\n        \n        resource = self.resource.create({'subscribed_to': 'address',\n                                         'callback_url': callback_url})\n        subscription = self.wrap(resource)\n        self.add(subscription)\n        return subscription", "docstring": "Register a new Subscription on this collection's parent object.\n\nArgs:\ncallback_url (str): URI of an active endpoint which can receive\nnotifications.\n\nReturns:\nA round.Subscription object if successful.", "source": "juraj-google-style"}
{"code": "def _verify_pipeline_uuid(self, pipeline_uuid):\n    try:\n        uuid.UUID(pipeline_uuid)\n    except ValueError as ve:\n        raise ValueError(f\"Incorrect pipeline uuid: '{pipeline_uuid}'\") from ve", "docstring": "Verify the received pipeline_uuid format\n\nArgs:\npipeline_uuid: uuid of the pipeline\n\nReturns:\nIf pipeline ID is not verified, will raise an exception", "source": "github-repos"}
{"code": "def _preprocess_movie_lens(ratings_df):\n    ratings_df['data'] = 1.0\n    num_timestamps = ratings_df[['userId', 'timestamp']].groupby('userId').nunique()\n    last_user_timestamp = ratings_df[['userId', 'timestamp']].groupby('userId').max()\n    ratings_df['numberOfTimestamps'] = ratings_df['userId'].apply((lambda x: num_timestamps['timestamp'][x]))\n    ratings_df['lastTimestamp'] = ratings_df['userId'].apply((lambda x: last_user_timestamp['timestamp'][x]))\n    ratings_df = ratings_df[(ratings_df['numberOfTimestamps'] > 2)]\n    ratings_df = _create_row_col_indices(ratings_df)\n    train_ratings_df = ratings_df[(ratings_df['timestamp'] < ratings_df['lastTimestamp'])]\n    test_ratings_df = ratings_df[(ratings_df['timestamp'] == ratings_df['lastTimestamp'])]\n    return (ratings_df, train_ratings_df, test_ratings_df)", "docstring": "Separate the rating datafram into train and test sets.\n\nFilters out users with less than two distinct timestamps. Creates train set\nand test set. The test set contains all the last interactions of users with\nmore than two distinct timestamps.\n\nArgs:\nratings_df: pandas dataframe with columns 'userId', 'movieId', 'rating',\n'timestamp'.\n\nReturns:\ntuple of dataframes (filtered_ratings, train_ratings, test_ratings).", "source": "codesearchnet"}
{"code": "class BlipProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = ('BlipImageProcessor', 'BlipImageProcessorFast')\n    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')\n\n    def __init__(self, image_processor, tokenizer, **kwargs):\n        tokenizer.return_token_type_ids = False\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n\n    def __call__(self, images: ImageInput=None, text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[BlipProcessorKwargs]) -> BatchEncoding:\n        \n        if images is None and text is None:\n            raise ValueError('You have to specify either images or text.')\n        text_encoding = None\n        output_kwargs = self._merge_kwargs(BlipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n        if text is not None:\n            text_encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])\n        if images is not None:\n            encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs'])\n            if text_encoding is not None:\n                encoding_image_processor.update(text_encoding)\n            return encoding_image_processor\n        return text_encoding\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        image_processor_input_names = self.image_processor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))", "docstring": "Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.\n\n[`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the\ndocstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.\n\nArgs:\nimage_processor (`BlipImageProcessor`):\nAn instance of [`BlipImageProcessor`]. The image processor is a required input.\ntokenizer (`BertTokenizerFast`):\nAn instance of ['BertTokenizerFast`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def evalAsync(self, amplstatements, callback, **kwargs):\n    if (self._langext is not None):\n        amplstatements = self._langext.translate(amplstatements, **kwargs)\n\n    def async_call():\n        self._lock.acquire()\n        try:\n            self._impl.eval(amplstatements)\n            self._errorhandler_wrapper.check()\n        except Exception:\n            self._lock.release()\n            raise\n        else:\n            self._lock.release()\n            callback.run()\n    Thread(target=async_call).start()", "docstring": "Interpret the given AMPL statement asynchronously.\n\nArgs:\namplstatements: A collection of AMPL statements and declarations to\nbe passed to the interpreter.\n\ncallback: Callback to be executed when the statement has been\ninterpreted.\n\nRaises:\nRuntimeError: if the input is not a complete AMPL statement (e.g.\nif it does not end with semicolon) or if the underlying\ninterpreter is not running.", "source": "codesearchnet"}
{"code": "def get_nn_info(self, structure, n):\n    site = structure[n]\n    neighs_dists = structure.get_neighbors(site, self.cutoff)\n    siw = []\n    if (self.get_all_sites == True):\n        for (s, dist) in neighs_dists:\n            w = dist\n            siw.append({'site': s, 'image': self._get_image(structure, s), 'weight': w, 'site_index': self._get_original_site(structure, s)})\n    else:\n        min_dist = min([dist for (neigh, dist) in neighs_dists])\n        for (s, dist) in neighs_dists:\n            if (dist < ((1.0 + self.tol) * min_dist)):\n                w = (min_dist / dist)\n                siw.append({'site': s, 'image': self._get_image(structure, s), 'weight': w, 'site_index': self._get_original_site(structure, s)})\n    return siw", "docstring": "Get all near-neighbor sites as well as the associated image locations\nand weights of the site with index n using the closest neighbor\ndistance-based method.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site for which to determine near\nneighbors.\n\nReturns:\nsiw (list of tuples (Site, array, float)): tuples, each one\nof which represents a neighbor site, its image location,\nand its weight.", "source": "codesearchnet"}
{"code": "def _sparse_tensor(self, data, batch_size=-1):\n    indices = []\n    values = []\n    max_col_count = 0\n    for batch, batch_ix in zip(data, range(len(data))):\n        for column, column_ix in zip(batch, range(len(batch))):\n            indices.append([batch_ix, column_ix])\n            values.append(column)\n            max_col_count = max(max_col_count, column_ix + 1)\n    shape = [batch_size if batch_size != -1 else len(data), max_col_count]\n    value_type = dtypes.string if not values or isinstance(values[0], str) else dtypes.int64\n    return sparse_tensor.SparseTensor(constant_op.constant(indices, dtypes.int64, [len(indices), 2]), constant_op.constant(values, value_type, [len(indices)]), constant_op.constant(shape, dtypes.int64))", "docstring": "Generates a SparseTensor.\n\nArgs:\ndata: Should be a list of list of strings or int64. Each item of the outer\nlist represents a batch. Each item of the batch is a feature of a\nspecific feature column.\nbatch_size: optional batch size, especially for cases when data has no\nentry for some batches.\n\nReturns:\nA SparseTensor.", "source": "github-repos"}
{"code": "def download_artifact_bundle(self, id_or_uri, file_path):\n        \n        uri = self.DOWNLOAD_PATH + '/' + extract_id_from_uri(id_or_uri)\n        return self._client.download(uri, file_path)", "docstring": "Download the Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\nfile_path(str): Destination file path.\n\nReturns:\nbool: Successfully downloaded.", "source": "juraj-google-style"}
{"code": "def post(cls, payload):\n    if (not isinstance(payload, dict)):\n        raise ValueError(\"The 'payload' parameter must be provided a dictionary object.\")\n    payload = cls.set_id_in_fkeys(payload)\n    payload = cls.check_boolean_fields(payload)\n    payload = cls.add_model_name_to_payload(payload)\n    payload = cls.prepost_hooks(payload)\n    cls.debug_logger.debug('POSTING payload {}'.format(json.dumps(payload, indent=4)))\n    res = requests.post(url=cls.URL, json=payload, headers=HEADERS, verify=False)\n    cls.write_response_html_to_file(res, 'bob.html')\n    if (not res.ok):\n        cls.log_error(res.text)\n        res_json = res.json()\n        if ('exception' in res_json):\n            exc_type = res_json['exception']\n            if (exc_type == 'ActiveRecord::RecordNotUnique'):\n                raise RecordNotUnique()\n    res.raise_for_status()\n    res = res.json()\n    cls.log_post(res)\n    cls.debug_logger.debug('Success')\n    return res", "docstring": "Posts the data to the specified record.\n\nArgs:\npayload: `dict`. This will be JSON-formatted prior to sending the request.\n\nReturns:\n`dict`. The JSON formatted response.\n\nRaises:\n`Requests.exceptions.HTTPError`: The status code is not ok.\n`RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique.", "source": "codesearchnet"}
{"code": "def set_token(self, token):\n    self.token = token\n    self.set_header('Authorization', 'Bearer {}'.format(token))", "docstring": "Set the token for the v20 context\n\nArgs:\ntoken: The token used to access the v20 REST api", "source": "codesearchnet"}
{"code": "def save_r_df(self, state_key, r_value, action_key=None):\n        \n        if action_key is not None:\n            add_r_df = pd.DataFrame([(state_key, action_key, r_value)], columns=[\"state_key\", \"action_key\", \"r_value\"])\n        else:\n            add_r_df = pd.DataFrame([(state_key, r_value)], columns=[\"state_key\", \"r_value\"])\n\n        if self.r_df is not None:\n            self.r_df = pd.concat([add_r_df, self.r_df])\n            if action_key is not None:\n                self.r_df = self.r_df.drop_duplicates([\"state_key\", \"action_key\"])\n            else:\n                self.r_df = self.r_df.drop_duplicates([\"state_key\"])\n        else:\n            self.r_df = add_r_df", "docstring": "Insert or update R-Value in `self.r_df`.\n\nArgs:\nstate_key:     The key of state.\nr_value:       R-Value(Reward).\naction_key:    The key of action if it is nesesary for the parametar of value function.\n\nExceptions:\nTypeError:      If the type of `r_value` is not float.", "source": "juraj-google-style"}
{"code": "def read(self, n):\n        \n\n        d = b''\n        while n:\n            try:\n                block = self._process.stdout.read(n)\n            except ValueError:\n                block = None\n            if not block:\n                self._process.poll()\n                raise EOFError('Process ended')\n            d += block\n            n -= len(block)\n        return d", "docstring": "Read *n* bytes from the subprocess' output channel.\n\nArgs:\nn(int): The number of bytes to read.\n\nReturns:\nbytes: *n* bytes of output.\n\nRaises:\nEOFError: If the process exited.", "source": "juraj-google-style"}
{"code": "def get_es_value(obj, def_obj):\n    \n    def get_dict_val(item):\n        \n        if isinstance(item, dict):\n            return str(item.get('value'))\n        return str(item)\n\n    value_flds = []\n    if def_obj.es_defs.get('kds_esValue'):\n        value_flds = def_obj.es_defs['kds_esValue'].copy()\n    else:\n        \n        value_flds = set(obj).difference(__ALL_IGN__)\n        value_flds = list(value_flds)\n    value_flds += __COMBINED__\n    try:\n        obj['value'] = [obj.get(label) for label in value_flds\n                        if obj.get(label)][0]\n    except IndexError:\n        obj['value'] = \", \".join([\"%s: %s\" % (value.get('label'),\n                                              value.get('value'))\n                                  for prop, value in obj.items()\n                                  if isinstance(value, dict) and \\\n                                  value.get('label')])\n\n    if isinstance(obj['value'], list):\n        obj['value'] = \", \".join([get_dict_val(item) for item in obj['value']])\n    else:\n        obj['value'] = get_dict_val(obj['value'])\n    if str(obj['value']).strip().endswith(\"/\"):\n        obj['value'] = str(obj['value']).strip()[:-1].strip()\n    if not obj['value']:\n        obj['value'] = obj.get('uri', '')\n    return obj", "docstring": "Returns the value for an object that goes into the elacticsearch 'value'\nfield\n\nargs:\nobj: data object to update\ndef_obj: the class instance that has defintion values", "source": "juraj-google-style"}
{"code": "def dict_itemstr_list(dict_, **dictkw):\n    r\n    import utool as ut\n\n    explicit = dictkw.get('explicit', False)\n    dictkw['explicit'] = _rectify_countdown_or_bool(explicit)\n\n    dosort = dictkw.get('sorted_', None)\n    if dosort is None:\n        dosort = True\n\n    if dosort and not isinstance(dict_, collections.OrderedDict):\n        key_order = dictkw.get('key_order', None)\n        def iteritems(d):\n            if key_order is None:\n                \n                try:\n                    return iter(sorted(six.iteritems(d)))\n                except TypeError:\n                    \n                    return six.iteritems(d)\n            else:\n                \n                \n                unordered_keys = list(d.keys())\n                other_keys = sorted(list(set(unordered_keys) - set(key_order)))\n                keys = key_order + other_keys\n                return ((key, d[key]) for key in keys)\n    else:\n        iteritems = six.iteritems\n\n    _valstr = _make_valstr(**dictkw)\n\n    precision = dictkw.get('precision', None)\n    kvsep = dictkw.get('kvsep', ': ')\n    if explicit:\n        kvsep = '='\n\n    def make_item_str(key, val):\n        if explicit or dictkw.get('strkeys', False):\n            key_str = six.text_type(key)\n        else:\n            key_str = repr2(key, precision=precision)\n\n        prefix = key_str + kvsep\n        val_str = _valstr(val)\n\n        \n        if util_type.HAVE_NUMPY and isinstance(val, np.ndarray):\n            item_str = hz_str(prefix, val_str)\n        else:\n            \n            \n            item_str = prefix + val_str\n        return item_str\n\n    itemstr_list = [make_item_str(key, val)\n                    for (key, val) in iteritems(dict_)]\n\n    reverse = False\n    key_order_metric = dictkw.get('key_order_metric', None)\n    if key_order_metric is not None:\n        if key_order_metric.startswith('-'):\n            key_order_metric = key_order_metric[1:]\n            reverse = True\n\n    if key_order_metric == 'strlen':\n        metric_list = [len(itemstr) for itemstr in itemstr_list]\n        itemstr_list = ut.sortedby(itemstr_list, metric_list, reverse=reverse)\n    elif key_order_metric == 'val':\n        metric_list = [val for (key, val) in iteritems(dict_)]\n        itemstr_list = ut.sortedby(itemstr_list, metric_list, reverse=reverse)\n\n    maxlen = dictkw.get('maxlen', None)\n    if maxlen is not None and len(itemstr_list) > maxlen:\n        itemstr_list = itemstr_list[0:maxlen]\n    return itemstr_list", "docstring": "r\"\"\"\nReturns:\nlist: a list of human-readable dictionary items\n\nArgs:\nexplicit : if True uses dict(key=val,...) format instead of {key:val,...}", "source": "juraj-google-style"}
{"code": "def get_testcase_io(testcase):\n    \n    test_runner = testcase.runner\n    variables = testcase.config.get(\"variables\", {})\n    output_list = testcase.config.get(\"output\", [])\n    output_mapping = test_runner.extract_output(output_list)\n\n    return {\n        \"in\": variables,\n        \"out\": output_mapping\n    }", "docstring": "get and print testcase input(variables) and output.\n\nArgs:\ntestcase (unittest.suite.TestSuite): corresponding to one YAML/JSON file, it has been set two attributes:\nconfig: parsed config block\nrunner: initialized runner.Runner() with config\nReturns:\ndict: input(variables) and output mapping.", "source": "juraj-google-style"}
{"code": "def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):\n    \n    d = line_distance_similarity(p1a, p1b, p2a, p2b, T=T)\n    a = abs(angle_similarity(normalize(line(p1a, p1b)), normalize(line(p2a, p2b))))\n    return d * a", "docstring": "Similarity between two lines\n\nArgs:\np1a ([float, float]): x and y coordinates. Line A start\np1b ([float, float]): x and y coordinates. Line A end\np2a ([float, float]): x and y coordinates. Line B start\np2b ([float, float]): x and y coordinates. Line B end\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "juraj-google-style"}
{"code": "def register_for_auto_class(cls, auto_class='AutoFeatureExtractor'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom feature extractors as the ones\nin the library are already mapped with `AutoFeatureExtractor`.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"AutoFeatureExtractor\"`):\nThe auto class to register this new feature extractor with.", "source": "github-repos"}
{"code": "def __init__(self, id, buckets=None, **kwargs):\n        \n        buckets = buckets or []\n        super(Song, self).__init__(id, buckets, **kwargs)", "docstring": "Song class\n\nArgs:\nid (str): a song ID\n\nKwargs:\nbuckets (list): A list of strings specifying which buckets to retrieve\n\nReturns:\nA Song object\n\nExample:\n\n>>> s = song.Song('SOPEXHZ12873FD2AC7', buckets=['song_hotttnesss', 'artist_hotttnesss'])\n>>> s.song_hotttnesss\n0.58602500000000002\n>>> s.artist_hotttnesss\n0.80329715999999995\n>>>", "source": "juraj-google-style"}
{"code": "def info(self, **kwargs):\n        \n        path = self._get_series_id_season_number_episode_number_path('info')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the primary information about a TV episode by combination of a\nseason and episode number.\n\nArgs:\nlanguage: (optional) ISO 639 code.\nappend_to_response: (optional) Comma separated, any TV series\nmethod.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def embed(self, url, format='json', **opt):\n        \n        if format not in ['json', 'xml']:\n            raise OEmbedInvalidRequest('Format must be json or xml')\n        opt['format'] = format\n        return self._request(url, **opt)", "docstring": "Get an OEmbedResponse from one of the providers configured in this\nconsumer according to the resource url.\n\nArgs:\nurl: The url of the resource to get.\nformat: Desired response format.\n**opt: Optional parameters to pass in the url to the provider.\n\nReturns:\nOEmbedResponse object.", "source": "juraj-google-style"}
{"code": "def __init__(\n        self,\n        learning_rate,\n        cg_max_iterations=20,\n        cg_damping=1e-3,\n        cg_unroll_loop=False,\n        scope='natural-gradient',\n        summary_labels=()\n    ):\n        \n        assert learning_rate > 0.0\n        self.learning_rate = learning_rate\n\n        self.solver = ConjugateGradient(\n            max_iterations=cg_max_iterations,\n            damping=cg_damping,\n            unroll_loop=cg_unroll_loop\n        )\n\n        super(NaturalGradient, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new natural gradient optimizer instance.\n\nArgs:\nlearning_rate: Learning rate, i.e. KL-divergence of distributions between optimization steps.\ncg_max_iterations: Conjugate gradient solver max iterations.\ncg_damping: Conjugate gradient solver damping factor.\ncg_unroll_loop: Unroll conjugate gradient loop if true.", "source": "juraj-google-style"}
{"code": "def validate_yaml(self, properties):\n    validator = OurValidator(schema)\n    if (not validator.validate(properties)):\n        for (key, value) in validator.errors.items():\n            if any([('unallowed value' in v) for v in value]):\n                print('{key} has an illegal value. Allowed values are {values} and are case sensitive.'.format(key=key, values=schema[key]['allowed']))\n        raise ValueError(validator.errors)", "docstring": "Validate the parsed YAML file for adherance to the ChemKED format.\n\nArguments:\nproperties (`dict`): Dictionary created from the parsed YAML file\n\nRaises:\n`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose\nstring contains the errors that are present.", "source": "codesearchnet"}
{"code": "def _add_sample_measure(self, measure_params, num_samples):\n    measured_qubits = list({qubit for (qubit, cmembit) in measure_params})\n    num_measured = len(measured_qubits)\n    axis = list(range(self._number_of_qubits))\n    for qubit in reversed(measured_qubits):\n        axis.remove(((self._number_of_qubits - 1) - qubit))\n    probabilities = np.reshape(np.sum((np.abs(self._statevector) ** 2), axis=tuple(axis)), (2 ** num_measured))\n    samples = self._local_random.choice(range((2 ** num_measured)), num_samples, p=probabilities)\n    memory = []\n    for sample in samples:\n        classical_memory = self._classical_memory\n        for (count, (qubit, cmembit)) in enumerate(sorted(measure_params)):\n            qubit_outcome = int(((sample & (1 << count)) >> count))\n            membit = (1 << cmembit)\n            classical_memory = ((classical_memory & (~ membit)) | (qubit_outcome << cmembit))\n        value = bin(classical_memory)[2:]\n        memory.append(hex(int(value, 2)))\n    return memory", "docstring": "Generate memory samples from current statevector.\n\nArgs:\nmeasure_params (list): List of (qubit, cmembit) values for\nmeasure instructions to sample.\nnum_samples (int): The number of memory samples to generate.\n\nReturns:\nlist: A list of memory values in hex format.", "source": "codesearchnet"}
{"code": "def assert_no_text(self, *args, **kwargs):\n        \n\n        query = TextQuery(*args, **kwargs)\n\n        @self.synchronize(wait=query.wait)\n        def assert_no_text():\n            count = query.resolve_for(self)\n\n            if matches_count(count, query.options) and (\n                   count > 0 or expects_none(query.options)):\n                raise ExpectationNotMet(query.negative_failure_message)\n\n            return True\n\n        return assert_no_text()", "docstring": "Asserts that the page or current node doesn't have the given text content, ignoring any\nHTML tags.\n\nArgs:\n*args: Variable length argument list for :class:`TextQuery`.\n**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj-google-style"}
{"code": "def on_core_metadata_event(self, event):\n    \n    core_metadata = json.loads(event.log_message.message)\n    input_names = ','.join(core_metadata['input_names'])\n    output_names = ','.join(core_metadata['output_names'])\n    target_nodes = ','.join(core_metadata['target_nodes'])\n\n    self._run_key = RunKey(input_names, output_names, target_nodes)\n    if not self._graph_defs:\n      self._graph_defs_arrive_first = False\n    else:\n      for device_name in self._graph_defs:\n        self._add_graph_def(device_name, self._graph_defs[device_name])\n\n    self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))\n\n    \n    logger.info('on_core_metadata_event() waiting for client ack (meta)...')\n    self._incoming_channel.get()\n    logger.info('on_core_metadata_event() client ack received (meta).')", "docstring": "Implementation of the core metadata-carrying Event proto callback.\n\nArgs:\nevent: An Event proto that contains core metadata about the debugged\nSession::Run() in its log_message.message field, as a JSON string.\nSee the doc string of debug_data.DebugDumpDir.core_metadata for details.", "source": "juraj-google-style"}
{"code": "def run_cell(self, cell):\n    globals = self.ipy_shell.user_global_ns\n    locals = self.ipy_shell.user_ns\n    globals.update({'__ipy_scope__': None})\n    try:\n        with redirect_stdout(self.stdout):\n            self.run(cell, globals, locals)\n    except:\n        self.code_error = True\n        if self.options.debug:\n            raise BdbQuit\n    finally:\n        self.finalize()", "docstring": "Run the Cell code using the IPython globals and locals\n\nArgs:\ncell (str): Python code to be executed", "source": "codesearchnet"}
{"code": "def add_site_property(self, property_name, values):\n        \n        if len(values) != len(self.sites):\n            raise ValueError(\"Values must be same length as sites.\")\n        for site, val in zip(self.sites, values):\n            site.properties[property_name] = val", "docstring": "Adds a property to a site.\n\nArgs:\nproperty_name (str): The name of the property to add.\nvalues (list): A sequence of values. Must be same length as\nnumber of sites.", "source": "juraj-google-style"}
{"code": "def normalize_genotypes(genotypes):\n    genotypes = genotypes.genotypes\n    return ((genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes))", "docstring": "Normalize the genotypes.\n\nArgs:\ngenotypes (Genotypes): The genotypes to normalize.\n\nReturns:\nnumpy.array: The normalized genotypes.", "source": "codesearchnet"}
{"code": "def check(self, namespace, level, explicit=False):\n    return ((self.get_permissions(namespace, explicit=explicit) & level) != 0)", "docstring": "Checks if the permset has permission to the specified namespace\nat the specified level\n\nArguments:\n\nnamespace -- permissioning namespace (str)\nlevel -- permissioning level (int) (PERM_READ for example)\nexplicit -- require explicitly set permissions to the provided namespace\n\nReturns:\n\nbool", "source": "codesearchnet"}
{"code": "def __init__(self, image_true_sampler):\n        \n        if isinstance(image_true_sampler, ImageTrueSampler) is False:\n            raise TypeError()\n        self.__image_true_sampler = image_true_sampler", "docstring": "Init.\n\nArgs:\nimage_true_sampler:     is-a `ImageTrueSampler`.", "source": "juraj-google-style"}
{"code": "def put(self, key, value):\n    \n    key = self._service_key(key)\n    self._service_ops['put'](key, value)", "docstring": "Stores the object `value` named by `key` in `service`.\n\nArgs:\nkey: Key naming `value`.\nvalue: the object to store.", "source": "juraj-google-style"}
{"code": "def _update_general_statistics(a_float, dist):\n    \n    if not dist.count:\n        dist.count = 1\n        dist.maximum = a_float\n        dist.minimum = a_float\n        dist.mean = a_float\n        dist.sumOfSquaredDeviation = 0\n    else:\n        old_count = dist.count\n        old_mean = dist.mean\n        new_mean = ((old_count * old_mean) + a_float) / (old_count + 1)\n        delta_sum_squares = (a_float - old_mean) * (a_float - new_mean)\n        dist.count += 1\n        dist.mean = new_mean\n        dist.maximum = max(a_float, dist.maximum)\n        dist.minimum = min(a_float, dist.minimum)\n        dist.sumOfSquaredDeviation += delta_sum_squares", "docstring": "Adds a_float to distribution, updating the statistics fields.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated", "source": "juraj-google-style"}
{"code": "def skip_log_prefix(func):\n    if callable(func):\n        func_code = getattr(func, '__code__', None)\n        if (func_code is None):\n            raise ValueError('Input callable does not have a function code object.')\n        file_name = func_code.co_filename\n        func_name = func_code.co_name\n        func_lineno = func_code.co_firstlineno\n    elif isinstance(func, six.string_types):\n        file_name = get_absl_logger().findCaller()[0]\n        func_name = func\n        func_lineno = None\n    else:\n        raise TypeError('Input is neither callable nor a string.')\n    ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)\n    return func", "docstring": "Skips reporting the prefix of a given function or name by ABSLLogger.\n\nThis is a convenience wrapper function / decorator for\n`ABSLLogger.register_frame_to_skip`.\n\nIf a callable function is provided, only that function will be skipped.\nIf a function name is provided, all functions with the same name in the\nfile that this is called in will be skipped.\n\nThis can be used as a decorator of the intended function to be skipped.\n\nArgs:\nfunc: Callable function or its name as a string.\n\nReturns:\nfunc (the input, unchanged).\n\nRaises:\nValueError: The input is callable but does not have a function code object.\nTypeError: The input is neither callable nor a string.", "source": "codesearchnet"}
{"code": "def create_empty_output_dir(output_directory: str, overwrite: bool=True) -> None:\n    if overwrite and file_io.file_exists_v2(output_directory):\n        logging.info('Deleting existing output directory: %s .', output_directory)\n        file_io.delete_recursively_v2(output_directory)\n    file_io.recursive_create_dir_v2(output_directory)", "docstring": "Creates the `output_directory`.\n\nIf `output_directory` already exists, it recursively deletes all contents\ninside the directory.\n\nAlso creates the parent & intermediate directories.\n\nArgs:\noutput_directory: Output directory.\noverwrite: Where to clean the output directory if exists.", "source": "github-repos"}
{"code": "def __init__(self, timestamp, family=None, reverse=False):\n        \n        self.timestamp = timestamp\n        self.reverse = reverse\n        self._family = family", "docstring": "Create a timestamp rule.\n\nArgs:\ntimestamp (int): Epoch time.\nfamily (str): Package family to apply the rule to.\nreverse (bool): If True, reverse the logic so that packages released\n*after* the timestamp are matched.", "source": "juraj-google-style"}
{"code": "def expand_value_set_url(self, value_set_url: str) -> value_set_pb2.ValueSet:\n    value_set_url, value_set_version = url_utils.parse_url_version(value_set_url)\n    base_url, terminology_service_url = _expansion_request_url_for_value_set_url(value_set_url)\n    auth = self.auth_per_terminology_server.get(base_url)\n    return self._expand_value_set_url_using_service(value_set_url=value_set_url, value_set_version=value_set_version, terminology_service_url=terminology_service_url, auth=auth)", "docstring": "Expands the value set using a terminology server.\n\nRequests an expansion of the value set from the appropriate terminology\nserver for the given URL and version if present on the URL. The terminology\nservice is chosen based on the domain of `value_set_url`.\n\nRetrieves the current definition of the value set from the terminology\nservice as well as its expansion.\n\nArgs:\nvalue_set_url: The url of the value set to expand.\n\nRaises:\nValueError: If a terminology service can not be found for `value_set_url`.\n\nReturns:\nThe current definition of the value set from the server with its expanded\ncodes present.", "source": "github-repos"}
{"code": "def verify_oauth2_token(id_token, request, audience=None):\n    return verify_token(id_token, request, audience=audience, certs_url=_GOOGLE_OAUTH2_CERTS_URL)", "docstring": "Verifies an ID Token issued by Google's OAuth 2.0 authorization server.\n\nArgs:\nid_token (Union[str, bytes]): The encoded token.\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\naudience (str): The audience that this token is intended for. This is\ntypically your application's OAuth 2.0 client ID. If None then the\naudience is not verified.\n\nReturns:\nMapping[str, Any]: The decoded token.", "source": "codesearchnet"}
{"code": "def merge(self, other_rel):\n        \n        if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):\n            self.frequencies += other_rel.frequencies\n        else:\n            print(\"Input table thresholds do not match.\")", "docstring": "Ingest another DistributedReliability and add its contents to the current object.\n\nArgs:\nother_rel: a Distributed reliability object.", "source": "juraj-google-style"}
{"code": "def object(self, key):\n    return _object.Object(self._name, key, context=self._context)", "docstring": "Retrieves a Storage Object for the specified key in this bucket.\n\nThe object need not exist.\n\nArgs:\nkey: the key of the object within the bucket.\nReturns:\nAn Object instance representing the specified key.", "source": "codesearchnet"}
{"code": "def AddRow(self, values):\n    \n    if self._number_of_columns and len(values) != self._number_of_columns:\n      raise ValueError('Number of values is out of bounds.')\n\n    if not self._column_sizes and self._columns:\n      self._column_sizes = [len(column) for column in self._columns]\n\n    value_strings = []\n    for value_index, value_string in enumerate(values):\n      if not isinstance(value_string, py2to3.UNICODE_TYPE):\n        value_string = '{0!s}'.format(value_string)\n      value_strings.append(value_string)\n\n      self._column_sizes[value_index] = max(\n          self._column_sizes[value_index], len(value_string))\n\n    self._rows.append(value_strings)\n\n    if not self._number_of_columns:\n      self._number_of_columns = len(value_strings)", "docstring": "Adds a row of values.\n\nArgs:\nvalues (list[object]): values.\n\nRaises:\nValueError: if the number of values is out of bounds.", "source": "juraj-google-style"}
{"code": "def success(self, value):\n        \n        if value == self._defaults['success'] and 'success' in self._values:\n            del self._values['success']\n        else:\n            self._values['success'] = value", "docstring": "The success property.\n\nArgs:\nvalue (bool). the property value.", "source": "juraj-google-style"}
{"code": "def vert_quality(script, min_quality=0.0, max_quality=0.05, inclusive=True):\n    filter_xml = ''.join(['  <filter name=\"Select by Vertex Quality\">\\n', '    <Param name=\"minQ\" ', 'value=\"{}\" '.format(min_quality), 'description=\"Min Quality\" ', 'min=\"0\" ', 'max=\"{}\" '.format((2 * max_quality)), 'type=\"RichDynamicFloat\" ', '/>\\n', '    <Param name=\"maxQ\" ', 'value=\"{}\" '.format(max_quality), 'description=\"Max Quality\" ', 'min=\"0\" ', 'max=\"{}\" '.format((2 * max_quality)), 'type=\"RichDynamicFloat\" ', '/>\\n', '    <Param name=\"Inclusive\" ', 'value=\"{}\" '.format(str(inclusive).lower()), 'description=\"Inclusive Sel.\" ', 'type=\"RichBool\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Select all the faces and vertexes within the specified vertex quality\nrange.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter] to.\nmin_quality (float): Minimum acceptable quality value.\nmax_quality (float): Maximum acceptable quality value.\ninclusive (bool): If True only the faces with ALL the vertices within\nthe specified range are selected. Otherwise any face with at least\none vertex within the range is selected.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def dates(self):\n    return _gen_periodic_schedule(self._start_date, self._end_date, self._tenor, holiday_calendar=self._holiday_calendar, roll_convention=self._roll_convention, backward=self._backward, end_of_month=self._end_of_month)", "docstring": "Returns the dates as computed from the schedule as a DateTensor.\n\nConstructs the date schedule from the supplied data. For more details see\nthe initializer docstring.\n\nReturns:\n`DateTensor` of rank one more than `start_date` or `end_date`\n(depending on `backwards`), representing schedules for each element\nof the input.", "source": "github-repos"}
{"code": "def update_q(self, predicted_q_arr, reward_value_arr, next_max_q_arr):\n        \n        \n        return predicted_q_arr + (self.alpha_value * (reward_value_arr + (self.gamma_value * next_max_q_arr) - predicted_q_arr))", "docstring": "Update Q.\n\nArgs:\npredicted_q_arr:    `np.ndarray` of predicted Q-Values.\nreward_value_arr:   `np.ndarray` of reward values.\nnext_max_q_arr:     `np.ndarray` of maximum Q-Values in next time step.\n\nReturns:\n`np.ndarray` of real Q-Values.", "source": "juraj-google-style"}
{"code": "def verify_bitcoin(message, signature, address):\n        \n        magic_sig = base64.b64decode(signature)\n\n        magic = magic_sig[0]\n        sig = Signature.from_bytes(magic_sig[1:])\n        sig.recovery_id = (magic - 27) & 0x3\n        compressed = ((magic - 27) & 0x4) != 0\n\n        \n        msg = b\"\\x18Bitcoin Signed Message:\\n\" + bytes([len(message)]) + message\n        msg_hash = hashlib.sha256(msg).digest()\n\n        derived_public_key = PublicKey.from_signature(msg_hash, sig)\n        if derived_public_key is None:\n            raise ValueError(\"Could not recover public key from the provided signature.\")\n\n        ver, h160 = address_to_key_hash(address)\n        hash160 = derived_public_key.hash160(compressed)\n        if hash160 != h160:\n            return False\n\n        return derived_public_key.verify(msg_hash, sig)", "docstring": "Verifies a message signed using PrivateKey.sign_bitcoin()\nor any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.)\n\nArgs:\nmessage(bytes): The message that the signature corresponds to.\nsignature (bytes or str): A Base64 encoded signature\naddress (str): Base58Check encoded address.\n\nReturns:\nbool: True if the signature verified properly, False otherwise.", "source": "juraj-google-style"}
{"code": "def GetDevicePath(device_handle):\n    io_service_obj = iokit.IOHIDDeviceGetService(device_handle)\n    str_buffer = ctypes.create_string_buffer(DEVICE_PATH_BUFFER_SIZE)\n    iokit.IORegistryEntryGetPath(io_service_obj, K_IO_SERVICE_PLANE, str_buffer)\n    return str_buffer.value", "docstring": "Obtains the unique path for the device.\n\nArgs:\ndevice_handle: reference to the device\n\nReturns:\nA unique path for the device, obtained from the IO Registry", "source": "codesearchnet"}
{"code": "def vq_gating(x,\n              num_experts,\n              k,\n              bneck,\n              hparams=None,\n              name=\"vq_gating\"):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n\n    if hparams.use_scales:\n      scales = tf.get_variable(\n          \"scales\", [num_experts],\n          tf.float32,\n          initializer=tf.ones_initializer())\n      scales = tf.nn.softmax(scales)\n      hparams.scales = scales\n    input_size = x.get_shape().as_list()[-1]\n    batch_size = common_layers.shape_list(x)[0]\n\n    if k > 1:\n      \n      \n      \n      x = tf.layers.dense(x, input_size * k)\n      \n      x = tf.reshape(x, [batch_size * k, input_size])\n    inputs = tf.expand_dims(x, axis=1)\n    inputs = tf.expand_dims(inputs, axis=1)\n    \n    hparams.z_size = int(math.log(num_experts, 2))\n    hparams.hidden_size = input_size\n    hparams.top_k = k\n    d = bneck.discrete_bottleneck(inputs)\n    centroids = None\n    exp_discrete = d[\"discrete\"]\n    embed_lookup = d[\"embed\"]\n    extra_loss = d[\"loss\"]\n    if hparams.residual_centroids:\n      centroids = embed_lookup(exp_discrete)  \n    top_k_indices = tf.squeeze(exp_discrete, axis=1)\n    tf.summary.histogram(\"discrete_counts\", top_k_indices)\n    \n    \n    if k > 1:\n      top_k_indices = tf.reshape(top_k_indices, [batch_size, k])\n    \n    top_k_gates = tf.ones([batch_size, k])\n    \n    \n    gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,\n                                          num_experts)\n    \n    \n    \n    count_per_expert = tf.reduce_sum(gates, axis=0)\n    if hparams.use_scales:\n      scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales)\n      extra_loss += scale_loss\n    if common_layers.should_generate_summaries():\n      tf.summary.histogram(\"vq_loss\", extra_loss)\n      tf.summary.historgram(\"scale_loss\", scale_loss)\n    return gates, extra_loss, centroids", "docstring": "VQ gating.\n\nArgs:\nx: input Tensor with shape [batch_size, input_size]\nnum_experts: an integer\nk: an integer - number of experts per example\nbneck: a bottleneck object\nhparams: optional hparams\nname: an optional string\n\nReturns:\ngates: a Tensor with shape [batch_size, num_experts]\nload: a Tensor with shape [num_experts]", "source": "juraj-google-style"}
{"code": "def get_num_filters(layer):\n    if (K.ndim(layer.output) == 2):\n        return K.int_shape(layer.output)[(- 1)]\n    channel_idx = (1 if (K.image_data_format() == 'channels_first') else (- 1))\n    return K.int_shape(layer.output)[channel_idx]", "docstring": "Determines the number of filters within the given `layer`.\n\nArgs:\nlayer: The keras layer to use.\n\nReturns:\nTotal number of filters within `layer`.\nFor `keras.layers.Dense` layer, this is the total number of outputs.", "source": "codesearchnet"}
{"code": "def get_sites_in_sphere(self, pt, r, include_index=False, include_image=False):\n    site_fcoords = np.mod(self.frac_coords, 1)\n    neighbors = []\n    for (fcoord, dist, i, img) in self._lattice.get_points_in_sphere(site_fcoords, pt, r):\n        nnsite = PeriodicSite(self[i].species, fcoord, self._lattice, properties=self[i].properties)\n        nn_data = ((nnsite, dist) if (not include_index) else (nnsite, dist, i))\n        if include_image:\n            nn_data += (img,)\n        neighbors.append(nn_data)\n    return neighbors", "docstring": "Find all sites within a sphere from the point. This includes sites\nin other periodic images.\n\nAlgorithm:\n\n1. place sphere of radius r in crystal and determine minimum supercell\n(parallelpiped) which would contain a sphere of radius r. for this\nwe need the projection of a_1 on a unit vector perpendicular\nto a_2 & a_3 (i.e. the unit vector in the direction b_1) to\ndetermine how many a_1\"s it will take to contain the sphere.\n\nNxmax = r * length_of_b_1 / (2 Pi)\n\n2. keep points falling within r.\n\nArgs:\npt (3x1 array): cartesian coordinates of center of sphere.\nr (float): Radius of sphere.\ninclude_index (bool): Whether the non-supercell site index\nis included in the returned data\ninclude_image (bool): Whether to include the supercell image\nis included in the returned data\n\nReturns:\n[(site, dist) ...] since most of the time, subsequent processing\nrequires the distance.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False):\n    \n    gzip_file = resolver.Resolver.OpenFileObject(\n        path_spec, resolver_context=resolver_context)\n    if not gzip_file:\n      raise errors.BackEndError('Missing gzip file.')\n\n    super(GzipFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._gzip_file = gzip_file\n    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\n\nRaises:\nBackEndError: when the gzip file is missing.", "source": "juraj-google-style"}
{"code": "def _ConvertDateTimeToOffset(self, date_time_value):\n    \n    date_time_obj = datetime.datetime(int(date_time_value['date']['year']),\n                                      int(date_time_value['date']['month']),\n                                      int(date_time_value['date']['day']),\n                                      int(date_time_value['hour']),\n                                      int(date_time_value['minute']),\n                                      int(date_time_value['second']))\n    \n    if self._version > 'v201808':\n      time_zone_str = 'timeZoneId'\n    else:\n      time_zone_str = 'timeZoneID'\n    date_time_str = pytz.timezone(\n        date_time_value[time_zone_str]).localize(date_time_obj).isoformat()\n\n    if date_time_str[-5:] == '00:00':\n      return date_time_str[:-6] + 'Z'\n    else:\n      return date_time_str", "docstring": "Converts the PQL formatted response for a dateTime object.\n\nOutput conforms to ISO 8061 format, e.g. 'YYYY-MM-DDTHH:MM:SSz.'\n\nArgs:\ndate_time_value: dict The date time value from the PQL response.\n\nReturns:\nstr: A string representation of the date time value uniform to\nReportService.", "source": "juraj-google-style"}
{"code": "def sia_bipartitions(nodes, node_labels=None):\n    if config.CUT_ONE_APPROXIMATION:\n        bipartitions = directed_bipartition_of_one(nodes)\n    else:\n        bipartitions = directed_bipartition(nodes, nontrivial=True)\n    return [Cut(bipartition[0], bipartition[1], node_labels) for bipartition in bipartitions]", "docstring": "Return all |big_phi| cuts for the given nodes.\n\nThis value changes based on :const:`config.CUT_ONE_APPROXIMATION`.\n\nArgs:\nnodes (tuple[int]): The node indices to partition.\nReturns:\nlist[Cut]: All unidirectional partitions.", "source": "codesearchnet"}
{"code": "def write(name, value):\n    \n    def wrapped(func):\n        @functools.wraps(func)\n        def _decorator(*args, **kwargs):\n            existing_env = core.read(name, allow_none=True)\n            core.write(name, value)\n            func_val = func(*args, **kwargs)\n            core.write(name, existing_env)\n            return func_val\n        return _decorator\n    return wrapped", "docstring": "Temporarily change or set the environment variable during the execution of a function.\n\nArgs:\nname: The name of the environment variable\nvalue: A value to set for the environment variable\n\nReturns:\nThe function return value.", "source": "juraj-google-style"}
{"code": "def _obtain_health_pills_at_step(self, events_directory, node_names, step):\n    pattern = os.path.join(events_directory, _DEBUGGER_EVENTS_GLOB_PATTERN)\n    file_paths = glob.glob(pattern)\n    if (not file_paths):\n        raise IOError(('No events files found that matches the pattern %r.' % pattern))\n    file_paths.sort()\n    mapping = collections.defaultdict(list)\n    node_name_set = frozenset(node_names)\n    for file_path in file_paths:\n        should_stop = self._process_health_pill_event(node_name_set, mapping, step, file_path)\n        if should_stop:\n            break\n    return mapping", "docstring": "Reads disk to obtain the health pills for a run at a specific step.\n\nThis could be much slower than the alternative path of just returning all\nhealth pills sampled by the event multiplexer. It could take tens of minutes\nto complete this call for large graphs for big step values (in the\nthousands).\n\nArgs:\nevents_directory: The directory containing events for the desired run.\nnode_names: A list of node names for which to retrieve health pills.\nstep: The step to obtain health pills for.\n\nReturns:\nA dictionary mapping from node name to a list of health pill objects (see\ndocs for _serve_health_pills_handler for properties of those objects).\n\nRaises:\nIOError: If no files with health pill events could be found.", "source": "codesearchnet"}
{"code": "def stack1d(*points):\n    \n    result = np.empty((2, len(points)), order=\"F\")\n    for index, point in enumerate(points):\n        result[:, index] = point\n    return result", "docstring": "Fill out the columns of matrix with a series of points.\n\nThis is because ``np.hstack()`` will just make another 1D vector\nout of them and ``np.vstack()`` will put them in the rows.\n\nArgs:\npoints (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.\narrays with shape ``(2,)``.\n\nReturns:\nnumpy.ndarray: The array with each point in ``points`` as its\ncolumns.", "source": "juraj-google-style"}
{"code": "def _get_flow_for_token(csrf_token, request):\n    \n    flow_pickle = request.session.get(_FLOW_KEY.format(csrf_token), None)\n    return None if flow_pickle is None else jsonpickle.decode(flow_pickle)", "docstring": "Looks up the flow in session to recover information about requested\nscopes.\n\nArgs:\ncsrf_token: The token passed in the callback request that should\nmatch the one previously generated and stored in the request on the\ninitial authorization view.\n\nReturns:\nThe OAuth2 Flow object associated with this flow based on the\nCSRF token.", "source": "juraj-google-style"}
{"code": "def __delitem__(self, key):\n        \n        path = self.keypath(key)\n        if fs.exists(path):\n            fs.rm(path)\n        else:\n            raise KeyError(key)", "docstring": "Delete cached file.\n\nArguments:\nkey: Key.\n\nRaises:\nKeyError: If file not in cache.", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):\n    if (kmip_version < enums.KMIPVersion.KMIP_1_3):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the RNGParameters object.'.format(kmip_version.value))\n    local_buffer = BytearrayStream()\n    if self._rng_algorithm:\n        self._rng_algorithm.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The RNGParameters structure is missing the RNG algorithm field.')\n    if self._cryptographic_algorithm:\n        self._cryptographic_algorithm.write(local_buffer, kmip_version=kmip_version)\n    if self._cryptographic_length:\n        self._cryptographic_length.write(local_buffer, kmip_version=kmip_version)\n    if self._hashing_algorithm:\n        self._hashing_algorithm.write(local_buffer, kmip_version=kmip_version)\n    if self._drbg_algorithm:\n        self._drbg_algorithm.write(local_buffer, kmip_version=kmip_version)\n    if self._recommended_curve:\n        self._recommended_curve.write(local_buffer, kmip_version=kmip_version)\n    if self._fips186_variation:\n        self._fips186_variation.write(local_buffer, kmip_version=kmip_version)\n    if self._prediction_resistance:\n        self._prediction_resistance.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(RNGParameters, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the RNGParameters structure encoding to the data stream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode\nAttributes structure data, supporting a write method.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidField: Raised if the RNG algorithm field is not defined.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the RNGParameters structure.", "source": "codesearchnet"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    \n    url = getattr(event, 'url', None)\n    if not url:\n      return\n\n    \n    \n    source, _ = formatters_manager.FormattersManager.GetSourceStrings(event)\n\n    if source != 'WEBHIST':\n      return\n\n    for engine, url_expression, method_name in self._URL_FILTERS:\n      callback_method = getattr(self, method_name, None)\n      if not callback_method:\n        logger.warning('Missing method: {0:s}'.format(callback_method))\n        continue\n\n      match = url_expression.search(url)\n      if not match:\n        continue\n\n      search_query = callback_method(url)\n      if not search_query:\n        logger.warning('Missing search query for URL: {0:s}'.format(url))\n        continue\n\n      search_query = self._DecodeURL(search_query)\n      if not search_query:\n        continue\n\n      event_tag = self._CreateEventTag(\n          event, self._EVENT_TAG_COMMENT, self._EVENT_TAG_LABELS)\n      mediator.ProduceEventTag(event_tag)\n\n      self._counter['{0:s}:{1:s}'.format(engine, search_query)] += 1\n\n      \n      timestamp = getattr(event, 'timestamp', 0)\n      source = getattr(event, 'parser', 'N/A')\n      source = getattr(event, 'plugin', source)\n      self._search_term_timeline.append(\n          SEARCH_OBJECT(timestamp, source, engine, search_query))", "docstring": "Analyzes an event.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"}
{"code": "def load_json(path):\n    with open(path, 'rt') as f:\n        jsondict = json.loads(f.read(), object_pairs_hook=OrderedDict)\n    if (not jsondict):\n        raise LoadError(('JSON file: %s is empty!' % path))\n    return jsondict", "docstring": "Load JSON file into an ordered dictionary\n\nArgs:\npath (str): Path to JSON file\n\nReturns:\nOrderedDict: Ordered dictionary containing loaded JSON file", "source": "codesearchnet"}
{"code": "def Parse(self, raw_data):\n    self.results = set()\n    if (not self.filters):\n        self.results.update(raw_data)\n    else:\n        for f in self.filters:\n            self.results.update(f.Parse(raw_data))\n    return list(self.results)", "docstring": "Take the data and yield results that passed through the filters.\n\nThe output of each filter is added to a result set. So long as the filter\nselects, but does not modify, raw data, the result count will remain\naccurate.\n\nArgs:\nraw_data: An iterable series of rdf values.\n\nReturns:\nA list of rdf values that matched at least one filter.", "source": "codesearchnet"}
{"code": "def _HasExpectedLineLength(self, file_object):\n    original_file_position = file_object.tell()\n    line_reader = self._CreateLineReader(file_object)\n    for _ in range(0, 20):\n        sample_line = line_reader.readline((self._maximum_line_length + 1))\n        if (len(sample_line) > self._maximum_line_length):\n            file_object.seek(original_file_position)\n            return False\n    file_object.seek(original_file_position)\n    return True", "docstring": "Determines if a file begins with lines of the expected length.\n\nAs we know the maximum length of valid lines in the DSV file, the presence\nof lines longer than this indicates that the file will not be parsed\nsuccessfully, without reading excessive data from a large file.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nbool: True if the file has lines of the expected length.", "source": "codesearchnet"}
{"code": "def _from_dict_record(data):\n    \n    return [Schema._get_field_entry(name, value) for name, value in list(data.items())]", "docstring": "Infer a BigQuery table schema from a dictionary. If the dictionary has entries that\nare in turn OrderedDicts these will be turned into RECORD types. Ideally this will\nbe an OrderedDict but it is not required.\n\nArgs:\ndata: The dict to infer a schema from.\nReturns:\nA list of dictionaries containing field 'name' and 'type' entries, suitable for use in a\nBigQuery Tables resource schema.", "source": "juraj-google-style"}
{"code": "def remove_chars(str_, char_list):\n    outstr = str_[:]\n    for char in char_list:\n        outstr = outstr.replace(char, '')\n    return outstr", "docstring": "removes all chars in char_list from str_\n\nArgs:\nstr_ (str):\nchar_list (list):\n\nReturns:\nstr: outstr\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> str_ = '1, 2, 3, 4'\n>>> char_list = [',']\n>>> result = remove_chars(str_, char_list)\n>>> print(result)\n1 2 3 4", "source": "codesearchnet"}
{"code": "def has_strategy():\n    return get_strategy() is not _get_default_strategy()", "docstring": "Return if there is a current non-default `tf.distribute.Strategy`.\n\n```\nassert not tf.distribute.has_strategy()\nwith strategy.scope():\nassert tf.distribute.has_strategy()\n```\n\nReturns:\nTrue if inside a `with strategy.scope():`.", "source": "github-repos"}
{"code": "def convert_nested_model(weights):\n    trainable_weights = weights[:len(layer.trainable_weights)]\n    non_trainable_weights = weights[len(layer.trainable_weights):]\n    new_trainable_weights = []\n    new_non_trainable_weights = []\n    for sublayer in layer.layers:\n        num_trainable_weights = len(sublayer.trainable_weights)\n        num_non_trainable_weights = len(sublayer.non_trainable_weights)\n        if sublayer.weights:\n            preprocessed = preprocess_weights_for_loading(layer=sublayer, weights=trainable_weights[:num_trainable_weights] + non_trainable_weights[:num_non_trainable_weights], original_keras_version=original_keras_version, original_backend=original_backend)\n            new_trainable_weights.extend(preprocessed[:num_trainable_weights])\n            new_non_trainable_weights.extend(preprocessed[num_trainable_weights:])\n            trainable_weights = trainable_weights[num_trainable_weights:]\n            non_trainable_weights = non_trainable_weights[num_non_trainable_weights:]\n    return new_trainable_weights + new_non_trainable_weights", "docstring": "Converts layers nested in `Model` or `Sequential`.\n\nThis function uses `preprocess_weights_for_loading()` for converting nested\nlayers.\n\nArgs:\nweights: List of weights values (Numpy arrays).\n\nReturns:\nA list of weights values (Numpy arrays).", "source": "github-repos"}
{"code": "def GetMap(self, cache_info, data):\n    for line in cache_info:\n        line = line.rstrip('\\n')\n        if not line or line[0] == '\n            continue\n        entry = self._ReadEntry(line)\n        if entry is None:\n            self.log.warning('Could not create entry from line %r in cache, skipping', line)\n            continue\n        if not data.Add(entry):\n            self.log.warning('Could not add entry %r read from line %r in cache', entry, line)\n    return data", "docstring": "Returns a map from a cache.\n\nArgs:\ncache_info: file like object containing the cache.\ndata: a Map to populate.\nReturns:\nA child of Map containing the cache data.", "source": "github-repos"}
{"code": "def _compute_euclidean_distance(cls, inputs, clusters):\n    output = []\n    for inp in inputs:\n        with ops.colocate_with(inp, ignore_existing=True):\n            squared_distance = math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) - 2 * math_ops.matmul(inp, clusters, transpose_b=True) + array_ops.transpose(math_ops.reduce_sum(math_ops.square(clusters), 1, keepdims=True))\n            output.append(squared_distance)\n    return output", "docstring": "Computes Euclidean distance between each input and each cluster center.\n\nArgs:\ninputs: list of input Tensors.\nclusters: cluster Tensor.\n\nReturns:\nlist of Tensors, where each element corresponds to each element in inputs.\nThe value is the distance of each row to all the cluster centers.", "source": "github-repos"}
{"code": "def format_map(self, format_string, mapping):\n    return self.vformat(format_string, args=None, kwargs=mapping)", "docstring": "format a string by a map\n\nArgs:\nformat_string(str): A format string\nmapping(dict): A map to format the string\n\nReturns:\nA formatted string.\n\nRaises:\nKeyError: if key is not provided by the given map.", "source": "codesearchnet"}
{"code": "def _get_present_locations(match_traversals):\n    present_locations = set()\n    present_non_optional_locations = set()\n    for match_traversal in match_traversals:\n        for step in match_traversal:\n            if (step.as_block is not None):\n                (location_name, _) = step.as_block.location.get_location_name()\n                present_locations.add(location_name)\n                if (isinstance(step.root_block, Traverse) and (not step.root_block.optional)):\n                    present_non_optional_locations.add(location_name)\n    if (not present_non_optional_locations.issubset(present_locations)):\n        raise AssertionError(u'present_non_optional_locations {} was not a subset of present_locations {}. THis hould never happen.'.format(present_non_optional_locations, present_locations))\n    return (present_locations, present_non_optional_locations)", "docstring": "Return the set of locations and non-optional locations present in the given match traversals.\n\nWhen enumerating the possibilities for optional traversals,\nthe resulting match traversals may have sections of the query omitted.\nThese locations will not be included in the returned `present_locations`.\nAll of the above locations that are not optional traverse locations\nwill be included in present_non_optional_locations.\n\nArgs:\nmatch_traversals: one possible list of match traversals generated from a query\ncontaining @optional traversal(s)\n\nReturns:\ntuple (present_locations, present_non_optional_locations):\n- present_locations: set of all locations present in the given match traversals\n- present_non_optional_locations: set of all locations present in the match traversals\nthat are not reached through optional traverses.\nGuaranteed to be a subset of present_locations.", "source": "codesearchnet"}
{"code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_update(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Assigns `tf.IndexedSlices` to this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be assigned to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def _get_required_params_for_impression(self, experiment, variation_id):\n    snapshot = {}\n    snapshot[self.EventParams.DECISIONS] = [{self.EventParams.EXPERIMENT_ID: experiment.id, self.EventParams.VARIATION_ID: variation_id, self.EventParams.CAMPAIGN_ID: experiment.layerId}]\n    snapshot[self.EventParams.EVENTS] = [{self.EventParams.EVENT_ID: experiment.layerId, self.EventParams.TIME: self._get_time(), self.EventParams.KEY: 'campaign_activated', self.EventParams.UUID: str(uuid.uuid4())}]\n    return snapshot", "docstring": "Get parameters that are required for the impression event to register.\n\nArgs:\nexperiment: Experiment for which impression needs to be recorded.\nvariation_id: ID for variation which would be presented to user.\n\nReturns:\nDict consisting of decisions and events info for impression event.", "source": "codesearchnet"}
{"code": "def _build(self, inputs):\n    \n    input_shape = tf.shape(inputs)\n    input_dtype = inputs.dtype.as_numpy_dtype\n    batch_size = tf.expand_dims(input_shape[0], 0)\n    number_of_params = inputs.get_shape()[1]\n    if number_of_params != self._constraints.num_free_params:\n      raise base.Error('Input size is not consistent with constraint '\n                       'definition: {} parameters expected, {} provided.'\n                       .format(self._constraints.num_free_params,\n                               number_of_params))\n    num_output_dimensions = len(self._psi) \n    def get_input_slice(start, size):\n      \n      return basic.SliceByDim([1], [start], [size])(inputs)\n\n    warped_grid = []\n    var_index_offset = 0\n    number_of_points = np.prod(self._output_shape)\n    for i in xrange(num_output_dimensions):\n      if self._psi[i] is not None:\n        \n        \n        grid_coord = self._psi[i].astype(input_dtype)\n\n        num_active_vars = self._psi[i].shape[0]\n        active_vars = get_input_slice(var_index_offset, num_active_vars)\n        warped_coord = tf.matmul(active_vars, grid_coord)\n        warped_coord = tf.expand_dims(warped_coord, 1)\n        var_index_offset += num_active_vars\n        offset = self._psi[num_output_dimensions + i]\n        if offset is not None:\n          offset = offset.astype(input_dtype)\n          \n          \n          tiling_params = tf.concat(\n              [\n                  batch_size, tf.constant(\n                      1, shape=(1,)), tf.ones_like(offset.shape)\n              ],\n              0)\n          offset = offset.reshape((1, 1) + offset.shape)\n          warped_coord += tf.tile(offset, tiling_params)\n\n      else:\n        \n        \n        warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)\n        tiling_params = tf.concat(\n            [\n                batch_size, tf.constant(\n                    1, shape=(1,)), tf.ones_like(warped_coord.shape)\n            ],\n            0)\n        warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)\n        warped_coord = tf.tile(warped_coord, tiling_params)\n\n      warped_coord += self._psi[i + 2 * num_output_dimensions]\n      \n      \n      warped_coord.set_shape([None, 1, number_of_points])\n      warped_grid.append(warped_coord)\n\n    \n    \n    grid_shape = self._output_shape + (1,)\n    warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]\n    return tf.concat(warped_grid, len(grid_shape))", "docstring": "Assembles the module network and adds it to the graph.\n\nThe internal computation graph is assembled according to the set of\nconstraints provided at construction time.\n\nArgs:\ninputs: Tensor containing a batch of transformation parameters.\n\nReturns:\nA batch of warped grids.\n\nRaises:\nError: If the input tensor size is not consistent with the constraints\npassed at construction time.", "source": "juraj-google-style"}
{"code": "def rename_v2(src, dst, overwrite=False):\n    _pywrap_file_io.RenameFile(compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)", "docstring": "Rename or move a file / directory.\n\nArgs:\nsrc: string, pathname for a file\ndst: string, pathname to which the file needs to be moved\noverwrite: boolean, if false it's an error for `dst` to be occupied by an\nexisting file.\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def _get_session(self):\n    if (self._session is None):\n        self._session = _boto3.session.Session(**self._storage_parameters.get('session', dict()))\n    return self._session", "docstring": "S3 Boto3 Session.\n\nReturns:\nboto3.session.Session: session", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 kw: YangIdentifier,\n                 arg: Optional[str],\n                 pref: YangIdentifier = None):\n        \n        self.prefix = pref\n        self.keyword = kw\n        self.argument = arg\n        self.superstmt = None\n        self.substatements = []", "docstring": "Initialize the class instance.\n\nArgs:\nkw: Keyword.\narg: Argument.\nsup: Parent statement.\nsub: List of substatements.\npref: Keyword prefix (``None`` for built-in statements).", "source": "juraj-google-style"}
{"code": "def print_colored_columns(printer, rows, padding=2):\n    rows_ = [x[:(- 1)] for x in rows]\n    colors = [x[(- 1)] for x in rows]\n    for (col, line) in zip(colors, columnise(rows_, padding=padding)):\n        printer(line, col)", "docstring": "Like `columnise`, but with colored rows.\n\nArgs:\nprinter (`colorize.Printer`): Printer object.\n\nNote:\nThe last entry in each row is the row color, or None for no coloring.", "source": "codesearchnet"}
{"code": "def sheets_get(config, auth, sheet_url_or_name):\n    sheet_id = sheets_id(config, auth, sheet_url_or_name)\n    if sheet_id:\n        return API_Sheets(config, auth).spreadsheets().get(spreadsheetId=sheet_id).execute()\n    else:\n        return None", "docstring": "Get sheets definition.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nsheet_url_or_name - one of: URL, document title, or id\n\nReturns:\nDictionary with all sheets information from Rest API.", "source": "github-repos"}
{"code": "def export_tensorflow(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin'], model: 'TFPreTrainedModel', config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None) -> Tuple[List[str], List[str]]:\n    import onnx\n    import tensorflow as tf\n    import tf2onnx\n    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:\n        raise ValueError('You cannot provide both a tokenizer and preprocessor to export the model.')\n    if tokenizer is not None:\n        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)\n        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')\n        preprocessor = tokenizer\n    model.config.return_dict = True\n    if config.values_override is not None:\n        logger.info(f'Overriding {len(config.values_override)} configuration item(s)')\n        for override_config_key, override_config_value in config.values_override.items():\n            logger.info(f'\\t- {override_config_key} -> {override_config_value}')\n            setattr(model.config, override_config_key, override_config_value)\n    model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW)\n    inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())\n    onnx_outputs = list(config.outputs.keys())\n    input_signature = [tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items()]\n    onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset)\n    onnx.save(onnx_model, output.as_posix())\n    config.restore_ops()\n    return (matched_inputs, onnx_outputs)", "docstring": "Export a TensorFlow model to an ONNX Intermediate Representation (IR)\n\nArgs:\npreprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]):\nThe preprocessor used for encoding the data.\nmodel ([`TFPreTrainedModel`]):\nThe model to export.\nconfig ([`~onnx.config.OnnxConfig`]):\nThe ONNX configuration associated with the exported model.\nopset (`int`):\nThe version of the ONNX operator set to use.\noutput (`Path`):\nDirectory to store the exported ONNX model.\n\nReturns:\n`Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from\nthe ONNX configuration.", "source": "github-repos"}
{"code": "def _apply_options(self, token):\n    if (token.is_punct and self.remove_punct):\n        return None\n    if (token.is_stop and self.remove_stop_words):\n        return None\n    if (token.is_digit and self.remove_digits):\n        return None\n    if (token.is_oov and self.exclude_oov):\n        return None\n    if (token.pos_ in self.exclude_pos_tags):\n        return None\n    if (token.ent_type_ in self.exclude_entities):\n        return None\n    if self.lemmatize:\n        return token.lemma_\n    if self.lower:\n        return token.lower_\n    return token.orth_", "docstring": "Applies various filtering and processing options on token.\n\nReturns:\nThe processed token. None if filtered.", "source": "codesearchnet"}
{"code": "def print_file_results(file_result):\n    print_results_header(file_result.filepath, file_result.is_valid)\n    for object_result in file_result.object_results:\n        if object_result.warnings:\n            print_warning_results(object_result, 1)\n        if object_result.errors:\n            print_schema_results(object_result, 1)\n    if file_result.fatal:\n        print_fatal_results(file_result.fatal, 1)", "docstring": "Print the results of validating a file.\n\nArgs:\nfile_result: A FileValidationResults instance.", "source": "codesearchnet"}
{"code": "def Update(self, attribute=None):\n    client_id = self.urn.Split()[0]\n    if (attribute == 'CONTAINS'):\n        flow_id = flow.StartAFF4Flow(client_id=client_id, flow_name='ListDirectory', pathspec=self.real_pathspec, notify_to_user=False, token=self.token)\n        return flow_id", "docstring": "Refresh an old attribute.\n\nNote that refreshing the attribute is asynchronous. It does not change\nanything about the current object - you need to reopen the same URN some\ntime later to get fresh data.\n\nAttributes: CONTAINS - Refresh the content of the directory listing.\nArgs:\nattribute: An attribute object as listed above.\n\nReturns:\nThe Flow ID that is pending\n\nRaises:\nIOError: If there has been an error starting the flow.", "source": "codesearchnet"}
{"code": "def _get_file_iterator(self, file_obj):\n    file_obj.seek(0)\n    return iter((lambda : file_obj.read(self.read_bs)), '')", "docstring": "For given `file_obj` return iterator, which will read the file in\n`self.read_bs` chunks.\n\nArgs:\nfile_obj (file): File-like object.\n\nReturn:\niterator: Iterator reading the file-like object in chunks.", "source": "codesearchnet"}
{"code": "def reduce_to_best_decode(metrics, reduce_func):\n  \n  num_videos = metrics.shape[1]\n  \n  \n  mean_across_frames = np.mean(metrics, axis=-1)\n\n  \n  best_decode_ind = reduce_func(mean_across_frames, axis=0)\n  best_metrics = metrics[best_decode_ind, np.arange(num_videos), :]\n  return best_metrics, best_decode_ind", "docstring": "Extracts the best-decode from the metrics according to reduce_func.\n\nArgs:\nmetrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)\nreduce_func: callable, np.argmax or np.argmin.\nReturns:\nbest_metrics: 2-D numpy array, shape=(num_samples, num_frames).\nbest_decode_ind: 1-D numpy array, shape=(num_samples,)", "source": "juraj-google-style"}
{"code": "def read_bytes(self, length) -> bytes:\n        \n        value = self.stream.read(length)\n        return value", "docstring": "Read the specified number of bytes from the stream.\n\nArgs:\nlength (int): number of bytes to read.\n\nReturns:\nbytes: `length` number of bytes.", "source": "juraj-google-style"}
{"code": "def calc_update_events(self, asin_to_progress):\n    new_events = []\n    for (asin, new_progress) in asin_to_progress.iteritems():\n        try:\n            book_snapshot = self.get_book(asin)\n        except KeyError:\n            new_events.append(AddEvent(asin))\n        else:\n            if (book_snapshot.status == ReadingStatus.CURRENT):\n                change = (new_progress - book_snapshot.progress)\n                if (change > 0):\n                    new_events.append(ReadEvent(asin, change))\n    return new_events", "docstring": "Calculate and return an iterable of `KindleEvent`s which, when\napplied to the current snapshot, result in the the current snapshot\nreflecting the progress state of the `asin_to_progress` mapping.\n\nFunctionally, this method generates `AddEvent`s and `ReadEvent`s from\nupdated Kindle Library state.\n\nArgs:\nasin_to_progress: A map of book asins to the integral\nrepresentation of progress used in the current snapshot.\n\nReturns:\nA list of Event objects that account for the changes detected in\nthe `asin_to_progress`.", "source": "codesearchnet"}
{"code": "def _validate(cls, message):\n    valid = False\n    if ((('name' in message) and ('value' in message)) or (('id' in message) and ('data' in message))):\n        valid = True\n    return valid", "docstring": "Confirm the validitiy of a given dict as an OpenXC message.\n\nReturns:\n``True`` if the message contains at least a ``name`` and ``value``.", "source": "codesearchnet"}
{"code": "def save(self, data: Union[dict, List[dict]]):\n    raise NotImplementedError()", "docstring": "Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].\n\nArgs:\ndata (`dict` or list of `dict`): The data to store.", "source": "github-repos"}
{"code": "def transpile(circuits, backend=None, basis_gates=None, coupling_map=None, initial_layout=None, seed_mapper=None, pass_manager=None):\n    warnings.warn('qiskit.transpiler.transpile() has been deprecated and will be removed in the 0.9 release. Use qiskit.compiler.transpile() instead.', DeprecationWarning)\n    return compiler.transpile(circuits=circuits, backend=backend, basis_gates=basis_gates, coupling_map=coupling_map, initial_layout=initial_layout, seed_transpiler=seed_mapper, pass_manager=pass_manager)", "docstring": "transpile one or more circuits.\n\nArgs:\ncircuits (QuantumCircuit or list[QuantumCircuit]): circuits to compile\nbackend (BaseBackend): a backend to compile for\nbasis_gates (list[str]): list of basis gate names supported by the\ntarget. Default: ['u1','u2','u3','cx','id']\ncoupling_map (list): coupling map (perhaps custom) to target in mapping\n\ninitial_layout (Layout or dict or list):\nInitial position of virtual qubits on physical qubits. The final\nlayout is not guaranteed to be the same, as the transpiler may permute\nqubits through swaps or other means.\n\nseed_mapper (int): random seed for the swap_mapper\npass_manager (PassManager): a pass_manager for the transpiler stages\n\nReturns:\nQuantumCircuit or list[QuantumCircuit]: transpiled circuit(s).\n\nRaises:\nTranspilerError: in case of bad inputs to transpiler or errors in passes", "source": "codesearchnet"}
{"code": "def get_user(self, user_id):\n        \n        try:\n            return User.objects.get(id=user_id)\n        except User.DoesNotExist:\n            return None", "docstring": "Returns a user, given his or her user id. Required for a custom authentication backend.\nArgs:\nuser_id\nThe user id of the user to fetch.\nReturns:\nUser or None", "source": "juraj-google-style"}
{"code": "class BeamJarExpansionService(JavaJarExpansionService):\n\n    def __init__(self, gradle_target, extra_args=None, gradle_appendix=None, classpath=None, append_args=None):\n        path_to_jar = subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target, gradle_appendix)\n        self.gradle_target = gradle_target\n        super().__init__(path_to_jar, extra_args, classpath=classpath, append_args=append_args)", "docstring": "An expansion service based on an Beam Java Jar file.\n\nAttempts to use a locally-built copy of the jar based on the gradle target,\nif it exists, otherwise attempts to download and cache the released artifact\ncorresponding to this version of Beam from the apache maven repository.\n\nArgs:\ngradle_target: Beam Gradle target for building an executable jar which will\nbe used to start the expansion service.\nextra_args: arguments to be provided when starting up the\nexpansion service using the jar file. These arguments will replace the\ndefault arguments.\ngradle_appendix: Gradle appendix of the artifact.\nclasspath: Additional dependencies to be added to the classpath.\nappend_args: arguments to be provided when starting up the\nexpansion service using the jar file. These arguments will be appended to\nthe default arguments.", "source": "github-repos"}
{"code": "def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient:\n    \n    if len(servers) > 1:\n        sorted_servers = [\n            server_url\n            for (server_url, _) in sort_servers_closest(servers)\n        ]\n        log.info(\n            'Automatically selecting matrix homeserver based on RTT',\n            sorted_servers=sorted_servers,\n        )\n    elif len(servers) == 1:\n        sorted_servers = servers\n    else:\n        raise TransportError('No valid servers list given')\n\n    last_ex = None\n    for server_url in sorted_servers:\n        server_url: str = server_url\n        client = GMatrixClient(server_url, *args, **kwargs)\n        try:\n            client.api._send('GET', '/versions', api_path='/_matrix/client')\n        except MatrixError as ex:\n            log.warning('Selected server not usable', server_url=server_url, _exception=ex)\n            last_ex = ex\n        else:\n            break\n    else:\n        raise TransportError(\n            'Unable to find a reachable Matrix server. Please check your network connectivity.',\n        ) from last_ex\n    return client", "docstring": "Given a list of possible servers, chooses the closest available and create a GMatrixClient\n\nParams:\nservers: list of servers urls, with scheme (http or https)\nRest of args and kwargs are forwarded to GMatrixClient constructor\nReturns:\nGMatrixClient instance for one of the available servers", "source": "juraj-google-style"}
{"code": "def set_logging_levels(remote=None, local=None):\n    \n\n    logging_options = ['emergency',\n                       'alert',\n                       'critical',\n                       'error',\n                       'warning',\n                       'notice',\n                       'informational',\n                       'debug']\n\n    query = \"\"\n\n    if remote:\n        if remote in logging_options:\n            query += ' remoteSeverity=\"{0}\"'.format(remote)\n        else:\n            raise salt.exceptions.CommandExecutionError(\"Remote Severity option is not valid.\")\n\n    if local:\n        if local in logging_options:\n            query += ' localSeverity=\"{0}\"'.format(local)\n        else:\n            raise salt.exceptions.CommandExecutionError(\"Local Severity option is not valid.\")\n\n    dn = \"sys/svc-ext/syslog\"\n    inconfig = .format(query)\n\n    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)\n\n    return ret", "docstring": "Sets the logging levels of the CIMC devices. The logging levels must match\nthe following options: emergency, alert, critical, error, warning, notice,\ninformational, debug.\n\n.. versionadded:: 2019.2.0\n\nArgs:\nremote(str): The logging level for SYSLOG logs.\n\nlocal(str): The logging level for the local device.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' cimc.set_logging_levels remote=error local=notice", "source": "juraj-google-style"}
{"code": "def _add_unique_metric_name(self, metric_name, metric_fn, output_index):\n    if len(self.output_names) > 1:\n        if not getattr(metric_fn, '_from_serialized', False):\n            metric_name = '%s_%s' % (self.output_names[output_index], metric_name)\n    j = 1\n    base_metric_name = metric_name\n    while metric_name in self.metrics_names:\n        metric_name = '%s_%d' % (base_metric_name, j)\n        j += 1\n    return metric_name", "docstring": "Makes the metric name unique.\n\nIf there are multiple outputs for which the metrics are calculated, the\nmetric names have to be made unique by appending an integer.\n\nArgs:\nmetric_name: Metric name that corresponds to the metric specified by the\nuser. For example: 'acc'.\nmetric_fn: The Metric object.\noutput_index: The index of the model output for which the metric name is\nbeing added.\n\nReturns:\nstring, name of the model's unique metric name", "source": "github-repos"}
{"code": "def _patch_expand_paths(self, settings, name, value):\n    return [self._patch_expand_path(settings, name, item) for item in value]", "docstring": "Apply ``SettingsPostProcessor._patch_expand_path`` to each element in\nlist.\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (list): List of paths to patch.\n\nReturns:\nlist: Patched path list to an absolute path.", "source": "codesearchnet"}
{"code": "def parse(path):\n\n    def paired(iterable):\n        's -> (s0, s1), (s2, s3), (s4, s5), ...'\n        cursor = iter(iterable)\n        return zip(cursor, cursor)\n\n    def unwrap_if_sexp_symbol(datum):\n        \"Convert Symbol(':key') to ':key' (Symbol isn't hashable for dict keys).\\n            \"\n        return (datum.value() if isinstance(datum, sexpdata.Symbol) else datum)\n\n    def sexp2dict(sexps):\n        'Transforms a nested list structure from sexpdata to dict.'\n        newdict = {}\n        for (key, value) in paired(sexps):\n            key = str(unwrap_if_sexp_symbol(key)).lstrip(':')\n            if (isinstance(value, list) and value):\n                if isinstance(value[0], list):\n                    newdict[key] = [sexp2dict(val) for val in value]\n                elif isinstance(value[0], sexpdata.Symbol):\n                    newdict[key] = sexp2dict(value)\n                else:\n                    newdict[key] = value\n            else:\n                newdict[key] = value\n        return newdict\n    conf = sexpdata.loads(Util.read_file(path))\n    return sexp2dict(conf)", "docstring": "Parse an ``.ensime`` config file from S-expressions.\n\nArgs:\npath (str): Path of an ``.ensime`` file to parse.\n\nReturns:\ndict: Configuration values with string keys.", "source": "codesearchnet"}
{"code": "def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags, units, parens_as_neg=True):\n    conversion = cell_str.strip()\n    if re.search(allregex.control_wrapping_regex, cell_str):\n        stripped_cell = cell_str.strip()\n        mod_cell_str = stripped_cell[1:][:(- 1)].strip()\n        neg_mult = False\n        if ((stripped_cell[0] == '(') and (stripped_cell[(- 1)] == ')') and re.search(allregex.contains_numerical_regex, mod_cell_str)):\n            neg_mult = True\n        flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['removed-wrapping'])\n        converted_value = auto_convert_cell(flagable, mod_cell_str, position, worksheet, flags, units)\n        neg_mult = (neg_mult and check_cell_type(converted_value, get_cell_type(0)))\n        if (neg_mult and parens_as_neg):\n            flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['converted-wrapping-to-neg'])\n        return ((- converted_value) if neg_mult else converted_value)\n    elif re.search(allregex.contains_numerical_regex, cell_str):\n        conversion = auto_convert_numeric_string_cell(flagable, conversion, position, worksheet, flags, units)\n    elif re.search(allregex.bool_regex, cell_str):\n        flagable.flag_change(flags, 'interpreted', position, worksheet, flagable.FLAGS['bool-to-int'])\n        conversion = (1 if re.search(allregex.true_bool_regex, cell_str) else 0)\n    return conversion", "docstring": "Handles the string case of cell and attempts auto-conversion\nfor auto_convert_cell.\n\nArgs:\nparens_as_neg: Converts numerics surrounded by parens to negative values", "source": "codesearchnet"}
{"code": "def download_file_from_google_drive(file_id, root, filename=None, md5=None):\n    \n    \n    import requests\n    url = \"https:\n\n    root = os.path.expanduser(root)\n    if not filename:\n        filename = file_id\n    fpath = os.path.join(root, filename)\n\n    makedir_exist_ok(root)\n\n    if os.path.isfile(fpath) and check_integrity(fpath, md5):\n        print('Using downloaded and verified file: ' + fpath)\n    else:\n        session = requests.Session()\n\n        response = session.get(url, params={'id': file_id}, stream=True)\n        token = _get_confirm_token(response)\n\n        if token:\n            params = {'id': file_id, 'confirm': token}\n            response = session.get(url, params=params, stream=True)\n\n        _save_response_content(response, fpath)", "docstring": "Download a Google Drive file from  and place it in root.\n\nArgs:\nfile_id (str): id of file to be downloaded\nroot (str): Directory to place downloaded file in\nfilename (str, optional): Name to save the file under. If None, use the id of the file.\nmd5 (str, optional): MD5 checksum of the download. If None, do not check", "source": "juraj-google-style"}
{"code": "def Copy(self, name=None):\n        \n        if name is None:\n            name = self.name\n        return Cdf(list(self.xs), list(self.ps), name)", "docstring": "Returns a copy of this Cdf.\n\nArgs:\nname: string name for the new Cdf", "source": "juraj-google-style"}
{"code": "def setup(self,\n            file_path_list,\n            reason, grr_server_url, grr_username, grr_password, approvers=None,\n            verify=True):\n    \n    super(GRRHuntFileCollector, self).setup(\n        reason, grr_server_url, grr_username, grr_password,\n        approvers=approvers, verify=verify)\n    self.file_path_list = [item.strip() for item\n                           in file_path_list.strip().split(',')]\n    if not file_path_list:\n      self.state.add_error('Files must be specified for hunts', critical=True)", "docstring": "Initializes a GRR Hunt file collector.\n\nArgs:\nfile_path_list: comma-separated list of file paths.\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: comma-separated list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "juraj-google-style"}
{"code": "def non_serializable():\n\n    def _apply_fn(dataset):\n        \n        return _NonSerializableDataset(dataset)\n    return _apply_fn", "docstring": "A non-serializable identity transformation.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def sym_init_args(self) -> pg_dict.Dict:\n    return self._sym_attributes", "docstring": "Returns the symbolic attributes which are also the `__init__` args.\n\nReturns:\nA symbolic Dict as evaluated symbolic attributes, meaning that all\n``pg.ContextValue`` will be resolved.", "source": "github-repos"}
{"code": "def edge_length_sum(self, terminal=True, internal=True):\n        \n        if not isinstance(terminal, bool):\n            raise TypeError(\"leaves must be a bool\")\n        if not isinstance(internal, bool):\n            raise TypeError(\"internal must be a bool\")\n        return sum(node.edge_length for node in self.traverse_preorder() if node.edge_length is not None and ((terminal and node.is_leaf()) or (internal and not node.is_leaf())))", "docstring": "Compute the sum of all selected edge lengths in this ``Tree``\n\nArgs:\n``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``\n\nReturns:\n``float``: Sum of all selected edge lengths in this ``Tree``", "source": "juraj-google-style"}
{"code": "def set_route_name(self, ip_dest, next_hop, **kwargs):\n    return self._set_route(ip_dest, next_hop, **kwargs)", "docstring": "Set the route_name value for the specified route\n\nArgs:\nip_dest (string): The ip address of the destination in the\nform of A.B.C.D/E\nnext_hop (string): The next hop interface or ip address\n**kwargs['next_hop_ip'] (string): The next hop address on\ndestination interface\n**kwargs['distance'] (string): Administrative distance for this\nroute\n**kwargs['tag'] (string): Route tag\n**kwargs['route_name'] (string): Route name\n\nReturns:\nTrue if the operation succeeds, otherwise False.\n\nNotes:\nAny existing tag value must be included in call to\nset_route_name, otherwise the tag will be reset\nby the call to EOS.", "source": "codesearchnet"}
{"code": "def _get_available_gpus():\n    if ops.executing_eagerly_outside_functions():\n        return [d.name for d in config.list_logical_devices('GPU')]\n    global _LOCAL_DEVICES\n    if _LOCAL_DEVICES is None:\n        _LOCAL_DEVICES = get_session().list_devices()\n    return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']", "docstring": "Get a list of available GPU devices (formatted as strings).\n\nReturns:\nA list of available GPU devices.", "source": "github-repos"}
{"code": "def auto_cast_partition_dtype():\n    return False", "docstring": "Whether incompatible row-partitioning dtypes should be auto-converted.\n\nIf true, then operations that combine RaggedTensors but have different\nrow-partitioning tensor dtypes will be automatically cast to a\ncompatible dtype (`tf.int64`).  If false, then such operations will result\nin an error.\n\nReturns:\n`bool`", "source": "github-repos"}
{"code": "def search(self, **kwargs):\n    path = self._get_path('search')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get movies that match the search query string from the API.\n\nArgs:\nq (optional): plain text search query; remember to URI encode\npage_limit (optional): number of search results to show per page,\ndefault=30\npage (optional): results page number, default=1\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def get_lock_config(self, device_label):\n        \n        response = None\n        try:\n            response = requests.get(\n                urls.lockconfig(self._giid, device_label),\n                headers={\n                    'Accept': 'application/json, text/javascript, */*; q=0.01',\n                    'Cookie': 'vid={}'.format(self._vid)})\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Get lock configuration\n\nArgs:\ndevice_label (str): device label of lock", "source": "juraj-google-style"}
{"code": "def matches_function(function: _evaluation.MatchesFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    del function\n    if operand_result is None:\n        raise ValueError('matches() cannot be called without an operand.')\n    sql_alias = 'matches_'\n    sql_data_type = _sql_data_types.Boolean\n    if not params_result:\n        return _sql_data_types.Select(select_part=_sql_data_types.RawExpression('NULL', _sql_alias=sql_alias, _sql_data_type=sql_data_type), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)\n    else:\n        param_to_evaluate = [param for param in params_result]\n        return dataclasses.replace(operand_result, select_part=_sql_data_types.FunctionCall(name='REGEXP', params=(operand_result.select_part, param_to_evaluate[0]), _sql_alias=sql_alias, _sql_data_type=sql_data_type))", "docstring": "Generates Spark SQL representing the FHIRPath matches() function.\n\nReturns `TRUE` if the operand matches the regex in the given param.\n\nThis function takes one param (`pattern`) in addition to the operand. If\n`pattern` is not provided the matches function returns the empty set which in\nthis function translates to NULL.\n\nThe returned SQL expression is a table of cardinality 1, whose value is of\n`BOOL` type. By default, `_MatchesFunction` will return `FALSE` if given no\noperand.\n\nReturns an error In the event that the input collection contains multiple\nitems.\n\nArgs:\nfunction: The FHIRPath AST `MatchesFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.\n\nRaises:\nValueError: When the function is called without an operand", "source": "github-repos"}
{"code": "def _replace_variables_by_constants(converter_data):\n    input_graph = _GraphDef(converter_data.graph_def)\n    for tensor_name, tensor_data in converter_data.tensor_data.items():\n        input_graph.nodes[tensor_name].convert_variable_to_constant(None, tensor_data)\n    converted_graph = input_graph.converted_self().graph_def\n    converted_input_indices = {t.index for t in converter_data.tensor_data.values() if t.index is not None}\n    return (converted_graph, converted_input_indices)", "docstring": "Replaces variables by constants on a given graph.\n\nGiven a _ConverterData instance with converted variables in its tensor_data\nfield, create a new graph where the respective variables are replaced with the\nconverted constants.\n\nArgs:\nconverter_data: A pre-populated _ConverterData instance.\n\nReturns:\nThe converted graph.", "source": "github-repos"}
{"code": "def sharded_filename(self, filename_tensor, shard, num_shards):\n    return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)", "docstring": "Append sharding information to a filename.\n\nArgs:\nfilename_tensor: A string tensor.\nshard: Integer.  The shard for the filename.\nnum_shards: An int Tensor for the number of shards.\n\nReturns:\nA string tensor.", "source": "github-repos"}
{"code": "def _FlagIsRegistered(self, flag_obj):\n    \n    flag_dict = self.FlagDict()\n    \n    name = flag_obj.name\n    if flag_dict.get(name, None) == flag_obj:\n      return True\n    \n    short_name = flag_obj.short_name\n    if (short_name is not None and\n        flag_dict.get(short_name, None) == flag_obj):\n      return True\n    return False", "docstring": "Checks whether a Flag object is registered under long name or short name.\n\nArgs:\nflag_obj: A Flag object.\n\nReturns:\nA boolean: True iff flag_obj is registered under long name or short name.", "source": "juraj-google-style"}
{"code": "def _install_signal_handler(self, signal_number, signal_name):\n    \n    old_signal_handler = None  \n    def handler(handled_signal_number, frame):\n      \n      \n      signal.signal(signal_number, signal.SIG_DFL)\n      sys.stderr.write(\"TensorBoard caught %s; exiting...\\n\" % signal_name)\n      \n      \n      if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):\n        old_signal_handler(handled_signal_number, frame)\n      sys.exit(0)\n    old_signal_handler = signal.signal(signal_number, handler)", "docstring": "Set a signal handler to gracefully exit on the given signal.\n\nWhen this process receives the given signal, it will run `atexit`\nhandlers and then exit with `0`.\n\nArgs:\nsignal_number: The numeric code for the signal to handle, like\n`signal.SIGTERM`.\nsignal_name: The human-readable signal name.", "source": "juraj-google-style"}
{"code": "def log_softmax(x, reduced_dim, extra_logit=None, name=None):\n  \n  return x - reduce_logsumexp(\n      x, reduced_dim, extra_logit=extra_logit, name=name)", "docstring": "log(softmax(x)).\n\nArgs:\nx: a Tensor whose shape contains vocab_dim\nreduced_dim: a Dimension\nextra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)\nname: an optional string\n\nReturns:\na Tensor with the same shape as x", "source": "juraj-google-style"}
{"code": "def set_marked(self, name: str, marked: bool = False,\n                   unmarked: bool = False) -> None:\n        \n        if marked:\n            self._marked[name] = True\n        elif unmarked:\n            self._marked[name] = False\n        else:\n            self._marked.pop(name, None)", "docstring": "Add or remove the ``\\\\Marked`` and ``\\\\Unmarked`` mailbox\nattributes.\n\nArgs:\nname: The name of the mailbox.\nmarked: True if the ``\\\\Marked`` attribute should be added.\nunmarked: True if the ``\\\\Unmarked`` attribute should be added.", "source": "juraj-google-style"}
{"code": "def export_as_package(self, package_path, cv_source):\n    if os.path.exists(package_path):\n        raise exceptions.UserError('{} already exists'.format(package_path))\n    package_name = os.path.basename(os.path.normpath(package_path))\n    os.makedirs(package_path)\n    with open(os.path.join(package_path, '__init__.py'), 'wb') as f:\n        f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))\n    os.makedirs(os.path.join(package_path, 'baselearners'))\n    open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()\n    for (idx, base_learner) in enumerate(self.base_learners):\n        base_learner.export_as_file(os.path.join(package_path, 'baselearners', ('baselearner' + str(idx))))\n    self.base_learner_origin.export_as_file(os.path.join(package_path, 'metalearner'), self.secondary_learner_hyperparameters)\n    with open(os.path.join(package_path, 'cv.py'), 'wb') as f:\n        f.write(cv_source.encode('utf8'))\n    ensemble_source = ''\n    stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')\n    with open(stacker_file_loc) as f:\n        ensemble_source += f.read()\n    ensemble_source += '\\n\\n    def {}(self, X):\\n        return self._process_using_meta_feature_generator(X, \"{}\")\\n\\n'.format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator)\n    with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:\n        f.write(ensemble_source.encode('utf8'))\n    builder_source = ''\n    for (idx, base_learner) in enumerate(self.base_learners):\n        builder_source += 'from {}.baselearners import baselearner{}\\n'.format(package_name, idx)\n    builder_source += 'from {}.cv import return_splits_iterable\\n'.format(package_name)\n    builder_source += 'from {} import metalearner\\n'.format(package_name)\n    builder_source += 'from {}.stacker import XcessivStackedEnsemble\\n'.format(package_name)\n    builder_source += '\\nbase_learners = [\\n'\n    for (idx, base_learner) in enumerate(self.base_learners):\n        builder_source += '    baselearner{}.base_learner,\\n'.format(idx)\n    builder_source += ']\\n'\n    builder_source += '\\nmeta_feature_generators = [\\n'\n    for (idx, base_learner) in enumerate(self.base_learners):\n        builder_source += '    baselearner{}.meta_feature_generator,\\n'.format(idx)\n    builder_source += ']\\n'\n    builder_source += '\\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners, meta_feature_generators=meta_feature_generators, secondary_learner=metalearner.base_learner, cv_function=return_splits_iterable)\\n'\n    with open(os.path.join(package_path, 'builder.py'), 'wb') as f:\n        f.write(builder_source.encode('utf8'))", "docstring": "Exports the ensemble as a Python package and saves it to `package_path`.\n\nArgs:\npackage_path (str, unicode): Absolute/local path of place to save package in\n\ncv_source (str, unicode): String containing actual code for base learner\ncross-validation used to generate secondary meta-features.\n\nRaises:\nexceptions.UserError: If os.path.join(path, name) already exists.", "source": "codesearchnet"}
{"code": "def connections(self, origin, destination, dt=datetime.now(), only_direct=False):\n    query = {'S': origin, 'Z': destination, 'date': dt.strftime('%d.%m.%y'), 'time': dt.strftime('%H:%M'), 'start': 1, 'REQ0JourneyProduct_opt0': (1 if only_direct else 0)}\n    rsp = requests.get('http:\n    return parse_connections(rsp.text)", "docstring": "Find connections between two stations\n\nArgs:\norigin (str): origin station\ndestination (str): destination station\ndt (datetime): date and time for query\nonly_direct (bool): only direct connections", "source": "codesearchnet"}
{"code": "def get_all(self, uids: Iterable[int]) -> Mapping[(int, Record)]:\n    return {uid: self._records[uid] for uid in uids if (uid in self._records)}", "docstring": "Get records by a set of UIDs.\n\nArgs:\nuids: The message UIDs.", "source": "codesearchnet"}
{"code": "def get_phrases_from_posmap(posmaps, input_ids):\n    left_idx = 0\n    right_idx = posmaps.shape[-1] - 1\n    posmaps = posmaps.clone()\n    posmaps[:, 0:left_idx + 1] = False\n    posmaps[:, right_idx:] = False\n    token_ids = []\n    for posmap in posmaps:\n        non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()\n        token_ids.append([input_ids[i] for i in non_zero_idx])\n    return token_ids", "docstring": "Get token ids of phrases from posmaps and input_ids.\n\nArgs:\nposmaps (`torch.BoolTensor` of shape `(num_boxes, hidden_size)`):\nA boolean tensor of text-thresholded logits related to the detected bounding boxes.\ninput_ids (`torch.LongTensor`) of shape `(sequence_length, )`):\nA tensor of token ids.", "source": "github-repos"}
{"code": "def _call_location():\n    frame = tf_inspect.currentframe()\n    assert frame.f_back.f_code.co_name == '_tfmw_add_deprecation_warning', 'This function should be called directly from _tfmw_add_deprecation_warning, as the caller is identified heuristically by chopping off the top stack frames.'\n    for _ in range(3):\n        parent = frame.f_back\n        if parent is None:\n            break\n        frame = parent\n    return '{}:{}'.format(frame.f_code.co_filename, frame.f_lineno)", "docstring": "Extracts the caller filename and line number as a string.\n\nReturns:\nA string describing the caller source location.", "source": "github-repos"}
{"code": "def batch_dense(inputs, units, activation=None, kernel_initializer=None, reuse=None, name=None):\n    inputs_shape = shape_list(inputs)\n    if (len(inputs_shape) != 3):\n        raise ValueError('inputs must have 3 dimensions')\n    batch = inputs_shape[0]\n    input_units = inputs_shape[2]\n    if ((not isinstance(batch, int)) or (not isinstance(input_units, int))):\n        raise ValueError('inputs must have static dimensions 0 and 2')\n    with tf.variable_scope(name, default_name='batch_dense', values=[inputs], reuse=reuse, dtype=inputs.dtype):\n        if (kernel_initializer is None):\n            kernel_initializer = tf.random_normal_initializer(stddev=(input_units ** (- 0.5)))\n        w = tf.get_variable('w', [batch, input_units, units], initializer=kernel_initializer, dtype=inputs.dtype)\n        y = tf.matmul(inputs, w)\n        if (activation is not None):\n            y = activation(y)\n        return y", "docstring": "Multiply a batch of input matrices by a batch of parameter matrices.\n\nEach input matrix is multiplied by the corresponding parameter matrix.\n\nThis is useful in a mixture-of-experts where the batch represents different\nexperts with different inputs.\n\nArgs:\ninputs: a Tensor with shape [batch, length, input_units]\nunits: an integer\nactivation: an optional activation function to apply to the output\nkernel_initializer: an optional initializer\nreuse: whether to reuse the varaible scope\nname: an optional string\n\nReturns:\na Tensor with shape [batch, length, units]\n\nRaises:\nValueError: if the \"batch\" or \"input_units\" dimensions of inputs are not\nstatically known.", "source": "codesearchnet"}
{"code": "async def loadCoreModule(self, ctor, conf=None):\n    if (conf is None):\n        conf = {}\n    modu = self._loadCoreModule(ctor, conf=conf)\n    try:\n        (await s_coro.ornot(modu.preCoreModule))\n    except asyncio.CancelledError:\n        raise\n    except Exception:\n        logger.exception(f'module preCoreModule failed: {ctor}')\n        self.modules.pop(ctor, None)\n        return\n    mdefs = modu.getModelDefs()\n    self.model.addDataModels(mdefs)\n    cmds = modu.getStormCmds()\n    [self.addStormCmd(c) for c in cmds]\n    try:\n        (await s_coro.ornot(modu.initCoreModule))\n    except asyncio.CancelledError:\n        raise\n    except Exception:\n        logger.exception(f'module initCoreModule failed: {ctor}')\n        self.modules.pop(ctor, None)\n        return\n    (await self.fire('core:module:load', module=ctor))\n    return modu", "docstring": "Load a single cortex module with the given ctor and conf.\n\nArgs:\nctor (str): The python module class path\nconf (dict):Config dictionary for the module", "source": "codesearchnet"}
{"code": "def update_from_group(self, data=None, timeout=(- 1)):\n    uri = '{}/updateFromGroup'.format(self.data['uri'])\n    return self._helper.update(data, uri, timeout=timeout)", "docstring": "Use this action to make a logical enclosure consistent with the enclosure group when the logical enclosure is\nin the Inconsistent state.\n\nArgs:\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Logical enclosure.", "source": "codesearchnet"}
{"code": "def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n    if not os.path.isdir(save_directory):\n        logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n        return\n    vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n    with open(vocab_file, 'w', encoding='utf-8') as f:\n        f.write(json.dumps(self.get_vocab(), ensure_ascii=False))\n    return (vocab_file,)", "docstring": "Save the vocabulary and special tokens file to a directory.\n\nArgs:\nsave_directory (`str`):\nThe directory in which to save the vocabulary.\n\nReturns:\n`Tuple(str)`: Paths to the files saved.", "source": "github-repos"}
{"code": "def _make_shred(self, c, name, feature_extractors, sheet_name):\n        \n        height, width, channels = self.orig_img.shape\n\n        \n        r_x, r_y, r_w, r_h = cv2.boundingRect(c)\n\n        \n        epsilon = 0.01 * cv2.arcLength(c, True)\n        simplified_contour = cv2.approxPolyDP(c, epsilon, True)\n\n        \n        if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3:\n            print(\"Skipping piece \n                name, r_w, r_h))\n            return None\n\n        if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100:\n            print(\"Skipping piece \n                name, r_w, r_h))\n            return None\n\n        \n        \n        box_center, bbox, angle = cv2.minAreaRect(c)\n\n        \n        if bbox[0] > bbox[1]:\n            angle += 90\n            bbox = (bbox[1], bbox[0])\n\n        if bbox[1] / float(bbox[0]) > 70:\n            print(\"Skipping piece \n            return None\n\n        \n        \n        y1 = math.floor(box_center[1] - bbox[1] / 2)\n        x1 = math.floor(box_center[0] - bbox[0] / 2)\n        bbox = tuple(map(int, map(math.ceil, bbox)))\n\n        \n        piece_mask = np.zeros([height, width, 1], dtype=np.uint8)\n        cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED)\n\n        \n        img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w]\n        piece_in_context = self.save_image(\n            \"pieces/%s_ctx\" % name,\n            self.orig_img[max(r_y - 10, 0):r_y + r_h + 10,\n                          max(r_x - 10, 0):r_x + r_w + 10])\n\n        mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w]\n        img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask)\n\n        \n        img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA)\n        img_roi[:, :, 3] = mask[:, :, 0]\n\n        \n        \n        \n        \n        M = cv2.getRotationMatrix2D((box_center[0] - r_x,\n                                     box_center[1] - r_y), angle, 1)\n\n        \n        \n        \n        M[0][2] += r_x - x1\n        M[1][2] += r_y - y1\n\n        \n        img_roi = cv2.warpAffine(img_roi, M, bbox)\n        piece_fname = self.save_image(\"pieces/%s\" % name, img_roi, \"png\")\n\n        \n        \n        \n        _, _, _, mask = cv2.split(img_roi)\n\n        _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE,\n                                          cv2.CHAIN_APPROX_SIMPLE)\n\n        if len(contours) != 1:\n            print(\"Piece \n\n        cnt = contours[0]\n\n        features_fname = self.save_image(\"pieces/%s_mask\" % name, mask, \"png\")\n\n        base_features = {\n            \n            \"on_sheet_x\": r_x,\n            \"on_sheet_y\": r_y,\n            \"on_sheet_width\": r_w,\n            \"on_sheet_height\": r_h,\n            \"on_sheet_angle\": angle,\n            \"width\": img_roi.shape[1],\n            \"height\": img_roi.shape[0],\n        }\n\n        tags_suggestions = []\n        for feat in feature_extractors:\n            fts, tags = feat.get_info(img_roi, cnt, name)\n            base_features.update(fts)\n            tags_suggestions += tags\n\n        if tags_suggestions:\n            print(name, tags_suggestions)\n\n        return Shred(\n            contour=c,\n            features=base_features,\n            features_fname=features_fname,\n            img_roi=img_roi,\n            name=name,\n            piece_fname=piece_fname,\n            piece_in_context_fname=piece_in_context,\n            sheet=sheet_name,\n            
simplified_contour=simplified_contour,\n            tags_suggestions=tags_suggestions,\n        )", "docstring": "Creates a Shred instances from a given contour.\n\nArgs:\nc: cv2 contour object.\nname: string shred name within a sheet.\nfeature_extractors: iterable of AbstractShredFeature instances.\n\nReturns:\nA new Shred instance or None on failure.", "source": "juraj-google-style"}
{"code": "def get_lb_nat_rule(access_token, subscription_id, resource_group, lb_name, rule_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, '/inboundNatRules/', rule_name, '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about a load balancer inbound NAT rule.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nlb_name (str): Name of the load balancer.\nrule_name (str): Name of the NAT rule.\n\nReturns:\nHTTP response. JSON body of rule.", "source": "codesearchnet"}
{"code": "def process_entry(self, entry):\n    try:\n        corrections = self.get_corrections_dict(entry)\n    except CompatibilityError:\n        return None\n    entry.correction = sum(corrections.values())\n    return entry", "docstring": "Process a single entry with the chosen Corrections.\n\nArgs:\nentry: A ComputedEntry object.\n\nReturns:\nAn adjusted entry if entry is compatible, otherwise None is\nreturned.", "source": "codesearchnet"}
{"code": "def requires_open_handle(method):\n\n    @functools.wraps(method)\n    def wrapper_requiring_open_handle(self, *args, **kwargs):\n        'The wrapper to be returned.'\n        if self.is_closed():\n            raise usb_exceptions.HandleClosedError()\n        return method(self, *args, **kwargs)\n    return wrapper_requiring_open_handle", "docstring": "Decorator to ensure a handle is open for certain methods.\n\nSubclasses should decorate their Read() and Write() with this rather than\nchecking their own internal state, keeping all \"is this handle open\" logic\nin is_closed().\n\nArgs:\nmethod: A class method on a subclass of UsbHandle\n\nRaises:\nHandleClosedError: If this handle has been closed.\n\nReturns:\nA wrapper around method that ensures the handle is open before calling through\nto the wrapped method.", "source": "codesearchnet"}
{"code": "def set_files(self, files_downloaded, files_failed):\n        \n        self.files_downloaded = files_downloaded\n        self.files_failed = files_failed\n        self.__record_progress(Status.GET_FILE_DIFF)", "docstring": "set_files: records progress from downloading files\nArgs:\nfiles_downloaded ([str]): list of files that have been downloaded\nfiles_failed ([str]): list of files that failed to download\nReturns: None", "source": "juraj-google-style"}
{"code": "def keys_of_type_exist(self, *keys):\n    keys_exist = [(key, (key in self.keys()), expected_type) for (key, expected_type) in keys]\n    return tuple((ContextItemInfo(key=k[0], key_in_context=k[1], expected_type=k[2], is_expected_type=(isinstance(self[k[0]], k[2]) if k[1] else None), has_value=(k[1] and (not (self[k[0]] is None)))) for k in keys_exist))", "docstring": "Check if keys exist in context and if types are as expected.\n\nArgs:\n*keys: *args for keys to check in context.\nEach arg is a tuple (str, type)\n\nReturns:\nTuple of namedtuple ContextItemInfo, same order as *keys.\nContextItemInfo(key,\nkey_in_context,\nexpected_type,\nis_expected_type)\n\nRemember if there is only one key in keys, the return assignment\nneeds an extra comma to remind python that it's a tuple:\n# one\na, = context.keys_of_type_exist('a')\n# > 1\na, b = context.keys_of_type_exist('a', 'b')", "source": "codesearchnet"}
{"code": "def get_frame(self, frame_id):\n    if ((frame_id < 0) or (frame_id >= self._frame_cnt)):\n        raise IndexError('\"frame_id\" must be between 0 and {}'.format((self._frame_cnt - 1)))\n    if (frame_id == self._position):\n        return self.read()\n    if self._cache:\n        img = self._cache.get(frame_id)\n        if (img is not None):\n            self._position = (frame_id + 1)\n            return img\n    self._set_real_position(frame_id)\n    (ret, img) = self._vcap.read()\n    if ret:\n        if self._cache:\n            self._cache.put(self._position, img)\n        self._position += 1\n    return img", "docstring": "Get frame by index.\n\nArgs:\nframe_id (int): Index of the expected frame, 0-based.\n\nReturns:\nndarray or None: Return the frame if successful, otherwise None.", "source": "codesearchnet"}
{"code": "def from_dir(cls, top, exts=None, exclude_dirs='_*'):\n    pseudos = []\n    if (exts == 'all_files'):\n        for f in [os.path.join(top, fn) for fn in os.listdir(top)]:\n            if os.path.isfile(f):\n                try:\n                    p = Pseudo.from_file(f)\n                    if p:\n                        pseudos.append(p)\n                    else:\n                        logger.info(('Skipping file %s' % f))\n                except:\n                    logger.info(('Skipping file %s' % f))\n        if (not pseudos):\n            logger.warning(('No pseudopotentials parsed from folder %s' % top))\n            return None\n        logger.info(('Creating PseudoTable with %i pseudopotentials' % len(pseudos)))\n    else:\n        if (exts is None):\n            exts = ('psp8',)\n        for p in find_exts(top, exts, exclude_dirs=exclude_dirs):\n            try:\n                pseudos.append(Pseudo.from_file(p))\n            except Exception as exc:\n                logger.critical(('Error in %s:\\n%s' % (p, exc)))\n    return cls(pseudos).sort_by_z()", "docstring": "Find all pseudos in the directory tree starting from top.\n\nArgs:\ntop: Top of the directory tree\nexts: List of files extensions. if exts == \"all_files\"\nwe try to open all files in top\nexclude_dirs: Wildcard used to exclude directories.\n\nreturn: :class:`PseudoTable` sorted by atomic number Z.", "source": "codesearchnet"}
{"code": "def multiple_replace(string, replacements):\n    \n    \n    pattern = re.compile(\"|\".join([re.escape(k) for k in sorted(replacements, key=len, reverse=True)]), flags=re.DOTALL)\n    return pattern.sub(lambda x: replacements[x.group(0)], string)", "docstring": "Simultaneously replace multiple strigns in a string\n\nArgs:\nstring (str): Input string\nreplacements (Dict[str,str]): Replacements dictionary\n\nReturns:\nstr: String with replacements", "source": "juraj-google-style"}
{"code": "def obj_with_unit(obj, unit):\n    unit_type = _UNAME2UTYPE[unit]\n    if isinstance(obj, numbers.Number):\n        return FloatWithUnit(obj, unit=unit, unit_type=unit_type)\n    elif isinstance(obj, collections.Mapping):\n        return {k: obj_with_unit(v, unit) for (k, v) in obj.items()}\n    else:\n        return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)", "docstring": "Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of\nobjects with units if obj is a dict, else an instance of\n`ArrayWithFloatWithUnit`.\n\nArgs:\nunit: Specific units (eV, Ha, m, ang, etc.).", "source": "codesearchnet"}
{"code": "def translate_ostat(ostat):\n    \n    ostat_lower = ostat.strip().lower()\n    if ostat_lower == 'monomer':\n        return 1\n    elif ostat_lower == 'homo-dimer':\n        return 2\n    elif ostat_lower == 'homo-trimer':\n        return 3\n    elif ostat_lower == 'homo-tetramer':\n        return 4\n    elif ostat_lower == 'homo-pentamer':\n        return 5\n    elif ostat_lower == 'homo-hexamer':\n        return 6\n    elif ostat_lower == 'homo-heptamer':\n        return 7\n    elif ostat_lower == 'homo-octamer':\n        return 8\n    else:\n        num = int(ostat_lower.split('-')[1])\n        return num", "docstring": "Translate the OSTAT field to an integer.\n\nAs of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.\n\nArgs:\nostat (str): Predicted oligomeric state of the PDB file\n\nReturns:\nint: Translated string to integer", "source": "juraj-google-style"}
{"code": "def _add_to_collections(var, weight_collections):\n    for weight_collection in weight_collections:\n        if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:\n            continue\n        if isinstance(var, variables.PartitionedVariable):\n            for constituent_var in list(var):\n                ops.add_to_collection(weight_collection, constituent_var)\n        else:\n            ops.add_to_collection(weight_collection, var)", "docstring": "Adds a var to the list of weight_collections provided.\n\nHandles the case for partitioned and non-partitioned variables.\n\nArgs:\nvar: A variable or Partitioned Variable.\nweight_collections: List of collections to add variable to.", "source": "github-repos"}
{"code": "def load(cls, archive_file: PackageSource, handler: primitive_handler.PrimitiveHandler, struct_def_class: Type[_StructDefT], search_param_class: Type[_SearchParameterT], code_system_class: Type[_CodeSystemT], value_set_class: Type[_ValueSetT], resource_time_zone: str='Z') -> 'FhirPackage[_StructDefT, _SearchParameterT, _CodeSystemT, _ValueSetT]':\n    collections_per_resource_type = {'StructureDefinition': ResourceCollection[_StructDefT](struct_def_class, handler, resource_time_zone), 'SearchParameter': ResourceCollection[_SearchParameterT](search_param_class, handler, resource_time_zone), 'CodeSystem': ResourceCollection[_CodeSystemT](code_system_class, handler, resource_time_zone), 'ValueSet': ResourceCollection[_ValueSetT](value_set_class, handler, resource_time_zone)}\n    with _open_path_or_factory(archive_file) as fd:\n        if not isinstance(fd.name, str) or fd.name.endswith('.zip'):\n            json_files = _read_fhir_package_zip(fd)\n        elif fd.name.endswith('.tar.gz') or fd.name.endswith('.tgz'):\n            json_files = _read_fhir_package_npm(fd)\n        else:\n            raise ValueError(f'Unsupported file type from {fd.name}')\n        ig_info: Optional[IgInfo] = None\n        for file_name, raw_json in json_files:\n            json_obj = json.loads(raw_json, parse_float=decimal.Decimal, parse_int=decimal.Decimal)\n            if not isinstance(json_obj, dict):\n                continue\n            if os.path.basename(file_name) == 'package.json':\n                ig_info = _parse_ig_info(json_obj)\n            _add_resource_to_collection(json_obj, json_obj, collections_per_resource_type)\n    if ig_info is None:\n        raise ValueError(f'Package {fd.name} does not contain a package.json file stating its URL and version.')\n    return FhirPackage(ig_info=ig_info, structure_definitions=collections_per_resource_type['StructureDefinition'], search_parameters=collections_per_resource_type['SearchParameter'], code_systems=collections_per_resource_type['CodeSystem'], value_sets=collections_per_resource_type['ValueSet'])", "docstring": "Instantiates and returns a new `FhirPackage` from a `.zip` file.\n\nMost users should not use this directly, but rather use the load methods\nin FHIR version-specific packages.\n\nArgs:\narchive_file: A path to the `.zip`, `.tar.gz` or `.tgz` file containing\nthe `FhirPackage` contents.\nhandler: The FHIR primitive handler used for resource parsing.\nstruct_def_class: The StructureDefinition proto class to use.\nsearch_param_class: The SearchParameter proto class to use.\ncode_system_class: The CodeSystem proto class to use.\nvalue_set_class: The Valueset proto class to use.\nresource_time_zone: The time zone code to parse resource dates into.\n\nReturns:\nAn instance of `FhirPackage`.\n\nRaises:\nValueError: In the event that the file or contents are invalid.", "source": "github-repos"}
{"code": "def new_from_list(cls, items, **kwargs):\n        \n        obj = cls(**kwargs)\n        for item in items:\n            obj.append(ListItem(item))\n        return obj", "docstring": "Populates the ListView with a string list.\n\nArgs:\nitems (list): list of strings to fill the widget with.", "source": "juraj-google-style"}
{"code": "def load_env(workdir, logfile=None, loglevel=logging.INFO):\n    \n\n    setup_sdk_logging(logfile, loglevel)\n    workdir = os.path.abspath(workdir)\n    loaded_workdir = lago_workdir.Workdir(path=workdir)\n    prefix = loaded_workdir.get_prefix('current')\n    return SDK(loaded_workdir, prefix)", "docstring": "Load an existing Lago environment\n\nArgs:\nworkdir(str): Path to the workdir directory, as created by\n:func:`~lago.sdk.init` or created by the CLI.\nlogfile(str): A Path to setup a log file.\nloglevel(int): :mod:`logging` log level.\n\nReturns:\n:class:`~lago.sdk.SDK`: Initialized Lago environment\n\nRaises:\n:exc:`~lago.utils.LagoException`: If loading the environment failed.", "source": "juraj-google-style"}
{"code": "def add(self, value, date=None, return_value=False, key=None):\n    data = {}\n    if (self._metric_id is None):\n        self.tcex.handle_error(715, [self._metric_name])\n    body = {'value': value}\n    if (date is not None):\n        body['date'] = self.tcex.utils.format_datetime(date, date_format='%Y-%m-%dT%H:%M:%SZ')\n    if (key is not None):\n        body['name'] = key\n    self.tcex.log.debug('metric data: {}'.format(body))\n    params = {}\n    if return_value:\n        params = {'returnValue': 'true'}\n    url = '/v2/customMetrics/{}/data'.format(self._metric_id)\n    r = self.tcex.session.post(url, json=body, params=params)\n    if ((r.status_code == 200) and ('application/json' in r.headers.get('content-type', ''))):\n        data = r.json()\n    elif (r.status_code == 204):\n        pass\n    else:\n        self.tcex.handle_error(710, [r.status_code, r.text])\n    return data", "docstring": "Add metrics data to collection.\n\nArgs:\nvalue (str): The value of the metric.\ndate (str, optional): The optional date of the metric.\nreturn_value (bool, default:False): Tell the API to return the updates metric value.\nkey (str, optional): The key value for keyed metrics.\n\nReturn:\ndict: If return_value is True a dict with the current value for the time period\nis returned.", "source": "codesearchnet"}
{"code": "def matches_count(count, options):\n    \n\n    if options.get(\"count\") is not None:\n        return count == int(options[\"count\"])\n    if options.get(\"maximum\") is not None and int(options[\"maximum\"]) < count:\n        return False\n    if options.get(\"minimum\") is not None and int(options[\"minimum\"]) > count:\n        return False\n    if options.get(\"between\") is not None and count not in options[\"between\"]:\n        return False\n    return True", "docstring": "Returns whether the given count matches the given query options.\n\nIf no quantity options are specified, any count is considered acceptable.\n\nArgs:\ncount (int): The count to be validated.\noptions (Dict[str, int | Iterable[int]]): A dictionary of query options.\n\nReturns:\nbool: Whether the count matches the options.", "source": "juraj-google-style"}
{"code": "def RemoveProcessedTaskStorage(self, task):\n    \n    if task.identifier not in self._task_storage_writers:\n      raise IOError('Storage writer for task: {0:s} does not exist.'.format(\n          task.identifier))\n\n    del self._task_storage_writers[task.identifier]", "docstring": "Removes a processed task storage.\n\nArgs:\ntask (Task): task.\n\nRaises:\nIOError: if the task storage does not exist.\nOSError: if the task storage does not exist.", "source": "juraj-google-style"}
{"code": "def get_connection(db_type, db_pth, user=None, password=None, name=None):\n    \n    if db_type == 'sqlite':\n        print(db_pth)\n        conn = sqlite3.connect(db_pth)\n    elif db_type == 'mysql':\n        import mysql.connector\n        conn = mysql.connector.connect(user=user, password=password, database=name)\n    elif db_type == 'django_mysql':\n        from django.db import connection as conn\n    else:\n        print('unsupported database type: {}, choices are \"sqlite\", \"mysql\" or \"django_mysql\"'.format(db_type))\n\n    return conn", "docstring": "Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database\n\nExample:\n>>> from msp2db.db import get_connection\n>>> conn = get_connection('sqlite', 'library.db')\n\nIf using \"mysql\" mysql.connector needs to be installed.\n\nIf using \"django_mysql\" Django needs to be installed.\n\nArgs:\ndb_type (str): Type of database can either be \"sqlite\", \"mysql\" or \"django_mysql\"\n\n\nReturns:\nsql connection object", "source": "juraj-google-style"}
{"code": "def _handle_oss_error():\n    try:\n        (yield)\n    except _OssError as exception:\n        if (exception.status in _ERROR_CODES):\n            raise _ERROR_CODES[exception.status](exception.details.get('Message', ''))\n        raise", "docstring": "Handle OSS exception and convert to class IO exceptions\n\nRaises:\nOSError subclasses: IO error.", "source": "codesearchnet"}
{"code": "def __init__(self, obj=None, prop=None):\n        \n        if not hasattr(self, \"args_type\"):\n            self.__set_type(obj, prop)\n            self.obj, self.prop = obj, prop\n            self.__check_lock()\n            self.wrap2spy()\n            self.is_in_queue = False", "docstring": "It will create the true base\nflow:\n__new__ => __init__\n=> set type based on arguments\n=> check the arguments is valid or not based on type\n=> wrap the target\nArgs:\nobj: None / function / instance method / module / class\nInspected target\nIf the target is None, it will create a Pure() class\nprop: None / string\nInspected target when obj contains callable things", "source": "juraj-google-style"}
{"code": "def repeat(sequence):\n    \n    N = len(sequence)\n    def f(i):\n        return sequence[i%N]\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a repeated of values.\n\n.. code-block:: none\n\nseq = [0, 1, 2, 3]\n\n# repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...]\n\nArgs:\nsequence (seq) : a sequence of values for the driver to bounce", "source": "juraj-google-style"}
{"code": "def plot_compare(self, other_plotter, legend=True):\n        \n        \n        import matplotlib.lines as mlines\n        plt = self.get_plot()\n        data_orig = self.bs_plot_data()\n        data = other_plotter.bs_plot_data()\n        band_linewidth = 1\n        for i in range(other_plotter._nb_bands):\n            for d in range(len(data_orig['distances'])):\n                plt.plot(data_orig['distances'][d],\n                         [e[str(Spin.up)][i] for e in data['energy']][d],\n                         'c-', linewidth=band_linewidth)\n                if other_plotter._bs.is_spin_polarized:\n                    plt.plot(data_orig['distances'][d],\n                             [e[str(Spin.down)][i] for e in data['energy']][d],\n                             'm--', linewidth=band_linewidth)\n        if legend:\n            handles = [mlines.Line2D([], [], linewidth=2,\n                                     color='b', label='bs 1 up'),\n                       mlines.Line2D([], [], linewidth=2,\n                                     color='r', label='bs 1 down',\n                                     linestyle=\"--\"),\n                       mlines.Line2D([], [], linewidth=2,\n                                     color='c', label='bs 2 up'),\n                       mlines.Line2D([], [], linewidth=2,\n                                     color='m', linestyle=\"--\",\n                                     label='bs 2 down')]\n\n            plt.legend(handles=handles)\n        return plt", "docstring": "plot two band structure for comparison. One is in red the other in blue\n(no difference in spins). The two band structures need to be defined\non the same symmetry lines! and the distance between symmetry lines is\nthe one of the band structure used to build the BSPlotter\n\nArgs:\nanother band structure object defined along the same symmetry lines\n\nReturns:\na matplotlib object with both band structures", "source": "juraj-google-style"}
{"code": "def load_variant_bulk(self, variants):\n        \n        if not len(variants) > 0:\n            return\n\n        LOG.debug(\"Loading variant bulk\")\n        try:\n            result = self.variant_collection.insert_many(variants)\n        except (DuplicateKeyError, BulkWriteError) as err:\n            \n            \n            for var_obj in variants:\n                try:\n                    self.upsert_variant(var_obj)\n                except IntegrityError as err:\n                    pass\n\n        return", "docstring": "Load a bulk of variants\n\nArgs:\nvariants(iterable(scout.models.Variant))\n\nReturns:\nobject_ids", "source": "juraj-google-style"}
{"code": "def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Normal'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[loc, scale]) as name:\n        with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []):\n            self._loc = array_ops.identity(loc, name='loc')\n            self._scale = array_ops.identity(scale, name='scale')\n            check_ops.assert_same_float_dtype([self._loc, self._scale])\n    super(Normal, self).__init__(dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name)", "docstring": "Construct Normal distributions with mean and stddev `loc` and `scale`.\n\nThe parameters `loc` and `scale` must be shaped in a way that supports\nbroadcasting (e.g. `loc + scale` is a valid operation).\n\nArgs:\nloc: Floating point tensor; the means of the distribution(s).\nscale: Floating point tensor; the stddevs of the distribution(s).\nMust contain only positive values.\nvalidate_args: Python `bool`, default `False`. When `True` distribution\nparameters are checked for validity despite possibly degrading runtime\nperformance. When `False` invalid inputs may silently render incorrect\noutputs.\nallow_nan_stats: Python `bool`, default `True`. When `True`,\nstatistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\nindicate the result is undefined. When `False`, an exception is raised\nif one or more of the statistic's batch members are undefined.\nname: Python `str` name prefixed to Ops created by this class.\n\nRaises:\nTypeError: if `loc` and `scale` have different `dtype`.", "source": "github-repos"}
{"code": "def report_validation_warning(self, element_path: str, msg: str) -> None:", "docstring": "Reports the given warning during FHIR validation.\n\nThis indicates that the element complies with the FHIR specification, but\nmay be missing some desired-but-not-required property, like additional\nfields that are useful to consumers.\n\nArgs:\nelement_path: The path to the field where the issue occurred.\nmsg: The warning message that was produced.", "source": "github-repos"}
{"code": "def mat2euler(rmat, axes='sxyz'):\n    try:\n        (firstaxis, parity, repetition, frame) = _AXES2TUPLE[axes.lower()]\n    except (AttributeError, KeyError):\n        (firstaxis, parity, repetition, frame) = axes\n    i = firstaxis\n    j = _NEXT_AXIS[(i + parity)]\n    k = _NEXT_AXIS[((i - parity) + 1)]\n    M = np.array(rmat, dtype=np.float32, copy=False)[(:3, :3)]\n    if repetition:\n        sy = math.sqrt(((M[(i, j)] * M[(i, j)]) + (M[(i, k)] * M[(i, k)])))\n        if (sy > EPS):\n            ax = math.atan2(M[(i, j)], M[(i, k)])\n            ay = math.atan2(sy, M[(i, i)])\n            az = math.atan2(M[(j, i)], (- M[(k, i)]))\n        else:\n            ax = math.atan2((- M[(j, k)]), M[(j, j)])\n            ay = math.atan2(sy, M[(i, i)])\n            az = 0.0\n    else:\n        cy = math.sqrt(((M[(i, i)] * M[(i, i)]) + (M[(j, i)] * M[(j, i)])))\n        if (cy > EPS):\n            ax = math.atan2(M[(k, j)], M[(k, k)])\n            ay = math.atan2((- M[(k, i)]), cy)\n            az = math.atan2(M[(j, i)], M[(i, i)])\n        else:\n            ax = math.atan2((- M[(j, k)]), M[(j, j)])\n            ay = math.atan2((- M[(k, i)]), cy)\n            az = 0.0\n    if parity:\n        (ax, ay, az) = ((- ax), (- ay), (- az))\n    if frame:\n        (ax, az) = (az, ax)\n    return vec((ax, ay, az))", "docstring": "Converts given rotation matrix to euler angles in radian.\n\nArgs:\nrmat: 3x3 rotation matrix\naxes: One of 24 axis sequences as string or encoded tuple\n\nReturns:\nconverted euler angles in radian vec3 float", "source": "codesearchnet"}
{"code": "def decode(self, ids, strip_extraneous=False):\n    \n    del strip_extraneous\n    _, tmp_file_path = tempfile.mkstemp(\"_decode.png\")\n    if self._height is None or self._width is None:\n      size = int(math.sqrt(len(ids) / self._channels))\n      length = size * size * self._channels\n    else:\n      size = None\n      length = self._height * self._width * self._channels\n    if len(ids) != length:\n      raise ValueError(\"Length of ids (%d) must be height (%d) x width (%d) x \"\n                       \"channels (%d); %d != %d.\\n Ids: %s\"\n                       % (len(ids), self._height, self._width, self._channels,\n                          len(ids), length, \" \".join([str(i) for i in ids])))\n    with tf.Graph().as_default():\n      raw = tf.constant(ids, dtype=tf.uint8)\n      if size is None:\n        img = tf.reshape(raw, [self._height, self._width, self._channels])\n      else:\n        img = tf.reshape(raw, [size, size, self._channels])\n      png = tf.image.encode_png(img)\n      op = tf.write_file(tmp_file_path, png)\n      with tf.Session() as sess:\n        sess.run(op)\n    return tmp_file_path", "docstring": "Transform a sequence of int ids into an image file.\n\nArgs:\nids: list of integers to be converted.\nstrip_extraneous: unused\n\nReturns:\nPath to the temporary file where the image was saved.\n\nRaises:\nValueError: if the ids are not of the appropriate size.", "source": "juraj-google-style"}
{"code": "def restore_from_checkpoint(self, checkpoint_path):\n    import tensorflow as tf\n    all_vars = tf.contrib.slim.get_variables_to_restore(exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step'])\n    saver = tf.train.Saver(all_vars)\n    saver.restore(self.tf_session, checkpoint_path)", "docstring": "To restore inception model variables from the checkpoint file.\n\nSome variables might be missing in the checkpoint file, so it only\nloads the ones that are avialable, assuming the rest would be\ninitialized later.\nArgs:\ncheckpoint_path: Path to the checkpoint file for the Inception graph.", "source": "codesearchnet"}
{"code": "def get(self, node_id):\n    return (self._nodes[_node.Root.ID].get(node_id) or self._nodes[_node.Root.ID].get(self._sid_map.get(node_id)))", "docstring": "Get a note with the given ID.\n\nArgs:\nnode_id (str): The note ID.\n\nReturns:\ngkeepapi.node.TopLevelNode: The Note or None if not found.", "source": "codesearchnet"}
{"code": "def _generate_security_groups(config_key):\n    raw_default_groups = validate_key_values(CONFIG, 'base', config_key, default='')\n    default_groups = _convert_string_to_native(raw_default_groups)\n    LOG.debug('Default security group for %s is %s', config_key, default_groups)\n    entries = {}\n    for env in ENVS:\n        entries[env] = []\n    if isinstance(default_groups, list):\n        groups = _remove_empty_entries(default_groups)\n        for env in entries:\n            entries[env] = groups\n    elif isinstance(default_groups, dict):\n        entries.update(default_groups)\n    LOG.debug('Generated security group: %s', entries)\n    return entries", "docstring": "Read config file and generate security group dict by environment.\n\nArgs:\nconfig_key (str): Configuration file key\n\nReturns:\ndict: of environments in {'env1': ['group1', 'group2']} format", "source": "codesearchnet"}
{"code": "def get_nmr_prize_pool(self, round_num=0, tournament=1):\n        \n        tournaments = self.get_competitions(tournament)\n        tournaments.sort(key=lambda t: t['number'])\n        if round_num == 0:\n            t = tournaments[-1]\n        else:\n            tournaments = [t for t in tournaments if t['number'] == round_num]\n            if len(tournaments) == 0:\n                raise ValueError(\"invalid round number\")\n            t = tournaments[0]\n        return t['prizePoolNmr']", "docstring": "Get NMR prize pool for the given round and tournament.\n\nArgs:\nround_num (int, optional): The round you are interested in,\ndefaults to current round.\ntournament (int, optional): ID of the tournament, defaults to 1\n\nReturns:\ndecimal.Decimal: prize pool in NMR\n\nRaises:\nValue Error: in case of invalid round number", "source": "juraj-google-style"}
{"code": "def assert_iter(**kw):\n    \n    for name, value in kw.items():\n        if not isiter(value):\n            raise TypeError(\n                'paco: {} must be an iterable object'.format(name))", "docstring": "Asserts if a given values implements a valid iterable interface.\n\nArguments:\n**kw (mixed): value to check if it is an iterable.\n\nRaises:\nTypeError: if assertion fails.", "source": "juraj-google-style"}
{"code": "def lyap_r_len(**kwargs):\n    min_len = (((kwargs['emb_dim'] - 1) * kwargs['lag']) + 1)\n    min_len += (kwargs['trajectory_len'] - 1)\n    min_len += ((kwargs['min_tsep'] * 2) + 1)\n    return min_len", "docstring": "Helper function that calculates the minimum number of data points required\nto use lyap_r.\n\nNote that none of the required parameters may be set to None.\n\nKwargs:\nkwargs(dict):\narguments used for lyap_r (required: emb_dim, lag, trajectory_len and\nmin_tsep)\n\nReturns:\nminimum number of data points required to call lyap_r with the given\nparameters", "source": "codesearchnet"}
{"code": "def get_weights(self):\n    strategy = self._distribution_strategy or self._compile_time_distribution_strategy\n    if strategy:\n        with strategy.scope():\n            return base_layer.Layer.get_weights(self)\n    return base_layer.Layer.get_weights(self)", "docstring": "Retrieves the weights of the model.\n\nReturns:\nA flat list of Numpy arrays.", "source": "github-repos"}
{"code": "def failure_packages(self, failure_index=None):\n    (phase, _) = self._get_failed_phase(failure_index)\n    fr = phase.failure_reason\n    return (fr.involved_requirements() if fr else None)", "docstring": "Get packages involved in a failure.\n\nArgs:\nfailure_index: See `failure_reason`.\n\nReturns:\nA list of Requirement objects.", "source": "codesearchnet"}
{"code": "def read_raw(self, key):\n        \n        data = None\n        if key is not None:\n            data = self.db.read(key.strip())\n        else:\n            self.tcex.log.warning(u'The key field was None.')\n        return data", "docstring": "Read method of CRUD operation for raw data.\n\nArgs:\nkey (string): The variable to read from the DB.\n\nReturns:\n(any): Results retrieved from DB.", "source": "juraj-google-style"}
{"code": "def row(self, content='', align='left'):\n    return u'{lm}{vert}{cont}{vert}'.format(lm=(' ' * self.margins.left), vert=self.border_style.outer_vertical, cont=self._format_content(content, align))", "docstring": "A row of the menu, which comprises the left and right verticals plus the given content.\n\nReturns:\nstr: A row of this menu component with the specified content.", "source": "codesearchnet"}
{"code": "def eval_autoregressive(self, features=None, decode_length=50):\n    results = self._slow_greedy_infer(features, decode_length=decode_length)\n    return (results['logits'], results['losses'])", "docstring": "Autoregressive eval.\n\nQuadratic time in decode_length.\n\nArgs:\nfeatures: an map of string to `Tensor`\ndecode_length: an integer.  How many additional timesteps to decode.\n\nReturns:\nlogits: `Tensor`\nlosses: a dictionary: {loss-name (string): floating point `Scalar`}.\nContains a single key \"training\".", "source": "codesearchnet"}
{"code": "def serialize_state(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_serialize_state(self._reader_ref, name=name)", "docstring": "Produce a string tensor that encodes the state of a reader.\n\nNot all Readers support being serialized, so this can produce an\nUnimplemented error.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA string Tensor.", "source": "github-repos"}
{"code": "def fetch(self, x, y, w, h):\n    if (not at_least_libvips(8, 8)):\n        raise Error('libvips too old')\n    psize = ffi.new('size_t *')\n    pointer = vips_lib.vips_region_fetch(self.pointer, x, y, w, h, psize)\n    if (pointer == ffi.NULL):\n        raise Error('unable to fetch from region')\n    pointer = ffi.gc(pointer, glib_lib.g_free)\n    return ffi.buffer(pointer, psize[0])", "docstring": "Fill a region with pixel data.\n\nPixels are filled with data!\n\nReturns:\nPixel data.\n\nRaises:\n:class:`.Error`", "source": "codesearchnet"}
{"code": "def read_int16(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack('%sh' % endian, 2)", "docstring": "Read 2 byte as a signed integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def findall(self, title=None):\n        \n        if title is None:\n            return list(self._sheets)\n        if title not in self._titles:\n            return []\n        return list(self._titles[title])", "docstring": "Return a list of worksheets with the given title.\n\nArgs:\ntitle(str): title/name of the worksheets to return, or ``None`` for all\nReturns:\nlist: list of contained worksheet instances (possibly empty)", "source": "juraj-google-style"}
{"code": "def BindVar(self, var_id, value):\n    \n    if var_id not in self._vars:\n      raise KeyError(var_id)\n\n    self._var_bindings[var_id].append(value)", "docstring": "Associates a value with given variable.\n\nThis can be called multiple times to associate multiple values.\n\nArgs:\nvar_id: A variable id to bind the values to.\nvalue: A value to bind to the specified variable.\n\nRaises:\nKeyError: If given variable is not specified in the pattern.", "source": "juraj-google-style"}
{"code": "def getEstTraitCorrCoef(self,term_i=None):\n        \n        cov = self.getEstTraitCovar(term_i)\n        stds=SP.sqrt(cov.diagonal())[:,SP.newaxis]\n        RV = cov/stds/stds.T\n        return RV", "docstring": "Returns the estimated trait correlation matrix\n\nArgs:\nterm_i:     index of the term we are interested in", "source": "juraj-google-style"}
{"code": "def write_structure(times=None):\n    if (times is None):\n        return report_loc.write_structure(f.root.times)\n    else:\n        if (not isinstance(times, Times)):\n            raise TypeError(\"Expected Times instance for param 'times' (default is root).\")\n        return report_loc.write_structure(times)", "docstring": "Produce a formatted record of a times data structure.\n\nArgs:\ntimes (Times, optional): If not provided, uses the current root timer.\n\nReturns:\nstr: Timer tree hierarchy in a formatted string.\n\nRaises:\nTypeError: If provided argument is not a Times object.", "source": "codesearchnet"}
{"code": "def _get_stats(self):\n    return _pywrap_dtensor_device.GetStats(context.context()._handle, self._device_info)", "docstring": "Returns the number of cache hit and miss for function compilation.\n\nReturns:\nA dictionary.\n'miss': number of cache misses;\n'hit': number of cache hits; and\n'size': size of cache;\nmiss count.", "source": "github-repos"}
{"code": "def _GetEventData(\n      self, parser_mediator, record_index, evtx_record, recovered=False):\n    \n    event_data = WinEvtxRecordEventData()\n\n    try:\n      event_data.record_number = evtx_record.identifier\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read record identifier from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n    try:\n      event_identifier = evtx_record.event_identifier\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read event identifier from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n      event_identifier = None\n\n    try:\n      event_identifier_qualifiers = evtx_record.event_identifier_qualifiers\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read event identifier qualifiers from event record: '\n          '{0:d} with error: {1!s}').format(record_index, exception))\n\n      event_identifier_qualifiers = None\n\n    event_data.offset = evtx_record.offset\n    event_data.recovered = recovered\n\n    if event_identifier is not None:\n      event_data.event_identifier = event_identifier\n\n      if event_identifier_qualifiers is not None:\n        event_data.message_identifier = (\n            (event_identifier_qualifiers << 16) | event_identifier)\n\n    event_data.event_level = evtx_record.event_level\n    event_data.source_name = evtx_record.source_name\n\n    \n    \n    event_data.computer_name = evtx_record.computer_name\n    event_data.user_sid = evtx_record.user_security_identifier\n\n    event_data.strings = list(evtx_record.strings)\n\n    event_data.strings_parsed = {}\n    if event_identifier in self._EVTX_FIELD_MAP.keys():\n      rules = self._EVTX_FIELD_MAP.get(event_identifier, [])\n      for rule in rules:\n        if len(evtx_record.strings) <= rule.index:\n          parser_mediator.ProduceExtractionWarning((\n              'evtx_record.strings has unexpected length of {0:d} '\n              '(expected at least {1:d})'.format(\n                  len(evtx_record.strings), rule.index)))\n\n        event_data.strings_parsed[rule.name] = evtx_record.strings[rule.index]\n\n    event_data.xml_string = evtx_record.xml_string\n\n    return event_data", "docstring": "Extract data from a Windows XML EventLog (EVTX) record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord_index (int): event record index.\nevtx_record (pyevtx.record): event record.\nrecovered (Optional[bool]): True if the record was recovered.\n\nReturn:\nWinEvtxRecordEventData: event data.", "source": "juraj-google-style"}
{"code": "def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,\n                       on_missing='error', on_keyerr='default'):\n    \n    if edges is None:\n        edges = G.edges()\n    if on_missing is None:\n        on_missing = 'error'\n    if on_keyerr is None:\n        on_keyerr = 'default'\n    if default is util_const.NoParam and on_keyerr == 'default':\n        on_keyerr = 'error'\n    \n    if on_missing == 'error':\n        data_iter = (G.adj[u][v] for u, v in edges)\n    elif on_missing == 'default':\n        data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}\n                     for u, v in edges)\n    else:\n        raise KeyError('on_missing={} must be error, filter or default'.format(\n            on_missing))\n    \n    if on_keyerr == 'error':\n        value_iter = (d[key] for d in data_iter)\n    elif on_keyerr == 'default':\n        value_iter = (d.get(key, default) for d in data_iter)\n    else:\n        raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))\n    return value_iter", "docstring": "Generates attributes values of specific edges\n\nArgs:\non_missing (str): Strategy for handling nodes missing from G.\nCan be {'error', 'default'}.  defaults to 'error'.\non_keyerr (str): Strategy for handling keys missing from node dicts.\nCan be {'error', 'default'}.  defaults to 'default'\nif default is specified, otherwise defaults to 'error'.", "source": "juraj-google-style"}
{"code": "def add(self, label):\n        \n        label.label_list = self\n        self.label_tree.addi(label.start, label.end, label)", "docstring": "Add a label to the end of the list.\n\nArgs:\nlabel (Label): The label to add.", "source": "juraj-google-style"}
{"code": "def __init__(self, model_class, key_name, key_value, property_name):\n        \n        super(DjangoORMStorage, self).__init__()\n        self.model_class = model_class\n        self.key_name = key_name\n        self.key_value = key_value\n        self.property_name = property_name", "docstring": "Constructor for Storage.\n\nArgs:\nmodel: string, fully qualified name of db.Model model class.\nkey_name: string, key name for the entity that has the credentials\nkey_value: string, key value for the entity that has the\ncredentials.\nproperty_name: string, name of the property that is an\nCredentialsProperty.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.CreateWorkflowTemplate = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.CreateWorkflowTemplateRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString,\n        )\n        self.GetWorkflowTemplate = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.GetWorkflowTemplateRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString,\n        )\n        self.InstantiateWorkflowTemplate = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateWorkflowTemplateRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.InstantiateInlineWorkflowTemplate = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateInlineWorkflowTemplateRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.UpdateWorkflowTemplate = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.UpdateWorkflowTemplateRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString,\n        )\n        self.ListWorkflowTemplates = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesResponse.FromString,\n        )\n        self.DeleteWorkflowTemplate = channel.unary_unary(\n            \"/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.DeleteWorkflowTemplateRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):\n    for node in nodes_to_prune:\n        del self._node_inputs[node]\n        del self._node_ctrl_inputs[node]\n        del self._node_recipients[node]\n        del self._node_ctrl_recipients[node]", "docstring": "Prune nodes out of input and recipient maps.\n\nArgs:\nnodes_to_prune: (`list` of `str`) Names of the nodes to be pruned.", "source": "github-repos"}
{"code": "def dot(A, B):\n    try:\n        result = A.__matmul__(B)\n        if (result is NotImplemented):\n            result = B.__rmatmul__(A)\n    except AttributeError:\n        result = B.__rmatmul__(A)\n    return result", "docstring": "Matrix multiplication between A and B\n\nThis function is equivalent to ``A @ B``, which is unfortunately\nnot possible under python 2.x.\n\nArgs:\nA (sequence):\nB (sequence):\n\nReturns:\nsequence:", "source": "codesearchnet"}
{"code": "def sort_auto_mapping(fname: str, overwrite: bool=False) -> Optional[bool]:\n    with open(fname, 'r', encoding='utf-8') as f:\n        content = f.read()\n    lines = content.split('\\n')\n    new_lines = []\n    line_idx = 0\n    while line_idx < len(lines):\n        if _re_intro_mapping.search(lines[line_idx]) is not None:\n            indent = len(re.search('^(\\\\s*)\\\\S', lines[line_idx]).groups()[0]) + 8\n            while not lines[line_idx].startswith(' ' * indent + '('):\n                new_lines.append(lines[line_idx])\n                line_idx += 1\n            blocks = []\n            while lines[line_idx].strip() != ']':\n                if lines[line_idx].strip() == '(':\n                    start_idx = line_idx\n                    while not lines[line_idx].startswith(' ' * indent + ')'):\n                        line_idx += 1\n                    blocks.append('\\n'.join(lines[start_idx:line_idx + 1]))\n                else:\n                    blocks.append(lines[line_idx])\n                line_idx += 1\n            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])\n            new_lines += blocks\n        else:\n            new_lines.append(lines[line_idx])\n            line_idx += 1\n    if overwrite:\n        with open(fname, 'w', encoding='utf-8') as f:\n            f.write('\\n'.join(new_lines))\n    else:\n        return '\\n'.join(new_lines) != content", "docstring": "Sort all auto mappings in a file.\n\nArgs:\nfname (`str`): The name of the file where we want to sort auto-mappings.\noverwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file.\n\nReturns:\n`Optional[bool]`: Returns `None` if `overwrite=True`. Otherwise returns `True` if the file has an auto-mapping\nimproperly sorted, `False` if the file is okay.", "source": "github-repos"}
{"code": "def get_api_init_text(packages, packages_to_ignore, output_package, api_name, api_version, compat_api_versions=None, lazy_loading=_LAZY_LOADING, use_relative_imports=False):\n    if compat_api_versions is None:\n        compat_api_versions = []\n    module_code_builder = _ModuleInitCodeBuilder(output_package, api_version, lazy_loading, use_relative_imports)\n\n    def in_packages(m):\n        return any((package in m for package in packages))\n    for module in list(sys.modules.values()):\n        if not module or not hasattr(module, '__name__') or module.__name__ is None or (not in_packages(module.__name__)):\n            continue\n        if packages_to_ignore and any([p for p in packages_to_ignore if p in module.__name__]):\n            continue\n        if ('.contrib.' in module.__name__ or module.__name__.endswith('.contrib')) and '.lite' not in module.__name__:\n            continue\n        for module_contents_name in dir(module):\n            if module.__name__ + '.' + module_contents_name in _SYMBOLS_TO_SKIP_EXPLICITLY:\n                continue\n            attr = getattr(module, module_contents_name)\n            _, attr = tf_decorator.unwrap(attr)\n            add_imports_for_symbol(module_code_builder, attr, module.__name__, module_contents_name, api_name, api_version)\n            for compat_api_version in compat_api_versions:\n                add_imports_for_symbol(module_code_builder, attr, module.__name__, module_contents_name, api_name, compat_api_version, _COMPAT_MODULE_TEMPLATE % compat_api_version)\n    if compat_api_versions:\n        add_nested_compat_imports(module_code_builder, compat_api_versions, output_package)\n    return module_code_builder.build()", "docstring": "Get a map from destination module to __init__.py code for that module.\n\nArgs:\npackages: Base python packages containing python with target tf_export\ndecorators.\npackages_to_ignore: python packages to be ignored when checking for\ntf_export decorators.\noutput_package: Base output python package where generated API will be\nadded.\napi_name: API you want to generate Currently, only `tensorflow`.\napi_version: API version you want to generate (1 or 2).\ncompat_api_versions: Additional API versions to generate under compat/\ndirectory.\nlazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is\nproduced and if `False`, static imports are used.\nuse_relative_imports: True if we should use relative imports when importing\nsubmodules.\n\nReturns:\nA dictionary where\nkey: (string) destination module (for e.g. tf or tf.consts).\nvalue: (string) text that should be in __init__.py files for\ncorresponding modules.", "source": "github-repos"}
{"code": "def set_signature_defs(tflite_model, signature_def_map):\n    model = tflite_model\n    if not isinstance(tflite_model, bytearray):\n        model = bytearray(tflite_model)\n    serialized_signature_def_map = {k: v.SerializeToString() for k, v in signature_def_map.items()}\n    model_buffer = signature_def_util.SetSignatureDefMap(model, serialized_signature_def_map)\n    return model_buffer", "docstring": "Sets SignatureDefs to the Metadata of a TfLite flatbuffer buffer.\n\nArgs:\ntflite_model: Binary TFLite model (bytes or bytes-like object) to which to\nadd signature_def.\nsignature_def_map: dict containing SignatureDefs to store in metadata.\nReturns:\nbuffer: A TFLite model binary identical to model buffer with\nmetadata field containing SignatureDef.\n\nRaises:\nValueError:\ntflite_model buffer does not contain a valid TFLite model.\nsignature_def_map is empty or does not contain a SignatureDef.", "source": "github-repos"}
{"code": "def predict_dataset(self, df):\n    if (len(list(df.columns)) == 2):\n        df.columns = ['A', 'B']\n    if (self.model is None):\n        raise AssertionError('Model has not been trained before predictions')\n    df2 = DataFrame()\n    for (idx, row) in df.iterrows():\n        df2 = df2.append(row, ignore_index=True)\n        df2 = df2.append({'A': row['B'], 'B': row['A']}, ignore_index=True)\n    return predict.predict(deepcopy(df2), deepcopy(self.model))[::2]", "docstring": "Runs Jarfo independently on all pairs.\n\nArgs:\nx (pandas.DataFrame): a CEPC format Dataframe.\nkwargs (dict): additional arguments for the algorithms\n\nReturns:\npandas.DataFrame: a Dataframe with the predictions.", "source": "codesearchnet"}
{"code": "def get_projection_on_elements(self, structure):\n        \n        dico = {}\n        for spin in self.data.keys():\n            dico[spin] = [[defaultdict(float)\n                           for i in range(self.nkpoints)]\n                          for j in range(self.nbands)]\n\n        for iat in range(self.nions):\n            name = structure.species[iat].symbol\n            for spin, d in self.data.items():\n                for k, b in itertools.product(range(self.nkpoints),\n                                              range(self.nbands)):\n                    dico[spin][b][k][name] = np.sum(d[k, b, iat, :])\n\n        return dico", "docstring": "Method returning a dictionary of projections on elements.\n\nArgs:\nstructure (Structure): Input structure.\n\nReturns:\na dictionary in the {Spin.up:[k index][b index][{Element:values}]]", "source": "juraj-google-style"}
{"code": "def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup):\n    draw_spectra_stacked(ss, title, num_rows, setup)\n    plt.show()", "docstring": "Plots one or more stacked in subplots sharing same x-axis.\n\nArgs:\nss: list of Spectrum objects\ntitle=None: window title\nnum_rows=None: (optional) number of rows for subplot grid. If not passed,\nnum_rows will be the number of plots, and the number of columns will be 1.\nIf passed, number of columns is calculated automatically.\nsetup: PlotSpectrumSetup object", "source": "codesearchnet"}
{"code": "def lstsq(A, b):\n    A = asarray(A, float)\n    b = asarray(b, float)\n    if (A.ndim == 1):\n        A = A[(:, newaxis)]\n    if (A.shape[1] == 1):\n        return (dot(A.T, b) / squeeze(dot(A.T, A)))\n    rcond = (finfo(double).eps * max(*A.shape))\n    return npy_lstsq(A, b, rcond=rcond)[0]", "docstring": "r\"\"\"Return the least-squares solution to a linear matrix equation.\n\nArgs:\nA (array_like): Coefficient matrix.\nb (array_like): Ordinate values.\n\nReturns:\n:class:`numpy.ndarray`: Least-squares solution.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.ExportAssets = channel.unary_unary(\n            \"/google.cloud.asset.v1beta1.AssetService/ExportAssets\",\n            request_serializer=google_dot_cloud_dot_asset__v1beta1_dot_proto_dot_asset__service__pb2.ExportAssetsRequest.SerializeToString,\n            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n        )\n        self.BatchGetAssetsHistory = channel.unary_unary(\n            \"/google.cloud.asset.v1beta1.AssetService/BatchGetAssetsHistory\",\n            request_serializer=google_dot_cloud_dot_asset__v1beta1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_asset__v1beta1_dot_proto_dot_asset__service__pb2.BatchGetAssetsHistoryResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def ask(question, default_answer=False, default_answer_str='no'):\n    response = default_answer\n\n    def should_ignore_tty():\n        '\\n        Check, if we want to ignore an opened tty result.\\n        '\n        ret_to_bool = {'yes': True, 'no': False, 'true': True, 'false': False}\n        envs = [os.getenv('CI', default='no'), os.getenv('TEST', default='no')]\n        vals = [ret_to_bool[val] for val in envs if (val in ret_to_bool)]\n        return any(vals)\n    ignore_stdin_istty = should_ignore_tty()\n    has_tty = (sys.stdin.isatty() and (not ignore_stdin_istty))\n    if has_tty:\n        response = query_yes_no(question, default_answer_str)\n    else:\n        LOG.debug('NoTTY: %s -> %s', question, response)\n    return response", "docstring": "Ask for user input.\n\nThis asks a yes/no question with a preset default.\nYou can bypass the user-input and fetch the default answer, if\nyou set\n\nArgs:\nquestion: The question to ask on stdout.\ndefault_answer: The default value to return.\ndefault_answer_str:\nThe default answer string that we present to the user.\n\nTests:\n>>> os.putenv(\"TEST\", \"yes\"); ask(\"Test?\", default_answer=True)\nTrue\n>>> os.putenv(\"TEST\", \"yes\"); ask(\"Test?\", default_answer=False)\nFalse", "source": "codesearchnet"}
{"code": "def cv_score_mean(self, X, y):\n        \n\n        X, y = self._format_inputs(X, y)\n\n        if self.problem_type.binary_classification:\n            kf = StratifiedKFold(\n                shuffle=True, random_state=RANDOM_STATE + 3)\n        elif self.problem_type.multi_classification:\n            self.target_type_transformer.inverse_transform(y)\n            transformer = self.target_type_transformer\n            kf = StratifiedKFoldMultiClassIndicator(\n                transformer, shuffle=True, n_splits=3,\n                random_state=RANDOM_STATE + 3)\n        elif self.problem_type.regression:\n            kf = KFold(shuffle=True, n_splits=3, random_state=RANDOM_STATE + 4)\n        else:\n            raise NotImplementedError\n\n        scoring = {\n            scorer_info.name: scorer_info.scorer\n            for scorer_info in self.scorers_info\n        }\n        cv_results = cross_validate(\n            self.estimator, X, y,\n            scoring=scoring, cv=kf, return_train_score=False)\n\n        \n        results = self._process_cv_results(cv_results)\n        return results", "docstring": "Compute mean score across cross validation folds.\n\nSplit data and labels into cross validation folds and fit the model for\neach fold. Then, for each scoring type in scorings, compute the score.\nFinally, average the scores across folds. Returns a dictionary mapping\nscoring to score.\n\nArgs:\nX (np.array): data\ny (np.array): labels\nscorings (List[str]): scoring types", "source": "juraj-google-style"}
{"code": "def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):\n    if steps is not None and batch_size is not None:\n        raise ValueError('If ' + steps_name + ' is set, the `batch_size` must be None.')\n    if check_steps_argument(ins, steps, steps_name):\n        return None\n    if hasattr(ins[0], 'shape'):\n        return int(ins[0].shape[0])\n    return None", "docstring": "Determine the number of samples provided for training and evaluation.\n\nThe number of samples is not defined when running with `steps`,\nin which case the number of samples is set to `None`.\n\nArgs:\nins: List of tensors to be fed to the Keras function.\nbatch_size: Integer batch size or `None` if not defined.\nsteps: Total number of steps (batches of samples) before declaring\n`_predict_loop` finished. Ignored with the default value of `None`.\nsteps_name: The public API's parameter name for `steps`.\n\nRaises:\nValueError: when `steps` is `None` and the attribute `ins.shape`\ndoes not exist. Also raises ValueError when `steps` is not `None`\nand `batch_size` is not `None` because they are mutually\nexclusive.\n\nReturns:\nWhen steps is `None`, returns the number of samples to be\nprocessed based on the size of the first dimension of the\nfirst input numpy array. When steps is not `None` and\n`batch_size` is `None`, returns `None`.", "source": "github-repos"}
{"code": "def _free_up_space(self, size, this_rel_path=None):\n        \n\n        \n        space = self.size + size - self.maxsize\n\n        if space <= 0:\n            return\n\n        removes = []\n\n        for row in self.database.execute(\"SELECT path, size, time FROM files ORDER BY time ASC\"):\n\n            if space > 0:\n                removes.append(row[0])\n                space -= row[1]\n            else:\n                break\n\n        for rel_path in removes:\n            if rel_path != this_rel_path:\n                global_logger.debug(\"Deleting {}\".format(rel_path))\n                self.remove(rel_path)", "docstring": "If there are not size bytes of space left, delete files\nuntil there is\n\nArgs:\nsize: size of the current file\nthis_rel_path: rel_pat to the current file, so we don't delete it.", "source": "juraj-google-style"}
{"code": "class FbgemmFp8Config(QuantizationConfigMixin):\n\n    def __init__(self, activation_scale_ub: float=1200.0, modules_to_not_convert: Optional[List]=None, **kwargs):\n        self.quant_method = QuantizationMethod.FBGEMM_FP8\n        self.activation_scale_ub = activation_scale_ub\n        self.modules_to_not_convert = modules_to_not_convert\n\n    def get_loading_attributes(self):\n        attibutes_dict = copy.deepcopy(self.__dict__)\n        loading_attibutes = ['activation_scale_ub']\n        loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes}\n        return loading_attibutes_dict", "docstring": "This is a wrapper class about all possible attributes and features that you can play with a model that has been\nloaded using fbgemm fp8 quantization.\n\nArgs:\nactivation_scale_ub (`float`, *optional*, defaults to 1200.0):\nThe activation scale upper bound. This is used when quantizing the input activation.\nmodules_to_not_convert (`list`, *optional*, default to `None`):\nThe list of modules to not quantize, useful for quantizing models that explicitly require to have\nsome modules left in their original precision.", "source": "github-repos"}
{"code": "def load_metrics(event_dir, epoch):\n  \n  metrics = {}\n  for filename in tf.gfile.ListDirectory(event_dir):\n    path = os.path.join(event_dir, filename)\n    for event in tf.train.summary_iterator(path):\n      if event.step == epoch and event.HasField(\"summary\"):\n        value = event.summary.value[0]\n        metrics[value.tag] = value.simple_value\n  return metrics", "docstring": "Loads metrics for this epoch if they have already been written.\n\nThis reads the entire event file but it's small with just per-epoch metrics.\n\nArgs:\nevent_dir: TODO(koz4k): Document this.\nepoch: TODO(koz4k): Document this.\n\nReturns:\nmetrics.", "source": "juraj-google-style"}
{"code": "def connect(self, host='localhost'):\n    get_logger().info('Connecting to RabbitMQ server...')\n    self._conn = pika.BlockingConnection(pika.ConnectionParameters(host=host))\n    self._channel = self._conn.channel()\n    get_logger().info('Declaring topic exchanger {}...'.format(self.exchange))\n    self._channel.exchange_declare(exchange=self.exchange, type='topic')\n    get_logger().info('Creating RabbitMQ queue...')\n    result = self._channel.queue_declare(exclusive=True)\n    self._queue_name = result.method.queue\n    if self.listen_all:\n        get_logger().info('Binding queue to exchanger {} (listen all)...'.format(self.exchange))\n        self._channel.queue_bind(exchange=self.exchange, queue=self._queue_name, routing_key='*')\n    else:\n        for routing_key in self.topics:\n            get_logger().info('Binding queue to exchanger {} with routing key {}...'.format(self.exchange, routing_key))\n            self._channel.queue_bind(exchange=self.exchange, queue=self._queue_name, routing_key=routing_key)\n    get_logger().info('Binding callback...')\n    self._channel.basic_consume(self._callback, queue=self._queue_name, no_ack=True)", "docstring": "Connect to the server and set everything up.\n\nArgs:\nhost: hostname to connect to", "source": "codesearchnet"}
{"code": "def FromDictionary(cls, dictionary):\n    if ('user_id' in dictionary):\n        raise errors.GitkitClientError('use localId instead')\n    if ('localId' not in dictionary):\n        raise errors.GitkitClientError('must specify localId')\n    if ('email' not in dictionary):\n        raise errors.GitkitClientError('must specify email')\n    return cls(decode=False, **dictionary)", "docstring": "Initializes from user specified dictionary.\n\nArgs:\ndictionary: dict of user specified attributes\nReturns:\nGitkitUser object", "source": "codesearchnet"}
{"code": "def _match_dbname(self, dbname):\n    for config in self._clusters:\n        if re.match(config['pattern'], dbname):\n            return config\n    raise Exception(('No such database %s.' % dbname))", "docstring": "Map a database name to the Cluster that holds the database.\n\nArgs:\ndbname: A database name.\n\nReturns:\nA dict containing the information about the Cluster that holds the\ndatabase.", "source": "codesearchnet"}
{"code": "def fetch(self, card_id, data={}, **kwargs):\n        \n        return super(Card, self).fetch(card_id, data, **kwargs)", "docstring": "Fetch Card for given Id\n\nArgs:\ncard_id : Id for which card object has to be retrieved\n\nReturns:\nCard dict for given card Id", "source": "juraj-google-style"}
{"code": "def set_direct(self, address_value_dict):\n        \n\n        with self._lock:\n            for address, value in address_value_dict.items():\n                self._validate_write(address)\n                if address in self._state:\n                    self._state[address].set_result(result=value)\n                else:\n                    fut = _ContextFuture(address=address)\n                    self._state[address] = fut\n                    fut.set_result(result=value)", "docstring": "Called in the context manager's set method to either overwrite the\nvalue for an address, or create a new future and immediately set a\nvalue in the future.\n\nArgs:\naddress_value_dict (dict of str:bytes): The unique full addresses\nwith bytes to set at that address.\n\nRaises:\nAuthorizationException", "source": "juraj-google-style"}
{"code": "def __call__(self, *args: Union[str, List[str]], **kwargs: Any) -> Union[Any, List[Any]]:\n    return super().__call__(*args, **kwargs)", "docstring": "Extract the features of the input(s) text.\n\nArgs:\nargs (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.\n\nReturn:\nA nested list of `float`: The features computed by the model.", "source": "github-repos"}
{"code": "class XLMPoolerEndLogits(nn.Module):\n\n    def __init__(self, config: XLMConfig):\n        super().__init__()\n        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n        self.activation = nn.Tanh()\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dense_1 = nn.Linear(config.hidden_size, 1)\n\n    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n        \n        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n        if start_positions is not None:\n            slen, hsz = hidden_states.shape[-2:]\n            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n            start_states = hidden_states.gather(-2, start_positions)\n            start_states = start_states.expand(-1, slen, -1)\n        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))\n        x = self.activation(x)\n        x = self.LayerNorm(x)\n        x = self.dense_1(x).squeeze(-1)\n        if p_mask is not None:\n            if p_mask.dtype == torch.float16:\n                x = x * (1 - p_mask) - 65500 * p_mask\n            else:\n                x = x * (1 - p_mask) - 1e+30 * p_mask\n        return x", "docstring": "Compute SQuAD end logits from sequence hidden states.\n\nArgs:\nconfig ([`XLMConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`\nto use.", "source": "github-repos"}
{"code": "def get_for_type(input_type=\"text\"):\n        \n\n        if input_type in RandomInputHelper.cache:\n            return RandomInputHelper.cache[input_type]\n\n        types = {\n            \"text\": RandomInputHelper.get_random_value,\n            \"hidden\": RandomInputHelper.get_random_value,\n            \"search\": RandomInputHelper.get_random_value,\n            \"color\": RandomInputHelper.get_random_color,\n            \"week\": {\"function\": RandomInputHelper.get_random_value, \"params\": [2, [\"1234\"]]},\n            \"password\": RandomInputHelper.get_random_password,\n            \"number\": RandomInputHelper.get_random_number,\n            \"tel\": RandomInputHelper.get_random_telephonenumber,\n            \"url\": RandomInputHelper.get_random_url,\n            \"textarea\": RandomInputHelper.get_random_text,\n            \"email\": RandomInputHelper.get_random_email\n        }\n\n        if types.get(input_type) is None:\n            return \"\"\n\n        if type(types.get(input_type)) is dict:\n            generator = types.get(input_type)\n            value = generator.get(\"function\")(*generator.get(\"params\"))\n        else:\n            value = types.get(input_type)()\n\n        RandomInputHelper.cache[input_type] = value\n\n        return value", "docstring": "Get a random string for the given html input type\n\nArgs:\ninput_type (str): The input type (e.g. email).\n\nReturns:\nstr: The (cached) random value.", "source": "juraj-google-style"}
{"code": "def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int=-1):\n    lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)\n    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)", "docstring": "Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate\nincreases linearly between 0 and the initial lr set in the optimizer.\n\nArgs:\noptimizer ([`~torch.optim.Optimizer`]):\nThe optimizer for which to schedule the learning rate.\nnum_warmup_steps (`int`):\nThe number of steps for the warmup phase.\nlast_epoch (`int`, *optional*, defaults to -1):\nThe index of the last epoch when resuming training.\n\nReturn:\n`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.", "source": "github-repos"}
{"code": "def is_valid_op(self, symmop):\n        \n        coords = self.centered_mol.cart_coords\n        for site in self.centered_mol:\n            coord = symmop.operate(site.coords)\n            ind = find_in_coord_list(coords, coord, self.tol)\n            if not (len(ind) == 1\n                    and self.centered_mol[ind[0]].species\n                    == site.species):\n                return False\n        return True", "docstring": "Check if a particular symmetry operation is a valid symmetry operation\nfor a molecule, i.e., the operation maps all atoms to another\nequivalent atom.\n\nArgs:\nsymmop (SymmOp): Symmetry operation to test.\n\nReturns:\n(bool): Whether SymmOp is valid for Molecule.", "source": "juraj-google-style"}
{"code": "def Run(self, conf, args):\n    try:\n        options, args = self.parser.parse_args(args)\n    except SystemExit as e:\n        return e.code\n    if options.maps:\n        self.log.info('Setting configured maps to %s', options.maps)\n        conf.maps = options.maps\n    warnings, errors = (0, 0)\n    self.log.info('Verifying program and system configuration.')\n    config_warnings, config_errors = config.VerifyConfiguration(conf)\n    warnings += config_warnings\n    errors += config_errors\n    self.log.info('Verifying data sources.')\n    errors += self.VerifySources(conf)\n    self.log.info('Verifying data caches.')\n    errors += self.VerifyMaps(conf)\n    self.log.info('Verification result: %d warnings, %d errors', warnings, errors)\n    if warnings + errors:\n        self.log.info('Verification failed!')\n    else:\n        self.log.info('Verification passed!')\n    return warnings + errors", "docstring": "Run the Verify command.\n\nSee Command.Run() for full documentation on the Run() method.\n\nArgs:\nconf: nss_cache.config.Config object\nargs: list of arguments to be parsed\n\nReturns:\ncount of warnings and errors detected when verifying", "source": "github-repos"}
{"code": "def verify(self, obj):\n        \n\n        if isinstance(obj, str):\n            raise ValidationError(\"Object was not a list\", reason=\"a string was passed instead of a list\", object=obj)\n\n        out_obj = []\n        if self._min_length is not None and len(obj) < self._min_length:\n            raise ValidationError(\"List was too short\",\n                                  reason=\"list length %d was less than the minimum %d\" % (len(obj), self._min_length),\n                                  min_length=self._min_length, actual_length=len(obj))\n\n        if self._max_length is not None and len(obj) > self._max_length:\n            raise ValidationError(\"List was too long\",\n                                  reason=\"list length %d was greater than the max %d\" % (len(obj), self._max_length),\n                                  min_length=self._max_length, actual_length=len(obj))\n\n        for val in obj:\n            out_obj.append(self._verifier.verify(val))\n\n        return out_obj", "docstring": "Verify that the object conforms to this verifier's schema\n\nArgs:\nobj (object): A python object to verify\n\nRaises:\nValidationError: If there is a problem verifying the dictionary, a\nValidationError is thrown with at least the reason key set indicating\nthe reason for the lack of validation.", "source": "juraj-google-style"}
{"code": "def generate_match_query(field, value, with_operator_and):\n    parsed_value = None\n    try:\n        parsed_value = json.loads(value.lower())\n    except (ValueError, TypeError, AttributeError):\n        pass\n    if isinstance(value, bool):\n        return {'match': {field: value}}\n    elif isinstance(parsed_value, bool):\n        return {'match': {field: value.lower()}}\n    if with_operator_and:\n        return {'match': {field: {'query': value, 'operator': 'and'}}}\n    return {'match': {field: value}}", "docstring": "Helper for generating a match query.\n\nArgs:\nfield (six.text_type): The ES field to be queried.\nvalue (six.text_type/bool): The value of the query (bool for the case of type-code query [\"core: true\"]).\nwith_operator_and (bool): Flag that signifies whether to generate the explicit notation of the query, along\nwith '\"operator\": \"and\"', so that all tokens of the query value are required to match.\n\nNotes:\nIf value is of instance bool, then the shortened version of the match query is generated, at all times.", "source": "codesearchnet"}
{"code": "def movies(self, **kwargs):\n    path = self._get_id_path('movies')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of movies for a particular genre by id. By default, only\nmovies with 10 or more votes are included.\n\nArgs:\npage: (optional) Minimum 1, maximum 1000.\nlanguage: (optional) ISO 639-1 code.\ninclude_all_movies: (optional) Toggle the inclusion of all movies\nand not just those with 10 or more ratings.\nExpected value is: True or False.\ninclude_adult: (optional) Toggle the inclusion of adult titles.\nExpected value is: True or False.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def copyright_model_factory(*, validator=validators.is_copyright_model, **kwargs):\n    kwargs['ld_type'] = 'Copyright'\n    return _model_factory(validator=validator, **kwargs)", "docstring": "Generate a Copyright model.\n\nExpects ``data``, ``validator``, ``model_cls``, and ``ld_context``\nas keyword arguments.\n\nRaises:\n:exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword\nargument is given.", "source": "codesearchnet"}
{"code": "def Artifacts(self, os_name=None, cpe=None, label=None):\n    return [c.artifact for c in self.conditions if c.Artifacts(os_name, cpe, label)]", "docstring": "Find the artifacts that correspond with other trigger conditions.\n\nArgs:\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nA list of artifacts to be processed.", "source": "codesearchnet"}
{"code": "def attention_bias_ignore_padding(memory_padding):\n  \n  ret = memory_padding * large_compatible_negative(memory_padding.dtype)\n  return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)", "docstring": "Create an bias tensor to be added to attention logits.\n\nArgs:\nmemory_padding: a float `Tensor` with shape [batch, memory_length].\n\nReturns:\na `Tensor` with shape [batch, 1, 1, memory_length].", "source": "juraj-google-style"}
{"code": "def value_ranges(self, value_ranges):\n        \n\n        self._value_ranges = value_ranges\n        self._logger.log('debug', 'Value ranges set to {}'.format(\n            value_ranges\n        ))", "docstring": "Set the types, min/max values for tunable parameters\n\nArgs:\nvalue_ranges (list): each element defines a tunable variable in\nthe form \"(type ('int' or 'float'), (min_val, max_val))\";\ninitial, random values for each bee will between \"min_val\" and\n\"max_val\"", "source": "juraj-google-style"}
{"code": "def filter_pem(data):\n    assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data))\n    certs = set()\n    new_list = []\n    in_pem_block = False\n    for line in re.split(b'[\\\\r\\\\n]+', data):\n        if (line == b'-----BEGIN CERTIFICATE-----'):\n            assert (not in_pem_block)\n            in_pem_block = True\n        elif (line == b'-----END CERTIFICATE-----'):\n            assert in_pem_block\n            in_pem_block = False\n            content = b''.join(new_list)\n            content = rewrap_bytes(content)\n            certs.add(((b'-----BEGIN CERTIFICATE-----\\n' + content) + b'\\n-----END CERTIFICATE-----\\n'))\n            new_list = []\n        elif in_pem_block:\n            new_list.append(line)\n    return certs", "docstring": "Processes the bytes for PEM certificates.\n\nReturns:\n``set`` containing each certificate", "source": "codesearchnet"}
{"code": "def check_tape_safe(self, operator, skip_options=None):\n    skip_options = skip_options or []\n    if not operator.variables:\n        raise AssertionError('`operator.variables` was empty')\n\n    def _assert_not_none(iterable):\n        for item in iterable:\n            self.assertIsNotNone(item)\n    with backprop.GradientTape() as tape:\n        grad = tape.gradient(operator.to_dense(), operator.variables)\n        _assert_not_none(grad)\n    with backprop.GradientTape() as tape:\n        var_grad = tape.gradient(operator, operator.variables)\n        _assert_not_none(var_grad)\n        nest.assert_same_structure(var_grad, grad)\n    with backprop.GradientTape() as tape:\n        _assert_not_none(tape.gradient(operator.adjoint().to_dense(), operator.variables))\n    x = math_ops.cast(array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype)\n    with backprop.GradientTape() as tape:\n        _assert_not_none(tape.gradient(operator.matvec(x), operator.variables))\n    if not operator.is_square:\n        return\n    for option in [CheckTapeSafeSkipOptions.DETERMINANT, CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT, CheckTapeSafeSkipOptions.DIAG_PART, CheckTapeSafeSkipOptions.TRACE]:\n        with backprop.GradientTape() as tape:\n            if option not in skip_options:\n                _assert_not_none(tape.gradient(getattr(operator, option)(), operator.variables))\n    if operator.is_non_singular is False:\n        return\n    with backprop.GradientTape() as tape:\n        _assert_not_none(tape.gradient(operator.inverse().to_dense(), operator.variables))\n    with backprop.GradientTape() as tape:\n        _assert_not_none(tape.gradient(operator.solvevec(x), operator.variables))\n    if not (operator.is_self_adjoint and operator.is_positive_definite):\n        return\n    with backprop.GradientTape() as tape:\n        _assert_not_none(tape.gradient(operator.cholesky().to_dense(), operator.variables))", "docstring": "Check gradients are not None w.r.t. operator.variables.\n\nMeant to be called from the derived class.\n\nThis ensures grads are not w.r.t every variable in operator.variables.  If\nmore fine-grained testing is needed, a custom test should be written.\n\nArgs:\noperator: LinearOperator.  Exact checks done will depend on hints.\nskip_options: Optional list of CheckTapeSafeSkipOptions.\nMakes this test skip particular checks.", "source": "github-repos"}
{"code": "def remove_send_last_message(self, connection):\n    if (connection in self._send_last_message):\n        del self._send_last_message[connection]\n        LOGGER.debug('Removed send_last_message function for connection %s', connection)\n    else:\n        LOGGER.warning('Attempted to remove send_last_message function for connection %s, but no send_last_message function was registered', connection)", "docstring": "Removes a send_last_message function previously registered\nwith the Dispatcher.\n\nArgs:\nconnection (str): A locally unique identifier provided\nby the receiver of messages.", "source": "codesearchnet"}
{"code": "def repeat(self, caller: Caller[RequestT, ResponseT], request: RequestT, timeout: float, metrics_collector: Optional[_MetricsCollector]) -> ResponseT:\n    pass", "docstring": "Implements a repeater strategy for RequestResponseIO when a repeater\nis enabled.\n\nArgs:\ncaller: a `~apache_beam.io.requestresponse.Caller` object that\ncalls the API.\nrequest: input request to repeat.\ntimeout: time to wait for the request to complete.\nmetrics_collector: (Optional) a\n`~apache_beam.io.requestresponse._MetricsCollector` object\nto collect the metrics for RequestResponseIO.", "source": "github-repos"}
{"code": "def _GetTypeFromScope(self, package, type_name, scope):\n    if (type_name not in scope):\n        components = _PrefixWithDot(package).split('.')\n        while components:\n            possible_match = '.'.join((components + [type_name]))\n            if (possible_match in scope):\n                type_name = possible_match\n                break\n            else:\n                components.pop((- 1))\n    return scope[type_name]", "docstring": "Finds a given type name in the current scope.\n\nArgs:\npackage: The package the proto should be located in.\ntype_name: The name of the type to be found in the scope.\nscope: Dict mapping short and full symbols to message and enum types.\n\nReturns:\nThe descriptor for the requested type.", "source": "codesearchnet"}
{"code": "def author_name_contains_fullnames(author_name):\n\n    def _is_initial(name_part):\n        return ((len(name_part) == 1) or (u'.' in name_part))\n    parsed_name = ParsedName(author_name)\n    if (len(parsed_name) == 1):\n        return False\n    elif any([_is_initial(name_part) for name_part in parsed_name]):\n        return False\n    return True", "docstring": "Recognizes whether the name contains full name parts and not initials or only lastname.\n\nReturns:\nbool: True if name has only full name parts, e.g. 'Ellis John', False otherwise. So for example, False is\nreturned for 'Ellis, J.' or 'Ellis'.", "source": "codesearchnet"}
{"code": "def chip_as_adjacency_list(device: 'cirq.google.XmonDevice') -> Dict[(GridQubit, List[GridQubit])]:\n    c_set = set(device.qubits)\n    c_adj = {}\n    for n in device.qubits:\n        c_adj[n] = []\n        for m in [above(n), left_of(n), below(n), right_of(n)]:\n            if (m in c_set):\n                c_adj[n].append(m)\n    return c_adj", "docstring": "Gives adjacency list representation of a chip.\n\nThe adjacency list is constructed in order of above, left_of, below and\nright_of consecutively.\n\nArgs:\ndevice: Chip to be converted.\n\nReturns:\nMap from nodes to list of qubits which represent all the neighbours of\ngiven qubit.", "source": "codesearchnet"}
{"code": "def param_static_shapes(cls, sample_shape):\n    if isinstance(sample_shape, tensor_shape.TensorShape):\n        if not sample_shape.is_fully_defined():\n            raise ValueError('TensorShape sample_shape must be fully defined')\n        sample_shape = sample_shape.as_list()\n    params = cls.param_shapes(sample_shape)\n    static_params = {}\n    for name, shape in params.items():\n        static_shape = tensor_util.constant_value(shape)\n        if static_shape is None:\n            raise ValueError('sample_shape must be a fully-defined TensorShape or list/tuple')\n        static_params[name] = tensor_shape.TensorShape(static_shape)\n    return static_params", "docstring": "param_shapes with static (i.e. `TensorShape`) shapes.\n\nThis is a class method that describes what key/value arguments are required\nto instantiate the given `Distribution` so that a particular shape is\nreturned for that instance's call to `sample()`. Assumes that the sample's\nshape is known statically.\n\nSubclasses should override class method `_param_shapes` to return\nconstant-valued tensors when constant values are fed.\n\nArgs:\nsample_shape: `TensorShape` or python list/tuple. Desired shape of a call\nto `sample()`.\n\nReturns:\n`dict` of parameter name to `TensorShape`.\n\nRaises:\nValueError: if `sample_shape` is a `TensorShape` and is not fully defined.", "source": "github-repos"}
{"code": "def StartsWithIgnoreCase(self, value):\n    self._awql = self._CreateSingleValueCondition(value, 'STARTS_WITH_IGNORE_CASE')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"starts with ignore case\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def export_disks(self, standalone, dst_dir, compress, collect_only=False, with_threads=True, *args, **kwargs):\n    vm_export_mgr = export.VMExportManager(*args, disks=self.vm.disks, dst=dst_dir, compress=compress, with_threads=with_threads, standalone=standalone, **kwargs)\n    if collect_only:\n        return {self.vm.name(): vm_export_mgr.collect_paths()}\n    else:\n        return {self.vm.name(): vm_export_mgr.export()}", "docstring": "Export all the disks of self.\n\nArgs:\nstandalone (bool): if true, merge the base images and the layered\nimage into a new file (Supported only in qcow2 format)\ndst_dir (str): dir to place the exported disks\ncompress(bool): if true, compress each disk.\ncollect_only(bool): If true, return only a dict which maps between\nthe name of the vm to the paths of the disks that will be\nexported (don't export anything).\nwith_threads(bool): If True, export disks in parallel\n\nReturns:\n(dict): which maps between the name of the vm to the paths of\nthe disks that will be exported", "source": "codesearchnet"}
{"code": "def assign_selective_dynamics(self, slab):\n        \n        sd_list = []\n        sd_list = [[False, False, False] if site.properties['surface_properties'] == 'subsurface'\n                   else [True, True, True] for site in slab.sites]\n        new_sp = slab.site_properties\n        new_sp['selective_dynamics'] = sd_list\n        return slab.copy(site_properties=new_sp)", "docstring": "Helper function to assign selective dynamics site_properties\nbased on surface, subsurface site properties\n\nArgs:\nslab (Slab): slab for which to assign selective dynamics", "source": "juraj-google-style"}
{"code": "def smash(self):\n        \n        self._initialize_smash()\n        try:\n            stack_name = self._config.get('environment', {}).get('stack_name', None)\n            response = self._cloudFormation.describe_stacks(StackName=stack_name)\n            logging.debug('smash pre-flight returned: {}'.format(\n                json.dumps(response,\n                           indent=4,\n                           default=json_util.default\n                           )))\n        except ClientError as wtf:\n            logging.warning('your stack is in another castle [0].')\n            return False\n        except Exception as wtf:\n            logging.error('failed to find intial status of smash candidate: {}'.format(wtf))\n            return False\n\n        response = self._cloudFormation.delete_stack(StackName=stack_name)\n        logging.info('delete started for stack: {}'.format(stack_name))\n        logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))\n        return self.poll_stack()", "docstring": "Smash the given stack\n\nArgs:\nNone\n\nReturns:\nTrue if True\n\nTodo:\nFigure out what could go wrong and take steps\nto hanlde problems.", "source": "juraj-google-style"}
{"code": "def connections(self):\n    if (not self.__connections):\n        self.__connections = Connections(self.__connection)\n    return self.__connections", "docstring": "Gets the Connections API client.\n\nReturns:\nConnections:", "source": "codesearchnet"}
{"code": "def data_it(db_data, user_type):\n        \n        data_type = {\n            'array': (list),\n            \n            \n            'dict': (dict),\n            'entity': (dict),\n            'list': (list),\n            'str': (string_types),\n            'string': (string_types),\n        }\n        \n        \n        if user_type is None:\n            if db_data is None:\n                return True\n        elif user_type.lower() in ['null', 'none']:\n            if db_data is None:\n                return True\n        elif user_type.lower() in 'binary':\n            \n            try:\n                base64.b64decode(db_data)\n                return True\n            except Exception:\n                return False\n        elif data_type.get(user_type.lower()) is not None:\n            if isinstance(db_data, data_type.get(user_type.lower())):\n                return True\n        return False", "docstring": "Validate data is type.\n\nArgs:\ndb_data (dict|str|list): The data store in Redis.\nuser_data (str): The user provided data.\n\nReturns:\nbool: True if the data passed validation.", "source": "juraj-google-style"}
{"code": "def get_actions(self, parent_environ=None):\n    interp = Python(target_environ={}, passive=True)\n    executor = self._create_executor(interp, parent_environ)\n    self._execute(executor)\n    return executor.actions", "docstring": "Get the list of rex.Action objects resulting from interpreting this\ncontext. This is provided mainly for testing purposes.\n\nArgs:\nparent_environ Environment to interpret the context within,\ndefaults to os.environ if None.\n\nReturns:\nA list of rex.Action subclass instances.", "source": "codesearchnet"}
{"code": "def ParseChat(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    participants = self._GetRowValue(query_hash, row, 'participants')\n    author = self._GetRowValue(query_hash, row, 'author')\n    dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')\n    from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')\n\n    accounts = []\n    participants = participants.split(' ')\n    for participant in participants:\n      if participant != author:\n        accounts.append(participant)\n\n    to_account = ', '.join(accounts)\n    if not to_account:\n      to_account = dialog_partner or 'Unknown User'\n\n    from_account = '{0:s} <{1:s}>'.format(from_displayname, author)\n\n    event_data = SkypeChatEventData()\n    event_data.from_account = from_account\n    event_data.query = query\n    event_data.text = self._GetRowValue(query_hash, row, 'body_xml')\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.to_account = to_account\n\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a chat message.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def create_customer(self, *, full_name, email):\n        \n        payload = {\n            \"fullName\": full_name,\n            \"email\": email\n        }\n        return self.client._post(self.url + 'customers', json=payload, headers=self.get_headers())", "docstring": "Creation of a customer in the system.\n\nArgs:\nfull_name: Customer's complete name.\nAlphanumeric. Max: 255.\n\nemail: Customer's email address.\nAlphanumeric. Max: 255.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)\n  if match:\n    error(filename, linenum, 'build/explicit_make_pair',\n          4,  \n          'For C++11-compatibility, omit template arguments from make_pair'\n          ' OR use pair directly OR if appropriate, construct a pair directly')", "docstring": "Check that make_pair's template arguments are deduced.\n\nG++ 4.6 in C++11 mode fails badly if make_pair's template arguments are\nspecified explicitly, and such use isn't intended in any case.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "class TFDebertaV2StableDropout(keras.layers.Layer):\n\n    def __init__(self, drop_prob, **kwargs):\n        super().__init__(**kwargs)\n        self.drop_prob = drop_prob\n\n    @tf.custom_gradient\n    def xdropout(self, inputs):\n        \n        mask = tf.cast(1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool)\n        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)\n        if self.drop_prob > 0:\n            inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale\n\n        def grad(upstream):\n            if self.drop_prob > 0:\n                return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale\n            else:\n                return upstream\n        return (inputs, grad)\n\n    def call(self, inputs: tf.Tensor, training: tf.Tensor=False):\n        if training:\n            return self.xdropout(inputs)\n        return inputs", "docstring": "Optimized dropout module for stabilizing the training\n\nArgs:\ndrop_prob (float): the dropout probabilities", "source": "github-repos"}
{"code": "async def trio_open_connection(host, port, *, ssl=False, **kwargs):\n    import trio\n    if (not ssl):\n        sock = (await trio.open_tcp_stream(host, port))\n    else:\n        if isinstance(ssl, bool):\n            ssl_context = None\n        else:\n            ssl_context = ssl\n        sock = (await trio.open_ssl_over_tcp_stream(host, port, ssl_context=ssl_context))\n        (await sock.do_handshake())\n    sock.close = sock.aclose\n    return sock", "docstring": "Allows connections to be made that may or may not require ssl.\nSomewhat surprisingly trio doesn't have an abstraction for this like\ncurio even though it's fairly trivial to write. Down the line hopefully.\n\nArgs:\nhost (str): Network location, either by domain or IP.\nport (int): The requested port.\nssl (bool or SSLContext): If False or None, SSL is not required. If\nTrue, the context returned by trio.ssl.create_default_context will\nbe used. Otherwise, this may be an SSLContext object.\nkwargs: A catch all to soak up curio's additional kwargs and\nignore them.", "source": "codesearchnet"}
{"code": "async def download_file(context, url, abs_filename, session=None, chunk_size=128):\n    session = (session or context.session)\n    loggable_url = get_loggable_url(url)\n    log.info('Downloading %s', loggable_url)\n    parent_dir = os.path.dirname(abs_filename)\n    async with session.get(url) as resp:\n        if (resp.status == 404):\n            (await _log_download_error(resp, '404 downloading %(url)s: %(status)s; body=%(body)s'))\n            raise Download404('{} status {}!'.format(loggable_url, resp.status))\n        elif (resp.status != 200):\n            (await _log_download_error(resp, 'Failed to download %(url)s: %(status)s; body=%(body)s'))\n            raise DownloadError('{} status {} is not 200!'.format(loggable_url, resp.status))\n        makedirs(parent_dir)\n        with open(abs_filename, 'wb') as fd:\n            while True:\n                chunk = (await resp.content.read(chunk_size))\n                if (not chunk):\n                    break\n                fd.write(chunk)\n    log.info('Done')", "docstring": "Download a file, async.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nurl (str): the url to download\nabs_filename (str): the path to download to\nsession (aiohttp.ClientSession, optional): the session to use.  If\nNone, use context.session.  Defaults to None.\nchunk_size (int, optional): the chunk size to read from the response\nat a time.  Default is 128.", "source": "codesearchnet"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(ContractTransaction, self).__init__(*args, **kwargs)\n        self.Type = TransactionType.ContractTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):\n    rst_file = RasterUtilClass.read_raster(tif)\n    nodata = rst_file.noDataValue\n    if change_nodata:\n        if (not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA)):\n            nodata = DEFAULT_NODATA\n            rst_file.data[(rst_file.data == rst_file.noDataValue)] = DEFAULT_NODATA\n    gdal_type = rst_file.dataType\n    if change_gdal_type:\n        gdal_type = GDT_Float32\n    RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data, rst_file.geotrans, rst_file.srs, nodata, gdal_type)", "docstring": "Converting Raster format to GeoTIFF.\n\nArgs:\ntif: source raster file path.\ngeotif: output raster file path.\nchange_nodata: change NoDataValue to -9999 or not.\ngdal_type (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.\nchange_gdal_type: If True, output the Float32 data type.", "source": "codesearchnet"}
{"code": "def run_inference(self, batch: Sequence[pandas.DataFrame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    return self._inference_fn(batch, model, inference_args)", "docstring": "Runs inferences on a batch of pandas dataframes.\n\nArgs:\nbatch: A sequence of examples as pandas dataframes. Each\nrow in a dataframe is a single example. The dimensions\nmust match the dimensions of the data used to train\nthe model.\nmodel: XGBoost booster or XBGModel (sklearn interface). Must\nimplement predict(X). Where the parameter X is a pandas dataframe.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def __init__(self, expected_methods):\n    \n\n    if not expected_methods:\n      raise ValueError(\"There must be at least one expected method\")\n    Error.__init__(self)\n    self._expected_methods = expected_methods", "docstring": "Init exception.\n\nArgs:\n# expected_methods: A sequence of MockMethod objects that should have been\n#   called.\nexpected_methods: [MockMethod]\n\nRaises:\nValueError: if expected_methods contains no methods.", "source": "juraj-google-style"}
{"code": "def verify(self, token, **kwargs):\n    path = '/runners/verify'\n    post_data = {'token': token}\n    self.gitlab.http_post(path, post_data=post_data, **kwargs)", "docstring": "Validates authentication credentials for a registered Runner.\n\nArgs:\ntoken (str): The runner's authentication token\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabVerifyError: If the server failed to verify the token", "source": "codesearchnet"}
{"code": "def cmd_path(self, cmd):\n    for binscript in self.bin.files:\n        if binscript.path.endswith('/{0}'.format(cmd)):\n            return binscript.path\n    raise ValueError('The command {0} was not found.'.format(cmd))", "docstring": "Get the path of a command in the virtual if it exists.\n\nArgs:\ncmd (str): The command to look for.\n\nReturns:\nstr: The full path to the command.\n\nRaises:\nValueError: If the command is not present.", "source": "codesearchnet"}
{"code": "def archive(self, output_path):\n    if (self.path is None):\n        raise ArgumentError('Cannot archive a recipe yet without a reference to its original yaml file in self.path')\n    outfile = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)\n    outfile.write(self.path, arcname='recipe_script.yaml')\n    written_files = set()\n    for (_factory, args, _resources, files) in self.steps:\n        for arg_name in files:\n            file_path = args[arg_name]\n            if (file_path in written_files):\n                continue\n            if (os.path.basename(file_path) != file_path):\n                raise ArgumentError('Cannot archive a recipe yet that references file not in the same directory as the recipe')\n            full_path = os.path.join(os.path.dirname(self.path), file_path)\n            outfile.write(full_path, arcname=file_path)\n            written_files.add(file_path)", "docstring": "Archive this recipe and all associated files into a .ship archive.\n\nArgs:\noutput_path (str): The path where the .ship file should be saved.", "source": "codesearchnet"}
{"code": "def __rmod__(self, other):\n    other = as_dimension(other)\n    return other % self", "docstring": "Returns `other` modulo `self`.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is `other` modulo `self`.", "source": "github-repos"}
{"code": "def set_float(self, option, value):\n    if (not isinstance(value, float)):\n        raise TypeError('Value must be a float')\n    self.options[option] = value", "docstring": "Set a float option.\n\nArgs:\noption (str): name of option.\nvalue (float): value of the option.\n\nRaises:\nTypeError: Value must be a float.", "source": "codesearchnet"}
{"code": "def get_font(self, weight='medium', slant='upright', width='normal'):\n        \n        def find_closest_style(style, styles, alternatives):\n            try:\n                return style, styles[style]\n            except KeyError:\n                for option in alternatives[style]:\n                    try:\n                        return option, styles[option]\n                    except KeyError:\n                        continue\n\n        def find_closest_weight(weight, weights):\n            index = FontWeight.values.index(weight)\n            min_distance = len(FontWeight.values)\n            closest = None\n            for i, option in enumerate(FontWeight.values):\n                if option in weights and abs(index - i) < min_distance:\n                    min_distance = abs(index - i)\n                    closest = option\n            return closest, weights[closest]\n\n        available_width, slants = find_closest_style(width, self,\n                                                     FontWidth.alternatives)\n        available_slant, weights = find_closest_style(slant, slants,\n                                                      FontSlant.alternatives)\n        available_weight, font = find_closest_weight(weight, weights)\n\n        if (available_width != width or available_slant != slant or\n            available_weight != weight):\n            warn('{} does not include a {} {} {} font. Falling back to {} {} '\n                 '{}'.format(self.name, width, weight, slant, available_width,\n                             available_weight, available_slant))\n        return font", "docstring": "Return the font matching or closest to the given style\n\nIf a font with the given weight, slant and width is available, return\nit. Otherwise, return the font that is closest in style.\n\nArgs:\nweight (FontWeight): weight of the font\nslant (FontSlant): slant of the font\nwidth (FontWidth): width of the font\n\nReturns:\nFont: the requested font", "source": "juraj-google-style"}
{"code": "def rename(self, new_folder_name):\n    headers = self.headers\n    endpoint = ('https:\n    payload = (('{ \"DisplayName\": \"' + new_folder_name) + '\"}')\n    r = requests.patch(endpoint, headers=headers, data=payload)\n    if check_response(r):\n        return_folder = r.json()\n        return self._json_to_folder(self.account, return_folder)", "docstring": "Renames the Folder to the provided name.\n\nArgs:\nnew_folder_name: A string of the replacement name.\n\nRaises:\nAuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.\n\nReturns:\nA new Folder representing the folder with the new name on Outlook.", "source": "codesearchnet"}
{"code": "def step1_get_device_and_user_codes(self, http=None):\n    if (self.device_uri is None):\n        raise ValueError('The value of device_uri must not be None.')\n    body = urllib.parse.urlencode({'client_id': self.client_id, 'scope': self.scope})\n    headers = {'content-type': 'application/x-www-form-urlencoded'}\n    if (self.user_agent is not None):\n        headers['user-agent'] = self.user_agent\n    if (http is None):\n        http = transport.get_http_object()\n    (resp, content) = transport.request(http, self.device_uri, method='POST', body=body, headers=headers)\n    content = _helpers._from_bytes(content)\n    if (resp.status == http_client.OK):\n        try:\n            flow_info = json.loads(content)\n        except ValueError as exc:\n            raise OAuth2DeviceCodeError('Could not parse server response as JSON: \"{0}\", error: \"{1}\"'.format(content, exc))\n        return DeviceFlowInfo.FromResponse(flow_info)\n    else:\n        error_msg = 'Invalid response {0}.'.format(resp.status)\n        try:\n            error_dict = json.loads(content)\n            if ('error' in error_dict):\n                error_msg += ' Error: {0}'.format(error_dict['error'])\n        except ValueError:\n            pass\n        raise OAuth2DeviceCodeError(error_msg)", "docstring": "Returns a user code and the verification URL where to enter it\n\nReturns:\nA user code as a string for the user to authorize the application\nAn URL as a string where the user has to enter the code", "source": "codesearchnet"}
{"code": "def get_country_name(self, callsign, timestamp=timestamp_now):\n    return self.get_all(callsign, timestamp)[const.COUNTRY]", "docstring": "Returns the country name where the callsign is located\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nstr: name of the Country\n\nRaises:\nKeyError: No Country found for callsign\n\nNote:\nDon't rely on the country name when working with several instances of\npy:class:`Callinfo`. Clublog and Country-files.org use slightly different names\nfor countries. Example:\n\n- Country-files.com: \"Fed. Rep. of Germany\"\n- Clublog: \"FEDERAL REPUBLIC OF GERMANY\"", "source": "codesearchnet"}
{"code": "def process_event(self, event_name: str, data: dict) -> None:\n        \n        if event_name == \"after_validation\":\n            if data['impatience'] > self._learning_rate_last_impatience:\n                self._learning_rate_cur_impatience += 1\n            else:\n                self._learning_rate_cur_impatience = 0\n\n            self._learning_rate_last_impatience = data['impatience']\n\n            if (self._learning_rate_drop_patience is not None) and\\\n                    (self._learning_rate_cur_impatience >=\n                     self._learning_rate_drop_patience):\n                self._learning_rate_cur_impatience = 0\n                self._learning_rate_cur_div *= self._learning_rate_drop_div\n                self._lr /= self._learning_rate_drop_div\n                self._update_graph_variables(learning_rate=self._lr)\n                log.info(f\"New learning rate dividor = {self._learning_rate_cur_div}\")\n        if event_name == 'after_batch':\n            if (self._lr is not None) and self._lr_update_on_batch:\n                self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div\n                self._update_graph_variables(learning_rate=self._lr)\n            if (self._mom is not None) and self._mom_update_on_batch:\n                self._mom = min(1., max(0., self._mom_schedule.next_val()))\n                self._update_graph_variables(momentum=self._mom)\n        if event_name == 'after_epoch':\n            if (self._lr is not None) and not self._lr_update_on_batch:\n                self._lr = self._lr_schedule.next_val() / self._learning_rate_cur_div\n                self._update_graph_variables(learning_rate=self._lr)\n            if (self._mom is not None) and not self._mom_update_on_batch:\n                self._mom = min(1., max(0., self._mom_schedule.next_val()))\n                self._update_graph_variables(momentum=self._mom)\n        if event_name == 'after_train_log':\n            if (self._lr is not None) and ('learning_rate' not in data):\n                data['learning_rate'] = self._lr\n            if (self._mom is not None) and ('momentum' not in data):\n                data['momentum'] = self._mom", "docstring": "Update learning rate and momentum variables after event (given by `event_name`)\n\nArgs:\nevent_name: name of event after which the method was called.\nSet of values: `\"after_validation\"`, `\"after_batch\"`, `\"after_epoch\"`, `\"after_train_log\"`\ndata: dictionary with parameters values\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def dark(app):\n    \n\n    _apply_base_theme(app)\n\n    darkPalette = QPalette()\n\n    \n    darkPalette.setColor(QPalette.WindowText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))\n    darkPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Midlight, QColor(90, 90, 90))\n    darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35))\n    darkPalette.setColor(QPalette.Text, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.BrightText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.ButtonText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Base, QColor(42, 42, 42))\n    darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))\n    darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n    darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n    darkPalette.setColor(QPalette.HighlightedText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Link, QColor(56, 252, 196))\n    darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))\n    darkPalette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53))\n    darkPalette.setColor(QPalette.ToolTipText, QColor(180, 180, 180))\n\n    \n    darkPalette.setColor(QPalette.Disabled, QPalette.WindowText,\n                         QColor(127, 127, 127))\n    darkPalette.setColor(QPalette.Disabled, QPalette.Text,\n                         QColor(127, 127, 127))\n    darkPalette.setColor(QPalette.Disabled, QPalette.ButtonText,\n                         QColor(127, 127, 127))\n    darkPalette.setColor(QPalette.Disabled, QPalette.Highlight,\n                         QColor(80, 80, 80))\n    darkPalette.setColor(QPalette.Disabled, QPalette.HighlightedText,\n                         QColor(127, 127, 127))\n\n    app.setPalette(darkPalette)", "docstring": "Apply Dark Theme to the Qt application instance.\n\nArgs:\napp (QApplication): QApplication instance.", "source": "juraj-google-style"}
{"code": "def from_config(cls, config):\n    config = config.copy()\n    function_keys = ['kernel_posterior_fn', 'kernel_posterior_tensor_fn', 'kernel_prior_fn', 'kernel_divergence_fn', 'bias_posterior_fn', 'bias_posterior_tensor_fn', 'bias_prior_fn', 'bias_divergence_fn']\n    for function_key in function_keys:\n        serial = config[function_key]\n        function_type = config.pop((function_key + '_type'))\n        if (serial is not None):\n            config[function_key] = tfp_layers_util.deserialize_function(serial, function_type=function_type)\n    return cls(**config)", "docstring": "Creates a layer from its config.\n\nThis method is the reverse of `get_config`, capable of instantiating the\nsame layer from the config dictionary.\n\nArgs:\nconfig: A Python dictionary, typically the output of `get_config`.\n\nReturns:\nlayer: A layer instance.", "source": "codesearchnet"}
{"code": "def can_api_key_access_build(param_name):\n    build_id = (request.args.get(param_name, type=int) or request.form.get(param_name, type=int) or request.json[param_name])\n    utils.jsonify_assert(build_id, 'build_id required')\n    if app.config.get('IGNORE_AUTH'):\n        api_key = models.ApiKey(id='anonymous_superuser', secret='', superuser=True)\n        build = models.Build.query.get(build_id)\n        utils.jsonify_assert((build is not None), 'build must exist', 404)\n    else:\n        ops = _get_api_key_ops()\n        (api_key, build) = ops.can_access_build(build_id)\n    return (api_key, build)", "docstring": "Determines if the current API key can access the build in the request.\n\nArgs:\nparam_name: Parameter name to use for getting the build ID from the\nrequest. Will fetch from GET or POST requests.\n\nReturns:\n(api_key, build) The API Key and the Build it has access to.", "source": "codesearchnet"}
{"code": "def agent_heartbeat(self, agent_id, metrics, run_states):\n        \n        mutation = gql()\n        try:\n            response = self.gql(mutation, variable_values={\n                'id': agent_id,\n                'metrics': json.dumps(metrics),\n                'runState': json.dumps(run_states)})\n        except Exception as e:\n            \n            message = ast.literal_eval(e.args[0])[\"message\"]\n            logger.error('Error communicating with W&B: %s', message)\n            return []\n        else:\n            return json.loads(response['agentHeartbeat']['commands'])", "docstring": "Notify server about agent state, receive commands.\n\nArgs:\nagent_id (str): agent_id\nmetrics (dict): system metrics\nrun_states (dict): run_id: state mapping\nReturns:\nList of commands to execute.", "source": "juraj-google-style"}
{"code": "def _CreateFeedItems(client, feed_details, label_name):\n    feed_item_service = client.GetService('FeedItemService', version='v201809')\n    urls = ('http:\n    operations = [{'operand': {'feedId': feed_details.feed_id, 'attributeValues': [{'feedAttributeId': feed_details.url_attribute_id, 'stringValues': [url]}, {'feedAttributeId': feed_details.label_attribute_id, 'stringValues': [label_name]}]}, 'operator': 'ADD'} for url in urls]\n    feed_item_service.mutate(operations)", "docstring": "Creates the page URLs in the DSA page feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_details: a _DSAFeedDetails instance.\nlabel_name: a str containing the page feed URL label.", "source": "codesearchnet"}
{"code": "def _process_new(self, feed_item):\n    return {'name': feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None), 'url': feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None), 'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None)}", "docstring": "Creates a new landing page DCM object from a feed item representing a landing page from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item representing the landing page from the Bulkdozer\nfeed.\n\nReturns:\nAn landing page object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def __init__(self, worker):\n    \n    super(ClientStatsCollector, self).__init__()\n    self.daemon = True\n\n    self._worker = worker\n\n    self._process = psutil.Process()\n    self._cpu_samples = []\n    self._io_samples = []\n\n    self._last_send_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)\n    self._should_send = False", "docstring": "Initializes the stat collector.\n\nArgs:\nworker: A `GRRClientWorker` instance that spawned this stat collector.", "source": "juraj-google-style"}
{"code": "def free(object_ids, local_only=False, delete_creating_tasks=False):\n    worker = ray.worker.get_global_worker()\n    if (ray.worker._mode() == ray.worker.LOCAL_MODE):\n        return\n    if isinstance(object_ids, ray.ObjectID):\n        object_ids = [object_ids]\n    if (not isinstance(object_ids, list)):\n        raise TypeError('free() expects a list of ObjectID, got {}'.format(type(object_ids)))\n    for object_id in object_ids:\n        if (not isinstance(object_id, ray.ObjectID)):\n            raise TypeError('Attempting to call `free` on the value {}, which is not an ray.ObjectID.'.format(object_id))\n    worker.check_connected()\n    with profiling.profile('ray.free'):\n        if (len(object_ids) == 0):\n            return\n        worker.raylet_client.free_objects(object_ids, local_only, delete_creating_tasks)", "docstring": "Free a list of IDs from object stores.\n\nThis function is a low-level API which should be used in restricted\nscenarios.\n\nIf local_only is false, the request will be send to all object stores.\n\nThis method will not return any value to indicate whether the deletion is\nsuccessful or not. This function is an instruction to object store. If\nthe some of the objects are in use, object stores will delete them later\nwhen the ref count is down to 0.\n\nArgs:\nobject_ids (List[ObjectID]): List of object IDs to delete.\nlocal_only (bool): Whether only deleting the list of objects in local\nobject store or all object stores.\ndelete_creating_tasks (bool): Whether also delete the object creating\ntasks.", "source": "codesearchnet"}
{"code": "def forward(self, x):\n    if self.training and self.drop_prob > 0:\n        return XDropout.apply(x, self.get_context())\n    return x", "docstring": "Call the module\n\nArgs:\nx (`torch.tensor`): The input tensor to apply dropout", "source": "github-repos"}
{"code": "def pull(handle, enumerate=False):\n    assert isinstance(handle, Handle), handle\n    return Pull(handle, enumerate)", "docstring": "Pulls next message for handle.\n\nArgs:\nhandle: A :class:`.stream.Handle` or GroupHandle.\nenumerate (bool): boolean to indicate whether a tuple ``(idx, msg)``\nshould be returned, not unlike Python's enumerate().\n\nReturns:\nA :class:`Pull` task to be yielded. Marv will send the\ncorresponding message as soon as it is available. For groups\nthis message will be a handle to a member of the\ngroup. Members of groups are either streams or groups.\n\nExamples:\nPulling (enumerated) message from stream::\n\nmsg = yield marv.pull(stream)\nidx, msg = yield marv.pull(stream, enumerate=True)\n\nPulling stream from group and message from stream::\n\nstream = yield marv.pull(group)  # a group of streams\nmsg = yield marv.pull(stream)", "source": "codesearchnet"}
{"code": "def __parameter_enum(self, final_subfield):\n    if isinstance(final_subfield, messages.EnumField):\n        enum_descriptor = {}\n        for enum_value in final_subfield.type.to_dict().keys():\n            enum_descriptor[enum_value] = {'backendValue': enum_value}\n        return enum_descriptor", "docstring": "Returns enum descriptor of final subfield if it is an enum.\n\nAn enum descriptor is a dictionary with keys as the names from the enum and\neach value is a dictionary with a single key \"backendValue\" and value equal\nto the same enum name used to stored it in the descriptor.\n\nThe key \"description\" can also be used next to \"backendValue\", but protorpc\nEnum classes have no way of supporting a description for each value.\n\nArgs:\nfinal_subfield: A simple field from the end of a subfield list.\n\nReturns:\nThe enum descriptor for the field, if it's an enum descriptor, else\nreturns None.", "source": "codesearchnet"}
{"code": "def create_reset_score(cls, student_item):\n        \n        \n        \n        \n        \n        return cls.objects.create(\n            student_item=student_item,\n            submission=None,\n            points_earned=0,\n            points_possible=0,\n            reset=True,\n        )", "docstring": "Create a \"reset\" score (a score with a null submission).\n\nOnly scores created after the most recent \"reset\" score\nshould be used to determine a student's effective score.\n\nArgs:\nstudent_item (StudentItem): The student item model.\n\nReturns:\nScore: The newly created \"reset\" score.\n\nRaises:\nDatabaseError: An error occurred while creating the score", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 application,\n                 project_id,\n                 control_client,\n                 next_operation_id=_next_operation_uuid,\n                 timer=datetime.utcnow):\n        \n        self._application = application\n        self._project_id = project_id\n        self._control_client = control_client\n        self._next_operation_id = next_operation_id\n        self._timer = timer", "docstring": "Initializes a new Middleware instance.\n\nArgs:\napplication: the wrapped wsgi application\nproject_id: the project_id thats providing service control support\ncontrol_client: the service control client instance\nnext_operation_id (func): produces the next operation\ntimer (func[[datetime.datetime]]): a func that obtains the current time", "source": "juraj-google-style"}
{"code": "def early_stop_by_value(step_values: List[Tuple[int, float]], metric: Union[str, Callable[[pg.tuning.Measurement], float]]='reward', maximize: bool=True):\n    assert isinstance(step_values, list), step_values\n    for v in step_values:\n        if not isinstance(v, tuple) or len(v) != 2 or (not isinstance(v[0], int)) or (not isinstance(v[1], numbers.Number)):\n            raise ValueError(f'Invalid definition in `step_values`: {v}. Expect a tuple of 2 elements: (step: int, threshold: float).')\n\n    def _cmp(x, y) -> bool:\n        return x < y if maximize else x > y\n\n    def _value(m: pg.tuning.Measurement) -> float:\n        if isinstance(metric, str):\n            return m.reward if metric == 'reward' else m.metrics[metric]\n        assert callable(metric), metric\n        return metric(m)\n\n    def _make_predicate(threshold: float):\n\n        def _predicate(m: pg.tuning.Measurement, unused_history):\n            v = _value(m)\n            ret = _cmp(v, threshold)\n            return ret\n        return _predicate\n    return StepWise([(step, _make_predicate(threshold)) for step, threshold in step_values])", "docstring": "Step-wise early stopping policy based on the value of reward/metric.\n\nExample::\n\npolicy = early_stop_by_value([\n# Stop at step 1 if trial reward is less than 0.2.\n(1, 0.2),\n\n# Stop at step 2 if trial reward is less than 0.8.\n(2, 0.8),\n])()\n\nArgs:\nstep_values: A list of tuple (gating step, value threshold).\ngating step - At which step this rule will be triggered.\nvalue threshold - A float number indicating the threshold value for\nearly stopping.\nmetric: Based on which metric the value should be compared against.\nUse str for metric name or a callable object that takes a measurement\nobject at a given step as input and returns a float value.\nmaximize: If True, reward or metric value below the threshold will be\nstopped, otherwise trials with values above the threshold will be stopped.\n\nReturns:\nA `StepWise` early stopping policy.", "source": "github-repos"}
{"code": "def get_sequence_properties(self, clean_seq=False, representative_only=True):\n        \n        if representative_only:\n            \n            if not self.representative_sequence:\n                log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))\n                return\n\n            \n            if not self.representative_sequence.seq:\n                log.warning('{}: representative sequence {} set, but no sequence stored. '\n                            'Cannot get sequence properties.'.format(self.id, self.representative_sequence.id))\n                return\n\n            self.representative_sequence.get_biopython_pepstats(clean_seq=clean_seq)\n            self.representative_sequence.get_emboss_pepstats()\n\n        if not representative_only:\n            for s in self.sequences:\n                \n                if not s.seq:\n                    log.warning('{}: no sequence stored. '\n                                'Cannot get sequence properties.'.format(s.id))\n                    continue\n\n                else:\n                    s.get_biopython_pepstats(clean_seq=clean_seq)\n                    s.get_emboss_pepstats()", "docstring": "Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of the protein sequences.\nResults are stored in the protein's respective SeqProp objects at ``.annotations``\n\nArgs:\nrepresentative_only (bool): If analysis should only be run on the representative sequence", "source": "juraj-google-style"}
{"code": "def addFixedEffect(self, F=None, A=None):\n    if (A == None):\n        A = SP.eye(self.P)\n    if (F == None):\n        F = SP.ones((self.N, 1))\n    assert (A.shape[1] == self.P), 'Incompatible shape'\n    assert (F.shape[0] == self.N), 'Incompatible shape'\n    if (F.shape[1] > 1):\n        for m in range(F.shape[1]):\n            self.vd.addFixedEffTerm(A, F[(:, m:(m + 1))])\n    else:\n        self.vd.addFixedEffTerm(A, F)\n    self.gp = None\n    self.init = False\n    self.fast = False\n    self.optimum = None\n    self.cache['Sigma'] = None\n    self.cache['Hessian'] = None\n    self.cache['Lparams'] = None\n    self.cache['paramsST'] = None", "docstring": "add fixed effect to the model\n\nArgs:\nF: fixed effect matrix [N,1]\nA: design matrix [K,P] (e.g. SP.ones((1,P)) common effect; SP.eye(P) any effect)", "source": "codesearchnet"}
{"code": "def encrypt(self, message, public_key):\n        \n        \n        max_str_len = rsa.common.byte_size(public_key.n) - 11\n\n        \n        \n        if len(message) > max_str_len:\n            message = textwrap.wrap(message, width=max_str_len)\n        else:\n            message = [message]\n\n        \n        enc_msg = []\n\n        \n        \n        for line in message:\n\n            \n            enc_line = rsa.encrypt(line, public_key)\n\n            \n            \n            enc_line_converted = binascii.b2a_base64(enc_line)\n\n            enc_msg.append(enc_line_converted)\n\n        \n        enc_msg = json.dumps(enc_msg)\n\n        \n        return enc_msg", "docstring": "Encrypts a string using a given rsa.PublicKey object. If the message\nis larger than the key, it will split it up into a list and encrypt\neach line in the list.\n\nArgs:\nmessage (string): The string to encrypt.\npublic_key (rsa.PublicKey): The key object used to encrypt the\nmessage. Only the paired private key can decrypt it.\n\nReturns:\nA json string of the list of encrypted lines of the message.", "source": "juraj-google-style"}
{"code": "def readlink(path):\n    if (sys.getwindowsversion().major < 6):\n        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')\n    try:\n        return salt.utils.path.readlink(path)\n    except OSError as exc:\n        if (exc.errno == errno.EINVAL):\n            raise CommandExecutionError('{0} is not a symbolic link'.format(path))\n        raise CommandExecutionError(exc.__str__())\n    except Exception as exc:\n        raise CommandExecutionError(exc)", "docstring": "Return the path that a symlink points to\n\nThis is only supported on Windows Vista or later.\n\nInline with Unix behavior, this function will raise an error if the path is\nnot a symlink, however, the error raised will be a SaltInvocationError, not\nan OSError.\n\nArgs:\npath (str): The path to the symlink\n\nReturns:\nstr: The path that the symlink points to\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.readlink /path/to/link", "source": "codesearchnet"}
{"code": "def gpio_set(self, pins, states):\n        \n        if len(pins) != len(states):\n            raise ValueError('Length mismatch between pins and states.')\n\n        size = len(pins)\n        indices = (ctypes.c_uint8 * size)(*pins)\n        states = (ctypes.c_uint8 * size)(*states)\n        result_states = (ctypes.c_uint8 * size)()\n        result = self._dll.JLINK_EMU_GPIO_SetState(ctypes.byref(indices),\n                                                   ctypes.byref(states),\n                                                   ctypes.byref(result_states),\n                                                   size)\n        if result < 0:\n            raise errors.JLinkException(result)\n\n        return list(result_states)", "docstring": "Sets the state for one or more user-controllable GPIOs.\n\nFor each of the given pins, sets the the corresponding state based on\nthe index.\n\nArgs:\nself (JLink): the ``JLink`` instance\npins (list): list of GPIO indices\nstates (list): list of states to set\n\nReturns:\nA list of updated states.\n\nRaises:\nJLinkException: on error.\nValueError: if ``len(pins) != len(states)``", "source": "juraj-google-style"}
{"code": "def references_json(references):\n    \n\n    references_json = []\n    for r in references:\n        ref = r.ref\n        ref['attributes'] = r._to_json_like(include_defaults=False)\n        references_json.append(ref)\n\n    return references_json", "docstring": "Given a list of all models in a graph, return JSON representing\nthem and their properties.\n\nArgs:\nreferences (seq[Model]) :\nA list of models to convert to JSON\n\nReturns:\nlist", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n    if (self.initial_layout is None):\n        if self.property_set['layout']:\n            self.initial_layout = self.property_set['layout']\n        else:\n            self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())\n    if (len(dag.qubits()) != len(self.initial_layout)):\n        raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n    if (len(self.coupling_map.physical_qubits) != len(self.initial_layout)):\n        raise TranspilerError('Mappers require to have the layout to be the same size as the coupling map')\n    self.input_layout = self.initial_layout.copy()\n    self.qregs = dag.qregs\n    if (self.seed is None):\n        self.seed = np.random.randint(0, np.iinfo(np.int32).max)\n    self.rng = np.random.RandomState(self.seed)\n    logger.debug('StochasticSwap RandomState seeded with seed=%s', self.seed)\n    new_dag = self._mapper(dag, self.coupling_map, trials=self.trials)\n    return new_dag", "docstring": "Run the StochasticSwap pass on `dag`.\n\nArgs:\ndag (DAGCircuit): DAG to map.\n\nReturns:\nDAGCircuit: A mapped DAG.\n\nRaises:\nTranspilerError: if the coupling map or the layout are not\ncompatible with the DAG", "source": "codesearchnet"}
{"code": "def search(self, files=None, defined_fields=None, **kwargs):\n    if (defined_fields is None):\n        defined_fields = []\n    all_keys = (set(defined_fields) | set(kwargs.keys()))\n    if (not all_keys):\n        raise ValueError('At least one field to search on must be passed.')\n    if (files is None):\n        files = set(self.layout.files.keys())\n    for f in files:\n        self.index_file(f)\n    filesets = [set(self.key_index.get(k, [])) for k in all_keys]\n    matches = reduce((lambda x, y: (x & y)), filesets)\n    if (files is not None):\n        matches &= set(files)\n    if (not matches):\n        return []\n\n    def check_matches(f, key, val):\n        if (isinstance(val, six.string_types) and ('*' in val)):\n            val = ('^%s$' % val).replace('*', '.*')\n            return (re.search(str(self.file_index[f][key]), val) is not None)\n        else:\n            return (val == self.file_index[f][key])\n    for (k, val) in kwargs.items():\n        matches = list(filter((lambda x: check_matches(x, k, val)), matches))\n        if (not matches):\n            return []\n    return matches", "docstring": "Search files in the layout by metadata fields.\n\nArgs:\nfiles (list): Optional list of names of files to search. If None,\nall files in the layout are scanned.\ndefined_fields (list): Optional list of names of fields that must\nbe defined in the JSON sidecar in order to consider the file a\nmatch, but which don't need to match any particular value.\nkwargs: Optional keyword arguments defining search constraints;\nkeys are names of metadata fields, and values are the values\nto match those fields against (e.g., SliceTiming=0.017) would\nreturn all files that have a SliceTiming value of 0.071 in\nmetadata.\n\nReturns: A list of filenames that match all constraints.", "source": "codesearchnet"}
{"code": "def get_hostname(url):\n        \n\n        if url not in URLHelper.__cache:\n            URLHelper.__cache[url] = urlparse(url)\n\n        parts = URLHelper.__cache[url].netloc.split(\".\")\n\n        if len(parts) == 1:\n            return parts[0]\n        else:\n            return \".\".join(parts[-2:-1])", "docstring": "Get the hostname of the given URL.\n\nArgs:\nurl (str): The URL to get the hostname from.\n\nReturns:\nstr: The hostname", "source": "juraj-google-style"}
{"code": "def remove_server_data(server_id):\n    logger.debug('Removing server from serverdata')\n    data = datatools.get_data()\n    if (server_id in data['discord']['servers']):\n        data['discord']['servers'].pop(server_id)\n        datatools.write_data(data)", "docstring": "Remove a server from the server data\n\nArgs:\nserver_id (int): The server to remove from the server data", "source": "codesearchnet"}
{"code": "def name_from_scope_name(name) -> str:\n    return name[:-1] if name and name[-1] == '/' else name", "docstring": "Returns the name of an op given the name of its scope.\n\nArgs:\nname: the name of the scope.\n\nReturns:\nthe name of the op (equal to scope name minus any trailing slash).", "source": "github-repos"}
{"code": "def size(self, path: str) -> int:\n    raise NotImplementedError", "docstring": "Get size in bytes of a file on the FileSystem.\n\nArgs:\npath: string filepath of file.\n\nReturns: int size of file according to the FileSystem.\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "def epoch_to_log_line_timestamp(epoch_time, time_zone=None):\n    \n    s, ms = divmod(epoch_time, 1000)\n    d = datetime.datetime.fromtimestamp(s, tz=time_zone)\n    return d.strftime('%m-%d %H:%M:%S.') + str(ms)", "docstring": "Converts an epoch timestamp in ms to log line timestamp format, which\nis readible for humans.\n\nArgs:\nepoch_time: integer, an epoch timestamp in ms.\ntime_zone: instance of tzinfo, time zone information.\nUsing pytz rather than python 3.2 time_zone implementation for\npython 2 compatibility reasons.\n\nReturns:\nA string that is the corresponding timestamp in log line timestamp\nformat.", "source": "juraj-google-style"}
{"code": "def heightmap_clamp(hm: np.ndarray, mi: float, ma: float) -> None:\n    \n    hm.clip(mi, ma)", "docstring": "Clamp all values on this heightmap between ``mi`` and ``ma``\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nmi (float): The lower bound to clamp to.\nma (float): The upper bound to clamp to.\n\n.. deprecated:: 2.0\nDo ``hm.clip(mi, ma)`` instead.", "source": "juraj-google-style"}
{"code": "def create_forwarding_information_base(self, timeout=-1):\n        \n        uri = \"{}{}\".format(self.data[\"uri\"], self.FORWARDING_INFORMATION_PATH)\n        return self._helper.do_post(uri, None, timeout, None)", "docstring": "Generates the forwarding information base dump file for a logical interconnect.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns: Interconnect Forwarding Information Base DataInfo.", "source": "juraj-google-style"}
{"code": "def save_config(self):\n    if (not self.opts['dirty_config'][1]):\n        if logger.isEnabledFor(logging.INFO):\n            logger.info('Config not saved (not modified)')\n        return 1\n    txt = '\n    copyfile(self.config_file, (self.config_file + '.restore'))\n    if (self.opts['default_station'][1] is None):\n        self.opts['default_station'][1] = '-1'\n    try:\n        with open(self.config_file, 'w') as cfgfile:\n            cfgfile.write(txt.format(self.opts['player'][1], self.opts['default_playlist'][1], self.opts['default_station'][1], self.opts['default_encoding'][1], self.opts['connection_timeout'][1], self.opts['theme'][1], self.opts['use_transparency'][1], self.opts['confirm_station_deletion'][1], self.opts['confirm_playlist_reload'][1], self.opts['auto_save_playlist'][1]))\n    except:\n        if logger.isEnabledFor(logging.ERROR):\n            logger.error('Error saving config')\n        return (- 1)\n    try:\n        remove((self.config_file + '.restore'))\n    except:\n        pass\n    if logger.isEnabledFor(logging.INFO):\n        logger.info('Config saved')\n    self.opts['dirty_config'][1] = False\n    return 0", "docstring": "Save config file\n\nCreates config.restore (back up file)\nReturns:\n-1: Error saving config\n0: Config saved successfully\n1: Config not saved (not modified", "source": "codesearchnet"}
{"code": "def get_output_embeddings(self) -> Union[None, keras.layers.Layer]:\n    if self.get_lm_head() is not None:\n        lm_head = self.get_lm_head()\n        try:\n            return lm_head.get_output_embeddings()\n        except AttributeError:\n            logger.info('Building the model')\n            self.build_in_name_scope()\n            return lm_head().get_output_embeddings()\n    return None", "docstring": "Returns the model's output embeddings\n\nReturns:\n`tf.Variable`: The new weights mapping vocabulary to hidden states.", "source": "github-repos"}
{"code": "def resolve_revision(self, dest, url, rev_options):\n        \n        rev = rev_options.arg_rev\n        sha, is_branch = self.get_revision_sha(dest, rev)\n\n        if sha is not None:\n            rev_options = rev_options.make_new(sha)\n            rev_options.branch_name = rev if is_branch else None\n\n            return rev_options\n\n        \n        \n        if not looks_like_hash(rev):\n            logger.warning(\n                \"Did not find branch or tag '%s', assuming revision or ref.\",\n                rev,\n            )\n\n        if not rev.startswith('refs/'):\n            return rev_options\n\n        \n        self.run_command(\n            ['fetch', '-q', url] + rev_options.to_args(),\n            cwd=dest,\n        )\n        \n        sha = self.get_revision(dest, rev='FETCH_HEAD')\n        rev_options = rev_options.make_new(sha)\n\n        return rev_options", "docstring": "Resolve a revision to a new RevOptions object with the SHA1 of the\nbranch, tag, or ref if found.\n\nArgs:\nrev_options: a RevOptions object.", "source": "juraj-google-style"}
{"code": "def ricker(f, length, dt):\n    \n    t = np.linspace(-int(length/2), int((length-dt)/2), int(length/dt))\n    y = (1. - 2.*(np.pi**2)*(f**2)*(t**2))*np.exp(-(np.pi**2)*(f**2)*(t**2))\n    return t, y", "docstring": "A Ricker wavelet.\n\nArgs:\nf (float): frequency in Haz, e.g. 25 Hz.\nlength (float): Length in s, e.g. 0.128.\ndt (float): sample interval in s, e.g. 0.001.\n\nReturns:\ntuple. time basis, amplitude values.", "source": "juraj-google-style"}
{"code": "def __init__(self, context):\n    \n    self._credentials = context.credentials\n    self._project_id = context.project_id", "docstring": "Initializes the Storage helper with context information.\n\nArgs:\ncontext: a Context object providing project_id and credentials.", "source": "juraj-google-style"}
{"code": "def _start_reader_thread(self, stream, chunks):\n    import io\n    import threading\n\n    def target():\n        while True:\n            chunk = stream.read(io.DEFAULT_BUFFER_SIZE)\n            if (not chunk):\n                break\n            chunks.append(chunk)\n    thread = threading.Thread(target=target)\n    thread.start()\n    return thread", "docstring": "Starts a thread for reading output from FFMPEG.\n\nThe thread reads consecutive chunks from the stream and saves them in\nthe given list.\n\nArgs:\nstream: output stream of the FFMPEG process.\nchunks: list to save output chunks to.\n\nReturns:\nThread", "source": "codesearchnet"}
{"code": "def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:\n    batch_size, height, width, channel = hidden_states.shape\n    pad_h = (window_size - height % window_size) % window_size\n    pad_w = (window_size - width % window_size) % window_size\n    hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))\n    pad_height, pad_width = (height + pad_h, width + pad_w)\n    hidden_states = hidden_states.reshape(batch_size, pad_height \n    windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)\n    return (windows, (pad_height, pad_width))", "docstring": "Args:\nPartition into non-overlapping windows with padding if needed.\nhidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window\nsize.\n\nReturns:\nwindows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].\n(pad_height, pad_width): padded height and width before partition", "source": "github-repos"}
{"code": "def set_extra_selections(self, key, extra_selections):\n        \n        \n        draw_order = DRAW_ORDERS.get(key)\n        if draw_order is None:\n            draw_order = DRAW_ORDERS.get('on_top')\n\n        for selection in extra_selections:\n            selection.draw_order = draw_order\n\n        self.clear_extra_selections(key)\n        self.extra_selections_dict[key] = extra_selections", "docstring": "Set extra selections for a key.\n\nAlso assign draw orders to leave current_cell and current_line\nin the backgrund (and avoid them to cover other decorations)\n\nNOTE: This will remove previous decorations added to  the same key.\n\nArgs:\nkey (str) name of the extra selections group.\nextra_selections (list of sourcecode.api.TextDecoration).", "source": "juraj-google-style"}
{"code": "def get_keys_from_ldap(self, username=None):\n        \n        result_dict = {}\n        filter = ['(sshPublicKey=*)']\n        if username is not None:\n            filter.append('(uid={})'.format(username))\n        attributes = ['uid', 'sshPublicKey']\n        results = self.client.search(filter, attributes)\n        for result in results:\n            result_dict[result.uid.value] = result.sshPublicKey.values\n        return result_dict", "docstring": "Fetch keys from ldap.\n\nArgs:\nusername Username associated with keys to fetch (optional)\n\nReturns:\nArray of dictionaries in '{username: [public keys]}' format", "source": "juraj-google-style"}
{"code": "def _create_state_graph(self, name):\n    \n    import_collections = [\n        tf_v1.GraphKeys.GLOBAL_VARIABLES,\n        tf_v1.GraphKeys.MODEL_VARIABLES,\n        tf_v1.GraphKeys.TABLE_INITIALIZERS,\n        tf_v1.GraphKeys.ASSET_FILEPATHS,  \n        tf_v1.GraphKeys.COND_CONTEXT,\n        tf_v1.GraphKeys.WHILE_CONTEXT,\n    ]\n    if self._trainable:\n      \n      import_collections.extend([tf_v1.GraphKeys.TRAINABLE_VARIABLES,\n                                 tf_v1.GraphKeys.REGULARIZATION_LOSSES])\n\n    absolute_scope_name = tf_v1.get_default_graph().unique_name(\n        name, mark_as_used=False)\n    relative_scope_name = absolute_scope_name.split(\"/\")[-1]\n    assert relative_scope_name == name  \n\n    meta_graph = meta_graph_pb2.MetaGraphDef()\n    meta_graph.CopyFrom(self._meta_graph)\n\n    meta_graph_lib.filter_collections(meta_graph, import_collections)\n    meta_graph_lib.prefix_shared_name_attributes(meta_graph,\n                                                 absolute_scope_name)\n\n    tf_v1.train.import_meta_graph(\n        meta_graph,\n        input_map={},\n        import_scope=relative_scope_name)\n\n    \n    \n    variables_tensor_map = {}\n    for var in tf_v1.global_variables():\n      if var.op.name.startswith(absolute_scope_name + \"/\"):\n        variables_tensor_map[var.name[len(absolute_scope_name)+1:]] = var\n\n    \n    \n    def _get_tensor(tensor_name):\n      return tf_v1.get_default_graph().get_tensor_by_name(\n          meta_graph_lib.prepend_name_scope(\n              tensor_name, import_scope=absolute_scope_name))\n\n    state_op_names = list_registered_stateful_ops_without_inputs()\n    state_map = get_state_map(meta_graph, state_op_names, set(), _get_tensor)\n\n    return variables_tensor_map, state_map", "docstring": "Creates the graph nodes that hold the state of the Module.\n\nArgs:\nname: name scope to create the state graph in.\n\nReturns:\nA tuple consisting of:\nvariables_tensor_map: a map from tensor names in the original graph def\nto the created Variables objects.\nstate_map: a map from tensors names in the original graph def to the\ninstantiated tensors to be used as a state_map.", "source": "juraj-google-style"}
{"code": "def __init__(self, port=None, max_length=UBINT16_MAX_VALUE):\n        \n        super().__init__(action_type=ActionType.OFPAT_OUTPUT, length=8)\n        self.port = port\n        self.max_length = max_length", "docstring": "Create an ActionOutput with the optional parameters below.\n\nArgs:\nport (:class:`~pyof.v0x01.common.phy_port.Port` or :class:`int`):\nOutput port.\nmax_length (int): Max length to send to controller.", "source": "juraj-google-style"}
{"code": "def get_trace(self, project_id, trace_id):\n        \n        trace_pb = self._gapic_api.get_trace(project_id, trace_id)\n        trace_mapping = _parse_trace_pb(trace_pb)\n        return trace_mapping", "docstring": "Gets a single trace by its ID.\n\nArgs:\ntrace_id (str): ID of the trace to return.\nproject_id (str): Required. ID of the Cloud project where the trace\ndata is stored.\n\nReturns:\nA Trace dict.", "source": "juraj-google-style"}
{"code": "def init_app(self, app, client_id=None):\n        \n        if not self.client_id:\n            if client_id:\n                self.client_id = client_id\n            else:\n                self.client_id = app.name", "docstring": "Initialize the Micropub extension if it was not given app\nin the constructor.\n\nArgs:\napp (flask.Flask): the flask application to extend.\nclient_id (string, optional): the IndieAuth client id, will be\ndisplayed when the user is asked to authorize this client. If not\nprovided, the app name will be used.", "source": "juraj-google-style"}
{"code": "def delete(self, filename):\n    for repo in self._children:\n        if hasattr(repo, 'delete'):\n            repo.delete(filename)", "docstring": "Delete a file from all repositories which support it.\n\nIndividual repositories will determine correct location to\ndelete from (Scripts vs. Packages).\n\nThis will not remove the corresponding Package or Script object\nfrom the JSS's database!\n\nArgs:\nfilename: The filename you wish to delete (do not include a\npath).", "source": "codesearchnet"}
{"code": "def from_authorized_user_file(cls, filename, scopes=None):\n        \n        with io.open(filename, 'r', encoding='utf-8') as json_file:\n            data = json.load(json_file)\n            return cls.from_authorized_user_info(data, scopes)", "docstring": "Creates a Credentials instance from an authorized user json file.\n\nArgs:\nfilename (str): The path to the authorized user json file.\nscopes (Sequence[str]): Optional list of scopes to include in the\ncredentials.\n\nReturns:\ngoogle.oauth2.credentials.Credentials: The constructed\ncredentials.\n\nRaises:\nValueError: If the file is not in the expected format.", "source": "juraj-google-style"}
{"code": "def get_centered_molecule(self):\n    center = self.center_of_mass\n    new_coords = (np.array(self.cart_coords) - center)\n    return self.__class__(self.species_and_occu, new_coords, charge=self._charge, spin_multiplicity=self._spin_multiplicity, site_properties=self.site_properties)", "docstring": "Returns a Molecule centered at the center of mass.\n\nReturns:\nMolecule centered with center of mass at origin.", "source": "codesearchnet"}
{"code": "def singleprint(self) -> fingerprinting_pywrap.Singleprint:\n    try:\n        return fingerprinting_pywrap.Singleprint(self.graph_def_program_hash, self.signature_def_hash, self.saved_object_graph_hash, self.checkpoint_hash)\n    except (TypeError, fingerprinting_pywrap.FingerprintException) as e:\n        raise ValueError(f'Encounted invalid fingerprint values when constructing singleprint.graph_def_program_hash: {self.graph_def_program_hash}signature_def_hash: {self.signature_def_hash}saved_object_graph_hash: {self.saved_object_graph_hash}checkpoint_hash: {self.checkpoint_hash}{e}') from None", "docstring": "Canonical fingerprinting ID for a SavedModel.\n\nUniquely identifies a SavedModel based on the regularized fingerprint\nattributes. (saved_model_checksum is sensitive to immaterial changes and\nthus non-deterministic.)\n\nReturns:\nThe string concatenation of `graph_def_program_hash`,\n`signature_def_hash`, `saved_object_graph_hash`, and `checkpoint_hash`\nfingerprint attributes (separated by '/').\n\nRaises:\nValueError: If the fingerprint fields cannot be used to construct the\nsingleprint.", "source": "github-repos"}
{"code": "def _ProcessGRRMessages(self, fs_client_id, grr_messages):\n    \n    grr_client_id = fleetspeak_utils.FleetspeakIDToGRRID(fs_client_id)\n    for grr_message in grr_messages:\n      grr_message.source = grr_client_id\n      grr_message.auth_state = (\n          rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)\n    client_is_new = self.frontend.EnrolFleetspeakClient(client_id=grr_client_id)\n    if not client_is_new and data_store.RelationalDBEnabled():\n      data_store.REL_DB.WriteClientMetadata(\n          grr_client_id, last_ping=rdfvalue.RDFDatetime.Now())\n    self.frontend.ReceiveMessages(\n        client_id=grr_client_id, messages=grr_messages)", "docstring": "Handles messages from GRR clients received via Fleetspeak.\n\nThis method updates the last-ping timestamp of the client before beginning\nprocessing.\n\nArgs:\nfs_client_id: The Fleetspeak client-id for the client.\ngrr_messages: An Iterable of GrrMessages.", "source": "juraj-google-style"}
{"code": "def handle(self, message):\n        \n\n        opcode = message['op']\n        if opcode == 10:\n            self.on_hello(message)\n        elif opcode == 11:\n            self.on_heartbeat(message)\n        elif opcode == 0:\n            self.on_message(message)\n        else:\n            logger.debug(\"Not a message we handle: OPCODE {}\".format(opcode))\n        return", "docstring": "Dispatches messages to appropriate handler based on opcode\n\nArgs:\nmessage (dict): Full message from Discord websocket connection", "source": "juraj-google-style"}
{"code": "def save_subset_weights_to_hdf5_group(f, weights):\n    weight_values = [backend.convert_to_numpy(w) for w in weights]\n    weight_names = [str(w.path).encode('utf8') for w in weights]\n    save_attributes_to_hdf5_group(f, 'weight_names', weight_names)\n    for name, val in zip(weight_names, weight_values):\n        param_dset = f.create_dataset(name, val.shape, dtype=val.dtype)\n        if not val.shape:\n            param_dset[()] = val\n        else:\n            param_dset[:] = val", "docstring": "Save top-level weights of a model to a HDF5 group.\n\nArgs:\nf: HDF5 group.\nweights: List of weight variables.", "source": "github-repos"}
{"code": "def _FormatTokenData(self, token_type, token_data):\n    \n    token_data_format_function = self._TOKEN_DATA_FORMAT_FUNCTIONS.get(\n        token_type)\n    if token_data_format_function:\n      token_data_format_function = getattr(\n          self, token_data_format_function, None)\n\n    if not token_data_format_function:\n      return {}\n\n    return token_data_format_function(token_data)", "docstring": "Formats the token data as a dictionary of values.\n\nArgs:\ntoken_type (int): token type.\ntoken_data (object): token data.\n\nReturns:\ndict[str, str]: formatted token values or an empty dictionary if no\nformatted token values could be determined.", "source": "juraj-google-style"}
{"code": "def build_pipeline_args(cls, project, script, job_params, task_params, reserved_labels, preemptible, logging_uri, scopes, keep_alive):\n    inputs = {}\n    inputs.update({SCRIPT_VARNAME: script})\n    inputs.update({var.name: var.value for var in (job_params['envs'] | task_params['envs']) if var.value})\n    inputs.update({var.name: var.uri for var in (job_params['inputs'] | task_params['inputs']) if ((not var.recursive) and var.value)})\n    outputs = {}\n    for var in (job_params['outputs'] | task_params['outputs']):\n        if (var.recursive or (not var.value)):\n            continue\n        if ('*' in var.uri.basename):\n            outputs[var.name] = var.uri.path\n        else:\n            outputs[var.name] = var.uri\n    labels = {}\n    labels.update({label.name: (label.value if label.value else '') for label in ((reserved_labels | job_params['labels']) | task_params['labels'])})\n    args = {'pipelineArgs': {'projectId': project, 'resources': {'preemptible': preemptible}, 'inputs': inputs, 'outputs': outputs, 'labels': labels, 'serviceAccount': {'email': 'default', 'scopes': scopes}, 'logging': {'gcsPath': logging_uri}}}\n    if keep_alive:\n        args['pipelineArgs']['keep_vm_alive_on_failure_duration'] = ('%ss' % keep_alive)\n    return args", "docstring": "Builds pipeline args for execution.\n\nArgs:\nproject: string name of project.\nscript: Body of the script to execute.\njob_params: dictionary of values for labels, envs, inputs, and outputs\nfor this job.\ntask_params: dictionary of values for labels, envs, inputs, and outputs\nfor this task.\nreserved_labels: dictionary of reserved labels (e.g. task-id,\ntask-attempt)\npreemptible: use a preemptible VM for the job\nlogging_uri: path for job logging output.\nscopes: list of scope.\nkeep_alive: Seconds to keep VM alive on failure\n\nReturns:\nA nested dictionary with one entry under the key pipelineArgs containing\nthe pipeline arguments.", "source": "codesearchnet"}
{"code": "def _build_insert_compiler(self, rows: List[Dict]):\n    objs = []\n    field_count = len(rows[0])\n    for (index, row) in enumerate(rows):\n        if (field_count != len(row)):\n            raise SuspiciousOperation('In bulk upserts, you cannot have rows with different field configurations. Row {0} has a different field config than the first row.'.format(index))\n        objs.append(self.model(**row))\n    self._for_write = True\n    (insert_fields, update_fields) = self._get_upsert_fields(rows[0])\n    query = PostgresInsertQuery(self.model)\n    query.conflict_action = self.conflict_action\n    query.conflict_target = self.conflict_target\n    query.index_predicate = self.index_predicate\n    query.values(objs, insert_fields, update_fields)\n    connection = django.db.connections[self.db]\n    compiler = PostgresInsertCompiler(query, connection, self.db)\n    return compiler", "docstring": "Builds the SQL compiler for a insert query.\n\nArguments:\nrows:\nA list of dictionaries, where each entry\ndescribes a record to insert.\n\nReturns:\nThe SQL compiler for the insert.", "source": "codesearchnet"}
{"code": "def upload_benchmark_data(client, data):\n    test_result = json.loads(data)\n    test_name = str(test_result['name'])\n    start_time = datetime.datetime.utcfromtimestamp(float(test_result['startTime']))\n    batch = []\n    t_key = client.key('Test')\n    t_val = datastore.Entity(t_key, exclude_from_indexes=['info'])\n    t_val.update({'test': test_name, 'start': start_time, 'info': str(data)})\n    batch.append(t_val)\n    for ent in test_result['entries'].get('entry', []):\n        ent_name = str(ent['name'])\n        e_key = client.key('Entry')\n        e_val = datastore.Entity(e_key, exclude_from_indexes=['info'])\n        e_val.update({'test': test_name, 'start': start_time, 'entry': ent_name, 'timing': ent['wallTime'], 'info': str(json.dumps(ent))})\n        batch.append(e_val)\n    client.put_multi(batch)", "docstring": "Parse benchmark data and use the client to upload it to the datastore.\n\nParse the given benchmark data from the serialized JSON-format used to write\nthe test results file.  Create the different datastore Entities from that data\nand upload them to the datastore in a batch using the client connection.\n\nArgs:\nclient: datastore client connection\ndata: JSON-encoded benchmark data", "source": "github-repos"}
{"code": "def console_get_background_flag(con: tcod.console.Console) -> int:\n    \n    return int(lib.TCOD_console_get_background_flag(_console(con)))", "docstring": "Return this consoles current blend mode.\n\nArgs:\ncon (Console): Any Console instance.\n\n.. deprecated:: 8.5\nCheck :any:`Console.default_bg_blend` instead.", "source": "juraj-google-style"}
{"code": "def save_own_variables(self, store):\n    all_vars = self._trainable_variables + self._non_trainable_variables\n    for i, v in enumerate(all_vars):\n        store[f'{i}'] = v", "docstring": "Saves the state of the layer.\n\nYou can override this method to take full control of how the state of\nthe layer is saved upon calling `model.save()`.\n\nArgs:\nstore: Dict where the state of the model will be saved.", "source": "github-repos"}
{"code": "def date_added(self, date_added):\n    date_added = self._utils.format_datetime(date_added, date_format='%Y-%m-%dT%H:%M:%SZ')\n    self._data['dateAdded'] = date_added\n    request = self._base_request\n    request['dateAdded'] = date_added\n    return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels date_added\n\nArgs:\ndate_added: Converted to %Y-%m-%dT%H:%M:%SZ date format", "source": "codesearchnet"}
{"code": "def __driver_completer(self, toks, text, state):\n        \n        if state != 0:\n            return self.__completion_candidates[state]\n\n        \n\n        \n        \n        if not toks or (len(toks) == 1 and text == toks[0]):\n            try:\n                self.__completion_candidates = self.__complete_cmds(text)\n            except:\n                self.stderr.write('\\n')\n                self.stderr.write(traceback.format_exc())\n                self.__completion_candidates = []\n            return self.__completion_candidates[state]\n\n        \n        cmd = toks[0]\n        args = toks[1:] if len(toks) > 1 else None\n        if text and args:\n            del args[-1]\n        if cmd in self._completer_map.keys():\n            completer_name = self._completer_map[cmd]\n            completer_method = getattr(self, completer_name)\n            try:\n                self.__completion_candidates = completer_method(cmd, args, text)\n            except:\n                self.stderr.write('\\n')\n                self.stderr.write(traceback.format_exc())\n                self.__completion_candidates = []\n        else:\n            self.__completion_candidates = []\n\n        return self.__completion_candidates[state]", "docstring": "Driver level completer.\n\nArguments:\ntoks: A list of tokens, tokenized from the original input line.\ntext: A string, the text to be replaced if a completion candidate is\nchosen.\nstate: An integer, the index of the candidate out of the list of\ncandidates.\n\nReturns:\nA string, the candidate.", "source": "juraj-google-style"}
{"code": "def _map_query_path_to_location_info(query_metadata_table):\n    \n    query_path_to_location_info = {}\n    for location, location_info in query_metadata_table.registered_locations:\n        if not isinstance(location, Location):\n            continue\n        if location.query_path in query_path_to_location_info:\n            \n            \n            equivalent_location_info = query_path_to_location_info[location.query_path]\n            if not _location_infos_equal(location_info, equivalent_location_info):\n                raise AssertionError(\n                    u'Differing LocationInfos at query_path {} between {} and {}. Expected '\n                    u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth '\n                    u'and types to be equal for LocationInfos sharing the same query path.'.format(\n                        location.query_path, location_info, equivalent_location_info))\n\n        query_path_to_location_info[location.query_path] = location_info\n    return query_path_to_location_info", "docstring": "Create a map from each query path to a LocationInfo at that path.\n\nArgs:\nquery_metadata_table: QueryMetadataTable, object containing all metadata collected during\nquery processing, including location metadata (e.g. which locations\nare folded or optional).\n\nReturns:\nDict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.", "source": "juraj-google-style"}
{"code": "def __init__(self, message=None, parser_chain=None, path_spec=None):\n    \n    super(ExtractionWarning, self).__init__()\n    self.message = message\n    self.parser_chain = parser_chain\n    self.path_spec = path_spec", "docstring": "Initializes an extraction warning.\n\nArgs:\nmessage (Optional[str]): warning message.\nparser_chain (Optional[str]): parser chain to which the warning applies.\npath_spec (Optional[dfvfs.PathSpec]): path specification of the file entry\nto which the warning applies.", "source": "juraj-google-style"}
{"code": "def mark_typed_map(self, name, type_object):\n    if (not hasattr(type_object, 'dump')):\n        raise ArgumentError(('The passed type object %s is missing required method: dump()' % type_object))\n    if (not hasattr(type_object, 'Restore')):\n        raise ArgumentError(('The passed type object %s is missing required method: Restore()' % type_object))\n\n    def _dump_map(obj):\n        if (obj is None):\n            return None\n        if (not isinstance(obj, dict)):\n            raise DataError(('Property %s marked as list was not a dict: %s' % (name, repr(obj))))\n        return {key: val.dump() for (key, val) in obj.items()}\n\n    def _restore_map(obj):\n        if (obj is None):\n            return obj\n        return {key: type_object.Restore(val) for (key, val) in obj.items()}\n    self.mark_complex(name, _dump_map, _restore_map)", "docstring": "Mark a property as containing a map str to serializable object.\n\nThis convenience method allows you to avoid having to call\n``mark_complex()`` whenever you need to serialize a dict of objects.\nThis method requires that all members of the given dict be of a single\nclass that contains a dump() method and a Restore() class method where\ntype_object.Restore(x.dump()) == x.\n\nArgs:\nname (str): The name of the complex property.\ntype_object: The class object that will be contained inside\nthis dict.", "source": "codesearchnet"}
{"code": "def process_node(layer, node_data):\n    input_tensors = []\n    for input_data in nest.flatten(node_data):\n        input_data = input_data.as_list()\n        inbound_layer_name = input_data[0]\n        inbound_node_index = input_data[1]\n        inbound_tensor_index = input_data[2]\n        if len(input_data) == 3:\n            kwargs = {}\n        elif len(input_data) == 4:\n            kwargs = input_data[3]\n            try:\n                kwargs = _deserialize_keras_tensors(kwargs, created_layers)\n            except IndexError:\n                add_unprocessed_node(layer, node_data)\n                return\n        else:\n            raise ValueError('Improperly formatted model config.')\n        if inbound_layer_name != node_module._CONSTANT_VALUE:\n            inbound_layer = created_layers[inbound_layer_name]\n            inbound_node_index = get_node_index(inbound_layer, inbound_node_index)\n            if inbound_node_index is None:\n                add_unprocessed_node(layer, node_data)\n                return\n            inbound_node = inbound_layer._inbound_nodes[inbound_node_index]\n            input_tensors.append(nest.flatten(inbound_node.outputs)[inbound_tensor_index])\n        else:\n            input_tensors.append(inbound_tensor_index)\n    input_tensors = nest.pack_sequence_as(node_data, input_tensors)\n    if input_tensors is not None:\n        if not layer._preserve_input_structure_in_config:\n            input_tensors = base_layer_utils.unnest_if_single_tensor(input_tensors)\n        output_tensors = layer(input_tensors, **kwargs)\n        output_index = nest.flatten(output_tensors)[0]._keras_history.node_index\n        node_index_map[layer.name, node_count_by_layer[layer]] = output_index\n        node_count_by_layer[layer] += 1", "docstring": "Deserialize a node.\n\nArgs:\nlayer: layer instance.\nnode_data: Nested structure of `ListWrapper`.\n\nRaises:\nValueError: In case of improperly formatted `node_data`.", "source": "github-repos"}
{"code": "def g_step(self, gen_frames, fake_logits_stop):\n    \n    hparam_to_gen_loss = {\n        \"least_squares\": gan_losses.least_squares_generator_loss,\n        \"cross_entropy\": gan_losses.modified_generator_loss,\n        \"wasserstein\": gan_losses.wasserstein_generator_loss\n    }\n\n    fake_logits = self.discriminator(gen_frames)\n    mean_fake_logits = tf.reduce_mean(fake_logits)\n    tf.summary.scalar(\"mean_fake_logits\", mean_fake_logits)\n\n    \n    \n    \n    \n    \n    \n    generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]\n    gan_g_loss_pos_d = generator_loss_func(\n        discriminator_gen_outputs=fake_logits, add_summaries=True)\n    gan_g_loss_neg_d = -generator_loss_func(\n        discriminator_gen_outputs=fake_logits_stop, add_summaries=True)\n    return gan_g_loss_pos_d, gan_g_loss_neg_d", "docstring": "Performs the generator step in computing the GAN loss.\n\nArgs:\ngen_frames: Generated frames\nfake_logits_stop: Logits corresponding to the generated frames as per\nthe discriminator. Assumed to have a stop-gradient term.\nReturns:\ngan_g_loss_pos_d: Loss.\ngan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.", "source": "juraj-google-style"}
{"code": "def validate_file(fn, options=None):\n    \n    file_results = FileValidationResults(filepath=fn)\n    output.info(\"Performing JSON schema validation on %s\" % fn)\n\n    if not options:\n        options = ValidationOptions(files=fn)\n\n    try:\n        with open(fn) as instance_file:\n            file_results.object_results = validate(instance_file, options)\n\n    except Exception as ex:\n        if 'Expecting value' in str(ex):\n            line_no = str(ex).split()[3]\n            file_results.fatal = ValidationErrorResults(\n                'Invalid JSON input on line %s' % line_no\n            )\n        else:\n            file_results.fatal = ValidationErrorResults(ex)\n\n        msg = (\"Unexpected error occurred with file '{fn}'. No further \"\n               \"validation will be performed: {error}\")\n        output.info(msg.format(fn=fn, error=str(ex)))\n\n    file_results.is_valid = (all(object_result.is_valid\n                                 for object_result in file_results.object_results)\n                             and not file_results.fatal)\n\n    return file_results", "docstring": "Validate the input document `fn` according to the options passed in.\n\nIf any exceptions are raised during validation, no further validation\nwill take place.\n\nArgs:\nfn: The filename of the JSON file to be validated.\noptions: An instance of ``ValidationOptions``.\n\nReturns:\nAn instance of FileValidationResults.", "source": "juraj-google-style"}
{"code": "def _get_stack_depth(package, fqdn, defdepth=_def_stackdepth):\n    global _stack_config\n    if (package not in _stack_config):\n        from acorn.config import settings\n        spack = settings(package)\n        _stack_config[package] = {}\n        secname = 'logging.depth'\n        if spack.has_section(secname):\n            for ofqdn in spack.options(secname):\n                _stack_config[package][ofqdn] = spack.getint(secname, ofqdn)\n    usedef = True\n    if (fqdn in _stack_config[package]):\n        result = _stack_config[package][fqdn]\n        usedef = False\n    elif ('*' in _stack_config[package]):\n        result = _stack_config[package]['*']\n        usedef = False\n    else:\n        result = defdepth\n    if (not usedef):\n        msg.gen('Using {} for {} stack depth.'.format(result, fqdn), 3)\n    return result", "docstring": "Loads the stack depth settings from the config file for the specified\npackage.\n\nArgs:\npackage (str): name of the package to get stack depth info for.\nfqdn (str): fully qualified domain name of the member in the package.\ndefdepth (int): default depth when one has not been configured.", "source": "codesearchnet"}
{"code": "def training_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[torch.Tensor]=None) -> torch.Tensor:\n    model.train()\n    if hasattr(self.optimizer, 'train') and callable(self.optimizer.train):\n        self.optimizer.train()\n    inputs = self._prepare_inputs(inputs)\n    if is_sagemaker_mp_enabled():\n        loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)\n        return loss_mb.reduce_mean().detach().to(self.args.device)\n    with self.compute_loss_context_manager():\n        loss = self.compute_loss(model, inputs, num_items_in_batch=num_items_in_batch)\n    del inputs\n    if self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0:\n        if is_torch_xpu_available():\n            torch.xpu.empty_cache()\n        elif is_torch_mlu_available():\n            torch.mlu.empty_cache()\n        elif is_torch_musa_available():\n            torch.musa.empty_cache()\n        elif is_torch_npu_available():\n            torch.npu.empty_cache()\n        elif is_torch_mps_available():\n            torch.mps.empty_cache()\n        elif is_torch_hpu_available():\n            logger.warning('`torch_empty_cache_steps` is set but HPU device/backend does not support empty_cache().')\n        else:\n            torch.cuda.empty_cache()\n    kwargs = {}\n    if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:\n        kwargs['learning_rate'] = self._get_learning_rate()\n    if self.args.n_gpu > 1:\n        loss = loss.mean()\n    if self.use_apex:\n        from apex import amp\n        with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n            scaled_loss.backward()\n    else:\n        if (not self.model_accepts_loss_kwargs or num_items_in_batch is None) and self.compute_loss_func is None:\n            loss = loss / self.args.gradient_accumulation_steps\n        if self.accelerator.distributed_type == DistributedType.DEEPSPEED:\n            kwargs['scale_wrt_gas'] = False\n        self.accelerator.backward(loss, **kwargs)\n        return loss.detach()", "docstring": "Perform a training step on a batch of inputs.\n\nSubclass and override to inject custom behavior.\n\nArgs:\nmodel (`nn.Module`):\nThe model to train.\ninputs (`Dict[str, Union[torch.Tensor, Any]]`):\nThe inputs and targets of the model.\n\nThe dictionary will be unpacked before being fed to the model. Most models expect the targets under the\nargument `labels`. Check your model's documentation for all accepted arguments.\n\nReturn:\n`torch.Tensor`: The tensor with training loss on this batch.", "source": "github-repos"}
{"code": "def __spread__(y, yy, n, x, m):\n  \n  nfac=[0,1,1,2,6,24,120,720,5040,40320,362880]\n  if m > 10. :\n    print('factorial table too small in spread')\n    return\n\n  ix=long(x)\n  if x == float(ix):\n    yy[ix]=yy[ix]+y\n  else:\n    ilo = long(x-0.5*float(m)+1.0)\n    ilo = min( max( ilo , 1 ), n-m+1 )\n    ihi = ilo+m-1\n    nden = nfac[m]\n    fac=x-ilo\n    for j in range(ilo+1,ihi+1): fac = fac*(x-j)\n    yy[ihi] = yy[ihi] + y*fac/(nden*(x-ihi))\n    for j in range(ihi-1,ilo-1,-1):\n      nden=(nden/(j+1-ilo))*(j-ihi)\n      yy[j] = yy[j] + y*fac/(nden*(x-j))", "docstring": "Given an array yy(0:n-1), extirpolate (spread) a value y into\nm actual array elements that best approximate the \"fictional\"\n(i.e., possible noninteger) array element number x. The weights\nused are coefficients of the Lagrange interpolating polynomial\nArguments:\ny :\nyy :\nn :\nx :\nm :\nReturns:", "source": "juraj-google-style"}
{"code": "def is_applicable_python_file(rel_path: str) -> bool:\n    \n    return (rel_path.endswith('.py') and\n            not any(re.search(pat, rel_path) for pat in IGNORED_FILE_PATTERNS))", "docstring": "Determines if a file should be included in incremental coverage analysis.\n\nArgs:\nrel_path: The repo-relative file path being considered.\nReturns:\nWhether to include the file.", "source": "juraj-google-style"}
{"code": "def IsRaised(self, matching=None, containing=None):\n\n    class IsRaisedContext(_EmptySubject):\n        \n\n        def __init__(self, actual, matching=None, containing=None):\n            super(IsRaisedContext, self).__init__(actual)\n            self._matching = matching\n            self._containing = containing\n\n        def __enter__(self):\n            return self\n\n        @asserts_truth\n        def __exit__(self, exc_type, exc, exc_tb):\n            if exc:\n                if issubclass(exc_type, self._actual):\n                    if self._matching is not None:\n                        AssertThat(exc).HasMessageThat().ContainsMatch(self._matching)\n                    if self._containing is not None:\n                        AssertThat(exc).HasMessageThat().Contains(self._containing)\n                else:\n                    self._FailWithSubject('should have been raised, but caught <{0!r}>'.format(exc))\n            else:\n                self._Resolve()\n                self._FailWithSubject('should have been raised, but was not')\n            return True\n    return IsRaisedContext(self._actual, matching=matching, containing=containing)", "docstring": "Asserts that an exception matching this subject is raised.\n\nThe raised exception must be the same type as (or a subclass of) this\nsubject's. None, one, or both of matching= and containing= may be specified.\n\nArgs:\nmatching: string or regex object. If present, the raised exception's\n\"message\" attribute must contain this value, as a regular expression.\ncontaining: string. If present, the raised exception's \"message\" attribute\nmust contain this literal string value.\n\nReturns:\nA context within which an expected exception may be raised and tested.", "source": "github-repos"}
{"code": "def process_output(meta_file, outfile_name, code_links):\n    \n\n    \n    doc_str = '\n    doc_str += 'Generated by [py2md](https:\n    doc_str += strftime(\"%Y-%m-%d %H:%M:%S \") + '\\n\\n'\n\n    \n    \n    if len(meta_file['modules']) > 1:\n        doc_str += \"\n        chapter_num = 1\n        for meta_doc in meta_file['modules']:\n            chapter_name = meta_doc['summary_comment']\n            chapter_link = chapter_name.lstrip().replace('.', '').replace(' ', '-').lower()\n            doc_str += str(chapter_num) + \\\n                '. [' + chapter_name + '](\n            chapter_num += 1\n\n\n    \n    for meta_doc in meta_file['modules']:\n        doc_str += '\n        doc_str += '[source file](' + meta_doc['source_file'] + ')' + '\\n'\n        for function_info in meta_doc['functions']:\n            doc_str += '\n            doc_str += function_info['definition'] + '\\n\\n'\n            if 'comments' in function_info:\n                doc_str += '```\\n' + function_info['comments'] + '\\n```\\n\\n'\n\n    \n    print('Writing file: ' + outfile_name)\n    out_file = open(outfile_name, 'w')\n    out_file.write(doc_str)\n    out_file.close()", "docstring": "Create a markdown format documentation file.\n\nArgs:\nmeta_file (dict): Dictionary with documentation metadata.\noutfile_name (str): Markdown file to write to.", "source": "juraj-google-style"}
{"code": "def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    cookie_name = self._GetRowValue(query_hash, row, 'name')\n    cookie_value = self._GetRowValue(query_hash, row, 'value')\n    path = self._GetRowValue(query_hash, row, 'path')\n\n    hostname = self._GetRowValue(query_hash, row, 'domain')\n    if hostname.startswith('.'):\n      hostname = hostname[1:]\n\n    secure = self._GetRowValue(query_hash, row, 'secure')\n    \n    \n    secure = secure != 0\n\n    if secure:\n      scheme = 'https'\n    else:\n      scheme = 'http'\n\n    url = '{0:s}:\n\n    event_data = WebViewCookieEventData()\n    event_data.cookie_name = cookie_name\n    event_data.data = cookie_value\n    event_data.host = hostname\n    event_data.offset = self._GetRowValue(query_hash, row, '_id')\n    event_data.path = path\n    event_data.query = query\n    event_data.secure = secure\n    event_data.url = url\n\n    timestamp = self._GetRowValue(query_hash, row, 'expires')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n    else:\n      date_time = dfdatetime_semantic_time.SemanticTime('Infinity')\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    \n    \n    for cookie_plugin in self._cookie_plugins:\n      try:\n        cookie_plugin.UpdateChainAndProcess(\n            parser_mediator, cookie_name=cookie_name,\n            cookie_data=cookie_value, url=url)\n      except errors.WrongPlugin:\n        pass", "docstring": "Parses a row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def closestsites(struct_blk, struct_def, pos):\n    \n    blk_close_sites = struct_blk.get_sites_in_sphere(pos, 5, include_index=True)\n    blk_close_sites.sort(key=lambda x: x[1])\n    def_close_sites = struct_def.get_sites_in_sphere(pos, 5, include_index=True)\n    def_close_sites.sort(key=lambda x: x[1])\n\n    return blk_close_sites[0], def_close_sites[0]", "docstring": "Returns closest site to the input position\nfor both bulk and defect structures\nArgs:\nstruct_blk: Bulk structure\nstruct_def: Defect structure\npos: Position\nReturn: (site object, dist, index)", "source": "juraj-google-style"}
{"code": "def __init__(self, action_type=None, length=None):\n        \n        super().__init__()\n        self.action_type = action_type\n        self.length = length", "docstring": "Create an ActionHeader with the optional parameters below.\n\nArgs:\naction_type (~pyof.v0x01.common.action.ActionType):\nThe type of the action.\nlength (int): Length of action, including this header.", "source": "juraj-google-style"}
{"code": "def __get_path_parameters(self, path):\n    path_parameters_by_segment = {}\n    for format_var_name in re.findall(_PATH_VARIABLE_PATTERN, path):\n        first_segment = format_var_name.split('.', 1)[0]\n        matches = path_parameters_by_segment.setdefault(first_segment, [])\n        matches.append(format_var_name)\n    return path_parameters_by_segment", "docstring": "Parses path paremeters from a URI path and organizes them by parameter.\n\nSome of the parameters may correspond to message fields, and so will be\nrepresented as segments corresponding to each subfield; e.g. first.second if\nthe field \"second\" in the message field \"first\" is pulled from the path.\n\nThe resulting dictionary uses the first segments as keys and each key has as\nvalue the list of full parameter values with first segment equal to the key.\n\nIf the match path parameter is null, that part of the path template is\nignored; this occurs if '{}' is used in a template.\n\nArgs:\npath: String; a URI path, potentially with some parameters.\n\nReturns:\nA dictionary with strings as keys and list of strings as values.", "source": "codesearchnet"}
{"code": "def make_analogous_scheme(self, angle=30, mode='ryb'):\n    (h, s, l) = self.__hsl\n    if (mode == 'ryb'):\n        h = rgb_to_ryb(h)\n    h += 360\n    h1 = ((h - angle) % 360)\n    h2 = ((h + angle) % 360)\n    if (mode == 'ryb'):\n        h1 = ryb_to_rgb(h1)\n        h2 = ryb_to_rgb(h2)\n    return (Color((h1, s, l), 'hsl', self.__a, self.__wref), Color((h2, s, l), 'hsl', self.__a, self.__wref))", "docstring": "Return two colors analogous to this one.\n\nArgs:\n:angle:\nThe angle between the hues of the created colors and this one.\n:mode:\nSelect which color wheel to use for the generation (ryb/rgb).\n\nReturns:\nA tuple of grapefruit.Colors analogous to this one.\n\n>>> c1 = Color.from_hsl(30, 1, 0.5)\n\n>>> c2, c3 = c1.make_analogous_scheme(angle=60, mode='rgb')\n>>> c2.hsl\n(330.0, 1.0, 0.5)\n>>> c3.hsl\n(90.0, 1.0, 0.5)\n\n>>> c2, c3 = c1.make_analogous_scheme(angle=10, mode='rgb')\n>>> c2.hsl\n(20.0, 1.0, 0.5)\n>>> c3.hsl\n(40.0, 1.0, 0.5)", "source": "codesearchnet"}
{"code": "def get_branches(self):\n    branches = []\n    for (age, level) in enumerate(self.nodes):\n        branches.append([])\n        for (n, node) in enumerate(level):\n            if (age == 0):\n                p_node = Node(self.pos[:2])\n            else:\n                p_node = self._get_node_parent((age - 1), n)\n            branches[age].append((p_node.get_tuple() + node.get_tuple()))\n    return branches", "docstring": "Get the tree branches as list.\n\nReturns:\nlist: A 2d-list holding the grown branches coordinates as tupel for every age.\nExample:\n[\n[(10, 40, 90, 30)],\n[(90, 30, 100, 40), (90, 30, 300, 60)],\n[(100, 40, 120, 70), (100, 40, 150, 90), ...],\n...\n]", "source": "codesearchnet"}
{"code": "def heightmap_add_hm(hm1: np.ndarray, hm2: np.ndarray, hm3: np.ndarray) -> None:\n    hm3[:] = (hm1[:] + hm2[:])", "docstring": "Add two heightmaps together and stores the result in ``hm3``.\n\nArgs:\nhm1 (numpy.ndarray): The first heightmap.\nhm2 (numpy.ndarray): The second heightmap to add to the first.\nhm3 (numpy.ndarray): A destination heightmap to store the result.\n\n.. deprecated:: 2.0\nDo ``hm3[:] = hm1[:] + hm2[:]`` instead.", "source": "codesearchnet"}
{"code": "def create_blocking_connection(host):\n    return pika.BlockingConnection(amqpdaemon.getConParams(settings.get_amqp_settings()[host.lower()]['vhost']))", "docstring": "Return properly created blocking connection.\n\nArgs:\nhost (str): Host as it is defined in :func:`.get_amqp_settings`.\n\nUses :func:`edeposit.amqp.amqpdaemon.getConParams`.", "source": "codesearchnet"}
{"code": "def adapt(self, all_ouputs: AllOutputs) -> DataPacket:\n        \n        adapted = {}\n        for name, recipe in self.adapting_recipes.items():\n            adapted[name] = self._construct(all_ouputs, recipe)\n        return adapted", "docstring": "Adapt inputs for the transformer included in the step.\n\nArgs:\nall_ouputs: Dict of outputs from parent steps. The keys should\nmatch the names of these steps and the values should be their\nrespective outputs.\n\nReturns:\nDictionary with the same keys as `adapting_recipes` and values\nconstructed according to the respective recipes.", "source": "juraj-google-style"}
{"code": "def calculate(cls, order_id, shipping=None, refund_line_items=None):\n        \n        data = {}\n        if shipping:\n            data['shipping'] = shipping\n        data['refund_line_items'] = refund_line_items or []\n        body = {'refund': data}\n        resource = cls.post(\n            \"calculate\", order_id=order_id, body=json.dumps(body).encode()\n        )\n        return cls(\n            cls.format.decode(resource.body),\n            prefix_options={'order_id': order_id}\n        )", "docstring": "Calculates refund transactions based on line items and shipping.\nWhen you want to create a refund, you should first use the calculate\nendpoint to generate accurate refund transactions.\n\nArgs:\norder_id: Order ID for which the Refund has to created.\nshipping: Specify how much shipping to refund.\nrefund_line_items: A list of line item IDs and quantities to refund.\nReturns:\nUnsaved refund record", "source": "juraj-google-style"}
{"code": "def wrap_module(module, names: Optional[Sequence[str]]=None, where: Optional[Callable[[Type['ClassWrapper']], bool]]=None, export_to: Optional[types.ModuleType]=None, **kwargs):\n    wrapper_classes = []\n    module_name = export_to.__name__ if export_to else None\n    origin_cls_to_wrap_cls = {}\n    for symbol_name in names or dir(module):\n        s = getattr(module, symbol_name)\n        if inspect.isclass(s) and (not where or where(s)):\n            if s in origin_cls_to_wrap_cls:\n                wrapper_class = origin_cls_to_wrap_cls[s]\n            else:\n                wrapper_class = wrap(s, module_name=module_name, **kwargs)\n                origin_cls_to_wrap_cls[s] = wrapper_class\n                wrapper_classes.append(wrapper_class)\n            if export_to:\n                setattr(export_to, symbol_name, wrapper_class)\n    return wrapper_classes", "docstring": "Wrap classes from a module.\n\nFor example, users can wrap all subclasses of `xxx.Base` under module `xxx`::\n\nimport xxx\n\npg.wrap_module(\nxxx, where=lambda c: isinstance(c, xxx.Base))\n\nArgs:\nmodule: A container that contains classes to wrap.\nnames: An optional list of class names. If not provided, all classes under\n`module` will be considered candidates.\nwhere: An optional filter function in signature (user_class) -> bool.\nOnly the classes under `module` with True return value will be wrapped.\nexport_to: An optional module to export the wrapper classes.\n**kwargs: Keyword arguments passed to `wrap`\n\nReturns:\nWrapper classes.", "source": "github-repos"}
{"code": "def getTextlength(text, fontname='helv', fontsize=11, encoding=0):\n    fontname = fontname.lower()\n    basename = Base14_fontdict.get(fontname, None)\n    glyphs = None\n    if (basename == 'Symbol'):\n        glyphs = symbol_glyphs\n    if (basename == 'ZapfDingbats'):\n        glyphs = zapf_glyphs\n    if (glyphs is not None):\n        w = sum([(glyphs[ord(c)][1] if (ord(c) < 256) else glyphs[183][1]) for c in text])\n        return (w * fontsize)\n    if (fontname in Base14_fontdict.keys()):\n        return TOOLS.measure_string(text, Base14_fontdict[fontname], fontsize, encoding)\n    if (fontname in ['china-t', 'china-s', 'china-ts', 'china-ss', 'japan', 'japan-s', 'korea', 'korea-s']):\n        return (len(text) * fontsize)\n    raise ValueError((\"Font '%s' is unsupported\" % fontname))", "docstring": "Calculate length of a string for a given built-in font.\n\nArgs:\nfontname: name of the font.\nfontsize: size of font in points.\nencoding: encoding to use (0=Latin, 1=Greek, 2=Cyrillic).\nReturns:\n(float) length of text.", "source": "codesearchnet"}
{"code": "def stream_file(self, url, folder=None, filename=None, overwrite=False):\n    path = self.get_path_for_url(url, folder, filename, overwrite)\n    f = None\n    try:\n        f = open(path, 'wb')\n        for chunk in self.response.iter_content(chunk_size=10240):\n            if chunk:\n                f.write(chunk)\n                f.flush()\n        return f.name\n    except Exception as e:\n        raisefrom(DownloadError, ('Download of %s failed in retrieval of stream!' % url), e)\n    finally:\n        if f:\n            f.close()", "docstring": "Stream file from url and store in provided folder or temporary folder if no folder supplied.\nMust call setup method first.\n\nArgs:\nurl (str): URL to download\nfilename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).\nfolder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).\noverwrite (bool): Whether to overwrite existing file. Defaults to False.\n\nReturns:\nstr: Path of downloaded file", "source": "codesearchnet"}
{"code": "def first_timestamp(self, event_key=None):\n    \n    if event_key is None:\n      timestamps = [self._trackers[key].first_timestamp\n                    for key in self._trackers]\n      return min(timestamp for timestamp in timestamps if timestamp >= 0)\n    else:\n      return self._trackers[event_key].first_timestamp", "docstring": "Obtain the first timestamp.\n\nArgs:\nevent_key: the type key of the sought events (e.g., constants.NAN_KEY).\nIf None, includes all event type keys.\n\nReturns:\nFirst (earliest) timestamp of all the events of the given type (or all\nevent types if event_key is None).", "source": "juraj-google-style"}
{"code": "def create(self):\n    logging.info('Compiling under python %s...', sys.version)\n    logging.info('Making parfile [%s]...', self.output_filename)\n    remove_if_present(self.output_filename)\n    logging.debug('Compiling file list from [%s]', self.manifest_filename)\n    manifest = manifest_parser.parse(self.manifest_filename)\n    stored_resources = self.scan_manifest(manifest)\n    temp_parfile = self.create_temp_parfile()\n    try:\n        logging.debug('Writing parfile to temp file [%s]...', temp_parfile.name)\n        self.write_bootstrap(temp_parfile)\n        self.write_zip_data(temp_parfile, stored_resources)\n        temp_parfile.close()\n        self.create_final_from_temp(temp_parfile.name)\n    finally:\n        remove_if_present(temp_parfile.name)\n    logging.info('Success!')", "docstring": "Create a .par file on disk\n\nRaises:\nError, IOError, SystemError", "source": "github-repos"}
{"code": "def noise_set_type(n: tcod.noise.Noise, typ: int) -> None:\n    \n    n.algorithm = typ", "docstring": "Set a Noise objects default noise algorithm.\n\nArgs:\ntyp (int): Any NOISE_* constant.", "source": "juraj-google-style"}
{"code": "def check_timer(self, timer_name):\n    if timer_name in self._timers:\n        elapsed = datetime.datetime.now() - self._timers[timer_name]\n        print('%s: %d.%d' % (timer_name, elapsed.seconds, (elapsed.microseconds - elapsed.seconds * 60 * 1000000) / 1000))\n    else:\n        print('timer %s not defined' % timer_name)", "docstring": "Checks and prints the elapsed time of a given timer.\n\nArgs:\ntimer_name: Name of the timer to check and print, it must have been\ninitialized with start_timer.", "source": "github-repos"}
{"code": "def __init__(self,\n                 unique_identifier=None,\n                 revocation_reason=None,\n                 compromise_occurrence_date=None):\n        \n        super(RevokeRequestPayload, self).__init__(\n            tag=enums.Tags.REQUEST_PAYLOAD)\n        self.unique_identifier = unique_identifier\n        self.compromise_occurrence_date = compromise_occurrence_date\n        self.revocation_reason = revocation_reason\n        if self.revocation_reason is None:\n            self.revocation_reason = objects.RevocationReason()\n        self.validate()", "docstring": "Construct a RevokeRequestPayload object.\nArgs:\nunique_identifier (UniqueIdentifier): The UUID of a managed\ncryptographic object.\nrevocation_reason (RevocationReason): The reason why the object was\nrevoked.\ncompromise_occurrence_date (DateTime): the datetime when the object\nwas first believed to be compromised.", "source": "juraj-google-style"}
{"code": "def get_shared_files_from_shake(self, shake_id=None, before=None, after=None):\n    if (before and after):\n        raise Exception('You cannot specify both before and after keys')\n    endpoint = '/api/shakes'\n    if shake_id:\n        endpoint += '/{0}'.format(shake_id)\n    if before:\n        endpoint += '/before/{0}'.format(before)\n    elif after:\n        endpoint += '/after/{0}'.format(after)\n    data = self._make_request(verb='GET', endpoint=endpoint)\n    return [SharedFile.NewFromJSON(f) for f in data['sharedfiles']]", "docstring": "Returns a list of SharedFile objects from a particular shake.\n\nArgs:\nshake_id (int): Shake from which to get a list of SharedFiles\nbefore (str): get 10 SharedFile objects before (but not including)\nthe SharedFile given by `before` for the given Shake.\nafter (str): get 10 SharedFile objects after (but not including)\nthe SharedFile give by `after' for the given Shake.\n\nReturns:\nList (list) of SharedFiles.", "source": "codesearchnet"}
{"code": "def hpo_terms(store, query = None, limit = None):\n    \n    hpo_phenotypes = {}\n    if limit:\n        limit=int(limit)\n\n    hpo_phenotypes['phenotypes'] = list(store.hpo_terms(text=query, limit=limit))\n    return hpo_phenotypes", "docstring": "Retrieves a list of HPO terms from scout database\n\nArgs:\nstore (obj): an adapter to the scout database\nquery (str): the term to search in the database\nlimit (str): the number of desired results\n\nReturns:\nhpo_phenotypes (dict): the complete list of HPO objects stored in scout", "source": "juraj-google-style"}
{"code": "def value_to_single_key_strokes(value):\n    \n    result = []\n    if isinstance(value, Integral):\n        value = str(value)\n\n    for v in value:\n        if isinstance(v, Keys):\n            result.append(v.value)\n        elif isinstance(v, Integral):\n            result.append(str(v))\n        else:\n            result.append(v)\n    return result", "docstring": "Convert value to a list of key strokes\n>>> value_to_single_key_strokes(123)\n['1', '2', '3']\n>>> value_to_single_key_strokes('123')\n['1', '2', '3']\n>>> value_to_single_key_strokes([1, 2, 3])\n['1', '2', '3']\n>>> value_to_single_key_strokes(['1', '2', '3'])\n['1', '2', '3']\nArgs:\nvalue(int|str|list)\nReturns:\nA list of string.", "source": "juraj-google-style"}
{"code": "def _is_propertyable(\n    names,  \n    attrs,  \n    annotations,  \n    attr,  \n):\n    \n    \n    return (\n        attr in annotations\n        and not attr.startswith(\"_\")\n        and not attr.isupper()\n        and \"__{}\".format(attr) not in names\n        and not isinstance(getattr(attrs, attr, None), types.MethodType)\n    )", "docstring": "Determine if an attribute can be replaced with a property.\n\nArgs:\nnames: The complete list of all attribute names for the class.\nattrs: The attribute dict returned by __prepare__.\nannotations: A mapping of all defined annotations for the class.\nattr: The attribute to test.\n\nReturns:\nTrue if the attribute can be replaced with a property; else False.", "source": "juraj-google-style"}
{"code": "def calc_track_errors(model_tracks, obs_tracks, track_pairings):\n    columns = ['obs_track_id', 'translation_error_x', 'translation_error_y', 'start_time_difference', 'end_time_difference']\n    track_errors = pd.DataFrame(index=list(range(len(model_tracks))), columns=columns)\n    for (p, pair) in enumerate(track_pairings):\n        model_track = model_tracks[pair[0]]\n        if (type(pair[1]) in [int, np.int64]):\n            obs_track = obs_tracks[pair[1]]\n        else:\n            obs_track = obs_tracks[pair[1][0]]\n        model_com = model_track.center_of_mass(model_track.start_time)\n        obs_com = obs_track.center_of_mass(obs_track.start_time)\n        track_errors.loc[(pair[0], 'obs_track_id')] = (pair[1] if (type(pair[1]) in [int, np.int64]) else pair[1][0])\n        track_errors.loc[(pair[0], 'translation_error_x')] = (model_com[0] - obs_com[0])\n        track_errors.loc[(pair[0], 'translation_error_y')] = (model_com[1] - obs_com[1])\n        track_errors.loc[(pair[0], 'start_time_difference')] = (model_track.start_time - obs_track.start_time)\n        track_errors.loc[(pair[0], 'end_time_difference')] = (model_track.end_time - obs_track.end_time)\n    return track_errors", "docstring": "Calculates spatial and temporal translation errors between matched\nforecast and observed tracks.\n\nArgs:\nmodel_tracks: List of model track STObjects\nobs_tracks: List of observed track STObjects\ntrack_pairings: List of tuples pairing forecast and observed tracks.\n\nReturns:\npandas DataFrame containing different track errors", "source": "codesearchnet"}
{"code": "def dump(self, destination, with_defaults=False):\n    if isinstance(destination, six.string_types):\n        with open(destination, 'w', encoding='utf-8') as f:\n            self._rw.dump_config_to_file(self._config, f, with_defaults=with_defaults)\n    else:\n        self._rw.dump_config_to_file(self._config, destination, with_defaults=with_defaults)", "docstring": "Write configuration values to the specified destination.\n\nArgs:\ndestination:\nwith_defaults (bool): if ``True``, values of items with no custom values will be included in the output\nif they have a default value set.", "source": "codesearchnet"}
{"code": "def _get_proxy_info(self, _=None):\n    (target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint)\n    sock = None\n    if target_path:\n        sock = self._ssh_tunnel.forward_unix(path=target_path)\n    else:\n        sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port)\n    return SSHTunnelProxyInfo(sock=sock)", "docstring": "Generate a ProxyInfo class from a connected SSH transport\n\nArgs:\n_ (None): Ignored.  This is just here as the ProxyInfo spec requires it.\n\n\nReturns:\nSSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH", "source": "codesearchnet"}
{"code": "def run_makeblastdb(infile, dbtype, outdir=''):\n    (og_dir, name, ext) = utils.split_folder_and_path(infile)\n    if (not outdir):\n        outdir = og_dir\n    outfile_basename = op.join(outdir, name)\n    if (dbtype == 'nucl'):\n        outext = ['.nhr', '.nin', '.nsq']\n    elif (dbtype == 'prot'):\n        outext = ['.phr', '.pin', '.psq']\n    else:\n        raise ValueError('dbtype must be \"nucl\" or \"prot\"')\n    outfile_all = [(outfile_basename + x) for x in outext]\n    db_made = True\n    for f in outfile_all:\n        if (not op.exists(f)):\n            db_made = False\n    if db_made:\n        log.debug('BLAST database already exists at {}'.format(outfile_basename))\n        return outfile_all\n    else:\n        retval = subprocess.call('makeblastdb -in {} -dbtype {} -out {}'.format(infile, dbtype, outfile_basename), shell=True)\n        if (retval == 0):\n            log.debug('Made BLAST database at {}'.format(outfile_basename))\n            return outfile_all\n        else:\n            log.error('Error running makeblastdb, exit code {}'.format(retval))", "docstring": "Make the BLAST database for a genome file.\n\nArgs:\ninfile (str): path to genome FASTA file\ndbtype (str): \"nucl\" or \"prot\" - what format your genome files are in\noutdir (str): path to directory to output database files (default is original folder)\n\nReturns:\nPaths to BLAST databases.", "source": "codesearchnet"}
{"code": "def trimpath(attributes):\n    if ('pathdepth' in attributes):\n        if (attributes['pathdepth'] != 'full'):\n            pathelements = []\n            remainder = attributes['file']\n            limit = int(attributes['pathdepth'])\n            while ((len(pathelements) < limit) and remainder):\n                (remainder, pe) = os.path.split(remainder)\n                pathelements.insert(0, pe)\n            return os.path.join(*pathelements)\n        return attributes['file']\n    return os.path.basename(attributes['file'])", "docstring": "Simplifies the given path.\n\nIf pathdepth is in attributes, the last pathdepth elements will be\nreturned. If pathdepth is \"full\", the full path will be returned.\nOtherwise the filename only will be returned.\n\nArgs:\nattributes: The element attributes.\n\nReturns:\nThe trimmed path.", "source": "codesearchnet"}
{"code": "def dirhash(self, path, **dirhash_opts):\n        \n        path = fs.path(path)\n        last_modified = time.ctime(max(\n            max(os.path.getmtime(os.path.join(root, file)) for file in files)\n            for root,_,files in os.walk(path)))\n\n        db = sqlite3.connect(self.path)\n        c = db.cursor()\n        c.execute(\"SELECT date, hash FROM dirhashcache WHERE path=?\", (path,))\n        cached = c.fetchone()\n\n        if cached:\n            cached_date, cached_hash = cached\n            if cached_date == last_modified:\n                \n                dirhash = cached_hash\n            else:\n                \n                dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts)\n                c.execute(\"UPDATE dirhashcache SET date=?, hash=? WHERE path=?\",\n                          (last_modified, dirhash, path))\n                db.commit()\n        else:\n            \n            dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts)\n            c.execute(\"INSERT INTO dirhashcache VALUES (?,?,?)\",\n                      (path, last_modified, dirhash))\n            db.commit()\n\n        db.close()\n        return dirhash", "docstring": "Compute the hash of a directory.\n\nArguments:\npath: Directory.\n**dirhash_opts: Additional options to checksumdir.dirhash().\n\nReturns:\nstr: Checksum of directory.", "source": "juraj-google-style"}
{"code": "def scaled_dot_product_attention_simple(q, k, v, bias, name=None):\n    with tf.variable_scope(name, default_name='scaled_dot_product_attention_simple'):\n        scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2]))\n        logits = tf.matmul((q * scalar), k, transpose_b=True)\n        if (bias is not None):\n            logits += bias\n        weights = tf.nn.softmax(logits, name='attention_weights')\n        if common_layers.should_generate_summaries():\n            tf.summary.image('attention', tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1)\n        return tf.matmul(weights, v)", "docstring": "Scaled dot-product attention. One head. One spatial dimension.\n\nArgs:\nq: a Tensor with shape [batch, length_q, depth_k]\nk: a Tensor with shape [batch, length_kv, depth_k]\nv: a Tensor with shape [batch, length_kv, depth_v]\nbias: optional Tensor broadcastable to [batch, length_q, length_kv]\nname: an optional string\n\nReturns:\nA Tensor.", "source": "codesearchnet"}
{"code": "def is_process_running(process_name):\n    is_running = False\n    if os.path.isfile('/usr/bin/pgrep'):\n        dev_null = open(os.devnull, 'wb')\n        returncode = subprocess.call(['/usr/bin/pgrep', process_name], stdout=dev_null)\n        is_running = bool((returncode == 0))\n    return is_running", "docstring": "Check if a process with the given name is running.\n\nArgs:\n(str): Process name, e.g. \"Sublime Text\"\n\nReturns:\n(bool): True if the process is running", "source": "codesearchnet"}
{"code": "def _ensure_safe(self):\n    if not self._safe_to_run():\n        raise RuntimeError('There is at least 1 reference to internal data\\n      in the interpreter in the form of a numpy array or slice. Be sure to\\n      only hold the function returned from tensor() if you are using raw\\n      data access.')", "docstring": "Makes sure no numpy arrays pointing to internal buffers are active.\n\nThis should be called from any function that will call a function on\n_interpreter that may reallocate memory e.g. invoke(), ...\n\nRaises:\nRuntimeError: If there exist numpy objects pointing to internal memory\nthen we throw.", "source": "github-repos"}
{"code": "def all_genes(self, build='37'):\n    LOG.info('Fetching all genes')\n    return self.hgnc_collection.find({'build': build}).sort('chromosome', 1)", "docstring": "Fetch all hgnc genes\n\nReturns:\nresult()", "source": "codesearchnet"}
{"code": "def Var(poly, dist=None, **kws):\n    if isinstance(poly, distributions.Dist):\n        x = polynomials.variable(len(poly))\n        (poly, dist) = (x, poly)\n    else:\n        poly = polynomials.Poly(poly)\n    dim = len(dist)\n    if (poly.dim < dim):\n        polynomials.setdim(poly, dim)\n    shape = poly.shape\n    poly = polynomials.flatten(poly)\n    keys = poly.keys\n    N = len(keys)\n    A = poly.A\n    keys1 = numpy.array(keys).T\n    if (dim == 1):\n        keys1 = keys1[0]\n        keys2 = sum(numpy.meshgrid(keys, keys))\n    else:\n        keys2 = numpy.empty((dim, N, N))\n        for i in range(N):\n            for j in range(N):\n                keys2[(:, i, j)] = (keys1[(:, i)] + keys1[(:, j)])\n    m1 = numpy.outer(*([dist.mom(keys1, **kws)] * 2))\n    m2 = dist.mom(keys2, **kws)\n    mom = (m2 - m1)\n    out = numpy.zeros(poly.shape)\n    for i in range(N):\n        a = A[keys[i]]\n        out += ((a * a) * mom[(i, i)])\n        for j in range((i + 1), N):\n            b = A[keys[j]]\n            out += (((2 * a) * b) * mom[(i, j)])\n    out = out.reshape(shape)\n    return out", "docstring": "Element by element 2nd order statistics.\n\nArgs:\npoly (Poly, Dist):\nInput to take variance on.\ndist (Dist):\nDefines the space the variance is taken on. It is ignored if\n``poly`` is a distribution.\n\nReturns:\n(numpy.ndarray):\nElement for element variance along ``poly``, where\n``variation.shape == poly.shape``.\n\nExamples:\n>>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))\n>>> print(chaospy.Var(dist))\n[1. 4.]\n>>> x, y = chaospy.variable(2)\n>>> poly = chaospy.Poly([1, x, y, 10*x*y])\n>>> print(chaospy.Var(poly, dist))\n[  0.   1.   4. 800.]", "source": "codesearchnet"}
{"code": "def filter_iqr(array, lower, upper):\n    \n    upper, lower = iqr(array, upper, lower)\n\n    new = list(array)\n    for x in new[:]:\n        if x < lower or x > upper:\n            new.remove(x)\n\n    return new", "docstring": "Return elements which falls within specified interquartile range.\n\nArguments:\n\narray (list): Sequence of numbers.\nlower (float): Lower bound for IQR, in range 0 <= lower <= 1.\nupper (float): Upper bound for IQR, in range 0 <= upper <= 1.\n\nReturns:\n\nlist: Copy of original list, with elements outside of IQR\nremoved.", "source": "juraj-google-style"}
{"code": "def _module_info_to_proto(module_info, export_scope=None):\n\n    def strip_name_scope(name_scope):\n        return ops.strip_name_scope(name_scope, export_scope)\n\n    def process_leafs(value):\n        return strip_name_scope(_graph_element_to_path(value))\n    module_info_def = module_pb2.SonnetModule(module_name=module_info.module_name, scope_name=strip_name_scope(module_info.scope_name), class_name=module_info.class_name)\n    for connected_subgraph in module_info.connected_subgraphs:\n        connected_subgraph_info_def = module_info_def.connected_subgraphs.add()\n        connected_subgraph_info_def.name_scope = strip_name_scope(connected_subgraph.name_scope)\n        _nested_to_proto(connected_subgraph.inputs, connected_subgraph_info_def.inputs, process_leafs, set())\n        _nested_to_proto(connected_subgraph.outputs, connected_subgraph_info_def.outputs, process_leafs, set())\n    return module_info_def", "docstring": "Serializes `module_into`.\n\nArgs:\nmodule_info: An instance of `ModuleInfo`.\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nAn instance of `module_pb2.SonnetModule`.", "source": "codesearchnet"}
{"code": "def exponential(data):\n    \n    data = np.hstack(([0.0], np.array(data)))\n    cumm = np.cumsum(data)\n\n    def cost(s, t):\n        \n        return -1*(t-s) * (np.log(t-s) - np.log(cumm[t] - cumm[s]))\n\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nexponential distribution with changing mean\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "juraj-google-style"}
{"code": "class Hinge(reduction_metrics.MeanMetricWrapper):\n\n    def __init__(self, name='hinge', dtype=None):\n        super().__init__(fn=hinge, name=name, dtype=dtype)\n        self._direction = 'down'\n\n    def get_config(self):\n        return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the hinge metric between `y_true` and `y_pred`.\n\n`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are\nprovided we will convert them to -1 or 1.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExamples:\n\n>>> m = keras.metrics.Hinge()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])\n>>> m.result()\n1.3\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],\n...                sample_weight=[1, 0])\n>>> m.result()\n1.1", "source": "github-repos"}
{"code": "def ones_like(tensor, dtype=None, name=None, optimize=True):\n    return ones_like_impl(tensor, dtype, name, optimize)", "docstring": "Creates a tensor with all elements set to 1.\n\nSee also `tf.ones`.\n\nGiven a single tensor (`tensor`), this operation returns a tensor of the same\ntype and shape as `tensor` with all elements set to 1. Optionally, you can\nspecify a new type (`dtype`) for the returned tensor.\n\nFor example:\n\n```python\ntensor = tf.constant([[1, 2, 3], [4, 5, 6]])\ntf.ones_like(tensor)  # [[1, 1, 1], [1, 1, 1]]\n```\n\nArgs:\ntensor: A `Tensor`.\ndtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,\n`complex128` or `bool`.\nname: A name for the operation (optional).\noptimize: if true, attempt to statically determine the shape of 'tensor' and\nencode it as a constant.\n\nReturns:\nA `Tensor` with all elements set to 1.", "source": "github-repos"}
{"code": "def _indexed_case_verify_and_canonicalize_args(branch_fns, default, branch_index):\n    if not isinstance(branch_index, tensor.Tensor):\n        raise TypeError(\"'branch_index' must be a Tensor, got {}\".format(type(branch_index)))\n    if not branch_index.dtype.is_integer:\n        raise TypeError(\"'branch_index' must be an integer Tensor, got {}\".format(branch_index.dtype))\n    if not branch_fns:\n        raise ValueError(\"Must provide at least one item in 'branch_fns'\")\n    if not isinstance(branch_fns, (list, tuple, dict)):\n        raise TypeError(\"'branch_fns' must be a list, tuple, or dict\")\n    if isinstance(branch_fns, dict):\n        branch_fns = branch_fns.items()\n    if all((callable(fn) for fn in branch_fns)):\n        branch_fns = list(enumerate(branch_fns))\n    for key_fn_pair in branch_fns:\n        if not isinstance(key_fn_pair, tuple) or len(key_fn_pair) != 2:\n            raise TypeError(f\"Each entry in 'branch_fns' must be a 2-tuple. Received {key_fn_pair}.\")\n        key, branch_fn = key_fn_pair\n        if not isinstance(key, int):\n            raise TypeError('key must be a Python `int`, got {}'.format(type(key)))\n        if not callable(branch_fn):\n            raise TypeError('fn for key {} must be callable.'.format(key))\n    keys = [p[0] for p in branch_fns]\n    if min(keys) < 0 or max(keys) >= len(keys) or len(set(keys)) != len(keys):\n        raise ValueError('branch indices (keys) must form contiguous range of [0 to {}) but found {{{}}}'.format(len(keys), ','.join(map(str, sorted(keys)))))\n    actions = [p[1] for p in sorted(branch_fns)]\n    if default is not None:\n        actions.append(default)\n    return actions", "docstring": "Verifies input arguments for the case function.\n\nArgs:\nbranch_fns: Dict or list of pairs of an `int` and a callable which returns a\nlist of tensors.\ndefault: Optional callable that returns a list of tensors.\nbranch_index: Optional int `Tensor`, which selects for the corresponding\npred_fn_pair.\n\nRaises:\nTypeError: If `branch_fns` is not a list/dictionary.\nTypeError: If `branch_fns` is a list but does not contain 2-tuples or\ncallables.\nTypeError: If `fns[i]` is not callable for any i, or `default` is not\ncallable.\n\nReturns:\nbranch_fns: validated list of callables for each branch (default last).", "source": "github-repos"}
{"code": "def edit_miz(infile: str, outfile: str=None, metar: typing.Union[(str, Metar)]=None, time: str=None, min_wind: int=0, max_wind: int=40) -> str:\n    if (outfile is None):\n        LOGGER.debug('editing in place: %s', infile)\n        outfile = infile\n    else:\n        LOGGER.debug('editing miz file: %s -> %s', infile, outfile)\n    mission_weather = mission_time = None\n    if metar:\n        (error, metar) = emiz.weather.custom_metar.CustomMetar.get_metar(metar)\n        if error:\n            return error\n        mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind)\n    if time:\n        try:\n            mission_time = MissionTime.from_string(time)\n        except ValueError:\n            return f'badly formatted time string: {time}'\n    if ((not mission_weather) and (not mission_time)):\n        return 'nothing to do!'\n    with Miz(infile) as miz:\n        if mission_weather:\n            LOGGER.debug('applying MissionWeather')\n            if (not mission_weather.apply_to_miz(miz)):\n                return 'error while applying METAR to mission'\n        if mission_time:\n            LOGGER.debug('applying MissionTime')\n            if (not mission_time.apply_to_miz(miz)):\n                return 'error while setting time on mission'\n        try:\n            miz.zip(outfile)\n            return ''\n        except OSError:\n            return f'permission error: cannot edit \"{outfile}\"; maybe it is in use ?'", "docstring": "Edit an opened MIZ file and sets the time and date and the weather\n\nArgs:\ninfile: source file\noutfile: output file (will default to source file)\nmetar: metar string, ICAO or object to apply\ntime: time string to apply (YYYYMMDDHHMMSS)\nmin_wind: minimum wind\nmax_wind: maximum wind\n\nReturns:\nString containing error", "source": "codesearchnet"}
{"code": "def save(self, path='speech'):\n    if (self._data is None):\n        raise Exception(\"There's nothing to save\")\n    extension = ('.' + self.__params['format'])\n    if (os.path.splitext(path)[1] != extension):\n        path += extension\n    with open(path, 'wb') as f:\n        for d in self._data:\n            f.write(d)\n    return path", "docstring": "Save data in file.\n\nArgs:\npath (optional): A path to save file. Defaults to \"speech\".\nFile extension is optional. Absolute path is allowed.\n\nReturns:\nThe path to the saved file.", "source": "codesearchnet"}
{"code": "def save(self):\n    args = [('StartLocalTime', self.start_time.strftime(TIME_FORMAT)), ('Duration', ('' if (self.duration is None) else self.duration.strftime(TIME_FORMAT))), ('Recurrence', self.recurrence), ('Enabled', ('1' if self.enabled else '0')), ('RoomUUID', self.zone.uid), ('ProgramURI', ('x-rincon-buzzer:0' if (self.program_uri is None) else self.program_uri)), ('ProgramMetaData', self.program_metadata), ('PlayMode', self.play_mode), ('Volume', self.volume), ('IncludeLinkedZones', ('1' if self.include_linked_zones else '0'))]\n    if (self._alarm_id is None):\n        response = self.zone.alarmClock.CreateAlarm(args)\n        self._alarm_id = response['AssignedID']\n        Alarm._all_alarms[self._alarm_id] = self\n    else:\n        args.insert(0, ('ID', self._alarm_id))\n        self.zone.alarmClock.UpdateAlarm(args)", "docstring": "Save the alarm to the Sonos system.\n\nRaises:\n~soco.exceptions.SoCoUPnPException: if the alarm cannot be created\nbecause there\nis already an alarm for this room at the specified time.", "source": "codesearchnet"}
{"code": "def contextmanager(target):\n    context_manager = _contextlib.contextmanager(target)\n    return tf_decorator.make_decorator(target, context_manager, 'contextmanager')", "docstring": "A tf_decorator-aware wrapper for `contextlib.contextmanager`.\n\nUsage is identical to `contextlib.contextmanager`.\n\nArgs:\ntarget: A callable to be wrapped in a contextmanager.\nReturns:\nA callable that can be used inside of a `with` statement.", "source": "github-repos"}
{"code": "def _full_axis_reduce_along_select_indices(self, func, axis, index):\n        \n        \n        old_index = self.index if axis else self.columns\n        numeric_indices = [i for i, name in enumerate(old_index) if name in index]\n        result = self.data.apply_func_to_select_indices_along_full_axis(\n            axis, func, numeric_indices\n        )\n        return result", "docstring": "Reduce Manger along select indices using function that needs full axis.\n\nArgs:\nfunc: Callable that reduces the dimension of the object and requires full\nknowledge of the entire axis.\naxis: 0 for columns and 1 for rows. Defaults to 0.\nindex: Index of the resulting QueryCompiler.\n\nReturns:\nA new QueryCompiler object with index or BaseFrameManager object.", "source": "juraj-google-style"}
{"code": "def __request_start(self, queue_item):\n    try:\n        action = self.__options.callbacks.request_before_start(self.queue, queue_item)\n    except Exception as e:\n        action = None\n        print(e)\n        print(traceback.format_exc())\n    if (action == CrawlerActions.DO_STOP_CRAWLING):\n        self.__should_stop = True\n    if (action == CrawlerActions.DO_SKIP_TO_NEXT):\n        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)\n        self.__should_spawn_new_requests = True\n    if ((action == CrawlerActions.DO_CONTINUE_CRAWLING) or (action is None)):\n        self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS)\n        thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item)\n        self.__threads[queue_item.get_hash()] = thread\n        thread.daemon = True\n        thread.start()", "docstring": "Execute the request in given queue item.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape.", "source": "codesearchnet"}
{"code": "def bleu_score(predictions, labels, **unused_kwargs):\n  \n  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n  \n  outputs = tf.squeeze(outputs, axis=[-1, -2])\n  labels = tf.squeeze(labels, axis=[-1, -2])\n\n  bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)\n  return bleu, tf.constant(1.0)", "docstring": "BLEU score computation between labels and predictions.\n\nAn approximate BLEU scoring method since we do not glue word pieces or\ndecode the ids and tokenize the output. By default, we use ngram order of 4\nand use brevity penalty. Also, this does not have beam search.\n\nArgs:\npredictions: tensor, model predictions\nlabels: tensor, gold output.\n\nReturns:\nbleu: int, approx bleu score", "source": "juraj-google-style"}
{"code": "async def get_match(self, m_id, force_update=False) -> Match:\n    found_m = self._find_match(m_id)\n    if (force_update or (found_m is None)):\n        (await self.get_matches())\n        found_m = self._find_match(m_id)\n    return found_m", "docstring": "get a single match by id\n\n|methcoro|\n\nArgs:\nm_id: match id\nforce_update (default=False): True to force an update to the Challonge API\n\nReturns:\nMatch\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def shell(commands, splitlines=False, ignore_errors=False):\n    \n\n    if isinstance(commands, six.string_types):\n        commands = [commands]\n\n    all_stdout = []\n\n    \n    \n    print_output = (\n        pseudo_state.print_output\n        if pseudo_state.isset()\n        else False\n    )\n\n    for command in commands:\n        print_prefix = 'localhost: '\n\n        if print_output:\n            print('{0}>>> {1}'.format(print_prefix, command))\n\n        process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT)\n\n        stdout = read_buffer(\n            process.stdout,\n            print_output=print_output,\n            print_func=lambda line: '{0}{1}'.format(print_prefix, line),\n        )\n\n        \n        result = process.wait()\n\n        \n        process.stdout.close()\n\n        if result > 0 and not ignore_errors:\n            raise PyinfraError(\n                'Local command failed: {0}\\n{1}'.format(command, stdout),\n            )\n\n        all_stdout.extend(stdout)\n\n    if not splitlines:\n        return '\\n'.join(all_stdout)\n\n    return all_stdout", "docstring": "Subprocess based implementation of pyinfra/api/ssh.py's ``run_shell_command``.\n\nArgs:\ncommands (string, list): command or list of commands to execute\nspltlines (bool): optionally have the output split by lines\nignore_errors (bool): ignore errors when executing these commands", "source": "juraj-google-style"}
{"code": "def _add_arg_java(self, key, value, mask=False):\n        \n        if isinstance(value, bool):\n            value = int(value)\n        self._data[key] = value\n        self._args.append('{}{}={}'.format('-D', key, value))\n        self._args_quoted.append(self.quote('{}{}={}'.format('-D', key, value)))\n        if mask:\n            value = 'x' * len(str(value))\n        self._args_masked.append('{}{}={}'.format('-D', key, value))", "docstring": "Add CLI Arg formatted specifically for Java.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).\nmask (boolean, default:False): Indicates whether no mask value.", "source": "juraj-google-style"}
{"code": "def load(fh, single=False):\n    \n    ms = deserialize(fh)\n    if single:\n        ms = next(ms)\n    return ms", "docstring": "Deserialize DMRX from a file (handle or filename)\n\nArgs:\nfh (str, file): input filename or file object\nsingle: if `True`, only return the first read Xmrs object\nReturns:\na generator of Xmrs objects (unless the *single* option is\n`True`)", "source": "juraj-google-style"}
{"code": "def call(self, context: tf.Tensor, latents: tf.Tensor) -> tf.Tensor:\n    context = self.context_layer_norm(context)\n    latents = self.latents_layer_norm(latents)\n    batch_size, seq_length, embed_dim = shape_list(context)\n    q = self.q_proj(latents)\n    k = self.k_proj(tf.concat([context, latents], axis=-2))\n    v = self.v_proj(tf.concat([context, latents], axis=-2))\n    q, k, v = [tf.transpose(tf.reshape(x, (batch_size, x.shape[1], self.n_heads, self.head_dim)), perm=[0, 2, 1, 3]) for x in (q, k, v)]\n    if self.qk_layer_norms:\n        q = self.q_layer_norm(q)\n        k = self.k_layer_norm(k)\n    scores = tf.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k)\n    stabilized_scores = scores - tf.reduce_max(scores, axis=-1, keepdims=True)\n    attn = tf.nn.softmax(stabilized_scores, axis=-1)\n    resampled = tf.einsum('... i j, ... j d -> ... i d', attn, v)\n    return self.output_proj(tf.reshape(tf.transpose(resampled, perm=[0, 2, 1, 3]), (batch_size, -1, self.n_heads * self.head_dim)))", "docstring": "Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!\n\nArgs:\ncontext (`tf.Tensor`):\nTensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.\nlatents (`tf.Tensor`):\nTensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.\n\nReturns:\n`tf.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross\nfrom context.", "source": "github-repos"}
{"code": "def cleave_sequence(input_layer, unroll=None):\n    if (unroll is None):\n        raise ValueError('You must set unroll either here or in the defaults.')\n    shape = input_layer.shape\n    if ((shape[0] is not None) and ((shape[0] % unroll) != 0)):\n        raise ValueError(('Must divide the split dimension evenly: %d mod %d != 0' % (shape[0], unroll)))\n    if (unroll <= 0):\n        raise ValueError(('Unroll must be > 0: %s' % unroll))\n    elif (unroll == 1):\n        splits = [input_layer.tensor]\n    else:\n        splits = tf.split(value=input_layer.tensor, num_or_size_splits=unroll, axis=0)\n    result = input_layer.with_sequence(splits)\n    defaults = result.defaults\n    if ('unroll' in defaults):\n        del defaults['unroll']\n    return result", "docstring": "Cleaves a tensor into a sequence, this is the inverse of squash.\n\nRecurrent methods unroll across an array of Tensors with each one being a\ntimestep.  This cleaves the first dim so that each it is an array of Tensors.\nIt is the inverse of squash_sequence.\n\nArgs:\ninput_layer: The input layer.\nunroll: The number of time steps.\nReturns:\nA PrettyTensor containing an array of tensors.\nRaises:\nValueError: If unroll is not specified and it has no default or it is <= 0.", "source": "codesearchnet"}
{"code": "def print_generic_type(self, t: types.BaseValue) -> str:", "docstring": "Returns a string of the generic type of t.\n\nFor example, if t is `[0]`, then this method returns \"list[int]\".\n\nArgs:\nt: An abstract value.", "source": "github-repos"}
{"code": "def astimezone(self, tzinfo):\n    assert (self.tzinfo is not None)\n    tzinfo = _tzinfome(tzinfo)\n    d = self.asdatetime(naive=False).astimezone(tzinfo)\n    return type(self)(d)", "docstring": "Returns a version of this timestamp converted to the given timezone.\n\nArgs:\ntzinfo: Either a datetime.tzinfo object or a string (which will be looked\nup in pytz.\n\nReturns:\nA datetime_tz object in the given timezone.", "source": "codesearchnet"}
{"code": "def listChecksumAlgorithms(self, vendorSpecific=None):\n        \n        response = self.listChecksumAlgorithmsResponse(vendorSpecific)\n        return self._read_dataone_type_response(response, 'ChecksumAlgorithmList')", "docstring": "See Also: listChecksumAlgorithmsResponse()\n\nArgs:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def chunk_sequence(sequence, chunk_length=200, padding_value=0):\n  \n  if 'length' in sequence:\n    length = sequence.pop('length')\n  else:\n    length = tf.shape(tools.nested.flatten(sequence)[0])[0]\n  num_chunks = (length - 1) \n  padding_length = chunk_length * num_chunks - length\n  padded = tools.nested.map(\n      \n      lambda tensor: tf.concat([\n          tensor, 0 * tensor[:padding_length] + padding_value], 0),\n      sequence)\n  chunks = tools.nested.map(\n      \n      lambda tensor: tf.reshape(\n          tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),\n      padded)\n  chunks['length'] = tf.concat([\n      chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),\n      [chunk_length - padding_length]], 0)\n  return chunks", "docstring": "Split a nested dict of sequence tensors into a batch of chunks.\n\nThis function does not expect a batch of sequences, but a single sequence. A\n`length` key is added if it did not exist already.\n\nArgs:\nsequence: Nested dict of tensors with time dimension.\nchunk_length: Size of chunks the sequence will be split into.\npadding_value: Value used for padding the last chunk after the sequence.\n\nReturns:\nNested dict of sequence tensors with chunk dimension.", "source": "juraj-google-style"}
{"code": "def get_sessions(self, app_path=None):\n    if (app_path is not None):\n        return self._tornado.get_sessions(app_path)\n    all_sessions = []\n    for path in self._tornado.app_paths:\n        all_sessions += self._tornado.get_sessions(path)\n    return all_sessions", "docstring": "Gets all currently active sessions for applications.\n\nArgs:\napp_path (str, optional) :\nThe configured application path for the application to return\nsessions for. If None, return active sessions for all\napplications. (default: None)\n\nReturns:\nlist[ServerSession]", "source": "codesearchnet"}
{"code": "def submit(self, job):\n    self._check_job(job)\n    if (job.workingdir is None):\n        job.workingdir = self.default_wdir\n    job.imageid = du.create_provisioned_image(self.client, job.image, job.workingdir, job.inputs)\n    container_args = self._generate_container_args(job)\n    job.rundata.container = self.client.create_container(job.imageid, **container_args)\n    self.client.start(job.rundata.container)\n    job.rundata.containerid = job.rundata.container['Id']\n    job.jobid = job.rundata.containerid", "docstring": "Submit job to the engine\n\nArgs:\njob (pyccc.job.Job): Job to submit", "source": "codesearchnet"}
{"code": "def _flag_value_as_int_list(self, wanted_flag_name):\n    int_list = []\n    found, flag_value = self.get_flag_value(wanted_flag_name)\n    if found and flag_value:\n        try:\n            integer_values = flag_value.split(',')\n            int_list = [int(int_val) for int_val in integer_values]\n        except ValueError:\n            logging.warning('Cannot convert %s to int for flag %s', int_list, wanted_flag_name)\n    return int_list", "docstring": "Returns the integer list of a TensorTracer flag.\n\nArgs:\nwanted_flag_name: the name of the flag we are looking for.\n\nReturns:\nthe value of the flag.\nRaises:\nRuntimeError: If supposedly deadcode is reached.", "source": "github-repos"}
{"code": "def saveable_objects_from_trackable(obj, tf1_saver=False):\n    if isinstance(obj, python_state.PythonState):\n        return {python_state.PYTHON_STATE: functools.partial(_PythonStringStateSaveable, state_callback=obj.serialize, restore_callback=obj.deserialize)}\n    if tf1_saver:\n        saveable_factories = obj._gather_saveables_for_checkpoint()\n        if saveable_factories:\n            return saveable_factories\n    if trackable_has_serialize_to_tensor(obj):\n\n        def create_saveable(name='', call_with_mapped_captures=None):\n            save_fn = obj._serialize_to_tensors\n            if call_with_mapped_captures and isinstance(save_fn, core.ConcreteFunction):\n                tensor_dict = call_with_mapped_captures(save_fn, [])\n            else:\n                tensor_dict = save_fn()\n            specs = []\n            local_names = []\n            for tensor_name, maybe_tensor in tensor_dict.items():\n                local_names.append(tensor_name)\n                if not isinstance(maybe_tensor, dict):\n                    maybe_tensor = {'': maybe_tensor}\n                spec_name = name + trackable_utils.escape_local_name(tensor_name)\n                for slice_spec, tensor in maybe_tensor.items():\n                    if isinstance(tensor, saveable_object.SaveSpec):\n                        spec = tensor\n                        spec.name = spec_name\n                        spec.slice_spec = slice_spec\n                    else:\n                        spec = saveable_object.SaveSpec(tensor, slice_spec, spec_name)\n                    specs.append(spec)\n            return TrackableSaveable(obj=obj, specs=specs, name=name, local_names=local_names, prefix=saveable_compat.get_saveable_name(obj) or '', call_with_mapped_captures=call_with_mapped_captures)\n        return {trackable_utils.SERIALIZE_TO_TENSORS_NAME: create_saveable}\n    else:\n        return obj._gather_saveables_for_checkpoint()", "docstring": "Returns SaveableObject factory dict from a Trackable.\n\nArgs:\nobj: A `Trackable`\ntf1_saver: Boolean, whether this is being called from a TF1 Saver (\n`tf.compat.v1.train.Saver`). When this is True, the SaveableObject will\nbe generated from `obj`'s legacy `_gather_saveables_for_checkpoint` fn.\nWhen saving with TF2, `Trackable._serialize_from_tensors` is preferred.\n\nReturns:\nA dict mapping attribute names to SaveableObject factories (callables that\nproduce a SaveableObject).", "source": "github-repos"}
{"code": "def validate_format(self, **kwargs):\n    args = dict(dict_type=self._dict, allow_no_value=self._allow_no_value, inline_comment_prefixes=self._inline_comment_prefixes, strict=self._strict, empty_lines_in_values=self._empty_lines_in_values)\n    args.update(kwargs)\n    parser = ConfigParser(**args)\n    updated_cfg = str(self)\n    parser.read_string(updated_cfg)", "docstring": "Call ConfigParser to validate config\n\nArgs:\nkwargs: are passed to :class:`configparser.ConfigParser`", "source": "codesearchnet"}
{"code": "def generate_main(self, main_filename, boilerplate_contents):\n    with io.open(main_filename, 'rt', encoding='latin-1') as main_file:\n        original_content = main_file.read()\n    match = re.match(_boilerplate_insertion_regex, original_content)\n    assert match, original_content\n    assert len(match.group('before')) + len(match.group('after')) == len(original_content), (match, original_content)\n    new_content = match.group('before') + boilerplate_contents + match.group('after')\n    encoded_content = new_content.encode('latin-1')\n    return stored_resource.StoredContent('__main__.py', self.timestamp_tuple, encoded_content)", "docstring": "Generate the contents of the __main__.py file\n\nWe take the module that is specified as the main entry point,\nand insert some boilerplate to invoke import helper code.\n\nReturns:\nA StoredResource", "source": "github-repos"}
{"code": "def build_data(data_path, size, dataset):\n    image_size = 32\n    if (dataset == 'cifar10'):\n        label_bytes = 1\n        label_offset = 0\n    elif (dataset == 'cifar100'):\n        label_bytes = 1\n        label_offset = 1\n    depth = 3\n    image_bytes = ((image_size * image_size) * depth)\n    record_bytes = ((label_bytes + label_offset) + image_bytes)\n\n    def load_transform(value):\n        record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])\n        label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)\n        depth_major = tf.reshape(tf.slice(record, [label_bytes], [image_bytes]), [depth, image_size, image_size])\n        image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)\n        return (image, label)\n    data_files = tf.gfile.Glob(data_path)\n    data = tf.contrib.data.FixedLengthRecordDataset(data_files, record_bytes=record_bytes)\n    data = data.map(load_transform)\n    data = data.batch(size)\n    iterator = data.make_one_shot_iterator()\n    return iterator.get_next()", "docstring": "Creates the queue and preprocessing operations for the dataset.\n\nArgs:\ndata_path: Filename for cifar10 data.\nsize: The number of images in the dataset.\ndataset: The dataset we are using.\n\nReturns:\nqueue: A Tensorflow queue for extracting the images and labels.", "source": "codesearchnet"}
{"code": "def get_text_features(self, input_ids=None, attention_mask=None, position_ids=None, token_type_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1]\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`CLIPTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import VisionTextDualEncoderModel, AutoTokenizer\n\n>>> model = VisionTextDualEncoderModel.from_pretrained(\"clip-italian/clip-italian\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"clip-italian/clip-italian\")\n\n>>> inputs = tokenizer([\"una foto di un gatto\", \"una foto di un cane\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def check(self, url: str) -> Optional[dict]:\n        \n        data = self.data.get(url)\n        if data:\n            data = self._check_expiration(url, data)\n        return data.data if data else None", "docstring": "Check if data for a url has expired.\n\nData is not fetched again if it has expired.\n\nArgs:\nurl: url to check expiration on\n\nReturns:\nvalue of the data, possibly None", "source": "juraj-google-style"}
{"code": "def versions(self):\n    if (not self.__versions):\n        self.__versions = Versions(self.__connection)\n    return self.__versions", "docstring": "Gets the Version API client.\n\nReturns:\nVersion:", "source": "codesearchnet"}
{"code": "def _get_flow_for_token(csrf_token, request):\n    flow_pickle = request.session.get(_FLOW_KEY.format(csrf_token), None)\n    return (None if (flow_pickle is None) else jsonpickle.decode(flow_pickle))", "docstring": "Looks up the flow in session to recover information about requested\nscopes.\n\nArgs:\ncsrf_token: The token passed in the callback request that should\nmatch the one previously generated and stored in the request on the\ninitial authorization view.\n\nReturns:\nThe OAuth2 Flow object associated with this flow based on the\nCSRF token.", "source": "codesearchnet"}
{"code": "def deprecate_entity(\n        self,\n        ilx_id: str,\n        note = None,\n        ) -> None:\n        \n\n        term_id, term_version = [(d['id'], d['version'])\n            for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0]\n\n        annotations = [{\n            'tid': term_id,\n            'annotation_tid': '306375', \n            'value': 'True',\n            'term_version': term_version,\n            'annotation_term_version': '1', \n        }]\n        if note:\n            editor_note = {\n                'tid': term_id,\n                'annotation_tid': '306378', \n                'value': note,\n                'term_version': term_version,\n                'annotation_term_version': '1', \n            }\n            annotations.append(editor_note)\n        self.addAnnotations(annotations, crawl=True, _print=False)\n        print(annotations)", "docstring": "Tagged term in interlex to warn this term is no longer used\n\nThere isn't an proper way to delete a term and so we have to mark it so I can\nextrapolate that in mysql/ttl loads.\n\nArgs:\nterm_id: id of the term of which to be deprecated\nterm_version: version of the term of which to be deprecated\n\nExample: deprecateTerm('ilx_0101431', '6')", "source": "juraj-google-style"}
{"code": "def remove_model_config_classes_from_config_check(model_config_classes):\n    filename = REPO_PATH / 'utils/check_config_attributes.py'\n    with open(filename, 'r') as f:\n        check_config_attributes = f.read()\n    in_special_cases_to_allow = False\n    in_indent = False\n    new_file_lines = []\n    for line in check_config_attributes.split('\\n'):\n        indent = get_line_indent(line)\n        if line.strip() == 'SPECIAL_CASES_TO_ALLOW = {' or line.strip() == 'SPECIAL_CASES_TO_ALLOW.update(':\n            in_special_cases_to_allow = True\n        elif in_special_cases_to_allow and indent == 0 and (line.strip() in ('}', ')')):\n            in_special_cases_to_allow = False\n        if in_indent:\n            if line.strip().endswith((']', '],')):\n                in_indent = False\n            continue\n        if in_special_cases_to_allow and any((model_config_class in line for model_config_class in model_config_classes)):\n            while new_file_lines[-1].strip().startswith('\n                new_file_lines.pop()\n            if line.strip().endswith('['):\n                in_indent = True\n            continue\n        elif any((model_config_class in line for model_config_class in model_config_classes)):\n            continue\n        new_file_lines.append(line)\n    with open(filename, 'w') as f:\n        f.write('\\n'.join(new_file_lines))", "docstring": "Remove the deprecated model config classes from the check_config_attributes.py file\n\nArgs:\nmodel_config_classes (List[str]): The model config classes to remove e.g. [\"BertConfig\", \"DistilBertConfig\"]", "source": "github-repos"}
{"code": "def get_roles(client):\n    done = False\n    marker = None\n    roles = []\n    while (not done):\n        if marker:\n            response = client.list_roles(Marker=marker)\n        else:\n            response = client.list_roles()\n        roles += response['Roles']\n        if response['IsTruncated']:\n            marker = response['Marker']\n        else:\n            done = True\n    return roles", "docstring": "Returns a list of all the roles for an account. Returns a list containing all the roles for the account.\n\nArgs:\nclient (:obj:`boto3.session.Session`): A boto3 Session object\n\nReturns:\n:obj:`list` of `dict`", "source": "codesearchnet"}
{"code": "def scalar(self, tag, value, step=None):\n    \n    value = float(onp.array(value))\n    if step is None:\n      step = self._step\n    else:\n      self._step = step\n    summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])\n    self.add_summary(summary, step)", "docstring": "Saves scalar value.\n\nArgs:\ntag: str: label for this data\nvalue: int/float: number to log\nstep: int: training step", "source": "juraj-google-style"}
{"code": "def _Completion(self, match):\n    word = str(match.group())[2:(- 2)]\n    return (('(' + '('.join(word)) + (')?' * len(word)))", "docstring": "r\"\"\"Replaces double square brackets with variable length completion.\n\nCompletion cannot be mixed with regexp matching or '\\' characters\ni.e. '[[(\\n)]] would become (\\(n)?)?.'\n\nArgs:\nmatch: A regex Match() object.\n\nReturns:\nString of the format '(a(b(c(d)?)?)?)?'.", "source": "codesearchnet"}
{"code": "def copy_modified_gene(self, modified_gene, ignore_model_attributes=True):\n        \n        ignore = ['_model', '_reaction', '_functional', 'model', 'reaction', 'functional']\n        for attr in filter(lambda a: not a.startswith('__') and not isinstance(getattr(type(self), a, None), property) and not callable(getattr(self, a)),\n                           dir(modified_gene)):\n            if attr not in ignore and ignore_model_attributes:\n                setattr(self, attr, getattr(modified_gene, attr))", "docstring": "Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID.\n\nArgs:\nmodified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over.\nignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models.", "source": "juraj-google-style"}
{"code": "def update_device_info(self, device_id, display_name):\n        \n        content = {\n            \"display_name\": display_name\n        }\n        return self._send(\"PUT\", \"/devices/%s\" % device_id, content=content)", "docstring": "Update the display name of a device.\n\nArgs:\ndevice_id (str): The device ID of the device to update.\ndisplay_name (str): New display name for the device.", "source": "juraj-google-style"}
{"code": "def get_url(profile, resource):\n    repo = profile['repo']\n    url = ((((GITHUB_API_BASE_URL + 'repos/') + repo) + '/git') + resource)\n    return url", "docstring": "Get the URL for a resource.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nresource\nThe part of a Github API URL that comes after ``.../:repo/git``.\nFor instance, for ``.../:repo/git/commits``, it's ``/commits``.\n\nReturns:\nThe full URL for the specified resource under the specified profile.", "source": "codesearchnet"}
{"code": "def run_display_app_output(self, out):\n    if ((not self.profile.get('quiet')) and (not self.args.quiet)):\n        print('App Output:')\n        for o in out.decode('utf-8').split('\\n'):\n            print('  {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, o))\n            self.log.debug('[tcrun] App output: {}'.format(o))", "docstring": "Print any App output.\n\nArgs:\nout (str): One or more lines of output messages.", "source": "codesearchnet"}
{"code": "def destroy_s3_event(app, env, region):\n    generated = get_details(app=app, env=env)\n    bucket = generated.s3_app_bucket()\n    session = boto3.Session(profile_name=env, region_name=region)\n    s3_client = session.client('s3')\n    config = {}\n    s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=config)\n    LOG.debug('Deleted Lambda S3 notification')\n    return True", "docstring": "Destroy S3 event.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\nReturns:\nbool: True upon successful completion.", "source": "codesearchnet"}
{"code": "def watch(self, selector, callback):\n        \n\n        if selector not in self._monitors:\n            self._monitors[selector] = set()\n\n        self._monitors[selector].add(callback)", "docstring": "Call a function whenever a stream changes.\n\nArgs:\nselector (DataStreamSelector): The selector to watch.\nIf this is None, it is treated as a wildcard selector\nthat matches every stream.\ncallback (callable): The function to call when a new\nreading is pushed.  Callback is called as:\ncallback(stream, value)", "source": "juraj-google-style"}
{"code": "def get_plan(self, plan_code):\n        \n        return self.client._get(self.url + 'plans/{}'.format(plan_code), headers=self.get_headers())", "docstring": "Check all the information of a plan for subscriptions associated with the merchant.\n\nArgs:\nplan_code: Plan’s identification code for the merchant.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _wake_up_timer(self, kill_event):\n        \n\n        while True:\n            prev = self._wake_up_time\n\n            \n            \n            time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))\n\n            if time_to_die:\n                return\n\n            if prev == self._wake_up_time:\n                self.make_callback(kind='timer')\n            else:\n                print(\"Sleeping a bit more\")", "docstring": "Internal. This is the function that the thread will execute.\nwaits on an event so that the thread can make a quick exit when close() is called\n\nArgs:\n- kill_event (threading.Event) : Event to wait on", "source": "juraj-google-style"}
{"code": "async def _handle_watermark_notification(self, watermark_notification):\n    conv_id = watermark_notification.conversation_id.id\n    res = parsers.parse_watermark_notification(watermark_notification)\n    (await self.on_watermark_notification.fire(res))\n    try:\n        conv = (await self._get_or_fetch_conversation(conv_id))\n    except exceptions.NetworkError:\n        logger.warning('Failed to fetch conversation for watermark notification: %s', conv_id)\n    else:\n        (await conv.on_watermark_notification.fire(res))", "docstring": "Receive WatermarkNotification and update the conversation.\n\nArgs:\nwatermark_notification: hangouts_pb2.WatermarkNotification instance", "source": "codesearchnet"}
{"code": "def initialize_particle(rng, domain, fitness_function):\n    \n    position = rng.uniform(domain.lower, domain.upper, domain.dimension)\n    fitness = fitness_function(position)\n    return Particle(position=position,\n                    velocity=np.zeros(domain.dimension),\n                    fitness=fitness,\n                    best_fitness=fitness,\n                    best_position=position)", "docstring": "Initializes a particle within a domain.\nArgs:\nrng: numpy.random.RandomState: The random number generator.\ndomain: cipy.problems.core.Domain: The domain of the problem.\n\nReturns:\ncipy.algorithms.pso.Particle: A new, fully initialized particle.", "source": "juraj-google-style"}
{"code": "def consume(generator):  \n    \n    \n    if hasattr(generator, '__next__'):\n        return list(generator)\n\n    if not PY_35:\n        raise RuntimeError(\n            'paco: asynchronous iterator protocol not supported')\n\n    \n    buf = []\n    while True:\n        try:\n            buf.append((yield from generator.__anext__()))\n        except StopAsyncIteration:  \n            break\n\n    return buf", "docstring": "Helper function to consume a synchronous or asynchronous generator.\n\nArguments:\ngenerator (generator|asyncgenerator): generator to consume.\n\nReturns:\nlist", "source": "juraj-google-style"}
{"code": "def airborne_position_with_ref(msg, lat_ref, lon_ref):\n    mb = common.hex2bin(msg)[32:]\n    cprlat = (common.bin2int(mb[22:39]) / 131072.0)\n    cprlon = (common.bin2int(mb[39:56]) / 131072.0)\n    i = int(mb[21])\n    d_lat = ((360.0 / 59) if i else (360.0 / 60))\n    j = (common.floor((lat_ref / d_lat)) + common.floor(((0.5 + ((lat_ref % d_lat) / d_lat)) - cprlat)))\n    lat = (d_lat * (j + cprlat))\n    ni = (common.cprNL(lat) - i)\n    if (ni > 0):\n        d_lon = (360.0 / ni)\n    else:\n        d_lon = 360.0\n    m = (common.floor((lon_ref / d_lon)) + common.floor(((0.5 + ((lon_ref % d_lon) / d_lon)) - cprlon)))\n    lon = (d_lon * (m + cprlon))\n    return (round(lat, 5), round(lon, 5))", "docstring": "Decode airborne position with only one message,\nknowing reference nearby location, such as previously calculated location,\nground station, or airport location, etc. The reference position shall\nbe with in 180NM of the true position.\n\nArgs:\nmsg (string): even message (28 bytes hexadecimal string)\nlat_ref: previous known latitude\nlon_ref: previous known longitude\n\nReturns:\n(float, float): (latitude, longitude) of the aircraft", "source": "codesearchnet"}
{"code": "def InTemplateArgumentList(self, clean_lines, linenum, pos):\n    while (linenum < clean_lines.NumLines()):\n        line = clean_lines.elided[linenum]\n        match = Match('^[^{};=\\\\[\\\\]\\\\.<>]*(.)', line[pos:])\n        if (not match):\n            linenum += 1\n            pos = 0\n            continue\n        token = match.group(1)\n        pos += len(match.group(0))\n        if (token in ('{', '}', ';')):\n            return False\n        if (token in ('>', '=', '[', ']', '.')):\n            return True\n        if (token != '<'):\n            pos += 1\n            if (pos >= len(line)):\n                linenum += 1\n                pos = 0\n            continue\n        (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, (pos - 1))\n        if (end_pos < 0):\n            return False\n        linenum = end_line\n        pos = end_pos\n    return False", "docstring": "Check if current position is inside template argument list.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\npos: position just after the suspected template argument.\nReturns:\nTrue if (linenum, pos) is inside template arguments.", "source": "codesearchnet"}
{"code": "def upsert(self):\n    required_parameters = []\n    self._stackParameters = []\n    try:\n        self._initialize_upsert()\n    except Exception:\n        return False\n    try:\n        available_parameters = self._parameters.keys()\n        for parameter_name in self._template.get('Parameters', {}):\n            required_parameters.append(str(parameter_name))\n        logging.info((' required parameters: ' + str(required_parameters)))\n        logging.info(('available parameters: ' + str(available_parameters)))\n        parameters = []\n        for required_parameter in required_parameters:\n            parameter = {}\n            parameter['ParameterKey'] = str(required_parameter)\n            required_parameter = str(required_parameter)\n            if (required_parameter in self._parameters):\n                parameter['ParameterValue'] = self._parameters[required_parameter]\n            else:\n                parameter['ParameterValue'] = self._parameters[required_parameter.lower()]\n            parameters.append(parameter)\n        if (not self._analyze_stuff()):\n            sys.exit(1)\n        if self._config.get('dryrun', False):\n            logging.info('Generating change set')\n            set_id = self._generate_change_set(parameters)\n            if set_id:\n                self._describe_change_set(set_id)\n            logging.info('This was a dryrun')\n            sys.exit(0)\n        self._tags.append({'Key': 'CODE_VERSION_SD', 'Value': self._config.get('codeVersion')})\n        self._tags.append({'Key': 'ANSWER', 'Value': str(42)})\n        if self._updateStack:\n            stack = self._cloudFormation.update_stack(StackName=self._config.get('environment', {}).get('stack_name', None), TemplateURL=self._templateUrl, Parameters=parameters, Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'], Tags=self._tags, ClientRequestToken=str(uuid.uuid4()))\n            logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))\n        else:\n            stack = self._cloudFormation.create_stack(StackName=self._config.get('environment', {}).get('stack_name', None), TemplateURL=self._templateUrl, Parameters=parameters, Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'], Tags=self._tags, ClientRequestToken=str(uuid.uuid4()))\n            logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))\n    except Exception as x:\n        if self._verbose:\n            logging.error(x, exc_info=True)\n        else:\n            logging.error(x, exc_info=False)\n        return False\n    return True", "docstring": "The main event of the utility. Create or update a Cloud Formation\nstack. Injecting properties where needed\n\nArgs:\nNone\n\nReturns:\nTrue if the stack create/update is started successfully else\nFalse if the start goes off in the weeds.\n\nExits:\nIf the user asked for a dryrun exit(with a code 0) the thing here. There is no\npoint continuing after that point.", "source": "codesearchnet"}
{"code": "def filter(self, scored_list):\n        \n        if len(scored_list) > 0:\n            avg = np.mean([s[1] for s in scored_list])\n            std = np.std([s[1] for s in scored_list])\n        else:\n            avg = 0\n            std = 0\n        limiter = avg + 0.5 * std\n        mean_scored = [(sent_idx, score) for (sent_idx, score) in scored_list if score > limiter]\n        return mean_scored", "docstring": "Filtering with std.\n\nArgs:\nscored_list:    The list of scoring.\n\nRetruns:\nThe list of filtered result.", "source": "juraj-google-style"}
{"code": "def stop_batch_gradient(cls, x: 'TensorFluent', stop_batch: tf.Tensor) -> 'TensorFluent':\n    scope = x.scope.as_list()\n    batch = x.batch\n    tensor = tf.where(stop_batch, tf.stop_gradient(x.tensor), x.tensor)\n    return TensorFluent(tensor, scope, batch)", "docstring": "Returns a copy of the inputs fluent with stop_gradient applied at batch level.\n\nArgs:\nx: The input fluent.\nstop_batch: A boolean tf.Tensor with shape=(batch_size, ...)\n\nReturns:\nA TensorFluent that conditionally stops backpropagation of gradient computations.", "source": "codesearchnet"}
{"code": "def _store_checkpoint(self, sess, saver, global_step):\n    \n    if not self._logdir or not saver:\n      return\n    tf.gfile.MakeDirs(self._logdir)\n    filename = os.path.join(self._logdir, 'model.ckpt')\n    saver.save(sess, filename, global_step)", "docstring": "Store a checkpoint if a log directory was provided to the constructor.\n\nThe directory will be created if needed.\n\nArgs:\nsess: Session containing variables to store.\nsaver: Saver used for checkpointing.\nglobal_step: Step number of the checkpoint name.", "source": "juraj-google-style"}
{"code": "def load_lines(filename):\n    with open(filename, 'r', encoding='utf-8') as f:\n        return [line.rstrip('\\n') for line in f.readlines()]", "docstring": "Load a text file as an array of lines.\n\nArgs:\nfilename: Path to the input file.\n\nReturns:\nAn array of strings, each representing an individual line.", "source": "codesearchnet"}
{"code": "def expire_key(self, key):\n    value = self.base_dict[key]\n    del self[key]\n    if (self.callback is not None):\n        self.callback(key, value, *self.callback_args, **self.callback_kwargs)", "docstring": "Expire the key, delete the value, and call the callback function\nif one is specified.\n\nArgs:\nkey: The ``TimedDict`` key", "source": "codesearchnet"}
{"code": "def __call__(self, request: beam.Row, *args, **kwargs):\n    response_dict: dict[str, Any] = {}\n    row_key_str: str = ''\n    try:\n        if self._row_key_fn:\n            row_key = self._row_key_fn(request)\n        else:\n            request_dict = request._asdict()\n            row_key_str = str(request_dict[self._row_key])\n            row_key = row_key_str.encode(self._encoding)\n        row = self._table.read_row(row_key, filter_=self._row_filter)\n        if row:\n            for cf_id, cf_v in row.cells.items():\n                response_dict[cf_id] = {}\n                for col_id, col_v in cf_v.items():\n                    if self._include_timestamp:\n                        response_dict[cf_id][col_id.decode(self._encoding)] = [(v.value.decode(self._encoding), v.timestamp) for v in col_v]\n                    else:\n                        response_dict[cf_id][col_id.decode(self._encoding)] = col_v[0].value.decode(self._encoding)\n        elif self._exception_level == ExceptionLevel.WARN:\n            _LOGGER.warning('no matching row found for row_key: %s with row_filter: %s' % (row_key_str, self._row_filter))\n        elif self._exception_level == ExceptionLevel.RAISE:\n            raise ValueError('no matching row found for row_key: %s with row_filter=%s' % (row_key_str, self._row_filter))\n    except KeyError:\n        raise KeyError('row_key %s not found in input PCollection.' % row_key_str)\n    except NotFound:\n        raise NotFound('GCP BigTable cluster `%s:%s:%s` not found.' % (self._project_id, self._instance_id, self._table_id))\n    except Exception as e:\n        raise e\n    return (request, beam.Row(**response_dict))", "docstring": "Reads a row from the GCP BigTable and returns\na `Tuple` of request and response.\n\nArgs:\nrequest: the input `beam.Row` to enrich.", "source": "github-repos"}
{"code": "def update_utxoset(self, transaction):\n    spent_outputs = [spent_output for spent_output in transaction.spent_outputs]\n    if spent_outputs:\n        self.delete_unspent_outputs(*spent_outputs)\n    self.store_unspent_outputs(*[utxo._asdict() for utxo in transaction.unspent_outputs])", "docstring": "Update the UTXO set given ``transaction``. That is, remove\nthe outputs that the given ``transaction`` spends, and add the\noutputs that the given ``transaction`` creates.\n\nArgs:\ntransaction (:obj:`~bigchaindb.models.Transaction`): A new\ntransaction incoming into the system for which the UTXO\nset needs to be updated.", "source": "codesearchnet"}
{"code": "def respond(self, prompt_id, response):\n    _LOG.debug('Responding to prompt (%s): \"%s\"', prompt_id, response)\n    with self._cond:\n        if (not (self._prompt and (self._prompt.id == prompt_id))):\n            return False\n        self._response = response\n        self.last_response = (prompt_id, response)\n        self.remove_prompt()\n        self._cond.notifyAll()\n    return True", "docstring": "Respond to the prompt with the given ID.\n\nIf there is no active prompt or the given ID doesn't match the active\nprompt, do nothing.\n\nArgs:\nprompt_id: A string uniquely identifying the prompt.\nresponse: A string response to the given prompt.\n\nReturns:\nTrue if the prompt with the given ID was active, otherwise False.", "source": "codesearchnet"}
{"code": "def parse_mmcif_header(infile):\n    from Bio.PDB.MMCIF2Dict import MMCIF2Dict\n    newdict = {}\n    try:\n        mmdict = MMCIF2Dict(infile)\n    except ValueError as e:\n        log.exception(e)\n        return newdict\n    chemical_ids_exclude = ['HOH']\n    chemical_types_exclude = ['l-peptide linking', 'peptide linking']\n    if ('_struct.title' in mmdict):\n        newdict['pdb_title'] = mmdict['_struct.title']\n    else:\n        log.debug('{}: No title field'.format(infile))\n    if ('_struct.pdbx_descriptor' in mmdict):\n        newdict['description'] = mmdict['_struct.pdbx_descriptor']\n    else:\n        log.debug('{}: no description field'.format(infile))\n    if ('_pdbx_database_status.recvd_initial_deposition_date' in mmdict):\n        newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']\n    elif ('_database_PDB_rev.date' in mmdict):\n        newdict['date'] = mmdict['_database_PDB_rev.date']\n    else:\n        log.debug('{}: no date field'.format(infile))\n    if ('_exptl.method' in mmdict):\n        newdict['experimental_method'] = mmdict['_exptl.method']\n    else:\n        log.debug('{}: no experimental method field'.format(infile))\n    if ('_refine.ls_d_res_high' in mmdict):\n        try:\n            if isinstance(mmdict['_refine.ls_d_res_high'], list):\n                newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]\n            else:\n                newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])\n        except:\n            try:\n                newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])\n            except:\n                log.debug('{}: no resolution field'.format(infile))\n    else:\n        log.debug('{}: no resolution field'.format(infile))\n    if ('_chem_comp.id' in mmdict):\n        chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'], ssbio.utils.not_find(mmdict['_chem_comp.type'], chemical_types_exclude, case_sensitive=False))\n        chemicals_fitered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)\n        newdict['chemicals'] = chemicals_fitered\n    else:\n        log.debug('{}: no chemical composition field'.format(infile))\n    if ('_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict):\n        newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']\n    else:\n        log.debug('{}: no organism field'.format(infile))\n    return newdict", "docstring": "Parse a couple important fields from the mmCIF file format with some manual curation of ligands.\n\nIf you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.\n\nArgs:\ninfile: Path to mmCIF file\n\nReturns:\ndict: Dictionary of parsed header", "source": "codesearchnet"}
{"code": "def encode_tf(self, s):\n    ids = subword_text_encoder_ops.subword_text_encoder_encode(s, self._filepath)\n    return ids[:(- 1)]", "docstring": "Encode a tf.Scalar string to a tf.Tensor.\n\nThis will be necessary for on-the-fly tokenization.\n\nArgs:\ns: a tf.Scalar with dtype tf.string\nReturns:\na 1d tf.Tensor with dtype tf.int32", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):\n    hidden_states = self.self_att(hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, past_key_values=past_key_values, use_cache=use_cache)\n    hidden_states, attn_weights, current_key_value = hidden_states\n    hidden_states = self.ffn(hidden_states)\n    return (hidden_states, attn_weights, current_key_value)", "docstring": "Args:\nhidden_states (`torch.Tensor`):\nInput to the layer of shape `(batch, seq_len, dim_model)`\nattention_mask (`torch.Tensor`):\nAvoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)`\nposition_bias (`torch.Tensor`):\nProvides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)`\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers.\npast_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*):\nCached past key and value projection states\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).", "source": "github-repos"}
{"code": "def attached_dependencies(self):\n    return self._attached_dependencies", "docstring": "Returns list of dependencies that should be saved in the checkpoint.\n\nThese dependencies are not tracked by root, but are in the checkpoint.\nThis is defined when the user creates a Checkpoint with both root and kwargs\nset.\n\nReturns:\nA list of TrackableReferences.", "source": "github-repos"}
{"code": "def coupling(self, source_y, target_y, weight):\n        \n        v_pyramidal = source_y[1] - source_y[2]\n        return (np.array([0, 0, 0, 0, 0, 1.0, 0, 0]) *\n                (weight*self.g1*self.He2*self.ke2*self.S(v_pyramidal)))", "docstring": "How to couple the output of one node to the input of another.\nArgs:\nsource_y (array of shape (8,)): state of the source node\ntarget_y (array of shape (8,)): state of the target node\nweight (float): the connection strength\nReturns:\ninput (array of shape (8,)): value to drive each variable of the\ntarget node.", "source": "juraj-google-style"}
{"code": "def decode_list_oov(self, ids, source_oov_id_to_token):\n    seq = (reversed(ids) if self._reverse else ids)\n    tokens = []\n    for cur_id in seq:\n        if (cur_id in self._id_to_token):\n            tokens.append(self._id_to_token[cur_id])\n        else:\n            tokens.append(source_oov_id_to_token[(cur_id - self.vocab_size)])\n    return tokens", "docstring": "decode ids back to tokens, considering OOVs temporary IDs.\n\nArgs:\nids: vocab ids. Could possibly include source temporary OOV ID starting\nfrom vocab_size.\nsource_oov_id_to_token: a list of source OOV tokens, with the order the\nsame as they appear in the source.\n\nReturns:\ndecoded tokens, possibly including source OOV tokens.", "source": "codesearchnet"}
{"code": "def batch_set_value(tuples):\n    if context.executing_eagerly() or ops.inside_function():\n        for x, value in tuples:\n            x.assign(numpy_compat.np_asarray(value, dtype=dtype_numpy(x)))\n    else:\n        with get_graph().as_default():\n            if tuples:\n                assign_ops = []\n                feed_dict = {}\n                for x, value in tuples:\n                    value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))\n                    tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])\n                    if hasattr(x, '_assign_placeholder'):\n                        assign_placeholder = x._assign_placeholder\n                        assign_op = x._assign_op\n                    else:\n                        placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)\n                        assign_placeholder = array_ops.placeholder(tf_dtype, shape=placeholder_shape)\n                        assign_op = x.assign(assign_placeholder)\n                        x._assign_placeholder = assign_placeholder\n                        x._assign_op = assign_op\n                    assign_ops.append(assign_op)\n                    feed_dict[assign_placeholder] = value\n                get_session().run(assign_ops, feed_dict=feed_dict)", "docstring": "Sets the values of many tensor variables at once.\n\nArgs:\ntuples: a list of tuples `(tensor, value)`.\n`value` should be a Numpy array.", "source": "github-repos"}
{"code": "def get_point_index(self, point):\n        \n        for i, segment in enumerate(self.segments):\n            idx = segment.getPointIndex(point)\n            if idx != -1:\n                return i, idx\n        return -1, -1", "docstring": "Gets of the closest first point\n\nArgs:\npoint (:obj:`Point`)\nReturns:\n(int, int): Segment id and point index in that segment", "source": "juraj-google-style"}
{"code": "def add(TargetGroup, NewMember, Config=None, Args=None):\n    Member = (Task(NewMember, (Args or {}), (Config or {})) if isfunction(NewMember) else Group(NewMember, (Config or {})))\n    ParentMembers = TargetGroup.__ec_member__.Members\n    ParentMembers[Member.Config['name']] = Member\n    alias = Member.Config.get('alias')\n    if alias:\n        ParentMembers[alias] = Member", "docstring": "r\"\"\"Adds members to an existing group.\n\nArgs:\nTargetGroup (Group): The target group for the addition.\nNewMember (Group / Task): The member to be added.\nConfig (dict): The config for the member.\nArgs (OrderedDict): ArgConfig for the NewMember, if it's a task (optional).", "source": "codesearchnet"}
{"code": "def coord_list_mapping(subset, superset, atol=1e-08):\n    c1 = np.array(subset)\n    c2 = np.array(superset)\n    inds = np.where(np.all(np.isclose(c1[(:, None, :)], c2[(None, :, :)], atol=atol), axis=2))[1]\n    result = c2[inds]\n    if (not np.allclose(c1, result, atol=atol)):\n        if (not is_coord_subset(subset, superset)):\n            raise ValueError('subset is not a subset of superset')\n    if (not (result.shape == c1.shape)):\n        raise ValueError('Something wrong with the inputs, likely duplicates in superset')\n    return inds", "docstring": "Gives the index mapping from a subset to a superset.\nSubset and superset cannot contain duplicate rows\n\nArgs:\nsubset, superset: List of coords\n\nReturns:\nlist of indices such that superset[indices] = subset", "source": "codesearchnet"}
{"code": "def emergence(network, state, do_blackbox=False, do_coarse_grain=True, time_scales=None):\n    micro_phi = compute.major_complex(network, state).phi\n    max_phi = float('-inf')\n    max_network = None\n    for subsystem in all_macro_systems(network, state, do_blackbox=do_blackbox, do_coarse_grain=do_coarse_grain, time_scales=time_scales):\n        phi = compute.phi(subsystem)\n        if ((phi - max_phi) > constants.EPSILON):\n            max_phi = phi\n            max_network = MacroNetwork(network=network, macro_phi=phi, micro_phi=micro_phi, system=subsystem.micro_node_indices, time_scale=subsystem.time_scale, blackbox=subsystem.blackbox, coarse_grain=subsystem.coarse_grain)\n    return max_network", "docstring": "Check for the emergence of a micro-system into a macro-system.\n\nChecks all possible blackboxings and coarse-grainings of a system to find\nthe spatial scale with maximum integrated information.\n\nUse the ``do_blackbox`` and ``do_coarse_grain`` args to specifiy whether to\nuse blackboxing, coarse-graining, or both. The default is to just\ncoarse-grain the system.\n\nArgs:\nnetwork (Network): The network of the micro-system under investigation.\nstate (tuple[int]): The state of the network.\ndo_blackbox (bool): Set to ``True`` to enable blackboxing. Defaults to\n``False``.\ndo_coarse_grain (bool): Set to ``True`` to enable coarse-graining.\nDefaults to ``True``.\ntime_scales (list[int]): List of all time steps over which to check\nfor emergence.\n\nReturns:\nMacroNetwork: The maximal macro-system generated from the\nmicro-system.", "source": "codesearchnet"}
{"code": "def irreducible_purviews(cm, direction, mechanism, purviews):\n\n    def reducible(purview):\n        'Return ``True`` if purview is trivially reducible.'\n        (_from, to) = direction.order(mechanism, purview)\n        return connectivity.block_reducible(cm, _from, to)\n    return [purview for purview in purviews if (not reducible(purview))]", "docstring": "Return all purviews which are irreducible for the mechanism.\n\nArgs:\ncm (np.ndarray): An |N x N| connectivity matrix.\ndirection (Direction): |CAUSE| or |EFFECT|.\npurviews (list[tuple[int]]): The purviews to check.\nmechanism (tuple[int]): The mechanism in question.\n\nReturns:\nlist[tuple[int]]: All purviews in ``purviews`` which are not reducible\nover ``mechanism``.\n\nRaises:\nValueError: If ``direction`` is invalid.", "source": "codesearchnet"}
{"code": "def _run_internal_graph(self, inputs, training=None, mask=None):\n    inputs = self._flatten_to_reference_inputs(inputs)\n    if mask is None:\n        masks = [None] * len(inputs)\n    else:\n        masks = self._flatten_to_reference_inputs(mask)\n    for input_t, mask in zip(inputs, masks):\n        input_t._keras_mask = mask\n    tensor_dict = {}\n    tensor_usage_count = self._tensor_usage_count\n    for x, y in zip(self.inputs, inputs):\n        y = self._conform_to_reference_input(y, ref_input=x)\n        x_id = str(id(x))\n        tensor_dict[x_id] = [y] * tensor_usage_count[x_id]\n    nodes_by_depth = self._nodes_by_depth\n    depth_keys = list(nodes_by_depth.keys())\n    depth_keys.sort(reverse=True)\n    for depth in depth_keys:\n        nodes = nodes_by_depth[depth]\n        for node in nodes:\n            if node.is_input:\n                continue\n            if any((t_id not in tensor_dict for t_id in node.flat_input_ids)):\n                continue\n            args, kwargs = node.map_arguments(tensor_dict)\n            outputs = node.layer(*args, **kwargs)\n            for x_id, y in zip(node.flat_output_ids, nest.flatten(outputs)):\n                tensor_dict[x_id] = [y] * tensor_usage_count[x_id]\n    output_tensors = []\n    for x in self.outputs:\n        x_id = str(id(x))\n        assert x_id in tensor_dict, 'Could not compute output ' + str(x)\n        output_tensors.append(tensor_dict[x_id].pop())\n    return nest.pack_sequence_as(self._nested_outputs, output_tensors)", "docstring": "Computes output tensors for new inputs.\n\n# Note:\n- Can be run on non-Keras tensors.\n\nArgs:\ninputs: Tensor or nested structure of Tensors.\ntraining: Boolean learning phase.\nmask: (Optional) Tensor or nested structure of Tensors.\n\nReturns:\noutput_tensors", "source": "github-repos"}
{"code": "def indexes(self, collection=None):\n        \n        \n        indexes = []\n\n        for collection_name in self.collections():\n            if collection and collection != collection_name:\n                continue\n            for index_name in self.db[collection_name].index_information():\n                if index_name != '_id_':\n                    indexes.append(index_name)\n        return indexes", "docstring": "Return a list with the current indexes\n\nSkip the mandatory _id_ indexes\n\nArgs:\ncollection(str)\n\nReturns:\nindexes(list)", "source": "juraj-google-style"}
{"code": "def do_transaction(args):\n    \n    rest_client = RestClient(args.url, args.user)\n\n    if args.subcommand == 'list':\n        transactions = rest_client.list_transactions()\n        keys = ('transaction_id', 'family', 'version', 'size', 'payload')\n        headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys)\n\n        def parse_txn_row(transaction, decode=True):\n            decoded = b64decode(transaction['payload'])\n            return (\n                transaction['header_signature'],\n                transaction['header']['family_name'],\n                transaction['header']['family_version'],\n                len(decoded),\n                str(decoded) if decode else transaction['payload'])\n\n        if args.format == 'default':\n            fmt.print_terminal_table(headers, transactions, parse_txn_row)\n\n        elif args.format == 'csv':\n            fmt.print_csv(headers, transactions, parse_txn_row)\n\n        elif args.format == 'json' or args.format == 'yaml':\n            data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))}\n                    for b in transactions]\n\n            if args.format == 'yaml':\n                fmt.print_yaml(data)\n            elif args.format == 'json':\n                fmt.print_json(data)\n            else:\n                raise AssertionError('Missing handler: {}'.format(args.format))\n\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))\n\n    if args.subcommand == 'show':\n        output = rest_client.get_transaction(args.transaction_id)\n\n        if args.key:\n            if args.key == 'payload':\n                output = b64decode(output['payload'])\n            elif args.key in output:\n                output = output[args.key]\n            elif args.key in output['header']:\n                output = output['header'][args.key]\n            else:\n                raise CliException(\n                    'Key \"{}\" not found in transaction or header'.format(\n                        args.key))\n\n        if args.format == 'yaml':\n            fmt.print_yaml(output)\n        elif args.format == 'json':\n            fmt.print_json(output)\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))", "docstring": "Runs the transaction list or show command, printing to the console\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "juraj-google-style"}
{"code": "def _should_get_another_batch(self, content):\n    \n    if ('max-keys' in self._options and\n        self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT):\n      return False\n\n    elements = self._find_elements(\n        content, set([common._T_IS_TRUNCATED,\n                      common._T_NEXT_MARKER]))\n    if elements.get(common._T_IS_TRUNCATED, 'false').lower() != 'true':\n      return False\n\n    next_marker = elements.get(common._T_NEXT_MARKER)\n    if next_marker is None:\n      self._options.pop('marker', None)\n      return False\n    self._options['marker'] = next_marker\n    return True", "docstring": "Whether to issue another GET bucket call.\n\nArgs:\ncontent: response XML.\n\nReturns:\nTrue if should, also update self._options for the next request.\nFalse otherwise.", "source": "juraj-google-style"}
{"code": "def _get_weight_param_summary(wp):\n    \n    summary_str = ''\n    if wp.HasField('quantization'):\n        nbits = wp.quantization.numberOfBits\n        quant_type = 'linearly' if wp.quantization.HasField('linearQuantization') else 'lookup-table'\n        summary_str += '{}-bit {} quantized'.format(nbits, quant_type)\n\n    if len(wp.floatValue) > 0:\n        summary_str += '({} floatValues)'.format(len(wp.floatValue))\n    if len(wp.float16Value) > 0:\n        summary_str += '({} bytes float16Values)'.format(len(wp.float16Value))\n    if len(wp.rawValue) > 0:\n        summary_str += '({} bytes rawValues)'.format(len(wp.rawValue))\n\n    return summary_str", "docstring": "Get a summary of _NeuralNetwork_pb2.WeightParams\nArgs:\nwp : _NeuralNetwork_pb2.WeightParams - the _NeuralNetwork_pb2.WeightParams message to display\nReturns:\na str summary for wp", "source": "juraj-google-style"}
{"code": "def get_aggregation_propensity(self, seq, outdir, cutoff_v=5, cutoff_n=5, run_amylmuts=False):\n    seq = ssbio.protein.sequence.utils.cast_to_str(seq)\n    results = self.run_amylpred2(seq=seq, outdir=outdir, run_amylmuts=run_amylmuts)\n    (agg_index, agg_conf) = self.parse_for_consensus_aggregation(N=len(seq), results=results, cutoff_v=cutoff_v, cutoff_n=cutoff_n)\n    return agg_index", "docstring": "Run the AMYLPRED2 web server for a protein sequence and get the consensus result for aggregation propensity.\n\nArgs:\nseq (str, Seq, SeqRecord): Amino acid sequence\noutdir (str): Directory to where output files should be saved\ncutoff_v (int): The minimal number of methods that agree on a residue being a aggregation-prone residue\ncutoff_n (int): The minimal number of consecutive residues to be considered as a 'stretch' of\naggregation-prone region\nrun_amylmuts (bool): If AMYLMUTS method should be run, default False. AMYLMUTS is optional as it is the most\ntime consuming and generates a slightly different result every submission.\n\nReturns:\nint: Aggregation propensity - the number of aggregation-prone segments on an unfolded protein sequence", "source": "codesearchnet"}
{"code": "def decode(token, certs=None, verify=True, audience=None):\n    (header, payload, signed_section, signature) = _unverified_decode(token)\n    if (not verify):\n        return payload\n    if isinstance(certs, collections.Mapping):\n        key_id = header.get('kid')\n        if key_id:\n            if (key_id not in certs):\n                raise ValueError('Certificate for key id {} not found.'.format(key_id))\n            certs_to_check = [certs[key_id]]\n        else:\n            certs_to_check = certs.values()\n    else:\n        certs_to_check = certs\n    if (not crypt.verify_signature(signed_section, signature, certs_to_check)):\n        raise ValueError('Could not verify token signature.')\n    _verify_iat_and_exp(payload)\n    if (audience is not None):\n        claim_audience = payload.get('aud')\n        if (audience != claim_audience):\n            raise ValueError('Token has wrong audience {}, expected {}'.format(claim_audience, audience))\n    return payload", "docstring": "Decode and verify a JWT.\n\nArgs:\ntoken (str): The encoded JWT.\ncerts (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The\ncertificate used to validate the JWT signature. If bytes or string,\nit must the the public key certificate in PEM format. If a mapping,\nit must be a mapping of key IDs to public key certificates in PEM\nformat. The mapping must contain the same key ID that's specified\nin the token's header.\nverify (bool): Whether to perform signature and claim validation.\nVerification is done by default.\naudience (str): The audience claim, 'aud', that this JWT should\ncontain. If None then the JWT's 'aud' parameter is not verified.\n\nReturns:\nMapping[str, str]: The deserialized JSON payload in the JWT.\n\nRaises:\nValueError: if any verification checks failed.", "source": "codesearchnet"}
{"code": "def set_card_standard(self, title, text, smallImageUrl=None, largeImageUrl=None):\n    self.response.card.type = 'Standard'\n    self.response.card.title = title\n    self.response.card.text = text\n    if smallImageUrl:\n        self.response.card.image.smallImageUrl = smallImageUrl\n    if largeImageUrl:\n        self.response.card.image.largeImageUrl = largeImageUrl", "docstring": "Set response card as standard type.\n\ntitle, text, and image cannot exceed 8,000 characters.\n\nArgs:\ntitle: str. Title of Simple or Standard type card.\ntext: str. Content of Standard type card.\nsmallImageUrl: str. URL of small image. Cannot exceed 2,000\ncharacters. Recommended pixel size: 720w x 480h.\nlargeImageUrl: str. URL of large image. Cannot exceed 2,000\ncharacters. Recommended pixel size: 1200w x 800h.", "source": "codesearchnet"}
{"code": "def bfs(graph, start):\n    \n    \n    queue = []\n    visited = []\n    \n    \n    queue.append([['', start]])\n    while queue:\n        \n        path = queue.pop(0)\n        \n        node = path[-1][1]\n        if node.stateid not in visited:\n            visited.append(node.stateid)\n            \n            if node.final != TropicalWeight(float('inf')):\n                return \"\".join([mnode[0] for mnode in path])\n            \n            \n            for arc in node.arcs:\n                char = graph.isyms.find(arc.ilabel)\n                next_state = graph[arc.nextstate]\n                \n                if next_state.stateid not in visited:\n                    new_path = list(path)\n                    new_path.append([char, next_state])\n                    queue.append(new_path)", "docstring": "Finds the shortest string using BFS\nArgs:\ngraph (DFA): The DFA states\nstart (DFA state): The DFA initial state\nReturns:\nstr: The shortest string", "source": "juraj-google-style"}
{"code": "def aggregate_kernel_metrics(metrics: list[str], kernel_metrics: list[dict[str, tuple[str, str]]]) -> list[list[str]]:\n    if not kernel_metrics:\n        raise app.UsageError('no metrics found')\n    results: dict[str, tuple[list[float], str]] = {}\n    for vals in kernel_metrics:\n        for name in metrics:\n            if name not in vals:\n                raise app.UsageError(f\"metric '{name}' is not found\")\n            value, unit = vals[name]\n            if name not in results:\n                results[name] = ([], unit)\n            if results[name][1] != unit:\n                raise app.UsageError(f\"unit mismatch for metric '{name}'\")\n            results[name][0].append(float(value.replace(',', '')))\n    kernel_metrics = []\n    for name, (values, unit) in results.items():\n        a = aggregate(values, name)\n        if round(a) == a:\n            kernel_metrics.append([name, f'{round(a)}', unit])\n        else:\n            kernel_metrics.append([name, f'{round(a, 2)}', unit])\n    return kernel_metrics", "docstring": "Aggregates and returns the metrics for the given kernels.\n\nArgs:\nmetrics: list of metrics names to print\nkernel_metrics: dictionary of metrics by kernel\n\nReturns:\nlist of rows [name, value, unit] per metric.", "source": "github-repos"}
{"code": "def gather(values, index, name='segmented_gather'):\n    indices = index.indices\n    if len(values.shape[index.batch_dims:]) < 2:\n        return torch.gather(values, index.batch_dims, indices.view(values.size()[0], -1)).view(indices.size())\n    else:\n        indices = indices.unsqueeze(-1).expand(values.shape)\n        return torch.gather(values, index.batch_dims, indices)", "docstring": "Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up\na value for that index in *values*. Two elements from the same segment always get assigned the same value.\n\nArgs:\nvalues (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)):\nTensor with segment values.\nindex (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)):\nIndexMap.\nname (`str`, *optional*, defaults to 'segmented_gather'):\nName for the operation. Currently not used\n\nReturns:\n`tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values.", "source": "github-repos"}
{"code": "def GenesisBlock() -> Block:\n    prev_hash = UInt256(data=bytearray(32))\n    timestamp = int(datetime(2016, 7, 15, 15, 8, 21, tzinfo=pytz.utc).timestamp())\n    index = 0\n    consensus_data = 2083236893\n    next_consensus = Blockchain.GetConsensusAddress(Blockchain.StandbyValidators())\n    script = Witness(bytearray(0), bytearray(PUSHT))\n    mt = MinerTransaction()\n    mt.Nonce = 2083236893\n    output = TransactionOutput(Blockchain.SystemShare().Hash, Blockchain.SystemShare().Amount, Crypto.ToScriptHash(Contract.CreateMultiSigRedeemScript((int((len(Blockchain.StandbyValidators()) / 2)) + 1), Blockchain.StandbyValidators())))\n    it = IssueTransaction([], [output], [], [script])\n    return Block(prev_hash, timestamp, index, consensus_data, next_consensus, script, [mt, Blockchain.SystemShare(), Blockchain.SystemCoin(), it], True)", "docstring": "Create the GenesisBlock.\n\nReturns:\nBLock:", "source": "codesearchnet"}
{"code": "def _prepare_controller(self, controller, template):\n        \n        if template:\n            fn = aiohttp_jinja2.template(template_name=template)(controller)\n        else:\n            fn = self._parse_json_response(controller)\n        return fn", "docstring": "Wraps the controller wether to render a jinja template or to return a json response (if template is None)\nArgs:\ncontroller (coroutine): the coroutine to be wrapped\ntemplate (str): the name of the template or None\n\nReturns:\ncoroutine: a wrapped coroutine of the controller", "source": "juraj-google-style"}
{"code": "def GetUnclaimedCoins(self):\n    unclaimed = []\n    neo = Blockchain.SystemShare().Hash\n    for coin in self.GetCoins():\n        if ((coin.Output.AssetId == neo) and ((coin.State & CoinState.Confirmed) > 0) and ((coin.State & CoinState.Spent) > 0) and ((coin.State & CoinState.Claimed) == 0) and ((coin.State & CoinState.Frozen) == 0) and ((coin.State & CoinState.WatchOnly) == 0)):\n            unclaimed.append(coin)\n    return unclaimed", "docstring": "Gets coins in the wallet that have not been 'claimed', or redeemed for their gas value on the blockchain.\n\nReturns:\nlist: a list of ``neo.Wallet.Coin`` that have 'claimable' value", "source": "codesearchnet"}
{"code": "def _build_parser(self):\n    main_parser = argparse.ArgumentParser(description=self.common.help, prefix_chars='-+')\n    self._add_options_to_parser(self._opt_bare, main_parser)\n    main_parser.set_defaults(**self.common.defaults)\n    if (self.bare is not None):\n        main_parser.set_defaults(**self.bare.defaults)\n    subparsers = main_parser.add_subparsers(dest='loam_sub_name')\n    for (cmd_name, meta) in self.subcmds.items():\n        kwargs = {'prefix_chars': '+-', 'help': meta.help}\n        dummy_parser = subparsers.add_parser(cmd_name, **kwargs)\n        self._add_options_to_parser(self._opt_cmds[cmd_name], dummy_parser)\n        dummy_parser.set_defaults(**meta.defaults)\n    return main_parser", "docstring": "Build command line argument parser.\n\nReturns:\n:class:`argparse.ArgumentParser`: the command line argument parser.\nYou probably won't need to use it directly. To parse command line\narguments and update the :class:`ConfigurationManager` instance\naccordingly, use the :meth:`parse_args` method.", "source": "codesearchnet"}
{"code": "def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):\n    \n    if len(a_aln_seq) != len(b_aln_seq):\n        raise ValueError('Sequence lengths not equal - was an alignment run?')\n\n    if not a_seq_id:\n        a_seq_id = 'a_seq'\n    if not b_seq_id:\n        b_seq_id = 'b_seq'\n\n    a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)\n    b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)\n\n    a_idx = 1\n    b_idx = 1\n\n    appender = []\n\n    for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):\n        to_append = {}\n\n        if a == b and a != '-' and b != '-':\n            aa_flag = 'match'\n        elif a != b and a == '-' and b != '-':\n            aa_flag = 'insertion'\n        elif a != b and a != '-' and b == '-':\n            aa_flag = 'deletion'\n        elif a != b and a != '-' and b == 'X':\n            aa_flag = 'unresolved'\n        elif a != b and b != '-' and a == 'X':\n            aa_flag = 'unresolved'\n        elif a != b and a != '-' and b != '-':\n            aa_flag = 'mutation'\n\n        to_append['id_a'] = a_seq_id\n        to_append['id_b'] = b_seq_id\n        to_append['type'] = aa_flag\n\n        if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':\n            to_append['id_a_aa'] = a\n            to_append['id_a_pos'] = int(a_idx)\n            to_append['id_b_aa'] = b\n            to_append['id_b_pos'] = int(b_idx)\n            a_idx += 1\n            b_idx += 1\n\n        if aa_flag == 'deletion':\n            to_append['id_a_aa'] = a\n            to_append['id_a_pos'] = int(a_idx)\n            a_idx += 1\n\n        if aa_flag == 'insertion':\n            to_append['id_b_aa'] = b\n            to_append['id_b_pos'] = int(b_idx)\n            b_idx += 1\n\n        appender.append(to_append)\n\n    cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']\n    alignment_df = pd.DataFrame.from_records(appender, columns=cols)\n    alignment_df = alignment_df.fillna(value=np.nan)\n\n    return alignment_df", "docstring": "Summarize two alignment strings in a dataframe.\n\nArgs:\na_aln_seq (str): Aligned sequence string\nb_aln_seq (str): Aligned sequence string\na_seq_id (str): Optional ID of a_seq\nb_seq_id (str): Optional ID of b_aln_seq\n\nReturns:\nDataFrame: a per-residue level annotation of the alignment", "source": "juraj-google-style"}
{"code": "def from_statements(\n        cls, sts: List[Influence], assign_default_polarities: bool = True\n    ):\n        \n\n        _dict = {}\n        for s in sts:\n            if assign_default_polarities:\n                for delta in deltas(s):\n                    if delta[\"polarity\"] is None:\n                        delta[\"polarity\"] = 1\n            concepts = nameTuple(s)\n\n            \n            if concepts[0] != concepts[1]:\n                if all(\n                    map(exists, (delta[\"polarity\"] for delta in deltas(s)))\n                ):\n                    if concepts in _dict:\n                        _dict[concepts].append(s)\n                    else:\n                        _dict[concepts] = [s]\n\n        edges = [\n            (*concepts, {\"InfluenceStatements\": statements})\n            for concepts, statements in _dict.items()\n        ]\n        return cls(edges)", "docstring": "Construct an AnalysisGraph object from a list of INDRA statements.\nUnknown polarities are set to positive by default.\n\nArgs:\nsts: A list of INDRA Statements\n\nReturns:\nAn AnalysisGraph instance constructed from a list of INDRA\nstatements.", "source": "juraj-google-style"}
{"code": "def sample(self, size=None):\n    self._recompute()\n    if (size is None):\n        n = np.random.randn(len(self._t))\n    else:\n        n = np.random.randn(len(self._t), size)\n    n = self.solver.dot_L(n)\n    if (size is None):\n        return (self.mean.get_value(self._t) + n[(:, 0)])\n    return (self.mean.get_value(self._t)[(None, :)] + n.T)", "docstring": "Sample from the prior distribution over datasets\n\nArgs:\nsize (Optional[int]): The number of samples to draw.\n\nReturns:\narray[n] or array[size, n]: The samples from the prior\ndistribution over datasets.", "source": "codesearchnet"}
{"code": "def __init__(self, data, limit=None):\n        \n        self._data = data\n        self._limit = limit", "docstring": "Initialise the Action object.\n\nArgs:\ndata (MultiTaskData): The processed data from the task that should be passed\non to successor tasks.\nlimit (list): A list of names of all immediate successor tasks that\nshould be executed.", "source": "juraj-google-style"}
{"code": "def prepare_loss_functions(loss, output_names):\n    if isinstance(loss, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n        loss_functions = []\n        for name in output_names:\n            if name not in loss:\n                logging.warning('Output {0} missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to {0}.'.format(name))\n            loss_functions.append(get_loss_function(loss.get(name, None)))\n    elif isinstance(loss, str):\n        loss_functions = [get_loss_function(loss) for _ in output_names]\n    elif isinstance(loss, collections.abc.Sequence):\n        if len(loss) != len(output_names):\n            raise ValueError('When passing a list as loss, it should have one entry per model outputs. The model has {} outputs, but you passed loss={}'.format(len(output_names), loss))\n        loss_functions = nest.map_structure(get_loss_function, loss)\n    else:\n        loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n    return loss_functions", "docstring": "Converts loss to a list of loss functions.\n\nArgs:\nloss: String (name of objective function), objective function or\n`tf.losses.Loss` instance. See `tf.losses`. If the model has multiple\noutputs, you can use a different loss on each output by passing a\ndictionary or a list of losses. The loss value that will be minimized by\nthe model will then be the sum of all individual losses.\noutput_names: List of model output names.\n\nReturns:\nA list of loss objective functions.\n\nRaises:\nValueError: If loss is a dict with keys not in model output names,\nor if loss is a list with len not equal to model outputs.", "source": "github-repos"}
{"code": "def _parse_test_option_args(self, argv):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--test-pipeline-options', type=str, action='store', help='only run tests providing service options')\n    parser.add_argument('--not-use-test-runner-api', action='store_true', default=False, help='whether not to use test-runner-api')\n    known, unused_argv = parser.parse_known_args(argv)\n    test_pipeline_options = known.test_pipeline_options or TestPipeline.pytest_test_pipeline_options\n    if self.is_integration_test and (not test_pipeline_options):\n        raise SkipTest('IT is skipped because --test-pipeline-options is not specified')\n    self.not_use_test_runner_api = known.not_use_test_runner_api\n    return shlex.split(test_pipeline_options) if test_pipeline_options else []", "docstring": "Parse value of command line argument: --test-pipeline-options to get\npipeline options.\n\nArgs:\nargv: An iterable of command line arguments to be used. If not specified\nthen sys.argv will be used as input for parsing arguments.\n\nReturns:\nAn argument list of options that can be parsed by argparser or directly\nbuild a pipeline option.", "source": "github-repos"}
{"code": "def cached_name_scope(name, top_level=True):\n    \n    if not top_level:\n        current_ns = tf.get_default_graph().get_name_scope()\n        if current_ns:\n            name = current_ns + '/' + name\n    ns = _get_cached_ns(name)\n    with tf.name_scope(ns):\n        yield ns", "docstring": "Return a context which either opens and caches a new name scope,\nor reenter an existing one.\n\nArgs:\ntop_level(bool): if True, the name scope will always be top-level.\nIt will not be nested under any existing name scope of the caller.", "source": "juraj-google-style"}
{"code": "def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):\n    return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration.\n\nArgs:\nbackbone_config ([`PretrainedConfig`]):\nThe backbone configuration.\n\nReturns:\n[`Mask2FormerConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def to_str(value):\n    \n    if sys.version_info.major < 3 and isinstance(value, six.string_types):\n        return value\n    return str(value)", "docstring": "Convert the input to a string, unless it is a unicode string in Python 2.\n\nUnicode strings are supported as native strings in Python 3, but ``str()`` cannot be\ninvoked on unicode strings in Python 2, so we need to check for that case when\nconverting user-specified values to strings.\n\nArgs:\nvalue: The value to convert to a string.\n\nReturns:\nstr or unicode: The string representation of the value or the unicode string itself.", "source": "juraj-google-style"}
{"code": "def get_idiomatic_name_in_language(cls, name, language):\n    if (language in cls.idiomatic_methods_cache):\n        m = cls.idiomatic_methods_cache[language]\n        if (not m):\n            return name\n        return m(name)\n    (found, method) = load_language_plugins(language, 'get_idiomatic_name')\n    if found:\n        cls.idiomatic_methods_cache[language] = method\n        if method:\n            return method(name)\n        else:\n            return name\n    module = importlib.import_module(('.lang.%s' % language), package='monolithe.generators')\n    if (not hasattr(module, 'get_idiomatic_name')):\n        cls.idiomatic_methods_cache[language] = None\n        return name\n    method = getattr(module, 'get_idiomatic_name')\n    cls.idiomatic_methods_cache[language] = method\n    return method(name)", "docstring": "Get the name for the given language\n\nArgs:\nname (str): the name to convert\nlanguage (str): the language to use\n\nReturns:\na name in the given language\n\nExample:\nget_idiomatic_name_in_language(\"EnterpriseNetwork\", \"python\")\n>>> enterprise_network", "source": "codesearchnet"}
{"code": "def swo_speed_info(self):\n    info = structs.JLinkSWOSpeedInfo()\n    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_SPEED_INFO, ctypes.byref(info))\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return info", "docstring": "Retrieves information about the supported SWO speeds.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA ``JLinkSWOSpeedInfo`` instance describing the target's supported\nSWO speeds.\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_loader = TFModelHandlerTensor(model_uri=known_args.model_path).with_preprocess_fn(lambda image_name: read_image(image_name, known_args.image_dir))\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    image = pipeline | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input) | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines)\n    predictions = image | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n    _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def _validate_query_parameters(self, query, action_spec):\n        \n        processed_params = []\n        for param_name, param_value in query.items():\n            if param_name in action_spec['parameters'].keys():\n                processed_params.append(param_name)\n\n                \n                if action_spec['parameters'][param_name]['type'] == 'array':\n                    if not isinstance(param_value, list):  \n                        return False\n                    else:\n                        for i in param_value:  \n                            if not self.check_type(i, action_spec['parameters'][param_name]['items']['type']):\n                                return False\n\n                elif not self.check_type(param_value, action_spec['parameters'][param_name]['type']):\n                    return False\n\n        \n        if not all(param in processed_params for param, spec in action_spec['parameters'].items()\n                   if spec['in'] == 'query' and 'required' in spec and spec['required']):\n            return False\n        return True", "docstring": "Check the query parameter for the action specification.\n\nArgs:\nquery: query parameter to check.\naction_spec: specification of the action.\n\nReturns:\nTrue if the query is valid.", "source": "juraj-google-style"}
{"code": "def __init__(self, x: int, *args, y: str, **kwargs) -> float:\n    del x, y, args, kwargs", "docstring": "Constructor.\n\nArgs:\nx: Input 1.\n*args: Variable positional args.\ny: Input 2.\n**kwargs: Variable keyword args.\n\nReturns:\nThe result.", "source": "github-repos"}
{"code": "def get_poi(self, **kwargs):\n        \n        \n        params = {\n            'coordinateX': kwargs.get('longitude'),\n            'coordinateY': kwargs.get('latitude'),\n            'tipos': util.ints_to_string(kwargs.get('types')),\n            'Radius': kwargs.get('radius'),\n            'cultureInfo': util.language_code(kwargs.get('lang'))\n        }\n\n        \n        result = self.make_request('geo', 'get_poi', **params)\n\n        \n        if not util.check_result(result, 'poiList'):\n            return False, 'UNKNOWN ERROR'\n\n        \n        values = util.response_list(result, 'poiList')\n        return True, [emtype.Poi(**a) for a in values]", "docstring": "Obtain a list of POI in the given radius.\n\nArgs:\nlatitude (double): Latitude in decimal degrees.\nlongitude (double): Longitude in decimal degrees.\ntypes (list[int] | int): POI IDs (or empty list to get all).\nradius (int): Radius (in meters) of the search.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Poi]), or message string\nin case of error.", "source": "juraj-google-style"}
{"code": "def tar_extract(context):\n    logger.debug('start')\n    mode = get_file_mode_for_reading(context)\n    for item in context['tar']['extract']:\n        source = context.get_formatted_string(item['in'])\n        destination = context.get_formatted_string(item['out'])\n        with tarfile.open(source, mode) as extract_me:\n            logger.debug(f\"Extracting '{source}' to '{destination}'\")\n            extract_me.extractall(destination)\n            logger.info(f\"Extracted '{source}' to '{destination}'\")\n    logger.debug('end')", "docstring": "Extract all members of tar archive to specified path.\n\nArgs:\ncontext: dictionary-like. context is mandatory.\ncontext['tar']['extract'] must exist. It's a dictionary.\nkeys are the path to the tar to extract.\nvalues are the destination paths.\n\nExample:\ntar:\nextract:\n- in: path/to/my.tar.xs\nout: /path/extract/here\n- in: another/tar.xs\nout: .\n\nThis will extract path/to/my.tar.xs to /path/extract/here, and also\nextract another/tar.xs to $PWD.", "source": "codesearchnet"}
{"code": "def _get_object_from_version(cls, operations, ident):\n        \n        version, objname = ident.split(\".\")\n\n        module_ = operations.get_context().script.get_revision(version).module\n        obj = getattr(module_, objname)\n        return obj", "docstring": "Returns a Python object from an Alembic migration module (script).\n\nArgs:\noperations: instance of ``alembic.operations.base.Operations``\nident: string of the format ``version.objname``\n\nReturns:\nthe object whose name is ``objname`` within the Alembic migration\nscript identified by ``version``", "source": "juraj-google-style"}
{"code": "def Trim(self):\n    ms = StreamManager.GetStream()\n    writer = BinaryWriter(ms)\n    self.SerializeUnsigned(writer)\n    writer.WriteByte(1)\n    self.Script.Serialize(writer)\n    writer.WriteHashes([tx.Hash.ToBytes() for tx in self.Transactions])\n    retVal = ms.ToArray()\n    StreamManager.ReleaseStream(ms)\n    return retVal", "docstring": "Returns a byte array that contains only the block header and transaction hash.\n\nReturns:\nbytes:", "source": "codesearchnet"}
{"code": "def as_dict_summary(self, print_subelectrodes=True):\n    chg_comp = self.fully_charged_entry.composition\n    dischg_comp = self.fully_discharged_entry.composition\n    ion = self.working_ion\n    d = {'average_voltage': self.get_average_voltage(), 'max_voltage': self.max_voltage, 'min_voltage': self.min_voltage, 'max_delta_volume': self.max_delta_volume, 'max_voltage_step': self.max_voltage_step, 'capacity_grav': self.get_capacity_grav(), 'capacity_vol': self.get_capacity_vol(), 'energy_grav': self.get_specific_energy(), 'energy_vol': self.get_energy_density(), 'working_ion': self._working_ion.symbol, 'nsteps': self.num_steps, 'framework': self._vpairs[0].framework.to_data_dict, 'formula_charge': chg_comp.reduced_formula, 'id_charge': self.fully_charged_entry.entry_id, 'formula_discharge': dischg_comp.reduced_formula, 'id_discharge': self.fully_discharged_entry.entry_id, 'fracA_charge': chg_comp.get_atomic_fraction(ion), 'fracA_discharge': dischg_comp.get_atomic_fraction(ion), 'max_instability': self.get_max_instability(), 'min_instability': self.get_min_instability(), 'material_ids': [itr_ent.entry_id for itr_ent in self._entries], 'stable_material_ids': [itr_ent.entry_id for itr_ent in self.get_stable_entries()], 'unstable_material_ids': [itr_ent.entry_id for itr_ent in self.get_unstable_entries()]}\n    if all([('decomposition_energy' in itr_ent.data) for itr_ent in self._entries]):\n        d.update({'stability_charge': self.fully_charged_entry.data['decomposition_energy'], 'stability_discharge': self.fully_discharged_entry.data['decomposition_energy'], 'stability_data': {itr_ent.entry_id: itr_ent.data['decomposition_energy'] for itr_ent in self._entries}})\n    if all([('muO2' in itr_ent.data) for itr_ent in self._entries]):\n        d.update({'muO2_data': {itr_ent.entry_id: itr_ent.data['muO2'] for itr_ent in self._entries}})\n    if print_subelectrodes:\n        f_dict = (lambda c: c.as_dict_summary(print_subelectrodes=False))\n        d['adj_pairs'] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=True)))\n        d['all_pairs'] = list(map(f_dict, self.get_sub_electrodes(adjacent_only=False)))\n    return d", "docstring": "Generate a summary dict.\n\nArgs:\nprint_subelectrodes: Also print data on all the possible\nsubelectrodes.\n\nReturns:\nA summary of this electrode\"s properties in dict format.", "source": "codesearchnet"}
{"code": "def target_optimizer_arguments(self):\n    variables = (self.target_network.get_variables() + [variable for name in sorted(self.target_distributions) for variable in self.target_distributions[name].get_variables()])\n    source_variables = (self.network.get_variables() + [variable for name in sorted(self.distributions) for variable in self.distributions[name].get_variables()])\n    arguments = dict(time=self.global_timestep, variables=variables, source_variables=source_variables)\n    if (self.global_model is not None):\n        arguments['global_variables'] = (self.global_model.target_network.get_variables() + [variable for name in sorted(self.global_model.target_distributions) for variable in self.global_model.target_distributions[name].get_variables()])\n    return arguments", "docstring": "Returns the target optimizer arguments including the time, the list of variables to\noptimize, and various functions which the optimizer might require to perform an update\nstep.\n\nReturns:\nTarget optimizer arguments as dict.", "source": "codesearchnet"}
{"code": "def define_singleton(carrier, name, cls, cls_args={}):\n    instance_name = '__{}'.format(name)\n    setattr(carrier, instance_name, None)\n\n    def getter(self):\n        instance = getattr(carrier, instance_name)\n        if (instance is None):\n            instance = cls(**cls_args)\n            setattr(carrier, instance_name, instance)\n        return instance\n    setattr(type(carrier), name, property(getter))", "docstring": "Creates a property with the given name, but the cls will created only with the first call\n\nArgs:\ncarrier: an instance of the class where want to reach the cls instance\nname (str): the variable name of the cls instance\ncls (type): the singleton object type\ncls_args (dict): optional dict for createing cls", "source": "codesearchnet"}
{"code": "async def from_api_token(cls, token=None, api_cls=SlackBotApi):\n        \n        api = api_cls.from_env() if token is None else api_cls(api_token=token)\n        data = await api.execute_method(cls.API_AUTH_ENDPOINT)\n        return cls(data['user_id'], data['user'], api)", "docstring": "Create a new instance from the API token.\n\nArguments:\ntoken (:py:class:`str`, optional): The bot's API token\n(defaults to ``None``, which means looking in the\nenvironment).\napi_cls (:py:class:`type`, optional): The class to create\nas the ``api`` argument for API access (defaults to\n:py:class:`aslack.slack_api.SlackBotApi`).\n\nReturns:\n:py:class:`SlackBot`: The new instance.", "source": "juraj-google-style"}
{"code": "def inner(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Inner().symbolic_call(x1, x2)\n    return backend.numpy.inner(x1, x2)", "docstring": "Return the inner product of two tensors.\n\nOrdinary inner product of vectors for 1-D tensors\n(without complex conjugation), in higher dimensions\na sum product over the last axes.\n\nMultidimensional arrays are treated as vectors by flattening\nall but their last axes. The resulting dot product is performed\nover their last axes.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor. The last dimension of `x1` and `x2`\nmust match.\n\nReturns:\nOutput tensor. The shape of the output is determined by\nbroadcasting the shapes of `x1` and `x2` after removing\ntheir last axes.", "source": "github-repos"}
{"code": "def count_params(weights):\n    unique_weights = {id(w): w for w in weights}.values()\n    weight_shapes = [w.shape.as_list() for w in unique_weights]\n    standardized_weight_shapes = [[0 if w_i is None else w_i for w_i in w] for w in weight_shapes]\n    return int(sum((np.prod(p) for p in standardized_weight_shapes)))", "docstring": "Count the total number of scalars composing the weights.\n\nArgs:\nweights: An iterable containing the weights on which to compute params\n\nReturns:\nThe total number of scalars composing the weights", "source": "github-repos"}
{"code": "def dump_ddl(metadata: MetaData,\n             dialect_name: str,\n             fileobj: TextIO = sys.stdout,\n             checkfirst: bool = True) -> None:\n    \n    \n    \n    \n    \n    def dump(querysql, *multiparams, **params):\n        compsql = querysql.compile(dialect=engine.dialect)\n        writeline_nl(fileobj, \"{sql};\".format(sql=compsql))\n\n    writeline_nl(fileobj,\n                 sql_comment(\"Schema (for dialect {}):\".format(dialect_name)))\n    engine = create_engine('{dialect}:\n                           strategy='mock', executor=dump)\n    metadata.create_all(engine, checkfirst=checkfirst)", "docstring": "Sends schema-creating DDL from the metadata to the dump engine.\nThis makes ``CREATE TABLE`` statements.\n\nArgs:\nmetadata: SQLAlchemy :class:`MetaData`\ndialect_name: string name of SQL dialect to generate DDL in\nfileobj: file-like object to send DDL to\ncheckfirst: if ``True``, use ``CREATE TABLE IF NOT EXISTS`` or\nequivalent.", "source": "juraj-google-style"}
{"code": "def choose_palette(stream=sys.stdout, basic_palette=None):\n    result = None\n    pal = basic_palette\n    log.debug('console version: %s', __version__)\n    log.debug('X11_RGB_PATHS: %r', X11_RGB_PATHS)\n    if color_is_forced():\n        (result, pal) = (detect_palette_support(basic_palette=pal) or 'basic')\n    elif (is_a_tty(stream=stream) and color_is_allowed()):\n        (result, pal) = detect_palette_support(basic_palette=pal)\n    proximity.build_color_tables(pal)\n    log.debug('Basic palette: %r', pal)\n    log.debug('%r', result)\n    return result", "docstring": "Make a best effort to automatically determine whether to enable\nANSI sequences, and if so, which color palettes are available.\n\nThis is the main function of the module—meant to be used unless\nsomething more specific is needed.\n\nTakes the following factors into account:\n\n- Whether output stream is a TTY.\n- ``TERM``, ``ANSICON`` environment variables\n- ``CLICOLOR``, ``NO_COLOR`` environment variables\n\nArguments:\nstream:             Which output file to check: stdout, stderr\nbasic_palette:      Force the platform-dependent 16 color palette,\nfor testing.  List of 16 rgb-int tuples.\nReturns:\nNone, str: 'basic', 'extended', or 'truecolor'", "source": "codesearchnet"}
{"code": "def Increment(self, key):\n    with self._lock:\n        if _IsHashable(key):\n            if key in self._d:\n                self._d[key] += 1\n            else:\n                self._d[key] = 1\n        else:\n            try:\n                i = self._unhashable_items.index(key)\n                self._unhashable_counts[i] += 1\n            except ValueError:\n                self._unhashable_items.append(key)\n                self._unhashable_counts.append(1)", "docstring": "Atomically increment a count by 1. Insert the item if not present.\n\nArgs:\nkey: the key being counted.", "source": "github-repos"}
{"code": "def sort_prefixes(orig, prefixes='@+'):\n    new = ''\n    for prefix in prefixes:\n        if (prefix in orig):\n            new += prefix\n    return new", "docstring": "Returns a sorted list of prefixes.\n\nArgs:\norig (str): Unsorted list of prefixes.\nprefixes (str): List of prefixes, from highest-priv to lowest.", "source": "codesearchnet"}
{"code": "def get_all_models_including_attached_models(model):\n    \n    if (hasattr(model, \"_tx_model_repository\")):\n        models = list(\n            model._tx_model_repository.all_models.filename_to_model.values())\n        if model not in models:\n            models.append(model)\n    else:\n        models = [model]\n    return models", "docstring": "get a list of all models stored within a model\n(including the owning model).\n\nArgs:\nmodel: the owning model\n\nReturns:\na list of all models", "source": "juraj-google-style"}
{"code": "def dump(self):\n    return {u'storage_data': [x.asdict() for x in self.storage_data], u'streaming_data': [x.asdict() for x in self.streaming_data]}", "docstring": "Serialize the state of this InMemoryStorageEngine to a dict.\n\nReturns:\ndict: The serialized data.", "source": "codesearchnet"}
{"code": "def mktemp(self, container: Container) -> str:\n    r = self.__api.post('containers/{}/tempfile'.format(container.uid))\n    if (r.status_code == 200):\n        return r.json()\n    self.__api.handle_erroneous_response(r)", "docstring": "Generates a temporary file for a given container.\n\nReturns:\nthe path to the temporary file inside the given container.", "source": "codesearchnet"}
{"code": "def lsfiles(root=\".\", **kwargs):\n    \n    paths = ls(root=root, **kwargs)\n    if isfile(root):\n        return paths\n    return [_path for _path in paths if isfile(path(root, _path))]", "docstring": "Return only files from a directory listing.\n\nArguments:\n\nroot (str): Path to directory. Can be relative or absolute.\n**kwargs: Any additional arguments to be passed to ls().\n\nReturns:\n\nlist of str: A list of file paths.\n\nRaises:\n\nOSError: If root directory does not exist.", "source": "juraj-google-style"}
{"code": "def send_rpc_sync(self, conn_id, address, rpc_id, payload, timeout):\n    done = threading.Event()\n    result = {}\n\n    def send_rpc_done(conn_id, adapter_id, status, reason, rpc_status, resp_payload):\n        result['success'] = status\n        result['failure_reason'] = reason\n        result['status'] = rpc_status\n        result['payload'] = resp_payload\n        done.set()\n    self.send_rpc_async(conn_id, address, rpc_id, payload, timeout, send_rpc_done)\n    done.wait()\n    return result", "docstring": "Synchronously send an RPC to this IOTile device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\naddress (int): the address of the tile that we wish to send the RPC to\nrpc_id (int): the 16-bit id of the RPC we want to call\npayload (bytearray): the payload of the command\ntimeout (float): the number of seconds to wait for the RPC to execute\n\nReturns:\ndict: A dictionary with four elements\n'success': a bool indicating whether we received a response to our attempted RPC\n'failure_reason': a string with the reason for the failure if success == False\n'status': the one byte status code returned for the RPC if success == True else None\n'payload': a bytearray with the payload returned by RPC if success == True else None", "source": "codesearchnet"}
{"code": "def create_fork_relation(self, forked_from_id, **kwargs):\n        \n        path = '/projects/%s/fork/%s' % (self.get_id(), forked_from_id)\n        self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Create a forked from/to relation between existing projects.\n\nArgs:\nforked_from_id (int): The ID of the project that was forked from\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the relation could not be created", "source": "juraj-google-style"}
{"code": "def find(self, name):\n    \n    for i in range(0, len(self)):\n      if self[i].name == name:\n        return i\n    return -1", "docstring": "Get the index of a field in the flattened list given its (fully-qualified) name.\n\nArgs:\nname: the fully-qualified name of the field.\nReturns:\nThe index of the field, if found; else -1.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    filename = parser_mediator.GetFilename()\n    file_size = file_object.get_size()\n\n    if file_size <= 0:\n      raise errors.UnableToParseFile(\n          'File size: {0:d} bytes is less equal 0.'.format(file_size))\n\n    \n    if file_size > 50000000:\n      raise errors.UnableToParseFile(\n          'File size: {0:d} bytes is larger than 50 MB.'.format(file_size))\n\n    top_level_object = self.GetTopLevel(file_object)\n    if not top_level_object:\n      raise errors.UnableToParseFile(\n          'Unable to parse: {0:s} skipping.'.format(filename))\n\n    \n    matching_plugin = None\n    for plugin in self._plugins:\n      try:\n        plugin.UpdateChainAndProcess(\n            parser_mediator, plist_name=filename, top_level=top_level_object)\n        matching_plugin = plugin\n\n      except errors.WrongPlistPlugin as exception:\n        logger.debug('Wrong plugin: {0:s} for: {1:s}'.format(\n            exception.args[0], exception.args[1]))\n\n    if not matching_plugin and self._default_plugin:\n      self._default_plugin.UpdateChainAndProcess(\n          parser_mediator, plist_name=filename, top_level=top_level_object)", "docstring": "Parses a plist file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def add_note(self, note):\n        \n        notes = self.cached_json\n\n        if not note.moderator:\n            note.moderator = self.r.user.me().name\n\n        \n        \n        try:\n            mod_index = notes['constants']['users'].index(note.moderator)\n        except ValueError:\n            notes['constants']['users'].append(note.moderator)\n            mod_index = notes['constants']['users'].index(note.moderator)\n\n        \n        \n        try:\n            warn_index = notes['constants']['warnings'].index(note.warning)\n        except ValueError:\n            if note.warning in Note.warnings:\n                notes['constants']['warnings'].append(note.warning)\n                warn_index = notes['constants']['warnings'].index(note.warning)\n            else:\n                raise ValueError('Warning type not valid: ' + note.warning)\n\n        new_note = {\n            'n': note.note,\n            't': note.time,\n            'm': mod_index,\n            'l': note.link,\n            'w': warn_index\n        }\n\n        try:\n            notes['users'][note.username]['ns'].insert(0, new_note)\n        except KeyError:\n            notes['users'][note.username] = {'ns': [new_note]}\n\n        return '\"create new note on user {}\" via puni'.format(note.username)", "docstring": "Add a note to the usernotes wiki page.\n\nArguments:\nnote: the note to be added (Note)\n\nReturns the update message for the usernotes wiki\n\nRaises:\nValueError when the warning type of the note can not be found in the\nstored list of warnings.", "source": "juraj-google-style"}
{"code": "def get_status_tree(root_pipeline_id):\n    root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id)\n    root_pipeline_record = db.get(root_pipeline_key)\n    if (root_pipeline_record is None):\n        raise PipelineStatusError(('Could not find pipeline ID \"%s\"' % root_pipeline_id))\n    actual_root_key = _PipelineRecord.root_pipeline.get_value_for_datastore(root_pipeline_record)\n    if (actual_root_key != root_pipeline_key):\n        root_pipeline_key = actual_root_key\n        root_pipeline_id = root_pipeline_key.id_or_name()\n        root_pipeline_record = db.get(root_pipeline_key)\n        if (not root_pipeline_record):\n            raise PipelineStatusError(('Could not find pipeline ID \"%s\"' % root_pipeline_id))\n    queries = {}\n    for model in (_PipelineRecord, _SlotRecord, _BarrierRecord, _StatusRecord):\n        queries[model] = model.all().filter('root_pipeline =', root_pipeline_key).run(batch_size=1000)\n    found_pipeline_dict = dict(((stage.key(), stage) for stage in queries[_PipelineRecord]))\n    found_slot_dict = dict(((slot.key(), slot) for slot in queries[_SlotRecord]))\n    found_barrier_dict = dict(((barrier.key(), barrier) for barrier in queries[_BarrierRecord]))\n    found_status_dict = dict(((status.key(), status) for status in queries[_StatusRecord]))\n    valid_pipeline_keys = set([root_pipeline_key])\n    slot_filler_dict = {}\n    expand_stack = [root_pipeline_record]\n    while expand_stack:\n        old_stack = expand_stack\n        expand_stack = []\n        for pipeline_record in old_stack:\n            for child_pipeline_key in pipeline_record.fanned_out:\n                child_pipeline_record = found_pipeline_dict.get(child_pipeline_key)\n                if (child_pipeline_record is None):\n                    raise PipelineStatusError(('Pipeline ID \"%s\" points to child ID \"%s\" which does not exist.' % (pipeline_record.key().name(), child_pipeline_key.name())))\n                expand_stack.append(child_pipeline_record)\n                valid_pipeline_keys.add(child_pipeline_key)\n                child_outputs = child_pipeline_record.params['output_slots']\n                for output_slot_key in child_outputs.itervalues():\n                    slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key\n    output = {'rootPipelineId': root_pipeline_id, 'slots': {}, 'pipelines': {}}\n    for pipeline_key in found_pipeline_dict.keys():\n        if (pipeline_key not in valid_pipeline_keys):\n            continue\n        output['pipelines'][pipeline_key.name()] = _get_internal_status(pipeline_key=pipeline_key, pipeline_dict=found_pipeline_dict, slot_dict=found_slot_dict, barrier_dict=found_barrier_dict, status_dict=found_status_dict)\n    for (slot_key, filler_pipeline_key) in slot_filler_dict.iteritems():\n        output['slots'][str(slot_key)] = _get_internal_slot(slot_key=slot_key, filler_pipeline_key=filler_pipeline_key, slot_dict=found_slot_dict)\n    return output", "docstring": "Gets the full status tree of a pipeline.\n\nArgs:\nroot_pipeline_id: The pipeline ID to get status for.\n\nReturns:\nDictionary with the keys:\nrootPipelineId: The ID of the root pipeline.\nslots: Mapping of slot IDs to result of from _get_internal_slot.\npipelines: Mapping of pipeline IDs to result of _get_internal_status.\n\nRaises:\nPipelineStatusError if any input is bad.", "source": "codesearchnet"}
{"code": "def splat(f: Callable[(..., A)]) -> Callable[([Iterable], A)]:\n\n    def splatted(args):\n        return f(*args)\n    return splatted", "docstring": "Convert a function taking multiple arguments into a function taking a single iterable argument.\n\nArgs:\nf: Any function\n\nReturns:\nA function that accepts a single iterable argument. Each element of this iterable argument is passed as an\nargument to ``f``.\n\nExample:\n$ def f(a, b, c):\n$     return a + b + c\n$\n$ f(1, 2, 3)  # 6\n$ g = splat(f)\n$ g([1, 2, 3])  # 6", "source": "codesearchnet"}
{"code": "def Analyze(self, hashes):\n    \n    logger.debug(\n        'Opening connection to {0:s}:{1:d}'.format(self._host, self._port))\n\n    nsrl_socket = self._GetSocket()\n    if not nsrl_socket:\n      self.SignalAbort()\n      return []\n\n    hash_analyses = []\n    for digest in hashes:\n      response = self._QueryHash(nsrl_socket, digest)\n      if response is None:\n        continue\n\n      hash_analysis = interface.HashAnalysis(digest, response)\n      hash_analyses.append(hash_analysis)\n\n    nsrl_socket.close()\n\n    logger.debug(\n        'Closed connection to {0:s}:{1:d}'.format(self._host, self._port))\n\n    return hash_analyses", "docstring": "Looks up hashes in nsrlsvr.\n\nArgs:\nhashes (list[str]): hash values to look up.\n\nReturns:\nlist[HashAnalysis]: analysis results, or an empty list on error.", "source": "juraj-google-style"}
{"code": "def set_hyperparameters(self, hyperparameters):\n        \n        for block_name, block_hyperparams in hyperparameters.items():\n            self.blocks[block_name].set_hyperparameters(block_hyperparams)", "docstring": "Set new hyperparameter values for some blocks.\n\nArgs:\nhyperparameters (dict): A dictionary containing the block names as\nkeys and the new hyperparameters dictionary\nas values.", "source": "juraj-google-style"}
{"code": "def _prepare_for_training(self, job_name=None):\n    super(Framework, self)._prepare_for_training(job_name=job_name)\n    if (self.source_dir and (not self.source_dir.lower().startswith('s3:\n        validate_source_dir(self.entry_point, self.source_dir)\n    local_code = get_config_value('local.local_code', self.sagemaker_session.config)\n    if (self.sagemaker_session.local_mode and local_code):\n        if (self.source_dir is None):\n            self.source_dir = os.path.dirname(self.entry_point)\n        self.entry_point = os.path.basename(self.entry_point)\n        code_dir = ('file:\n        script = self.entry_point\n    else:\n        self.uploaded_code = self._stage_user_code_in_s3()\n        code_dir = self.uploaded_code.s3_prefix\n        script = self.uploaded_code.script_name\n    self._hyperparameters[DIR_PARAM_NAME] = code_dir\n    self._hyperparameters[SCRIPT_PARAM_NAME] = script\n    self._hyperparameters[CLOUDWATCH_METRICS_PARAM_NAME] = self.enable_cloudwatch_metrics\n    self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level\n    self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name\n    self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name", "docstring": "Set hyperparameters needed for training. This method will also validate ``source_dir``.\n\nArgs:\n* job_name (str): Name of the training job to be created. If not specified, one is generated,\nusing the base name given to the constructor if applicable.", "source": "codesearchnet"}
{"code": "def main(jlink_serial, device):\n    \n    buf = StringIO.StringIO()\n    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)\n    jlink.open(serial_no=jlink_serial)\n\n    \n    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)\n    jlink.connect(device, verbose=True)\n\n    sys.stdout.write('ARM Id: %d\\n' % jlink.core_id())\n    sys.stdout.write('CPU Id: %d\\n' % jlink.core_cpu())\n    sys.stdout.write('Core Name: %s\\n' % jlink.core_name())\n    sys.stdout.write('Device Family: %d\\n' % jlink.device_family())", "docstring": "Prints the core's information.\n\nArgs:\njlink_serial (str): the J-Link serial number\ndevice (str): the target CPU\n\nReturns:\nAlways returns ``0``.\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def pull(self, device_filename, dest_file=None, timeout_ms=None):\n    should_return_data = (dest_file is None)\n    if isinstance(dest_file, six.string_types):\n        dest_file = open(dest_file, 'w')\n    elif (dest_file is None):\n        dest_file = six.StringIO()\n    self.filesync_service.recv(device_filename, dest_file, timeouts.PolledTimeout.from_millis(timeout_ms))\n    if should_return_data:\n        return dest_file.getvalue()", "docstring": "Pull file from device.\n\nArguments:\ndevice_filename: The filename on the device to pull.\ndest_file: If set, a filename or writable file-like object.\ntimeout_ms: Expected timeout for the pull.\n\nReturns:\nThe file data if dest_file is not set, None otherwise.", "source": "codesearchnet"}
{"code": "def get_bq_tableschema(schema):\n    if isinstance(schema, (bigquery.TableSchema, value_provider.ValueProvider)) or callable(schema) or schema is None:\n        return schema\n    elif isinstance(schema, str):\n        return get_table_schema_from_string(schema)\n    elif isinstance(schema, dict):\n        schema_string = json.dumps(schema)\n        return parse_table_schema_from_json(schema_string)\n    else:\n        raise TypeError('Unexpected schema argument: %s.' % schema)", "docstring": "Convert the table schema to a TableSchema object.\n\nArgs:\nschema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):\nThe schema to be used if the BigQuery table to write has to be created.\nThis can either be a dict or string or in the TableSchema format.\n\nReturns:\n~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema: The schema as a TableSchema object.", "source": "github-repos"}
{"code": "def __init__(self, scope, parent, id, name, result, definition=True):\n        \n        CodeEntity.__init__(self, scope, parent)\n        self.id = id\n        self.name = name\n        self.result = result\n        self.parameters = []\n        self.body = CodeBlock(self, self, explicit=True)\n        self.member_of = None\n        self.references = []\n        self._definition = self if definition else None", "docstring": "Constructor for functions.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nid: An unique identifier for this function.\nname (str): The name of the function in the program.\nresult (str): The return type of the function in the program.", "source": "juraj-google-style"}
{"code": "def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:\n    sequence_length = sequence.shape[1]\n    indices = [lag - shift for lag in self.config.lags_sequence]\n    if max(indices) + subsequences_length > sequence_length:\n        raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}')\n    lagged_values = []\n    for lag_index in indices:\n        begin_index = -lag_index - subsequences_length\n        end_index = -lag_index if lag_index > 0 else None\n        lagged_values.append(sequence[:, begin_index:end_index, ...])\n    return torch.stack(lagged_values, dim=-1)", "docstring": "Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),\nwhere S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,\nj, :, k] = sequence[i, -indices[k]-S+j, :].\n\nArgs:\nsequence: Tensor\nThe sequence from which lagged subsequences should be extracted. Shape: (N, T, C).\nsubsequences_length : int\nLength of the subsequences to be extracted.\nshift: int\nShift the lags by this amount back.", "source": "github-repos"}
{"code": "def check_target_module_exists(optim_target_modules, key: str, return_is_regex: bool=False):\n    target_module_found = False\n    is_regex = False\n    if isinstance(optim_target_modules, str):\n        target_module_found = bool(re.fullmatch(optim_target_modules, key))\n        is_regex = True if not optim_target_modules == key else False\n    elif key in optim_target_modules:\n        target_module_found = True\n    elif any((target_key in key for target_key in optim_target_modules)):\n        target_module_found = True\n    elif any((bool(re.fullmatch(optim_target_module, key)) for optim_target_module in optim_target_modules)):\n        target_module_found = True\n        is_regex = True\n    if return_is_regex:\n        return (target_module_found, is_regex)\n    return target_module_found", "docstring": "A helper method to check if the passed module's key name matches any of the target modules in the optim_target_modules.\n\nArgs:\noptim_target_modules (`Union[str, List[str]]`):\nA list of strings to try to match. Can be also a full string.\nkey (`str`):\nA key to search any matches in optim_target_modules\nreturn_is_regex (`bool`):\nIf set to `True`, the method will return whether the passed `optim_target_modules`\nis a regex or not.\n\nReturns:\n`bool` : True of match object if key matches any target modules from config, False or\nNone if no match found\n`bool` : If the matched target module is a regex to silence out the warnings in Trainer\nfor extra modules being found (only if `target_module_found=True` for an array of regex).", "source": "github-repos"}
{"code": "def pgm(X, prox_f, step_f, accelerated=False, relax=None, e_rel=1e-06, max_iter=1000, traceback=None):\n    stepper = utils.NesterovStepper(accelerated=accelerated)\n    if (relax is not None):\n        assert ((relax > 0) and (relax < 1.5))\n    if (traceback is not None):\n        traceback.update_history(0, X=X, step_f=step_f)\n        if accelerated:\n            traceback.update_history(0, omega=0)\n        if (relax is not None):\n            traceback.update_history(0, relax=relax)\n    for it in range(max_iter):\n        omega = stepper.omega\n        if (omega > 0):\n            _X = (X + (omega * (X - X_)))\n        else:\n            _X = X\n        X_ = X.copy()\n        X[:] = prox_f(_X, step_f)\n        if (relax is not None):\n            X += ((relax - 1) * (X - X_))\n        if (traceback is not None):\n            traceback.update_history((it + 1), X=X, step_f=step_f)\n            if accelerated:\n                traceback.update_history((it + 1), omega=omega)\n            if (relax is not None):\n                traceback.update_history((it + 1), relax=relax)\n        converged = (utils.l2sq((X - X_)) <= ((e_rel ** 2) * utils.l2sq(X)))\n        if converged:\n            break\n    logger.info('Completed {0} iterations'.format((it + 1)))\n    if (not converged):\n        logger.warning('Solution did not converge')\n    return (converged, (X - X_))", "docstring": "Proximal Gradient Method\n\nAdapted from Combettes 2009, Algorithm 3.4.\nThe accelerated version is Algorithm 3.6 with modifications\nfrom Xu & Yin (2015).\n\nArgs:\nX: initial X, will be updated\nprox_f: proxed function f (the forward-backward step)\nstep_f: step size, < 1/L with L being the Lipschitz constant of grad f\naccelerated: If Nesterov acceleration should be used\nrelax: (over)relaxation parameter, 0 < relax < 1.5\ne_rel: relative error of X\nmax_iter: maximum iteration, irrespective of residual error\ntraceback: utils.Traceback to hold variable histories\n\nReturns:\nconverged: whether the optimizer has converged within e_rel\nerror: X^it - X^it-1", "source": "codesearchnet"}
{"code": "def CopyConfig(self):\n    newconf = self.MakeNewConfig()\n    newconf.raw_data = copy.deepcopy(self.raw_data)\n    newconf.files = copy.deepcopy(self.files)\n    newconf.secondary_config_parsers = copy.deepcopy(self.secondary_config_parsers)\n    newconf.writeback = copy.deepcopy(self.writeback)\n    newconf.writeback_data = copy.deepcopy(self.writeback_data)\n    newconf.global_override = copy.deepcopy(self.global_override)\n    newconf.context_descriptions = copy.deepcopy(self.context_descriptions)\n    newconf.constants = copy.deepcopy(self.constants)\n    newconf.initialized = copy.deepcopy(self.initialized)\n    return newconf", "docstring": "Make a complete new copy of the current config.\n\nThis includes all options as they currently are. If you want a base config\nwith defaults use MakeNewConfig.\n\nReturns:\nA new config object with the same data as self.", "source": "codesearchnet"}
{"code": "def create_requests(\n    requests: Union[Dict, List], *, context: Any = NOCONTEXT, convert_camel_case: bool\n) -> Union[Request, Set[Request]]:\n    \n    if isinstance(requests, list):\n        return {\n            Request(context=context, convert_camel_case=convert_camel_case, **request)\n            for request in requests\n        }\n    return Request(context=context, convert_camel_case=convert_camel_case, **requests)", "docstring": "Create a Request object from a dictionary (or list of them).\n\nArgs:\nrequests: Request object, or a collection of them.\nmethods: The list of methods that can be called.\ncontext: If specified, will be the first positional argument in all requests.\nconvert_camel_case: Will convert the method name/any named params to snake case.\n\nReturns:\nA Request object, or a collection of them.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteUInt32(self.Magic)\n        writer.WriteFixedString(self.Command, 12)\n        writer.WriteUInt32(len(self.Payload))\n        writer.WriteUInt32(self.Checksum)\n        writer.WriteBytes(self.Payload)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._gzip_file_object:\n      raise IOError('Not opened.')\n\n    if whence == os.SEEK_CUR:\n      offset += self._current_offset\n    elif whence == os.SEEK_END:\n      offset += self.uncompressed_data_size\n    elif whence != os.SEEK_SET:\n      raise IOError('Unsupported whence.')\n\n    if offset < 0:\n      raise IOError('Invalid offset value less than zero.')\n\n    self._current_offset = offset", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed or the file has not been opened.\nOSError: if the seek failed or the file has not been opened.", "source": "juraj-google-style"}
{"code": "def removeRouterPrefix(self, prefixEntry):\n        \n        print '%s call removeRouterPrefix' % self.port\n        print prefixEntry\n        prefix = self.__convertIp6PrefixStringToIp6Address(str(prefixEntry))\n        try:\n            prefixLen = 64\n            cmd = 'prefix remove %s/%d' % (prefix, prefixLen)\n            print cmd\n            if self.__sendCommand(cmd)[0] == 'Done':\n                \n                return self.__sendCommand('netdataregister')[0] == 'Done'\n            else:\n                return False\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"removeRouterPrefix() Error: \" + str(e))", "docstring": "remove the configured prefix on a border router\n\nArgs:\nprefixEntry: a on-mesh prefix entry\n\nReturns:\nTrue: successful to remove the prefix entry from border router\nFalse: fail to remove the prefix entry from border router", "source": "juraj-google-style"}
{"code": "def orient_averaged_fixed(tm):\n    S = np.zeros((2, 2), dtype=complex)\n    Z = np.zeros((4, 4))\n    ap = np.linspace(0, 360, (tm.n_alpha + 1))[:(- 1)]\n    aw = (1.0 / tm.n_alpha)\n    for alpha in ap:\n        for (beta, w) in zip(tm.beta_p, tm.beta_w):\n            (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)\n            S += (w * S_ang)\n            Z += (w * Z_ang)\n    sw = tm.beta_w.sum()\n    S *= (aw / sw)\n    Z *= (aw / sw)\n    return (S, Z)", "docstring": "Compute the T-matrix using variable orientation scatterers.\n\nThis method uses a fast Gaussian quadrature and is suitable\nfor most use. Uses the set particle orientation PDF, ignoring\nthe alpha and beta attributes.\n\nArgs:\ntm: TMatrix (or descendant) instance.\n\nReturns:\nThe amplitude (S) and phase (Z) matrices.", "source": "codesearchnet"}
{"code": "def compute_stats(input_handle, stats_path, max_rows=None, for_eval=False, pipeline_args=None, publish_to_bq=None, metrics_dataset=None, metrics_table=None, project=None):\n    namespace = metrics_table\n    pipeline = beam.Pipeline(argv=pipeline_args)\n    metrics_monitor = None\n    if publish_to_bq:\n        metrics_monitor = MetricsReader(publish_to_bq=publish_to_bq, project_name=project, bq_table=metrics_table, bq_dataset=metrics_dataset, namespace=namespace, filters=MetricsFilter().with_namespace(namespace))\n    query = taxi.make_sql(table_name=input_handle, max_rows=max_rows, for_eval=for_eval)\n    raw_data = pipeline | 'ReadBigQuery' >> ReadFromBigQuery(query=query, project=project, use_standard_sql=True) | 'Measure time: Start' >> beam.ParDo(MeasureTime(namespace)) | 'ConvertToTFDVInput' >> beam.Map(lambda x: {key: np.asarray([x[key]]) for key in x if x[key] is not None})\n    _ = raw_data | 'GenerateStatistics' >> tfdv.GenerateStatistics() | 'Measure time: End' >> beam.ParDo(MeasureTime(namespace)) | 'WriteStatsOutput' >> beam.io.WriteToTFRecord(stats_path, shard_name_template='', coder=beam.coders.ProtoCoder(statistics_pb2.DatasetFeatureStatisticsList))\n    result = pipeline.run()\n    result.wait_until_finish()\n    if metrics_monitor:\n        metrics_monitor.publish_metrics(result)", "docstring": "Computes statistics on the input data.\n\nArgs:\ninput_handle: BigQuery table name to process specified as DATASET.TABLE or\npath to csv file with input data.\nstats_path: Directory in which stats are materialized.\nmax_rows: Number of rows to query from BigQuery\nfor_eval: Query for eval set rows from BigQuery\npipeline_args: additional DataflowRunner or DirectRunner args passed to the\nbeam pipeline.", "source": "github-repos"}
{"code": "def from_operator(cls, operator):\n    validation_fields = ('is_non_singular', 'is_self_adjoint', 'is_positive_definite', 'is_square')\n    kwargs = _extract_attrs(operator, keys=set(operator._composite_tensor_fields + validation_fields))\n    non_tensor_params = {}\n    param_specs = {}\n    for k, v in list(kwargs.items()):\n        type_spec_or_v = _extract_type_spec_recursively(v)\n        is_tensor = [isinstance(x, type_spec.TypeSpec) for x in nest.flatten(type_spec_or_v)]\n        if all(is_tensor):\n            param_specs[k] = type_spec_or_v\n        elif not any(is_tensor):\n            non_tensor_params[k] = v\n        else:\n            raise NotImplementedError(f'Field {k} contains a mix of `Tensor` and  non-`Tensor` values.')\n    return cls(param_specs=param_specs, non_tensor_params=non_tensor_params, prefer_static_fields=operator._composite_tensor_prefer_static_fields)", "docstring": "Builds a `_LinearOperatorSpec` from a `LinearOperator` instance.\n\nArgs:\noperator: An instance of `LinearOperator`.\n\nReturns:\nlinear_operator_spec: An instance of `_LinearOperatorSpec` to be used as\nthe `TypeSpec` of `operator`.", "source": "github-repos"}
{"code": "def traverse(self, index=0):\n        \n        if index < len(self.nodes):\n            for entity in self.nodes[index]:\n                for next_result in self.traverse(index=index+1):\n                    if isinstance(entity, list):\n                        yield entity + next_result\n                    else:\n                        yield [entity] + next_result\n        else:\n            yield []", "docstring": "This is used to produce a list of lists where each each item\nin that list is a diffrent combination of items from the lists\nwithin with every combination of such values.\n\nArgs:\nindex (int) : the index at witch to start the list.\nNote this is used only in the function as a processing\n\nReturns:\nlist : is every combination.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(SQLite4n6TimeOutputModule, self).__init__(output_mediator)\n    self._connection = None\n    self._count = 0\n    self._cursor = None\n    self._filename = None", "docstring": "Initializes the output module object.\n\nArgs:\noutput_mediator (OutputMediator): output mediator.\n\nRaises:\nValueError: if the file handle is missing.", "source": "juraj-google-style"}
{"code": "def current(sam=False):\n    try:\n        if sam:\n            user_name = win32api.GetUserNameEx(win32con.NameSamCompatible)\n        else:\n            user_name = win32api.GetUserName()\n    except pywintypes.error as exc:\n        log.error('Failed to get current user')\n        log.error('nbr: %s', exc.winerror)\n        log.error('ctx: %s', exc.funcname)\n        log.error('msg: %s', exc.strerror)\n        raise CommandExecutionError('Failed to get current user', info=exc)\n    if (not user_name):\n        raise CommandExecutionError('Failed to get current user')\n    return user_name", "docstring": "Get the username that salt-minion is running under. If salt-minion is\nrunning as a service it should return the Local System account. If salt is\nrunning from a command prompt it should return the username that started the\ncommand prompt.\n\n.. versionadded:: 2015.5.6\n\nArgs:\nsam (bool, optional): False returns just the username without any domain\nnotation. True returns the domain with the username in the SAM\nformat. Ie: ``domain\\\\username``\n\nReturns:\nstr: Returns username\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.current", "source": "codesearchnet"}
{"code": "def openbin(self, path, mode='r', buffering=(- 1), **options):\n    self.check()\n    _path = self.validatepath(path)\n    _mode = Mode(mode)\n    _mode.validate_bin()\n    with self._lock:\n        if _mode.exclusive:\n            if self.exists(_path):\n                raise errors.FileExists(path)\n            else:\n                _mode = Mode(''.join(set(mode.replace('x', 'w'))))\n        elif (_mode.reading and (not _mode.create) and (not self.exists(_path))):\n            raise errors.ResourceNotFound(path)\n        elif self.isdir(_path):\n            raise errors.FileExpected(path)\n        with convert_sshfs_errors('openbin', path):\n            _sftp = self._client.open_sftp()\n            handle = _sftp.open(_path, mode=_mode.to_platform_bin(), bufsize=buffering)\n            handle.set_pipelined(options.get('pipelined', True))\n            return SSHFile(handle)", "docstring": "Open a binary file-like object.\n\nArguments:\npath (str): A path on the filesystem.\nmode (str): Mode to open the file (must be a valid, non-text mode).\nSince this method only opens binary files, the ``b`` in the mode\nis implied.\nbuffering (int): the buffering policy (-1 to use default buffering,\n0 to disable completely, 1 to enable line based buffering, or\nany larger positive integer for a custom buffer size).\n\nKeyword Arguments:\npipelined (bool): Set the transfer in pipelined mode (should\nimprove transfer speed). Defaults to ``True``.\n\nRaises:\nfs.errors.FileExpected: if the path if not a file.\nfs.errors.FileExists: if the file already exists and\n*exclusive mode* is specified (``x`` in the mode).\nfs.errors.ResourceNotFound: if the path does not exist.\n\nReturns:\nio.IOBase: a file handle.", "source": "codesearchnet"}
{"code": "def bessel_y1(x, name=None):\n    with ops.name_scope(name, 'bessel_y1', [x]):\n        return gen_special_math_ops.bessel_y1(x)", "docstring": "Computes the Bessel y1 function of `x` element-wise.\n\nModified Bessel function of order 1.\n\n>>> tf.math.special.bessel_y1([0.5, 1., 2., 4.]).numpy()\narray([-1.47147239, -0.78121282, -0.10703243,  0.39792571], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.y1\n@end_compatibility", "source": "github-repos"}
{"code": "def get_generation_mode(self, assistant_model: Optional['PreTrainedModel']=None) -> GenerationMode:\n    if self.constraints is not None or self.force_words_ids is not None:\n        generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH\n    elif self.num_beams == 1:\n        if self.do_sample is False:\n            if self.top_k is not None and self.top_k > 1 and (self.penalty_alpha is not None) and (self.penalty_alpha > 0):\n                generation_mode = GenerationMode.CONTRASTIVE_SEARCH\n            else:\n                generation_mode = GenerationMode.GREEDY_SEARCH\n        else:\n            generation_mode = GenerationMode.SAMPLE\n    elif self.num_beam_groups > 1:\n        generation_mode = GenerationMode.GROUP_BEAM_SEARCH\n    elif self.do_sample is True:\n        generation_mode = GenerationMode.BEAM_SAMPLE\n    else:\n        generation_mode = GenerationMode.BEAM_SEARCH\n    if assistant_model is not None or self.prompt_lookup_num_tokens is not None or self.assistant_early_exit is not None:\n        if generation_mode in ('greedy_search', 'sample'):\n            generation_mode = GenerationMode.ASSISTED_GENERATION\n        else:\n            logger.warning(f\"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate is only supported with Greedy Search and Sample. However, the base decoding mode (based on current flags) is {generation_mode} -- some of the set flags will be ignored.\")\n    if self.dola_layers is not None:\n        if generation_mode in ('greedy_search', 'sample'):\n            generation_mode = GenerationMode.DOLA_GENERATION\n        else:\n            logger.warning(f\"You've set `dola_layers`, which triggers DoLa generate. Currently, DoLa generate is only supported with Greedy Search and Sample.  However, the base decoding mode (based on current flags) is {generation_mode} -- some of the set flags will be ignored.\")\n    return generation_mode", "docstring": "Returns the generation mode triggered by the [`GenerationConfig`] instance.\n\nArg:\nassistant_model (`PreTrainedModel`, *optional*):\nThe assistant model to be used for assisted generation. If set, the generation mode will be\nassisted generation.\n\nReturns:\n`GenerationMode`: The generation mode triggered by the instance.", "source": "github-repos"}
{"code": "def secure(cls):\n    builtin_mechs = cls._get_builtin_mechanisms()\n    secure_mechs = [mech for (_, mech) in builtin_mechs.items() if ((not mech.insecure) and (mech.priority is not None))]\n    return SASLAuth(secure_mechs)", "docstring": "Uses only authentication mechanisms that are secure for use in\nnon-encrypted sessions.\n\nReturns:\nA new :class:`SASLAuth` object.", "source": "codesearchnet"}
{"code": "def _ParseTimestamp(self, parser_mediator, row):\n    timestamp = row.get('timestamp', None)\n    if (timestamp is not None):\n        try:\n            timestamp = int(timestamp, 10)\n        except (ValueError, TypeError):\n            parser_mediator.ProduceExtractionWarning('Unable to parse timestamp value: {0!s}'.format(timestamp))\n        return dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    try:\n        return self._ConvertToTimestamp(row['date'], row['time'])\n    except ValueError as exception:\n        parser_mediator.ProduceExtractionWarning('Unable to parse time string: \"{0:s} {1:s}\" with error: {2!s}'.format(repr(row['date']), repr(row['time']), exception))", "docstring": "Provides a timestamp for the given row.\n\nIf the Trend Micro log comes from a version that provides a POSIX timestamp,\nuse that directly; it provides the advantages of UTC and of second\nprecision. Otherwise fall back onto the local-timezone date and time.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\ndfdatetime.interface.DateTimeValue: date and time value.", "source": "codesearchnet"}
{"code": "def build_results(self, values):\n    raise NotImplementedError('build_results must be implemented by subclasses')", "docstring": "Build results that match the original shape of the fetch.\n\nArgs:\nvalues: List of values returned by run(). The values correspond exactly to\nthe list tensors or ops returned by unique_fetches().\n\nReturns:\nA struct of the same shape as the original fetch object handled by\nthis fetch mapper.  In the returned struct, the original fetches are\nreplaced by their fetched values.", "source": "github-repos"}
{"code": "def add_send_message(self, connection, send_message):\n    self._send_message[connection] = send_message\n    LOGGER.debug('Added send_message function for connection %s', connection)", "docstring": "Adds a send_message function to the Dispatcher's\ndictionary of functions indexed by connection.\n\nArgs:\nconnection (str): A locally unique identifier\nprovided by the receiver of messages.\nsend_message (fn): The method that should be called\nby the dispatcher to respond to messages which\narrive via connection.", "source": "codesearchnet"}
{"code": "def to_html(self):\n        \n        if self.items is None:\n            return\n        else:\n            html = '<ol%s>\\n' % self.html_attributes()\n            for item in self.items:\n                html += '<li>%s</li>\\n' % item.to_html()\n            html += '</ol>'\n            return html", "docstring": "Render a Text MessageElement as html\n\nArgs:\nNone\n\nReturns:\nStr the html representation of the Text MessageElement\n\nRaises:\nErrors are propagated", "source": "juraj-google-style"}
{"code": "def add_package(package, ignore_check=False, prevent_pending=False, image=None, restart=False):\n    cmd = ['DISM', '/Quiet', ('/Image:{0}'.format(image) if image else '/Online'), '/Add-Package', '/PackagePath:{0}'.format(package)]\n    if ignore_check:\n        cmd.append('/IgnoreCheck')\n    if prevent_pending:\n        cmd.append('/PreventPending')\n    if (not restart):\n        cmd.append('/NoRestart')\n    return __salt__['cmd.run_all'](cmd)", "docstring": "Install a package using DISM\n\nArgs:\npackage (str):\nThe package to install. Can be a .cab file, a .msu file, or a folder\n\n.. note::\nAn `.msu` package is supported only when the target image is\noffline, either mounted or applied.\n\nignore_check (Optional[bool]):\nSkip installation of the package if the applicability checks fail\n\nprevent_pending (Optional[bool]):\nSkip the installation of the package if there are pending online\nactions\n\nimage (Optional[str]):\nThe path to the root directory of an offline Windows image. If\n``None`` is passed, the running operating system is targeted.\nDefault is None.\n\nrestart (Optional[bool]):\nReboot the machine if required by the install\n\nReturns:\ndict: A dictionary containing the results of the command\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.add_package C:\\\\Packages\\\\package.cab", "source": "codesearchnet"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(KeyWrappingData, self).read(\n            input_stream,\n            kmip_version=kmip_version\n        )\n        local_stream = BytearrayStream(input_stream.read(self.length))\n\n        if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):\n            self._wrapping_method = primitives.Enumeration(\n                enum=enums.WrappingMethod,\n                tag=enums.Tags.WRAPPING_METHOD\n            )\n            self._wrapping_method.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        else:\n            raise ValueError(\n                \"Invalid struct missing the wrapping method attribute.\"\n            )\n\n        if self.is_tag_next(\n                enums.Tags.ENCRYPTION_KEY_INFORMATION,\n                local_stream\n        ):\n            self._encryption_key_information = EncryptionKeyInformation()\n            self._encryption_key_information.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self.is_tag_next(\n                enums.Tags.MAC_SIGNATURE_KEY_INFORMATION,\n                local_stream\n        ):\n            self._mac_signature_key_information = MACSignatureKeyInformation()\n            self._mac_signature_key_information.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.MAC_SIGNATURE, local_stream):\n            self._mac_signature = primitives.ByteString(\n                tag=enums.Tags.MAC_SIGNATURE\n            )\n            self._mac_signature.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream):\n            self._iv_counter_nonce = primitives.ByteString(\n                tag=enums.Tags.IV_COUNTER_NONCE\n            )\n            self._iv_counter_nonce.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):\n            self._encoding_option = primitives.Enumeration(\n                enum=enums.EncodingOption,\n                tag=enums.Tags.ENCODING_OPTION\n            )\n            self._encoding_option.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.is_oversized(local_stream)", "docstring": "Read the data encoding the KeyWrappingData struct and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def AddBasicOptions(self, argument_group):\n    version_string = self.GetVersionInformation()\n    argument_group.add_argument('-h', '--help', action='help', help='Show this help message and exit.')\n    argument_group.add_argument('--troubles', dest='show_troubleshooting', action='store_true', default=False, help='Show troubleshooting information.')\n    argument_group.add_argument('-V', '--version', dest='version', action='version', version=version_string, help='Show the version information.')", "docstring": "Adds the basic options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def get_dict(self, name, default=None):\n        \n        if name not in self:\n            if default is not None:\n                return default\n            raise EnvironmentError.not_found(self._prefix, name)\n        return dict(**self.get(name))", "docstring": "Retrieves an environment variable value as a dictionary.\n\nArgs:\nname (str): The case-insensitive, unprefixed variable name.\ndefault: If provided, a default value will be returned\ninstead of throwing ``EnvironmentError``.\n\nReturns:\ndict: The environment variable's value as a ``dict``.\n\nRaises:\nEnvironmentError: If the environment variable does not\nexist, and ``default`` was not provided.", "source": "juraj-google-style"}
{"code": "def make_mixture_prior(latent_size, mixture_components):\n    if (mixture_components == 1):\n        return tfd.MultivariateNormalDiag(loc=tf.zeros([latent_size]), scale_identity_multiplier=1.0)\n    loc = tf.compat.v1.get_variable(name='loc', shape=[mixture_components, latent_size])\n    raw_scale_diag = tf.compat.v1.get_variable(name='raw_scale_diag', shape=[mixture_components, latent_size])\n    mixture_logits = tf.compat.v1.get_variable(name='mixture_logits', shape=[mixture_components])\n    return tfd.MixtureSameFamily(components_distribution=tfd.MultivariateNormalDiag(loc=loc, scale_diag=tf.nn.softplus(raw_scale_diag)), mixture_distribution=tfd.Categorical(logits=mixture_logits), name='prior')", "docstring": "Creates the mixture of Gaussians prior distribution.\n\nArgs:\nlatent_size: The dimensionality of the latent representation.\nmixture_components: Number of elements of the mixture.\n\nReturns:\nrandom_prior: A `tfd.Distribution` instance representing the distribution\nover encodings in the absence of any evidence.", "source": "codesearchnet"}
{"code": "def clip_and_copy_attack_outputs(self, attack_name, is_targeted):\n    \n    if is_targeted:\n      self._targeted_attack_names.add(attack_name)\n    else:\n      self._attack_names.add(attack_name)\n    attack_dir = os.path.join(self.targeted_attacks_output_dir\n                              if is_targeted\n                              else self.attacks_output_dir,\n                              attack_name)\n    for fname in os.listdir(attack_dir):\n      if not (fname.endswith('.png') or fname.endswith('.jpg')):\n        continue\n      image_id = fname[:-4]\n      if image_id not in self.dataset_max_clip:\n        continue\n      image_max_clip = self.dataset_max_clip[image_id]\n      image_min_clip = self.dataset_min_clip[image_id]\n      adversarial_image = np.array(\n          Image.open(os.path.join(attack_dir, fname)).convert('RGB'))\n      clipped_adv_image = np.clip(adversarial_image,\n                                  image_min_clip,\n                                  image_max_clip)\n      output_basename = '{0:08d}'.format(self._output_image_idx)\n      self._output_image_idx += 1\n      self._output_to_attack_mapping[output_basename] = (attack_name,\n                                                         is_targeted,\n                                                         image_id)\n      if is_targeted:\n        self._targeted_attack_image_count += 1\n      else:\n        self._attack_image_count += 1\n      Image.fromarray(clipped_adv_image).save(\n          os.path.join(self.all_adv_examples_dir, output_basename + '.png'))", "docstring": "Clips results of attack and copy it to directory with all images.\n\nArgs:\nattack_name: name of the attack.\nis_targeted: if True then attack is targeted, otherwise non-targeted.", "source": "juraj-google-style"}
{"code": "def FinalizeTaskStorage(self, task):\n    \n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Unsupported storage type.')\n\n    storage_file_path = self._GetTaskStorageFilePath(task)\n    processed_storage_file_path = self._GetProcessedStorageFilePath(task)\n\n    try:\n      os.rename(storage_file_path, processed_storage_file_path)\n    except OSError as exception:\n      raise IOError((\n          'Unable to rename task storage file: {0:s} with error: '\n          '{1!s}').format(storage_file_path, exception))", "docstring": "Finalizes a processed task storage.\n\nMoves the task storage file from its temporary directory to the processed\ndirectory.\n\nArgs:\ntask (Task): task.\n\nRaises:\nIOError: if the storage type is not supported or\nif the storage file cannot be renamed.\nOSError: if the storage type is not supported or\nif the storage file cannot be renamed.", "source": "juraj-google-style"}
{"code": "def create_app(self):\n    self.appinfo['accounts'] = self.get_accounts()\n    self.log.debug('Pipeline Config\\n%s', pformat(self.pipeline_config))\n    self.log.debug('App info:\\n%s', pformat(self.appinfo))\n    jsondata = self.retrieve_template()\n    wait_for_task(jsondata)\n    self.log.info('Successfully created %s application', self.appname)\n    return jsondata", "docstring": "Send a POST to spinnaker to create a new application with class variables.\n\nRaises:\nAssertionError: Application creation failed.", "source": "codesearchnet"}
{"code": "def FromHttpToTimestamp(self, http_ts_string):\n    t = time.strptime(http_ts_string, '%a, %d %b %Y %H:%M:%S GMT')\n    return int(calendar.timegm(t))", "docstring": "Converts an HTTP timestamp string to an internal nss_cache timestamp.\n\nArgs:\nhttp_ts_string: HTTP format timestamp string\n\nReturns:\nint: number of seconds since epoch", "source": "github-repos"}
{"code": "def name(self, name):\n    self._data['name'] = name\n    request = self._base_request\n    request['name'] = name\n    return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security label's name.\n\nArgs:\nname (str): The new name for the security label.", "source": "codesearchnet"}
{"code": "def __init__(self, mtf_graph, mesh_shape):\n    \n    self._splittable_mtf_dimension_names = self._initialize_splittable_dimensions(\n        mtf_graph)\n    self._mtf_dimension_name_to_size_gcd = (\n        self._initialize_mtf_dimension_name_to_size_gcd(mtf_graph))\n    self._mesh_dimension_name_to_size = self._initialize_mesh_dimension_name_to_size(\n        mesh_shape)", "docstring": "Initializer.\n\nArgs:\nmtf_graph: an mtf.Graph, representing the Mesh TensorFlow computation of\ninterest.\nmesh_shape: an mtf.Shape, representing the mesh of interest.", "source": "juraj-google-style"}
{"code": "def RetrievePluginAsset(self, run, plugin_name, asset_name):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.RetrievePluginAsset(plugin_name, asset_name)", "docstring": "Return the contents for a specific plugin asset from a run.\n\nArgs:\nrun: The string name of the run.\nplugin_name: The string name of a plugin.\nasset_name: The string name of an asset.\n\nReturns:\nThe string contents of the plugin asset.\n\nRaises:\nKeyError: If the asset is not available.", "source": "codesearchnet"}
{"code": "def ScanForFileSystem(self, source_path_spec):\n    \n    if source_path_spec.type_indicator == (\n        definitions.TYPE_INDICATOR_APFS_CONTAINER):\n      \n      \n      \n      return path_spec_factory.Factory.NewPathSpec(\n          definitions.TYPE_INDICATOR_APFS, location='/',\n          parent=source_path_spec)\n\n    try:\n      type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(\n          source_path_spec, resolver_context=self._resolver_context)\n    except RuntimeError as exception:\n      raise errors.BackEndError((\n          'Unable to process source path specification with error: '\n          '{0!s}').format(exception))\n\n    if not type_indicators:\n      return None\n\n    type_indicator = type_indicators[0]\n    if len(type_indicators) > 1:\n      if definitions.PREFERRED_NTFS_BACK_END not in type_indicators:\n        raise errors.BackEndError(\n            'Unsupported source found more than one file system types.')\n\n      type_indicator = definitions.PREFERRED_NTFS_BACK_END\n\n    \n    if type_indicator == definitions.TYPE_INDICATOR_NTFS:\n      root_location = '\\\\'\n    else:\n      root_location = '/'\n\n    file_system_path_spec = path_spec_factory.Factory.NewPathSpec(\n        type_indicator, location=root_location, parent=source_path_spec)\n\n    if type_indicator == definitions.TYPE_INDICATOR_TSK:\n      \n      \n      try:\n        file_system = resolver.Resolver.OpenFileSystem(\n            file_system_path_spec, resolver_context=self._resolver_context)\n        file_system.Close()\n      except errors.BackEndError:\n        file_system_path_spec = None\n\n    return file_system_path_spec", "docstring": "Scans the path specification for a supported file system format.\n\nArgs:\nsource_path_spec (PathSpec): source path specification.\n\nReturns:\nPathSpec: file system path specification or None if no supported file\nsystem type was found.\n\nRaises:\nBackEndError: if the source cannot be scanned or more than one file\nsystem type is found.", "source": "juraj-google-style"}
{"code": "def get(self, network_id, *args, **kwargs):\n    return self.prepare_model(self.client.api.inspect_network(network_id, *args, **kwargs))", "docstring": "Get a network by its ID.\n\nArgs:\nnetwork_id (str): The ID of the network.\nverbose (bool): Retrieve the service details across the cluster in\nswarm mode.\nscope (str): Filter the network by scope (``swarm``, ``global``\nor ``local``).\n\nReturns:\n(:py:class:`Network`) The network.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the network does not exist.\n\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def el_to_path_vector(el):\n    path = []\n    while el.parent:\n        path.append(el)\n        el = el.parent\n    return list(reversed((path + [el])))", "docstring": "Convert `el` to vector of foregoing elements.\n\nAttr:\nel (obj): Double-linked HTMLElement instance.\n\nReturns:\nlist: HTMLElements which considered as path from root to `el`.", "source": "codesearchnet"}
{"code": "def find_replace(obj, find, replace):\n    try:\n        if isinstance(obj, dict):\n            return {find_replace(key, find, replace): find_replace(value, find, replace) for (key, value) in obj.items()}\n        elif isinstance(obj, list):\n            return [find_replace(element, find, replace) for element in obj]\n        elif (obj == find):\n            return unicode_convert(replace)\n        else:\n            try:\n                return unicode_convert(find_replace_string(obj, find, replace))\n            except:\n                return unicode_convert(obj)\n    except:\n        (line, filename, synerror) = trace()\n        raise ArcRestHelperError({'function': 'find_replace', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        pass", "docstring": "Searches an object and performs a find and replace.\n\nArgs:\nobj (object): The object to iterate and find/replace.\nfind (str): The string to search for.\nreplace (str): The string to replace with.\nReturns:\nobject: The object with replaced strings.", "source": "codesearchnet"}
{"code": "def contains(self, time: datetime.datetime, inclusive: bool=True) -> bool:\n    if inclusive:\n        return (self.start <= time <= self.end)\n    else:\n        return (self.start < time < self.end)", "docstring": "Does the interval contain a momentary time?\n\nArgs:\ntime: the ``datetime.datetime`` to check\ninclusive: use inclusive rather than exclusive range checks?", "source": "codesearchnet"}
{"code": "def _apply(self, ctx: ExtensionContext) -> Any:\n        \n        _, external_path = ctx.node\n        return ctx.mentor.load_yaml(self.locator(\n            external_path,\n            cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None\n        ))", "docstring": "Loads a yaml fragment from an external file.\n\nArgs:\nctx: The processing context.\n\nReturns:\nThe external resource as a python dictionary. The fragment is already send through\nthe processor as well.", "source": "juraj-google-style"}
{"code": "def run_cmd(self, *args, **kwargs):\n    timeout = kwargs.pop('timeout', None)\n    p = self.raw_cmd(*args, **kwargs)\n    return p.communicate(timeout=timeout)[0].decode('utf-8').replace('\\r\\n', '\\n')", "docstring": "Run a command and return its output Unix style (\\r\\n already replaced with \\n).\n\nArgs:\ntimeout (float): timeout in seconds for the command execution", "source": "codesearchnet"}
{"code": "def _FormatHostname(self, event):\n    \n    hostname = self._output_mediator.GetHostname(event)\n    return self._FormatField(hostname)", "docstring": "Formats the hostname.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted hostname field.", "source": "juraj-google-style"}
{"code": "def service_headline(self, short_name):\n    if (short_name not in self.services):\n        raise ArgumentError('Unknown service name', short_name=short_name)\n    return self.services[short_name]['state'].headline", "docstring": "Get the headline stored for a service.\n\nArgs:\nshort_name (string): The short name of the service to get messages for\n\nReturns:\nServiceMessage: the headline or None if there is no headline", "source": "codesearchnet"}
{"code": "def eval(self, session=None, feed_dict=None):\n    return self.value.eval(session=session, feed_dict=feed_dict)", "docstring": "In a session, computes and returns the value of this random variable.\n\nThis is not a graph construction method, it does not add ops to the graph.\n\nThis convenience method requires a session where the graph\ncontaining this variable has been launched. If no session is\npassed, the default session is used.\n\nArgs:\nsession: tf.BaseSession.\nThe `tf.Session` to use to evaluate this random variable. If\nnone, the default session is used.\nfeed_dict: dict.\nA dictionary that maps `tf.Tensor` objects to feed values. See\n`tf.Session.run()` for a description of the valid feed values.\n\nReturns:\nValue of the random variable.\n\n#### Examples\n\n```python\nx = Normal(0.0, 1.0)\nwith tf.Session() as sess:\n# Usage passing the session explicitly.\nprint(x.eval(sess))\n# Usage with the default session.  The 'with' block\n# above makes 'sess' the default session.\nprint(x.eval())\n```", "source": "codesearchnet"}
{"code": "def inquire(self, name=True, lifetime=True, usage=True, mechs=True):\n    res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)\n    if (res.name is not None):\n        res_name = names.Name(res.name)\n    else:\n        res_name = None\n    return tuples.InquireCredResult(res_name, res.lifetime, res.usage, res.mechs)", "docstring": "Inspect these credentials for information\n\nThis method inspects these credentials for information about them.\n\nArgs:\nname (bool): get the name associated with the credentials\nlifetime (bool): get the remaining lifetime for the credentials\nusage (bool): get the usage for the credentials\nmechs (bool): get the mechanisms associated with the credentials\n\nReturns:\nInquireCredResult: the information about the credentials,\nwith None used when the corresponding argument was False\n\nRaises:\nMissingCredentialsError\nInvalidCredentialsError\nExpiredCredentialsError", "source": "codesearchnet"}
{"code": "def get_storage_usage(access_token, subscription_id, location):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Storage/locations/', location,\n                        '/usages',\n                        '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "Returns storage usage and quota information for the specified subscription and location.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location, e.g. westus.\n\nReturns:\nHTTP response. JSON body of storage account usage.", "source": "juraj-google-style"}
{"code": "def get_structure_from_name(self, structure_name):\n        \n        return next((st for st in self.structures if st.name == structure_name), None)", "docstring": "Return a structure from a name\nArgs:\nstructure_name (str): name of the structure\nReturns:\nStructure", "source": "juraj-google-style"}
{"code": "def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):\n    split_image = images_kwargs.get('split_image', None) or self.split_image\n    max_image_size = images_kwargs.get('max_image_size', None) or self.max_image_size\n    resized_height, resized_width = select_best_resolution((height, width), self.split_resolutions)\n    num_patches = 1 if not split_image else resized_height // max_image_size * (resized_width // max_image_size)\n    return num_patches", "docstring": "A utility that returns the number of image patches for a given image size.\n\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nimages_kwargs (`dict`, *optional*):\nAny kwargs to override defaults of the image processor.\nReturns:\n`int`: Number of patches per image.", "source": "github-repos"}
{"code": "def ricker(f, length, dt):\n    t = np.linspace((- int((length / 2))), int(((length - dt) / 2)), int((length / dt)))\n    y = ((1.0 - (((2.0 * (np.pi ** 2)) * (f ** 2)) * (t ** 2))) * np.exp((((- (np.pi ** 2)) * (f ** 2)) * (t ** 2))))\n    return (t, y)", "docstring": "A Ricker wavelet.\n\nArgs:\nf (float): frequency in Haz, e.g. 25 Hz.\nlength (float): Length in s, e.g. 0.128.\ndt (float): sample interval in s, e.g. 0.001.\n\nReturns:\ntuple. time basis, amplitude values.", "source": "codesearchnet"}
{"code": "def debug_op(self):\n    return self._debug_op", "docstring": "Name of the debug op.\n\nReturns:\n(`str`) debug op name (e.g., `DebugIdentity`).", "source": "github-repos"}
{"code": "def _process_for_docstring(self, node, node_type):\n        \n        \n        if node.doc is not None:\n\n            \n            \n            \n            \n            if node_type == 'module':\n\n                \n                \n                if not node.body:\n                    \n                    \n                    \n                    \n                    \n                    \n                    for key in list(self._tokenized_triple_quotes.keys()):\n                        quote_record = self._tokenized_triple_quotes.get(key)\n                        if quote_record:\n                            self._check_docstring_quotes(quote_record)\n                            del self._tokenized_triple_quotes[key]\n\n                else:\n                    for i in range(0, node.body[0].lineno):\n                        quote_record = self._tokenized_triple_quotes.get(i)\n                        if quote_record:\n                            self._check_docstring_quotes(quote_record)\n                            del self._tokenized_triple_quotes[i]\n                            break\n\n            else:\n                \n                \n                \n\n                if not node.body:\n                    \n                    \n                    \n                    lineno = self._find_docstring_line_for_no_body(node.fromlineno)\n                    quote_record = self._tokenized_triple_quotes.get(lineno)\n                    if quote_record:\n                        self._check_docstring_quotes(quote_record)\n                        del self._tokenized_triple_quotes[lineno]\n\n                else:\n                    doc_row = self._find_docstring_line(node.fromlineno, node.tolineno)\n                    quote_record = self._tokenized_triple_quotes.get(doc_row)\n                    if quote_record:\n                        self._check_docstring_quotes(quote_record)\n                        del self._tokenized_triple_quotes[doc_row]", "docstring": "Check for docstring quote consistency.\n\nArgs:\nnode: the AST node being visited.\nnode_type: the type of node being operated on.", "source": "juraj-google-style"}
{"code": "def _handle_error_response(response_body):\n    try:\n        error_data = json.loads(response_body)\n        error_details = '{}: {}'.format(error_data['error'], error_data.get('error_description'))\n    except (KeyError, ValueError):\n        error_details = response_body\n    raise exceptions.RefreshError(error_details, response_body)", "docstring": "Translates an error response into an exception.\n\nArgs:\nresponse_body (str): The decoded response data.\n\nRaises:\ngoogle.auth.exceptions.RefreshError", "source": "codesearchnet"}
{"code": "def _serialize_scalar_from_string_representation_factory(type_name, types, str_func=str):\n    \n    def serialize(ion_event):\n        value = ion_event.value\n        validate_scalar_value(value, types)\n        return six.b(str_func(value))\n    serialize.__name__ = '_serialize_' + type_name\n    return serialize", "docstring": "Builds functions that leverage Python ``str()`` or similar functionality.\n\nArgs:\ntype_name (str): The name of the Ion type.\ntypes (Union[Sequence[type],type]): The Python types to validate for.\nstr_func (Optional[Callable]): The function to convert the value with, defaults to ``str``.\n\nReturns:\nfunction: The function for serializing scalars of a given type to Ion text bytes.", "source": "juraj-google-style"}
{"code": "def build_markdown_table(headers, rows, row_keys=None):\n    row_maxes = _find_row_maxes(headers, rows)\n    row_keys = (row_keys or [key for (key, value) in headers.items()])\n    table = [_build_row(headers, row_maxes, row_keys), _build_separator(row_maxes, row_keys)]\n    for row in rows:\n        table.append(_build_row(row, row_maxes, row_keys))\n    return ('\\n'.join(table) + '\\n')", "docstring": "Build a lined up markdown table.\n\nArgs:\nheaders (dict): A key -> value pairing fo the headers.\nrows (list): List of dictionaries that contain all the keys listed in\nthe headers.\nrow_keys (list): A sorted list of keys to display\n\nReturns:\nA valid Markdown Table as a string.", "source": "codesearchnet"}
{"code": "def pre_scan(self, func=operator.add, seed=0):\n    if self.closed():\n        raise ValueError('Attempt to call pre_scan() on a closed Queryable.')\n    if (not is_callable(func)):\n        raise TypeError('pre_scan() parameter func={0} is not callable'.format(repr(func)))\n    return self._create(self._generate_pre_scan_result(func, seed))", "docstring": "An exclusive prefix sum which returns the cumulative application of the\nsupplied function up to but excluding the current element.\n\nArgs:\nfunc: An optional binary function which is commutative - that is,\nthe order of the arguments is unimportant.  Defaults to a\nsumming operator.\n\nseed: The first element of the prefix sum and therefore also the\nfirst element of the returned sequence.\n\nReturns:\nA Queryable such that the nth element is the sum of the first n-1\nelements of the source sequence.\n\nRaises:\nValueError: If the Queryable has been closed.\nTypeError: If func is not callable.", "source": "codesearchnet"}
{"code": "def serializable_value(self, obj):\n        \n        value = self.__get__(obj, obj.__class__)\n        return self.property.serialize_value(value)", "docstring": "Produce the value as it should be serialized.\n\nSometimes it is desirable for the serialized value to differ from\nthe ``__get__`` in order for the ``__get__`` value to appear simpler\nfor user or developer convenience.\n\nArgs:\nobj (HasProps) : the object to get the serialized attribute for\n\nReturns:\nJSON-like", "source": "juraj-google-style"}
{"code": "def get_glob(path):\n    if isinstance(path, str):\n        return glob.glob(path, recursive=True)\n    if isinstance(path, os.PathLike):\n        return glob.glob(str(path), recursive=True)\n    elif isinstance(path, (list, tuple)):\n        return list(chain.from_iterable((glob.glob(str(p), recursive=True) for p in path)))\n    else:\n        raise TypeError(f\"path should be string, path-like or a list. Instead, it's a {type(path)}\")", "docstring": "Process the input path, applying globbing and formatting.\n\nDo note that this will returns files AND directories that match the glob.\n\nNo tilde expansion is done, but *, ?, and character ranges expressed with\n[] will be correctly matched.\n\nEscape all special characters ('?', '*' and '['). For a literal match, wrap\nthe meta-characters in brackets. For example, '[?]' matches the character\n'?'.\n\nIf passing in an iterable of paths, will expand matches for each path in\nthe iterable. The function will return all the matches for each path\nglob expression combined into a single list.\n\nArgs:\npath: Path-like string, or iterable (list or tuple ) of paths.\n\nReturns:\nCombined list of paths found for input glob.", "source": "codesearchnet"}
{"code": "def format_map(self, format_string, mapping):\n        \n        return self.vformat(format_string, args=None, kwargs=mapping)", "docstring": "format a string by a map\n\nArgs:\nformat_string(str): A format string\nmapping(dict): A map to format the string\n\nReturns:\nA formatted string.\n\nRaises:\nKeyError: if key is not provided by the given map.", "source": "juraj-google-style"}
{"code": "def _best_subset(self, n_qubits):\n    if (n_qubits == 1):\n        return np.array([0])\n    device_qubits = self.coupling_map.size()\n    cmap = np.asarray(self.coupling_map.get_edges())\n    data = np.ones_like(cmap[:, 0])\n    sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])), shape=(device_qubits, device_qubits)).tocsr()\n    best = 0\n    best_map = None\n    for k in range(sp_cmap.shape[0]):\n        bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False, return_predecessors=False)\n        connection_count = 0\n        sub_graph = []\n        for i in range(n_qubits):\n            node_idx = bfs[i]\n            for j in range(sp_cmap.indptr[node_idx], sp_cmap.indptr[(node_idx + 1)]):\n                node = sp_cmap.indices[j]\n                for counter in range(n_qubits):\n                    if (node == bfs[counter]):\n                        connection_count += 1\n                        sub_graph.append([node_idx, node])\n                        break\n        if (connection_count > best):\n            best = connection_count\n            best_map = bfs[0:n_qubits]\n            mapping = {}\n            for edge in range(best_map.shape[0]):\n                mapping[best_map[edge]] = edge\n            new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in sub_graph]\n            rows = [edge[0] for edge in new_cmap]\n            cols = [edge[1] for edge in new_cmap]\n            data = ([1] * len(rows))\n            sp_sub_graph = sp.coo_matrix((data, (rows, cols)), shape=(n_qubits, n_qubits)).tocsr()\n            perm = cs.reverse_cuthill_mckee(sp_sub_graph)\n            best_map = best_map[perm]\n    return best_map", "docstring": "Computes the qubit mapping with the best connectivity.\n\nArgs:\nn_qubits (int): Number of subset qubits to consider.\n\nReturns:\nndarray: Array of qubits to use for best connectivity mapping.", "source": "codesearchnet"}
{"code": "def setErrorHandler(self, errorhandler):\n\n    class ErrorHandlerWrapper(ErrorHandler):\n\n        def __init__(self, errorhandler):\n            self.errorhandler = errorhandler\n            self.last_exception = None\n\n        def error(self, exception):\n            if isinstance(exception, amplpython.AMPLException):\n                exception = AMPLException(exception)\n            try:\n                self.errorhandler.error(exception)\n            except Exception as e:\n                self.last_exception = e\n\n        def warning(self, exception):\n            if isinstance(exception, amplpython.AMPLException):\n                exception = AMPLException(exception)\n            try:\n                self.errorhandler.warning(exception)\n            except Exception as e:\n                self.last_exception = e\n\n        def check(self):\n            if (self.last_exception is not None):\n                (e, self.last_exception) = (self.last_exception, None)\n                raise e\n    errorhandler_wrapper = ErrorHandlerWrapper(errorhandler)\n\n    class InnerErrorHandler(amplpython.ErrorHandler):\n\n        def error(self, exception):\n            errorhandler_wrapper.error(exception)\n\n        def warning(self, exception):\n            errorhandler_wrapper.warning(exception)\n    self._errorhandler = errorhandler\n    self._errorhandler_inner = InnerErrorHandler()\n    self._errorhandler_wrapper = errorhandler_wrapper\n    lock_and_call((lambda : self._impl.setErrorHandler(self._errorhandler_inner)), self._lock)", "docstring": "Sets a new error handler.\n\nArgs:\nerrorhandler: The object handling AMPL errors and warnings.", "source": "codesearchnet"}
{"code": "class XSoftmax(torch.autograd.Function):\n\n    @staticmethod\n    def forward(ctx, input, mask, dim):\n        ctx.dim = dim\n        rmask = ~mask.to(torch.bool)\n        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))\n        output = torch.softmax(output, ctx.dim)\n        output.masked_fill_(rmask, 0)\n        ctx.save_for_backward(output)\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        output, = ctx.saved_tensors\n        inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)\n        return (inputGrad, None, None)\n\n    @staticmethod\n    def symbolic(g, self, mask, dim):\n        import torch.onnx.symbolic_helper as sym_help\n        from torch.onnx.symbolic_opset9 import masked_fill, softmax\n        mask_cast_value = g.op('Cast', mask, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n        r_mask = g.op('Cast', g.op('Sub', g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), to_i=sym_help.cast_pytorch_to_onnx['Bool'])\n        output = masked_fill(g, self, r_mask, g.op('Constant', value_t=torch.tensor(torch.finfo(self.type().dtype()).min)))\n        output = softmax(g, output, dim)\n        return masked_fill(g, output, r_mask, g.op('Constant', value_t=torch.tensor(0, dtype=torch.bool)))", "docstring": "Masked Softmax which is optimized for saving memory\n\nArgs:\ninput (`torch.tensor`): The input tensor that will apply softmax.\nmask (`torch.IntTensor`):\nThe mask matrix where 0 indicate that element will be ignored in the softmax calculation.\ndim (int): The dimension that will apply softmax\n\nExample:\n\n```python\n>>> import torch\n>>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax\n\n>>> # Make a tensor\n>>> x = torch.randn([4, 20, 100])\n\n>>> # Create a mask\n>>> mask = (x > 0).int()\n\n>>> # Specify the dimension to apply softmax\n>>> dim = -1\n\n>>> y = XSoftmax.apply(x, mask, dim)\n```", "source": "github-repos"}
{"code": "def _compute_template(val: BaseValue) -> Sequence[BaseValue]:\n    if isinstance(val, _abstract.PyTDClass):\n        return [val.ctx.convert.constant_to_value(itm.type_param) for itm in val.pytd_cls.template]\n    elif not isinstance(val, _abstract.InterpreterClass):\n        return ()\n    bases = [abstract_utils.get_atomic_value(base, default=val.ctx.convert.unsolvable) for base in val.bases()]\n    template = []\n    for base in bases:\n        if base.full_name == 'typing.Generic':\n            if isinstance(base, _abstract.PyTDClass):\n                raise abstract_utils.GenericTypeError(val, 'Cannot inherit from plain Generic')\n            if template:\n                raise abstract_utils.GenericTypeError(val, 'Cannot inherit from Generic[...] multiple times')\n            for item in base.template:\n                param = base.formal_type_parameters.get(item.name)\n                template.append(param.with_scope(val.full_name))\n    if template:\n        for base in bases:\n            if base.full_name != 'typing.Generic':\n                if isinstance(base, _abstract.ParameterizedClass):\n                    for item in base.template:\n                        param = base.formal_type_parameters.get(item.name)\n                        if isinstance(param, _abstract.TypeParameter):\n                            t = param.with_scope(val.full_name)\n                            if t not in template:\n                                raise abstract_utils.GenericTypeError(val, 'Generic should contain all the type variables')\n    else:\n        seqs = []\n        for base in bases:\n            if isinstance(base, _abstract.ParameterizedClass):\n                seq = []\n                for item in base.template:\n                    param = base.formal_type_parameters.get(item.name)\n                    if isinstance(param, _abstract.TypeParameter):\n                        seq.append(param.with_scope(val.full_name))\n                seqs.append(seq)\n        try:\n            template.extend(mro.MergeSequences(seqs))\n        except ValueError as e:\n            raise abstract_utils.GenericTypeError(val, f'Illegal type parameter order in class {val.name}') from e\n    return template", "docstring": "Compute the precedence list of template parameters according to C3.\n\n1. For the base class list, if it contains `typing.Generic`, then all the\ntype parameters should be provided. That means we don't need to parse extra\nbase classes and then we can get all the type parameters.\n2. If there is no `typing.Generic`, parse the precedence list according to\nC3 based on all the base classes.\n3. If `typing.Generic` exists, it must contain at least one type parameter.\nAnd there is at most one `typing.Generic` in the base classes. Report an error\nif the check fails.\n\nArgs:\nval: The abstract.BaseValue to compute a template for.\n\nReturns:\nparsed type parameters\n\nRaises:\nGenericTypeError: if the type annotation for generic type is incorrect", "source": "github-repos"}
{"code": "def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):\n    system_configuration = artifacts.SystemConfigurationArtifact()\n    system_configuration.code_page = self.GetValue('codepage', default_value=self._codepage)\n    system_configuration.hostname = self._hostnames.get(session_identifier, None)\n    system_configuration.keyboard_layout = self.GetValue('keyboard_layout')\n    system_configuration.operating_system = self.GetValue('operating_system')\n    system_configuration.operating_system_product = self.GetValue('operating_system_product')\n    system_configuration.operating_system_version = self.GetValue('operating_system_version')\n    date_time = datetime.datetime(2017, 1, 1)\n    time_zone = self._time_zone.tzname(date_time)\n    if (time_zone and isinstance(time_zone, py2to3.BYTES_TYPE)):\n        time_zone = time_zone.decode('ascii')\n    system_configuration.time_zone = time_zone\n    user_accounts = self._user_accounts.get(session_identifier, {})\n    system_configuration.user_accounts = list(user_accounts.values())\n    return system_configuration", "docstring": "Retrieves the knowledge base as a system configuration artifact.\n\nArgs:\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nReturns:\nSystemConfigurationArtifact: system configuration artifact.", "source": "codesearchnet"}
{"code": "def retry(retries=0, delay=timedelta(), conditions=[]):\n    delay_in_seconds = delay.total_seconds()\n\n    def decorator(function):\n        '\\n        The actual decorator for retrying.\\n        '\n\n        @wraps(function)\n        def wrapper(*args, **kwargs):\n            '\\n            The actual wrapper for retrying.\\n            '\n            func = partial(function, *args, **kwargs)\n            return retry_loop(retries, delay_in_seconds, conditions, func)\n        return wrapper\n    return decorator", "docstring": "A decorator for making a function that retries on failure.\n\nArgs:\nretries (Integral): The number of times to retry if a failure occurs.\ndelay (timedelta, optional, 0 seconds): A timedelta representing\nthe amount of time to delay between retries.\nconditions (list): A list of retry conditions.", "source": "codesearchnet"}
{"code": "def build(cls, **kwargs):\n    return cls.add(cls.new(**kwargs), commit=False)", "docstring": "Similar to create. But the transaction is not committed\n\nArgs:\n\n**kwargs : The keyword arguments for the constructor\n\nReturns:\n\nA model instance which has been added to db session. But session\ntransaction has not been committed yet.", "source": "codesearchnet"}
{"code": "def run_docker(self, commands):\n        \n        try:\n            import docker\n        except ImportError:\n            print(\n                '{}{}Could not import docker module (try \"pip install docker\").'.format(\n                    c.Style.BRIGHT, c.Fore.RED\n                )\n            )\n            sys.exit(1)\n\n        \n        app_args_data = self.profile.get('profile_args').data\n        install_json = self.profile.get('install_json')\n\n        \n        client = docker.from_env()\n\n        \n        app_dir = os.getcwd()\n        \n\n        \n        ports = {}\n        if self.args.vscd:\n            ports = {'{}/tcp'.format(self.args.vscd_port): self.args.vscd_port}\n\n        \n        volumes = {}\n        in_path = '{}/{}'.format(app_dir, app_args_data.get('tc_in_path'))\n        if app_args_data.get('tc_in_path') is not None:\n            volumes[in_path] = {'bind': in_path}\n        log_path = '{}/{}'.format(app_dir, app_args_data.get('tc_log_path'))\n        if app_args_data.get('tc_log_path') is not None:\n            volumes[log_path] = {'bind': log_path}\n        out_path = '{}/{}'.format(app_dir, app_args_data.get('tc_out_path'))\n        if app_args_data.get('tc_out_path') is not None:\n            volumes[out_path] = {'bind': out_path}\n        temp_path = '{}/{}'.format(app_dir, app_args_data.get('tc_temp_path'))\n        if app_args_data.get('tc_temp_path') is not None:\n            volumes[temp_path] = {'bind': temp_path}\n        volumes[app_dir] = {'bind': app_dir}\n\n        if self.args.docker_image is not None:\n            \n            docker_image = self.args.docker_image\n        else:\n            \n            \n            docker_image = self.profile.get(\n                'dockerImage', install_json.get('dockerImage', self.docker_image)\n            )\n\n        status_code = 1\n        try:\n            self.container = client.containers.run(\n                docker_image,\n                entrypoint=commands.get('cli_command'),\n                environment=['PYTHONPATH={}/lib_latest'.format(app_dir)],\n                detach=True,\n                \n                ports=ports,\n                remove=True,\n                volumes=volumes,\n                working_dir=app_dir,\n            )\n            results = self.container.wait()\n            status_code = results.get('StatusCode')\n            error = results.get('Error')\n            if error:\n                print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, error))\n        except Exception as e:\n            print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e))\n            sys.exit()\n\n        \n        return self.run_exit_code(status_code)", "docstring": "Run App in Docker Container.\n\nArgs:\ncommands (dict): A dictionary of the CLI commands.\n\nReturns:\nint: The exit code of the subprocess command.", "source": "juraj-google-style"}
{"code": "def get_assets(cls, lat, lon, begin=None, end=None):\n        \n        instance = cls('planetary/earth/assets')\n\n        filters = {\n            'lat': lat,\n            'lon': lon,\n            'begin': begin,\n            'end': end,\n        }\n\n        return instance.get_resource(**filters)", "docstring": "Returns date and ids of flyovers\n\nArgs:\nlat: latitude float\nlon: longitude float\nbegin: date instance\nend: date instance\n\nReturns:\njson", "source": "juraj-google-style"}
{"code": "def __init__(self, redir_file, to_file):\n        \n        self.redir_file = redir_file\n        self._from_fd = redir_file.fileno()\n        self._to_fd = to_file.fileno()\n        \n        \n        \n        self.orig_file = os.fdopen(os.dup(self._from_fd), 'wb', 0)", "docstring": "Constructor\n\nArgs:\nredir_file: (file) The file object to redirect\nto_file: (file) The file object `redir_file` should be redirected to.", "source": "juraj-google-style"}
{"code": "def clean(exclude):\n    \n    \n    pretend = context.get('pretend', False)\n    exclude = list(exclude) + conf.get('clean.exclude', [])\n    clean_patterns = conf.get('clean.patterns', [\n        '*__pycache__*',\n        '*.py[cod]',\n        '*.swp',\n    ])\n\n    num_files = 0\n    with util.timed_block() as t:\n        files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)\n        for path in files:\n            try:\n                num_files += 1\n\n                if not isdir(path):\n                    log.info('  <91>[file] <90>{}', path)\n                    not pretend and os.remove(path)\n                else:\n                    log.info('  <91>[dir]  <90>{}', path)\n                    not pretend and rmtree(path)\n\n            except OSError:\n                log.info(\"<33>Failed to remove <90>{}\", path)\n\n    if pretend:\n        msg = \"Would delete <33>{}<32> files. Took <33>{}<32>s\"\n    else:\n        msg = \"Deleted <33>{}<32> files in <33>{}<32>s\"\n\n    log.info(msg.format(num_files, t.elapsed_s))", "docstring": "Remove all unnecessary files.\n\nArgs:\npretend (bool):\nIf set to **True**, do not delete any files, just show what would be\ndeleted.\nexclude (list[str]):\nA list of path patterns to exclude from deletion.", "source": "juraj-google-style"}
{"code": "def _RecurseOverObject(obj, factory, parent=None):\n  \n  if _IsSudsIterable(obj):\n    \n    \n    copy_of_obj = tuple(obj)\n    for item in copy_of_obj:\n      if _IsSudsIterable(item):\n        if 'xsi_type' in item:\n          if isinstance(obj, tuple):\n            parent[obj[0]] = _PackForSuds(obj[1], factory)\n          else:\n            obj.remove(item)\n            obj.append(_PackForSuds(item, factory))\n        _RecurseOverObject(item, factory, obj)", "docstring": "Recurses over a nested structure to look for changes in Suds objects.\n\nArgs:\nobj: A parameter for a SOAP request field which is to be inspected and\nwill be packed for Suds if an xsi_type is specified, otherwise will be\nleft unaltered.\nfactory: The suds.client.Factory object which can create instances of the\nclasses generated from the WSDL.\nparent: The parent object that contains the obj parameter to be inspected.", "source": "juraj-google-style"}
{"code": "def run_graph(self, device, n, m, k, transpose_a, transpose_b, num_iters, dtype):\n    graph = ops.Graph()\n    with graph.as_default():\n        output = build_graph(device, n, m, k, transpose_a, transpose_b, dtype)\n        with session_lib.Session(graph=graph) as session:\n            variables.global_variables_initializer().run()\n            for _ in range(500):\n                session.run(output)\n            start_time = time.time()\n            for _ in range(num_iters):\n                session.run(output)\n            duration = time.time() - start_time\n            num_items = n * m * k * 2\n            throughput = num_items * num_iters / duration / 1000000000.0\n            print('%s %s input_info:%s %d %.4fsec, %.4fGitems/s.' % (device, str(dtype), str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + '.tb:' + str(transpose_b), num_iters, duration, throughput))\n    name_template = 'matmul_{device}_{dtype}_input_info_{inputinfo}'\n    self.report_benchmark(name=name_template.format(device=device, dtype=str(dtype).replace(' ', ''), inputinfo=str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' + str(transpose_a) + ',tb:' + str(transpose_b)).replace(' ', ''), iters=num_iters, wall_time=duration)\n    return duration", "docstring": "Run the graph and print its execution time.\n\nArgs:\ndevice: String, the device to run on.\nn: tensor A's first dimension size.\nm: tensor A's second dimension size.\nk: tensor B's second dimension size.\ntranspose_a: boolean value to show if tensor A is transposed.\ntranspose_b: boolean value to show if tensor B is transposed.\nnum_iters: number of iterations to run the benchmark.\ndtype: numpy data type of the input tensor.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def find(lst, a, case_sensitive=True):\n    \n    a = force_list(a)\n\n    if not case_sensitive:\n        lst = [x.lower() for x in lst]\n        a = [y.lower() for y in a]\n\n    return [i for i, x in enumerate(lst) if x in a]", "docstring": "Return indices of a list which have elements that match an object or list of objects\n\nArgs:\nlst: list of values\na: object(s) to check equality\ncase_sensitive: if the search should be case sensitive\n\nReturns:\nlist: list of indicies of lst which equal a", "source": "juraj-google-style"}
{"code": "def upsert_sweep(self, config):\n        \n        mutation = gql()\n\n        \n        \n        def no_retry_400_or_404(e):\n            if not isinstance(e, requests.HTTPError):\n                return True\n            if e.response.status_code != 400 and e.response.status_code != 404:\n                return True\n            body = json.loads(e.response.content)\n            raise UsageError(body['errors'][0]['message'])\n\n        response = self.gql(mutation, variable_values={\n            'config': yaml.dump(config),\n            'description': config.get(\"description\"),\n            'entityName': self.settings(\"entity\"),\n            'projectName': self.settings(\"project\")},\n            check_retry_fn=no_retry_400_or_404)\n        return response['upsertSweep']['sweep']['name']", "docstring": "Upsert a sweep object.\n\nArgs:\nconfig (str): sweep config (will be converted to yaml)", "source": "juraj-google-style"}
{"code": "def __init__(self, name, property):\n        \n        super(BasicPropertyDescriptor, self).__init__(name)\n        self.property = property\n        self.__doc__ = self.property.__doc__", "docstring": "Create a PropertyDescriptor for basic Bokeh properties.\n\nArgs:\nname (str) : The attribute name that this property is for\nproperty (Property) : A basic property to create a descriptor for", "source": "juraj-google-style"}
{"code": "def _ungroup_and_make_mirrored(grouped_reduced, destinations, reduce_op, num_between_graph_workers=1):\n    num_replicas = len(get_devices_from(destinations)) * num_between_graph_workers\n    index = [[] for _ in range(len(grouped_reduced[0]))]\n    for per_replica_reduced in grouped_reduced:\n        for i, (v, _) in enumerate(per_replica_reduced):\n            if reduce_op == reduce_util.ReduceOp.MEAN:\n                with ops.device(v.device):\n                    index[i].append(v / num_replicas)\n            else:\n                index[i].append(v)\n    return [distribute_utils.regroup(v, wrap_class=value_lib.Mirrored) for v in index]", "docstring": "Ungroup results from all-reduce and make Mirrored objects.\n\nEach all-reduce result will be divided by the number of destinations before\nMirrored objects are created if reduce_op is \"mean\".\n\nArgs:\ngrouped_reduced: a list of lists, each sublist has components for each\ndevice, paired with a None. It is the result from\ncross_device_utils.aggregate_gradients_using*.\ndestinations: a value to colocate the result with.\nreduce_op: Indicates how values will be aggregated. Accepted values\nare `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.\nnum_between_graph_workers: number of workers in the between-graph\nreplication.\n\nReturns:\na list of Mirrored objects.", "source": "github-repos"}
{"code": "def primal_and_adjoint_for_tracing(self, node):\n    primal_template = grads.primals[tracing.Traceable]\n    adjoint_template = grads.adjoints[tracing.Traceable]\n    to_pack = node.args\n    target = ast_.copy_node(self.orig_target)\n    vjp = quoting.quote(self.namer.unique(('%s_grad' % node.func.id)))\n    tmp = create.create_temp(quoting.quote('tmp'), self.namer)\n    assert (len(node.keywords) == 0)\n    primal = template.replace(primal_template, namer=self.namer, result=target, fn=node.func, tmp=tmp, vjp=vjp, args=gast.Tuple(elts=to_pack, ctx=gast.Load()))\n    dto_pack = gast.Tuple(elts=[create.create_temp_grad(arg, self.namer) for arg in to_pack], ctx=gast.Store())\n    adjoint = template.replace(adjoint_template, namer=self.namer, result=target, vjp=vjp, dargs=dto_pack)\n    return (primal, adjoint)", "docstring": "Build the primal and adjoint of a traceable function.\n\nArgs:\nnode: ast.Call node of a function we wish to trace, instead of transform\n\nReturns:\nprimal: new ast.Assign node to replace the original primal call\nadjoint: new ast.Assign node using the VJP generated in primal to\ncalculate the adjoint.", "source": "codesearchnet"}
{"code": "def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):\n    return CheckResult((len(get_flat_neurites(neuron, tol, method)) == 0))", "docstring": "Check that a neuron has no flat neurites\n\nArguments:\nneuron(Neuron): The neuron object to test\ntol(float): tolerance\nmethod(string): way of determining flatness, 'tolerance', 'ratio' \\\nas described in :meth:`neurom.check.morphtree.get_flat_neurites`\n\nReturns:\nCheckResult with result", "source": "codesearchnet"}
{"code": "def get(self, item):\n        \n        if item not in self._item_cache:\n            try:\n                item = self.__getitem__(item)\n            except KeyError:\n                item = None\n            self._item_cache[item] = item\n        return self._item_cache[item]", "docstring": "Get item through ``__getitem__`` and cache the result.\n\nArgs:\nitem (str): name of package or module.\n\nReturns:\nPackage/Module: the corresponding object.", "source": "juraj-google-style"}
{"code": "def start(self, input_data, output_data, transform_resources, **kwargs):\n    self.transform_resources = transform_resources\n    self.input_data = input_data\n    self.output_data = output_data\n    image = self.primary_container['Image']\n    instance_type = transform_resources['InstanceType']\n    instance_count = 1\n    environment = self._get_container_environment(**kwargs)\n    self.container = _SageMakerContainer(instance_type, instance_count, image, self.local_session)\n    self.container.serve(self.primary_container['ModelDataUrl'], environment)\n    serving_port = (get_config_value('local.serving_port', self.local_session.config) or 8080)\n    _wait_for_serving_container(serving_port)\n    endpoint_url = ('http://localhost:%s/execution-parameters' % serving_port)\n    (response, code) = _perform_request(endpoint_url)\n    if (code == 200):\n        execution_parameters = json.loads(response.read())\n        for setting in ('BatchStrategy', 'MaxPayloadInMB'):\n            if ((setting not in kwargs) and (setting in execution_parameters)):\n                kwargs[setting] = execution_parameters[setting]\n    kwargs.update(self._get_required_defaults(**kwargs))\n    self.start_time = datetime.datetime.now()\n    self.batch_strategy = kwargs['BatchStrategy']\n    if ('Environment' in kwargs):\n        self.environment = kwargs['Environment']\n    self._perform_batch_inference(input_data, output_data, **kwargs)\n    self.end_time = datetime.datetime.now()\n    self.state = self._COMPLETED", "docstring": "Start the Local Transform Job\n\nArgs:\ninput_data (dict): Describes the dataset to be transformed and the location where it is stored.\noutput_data (dict): Identifies the location where to save the results from the transform job\ntransform_resources (dict): compute instances for the transform job. Currently only supports local or\nlocal_gpu\n**kwargs: additional arguments coming from the boto request object", "source": "codesearchnet"}
{"code": "def create_saveable_object(name, key, factory, call_with_mapped_captures):\n    if call_with_mapped_captures is None:\n        return factory(name=key)\n    if name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:\n        return factory(name=key, call_with_mapped_captures=call_with_mapped_captures)\n    elif is_factory_for_restored_saveable_object(factory):\n        concrete_save_fn = factory.keywords['save_function']\n\n        def save_fn(name):\n            return call_with_mapped_captures(concrete_save_fn, [name])\n        concrete_restore_fn = factory.keywords['restore_function']\n\n        def restore_fn(*restored_tensors):\n            return call_with_mapped_captures(concrete_restore_fn, restored_tensors)\n        return factory(save_function=save_fn, restore_function=restore_fn, name=key)\n    else:\n        return factory(name=key)", "docstring": "Creates a SaveableObject while potentially in a different graph.\n\nWhen creating the frozen saver for SavedModel, the save and restore ops are\nplaced in a separate graph. Since RestoredSaveableObject uses tf.functions to\nsave and restore, the function captures must be mapped to the new graph.\n\nArgs:\nname: Name of SaveableObject factory.\nkey: Checkpoint key of this SaveableObject.\nfactory: Factory method for creating the SaveableObject.\ncall_with_mapped_captures: Helper that calls a tf.function while remapping\nthe captures.\n\nReturns:\na SaveableObject.", "source": "github-repos"}
{"code": "def start_engine(self, **kwargs):\n        \n        self.current = WFCurrent(**kwargs)\n        self.wf_state = {'in_external': False, 'finished': False}\n        if not self.current.new_token:\n            self.wf_state = self.current.wf_cache.get(self.wf_state)\n            self.current.workflow_name = self.wf_state['name']\n            \n            \n            if 'subject' in self.wf_state:\n                self.current.input['id'] = self.wf_state['subject']\n                self.current.task_data['object_id'] = self.wf_state['subject']\n        self.check_for_authentication()\n        self.check_for_permission()\n        self.workflow = self.load_or_create_workflow()\n\n        \n        \n        if 'form' in self.current.input:\n            form = self.current.input['form']\n            if 'form_name' in form:\n                self.current.task_data[form['form_name']] = form\n\n        \n        \n        start_init_values = self.workflow_spec.wf_properties.get('init', 'False') == 'True'\n        if start_init_values:\n            WFInit = get_object_from_path(settings.WF_INITIAL_VALUES)()\n            WFInit.assign_wf_initial_values(self.current)\n\n        log_msg = (\"\\n\\n::::::::::: ENGINE STARTED :::::::::::\\n\"\n                   \"\\tWF: %s (Possible) TASK:%s\\n\"\n                   \"\\tCMD:%s\\n\"\n                   \"\\tSUBCMD:%s\" % (\n                       self.workflow.name,\n                       self.workflow.get_tasks(Task.READY),\n                       self.current.input.get('cmd'), self.current.input.get('subcmd')))\n        log.debug(log_msg)\n        sys._zops_wf_state_log = log_msg\n        self.current.workflow = self.workflow", "docstring": "Initializes the workflow with given request, response objects and diagram name.\n\nArgs:\nsession:\ninput:\nworkflow_name (str): Name of workflow diagram without \".bpmn\" suffix.\nFile must be placed under one of configured :py:attr:`~zengine.settings.WORKFLOW_PACKAGES_PATHS`", "source": "juraj-google-style"}
{"code": "def _model_ready_for_local_init(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n    return _ready(self._ready_for_local_init_op, sess, 'Model not ready for local init')", "docstring": "Checks if the model is ready to run local_init_op.\n\nArgs:\nsess: A `Session`.\n\nReturns:\nA tuple (is_ready, msg), where is_ready is True if ready to run\nlocal_init_op and False otherwise, and msg is `None` if the model is\nready to run local_init_op, a `String` with the reason why it is not ready\notherwise.", "source": "github-repos"}
{"code": "def list_objects(root_trackable):\n    return util.list_objects(graph_view_lib.ObjectGraphView(root_trackable))", "docstring": "Traverse the object graph and list all accessible objects.\n\nLooks for `Trackable` objects which are dependencies of\n`root_trackable`. Includes slot variables only if the variable they are\nslotting for and the optimizer are dependencies of `root_trackable`\n(i.e. if they would be saved with a checkpoint).\n\nArgs:\nroot_trackable: A `Trackable` object whose dependencies should be flattened.\n\nReturns:\nA flat list of objects.", "source": "github-repos"}
{"code": "def setZeroResettableKWH(self, password='00000000'):\n    result = False\n    self.setContext('setZeroResettableKWH')\n    try:\n        if (not self.requestA()):\n            self.writeCmdMsg('Bad read CRC on setting')\n        elif (not self.serialCmdPwdAuth(password)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_str = '0157310230304433282903'\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success: 06 returned.')\n                result = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return result", "docstring": "Serial call to zero resettable kWh registers.\n\nArgs:\npassword (str): Optional password.\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"}
{"code": "def get_collection_ref(key) -> list[Any]:\n    return get_default_graph().get_collection_ref(key)", "docstring": "Wrapper for `Graph.get_collection_ref()` using the default graph.\n\nSee `tf.Graph.get_collection_ref`\nfor more details.\n\nArgs:\nkey: The key for the collection. For example, the `GraphKeys` class contains\nmany standard names for collections.\n\nReturns:\nThe list of values in the collection with the given `name`, or an empty\nlist if no value has been added to that collection.  Note that this returns\nthe collection list itself, which can be modified in place to change the\ncollection.\n\n@compatibility(eager)\nCollections are not supported when eager execution is enabled.\n@end_compatibility", "source": "github-repos"}
{"code": "def containsParamSubset(self, params):\n    for key in params.keys():\n        if (key not in self.params):\n            return False\n        if (params[key] != self.params[key]):\n            return False\n    return True", "docstring": "Test whether this element contains at least all `params`, or more.\n\nArgs:\nparams (dict/SpecialDict): Subset of parameters.\n\nReturns:\nbool: True if all `params` are contained in this element.", "source": "codesearchnet"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--slice', metavar='DATE', dest='slice', type=str, default='',\n        action='store', help=(\n            'Create a time slice around a certain date. This parameter, if '\n            'defined will display all events that happened X minutes before '\n            'and after the defined date. X is controlled by the parameter '\n            '--slice_size but defaults to 5 minutes.'))\n\n    argument_group.add_argument(\n        '--slice_size', '--slice-size', dest='slice_size', type=int, default=5,\n        action='store', help=(\n            'Defines the slice size. In the case of a regular time slice it '\n            'defines the number of minutes the slice size should be. In the '\n            'case of the --slicer it determines the number of events before '\n            'and after a filter match has been made that will be included in '\n            'the result set. The default value is 5. See --slice or --slicer '\n            'for more details about this option.'))\n\n    argument_group.add_argument(\n        '--slicer', dest='slicer', action='store_true', default=False, help=(\n            'Create a time slice around every filter match. This parameter, '\n            'if defined will save all X events before and after a filter '\n            'match has been made. X is defined by the --slice_size '\n            'parameter.'))\n\n    argument_group.add_argument(\n        'filter', nargs='?', action='store', metavar='FILTER', default=None,\n        type=str, help=(\n            'A filter that can be used to filter the dataset before it '\n            'is written into storage. More information about the filters '\n            'and how to use them can be found here: {0:s}').format(\n                cls._DOCUMENTATION_URL))", "docstring": "Adds command line arguments to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def transition_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:\n    scope = {}\n    scope.update(self.non_fluents_scope())\n    scope.update(self.state_scope(state))\n    scope.update(self.action_scope(action))\n    return scope", "docstring": "Returns the complete transition fluent scope\nfor the current `state` and `action` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16):\n    \n\n    if dest < 0 or src < 0 or count < 0:\n        raise ValueError\n\n    fobj.seek(0, 2)\n    filesize = fobj.tell()\n\n    if max(dest, src) + count > filesize:\n        raise ValueError(\"area outside of file\")\n\n    if src > dest:\n        moved = 0\n        while count - moved:\n            this_move = min(BUFFER_SIZE, count - moved)\n            fobj.seek(src + moved)\n            buf = fobj.read(this_move)\n            fobj.seek(dest + moved)\n            fobj.write(buf)\n            moved += this_move\n        fobj.flush()\n    else:\n        while count:\n            this_move = min(BUFFER_SIZE, count)\n            fobj.seek(src + count - this_move)\n            buf = fobj.read(this_move)\n            fobj.seek(count + dest - this_move)\n            fobj.write(buf)\n            count -= this_move\n        fobj.flush()", "docstring": "Moves data around using read()/write().\n\nArgs:\nfileobj (fileobj)\ndest (int): The destination offset\nsrc (int): The source offset\ncount (int) The amount of data to move\nRaises:\nIOError: In case an operation on the fileobj fails\nValueError: In case invalid parameters were given", "source": "juraj-google-style"}
{"code": "def _gen_indicator_method(self, name, custom_class, value_count):\n        \n        method_name = name.replace(' ', '_').lower()\n        tcex = self.tcex\n\n        \n        def method_1(owner, value1, **kwargs):  \n            \n            return custom_class(tcex, value1, owner=owner, **kwargs)\n\n        def method_2(owner, value1, value2, **kwargs):  \n            \n            return custom_class(tcex, value1, value2, owner=owner, **kwargs)\n\n        def method_3(owner, value1, value2, value3, **kwargs):  \n            \n            return custom_class(tcex, value1, value2, value3, owner=owner, **kwargs)\n\n        method = locals()['method_{}'.format(value_count)]\n        setattr(self, method_name, method)", "docstring": "Dynamically generate custom Indicator methods.\n\nArgs:\nname (str): The name of the method.\ncustom_class (object): The class to add.\nvalue_count (int): The number of value parameters to support.", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):\n    \n    for entry in top_level:\n      datetime_value = entry.get('date', None)\n      package_identifiers = entry.get('packageIdentifiers', [])\n\n      if not datetime_value or not package_identifiers:\n        continue\n\n      display_name = entry.get('displayName', '<UNKNOWN>')\n      display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')\n      process_name = entry.get('processName', '<PROCESS_NAME>')\n      package_identifiers = ', '.join(package_identifiers)\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.desc = (\n          'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '\n          '{3:s}.').format(\n              display_name, display_version, process_name, package_identifiers)\n      event_data.key = ''\n      event_data.root = '/item'\n\n      event = time_events.PythonDatetimeEvent(\n          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant install history entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntop_level (dict[str, object]): plist top-level key.", "source": "juraj-google-style"}
{"code": "def binarize_sets(df, columns, cast=False, drop=True, min_freq=None):\n    \n    for column in columns:\n        d = df[column].dropna()  \n        if cast:\n            d = d.apply(set)\n\n        values = columns[column] if isinstance(columns, dict) else util.union(d)\n        for value in values:\n            name = values[value] if type(values) is dict else str(value)\n            column_name = column + '_' + name.replace(' ', '_')\n            series = d.apply(lambda c: value in c)\n            series.fillna(0, inplace=True)\n            if not min_freq or series.sum() >= min_freq:\n                df[column_name] = series\n\n    if drop:\n        \n        df.drop(list(columns), axis=1, inplace=True)\n\n    return df", "docstring": "Create dummies for the elements of a set-valued column. Operates in place.\nArgs:\ndf: data frame\ncolumns: either a dictionary of column: values pairs or a collection of columns.\ncast: whether or not to cast values to set\ndrop: whether or not to drop the binarized columns\nTODO: make interface same as binarize(). merge the two?", "source": "juraj-google-style"}
{"code": "def _get_compile_args(self, user_metrics=True):\n    self._assert_compile_was_called()\n    saved_metrics = self.compiled_metrics._user_metrics\n    saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics\n    if not user_metrics:\n        if saved_metrics is not None:\n            saved_metrics = self.compiled_metrics._metrics\n        if saved_weighted_metrics is not None:\n            saved_weighted_metrics = self.compiled_metrics._weighted_metrics\n    compile_args = {'optimizer': self.optimizer, 'loss': self.compiled_loss._user_losses, 'metrics': saved_metrics, 'weighted_metrics': saved_weighted_metrics, 'loss_weights': self.compiled_loss._user_loss_weights}\n    return compile_args", "docstring": "Used for saving or cloning a Model.\n\nArgs:\nuser_metrics: Whether to return user-supplied metrics or `Metric` objects.\nDefaults to returning the user-supplied metrics.\n\nReturns:\nDictionary of arguments that were used when compiling the model.", "source": "github-repos"}
{"code": "def register_command(self, name: str, f: Callable):\n    self._commands.append((name, f))", "docstring": "Registers an existing callable object as a command callback\n\nThis method can be used instead of the ``@command`` decorator. Both\ndo the same thing, but this method is useful for registering callbacks\nfor methods defined before or outside the scope of your bot object,\nallowing you to define methods in another file or wherever, import them,\nand register them.\n\nSee the documentation for the ``@command`` decorator for more information\non what you method will receive.\n\nExample:\n\ndef process_hello(data):\n# do stuff\n\n# later, somewhere else, etc.\n\npycord.register_command('hello', process_hello)\n\nArgs:\nname: the command to trigger the callback (see ``@command`` documentation)\nf: callable that will be triggered on command processing", "source": "codesearchnet"}
{"code": "def variant_case(store, case_obj, variant_obj):\n    case_obj['bam_files'] = []\n    case_obj['mt_bams'] = []\n    case_obj['bai_files'] = []\n    case_obj['mt_bais'] = []\n    case_obj['sample_names'] = []\n    for individual in case_obj['individuals']:\n        bam_path = individual.get('bam_file')\n        mt_bam = individual.get('mt_bam')\n        case_obj['sample_names'].append(individual.get('display_name'))\n        if (bam_path and os.path.exists(bam_path)):\n            case_obj['bam_files'].append(individual['bam_file'])\n            case_obj['bai_files'].append(find_bai_file(individual['bam_file']))\n        if (mt_bam and os.path.exists(mt_bam)):\n            case_obj['mt_bams'].append(individual['mt_bam'])\n            case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))\n        else:\n            LOG.debug('%s: no bam file found', individual['individual_id'])\n    try:\n        genes = variant_obj.get('genes', [])\n        if (len(genes) == 1):\n            hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])\n            if hgnc_gene_obj:\n                vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)\n                case_obj['region_vcf_file'] = vcf_path\n            else:\n                case_obj['region_vcf_file'] = None\n        elif (len(genes) > 1):\n            chrom = variant_obj['genes'][0]['common']['chromosome']\n            start = min((gene['common']['start'] for gene in variant_obj['genes']))\n            end = max((gene['common']['end'] for gene in variant_obj['genes']))\n            vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)\n            case_obj['region_vcf_file'] = vcf_path\n    except (SyntaxError, Exception):\n        LOG.warning('skip VCF region for alignment view')", "docstring": "Pre-process case for the variant view.\n\nAdds information about files from case obj to variant\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ncase_obj(scout.models.Case)\nvariant_obj(scout.models.Variant)", "source": "codesearchnet"}
{"code": "def get_symmetrically_distinct_miller_indices(structure, max_index):\n    \n\n    r = list(range(-max_index, max_index + 1))\n    r.reverse()\n\n    \n    conv_hkl_list = [miller for miller in itertools.product(r, r, r) if any([i != 0 for i in miller])]\n\n    sg = SpacegroupAnalyzer(structure)\n    \n    if sg.get_crystal_system() == \"trigonal\":\n        transf = sg.get_conventional_to_primitive_transformation_matrix()\n        miller_list = [hkl_transformation(transf, hkl) for hkl in conv_hkl_list]\n        prim_structure = SpacegroupAnalyzer(structure).get_primitive_standard_structure()\n        symm_ops = get_recp_symmetry_operation(prim_structure)\n    else:\n        miller_list = conv_hkl_list\n        symm_ops = get_recp_symmetry_operation(structure)\n\n    unique_millers, unique_millers_conv = [], []\n\n    def is_already_analyzed(miller_index):\n        for op in symm_ops:\n            if in_coord_list(unique_millers, op.operate(miller_index)):\n                return True\n        return False\n\n    for i, miller in enumerate(miller_list):\n        d = abs(reduce(gcd, miller))\n        miller = tuple([int(i / d) for i in miller])\n        if not is_already_analyzed(miller):\n            if sg.get_crystal_system() == \"trigonal\":\n                \n                \n                \n                unique_millers.append(miller)\n                d = abs(reduce(gcd, conv_hkl_list[i]))\n                cmiller = tuple([int(i / d) for i in conv_hkl_list[i]])\n                unique_millers_conv.append(cmiller)\n            else:\n                unique_millers.append(miller)\n                unique_millers_conv.append(miller)\n\n    return unique_millers_conv", "docstring": "Returns all symmetrically distinct indices below a certain max-index for\na given structure. Analysis is based on the symmetry of the reciprocal\nlattice of the structure.\nArgs:\nstructure (Structure): input structure.\nmax_index (int): The maximum index. For example, a max_index of 1\nmeans that (100), (110), and (111) are returned for the cubic\nstructure. All other indices are equivalent to one of these.", "source": "juraj-google-style"}
{"code": "def get_bool(self, name, default=None):\n    if (name not in self):\n        if (default is not None):\n            return default\n        raise EnvironmentError.not_found(self._prefix, name)\n    return bool(self.get_int(name))", "docstring": "Retrieves an environment variable value as ``bool``.\n\nInteger values are converted as expected: zero evaluates to\n``False``, and non-zero to ``True``. String values of ``'true'``\nand ``'false'`` are evaluated case insensitive.\n\nArgs:\nname (str): The case-insensitive, unprefixed variable name.\ndefault: If provided, a default value will be returned\ninstead of throwing ``EnvironmentError``.\n\nReturns:\nbool: The environment variable's value as a ``bool``.\n\nRaises:\nEnvironmentError: If the environment variable does not\nexist, and ``default`` was not provided.\nValueError: If the environment variable value could not be\ninterpreted as a ``bool``.", "source": "codesearchnet"}
{"code": "def build_batch(cls, size, **kwargs):\n    return [cls.build(**kwargs) for _ in range(size)]", "docstring": "Build a batch of instances of the given class, with overriden attrs.\n\nArgs:\nsize (int): the number of instances to build\n\nReturns:\nobject list: the built instances", "source": "codesearchnet"}
{"code": "def _scan(\n        self,\n        fs,  \n        dir_path,  \n        namespaces=None,  \n    ):\n        \n        \n        try:\n            for info in fs.scandir(dir_path, namespaces=namespaces):\n                yield info\n        except FSError as error:\n            if not self.on_error(dir_path, error):\n                six.reraise(type(error), error)", "docstring": "Get an iterator of `Info` objects for a directory path.\n\nArguments:\nfs (FS): A filesystem instance.\ndir_path (str): A path to a directory on the filesystem.\nnamespaces (list): A list of additional namespaces to\ninclude in the `Info` objects.\n\nReturns:\n~collections.Iterator: iterator of `Info` objects for\nresources within the given path.", "source": "juraj-google-style"}
{"code": "def make_parser():\n\n    def add_kythe_field(parser, field):\n        parser.add_argument('--' + field, dest=field, type=str, action='store', default='', help=\"Part of kythe's file-level vname proto.\")\n    parser = argparse.ArgumentParser(usage='%(prog)s [options] input')\n    add_kythe_field(parser, 'kythe_corpus')\n    add_kythe_field(parser, 'kythe_root')\n    add_kythe_field(parser, 'kythe_path')\n    parser.add_argument('--show-types', action='store_true', dest='show_types', default=None, help='Display inferred types.')\n    parser.add_argument('--show-kythe', action='store_true', dest='show_kythe', default=None, help='Display kythe facts.')\n    parser.add_argument('--show-spans', action='store_true', dest='show_spans', default=None, help='Display kythe spans.')\n    parser.add_argument('--skip-stdlib', action='store_true', dest='skip_stdlib', default=None, help='Display inferred types.')\n    wrapper = datatypes.ParserWrapper(parser)\n    pytype_config.add_basic_options(wrapper)\n    with wrapper.add_only(['--imports_info', '--debug']):\n        pytype_config.add_infrastructure_options(wrapper)\n        pytype_config.add_debug_options(wrapper)\n    wrapper.add_argument('input', metavar='input', nargs=1, help='A .py file to index')\n    return XrefParser(parser, pytype_single_args=wrapper.actions)", "docstring": "Make parser for command line args.\n\nReturns:\nA Parser object.", "source": "github-repos"}
{"code": "def kill(self, signal=None):\n        \n\n        return self.client.api.kill(self.id, signal=signal)", "docstring": "Kill or send a signal to the container.\n\nArgs:\nsignal (str or int): The signal to send. Defaults to ``SIGKILL``\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def get_assigned_value(self, name):\n    message_type = type(self)\n    try:\n        field = message_type.field_by_name(name)\n    except KeyError:\n        raise AttributeError(('Message %s has no field %s' % (message_type.__name__, name)))\n    return self.__tags.get(field.number)", "docstring": "Get the assigned value of an attribute.\n\nGet the underlying value of an attribute. If value has not\nbeen set, will not return the default for the field.\n\nArgs:\nname: Name of attribute to get.\n\nReturns:\nValue of attribute, None if it has not been set.", "source": "codesearchnet"}
{"code": "def make_config_get(conf_path):\n    project_root = _get_project_root_from_conf_path(conf_path)\n    config = load_config_in_dir(project_root)\n    return partial(config_get, config)", "docstring": "Return a function to get configuration options for a specific project\n\nArgs:\nconf_path (path-like): path to project's conf file (i.e. foo.conf\nmodule)", "source": "codesearchnet"}
{"code": "async def add(self, useriden, query: str, reqs, incunit=None, incvals=None):\n    iden = s_common.guid()\n    recur = (incunit is not None)\n    indx = self._next_indx\n    self._next_indx += 1\n    if (reqs is None):\n        reqs = {}\n    if (not query):\n        raise ValueError('empty query')\n    if ((not reqs) and (incunit is None)):\n        raise ValueError('at least one of reqs and incunit must be non-empty')\n    if ((incunit is not None) and (incvals is None)):\n        raise ValueError('incvals must be non-None if incunit is non-None')\n    if isinstance(reqs, Mapping):\n        reqs = [reqs]\n    recs = []\n    for req in reqs:\n        reqdicts = self._dictproduct(req)\n        if (not isinstance(incvals, Iterable)):\n            incvals = (incvals,)\n        recs.extend((ApptRec(rd, incunit, v) for (rd, v) in itertools.product(reqdicts, incvals)))\n    appt = _Appt(iden, recur, indx, query, useriden, recs)\n    self._addappt(iden, appt)\n    (await self._storeAppt(appt))\n    return iden", "docstring": "Persistently adds an appointment\n\nArgs:\nquery (str):\nstorm query to run\nreqs (Union[None, Dict[TimeUnit, Union[int, Tuple[int]], List[...]):\none or more dicts of the fixed aspects of the appointment.  dict value may be a single or multiple.\nMay be an empty dict or None.\nincunit (Union[None, TimeUnit]):\nthe unit that changes for recurring, or None for non-recurring.  It is an error for this value to match\na key in reqdict.\nincvals (Union[None, int, Iterable[int]): count of units of incunit or explicit day of week or day of month.\nNot allowed for incunit == None, required for others (1 would be a typical\nvalue)\n\nNotes:\nFor values in reqs that are lists and incvals if a list, all combinations of all values (the product) are\nused\n\nReturns:\niden of new appointment", "source": "codesearchnet"}
{"code": "def xor_bytes(a, b):\n    assert isinstance(a, bytes)\n    assert isinstance(b, bytes)\n    assert (len(a) == len(b))\n    res = bytearray()\n    for i in range(len(a)):\n        res.append((a[i] ^ b[i]))\n    return bytes(res)", "docstring": "XOR on two bytes objects\n\nArgs:\na (bytes): object 1\nb (bytes): object 2\n\nReturns:\nbytes: The XOR result", "source": "codesearchnet"}
{"code": "def get_asset_path(self, filename):\n    if os.path.exists(os.path.join(self._asset_path, filename)):\n        return os.path.join(self._asset_path, filename)\n    else:\n        raise AssetNotFoundError(u('Cannot find asset: {0}').format(filename))", "docstring": "Get the full system path of a given asset if it exists.  Otherwise it throws\nan error.\n\nArgs:\nfilename (str) - File name of a file in /assets folder to fetch the path for.\n\nReturns:\nstr - path to the target file.\n\nRaises:\nAssetNotFoundError - if asset does not exist in the asset folder.\n\nUsage::\npath = WTF_ASSET_MANAGER.get_asset_path(\"my_asset.png\")\n# path = /your/workspace/location/WTFProjectName/assets/my_asset.png", "source": "codesearchnet"}
{"code": "def distribute_tensor(tensor, layout):\n    from keras.src.distribution import TensorLayout\n    if isinstance(layout, TensorLayout):\n        layout = layout.backend_layout\n    if jax_utils.is_in_jax_tracing_scope():\n        return jax.lax.with_sharding_constraint(tensor, layout)\n    if isinstance(tensor, jax.Array):\n        if isinstance(layout, jax.sharding.Sharding) and tensor.sharding.is_equivalent_to(layout, ndim=len(tensor.shape)):\n            return tensor\n        elif isinstance(layout, jax_layout.Layout):\n            current_layout = getattr(tensor, 'layout', None)\n            if current_layout == layout:\n                return tensor\n    return jax.device_put(tensor, layout)", "docstring": "Distribute the tensor based on the layout.\n\nNote that this function can be used both in eager context, or within a\njitted function.\n\nArgs:\ntensor: `jax.Array` that need to be distributed.\nlayout: `TensorLayout` for the created variable, or a\nJAX-supported layout instance\n(e.g. `jax.experimental.layout.Layout`, `jax.sharding.Sharding`).\n\nReturns:\nDistributed value.", "source": "github-repos"}
{"code": "def setTime(self, yy, mm, dd, hh, minutes, ss, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setTime\")\n        try:\n            if mm < 1 or mm > 12:\n                self.writeCmdMsg(\"Month must be between 1 and 12\")\n                self.setContext(\"\")\n                return result\n\n            if dd < 1 or dd > 31:\n                self.writeCmdMsg(\"Day must be between 1 and 31\")\n                self.setContext(\"\")\n                return result\n\n            if hh < 0 or hh > 23:\n                self.writeCmdMsg(\"Hour must be between 0 and 23, inclusive\")\n                self.setContext(\"\")\n                return result\n\n            if minutes < 0 or minutes > 59:\n                self.writeCmdMsg(\"Minutes must be between 0 and 59, inclusive\")\n                self.setContext(\"\")\n                return result\n\n            if ss < 0 or ss > 59:\n                self.writeCmdMsg(\"Seconds must be between 0 and 59, inclusive\")\n                self.setContext(\"\")\n                return result\n\n            if len(password) != 8:\n                self.writeCmdMsg(\"Invalid password length.\")\n                self.setContext(\"\")\n                return result\n\n            if not self.request(False):\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    dt_buf = datetime.datetime(int(yy), int(mm), int(dd), int(hh), int(minutes), int(ss))\n                    ekm_log(\"Writing Date and Time \" + dt_buf.strftime(\"%Y-%m-%d %H:%M\"))\n                    dayofweek = dt_buf.date().isoweekday()\n                    ekm_log(\"Calculated weekday \" + str(dayofweek))\n\n                    req_str = \"015731023030363028\"\n                    req_str += binascii.hexlify(str(yy)[-2:])\n                    req_str += binascii.hexlify(str(mm).zfill(2))\n                    req_str += binascii.hexlify(str(dd).zfill(2))\n                    req_str += binascii.hexlify(str(dayofweek).zfill(2))\n                    req_str += binascii.hexlify(str(hh).zfill(2))\n                    req_str += binascii.hexlify(str(minutes).zfill(2))\n                    req_str += binascii.hexlify(str(ss).zfill(2))\n                    req_str += \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success(setTime): 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial set time with day of week calculation.\n\nArgs:\nyy (int): Last two digits of year.\nmm (int): Month 1-12.\ndd (int): Day 1-31\nhh (int): Hour 0 to 23.\nminutes (int): Minutes 0 to 59.\nss (int): Seconds 0 to 59.\npassword (str): Optional password.\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def dec(self, byts):\n    envl = s_msgpack.un(byts)\n    iv = envl.get('iv', b'')\n    asscd = envl.get('asscd', b'')\n    data = envl.get('data', b'')\n    decryptor = AESGCM(self.ekey)\n    try:\n        data = decryptor.decrypt(iv, data, asscd)\n    except Exception:\n        logger.exception('Error decrypting data')\n        return None\n    return data", "docstring": "Decode an envelope dict and decrypt the given bytes.\n\nArgs:\nbyts (bytes): Bytes to decrypt.\n\nReturns:\nbytes: Decrypted message.", "source": "codesearchnet"}
{"code": "def Process(self, parser_mediator, **kwargs):\n    if kwargs:\n        raise ValueError('Unused keyword arguments: {0:s}.'.format(', '.join(kwargs.keys())))", "docstring": "Evaluates if this is the correct plugin and processes data accordingly.\n\nThe purpose of the process function is to evaluate if this particular\nplugin is the correct one for the particular data structure at hand.\nThis function accepts one value to use for evaluation, that could be\na registry key, list of table names for a database or any other criteria\nthat can be used to evaluate if the plugin should be run or not.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between\nparsers and other components, such as storage and dfvfs.\nkwargs (dict[str, object]): Depending on the plugin they may require\ndifferent sets of arguments to be able to evaluate whether or not\nthis is the correct plugin.\n\nRaises:\nValueError: when there are unused keyword arguments.", "source": "codesearchnet"}
{"code": "def build_masked_loss(loss_function, mask_value):\n\n    def masked_loss_function(y_true, y_pred):\n        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())\n        return loss_function((y_true * mask), (y_pred * mask))\n    return masked_loss_function", "docstring": "Builds a loss function that masks based on targets\n\nArgs:\nloss_function: The loss function to mask\nmask_value: The value to mask in the targets\n\nReturns:\nfunction: a loss function that acts like loss_function with masked inputs", "source": "codesearchnet"}
{"code": "def div(numerator, denominator):\n    \n    try:\n        return numerator/denominator\n    except ZeroDivisionError:\n        if numerator == 0:\n            return 0.\n        elif denominator == 0:\n            return float('inf')\n\n        else:\n            return numerator/denominator", "docstring": "Returns numerator / denominator, but instead of a ZeroDivisionError:\n0 / 0 = 0.\nx / 0 = float('inf')\nThis is not mathematically correct, but often practically OK.\n\nArgs:\nnumerator (float or int)\ndenominator (float or int)\nReturns:\n(float)\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def renumerate_stages(pipeline):\n    stages = pipeline['stages']\n    main_index = 0\n    branch_index = 0\n    previous_refid = ''\n    for stage in stages:\n        current_refid = stage['refId'].lower()\n        if (current_refid == 'master'):\n            if (main_index == 0):\n                stage['requisiteStageRefIds'] = []\n            else:\n                stage['requisiteStageRefIds'] = [str(main_index)]\n            main_index += 1\n            stage['refId'] = str(main_index)\n        elif (current_refid == 'branch'):\n            if (previous_refid == 'branch'):\n                branch_index += 1\n            else:\n                branch_index = 0\n            stage['refId'] = str(((main_index * 100) + branch_index))\n            stage['requisiteStageRefIds'] = [str(main_index)]\n        elif (current_refid == 'merge'):\n            pass\n        previous_refid = current_refid\n        LOG.debug('step=%(name)s\\trefId=%(refId)s\\trequisiteStageRefIds=%(requisiteStageRefIds)s', stage)\n    return pipeline", "docstring": "Renumber Pipeline Stage reference IDs to account for dependencies.\n\nstage order is defined in the templates. The ``refId`` field dictates\nif a stage should be mainline or parallel to other stages.\n\n* ``master`` - A mainline required stage. Other stages depend on it\n* ``branch`` - A stage that should be ran in parallel to master stages.\n* ``merge`` - A stage thatis parallel but other stages still depend on it.\n\nArgs:\npipeline (dict): Completed Pipeline ready for renumeration.\n\nReturns:\ndict: Pipeline ready to be sent to Spinnaker.", "source": "codesearchnet"}
{"code": "def create(cls, five9, data, refresh=False):\n    return cls._call_and_serialize(five9.configuration.createDisposition, data, refresh)", "docstring": "Create a record on Five9.\n\nArgs:\nfive9 (five9.Five9): The authenticated Five9 remote.\ndata (dict): A data dictionary that can be fed to ``deserialize``.\nrefresh (bool, optional): Set to ``True`` to get the record data\nfrom Five9 before returning the record.\n\nReturns:\nBaseModel: The newly created record. If ``refresh`` is ``True``,\nthis will be fetched from Five9. Otherwise, it's the data\nrecord that was sent to the server.", "source": "codesearchnet"}
{"code": "def get_jwt_claims(self, auth_token):\n\n    def _decode_and_verify():\n        jwt_claims = jwt.JWT().unpack(auth_token).payload()\n        _verify_required_claims_exist(jwt_claims)\n        issuer = jwt_claims[u'iss']\n        keys = self._jwks_supplier.supply(issuer)\n        try:\n            return jws.JWS().verify_compact(auth_token, keys)\n        except (jwkest.BadSignature, jws.NoSuitableSigningKeys, jws.SignerAlgError) as exception:\n            raise suppliers.UnauthenticatedException(u'Signature verification failed', exception)\n    return self._cache.get_or_create(auth_token, _decode_and_verify)", "docstring": "Decodes the auth_token into JWT claims represented as a JSON object.\n\nThis method first tries to look up the cache and returns the result\nimmediately in case of a cache hit. When cache misses, the method tries to\ndecode the given auth token, verify its signature, and check the existence\nof required JWT claims. When successful, the decoded JWT claims are loaded\ninto the cache and then returned.\n\nArgs:\nauth_token: the auth token to be decoded.\n\nReturns:\nThe decoded JWT claims.\n\nRaises:\nUnauthenticatedException: When the signature verification fails, or when\nrequired claims are missing.", "source": "codesearchnet"}
{"code": "def send(self, **req_kwargs):\n        \n        i = 0\n        while True:\n            response = self._send(**req_kwargs).json()\n            if 'error' not in response:\n                break\n\n            error = response['error']\n            if error['code'] != 401:\n                raise exception.APIException(error['code'], error)\n\n            if i >= self.RETRY_CNT:\n                raise exception.APIException(error['code'], error)\n\n            logger.info('Refreshing access token')\n            self._auth.refresh()\n            i += 1\n\n        return response", "docstring": "Send an authenticated request to a Google API.\nAutomatically retries if the access token has expired.\n\nArgs:\n**req_kwargs: Arbitrary keyword arguments to pass to Requests.\n\nReturn:\ndict: The parsed JSON response.\n\nRaises:\nAPIException: If the server returns an error.\nLoginException: If :py:meth:`login` has not been called.", "source": "juraj-google-style"}
{"code": "def netflix(es, ps, e0, l=0.0001):\n    m = len(es)\n    n = len(ps[0])\n    X = np.stack(ps).T\n    pTy = (0.5 * (((n * (e0 ** 2)) + (X ** 2).sum(axis=0)) - (n * (np.array(es) ** 2))))\n    w = np.linalg.pinv((X.T.dot(X) + ((l * n) * np.eye(m)))).dot(pTy)\n    return (X.dot(w), w)", "docstring": "Combine predictions with the optimal weights to minimize RMSE.\n\nArgs:\nes (list of float): RMSEs of predictions\nps (list of np.array): predictions\ne0 (float): RMSE of all zero prediction\nl (float): lambda as in the ridge regression\n\nReturns:\nEnsemble prediction (np.array) and weights (np.array) for input predictions", "source": "codesearchnet"}
{"code": "def install_table(self, connection, table, logger = None):\n        \n        \n\n        queries = []\n        query_tmpl = 'SELECT * FROM {}'\n        for partition in table.partitions:\n            partition.localize()\n            installed_name = self.install(connection, partition)\n            queries.append(query_tmpl.format(installed_name))\n\n        \n        query = 'CREATE VIEW {} AS {} '.format( table.vid, '\\nUNION ALL\\n'.join(queries))\n        logger.debug('Creating view for table.\\n    table: {}\\n    query: {}'.format(table.vid, query))\n        self._execute(connection, query, fetch=False)", "docstring": "Installs all partitons of the table and create view with union of all partitons.\n\nArgs:\nconnection: connection to database who stores mpr data.\ntable (orm.Table):", "source": "juraj-google-style"}
{"code": "def ast_to_html(self, ast, link_resolver):\n        \n        out, _ = cmark.ast_to_html(ast, link_resolver)\n        return out", "docstring": "See the documentation of `to_ast` for\nmore information.\n\nArgs:\nast: PyCapsule, a capsule as returned by `to_ast`\nlink_resolver: hotdoc.core.links.LinkResolver, a link\nresolver instance.", "source": "juraj-google-style"}
{"code": "def getall(self, key, default=[]):\n    return (self.data[key] if (key in self.data) else default)", "docstring": "Return the list of all values for the specified key.\n\nArguments:\nkey (object): Key\ndefault (list): Default value to return if the key does not\nexist, defaults to ``[]``, i.e. an empty list.\n\nReturns:\nlist: List of all values for the specified key if the key\nexists, ``default`` otherwise.", "source": "codesearchnet"}
{"code": "def __init__(self, replay_dir, data_dir, tmp_dir, cwd=None, env=None):\n    \n    self.replay_dir = replay_dir\n    self.data_dir = data_dir\n    self.tmp_dir = tmp_dir\n    self.cwd = cwd\n    self.env = env", "docstring": "Initialize the runconfig with the various directories needed.\n\nArgs:\nreplay_dir: Where to find replays. Might not be accessible to SC2.\ndata_dir: Where SC2 should find the data and battle.net cache.\ntmp_dir: The temporary directory. None is system default.\ncwd: Where to set the current working directory.\nenv: What to pass as the environment variables.", "source": "juraj-google-style"}
{"code": "def AddTripDecoration(self, triplist, color='\n    tmpstr = self._DrawTrips(triplist, color)\n    self._decorators.append(tmpstr)", "docstring": "Flushes existing decorations and highlights the given trips.\n\nArgs:\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n# An optional string with a html color code\ncolor: \"#fff\"", "source": "codesearchnet"}
{"code": "def _build_query_components(query: str, found: Dict[str, beam.PCollection], output_name: str, run: bool=True) -> Tuple[str, Union[Dict[str, beam.PCollection], beam.PCollection, beam.Pipeline], SqlChain]:\n    if found:\n        user_pipeline = ie.current_env().user_pipeline(next(iter(found.values())).pipeline)\n        sql_pipeline = beam.Pipeline(options=user_pipeline._options)\n        ie.current_env().add_derived_pipeline(user_pipeline, sql_pipeline)\n        sql_source = {}\n        if run:\n            if has_source_to_cache(user_pipeline):\n                sql_source = pcolls_from_streaming_cache(user_pipeline, sql_pipeline, found)\n            else:\n                cache_manager = ie.current_env().get_cache_manager(user_pipeline, create_if_absent=True)\n                for pcoll_name, pcoll in found.items():\n                    cache_key = CacheKey.from_pcoll(pcoll_name, pcoll).to_str()\n                    sql_source[pcoll_name] = unreify_from_cache(pipeline=sql_pipeline, cache_key=cache_key, cache_manager=cache_manager, element_type=pcoll.element_type)\n        else:\n            sql_source = found\n        if len(sql_source) == 1:\n            query = replace_single_pcoll_token(query, next(iter(sql_source.keys())))\n            sql_source = next(iter(sql_source.values()))\n        node = SqlNode(output_name=output_name, source=set(found.keys()), query=query)\n        chain = ie.current_env().get_sql_chain(user_pipeline, set_user_pipeline=True).append(node)\n    else:\n        sql_source = beam.Pipeline()\n        ie.current_env().add_user_pipeline(sql_source)\n        node = SqlNode(output_name=output_name, source=sql_source, query=query)\n        chain = ie.current_env().get_sql_chain(sql_source).append(node)\n    return (query, sql_source, chain)", "docstring": "Builds necessary components needed to apply the SqlTransform.\n\nArgs:\nquery: The SQL query to be executed by the magic.\nfound: The PCollections with variable names found to be used by the query.\noutput_name: The output variable name in __main__ module.\nrun: Whether to prepare components for a local run or not.\n\nReturns:\nThe processed query to be executed by the magic; a source to apply the\nSqlTransform to: a dictionary of tagged PCollections, or a single\nPCollection, or the pipeline to execute the query; the chain of applied\nbeam_sql magics this one belongs to.", "source": "github-repos"}
{"code": "def validate_options(options):\n    if (not options):\n        return\n    for (k, v) in options.iteritems():\n        if (not isinstance(k, str)):\n            raise TypeError(('option %r should be a str.' % k))\n        if (not any((k.lower().startswith(valid) for valid in _GCS_OPTIONS))):\n            raise ValueError(('option %s is not supported.' % k))\n        if (not isinstance(v, basestring)):\n            raise TypeError(('value %r for option %s should be of type basestring.' % (v, k)))", "docstring": "Validate Google Cloud Storage options.\n\nArgs:\noptions: a str->basestring dict of options to pass to Google Cloud Storage.\n\nRaises:\nValueError: if option is not supported.\nTypeError: if option is not of type str or value of an option\nis not of type basestring.", "source": "codesearchnet"}
{"code": "def events(self, institute, case=None, variant_id=None, level=None,\n               comments=False, panel=None):\n        \n\n        query = {}\n\n        if variant_id:\n            if comments:\n                \n                LOG.debug(\"Fetching all comments for institute {0} case {1} variant {2}\".format(\n                          institute['_id'], case['_id'], variant_id))\n                query = {\n                    '$or': [\n                        {\n                            'category' : 'variant',\n                            'variant_id' : variant_id,\n                            'verb' : 'comment',\n                            'level' : 'global'\n                        },\n                        {\n                            'category' : 'variant',\n                            'variant_id' : variant_id,\n                            'institute' : institute['_id'],\n                            'case' : case['_id'],\n                            'verb' : 'comment',\n                            'level' : 'specific'\n                        }\n                    ]\n                }\n            else: \n                query['institute'] = institute['_id']\n                query['category'] = 'variant'\n                query['variant_id'] = variant_id\n                query['case'] = case['_id']\n        else:\n            query['institute'] = institute['_id']\n            if panel:\n                query['panel'] = panel\n            \n            else:\n                query['category'] = 'case'\n\n                if case:\n                    query['case'] = case['_id']\n\n                if comments:\n                    query['verb'] = 'comment'\n\n\n        return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)", "docstring": "Fetch events from the database.\n\nArgs:\ninstitute (dict): A institute\ncase (dict): A case\nvariant_id (str, optional): global variant id\nlevel (str, optional): restrict comments to 'specific' or 'global'\ncomments (bool, optional): restrict events to include only comments\npanel (str): A panel name\n\nReturns:\npymongo.Cursor: Query result", "source": "juraj-google-style"}
{"code": "def parseConfig(cls, value):\n    if ('enabled' in value):\n        value['enabled'] = bool(value['enabled'])\n    if ('exclude_paths' in value):\n        value['exclude_paths'] = [n.strip() for n in ast.literal_eval(value['exclude_paths'])]\n    return value", "docstring": "Parse the config values\n\nArgs:\nvalue (dict): Dictionary which contains the checker config\n\nReturns:\ndict: The checker config with parsed values", "source": "codesearchnet"}
{"code": "def build_polyline_dict(self, path, stroke_color='\n    if (not isinstance(path, list)):\n        raise AttributeError('To build a map path a list of dictionaries of latitude and logitudes is required')\n    polyline = {'path': path, 'stroke_color': stroke_color, 'stroke_opacity': stroke_opacity, 'stroke_weight': stroke_weight}\n    return polyline", "docstring": "Set a dictionary with the javascript class Polyline parameters\n\nThis function sets a default drawing configuration if the user just\npass the polyline path, but also allows to set each parameter\nindividually if the user wish so.\n\nArgs:\npath (list): A list of latitude and longitude point for the\npolyline stroke_color (str): Sets the color of the rectangle\nborder using hexadecimal color notation\nstroke_opacity (float): Sets the opacity of the rectangle border\nin percentage. If stroke_opacity = 0, the border is transparent\nstroke_weight (int): Sets the stroke girth in pixels.", "source": "codesearchnet"}
{"code": "def layer_norm(x, dim, epsilon=1e-06, name='layer_prepostprocess'):\n    with tf.variable_scope((name + '/layer_norm')):\n        scale = mtf.get_variable(x.mesh, 'layer_norm_scale', mtf.Shape([dim]), initializer=tf.ones_initializer(), activation_dtype=x.dtype)\n        bias = mtf.get_variable(x.mesh, 'layer_norm_bias', mtf.Shape([dim]), initializer=tf.zeros_initializer(), activation_dtype=x.dtype)\n        reduced_shape = (x.shape - dim)\n        mean = mtf.reduce_mean(x, output_shape=reduced_shape)\n        variance = mtf.reduce_mean(mtf.square((x - mean)), output_shape=reduced_shape)\n        norm_x = ((x - mean) * mtf.rsqrt((variance + epsilon)))\n        return ((norm_x * scale) + bias)", "docstring": "Layer normalization over dimension dim.\n\nArgs:\nx: a mtf.Tensor whose shape contains dim.\ndim: a mtf.Dimension\nepsilon: a floating point number\nname: a string. variable scope.\n\nReturns:\na mtf.Tensor with same shape as x.", "source": "codesearchnet"}
{"code": "def copy_cwl_files(from_dir=CWL_PATH, to_dir=None):\n    \n    cwl_files = glob.glob('{}{}*.cwl'.format(from_dir, os.sep))\n    \n    if len(cwl_files) > 0:\n        create_dirs(to_dir)\n    for fi in cwl_files:\n        fo = os.path.join(to_dir, os.path.basename(fi))\n        shutil.copy2(fi, fo)\n\n    return len(cwl_files)", "docstring": "Copy cwl files to a directory where the cwl-runner can find them.\n\nArgs:\nfrom_dir (str): Path to directory where to copy files from (default:\nthe cwl directory of nlppln).\nto_dir (str): Path to directory where the files should be copied to\n(e.g., the CWL working directory).", "source": "juraj-google-style"}
{"code": "def _generate_mark_code(rule_name):\n    code = ''.join([i for i in str(rule_name) if i.isdigit()])\n    code = code.zfill(2)\n    return code", "docstring": "Generates a two digit string based on a provided string\n\nArgs:\nrule_name (str): A configured rule name 'pytest_mark3'.\n\nReturns:\nstr: A two digit code based on the provided string '03'", "source": "codesearchnet"}
{"code": "def retrieve_file_from_url(url):\n    \n    try:\n        alias_source, _ = urlretrieve(url)\n        \n        with open(alias_source, 'r') as f:\n            content = f.read()\n            if content[:3].isdigit():\n                raise CLIError(ALIAS_FILE_URL_ERROR.format(url, content.strip()))\n    except Exception as exception:\n        if isinstance(exception, CLIError):\n            raise\n\n        \n        raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))\n\n    return alias_source", "docstring": "Retrieve a file from an URL\n\nArgs:\nurl: The URL to retrieve the file from.\n\nReturns:\nThe absolute path of the downloaded file.", "source": "juraj-google-style"}
{"code": "def reports_progress(reporter):\n    \n\n    def decorator(func):  \n        @wraps(func)\n        def wrapper(*args, **kwargs):  \n            with progress_reporter(reporter):\n                return func(*args, **kwargs)\n\n        return wrapper\n\n    return decorator", "docstring": "A decorator factory to mark functions which report progress.\n\nArgs:\nreporter: A zero-argument callable to report progress.\nThe callable provided should have the means to both\nretrieve and display current progress information.", "source": "juraj-google-style"}
{"code": "def register_lookup_handler(lookup_type, handler_or_path):\n    \n    handler = handler_or_path\n    if isinstance(handler_or_path, basestring):\n        handler = load_object_from_string(handler_or_path)\n    LOOKUP_HANDLERS[lookup_type] = handler\n    if type(handler) != type:\n        \n        logger = logging.getLogger(__name__)\n        logger.warning(\"Registering lookup `%s`: Please upgrade to use the \"\n                       \"new style of Lookups.\" % lookup_type)\n        warnings.warn(\n            \n            \n            \"Lookup `%s`: Please upgrade to use the new style of Lookups\"\n            \".\" % lookup_type,\n            DeprecationWarning,\n            stacklevel=2,\n        )", "docstring": "Register a lookup handler.\n\nArgs:\nlookup_type (str): Name to register the handler under\nhandler_or_path (OneOf[func, str]): a function or a path to a handler", "source": "juraj-google-style"}
{"code": "def _GetRecord(self, offset, record_size):\n    \n    record_header = \"<4sLQQL\"\n    get4 = lambda x: struct.unpack(\"<L\", self.input_dat[x:x + 4])[0]\n    url_offset = struct.unpack(\"B\", self.input_dat[offset + 52:offset + 53])[0]\n    if url_offset in [0xFF, 0xFE]:\n      return None\n    data_offset = get4(offset + 68)\n    data_size = get4(offset + 72)\n    start_pos = offset + data_offset\n    data = struct.unpack(\"{0}s\".format(data_size),\n                         self.input_dat[start_pos:start_pos + data_size])[0]\n    fmt = record_header\n    unknown_size = url_offset - struct.calcsize(fmt)\n    fmt += \"{0}s\".format(unknown_size)\n    fmt += \"{0}s\".format(record_size - struct.calcsize(fmt))\n    dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])\n    header, blocks, mtime, ctime, ftime, _, url = dat\n    url = url.split(b\"\\x00\")[0].decode(\"utf-8\")\n    if mtime:\n      mtime = mtime \n    if ctime:\n      ctime = ctime \n    return {\n        \"header\": header,  \n        \"blocks\": blocks,  \n        \"urloffset\": url_offset,  \n        \"data_offset\": data_offset,  \n        \"data_size\": data_size,  \n        \"data\": data,  \n        \"mtime\": mtime,  \n        \"ctime\": ctime,  \n        \"ftime\": ftime,  \n        \"url\": url  \n    }", "docstring": "Retrieve a single record from the file.\n\nArgs:\noffset: offset from start of input_dat where header starts\nrecord_size: length of the header according to file (untrusted)\n\nReturns:\nA dict containing a single browser history record.", "source": "juraj-google-style"}
{"code": "def run_ops(state, serial=False, no_wait=False):\n    \n\n    \n    state.deploying = True\n\n    \n    if serial:\n        _run_serial_ops(state)\n\n    \n    elif no_wait:\n        _run_no_wait_ops(state)\n\n    \n    for op_hash in state.get_op_order():\n        _run_single_op(state, op_hash)", "docstring": "Runs all operations across all servers in a configurable manner.\n\nArgs:\nstate (``pyinfra.api.State`` obj): the deploy state to execute\nserial (boolean): whether to run operations host by host\nno_wait (boolean): whether to wait for all hosts between operations", "source": "juraj-google-style"}
{"code": "def is_admin(name):\n    groups = get_user_groups(name, True)\n    for group in groups:\n        if (group in ('S-1-5-32-544', 'S-1-5-18')):\n            return True\n    return False", "docstring": "Is the passed user a member of the Administrators group\n\nArgs:\nname (str): The name to check\n\nReturns:\nbool: True if user is a member of the Administrators group, False\notherwise", "source": "codesearchnet"}
{"code": "def get_resource_id(prefix, *data):\n    parts = flatten(data)\n    for part in parts:\n        if (type(part) not in (str, int, float)):\n            raise ValueError('Supported data types: int, float, list, tuple, str. Got: {}'.format(type(part)))\n    return '{}-{}'.format(prefix, get_hash('-'.join(sorted(map(str, parts))))[(- 16):])", "docstring": "Returns a unique ID based on the SHA256 hash of the provided data. The input data is flattened and sorted to\nensure identical hashes are generated regardless of the order of the input. Values must be of types `str`, `int` or\n`float`, any other input type will raise a `ValueError`\n\n>>> get_resource_id('ec2', 'lots', 'of', 'data')\n'ec2-1d21940125214123'\n>>> get_resource_id('ecs', 'foo', ['more', 'data', 'here', 2, 3])\n'ecs-e536b036ea6fd463'\n>>> get_resource_id('ecs', ['more'], 'data', 'here', [[2], 3], 'foo')\n'ecs-e536b036ea6fd463'\n\nArgs:\nprefix (`str`): Key prefix\n*data (`str`, `int`, `float`, `list`, `tuple`): Data used to generate a unique ID\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "def get_sid_string(principal):\n    \n    \n    if principal is None:\n        principal = 'NULL SID'\n\n    try:\n        return win32security.ConvertSidToStringSid(principal)\n    except TypeError:\n        \n        principal = get_sid(principal)\n\n    try:\n        return win32security.ConvertSidToStringSid(principal)\n    except pywintypes.error:\n        log.exception('Invalid principal %s', principal)\n        raise CommandExecutionError('Invalid principal {0}'.format(principal))", "docstring": "Converts a PySID object to a string SID.\n\nArgs:\n\nprincipal(str):\nThe principal to lookup the sid. Must be a PySID object.\n\nReturns:\nstr: A string sid\n\nUsage:\n\n.. code-block:: python\n\n# Get a PySID object\npy_sid = salt.utils.win_dacl.get_sid('jsnuffy')\n\n# Get the string version of the SID\nsalt.utils.win_dacl.get_sid_string(py_sid)", "source": "juraj-google-style"}
{"code": "def label(self, name):\n        \n        if isinstance(name, str):\n            self._label = name\n        else:\n            raise TypeError('label expects a string')", "docstring": "Set snapshot label to name\n\nArgs:\nname (str or None): label to assign unitary\n\nRaises:\nTypeError: name is not string or None.", "source": "juraj-google-style"}
{"code": "def connect(self, container, *args, **kwargs):\n    if isinstance(container, Container):\n        container = container.id\n    return self.client.api.connect_container_to_network(container, self.id, *args, **kwargs)", "docstring": "Connect a container to this network.\n\nArgs:\ncontainer (str): Container to connect to this network, as either\nan ID, name, or :py:class:`~docker.models.containers.Container`\nobject.\naliases (:py:class:`list`): A list of aliases for this endpoint.\nNames in that list can be used within the network to reach the\ncontainer. Defaults to ``None``.\nlinks (:py:class:`list`): A list of links for this endpoint.\nContainers declared in this list will be linkedto this\ncontainer. Defaults to ``None``.\nipv4_address (str): The IP address of this container on the\nnetwork, using the IPv4 protocol. Defaults to ``None``.\nipv6_address (str): The IP address of this container on the\nnetwork, using the IPv6 protocol. Defaults to ``None``.\nlink_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)\naddresses.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def ParseInteger(text, is_signed=False, is_long=False):\n  \n  \n  result = _ParseAbstractInteger(text, is_long=is_long)\n\n  \n  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]\n  checker.CheckValue(result)\n  return result", "docstring": "Parses an integer.\n\nArgs:\ntext: The text to parse.\nis_signed: True if a signed integer must be parsed.\nis_long: True if a long integer must be parsed.\n\nReturns:\nThe integer value.\n\nRaises:\nValueError: Thrown Iff the text is not a valid integer.", "source": "juraj-google-style"}
{"code": "def shift(self, time: int) -> 'TimeslotCollection':\n    slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots]\n    return TimeslotCollection(*slots)", "docstring": "Return a new TimeslotCollection shifted by `time`.\n\nArgs:\ntime: time to be shifted by", "source": "codesearchnet"}
{"code": "def GetCompressedStreamTypeIndicators(cls, path_spec, resolver_context=None):\n    if ((cls._compressed_stream_remainder_list is None) or (cls._compressed_stream_store is None)):\n        (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_COMPRESSED_STREAM)\n        cls._compressed_stream_remainder_list = remainder_list\n        cls._compressed_stream_store = specification_store\n    if (cls._compressed_stream_scanner is None):\n        cls._compressed_stream_scanner = cls._GetSignatureScanner(cls._compressed_stream_store)\n    return cls._GetTypeIndicators(cls._compressed_stream_scanner, cls._compressed_stream_store, cls._compressed_stream_remainder_list, path_spec, resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported compressed stream types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "codesearchnet"}
{"code": "def inputs(dataset, batch_size=None, num_preprocess_threads=None):\n    if (not batch_size):\n        batch_size = FLAGS.batch_size\n    with tf.device('/cpu:0'):\n        (images, labels) = batch_inputs(dataset, batch_size, train=False, num_preprocess_threads=num_preprocess_threads, num_readers=1)\n    return (images, labels)", "docstring": "Generate batches of ImageNet images for evaluation.\n\nUse this function as the inputs for evaluating a network.\n\nNote that some (minimal) image preprocessing occurs during evaluation\nincluding central cropping and resizing of the image to fit the network.\n\nArgs:\ndataset: instance of Dataset class specifying the dataset.\nbatch_size: integer, number of examples in batch\nnum_preprocess_threads: integer, total number of preprocessing threads but\nNone defaults to FLAGS.num_preprocess_threads.\n\nReturns:\nimages: Images. 4D tensor of size [batch_size, FLAGS.image_size,\nimage_size, 3].\nlabels: 1-D integer Tensor of [FLAGS.batch_size].", "source": "codesearchnet"}
{"code": "def QA_data_ctptick_resample(tick, type_='1min'):\n    \n\n    resx = pd.DataFrame()\n    _temp = set(tick.TradingDay)\n\n    for item in _temp:\n\n        _data = tick.query('TradingDay==\"{}\"'.format(item))\n        try:\n            _data.loc[time(20, 0):time(21, 0), 'volume'] = 0\n        except:\n            pass\n\n        _data.volume = _data.volume.diff()\n        _data = _data.assign(amount=_data.LastPrice * _data.volume)\n        _data0 = _data[time(0,\n                            0):time(2,\n                                    30)].resample(\n                                        type_,\n                                        closed='right',\n                                        base=30,\n                                        loffset=type_\n                                    ).apply(\n                                        {\n                                            'LastPrice': 'ohlc',\n                                            'volume': 'sum',\n                                            'code': 'last',\n                                            'amount': 'sum'\n                                        }\n                                    )\n\n        _data1 = _data[time(9,\n                            0):time(11,\n                                    30)].resample(\n                                        type_,\n                                        closed='right',\n                                        base=30,\n                                        loffset=type_\n                                    ).apply(\n                                        {\n                                            'LastPrice': 'ohlc',\n                                            'volume': 'sum',\n                                            'code': 'last',\n                                            'amount': 'sum'\n                                        }\n                                    )\n\n        _data2 = _data[time(13,\n                            1):time(15,\n                                    0)].resample(\n                                        type_,\n                                        closed='right',\n                                        base=30,\n                                        loffset=type_\n                                    ).apply(\n                                        {\n                                            'LastPrice': 'ohlc',\n                                            'volume': 'sum',\n                                            'code': 'last',\n                                            'amount': 'sum'\n                                        }\n                                    )\n\n        _data3 = _data[time(21,\n                            0):time(23,\n                                    59)].resample(\n                                        type_,\n                                        closed='left',\n                                        loffset=type_\n                                    ).apply(\n                                        {\n                                            'LastPrice': 'ohlc',\n                                            'volume': 'sum',\n                                            'code': 'last',\n                                            'amount': 'sum'\n                                        }\n                                    )\n\n        resx = resx.append(_data0).append(_data1).append(_data2).append(_data3)\n    resx.columns = 
resx.columns.droplevel(0)\n    return resx.reset_index().drop_duplicates().set_index(['datetime',\n                                                           'code']).sort_index()", "docstring": "tick采样成任意级别分钟线\n\nArguments:\ntick {[type]} -- transaction\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def nonoverlap(item_a, time_a, item_b, time_b, max_value):\n    return (np.minimum((1 - item_a.count_overlap(time_a, item_b, time_b)), max_value) / float(max_value))", "docstring": "Percentage of pixels in each object that do not overlap with the other object\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def du(*components, **kwargs):\n    \n    human_readable = kwargs.get(\"human_readable\", True)\n\n    _path = path(*components)\n    if not exists(_path):\n        raise Error(\"file '{}' not found\".format(_path))\n    size = os.stat(_path).st_size\n    if human_readable:\n        return naturalsize(size)\n    else:\n        return size", "docstring": "Get the size of a file in bytes or as a human-readable string.\n\nArguments:\n\n*components (str[]): Path to file.\n**kwargs: If \"human_readable\" is True, return a formatted string,\ne.g. \"976.6 KiB\" (default True)\n\nReturns:\nint or str: If \"human_readble\" kwarg is True, return str, else int.", "source": "juraj-google-style"}
{"code": "def fill(self, config, section):\n        \n        if config.has_section(section):\n            default_url = self.DEFAULT_REPOSITORIES.get(self.name, '')\n            self.url = RepositoryURL(config_get(config, section, 'repository', default_url))\n            self.username = config_get(config, section, 'username', '')\n            self.password = config_get(config, section, 'password', '')", "docstring": "Fill data from a given configuration section.\n\nArgs:\nconfig (configparser): the configuration file\nsection (str): the section to use", "source": "juraj-google-style"}
{"code": "def pxbounds(self, geom, clip=False):\n    try:\n        if isinstance(geom, dict):\n            if ('geometry' in geom):\n                geom = shape(geom['geometry'])\n            else:\n                geom = shape(geom)\n        elif isinstance(geom, BaseGeometry):\n            geom = shape(geom)\n        else:\n            geom = wkt.loads(geom)\n    except:\n        raise TypeError('Invalid geometry object')\n    if geom.disjoint(shape(self)):\n        raise ValueError('Geometry outside of image bounds')\n    (xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds\n    (_nbands, ysize, xsize) = self.shape\n    if clip:\n        xmin = max(xmin, 0)\n        ymin = max(ymin, 0)\n        xmax = min(xmax, xsize)\n        ymax = min(ymax, ysize)\n    return (xmin, ymin, xmax, ymax)", "docstring": "Returns the bounds of a geometry object in pixel coordinates\n\nArgs:\ngeom: Shapely geometry object or GeoJSON as Python dictionary or WKT string\nclip (bool): Clip the bounds to the min/max extent of the image\n\nReturns:\nlist: bounds in pixels [min x, min y, max x, max y] clipped to image bounds", "source": "codesearchnet"}
{"code": "def _run_post_configure_callbacks(self, configure_args):\n    resulting_configuration = ImmutableDict(self.config)\n    multiple_callbacks = copy.copy(self._post_configure_callbacks['multiple'])\n    single_callbacks = copy.copy(self._post_configure_callbacks['single'])\n    self._post_configure_callbacks['single'] = []\n    for callback in multiple_callbacks:\n        callback(resulting_configuration, configure_args)\n    for callback in single_callbacks:\n        callback(resulting_configuration, configure_args)", "docstring": "Run all post configure callbacks we have stored.\n\nFunctions are passed the configuration that resulted from the call to\n:meth:`configure` as the first argument, in an immutable form; and are\ngiven the arguments passed to :meth:`configure` for the second\nargument.\n\nReturns from callbacks are ignored in all fashion.\n\nArgs:\nconfigure_args (list[object]):\nThe full list of arguments passed to :meth:`configure`.\n\nReturns:\nNone:\nDoes not return anything.", "source": "codesearchnet"}
{"code": "def apply_sync(processor: Processor | PartProcessor, content: Iterable[ProcessorPart]) -> list[ProcessorPart]:\n    return asyncio.run(apply_async(processor, content))", "docstring": "Applies a Processor synchronously.\n\nWhen a part processor is given as input, this method will first turn it into\na processor and then will process the content concurrently.\n\nArgs:\nprocessor: the Processor to apply to the content.\ncontent: a collection of ProcessorParts on which to apply the Processor.\n\nReturns:\nthe content, with the Processor applied to each content part.", "source": "github-repos"}
{"code": "def get_book_progress(self, asin):\n    \n    kbp = self._get_api_call('get_book_progress', '\"%s\"' % asin)\n    return KindleCloudReaderAPI._kbp_to_progress(kbp)", "docstring": "Returns the progress data available for a book.\n\nNOTE: A summary of the two progress formats can be found in the\ndocstring for `ReadingProgress`.\n\nArgs:\nasin: The asin of the book to be queried.\n\nReturns:\nA `ReadingProgress` instance corresponding to the book associated with\n`asin`.", "source": "juraj-google-style"}
{"code": "def queryString_required_ClassVersion(strList):\n\t\n\tdef _dec(function):\n\t\t@wraps(function)\n\t\tdef _wrap(classInstance, request, *args, **kwargs):\n\t\t\tfor i in strList:\n\t\t\t\tif i not in request.GET:\n\t\t\t\t\traise Http404(\"api does not exist\")\n\t\t\treturn function(classInstance, request, *args, **kwargs)\n\t\treturn _wrap\n\treturn _dec", "docstring": "An decorator checking whether queryString key is valid or not\nArgs:\nstr: allowed queryString key\n\nReturns:\nif contains invalid queryString key, it will raise exception.", "source": "juraj-google-style"}
{"code": "def get_dataset_split(tmp_dir, split, use_control_set):\n  \n  if not use_control_set:\n    dataset_split = {\n        problem.DatasetSplit.TRAIN: [\n            f for f in tf.gfile.Glob(\n                os.path.join(tmp_dir, \"train-novels*.txt\"))\n        ],\n        problem.DatasetSplit.EVAL: [\n            os.path.join(tmp_dir, \"lambada_control_test_data_plain_text.txt\")\n        ],\n    }\n\n  return dataset_split[split]", "docstring": "Gives the file paths with regards to the given split.\n\nArgs:\ntmp_dir: temp directory\nsplit: dataset split\nuse_control_set: uses control dataset if true.\n\nReturns:\nlist of file paths.", "source": "juraj-google-style"}
{"code": "def recipe_drive_copy(config, auth_read, source, destination):\n    drive(config, {'auth': auth_read, 'copy': {'source': source, 'destination': destination}})", "docstring": "Copy a drive document.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nsource (string) - Name or URL of document to copy from.\ndestination (string) - Name document to copy to.", "source": "github-repos"}
{"code": "def find_trivial_constructor(type_):\n    \n    assert isinstance(type_, class_declaration.class_t)\n\n    trivial = type_.constructors(\n        lambda x: is_trivial_constructor(x),\n        recursive=False,\n        allow_empty=True)\n    if trivial:\n        return trivial[0]\n\n    return None", "docstring": "Returns reference to trivial constructor.\n\nArgs:\ntype_ (declarations.class_t): the class to be searched.\n\nReturns:\ndeclarations.constructor_t: the trivial constructor", "source": "juraj-google-style"}
{"code": "def flush_redis_unsafe(redis_client=None):\n    \n    if redis_client is None:\n        ray.worker.global_worker.check_connected()\n        redis_client = ray.worker.global_worker.redis_client\n\n    \n    keys = redis_client.keys(\"LOGFILE:*\")\n    if len(keys) > 0:\n        num_deleted = redis_client.delete(*keys)\n    else:\n        num_deleted = 0\n    print(\"Deleted {} log files from Redis.\".format(num_deleted))\n\n    \n    keys = redis_client.keys(\"event_log:*\")\n    if len(keys) > 0:\n        num_deleted = redis_client.delete(*keys)\n    else:\n        num_deleted = 0\n    print(\"Deleted {} event logs from Redis.\".format(num_deleted))", "docstring": "This removes some non-critical state from the primary Redis shard.\n\nThis removes the log files as well as the event log from Redis. This can\nbe used to try to address out-of-memory errors caused by the accumulation\nof metadata in Redis. However, it will only partially address the issue as\nmuch of the data is in the task table (and object table), which are not\nflushed.\n\nArgs:\nredis_client: optional, if not provided then ray.init() must have been\ncalled.", "source": "juraj-google-style"}
{"code": "def sample(self, hashes):\n    api_name = 'opendns-sample'\n    fmt_url_path = u'sample/{0}'\n    return self._multi_get(api_name, fmt_url_path, hashes)", "docstring": "Get the information about a sample based on its hash.\n\nArgs:\nhashes: an enumerable of strings as hashes\nReturns:\nAn enumerable of arrays which contains the information\nabout the original samples", "source": "codesearchnet"}
{"code": "def cast_to_seq(obj, alphabet=IUPAC.extended_protein):\n    \n\n    if isinstance(obj, Seq):\n        return obj\n    if isinstance(obj, SeqRecord):\n        return obj.seq\n    if isinstance(obj, str):\n        obj = obj.upper()\n        return Seq(obj, alphabet)\n    else:\n        raise ValueError('Must provide a string, Seq, or SeqRecord object.')", "docstring": "Return a Seq representation of a string or SeqRecord object.\n\nArgs:\nobj (str, Seq, SeqRecord): Sequence string or Biopython SeqRecord object\nalphabet: See Biopython SeqRecord docs\n\nReturns:\nSeq: Seq representation of the sequence", "source": "juraj-google-style"}
{"code": "async def get_me(self, input_peer=False):\n        \n        if input_peer and self._self_input_peer:\n            return self._self_input_peer\n\n        try:\n            me = (await self(\n                functions.users.GetUsersRequest([types.InputUserSelf()])))[0]\n\n            self._bot = me.bot\n            if not self._self_input_peer:\n                self._self_input_peer = utils.get_input_peer(\n                    me, allow_self=False\n                )\n\n            return self._self_input_peer if input_peer else me\n        except errors.UnauthorizedError:\n            return None", "docstring": "Gets \"me\" (the self user) which is currently authenticated,\nor None if the request fails (hence, not authenticated).\n\nArgs:\ninput_peer (`bool`, optional):\nWhether to return the :tl:`InputPeerUser` version or the normal\n:tl:`User`. This can be useful if you just need to know the ID\nof yourself.\n\nReturns:\nYour own :tl:`User`.", "source": "juraj-google-style"}
{"code": "def weights_to_cpu(state_dict):\n    \n    state_dict_cpu = OrderedDict()\n    for key, val in state_dict.items():\n        state_dict_cpu[key] = val.cpu()\n    return state_dict_cpu", "docstring": "Copy a model state_dict to cpu.\n\nArgs:\nstate_dict (OrderedDict): Model weights on GPU.\n\nReturns:\nOrderedDict: Model weights on GPU.", "source": "juraj-google-style"}
{"code": "def global_horizontal_illuminance(self, value=999999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `global_horizontal_illuminance`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `global_horizontal_illuminance`')\n    self._global_horizontal_illuminance = value", "docstring": "Corresponds to IDD Field `global_horizontal_illuminance`\nwill be missing if >= 999900\n\nArgs:\nvalue (float): value for IDD Field `global_horizontal_illuminance`\nUnit: lux\nvalue >= 0.0\nMissing value: 999999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_version_string(version):\n    version_len = len(version)\n    if (version_len == 3):\n        version_string = ('%d.%d.%d' % version)\n    elif (version_len == 4):\n        version_string = ('%d.%d.%d-%s' % version)\n    else:\n        raise Exception('Version tuple is non-semver-compliant {} length!'.format(version_len))\n    return version_string", "docstring": "Translate a version tuple into a string.\n\nSpecify the __version__ as a tuple for more precise comparisons, and\ntranslate it to __version_string__ for when that's needed.\n\nThis function exists primarily for easier unit testing.\n\nArgs:\nversion (Tuple[int, int, int, str]): three ints and an optional string.\n\nReturns:\nversion_string (str): the tuple translated into a string per semver.org", "source": "codesearchnet"}
{"code": "def _CheckCacheFileForMatch(self, cache_filename, scopes):\n        \n        creds = {  \n            'scopes': sorted(list(scopes)) if scopes else None,\n            'svc_acct_name': self.__service_account_name,\n        }\n        cache_file = _MultiProcessCacheFile(cache_filename)\n        try:\n            cached_creds_str = cache_file.LockedRead()\n            if not cached_creds_str:\n                return None\n            cached_creds = json.loads(cached_creds_str)\n            if creds['svc_acct_name'] == cached_creds['svc_acct_name']:\n                if creds['scopes'] in (None, cached_creds['scopes']):\n                    return cached_creds['scopes']\n        except KeyboardInterrupt:\n            raise\n        except:  \n            \n            pass", "docstring": "Checks the cache file to see if it matches the given credentials.\n\nArgs:\ncache_filename: Cache filename to check.\nscopes: Scopes for the desired credentials.\n\nReturns:\nList of scopes (if cache matches) or None.", "source": "juraj-google-style"}
{"code": "def set_timeout(self, network_timeout):\n        \n        \n        if network_timeout == self._network_timeout:\n            return\n        self._network_timeout = network_timeout\n        self._disconnect()", "docstring": "Set the timeout for existing and future Clients.\n\nClose all current connections. This will cause future operations to\ncreate new Clients with the network_timeout passed through\nsocketTimeoutMS optional parameter.\n\nArgs:\nnetwork_timeout: The new value in milliseconds for the timeout.", "source": "juraj-google-style"}
{"code": "def _any(objs, query):\n    \n    for obj in objs:\n        if isinstance(obj, Document):\n            if _any(obj.roots, query):\n                return True\n        else:\n            if any(query(ref) for ref in obj.references()):\n                return True\n    else:\n        return False", "docstring": "Whether any of a collection of objects satisfies a given query predicate\n\nArgs:\nobjs (seq[Model or Document]) :\n\nquery (callable)\n\nReturns:\nTrue, if ``query(obj)`` is True for some object in ``objs``, else False", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, flags=None, miss_send_len=None):\n        \n        super().__init__(xid, flags, miss_send_len)\n        self.header.message_type = Type.OFPT_SET_CONFIG", "docstring": "Create a SetConfig with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\nflags (~pyof.v0x01.controller2switch.common.ConfigFlag):\nOFPC_* flags.\nmiss_send_len (int): UBInt16 max bytes of new flow that the\ndatapath should send to the controller.", "source": "juraj-google-style"}
{"code": "def request(self, subject, callback, msg=None):\n        \n        inbox = self._build_inbox()\n        s = self.subscribe(inbox, callback)\n        self.unsubscribe(s, 1)\n        self.publish(subject, msg, inbox)\n\n        return s", "docstring": "ublish a message with an implicit inbox listener as the reply.\nMessage is optional.\n\nArgs:\nsubject (string): a string with the subject\ncallback (function): callback to be called\nmsg (string=None): payload string", "source": "juraj-google-style"}
{"code": "def __init__(self, labels=None, _deprecated=None, *, formatter=_formats.default_formatter):\n        \n        self._columns = collections.OrderedDict()\n        self._formats = dict()\n        self.formatter = formatter\n\n        if _deprecated is not None:\n            warnings.warn(\"Two-argument __init__ is deprecated. Use Table().with_columns(...)\", FutureWarning)\n            columns, labels = labels, _deprecated\n            columns = columns if columns is not None else []\n            labels = labels if labels is not None else []\n            assert len(labels) == len(columns), 'label/column number mismatch'\n        else:\n            labels = labels if labels is not None else []\n            columns = [[] for _ in labels]\n\n        self._num_rows = 0 if len(columns) is 0 else len(columns[0])\n\n        \n        for column, label in zip(columns, labels):\n            self[label] = column\n\n        self.take = _RowTaker(self)\n        self.exclude = _RowExcluder(self)", "docstring": "Create an empty table with column labels.\n\n>>> tiles = Table(make_array('letter', 'count', 'points'))\n>>> tiles\nletter | count | points\n\nArgs:\n``labels`` (list of strings): The column labels.\n\n``formatter`` (Formatter): An instance of :class:`Formatter` that\nformats the columns' values.", "source": "juraj-google-style"}
{"code": "def _create(cls, model_class, *args, **kwargs):\n    manager = cls._get_manager(model_class)\n    return manager.create_user(*args, **kwargs)", "docstring": "Create a new user instance.\n\nArgs:\nmodel_class:\nThe type of model to create an instance of.\nargs:\nPositional arguments to create the instance with.\nkwargs:\nKeyword arguments to create the instance with.\n\nReturns:\nA new user instance of the type specified by\n``model_class``.", "source": "codesearchnet"}
{"code": "def _explode_shorthand_ip_string(self):\n        \n        if isinstance(self, _BaseNet):\n            ip_str = str(self.ip)\n        else:\n            ip_str = str(self)\n\n        ip_int = self._ip_int_from_string(ip_str)\n        parts = []\n        for i in xrange(self._HEXTET_COUNT):\n            parts.append('%04x' % (ip_int & 0xFFFF))\n            ip_int >>= 16\n        parts.reverse()\n        if isinstance(self, _BaseNet):\n            return '%s/%d' % (':'.join(parts), self.prefixlen)\n        return ':'.join(parts)", "docstring": "Expand a shortened IPv6 address.\n\nArgs:\nip_str: A string, the IPv6 address.\n\nReturns:\nA string, the expanded IPv6 address.", "source": "juraj-google-style"}
{"code": "def sanitize(s, normalize_whitespace=True, normalize_unicode=True, form='NFKC', enforce_encoding=True, encoding='utf-8'):\n    if enforce_encoding:\n        s = s.encode(encoding, errors='ignore').decode(encoding, errors='ignore')\n    if normalize_unicode:\n        s = unicodedata.normalize(form, s)\n    if normalize_whitespace:\n        s = re.sub('\\\\s+', ' ', s).strip()\n    return s", "docstring": "Normalize a string\n\nArgs:\ns (unicode string): input unicode string\nnormalize_whitespace (bool): if True, normalize all whitespace to single spaces (including newlines),\nstrip whitespace at start/end\nnormalize_unicode (bool): if True, normalize unicode form to 'form'\nform (str): unicode form\nenforce_encoding (bool): if True, encode string to target encoding and re-decode, ignoring errors\nand stripping all characters not part of the encoding\nencoding (str): target encoding for the above\n\nReturns:\nstr: unicode output string", "source": "codesearchnet"}
{"code": "def get_metrics_by_kernel(rows: list[list[str]]) -> list[dict[str, tuple[str, str]]]:\n    name_index = {}\n    units = rows[1]\n    for i, name in enumerate(rows[0]):\n        name_index[name] = i\n    results = []\n    for kernel in rows[2:]:\n        values = {}\n        for idx, name in enumerate(rows[0]):\n            values[name] = (kernel[idx], units[idx])\n        results.append(values)\n    return results", "docstring": "Converts ncu-rep table to a dictionary of metrics by kernel.\n\nArgs:\nrows: ncu-rep table rows\n\nReturns:\ndictionary of metrics by kernel", "source": "github-repos"}
{"code": "def db020(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db020`'.format(value))\n\n        self._db020 = value", "docstring": "Corresponds to IDD Field `db020`\nmean coincident wet-bulb temperature to\nDry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)\n\nArgs:\nvalue (float): value for IDD Field `db020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def PathToComponents(path):\n  \n  precondition.AssertType(path, Text)\n  if path and not path.startswith(\"/\"):\n    raise ValueError(\"Path '{}' is not absolute\".format(path))\n\n  if path:\n    return tuple(path.split(\"/\")[1:])\n  else:\n    return ()", "docstring": "Converts a canonical path representation to a list of components.\n\nArgs:\npath: A canonical MySQL path representation.\n\nReturns:\nA sequence of path components.", "source": "juraj-google-style"}
{"code": "def forward_ad(node, wrt, preserve_result=False, check_dims=True):\n    if (not isinstance(node, gast.FunctionDef)):\n        raise TypeError\n    cfg_obj = cfg.CFG.build_cfg(node)\n    cfg.Active(range(len(node.args.args))).visit(cfg_obj.entry)\n    fad = ForwardAD(wrt, preserve_result, check_dims)\n    node = fad.visit(node)\n    node = annotate.find_stacks(node)\n    node = gast.Module([node])\n    anno.clearanno(node)\n    return (node, fad.required)", "docstring": "Perform forward-mode AD on an AST.\n\nThis function analyses the AST to determine which variables are active and\nproceeds by taking the naive derivative. Before returning the primal and\nadjoint it annotates push and pop statements as such.\n\nArgs:\nnode: A `FunctionDef` AST node.\nwrt: A tuple of argument indices with respect to which we take the\nderivative.\npreserve_result: A boolean indicating whether the original\nnon-differentiated function value should be returned\ncheck_dims: A boolean indicating whether the provided derivatives should\nhave the same shape as their corresponding arguments.\n\nReturns:\nmod: A `Module` node containing the naive primal and adjoint of the\nfunction which can be fed to the `split` and `joint` functions.\nrequired: A list of tuples of functions and argument indices. These\nfunctions were called by the function but did not have an adjoint.", "source": "codesearchnet"}
{"code": "def add(self, value):\n        \n        \n        value = int(value)\n        if value < 10:\n            value = 10\n        if value > 600:\n            value = 600\n\n        \n        self._data.setdefault(value, 0)\n        self._data[value] += 1\n        self._len += 1", "docstring": "Add the value to this histogram.\n\nArgs:\nvalue (int): The value. Values outside of ``10 <= x <= 600``\nwill be raised to ``10`` or reduced to ``600``.", "source": "juraj-google-style"}
{"code": "def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region, provider_region):\n    region = None\n    if custom_bucket:\n        region = hook_region\n    else:\n        region = stacker_bucket_region\n    return (region or provider_region)", "docstring": "Returns the appropriate region to use when uploading functions.\n\nSelect the appropriate region for the bucket where lambdas are uploaded in.\n\nArgs:\ncustom_bucket (str, None): The custom bucket name provided by the\n`bucket` kwarg of the aws_lambda hook, if provided.\nhook_region (str): The contents of the `bucket_region` argument to\nthe hook.\nstacker_bucket_region (str): The contents of the\n`stacker_bucket_region` global setting.\nprovider_region (str): The region being used by the provider.\n\nReturns:\nstr: The appropriate region string.", "source": "codesearchnet"}
{"code": "def with_flat_values(self, new_values):\n    if isinstance(self._values, RaggedTensor):\n        return self.with_values(self.values.with_flat_values(new_values))\n    else:\n        new_values = _convert_to_ragged_tensor_values(new_values)\n    return self.with_values(new_values)", "docstring": "Returns a copy of `self` with `flat_values` replaced by `new_value`.\n\nPreserves cached row-partitioning tensors such as `self.cached_nrows` and\n`self.cached_value_rowids` if they have values.\n\nArgs:\nnew_values: Potentially ragged tensor that should replace\n`self.flat_values`.  Must have `rank > 0`, and must have the same number\nof rows as `self.flat_values`.\n\nReturns:\nA `RaggedTensor`.\n`result.rank = self.ragged_rank + new_values.rank`.\n`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.", "source": "github-repos"}
{"code": "def _project_TH2(self, hist: Hist) -> Any:\n    if (len(self.projection_axes) != 1):\n        raise ValueError(len(self.projection_axes), 'Invalid number of axes')\n    projection_func_map = {TH1AxisType.x_axis.value: hist.ProjectionX, TH1AxisType.y_axis.value: hist.ProjectionY}\n    try:\n        axis_type = self.projection_axes[0].axis_type.value\n    except ValueError:\n        axis_type = self.axis_type\n    projection_func = projection_func_map[axis_type]\n    logger.info(f'Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}')\n    projected_hist = projection_func()\n    return projected_hist", "docstring": "Perform the actual TH2 -> TH1 projection.\n\nThis projection can only be to 1D.\n\nArgs:\nhist (ROOT.TH2): Histogram from which the projections should be performed.\nReturns:\nROOT.TH1: The projected histogram.", "source": "codesearchnet"}
{"code": "def __init__(self, index: int,\n                 lo_freq: float = None,\n                 lo_freq_range: Tuple[float, float] = (0, float(\"inf\"))):\n        \n        super().__init__(index, lo_freq, lo_freq_range)", "docstring": "Create new drive (d) channel.\n\nArgs:\nindex (int): index of the channel\nlo_freq (float): default frequency of LO (local oscillator)\nlo_freq_range (tuple): feasible range of LO frequency", "source": "juraj-google-style"}
{"code": "def DeserializeFromDB(buffer):\n        \n        m = StreamManager.GetStream(buffer)\n        reader = BinaryReader(m)\n        account = AccountState()\n        account.Deserialize(reader)\n\n        StreamManager.ReleaseStream(m)\n\n        return account", "docstring": "Deserialize full object.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nAccountState:", "source": "juraj-google-style"}
{"code": "def get_box_comments(self, box_key):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.comments_suffix\n\t\t\t\t\t\t])\n\t\treturn self._req('get', uri)", "docstring": "Gets comments in a box with the provided attributes.\nArgs:\nbox_key\t\t\tkey for box\nreturn\t\t\t(status code, list of comment dicts)", "source": "juraj-google-style"}
{"code": "def createDomains(tlds, nicks=None, nicksFile=None):\n    \n    domain_candidates = []\n    if nicks != None:\n        for n in nicks:\n            for t in tlds:\n                tmp = {\n                    \"domain\" : n + t[\"tld\"],\n                    \"type\" : t[\"type\"],\n                    \"tld\": t[\"tld\"]\n                }\n                domain_candidates.append(tmp)\n    elif nicksFile != None:\n        with open(nicksFile, \"r\") as iF:\n            nicks = iF.read().splitlines()\n            for n in nicks:\n                for t in tlds:\n                    tmp = {\n                        \"domain\" : n + t[\"tld\"],\n                        \"type\" : t[\"type\"],\n                        \"tld\": t[\"tld\"]\n                    }\n                    domain_candidates.append(tmp)\n    return domain_candidates", "docstring": "Method that globally permits to generate the domains to be checked.\n\nArgs:\n-----\ntlds: List of tlds.\nnicks: List of aliases.\nnicksFile: The filepath to the aliases file.\n\nReturns:\n--------\nlist: list of domains to be checked.", "source": "juraj-google-style"}
{"code": "def read_int8(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sb' % endian))", "docstring": "Read 1 byte as a signed integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def plot_tree(ax, tree, plane='xy', diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA):\n    (plane0, plane1) = _plane2col(plane)\n    segs = [((s[0][plane0], s[0][plane1]), (s[1][plane0], s[1][plane1])) for s in iter_segments(tree)]\n    linewidth = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)\n    color = _get_color(color, tree.type)\n    collection = LineCollection(segs, color=color, linewidth=linewidth, alpha=alpha)\n    ax.add_collection(collection)", "docstring": "Plots a 2d figure of the tree's segments\n\nArgs:\nax(matplotlib axes): on what to plot\ntree(neurom.core.Tree or neurom.core.Neurite): plotted tree\nplane(str): Any pair of 'xyz'\ndiameter_scale(float): Scale factor multiplied with segment diameters before plotting\nlinewidth(float): all segments are plotted with this width, but only if diameter_scale=None\ncolor(str or None): Color of plotted values, None corresponds to default choice\nalpha(float): Transparency of plotted values\n\nNote:\nIf the tree contains one single point the plot will be empty\nsince no segments can be constructed.", "source": "codesearchnet"}
{"code": "def dbmin10years(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmin10years`'.format(value))\n    self._dbmin10years = value", "docstring": "Corresponds to IDD Field `dbmin10years`\n10-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin10years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):\n        \n        if isinstance(vectorizable_token, VectorizableToken) is False:\n            raise TypeError()\n\n        _ = self.inference(test_arr)\n        score_arr = self.__encoder_decoder_controller.get_reconstruction_error()\n        score_arr = score_arr.reshape((\n            score_arr.shape[0],\n            -1\n        )).mean(axis=1)\n\n        score_list = score_arr.tolist()\n\n        abstract_list = []\n        for i in range(limit):\n            if self.__normal_prior_flag is True:\n                key = score_arr.argmin()\n            else:\n                key = score_arr.argmax()\n\n            score = score_list.pop(key)\n            score_arr = np.array(score_list)\n\n            seq_arr = test_arr[key]\n            token_arr = vectorizable_token.tokenize(seq_arr.tolist())\n            s = \" \".join(token_arr.tolist())\n            _s = \"\".join(token_arr.tolist())\n\n            for sentence in sentence_list:\n                if s in sentence or _s in sentence:\n                    abstract_list.append(sentence)\n                    abstract_list = list(set(abstract_list))\n\n            if len(abstract_list) >= limit:\n                break\n\n        return abstract_list", "docstring": "Summarize input document.\n\nArgs:\ntest_arr:               `np.ndarray` of observed data points..\nvectorizable_token:     is-a `VectorizableToken`.\nsentence_list:          `list` of all sentences.\nlimit:                  The number of selected abstract sentence.\n\nReturns:\n`np.ndarray` of scores.", "source": "juraj-google-style"}
{"code": "def update_display(self, force=False):\n    with self._lock:\n        stats_updated = False\n        for pcoll_id, stats in self._pcollection_stats.items():\n            cache_label = stats['cache_label']\n            version = stats['version']\n            if force or not self._cache_manager.is_latest_version(version, 'sample', cache_label):\n                pcoll_list, version = self._cache_manager.read('sample', cache_label)\n                stats['sample'] = list(pcoll_list)\n                stats['version'] = version\n                stats_updated = True\n                if pcoll_id in self._analyzer.tl_referenced_pcoll_ids():\n                    self._text_to_print[pcoll_id] = str('%s produced %s' % (self._producers[pcoll_id], interactive_pipeline_graph.format_sample(pcoll_list, 5)))\n        if force or stats_updated:\n            self._pipeline_graph.update_pcollection_stats(self._pcollection_stats)\n            if IPython:\n                from IPython import display\n                display.clear_output(True)\n                rendered_graph = self._renderer.render_pipeline_graph(self._pipeline_graph)\n                display.display(display.HTML(rendered_graph))\n            _display_progress('Running...')\n            for text in self._text_to_print.values():\n                if text != '':\n                    _display_progress(text)", "docstring": "Updates display on the frontend.\n\nRetrieves the latest execution status by querying CacheManager and updates\ndisplay on the fronend. The assumption is that there is only one pipeline in\na cell, because it clears up everything in the cell output every update\ncycle.\n\nArgs:\nforce: (bool) whether to force updating when no stats change happens.", "source": "github-repos"}
{"code": "def _merge_with(self, other: 'DynamicRaggedShape.Spec') -> 'DynamicRaggedShape.Spec':\n    max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n    a = self._with_num_row_partitions(max_num_row_partitions)\n    b = other._with_num_row_partitions(max_num_row_partitions)\n    new_rp = [a._merge_with(b) for a, b in zip(a._row_partitions, b._row_partitions)]\n    new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n    dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n    return DynamicRaggedShape.Spec(new_rp, new_static_inner_shape, dtype=dtype)", "docstring": "Merges all information between two specs.\n\nSpecs are expected to represent the same information modulo\nnum_row_partitons.\n\nIf the specs are of different ranks, then fail.\n\nArgs:\nother: another Spec of the same rank.\n\nReturns:\na Spec with the union of information.", "source": "github-repos"}
{"code": "def parse(cls, args):\n        \n\n        try:\n            (options, args) = cls.optparser.parse_args(args)\n            if options.latin_statements is None and options.script_location is None:\n                raise ParseError(\"One of script or it's location\"\n                                 \" must be specified\",\n                                 cls.optparser.format_help())\n        except OptionParsingError as e:\n            raise ParseError(e.msg, cls.optparser.format_help())\n        except OptionParsingExit as e:\n            return None\n\n        if options.script_location is not None:\n            if options.latin_statements is not None:\n                raise ParseError(\n                    \"Both script and script_location cannot be specified\",\n                    cls.optparser.format_help())\n\n            if ((options.script_location.find(\"s3:\n                (options.script_location.find(\"s3n:\n\n                \n\n                try:\n                    s = open(options.script_location).read()\n                except IOError as e:\n                    raise ParseError(\"Unable to open script location: %s\" %\n                                     str(e),\n                                     cls.optparser.format_help())\n                options.script_location = None\n                options.latin_statements = s\n\n            if (args is not None) and (len(args) > 0):\n                if options.latin_statements is not None:\n                    raise ParseError(\n                        \"Extra arguments can only be \"\n                        \"supplied with a script_location in S3 right now\",\n                        cls.optparser.format_help())\n\n                p = {}\n                for a in args:\n                    kv = a.split('=')\n                    if len(kv) != 2:\n                        raise ParseError(\"Arguments to pig script must be of this format k1=v1 k2=v2 k3=v3...\")\n                    p[kv[0]] = kv[1]\n                setattr(options, 'parameters', p)\n\n        else:\n            if (args is not None) and (len(args) > 0):\n                raise ParseError(\n                    \"Extra arguments can only be supplied with a script_location\",\n                    cls.optparser.format_help())\n\n        v = vars(options)\n        v[\"command_type\"] = \"PigCommand\"\n        return v", "docstring": "Parse command line arguments to construct a dictionary of command\nparameters that can be used to create a command\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used in create method\n\nRaises:\nParseError: when the arguments are not correct", "source": "juraj-google-style"}
{"code": "def assert_rank_at_most(x, rank, data=None, summarize=None, message=None, name=None):\n    with tf.compat.v2.name_scope((name or 'assert_rank_at_most')):\n        return tf.compat.v1.assert_less_equal(tf.rank(x), rank, data=data, summarize=summarize, message=message)", "docstring": "Assert `x` has rank equal to `rank` or smaller.\n\nExample of adding a dependency to an operation:\n\n```python\nwith tf.control_dependencies([tf.assert_rank_at_most(x, 2)]):\noutput = tf.reduce_sum(x)\n```\n\nArgs:\nx:  Numeric `Tensor`.\nrank:  Scalar `Tensor`.\ndata:  The tensors to print out if the condition is False.  Defaults to\nerror message and first few entries of `x`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).\nDefaults to \"assert_rank_at_most\".\n\nReturns:\nOp raising `InvalidArgumentError` unless `x` has specified rank or lower.\nIf static checks determine `x` has correct rank, a `no_op` is returned.\n\nRaises:\nValueError:  If static checks determine `x` has wrong rank.", "source": "codesearchnet"}
{"code": "def find_faces(self, image, draw_box=False):\n        \n        frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n        faces = self.cascade.detectMultiScale(\n            frame_gray,\n            scaleFactor=1.3,\n            minNeighbors=5,\n            minSize=(50, 50),\n            flags=0)\n\n        if draw_box:\n            for x, y, w, h in faces:\n                cv2.rectangle(image, (x, y),\n                              (x + w, y + h), (0, 255, 0), 2)\n        return faces", "docstring": "Uses a haarcascade to detect faces inside an image.\n\nArgs:\nimage: The image.\ndraw_box: If True, the image will be marked with a rectangle.\n\nReturn:\nThe faces as returned by OpenCV's detectMultiScale method for\ncascades.", "source": "juraj-google-style"}
{"code": "def convert_unicode(value):\n    \n    if isinstance(value, dict):\n        return {convert_unicode(key): convert_unicode(value)\n                for key, value in value.iteritems()}\n    elif isinstance(value, list):\n        return [convert_unicode(item) for item in value]\n    elif isinstance(value, unicode):\n        return value.encode('utf-8')\n    else:\n        return value", "docstring": "Resolves python 2 issue with json loading in unicode instead of string\n\nArgs:\nvalue (str): Unicode value to be converted\n\nReturns:\n(str): converted string", "source": "juraj-google-style"}
{"code": "def remove_phenotype(self, institute, case, user, link, phenotype_id, is_group=False):\n    LOG.info('Removing HPO term from case {0}'.format(case['display_name']))\n    if is_group:\n        updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$pull': {'phenotype_terms': {'phenotype_id': phenotype_id}, 'phenotype_groups': {'phenotype_id': phenotype_id}}}, return_document=pymongo.ReturnDocument.AFTER)\n    else:\n        updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$pull': {'phenotype_terms': {'phenotype_id': phenotype_id}}}, return_document=pymongo.ReturnDocument.AFTER)\n    LOG.info('Creating event for removing phenotype term {0} from case {1}'.format(phenotype_id, case['display_name']))\n    self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='remove_phenotype', subject=case['display_name'])\n    LOG.debug('Case updated')\n    return updated_case", "docstring": "Remove an existing phenotype from a case\n\nArgs:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (dict): The url to be used in the event\nphenotype_id (str): A phenotype id\n\nReturns:\nupdated_case(dict)", "source": "codesearchnet"}
{"code": "def wait_for_tuning_job(self, job, poll=5):\n        \n        desc = _wait_until(lambda: _tuning_job_status(self.sagemaker_client, job), poll)\n        self._check_job_status(job, desc, 'HyperParameterTuningJobStatus')\n        return desc", "docstring": "Wait for an Amazon SageMaker hyperparameter tuning job to complete.\n\nArgs:\njob (str): Name of the tuning job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeHyperParameterTuningJob`` API.\n\nRaises:\nValueError: If the hyperparameter tuning job fails.", "source": "juraj-google-style"}
{"code": "def CheckInputFromValidContext(op, input_op):\n    op_ctxt = op._get_control_flow_context()\n    input_ctxt = GetOutputContext(input_op)\n    valid = False\n    if not input_ctxt:\n        valid = True\n    elif op_ctxt is input_ctxt:\n        valid = True\n    else:\n        while_ctxt = GetContainingWhileContext(op_ctxt)\n        input_while_ctxt = GetContainingWhileContext(input_ctxt)\n        if while_ctxt is None:\n            if input_while_ctxt is None:\n                valid = True\n            if IsLoopEnter(op):\n                valid = True\n            if IsSwitch(op):\n                valid = True\n        elif IsContainingContext(while_ctxt, input_while_ctxt):\n            valid = True\n        elif while_ctxt.grad_state and IsContainingContext(while_ctxt.grad_state.forward_context, input_while_ctxt):\n            valid = True\n        elif while_ctxt.grad_state and while_ctxt.grad_state.forward_context is input_while_ctxt._outer_context:\n            valid = True\n        elif input_while_ctxt.grad_state and input_while_ctxt.grad_state.forward_context is while_ctxt:\n            valid = True\n        elif input_while_ctxt.grad_state and input_ctxt.grad_state.forward_context.grad_state and (input_ctxt.grad_state.forward_context.grad_state.forward_context is while_ctxt):\n            valid = True\n    if not valid:\n        if while_ctxt:\n            error_msg = f\"Cannot use '{input_op.name}' as input to '{op.name}' because they are in different while loops.\"\n        else:\n            error_msg = f\"Cannot use '{input_op.name}' as input to '{op.name}' because '{input_op.name}' is in a while loop.\"\n        log_msg = error_msg\n        log_msg += '\\n\\n%s while context: %s' % (op.name, while_ctxt)\n        log_msg += '\\n%s while context: %s' % (input_op.name, input_while_ctxt)\n        log_msg += '\\n\\nTraceback for %s:\\n%s\\nTraceback for %s:\\n%s\\n' % (op.name, ''.join(traceback.format_list(op.traceback)), input_op.name, ''.join(traceback.format_list(input_op.traceback)))\n        logging.info(log_msg)\n        raise ValueError(error_msg + ' See info log for more details.')", "docstring": "Returns whether `input_op` can be used from `op`s context.\n\nConceptually, only inputs from op's while context or any ancestor while\ncontext (including outside of any context) are valid. In practice, there are\nmany other edge cases as well.\n\nArgs:\nop: Operation\ninput_op: Operation\n\nRaises:\nValueError: if input_op is from an invalid context.", "source": "github-repos"}
{"code": "def get(self, key, value):\n    if (key == 'id'):\n        response = self._swimlane.request('get', 'groups/{}'.format(value))\n        return Group(self._swimlane, response.json())\n    else:\n        response = self._swimlane.request('get', 'groups/lookup?name={}'.format(value))\n        matched_groups = response.json()\n        for group_data in matched_groups:\n            if (group_data.get('name') == value):\n                return Group(self._swimlane, group_data)\n        raise ValueError('Unable to find group with name \"{}\"'.format(value))", "docstring": "Retrieve single group record by id or name\n\nSupports resource cache\n\nKeyword Args:\nid (str): Full Group ID\nname (str): Group name\n\nRaises:\nTypeError: Unexpected or more than one keyword argument provided\nValueError: No matching group found based on provided inputs\n\nReturns:\nGroup: Group instance matching provided inputs", "source": "codesearchnet"}
{"code": "def taylor_approx(target, stencil, values):\n    batch_shape, ndim = (target.shape[:-1], target.shape[-1])\n    stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))\n    values = np.broadcast_to(values, stencil.shape[:-1])\n    delta_x = stencil - np.expand_dims(target, axis=-2)\n    delta_xy = np.matmul(np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))\n    i = np.arange(ndim)\n    j, k = np.triu_indices(ndim, k=1)\n    coeffs = np.concatenate([np.ones(delta_x.shape[:-1] + (1,)), delta_x, delta_xy[..., i, i] / 2, delta_xy[..., j, k]], axis=-1)\n    return np.squeeze(np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1)", "docstring": "Use taylor series to approximate up to second order derivatives.\n\nArgs:\ntarget: An array of shape (..., n), a batch of n-dimensional points\nwhere one wants to approximate function value and derivatives.\nstencil: An array of shape broadcastable to (..., k, n), for each target\npoint a set of k = triangle(n + 1) points to use on its approximation.\nvalues: An array of shape broadcastable to (..., k), the function value at\neach of the stencil points.\n\nReturns:\nAn array of shape (..., k), for each target point the approximated\nfunction value, gradient and hessian evaluated at that point (flattened\nand in the same order as returned by derivative_names).", "source": "github-repos"}
{"code": "def outer(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Outer().symbolic_call(x1, x2)\n    return backend.numpy.outer(x1, x2)", "docstring": "Compute the outer product of two vectors.\n\nGiven two vectors `x1` and `x2`, the outer product is:\n\n```\nout[i, j] = x1[i] * x2[j]\n```\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOuter product of `x1` and `x2`.", "source": "github-repos"}
{"code": "def __init__(self, a_file, import_resolver=None):\n    \n    self._sections = []\n    self._original_content = a_file.read()\n    self._import_resolver = import_resolver\n    self._processed_content = None", "docstring": "Initializes the file reading in the file.\n\nArgs:\na_file: The file to read in.\nimport_resolver: a function that given a path will return a stream for\nthe contents.\n\nRaises:\nPDDMError if there are any issues.", "source": "juraj-google-style"}
{"code": "def wait_for_disappearance(self, timeout=120):\n        \n\n        start = time.time()\n        while self.exists():\n            self.poco.sleep_for_polling_interval()\n            if time.time() - start > timeout:\n                raise PocoTargetTimeout('disappearance', self)", "docstring": "Block and wait until the UI element **disappears** within the given timeout.\n\nArgs:\ntimeout: maximum waiting time in seconds\n\nRaises:\nPocoTargetTimeout: when timeout", "source": "juraj-google-style"}
{"code": "def output_summary(fqn, action, changeset, params_diff, replacements_only=False):\n    replacements = []\n    changes = []\n    for change in changeset:\n        resource = change['ResourceChange']\n        replacement = (resource.get('Replacement') == 'True')\n        summary = ('- %s %s (%s)' % (resource['Action'], resource['LogicalResourceId'], resource['ResourceType']))\n        if replacement:\n            replacements.append(summary)\n        else:\n            changes.append(summary)\n    summary = ''\n    if params_diff:\n        summary += summarize_params_diff(params_diff)\n    if replacements:\n        if (not replacements_only):\n            summary += 'Replacements:\\n'\n        summary += '\\n'.join(replacements)\n    if changes:\n        if summary:\n            summary += '\\n'\n        summary += ('Changes:\\n%s' % '\\n'.join(changes))\n    logger.info('%s %s:\\n%s', fqn, action, summary)", "docstring": "Log a summary of the changeset.\n\nArgs:\nfqn (string): fully qualified name of the stack\naction (string): action to include in the log message\nchangeset (list): AWS changeset\nparams_diff (list): A list of dictionaries detailing the differences\nbetween two parameters returned by\n:func:`stacker.actions.diff.diff_dictionaries`\nreplacements_only (bool, optional): boolean for whether or not we only\nwant to list replacements", "source": "codesearchnet"}
{"code": "def row_lengths(self):\n    if self._row_lengths is not None:\n        return self._row_lengths\n    splits = self._row_splits\n    return splits[1:] - splits[:-1]", "docstring": "Returns the lengths of rows in this `RowPartition`.\n\nReturns:\nA 1-D integer Tensor with shape `[self.nrows]`.\nThe returned tensor is nonnegative.\n`tf.reduce_sum(self.row_lengths) == self.nvals()`.", "source": "github-repos"}
{"code": "def sample(self, signum, frame):\n    stack = []\n    while (frame and (frame != self.base_frame)):\n        stack.append((frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno))\n        frame = frame.f_back\n    self._stats[tuple(stack)] += 1\n    signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)", "docstring": "Samples current stack and adds result in self._stats.\n\nArgs:\nsignum: Signal that activates handler.\nframe: Frame on top of the stack when signal is handled.", "source": "codesearchnet"}
{"code": "def pretty_description(description, wrap_at=None, indent=0):\n    \n    if wrap_at is None or wrap_at < 0:\n        width = console_width(default=79)\n        if wrap_at is None:\n            wrap_at = width\n        else:\n            wrap_at += width\n\n    indent = ' ' * indent\n    text_wrapper = textwrap.TextWrapper(\n        width=wrap_at, replace_whitespace=False,\n        initial_indent=indent, subsequent_indent=indent)\n    new_desc = []\n    for line in description.split('\\n'):\n        new_desc.append(line.replace('\\n', '').strip())\n    while not new_desc[0]:\n        del new_desc[0]\n    while not new_desc[-1]:\n        del new_desc[-1]\n    separators = [i for i, l in enumerate(new_desc) if not l]\n    paragraphs = []\n    if separators:\n        start, end = 0, separators[0]\n        paragraphs.append(new_desc[start:end])\n        for i in range(len(separators) - 1):\n            start = end + 1\n            end = separators[i + 1]\n            paragraphs.append(new_desc[start:end])\n        paragraphs.append(new_desc[end + 1:])\n        return '\\n\\n'.join(text_wrapper.fill(' '.join(p)) for p in paragraphs)\n    return text_wrapper.fill(' '.join(new_desc))", "docstring": "Return a pretty formatted string given some text.\n\nArgs:\ndescription (str): string to format.\nwrap_at (int): maximum length of a line.\nindent (int): level of indentation.\n\nReturns:\nstr: pretty formatted string.", "source": "juraj-google-style"}
{"code": "def get_knowledge_base(project_id, knowledge_base_id):\n    \n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.KnowledgeBasesClient()\n    knowledge_base_path = client.knowledge_base_path(\n        project_id, knowledge_base_id)\n\n    response = client.get_knowledge_base(knowledge_base_path)\n\n    print('Got Knowledge Base:')\n    print(' - Display Name: {}'.format(response.display_name))\n    print(' - Knowledge ID: {}'.format(response.name))", "docstring": "Gets a specific Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.", "source": "juraj-google-style"}
{"code": "def detect_overflow(var, ctx):\n    detected = False\n    if torch.isnan(var).any().item():\n        detected = True\n        print(f'{ctx} has nans')\n    if torch.isinf(var).any().item():\n        detected = True\n        print(f'{ctx} has infs')\n    if 0:\n        n100 = var[torch.ge(var.abs(), 100)]\n        if n100.numel() > 0:\n            print(f'{ctx}:  n100={n100.numel()}')\n        n1000 = var[torch.ge(var.abs(), 1000)]\n        if n1000.numel() > 0:\n            print(f'{ctx}: n1000={n1000.numel()}')\n        n10000 = var[torch.ge(var.abs(), 10000)]\n        if n10000.numel() > 0:\n            print(f'{ctx}: n10000={n10000.numel()}')\n    if 0:\n        print(f'min={var.min():9.2e} max={var.max():9.2e}')\n    if 0:\n        print(f'min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})')\n    return detected", "docstring": "Report whether the tensor contains any `nan` or `inf` entries.\n\nThis is useful for detecting overflows/underflows and best to call right after the function that did some math that\nmodified the tensor in question.\n\nThis function contains a few other helper features that you can enable and tweak directly if you want to track\nvarious other things.\n\nArgs:\nvar: the tensor variable to check\nctx: the message to print as a context\n\nReturn:\n`True` if `inf` or `nan` was detected, `False` otherwise", "source": "github-repos"}
{"code": "def onTagDel(self, name, func):\n    if ('*' in name):\n        self.ontagdelglobs.add(name, func)\n    else:\n        self.ontagdels[name].append(func)", "docstring": "Register a callback for tag deletion.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "codesearchnet"}
{"code": "def bulk_write(self, metrics):\n        \n        try:\n            for metric in metrics:\n                self.producer.send(self.topic, metric)\n            self.producer.flush()\n        except (KafkaTimeoutError, NoBrokersAvailable) as exc:\n            logger.warning('bulk_write metrics %r failure %r', metrics, exc)", "docstring": "Write multiple metrics to kafka in one request\n\nArgs:\nmetrics (list):", "source": "juraj-google-style"}
{"code": "def downstream(self, node):\n        \n        graph = self.graph\n        if node not in graph:\n            raise KeyError('node %s is not in graph' % node)\n        return list(graph[node])", "docstring": "Returns a list of all nodes this node has edges towards.\n\nArgs:\nnode (str): The node whose downstream nodes you want to find.\n\nReturns:\nlist: A list of nodes that are immediately downstream from the\nnode.", "source": "juraj-google-style"}
{"code": "def scan_file(path):\n    \n    path = os.path.abspath(path)\n    assert os.path.exists(path), \"Unreachable file '%s'.\" % path\n\n    result = sh.clamscan(path, no_summary=True, infected=True, _ok_code=[0, 1])\n\n    return _parse_result(result)", "docstring": "Scan `path` for viruses using ``clamscan`` program.\n\nArgs:\npath (str): Relative or absolute path of file/directory you need to\nscan.\n\nReturns:\ndict: ``{filename: (\"FOUND\", \"virus type\")}`` or blank dict.\n\nRaises:\nAssertionError: When the internal file doesn't exists.", "source": "juraj-google-style"}
{"code": "def ApplyParsersToResponses(parser_factory, responses, flow_obj):\n  \n  \n  knowledge_base = flow_obj.state.knowledge_base\n\n  parsed_responses = []\n\n  if parser_factory.HasSingleResponseParsers():\n    for response in responses:\n      for parser in parser_factory.SingleResponseParsers():\n        parsed_responses.extend(\n            parser.ParseResponse(knowledge_base, response,\n                                 flow_obj.args.path_type))\n\n  for parser in parser_factory.MultiResponseParsers():\n    parsed_responses.extend(parser.ParseResponses(knowledge_base, responses))\n\n  has_single_file_parsers = parser_factory.HasSingleFileParsers()\n  has_multi_file_parsers = parser_factory.HasMultiFileParsers()\n\n  if has_single_file_parsers or has_multi_file_parsers:\n    precondition.AssertIterableType(responses, rdf_client_fs.StatEntry)\n    pathspecs = [response.pathspec for response in responses]\n    if data_store.RelationalDBEnabled():\n      \n      \n      \n      filedescs = []\n      for pathspec in pathspecs:\n        client_path = db.ClientPath.FromPathSpec(flow_obj.client_id, pathspec)\n        filedescs.append(file_store.OpenFile(client_path))\n    else:\n      filedescs = MultiOpenAff4File(flow_obj, pathspecs)\n\n  if has_single_file_parsers:\n    for response, filedesc in zip(responses, filedescs):\n      for parser in parser_factory.SingleFileParsers():\n        parsed_responses.extend(\n            parser.ParseFile(knowledge_base, response.pathspec, filedesc))\n\n  if has_multi_file_parsers:\n    for parser in parser_factory.MultiFileParsers():\n      parsed_responses.extend(\n          parser.ParseFiles(knowledge_base, pathspecs, filedescs))\n\n  return parsed_responses or responses", "docstring": "Parse responses with applicable parsers.\n\nArgs:\nparser_factory: A parser factory for specific artifact.\nresponses: A list of responses from the client.\nflow_obj: An artifact collection flow.\n\nReturns:\nA list of (possibly parsed) responses.", "source": "juraj-google-style"}
{"code": "def ws025(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `ws025`'.format(value))\n\n        self._ws025 = value", "docstring": "Corresponds to IDD Field `ws025`\nWind speed corresponding to 2.5% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `ws025`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _QueryProcessStatus(self, process):\n    process_is_alive = process.is_alive()\n    if process_is_alive:\n        rpc_client = self._rpc_clients_per_pid.get(process.pid, None)\n        process_status = rpc_client.CallFunction()\n    else:\n        process_status = None\n    return process_status", "docstring": "Queries a process to determine its status.\n\nArgs:\nprocess (MultiProcessBaseProcess): process to query for its status.\n\nReturns:\ndict[str, str]: status values received from the worker process.", "source": "codesearchnet"}
{"code": "def _FormatSocketInet128Token(self, token_data):\n    \n    protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')\n    ip_address = self._FormatPackedIPv6Address(token_data.ip_addresss)\n    return {\n        'protocols': protocol,\n        'family': token_data.socket_family,\n        'port': token_data.port_number,\n        'address': ip_address}", "docstring": "Formats an Internet socket token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_sockinet64): AUT_SOCKINET128 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def _default_tolerance(dtype):\n    if dtype == dtypes_lib.bfloat16.as_numpy_dtype:\n        return 0.005\n    if dtype == np.float16:\n        return 0.005\n    elif dtype in (np.float32, np.complex64):\n        return 0.001\n    elif dtype in (np.float64, np.complex128):\n        return 1e-05\n    else:\n        return None", "docstring": "Returns a sensible default tolerance for comparing results of a given type.\n\nArgs:\ndtype: A datatype.", "source": "github-repos"}
{"code": "def __init__(self, strategy, replica_id_in_sync_group):\n    self._strategy = strategy\n    self._thread_context = _InReplicaThreadMode(self)\n    if not (replica_id_in_sync_group is None or tensor_util.is_tf_type(replica_id_in_sync_group) or isinstance(replica_id_in_sync_group, int)):\n        raise ValueError('replica_id_in_sync_group can only be an integer, a Tensor or None.')\n    self._replica_id_in_sync_group = replica_id_in_sync_group\n    if strategy:\n        self._local_replica_id = strategy.extended._get_local_replica_id(replica_id_in_sync_group)\n    self._summary_recording_distribution_strategy = None", "docstring": "Creates a ReplicaContext.\n\nArgs:\nstrategy: A `tf.distribute.Strategy`.\nreplica_id_in_sync_group: An integer, a `Tensor` or None. Prefer an\ninteger whenever possible to avoid issues with nested `tf.function`. It\naccepts a `Tensor` only to be compatible with `tpu.replicate`.", "source": "github-repos"}
{"code": "def get_learning_rate(self, iter):\n    return (self.init_lr * ((math.cos((((iter * 1.0) / self.max_iter) * math.pi)) + 1.0) * 0.5))", "docstring": "Get learning rate with cosine decay based on current iteration.\n\nArgs:\niter (int): Current iteration (starting with 0).\n\nReturns:\nfloat: Learning rate", "source": "codesearchnet"}
{"code": "def after_run(self, run_context, run_values):  \n    \n    global_step = run_values.results\n\n    if self._timer.should_trigger_for_step(\n        global_step) and global_step > self._warm_steps:\n      elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(\n          global_step)\n      if elapsed_time is not None:\n        self._step_train_time += elapsed_time\n        self._total_steps += elapsed_steps\n\n        \n        \n        average_examples_per_sec = self._batch_size * (\n            self._total_steps / self._step_train_time)\n        \n        \n        current_examples_per_sec = self._batch_size * (\n            elapsed_steps / elapsed_time)\n        \n        tf.logging.info('Batch [%g]:  current exp/sec = %g, average exp/sec = '\n                        '%g', self._total_steps, current_examples_per_sec,\n                        average_examples_per_sec)", "docstring": "Called after each call to run().\n\nArgs:\nrun_context: A SessionRunContext object.\nrun_values: A SessionRunValues object.", "source": "juraj-google-style"}
{"code": "def search(self, search_phrase, limit=None):\n        \n        query_string = self._make_query_from_terms(search_phrase)\n        self._parsed_query = query_string\n        schema = self._get_generic_schema()\n\n        parser = QueryParser('doc', schema=schema)\n\n        query = parser.parse(query_string)\n\n        datasets = defaultdict(DatasetSearchResult)\n\n        \n        logger.debug('Searching datasets using `{}` query.'.format(query))\n        with self.index.searcher() as searcher:\n            results = searcher.search(query, limit=limit)\n            for hit in results:\n                vid = hit['vid']\n                datasets[vid].vid = hit['vid']\n                datasets[vid].b_score += hit.score\n\n        \n        logger.debug('Extending datasets with partitions.')\n        for partition in self.backend.partition_index.search(search_phrase):\n            datasets[partition.dataset_vid].p_score += partition.score\n            datasets[partition.dataset_vid].partitions.add(partition)\n        return list(datasets.values())", "docstring": "Finds datasets by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to return. None means without limit.\n\nReturns:\nlist of DatasetSearchResult instances.", "source": "juraj-google-style"}
{"code": "def get_panel_info(panel_lines=None, panel_id=None, institute=None, version=None, date=None, display_name=None):\n    panel_info = {'panel_id': panel_id, 'institute': institute, 'version': version, 'date': date, 'display_name': display_name}\n    if panel_lines:\n        for line in panel_lines:\n            line = line.rstrip()\n            if (not line.startswith('\n                break\n            info = line[2:].split('=')\n            field = info[0]\n            value = info[1]\n            if (not panel_info.get(field)):\n                panel_info[field] = value\n    panel_info['date'] = get_date(panel_info['date'])\n    return panel_info", "docstring": "Parse metadata for a gene panel\n\nFor historical reasons it is possible to include all information about a gene panel in the\nheader of a panel file. This function parses the header.\n\nArgs:\npanel_lines(iterable(str))\n\nReturns:\npanel_info(dict): Dictionary with panel information", "source": "codesearchnet"}
{"code": "def set_bfd_ip(self, name, vrid, value=None, disable=False, default=False, run=True):\n    if ((not default) and (not disable)):\n        if (not re.match('^\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$', str(value))):\n            raise ValueError(\"vrrp property 'bfd_ip' must be a properly formatted IP address\")\n    cmd = self.command_builder(('vrrp %d bfd ip' % vrid), value=value, default=default, disable=disable)\n    if run:\n        result = self.configure_interface(name, cmd)\n        if (result is False):\n            return self.error\n        return result\n    return cmd", "docstring": "Set the bfd_ip property of the vrrp\n\nArgs:\nname (string): The interface to configure.\nvrid (integer): The vrid number for the vrrp to be managed.\nvalue (string): The bfd ip address to be set.\ndisable (boolean): Unset bfd ip if True.\ndefault (boolean): Set bfd ip to default if True.\nrun (boolean): Set to True to execute the command, False to\nreturn a string with the formatted command.\n\nReturns:\nIf run is True, returns True if the command executed successfully,\nerror if failure.\n\nIf run is False, returns the formatted command string which can\nbe passed to the node", "source": "codesearchnet"}
{"code": "def get_extrapolated_diffusivity(temps, diffusivities, new_temp):\n    \n    Ea, c, _ = fit_arrhenius(temps, diffusivities)\n    return c * np.exp(-Ea / (const.k / const.e * new_temp))", "docstring": "Returns (Arrhenius) extrapolated diffusivity at new_temp\n\nArgs:\ntemps ([float]): A sequence of temperatures. units: K\ndiffusivities ([float]): A sequence of diffusivities (e.g.,\nfrom DiffusionAnalyzer.diffusivity). units: cm^2/s\nnew_temp (float): desired temperature. units: K\n\nReturns:\n(float) Diffusivity at extrapolated temp in mS/cm.", "source": "juraj-google-style"}
{"code": "def __init__(self, dump_root, partition_graphs=None, validate=True):\n    if not gfile.IsDirectory(dump_root):\n        raise IOError('Dump root directory %s does not exist' % dump_root)\n    self._core_metadata = []\n    self._dump_root = dump_root\n    self._load_core_metadata()\n    self._load_fetches_info()\n    self._load_feeds_info()\n    self._load_all_device_dumps(partition_graphs, validate)\n    self._python_graph = None", "docstring": "`DebugDumpDir` constructor.\n\nArgs:\ndump_root: (`str`) path to the dump root directory.\npartition_graphs: A repeated field of GraphDefs representing the\npartition graphs executed by the TensorFlow runtime.\nvalidate: (`bool`) whether the dump files are to be validated against the\npartition graphs.\n\nRaises:\nIOError: If dump_root does not exist as a directory.\nValueError: If more than one core metadata file is found under the dump\nroot directory.", "source": "github-repos"}
{"code": "def db_set(table, record, column, value, if_exists=False):\n    cmd = ['ovs-vsctl']\n    if if_exists:\n        cmd += ['--if-exists']\n    cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]\n    result = __salt__['cmd.run_all'](cmd)\n    if (result['retcode'] != 0):\n        return result['stderr']\n    else:\n        return None", "docstring": "Sets a column's value for a specific record.\n\nArgs:\ntable: A string - name of the database table.\nrecord: A string - identifier of the record.\ncolumn: A string - name of the column.\nvalue: A string - the value to be set\nif_exists: A boolean - if True, it is not an error if the record does\nnot exist.\n\nReturns:\nNone on success and an error message on failure.\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07", "source": "codesearchnet"}
{"code": "def _process_book(book_url):\n    \n    data = DOWNER.download(book_url)\n    dom = dhtmlparser.parseString(data)\n\n    details_tags = dom.find(\"div\", {\"id\": \"contentDetail\"})\n\n    assert details_tags, \"Can't find details of the book.\"\n\n    details = details_tags[0]\n\n    \n    title = _parse_title(dom, details)\n    authors = _parse_authors(details)\n    publisher = _parse_publisher(details)\n    price = _parse_price(details)\n    pages, binding = _parse_pages_binding(details)\n\n    pub = Publication(\n        title,\n        authors,\n        price,\n        publisher\n    )\n\n    \n    pub.optionals.URL = book_url\n    pub.optionals.binding = binding\n\n    pub.optionals.pages = pages\n    pub.optionals.ISBN, pub.optionals.EAN = _parse_ISBN_EAN(details)\n    pub.optionals.edition = _parse_edition(details)\n    pub.optionals.description = _parse_description(details)\n\n    return pub", "docstring": "Parse available informations about book from the book details page.\n\nArgs:\nbook_url (str): Absolute URL of the book.\n\nReturns:\nobj: :class:`structures.Publication` instance with book details.", "source": "juraj-google-style"}
{"code": "def ValidateAccessAndSubjects(requested_access, subjects):\n  \n\n  if not requested_access:\n    raise access_control.UnauthorizedAccess(\n        \"Must specify requested access type for %s\" % subjects)\n\n  for s in requested_access:\n    if s not in \"rwq\":\n      raise ValueError(\n          \"Invalid access requested for %s: %s\" % (subjects, requested_access))\n\n  if \"q\" in requested_access and \"r\" not in requested_access:\n    raise access_control.UnauthorizedAccess(\n        \"Invalid access request: query permissions require read permissions \"\n        \"for %s\" % subjects,\n        requested_access=requested_access)\n\n  return True", "docstring": "Does basic requested access validation.\n\nArgs:\nrequested_access: String consisting or 'r', 'w' and 'q' characters.\nsubjects: A list of subjects that are about to be accessed with a given\nrequested_access. Used for logging purposes only.\n\nReturns:\nTrue if requested_access is valid.\n\nRaises:\naccess_control.UnauthorizedAccess: if requested_access is not valid.\nValueError: if subjects list is empty.", "source": "juraj-google-style"}
{"code": "def create_pipeline(self, name, description, **kwargs):\n\t\t\n\t\t\n\t\tif not (name and description):\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tkwargs.update({'name':name, 'description':description})\n\n\t\tnew_pl = StreakPipeline(**kwargs)\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix\n\t\t\t\t\t\t])\n\t\tcode, r_data = self._req('put', uri, new_pl.to_dict())\n\t\t\n\t\treturn code, r_data", "docstring": "Creates a pipeline with the provided attributes.\nArgs:\nname\trequired name string\nkwargs\t{name, description, orgWide, aclEntries} user\nspecifiable ones only\nreturn\t(status code, pipeline_dict) (as created)", "source": "juraj-google-style"}
{"code": "def calculate(self, token_list_x, token_list_y):\n        \n        if len(token_list_x) == 0 or len(token_list_y) == 0:\n            return 0.0\n\n        document_list = token_list_x.copy()\n        [document_list.append(v) for v in token_list_y]\n        document_list = list(set(document_list))\n\n        tfidf_vectorizer = TfidfVectorizer(document_list)\n\n        vector_list_x = tfidf_vectorizer.vectorize(token_list_x)\n        vector_list_y = tfidf_vectorizer.vectorize(token_list_y)\n        \n        if len(vector_list_x) > len(vector_list_y):\n            [vector_list_y.append(0.0) for _ in range(len(vector_list_x) - len(vector_list_y))]\n        elif len(vector_list_y) > len(vector_list_x):\n            [vector_list_x.append(0.0) for _ in range(len(vector_list_y) - len(vector_list_x))]\n\n        dot_prod = np.dot(vector_list_x, vector_list_y)\n        norm_x = np.linalg.norm(vector_list_x)\n        norm_y = np.linalg.norm(vector_list_y)\n        try:\n            result = dot_prod / (norm_x * norm_y)\n            if np.isnan(result) is True:\n                return 0.0\n            else:\n                return result\n        except ZeroDivisionError:\n            return 0.0", "docstring": "Calculate similarity with the so-called Cosine similarity of Tf-Idf vectors.\n\nConcrete method.\n\nArgs:\ntoken_list_x:    [token, token, token, ...]\ntoken_list_y:    [token, token, token, ...]\n\nReturns:\nSimilarity.", "source": "juraj-google-style"}
{"code": "def __init__(self, setup):\n    pass", "docstring": "Construct a NotebookTestCase.\n\nArgs:\nsetup: arbitrary JSON-serializable object specified by test spec", "source": "github-repos"}
{"code": "def get_inputs_outputs(signature_def):\n    inputs_tensor_info = signature_def.inputs\n    outputs_tensor_info = signature_def.outputs\n\n    def gather_names(tensor_info):\n        return [tensor_info[key].name for key in tensor_info]\n    inputs = gather_names(inputs_tensor_info)\n    outputs = gather_names(outputs_tensor_info)\n    return (inputs, outputs)", "docstring": "Get inputs and outputs from SignatureDef.\n\nArgs:\nsignature_def: SignatureDef in the meta_graph_def for conversion.\n\nReturns:\nThe inputs and outputs in the graph for conversion.", "source": "github-repos"}
{"code": "def murmur2(key):\n    \n\n    \n    if isinstance(key, bytearray) or (six.PY3 and isinstance(key, bytes)):\n        data = key\n    else:\n        data = bytearray(str(key).encode())\n\n    length = len(data)\n    seed = 0x9747b28c\n    \n    \n    m = 0x5bd1e995\n    r = 24\n\n    \n    h = seed ^ length\n    length4 = length \n\n    for i in range(length4):\n        i4 = i * 4\n        k = ((data[i4 + 0] & 0xff) +\n             ((data[i4 + 1] & 0xff) << 8) +\n             ((data[i4 + 2] & 0xff) << 16) +\n             ((data[i4 + 3] & 0xff) << 24))\n        k &= 0xffffffff\n        k *= m\n        k &= 0xffffffff\n        k ^= (k % 0x100000000) >> r  \n        k &= 0xffffffff\n        k *= m\n        k &= 0xffffffff\n\n        h *= m\n        h &= 0xffffffff\n        h ^= k\n        h &= 0xffffffff\n\n    \n    extra_bytes = length % 4\n    if extra_bytes >= 3:\n        h ^= (data[(length & ~3) + 2] & 0xff) << 16\n        h &= 0xffffffff\n    if extra_bytes >= 2:\n        h ^= (data[(length & ~3) + 1] & 0xff) << 8\n        h &= 0xffffffff\n    if extra_bytes >= 1:\n        h ^= (data[length & ~3] & 0xff)\n        h &= 0xffffffff\n        h *= m\n        h &= 0xffffffff\n\n    h ^= (h % 0x100000000) >> 13  \n    h &= 0xffffffff\n    h *= m\n    h &= 0xffffffff\n    h ^= (h % 0x100000000) >> 15  \n    h &= 0xffffffff\n\n    return h", "docstring": "Pure-python Murmur2 implementation.\n\nBased on java client, see org.apache.kafka.common.utils.Utils.murmur2\n\nArgs:\nkey: if not a bytes type, encoded using default encoding\n\nReturns: MurmurHash2 of key bytearray", "source": "juraj-google-style"}
{"code": "def coords2px(y, x):\n    \n    rows = np.rint([y[0], y[0], y[2], y[2]]).astype(int)\n    cols = np.rint([y[1], y[3], y[1], y[3]]).astype(int)\n    r,c,*_ = x.shape\n    Y = np.zeros((r, c))\n    Y[rows, cols] = 1\n    return Y", "docstring": "Transforming coordinates to pixels.\n\nArguments:\ny : np array\nvector in which (y[0], y[1]) and (y[2], y[3]) are the\nthe corners of a bounding box.\nx : image\nan image\nReturns:\nY : image\nof shape x.shape", "source": "juraj-google-style"}
{"code": "def __init__(self, spin_mode=\"polarized\", smearing=\"fermi_dirac:0.1 eV\",\n                 algorithm=None, nband=None, fband=None, charge=0.0, comment=None):  \n        \n        super().__init__()\n\n        self.comment = comment\n        self.smearing = Smearing.as_smearing(smearing)\n        self.spin_mode = SpinMode.as_spinmode(spin_mode)\n        self.nband = nband\n        self.fband = fband\n        self.charge = charge\n        self.algorithm = algorithm", "docstring": "Constructor for Electrons object.\n\nArgs:\ncomment: String comment for Electrons\ncharge: Total charge of the system. Default is 0.", "source": "juraj-google-style"}
{"code": "def append(self, future):\n    future.prev = self.tail\n    if (self.tail is None):\n        assert (self.head is None)\n        self.head = future\n    else:\n        self.tail.next = future\n    self.tail = future\n    future.add_done_callback(self.remove)", "docstring": "Append an object to the linked list.\n\nArgs:\nfuture (PlasmaObjectFuture): A PlasmaObjectFuture instance.", "source": "codesearchnet"}
{"code": "def __init__(self, graph, control_inputs) -> None:\n    self._graph = graph\n    if control_inputs is None:\n        self._control_inputs_val = []\n        self._new_stack = True\n    else:\n        self._control_inputs_val = control_inputs\n        self._new_stack = False\n    self._seen_nodes = set()\n    self._old_stack = None\n    self._old_control_flow_context = None", "docstring": "Create a new `_ControlDependenciesController`.\n\nA `_ControlDependenciesController` is the context manager for\n`with tf.control_dependencies()` blocks.  These normally nest,\nas described in the documentation for `control_dependencies()`.\n\nThe `control_inputs` argument list control dependencies that must be\nadded to the current set of control dependencies.  Because of\nuniquification the set can be empty even if the caller passed a list of\nops.  The special value `None` indicates that we want to start a new\nempty set of control dependencies instead of extending the current set.\n\nIn that case we also clear the current control flow context, which is an\nadditional mechanism to add control dependencies.\n\nArgs:\ngraph: The graph that this controller is managing.\ncontrol_inputs: List of ops to use as control inputs in addition to the\ncurrent control dependencies.  None to indicate that the dependencies\nshould be cleared.", "source": "github-repos"}
{"code": "def direct_normal_radiation(self, value=9999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `direct_normal_radiation`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `direct_normal_radiation`')\n\n        self._direct_normal_radiation = value", "docstring": "Corresponds to IDD Field `direct_normal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `direct_normal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, analysis_plugin):\n    \n    if not isinstance(analysis_plugin, sessionize.SessionizeAnalysisPlugin):\n      raise errors.BadConfigObject(\n          'Analysis plugin is not an instance of SessionizeAnalysisPlugin')\n\n    maximum_pause = cls._ParseNumericOption(\n        options, 'sessionize_maximumpause', default_value=10)\n\n    if maximum_pause <= 0:\n      raise errors.BadConfigOption(\n          'Maximum pause value {0:d} is not supported. '\n          'Value must be greater than 0.'.format(maximum_pause))\n    analysis_plugin.SetMaximumPause(maximum_pause)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nanalysis_plugin (OutputModule): analysis_plugin to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"}
{"code": "def _WriteFile(output_path, name, content):\n  \n  path = os.path.join(output_path, name)\n  with open(path, 'wb') as f:\n    f.write(content)\n  return path", "docstring": "Write given content to a file in a given directory.\n\nArgs:\noutput_path: The directory to store the file in.\nname: The name of the file to store the content in.\ncontent: The content to write to the file.close\n\nReturns:\nThe full path to the written file.", "source": "juraj-google-style"}
{"code": "def get_absolute_name(package, relative_name):\n    \n    path = package.split('.') if package else []\n    name = relative_name.lstrip('.')\n    ndots = len(relative_name) - len(name)\n    if ndots > len(path):\n        return relative_name\n    absolute_path = path[:len(path) + 1 - ndots]\n    if name:\n        absolute_path.append(name)\n    return '.'.join(absolute_path)", "docstring": "Joins a package name and a relative name.\n\nArgs:\npackage: A dotted name, e.g. foo.bar.baz\nrelative_name: A dotted name with possibly some leading dots, e.g. ..x.y\n\nReturns:\nThe relative name appended to the parent's package, after going up one\nlevel for each leading dot.\ne.g. foo.bar.baz + ..hello.world -> foo.hello.world\nThe unchanged relative_name if it does not start with a dot\nor has too many leading dots.", "source": "juraj-google-style"}
{"code": "def how_vulnerable(chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, blackbox_assignments, interactive, vuln_deets):\n    for (i, current_node) in enumerate(chain):\n        if (current_node in sanitiser_nodes):\n            vuln_deets['sanitiser'] = current_node\n            vuln_deets['confident'] = True\n            return (VulnerabilityType.SANITISED, interactive)\n        if isinstance(current_node, BBorBInode):\n            if (current_node.func_name in blackbox_mapping['propagates']):\n                continue\n            elif (current_node.func_name in blackbox_mapping['does_not_propagate']):\n                return (VulnerabilityType.FALSE, interactive)\n            elif interactive:\n                user_says = input('Is the return value of {} with tainted argument \"{}\" vulnerable? ([Y]es/[N]o/[S]top asking)'.format(current_node.label, chain[(i - 1)].left_hand_side)).lower()\n                if user_says.startswith('s'):\n                    interactive = False\n                    vuln_deets['unknown_assignment'] = current_node\n                    return (VulnerabilityType.UNKNOWN, interactive)\n                if user_says.startswith('n'):\n                    blackbox_mapping['does_not_propagate'].append(current_node.func_name)\n                    return (VulnerabilityType.FALSE, interactive)\n                blackbox_mapping['propagates'].append(current_node.func_name)\n            else:\n                vuln_deets['unknown_assignment'] = current_node\n                return (VulnerabilityType.UNKNOWN, interactive)\n    if potential_sanitiser:\n        vuln_deets['sanitiser'] = potential_sanitiser\n        vuln_deets['confident'] = False\n        return (VulnerabilityType.SANITISED, interactive)\n    return (VulnerabilityType.TRUE, interactive)", "docstring": "Iterates through the chain of nodes and checks the blackbox nodes against the blackbox mapping and sanitiser dictionary.\n\nNote: potential_sanitiser is the only hack here, it is because we do not take p-use's into account yet.\ne.g. we can only say potentially instead of definitely sanitised in the path_traversal_sanitised_2.py test.\n\nArgs:\nchain(list(Node)): A path of nodes between source and sink.\nblackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.\nsanitiser_nodes(set): A set of nodes that are sanitisers for the sink.\npotential_sanitiser(Node): An if or elif node that can potentially cause sanitisation.\nblackbox_assignments(set[AssignmentNode]): set of blackbox assignments, includes the ReturnNode's of BBorBInode's.\ninteractive(bool): determines if we ask the user about blackbox functions not in the mapping file.\nvuln_deets(dict): vulnerability details.\n\nReturns:\nA VulnerabilityType depending on how vulnerable the chain is.", "source": "codesearchnet"}
{"code": "def resume_training(self, train_data, model_path, valid_data=None):\n        \n        restore_state = self.checkpointer.restore(model_path)\n        loss_fn = self._get_loss_fn()\n        self.train()\n        self._train_model(\n            train_data=train_data,\n            loss_fn=loss_fn,\n            valid_data=valid_data,\n            restore_state=restore_state,\n        )", "docstring": "This model resume training of a classifier by reloading the appropriate state_dicts for each model\n\nArgs:\ntrain_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\nX (data) and Y (labels) for the train split\nmodel_path: the path to the saved checpoint for resuming training\nvalid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\nX (data) and Y (labels) for the dev split", "source": "juraj-google-style"}
{"code": "def capture_image(self, device_label):\n    response = None\n    try:\n        response = requests.post(urls.imagecapture(self._giid, device_label), headers={'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)})\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)", "docstring": "Capture smartcam image\n\nArgs:\ndevice_label (str): device label of camera", "source": "codesearchnet"}
{"code": "def spawn_reader_writer(get_data_fn, put_data_fn):\n    \n    def _reader_thread():\n        while True:\n            out = get_data_fn()\n            put_data_fn(out)\n            if not out:\n                \n                \n                \n                break\n\n    t = threading.Thread(target=_reader_thread)\n    t.daemon = True\n    t.start()\n    return t", "docstring": "Spawn a thread that reads from a data source and writes to a sink.\n\nThe thread will terminate if it receives a Falsey value from the source.\n\nArgs:\nget_data_fn: Data-reading function. Called repeatedly until it returns\nFalse-y to indicate that the thread should terminate.\nput_data_fn: Data-writing function.\nReturns: threading.Thread", "source": "juraj-google-style"}
{"code": "def wmo(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `wmo`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `wmo`')\n\n        self._wmo = value", "docstring": "Corresponds to IDD Field `wmo` usually a 6 digit field. Used as\nalpha in EnergyPlus.\n\nArgs:\nvalue (str): value for IDD Field `wmo`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def match_shortname(self, name, filled_args=None):\n    filled_count = 0\n    if (filled_args is not None):\n        filled_count = len(filled_args)\n    possible = [x for x in self.arg_names[filled_count:] if x.startswith(name)]\n    if (len(possible) == 0):\n        raise ArgumentError('Could not convert short-name full parameter name, none could be found', short_name=name, parameters=self.arg_names)\n    elif (len(possible) > 1):\n        raise ArgumentError('Short-name is ambiguous, could match multiple keyword parameters', short_name=name, possible_matches=possible)\n    return possible[0]", "docstring": "Try to convert a prefix into a parameter name.\n\nIf the result could be ambiguous or there is no matching\nparameter, throw an ArgumentError\n\nArgs:\nname (str): A prefix for a parameter name\nfilled_args (list): A list of filled positional arguments that will be\nremoved from consideration.\n\nReturns:\nstr: The full matching parameter name", "source": "codesearchnet"}
{"code": "def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):\n    outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)\n    program = 'pepstats'\n    pepstats_args = '-sequence=\"{}\" -outfile=\"{}\"'.format(infile, outfile)\n    cmd_string = '{} {}'.format(program, pepstats_args)\n    ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=True)\n    return outfile", "docstring": "Run EMBOSS pepstats on a FASTA file.\n\nArgs:\ninfile: Path to FASTA file\noutfile: Name of output file without extension\noutdir: Path to output directory\noutext: Extension of results file, default is \".pepstats\"\nforce_rerun: Flag to rerun pepstats\n\nReturns:\nstr: Path to output file.", "source": "codesearchnet"}
{"code": "def sym_has(self, path: Union[utils.KeyPath, str, int]) -> bool:\n    return utils.KeyPath.from_value(path).exists(self)", "docstring": "Returns True if a path exists in the sub-tree.\n\nArgs:\npath: A KeyPath object or equivalence.\n\nReturns:\nTrue if the path exists in current sub-tree, otherwise False.", "source": "github-repos"}
{"code": "def preprocess_dataset(ingested_dataset_path: str, preprocessed_dataset_path: str, base_artifact_path: str, gcp_project_id: str, region: str, dataflow_staging_root: str, beam_runner: str):\n    timestamp = time.time()\n    target_path = f'{base_artifact_path}/preprocessing/preprocessed_dataset_{timestamp}'\n    Path(preprocessed_dataset_path).parent.mkdir(parents=True, exist_ok=True)\n    with open(preprocessed_dataset_path, 'w') as f:\n        f.write(target_path)\n    pipeline_options = PipelineOptions(runner=beam_runner, project=gcp_project_id, job_name=f'preprocessing-{int(time.time())}', temp_location=dataflow_staging_root, region=region, requirements_file='/requirements.txt', save_main_session=True)\n    with beam.Pipeline(options=pipeline_options) as pipeline:\n        pipeline | 'Read input jsonlines file' >> beam.io.ReadFromText(ingested_dataset_path) | 'Load json' >> beam.Map(json.loads) | 'Filter licenses' >> beam.Filter(valid_license) | 'Download image from URL' >> beam.FlatMap(download_image_from_url) | 'Resize image' >> beam.Map(resize_image, size=IMAGE_SIZE) | 'Clean Text' >> beam.Map(clean_text) | 'Serialize Example' >> beam.Map(serialize_example) | 'Write to Avro files' >> beam.io.WriteToAvro(file_path_prefix=target_path, schema={'namespace': 'preprocessing.example', 'type': 'record', 'name': 'Sample', 'fields': [{'name': 'id', 'type': 'int'}, {'name': 'caption', 'type': 'string'}, {'name': 'image', 'type': 'bytes'}]}, file_name_suffix='.avro')", "docstring": "Preprocess the ingested raw dataset and write the result to avro format.\n\nArgs:\ningested_dataset_path (str): Path to the ingested dataset\npreprocessed_dataset_path (str): Path to where the preprocessed dataset will be saved\nbase_artifact_path (str): path to the base directory of where artifacts can be stored for\nthis component.\ngcp_project_id (str): ID for the google cloud project to deploy the pipeline to.\nregion (str): Region in which to deploy the pipeline.\ndataflow_staging_root (str): Path to staging directory for the dataflow runner.\nbeam_runner (str): Beam runner: DataflowRunner or DirectRunner.", "source": "github-repos"}
{"code": "def valid_scrabble_word(word):\n    letters_in_bag = {'a': 9, 'b': 2, 'c': 2, 'd': 4, 'e': 12, 'f': 2, 'g': 3, 'h': 2, 'i': 9, 'j': 1, 'k': 1, 'l': 4, 'm': 2, 'n': 6, 'o': 8, 'p': 2, 'q': 1, 'r': 6, 's': 4, 't': 6, 'u': 4, 'v': 2, 'w': 2, 'x': 1, 'y': 2, 'z': 1, '_': 2}\n    for letter in word:\n        if (letter == '?'):\n            continue\n        try:\n            letters_in_bag[letter] -= 1\n        except KeyError:\n            return False\n        if (letters_in_bag[letter] < 0):\n            letters_in_bag['_'] -= 1\n            if (letters_in_bag['_'] < 0):\n                return False\n    return True", "docstring": "Checks if the input word could be played with a full bag of tiles.\n\nReturns:\nTrue or false", "source": "codesearchnet"}
{"code": "def assignment_propagation(node):\n    n_reads = read_counts(node)\n    to_remove = []\n    for succ in gast.walk(node):\n        if (isinstance(succ, gast.Assign) and isinstance(succ.value, gast.Name) and (len(succ.targets) == 1) and isinstance(succ.targets[0], gast.Name)):\n            rhs_name = succ.value.id\n            rhs_defs = [def_[1] for def_ in anno.getanno(succ, 'definitions_in') if (def_[0] == rhs_name)]\n            if ((len(rhs_defs) == 1) and isinstance(rhs_defs[0], gast.Assign) and (n_reads[rhs_defs[0]] == 1) and isinstance(rhs_defs[0].value, gast.Name) and isinstance(rhs_defs[0].targets[0], gast.Name)):\n                to_remove.append(rhs_defs[0])\n                succ.value = rhs_defs[0].value\n    transformers.Remove(to_remove).visit(node)\n    anno.clearanno(node)\n    return node", "docstring": "Perform assignment propagation.\n\nAssignment propagation is not a compiler optimization as much as a\nreadability optimization. If a variable name is used only once, it gets\nrenamed when possible e.g. `y = x; z = y` will become `z = x`.\n\nArgs:\nnode: The AST to optimize.\n\nReturns:\nThe optimized AST.", "source": "codesearchnet"}
{"code": "def run_inference(self, batch: Sequence[torch.Tensor], model: torch.nn.Module, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    inference_args = {} if not inference_args else inference_args\n    model_id = self._state_dict_path if not self._torch_script_model_path else self._torch_script_model_path\n    return self._inference_fn(batch, model, self._device, inference_args, model_id)", "docstring": "Runs inferences on a batch of Tensors and returns an Iterable of\nTensor Predictions.\n\nThis method stacks the list of Tensors in a vectorized format to optimize\nthe inference call.\n\nArgs:\nbatch: A sequence of Tensors. These Tensors should be batchable, as this\nmethod will call `torch.stack()` and pass in batched Tensors with\ndimensions (batch_size, n_features, etc.) into the model's forward()\nfunction.\nmodel: A PyTorch model.\ninference_args: Non-batchable arguments required as inputs to the model's\nforward() function. Unlike Tensors in `batch`, these parameters will\nnot be dynamically batched\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def with_rank_at_least(self, rank):\n    if self.rank is not None and self.rank < rank:\n        raise ValueError('Shape %s must have rank at least %d' % (self, rank))\n    else:\n        return self", "docstring": "Returns a shape based on `self` with at least the given rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with at least the given\nrank.\n\nRaises:\nValueError: If `self` does not represent a shape with at least the given\n`rank`.", "source": "github-repos"}
{"code": "def vector(p1, p2):\n    return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])", "docstring": "compute vector between two 3D points\n\nArgs:\np1, p2: indexable objects with\nindices 0, 1, 2 corresponding to 3D cartesian coordinates.\n\nReturns:\n3-vector from p1 - p2", "source": "codesearchnet"}
{"code": "def extractTimes(self, inp):\n        \n        def handleMatch(time):\n            relative = False\n\n            if not time:\n                return None\n\n            \n            elif time.group(1) == 'morning':\n                h = 8\n                m = 0\n            elif time.group(1) == 'afternoon':\n                h = 12\n                m = 0\n            elif time.group(1) == 'evening':\n                h = 19\n                m = 0\n            elif time.group(4) and time.group(5):\n                h, m = 0, 0\n\n                \n                converter = NumberService()\n                try:\n                    diff = converter.parse(time.group(4))\n                except:\n                    return None\n\n                if time.group(5) == 'hours':\n                    h += diff\n                else:\n                    m += diff\n\n                \n                if time.group(6):\n                    converter = NumberService()\n                    try:\n                        diff = converter.parse(time.group(7))\n                    except:\n                        return None\n\n                    if time.group(8) == 'hours':\n                        h += diff\n                    else:\n                        m += diff\n\n                relative = True\n            else:\n                \n                t = time.group(2)\n                h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])\n\n                try:\n                    if time.group(3) == 'pm':\n                        h += 12\n                except IndexError:\n                    pass\n\n            if relative:\n                return self.now + datetime.timedelta(hours=h, minutes=m)\n            else:\n                return datetime.datetime(\n                    self.now.year, self.now.month, self.now.day, h, m\n                )\n\n        inp = self._preprocess(inp)\n        return [handleMatch(time) for time in self._timeRegex.finditer(inp)]", "docstring": "Extracts time-related information from an input string.\nIgnores any information related to the specific date, focusing\non the time-of-day.\n\nArgs:\ninp (str): Input string to be parsed.\n\nReturns:\nA list of datetime objects containing the extracted times from the\ninput snippet, or an empty list if none found.", "source": "juraj-google-style"}
{"code": "def __init__(self, shape, min_value, max_value, alpha=0.0, beta=0.0, scope='beta', summary_labels=()):\n        \n        assert min_value is None or max_value > min_value\n        self.shape = shape\n        self.min_value = min_value\n        self.max_value = max_value\n        action_size = util.prod(self.shape)\n\n        self.alpha = Linear(size=action_size, bias=alpha, scope='alpha', summary_labels=summary_labels)\n        self.beta = Linear(size=action_size, bias=beta, scope='beta', summary_labels=summary_labels)\n\n        super(Beta, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Beta distribution.\n\nArgs:\nshape: Action shape.\nmin_value: Minimum value of continuous actions.\nmax_value: Maximum value of continuous actions.\nalpha: Optional distribution bias for the alpha value.\nbeta: Optional distribution bias for the beta value.", "source": "juraj-google-style"}
{"code": "def ch_start_time(self, *channels: List[Channel]) -> int:\n    return self.timeslots.ch_start_time(*channels)", "docstring": "Return minimum start time for supplied channels.\n\nArgs:\n*channels: Supplied channels", "source": "codesearchnet"}
{"code": "def _get_addresses(tx):\n    from_address = set([vin['address'] for vin in tx['vins']])\n    if (len(from_address) != 1):\n        raise InvalidTransactionError('Transaction should have inputs from only one address {}'.format(from_address))\n    vouts = sorted(tx['vouts'], key=(lambda d: d['n']))[:(- 1)]\n    piece_address = vouts[0]['address']\n    to_address = vouts[(- 1)]['address']\n    from_address = from_address.pop()\n    return (from_address, to_address, piece_address)", "docstring": "Checks for the from, to, and piece address of a SPOOL transaction.\n\nArgs:\ntx (dict): Transaction payload, as returned by\n:meth:`transactions.Transactions.get()`.\n\n.. note:: Formats as returned by JSON-RPC API\n``decoderawtransaction`` have yet to be supported.\n\nReturns:\nTuple([str]): Sender, receiver, and piece addresses.", "source": "codesearchnet"}
{"code": "def decode_spans(start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray) -> Tuple:\n    if start.ndim == 1:\n        start = start[None]\n    if end.ndim == 1:\n        end = end[None]\n    outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))\n    candidates = np.tril(np.triu(outer), max_answer_len - 1)\n    scores_flat = candidates.flatten()\n    if topk == 1:\n        idx_sort = [np.argmax(scores_flat)]\n    elif len(scores_flat) < topk:\n        idx_sort = np.argsort(-scores_flat)\n    else:\n        idx = np.argpartition(-scores_flat, topk)[0:topk]\n        idx_sort = idx[np.argsort(-scores_flat[idx])]\n    starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:]\n    desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero())\n    starts = starts[desired_spans]\n    ends = ends[desired_spans]\n    scores = candidates[0, starts, ends]\n    return (starts, ends, scores)", "docstring": "Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the actual\nanswer.\n\nIn addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or\nanswer end position being before the starting position. The method supports output the k-best answer through the\ntopk argument.\n\nArgs:\nstart (`np.ndarray`): Individual start probabilities for each token.\nend (`np.ndarray`): Individual end probabilities for each token.\ntopk (`int`): Indicates how many possible answer span(s) to extract from the model output.\nmax_answer_len (`int`): Maximum size of the answer to extract from the model's output.\nundesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer", "source": "github-repos"}
{"code": "def tas50(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[45] == '0':\n        return None\n\n    tas = bin2int(d[46:56]) * 2   \n    return tas", "docstring": "Aircraft true airspeed, BDS 5,0 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS50) string\n\nReturns:\nint: true airspeed in knots", "source": "juraj-google-style"}
{"code": "def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):\n    \n\n    LOG.info(\"Fetching OMIM files from https:\n    mim2genes_url =  'https:\n    mimtitles_url= 'https:\n    morbidmap_url = 'https:\n    genemap2_url =  'https:\n        \n    mim_files = {}\n    mim_urls = {}\n    \n    if mim2genes is True:\n        mim_urls['mim2genes'] = mim2genes_url\n    if mimtitles is True:\n        mim_urls['mimtitles'] = mimtitles_url\n    if morbidmap is True:\n        mim_urls['morbidmap'] = morbidmap_url\n    if genemap2 is True:\n        mim_urls['genemap2'] = genemap2_url\n\n    for file_name in mim_urls:\n        url = mim_urls[file_name]\n        mim_files[file_name] = fetch_resource(url)\n\n    return mim_files", "docstring": "Fetch the necessary mim files using a api key\n\nArgs:\napi_key(str): A api key necessary to fetch mim data\n\nReturns:\nmim_files(dict): A dictionary with the neccesary files", "source": "juraj-google-style"}
{"code": "def get_varname_from_locals(val, locals_, default='varname-not-found',\n                            strict=False, cmpfunc_=operator.is_):\n    \n    if val is None or isinstance(val, (int, float, bool)):\n        \n        return default\n    try:\n        for count, val_ in enumerate(six.itervalues(locals_)):\n            if cmpfunc_(val, val_):\n                index_ = count\n        varname = six.text_type(list(locals_.keys())[index_])\n    except NameError:\n        varname = default\n        if strict:\n            raise\n    return varname", "docstring": "Finds the string name which has where locals_[name] is val\n\nCheck the varname is in the parent namespace\nThis will only work with objects not primatives\n\nArgs:\nval (): some value\nlocals_ (dict): local dictionary to search\ndefault (str):\nstrict (bool):\n\nReturns:\nstr: the varname which is Val (if it exists)", "source": "juraj-google-style"}
{"code": "def ensure_crossplat_path(path, winroot='C:'):\n    r\n    cplat_path = path.replace('\\\\', '/')\n    if cplat_path == winroot:\n        cplat_path += '/'\n    return cplat_path", "docstring": "r\"\"\"\nensure_crossplat_path\n\nArgs:\npath (str):\n\nReturns:\nstr: crossplat_path\n\nExample(DOCTEST):\n>>> # ENABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> path = r'C:\\somedir'\n>>> cplat_path = ensure_crossplat_path(path)\n>>> result = cplat_path\n>>> print(result)\nC:/somedir", "source": "juraj-google-style"}
{"code": "def clone_with_copy(src_path, dest_path):\n    log.info('Cloning directory tree %s to %s', src_path, dest_path)\n    shutil.copytree(src_path, dest_path)", "docstring": "Clone a directory try by copying it.\n\nArgs:\nsrc_path: The directory to be copied.\ndest_path: The location to copy the directory to.", "source": "codesearchnet"}
{"code": "def _get_version(self, root):\n    version = self.get_version(root)\n    if version:\n        return StrictVersion(version)\n    raise UnknownVersionError('Unable to determine the version of the input document. No version information found on the root element.')", "docstring": "Return the version of the root element passed in.\n\nArgs:\nroot (etree.Element)\n\nReturns:\ndistutils.StrictVersion\n\nRaises:\nUnknownVersionError", "source": "codesearchnet"}
{"code": "def class_logit(layer, label):\n  \n  def inner(T):\n    if isinstance(label, int):\n      class_n = label\n    else:\n      class_n = T(\"labels\").index(label)\n    logits = T(layer)\n    logit = tf.reduce_sum(logits[:, class_n])\n    return logit\n  return inner", "docstring": "Like channel, but for softmax layers.\n\nArgs:\nlayer: A layer name string.\nlabel: Either a string (refering to a label in model.labels) or an int\nlabel position.\n\nReturns:\nObjective maximizing a logit.", "source": "juraj-google-style"}
{"code": "def version():\n    cmd = ['dot', '-V']\n    (out, _) = run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    info = out.decode('ascii')\n    ma = re.search('graphviz version (\\\\d+\\\\.\\\\d+(?:\\\\.\\\\d+)?) ', info)\n    if (ma is None):\n        raise RuntimeError\n    return tuple((int(d) for d in ma.group(1).split('.')))", "docstring": "Return the version number tuple from the ``stderr`` output of ``dot -V``.\n\nReturns:\nTwo or three ``int`` version ``tuple``.\nRaises:\ngraphviz.ExecutableNotFound: If the Graphviz executable is not found.\nsubprocess.CalledProcessError: If the exit status is non-zero.\nRuntimmeError: If the output cannot be parsed into a version number.", "source": "codesearchnet"}
{"code": "def U(data, bits=None, endian=None, target=None):\n    return globals()[('U%d' % _get_bits(bits, target))](data, endian=endian, target=target)", "docstring": "Unpack an unsigned pointer for a given target.\n\nArgs:\ndata(bytes): The data to unpack.\nbits(:class:`pwnypack.target.Target.Bits`): Override the default\nword size. If ``None`` it will look at the word size of\n``target``.\nendian(:class:`~pwnypack.target.Target.Endian`): Override the default\nbyte order. If ``None``, it will look at the byte order of\nthe ``target`` argument.\ntarget(:class:`~pwnypack.target.Target`): Override the default byte\norder. If ``None``, it will look at the byte order of\nthe global :data:`~pwnypack.target.target`.\n\nReturns:\nint: The pointer value.", "source": "codesearchnet"}
{"code": "def logical_name(self):\n    pchar = self._libinput.libinput_seat_get_logical_name(self._handle)\n    return string_at(pchar).decode()", "docstring": "The logical name of the seat.\n\nThis is an identifier to group sets of devices within the compositor.\n\nReturns:\nstr: The logical name of this seat.", "source": "codesearchnet"}
{"code": "def _ip_string_from_prefix(self, prefixlen=None):\n        \n        if not prefixlen:\n            prefixlen = self._prefixlen\n        return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))", "docstring": "Turn a prefix length into a dotted decimal string.\n\nArgs:\nprefixlen: An integer, the netmask prefix length.\n\nReturns:\nA string, the dotted decimal netmask string.", "source": "juraj-google-style"}
{"code": "def fill(self, name_or_slot, value):\n    \n    if isinstance(name_or_slot, basestring):\n      slot = getattr(self.outputs, name_or_slot)\n    elif isinstance(name_or_slot, Slot):\n      slot = name_or_slot\n    else:\n      raise UnexpectedPipelineError(\n          'Could not fill invalid output name: %r' % name_or_slot)\n\n    if not slot._exists:\n      raise SlotNotDeclaredError(\n          'Cannot fill output with name \"%s\" that was just '\n          'declared within the Pipeline context.' % slot.name)\n\n    self._context.fill_slot(self._pipeline_key, slot, value)", "docstring": "Fills an output slot required by this Pipeline.\n\nArgs:\nname_or_slot: The name of the slot (a string) or Slot record to fill.\nvalue: The serializable value to assign to this slot.\n\nRaises:\nUnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError\nif trying to output to a slot that was not declared ahead of time.", "source": "juraj-google-style"}
{"code": "def validate_detector(self, detector):\n        \n        resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX, 'validate'),\n                          data=detector)\n        resp.raise_for_status()", "docstring": "Validate a detector.\n\nValidates the given detector; throws a 400 Bad Request HTTP error if\nthe detector is invalid; otherwise doesn't return or throw anything.\n\nArgs:\ndetector (object): the detector model object. Will be serialized as\nJSON.", "source": "juraj-google-style"}
{"code": "def GetRawKeyFunction():\n    for get_raw_key_function in (_GetRawKeyFunctionPosix, _GetRawKeyFunctionWindows):\n        try:\n            return get_raw_key_function()\n        except:\n            pass\n    return lambda: None", "docstring": "Returns a function that reads one keypress from stdin with no echo.\n\nReturns:\nA function that reads one keypress from stdin with no echo or a function\nthat always returns None if stdin does not support it.", "source": "github-repos"}
{"code": "def get_mapping(version=1, exported_at=None, app_name=None):\n    if (exported_at is None):\n        exported_at = timezone.now()\n    app_name = (app_name or settings.HEROKU_CONNECT_APP_NAME)\n    return {'version': version, 'connection': {'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID, 'app_name': app_name, 'exported_at': exported_at.isoformat()}, 'mappings': [model.get_heroku_connect_mapping() for model in get_heroku_connect_models()]}", "docstring": "Return Heroku Connect mapping for the entire project.\n\nArgs:\nversion (int): Version of the Heroku Connect mapping, default: ``1``.\nexported_at (datetime.datetime): Time the export was created, default is ``now()``.\napp_name (str): Name of Heroku application associated with Heroku Connect the add-on.\n\nReturns:\ndict: Heroku Connect mapping.\n\nNote:\nThe version does not need to be incremented. Exports from the Heroku Connect\nwebsite will always have the version number ``1``.", "source": "codesearchnet"}
{"code": "def rmtree(self, exclude_wildcard=\"\"):\n        \n        if not exclude_wildcard:\n            shutil.rmtree(self.workdir)\n\n        else:\n            w = WildCard(exclude_wildcard)\n            for dirpath, dirnames, filenames in os.walk(self.workdir):\n                for fname in filenames:\n                    path = os.path.join(dirpath, fname)\n                    if not w.match(fname):\n                        os.remove(path)", "docstring": "Remove all files and directories in the working directory\n\nArgs:\nexclude_wildcard: Optional string with regular expressions separated by `|`.\nFiles matching one of the regular expressions will be preserved.\nexample: exclude_wildard=\"*.nc|*.txt\" preserves all the files\nwhose extension is in [\"nc\", \"txt\"].", "source": "juraj-google-style"}
{"code": "def get_min_eig_vec_proxy(self, use_tf_eig=False):\n    if use_tf_eig:\n        return tf.cond((self.smooth_placeholder < 1e-08), self.tf_min_eig_vec, self.tf_smooth_eig_vec)\n    min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)\n\n    def _vector_prod_fn(x):\n        return self.dual_object.get_psd_product(x)\n    estimated_eigen_vector = min_eigen_tf(x=self.eig_init_vec_placeholder, num_steps=self.eig_num_iter_placeholder, learning_rate=self.params['eig_learning_rate'], vector_prod_fn=_vector_prod_fn)\n    return estimated_eigen_vector", "docstring": "Computes the min eigen value and corresponding vector of matrix M.\n\nArgs:\nuse_tf_eig: Whether to use tf's default full eigen decomposition\nReturns:\neig_vec: Minimum absolute eigen value\neig_val: Corresponding eigen vector", "source": "codesearchnet"}
{"code": "def get_execution_info(self, driver_id, function_descriptor):\n        \n        if self._worker.load_code_from_local:\n            \n            \n            \n            driver_id = ray.DriverID.nil()\n            if not function_descriptor.is_actor_method():\n                self._load_function_from_local(driver_id, function_descriptor)\n        else:\n            \n            \n            \n            \n            \n            \n            with profiling.profile(\"wait_for_function\"):\n                self._wait_for_function(function_descriptor, driver_id)\n        try:\n            function_id = function_descriptor.function_id\n            info = self._function_execution_info[driver_id][function_id]\n        except KeyError as e:\n            message = (\"Error occurs in get_execution_info: \"\n                       \"driver_id: %s, function_descriptor: %s. Message: %s\" %\n                       (driver_id, function_descriptor, e))\n            raise KeyError(message)\n        return info", "docstring": "Get the FunctionExecutionInfo of a remote function.\n\nArgs:\ndriver_id: ID of the driver that the function belongs to.\nfunction_descriptor: The FunctionDescriptor of the function to get.\n\nReturns:\nA FunctionExecutionInfo object.", "source": "juraj-google-style"}
{"code": "def response(self, in_thread: Optional[bool]=None) -> 'Message':\n    data = {'channel': self['channel']}\n    if in_thread:\n        if ('message' in self):\n            data['thread_ts'] = (self['message'].get('thread_ts') or self['message']['ts'])\n        else:\n            data['thread_ts'] = (self.get('thread_ts') or self['ts'])\n    elif (in_thread is None):\n        if (('message' in self) and ('thread_ts' in self['message'])):\n            data['thread_ts'] = self['message']['thread_ts']\n        elif ('thread_ts' in self):\n            data['thread_ts'] = self['thread_ts']\n    return Message(data)", "docstring": "Create a response message.\n\nDepending on the incoming message the response can be in a thread. By default the response follow where the\nincoming message was posted.\n\nArgs:\nin_thread (boolean): Overwrite the `threading` behaviour\n\nReturns:\na new :class:`slack.event.Message`", "source": "codesearchnet"}
{"code": "def requirements(requirements_file):\n    \n    return [\n        str(pkg.req) for pkg in parse_requirements(\n            requirements_file, session=pip_download.PipSession()) if pkg.req is not None]", "docstring": "Return packages mentioned in the given file.\n\nArgs:\nrequirements_file (str): path to the requirements file to be parsed.\n\nReturns:\n(list): 3rd-party package dependencies contained in the file.", "source": "juraj-google-style"}
{"code": "def velocity(msg):\n    \n\n    if 5 <= typecode(msg) <= 8:\n        return surface_velocity(msg)\n\n    elif typecode(msg) == 19:\n        return airborne_velocity(msg)\n\n    else:\n        raise RuntimeError(\"incorrect or inconsistant message types, expecting 4<TC<9 or TC=19\")", "docstring": "Calculate the speed, heading, and vertical rate\n(handles both airborne or surface message)\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\n(int, float, int, string): speed (kt), ground track or heading (degree),\nrate of climb/descend (ft/min), and speed type\n('GS' for ground speed, 'AS' for airspeed)", "source": "juraj-google-style"}
{"code": "def run_resume_status(self, entity, project_name, name):\n        \n        query = gql()\n\n        response = self.gql(query, variable_values={\n            'entity': entity, 'project': project_name, 'name': name,\n        })\n\n        if 'model' not in response or 'bucket' not in response['model']:\n            return None\n\n        project = response['model']\n        self.set_setting('project', project_name)\n        if 'entity' in project:\n            self.set_setting('entity', project['entity']['name'])\n\n        return project['bucket']", "docstring": "Check if a run exists and get resume information.\n\nArgs:\nentity (str, optional): The entity to scope this project to.\nproject_name (str): The project to download, (can include bucket)\nrun (str, optional): The run to download", "source": "juraj-google-style"}
{"code": "def to_text(self):\n        \n        if self.items is None:\n            return\n        else:\n            text = ''\n            for i, item in enumerate(self.items):\n                text += ' %s. %s\\n' % (i + 1, item.to_text())\n\n            return text", "docstring": "Render a Text MessageElement as plain text\n\nArgs:\nNone\n\nReturns:\nStr the plain text representation of the Text MessageElement\n\nRaises:\nErrors are propagated", "source": "juraj-google-style"}
{"code": "def get_hash(self):\n    if self.__index_hash:\n        return self.__index_hash\n    key = self.request.method\n    key += URLHelper.get_protocol(self.request.url)\n    key += URLHelper.get_subdomain(self.request.url)\n    key += URLHelper.get_hostname(self.request.url)\n    key += URLHelper.get_tld(self.request.url)\n    key += URLHelper.get_path(self.request.url)\n    key += str(URLHelper.get_ordered_params(self.request.url))\n    if (self.request.data is not None):\n        key += str(self.request.data.keys())\n    self.__index_hash = key\n    return self.__index_hash", "docstring": "Generate and return the dict index hash of the given queue item.\n\nNote:\nCookies should not be included in the hash calculation because\notherwise requests are crawled multiple times with e.g. different\nsession keys, causing infinite crawling recursion.\n\nNote:\nAt this moment the keys do not actually get hashed since it works perfectly without and\nsince hashing the keys requires us to built hash collision management.\n\nReturns:\nstr: The hash of the given queue item.", "source": "codesearchnet"}
{"code": "def _logmessage_transform(cls, s, by=2):\n        \n        if len(s) >= by:\n            return s[by:].strip('\\n')\n        return s.strip('\\n')", "docstring": "Preprocess/cleanup a bzr log message before parsing\n\nArgs:\ns (str): log message string\nby (int): cutoff threshold for log message length\n\nReturns:\nstr: preprocessed log message string", "source": "juraj-google-style"}
{"code": "def get_session(self, app_path, session_id):\n    if (app_path not in self._applications):\n        raise ValueError(('Application %s does not exist on this server' % app_path))\n    return self._applications[app_path].get_session(session_id)", "docstring": "Get an active a session by name application path and session ID.\n\nArgs:\napp_path (str) :\nThe configured application path for the application to return\na session for.\n\nsession_id (str) :\nThe session ID of the session to retrieve.\n\nReturns:\nServerSession", "source": "codesearchnet"}
{"code": "def forward(self, probabilities, temperature=1.0, eps=0.0001):\n    if probabilities.ndim == 3:\n        probabilities = probabilities.unsqueeze(1)\n    one_minus_probabilities = torch.clamp(1 - probabilities, eps, 1)\n    probabilities = torch.clamp(probabilities, eps, 1)\n    y = log_binom(self.k_minus_1, self.k_idx) + self.k_idx * torch.log(probabilities) + (self.k_minus_1 - self.k_idx) * torch.log(one_minus_probabilities)\n    return self.act(y / temperature, dim=1)", "docstring": "Compute the log binomial distribution for probabilities.\n\nArgs:\nprobabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):\nTensor containing probabilities of each class.\ntemperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1):\nTemperature of distribution.\neps (`float`, *optional*, defaults to 1e-4):\nSmall number for numerical stability.\n\nReturns:\n`torch.Tensor` of shape `(batch_size, num_channels, height, width)`:\nLog binomial distribution logbinomial(p;t).", "source": "github-repos"}
{"code": "def create(rpc_layer, address):\n    if rpc_layer != 'grpc':\n        raise ValueError('Only GRPC backend is supported at the moment.')\n    return GrpcServer(address=address)", "docstring": "Create TF RPC server at given address.\n\nArgs:\nrpc_layer: Communication layer between client and server. Only \"grpc\" rpc\nlayer is supported at the moment.\naddress: Address where RPC server is hosted.\n\nReturns:\nAn instance of `tf.distribute.experimental.rpc.Server` class.\n\nRaises:\nA ValueError if rpc_layer other than \"grpc\" is used. Only GRPC\nis supported at the moment.\n\nExample usage:\n\n>>> import portpicker\n>>> @tf.function(input_signature=[\n...      tf.TensorSpec([], tf.int32),\n...      tf.TensorSpec([], tf.int32)])\n... def remote_fn(a, b):\n...   return tf.add(a, b)\n\n>>> port = portpicker.pick_unused_port()\n>>> address = \"localhost:{}\".format(port)\n>>> server = tf.distribute.experimental.rpc.Server.create(\"grpc\", address)\n>>> server.register(\"addition\", remote_fn)\n>>> server.start()", "source": "github-repos"}
{"code": "def region_code_for_number(numobj):\n    \n    country_code = numobj.country_code\n    regions = COUNTRY_CODE_TO_REGION_CODE.get(country_code, None)\n    if regions is None:\n        return None\n\n    if len(regions) == 1:\n        return regions[0]\n    else:\n        return _region_code_for_number_from_list(numobj, regions)", "docstring": "Returns the region where a phone number is from.\n\nThis could be used for geocoding at the region level. Only guarantees\ncorrect results for valid, full numbers (not short-codes, or invalid\nnumbers).\n\nArguments:\nnumobj -- The phone number object whose origin we want to know\n\nReturns the region where the phone number is from, or None if no region\nmatches this calling code.", "source": "juraj-google-style"}
{"code": "def sort_segment_points(Aps, Bps):\n    mid = []\n    j = 0\n    mid.append(Aps[0])\n    for i in range((len(Aps) - 1)):\n        dist = distance_tt_point(Aps[i], Aps[(i + 1)])\n        for m in range(j, len(Bps)):\n            distm = distance_tt_point(Aps[i], Bps[m])\n            if (dist > distm):\n                direction = dot(normalize(line(Aps[i].gen2arr(), Aps[(i + 1)].gen2arr())), normalize(Bps[m].gen2arr()))\n                if (direction > 0):\n                    j = (m + 1)\n                    mid.append(Bps[m])\n                    break\n        mid.append(Aps[(i + 1)])\n    for m in range(j, len(Bps)):\n        mid.append(Bps[m])\n    return mid", "docstring": "Takes two line segments and sorts all their points,\nso that they form a continuous path\n\nArgs:\nAps: Array of tracktotrip.Point\nBps: Array of tracktotrip.Point\nReturns:\nArray with points ordered", "source": "codesearchnet"}
{"code": "def end_statement(self, stmt):\n    self.active_stmts.remove(stmt)", "docstring": "Marks the end of a statement.\n\nArgs:\nstmt: Hashable, a key by which the statement can be identified in the\nCFG's stmt_prev and stmt_next attributes; must match a key previously\npassed to begin_statement.", "source": "github-repos"}
{"code": "def _get_all_groups():\n    with salt.utils.winapi.Com():\n        nt = win32com.client.Dispatch('AdsNameSpaces')\n    results = nt.GetObject('', 'WinNT:\n    results.Filter = ['group']\n    return results", "docstring": "A helper function that gets a list of group objects for all groups on the\nmachine\n\nReturns:\niter: A list of objects for all groups on the machine", "source": "codesearchnet"}
{"code": "def cancelOrder(self, order: Order) -> Trade:\n    self.client.cancelOrder(order.orderId)\n    now = datetime.datetime.now(datetime.timezone.utc)\n    key = self.wrapper.orderKey(order.clientId, order.orderId, order.permId)\n    trade = self.wrapper.trades.get(key)\n    if trade:\n        if (not trade.isDone()):\n            status = trade.orderStatus.status\n            if (((status == OrderStatus.PendingSubmit) and (not order.transmit)) or (status == OrderStatus.Inactive)):\n                newStatus = OrderStatus.Cancelled\n            else:\n                newStatus = OrderStatus.PendingCancel\n            logEntry = TradeLogEntry(now, newStatus, '')\n            trade.log.append(logEntry)\n            trade.orderStatus.status = newStatus\n            self._logger.info(f'cancelOrder: {trade}')\n            trade.cancelEvent.emit(trade)\n            trade.statusEvent.emit(trade)\n            self.cancelOrderEvent.emit(trade)\n            self.orderStatusEvent.emit(trade)\n            if (newStatus == OrderStatus.Cancelled):\n                trade.cancelledEvent.emit(trade)\n    else:\n        self._logger.error(f'cancelOrder: Unknown orderId {order.orderId}')\n    return trade", "docstring": "Cancel the order and return the Trade it belongs to.\n\nArgs:\norder: The order to be canceled.", "source": "codesearchnet"}
{"code": "def getDocumentIDs(aleph_search_result, number_of_docs=(- 1)):\n    downer = Downloader()\n    if ('set_number' not in aleph_search_result):\n        return []\n    set_number = str(aleph_search_result['set_number'])\n    if (len(set_number) < 6):\n        set_number = (((6 - len(set_number)) * '0') + set_number)\n    if (number_of_docs <= 0):\n        number_of_docs = aleph_search_result['no_entries']\n    set_data = downer.download((ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(SET_NUMBER=set_number, NUMBER_OF_DOCS=number_of_docs)))\n    dom = dhtmlparser.parseString(set_data)\n    set_data = dom.find('ill-get-set')\n    if (len(set_data) <= 0):\n        raise AlephException(\"Aleph didn't returned set data.\")\n    ids = []\n    for library in set_data:\n        documents = _alephResultToDict(library)\n        if ('error' in documents):\n            raise AlephException(('getDocumentIDs: ' + documents['error']))\n        if isinstance(documents['doc-number'], list):\n            ids.extend(map((lambda x: DocumentID(x, documents['set-library'], aleph_search_result['base'])), set(documents['doc-number'])))\n        else:\n            ids.append(DocumentID(documents['doc-number'], documents['set-library'], aleph_search_result['base']))\n    return ids", "docstring": "Get IDs, which can be used as parameters for other functions.\n\nArgs:\naleph_search_result (dict): returned from :func:`searchInAleph`\nnumber_of_docs (int, optional): how many :class:`DocumentID` from set\ngiven by `aleph_search_result` should be returned.\nDefault -1 for all of them.\n\nReturns:\nlist: :class:`DocumentID` named tuples to given `aleph_search_result`.\n\nRaises:\nAlephException: If Aleph returns unknown format of data.\n\nNote:\nReturned :class:`DocumentID` can be used as parameters to\n:func:`downloadMARCXML`.", "source": "codesearchnet"}
{"code": "def configure_collective_ops(self, collective_leader='', scoped_allocator_enabled_ops=('CollectiveReduce',), use_nccl_communication=False, device_filters=None):\n    if self._collective_leader is not None:\n        if self._collective_leader != collective_leader or self._collective_scoped_allocator_enabled_ops != scoped_allocator_enabled_ops or self._collective_use_nccl_communication != use_nccl_communication or (self._collective_device_filters != device_filters):\n            raise ValueError('Collective ops are already configured.')\n        else:\n            return\n    if self._context_handle is not None:\n        raise RuntimeError('Collective ops must be configured at program startup')\n    self._collective_leader = collective_leader\n    self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops\n    self._collective_use_nccl_communication = use_nccl_communication\n    self._collective_device_filters = device_filters", "docstring": "Configure collective ops.\n\nCollective group leader is necessary for collective ops to run, other\nconfigurations are mainly for the purpose of performance.\n\nArgs:\ncollective_leader: a device string for collective leader, e.g.\n\"/job:worker/replica:0/task:0\"; empty string means local execution of\ncollective ops.\nscoped_allocator_enabled_ops: a tuple or a list of op names for scoped\nallocator to run with.\nuse_nccl_communication: whether to use nccl communication for collective\nops.\ndevice_filters: a tuple or a list of device strings. If set, corresponding\ntask can only see the devices filtered by these device filters.\n\nRaises:\nRuntimeError: if this method is not called at program startup.", "source": "github-repos"}
{"code": "def as_json(self, entity_url, context=None):\n    try:\n        urllib.request.urlopen(entity_url)\n    except urllib.error.HTTPError:\n        raise ValueError('Cannot open {}'.format(entity_url))\n    entity_graph = self.read(entity_url)\n    entity_json = json.loads(entity_graph.serialize(format='json-ld', context=context).decode())\n    return json.dumps(entity_json)", "docstring": "Method takes a entity uri and attempts to return the Fedora Object\nas a JSON-LD.\n\nArgs:\nentity_url(str): Fedora Commons URL of Entity\ncontext(None): Returns JSON-LD with Context, default is None\n\nReturns:\nstr: JSON-LD of Fedora Object", "source": "codesearchnet"}
{"code": "def save_to_well_known_file(credentials, well_known_file=None):\n    \n    \n    \n\n    if well_known_file is None:\n        well_known_file = _get_well_known_file()\n\n    config_dir = os.path.dirname(well_known_file)\n    if not os.path.isdir(config_dir):\n        raise OSError(\n            'Config directory does not exist: {0}'.format(config_dir))\n\n    credentials_data = credentials.serialization_data\n    _save_private_file(well_known_file, credentials_data)", "docstring": "Save the provided GoogleCredentials to the well known file.\n\nArgs:\ncredentials: the credentials to be saved to the well known file;\nit should be an instance of GoogleCredentials\nwell_known_file: the name of the file where the credentials are to be\nsaved; this parameter is supposed to be used for\ntesting only", "source": "juraj-google-style"}
{"code": "def _parse_doc(doc):\n    lines = doc.split('\\n')\n    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))\n    if (len(descriptions) < 3):\n        description = lines[0]\n    else:\n        description = '{0}\\n\\n{1}'.format(lines[0], textwrap.dedent('\\n'.join(descriptions[2:])))\n    args = list(itertools.takewhile(_checker(_KEYWORDS_OTHERS), itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))\n    argmap = {}\n    if (len(args) > 1):\n        for pair in args[1:]:\n            kv = [v.strip() for v in pair.split(':')]\n            if (len(kv) >= 2):\n                argmap[kv[0]] = ':'.join(kv[1:])\n    return dict(headline=descriptions[0], description=description, args=argmap)", "docstring": "Parse a docstring.\n\nParse a docstring and extract three components; headline, description,\nand map of arguments to help texts.\n\nArgs:\ndoc: docstring.\n\nReturns:\na dictionary.", "source": "codesearchnet"}
{"code": "def get_path_list(self, type_str=None):\n        \n        return list(\n            reversed(\n                [v.label_str for v in self.parent_gen if type_str in (None, v.type_str)]\n            )\n        )", "docstring": "Get list of the labels of the nodes leading up to this node from the root.\n\nArgs:\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include\ninformation from nodes of that type.\n\nReturns:\nlist of str: The labels of the nodes leading up to this node from the root.", "source": "juraj-google-style"}
{"code": "def send_html(self, html, body=None, msgtype='m.text'):\n    return self.client.api.send_message_event(self.room_id, 'm.room.message', self.get_html_content(html, body, msgtype))", "docstring": "Send an html formatted message.\n\nArgs:\nhtml (str): The html formatted message to be sent.\nbody (str): The unformatted body of the message to be sent.", "source": "codesearchnet"}
{"code": "def parse_genes(gene_lines):\n    \n    genes = []\n    header = []\n    hgnc_identifiers = set()\n    delimiter = '\\t'\n    \n    delimiters = ['\\t', ' ', ';']\n\n    \n    \n    \n    for i,line in enumerate(gene_lines):\n        line = line.rstrip()\n        if not len(line) > 0:\n            continue\n        if line.startswith('\n            if not line.startswith('\n                \n                \n                line_length = 0\n                delimiter = None\n                for alt in delimiters:\n                    head_line = line.split(alt)\n                    if len(head_line) > line_length:\n                        line_length = len(head_line)\n                        delimiter = alt\n\n                header = [word.lower() for word in line[1:].split(delimiter)]\n        else:\n            \n            if i == 0:\n                line_length = 0\n                for alt in delimiters:\n                    head_line = line.split(alt)\n                    if len(head_line) > line_length:\n                        line_length = len(head_line)\n                        delimiter = alt\n                \n                if ('hgnc' in line or 'HGNC' in line):\n                    header = [word.lower() for word in line.split(delimiter)]\n                    continue\n                \n                \n                if line.split(delimiter)[0].isdigit():\n                    header = ['hgnc_id']\n                else:\n                    header = ['hgnc_symbol']\n\n            splitted_line = line.split(delimiter)\n            gene_info = dict(zip(header, splitted_line))\n\n            \n            \n            info_found = False\n            for key in gene_info:\n                if gene_info[key]:\n                    info_found = True\n                    break\n            \n            if not info_found:\n                continue\n\n            try:\n                gene = parse_gene(gene_info)\n            except Exception as e:\n                LOG.warning(e)\n                raise SyntaxError(\"Line {0} is malformed\".format(i + 1))\n\n            identifier = gene.pop('identifier')\n\n            if not identifier in hgnc_identifiers:\n                hgnc_identifiers.add(identifier)\n                genes.append(gene)\n\n    return genes", "docstring": "Parse a file with genes and return the hgnc ids\n\nArgs:\ngene_lines(iterable(str)): Stream with genes\n\nReturns:\ngenes(list(dict)): Dictionaries with relevant gene info", "source": "juraj-google-style"}
{"code": "def __init__(self, location=None, parent=None, **kwargs):\n    \n    if not location:\n      raise ValueError('Missing location value.')\n\n    super(LocationPathSpec, self).__init__(parent=parent, **kwargs)\n    self.location = location", "docstring": "Initializes a path specification.\n\nArgs:\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when location is not set.", "source": "juraj-google-style"}
{"code": "def get_effect_class(self, class_name, package_name=None) -> Type[Effect]:\n        \n        if package_name:\n            return effects.find_effect_class(\"{}.{}\".format(package_name, class_name))\n\n        return effects.find_effect_class(class_name)", "docstring": "Get an effect class from the effect registry.\n\nArgs:\nclass_name (str): The exact class name of the effect\n\nKeyword Args:\npackage_name (str): The python path to the effect package the effect name is located.\nThis is optional and can be used to avoid issue with class name collisions.\n\nReturns:\nEffect class", "source": "juraj-google-style"}
{"code": "def wtime_to_minutes(time_string):\n    \n    hours, mins, seconds = time_string.split(':')\n    return int(hours) * 60 + int(mins) + 1", "docstring": "wtime_to_minutes\n\nConvert standard wallclock time string to minutes.\n\nArgs:\n- Time_string in HH:MM:SS format\n\nReturns:\n(int) minutes", "source": "juraj-google-style"}
{"code": "def simplify_U(theta, phi, lam):\n    \n    gate = U3Gate(theta, phi, lam)\n    \n    if abs(gate.params[0] % (2.0 * math.pi)) < _CUTOFF_PRECISION:\n        gate = U1Gate(gate.params[0] + gate.params[1] + gate.params[2])\n    \n    if isinstance(gate, U3Gate):\n        \n        if abs((gate.params[0] - math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION:\n            gate = U2Gate(gate.params[1],\n                          gate.params[2] + (gate.params[0] - math.pi / 2))\n        \n        if abs((gate.params[0] + math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION:\n            gate = U2Gate(gate.params[1] + math.pi,\n                          gate.params[2] - math.pi + (gate.params[0] + math.pi / 2))\n    \n    if isinstance(gate, U1Gate) and abs(gate.params[0] % (4.0 * math.pi)) < _CUTOFF_PRECISION:\n        gate = IdGate()\n    return gate", "docstring": "Return the gate u1, u2, or u3 implementing U with the fewest pulses.\n\nThe returned gate implements U exactly, not up to a global phase.\n\nArgs:\ntheta, phi, lam: input Euler rotation angles for a general U gate\n\nReturns:\nGate: one of IdGate, U1Gate, U2Gate, U3Gate.", "source": "juraj-google-style"}
{"code": "def _get_user_command_string(self):\n    sdk_version = int(self._device.build_info['build_version_sdk'])\n    if sdk_version < 24:\n        return ''\n    return f'--user {self.user_id}'", "docstring": "Gets the appropriate command argument for specifying device user ID.\n\nBy default, this client operates within the current user. We\ndon't add the `--user {ID}` argument when Android's SDK is below 24,\nwhere multi-user support is not well implemented.\n\nReturns:\nA string of the command argument section to be formatted into\nadb commands.", "source": "github-repos"}
{"code": "def _create_validation_schema(schema_cls):\n    validation_schema = schema_cls()\n    for (_, field) in validation_schema.fields.items():\n        if isinstance(field, ModelTypeValidator):\n            validate_function = field.__class__.check_type\n            field._deserialize = MethodType(validate_function, field)\n    return validation_schema", "docstring": "Create a patched Schema for validating models.\n\nModel validation is not part of Marshmallow. Schemas have a ``validate``\nmethod but this delegates execution on ``load`` and discards the result.\nSimilarly, ``load`` will call ``_deserialize`` on every field in the\nschema.\n\nThis function patches the ``_deserialize`` instance method of each\nfield to make it call a custom defined method ``check_type``\nprovided by Qiskit in the different fields at\n``qiskit.validation.fields``.\n\nReturns:\nBaseSchema: a copy of the original Schema, overriding the\n``_deserialize()`` call of its fields.", "source": "codesearchnet"}
{"code": "def _colourise(text: str, colour: str) -> str:\n    if COLOUR:\n        text = style(text, fg=colour, bold=True)\n    return text", "docstring": "Colour text, if possible.\n\nArgs:\ntext: Text to colourise\ncolour: Colour to display text in\nReturns:\nColourised text, if possible", "source": "codesearchnet"}
{"code": "def delete_token(self,\n                     token_name,\n                     project_name,\n                     dataset_name):\n        \n        return self.resources.delete_token(token_name,\n                                           project_name,\n                                           dataset_name)", "docstring": "Delete a token with the given parameters.\nArguments:\nproject_name (str): Project name\ndataset_name (str): Dataset name project is based on\ntoken_name (str): Token name\nchannel_name (str): Channel name project is based on\nReturns:\nbool: True if project deleted, false if not deleted.", "source": "juraj-google-style"}
{"code": "def _ParseAbstractInteger(text, is_long=False):\n  \n  \n  try:\n    \n    \n    \n    if is_long:\n      return long(text, 0)\n    else:\n      return int(text, 0)\n  except ValueError:\n    raise ValueError('Couldn\\'t parse integer: %s' % text)", "docstring": "Parses an integer without checking size/signedness.\n\nArgs:\ntext: The text to parse.\nis_long: True if the value should be returned as a long integer.\n\nReturns:\nThe integer value.\n\nRaises:\nValueError: Thrown Iff the text is not a valid integer.", "source": "juraj-google-style"}
{"code": "def eq_or_parent(self, other):\n    return (self.parts[:len(other.parts)] == other.parts[:len(self.parts)])", "docstring": "Check whether ``other`` is an ancestor.\n\nReturns:\n(bool) True IFF ``other`` is an ancestor or equal to ``self``,\nelse False.", "source": "codesearchnet"}
{"code": "def fetch(self, rebuild=False, cache=True):\n        \n        if rebuild:\n            return self._process_table(cache)\n        try:\n            return self.read_cache()\n        except FileNotFoundError:\n            return self._process_table(cache)", "docstring": "Fetches the table and applies all post processors.\nArgs:\nrebuild (bool): Rebuild the table and ignore cache. Default: False\ncache (bool): Cache the finished table for faster future loading.\nDefault: True", "source": "juraj-google-style"}
{"code": "def _VerifyRecord(self, pls_record):\n    \n    \n    \n    \n    \n    \n    \n    future_timestamp = (\n        timelib.Timestamp.GetNow() + self._SIX_YEARS_IN_MICRO_SECONDS)\n\n    if pls_record.last_written_time > future_timestamp:\n      return False\n\n    \n    \n    first_word, _, _ = pls_record.query.partition(' ')\n\n    if first_word.lower() not in self._PLS_KEYWORD:\n      return False\n\n    return True", "docstring": "Verifies a PLS Recall record.\n\nArgs:\npls_record (pls_recall_record): a PLS Recall record to verify.\n\nReturns:\nbool: True if this is a valid PLS Recall record, False otherwise.", "source": "juraj-google-style"}
{"code": "def max(x, axis=None, keepdims=False):\n    return math_ops.reduce_max(x, axis, keepdims)", "docstring": "Maximum value in a tensor.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to find maximum values.\nkeepdims: A boolean, whether to keep the dimensions or not.\nIf `keepdims` is `False`, the rank of the tensor is reduced\nby 1. If `keepdims` is `True`,\nthe reduced dimension is retained with length 1.\n\nReturns:\nA tensor with maximum values of `x`.", "source": "github-repos"}
{"code": "def bofh_excuse(how_many=1):\n    \n\n    excuse_path = os.path.join(os.path.dirname(__file__), 'bofh_excuses.json')\n    with open(excuse_path, 'r') as _f:\n        excuse_dict = json.load(_f)\n\n    return [generate_random_string(excuse_dict) for _ in range(int(how_many))]", "docstring": "Generate random BOFH themed technical excuses!\n\nArgs:\nhow_many: Number of excuses to generate. (Default: 1)\n\nReturns:\nA list of BOFH excuses.", "source": "juraj-google-style"}
{"code": "def _ParseItem(self, parser_mediator, olecf_item):\n    \n    result = False\n\n    event_data = OLECFItemEventData()\n    event_data.name = olecf_item.name\n    event_data.offset = 0\n    event_data.size = olecf_item.size\n\n    creation_time, modification_time = self._GetTimestamps(olecf_item)\n    if creation_time:\n      date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n      result = True\n\n    if modification_time:\n      date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n      result = True\n\n    for sub_item in olecf_item.sub_items:\n      if self._ParseItem(parser_mediator, sub_item):\n        result = True\n\n    return result", "docstring": "Parses an OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nolecf_item (pyolecf.item): OLECF item.\n\nReturns:\nbool: True if an event was produced.", "source": "juraj-google-style"}
{"code": "def threat(self, name, **kwargs):\n        \n        group_obj = Threat(name, **kwargs)\n        return self._group(group_obj)", "docstring": "Add Threat data to Batch object\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Threat.", "source": "juraj-google-style"}
{"code": "def encode(self, obj):\n        \n        if isinstance(obj, np.ndarray):\n            if obj.ndim == 1 and obj.dtype == 'int16':\n                numpy_to_weld = self.utils.numpy_to_weld_int16_arr\n            elif obj.ndim == 1 and obj.dtype == 'int32':\n                numpy_to_weld = self.utils.numpy_to_weld_int_arr\n            elif obj.ndim == 1 and obj.dtype == 'int64':\n                numpy_to_weld = self.utils.numpy_to_weld_long_arr\n            elif obj.ndim == 1 and obj.dtype == 'float32':\n                numpy_to_weld = self.utils.numpy_to_weld_float_arr\n            elif obj.ndim == 1 and obj.dtype == 'float64':\n                numpy_to_weld = self.utils.numpy_to_weld_double_arr\n            elif obj.ndim == 2 and obj.dtype == 'int16':\n                numpy_to_weld = self.utils.numpy_to_weld_int16_arr_arr\n            elif obj.ndim == 2 and obj.dtype == 'int32':\n                numpy_to_weld = self.utils.numpy_to_weld_int_arr_arr\n            elif obj.ndim == 2 and obj.dtype == 'int64':\n                numpy_to_weld = self.utils.numpy_to_weld_long_arr_arr\n            elif obj.ndim == 2 and obj.dtype == 'float32':\n                numpy_to_weld = self.utils.numpy_to_weld_float_arr_arr\n            elif obj.ndim == 2 and obj.dtype == 'float64':\n                numpy_to_weld = self.utils.numpy_to_weld_double_arr_arr\n            elif obj.ndim == 2 and obj.dtype == 'bool':\n                numpy_to_weld = self.utils.numpy_to_weld_bool_arr_arr\n            elif obj.ndim == 1 and obj.dtype == 'bool':\n                numpy_to_weld = self.utils.numpy_to_weld_bool_arr\n            else:\n                numpy_to_weld = self.utils.numpy_to_weld_char_arr_arr\n        elif isinstance(obj, str):\n            numpy_to_weld = self.utils.numpy_to_weld_char_arr\n        else:\n            raise Exception(\"Unable to encode; invalid object type\")\n\n        numpy_to_weld.restype = self.py_to_weld_type(obj).ctype_class\n        numpy_to_weld.argtypes = [py_object]\n        weld_vec = numpy_to_weld(obj)\n        return weld_vec", "docstring": "Converts Python object to Weld object.\n\nArgs:\nobj: Python object that needs to be converted to Weld format\n\nReturns:\nWeld formatted object", "source": "juraj-google-style"}
{"code": "def broadcast(self, gossip_message, message_type, exclude=None):\n        \n        with self._lock:\n            if exclude is None:\n                exclude = []\n            for connection_id in self._peers.copy():\n                if connection_id not in exclude and \\\n                        self._network.is_connection_handshake_complete(\n                            connection_id):\n                    self.send(\n                        message_type,\n                        gossip_message.SerializeToString(),\n                        connection_id,\n                        one_way=True)", "docstring": "Broadcast gossip messages.\n\nBroadcast the message to all peers unless they are in the excluded\nlist.\n\nArgs:\ngossip_message: The message to be broadcast.\nmessage_type: Type of the message.\nexclude: A list of connection_ids that should be excluded from this\nbroadcast.", "source": "juraj-google-style"}
{"code": "def make_sharded_variable_creator(strategy: distribute_lib.Strategy) -> Callable[..., Any]:\n    tpu_devices = strategy.extended._tpu_devices\n\n    def _create_sharded_variable(next_creator, *args, **kwargs):\n        \n        kwargs['skip_mirrored_creator'] = True\n        shard_dim = 0\n        num_replicas, num_cores_per_replica = tpu_devices.shape\n        is_ckpt_init_value = is_checkpoint_initial_value(kwargs['initial_value'])\n        arg_spec = tf_inspect.getfullargspec(kwargs['initial_value'])\n        if is_ckpt_init_value and 'shard_info' not in arg_spec.args and ('shard_info' not in arg_spec.kwonlyargs):\n            raise ValueError('When a sharded variable is initialized from a checkpoint, shard_info must be in arguments of the init function.')\n        name, shape, dtype, unwrapped_initial_value, restore_uid = extract_variable_info(kwargs)\n        shape = ops.tensor_shape.TensorShape(shape)\n        num_devices = num_replicas * num_cores_per_replica\n        if shape[shard_dim] % num_devices != 0:\n            raise ValueError('Only evenly sharding across devices is currently supported. Got shape {} and {} devices'.format(shape, num_devices))\n        partition_shape = shape.as_list()\n        partition_shape[shard_dim] = partition_shape[shard_dim] \n        unwrapped_arg_spec = tf_inspect.getargspec(unwrapped_initial_value)\n        sharding_aware = 'shard_info' in unwrapped_arg_spec.args\n        variables = []\n        partition_offset = [0] * len(shape)\n        for replica_id in range(num_replicas):\n            for logic_core_id in range(num_cores_per_replica):\n                with ops.device(tpu_devices[replica_id][logic_core_id]):\n                    kwargs['name'] = f'{name}/{replica_id}'\n                    kwargs['shape'] = partition_shape\n                    if sharding_aware:\n                        shard_info = base.ShardInfo(tensor_shape.as_shape(partition_shape), copy.deepcopy(partition_offset))\n                        kwargs['initial_value'] = functools.partial(kwargs['initial_value'], shard_info=shard_info)\n                        partition_offset[shard_dim] += partition_shape[shard_dim]\n                    else:\n                        kwargs['initial_value'] = functools.partial(unwrapped_initial_value, shape=partition_shape, dtype=dtype)\n                    variables.append(next_creator(*args, **kwargs))\n        result = TPUEmbeddingShardedVariable(strategy, variables, tf_variables.VariableAggregation.NONE, None)\n        if restore_uid is not None:\n            result._maybe_initialize_trackable()\n            result._update_uid = restore_uid\n        return result\n    return _create_sharded_variable", "docstring": "Create a variable creator which shards across all the tpu device.\n\nArgs:\nstrategy: a TPUStrategy object.\n\nReturns:\nThe sharded variable creator.", "source": "github-repos"}
{"code": "def _wrap_callback_errors(callback, message):\n    \n    try:\n        callback(message)\n    except Exception:\n        \n        \n        \n        _LOGGER.exception(\n            \"Top-level exception occurred in callback while processing a \" \"message\"\n        )\n        message.nack()", "docstring": "Wraps a user callback so that if an exception occurs the message is\nnacked.\n\nArgs:\ncallback (Callable[None, Message]): The user callback.\nmessage (~Message): The Pub/Sub message.", "source": "juraj-google-style"}
{"code": "def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):\n\n    def regularizer(tensor):\n        with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):\n            weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=tensor.dtype.base_dtype, name='weight_l1')\n            weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=tensor.dtype.base_dtype, name='weight_l2')\n            reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)), name='value_l1')\n            reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor), name='value_l2')\n            return tf.add(reg_l1, reg_l2, name='value')\n    return regularizer", "docstring": "Define a L1L2 regularizer.\n\nArgs:\nweight_l1: scale the L1 loss by this factor.\nweight_l2: scale the L2 loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\na regularizer function.", "source": "codesearchnet"}
{"code": "def deserialize(config, custom_objects=None):\n    from keras.src.saving import serialization_lib\n    return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)", "docstring": "Deserializes a serialized `DTypePolicy` instance.\n\nArgs:\nconfig: `DTypePolicy` configuration.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during\ndeserialization.\n\nReturns:\nA Keras `DTypePolicy` instance.", "source": "github-repos"}
{"code": "def make_collective(self, num_processes, gpu_per_process):\n    cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()\n    devices = ['/job:worker/replica:0/task:%d/device:CPU:0' % cluster_resolver.task_id]\n    if gpu_per_process > 0:\n        devices = ['/job:worker/replica:0/task:%d/device:GPU:%d' % (cluster_resolver.task_id, i) for i in range(gpu_per_process)]\n    group_size = num_processes * len(devices)\n    collective = cross_device_ops_lib.CollectiveAllReduce(devices=devices, group_size=group_size, options=collective_util.Options())\n    return (collective, devices, cluster_resolver.task_id)", "docstring": "Returns collectives and other info to be used in tests.\n\nArgs:\nnum_processes: an integer indicating the number of processes that\nparticipate in the collective.\ngpu_per_process: number of GPUs (0 if no GPUs) used by each process.\n\nReturns:\nA tuple of (collective, devices, pid) where collective is a instance\nof `CollectiveAllReduce`, devices are a list of local devices (str)\nattached to the current process, and pid is the id of this process among\nall participant processes.", "source": "github-repos"}
{"code": "def process_file(self, path):\n    if self._config.verbose:\n        self._logger.info('Processing file \"%s\"', path)\n    output_path = ('%s%s' % (path, BATCH_EXTENSION))\n    with open(output_path, 'w') as file:\n        for line in lines_generator(path):\n            file.write(('%s\\n' % self._cucco.normalize(line.encode().decode('utf-8'))))\n    self._logger.debug('Created file \"%s\"', output_path)", "docstring": "Process a file applying normalizations.\n\nGet a file as input and generate a new file with the\nresult of applying normalizations to every single line\nin the original file. The extension for the new file\nwill be the one defined in BATCH_EXTENSION.\n\nArgs:\npath: Path to the file.", "source": "codesearchnet"}
{"code": "def _merge_bee(self, bee):\n        \n\n        random_dimension = randint(0, len(self._value_ranges) - 1)\n        second_bee = randint(0, self._num_employers - 1)\n        while (bee.id == self._employers[second_bee].id):\n            second_bee = randint(0, self._num_employers - 1)\n        new_bee = deepcopy(bee)\n        new_bee.values[random_dimension] = self.__onlooker.calculate_positions(\n            new_bee.values[random_dimension],\n            self._employers[second_bee].values[random_dimension],\n            self._value_ranges[random_dimension]\n        )\n        fitness_score = new_bee.get_score(self._fitness_fxn(\n            new_bee.values,\n            **self._args\n        ))\n        return (fitness_score, new_bee.values, new_bee.error)", "docstring": "Shifts a random value for a supplied bee with in accordance with\nanother random bee's value\n\nArgs:\nbee (EmployerBee): supplied bee to merge\n\nReturns:\ntuple: (score of new position, values of new position, fitness\nfunction return value of new position)", "source": "juraj-google-style"}
{"code": "def getModPath(self, *paths):\n    dirn = self.getModDir()\n    return s_common.genpath(dirn, *paths)", "docstring": "Construct a path relative to this module's working directory.\n\nArgs:\n*paths: A list of path strings\n\nNotes:\nThis creates the module specific directory if it does not exist.\n\nReturns:\n(str): The full path (or None if no cortex dir is configured).", "source": "codesearchnet"}
{"code": "def _get_full_signature_list(self):\n    return self._interpreter.GetSignatureDefs()", "docstring": "Gets list of SignatureDefs in the model.\n\nExample,\n```\nsignatures = interpreter._get_full_signature_list()\nprint(signatures)\n\n# {\n#   'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}}\n# }\n\nThen using the names in the signature list you can get a callable from\nget_signature_runner().\n```\n\nReturns:\nA list of SignatureDef details in a dictionary structure.\nIt is keyed on the SignatureDef method name, and the value holds\ndictionary of inputs and outputs.", "source": "github-repos"}
{"code": "def CopyFrom(self, other_msg):\n    if (self is other_msg):\n        return\n    self.Clear()\n    self.MergeFrom(other_msg)", "docstring": "Copies the content of the specified message into the current message.\n\nThe method clears the current message and then merges the specified\nmessage using MergeFrom.\n\nArgs:\nother_msg: Message to copy into the current one.", "source": "codesearchnet"}
{"code": "def _preprocess_sqlite_index(asql_query, library, backend, connection):\n    \n\n    new_query = None\n\n    if asql_query.strip().lower().startswith('index'):\n\n        logger.debug(\n            '_preprocess_index: create index query found.\\n    asql query: {}'\n            .format(asql_query))\n\n        index = parse_index(asql_query)\n        partition = library.partition(index.source)\n        table = backend.install(connection, partition, materialize=True)\n        index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))\n        new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(\n            index=index_name, table=table, columns=','.join(index.columns))\n\n    logger.debug(\n        '_preprocess_index: preprocess finished.\\n    asql query: {}\\n    new query: {}'\n        .format(asql_query, new_query))\n\n    return new_query or asql_query", "docstring": "Creates materialized view for each indexed partition found in the query.\n\nArgs:\nasql_query (str): asql query\nlibrary (ambry.Library):\nbackend (SQLiteBackend):\nconnection (apsw.Connection):\n\nReturns:\nstr: converted asql if it contains index query. If not, returns asql_query as is.", "source": "juraj-google-style"}
{"code": "def load(self):\n    from scipy.io import netcdf_file\n    from scipy import interpolate\n    import numpy as np\n    f = netcdf_file(self.input_file)\n    out = dict()\n    lats = f.variables['lat'][:].copy()\n    lons = f.variables['lon'][:].copy()\n    out['data'] = np.roll(f.variables[self.variable_name][(:, :, :)].copy(), shift=(len(lons) \n    lons = np.roll(lons, shift=(len(lons) \n    lons[(lons > 180)] -= 360\n    out['data'] = np.ma.array(out['data'])\n    out['data'][(out['data'] < (- 1000000.0))] = np.ma.masked\n    out['lat_idx'] = interpolate.interp1d(x=lats, y=np.arange(len(lats)))\n    out['lon_idx'] = interpolate.interp1d(x=lons, y=np.arange(len(lons)))\n    f.close()\n    return out", "docstring": "Load the climate data as a map\n\nReturns:\ndict: {data: masked 3D numpy array containing climate data per month (first axis),\nlat_idx: function converting a latitude to the (fractional) row index in the map,\nlon_idx: function converting a longitude to the (fractional) column index in the map}", "source": "codesearchnet"}
{"code": "def isUserCert(self, name):\n        \n        crtpath = self._getPathJoin('users', '%s.crt' % name)\n        return os.path.isfile(crtpath)", "docstring": "Checks if a user certificate exists.\n\nArgs:\nname (str): The name of the user keypair.\n\nExamples:\nCheck if the user cert \"myuser\" exists:\n\nexists = cdir.isUserCert('myuser')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "juraj-google-style"}
{"code": "def _construct_context_for_args(args):\n  \n  global_default_context = google.datalab.Context.default()\n  config = {}\n  for key in global_default_context.config:\n    config[key] = global_default_context.config[key]\n\n  billing_tier_arg = args.get('billing', None)\n  if billing_tier_arg:\n    config['bigquery_billing_tier'] = billing_tier_arg\n\n  return google.datalab.Context(\n    project_id=global_default_context.project_id,\n    credentials=global_default_context.credentials,\n    config=config)", "docstring": "Construct a new Context for the parsed arguments.\n\nArgs:\nargs: the dictionary of magic arguments.\nReturns:\nA new Context based on the current default context, but with any explicitly\nspecified arguments overriding the default's config.", "source": "juraj-google-style"}
{"code": "def disassemble(qobj):\n    run_config = qobj.config.to_dict()\n    user_qobj_header = qobj.header.to_dict()\n    circuits = _experiments_to_circuits(qobj)\n    return (circuits, run_config, user_qobj_header)", "docstring": "Dissasemble a qobj and return the circuits, run_config, and user header\n\nArgs:\nqobj (Qobj): The input qobj object to dissasemble\nReturns:\ncircuits (list): A list of quantum circuits\nrun_config (dict): The dist of the run config\nuser_qobj_header (dict): The dict of any user headers in the qobj", "source": "codesearchnet"}
{"code": "def create(self, resource, id=None, timeout=(- 1)):\n    if (not id):\n        available_id = self.__get_first_available_id()\n        uri = ('%s/%s' % (self.URI, str(available_id)))\n    else:\n        uri = ('%s/%s' % (self.URI, str(id)))\n    return self._client.create(resource, uri=uri, timeout=timeout)", "docstring": "Adds the specified trap forwarding destination.\nThe trap destination associated with the specified id will be created if trap destination with that id does not exists.\nThe id can only be an integer greater than 0.\n\nArgs:\nresource (dict): Object to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Created resource.", "source": "codesearchnet"}
{"code": "def read(self, vals):\n        \n        i = 0\n        if len(vals[i]) == 0:\n            self.city = None\n        else:\n            self.city = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.state_province_region = None\n        else:\n            self.state_province_region = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.country = None\n        else:\n            self.country = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.source = None\n        else:\n            self.source = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.wmo = None\n        else:\n            self.wmo = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.latitude = None\n        else:\n            self.latitude = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.longitude = None\n        else:\n            self.longitude = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.timezone = None\n        else:\n            self.timezone = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.elevation = None\n        else:\n            self.elevation = vals[i]\n        i += 1", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def get_saved_issue_data(self, issue, namespace='open'):\n    if isinstance(issue, int):\n        issue_number = str(issue)\n    elif isinstance(issue, basestring):\n        issue_number = issue\n    else:\n        issue_number = issue.number\n    issue_data_key = self._issue_data_key(namespace)\n    issue_data = self.data.get(issue_data_key, {})\n    _data = issue_data.get(str(issue_number), {})\n    issue_data[str(issue_number)] = _data\n    return _data", "docstring": "Returns issue data from local data.\n\nArgs:\nissue:\n`int`. Github issue number.\nnamespace:\n`str`. Namespace for storing this issue.", "source": "codesearchnet"}
{"code": "def get_nets_arin(self, response):\n    nets = []\n    pattern = re.compile('^NetRange:[^\\\\S\\\\n]+(.+)$', re.MULTILINE)\n    temp = pattern.search(response)\n    net_range = None\n    net_range_start = None\n    if (temp is not None):\n        net_range = temp.group(1).strip()\n        net_range_start = temp.start()\n    for match in re.finditer('^CIDR:[^\\\\S\\\\n]+(.+?,[^\\\\S\\\\n].+|.+)$', response, re.MULTILINE):\n        try:\n            net = copy.deepcopy(BASE_NET)\n            if (len(nets) > 0):\n                temp = pattern.search(response, match.start())\n                net_range = None\n                net_range_start = None\n                if (temp is not None):\n                    net_range = temp.group(1).strip()\n                    net_range_start = temp.start()\n            if (net_range is not None):\n                if ((net_range_start < match.start()) or (len(nets) > 0)):\n                    try:\n                        net['range'] = ('{0} - {1}'.format(ip_network(net_range)[0].__str__(), ip_network(net_range)[(- 1)].__str__()) if ('/' in net_range) else net_range)\n                    except ValueError:\n                        net['range'] = net_range\n            net['cidr'] = ', '.join([ip_network(c.strip()).__str__() for c in match.group(1).split(', ')])\n            net['start'] = match.start()\n            net['end'] = match.end()\n            nets.append(net)\n        except ValueError:\n            pass\n    return nets", "docstring": "The function for parsing network blocks from ARIN whois data.\n\nArgs:\nresponse (:obj:`str`): The response from the ARIN whois server.\n\nReturns:\nlist of dict: Mapping of networks with start and end positions.\n\n::\n\n[{\n'cidr' (str) - The network routing block\n'start' (int) - The starting point of the network\n'end' (int) - The endpoint point of the network\n}]", "source": "codesearchnet"}
{"code": "def get_tool_variants(self, tool_name):\n        \n        variants = set()\n        tools_dict = self.get_tools(request_only=False)\n        for variant, tools in tools_dict.itervalues():\n            if tool_name in tools:\n                variants.add(variant)\n        return variants", "docstring": "Get the variant(s) that provide the named tool.\n\nIf there are more than one variants, the tool is in conflict, and Rez\ndoes not know which variant's tool is actually exposed.\n\nArgs:\ntool_name(str): Name of the tool to search for.\n\nReturns:\nSet of `Variant` objects. If no variant provides the tool, an\nempty set is returned.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs):\n    \n    if nest.is_sequence(inputs):\n      merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]\n      return nest.pack_sequence_as(inputs, merged_tensors)\n\n    \n    return self._merge(inputs)", "docstring": "Connects the MergeDims module into the graph.\n\nArgs:\ninputs: Tensor or a nested list of Tensors to merge. Its rank must be\ngreater than or equal to `start` + `size`.\n\nReturns:\nThe merged Tensor or a nested list of merged Tensors.\n\nRaises:\nValueError: If any of the `inputs` tensors has insufficient rank.", "source": "juraj-google-style"}
{"code": "def to_timestamp(dt, timestamp):\n  \n  if dt.tzinfo:\n    \n    raise TypeError('Cannot store a timezone aware datetime. '\n                    'Convert to UTC and store the naive datetime.')\n  timestamp.seconds = calendar.timegm(dt.timetuple())\n  timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO", "docstring": "Convert datetime to google.protobuf.Timestamp.\n\nArgs:\ndt: a timezone naive datetime.\ntimestamp: a google.protobuf.Timestamp to populate.\n\nRaises:\nTypeError: if a timezone aware datetime was provided.", "source": "juraj-google-style"}
{"code": "def update_memo(self, task_id, task, r):\n    if ((not self.memoize) or (not task['memoize'])):\n        return\n    if (task['hashsum'] in self.memo_lookup_table):\n        logger.info(('Updating appCache entry with latest %s:%s call' % (task['func_name'], task_id)))\n        self.memo_lookup_table[task['hashsum']] = r\n    else:\n        self.memo_lookup_table[task['hashsum']] = r", "docstring": "Updates the memoization lookup table with the result from a task.\n\nArgs:\n- task_id (int): Integer task id\n- task (dict) : A task dict from dfk.tasks\n- r (Result future): Result future\n\nA warning is issued when a hash collision occurs during the update.\nThis is not likely.", "source": "codesearchnet"}
{"code": "def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''):\n    \n    \n    compare = difflib.SequenceMatcher(lambda x: x in ignore)\n    compare.set_seqs(a=a, b=b)\n    matching_text = list()\n\n    for match in compare.get_matching_blocks():\n        start = match.a\n        text = a[start: start+match.size]\n        if end_characters:\n            prev_text = text\n            while len(text) != 0 and text[0] in end_characters:\n                text = text[1:]\n            while len(text) != 0 and text[-1] not in end_characters:\n                text = text[:-1]\n            if len(text) == 0:\n                text = prev_text\n        if len(text) >= match_min_size:\n            matching_text.append(text)\n    return matching_text", "docstring": "Returns a list of matching blocks of text in a and b\n\nArgs:\na (str): First string to match\nb (str): Second string to match\nmatch_min_size (int): Minimum block size to match on. Defaults to 30.\nignore (str): Any characters to ignore in matching. Defaults to ''.\nend_characters (str): End characters to look for. Defaults to ''.\n\nReturns:\nList[str]: List of matching blocks of text", "source": "juraj-google-style"}
{"code": "def forward(self, inference_args=None, input_tangents=None):\n    del inference_args\n    if input_tangents:\n        raise errors.InternalError('unexpectedly got forwardprop information in a class that does not support forwardprop.')\n    return self._inference_function", "docstring": "A forward function with only user-specified outputs.\n\nThe call operation for the returned inference function can be rewritten into\na forward function. This only happens if the backward function (from the\n`backward` method) ends up being used to compute gradients.\n\nThis approach avoids constructing unnecessary graphs, but it only works if\nwe are calling this function when not executing eagerly.\n\nArgs:\ninference_args: A flat list of Tensors, arguments to the inference\nfunction. Unused, but taken for compatibility with\n_TapeGradientFunctions.\ninput_tangents: A flat list of Tensors, jvps associated with\n`inference_args`. Unused; if required, tape functions must be used\ninstead.\n\nReturns:\nAn atomic_function.AtomicFunction.", "source": "github-repos"}
{"code": "def set_evaluation_parameter(self, parameter_name, parameter_value):\n        \n\n        if 'evaluation_parameters' not in self._expectations_config:\n            self._expectations_config['evaluation_parameters'] = {}\n\n        self._expectations_config['evaluation_parameters'].update(\n            {parameter_name: parameter_value})", "docstring": "Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\nparameterized expectations.\n\nArgs:\nparameter_name (string): The name of the kwarg to be replaced at evaluation time\nparameter_value (any): The value to be used", "source": "juraj-google-style"}
{"code": "def parse_auth(cls, entries, raise_on_error=False):\n    conf = {}\n    for (registry, entry) in six.iteritems(entries):\n        if (not isinstance(entry, dict)):\n            log.debug('Config entry for key {0} is not auth config'.format(registry))\n            if raise_on_error:\n                raise errors.InvalidConfigFile('Invalid configuration for registry {0}'.format(registry))\n            return {}\n        if ('identitytoken' in entry):\n            log.debug('Found an IdentityToken entry for registry {0}'.format(registry))\n            conf[registry] = {'IdentityToken': entry['identitytoken']}\n            continue\n        if ('auth' not in entry):\n            log.debug('Auth data for {0} is absent. Client might be using a credentials store instead.'.format(registry))\n            conf[registry] = {}\n            continue\n        (username, password) = decode_auth(entry['auth'])\n        log.debug('Found entry (registry={0}, username={1})'.format(repr(registry), repr(username)))\n        conf[registry] = {'username': username, 'password': password, 'email': entry.get('email'), 'serveraddress': registry}\n    return conf", "docstring": "Parses authentication entries\n\nArgs:\nentries:        Dict of authentication entries.\nraise_on_error: If set to true, an invalid format will raise\nInvalidConfigFile\n\nReturns:\nAuthentication registry.", "source": "codesearchnet"}
{"code": "def add_arg_scope(func):\n  \n  @functools.wraps(func)\n  def func_with_args(*args, **kwargs):\n    current_scope = _current_arg_scope()\n    current_args = kwargs\n    key_func = (func.__module__, func.__name__)\n    if key_func in current_scope:\n      current_args = current_scope[key_func].copy()\n      current_args.update(kwargs)\n    return func(*args, **current_args)\n  _add_op(func)\n  return func_with_args", "docstring": "Decorates a function with args so it can be used within an arg_scope.\n\nArgs:\nfunc: function to decorate.\n\nReturns:\nA tuple with the decorated function func_with_args().", "source": "juraj-google-style"}
{"code": "def remove(self, id):\n        \n        before_len = len(self.model.db)\n        self.model.db = [t for t in self.model.db if t[\"id\"] != id]\n        if not self._batch.enable.is_set():\n            self.model.save_db()\n        return before_len - len(self.model.db)", "docstring": "Remove a object by id\nArgs:\nid (int): Object's id should be deleted\nReturns:\nlen(int): affected rows", "source": "juraj-google-style"}
{"code": "def compute_mask(self, inputs, mask=None):\n    if not self.supports_masking:\n        if any((m is not None for m in nest.flatten(mask))):\n            raise TypeError('Layer ' + self.name + ' does not support masking, but was passed an input_mask: ' + str(mask))\n        return None\n    return mask", "docstring": "Computes an output mask tensor.\n\nArgs:\ninputs: Tensor or list of tensors.\nmask: Tensor or list of tensors.\n\nReturns:\nNone or a tensor (or list of tensors,\none per output tensor of the layer).", "source": "github-repos"}
{"code": "def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):\n    format = format.upper()\n    if (format == 'JSON'):\n        format = 'NEWLINE_DELIMITED_JSON'\n    if ((format == 'CSV') and (csv_delimiter is None)):\n        csv_delimiter = ','\n    try:\n        response = self._api.table_extract(self._name_parts, destination, format, compress, csv_delimiter, csv_header)\n        return self._init_job_from_response(response)\n    except Exception as e:\n        raise google.datalab.JobError(location=traceback.format_exc(), message=str(e), reason=str(type(e)))", "docstring": "Starts a job to export the table to GCS.\n\nArgs:\ndestination: the destination URI(s). Can be a single URI or a list.\nformat: the format to use for the exported data; one of 'csv', 'json', or 'avro'\n(default 'csv').\ncsv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','\ncsv_header: for CSV exports, whether to include an initial header line. Default true.\ncompress: whether to compress the data on export. Compression is not supported for\nAVRO format. Defaults to False.\nReturns:\nA Job object for the export Job if it was started successfully; else None.", "source": "codesearchnet"}
{"code": "def rouge_2_fscore(predictions, labels, **unused_kwargs):\n  \n\n  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n  \n  outputs = tf.squeeze(outputs, axis=[-1, -2])\n  labels = tf.squeeze(labels, axis=[-1, -2])\n  rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32)\n  return rouge_2_f_score, tf.constant(1.0)", "docstring": "ROUGE-2 F1 score computation between labels and predictions.\n\nThis is an approximate ROUGE scoring method since we do not glue word pieces\nor decode the ids and tokenize the output.\n\nArgs:\npredictions: tensor, model predictions\nlabels: tensor, gold output.\n\nReturns:\nrouge2_fscore: approx rouge-2 f1 score.", "source": "juraj-google-style"}
{"code": "def get_replacement_transform_for_applied_ptransform(self, applied_ptransform):\n    return self.get_replacement_transform(applied_ptransform.transform)", "docstring": "Provides a runner specific override for a given `AppliedPTransform`.\n\nArgs:\napplied_ptransform: `AppliedPTransform` containing the `PTransform` to be\nreplaced.\n\nReturns:\nA `PTransform` that will be the replacement for the `PTransform` inside\nthe `AppliedPTransform` given as an argument.", "source": "github-repos"}
{"code": "def unused(node):\n  \n  cfg.forward(node, cfg.ReachingDefinitions())\n  unused_obj = Unused()\n  unused_obj.visit(node)\n  return unused_obj.unused", "docstring": "Find unused definitions that can be remove.\n\nThis runs reaching definitions analysis followed by a walk over the AST to\nfind all variable definitions that are not used later on.\n\nArgs:\nnode: The AST of e.g. a function body to find unused variable definitions.\n\nReturns:\nunused: After visiting all the nodes, this attribute contanis a set of\ndefinitions in the form of `(variable_name, node)` pairs which are\nunused in this AST.", "source": "juraj-google-style"}
{"code": "def get_port_map(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + self.PORT_MAP_PATH\n        return self._client.get(id_or_uri=uri)", "docstring": "Use to get the drive enclosure I/O adapter port to SAS interconnect port connectivity.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\n\nReturns:\ndict: Drive Enclosure Port Map", "source": "juraj-google-style"}
{"code": "def converted_self(self):\n    if self._converted_self is None:\n        source = self._function or self._enclosing_graph\n        self._converted_self = source.converted_self().nodes[self._node.name]\n    return self._converted_self", "docstring": "The NodeDef to be converted.\n\nReturns:\nThe NodeDef to be converted, which can come from either a graph for a\nfunction. Derived classes should call this (via 'super') to make sure the\nnode is retrieved from the right place.", "source": "github-repos"}
{"code": "def start(self, interval_s):\n    if self.running:\n        return False\n    self.stopped.clear()\n\n    def _execute():\n        if ((not self.method()) and self.stop_if_false):\n            return\n        while (not self.stopped.wait(interval_s)):\n            if ((not self.method()) and self.stop_if_false):\n                return\n    self.thread = threading.Thread(target=_execute)\n    self.thread.daemon = True\n    self.thread.start()\n    return True", "docstring": "Starts executing the method at the specified interval.\n\nArgs:\ninterval_s: The amount of time between executions of the method.\nReturns:\nFalse if the interval was already running.", "source": "codesearchnet"}
{"code": "def add_backend_policy(self, json_data):\n    env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n    elbclient = env.client('elb')\n    for job in json.loads(json_data)['job']:\n        for listener in job['listeners']:\n            instance_port = listener['internalPort']\n            backend_policy_list = listener['backendPolicies']\n            if backend_policy_list:\n                LOG.info('Adding backend server policies: %s', backend_policy_list)\n                elbclient.set_load_balancer_policies_for_backend_server(LoadBalancerName=self.app, InstancePort=instance_port, PolicyNames=backend_policy_list)", "docstring": "Attaches backend server policies to an ELB\n\nArgs:\njson_data (json): return data from ELB upsert", "source": "codesearchnet"}
{"code": "def format_param_list(listed_params, output_name):\n        \n        output_payload = {}\n        if listed_params:\n            for index, item in enumerate(listed_params):\n                output_payload[str(output_name) + \"[\" + str(index) + \"]\" ] = item\n        return output_payload", "docstring": "Utility method for formatting lists of parameters for api consumption\nUseful for email address lists, etc\nArgs:\nlisted_params (list of values) - the list to format\noutput_name (str) - the parameter name to prepend to each key", "source": "juraj-google-style"}
{"code": "def load_extension(self, path, name_filter=None, class_filter=None, unique=False, component=None):\n    import_name = None\n    if (component is not None):\n        import_name = _ensure_package_loaded(path, component)\n    (name, ext) = _try_load_module(path, import_name=import_name)\n    if ((name_filter is not None) and (name != name_filter)):\n        return []\n    found = [(name, x) for x in self._filter_subclasses(ext, class_filter)]\n    found = [(name, x) for (name, x) in found if self._filter_nonextensions(x)]\n    if (not unique):\n        return found\n    if (len(found) > 1):\n        raise ArgumentError(('Extension %s should have had exactly one instance of class %s, found %d' % (path, class_filter.__name__, len(found))), classes=found)\n    elif (len(found) == 0):\n        raise ArgumentError(('Extension %s had no instances of class %s' % (path, class_filter.__name__)))\n    return found[0]", "docstring": "Load a single python module extension.\n\nThis function is similar to using the imp module directly to load a\nmodule and potentially inspecting the objects it declares to filter\nthem by class.\n\nArgs:\npath (str): The path to the python file to load\nname_filter (str): If passed, the basename of the module must match\nname or nothing is returned.\nclass_filter (type): If passed, only instance of this class are returned.\nunique (bool): If True (default is False), there must be exactly one object\nfound inside this extension that matches all of the other criteria.\ncomponent (IOTile): The component that this extension comes from if it is\nloaded from an installed component.  This is used to properly import\nthe extension as a submodule of the component's support package.\n\nReturns:\nlist of (name, type): A list of the objects found at the extension path.\n\nIf unique is True, then the list only contains a single entry and that\nentry will be directly returned.", "source": "codesearchnet"}
{"code": "def save(self, config_loc=None):\n    if (not os.path.exists(_USER_CONFIG_DIR)):\n        'create directory if not exists'\n        os.makedirs(_USER_CONFIG_DIR)\n    with open(_DEFAULT_PATH, 'w') as f:\n        json.dump({'key': self._key, 'base_url': self._base_url, 'username': self._username}, f)", "docstring": "Saves current user credentials to user directory.\n\nArgs:\nconfig_loc (str, optional): Location where credentials are to be\nstored. If no argument is provided, it will be send to the\ndefault location.\n\nExample:\n\n.. code::\n\nfrom cartoframes import Credentials\ncreds = Credentials(username='eschbacher', key='abcdefg')\ncreds.save()  # save to default location", "source": "codesearchnet"}
{"code": "def checkPermissions(permissions=[], obj=None):\n    \n    if not obj:\n        return False\n    sm = getSecurityManager()\n    for perm in permissions:\n        if not sm.checkPermission(perm, obj):\n            return ''\n    return True", "docstring": "Checks if a user has permissions for a given object.\n\nArgs:\npermissions: The permissions the current user must be compliant with\nobj: The object for which the permissions apply\n\nReturns:\n1 if the user complies with all the permissions for the given object.\nOtherwise, it returns empty.", "source": "juraj-google-style"}
{"code": "def get_extra_vars():\n    g = ops.get_default_graph()\n    if isinstance(g, _FuncGraph):\n        return g.extra_vars\n    else:\n        return []", "docstring": "Returns the captured variables by the function.\n\nReturns:\nIf the default graph is being used to define a function, the\nreturned list of variables are those created inside the function\nbody so far. Otherwise, returns an empty list.", "source": "github-repos"}
{"code": "def model_fn(hparams, seed):\n    rng = random.Random(seed)\n    model = tf.keras.models.Sequential()\n    model.add(tf.keras.layers.Input(INPUT_SHAPE))\n    model.add(tf.keras.layers.Reshape((INPUT_SHAPE + (1,))))\n    conv_filters = 8\n    for _ in xrange(hparams[HP_CONV_LAYERS]):\n        model.add(tf.keras.layers.Conv2D(filters=conv_filters, kernel_size=hparams[HP_CONV_KERNEL_SIZE], padding='same', activation='relu'))\n        model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding='same'))\n        conv_filters *= 2\n    model.add(tf.keras.layers.Flatten())\n    model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))\n    dense_neurons = 32\n    for _ in xrange(hparams[HP_DENSE_LAYERS]):\n        model.add(tf.keras.layers.Dense(dense_neurons, activation='relu'))\n        dense_neurons *= 2\n    model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation='softmax'))\n    model.compile(loss='sparse_categorical_crossentropy', optimizer=hparams[HP_OPTIMIZER], metrics=['accuracy'])\n    return model", "docstring": "Create a Keras model with the given hyperparameters.\n\nArgs:\nhparams: A dict mapping hyperparameters in `HPARAMS` to values.\nseed: A hashable object to be used as a random seed (e.g., to\nconstruct dropout layers in the model).\n\nReturns:\nA compiled Keras model.", "source": "codesearchnet"}
{"code": "def convert_elementwise_sub(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting elementwise_sub ...')\n    model0 = layers[inputs[0]]\n    model1 = layers[inputs[1]]\n    if (names == 'short'):\n        tf_name = ('S' + random_string(7))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    sub = keras.layers.Subtract(name=tf_name)\n    layers[scope_name] = sub([model0, model1])", "docstring": "Convert elementwise subtraction.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    \n    \n    if not self._line_structures:\n      raise errors.UnableToParseFile(\n          'Line structure undeclared, unable to proceed.')\n\n    encoding = self._ENCODING or parser_mediator.codepage\n    text_file_object = text_file.TextFile(file_object, encoding=encoding)\n\n    try:\n      line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)\n    except UnicodeDecodeError:\n      raise errors.UnableToParseFile(\n          'Not a text file or encoding not supported.')\n\n    if not line:\n      raise errors.UnableToParseFile('Not a text file.')\n\n    if len(line) == self.MAX_LINE_LENGTH or len(\n        line) == self.MAX_LINE_LENGTH - 1:\n      logger.debug((\n          'Trying to read a line and reached the maximum allowed length of '\n          '{0:d}. The last few bytes of the line are: {1:s} [parser '\n          '{2:s}]').format(\n              self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME))\n\n    if not self._IsText(line):\n      raise errors.UnableToParseFile('Not a text file, unable to proceed.')\n\n    if not self.VerifyStructure(parser_mediator, line):\n      raise errors.UnableToParseFile('Wrong file structure.')\n\n    consecutive_line_failures = 0\n    index = None\n    \n    self._current_offset = 0\n    \n    while line:\n      if parser_mediator.abort:\n        break\n      parsed_structure = None\n      use_key = None\n      \n      for index, (key, structure) in enumerate(self._line_structures):\n        try:\n          parsed_structure = structure.parseString(line)\n        except pyparsing.ParseException:\n          pass\n        if parsed_structure:\n          use_key = key\n          break\n\n      if parsed_structure:\n        self.ParseRecord(parser_mediator, use_key, parsed_structure)\n        consecutive_line_failures = 0\n        if index is not None and index != 0:\n          key_structure = self._line_structures.pop(index)\n          self._line_structures.insert(0, key_structure)\n      else:\n        if len(line) > 80:\n          line = '{0:s}...'.format(line[:77])\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse log line: {0:s} at offset: {1:d}'.format(\n                repr(line), self._current_offset))\n        consecutive_line_failures += 1\n        if (consecutive_line_failures >\n            self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):\n          raise errors.UnableToParseFile(\n              'more than {0:d} consecutive failures to parse lines.'.format(\n                  self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))\n\n      self._current_offset = text_file_object.get_offset()\n\n      try:\n        line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)\n      except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to read and decode log line at offset {0:d}'.format(\n                self._current_offset))\n        break", "docstring": "Parses a text file-like object using a pyparsing definition.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def Write2000256List(self, arr):\n    for item in arr:\n        ba = bytearray(binascii.unhexlify(item))\n        ba.reverse()\n        self.WriteBytes(ba)", "docstring": "Write an array of 64 byte items to the stream.\n\nArgs:\narr (list): a list of 2000 items of 64 bytes in size.", "source": "codesearchnet"}
{"code": "def build_genotype(gt_call):\n    \n    gt_obj = dict(\n        sample_id = gt_call['individual_id'],\n        display_name = gt_call['display_name'],\n        genotype_call = gt_call['genotype_call'],\n        allele_depths = [gt_call['ref_depth'], gt_call['alt_depth']],\n        read_depth = gt_call['read_depth'],\n        genotype_quality = gt_call['genotype_quality']\n    )\n    \n    return gt_obj", "docstring": "Build a genotype call\n\nArgs:\ngt_call(dict)\n\nReturns:\ngt_obj(dict)\n\ngt_call = dict(\nsample_id = str,\ndisplay_name = str,\ngenotype_call = str,\nallele_depths = list, # int\nread_depth = int,\ngenotype_quality = int,\n)", "source": "juraj-google-style"}
{"code": "def get_supported_features_for_model_type(model_type: str, model_name: Optional[str]=None) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:\n    model_type = model_type.lower()\n    if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE:\n        model_type_and_model_name = f'{model_type} ({model_name})' if model_name else model_type\n        raise KeyError(f'{model_type_and_model_name} is not supported yet. Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. If you want to support {model_type} please propose a PR or open up an issue.')\n    return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]", "docstring": "Tries to retrieve the feature -> OnnxConfig constructor map from the model type.\n\nArgs:\nmodel_type (`str`):\nThe model type to retrieve the supported features for.\nmodel_name (`str`, *optional*):\nThe name attribute of the model object, only used for the exception message.\n\nReturns:\nThe dictionary mapping each feature to a corresponding OnnxConfig constructor.", "source": "github-repos"}
{"code": "def save_link(self, path_info):\n    assert (path_info['scheme'] == 'local')\n    path = path_info['path']\n    if (not os.path.exists(path)):\n        return\n    (mtime, _) = get_mtime_and_size(path)\n    inode = get_inode(path)\n    relpath = os.path.relpath(path, self.root_dir)\n    cmd = 'REPLACE INTO {}(path, inode, mtime) VALUES (\"{}\", {}, \"{}\")'.format(self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime)\n    self._execute(cmd)", "docstring": "Adds the specified path to the list of links created by dvc. This\nlist is later used on `dvc checkout` to cleanup old links.\n\nArgs:\npath_info (dict): path info to add to the list of links.", "source": "codesearchnet"}
{"code": "def Sign(message, private_key):\n        \n\n        hash = hashlib.sha256(binascii.unhexlify(message)).hexdigest()\n\n        v, r, s = bitcoin.ecdsa_raw_sign(hash, private_key)\n\n        rb = bytearray(r.to_bytes(32, 'big'))\n        sb = bytearray(s.to_bytes(32, 'big'))\n\n        sig = rb + sb\n\n        return sig", "docstring": "Sign the message with the given private key.\n\nArgs:\nmessage (str): message to be signed\nprivate_key (str): 32 byte key as a double digit hex string (e.g. having a length of 64)\nReturns:\nbytearray: the signature of the message.", "source": "juraj-google-style"}
{"code": "def __init__(self, path=None):\n        \n        self.path = None\n        if path is None:\n            self.path = self.get_working_directory()\n        else:\n            self.path = path\n        assert self.exists()", "docstring": "Initialize a new Vcs object for a repository located at `path`.\nIf `path` is `None`, then `get_working_directory` is used to identify\nthe path.\n\nArgs:\npath (str) - optional. The path to the repo working directory.", "source": "juraj-google-style"}
{"code": "def _ensure_tuple(item):\n    \n    if isinstance(item, tuple):\n        return item\n    elif isinstance(item, list):\n        return tuple(item)\n    elif isinstance(item, np.ndarray):\n        return tuple(item.tolist())\n    else:\n        raise NotImplementedError", "docstring": "Simply ensure that the passed item is a tuple.  If it is not, then\nconvert it if possible, or raise a NotImplementedError\n\nArgs:\nitem: the item that needs to become a tuple\n\nReturns:\nthe item casted as a tuple\n\nRaises:\nNotImplementedError: if converting the given item to a tuple\nis not implemented.", "source": "juraj-google-style"}
{"code": "def google_api(config, task):\n    if config.verbose:\n        print('GOOGLE_API', task['api'], task['version'], task['function'])\n    api_call = {'auth': task['auth'], 'api': task['api'], 'version': task['version'], 'function': task['function'], 'iterate': task.get('iterate', False), 'limit': task.get('limit'), 'key': task.get('key', config.key), 'labels': task.get('labels'), 'headers': task.get('headers')}\n    append = task.get('append')\n    results = google_api_build_results(config, task['auth'], api_call, task.get('results', {}))\n    errors = google_api_build_errors(config, task['auth'], api_call, task.get('errors', {}))\n    if 'kwargs' in task:\n        kwargs_list = task['kwargs'] if isinstance(task['kwargs'], (list, tuple)) else [task['kwargs']]\n    elif 'kwargs_remote' in task:\n        kwargs_list = get_rows(config, task['auth'], task['kwargs_remote'], as_object=True)\n    else:\n        kwargs_list = [{}]\n\n    def google_api_combine():\n        for kwargs in kwargs_list:\n            api_call['kwargs'] = kwargs\n            google_api_initilaize(config, api_call, task.get('alias'))\n            yield from google_api_execute(config, task['auth'], api_call, results, errors, append)\n    if append:\n        results['bigquery']['schema'].extend(append)\n    return put_rows(config, task['auth'], results, google_api_combine())", "docstring": "Task handler for recipe, delegates all JSON parameters to functions.\n\nExecutes the following steps:\n1. Define the API call.\n2. Define the results destination.\n3. Define the error destination.\n\nThe results table for BigQuery is created first as blank, this allows\nwrites from multiple API calls to aggregate into a single table.\n\nThe API call can be specified via kwargs or kwargs_remote.\nkwargs - hard coded values for the API call as a dictionary.\nkwargs_remote - values loaded from a source such as BigQuery.\n\nArgs:\nNone, all parameters are exposed via task.\n\nReturns:\nNone, all data is read and written as a side effect.\n\nRaises:\nValueError: If a required key in the recipe is missing.", "source": "github-repos"}
{"code": "def gets(self, key, default=None, cas_default=None):\n        \n        defaults = (default, cas_default)\n        return self._fetch_cmd(b'gets', [key], True).get(key, defaults)", "docstring": "The memcached \"gets\" command for one key, as a convenience.\n\nArgs:\nkey: str, see class docs for details.\ndefault: value that will be returned if the key was not found.\ncas_default: same behaviour as default argument.\n\nReturns:\nA tuple of (value, cas)\nor (default, cas_defaults) if the key was not found.", "source": "juraj-google-style"}
{"code": "def _get_sql_args(parser, args=None):\n    overrides = None\n    if (args is None):\n        tokens = []\n    elif isinstance(args, basestring):\n        command_line = ' '.join(args.split('\\n'))\n        tokens = shlex.split(command_line)\n    elif isinstance(args, dict):\n        overrides = args\n        tokens = []\n    else:\n        tokens = args\n    args = ({} if (parser is None) else vars(parser.parse_args(tokens)))\n    if overrides:\n        args.update(overrides)\n    return {arg: value for (arg, value) in args.items() if (value is not None)}", "docstring": "Parse a set of %%sql arguments or get the default value of the arguments.\n\nArgs:\nparser: the argument parser to use.\nargs: the argument flags. May be a string or a list. If omitted the empty string is used so\nwe can get the default values for the arguments. These are all used to override the\narg parser. Alternatively args may be a dictionary, in which case it overrides the\ndefault values from the arg parser.\nReturns:\nA dictionary of argument names and values.", "source": "codesearchnet"}
{"code": "def _get_resource_from_obj(self, resource):\n        \n        \n        if isinstance(resource, str):\n            if is_valid_uuid(resource) is False:\n                raise HDXError('%s is not a valid resource id!' % resource)\n            resource = hdx.data.resource.Resource.read_from_hdx(resource, configuration=self.configuration)\n        elif isinstance(resource, dict):\n            resource = hdx.data.resource.Resource(resource, configuration=self.configuration)\n        if not isinstance(resource, hdx.data.resource.Resource):\n            raise HDXError('Type %s cannot be added as a resource!' % type(resource).__name__)\n        return resource", "docstring": "Add new or update existing resource in dataset with new metadata\n\nArgs:\nresource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary\n\nReturns:\nhdx.data.resource.Resource: Resource object", "source": "juraj-google-style"}
{"code": "def dense_to_sparse(x, ignore_value=None, name=None):\n    with tf.compat.v1.name_scope(name, 'dense_to_sparse', [x, ignore_value]):\n        x = tf.convert_to_tensor(value=x, name='x')\n        if (ignore_value is None):\n            if (x.dtype.base_dtype == tf.string):\n                ignore_value = ''\n            else:\n                ignore_value = x.dtype.as_numpy_dtype(0)\n            ignore_value = tf.cast(ignore_value, x.dtype, name='ignore_value')\n        indices = tf.where(tf.not_equal(x, ignore_value), name='indices')\n        return tf.SparseTensor(indices=indices, values=tf.gather_nd(x, indices, name='values'), dense_shape=tf.shape(input=x, out_type=tf.int64, name='dense_shape'))", "docstring": "Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.\n\nArgs:\nx: A `Tensor`.\nignore_value: Entries in `x` equal to this value will be\nabsent from the return `SparseTensor`. If `None`, default value of\n`x` dtype will be used (e.g. '' for `str`, 0 for `int`).\nname: Python `str` prefix for ops created by this function.\n\nReturns:\nsparse_x: A `tf.SparseTensor` with the same shape as `x`.\n\nRaises:\nValueError: when `x`'s rank is `None`.", "source": "codesearchnet"}
{"code": "def with_extrapolation(points, noise, n_points):\n    \n    n_points = 10\n    return kalman_filter(extrapolate_points(points, n_points) + points, noise)[n_points:]", "docstring": "Smooths a set of points, but it extrapolates some points at the beginning\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nnoise (float): Expected noise, the higher it is the more the path will\nbe smoothed.\nReturns:\n:obj:`list` of :obj:`Point`", "source": "juraj-google-style"}
{"code": "def _save_model(self, epoch, logs):\n    logs = logs or {}\n    if isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period:\n        logs = tf_utils.sync_to_numpy_or_python_type(logs)\n        self.epochs_since_last_save = 0\n        filepath = self._get_file_path(epoch, logs)\n        try:\n            if self.save_best_only:\n                current = logs.get(self.monitor)\n                if current is None:\n                    logging.warning('Can save best model only with %s available, skipping.', self.monitor)\n                elif self.monitor_op(current, self.best):\n                    if self.verbose > 0:\n                        print('\\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % (epoch + 1, self.monitor, self.best, current, filepath))\n                    self.best = current\n                    if self.save_weights_only:\n                        self.model.save_weights(filepath, overwrite=True, options=self._options)\n                    else:\n                        self.model.save(filepath, overwrite=True, options=self._options)\n                elif self.verbose > 0:\n                    print('\\nEpoch %05d: %s did not improve from %0.5f' % (epoch + 1, self.monitor, self.best))\n            else:\n                if self.verbose > 0:\n                    print('\\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))\n                if self.save_weights_only:\n                    self.model.save_weights(filepath, overwrite=True, options=self._options)\n                else:\n                    self.model.save(filepath, overwrite=True, options=self._options)\n            self._maybe_remove_file()\n        except IsADirectoryError as e:\n            raise IOError('Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}'.format(filepath))\n        except IOError as e:\n            if 'is a directory' in str(e.args[0]).lower():\n                raise IOError('Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}'.format(filepath))\n            raise e", "docstring": "Saves the model.\n\nArgs:\nepoch: the epoch this iteration is in.\nlogs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.", "source": "github-repos"}
{"code": "def pull_doc(self, document):\n        \n        msg = self._protocol.create('PULL-DOC-REQ')\n        reply = self._send_message_wait_for_reply(msg)\n        if reply is None:\n            raise RuntimeError(\"Connection to server was lost\")\n        elif reply.header['msgtype'] == 'ERROR':\n            raise RuntimeError(\"Failed to pull document: \" + reply.content['text'])\n        else:\n            reply.push_to_document(document)", "docstring": "Pull a document from the server, overwriting the passed-in document\n\nArgs:\ndocument : (Document)\nThe document to overwrite with server content.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def MakeMixture(metapmf, name='mix'):\n    mix = Pmf(name=name)\n    for (pmf, p1) in metapmf.Items():\n        for (x, p2) in pmf.Items():\n            mix.Incr(x, (p1 * p2))\n    return mix", "docstring": "Make a mixture distribution.\n\nArgs:\nmetapmf: Pmf that maps from Pmfs to probs.\nname: string name for the new Pmf.\n\nReturns: Pmf object.", "source": "codesearchnet"}
{"code": "def separate(df, column, into, sep='[\\\\W_]+', remove=True, convert=False, extra='drop', fill='right'):\n    assert isinstance(into, (tuple, list))\n    if isinstance(sep, (tuple, list)):\n        inds = ([0] + list(sep))\n        if (len(inds) > len(into)):\n            if (extra == 'drop'):\n                inds = inds[:(len(into) + 1)]\n            elif (extra == 'merge'):\n                inds = (inds[:len(into)] + [None])\n        else:\n            inds = (inds + [None])\n        splits = df[column].map((lambda x: [(str(x)[slice(inds[i], inds[(i + 1)])] if (i < (len(inds) - 1)) else np.nan) for i in range(len(into))]))\n    else:\n        maxsplit = ((len(into) - 1) if (extra == 'merge') else 0)\n        splits = df[column].map((lambda x: re.split(sep, x, maxsplit)))\n    right_filler = (lambda x: (x + [np.nan for i in range((len(into) - len(x)))]))\n    left_filler = (lambda x: ([np.nan for i in range((len(into) - len(x)))] + x))\n    if (fill == 'right'):\n        splits = [right_filler(x) for x in splits]\n    elif (fill == 'left'):\n        splits = [left_filler(x) for x in splits]\n    for (i, split_col) in enumerate(into):\n        df[split_col] = [(x[i] if (not (x[i] == '')) else np.nan) for x in splits]\n    if convert:\n        df = convert_type(df, into)\n    if remove:\n        df.drop(column, axis=1, inplace=True)\n    return df", "docstring": "Splits columns into multiple columns.\n\nArgs:\ndf (pandas.DataFrame): DataFrame passed in through the pipe.\ncolumn (str, symbolic): Label of column to split.\ninto (list): List of string names for new columns.\n\nKwargs:\nsep (str or list): If a string, the regex string used to split the\ncolumn. If a list, a list of integer positions to split strings\non.\nremove (bool): Boolean indicating whether to remove the original column.\nconvert (bool): Boolean indicating whether the new columns should be\nconverted to the appropriate type.\nextra (str): either `'drop'`, where split pieces beyond the specified\nnew columns are dropped, or `'merge'`, where the final split piece\ncontains the remainder of the original column.\nfill (str): either `'right'`, where `np.nan` values are filled in the\nright-most columns for missing pieces, or `'left'` where `np.nan`\nvalues are filled in the left-most columns.", "source": "codesearchnet"}
{"code": "def backend_config_to_configparser(config):\n    \n    def get_store():\n        return config.get('store')\n\n    def get_day_start():\n        day_start = config.get('day_start')\n        if day_start:\n            day_start = day_start.strftime('%H:%M:%S')\n        return day_start\n\n    def get_fact_min_delta():\n        return text_type(config.get('fact_min_delta'))\n\n    def get_tmpfile_path():\n        return text_type(config.get('tmpfile_path'))\n\n    def get_db_engine():\n        return text_type(config.get('db_engine'))\n\n    def get_db_path():\n        return text_type(config.get('db_path'))\n\n    def get_db_host():\n        return text_type(config.get('db_host'))\n\n    def get_db_port():\n        return text_type(config.get('db_port'))\n\n    def get_db_name():\n        return text_type(config.get('db_name'))\n\n    def get_db_user():\n        return text_type(config.get('db_user'))\n\n    def get_db_password():\n        return text_type(config.get('db_password'))\n\n    cp_instance = SafeConfigParser()\n    cp_instance.add_section('Backend')\n    cp_instance.set('Backend', 'store', get_store())\n    cp_instance.set('Backend', 'day_start', get_day_start())\n    cp_instance.set('Backend', 'fact_min_delta', get_fact_min_delta())\n    cp_instance.set('Backend', 'tmpfile_path', get_tmpfile_path())\n    cp_instance.set('Backend', 'db_engine', get_db_engine())\n    cp_instance.set('Backend', 'db_path', get_db_path())\n    cp_instance.set('Backend', 'db_host', get_db_host())\n    cp_instance.set('Backend', 'db_port', get_db_port())\n    cp_instance.set('Backend', 'db_name', get_db_name())\n    cp_instance.set('Backend', 'db_user', get_db_user())\n    cp_instance.set('Backend', 'db_password', get_db_password())\n\n    return cp_instance", "docstring": "Return a ConfigParser instance representing a given backend config dictionary.\n\nArgs:\nconfig (dict): Dictionary of config key/value pairs.\n\nReturns:\nSafeConfigParser: SafeConfigParser instance representing config.\n\nNote:\nWe do not provide *any* validation about mandatory values what so ever.", "source": "juraj-google-style"}
{"code": "def set_attribute(self, key, value):\n        \n        if not isinstance(key, str) or not isinstance(value, str):\n            raise ValueError(\"The arguments 'key' and 'value' must both be \"\n                             \"strings. Instead they are {} and {}.\".format(\n                                 key, value))\n        self.extra_data[key] = value", "docstring": "Add a key-value pair to the extra_data dict.\n\nThis can be used to add attributes that are not available when\nray.profile was called.\n\nArgs:\nkey: The attribute name.\nvalue: The attribute value.", "source": "juraj-google-style"}
{"code": "def create(self, domain, type_name, search_command, body):\n        \n        return self._request(domain, type_name, search_command, 'POST', body)", "docstring": "Create entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\nbody (str): JSON serialized data.", "source": "juraj-google-style"}
{"code": "def results_tc(self, key, value):\n    if os.access(self.default_args.tc_out_path, os.W_OK):\n        results_file = '{}/results.tc'.format(self.default_args.tc_out_path)\n    else:\n        results_file = 'results.tc'\n    new = True\n    open(results_file, 'a').close()\n    with open(results_file, 'r+') as fh:\n        results = ''\n        for line in fh.read().strip().split('\\n'):\n            if (not line):\n                continue\n            try:\n                (k, v) = line.split(' = ')\n            except ValueError:\n                (k, v) = line.split(' =')\n            if (k == key):\n                v = value\n                new = False\n            if (v is not None):\n                results += '{} = {}\\n'.format(k, v)\n        if (new and (value is not None)):\n            results += '{} = {}\\n'.format(key, value)\n        fh.seek(0)\n        fh.write(results)\n        fh.truncate()", "docstring": "Write data to results_tc file in TcEX specified directory.\n\nThe TcEx platform support persistent values between executions of the App.  This\nmethod will store the values for TC to read and put into the Database.\n\nArgs:\nkey (string): The data key to be stored.\nvalue (string): The data value to be stored.", "source": "codesearchnet"}
{"code": "def plot_probabilities_histogram(Y_p, title=None):\n    if (Y_p.ndim > 1):\n        msg = f'Arg Y_p should be a 1-dimensional np.ndarray, not of shape {Y_p.shape}.'\n        raise ValueError(msg)\n    plt.hist(Y_p, bins=20)\n    plt.xlim((0, 1.025))\n    plt.xlabel('Probability')\n    plt.ylabel('\n    if isinstance(title, str):\n        plt.title(title)\n    plt.show()", "docstring": "Plot a histogram from a numpy array of probabilities\n\nArgs:\nY_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])", "source": "codesearchnet"}
{"code": "def _validate_representative_dataset(representative_dataset: rd.RepresentativeDatasetOrMapping, signature_keys: Collection[str]) -> None:\n    if isinstance(representative_dataset, Mapping):\n        if set(signature_keys) != set(representative_dataset.keys()):\n            raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_keys)}, representative dataset map: {set(representative_dataset.keys())}.')\n    elif len(signature_keys) > 1:\n        raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. Please provide a map of {{signature_key -> dataset}} with more than one signature key.')", "docstring": "Validates the representative dataset, based on the signature keys.\n\nRepresentative dataset can be provided in two different forms: a single\ninstance of `RepresentativeDataset` or a map of signature key to the\ncorresponding `RepresentativeDataset`. These have a relationship with\n`signature_keys`.\n\nThis function validates the following conditions:\n* If `len(signature_keys) > 1`, then `representative_dataset` should be a\nmapping where the keys exactly match the elements in `signature_keys`.\n* If `len(signature_keys) == 1`, then both a mapping and a single instance of\n`RepresentativeDataset` are allowed.\n* This function also assumes `len(signature_keys) > 0`.\n\nArgs:\nrepresentative_dataset: A `RepresentativeDataset` or a map of string to\n`RepresentativeDataset` to be validated.\nsignature_keys: A collection of strings that contains the signature keys,\neach identifying a `SignatureDef`.\n\nRaises:\nValueError: Iff `representative_dataset` does not satisfy the conditions\nabove.", "source": "github-repos"}
{"code": "def print_info(self, capture):\n        \n        self.frame_offset += 1\n        ret, frame = capture.read()\n        if ret:\n            print('Capture Information')\n            print('\\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))\n            print('\\tColor channels:   {}'.format(frame.shape[2] if\n                                                  len(frame.shape) > 2 else 1))\n            print('\\tColor range:      {}-{}'.format(np.min(frame),\n                                                     np.max(frame)))\n            print('\\tdtype:            {}'.format(frame.dtype))\n        else:\n            print('No source found.')", "docstring": "Prints information about the unprocessed image.\n\nReads one frame from the source to determine image colors, dimensions\nand data types.\n\nArgs:\ncapture: the source to read from.", "source": "juraj-google-style"}
{"code": "def _zip_from_file_patterns(root, includes, excludes, follow_symlinks):\n    logger.info('lambda: base directory: %s', root)\n    files = list(_find_files(root, includes, excludes, follow_symlinks))\n    if (not files):\n        raise RuntimeError('Empty list of files for Lambda payload. Check your include/exclude options for errors.')\n    logger.info('lambda: adding %d files:', len(files))\n    for fname in files:\n        logger.debug('lambda: + %s', fname)\n    return _zip_files(files, root)", "docstring": "Generates a ZIP file in-memory from file search patterns.\n\nArgs:\nroot (str): base directory to list files from.\nincludes (list[str]): inclusion patterns. Only files  matching those\npatterns will be included in the result.\nexcludes (list[str]): exclusion patterns. Files matching those\npatterns will be excluded from the result. Exclusions take\nprecedence over inclusions.\nfollow_symlinks (bool): If true, symlinks will be included in the\nresulting zip file\n\nSee Also:\n:func:`_zip_files`, :func:`_find_files`.\n\nRaises:\nRuntimeError: when the generated archive would be empty.", "source": "codesearchnet"}
{"code": "def notify_txn_invalid(self, txn_id, message=None, extended_data=None):\n    invalid_txn_info = {'id': txn_id}\n    if (message is not None):\n        invalid_txn_info['message'] = message\n    if (extended_data is not None):\n        invalid_txn_info['extended_data'] = extended_data\n    with self._lock:\n        for (batch_id, txn_ids) in self._batch_info.items():\n            if (txn_id in txn_ids):\n                if (batch_id not in self._invalid):\n                    self._invalid[batch_id] = [invalid_txn_info]\n                else:\n                    self._invalid[batch_id].append(invalid_txn_info)\n                self._pending.discard(batch_id)\n                self._update_observers(batch_id, ClientBatchStatus.INVALID)\n                return", "docstring": "Adds a batch id to the invalid cache along with the id of the\ntransaction that was rejected and any error message or extended data.\nRemoves that batch id from the pending set. The cache is only\ntemporary, and the batch info will be purged after one hour.\n\nArgs:\ntxn_id (str): The id of the invalid batch\nmessage (str, optional): Message explaining why batch is invalid\nextended_data (bytes, optional): Additional error data", "source": "codesearchnet"}
{"code": "def load(cls, path: str, password: str=None) -> 'Account':\n    with open(path) as f:\n        keystore = json.load(f)\n    if (not check_keystore_json(keystore)):\n        raise ValueError('Invalid keystore file')\n    return Account(keystore, password, path=path)", "docstring": "Load an account from a keystore file.\n\nArgs:\npath: full path to the keyfile\npassword: the password to decrypt the key file or `None` to leave it encrypted", "source": "codesearchnet"}
{"code": "def splitext2(filepath):\n    (root, filename) = os.path.split(safepath(filepath))\n    (filename, ext) = os.path.splitext(safepath(filename))\n    return (root, filename, ext)", "docstring": "Split filepath into root, filename, ext\n\nArgs:\nfilepath (str, path): file path\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _GetRowValue(self, query_hash, row, value_name):\n    keys_name_to_index_map = self._keys_per_query.get(query_hash, None)\n    if (not keys_name_to_index_map):\n        keys_name_to_index_map = {name: index for (index, name) in enumerate(row.keys())}\n        self._keys_per_query[query_hash] = keys_name_to_index_map\n    value_index = keys_name_to_index_map.get(value_name)\n    return row[value_index]", "docstring": "Retrieves a value from the row.\n\nArgs:\nquery_hash (int): hash of the query, that uniquely identifies the query\nthat produced the row.\nrow (sqlite3.Row): row.\nvalue_name (str): name of the value.\n\nReturns:\nobject: value.", "source": "codesearchnet"}
{"code": "def cartesian(self, subsets=None, step_pixels=100, max_distance_pixels=150, *args, **kwargs):\n    n = Cartesian.read_cellframe(self, *args, subsets=subsets, step_pixels=step_pixels, max_distance_pixels=max_distance_pixels, prune_neighbors=False, **kwargs)\n    if ('measured_regions' in kwargs):\n        n.measured_regions = kwargs['measured_regions']\n    else:\n        n.measured_regions = self.get_measured_regions()\n    if ('measured_phenotypes' in kwargs):\n        n.measured_phenotypes = kwargs['measured_phenotypes']\n    else:\n        n.measured_phenotypes = self.phenotypes\n    n.microns_per_pixel = self.microns_per_pixel\n    return n", "docstring": "Return a class that can be used to create honeycomb plots\n\nArgs:\nsubsets (list): list of SubsetLogic objects\nstep_pixels (int): distance between hexagons\nmax_distance_pixels (int): the distance from each point by which to caclulate the quanitty of the phenotype for that area\n\nReturns:\nCartesian: returns a class that holds the layout of the points to plot.", "source": "codesearchnet"}
{"code": "def rank(input, name=None):\n    return rank_internal(input, name, optimize=True)", "docstring": "Returns the rank of a tensor.\n\nSee also `tf.shape`.\n\nReturns a 0-D `int32` `Tensor` representing the rank of `input`.\n\nFor example:\n\n```python\n# shape of tensor 't' is [2, 2, 3]\nt = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\ntf.rank(t)  # 3\n```\n\n**Note**: The rank of a tensor is not the same as the rank of a matrix. The\nrank of a tensor is the number of indices required to uniquely select each\nelement of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\nArgs:\ninput: A `Tensor` or `SparseTensor`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `int32`.\n\n@compatibility(numpy)\nEquivalent to np.ndim\n@end_compatibility", "source": "github-repos"}
{"code": "def get_valid_build_systems(working_dir, package=None):\n    from rez.plugin_managers import plugin_manager\n    from rez.exceptions import PackageMetadataError\n    try:\n        package = (package or get_developer_package(working_dir))\n    except PackageMetadataError:\n        pass\n    if package:\n        if (getattr(package, 'build_command', None) is not None):\n            buildsys_name = 'custom'\n        else:\n            buildsys_name = getattr(package, 'build_system', None)\n        if buildsys_name:\n            cls = plugin_manager.get_plugin_class('build_system', buildsys_name)\n            return [cls]\n    clss = []\n    for buildsys_name in get_buildsys_types():\n        cls = plugin_manager.get_plugin_class('build_system', buildsys_name)\n        if cls.is_valid_root(working_dir, package=package):\n            clss.append(cls)\n    child_clss = set((x.child_build_system() for x in clss))\n    clss = list((set(clss) - child_clss))\n    return clss", "docstring": "Returns the build system classes that could build the source in given dir.\n\nArgs:\nworking_dir (str): Dir containing the package definition and potentially\nbuild files.\npackage (`Package`): Package to be built. This may or may not be needed\nto determine the build system. For eg, cmake just has to look for\na CMakeLists.txt file, whereas the 'build_command' package field\nmust be present for the 'custom' build system type.\n\nReturns:\nList of class: Valid build system class types.", "source": "codesearchnet"}
{"code": "def router_id(self, **kwargs):\n    router_id = kwargs.pop('router_id')\n    rbridge_id = kwargs.pop('rbridge_id', '1')\n    callback = kwargs.pop('callback', self._callback)\n    rid_args = dict(rbridge_id=rbridge_id, router_id=router_id)\n    config = self._rbridge.rbridge_id_ip_rtm_config_router_id(**rid_args)\n    return callback(config)", "docstring": "Configures device's Router ID.\n\nArgs:\nrouter_id (str): Router ID for the device.\nrbridge_id (str): The rbridge ID of the device on which BGP will be\nconfigured in a VCS fabric.\ncallback (function): A function executed upon completion of the\nmethod.  The only parameter passed to `callback` will be the\n``ElementTree`` `config`.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nKeyError: if `router_id` is not specified.\n\nExamples:\n>>> import pynos.device\n>>> conn = ('10.24.39.211', '22')\n>>> auth = ('admin', 'password')\n>>> with pynos.device.Device(conn=conn, auth=auth) as dev:\n...     output = dev.system.router_id(router_id='10.24.39.211',\n...     rbridge_id='225')\n...     dev.system.router_id() # doctest: +IGNORE_EXCEPTION_DETAIL\nTraceback (most recent call last):\nKeyError", "source": "codesearchnet"}
{"code": "def process_actions(self, actions):\n    notices = {}\n    notification_contacts = {}\n    for action in actions:\n        resource = action['resource']\n        action_status = ActionStatus.SUCCEED\n        try:\n            if (action['action'] == AuditActions.REMOVE):\n                action_status = self.process_action(resource, AuditActions.REMOVE)\n                if (action_status == ActionStatus.SUCCEED):\n                    db.session.delete(action['issue'].issue)\n            elif (action['action'] == AuditActions.STOP):\n                action_status = self.process_action(resource, AuditActions.STOP)\n                if (action_status == ActionStatus.SUCCEED):\n                    action['issue'].update({'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action']})\n            elif (action['action'] == AuditActions.FIXED):\n                db.session.delete(action['issue'].issue)\n            elif (action['action'] == AuditActions.ALERT):\n                action['issue'].update({'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action']})\n            db.session.commit()\n            if (action_status == ActionStatus.SUCCEED):\n                for owner in [dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}]:\n                    if (owner['value'] not in notification_contacts):\n                        contact = NotificationContact(type=owner['type'], value=owner['value'])\n                        notification_contacts[owner['value']] = contact\n                        notices[contact] = {'fixed': [], 'not_fixed': []}\n                    else:\n                        contact = notification_contacts[owner['value']]\n                    if (action['action'] == AuditActions.FIXED):\n                        notices[contact]['fixed'].append(action)\n                    else:\n                        notices[contact]['not_fixed'].append(action)\n        except Exception as ex:\n            self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(action['resource'].account.account_name, action['resource'].id, action['resource'], ex))\n    return notices", "docstring": "Process the actions we want to take\n\nArgs:\nactions (`list`): List of actions we want to take\n\nReturns:\n`list` of notifications", "source": "codesearchnet"}
{"code": "def __request_finish(self, queue_item, new_requests, request_failed=False):\n        \n\n        if self.__stopping:\n            return\n\n        del self.__threads[queue_item.get_hash()]\n\n        if request_failed:\n            new_queue_items = []\n            self.queue.move(queue_item, QueueItem.STATUS_ERRORED)\n        else:\n            self.routing.increase_route_count(queue_item.request)\n            new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)\n            self.queue.move(queue_item, QueueItem.STATUS_FINISHED)\n\n        try:\n            action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)\n        except Exception as e:\n            action = None\n            print(e)\n            print(traceback.format_exc())\n        \n        queue_item.decompose()\n\n        if action == CrawlerActions.DO_STOP_CRAWLING:\n            self.__should_stop = True\n\n        if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:\n            self.__should_spawn_new_requests = True", "docstring": "Called when the crawler finished the given queue item.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.\nnew_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.\nrequest_failed (bool): True if the request failed (if needs to be moved to errored).", "source": "juraj-google-style"}
{"code": "def __init__(self, env_id):\n        \n        self.env_id = env_id\n        self.env = gym.make(env_id)", "docstring": "Initialize OpenAI universe environment.\n\nArgs:\nenv_id: string with id/descriptor of the universe environment, e.g. 'HarvestDay-v0'.", "source": "juraj-google-style"}
{"code": "def updateParams(self, newvalues):\n    for (param, value) in newvalues.items():\n        if (param not in self.model.freeparams):\n            raise RuntimeError(\"Can't handle param: {0}\".format(param))\n    if newvalues:\n        self.model.updateParams(newvalues)\n        self._updateInternals()\n        self._paramsarray = None", "docstring": "Update model parameters and re-compute likelihoods.\n\nThis method is the **only** acceptable way to update model\nparameters. The likelihood is re-computed as needed\nby this method.\n\nArgs:\n`newvalues` (dict)\nA dictionary keyed by param name and with value as new\nvalue to set. Each parameter name must either be a\nvalid model parameter (in `model.freeparams`).", "source": "codesearchnet"}
{"code": "def _index_filter(index_data, filter_value, filter_operator, field_converter=None):\n        \n\n        filtered_data = []\n        if filter_operator == operator.eq:\n            if field_converter is not None:\n                filter_value = field_converter(filter_value)\n            \n            \n            filtered_data = index_data.get(filter_value)\n\n        else:\n            for field, data_obj_list in index_data.items():\n                if field_converter is not None:\n                    field = field_converter(field)\n\n                if filter_operator(field, filter_value):  \n                    filtered_data.extend(data_obj_list)\n                    \n                    \n\n        return filtered_data", "docstring": "Post Filter\n\nArgs:\nindex_data (dictionary): The indexed data for the provided field.\nfield (string): The field to filter on.\nfilter_value (string | list): The value to match.\nfilter_operator (string): The operator for comparison.\nfield_converter (method): A method used to convert the field before comparison.\n\nReturns:\n(list): Matching data objects", "source": "juraj-google-style"}
{"code": "def unpack_iterator_input(iterator):\n    try:\n        next_element = iterator.get_next()\n    except errors.OutOfRangeError:\n        raise RuntimeError('Your dataset iterator ran out of data; Make sure that your dataset can generate required number of samples.')\n    if isinstance(next_element, (list, tuple)):\n        if len(next_element) not in [2, 3]:\n            raise ValueError('Please provide model inputs as a list or tuple of 2 or 3 elements: (input, target) or (input, target, sample_weights) Received %s' % next_element)\n        if len(next_element) == 2:\n            x, y = next_element\n            weights = None\n        else:\n            x, y, weights = next_element\n    else:\n        x = next_element\n        y = None\n        weights = None\n    return (x, y, weights)", "docstring": "Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\nArgs:\niterator: Instance of a dataset iterator.\n\nReturns:\nTuple of tensors `x, y, weights`. `y` and `weights` entry may be None.", "source": "github-repos"}
{"code": "def delete_subscription(self, subscription_id):\n    return self.client._delete((self.url + 'subscriptions/{}'.format(subscription_id)), headers=self.get_headers())", "docstring": "Unsubscribe, delete the relationship of the customer with the plan.\n\nArgs:\nsubscription_id: Identification of the subscription.\n\nReturns:", "source": "codesearchnet"}
{"code": "def conformPadding(cls, chars):\n        \n        pad = chars\n        if pad and pad[0] not in PAD_MAP:\n            pad = cls.getPaddingChars(cls.getPaddingNum(pad))\n        return pad", "docstring": "Ensure alternate input padding formats are conformed\nto formats defined in PAD_MAP\n\nIf chars is already a format defined in PAD_MAP, then\nit is returned unmodified.\n\nExample::\n'#'    -> '#'\n'@@@@' -> '@@@@'\n'%04d' -> '#'\n\nArgs:\nchars (str): input padding chars\n\nReturns:\nstr: conformed padding chars\n\nRaises:\nValueError: If chars contains invalid padding characters", "source": "juraj-google-style"}
{"code": "def ExamineEvent(self, mediator, event):\n    event_data_type = getattr(event, 'data_type', '')\n    if (event_data_type == 'windows:registry:service'):\n        service = WindowsService.FromEvent(event)\n        self._service_collection.AddService(service)", "docstring": "Analyzes an event and creates Windows Services as required.\n\nAt present, this method only handles events extracted from the Registry.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "codesearchnet"}
{"code": "def parse_psqs(psqs_results_file):\n    psqs_results = pd.read_csv(psqs_results_file, sep='\\t', header=None)\n    psqs_results['pdb_file'] = psqs_results[0].apply((lambda x: str(x).strip('./').strip('.pdb')))\n    psqs_results = psqs_results.rename(columns={1: 'psqs_local', 2: 'psqs_burial', 3: 'psqs_contact', 4: 'psqs_total'}).drop(0, axis=1)\n    psqs_results['u_pdb'] = psqs_results['pdb_file'].apply((lambda x: (x.upper() if (len(x) == 4) else np.nan)))\n    psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply((lambda x: (x.split('_model1')[0] if (len(x) > 4) else np.nan)))\n    psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)]\n    return psqs_results", "docstring": "Parse a PSQS result file and returns a Pandas DataFrame of the results\n\nArgs:\npsqs_results_file: Path to psqs results file\n\nReturns:\nPandas DataFrame: Summary of PSQS results", "source": "codesearchnet"}
{"code": "def _sorted_results(self, results_dicts):\n    print('results dicts:', results_dicts)\n    sorted_dict = sorted(results_dicts, key=(lambda k: k['start_time']))\n    results = []\n    for entry in sorted_dict:\n        results.append(entry['dt'])\n    return results", "docstring": "Sorts dict of results based on log start_time.\n\nSorts the results and returns an array with only the values but sorted\nby oldest value first.value\n\nArgs:\nresults_dicts: List of result dicts\n\nReturns:\nList of only the time but sorted oldest first.", "source": "codesearchnet"}
{"code": "def fuse_awq_modules(model, quantization_config):\n    if isinstance(quantization_config, dict):\n        quantization_config = AwqConfig.from_dict(quantization_config)\n    backend = quantization_config.backend\n    modules_to_fuse = get_modules_to_fuse(model, quantization_config)\n    modules_to_not_convert = getattr(quantization_config, 'modules_to_not_convert', None)\n    if backend == AwqBackendPackingMethod.AUTOAWQ:\n        from awq.modules.fused.attn import QuantAttentionFused\n        from awq.modules.fused.mlp import QuantFusedMLP\n        from awq.modules.fused.norm import FasterTransformerRMSNorm\n    else:\n        raise ValueError('Fusing is only supported for the AutoAWQ backend')\n    fused_attention_modules = []\n    for name, module in model.named_modules():\n        if modules_to_not_convert is not None:\n            if any((module_name_to_not_convert in name for module_name_to_not_convert in modules_to_not_convert)):\n                continue\n        _fuse_awq_layernorm(modules_to_fuse['layernorm'], module, FasterTransformerRMSNorm)\n        if quantization_config.version != 'ipex':\n            _fuse_awq_mlp(model, name, modules_to_fuse['mlp'], module, QuantFusedMLP)\n        else:\n            logger.info('The IPEX version AWQ does not support fuse mlp for now.')\n        attention_has_been_fused = _fuse_awq_attention_layers(model, module, modules_to_fuse, name, QuantAttentionFused)\n        if attention_has_been_fused:\n            fused_attention_modules.append(name.split('.')[0])\n    if len(fused_attention_modules) > 0:\n        for module_name, module in model.named_modules():\n            if any((module_name in fused_attention_modules for fused_attention_parent_module in fused_attention_modules)):\n                if hasattr(module, 'config') and hasattr(module.config, '_attn_implementation'):\n                    module.config._attn_implementation = 'custom'\n    return model", "docstring": "Optionally fuse some modules in the model to speedup inference.\n\nArgs:\nmodel (`~PreTrainedModel`):\nThe model to fuse - note this model should have been converted into AWQ format beforehand.\nquantization_config (`Union[AwqConfig, dict]`):\nThe quantization configuration to use.", "source": "github-repos"}
{"code": "def dbmin_stddev(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmin_stddev`'.format(value))\n\n        self._dbmin_stddev = value", "docstring": "Corresponds to IDD Field `dbmin_stddev`\nStandard deviation of extreme annual minimum dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin_stddev`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def world_info(world_name, world_config=None, initial_indent='', next_indent='  '):\n    if (world_config is None):\n        for (config, _) in _iter_packages():\n            for world in config['maps']:\n                if (world['name'] == world_name):\n                    world_config = world\n    if (world_config is None):\n        raise HolodeckException((\"Couldn't find world \" + world_name))\n    second_indent = (initial_indent + next_indent)\n    agent_indent = (second_indent + next_indent)\n    sensor_indent = (agent_indent + next_indent)\n    print(initial_indent, world_config['name'])\n    print(second_indent, 'Resolution:', world_config['window_width'], 'x', world_config['window_height'])\n    print(second_indent, 'Agents:')\n    for agent in world_config['agents']:\n        print(agent_indent, 'Name:', agent['agent_name'])\n        print(agent_indent, 'Type:', agent['agent_type'])\n        print(agent_indent, 'Sensors:')\n        for sensor in agent['sensors']:\n            print(sensor_indent, sensor)", "docstring": "Gets and prints the information of a world.\n\nArgs:\nworld_name (str): the name of the world to retrieve information for\nworld_config (dict optional): A dictionary containing the world's configuration. Will find the config if None. Defaults to None.\ninitial_indent (str optional): This indent will apply to each output line. Defaults to \"\".\nnext_indent (str optional): This indent will be applied within each nested line. Defaults to \"  \".", "source": "codesearchnet"}
{"code": "def get_tests_from_description(name,\n                               descriptions,\n                               parsed=None):\n    \n    tests = []\n    if not parsed:\n        parsed = []\n\n    description = descriptions.get(name, None)\n    if not description:\n        raise IpaUtilsException(\n            'Test description file with name: %s cannot be located.'\n            % name\n        )\n\n    if description in parsed:\n        return tests\n\n    parsed.append(description)\n    test_data = get_yaml_config(description)\n\n    if 'tests' in test_data:\n        tests += test_data.get('tests')\n\n    if 'include' in test_data:\n        for description_name in test_data.get('include'):\n            tests += get_tests_from_description(\n                description_name,\n                descriptions,\n                parsed\n            )\n\n    return tests", "docstring": "Recursively collect all tests in test description.\n\nArgs:\nname (str): Yaml test description file name.\ndescriptions (dict): Dict of test description name\n(key) and absolute file paths\n(value).\nparsed (list): List of description paths which have\nalready been parsed to prevent infinte\nrecursion.\nReturns:\nA list of expanded test files.", "source": "juraj-google-style"}
{"code": "def _path_formatter(self, suffix):\n    if (suffix.lower() == 'mirror'):\n        path_items = [self.bucket, self.s3path]\n    else:\n        path_items = [self.bucket, self.s3path, suffix]\n    path = '/'.join(path_items)\n    s3_format = 's3:\n    formatted_path = path.replace('\n    full_path = s3_format.format(formatted_path)\n    return full_path", "docstring": "Format the s3 path properly.\n\nArgs:\nsuffix (str): suffix to add on to an s3 path\n\nReturns:\nstr: formatted path", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    olecf_file = pyolecf.file()\n    olecf_file.set_ascii_codepage(parser_mediator.codepage)\n\n    try:\n      olecf_file.open_file_object(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open file with error: {0!s}'.format(exception))\n      return\n\n    root_item = olecf_file.root_item\n    if not root_item:\n      return\n\n    \n    item_names = [item.name for item in root_item.sub_items]\n\n    \n    \n    \n    \n\n    item_names = frozenset(item_names)\n\n    try:\n      for plugin in self._plugins:\n        if parser_mediator.abort:\n          break\n\n        if not plugin.REQUIRED_ITEMS.issubset(item_names):\n          continue\n\n        try:\n          plugin.UpdateChainAndProcess(parser_mediator, root_item=root_item)\n\n        except Exception as exception:  \n          parser_mediator.ProduceExtractionWarning((\n              'plugin: {0:s} unable to parse OLECF file with error: '\n              '{1!s}').format(plugin.NAME, exception))\n\n      if self._default_plugin and not parser_mediator.abort:\n        try:\n          self._default_plugin.UpdateChainAndProcess(\n              parser_mediator, root_item=root_item)\n\n        except Exception as exception:  \n          parser_mediator.ProduceExtractionWarning((\n              'plugin: {0:s} unable to parse OLECF file with error: '\n              '{1!s}').format(self._default_plugin.NAME, exception))\n\n    finally:\n      olecf_file.close()", "docstring": "Parses an OLE Compound File (OLECF) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def get_structural_variant(self, variant):\n        \n        \n        \n        \n        query = {\n                'chrom': variant['chrom'],\n                'end_chrom': variant['end_chrom'],\n                'sv_type': variant['sv_type'],\n                '$and': [\n                    {'pos_left': {'$lte': variant['pos']}},\n                    {'pos_right': {'$gte': variant['pos']}},\n                ]\n            }\n\n        res = self.db.structural_variant.find(query).sort('pos_left',1)\n        match = None\n        distance = None\n        closest_hit = None\n        \n        \n        for hit in res:\n            \n            \n            \n            \n            if hit['end_left'] > variant['end']:\n                continue\n            if hit['end_right'] < variant['end']:\n                continue\n\n            \n            distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) + \n                        abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2))\n\n            \n            if closest_hit is None:\n                match = hit\n                closest_hit = distance\n                continue\n\n            \n            if distance < closest_hit:\n                \n                match = hit\n                \n                closest_hit = distance\n\n        return match", "docstring": "Check if there are any overlapping sv clusters\n\nSearch the sv variants with chrom start end_chrom end and sv_type\n\nArgs:\nvariant (dict): A variant dictionary\n\nReturns:\nvariant (dict): A variant dictionary", "source": "juraj-google-style"}
{"code": "def make_transaction(self):\n    if self.pk:\n        raise CannotRecreateTransactionOnRecurredCost('The transaction for this recurred cost has already been created. You cannot create it again.')\n    amount = self.recurring_cost.get_amount(self.billing_cycle)\n    if (not amount):\n        return None\n    self.transaction = Transaction.objects.create(description='Created by recurring cost', date=self.billing_cycle.date_range.lower)\n    splits = self.recurring_cost.splits.all().split(amount)\n    self.transaction.legs.add(Leg.objects.create(transaction=self.transaction, amount=Money(amount, self.recurring_cost.currency), account=self.recurring_cost.to_account))\n    for (split, split_amount) in splits:\n        if split_amount:\n            self.transaction.legs.add(Leg.objects.create(transaction=self.transaction, amount=Money((split_amount * (- 1)), self.recurring_cost.currency), account=split.from_account))\n    return self.transaction", "docstring": "Create the transaction for this RecurredCost\n\nMay only be used to create the RecurredCost's initial transaction.\n\nReturns:\nTransaction: The created transaction, also assigned to self.transaction. None if the amount is zero.", "source": "codesearchnet"}
{"code": "def metadata_path(self, m_path):\n        \n        if not m_path:\n            self.metadata_dir = None\n            self.metadata_file = None\n\n        else:\n            if not op.exists(m_path):\n                raise OSError('{}: file does not exist!'.format(m_path))\n\n            if not op.dirname(m_path):\n                self.metadata_dir = '.'\n            else:\n                self.metadata_dir = op.dirname(m_path)\n            self.metadata_file = op.basename(m_path)\n\n            \n            \n            self.update(parse_kegg_gene_metadata(self.metadata_path), overwrite=True)", "docstring": "Provide pointers to the paths of the metadata file\n\nArgs:\nm_path: Path to metadata file", "source": "juraj-google-style"}
{"code": "def parse_GSE(filepath):\n    gpls = {}\n    gsms = {}\n    series_counter = 0\n    database = None\n    metadata = {}\n    gse_name = None\n    with utils.smart_open(filepath) as soft:\n        groupper = groupby(soft, (lambda x: x.startswith('^')))\n        for (is_new_entry, group) in groupper:\n            if is_new_entry:\n                (entry_type, entry_name) = __parse_entry(next(group))\n                logger.debug(('%s: %s' % (entry_type.upper(), entry_name)))\n                if (entry_type == 'SERIES'):\n                    gse_name = entry_name\n                    series_counter += 1\n                    if (series_counter > 1):\n                        raise Exception('GSE file should contain only one series entry!')\n                    (is_data, data_group) = next(groupper)\n                    message = 'The key is not False, probably there is an error in the SOFT file'\n                    assert (not is_data), message\n                    metadata = parse_metadata(data_group)\n                elif (entry_type == 'SAMPLE'):\n                    (is_data, data_group) = next(groupper)\n                    gsms[entry_name] = parse_GSM(data_group, entry_name)\n                elif (entry_type == 'PLATFORM'):\n                    (is_data, data_group) = next(groupper)\n                    gpls[entry_name] = parse_GPL(data_group, entry_name)\n                elif (entry_type == 'DATABASE'):\n                    (is_data, data_group) = next(groupper)\n                    database_metadata = parse_metadata(data_group)\n                    database = GEODatabase(name=entry_name, metadata=database_metadata)\n                else:\n                    logger.error(('Cannot recognize type %s' % entry_type))\n    gse = GSE(name=gse_name, metadata=metadata, gpls=gpls, gsms=gsms, database=database)\n    return gse", "docstring": "Parse GSE SOFT file.\n\nArgs:\nfilepath (:obj:`str`): Path to GSE SOFT file.\n\nReturns:\n:obj:`GEOparse.GSE`: A GSE object.", "source": "codesearchnet"}
{"code": "def _cell_magic(line, query):\n    \n    args = magic_arguments.parse_argstring(_cell_magic, line)\n\n    params = []\n    if args.params is not None:\n        try:\n            params = _helpers.to_query_parameters(\n                ast.literal_eval(\"\".join(args.params))\n            )\n        except Exception:\n            raise SyntaxError(\n                \"--params is not a correctly formatted JSON string or a JSON \"\n                \"serializable dictionary\"\n            )\n\n    project = args.project or context.project\n    client = bigquery.Client(project=project, credentials=context.credentials)\n    bqstorage_client = _make_bqstorage_client(\n        args.use_bqstorage_api or context.use_bqstorage_api, context.credentials\n    )\n    job_config = bigquery.job.QueryJobConfig()\n    job_config.query_parameters = params\n    job_config.use_legacy_sql = args.use_legacy_sql\n    query_job = _run_query(client, query, job_config)\n\n    if not args.verbose:\n        display.clear_output()\n\n    result = query_job.to_dataframe(bqstorage_client=bqstorage_client)\n    if args.destination_var:\n        IPython.get_ipython().push({args.destination_var: result})\n    else:\n        return result", "docstring": "Underlying function for bigquery cell magic\n\nNote:\nThis function contains the underlying logic for the 'bigquery' cell\nmagic. This function is not meant to be called directly.\n\nArgs:\nline (str): \"%%bigquery\" followed by arguments as required\nquery (str): SQL query to run\n\nReturns:\npandas.DataFrame: the query results.", "source": "juraj-google-style"}
{"code": "def overlay(self, dimensions=None, **kwargs):\n        \n        if dimensions is None:\n            dimensions = self.kdims\n        else:\n            if not isinstance(dimensions, (list, tuple)):\n                dimensions = [dimensions]\n            dimensions = [self.get_dimension(d, strict=True)\n                          for d in dimensions]\n        dims = [d for d in self.kdims if d not in dimensions]\n        return self.groupby(dims, group_type=NdOverlay)", "docstring": "Group by supplied dimension(s) and overlay each group\n\nGroups data by supplied dimension(s) overlaying the groups\nalong the dimension(s).\n\nArgs:\ndimensions: Dimension(s) of dimensions to group by\n\nReturns:\nNdOverlay object(s) with supplied dimensions", "source": "juraj-google-style"}
{"code": "def gradients(loss, variables):\n    return gradients_module.gradients(loss, variables, colocate_gradients_with_ops=True)", "docstring": "Returns the gradients of `loss` w.r.t. `variables`.\n\nArgs:\nloss: Scalar tensor to minimize.\nvariables: List of variables.\n\nReturns:\nA gradients tensor.", "source": "github-repos"}
{"code": "def has_entities(status):\n    try:\n        if (sum((len(v) for v in status.entities.values())) > 0):\n            return True\n    except AttributeError:\n        if (sum((len(v) for v in status['entities'].values())) > 0):\n            return True\n    return False", "docstring": "Returns true if a Status object has entities.\n\nArgs:\nstatus: either a tweepy.Status object or a dict returned from Twitter API", "source": "codesearchnet"}
{"code": "def quarter_boundaries(quarter):\n    (year, quarter) = quarter.split('Q')\n    year = int(year)\n    quarter = int(quarter)\n    first_month_of_quarter = ((3 * quarter) - 2)\n    last_month_of_quarter = (3 * quarter)\n    first_day = date(year, first_month_of_quarter, 1)\n    last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])\n    return (first_day, last_day)", "docstring": "Returns first and last day of a quarter\n\nArgs:\nquarter (str) quarter, in format '2015Q1'\n\nReturns: (tuple) datetime.dates for the first and last days of the quarter", "source": "codesearchnet"}
{"code": "def gen_ref_docs(gen_index=False):\n    try:\n        from refdoc import generate_docs\n    except ImportError as ex:\n        msg = 'You need to install sphinx-refdoc if you want to generate code reference docs.'\n        print(msg, file=sys.stderr)\n        log.err('Exception: {}'.format(ex))\n        sys.exit((- 1))\n    pretend = context.get('pretend', False)\n    docs_dir = conf.get_path('docs.path', 'docs')\n    docs_ref_dir = os.path.join(docs_dir, 'ref')\n    refdoc_paths = conf.get('docs.reference', [])\n    if os.path.exists(docs_ref_dir):\n        if (not pretend):\n            log.info('Removing existing reference docs')\n            shutil.rmtree(docs_ref_dir)\n        else:\n            log.info('Would remove old reference docs')\n    args = {'out_dir': docs_ref_dir, 'verbose': context.get('verbose', 0)}\n    if gen_index:\n        args['gen_index'] = True\n    pkg_paths = [conf.proj_path(p) for p in refdoc_paths]\n    if (not pretend):\n        log.info('Generating reference documentation')\n        generate_docs(pkg_paths, **args)\n    else:\n        log.info('Would generate reference docs with the following params')\n        shell.cprint('<90>{}', util.yaml_dump(args).rstrip())\n        shell.cprint('<90>paths:\\n<34>{}', util.yaml_dump(pkg_paths).rstrip())", "docstring": "Generate reference documentation for the project.\n\nThis will use **sphinx-refdoc** to generate the source .rst files for the\nreference documentation.\n\nArgs:\ngen_index (bool):\nSet it to **True** if you want to generate the index file with the\nlist of top-level packages. This is set to default as in most cases\nyou only have one package per project so you can link directly to\nthat package reference (and if index were generated sphinx would\ncomplain about file not included in toctree).", "source": "codesearchnet"}
{"code": "def DeviceReadThread(hid_device):\n    hid_device.run_loop_ref = cf.CFRunLoopGetCurrent()\n    if (not hid_device.run_loop_ref):\n        logger.error('Failed to get current run loop')\n        return\n    iokit.IOHIDDeviceScheduleWithRunLoop(hid_device.device_handle, hid_device.run_loop_ref, K_CF_RUNLOOP_DEFAULT_MODE)\n    run_loop_run_result = K_CF_RUN_LOOP_RUN_TIMED_OUT\n    while ((run_loop_run_result == K_CF_RUN_LOOP_RUN_TIMED_OUT) or (run_loop_run_result == K_CF_RUN_LOOP_RUN_HANDLED_SOURCE)):\n        run_loop_run_result = cf.CFRunLoopRunInMode(K_CF_RUNLOOP_DEFAULT_MODE, 1000, False)\n    if (run_loop_run_result != K_CF_RUN_LOOP_RUN_STOPPED):\n        logger.error('Unexpected run loop exit code: %d', run_loop_run_result)\n    iokit.IOHIDDeviceUnscheduleFromRunLoop(hid_device.device_handle, hid_device.run_loop_ref, K_CF_RUNLOOP_DEFAULT_MODE)", "docstring": "Binds a device to the thread's run loop, then starts the run loop.\n\nArgs:\nhid_device: The MacOsHidDevice object\n\nThe HID manager requires a run loop to handle Report reads. This thread\nfunction serves that purpose.", "source": "codesearchnet"}
{"code": "def read_video_torchvision(video_path: str, sample_indices_fn: Callable, **kwargs):\n    video, _, info = torchvision_io.read_video(video_path, start_pts=0.0, end_pts=None, pts_unit='sec', output_format='THWC')\n    video_fps = info['video_fps']\n    total_num_frames = video.size(0)\n    duration = total_num_frames / video_fps if video_fps else 0\n    metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='torchvision')\n    indices = sample_indices_fn(metadata=metadata, **kwargs)\n    video = video[indices].contiguous().numpy()\n    metadata.frames_indices = indices\n    return (video, metadata)", "docstring": "Decode the video with torchvision decoder.\n\nArgs:\nvideo_path (`str`):\nPath to the video file.\nsample_indices_fn (`Callable`, *optional*):\nA callable function that will return indices at which the video should be sampled. If the video has to be loaded using\nby a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.\nIf not provided, simple uniform sampling with fps is performed.\nExample:\ndef sample_indices_fn(metadata, **kwargs):\nreturn np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)\n\nReturns:\nTuple[`np.array`, `VideoMetadata`]: A tuple containing:\n- Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).\n- `VideoMetadata` object.", "source": "github-repos"}
{"code": "def view_as(self, cls: Type[PipelineOptionsT]) -> PipelineOptionsT:\n    view = cls(self._flags)\n    for option_name in view._visible_option_list():\n        if option_name not in self._all_options:\n            self._all_options[option_name] = getattr(view._visible_options, option_name)\n    view._all_options = self._all_options\n    return view", "docstring": "Returns a view of current object as provided PipelineOption subclass.\n\nExample Usage::\n\noptions = PipelineOptions(['--runner', 'Direct', '--streaming'])\nstandard_options = options.view_as(StandardOptions)\nif standard_options.streaming:\n# ... start a streaming job ...\n\nNote that options objects may have multiple views, and modifications\nof values in any view-object will apply to current object and other\nview-objects.\n\nArgs:\ncls: PipelineOptions class or any of its subclasses.\n\nReturns:\nAn instance of cls that is initialized using options contained in current\nobject.", "source": "github-repos"}
{"code": "def find_subclass_in_module(base_class, module):\n    subclasses = find_subclasses_in_module([base_class], module)\n    if len(subclasses) != 1:\n        raise ValueError('Expected 1 subclass of %s per module, found %s.' % (base_class.__name__, [subclass.__name__ for subclass in subclasses]))\n    return subclasses[0]", "docstring": "Finds the single subclass of the given base class in the given module.\n\nArgs:\nbase_class: class, the base class to look for a subclass of in the module.\nmodule: module, the module to look for the single subclass in.\n\nReturns:\nThe single subclass of the given base class.\n\nRaises:\nValueError: If the number of subclasses found was not exactly one.", "source": "github-repos"}
{"code": "def _GetArgSpecInfo(fn):\n    skip_arg = False\n    if inspect.isclass(fn):\n        skip_arg = True\n    elif inspect.ismethod(fn):\n        skip_arg = fn.__self__ is not None\n    elif inspect.isbuiltin(fn):\n        if not isinstance(fn.__self__, types.ModuleType):\n            skip_arg = True\n    elif not inspect.isfunction(fn):\n        skip_arg = True\n    return (fn, skip_arg)", "docstring": "Gives information pertaining to computing the ArgSpec of fn.\n\nDetermines if the first arg is supplied automatically when fn is called.\nThis arg will be supplied automatically if fn is a bound method or a class\nwith an __init__ method.\n\nAlso returns the function who's ArgSpec should be used for determining the\ncalling parameters for fn. This may be different from fn itself if fn is a\nclass with an __init__ method.\n\nArgs:\nfn: The function or class of interest.\nReturns:\nA tuple with the following two items:\nfn: The function to use for determining the arg spec of this function.\nskip_arg: Whether the first argument will be supplied automatically, and\nhence should be skipped when supplying args from a Fire command.", "source": "github-repos"}
{"code": "def _load_callwraps(packname, package):\n    \n    global _callwraps\n    from acorn.config import settings\n    from acorn.logging.descriptors import _obj_getattr\n    spack = settings(packname)\n    if spack is not None:\n        if spack.has_section(\"callwrap\"):\n            wrappings = dict(spack.items(\"callwrap\"))\n            for fqdn, target in wrappings.items():\n                caller = _obj_getattr(package, target)\n                _callwraps[fqdn] = caller", "docstring": "Loads the special call wrapping settings for functions in the specified\npackage. This allows the result of the original method call to be cast as a\ndifferent type, or passed to a different constructor before returning from\nthe wrapped function.\n\nArgs:\npackname (str): name of the package to get config settings for.\npackage: actual package object.", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._located_items:\n        self._located_items.write(local_buffer, kmip_version=kmip_version)\n    if self._unique_identifiers:\n        for unique_identifier in self._unique_identifiers:\n            unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(LocateResponsePayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the Locate response payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def clean(self, force: bool=False):\n    assert (not self._closed)\n    with (yield from self._host_pools_lock):\n        for (key, pool) in tuple(self._host_pools.items()):\n            (yield from pool.clean(force=force))\n            if ((not self._host_pool_waiters[key]) and pool.empty()):\n                del self._host_pools[key]\n                del self._host_pool_waiters[key]", "docstring": "Clean all closed connections.\n\nArgs:\nforce: Clean connected and idle connections too.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def orthologize(self, species_id: str) -> 'BEL':\n    if (not self.ast):\n        return self\n    if (not self.ast.collected_orthologs):\n        self = self.collect_orthologs([species_id])\n    self.ast.species = set()\n    self.ast = bel_utils.orthologize(self.ast, self, species_id)\n    return self", "docstring": "Orthologize BEL AST to given species_id\n\nWill return original entity (ns:value) if no ortholog found.\n\nArgs:\nspecies_id (str): species id to convert genes/rna/proteins into\n\nReturns:\nBEL: returns self", "source": "codesearchnet"}
{"code": "def tersoff_input(self, structure, periodic=False, uc=True, *keywords):\n        \n        \n        gin = self.keyword_line(*keywords)\n        gin += self.structure_lines(\n            structure, cell_flg=periodic, frac_flg=periodic,\n            anion_shell_flg=False, cation_shell_flg=False, symm_flg=not uc\n        )\n        gin += self.tersoff_potential(structure)\n        return gin", "docstring": "Gets a GULP input with Tersoff potential for an oxide structure\n\nArgs:\nstructure: pymatgen.core.structure.Structure\nperiodic (Default=False): Flag denoting whether periodic\nboundary conditions are used\nlibrary (Default=None): File containing the species and potential.\nuc (Default=True): Unit Cell Flag.\nkeywords: GULP first line keywords.", "source": "juraj-google-style"}
{"code": "def get_dm_channel(self, userid):\n        \n\n        dm_open = self.slack_client.api_call('im.open', user=userid)\n        return dm_open['channel']['id']", "docstring": "Perform a lookup of users to resolve a userid to a DM channel\n\nArgs:\nuserid (string): Slack userid to lookup.\n\nReturns:\nstring: DM channel ID of user", "source": "juraj-google-style"}
{"code": "def is_mobile(user_agent):\n    if user_agent:\n        b = reg_b.search(user_agent)\n        v = reg_v.search(user_agent[0:4])\n        return (b or v)\n    return False", "docstring": "Checks if the user browser from the given user agent is mobile.\n\nArgs:\nuser_agent: A given user agent.\n\nReturns: True if the browser from the user agent is mobile.", "source": "codesearchnet"}
{"code": "def date_to_datetime(self, time_input, tz=None):\n        \n        dt = None\n        try:\n            \n            dt = parser.parse(time_input)\n            \n            if tz is not None and tz != dt.tzname():\n                if dt.tzinfo is None:\n                    dt = self._replace_timezone(dt)\n                dt = dt.astimezone(timezone(tz))\n        except IndexError:\n            pass\n        except TypeError:\n            pass\n        except ValueError:\n            pass\n        return dt", "docstring": "Convert ISO 8601 and other date strings to datetime.datetime type.\n\nArgs:\ntime_input (string): The time input string (see formats above).\ntz (string): The time zone for the returned data.\n\nReturns:\n(datetime.datetime): Python datetime.datetime object.", "source": "juraj-google-style"}
{"code": "def add_api_key(key, value):\n    if ((key is None) or (key == '')):\n        logger.error('Key cannot be empty')\n    if ((value is None) or (value == '')):\n        logger.error('Value cannot be empty')\n    from .. import datatools\n    data = datatools.get_data()\n    if ('keys' not in data['discord']):\n        data['discord']['keys'] = {}\n    is_key_new = False\n    if (key not in data['discord']['keys']):\n        is_key_new = True\n    elif (data['discord']['keys'][key] == value):\n        logger.info(\"API key '{}' already has value '{}'\".format(key, value))\n        return\n    data['discord']['keys'][key] = value\n    datatools.write_data(data)\n    key_text = ('added' if is_key_new else 'updated')\n    logger.info(\"API key '{}' {} with value '{}'\".format(key, key_text, value))", "docstring": "Adds a key to the bot's data\n\nArgs:\nkey: The name of the key to add\nvalue: The value for the key", "source": "codesearchnet"}
{"code": "def dump(collection: BioCCollection, fp, pretty_print: bool = True):\n    \n    fp.write(dumps(collection, pretty_print))", "docstring": "Serialize ``collection`` as a BioC formatted stream to ``fp``.\n\nArgs:\ncollection: the BioC collection\nfp: a ``.write()``-supporting file-like object\npretty_print: enables formatted XML", "source": "juraj-google-style"}
{"code": "def tile_and_concat(image, latent, concat_latent=True):\n  \n  if not concat_latent:\n    return image\n  image_shape = common_layers.shape_list(image)\n  latent_shape = common_layers.shape_list(latent)\n  height, width = image_shape[1], image_shape[2]\n  latent_dims = latent_shape[1]\n  height_multiples = height \n  pad = height - (height_multiples * latent_dims)\n  latent = tf.reshape(latent, (-1, latent_dims, 1, 1))\n  latent = tf.tile(latent, (1, height_multiples, width, 1))\n  latent = tf.pad(latent, [[0, 0], [pad \n  return tf.concat([image, latent], axis=-1)", "docstring": "Tile latent and concatenate to image across depth.\n\nArgs:\nimage: 4-D Tensor, (batch_size X height X width X channels)\nlatent: 2-D Tensor, (batch_size X latent_dims)\nconcat_latent: If set to False, the image is returned as is.\n\nReturns:\nconcat_latent: 4-D Tensor, (batch_size X height X width X channels+1)\nlatent tiled and concatenated to the image across the channels.", "source": "juraj-google-style"}
{"code": "def get_body(name):\n    body = Pck()[name]\n    body.propagate = (lambda date: get_orbit(name, date))\n    return body", "docstring": "Retrieve the Body structure of a JPL .bsp file object\n\nArgs:\nname (str)\nReturn:\n:py:class:`~beyond.constants.Body`", "source": "codesearchnet"}
{"code": "def __init__(self, string_or_filelike, parser_delegate):\n    \n    if hasattr(string_or_filelike, 'readline'):\n      line_reader = string_or_filelike.readline\n    else:  \n      if six.PY2:\n        string_or_filelike = unicode(string_or_filelike)\n      string_io = io.StringIO(string_or_filelike)\n      line_reader = string_io.readline\n\n    def _text_line_reader():\n      line = line_reader()\n      if isinstance(line, bytes):\n        line = line.decode('utf8')\n      return line\n\n    self._token_generator = tokenize.generate_tokens(_text_line_reader)\n    self._filename = getattr(string_or_filelike, 'name', None)\n    self._current_token = None\n    self._delegate = parser_delegate\n    self._advance_one_token()", "docstring": "Construct the parser.\n\nArgs:\nstring_or_filelike: Either the string to parse, or a file-like object\nsupporting the readline method.\nparser_delegate: An instance of the ParserDelegate class, that will be\nresponsible for constructing appropriate objects for configurable\nreferences and macros.", "source": "juraj-google-style"}
{"code": "def _slice_shape(self, start, stop):\n    if stop <= start:\n        return DynamicRaggedShape._from_inner_shape([])\n    elif start == 0:\n        if stop <= self.num_row_partitions:\n            if stop == 1:\n                return DynamicRaggedShape._from_inner_shape([self.row_partitions[0].nrows()])\n            new_row_partitions = self.row_partitions[:stop - 1]\n            new_inner_shape = [new_row_partitions[-1].nvals()]\n            return DynamicRaggedShape(new_row_partitions, new_inner_shape)\n        else:\n            if self.rank is None:\n                new_inner_rank = stop - self.num_row_partitions\n                new_inner_shape = self.inner_shape[:new_inner_rank]\n                return DynamicRaggedShape(row_partitions=self.row_partitions, inner_shape=new_inner_shape, static_inner_shape=None, validate=False)\n            elif self.rank <= stop:\n                return self\n            new_inner_rank = stop - self.num_row_partitions\n            new_inner_shape = self.inner_shape[:new_inner_rank]\n            return DynamicRaggedShape(row_partitions=self.row_partitions, inner_shape=new_inner_shape, static_inner_shape=tensor_shape.TensorShape([None] * new_inner_rank), validate=False)\n    else:\n        if self.rank is None or stop < self.rank:\n            partial = self._slice_shape(0, stop)\n        else:\n            partial = self\n        for x in partial.row_partitions:\n            if not x.is_uniform():\n                raise ValueError('All relevant dimensions must be uniform')\n        if partial.rank is None:\n            raise NotImplementedError('__getitem__[start:stop] where start > 0 not implemented')\n        return DynamicRaggedShape._from_inner_shape(partial._with_num_row_partitions(0).inner_shape[start:])", "docstring": "Returns a shape self[start:stop].\n\nIf start == 0, then this truncates dimensions after stop.\nIf start != 0, then this will return a shape with num_row_partitions == 0.\n\nSee __getitem__.\n\nArgs:\nstart: the first dimension. 0 <= start <= rank\nstop: the last dimension (exclusive). 0 <= stop <= rank", "source": "github-repos"}
{"code": "def scanJoiner(self, xEUI='*', strPSKd='threadjpaketest'):\n        \n        print '%s call scanJoiner' % self.port\n        if not isinstance(xEUI, str):\n            eui64 = self.__convertLongToString(xEUI)\n\n            \n            if len(eui64) < 16:\n                eui64 = eui64.zfill(16)\n                print eui64\n        else:\n            eui64 = xEUI\n\n        \n        timeout = 500\n\n        cmd = WPANCTL_CMD + 'commissioner joiner-add %s %s %s' % (eui64, str(timeout), strPSKd)\n        print cmd\n        if not self.isActiveCommissioner:\n            self.startCollapsedCommissioner()\n        if self.__sendCommand(cmd)[0] != 'Fail':\n            return True\n        else:\n            return False", "docstring": "scan Joiner\n\nArgs:\nxEUI: Joiner's EUI-64\nstrPSKd: Joiner's PSKd for commissioning\n\nReturns:\nTrue: successful to add Joiner's steering data\nFalse: fail to add Joiner's steering data", "source": "juraj-google-style"}
{"code": "def _export_files(self, bq: bigquery_tools.BigQueryWrapper, element: 'ReadFromBigQueryRequest', table_reference: TableReference):\n    job_labels = self._get_bq_metadata().add_additional_bq_job_labels(self.bigquery_job_labels)\n    export_job_name = bigquery_tools.generate_bq_job_name(self._job_name, self._source_uuid, bigquery_tools.BigQueryJobTypes.EXPORT, element.obj_id)\n    temp_location = self.options.view_as(GoogleCloudOptions).temp_location\n    gcs_location = bigquery_export_destination_uri(self.gcs_location, temp_location, '%s%s' % (self._source_uuid, element.obj_id))\n    try:\n        if self.use_json_exports:\n            job_ref = bq.perform_extract_job([gcs_location], export_job_name, table_reference, bigquery_tools.FileFormat.JSON, project=self._get_project(), job_labels=job_labels, include_header=False)\n        else:\n            job_ref = bq.perform_extract_job([gcs_location], export_job_name, table_reference, bigquery_tools.FileFormat.AVRO, project=self._get_project(), include_header=False, job_labels=job_labels, use_avro_logical_types=True)\n        bq.wait_for_bq_job(job_ref)\n    except Exception as exn:\n        logging.warning('Error exporting table: %s. Note that external tables cannot be exported: https:\n        raise\n    metadata_list = FileSystems.match([gcs_location])[0].metadata_list\n    if isinstance(table_reference, ValueProvider):\n        table_ref = bigquery_tools.parse_table_reference(element.table, project=self._get_project())\n    else:\n        table_ref = table_reference\n    table = bq.get_table(table_ref.projectId, table_ref.datasetId, table_ref.tableId)\n    return (table.schema, metadata_list)", "docstring": "Runs a BigQuery export job.\n\nReturns:\nbigquery.TableSchema instance, a list of FileMetadata instances", "source": "github-repos"}
{"code": "def _make_env(resultdir=None):\n    env = {'config': {}, 'resultdir': '', 'config_file': '', 'nodes': {}, 'phase': '', 'user': '', 'cwd': os.getcwd()}\n    if resultdir:\n        env_path = os.path.join(resultdir, 'env')\n        if os.path.isfile(env_path):\n            with open(env_path, 'r') as f:\n                env.update(yaml.load(f))\n                logger.debug('Loaded environment %s', env_path)\n        if (('config_file' in env) and (env['config_file'] is not None)):\n            if os.path.isfile(env['config_file']):\n                with open(env['config_file'], 'r') as f:\n                    env['config'].update(yaml.load(f))\n                    logger.debug('Reloaded config %s', env['config'])\n    return env", "docstring": "Loads the env from `resultdir` if not `None` or makes a new one.\n\nAn Enos environment handles all specific variables of an\nexperiment. This function either generates a new environment or\nloads a previous one. If the value of `resultdir` is `None`, then\nthis function makes a new environment and return it. If the value\nis a directory path that contains an Enos environment, then this function\nloads and returns it.\n\nIn case of a directory path, this function also rereads the\nconfiguration file (the reservation.yaml) and reloads it. This\nlets the user update his configuration between each phase.\n\nArgs:\nresultdir (str): directory path to load the env from.", "source": "codesearchnet"}
{"code": "def strides(self) -> List[int]:\n    return _compute_mesh_strides(self.shape())", "docstring": "Returns the strides tensor array for this mesh.\n\nIf the mesh shape is `[a, b, c, d]`, then the strides array can be computed\nas `[b*c*d, c*d, d, 1]`. This array can be useful in computing local device\noffsets given a device ID. Using the same example, the device coordinates of\nthe mesh can be computed as:\n\n```\n[(device_id / (b*c*d)) % a,\n(device_id / (c*d))   % b,\n(device_id / (d))     % c,\n(device_id)           % d]\n```\n\nThis is the same as `(device_id // mesh.strides) % mesh.shape`.\n\nReturns:\nThe mesh strides as an integer tensor.", "source": "github-repos"}
{"code": "def content(self, request, id):\n    gist = self.send(request, id).json()\n\n    def convert(data):\n        return base64.b64decode(data).decode('utf-8')\n    content = {}\n    for (name, data) in gist['files'].items():\n        content[name] = convert(data['content'])\n    return content", "docstring": "Returns the content of the gist\n\nArguments:\nrequest: an initial request object\nid:      the gist identifier\n\nReturns:\nA dict containing the contents of each file in the gist", "source": "codesearchnet"}
{"code": "def _check_triple_quotes(self, quote_record):\n    (_, triple, row, col) = quote_record\n    if (triple != TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)):\n        self._invalid_triple_quote(triple, row, col)", "docstring": "Check if the triple quote from tokenization is valid.\n\nArgs:\nquote_record: a tuple containing the info about the string\nfrom tokenization, giving the (token, quote, row number, column).", "source": "codesearchnet"}
{"code": "def Get(self, path, follow_symlink=True):\n    key = self._Key(path=path, follow_symlink=follow_symlink)\n    try:\n        return self._cache[key]\n    except KeyError:\n        value = Stat.FromPath(path, follow_symlink=follow_symlink)\n        self._cache[key] = value\n        if ((not follow_symlink) and (not value.IsSymlink())):\n            self._cache[self._Key(path=path, follow_symlink=True)] = value\n        return value", "docstring": "Stats given file or returns a cached result if available.\n\nArgs:\npath: A path to the file to perform `stat` on.\nfollow_symlink: True if `stat` of a symlink should be returned instead of\na file that it points to. For non-symlinks this setting has no effect.\n\nReturns:\n`Stat` object corresponding to the given path.", "source": "codesearchnet"}
{"code": "def __setitem__(self,key,value):\n        \n\n        self.rdb.hset(self.session_hash,key,value)\n        self.rdb.expire(self.session_hash,self.ttl)", "docstring": "Set an existing or new key, value association.\n\nArgs:\nkey (str): The dictionary key.\nvalue (str): The dictionary value", "source": "juraj-google-style"}
{"code": "def get_day_end(config):\n    \n    day_start_datetime = datetime.datetime.combine(datetime.date.today(), config['day_start'])\n    day_end_datetime = day_start_datetime - datetime.timedelta(seconds=1)\n    return day_end_datetime.time()", "docstring": "Get the day end time given the day start. This assumes full 24h day.\n\nArgs:\nconfig (dict): Configdict. Needed to extract ``day_start``.\n\nNote:\nThis is merely a convinience funtion so we do not have to deduct this from ``day_start``\nby hand all the time.", "source": "juraj-google-style"}
{"code": "def push(self, value):\n    stream = DataStream.FromEncoded(value.stream)\n    if (stream.stream_type == DataStream.OutputType):\n        if (len(self.streaming_data) == self.streaming_length):\n            raise StorageFullError('Streaming buffer full')\n        self.streaming_data.append(value)\n    else:\n        if (len(self.storage_data) == self.storage_length):\n            raise StorageFullError('Storage buffer full')\n        self.storage_data.append(value)", "docstring": "Store a new value for the given stream.\n\nArgs:\nvalue (IOTileReading): The value to store.  The stream\nparameter must have the correct value", "source": "codesearchnet"}
{"code": "def cancel(self, queue):\n        \n        try:\n            consumer = self._consumers[queue]\n            yield consumer.channel.basic_cancel(consumer_tag=consumer.tag)\n        except pika.exceptions.AMQPChannelError:\n            \n            \n            pass\n        except KeyError:\n            defer.returnValue(None)\n\n        try:\n            yield consumer.channel.close()\n        except pika.exceptions.AMQPChannelError:\n            pass\n\n        del self._consumers[queue]", "docstring": "Cancel the consumer for a queue.\n\nArgs:\nqueue (str): The name of the queue the consumer is subscribed to.\n\nReturns:\ndefer.Deferred: A Deferred that fires when the consumer\nis canceled, or None if the consumer was already canceled. Wrap\nthe call in :func:`.defer.maybeDeferred` to always receive a Deferred.", "source": "juraj-google-style"}
{"code": "def __tf_tracing_type__(self, context: TracingContext) -> TraceType:", "docstring": "Returns the tracing type of this object.\n\nThe tracing type is used to build the signature of a tf.function\nwhen traced, and to match arguments with existing signatures.\nWhen a Function object is called, tf.function looks at the tracing type\nof the call arguments. If an existing signature of matching type exists,\nit will be used. Otherwise, a new function is traced, and its signature\nwill use the tracing type of the call arguments.\n\nArgs:\ncontext: a context reserved for internal/future usage.\n\nReturns:\nThe tracing type of this object.", "source": "github-repos"}
{"code": "def add_answer(self, vote, rationale):\n    self.raw_answers.append({VOTE_KEY: vote, RATIONALE_KEY: rationale})", "docstring": "Add an answer\n\nArgs:\nvote (int): the option that the student voted for\nrationale (str): the reason why the student voted for the option", "source": "codesearchnet"}
{"code": "def scale(self, scalar, ignored_variables=None, ignored_interactions=None, ignore_offset=False):\n    if (ignored_variables is None):\n        ignored_variables = set()\n    elif (not isinstance(ignored_variables, abc.Container)):\n        ignored_variables = set(ignored_variables)\n    if (ignored_interactions is None):\n        ignored_interactions = set()\n    elif (not isinstance(ignored_interactions, abc.Container)):\n        ignored_interactions = set(ignored_interactions)\n    linear = self.linear\n    for v in linear:\n        if (v in ignored_variables):\n            continue\n        linear[v] *= scalar\n    quadratic = self.quadratic\n    for (u, v) in quadratic:\n        if (((u, v) in ignored_interactions) or ((v, u) in ignored_interactions)):\n            continue\n        quadratic[(u, v)] *= scalar\n    if (not ignore_offset):\n        self.offset *= scalar\n    try:\n        self._counterpart.scale(scalar, ignored_variables=ignored_variables, ignored_interactions=ignored_interactions)\n    except AttributeError:\n        pass", "docstring": "Multiply by the specified scalar all the biases and offset of a binary quadratic model.\n\nArgs:\nscalar (number):\nValue by which to scale the energy range of the binary quadratic model.\n\nignored_variables (iterable, optional):\nBiases associated with these variables are not scaled.\n\nignored_interactions (iterable[tuple], optional):\nAs an iterable of 2-tuples. Biases associated with these interactions are not scaled.\n\nignore_offset (bool, default=False):\nIf True, the offset is not scaled.\n\nExamples:\n\nThis example creates a binary quadratic model and then scales it to half\nthe original energy range.\n\n>>> import dimod\n...\n>>> bqm = dimod.BinaryQuadraticModel({'a': -2.0, 'b': 2.0}, {('a', 'b'): -1.0}, 1.0, dimod.SPIN)\n>>> bqm.scale(0.5)\n>>> bqm.linear['a']\n-1.0\n>>> bqm.quadratic[('a', 'b')]\n-0.5\n>>> bqm.offset\n0.5", "source": "codesearchnet"}
{"code": "def _get_variation_id(value, capital=False):\n        \n        \n        value = int(value)\n        base_power = base_start = base_end = 0\n        while value >= base_end:\n            base_power += 1\n            base_start = base_end\n            base_end += pow(26, base_power)\n        base_index = value - base_start\n\n        \n        alphas = ['a'] * base_power\n        for index in range(base_power - 1, -1, -1):\n            alphas[index] = chr(int(97 + (base_index % 26)))\n            base_index /= 26\n\n        characters = ''.join(alphas)\n        return characters.upper() if capital else characters", "docstring": "Convert an integer value to a character. a-z then double aa-zz etc\nArgs:\nvalue (int): integer index we're looking up\ncapital (bool): whether we convert to capitals or not\nReturns (str): alphanumeric representation of the index", "source": "juraj-google-style"}
{"code": "def _infer(self, request):\n    \n    label_vocab = inference_utils.get_label_vocab(\n      request.args.get('label_vocab_path'))\n\n    try:\n      if request.method != 'GET':\n        logger.error('%s requests are forbidden.', request.method)\n        return http_util.Respond(request, {'error': 'invalid non-GET request'},\n                                    'application/json', code=405)\n\n      (inference_addresses, model_names, model_versions,\n          model_signatures) = self._parse_request_arguments(request)\n\n      indices_to_infer = sorted(self.updated_example_indices)\n      examples_to_infer = [self.examples[index] for index in indices_to_infer]\n      infer_objs = []\n      for model_num in xrange(len(inference_addresses)):\n        serving_bundle = inference_utils.ServingBundle(\n            inference_addresses[model_num],\n            model_names[model_num],\n            request.args.get('model_type'),\n            model_versions[model_num],\n            model_signatures[model_num],\n            request.args.get('use_predict') == 'true',\n            request.args.get('predict_input_tensor'),\n            request.args.get('predict_output_tensor'))\n        infer_objs.append(inference_utils.run_inference_for_inference_results(\n          examples_to_infer, serving_bundle))\n\n      resp = {'indices': indices_to_infer, 'results': infer_objs}\n      self.updated_example_indices = set()\n      return http_util.Respond(request, {'inferences': json.dumps(resp),\n                                         'vocab': json.dumps(label_vocab)},\n                               'application/json')\n    except common_utils.InvalidUserInputError as e:\n      return http_util.Respond(request, {'error': e.message},\n                               'application/json', code=400)\n    except AbortionError as e:\n      return http_util.Respond(request, {'error': e.details},\n                               'application/json', code=400)", "docstring": "Returns JSON for the `vz-line-chart`s for a feature.\n\nArgs:\nrequest: A request that should contain 'inference_address', 'model_name',\n'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.\n\nReturns:\nA list of JSON objects, one for each chart.", "source": "juraj-google-style"}
{"code": "def remove_species(self, species):\n    new_sites = []\n    species = [get_el_sp(s) for s in species]\n    for site in self._sites:\n        new_sp_occu = {sp: amt for (sp, amt) in site.species.items() if (sp not in species)}\n        if (len(new_sp_occu) > 0):\n            new_sites.append(PeriodicSite(new_sp_occu, site.frac_coords, self._lattice, properties=site.properties))\n    self._sites = new_sites", "docstring": "Remove all occurrences of several species from a structure.\n\nArgs:\nspecies: Sequence of species to remove, e.g., [\"Li\", \"Na\"].", "source": "codesearchnet"}
{"code": "def __init__(self, script_hash=None, key=None):\n        \n        self.ScriptHash = script_hash\n        self.Key = key", "docstring": "Create an instance.\n\nArgs:\nscript_hash (UInt160):\nkey (bytes):", "source": "juraj-google-style"}
{"code": "def __pad_value(value, pad_len_multiple, pad_char):\n    \n    assert pad_len_multiple > 0\n    assert len(pad_char) == 1\n    padding_length = (pad_len_multiple -\n                      (len(value) % pad_len_multiple)) % pad_len_multiple\n    return value + pad_char * padding_length", "docstring": "Add padding characters to the value if needed.\n\nArgs:\nvalue: The string value to be padded.\npad_len_multiple: Pad the result so its length is a multiple\nof pad_len_multiple.\npad_char: The character to use for padding.\n\nReturns:\nThe string value with padding characters added.", "source": "juraj-google-style"}
{"code": "def stop(self, consumer):\n    stopped_workflows = []\n    for request in [r for r in consumer.controller.state.active_requests]:\n        job = AsyncResult(request.id)\n        workflow_id = job.result['workflow_id']\n        if (workflow_id not in stopped_workflows):\n            client = Client(SignalConnection(**consumer.app.user_options['config'].signal, auto_connect=True), request_key=workflow_id)\n            client.send(Request(action='stop_workflow'))\n            stopped_workflows.append(workflow_id)", "docstring": "This function is called when the worker receives a request to terminate.\n\nUpon the termination of the worker, the workflows for all running jobs are\nstopped gracefully.\n\nArgs:\nconsumer (Consumer): Reference to the consumer object that handles messages\nfrom the broker.", "source": "codesearchnet"}
{"code": "def get_soundcloud_data(url):\n    data = {}\n    request = requests.get(url)\n    title_tag = request.text.split('<title>')[1].split('</title')[0]\n    data['title'] = title_tag.split(' by ')[0].strip()\n    data['artist'] = title_tag.split(' by ')[1].split('|')[0].strip()\n    return data", "docstring": "Scrapes a SoundCloud page for a track's important information.\n\nReturns:\ndict: of audio data", "source": "codesearchnet"}
{"code": "def translate_ostat(ostat):\n    ostat_lower = ostat.strip().lower()\n    if (ostat_lower == 'monomer'):\n        return 1\n    elif (ostat_lower == 'homo-dimer'):\n        return 2\n    elif (ostat_lower == 'homo-trimer'):\n        return 3\n    elif (ostat_lower == 'homo-tetramer'):\n        return 4\n    elif (ostat_lower == 'homo-pentamer'):\n        return 5\n    elif (ostat_lower == 'homo-hexamer'):\n        return 6\n    elif (ostat_lower == 'homo-heptamer'):\n        return 7\n    elif (ostat_lower == 'homo-octamer'):\n        return 8\n    else:\n        num = int(ostat_lower.split('-')[1])\n        return num", "docstring": "Translate the OSTAT field to an integer.\n\nAs of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.\n\nArgs:\nostat (str): Predicted oligomeric state of the PDB file\n\nReturns:\nint: Translated string to integer", "source": "codesearchnet"}
{"code": "def find_or_create_all(cls, list_of_kwargs, keys=[]):\n    (list_of_kwargs_wo_dupes, markers) = remove_and_mark_duplicate_dicts(list_of_kwargs, keys)\n    added_objs = cls.add_all([(cls.first(**subdict(kwargs, keys)) or cls.new(**kwargs)) for kwargs in list_of_kwargs_wo_dupes])\n    result_objs = []\n    iterator_of_added_objs = iter(added_objs)\n    for idx in range(len(list_of_kwargs)):\n        if (idx in markers):\n            result_objs.append(added_objs[markers[idx]])\n        else:\n            result_objs.append(next(iterator_of_added_objs))\n    return result_objs", "docstring": "Batch method for querying for a list of instances and\ncreating them if required\n\nArgs:\nlist_of_kwargs(list of dicts): A list of dicts where\neach dict denotes the keyword args that you would pass\nto the create method separately\n\nkeys (list, optional): A list of keys to use for the\ninitial finding step. Matching is done only on these\nattributes.\n\nExamples:\n\n>>> Customer.find_or_create_all([\n... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34},\n... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com',\n... 'gender': 'Male'}], keys=['name', 'email'])", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states, output_router_logits):\n    forwarded_states, router_tuple = self.mlp(hidden_states)\n    forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))\n    output = hidden_states + self.norm(forwarded_states)\n    if output_router_logits and router_tuple is not None:\n        return (output, router_tuple)\n    else:\n        return output", "docstring": "Args:\nhidden_states (`torch.Tensor`) :\n[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\noutput_router_logits (`bool`) :\noutput experts router output.\nReturns:\ntorch.Tensor[num_groups, tokens_per_group, hidden_dim]", "source": "github-repos"}
{"code": "def _GetFormatErrorLocation(\n      self, yaml_definition, last_definition_object):\n    \n    name = yaml_definition.get('name', None)\n    if name:\n      error_location = 'in: {0:s}'.format(name or '<NAMELESS>')\n    elif last_definition_object:\n      error_location = 'after: {0:s}'.format(last_definition_object.name)\n    else:\n      error_location = 'at start'\n\n    return error_location", "docstring": "Retrieves a format error location.\n\nArgs:\nyaml_definition (dict[str, object]): current YAML definition.\nlast_definition_object (DataTypeDefinition): previous data type\ndefinition.\n\nReturns:\nstr: format error location.", "source": "juraj-google-style"}
{"code": "def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TwitterIOSStatusEventData()\n    event_data.favorite_count = self._GetRowValue(\n        query_hash, row, 'favoriteCount')\n    event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.query = query\n    event_data.retweet_count = self._GetRowValue(\n        query_hash, row, 'retweetCount')\n    event_data.text = self._GetRowValue(query_hash, row, 'text')\n    event_data.user_id = self._GetRowValue(query_hash, row, 'user_id')\n\n    timestamp = self._GetRowValue(query_hash, row, 'date')\n    if timestamp:\n      \n      timestamp = int(timestamp)\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_CREATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'updatedAt')\n    if timestamp:\n      \n      timestamp = int(timestamp)\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_UPDATE)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a status row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def __init__(self, data_type_definition):\n    \n    super(StreamMap, self).__init__(data_type_definition)\n    self._fold_byte_stream = None\n    self._map_byte_stream = None\n\n    if self._element_data_type_definition.IsComposite():\n      raise errors.FormatError('Unsupported composite element data type')", "docstring": "Initializes a stream data type map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nFormatError: if the data type map cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def reset(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_reset_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_reset(self._reader_ref, name=name)", "docstring": "Restore a reader to its initial clean state.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.", "source": "github-repos"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    preferred_year = cls._ParseNumericOption(options, 'preferred_year')\n\n    process_archives = getattr(options, 'process_archives', False)\n    process_compressed_streams = getattr(\n        options, 'process_compressed_streams', True)\n\n    setattr(configuration_object, '_preferred_year', preferred_year)\n    setattr(configuration_object, '_process_archives', process_archives)\n    setattr(\n        configuration_object, '_process_compressed_streams',\n        process_compressed_streams)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def _create_dom(data):\n    if (not isinstance(data, dhtmlparser.HTMLElement)):\n        data = dhtmlparser.parseString(utils.handle_encodnig(data))\n    dhtmlparser.makeDoubleLinked(data)\n    return data", "docstring": "Creates a double-linked DOM from `data`.\n\nArgs:\ndata (str/HTMLElement): Either string or HTML element.\n\nReturns:\nobj: HTMLElement containing the double-linked DOM.", "source": "codesearchnet"}
{"code": "async def _get_async(self, url, session):\n    data = None\n    async with session.get(url) as resp:\n        if (resp.status == 200):\n            data = (await resp.json())\n    return data", "docstring": "Asynchronous internal method used for GET requests\n\nArgs:\nurl (str): URL to fetch\nsession (obj): aiohttp client session for async loop\n\nReturns:\ndata (obj): Individual URL request's response coroutine", "source": "codesearchnet"}
{"code": "def saml_metadata(self, client_id):\n    return self.get(url='https:", "docstring": "Get SAML2.0 Metadata.\n\nArgs:\nclient_id (str): Client Id of the application to get the SAML metadata for.", "source": "codesearchnet"}
{"code": "def require_params(self, req):\n        \n        params = {}\n\n        for name, param in self.params.items():\n            if name not in req.params and param.required:\n                \n                \n                \n                missing = set(\n                    p for p in self.params\n                    if self.params[p].required\n                ) - set(req.params.keys())\n\n                raise errors.HTTPMissingParam(\", \".join(missing))\n\n            elif name in req.params or param.default:\n                \n                \n                \n                \n                \n                \n                \n                try:\n                    if param.many:\n                        \n                        values = req.get_param_as_list(\n                            \n                            \n                            \n                            name, param.validated_value\n                        ) or [\n                            param.default and\n                            param.validated_value(param.default)\n                        ]\n                        params[name] = param.container(values)\n                    else:\n                        \n                        \n                        \n                        \n                        params[name] = param.validated_value(\n                            req.get_param(name, default=param.default)\n                        )\n\n                except ValidationError as err:\n                    \n                    \n                    raise err.as_invalid_param(name)\n\n                except ValueError as err:\n                    \n                    raise errors.HTTPInvalidParam(str(err), name)\n\n        return params", "docstring": "Require all defined parameters from request query string.\n\nRaises ``falcon.errors.HTTPMissingParam`` exception if any of required\nparameters is missing and ``falcon.errors.HTTPInvalidParam`` if any\nof parameters could not be understood (wrong format).\n\nArgs:\nreq (falcon.Request): request object", "source": "juraj-google-style"}
{"code": "def fit(self, train_x, train_y):\n        \n        if self.first_fitted:\n            self.incremental_fit(train_x, train_y)\n        else:\n            self.first_fit(train_x, train_y)", "docstring": "Fit the regressor with more data.\nArgs:\ntrain_x: A list of NetworkDescriptor.\ntrain_y: A list of metric values.", "source": "juraj-google-style"}
{"code": "def log_likelihood(self, y, _const=math.log((2.0 * math.pi)), quiet=False):\n    y = self._process_input(y)\n    resid = (y - self.mean.get_value(self._t))\n    try:\n        self._recompute()\n    except solver.LinAlgError:\n        if quiet:\n            return (- np.inf)\n        raise\n    if (len(y.shape) > 1):\n        raise ValueError('dimension mismatch')\n    logdet = self.solver.log_determinant()\n    if (not np.isfinite(logdet)):\n        return (- np.inf)\n    loglike = ((- 0.5) * ((self.solver.dot_solve(resid) + logdet) + (len(y) * _const)))\n    if (not np.isfinite(loglike)):\n        return (- np.inf)\n    return loglike", "docstring": "Compute the marginalized likelihood of the GP model\n\nThe factorized matrix from the previous call to :func:`GP.compute` is\nused so ``compute`` must be called first.\n\nArgs:\ny (array[n]): The observations at coordinates ``x`` from\n:func:`GP.compute`.\nquiet (bool): If true, return ``-numpy.inf`` for non-positive\ndefinite matrices instead of throwing an error.\n\nReturns:\nfloat: The marginalized likelihood of the GP model.\n\nRaises:\nValueError: For mismatched dimensions.\nsolver.LinAlgError: For non-positive definite matrices.", "source": "codesearchnet"}
{"code": "def from_corpus(cls, corpus):\n        \n\n        ds = Corpus()\n\n        \n        tracks = copy.deepcopy(list(corpus.tracks.values()))\n        track_mapping = ds.import_tracks(tracks)\n\n        \n        issuers = copy.deepcopy(list(corpus.issuers.values()))\n        issuer_mapping = ds.import_issuers(issuers)\n\n        \n        utterances = copy.deepcopy(list(corpus.utterances.values()))\n        for utterance in utterances:\n            utterance.track = track_mapping[utterance.track.idx]\n\n            if utterance.issuer is not None:\n                utterance.issuer = issuer_mapping[utterance.issuer.idx]\n\n        ds.import_utterances(utterances)\n\n        \n        subviews = copy.deepcopy(corpus.subviews)\n        for subview_idx, subview in subviews.items():\n            ds.import_subview(subview_idx, subview)\n\n        \n        for feat_container_idx, feature_container in corpus.feature_containers.items():\n            ds.new_feature_container(feat_container_idx, feature_container.path)\n\n        return ds", "docstring": "Create a new modifiable corpus from any other CorpusView.\nThis can, for example, be used to create an independent modifiable corpus from a subview.\n\nArgs:\ncorpus (CorpusView): The corpus to create a copy from.\n\nReturns:\nCorpus: A new corpus with the same data as the given one.", "source": "juraj-google-style"}
{"code": "def __init__(self, faulty_file, msg):\n        \n        self.file = faulty_file\n        self.msg = msg\n        super().__init__(faulty_file, msg)", "docstring": "Initialization of instances:\n\nArgs:\nfaulty_file (pathlike): path of the file where a parsing problem\nwas encountered.\nmsg (str): error message.\n\nAttributes:\nfile (pathlike): path of the file where a parsing problem was\nencountered.\nmsg (str): error message.", "source": "juraj-google-style"}
{"code": "def pi_to_number(self, page=1, item=1):\n        \n        if page > 1:\n            return ((page - 1) * self.page_items) + item\n        else:\n            return 0 + item", "docstring": "Convert subpage & subitem to an integer\n\n* if page == 1, then return 0 + item, since the item count is the true # of items\n* if page == 2, then return (page - 1) * items_per_page + item, since we are\nreturning the # of items on a full page plus the current item.\n\nArgs:\n* None\n\nReturns:\n* Integer - which represents the number of items up to the page.", "source": "juraj-google-style"}
{"code": "def __getitem__(self, column):\n        \n        if isinstance(column, (list, tuple)):\n            ret = []\n            for col in column:\n                ret.append(self[col])\n            return ret\n\n        try:\n            return self._values[self._index[column]]\n        except (KeyError, TypeError, ValueError):\n            pass\n\n        \n        try:\n            return self._values[column]\n        except (IndexError, TypeError):\n            pass\n\n        raise IndexError('No such column \"%s\" in row.' % column)", "docstring": "Support for [] notation.\n\nArgs:\ncolumn: Tuple of column names, or a (str) column name, or positional\ncolumn number, 0-indexed.\n\nReturns:\nA list or string with column value(s).\n\nRaises:\nIndexError: The given column(s) were not found.", "source": "juraj-google-style"}
{"code": "def classify_coincident(st_vals, coincident):\n    if (not coincident):\n        return None\n    if ((st_vals[(0, 0)] >= st_vals[(0, 1)]) or (st_vals[(1, 0)] >= st_vals[(1, 1)])):\n        return UNUSED_T\n    else:\n        return CLASSIFICATION_T.COINCIDENT", "docstring": "r\"\"\"Determine if coincident parameters are \"unused\".\n\n.. note::\n\nThis is a helper for :func:`surface_intersections`.\n\nIn the case that ``coincident`` is :data:`True`, then we'll have two\nsets of parameters :math:`(s_1, t_1)` and :math:`(s_2, t_2)`.\n\nIf one of :math:`s1 < s2` or :math:`t1 < t2` is not satisfied, the\ncoincident segments will be moving in opposite directions, hence don't\ndefine an interior of an intersection.\n\n.. warning::\n\nIn the \"coincident\" case, this assumes, but doesn't check, that\n``st_vals`` is ``2 x 2``.\n\nArgs:\nst_vals (numpy.ndarray): ``2 X N`` array of intersection parameters.\ncoincident (bool): Flag indicating if the intersections are the\nendpoints of coincident segments of two curves.\n\nReturns:\nOptional[.IntersectionClassification]: The classification of the\nintersections.", "source": "codesearchnet"}
{"code": "def _InvokeGitkitApi(self, method, params=None, need_service_account=True):\n    body = (simplejson.dumps(params) if params else None)\n    req = urllib_request.Request((self.google_api_url + method))\n    req.add_header('Content-type', 'application/json')\n    if need_service_account:\n        if self.credentials:\n            access_token = self.credentials.get_access_token().access_token\n        elif (self.service_account_email and self.service_account_key):\n            access_token = self._GetAccessToken()\n        else:\n            raise errors.GitkitClientError('Missing service account credentials')\n        req.add_header('Authorization', ('Bearer ' + access_token))\n    try:\n        binary_body = (body.encode('utf-8') if body else None)\n        raw_response = urllib_request.urlopen(req, binary_body).read()\n    except urllib_request.HTTPError as err:\n        if (err.code == 400):\n            raw_response = err.read()\n        else:\n            raise\n    return self._CheckGitkitError(raw_response)", "docstring": "Invokes Gitkit API, with optional access token for service account.\n\nArgs:\nmethod: string, the api method name.\nparams: dict of optional parameters for the API.\nneed_service_account: false if service account is not needed.\n\nRaises:\nGitkitClientError: if the request is bad.\nGitkitServerError: if Gitkit can not handle the request.\n\nReturns:\nAPI response as dict.", "source": "codesearchnet"}
{"code": "def num_gpus():\n    return context().num_gpus()", "docstring": "Get the number of available GPU devices.\n\nReturns:\nThe number of available GPU devices.", "source": "github-repos"}
{"code": "def putenv(key, value):\n    key = path2fsn(key)\n    value = path2fsn(value)\n    if (is_win and PY2):\n        try:\n            set_windows_env_var(key, value)\n        except WindowsError:\n            raise ValueError\n    else:\n        try:\n            os.putenv(key, value)\n        except OSError:\n            raise ValueError", "docstring": "Like `os.putenv` but takes unicode under Windows + Python 2\n\nArgs:\nkey (pathlike): The env var to get\nvalue (pathlike): The value to set\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def _example_from_array_spec(self, prop_spec):\n    if isinstance(prop_spec['items'], list):\n        return [self.get_example_from_prop_spec(item_prop_spec) for item_prop_spec in prop_spec['items']]\n    elif ('type' in prop_spec['items'].keys()):\n        if (('format' in prop_spec['items'].keys()) and (prop_spec['items']['format'] == 'date-time')):\n            return self._get_example_from_basic_type('datetime')\n        else:\n            return self._get_example_from_basic_type(prop_spec['items']['type'])\n    elif (('$ref' in prop_spec['items'].keys()) or (('schema' in prop_spec) and ('$ref' in prop_spec['schema']['items'].keys()))):\n        definition_name = (self.get_definition_name_from_ref(prop_spec['items']['$ref']) or self.get_definition_name_from_ref(prop_spec['schema']['items']['$ref']))\n        if self.build_one_definition_example(definition_name):\n            example_dict = self.definitions_example[definition_name]\n            if (not isinstance(example_dict, dict)):\n                return [example_dict]\n            if (len(example_dict) == 1):\n                try:\n                    res = example_dict[example_dict.keys()[0]]\n                except TypeError:\n                    res = example_dict[list(example_dict)[0]]\n                return res\n            else:\n                return_value = {}\n                for (example_name, example_value) in example_dict.items():\n                    return_value[example_name] = example_value\n                return [return_value]\n    elif ('properties' in prop_spec['items']):\n        prop_example = {}\n        for (prop_name, prop_spec) in prop_spec['items']['properties'].items():\n            example = self.get_example_from_prop_spec(prop_spec)\n            if (example is not None):\n                prop_example[prop_name] = example\n        return [prop_example]", "docstring": "Get an example from a property specification of an array.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example array.", "source": "codesearchnet"}
{"code": "def apply_transformation(self, structure):\n        \n        if structure.is_ordered:\n            return structure\n\n        species = [dict(sp) for sp in structure.species_and_occu]\n\n        for sp in species:\n            for k, v in sp.items():\n                old_occ = sp[k]\n                new_occ = float(\n                    Fraction(old_occ).limit_denominator(self.max_denominator))\n                if self.fix_denominator:\n                    new_occ = around(old_occ*self.max_denominator)\\\n                        / self.max_denominator\n                if round(abs(old_occ - new_occ), 6) > self.tol:\n                    raise RuntimeError(\n                        \"Cannot discretize structure within tolerance!\")\n                sp[k] = new_occ\n\n        return Structure(structure.lattice, species, structure.frac_coords)", "docstring": "Discretizes the site occupancies in the structure.\n\nArgs:\nstructure: disordered Structure to discretize occupancies\n\nReturns:\nA new disordered Structure with occupancies discretized", "source": "juraj-google-style"}
{"code": "def _make_dense_default(self, key, shape, dtype):\n    default_value = self.dense_defaults.get(key)\n    if shape.ndims is not None and shape.ndims > 0 and (shape.dims[0].value is None):\n        if default_value is None:\n            default_value = ops.convert_to_tensor('' if dtype == dtypes.string else 0, dtype=dtype)\n        else:\n            key_name = 'padding_' + re.sub('[^A-Za-z0-9_.\\\\-/]', '_', key)\n            default_value = ops.convert_to_tensor(default_value, dtype=dtype, name=key_name)\n            default_value = array_ops.reshape(default_value, [])\n    elif default_value is None:\n        default_value = constant_op.constant([], dtype=dtype)\n    elif not isinstance(default_value, tensor.Tensor):\n        key_name = 'key_' + re.sub('[^A-Za-z0-9_.\\\\-/]', '_', key)\n        default_value = ops.convert_to_tensor(default_value, dtype=dtype, name=key_name)\n        default_value = array_ops.reshape(default_value, shape)\n    return default_value", "docstring": "Construct the default value tensor for a specified dense feature.\n\nArgs:\nkey: The key string identifying the dense feature.\nshape: The dense feature's shape.\ndtype: The dense feature's dtype.\n\nReturns:\nA Tensor.", "source": "github-repos"}
{"code": "def _enrichment_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n    if (pipeline := test_spec.get('pipeline', None)):\n        for transform in pipeline.get('transforms', []):\n            if transform.get('type', '').startswith('Enrichment'):\n                transform['type'] = 'TestEnrichment'\n    return test_spec", "docstring": "Preprocessor for tests that involve the Enrichment transform.\n\nThis preprocessor replaces the actual Enrichment transform with a mock\n`TestEnrichment` transform. This allows the test to verify the pipeline's\ncorrectness without requiring external services like BigTable or BigQuery.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with Enrichment transforms replaced.", "source": "github-repos"}
{"code": "def structure_path(self, path):\n    if (not path):\n        self.structure_dir = None\n        self.structure_file = None\n    else:\n        if (not op.exists(path)):\n            raise OSError('{}: file does not exist!'.format(path))\n        if (not op.dirname(path)):\n            self.structure_dir = '.'\n        else:\n            self.structure_dir = op.dirname(path)\n        self.structure_file = op.basename(path)", "docstring": "Provide pointers to the paths of the structure file\n\nArgs:\npath: Path to structure file", "source": "codesearchnet"}
{"code": "def update(self, *args, **kwargs):\n    for next_dict in chain(args, (kwargs,)):\n        for (k, v) in next_dict.items():\n            self[k] = v", "docstring": "Equivalent to the python dict update method.\n\nUpdate the dictionary with the key/value pairs from other, overwriting\nexisting keys.\n\nArgs:\nother (dict): The source of key value pairs to add to headers\nKeyword Args:\nAll keyword arguments are stored in header directly\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_node(self, node_id, name, labels):\n    node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})\n    try:\n        node.add_labels(*labels)\n    except NotImplementedError:\n        pass", "docstring": "Add the node with name and labels.\n\nArgs:\nnode_id: Id for the node.\nname: Name for the node.\nlabels: Label for the node.\n\nRaises:\nNotImplementedError: When adding labels is not supported.", "source": "codesearchnet"}
{"code": "def apply_grad_processors(opt, gradprocs):\n    assert isinstance(gradprocs, (list, tuple)), gradprocs\n    for gp in gradprocs:\n        assert isinstance(gp, GradientProcessor), gp\n\n    class _ApplyGradientProcessor(ProxyOptimizer):\n\n        def __init__(self, opt, gradprocs):\n            self._gradprocs = gradprocs[:]\n            super(_ApplyGradientProcessor, self).__init__(opt)\n\n        def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n            g = self._apply(grads_and_vars)\n            return self._opt.apply_gradients(g, global_step, name)\n\n        def _apply(self, g):\n            for proc in self._gradprocs:\n                g = proc.process(g)\n            return g\n    return _ApplyGradientProcessor(opt, gradprocs)", "docstring": "Wrapper around optimizers to apply gradient processors.\n\nArgs:\nopt (tf.train.Optimizer):\ngradprocs (list[GradientProcessor]): gradient processors to add to the\noptimizer.\n\nReturns:\na :class:`tf.train.Optimizer` instance which runs the gradient\nprocessors before updating the variables.", "source": "codesearchnet"}
{"code": "def __init__(self, model: PreTrainedModel):\n    super().__init__()\n    if model.generation_config is None:\n        raise AssertionError('The model must have a generation config to be exported with static caching. Please set `generation_config`.')\n    if not model.generation_config.use_cache:\n        raise AssertionError('The model must have caching enabled to be exported with static caching. Please set `generation_config.use_cache=True`.')\n    if model.generation_config.cache_implementation != 'static':\n        raise AssertionError(\"The model must use a 'static' caching implementation to be exported with static caching. Please set `generation_config.cache_implementation='static'`.\")\n    self.model = model\n    self.static_cache = StaticCache(config=self.model.config, max_batch_size=self.model.generation_config.cache_config.batch_size, max_cache_len=self.model.generation_config.cache_config.max_cache_len, device=self.model.generation_config.cache_config.device, dtype=self.model.dtype)\n    for i in range(len(self.static_cache.key_cache)):\n        self.register_buffer(f'key_cache_{i}', self.static_cache.key_cache[i], persistent=False)\n        self.register_buffer(f'value_cache_{i}', self.static_cache.value_cache[i], persistent=False)", "docstring": "Initializes the wrapper module with the pretrained model.\n\nArgs:\nmodel (`PreTrainedModel`): The pretrained model to wrap. The model must have caching\nenabled and use a 'static' caching implementation.\n\nRaises:\nAssertionError: If the pretrained model does not have caching enabled or if it does\nnot use a 'static' caching implementation in `model.generation_config`.", "source": "github-repos"}
{"code": "def RegisterImplementation(cache_name, map_name, cache):\n    global _cache_implementations\n    if cache_name not in _cache_implementations:\n        logging.info('Registering [%s] cache for [%s].', cache_name, map_name)\n        _cache_implementations[cache_name] = {}\n    _cache_implementations[cache_name][map_name] = cache", "docstring": "Register a Cache implementation with the CacheFactory.\n\nChild modules are expected to call this method in the file-level scope\nso that the CacheFactory is aware of them.\n\nArgs:\ncache_name: (string) The name of the NSS backend.\nmap_name: (string) The name of the map handled by this Cache.\ncache: A class type that is a subclass of Cache.\n\nReturns: Nothing", "source": "github-repos"}
{"code": "def get_selector(self, name):\n        \n\n        try:\n            return self.matcher.by_name[name]\n        except (AttributeError, KeyError):\n            if self.base is not None:\n                return self.base.get_selector(name)\n            else:\n                raise KeyError(\"No selector found for style '{}'\".format(name))", "docstring": "Find a selector mapped to a style in this or a base style sheet.\n\nArgs:\nname (str): a style name\n\nReturns:\n:class:`.Selector`: the selector mapped to the style `name`\n\nRaises:\nKeyError: if the style `name` was not found in this or a base\nstyle sheet", "source": "juraj-google-style"}
{"code": "def op(name, data, bucket_count=None, display_name=None, description=None, collections=None):\n    import tensorflow.compat.v1 as tf\n    if (display_name is None):\n        display_name = name\n    summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)\n    with tf.name_scope(name):\n        tensor = _buckets(data, bucket_count=bucket_count)\n        return tf.summary.tensor_summary(name='histogram_summary', tensor=tensor, collections=collections, summary_metadata=summary_metadata)", "docstring": "Create a legacy histogram summary op.\n\nArguments:\nname: A unique name for the generated summary node.\ndata: A `Tensor` of any shape. Must be castable to `float64`.\nbucket_count: Optional positive `int`. The output will have this\nmany buckets, except in two edge cases. If there is no data, then\nthere are no buckets. If there is data but all points have the\nsame value, then there is one bucket whose left and right\nendpoints are the same.\ndisplay_name: Optional name for this summary in TensorBoard, as a\nconstant `str`. Defaults to `name`.\ndescription: Optional long-form description for this summary, as a\nconstant `str`. Markdown is supported. Defaults to empty.\ncollections: Optional list of graph collections keys. The new\nsummary op is added to these collections. Defaults to\n`[Graph Keys.SUMMARIES]`.\n\nReturns:\nA TensorFlow summary op.", "source": "codesearchnet"}
{"code": "def setDocuments(self, documenting_pid, documented_pid):\n        \n        self._check_initialized()\n        documenting_id = self.getObjectByPid(documenting_pid)\n        documented_id = self.getObjectByPid(documented_pid)\n        self.add((documenting_id, CITO.documents, documented_id))", "docstring": "Add a CiTO, the Citation Typing Ontology, triple asserting that\n``documenting_pid`` documents ``documented_pid``.\n\nAdds assertion: ``documenting_pid cito:documents documented_pid``\n\nArgs:\ndocumenting_pid: str\nPID of a Science Object that documents ``documented_pid``.\n\ndocumented_pid: str\nPID of a Science Object that is documented by ``documenting_pid``.", "source": "juraj-google-style"}
{"code": "def print_stack_info(self):\n        \n        try:\n            rest_api_id = None\n            deployment_found = False\n\n            response = self._cf_client.describe_stack_resources(\n                StackName=self._stack_name\n            )\n\n            print('\\nThe following resources were created:')\n            rows = []\n            for resource in response['StackResources']:\n                if resource['ResourceType'] == 'AWS::ApiGateway::RestApi':\n                    rest_api_id = resource['PhysicalResourceId']\n                elif resource['ResourceType'] == 'AWS::ApiGateway::Deployment':\n                    deployment_found = True\n\n                row = []\n                row.append(resource['ResourceType'])\n                row.append(resource['LogicalResourceId'])\n                row.append(resource['PhysicalResourceId'])\n                rows.append(row)\n                \n            print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))\n\n            if rest_api_id and deployment_found:\n                url = 'https://{}.execute-api.{}.amazonaws.com/{}'.format(\n                    rest_api_id,\n                    self._region,\n                    '<stage>'\n                )\n                print('\\nThe deployed service can be found at this URL:')\n                print('\\t{}\\n'.format(url))\n\n            return response\n        except Exception as wtf:\n            print(wtf)\n            return None", "docstring": "List resources from the given stack\n\nArgs:\nNone\n\nReturns:\nA dictionary filled with resources or None if things went sideways", "source": "juraj-google-style"}
{"code": "def WriteOutput(self, output_file, feed_merger,\n                  old_feed_path, new_feed_path, merged_feed_path):\n    \n    if merged_feed_path is None:\n      html_merged_feed_path = ''\n    else:\n      html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % (\n          merged_feed_path)\n\n    html_header =  % locals()\n\n    html_stats = self._GenerateStatsTable(feed_merger)\n    html_summary = self._GenerateSummary()\n    html_notices = self._GenerateNotices()\n    html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)\n    html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)\n\n    html_footer =  % (transitfeed.__version__,\n              time.strftime('%B %d, %Y at %I:%M %p %Z'))\n\n    output_file.write(transitfeed.EncodeUnicode(html_header))\n    output_file.write(transitfeed.EncodeUnicode(html_stats))\n    output_file.write(transitfeed.EncodeUnicode(html_summary))\n    output_file.write(transitfeed.EncodeUnicode(html_notices))\n    output_file.write(transitfeed.EncodeUnicode(html_errors))\n    output_file.write(transitfeed.EncodeUnicode(html_warnings))\n    output_file.write(transitfeed.EncodeUnicode(html_footer))", "docstring": "Write the HTML output to a file.\n\nArgs:\noutput_file: The file object that the HTML output will be written to.\nfeed_merger: The FeedMerger instance.\nold_feed_path: The path to the old feed file as a string.\nnew_feed_path: The path to the new feed file as a string\nmerged_feed_path: The path to the merged feed file as a string. This\nmay be None if no merged feed was written.", "source": "juraj-google-style"}
{"code": "def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    kernel_shape = kernel.shape.as_list()\n    if padding == 'causal':\n        left_pad = dilation_rate * (kernel_shape[0] - 1)\n        x = temporal_padding(x, (left_pad, 0))\n        padding = 'valid'\n    padding = _preprocess_padding(padding)\n    x, tf_data_format = _preprocess_conv1d_input(x, data_format)\n    x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NWC':\n        x = array_ops.transpose(x, (0, 2, 1))\n    return x", "docstring": "1D convolution.\n\nArgs:\nx: Tensor or variable.\nkernel: kernel tensor.\nstrides: stride integer.\npadding: string, `\"same\"`, `\"causal\"` or `\"valid\"`.\ndata_format: string, one of \"channels_last\", \"channels_first\".\ndilation_rate: integer dilate rate.\n\nReturns:\nA tensor, result of 1D convolution.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.", "source": "github-repos"}
{"code": "def _section_from_possible_title(possible_title):\n    for section in SECTION_TITLES:\n        if _matches_section(possible_title, section):\n            return section\n    return None", "docstring": "Returns a section matched by the possible title, or None if none match.\n\nArgs:\npossible_title: A string that may be the title of a new section.\nReturns:\nA Section type if one matches, or None if no section type matches.", "source": "github-repos"}
{"code": "def randint(self, low: int, high: int) -> int:\n        \n        return int(lib.TCOD_random_get_i(self.random_c, low, high))", "docstring": "Return a random integer within the linear range: low <= n <= high.\n\nArgs:\nlow (int): The lower bound of the random range.\nhigh (int): The upper bound of the random range.\n\nReturns:\nint: A random integer.", "source": "juraj-google-style"}
{"code": "def get_google_drive_folder_location():\n    gdrive_db_path = 'Library/Application Support/Google/Drive/sync_config.db'\n    yosemite_gdrive_db_path = 'Library/Application Support/Google/Drive/user_default/sync_config.db'\n    yosemite_gdrive_db = os.path.join(os.environ['HOME'], yosemite_gdrive_db_path)\n    if os.path.isfile(yosemite_gdrive_db):\n        gdrive_db_path = yosemite_gdrive_db\n    googledrive_home = None\n    gdrive_db = os.path.join(os.environ['HOME'], gdrive_db_path)\n    if os.path.isfile(gdrive_db):\n        con = sqlite3.connect(gdrive_db)\n        if con:\n            cur = con.cursor()\n            query = \"SELECT data_value FROM data WHERE entry_key = 'local_sync_root_path';\"\n            cur.execute(query)\n            data = cur.fetchone()\n            googledrive_home = str(data[0])\n            con.close()\n    if (not googledrive_home):\n        error('Unable to find your Google Drive install =(')\n    return googledrive_home", "docstring": "Try to locate the Google Drive folder.\n\nReturns:\n(str) Full path to the current Google Drive folder", "source": "codesearchnet"}
{"code": "def set_cellpy_datadir(self, directory=None):\n        \n\n        if directory is None:\n            self.logger.info(\"no directory name given\")\n            return\n        if not os.path.isdir(directory):\n            self.logger.info(\"directory does not exist\")\n            return\n        self.cellpy_datadir = directory", "docstring": "Set the directory containing .hdf5-files.\n\nUsed for setting the directory to look for hdf5-files in.\nA valid directory name is required.\n\nArgs:\ndirectory (str): path to hdf5-directory\n\nExample:\n>>> d = CellpyData()\n>>> directory = \"MyData/HDF5\"\n>>> d.set_cellpy_datadir(directory)", "source": "juraj-google-style"}
{"code": "def DeregisterPlugin(cls, plugin_class):\n    name = getattr(plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)\n    name = name.lower()\n    if (name not in cls._plugins):\n        raise KeyError('Artifact plugin class not set for name: {0:s}.'.format(name))\n    del cls._plugins[name]\n    if (name in cls._file_system_plugins):\n        del cls._file_system_plugins[name]\n    if (name in cls._knowledge_base_plugins):\n        del cls._knowledge_base_plugins[name]\n    if (name in cls._windows_registry_plugins):\n        del cls._windows_registry_plugins[name]", "docstring": "Deregisters a preprocess plugin class.\n\nArgs:\nplugin_class (type): preprocess plugin class.\n\nRaises:\nKeyError: if plugin class is not set for the corresponding name.\nTypeError: if the source type of the plugin class is not supported.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.Lookup = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/Lookup\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupResponse.FromString,\n        )\n        self.RunQuery = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/RunQuery\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryResponse.FromString,\n        )\n        self.BeginTransaction = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/BeginTransaction\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionResponse.FromString,\n        )\n        self.Commit = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/Commit\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitResponse.FromString,\n        )\n        self.Rollback = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/Rollback\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackResponse.FromString,\n        )\n        self.AllocateIds = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/AllocateIds\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsResponse.FromString,\n        )\n        self.ReserveIds = channel.unary_unary(\n            \"/google.datastore.v1.Datastore/ReserveIds\",\n            request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def UpdateFrom(self, src):\n    if (not isinstance(src, PathInfo)):\n        raise TypeError(('expected `%s` but got `%s`' % (PathInfo, type(src))))\n    if (self.path_type != src.path_type):\n        raise ValueError(('src [%s] does not represent the same path type as self [%s]' % (src.path_type, self.path_type)))\n    if (self.components != src.components):\n        raise ValueError(('src [%s] does not represent the same path as self [%s]' % (src.components, self.components)))\n    if src.HasField('stat_entry'):\n        self.stat_entry = src.stat_entry\n    self.last_stat_entry_timestamp = max(self.last_stat_entry_timestamp, src.last_stat_entry_timestamp)\n    self.directory = (self.directory or src.directory)", "docstring": "Merge path info records.\n\nMerges src into self.\nArgs:\nsrc: An rdfvalues.objects.PathInfo record, will be merged into self.\n\nRaises:\nValueError: If src does not represent the same path.", "source": "codesearchnet"}
{"code": "def __init__(self, callback):\n    \n    super(RPCServer, self).__init__()\n    self._callback = callback", "docstring": "Initializes the RPC server object.\n\nArgs:\ncallback (function): callback to invoke on get status RPC request.", "source": "juraj-google-style"}
{"code": "def _process_book(link):\n    \n    \n    data = DOWNER.download(link)\n    dom = dhtmlparser.parseString(\n        utils.handle_encodnig(data)\n    )\n    dhtmlparser.makeDoubleLinked(dom)\n\n    \n    \n    price = None\n    try:\n        price = _strip_content(zapi.get_price(dom))\n    except UserWarning:\n        price = dom.find(\"p\", {\"class\": \"vaseCena\"})\n\n        if price:\n            price = price[0].getContent().replace(\"&nbsp;\", \" \")\n            price = filter(lambda x: x.isdigit(), price.strip())\n\n            if price:\n                price = price[0] + \"kč\"\n            else:\n                price = \"-1\"\n        else:\n            price = \"-1\"\n\n    \n    pub = Publication(\n        title=_strip_content(zapi.get_title(dom)),\n        authors=_parse_authors(zapi.get_author(dom)),\n        price=price,\n        publisher=_strip_content(zapi.get_publisher(dom))\n    )\n\n    \n    pub.optionals.URL = link\n    pub.optionals.pages = _strip_content(zapi.get_pages(dom))\n    pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))\n    pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))\n    pub.optionals.binding = _strip_content(zapi.get_binding(dom))\n\n    \n    if pub.title.startswith(\"E-kniha:\"):\n        pub.title = pub.title.replace(\"E-kniha:\", \"\", 1).strip()\n        pub.optionals.is_ebook = True\n\n    if pub.optionals.ISBN:\n        if \" \" in pub.optionals.ISBN:\n            pub.optionals.ISBN = pub.optionals.ISBN.split(\" \")[0]\n\n        if \"(\" in pub.optionals.ISBN:\n            pub.optionals.ISBN = pub.optionals.ISBN.split(\"(\")[0]\n\n    return pub", "docstring": "Download and parse available information about a book from the publisher's\nweb pages.\n\nArgs:\nlink (str): URL of the book at the publisher's web pages.\n\nReturns:\nobj: :class:`.Publication` instance with book details.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_path_regex=None, log_format_regex=None, top_dir=None):\n        \n        if file_path_regex is not None:\n            self.file_path_regex = file_path_regex\n        if log_format_regex is not None:\n            self.log_format_regex = log_format_regex\n        if top_dir is not None:\n            self.top_dir = top_dir\n        self._content = None", "docstring": "Init method.\n\nArgs:\nfile_path_regex (regex): the regex to find the log files.\nlog_format_regex (regex): the regex to parse the log files.\ntop_dir (str): the path to the root directory containing the logs.", "source": "juraj-google-style"}
{"code": "def fit(self, X):\n        \n        if isinstance(X, (pd.Series, pd.DataFrame)):\n            self.name = X.name\n\n        self.constant_value = self._get_constant_value(X)\n\n        if self.constant_value is None:\n            self.mean = np.mean(X)\n            self.std = np.std(X)\n\n        else:\n            self._replace_constant_methods()\n\n        self.fitted = True", "docstring": "Fit the model.\n\nArguments:\nX: `np.ndarray` of shape (n, 1).\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def RegisterMessage(self, message):\n    desc = message.DESCRIPTOR\n    self._symbols[desc.full_name] = message\n    if (desc.file.name not in self._symbols_by_file):\n        self._symbols_by_file[desc.file.name] = {}\n    self._symbols_by_file[desc.file.name][desc.full_name] = message\n    self.pool.AddDescriptor(desc)\n    return message", "docstring": "Registers the given message type in the local database.\n\nArgs:\nmessage: a message.Message, to be registered.\n\nReturns:\nThe provided message.", "source": "codesearchnet"}
{"code": "def run_program(self, name, arguments=[], timeout=30, exclusive=False):\n    logger.debug('Running program ...')\n    if exclusive:\n        kill_longrunning(self.config)\n    prog = RunningProgram(self, name, arguments, timeout)\n    return prog.expect_end()", "docstring": "Runs a program in the working directory to completion.\n\nArgs:\nname (str):        The name of the program to be executed.\narguments (tuple): Command-line arguments for the program.\ntimeout (int):     The timeout for execution.\nexclusive (bool):  Prevent parallel validation runs on the\ntest machines, e.g. when doing performance\nmeasurements for submitted code.\n\nReturns:\ntuple: A tuple of the exit code, as reported by the operating system,\nand the output produced during the execution.", "source": "codesearchnet"}
{"code": "def util_granulate_time_series(time_series, scale):\n    \n    n = len(time_series)\n    b = int(np.fix(n / scale))\n    temp = np.reshape(time_series[0:b*scale], (b, scale))\n    cts = np.mean(temp, axis = 1)\n    return cts", "docstring": "Extract coarse-grained time series\n\nArgs:\ntime_series: Time series\nscale: Scale factor\n\nReturns:\nVector of coarse-grained time series with given scale factor", "source": "juraj-google-style"}
{"code": "def __init__(self, optimizer, num_steps=10, unroll_loop=False, scope='multi-step', summary_labels=()):\n        \n        assert isinstance(num_steps, int) and num_steps > 0\n        self.num_steps = num_steps\n\n        assert isinstance(unroll_loop, bool)\n        self.unroll_loop = unroll_loop\n\n        super(MultiStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new multi-step meta optimizer instance.\n\nArgs:\noptimizer: The optimizer which is modified by this meta optimizer.\nnum_steps: Number of optimization steps to perform.", "source": "juraj-google-style"}
{"code": "def _apply_filters_to_first_location_occurrence(match_traversal, location_to_filters, already_filtered_locations):\n    new_match_traversal = []\n    newly_filtered_locations = set()\n    for match_step in match_traversal:\n        current_location = match_step.as_block.location\n        if (current_location in newly_filtered_locations):\n            raise AssertionError(u'The same location {} was encountered twice in a single match traversal: {}. This should never happen.'.format(current_location, match_traversal))\n        if all(((current_location in location_to_filters), (current_location not in already_filtered_locations))):\n            where_block = Filter(_filter_list_to_conjunction_expression(location_to_filters[current_location]))\n            newly_filtered_locations.add(current_location)\n        else:\n            where_block = None\n        new_match_step = MatchStep(root_block=match_step.root_block, coerce_type_block=match_step.coerce_type_block, where_block=where_block, as_block=match_step.as_block)\n        new_match_traversal.append(new_match_step)\n    return (new_match_traversal, newly_filtered_locations)", "docstring": "Apply all filters for a specific location into its first occurrence in a given traversal.\n\nFor each location in the given match traversal,\nconstruct a conjunction of all filters applied to that location,\nand apply the resulting Filter to the first instance of the location.\n\nArgs:\nmatch_traversal: list of MatchStep objects to be lowered\nlocation_to_filters: dict mapping each location in the MatchQuery which contains\nthe given match traversal to a list of filters applied at that location\nalready_filtered_locations: set of locations that have already had their filters applied\n\nReturns:\nnew list of MatchStep objects with all filters for any given location composed into\na single filter which is applied to the first instance of that location", "source": "codesearchnet"}
{"code": "def movies_upcoming(self, **kwargs):\n        \n        path = self._get_path('movies_upcoming')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Gets the upcoming movies from the API.\n\nArgs:\npage_limit (optional): number of movies to show per page, default=16\npage (optional): results page number, default=1\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def format_usage(doc, width=None):\n    sections = doc.replace('\\r', '').split('\\n\\n')\n    width = (width or get_terminal_size().columns or 80)\n    return '\\n\\n'.join((_wrap_section(s.strip(), width) for s in sections))", "docstring": "Format the docstring for display to the user.\n\nArgs:\ndoc: The docstring to reformat for display.\n\nReturns:\nThe docstring formatted to parse and display to the user. This includes\ndedenting, rewrapping, and translating the docstring if necessary.", "source": "codesearchnet"}
{"code": "def stage_out(self, file, executor):\n    if ((file.scheme == 'http') or (file.scheme == 'https')):\n        raise Exception('HTTP/HTTPS file staging out is not supported')\n    elif (file.scheme == 'ftp'):\n        raise Exception('FTP file staging out is not supported')\n    elif (file.scheme == 'globus'):\n        globus_ep = self._get_globus_endpoint(executor)\n        stage_out_app = self._globus_stage_out_app()\n        return stage_out_app(globus_ep, inputs=[file])\n    else:\n        raise Exception('Staging out with unknown file scheme {} is not supported'.format(file.scheme))", "docstring": "Transport the file from the local filesystem to the remote Globus endpoint.\n\nThis function returns a DataFuture.\n\nArgs:\n- self\n- file (File) - file to stage out\n- executor (str) - Which executor the file is going to be staged out from.\nIf the executor argument is not specified for a file\nwith the 'globus' scheme, the file will be staged in to\nthe first executor with the \"globus\" key in a config.", "source": "codesearchnet"}
{"code": "def normalize_url(base_url, rel_url):\n    \n    if not rel_url:\n        return None\n\n    if not is_absolute_url(rel_url):\n        rel_url = rel_url.replace(\"../\", \"/\")\n\n        if (not base_url.endswith(\"/\")) and (not rel_url.startswith(\"/\")):\n            return base_url + \"/\" + rel_url.replace(\"../\", \"/\")\n\n        return base_url + rel_url.replace(\"../\", \"/\")\n\n    return rel_url", "docstring": "Normalize the `url` - from relative, create absolute URL.\n\nArgs:\nbase_url (str): Domain with ``protocol://`` string\nrel_url (str): Relative or absolute url.\n\nReturns:\nstr/None: Normalized URL or None if `url` is blank.", "source": "juraj-google-style"}
{"code": "def mobility(sdat, tstart=None, tend=None):\n    \n    tseries = sdat.tseries_between(tstart, tend)\n    steps = sdat.steps[tseries.index[0]:tseries.index[-1]]\n    time = []\n    mob = []\n    for step in steps.filter(rprof=True):\n        time.append(step.timeinfo['t'])\n        mob.append(step.rprof.iloc[-1].loc['vrms'] / step.timeinfo['vrms'])\n    return np.array(mob), np.array(time)", "docstring": "Plates mobility.\n\nCompute the ratio vsurf / vrms.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): time at which the computation should start. Use the\nbeginning of the time series data if set to None.\ntend (float): time at which the computation should end. Use the\nend of the time series data if set to None.\nReturns:\ntuple of :class:`numpy.array`: mobility and time arrays.", "source": "juraj-google-style"}
{"code": "def request(self, request):\n    url = '{}{}'.format(self._base_url, request.path)\n    timeout = self.poll_timeout\n    if (request.stream is True):\n        timeout = self.stream_timeout\n    try:\n        http_response = self._session.request(request.method, url, headers=self._headers, params=request.params, data=request.body, stream=request.stream, timeout=timeout)\n    except requests.exceptions.ConnectionError:\n        raise V20ConnectionError(url)\n    except requests.exceptions.ConnectTimeout:\n        raise V20Timeout(url, 'connect')\n    except requests.exceptions.ReadTimeout:\n        raise V20Timeout(url, 'read')\n    request.headers = http_response.request.headers\n    response = Response(request, request.method, http_response.url, http_response.status_code, http_response.reason, http_response.headers)\n    if request.stream:\n        response.set_line_parser(request.line_parser)\n        response.set_lines(http_response.iter_lines(self.stream_chunk_size))\n    else:\n        response.set_raw_body(http_response.text)\n    return response", "docstring": "Perform an HTTP request through the context\n\nArgs:\nrequest: A v20.request.Request object\n\nReturns:\nA v20.response.Response object", "source": "codesearchnet"}
{"code": "def _replace_args_with_defaults(self, _args=None, **kwargs):\n    \n    if _args is None:\n      _args = six.iterkeys(kwargs)\n    my_defaults = self.defaults\n    for k in _args:\n      if k not in kwargs:\n        if k in my_defaults:\n          kwargs[k] = my_defaults[k]\n        elif k in _defaults:\n          kwargs[k] = _defaults[k]\n    return kwargs", "docstring": "Internal method to fill absent values in the kwargs with the defaults.\n\nArgs:\n_args: A list of arguments to replace if a subset is required.  Name\nchosen to prevent conflicts with kwargs.\n**kwargs: The arguments to replace with defaults.\nReturns:\nA map with the same fields as kwargs, but absent values are filled with\ndefaults.", "source": "juraj-google-style"}
{"code": "def parse_environment_file(filename, world_size=(60, 60)):\n    \n\n    infile = open(filename)\n    lines = infile.readlines()\n    infile.close()\n\n    tasks = []\n\n    \n    res_order = []\n    res_dict = {}\n    for line in lines:\n        if line.startswith(\"GRADIENT_RESOURCE\"):\n            name, cells = parse_gradient(line, world_size)\n        elif line.startswith(\"CELL\"):\n            name, cells = parse_cell(line, world_size)\n        elif line.startswith(\"REACTION\"):\n            task = parse_reaction(line)\n            if task not in tasks:\n                tasks.append(task)\n        else:\n            continue\n\n        dict_increment(res_dict, name, cells)\n        if name not in res_order:\n            res_order.append(name)\n\n    \n    grid = make_niche_grid(res_dict, world_size)\n\n    return EnvironmentFile(grid, res_order, world_size, filename, tasks)", "docstring": "Extract information about spatial resources from an environment file.\n\nArguments:\nfilename - a string representing the path to the environment file.\nworld_size - a tuple representing the x and y coordinates of the world.\n(default: 60x60)\n\nReturns a list of lists of sets indicating the set of resources\navailable at each x,y location in the Avida grid.", "source": "juraj-google-style"}
{"code": "def add(self, layers, above=None, below=None):\n        \n\n        def add_named_layer(name, image):\n            image = self.get_image(image, output='vector')\n            if above is not None:\n                image[image < above] = 0.\n            if below is not None:\n                image[image > below] = 0.\n            self.layers[name] = image\n            self.stack.append(name)\n\n        if isinstance(layers, dict):\n            for (name, image) in layers.items():\n                add_named_layer(name, image)\n\n        else:\n            if not isinstance(layers, list):\n                layers = [layers]\n            for image in layers:\n                name = 'layer_%d' % len(self.stack)\n                add_named_layer(name, image)\n\n        self.set_mask()", "docstring": "Add one or more layers to the stack of masking layers.\nArgs:\nlayers: A string, NiBabel image, list, or dict. If anything other\nthan a dict is passed, assigns sequential layer names based on\nthe current position in stack; if a dict, uses key as the name\nand value as the mask image.", "source": "juraj-google-style"}
{"code": "def options(self):\n    if context.executing_eagerly():\n        options = self._options_tensor_to_options(self._options())\n        options._set_mutable(False)\n        return options\n    warnings.warn('To make it possible to preserve tf.data options across serialization boundaries, their implementation has moved to be part of the TensorFlow graph. As a consequence, the options value is in general no longer known at graph construction time. Invoking this method in graph mode retains the legacy behavior of the original implementation, but note that the returned value might not reflect the actual value of the options.')\n    return self._options_attr", "docstring": "Returns the options for this dataset and its inputs.\n\nReturns:\nA `tf.data.Options` object representing the dataset options.", "source": "github-repos"}
{"code": "def create_html_from_fragment(tag):\n    \n\n    try:\n        assert isinstance(tag, bs4.element.Tag)\n    except AssertionError:\n        raise TypeError\n    try:\n        assert tag.find_all('body') == []\n    except AssertionError:\n        raise ValueError\n\n    soup = BeautifulSoup('<html><head></head><body></body></html>', 'html.parser')\n    soup.body.append(tag)\n    return soup", "docstring": "Creates full html tree from a fragment. Assumes that tag should be wrapped in a body and is currently not\n\nArgs:\ntag: a bs4.element.Tag\n\nReturns:\"\nbs4.element.Tag: A bs4 tag representing a full html document", "source": "juraj-google-style"}
{"code": "def claim(self, file_readers):\n    (prefix_to_readers, filter_files, unclaimed_set) = self._find_varscan_files(file_readers)\n    prefix_by_patients = self._split_prefix_by_patient(prefix_to_readers)\n    self._validate_vcf_readers(prefix_by_patients)\n    vcf_hc_pairs = self._pair_files(prefix_to_readers, filter_files)\n    self._validate_vcf_hc_pairs(vcf_hc_pairs)\n    vcf_readers = self._create_vcf_readers(vcf_hc_pairs)\n    return (list(unclaimed_set), vcf_readers)", "docstring": "Recognizes and claims VarScan VCFs form the set of all input VCFs.\n\nEach defined caller has a chance to evaluate and claim all the incoming\nfiles as something that it can process. Since VarScan can claim\nhigh-confidence files as well, this process is significantly more\ncomplex than for other callers.\n\nArgs:\nfile_readers: the collection of currently unclaimed files\n\nReturns:\nA tuple of unclaimed readers and VarScanVcfReaders.", "source": "codesearchnet"}
{"code": "def generate_tuple_zip(self, token_list, n=2):\n        \n        return zip(*[token_list[i:] for i in range(n)])", "docstring": "Generate the N-gram.\n\nArgs:\ntoken_list:     The list of tokens.\nn               N\n\nReturns:\nzip of Tuple(N-gram)", "source": "juraj-google-style"}
{"code": "def add_to_graph(self, g=None, overwrite=False):\n    if not context.executing_eagerly() and (not g):\n        g = ops.get_default_graph()\n    if g is not None:\n        g._add_function_recursive(self._delayed_rewrite_functions.forward())", "docstring": "Registers the function, adds it to the graph g or default graph.\n\nArgs:\ng: If specified, registers the function with this graph. Defaults to the\ncurrent context (either the default graph or the eager context).\noverwrite: A bool. If True, its forward function will overwrite\nany existing function of the same signature name in the graph `g`.", "source": "github-repos"}
{"code": "def concat_video(video_list,\n                 out_file,\n                 vcodec=None,\n                 acodec=None,\n                 log_level='info',\n                 print_cmd=False,\n                 **kwargs):\n    \n    _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)\n    with open(tmp_filename, 'w') as f:\n        for filename in video_list:\n            f.write('file {}\\n'.format(osp.abspath(filename)))\n    options = {'log_level': log_level}\n    if vcodec is None:\n        options['vcodec'] = 'copy'\n    if acodec is None:\n        options['acodec'] = 'copy'\n    convert_video(\n        tmp_filename,\n        out_file,\n        print_cmd,\n        pre_options='-f concat -safe 0',\n        **options)\n    os.remove(tmp_filename)", "docstring": "Concatenate multiple videos into a single one.\n\nArgs:\nvideo_list (list): A list of video filenames\nout_file (str): Output video filename\nvcodec (None or str): Output video codec, None for unchanged\nacodec (None or str): Output audio codec, None for unchanged\nlog_level (str): Logging level of ffmpeg.\nprint_cmd (bool): Whether to print the final ffmpeg command.", "source": "juraj-google-style"}
{"code": "def _map_column_names_to_types(self, row_type):\n    try:\n        if not isinstance(row_type, RowTypeConstraint):\n            row_type = RowTypeConstraint.from_user_type(row_type)\n        inferred_types = {name: typ for name, typ in row_type._fields}\n        for k, t in inferred_types.items():\n            if t in _primitive_types_to_typing_container_type:\n                inferred_types[k] = _primitive_types_to_typing_container_type[t]\n        for name, typ in inferred_types.items():\n            if isinstance(typ, np.dtype):\n                inferred_types[name] = typ.type\n        return inferred_types\n    except:\n        return {}", "docstring": "Return a dictionary of column names and types.\nArgs:\nelement_type: A type of the element. This could be a NamedTuple or a Row.\nReturns:\nA dictionary of column names and types.", "source": "github-repos"}
{"code": "def update_configuration(self, timeout=-1):\n        \n        uri = \"{}/configuration\".format(self.data['uri'])\n        return self.update_with_zero_body(uri=uri, timeout=timeout)", "docstring": "Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps\nthat were performed as part of the enclosure add.\n\nArgs:\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nEnclosure", "source": "juraj-google-style"}
{"code": "def update_configuration(self, timeout=(- 1)):\n    uri = '{}/configuration'.format(self.data['uri'])\n    return self.update_with_zero_body(uri=uri, timeout=timeout)", "docstring": "Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps\nthat were performed as part of the enclosure add.\n\nArgs:\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nEnclosure", "source": "codesearchnet"}
{"code": "def remove_keywords_from_list(self, keyword_list):\n    if (not isinstance(keyword_list, list)):\n        raise AttributeError('keyword_list should be a list')\n    for keyword in keyword_list:\n        self.remove_keyword(keyword)", "docstring": "To remove keywords present in list\n\nArgs:\nkeyword_list (list(str)): List of keywords to remove\n\nExamples:\n>>> keyword_processor.remove_keywords_from_list([\"java\", \"python\"]})\nRaises:\nAttributeError: If `keyword_list` is not a list.", "source": "codesearchnet"}
{"code": "def generate(self, text):\n    if (not text):\n        raise Exception('No text to speak')\n    if (len(text) >= self.MAX_CHARS):\n        raise Exception('Number of characters must be less than 2000')\n    params = self.__params.copy()\n    params['text'] = text\n    self._data = requests.get(self.TTS_URL, params=params, stream=False).iter_content()", "docstring": "Try to get the generated file.\n\nArgs:\ntext: The text that you want to generate.", "source": "codesearchnet"}
{"code": "def check(self, orb):\n    return ((self.prev is not None) and (np.sign(self(orb)) != np.sign(self(self.prev))))", "docstring": "Method that check whether or not the listener is triggered\n\nArgs:\norb (Orbit):\n\nReturn:\nbool: True if there is a zero-crossing for the parameter watched by the listener", "source": "codesearchnet"}
{"code": "def _keys(self, pattern):\n    result = []\n    for client in self.redis_clients:\n        result.extend(list(client.scan_iter(match=pattern)))\n    return result", "docstring": "Execute the KEYS command on all Redis shards.\n\nArgs:\npattern: The KEYS pattern to query.\n\nReturns:\nThe concatenated list of results from all shards.", "source": "codesearchnet"}
{"code": "def build_pipeline(cls, project, zones, min_cores, min_ram, disk_size, boot_disk_size, preemptible, accelerator_type, accelerator_count, image, script_name, envs, inputs, outputs, pipeline_name):\n    if (min_cores is None):\n        min_cores = job_model.DEFAULT_MIN_CORES\n    if (min_ram is None):\n        min_ram = job_model.DEFAULT_MIN_RAM\n    if (disk_size is None):\n        disk_size = job_model.DEFAULT_DISK_SIZE\n    if (boot_disk_size is None):\n        boot_disk_size = job_model.DEFAULT_BOOT_DISK_SIZE\n    if (preemptible is None):\n        preemptible = job_model.DEFAULT_PREEMPTIBLE\n    docker_command = cls._build_pipeline_docker_command(script_name, inputs, outputs, envs)\n    input_envs = ([{'name': SCRIPT_VARNAME}] + [{'name': env.name} for env in envs if env.value])\n    input_files = [cls._build_pipeline_input_file_param(var.name, var.docker_path) for var in inputs if ((not var.recursive) and var.value)]\n    output_files = [cls._build_pipeline_file_param(var.name, var.docker_path) for var in outputs if ((not var.recursive) and var.value)]\n    return {'ephemeralPipeline': {'projectId': project, 'name': pipeline_name, 'resources': {'minimumCpuCores': min_cores, 'minimumRamGb': min_ram, 'bootDiskSizeGb': boot_disk_size, 'preemptible': preemptible, 'zones': google_base.get_zones(zones), 'acceleratorType': accelerator_type, 'acceleratorCount': accelerator_count, 'disks': [{'name': 'datadisk', 'autoDelete': True, 'sizeGb': disk_size, 'mountPoint': providers_util.DATA_MOUNT_POINT}]}, 'inputParameters': (input_envs + input_files), 'outputParameters': output_files, 'docker': {'imageName': image, 'cmd': docker_command}}}", "docstring": "Builds a pipeline configuration for execution.\n\nArgs:\nproject: string name of project.\nzones: list of zone names for jobs to be run at.\nmin_cores: int number of CPU cores required per job.\nmin_ram: int GB of RAM required per job.\ndisk_size: int GB of disk to attach under /mnt/data.\nboot_disk_size: int GB of disk for boot.\npreemptible: use a preemptible VM for the job\naccelerator_type: string GCE defined accelerator type.\naccelerator_count: int number of accelerators of the specified type to\nattach.\nimage: string Docker image name in which to run.\nscript_name: file name of the script to run.\nenvs: list of EnvParam objects specifying environment variables to set\nwithin each job.\ninputs: list of FileParam objects specifying input variables to set\nwithin each job.\noutputs: list of FileParam objects specifying output variables to set\nwithin each job.\npipeline_name: string name of pipeline.\n\nReturns:\nA nested dictionary with one entry under the key ephemeralPipeline\ncontaining the pipeline configuration.", "source": "codesearchnet"}
{"code": "def _update_repo(repo_config, store, tags_only):\n    repo_path = store.clone(repo_config['repo'], repo_config['rev'])\n    cmd_output('git', 'fetch', cwd=repo_path)\n    tag_cmd = ('git', 'describe', 'origin/master', '--tags')\n    if tags_only:\n        tag_cmd += ('--abbrev=0',)\n    else:\n        tag_cmd += ('--exact',)\n    try:\n        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()\n    except CalledProcessError:\n        tag_cmd = ('git', 'rev-parse', 'origin/master')\n        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()\n    if (rev == repo_config['rev']):\n        return repo_config\n    try:\n        path = store.clone(repo_config['repo'], rev)\n        manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n    except InvalidManifestError as e:\n        raise RepositoryCannotBeUpdatedError(six.text_type(e))\n    hooks = {hook['id'] for hook in repo_config['hooks']}\n    hooks_missing = (hooks - {hook['id'] for hook in manifest})\n    if hooks_missing:\n        raise RepositoryCannotBeUpdatedError('Cannot update because the tip of master is missing these hooks:\\n{}'.format(', '.join(sorted(hooks_missing))))\n    new_config = repo_config.copy()\n    new_config['rev'] = rev\n    return new_config", "docstring": "Updates a repository to the tip of `master`.  If the repository cannot\nbe updated because a hook that is configured does not exist in `master`,\nthis raises a RepositoryCannotBeUpdatedError\n\nArgs:\nrepo_config - A config for a repository", "source": "codesearchnet"}
{"code": "def count(self):\n    e = ((self.alpha * float((self.m ** 2))) / np.sum((2.0 ** (- self.reg))))\n    if (e <= ((5.0 / 2.0) * self.m)):\n        num_zero = (self.m - np.count_nonzero(self.reg))\n        return self._linearcounting(num_zero)\n    if (e <= ((1.0 / 30.0) * (1 << 32))):\n        return e\n    return self._largerange_correction(e)", "docstring": "Estimate the cardinality of the data values seen so far.\n\nReturns:\nint: The estimated cardinality.", "source": "codesearchnet"}
{"code": "def log_variable_sizes(var_list, tag, verbose=True, mesh_to_impl=None):\n  \n  if not var_list:\n    return\n\n  name_to_var = {v.name: v for v in var_list}\n  total_size = 0\n  total_slice_size = 0\n  for v_name in sorted(list(name_to_var)):\n    v = name_to_var[v_name]\n    v_size = v.shape.size\n    if mesh_to_impl is not None:\n      slice_size = mesh_to_impl[v.mesh].slice_size(v.shape)\n    else:\n      slice_size = 0\n    total_slice_size += slice_size\n    if verbose:\n      tf.logging.info(\n          \"Variable %s size %s slice_size %s %s\",\n          v.name.ljust(60),\n          str(v_size).ljust(12),\n          str(slice_size).ljust(12),\n          str(v.shape).ljust(60))\n      if isinstance(v, StackedVariable):\n        for n in v.original_names:\n          tf.logging.info(\"    \" + n)\n    total_size += v_size\n  tf.logging.info(\"%s count: %s  Total size: %s  Total slice_size: %s\",\n                  tag.ljust(30), str(len(var_list)).ljust(6),\n                  str(total_size).ljust(15),\n                  str(total_slice_size).ljust(15))", "docstring": "Log the sizes and shapes of variables, and the total size.\n\nArgs:\nvar_list: a list of variables; defaults to trainable_variables\ntag: a string; defaults to \"Trainable Variables\"\nverbose: bool, if True, log every weight; otherwise, log total size only.\nmesh_to_impl: an optional map from Mesh to MeshImpl", "source": "juraj-google-style"}
{"code": "def list_attributes(self, name):\n    result = self.client.service.getListAttributes(name, self.proxy_id)\n    if (isinstance(result, list) and (len(result) == 1)):\n        return result[0]\n    return result", "docstring": "Look up the attributes of a list.\n\nArgs:\nname (str): The name of the list\n\nReturns:\ndict: attributes of the list", "source": "codesearchnet"}
{"code": "def resize_attention_map(attentions, height, width, align_corners=False):\n    scale = (height * width \n    if height > width:\n        feat_width = int(np.round(width / scale))\n        feat_height = attentions.shape[2] \n    else:\n        feat_height = int(np.round(height / scale))\n        feat_width = attentions.shape[2] \n    batch_size = attentions.shape[0]\n    groups = attentions.shape[1]\n    attentions = attentions.reshape(batch_size, groups, feat_height, feat_width)\n    attentions = nn.functional.interpolate(attentions, size=(height, width), mode='bilinear', align_corners=align_corners)\n    return attentions", "docstring": "Args:\nattentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]\nheight (`int`): height of the output attention map\nwidth (`int`): width of the output attention map\nalign_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.\n\nReturns:\n`torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]", "source": "github-repos"}
{"code": "def submit(self, **kwargs):\n    (verbose, dry_run) = (kwargs.pop('verbose', 0), kwargs.pop('dry_run', False))\n    if (not self.flows):\n        print('Cannot submit an empty list of flows!')\n        return 0\n    if hasattr(self, 'qjob'):\n        print(('BatchLauncher has qjob %s' % self.qjob))\n        if (not self.batch_pid_file.exists):\n            print(\"It seems that the batch script reached the end. Wont' try to submit it again\")\n            return 0\n        msg = 'Here I have to understand if qjob is in the queue. but I need an abstract API that can retrieve info from the queue id'\n        raise RuntimeError(msg)\n        if self.qjob.in_status('Running|Queued'):\n            print('Job is still running. Cannot submit')\n        else:\n            del self.qjob\n    (script, num_flows_inbatch) = self._get_script_nflows()\n    if (num_flows_inbatch == 0):\n        print(\"All flows have reached all_ok! Batch script won't be submitted\")\n        return 0\n    if verbose:\n        print('*** submission script ***')\n        print(script)\n    self.script_file.write(script)\n    self.script_file.chmod(480)\n    for flow in self.flows:\n        flow.build_and_pickle_dump()\n    if dry_run:\n        return (- 1)\n    print(('Will submit %s flows in batch script' % len(self.flows)))\n    (self.qjob, process) = self.qadapter.submit_to_queue(self.script_file.path)\n    self.batch_pidfile.write(str(self.qjob.qid))\n    self.pickle_dump()\n    process.wait()\n    return dict2namedtuple(retcode=process.returncode, qjob=self.qjob, num_flows_inbatch=num_flows_inbatch)", "docstring": "Submit a job script that will run the schedulers with `abirun.py`.\n\nArgs:\nverbose: Verbosity level\ndry_run: Don't submit the script if dry_run. Default: False\n\nReturns:\nnamedtuple with attributes:\nretcode: Return code as returned by the submission script.\nqjob: :class:`QueueJob` object.\nnum_flows_inbatch: Number of flows executed by the batch script\n\nReturn code of the job script submission.", "source": "codesearchnet"}
{"code": "def indicator(self, indicator_type=None, owner=None, **kwargs):\n        \n        if not indicator_type:\n            return Indicator(self.tcex, None, owner=owner, **kwargs)\n\n        upper_indicator_type = indicator_type.upper()\n\n        indicator = None\n        if upper_indicator_type == 'ADDRESS':\n            indicator = Address(self.tcex, kwargs.pop('ip', None), owner=owner, **kwargs)\n        elif upper_indicator_type == 'EMAILADDRESS':\n            indicator = EmailAddress(self.tcex, kwargs.pop('address', None), owner=owner, **kwargs)\n        elif upper_indicator_type == 'FILE':\n            indicator = File(self.tcex, **kwargs)\n        elif upper_indicator_type == 'HOST':\n            indicator = Host(self.tcex, kwargs.pop('hostname', None), owner=owner, **kwargs)\n        elif upper_indicator_type == 'URL':\n            indicator = URL(self.tcex, kwargs.pop('url', None), owner=owner, **kwargs)\n        else:\n            try:\n                if upper_indicator_type in self._custom_indicator_classes.keys():\n                    custom_indicator_details = self._custom_indicator_classes[indicator_type]\n                    value_fields = custom_indicator_details.get('value_fields')\n                    c = getattr(module, custom_indicator_details.get('branch'))\n                    if len(value_fields) == 1:\n                        indicator = c(value_fields[0], owner=owner, **kwargs)\n                    elif len(value_fields) == 2:\n                        indicator = c(value_fields[0], value_fields[1], owner=owner, **kwargs)\n                    elif len(value_fields) == 3:\n                        indicator = c(value_fields[0], value_fields[2], owner=owner, **kwargs)\n            except Exception:\n                return None\n        return indicator", "docstring": "Create the Indicator TI object.\n\nArgs:\nowner:\nindicator_type:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    date_time_values = self._CopyDateTimeFromString(time_string)\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n    microseconds = date_time_values.get('microseconds', 0)\n    (milliseconds, _) = divmod(microseconds, definitions.MICROSECONDS_PER_MILLISECOND)\n    if ((year < 1601) or (year > 30827)):\n        raise ValueError('Unsupported year value: {0:d}.'.format(year))\n    self._normalized_timestamp = None\n    self._number_of_seconds = self._GetNumberOfSecondsFromElements(year, month, day_of_month, hours, minutes, seconds)\n    self.year = year\n    self.month = month\n    self.day_of_month = day_of_month\n    self.day_of_week = None\n    self.hours = hours\n    self.minutes = minutes\n    self.seconds = seconds\n    self.milliseconds = milliseconds\n    self.is_local_time = False", "docstring": "Copies a SYSTEMTIME structure from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the date string is invalid or not supported.", "source": "codesearchnet"}
{"code": "def unique_flags(items, key=None):\n    len_ = len(items)\n    if (key is None):\n        item_to_index = dict(zip(reversed(items), reversed(range(len_))))\n        indices = item_to_index.values()\n    else:\n        indices = argunique(items, key=key)\n    flags = boolmask(indices, len_)\n    return flags", "docstring": "Returns a list of booleans corresponding to the first instance of each\nunique item.\n\nArgs:\nitems (Sequence): indexable collection of items\n\nkey (Callable, optional): custom normalization function.\nIf specified returns items where `key(item)` is unique.\n\nReturns:\nList[bool] : flags the items that are unique\n\nExample:\n>>> import ubelt as ub\n>>> items = [0, 2, 1, 1, 0, 9, 2]\n>>> flags = unique_flags(items)\n>>> assert flags == [True, True, True, False, False, True, False]\n>>> flags = unique_flags(items, key=lambda x: x % 2 == 0)\n>>> assert flags == [True, False, True, False, False, False, False]", "source": "codesearchnet"}
{"code": "def supervisor(self):\n    supervisor = self._cached_client('supervisor')\n    if (not self._api_supervisor_session):\n        self._api_supervisor_session = self.__create_supervisor_session(supervisor)\n    return supervisor", "docstring": "Return an authenticated connection for use, open new if required.\n\nReturns:\nSupervisorWebService: New or existing session with the Five9\nStatistics API.", "source": "codesearchnet"}
{"code": "def __init__(self, liblightning=None, program=None):\n        \n        self._load(liblightning)\n        self._set_signatures()\n        self._init()\n        self._executable = None", "docstring": "Bindings to GNU Lightning library.\n\nArgs:\nliblightning: Set to override path to liblightning.\nprogram: Set to override argument to init_jit, used with bfd.", "source": "juraj-google-style"}
{"code": "def _checkpoint_adapter(self, path: str):\n    del path\n    return None", "docstring": "Returns a checkpoint adapter for this object.\n\nNeeds to be overridden if the `Trackable` requires adapter at restore.\nOverride this method to define callbacks for checkpoint positions to be\napplied at restore time.\n\nArgs:\npath: Checkpoint path.\nReturns:\nA subclass of AbstractCheckpointAdapter that defines callbacks at restore\nfor this trackable.", "source": "github-repos"}
{"code": "def CacheFileObject(self, path_spec, file_object):\n    self._file_object_cache.CacheObject(path_spec.comparable, file_object)", "docstring": "Caches a file-like object based on a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nfile_object (FileIO): file-like object.", "source": "codesearchnet"}
{"code": "def __init__(self, gans_value_function=None):\n        \n        if gans_value_function is None:\n            gans_value_function = MiniMax()\n\n        if isinstance(gans_value_function, GANsValueFunction) is False:\n            raise TypeError(\"The type of `gans_value_function` must be `GANsValueFunction`.\")\n        self.__gans_value_function = gans_value_function\n        self.__logger = getLogger(\"pygan\")\n\n        super().__init__(gans_value_function)", "docstring": "Init.\n\nArgs:\ngans_value_function:        is-a `GANsValueFunction`.", "source": "juraj-google-style"}
{"code": "def __init__(self, data):\n    \n    if isinstance(data, py2to3.INTEGER_TYPES):\n      self.data = data\n      self.text = '{0:d}'.format(data)\n\n    elif isinstance(data, float):\n      self.data = py2to3.LONG_TYPE(data)\n      self.text = '{0:f}'.format(data)\n\n    elif isinstance(data, py2to3.STRING_TYPES):\n      if isinstance(data, py2to3.BYTES_TYPE):\n        self.text = data.decode('utf-8', errors='ignore')\n      else:\n        self.text = data\n\n      try:\n        self.data = timelib.Timestamp.FromTimeString(self.text)\n      except (ValueError, errors.TimestampError):\n        raise ValueError('Wrongly formatted date string: {0:s}'.format(\n            self.text))\n\n    elif isinstance(data, datetime.datetime):\n      posix_time = int(calendar.timegm(data.utctimetuple()))\n      self.data = (\n          posix_time * definitions.MICROSECONDS_PER_SECOND) + data.microsecond\n      self.text = '{0!s}'.format(data)\n\n    elif isinstance(data, DateCompareObject):\n      self.data = data.data\n      self.text = '{0!s}'.format(data)\n\n    else:\n      raise ValueError('Unsupported type: {0:s}.'.format(type(data)))", "docstring": "Take a date object and use that for comparison.\n\nArgs:\ndata: A string, datetime object or an integer containing the number\nof micro seconds since January 1, 1970, 00:00:00 UTC.\n\nRaises:\nValueError: if the date string is invalid.", "source": "juraj-google-style"}
{"code": "def en020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `en020`'.format(value))\n    self._en020 = value", "docstring": "Corresponds to IDD Field `en020`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `en020`\nUnit: kJ/kg\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _VerifyHMAC(self, comms=None):\n    if (self.hmac_type == 'SIMPLE_HMAC'):\n        msg = comms.encrypted\n        digest = comms.hmac\n    elif (self.hmac_type == 'FULL_HMAC'):\n        msg = b''.join([comms.encrypted, comms.encrypted_cipher, comms.encrypted_cipher_metadata, comms.packet_iv.SerializeToString(), struct.pack('<I', comms.api_version)])\n        digest = comms.full_hmac\n    else:\n        raise DecryptionError('HMAC type no supported.')\n    try:\n        rdf_crypto.HMAC(self.cipher.hmac_key).Verify(msg, digest)\n    except rdf_crypto.VerificationError as e:\n        raise DecryptionError(('HMAC verification failed: %s' % e))\n    return True", "docstring": "Verifies the HMAC.\n\nThis method raises a DecryptionError if the received HMAC does not\nverify. If the HMAC verifies correctly, True is returned.\n\nArgs:\ncomms: The comms RdfValue to verify.\n\nRaises:\nDecryptionError: The HMAC did not verify.\n\nReturns:\nTrue", "source": "codesearchnet"}
{"code": "def _cookiecutter_configs_have_changed(template, old_version, new_version):\n    temple.check.is_git_ssh_path(template)\n    repo_path = temple.utils.get_repo_path(template)\n    github_client = temple.utils.GithubClient()\n    api = '/repos/{}/contents/cookiecutter.json'.format(repo_path)\n    old_config_resp = github_client.get(api, params={'ref': old_version})\n    old_config_resp.raise_for_status()\n    new_config_resp = github_client.get(api, params={'ref': new_version})\n    new_config_resp.raise_for_status()\n    return (old_config_resp.json()['content'] != new_config_resp.json()['content'])", "docstring": "Given an old version and new version, check if the cookiecutter.json files have changed\n\nWhen the cookiecutter.json files change, it means the user will need to be prompted for\nnew context\n\nArgs:\ntemplate (str): The git SSH path to the template\nold_version (str): The git SHA of the old version\nnew_version (str): The git SHA of the new version\n\nReturns:\nbool: True if the cookiecutter.json files have been changed in the old and new versions", "source": "codesearchnet"}
{"code": "def inputs(eval_data, data_dir, batch_size):\n  \n  if not eval_data:\n    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n                 for i in xrange(1, 6)]\n    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n  else:\n    filenames = [os.path.join(data_dir, 'test_batch.bin')]\n    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n  for f in filenames:\n    if not tf.gfile.Exists(f):\n      raise ValueError('Failed to find file: ' + f)\n\n  \n  filename_queue = tf.train.string_input_producer(filenames)\n\n  \n  read_input = read_cifar10(filename_queue)\n  reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n  height = IMAGE_SIZE\n  width = IMAGE_SIZE\n\n  \n  \n  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,\n                                                         height, width)\n\n  \n  float_image = tf.image.per_image_standardization(resized_image)\n\n  \n  float_image.set_shape([height, width, 3])\n  read_input.label.set_shape([1])\n\n  \n  min_fraction_of_examples_in_queue = 0.4\n  min_queue_examples = int(num_examples_per_epoch *\n                           min_fraction_of_examples_in_queue)\n\n  \n  return _generate_image_and_label_batch(float_image, read_input.label,\n                                         min_queue_examples, batch_size,\n                                         shuffle=False)", "docstring": "Construct input for CIFAR evaluation using the Reader ops.\n\nArgs:\neval_data: bool, indicating if one should use the train or eval data set.\ndata_dir: Path to the CIFAR-10 data directory.\nbatch_size: Number of images per batch.\n\nReturns:\nimages: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\nlabels: Labels. 1D tensor of [batch_size] size.", "source": "juraj-google-style"}
{"code": "def _set_grid_info(self, which, low, high, num, scale, name):\n    setattr(self.generate_info, (which + '_low'), low)\n    setattr(self.generate_info, (which + '_high'), high)\n    setattr(self.generate_info, ('num_' + which), num)\n    setattr(self.generate_info, (which + 'val_name'), name)\n    if (scale not in ['lin', 'log']):\n        raise ValueError('{} scale must be lin or log.'.format(which))\n    setattr(self.generate_info, (which + 'scale'), scale)\n    return", "docstring": "Set the grid values for x or y.\n\nCreate information for the grid of x and y values.\n\nArgs:\nwhich (str): `x` or `y`.\nlow/high (float): Lowest/highest value for the axis.\nnum (int): Number of points on axis.\nscale (str): Scale of the axis. Choices are 'log' or 'lin'.\nname (str): Name representing the axis. See GenerateContainer documentation\nfor options for the name.\nunit (str): Unit for this axis quantity. See GenerateContainer documentation\nfor options for the units.\n\nRaises:\nValueError: If scale is not 'log' or 'lin'.", "source": "codesearchnet"}
{"code": "def _get_schema(cls, schema):\n        \n        if isinstance(schema, string_types):\n            schema = cls._get_object_from_python_path(schema)\n\n        if isclass(schema):\n            schema = schema()\n\n        if not isinstance(schema, Schema):\n            raise TypeError(\"The schema must be a path to a Marshmallow \"\n                            \"schema or a Marshmallow schema.\")\n\n        return schema", "docstring": "Method that will fetch a Marshmallow schema flexibly.\n\nArgs:\nschema (marshmallow.Schema|str): Either the schema class, an\ninstance of a schema, or a Python path to a schema.\n\nReturns:\nmarshmallow.Schema: The desired schema.\n\nRaises:\nTypeError: This is raised if the provided object isn't\na Marshmallow schema.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteUInt32(self.Timestamp)\n        writer.WriteUInt64(self.Services)\n        \n        octets = bytearray(map(lambda oct: int(oct), self.Address.split('.')))\n        \n        octets += bytearray(12)\n        \n        writer.WriteBytes(octets)\n        writer.WriteUInt16(self.Port, endian='>')", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def __init__(self, num_evals, log_progress=True):\n    self._num_evals = num_evals\n    self._evals_completed = None\n    self._log_progress = log_progress\n    self._log_frequency = 1 if num_evals is None or num_evals < 20 else math.floor(num_evals / 10.0)", "docstring": "Constructs the run hook.\n\nArgs:\nnum_evals: The number of evaluations to run for. if set to None, will\niterate the dataset until all inputs are exhausted.\nlog_progress: Whether to log evaluation progress, defaults to True.", "source": "github-repos"}
{"code": "def MakeCACert(private_key,\n               common_name=u\"grr\",\n               issuer_cn=u\"grr_test\",\n               issuer_c=u\"US\"):\n  \n  public_key = private_key.GetPublicKey()\n\n  builder = x509.CertificateBuilder()\n\n  issuer = x509.Name([\n      x509.NameAttribute(oid.NameOID.COMMON_NAME, issuer_cn),\n      x509.NameAttribute(oid.NameOID.COUNTRY_NAME, issuer_c)\n  ])\n  subject = x509.Name(\n      [x509.NameAttribute(oid.NameOID.COMMON_NAME, common_name)])\n  builder = builder.subject_name(subject)\n  builder = builder.issuer_name(issuer)\n\n  valid_from = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration(\"1d\")\n  valid_until = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration(\"3650d\")\n  builder = builder.not_valid_before(valid_from.AsDatetime())\n  builder = builder.not_valid_after(valid_until.AsDatetime())\n\n  builder = builder.serial_number(1)\n  builder = builder.public_key(public_key.GetRawPublicKey())\n\n  builder = builder.add_extension(\n      x509.BasicConstraints(ca=True, path_length=None), critical=True)\n  builder = builder.add_extension(\n      x509.SubjectKeyIdentifier.from_public_key(public_key.GetRawPublicKey()),\n      critical=False)\n  certificate = builder.sign(\n      private_key=private_key.GetRawPrivateKey(),\n      algorithm=hashes.SHA256(),\n      backend=openssl.backend)\n  return rdf_crypto.RDFX509Cert(certificate)", "docstring": "Generate a CA certificate.\n\nArgs:\nprivate_key: The private key to use.\ncommon_name: Name for cert.\nissuer_cn: Name for issuer.\nissuer_c: Country for issuer.\n\nReturns:\nThe certificate.", "source": "juraj-google-style"}
{"code": "def replace(s, pattern, replacement):\n\n    def _replacement(matchobj):\n        return replacement\n    return re.sub(pattern, _replacement, s)", "docstring": "Replaces occurrences of a match string in a given\nstring and returns the new string. The match string\ncan be a regex expression.\n\nArgs:\ns (str):           the string to modify\npattern (str):     the search expression\nreplacement (str): the string to replace each match with", "source": "codesearchnet"}
{"code": "def is_workdir(cls, path):\n        \n        try:\n            cls(path=path).load()\n        except MalformedWorkdir:\n            return False\n\n        return True", "docstring": "Check if the given path is a workdir\n\nArgs:\npath(str): Path to check\n\nReturn:\nbool: True if the given path is a workdir", "source": "juraj-google-style"}
{"code": "def notes_to_midi(self, notes: np.ndarray, beatstep: np.ndarray, offset_sec: int=0.0):\n    requires_backends(self, ['pretty_midi'])\n    new_pm = pretty_midi.PrettyMIDI(resolution=384, initial_tempo=120.0)\n    new_inst = pretty_midi.Instrument(program=0)\n    new_notes = []\n    for onset_idx, offset_idx, pitch, velocity in notes:\n        new_note = pretty_midi.Note(velocity=velocity, pitch=pitch, start=beatstep[onset_idx] - offset_sec, end=beatstep[offset_idx] - offset_sec)\n        new_notes.append(new_note)\n    new_inst.notes = new_notes\n    new_pm.instruments.append(new_inst)\n    new_pm.remove_invalid_notes()\n    return new_pm", "docstring": "Converts notes to Midi.\n\nArgs:\nnotes (`numpy.ndarray`):\nThis is used to create Pretty Midi objects.\nbeatstep (`numpy.ndarray`):\nThis is the extrapolated beatstep that we get from feature extractor.\noffset_sec (`int`, *optional*, defaults to 0.0):\nThis represents the offset seconds which is used while creating each Pretty Midi Note.", "source": "github-repos"}
{"code": "class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):\n\n    def __init__(self, min_length: int, eos_token_id: int):\n        if not isinstance(min_length, int) or min_length < 0:\n            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')\n        if not isinstance(eos_token_id, int) or eos_token_id < 0:\n            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')\n        self.min_length = min_length\n        self.eos_token_id = eos_token_id\n\n    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)\n        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)\n        return scores", "docstring": "[`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.\n\nArgs:\nmin_length (`int`):\nThe minimum length below which the score of `eos_token_id` is set to `-float(\"Inf\")`.\neos_token_id (`int`):\nThe id of the *end-of-sequence* token.", "source": "github-repos"}
{"code": "def __init__(self, start_timestamp, end_timestamp):\n    \n    if start_timestamp is None or end_timestamp is None:\n      raise ValueError(\n          'Time range must have either a start and an end timestamp.')\n\n    if start_timestamp > end_timestamp:\n      raise ValueError(\n          'Invalid start must be earlier than end timestamp.')\n\n    super(TimeRange, self).__init__()\n    self.duration = end_timestamp - start_timestamp\n    self.end_timestamp = end_timestamp\n    self.start_timestamp = start_timestamp", "docstring": "Initializes a date and time range.\n\nThe timestamp are integers containing the number of microseconds\nsince January 1, 1970, 00:00:00 UTC.\n\nArgs:\nstart_timestamp (int): timestamp that marks the start of the range.\nend_timestamp (int): timestamp that marks the end of the range.\n\nRaises:\nValueError: If the time range is badly formed.", "source": "juraj-google-style"}
{"code": "def _Verify(self):\n    if self._expected_calls_queue:\n        if ((len(self._expected_calls_queue) == 1) and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()):\n            pass\n        else:\n            raise ExpectedMethodCallsError(self._expected_calls_queue)", "docstring": "Verify that all of the expected calls have been made.\n\nRaises:\nExpectedMethodCallsError: if there are still more method calls in the\nexpected queue.", "source": "codesearchnet"}
{"code": "def add_parameter(self, name, min_val, max_val):\n        \n\n        self.__parameters.append(Parameter(name, min_val, max_val))", "docstring": "Adds a paramber to the Population\n\nArgs:\nname (str): name of the parameter\nmin_val (int or float): minimum value for the parameter\nmax_val (int or float): maximum value for the parameter", "source": "juraj-google-style"}
{"code": "def make_decoder(num_topics, num_words):\n    topics_words_logits = tf.compat.v1.get_variable('topics_words_logits', shape=[num_topics, num_words], initializer=tf.compat.v1.glorot_normal_initializer())\n    topics_words = tf.nn.softmax(topics_words_logits, axis=(- 1))\n\n    def decoder(topics):\n        word_probs = tf.matmul(topics, topics_words)\n        return tfd.OneHotCategorical(probs=word_probs, name='bag_of_words')\n    return (decoder, topics_words)", "docstring": "Create the decoder function.\n\nArgs:\nnum_topics: The number of topics.\nnum_words: The number of words.\n\nReturns:\ndecoder: A `callable` mapping a `Tensor` of encodings to a\n`tfd.Distribution` instance over words.", "source": "codesearchnet"}
{"code": "def _read(self, entry):\n    start_time = time.time()\n    content = self._zip.read(entry.filename)\n    ctx = context.get()\n    if ctx:\n        operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)\n        operation.counters.Increment(COUNTER_IO_READ_MSEC, int(((time.time() - start_time) * 1000)))(ctx)\n    return content", "docstring": "Read entry content.\n\nArgs:\nentry: zip file entry as zipfile.ZipInfo.\nReturns:\nEntry content as string.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(APFSContainerFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._fsapfs_container = None", "docstring": "Initializes an APFS container file system.\n\nArgs:\nresolver_context (resolver.Context): resolver context.", "source": "juraj-google-style"}
{"code": "def extrinsic_events(network, previous_state, current_state, next_state, indices=None, major_complex=None):\n    if major_complex:\n        mc_nodes = major_complex.subsystem.node_indices\n    elif indices:\n        mc_nodes = indices\n    else:\n        major_complex = compute.major_complex(network, current_state)\n        mc_nodes = major_complex.subsystem.node_indices\n    mechanisms = list(utils.powerset(mc_nodes, nonempty=True))\n    all_nodes = network.node_indices\n    return events(network, previous_state, current_state, next_state, all_nodes, mechanisms=mechanisms)", "docstring": "Set of all mechanisms that are in the major complex but which have true\ncauses and effects within the entire network.\n\nArgs:\nnetwork (Network): The network to analyze.\nprevious_state (tuple[int]): The state of the network at ``t - 1``.\ncurrent_state (tuple[int]): The state of the network at ``t``.\nnext_state (tuple[int]): The state of the network at ``t + 1``.\n\nKeyword Args:\nindices (tuple[int]): The indices of the major complex.\nmajor_complex (AcSystemIrreducibilityAnalysis): The major complex. If\n``major_complex`` is given then ``indices`` is ignored.\n\nReturns:\ntuple(actions): List of extrinsic events in the major complex.", "source": "codesearchnet"}
{"code": "def rpm_versioned_name(cls, name, version, default_number=False):\n        \n        regexp = re.compile(r'^python(\\d*|)-(.*)')\n        auto_provides_regexp = re.compile(r'^python(\\d*|)dist(.*)')\n\n        if (not version or version == cls.get_default_py_version() and\n                not default_number):\n            found = regexp.search(name)\n            \n            \n            if found and found.group(2) != 'devel':\n                if 'epel' not in cls.template:\n                    return 'python-{0}'.format(regexp.search(name).group(2))\n            return name\n\n        versioned_name = name\n        if version:\n\n            if regexp.search(name):\n                versioned_name = re.sub(r'^python(\\d*|)-', 'python{0}-'.format(\n                    version), name)\n            elif auto_provides_regexp.search(name):\n                versioned_name = re.sub(\n                    r'^python(\\d*|)dist', 'python{0}dist'.format(\n                        version), name)\n\n            else:\n                versioned_name = 'python{0}-{1}'.format(version, name)\n            if ('epel' in cls.template and version !=\n                    cls.get_default_py_version()):\n                versioned_name = versioned_name.replace('{0}'.format(\n                    version), '%{{python{0}_pkgversion}}'.format(version))\n        return versioned_name", "docstring": "Properly versions the name.\nFor example:\nrpm_versioned_name('python-foo', '26') will return python26-foo\nrpm_versioned_name('pyfoo, '3') will return python3-pyfoo\n\nIf version is same as settings.DEFAULT_PYTHON_VERSION, no change\nis done.\n\nArgs:\nname: name to version\nversion: version or None\nReturns:\nVersioned name or the original name if given version is None.", "source": "juraj-google-style"}
{"code": "def method_exists(cls, method):\n        \n        methods = cls.API_METHODS\n        for key in method.split('.'):\n            methods = methods.get(key)\n            if methods is None:\n                break\n        if isinstance(methods, str):\n            logger.debug('%r: %r', method, methods)\n            return True\n        return False", "docstring": "Whether a given method exists in the known API.\n\nArguments:\nmethod (:py:class:`str`): The name of the method.\n\nReturns:\n:py:class:`bool`: Whether the method is in the known API.", "source": "juraj-google-style"}
{"code": "def __init__(self, adapter_id):\n        \n\n        super(ConnectionManager, self).__init__()\n\n        self.id = adapter_id\n        self._stop_event = threading.Event()\n        self._actions = queue.Queue()\n        self._connections = {}\n        self._int_connections = {}\n        self._data_lock = threading.Lock()\n\n        \n        self.daemon = True\n\n        self._logger = logging.getLogger(__name__)\n        self._logger.addHandler(logging.NullHandler())\n        self._logger.setLevel(logging.INFO)", "docstring": "Constructor.\n\nArgs:\nadapter_id (int): Since the ConnectionManager responds to callbacks on behalf\nof a DeviceAdapter, it needs to know what adapter_id to send with the\ncallbacks.", "source": "juraj-google-style"}
{"code": "def plot_time_series(self, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):\n        \n\n        ax = plt.gca()\n        plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)\n\n        if logged and self.header[b'nbits'] >= 8:\n            plot_data = db(plot_data)\n\n        \n        if len(plot_data.shape) > 1:\n            plot_data = plot_data.mean(axis=1)\n        else:\n            plot_data = plot_data.mean()\n\n        \n        extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time)\n        plot_t = np.linspace(extent[2],extent[3],len(self.timestamps))\n\n        if MJD_time:\n            tlabel = \"Time [MJD]\"\n        else:\n            tlabel = \"Time [s]\"\n\n        if logged:\n            plabel = \"Power [dB]\"\n        else:\n            plabel = \"Power [counts]\"\n\n        \n        if 'v' in orientation:\n            plt.plot(plot_data, plot_t, **kwargs)\n            plt.xlabel(plabel)\n\n        else:\n            plt.plot(plot_t, plot_data, **kwargs)\n            plt.xlabel(tlabel)\n            plt.ylabel(plabel)\n\n        ax.autoscale(axis='both',tight=True)", "docstring": "Plot the time series.\n\nArgs:\nf_start (float): start frequency, in MHz\nf_stop (float): stop frequency, in MHz\nlogged (bool): Plot in linear (False) or dB units (True),\nkwargs: keyword args to be passed to matplotlib imshow()", "source": "juraj-google-style"}
{"code": "def GetAvailableClaimTotal(self):\n    coinrefs = [coin.Reference for coin in self.GetUnclaimedCoins()]\n    bonus = Blockchain.CalculateBonusIgnoreClaimed(coinrefs, True)\n    return bonus", "docstring": "Gets the total amount of Gas that this wallet is able to claim at a given moment.\n\nReturns:\nFixed8: the amount of Gas available to claim as a Fixed8 number.", "source": "codesearchnet"}
{"code": "def refresh(self, access_token=None, **kwargs):\n        \n        if not self.token_lock.locked():\n            with self.token_lock:\n                if access_token == self.access_token or access_token is None:\n                    if self.developer_token is not None:\n                        r = self._httpclient.request(\n                            method='POST',\n                            url=self.developer_token_url,\n                            path='/request_token',\n                            headers={\n                                'Authorization': 'Bearer {}'.format(\n                                    self.developer_token\n                                )\n                            },\n                            timeout=30,\n                            raise_for_status=True\n                        )\n\n                    elif all(\n                        [\n                            self.client_id,\n                            self.client_secret,\n                            self.refresh_token\n                        ]\n                    ):\n                        data = {\n                            'client_id': self.client_id,\n                            'client_secret': self.client_secret,\n                            'refresh_token': self.refresh_token,\n                            'grant_type': 'refresh_token'\n                        }\n                        r = self._httpclient.request(\n                            method='POST',\n                            url=self.token_url,\n                            json=data,\n                            path='/api/oauth2/RequestToken',\n                            **kwargs\n                        )\n                    else:\n                        raise PartialCredentialsError(\n                            \"Missing one or more required credentials\"\n                        )\n\n                    if r:\n                        if not r.ok:\n                            raise PanCloudError(\n                                '%s %s: %s' % (\n                                r.status_code, r.reason, r.text)\n                            )\n                        try:\n                            r_json = r.json()\n                        except ValueError as e:\n                            raise PanCloudError(\"Invalid JSON: %s\" % e)\n                        else:\n                            if r.json().get(\n                                'error_description'\n                            ) or r.json().get(\n                                'error'\n                            ):\n                                raise PanCloudError(r.text)\n                            self.access_token = r_json.get(\n                                'access_token', None\n                            )\n                            self.jwt_exp = self._decode_exp(\n                                self.access_token_)\n                            if r_json.get('refresh_token', None):\n                                self.refresh_token = \\\n                                    r_json.get('refresh_token')\n                            self.write_credentials()\n                        return self.access_token_", "docstring": "Refresh access and refresh tokens.\n\nArgs:\naccess_token (str): Access token to refresh. Defaults to ``None``.\n\nReturns:\nstr: Refreshed access token.", "source": "juraj-google-style"}
{"code": "def __init__(self, provider, template, **kwargs):\n        \n        super(StatikJinjaTemplate, self).__init__(template.filename, **kwargs)\n        self.provider = provider\n        self.template = template", "docstring": "Constructor.\n\nArgs:\nprovider: The provider that created this template.\ntemplate: The Jinja2 template to wrap.", "source": "juraj-google-style"}
{"code": "def send_message(\n        self, request: str, response_expected: bool, **kwargs: Any\n    ) -> Response:\n        \n        response = self.session.post(self.endpoint, data=request.encode(), **kwargs)\n        return Response(response.text, raw=response)", "docstring": "Transport the message to the server and return the response.\n\nArgs:\nrequest: The JSON-RPC request string.\nresponse_expected: Whether the request expects a response.\n\nReturns:\nA Response object.", "source": "juraj-google-style"}
{"code": "def from_lasio(cls, l, remap=None, funcs=None):\n        \n        params = {}\n        funcs = funcs or {}\n        funcs['location'] = str\n        for field, (sect, code) in las_fields['location'].items():\n            params[field] = utils.lasio_get(l,\n                                            sect,\n                                            code,\n                                            remap=remap,\n                                            funcs=funcs)\n        return cls(params)", "docstring": "Make a Location object from a lasio object. Assumes we're starting\nwith a lasio object, l.\n\nArgs:\nl (lasio).\nremap (dict): Optional. A dict of 'old': 'new' LAS field names.\nfuncs (dict): Optional. A dict of 'las field': function() for\nimplementing a transform before loading. Can be a lambda.\n\nReturns:\nLocation. An instance of this class.", "source": "juraj-google-style"}
{"code": "def _prepare_swaption_indices(tensor_shape):\n    tensor_shape = np.array(tensor_shape, dtype=np.int64)\n    batch_shape = tensor_shape[1:-1]\n    batch_size = np.prod(batch_shape)\n    index_list = []\n    for i in range(len(tensor_shape)):\n        index = np.arange(0, tensor_shape[i], dtype=np.int64)\n        if i == 0 or i == len(tensor_shape) - 1:\n            index = tf.tile(index, [batch_size])\n        else:\n            index = np.tile(np.repeat(index, np.prod(tensor_shape[i + 1:])), [np.prod(tensor_shape[1:i])])\n        index_list.append(index)\n    return tf.stack(index_list, axis=-1)", "docstring": "Indices for `gather_nd` for analytic valuation.\n\nFor a `Tensor` x of shape `tensor_shape` = [n] + batch_shape + [n], this\nfunction returns indices for tf.gather_nd to get `x[i,...,i]`\n\nArgs:\ntensor_shape: A list of length `k` representing shape of the `Tensor`.\n\nReturns:\nA `Tensor` of shape (num_elements, k) where num_elements= n * batch_size\nof dtype tf.int64.", "source": "github-repos"}
{"code": "def __similarity(s1, s2, ngrams_fn, n=3):\n    \n    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))\n    matches = ngrams1.intersection(ngrams2)\n    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))", "docstring": "The fraction of n-grams matching between two sequences\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nfloat: the fraction of n-grams matching", "source": "juraj-google-style"}
{"code": "def getISOSetupList(self):\n        \n        transfer_p = self.__transfer\n        transfer = transfer_p.contents\n        \n        if transfer.type != TRANSFER_TYPE_ISOCHRONOUS:\n            \n            raise TypeError(\n                'This method cannot be called on non-iso transfers.'\n            )\n        return [\n            {\n                'length': x.length,\n                'actual_length': x.actual_length,\n                'status': x.status,\n            }\n            for x in libusb1.get_iso_packet_list(transfer_p)\n        ]", "docstring": "Get individual ISO transfer's setup.\nReturns a list of dicts, each containing an individual ISO transfer\nparameters:\n- length\n- actual_length\n- status\n(see libusb1's API documentation for their signification)\nReturned list is consistent with getISOBufferList return value.\nShould not be called on a submitted transfer (except for 'length'\nvalues).", "source": "juraj-google-style"}
{"code": "def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:\n    \n\n    \n    spec_dict[\"relations\"][\"list\"] = []\n    spec_dict[\"relations\"][\"list_short\"] = []\n    spec_dict[\"relations\"][\"list_long\"] = []\n    spec_dict[\"relations\"][\"to_short\"] = {}\n    spec_dict[\"relations\"][\"to_long\"] = {}\n\n    for relation_name in spec_dict[\"relations\"][\"info\"]:\n\n        abbreviated_name = spec_dict[\"relations\"][\"info\"][relation_name][\"abbreviation\"]\n        spec_dict[\"relations\"][\"list\"].extend((relation_name, abbreviated_name))\n        spec_dict[\"relations\"][\"list_long\"].append(relation_name)\n        spec_dict[\"relations\"][\"list_short\"].append(abbreviated_name)\n\n        spec_dict[\"relations\"][\"to_short\"][relation_name] = abbreviated_name\n        spec_dict[\"relations\"][\"to_short\"][abbreviated_name] = abbreviated_name\n\n        spec_dict[\"relations\"][\"to_long\"][abbreviated_name] = relation_name\n        spec_dict[\"relations\"][\"to_long\"][relation_name] = relation_name\n\n    return spec_dict", "docstring": "Add relation keys to spec_dict\n\nArgs:\nspec_dict (Mapping[str, Any]): bel specification dictionary\n\nReturns:\nMapping[str, Any]: bel specification dictionary with added relation keys", "source": "juraj-google-style"}
{"code": "def experimental_local_results(self, value):\n    return self._extended._local_results(value)", "docstring": "Returns the list of all local per-replica values contained in `value`.\n\nNote: This only returns values on the worker initiated by this client.\nWhen using a `tf.distribute.Strategy` like\n`tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker\nwill be its own client, and this function will only return values\ncomputed on that worker.\n\nArgs:\nvalue: A value returned by `experimental_run()`, `run(), or a variable\ncreated in `scope`.\n\nReturns:\nA tuple of values contained in `value` where ith element corresponds to\nith replica. If `value` represents a single value, this returns\n`(value,).`", "source": "github-repos"}
{"code": "def institutes(self, institute_ids=None):\n        \n        query = {}\n        if institute_ids:\n            query['_id'] = {'$in': institute_ids}\n        LOG.debug(\"Fetching all institutes\")\n        return self.institute_collection.find(query)", "docstring": "Fetch all institutes.\n\nArgs:\ninstitute_ids(list(str))\n\nReturns:\nres(pymongo.Cursor)", "source": "juraj-google-style"}
{"code": "def features(self):\n    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n    self._dll.JLINKARM_GetFeatureString(buf)\n    result = ctypes.string_at(buf).decode().strip()\n    if (len(result) == 0):\n        return list()\n    return result.split(', ')", "docstring": "Returns a list of the J-Link embedded features.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of strings, each a feature.  Example:\n``[ 'RDI', 'FlashBP', 'FlashDL', 'JFlash', 'GDB' ]``", "source": "codesearchnet"}
{"code": "def _save_env(env):\n    env_path = os.path.join(env['resultdir'], 'env')\n    if os.path.isdir(env['resultdir']):\n        with open(env_path, 'w') as f:\n            yaml.dump(env, f)", "docstring": "Saves one environment.\n\nArgs:\nenv (dict): the env dict to save.", "source": "codesearchnet"}
{"code": "def load(self, read_tuple_name):\n        \n        self.prefix_width = 0\n        self.read_tuple_id_width = 0\n        self.genome_id_width = 0\n        self.chr_id_width = 0\n        self.coor_width = 0\n\n        parts = read_tuple_name.split(\"__\")\n        self.prefix_width = len(parts[0])\n        self.read_tuple_id_width = len(parts[1])\n\n        segments = parts[2][1:-1].split(\"),(\")\n        for segment in segments:\n            int_widths = list(map(len, segment.split(\",\")))\n            self.genome_id_width = max(self.genome_id_width, int_widths[0])\n            self.chr_id_width = max(self.chr_id_width, int_widths[1])\n            self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])", "docstring": "Load RNF values from a read tuple name.\n\nArgs:\nread_tuple_name (str): Read tuple name which the values are taken from.", "source": "juraj-google-style"}
{"code": "def shutdown(self, vm_names=None, reboot=False):\n    self.virt_env.shutdown(vm_names, reboot)", "docstring": "Shutdown this prefix\n\nArgs:\nvm_names(list of str): List of the vms to shutdown\nreboot(bool): If true, reboot the requested vms\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def subscribe(self, devices_to_bind=[]):\n        \n        if self.entity_api_key == \"\":\n            return {'status': 'failure', 'response': 'No API key found in request'}\n        self.bind(devices_to_bind)\n        loop = asyncio.new_event_loop()\n        t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,))\n        t1.daemon = True\n        t1.start()", "docstring": "This function allows an entity to subscribe for data from the devices specified in the bind operation. It\ncreates a thread with an event loop to manager the tasks created in start_subscribe_worker.\n\nArgs:\ndevices_to_bind (list): an array of devices to listen to", "source": "juraj-google-style"}
{"code": "def npy_to_numpy(npy_array):  \n    \n    stream = BytesIO(npy_array)\n    return np.load(stream, allow_pickle=True)", "docstring": "Convert an NPY array into numpy.\n\nArgs:\nnpy_array (npy array): to be converted to numpy array\nReturns:\n(np.array): converted numpy array.", "source": "juraj-google-style"}
{"code": "def scale(p, factor, o=(0, 0)):\n    \n    v = vector(o, p)\n    sv = v[0] * factor, v[1] * factor\n    return translate(sv, o)", "docstring": "scale vector\nArgs:\np: point (x, y)\nfactor: scaling factor\no: origin (x, y)", "source": "juraj-google-style"}
{"code": "def write_new_config(self, updates):\n    with open(self._new_config, 'w') as config_file:\n        for update in updates:\n            line = '{0}=={1}  \n            config_file.write(line)", "docstring": "Given a list of updates, write the updates out to the provided\nconfiguartion file.\n\nArgs:\nupdates (list): List of Update objects.", "source": "codesearchnet"}
{"code": "def record(self, value=1.0, time_ms=None):\n        \n        if time_ms is None:\n            time_ms = time.time() * 1000\n        self._last_record_time = time_ms\n        with self._lock:  \n            \n            for stat in self._stats:\n                stat.record(self._config, value, time_ms)\n            self._check_quotas(time_ms)\n        for parent in self._parents:\n            parent.record(value, time_ms)", "docstring": "Record a value at a known time.\nArguments:\nvalue (double): The value we are recording\ntime_ms (int): A POSIX timestamp in milliseconds.\nDefault: The time when record() is evaluated (now)\n\nRaises:\nQuotaViolationException: if recording this value moves a\nmetric beyond its configured maximum or minimum bound", "source": "juraj-google-style"}
{"code": "def enroll_user_in_course(self, username, course_id, mode, cohort=None):\n    return self.client.enrollment.post({'user': username, 'course_details': {'course_id': course_id}, 'mode': mode, 'cohort': cohort})", "docstring": "Call the enrollment API to enroll the user in the course specified by course_id.\n\nArgs:\nusername (str): The username by which the user goes on the OpenEdX platform\ncourse_id (str): The string value of the course's unique identifier\nmode (str): The enrollment mode which should be used for the enrollment\ncohort (str): Add the user to this named cohort\n\nReturns:\ndict: A dictionary containing details of the enrollment, including course details, mode, username, etc.", "source": "codesearchnet"}
{"code": "def encode_bu64(b):\n    \n    s = base64.standard_b64encode(b)\n    s = s.rstrip('=')\n    s = s.replace('+', '-')\n    s = s.replace('/', '_')\n    return s", "docstring": "Encode bytes to a URL safe flavor of Base64 used by JWTs.\n\n- Reverse of decode_bu64().\n\nArgs:\nb: bytes\nBytes to Base64 encode.\n\nReturns:\nbytes: URL safe Base64 encoded version of input.", "source": "juraj-google-style"}
{"code": "def setCTRatio(self, new_ct, password='00000000'):\n    ret = False\n    self.setContext('setCTRatio')\n    try:\n        self.clearCmdMsg()\n        if ((new_ct != CTRatio.Amps_100) and (new_ct != CTRatio.Amps_200) and (new_ct != CTRatio.Amps_400) and (new_ct != CTRatio.Amps_600) and (new_ct != CTRatio.Amps_800) and (new_ct != CTRatio.Amps_1000) and (new_ct != CTRatio.Amps_1200) and (new_ct != CTRatio.Amps_1500) and (new_ct != CTRatio.Amps_2000) and (new_ct != CTRatio.Amps_3000) and (new_ct != CTRatio.Amps_4000) and (new_ct != CTRatio.Amps_5000)):\n            self.writeCmdMsg(('Legal CT Ratios: 100, 200, 400, 600, ' + '800, 1000, 1200, 1500, 2000, 3000, 4000 and 5000'))\n            self.setContext('')\n            return ret\n        if (len(password) != 8):\n            self.writeCmdMsg('Invalid password length.')\n            self.setContext('')\n            return ret\n        if (not self.request(False)):\n            self.writeCmdMsg('Bad read CRC on setting')\n        elif (not self.serialCmdPwdAuth(password)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_str = (('015731023030443028' + binascii.hexlify(str(new_ct).zfill(4))) + '2903')\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success(setCTRatio): 06 returned.')\n                ret = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return ret", "docstring": "Serial call to set CT ratio for attached inductive pickup.\n\nArgs:\nnew_ct (int): A :class:`~ekmmeters.CTRatio` value, a legal amperage setting.\npassword (str): Optional password.\n\nReturns:\nbool: True on completion with ACK.", "source": "codesearchnet"}
{"code": "def get_collectors(self, limit=1000, offset=0):\n        \n        options = {\n            'limit': limit,\n            'offset': offset,\n        }\n        request = requests.get(self.url, params=options, auth=self.auth)\n\n        try:\n            results = request.json()['collectors']\n        except KeyError:\n            results = request.json()\n        except json.decoder.JSONDecodeError:\n            results = []\n\n        return results", "docstring": "Returns a dict of collectors.\n\nArgs:\nlimit (int): number of collectors to return\noffset (int): the offset of where the list of collectors should begin from", "source": "juraj-google-style"}
{"code": "def daemonize(pidfile=None):\n    \n\n    \n    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))\n\n    \n    os.chdir(\"/\")\n\n    \n    os.umask(0)\n\n    \n    pid = os.fork()\n    if pid > 0:\n        os._exit(0)\n    os.setsid()\n    pid = os.fork()\n    if pid > 0:\n        os._exit(0)\n\n    \n    def terminate(signal, stack_frame):\n        msg = 'Terminating on signal {}'.format(signal)\n        logger.info(msg)\n        raise SystemExit(msg)\n    signal.signal(signal.SIGTERM, terminate)\n\n    \n    streams = [sys.stdin, sys.stdout, sys.stderr]\n    for stream in streams:\n        devnull = os.open(os.devnull, os.O_RDWR)\n        os.dup2(devnull, stream.fileno())\n\n    \n    for fd in [stream.fileno() for stream in streams]:\n        try:\n            os.close(fd)\n        except OSError as err:\n            if err.errno == errno.EBADF:\n                \n                pass\n\n    \n    if pidfile is None or pidfile.strip() == '':\n        logger.debug('Empty pidfile set')\n    else:\n        pid = os.getpid()\n        try:\n            with open(pidfile, 'w') as f:\n                f.write('{}\\n'.format(pid))\n                f.close()\n        except EnvironmentError:\n            logger.error('Failed to create pidfile at {}'.format(pidfile))\n\n        def remove_pid_file():\n            os.remove(pidfile)\n\n        atexit.register(remove_pid_file)\n\n    logger.debug('Process daemonized')", "docstring": "Turn the running process into a proper daemon according to PEP3143.\n\nArgs:\npidfile --The pidfile to create.", "source": "juraj-google-style"}
{"code": "def assert_input_compatibility(input_spec, inputs, layer_name):\n    if not input_spec:\n        return\n    input_spec = tree.flatten(input_spec)\n    if isinstance(inputs, dict):\n        names = [spec.name for spec in input_spec]\n        if all(names):\n            list_inputs = []\n            for name in names:\n                if name not in inputs:\n                    raise ValueError(f'Missing data for input \"{name}\". You passed a data dictionary with keys {list(inputs.keys())}. Expected the following keys: {names}')\n                list_inputs.append(inputs[name])\n            inputs = list_inputs\n    inputs = tree.flatten(inputs)\n    if len(inputs) != len(input_spec):\n        raise ValueError(f'Layer \"{layer_name}\" expects {len(input_spec)} input(s), but it received {len(inputs)} input tensors. Inputs received: {inputs}')\n    for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n        if spec is None:\n            continue\n        if x is None and spec.optional:\n            continue\n        if not hasattr(x, 'shape'):\n            raise ValueError(f\"Inputs to a layer should be tensors. Got '{x}' (of type {type(x)}) as input for layer '{layer_name}'.\")\n        shape = backend.standardize_shape(x.shape)\n        ndim = len(shape)\n        if spec.ndim is not None and (not spec.allow_last_axis_squeeze):\n            if ndim != spec.ndim:\n                raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected ndim={spec.ndim}, found ndim={ndim}. Full shape received: {shape}')\n        if spec.max_ndim is not None:\n            if ndim is not None and ndim > spec.max_ndim:\n                raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected max_ndim={spec.max_ndim}, found ndim={ndim}')\n        if spec.min_ndim is not None:\n            if ndim is not None and ndim < spec.min_ndim:\n                raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected min_ndim={spec.min_ndim}, found ndim={ndim}. 
Full shape received: {shape}')\n        if spec.dtype is not None:\n            dtype = backend.standardize_dtype(x.dtype)\n            if dtype != spec.dtype:\n                raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected dtype={spec.dtype}, found dtype={dtype}')\n        if spec.axes:\n            for axis, value in spec.axes.items():\n                if value is not None and shape[axis] not in {value, None}:\n                    raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected axis {axis} of input shape to have value {value}, but received input with shape {shape}')\n        if spec.shape is not None:\n            spec_shape = spec.shape\n            if spec.allow_last_axis_squeeze:\n                if shape and shape[-1] == 1:\n                    shape = shape[:-1]\n                if spec_shape and spec_shape[-1] == 1:\n                    spec_shape = spec_shape[:-1]\n            for spec_dim, dim in zip(spec_shape, shape):\n                if spec_dim is not None and dim is not None:\n                    if spec_dim != dim:\n                        raise ValueError(f'Input {input_index} of layer \"{layer_name}\" is incompatible with the layer: expected shape={spec.shape}, found shape={shape}')", "docstring": "Checks compatibility between the layer and provided inputs.\n\nThis checks that the tensor(s) `inputs` verify the input assumptions\nof a layer (if any). If not, a clear and actional exception gets raised.\n\nArgs:\ninput_spec: An InputSpec instance, list of InputSpec instances, a nested\nstructure of InputSpec instances, or None.\ninputs: Input tensor, list of input tensors, or a nested structure of\ninput tensors.\nlayer_name: String, name of the layer (for error message formatting).\n\nRaises:\nValueError: in case of mismatch between\nthe provided inputs and the expectations of the layer.", "source": "github-repos"}
{"code": "def detect_phantomjs(version='2.1'):\n    \n    if settings.phantomjs_path() is not None:\n        phantomjs_path = settings.phantomjs_path()\n    else:\n        if hasattr(shutil, \"which\"):\n            phantomjs_path = shutil.which(\"phantomjs\") or \"phantomjs\"\n        else:\n            \n            phantomjs_path = \"phantomjs\"\n\n    try:\n        proc = Popen([phantomjs_path, \"--version\"], stdout=PIPE, stderr=PIPE)\n        proc.wait()\n        out = proc.communicate()\n\n        if len(out[1]) > 0:\n            raise RuntimeError('Error encountered in PhantomJS detection: %r' % out[1].decode('utf8'))\n\n        required = V(version)\n        installed = V(out[0].decode('utf8'))\n        if installed < required:\n            raise RuntimeError('PhantomJS version to old. Version>=%s required, installed: %s' % (required, installed))\n\n    except OSError:\n        raise RuntimeError('PhantomJS is not present in PATH or BOKEH_PHANTOMJS_PATH. Try \"conda install phantomjs\" or \\\n            \"npm install -g phantomjs-prebuilt\"')\n\n    return phantomjs_path", "docstring": "Detect if PhantomJS is avaiable in PATH, at a minimum version.\n\nArgs:\nversion (str, optional) :\nRequired minimum version for PhantomJS (mostly for testing)\n\nReturns:\nstr, path to PhantomJS", "source": "juraj-google-style"}
{"code": "def _convert_rnn_weights(layer, weights):\n\n    def transform_kernels(kernels, func, n_gates):\n        \n        return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])\n\n    def transpose_input(from_cudnn):\n        \n        order = 'F' if from_cudnn else 'C'\n\n        def transform(kernel):\n            return kernel.T.reshape(kernel.shape, order=order)\n        return transform\n    target_class = layer.__class__.__name__\n    if target_class in ['LSTM', 'CuDNNLSTM'] and len(weights) == 3:\n        units = weights[1].shape[0]\n        bias_shape = weights[2].shape\n        n_gates = 4\n        if bias_shape == (2 * units * n_gates,):\n            source = 'CuDNNLSTM'\n        elif bias_shape == (units * n_gates,):\n            source = 'LSTM'\n        else:\n            raise ValueError('Invalid bias shape: ' + str(bias_shape))\n\n        def convert_lstm_weights(weights, from_cudnn=True):\n            \n            kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)\n            recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n            if from_cudnn:\n                biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)\n            else:\n                biases = np.tile(0.5 * weights[2], 2)\n            return [kernels, recurrent_kernels, biases]\n        if source != target_class:\n            weights = convert_lstm_weights(weights, from_cudnn=source == 'CuDNNLSTM')\n    if target_class in ['GRU', 'CuDNNGRU'] and len(weights) == 3:\n        units = weights[1].shape[0]\n        bias_shape = weights[2].shape\n        n_gates = 3\n\n        def convert_gru_weights(weights, from_cudnn=True):\n            \n            kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)\n            recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n            biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)\n            return [kernels, recurrent_kernels, biases]\n        if bias_shape == (2 * units * n_gates,):\n            source = 'CuDNNGRU'\n        elif bias_shape == (2, units * n_gates):\n            source = 'GRU(reset_after=True)'\n        elif bias_shape == (units * n_gates,):\n            source = 'GRU(reset_after=False)'\n        else:\n            raise ValueError('Invalid bias shape: ' + str(bias_shape))\n        if target_class == 'CuDNNGRU':\n            target = 'CuDNNGRU'\n        elif layer.reset_after:\n            target = 'GRU(reset_after=True)'\n        else:\n            target = 'GRU(reset_after=False)'\n        if source != target:\n            types = (source, target)\n            if 'GRU(reset_after=False)' in types:\n                raise ValueError('%s is not compatible with %s' % types)\n            if source == 'CuDNNGRU':\n                weights = convert_gru_weights(weights, from_cudnn=True)\n            elif source == 'GRU(reset_after=True)':\n                weights = convert_gru_weights(weights, from_cudnn=False)\n    return weights", "docstring": "Converts weights for RNN layers between native and CuDNN format.\n\nInput kernels for each gate are transposed and converted between Fortran\nand C layout, recurrent kernels are transposed. For LSTM biases are summed/\nsplit in half, for GRU biases are reshaped.\n\nWeights can be converted in both directions between `LSTM` and`CuDNNSLTM`\nand between `CuDNNGRU` and `GRU(reset_after=True)`. 
Default `GRU` is not\ncompatible with `CuDNNGRU`.\n\nFor missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made.\n\nArgs:\nlayer: Target layer instance.\nweights: List of source weights values (input kernels, recurrent\nkernels, [biases]) (Numpy arrays).\n\nReturns:\nA list of converted weights values (Numpy arrays).\n\nRaises:\nValueError: for incompatible GRU layer/weights or incompatible biases", "source": "github-repos"}
{"code": "class DistributionMetric(Metric):\n\n    def __init__(self, dist_metric, submit_timestamp, metric_id, metric_type):\n        custom_label = dist_metric.key.metric.namespace + '_' + parse_step(dist_metric.key.step) + '_' + metric_type + '_' + dist_metric.key.metric.name\n        value = getattr(dist_metric.result, metric_type)\n        if value is None:\n            msg = '%s: the result is expected to be an integer, not None.' % custom_label\n            _LOGGER.debug(msg)\n            raise ValueError(msg)\n        super().__init__(submit_timestamp, metric_id, value, dist_metric, custom_label)", "docstring": "The Distribution Metric in ready-to-publish format.\n\nArgs:\ndist_metric (object): distribution metric object from MetricResult\nsubmit_timestamp (float): date-time of saving metric to database\nmetric_id (uuid): unique id to identify test run", "source": "github-repos"}
{"code": "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n        \n        X = df_features.values\n        y = df_target.values\n        clf = ard(compute_score=True)\n        clf.fit(X, y.ravel())\n\n        return np.abs(clf.coef_)", "docstring": "For one variable, predict its neighbouring nodes.\n\nArgs:\ndf_features (pandas.DataFrame):\ndf_target (pandas.Series):\nidx (int): (optional) for printing purposes\nkwargs (dict): additional options for algorithms\n\nReturns:\nlist: scores of each feature relatively to the target", "source": "juraj-google-style"}
{"code": "def get_angle(self, i: int, j: int, k: int) -> float:\n        \n        v1 = self[i].coords - self[j].coords\n        v2 = self[k].coords - self[j].coords\n        return get_angle(v1, v2, units=\"degrees\")", "docstring": "Returns angle specified by three sites.\n\nArgs:\ni: Index of first site.\nj: Index of second site.\nk: Index of third site.\n\nReturns:\nAngle in degrees.", "source": "juraj-google-style"}
{"code": "def getall(self):\n    interfaces_re = re.compile('(?<=^interface\\\\s)([Et|Po].+)$', re.M)\n    response = dict()\n    for name in interfaces_re.findall(self.config):\n        interface = self.get(name)\n        if interface:\n            response[name] = interface\n    return response", "docstring": "Returns a dict object to all Switchports\n\nThis method will return all of the configured switchports as a\ndictionary object keyed by the interface identifier.\n\nReturns:\nA Python dictionary object that represents all configured\nswitchports in the current running configuration", "source": "codesearchnet"}
{"code": "def convert(self, vroot, entry_variables):\n        \n\n        for converter in self.converters:\n            vroot = converter.convert(vroot, entry_variables)\n        return vroot", "docstring": "Convert a given graph.\n\nConvert a given graph using the `converters` in the order of the registeration, i.e., sequentially.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj-google-style"}
{"code": "def remove_item(self, **kwargs):\n        \n        path = self._get_id_path('remove_item')\n        kwargs.update({'session_id': self.session_id})\n\n        payload = {\n            'media_id': kwargs.pop('media_id', None), \n        }\n\n        response = self._POST(path, kwargs, payload)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Delete movies from a list that the user created.\n\nA valid session id is required.\n\nArgs:\nmedia_id: A movie id.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def assign_group_v2(group_assignment, device_index, base_key):\n    group_size, group_key = gen_collective_ops.collective_assign_group_v2(group_assignment=group_assignment, device_index=device_index, base_key=base_key)\n    return (group_size, group_key)", "docstring": "Assign group key based on group_assignment.\n\nArgs:\ngroup_assignment: a 2 dimensional integer Tensor that encodes which devices\nbelong to the same group. The values are indices of the devices within 0\nto number of devices.\ndevice_index: integer for the index of the current device\nbase_key: integer to offset the resulted group_key. The base key shall be\nunique for different values of group_assignment in the same tf.function.\nNotes: The device_index argument must be consistent with the index of the\ndevice of this Op in the device assignment list. The behavior of this Op is\nundefined if they are inconsistent.\n\nReturns:\ngroup_size, group_key: The group size and group key for the current device.", "source": "github-repos"}
{"code": "def div(x, y, name=None):\n    return _div_python2(x, y, name)", "docstring": "Divides x / y elementwise (using Python 2 division operator semantics).\n\n@compatibility(TF2)\nThis function is deprecated in TF2. Prefer using the Tensor division operator,\n`tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator\nsemantics.\n@end_compatibility\n\n\nThis function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`\nand `y` are both integers then the result will be an integer. This is in\ncontrast to Python 3, where division with `/` is always a float while division\nwith `//` is always an integer.\n\nArgs:\nx: `Tensor` numerator of real numeric type.\ny: `Tensor` denominator of real numeric type.\nname: A name for the operation (optional).\n\nReturns:\n`x / y` returns the quotient of x and y.", "source": "github-repos"}
{"code": "def get_dataset(dataset='s2l1c'):\n    if (dataset == 's2l1c'):\n        search_string = os.path.join(DIR_DATA, dataset, '**', '*_B??.jp2')\n        files = glob.glob(search_string, recursive=True)\n        if (not files):\n            raise IOError(f'Could not find raster files of the s2l1c dataset. Search string: {search_string}')\n        basename_splitted = [pth.replace('.jp2', '').split('_')[(- 2):] for pth in files]\n        dset = {'raster_files': files, 'raster_bands': [ele[1] for ele in basename_splitted], 'raster_times': [ele[0] for ele in basename_splitted], 'vector_file': os.path.join(DIR_DATA, 's2l1c', 's2l1c_ref.gpkg'), 'vector_file_osm': os.path.join(DIR_DATA, 's2l1c', 'gis_osm_landuse-water_a_free_1_area-10000-to-500000.gpkg')}\n    elif (dataset == 'lsts'):\n        search_string = os.path.join(DIR_DATA, dataset, '**', '*.tif')\n        files = glob.glob(search_string, recursive=True)\n        if (not files):\n            raise IOError(f'Could not find raster files of the lsts dataset. Search string: {search_string}')\n        basename_splitted = [os.path.basename(pth).replace('.tif', '').split('_') for pth in files]\n        dset = {'raster_files': files, 'raster_bands': [ele[1] for ele in basename_splitted], 'raster_times': [ele[0][9:16] for ele in basename_splitted]}\n    return dset", "docstring": "Get a specific sampledata to play around.\n\nSo far the following sampledata exist:\n\n* 's2l1c': One Sentinel-2 Level 1C scene with a reference dataset.\n* 'lsts': A time series of 105 Landsat scenes each with the bands b3 (red), b4 (nir), b5 (swir1) and fmask.\n\nKeyword Arguments:\ndataset {str} -- The name of the dataset (default: {'s2l1c'}).\n\nReturns:\n[dict] -- A dictionary with paths and information about the sampledata.", "source": "codesearchnet"}
{"code": "def get_user_groups(self, user):\n    self.project_service.set_auth(self._token_project)\n    return self.project_service.get_user_groups(user)", "docstring": "Get user's group memberships.\n\nArgs:\nuser (string): User name.\n\nReturns:\n(list): User's groups.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: List[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None, prompt_depth: Optional[torch.Tensor]=None) -> List[torch.Tensor]:\n    if not isinstance(hidden_states, (tuple, list)):\n        raise TypeError('hidden_states should be a tuple or list of tensors')\n    if len(hidden_states) != len(self.config.neck_hidden_sizes):\n        raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n    hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n    features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n    output = self.fusion_stage(features, prompt_depth=prompt_depth)\n    return output", "docstring": "Args:\nhidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\nList of hidden states from the backbone.", "source": "github-repos"}
{"code": "def _HasId(self, schedule, entity_id):\n    try:\n        self._GetById(schedule, entity_id)\n        has = True\n    except KeyError:\n        has = False\n    return has", "docstring": "Check if the schedule has an entity with the given id.\n\nArgs:\nschedule: The transitfeed.Schedule instance to look in.\nentity_id: The id of the entity.\n\nReturns:\nTrue if the schedule has an entity with the id or False if not.", "source": "codesearchnet"}
{"code": "def __init__(self, children: Optional[List['AbstractSyntaxTree']]=None) -> None:\n    self.data_type: Optional[_fhir_path_data_types.FhirPathDataType] = None\n    self.parent = None\n    self._children = children\n    for c in self._children or []:\n        c.parent = weakref.proxy(self)", "docstring": "Initializes an `AbstractSyntaxTree` with an optional list of children.\n\nNote that the `parent` property is set for children at the time their parent\nis initialized. It is set as a weak reference to avoid retain cycles.\n\nArgs:\nchildren: The optional list of children belonging to this node.", "source": "github-repos"}
{"code": "def new(self, index=None):\n    if (index is None):\n        try:\n            return next(self.select(New, None, False))\n        except StopIteration:\n            raise NoSuchAnnotation\n    else:\n        for e in self.select(New, None, False):\n            return e[index]\n        raise NoSuchAnnotation", "docstring": "Get the new corrected annotation.\n\nThis returns only one annotation if multiple exist, use `index` to select another in the sequence.\n\nReturns:\nan annotation element (:class:`AbstractElement`)\n\nRaises:\n:class:`NoSuchAnnotation`", "source": "codesearchnet"}
{"code": "def call_with_captures(self, args, kwargs, captures):", "docstring": "Calls this AtomicFunction with captures as defined by its FunctionType.\n\nArgs:\nargs: Tuple containing positional arguments\nkwargs: Dict containing keyword arguments\ncaptures: Tuple of tensors supplying captured tensor values.\n\nReturns:\nA structured output value based on the inputs.", "source": "github-repos"}
{"code": "def add_keywords_from_list(self, keyword_list):\n        \n        if not isinstance(keyword_list, list):\n            raise AttributeError(\"keyword_list should be a list\")\n\n        for keyword in keyword_list:\n            self.add_keyword(keyword)", "docstring": "To add keywords from a list\n\nArgs:\nkeyword_list (list(str)): List of keywords to add\n\nExamples:\n>>> keyword_processor.add_keywords_from_list([\"java\", \"python\"]})\nRaises:\nAttributeError: If `keyword_list` is not a list.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               offsets,\n               max_values_count,\n               max_values_size):\n    \n    self._offsets = offsets\n    self._max_values_count = max_values_count\n    self._max_values_size = max_values_size", "docstring": "Constructor.\n\nArgs:\noffsets: offsets for each input file to start from as list of ints.\nmax_values_count: maximum number of values to yield for a single value at\na time. Ignored if -1.\nmax_values_size: maximum total size of yielded values.  Ignored if -1", "source": "juraj-google-style"}
{"code": "def simple_generate_batch(cls, create, size, **kwargs):\n        \n        strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY\n        return cls.generate_batch(strategy, size, **kwargs)", "docstring": "Generate a batch of instances.\n\nThese instances will be either 'built' or 'created'.\n\nArgs:\nsize (int): the number of instances to generate\ncreate (bool): whether to 'build' or 'create' the instances.\n\nReturns:\nobject list: the generated instances", "source": "juraj-google-style"}
{"code": "def get(cls, issue_type):\n        \n        if isinstance(issue_type, str):\n            obj = getattr(db, cls.__name__).find_one(cls.issue_type == issue_type)\n\n        elif isinstance(issue_type, int):\n            obj = getattr(db, cls.__name__).find_one(cls.issue_type_id == issue_type)\n\n        elif isinstance(issue_type, cls):\n            return issue_type\n\n        else:\n            obj = None\n\n        if not obj:\n            obj = cls()\n            obj.issue_type = issue_type\n\n            db.session.add(obj)\n            db.session.commit()\n            db.session.refresh(obj)\n\n        return obj", "docstring": "Returns the IssueType object for `issue_type`. If no existing object was found, a new type will\nbe created in the database and returned\n\nArgs:\nissue_type (str,int,IssueType): Issue type name, id or class\n\nReturns:\n:obj:`IssueType`", "source": "juraj-google-style"}
{"code": "def bind(self, attribute, cls, buffer, fmt, *, offset=0, stride=0, divisor=0, normalize=False) -> None:\n    self.mglo.bind(attribute, cls, buffer.mglo, fmt, offset, stride, divisor, normalize)", "docstring": "Bind individual attributes to buffers.\n\nArgs:\nlocation (int): The attribute location.\ncls (str): The attribute class. Valid values are ``f``, ``i`` or ``d``.\nbuffer (Buffer): The buffer.\nformat (str): The buffer format.\n\nKeyword Args:\noffset (int): The offset.\nstride (int): The stride.\ndivisor (int): The divisor.\nnormalize (bool): The normalize parameter, if applicable.", "source": "codesearchnet"}
{"code": "def getConstraint(self, name):\n        \n        return lock_and_call(\n            lambda: Constraint(self._impl.getConstraint(name)),\n            self._lock\n        )", "docstring": "Get the constraint with the corresponding name.\n\nArgs:\nname: Name of the constraint to be found.\n\nRaises:\nTypeError: if the specified constraint does not exist.", "source": "juraj-google-style"}
{"code": "def _CheckLocation(self, file_entry, search_depth):\n    \n    if self._location_segments is None:\n      return False\n\n    if search_depth < 0 or search_depth > self._number_of_location_segments:\n      return False\n\n    \n    \n    if search_depth == 0:\n      segment_name = ''\n    else:\n      segment_name = self._location_segments[search_depth - 1]\n\n      if self._is_regex:\n        if isinstance(segment_name, py2to3.STRING_TYPES):\n          \n          \n          flags = re.DOTALL | re.UNICODE\n          if not self._is_case_sensitive:\n            flags |= re.IGNORECASE\n\n          try:\n            segment_name = r'^{0:s}$'.format(segment_name)\n            segment_name = re.compile(segment_name, flags=flags)\n          except sre_constants.error:\n            \n            return False\n\n          self._location_segments[search_depth - 1] = segment_name\n\n      elif not self._is_case_sensitive:\n        segment_name = segment_name.lower()\n        self._location_segments[search_depth - 1] = segment_name\n\n    if search_depth > 0:\n      if self._is_regex:\n        if not segment_name.match(file_entry.name):  \n          return False\n\n      elif self._is_case_sensitive:\n        if segment_name != file_entry.name:\n          return False\n\n      elif segment_name != file_entry.name.lower():\n        return False\n\n    return True", "docstring": "Checks the location find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\nsearch_depth (int): number of location path segments to compare.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "juraj-google-style"}
{"code": "def from_row_limits(cls, row_limits, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    with ops.name_scope(None, 'RowPartitionFromRowLimits', [row_limits]):\n        row_limits = cls._convert_row_partition(row_limits, 'row_limits', dtype_hint=dtype_hint, dtype=dtype)\n        row_limits.shape.assert_has_rank(1)\n        if validate:\n            msg = 'Arguments to from_row_limits do not form a valid RaggedTensor'\n            checks = [check_ops.assert_rank(row_limits, 1, message=msg), check_ops.assert_non_negative(row_limits[:1], message=msg), _assert_monotonic_increasing(row_limits, message=msg)]\n            row_limits = control_flow_ops.with_dependencies(checks, row_limits)\n        zero = array_ops.zeros([1], row_limits.dtype)\n        row_splits = array_ops.concat([zero, row_limits], axis=0)\n        return cls(row_splits=row_splits, internal=_row_partition_factory_key)", "docstring": "Creates a `RowPartition` with rows partitioned by `row_limits`.\n\nEquivalent to: `from_row_splits(values, concat([0, row_limits], axis=0))`.\n\nArgs:\nrow_limits: A 1-D integer tensor with shape `[nrows]`.  Must be sorted in\nascending order.\nvalidate: If true, then use assertions to check that the arguments form a\nvalid `RowPartition`.\ndtype: Optional dtype for the RowPartition. If missing, the type\nis inferred from the type of `row_limits`, dtype_hint, or tf.int64.\ndtype_hint: Optional dtype for the RowPartition, used when dtype\nis None. In some cases, a caller may not have a dtype in mind when\nconverting to a tensor, so dtype_hint can be used as a soft preference.\nIf the conversion to `dtype_hint` is not possible, this argument has no\neffect.\n\nReturns:\nA `RowPartition`.", "source": "github-repos"}
{"code": "def attribute(*args, **kw):\n    \n    return operator(kind=Operator.Type.ATTRIBUTE, *args, **kw)", "docstring": "Registers a new attribute only operator function in the test engine.\n\nArguments:\n*args: variadic arguments.\n**kw: variadic keyword arguments.\n\nReturns:\nfunction", "source": "juraj-google-style"}
{"code": "def with_min_occurrence(self, min_occurrence):\n    self._options['min_occurrence'] = min_occurrence\n    return self", "docstring": "Only show profiler nodes including no less than 'min_occurrence' graph nodes.\n\nA \"node\" means a profiler output node, which can be a python line\n(code view), an operation type (op view), or a graph node\n(graph/scope view). A python line includes all graph nodes created by that\nline, while an operation type includes all graph nodes of that type.\n\nArgs:\nmin_occurrence: Only show nodes including no less than this.\nReturns:\nself", "source": "github-repos"}
{"code": "def _CreateFolder(self, parent, name, visible=True, description=None):\n    folder = ET.SubElement(parent, 'Folder')\n    name_tag = ET.SubElement(folder, 'name')\n    name_tag.text = name\n    if (description is not None):\n        desc_tag = ET.SubElement(folder, 'description')\n        desc_tag.text = description\n    if (not visible):\n        visibility = ET.SubElement(folder, 'visibility')\n        visibility.text = '0'\n    return folder", "docstring": "Create a KML Folder element.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nname: The folder name as a string.\nvisible: Whether the folder is initially visible or not.\ndescription: A description string or None.\n\nReturns:\nThe folder ElementTree.Element instance.", "source": "codesearchnet"}
{"code": "def _ReadTableHeader(self, file_object, table_header_offset):\n    data_type_map = self._GetDataTypeMap('keychain_table_header')\n    (table_header, _) = self._ReadStructureFromFileObject(file_object, table_header_offset, data_type_map)\n    return table_header", "docstring": "Reads the table header.\n\nArgs:\nfile_object (file): file-like object.\ntable_header_offset (int): offset of the tables header relative to\nthe start of the file.\n\nReturns:\nkeychain_table_header: table header.\n\nRaises:\nParseError: if the table header cannot be read.", "source": "codesearchnet"}
{"code": "def __init__(self, datafile, logger, error_handler):\n    \n\n    config = json.loads(datafile)\n    self.logger = logger\n    self.error_handler = error_handler\n    self.version = config.get('version')\n    if self.version not in SUPPORTED_VERSIONS:\n      raise exceptions.UnsupportedDatafileVersionException(\n        enums.Errors.UNSUPPORTED_DATAFILE_VERSION.format(self.version)\n      )\n\n    self.account_id = config.get('accountId')\n    self.project_id = config.get('projectId')\n    self.revision = config.get('revision')\n    self.groups = config.get('groups', [])\n    self.experiments = config.get('experiments', [])\n    self.events = config.get('events', [])\n    self.attributes = config.get('attributes', [])\n    self.audiences = config.get('audiences', [])\n    self.typed_audiences = config.get('typedAudiences', [])\n    self.feature_flags = config.get('featureFlags', [])\n    self.rollouts = config.get('rollouts', [])\n    self.anonymize_ip = config.get('anonymizeIP', False)\n    self.bot_filtering = config.get('botFiltering', None)\n\n    \n    self.group_id_map = self._generate_key_map(self.groups, 'id', entities.Group)\n    self.experiment_key_map = self._generate_key_map(self.experiments, 'key', entities.Experiment)\n    self.event_key_map = self._generate_key_map(self.events, 'key', entities.Event)\n    self.attribute_key_map = self._generate_key_map(self.attributes, 'key', entities.Attribute)\n\n    self.audience_id_map = self._generate_key_map(self.audiences, 'id', entities.Audience)\n\n    \n    \n    for typed_audience in self.typed_audiences:\n      typed_audience['conditions'] = json.dumps(typed_audience['conditions'])\n    typed_audience_id_map = self._generate_key_map(self.typed_audiences, 'id', entities.Audience)\n    self.audience_id_map.update(typed_audience_id_map)\n\n    self.rollout_id_map = self._generate_key_map(self.rollouts, 'id', entities.Layer)\n    for layer in self.rollout_id_map.values():\n      for experiment in layer.experiments:\n        self.experiment_key_map[experiment['key']] = entities.Experiment(**experiment)\n\n    self.audience_id_map = self._deserialize_audience(self.audience_id_map)\n    for group in self.group_id_map.values():\n      experiments_in_group_key_map = self._generate_key_map(group.experiments, 'key', entities.Experiment)\n      for experiment in experiments_in_group_key_map.values():\n        experiment.__dict__.update({\n          'groupId': group.id,\n          'groupPolicy': group.policy\n        })\n      self.experiment_key_map.update(experiments_in_group_key_map)\n\n    self.experiment_id_map = {}\n    self.variation_key_map = {}\n    self.variation_id_map = {}\n    self.variation_variable_usage_map = {}\n    for experiment in self.experiment_key_map.values():\n      self.experiment_id_map[experiment.id] = experiment\n      self.variation_key_map[experiment.key] = self._generate_key_map(\n        experiment.variations, 'key', entities.Variation\n      )\n      self.variation_id_map[experiment.key] = {}\n      for variation in self.variation_key_map.get(experiment.key).values():\n        self.variation_id_map[experiment.key][variation.id] = variation\n        self.variation_variable_usage_map[variation.id] = self._generate_key_map(\n          variation.variables, 'id', entities.Variation.VariableUsage\n        )\n\n    self.feature_key_map = self._generate_key_map(self.feature_flags, 'key', entities.FeatureFlag)\n\n    \n    \n    self.experiment_feature_map = {}\n    for feature in 
self.feature_key_map.values():\n      feature.variables = self._generate_key_map(feature.variables, 'key', entities.Variable)\n\n      for exp_id in feature.experimentIds:\n        \n        self.experiment_feature_map[exp_id] = [feature.id]\n\n        experiment_in_feature = self.experiment_id_map[exp_id]\n        \n        if experiment_in_feature.groupId:\n          feature.groupId = experiment_in_feature.groupId\n          \n          break\n\n    \n    \n    \n    \n    self.forced_variation_map = {}", "docstring": "ProjectConfig init method to load and set project config data.\n\nArgs:\ndatafile: JSON string representing the project.\nlogger: Provides a log message to send log messages to.\nerror_handler: Provides a handle_error method to handle exceptions.", "source": "juraj-google-style"}
{"code": "def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor:\n    token_logits = tf.einsum('bsj,j->bs', sequence_output, self.column_output_weights) + self.column_output_bias\n    cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)\n    column_index = cell_index.project_inner(cell_logits_index)\n    column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)\n    cell_count, _ = reduce_sum(cell_mask, column_index)\n    column_logits /= cell_count + EPSILON_ZERO_DIVISION\n    is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0))\n    column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32)\n    if not allow_empty_column_selection:\n        column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32)\n    return column_logits", "docstring": "Computes the column logits.\n\nArgs:\nsequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\nAlso known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the\nmodel.\ncell_index (`ProductIndexMap`):\nIndex that groups tokens into cells.\ncell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`):\nMask for cells that exist in the table (i.e. that are not padding).\nallow_empty_column_selection (`bool`):\nWhether to allow not to select any column\n\nReturns:\ncolumn_logits (`tf.Tensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for\nevery example in the batch.", "source": "github-repos"}
{"code": "def generate_password(length=32):\n    \n    return ''.join(random.SystemRandom().choice(string.ascii_letters + '!@", "docstring": "Generate a cryptographically secure random string to use for passwords\n\nArgs:\nlength (int): Length of password, defaults to 32 characters\n\nReturns:\nRandomly generated string", "source": "juraj-google-style"}
{"code": "def save_counter(self):\n    return self.checkpointer().save_counter", "docstring": "An integer variable numbering the checkpoint events.\n\nThis is maintained by the underlying tf.train.Checkpoint object employed by\nAsyncCheckpoint class. The number starts at 0 and gets incremented for each\ncheckpoint event.\n\nReturns:\nThe save counter variable.", "source": "github-repos"}
{"code": "def generate_hashfile(directory, blacklist=_BLACKLIST):\n    \n    checksums = generate_checksums(directory, blacklist)\n\n    out = \"\"\n    for fn, checksum in sorted(checksums.items()):\n        out += \"%s %s\\n\" % (checksum, fn)\n\n    return out", "docstring": "Compute checksum for each file in `directory`, with exception of files\nspecified in `blacklist`.\n\nArgs:\ndirectory (str): Absolute or relative path to the directory.\nblacklist (list/set/tuple): List of blacklisted filenames. Only\nfilenames are checked, not paths!\n\nReturns:\nstr: Content of hashfile as it is specified in ABNF specification for \\\nproject.", "source": "juraj-google-style"}
{"code": "def handle_worker_messages(self, timeout):\n    msgs = self.messaging_backend.popn(self.incoming_mailbox, n=20)\n    for msg in msgs:\n        self.handle_single_message(msg)", "docstring": "Read messages that are placed in self.incoming_mailbox,\nand then update the job states corresponding to each message.\n\nArgs:\ntimeout: How long to wait for an incoming message, if the mailbox is empty right now.\n\nReturns: None", "source": "codesearchnet"}
{"code": "def load_flag_values(self, flags=None):\n    \n    if flags is None:\n      flags = self._flags\n    for keyval in flags.config_value:\n      k, v = keyval.split('=', 1)\n      v = self._modules['yaml'].load(v) if isinstance(v, str) else v\n\n      \n      k = k.decode() if isinstance(k, bytes) else k\n      v = v.decode() if isinstance(v, bytes) else v\n\n      self._flag_values.setdefault(k, v)", "docstring": "Load flag values given from command line flags.\n\nArgs:\nflags: An argparse Namespace containing the command line flags.", "source": "juraj-google-style"}
{"code": "def get_template_edit_url(self, template_id):\n        \n        request = self._get_request()\n        return request.get(self.EMBEDDED_TEMPLATE_EDIT_URL + template_id)", "docstring": "Retrieves a embedded template for editing\n\nRetrieves an embedded object containing a template url that can be opened in an iFrame.\n\nArgs:\n\ntemplate_id (str): The id of the template to get a signature url for\n\nReturns:\nAn Embedded object", "source": "juraj-google-style"}
{"code": "def get_substring_idxs(substr, string):\n    \n    return [match.start() for match in re.finditer(substr, string)]", "docstring": "Return a list of indexes of substr. If substr not found, list is\nempty.\n\nArguments:\nsubstr (str): Substring to match.\nstring (str): String to match in.\n\nReturns:\nlist of int: Start indices of substr.", "source": "juraj-google-style"}
{"code": "def run(self):\n    for aws_region in AWS_REGIONS:\n        self.log.debug('Checking trails for {}/{}'.format(self.account.account_name, aws_region))\n        ct = self.session.client('cloudtrail', region_name=aws_region)\n        trails = ct.describe_trails()\n        if (len(trails['trailList']) == 0):\n            if (aws_region == self.global_ct_region):\n                self.create_cloudtrail(aws_region)\n        else:\n            for trail in trails['trailList']:\n                if (trail['Name'] in ('Default', self.trail_name)):\n                    if (not trail['IsMultiRegionTrail']):\n                        if ((trail['Name'] == self.trail_name) and (self.global_ct_region == aws_region)):\n                            ct.update_trail(Name=trail['Name'], IncludeGlobalServiceEvents=True, IsMultiRegionTrail=True)\n                            auditlog(event='cloudtrail.update_trail', actor=self.ns, data={'trailName': trail['Name'], 'account': self.account.account_name, 'region': aws_region, 'changes': [{'setting': 'IsMultiRegionTrail', 'oldValue': False, 'newValue': True}]})\n                        else:\n                            ct.delete_trail(name=trail['Name'])\n                            auditlog(event='cloudtrail.delete_trail', actor=self.ns, data={'trailName': trail['Name'], 'account': self.account.account_name, 'region': aws_region, 'reason': 'Incorrect region, name or not multi-regional'})\n                    elif (trail['HomeRegion'] == aws_region):\n                        if ((self.global_ct_region != aws_region) or (trail['Name'] == 'Default')):\n                            ct.delete_trail(Name=trail['Name'])\n                            auditlog(event='cloudtrail.delete_trail', actor=self.ns, data={'trailName': trail['Name'], 'account': self.account.account_name, 'region': aws_region, 'reason': 'Incorrect name or region for multi-region trail'})\n        trails = ct.describe_trails()\n        for trail in trails['trailList']:\n            if ((trail['Name'] == self.trail_name) and (trail['HomeRegion'] == aws_region)):\n                self.validate_trail_settings(ct, aws_region, trail)", "docstring": "Configures and enables a CloudTrail trail and logging on a single AWS Account.\n\nHas the capability to create both single region and multi-region trails.\n\nWill automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question,\nas well as reverting any manual changes to the trails if applicable.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_collection(self, lang=None, task=None):\n    if lang:\n        id = '{}{}'.format(Downloader.LANG_PREFIX, lang)\n    elif task:\n        id = '{}{}'.format(Downloader.TASK_PREFIX, task)\n    else:\n        raise ValueError('You should pass either the task or the lang')\n    try:\n        return self.info(id)\n    except ValueError as e:\n        if lang:\n            raise LanguageNotSupported('Language {} is not supported'.format(id))\n        if task:\n            raise TaskNotSupported('Task {} is not supported'.format(id))", "docstring": "Return the collection that represents a specific language or task.\n\nArgs:\nlang (string): Language code.\ntask (string): Task name.", "source": "codesearchnet"}
{"code": "def remove_duplicate_sg(security_groups):\n    for (each_sg, duplicate_sg_name) in SECURITYGROUP_REPLACEMENTS.items():\n        if ((each_sg in security_groups) and (duplicate_sg_name in security_groups)):\n            LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate_sg_name, each_sg)\n            security_groups.remove(duplicate_sg_name)\n    return security_groups", "docstring": "Removes duplicate Security Groups that share a same name alias\n\nArgs:\nsecurity_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS\n\nReturns:\nsecurity_groups (list): A list of security groups with duplicate aliases removed", "source": "codesearchnet"}
{"code": "def numeric_columns(self, include_bool=True):\n    columns = []\n    for (col, dtype) in zip(self.columns, self.dtypes):\n        if (is_numeric_dtype(dtype) and (include_bool or ((not include_bool) and (dtype != np.bool_)))):\n            columns.append(col)\n    return columns", "docstring": "Returns the numeric columns of the Manager.\n\nReturns:\nList of index names.", "source": "codesearchnet"}
{"code": "def rematch_entry(envkernel, gamma = 0.1, threshold = 1e-6):\n    \n    n, m = envkernel.shape\n    K = np.exp(-(1 - envkernel) / gamma)\n\n    \n    u = np.ones((n,)) / n\n    v = np.ones((m,)) / m\n\n    en = np.ones((n,)) / float(n)\n    em = np.ones((m,)) / float(m)\n\n    Kp = (1 / en).reshape(-1, 1) * K\n\n    \n    itercount = 0\n    error = 1\n    while (error > threshold):\n        uprev = u\n        vprev = v\n        v = np.divide(em, np.dot(K.T, u))\n        u = np.divide(en, np.dot(K, v))\n\n        \n        if itercount % 5:\n            error = np.sum((u - uprev) ** 2) / np.sum((u) ** 2) + np.sum((v - vprev) ** 2) / np.sum((v) ** 2)\n        itercount += 1\n\n    \n    \n    \n    pity = np.multiply( np.multiply(K, u.reshape((-1,1))) , v)\n\n    glosim = np.sum( np.multiply( pity, envkernel))\n\n    return glosim", "docstring": "Compute the global similarity between two structures A and B.\nIt uses the Sinkhorn algorithm as reported in:\nPhys. Chem. Chem. Phys., 2016, 18, p. 13768\nArgs:\nenvkernel: NxM matrix of structure A with\nN and structure B with M atoms\ngamma: parameter to control between best match gamma = 0\nand average kernel gamma = inf.", "source": "juraj-google-style"}
{"code": "def build(cls, seqs: Iterable[int], uid: bool=False) -> 'SequenceSet':\n    seqs_list = sorted(set(seqs))\n    groups: List[Union[(int, Tuple[(int, int)])]] = []\n    group: Union[(int, Tuple[(int, int)])] = seqs_list[0]\n    for i in range(1, len(seqs_list)):\n        group_i = seqs_list[i]\n        if isinstance(group, int):\n            if (group_i == (group + 1)):\n                group = (group, group_i)\n            else:\n                groups.append(group)\n                group = group_i\n        elif isinstance(group, tuple):\n            if (group_i == (group[1] + 1)):\n                group = (group[0], group_i)\n            else:\n                groups.append(group)\n                group = group_i\n    groups.append(group)\n    return SequenceSet(groups, uid)", "docstring": "Build a new sequence set that contains the given values using as\nfew groups as possible.\n\nArgs:\nseqs: The sequence values to build.\nuid: True if the sequences refer to message UIDs.", "source": "codesearchnet"}
{"code": "def match_global_phase(a: np.ndarray,\n                       b: np.ndarray\n                       ) -> Tuple[np.ndarray, np.ndarray]:\n    \n\n    \n    if a.shape != b.shape:\n        return a, b\n\n    \n    k = max(np.ndindex(*a.shape), key=lambda t: abs(b[t]))\n\n    def dephase(v):\n        r = np.real(v)\n        i = np.imag(v)\n\n        \n        if i == 0:\n            return -1 if r < 0 else 1\n        if r == 0:\n            return 1j if i < 0 else -1j\n\n        return np.exp(-1j * np.arctan2(i, r))\n\n    \n    return a * dephase(a[k]), b * dephase(b[k])", "docstring": "Phases the given matrices so that they agree on the phase of one entry.\n\nTo maximize precision, the position with the largest entry from one of the\nmatrices is used when attempting to compute the phase difference between\nthe two matrices.\n\nArgs:\na: A numpy array.\nb: Another numpy array.\n\nReturns:\nA tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.", "source": "juraj-google-style"}
{"code": "def _find_index_of_defining_frame(tb):\n    size = len(tb)\n    filenames = [frame.filename for frame in tb]\n    for idx, filename in enumerate(reversed(filenames)):\n        is_framework = _is_framework_filename(filename)\n        if not is_framework:\n            return size - idx - 1\n    return 0", "docstring": "Return index in op.traceback with first 'useful' frame.\n\nThis method reads through the stack stored in op.traceback looking for the\ninnermost frame which (hopefully) belongs to the caller.  It accomplishes this\nby rejecting frames deemed to be part of the TensorFlow framework (by\npattern matching the filename).\n\nArgs:\ntb: A list of traceback frames (as from Operation.traceback).\n\nReturns:\nInteger index into op.traceback where the first non-TF file was found\n(innermost to outermost), or 0 (for the outermost stack frame) if all files\ncame from TensorFlow.", "source": "github-repos"}
{"code": "def authentication(self, username, password):\n        \n        _auth_text = '{}:{}'.format(username, password)\n\n        \n        if int(sys.version[0]) > 2:\n            \n            _auth_bin = base64.encodebytes(_auth_text.encode())\n            _auth = _auth_bin.decode()\n            _auth = _auth.replace('\\n', '')\n            self._auth = _auth\n        else:\n            \n            _auth = base64.encodestring(_auth_text)\n            self._auth = str(_auth).replace('\\n', '')\n\n        _LOGGER.debug('Autentication string is: {}:***'.format(username))", "docstring": "Configures the user authentication for eAPI\n\nThis method configures the username and password combination to use\nfor authenticating to eAPI.\n\nArgs:\nusername (str): The username to use to authenticate the eAPI\nconnection with\npassword (str): The password in clear text to use to authenticate\nthe eAPI connection with", "source": "juraj-google-style"}
{"code": "def list_vnets(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Network/', '/virtualNetworks?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List the VNETs in a subscription\t.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of VNets list with properties.", "source": "codesearchnet"}
{"code": "def reserve(self, *args, **kwargs):\n    data = self.get_data('floating_ips/', type=POST, params={'region': self.region_slug})\n    if data:\n        self.ip = data['floating_ip']['ip']\n        self.region = data['floating_ip']['region']\n    return self", "docstring": "Creates a FloatingIP in a region without assigning\nit to a specific Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\nregion_slug: str - region's slug (e.g. 'nyc3')", "source": "codesearchnet"}
{"code": "def convolution_kernel(self, name='convolution_kernel'):\n    with self._name_scope(name):\n        h = self._ifft(_to_complex(self.spectrum))\n        return math_ops.cast(h, self.dtype)", "docstring": "Convolution kernel corresponding to `self.spectrum`.\n\nThe `D` dimensional DFT of this kernel is the frequency domain spectrum of\nthis operator.\n\nArgs:\nname:  A name to give this `Op`.\n\nReturns:\n`Tensor` with `dtype` `self.dtype`.", "source": "github-repos"}
{"code": "def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False):\n    if (not isinstance(arg, DataFrame)):\n        return pandas.to_datetime(arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache)\n    pandas.to_datetime(pandas.DataFrame(columns=arg.columns), errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache)\n    return arg._query_compiler.to_datetime()", "docstring": "Convert the arg to datetime format. If not Ray DataFrame, this falls\nback on pandas.\n\nArgs:\nerrors ('raise' or 'ignore'): If 'ignore', errors are silenced.\nPandas blatantly ignores this argument so we will too.\ndayfirst (bool): Date format is passed in as day first.\nyearfirst (bool): Date format is passed in as year first.\nutc (bool): retuns a UTC DatetimeIndex if True.\nbox (bool): If True, returns a DatetimeIndex.\nformat (string): strftime to parse time, eg \"%d/%m/%Y\".\nexact (bool): If True, require an exact format match.\nunit (string, default 'ns'): unit of the arg.\ninfer_datetime_format (bool): Whether or not to infer the format.\norigin (string): Define the reference date.\n\nReturns:\nType depends on input:\n\n- list-like: DatetimeIndex\n- Series: Series of datetime64 dtype\n- scalar: Timestamp", "source": "codesearchnet"}
{"code": "def _UpdateEtag(self, response):\n    \n    etag = response.headers.get('etag', self.etag)\n    etag_updated = self.etag != etag\n    self.etag = etag\n    return etag_updated", "docstring": "Update the etag from an API response.\n\nArgs:\nresponse: HTTP response with a header field.\n\nReturns:\nbool, True if the etag in the response header updated.", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "List all `BitbucketServerConfigs` for a given project. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBitbucketServerConfigsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListBitbucketServerConfigsResponse) The response message.", "source": "github-repos"}
{"code": "def _parse_line(cls, line):\n        \n        try:\n            pkg, rest = line.split(None, 1)\n        except ValueError:\n            rpm = cls._parse_package(line.strip())\n            return rpm\n        rpm = cls._parse_package(pkg)\n        rest = rest.split('\\t')\n        for i, value in enumerate(rest):\n            rpm[cls.SOSREPORT_KEYS[i]] = value\n        return rpm", "docstring": "Helper method for parsing package line with or without SOS report information.\n\nArgs:\nline (str): package line with or without SOS report information\n\nReturns:\ndict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus\nadditionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig',\n'pgpsig_short' if these are present.", "source": "juraj-google-style"}
{"code": "def GetPointWithDistanceTraveled(self, shape_dist_traveled):\n    \n    if not self.distance:\n      return None\n    if shape_dist_traveled <= self.distance[0]:\n      return self.points[0]\n    if shape_dist_traveled >= self.distance[-1]:\n      return self.points[-1]\n\n    index = bisect.bisect(self.distance, shape_dist_traveled)\n    (lat0, lng0, dist0) = self.points[index - 1]\n    (lat1, lng1, dist1) = self.points[index]\n\n    \n    \n    \n    \n    \n    \n    \n    ca = shape_dist_traveled - dist0\n    bc = dist1 - shape_dist_traveled\n    ba = bc + ca\n    if ba == 0:\n      \n      \n      return None\n    \n    \n    lat = (lat1 * ca + lat0 * bc) / ba\n    lng = (lng1 * ca + lng0 * bc) / ba\n    return (lat, lng, shape_dist_traveled)", "docstring": "Returns a point on the shape polyline with the input shape_dist_traveled.\n\nArgs:\nshape_dist_traveled: The input shape_dist_traveled.\n\nReturns:\nThe shape point as a tuple (lat, lng, shape_dist_traveled), where lat and\nlng is the location of the shape point, and shape_dist_traveled is an\nincreasing metric representing the distance traveled along the shape.\nReturns None if there is data error in shape.", "source": "juraj-google-style"}
{"code": "def SetStorageProfiler(self, storage_profiler):\n    \n    self._storage_profiler = storage_profiler\n    if self._storage_file:\n      self._storage_file.SetStorageProfiler(storage_profiler)", "docstring": "Sets the storage profiler.\n\nArgs:\nstorage_profiler (StorageProfiler): storage profiler.", "source": "juraj-google-style"}
{"code": "def refresh(self, id_or_uri, timeout=(- 1)):\n    uri = (self._client.build_uri(id_or_uri) + '/refresh')\n    return self._client.update_with_zero_body(uri, timeout=timeout)", "docstring": "The Refresh action reclaims the top-of-rack switches in a logical switch.\n\nArgs:\nid_or_uri:\nCan be either the Logical Switch ID or URI\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: The Logical Switch", "source": "codesearchnet"}
{"code": "def generate_flat_data(self):\n    all_statements = []\n    all_targets = []\n    self.sequence_sizes_in = []\n    self.sequence_sizes_out = []\n    for _ in six.moves.range(self._batch_size):\n        (length, nest) = self.curriculum_obj.fetch()\n        seq_size_in = self._max_seq_length\n        is_valid_sample = False\n        tries_remaining = 10\n        while (not is_valid_sample):\n            (value, code) = generate_code(length, nest, self._ops)\n            (tokens_in, seq_size_in) = self.tokenize(code, self._max_seq_length, self._token_by_char)\n            (tokens_out, seq_size_out) = self.tokenize(value, self._max_seq_length, self._token_by_char)\n            is_valid_sample = (self._max_seq_length >= seq_size_in)\n            if is_valid_sample:\n                self.sequence_sizes_in.append(seq_size_in)\n                self.sequence_sizes_out.append(seq_size_out)\n            if (tries_remaining == 0):\n                raise ValueError('Could not generate a sample below the allowable maximum, consider reducing either max_length or max_nest.')\n            else:\n                tries_remaining -= 1\n        all_statements += tokens_in\n        all_targets += tokens_out\n    self.flat_data = np.array(all_statements, dtype=np.int64)\n    self.num_tokens = self.flat_data.shape[0]\n    self.flat_targets = np.array(all_targets, dtype=np.int64)\n    self.num_tokens_target = self.flat_targets.shape[0]\n    self.start_token = np.array(self.tokenize([get_start_token()], 1)[0], dtype=np.int64)\n    self.end_token = np.array(self.tokenize([get_end_token()], 1)[0], dtype=np.int64)", "docstring": "Generates batched data in flat numpy arrays.\n\nRaises:\nValueError: When too many generate calls are required.", "source": "codesearchnet"}
{"code": "def request_status(r, detailed=False):\n\t\n\t\n\tbase_string = \"HTTP {r.request.method} {r.request.url}: {r.status_code}\"\n\t\n\tif r.status_code in range(200,99):\n\t\tstring = base_string\n\t\tif detailed is True:\n\t\t\tstring += \" - {r.json()}\"\n\t\telse:\n\t\t\tstring += \" - 👍\"\n\t\treturn string.format(r=r)\n\telse:\n\t\tstring = base_string\n\t\treturn string.format(r=r)", "docstring": "Returns a formatted string about the status, useful for logging.\n\nargs:\nr - takes requests.models.Response", "source": "juraj-google-style"}
{"code": "def add_observer(self, observer, identify_observed=False):\n    if hasattr(observer, '__self__'):\n        result = self._add_bound_method(observer, identify_observed)\n    else:\n        result = self._add_function(observer, identify_observed)\n    return result", "docstring": "Register an observer to observe me.\n\nArgs:\nobserver: The callable to register as an observer.\nidentify_observed: If True, then the observer will get myself\npassed as an additional first argument whenever it is invoked.\nSee ObserverFunction and ObserverBoundMethod to see how this\nworks.\n\nReturns:\nTrue if the observer was added, False otherwise.\n\nThe observing function or method will be called whenever I am called,\nand with the same arguments and keyword arguments.\n\nIf a bound method or function has already been registered as an\nobserver, trying to add it again does nothing. In other words, there is\nno way to sign up an observer to be called back multiple times. This\nwas a conscious design choice which users are invited to complain about\nif there is a compelling use case where this is inconvenient.", "source": "codesearchnet"}
{"code": "def retrieve_clang_version(clang_executable):\n    stderr = open(os.devnull, 'wb')\n    curr_version = run_shell([clang_executable, '--version'], allow_non_zero=True, stderr=stderr)\n    curr_version_split = curr_version.lower().split('clang version ')\n    if len(curr_version_split) > 1:\n        curr_version = curr_version_split[1].split()[0].split('git')\n    if len(curr_version) > 1:\n        print('WARNING: current clang installation is not a release version.\\n')\n    curr_version = curr_version[0]\n    curr_version_int = convert_version_to_int(curr_version)\n    if not curr_version_int:\n        print('WARNING: current clang installation version unknown.\\n')\n        return None\n    print('You have Clang %s installed.\\n' % curr_version)\n    return curr_version", "docstring": "Retrieve installed clang version.\n\nArgs:\nclang_executable: (String) path to clang executable\n\nReturns:\nThe clang version detected.", "source": "github-repos"}
{"code": "def name(self):\n    return ctypes.cast(self.sName, ctypes.c_char_p).value.decode()", "docstring": "Returns the name of the device.\n\nArgs:\nself (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance\n\nReturns:\nDevice name.", "source": "codesearchnet"}
{"code": "def _GetPathSegmentIndexForValueWeights(self, value_weights):\n    largest_weight = value_weights.GetLargestWeight()\n    if (largest_weight > 0):\n        value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight)\n    else:\n        value_weight_indexes = []\n    if value_weight_indexes:\n        path_segment_index = value_weight_indexes[0]\n    else:\n        path_segment_index = value_weights.GetFirstAvailableIndex()\n    if (path_segment_index is None):\n        raise RuntimeError('No path segment index found.')\n    return path_segment_index", "docstring": "Retrieves the index of the path segment based on value weights.\n\nArgs:\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.\n\nRaises:\nRuntimeError: is no path segment index can be found.", "source": "codesearchnet"}
{"code": "def matches_all(expected):\n\n    def _matches(actual):\n        from hamcrest.core import assert_that as hamcrest_assert\n        from hamcrest.library.collection import contains_inanyorder\n        expected_list = list(expected)\n        hamcrest_assert(actual, contains_inanyorder(*expected_list))\n    return _matches", "docstring": "Matcher used by assert_that to check a set of matchers.\n\nArgs:\nexpected: A list of elements or hamcrest matchers to be used to match\nthe elements of a single PCollection.", "source": "github-repos"}
{"code": "def transform(self, data, data_type='S3Prefix', content_type=None, compression_type=None, split_type=None, job_name=None):\n    local_mode = self.sagemaker_session.local_mode\n    if ((not local_mode) and (not data.startswith('s3:\n        raise ValueError('Invalid S3 URI: {}'.format(data))\n    if (job_name is not None):\n        self._current_job_name = job_name\n    else:\n        base_name = (self.base_transform_job_name or base_name_from_image(self._retrieve_image_name()))\n        self._current_job_name = name_from_base(base_name)\n    if (self.output_path is None):\n        self.output_path = 's3:\n    self.latest_transform_job = _TransformJob.start_new(self, data, data_type, content_type, compression_type, split_type)", "docstring": "Start a new transform job.\n\nArgs:\ndata (str): Input data location in S3.\ndata_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values:\n\n* 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as\ninputs for the transform job.\n* 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as\nan input for the transform job.\n\ncontent_type (str): MIME type of the input data (default: None).\ncompression_type (str): Compression type of the input data, if compressed (default: None).\nValid values: 'Gzip', None.\nsplit_type (str): The record delimiter for the input object (default: 'None').\nValid values: 'None', 'Line', 'RecordIO', and 'TFRecord'.\njob_name (str): job name (default: None). If not specified, one will be generated.", "source": "codesearchnet"}
{"code": "def make_processor(self, name, mappings, processor_type, **kwargs):\n        \n        from .processor import Processor\n        if self.processors.get(name):\n            raise LookupError(\"processor has already been created\")\n        if isinstance(mappings, list):\n            mappings = [self.get_rml(item) for item in mappings]\n        else:\n            mappings = [self.get_rml(mappings)]\n        self.processors[name] = Processor[processor_type](mappings, **kwargs)\n        self.processors[name].name = name\n        return self.processors[name]", "docstring": "Instantiates a RmlProcessor and registers it in the manager\n\nArgs:\n-----\nname: the name to register the processor\nmappings: the list RML mapping definitions to use\nprocessor_type: the name of the RML processor to use", "source": "juraj-google-style"}
{"code": "def _compute_elemwise_op_output_shape(self, shape1, shape2):\n    if None in [shape1, shape2]:\n        return None\n    elif len(shape1) < len(shape2):\n        return self._compute_elemwise_op_output_shape(shape2, shape1)\n    elif not shape2:\n        return shape1\n    output_shape = list(shape1[:-len(shape2)])\n    for i, j in zip(shape1[-len(shape2):], shape2):\n        if i is None or j is None:\n            output_shape.append(None)\n        elif i == 1:\n            output_shape.append(j)\n        elif j == 1:\n            output_shape.append(i)\n        else:\n            if i != j:\n                raise ValueError('Operands could not be broadcast together with shapes ' + str(shape1) + ' ' + str(shape2))\n            output_shape.append(i)\n    return tuple(output_shape)", "docstring": "Computes the shape of the resultant of an elementwise operation.\n\nArgs:\nshape1: tuple or None. Shape of the first tensor\nshape2: tuple or None. Shape of the second tensor\n\nReturns:\nexpected output shape when an element-wise operation is\ncarried out on 2 tensors with shapes shape1 and shape2.\ntuple or None.\n\nRaises:\nValueError: if shape1 and shape2 are not compatible for\nelement-wise operations.", "source": "github-repos"}
{"code": "def export(self, path, session):\n    if (self._graph is not tf_v1.get_default_graph()):\n        raise RuntimeError('default graph differs from the graph where the module was instantiated.')\n    if (self._graph is not session.graph):\n        raise RuntimeError('session graph differs from the graph where the module was instantiated.')\n    self._impl.export(path, session)", "docstring": "Exports the module with the variables from the session in `path`.\n\nNote that it is the module definition in the ModuleSpec used to create this\nmodule that gets exported. The session is only used to provide the value\nof variables.\n\nArgs:\npath: path where to export the module to.\nsession: session where to export the variables from.\n\nRaises:\nRuntimeError: if there is an issue during the export.", "source": "codesearchnet"}
{"code": "def reduce_max(x,\n               disable_positional_args=None,\n               output_shape=None,\n               reduced_dim=None,\n               name=None):\n  \n  output_shape = convert_to_shape(output_shape)\n  reduced_dim = convert_to_dimension(reduced_dim)\n  assert disable_positional_args is None\n  output_shape = _reduction_output_shape(x, output_shape, reduced_dim)\n  if output_shape is None:\n    output_shape = Shape([])\n  if output_shape == x.shape:\n    return x\n  return ReduceOperation(\n      x, output_shape, \"MAX\", name=name or \"reduce_max\").outputs[0]", "docstring": "Reduction on 1 or more axes.\n\nArgs:\nx: a Tensor\ndisable_positional_args: None\noutput_shape: an optional Shape.  Must be a subsequence of x.shape.\nreduced_dim: an optional Dimension\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def __init__(self, reader, genTexts=False):\n        \n        if genTexts is not None:\n            self.genTexts = genTexts\n\n        self._reader = reader", "docstring": "Creates an instance of *Borrower* class.\n\nArgs:\nreader: a *reader* object\n\nKeyword Args:\ngenText: indicates whether this borrower should be looking\nfor transformed MIBs that include human-oriented texts", "source": "juraj-google-style"}
{"code": "def Match(self, encoded):\n    logging.log(1, 'Decoding %s: %s', self.name, encoded)\n    decoded = self.msg.encoding.ParseFromString(encoded, self.msg)\n    logging.info('Matching message value:\\nExpected: %s\\nActual: %s\\n', self.value_dict_or_array, decoded)\n    return MessageValue._MatchValue(self.value_dict_or_array, decoded)", "docstring": "Whether or not |encoded| is compatible with this message instance.\n\nIf |encoded| has all required fields, and values of all fields are same to\nthose of this message instance, it is compatible. Otherwise, i.e\n1) it doesn't have some required fields\n2) it has some values of fields different from specified in |value_dict| of\nthis message instance\n\nArgs:\nencoded: A string expected to be encoded with same encoding method of\nthis message instance.\n\nReturns:\nWhether or not |encoded| is compatible with this message instance.", "source": "github-repos"}
{"code": "def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):\n    pos_A_in_B = pose_A_in_B[(:3, 3)]\n    rot_A_in_B = pose_A_in_B[(:3, :3)]\n    skew_symm = _skew_symmetric_translation(pos_A_in_B)\n    force_B = rot_A_in_B.T.dot(force_A)\n    torque_B = ((- rot_A_in_B.T.dot(skew_symm.dot(force_A))) + rot_A_in_B.T.dot(torque_A))\n    return (force_B, torque_B)", "docstring": "Converts linear and rotational force at a point in frame A to the equivalent in frame B.\n\nArgs:\nforce_A: 3-dim iterable for linear force in A\ntorque_A: 3-dim iterable for rotational force (moment) in A\npose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B\n\nReturns:\nforce_B, torque_B: two numpy arrays of shape (3,) for the forces in B", "source": "codesearchnet"}
{"code": "def remove_item(self, item):\n    for (idx, _item) in enumerate(self.items):\n        if (item == _item):\n            del self.items[idx]\n            return True\n    return False", "docstring": "Remove the specified item from the menu.\n\nArgs:\nitem (MenuItem): the item to be removed.\n\nReturns:\nbool: True if the item was removed; False otherwise.", "source": "codesearchnet"}
{"code": "def box_area(boxes):\n    boxes = _upcast(boxes)\n    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "docstring": "Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.\n\nArgs:\nboxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):\nBoxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1\n< x2` and `0 <= y1 < y2`.\nReturns:\n`torch.FloatTensor`: a tensor containing the area for each box.", "source": "github-repos"}
{"code": "def parse_cartouche_text(lines):\n    \n    indent_lines = unindent(lines)\n    indent_lines = pad_blank_lines(indent_lines)\n    indent_lines = first_paragraph_indent(indent_lines)\n    indent_paragraphs = gather_lines(indent_lines)\n    parse_tree = group_paragraphs(indent_paragraphs)\n    syntax_tree = extract_structure(parse_tree)\n    result = syntax_tree.render_rst()\n    ensure_terminal_blank(result)\n    return result", "docstring": "Parse text in cartouche format and return a reStructuredText equivalent\n\nArgs:\nlines: A sequence of strings representing the lines of a single\ndocstring as read from the source by Sphinx. This string should be\nin a format that can be parsed by cartouche.\n\nReturns:\nA list of lines containing the transformed docstring as\nreStructuredText as produced by cartouche.\n\nRaises:\nRuntimeError: If the docstring cannot be parsed.", "source": "juraj-google-style"}
{"code": "def query_parameters(param_list, defaults=None):\n    script_params = collections.OrderedDict(([k, []] for k in param_list))\n    for (param, default) in zip(list(script_params.keys()), defaults):\n        user_input = click.prompt(('%s' % param), default=default)\n        script_params[param] = ast.literal_eval(user_input)\n    return script_params", "docstring": "Asks the user for parameters. If available, proposes some defaults.\n\nArgs:\nparam_list (list): List of parameters to ask the user for values.\ndefaults (list): A list of proposed defaults. It must be a list of the\nsame length as param_list. A value of None in one element of the\nlist means that no default will be proposed for the corresponding\nparameter.", "source": "codesearchnet"}
{"code": "def __init__(self, text: str, sctx: SchemaContext):\n        \n        super().__init__(text)\n        self.sctx = sctx", "docstring": "Initialize the parser instance.\n\nArgs:\nsctx: Schema context for XPath expression parsing.", "source": "juraj-google-style"}
{"code": "def ExportClientsByKeywords(keywords, filename, token=None):\n    index = client_index.CreateClientIndex(token=token)\n    client_list = index.LookupClients(keywords)\n    logging.info('found %d clients', len(client_list))\n    if (not client_list):\n        return\n    writer = csv.DictWriter([u'client_id', u'hostname', u'last_seen', u'os', u'os_release', u'os_version', u'users', u'ips', u'macs'])\n    writer.WriteHeader()\n    for client in aff4.FACTORY.MultiOpen(client_list, token=token):\n        s = client.Schema\n        writer.WriteRow({u'client_id': client.urn.Basename(), u'hostname': client.Get(s.HOSTNAME), u'os': client.Get(s.SYSTEM), u'os_release': client.Get(s.OS_RELEASE), u'os_version': client.Get(s.OS_VERSION), u'ips': client.Get(s.HOST_IPS), u'macs': client.Get(s.MAC_ADDRESS), u'users': '\\n'.join(client.Get(s.USERNAMES, [])), u'last_seen': client.Get(s.PING)})\n    with io.open(filename, 'w') as csv_out:\n        csv_out.write(writer.Content())", "docstring": "r\"\"\"A script to export clients summaries selected by a keyword search.\n\nThis script does a client search for machines matching all of keywords and\nwrites a .csv summary of the results to filename. Multi-value fields are '\\n'\nseparated.\n\nArgs:\nkeywords: a list of keywords to search for\nfilename: the name of the file to write to, will be replaced if already\npresent\ntoken: datastore token.", "source": "codesearchnet"}
{"code": "def construct(name, exec_, terminal=False, additional_opts={}):\n\t\n\n\tdesktop_file = '[Desktop Entry]\\n'\n\n\tdesktop_file_dict = {\n\t\t'Name': name,\n\t\t'Exec': exec_,\n\t\t'Terminal': 'true' if terminal else 'false',\n\t\t'Comment': additional_opts.get('Comment', name)\n\t}\n\n\tdesktop_file = ('[Desktop Entry]\\nName={name}\\nExec={exec_}\\n'\n\t\t\t\t\t'Terminal={terminal}\\nComment={comment}\\n')\n\n\tdesktop_file = desktop_file.format(name=desktop_file_dict['Name'],\n\t\t\t\t\t\t\t\t\t   exec_=desktop_file_dict['Exec'],\n\t\t\t\t\t\t\t\t\t   terminal=desktop_file_dict['Terminal'],\n\t\t\t\t\t\t\t\t\t   comment=desktop_file_dict['Comment'])\n\n\tif additional_opts is None:\n\t\tadditional_opts = {}\n\n\tfor option in additional_opts:\n\t\tif not option in desktop_file_dict:\n\t\t\tdesktop_file += '%s=%s\\n' % (option, additional_opts[option])\n\n\treturn desktop_file", "docstring": "Construct a .desktop file and return it as a string.\nCreate a standards-compliant .desktop file, returning it as a string.\nArgs:\nname\t\t\t(str) : The program's name.\nexec\\_\t\t  (str) : The command.\nterminal\t\t(bool): Determine if program should be run in a terminal emulator or not. Defaults to ``False``.\nadditional_opts (dict): Any additional fields.\nReturns:\nstr: The constructed .desktop file.", "source": "juraj-google-style"}
{"code": "def get_services_health(self) -> dict:\n    services_health = {}\n    services_ids = self._get_services()\n    for service_id in services_ids:\n        service_name = DC.get_service_name(service_id)\n        if (DC.get_replicas(service_id) != DC.get_actual_replica(service_id)):\n            services_health[service_name] = 'Unhealthy'\n        else:\n            services_health[service_name] = 'Healthy'\n    return services_health", "docstring": "Get the health of all services.\n\nReturns:\ndict, services id and health status", "source": "codesearchnet"}
{"code": "def Delete(self, request, global_params=None):\n    config = self.GetMethodConfig('Delete')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.\n\nArgs:\nrequest: (BigqueryDatasetsDeleteRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BigqueryDatasetsDeleteResponse) The response message.", "source": "github-repos"}
{"code": "class Kosmos2VisionEncoder(nn.Module):\n\n    def __init__(self, config: Kosmos2VisionConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Kosmos2VisionEncoderLayer`].\n\nArgs:\nconfig: Kosmos2VisionConfig", "source": "github-repos"}
{"code": "def depth_november_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_november_average_ground_temperature`'.format(value))\n    self._depth_november_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_november_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_november_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def consume_json(request):\n    client = OEmbedConsumer()\n    urls = request.GET.getlist('urls')\n    width = request.GET.get('width')\n    height = request.GET.get('height')\n    template_dir = request.GET.get('template_dir')\n    output = {}\n    ctx = RequestContext(request)\n    for url in urls:\n        try:\n            provider = oembed.site.provider_for_url(url)\n        except OEmbedMissingEndpoint:\n            oembeds = None\n            rendered = None\n        else:\n            oembeds = url\n            rendered = client.parse_text(url, width, height, context=ctx, template_dir=template_dir)\n        output[url] = {'oembeds': oembeds, 'rendered': rendered}\n    return HttpResponse(simplejson.dumps(output), mimetype='application/json')", "docstring": "Extract and return oembed content for given urls.\n\nRequired GET params:\nurls - list of urls to consume\n\nOptional GET params:\nwidth - maxwidth attribute for oembed content\nheight - maxheight attribute for oembed content\ntemplate_dir - template_dir to use when rendering oembed\n\nReturns:\nlist of dictionaries with oembed metadata and renderings, json encoded", "source": "codesearchnet"}
{"code": "def __init__(self, graph_view):\n    self._graph_view = graph_view\n    if context.executing_eagerly():\n        self._cache = None\n        self._saveables_cache = None\n    else:\n        self._cache = object_identity.ObjectIdentityWeakKeyDictionary()\n        self._saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n    self._file_prefix_placeholder = None\n    self._object_graph_feed_tensor = None\n    self._last_save_object_graph = None\n    self._file_prefix_feed_tensor = None\n    self._cached_save_operation = None\n    self._restore_op_cache = {}\n    self._object_map = None", "docstring": "Configure saving.\n\nArgs:\ngraph_view: An `ObjectGraphView` object containing a description of the\nobject graph to save.", "source": "github-repos"}
{"code": "def split_data(*inputs, splits=[0.5, 0.5], shuffle=True, stratify_by=None, index_only=False, seed=None):\n\n    def fractions_to_counts(fracs, n):\n        'Converts a list of fractions to a list of counts that sum to n'\n        counts = [int(np.round((n * frac))) for frac in fracs]\n        counts[(- 1)] = (n - sum(counts[:(- 1)]))\n        return counts\n\n    def slice_data(data, indices):\n        if (isinstance(data, list) or isinstance(data, tuple)):\n            return [d for (i, d) in enumerate(data) if (i in set(indices))]\n        else:\n            try:\n                return data[indices]\n            except TypeError:\n                raise Exception(f'split_data() currently only accepts inputs of type tuple, list, np.ndarray, scipy.sparse, or torch.Tensor; not {type(data)}')\n    if (seed is not None):\n        random.seed(seed)\n    try:\n        n = len(inputs[0])\n    except TypeError:\n        n = inputs[0].shape[0]\n    num_splits = len(splits)\n    if all((isinstance(x, int) for x in splits)):\n        if (not (sum(splits) == n)):\n            raise ValueError(f'Provided split counts must sum to n ({n}), not {sum(splits)}.')\n        fracs = [(count / n) for count in splits]\n    elif all((isinstance(x, float) for x in splits)):\n        if (not (sum(splits) == 1.0)):\n            raise ValueError(f'Split fractions must sum to 1.0, not {sum(splits)}.')\n        fracs = splits\n    else:\n        raise ValueError('Splits must contain all ints or all floats.')\n    if (stratify_by is None):\n        pools = [np.arange(n)]\n    else:\n        pools = defaultdict(list)\n        for (i, val) in enumerate(stratify_by):\n            pools[val].append(i)\n        pools = list(pools.values())\n    assignments = [[] for _ in range(num_splits)]\n    for pool in pools:\n        if (shuffle or (stratify_by is not None)):\n            random.shuffle(pool)\n        counts = fractions_to_counts(fracs, len(pool))\n        counts.insert(0, 0)\n        cum_counts = np.cumsum(counts)\n        for i in range(num_splits):\n            assignments[i].extend(pool[cum_counts[i]:cum_counts[(i + 1)]])\n    if index_only:\n        return assignments\n    else:\n        outputs = []\n        for data in inputs:\n            data_splits = []\n            for split in range(num_splits):\n                data_splits.append(slice_data(data, assignments[split]))\n            outputs.append(data_splits)\n        if (len(outputs) == 1):\n            return outputs[0]\n        else:\n            return outputs", "docstring": "Splits inputs into multiple splits of defined sizes\n\nArgs:\ninputs: correlated tuples/lists/arrays/matrices/tensors to split\nsplits: list containing split sizes (fractions or counts);\nshuffle: if True, shuffle the data before splitting\nstratify_by: (None or an input) if not None, use these labels to\nstratify the splits (separating the data into groups by these\nlabels and sampling from those, rather than from the population at\nlarge); overrides shuffle\nindex_only: if True, return only the indices of the new splits, not the\nsplit data itself\nseed: (int) random seed\n\nExample usage:\nLs, Xs, Ys = split_data(L, X, Y, splits=[0.8, 0.1, 0.1])\nOR\nassignments = split_data(Y, splits=[0.8, 0.1, 0.1], index_only=True)\n\nNote: This is very similar to scikit-learn's train_test_split() method,\nbut with support for more than two splits.", "source": "codesearchnet"}
{"code": "def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:\n    \n    LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file)\n\n    def parse(mapping_file):\n        config = configparser.ConfigParser()\n        config.read_file(mapping_file)\n        return config\n\n    \n    if mapping_file is not None:\n        LOGGER.debug('Parsing command line mapping file')\n        return parse(mapping_file)\n\n    \n    xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper')\n    if xdg_config_dir is None:\n        raise RuntimeError(\n            'No mapping configured so far at any XDG config location. '\n            'Please create {config_file}'.format(\n                config_file=DEFAULT_CONFIG_FILE))\n    mapping_file = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)\n    LOGGER.debug('Parsing mapping file %s', mapping_file)\n    with open(mapping_file, 'r') as file_handle:\n        return parse(file_handle)", "docstring": "Parse the file containing the mappings from hosts to pass entries.\n\nArgs:\nmapping_file:\nName of the file to parse. If ``None``, the default file from the\nXDG location is used.", "source": "juraj-google-style"}
{"code": "def share(self, group_id, group_access, expires_at=None, **kwargs):\n    path = ('/projects/%s/share' % self.get_id())\n    data = {'group_id': group_id, 'group_access': group_access, 'expires_at': expires_at}\n    self.manager.gitlab.http_post(path, post_data=data, **kwargs)", "docstring": "Share the project with a group.\n\nArgs:\ngroup_id (int): ID of the group.\ngroup_access (int): Access level for the group.\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def collapse_address_list(addresses):\n    \n    i = 0\n    addrs = []\n    ips = []\n    nets = []\n\n    \n    for ip in addresses:\n        if isinstance(ip, _BaseIP):\n            if ips and ips[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                        str(ip), str(ips[-1])))\n            ips.append(ip)\n        elif ip._prefixlen == ip._max_prefixlen:\n            if ips and ips[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                        str(ip), str(ips[-1])))\n            ips.append(ip.ip)\n        else:\n            if nets and nets[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                        str(ip), str(nets[-1])))\n            nets.append(ip)\n\n    \n    ips = sorted(set(ips))\n    nets = sorted(set(nets))\n\n    while i < len(ips):\n        (first, last) = _find_address_range(ips[i:])\n        i = ips.index(last) + 1\n        addrs.extend(summarize_address_range(first, last))\n\n    return _collapse_address_list_recursive(sorted(\n        addrs + nets, key=_BaseNet._get_networks_key))", "docstring": "Collapse a list of IP objects.\n\nExample:\ncollapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->\n[IPv4('1.1.0.0/23')]\n\nArgs:\naddresses: A list of IPv4Network or IPv6Network objects.\n\nReturns:\nA list of IPv4Network or IPv6Network objects depending on what we\nwere passed.\n\nRaises:\nTypeError: If passed a list of mixed version objects.", "source": "juraj-google-style"}
{"code": "def _get_input_to_checker_function(self, flag_values):\n    \n    return dict([key, flag_values[key].value] for key in self.flag_names)", "docstring": "Given flag values, returns the input to be given to checker.\n\nArgs:\nflag_values: flags.FlagValues, the FlagValues instance to get flags from.\nReturns:\ndict, with keys() being self.lag_names, and value for each key\nbeing the value of the corresponding flag (string, boolean, etc).", "source": "juraj-google-style"}
{"code": "def read(self, vals):\n        \n        i = 0\n        if len(vals[i]) == 0:\n            self.holiday_name = None\n        else:\n            self.holiday_name = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.holiday_day = None\n        else:\n            self.holiday_day = vals[i]\n        i += 1", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred, y_pred_extra_dim=False):\n\n    def rt_is_equiv_dense(rt):\n        \n        return math_ops.reduce_all([math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0])) for row_lens in rt.nested_row_lengths()])\n\n    def _convert_to_dense(inputs):\n        return tuple((rt.to_tensor() if isinstance(rt, ragged_tensor.RaggedTensor) else rt for rt in inputs))\n\n    def _call_loss(inputs, ragged_output):\n        \n        r = loss_fn(*inputs)\n        if ragged_output and (not isinstance(r, ragged_tensor.RaggedTensor)):\n            r = ragged_tensor.RaggedTensor.from_tensor(r)\n        elif not ragged_output and isinstance(r, ragged_tensor.RaggedTensor):\n            r = r.to_tensor()\n        return r\n\n    def _wrapper(inputs, ragged_output):\n        _, y_pred = inputs\n        if isinstance(y_pred, ragged_tensor.RaggedTensor):\n            return cond.cond(rt_is_equiv_dense(y_pred), lambda: _call_loss(_convert_to_dense(inputs), ragged_output), lambda: _call_loss(inputs, ragged_output))\n        return loss_fn(*inputs)\n    if not isinstance(y_true, ragged_tensor.RaggedTensor):\n        return loss_fn(y_true, y_pred.to_tensor())\n    lshape = y_pred.shape.as_list()[1:-1]\n    if len(lshape) > 0:\n        spec = ragged_tensor.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype)\n    else:\n        spec = tensor_spec.TensorSpec(shape=[], dtype=y_pred.dtype)\n    nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)]\n    if y_pred_extra_dim:\n        rdims = [len(slist) for slist in nested_splits_list]\n        if rdims[0] == rdims[1] - 1:\n            nested_splits_list[1] = nested_splits_list[1][:-1]\n    map_fn = functools.partial(_wrapper, ragged_output=len(lshape) > 1)\n    assertion_list = ragged_util.assert_splits_match(nested_splits_list)\n    with ops.control_dependencies(assertion_list):\n        return ragged_map_ops.map_fn(map_fn, elems=(y_true, y_pred), dtype=spec)", "docstring": "Apply a loss function on a per batch basis.\n\nArgs:\nloss_fn: The loss function\ny_true: truth values (RaggedTensor)\ny_pred: predicted values (RaggedTensor)\ny_pred_extra_dim: whether y_pred has an additional dimension compared to\ny_true\n\nReturns:\nLoss-function result. A dense tensor if the output has a single dimension\n(per-batch loss value); a ragged tensor otherwise.", "source": "github-repos"}
{"code": "def dump(self, include_address=True, include_id=True) -> str:\n    d = {'crypto': self.keystore['crypto'], 'version': self.keystore['version']}\n    if (include_address and (self.address is not None)):\n        d['address'] = remove_0x_prefix(encode_hex(self.address))\n    if (include_id and (self.uuid is not None)):\n        d['id'] = self.uuid\n    return json.dumps(d)", "docstring": "Dump the keystore for later disk storage.\n\nThe result inherits the entries `'crypto'` and `'version`' from `account.keystore`, and\nadds `'address'` and `'id'` in accordance with the parameters `'include_address'` and\n`'include_id`'.\n\nIf address or id are not known, they are not added, even if requested.\n\nArgs:\ninclude_address: flag denoting if the address should be included or not\ninclude_id: flag denoting if the id should be included or not", "source": "codesearchnet"}
{"code": "def step_preprocess(x, step, hparams):\n    original_channel_size = common_layers.shape_list(x)[(- 1)]\n    if hparams.add_position_timing_signal:\n        x = add_position_timing_signal(x, step, hparams)\n    if hparams.add_step_timing_signal:\n        x = add_step_timing_signal(x, step, hparams)\n    if ((hparams.add_position_timing_signal or hparams.add_position_timing_signal) and (hparams.add_or_concat_timing_signal == 'concat')):\n        x = common_layers.dense(x, original_channel_size, activation=None, use_bias=False)\n    if hparams.add_sru:\n        x = common_layers.sru(x)\n    return x", "docstring": "Preprocess the input at the beginning of each step.\n\nArgs:\nx: input tensor\nstep: step\nhparams: model hyper-parameters\n\nReturns:\npreprocessed input.", "source": "codesearchnet"}
{"code": "def verify_firebase_token(id_token, request, audience=None):\n    return verify_token(id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL)", "docstring": "Verifies an ID Token issued by Firebase Authentication.\n\nArgs:\nid_token (Union[str, bytes]): The encoded token.\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\naudience (str): The audience that this token is intended for. This is\ntypically your Firebase application ID. If None then the audience\nis not verified.\n\nReturns:\nMapping[str, Any]: The decoded token.", "source": "codesearchnet"}
{"code": "def _CreateTaskStorageWriter(self, path, task):\n    \n    return SQLiteStorageFileWriter(\n        self._session, path,\n        storage_type=definitions.STORAGE_TYPE_TASK, task=task)", "docstring": "Creates a task storage writer.\n\nArgs:\npath (str): path to the storage file.\ntask (Task): task.\n\nReturns:\nSQLiteStorageFileWriter: storage writer.", "source": "juraj-google-style"}
{"code": "def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids, platform_restrictions=None):\n    campaign_extension_setting_service = client.GetService('CampaignExtensionSettingService', 'v201809')\n    extension_feed_items = [{CreateSitelinkFeedItem(feed_items, feed_item_id)} for feed_item_id in feed_item_ids]\n    extension_setting = {'extensions': extension_feed_items}\n    if platform_restrictions:\n        extension_setting['platformRestrictions'] = platform_restrictions\n    campaign_extension_setting = {'campaignId': campaign_feed['campaignId'], 'extensionType': 'SITELINK', 'extensionSetting': extension_setting}\n    operation = {'operand': campaign_extension_setting, 'operator': 'ADD'}\n    campaign_extension_setting_service.mutate([operation])", "docstring": "Creates the extension setting for a list of Feed Items.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_items: the list of all Feed Items.\ncampaign_feed: the original Campaign Feed.\nfeed_item_ids: the Ids of the feed items for which extension settings should\nbe created.\nplatform_restrictions: an optional Platform Restriction for the Feed items.", "source": "codesearchnet"}
{"code": "def load(cls, path, reader=None):\n        \n\n        if reader is None:\n            from . import io\n            reader = io.DefaultReader()\n\n        elif type(reader) == str:\n            from . import io\n            reader = io.create_reader_of_type(reader)\n\n        return reader.load(path)", "docstring": "Loads the corpus from the given path, using the given reader. If no reader is given the\n:py:class:`audiomate.corpus.io.DefaultReader` is used.\n\nArgs:\npath (str): Path to load the corpus from.\nreader (str, CorpusReader): The reader or the name of the reader to use.\n\nReturns:\nCorpus: The loaded corpus.", "source": "juraj-google-style"}
{"code": "def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):\n    \n    trans = None\n    queue = ctx.queue\n\n    if length > ctx.remaining:\n        raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))\n\n    \n    queue_len = len(queue)\n    if queue_len > 0:\n        \n        stream_event = ION_STREAM_INCOMPLETE_EVENT\n    length -= queue_len\n\n    if skip:\n        \n        if length >= 0:\n            queue.skip(queue_len)\n        else:\n            queue.skip(queue_len + length)\n\n    while True:\n        data_event, self = (yield trans)\n        if data_event is not None and data_event.data is not None:\n            data = data_event.data\n            data_len = len(data)\n            if data_len > 0:\n                \n                stream_event = ION_STREAM_INCOMPLETE_EVENT\n            length -= data_len\n            if not skip:\n                queue.extend(data)\n            else:\n                pos_adjustment = data_len\n                if length < 0:\n                    pos_adjustment += length\n                    \n                    queue.extend(data[length:])\n                queue.position += pos_adjustment\n        if length <= 0:\n            \n            yield Transition(None, whence)\n\n        trans = Transition(stream_event, self)", "docstring": "Creates a co-routine for retrieving data up to a requested size.\n\nArgs:\nlength (int): The minimum length requested.\nwhence (Coroutine): The co-routine to return to after the data is satisfied.\nctx (_HandlerContext): The context for the read.\nskip (Optional[bool]): Whether the requested number of bytes should be skipped.\nstream_event (Optional[IonEvent]): The stream event to return if no bytes are read or\navailable.", "source": "juraj-google-style"}
{"code": "def start(self) -> None:\n    self._server.start()", "docstring": "Starts this server.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses if an error occurs while\nstarting the server.", "source": "github-repos"}
{"code": "def _get_type(points, soma_class):\n    \n    assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)\n\n    npoints = len(points)\n    if soma_class == SOMA_CONTOUR:\n        return {0: None,\n                1: SomaSinglePoint,\n                2: None}.get(npoints, SomaSimpleContour)\n\n    if(npoints == 3 and\n       points[0][COLS.P] == -1 and\n       points[1][COLS.P] == 1 and\n       points[2][COLS.P] == 1):\n        L.warning('Using neuromorpho 3-Point soma')\n        \n        \n        \n        \n        \n\n        return SomaNeuromorphoThreePointCylinders\n\n    return {0: None,\n            1: SomaSinglePoint}.get(npoints, SomaCylinders)", "docstring": "get the type of the soma\n\nArgs:\npoints: Soma points\nsoma_class(str): one of 'contour' or 'cylinder' to specify the type", "source": "juraj-google-style"}
{"code": "def find_stacks(node, strict=False):\n  \n  \n  fso = FindStackOps()\n  fso.visit(node)\n  \n  AnnotateStacks(fso.push_pop_pairs, strict).visit(node)\n  return node", "docstring": "Find pushes and pops to the stack and annotate them as such.\n\nArgs:\nnode: An AST node that might contain stack pushes and pops.\nstrict: A boolean indicating whether to stringently test whether each\npush and pop are matched. This is not always possible when taking\nhigher-order derivatives of code generated in split-motion.\n\nReturns:\nnode: The node passed in, but with pushes and pops annotated in AST nodes.", "source": "juraj-google-style"}
{"code": "def Append(self, value=None, **kwarg):\n    if (self.rdf_type is not None):\n        if (isinstance(value, rdfvalue.RDFValue) and (value.__class__ != self.rdf_type)):\n            raise ValueError(('Can only accept %s' % self.rdf_type))\n        try:\n            value = self.rdf_type(value, **kwarg)\n        except (TypeError, ValueError):\n            raise ValueError(('Unable to initialize %s from type %s' % (self.__class__.__name__, type(value))))\n    self.content.Append(DataBlob().SetValue(value))", "docstring": "Add another member to the array.\n\nArgs:\nvalue: The new data to append to the array.\n**kwarg:  Create a new element from these keywords.\n\nReturns:\nThe value which was added. This can be modified further by the caller and\nchanges will be propagated here.\n\nRaises:\nValueError: If the value to add is not allowed.", "source": "codesearchnet"}
{"code": "def run_from_ufos(self, ufos, output=(), **kwargs):\n        \n\n        if set(output) == {\"ufo\"}:\n            return\n\n        \n        \n        ufo_paths = []\n        if isinstance(ufos, basestring):\n            ufo_paths = glob.glob(ufos)\n            ufos = [Font(x) for x in ufo_paths]\n        elif isinstance(ufos, list):\n            \n            ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]\n            ufo_paths = [x.path for x in ufos]\n        else:\n            raise FontmakeError(\n                \"UFOs parameter is neither a defcon.Font object, a path or a glob, \"\n                \"nor a list of any of these.\",\n                ufos,\n            )\n\n        need_reload = False\n        if \"otf\" in output:\n            self.build_otfs(ufos, **kwargs)\n            need_reload = True\n\n        if \"ttf\" in output:\n            if need_reload:\n                ufos = [Font(path) for path in ufo_paths]\n            self.build_ttfs(ufos, **kwargs)\n            need_reload = True", "docstring": "Run toolchain from UFO sources.\n\nArgs:\nufos: List of UFO sources, as either paths or opened objects.\noutput: List of output formats to generate.\nkwargs: Arguments passed along to save_otfs.", "source": "juraj-google-style"}
{"code": "def summarize(values, epsilon):\n    values = np.reshape(values, [-1])\n    values = np.sort(values)\n    elements = np.size(values)\n    num_buckets = 1.0 / epsilon\n    increment = elements / num_buckets\n    start = increment\n    step = max(increment, 1)\n    boundaries = values[int(start)::int(step)]\n    weights = np.ones_like(boundaries)\n    weights = weights * step\n    return np.stack([boundaries, weights])", "docstring": "Reduce a 1D sequence of values to a summary.\n\nThis algorithm is based on numpy.quantiles but modified to allow for\nintermediate steps between multiple data sets. It first finds the target\nnumber of bins as the reciprocal of epsilon and then takes the individual\nvalues spaced at appropriate intervals to arrive at that target.\nThe final step is to return the corresponding counts between those values\nIf the target num_bins is larger than the size of values, the whole array is\nreturned (with weights of 1).\n\nArgs:\nvalues: 1D `np.ndarray` to be summarized.\nepsilon: A `'float32'` that determines the approximate desired\nprecision.\n\nReturns:\nA 2D `np.ndarray` that is a summary of the inputs. First column is the\ninterpolated partition values, the second is the weights (counts).", "source": "github-repos"}
{"code": "def pnum_to_group(mesh_shape, group_dims, pnum):\n  \n  coord = pnum_to_processor_coordinates(mesh_shape, pnum)\n  remaining_shape = Shape(\n      [d for i, d in enumerate(mesh_shape) if i not in group_dims])\n  remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims]\n  return processor_coordinates_to_pnum(remaining_shape, remaining_coord)", "docstring": "Group number for grouped allreduce.\n\nArgs:\nmesh_shape: a Shape\ngroup_dims: a list of integers (the dimensions reduced over)\npnum: an integer\n\nReturns:\nan integer", "source": "juraj-google-style"}
{"code": "def closest(self, coords=[], **kwargs):\n        \n        if self.ndims > 1:\n            raise NotImplementedError(\"Closest method currently only \"\n                                      \"implemented for 1D Elements\")\n\n        if kwargs:\n            if len(kwargs) > 1:\n                raise NotImplementedError(\"Closest method currently only \"\n                                          \"supports 1D indexes\")\n            samples = list(kwargs.values())[0]\n            coords = samples if isinstance(samples, list) else [samples]\n\n        xs = self.dimension_values(0)\n        if xs.dtype.kind in 'SO':\n            raise NotImplementedError(\"Closest only supported for numeric types\")\n        idxs = [np.argmin(np.abs(xs-coord)) for coord in coords]\n        return [xs[idx] for idx in idxs]", "docstring": "Snaps coordinate(s) to closest coordinate in Dataset\n\nArgs:\ncoords: List of coordinates expressed as tuples\n**kwargs: Coordinates defined as keyword pairs\n\nReturns:\nList of tuples of the snapped coordinates\n\nRaises:\nNotImplementedError: Raised if snapping is not supported", "source": "juraj-google-style"}
{"code": "def binary_cross_entropy_with_logits(input_, target, name=PROVIDED, loss_weight=None, per_example_weights=None, per_output_weights=None):\n    if (target is None):\n        raise ValueError('target must be set')\n    target = _convert_and_assert_tensors_compatible(input_, target)\n    with tf.name_scope('stats'):\n        (selected, sum_retrieved, sum_relevant) = _compute_precision_recall(input_, target, 0, per_example_weights)\n        precision = (selected / sum_retrieved)\n        recall = (selected / sum_relevant)\n        if precision.get_shape().is_fully_defined():\n            input_.bookkeeper.add_average_summary(precision, ('average_precision_%s' % name))\n        if recall.get_shape().is_fully_defined():\n            input_.bookkeeper.add_average_summary(recall, ('average_recall_%s' % name))\n        input_.bookkeeper.add_scalar_summary(tf.reduce_sum(tf.to_float(tf.greater(input_, 0))), 'activations')\n        if (per_output_weights is not None):\n            per_output_weights = tf.convert_to_tensor(per_output_weights, name='per_output_weights', dtype=input_.dtype.base_dtype)\n            input_.get_shape().assert_is_compatible_with(per_output_weights.get_shape())\n\n    def _batch_sum_bce(x, target, name='binary_cross_entropy'):\n        logits = functions.binary_cross_entropy_loss_with_logits(x, target, name=name)\n        if (per_output_weights is not None):\n            logits *= per_output_weights\n        return functions.reduce_batch_sum(logits)\n    return apply_regression(input_, _batch_sum_bce, target, [], name=('%s_bce_loss' % name), loss_weight=loss_weight, per_example_weights=per_example_weights)", "docstring": "Calculates the binary cross entropy of the input_ vs inputs.\n\nExpects unscaled logits. Do not pass in results of sigmoid operation.\n\nArgs:\ninput_: A rank 2 Tensor or a Pretty Tensor holding the logits.\ntarget: A rank 2 tf.float32 or tf.float64 tensor containing class label\nprobabilities. Note that binary cross entropy is equivalent to logistic\nloss.\nname: The optional name.\nloss_weight: A scalar multiplier for the loss.\nper_example_weights: A `Tensor` with a weight per example.\nper_output_weights: A weight `Tensor` that is the same shape as the\ninput_ that can be used to scale individual prediction losses.  See\n`tf.tile` to turn a per-column weight vector into a `per_output_weights`\n`Tensor`.\nReturns:\nBinary cross entropy loss after sigmoid operation.\nRaises:\nValueError: if target is None or the type is not float or double.", "source": "codesearchnet"}
{"code": "def __init__(self, xid=None, multipart_type=None, flags=0, body=b''):\n        \n        super().__init__(xid)\n        self.multipart_type = multipart_type\n        self.flags = flags\n        self.body = body", "docstring": "Create a MultipartRequest with the optional parameters below.\n\nArgs:\nxid (int): xid to the header.\nmultipart_type (int): One of the OFPMP_* constants.\nflags (int): OFPMPF_REQ_* flags.\nbody (bytes): Body of the request.", "source": "juraj-google-style"}
{"code": "def get_port(self, id_or_uri, port_id_or_uri):\n        \n        uri = self._client.build_subresource_uri(id_or_uri, port_id_or_uri, \"ports\")\n        return self._client.get(uri)", "docstring": "Gets an interconnect port.\n\nArgs:\nid_or_uri: Can be either the interconnect id or uri.\nport_id_or_uri: The interconnect port id or uri.\n\nReturns:\ndict: The interconnect port.", "source": "juraj-google-style"}
{"code": "def parse_timers(self):\n    filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))\n    parser = AbinitTimerParser()\n    parser.parse(filenames)\n    return parser", "docstring": "Parse the TIMER section reported in the ABINIT output files.\n\nReturns:\n:class:`AbinitTimerParser` object", "source": "codesearchnet"}
{"code": "def authenticate(self, username, password, attribute=None, base_dn=None, search_filter=None, search_scope=SUBTREE):\n    valid_dn = False\n    try:\n        parse_dn(username)\n        valid_dn = True\n    except LDAPInvalidDnError:\n        pass\n    if (valid_dn is False):\n        user_filter = '({0}={1})'.format(attribute, username)\n        if (search_filter is not None):\n            user_filter = '(&{0}{1})'.format(user_filter, search_filter)\n        try:\n            self.connection.search(base_dn, user_filter, search_scope, attributes=[attribute])\n            response = self.connection.response\n            username = response[0]['dn']\n        except (LDAPInvalidDnError, LDAPInvalidFilterError, IndexError):\n            return False\n    try:\n        conn = self.connect(username, password)\n        conn.unbind()\n        return True\n    except LDAPBindError:\n        return False", "docstring": "Attempts to bind a user to the LDAP server.\n\nArgs:\nusername (str): DN or the username to attempt to bind with.\npassword (str): The password of the username.\nattribute (str): The LDAP attribute for the username.\nbase_dn (str): The LDAP basedn to search on.\nsearch_filter (str): LDAP searchfilter to attempt the user\nsearch with.\n\nReturns:\nbool: ``True`` if successful or ``False`` if the\ncredentials are invalid.", "source": "codesearchnet"}
{"code": "def GetFeedItemIdsForCampaign(campaign_feed):\n  \n  feed_item_ids = set()\n\n  try:\n    lhs_operand = campaign_feed['matchingFunction']['lhsOperand']\n  except KeyError:\n    lhs_operand = None\n\n  if (lhs_operand and lhs_operand[0]['FunctionArgumentOperand.Type'] ==\n      'RequestContextOperand'):\n    request_context_operand = lhs_operand[0]\n\n    if (request_context_operand['contextType'] == 'FEED_ITEM_ID' and\n        campaign_feed['matchingFunction']['operator'] == 'IN'):\n      for argument in campaign_feed['matchingFunction']['rhsOperand']:\n        if argument['xsi_type'] == 'ConstantOperand':\n          feed_item_ids.add(argument['longValue'])\n\n  return feed_item_ids", "docstring": "Gets the Feed Item Ids used by a campaign through a given Campaign Feed.\n\nArgs:\ncampaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.\n\nReturns:\nA list of Feed Item IDs.", "source": "juraj-google-style"}
{"code": "def _read_single(parser, filepath):\n    \n    from os import path\n    global packages\n    if path.isfile(filepath):\n        parser.readfp(open(filepath))", "docstring": "Reads a single config file into the parser, silently failing if the file\ndoes not exist.\n\nArgs:\nparser (ConfigParser): parser to read the file into.\nfilepath (str): full path to the config file.", "source": "juraj-google-style"}
{"code": "def retrieve_pwd_from_config(msg, cfg):\n    msg_type = msg.__class__.__name__.lower()\n    key_fmt = ((msg.profile + '_') + msg_type)\n    pwd = cfg.pwd[key_fmt].split(' :: ')\n    if (len(pwd) == 1):\n        msg.auth = pwd[0]\n    else:\n        msg.auth = tuple(pwd)", "docstring": "Retrieve auth from profile configuration and set in msg.auth attr.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def output(self, filename):\n        \n        info = 'Inheritance\\n'\n\n        if not self.contracts:\n            return\n\n        info += blue('Child_Contract -> ') + green('Immediate_Base_Contracts')\n        info += green(' [Not_Immediate_Base_Contracts]')\n        for child in self.contracts:\n            info += blue(f'\\n+ {child.name}')\n            if child.inheritance:\n                immediate = child.immediate_inheritance\n                not_immediate = [i for i in child.inheritance if i not in immediate]\n                info += ' -> ' + green(\", \".join(map(str, immediate)))\n                if not_immediate:\n                    info += \", [\"+ green(\", \".join(map(str, not_immediate))) + \"]\"\n\n        info += green('\\n\\nBase_Contract -> ') + blue('Immediate_Child_Contracts')\n        info += blue(' [Not_Immediate_Child_Contracts]')\n        for base in self.contracts:\n            info += green(f'\\n+ {base.name}')\n            children = list(self._get_child_contracts(base))\n            if children:\n                immediate = [child for child in children if base in child.immediate_inheritance]\n                not_immediate = [child for child in children if not child in immediate]\n                info += ' -> ' + blue(\", \".join(map(str, immediate)))\n                if not_immediate:\n                    info += ', [' + blue(\", \".join(map(str, not_immediate))) + ']'\n        self.info(info)", "docstring": "Output the inheritance relation\n\n_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def _get_ami_file(region='us-east-1'):\n    \n    LOG.info(\"Getting AMI from Gitlab\")\n    lookup = FileLookup(git_short='devops/ansible')\n    filename = 'scripts/{0}.json'.format(region)\n    ami_contents = lookup.remote_file(filename=filename, branch='master')\n    LOG.debug('AMI file contents in %s: %s', filename, ami_contents)\n    return ami_contents", "docstring": "Get file from Gitlab.\n\nArgs:\nregion (str): AWS Region to find AMI ID.\n\nReturns:\nstr: Contents in json format.", "source": "juraj-google-style"}
{"code": "def search(self, query, results=10, suggestion=False):\n    self._check_query(query, 'Query must be specified')\n    search_params = {'list': 'search', 'srprop': '', 'srlimit': results, 'srsearch': query}\n    if suggestion:\n        search_params['srinfo'] = 'suggestion'\n    raw_results = self.wiki_request(search_params)\n    self._check_error_response(raw_results, query)\n    search_results = [d['title'] for d in raw_results['query']['search']]\n    if suggestion:\n        sug = None\n        if raw_results['query'].get('searchinfo'):\n            sug = raw_results['query']['searchinfo']['suggestion']\n        return (search_results, sug)\n    return search_results", "docstring": "Search for similar titles\n\nArgs:\nquery (str): Page title\nresults (int): Number of pages to return\nsuggestion (bool): Use suggestion\nReturns:\ntuple or list: tuple (list results, suggestion) if \\\nsuggestion is **True**; list of results \\\notherwise", "source": "codesearchnet"}
{"code": "def disconnect(self, container, *args, **kwargs):\n    if isinstance(container, Container):\n        container = container.id\n    return self.client.api.disconnect_container_from_network(container, self.id, *args, **kwargs)", "docstring": "Disconnect a container from this network.\n\nArgs:\ncontainer (str): Container to disconnect from this network, as\neither an ID, name, or\n:py:class:`~docker.models.containers.Container` object.\nforce (bool): Force the container to disconnect from a network.\nDefault: ``False``\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def SetDefault(self, name, value):\n    \n    fl = self.FlagDict()\n    if name not in fl:\n      self._SetUnknownFlag(name, value)\n      return\n    if self.IsParsed():\n      logging.warn(\n          'FLAGS.SetDefault called on flag \"%s\" after flag parsing. Call this '\n          'method at the top level of a module to avoid overwriting the value '\n          'passed at the command line.',\n          name)\n    fl[name]._set_default(value)  \n    self._AssertValidators(fl[name].validators)", "docstring": "Changes the default value (and current value) of the named flag object.\n\nCall this method at the top level of a module to avoid overwriting the value\npassed at the command line.\n\nArgs:\nname: A string, the name of the flag to modify.\nvalue: The new default value.\n\nRaises:\nUnrecognizedFlagError: When there is no registered flag named name.\nIllegalFlagValueError: When value is not valid.", "source": "juraj-google-style"}
{"code": "def format_formula(formula):\n    \n\n    formatted_formula = \"\"\n    number_format = \"\"\n    for i, s in enumerate(formula):\n        if s.isdigit():\n            if not number_format:\n                number_format = \"_{\"\n            number_format += s\n            if i == len(formula) - 1:\n                number_format += \"}\"\n                formatted_formula += number_format\n        else:\n            if number_format:\n                number_format += \"}\"\n                formatted_formula += number_format\n                number_format = \"\"\n            formatted_formula += s\n\n    return r\"$%s$\" % (formatted_formula)", "docstring": "Converts str of chemical formula into\nlatex format for labelling purposes\n\nArgs:\nformula (str): Chemical formula", "source": "juraj-google-style"}
{"code": "def _serialize_normalized_array(array, fmt='png', quality=70):\n  \n  dtype = array.dtype\n  assert np.issubdtype(dtype, np.unsignedinteger)\n  assert np.max(array) <= np.iinfo(dtype).max\n  assert array.shape[-1] > 1  \n\n  image = PIL.Image.fromarray(array)\n  image_bytes = BytesIO()\n  image.save(image_bytes, fmt, quality=quality)\n  \n  image_data = image_bytes.getvalue()\n  return image_data", "docstring": "Given a normalized array, returns byte representation of image encoding.\n\nArgs:\narray: NumPy array of dtype uint8 and range 0 to 255\nfmt: string describing desired file format, defaults to 'png'\nquality: specifies compression quality from 0 to 100 for lossy formats\n\nReturns:\nimage data as BytesIO buffer", "source": "juraj-google-style"}
{"code": "def validate(data):\n    try:\n        return Schema(Validator.SCHEMA).validate(data)\n    except SchemaError as exception:\n        logging.getLogger(__name__).error(exception)\n        return None", "docstring": "Validate data against the schema.\n\nArgs:\ndata(dict): data structure to validate.\n\nReturns:\ndict: data as provided and defaults where defined in schema.", "source": "codesearchnet"}
{"code": "def remove_token(self, *, payer_id, credit_card_token_id):\n    payload = {'language': self.client.language.value, 'command': PaymentCommand.REMOVE_TOKEN.value, 'merchant': {'apiLogin': self.client.api_login, 'apiKey': self.client.api_key}, 'removeCreditCardToken': {'payerId': payer_id, 'creditCardTokenId': credit_card_token_id}, 'test': self.client.is_test}\n    return self.client._post(self.url, json=payload)", "docstring": "This feature allows you to delete a tokenized credit card register.\n\nArgs:\npayer_id:\ncredit_card_token_id:\n\nReturns:", "source": "codesearchnet"}
{"code": "def add_layout(self, obj, place='center'):\n    valid_places = ['left', 'right', 'above', 'below', 'center']\n    if (place not in valid_places):\n        raise ValueError((\"Invalid place '%s' specified. Valid place values are: %s\" % (place, nice_join(valid_places))))\n    getattr(self, place).append(obj)", "docstring": "Adds an object to the plot in a specified place.\n\nArgs:\nobj (Renderer) : the object to add to the Plot\nplace (str, optional) : where to add the object (default: 'center')\nValid places are: 'left', 'right', 'above', 'below', 'center'.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def read(*components, **kwargs):\n    rstrip = kwargs.get('rstrip', True)\n    comment_char = kwargs.get('comment_char', None)\n    ignore_comments = (comment_char is not None)\n    file = open(path(*components))\n    lines = file.readlines()\n    file.close()\n    if ignore_comments:\n        comment_line_re = re.compile('^\\\\s*{char}'.format(char=comment_char))\n        not_comment_re = re.compile('[^{char}]+'.format(char=comment_char))\n        if rstrip:\n            return [re.match(not_comment_re, line).group(0).rstrip() for line in lines if (not re.match(comment_line_re, line))]\n        else:\n            return [re.match(not_comment_re, line).group(0) for line in lines if (not re.match(comment_line_re, line))]\n    elif rstrip:\n        return [line.rstrip() for line in lines]\n    else:\n        return lines", "docstring": "Read file and return a list of lines. If comment_char is set, ignore the\ncontents of lines following the comment_char.\n\nRaises:\n\nIOError: if reading path fails", "source": "codesearchnet"}
{"code": "def check_updates(transformers, datastore=None, stimuli=None):\n    \n    \n    datastore = datastore or expanduser('~/.pliers_updates')\n    prior_data = pd.read_csv(datastore) if exists(datastore) else None\n\n    \n    stimuli = stimuli or glob.glob(\n        join(dirname(realpath(__file__)), '../tests/data/image/CC0/*'))\n    stimuli = load_stims(stimuli)\n\n    \n    loaded_transformers = {get_transformer(name, **params): (name, params)\n                           for name, params in transformers}\n\n    \n    results = pd.DataFrame({'time_extracted': [datetime.datetime.now()]})\n    for trans in loaded_transformers.keys():\n        for stim in stimuli:\n            if trans._stim_matches_input_types(stim):\n                res = trans.transform(stim)\n\n                try: \n                    res = [getattr(res, '_data', res.data) for r in res]\n                except TypeError:\n                    res = getattr(res, '_data', res.data)\n\n                res = hash_data(res)\n\n                results[\"{}.{}\".format(trans.__hash__(), stim.name)] = [res]\n\n    \n    mismatches = []\n    if prior_data is not None:\n        last = prior_data[\n            prior_data.time_extracted == prior_data.time_extracted.max()]. \\\n            iloc[0].drop('time_extracted')\n\n        for label, value in results.iteritems():\n            old = last.get(label)\n            new = value.values[0]\n\n            if old is not None:\n                if isinstance(new, str):\n                    if new != old:\n                        mismatches.append(label)\n                elif not np.isclose(old, new):\n                    mismatches.append(label)\n\n        results = prior_data.append(results)\n\n    results.to_csv(datastore, index=False)\n\n    \n    def get_trans(hash_tr):\n        for obj, attr in loaded_transformers.items():\n            if str(obj.__hash__()) == hash_tr:\n                return attr\n\n    delta_t = set([m.split('.')[0] for m in mismatches])\n    delta_t = [get_trans(dt) for dt in delta_t]\n\n    return {'transformers': delta_t, 'mismatches': mismatches}", "docstring": "Run transformers through a battery of stimuli, and check if output has\nchanged. Store results in csv file for comparison.\n\nArgs:\ntransformers (list): A list of tuples of transformer names and\ndictionary of parameters to instantiate with (or empty dict).\ndatastore (str): Filepath of CSV file with results. Stored in home dir\nby default.\nstimuli (list): List of stimuli file paths to extract from. If None,\nuse test data.", "source": "juraj-google-style"}
{"code": "def user_entry(entry_int, num_inst, command):\n    valid_entry = False\n    if (not entry_int):\n        print('{}aborting{} - {} instance\\n'.format(C_ERR, C_NORM, command))\n        sys.exit()\n    elif ((entry_int >= 1) and (entry_int <= num_inst)):\n        entry_idx = (entry_int - 1)\n        valid_entry = True\n    else:\n        print('{}Invalid entry:{} enter a number between 1 and {}.'.format(C_ERR, C_NORM, num_inst))\n        entry_idx = entry_int\n    return (entry_idx, valid_entry)", "docstring": "Validate user entry and returns index and validity flag.\n\nProcesses the user entry and take the appropriate action: abort\nif '0' entered, set validity flag and index is valid entry, else\nreturn invalid index and the still unset validity flag.\n\nArgs:\nentry_int (int): a number entered or 999 if a non-int was entered.\nnum_inst (int): the largest valid number that can be entered.\ncommand (str): program command to display in prompt.\nReturns:\nentry_idx(int): the dictionary index number of the targeted instance\nvalid_entry (bool): specifies if entry_idx is valid.\nRaises:\nSystemExit: if the user enters 0 when they are choosing from the\nlist it triggers the \"abort\" option offered to the user.", "source": "codesearchnet"}
{"code": "def setup(__pkg: str) -> jinja2.Environment:\n    dirs = [path.join(d, 'templates') for d in xdg_basedir.get_data_dirs(__pkg)]\n    env = jinja2.Environment(autoescape=jinja2.select_autoescape(['html', 'xml']), loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs]))\n    env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))\n    env.filters.update(FILTERS)\n    return env", "docstring": "Configure a new Jinja environment with our filters.\n\nArgs:\n__pkg: Package name to use as base for templates searches\nReturns:\nConfigured Jinja environment", "source": "codesearchnet"}
{"code": "def find_existing_record(env, zone_id, dns_name, check_key=None, check_value=None):\n    \n    client = boto3.Session(profile_name=env).client('route53')\n    pager = client.get_paginator('list_resource_record_sets')\n    existingrecord = None\n    for rset in pager.paginate(HostedZoneId=zone_id):\n        for record in rset['ResourceRecordSets']:\n            if check_key:\n                if record['Name'].rstrip('.') == dns_name and record.get(check_key) == check_value:\n                    LOG.info(\"Found existing record: %s\", record)\n                    existingrecord = record\n                    break\n    return existingrecord", "docstring": "Check if a specific DNS record exists.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\ndns_name (str): FQDN of application's dns entry to add/update.\ncheck_key(str): Key to look for in record. Example: \"Type\"\ncheck_value(str): Value to look for with check_key. Example: \"CNAME\"\n\nReturns:\njson: Found Record. Returns None if no record found", "source": "juraj-google-style"}
{"code": "def _check_mr_state(cls, state, mr_id):\n    \n    if state is None:\n      logging.warning(\n          \"Mapreduce State for job %s is missing. Dropping Task.\",\n          mr_id)\n      return False\n    if not state.active:\n      logging.warning(\n          \"Mapreduce %s is not active. Looks like spurious task \"\n          \"execution. Dropping Task.\", mr_id)\n      return False\n    return True", "docstring": "Check MapreduceState.\n\nArgs:\nstate: an MapreduceState instance.\nmr_id: mapreduce id.\n\nReturns:\nTrue if state is valid. False if not and this task should be dropped.", "source": "juraj-google-style"}
{"code": "def load_plugins(self):\n    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.collectors']['plugins']:\n        cls = entry_point.load()\n        if cls.enabled():\n            self.log.debug('Collector loaded: {} in module {}'.format(cls.__name__, cls.__module__))\n            self.collectors.setdefault(cls.type, []).append(Worker(cls.name, cls.interval, {'name': entry_point.name, 'module_name': entry_point.module_name, 'attrs': entry_point.attrs}))\n        else:\n            self.log.debug('Collector disabled: {} in module {}'.format(cls.__name__, cls.__module__))\n    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auditors']['plugins']:\n        cls = entry_point.load()\n        if cls.enabled():\n            self.log.debug('Auditor loaded: {} in module {}'.format(cls.__name__, cls.__module__))\n            self.auditors.append(Worker(cls.name, cls.interval, {'name': entry_point.name, 'module_name': entry_point.module_name, 'attrs': entry_point.attrs}))\n        else:\n            self.log.debug('Auditor disabled: {} in module {}'.format(cls.__name__, cls.__module__))\n    collector_count = sum((len(x) for x in self.collectors.values()))\n    auditor_count = len(self.auditors)\n    if ((collector_count + auditor_count) == 0):\n        raise Exception('No auditors or collectors loaded, aborting scheduler')\n    self.log.info('Scheduler loaded {} collectors and {} auditors'.format(collector_count, auditor_count))", "docstring": "Refresh the list of available collectors and auditors\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_mul(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", "docstring": "Multiply this variable by `tf.IndexedSlices`.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to multiply this variable by.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe scattered multiplication has completed.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def _get_localized_fn(path, root_dir):\n    \n    local_fn = path\n    if path.startswith(root_dir):\n        local_fn = path.replace(root_dir, \"\", 1)\n\n    if not local_fn.startswith(\"/\"):\n        return \"/\" + local_fn\n\n    return local_fn", "docstring": "Return absolute `path` relative to `root_dir`.\n\nWhen `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``,\nreturned path will be ``/xex/somefile.txt``.\n\nArgs:\npath (str): Absolute path beginning in `root_dir`.\nroot_dir (str): Absolute path containing `path` argument.\n\nReturns:\nstr: Local `path` when `root_dir` is considered as root of FS.", "source": "juraj-google-style"}
{"code": "def AddArguments(cls, argument_group):\n    \n    default_fields = ','.join(cls._DEFAULT_FIELDS)\n    argument_group.add_argument(\n        '--fields', dest='fields', type=str, action='store',\n        default=default_fields, help=(\n            'Defines which fields should be included in the output.'))\n\n    default_fields = ', '.join(cls._DEFAULT_FIELDS)\n    argument_group.add_argument(\n        '--additional_fields', dest='additional_fields', type=str,\n        action='store', default='', help=(\n            'Defines extra fields to be included in the output, in addition to'\n            ' the default fields, which are {0:s}.'.format(default_fields)))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def ttr(self, kloc, acc=(10 ** 3), verbose=1):\n    kloc = numpy.asarray(kloc, dtype=int)\n    shape = kloc.shape\n    kloc = kloc.reshape(len(self), (- 1))\n    cache = {}\n    out = [evaluation.evaluate_recurrence_coefficients(self, k) for k in kloc.T]\n    out = numpy.array(out).T\n    return out.reshape(((2,) + shape))", "docstring": "Three terms relation's coefficient generator\n\nArgs:\nk (numpy.ndarray, int):\nThe order of the coefficients.\nacc (int):\nAccuracy of discretized Stieltjes if analytical methods are\nunavailable.\n\nReturns:\n(Recurrence coefficients):\nWhere out[0] is the first (A) and out[1] is the second\ncoefficient With ``out.shape==(2,)+k.shape``.", "source": "codesearchnet"}
{"code": "def read(self, offset, length):\n    if (not isinstance(offset, (int, long))):\n        raise TypeError('Invalid offset type, should be integer.')\n    offset = self._adjust_offset(offset)\n    self._validate_offset(offset, length)\n    return bytes(self.mapping[offset:(offset + length)])", "docstring": "Read a string of bytes from the specified `offset` in bytes,\nrelative to the base physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\nlength (int): number of bytes to read.\n\nReturns:\nbytes: bytes read.\n\nRaises:\nTypeError: if `offset` type is invalid.\nValueError: if `offset` is out of bounds.", "source": "codesearchnet"}
{"code": "def __init__(self, port, log=False):\n        \n        super(OpenThreadController, self).__init__()\n        self.port = port\n        self.handle = None\n        self.lines = []\n        self._log = log\n        self._is_net = False\n        self._init()", "docstring": "Initialize the controller\n\nArgs:\nport (str): serial port's path or name(windows)", "source": "juraj-google-style"}
{"code": "def collect_doc(module, base_class=None, prefix=\"\", flag_exclude_prefix=False):\n    \n\n    ret = []\n    for attrname in module.__all__:\n        if prefix and not attrname.startswith(prefix):\n            continue\n\n        attr = module.__getattribute__(attrname)\n\n        if base_class is not None and not issubclass(attr, base_class):\n            continue\n\n        spec = inspect.signature(attr)\n\n        ret.append((attrname if not flag_exclude_prefix else attrname[len(prefix):], spec, attr.__doc__))\n\n    return ret", "docstring": "Collects class names and docstrings in module for classes starting with prefix\n\nArguments:\nmodule -- Python module\nprefix -- argument for str.startswith(); if not passed, does not filter\nbase_class -- filters only descendants of this class\nflag_exclude_prefix -- whether or not to exclude prefix from class name in result\n\nReturns: [(classname0, signature, docstring0), ...]", "source": "juraj-google-style"}
{"code": "def __init__(self, descriptor_db=None):\n    \n\n    self._internal_db = descriptor_database.DescriptorDatabase()\n    self._descriptor_db = descriptor_db\n    self._descriptors = {}\n    self._enum_descriptors = {}\n    self._file_descriptors = {}", "docstring": "Initializes a Pool of proto buffs.\n\nThe descriptor_db argument to the constructor is provided to allow\nspecialized file descriptor proto lookup code to be triggered on demand. An\nexample would be an implementation which will read and compile a file\nspecified in a call to FindFileByName() and not require the call to Add()\nat all. Results from this database will be cached internally here as well.\n\nArgs:\ndescriptor_db: A secondary source of file descriptors.", "source": "juraj-google-style"}
{"code": "def repr(self, changed_widgets=None):\n        \n        if changed_widgets is None:\n            changed_widgets={}\n        local_changed_widgets = {}\n        self._set_updated()\n        return ''.join(('<', self.type, '>\\n', self.innerHTML(local_changed_widgets), '\\n</', self.type, '>'))", "docstring": "It is used to automatically represent the object to HTML format\npacks all the attributes, children and so on.\n\nArgs:\nchanged_widgets (dict): A dictionary containing a collection of tags that have to be updated.\nThe tag that have to be updated is the key, and the value is its textual repr.", "source": "juraj-google-style"}
{"code": "def find_local_maxima(self, input_grid):\n        \n        pixels, q_data = self.quantize(input_grid)\n        centers = OrderedDict()\n        for p in pixels.keys():\n            centers[p] = []\n        marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED\n        MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size)))\n        MAX_INFL = 2 * MIN_INFL\n        marked_so_far = []\n        \n        \n        \n        \n        \n        for b in sorted(pixels.keys(),reverse=True):\n            \n            infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL)))\n            for p in pixels[b]:\n                if marked[p] == self.UNMARKED:\n                    ok = False\n                    del marked_so_far[:]\n                    \n                    \n                    \n                    \n                    for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1,\n                                                    p[1] - infl_dist:p[1]+ infl_dist + 1]):\n                        if v == self.UNMARKED:\n                            ok = True\n                            marked[i - infl_dist + p[0],j - infl_dist + p[1]] = b\n                           \n                            marked_so_far.append((i - infl_dist + p[0],j - infl_dist + p[1]))\n                        else:\n                            \n                            ok = False\n                            break\n                    \n                    if ok:\n                        \n                        centers[b].append(p)\n                    else:\n                        for m in marked_so_far:\n                            marked[m] = self.UNMARKED\n        \n        marked[:, :] = self.UNMARKED\n        deferred_from_last = []\n        deferred_to_next = []\n        \n        for delta in range(0, self.delta + 1):\n            \n            for b in sorted(centers.keys(), reverse=True):\n                bin_lower = b - delta\n                deferred_from_last[:] = deferred_to_next[:]\n                del deferred_to_next[:]\n                foothills = []\n                n_centers = len(centers[b])\n                tot_centers = n_centers + len(deferred_from_last)\n                for i in range(tot_centers):\n                    \n                    if i < n_centers:\n                        center = centers[b][i]\n                    else:\n                        center = deferred_from_last[i - n_centers]\n                    if bin_lower < 0:\n                        bin_lower = 0\n                    if marked[center] == self.UNMARKED:\n                        captured = self.set_maximum(q_data, marked, center, bin_lower, foothills)\n                        if not captured:\n                            \n                            deferred_to_next.append(center)\n                        else:\n                            pass\n                \n                self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills)\n            del deferred_from_last[:]\n            del deferred_to_next[:]\n        return marked", "docstring": "Finds the local maxima in the inputGrid and perform region growing to identify objects.\n\nArgs:\ninput_grid: Raw input data.\n\nReturns:\narray with labeled objects.", "source": "juraj-google-style"}
{"code": "def write_int8(self, value, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.pack(('%sb' % endian), value)", "docstring": "Pack the value as a signed byte and write 1 byte to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def defer_remainder(self, deferred_time=None):\n    with self._lock:\n        self._timestamp = Timestamp.now()\n        if deferred_time and (not isinstance(deferred_time, (Duration, Timestamp))):\n            raise ValueError('The timestamp of deter_remainder() should be a Duration or a Timestamp, or None.')\n        self._deferred_timestamp = deferred_time\n        checkpoint = self.try_split(0)\n        if checkpoint:\n            _, self._deferred_residual = checkpoint", "docstring": "Performs self-checkpoint on current processing restriction with an\nexpected resuming time.\n\nSelf-checkpoint could happen during processing elements. When executing an\nDoFn.process(), you may want to stop processing an element and resuming\nlater if current element has been processed quit a long time or you also\nwant to have some outputs from other elements. ``defer_remainder()`` can be\ncalled on per element if needed.\n\nArgs:\ndeferred_time: A relative ``Duration`` that indicates the ideal time gap\nbetween now and resuming, or an absolute ``Timestamp`` for resuming\nexecution time. If the time_delay is None, the deferred work will be\nexecuted as soon as possible.", "source": "github-repos"}
{"code": "def add_gene_links(gene_obj, build=37):\n    \n    try:\n        build = int(build)\n    except ValueError:\n        build = 37\n    \n    hgnc_id = gene_obj['hgnc_id']\n\n    gene_obj['hgnc_link'] = genenames(hgnc_id)\n    gene_obj['omim_link'] = omim(hgnc_id)\n    \n    if not 'ensembl_id' in gene_obj:\n        ensembl_id = gene_obj.get('common',{}).get('ensembl_id')\n    else:\n        ensembl_id = gene_obj['ensembl_id']\n    ensembl_37_link = ensembl(ensembl_id, build=37)\n    ensembl_38_link = ensembl(ensembl_id, build=38)\n    gene_obj['ensembl_37_link'] = ensembl_37_link\n    gene_obj['ensembl_38_link'] = ensembl_38_link\n    gene_obj['ensembl_link'] = ensembl_37_link\n    if build == 38:\n        gene_obj['ensembl_link'] = ensembl_38_link\n    gene_obj['hpa_link'] = hpa(ensembl_id)\n    gene_obj['string_link'] = string(ensembl_id)\n    gene_obj['reactome_link'] = reactome(ensembl_id)\n    gene_obj['clingen_link'] = clingen(hgnc_id)\n    gene_obj['expression_atlas_link'] = expression_atlas(ensembl_id)\n    gene_obj['exac_link'] = exac(ensembl_id)\n    \n    gene_obj['entrez_link'] = entrez(gene_obj.get('entrez_id'))\n    \n    gene_obj['omim_link'] = omim(gene_obj.get('omim_id'))\n    \n    gene_obj['ppaint_link'] = ppaint(gene_obj['hgnc_symbol'])\n    \n    gene_obj['vega_link'] = vega(gene_obj.get('vega_id'))\n    \n    gene_obj['ucsc_link'] = ucsc(gene_obj.get('ucsc_id'))", "docstring": "Update a gene object with links\n\nArgs:\ngene_obj(dict)\nbuild(int)\n\nReturns:\ngene_obj(dict): gene_obj updated with many links", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               validate_args=False,\n               name=\"exp\"):\n    \n    \n    \n    super(Exp, self).__init__(\n        validate_args=validate_args,\n        name=name)", "docstring": "Instantiates the `Exp` bijector.\n\nArgs:\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"}
{"code": "def contains_vasp_input(dir_name):\n    for f in ['INCAR', 'POSCAR', 'POTCAR', 'KPOINTS']:\n        if ((not os.path.exists(os.path.join(dir_name, f))) and (not os.path.exists(os.path.join(dir_name, (f + '.orig'))))):\n            return False\n    return True", "docstring": "Checks if a directory contains valid VASP input.\n\nArgs:\ndir_name:\nDirectory name to check.\n\nReturns:\nTrue if directory contains all four VASP input files (INCAR, POSCAR,\nKPOINTS and POTCAR).", "source": "codesearchnet"}
{"code": "def coerce_to_pendulum_date(x: PotentialDatetimeType,\n                            assume_local: bool = False) -> Optional[Date]:\n    \n    p = coerce_to_pendulum(x, assume_local=assume_local)\n    return None if p is None else p.date()", "docstring": "Converts something to a :class:`pendulum.Date`.\n\nArgs:\nx: something that may be coercible to a date\nassume_local: if ``True``, assume local timezone; if ``False``, assume\nUTC\n\nReturns:\na :class:`pendulum.Date`, or ``None``.\n\nRaises:\npendulum.parsing.exceptions.ParserError: if a string fails to parse\nValueError: if no conversion possible", "source": "juraj-google-style"}
{"code": "def validate(source, scheme=None, format=None):\n    (detected_scheme, detected_format) = helpers.detect_scheme_and_format(source)\n    scheme = (scheme or detected_scheme)\n    format = (format or detected_format)\n    if (scheme is not None):\n        if (scheme not in config.LOADERS):\n            raise exceptions.SchemeError(('Scheme \"%s\" is not supported' % scheme))\n    if (format not in config.PARSERS):\n        raise exceptions.FormatError(('Format \"%s\" is not supported' % format))\n    return True", "docstring": "Check if tabulator is able to load the source.\n\nArgs:\nsource (Union[str, IO]): The source path or IO object.\nscheme (str, optional): The source scheme. Auto-detect by default.\nformat (str, optional): The source file format. Auto-detect by default.\n\nReturns:\nbool: Whether tabulator is able to load the source file.\n\nRaises:\n`tabulator.exceptions.SchemeError`: The file scheme is not supported.\n`tabulator.exceptions.FormatError`: The file format is not supported.", "source": "codesearchnet"}
{"code": "def course_blocks(self, course_id, username):\n        \n        resp = self.requester.get(\n            urljoin(self.base_url, '/api/courses/v1/blocks/'),\n            params={\n                \"depth\": \"all\",\n                \"username\": username,\n                \"course_id\": course_id,\n                \"requested_fields\": \"children,display_name,id,type,visible_to_staff_only\",\n            })\n\n        resp.raise_for_status()\n\n        return Structure(resp.json())", "docstring": "Fetches course blocks.\n\nArgs:\ncourse_id (str): An edx course id.\nusername (str): username of the user to query for (can reveal hidden\nmodules)\n\nReturns:\nStructure", "source": "juraj-google-style"}
{"code": "def is_number_match(num1, num2):\n    if (isinstance(num1, PhoneNumber) and isinstance(num2, PhoneNumber)):\n        return _is_number_match_OO(num1, num2)\n    elif isinstance(num1, PhoneNumber):\n        return _is_number_match_OS(num1, num2)\n    elif isinstance(num2, PhoneNumber):\n        return _is_number_match_OS(num2, num1)\n    else:\n        return _is_number_match_SS(num1, num2)", "docstring": "Takes two phone numbers and compares them for equality.\n\nFor example, the numbers +1 345 657 1234 and 657 1234 are a SHORT_NSN_MATCH.\nThe numbers +1 345 657 1234 and 345 657 are a NO_MATCH.\n\nArguments\nnum1 -- First number object or string to compare. Can contain formatting,\nand can have country calling code specified with + at the start.\nnum2 -- Second number object or string to compare. Can contain formatting,\nand can have country calling code specified with + at the start.\n\nReturns:\n- EXACT_MATCH if the country_code, NSN, presence of a leading zero for\nItalian numbers and any extension present are the same.\n- NSN_MATCH if either or both has no region specified, and the NSNs and\nextensions are the same.\n- SHORT_NSN_MATCH if either or both has no region specified, or the\nregion specified is the same, and one NSN could be a shorter version of\nthe other number. This includes the case where one has an extension\nspecified, and the other does not.\n- NO_MATCH otherwise.", "source": "codesearchnet"}
{"code": "def debug_string(self, with_typing: bool=False, indent: int=0) -> str:\n    operand_name = f'{self} '\n    operand_prints = ''.join(('\\n' + op.debug_string(with_typing, indent + 1) for op in self.operands))\n    type_print = f' type={self.return_type}' if with_typing else ''\n    return f'{'| ' * indent}+ {operand_name}<{self.__class__.__name__}{type_print}> ({operand_prints})'", "docstring": "Builds a string describing the expression tree starting from this node.\n\nArgs:\nwith_typing: If true, includes the type each node evaluates to.\nindent: The initial number of spaces to use as indentation for the debug\nstring.\n\nReturns:\nA string which recursively describes this node and its operands.", "source": "github-repos"}
{"code": "def _get_bucketing_id(self, user_id, attributes):\n    \n\n    attributes = attributes or {}\n    bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID)\n\n    if bucketing_id is not None:\n      if isinstance(bucketing_id, string_types):\n        return bucketing_id\n\n      self.logger.warning('Bucketing ID attribute is not a string. Defaulted to user_id.')\n\n    return user_id", "docstring": "Helper method to determine bucketing ID for the user.\n\nArgs:\nuser_id: ID for user.\nattributes: Dict representing user attributes. May consist of bucketing ID to be used.\n\nReturns:\nString representing bucketing ID if it is a String type in attributes else return user ID.", "source": "juraj-google-style"}
{"code": "def construct_concept_to_indicator_mapping(n: int = 1) -> Dict[str, List[str]]:\n    \n\n    df = pd.read_sql_table(\"concept_to_indicator_mapping\", con=engine)\n    gb = df.groupby(\"Concept\")\n\n    _dict = {\n        k: [get_variable_and_source(x) for x in take(n, v[\"Indicator\"].values)]\n        for k, v in gb\n    }\n    return _dict", "docstring": "Create a dictionary mapping high-level concepts to low-level indicators\n\nArgs:\nn: Number of indicators to return\n\nReturns:\nDictionary that maps concept names to lists of indicator names.", "source": "juraj-google-style"}
{"code": "def add_identifier(self, name, obj):\n        \n\n        name = str(name)\n        self._known_identifiers[name] = obj", "docstring": "Add a known identifier resolution.\n\nArgs:\nname (str): The name of the identifier\nobj (object): The object that is should resolve to", "source": "juraj-google-style"}
{"code": "def FromString(cls, desc):\n    if (language.stream is None):\n        language.get_language()\n    parse_exp = (((Optional((time_interval('time') - Literal(':').suppress())) - language.stream('stream')) - Literal('=').suppress()) - number('value'))\n    try:\n        data = parse_exp.parseString(desc)\n        time = 0\n        if ('time' in data):\n            time = data['time'][0]\n        return SimulationStimulus(time, data['stream'][0], data['value'])\n    except (ParseException, ParseSyntaxException):\n        raise ArgumentError('Could not parse stimulus descriptor', descriptor=desc)", "docstring": "Create a new stimulus from a description string.\n\nThe string must have the format:\n\n[time: ][system ]input X = Y\nwhere X and Y are integers.  The time, if given must\nbe a time_interval, which is an integer followed by a\ntime unit such as second(s), minute(s), etc.\n\nArgs:\ndesc (str): A string description of the stimulus.\n\nReturns:\nSimulationStimulus: The parsed stimulus object.", "source": "codesearchnet"}
{"code": "def download(timestamp, dataset, path=None, products=None,\n             levels=None, offset=0):\n    \n    if path is None:\n        path = DATA_PATH\n    closest = timestamp.hour\n    filename = dataset(closest, offset)\n    gfs_timestamp = '%s%02d' % (timestamp.strftime('%Y%m%d'), closest)\n\n    url = baseurl(gfs_timestamp, filename)\n    index = url + '.idx'\n    messages = message_index(index)\n    segments = _filter_messages(messages, products, levels)\n    dl_path = path + '/%s/' % gfs_timestamp\n    _verify_path(dl_path)\n    _download_segments(path + filename, url, segments)", "docstring": "save GFS grib file to DATA_PATH.\n\nArgs:\ndataset(function): naming convention function.  eg. pgrb2\ntimestamp(datetime): ???\npath(str): if None defaults to DATA_PATH\nproducts(list): TMP, etc. if None downloads all.\nlayers(list): surface, etc. if None downloads all.\noffset(int): should be multiple of 3", "source": "juraj-google-style"}
{"code": "def update_fetch_positions(self, partitions):\n    for tp in partitions:\n        if (not self._subscriptions.is_assigned(tp)):\n            log.warning('partition %s is not assigned - skipping offset update', tp)\n            continue\n        elif self._subscriptions.is_fetchable(tp):\n            log.warning('partition %s is still fetchable -- skipping offset update', tp)\n            continue\n        if self._subscriptions.is_offset_reset_needed(tp):\n            self._reset_offset(tp)\n        elif (self._subscriptions.assignment[tp].committed is None):\n            self._subscriptions.need_offset_reset(tp)\n            self._reset_offset(tp)\n        else:\n            committed = self._subscriptions.assignment[tp].committed\n            log.debug('Resetting offset for partition %s to the committed offset %s', tp, committed)\n            self._subscriptions.seek(tp, committed)", "docstring": "Update the fetch positions for the provided partitions.\n\nArguments:\npartitions (list of TopicPartitions): partitions to update\n\nRaises:\nNoOffsetForPartitionError: if no offset is stored for a given\npartition and no reset policy is available", "source": "codesearchnet"}
{"code": "def get_periodic_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):\n    suce = _get_supercell(obj, rCut)\n    arrsoap = get_soap_locals(suce, Hpos, alp, bet, rCut, nMax=nMax, Lmax=Lmax, crossOver=crossOver, all_atomtypes=all_atomtypes, eta=eta)\n    return arrsoap", "docstring": "Get the RBF basis SOAP output for the given position in a periodic system.\n\nArgs:\nobj(ase.Atoms): Atomic structure for which the SOAP output is\ncalculated.\nalp: Alphas\nbet: Betas\nrCut: Radial cutoff.\nnMax: Maximum nmber of radial basis functions\nLmax: Maximum spherical harmonics degree\ncrossOver:\nall_atomtypes: Can be used to specify the atomic elements for which to\ncalculate the output. If given the output is calculated only for the\ngiven species.\neta: The gaussian smearing width.\n\nReturns:\nnp.ndarray: SOAP output for the given position.", "source": "codesearchnet"}
{"code": "def get_environment_details(zone, environment):\n    \n    default_context = google.datalab.Context.default()\n    url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone,\n                                                             environment)))\n\n    return google.datalab.utils.Http.request(url, credentials=default_context.credentials)", "docstring": "Issues a request to Composer to get the environment details.\n\nArgs:\nzone: GCP zone of the composer environment\nenvironment: name of the Composer environment\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def _FormatSource(self, event):\n    \n    _, source = self._output_mediator.GetFormattedSources(event)\n    if source is None:\n      data_type = getattr(event, 'data_type', 'UNKNOWN')\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    return source", "docstring": "Formats the source.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: source field.\n\nRaises:\nNoFormatterFound: if no event formatter can be found to match the data\ntype in the event.", "source": "juraj-google-style"}
{"code": "def setup(self, reason, grr_server_url, grr_username, grr_password,\n            approvers=None, verify=True):\n    \n    grr_auth = (grr_username, grr_password)\n    self.approvers = []\n    if approvers:\n      self.approvers = [item.strip() for item in approvers.strip().split(',')]\n    self.grr_api = grr_api.InitHttp(api_endpoint=grr_server_url,\n                                    auth=grr_auth,\n                                    verify=verify)\n    self.output_path = tempfile.mkdtemp()\n    self.reason = reason", "docstring": "Initializes a GRR hunt result collector.\n\nArgs:\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "juraj-google-style"}
{"code": "def __init__(self, enterprise_configuration):\n        \n        super(SAPSuccessFactorsAPIClient, self).__init__(enterprise_configuration)\n        self.global_sap_config = apps.get_model('sap_success_factors', 'SAPSuccessFactorsGlobalConfiguration').current()\n        self._create_session()", "docstring": "Instantiate a new client.\n\nArgs:\nenterprise_configuration (SAPSuccessFactorsEnterpriseCustomerConfiguration): An enterprise customers's\nconfiguration model for connecting with SAP SuccessFactors", "source": "juraj-google-style"}
{"code": "def transpose(self, name=None):\n    if (name is None):\n        name = (self.module_name + '_transpose')\n    if (self._data_format == DATA_FORMAT_NHWC):\n        stride = self._stride[1:(- 1)]\n    else:\n        stride = self._stride[2:]\n    return Conv2D(output_channels=(lambda : self.input_channels), kernel_shape=self._kernel_shape, stride=stride, padding=self._padding, use_bias=self._use_bias, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)", "docstring": "Returns matching `Conv2D` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n`Conv2D` module.", "source": "codesearchnet"}
{"code": "def compare_version(a, b):\n    aa = string.split(a, '.')\n    bb = string.split(b, '.')\n    for i in range(0, 4):\n        if (aa[i] != bb[i]):\n            return cmp(int(aa[i]), int(bb[i]))\n    return 0", "docstring": "Compare two version number strings of the form W.X.Y.Z.\n\nThe numbers are compared most-significant to least-significant.\nFor example, 12.345.67.89 > 2.987.88.99.\n\nArgs:\na: First version number string to compare\nb: Second version number string to compare\n\nReturns:\n0 if the numbers are identical, a positive number if 'a' is larger, and\na negative number if 'b' is larger.", "source": "codesearchnet"}
{"code": "def parse_coach_ec_df(infile):\n    ec_df = pd.read_table(infile, delim_whitespace=True, names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues'])\n    ec_df['pdb_template_id'] = ec_df['pdb_template'].apply((lambda x: x[:4]))\n    ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply((lambda x: x[4]))\n    ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]\n    ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')\n    return ec_df", "docstring": "Parse the EC.dat output file of COACH and return a dataframe of results\n\nEC.dat contains the predicted EC number and active residues.\nThe columns are: PDB_ID, TM-score, RMSD, Sequence identity,\nCoverage, Confidence score, EC number, and Active site residues\n\nArgs:\ninfile (str): Path to EC.dat\n\nReturns:\nDataFrame: Pandas DataFrame summarizing EC number predictions", "source": "codesearchnet"}
{"code": "def __init__(self, name, num_qubits, params, label=None):\n        \n        self._label = label\n        super().__init__(name, num_qubits, 0, params)", "docstring": "Create a new gate.\n\nArgs:\nname (str): the Qobj name of the gate\nnum_qubits (int): the number of qubits the gate acts on.\nparams (list): a list of parameters.\nlabel (str or None): An optional label for the gate [Default: None]", "source": "juraj-google-style"}
{"code": "def HashFile(self, fd, byte_count):\n    while (byte_count > 0):\n        buf_size = min(byte_count, constants.CLIENT_MAX_BUFFER_SIZE)\n        buf = fd.read(buf_size)\n        if (not buf):\n            break\n        self.HashBuffer(buf)\n        byte_count -= buf_size", "docstring": "Updates underlying hashers with a given file.\n\nArgs:\nfd: A file object that is going to be fed to the hashers.\nbyte_count: A maximum number of bytes that are going to be processed.", "source": "codesearchnet"}
{"code": "def get_client_kwargs(self, path):\n        \n        \n        path = path.split('?', 1)[0]\n\n        share_name, relpath = self.split_locator(path)\n        kwargs = dict(share_name=share_name)\n\n        \n        if relpath and relpath[-1] == '/':\n            kwargs['directory_name'] = relpath.rstrip('/')\n\n        \n        elif relpath:\n            try:\n                kwargs['directory_name'], kwargs['file_name'] = relpath.rsplit(\n                    '/', 1)\n            except ValueError:\n                kwargs['directory_name'] = ''\n                kwargs['file_name'] = relpath\n\n        \n        return kwargs", "docstring": "Get base keyword arguments for client for a\nspecific path.\n\nArgs:\npath (str): Absolute path or URL.\n\nReturns:\ndict: client args", "source": "juraj-google-style"}
{"code": "def affine_transform(boxes, angle, translate_x, translate_y, scale, shear_x, shear_y, height, width, center_x=None, center_y=None, bounding_box_format='xyxy'):\n    if bounding_box_format != 'xyxy':\n        raise NotImplementedError\n    box_utils = BoundingBox()\n    if backend_utils.in_tf_graph():\n        box_utils.backend.set_backend('tensorflow')\n    boxes = box_utils.affine(boxes, angle, translate_x, translate_y, scale, shear_x, shear_y, height, width, center_x=center_x, center_y=center_y)\n    box_utils.backend.reset()\n    return boxes", "docstring": "Applies an affine transformation to the bounding boxes.\n\nThe `height` and `width` parameters are used to normalize the\ntranslation and scaling factors.\n\nArgs:\nboxes: The bounding boxes to transform, a tensor/array of shape\n`(N, 4)` or `(batch_size, N, 4)`.\nangle: Rotation angle in degrees.\ntranslate_x: Horizontal translation fraction.\ntranslate_y: Vertical translation fraction.\nscale: Scaling factor.\nshear_x: Shear angle in x-direction (degrees).\nshear_y: Shear angle in y-direction (degrees).\nheight: Height of the image/data.\nwidth: Width of the image/data.\ncenter_x:  x-coordinate of the transformation center (fraction).\ncenter_y: y-coordinate of the transformation center (fraction).\nbounding_box_format: The format of the input bounding boxes. Defaults to\n`\"xyxy\"`.\n\nReturns:\nThe transformed bounding boxes, a tensor/array with the same shape\nas the input `boxes`.", "source": "github-repos"}
{"code": "def copy_graph(subject, existing_graph):\n    new_graph = rdflib.Graph()\n    for (predicate, object_) in existing_graph.predicate_objects():\n        new_graph.add((subject, predicate, object_))\n    return new_graph", "docstring": "Function takes a subject and an existing graph, returns a new graph with\nall predicate and objects of the existing graph copied to the new_graph with\nsubject as the new subject\n\nArgs:\nsubject(rdflib.URIRef): A URIRef subject\nexisting_graph(rdflib.Graph): A rdflib.Graph\n\nReturns:\nrdflib.Graph", "source": "codesearchnet"}
{"code": "def to_wider_model(self, pre_layer_id, n_add):\n        \n        self.operation_history.append((\"to_wider_model\", pre_layer_id, n_add))\n        pre_layer = self.layer_list[pre_layer_id]\n        output_id = self.layer_id_to_output_node_ids[pre_layer_id][0]\n        dim = layer_width(pre_layer)\n        self.vis = {}\n        self._search(output_id, dim, dim, n_add)\n        \n        for u in self.topological_order:\n            for v, layer_id in self.adj_list[u]:\n                self.node_list[v].shape = self.layer_list[layer_id].output_shape", "docstring": "Widen the last dimension of the output of the pre_layer.\nArgs:\npre_layer_id: The ID of a convolutional layer or dense layer.\nn_add: The number of dimensions to add.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               mesh_impl,\n               laid_out_input,\n               mesh_axes,\n               add_counter_fn=None):\n    \n    self.mesh_impl = mesh_impl\n    self.laid_out_input = laid_out_input\n    self.mesh_axes = mesh_axes\n    self.add_counter_fn = add_counter_fn\n    self._reduced = None", "docstring": "Create a LazyAllreduceSum.\n\nArgs:\nmesh_impl: a mesh_impl\nlaid_out_input: a LaidOutTensor\nmesh_axes: a list of mesh axes\nadd_counter_fn: a function taking no arguments which calls\nlowering.add_counter if and when the allreduce executes.\nReturns:\na LazyAllreduceSum", "source": "juraj-google-style"}
{"code": "class ProgbarLogger(Callback):\n\n    def __init__(self):\n        super().__init__()\n        self.seen = 0\n        self.progbar = None\n        self.target = None\n        self.verbose = 1\n        self.epochs = 1\n        self._called_in_fit = False\n\n    def set_params(self, params):\n        verbose = params['verbose']\n        if verbose == 'auto':\n            verbose = 1\n        self.verbose = verbose\n        self.epochs = params['epochs']\n        self.target = params['steps']\n\n    def on_train_begin(self, logs=None):\n        self._called_in_fit = True\n\n    def on_test_begin(self, logs=None):\n        if not self._called_in_fit:\n            self._reset_progbar()\n            self._maybe_init_progbar()\n\n    def on_predict_begin(self, logs=None):\n        self._reset_progbar()\n        self._maybe_init_progbar()\n\n    def on_epoch_begin(self, epoch, logs=None):\n        self._reset_progbar()\n        self._maybe_init_progbar()\n        if self.verbose and self.epochs > 1:\n            io_utils.print_msg(f'Epoch {epoch + 1}/{self.epochs}')\n\n    def on_train_batch_end(self, batch, logs=None):\n        self._update_progbar(batch, logs)\n\n    def on_test_batch_end(self, batch, logs=None):\n        if not self._called_in_fit:\n            self._update_progbar(batch, logs)\n\n    def on_predict_batch_end(self, batch, logs=None):\n        self._update_progbar(batch, None)\n\n    def on_epoch_end(self, epoch, logs=None):\n        self._finalize_progbar(logs)\n\n    def on_test_end(self, logs=None):\n        if not self._called_in_fit:\n            self._finalize_progbar(logs)\n\n    def on_predict_end(self, logs=None):\n        self._finalize_progbar(logs)\n\n    def _reset_progbar(self):\n        self.seen = 0\n        self.progbar = None\n\n    def _maybe_init_progbar(self):\n        if self.progbar is None:\n            self.progbar = Progbar(target=self.target, verbose=self.verbose, unit_name='step')\n\n    def _update_progbar(self, batch, logs=None):\n        \n        logs = logs or {}\n        self._maybe_init_progbar()\n        self.seen = batch + 1\n        if self.verbose == 1:\n            self.progbar.update(self.seen, list(logs.items()), finalize=False)\n\n    def _finalize_progbar(self, logs):\n        logs = logs or {}\n        if self.target is None:\n            self.target = self.seen\n            self.progbar.target = self.target\n        self.progbar.update(self.target, list(logs.items()), finalize=True)", "docstring": "Callback that prints metrics to stdout.\n\nArgs:\ncount_mode: One of `\"steps\"` or `\"samples\"`.\nWhether the progress bar should\ncount samples seen or steps (batches) seen.\n\nRaises:\nValueError: In case of invalid `count_mode`.", "source": "github-repos"}
{"code": "def from_json(cls, json):\n    \n    return cls(\n        namespace_range.NamespaceRange.from_json_object(\n            json[cls.NAMESPACE_RANGE_PARAM]),\n        json[cls.BATCH_SIZE_PARAM])", "docstring": "Create new DatastoreInputReader from the json, encoded by to_json.\n\nArgs:\njson: json map representation of DatastoreInputReader.\n\nReturns:\nan instance of DatastoreInputReader with all data deserialized from json.", "source": "juraj-google-style"}
{"code": "def next_event(self, event_id, prev=False):\n        \n        i = self.events.index(self._events_dict[event_id])\n        if prev and i > 0:\n            return self.events[i - 1]\n        elif not prev and i + 1 < len(self.events):\n            return self.events[i + 1]\n        else:\n            return None", "docstring": "Get the event following another event in this conversation.\n\nArgs:\nevent_id (str): ID of the event.\nprev (bool): If ``True``, return the previous event rather than the\nnext event. Defaults to ``False``.\n\nRaises:\nKeyError: If no such :class:`.ConversationEvent` is known.\n\nReturns:\n:class:`.ConversationEvent` or ``None`` if there is no following\nevent.", "source": "juraj-google-style"}
{"code": "def generate_nodes(tpm, cm, network_state, indices, node_labels=None):\n    \n    if node_labels is None:\n        node_labels = NodeLabels(None, indices)\n\n    node_state = utils.state_of(indices, network_state)\n\n    return tuple(Node(tpm, cm, index, state, node_labels)\n                 for index, state in zip(indices, node_state))", "docstring": "Generate |Node| objects for a subsystem.\n\nArgs:\ntpm (np.ndarray): The system's TPM\ncm (np.ndarray): The corresponding CM.\nnetwork_state (tuple): The state of the network.\nindices (tuple[int]): Indices to generate nodes for.\n\nKeyword Args:\nnode_labels (|NodeLabels|): Textual labels for each node.\n\nReturns:\ntuple[Node]: The nodes of the system.", "source": "juraj-google-style"}
{"code": "def extract_storm_objects(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, obj_buffer=0):\n    storm_objects = []\n    if (len(label_grid.shape) == 3):\n        ij_grid = np.indices(label_grid.shape[1:])\n        for (t, time) in enumerate(times):\n            storm_objects.append([])\n            object_slices = list(find_objects(label_grid[t], label_grid[t].max()))\n            if (len(object_slices) > 0):\n                for (o, obj_slice) in enumerate(object_slices):\n                    if (obj_buffer > 0):\n                        obj_slice_buff = [slice(np.maximum(0, (osl.start - obj_buffer)), np.minimum((osl.stop + obj_buffer), label_grid.shape[(l + 1)])) for (l, osl) in enumerate(obj_slice)]\n                    else:\n                        obj_slice_buff = obj_slice\n                    storm_objects[(- 1)].append(STObject(data[t][obj_slice_buff], np.where((label_grid[t][obj_slice_buff] == (o + 1)), 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], time, time, dx=dx, step=dt))\n                    if (t > 0):\n                        dims = storm_objects[(- 1)][(- 1)].timesteps[0].shape\n                        storm_objects[(- 1)][(- 1)].estimate_motion(time, data[(t - 1)], dims[1], dims[0])\n    else:\n        ij_grid = np.indices(label_grid.shape)\n        storm_objects.append([])\n        object_slices = list(find_objects(label_grid, label_grid.max()))\n        if (len(object_slices) > 0):\n            for (o, obj_slice) in enumerate(object_slices):\n                if (obj_buffer > 0):\n                    obj_slice_buff = [slice(np.maximum(0, (osl.start - obj_buffer)), np.minimum((osl.stop + obj_buffer), label_grid.shape[(l + 1)])) for (l, osl) in enumerate(obj_slice)]\n                else:\n                    obj_slice_buff = obj_slice\n                storm_objects[(- 1)].append(STObject(data[obj_slice_buff], np.where((label_grid[obj_slice_buff] == (o + 1)), 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], times, times, dx=dx, step=dt))\n    return storm_objects", "docstring": "After storms are labeled, this method extracts the storm objects from the grid and places them into STObjects.\nThe STObjects contain intensity, location, and shape information about each storm at each timestep.\n\nArgs:\nlabel_grid: 2D or 3D array output by label_storm_objects.\ndata: 2D or 3D array used as input to label_storm_objects.\nx_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.\ny_grid: 2D array of y-coordinate data.\ntimes: List or array of time values, preferably as integers\ndx: grid spacing in same units as x_grid and y_grid.\ndt: period elapsed between times\nobj_buffer: number of extra pixels beyond bounding box of object to store in each STObject\n\nReturns:\nstorm_objects: list of lists containing STObjects identified at each time.", "source": "codesearchnet"}
{"code": "def _from_safe_path_param_name(safe_parameter):\n    \n    assert safe_parameter.startswith('_')\n    safe_parameter_as_base32 = safe_parameter[1:]\n\n    padding_length = - len(safe_parameter_as_base32) % 8\n    padding = '=' * padding_length\n    return base64.b32decode(safe_parameter_as_base32 + padding)", "docstring": "Takes a safe regex group name and converts it back to the original value.\n\nOnly alphanumeric characters and underscore are allowed in variable name\ntokens, and numeric are not allowed as the first character.\n\nThe safe_parameter is a base32 representation of the actual value.\n\nArgs:\nsafe_parameter: A string that was generated by _to_safe_path_param_name.\n\nReturns:\nA string, the parameter matched from the URL template.", "source": "juraj-google-style"}
{"code": "def _get_time(header, keys, name):\n        \n        for key in keys:\n            try:\n                date_value = header.pop(key)\n            except KeyError:\n                continue\n            try:\n                \n                return to_timestamp(parse(date_value))\n            except TypeError:\n                \n                return float(date_value)\n        raise UnsupportedOperation(name)", "docstring": "Get time from header\n\nArgs:\nheader (dict): Object header.\nkeys (tuple of str): Header keys.\nname (str): Method name.\n\nReturns:\nfloat: The number of seconds since the epoch", "source": "juraj-google-style"}
{"code": "def get_controller_info_records(self):\n    info_records = []\n    for controller_module_name in self._controller_objects.keys():\n        with expects.expect_no_raises('Failed to collect controller info from %s' % controller_module_name):\n            record = self._create_controller_info_record(controller_module_name)\n            if record:\n                info_records.append(record)\n    return info_records", "docstring": "Get the info records for all the controller objects in the manager.\n\nNew info records for each controller object are created for every call\nso the latest info is included.\n\nReturns:\nList of records.ControllerInfoRecord objects. Each opject conatins\nthe info of a type of controller", "source": "github-repos"}
{"code": "def generate_identifier(sender, instance, **kwargs):\n    identifier = Concept.create_identifier(instance.query)\n    qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)\n    if instance.pk:\n        qs = qs.exclude(pk=instance.pk)\n    if (qs.count() > 0):\n        raise ValueError('Concept identifier conflict')\n    instance.identifier = identifier", "docstring": "Generate and set identifier of concept before saving object to DB\n\nArgs:\nsender (class): should be Concept\ninstance (Concept): saving concept", "source": "codesearchnet"}
{"code": "def get_ss_class(pdb_file, dssp_file, chain):\n    \n    prag = pr.parsePDB(pdb_file)\n    pr.parseDSSP(dssp_file, prag)\n    alpha, threeTen, beta = get_dssp_ss_content_multiplechains(prag, chain)\n\n    if alpha == 0 and beta > 0:\n        classification = 'all-beta'\n    elif beta == 0 and alpha > 0:\n        classification = 'all-alpha'\n    elif beta == 0 and alpha == 0:\n        classification = 'mixed'\n    elif float(alpha) / beta >= 20:\n        classification = 'all-alpha'\n    else:\n        classification = 'mixed'\n\n    return classification", "docstring": "Define the secondary structure class of a PDB file at the specific chain\n\nArgs:\npdb_file:\ndssp_file:\nchain:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    cls_len = int(getattr(self, 'cls_token_id', None) is not None)\n    sep_len = int(getattr(self, 'sep_token_id', None) is not None)\n    if token_ids_1 is None:\n        return [0] * (cls_len + len(token_ids_0) + sep_len)\n    return [0] * (cls_len + len(token_ids_0) + sep_len) + [1] * (len(token_ids_1) + sep_len)", "docstring": "Create the token type IDs corresponding to the sequences passed. [What are token type\nIDs?](../glossary#token-type-ids)\n\nShould be overridden in a subclass if the model has a special way of building those.\n\nArgs:\ntoken_ids_0 (`List[int]`): The first tokenized sequence.\ntoken_ids_1 (`List[int]`, *optional*): The second tokenized sequence.\n\nReturns:\n`List[int]`: The token type ids.", "source": "github-repos"}
{"code": "def set_intra_op_parallelism_threads(num_threads):\n    context.context().intra_op_parallelism_threads = num_threads", "docstring": "Set number of threads used within an individual op for parallelism.\n\nCertain operations like matrix multiplication and reductions can utilize\nparallel threads for speed ups. A value of 0 means the system picks an\nappropriate number.\n\nArgs:\nnum_threads: Number of parallel threads", "source": "github-repos"}
{"code": "def _GetCheckpointFilename(save_dir, latest_filename):\n    if latest_filename is None:\n        latest_filename = 'checkpoint'\n    return os.path.join(save_dir, latest_filename)", "docstring": "Returns a filename for storing the CheckpointState.\n\nArgs:\nsave_dir: The directory for saving and restoring checkpoints.\nlatest_filename: Name of the file in 'save_dir' that is used\nto store the CheckpointState.\n\nReturns:\nThe path of the file that contains the CheckpointState proto.", "source": "github-repos"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if (self._unique_identifier is not None):\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    if (self._key_format_type is not None):\n        self._key_format_type.write(local_stream, kmip_version=kmip_version)\n    if (self._key_compression_type is not None):\n        self._key_compression_type.write(local_stream, kmip_version=kmip_version)\n    if (self._key_wrapping_specification is not None):\n        self._key_wrapping_specification.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(GetRequestPayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Get request payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def BreachDepressions(\n  dem,\n  in_place = False,\n  topology = 'D8'\n):\n  \n  if type(dem) is not rdarray:\n    raise Exception(\"A richdem.rdarray or numpy.ndarray is required!\")\n\n  if topology not in ['D8','D4']:\n    raise Exception(\"Unknown topology!\")\n\n  if not in_place:\n    dem = dem.copy()\n\n  _AddAnalysis(dem, \"BreachDepressions(dem)\")\n\n  demw = dem.wrap()\n\n  if topology=='D8':\n    _richdem.rdBreachDepressionsD8(demw)\n  elif topology=='D4':\n    _richdem.rdBreachDepressionsD4(demw)\n\n  dem.copyFromWrapped(demw)\n\n  if not in_place:\n    return dem", "docstring": "Breaches all depressions in a DEM.\n\nArgs:\ndem     (rdarray): An elevation model\nin_place (bool):   If True, the DEM is modified in place and there is\nno return; otherwise, a new, altered DEM is returned.\ntopology (string): A topology indicator\n\nReturns:\nDEM without depressions.", "source": "juraj-google-style"}
{"code": "def to_bqm(self, model):\n    linear = ((v, float(model.get_py_value(bias))) for (v, bias) in self.linear.items())\n    quadratic = ((u, v, float(model.get_py_value(bias))) for ((u, v), bias) in self.quadratic.items())\n    offset = float(model.get_py_value(self.offset))\n    return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)", "docstring": "Given a pysmt model, return a bqm.\n\nAdds the values of the biases as determined by the SMT solver to a bqm.\n\nArgs:\nmodel: A pysmt model.\n\nReturns:\n:obj:`dimod.BinaryQuadraticModel`", "source": "codesearchnet"}
{"code": "def _convert_variables_to_tensors(self):\n    components = self._type_spec._to_components(self)\n    tensor_components = variable_utils.convert_variables_to_tensors(components)\n    return self._type_spec._from_components(tensor_components)", "docstring": "Recursively converts ResourceVariables in the LinearOperator to Tensors.\n\nThe usage of `self._type_spec._from_components` violates the contract of\n`CompositeTensor`, since it is called on a different nested structure\n(one containing only `Tensor`s) than `self.type_spec` specifies (one that\nmay contain `ResourceVariable`s). Since `LinearOperator`'s\n`_from_components` method just passes the contents of the nested structure\nto `__init__` to rebuild the operator, and any `LinearOperator` that may be\ninstantiated with `ResourceVariables` may also be instantiated with\n`Tensor`s, this usage is valid.\n\nReturns:\ntensor_operator: `self` with all internal Variables converted to Tensors.", "source": "github-repos"}
{"code": "def _FormatDescription(self, event):\n    date_time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp, timezone=self._output_mediator.timezone)\n    timestamp_description = (event.timestamp_desc or 'UNKNOWN')\n    (message, _) = self._output_mediator.GetFormattedMessages(event)\n    if (message is None):\n        data_type = getattr(event, 'data_type', 'UNKNOWN')\n        raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n    description = '{0:s}; {1:s}; {2:s}'.format(date_time_string, timestamp_description, message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))\n    return self._SanitizeField(description)", "docstring": "Formats the description.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted description field.", "source": "codesearchnet"}
{"code": "def volumes(self):\n    return [EBSVolume(res) for res in db.Resource.join(ResourceProperty, (Resource.resource_id == ResourceProperty.resource_id)).filter((Resource.resource_type_id == ResourceType.get('aws_ebs_volume').resource_type_id), (ResourceProperty.name == 'attachments'), func.JSON_CONTAINS(ResourceProperty.value, func.JSON_QUOTE(self.id))).all()]", "docstring": "Returns a list of the volumes attached to the instance\n\nReturns:\n`list` of `EBSVolume`", "source": "codesearchnet"}
{"code": "def connect(portname, baudrate):\n    \n    global SERPORT\n    try:\n        SERPORT = serial.Serial(portname, baudrate, timeout = 0.1)\n\n    except:\n        raise HerkulexError(\"could not open the serial port\")", "docstring": "Connect to the Herkulex bus\n\nConnect to serial port to which Herkulex Servos are attatched\n\nArgs:\nportname (str): The serial port name\nbaudrate (int): The serial port baudrate\nRaises:\nSerialException: Error occured while opening serial port", "source": "juraj-google-style"}
{"code": "def download(s3_conn, out_filename, s3_path):\n    (bucket_name, prefix) = split_s3_path(s3_path)\n    bucket = s3_conn.get_bucket(bucket_name)\n    key = boto.s3.key.Key(bucket=bucket, name=prefix)\n    logging.info('loading from %s into %s', key, out_filename)\n    key.get_contents_to_filename(out_filename, cb=log_download_progress)", "docstring": "Downloads the given s3_path\n\nArgs:\ns3_conn (boto.s3.connection) a boto s3 connection\nout_filename (str) local filename to save the file\ns3_path (str) the source path on s3", "source": "codesearchnet"}
{"code": "def GetTSKVsPartByPathSpec(tsk_volume, path_spec):\n    location = getattr(path_spec, 'location', None)\n    part_index = getattr(path_spec, 'part_index', None)\n    start_offset = getattr(path_spec, 'start_offset', None)\n    partition_index = None\n    if (part_index is None):\n        if (location is not None):\n            if location.startswith('/p'):\n                try:\n                    partition_index = (int(location[2:], 10) - 1)\n                except ValueError:\n                    pass\n            if ((partition_index is None) or (partition_index < 0)):\n                location = None\n        if ((location is None) and (start_offset is None)):\n            return (None, None)\n    bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)\n    current_part_index = 0\n    current_partition_index = 0\n    tsk_vs_part = None\n    tsk_vs_part_list = list(tsk_volume)\n    number_of_tsk_vs_parts = len(tsk_vs_part_list)\n    if (number_of_tsk_vs_parts > 0):\n        if ((part_index is not None) and ((part_index < 0) or (part_index >= number_of_tsk_vs_parts))):\n            return (None, None)\n        for tsk_vs_part in tsk_vs_part_list:\n            if TSKVsPartIsAllocated(tsk_vs_part):\n                if (partition_index is not None):\n                    if (partition_index == current_partition_index):\n                        break\n                current_partition_index += 1\n            if ((part_index is not None) and (part_index == current_part_index)):\n                break\n            if (start_offset is not None):\n                start_sector = TSKVsPartGetStartSector(tsk_vs_part)\n                if (start_sector is not None):\n                    start_sector *= bytes_per_sector\n                    if (start_sector == start_offset):\n                        break\n            current_part_index += 1\n    if ((tsk_vs_part is None) or (current_part_index >= number_of_tsk_vs_parts)):\n        return (None, None)\n    if (not TSKVsPartIsAllocated(tsk_vs_part)):\n        current_partition_index = None\n    return (tsk_vs_part, current_partition_index)", "docstring": "Retrieves the TSK volume system part object from the TSK volume object.\n\nArgs:\ntsk_volume (pytsk3.Volume_Info): TSK volume information.\npath_spec (PathSpec): path specification.\n\nReturns:\ntuple: contains:\n\npytsk3.TSK_VS_PART_INFO: TSK volume system part information or\nNone on error.\nint: partition index or None if not available.", "source": "codesearchnet"}
{"code": "def mash_dist_trusted(fasta_path):\n    args = [MASH_BIN, 'dist', MASH_SKETCH_FILE, fasta_path]\n    p = Popen(args, stderr=PIPE, stdout=PIPE)\n    (stdout, stderr) = p.communicate()\n    retcode = p.returncode\n    if (retcode != 0):\n        raise Exception('Could not run Mash dist {}'.format(stderr))\n    return stdout", "docstring": "Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.\n\nArgs:\nmash_bin (str): Mash binary path\n\nReturns:\n(str): Mash STDOUT string", "source": "codesearchnet"}
{"code": "def _init_header(self, string):\n        \n\n        taf_header_pattern = \n\n        metar_header_pattern = \n\n        header_taf = re.match(taf_header_pattern, string, re.VERBOSE)\n        header_metar = re.match(metar_header_pattern, string, re.VERBOSE)\n\n        \n        \n        \n        if header_taf:\n            header_dict = header_taf.groupdict()\n            header_dict['form'] = 'taf'\n        elif header_metar:\n            header_dict = header_metar.groupdict()\n            header_dict['form'] = 'metar'\n        else:\n            raise MalformedTAF(\"No valid TAF/METAR header found\")\n\n        return header_dict", "docstring": "Extracts header part from TAF/METAR string and populates header dict\n\nArgs:\nTAF/METAR report string\n\nRaises:\nMalformedTAF: An error parsing the report\n\nReturns:\nHeader dictionary", "source": "juraj-google-style"}
{"code": "def get_percentage_bond_dist_changes(self, max_radius=3.0):\n    data = collections.defaultdict(dict)\n    for inds in itertools.combinations(list(range(len(self.initial))), 2):\n        (i, j) = sorted(inds)\n        initial_dist = self.initial[i].distance(self.initial[j])\n        if (initial_dist < max_radius):\n            final_dist = self.final[i].distance(self.final[j])\n            data[i][j] = ((final_dist / initial_dist) - 1)\n    return data", "docstring": "Returns the percentage bond distance changes for each site up to a\nmaximum radius for nearest neighbors.\n\nArgs:\nmax_radius (float): Maximum radius to search for nearest\nneighbors. This radius is applied to the initial structure,\nnot the final structure.\n\nReturns:\nBond distance changes as a dict of dicts. E.g.,\n{index1: {index2: 0.011, ...}}. For economy of representation, the\nindex1 is always less than index2, i.e., since bonding between\nsite1 and siten is the same as bonding between siten and site1,\nthere is no reason to duplicate the information or computation.", "source": "codesearchnet"}
{"code": "async def update_server_data(server):\n    data = datatools.get_data()\n    send_welcome_message = False\n    if (server.id not in data['discord']['servers']):\n        logger.debug('Adding new server to serverdata')\n        data['discord']['servers'][server.id] = {'prefix': '!'}\n        if (('mute_intro' not in data) or (not data['mute_intro'])):\n            send_welcome_message = True\n    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n    _dir_modules = '{}/../'.format(_dir)\n    for module_name in os.listdir(_dir_modules):\n        if (module_name.startswith('_') or module_name.startswith('!')):\n            continue\n        if (not os.path.isfile('{}/{}/_data.py'.format(_dir_modules, module_name))):\n            logger.warning('No _data.py file found for module {}'.format(module_name))\n            continue\n        try:\n            import_name = '.discord_modis.modules.{}.{}'.format(module_name, '_data')\n            _data = importlib.import_module(import_name, 'modis')\n            if (_data.modulename not in data['discord']['servers'][server.id]):\n                data['discord']['servers'][server.id][_data.modulename] = _data.sd_structure\n                datatools.write_data(data)\n        except Exception as e:\n            logger.error('Could not initialise module {}'.format(module_name))\n            logger.exception(e)\n    datatools.write_data(data)\n    if send_welcome_message:\n        default_channel = server.default_channel\n        if (not default_channel):\n            for channel in server.channels:\n                if (channel.name == 'general'):\n                    default_channel = channel\n                    break\n        if (not default_channel):\n            for channel in server.channels:\n                if ('general' in channel.name):\n                    default_channel = channel\n                    break\n        if (not default_channel):\n            for channel in server.channels:\n                if (channel.type == discord.ChannelType.text):\n                    default_channel = channel\n                    break\n        if default_channel:\n            hello_message = (((\"Hello! I'm Modis.\\n\\n\" + 'The prefix is currently `!`, and can be changed at any time using `!prefix`\\n\\n') + 'You can use `!help` to get help commands for all modules, ') + 'or {} me to get the server prefix and help commands.'.format(server.me.mention))\n            (await client.send_message(default_channel, hello_message))", "docstring": "Updates the server info for the given server\n\nArgs:\nserver: The Discord server to update info for", "source": "codesearchnet"}
{"code": "def verify_signature(message, signature, certs):\n    if isinstance(certs, (six.text_type, six.binary_type)):\n        certs = [certs]\n    for cert in certs:\n        verifier = rsa.RSAVerifier.from_string(cert)\n        if verifier.verify(message, signature):\n            return True\n    return False", "docstring": "Verify an RSA cryptographic signature.\n\nChecks that the provided ``signature`` was generated from ``bytes`` using\nthe private key associated with the ``cert``.\n\nArgs:\nmessage (Union[str, bytes]): The plaintext message.\nsignature (Union[str, bytes]): The cryptographic signature to check.\ncerts (Union[Sequence, str, bytes]): The certificate or certificates\nto use to check the signature.\n\nReturns:\nbool: True if the signature is valid, otherwise False.", "source": "codesearchnet"}
{"code": "def PrepareMergeTaskStorage(self, task):\n    \n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Unsupported storage type.')\n\n    merge_storage_file_path = self._GetMergeTaskStorageFilePath(task)\n    processed_storage_file_path = self._GetProcessedStorageFilePath(task)\n\n    task.storage_file_size = os.path.getsize(processed_storage_file_path)\n\n    try:\n      os.rename(processed_storage_file_path, merge_storage_file_path)\n    except OSError as exception:\n      raise IOError((\n          'Unable to rename task storage file: {0:s} with error: '\n          '{1!s}').format(processed_storage_file_path, exception))", "docstring": "Prepares a task storage for merging.\n\nMoves the task storage file from the processed directory to the merge\ndirectory.\n\nArgs:\ntask (Task): task.\n\nRaises:\nIOError: if the storage type is not supported or\nif the storage file cannot be renamed.\nOSError: if the storage type is not supported or\nif the storage file cannot be renamed.", "source": "juraj-google-style"}
{"code": "def delete(self, filename):\n        \n        for repo in self._children:\n            if hasattr(repo, \"delete\"):\n                repo.delete(filename)", "docstring": "Delete a file from all repositories which support it.\n\nIndividual repositories will determine correct location to\ndelete from (Scripts vs. Packages).\n\nThis will not remove the corresponding Package or Script object\nfrom the JSS's database!\n\nArgs:\nfilename: The filename you wish to delete (do not include a\npath).", "source": "juraj-google-style"}
{"code": "def __get_unused_context(self, parse_result, context):\n        \n        tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])\n        result_context = [c for c in context if c['key'] not in tags_keys]\n        return result_context", "docstring": "Used to get unused context from context.  Any keys not in\nparse_result\n\nArgs:\nparse_results(list): parsed results used to identify what keys\nin the context are used.\ncontext(list): this is the context used to match with parsed results\nkeys missing in the parsed results are the unused context\n\nReturns:\nlist: A list of the unused context results.", "source": "juraj-google-style"}
{"code": "def decode(cls, command_str):\n    (name, _, arg) = command_str.partition(' ')\n    args = []\n    if (len(arg) > 0):\n        if ((arg[0] != '{') or (arg[(- 1)] != '}')):\n            raise DataError('Invalid command, argument is not contained in { and }', arg=arg, cmd=name)\n        arg = arg[1:(- 1)]\n        args = arg.split(',')\n    proc = []\n    for arg in args:\n        if arg.startswith('hex:'):\n            arg = unhexlify(arg[4:]).decode('utf-8')\n        proc.append(arg)\n    return Command(name, proc)", "docstring": "Decode a string encoded command back into a Command object.\n\nArgs:\ncommand_str (str): The encoded command string output from a\nprevious call to encode.\n\nReturns:\nCommand: The decoded Command object.", "source": "codesearchnet"}
{"code": "def _add_message_field(self, field_name, value, params):\n    if ('.' not in field_name):\n        params[field_name] = value\n        return\n    (root, remaining) = field_name.split('.', 1)\n    sub_params = params.setdefault(root, {})\n    self._add_message_field(remaining, value, sub_params)", "docstring": "Converts a . delimitied field name to a message field in parameters.\n\nThis adds the field to the params dict, broken out so that message\nparameters appear as sub-dicts within the outer param.\n\nFor example:\n{'a.b.c': ['foo']}\nbecomes:\n{'a': {'b': {'c': ['foo']}}}\n\nArgs:\nfield_name: A string containing the '.' delimitied name to be converted\ninto a dictionary.\nvalue: The value to be set.\nparams: The dictionary holding all the parameters, where the value is\neventually set.", "source": "codesearchnet"}
{"code": "def gen_conversion_log_html(conversion_log_dir, quantization_enabled, tflite_graph_path):\n    template_filename = _resource_loader.get_path_to_datafile('template.html')\n    if not os.path.exists(template_filename):\n        raise IOError(\"Failed to generate HTML: file '{0}' doesn't exist.\".format(template_filename))\n    toco_log_before_path = os.path.join(conversion_log_dir, 'toco_log_before.pb')\n    toco_log_after_path = os.path.join(conversion_log_dir, 'toco_log_after.pb')\n    dot_before_path = os.path.join(conversion_log_dir, 'toco_tf_graph.dot')\n    dot_after_path = os.path.join(conversion_log_dir, 'toco_tflite_graph.dot')\n    if not os.path.exists(toco_log_before_path):\n        raise IOError(\"Failed to generate HTML: file '{0}' doesn't exist.\".format(toco_log_before_path))\n    if not os.path.exists(toco_log_after_path):\n        raise IOError(\"Failed to generate HTML: file '{0}' doesn't exist.\".format(toco_log_after_path))\n    if not os.path.exists(dot_before_path):\n        raise IOError(\"Failed to generate HTML: file '{0}' doesn't exist.\".format(dot_before_path))\n    if not os.path.exists(dot_after_path):\n        raise IOError(\"Failed to generate HTML: file '{0}' doesn't exist.\".format(dot_after_path))\n    html_generator = HTMLGenerator(template_filename, os.path.join(conversion_log_dir, 'toco_conversion_summary.html'))\n    toco_conversion_log_before = _toco_conversion_log_pb2.TocoConversionLog()\n    toco_conversion_log_after = _toco_conversion_log_pb2.TocoConversionLog()\n    with open(toco_log_before_path, 'rb') as f:\n        toco_conversion_log_before.ParseFromString(f.read())\n    with open(toco_log_after_path, 'rb') as f:\n        toco_conversion_log_after.ParseFromString(f.read())\n    with io.open(dot_before_path, 'r', encoding='utf-8') as f:\n        dot_before = f.read().rstrip()\n    with io.open(dot_after_path, 'r', encoding='utf-8') as f:\n        dot_after = f.read().rstrip()\n    html_generator.generate(toco_conversion_log_before, toco_conversion_log_after, quantization_enabled, dot_before, dot_after, toco_conversion_log_after.toco_err_logs, tflite_graph_path)", "docstring": "Generates an HTML report about the conversion process.\n\nArgs:\nconversion_log_dir: A string specifying the file directory of the conversion\nlogs. It's required that before calling this function, the\n`conversion_log_dir`\nalready contains the following files: `toco_log_before.pb`,\n`toco_log_after.pb`, `toco_tf_graph.dot`,\n`toco_tflite_graph.dot`.\nquantization_enabled: A boolean, passed from the tflite converter to\nindicate whether post-training quantization is enabled during conversion.\ntflite_graph_path: A string, the filepath to the converted TFLite model.\n\nRaises:\nIOError: When any of the required files doesn't exist.", "source": "github-repos"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    \n    values_dict = {}\n    for registry_value in registry_key.GetValues():\n      \n      if not registry_value.name or not self._RE_VALUE_NAME.search(\n          registry_value.name):\n        continue\n\n      \n      if not registry_value.data or not registry_value.DataIsString():\n        continue\n\n      value_string = registry_value.GetDataAsObject()\n      values = self._RE_VALUE_DATA.findall(value_string)\n\n      \n      if len(values) != 1 or len(values[0]) != 2:\n        continue\n\n      try:\n        timestamp = int(values[0][0], 16)\n      except ValueError:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to convert filetime string to an integer for '\n            'value: {0:s}.').format(registry_value.name))\n        continue\n\n      event_data = OfficeMRUWindowsRegistryEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = registry_value.offset\n      \n      event_data.value_string = value_string\n\n      values_dict[registry_value.name] = value_string\n\n      if not timestamp:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      else:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n\n      \n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n    event_data.source_append = self._SOURCE_APPEND\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration\n>>> import jax.numpy as jnp\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-base\")\n>>> model = FlaxLongT5ForConditionalGeneration.from_pretrained(\"google/long-t5-local-base\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def _GenOpenApiSpec(service_class_names, output_path, hostname=None, application_path=None, x_google_api_name=False):\n    output_files = []\n    service_configs = GenApiConfig(service_class_names, hostname=hostname, config_string_generator=openapi_generator.OpenApiGenerator(), application_path=application_path, x_google_api_name=x_google_api_name)\n    for (api_name_version, config) in service_configs.iteritems():\n        openapi_name = (api_name_version.replace('-', '') + 'openapi.json')\n        output_files.append(_WriteFile(output_path, openapi_name, config))\n    return output_files", "docstring": "Write openapi documents generated from the service classes to file.\n\nArgs:\nservice_class_names: A list of fully qualified ProtoRPC service names.\noutput_path: The directory to which to output the OpenAPI specs.\nhostname: A string hostname which will be used as the default version\nhostname. If no hostname is specified in the @endpoints.api decorator,\nthis value is the fallback. Defaults to None.\napplication_path: A string containing the path to the AppEngine app.\n\nReturns:\nA list of OpenAPI spec filenames.", "source": "codesearchnet"}
{"code": "def Getattr(self, path, fh=None):\n    del fh\n    if (not path):\n        raise fuse.FuseOSError(errno.ENOENT)\n    if (path != self.root):\n        full_path = self.root.Add(path)\n    else:\n        full_path = path\n    fd = aff4.FACTORY.Open(full_path, token=self.token)\n    if (full_path == '/'):\n        return self.MakePartialStat(fd)\n    fd = aff4.FACTORY.Open(full_path, token=self.token)\n    aff4_stat = fd.Get(fd.Schema.STAT)\n    if aff4_stat:\n        return aff4_stat.AsDict()\n    elif (fd.Get(fd.Schema.LAST) is None):\n        raise fuse.FuseOSError(errno.ENOENT)\n    else:\n        pass\n    return self.MakePartialStat(fd)", "docstring": "Performs a stat on a file or directory.\n\nArgs:\npath: The path to stat.\nfh: A file handler. Not used.\n\nReturns:\nA dictionary mapping st_ names to their values.\n\nRaises:\nFuseOSError: When a path is supplied that grr doesn't know about, ie an\ninvalid file path.\nValueError: If an empty path is passed. (The empty string, when passed to\nself.root.Add, returns a path for aff4:/, the root directory, which is not\nthe behaviour we want.)", "source": "codesearchnet"}
{"code": "def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Union[int, List[int]], vision_feature_select_strategy: str):\n    batch_size, frames, channels, height, width = pixel_values.shape\n    pixel_values = pixel_values.view(batch_size * frames, channels, height, width)\n    video_features = self.vision_tower(pixel_values, output_hidden_states=True)\n    if isinstance(vision_feature_layer, int):\n        selected_video_feature = video_features.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [video_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        selected_video_feature = torch.cat(hs_pool, dim=-1)\n    if vision_feature_select_strategy == 'default':\n        selected_video_feature = selected_video_feature[:, 1:]\n    elif vision_feature_select_strategy == 'full':\n        selected_video_feature = selected_video_feature\n    video_features = self.multi_modal_projector(selected_video_feature)\n    video_features = self.apply_pooling(video_features)\n    video_features = video_features.reshape(batch_size, frames * video_features.shape[1], -1)\n    return video_features", "docstring": "Obtains video last hidden states from the vision tower, apply multimodal projection and pooling.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)\nThe tensors corresponding to the input video.\nvision_feature_layer (`Union[int, List[int]], *optional*, defaults to -2`):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nvideo_features (List[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches\nand are of shape `(num_videos, video_length, embed_dim)`).", "source": "github-repos"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        images_kwargs = Qwen2VLProcessorKwargs._defaults.get('images_kwargs', {})\n        images_kwargs.update(kwargs)\n        merge_size = images_kwargs.get('merge_size', None) or self.image_processor.merge_size\n        num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]\n        num_image_tokens = [num_patches \n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    if video_sizes is not None:\n        videos_kwargs = Qwen2VLProcessorKwargs._defaults.get('videos_kwargs', {})\n        videos_kwargs.update(kwargs)\n        num_video_patches = [self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs) for video_size in video_sizes]\n        num_video_tokens = [num_patches \n        vision_data['num_video_tokens'] = num_video_tokens\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\nvideo_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (num_frames, height, width) per each video.\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def __init__(self, weekend_mask=None, holidays=None):\n    if weekend_mask is not None:\n        weekend_mask = tf.cast(weekend_mask, dtype=tf.bool)\n    if holidays is not None:\n        holidays = dt.convert_to_date_tensor(holidays).ordinal()\n    self._to_biz_space, self._from_biz_space = hol.business_day_mappers(weekend_mask=weekend_mask, holidays=holidays)", "docstring": "Initializer.\n\nArgs:\nweekend_mask: Boolean `Tensor` of 7 elements one for each day of the week\nstarting with Monday at index 0. A `True` value indicates the day is\nconsidered a weekend day and a `False` value implies a week day.\nDefault value: None which means no weekends are applied.\nholidays: Defines the holidays that are added to the weekends defined by\n`weekend_mask`. An instance of `dates.DateTensor` or an object\nconvertible to `DateTensor`.\nDefault value: None which means no holidays other than those implied by\nthe weekends (if any).", "source": "github-repos"}
{"code": "def filter_aliases(alias_table):\n    for alias in alias_table.sections():\n        if alias_table.has_option(alias, 'command'):\n            (yield (alias.split()[0], remove_pos_arg_placeholders(alias_table.get(alias, 'command'))))", "docstring": "Filter aliases that does not have a command field in the configuration file.\n\nArgs:\nalias_table: The alias table.\n\nYield:\nA tuple with [0] being the first word of the alias and\n[1] being the command that the alias points to.", "source": "codesearchnet"}
{"code": "def update(self, forecasts, observations):\n        \n        for t, threshold in enumerate(self.thresholds):\n            tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))\n            fp = np.count_nonzero((forecasts >= threshold) &\n                                  (observations < self.obs_threshold))\n            fn = np.count_nonzero((forecasts < threshold) &\n                                  (observations >= self.obs_threshold))\n            tn = np.count_nonzero((forecasts < threshold) &\n                                  (observations < self.obs_threshold))\n            self.contingency_tables.iloc[t] += [tp, fp, fn, tn]", "docstring": "Update the ROC curve with a set of forecasts and observations\n\nArgs:\nforecasts: 1D array of forecast values\nobservations: 1D array of observation values.", "source": "juraj-google-style"}
{"code": "def __validate(self, value, validate_element):\n    if (not self.repeated):\n        return validate_element(value)\n    elif isinstance(value, (list, tuple)):\n        result = []\n        for element in value:\n            if (element is None):\n                try:\n                    name = self.name\n                except AttributeError:\n                    raise ValidationError(('Repeated values for %s may not be None' % self.__class__.__name__))\n                else:\n                    raise ValidationError(('Repeated values for field %s may not be None' % name))\n            result.append(validate_element(element))\n        return result\n    elif (value is not None):\n        try:\n            name = self.name\n        except AttributeError:\n            raise ValidationError(('%s is repeated. Found: %s' % (self.__class__.__name__, value)))\n        else:\n            raise ValidationError(('Field %s is repeated. Found: %s' % (name, value)))\n    return value", "docstring": "Internal validation function.\n\nValidate an internal value using a function to validate\nindividual elements.\n\nArgs:\nvalue: Value to validate.\nvalidate_element: Function to use to validate individual elements.\n\nRaises:\nValidationError if value is not expected type.", "source": "codesearchnet"}
{"code": "def extract_xml(input_):\n    if (type(input_) == str):\n        file_object = open(input_, 'rb')\n    elif (type(input_) == bytes):\n        file_object = BytesIO(input_)\n    else:\n        file_object = input_\n    try:\n        header = file_object.read(6)\n        file_object.seek(0)\n        if header.startswith(MAGIC_ZIP):\n            _zip = zipfile.ZipFile(file_object)\n            xml = _zip.open(_zip.namelist()[0]).read().decode()\n        elif header.startswith(MAGIC_GZIP):\n            xml = GzipFile(fileobj=file_object).read().decode()\n        elif header.startswith(MAGIC_XML):\n            xml = file_object.read().decode()\n        else:\n            file_object.close()\n            raise InvalidAggregateReport('Not a valid zip, gzip, or xml file')\n        file_object.close()\n    except UnicodeDecodeError:\n        raise InvalidAggregateReport('File objects must be opened in binary (rb) mode')\n    except Exception as error:\n        raise InvalidAggregateReport('Invalid archive file: {0}'.format(error.__str__()))\n    return xml", "docstring": "Extracts xml from a zip or gzip file at the given path, file-like object,\nor bytes.\n\nArgs:\ninput_: A path to a file, a file like object, or bytes\n\nReturns:\nstr: The extracted XML", "source": "codesearchnet"}
{"code": "def get_realtime_urls(admin_view_func=(lambda x: x)):\n    from .widgets import REALTIME_WIDGETS\n    return [url(w.url_regex, admin_view_func(w.as_view()), name=w.url_name) for w in REALTIME_WIDGETS]", "docstring": "Get the URL for real-time widgets.\n\nArgs:\nadmin_view_func (callable): an admin_view method from an AdminSite\ninstance. By default: identity.\n\nReturns:\nlist: the list of the real-time URLs as django's ``url()``.", "source": "codesearchnet"}
{"code": "def parse_exac_line(line, header):\n    \n    exac_gene = {}\n    splitted_line = line.rstrip().split('\\t')\n    exac_gene = dict(zip(header, splitted_line))\n    exac_gene['hgnc_symbol'] = exac_gene['gene']\n    exac_gene['pli_score'] = float(exac_gene['pLI'])\n    exac_gene['raw'] = line\n    \n    return exac_gene", "docstring": "Parse an exac formated line\n\nArgs:\nline(list): A list with exac gene info\nheader(list): A list with the header info\n\nReturns:\nexac_info(dict): A dictionary with the relevant info", "source": "juraj-google-style"}
{"code": "def sample_id(self, lon):\n    if (self.grid == 'WAC'):\n        sample = np.rint(((float(self.SAMPLE_PROJECTION_OFFSET) + 1.0) + ((((((lon * np.pi) / 180.0) - float(self.CENTER_LONGITUDE)) * self.A_AXIS_RADIUS) * np.cos(((self.CENTER_LATITUDE * np.pi) / 180.0))) / (self.MAP_SCALE * 0.001))))\n    else:\n        sample = (np.rint((float(self.SAMPLE_PROJECTION_OFFSET) + (float(self.MAP_RESOLUTION) * (lon - float(self.CENTER_LONGITUDE))))) + 1)\n    return self._control_sample(sample)", "docstring": "Return the corresponding sample\n\nArgs:\nlon (int): longidute in degree\n\nReturns:\nCorreponding sample", "source": "codesearchnet"}
{"code": "def ContainsNone(self, *values):\n    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_NONE')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains none\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def add_log_file(path):\n    logfile_handler = RotatingFileHandler(path, maxBytes=50000, backupCount=2)\n    formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(module)s - %(message)s', datefmt='%d-%b-%Y %H:%M:%S')\n    logfile_handler.setFormatter(formatter)\n    geoparse_logger.addHandler(logfile_handler)", "docstring": "Add log file.\n\nArgs:\npath (:obj:`str`): Path to the log file.", "source": "codesearchnet"}
{"code": "def run(self, args):\n        \n        jlink = pylink.JLink()\n\n        if args.test:\n            if jlink.test():\n                print('Self-test succeeded.')\n            else:\n                print('Self-test failed.')\n        elif args.list is None or args.list in ['usb', 'ip']:\n            host = pylink.JLinkHost.USB_OR_IP\n            if args.list == 'usb':\n                host = pylink.JLinkHost.USB\n            elif args.list == 'ip':\n                host = pylink.JLinkHost.IP\n\n            emulators = jlink.connected_emulators(host)\n            for (index, emulator) in enumerate(emulators):\n                if index > 0:\n                    print('')\n\n                print('Product Name: %s' % emulator.acProduct.decode())\n                print('Serial Number: %s' % emulator.SerialNumber)\n\n                usb = bool(emulator.Connection)\n                if not usb:\n                    print('Nickname: %s' % emulator.acNickname.decode())\n                    print('Firmware: %s' % emulator.acFWString.decode())\n\n                print('Connection: %s' % ('USB' if usb else 'IP'))\n\n                if not usb:\n                    print('IP Address: %s' % emulator.aIPAddr)\n        elif args.supported is not None:\n            device = args.supported[0]\n            num_supported_devices = jlink.num_supported_devices()\n            for i in range(num_supported_devices):\n                found_device = jlink.supported_device(i)\n                if device.lower() == found_device.name.lower():\n                    print('Device Name: %s' % device)\n                    print('Core ID: %s' % found_device.CoreId)\n                    print('Flash Address: %s' % found_device.FlashAddr)\n                    print('Flash Size: %s bytes' % found_device.FlashSize)\n                    print('RAM Address: %s' % found_device.RAMAddr)\n                    print('RAM Size: %s bytes' % found_device.RAMSize)\n                    print('Manufacturer: %s' % found_device.manufacturer)\n                    break\n            else:\n                print('%s is not supported :(' % device)\n\n        return None", "docstring": "Runs the emulator command.\n\nArgs:\nself (EmulatorCommand): the ``EmulatorCommand`` instance\nargs (Namespace): arguments to parse\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def replace_gradient_components(self, value, component_grads):\n    raise NotImplementedError(f'{type(self).__name__}.replace_gradient_components()')", "docstring": "Replaces the gradient components in `value` with `component_grads`.\n\nArgs:\nvalue: A value with its gradient components compatible with\n`component_grads`.\ncomponent_grads: A nested structure of `Tensor` or `IndexedSlices` or\n`None` (for unconnected gradients).\n\nReturns:\nA copy of `value`, where the components that should be included in\ngradients have been replaced by `component_grads`; or `None` (if\n`component_grads` includes `None`).", "source": "github-repos"}
{"code": "def assert_pipeline_equal(test_case, expected_pipeline, actual_pipeline):\n    expected_pipeline_proto = expected_pipeline.to_runner_api(use_fake_coders=True)\n    actual_pipeline_proto = actual_pipeline.to_runner_api(use_fake_coders=True)\n    assert_pipeline_proto_equal(test_case, expected_pipeline_proto, actual_pipeline_proto)", "docstring": "Asserts the equivalence between two given apache_beam.Pipeline instances.\n\nArgs:\ntest_case: (unittest.TestCase) the unittest testcase where it asserts.\nexpected_pipeline: (Pipeline) the pipeline instance expected.\nactual_pipeline: (Pipeline) the actual pipeline instance to be asserted.", "source": "github-repos"}
{"code": "def to_json(self, is_admin=False):\n        \n        if is_admin:\n            return {\n                'accountId': self.account_id,\n                'accountName': self.account_name,\n                'accountType': self.account_type,\n                'contacts': self.contacts,\n                'enabled': True if self.enabled == 1 else False,\n                'requiredRoles': self.required_roles,\n                'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}\n            }\n        else:\n            return {\n                'accountId': self.account_id,\n                'accountName': self.account_name,\n                'contacts': self.contacts\n            }", "docstring": "Returns a dict representation of the object\n\nArgs:\nis_admin (`bool`): If true, include information about the account that should be avaiable only to admins\n\nReturns:\n`dict`", "source": "juraj-google-style"}
{"code": "def subscribe(self, callback, filter_):\n        \n        sub_id = \"subscriber_{uuid}\".format(uuid=uuid.uuid4())\n        \n        sub = pd.DataFrame({sub_id: filter_}).T\n        sub['callback'] = callback\n        self.subscribers = self.subscribers.append(sub)\n\n        \n        this_subscriber_metrics = self.__filter(self.metrics_meta, filter_)\n        if this_subscriber_metrics.empty:\n            logger.debug('Metrics for subscriber %s not found', sub_id)\n        else:\n            logger.debug('Found metrics for this subscriber, subscribing...: %s', this_subscriber_metrics)\n            \n            this_subscriber_metrics['callback'] = callback\n            prepared_callbacks = this_subscriber_metrics[['callback']]\n            \n            self.callbacks = self.callbacks.append(prepared_callbacks)", "docstring": "Create and register metric subscriber,\nfind metrics for this subscriber (using filter_) and subscribe\n\nArgs:\ncallback (object method): subscriber's callback\nfilter_ (dict): filter dict\n\nfilter sample:\n{'type': 'metrics', 'source': 'gun'}", "source": "juraj-google-style"}
{"code": "def zip_ll_row(params, data_row):\n    l = params[0]\n    pi = params[1]\n    d0 = (data_row == 0)\n    likelihood = ((d0 * pi) + ((1 - pi) * poisson.pmf(data_row, l)))\n    return (- np.log((likelihood + eps)).sum())", "docstring": "Returns the negative log-likelihood of a row given ZIP data.\n\nArgs:\nparams (list): [lambda zero-inf]\ndata_row (array): 1d array\n\nReturns:\nnegative log-likelihood", "source": "codesearchnet"}
{"code": "def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None):\n    all_responses = {}\n    if self._cache:\n        all_responses = self._cache.bulk_lookup(cache_api_name, url_params)\n        url_params = [key for key in url_params if (key not in all_responses.keys())]\n    if len(url_params):\n        urls = self._to_urls(fmt_url_path, url_params)\n        responses = self._requests.multi_get(urls, query_params)\n        for (url_param, response) in zip(url_params, responses):\n            if self._cache:\n                self._cache.cache_value(cache_api_name, url_param, response)\n            all_responses[url_param] = response\n    return all_responses", "docstring": "Makes multiple GETs to an OpenDNS endpoint.\n\nArgs:\ncache_api_name: string api_name for caching\nfmt_url_path: format string for building URL paths\nurl_params: An enumerable of strings used in building URLs\nquery_params - None / dict / list of dicts containing query params\nReturns:\nA dict of {url_param: api_result}", "source": "codesearchnet"}
{"code": "def retrieve_products(self, reviewer):\n    if (not isinstance(reviewer, self._reviewer_cls)):\n        raise TypeError(\"Type of given reviewer isn't acceptable:\", reviewer, ', expected:', self._reviewer_cls)\n    return list(self.graph.successors(reviewer))", "docstring": "Retrieve products reviewed by a given reviewer.\n\nArgs:\nreviewer: A reviewer.\n\nReturns:\nA list of products which the reviewer reviews.\n\nRaises:\nTypeError: when given reviewer isn't instance of specified reviewer\nclass when this graph is constructed.", "source": "codesearchnet"}
{"code": "def AppendContent(self, src_fd):\n    while 1:\n        blob = src_fd.read(self.chunksize)\n        if (not blob):\n            break\n        blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)\n        self.AddBlob(blob_id, len(blob))\n    self.Flush()", "docstring": "Create new blob hashes and append to BlobImage.\n\nWe don't support writing at arbitrary file offsets, but this method provides\na convenient way to add blobs for a new file, or append content to an\nexisting one.\n\nArgs:\nsrc_fd: source file handle open for read\n\nRaises:\nIOError: if blob has already been finalized.", "source": "codesearchnet"}
{"code": "def _ExpectedKeysForEntry(self, entry):\n    return [entry.name]", "docstring": "Generate a list of expected cache keys for this type of map.\n\nArgs:\nentry: A PasswdMapEntry\n\nReturns:\nA list of strings", "source": "github-repos"}
{"code": "def __init__(self, generator_function, *args, **kwargs):\n        \n        if not inspect.isgeneratorfunction(generator_function):\n            raise TypeError(\"generator_function must be a generator function.\")\n\n        self.generator_function = generator_function\n\n        if sys.version_info[0] < 3:\n            self.arguments = inspect.getcallargs(\n                self.generator_function,\n                *args,\n                **kwargs\n            )\n        else:\n            signature = inspect.signature(self.generator_function)\n            bound_arguments = signature.bind(*args, **kwargs)\n            self.arguments = bound_arguments.arguments", "docstring": "Init a new GeneratorContainer.\n\nArgs:\ngenerator_function(func): The generator function.\n*args: The arguments passed to the generator function.\n**kwargs: The keyword arguments passed to the generator function.", "source": "juraj-google-style"}
{"code": "def from_utc_datetime(cls, dt: datetime.datetime) -> 'Timestamp':\n    if dt.tzinfo is None:\n        raise ValueError('dt has no timezone info ' + '(https:\n    if dt.tzinfo != pytz.utc and dt.tzinfo != datetime.timezone.utc:\n        raise ValueError('dt not in UTC: %s' % dt)\n    duration = dt - cls._epoch_datetime_utc()\n    return Timestamp(duration.total_seconds())", "docstring": "Create a ``Timestamp`` instance from a ``datetime.datetime`` object.\n\nArgs:\ndt: A ``datetime.datetime`` object in UTC (offset-aware).", "source": "github-repos"}
{"code": "def line_init(xo: int, yo: int, xd: int, yd: int) -> None:\n    lib.TCOD_line_init(xo, yo, xd, yd)", "docstring": "Initilize a line whose points will be returned by `line_step`.\n\nThis function does not return anything on its own.\n\nDoes not include the origin point.\n\nArgs:\nxo (int): X starting point.\nyo (int): Y starting point.\nxd (int): X destination point.\nyd (int): Y destination point.\n\n.. deprecated:: 2.0\nUse `line_iter` instead.", "source": "codesearchnet"}
{"code": "def simulate_w(self,\n                   index: int,\n                   half_turns: float,\n                   axis_half_turns: float):\n        \n        args = self._shard_num_args({\n            'index': index,\n            'half_turns': half_turns,\n            'axis_half_turns': axis_half_turns\n        })\n        if index >= self._num_shard_qubits:\n            \n            self._pool.map(_clear_scratch, args)\n            self._pool.map(_w_between_shards, args)\n            self._pool.map(_copy_scratch_to_state, args)\n        else:\n            \n            self._pool.map(_w_within_shard, args)\n\n        \n        norm_squared = np.sum(self._pool.map(_norm_squared, args))\n        args = self._shard_num_args({\n            'norm_squared': norm_squared\n        })\n        self._pool.map(_renorm, args)", "docstring": "Simulate a single qubit rotation gate about a X + b Y.\n\nThe gate simulated is U = exp(-i pi/2 W half_turns)\nwhere W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y\n\nArgs:\nindex: The qubit to act on.\nhalf_turns: The amount of the overall rotation, see the formula\nabove.\naxis_half_turns: The angle between the pauli X and Y operators,\nsee the formula above.", "source": "juraj-google-style"}
{"code": "async def with_call(self, request_iterator, timeout=None, metadata=None, credentials=None):\n    fut = self.future(request_iterator, timeout, metadata, credentials)\n    try:\n        result = (await fut)\n        return (result, fut)\n    finally:\n        if (not fut.done()):\n            fut.cancel()", "docstring": "Synchronously invokes the underlying RPC on the client.\n\nArgs:\nrequest_iterator: An ASYNC iterator that yields request values for the RPC.\ntimeout: An optional duration of time in seconds to allow for the RPC.\nIf None, the timeout is considered infinite.\nmetadata: Optional :term:`metadata` to be transmitted to the\nservice-side of the RPC.\ncredentials: An optional CallCredentials for the RPC.\n\nReturns:\nThe response value for the RPC and a Call object for the RPC.\n\nRaises:\nRpcError: Indicating that the RPC terminated with non-OK status. The\nraised RpcError will also be a Call for the RPC affording the RPC's\nmetadata, status code, and details.", "source": "codesearchnet"}
{"code": "def get_image_path(image_lists, label_name, index, image_dir, category):\n    if (label_name not in image_lists):\n        tf.logging.fatal('Label does not exist %s.', label_name)\n    label_lists = image_lists[label_name]\n    if (category not in label_lists):\n        tf.logging.fatal('Category does not exist %s.', category)\n    category_list = label_lists[category]\n    if (not category_list):\n        tf.logging.fatal('Label %s has no images in the category %s.', label_name, category)\n    mod_index = (index % len(category_list))\n    base_name = category_list[mod_index]\n    sub_dir = label_lists['dir']\n    full_path = os.path.join(image_dir, sub_dir, base_name)\n    return full_path", "docstring": "Returns a path to an image for a label at the given index.\n\nArgs:\nimage_lists: OrderedDict of training images for each label.\nlabel_name: Label string we want to get an image for.\nindex: Int offset of the image we want. This will be moduloed by the\navailable number of images for the label, so it can be arbitrarily large.\nimage_dir: Root folder string of the subfolders containing the training\nimages.\ncategory: Name string of set to pull images from - training, testing, or\nvalidation.\n\nReturns:\nFile system path string to an image that meets the requested parameters.", "source": "codesearchnet"}
{"code": "def filter_by_hoys(self, hoys):\n    existing_hoys = self.header.analysis_period.hoys\n    hoys = [h for h in hoys if (h in existing_hoys)]\n    _moys = tuple((int((hour * 60)) for hour in hoys))\n    return self.filter_by_moys(_moys)", "docstring": "Filter the Data Collection based onva list of hoys.\n\nArgs:\nhoys: A List of hours of the year 0..8759\n\nReturn:\nA new Data Collection with filtered data", "source": "codesearchnet"}
{"code": "def latent_to_dist(name, x, hparams, output_channels=None):\n    architecture = hparams.get('latent_architecture', 'single_conv')\n    depth = hparams.get('latent_encoder_depth', 1)\n    pre_output_channels = hparams.get('latent_pre_output_channels', 512)\n    width = hparams.get('latent_encoder_width', 512)\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        x_shape = common_layers.shape_list(x)\n        if (output_channels is None):\n            output_channels = x_shape[(- 1)]\n        if (architecture == 'single_conv'):\n            return single_conv_dist('single_conv', x, output_channels)\n        if (architecture == 'glow_nn'):\n            mean_log_scale = x\n            for layer in range(1, (depth + 1)):\n                mid_channels = (pre_output_channels \n                mean_log_scale = conv_block(('glow_nn_%d' % layer), mean_log_scale, mid_channels=mid_channels)\n            mean_log_scale = conv('glow_nn_zeros', mean_log_scale, filter_size=[3, 3], stride=[1, 1], output_channels=(2 * output_channels), apply_actnorm=False, conv_init='zeros')\n        elif (architecture == 'glow_resnet'):\n            h = x\n            for layer in range(depth):\n                h3 = conv_stack(('latent_resnet_%d' % layer), h, mid_channels=width, output_channels=x_shape[(- 1)], dropout=hparams.coupling_dropout)\n                h += h3\n            mean_log_scale = conv('glow_res_final', h, conv_init='zeros', output_channels=(2 * output_channels), apply_actnorm=False)\n        else:\n            raise ValueError(('expected architecture to be single_conv or glow_nn got %s' % architecture))\n        mean = mean_log_scale[(:, :, :, 0::2)]\n        log_scale = mean_log_scale[(:, :, :, 1::2)]\n        return tfp.distributions.Normal(mean, tf.exp(log_scale))", "docstring": "Map latent to the mean and log-scale of a Gaussian.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor of shape (NHWC)\nhparams: HParams.\nlatent_architecture - can be \"single_conv\", \"glow_nn\" or \"glow_resnet\",\ndefault = single_conv\nlatent_encoder_depth - int, depth of architecture, valid if\nlatent_architecture is \"glow_nn\" or \"glow_resnet\".\nlatent_pre_output_channels - 512, valid only when latent_architecture\nis \"glow_nn\".\nlatent_encoder_width - 512, maximum width of the network\noutput_channels: int, number of output channels of the mean (and std).\nif not provided, set it to be the output channels of x.\nReturns:\ndist: instance of tfp.distributions.Normal\nRaises:\nValueError: If architecture not in [\"single_conv\", \"glow_nn\"]", "source": "codesearchnet"}
{"code": "def register_date_conversion_handler(date_specifier_patterns):\n    \n\n    def _decorator(func):\n        global DATE_SPECIFIERS_CONVERSION_HANDLERS\n        DATE_SPECIFIERS_CONVERSION_HANDLERS[DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func\n        return func\n\n    return _decorator", "docstring": "Decorator for registering handlers that convert text dates to dates.\n\nArgs:\ndate_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    try:\n        file_header = self._ReadFileHeader(file_object)\n    except (ValueError, errors.ParseError):\n        raise errors.UnableToParseFile('Unable to parse file header.')\n    tables = self._ReadTablesArray(file_object, file_header.tables_array_offset)\n    table = tables.get(self._RECORD_TYPE_APPLICATION_PASSWORD, None)\n    if table:\n        for record in table.records:\n            self._ParseApplicationPasswordRecord(parser_mediator, record)\n    table = tables.get(self._RECORD_TYPE_INTERNET_PASSWORD, None)\n    if table:\n        for record in table.records:\n            self._ParseInternetPasswordRecord(parser_mediator, record)", "docstring": "Parses a MacOS keychain file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def make_acro(past, prefix, s):  \n    \n\n    def _make_acro(s, t=0):\n        \n\n        \n        v = ['a', 'e', 'i', 'o', 'u', 'y']\n        c = [chr(x) for x in six_xrange(ord('a'), ord('z') + 1) if chr(x) not in v]\n\n        s = re.sub(r'\\W+', '', s.lower())\n\n        vx = [x for x in s if x in v]  \n        cx = [x for x in s if x in c]  \n\n        if s.startswith('Mc'):\n\n            if t < 1:\n                return 'Mc' + v[0]\n            if t < 2:\n                return 'Mc' + c[0]\n\n        if s[0] in v:  \n            if t < 1:\n                return vx[0] + cx[0] + cx[1]\n            if t < 2:\n                return vx[0] + vx[1] + cx[0]\n\n        if s[0] in c and s[1] in c:  \n            if t < 1:\n                return cx[0] + cx[1] + vx[0]\n            if t < 2:\n                return cx[0] + cx[1] + cx[2]\n\n        if t < 3:\n            return cx[0] + vx[0] + cx[1]\n        if t < 4:\n            return cx[0] + cx[1] + cx[2]\n        if t < 5:\n            return cx[0] + vx[0] + vx[1]\n        if t < 6:\n            return cx[0] + cx[1] + cx[-1]\n\n        \n\n        if t < 7:\n            return s[0:3]\n        if t < 8:\n            return s[1:4]\n        if t < 9:\n            return s[2:5]\n        if t < 10:\n            return s[3:6]\n\n        return None\n\n    for t in six_xrange(11): \n\n        try:\n            a = _make_acro(s, t)\n\n            if a is not None:\n                if prefix:\n                    aps = prefix + a\n                else:\n                    aps = a\n\n                if aps not in past:\n                    past.add(aps)\n                    return a\n\n        except IndexError:\n            pass\n\n    raise Exception('Could not get acronym.')", "docstring": "Create a three letter acronym from the input string s.\n\nArgs:\npast: A set object, for storing acronyms that have already been created\nprefix: A prefix added to the acronym before storing in the set\ns: The string to create the acronym from.", "source": "juraj-google-style"}
{"code": "def _extract_id_token(id_token):\n    \n    if type(id_token) == bytes:\n        segments = id_token.split(b'.')\n    else:\n        segments = id_token.split(u'.')\n\n    if len(segments) != 3:\n        raise VerifyJwtTokenError(\n            'Wrong number of segments in token: {0}'.format(id_token))\n\n    return json.loads(\n        _helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1])))", "docstring": "Extract the JSON payload from a JWT.\n\nDoes the extraction w/o checking the signature.\n\nArgs:\nid_token: string or bytestring, OAuth 2.0 id_token.\n\nReturns:\nobject, The deserialized JSON payload.", "source": "juraj-google-style"}
{"code": "def call_and_grads(fn: TransitionOperator, args: Union[(Tuple[Any], Any)]) -> Tuple[(tf.Tensor, TensorNest, TensorNest)]:\n    with tf.GradientTape() as tape:\n        tape.watch(args)\n        (ret, extra) = call_fn(fn, args)\n    grads = tape.gradient(ret, args)\n    return (ret, extra, grads)", "docstring": "Calls `fn` and returns the gradients with respect to `fn`'s first output.\n\nArgs:\nfn: A `TransitionOperator`.\nargs: Arguments to `fn`\n\nReturns:\nret: First output of `fn`.\nextra: Second output of `fn`.\ngrads: Gradients of `ret` with respect to `args`.", "source": "codesearchnet"}
{"code": "def increment_max_models(self, increment: int):\n    if self._max_models is None:\n        self._max_models = 0\n    self._max_models += increment", "docstring": "Increments the number of models that this instance of a _ModelManager is\nable to hold. If it is never called, no limit is imposed.\nArgs:\nincrement: the amount by which we are incrementing the number of models.", "source": "github-repos"}
{"code": "def GetTaskPendingMerge(self, current_task):\n    next_task = self._tasks_pending_merge.PeekTask()\n    if (not next_task):\n        return None\n    if (current_task and (next_task.merge_priority > current_task.merge_priority)):\n        return None\n    with self._lock:\n        next_task = self._tasks_pending_merge.PopTask()\n    self._tasks_merging[next_task.identifier] = next_task\n    return next_task", "docstring": "Retrieves the first task that is pending merge or has a higher priority.\n\nThis function will check if there is a task with a higher merge priority\nthan the current_task being merged. If so, that task with the higher\npriority is returned.\n\nArgs:\ncurrent_task (Task): current task being merged or None if no such task.\n\nReturns:\nTask: the next task to merge or None if there is no task pending merge or\nwith a higher priority.", "source": "codesearchnet"}
{"code": "def whole_subnet_maker(ip_addr, cidr):\n    \n    if ucast_ip(ip_addr, False) == False and mcast_ip(ip_addr, False) == False:\n        LOGGER.critical('Function whole_subnet_maker ip_addr {item}'.format(item=ip_addr))\n        raise ValueError(\"Not a good ipv4 address\")\n    if not cidr_check(cidr, False):\n        LOGGER.critical('Function whole_subnet_maker cidr {item}'.format(item=cidr))\n        raise ValueError(\"Not a good CIDR value should be 0 to 32\")\n\n    def subnet_corrector(octet, cidr):\n        \n        cidr_int = int(cidr)\n        octet_int = int(octet)\n        if cidr_int >= 24:\n            cidr_int = __mask_conversion[cidr_int][\"OCT4\"]\n        elif cidr_int >= 16:\n            cidr_int = __mask_conversion[cidr_int][\"OCT3\"]\n        elif cidr_int >= 8:\n            cidr_int = __mask_conversion[cidr_int][\"OCT2\"]\n        elif cidr_int >= 1:\n            cidr_int = __mask_conversion[cidr_int][\"OCT1\"]\n        cidr_count = 0\n        cidr_v = 256 - cidr_int\n        cidr_2 = 256 - cidr_int\n        while cidr_count < 300:\n            if octet_int >= cidr_count and octet_int <= cidr_2:\n                    cidr_int = cidr_count\n            cidr_count = cidr_2\n            cidr_2 = cidr_2 + cidr_v\n        return str(cidr_int)\n    ip_addr_split = ip_addr.split(\".\")\n    if int(cidr) >= 24:\n        octet = subnet_corrector(ip_addr_split[3], cidr)\n        completed = ip_addr_split[0] + \".\" + ip_addr_split[1] + \".\" + ip_addr_split[2] + \".\" + octet\n        return completed\n    elif int(cidr) >= 16:\n        octet = subnet_corrector(ip_addr_split[2], cidr)\n        completed = ip_addr_split[0] + \".\" + ip_addr_split[1] + \".\" + octet + \".0\"\n        return completed\n    elif int(cidr) >= 8:\n        octet = subnet_corrector(ip_addr_split[1], cidr)\n        completed = ip_addr_split[0] + \".\" + octet + \".0.0\"\n        return completed\n    elif int(cidr) >= 1:\n        octet = subnet_corrector(ip_addr_split[0], cidr)\n        completed = octet + \".0.0.0\"\n        return completed\n    else:\n        return \"0.0.0.0\"", "docstring": "Function to return a whole subnet value from a IP address and CIDR pair\nArgs:\nip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1\ncidr: CIDR value of 0 to 32\n\nReturns: returns the corrected whole subnet", "source": "juraj-google-style"}
{"code": "def __init__(self, control_handler, data_plane_handler, state, provision_info):\n    self.control_handler = control_handler\n    self.data_plane_handler = data_plane_handler\n    self.state = state\n    self.provision_info = provision_info\n    with WorkerHandler._lock:\n        WorkerHandler._worker_id_counter += 1\n        self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter", "docstring": "Initialize a WorkerHandler.\n\nArgs:\ncontrol_handler:\ndata_plane_handler (data_plane.DataChannel):\nstate:\nprovision_info:", "source": "github-repos"}
{"code": "def dumps(collection: BioCCollection, pretty_print: bool = True) -> str:\n    \n    doc = etree.ElementTree(BioCXMLEncoder().encode(collection))\n    s = etree.tostring(doc, pretty_print=pretty_print, encoding=collection.encoding,\n                       standalone=collection.standalone)\n    return s.decode(collection.encoding)", "docstring": "Serialize ``collection`` to a BioC formatted ``str``.\n\nArgs:\ncollection: the BioC collection\npretty_print: enables formatted XML\n\nReturns:\na BioC formatted ``str``", "source": "juraj-google-style"}
{"code": "def parse(self):\n    (options, args) = self.parser.parse_args()\n    self._set_attributes(args, options)\n    return self._create_dictionary()", "docstring": "Parse command line arguments and options.\n\nReturns:\nDictionary containing all given command line arguments and options.", "source": "codesearchnet"}
{"code": "def GetStorageMediaImageTypeIndicators(cls, path_spec, resolver_context=None):\n    \n    if (cls._storage_media_image_remainder_list is None or\n        cls._storage_media_image_store is None):\n      specification_store, remainder_list = cls._GetSpecificationStore(\n          definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE)\n      cls._storage_media_image_remainder_list = remainder_list\n      cls._storage_media_image_store = specification_store\n\n    if cls._storage_media_image_scanner is None:\n      cls._storage_media_image_scanner = cls._GetSignatureScanner(\n          cls._storage_media_image_store)\n\n    return cls._GetTypeIndicators(\n        cls._storage_media_image_scanner, cls._storage_media_image_store,\n        cls._storage_media_image_remainder_list, path_spec,\n        resolver_context=resolver_context)", "docstring": "Determines if a file contains a supported storage media image types.\n\nArgs:\npath_spec (PathSpec): path specification.\nresolver_context (Optional[Context]): resolver context, where None\nrepresents the built-in context which is not multi process safe.\n\nReturns:\nlist[str]: supported format type indicators.", "source": "juraj-google-style"}
{"code": "def expect_true(condition, msg, extras=None):\n    \n    try:\n        asserts.assert_true(condition, msg, extras)\n    except signals.TestSignal as e:\n        logging.exception('Expected a `True` value, got `False`.')\n        recorder.add_error(e)", "docstring": "Expects an expression evaluates to True.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nArgs:\nexpr: The expression that is evaluated.\nmsg: A string explaining the details in case of failure.\nextras: An optional field for extra information to be included in test\nresult.", "source": "juraj-google-style"}
{"code": "def value_to_message(self, value):\n        \n        if not isinstance(value, self.type):\n            raise EncodeError('Expected type %s, got %s: %r' %\n                              (self.type.__name__,\n                               type(value).__name__,\n                               value))\n        return value", "docstring": "Convert a value instance to a message.\n\nUsed by serializers to convert Python user types to underlying\nmessages for transmission.\n\nArgs:\nvalue: A value of type self.type.\n\nReturns:\nAn instance of type self.message_type.", "source": "juraj-google-style"}
{"code": "def class_label_top(body_output, targets, model_hparams, vocab_size):\n    del targets\n    with tf.variable_scope(('class_label_modality_%d_%d' % (vocab_size, model_hparams.hidden_size))):\n        x = body_output\n        x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n        res = tf.layers.dense(x, vocab_size)\n        return tf.expand_dims(res, 3)", "docstring": "Transform inputs from model space to target space.\n\nAverage over inner dims and a linear layer to logits.\n\nArgs:\nbody_output: A Tensor with shape [batch, ?, ?, body_output_size].\ntargets:\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\na Tensors, each with shape [batch_size, 1, 1, 1, vocab_size]", "source": "codesearchnet"}
{"code": "def _prepare(f, xs_dtypes, xs_shapes):\n    if context.executing_eagerly():\n\n        def decorated_eager(*xs_data):\n            return f(*map(ops.convert_to_tensor, xs_data))\n        return decorated_eager\n    xs = [array_ops.placeholder(x_dtype, shape=x_shape) for x_dtype, x_shape in zip(xs_dtypes, xs_shapes)]\n    y = f(*xs)\n    sess = ops.get_default_session()\n\n    def decorated_graph(*xs_data):\n        xs_data = [_to_numpy(a) for a in xs_data]\n        return sess.run(y, feed_dict=dict(zip(xs, xs_data)))\n    return decorated_graph", "docstring": "Return a function that executes 'f'.\n\nIn TF 2.x, this is the same as `f`.\nIn TF 1.x, returns a Python function that executes the graph defined by `f`\nin a Session.\n\nArgs:\nf: the function.\nxs_dtypes: dtypes of f's arguments.\nxs_shapes: shapes of f's arguments.\n\nReturns:", "source": "github-repos"}
{"code": "def _AddVolume(self, volume):\n    \n    if volume.identifier in self._volumes:\n      raise KeyError(\n          'Volume object already set for volume identifier: {0:s}'.format(\n              volume.identifier))\n\n    self._volumes[volume.identifier] = volume\n    self._volume_identifiers.append(volume.identifier)", "docstring": "Adds a volume.\n\nArgs:\nvolume (Volume): a volume.\n\nRaises:\nKeyError: if volume is already set for the corresponding volume\nidentifier.", "source": "juraj-google-style"}
{"code": "def get_wigner_seitz_cell(self) -> List[List[np.ndarray]]:\n    vec1 = self._matrix[0]\n    vec2 = self._matrix[1]\n    vec3 = self._matrix[2]\n    list_k_points = []\n    for (i, j, k) in itertools.product([(- 1), 0, 1], [(- 1), 0, 1], [(- 1), 0, 1]):\n        list_k_points.append((((i * vec1) + (j * vec2)) + (k * vec3)))\n    from scipy.spatial import Voronoi\n    tess = Voronoi(list_k_points)\n    to_return = []\n    for r in tess.ridge_dict:\n        if ((r[0] == 13) or (r[1] == 13)):\n            to_return.append([tess.vertices[i] for i in tess.ridge_dict[r]])\n    return to_return", "docstring": "Returns the Wigner-Seitz cell for the given lattice.\n\nReturns:\nA list of list of coordinates.\nEach element in the list is a \"facet\" of the boundary of the\nWigner Seitz cell. For instance, a list of four coordinates will\nrepresent a square facet.", "source": "codesearchnet"}
{"code": "def profile_args(_args):\n    if ((_args.get('app', {}).get('optional') is not None) or (_args.get('app', {}).get('required') is not None)):\n        app_args_optional = _args.get('app', {}).get('optional', {})\n        app_args_required = _args.get('app', {}).get('required', {})\n        default_args = _args.get('default', {})\n        _args = {}\n        _args.update(app_args_optional)\n        _args.update(app_args_required)\n        _args.update(default_args)\n    elif ((_args.get('app') is not None) and (_args.get('default') is not None)):\n        app_args = _args.get('app', {})\n        default_args = _args.get('default', {})\n        _args = {}\n        _args.update(app_args)\n        _args.update(default_args)\n    return _args", "docstring": "Return args for v1, v2, or v3 structure.\n\nArgs:\n_args (dict): The args section from the profile.\n\nReturns:\ndict: A collapsed version of the args dict.", "source": "codesearchnet"}
{"code": "def get_template(template_file='', **kwargs):\n    \n    template = get_template_object(template_file)\n\n    LOG.info('Rendering template %s', template.filename)\n    for key, value in kwargs.items():\n        LOG.debug('%s => %s', key, value)\n\n    rendered_json = template.render(**kwargs)\n    LOG.debug('Rendered JSON:\\n%s', rendered_json)\n\n    return rendered_json", "docstring": "Get the Jinja2 template and renders with dict _kwargs_.\n\nArgs:\ntemplate_file (str): name of the template file\nkwargs: Keywords to use for rendering the Jinja2 template.\n\nReturns:\nString of rendered JSON template.", "source": "juraj-google-style"}
{"code": "def determine_framework(model: str, framework: Optional[str]=None) -> str:\n    if framework is not None:\n        return framework\n    framework_map = {'pt': 'PyTorch', 'tf': 'TensorFlow'}\n    exporter_map = {'pt': 'torch', 'tf': 'tf2onnx'}\n    if os.path.isdir(model):\n        if os.path.isfile(os.path.join(model, WEIGHTS_NAME)):\n            framework = 'pt'\n        elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)):\n            framework = 'tf'\n        else:\n            raise FileNotFoundError(f'Cannot determine framework from given checkpoint location. There should be a {WEIGHTS_NAME} for PyTorch or {TF2_WEIGHTS_NAME} for TensorFlow.')\n        logger.info(f'Local {framework_map[framework]} model found.')\n    elif is_torch_available():\n        framework = 'pt'\n    elif is_tf_available():\n        framework = 'tf'\n    else:\n        raise OSError('Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.')\n    logger.info(f'Framework not requested. Using {exporter_map[framework]} to export to ONNX.')\n    return framework", "docstring": "Determines the framework to use for the export.\n\nThe priority is in the following order:\n1. User input via `framework`.\n2. If local checkpoint is provided, use the same framework as the checkpoint.\n3. Available framework in environment, with priority given to PyTorch\n\nArgs:\nmodel (`str`):\nThe name of the model to export.\nframework (`str`, *optional*, defaults to `None`):\nThe framework to use for the export. See above for priority if none provided.\n\nReturns:\nThe framework to use for the export.", "source": "github-repos"}
{"code": "def _get_single_set(self, num_objects, num_features):\n    data = np.random.uniform((- 1), 1, size=(num_objects, num_features))\n    distances = spdistance.squareform(spdistance.pdist(data))\n    distance_idx = np.argsort(distances)\n    nth = np.random.randint(0, num_objects)\n    nth_furthest = distance_idx[(:, nth)]\n    reference = np.random.randint(0, num_objects)\n    labels = nth_furthest[reference]\n    object_ids = np.identity(num_objects)\n    nth_matrix = np.zeros((num_objects, num_objects))\n    nth_matrix[(:, nth)] = 1\n    reference_object = np.zeros((num_objects, num_objects))\n    reference_object[(:, reference)] = 1\n    inputs = np.concatenate([data, object_ids, reference_object, nth_matrix], axis=(- 1))\n    inputs = np.random.permutation(inputs)\n    labels = np.expand_dims(labels, axis=0)\n    return (inputs.astype(np.float32), labels.astype(np.float32))", "docstring": "Generate one input sequence and output label.\n\nEach sequences of objects has a feature that consists of the feature vector\nfor that object plus the encoding for its ID, the reference vector ID and\nthe n-th value relative ID for a total feature size of:\n\n`num_objects` * 3  + `num_features`\n\nArgs:\nnum_objects: int. number of objects in the sequence.\nnum_features: int. feature size of each object.\n\nReturns:\n1. np.ndarray (`num_objects`, (`num_features` + 3 * `num_objects`)).\n2. np.ndarray (1,). Output object reference label.", "source": "codesearchnet"}
{"code": "def _get_table_names(statement):\n    \n\n    parts = statement.to_unicode().split()\n\n    tables = set()\n\n    for i, token in enumerate(parts):\n        if token.lower() == 'from' or token.lower().endswith('join'):\n            tables.add(parts[i + 1].rstrip(';'))\n\n    return list(tables)", "docstring": "Returns table names found in the query.\n\nNOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well.\n\nArgs:\nstatement (sqlparse.sql.Statement): parsed by sqlparse sql statement.\n\nReturns:\nlist of str", "source": "juraj-google-style"}
{"code": "def passthrough_context_definition(context_params):\n        \n\n        check.inst_param(context_params, 'context', ExecutionContext)\n        context_definition = PipelineContextDefinition(context_fn=lambda *_args: context_params)\n        return {DEFAULT_CONTEXT_NAME: context_definition}", "docstring": "Create a context definition from a pre-existing context. This can be useful\nin testing contexts where you may want to create a context manually and then\npass it into a one-off PipelineDefinition\n\nArgs:\ncontext (ExecutionContext): The context that will provided to the pipeline.\nReturns:\nPipelineContextDefinition: The passthrough context definition.", "source": "juraj-google-style"}
{"code": "def unapprove(self, **kwargs):\n    path = ('%s/%s/unapprove' % (self.manager.path, self.get_id()))\n    data = {}\n    server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs)\n    self._update_attrs(server_data)", "docstring": "Unapprove the merge request.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabMRApprovalError: If the unapproval failed", "source": "codesearchnet"}
{"code": "def serialCmdPwdAuth(self, password_str):\n    result = False\n    try:\n        req_start = (('0150310228' + binascii.hexlify(password_str)) + '2903')\n        req_crc = self.calc_crc16(req_start[2:].decode('hex'))\n        req_str = (req_start + req_crc)\n        self.m_serial_port.write(req_str.decode('hex'))\n        if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n            ekm_log((('Password accepted (' + self.getContext()) + ')'))\n            result = True\n        else:\n            ekm_log((('Password call failure no 06(' + self.getContext()) + ')'))\n    except:\n        ekm_log((('Password call failure by exception(' + self.getContext()) + ')'))\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    return result", "docstring": "Password step of set commands\n\nThis method is normally called within another serial command, so it\ndoes not issue a termination string.  Any default password is set\nin the caller parameter list, never here.\n\nArgs:\npassword_str (str): Required password.\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"}
{"code": "def log(msg, level=0):\n    red = '\\x1b[91m'\n    endc = '\\x1b[0m'\n    cfg = {'version': 1, 'disable_existing_loggers': False, 'formatters': {'stdout': {'format': '[%(levelname)s]: %(asctime)s - %(message)s', 'datefmt': '%x %X'}, 'stderr': {'format': ((red + '[%(levelname)s]: %(asctime)s - %(message)s') + endc), 'datefmt': '%x %X'}}, 'handlers': {'stdout': {'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'stdout'}, 'stderr': {'class': 'logging.StreamHandler', 'level': 'ERROR', 'formatter': 'stderr'}}, 'loggers': {'info': {'handlers': ['stdout'], 'level': 'INFO', 'propagate': True}, 'error': {'handlers': ['stderr'], 'level': 'ERROR', 'propagate': False}}}\n    dictConfig(cfg)\n    lg = ('info' if (level == 0) else 'error')\n    lvl = (20 if (level == 0) else 40)\n    logger = logging.getLogger(lg)\n    logger.log(lvl, msg)", "docstring": "Logs a message to the console, with optional level paramater\n\nArgs:\n- msg (str): message to send to console\n- level (int): log level; 0 for info, 1 for error (default = 0)", "source": "codesearchnet"}
{"code": "def day(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `day`'.format(value))\n        if (value < 1):\n            raise ValueError('value need to be greater or equal 1 for field `day`')\n        if (value > 31):\n            raise ValueError('value need to be smaller 31 for field `day`')\n    self._day = value", "docstring": "Corresponds to IDD Field `day`\n\nArgs:\nvalue (int): value for IDD Field `day`\nvalue >= 1\nvalue <= 31\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def update_profiles(adapter):\n    for case in adapter.cases():\n        if case.get('profile_path'):\n            profiles = get_profiles(adapter, case['profile_path'])\n            profiled_individuals = deepcopy(case['individuals'])\n            for individual in profiled_individuals:\n                ind_id = individual['ind_id']\n                try:\n                    profile = profiles[ind_id]\n                    individual['profile'] = profile\n                except KeyError:\n                    LOG.warning(f\"sample IDs in vcf does not match for case {case['case_id']}\")\n            updated_case = deepcopy(case)\n            updated_case['individuals'] = profiled_individuals\n            adapter.add_case(updated_case, update=True)", "docstring": "For all cases having vcf_path, update the profile string for the samples\n\nArgs:\nadapter (MongoAdapter): Adapter to mongodb", "source": "codesearchnet"}
{"code": "def foreach_model(self, fn):\n    results = ray.get([w.foreach_model.remote(fn) for w in self.workers])\n    out = []\n    for r in results:\n        out.extend(r)\n    return out", "docstring": "Apply the given function to each model replica in each worker.\n\nReturns:\nList of results from applying the function.", "source": "codesearchnet"}
{"code": "def find_bucket(self, bucketing_id, parent_id, traffic_allocations):\n    bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)\n    bucketing_number = self._generate_bucket_value(bucketing_key)\n    self.config.logger.debug(('Assigned bucket %s to user with bucketing ID \"%s\".' % (bucketing_number, bucketing_id)))\n    for traffic_allocation in traffic_allocations:\n        current_end_of_range = traffic_allocation.get('endOfRange')\n        if (bucketing_number < current_end_of_range):\n            return traffic_allocation.get('entityId')\n    return None", "docstring": "Determine entity based on bucket value and traffic allocations.\n\nArgs:\nbucketing_id: ID to be used for bucketing the user.\nparent_id: ID representing group or experiment.\ntraffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.\n\nReturns:\nEntity ID which may represent experiment or variation.", "source": "codesearchnet"}
{"code": "def _make_ctx_options(ctx_options, config_cls=ContextOptions):\n  \n  if not ctx_options:\n    return None\n  for key in list(ctx_options):\n    translation = _OPTION_TRANSLATIONS.get(key)\n    if translation:\n      if translation in ctx_options:\n        raise ValueError('Cannot specify %s and %s at the same time' %\n                         (key, translation))\n      ctx_options[translation] = ctx_options.pop(key)\n  return config_cls(**ctx_options)", "docstring": "Helper to construct a ContextOptions object from keyword arguments.\n\nArgs:\nctx_options: A dict of keyword arguments.\nconfig_cls: Optional Configuration class to use, default ContextOptions.\n\nNote that either 'options' or 'config' can be used to pass another\nConfiguration object, but not both.  If another Configuration\nobject is given it provides default values.\n\nReturns:\nA Configuration object, or None if ctx_options is empty.", "source": "juraj-google-style"}
{"code": "def random_tril_matrix(shape, dtype, force_well_conditioned=False, remove_upper=True):\n    with ops.name_scope('random_tril_matrix'):\n        tril = random_normal(shape, dtype=dtype)\n        if remove_upper:\n            tril = array_ops.matrix_band_part(tril, -1, 0)\n        if force_well_conditioned:\n            maxval = ops.convert_to_tensor(np.sqrt(2.0), dtype=dtype.real_dtype)\n            diag = random_sign_uniform(shape[:-1], dtype=dtype, minval=1.0, maxval=maxval)\n            tril = array_ops.matrix_set_diag(tril, diag)\n        return tril", "docstring": "[batch] lower triangular matrix.\n\nArgs:\nshape:  `TensorShape` or Python `list`.  Shape of the returned matrix.\ndtype:  `TensorFlow` `dtype` or Python dtype\nforce_well_conditioned:  Python `bool`. If `True`, returned matrix will have\neigenvalues with modulus in `(1, 2)`.  Otherwise, eigenvalues are unit\nnormal random variables.\nremove_upper:  Python `bool`.\nIf `True`, zero out the strictly upper triangle.\nIf `False`, the lower triangle of returned matrix will have desired\nproperties, but will not have the strictly upper triangle zero'd out.\n\nReturns:\n`Tensor` with desired shape and dtype.", "source": "github-repos"}
{"code": "def disconnect_container_from_network(self, container, net_id, force=False):\n    data = {'Container': container}\n    if force:\n        if version_lt(self._version, '1.22'):\n            raise InvalidVersion('Forced disconnect was introduced in API 1.22')\n        data['Force'] = force\n    url = self._url('/networks/{0}/disconnect', net_id)\n    res = self._post_json(url, data=data)\n    self._raise_for_status(res)", "docstring": "Disconnect a container from a network.\n\nArgs:\ncontainer (str): container ID or name to be disconnected from the\nnetwork\nnet_id (str): network ID\nforce (bool): Force the container to disconnect from a network.\nDefault: ``False``", "source": "codesearchnet"}
{"code": "def list_vms(access_token, subscription_id, resource_group):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines', '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List VMs in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body of a list of VM model views.", "source": "codesearchnet"}
{"code": "def create_jlink(self, args):\n    jlink = pylink.JLink()\n    jlink.open(args.serial_no, args.ip_addr)\n    if (hasattr(args, 'tif') and (args.tif is not None)):\n        if (args.tif.lower() == 'swd'):\n            jlink.set_tif(pylink.JLinkInterfaces.SWD)\n        else:\n            jlink.set_tif(pylink.JLinkInterfaces.JTAG)\n    if (hasattr(args, 'device') and (args.device is not None)):\n        jlink.connect(args.device)\n    return jlink", "docstring": "Creates an instance of a J-Link from the given arguments.\n\nArgs:\nself (Command): the ``Command`` instance\nargs (Namespace): arguments to construct the ``JLink`` instance from\n\nReturns:\nAn instance of a ``JLink``.", "source": "codesearchnet"}
{"code": "def is_duplicated(self, item):\n    if isinstance(item, dict):\n        hashable_item = json.dumps(item, sort_keys=True)\n    elif isinstance(item, list):\n        hashable_item = frozenset(item)\n    else:\n        hashable_item = item\n    if (hashable_item in self._cache):\n        return True\n    else:\n        if ((self.cache_capacity > 0) and (len(self._cache) >= self.cache_capacity)):\n            self._cache.popitem(False)\n        self._cache[hashable_item] = 1\n        return False", "docstring": "Check whether the item has been in the cache\n\nIf the item has not been seen before, then hash it and put it into\nthe cache, otherwise indicates the item is duplicated. When the cache\nsize exceeds capacity, discard the earliest items in the cache.\n\nArgs:\nitem (object): The item to be checked and stored in cache. It must\nbe immutable or a list/dict.\nReturns:\nbool: Whether the item has been in cache.", "source": "codesearchnet"}
{"code": "def avg(vals, count=None):\n     \n    sum = 0\n    for v in vals:\n        sum += v\n    if count is None:\n        count = len(vals)\n    return float(sum) / count", "docstring": "Returns the average value\n\nArgs:\nvals: List of numbers to calculate average from.\ncount: Int of total count that vals was part of.\n\nReturns:\nFloat average value throughout a count.", "source": "juraj-google-style"}
{"code": "def list_to_file(orig_list, file_name, file_location):\n    \n    file = __os.path.join(file_location, file_name)\n\n    def add_line_break(list_line):\n        \n        list_line = ('%s\\n' % (list_line,))\n        return list_line\n    write_file = open(file, \"a\")\n    for orig_list_line in orig_list:\n        write_file.write(add_line_break(str(orig_list_line)))\n    write_file.close()\n    return file_name", "docstring": "Function to export a list to a text file\nArgs:\norig_list: The list you want exported\nfile_name: The name of the exported file\nfile_location: The location of the file, derive from the os module\n\nReturns: returns the filename info", "source": "juraj-google-style"}
{"code": "def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):\n  \n  with tf.variable_scope(\"latent_logits\"):\n    latents_logits = tf.layers.dense(latents_pred, vocab_size,\n                                     name=\"logits_dense\")\n    if hparams.logit_normalization:\n      latents_logits *= tf.rsqrt(1e-8 +\n                                 tf.reduce_mean(tf.square(latents_logits)))\n    loss = tf.nn.softmax_cross_entropy_with_logits_v2(\n        labels=latents_discrete_hot, logits=latents_logits)\n\n    \n    \n    sample = multinomial_sample(latents_logits,\n                                vocab_size,\n                                hparams.sampling_method,\n                                hparams.sampling_temp)\n    return sample, loss", "docstring": "Latent prediction and loss.\n\nArgs:\nlatents_pred: Tensor of shape [..., depth].\nlatents_discrete_hot: Tensor of shape [..., vocab_size].\nvocab_size: an int representing the vocab size.\nhparams: HParams.\n\nReturns:\nsample: Tensor of shape [...], a sample from a multinomial distribution.\nloss: Tensor of shape [...], the softmax cross-entropy.", "source": "juraj-google-style"}
{"code": "def push(self, x):\n    if not math.isnan(x):\n        self._sorted_items.add(x)\n    if self._window_mode == WindowMode.SLIDING:\n        if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):\n            self._sorted_items.discard(old_x)\n        super().push(x)", "docstring": "Pushes a new value, maintains the sorted list, and manages the window.\n\nArgs:\nx: The new value to be pushed.", "source": "github-repos"}
{"code": "def safe_url(self, url, errors='strict'):\n    if (url is not None):\n        url = quote(self.s(url, errors=errors), safe='~')\n    return url", "docstring": "URL encode value for safe HTTP request.\n\nArgs:\nurl (string): The string to URL Encode.\n\nReturns:\n(string): The urlencoded string.", "source": "codesearchnet"}
{"code": "def _submit_request(self, url, params=None, data=None, headers=None, method='GET'):\n    if (headers is None):\n        headers = {}\n    if (self._auth_header is not None):\n        headers['Authorization'] = self._auth_header\n    try:\n        if (method == 'POST'):\n            result = requests.post(url, params=params, data=data, headers=headers)\n        elif (method == 'GET'):\n            result = requests.get(url, params=params, data=data, headers=headers)\n        result.raise_for_status()\n        return (result.status_code, result.json())\n    except requests.exceptions.HTTPError as e:\n        return (e.response.status_code, e.response.reason)\n    except RemoteDisconnected as e:\n        raise CliException(e)\n    except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL) as e:\n        raise CliException(e)\n    except requests.exceptions.ConnectionError as e:\n        raise CliException('Unable to connect to \"{}\": make sure URL is correct'.format(self._base_url))", "docstring": "Submits the given request, and handles the errors appropriately.\n\nArgs:\nurl (str): the request to send.\nparams (dict): params to be passed along to get/post\ndata (bytes): the data to include in the request.\nheaders (dict): the headers to include in the request.\nmethod (str): the method to use for the request, \"POST\" or \"GET\".\n\nReturns:\ntuple of (int, str): The response status code and the json parsed\nbody, or the error message.\n\nRaises:\n`CliException`: If any issues occur with the URL.", "source": "codesearchnet"}
{"code": "def getprops(self, prop_names):\n    attempts = DEFAULT_GETPROPS_ATTEMPTS\n    results = {}\n    for attempt in range(attempts):\n        raw_output = self.shell(['getprop'], timeout=DEFAULT_GETPROP_TIMEOUT_SEC)\n        properties = self._parse_getprop_output(raw_output)\n        if properties:\n            for name in prop_names:\n                if name in properties:\n                    results[name] = properties[name]\n            break\n        if attempt < attempts - 1:\n            time.sleep(DEFAULT_GETPROPS_RETRY_SLEEP_SEC)\n    return results", "docstring": "Get multiple properties of the device.\n\nThis is a convenience wrapper for `adb shell getprop`. Use this to\nreduce the number of adb calls when getting multiple properties.\n\nArgs:\nprop_names: list of strings, the names of the properties to get.\n\nReturns:\nA dict containing name-value pairs of the properties requested, if\nthey exist.", "source": "github-repos"}
{"code": "def set_child_node(self, name, node):\n        \n        assert isinstance(node, TreeMapNode)\n        self._nodes[name] = node\n        node.set_parent(self)", "docstring": "Add one child node to this node.\n\nArgs:\nname (str): Name of the child.\nnode (TreeMapNode): Node to add.\n\nWarning:\nNo test is done to see whether or not a node was already attached with that name. If this is the case, the\nnew node takes the place of the old one that is now unreachable. See :meth:`set_unique_child_node`.", "source": "juraj-google-style"}
{"code": "def _PrintSessionsOverview(self, storage_reader):\n    \n    table_view = views.ViewsFactory.GetTableView(\n        self._views_format_type, title='Sessions')\n\n    for session in storage_reader.GetSessions():\n      start_time = timelib.Timestamp.CopyToIsoFormat(\n          session.start_time)\n      session_identifier = uuid.UUID(hex=session.identifier)\n      session_identifier = '{0!s}'.format(session_identifier)\n      table_view.AddRow([session_identifier, start_time])\n\n    table_view.Write(self._output_writer)", "docstring": "Prints a sessions overview.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "juraj-google-style"}
{"code": "def getJsonFromApi(view, request):\n\t\n\tjsonText = view(request)\n\tjsonText = json.loads(jsonText.content.decode('utf-8'))\n\treturn jsonText", "docstring": "Return json from querying Web Api\n\nArgs:\nview: django view function.\nrequest: http request object got from django.\n\nReturns: json format dictionary", "source": "juraj-google-style"}
{"code": "def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):\n  \n  with tf.name_scope(\"loss\", [logits, labels]):\n    logits, labels = _pad_tensors_to_same_length(logits, labels)\n\n    \n    with tf.name_scope(\"smoothing_cross_entropy\", [logits, labels]):\n      confidence = 1.0 - smoothing\n      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)\n      soft_targets = tf.one_hot(\n          tf.cast(labels, tf.int32),\n          depth=vocab_size,\n          on_value=confidence,\n          off_value=low_confidence)\n      xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(\n          logits=logits, labels=soft_targets)\n\n      \n      \n      normalizing_constant = -(\n          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *\n          low_confidence * tf.log(low_confidence + 1e-20))\n      xentropy -= normalizing_constant\n\n    weights = tf.to_float(tf.not_equal(labels, 0))\n    return xentropy * weights, weights", "docstring": "Calculate cross entropy loss while ignoring padding.\n\nArgs:\nlogits: Tensor of size [batch_size, length_logits, vocab_size]\nlabels: Tensor of size [batch_size, length_labels]\nsmoothing: Label smoothing constant, used to determine the on and off values\nvocab_size: int size of the vocabulary\nReturns:\nReturns a float32 tensor with shape\n[batch_size, max(length_logits, length_labels)]", "source": "juraj-google-style"}
{"code": "def _order_pases(self, passes):\n        \n\n        passes = set(passes)\n\n        pass_deps = {}\n\n        for opt in passes:\n            _, before, after = self._known_passes[opt]\n\n            if opt not in pass_deps:\n                pass_deps[opt] = set()\n\n            for after_pass in after:\n                pass_deps[opt].add(after_pass)\n\n            \n            \n            for other in before:\n                if other not in passes:\n                    continue\n\n                if other not in pass_deps:\n                    pass_deps[other] = set()\n\n                pass_deps[other].add(opt)\n\n        return toposort_flatten(pass_deps)", "docstring": "Topologically sort optimization passes.\n\nThis ensures that the resulting passes are run in order\nrespecting before/after constraints.\n\nArgs:\npasses (iterable): An iterable of pass names that should\nbe included in the optimization passes run.", "source": "juraj-google-style"}
{"code": "def __security_definitions_descriptor(self, issuers):\n    if (not issuers):\n        result = {_DEFAULT_SECURITY_DEFINITION: {'authorizationUrl': '', 'flow': 'implicit', 'type': 'oauth2', 'x-google-issuer': 'https:\n        return result\n    result = {}\n    for (issuer_key, issuer_value) in issuers.items():\n        result[issuer_key] = {'authorizationUrl': '', 'flow': 'implicit', 'type': 'oauth2', 'x-google-issuer': issuer_value.issuer}\n        if issuer_value.jwks_uri:\n            result[issuer_key]['x-google-jwks_uri'] = issuer_value.jwks_uri\n    return result", "docstring": "Create a descriptor for the security definitions.\n\nArgs:\nissuers: dict, mapping issuer names to Issuer tuples\n\nReturns:\nThe dict representing the security definitions descriptor.", "source": "codesearchnet"}
{"code": "def tool_cancellation(self) -> str | None:\n    if not self.part.function_response:\n        return None\n    if self.part.function_response.name != 'tool_cancellation':\n        return None\n    if not self.part.function_response.response:\n        return None\n    return self.part.function_response.response.get('function_call_id', None)", "docstring": "Returns an id of a function call to be cancelled.\n\nIf the part is not a tool cancellation request, returns None.\n\nReturns:\nThe id of the function call to be cancelled or None if this part is not a\ntool cancellation from the model.", "source": "github-repos"}
{"code": "def decode_field(self, field, value):\n    \n    \n    \n    \n    if isinstance(field, messages.BytesField):\n      try:\n        \n        \n        padded_value = self.__pad_value(str(value), 4, '=')\n        return base64.urlsafe_b64decode(padded_value)\n      except (TypeError, UnicodeEncodeError), err:\n        raise messages.DecodeError('Base64 decoding error: %s' % err)\n\n    return super(EndpointsProtoJson, self).decode_field(field, value)", "docstring": "Decode a JSON value to a python value.\n\nArgs:\nfield: A ProtoRPC field instance.\nvalue: A serialized JSON value.\n\nReturns:\nA Python value compatible with field.", "source": "juraj-google-style"}
{"code": "def _CheckLogicalLines(self, llines, list_of_expected):\n    actual = []\n    for lline in llines:\n        filtered_values = [ft.value for ft in lline.tokens if ft.name not in pytree_utils.NONSEMANTIC_TOKENS]\n        actual.append((lline.depth, filtered_values))\n    self.assertEqual(list_of_expected, actual)", "docstring": "Check that the given LogicalLines match expectations.\n\nArgs:\nllines: list of LogicalLine\nlist_of_expected: list of (depth, values) pairs. Non-semantic tokens are\nfiltered out from the expected values.", "source": "github-repos"}
{"code": "def as_text_with_reasoning(content: ProcessorContentTypes, *, strict: bool=False) -> tuple[str, str]:\n    text_parts = []\n    thought_parts = []\n    for mime, p in ProcessorContent(content).items():\n        if is_text(mime):\n            if p.part.thought:\n                thought_parts.append(p.text)\n            else:\n                text_parts.append(p.text)\n        elif strict:\n            raise ValueError(f'Unsupported content type {mime}.')\n    return (''.join(text_parts), ''.join(thought_parts))", "docstring": "Returns a tuple of the final and reasoning text representing content.\n\nThe returned tuple contains two elements:\n- The first element (index 0) is a string representing the main text\nextracted\nfrom the input `content`.\n- The second element (index 1) is a string representing the reasoning or\nthoughts associated with the input `content`.\n\nArgs:\ncontent: The content to process. This can be of various types as defined by\n`ProcessorContentTypes`.\nstrict: If True, unsupported content types will raise a ValueError.\nOtherwise, they will be ignored.\n\nReturns:\nA tuple containing two strings: (text, reasoning).", "source": "github-repos"}
{"code": "def _operation_status_message(self):\n    metadata = self._op['metadata']\n    if (not self._op['done']):\n        if (('events' in metadata) and metadata['events']):\n            last_event = metadata['events'][(- 1)]\n            msg = last_event['description']\n            ds = last_event['startTime']\n        else:\n            msg = 'Pending'\n            ds = metadata['createTime']\n    else:\n        ds = metadata['endTime']\n        if ('error' in self._op):\n            msg = self._op['error']['message']\n        else:\n            msg = 'Success'\n    return (msg, google_base.parse_rfc3339_utc_string(ds))", "docstring": "Returns the most relevant status string and last updated date string.\n\nThis string is meant for display only.\n\nReturns:\nA printable status string and date string.", "source": "codesearchnet"}
{"code": "def Main(url):\n    \n    \n    web_scrape = WebScraping()\n    \n    web_scrape.readable_web_pdf = WebPDFReading()\n    \n    document = web_scrape.scrape(url)\n    \n    auto_abstractor = AutoAbstractor()\n    \n    auto_abstractor.tokenizable_doc = MeCabTokenizer()\n    \n    abstractable_doc = TopNRankAbstractor()\n    \n    result_dict = auto_abstractor.summarize(document, abstractable_doc)\n    \n    [print(sentence) for sentence in result_dict[\"summarize_result\"]]", "docstring": "Entry Point.\n\nArgs:\nurl:    PDF url.", "source": "juraj-google-style"}
{"code": "def get_all_plugin_assets(graph=None):\n    if graph is None:\n        graph = ops.get_default_graph()\n    out = []\n    for name in graph.get_collection(_PLUGIN_ASSET_PREFIX):\n        collection = graph.get_collection(_PLUGIN_ASSET_PREFIX + name)\n        if len(collection) != 1:\n            raise ValueError('Collection for %s had %d items, expected 1' % (name, len(collection)))\n        out.append(collection[0])\n    return out", "docstring": "Retrieve all PluginAssets stored in the graph collection.\n\nArgs:\ngraph: Optionally, the graph to get assets from. If unspecified, the default\ngraph is used.\n\nReturns:\nA list with all PluginAsset instances in the graph.\n\nRaises:\nValueError: if we unexpectedly find a collection with the wrong number of\nPluginAssets.", "source": "github-repos"}
{"code": "def match_validator(expression):\n    if isinstance(expression, str):\n        compiled = re.compile(expression)\n    elif hasattr(expression, 'match'):\n        compiled = expression\n    else:\n        raise TypeError('Provided match is nor a string nor has a match method (like re expressions)')\n\n    def validator(value):\n        if (not compiled.match(value)):\n            raise ValidationError('{} does not match pattern: {}'.format(value, (compiled.pattern if hasattr(compiled, 'pattern') else compiled)))\n    return validator", "docstring": "Return validator function that will check if matches given expression.\n\nArgs:\nmatch: if string then this will be converted to regular expression\nusing ``re.compile``. Can be also any object that has ``match()``\nmethod like already compiled regular regular expression or custom\nmatching object/class.", "source": "codesearchnet"}
{"code": "def _GetSpecificationStore(cls, format_category):\n    \n    specification_store = specification.FormatSpecificationStore()\n    remainder_list = []\n\n    for analyzer_helper in iter(cls._analyzer_helpers.values()):\n      if not analyzer_helper.IsEnabled():\n        continue\n\n      if format_category in analyzer_helper.format_categories:\n        format_specification = analyzer_helper.GetFormatSpecification()\n\n        if format_specification is not None:\n          specification_store.AddSpecification(format_specification)\n        else:\n          remainder_list.append(analyzer_helper)\n\n    return specification_store, remainder_list", "docstring": "Retrieves the specification store for specified format category.\n\nArgs:\nformat_category (str): format category.\n\nReturns:\ntuple[FormatSpecificationStore, list[AnalyzerHelper]]: a format\nspecification store and remaining analyzer helpers that do not have\na format specification.", "source": "juraj-google-style"}
{"code": "def Relay(self, inventory):\n    inventory = InvPayload(type=inventory.InventoryType, hashes=[inventory.Hash.ToBytes()])\n    m = Message('inv', inventory)\n    self.SendSerializedMessage(m)\n    return True", "docstring": "Wrap the inventory in a InvPayload object and send it over the write to the remote node.\n\nArgs:\ninventory:\n\nReturns:\nbool: True (fixed)", "source": "codesearchnet"}
{"code": "def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:\n    kwargs.update({'presence': presence})\n    return self.api_call('users.setPresence', json=kwargs)", "docstring": "Manually sets user presence.\n\nArgs:\npresence (str): Either 'auto' or 'away'.", "source": "codesearchnet"}
{"code": "def _sample(self, nmr_samples, thinning=1, return_output=True):\n    kernel_data = self._get_kernel_data(nmr_samples, thinning, return_output)\n    sample_func = self._get_compute_func(nmr_samples, thinning, return_output)\n    sample_func.evaluate(kernel_data, self._nmr_problems, use_local_reduction=all((env.is_gpu for env in self._cl_runtime_info.cl_environments)), cl_runtime_info=self._cl_runtime_info)\n    self._sampling_index += (nmr_samples * thinning)\n    if return_output:\n        return (kernel_data['samples'].get_data(), kernel_data['log_likelihoods'].get_data(), kernel_data['log_priors'].get_data())", "docstring": "Sample the given number of samples with the given thinning.\n\nIf ``return_output`` we will return the samples, log likelihoods and log priors. If not, we will advance the\nstate of the sampler without returning storing the samples.\n\nArgs:\nnmr_samples (int): the number of iterations to advance the sampler\nthinning (int): the thinning to apply\nreturn_output (boolean): if we should return the output\n\nReturns:\nNone or tuple: if ``return_output`` is True three ndarrays as (samples, log_likelihoods, log_priors)", "source": "codesearchnet"}
{"code": "def fit1d(samples, e, remove_zeros=False, **kw):\n    samples = samples[(~ np.isnan(samples))]\n    length = (len(e) - 1)\n    (hist, _) = np.histogramdd(samples, (e,))\n    hist = (hist / sum(hist))\n    (basis, knots) = spline_base1d(length, marginal=hist, **kw)\n    non_zero = (hist > 0)\n    model = linear_model.BayesianRidge()\n    if remove_zeros:\n        model.fit(basis[(non_zero, :)], hist[(:, np.newaxis)][(non_zero, :)])\n    else:\n        hist[(~ non_zero)] = np.finfo(float).eps\n        model.fit(basis, hist[(:, np.newaxis)])\n    return (model.predict(basis), hist, knots)", "docstring": "Fits a 1D distribution with splines.\n\nInput:\nsamples: Array\nArray of samples from a probability distribution\ne: Array\nEdges that define the events in the probability\ndistribution. For example, e[0] < x <= e[1] is\nthe range of values that are associated with the\nfirst event.\n**kw: Arguments that are passed on to spline_bse1d.\n\nReturns:\ndistribution: Array\nAn array that gives an estimate of probability for\nevents defined by e.\nknots: Array\nSequence of knots that were used for the spline basis", "source": "codesearchnet"}
{"code": "def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, scope=None, syntax=None):\n    if package:\n        desc_name = '.'.join((package, desc_proto.name))\n    else:\n        desc_name = desc_proto.name\n    if (file_desc is None):\n        file_name = None\n    else:\n        file_name = file_desc.name\n    if (scope is None):\n        scope = {}\n    nested = [self._ConvertMessageDescriptor(nested, desc_name, file_desc, scope, syntax) for nested in desc_proto.nested_type]\n    enums = [self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope) for enum in desc_proto.enum_type]\n    fields = [self._MakeFieldDescriptor(field, desc_name, index) for (index, field) in enumerate(desc_proto.field)]\n    extensions = [self._MakeFieldDescriptor(extension, desc_name, index, is_extension=True) for (index, extension) in enumerate(desc_proto.extension)]\n    oneofs = [descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)), index, None, [], desc.options) for (index, desc) in enumerate(desc_proto.oneof_decl)]\n    extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]\n    if extension_ranges:\n        is_extendable = True\n    else:\n        is_extendable = False\n    desc = descriptor.Descriptor(name=desc_proto.name, full_name=desc_name, filename=file_name, containing_type=None, fields=fields, oneofs=oneofs, nested_types=nested, enum_types=enums, extensions=extensions, options=_OptionsOrNone(desc_proto), is_extendable=is_extendable, extension_ranges=extension_ranges, file=file_desc, serialized_start=None, serialized_end=None, syntax=syntax)\n    for nested in desc.nested_types:\n        nested.containing_type = desc\n    for enum in desc.enum_types:\n        enum.containing_type = desc\n    for (field_index, field_desc) in enumerate(desc_proto.field):\n        if field_desc.HasField('oneof_index'):\n            oneof_index = field_desc.oneof_index\n            oneofs[oneof_index].fields.append(fields[field_index])\n            fields[field_index].containing_oneof = oneofs[oneof_index]\n    scope[_PrefixWithDot(desc_name)] = desc\n    self._descriptors[desc_name] = desc\n    return desc", "docstring": "Adds the proto to the pool in the specified package.\n\nArgs:\ndesc_proto: The descriptor_pb2.DescriptorProto protobuf message.\npackage: The package the proto should be located in.\nfile_desc: The file containing this message.\nscope: Dict mapping short and full symbols to message and enum types.\nsyntax: string indicating syntax of the file (\"proto2\" or \"proto3\")\n\nReturns:\nThe added descriptor.", "source": "codesearchnet"}
{"code": "def asdate(self):\n    return datetime.date(self.year, self.month, self.day)", "docstring": "Return this datetime_tz as a date object.\n\nReturns:\nThis datetime_tz as a date object.", "source": "codesearchnet"}
{"code": "def insert_varargs_and_kwargs(self, args: Iterable[str]):\n    varargs_names = []\n    kwargs_names = []\n    for name in args:\n        if self.has_param(name):\n            continue\n        if pytd_utils.ANON_PARAM.match(name):\n            varargs_names.append(name)\n        else:\n            kwargs_names.append(name)\n    new_param_names = self.param_names + tuple(sorted(varargs_names)) + tuple(sorted(kwargs_names))\n    return self._replace(param_names=new_param_names)", "docstring": "Insert varargs and kwargs from args into the signature.\n\nArgs:\nargs: An iterable of passed arg names.\n\nReturns:\nA copy of this signature with the passed varargs and kwargs inserted.", "source": "github-repos"}
{"code": "def valid(self, name):\n    \n    name = re.sub('[^0-9a-zA-Z_]', '', name)\n    if re.match('[0-9]', name):\n      name = '_' + name\n    return name", "docstring": "Ensure a variable name is valid.\n\nNote: Assumes variable names are ASCII, which isn't necessarily true in\nPython 3.\n\nArgs:\nname: A proposed variable name.\n\nReturns:\nA valid version of the name.", "source": "juraj-google-style"}
{"code": "def get_filetypes(self):\n    if (not self.is_requestable()):\n        return [resource.get_file_type() for resource in self.get_resources()]\n    return self._get_stringlist_from_commastring('file_types')", "docstring": "Return list of filetypes in your data\n\nReturns:\nList[str]: List of filetypes", "source": "codesearchnet"}
{"code": "def read_eof(self, echo=None):\n        \n\n        d = b''\n        while True:\n            try:\n                d += self.read(1, echo)\n            except EOFError:\n                return d", "docstring": "Read until the channel is closed.\n\nArgs:\necho(bool): Whether to write the read data to stdout.\n\nReturns:\nbytes: The read data.", "source": "juraj-google-style"}
{"code": "def stat(filename, retry_params=None, _account_id=None):\n    common.validate_file_path(filename)\n    api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id)\n    (status, headers, content) = api.head_object(api_utils._quote_filename(filename))\n    errors.check_status(status, [200], filename, resp_headers=headers, body=content)\n    file_stat = common.GCSFileStat(filename=filename, st_size=common.get_stored_content_length(headers), st_ctime=common.http_time_to_posix(headers.get('last-modified')), etag=headers.get('etag'), content_type=headers.get('content-type'), metadata=common.get_metadata(headers))\n    return file_stat", "docstring": "Get GCSFileStat of a Google Cloud storage file.\n\nArgs:\nfilename: A Google Cloud Storage filename of form '/bucket/filename'.\nretry_params: An api_utils.RetryParams for this call to GCS. If None,\nthe default one is used.\n_account_id: Internal-use only.\n\nReturns:\na GCSFileStat object containing info about this file.\n\nRaises:\nerrors.AuthorizationError: if authorization failed.\nerrors.NotFoundError: if an object that's expected to exist doesn't.", "source": "codesearchnet"}
{"code": "def load_cobra_model(self, model):\n    self.model = ModelPro(model)\n    for g in self.model.genes:\n        if self.genes_dir:\n            g.root_dir = self.genes_dir\n        g.protein.pdb_file_type = self.pdb_file_type\n    self.genes = self.model.genes\n    log.info('{}: loaded model'.format(model.id))\n    log.info('{}: number of reactions'.format(len(self.model.reactions)))\n    log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))\n    log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model, custom_spont_id=self.custom_spont_id)))\n    log.info('{}: number of metabolites'.format(len(self.model.metabolites)))\n    log.warning('IMPORTANT: All Gene objects have been transformed into GenePro objects, and will be for any new ones')", "docstring": "Load a COBRApy Model object into the GEM-PRO project.\n\nArgs:\nmodel (Model): COBRApy ``Model`` object", "source": "codesearchnet"}
{"code": "def match_all_args(ctx: 'context.Context', node: cfg.CFGNode, func: '_function_base.NativeFunction|_interpreter_function.InterpreterFunction', args: 'Args') -> 'tuple[Args, Sequence[tuple[Exception, str, _base.BaseValue]]]':\n    positional_names = func.get_positional_names()\n    needs_checking = True\n    errors = []\n    while needs_checking:\n        try:\n            func.match_args(node, args)\n        except error_types.FailedFunctionCall as e:\n            if isinstance(e, error_types.WrongKeywordArgs):\n                errors.append((e, e.extra_keywords[0], None))\n                for i in e.extra_keywords:\n                    args = args.delete_namedarg(i)\n            elif isinstance(e, error_types.DuplicateKeyword):\n                errors.append((e, e.duplicate, None))\n                args = args.delete_namedarg(e.duplicate)\n            elif isinstance(e, error_types.MissingParameter):\n                errors.append((e, e.missing_parameter, None))\n                args = args.replace_namedarg(e.missing_parameter, ctx.new_unsolvable(node))\n            elif isinstance(e, error_types.WrongArgTypes):\n                arg_name = e.bad_call.bad_param.name\n                for name, value in e.bad_call.passed_args:\n                    if name != arg_name:\n                        continue\n                    errors.append((e, name, value))\n                    try:\n                        pos = positional_names.index(name)\n                    except ValueError:\n                        args = args.replace_namedarg(name, ctx.new_unsolvable(node))\n                    else:\n                        args = args.replace_posarg(pos, ctx.new_unsolvable(node))\n                    break\n                else:\n                    raise AssertionError(f'Mismatched parameter {arg_name} not found in passed_args') from e\n            else:\n                raise\n        else:\n            needs_checking = False\n    return (args, errors)", "docstring": "Call match_args multiple times to find all type errors.\n\nArgs:\nctx: The abstract context.\nnode: The current CFG node.\nfunc: An abstract function\nargs: An Args object to match against func\n\nReturns:\nA tuple of (new_args, errors)\nwhere new_args = args with all incorrectly typed values set to Any\nerrors = a list of [(type mismatch error, arg name, value)]\n\nReraises any error that is not InvalidParameters", "source": "github-repos"}
{"code": "def wait(self, timeout=None):\n    with self._put_wait_lock, self._queue_lock:\n        logging.info('Waiting for all global closures to be finished.')\n        while not self._error and (not self._queue.empty() or self._inflight_closure_count > 0):\n            if not self._stop_waiting_condition.wait(timeout=timeout):\n                return False\n        self._raise_if_error()\n        return True", "docstring": "Wait for all closures to be finished before returning.\n\nIf `mark_failed` was called before or during `wait`, the error from the\nfirst invocation of `mark_failed` will be raised.\n\nArgs:\ntimeout: A float specifying a timeout for the wait in seconds.\n\nReturns:\nTrue unless the given timeout expired, in which case it returns False.", "source": "github-repos"}
{"code": "def __init__(self, target_pixels=None, **kwargs):\n    \n    super(DiabeticRetinopathyDetectionConfig, self).__init__(**kwargs)\n    self._target_pixels = target_pixels", "docstring": "BuilderConfig for DiabeticRetinopathyDetection.\n\nArgs:\ntarget_pixels: If given, rescale the images so that the total number of\npixels is roughly this value.\n**kwargs: keyword arguments forward to super.", "source": "juraj-google-style"}
{"code": "def _unpack_sequence(self, state, n_before, n_after=-1):\n    assert n_after >= -1\n    state, seq = state.pop()\n    options = []\n    nontuple_seq = self.ctx.program.NewVariable()\n    has_slurp = n_after > -1\n    count = n_before + max(n_after, 0)\n    nondeterministic_iterable = False\n    for b in abstract_utils.expand_type_parameter_instances(seq.bindings):\n        if b.data.full_name in ('builtins.set', 'builtins.frozenset'):\n            nondeterministic_iterable = True\n        tup = self._get_literal_sequence(b.data)\n        if tup is not None:\n            if has_slurp and len(tup) >= count:\n                options.append(self._restructure_tuple(state, tup, n_before, n_after))\n                continue\n            elif len(tup) == count:\n                options.append(tup)\n                continue\n            else:\n                self.ctx.errorlog.bad_unpacking(self.frames, len(tup), count)\n        if b.IsVisible(state.node):\n            nontuple_seq.PasteBinding(b, state.node)\n    if nontuple_seq.bindings:\n        state, itr = self._get_iter(state, nontuple_seq)\n        state, itr_result = self._call(state, itr, '__next__', ())\n    elif not options:\n        itr_result = self.ctx.new_unsolvable(state.node)\n    else:\n        itr_result = None\n    if itr_result:\n        option = [itr_result for _ in range(count)]\n        if has_slurp:\n            slurp = self.ctx.convert.build_list_of_type(state.node, itr_result)\n            option = option[:n_before] + [slurp] + option[n_before:]\n        options.append(option)\n    values = tuple((self.ctx.convert.build_content(value, discard_concrete_values=False) for value in zip(*options)))\n    if len(values) > 1 and nondeterministic_iterable:\n        self.ctx.errorlog.nondeterministic_unpacking(self.frames)\n    for value in reversed(values):\n        if not value.bindings:\n            value = self.ctx.convert.empty.to_variable(state.node)\n        state = state.push(value)\n    return state", "docstring": "Pops a tuple (or other iterable) and pushes it onto the VM's stack.\n\nSupports destructuring assignment with potentially a single list variable\nthat slurps up the remaining elements:\n1. a, b, c = ...  # UNPACK_SEQUENCE\n2. a, *b, c = ... # UNPACK_EX\n\nArgs:\nstate: The current VM state\nn_before: Number of elements before the list (n_elements for case 1)\nn_after: Number of elements after the list (-1 for case 1)\n\nReturns:\nThe new state.", "source": "github-repos"}
{"code": "def cancel(self):\n        \n        if not self.id:\n            raise WorkflowError('Workflow is not running.  Cannot cancel.')\n\n        if self.batch_values:\n            self.workflow.batch_workflow_cancel(self.id)\n        else:\n            self.workflow.cancel(self.id)", "docstring": "Cancel a running workflow.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _start_app_and_connect(self):\n        \n        self._check_app_installed()\n        self.disable_hidden_api_blacklist()\n\n        persists_shell_cmd = self._get_persist_command()\n        \n        \n        \n        \n        self.log.info('Launching snippet apk %s with protocol %d.%d',\n                      self.package, _PROTOCOL_MAJOR_VERSION,\n                      _PROTOCOL_MINOR_VERSION)\n        cmd = _LAUNCH_CMD % (persists_shell_cmd, self.package)\n        start_time = time.time()\n        self._proc = self._do_start_app(cmd)\n\n        \n        line = self._read_protocol_line()\n        match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line)\n        if not match or match.group(1) != '1':\n            raise ProtocolVersionError(self._ad, line)\n\n        line = self._read_protocol_line()\n        match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)\n        if not match:\n            raise ProtocolVersionError(self._ad, line)\n        self.device_port = int(match.group(1))\n\n        \n        self.host_port = utils.get_available_host_port()\n        self._adb.forward(\n            ['tcp:%d' % self.host_port,\n             'tcp:%d' % self.device_port])\n        self.connect()\n\n        \n        self.log.debug('Snippet %s started after %.1fs on host port %s',\n                       self.package, time.time() - start_time, self.host_port)", "docstring": "Starts snippet apk on the device and connects to it.\n\nAfter prechecks, this launches the snippet apk with an adb cmd in a\nstanding subprocess, checks the cmd response from the apk for protocol\nversion, then sets up the socket connection over adb port-forwarding.\n\nArgs:\nProtocolVersionError, if protocol info or port info cannot be\nretrieved from the snippet apk.", "source": "juraj-google-style"}
{"code": "def _contains(self, item):\n        \n        if self is item:\n            return True\n        for m in self.modules:\n            if item in m:\n                return True\n        for p in self.packages:\n            if item in p:\n                return True\n        return False", "docstring": "Whether given item is contained inside the node modules/packages.\n\nArgs:\nitem (Package/Module): a package or module.\n\nReturns:\nbool: True if self is item or item in self's packages/modules.", "source": "juraj-google-style"}
{"code": "def _get_sorted_inputs(filename, delimiter=\"\\n\"):\n  \n  tf.logging.info(\"Getting sorted inputs\")\n  with tf.gfile.Open(filename) as f:\n    text = f.read()\n    records = text.split(delimiter)\n    inputs = [record.strip() for record in records]\n    \n    if not inputs[-1]:\n      inputs.pop()\n  input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)]\n  sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))\n  \n  sorted_keys = {}\n  sorted_inputs = []\n  for i, (index, _) in enumerate(sorted_input_lens):\n    sorted_inputs.append(inputs[index])\n    sorted_keys[index] = i\n  return sorted_inputs, sorted_keys", "docstring": "Returning inputs sorted according to decreasing length.\n\nThis causes inputs of similar lengths to be processed in the same batch,\nfacilitating early stopping for short sequences.\n\nLonger sequences are sorted first so that if you're going to get OOMs,\nyou'll see it in the first batch.\n\nArgs:\nfilename: path to file with inputs, 1 per line.\ndelimiter: str, delimits records in the file.\n\nReturns:\na sorted list of inputs", "source": "juraj-google-style"}
{"code": "def mean_absolute_error(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    return backend.mean(math_ops.abs(y_pred - y_true), axis=-1)", "docstring": "Computes the mean absolute error between labels and predictions.\n\n`loss = mean(abs(y_true - y_pred), axis=-1)`\n\nStandalone usage:\n\n>>> y_true = np.random.randint(0, 2, size=(2, 3))\n>>> y_pred = np.random.random(size=(2, 3))\n>>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)\n>>> assert loss.shape == (2,)\n>>> assert np.array_equal(\n...     loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))\n\nArgs:\ny_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\ny_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\nReturns:\nMean absolute error values. shape = `[batch_size, d0, .. dN-1]`.", "source": "github-repos"}
{"code": "def launch_batch_workflow(self, batch_workflow):\n        \n\n        \n        url = '%(base_url)s/batch_workflows' % {\n            'base_url': self.base_url\n        }\n        try:\n            r = self.gbdx_connection.post(url, json=batch_workflow)\n            batch_workflow_id = r.json()['batch_workflow_id']\n            return batch_workflow_id\n        except TypeError as e:\n            self.logger.debug('Batch Workflow not launched, reason: {0}'.format(e))", "docstring": "Launches GBDX batch workflow.\n\nArgs:\nbatch_workflow (dict): Dictionary specifying batch workflow tasks.\n\nReturns:\nBatch Workflow id (str).", "source": "juraj-google-style"}
{"code": "def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-08):\n    return (len(find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)) > 0)", "docstring": "Tests if a particular fractional coord is within a fractional coord_list.\n\nArgs:\nfcoord_list: List of fractional coords to test\nfcoord: A specific fractional coord to test.\natol: Absolute tolerance. Defaults to 1e-8.\n\nReturns:\nTrue if coord is in the coord list.", "source": "codesearchnet"}
{"code": "def get_models(self, uniprot_acc):\n    if (uniprot_acc in self.all_models):\n        return self.all_models[uniprot_acc]\n    else:\n        log.error('{}: no SWISS-MODELs available'.format(uniprot_acc))\n        return None", "docstring": "Return all available models for a UniProt accession number.\n\nArgs:\nuniprot_acc (str): UniProt ACC/ID\n\nReturns:\ndict: All available models in SWISS-MODEL for this UniProt entry", "source": "codesearchnet"}
{"code": "def unflat_take(items_list, unflat_index_list):\n    return [(unflat_take(items_list, xs) if isinstance(xs, list) else take(items_list, xs)) for xs in unflat_index_list]", "docstring": "r\"\"\"\nReturns nested subset of items_list\n\nArgs:\nitems_list (list):\nunflat_index_list (list): nested list of indices\n\nCommandLine:\npython -m utool.util_list --exec-unflat_take\n\nSeeAlso:\nut.take\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> items_list = [1, 2, 3, 4, 5]\n>>> unflat_index_list = [[0, 1], [2, 3], [0, 4]]\n>>> result = unflat_take(items_list, unflat_index_list)\n>>> print(result)\n[[1, 2], [3, 4], [1, 5]]", "source": "codesearchnet"}
{"code": "def check_config(config):\n    \n    for section, expected_section_keys in SECTION_KEYS.items():\n        section_content = config.get(section)\n        if not section_content:\n            raise ConfigurationError(\"Config file badly formed! Section {} is missing.\"\n                                     .format(section))\n        elif not _section_is_healthy(section_content, expected_section_keys):\n            raise ConfigurationError(\"The {} section of the configuration file is badly formed!\"\n                                     .format(section))", "docstring": "Check that all sections of the config contain the keys that they should.\n\nArgs:\nconfig (defaultdict): A defaultdict.\nRaises:\nConfigurationError", "source": "juraj-google-style"}
{"code": "def get_builtin_code_from_operator_code(opcode):\n    if hasattr(opcode, 'BuiltinCode') and callable(opcode.BuiltinCode):\n        return max(opcode.BuiltinCode(), opcode.DeprecatedBuiltinCode())\n    return max(opcode.builtinCode, opcode.deprecatedBuiltinCode)", "docstring": "Return the builtin code of the given operator code.\n\nThe following method is introduced to resolve op builtin code shortage\nproblem. The new builtin operator will be assigned to the extended builtin\ncode field in the flatbuffer schema. Those methods helps to hide builtin code\ndetails.\n\nArgs:\nopcode: Operator code.\n\nReturns:\nThe builtin code of the given operator code.", "source": "github-repos"}
{"code": "def get(self, identifier, default=None):\n    split_label = (tuple(identifier.split('.')) if isinstance(identifier, str) else tuple(identifier))\n    if (len(split_label) == 1):\n        identifier = split_label[0]\n        return self.__dict__.get(identifier, default)\n    path_item = self\n    for identifier in split_label:\n        if ((path_item == default) or (path_item is None)):\n            return default\n        path_item = path_item.get(identifier, default)\n    return path_item", "docstring": "Get a node of the AttrTree using its path string.\n\nArgs:\nidentifier: Path string of the node to return\ndefault: Value to return if no node is found\n\nReturns:\nThe indexed node of the AttrTree", "source": "codesearchnet"}
{"code": "def element_creator(namespace=None):\n    ELEMENT_MAKER = _objectify.ElementMaker(namespace=namespace, annotate=False)\n\n    def create_elem(tag, attr=None, text=None):\n        ':class:`objectify.Element` wrapper with namespace defined.\\n\\n        Args:\\n            tag (str): Tag name\\n            attr (dict): Default attributes for tag\\n            text (str): Text content for the tag\\n\\n        Returns:\\n            _objectify.ObjectifiedElement: objectify element\\n        '\n        if (not attr):\n            attr = {}\n        if text:\n            element = getattr(ELEMENT_MAKER, tag)(text, **attr)\n        else:\n            element = getattr(ELEMENT_MAKER, tag)(**attr)\n        return element\n    return create_elem", "docstring": "Create a simple namespace-aware objectify element creator.\n\nArgs:\nnamespace (str): Namespace to work in\n\nReturns:\nfunction: Namespace-aware element creator", "source": "codesearchnet"}
{"code": "def on_created(self, event):\n    if (not self._event_error):\n        self.logger.info(u'Change detected from a create on: %s', event.src_path)\n        self.compile_dependencies(event.src_path)", "docstring": "Called when a new file or directory is created.\n\nTodo:\nThis should be also used (extended from another class?) to watch\nfor some special name file (like \".boussole-watcher-stop\" create to\nraise a KeyboardInterrupt, so we may be able to unittest the\nwatcher (click.CliRunner is not able to send signal like CTRL+C\nthat is required to watchdog observer loop)\n\nArgs:\nevent: Watchdog event, either ``watchdog.events.DirCreatedEvent``\nor ``watchdog.events.FileCreatedEvent``.", "source": "codesearchnet"}
{"code": "def compute_author_match_score(x_authors, y_authors):\n    if ((not x_authors) or (not y_authors)):\n        return 0.0\n    matches = get_number_of_author_matches(x_authors, y_authors)\n    max_length = max(len(x_authors), len(y_authors))\n    return (matches / float(max_length))", "docstring": "Return the matching score of 2 given lists of authors.\n\nArgs:\nx_authors (list(dict)): first schema-compliant list of authors.\ny_authors (list(dict)): second schema-compliant list of authors.\n\nReturns:\nfloat: matching score of authors.", "source": "codesearchnet"}
{"code": "def __init__(self, xid=None, command=None, group_type=None, group_id=None,\n                 buckets=None):\n        \n        super().__init__(xid)\n        self.command = command\n        self.group_type = group_type\n        self.group_id = group_id\n        self.buckets = buckets", "docstring": "Create a GroupMod with the optional parameters below.\n\nArgs:\nxid (int): Header's transaction id. Defaults to random.\ncommand (GroupModCommand): One of OFPGC_*.\ngroup_type (GroupType): One of OFPGT_*.\ngroup_id (int): Group identifier.\nbuckets (:class:`ListOfBuckets`): The length of the bucket\narray is inferred from the length field in the header.", "source": "juraj-google-style"}
{"code": "def new(cls, script, commit, params, campaign_dir, overwrite=False):\n    if (not Path(campaign_dir).is_absolute()):\n        raise ValueError('Path is not absolute')\n    if (Path(campaign_dir).exists() and (not overwrite)):\n        raise FileExistsError('The specified directory already exists')\n    elif (Path(campaign_dir).exists() and overwrite):\n        campaign_dir_name = os.path.basename(campaign_dir)\n        folder_contents = set(os.listdir(campaign_dir))\n        allowed_files = set((['data', ('%s.json' % campaign_dir_name)] + [os.path.basename(os.path.normpath(f)) for f in glob.glob(os.path.join(campaign_dir, '.*'))]))\n        if (not folder_contents.issubset(allowed_files)):\n            raise ValueError('The specified directory cannot be overwritten because it contains user files.')\n        shutil.rmtree(campaign_dir)\n    os.makedirs(campaign_dir)\n    tinydb = TinyDB(os.path.join(campaign_dir, ('%s.json' % os.path.basename(campaign_dir))))\n    config = {'script': script, 'commit': commit, 'params': sorted(params)}\n    tinydb.table('config').insert(config)\n    return cls(tinydb, campaign_dir)", "docstring": "Initialize a new class instance with a set configuration and filename.\n\nThe created database has the same name of the campaign directory.\n\nArgs:\nscript (str): the ns-3 name of the script that will be used in this\ncampaign;\ncommit (str): the commit of the ns-3 installation that is used to\nrun the simulations.\nparams (list): a list of the parameters that can be used on the\nscript.\ncampaign_dir (str): The path of the file where to save the DB.\noverwrite (bool): Whether or not existing directories should be\noverwritten.", "source": "codesearchnet"}
{"code": "def add_tools(self, *tools):\n    for tool in tools:\n        if (not isinstance(tool, Tool)):\n            raise ValueError('All arguments to add_tool must be Tool subclasses.')\n        self.toolbar.tools.append(tool)", "docstring": "Adds tools to the plot.\n\nArgs:\n*tools (Tool) : the tools to add to the Plot\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, module_to_name, members, filename_to_library_map,\n               path_prefix):\n    \n    self._module_to_name = module_to_name\n    self._members = members\n    self._filename_to_library_map = filename_to_library_map\n    self._path_prefix = path_prefix", "docstring": "Creates a new Index.\n\nArgs:\nmodule_to_name: Dictionary mapping modules to short names.\nmembers: Dictionary mapping member name to (fullname, member).\nfilename_to_library_map: A list of (filename, Library) pairs. The order\ncorresponds to the order in which the libraries appear in the index.\npath_prefix: Prefix to add to links in the index.", "source": "juraj-google-style"}
{"code": "def findAllSingle(self, selfValue):\n        \n        resultList = []\n        for element in selfValue:\n            if isinstance(element, Single):\n                resultList.append(element)\n                resultList += element.findAllSingle()\n        return resultList", "docstring": "Looks for all the single values and subclasses *recursively* and returns a list of them\n\nArgs:\nselfValue: A list of single, str, int. Normally just ``self.value``\n\nReturns:\nlist: A list contains only singles and subclasses.", "source": "juraj-google-style"}
{"code": "def forward(self, key_value_states, hidden_states, attn_mask=None):\n    query = self.q_proj(self.layer_norm(hidden_states))\n    key_value_states = self.layer_norm_kv(key_value_states)\n    key = self.k_proj(key_value_states)\n    value = self.v_proj(key_value_states)\n    attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)\n    attn_output = self.dropout(self.linear(attn_output))\n    return attn_output", "docstring": "Forward pass of the AriaCrossAttention module.\n\nArgs:\nkey_value_states (`torch.Tensor`):\nInput tensor for key and value.\nhidden_states (`torch.Tensor`):\nInput tensor for query.\nattn_mask (`torch.Tensor`, *optional*, defaults to None):\nAttention mask.\n\nReturns:\ntorch.Tensor:\nOutput tensor after cross-attention.", "source": "github-repos"}
{"code": "def write_compartments(self, stream, compartments, adjacencies,\n                           properties=None):\n        \n        def convert(entry):\n            return self.convert_compartment_entry(\n                entry, adjacencies.get(entry.id))\n\n        self._write_entries(stream, compartments, convert, properties)", "docstring": "Write iterable of compartments as YAML object to stream.\n\nArgs:\nstream: File-like object.\ncompartments: Iterable of compartment entries.\nadjacencies: Dictionary mapping IDs to adjacent compartment IDs.\nproperties: Set of compartment properties to output (or None to\noutput all).", "source": "juraj-google-style"}
{"code": "def _reduce_pseudo_inverse(nodes):\n    \n    _, num_nodes = np.shape(nodes)\n    if num_nodes == 2:\n        reduction = _REDUCTION0\n        denom = _REDUCTION_DENOM0\n    elif num_nodes == 3:\n        reduction = _REDUCTION1\n        denom = _REDUCTION_DENOM1\n    elif num_nodes == 4:\n        reduction = _REDUCTION2\n        denom = _REDUCTION_DENOM2\n    elif num_nodes == 5:\n        reduction = _REDUCTION3\n        denom = _REDUCTION_DENOM3\n    else:\n        raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(1, 2, 3, 4))\n\n    result = _helpers.matrix_product(nodes, reduction)\n    result /= denom\n    return result", "docstring": "Performs degree-reduction for a B |eacute| zier curve.\n\nDoes so by using the pseudo-inverse of the degree elevation\noperator (which is overdetermined).\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes in the curve.\n\nReturns:\nnumpy.ndarray: The reduced nodes.\n\nRaises:\n.UnsupportedDegree: If the degree is not 1, 2, 3 or 4.", "source": "juraj-google-style"}
{"code": "def get(self, key, mem_map=True):\n    self.raise_error_if_not_open()\n    if (key in self._file):\n        data = self._file[key]\n        if (not mem_map):\n            data = data[()]\n        return data\n    else:\n        return None", "docstring": "Read and return the data stored for the given key.\n\nArgs:\nkey (str): The key to read the data from.\nmem_map (bool): If ``True`` returns the data as\nmemory-mapped array, otherwise a copy is returned.\n\nNote:\nThe container has to be opened in advance.\n\nReturns:\nnumpy.ndarray: The stored data.", "source": "codesearchnet"}
{"code": "def __init__(self, desc='Loading...', end='', timeout=0.1):\n    self.desc = desc\n    self.end = end\n    self.timeout = timeout\n    self._thread = Thread(target=self._animate, daemon=True)\n    self.steps = ['⢿', '⣻', '⣽', '⣾', '⣷', '⣯', '⣟', '⡿']\n    self.done = False", "docstring": "A loader-like context manager\n\nArgs:\ndesc (str, optional): The loader's description. Defaults to \"Loading...\".\nend (str, optional): Final print. Defaults to \"Done!\".\ntimeout (float, optional): Sleep time between prints. Defaults to 0.1.", "source": "github-repos"}
{"code": "def ikey(self, value):\n        \n        if value == self._defaults['iKey'] and 'iKey' in self._values:\n            del self._values['iKey']\n        else:\n            self._values['iKey'] = value", "docstring": "The ikey property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def extend(self, base: 'ValueSpec') -> 'ValueSpec':", "docstring": "Extends a base spec with current spec's rules.\n\nArgs:\nbase: Base ValueSpec to extend.\n\nReturns:\nValueSpec itself.\n\nRaises:\nTypeError: When this value spec cannot extend from base.", "source": "github-repos"}
{"code": "def __init__(self, *elements, **kwargs):\n        \n        if not all([isinstance(e, Row) or issubclass(type(e), Box)\n                    for e in elements]):\n            raise TypeError('All elements of Column must '\n                            'be Row or Box instances')\n        width = kwargs.pop('width', 12)\n        if width not in range(1, 13):\n            raise ValueError('Column width must be between 1 and 12')\n\n        self.type = 'column'\n        self.elements = elements\n        self.width = width", "docstring": "Init method.\n\nArgs:\n*elements (): the rows or boxes.\n**kwargs: the width can be passed through the keyword args [1-12].", "source": "juraj-google-style"}
{"code": "def swap(self, old_chunks, new_chunk):\n    indexes = [self.index(chunk) for chunk in old_chunks]\n    del self[indexes[0]:(indexes[(- 1)] + 1)]\n    self.insert(indexes[0], new_chunk)", "docstring": "Swaps old consecutive chunks with new chunk.\n\nArgs:\nold_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to\nbe removed.\nnew_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.", "source": "codesearchnet"}
{"code": "def plot(self, pts_per_edge, color=None, ax=None):\n        \n        if ax is None:\n            ax = _plot_helpers.new_axis()\n        _plot_helpers.add_patch(ax, color, pts_per_edge, *self._edges)\n        return ax", "docstring": "Plot the current curved polygon.\n\nArgs:\npts_per_edge (int): Number of points to plot per curved edge.\ncolor (Optional[Tuple[float, float, float]]): Color as RGB profile.\nax (Optional[matplotlib.artist.Artist]): matplotlib axis object\nto add plot to.\n\nReturns:\nmatplotlib.artist.Artist: The axis containing the plot. This\nmay be a newly created axis.", "source": "juraj-google-style"}
{"code": "def get_changed_files(self) -> List[str]:\n    out = shell_tools.output_of('git', 'diff', '--name-only', self.compare_commit_id, self.actual_commit_id, '--', cwd=self.destination_directory)\n    return [e for e in out.split('\\n') if e.strip()]", "docstring": "Get the files changed on one git branch vs another.\n\nReturns:\nList[str]: File paths of changed files, relative to the git repo\nroot.", "source": "codesearchnet"}
{"code": "def Format(pb):\n    if isinstance(pb, message.Message):\n        return dict(((desc.number, value) for desc, value in pb.ListFields()))\n    elif _IsMap(pb):\n        return dict(pb.items())\n    elif _IsRepeatedContainer(pb):\n        return dict(enumerate(list(pb)))\n    else:\n        return pb", "docstring": "Returns a dictionary or unchanged pb bases on its type.\n\nSpecifically, this function returns a dictionary that maps tag\nnumber (for messages) or element index (for repeated fields) to\nvalue, or just pb unchanged if it's neither.\n\nArgs:\npb: A proto2 message or a primitive.\nReturns:\nA dict or unchanged pb.", "source": "github-repos"}
{"code": "def stop(self, name: str) -> None:\n        \n        if not self._timing:\n            return\n        now = get_now_utc_pendulum()\n\n        \n        if not self._stack:\n            raise AssertionError(\"MultiTimer.stop() when nothing running\")\n        if self._stack[-1] != name:\n            raise AssertionError(\n                \"MultiTimer.stop({}) when {} is running\".format(\n                    repr(name), repr(self._stack[-1])))\n\n        \n        self._totaldurations[name] += now - self._starttimes[name]\n        self._stack.pop()\n\n        \n        \n        if self._stack:\n            last = self._stack[-1]\n            self._starttimes[last] = now", "docstring": "Stop a named timer.\n\nArgs:\nname: timer to stop", "source": "juraj-google-style"}
{"code": "async def selfplay(state, flagfile='selfplay'):\n    output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name)\n    holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name)\n    lines = (await run('bazel-bin/cc/selfplay', '--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, flagfile)), '--model={}'.format(state.best_model_path), '--output_dir={}'.format(output_dir), '--holdout_dir={}'.format(holdout_dir), '--seed={}'.format(state.seed)))\n    result = '\\n'.join(lines[(- 6):])\n    logging.info(result)\n    stats = parse_win_stats_table(result, 1)[0]\n    num_games = stats.total_wins\n    logging.info('Black won %0.3f, white won %0.3f', (stats.black_wins.total / num_games), (stats.white_wins.total / num_games))\n    pattern = os.path.join(output_dir, '*', '*.zz')\n    random.seed(state.seed)\n    tf.set_random_seed(state.seed)\n    np.random.seed(state.seed)\n    buffer = example_buffer.ExampleBuffer(sampling_frac=1.0)\n    logging.info('Writing golden chunk from \"{}\"'.format(pattern))\n    buffer.parallel_fill(tf.gfile.Glob(pattern))\n    buffer.flush(os.path.join(fsdb.golden_chunk_dir(), (state.output_model_name + '.tfrecord.zz')))", "docstring": "Run selfplay and write a training chunk to the fsdb golden_chunk_dir.\n\nArgs:\nstate: the RL loop State instance.\nflagfile: the name of the flagfile to use for selfplay, either 'selfplay'\n(the default) or 'boostrap'.", "source": "codesearchnet"}
{"code": "def first_dna(self) -> geno.DNA:\n    if self.next_dna.__code__ is CustomHyper.next_dna.__code__:\n        raise NotImplementedError(f'{self.__class__!r} must implement method `next_dna` to be used in dynamic evaluation mode.')\n    return self.next_dna(None)", "docstring": "Returns the first DNA of current sub-space.\n\nReturns:\nA string-valued DNA.", "source": "github-repos"}
{"code": "def validate_context(\n        self, context: Mapping[str, Any]\n    ) -> Tuple[bool, List[Tuple[str, str]]]:\n        \n\n        url = f'{self.endpoint}/terms/{context[\"id\"]}'\n\n        res = requests.get(url)\n        if res.status_code == 200:\n            return (True, [])\n        else:\n            return (False, [(\"WARNING\", f'Context {context[\"id\"]} not found at {url}')])", "docstring": "Validate context\n\nArgs:\ncontext (Mapping[str, Any]): context dictionary of type, id and label\n\nReturns:\nTuple[bool, List[Tuple[str, str]]]:\nbool: Is valid?  Yes = True, No = False\nList[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)\ne.g. [('WARNING', \"Context ID not found\")]", "source": "juraj-google-style"}
{"code": "def compute(self, x_arr, y_arr):\n        \n        y_arr += 1e-08\n        return np.sum(x_arr * np.log(x_arr / y_arr), axis=-1)", "docstring": "Compute distance.\n\nArgs:\nx_arr:      `np.ndarray` of vectors.\ny_arr:      `np.ndarray` of vectors.\n\nRetruns:\n`np.ndarray` of distances.", "source": "juraj-google-style"}
{"code": "def check_version(version, range_=None):\n    if (range_ and (version not in range_)):\n        raise RezBindError(('found version %s is not within range %s' % (str(version), str(range_))))", "docstring": "Check that the found software version is within supplied range.\n\nArgs:\nversion: Version of the package as a Version object.\nrange_: Allowable version range as a VersionRange object.", "source": "codesearchnet"}
{"code": "def update_model_handler(self, key: str, model_path: str, previous_key: str):\n    if self._key_to_last_update[key] == model_path:\n        return\n    self._key_to_last_update[key] = model_path\n    if key not in self._mh_map:\n        self._mh_map[key] = deepcopy(self._mh_map[previous_key])\n    self._mh_map[key].update_model_path(model_path)\n    if key in self._tag_map:\n        tag_to_remove = self._tag_map[key]\n        shared_handle, model_to_remove = self._proxy_map[tag_to_remove]\n        shared_handle.release(model_to_remove)\n        del self._tag_map[key]\n        del self._proxy_map[tag_to_remove]", "docstring": "Updates the model path of this model handler and removes it from memory so\nthat it can be reloaded with the updated path. No-ops if no model update\nneeds to be applied.\nArgs:\nkey: the key associated with the model we'd like to update.\nmodel_path: the new path to the model we'd like to load.\nprevious_key: the key that is associated with the old version of this\nmodel. This will often be the same as the current key, but sometimes\nwe will want to keep both the old and new models to serve different\ncohorts. In that case, the keys should be different.", "source": "github-repos"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if whence == os.SEEK_CUR:\n      offset += self._current_offset\n\n    elif whence == os.SEEK_END:\n      if self._decrypted_stream_size is None:\n        self._decrypted_stream_size = self._GetDecryptedStreamSize()\n        if self._decrypted_stream_size is None:\n          raise IOError('Invalid decrypted stream size.')\n\n      offset += self._decrypted_stream_size\n\n    elif whence != os.SEEK_SET:\n      raise IOError('Unsupported whence.')\n\n    if offset < 0:\n      raise IOError('Invalid offset value less than zero.')\n\n    if offset != self._current_offset:\n      self._current_offset = offset\n      self._realign_offset = True", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek.\nwhence (Optional[int]): value that indicates whether offset is an\nabsolute or relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def error(self, status=None):\n\n    def decorator(callback):\n        self._error_handlers[status] = callback\n        return callback\n    return decorator", "docstring": "Decorator to add a callback that generates error page.\n\nThe *status* parameter specifies the HTTP response status code\nfor which the decorated callback should be invoked. If the\n*status* argument is not specified, then the decorated callable\nis considered to be a fallback callback.\n\nA fallback callback, when defined, is invoked to generate the\nerror page for any HTTP response representing an error when\nthere is no error handler defined explicitly for the response\ncode of the HTTP response.\n\nArguments:\nstatus(int, optional): HTTP response status code.\n\nReturns:\nfunction: Decorator function to add error handler.", "source": "codesearchnet"}
{"code": "def get_decomposition(self, comp):\n    (facet, simplex) = self._get_facet_and_simplex(comp)\n    decomp_amts = simplex.bary_coords(self.pd_coords(comp))\n    return {self.qhull_entries[f]: amt for (f, amt) in zip(facet, decomp_amts) if (abs(amt) > PhaseDiagram.numerical_tol)}", "docstring": "Provides the decomposition at a particular composition.\n\nArgs:\ncomp: A composition\n\nReturns:\nDecomposition as a dict of {Entry: amount}", "source": "codesearchnet"}
{"code": "def tags():\n    return shell.run('git tag --sort=v:refname', capture=True, never_pretend=True).stdout.strip().splitlines()", "docstring": "Returns all tags in the repo.\n\nReturns:\nlist[str]: List of all tags in the repo, sorted as versions.\n\nAll tags returned by this function will be parsed as if the contained\nversions (using ``v:refname`` sorting).", "source": "codesearchnet"}
{"code": "def constant(x: A) -> Callable[..., A]:\n    \n\n    def constanted(*args, **kwargs):\n        return x\n\n    return constanted", "docstring": "Produce a function that always returns a supplied value.\n\nArgs:\nx: Any object.\n\nReturns:\nA function that accepts any number of positional and keyword arguments, discards them, and returns ``x``.", "source": "juraj-google-style"}
{"code": "def getctime(self, path=None, client_kwargs=None, header=None):\n        \n        return self._getctime_from_header(\n            self.head(path, client_kwargs, header))", "docstring": "Return the creation time of path.\n\nArgs:\npath (str): File path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\nfloat: The number of seconds since the epoch\n(see the time module).", "source": "juraj-google-style"}
{"code": "def check_alive(self, worker_name):\n    if self._context_handle:\n        return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)\n    else:\n        raise ValueError('Context is not initialized.')", "docstring": "Checks whether a remote worker is alive or not.\n\nArgs:\nworker_name: a string representing the remote worker. It must be a fully\nspecified name like \"/job:worker/replica:0/task:0\".\n\nReturns:\na boolean indicating whether the remote worker is alive or not.\n\nRaises:\nValueError: if context is not initialized.", "source": "github-repos"}
{"code": "def document(self, document_id=None):\n        \n        if document_id is None:\n            document_id = _auto_id()\n\n        child_path = self._path + (document_id,)\n        return self._client.document(*child_path)", "docstring": "Create a sub-document underneath the current collection.\n\nArgs:\ndocument_id (Optional[str]): The document identifier\nwithin the current collection. If not provided, will default\nto a random 20 character string composed of digits,\nuppercase and lowercase and letters.\n\nReturns:\n~.firestore_v1beta1.document.DocumentReference: The child\ndocument.", "source": "juraj-google-style"}
{"code": "def load(self, raw):\n        \n        try:\n            self._load(raw)\n        except (KeyError, ValueError) as e:\n            raise_from(exception.ParseException('Parse error in %s' % (type(self)), raw), e)", "docstring": "Unserialize from raw representation. (Wrapper)\n\nArgs:\nraw (dict): Raw.\nRaises:\nParseException: If there was an error parsing data.", "source": "juraj-google-style"}
{"code": "def _GetCompressedStreamTypes(self, mediator, path_spec):\n    \n    try:\n      type_indicators = analyzer.Analyzer.GetCompressedStreamTypeIndicators(\n          path_spec, resolver_context=mediator.resolver_context)\n    except IOError as exception:\n      type_indicators = []\n\n      warning_message = (\n          'analyzer failed to determine compressed stream type indicators '\n          'with error: {0!s}').format(exception)\n      mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)\n\n    return type_indicators", "docstring": "Determines if a data stream contains a compressed stream such as: gzip.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\npath_spec (dfvfs.PathSpec): path specification of the data stream.\n\nReturns:\nlist[str]: dfVFS compressed stream type indicators found in\nthe data stream.", "source": "juraj-google-style"}
{"code": "def __setitem__(self, key, layout):\n    if key in self._layout_map:\n        raise ValueError(f'{key} already exist in the LayoutMap with value {self._layout_map[key]}. Please make sure to not use duplicated keys.')\n    if isinstance(layout, tuple):\n        layout = TensorLayout(axes=layout, device_mesh=None)\n    if not isinstance(layout, TensorLayout):\n        raise ValueError(f'{layout} should be a TensorLayout type, got {type(layout)}')\n    self._maybe_populate_device_mesh(layout)\n    self._layout_map[key] = layout", "docstring": "Insert TensorLayout to the LayoutMap.\n\nArgs:\nkey: String key for the `TensorLayout`.\nlayout: The `TensorLayout`. As a shortcut, tuple of string and None\nare also acceptable, and will be converted to `TensorLayout`.", "source": "github-repos"}
{"code": "def number_of_decimals(num):\n    r\n    exp = decimal.Decimal(str(num)).as_tuple().exponent\n    return max(0, -exp)", "docstring": "r\"\"\"\nArgs:\nnum (float):\n\nReferences:\nstackoverflow.com/questions/6189956/finding-decimal-places\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_alg import *  # NOQA\n>>> num = 15.05\n>>> result = number_of_decimals(num)\n>>> print(result)\n2", "source": "juraj-google-style"}
{"code": "def sg_symbol_from_int_number(int_number, hexagonal=True):\n    syms = []\n    for (n, v) in get_symm_data('space_group_encoding').items():\n        if (v['int_number'] == int_number):\n            syms.append(n)\n    if (len(syms) == 0):\n        raise ValueError('Invalid international number!')\n    if (len(syms) == 2):\n        if hexagonal:\n            syms = list(filter((lambda s: s.endswith('H')), syms))\n        else:\n            syms = list(filter((lambda s: (not s.endswith('H'))), syms))\n    return syms.pop()", "docstring": "Obtains a SpaceGroup name from its international number.\n\nArgs:\nint_number (int): International number.\nhexagonal (bool): For rhombohedral groups, whether to return the\nhexagonal setting (default) or rhombohedral setting.\n\nReturns:\n(str) Spacegroup symbol", "source": "codesearchnet"}
{"code": "def time2timestr(time, fmt='hhmmss'):\n    \n    if fmt.count(':') == 2:\n        if not fmt.index('h') < fmt.index('m') < fmt.index('s'):\n            raise ValueError('Invalid format string. {}'.format(\n                    VALID_TIME_FORMATS_TEXT))\n        h, m, s = fmt.split(':')\n    elif fmt.count(':') == 1:\n        if not fmt.index('h') < fmt.index('m'):\n            raise ValueError('Invalid format string. {}'.format(\n                    VALID_TIME_FORMATS_TEXT))\n        h, m = fmt.split(':')\n        s = None\n    elif any(c not in 'hms' for c in fmt) or len(fmt) != 6:\n        raise ValueError('Invalid character in format string. {}'.format(\n                VALID_TIME_FORMATS_TEXT))\n    else:\n        if not fmt.index('h') < fmt.index('m') < fmt.index('s'):\n            raise ValueError('Invalid format string. {}'.format(\n                    VALID_TIME_FORMATS_TEXT))\n        h, m, s = fmt[:-4], fmt[-4:-2], fmt[-2:]\n    for string, char in ((h, 'h'), (m, 'm'), (s, 's')):\n        if string is not None and any(c != char for c in string):\n            raise ValueError('Invalid date format: {} is not {}'.\\\n                    format(char, string))\n    if len(h) == 2:\n        fmt = fmt.replace('hh', '%H', 1)\n    elif len(h) == 1:\n        fmt = fmt.replace('h', 'X%H', 1)\n    else:\n        raise ValueError('Invalid format string, hour must have 1 or 2 digits')\n    if len(m) == 2:\n        fmt = fmt.replace('mm', '%M', 1)\n    else:\n        raise ValueError('Invalid format string, minutes must have 2 digits')\n    if s is not None and len(s) == 2:\n        fmt = fmt. replace('ss', '%S', 1)\n    elif s is not None:\n        raise ValueError('Invalid format string, seconds must have 2 digits')\n    return time.strftime(fmt).replace('X0','X').replace('X','')", "docstring": "Turns a datetime.time object into a string. The string must have one of the\nformats from VALID_TIME_FORMATS_TEXT to make it compatible with\ntimestr2time.\n\nArgs:\ntime (datetime.time) the time to be translated\nfmt (str) a format string.\nReturns:\n(str) that represents a time.\nRaises:\nValueError if the format is not valid.", "source": "juraj-google-style"}
{"code": "def wait_for(self, timeout=None, **kwargs):\n    if (len(kwargs) == 0):\n        raise ArgumentError('You must specify at least one message field to wait on')\n    spec = MessageSpec(**kwargs)\n    future = self._add_waiter(spec)\n    future.add_done_callback((lambda x: self._remove_waiter(spec, future)))\n    return asyncio.wait_for(future, timeout=timeout)", "docstring": "Wait for a specific matching message or timeout.\n\nYou specify the message by passing name=value keyword arguments to\nthis method.  The first message received after this function has been\ncalled that has all of the given keys with the given values will be\nreturned when this function is awaited.\n\nIf no matching message is received within the specified timeout (if\ngiven), then asyncio.TimeoutError will be raised.\n\nThis function only matches a single message and removes itself once\nthe message is seen or the timeout expires.\n\nArgs:\ntimeout (float): Optional timeout, defaults to None for no timeout.\n**kwargs: Keys to match in the message with their corresponding values.\nYou must pass at least one keyword argument so there is something\nto look for.\n\nReturns:\nawaitable: The response", "source": "codesearchnet"}
{"code": "def update_batch(self, loss_per_instance):\n    if (self.batch_indices is None):\n        raise TensorForceError('Need to call get_batch before each update_batch call.')\n    for (index, loss) in zip(self.batch_indices, loss_per_instance):\n        new_priority = ((np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight)\n        self.observations._move(index, new_priority)\n        self.none_priority_index += 1", "docstring": "Computes priorities according to loss.\n\nArgs:\nloss_per_instance:", "source": "codesearchnet"}
{"code": "def rename_keys(d: Dict[str, Any], mapping: Dict[str, str]) -> Dict[str, Any]:\n    \n    result = {}  \n    for k, v in d.items():\n        if k in mapping:\n            k = mapping[k]\n        result[k] = v\n    return result", "docstring": "Returns a copy of the dictionary ``d`` with its keys renamed according to\n``mapping``.\n\nArgs:\nd: the starting dictionary\nmapping: a dictionary of the format ``{old_key_name: new_key_name}``\n\nReturns:\na new dictionary\n\nKeys that are not in ``mapping`` are left unchanged.\nThe input parameters are not modified.", "source": "juraj-google-style"}
{"code": "def copy_submission_locally(self, cloud_path):\n    local_path = os.path.join(self.download_dir, os.path.basename(cloud_path))\n    cmd = ['gsutil', 'cp', cloud_path, local_path]\n    if (subprocess.call(cmd) != 0):\n        logging.error(\"Can't copy submission locally\")\n        return None\n    return local_path", "docstring": "Copies submission from Google Cloud Storage to local directory.\n\nArgs:\ncloud_path: path of the submission in Google Cloud Storage\n\nReturns:\nname of the local file where submission is copied to", "source": "codesearchnet"}
{"code": "def ExtractEventsFromSources(self):\n    self._CheckStorageFile(self._storage_file_path, warn_about_existing=True)\n    scan_context = self.ScanSource(self._source_path)\n    self._source_type = scan_context.source_type\n    self._status_view.SetMode(self._status_view_mode)\n    self._status_view.SetSourceInformation(self._source_path, self._source_type, artifact_filters=self._artifact_filters, filter_file=self._filter_file)\n    status_update_callback = self._status_view.GetExtractionStatusUpdateCallback()\n    self._output_writer.Write('\\n')\n    self._status_view.PrintExtractionStatusHeader(None)\n    self._output_writer.Write('Processing started.\\n')\n    session = engine.BaseEngine.CreateSession(artifact_filter_names=self._artifact_filters, command_line_arguments=self._command_line_arguments, debug_mode=self._debug_mode, filter_file_path=self._filter_file, preferred_encoding=self.preferred_encoding, preferred_time_zone=self._preferred_time_zone, preferred_year=self._preferred_year)\n    storage_writer = storage_factory.StorageFactory.CreateStorageWriter(self._storage_format, session, self._storage_file_path)\n    if (not storage_writer):\n        raise errors.BadConfigOption('Unsupported storage format: {0:s}'.format(self._storage_format))\n    single_process_mode = self._single_process_mode\n    if (self._source_type == dfvfs_definitions.SOURCE_TYPE_FILE):\n        single_process_mode = True\n    if single_process_mode:\n        extraction_engine = single_process_engine.SingleProcessEngine()\n    else:\n        extraction_engine = multi_process_engine.TaskMultiProcessEngine(use_zeromq=self._use_zeromq)\n    if (self._source_type in self._SOURCE_TYPES_TO_PREPROCESS):\n        self._PreprocessSources(extraction_engine)\n    configuration = self._CreateProcessingConfiguration(extraction_engine.knowledge_base)\n    self._SetExtractionParsersAndPlugins(configuration, session)\n    self._SetExtractionPreferredTimeZone(extraction_engine.knowledge_base)\n    try:\n        filter_find_specs = extraction_engine.BuildFilterFindSpecs(self._artifact_definitions_path, self._custom_artifacts_path, extraction_engine.knowledge_base, self._artifact_filters, self._filter_file)\n    except errors.InvalidFilter as exception:\n        raise errors.BadConfigOption('Unable to build filter specification: {0!s}'.format(exception))\n    processing_status = None\n    if single_process_mode:\n        logger.debug('Starting extraction in single process mode.')\n        processing_status = extraction_engine.ProcessSources(self._source_path_specs, storage_writer, self._resolver_context, configuration, filter_find_specs=filter_find_specs, status_update_callback=status_update_callback)\n    else:\n        logger.debug('Starting extraction in multi process mode.')\n        processing_status = extraction_engine.ProcessSources(session.identifier, self._source_path_specs, storage_writer, configuration, enable_sigsegv_handler=self._enable_sigsegv_handler, filter_find_specs=filter_find_specs, number_of_worker_processes=self._number_of_extraction_workers, status_update_callback=status_update_callback, worker_memory_limit=self._worker_memory_limit)\n    self._status_view.PrintExtractionSummary(processing_status)", "docstring": "Processes the sources and extracts events.\n\nRaises:\nBadConfigOption: if the storage file path is invalid or the storage\nformat not supported or an invalid filter was specified.\nSourceScannerError: if the source scanner could not find a supported\nfile system.\nUserAbort: if the user 
initiated an abort.", "source": "codesearchnet"}
{"code": "def call(self, inputs, state):\n    _check_rnn_cell_input_dtypes([inputs, state])\n    num_proj = self._num_units if self._num_proj is None else self._num_proj\n    sigmoid = math_ops.sigmoid\n    if self._state_is_tuple:\n        c_prev, m_prev = state\n    else:\n        c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n        m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n    input_size = inputs.get_shape().with_rank(2).dims[1].value\n    if input_size is None:\n        raise ValueError('Could not infer input size from inputs.get_shape()[-1]')\n    lstm_matrix = math_ops.matmul(array_ops.concat([inputs, m_prev], 1), self._kernel)\n    lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)\n    i, j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=4, axis=1)\n    if self._use_peepholes:\n        c = sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * self._activation(j)\n    else:\n        c = sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)\n    if self._cell_clip is not None:\n        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n    if self._use_peepholes:\n        m = sigmoid(o + self._w_o_diag * c) * self._activation(c)\n    else:\n        m = sigmoid(o) * self._activation(c)\n    if self._num_proj is not None:\n        m = math_ops.matmul(m, self._proj_kernel)\n        if self._proj_clip is not None:\n            m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n    new_state = LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat([c, m], 1)\n    return (m, new_state)", "docstring": "Run one step of LSTM.\n\nArgs:\ninputs: input Tensor, must be 2-D, `[batch, input_size]`.\nstate: if `state_is_tuple` is False, this must be a state Tensor, `2-D,\n[batch, state_size]`.  If `state_is_tuple` is True, this must be a tuple\nof state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.\n\nReturns:\nA tuple containing:\n\n- A `2-D, [batch, output_dim]`, Tensor representing the output of the\nLSTM after reading `inputs` when previous state was `state`.\nHere output_dim is:\nnum_proj if num_proj was set,\nnum_units otherwise.\n- Tensor(s) representing the new state of LSTM after reading `inputs` when\nthe previous state was `state`.  Same type and shape(s) as `state`.\n\nRaises:\nValueError: If input size cannot be inferred from inputs via\nstatic shape inference.", "source": "github-repos"}
{"code": "def _bdtr(k, n, p):\n  \n  \n  \n  \n  \n  ones = tf.ones_like(n - k)\n  k_eq_n = tf.equal(k, n)\n  safe_dn = tf.where(k_eq_n, ones, n - k)\n  dk = tf.math.betainc(a=safe_dn, b=k + 1, x=1 - p)\n  return tf.where(k_eq_n, ones, dk)", "docstring": "The binomial cumulative distribution function.\n\nArgs:\nk: floating point `Tensor`.\nn: floating point `Tensor`.\np: floating point `Tensor`.\n\nReturns:\n`sum_{j=0}^k p^j (1 - p)^(n - j)`.", "source": "juraj-google-style"}
{"code": "def strip_name_scope(name: str, export_scope) -> str:\n    if export_scope:\n        if export_scope[-1] == '/':\n            export_scope = export_scope[:-1]\n        try:\n            str_to_replace = '([\\\\^]|loc:@|^)' + export_scope + '[\\\\/]+(.*)'\n            return re.sub(str_to_replace, '\\\\1\\\\2', compat.as_str(name), count=1)\n        except TypeError as e:\n            logging.warning(e)\n            return name\n    else:\n        return name", "docstring": "Removes name scope from a name.\n\nArgs:\nname: A `string` name.\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nName with name scope removed, or the original name if export_scope\nis None.", "source": "github-repos"}
{"code": "def get_issue_description(test_config_container: TestConfigContainer, metric_container: MetricContainer, change_point_index: int, max_results_to_display: int=5) -> str:\n    description = []\n    description.append(_ISSUE_DESCRIPTION_TEMPLATE.format(test_config_container.test_id, test_config_container.metric_name))\n    if test_config_container.test_name:\n        description.append('`test_name:` ' + f'{test_config_container.test_name}')\n    if test_config_container.test_description:\n        description.append('`Test description:` ' + f'{test_config_container.test_description}')\n    description.append('```')\n    runs_to_display = []\n    max_timestamp_index = min(change_point_index + max_results_to_display, len(metric_container.values) - 1)\n    min_timestamp_index = max(0, change_point_index - max_results_to_display)\n    for i in reversed(range(min_timestamp_index, max_timestamp_index + 1)):\n        row_template = _METRIC_INFO_TEMPLATE.format(metric_container.timestamps[i].ctime(), format(metric_container.values[i], '.2f'))\n        if i == change_point_index:\n            row_template += constants._ANOMALY_MARKER\n        runs_to_display.append(row_template)\n    description.append(os.linesep.join(runs_to_display))\n    description.append('```')\n    return (2 * os.linesep).join(description)", "docstring": "Args:\ntest_config_container: TestConfigContainer containing test metadata.\nmetric_container: MetricContainer containing metric data.\nchange_point_index: Index of the change point in the metric data.\nmax_results_to_display: Max number of results to display from the change\npoint index, in both directions of the change point index.\n\nReturns:\nstr: Description used to fill the GitHub issues description.", "source": "github-repos"}
{"code": "def _update_listing_client_kwargs(client_kwargs, max_request_entries):\n    client_kwargs = client_kwargs.copy()\n    if max_request_entries:\n        client_kwargs['num_results'] = max_request_entries\n    return client_kwargs", "docstring": "Updates client kwargs for listing functions.\n\nArgs:\nclient_kwargs (dict): Client arguments.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ndict: Updated client_kwargs", "source": "codesearchnet"}
{"code": "def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):\n    return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`DFineConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model\nconfiguration.\n\nArgs:\nbackbone_config ([`PretrainedConfig`]):\nThe backbone configuration.\n\nReturns:\n[`DFineConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def save(self):\n    if (not os.path.exists(self.paths.virt())):\n        os.makedirs(self.paths.virt())\n    self._save_metadata()\n    self.virt_env.save()", "docstring": "Save this prefix to persistent storage\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def predict_task_proba(self, X, t=0, **kwargs):\n        \n        return self.predict_proba(X, **kwargs)[t]", "docstring": "Predicts probabilistic labels for an input X on task t\n\nArgs:\nX: The input for the predict_proba method\nt: The task index to predict for which to predict probabilities\nReturns:\nAn [n, K_t] tensor of predictions for task t\nNOTE: By default, this method calls predict_proba and extracts element\nt. If it is possible to predict individual tasks in isolation, however,\nthis method may be overriden for efficiency's sake.", "source": "juraj-google-style"}
{"code": "def can_api_key_access_build(param_name):\n    \n    build_id = (\n        request.args.get(param_name, type=int) or\n        request.form.get(param_name, type=int) or\n        request.json[param_name])\n    utils.jsonify_assert(build_id, 'build_id required')\n\n    if app.config.get('IGNORE_AUTH'):\n        api_key = models.ApiKey(\n            id='anonymous_superuser',\n            secret='',\n            superuser=True)\n        build = models.Build.query.get(build_id)\n        utils.jsonify_assert(build is not None, 'build must exist', 404)\n    else:\n        ops = _get_api_key_ops()\n        api_key, build = ops.can_access_build(build_id)\n\n    return api_key, build", "docstring": "Determines if the current API key can access the build in the request.\n\nArgs:\nparam_name: Parameter name to use for getting the build ID from the\nrequest. Will fetch from GET or POST requests.\n\nReturns:\n(api_key, build) The API Key and the Build it has access to.", "source": "juraj-google-style"}
{"code": "def output(self, _filename):\n        \n\n        txt = \"Analyze of {}\\n\".format(self.slither.filename)\n        txt += self.get_detectors_result()\n        for contract in self.slither.contracts_derived:\n            txt += \"\\nContract {}\\n\".format(contract.name)\n            txt += self.is_complex_code(contract)\n            is_erc20 = contract.is_erc20()\n            txt += '\\tNumber of functions:{}'.format(self._number_functions(contract))\n            txt += \"\\tIs ERC20 token: {}\\n\".format(contract.is_erc20())\n            if is_erc20:\n                txt += self.get_summary_erc20(contract)\n\n        self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def drift_fn(self):\n    pass", "docstring": "Python callable calculating instantaneous drift.\n\nThe callable should accept two real `Tensor` arguments of the same dtype.\nThe first argument is the scalar time t, the second argument is the value of\nIto process X - `Tensor` of shape `batch_shape + [dim]`. Here `batch_shape`\nis an arbitrary shape. The result is the  value of drift a(t, X). The return\nvalue of the callable is a real `Tensor` of the same dtype as the input\narguments and of shape `batch_shape + [dim]`.\n\nReturns:\nThe instantaneous drift rate callable.", "source": "github-repos"}
{"code": "def _validate_bn_layer(self, layer):\n    if ((not isinstance(layer, tf.keras.layers.BatchNormalization)) and (not isinstance(layer, tf.compat.v1.layers.BatchNormalization))):\n        raise ValueError('batchnorm_layer must be an instance of BatchNormalization layer.')\n    if layer.renorm:\n        raise ValueError('BatchNorm Bijector does not support renormalization.')\n    if layer.virtual_batch_size:\n        raise ValueError('BatchNorm Bijector does not support virtual batch sizes.')", "docstring": "Check for valid BatchNormalization layer.\n\nArgs:\nlayer: Instance of `tf.layers.BatchNormalization`.\nRaises:\nValueError: If batchnorm_layer argument is not an instance of\n`tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or\nif `batchnorm_layer.virtual_batch_size` is specified.", "source": "codesearchnet"}
{"code": "def expand_var(v, env):\n  \n  if len(v) == 0:\n    return v\n  \n  if v[0] == '$':\n    v = v[1:]\n    if len(v) and v[0] != '$':\n      if v in env:\n        v = env[v]\n      else:\n        raise Exception('Cannot expand variable $%s' % v)\n  return v", "docstring": "If v is a variable reference (for example: '$myvar'), replace it using the supplied\nenv dictionary.\n\nArgs:\nv: the variable to replace if needed.\nenv: user supplied dictionary.\n\nRaises:\nException if v is a variable reference but it is not found in env.", "source": "juraj-google-style"}
{"code": "def create_version(self, version_label):\n\n\t\t\n\n\t\t\n\t\tversion_response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})\n\n\t\t\n\t\tif version_response.status_code == 201:\n\t\t\tlogger.debug('version created: %s' % version_response.headers['Location'])\n\n\t\t\t\n\t\t\tself._affix_version(version_response.headers['Location'], version_label)", "docstring": "method to create a new version of the resource as it currently stands\n\n- Note: this will create a version based on the current live instance of the resource,\nnot the local version, which might require self.update() to update.\n\nArgs:\nversion_label (str): label to be used for version\n\nReturns:\n(ResourceVersion): instance of ResourceVersion, also appended to self.versions", "source": "juraj-google-style"}
{"code": "def broadcast_tensor(self, tensor):\n    return array_ops.gather(tensor, self.gather_index)", "docstring": "Broadcast from a dense tensor.\n\nIt is assumed that the first axis of the dense tensor is indexed by the\nsource shape, and at the end, the first axis of the dense tensor is\nindexed by the destination shape.\n\nArgs:\ntensor: a dense tensor.\n\nReturns:\nA dense tensor.", "source": "github-repos"}
{"code": "def write_tabular(obj, filepath):\n    (_, fn, ext) = splitext2(filepath)\n    if (ext == '.h5'):\n        _write_tabular_h5(obj, filepath)\n    elif (ext == '.pkl'):\n        _write_tabular_pickle(obj, filepath)\n    else:\n        raise NotImplementedError", "docstring": "Write tabular object in HDF5 or pickle format\n\nArgs:\nobj (array or DataFrame): tabular object to write\nfilepath (path-like): path to write to; must end in '.h5' or '.pkl'", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_buffer = utils.BytearrayStream()\n\n        if self._unique_identifier:\n            self._unique_identifier.write(\n                local_buffer,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_buffer.length()\n        super(GetAttributeListRequestPayload, self).write(\n            output_buffer,\n            kmip_version=kmip_version\n        )\n        output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the GetAttributeList request payload to a\nstream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def register(self, table):\n    if table.table_type.is_system:\n        raise ValueError('Cannot add system table to catalog')\n    if (not table.table_type.is_shared):\n        raise ValueError('Cannot add local table to catalog')\n    if table.is_substitute:\n        raise ValueError('Cannot add substitute table to catalog')\n    versions = self.__tables.get(table.name)\n    if (versions is None):\n        versions = {}\n        self.__tables[table.name] = versions\n    versions[table.version] = table", "docstring": "Adds a shared table to the catalog.\n\nArgs:\ntable (SymbolTable): A non-system, shared symbol table.", "source": "codesearchnet"}
{"code": "def insert_values_in_args(args, kwargs, values):\n    v_iter = iter(values)\n    new_args = [next(v_iter) if isinstance(arg, ArgumentPlaceholder) else arg for arg in args]\n    new_kwargs = dict(((k, next(v_iter)) if isinstance(v, ArgumentPlaceholder) else (k, v) for k, v in sorted(kwargs.items())))\n    return (new_args, new_kwargs)", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nReplaces all placeholders in args/kwargs with actual values.\n\nArgs:\nargs: A list of positional arguments.\nkwargs: A dictionary of keyword arguments.\nvalues: A list of values that will be used to replace placeholder values.\n\nReturns:\nA 2-tuple containing a modified list of positional arguments, and a\nmodified dictionary of keyword arguments.", "source": "github-repos"}
{"code": "def _TextJustify(self, text, col_size):\n    result = []\n    if ('\\n' in text):\n        for paragraph in text.split('\\n'):\n            result.extend(self._TextJustify(paragraph, col_size))\n        return result\n    wrapper = textwrap.TextWrapper(width=(col_size - 2), break_long_words=False, expand_tabs=False)\n    try:\n        text_list = wrapper.wrap(text)\n    except ValueError:\n        raise TableError('Field too small (minimum width: 3)')\n    if (not text_list):\n        return [(' ' * col_size)]\n    for current_line in text_list:\n        stripped_len = len(terminal.StripAnsiText(current_line))\n        ansi_color_adds = (len(current_line) - stripped_len)\n        if ((stripped_len + 2) > col_size):\n            raise TableError('String contains words that do not fit in column.')\n        result.append((' %-*s' % (((col_size - 1) + ansi_color_adds), current_line)))\n    return result", "docstring": "Formats text within column with white space padding.\n\nA single space is prefixed, and a number of spaces are added as a\nsuffix such that the length of the resultant string equals the col_size.\n\nIf the length of the text exceeds the column width available then it\nis split into words and returned as a list of string, each string\ncontains one or more words padded to the column size.\n\nArgs:\ntext: String of text to format.\ncol_size: integer size of column to pad out the text to.\n\nReturns:\nList of strings col_size in length.\n\nRaises:\nTableError: If col_size is too small to fit the words in the text.", "source": "codesearchnet"}
{"code": "def set_all_file_column_labels(self, xlabel=None, ylabel=None):\n    if (xlabel is not None):\n        self.general.x_column_label = xlabel\n    if (ylabel is not None):\n        self.general.y_column_label = ylabel\n    if ((xlabel is None) and (ylabel is None)):\n        warnings.warn(('is not specifying x or y lables even' + 'though column labels function is called.'), UserWarning)\n    return", "docstring": "Indicate general x,y column labels.\n\nThis sets the general x and y column labels into data files for all plots.\nIt can be overridden for specific plots.\n\nArgs:\nxlabel/ylabel (str, optional): String indicating column label for x,y values\ninto the data files. Default is None.\n\nRaises:\nUserWarning: If xlabel and ylabel are both not specified,\nThe user will be alerted, but the code will not stop.", "source": "codesearchnet"}
{"code": "def VerifyGitkitToken(self, jwt):\n    \n    certs = self.rpc_helper.GetPublicCert()\n    crypt.MAX_TOKEN_LIFETIME_SECS = 30 * 86400  \n    parsed = None\n    for aud in filter(lambda x: x is not None, [self.project_id, self.client_id]):\n      try:\n        parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)\n      except crypt.AppIdentityError as e:\n        if \"Wrong recipient\" not in e.message:\n          return None\n      if parsed:\n        return GitkitUser.FromToken(parsed)\n    return None", "docstring": "Verifies a Gitkit token string.\n\nArgs:\njwt: string, the token to be checked\n\nReturns:\nGitkitUser, if the token is valid. None otherwise.", "source": "juraj-google-style"}
{"code": "def _CheckLine(self, line):\n    \n    for rule in self._cur_state:\n      matched = self._CheckRule(rule, line)\n      if matched:\n        for value in matched.groupdict():\n          self._AssignVar(matched, value)\n\n        if self._Operations(rule):\n          \n          if rule.new_state:\n            if rule.new_state not in ('End', 'EOF'):\n              self._cur_state = self.states[rule.new_state]\n            self._cur_state_name = rule.new_state\n          break", "docstring": "Passes the line through each rule until a match is made.\n\nArgs:\nline: A string, the current input line.", "source": "juraj-google-style"}
{"code": "def GetValueLength(rd, pos):\n  \n  rd = bytearray(rd)\n  key = rd[pos]\n  if key == LONG_ITEM_ENCODING:\n    \n    \n    \n    if pos + 1 < len(rd):\n      return (3, rd[pos + 1])\n    else:\n      raise errors.HidError('Malformed report descriptor')\n\n  else:\n    \n    \n    \n    \n    code = key & 0x03\n    if code <= 0x02:\n      return (1, code)\n    elif code == 0x03:\n      return (1, 4)\n\n  raise errors.HidError('Cannot happen')", "docstring": "Get value length for a key in rd.\n\nFor a key at position pos in the Report Descriptor rd, return the length\nof the associated value.  This supports both short and long format\nvalues.\n\nArgs:\nrd: Report Descriptor\npos: The position of the key in rd.\n\nReturns:\n(key_size, data_len) where key_size is the number of bytes occupied by\nthe key and data_len is the length of the value associated by the key.", "source": "juraj-google-style"}
{"code": "def get_associated_profiles(self):\n    uri = '{}/associatedProfiles'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Gets the URIs of profiles which are using an Ethernet network.\n\nArgs:\nid_or_uri: Can be either the logical interconnect group id or the logical interconnect group uri\n\nReturns:\nlist: URIs of the associated profiles.", "source": "codesearchnet"}
{"code": "def add_multiple(self, flags):\n        \n        if not isinstance(flags, list):\n            raise TypeError(\"Expected list of flags, got object of type{}\".format(type(flags)))\n        for flag in flags:\n            if isinstance(flag, Flag):\n                self.add_item(flag)\n            elif isinstance(flag, tuple):\n                try:\n                    item = Flag(*flag)\n                    self.add_item(item)\n                except TypeError as e:\n                    raise TypeError(\"Invalid arguments to initialize a flag definition, expect ({0} [, {1}]) but got {3}\"\n                        .format(\", \".join(Flag.REQUIRED_FIELDS),\n                        \", \".join(Flag.OPTIONAL_FIELDS), flag))", "docstring": "Add multiple command line flags\n\nArguments:\nflags (:obj:`list` of :obj:`tuple`): List of flags\nin tuples (name, flag_type, description, (optional) default)\n\nRaises:\nTypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError", "source": "juraj-google-style"}
{"code": "def __init__(self, target_pixels=None, **kwargs):\n    \n    super(OpenImagesV4Config, self).__init__(**kwargs)\n    self._target_pixels = target_pixels", "docstring": "BuilderConfig for OpenImagesV4.\n\nArgs:\ntarget_pixels: If given, rescale the images so that the number of pixels\nis roughly this value.\n**kwargs: keyword arguments forward to super.", "source": "juraj-google-style"}
{"code": "def get_structures(self, primitive=True):\n    structures = []\n    for d in self._cif.data.values():\n        try:\n            s = self._get_structure(d, primitive)\n            if s:\n                structures.append(s)\n        except (KeyError, ValueError) as exc:\n            self.errors.append(str(exc))\n            warnings.warn(str(exc))\n    if self.errors:\n        warnings.warn('Issues encountered while parsing CIF:')\n        for error in self.errors:\n            warnings.warn(error)\n    if (len(structures) == 0):\n        raise ValueError('Invalid cif file with no structures!')\n    return structures", "docstring": "Return list of structures in CIF file. primitive boolean sets whether a\nconventional cell structure or primitive cell structure is returned.\n\nArgs:\nprimitive (bool): Set to False to return conventional unit cells.\nDefaults to True. With magnetic CIF files, will return primitive\nmagnetic cell which may be larger than nuclear primitive cell.\n\nReturns:\nList of Structures.", "source": "codesearchnet"}
{"code": "def loads(serialized_messages):\n    \n    try:\n        messages_dicts = json.loads(serialized_messages)\n    except ValueError:\n        _log.error(\"Loading serialized messages failed.\")\n        raise\n\n    messages = []\n    for message_dict in messages_dicts:\n        try:\n            headers = message_dict[\"headers\"]\n        except KeyError:\n            _log.error(\"Message saved without headers.\")\n            raise\n\n        try:\n            MessageClass = get_class(headers[\"fedora_messaging_schema\"])\n        except KeyError:\n            _log.error(\"Message (headers=%r) saved without a schema header.\", headers)\n            raise\n\n        try:\n            body = message_dict[\"body\"]\n        except KeyError:\n            _log.error(\"Message saved without body.\")\n            raise\n\n        try:\n            id = message_dict[\"id\"]\n        except KeyError:\n            _log.error(\"Message saved without id.\")\n            raise\n\n        try:\n            queue = message_dict[\"queue\"]\n        except KeyError:\n            _log.warning(\"Message saved without queue.\")\n            queue = None\n\n        try:\n            topic = message_dict[\"topic\"]\n        except KeyError:\n            _log.error(\"Message saved without topic.\")\n            raise\n\n        try:\n            severity = headers[\"fedora_messaging_severity\"]\n        except KeyError:\n            _log.error(\"Message saved without a severity.\")\n            raise\n\n        message = MessageClass(\n            body=body, topic=topic, headers=headers, severity=severity\n        )\n        try:\n            message.validate()\n            _log.debug(\"Successfully validated message %r\", message)\n        except jsonschema.exceptions.ValidationError as e:\n            _log.error(\"Message validation of %r failed: %r\", message, e)\n            raise ValidationError(e)\n\n        message.queue = queue\n        message.id = id\n        messages.append(message)\n\n    return messages", "docstring": "Deserialize messages from a JSON formatted str\n\nArgs:\nserialized_messages (JSON str):\n\nReturns:\nlist: Deserialized message objects.\n\nRaises:\nValidationError: If deserialized message validation failed.\nKeyError: If serialized_messages aren't properly serialized.\nValueError: If serialized_messages is not valid JSON", "source": "juraj-google-style"}
{"code": "def context(name=None):\n\n    def _context(cls):\n        annotated(cls, name)\n        cls.context = True\n        return cls\n    return _context", "docstring": "Declare that a class defines a context.\n\nContexts are for use with HierarchicalShell for discovering\nand using functionality from the command line.\n\nArgs:\nname (str): Optional name for this context if you don't want\nto just use the class name.", "source": "codesearchnet"}
{"code": "def lighten(self, amount):\n        \n        hsl = self.to_hsl()\n        hsl.l = self.clamp(hsl.l + amount, 1)\n        return self.from_hsl(hsl)", "docstring": "Lighten (increase the luminance) of this color.\n\nArgs:\namount (float) :\nAmount to increase the luminance by (clamped above zero)\n\nReturns:\nColor", "source": "juraj-google-style"}
{"code": "def __init__(self, timestamp, timestamp_description, data_type=None):\n    \n    super(TimestampEvent, self).__init__()\n    self.timestamp = timestamp\n    self.timestamp_desc = timestamp_description\n\n    if data_type:\n      self.data_type = data_type", "docstring": "Initializes an event.\n\nArgs:\ntimestamp (int): timestamp, which contains the number of microseconds\nsince January 1, 1970, 00:00:00 UTC.\ntimestamp_description (str): description of the meaning of the timestamp\nvalue.\ndata_type (Optional[str]): event data type. If the data type is not set\nit is derived from the DATA_TYPE class attribute.", "source": "juraj-google-style"}
{"code": "def bullet_base_pose_to_world_pose(self, pose_in_base):\n    pose_in_base = T.pose2mat(pose_in_base)\n    base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])\n    base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])\n    base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))\n    pose_in_world = T.pose_in_A_to_pose_in_B(pose_A=pose_in_base, pose_A_in_B=base_pose_in_world)\n    return T.mat2pose(pose_in_world)", "docstring": "Convert a pose in the base frame to a pose in the world frame.\n\nArgs:\npose_in_base: a (pos, orn) tuple.\n\nReturns:\npose_in world: a (pos, orn) tuple.", "source": "codesearchnet"}
{"code": "def _TensorArrayReadGrad(op: ops.Operation, grad):\n    handle = op.inputs[0]\n    index = op.inputs[1]\n    flow = op.inputs[2]\n    dtype = op.get_attr('dtype')\n    grad_source = _GetGradSource(grad)\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    w_g = g.write(index, grad)\n    return [None, None, w_g.flow]", "docstring": "Gradient for TensorArrayRead.\n\nArgs:\nop: Forward TensorArrayRead op.\ngrad: Gradient `Tensor` to TensorArrayRead.\n\nReturns:\nA flow `Tensor`, which can be used in control dependencies to\nforce the write of `grad` to the gradient `TensorArray`.", "source": "github-repos"}
{"code": "def time_range_to_frame_range(self, start, end, sr):\n        \n\n        start_sample = seconds_to_sample(start, sr)\n        end_sample = seconds_to_sample(end, sr)\n\n        return self.sample_to_frame_range(start_sample)[0], self.sample_to_frame_range(end_sample - 1)[1]", "docstring": "Calculate the frames containing samples from the given time range in seconds.\n\nArgs:\nstart (float): Start time in seconds.\nend (float): End time in seconds.\nsr (int): The sampling rate to use for time-to-sample conversion.\n\nReturns:\ntuple: A tuple containing the start and end (exclusive) frame indices.", "source": "juraj-google-style"}
{"code": "def get_last_release_time(name, paths=None):\n    entries = _get_families(name, paths)\n    max_time = 0\n    for (repo, family_resource) in entries:\n        time_ = repo.get_last_release_time(family_resource)\n        if (time_ == 0):\n            return 0\n        max_time = max(max_time, time_)\n    return max_time", "docstring": "Returns the most recent time this package was released.\n\nNote that releasing a variant into an already-released package is also\nconsidered a package release.\n\nReturns:\nint: Epoch time of last package release, or zero if this cannot be\ndetermined.", "source": "codesearchnet"}
{"code": "def _add_individual(self, ind_obj):\n        \n        logger.debug(\"Adding individual {0} to plugin\".format(ind_obj.ind_id))\n        self.individual_objs.append(ind_obj)", "docstring": "Add a individual to the adapter\n\nArgs:\nind_obj (puzzle.models.Individual)", "source": "juraj-google-style"}
{"code": "async def verify_docker_worker_task(chain, link):\n    if (chain != link):\n        check_interactive_docker_worker(link)\n        verify_docker_image_sha(chain, link)", "docstring": "Docker-worker specific checks.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\nlink (ChainOfTrust or LinkOfTrust): the trust object for the signing task.\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def _read(**kwargs):\n    \n    pd_obj = BaseFactory.read_csv(**kwargs)\n    \n    if isinstance(pd_obj, pandas.io.parsers.TextFileReader):\n        reader = pd_obj.read\n        pd_obj.read = lambda *args, **kwargs: DataFrame(\n            query_compiler=reader(*args, **kwargs)\n        )\n        return pd_obj\n    return DataFrame(query_compiler=pd_obj)", "docstring": "Read csv file from local disk.\nArgs:\nfilepath_or_buffer:\nThe filepath of the csv file.\nWe only support local files for now.\nkwargs: Keyword arguments in pandas.read_csv", "source": "juraj-google-style"}
{"code": "def _set_value_test(self, filler_pipeline_key, value):\n    \n    self.filled = True\n    self._filler_pipeline_key = filler_pipeline_key\n    self._fill_datetime = datetime.datetime.utcnow()\n    \n    self._value = json.loads(json.dumps(\n        value, cls=mr_util.JsonEncoder), cls=mr_util.JsonDecoder)", "docstring": "Sets the value of this slot for use in testing.\n\nArgs:\nfiller_pipeline_key: The db.Key of the _PipelineRecord that filled\nthis slot.\nvalue: The serializable value set for this slot.", "source": "juraj-google-style"}
{"code": "def get_label_set(self, type_str=None):\n    return {v.label_str for v in self.node_gen if (type_str in (None, v.type_str))}", "docstring": "Get a set of label_str for the tree rooted at this node.\n\nArgs:\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include\ninformation from nodes of that type.\n\nReturns:\nset: The labels of the nodes leading up to this node from the root.", "source": "codesearchnet"}
{"code": "def set_data(self, data):\n    for name in self._fields:\n        setattr(self, name, data.get(name))\n    return self", "docstring": "Fills form with data\n\nArgs:\ndata (dict): Data to assign form fields.\n\nReturns:\nSelf. Form object.", "source": "codesearchnet"}
{"code": "def _convert_json(obj):\n    if isinstance(obj, dict):\n        return {_convert_json(key): _convert_json(val) for (key, val) in six.iteritems(obj)}\n    elif (isinstance(obj, list) and (len(obj) == 2)):\n        first = obj[0]\n        second = obj[1]\n        if ((first == 'set') and isinstance(second, list)):\n            return [_convert_json(elem) for elem in second]\n        elif ((first == 'map') and isinstance(second, list)):\n            for elem in second:\n                if ((not isinstance(elem, list)) or (len(elem) != 2)):\n                    return obj\n            return {elem[0]: _convert_json(elem[1]) for elem in second}\n        else:\n            return obj\n    elif isinstance(obj, list):\n        return [_convert_json(elem) for elem in obj]\n    else:\n        return obj", "docstring": "Converts from the JSON output provided by ovs-vsctl into a usable Python\nobject tree. In particular, sets and maps are converted from lists to\nactual sets or maps.\n\nArgs:\nobj: Object that shall be recursively converted.\n\nReturns:\nConverted version of object.", "source": "codesearchnet"}
{"code": "def filter_keys_by_dataset_id(did, key_container):\n    \n    keys = iter(key_container)\n\n    for key in DATASET_KEYS:\n        if getattr(did, key) is not None:\n            if key == \"wavelength\":\n                keys = [k for k in keys\n                        if (getattr(k, key) is not None and\n                            DatasetID.wavelength_match(getattr(k, key),\n                                                       getattr(did, key)))]\n            else:\n                keys = [k for k in keys\n                        if getattr(k, key) is not None and getattr(k, key)\n                        == getattr(did, key)]\n\n    return keys", "docstring": "Filer provided key iterable by the provided `DatasetID`.\n\nNote: The `modifiers` attribute of `did` should be `None` to allow for\n**any** modifier in the results.\n\nArgs:\ndid (DatasetID): Query parameters to match in the `key_container`.\nkey_container (iterable): Set, list, tuple, or dict of `DatasetID`\nkeys.\n\nReturns (list): List of keys matching the provided parameters in no\nspecific order.", "source": "juraj-google-style"}
{"code": "def _ParseRecordLogline(self, parser_mediator, structure):\n    date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()\n    try:\n        datetime_iso8601 = self._GetISO8601String(structure.date_time)\n        date_time.CopyFromStringISO8601(datetime_iso8601)\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time))\n        return\n    event_data = GoogleDriveSyncLogEventData()\n    event_data.log_level = structure.log_level\n    event_data.pid = structure.pid\n    event_data.thread = structure.thread\n    event_data.source_code = structure.source_code\n    event_data.message = structure.message.replace('\\n', ' ')\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a logline record structure and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "codesearchnet"}
{"code": "def device_type_from_string(cl_device_type_str):\n    cl_device_type_str = cl_device_type_str.upper()\n    if hasattr(cl.device_type, cl_device_type_str):\n        return getattr(cl.device_type, cl_device_type_str)\n    return None", "docstring": "Converts values like ``gpu`` to a pyopencl device type string.\n\nSupported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned.\n\nArgs:\ncl_device_type_str (str): The string we want to convert to a device type.\n\nReturns:\ncl.device_type: the pyopencl device type.", "source": "codesearchnet"}
{"code": "def _get_node(self, loc_descriptor, create_non_existing_nodes=False):\n        \n        node = self._root_node\n\n        for location in loc_descriptor.generate_all_sub_locations():\n            child = node.get_child_node_or_default(location, None)\n            if child is None:\n                if not create_non_existing_nodes:\n                    raise RuntimeError(\"Node at location '%s' in '%s' does not exist!\" % (location, loc_descriptor.to_string()))\n                else:\n                    \n                    child = TreeMapNode(None)\n                    node.set_child_node(location, child)\n                    self._nbr_of_nodes += 1\n            node = child\n\n        return node", "docstring": "Get node corresponding to last location in a :class:`LocationDescriptor` object.\n\nArgs:\nloc_descriptor: A  :class:`LocationDescriptor` object\ncreate_non_existing_nodes (bool): Do we create non existing nodes along the way (including last node)?\n\nRaises:\nRuntimeError if a node along the path given in by the :class:`LocationDescriptor` object does not exist\n**if** ``create_non_existing_nodes`` is set to ``False``.", "source": "juraj-google-style"}
{"code": "def enable_nested_function_shape_inference(fn: _F) -> _F:\n\n    def wrapper(*args, **kwargs):\n        if flags.config().enable_nested_function_shape_inference.value():\n            return fn(*args, **kwargs)\n        flags.config().enable_nested_function_shape_inference.reset(True)\n        try:\n            return fn(*args, **kwargs)\n        finally:\n            flags.config().enable_nested_function_shape_inference.reset(False)\n    return wrapper", "docstring": "Decorator for enabling nested_function_shape_inference on a test.\n\nThis function returns a decorator intended to be applied to test methods in\na `tf.test.TestCase` class. Doing so will set nested_function_shape_inference,\nreset the context, execute the test, then reset the context to the state\nit was in prior to this test.\n\nExample:\n\nclass MyTest(test.TestCase):\n\n@enable_nested_function_shape_inference\ndef testFoo(self):\n...\n\nArgs:\nfn: the function to be wrapped.\n\nReturns:\nThe wrapped function.", "source": "github-repos"}
{"code": "def stack_template_key_name(blueprint):\n    name = blueprint.name\n    return ('stack_templates/%s/%s-%s.json' % (blueprint.context.get_fqn(name), name, blueprint.version))", "docstring": "Given a blueprint, produce an appropriate key name.\n\nArgs:\nblueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\nobject to create the key from.\n\nReturns:\nstring: Key name resulting from blueprint.", "source": "codesearchnet"}
{"code": "def expression(value):\n    if isinstance(value, Expression):\n        return Expression(value._type, value._value)\n    if hasattr(value, 'spl_json'):\n        sj = value.spl_json()\n        return Expression(sj['type'], sj['value'])\n    return Expression('splexpr', value)", "docstring": "Create an SPL expression.\n\nArgs:\nvalue: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value.\n\nReturns:\nExpression: SPL expression from `value`.", "source": "codesearchnet"}
{"code": "def weights_prepend_inputs_to_targets(labels):\n  \n  past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)\n  nonzero = to_float(labels)\n  return to_float(tf.not_equal(past_first_zero * nonzero, 0))", "docstring": "Assign weight 1.0 to only the \"targets\" portion of the labels.\n\nWeight 1.0 is assigned to all nonzero labels past the first zero.\nSee prepend_mode in common_hparams.py\n\nArgs:\nlabels: A Tensor of int32s.\n\nReturns:\nA Tensor of floats.", "source": "juraj-google-style"}
{"code": "def read_excitation_energies(self):\n    transitions = list()\n    with zopen(self.filename, 'r') as f:\n        line = f.readline()\n        td = False\n        while (line != ''):\n            if re.search('^\\\\sExcitation energies and oscillator strengths:', line):\n                td = True\n            if td:\n                if re.search('^\\\\sExcited State\\\\s*\\\\d', line):\n                    val = [float(v) for v in float_patt.findall(line)]\n                    transitions.append(tuple(val[0:3]))\n            line = f.readline()\n    return transitions", "docstring": "Read a excitation energies after a TD-DFT calculation.\n\nReturns:\n\nA list: A list of tuple for each transition such as\n[(energie (eV), lambda (nm), oscillatory strength), ... ]", "source": "codesearchnet"}
{"code": "def __init__(self, callback):\n    \n    super(ThreadedXMLRPCServer, self).__init__(callback)\n    self._rpc_thread = None\n    self._xmlrpc_server = None", "docstring": "Initialize a threaded RPC server.\n\nArgs:\ncallback (function): callback function to invoke on get status RPC\nrequest.", "source": "juraj-google-style"}
{"code": "def load_filename(self, filename, index=None):\n        \n        filename = str(filename)  \n        if index is None:\n            index = self._get_tab_index()\n        page = self.pages[index]\n\n        \n        self.load_dir, _ = os.path.split(filename)\n\n        clss = page.clss_load\n        if len(clss) == 1:\n            \n            \n            f = clss[0]()\n            f.load(filename)\n        else:\n            \n            \n            f = f311.load_with_classes(filename, page.clss_load)\n            if f is None:\n                raise RuntimeError(\"Could not load '{0!s}'\".format(filename))\n\n        self.load(f, index)", "docstring": "Loads file given filename\n\nArgs:\nfilename:\nindex: tab index to load file into. If not passed, loads into current tab", "source": "juraj-google-style"}
{"code": "def orient_averaged_adaptive(tm):\n    \n    S = np.zeros((2,2), dtype=complex)\n    Z = np.zeros((4,4))\n\n    def Sfunc(beta, alpha, i, j, real):\n        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)\n        s = S_ang[i,j].real if real else S_ang[i,j].imag            \n        return s * tm.or_pdf(beta)\n\n    ind = range(2)\n    for i in ind:\n        for j in ind:\n            S.real[i,j] = dblquad(Sfunc, 0.0, 360.0, \n                lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0        \n            S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0, \n                lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0\n\n    def Zfunc(beta, alpha, i, j):\n        (S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)\n        return Z_ang[i,j] * tm.or_pdf(beta)\n\n    ind = range(4)\n    for i in ind:\n        for j in ind:\n            Z[i,j] = dblquad(Zfunc, 0.0, 360.0, \n                lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0\n\n    return (S, Z)", "docstring": "Compute the T-matrix using variable orientation scatterers.\n\nThis method uses a very slow adaptive routine and should mainly be used\nfor reference purposes. Uses the set particle orientation PDF, ignoring\nthe alpha and beta attributes.\n\nArgs:\ntm: TMatrix (or descendant) instance\n\nReturns:\nThe amplitude (S) and phase (Z) matrices.", "source": "juraj-google-style"}
{"code": "class GroundingDinoImageLoss(ImageLoss):\n\n    def __init__(self, matcher, focal_alpha, losses):\n        nn.Module.__init__(self)\n        self.matcher = matcher\n        self.focal_alpha = focal_alpha\n        self.losses = losses\n\n    def _get_target_classes_one_hot(self, outputs, targets, indices):\n        \n        logits = outputs['logits']\n        class_labels = torch.cat([target['class_labels'][J] + len(outputs['label_maps'][i]) if i > 0 else target['class_labels'][J] for i, (target, (_, J)) in enumerate(zip(targets, indices))])\n        label_maps = torch.cat(outputs['label_maps'], dim=0)\n        idx = self._get_source_permutation_idx(indices)\n        target_classes_onehot = torch.zeros_like(logits, device=logits.device, dtype=torch.long)\n        target_classes_onehot[idx] = label_maps[class_labels].to(torch.long)\n        return target_classes_onehot\n\n    def loss_labels(self, outputs, targets, indices, num_boxes):\n        \n        if 'logits' not in outputs:\n            raise KeyError('No logits were found in the outputs')\n        if 'text_mask' not in outputs:\n            raise KeyError('No text_mask were found in the outputs')\n        target_classes_onehot = self._get_target_classes_one_hot(outputs, targets, indices)\n        source_logits = outputs['logits']\n        text_mask = outputs['text_mask']\n        source_logits = torch.masked_select(source_logits, text_mask)\n        target_classes_onehot = torch.masked_select(target_classes_onehot, text_mask)\n        target_classes_onehot = target_classes_onehot.float()\n        loss_ce = sigmoid_focal_loss(inputs=source_logits, targets=target_classes_onehot, num_boxes=num_boxes, alpha=self.focal_alpha, gamma=2)\n        losses = {'loss_ce': loss_ce}\n        return losses", "docstring": "This class computes the losses for `GroundingDinoForObjectDetection`. The process happens in two steps: 1) we\ncompute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of\nmatched ground-truth / prediction (supervise class and box).\n\nArgs:\nmatcher (`GroundingDinoHungarianMatcher`):\nModule able to compute a matching between targets and proposals.\nfocal_alpha (`float`):\nAlpha parameter in focal loss.\nlosses (`List[str]`):\nList of all the losses to be applied. See `get_loss` for a list of all available losses.", "source": "github-repos"}
{"code": "def __init__(self, minimum=None, maximum=None):\n        \n        super(IntegerTypeChecker, self).__init__(base_type=int)\n        self.minimum = minimum\n        self.maximum = maximum", "docstring": "Initialization method.\n\nArgs:\nminimum (int): a minimum value (included).\nmaximum (int): a maximum value (included).", "source": "juraj-google-style"}
{"code": "def fill_datetime(self):\n    if (not self.filled):\n        raise SlotNotFilledError(('Slot with name \"%s\", key \"%s\" not yet filled.' % (self.name, self.key)))\n    return self._fill_datetime", "docstring": "Returns when the slot was filled.\n\nReturns:\nA datetime.datetime.\n\nRaises:\nSlotNotFilledError if the value hasn't been filled yet.", "source": "codesearchnet"}
{"code": "def shannon_entropy(time_series):\n    if (not isinstance(time_series, str)):\n        time_series = list(time_series)\n    data_set = list(set(time_series))\n    freq_list = []\n    for entry in data_set:\n        counter = 0.0\n        for i in time_series:\n            if (i == entry):\n                counter += 1\n        freq_list.append((float(counter) / len(time_series)))\n    ent = 0.0\n    for freq in freq_list:\n        ent += (freq * np.log2(freq))\n    ent = (- ent)\n    return ent", "docstring": "Return the Shannon Entropy of the sample data.\n\nArgs:\ntime_series: Vector or string of the sample data\n\nReturns:\nThe Shannon Entropy as float value", "source": "codesearchnet"}
{"code": "def rank_internal(input, name=None, optimize=True):\n    with ops.name_scope(name, 'Rank', [input]) as name:\n        if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n            return gen_array_ops.size(input.dense_shape, name=name)\n        else:\n            input = ops.convert_to_tensor(input)\n            input_shape = input.get_shape()\n            if optimize and input_shape.ndims is not None:\n                return constant(input_shape.ndims, dtypes.int32, name=name)\n            return gen_array_ops.rank(input, name=name)", "docstring": "Returns the rank of a tensor.\n\nArgs:\ninput: A `Tensor` or `SparseTensor`.\nname: A name for the operation (optional).\noptimize: if true, encode the rank as a constant when possible.\n\nReturns:\nA `Tensor` of type `int32`.", "source": "github-repos"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    if position_ids is None:\n        batch_size, sequence_length = input_ids.shape\n        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration\n\n>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\"facebook/blenderbot-400M-distill\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot-400M-distill\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"}
{"code": "def create_bagit_stream(dir_name, payload_info_list):\n    \n    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)\n    _add_path(dir_name, payload_info_list)\n    payload_byte_count, payload_file_count = _add_payload_files(\n        zip_file, payload_info_list\n    )\n    tag_info_list = _add_tag_files(\n        zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count\n    )\n    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)\n    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)\n    return zip_file", "docstring": "Create a stream containing a BagIt zip archive.\n\nArgs:\ndir_name : str\nThe name of the root directory in the zip file, under which all the files\nare placed (avoids \"zip bombs\").\n\npayload_info_list: list\nList of payload_info_dict, each dict describing a file.\n\n- keys: pid, filename, iter, checksum, checksum_algorithm\n- If the filename is None, the pid is used for the filename.", "source": "juraj-google-style"}
{"code": "def process_document_events(events, use_buffers=True):\n    \n\n    json_events = []\n    references = set()\n\n    buffers = [] if use_buffers else None\n\n    for event in events:\n        json_events.append(event.generate(references, buffers))\n\n    json = {\n        'events'     : json_events,\n        'references' : references_json(references),\n    }\n\n    return serialize_json(json), buffers if use_buffers else []", "docstring": "Create a JSON string describing a patch to be applied as well as\nany optional buffers.\n\nArgs:\nevents : list of events to be translated into patches\n\nReturns:\nstr, list :\nJSON string which can be applied to make the given updates to obj\nas well as any optional buffers", "source": "juraj-google-style"}
{"code": "def get_distance_and_image(self, frac_coords1: Vector3Like, frac_coords2: Vector3Like, jimage: Optional[Union[(List[int], np.ndarray)]]=None) -> Tuple[(float, np.ndarray)]:\n    if (jimage is None):\n        (v, d2) = pbc_shortest_vectors(self, frac_coords1, frac_coords2, return_d2=True)\n        fc = ((self.get_fractional_coords(v[0][0]) + frac_coords1) - frac_coords2)\n        fc = np.array(np.round(fc), dtype=np.int)\n        return (np.sqrt(d2[(0, 0)]), fc)\n    jimage = np.array(jimage)\n    mapped_vec = self.get_cartesian_coords(((jimage + frac_coords2) - frac_coords1))\n    return (np.linalg.norm(mapped_vec), jimage)", "docstring": "Gets distance between two frac_coords assuming periodic boundary\nconditions. If the index jimage is not specified it selects the j\nimage nearest to the i atom and returns the distance and jimage\nindices in terms of lattice vector translations. If the index jimage\nis specified it returns the distance between the frac_coords1 and\nthe specified jimage of frac_coords2, and the given jimage is also\nreturned.\n\nArgs:\nfcoords1 (3x1 array): Reference fcoords to get distance from.\nfcoords2 (3x1 array): fcoords to get distance from.\njimage (3x1 array): Specific periodic image in terms of\nlattice translations, e.g., [1,0,0] implies to take periodic\nimage that is one a-lattice vector away. If jimage is None,\nthe image that is nearest to the site is found.\n\nReturns:\n(distance, jimage): distance and periodic lattice translations\nof the other site for which the distance applies. This means that\nthe distance between frac_coords1 and (jimage + frac_coords2) is\nequal to distance.", "source": "codesearchnet"}
{"code": "def get_default_backend_config(appdirs):\n    \n    return {\n        'store': 'sqlalchemy',\n        'day_start': datetime.time(5, 30, 0),\n        'fact_min_delta': 1,\n        'tmpfile_path': os.path.join(appdirs.user_data_dir, '{}.tmp'.format(appdirs.appname)),\n        'db_engine': 'sqlite',\n        'db_path': os.path.join(appdirs.user_data_dir, '{}.sqlite'.format(appdirs.appname)),\n    }", "docstring": "Return a default config dictionary.\n\nArgs:\nappdirs (HamsterAppDirs): ``HamsterAppDirs`` instance encapsulating the apps details.\n\nReturns:\ndict: Dictionary with a default configuration.\n\nNote:\nThose defaults are independent of the particular config-store.", "source": "juraj-google-style"}
{"code": "def get(self, context_id, address_list):\n    if (context_id not in self._contexts):\n        return []\n    for add in address_list:\n        if (not self.address_is_valid(address=add)):\n            raise AuthorizationException(address=add)\n    context = self._contexts[context_id]\n    addresses_in_ctx = [add for add in address_list if (add in context)]\n    addresses_not_in_ctx = list((set(address_list) - set(addresses_in_ctx)))\n    values = context.get(addresses_in_ctx)\n    values_list = list(zip(addresses_in_ctx, values))\n    if addresses_not_in_ctx:\n        for address in addresses_not_in_ctx:\n            context.validate_read(address)\n        try:\n            (address_values, reads) = self._find_address_values_in_chain(base_contexts=[context_id], addresses_to_find=addresses_not_in_ctx)\n        except KeyError:\n            return []\n        values_list.extend(address_values)\n        if reads:\n            tree = MerkleDatabase(self._database, context.merkle_root)\n            add_values = []\n            for add in reads:\n                value = None\n                try:\n                    value = tree.get(add)\n                except KeyError:\n                    pass\n                add_values.append((add, value))\n            values_list.extend(add_values)\n        values_list.sort(key=(lambda x: address_list.index(x[0])))\n    return values_list", "docstring": "Get the values associated with list of addresses, for a specific\ncontext referenced by context_id.\n\nArgs:\ncontext_id (str): the return value of create_context, referencing\na particular context.\naddress_list (list): a list of address strs\n\nReturns:\nvalues_list (list): a list of (address, value) tuples\n\nRaises:\nAuthorizationException: Raised when an address in address_list is\nnot authorized either by not being in the inputs for the\ntxn associated with this context, or it is under a namespace\nbut the characters that are under the namespace are not valid\naddress characters.", "source": "codesearchnet"}
{"code": "def ResolveFlats(dem, in_place=False):\n    if (type(dem) is not rdarray):\n        raise Exception('A richdem.rdarray or numpy.ndarray is required!')\n    if (not in_place):\n        dem = dem.copy()\n    _AddAnalysis(dem, 'ResolveFlats(dem, in_place={in_place})'.format(in_place=in_place))\n    demw = dem.wrap()\n    _richdem.rdResolveFlatsEpsilon(demw)\n    dem.copyFromWrapped(demw)\n    if (not in_place):\n        return dem", "docstring": "Attempts to resolve flats by imposing a local gradient\n\nArgs:\ndem          (rdarray):   An elevation model\nin_place (bool):   If True, the DEM is modified in place and there is\nno return; otherwise, a new, altered DEM is returned.\n\nReturns:\nDEM modified such that all flats drain.", "source": "codesearchnet"}
{"code": "def __init__(self, var_config, scope_config):\n    \n    self._substs = {}\n    self._var_config = var_config\n    self._scope_config = scope_config\n\n    for var_id, var_value in iteritems(var_config):\n      key = \"%%{var}%%\".format(var=var_id)\n      self._substs[key] = str(var_value)\n\n    for scope_id, var_config in iteritems(scope_config):\n      for var_id, var_value in iteritems(var_config):\n        key = \"%%{scope}.{var}%%\".format(scope=scope_id, var=var_id)\n        self._substs[key] = str(var_value)", "docstring": "Initializes the substitution environment.\n\nArgs:\nvar_config: A configuration (concrete values) of pattern variables.\nscope_config: A configuration (concrete values) of pattern scopes.", "source": "juraj-google-style"}
{"code": "def get_actions(self, parent_environ=None):\n        \n        interp = Python(target_environ={}, passive=True)\n        executor = self._create_executor(interp, parent_environ)\n        self._execute(executor)\n        return executor.actions", "docstring": "Get the list of rex.Action objects resulting from interpreting this\ncontext. This is provided mainly for testing purposes.\n\nArgs:\nparent_environ Environment to interpret the context within,\ndefaults to os.environ if None.\n\nReturns:\nA list of rex.Action subclass instances.", "source": "juraj-google-style"}
{"code": "def get_heading_encoding(response):\n    \n    encoding = wpull.protocol.http.util.parse_charset(\n        response.fields.get('content-type', ''))\n\n    if encoding:\n        return wpull.string.normalize_codec_name(encoding)\n    else:\n        return None", "docstring": "Return the document encoding from a HTTP header.\n\nArgs:\nresponse (Response): An instance of :class:`.http.Response`.\n\nReturns:\n``str``, ``None``: The codec name.", "source": "juraj-google-style"}
{"code": "def request(self, session=None):\n    try:\n        from .tcex_request import TcExRequest\n        r = TcExRequest(self, session)\n        if ((session is None) and self.default_args.tc_proxy_external):\n            self.log.info('Using proxy server for external request {}:{}.'.format(self.default_args.tc_proxy_host, self.default_args.tc_proxy_port))\n            r.proxies = self.proxies\n        return r\n    except ImportError as e:\n        self.handle_error(105, [e])", "docstring": "Return an instance of the Request Class.\n\nA wrapper on the Python Requests module that provides a different interface for creating\nrequests. The session property of this instance has built-in logging, session level\nretries, and preconfigured proxy configuration.\n\nReturns:\n(object): An instance of Request Class", "source": "codesearchnet"}
{"code": "def build_variant_query(self, query=None, category='snv', variant_type=['clinical']):\n    query = (query or {})\n    mongo_variant_query = {}\n    LOG.debug(('Building a mongo query for %s' % query))\n    if query.get('hgnc_symbols'):\n        mongo_variant_query['hgnc_symbols'] = {'$in': query['hgnc_symbols']}\n    mongo_variant_query['variant_type'] = {'$in': variant_type}\n    mongo_variant_query['category'] = category\n    rank_score = (query.get('rank_score') or 15)\n    mongo_variant_query['rank_score'] = {'$gte': rank_score}\n    LOG.debug(('Querying %s' % mongo_variant_query))\n    return mongo_variant_query", "docstring": "Build a mongo query across multiple cases.\nTranslate query options from a form into a complete mongo query dictionary.\n\nBeware that unindexed queries against a large variant collection will\nbe extremely slow.\n\nCurrently indexed query options:\nhgnc_symbols\nrank_score\nvariant_type\ncategory\n\nArgs:\nquery(dict): A query dictionary for the database, from a query form.\ncategory(str): 'snv', 'sv', 'str' or 'cancer'\nvariant_type(str): 'clinical' or 'research'\n\nReturns:\nmongo_query : A dictionary in the mongo query format.", "source": "codesearchnet"}
{"code": "def period_start_day(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `period_start_day`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `period_start_day`')\n    self._period_start_day = value", "docstring": "Corresponds to IDD Field `period_start_day`\n\nArgs:\nvalue (str): value for IDD Field `period_start_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def loss_labels(self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array]) -> Dict[str, Tensor]:\n    pred_logits = class_queries_logits\n    batch_size, num_queries, _ = pred_logits.shape\n    criterion = nn.CrossEntropyLoss(weight=self.empty_weight)\n    idx = self._get_predictions_permutation_indices(indices)\n    target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])\n    target_classes = torch.full((batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device)\n    target_classes[idx] = target_classes_o\n    pred_logits_transposed = pred_logits.transpose(1, 2)\n    loss_ce = criterion(pred_logits_transposed, target_classes)\n    losses = {'loss_cross_entropy': loss_ce}\n    return losses", "docstring": "Compute the losses related to the labels using cross entropy.\n\nArgs:\nclass_queries_logits (`torch.Tensor`):\nA tensor of shape `batch_size, num_queries, num_labels`\nclass_labels (`List[torch.Tensor]`):\nList of class labels of shape `(labels)`.\nindices (`Tuple[np.array])`:\nThe indices computed by the Hungarian matcher.\n\nReturns:\n`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:\n- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.", "source": "github-repos"}
{"code": "def ensure_dir(path):\n    \n    dirpath = os.path.dirname(path)\n    if dirpath and not os.path.exists(dirpath):\n        os.makedirs(dirpath)", "docstring": "Ensure directory exists.\n\nArgs:\npath(str): dir path", "source": "juraj-google-style"}
{"code": "def normalize_whitespace(text):\n    return re.sub('\\\\s+', ' ', text, flags=re.UNICODE).strip()", "docstring": "Returns the given text with outer whitespace removed and inner whitespace collapsed.\n\nArgs:\ntext (str): The text to normalize.\n\nReturns:\nstr: The normalized text.", "source": "codesearchnet"}
{"code": "def group_device_names(devices, group_size):\n    \n    num_devices = len(devices)\n    if group_size > num_devices:\n        raise ValueError(\n            \"only %d devices, but group_size=%d\" % (num_devices, group_size))\n    num_groups = (\n        num_devices \n                                     (num_devices % group_size != 0) else 0))\n    groups = [[] for i in range(num_groups)]\n    for i in range(0, num_groups * group_size):\n        groups[i % num_groups].append(devices[i % num_devices])\n    return groups", "docstring": "Group device names into groups of group_size.\n\nArgs:\ndevices: list of strings naming devices.\ngroup_size: int >= 1\n\nReturns:\nlist of lists of devices, where each inner list is group_size long,\nand each device appears at least once in an inner list.  If\nlen(devices) % group_size = 0 then each device will appear\nexactly once.\n\nRaises:\nValueError: group_size > len(devices)", "source": "juraj-google-style"}
{"code": "def _random_stateless_uniform(shape: types.IntTensor, num_digits: types.IntTensor, seed: int, validate_args: bool=False, dtype: tf.DType=None, name: str=None) -> types.IntTensor:\n    with tf.name_scope(name or 'random_stateless_uniform'):\n        dtype = dtype or tf.int32\n        shape = tf.convert_to_tensor(shape, dtype=dtype, name='dim')\n        num_digits = tf.convert_to_tensor(num_digits, dtype=dtype, name='num_digits')\n        control_deps = []\n        if validate_args:\n            control_deps.append(tf.debugging.assert_positive(shape, message='shape must be positive'))\n            control_deps.append(tf.debugging.assert_positive(num_digits, message='num_digits must be positive'))\n        with tf.control_dependencies(control_deps):\n            minval = tf.cast(utils.exp2(num_digits - 1), dtype=dtype)\n            maxval = tf.cast(utils.exp2(num_digits), dtype=dtype)\n            return tf.random.stateless_uniform(shape, seed, minval=minval, maxval=maxval, dtype=dtype)", "docstring": "Returns a `Tensor` drawn from a uniform distribution with a given `shape`.\n\nArgs:\nshape: Positive scalar `Tensor` of integers with rank 1. The shape of the\nreturned `Tensor`.\nnum_digits: Positive scalar `Tensor` of integers with rank 0. the base-2\nprecision of the points which can be sampled from `generating_matrices`.\nseed: Positive scalar `Tensor` with shape [2] and dtype `int32` used as seed\nfor the random generator.\nvalidate_args: Python `bool` indicating whether to validate arguments.\nDefault value: `False`.\ndtype: Optional `dtype`. The `dtype` of the output `Tensor` (either\n`tf.int32` or `tf.int64`).\nDefault value: `None` which maps to `tf.int32`.\nname: Python `str` name prefixed to ops created by this function.\nDefault value: `None` which maps to `random_stateless_uniform`.\n\nReturns:\nA `Tensor` with the requested `shape`.", "source": "github-repos"}
{"code": "def post_process_depth_estimation(self, outputs: 'DepthProDepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]]=None) -> Dict[str, List[TensorType]]:\n    requires_backends(self, 'torch')\n    predicted_depth = outputs.predicted_depth\n    fov = outputs.field_of_view\n    batch_size = len(predicted_depth)\n    if target_sizes is not None and batch_size != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many fov values as the batch dimension of the predicted depth')\n    results = []\n    fov = [None] * batch_size if fov is None else fov\n    target_sizes = [None] * batch_size if target_sizes is None else target_sizes\n    for depth, fov_value, target_size in zip(predicted_depth, fov, target_sizes):\n        focal_length = None\n        if target_size is not None:\n            if fov_value is not None:\n                width = target_size[1]\n                focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_value))\n                depth = depth * width / focal_length\n            depth = torch.nn.functional.interpolate(input=depth.unsqueeze(0).unsqueeze(1), size=target_size, mode=pil_torch_interpolation_mapping[self.resample].value).squeeze()\n        depth = 1.0 / torch.clamp(depth, min=0.0001, max=10000.0)\n        results.append({'predicted_depth': depth, 'field_of_view': fov_value, 'focal_length': focal_length})\n    return results", "docstring": "Post-processes the raw depth predictions from the model to generate\nfinal depth predictions which is caliberated using the field of view if provided\nand resized to specified target sizes if provided.\n\nArgs:\noutputs ([`DepthProDepthEstimatorOutput`]):\nRaw outputs of the model.\ntarget_sizes (`Optional[Union[TensorType, List[Tuple[int, int]], None]]`, *optional*, defaults to `None`):\nTarget sizes to resize the depth predictions. Can be a tensor of shape `(batch_size, 2)`\nor a list of tuples `(height, width)` for each image in the batch. If `None`, no resizing\nis performed.\n\nReturns:\n`List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth\npredictions, and field of view (degrees) and focal length (pixels) if `field_of_view` is given in `outputs`.\n\nRaises:\n`ValueError`:\nIf the lengths of `predicted_depths`, `fovs`, or `target_sizes` are mismatched.", "source": "github-repos"}
{"code": "def _handle_stop_workflow(self, request):\n    self._stop_workflow = True\n    for (name, dag) in self._dags_running.items():\n        if (name not in self._stop_dags):\n            self._stop_dags.append(name)\n    return Response(success=True, uid=request.uid)", "docstring": "The handler for the stop_workflow request.\n\nThe stop_workflow request adds all running dags to the list of dags\nthat should be stopped and prevents new dags from being started. The dags will\nthen stop queueing new tasks, which will terminate the dags and in turn the\nworkflow.\n\nArgs:\nrequest (Request): Reference to a request object containing the\nincoming request.\n\nReturns:\nResponse: A response object containing the following fields:\n- success: True if the dags were added successfully to the list\nof dags that should be stopped.", "source": "codesearchnet"}
{"code": "def initialize(self, table):\n    check_table_dtypes(table, self.key_dtype, self.value_dtype)\n    with ops.name_scope(self._name, 'text_file_init', (table.resource_handle,)):\n        filename = ops.convert_to_tensor(self._filename, dtypes.string, name='asset_filepath')\n        init_op = gen_lookup_ops.initialize_table_from_text_file_v2(table.resource_handle, filename, self._key_index, self._value_index, -1 if self._vocab_size is None else self._vocab_size, self._delimiter, self._offset)\n    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n    if not context.executing_eagerly() and constant_op.is_constant(filename):\n        ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)\n    return init_op", "docstring": "Initializes the table from a text file.\n\nArgs:\ntable: The table to be initialized.\n\nReturns:\nThe operation that initializes the table.\n\nRaises:\nTypeError: when the keys and values data types do not match the table\nkey and value data types.", "source": "github-repos"}
{"code": "def potential_jumps( self ):\n        \n        jumps = []\n        if self.number_of_occupied_sites <= self.number_of_sites / 2:\n            for occupied_site in self.occupied_sites():\n                unoccupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in occupied_site.neighbours ] if not site.is_occupied ]\n                for vacant_site in unoccupied_neighbours:\n                    jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) )\n        else:\n            for vacant_site in self.vacant_sites():\n                occupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in vacant_site.neighbours ] if site.is_occupied ]\n                for occupied_site in occupied_neighbours:\n                    jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) )\n        return jumps", "docstring": "All nearest-neighbour jumps not blocked by volume exclusion\n(i.e. from occupied to neighbouring unoccupied sites).\n\nArgs:\nNone\n\nReturns:\n(List(Jump)): List of possible jumps.", "source": "juraj-google-style"}
{"code": "def from_event(cls, ion_event):\n    if (ion_event.value is not None):\n        (args, kwargs) = cls._to_constructor_args(ion_event.value)\n    else:\n        (args, kwargs) = ((), {})\n    value = cls(*args, **kwargs)\n    value.ion_event = ion_event\n    value.ion_type = ion_event.ion_type\n    value.ion_annotations = ion_event.annotations\n    return value", "docstring": "Constructs the given native extension from the properties of an event.\n\nArgs:\nion_event (IonEvent): The event to construct the native value from.", "source": "codesearchnet"}
{"code": "def HandleExceptionsAndRebuildHttpConnections(retry_args):\n    \n    \n    \n    retry_after = None\n\n    \n    if isinstance(retry_args.exc, (http_client.BadStatusLine,\n                                   http_client.IncompleteRead,\n                                   http_client.ResponseNotReady)):\n        logging.debug('Caught HTTP error %s, retrying: %s',\n                      type(retry_args.exc).__name__, retry_args.exc)\n    elif isinstance(retry_args.exc, socket.error):\n        logging.debug('Caught socket error, retrying: %s', retry_args.exc)\n    elif isinstance(retry_args.exc, socket.gaierror):\n        logging.debug(\n            'Caught socket address error, retrying: %s', retry_args.exc)\n    elif isinstance(retry_args.exc, socket.timeout):\n        logging.debug(\n            'Caught socket timeout error, retrying: %s', retry_args.exc)\n    elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):\n        logging.debug(\n            'Caught server not found error, retrying: %s', retry_args.exc)\n    elif isinstance(retry_args.exc, ValueError):\n        \n        \n        \n        logging.debug('Response content was invalid (%s), retrying',\n                      retry_args.exc)\n    elif (isinstance(retry_args.exc, TokenRefreshError) and\n          hasattr(retry_args.exc, 'status') and\n          (retry_args.exc.status == TOO_MANY_REQUESTS or\n           retry_args.exc.status >= 500)):\n        logging.debug(\n            'Caught transient credential refresh error (%s), retrying',\n            retry_args.exc)\n    elif isinstance(retry_args.exc, exceptions.RequestError):\n        logging.debug('Request returned no response, retrying')\n    \n    elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):\n        logging.debug('Response returned status %s, retrying',\n                      retry_args.exc.status_code)\n    elif isinstance(retry_args.exc, exceptions.RetryAfterError):\n        logging.debug('Response returned a retry-after header, retrying')\n        retry_after = retry_args.exc.retry_after\n    else:\n        raise retry_args.exc\n    RebuildHttpConnections(retry_args.http)\n    logging.debug('Retrying request to url %s after exception %s',\n                  retry_args.http_request.url, retry_args.exc)\n    time.sleep(\n        retry_after or util.CalculateWaitForRetry(\n            retry_args.num_retries, max_wait=retry_args.max_retry_wait))", "docstring": "Exception handler for http failures.\n\nThis catches known failures and rebuilds the underlying HTTP connections.\n\nArgs:\nretry_args: An ExceptionRetryArgs tuple.", "source": "juraj-google-style"}
{"code": "def getObjective(self, name):\n        \n        return lock_and_call(\n            lambda: Objective(self._impl.getObjective(name)),\n            self._lock\n        )", "docstring": "Get the objective with the corresponding name.\n\nArgs:\nname: Name of the objective to be found.\n\nRaises:\nTypeError: if the specified objective does not exist.", "source": "juraj-google-style"}
{"code": "def _convert_to_dict(data):\n    if isinstance(data, dict):\n        return data\n    if (isinstance(data, list) or isinstance(data, tuple)):\n        if _all_correct_list(data):\n            return dict(data)\n        else:\n            data = zip(data[::2], data[1::2])\n            return dict(data)\n    else:\n        raise MetaParsingException(\"Can't decode provided metadata - unknown structure.\")", "docstring": "Convert `data` to dictionary.\n\nTries to get sense in multidimensional arrays.\n\nArgs:\ndata: List/dict/tuple of variable dimension.\n\nReturns:\ndict: If the data can be converted to dictionary.\n\nRaises:\nMetaParsingException: When the data are unconvertible to dict.", "source": "codesearchnet"}
{"code": "def file_md5(filename):\n    with zopen(filename, 'r') as f:\n        file_string = f.read()\n    try:\n        file_string = file_string.decode()\n    except AttributeError:\n        pass\n    return md5sum(file_string)", "docstring": "Generate the md5 checksum for a file\n\nArgs:\nfilename (Str): The file to be checksummed.\n\nReturns:\n(Str): The hex checksum\n\nNotes:\nIf the file is gzipped, the md5 checksum returned is\nfor the uncompressed ASCII file.", "source": "codesearchnet"}
{"code": "def load_architecture(self, name, arch_info, disassembler, translator):\n        \n        \n        self.name = name\n        self.arch_info = arch_info\n        self.disassembler = disassembler\n        self.ir_translator = translator\n\n        \n        self._setup_analysis_modules()", "docstring": "Translate to REIL instructions.\n\nArgs:\nname (str): Architecture's name.\narch_info (ArchitectureInformation): Architecture information object.\ndisassembler (Disassembler): Disassembler for the architecture.\ntranslator (Translator): Translator for the architecture.", "source": "juraj-google-style"}
{"code": "def get_pattern_actual_step(self, patternnumber):\n        \n        _checkPatternNumber(patternnumber)\n        \n        address = _calculateRegisterAddress('actualstep', patternnumber)\n        return self.read_register(address, 0)", "docstring": "Get the 'actual step' parameter for a given pattern.\n\nArgs:\npatternnumber (integer): 0-7\n\nReturns:\nThe 'actual step' parameter (int).", "source": "juraj-google-style"}
{"code": "def validate(self, corpus):\n        \n\n        overflow_segments = {}\n\n        for utterance in corpus.utterances.values():\n            utt_segments = self.validate_utterance(utterance)\n\n            if len(utt_segments) > 0:\n                overflow_segments[utterance.idx] = utt_segments\n\n        passed = len(overflow_segments) <= 0\n        info = {\n            'Label-List ID': self.label_list_idx,\n            'Threshold': str(self.threshold)\n        }\n\n        return LabelOverflowValidationResult(passed, overflow_segments, self.name(), info)", "docstring": "Perform the validation on the given corpus.\n\nArgs:\ncorpus (Corpus): The corpus to test/validate.\n\nReturns:\nInvalidUtterancesResult: Validation result.", "source": "juraj-google-style"}
{"code": "def add(self, layer, rebuild=True):\n    if not self._layers:\n        if getattr(layer, '_input_shape_arg', None) is not None:\n            self.add(InputLayer(shape=layer._input_shape_arg))\n    if hasattr(layer, '_keras_history'):\n        origin_layer = layer._keras_history[0]\n        if isinstance(origin_layer, InputLayer):\n            layer = origin_layer\n    if not isinstance(layer, Layer):\n        raise ValueError(f'Only instances of `keras.Layer` can be added to a Sequential model. Received: {layer} (of type {type(layer)})')\n    if not self._is_layer_name_unique(layer):\n        raise ValueError(f\"All layers added to a Sequential model should have unique names. Name '{layer.name}' is already the name of a layer in this model. Update the `name` argument to pass a unique name.\")\n    if isinstance(layer, InputLayer) and self._layers and isinstance(self._layers[0], InputLayer):\n        raise ValueError(f\"Sequential model '{self.name}' has already been configured to use input shape {self._layers[0].batch_shape}. You cannot add a different Input layer to it.\")\n    self._layers.append(layer)\n    if rebuild:\n        self._maybe_rebuild()\n    else:\n        self.built = False\n        self._functional = None", "docstring": "Adds a layer instance on top of the layer stack.\n\nArgs:\nlayer: layer instance.", "source": "github-repos"}
{"code": "def _test_streaming(self, with_attributes):\n    state_verifier = PipelineStateMatcher(PipelineState.RUNNING)\n    expected_messages = self.EXPECTED_OUTPUT_MESSAGES[self.runner_name]\n    if not with_attributes:\n        expected_messages = [pubsub_msg.data for pubsub_msg in expected_messages]\n    if self.runner_name == 'TestDirectRunner':\n        strip_attributes = None\n    else:\n        strip_attributes = [self.ID_LABEL, self.TIMESTAMP_ATTRIBUTE]\n    pubsub_msg_verifier = PubSubMessageMatcher(self.project, self.output_sub.name, expected_messages, timeout=MESSAGE_MATCHER_TIMEOUT_S, with_attributes=with_attributes, strip_attributes=strip_attributes)\n    extra_opts = {'input_subscription': self.input_sub.name, 'output_topic': self.output_topic.name, 'wait_until_finish_duration': TEST_PIPELINE_DURATION_MS, 'on_success_matcher': all_of(state_verifier, pubsub_msg_verifier)}\n    for msg in self.INPUT_MESSAGES[self.runner_name]:\n        self.pub_client.publish(self.input_topic.name, msg.data, **msg.attributes).result()\n    pubsub_it_pipeline.run_pipeline(argv=self.test_pipeline.get_full_options_as_args(**extra_opts), with_attributes=with_attributes, id_label=self.ID_LABEL, timestamp_attribute=self.TIMESTAMP_ATTRIBUTE)", "docstring": "Runs IT pipeline with message verifier.\n\nArgs:\nwith_attributes: False - Reads and writes message data only.\nTrue - Reads and writes message data and attributes. Also verifies\nid_label and timestamp_attribute features.", "source": "github-repos"}
{"code": "def process_tokens(self, tokens):\n    for (tok_type, token, (start_row, start_col), _, _) in tokens:\n        if (tok_type == tokenize.STRING):\n            self._process_string_token(token, start_row, start_col)", "docstring": "Process the token stream.\n\nThis is required to override the parent class' implementation.\n\nArgs:\ntokens: the tokens from the token stream to process.", "source": "codesearchnet"}
{"code": "def to_numbers(self, flatten: bool=True) -> Union[List[Union[int, float, str]], utils.Nestable[Union[int, float, str]]]:\n    if flatten:\n        decisions = [self.value] if self.value is not None else []\n        for c in self.children:\n            decisions.extend(c.to_numbers(flatten))\n        return decisions\n    elif self.value is None:\n        return [c.to_numbers(flatten) for c in self.children]\n    elif not self.children:\n        return self.value\n    elif len(self.children) == 1:\n        child = self.children[0].to_numbers(flatten)\n        if isinstance(child, tuple):\n            return tuple([self.value, list(child)])\n        else:\n            return (self.value, child)\n    else:\n        assert len(self.children) > 1\n        return (self.value, [c.to_numbers(flatten) for c in self.children])", "docstring": "Returns a (maybe) nested structure of numbers as decisions.\n\nArgs:\nflatten: If True, the hierarchy of the numbers will not be preserved.\nDecisions will be returned as a flat list in DFS order. Otherwise, a\nnestable structure of numbers will be returned.\n\nReturns:\nA flat list or a hierarchical structure of numbers as the decisions made\nfor each decision point.", "source": "github-repos"}
{"code": "def parse_done(self, buf: memoryview) -> Tuple[bool, memoryview]:\n        \n        match = self._pattern.match(buf)\n        if not match:\n            raise NotParseable(buf)\n        done = match.group(1).upper() == self.continuation\n        buf = buf[match.end(0):]\n        return done, buf", "docstring": "Parse the continuation line sent by the client to end the ``IDLE``\ncommand.\n\nArgs:\nbuf: The continuation line to parse.", "source": "juraj-google-style"}
{"code": "def export_model(model, model_type, export_dir, model_column_fn):\n  \n  wide_columns, deep_columns = model_column_fn()\n  if model_type == 'wide':\n    columns = wide_columns\n  elif model_type == 'deep':\n    columns = deep_columns\n  else:\n    columns = wide_columns + deep_columns\n  feature_spec = tf.feature_column.make_parse_example_spec(columns)\n  example_input_fn = (\n      tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))\n  model.export_savedmodel(export_dir, example_input_fn,\n                          strip_default_attrs=True)", "docstring": "Export to SavedModel format.\n\nArgs:\nmodel: Estimator object\nmodel_type: string indicating model type. \"wide\", \"deep\" or \"wide_deep\"\nexport_dir: directory to export the model.\nmodel_column_fn: Function to generate model feature columns.", "source": "juraj-google-style"}
{"code": "def comments(self, case_id=None, variant_id=None, username=None):\n        \n        logger.debug(\"Looking for comments\")\n        comment_objs = self.query(Comment)\n\n        if case_id:\n            comment_objs = comment_objs.filter_by(case_id=case_id)\n\n        if variant_id:\n            comment_objs = comment_objs.filter_by(variant_id=variant_id)\n        elif case_id:\n            comment_objs = comment_objs.filter_by(variant_id=None)\n\n        return comment_objs", "docstring": "Return comments for a case or variant.\n\nArgs:\ncase_id (str): id for a related case\nvariant_id (Optional[str]): id for a related variant", "source": "juraj-google-style"}
{"code": "def put(self, url, params=None, data=None, files=None, **kwargs):\n        \n        return self.call_api(\n            \"PUT\",\n            url,\n            params=params,\n            data=data,\n            files=files,\n            **kwargs\n        )", "docstring": "Call the API with a PUT request.\n\nArgs:\nurl (str): Resource location relative to the base URL.\nparams (dict or None): Query-string parameters.\ndata (dict or None): Request body contents.\nfiles (dict or None: Files to be passed to the request.\n\nReturns:\nAn instance of ResultParser or ErrorParser.", "source": "juraj-google-style"}
{"code": "def _load_config_file(path):\n    with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:\n        conf = yaml.safe_load(f)\n        return conf", "docstring": "Loads a test config file.\n\nThe test config file has to be in YAML format.\n\nArgs:\npath: A string that is the full path to the config file, including the\nfile name.\n\nReturns:\nA dict that represents info in the config file.", "source": "github-repos"}
{"code": "def read_local_files(*file_paths: str) -> str:\n    \n\n    def _read_single_file(file_path):\n        with open(file_path) as f:\n            filename = os.path.splitext(file_path)[0]\n            title = f'{filename}\\n{\"=\" * len(filename)}'\n            return '\\n\\n'.join((title, f.read()))\n\n    return '\\n' + '\\n\\n'.join(map(_read_single_file, file_paths))", "docstring": "Reads one or more text files and returns them joined together.\nA title is automatically created based on the file name.\n\nArgs:\n*file_paths: list of files to aggregate\n\nReturns: content of files", "source": "juraj-google-style"}
{"code": "def distance_to_line(a, b, p):\n    \n    return distance(closest_point(a, b, p), p)", "docstring": "Closest distance between a line segment and a point\n\nArgs:\na ([float, float]): x and y coordinates. Line start\nb ([float, float]): x and y coordinates. Line end\np ([float, float]): x and y coordinates. Point to compute the distance\nReturns:\nfloat", "source": "juraj-google-style"}
{"code": "def apply_inverse(self, y, in_place=False):\n    return cho_solve(self._factor, y, overwrite_b=in_place)", "docstring": "r\"\"\"\nApply the inverse of the covariance matrix to the input by solving\n\n.. math::\n\nK\\,x = y\n\nArgs:\ny (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or\nmatrix :math:`y`.\nin_place (Optional[bool]): Should the data in ``y`` be overwritten\nwith the result :math:`x`? (default: ``False``)", "source": "codesearchnet"}
{"code": "def ParseLastVisitedRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):\n    query_hash = hash(query)\n    hidden = self._GetRowValue(query_hash, row, 'hidden')\n    transition = self._GetRowValue(query_hash, row, 'transition')\n    visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')\n    from_visit = self._GetRowValue(query_hash, row, 'from_visit')\n    event_data = ChromeHistoryPageVisitedEventData()\n    event_data.from_visit = self._GetUrl(from_visit, cache, database)\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.page_transition_type = (transition & self._PAGE_TRANSITION_CORE_MASK)\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.url_hidden = (hidden == '1')\n    event_data.visit_source = self._GetVisitSource(visit_identifier, cache, database)\n    timestamp = self._GetRowValue(query_hash, row, 'visit_time')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a last visited row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (SQLiteCache): cache which contains cached results from querying\nthe visits and urls tables.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "codesearchnet"}
{"code": "def button_state(self):\n    if (self.type != EventType.POINTER_BUTTON):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_pointer_get_button_state(self._handle)", "docstring": "The button state that triggered this event.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property\nraises :exc:`AttributeError`.\n\nReturns:\n~libinput.constant.ButtonState: The button state triggering this\nevent.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def __register_services(api_name_version_map, api_config_registry):\n    generator = api_config.ApiConfigGenerator()\n    protorpc_services = []\n    for service_factories in api_name_version_map.itervalues():\n        service_classes = [service_factory.service_class for service_factory in service_factories]\n        config_dict = generator.get_config_dict(service_classes)\n        api_config_registry.register_backend(config_dict)\n        for service_factory in service_factories:\n            protorpc_class_name = service_factory.service_class.__name__\n            root = ('%s%s' % (service_factory.service_class.api_info.base_path, protorpc_class_name))\n            if any((((service_map[0] == root) or (service_map[1] == service_factory)) for service_map in protorpc_services)):\n                raise api_config.ApiConfigurationError((\"Can't reuse the same class in multiple APIs: %s\" % protorpc_class_name))\n            protorpc_services.append((root, service_factory))\n    return protorpc_services", "docstring": "Register & return a list of each URL and class that handles that URL.\n\nThis finds every service class in api_name_version_map, registers it with\nthe given ApiConfigRegistry, builds the URL for that class, and adds\nthe URL and its factory to a list that's returned.\n\nArgs:\napi_name_version_map: A mapping from (api name, api version) to a list of\nservice factories, as returned by __create_name_version_map.\napi_config_registry: The ApiConfigRegistry where service classes will\nbe registered.\n\nReturns:\nA list of (URL, service_factory) for each service class in\napi_name_version_map.\n\nRaises:\nApiConfigurationError: If a Service class appears more than once\nin api_name_version_map.  This could happen if one class is used to\nimplement multiple APIs.", "source": "codesearchnet"}
{"code": "def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):\n  \n  if halo_size == 0:\n    return x\n\n  block_size = block_size_dim.size\n  partial_size = halo_size % block_size\n  num_complete_blocks = halo_size \n  parts = [x]\n\n  for i in xrange(1, num_complete_blocks + 1):\n    parts = ([shift(x, i, blocks_dim, wrap)] + parts +\n             [shift(x, -i, blocks_dim, wrap)])\n  if partial_size > 0:\n    left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name)\n    right_margin = mtf_slice(\n        x, block_size_dim.size - partial_size, partial_size,\n        block_size_dim.name)\n    parts = (\n        [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]\n        + parts +\n        [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)])\n  return concat(parts, block_size_dim.name)", "docstring": "Concat each block with the margins of adjacent blocks.\n\nGet left and right blocks_dim and concatenate along block_size_dim.\n\nArgs:\nx: a Tensor.\nblocks_dim: a Dimension in x.shape\nblock_size_dim: a Dimension in x.shape\nhalo_size: an integer\nwrap: a boolean\n\nReturns:\na Tensor with the same shape as x, other than in block_size_dim, whose\nsize is increased by 2*halo_size.", "source": "juraj-google-style"}
{"code": "def plot_correlation(self, freq=None, title=None, figsize=(12, 6), **kwargs):\n    if (title is None):\n        title = self._get_default_plot_title(freq, 'Return Correlation Matrix')\n    rets = self._get_series(freq).to_returns().dropna()\n    return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)", "docstring": "Utility function to plot correlations.\n\nArgs:\n* freq (str): Pandas data frequency alias string\n* title (str): Plot title\n* figsize (tuple (x,y)): figure size\n* kwargs: passed to Pandas' plot_corr_heatmap function", "source": "codesearchnet"}
{"code": "def tag(self, name, formatter=None):\n    tag = Tag(name, formatter)\n    for tag_data in self._tags:\n        if (tag_data.name == name):\n            tag = tag_data\n            break\n    else:\n        self._tags.append(tag)\n    return tag", "docstring": "Return instance of Tag.\n\nArgs:\nname (str): The value for this tag.\nformatter (method, optional): A method that take a tag value and returns a\nformatted tag.\n\nReturns:\nobj: An instance of Tag.", "source": "codesearchnet"}
{"code": "def colored(cls, color, message):\n    return ((getattr(cls, color.upper()) + message) + cls.DEFAULT)", "docstring": "Small function to wrap a string around a color\n\nArgs:\ncolor (str): name of the color to wrap the string with, must be one\nof the class properties\nmessage (str): String to wrap with the color\n\nReturns:\nstr: the colored string", "source": "codesearchnet"}
{"code": "def get_vmss(access_token, subscription_id, resource_group, vmss_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get virtual machine scale set details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response. JSON body of scale set properties.", "source": "juraj-google-style"}
{"code": "def _send_notification(self, handle, payload):\n        \n\n        self.bable.notify(\n            connection_handle=self._connection_handle,\n            attribute_handle=handle,\n            value=payload\n        )", "docstring": "Send a notification over BLE\nIt is executed in the baBLE working thread: should not be blocking.\n\nArgs:\nhandle (int): The handle to notify on\npayload (bytearray): The value to notify", "source": "juraj-google-style"}
{"code": "def get_timestamped_export_dir(export_dir_base):\n    attempts = 0\n    while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n        timestamp = int(time.time())\n        result_dir = os.path.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))\n        if not gfile.Exists(result_dir):\n            return result_dir\n        time.sleep(1)\n        attempts += 1\n        logging.warning('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))\n    raise RuntimeError('Failed to obtain a unique export directory name after {} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))", "docstring": "Builds a path to a new subdirectory within the base directory.\n\nEach export is written into a new subdirectory named using the\ncurrent time.  This guarantees monotonically increasing version\nnumbers even across multiple runs of the pipeline.\nThe timestamp used is the number of seconds since epoch UTC.\n\nArgs:\nexport_dir_base: A string containing a directory to write the exported\ngraph and checkpoints.\nReturns:\nThe full path of the new subdirectory (which is not actually created yet).\n\nRaises:\nRuntimeError: if repeated attempts fail to obtain a unique timestamped\ndirectory name.", "source": "github-repos"}
{"code": "def _unpack(formatstring, packed):\n    _checkString(formatstring, description='formatstring', minlength=1)\n    _checkString(packed, description='packed string', minlength=1)\n    if (sys.version_info[0] > 2):\n        packed = bytes(packed, encoding='latin1')\n    try:\n        value = struct.unpack(formatstring, packed)[0]\n    except:\n        errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.'\n        errortext += ' Bytestring: {0!r} Struct format code is: {1}'\n        raise ValueError(errortext.format(packed, formatstring))\n    return value", "docstring": "Unpack a bytestring into a value.\n\nUses the built-in :mod:`struct` Python module.\n\nArgs:\n* formatstring (str): String for the packing. See the :mod:`struct` module for details.\n* packed (str): The bytestring to be unpacked.\n\nReturns:\nA value. The type depends on the formatstring.\n\nRaises:\nValueError\n\nNote that the :mod:`struct` module wants byte buffers for Python3,\nbut bytestrings for Python2. This is compensated for automatically.", "source": "codesearchnet"}
{"code": "def resolve_object_property(obj, path: str):\n    value = obj\n    for path_part in path.split('.'):\n        value = getattr(value, path_part)\n    return value", "docstring": "Resolves the value of a property on an object.\n\nIs able to resolve nested properties. For example,\na path can be specified:\n\n'other.beer.name'\n\nRaises:\nAttributeError:\nIn case the property could not be resolved.\n\nReturns:\nThe value of the specified property.", "source": "codesearchnet"}
{"code": "def _WriteAttributeContainer(self, attribute_container):\n    \n    if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:\n      timestamp, serialized_data = self._serialized_event_heap.PopEvent()\n    else:\n      serialized_data = self._SerializeAttributeContainer(attribute_container)\n\n    if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:\n      compressed_data = zlib.compress(serialized_data)\n      serialized_data = sqlite3.Binary(compressed_data)\n    else:\n      compressed_data = ''\n\n    if self._storage_profiler:\n      self._storage_profiler.Sample(\n          'write', attribute_container.CONTAINER_TYPE, len(serialized_data),\n          len(compressed_data))\n\n    if attribute_container.CONTAINER_TYPE == self._CONTAINER_TYPE_EVENT:\n      query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'\n      self._cursor.execute(query, (timestamp, serialized_data))\n    else:\n      query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(\n          attribute_container.CONTAINER_TYPE)\n      self._cursor.execute(query, (serialized_data, ))\n\n    identifier = identifiers.SQLTableIdentifier(\n        attribute_container.CONTAINER_TYPE, self._cursor.lastrowid)\n    attribute_container.SetIdentifier(identifier)", "docstring": "Writes an attribute container.\n\nThe table for the container type must exist.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.", "source": "juraj-google-style"}
{"code": "def load(f, _dict=dict, decoder=None):\n    if _ispath(f):\n        with io.open(_getpath(f), encoding='utf-8') as ffile:\n            return loads(ffile.read(), _dict, decoder)\n    elif isinstance(f, list):\n        from os import path as op\n        from warnings import warn\n        if (not [path for path in f if op.exists(path)]):\n            error_msg = 'Load expects a list to contain filenames only.'\n            error_msg += linesep\n            error_msg += 'The list needs to contain the path of at least one existing file.'\n            raise FNFError(error_msg)\n        if (decoder is None):\n            decoder = TomlDecoder()\n        d = decoder.get_empty_table()\n        for l in f:\n            if op.exists(l):\n                d.update(load(l, _dict, decoder))\n            else:\n                warn('Non-existent filename in list with at least one valid filename')\n        return d\n    else:\n        try:\n            return loads(f.read(), _dict, decoder)\n        except AttributeError:\n            raise TypeError('You can only load a file descriptor, filename or list')", "docstring": "Parses named file or files as toml and returns a dictionary\n\nArgs:\nf: Path to the file to open, array of files to read into single dict\nor a file descriptor\n_dict: (optional) Specifies the class of the returned toml dictionary\n\nReturns:\nParsed toml file represented as a dictionary\n\nRaises:\nTypeError -- When f is invalid type\nTomlDecodeError: Error while decoding toml\nIOError / FileNotFoundError -- When an array with no valid (existing)\n(Python 2 / Python 3)          file paths is passed", "source": "codesearchnet"}
{"code": "def _read_output(self, stream, callback, output_file):\n        \n        if (callback is None and output_file is None) or stream.closed:\n            return False\n\n        line = stream.readline()\n        if line:\n            if callback is not None:\n                callback(line.decode(),\n                         self._data, self._store, self._signal, self._context)\n\n            if output_file is not None:\n                output_file.write(line)\n\n            return True\n        else:\n            return False", "docstring": "Read the output of the process, executed the callback and save the output.\n\nArgs:\nstream: A file object pointing to the output stream that should be read.\ncallback(callable, None): A callback function that is called for each new\nline of output.\noutput_file: A file object to which the full output is written.\n\nReturns:\nbool: True if a line was read from the output, otherwise False.", "source": "juraj-google-style"}
{"code": "def hurst_compare_nvals(data, nvals=None):\n  \n  import matplotlib.pyplot as plt\n  data = np.asarray(data)\n  n_all = np.arange(2,len(data)+1)\n  dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit=\"poly\")\n  dd_def = nolds.hurst_rs(data, debug_data=True, fit=\"poly\")\n  n_def = np.round(np.exp(dd_def[1][0])).astype(\"int32\")\n  n_div = n_all[np.where(len(data) % n_all[:-1] == 0)]\n  dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit=\"poly\")\n  def corr(nvals):\n    return [np.log(nolds.expected_rs(n)) for n in nvals]\n\n\n  l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), \"o\")\n  l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), \"o\")\n  l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), \"o\")\n  l_cst = []\n  t_cst = []\n\n  if nvals is not None:\n    dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit=\"poly\")\n    l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), \"o\")\n    l_cst = l_cst\n    t_cst = [\"custom\"]\n  plt.xlabel(\"log(n)\")\n  plt.ylabel(\"log((R/S)_n - E[(R/S)_n])\")\n  plt.legend(l_all + l_def + l_div + l_cst, [\"all\", \"default\", \"divisors\"] + t_cst)\n  labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], [\"all\", \"def\", \"div\"])\n  for data, label in labeled_data:\n    print(\"%s: %.3f\" % (label, data))\n  if nvals is not None:\n    print(\"custom: %.3f\" % dd_cst[0])\n  plt.show()", "docstring": "Creates a plot that compares the results of different choices for nvals\nfor the function hurst_rs.\n\nArgs:\ndata (array-like of float):\nthe input data from which the hurst exponent should be estimated\n\nKwargs:\nnvals (array of int):\na manually selected value for the nvals parameter that should be plotted\nin comparison to the default choices", "source": "juraj-google-style"}
{"code": "def valUserCert(self, byts, cacerts=None):\n    cert = crypto.load_certificate(crypto.FILETYPE_PEM, byts)\n    if (cacerts is None):\n        cacerts = self.getCaCerts()\n    store = crypto.X509Store()\n    [store.add_cert(cacert) for cacert in cacerts]\n    ctx = crypto.X509StoreContext(store, cert)\n    ctx.verify_certificate()\n    return cert", "docstring": "Validate the PEM encoded x509 user certificate bytes and return it.\n\nArgs:\nbyts (bytes): The bytes for the User Certificate.\ncacerts (tuple): A tuple of OpenSSL.crypto.X509 CA Certificates.\n\nRaises:\nOpenSSL.crypto.X509StoreContextError: If the certificate is not valid.\n\nReturns:\nOpenSSL.crypto.X509: The certificate, if it is valid.", "source": "codesearchnet"}
{"code": "def add_arguments(self, parser):\n        \n        group = parser.add_mutually_exclusive_group(required=True)\n        group.add_argument('-l', '--list', nargs='?',\n                           type=str.lower, default='_',\n                           choices=['usb', 'ip'],\n                           help='list all the connected emulators')\n        group.add_argument('-s', '--supported', nargs=1,\n                           help='query whether a device is supported')\n        group.add_argument('-t', '--test', action='store_true',\n                           help='perform a self-test')\n        return None", "docstring": "Adds the arguments for the emulator command.\n\nArgs:\nself (EmulatorCommand): the ``EmulatorCommand`` instance\nparser (argparse.ArgumentParser): parser to add the commands to\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def download_apcor(self, uri):\n    local_file = os.path.basename(uri)\n    if os.access(local_file, os.F_OK):\n        fobj = open(local_file)\n    else:\n        fobj = storage.vofile(uri, view='data')\n        fobj.seek(0)\n    str = fobj.read()\n    fobj.close()\n    apcor_str = str\n    return ApcorData.from_string(apcor_str)", "docstring": "Downloads apcor data.\n\nArgs:\nuri: The URI of the apcor data file.\n\nReturns:\napcor: ossos.downloads.core.ApcorData", "source": "codesearchnet"}
{"code": "def read_local_files(*file_paths: str) -> str:\n\n    def _read_single_file(file_path):\n        with open(file_path) as f:\n            filename = os.path.splitext(file_path)[0]\n            title = f\n            return '\\n\\n'.join((title, f.read()))\n    return ('\\n' + '\\n\\n'.join(map(_read_single_file, file_paths)))", "docstring": "Reads one or more text files and returns them joined together.\nA title is automatically created based on the file name.\n\nArgs:\n*file_paths: list of files to aggregate\n\nReturns: content of files", "source": "codesearchnet"}
{"code": "def _zip_request_params(self, urls, query_params, data):\n    if (not isinstance(urls, list)):\n        urls = [urls]\n    if (not isinstance(query_params, list)):\n        query_params = [query_params]\n    if (not isinstance(data, list)):\n        data = [data]\n    url_count = len(urls)\n    query_param_count = len(query_params)\n    data_count = len(data)\n    max_count = max(url_count, query_param_count, data_count)\n    if ((max_count > url_count > 1) or (max_count > query_param_count > 1) or (max_count > data_count > 1)):\n        raise InvalidRequestError('Mismatched parameter count url_count:{0} query_param_count:{1} data_count:{2} max_count:{3}', url_count, query_param_count, data_count, max_count)\n    if (url_count < max_count):\n        urls = (urls * max_count)\n    if (query_param_count < max_count):\n        query_params = (query_params * max_count)\n    if (data_count < max_count):\n        data = (data * max_count)\n    return list(zip(urls, query_params, data))", "docstring": "Massages inputs and returns a list of 3-tuples zipping them up.\n\nThis is all the smarts behind deciding how many requests to issue.\nIt's fine for an input to have 0, 1, or a list of values.\nIf there are two inputs each with a list of values, the cardinality of those lists much match.\n\nArgs:\nurls - 1 string URL or a list of URLs\nquery_params - None, 1 dict, or a list of dicts\ndata - None, 1 dict or string, or a list of dicts or strings\nReturns:\nA list of 3-tuples (url, query_param, data)\nRaises:\nInvalidRequestError - if cardinality of lists does not match", "source": "codesearchnet"}
{"code": "def register(self, user_dict):\n    endpoint = os.path.join(self._config.get('napps', 'api'), 'users', '')\n    res = self.make_request(endpoint, method='POST', json=user_dict)\n    return res.content.decode('utf-8')", "docstring": "Send an user_dict to NApps server using POST request.\n\nArgs:\nuser_dict(dict): Dictionary with user attributes.\n\nReturns:\nresult(string): Return the response of Napps server.", "source": "codesearchnet"}
{"code": "def _set_details(self, content):\n    try:\n        self.details = str(content)\n    except UnicodeEncodeError:\n        if (sys.version_info < (3, 0)):\n            self.details = unicode(content)\n        else:\n            logging.error('Unable to decode \"%s\" in Py3, encoding in utf-8.', content)\n            self.details = content.encode('utf-8')", "docstring": "Sets the `details` field.\n\nArgs:\ncontent: the content to extract details from.", "source": "codesearchnet"}
{"code": "def _get_req_fp(self, op):\n\t\t\n\t\tif(op):\n\t\t\top = op.lower()\n\t\t\tif op == 'get':\n\t\t\t\treturn requests.get, None\n\t\t\tif op == 'put':\n\t\t\t\treturn requests.put, {'Content-Type': 'application/x-www-form-urlencoded'}\n\t\t\tif op == 'post':\n\t\t\t\treturn requests.post, {'Content-Type': 'application/json'}\n\t\t\tif op == 'delete':\n\t\t\t\treturn requests.delete, None\n\t\telse:\n\t\t\traise NotImplementedError('Operation {} is not supported!'.format(op))", "docstring": "Decisions on what verb to use and content headers happen here\nArgs:\nop \t\t\ta string specifying a http verb", "source": "juraj-google-style"}
{"code": "def _handle_message_for_stream(self, stream_transport, message, timeout):\n    if (message.command not in ('OKAY', 'CLSE', 'WRTE')):\n        raise usb_exceptions.AdbProtocolError('%s received unexpected message: %s', self, message)\n    if (message.arg1 == stream_transport.local_id):\n        if (message.command == 'WRTE'):\n            if (not stream_transport.remote_id):\n                raise usb_exceptions.AdbProtocolError('%s received WRTE before OKAY/CLSE: %s', stream_transport, message)\n            self.transport.write_message(adb_message.AdbMessage('OKAY', stream_transport.local_id, stream_transport.remote_id), timeout)\n        elif (message.command == 'CLSE'):\n            self.close_stream_transport(stream_transport, timeout)\n        return message\n    else:\n        with self._stream_transport_map_lock:\n            dest_transport = self._stream_transport_map.get(message.arg1)\n        if dest_transport:\n            if (message.command == 'CLSE'):\n                self.close_stream_transport(dest_transport, timeout)\n            dest_transport.enqueue_message(message, timeout)\n        else:\n            _LOG.warning('Received message for unknown local-id: %s', message)", "docstring": "Handle an incoming message, check if it's for the given stream.\n\nIf the message is not for the stream, then add it to the appropriate\nmessage queue.\n\nArgs:\nstream_transport: AdbStreamTransport currently waiting on a message.\nmessage: Message to check and handle.\ntimeout: Timeout to use for the operation, should be an instance of\ntimeouts.PolledTimeout.\n\nReturns:\nThe message read if it was for this stream, None otherwise.\n\nRaises:\nAdbProtocolError: If we receive an unexpected message type.", "source": "codesearchnet"}
{"code": "def ParseDict(js_dict, message, ignore_unknown_fields=False):\n  \n  parser = _Parser(ignore_unknown_fields)\n  parser.ConvertMessage(js_dict, message)\n  return message", "docstring": "Parses a JSON dictionary representation into a message.\n\nArgs:\njs_dict: Dict representation of a JSON message.\nmessage: A protocol buffer message to merge into.\nignore_unknown_fields: If True, do not raise errors for unknown fields.\n\nReturns:\nThe same message passed as argument.", "source": "juraj-google-style"}
{"code": "def GetZipInfo(self):\n    if (not self._zip_info):\n        location = getattr(self.path_spec, 'location', None)\n        if (location is None):\n            raise errors.PathSpecError('Path specification missing location.')\n        if (not location.startswith(self._file_system.LOCATION_ROOT)):\n            raise errors.PathSpecError('Invalid location in path specification.')\n        if (len(location) == 1):\n            return None\n        zip_file = self._file_system.GetZipFile()\n        try:\n            self._zip_info = zip_file.getinfo(location[1:])\n        except KeyError:\n            pass\n    return self._zip_info", "docstring": "Retrieves the ZIP info object.\n\nReturns:\nzipfile.ZipInfo: a ZIP info object or None if not available.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def stop(self, name: str) -> None:\n    if (not self._timing):\n        return\n    now = get_now_utc_pendulum()\n    if (not self._stack):\n        raise AssertionError('MultiTimer.stop() when nothing running')\n    if (self._stack[(- 1)] != name):\n        raise AssertionError('MultiTimer.stop({}) when {} is running'.format(repr(name), repr(self._stack[(- 1)])))\n    self._totaldurations[name] += (now - self._starttimes[name])\n    self._stack.pop()\n    if self._stack:\n        last = self._stack[(- 1)]\n        self._starttimes[last] = now", "docstring": "Stop a named timer.\n\nArgs:\nname: timer to stop", "source": "codesearchnet"}
{"code": "def word_list(sowpods=False, start=\"\", end=\"\"):\n    \n\n    location = os.path.join(\n        os.path.dirname(os.path.realpath(__file__)),\n        \"wordlists\",\n    )\n\n    if sowpods:\n        filename = \"sowpods.txt\"\n    else:\n        filename = \"twl.txt\"\n\n    filepath = os.path.join(location, filename)\n\n    with open(filepath) as wordfile:\n        for word in wordfile.readlines():\n            word = word.strip()\n            if start and end and word.startswith(start) and word.endswith(end):\n                yield word\n            elif start and word.startswith(start) and not end:\n                yield word\n            elif end and word.endswith(end) and not start:\n                yield word\n            elif not start and not end:\n                yield word", "docstring": "Opens the word list file.\n\nArgs:\nsowpods: a boolean to declare using the sowpods list or TWL (default)\nstart: a string of starting characters to find anagrams based on\nend: a string of ending characters to find anagrams based on\n\nYeilds:\na word at a time out of 178691 words for TWL, 267751 for sowpods. Much\nless if either start or end are used (filtering is applied here)", "source": "juraj-google-style"}
{"code": "def __init__(self, n, key=None, reverse=False):\n    self._n = n\n    self._key = key\n    self._reverse = reverse", "docstring": "Creates a per-key Top operation.\n\nThe arguments 'key' and 'reverse' may be passed as keyword arguments,\nand have the same meaning as for Python's sort functions.\n\nArgs:\nn: number of elements to extract from pcoll.\nkey: (optional) a mapping of elements to a comparable key, similar to\nthe key argument of Python's sorting methods.\nreverse: (optional) whether to order things smallest to largest, rather\nthan largest to smallest", "source": "github-repos"}
{"code": "def set_data(self, data):\n        \n        for name in self._fields:\n            setattr(self, name, data.get(name))\n        return self", "docstring": "Fills form with data\n\nArgs:\ndata (dict): Data to assign form fields.\n\nReturns:\nSelf. Form object.", "source": "juraj-google-style"}
{"code": "def Compile(self, filter_implementation):\n    self.attribute = self.swap_source.get(self.attribute, self.attribute)\n    arguments = [self.attribute]\n    op_str = self.operator.lower()\n    operator = filter_implementation.OPS.get(op_str, None)\n    if (not operator):\n        raise errors.ParseError('Unknown operator {0:s} provided.'.format(self.operator))\n    if (self.attribute == 'timestamp'):\n        args = []\n        for argument in self.args:\n            args.append(DateCompareObject(argument))\n        self.args = args\n    for argument in self.args:\n        if isinstance(argument, DateCompareObject):\n            if ('Less' in str(operator)):\n                TimeRangeCache.SetUpperTimestamp(argument.data)\n            else:\n                TimeRangeCache.SetLowerTimestamp(argument.data)\n    arguments.extend(self.args)\n    expander = filter_implementation.FILTERS['ValueExpander']\n    ops = operator(arguments=arguments, value_expander=expander)\n    if (not self.bool_value):\n        if hasattr(ops, 'FlipBool'):\n            ops.FlipBool()\n    return ops", "docstring": "Compiles the filter implementation.\n\nArgs:\nfilter_implementation: a filter object (instance of objectfilter.TODO).\n\nReturns:\nA filter operator (instance of TODO).\n\nRaises:\nParserError: if an unknown operator is provided.", "source": "codesearchnet"}
{"code": "def _unsorted_segment_N(data, segment_ids, num_segments):\n    num_segments = ops.convert_to_tensor(num_segments)\n    segment_ids_shape = array_ops.shape_internal(segment_ids)\n    ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)\n    n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)\n    broadcastable_shape = array_ops.concat([num_segments[array_ops.newaxis], array_ops.ones([array_ops.rank(data) - array_ops.rank(segment_ids)], dtype=num_segments.dtype)], axis=0)\n    n = array_ops.reshape(n, broadcastable_shape)\n    return gen_math_ops.maximum(n, 1)", "docstring": "Helper function for unsorted_segment_mean/_sqrtN.\n\nComputes the number of segment entries with 0-entries set to 1 to allow\ndivision by N.\n\nArgs:\ndata: A `Tensor` with data that will be assembled in the output.\nsegment_ids: An integer tensor whose shape is a prefix of `data.shape`. The\nvalues must be in the range `[0, num_segments)`. The values are always\nvalidated to be in range on CPU, never validated on TPU/GPU.\nnum_segments: An integer scalar `Tensor`. The number of distinct segment\nIDs.\n\nReturns:\nA `Tensor` with the number of segment entries with 0-entries set to 1.", "source": "github-repos"}
{"code": "def set_epsilon(value):\n    global _EPSILON\n    _EPSILON = value", "docstring": "Set the value of the fuzz factor used in numeric expressions.\n\nArgs:\nvalue: float. New value of epsilon.\n\nExamples:\n>>> keras.config.epsilon()\n1e-07\n\n>>> keras.config.set_epsilon(1e-5)\n>>> keras.config.epsilon()\n1e-05\n\n>>> # Set it back to the default value.\n>>> keras.config.set_epsilon(1e-7)", "source": "github-repos"}
{"code": "async def addNode(self, name, valu, props=None):\n        \n\n        try:\n\n            fnib = self._getNodeFnib(name, valu)\n            retn = await self._addNodeFnib(fnib, props=props)\n            return retn\n\n        except asyncio.CancelledError:\n            raise\n\n        except Exception:\n\n            mesg = f'Error adding node: {name} {valu!r} {props!r}'\n            logger.exception(mesg)\n            if self.strict:\n                raise\n\n            return None", "docstring": "Add a node by form name and value with optional props.\n\nArgs:\nname (str): The form of node to add.\nvalu (obj): The value for the node.\nprops (dict): Optional secondary properties for the node.", "source": "juraj-google-style"}
{"code": "def are_equal_xml(a_xml, b_xml):\n    \n    a_dom = xml.dom.minidom.parseString(a_xml)\n    b_dom = xml.dom.minidom.parseString(b_xml)\n    return are_equal_elements(a_dom.documentElement, b_dom.documentElement)", "docstring": "Normalize and compare XML documents for equality. The document may or may not be\na DataONE type.\n\nArgs:\na_xml: str\nb_xml: str\nXML documents to compare for equality.\n\nReturns:\nbool: ``True`` if the XML documents are semantically equivalent.", "source": "juraj-google-style"}
{"code": "def coresight_configure(self, ir_pre=0, dr_pre=0, ir_post=0, dr_post=0, ir_len=0, perform_tif_init=True):\n    if (self.tif == enums.JLinkInterfaces.SWD):\n        res = self._dll.JLINKARM_CORESIGHT_Configure('')\n        if (res < 0):\n            raise errors.JLinkException(res)\n        return None\n    config_string = 'IRPre=%s;DRPre=%s;IRPost=%s;DRPost=%s;IRLenDevice=%s;'\n    config_string = (config_string % (ir_pre, dr_pre, ir_post, dr_post, ir_len))\n    if (not perform_tif_init):\n        config_string = (config_string + 'PerformTIFInit=0;')\n    res = self._dll.JLINKARM_CORESIGHT_Configure(config_string.encode())\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Prepares target and J-Link for CoreSight function usage.\n\nArgs:\nself (JLink): the ``JLink`` instance\nir_pre (int): sum of instruction register length of all JTAG devices\nin the JTAG chain, close to TDO than the actual one, that J-Link\nshall communicate with\ndr_pre (int): number of JTAG devices in the JTAG chain, closer to TDO\nthan the actual one, that J-Link shall communicate with\nir_post (int): sum of instruction register length of all JTAG devices\nin the JTAG chain, following the actual one, that J-Link shall\ncommunicate with\ndr_post (int): Number of JTAG devices in the JTAG chain, following\nthe actual one, J-Link shall communicate with\nir_len (int): instruction register length of the actual device that\nJ-Link shall communicate with\nperform_tif_init (bool): if ``False``, then do not output switching\nsequence on completion\n\nReturns:\n``None``\n\nNote:\nThis must be called before calling ``coresight_read()`` or\n``coresight_write()``.", "source": "codesearchnet"}
{"code": "def FormatTypeSummaryTable(self, level_name, name_to_problist):\n    output = []\n    output.append('<table>')\n    for classname in sorted(name_to_problist.keys()):\n        problist = name_to_problist[classname]\n        human_name = MaybePluralizeWord(problist.count, UnCamelCase(classname))\n        output.append(('<tr><td>%d</td><td><a href=\"\n    output.append('</table>\\n')\n    return ''.join(output)", "docstring": "Return an HTML table listing the number of problems by class name.\n\nArgs:\nlevel_name: string such as \"Error\" or \"Warning\"\nname_to_problist: dict mapping class name to an BoundedProblemList object\n\nReturns:\nHTML in a string", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.Health = channel.unary_unary(\n        '/health.Health/Health',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def escape(inp, quote='\"'):\n    \n    output = \"\"\n\n    for c in inp:\n        if c == quote:\n            output += '\\\\'\n\n        output += c\n\n    return output", "docstring": "Escape `quote` in string `inp`.\n\nExample usage::\n\n>>> escape('hello \"')\n'hello \\\\\"'\n>>> escape('hello \\\\\"')\n'hello \\\\\\\\\"'\n\nArgs:\ninp (str): String in which `quote` will be escaped.\nquote (char, default \"): Specify which character will be escaped.\n\nReturns:\nstr: Escaped string.", "source": "juraj-google-style"}
{"code": "def _create_controller_info_record(self, controller_module_name):\n    module = self._controller_modules[controller_module_name]\n    controller_info = None\n    try:\n        controller_info = module.get_info(copy.copy(self._controller_objects[controller_module_name]))\n    except AttributeError:\n        logging.warning('No optional debug info found for controller %s. To provide it, implement `get_info`.', controller_module_name)\n    try:\n        yaml.dump(controller_info)\n    except TypeError:\n        logging.warning('The info of controller %s in class \"%s\" is not YAML serializable! Coercing it to string.', controller_module_name, self._class_name)\n        controller_info = str(controller_info)\n    return records.ControllerInfoRecord(self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME, controller_info)", "docstring": "Creates controller info record for a particular controller type.\n\nInfo is retrieved from all the controller objects spawned from the\nspecified module, using the controller module's `get_info` function.\n\nArgs:\ncontroller_module_name: string, the name of the controller module\nto retrieve info from.\n\nReturns:\nA records.ControllerInfoRecord object.", "source": "github-repos"}
{"code": "def save_b26_file(filename, instruments=None, scripts=None, probes=None, overwrite=False, verbose=False):\n    \n\n    \n    if os.path.isfile(filename) and overwrite == False:\n        data_dict = load_b26_file(filename)\n    else:\n        data_dict = {}\n\n    if instruments is not None:\n        if 'instruments' in data_dict:\n            data_dict['instruments'].update(instruments)\n        else:\n            data_dict['instruments'] = instruments\n\n    if scripts is not None:\n        if 'scripts' in data_dict:\n            data_dict['scripts'].update(scripts)\n        else:\n            data_dict['scripts'] = scripts\n\n    if probes is not None:\n        probe_instruments = list(probes.keys())\n        if 'probes' in data_dict:\n            \n            probe_instruments= set(probe_instruments + list(data_dict['probes'].keys()))\n        else:\n            data_dict.update({'probes':{}})\n\n        for instrument in probe_instruments:\n            if instrument in data_dict['probes'] and instrument in probes:\n                \n                data_dict['probes'][instrument] = ','.join(set(data_dict['probes'][instrument].split(',') + probes[instrument].split(',')))\n            else:\n                data_dict['probes'].update(probes)\n\n    if verbose:\n        print(('writing ', filename))\n\n    if data_dict != {}:\n\n        \n        \n        \n        \n        \n        if verbose:\n            print(('filename', filename))\n            print(('exists', os.path.exists(os.path.dirname(filename))))\n\n        if os.path.exists(os.path.dirname(filename)) is False:\n            \n            os.makedirs(os.path.dirname(filename))\n\n        with open(filename, 'w') as outfile:\n            tmp = json.dump(data_dict, outfile, indent=4)", "docstring": "save instruments, scripts and probes as a json file\nArgs:\nfilename:\ninstruments:\nscripts:\nprobes: dictionary of the form {instrument_name : probe_1_of_intrument, probe_2_of_intrument, ...}\n\nReturns:", "source": "juraj-google-style"}
{"code": "def validate(self, corpus):\n        \n\n        passed = True\n        results = {}\n\n        for validator in self.validators:\n            sub_result = validator.validate(corpus)\n            results[validator.name()] = sub_result\n\n            if not sub_result.passed:\n                passed = False\n\n        return CombinedValidationResult(passed, results)", "docstring": "Perform validation on the given corpus.\n\nArgs:\ncorpus (Corpus): The corpus to test/validate.", "source": "juraj-google-style"}
{"code": "def GetSortedEvents(self, time_range=None):\n    \n    if not self._storage_file:\n      raise IOError('Unable to read from closed storage writer.')\n\n    return self._storage_file.GetSortedEvents(time_range=time_range)", "docstring": "Retrieves the events in increasing chronological order.\n\nThis includes all events written to the storage including those pending\nbeing flushed (written) to the storage.\n\nArgs:\ntime_range (Optional[TimeRange]): time range used to filter events\nthat fall in a specific period.\n\nReturns:\ngenerator(EventObject): event generator.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def validate(request: Union[(Dict, List)], schema: dict) -> Union[(Dict, List)]:\n    jsonschema_validate(request, schema)\n    return request", "docstring": "Wraps jsonschema.validate, returning the same object passed in.\n\nArgs:\nrequest: The deserialized-from-json request.\nschema: The jsonschema schema to validate against.\n\nRaises:\njsonschema.ValidationError", "source": "codesearchnet"}
{"code": "def __init__(self, min_obs=10):\n        \n\n        self.min_obs = min_obs\n        self.label_encoder = LabelEncoder(min_obs)", "docstring": "Initialize the OneHotEncoder class object.\n\nArgs:\nmin_obs (int): minimum number of observation to create a dummy variable\nlabel_encoder (LabelEncoder): LabelEncoder that transofrm", "source": "juraj-google-style"}
{"code": "def removeRow(self, triggered):\n        \n        if triggered:\n            model = self.tableView.model()\n            selection = self.tableView.selectedIndexes()\n\n            rows = [index.row() for index in selection]\n            model.removeDataFrameRows(set(rows))\n            self.sender().setChecked(False)", "docstring": "Removes a row to the model.\n\nThis method is also a slot.\n\nArgs:\ntriggered (bool): If the corresponding button was\nactivated, the selected row will be removed\nfrom the model.", "source": "juraj-google-style"}
{"code": "def overlay(self, feature, color='Blue', opacity=0.6):\n    result = self.copy()\n    if (type(feature) == Table):\n        if ('feature' in feature):\n            feature = feature['feature']\n        else:\n            feature = Circle.map_table(feature)\n    if (type(feature) in [list, np.ndarray]):\n        for f in feature:\n            f._attrs['fill_color'] = color\n            f._attrs['fill_opacity'] = opacity\n            f.draw_on(result._folium_map)\n    elif (type(feature) == Map):\n        for i in range(len(feature._features)):\n            f = feature._features[i]\n            f._attrs['fill_color'] = color\n            f._attrs['fill_opacity'] = opacity\n            f.draw_on(result._folium_map)\n    elif (type(feature) == Region):\n        feature._attrs['fill_color'] = color\n        feature._attrs['fill_opacity'] = opacity\n        feature.draw_on(result._folium_map)\n    return result", "docstring": "Overlays ``feature`` on the map. Returns a new Map.\n\nArgs:\n``feature``: a ``Table`` of map features, a list of map features,\na Map, a Region, or a circle marker map table. The features will\nbe overlayed on the Map with specified ``color``.\n\n``color`` (``str``): Color of feature. Defaults to 'Blue'\n\n``opacity`` (``float``): Opacity of overlain feature. Defaults to\n0.6.\n\nReturns:\nA new ``Map`` with the overlain ``feature``.", "source": "codesearchnet"}
{"code": "def get_vulnerability(source, sink, triggers, lattice, cfg, interactive, blackbox_mapping):\n    nodes_in_constraint = [secondary for secondary in reversed(source.secondary_nodes) if lattice.in_constraint(secondary, sink.cfg_node)]\n    nodes_in_constraint.append(source.cfg_node)\n    if sink.trigger.all_arguments_propagate_taint:\n        sink_args = get_sink_args(sink.cfg_node)\n    else:\n        sink_args = get_sink_args_which_propagate(sink, sink.cfg_node.ast_node)\n    tainted_node_in_sink_arg = get_tainted_node_in_sink_args(sink_args, nodes_in_constraint)\n    if tainted_node_in_sink_arg:\n        vuln_deets = {'source': source.cfg_node, 'source_trigger_word': source.trigger_word, 'sink': sink.cfg_node, 'sink_trigger_word': sink.trigger_word}\n        sanitiser_nodes = set()\n        potential_sanitiser = None\n        if sink.sanitisers:\n            for sanitiser in sink.sanitisers:\n                for cfg_node in triggers.sanitiser_dict[sanitiser]:\n                    if isinstance(cfg_node, AssignmentNode):\n                        sanitiser_nodes.add(cfg_node)\n                    elif isinstance(cfg_node, IfNode):\n                        potential_sanitiser = cfg_node\n        def_use = build_def_use_chain(cfg.nodes, lattice)\n        for chain in get_vulnerability_chains(source.cfg_node, sink.cfg_node, def_use):\n            (vulnerability_type, interactive) = how_vulnerable(chain, blackbox_mapping, sanitiser_nodes, potential_sanitiser, cfg.blackbox_assignments, interactive, vuln_deets)\n            if (vulnerability_type == VulnerabilityType.FALSE):\n                continue\n            vuln_deets['reassignment_nodes'] = chain\n            return (vuln_factory(vulnerability_type)(**vuln_deets), interactive)\n    return (None, interactive)", "docstring": "Get vulnerability between source and sink if it exists.\n\nUses triggers to find sanitisers.\n\nNote: When a secondary node is in_constraint with the sink\nbut not the source, the secondary is a save_N_LHS\nnode made in process_function in expr_visitor.\n\nArgs:\nsource(TriggerNode): TriggerNode of the source.\nsink(TriggerNode): TriggerNode of the sink.\ntriggers(Triggers): Triggers of the CFG.\nlattice(Lattice): the lattice we're analysing.\ncfg(CFG): .blackbox_assignments used in is_unknown, .nodes used in build_def_use_chain\ninteractive(bool): determines if we ask the user about blackbox functions not in the mapping file.\nblackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.\n\nReturns:\nA Vulnerability if it exists, else None", "source": "codesearchnet"}
{"code": "def add(self, username, user_api, filename=None):\n    keys = API.__get_keys(filename)\n    user = user_api.find(username)[0]\n    distinguished_name = user.entry_dn\n    if ('ldapPublicKey' not in user.objectClass):\n        raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(('LDAP Public Key Object Class not found. ' + 'Please ensure user was created correctly.'))\n    else:\n        for key in list(set(keys)):\n            print(key)\n            try:\n                SSHKey(key).parse()\n            except Exception as err:\n                raise err from None\n            else:\n                operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}\n                self.client.modify(distinguished_name, operation)", "docstring": "Add SSH public key to a user's profile.\n\nArgs:\nusername: Username to attach SSH public key to\nfilename: Filename containing keys to add (optional)\n\nRaises:\nldap3.core.exceptions.LDAPNoSuchAttributeResult:\nldapPublicKey isn't attached to objectClass", "source": "codesearchnet"}
{"code": "def add_update_resources(self, resources, ignore_datasetid=False):\n    if (not isinstance(resources, list)):\n        raise HDXError('Resources should be a list!')\n    for resource in resources:\n        self.add_update_resource(resource, ignore_datasetid)", "docstring": "Add new or update existing resources with new metadata to the dataset\n\nArgs:\nresources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries\nignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def mv(src, dst):\n    \n    if not exists(src):\n        raise File404(src)\n\n    try:\n        shutil.move(src, dst)\n    except Exception as e:\n        raise IOError(str(e))", "docstring": "Move a file or directory.\n\nIf the destination already exists, this will attempt to overwrite\nit.\n\nArguments:\n\nsrc (string): path to the source file or directory.\ndst (string): path to the destination file or directory.\n\nRaises:\n\nFile404: if source does not exist.\nIOError: in case of error.", "source": "juraj-google-style"}
{"code": "def AnalyzeClient(self, client):\n    \n\n    client_id = self._ClientIdFromURN(client.urn)\n\n    \n    \n    \n    \n    \n    \n\n    keywords = [self._NormalizeKeyword(client_id), \".\"]\n\n    def TryAppend(prefix, keyword):\n      precondition.AssertType(prefix, Text)\n      if keyword:\n        keyword_string = self._NormalizeKeyword(Text(keyword))\n        keywords.append(keyword_string)\n        if prefix:\n          keywords.append(prefix + \":\" + keyword_string)\n\n    def TryAppendPrefixes(prefix, keyword, delimiter):\n      if keyword is None:\n        return 0\n      TryAppend(prefix, keyword)\n      segments = keyword.split(delimiter)\n      for i in range(1, len(segments)):\n        TryAppend(prefix, delimiter.join(segments[0:i]))\n      return len(segments)\n\n    def TryAppendIP(ip):\n      TryAppend(\"ip\", ip)\n      \n      if TryAppendPrefixes(\"ip\", str(ip), \".\") == 4:\n        return\n      \n      TryAppendPrefixes(\"ip\", str(ip), \":\")\n\n    def TryAppendMac(mac):\n      TryAppend(\"mac\", mac)\n      if len(mac) == 12:\n        \n        \n        TryAppend(\"mac\", \":\".join([mac[i:i + 2] for i in range(0, 12, 2)]))\n\n    s = client.Schema\n    TryAppend(\"host\", client.Get(s.HOSTNAME))\n    TryAppendPrefixes(\"host\", client.Get(s.HOSTNAME), \"-\")\n    TryAppend(\"host\", client.Get(s.FQDN))\n    TryAppendPrefixes(\"host\", client.Get(s.FQDN), \".\")\n    TryAppend(\"\", client.Get(s.SYSTEM))\n    TryAppend(\"\", client.Get(s.UNAME))\n    TryAppend(\"\", client.Get(s.OS_RELEASE))\n    TryAppend(\"\", client.Get(s.OS_VERSION))\n    TryAppend(\"\", client.Get(s.KERNEL))\n    TryAppend(\"\", client.Get(s.ARCH))\n\n    kb = client.Get(s.KNOWLEDGE_BASE)\n    if kb:\n      for user in kb.users:\n        TryAppend(\"user\", user.username)\n        TryAppend(\"\", user.full_name)\n        if user.full_name:\n          for name in user.full_name.split():\n            \n            \n            \n            TryAppend(\"\", name.strip(\"\\\"'()\"))\n\n    for username in client.Get(s.USERNAMES, []):\n      TryAppend(\"user\", username)\n\n    for interface in client.Get(s.INTERFACES, []):\n      if interface.mac_address:\n        TryAppendMac(interface.mac_address.human_readable_address)\n      for ip in interface.GetIPAddresses():\n        TryAppendIP(ip)\n\n    \n    \n    if client.Get(s.MAC_ADDRESS):\n      for mac in str(client.Get(s.MAC_ADDRESS)).split(\"\\n\"):\n        TryAppendMac(mac)\n    ip_list = client.Get(s.HOST_IPS, \"\")\n    for ip in str(ip_list).split(\"\\n\"):\n      TryAppendIP(ip)\n\n    client_info = client.Get(s.CLIENT_INFO)\n    if client_info:\n      TryAppend(\"client\", client_info.client_name)\n      TryAppend(\"client\", client_info.client_version)\n      if client_info.labels:\n        for label in client_info.labels:\n          TryAppend(\"label\", label)\n\n    for label in client.GetLabelsNames():\n      TryAppend(\"label\", label)\n\n    return client_id, keywords", "docstring": "Finds the client_id and keywords for a client.\n\nArgs:\nclient: A VFSGRRClient record to find keywords for.\n\nReturns:\nA tuple (client_id, keywords) where client_id is the client identifier and\nkeywords is a list of keywords related to client.", "source": "juraj-google-style"}
{"code": "def request_json(link, outfile, force_rerun_flag, outdir=None):\n    \n    if not outdir:\n        outdir = ''\n    outfile = op.join(outdir, outfile)\n\n    if force_rerun(flag=force_rerun_flag, outfile=outfile):\n        text_raw = requests.get(link)\n        my_dict = text_raw.json()\n        with open(outfile, 'w') as f:\n            json.dump(my_dict, f)\n\n        log.debug('Loaded and saved {} to {}'.format(link, outfile))\n    else:\n        with open(outfile, 'r') as f:\n            my_dict = json.load(f)\n        log.debug('Loaded {}'.format(outfile))\n\n    return my_dict", "docstring": "Download a file in JSON format from a web request\n\nArgs:\nlink: Link to web request\noutfile: Name of output file\noutdir: Directory of output file\nforce_rerun_flag: If true, redownload the file\n\nReturns:\ndict: contents of the JSON request", "source": "juraj-google-style"}
{"code": "def delete(self, vid):\n    command = ('no vlan %s' % vid)\n    return (self.configure(command) if isvlan(vid) else False)", "docstring": "Deletes a VLAN from the running configuration\n\nArgs:\nvid (str): The VLAN ID to delete\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "codesearchnet"}
{"code": "def get_student_certificates(self, username, course_ids=None):\n        \n        \n        if course_ids is None:\n            enrollments_client = CourseEnrollments(self.requester, self.base_url)\n            enrollments = enrollments_client.get_student_enrollments()\n            course_ids = list(enrollments.get_enrolled_course_ids())\n\n        all_certificates = []\n        for course_id in course_ids:\n            try:\n                all_certificates.append(self.get_student_certificate(username, course_id))\n            except HTTPError as error:\n                if error.response.status_code >= 500:\n                    raise\n\n        return Certificates(all_certificates)", "docstring": "Returns an Certificates object with the user certificates\n\nArgs:\nusername (str): an edx user's username\ncourse_ids (list): a list of edX course ids.\n\nReturns:\nCertificates: object representing the student certificates for a course", "source": "juraj-google-style"}
{"code": "def read(file_path):\n    \n    \n    actual_file_path = os.path.expanduser(file_path)\n    with open(actual_file_path, 'r') as f:\n        lines = f.readlines()\n    \n    \n    gmt = []\n    \n    \n    for line_num, line in enumerate(lines):\n        \n        fields = line.split('\\t')\n\n        assert len(fields) > 2, (\n            \"Each line must have at least 3 tab-delimited items. \" +\n            \"line_num: {}, fields: {}\").format(line_num, fields)\n        \n        \n        fields[-1] = fields[-1].rstrip()\n        \n        \n        entries = fields[2:]\n        \n        \n        entries = [x for x in entries if x]\n\n        assert len(set(entries)) == len(entries), (\n            \"There should not be duplicate entries for the same set. \" +\n            \"line_num: {}, entries: {}\").format(line_num, entries)\n\n        \n        line_dict = {SET_IDENTIFIER_FIELD: fields[0],\n                     SET_DESC_FIELD: fields[1],\n                     SET_MEMBERS_FIELD: entries}\n        gmt.append(line_dict)\n\n    verify_gmt_integrity(gmt)\n\n    return gmt", "docstring": "Read a gmt file at the path specified by file_path.\n\nArgs:\nfile_path (string): path to gmt file\n\nReturns:\ngmt (GMT object): list of dicts, where each dict corresponds to one\nline of the GMT file", "source": "juraj-google-style"}
{"code": "def _create_extension(o, otype, fqdn, pmodule):\n    import types\n    xdict = {'__acornext__': o, '__doc__': o.__doc__}\n    if (otype == 'classes'):\n        classname = o.__name__\n        try:\n            if (fqdn in _explicit_subclasses):\n                xclass = eval(_explicit_subclasses[fqdn])\n                xclass.__acornext__ = o\n            else:\n                xclass = type(classname, (o,), xdict)\n            xclass.__module__ = o.__module__\n            return xclass\n        except TypeError:\n            _final_objs.append(id(o))\n            return o\n    elif ((otype in ['functions', 'descriptors', 'unknowns']) or ((otype == 'builtins') and (isinstance(o, types.BuiltinFunctionType) or isinstance(o, types.BuiltinMethodType)))):\n\n        def xwrapper(*args, **kwargs):\n            try:\n                return o(*args, **kwargs)\n            except:\n                targs = list(map(type, args))\n                kargs = list(kwargs.keys())\n                msg.err('xwrapper: {}({}, {})'.format(o, targs, kargs), 2)\n                pass\n        for (attr, val) in xdict.items():\n            setattr(xwrapper, attr, val)\n        failed = False\n        setattr(xwrapper, '__getattribute__', _safe_getattr(xwrapper))\n        failed = (not _update_attrs(xwrapper, o, ['__call__']))\n        if ((otype in ['descriptors', 'unknowns']) and inspect.ismodule(pmodule)):\n            if hasattr(o, '__objclass__'):\n                setattr(xwrapper, '__module__', pmodule.__name__)\n            elif (hasattr(o, '__class__') and (o.__class__ is not None)):\n                setattr(xwrapper, '__module__', pmodule.__name__)\n        if (not failed):\n            return xwrapper", "docstring": "Creates an extension object to represent `o` that can have attributes\nset, but which behaves identically to the given object.\n\nArgs:\no: object to create an extension for; no checks are performed to see if\nextension is actually required.\notype (str): object types; one of [\"classes\", \"functions\", \"methods\",\n\"modules\"].\nfqdn (str): fully qualified name of the package that the object belongs\nto.\npmodule: the parent module (or class) that `o` belongs to; used for setting\nthe special __module__ attribute.", "source": "codesearchnet"}
{"code": "def parse_arguments(argv):\n    parser = argparse.ArgumentParser(description='online-clustering')\n    parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')\n    parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)\n    args, _ = parser.parse_known_args(args=argv)\n    return args", "docstring": "Parses the arguments passed to the command line and returns them as an object\n\nArgs:\nargv: The arguments passed to the command line.\n\nReturns:\nThe arguments that are being passed in.", "source": "github-repos"}
{"code": "def recursive_create_dir_v2(path):\n    _pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))", "docstring": "Creates a directory and all parent/intermediate directories.\n\nIt succeeds if path already exists and is writable.\n\nArgs:\npath: string, name of the directory to be created\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def add_candidate_peer_endpoints(self, peer_endpoints):\n        \n        if self._topology:\n            self._topology.add_candidate_peer_endpoints(peer_endpoints)\n        else:\n            LOGGER.debug(\"Could not add peer endpoints to topology. \"\n                         \"ConnectionManager does not exist.\")", "docstring": "Adds candidate endpoints to the list of endpoints to\nattempt to peer with.\n\nArgs:\npeer_endpoints ([str]): A list of public uri's which the\nvalidator can attempt to peer with.", "source": "juraj-google-style"}
{"code": "def parent_nodes(self):\n    node_deps = []\n    for kt in self.arguments.keras_tensors:\n        op = kt._keras_history.operation\n        node_index = kt._keras_history.node_index\n        if op is not None:\n            node_deps.append(op._inbound_nodes[node_index])\n    return node_deps", "docstring": "The parent `Node`s.\n\nReturns:\nall the `Node`s whose output this node immediately depends on.", "source": "github-repos"}
{"code": "def sample(self, samples=[], bounds=None, **sample_values):\n    if util.config.future_deprecations:\n        self.param.warning('The HoloMap.sample method is deprecated, for equivalent functionality use HoloMap.apply.sample().collapse().')\n    dims = self.last.ndims\n    if (isinstance(samples, tuple) or np.isscalar(samples)):\n        if (dims == 1):\n            xlim = self.last.range(0)\n            (lower, upper) = ((xlim[0], xlim[1]) if (bounds is None) else bounds)\n            edges = np.linspace(lower, upper, (samples + 1))\n            linsamples = [((l + u) / 2.0) for (l, u) in zip(edges[:(- 1)], edges[1:])]\n        elif (dims == 2):\n            (rows, cols) = samples\n            if bounds:\n                (l, b, r, t) = bounds\n            else:\n                (l, r) = self.last.range(0)\n                (b, t) = self.last.range(1)\n            xedges = np.linspace(l, r, (cols + 1))\n            yedges = np.linspace(b, t, (rows + 1))\n            xsamples = [((lx + ux) / 2.0) for (lx, ux) in zip(xedges[:(- 1)], xedges[1:])]\n            ysamples = [((ly + uy) / 2.0) for (ly, uy) in zip(yedges[:(- 1)], yedges[1:])]\n            (Y, X) = np.meshgrid(ysamples, xsamples)\n            linsamples = list(zip(X.flat, Y.flat))\n        else:\n            raise NotImplementedError('Regular sampling not implemented for elements with more than two dimensions.')\n        samples = list(util.unique_iterator(self.last.closest(linsamples)))\n    sampled = self.clone([(k, view.sample(samples, closest=False, **sample_values)) for (k, view) in self.data.items()])\n    from ..element import Table\n    return Table(sampled.collapse())", "docstring": "Samples element values at supplied coordinates.\n\nAllows sampling of element with a list of coordinates matching\nthe key dimensions, returning a new object containing just the\nselected samples. Supports multiple signatures:\n\nSampling with a list of coordinates, e.g.:\n\nds.sample([(0, 0), (0.1, 0.2), ...])\n\nSampling a range or grid of coordinates, e.g.:\n\n1D: ds.sample(3)\n2D: ds.sample((3, 3))\n\nSampling by keyword, e.g.:\n\nds.sample(x=0)\n\nArgs:\nsamples: List of nd-coordinates to sample\nbounds: Bounds of the region to sample\nDefined as two-tuple for 1D sampling and four-tuple\nfor 2D sampling.\nclosest: Whether to snap to closest coordinates\n**kwargs: Coordinates specified as keyword pairs\nKeywords of dimensions and scalar coordinates\n\nReturns:\nA Table containing the sampled coordinates", "source": "codesearchnet"}
{"code": "def set_geometry(self, geom):\n        \n        (self.thet0, self.thet, self.phi0, self.phi, self.alpha, \n            self.beta) = geom", "docstring": "A convenience function to set the geometry variables.\n\nArgs:\ngeom: A tuple containing (thet0, thet, phi0, phi, alpha, beta).\nSee the Scatterer class documentation for a description of these\nangles.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False):\n    \n    encrypted_stream = resolver.Resolver.OpenFileObject(\n        path_spec, resolver_context=resolver_context)\n    if not encrypted_stream:\n      raise errors.BackEndError(\n          'Unable to open encrypted stream: {0:s}.'.format(\n              self.path_spec.comparable))\n\n    super(EncryptedStreamFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._encrypted_stream = encrypted_stream\n    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes a file entry.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\n\nRaises:\nBackEndError: when the encrypted stream is missing.", "source": "juraj-google-style"}
{"code": "def pretty_description(description, wrap_at=None, indent=0):\n    if ((wrap_at is None) or (wrap_at < 0)):\n        width = console_width(default=79)\n        if (wrap_at is None):\n            wrap_at = width\n        else:\n            wrap_at += width\n    indent = (' ' * indent)\n    text_wrapper = textwrap.TextWrapper(width=wrap_at, replace_whitespace=False, initial_indent=indent, subsequent_indent=indent)\n    new_desc = []\n    for line in description.split('\\n'):\n        new_desc.append(line.replace('\\n', '').strip())\n    while (not new_desc[0]):\n        del new_desc[0]\n    while (not new_desc[(- 1)]):\n        del new_desc[(- 1)]\n    separators = [i for (i, l) in enumerate(new_desc) if (not l)]\n    paragraphs = []\n    if separators:\n        (start, end) = (0, separators[0])\n        paragraphs.append(new_desc[start:end])\n        for i in range((len(separators) - 1)):\n            start = (end + 1)\n            end = separators[(i + 1)]\n            paragraphs.append(new_desc[start:end])\n        paragraphs.append(new_desc[(end + 1):])\n        return '\\n\\n'.join((text_wrapper.fill(' '.join(p)) for p in paragraphs))\n    return text_wrapper.fill(' '.join(new_desc))", "docstring": "Return a pretty formatted string given some text.\n\nArgs:\ndescription (str): string to format.\nwrap_at (int): maximum length of a line.\nindent (int): level of indentation.\n\nReturns:\nstr: pretty formatted string.", "source": "codesearchnet"}
{"code": "def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):\n        \n        shape = self.shape\n        num_features = self.num_features\n        \n        zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)\n        return zeros", "docstring": "Return zero-filled state tensor(s).\nArgs:\nbatch_size: int, float, or unit Tensor representing the batch size.\nReturns:\ntensor of shape '[batch_size x shape[0] x shape[1] x num_features]\nfilled with zeros", "source": "juraj-google-style"}
{"code": "def _has_valid_tensors(self):\n    return self._input_tensors is not None and self._output_tensors", "docstring": "Checks if the input and output tensors have been initialized.\n\nReturns:\nBool.", "source": "github-repos"}
{"code": "def expired(self, cfgstr=None, product=None):\n    products = self._rectify_products(product)\n    certificate = self._get_certificate(cfgstr=cfgstr)\n    if (certificate is None):\n        is_expired = True\n    elif (products is None):\n        is_expired = False\n    elif (not all(map(os.path.exists, products))):\n        is_expired = True\n    else:\n        product_file_hash = self._product_file_hash(products)\n        certificate_hash = certificate.get('product_file_hash', None)\n        is_expired = (product_file_hash != certificate_hash)\n    return is_expired", "docstring": "Check to see if a previously existing stamp is still valid and if the\nexpected result of that computation still exists.\n\nArgs:\ncfgstr (str, optional): override the default cfgstr if specified\nproduct (PathLike or Sequence[PathLike], optional): override the\ndefault product if specified", "source": "codesearchnet"}
{"code": "def unpackVersion(ver):\n    \n    major = (ver >> 20 * 2) & mask20\n    minor = (ver >> 20) & mask20\n    patch = ver & mask20\n    return major, minor, patch", "docstring": "Unpack a system normalized integer representing a softare version into its component parts.\n\nArgs:\nver (int): System normalized integer value to unpack into a tuple.\n\nReturns:\n(int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.", "source": "juraj-google-style"}
{"code": "def obj_from_dict(info, parent=None, default_args=None):\n    assert (isinstance(info, dict) and ('type' in info))\n    assert (isinstance(default_args, dict) or (default_args is None))\n    args = info.copy()\n    obj_type = args.pop('type')\n    if mmcv.is_str(obj_type):\n        if (parent is not None):\n            obj_type = getattr(parent, obj_type)\n        else:\n            obj_type = sys.modules[obj_type]\n    elif (not isinstance(obj_type, type)):\n        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))\n    if (default_args is not None):\n        for (name, value) in default_args.items():\n            args.setdefault(name, value)\n    return obj_type(**args)", "docstring": "Initialize an object from dict.\n\nThe dict must contain the key \"type\", which indicates the object type, it\ncan be either a string or type, such as \"list\" or ``list``. Remaining\nfields are treated as the arguments for constructing the object.\n\nArgs:\ninfo (dict): Object types and arguments.\nparent (:class:`module`): Module which may containing expected object\nclasses.\ndefault_args (dict, optional): Default arguments for initializing the\nobject.\n\nReturns:\nany type: Object built from the dict.", "source": "codesearchnet"}
{"code": "def opt(parser: Union[(Parser, Sequence[Input])]) -> OptionalParser:\n    if isinstance(parser, str):\n        parser = lit(parser)\n    return OptionalParser(parser)", "docstring": "Optionally match a parser.\n\nAn ``OptionalParser`` attempts to match ``parser``. If it succeeds, it\nreturns a list of length one with the value returned by the parser as the\nonly element. If it fails, it returns an empty list.\n\nArgs:\nparser: Parser or literal", "source": "codesearchnet"}
{"code": "def sun_rise_set(latitude, longitude, date, mode='rise', timezone=0, zenith=None):\n    if (not date):\n        date = datetime.date.today()\n    zenith = ZENITH[zenith]\n    n = (date - datetime.date((date.year - 1), 12, 31)).days\n    lng_hour = (longitude / 15)\n    if (mode == 'rise'):\n        t = (n + ((6 - lng_hour) / 24))\n    elif (mode == 'set'):\n        t = (n + ((18 - lng_hour) / 24))\n    else:\n        raise ValueError(('Unknown mode value %r' % mode))\n    m = ((0.9856 * t) - 3.289)\n    l = (((m + (1.916 * math.sin(math.radians(m)))) + (0.02 * math.sin((2 * math.radians(m))))) + 282.634)\n    l = (abs(l) % 360)\n    ra = math.degrees(math.atan((0.91764 * math.tan(math.radians(l)))))\n    l_quandrant = (math.floor((l / 90)) * 90)\n    ra_quandrant = (math.floor((ra / 90)) * 90)\n    ra = (ra + (l_quandrant - ra_quandrant))\n    ra = (ra / 15)\n    sin_dec = (0.39782 * math.sin(math.radians(l)))\n    cos_dec = math.cos(math.asin(sin_dec))\n    cos_h = ((math.radians(zenith) - (sin_dec * math.sin(math.radians(latitude)))) / (cos_dec * math.cos(math.radians(latitude))))\n    if (cos_h > 1):\n        return None\n    elif (cos_h < (- 1)):\n        return None\n    if (mode == 'rise'):\n        h = (360 - math.degrees(math.acos(cos_h)))\n    else:\n        h = math.degrees(math.acos(cos_h))\n    h = (h / 15)\n    t = (((h + ra) - (0.06571 * t)) - 6.622)\n    utc = (t - lng_hour)\n    local_t = (utc + (timezone / 60))\n    if (local_t < 0):\n        local_t += 24\n    elif (local_t > 23):\n        local_t -= 24\n    hour = int(local_t)\n    if (hour == 0):\n        minute = int((60 * local_t))\n    else:\n        minute = int((60 * (local_t % hour)))\n    if (minute < 0):\n        minute += 60\n    return datetime.time(hour, minute)", "docstring": "Calculate sunrise or sunset for a specific location.\n\nThis function calculates the time sunrise or sunset, or optionally the\nbeginning or end of a specified twilight period.\n\nSource::\n\nAlmanac for Computers, 1990\npublished by Nautical Almanac Office\nUnited States Naval Observatory\nWashington, DC 20392\n\nArgs:\nlatitude (float): Location's latitude\nlongitude (float): Location's longitude\ndate (datetime.date): Calculate rise or set for given date\nmode (str): Which time to calculate\ntimezone (int): Offset from UTC in minutes\nzenith (str): Calculate rise/set events, or twilight times\n\nReturns:\ndatetime.time or None: The time for the given event in the specified\ntimezone, or ``None`` if the event doesn't occur on the given date\n\nRaises:\nValueError: Unknown value for ``mode``", "source": "codesearchnet"}
{"code": "def Uniform(cls, low: 'TensorFluent', high: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:\n    if (low.scope != high.scope):\n        raise ValueError('Uniform distribution: parameters must have same scope!')\n    dist = tf.distributions.Uniform(low.tensor, high.tensor)\n    batch = (low.batch or high.batch)\n    if ((not batch) and (batch_size is not None)):\n        t = dist.sample(batch_size)\n        batch = True\n    else:\n        t = dist.sample()\n    scope = low.scope.as_list()\n    return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Uniform sampling op with given low and high parameters.\n\nArgs:\nlow: The low parameter of the Uniform distribution.\nhigh: The high parameter of the Uniform distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Uniform distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\nValueError: If parameters do not have the same scope.", "source": "codesearchnet"}
{"code": "def FindEndOfExpressionInLine(line, startpos, stack):\n    for i in xrange(startpos, len(line)):\n        char = line[i]\n        if (char in '([{'):\n            stack.append(char)\n        elif (char == '<'):\n            if ((i > 0) and (line[(i - 1)] == '<')):\n                if (stack and (stack[(- 1)] == '<')):\n                    stack.pop()\n                    if (not stack):\n                        return ((- 1), None)\n            elif ((i > 0) and Search('\\\\boperator\\\\s*$', line[0:i])):\n                continue\n            else:\n                stack.append('<')\n        elif (char in ')]}'):\n            while (stack and (stack[(- 1)] == '<')):\n                stack.pop()\n            if (not stack):\n                return ((- 1), None)\n            if (((stack[(- 1)] == '(') and (char == ')')) or ((stack[(- 1)] == '[') and (char == ']')) or ((stack[(- 1)] == '{') and (char == '}'))):\n                stack.pop()\n                if (not stack):\n                    return ((i + 1), None)\n            else:\n                return ((- 1), None)\n        elif (char == '>'):\n            if ((i > 0) and ((line[(i - 1)] == '-') or Search('\\\\boperator\\\\s*$', line[0:(i - 1)]))):\n                continue\n            if stack:\n                if (stack[(- 1)] == '<'):\n                    stack.pop()\n                    if (not stack):\n                        return ((i + 1), None)\n        elif (char == ';'):\n            while (stack and (stack[(- 1)] == '<')):\n                stack.pop()\n            if (not stack):\n                return ((- 1), None)\n    return ((- 1), stack)", "docstring": "Find the position just after the end of current parenthesized expression.\n\nArgs:\nline: a CleansedLines line.\nstartpos: start searching at this position.\nstack: nesting stack at startpos.\n\nReturns:\nOn finding matching end: (index just after matching end, None)\nOn finding an unclosed expression: (-1, None)\nOtherwise: (-1, new stack at end of this line)", "source": "codesearchnet"}
{"code": "def manual_shuffle(self, axis, shuffle_func, lengths):\n        \n        if axis:\n            partitions = self.row_partitions\n        else:\n            partitions = self.column_partitions\n        func = self.preprocess_func(shuffle_func)\n        result = np.array([part.shuffle(func, lengths) for part in partitions])\n        return self.__constructor__(result) if axis else self.__constructor__(result.T)", "docstring": "Shuffle the partitions based on the `shuffle_func`.\n\nArgs:\naxis: The axis to shuffle across.\nshuffle_func: The function to apply before splitting the result.\nlengths: The length of each partition to split the result into.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "juraj-google-style"}
{"code": "def get_config():\n    return context().config", "docstring": "Get the ConfigProto of Context.\n\nReturns:\nThe ConfigProto of Context.", "source": "github-repos"}
{"code": "def segment(self, eps, min_time):\n    new_segments = []\n    for segment in self.segments:\n        segmented = segment.segment(eps, min_time)\n        for seg in segmented:\n            new_segments.append(Segment(seg))\n    self.segments = new_segments\n    return self", "docstring": "In-place segmentation of segments\n\nSpatio-temporal segmentation of each segment\nThe number of segments may increse after this step\n\nReturns:\nThis track", "source": "codesearchnet"}
{"code": "def _flat_structure(self):\n    return {'output_shapes': self._flat_shapes, 'output_types': self._flat_types}", "docstring": "Helper for setting `output_shapes` and `output_types` attrs of an op.\n\nMost dataset op constructors expect `output_shapes` and `output_types`\narguments that represent the flattened structure of an element. This helper\nfunction generates these attrs as a keyword argument dictionary, allowing\n`Dataset._variant_tensor` implementations to pass `**self._flat_structure`\nto the op constructor.\n\nReturns:\nA dictionary of keyword arguments that can be passed to a dataset op\nconstructor.", "source": "github-repos"}
{"code": "def open_workshared_model(self, model_path, central=False,\n                              detached=False, keep_worksets=True, audit=False,\n                              show_workset_config=1):\n        \n        if detached:\n            if audit:\n                if keep_worksets:\n                    self._add_entry(\n                        templates.CENTRAL_OPEN_DETACH_AUDIT\n                                 .format(model_path=model_path,\n                                         workset_config=show_workset_config)\n                                 )\n                else:\n                    self._add_entry(\n                        templates.CENTRAL_OPEN_DETACH_AUDIT_DISCARD\n                                 .format(model_path=model_path,\n                                         workset_config=show_workset_config)\n                                 )\n            else:\n                if keep_worksets:\n                    self._add_entry(\n                        templates.CENTRAL_OPEN_DETACH\n                                 .format(model_path=model_path,\n                                         workset_config=show_workset_config)\n                                 )\n                else:\n                    self._add_entry(\n                        templates.CENTRAL_OPEN_DETACH_DISCARD\n                                 .format(model_path=model_path,\n                                         workset_config=show_workset_config)\n                                 )\n        elif central:\n            if audit:\n                self._add_entry(\n                    templates.CENTRAL_OPEN_AUDIT\n                             .format(model_path=model_path,\n                                     workset_config=show_workset_config)\n                             )\n            else:\n                self._add_entry(\n                    templates.CENTRAL_OPEN\n                             .format(model_path=model_path,\n                                     workset_config=show_workset_config)\n                             )\n        else:\n            if audit:\n                self._add_entry(\n                    templates.WORKSHARED_OPEN_AUDIT\n                             .format(model_path=model_path,\n                                     workset_config=show_workset_config)\n                             )\n            else:\n                self._add_entry(\n                    templates.WORKSHARED_OPEN\n                             .format(model_path=model_path,\n                                     workset_config=show_workset_config)\n                             )", "docstring": "Append a open workshared model entry to the journal.\n\nThis instructs Revit to open a workshared model.\n\nArgs:\nmodel_path (str): full path to workshared model\ncentral (bool): if True opens central model and not local\ndetached (bool): if True opens a detached model\nkeep_worksets (bool): if True keeps worksets when detaching\naudit (bool): if True audits the model when opening", "source": "juraj-google-style"}
{"code": "def CmdVersion(self):\n    self.logger.debug('CmdVersion')\n    response = self.InternalSendApdu(apdu.CommandApdu(0, apdu.CMD_VERSION, 0, 0))\n    if (not response.IsSuccess()):\n        raise errors.ApduError(response.sw1, response.sw2)\n    return response.body", "docstring": "Obtain the version of the device and test transport format.\n\nObtains the version of the device and determines whether to use ISO\n7816-4 or the U2f variant.  This function should be called at least once\nbefore CmdAuthenticate or CmdRegister to make sure the object is using the\nproper transport for the device.\n\nReturns:\nThe version of the U2F protocol in use.", "source": "codesearchnet"}
{"code": "def dbInsert(self, def_buf, raw_a, raw_b):\n        \n        self.dbExec(self.sqlInsert(def_buf, raw_a, raw_b))", "docstring": "Call overridden dbExec() with built insert statement.\nArgs:\ndef_buf (SerialBlock): Block of read buffer fields to write.\nraw_a (str): Hex string of raw A read.\nraw_b (str): Hex string of raw B read or empty.", "source": "juraj-google-style"}
{"code": "def load(png_filename):\n    png_filename = os.path.expanduser(png_filename)\n    try:\n        img = Image.open(png_filename)\n    except Exception as e:\n        raise ValueError('Could not load file {0} for conversion.'.format(png_filename))\n        raise\n    return numpy.array(img)", "docstring": "Import a png file into a numpy array.\n\nArguments:\npng_filename (str): A string filename of a png datafile\n\nReturns:\nA numpy array with data from the png file", "source": "codesearchnet"}
{"code": "def WritePreprocessingInformation(self, knowledge_base):\n    \n    self._RaiseIfNotWritable()\n\n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Preprocessing information not supported by storage type.')", "docstring": "Writes preprocessing information.\n\nArgs:\nknowledge_base (KnowledgeBase): used to store the preprocessing\ninformation.\n\nRaises:\nIOError: if the storage type does not support writing preprocessing\ninformation or when the storage writer is closed.\nOSError: if the storage type does not support writing preprocessing\ninformation or when the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def _CreateComplexTypeFromData(\n      self, elem_type, type_is_override, data, set_type_attrs):\n    \n    elem_arguments = dict(elem_type.elements)\n\n    \n    \n    instantiated_arguments = {\n        k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)\n        for k, v in data if k != 'xsi_type'}\n    if set_type_attrs:\n      found_type_attr = next((e_name for e_name, _ in elem_type.elements\n                              if e_name.endswith('.Type')), None)\n      if found_type_attr and type_is_override:\n        instantiated_arguments[found_type_attr] = elem_type.qname.localname\n    \n    return elem_type(**instantiated_arguments)", "docstring": "Initialize a SOAP element with specific data.\n\nArgs:\nelem_type: The type of the element to create.\ntype_is_override: A boolean specifying if the type is being overridden.\ndata: The data to hydrate the type with.\nset_type_attrs: A boolean indicating whether or not attributes that end\nin .Type should be set. This is only necessary for batch job service.\n\nReturns:\nAn fully initialized SOAP element.", "source": "juraj-google-style"}
{"code": "def __setitem__(self, key, value):\n    \n    if not isinstance(key, tuple) or len(key) != 2:\n      raise IndexError('Invalid index: {0}'.format(key))\n    self._dim0.add(key[0])\n    self._dim1.add(key[1])\n    self._items[key] = value", "docstring": "Sets element of the matrix at position indexed by key.\n\nArgs:\nkey: tuple of (row_idx, column_idx)\nvalue: new value of the element of the matrix\n\nRaises:\nIndexError: if key is invalid.", "source": "juraj-google-style"}
{"code": "def ParseBookmarkFolderRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    title = self._GetRowValue(query_hash, row, 'title')\n    event_data = FirefoxPlacesBookmarkFolderEventData()\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = (title or 'N/A')\n    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'lastModified')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a bookmark folder row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def find_elements_by_name(self, name, update=False) -> Elements:\n        \n        return self.find_elements(by=By.NAME, value=name, update=update)", "docstring": "Finds multiple elements by name.\n\nArgs:\nname: The name of the elements to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nA list with elements if any was found. An empty list if not.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelements = driver.find_elements_by_name('foo')", "source": "juraj-google-style"}
{"code": "def Main(url, similarity_mode=\"TfIdfCosine\", similarity_limit=0.75):\n    \n    \n    web_scrape = WebScraping()\n    \n    web_scrape.readable_web_pdf = WebPDFReading()\n    \n    document = web_scrape.scrape(url)\n\n    if similarity_mode == \"TfIdfCosine\":\n        \n        \n        similarity_filter = TfIdfCosine()\n\n    elif similarity_mode == \"Dice\":\n        \n        \n        similarity_filter = Dice()\n\n    elif similarity_mode == \"Jaccard\":\n        \n        \n        similarity_filter = Jaccard()\n    \n    elif similarity_mode == \"Simpson\":\n        \n        \n        similarity_filter = Simpson()\n    \n    else:\n        raise ValueError()\n\n\n    \n    nlp_base = NlpBase()\n    \n    nlp_base.tokenizable_doc = MeCabTokenizer()\n    \n    similarity_filter.nlp_base = nlp_base\n    \n    similarity_filter.similarity_limit = similarity_limit\n\n    \n    auto_abstractor = AutoAbstractor()\n    \n    auto_abstractor.tokenizable_doc = MeCabTokenizer()\n    \n    abstractable_doc = TopNRankAbstractor()\n    \n    result_dict = auto_abstractor.summarize(document, abstractable_doc, similarity_filter)\n    \n    [print(result_dict[\"summarize_result\"][i]) for i in range(len(result_dict[\"summarize_result\"])) if i < 3]", "docstring": "Entry Point.\n\nArgs:\nurl:    PDF url.", "source": "juraj-google-style"}
{"code": "def encode(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:\n    encoded_inputs = self.encode_plus(table, query=query, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs)\n    return encoded_inputs['input_ids']", "docstring": "Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc.\nwhich are necessary for the model to work correctly. Use that method if you want to build your processing on\nyour own, otherwise refer to `__call__`.\n\nArgs:\ntable (`pd.DataFrame`):\nTable containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas\ndataframe to convert it to string.\nquery (`str` or `List[str]`):\nQuestion related to a table to be encoded.", "source": "github-repos"}
{"code": "def fog(x, severity=1):\n  \n  c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]\n  x = np.array(x) / 255.\n  max_val = x.max()\n  mapsize = 512\n  shape = x.shape\n  max_length = max(shape[0], shape[1])\n  if max_length > mapsize:\n    mapsize = 2**int(np.ceil(np.log2(float(max_length))))\n  tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])\n  tmp = tmp[:x.shape[0], :x.shape[1]]\n  tmp = tmp[..., np.newaxis]\n  x += c[0] * tmp\n  x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Fog corruption to images.\n\nAdding fog to images. Fog is generated by diamond-square algorithm.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Added fog.", "source": "juraj-google-style"}
{"code": "def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):\n        \n\n        cw = self.session.client('cloudwatch', region_name=bucket_region)\n\n        \n\n        try:\n            obj_stats = cw.get_metric_statistics(\n                Namespace='AWS/S3',\n                MetricName=statistic,\n                Dimensions=[\n                    {\n                        'Name': 'StorageType',\n                        'Value': storage_type\n                    },\n                    {\n                        'Name': 'BucketName',\n                        'Value': bucket_name\n                    }\n                ],\n                Period=86400,\n                StartTime=datetime.utcnow() - timedelta(days=days),\n                EndTime=datetime.utcnow(),\n                Statistics=[\n                    'Average'\n                ]\n            )\n            stat_value = obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA'\n\n            return stat_value\n\n        except Exception as e:\n            self.log.error(\n                'Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name,\n                                                                                        bucket_name, e))\n\n        finally:\n            del cw", "docstring": "Returns datapoints from cloudwatch for bucket statistics.\n\nArgs:\nbucket_name `(str)`: The name of the bucket\nstatistic `(str)`: The statistic you want to fetch from\ndays `(int)`: Sample period for the statistic", "source": "juraj-google-style"}
{"code": "def predict(self, X):\n        \n        logger.info('predicting ...')\n        ps = self.predict_raw(X)\n\n        return sigm(ps[:, 0])", "docstring": "Predict targets for a feature matrix.\n\nArgs:\nX (np.array of float): feature matrix for prediction\n\nReturns:\nprediction (np.array)", "source": "juraj-google-style"}
{"code": "def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):\n    if (init is True):\n        if (t_start is None):\n            t_start = self.t_begin\n        if (t_stop is None):\n            t_stop = self.t_end\n        if (f_start is None):\n            f_start = self.f_begin\n        if (f_stop is None):\n            f_stop = self.f_end\n    else:\n        if (f_start is None):\n            f_start = self.f_start\n        if (f_stop is None):\n            f_stop = self.f_stop\n        if (t_start is None):\n            t_start = self.t_start\n        if (t_stop is None):\n            t_stop = self.t_stop\n    if ((t_stop >= 0) and (t_start >= 0) and (t_stop < t_start)):\n        (t_stop, t_start) = (t_start, t_stop)\n        logger.warning('Given t_stop < t_start, assuming reversed values.')\n    if (f_stop and f_start and (f_stop < f_start)):\n        (f_stop, f_start) = (f_start, f_stop)\n        logger.warning('Given f_stop < f_start, assuming reversed values.')\n    if ((t_start >= self.t_begin) and (t_start < self.t_end)):\n        self.t_start = int(t_start)\n    else:\n        if ((init is False) or (t_start != None)):\n            logger.warning(('Setting t_start = %f, since t_start not given or not valid.' % self.t_begin))\n        self.t_start = self.t_begin\n    if ((t_stop <= self.t_end) and (t_stop > self.t_begin)):\n        self.t_stop = int(t_stop)\n    else:\n        if ((init is False) or t_stop):\n            logger.warning(('Setting t_stop = %f, since t_stop not given or not valid.' % self.t_end))\n        self.t_stop = self.t_end\n    if ((f_start >= self.f_begin) and (f_start < self.f_end)):\n        self.f_start = f_start\n    else:\n        if ((init is False) or f_start):\n            logger.warning(('Setting f_start = %f, since f_start not given or not valid.' % self.f_begin))\n        self.f_start = self.f_begin\n    if ((f_stop <= self.f_end) and (f_stop > self.f_begin)):\n        self.f_stop = f_stop\n    else:\n        if ((init is False) or f_stop):\n            logger.warning(('Setting f_stop = %f, since f_stop not given or not valid.' % self.f_end))\n        self.f_stop = self.f_end\n    self.selection_shape = self._calc_selection_shape()", "docstring": "Making sure the selection if time and frequency are within the file limits.\n\nArgs:\ninit (bool): If call during __init__", "source": "codesearchnet"}
{"code": "def __init__(self, loss=None, predictions=None, metrics=None):\n    if loss is not None:\n        loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)\n        self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)\n    if predictions is not None:\n        pred_dict = self._wrap_and_check_outputs(predictions, self.PREDICTIONS_NAME)\n        self._predictions = self._prefix_output_keys(pred_dict, self.PREDICTIONS_NAME)\n    if metrics is not None:\n        self._metrics = self._wrap_and_check_metrics(metrics)", "docstring": "Constructor for SupervisedOutput (ie, Train or Eval output).\n\nArgs:\nloss: dict of Tensors or single Tensor representing calculated loss.\npredictions: dict of Tensors or single Tensor representing model\npredictions.\nmetrics: Dict of metric results keyed by name.\nThe values of the dict can be one of the following:\n(1) instance of `Metric` class.\n(2) (metric_value, update_op) tuples, or a single tuple.\nmetric_value must be a Tensor, and update_op must be a Tensor or Op.\n\nRaises:\nValueError: if any of the outputs' dict keys are not strings or tuples of\nstrings or the values are not Tensors (or Operations in the case of\nupdate_op).", "source": "github-repos"}
{"code": "def reset(self):\n    self.output_file.remove()\n    self.log_file.remove()\n    self.stderr_file.remove()\n    self.start_lockfile.remove()\n    self.qerr_file.remove()\n    self.qout_file.remove()\n    if self.mpiabort_file.exists:\n        self.mpiabort_file.remove()\n    self.set_status(self.S_INIT, msg=('Reset on %s' % time.asctime()))\n    self.num_restarts = 0\n    self.set_qjob(None)\n    self.work.finalized = False\n    self.flow.finalized = False\n    return 0", "docstring": "Reset the task status. Mainly used if we made a silly mistake in the initial\nsetup of the queue manager and we want to fix it and rerun the task.\n\nReturns:\n0 on success, 1 if reset failed.", "source": "codesearchnet"}
{"code": "def write(self, writer: WriteStream) -> None:\n        \n        for part in self._raw:\n            writer.write(bytes(part))", "docstring": "Write the object to the stream, with one or more calls to\n:meth:`~pymap.bytes.WriteStream.write`.\n\nArgs:\nwriter: The output stream.", "source": "juraj-google-style"}
{"code": "def get_course_enrollments(self, enterprise_customer, days):\n        \n        return CourseEnrollment.objects.filter(\n            created__gt=datetime.datetime.now() - datetime.timedelta(days=days)\n        ).filter(\n            user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True)\n        )", "docstring": "Get course enrollments for all the learners of given enterprise customer.\n\nArguments:\nenterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\nof this enterprise customer.\ndays (int): Include course enrollment of this number of days.\n\nReturns:\n(list): A list of CourseEnrollment objects.", "source": "juraj-google-style"}
{"code": "def _orthogonal_kernel(self, ksize, cin, cout):\n    if cin > cout:\n        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n    orth = self._orthogonal_matrix(cout)[0:cin, :]\n    if ksize == 1:\n        return array_ops.expand_dims(array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0), 0)\n    p = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout))\n    for _ in range(ksize - 2):\n        temp = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout), self._symmetric_projection(cout))\n        p = self._matrix_conv(p, temp)\n    for i in range(ksize):\n        for j in range(ksize):\n            for k in range(ksize):\n                p[i, j, k] = math_ops.matmul(orth, p[i, j, k])\n    return self._dict_to_tensor(p, ksize, ksize, ksize)", "docstring": "Construct orthogonal kernel for convolution.\n\nArgs:\nksize: Kernel size.\ncin: Number of input channels.\ncout: Number of output channels.\n\nReturns:\nAn [ksize, ksize, ksize, cin, cout] orthogonal kernel.\nRaises:\nValueError: If cin > cout.", "source": "github-repos"}
{"code": "def __call__(self, data, dtype=None):\n        \n        if isinstance(data, np.ndarray):\n            if not data.size > 0:\n                raise ValueError(\"empty array can't be serialized\")\n            return _npy_serialize(data)\n\n        if isinstance(data, list):\n            if not len(data) > 0:\n                raise ValueError(\"empty array can't be serialized\")\n            return _npy_serialize(np.array(data, dtype))\n\n        \n        if hasattr(data, 'read'):\n            return data.read()\n\n        return _npy_serialize(np.array(data))", "docstring": "Serialize data into the request body in NPY format.\n\nArgs:\ndata (object): Data to be serialized. Can be a numpy array, list, file, or buffer.\n\nReturns:\nobject: NPY serialized data used for the request.", "source": "juraj-google-style"}
{"code": "def __enter__(self) -> TestCheckWriter:\n    if self._context_manager_active:\n        raise RuntimeError('Tried to enter two simultaneous `with` blocks managing the same `TestCheckWriter` instance.')\n    self._context_manager_active = True\n    assert self._worker_pool is None\n    if self._worker_count is None or self._worker_count > 1:\n        self._worker_pool = multiprocessing.pool.ThreadPool(self._worker_count)\n    return self", "docstring": "Context manager setup.\n\nInitializes `self._worker_pool` if `self._worker_count` is either `None`\nor an integer greater than 1. (In the former case, the worker count will\nbe inferred.)\n\nReturns:\n`self`.\n\nRaises:\nRuntimeError: If this instance already has an active context manager.", "source": "github-repos"}
{"code": "def shortest_undirected_path(self, physical_qubit1, physical_qubit2):\n        \n        try:\n            return nx.shortest_path(self.graph.to_undirected(as_view=True), source=physical_qubit1,\n                                    target=physical_qubit2)\n        except nx.exception.NetworkXNoPath:\n            raise CouplingError(\n                \"Nodes %s and %s are not connected\" % (str(physical_qubit1), str(physical_qubit2)))", "docstring": "Returns the shortest undirected path between physical_qubit1 and physical_qubit2.\nArgs:\nphysical_qubit1 (int): A physical qubit\nphysical_qubit2 (int): Another physical qubit\nReturns:\nList: The shortest undirected path\nRaises:\nCouplingError: When there is no path between physical_qubit1, physical_qubit2.", "source": "juraj-google-style"}
{"code": "def get_path(*args, module=a99):\n    \n\n    p = os.path.abspath(os.path.join(os.path.split(module.__file__)[0], *args))\n    return p", "docstring": "Returns full path to specified module\n\nArgs:\n*args: are added at the end of module path with os.path.join()\nmodule: Python module, defaults to a99\n\nReturns: path string\n\n>>> get_path()", "source": "juraj-google-style"}
{"code": "def build_graph(steps):\n    \n\n    graph = Graph()\n\n    for step in steps:\n        graph.add_step(step)\n\n    for step in steps:\n        for dep in step.requires:\n            graph.connect(step.name, dep)\n\n        for parent in step.required_by:\n            graph.connect(parent, step.name)\n\n    return graph", "docstring": "Builds a graph of steps.\nArgs:\nsteps (list): a list of :class:`Step` objects to execute.", "source": "juraj-google-style"}
{"code": "def Var(self, mu=None):\n        \n        if mu is None:\n            mu = self.Mean()\n\n        var = 0.0\n        for x, p in self.d.iteritems():\n            var += p * (x - mu) ** 2\n        return var", "docstring": "Computes the variance of a PMF.\n\nArgs:\nmu: the point around which the variance is computed;\nif omitted, computes the mean\n\nReturns:\nfloat variance", "source": "juraj-google-style"}
{"code": "def _EvaluateElementsDataSize(self, context):\n    elements_data_size = None\n    if self._data_type_definition.elements_data_size:\n        elements_data_size = self._data_type_definition.elements_data_size\n    elif self._data_type_definition.elements_data_size_expression:\n        expression = self._data_type_definition.elements_data_size_expression\n        namespace = {}\n        if (context and context.values):\n            namespace.update(context.values)\n        namespace['__builtins__'] = {}\n        try:\n            elements_data_size = eval(expression, namespace)\n        except Exception as exception:\n            raise errors.MappingError('Unable to determine elements data size with error: {0!s}'.format(exception))\n    if ((elements_data_size is None) or (elements_data_size < 0)):\n        raise errors.MappingError('Invalid elements data size: {0!s}'.format(elements_data_size))\n    return elements_data_size", "docstring": "Evaluates elements data size.\n\nArgs:\ncontext (DataTypeMapContext): data type map context.\n\nReturns:\nint: elements data size.\n\nRaises:\nMappingError: if the elements data size cannot be determined.", "source": "codesearchnet"}
{"code": "def bbox_scaling(bboxes, scale, clip_shape=None):\n    \n    if float(scale) == 1.0:\n        scaled_bboxes = bboxes.copy()\n    else:\n        w = bboxes[..., 2] - bboxes[..., 0] + 1\n        h = bboxes[..., 3] - bboxes[..., 1] + 1\n        dw = (w * (scale - 1)) * 0.5\n        dh = (h * (scale - 1)) * 0.5\n        scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)\n    if clip_shape is not None:\n        return bbox_clip(scaled_bboxes, clip_shape)\n    else:\n        return scaled_bboxes", "docstring": "Scaling bboxes w.r.t the box center.\n\nArgs:\nbboxes (ndarray): Shape(..., 4).\nscale (float): Scaling factor.\nclip_shape (tuple, optional): If specified, bboxes that exceed the\nboundary will be clipped according to the given shape (h, w).\n\nReturns:\nndarray: Scaled bboxes.", "source": "juraj-google-style"}
{"code": "def get_cpu_type():\n    key = 'cpu_type'\n    out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n    cpu_detected = out.split(b':')[1].strip()\n    if err and FLAGS.debug:\n        print('Error in detecting CPU type:\\n %s' % str(err))\n    return cpu_detected", "docstring": "Retrieves CPU (type) information.\n\nReturns:\nString that is name of the CPU.\ne.g. 'GenuineIntel'", "source": "github-repos"}
{"code": "def add_service(self, name, long_name, preregistered=False, notify=True):\n    if (name in self.services):\n        raise ArgumentError('Could not add service because the long_name is taken', long_name=long_name)\n    serv_state = states.ServiceState(name, long_name, preregistered)\n    service = {'state': serv_state, 'heartbeat_threshold': 600}\n    self.services[name] = service\n    if notify:\n        return self._notify_update(name, 'new_service', self.service_info(name))\n    return None", "docstring": "Add a service to the list of tracked services.\n\nArgs:\nname (string): A unique short service name for the service\nlong_name (string): A longer, user friendly name for the service\npreregistered (bool): Whether this service is an expected preregistered\nservice.\nnotify (bool): Send notifications about this service to all clients\n\nReturns:\nawaitable: If notify is True, an awaitable for the notifications.\n\nOtherwise None.", "source": "codesearchnet"}
{"code": "def t_seg(p1, p2, t, align=0):\n    \n    v = vector(p1, p2)\n    result = {\n        1: lambda a, b: (a, translate(b, scale(v, -t))),\n        2: lambda a, b: (translate(a, scale(v, t)), b),\n        0: lambda a, b: (translate(a, scale(v, t / 2)),\n                         translate(b, scale(v, -t / 2)))\n    }\n    return result[align](p1, p2)", "docstring": "trim segment\nArgs:\np1, p2: point(x, y)\nt: scaling factor (1 - trimed segment / original segment)\nalign: 1: trim p2, 2: trim p1, 0: both side\nReturn:\ntrimmed segment(p1, p2)", "source": "juraj-google-style"}
{"code": "def exit(self, code=None, msg=None):\n    if (code is None):\n        code = self.tcex.exit_code\n        if (code == 3):\n            self.tcex.log.info(u'Changing exit code from 3 to 0.')\n            code = 0\n    elif (code not in [0, 1]):\n        code = 1\n    self.tcex.exit(code, msg)", "docstring": "Playbook wrapper on TcEx exit method\n\nPlaybooks do not support partial failures so we change the exit method from 3 to 1 and call\nit a partial success instead.\n\nArgs:\ncode (Optional [integer]): The exit code value for the app.", "source": "codesearchnet"}
{"code": "def transform(self, column):\n        \n        self.check_data_type()\n\n        return pd.DataFrame({self.col_name: np.exp(column[self.col_name])})", "docstring": "Applies an exponential to values to turn them positive numbers.\n\nArgs:\ncolumn (pandas.DataFrame): Data to transform.\n\nReturns:\npd.DataFrame", "source": "juraj-google-style"}
{"code": "def _RemoveUsers(self, remove_users):\n    for username in remove_users:\n        self.utils.RemoveUser(username)\n        self.user_ssh_keys.pop(username, None)\n    self.invalid_users -= set(remove_users)", "docstring": "Deprovision Linux user accounts that do not appear in account metadata.\n\nArgs:\nremove_users: list, the username strings of the Linux accounts to remove.", "source": "codesearchnet"}
{"code": "def exit(self, code=None, msg=None):\n    if (msg is not None):\n        if ((code in [0, 3]) or ((code is None) and (self.exit_code in [0, 3]))):\n            self.log.info(msg)\n        else:\n            self.log.error(msg)\n        self.message_tc(msg)\n    if (code is None):\n        code = self.exit_code\n    elif (code in [0, 1, 3]):\n        pass\n    else:\n        self.log.error(u'Invalid exit code')\n        code = 1\n    if self.default_args.tc_aot_enabled:\n        self.playbook.aot_rpush(code)\n    self.log.info(u'Exit Code: {}'.format(code))\n    sys.exit(code)", "docstring": "Application exit method with proper exit code\n\nThe method will run the Python standard sys.exit() with the exit code\npreviously defined via :py:meth:`~tcex.tcex.TcEx.exit_code` or provided\nduring the call of this method.\n\nArgs:\ncode (Optional [integer]): The exit code value for the app.\nmsg (Optional [string]): A message to log and add to message tc output.", "source": "codesearchnet"}
{"code": "def __init__(self, base_core, input_shape=None, name=\"skip_connection_core\"):\n    \n    super(SkipConnectionCore, self).__init__(name=name)\n    self._base_core = base_core\n    self._input_shape = input_shape", "docstring": "Construct a SkipConnectionCore.\n\nArgs:\nbase_core: Base RNNCore to wrap.\ninput_shape: Shape of the input as tuple, excluding the batch size.\nname: Name of the module.", "source": "juraj-google-style"}
{"code": "def _inputs_valid(self, output_condition_uris):\n    if (len(self.inputs) != len(output_condition_uris)):\n        raise ValueError('Inputs and output_condition_uris must have the same count')\n    tx_dict = (self.tx_dict if self.tx_dict else self.to_dict())\n    tx_dict = Transaction._remove_signatures(tx_dict)\n    tx_dict['id'] = None\n    tx_serialized = Transaction._to_str(tx_dict)\n\n    def validate(i, output_condition_uri=None):\n        'Validate input against output condition URI'\n        return self._input_valid(self.inputs[i], self.operation, tx_serialized, output_condition_uri)\n    return all((validate(i, cond) for (i, cond) in enumerate(output_condition_uris)))", "docstring": "Validates an Input against a given set of Outputs.\n\nNote:\nThe number of `output_condition_uris` must be equal to the\nnumber of Inputs a Transaction has.\n\nArgs:\noutput_condition_uris (:obj:`list` of :obj:`str`): A list of\nOutputs to check the Inputs against.\n\nReturns:\nbool: If all Outputs are valid.", "source": "codesearchnet"}
{"code": "def console_set_color_control(\n    con: int, fore: Tuple[int, int, int], back: Tuple[int, int, int]\n) -> None:\n    \n    lib.TCOD_console_set_color_control(con, fore, back)", "docstring": "Configure :any:`color controls`.\n\nArgs:\ncon (int): :any:`Color control` constant to modify.\nfore (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\nback (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.", "source": "juraj-google-style"}
{"code": "def list_cover(list1, list2):\n    set2 = set(list2)\n    incover_list = [(item1 in set2) for item1 in list1]\n    return incover_list", "docstring": "r\"\"\"\nreturns boolean for each position in list1 if it is in list2\n\nArgs:\nlist1 (list):\nlist2 (list):\n\nReturns:\nlist: incover_list - true where list1 intersects list2\n\nCommandLine:\npython -m utool.util_list --test-list_cover\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> # build test data\n>>> list1 = [1, 2, 3, 4, 5, 6]\n>>> list2 = [2, 3, 6]\n>>> # execute function\n>>> incover_list = list_cover(list1, list2)\n>>> # verify results\n>>> result = str(incover_list)\n>>> print(result)\n[False, True, True, False, False, True]", "source": "codesearchnet"}
{"code": "def put_member(self, name: InstanceName, value: Value,\n                   raw: bool = False) -> \"InstanceNode\":\n        \n        if not isinstance(self.value, ObjectValue):\n            raise InstanceValueError(self.json_pointer(), \"member of non-object\")\n        csn = self._member_schema_node(name)\n        newval = self.value.copy()\n        newval[name] = csn.from_raw(value, self.json_pointer()) if raw else value\n        return self._copy(newval)._member(name)", "docstring": "Return receiver's member with a new value.\n\nIf the member is permitted by the schema but doesn't exist, it\nis created.\n\nArgs:\nname: Instance name of the member.\nvalue: New value of the member.\nraw: Flag to be set if `value` is raw.\n\nRaises:\nNonexistentSchemaNode: If member `name` is not permitted by the\nschema.\nInstanceValueError: If the receiver's value is not an object.", "source": "juraj-google-style"}
{"code": "def probe_async(self, callback):\n        \n\n        def _on_finished(_name, control_info, exception):\n            if exception is not None:\n                callback(self.id, False, str(exception))\n                return\n\n            self._control_info = control_info\n\n            try:\n                info = {\n                    'connection_string': \"direct\",\n                    'uuid': control_info.uuid,\n                    'signal_strength': 100\n                }\n\n                self._trigger_callback('on_scan', self.id, info, self.ExpirationTime)\n            finally:\n                callback(self.id, True, None)\n\n        self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size)", "docstring": "Send advertisements for all connected devices.\n\nArgs:\ncallback (callable): A callback for when the probe operation has completed.\ncallback should have signature callback(adapter_id, success, failure_reason) where:\nsuccess: bool\nfailure_reason: None if success is True, otherwise a reason for why we could not probe", "source": "juraj-google-style"}
{"code": "def get_day_end(config):\n    day_start_datetime = datetime.datetime.combine(datetime.date.today(), config['day_start'])\n    day_end_datetime = (day_start_datetime - datetime.timedelta(seconds=1))\n    return day_end_datetime.time()", "docstring": "Get the day end time given the day start. This assumes full 24h day.\n\nArgs:\nconfig (dict): Configdict. Needed to extract ``day_start``.\n\nNote:\nThis is merely a convinience funtion so we do not have to deduct this from ``day_start``\nby hand all the time.", "source": "codesearchnet"}
{"code": "def to_voxels(array):\n    \n    if type(array) is not numpy.ndarray:\n        raise ValueError(\"array argument must be of type numpy.ndarray\")\n    return numpy.argwhere(array)", "docstring": "Converts an array to its voxel list.\n\nArguments:\narray (numpy.ndarray): A numpy nd array. This must be boolean!\n\nReturns:\nA list of n-tuples", "source": "juraj-google-style"}
{"code": "def getsize(self, path):\n    try:\n        file_obj = self.filesystem.resolve(path)\n        if (self.filesystem.ends_with_path_separator(path) and (S_IFMT(file_obj.st_mode) != S_IFDIR)):\n            error_nr = (errno.EINVAL if self.filesystem.is_windows_fs else errno.ENOTDIR)\n            self.filesystem.raise_os_error(error_nr, path)\n        return file_obj.st_size\n    except IOError as exc:\n        raise os.error(exc.errno, exc.strerror)", "docstring": "Return the file object size in bytes.\n\nArgs:\npath:  path to the file object.\n\nReturns:\nfile size in bytes.", "source": "codesearchnet"}
{"code": "def assign(var, new_val, assign_fn=assign_slice):\n  \n  if isinstance(var, Tensor):\n    var = var.operation\n  if not isinstance(var, Variable):\n    raise ValueError(\"var must be a mtf.Variable or its output Tensor.\")\n  return Assign([var], [new_val], assign_fn=assign_fn)", "docstring": "Assign a new value to a variable.\n\nArgs:\nvar: either a Variable operation or its output Tensor.\nnew_val: a Tensor\nassign_fn: a function from\n(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation\nReturns:\nan Operation\nRaises:\nValueError: if var is not a Variable and var.operation is not a Variable", "source": "juraj-google-style"}
{"code": "def path_to_string(path):\n    if isinstance(path, os.PathLike):\n        return os.fspath(path)\n    return path", "docstring": "Convert `PathLike` objects to their string representation.\n\nIf given a non-string typed path object, converts it to its string\nrepresentation.\n\nIf the object passed to `path` is not among the above, then it is\nreturned unchanged. This allows e.g. passthrough of file objects\nthrough this function.\n\nArgs:\npath: `PathLike` object that represents a path\n\nReturns:\nA string representation of the path argument, if Python support exists.", "source": "github-repos"}
{"code": "def get_cuda_version_default():\n    key = 'cuda_ver_dflt'\n    out = ''\n    cmd_list = cmds_all[PLATFORM.lower()][key]\n    for i, cmd in enumerate(cmd_list):\n        try:\n            out, err = run_shell_cmd(cmd)\n            if not out:\n                raise Exception(err)\n        except Exception as e:\n            if FLAGS.debug:\n                print('\\nWarning: Encountered issue while retrieving default CUDA version. (%s) Trying a different method...\\n' % e)\n            if i == len(cmd_list) - 1:\n                if FLAGS.debug:\n                    print('Error: Cannot retrieve CUDA default version.\\nStopping...')\n            else:\n                pass\n    return out.strip('\\n')", "docstring": "Retrieves default CUDA version.\n\nDefault version is the version found in `/usr/local/cuda/` installation.\n\nstderr is silenced by default. Setting FLAGS.debug mode will not enable it.\nRemove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable\nstderr.\n\nIt iterates through two types of version retrieval method:\n1) Using `nvcc`: If `nvcc` is not available, then it uses next method.\n2) Read version file (`version.txt`) found in CUDA install directory.\n\nReturns:\nString that is the default CUDA version.\ne.g. '10.1'", "source": "github-repos"}
{"code": "def _randomize_speed(base_speed: int, sigma: int = None) -> int:\n        \n        if sigma is None:\n            int_sigma = int(base_speed / 4)\n        else:\n            int_sigma = sigma\n        val = MissionWeather._gauss(base_speed, int_sigma)\n        if val < 0:\n            return 0\n        return min(val, 50)", "docstring": "Creates a variation in wind speed\n\nArgs:\nbase_speed: base wind speed\nsigma: sigma value for gaussian variation\n\nReturns: random wind speed", "source": "juraj-google-style"}
{"code": "def export_aliases(export_path=None, exclusions=None):\n    \n    if not export_path:\n        export_path = os.path.abspath(ALIAS_FILE_NAME)\n\n    alias_table = get_alias_table()\n    for exclusion in exclusions or []:\n        if exclusion not in alias_table.sections():\n            raise CLIError(ALIAS_NOT_FOUND_ERROR.format(exclusion))\n        alias_table.remove_section(exclusion)\n\n    _commit_change(alias_table, export_path=export_path, post_commit=False)\n    logger.warning(POST_EXPORT_ALIAS_MSG, export_path)", "docstring": "Export all registered aliases to a given path, as an INI configuration file.\n\nArgs:\nexport_path: The path of the alias configuration file to export to.\nexclusions: Space-separated aliases excluded from export.", "source": "juraj-google-style"}
{"code": "def dispatch(self, inp):\n    inp = tf.reshape(inp, [(self._batch * self._length), (- 1)])\n    ret = tf.gather(inp, self._flat_indices)\n    return ret", "docstring": "Send the inputs to the experts.\n\nArgs:\ninp: a `Tensor` of shape \"[batch, length, depth]`\nReturns:\na tensor with shape [batch, num_experts, expert_capacity, depth]", "source": "codesearchnet"}
{"code": "def GetBasePathSpecs(self, source_path):\n    if (not source_path):\n        raise errors.ScannerError('Invalid source path.')\n    if ((not source_path.startswith('\\\\\\\\.\\\\')) and (not os.path.exists(source_path))):\n        raise errors.ScannerError('No such device, file or directory: {0:s}.'.format(source_path))\n    scan_context = source_scanner.SourceScannerContext()\n    scan_context.OpenSourcePath(source_path)\n    try:\n        self._source_scanner.Scan(scan_context)\n    except (ValueError, errors.BackEndError) as exception:\n        raise errors.ScannerError('Unable to scan source with error: {0!s}'.format(exception))\n    self._source_path = source_path\n    self._source_type = scan_context.source_type\n    if (self._source_type not in [definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE, definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE]):\n        scan_node = scan_context.GetRootScanNode()\n        return [scan_node.path_spec]\n    scan_node = scan_context.GetRootScanNode()\n    while (len(scan_node.sub_nodes) == 1):\n        scan_node = scan_node.sub_nodes[0]\n    base_path_specs = []\n    if (scan_node.type_indicator != definitions.TYPE_INDICATOR_TSK_PARTITION):\n        self._ScanVolume(scan_context, scan_node, base_path_specs)\n    else:\n        partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node)\n        for partition_identifier in partition_identifiers:\n            location = '/{0:s}'.format(partition_identifier)\n            sub_scan_node = scan_node.GetSubNodeByLocation(location)\n            self._ScanVolume(scan_context, sub_scan_node, base_path_specs)\n    return base_path_specs", "docstring": "Determines the base path specifications.\n\nArgs:\nsource_path (str): source path.\n\nReturns:\nlist[PathSpec]: path specifications.\n\nRaises:\nScannerError: if the source path does not exists, or if the source path\nis not a file or directory, or if the format of or within the source\nfile is not supported.", "source": "codesearchnet"}
{"code": "def ask_to_proceed_with_overwrite(filepath):\n    overwrite = input('[WARNING] %s already exists - overwrite? [y/n]' % filepath).strip().lower()\n    while overwrite not in ('y', 'n'):\n        overwrite = input('Enter \"y\" (overwrite) or \"n\" (cancel).').strip().lower()\n    if overwrite == 'n':\n        return False\n    print('[TIP] Next time specify overwrite=True!')\n    return True", "docstring": "Produces a prompt asking about overwriting a file.\n\nArgs:\nfilepath: the path to the file to be overwritten.\n\nReturns:\nTrue if we can proceed with overwrite, False otherwise.", "source": "github-repos"}
{"code": "def call_servo(examples, serving_bundle):\n    parsed_url = urlparse(('http:\n    channel = implementations.insecure_channel(parsed_url.hostname, parsed_url.port)\n    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)\n    if serving_bundle.use_predict:\n        request = predict_pb2.PredictRequest()\n    elif (serving_bundle.model_type == 'classification'):\n        request = classification_pb2.ClassificationRequest()\n    else:\n        request = regression_pb2.RegressionRequest()\n    request.model_spec.name = serving_bundle.model_name\n    if (serving_bundle.model_version is not None):\n        request.model_spec.version.value = serving_bundle.model_version\n    if (serving_bundle.signature is not None):\n        request.model_spec.signature_name = serving_bundle.signature\n    if serving_bundle.use_predict:\n        request.inputs[serving_bundle.predict_input_tensor].CopyFrom(tf.compat.v1.make_tensor_proto(values=[ex.SerializeToString() for ex in examples], dtype=types_pb2.DT_STRING))\n    else:\n        request.input.example_list.examples.extend(examples)\n    if serving_bundle.use_predict:\n        return common_utils.convert_predict_response(stub.Predict(request, 30.0), serving_bundle)\n    elif (serving_bundle.model_type == 'classification'):\n        return stub.Classify(request, 30.0)\n    else:\n        return stub.Regress(request, 30.0)", "docstring": "Send an RPC request to the Servomatic prediction service.\n\nArgs:\nexamples: A list of examples that matches the model spec.\nserving_bundle: A `ServingBundle` object that contains the information to\nmake the serving request.\n\nReturns:\nA ClassificationResponse or RegressionResponse proto.", "source": "codesearchnet"}
{"code": "def is_connectable(host: str, port: Union[int, str]) -> bool:\n    \n    socket_ = None\n    try:\n        socket_ = socket.create_connection((host, port), 1)\n        result = True\n    except socket.timeout:\n        result = False\n    finally:\n        if socket_:\n            socket_.close()\n    return result", "docstring": "Tries to connect to the device to see if it is connectable.\n\nArgs:\nhost: The host to connect.\nport: The port to connect.\n\nReturns:\nTrue or False.", "source": "juraj-google-style"}
{"code": "def to_dict(self):\n    out_dict = {}\n    out_dict['commands'] = self.commands\n    out_dict['configs'] = self.configs\n    out_dict['short_name'] = self.name\n    out_dict['versions'] = {'module': self.module_version, 'api': self.api_version}\n    return out_dict", "docstring": "Convert this object into a dictionary.\n\nReturns:\ndict: A dict with the same information as this object.", "source": "codesearchnet"}
{"code": "def _row_partitions_for_uniform_shape(shape, rank):\n    shape_cumprod = math_ops.cumprod(shape[:rank])\n    return tuple([RowPartition.from_uniform_row_length(uniform_row_length=shape[i + 1], nvals=shape_cumprod[i + 1], nrows=shape_cumprod[i]) for i in range(rank - 1)])", "docstring": "Returns row partitions for the given shape Tensor.\n\nArgs:\nshape: A vector describing a uniform shape.\nrank: The number of dimensions to generate row partitions for\n\nReturns:\nA list of (rank-1) `RowPartition`s with uniform row length.", "source": "github-repos"}
{"code": "def shell_call(command, **kwargs):\n    CMD_VARIABLE_RE = re.compile('^\\\\$\\\\{(\\\\w+)\\\\}$')\n    command = list(command)\n    for i in range(len(command)):\n        m = CMD_VARIABLE_RE.match(command[i])\n        if m:\n            var_id = m.group(1)\n            if (var_id in kwargs):\n                command[i] = kwargs[var_id]\n    str_command = ' '.join(command)\n    logging.debug(('Executing shell command: %s' % str_command))\n    return subprocess.check_output(command)", "docstring": "Calls shell command with argument substitution.\n\nArgs:\ncommand: command represented as a list. Each element of the list is one\ntoken of the command. For example \"cp a b\" becomes ['cp', 'a', 'b']\nIf any element of the list looks like '${NAME}' then it will be replaced\nby value from **kwargs with key 'NAME'.\n**kwargs: dictionary with argument substitution\n\nReturns:\noutput of the command\n\nRaises:\nsubprocess.CalledProcessError if command return value is not zero\n\nThis function is useful when you need to do variable substitution prior\nrunning the command. Below are few examples of how it works:\n\nshell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b'\n\nshell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b',\n'${a}; was replaced with 'asd' before calling the command", "source": "codesearchnet"}
{"code": "def filter_data(self, field, filter_value, filter_operator, field_converter=None):\n        \n        data = []\n        if self._indexes.get(field) is not None:\n            data = self._index_filter(\n                self._indexes.get(field), filter_value, filter_operator, field_converter\n            )\n        \n        \n\n        \n        \n        \n        \n        return set(data)", "docstring": "Filter the data given the provided.\n\nArgs:\nfield (string): The field to filter on.\nfilter_value (string | list): The value to match.\nfilter_operator (string): The operator for comparison.\nfield_converter (method): A method used to convert the field before comparison.\n\nReturns:\n(set): List of matching data objects", "source": "juraj-google-style"}
{"code": "def QA_fetch_get_sz_margin(date):\n    \n\n    if date in trade_date_sse:\n        return pd.read_excel(_sz_url.format(date)).assign(date=date).assign(sse='sz')", "docstring": "return shenzhen margin data\n\nArguments:\ndate {str YYYY-MM-DD} -- date format\n\nReturns:\npandas.DataFrame -- res for margin data", "source": "juraj-google-style"}
{"code": "def validate(self, value):\n    errors = []\n    self._used_validator = []\n    for val in self._validators:\n        try:\n            val.validate(value)\n            self._used_validator.append(val)\n        except ValidatorException as e:\n            errors.append(e)\n        except Exception as e:\n            errors.append(ValidatorException('Unknown Error', e))\n    if (len(errors) > 0):\n        raise ValidatorException.from_list(errors)\n    return value", "docstring": "validate function form OrValidator\n\nReturns:\nTrue if at least one of the validators\nvalidate function return True", "source": "codesearchnet"}
{"code": "def configureDevicesForMultiDeviceTest(self, num_devices):\n    cpus = config.list_physical_devices('CPU')\n    gpus = config.list_physical_devices('GPU')\n    config.set_logical_device_configuration(cpus[0], [context.LogicalDeviceConfiguration() for _ in range(num_devices)])\n    devices = ['/device:CPU:' + str(i) for i in range(num_devices - 1)]\n    if gpus:\n        devices.append('/device:GPU:0')\n    else:\n        devices.append('/device:CPU:' + str(num_devices - 1))\n    return devices", "docstring": "Configures number of logical devices for multi-device tests.\n\nIt returns a list of device names. If invoked in GPU-enabled runtime, the\nlast device name will be for a GPU device. Otherwise, all device names will\nbe for a CPU device.\n\nArgs:\nnum_devices: The number of devices to configure.\n\nReturns:\nA list of device names to use for a multi-device test.", "source": "github-repos"}
{"code": "def peek(self, key, indices=None, name=None):\n    if name is None:\n        name = '%s_pop' % self._name\n    indices, dtypes = self._get_indices_and_dtypes(indices)\n    with ops.colocate_with(self._coloc_op):\n        result = self._peek_fn(key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n    return self._get_return_value(result, indices)", "docstring": "Peeks at staging area data associated with the key.\n\nIf the key is not in the staging area, it will block\nuntil the associated (key, value) is inserted.\n\nArgs:\nkey: Key associated with the required data\nindices: Partial list of tensors to retrieve (optional).\nA list of integer or string indices.\nString indices are only valid if the Staging Area\nhas names associated with it.\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"}
{"code": "def _ParseRecurseKeys(self, parser_mediator, root_key):\n    for registry_key in root_key.RecurseKeys():\n        if parser_mediator.abort:\n            break\n        self._ParseKey(parser_mediator, registry_key)", "docstring": "Parses the Registry keys recursively.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nroot_key (dfwinreg.WinRegistryKey): root Windows Registry key.", "source": "codesearchnet"}
{"code": "def add_squashed_change(self, path, data):\n        \n        \n        assert self._squashed_count, \"Called while not squashing changes\"\n        self._squashed_changes.append([path[1:], data])", "docstring": "Register a squashed change to a particular path\n\nArgs:\npath (list): The path of what has changed, relative from Block\ndata (object): The new data", "source": "juraj-google-style"}
{"code": "def assert_child_key_has_value(self, parent, child, caller):\n    assert parent, 'parent parameter must be specified.'\n    assert child, 'child parameter must be specified.'\n    self.assert_key_has_value(parent, caller)\n    try:\n        child_exists = (child in self[parent])\n    except TypeError as err:\n        raise ContextError(f\"context['{parent}'] must be iterable and contain '{child}' for {caller}. {err}\") from err\n    if child_exists:\n        if (self[parent][child] is None):\n            raise KeyInContextHasNoValueError(f\"context['{parent}']['{child}'] must have a value for {caller}.\")\n    else:\n        raise KeyNotInContextError(f\"context['{parent}']['{child}'] doesn't exist. It must exist for {caller}.\")", "docstring": "Assert that context contains key that has child which has a value.\n\nArgs:\nparent: parent key\nchild: validate this sub-key of parent exists AND isn't None.\ncaller: string. calling function name - this used to construct\nerror messages\n\nRaises:\nKeyNotInContextError: Key doesn't exist\nKeyInContextHasNoValueError: context[key] is None\nAssertionError: if key is None", "source": "codesearchnet"}
{"code": "def copy_file_content(self, file_id, source_file):\n    if (not is_valid_uuid(file_id)):\n        raise StorageArgumentException('Invalid UUID for file_id: {0}'.format(file_id))\n    if (not is_valid_uuid(source_file)):\n        raise StorageArgumentException('Invalid UUID for source_file: {0}'.format(source_file))\n    self._authenticated_request.to_endpoint('file/{}/content/'.format(file_id)).with_headers({'X-Copy-From': source_file}).put()", "docstring": "Copy file content from source file to target file.\n\nArgs:\nfile_id (str): The UUID of the file whose content is written.\nsource_file (str): The UUID of the file whose content is copied.\n\nReturns:\nNone\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def get_permalink(*, url: str, template_params: dict[str, Any] | tuple[tuple[str, Any, Any], ...]) -> str:\n    if url.startswith('go/'):\n        url = f'http:\n    if not isinstance(template_params, dict):\n        template_params = {name: value for name, value, default in template_params if value != default}\n    template_params = json_std.dumps(template_params)\n    template_params = urllib.parse.quote(template_params)\n    return f'{url}", "docstring": "Get the permalink for the current colab.\n\nArgs:\nurl: The base URL.\ntemplate_params: A dict of name to value. Can also be a list of (name,\nvalue, default) tuples, in which case only the value != default are added\n(to make the url shorter).\n\nReturns:\nThe permalink.", "source": "github-repos"}
{"code": "def translate_job_state(code):\n    \n    code_description = \"\"\n    if code == \"0\":\n        code_description = \"Queued\"\n    if code == \"1\":\n        code_description = \"Scheduled\"\n    if code == \"2\":\n        code_description = \"Processing\"\n    if code == \"3\":\n        code_description = \"Finished\"\n    if code == \"4\":\n        code_description = \"Error\"\n    if code == \"5\":\n        code_description = \"Canceled\"\n    if code == \"6\":\n        code_description = \"Canceling\"\n\n    return code_description", "docstring": "AUX Function to translate the (numeric) state of a Job.\n\nArgs:\nnr (int): A valid number to translate.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def _get_args_to_parse(args, sys_argv):\n    arguments = (args if (args is not None) else sys_argv[1:])\n    _LOG.debug('Parsing arguments: %s', arguments)\n    return arguments", "docstring": "Return the given arguments if it is not None else sys.argv if it contains\nsomething, an empty list otherwise.\n\nArgs:\nargs: argument to be parsed\nsys_argv: arguments of the command line i.e. sys.argv", "source": "codesearchnet"}
{"code": "def member_del(self, repl_id, member_id):\n        \n        repl = self[repl_id]\n        result = repl.member_del(member_id)\n        self[repl_id] = repl\n        return result", "docstring": "remove member from replica set (reconfig replica)\nArgs:\nrepl_id - replica set identity\nmember_id - member index", "source": "juraj-google-style"}
{"code": "def _guessEncoding(self, path):\n        \n        if os.path.exists(path) and path.lower().endswith('csv'):\n            \n            encoding = None\n\n            if encoding is not None:\n                if encoding.startswith('utf'):\n                    encoding = encoding.replace('-', '')\n                encoding = encoding.replace('-','_')\n\n                viewValue = _encodings.get(encoding)\n\n                self._encodingKey = encoding\n\n                index = self._encodingComboBox.findText(viewValue.upper())\n                self._encodingComboBox.setCurrentIndex(index)", "docstring": "Opens a file from the given `path` and checks the file encoding.\n\nThe file must exists on the file system and end with the extension\n`.csv`. The file is read line by line until the encoding could be\nguessed.\nOn a successfull identification, the widgets of this dialog will be\nupdated.\n\nArgs:\npath (string): Path to a csv file on the file system.", "source": "juraj-google-style"}
{"code": "def _ExtractContentFromDataStream(self, mediator, file_entry, data_stream_name):\n    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n    if self._processing_profiler:\n        self._processing_profiler.StartTiming('extracting')\n    self._event_extractor.ParseDataStream(mediator, file_entry, data_stream_name)\n    if self._processing_profiler:\n        self._processing_profiler.StopTiming('extracting')\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING\n    self.last_activity_timestamp = time.time()", "docstring": "Extracts content from a data stream.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry to extract its content.\ndata_stream_name (str): name of the data stream whose content is to be\nextracted.", "source": "codesearchnet"}
{"code": "def _cmp_rel(self, state, op_name, x, y):\n    ret = self.ctx.program.NewVariable()\n    leftover_x = self.ctx.program.NewVariable()\n    leftover_y = self.ctx.program.NewVariable()\n    op_not_eq = op_name not in ('EQ', 'NE')\n    reported = False\n    for b1 in x.bindings:\n        for b2 in y.bindings:\n            op = getattr(slots, op_name)\n            try:\n                err = False\n                val = compare.cmp_rel(self.ctx, op, b1.data, b2.data)\n            except compare.CmpTypeError:\n                val = None\n                if state.node.HasCombination([b1, b2]):\n                    err = True\n                    reported = True\n                    self.ctx.errorlog.unsupported_operands(self.frames, op, x, y)\n            if val is None:\n                if op_not_eq and isinstance(b1.data, abstract.Class) and err:\n                    ret.AddBinding(self.ctx.convert.unsolvable, {b1, b2}, state.node)\n                elif isinstance(b1.data, abstract.SequenceLength):\n                    ret.AddBinding(self.ctx.convert.bool_values[val], {b1, b2}, state.node)\n                else:\n                    leftover_x.PasteBinding(b1, state.node)\n                    leftover_y.PasteBinding(b2, state.node)\n            else:\n                ret.AddBinding(self.ctx.convert.bool_values[val], {b1, b2}, state.node)\n    if leftover_x.bindings:\n        op = f'__{op_name.lower()}__'\n        report_errors = op_not_eq and (not bool(ret.bindings)) and (not reported)\n        state, leftover_ret = vm_utils.call_binary_operator(state, op, leftover_x, leftover_y, report_errors=report_errors, ctx=self.ctx)\n        ret.PasteVariable(leftover_ret, state.node)\n    return (state, ret)", "docstring": "Implementation of relational operators CMP_(LT|LE|EQ|NE|GE|GT).\n\nArgs:\nstate: Initial FrameState.\nop_name: An operator name, e.g., \"EQ\".\nx: A variable of the lhs value.\ny: A variable of the rhs value.\n\nReturns:\nA tuple of the new FrameState and the return variable.", "source": "github-repos"}
{"code": "def run(self, host=\"localhost\", port=8000, shutdown_timeout=60.0, **kwargs):\n        \n        print(\"Running service on http:\n                                            \"Press Ctrl+C to terminate.\")\n\n        \n        self.config.port = port\n        self.config.host = host\n\n        \n        try:\n            \n            if self.event_broker:\n                \n                self.event_broker.start()\n                \n                self.loop.run_until_complete(self.announce())\n\n            \n            http_handler = self.app.make_handler()\n            \n            self._http_server = self.loop.create_server(http_handler, host, port)\n\n            \n            self._server_handler = self.loop.run_until_complete(self._http_server)\n            \n            self.loop.run_forever()\n\n        \n        except KeyboardInterrupt:\n            \n            pass\n\n        \n        finally:\n            try:\n                \n                self.cleanup()\n            \n            except UnboundLocalError:\n                \n                pass\n\n            \n            self.loop.close()", "docstring": "This function starts the service's network intefaces.\n\nArgs:\nport (int): The port for the http server.", "source": "juraj-google-style"}
{"code": "def apply_to_operation(self, operation):\n    attr_value = attr_value_pb2.AttrValue(s=self._proto.SerializeToString())\n    operation._set_attr('_XlaSharding', attr_value)", "docstring": "Applies this Sharding attribute to `operation`.\n\nArgs:\noperation: A tf.Operation to add sharding annotation.", "source": "github-repos"}
{"code": "def _flush(self, buffer):\n        \n        with _handle_client_error():\n            self._client.put_object(\n                Body=buffer.tobytes(), **self._client_kwargs)", "docstring": "Flush the write buffers of the stream if applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.", "source": "juraj-google-style"}
{"code": "def diff_halfMatch(self, text1, text2):\n    if (self.Diff_Timeout <= 0):\n        return None\n    if (len(text1) > len(text2)):\n        (longtext, shorttext) = (text1, text2)\n    else:\n        (shorttext, longtext) = (text1, text2)\n    if ((len(longtext) < 4) or ((len(shorttext) * 2) < len(longtext))):\n        return None\n\n    def diff_halfMatchI(longtext, shorttext, i):\n        'Does a substring of shorttext exist within longtext such that the\\n      substring is at least half the length of longtext?\\n      Closure, but does not reference any external variables.\\n\\n      Args:\\n        longtext: Longer string.\\n        shorttext: Shorter string.\\n        i: Start index of quarter length substring within longtext.\\n\\n      Returns:\\n        Five element Array, containing the prefix of longtext, the suffix of\\n        longtext, the prefix of shorttext, the suffix of shorttext and the\\n        common middle.  Or None if there was no match.\\n      '\n        seed = longtext[i:(i + (len(longtext) \n        best_common = ''\n        j = shorttext.find(seed)\n        while (j != (- 1)):\n            prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])\n            suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])\n            if (len(best_common) < (suffixLength + prefixLength)):\n                best_common = (shorttext[(j - suffixLength):j] + shorttext[j:(j + prefixLength)])\n                best_longtext_a = longtext[:(i - suffixLength)]\n                best_longtext_b = longtext[(i + prefixLength):]\n                best_shorttext_a = shorttext[:(j - suffixLength)]\n                best_shorttext_b = shorttext[(j + prefixLength):]\n            j = shorttext.find(seed, (j + 1))\n        if ((len(best_common) * 2) >= len(longtext)):\n            return (best_longtext_a, best_longtext_b, best_shorttext_a, best_shorttext_b, best_common)\n        else:\n            return None\n    hm1 = diff_halfMatchI(longtext, shorttext, ((len(longtext) + 3) \n    hm2 = diff_halfMatchI(longtext, shorttext, ((len(longtext) + 1) \n    if ((not hm1) and (not hm2)):\n        return None\n    elif (not hm2):\n        hm = hm1\n    elif (not hm1):\n        hm = hm2\n    elif (len(hm1[4]) > len(hm2[4])):\n        hm = hm1\n    else:\n        hm = hm2\n    if (len(text1) > len(text2)):\n        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm\n    else:\n        (text2_a, text2_b, text1_a, text1_b, mid_common) = hm\n    return (text1_a, text1_b, text2_a, text2_b, mid_common)", "docstring": "Do the two texts share a substring which is at least half the length of\nthe longer text?\nThis speedup can produce non-minimal diffs.\n\nArgs:\ntext1: First string.\ntext2: Second string.\n\nReturns:\nFive element Array, containing the prefix of text1, the suffix of text1,\nthe prefix of text2, the suffix of text2 and the common middle.  Or None\nif there was no match.", "source": "codesearchnet"}
{"code": "def _dump_eager_tensors(self, tensors, op_type, input_tensor_ids, output_tensor_device_ids, graph_id=None):\n    tensor_debug_mode = self._tensor_debug_mode\n    output_tensor_ids = [t._id for t in tensors]\n    assert len(tensors) == len(output_tensor_device_ids)\n    if tensor_debug_mode == debug_event_pb2.TensorDebugMode.NO_TENSOR:\n        return debug_event_pb2.Execution(op_type=op_type, graph_id=graph_id, num_outputs=len(tensors), input_tensor_ids=input_tensor_ids, output_tensor_ids=output_tensor_ids, output_tensor_device_ids=output_tensor_device_ids, tensor_debug_mode=tensor_debug_mode, code_location=self._process_stack_frames())\n    elif tensor_debug_mode in (debug_event_pb2.TensorDebugMode.CURT_HEALTH, debug_event_pb2.TensorDebugMode.CONCISE_HEALTH, debug_event_pb2.TensorDebugMode.FULL_HEALTH, debug_event_pb2.TensorDebugMode.SHAPE, debug_event_pb2.TensorDebugMode.FULL_TENSOR):\n        execution_proto = debug_event_pb2.Execution(op_type=op_type, num_outputs=len(tensors), graph_id=graph_id, input_tensor_ids=input_tensor_ids, output_tensor_ids=output_tensor_ids, output_tensor_device_ids=output_tensor_device_ids, tensor_debug_mode=tensor_debug_mode, code_location=self._process_stack_frames())\n        for tensor in tensors:\n            if self._should_dump_tensor(op_type, tensor.dtype) and tensor.dtype.is_numpy_compatible:\n                if tensor_debug_mode in (debug_event_pb2.TensorDebugMode.CURT_HEALTH, debug_event_pb2.TensorDebugMode.CONCISE_HEALTH, debug_event_pb2.TensorDebugMode.FULL_HEALTH):\n                    if tensor.dtype.is_floating:\n                        tensor_proto = _concrete_tensor_to_proto(gen_debug_ops.debug_numeric_summary_v2(tensor, tensor_debug_mode=tensor_debug_mode, output_dtype=dtypes.float64))\n                    else:\n                        tensor_proto = tensor_pb2.TensorProto()\n                elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.SHAPE:\n                    if tensor.dtype.is_floating or tensor.dtype.is_integer or tensor.dtype.is_bool:\n                        tensor_proto = _concrete_tensor_to_proto(gen_debug_ops.debug_numeric_summary_v2(tensor, tensor_debug_mode=tensor_debug_mode, output_dtype=dtypes.float64))\n                    else:\n                        tensor_proto = tensor_pb2.TensorProto()\n                elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:\n                    tensor_proto = _concrete_tensor_to_proto(tensor)\n                if tensor_proto:\n                    execution_proto.tensor_protos.append(tensor_proto)\n        return execution_proto\n    else:\n        raise NotImplementedError('Tensor instrumentation is not implemented for debug mode %s yet ' % self._tensor_debug_mode)", "docstring": "Dump the value of eager tensors.\n\nThe destination of the dumping is determined by the dump_root of the\ncurrently enabled dumping callback. The tensors may be transformed prior to\ndumping (e.g., reduced as summary statistics such as minimum, maximum and\narithmetic  mean). The details of this transformation (if any) depends on\nthe tensor_debug_mode of the currently enabled dumping callback.\n\nArgs:\ntensors: The EagerTensors whose values are to be dumped, with or without\nvalue transform.\nop_type: Type of the op that generates the tensors, as a string.\ninput_tensor_ids: IDs of the input EagerTensors to the op.\noutput_tensor_device_ids: Debugged-generated IDs for the devices on which\nthe output tensors are allocated, as a `list` of `int`s. 
Must match\n`tensors` in length.\ngraph_id: ID of the executed graph, applicable only to eager execution of\na FuncGraph.\n\nReturns:\nA tfdbg Execution protocol buffer.", "source": "github-repos"}
{"code": "def hostname(hn, ft, si):\n    if ((not hn) or (not hn.fqdn)):\n        hn = ft\n    if (hn and hn.fqdn):\n        fqdn = hn.fqdn\n        hostname = (hn.hostname if hn.hostname else fqdn.split('.')[0])\n        domain = (hn.domain if hn.domain else '.'.join(fqdn.split('.')[1:]))\n        return Hostname(fqdn, hostname, domain)\n    else:\n        fqdn = (si.get('profile_name') if si else None)\n        if fqdn:\n            hostname = fqdn.split('.')[0]\n            domain = '.'.join(fqdn.split('.')[1:])\n            return Hostname(fqdn, hostname, domain)\n    raise Exception('Unable to get hostname.')", "docstring": "Check hostname, facter and systemid to get the fqdn, hostname and domain.\n\nPrefer hostname to facter and systemid.\n\nReturns:\ninsights.combiners.hostname.Hostname: A named tuple with `fqdn`,\n`hostname` and `domain` components.\n\nRaises:\nException: If no hostname can be found in any of the three parsers.", "source": "codesearchnet"}
{"code": "def apply_enhancement(data, func, exclude=None, separate=False,\n                      pass_dask=False):\n    \n    attrs = data.attrs\n    bands = data.coords['bands'].values\n    if exclude is None:\n        exclude = ['A'] if 'A' in bands else []\n\n    if separate:\n        data_arrs = []\n        for idx, band_name in enumerate(bands):\n            band_data = data.sel(bands=[band_name])\n            if band_name in exclude:\n                \n                data_arrs.append(band_data)\n                continue\n\n            if pass_dask:\n                dims = band_data.dims\n                coords = band_data.coords\n                d_arr = func(band_data.data, index=idx)\n                band_data = xr.DataArray(d_arr, dims=dims, coords=coords)\n            else:\n                band_data = func(band_data, index=idx)\n            data_arrs.append(band_data)\n            \n            attrs.update(band_data.attrs)\n\n        data.data = xr.concat(data_arrs, dim='bands').data\n        data.attrs = attrs\n        return data\n    else:\n        band_data = data.sel(bands=[b for b in bands\n                                    if b not in exclude])\n        if pass_dask:\n            dims = band_data.dims\n            coords = band_data.coords\n            d_arr = func(band_data.data)\n            band_data = xr.DataArray(d_arr, dims=dims, coords=coords)\n        else:\n            band_data = func(band_data)\n\n        attrs.update(band_data.attrs)\n        \n        new_data = xr.concat([band_data, data.sel(bands=exclude)],\n                             dim='bands')\n        data.data = new_data.sel(bands=bands).data\n        data.attrs = attrs\n\n    return data", "docstring": "Apply `func` to the provided data.\n\nArgs:\ndata (xarray.DataArray): Data to be modified inplace.\nfunc (callable): Function to be applied to an xarray\nexclude (iterable): Bands in the 'bands' dimension to not include\nin the calculations.\nseparate (bool): Apply `func` one band at a time. Default is False.\npass_dask (bool): Pass the underlying dask array instead of the\nxarray.DataArray.", "source": "juraj-google-style"}
{"code": "def execute_on(self, worker):\n    replica_args = _select_worker_slice(worker.worker_index, self._args)\n    replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)\n    e = _get_error_from_remote_values(replica_args) or _get_error_from_remote_values(replica_kwargs)\n    if e:\n        if not isinstance(e, ClosureInputError):\n            e = ClosureInputError(e)\n        raise e\n    with ops.device(worker.device_name):\n        with context.executor_scope(worker.executor):\n            with coordinator_context.with_dispatch_context(worker):\n                with metric_utils.monitored_timer('closure_execution'):\n                    output_values = self._function(*nest.map_structure(coordinator_context.maybe_get_remote_value, replica_args), **nest.map_structure(coordinator_context.maybe_get_remote_value, replica_kwargs))\n    self.maybe_call_with_output_remote_value(lambda r: r._set_values(output_values))", "docstring": "Executes the closure on the given worker.\n\nArgs:\nworker: a `Worker` object.", "source": "github-repos"}
{"code": "def get_hook(hook_name):\n    if (not pkg_resources.resource_exists(__name__, hook_name)):\n        raise HookNotFoundError\n    return pkg_resources.resource_string(__name__, hook_name)", "docstring": "Returns the specified hook.\n\nArgs:\nhook_name (str)\n\nReturns:\nstr - (the content of) the hook\n\nRaises:\nHookNotFoundError", "source": "codesearchnet"}
{"code": "def top_k_categorical_accuracy(y_true, y_pred, k=5):\n    return math_ops.cast(nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), backend.floatx())", "docstring": "Computes how often targets are in the top `K` predictions.\n\nStandalone usage:\n>>> y_true = [[0, 0, 1], [0, 1, 0]]\n>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]\n>>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)\n>>> assert m.shape == (2,)\n>>> m.numpy()\narray([1., 1.], dtype=float32)\n\nArgs:\ny_true: The ground truth values.\ny_pred: The prediction values.\nk: (Optional) Number of top elements to look at for computing accuracy.\nDefaults to 5.\n\nReturns:\nTop K categorical accuracy value.", "source": "github-repos"}
{"code": "def get_tree(profile, sha, recursive=True):\n    resource = ('/trees/' + sha)\n    if recursive:\n        resource += '?recursive=1'\n    data = api.get_request(profile, resource)\n    return prepare(data)", "docstring": "Fetch a tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of the tree to fetch.\n\nrecursive\nIf ``True``, traverse all subtrees and their subtrees, all the\nway down. That will return a list of all objects in the tree,\nall levels deep.\n\nReturns:\nA dict with data about the tree.", "source": "codesearchnet"}
{"code": "def all_label_values(self, label_list_ids=None):\n        \n        values = set()\n\n        for label_list in self.label_lists.values():\n            if label_list_ids is None or label_list.idx in label_list_ids:\n                values = values.union(label_list.label_values())\n\n        return values", "docstring": "Return a set of all label-values occurring in this utterance.\n\nArgs:\nlabel_list_ids (list): If not None, only label-values from\nlabel-lists with an id contained in this list\nare considered.\n\nReturns:\n:class:`set`: A set of distinct label-values.", "source": "juraj-google-style"}
{"code": "def write(self, destination, filename, template_name, **kwargs):\n        \n        template = self.env.get_template(template_name)\n        content = template.render(kwargs)\n        super(TemplateFileWriter, self).write(destination=destination, filename=filename, content=content)", "docstring": "Write a file according to the template name\n\nArgs:\ndestination (string): the destination location\nfilename (string): the filename that will be written\ntemplate_name (string): the name of the template\nkwargs (dict): all attribute that will be passed to the template", "source": "juraj-google-style"}
{"code": "def validate_and_decode(jwt_bu64, cert_obj):\n    try:\n        return jwt.decode(jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True)\n    except jwt.InvalidTokenError as e:\n        raise JwtException('Signature is invalid. error=\"{}\"'.format(str(e)))", "docstring": "Validate the JWT and return as a dict.\n\n- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and\nreturns it as a dict.\n\nArgs:\njwt_bu64: bytes\nThe JWT encoded using a a URL safe flavor of Base64.\n\ncert_obj: cryptography.Certificate\nPublic certificate used for signing the JWT (typically the CN cert).\n\nRaises:\nJwtException: If validation fails.\n\nReturns:\ndict: Values embedded in the JWT.", "source": "codesearchnet"}
{"code": "def get_dimension_indices(self, query):\n        \n        ids = self['id'] if self.get('id') else self['dimension']['id']\n        indices = []\n\n        for idx, id in enumerate(ids):\n            indices.append(self.get_dimension_index(id,\n                                                    [d.get(id) for d in query\n                                                     if id in d][0]))\n\n        return indices", "docstring": "Converts a dimension/category list of dicts into a list of \\\ndimensions’ indices.\nArgs:\nquery(list): dimension/category list of dicts.\n\nReturns:\nindices(list): list of dimensions' indices.", "source": "juraj-google-style"}
{"code": "def _parse_dtype(self, space):\n    if isinstance(space, gym.spaces.Discrete):\n        return tf.int32\n    if isinstance(space, gym.spaces.Box):\n        return tf.float32\n    raise NotImplementedError()", "docstring": "Get a tensor dtype from a OpenAI Gym space.\n\nArgs:\nspace: Gym space.\n\nRaises:\nNotImplementedError: For spaces other than Box and Discrete.\n\nReturns:\nTensorFlow data type.", "source": "codesearchnet"}
{"code": "def _atoms(atoms_string):\n    \n    atoms = {}\n    for split in atoms_string.split(','):\n        sites = split.split('.')\n        el = sites.pop(0)\n        sites = list(map(int, sites))\n        atoms[el] = np.array(sites) - 1\n    return atoms", "docstring": "Parse the atom string.\n\nArgs:\natoms_string (str): The atoms to plot, in the form ``\"C.1.2.3,\"``.\n\nReturns:\ndict: The atomic indices over which to sum the DOS. Formatted as::\n\n{Element: [atom_indices]}.\n\nIndices are zero indexed for each atomic species. If an element symbol\nis included with an empty list, then all sites for that species are\nconsidered.", "source": "juraj-google-style"}
{"code": "def DoesNotContainIgnoreCase(self, value):\n    self._awql = self._CreateSingleValueCondition(value, 'DOES_NOT_CONTAIN_IGNORE_CASE')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"doesn not contain ignore case\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def _SparseAddGrad(op: ops.Operation, *grads):\n    val_grad = grads[1]\n    a_indices = op.inputs[0]\n    b_indices = op.inputs[3]\n    sum_indices = op.outputs[0]\n    a_val_grad, b_val_grad = gen_sparse_ops.sparse_add_grad(val_grad, a_indices, b_indices, sum_indices)\n    a_val_grad.set_shape(op.inputs[1].get_shape())\n    b_val_grad.set_shape(op.inputs[4].get_shape())\n    return (None, a_val_grad, None, None, b_val_grad, None, None)", "docstring": "The backward operator for the SparseAdd op.\n\nThe SparseAdd op calculates A + B, where A, B, and the sum are all represented\nas `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.\nnon-empty values of the sum, and outputs the gradients w.r.t. the non-empty\nvalues of A and B.\n\nArgs:\nop: the SparseAdd op\n*grads: the incoming gradients, one element per output of `op`\n\nReturns:\nGradient for each of the 6 input tensors of SparseAdd:\n(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)\nThe gradients for the indices, shapes, and the threshold are None.", "source": "github-repos"}
{"code": "def all_arguments(cls, function, arguments):\n    if isinstance(arguments, dict):\n        arguments = Arguments(**arguments)\n    elif (not isinstance(arguments, Arguments)):\n        arguments = Arguments(*arguments)\n    return cls(function, arguments)", "docstring": "Helper function for creating `FunctionCall`s with `Arguments`.\n\nArgs:\nfunction: The value to store for the action function.\narguments: The values to store for the arguments of the action. Can either\nbe an `Arguments` object, a `dict`, or an iterable. If a `dict` or an\niterable is provided, the values will be unpacked into an `Arguments`\nobject.\n\nReturns:\nA new `FunctionCall` instance.", "source": "codesearchnet"}
{"code": "def GreaterThan(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, '>')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"greater than\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def vals2colors(vals,cmap='GnBu_d',res=100):\n    \n    \n    if any(isinstance(el, list) for el in vals):\n        vals = list(itertools.chain(*vals))\n\n    \n    palette = np.array(sns.color_palette(cmap, res))\n    ranks = np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1\n    return [tuple(i) for i in palette[ranks, :]]", "docstring": "Maps values to colors\nArgs:\nvalues (list or list of lists) - list of values to map to colors\ncmap (str) - color map (default is 'husl')\nres (int) - resolution of the color map (default: 100)\nReturns:\nlist of rgb tuples", "source": "juraj-google-style"}
{"code": "def tensor_rank_tensor(self, name='tensor_rank_tensor'):\n    with self._name_scope(name):\n        return self._tensor_rank_tensor()", "docstring": "Rank (in the sense of tensors) of matrix corresponding to this operator.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\n`int32` `Tensor`, determined at runtime.", "source": "github-repos"}
{"code": "def Analyze(self, source_path, output_writer):\n    \n    if not os.path.exists(source_path):\n      raise RuntimeError('No such source: {0:s}.'.format(source_path))\n\n    scan_context = source_scanner.SourceScannerContext()\n    scan_path_spec = None\n    scan_step = 0\n\n    scan_context.OpenSourcePath(source_path)\n\n    while True:\n      self._source_scanner.Scan(\n          scan_context, auto_recurse=self._auto_recurse,\n          scan_path_spec=scan_path_spec)\n\n      if not scan_context.updated:\n        break\n\n      if not self._auto_recurse:\n        output_writer.WriteScanContext(scan_context, scan_step=scan_step)\n      scan_step += 1\n\n      \n      if scan_context.source_type in [\n          definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]:\n        break\n\n      \n      \n      for locked_scan_node in scan_context.locked_scan_nodes:\n        self._PromptUserForEncryptedVolumeCredential(\n            scan_context, locked_scan_node, output_writer)\n\n      if not self._auto_recurse:\n        scan_node = scan_context.GetUnscannedScanNode()\n        if not scan_node:\n          return\n        scan_path_spec = scan_node.path_spec\n\n    if self._auto_recurse:\n      output_writer.WriteScanContext(scan_context)", "docstring": "Analyzes the source.\n\nArgs:\nsource_path (str): the source path.\noutput_writer (StdoutWriter): the output writer.\n\nRaises:\nRuntimeError: if the source path does not exists, or if the source path\nis not a file or directory, or if the format of or within the source\nfile is not supported.", "source": "juraj-google-style"}
{"code": "def on_element(self, element, window, context):\n    pass", "docstring": "Called when a new element arrives in a window.\n\nArgs:\nelement: the element being added\nwindow: the window to which the element is being added\ncontext: a context (e.g. a TriggerContext instance) for managing state\nand setting timers", "source": "github-repos"}
{"code": "def get_float_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH):\n    length = self.get_int(min_length, max_length)\n    return self.fdp.ConsumeFloatListInRange(length, _MIN_FLOAT, _MAX_FLOAT)", "docstring": "Consume a float list with given constraints.\n\nArgs:\nmin_length: The minimum length of the list.\nmax_length: The maximum length of the list.\n\nReturns:\nConsumed integer list based on input bytes and constraints.", "source": "github-repos"}
{"code": "def _generate_enqueue_op(self, inputs, name_prefix, index, device=None, tpu_ordinal=-1):\n    full_name = '%s/%d' % (name_prefix, index)\n    shapes = [t.shape for t in inputs]\n    if device is None:\n        devices = [t.device for t in inputs]\n        for i in range(1, self.number_of_tuple_elements):\n            if devices[0] != devices[i]:\n                raise ValueError(f'input devices for shard {index} are {str(devices)}, but should all be the same')\n        with ops.colocate_with(inputs[0]):\n            return tpu_ops.infeed_enqueue_tuple(inputs=inputs, shapes=shapes, name=full_name, device_ordinal=tpu_ordinal)\n    else:\n        with ops.device(device):\n            return tpu_ops.infeed_enqueue_tuple(inputs=inputs, shapes=shapes, name=full_name, device_ordinal=tpu_ordinal)", "docstring": "Generate a host-side Op to enqueue a tuple to the queue.\n\nIf device is None the inputs are all required to have the same\ndevice specification, and the enqueue Op is colocated with\ninputs[0]. Otherwise the enqueue Op is placed on 'device'.\n\nArgs:\ninputs: a list of Tensors with the types and shapes of the tuple elements.\nname_prefix: the base name for the Op.\nindex: the shard index, used to uniquify the Op name.\ndevice: device to place the Op on, or None if it should be\ncolocated with the inputs.\ntpu_ordinal: ordinal of the TPU device on the host to use for\ninfeed if device is a CPU device. Should be set to -1 if device\nis a TPU device.\n\nReturns:\nAn Op corresponding to a shard of infeed enqueued at the host,\nsuitable for use within a replicated block.\n\nRaises:\nValueError: if device is None and inputs do not all have the\nsame device specification.", "source": "github-repos"}
{"code": "def _swap_where(condition, x, y):\n    return (tf.where(condition, y, x), tf.where(condition, x, y))", "docstring": "Swaps the elements of `x` and `y` based on `condition`.\n\nArgs:\ncondition: A `Tensor` of dtype bool.\nx: A `Tensor` with the same shape as `condition`.\ny: A `Tensor` with the same shape and dtype as `x`.\n\nReturns:\nTwo `Tensors` with the same shape as `x` and `y`.", "source": "github-repos"}
{"code": "def write_config_json(config_file, data):\n    outfile = None\n    try:\n        with open(config_file, 'w') as outfile:\n            json.dump(data, outfile)\n    except:\n        (line, filename, synerror) = trace()\n        raise ArcRestHelperError({'function': 'init_config_json', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        outfile = None\n        del outfile\n        gc.collect()", "docstring": "Serializes an object to disk.\n\nArgs:\nconfig_file (str): The path on disk to save the file.\ndata (object): The object to serialize.", "source": "codesearchnet"}
{"code": "def __init__(self, sdat):\n        \n        self.sdat = sdat\n        super().__init__('Stagnant lid regime for {}'.format(sdat))", "docstring": "Initialization of instances:\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData\ninstance for which a stagnant lid regime was found.\n\nAttributes:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): the StagyyData\ninstance for which a stagnant lid regime was found.", "source": "juraj-google-style"}
{"code": "def convertDay(self, day, prefix='', weekday=False):\n\n    def sameDay(d1, d2):\n        d = (d1.day == d2.day)\n        m = (d1.month == d2.month)\n        y = (d1.year == d2.year)\n        return (d and m and y)\n    tom = (self.now + datetime.timedelta(days=1))\n    if sameDay(day, self.now):\n        return 'today'\n    elif sameDay(day, tom):\n        return 'tomorrow'\n    if weekday:\n        dayString = day.strftime('%A, %B %d')\n    else:\n        dayString = day.strftime('%B %d')\n    if (not int(dayString[(- 2)])):\n        dayString = (dayString[:(- 2)] + dayString[(- 1)])\n    return ((prefix + ' ') + dayString)", "docstring": "Convert a datetime object representing a day into a human-ready\nstring that can be read, spoken aloud, etc.\n\nArgs:\nday (datetime.date): A datetime object to be converted into text.\nprefix (str): An optional argument that prefixes the converted\nstring. For example, if prefix=\"in\", you'd receive \"in two\ndays\", rather than \"two days\", while the method would still\nreturn \"tomorrow\" (rather than \"in tomorrow\").\nweekday (bool): An optional argument that returns \"Monday, Oct. 1\"\nif True, rather than \"Oct. 1\".\n\nReturns:\nA string representation of the input day, ignoring any time-related\ninformation.", "source": "codesearchnet"}
{"code": "def size(self) -> int:\n    return sizeof(self.value)", "docstring": "Number of byte required for this data type\n\nReturns:\nInteger > 0", "source": "github-repos"}
{"code": "def get_structure_from_id(self, task_id, final_structure=True):\n    args = {'task_id': task_id}\n    field = ('output.crystal' if final_structure else 'input.crystal')\n    results = tuple(self.query([field], args))\n    if (len(results) > 1):\n        raise QueryError('More than one result found for task_id {}!'.format(task_id))\n    elif (len(results) == 0):\n        raise QueryError('No structure found for task_id {}!'.format(task_id))\n    c = results[0]\n    return Structure.from_dict(c[field])", "docstring": "Returns a structure from the database given the task id.\n\nArgs:\ntask_id:\nThe task_id to query for.\nfinal_structure:\nWhether to obtain the final or initial structure. Defaults to\nTrue.", "source": "codesearchnet"}
{"code": "def reindex(self, kdims=[], force=False):\n    old_kdims = [d.name for d in self.kdims]\n    if (not isinstance(kdims, list)):\n        kdims = [kdims]\n    elif (not len(kdims)):\n        kdims = [d for d in old_kdims if (not (len(set(self.dimension_values(d))) == 1))]\n    indices = [self.get_dimension_index(el) for el in kdims]\n    keys = [tuple((k[i] for i in indices)) for k in self.data.keys()]\n    reindexed_items = OrderedDict(((k, v) for (k, v) in zip(keys, self.data.values())))\n    reduced_dims = set([d.name for d in self.kdims]).difference(kdims)\n    dimensions = [self.get_dimension(d) for d in kdims if (d not in reduced_dims)]\n    if ((len(set(keys)) != len(keys)) and (not force)):\n        raise Exception('Given dimension labels not sufficientto address all values uniquely')\n    if len(keys):\n        cdims = {self.get_dimension(d): self.dimension_values(d)[0] for d in reduced_dims}\n    else:\n        cdims = {}\n    with item_check((indices == sorted(indices))):\n        return self.clone(reindexed_items, kdims=dimensions, cdims=cdims)", "docstring": "Reindexes object dropping static or supplied kdims\n\nCreates a new object with a reordered or reduced set of key\ndimensions. By default drops all non-varying key dimensions.\n\nReducing the number of key dimensions will discard information\nfrom the keys. All data values are accessible in the newly\ncreated object as the new labels must be sufficient to address\neach value uniquely.\n\nArgs:\nkdims (optional): New list of key dimensions after reindexing\nforce (bool, optional): Whether to drop non-unique items\n\nReturns:\nReindexed object", "source": "codesearchnet"}
{"code": "def similar_artists(self, artist_id: str) -> List[NameExternalIDPair]:\n        \n        response: requests.Response = requests.get(\n            self._API_URL_TEMPLATE.format(\"artists/{}/related-artists\".format(artist_id)),\n            headers={\"Authorization\": \"Bearer {}\".format(self._token.access_token)}\n        )\n\n        \n\n        response.raise_for_status()\n        if not response.text:\n            return []\n\n        result: List[NameExternalIDPair] = []\n        data: List[Dict] = response.json()[\"artists\"]\n        for artist in data:\n            artist = NameExternalIDPair(artist[\"name\"], artist[\"id\"])\n            if artist.name is None or artist.external_id is None:\n                raise SpotifyClientError(\"Name or ID is missing\")\n            result.append(artist)\n\n        return result", "docstring": "Returns zero or more similar artists (in the form of artist name - external ID pairs)\nto the one corresponding to the given artist ID.\n\nArguments:\nartist_id ([str]): The Spotify ID of the artist for whom similar artists are requested.\n\nReturns:\nZero or more artist name - external ID pairs.\n\nRaises:\nrequests.HTTPError: If an HTTP error occurred during the request.\nSpotifyClientError: If an invalid item is found.", "source": "juraj-google-style"}
{"code": "def read_vocab_file(file_path):\n  \n  with file_io.FileIO(file_path, 'r') as f:\n    vocab_pd = pd.read_csv(\n        f,\n        header=None,\n        names=['vocab', 'count'],\n        dtype=str,  \n        na_filter=False)  \n\n  vocab = vocab_pd['vocab'].tolist()\n  ex_count = vocab_pd['count'].astype(int).tolist()\n\n  return vocab, ex_count", "docstring": "Reads a vocab file to memeory.\n\nArgs:\nfile_path: Each line of the vocab is in the form \"token,example_count\"\n\nReturns:\nTwo lists, one for the vocab, and one for just the example counts.", "source": "juraj-google-style"}
{"code": "def is_profile_of(url: str, message_or_descriptor: annotation_utils.MessageOrDescriptorBase) -> bool:\n    options = annotation_utils.get_options(message_or_descriptor)\n    return url in options.Extensions[annotations_pb2.fhir_profile_base]", "docstring": "Returns True if message_or_descriptor is a profile of url.\n\nArgs:\nurl: The FHIR structure definition URL to compare against.\nmessage_or_descriptor: The Message or Descriptor to examine.\n\nReturns:\nTrue if message_or_descriptor's fhir_profile_base extension list contains\nurl.", "source": "github-repos"}
{"code": "def write_tms_tdi_bits(self, tmsdata, tdidata, return_tdo=False):\n    self._check_jtag()\n    if (len(tmsdata) != len(tdidata)):\n        raise Exception('TMSdata and TDIData must be the same length')\n    self._update_scanchain(tmsdata)\n    count = len(tmsdata)\n    t = time()\n    outdata = bitarray([val for pair in zip(tmsdata, tdidata) for val in pair])\n    outdata = build_byte_align_buff(outdata).tobytes()[::(- 1)]\n    if (self._scanchain and self._scanchain._print_statistics):\n        print('TDI/TDI DATA PREP TIME', (time() - t))\n        t = time()\n    self.bulkCommandDefault((_BMSG_WRITE_TMS_TDI % (return_tdo, count.to_bytes(4, 'little'))))\n    self.bulkWriteData(outdata)\n    if (self._scanchain and self._scanchain._print_statistics):\n        print('TRANSFER TIME', (time() - t))\n        t = time()\n    tdo_bits = (self._read_tdo(count) if return_tdo else None)\n    if (self._scanchain and self._scanchain._print_statistics):\n        print('TDO READ TIME', (time() - t))\n    self._get_adv_trans_stats(10, return_tdo)\n    return tdo_bits", "docstring": "Command controller to write arbitrary TDI and TMS data to the\nphysical scan chain. Optionally return TDO bits sent back\nfrom the scan chain.\n\nArgs:\ntmsdata - bits to send over TMS line of scan chain (bitarray)\nmust be the same length ad tdidata\ntdidata - bits to send over TDI line of scan chain (bitarray)\nmust be the same length ad tmsdata\nreturn_tdo (bool) - return the devices bitarray response\n\nReturns:\nNone by default or the (bitarray) response of the device\nafter receiving data, if return_tdo is True.\n\nUsage:\n>>> from proteusisc import getAttachedControllers, bitarray\n>>> c = getAttachedControllers()[0]\n>>> c.jtag_enable()\n>>> c.write_tms_tdi_bits(bitarray(\"00001\"),\nbitarray(\"11111\"), return_tdo=True)\n>>> c.jtag_disable()", "source": "codesearchnet"}
{"code": "def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n    return {self.main_input_name: tf.random.uniform([1, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32), 'decoder_input_ids': tf.constant([[1, 3]], dtype=tf.int32)}", "docstring": "Dummy inputs to build the network.\n\nReturns:\n`Dict[str, tf.Tensor]`: The dummy inputs.", "source": "github-repos"}
{"code": "def ParseRow(self, parser_mediator, row_offset, row):\n    \n    time_elements_tuple = self._GetTimeElementsTuple(row['time'])\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n      date_time.is_local_time = True\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(time_elements_tuple))\n      return\n\n    \n    event_data = SymantecEventData()\n    event_data.access = row.get('access', None)\n    event_data.action0 = row.get('action0', None)\n    event_data.action1 = row.get('action1', None)\n    event_data.action1_status = row.get('action1_status', None)\n    event_data.action2 = row.get('action2', None)\n    event_data.action2_status = row.get('action2_status', None)\n    event_data.address = row.get('address', None)\n    event_data.backup_id = row.get('backup_id', None)\n    event_data.cat = row.get('cat', None)\n    event_data.cleaninfo = row.get('cleaninfo', None)\n    event_data.clientgroup = row.get('clientgroup', None)\n    event_data.compressed = row.get('compressed', None)\n    event_data.computer = row.get('computer', None)\n    event_data.definfo = row.get('definfo', None)\n    event_data.defseqnumber = row.get('defseqnumber', None)\n    event_data.deleteinfo = row.get('deleteinfo', None)\n    event_data.depth = row.get('depth', None)\n    event_data.description = row.get('description', None)\n    event_data.domain_guid = row.get('domain_guid', None)\n    event_data.domainname = row.get('domainname', None)\n    event_data.err_code = row.get('err_code', None)\n    event_data.event_data = row.get('event_data', None)\n    event_data.event = row.get('event', None)\n    event_data.extra = row.get('extra', None)\n    event_data.file = row.get('file', None)\n    event_data.flags = row.get('flags', None)\n    event_data.groupid = row.get('groupid', None)\n    event_data.guid = row.get('guid', None)\n    event_data.license_expiration_dt = row.get('license_expiration_dt', None)\n    event_data.license_feature_name = row.get('license_feature_name', None)\n    event_data.license_feature_ver = row.get('license_feature_ver', None)\n    event_data.license_fulfillment_id = row.get('license_fulfillment_id', None)\n    event_data.license_lifecycle = row.get('license_lifecycle', None)\n    event_data.license_seats_delta = row.get('license_seats_delta', None)\n    event_data.license_seats = row.get('license_seats', None)\n    event_data.license_seats_total = row.get('license_seats_total', None)\n    event_data.license_serial_num = row.get('license_serial_num', None)\n    event_data.license_start_dt = row.get('license_start_dt', None)\n    event_data.logger = row.get('logger', None)\n    event_data.login_domain = row.get('login_domain', None)\n    event_data.log_session_guid = row.get('log_session_guid', None)\n    event_data.macaddr = row.get('macaddr', None)\n    event_data.new_ext = row.get('new_ext', None)\n    event_data.ntdomain = row.get('ntdomain', None)\n    event_data.offset = row_offset\n    event_data.parent = row.get('parent', None)\n    event_data.quarfwd_status = row.get('quarfwd_status', None)\n    event_data.remote_machine_ip = row.get('remote_machine_ip', None)\n    event_data.remote_machine = row.get('remote_machine', None)\n    event_data.scanid = row.get('scanid', None)\n    event_data.snd_status = row.get('snd_status', None)\n    event_data.status = row.get('status', None)\n    event_data.still_infected = 
row.get('still_infected', None)\n    event_data.time = row.get('time', None)\n    event_data.user = row.get('user', None)\n    event_data.vbin_id = row.get('vbin_id', None)\n    event_data.vbin_session_id = row.get('vbin_session_id', None)\n    event_data.version = row.get('version:', None)\n    event_data.virus_id = row.get('virus_id', None)\n    event_data.virus = row.get('virus', None)\n    event_data.virustype = row.get('virustype', None)\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a line of the log file and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow_offset (int): line number of the row.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.", "source": "juraj-google-style"}
{"code": "def filter_sequences(self, seq_type):\n        \n        return DictList(x for x in self.sequences if isinstance(x, seq_type))", "docstring": "Return a DictList of only specified types in the sequences attribute.\n\nArgs:\nseq_type (SeqProp): Object type\n\nReturns:\nDictList: A filtered DictList of specified object type only", "source": "juraj-google-style"}
{"code": "def QA_fetch_ctp_tick(code, start, end, frequence, format='pd', collections=DATABASE.ctp_tick):\n    \n\n    code = QA_util_code_tolist(code, auto_fill=False)\n    cursor = collections.find({\n        'InstrumentID': {'$in': code}, \"time_stamp\": {\n            \"$gte\": QA_util_time_stamp(start),\n            \"$lte\": QA_util_time_stamp(end)\n        }, 'type': frequence\n    }, {\"_id\": 0}, batch_size=10000)\n\n    hq = pd.DataFrame([data for data in cursor]).replace(1.7976931348623157e+308,\n                                                         numpy.nan).replace('', numpy.nan).dropna(axis=1)\n    p1 = hq.loc[:, ['ActionDay', 'AskPrice1', 'AskVolume1', 'AveragePrice', 'BidPrice1',\n                    'BidVolume1', 'HighestPrice', 'InstrumentID', 'LastPrice',\n                    'OpenInterest', 'TradingDay', 'UpdateMillisec',\n                    'UpdateTime', 'Volume']]\n    p1 = p1.assign(datetime=p1.ActionDay.apply(QA_util_date_int2str)+' '+p1.UpdateTime + (p1.UpdateMillisec/1000000).apply(lambda x: str('%.6f' % x)[1:]),\n                   code=p1.InstrumentID)\n    p1.datetime = pd.to_datetime(p1.datetime)\n    return p1.set_index(p1.datetime)", "docstring": "仅供存储的ctp tick使用\n\nArguments:\ncode {[type]} -- [description]\n\nKeyword Arguments:\nformat {str} -- [description] (default: {'pd'})\ncollections {[type]} -- [description] (default: {DATABASE.ctp_tick})\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def getent(refresh=False):\n    \n    if 'user.getent' in __context__ and not refresh:\n        return __context__['user.getent']\n\n    ret = []\n    for user in __salt__['user.list_users']():\n        stuff = {}\n        user_info = __salt__['user.info'](user)\n\n        stuff['gid'] = ''\n        stuff['groups'] = user_info['groups']\n        stuff['home'] = user_info['home']\n        stuff['name'] = user_info['name']\n        stuff['passwd'] = user_info['passwd']\n        stuff['shell'] = ''\n        stuff['uid'] = user_info['uid']\n\n        ret.append(stuff)\n\n    __context__['user.getent'] = ret\n    return ret", "docstring": "Return the list of all info for all users\n\nArgs:\nrefresh (bool, optional): Refresh the cached user information. Useful\nwhen used from within a state function. Default is False.\n\nReturns:\ndict: A dictionary containing information about all users on the system\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.getent", "source": "juraj-google-style"}
{"code": "def _VerifyOneTest(self, pool_func, input_sizes, window, strides, padding, data_format, data_type, expected, use_gpu):\n    total_size = 1\n    for s in input_sizes:\n        total_size *= s\n    x = [f * 1.0 for f in range(1, total_size + 1)]\n    if data_type == dtypes.bfloat16:\n        x = [f * 0.1 for f in x]\n        expected = [f * 0.1 for f in expected]\n    with self.cached_session(use_gpu=use_gpu):\n        t = constant_op.constant(x, shape=input_sizes, dtype=data_type)\n        window = [1] + list(window) + [1]\n        strides = [1] + list(strides) + [1]\n        if data_format == 'NCDHW':\n            t = test_util.NHWCToNCHW(t)\n            window = test_util.NHWCToNCHW(window)\n            strides = test_util.NHWCToNCHW(strides)\n        t = pool_func(t, ksize=window, strides=strides, padding=padding, data_format=data_format)\n        if data_format == 'NCDHW':\n            t = test_util.NCHWToNHWC(t)\n        vals = self.evaluate(t)\n    actual = vals.flatten()\n    rtol = atol = 1e-06\n    if data_type == dtypes.bfloat16:\n        rtol = atol = 0.02\n    self.assertAllClose(expected, actual, rtol=rtol, atol=atol)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Function to be called: co.MaxPool, co.AvgPool.\ninput_sizes: Input tensor dimensions.\nwindow: Tuple of kernel dims: planes, rows, cols.\nstrides: Tuple of strides for dims: planes, rows, cols.\npadding: Padding type.\ndata_format: The data format we use to run the pooling operation.\ndata_type: The data type to use to run the pooling operation.\nexpected: An array containing the expected operation outputs.\nuse_gpu: Whether to run ops on GPU.", "source": "github-repos"}
{"code": "def __init__(self, service_name, user_name):\n        \n        super(Storage, self).__init__(lock=threading.Lock())\n        self._service_name = service_name\n        self._user_name = user_name", "docstring": "Constructor.\n\nArgs:\nservice_name: string, The name of the service under which the\ncredentials are stored.\nuser_name: string, The name of the user to store credentials for.", "source": "juraj-google-style"}
{"code": "def fft2(x):\n    if any_symbolic_tensors(x):\n        return FFT2().symbolic_call(x)\n    return backend.math.fft2(x)", "docstring": "Computes the 2D Fast Fourier Transform along the last two axes of input.\n\nArgs:\nx: Tuple of the real and imaginary parts of the input tensor. Both\ntensors in the tuple should be of floating type.\n\nReturns:\nA tuple containing two tensors - the real and imaginary parts of the\noutput.\n\nExample:\n\n>>> x = (\n...     keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]),\n...     keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]),\n... )\n>>> fft2(x)\n(array([[ 6.,  0.],\n[ 0., -2.]], dtype=float32), array([[ 2.,  0.],\n[ 0., -2.]], dtype=float32))", "source": "github-repos"}
{"code": "def get_dummies(self, columns, **kwargs):\n    cls = type(self)\n    if (columns is None):\n        columns = [c for c in self.columns if (not is_numeric_dtype(self.dtypes[c]))]\n        if (len(columns) == 0):\n            return self.copy()\n    elif (not is_list_like(columns)):\n        columns = [columns]\n\n    def set_columns(df, columns):\n        df.columns = columns\n        return df\n    set_cols = self.columns\n    columns_applied = self._map_across_full_axis(1, (lambda df: set_columns(df, set_cols)))\n    if (len(columns) == len(self.columns)):\n\n        def get_dummies_builder(df):\n            if (df is not None):\n                if (not df.empty):\n                    return pandas.get_dummies(df, **kwargs)\n                else:\n                    return pandas.DataFrame([])\n        func = self._prepare_method((lambda df: get_dummies_builder(df)))\n        new_data = columns_applied.map_across_full_axis(0, func)\n        untouched_data = None\n    else:\n\n        def get_dummies_builder(df, internal_indices=[]):\n            return pandas.get_dummies(df.iloc[(:, internal_indices)], columns=None, **kwargs)\n        numeric_indices = list(self.columns.get_indexer_for(columns))\n        new_data = columns_applied.apply_func_to_select_indices_along_full_axis(0, get_dummies_builder, numeric_indices, keep_remaining=False)\n        untouched_data = self.drop(columns=columns)\n    final_columns = self.compute_index(1, new_data, False)\n    if (len(columns) != len(self.columns)):\n        new_data = untouched_data.data.concat(1, new_data)\n        final_columns = untouched_data.columns.append(pandas.Index(final_columns))\n    return cls(new_data, self.index, final_columns)", "docstring": "Convert categorical variables to dummy variables for certain columns.\n\nArgs:\ncolumns: The columns to convert.\n\nReturns:\nA new QueryCompiler.", "source": "codesearchnet"}
{"code": "def parse(cls: Type[MessageT], uid: int, data: bytes,\n              permanent_flags: Iterable[Flag], internal_date: datetime,\n              expunged: bool = False, **kwargs: Any) -> MessageT:\n        \n        content = MessageContent.parse(data)\n        return cls(uid, permanent_flags, internal_date, expunged,\n                   content, **kwargs)", "docstring": "Parse the given file object containing a MIME-encoded email message\ninto a :class:`BaseLoadedMessage` object.\n\nArgs:\nuid: The UID of the message.\ndata: The raw contents of the message.\npermanent_flags: Permanent flags for the message.\ninternal_date: The internal date of the message.\nexpunged: True if this message has been expunged from the mailbox.", "source": "juraj-google-style"}
{"code": "def gen_permutations(self, index=0, args=None):\n    if (args is None):\n        args = []\n    try:\n        name = self.layout_json_names[index]\n        display = self.layout_json_params.get(name, {}).get('display')\n        input_type = self.install_json_params().get(name, {}).get('type')\n        if self.validate_layout_display(self.input_table, display):\n            if (input_type.lower() == 'boolean'):\n                for val in [True, False]:\n                    args.append({'name': name, 'value': val})\n                    self.db_update_record(self.input_table, name, val)\n                    self.gen_permutations((index + 1), list(args))\n                    args.pop()\n            elif (input_type.lower() == 'choice'):\n                valid_values = self.expand_valid_values(self.install_json_params().get(name, {}).get('validValues', []))\n                for val in valid_values:\n                    args.append({'name': name, 'value': val})\n                    self.db_update_record(self.input_table, name, val)\n                    self.gen_permutations((index + 1), list(args))\n                    args.pop()\n            else:\n                args.append({'name': name, 'value': None})\n                self.gen_permutations((index + 1), list(args))\n        else:\n            self.gen_permutations((index + 1), list(args))\n    except IndexError:\n        self._input_permutations.append(args)\n        outputs = []\n        for o_name in self.install_json_output_variables():\n            if (self.layout_json_outputs.get(o_name) is not None):\n                display = self.layout_json_outputs.get(o_name, {}).get('display')\n                valid = self.validate_layout_display(self.input_table, display)\n                if ((display is None) or (not valid)):\n                    continue\n            for ov in self.install_json_output_variables().get(o_name):\n                outputs.append(ov)\n        self._output_permutations.append(outputs)", "docstring": "Iterate recursively over layout.json parameter names.\n\nTODO: Add indicator values.\n\nArgs:\nindex (int, optional): The current index position in the layout names list.\nargs (list, optional): Defaults to None. The current list of args.", "source": "codesearchnet"}
{"code": "def _multi_worker_test(test_method):\n\n    def decorator(self, has_chief, num_workers, num_ps, share_gpu, runner, **kwargs):\n        if _num_total_workers(has_chief, num_workers) == 1 or _running_in_worker or (test_util.is_xla_enabled() and num_ps > 0):\n            with _multi_worker_session(kwargs):\n                test_method(self, **kwargs)\n            return\n        test_id = self.id()\n        if runner:\n            results = runner.run(_test_runner, args=(test_id, _env))\n        else:\n            cluster_spec = multi_worker_test_base.create_cluster_spec(has_chief=has_chief, num_workers=num_workers, num_ps=num_ps, has_eval=False)\n            ephemeral_runner = multi_process_runner.MultiProcessRunner(_test_runner, cluster_spec, share_gpu=share_gpu, args=(test_id, _env), dependence_on_chief=has_chief)\n            ephemeral_runner.start()\n            results = ephemeral_runner.join().return_value\n        skip_reason = None\n        for result in results:\n            if result.status == 'failure':\n                self.fail(result.message)\n                break\n            elif result.status == 'skipped':\n                skip_reason = result.message\n        if skip_reason is not None:\n            self.skipTest(skip_reason)\n    argspec = tf_inspect.getfullargspec(test_method)\n    decorator_args = (argspec.args or []) + ['has_chief', 'num_workers', 'num_ps', 'share_gpu', 'runner']\n    decorator_argspec = argspec._replace(args=decorator_args)\n    return tf_decorator.make_decorator(test_method, decorator, decorator_argspec=decorator_argspec)", "docstring": "Decorate test_method so that it runs in each worker.\n\nWe use `multi_process_runner` to simulate multiple workers. Since we run the\nthis function in the main process and all worker processes, this decoration\nbehaves differently in the main process and worker procssses. In the main\nprocess, it spawns subprocesses and runs the test on each of them; in a worker\nprocess, it executes test in the same way as a normal test, e.g.\nsetUp()/tearDown() are called before/after the test.\n\nArgs:\ntest_method: a function which must be a test method.\n\nReturns:\nDecorated `test_method`. Note that the decorated function has additional\narguments.", "source": "github-repos"}
{"code": "def expect_end(self):\n    logger.debug(\"Waiting for termination of '{0}'\".format(self.name))\n    try:\n        self._spawn.expect(pexpect.EOF)\n        self._spawn.wait()\n        dircontent = str(os.listdir(self.job.working_dir))\n        logger.debug(('Working directory after execution: ' + dircontent))\n        return (self.get_exitstatus(), self.get_output())\n    except pexpect.exceptions.EOF as e:\n        logger.debug('Raising termination exception.')\n        raise TerminationException(instance=self, real_exception=e, output=self.get_output())\n    except pexpect.exceptions.TIMEOUT as e:\n        logger.debug('Raising timeout exception.')\n        raise TimeoutException(instance=self, real_exception=e, output=self.get_output())\n    except Exception as e:\n        logger.debug('Waiting for expected program end failed.')\n        raise NestedException(instance=self, real_exception=e, output=self.get_output())", "docstring": "Wait for the running program to finish.\n\nReturns:\nA tuple with the exit code, as reported by the operating system, and the output produced.", "source": "codesearchnet"}
{"code": "def plot(self, data, height=1000, render_large_data=False):\n    import IPython\n    if (not isinstance(data, pd.DataFrame)):\n        raise ValueError('Expect a DataFrame.')\n    if ((len(data) > 10000) and (not render_large_data)):\n        raise ValueError(('Facets dive may not work well with more than 10000 rows. ' + 'Reduce data or set \"render_large_data\" to True.'))\n    jsonstr = data.to_json(orient='records')\n    html_id = ('f' + datalab.utils.commands.Html.next_id())\n    HTML_TEMPLATE = '\\n        <link rel=\"import\" href=\"/nbextensions/gcpdatalab/extern/facets-jupyter.html\">\\n        <facets-dive id=\"{html_id}\" height=\"{height}\"></facets-dive>\\n        <script>\\n          var data = {jsonstr};\\n          document.querySelector(\"\n    html = HTML_TEMPLATE.format(html_id=html_id, jsonstr=jsonstr, height=height)\n    return IPython.core.display.HTML(html)", "docstring": "Plots a detail view of data.\n\nArgs:\ndata: a Pandas dataframe.\nheight: the height of the output.", "source": "codesearchnet"}
{"code": "def overwrite_view_source(project, dir_path):\n    \n\n    project_html_location = dir_path / project / HTML_LOCATION\n    if not project_html_location.exists():\n        return\n\n    files_to_overwrite = [\n        f for f in project_html_location.iterdir() if \"html\" in f.suffix\n    ]\n\n    for html_file in files_to_overwrite:\n        with open(html_file, \"r\") as f:\n            html = f.readlines()\n        for i, l in enumerate(html):\n            if TO_REPLACE_WITH_HOME in l:\n                html[i] = NEW_HOME_LINK\n                break\n        with open(html_file, \"w\") as f:\n            f.writelines(html)", "docstring": "In the project's index.html built file, replace the top \"source\"\nlink with a link to the documentation's home, which is mkdoc's home\n\nArgs:\nproject (str): project to update\ndir_path (pathlib.Path): this file's path", "source": "juraj-google-style"}
{"code": "def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):\n    \n    \n    def _callback(downloaded, total):\n        \n        if (total is 0) or (downloaded == total):\n            return\n        progress = downloaded*100/total\n        sys.stderr.write('\\r[{0}] {1}%'.format('\n        sys.stderr.flush()\n        \n    m = _URI_RE.match(s3_path)\n    bucket_name = m.group(1)\n    bucket = boto_conn.get_bucket(bucket_name)\n    retries = 6\n    if s3_path.endswith('/') is False:\n        \n        key_name = m.group(2)\n        key_instance = bucket.get_key(key_name)\n        while key_instance is None and retries > 0:\n            retries = retries - 1\n            log.info(\"Results file is not available on s3. Retry: \" + str(6-retries))\n            time.sleep(10)\n            key_instance = bucket.get_key(key_name)\n        if key_instance is None:\n            raise Exception(\"Results file not available on s3 yet. This can be because of s3 eventual consistency issues.\")\n        log.info(\"Downloading file from %s\" % s3_path)\n        if delim is None:\n            try:\n                key_instance.get_contents_to_file(fp)  \n            except boto.exception.S3ResponseError as e:\n                if (e.status == 403):\n                    \n                    \n                    log.warn(\"Access denied while fetching the s3 object. Retrying without specifying the version....\")\n                    key_instance.open()\n                    fp.write(key_instance.read())\n                    key_instance.close()\n                else:\n                    raise\n        else:\n            \n            _read_iteratively(key_instance, fp, delim=delim)\n\n    else:\n        \n        key_prefix = m.group(2)\n        bucket_paths = bucket.list(key_prefix)\n        for one_path in bucket_paths:\n            name = one_path.name\n\n            \n            if name.endswith('$folder$'):\n                continue\n\n            log.info(\"Downloading file from %s\" % name)\n            if delim is None:\n                one_path.get_contents_to_file(fp)  \n            else:\n                _read_iteratively(one_path, fp, delim=delim)", "docstring": "Downloads the contents of all objects in s3_path into fp\n\nArgs:\n`boto_conn`: S3 connection object\n\n`s3_path`: S3 path to be downloaded\n\n`fp`: The file object where data is to be downloaded", "source": "juraj-google-style"}
{"code": "def list_windowsfeatures():\n    choc_path = _find_chocolatey(__context__, __salt__)\n    cmd = [choc_path, 'list', '--source', 'windowsfeatures']\n    result = __salt__['cmd.run_all'](cmd, python_shell=False)\n    if (result['retcode'] != 0):\n        raise CommandExecutionError('Running chocolatey failed: {0}'.format(result['stdout']))\n    return result['stdout']", "docstring": "Instructs Chocolatey to pull a full package list from the Windows Features\nlist, via the Deployment Image Servicing and Management tool.\n\nReturns:\nstr: List of Windows Features\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' chocolatey.list_windowsfeatures", "source": "codesearchnet"}
{"code": "def enter_section(self, section_id):\n    assert section_id not in self.exits\n    self.exits[section_id] = set()", "docstring": "Enters a regular section.\n\nRegular sections admit exit jumps, which end the section.\n\nArgs:\nsection_id: Hashable, the same node that will be used in calls to the\nast_node arg passed to add_exit_node", "source": "github-repos"}
{"code": "def is_emulator(self):\n    if EMULATOR_SERIAL_REGEX.match(self.serial):\n        return True\n    elif self.build_info['build_characteristics'] == 'emulator':\n        return True\n    elif self.build_info['hardware'] in ['ranchu', 'goldfish', 'cutf_cvm']:\n        return True\n    else:\n        return False", "docstring": "Whether this device is probably an emulator.\n\nReturns:\nTrue if this is probably an emulator.", "source": "github-repos"}
{"code": "def _ScanEncryptedVolume(self, scan_context, scan_node):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.SourceScannerError('Invalid or missing scan node.')\n\n    credentials = credentials_manager.CredentialsManager.GetCredentials(\n        scan_node.path_spec)\n    if not credentials:\n      raise errors.SourceScannerError('Missing credentials for scan node.')\n\n    credentials_dict = {\n        credential_type: credential_data\n        for credential_type, credential_data in self._credentials}\n\n    is_unlocked = False\n    for credential_type in credentials.CREDENTIALS:\n      credential_data = credentials_dict.get(credential_type, None)\n      if not credential_data:\n        continue\n\n      is_unlocked = self._source_scanner.Unlock(\n          scan_context, scan_node.path_spec, credential_type, credential_data)\n      if is_unlocked:\n        break\n\n    if not is_unlocked:\n      is_unlocked = self._PromptUserForEncryptedVolumeCredential(\n          scan_context, scan_node, credentials)\n\n    if is_unlocked:\n      self._source_scanner.Scan(\n          scan_context, scan_path_spec=scan_node.path_spec)", "docstring": "Scans an encrypted volume scan node for volume and file systems.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): volume scan node.\n\nRaises:\nSourceScannerError: if the format of or within the source is not\nsupported, the scan node is invalid or there are no credentials\ndefined for the format.", "source": "juraj-google-style"}
{"code": "def MethodCalled(self, mock_method):\n    \n\n    \n    \n\n    for method in self._methods:\n      if method == mock_method:\n        self._methods_called.add(mock_method)\n        \n        \n        mock_method._call_queue.appendleft(self)\n        return self, method\n\n    if self.IsSatisfied():\n      next_method = mock_method._PopNextMethod();\n      return next_method, None\n    else:\n      raise UnexpectedMethodCallError(mock_method, self)", "docstring": "Remove a method call from the group.\n\nIf the method is not in the set, an UnexpectedMethodCallError will be\nraised.\n\nArgs:\nmock_method: a mock method that should be equal to a method in the group.\n\nReturns:\nThe mock method from the group\n\nRaises:\nUnexpectedMethodCallError if the mock_method was not in the group.", "source": "juraj-google-style"}
{"code": "def add_tile(self, address, tile):\n        \n\n        if address in self._tiles:\n            raise ArgumentError(\"Tried to add two tiles at the same address\", address=address)\n\n        self._tiles[address] = tile", "docstring": "Add a tile to handle all RPCs at a given address.\n\nArgs:\naddress (int): The address of the tile\ntile (RPCDispatcher): A tile object that inherits from RPCDispatcher", "source": "juraj-google-style"}
{"code": "def _RegisterProcess(self, process):\n    if (process is None):\n        raise ValueError('Missing process.')\n    if (process.pid in self._processes_per_pid):\n        raise KeyError('Already managing process: {0!s} (PID: {1:d})'.format(process.name, process.pid))\n    self._processes_per_pid[process.pid] = process", "docstring": "Registers a process with the engine.\n\nArgs:\nprocess (MultiProcessBaseProcess): process.\n\nRaises:\nKeyError: if the process is already registered with the engine.\nValueError: if the process is missing.", "source": "codesearchnet"}
{"code": "def update_task_ids(self, encoder_vocab_size):\n    \n    for idx, task in enumerate(self.task_list):\n      task.set_task_id(idx + encoder_vocab_size)\n      tf.logging.info(\"Task %d (%s) has id %d.\" %\n                      (idx, task.name, task.task_id))", "docstring": "Generate task_ids for each problem.\n\nThese ids correspond to the index of the task in the task_list.\n\nArgs:\nencoder_vocab_size: the size of the vocab which is used to compute\nthe index offset.", "source": "juraj-google-style"}
{"code": "def _login(self, max_tries=2):\n    if (not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL)):\n        raise BrowserError(('Current url \"%s\" is not a signin url (\"%s\")' % (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL)))\n    email_field_loaded = (lambda br: br.find_elements_by_id('ap_email'))\n    self._wait().until(email_field_loaded)\n    tries = 0\n    while (tries < max_tries):\n        email_elem = self.find_element_by_id('ap_email')\n        email_elem.clear()\n        email_elem.send_keys(self._uname)\n        pword_elem = self.find_element_by_id('ap_password')\n        pword_elem.clear()\n        pword_elem.send_keys(self._pword)\n\n        def creds_entered(_):\n            'Returns whether the credentials were properly entered.'\n            email_ok = (email_elem.get_attribute('value') == self._uname)\n            pword_ok = (pword_elem.get_attribute('value') == self._pword)\n            return (email_ok and pword_ok)\n        kcr_page_loaded = (lambda br: (br.title == u'Kindle Cloud Reader'))\n        try:\n            self._wait(5).until(creds_entered)\n            self.find_element_by_id('signInSubmit-input').click()\n            self._wait(5).until(kcr_page_loaded)\n        except TimeoutException:\n            tries += 1\n        else:\n            return\n    raise LoginError", "docstring": "Logs in to Kindle Cloud Reader.\n\nArgs:\nmax_tries: The maximum number of login attempts that will be made.\n\nRaises:\nBrowserError: If method called when browser not at a signin URL.\nLoginError: If login unsuccessful after `max_tries` attempts.", "source": "codesearchnet"}
{"code": "async def make_request(self, redirect=False):\n    h11_connection = h11.Connection(our_role=h11.CLIENT)\n    (self.scheme, self.host, self.path, self.uri_parameters, self.query, _) = urlparse(self.uri)\n    if (not redirect):\n        self.initial_scheme = self.scheme\n        self.initial_netloc = self.host\n    host = (self.host if ((self.port == '80') or (self.port == '443')) else ((self.host.split(':')[0] + ':') + self.port))\n    asks_headers = c_i_dict([('Host', host), ('Connection', 'keep-alive'), ('Accept-Encoding', 'gzip, deflate'), ('Accept', '*/*'), ('Content-Length', '0'), ('User-Agent', 'python-asks/2.2.2')])\n    if (self.persist_cookies is not None):\n        self.cookies.update(self.persist_cookies.get_additional_cookies(self.host, self.path))\n    self._build_path()\n    body = ''\n    if any((self.data, self.files, (self.json is not None))):\n        (content_type, content_len, body) = (await self._formulate_body())\n        asks_headers['Content-Type'] = content_type\n        asks_headers['Content-Length'] = content_len\n    if (self.headers is not None):\n        asks_headers.update(self.headers)\n    if (self.auth is not None):\n        asks_headers.update((await self._auth_handler_pre()))\n        asks_headers.update((await self._auth_handler_post_get_auth()))\n    if self.cookies:\n        cookie_str = ''\n        for (k, v) in self.cookies.items():\n            cookie_str += '{}={}; '.format(k, v)\n        asks_headers['Cookie'] = cookie_str[:(- 1)]\n    if body:\n        if (not isinstance(body, bytes)):\n            body = bytes(body, self.encoding)\n            asks_headers['Content-Length'] = str(len(body))\n        req_body = h11.Data(data=body)\n    else:\n        req_body = None\n    req = h11.Request(method=self.method, target=self.path, headers=asks_headers.items())\n    response_obj = (await self._request_io(req, req_body, h11_connection))\n    if redirect:\n        if (not ((self.scheme == self.initial_scheme) and (self.host == self.initial_netloc))):\n            self.sock._active = False\n    if self.streaming:\n        return (None, response_obj)\n    return (self.sock, response_obj)", "docstring": "Acts as the central hub for preparing requests to be sent, and\nreturning them upon completion. Generally just pokes through\nself's attribs and makes decisions about what to do.\n\nReturns:\nsock: The socket to be returned to the calling session's\npool.\nResponse: The response object, after any redirects. If there were\nredirects, the redirect responses will be stored in the final\nresponse object's `.history`.", "source": "codesearchnet"}
{"code": "def _readline(self):\n    if (len(self.lines) > 1):\n        return self.lines.pop(0)\n    tail = ''\n    if len(self.lines):\n        tail = self.lines.pop()\n    try:\n        tail += self._read()\n    except socket.error:\n        logging.exception('No new data')\n        time.sleep(0.1)\n    self.lines += linesepx.split(tail)\n    if (len(self.lines) > 1):\n        return self.lines.pop(0)", "docstring": "Read exactly one line from the device, nonblocking.\n\nReturns:\nNone on no data", "source": "codesearchnet"}
{"code": "def split(content: AsyncIterable[_T], *, n: int=2, with_copy: bool=False) -> tuple[AsyncIterable[_T], ...]:\n    if n == 0:\n        raise ValueError('Cannot split a stream in n=0 streams.')\n    if n == 1:\n        return (content,)\n    queues = [asyncio.Queue() for _ in range(n)]\n\n    async def enqueue() -> None:\n        async for part in content:\n            for queue in queues:\n                if with_copy:\n                    queue.put_nowait(copy.deepcopy(part))\n                else:\n                    queue.put_nowait(part)\n        for queue in queues:\n            queue.put_nowait(None)\n\n    async def dequeue(queue: asyncio.Queue[_T]) -> AsyncIterable[_T]:\n        while (part := (await queue.get())) is not None:\n            yield part\n    context.create_task(enqueue())\n    return tuple((dequeue(queue) for queue in queues))", "docstring": "Split a stream into `n` identical streams.\n\nRecommended to be used with processor.context to ensure error propagation.\n\nArgs:\ncontent: content to be split\nn: number of streams to return\nwith_copy: whether to copy the items of the streams or not. It is\nrecommended to copy the items when side effects between streams can\nhappen. This is the case when one processor changes a part in place (e.g.\nupdate its metadata). As this can be expensive if the items are large and\nthe number of streams is high, the default is to not copy. Consider\nsetting this to True if there is a chance that a part can be modified in\nplace.\n\nReturns:\nn streams of content.\n\nRaises:\nValueError if n=0", "source": "github-repos"}
{"code": "def find_overlaps(self, index=False):\n    return self.__find_incongruities(op=operator.gt, index=index)", "docstring": "Find overlaps in a striplog.\n\nArgs:\nindex (bool): If True, returns indices of intervals with\ngaps after them.\n\nReturns:\nStriplog: A striplog of all the overlaps as intervals.", "source": "codesearchnet"}
{"code": "def _find_experiment_tag(self):\n    with self._experiment_from_tag_lock:\n        if (self._experiment_from_tag is None):\n            mapping = self.multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)\n            for tag_to_content in mapping.values():\n                if (metadata.EXPERIMENT_TAG in tag_to_content):\n                    self._experiment_from_tag = metadata.parse_experiment_plugin_data(tag_to_content[metadata.EXPERIMENT_TAG])\n                    break\n    return self._experiment_from_tag", "docstring": "Finds the experiment associcated with the metadata.EXPERIMENT_TAG tag.\n\nCaches the experiment if it was found.\n\nReturns:\nThe experiment or None if no such experiment is found.", "source": "codesearchnet"}
{"code": "def check_syntax(self, app_path=None):\n        \n        app_path = app_path or '.'\n\n        for filename in sorted(os.listdir(app_path)):\n            error = None\n            status = True\n            if filename.endswith('.py'):\n                try:\n                    with open(filename, 'rb') as f:\n                        ast.parse(f.read(), filename=filename)\n                except SyntaxError:\n                    status = False\n                    \n                    e = []\n                    for line in traceback.format_exc().split('\\n')[-5:-2]:\n                        e.append(line.strip())\n                    error = ' '.join(e)\n\n            elif filename.endswith('.json'):\n                try:\n                    with open(filename, 'r') as fh:\n                        json.load(fh)\n                except ValueError as e:\n                    status = False\n                    error = e\n            else:\n                \n                continue\n\n            if error:\n                \n                self.validation_data['errors'].append(\n                    'Syntax validation failed for {} ({}).'.format(filename, error)\n                )\n\n            \n            self.validation_data['fileSyntax'].append({'filename': filename, 'status': status})", "docstring": "Run syntax on each \".py\" and \".json\" file.\n\nArgs:\napp_path (str, optional): Defaults to None. The path of Python files.", "source": "juraj-google-style"}
{"code": "def get_session(region, profile=None):\n    if (profile is None):\n        logger.debug('No AWS profile explicitly provided. Falling back to default.')\n        profile = default_profile\n    logger.debug(('Building session using profile \"%s\" in region \"%s\"' % (profile, region)))\n    session = boto3.Session(region_name=region, profile_name=profile)\n    c = session._session.get_component('credential_provider')\n    provider = c.get_provider('assume-role')\n    provider.cache = credential_cache\n    provider._prompter = ui.getpass\n    return session", "docstring": "Creates a boto3 session with a cache\n\nArgs:\nregion (str): The region for the session\nprofile (str): The profile for the session\n\nReturns:\n:class:`boto3.session.Session`: A boto3 session with\ncredential caching", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    return max([parser.matches(line) for _, parser in self.LINE_STRUCTURES])", "docstring": "Verifies that this is an apache access log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def receive_data(socket):\n    \n    answer = b\"\"\n    while True:\n        packet = socket.recv(4096)\n        if not packet: break\n        answer += packet\n    response = pickle.loads(answer)\n    socket.close()\n    return response", "docstring": "Receive an answer from the daemon and return the response.\n\nArgs:\nsocket (socket.socket): A socket that is connected to the daemon.\n\nReturns:\ndir or string: The unpickled answer.", "source": "juraj-google-style"}
{"code": "def symbolize(flt: float) -> sympy.Symbol:\n    try:\n        ratio = rationalize(flt)\n        res = sympy.simplify(ratio)\n    except ValueError:\n        ratio = rationalize((flt / np.pi))\n        res = (sympy.simplify(ratio) * sympy.pi)\n    return res", "docstring": "Attempt to convert a real number into a simpler symbolic\nrepresentation.\n\nReturns:\nA sympy Symbol. (Convert to string with str(sym) or to latex with\nsympy.latex(sym)\nRaises:\nValueError:     If cannot simplify float", "source": "codesearchnet"}
{"code": "def devices(self):\n    return self._device_names", "docstring": "Get the list of device names.\n\nReturns:\n(`list` of `str`) names of the devices.", "source": "github-repos"}
{"code": "def _ReadRecord(self, tables, file_object, record_offset, record_type):\n    table = tables.get(record_type, None)\n    if (not table):\n        raise errors.ParseError('Missing table for relation identifier: 0x{0:08}'.format(record_type))\n    record_header = self._ReadRecordHeader(file_object, record_offset)\n    record = collections.OrderedDict()\n    if table.columns:\n        attribute_value_offsets = self._ReadRecordAttributeValueOffset(file_object, (record_offset + 24), len(table.columns))\n    file_offset = file_object.tell()\n    record_data_offset = (file_offset - record_offset)\n    record_data_size = (record_header.data_size - (file_offset - record_offset))\n    record_data = file_object.read(record_data_size)\n    if (record_header.key_data_size > 0):\n        record['_key_'] = record_data[:record_header.key_data_size]\n    if table.columns:\n        for (index, column) in enumerate(table.columns):\n            attribute_data_read_function = self._ATTRIBUTE_DATA_READ_FUNCTIONS.get(column.attribute_data_type, None)\n            if attribute_data_read_function:\n                attribute_data_read_function = getattr(self, attribute_data_read_function, None)\n            if (not attribute_data_read_function):\n                attribute_value = None\n            else:\n                attribute_value = attribute_data_read_function(record_data, record_offset, record_data_offset, attribute_value_offsets[index])\n            record[column.attribute_name] = attribute_value\n    table.records.append(record)", "docstring": "Reads the record.\n\nArgs:\ntables (dict[int, KeychainDatabaseTable]): tables per identifier.\nfile_object (file): file-like object.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\nrecord_type (int): record type, which should correspond to a relation\nidentifier of a table defined in the schema.\n\nRaises:\nParseError: if the record cannot be read.", "source": "codesearchnet"}
{"code": "def delete(self, remove_tombstone=True):\n    response = self.repo.api.http_request('DELETE', self.uri)\n    if (response.status_code == 204):\n        self._empty_resource_attributes()\n    if remove_tombstone:\n        self.repo.api.http_request('DELETE', ('%s/fcr:tombstone' % self.uri))\n    return True", "docstring": "Method to delete resources.\n\nArgs:\nremove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.\n\nReturns:\n(bool)", "source": "codesearchnet"}
{"code": "def get_all_pipelines(app=''):\n    \n    url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n\n    assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)\n\n    pipelines = response.json()\n    LOG.debug('Pipelines:\\n%s', pipelines)\n\n    return pipelines", "docstring": "Get a list of all the Pipelines in _app_.\n\nArgs:\napp (str): Name of Spinnaker Application.\n\nReturns:\nrequests.models.Response: Response from Gate containing Pipelines.", "source": "juraj-google-style"}
{"code": "def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):\n    if (pandas is None):\n        raise ValueError(_NO_PANDAS_ERROR)\n    return pandas.DataFrame()", "docstring": "Create an empty dataframe.\n\nArgs:\nbqstorage_client (Any):\nIgnored. Added for compatibility with RowIterator.\ndtypes (Any):\nIgnored. Added for compatibility with RowIterator.\nprogress_bar_type (Any):\nIgnored. Added for compatibility with RowIterator.\n\nReturns:\npandas.DataFrame:\nAn empty :class:`~pandas.DataFrame`.", "source": "codesearchnet"}
{"code": "def _do_logon(self):\n    if (self._userid is None):\n        raise ClientAuthError('Userid is not provided.')\n    if (self._password is None):\n        if self._get_password:\n            self._password = self._get_password(self._host, self._userid)\n        else:\n            raise ClientAuthError('Password is not provided.')\n    logon_uri = '/api/sessions'\n    logon_body = {'userid': self._userid, 'password': self._password}\n    self._headers.pop('X-API-Session', None)\n    self._session = self._new_session(self.retry_timeout_config)\n    logon_res = self.post(logon_uri, logon_body, logon_required=False)\n    self._session_id = logon_res['api-session']\n    self._headers['X-API-Session'] = self._session_id", "docstring": "Log on, unconditionally. This can be used to re-logon.\nThis requires credentials to be provided.\n\nRaises:\n\n:exc:`~zhmcclient.ClientAuthError`\n:exc:`~zhmcclient.ServerAuthError`\n:exc:`~zhmcclient.ConnectionError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.HTTPError`", "source": "codesearchnet"}
{"code": "def modified_lu(q):\n    \n    q = q.assemble()\n    m, b = q.shape[0], q.shape[1]\n    S = np.zeros(b)\n\n    q_work = np.copy(q)\n\n    for i in range(b):\n        S[i] = -1 * np.sign(q_work[i, i])\n        q_work[i, i] -= S[i]\n        \n        q_work[(i + 1):m, i] /= q_work[i, i]\n        \n        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],\n                                                 q_work[i, (i + 1):b])\n\n    L = np.tril(q_work)\n    for i in range(b):\n        L[i, i] = 1\n    U = np.triu(q_work)[:b, :]\n    \n    return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S", "docstring": "Perform a modified LU decomposition of a matrix.\n\nThis takes a matrix q with orthonormal columns, returns l, u, s such that\nq - s = l * u.\n\nArgs:\nq: A two dimensional orthonormal matrix q.\n\nReturns:\nA tuple of a lower triangular matrix l, an upper triangular matrix u,\nand a a vector representing a diagonal matrix s such that\nq - s = l * u.", "source": "juraj-google-style"}
{"code": "def _convert_pandas_csv_options(pandas_options, columns):\n    \n\n    _columns = pandas_options.pop('names', columns)\n    header = pandas_options.pop('header', None)\n    pandas_options.pop('encoding', None)\n\n    if header == 'infer':\n        header_line_number = 0 if not bool(_columns) else None\n    else:\n        header_line_number = header\n\n    return _columns, header_line_number", "docstring": "Translate `pd.read_csv()` options into `pd.DataFrame()` especially for header.\n\nArgs:\npandas_option (dict):\npandas options like {'header': None}.\ncolumns (list):\nlist of column name.", "source": "juraj-google-style"}
{"code": "def bruteVersionStr(self, valu):\n        \n        try:\n            valu, info = self.core.model.type('it:semver').norm(valu)\n            subs = info.get('subs')\n            return valu, subs\n        except s_exc.BadTypeValu:\n            \n            subs = s_version.parseVersionParts(valu)\n            if subs is None:\n                raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',\n                                        mesg='Unable to brute force version parts out of the string')\n            if subs:\n                valu = s_version.packVersion(subs.get('major'),\n                                             subs.get('minor', 0),\n                                             subs.get('patch', 0))\n                return valu, subs", "docstring": "Brute force the version out of a string.\n\nArgs:\nvalu (str): String to attempt to get version information for.\n\nNotes:\nThis first attempts to parse strings using the it:semver normalization\nbefore attempting to extract version parts out of the string.\n\nReturns:\nint, dict: The system normalized version integer and a subs dictionary.", "source": "juraj-google-style"}
{"code": "def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):\n    forbidden_item_ids = (set() if (forbidden_item_ids is None) else set(forbidden_item_ids))\n    children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)\n    counts = self.get_children_counts(active=None)\n    if (item_ids is None):\n        item_ids = set(children.keys())\n\n    def _get_leaves(item_id):\n        leaves = set()\n\n        def __search(item_ids):\n            result = set(flatten([children.get(item_id, []) for item_id in item_ids]))\n            new_leaves = {item_id for item_id in result if (item_id not in children.keys())}\n            leaves.update(new_leaves)\n            return (result - new_leaves)\n        fixed_point(is_zero=(lambda to_visit: (len(to_visit) == 0)), minus=(lambda to_visit, visited: (to_visit - visited)), plus=(lambda visited_x, visited_y: (visited_x | visited_y)), f=__search, x={item_id})\n        leaves = {leaf for leaf in leaves if (counts[leaf] == 0)}\n        if (len(leaves) > 0):\n            return leaves\n        if ((counts[item_id] == 0) and (item_id not in forbidden_item_ids)):\n            return {item_id}\n        return set()\n    return {item_id: _get_leaves(item_id) for item_id in item_ids}", "docstring": "Get mapping of items to their reachable leaves. Leaves having\ninactive relations to other items are omitted.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\ndict: item id -> list of items (reachable leaves)", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor):\n    if hidden_states.size(-1) != self.dim_norm:\n        raise AssertionError('hidden_states.size(-1) != self.dim_norm')\n    old_dtype = hidden_states.dtype\n    variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)\n    hidden_states = (hidden_states * torch.rsqrt(variance + self.eps)).to(old_dtype) * self.weight\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)", "source": "github-repos"}
{"code": "def update(self, teamId, name=None, **request_parameters):\n    check_type(teamId, basestring, may_be_none=False)\n    check_type(name, basestring)\n    put_data = dict_from_items_with_values(request_parameters, name=name)\n    json_data = self._session.put(((API_ENDPOINT + '/') + teamId), json=put_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Update details for a team, by ID.\n\nArgs:\nteamId(basestring): The team ID.\nname(basestring): A user-friendly name for the team.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nTeam: A Team object with the updated Webex Teams team details.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def get_structure_from_mp(formula):\n    \n    m = MPRester()\n    entries = m.get_entries(formula, inc_structure=\"final\")\n    if len(entries) == 0:\n        raise ValueError(\"No structure with formula %s in Materials Project!\" %\n                         formula)\n    elif len(entries) > 1:\n        warnings.warn(\"%d structures with formula %s found in Materials \"\n                      \"Project. The lowest energy structure will be returned.\" %\n                      (len(entries), formula))\n    return min(entries, key=lambda e: e.energy_per_atom).structure", "docstring": "Convenience method to get a crystal from the Materials Project database via\nthe API. Requires PMG_MAPI_KEY to be set.\n\nArgs:\nformula (str): A formula\n\nReturns:\n(Structure) The lowest energy structure in Materials Project with that\nformula.", "source": "juraj-google-style"}
{"code": "def cancelMktDepth(self, contract: Contract, isSmartDepth=False):\n        \n        ticker = self.ticker(contract)\n        reqId = self.wrapper.endTicker(ticker, 'mktDepth')\n        if reqId:\n            self.client.cancelMktDepth(reqId, isSmartDepth)\n        else:\n            self._logger.error(\n                f'cancelMktDepth: No reqId found for contract {contract}')", "docstring": "Unsubscribe from market depth data.\n\nArgs:\ncontract: The exact contract object that was used to\nsubscribe with.", "source": "juraj-google-style"}
{"code": "def isHostCert(self, name):\n    crtpath = self._getPathJoin('hosts', ('%s.crt' % name))\n    return os.path.isfile(crtpath)", "docstring": "Checks if a host certificate exists.\n\nArgs:\nname (str): The name of the host keypair.\n\nExamples:\nCheck if the host cert \"myhost\" exists:\n\nexists = cdir.isUserCert('myhost')\n\nReturns:\nbool: True if the certificate is present, False otherwise.", "source": "codesearchnet"}
{"code": "def _wrap_and_check_metrics(self, metrics):\n    if not isinstance(metrics, dict):\n        metrics = {self.METRICS_NAME: metrics}\n    outputs = {}\n    for key, value in metrics.items():\n        if isinstance(value, tuple):\n            metric_val, metric_op = value\n        else:\n            metric_val = value.result()\n            assert len(value.updates) == 1\n            metric_op = value.updates[0]\n        key = self._check_output_key(key, self.METRICS_NAME)\n        key = self._prefix_key(key, self.METRICS_NAME)\n        val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX\n        op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX\n        if not isinstance(metric_val, tensor.Tensor):\n            raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))\n        if not (tensor_util.is_tf_type(metric_op) or isinstance(metric_op, ops.Operation)):\n            raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))\n        metric_op_tensor = metric_op\n        if not isinstance(metric_op, tensor.Tensor):\n            with ops.control_dependencies([metric_op]):\n                metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')\n        outputs[val_name] = metric_val\n        outputs[op_name] = metric_op_tensor\n    return outputs", "docstring": "Handle the saving of metrics.\n\nMetrics is either a tuple of (value, update_op), or a dict of such tuples.\nHere, we separate out the tuples and create a dict with names to tensors.\n\nArgs:\nmetrics: Dict of metric results keyed by name.\nThe values of the dict can be one of the following:\n(1) instance of `Metric` class.\n(2) (metric_value, update_op) tuples, or a single tuple.\nmetric_value must be a Tensor, and update_op must be a Tensor or Op.\n\nReturns:\ndict of output_names to tensors\n\nRaises:\nValueError: if the dict key is not a string, or the metric values or ops\nare not tensors.", "source": "github-repos"}
{"code": "def diff_contains_doc_examples(repo: Repo, branching_point: str, filename: str) -> bool:\n    folder = Path(repo.working_dir)\n    with checkout_commit(repo, branching_point):\n        with open(folder / filename, 'r', encoding='utf-8') as f:\n            old_content = f.read()\n    with open(folder / filename, 'r', encoding='utf-8') as f:\n        new_content = f.read()\n    old_content_clean = keep_doc_examples_only(old_content)\n    new_content_clean = keep_doc_examples_only(new_content)\n    return old_content_clean != new_content_clean", "docstring": "Check if the diff is only in code examples of the doc in a filename.\n\nArgs:\nrepo (`git.Repo`): A git repository (for instance the Transformers repo).\nbranching_point (`str`): The commit reference of where to compare for the diff.\nfilename (`str`): The filename where we want to know if the diff is only in codes examples.\n\nReturns:\n`bool`: Whether the diff is only in code examples of the doc or not.", "source": "github-repos"}
{"code": "def __init__(self, message):\n        \n        super(KeyCompressionTypeNotSupported, self).__init__(\n            reason=enums.ResultReason.KEY_COMPRESSION_TYPE_NOT_SUPPORTED,\n            message=message\n        )", "docstring": "Create a KeyCompressionTypeNotSupported exception.\n\nArgs:\nmessage (string): A string containing information about the error.", "source": "juraj-google-style"}
{"code": "def run_without_time_limit(self, cmd):\n    \n    cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd\n    logging.info('Docker command: %s', ' '.join(cmd))\n    start_time = time.time()\n    retval = subprocess.call(cmd)\n    elapsed_time_sec = int(time.time() - start_time)\n    logging.info('Elapsed time of attack: %d', elapsed_time_sec)\n    logging.info('Docker retval: %d', retval)\n    if retval != 0:\n      logging.warning('Docker returned non-zero retval: %d', retval)\n      raise WorkerError('Docker returned non-zero retval ' + str(retval))\n    return elapsed_time_sec", "docstring": "Runs docker command without time limit.\n\nArgs:\ncmd: list with the command line arguments which are passed to docker\nbinary\n\nReturns:\nhow long it took to run submission in seconds\n\nRaises:\nWorkerError: if error occurred during execution of the submission", "source": "juraj-google-style"}
{"code": "def build(cls, local_scheduler=True, **task_params):\n        \n        luigi.build([cls(**task_params)], local_scheduler=local_scheduler)", "docstring": "Instantiate the task and build it with luigi\n\nArgs:\nlocal_scheduler (bool): use a local scheduler (True, default) or a remote scheduler\ntask_params: parameters to pass to task for instantiation", "source": "juraj-google-style"}
{"code": "def sg_summary_image(tensor, prefix=None, name=None):\n    prefix = ('' if (prefix is None) else (prefix + '/'))\n    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))\n    if (not tf.get_variable_scope().reuse):\n        tf.summary.image((name + '-im'), tensor)", "docstring": "r\"\"\"Register `tensor` to summary report as `image`\n\nArgs:\ntensor: A tensor to log as image\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def check_R_package(self, package):\n        \n        test_package = not bool(launch_R_script(\"{}/R_templates/test_import.R\".format(os.path.dirname(os.path.realpath(__file__))),                                      {\"{package}\": package}, verbose=True))\n        return test_package", "docstring": "Execute a subprocess to check the package's availability.\n\nArgs:\npackage (str): Name of the package to be tested.\n\nReturns:\nbool: `True` if the package is available, `False` otherwise", "source": "juraj-google-style"}
{"code": "def load_data_table(table_name, meta_file, meta):\n    \n    for table in meta['tables']:\n        if table['name'] == table_name:\n            prefix = os.path.dirname(meta_file)\n            relative_path = os.path.join(prefix, meta['path'], table['path'])\n            return pd.read_csv(relative_path), table", "docstring": "Return the contents and metadata of a given table.\n\nArgs:\ntable_name(str): Name of the table.\nmeta_file(str): Path to the meta.json file.\nmeta(dict): Contents of meta.json.\n\nReturns:\ntuple(pandas.DataFrame, dict)", "source": "juraj-google-style"}
{"code": "def from_list(index, queues):\n    if not queues or not isinstance(queues, list) or (not all((isinstance(x, QueueBase) for x in queues))):\n        raise TypeError('A list of queues expected')\n    dtypes = queues[0].dtypes\n    if not all((dtypes == q.dtypes for q in queues[1:])):\n        raise TypeError('Queues do not have matching component dtypes.')\n    names = queues[0].names\n    if not all((names == q.names for q in queues[1:])):\n        raise TypeError('Queues do not have matching component names.')\n    queue_shapes = [q.shapes for q in queues]\n    reduced_shapes = [functools.reduce(_shape_common, s) for s in zip(*queue_shapes)]\n    queue_refs = array_ops_stack.stack([x.queue_ref for x in queues])\n    selected_queue = array_ops.gather(queue_refs, index)\n    return QueueBase(dtypes=dtypes, shapes=reduced_shapes, names=names, queue_ref=selected_queue)", "docstring": "Create a queue using the queue reference from `queues[index]`.\n\nArgs:\nindex: An integer scalar tensor that determines the input that gets\nselected.\nqueues: A list of `QueueBase` objects.\n\nReturns:\nA `QueueBase` object.\n\nRaises:\nTypeError: When `queues` is not a list of `QueueBase` objects,\nor when the data types of `queues` are not all the same.", "source": "github-repos"}
{"code": "def add_dynamic_element(self, name, description):\n    self._pb.add(Name=name, Description=description, Value='*')\n    return self", "docstring": "Adds a dynamic namespace element to the end of the Namespace.\n\nA dynamic namespace element is defined by an element that contains a\nnon-static data relative to the metric being collected.  For instance,\nwhen collecting metrics for a given virtual machine the namespace\nelement that contains the virtual-machine-id would be dynamic.  This is\nmodeled by the a NamespaceElement when its `name` attribute contains the\nvalue 'virtual-machine-id'.  In this example the `value` attribute would\nbe set to the ID of the virtual machine when the metric is collected.\n\nArgs:\nvalue (:py:class:`snap_plugin.v1.namespace_element.NamespaceElement`):\nnamespace element\n\nReturns:\n:py:class:`snap_plugin.v1.namespace.Namespace`", "source": "codesearchnet"}
{"code": "def get_location(self, locations=None):\n        \n        \n        countries = self.data.get('groups', None)\n        if not countries:\n            return list()\n        return [Locations.get_location_from_HDX_code(x['name'], locations=locations,\n                                                     configuration=self.configuration) for x in countries]", "docstring": "Return the dataset's location\n\nArgs:\nlocations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.\n\nReturns:\nList[str]: list of locations or [] if there are none", "source": "juraj-google-style"}
{"code": "def _validate_alias_command(alias_command):\n    \n    if not alias_command:\n        raise CLIError(EMPTY_ALIAS_ERROR)\n\n    split_command = shlex.split(alias_command)\n    boundary_index = len(split_command)\n    for i, subcommand in enumerate(split_command):\n        if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:\n            boundary_index = i\n            break\n\n    \n    command_to_validate = ' '.join(split_command[:boundary_index]).lower()\n    for command in azext_alias.cached_reserved_commands:\n        if re.match(r'([a-z\\-]*\\s)*{}($|\\s)'.format(command_to_validate), command):\n            return\n\n    _validate_positional_arguments(shlex.split(alias_command))", "docstring": "Check if the alias command is valid.\n\nArgs:\nalias_command: The command to validate.", "source": "juraj-google-style"}
{"code": "def function_cyl_co(script, r_func='r', theta_func='theta', z_func='z'):\n    r = 'sqrt(x^2+y^2)'\n    if (isinstance(script, FilterScript) and (script.ml_version >= '2016.12')):\n        theta = 'atan2(y, x)'\n    else:\n        theta = mp_func.mp_atan2('y', 'x')\n    r_func = re.sub('\\\\br\\\\b', r, r_func).replace('theta', theta)\n    theta_func = re.sub('\\\\br\\\\b', r, theta_func).replace('theta', theta)\n    z_func = re.sub('\\\\br\\\\b', r, z_func).replace('theta', theta)\n    x_func = '(r)*cos(theta)'.replace('r', r_func).replace('theta', theta_func)\n    y_func = '(r)*sin(theta)'.replace('r', r_func).replace('theta', theta_func)\n    vert_function(script, x_func, y_func, z_func)\n    return None", "docstring": "Geometric function using cylindrical coordinates.\n\nDefine functions in Z up cylindrical coordinates, with radius 'r',\nangle 'theta', and height 'z'\n\nSee \"function\" docs for additional usage info and accepted parameters.\n\nArgs:\nr_func (str): function to generate new coordinates for radius\ntheta_func (str): function to generate new coordinates for angle.\n0 degrees is on the +X axis.\nz_func (str): function to generate new coordinates for height\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def get_choices(field):\n\t\t\n\tempty_label = getattr(field.field, \"empty_label\", False)\n\tneeds_empty_value = False\n\tchoices = []\n\n\t\n\tif hasattr(field.field, \"_choices\"):\n\t\tchoices = field.field._choices\n\n\t\n\telif hasattr(field.field, \"_queryset\"):\n\t\tqueryset = field.field._queryset\n\t\tfield_name = getattr(field.field, \"to_field_name\") or \"pk\"\n\t\tchoices += ((getattr(obj, field_name), str(obj)) for obj in queryset)\n\n\t\n\tif choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):\n\t\tneeds_empty_value = True\n\n\t\t\n\t\tif not choices[0][0]:\n\t\t\tdel choices[0]\n\n\t\n\tif empty_label == BLANK_CHOICE_DASH[0][1]:\n\t\tempty_label = None\n\n\t\n\tif empty_label or not field.field.required:\n\t\tif needs_empty_value:\n\t\t\tchoices.insert(0, (\"\", empty_label or BLANK_CHOICE_DASH[0][1]))\n\n\treturn choices", "docstring": "Find choices of a field, whether it has choices or has a queryset.\n\nArgs:\nfield (BoundField): Django form boundfield\n\nReturns:\nlist: List of choices", "source": "juraj-google-style"}
{"code": "def __init__(self, cflags):\n        \n        super(cxx_standard, self).__init__()\n\n        self._stdcxx = None\n        self._is_implicit = False\n        for key in cxx_standard.__STD_CXX:\n            if key in cflags:\n                self._stdcxx = key\n                self._cplusplus = cxx_standard.__STD_CXX[key]\n\n        if not self._stdcxx:\n            if '-std=' in cflags:\n                raise RuntimeError('Unknown -std=c++xx flag used')\n\n            \n            self._stdcxx = '-std=c++03'\n            self._cplusplus = cxx_standard.__STD_CXX['-std=c++03']\n            self._is_implicit = True", "docstring": "Class constructor that parses the XML generator's command line\n\nArgs:\ncflags (str): cflags command line arguments passed to the XML\ngenerator", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data):\n    self.arguments['{SCORE}'] = self.score\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    self.arguments['{BETA}'] = str(self.beta)\n    self.arguments['{OPTIM}'] = str(self.optim).upper()\n    self.arguments['{ALPHA}'] = str(self.alpha)\n    results = self._run_bnlearn(data, verbose=self.verbose)\n    graph = nx.DiGraph()\n    graph.add_edges_from(results)\n    return graph", "docstring": "Run the algorithm on data.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the algorithm.", "source": "codesearchnet"}
{"code": "def is_line_in_file(filename: str, line: str) -> bool:\n    \n    assert \"\\n\" not in line\n    with open(filename, \"r\") as file:\n        for fileline in file:\n            if fileline == line:\n                return True\n        return False", "docstring": "Detects whether a line is present within a file.\n\nArgs:\nfilename: file to check\nline: line to search for (as an exact match)", "source": "juraj-google-style"}
{"code": "def _flush(self, buffer):\n        \n        container, obj = self._client_args\n        with _handle_client_exception():\n            self._client.put_object(container, obj, buffer)", "docstring": "Flush the write buffers of the stream if applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.", "source": "juraj-google-style"}
{"code": "def transform(self, X):\n    assert (np.shape(X)[0] == len(self._weights)), 'BlendingOptimizer: Number of models to blend its predictions and weights does not match: n_models={}, weights_len={}'.format(np.shape(X)[0], len(self._weights))\n    blended_predictions = (np.average(np.power(X, self._power), weights=self._weights, axis=0) ** (1.0 / self._power))\n    return {'y_pred': blended_predictions}", "docstring": "Performs predictions blending using the trained weights.\n\nArgs:\nX (array-like): Predictions of different models.\nReturns: dict with blended predictions (key is 'y_pred').", "source": "codesearchnet"}
{"code": "class IdeficsVisionEncoder(nn.Module):\n\n    def __init__(self, config: IdeficsVisionConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`IdeficsVisionEncoderLayer`].\n\nArgs:\nconfig: IdeficsVisionConfig", "source": "github-repos"}
{"code": "def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):\n        \n        return self.client.api.export(self.id, chunk_size)", "docstring": "Export the contents of the container's filesystem as a tar archive.\n\nArgs:\nchunk_size (int): The number of bytes returned by each iteration\nof the generator. If ``None``, data will be streamed as it is\nreceived. Default: 2 MB\n\nReturns:\n(str): The filesystem tar archive\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def union(self, *others: 'Substitution') -> 'Substitution':\n    new_subst = Substitution(self)\n    for other in others:\n        for (variable_name, replacement) in other.items():\n            new_subst.try_add_variable(variable_name, replacement)\n    return new_subst", "docstring": "Try to merge the substitutions.\n\nIf a variable occurs in multiple substitutions, try to merge the replacements.\nSee :meth:`union_with_variable` to see how replacements are merged.\n\nDoes not modify any of the original substitutions.\n\nExample:\n\n>>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})\n>>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})\n>>> print(subst1.union(subst2))\n{x ↦ (a, b), y ↦ (c), z ↦ a}\n\nArgs:\nothers:\nThe other substitutions to merge with this one.\n\nReturns:\nThe new substitution with the other substitutions merged.\n\nRaises:\nValueError:\nif a variable occurs in multiple substitutions but cannot be merged because the\nsubstitutions conflict.", "source": "codesearchnet"}
{"code": "def put(self, entity):\n    self._cur_batch.put(entity)\n    self._num_mutations += 1\n    if (self._num_mutations >= MAX_MUTATIONS_IN_BATCH):\n        self.commit()\n        self.begin()", "docstring": "Adds mutation of the entity to the mutation buffer.\n\nIf mutation buffer reaches its capacity then this method commit all pending\nmutations from the buffer and emties it.\n\nArgs:\nentity: entity which should be put into the datastore", "source": "codesearchnet"}
{"code": "def count_divisors(n):\n    if (not isinstance(n, int)):\n        raise TypeError('Expecting a strictly positive integer')\n    if (n <= 0):\n        raise ValueError('Expecting a strictly positive integer')\n    number_of_divisors = 1\n    remain = n\n    for p in prime_generator():\n        if (p > n):\n            return number_of_divisors\n        exponent = 1\n        while ((remain % p) == 0):\n            remain = (remain \n            exponent += 1\n        number_of_divisors *= exponent\n        if (remain == 1):\n            return number_of_divisors", "docstring": "Count the number of divisors of an integer n\n\nArgs:\nn (int): strictly positive integer\n\nReturns:\nThe number of distinct divisors of n\n\nRaises:\nTypeError: if n is not an integer\nValueError: if n is negative", "source": "codesearchnet"}
{"code": "def on_test_batch_end(self, batch, logs=None):", "docstring": "Called at the end of a batch in `evaluate` methods.\n\nAlso called at the end of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`Model` is set to `N`, this method will only be called every\n`N` batches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def add(self, element):\n    assert not self._committed\n    if not self._stacked:\n        self._elements.append(element)\n        return\n    if self._elements and isinstance(self._elements[-1], (WindowedValue, _Bundle._StackedWindowedValues)) and (self._elements[-1].timestamp == element.timestamp) and (self._elements[-1].windows == element.windows) and (self._elements[-1].pane_info == element.pane_info):\n        if isinstance(self._elements[-1], WindowedValue):\n            self._elements[-1] = _Bundle._StackedWindowedValues(self._elements[-1])\n        self._elements[-1].add_value(element.value)\n    else:\n        self._elements.append(element)", "docstring": "Outputs an element to this bundle.\n\nArgs:\nelement: WindowedValue", "source": "github-repos"}
{"code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self._variable.name.startswith(export_scope):\n        var_def = variable_pb2.VariableDef()\n        var_def.variable_name = ops.strip_name_scope(self._variable.name, export_scope)\n        if self._initial_value is not None:\n            var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)\n        var_def.trainable = self.trainable\n        var_def.synchronization = self.synchronization.value\n        var_def.aggregation = self.aggregation.value\n        var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)\n        var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name, export_scope)\n        if self._save_slice_info:\n            var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))\n        return var_def\n    else:\n        return None", "docstring": "Converts a `Variable` to a `VariableDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `VariableDef` protocol buffer, or `None` if the `Variable` is not\nin the specified name scope.", "source": "github-repos"}
{"code": "def filter_spent_outputs(self, outputs):\n        \n        links = [o.to_dict() for o in outputs]\n        txs = list(query.get_spending_transactions(self.connection, links))\n        spends = {TransactionLink.from_dict(input_['fulfills'])\n                  for tx in txs\n                  for input_ in tx['inputs']}\n        return [ff for ff in outputs if ff not in spends]", "docstring": "Remove outputs that have been spent\n\nArgs:\noutputs: list of TransactionLink", "source": "juraj-google-style"}
{"code": "def list_devices(device_type=None):\n    device_type = device_type.upper() if device_type else None\n    tf_devices = tf.config.list_logical_devices(device_type=device_type)\n    cpu_devices = []\n    other_devices = []\n    for device in tf_devices:\n        if device.device_type.lower() == 'cpu':\n            cpu_devices.append(device)\n        else:\n            other_devices.append(device)\n    if device_type is None:\n        tf_devices = other_devices if len(other_devices) > 0 else cpu_devices\n    return [f'{device.device_type.lower()}:{device.name.split(':')[-1]}' for device in tf_devices]", "docstring": "Return all the available devices based on the device type.\n\nNote that this should return the global devices in a distributed setting.\n\nArgs:\ndevice_type: string of `\"cpu\"`, `\"gpu\"` or `\"tpu\"`. Default to `gpu` or\n`tpu` if available when device_type is not provided. Otherwise will\nreturn the `cpu` devices.\n\nReturn:\nList of devices that are available for distribute computation.", "source": "github-repos"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates confusion matrix statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to `1`.\nCan be a tensor whose rank is either 0, or the same rank as\n`y_true`, and must be broadcastable to `y_true`.", "source": "github-repos"}
{"code": "def release_dates(self, **kwargs):\n    path = self._get_id_path('release_dates')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the release dates and certification for a specific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def to_csv(evset: EventSet, path: str, sep: str=',', na_rep: Optional[str]=None, columns: Optional[List[str]]=None):\n    df = to_pandas(evset)\n    df.to_csv(path, index=False, sep=sep, na_rep=na_rep, columns=columns)", "docstring": "Saves an [`EventSet`][temporian.EventSet] to a CSV file.\n\nExample:\n```python\n>>> output_path = str(tmp_dir / \"output_data.csv\")\n>>> evset = tp.event_set(timestamps=[1,], features={\"f1\": [0.1]})\n>>> tp.to_csv(evset, output_path)\n\n```\n\nArgs:\nevset: EventSet to save.\npath: Path to the file.\nsep: Separator to use.\nna_rep: Representation to use for missing values.\ncolumns: Columns to save. If `None`, saves all columns.", "source": "github-repos"}
{"code": "def idle(self, stop_signals: tuple=(SIGINT, SIGTERM, SIGABRT)):\n\n    def signal_handler(*args):\n        self.is_idle = False\n    for s in stop_signals:\n        signal(s, signal_handler)\n    self.is_idle = True\n    while self.is_idle:\n        time.sleep(1)\n    self.stop()", "docstring": "Blocks the program execution until one of the signals are received,\nthen gently stop the Client by closing the underlying connection.\n\nArgs:\nstop_signals (``tuple``, *optional*):\nIterable containing signals the signal handler will listen to.\nDefaults to (SIGINT, SIGTERM, SIGABRT).", "source": "codesearchnet"}
{"code": "def check_config(config, path):\n    messages = []\n    config_copy = get_frozen_copy(config)\n    missing_keys = (set(DEFAULT_CONFIG.keys()) - set(config_copy.keys()))\n    if missing_keys:\n        messages.append('Missing config keys {}!'.format(missing_keys))\n    for (key, value) in config_copy.items():\n        if (key not in DEFAULT_CONFIG):\n            messages.append('Unknown key {} in {}!'.format(key, path))\n            continue\n        if (value is None):\n            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))\n        else:\n            value_type = type(value)\n            if (isinstance(DEFAULT_CONFIG[key], Mapping) and ('by-cot-product' in DEFAULT_CONFIG[key])):\n                default_type = type(DEFAULT_CONFIG[key]['by-cot-product'][config['cot_product']])\n            else:\n                default_type = type(DEFAULT_CONFIG[key])\n            if (value_type is not default_type):\n                messages.append('{} {}: type {} is not {}!'.format(path, key, value_type, default_type))\n        if (value in ('...', b'...')):\n            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))\n        if ((key in ('provisioner_id', 'worker_group', 'worker_type', 'worker_id')) and (not _is_id_valid(value))):\n            messages.append('{} doesn\\'t match \"{}\" (required by Taskcluster)'.format(key, _GENERIC_ID_REGEX.pattern))\n    return messages", "docstring": "Validate the config against DEFAULT_CONFIG.\n\nAny unknown keys or wrong types will add error messages.\n\nArgs:\nconfig (dict): the running config.\npath (str): the path to the config file, used in error messages.\n\nReturns:\nlist: the error messages found when validating the config.", "source": "codesearchnet"}
{"code": "def _fill_parameters(self):\n    self._parameters = self._config.get('parameters', {})\n    self._fill_defaults()\n    for k in self._parameters.keys():\n        try:\n            if (self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']')):\n                parts = self._parameters[k].split(':')\n                tmp = parts[1].replace(']', '')\n                val = self._get_ssm_parameter(tmp)\n                if val:\n                    self._parameters[k] = val\n                else:\n                    logging.error('SSM parameter {} not found'.format(tmp))\n                    return False\n            elif (self._parameters[k] == self.ASK):\n                val = None\n                a1 = '__x___'\n                a2 = '__y___'\n                prompt1 = \"Enter value for '{}': \".format(k)\n                prompt2 = \"Confirm value for '{}': \".format(k)\n                while (a1 != a2):\n                    a1 = getpass.getpass(prompt=prompt1)\n                    a2 = getpass.getpass(prompt=prompt2)\n                    if (a1 == a2):\n                        val = a1\n                    else:\n                        print('values do not match, try again')\n                self._parameters[k] = val\n        except:\n            pass\n    return True", "docstring": "Fill in the _parameters dict from the properties file.\n\nArgs:\nNone\n\nReturns:\nTrue\n\nTodo:\nFigure out what could go wrong and at least acknowledge the the\nfact that Murphy was an optimist.", "source": "codesearchnet"}
{"code": "def _MergeEventTag(self, storage_writer, attribute_container):\n    \n    if attribute_container.CONTAINER_TYPE != 'event_tag':\n      return\n\n    event_identifier = attribute_container.GetEventIdentifier()\n    if not event_identifier:\n      return\n\n    \n    \n    stored_event_tag = self._event_tag_index.GetEventTagByIdentifier(\n        storage_writer, event_identifier)\n    if stored_event_tag:\n      attribute_container.AddComment(stored_event_tag.comment)\n      attribute_container.AddLabels(stored_event_tag.labels)\n\n    self._event_tag_index.SetEventTag(attribute_container)", "docstring": "Merges an event tag with the last stored event tag.\n\nIf there is an existing event the provided event tag is updated with\nthe contents of the existing one. After which the event tag index is\nupdated.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\nattribute_container (AttributeContainer): container.", "source": "juraj-google-style"}
{"code": "def wrap_layer_functions(layer, serialization_cache):\n    if isinstance(layer, keras_load.RevivedLayer) and (not isinstance(layer, sequential_lib.Sequential)):\n        return {fn_name: getattr(layer.keras_api, fn_name, None) for fn_name in serialized_attributes.LayerAttributes.all_functions}\n    original_fns = _replace_child_layer_functions(layer, serialization_cache)\n    original_losses = _reset_layer_losses(layer)\n    call_collection = LayerCallCollection(layer)\n    call_fn_with_losses = call_collection.add_function(_wrap_call_and_conditional_losses(layer), '{}_layer_call_and_return_conditional_losses'.format(layer.name), match_layer_training_arg=True)\n    call_fn = call_collection.add_function(_extract_outputs_from_fn(layer, call_fn_with_losses), '{}_layer_call_fn'.format(layer.name), match_layer_training_arg=False)\n    fns = {'call_and_return_conditional_losses': call_fn_with_losses, '__call__': call_fn}\n    if layer._activity_regularizer is not None:\n        fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)\n        fns['call_and_return_all_conditional_losses'] = call_collection.add_function(_append_activity_regularizer_loss(layer, call_fn_with_losses, fns['activity_regularizer_fn']), '{}_layer_call_and_return_all_conditional_losses'.format(layer.name), match_layer_training_arg=False)\n    else:\n        fns['activity_regularizer_fn'] = None\n        fns['call_and_return_all_conditional_losses'] = call_fn_with_losses\n    with tracing_scope():\n        call_collection.trace_with_input_signature()\n        with base_layer_utils.call_context().enter(layer, inputs=None, build_graph=True, training=None, saving=True):\n            for fn in fns.values():\n                if fn is not None and fn.input_signature is not None:\n                    if isinstance(fn, LayerCall):\n                        fn = fn.wrapped_call\n                    fn.get_concrete_function()\n    _restore_child_layer_functions(original_fns)\n    _restore_layer_losses(original_losses)\n    return fns", "docstring": "Returns dict of wrapped layer call function and losses in tf.functions.\n\nArgs:\nlayer: Keras Layer object.\nserialization_cache: Dictionary shared between all objects during\nserialization.\n\nReturns:\nA dictionary containing all keras tf.functions to serialize. See\nLayerAttributes and ModelAttributes for the list of all attributes.", "source": "github-repos"}
{"code": "def __init__(self,\n                 entries: Iterable[Tuple[int, TItem]] = (),\n                 *,\n                 drop_duplicate_entries: bool=False):\n        \n        self._buckets = []  \n        self._offset = 0\n        self._len = 0\n        self._drop_set = (set()\n                          if drop_duplicate_entries\n                          else None)  \n\n        for p, e in entries:\n            self.enqueue(p, e)", "docstring": "Initializes a new priority queue.\n\nArgs:\nentries: Initial contents of the priority queue.\ndrop_duplicate_entries: If set, the priority queue will ignore\noperations that enqueue a (priority, item) pair that is already\nin the priority queue. Note that duplicates of an item may still\nbe enqueued, as long as they have different priorities.", "source": "juraj-google-style"}
{"code": "def _project_dict(self, **kwargs: Dict[(str, Any)]) -> Dict[(str, Hist)]:\n    get_hist_args = copy.deepcopy(kwargs)\n    projection_name_args = copy.deepcopy(kwargs)\n    for (key, input_observable) in self.observable_to_project_from.items():\n        (output_hist, projection_name, projection_name_args) = self._project_observable(input_key=key, input_observable=input_observable, get_hist_args=get_hist_args, projection_name_args=projection_name_args, **kwargs)\n        output_hist_args = projection_name_args\n        output_hist_args.update({'output_hist': output_hist, 'projection_name': projection_name})\n        output_key_name = self.output_key_name(**output_hist_args)\n        self.output_observable[output_key_name] = self.output_hist(**output_hist_args)\n    return self.output_observable", "docstring": "Driver function for projecting and storing a dictionary of observables.\n\nArgs:\nkwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)\nReturns:\nThe projected histograms. The projected histograms are also stored in ``output_observable``.", "source": "codesearchnet"}
{"code": "def get_subscribed_services_names(cls):\n    accounts_for_service = Account.get_accounts_for_service\n    service_data = cls._get_music_services_data().values()\n    return [service['Name'] for service in service_data if (len(accounts_for_service(service['ServiceType'])) > 0)]", "docstring": "Get a list of the names of all subscribed music services.\n\nReturns:\nlist: A list of strings.", "source": "codesearchnet"}
{"code": "def sync_l(self, option: str='all') -> None:\n    if (option in ['system', 'vendor', 'oem', 'data', 'all']):\n        self._execute('-s', self.device_sn, 'sync', '-l', option)\n    else:\n        raise ValueError('There is no option named: {!r}.'.format(option))", "docstring": "List but don't copy.\n\nArgs:\noption: 'system', 'vendor', 'oem', 'data', 'all'", "source": "codesearchnet"}
{"code": "def _SigSegvHandler(self, signal_number, stack_frame):\n    \n    self._OnCriticalError()\n\n    \n    if self._original_sigsegv_handler is not None:\n      \n      signal.signal(signal.SIGSEGV, self._original_sigsegv_handler)\n      os.kill(self._pid, signal.SIGSEGV)", "docstring": "Signal handler for the SIGSEGV signal.\n\nArgs:\nsignal_number (int): numeric representation of the signal.\nstack_frame (frame): current stack frame or None.", "source": "juraj-google-style"}
{"code": "def from_maildir(self, codes: str) -> FrozenSet[Flag]:\n    flags = set()\n    for code in codes:\n        if (code == ','):\n            break\n        to_sys = self._to_sys.get(code)\n        if (to_sys is not None):\n            flags.add(to_sys)\n        else:\n            to_kwd = self._to_kwd.get(code)\n            if (to_kwd is not None):\n                flags.add(to_kwd)\n    return frozenset(flags)", "docstring": "Return the set of IMAP flags that correspond to the letter codes.\n\nArgs:\ncodes: The letter codes to map.", "source": "codesearchnet"}
{"code": "def load_api_folder(api_folder_path):\n    api_definition_mapping = {}\n    api_items_mapping = load_folder_content(api_folder_path)\n    for (api_file_path, api_items) in api_items_mapping.items():\n        if isinstance(api_items, list):\n            for api_item in api_items:\n                (key, api_dict) = api_item.popitem()\n                api_id = (api_dict.get('id') or api_dict.get('def') or api_dict.get('name'))\n                if ((key != 'api') or (not api_id)):\n                    raise exceptions.ParamsError('Invalid API defined in {}'.format(api_file_path))\n                if (api_id in api_definition_mapping):\n                    raise exceptions.ParamsError('Duplicated API ({}) defined in {}'.format(api_id, api_file_path))\n                else:\n                    api_definition_mapping[api_id] = api_dict\n        elif isinstance(api_items, dict):\n            if (api_file_path in api_definition_mapping):\n                raise exceptions.ParamsError('Duplicated API defined: {}'.format(api_file_path))\n            else:\n                api_definition_mapping[api_file_path] = api_items\n    return api_definition_mapping", "docstring": "load api definitions from api folder.\n\nArgs:\napi_folder_path (str): api files folder.\n\napi file should be in the following format:\n[\n{\n\"api\": {\n\"def\": \"api_login\",\n\"request\": {},\n\"validate\": []\n}\n},\n{\n\"api\": {\n\"def\": \"api_logout\",\n\"request\": {},\n\"validate\": []\n}\n}\n]\n\nReturns:\ndict: api definition mapping.\n\n{\n\"api_login\": {\n\"function_meta\": {\"func_name\": \"api_login\", \"args\": [], \"kwargs\": {}}\n\"request\": {}\n},\n\"api_logout\": {\n\"function_meta\": {\"func_name\": \"api_logout\", \"args\": [], \"kwargs\": {}}\n\"request\": {}\n}\n}", "source": "codesearchnet"}
{"code": "def prepare_xml_read(data, objectify=False):\n    \n    mod = _objectify if objectify else etree\n    if hasattr(data, 'readlines'):\n        data = mod.parse(data).getroot()\n    elif isinstance(data, list):\n        data = mod.fromstring(''.join(data))\n    elif isinstance(data, basestring):\n        data = mod.parse(open(data)).getroot()\n    else:\n        raise TypeError('Unable to handle data of type %r' % type(data))\n    return data", "docstring": "Prepare various input types for XML parsing.\n\nArgs:\ndata (iter): Data to read\nobjectify (bool): Parse using lxml's objectify data binding\n\nReturns:\netree.ElementTree: Tree suitable for parsing\n\nRaises:\nTypeError: Invalid value for data", "source": "juraj-google-style"}
{"code": "def _cancel_grpc(operations_stub, operation_name):\n    request_pb = operations_pb2.CancelOperationRequest(name=operation_name)\n    operations_stub.CancelOperation(request_pb)", "docstring": "Cancel an operation using a gRPC client.\n\nArgs:\noperations_stub (google.longrunning.operations_pb2.OperationsStub):\nThe gRPC operations stub.\noperation_name (str): The name of the operation.", "source": "codesearchnet"}
{"code": "def depth_april_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_april_average_ground_temperature`'.format(value))\n    self._depth_april_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_april_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_april_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n    output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states\n    output_attentions = output_attentions if output_attentions else self.config.output_attentions\n    return_dict = return_dict if return_dict else self.config.return_dict\n    outputs = self.hubert(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoProcessor, TFHubertModel\n>>> from datasets import load_dataset\n>>> import soundfile as sf\n\n>>> processor = AutoProcessor.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n>>> model = TFHubertModel.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n\n\n>>> def map_to_array(batch):\n...     speech, _ = sf.read(batch[\"file\"])\n...     batch[\"speech\"] = speech\n...     return batch\n\n\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> ds = ds.map(map_to_array)\n\n>>> input_values = processor(ds[\"speech\"][0], return_tensors=\"tf\").input_values  # Batch size 1\n>>> hidden_states = model(input_values).last_hidden_state\n```", "source": "github-repos"}
{"code": "def base_type(self, value):\n        \n        if value == self._defaults['baseType'] and 'baseType' in self._values:\n            del self._values['baseType']\n        else:\n            self._values['baseType'] = value", "docstring": "The base_type property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def create_app(config=None, config_obj=None):\n    \n    app = Flask(__name__)\n    \n    configure_app(app, config=config, config_obj=config_obj)\n    \n    register_blueprints(app)\n    \n    bind_extensions(app)\n    return app", "docstring": "Flask app factory function.\n\nArgs:\nconfig (Optional[path]): path to a Python module config file\nconfig_obj (Optional[class]): Python config object", "source": "juraj-google-style"}
{"code": "def __init__(self, return_type, cl_function_name, parameter_list, cl_code_file,\n                 var_replace_dict=None, **kwargs):\n        \n        self._var_replace_dict = var_replace_dict\n\n        with open(os.path.abspath(cl_code_file), 'r') as f:\n            code = f.read()\n\n        if var_replace_dict is not None:\n            code = code % var_replace_dict\n\n        super().__init__(return_type, cl_function_name, parameter_list, code, **kwargs)\n        self._code = code", "docstring": "Create a CL function for a library function.\n\nThese functions are not meant to be optimized, but can be used a helper functions in models.\n\nArgs:\ncl_function_name (str): The name of the CL function\ncl_code_file (str): The location of the code file\nvar_replace_dict (dict): In the cl_code file these replacements will be made\n(using the % format function of Python)", "source": "juraj-google-style"}
{"code": "def mock(self, url=None, **kw):\n    if kw.get('activate'):\n        kw.pop('activate')\n        self.activate()\n    mock = Mock(url=url, **kw)\n    mock._engine = self\n    self.add_mock(mock)\n    return mock", "docstring": "Creates and registers a new HTTP mock in the current engine.\n\nArguments:\nurl (str): request URL to mock.\nactivate (bool): force mock engine activation.\nDefaults to ``False``.\n**kw (mixed): variadic keyword arguments for ``Mock`` constructor.\n\nReturns:\npook.Mock: new mock instance.", "source": "codesearchnet"}
{"code": "def _process_tensor_event(self, event, thresholds):\n    \n    return self._make_pr_entry(\n        event.step,\n        event.wall_time,\n        tensor_util.make_ndarray(event.tensor_proto),\n        thresholds)", "docstring": "Converts a TensorEvent into a dict that encapsulates information on it.\n\nArgs:\nevent: The TensorEvent to convert.\nthresholds: An array of floats that ranges from 0 to 1 (in that\ndirection and inclusive of 0 and 1).\n\nReturns:\nA JSON-able dictionary of PR curve data for 1 step.", "source": "juraj-google-style"}
{"code": "def get_learning_rate(self, iter):\n        \n        lr = self.init_lr\n        for iter_step in self.iter_steps:\n            if iter >= iter_step:\n                lr *= self.gamma\n        return lr", "docstring": "Get learning rate with exponential decay based on current iteration.\n\nArgs:\niter (int): Current iteration (starting with 0).\n\nReturns:\nfloat: Learning rate", "source": "juraj-google-style"}
{"code": "def ctc_batch_cost(y_true, y_pred, input_length, label_length):\n    label_length = math_ops.cast(array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)\n    input_length = math_ops.cast(array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)\n    sparse_labels = math_ops.cast(ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)\n    y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())\n    return array_ops.expand_dims(ctc.ctc_loss(inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)", "docstring": "Runs CTC loss algorithm on each batch element.\n\nArgs:\ny_true: tensor `(samples, max_string_length)`\ncontaining the truth labels.\ny_pred: tensor `(samples, time_steps, num_categories)`\ncontaining the prediction, or output of the softmax.\ninput_length: tensor `(samples, 1)` containing the sequence length for\neach batch item in `y_pred`.\nlabel_length: tensor `(samples, 1)` containing the sequence length for\neach batch item in `y_true`.\n\nReturns:\nTensor with shape (samples,1) containing the\nCTC loss of each element.", "source": "github-repos"}
{"code": "def measure_power(self, hz, duration, tag, offset=30):\n    num = (duration * hz)\n    oset = (offset * hz)\n    data = None\n    self.usb('auto')\n    time.sleep(1)\n    with self.dut.handle_usb_disconnect():\n        time.sleep(1)\n        try:\n            data = self.take_samples(hz, num, sample_offset=oset)\n            if (not data):\n                raise MonsoonError(('No data was collected in measurement %s.' % tag))\n            data.tag = tag\n            self.dut.log.info('Measurement summary: %s', repr(data))\n            return data\n        finally:\n            self.mon.StopDataCollection()\n            self.log.info('Finished taking samples, reconnecting to dut.')\n            self.usb('on')\n            self.dut.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_USB_ON)\n            time.sleep(10)\n            self.dut.log.info('Dut reconnected.')", "docstring": "Measure power consumption of the attached device.\n\nBecause it takes some time for the device to calm down after the usb\nconnection is cut, an offset is set for each measurement. The default\nis 30s. The total time taken to measure will be (duration + offset).\n\nArgs:\nhz: Number of samples to take per second.\nduration: Number of seconds to take samples for in each step.\noffset: The number of seconds of initial data to discard.\ntag: A string that's the name of the collected data group.\n\nReturns:\nA MonsoonData object with the measured power data.", "source": "codesearchnet"}
{"code": "def _Completion(self, match):\n    \n    r\n    \n    word = str(match.group())[2:-2]\n    return '(' + ('(').join(word) + ')?' * len(word)", "docstring": "r\"\"\"Replaces double square brackets with variable length completion.\n\nCompletion cannot be mixed with regexp matching or '\\' characters\ni.e. '[[(\\n)]] would become (\\(n)?)?.'\n\nArgs:\nmatch: A regex Match() object.\n\nReturns:\nString of the format '(a(b(c(d)?)?)?)?'.", "source": "juraj-google-style"}
{"code": "def _evolve_subsystem(self, state, qargs):\n        \n        mat = np.reshape(self.data, self._shape)\n        \n        \n        state_size = len(state)\n        state_dims = self._automatic_dims(None, state_size)\n        if self.input_dims() != len(qargs) * (2, ):\n            raise QiskitError(\n                \"Channel input dimensions are not compatible with state subsystem dimensions.\"\n            )\n        \n        tensor = np.reshape(state, 2 * state_dims)\n        num_inidices = len(state_dims)\n        indices = [num_inidices - 1 - qubit for qubit in qargs\n                   ] + [2 * num_inidices - 1 - qubit for qubit in qargs]\n        tensor = self._einsum_matmul(tensor, mat, indices)\n        return np.reshape(tensor, [state_size, state_size])", "docstring": "Evolve a quantum state by the operator.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nQuantumState: the output quantum state.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"}
{"code": "def enable_save_as_bf16(variables: List[tf_variables.Variable]):\n    for v in variables:\n        if isinstance(v, d_variable.DVariable):\n            v.save_as_bf16 = True", "docstring": "Allows float32 DVariables to be checkpointed and restored as bfloat16.\n\nThe method only affects the DVariable part inside the model and leaves\nnon-DTensor Variables/Tensors untouched.\n\nArgs:\nvariables: A list of tf.Variable to be enabled with bfloat16 save/restore.\nOnly has effect on DTensor Variables as they go through d_variables with\nDTensor Specific logis.", "source": "github-repos"}
{"code": "def from_json(cls, data):\n        \n        \n        required_keys = ('solar_model', 'month', 'day_of_month')\n        for key in required_keys:\n            assert key in data, 'Required key \"{}\" is missing!'.format(key)\n\n        if data['solar_model'] == 'ASHRAEClearSky':\n            return OriginalClearSkyCondition.from_json(data)\n        if data['solar_model'] == 'ASHRAETau':\n            return RevisedClearSkyCondition.from_json(data)\n\n        if 'daylight_savings_indicator' not in data:\n            data['daylight_savings_indicator'] = 'No'\n        optional_keys = ('beam_shced', 'diff_sched')\n        for key in optional_keys:\n            if key not in data:\n                data[key] = ''\n\n        return cls(data['month'], data['day_of_month'], data['clearness'],\n                   data['daylight_savings_indicator'],\n                   data['beam_shced'], data['diff_sched'])", "docstring": "Create a Sky Condition from a dictionary.\n\nArgs:\ndata = {\n\"solar_model\": string,\n\"month\": int,\n\"day_of_month\": int,\n\"daylight_savings_indicator\": string // \"Yes\" or \"No\"}", "source": "juraj-google-style"}
{"code": "def WriteUInt160(self, value):\n        \n        if type(value) is UInt160:\n            value.Serialize(self)\n        else:\n            raise Exception(\"value must be UInt160 instance \")", "docstring": "Write a UInt160 type to the stream.\n\nArgs:\nvalue (UInt160):\n\nRaises:\nException: when `value` is not of neocore.UInt160 type.", "source": "juraj-google-style"}
{"code": "def _truncate_float(matchobj, format_str='0.2g'):\n    \n    if matchobj.group(0):\n        return format(float(matchobj.group(0)), format_str)\n    return ''", "docstring": "Truncate long floats\n\nArgs:\nmatchobj (re.Match): contains original float\nformat_str (str): format specifier\nReturns:\nstr: returns truncated float", "source": "juraj-google-style"}
{"code": "def block(self,\n              cutoffs=None,\n              values=None,\n              n_bins=0,\n              right=False,\n              function=None):\n        \n        \n        params = self.__dict__.copy()\n\n        if (values is not None) and (cutoffs is None):\n            cutoffs = values[1:]\n\n        if (cutoffs is None) and (n_bins == 0):\n            cutoffs = np.mean(self)\n\n        if (n_bins != 0) and (cutoffs is None):\n            mi, ma = np.amin(self), np.amax(self)\n            cutoffs = np.linspace(mi, ma, n_bins+1)\n            cutoffs = cutoffs[:-1]\n\n        try:  \n            data = np.digitize(self, cutoffs, right)\n        except ValueError:  \n            data = np.digitize(self, [cutoffs], right)\n\n        if (function is None) and (values is None):\n            return Curve(data, params=params)\n\n        data = data.astype(float)\n\n        \n        f = function or utils.null\n\n        \n        tops, vals = utils.find_edges(data)\n\n        \n        \n        \n        \n\n        if values is None:\n            \n            for top, base in zip(tops[:-1], tops[1:]):\n                data[top:base] = f(np.copy(self[top:base]))\n            data[base:] = f(np.copy(self[base:]))  \n        else:\n            for top, base, val in zip(tops[:-1], tops[1:], vals[:-1]):\n                data[top:base] = values[int(val)]\n            data[base:] = values[int(vals[-1])]  \n\n        return Curve(data, params=params)", "docstring": "Block a log based on number of bins, or on cutoffs.\n\nArgs:\ncutoffs (array)\nvalues (array): the values to map to. Defaults to [0, 1, 2,...]\nn_bins (int)\nright (bool)\nfunction (function): transform the log if you want.\n\nReturns:\nCurve.", "source": "juraj-google-style"}
{"code": "def parse(lines, root=None):\n    \n    doc = {}\n    entries = []\n    name = None\n    total = None\n    for line in lines:\n        line = line.strip()\n        if not line:\n            continue\n        if line and line[0] == \"/\" and line[-1] == \":\":\n            if name is None:\n                name = line[:-1]\n                if entries:\n                    d = Directory(name, total or len(entries), entries)\n                    doc[root] = d\n                    total = None\n                    entries = []\n            else:\n                d = Directory(name, total or len(entries), entries)\n                doc[name or root] = d\n                total = None\n                entries = []\n                name = line[:-1]\n            continue\n        if line.startswith(\"total\"):\n            total = int(line.split(None, 1)[1])\n            continue\n        entries.append(line)\n    name = name or root\n    doc[name] = Directory(name, total or len(entries), entries)\n    return doc", "docstring": "Parses a list of lines from ls into dictionaries representing their\ncomponents.\n\nArgs:\nlines (list): A list of lines generated by ls.\nroot (str): The directory name to be used for ls output stanzas that\ndon't have a name.\n\nReturns:\nA dictionary representing the ls output. It's keyed by the path\ncontaining each ls stanza.", "source": "juraj-google-style"}
{"code": "def which_with_envpath(executable: str, env: Dict[str, str]) -> str:\n    \n    oldpath = os.environ.get(\"PATH\", \"\")\n    os.environ[\"PATH\"] = env.get(\"PATH\")\n    which = shutil.which(executable)\n    os.environ[\"PATH\"] = oldpath\n    return which", "docstring": "Performs a :func:`shutil.which` command using the PATH from the specified\nenvironment.\n\nReason: when you use ``run([executable, ...], env)`` and therefore\n``subprocess.run([executable, ...], env=env)``, the PATH that's searched\nfor ``executable`` is the parent's, not the new child's -- so you have to\nfind the executable manually.\n\nArgs:\nexecutable: executable to find\nenv: environment to fetch the PATH variable from", "source": "juraj-google-style"}
{"code": "def get_metric_parsers(metric_packages=tuple(), include_defaults=True):\n    \n    metric_parsers = set()\n\n    if include_defaults:\n        import git_code_debt.metrics\n        metric_parsers.update(discover(git_code_debt.metrics, is_metric_cls))\n\n    for metric_package in metric_packages:\n        metric_parsers.update(discover(metric_package, is_metric_cls))\n    return metric_parsers", "docstring": "Gets all of the metric parsers.\n\nArgs:\nmetric_packages - Defaults to no extra packages. An iterable of\nmetric containing packages.  A metric inherits DiffParserBase\nand does not have __metric__ = False\nA metric package must be imported using import a.b.c\ninclude_defaults - Whether to include the generic metric parsers", "source": "juraj-google-style"}
{"code": "def __init__(self, cbFun, cbCtx=None):\n        \n        self._cbFun = cbFun\n        self._cbCtx = cbCtx", "docstring": "Create an instance of *CallbackReader* bound to specific URL.\n\nArgs:\ncbFun (callable): user callable accepting *MIB name* and *cbCtx* objects\n\nKeyword Args:\ncbCtx (object): user object that can be used to communicate state information\nbetween user-scope code and the *cbFun* callable scope", "source": "juraj-google-style"}
{"code": "def add_edge(self, a, b):\n        \n        neighbors_of_a = self.adjacency_lists.get(a)\n        if not neighbors_of_a:\n            neighbors_of_a = set()\n            self.adjacency_lists[a] = neighbors_of_a\n\n        neighbors_of_a.add(b)\n\n        neighbors_of_b = self.adjacency_lists.get(b)\n        if not neighbors_of_b:\n            neighbors_of_b = set()\n            self.adjacency_lists[b] = neighbors_of_b\n\n        neighbors_of_b.add(a)", "docstring": "Used to add edges to the graph. 'a' and 'b' are vertexes and\nif 'a' or 'b' doesn't exisit then the vertex is created\n\nArgs:\na (hash): is one vertex of the edge\nb (hash): is another vertext of the edge", "source": "juraj-google-style"}
{"code": "def minute(self, value=None):\n        \n        if value is not None:\n            try:\n                value = int(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type int '\n                                 'for field `minute`'.format(value))\n            if value < 0:\n                raise ValueError('value need to be greater or equal 0 '\n                                 'for field `minute`')\n            if value > 60:\n                raise ValueError('value need to be smaller 60 '\n                                 'for field `minute`')\n\n        self._minute = value", "docstring": "Corresponds to IDD Field `minute`\n\nArgs:\nvalue (int): value for IDD Field `minute`\nvalue >= 0\nvalue <= 60\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def on_test_batch_end(self, batch, logs=None):\n    if self._should_call_test_batch_hooks:\n        self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)", "docstring": "Calls the `on_test_batch_end` methods of its callbacks.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def ListAssets(logdir, plugin_name):\n  \n  plugin_dir = PluginDirectory(logdir, plugin_name)\n  try:\n    \n    return [x.rstrip('/') for x in tf.io.gfile.listdir(plugin_dir)]\n  except tf.errors.NotFoundError:\n    return []", "docstring": "List all the assets that are available for given plugin in a logdir.\n\nArgs:\nlogdir: A directory that was created by a TensorFlow summary.FileWriter.\nplugin_name: A string name of a plugin to list assets for.\n\nReturns:\nA string list of available plugin assets. If the plugin subdirectory does\nnot exist (either because the logdir doesn't exist, or because the plugin\ndidn't register) an empty list is returned.", "source": "juraj-google-style"}
{"code": "def _average_precision(self, rec, prec):\n    ap = 0.0\n    for t in np.arange(0.0, 1.1, 0.1):\n        if (np.sum((rec >= t)) == 0):\n            p = 0\n        else:\n            p = np.max(prec[(rec >= t)])\n        ap += (p / 11.0)\n    return ap", "docstring": "calculate average precision, override the default one,\nspecial 11-point metric\n\nParams:\n----------\nrec : numpy.array\ncumulated recall\nprec : numpy.array\ncumulated precision\nReturns:\n----------\nap as float", "source": "codesearchnet"}
{"code": "def inspect_repo(self, repo_name):\n        \n        req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))\n        res = self.stub.InspectRepo(req, metadata=self.metadata)\n        return res", "docstring": "Returns info about a specific Repo.\n\nParams:\n* repo_name: Name of the repo.", "source": "juraj-google-style"}
{"code": "def step(self, actions):\n    if (self._store_rollouts and self._rollouts_by_epoch_and_split[self.current_epoch]):\n        raise ValueError('Data for current epoch has already been loaded from disk.')\n    (obs, unclipped_rewards, dones) = self._step(actions)\n    obs = self._preprocess_observations(obs)\n    (min_reward, max_reward) = self.reward_range\n    rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))\n    if self._store_rollouts:\n        unclipped_rewards = unclipped_rewards.astype(np.float64)\n        encoded_obs = self._encode_observations(obs)\n        for (rollout, frame, action) in zip(self._current_batch_rollouts, self._current_batch_frames, actions):\n            rollout.append(frame._replace(action=action))\n        self._current_batch_frames = [Frame(*orud, action=None) for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)]\n    return (obs, rewards, dones)", "docstring": "Makes a step in all environments.\n\nDoes any preprocessing and records frames.\n\nArgs:\nactions: Batch of actions.\n\nReturns:\n(obs, rewards, dones) - batches of observations, rewards and done flags\nrespectively.\n\nRaises:\nValueError: when the data for current epoch has already been loaded.", "source": "codesearchnet"}
{"code": "def __parse(self, function_meta):\n        \n        self._func = get_mapping_function(\n            function_meta[\"func_name\"],\n            self.functions_mapping\n        )\n        self.func_name = self._func.__name__\n        self._args = prepare_lazy_data(\n            function_meta.get(\"args\", []),\n            self.functions_mapping,\n            self.check_variables_set\n        )\n        self._kwargs = prepare_lazy_data(\n            function_meta.get(\"kwargs\", {}),\n            self.functions_mapping,\n            self.check_variables_set\n        )\n\n        if self.func_name == \"load_csv_file\":\n            if len(self._args) != 1 or self._kwargs:\n                raise exceptions.ParamsError(\"P() should only pass in one argument!\")\n            self._args = [self._args[0]]\n        elif self.func_name == \"get_os_environ\":\n            if len(self._args) != 1 or self._kwargs:\n                raise exceptions.ParamsError(\"ENV() should only pass in one argument!\")\n            self._args = [self._args[0]]", "docstring": "init func as lazy functon instance\n\nArgs:\nfunction_meta (dict): function meta including name, args and kwargs", "source": "juraj-google-style"}
{"code": "def show(self, xlim=None, ylim=None, units=\"thz\"):\n        \n        plt = self.get_plot(xlim, ylim, units=units)\n        plt.show()", "docstring": "Show the plot using matplotlib.\n\nArgs:\nxlim: Specifies the x-axis limits. Set to None for automatic\ndetermination.\nylim: Specifies the y-axis limits.\nunits: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.", "source": "juraj-google-style"}
{"code": "def add_parents(self, parents):\n        \n\n        self._parents += [p for p in parents if p not in self._parents]", "docstring": "Adds new parent nodes after filtering for duplicates\n\nArgs:\nparents (list): list of OmniTree nodes to add as parents", "source": "juraj-google-style"}
{"code": "def convert_result(r):\n    \n    if (isinstance(r, collections.Sequence) and\n            not isinstance(r, string_types)):\n        rs = []\n        for subresult in r:\n            rs.append(convert_result(subresult))\n        return rs\n    if isinstance(r, ipyparallel.AsyncResult):\n        r = r.r\n    if isinstance(r, Ref):\n        RemoteClass = distob.engine.proxy_types[r.type]\n        r = RemoteClass(r)\n    return r", "docstring": "Waits for and converts any AsyncResults. Converts any Ref into a Remote.\nArgs:\nr: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a\nSequence of objects, AsyncResults and Refs.\nReturns:\neither an ordinary object or a Remote instance", "source": "juraj-google-style"}
{"code": "def global_horizontal_illuminance(self, value=999999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `global_horizontal_illuminance`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `global_horizontal_illuminance`')\n\n        self._global_horizontal_illuminance = value", "docstring": "Corresponds to IDD Field `global_horizontal_illuminance`\nwill be missing if >= 999900\n\nArgs:\nvalue (float): value for IDD Field `global_horizontal_illuminance`\nUnit: lux\nvalue >= 0.0\nMissing value: 999999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_config_bool_option(parser: ConfigParser,\n                           section: str,\n                           option: str,\n                           default: bool = None) -> bool:\n    \n    if not parser.has_section(section):\n        raise ValueError(\"config missing section: \" + section)\n    return parser.getboolean(section, option, fallback=default)", "docstring": "Retrieves a boolean value from a parser.\n\nArgs:\nparser: instance of :class:`ConfigParser`\nsection: section name within config file\noption: option (variable) name within that section\ndefault: value to return if option is absent\n\nReturns:\nstring value\n\nRaises:\nValueError: if the section is absent", "source": "juraj-google-style"}
{"code": "def dark(app):\n    _apply_base_theme(app)\n    darkPalette = QPalette()\n    darkPalette.setColor(QPalette.WindowText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))\n    darkPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Midlight, QColor(90, 90, 90))\n    darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35))\n    darkPalette.setColor(QPalette.Text, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.BrightText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.ButtonText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Base, QColor(42, 42, 42))\n    darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))\n    darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n    darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n    darkPalette.setColor(QPalette.HighlightedText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Link, QColor(56, 252, 196))\n    darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))\n    darkPalette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53))\n    darkPalette.setColor(QPalette.ToolTipText, QColor(180, 180, 180))\n    darkPalette.setColor(QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127))\n    darkPalette.setColor(QPalette.Disabled, QPalette.Text, QColor(127, 127, 127))\n    darkPalette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127))\n    darkPalette.setColor(QPalette.Disabled, QPalette.Highlight, QColor(80, 80, 80))\n    darkPalette.setColor(QPalette.Disabled, QPalette.HighlightedText, QColor(127, 127, 127))\n    app.setPalette(darkPalette)", "docstring": "Apply Dark Theme to the Qt application instance.\n\nArgs:\napp (QApplication): QApplication instance.", "source": "codesearchnet"}
{"code": "def _begin_operation_action(self, action):\n    conn_key = action.data['id']\n    callback = action.data['callback']\n    if (self._get_connection_state(conn_key) != self.Idle):\n        callback(conn_key, self.id, False, 'Cannot start operation, connection is not idle')\n        return\n    data = self._get_connection(conn_key)\n    data['state'] = self.InProgress\n    data['microstate'] = action.data['operation_name']\n    data['action'] = action", "docstring": "Begin an attempted operation.\n\nArgs:\naction (ConnectionAction): the action object describing what we are\noperating on", "source": "codesearchnet"}
{"code": "def add(self, origin):\n    digest = self._calc_digest(origin)\n    if self.exists(digest):\n        self.logger.debug('Added File: [{0}] ( Already exists. Skipping transfer)'.format(digest))\n        return digest\n    absPath = self.get_file_path(digest)\n    absFolderPath = os.path.dirname(absPath)\n    self._makedirs(absFolderPath)\n    self._copy_content(origin, absPath)\n    self.logger.debug('Added file: \"{0}\" [{1}]'.format(digest, absPath))\n    return digest", "docstring": "Add new element to fsdb.\n\nArgs:\norigin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...)\nReturns:\nString rapresenting the digest of the file", "source": "codesearchnet"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None):\n    class_queries_logits = outputs.logits\n    masks_queries_logits = outputs.pred_masks\n    masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n    masks_probs = masks_queries_logits.sigmoid()\n    segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n    batch_size = class_queries_logits.shape[0]\n    if target_sizes is not None:\n        if batch_size != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        semantic_segmentation = []\n        for idx in range(batch_size):\n            resized_logits = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = segmentation.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`ConditionalDetrForSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple[int, int]]`, *optional*):\nA list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the\nbatch. If unset, predictions will not be resized.\nReturns:\n`List[torch.Tensor]`:\nA list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\ncorresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each\n`torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def start_server(port):\n    _pywrap_profiler.start_server(port)", "docstring": "Start a profiler grpc server that listens to given port.\n\nThe profiler server will exit when the process finishes. The service is\ndefined in tensorflow/core/profiler/profiler_service.proto.\n\nArgs:\nport: port profiler server listens to.\nExample usage: ```python tf.profiler.experimental.server.start(6009) # do\nyour training here.", "source": "github-repos"}
{"code": "def detect_intent_knowledge(project_id, session_id, language_code, knowledge_base_id, texts):\n    import dialogflow_v2beta1 as dialogflow\n    session_client = dialogflow.SessionsClient()\n    session_path = session_client.session_path(project_id, session_id)\n    print('Session path: {}\\n'.format(session_path))\n    for text in texts:\n        text_input = dialogflow.types.TextInput(text=text, language_code=language_code)\n        query_input = dialogflow.types.QueryInput(text=text_input)\n        knowledge_base_path = dialogflow.knowledge_bases_client.KnowledgeBasesClient.knowledge_base_path(project_id, knowledge_base_id)\n        query_params = dialogflow.types.QueryParameters(knowledge_base_names=[knowledge_base_path])\n        response = session_client.detect_intent(session=session_path, query_input=query_input, query_params=query_params)\n        print(('=' * 20))\n        print('Query text: {}'.format(response.query_result.query_text))\n        print('Detected intent: {} (confidence: {})\\n'.format(response.query_result.intent.display_name, response.query_result.intent_detection_confidence))\n        print('Fulfillment text: {}\\n'.format(response.query_result.fulfillment_text))\n        print('Knowledge results:')\n        knowledge_answers = response.query_result.knowledge_answers\n        for answers in knowledge_answers.answers:\n            print(' - Answer: {}'.format(answers.answer))\n            print(' - Confidence: {}'.format(answers.match_confidence))", "docstring": "Returns the result of detect intent with querying Knowledge Connector.\n\nArgs:\nproject_id: The GCP project linked with the agent you are going to query.\nsession_id: Id of the session, using the same `session_id` between requests\nallows continuation of the conversation.\nlanguage_code: Language of the queries.\nknowledge_base_id: The Knowledge base's id to query against.\ntexts: A list of text queries to send.", "source": "codesearchnet"}
{"code": "def VisitFunction(self, f):\n    groups = self._GroupByArguments(f.signatures)\n    new_signatures = []\n    for stripped_signature, ret_exc in groups.items():\n        ret = pytd_utils.JoinTypes(ret_exc.return_types)\n        exc = tuple(ret_exc.exceptions)\n        new_signatures.append(stripped_signature.Replace(return_type=ret, exceptions=exc))\n    return f.Replace(signatures=tuple(new_signatures))", "docstring": "Merge signatures of a function.\n\nThis groups signatures by arguments and then for each group creates a\nsingle signature that joins the return values / exceptions using \"or\".\n\nArguments:\nf: A pytd.Function instance\n\nReturns:\nFunction with simplified / combined signatures.", "source": "github-repos"}
{"code": "def _ResizeNearestNeighborGrad(op: ops.Operation, grad):\n    image = op.inputs[0]\n    if image.get_shape()[1:3].is_fully_defined():\n        image_shape = image.get_shape()[1:3]\n    else:\n        image_shape = array_ops.shape(image)[1:3]\n    grads = gen_image_ops.resize_nearest_neighbor_grad(grad, image_shape, align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))\n    return [grads, None]", "docstring": "The derivatives for nearest neighbor resizing.\n\nArgs:\nop: The ResizeNearestNeighbor op.\ngrad: The tensor representing the gradient w.r.t. the output.\n\nReturns:\nThe gradients w.r.t. the input and the output.", "source": "github-repos"}
{"code": "def save(self, sess, save_path, timestep=None):\n    if (self._saver is None):\n        raise TensorForceError('register_saver_ops should be called before save')\n    return self._saver.save(sess=sess, save_path=save_path, global_step=timestep, write_meta_graph=False, write_state=True)", "docstring": "Saves this component's managed variables.\n\nArgs:\nsess: The session for which to save the managed variables.\nsave_path: The path to save data to.\ntimestep: Optional, the timestep to append to the file name.\n\nReturns:\nCheckpoint path where the model was saved.", "source": "codesearchnet"}
{"code": "def snake_case_to_headless_camel_case(snake_string):\n    return ''.join(([snake_string.split('_')[0]] + list((sub_string.capitalize() for sub_string in snake_string.split('_')[1:]))))", "docstring": "Convert snake_case to headlessCamelCase.\n\nArgs:\nsnake_string: The string to be converted.\nReturns:\nThe input string converted to headlessCamelCase.", "source": "codesearchnet"}
{"code": "def encrypt_encoded(self, encoding, r_value):\n        \n        \n        obfuscator = r_value or 1\n        ciphertext = self.raw_encrypt(encoding.encoding, r_value=obfuscator)\n        encrypted_number = EncryptedNumber(self, ciphertext, encoding.exponent)\n        if r_value is None:\n            encrypted_number.obfuscate()\n        return encrypted_number", "docstring": "Paillier encrypt an encoded value.\n\nArgs:\nencoding: The EncodedNumber instance.\nr_value (int): obfuscator for the ciphertext; by default (i.e.\nif *r_value* is None), a random value is used.\n\nReturns:\nEncryptedNumber: An encryption of *value*.", "source": "juraj-google-style"}
{"code": "def _make_sent_vector(self, sent: List, bucket_length: int=None) -> np.ndarray:\n    bucket_length = (bucket_length or len(sent))\n    answer = np.zeros(shape=(bucket_length, (MAX_WORD_LENGTH + 2)), dtype=np.int32)\n    for (i, word) in enumerate(sent):\n        answer[(i, 0)] = self.tags.tok2idx('BEGIN')\n        m = min(len(word), MAX_WORD_LENGTH)\n        for (j, x) in enumerate(word[(- m):]):\n            answer[(i, (j + 1))] = self.symbols.tok2idx(x)\n        answer[(i, (m + 1))] = self.tags.tok2idx('END')\n        answer[(i, (m + 2):)] = self.tags.tok2idx('PAD')\n    return answer", "docstring": "Transforms a sentence to Numpy array, which will be the network input.\n\nArgs:\nsent: input sentence\nbucket_length: the width of the bucket\n\nReturns:\nA 3d array, answer[i][j][k] contains the index of k-th letter\nin j-th word of i-th input sentence.", "source": "codesearchnet"}
{"code": "def _GenerateNonImplementedMethod(self, method):\n    return (lambda inst, rpc_controller, request, callback: self._NonImplementedMethod(method.name, rpc_controller, callback))", "docstring": "Generates and returns a method that can be set for a service methods.\n\nArgs:\nmethod: Descriptor of the service method for which a method is to be\ngenerated.\n\nReturns:\nA method that can be added to the service class.", "source": "codesearchnet"}
{"code": "def update(self, rec=None, drop=None, tables=None, install=None, materialize=None,\n               indexes=None, joins=0, views=0):\n        \n        if not drop:\n            drop = []\n\n        if not tables:\n            tables = set()\n\n        if not install:\n            install = set()\n\n        if not materialize:\n            materialize = set()\n\n        if not indexes:\n            indexes = set()\n\n        if rec:\n            self.update(\n                drop=rec.drop, tables=rec.tables, install=rec.install, materialize=rec.materialize,\n                indexes=rec.indexes, joins=rec.joins\n            )\n\n        self.drop += drop\n        self.tables |= set(tables)\n        self.install |= set(install)\n        self.materialize |= set(materialize)\n        self.indexes |= set(indexes)\n\n        self.joins += joins\n        self.views += views\n\n        \n        if self.joins > 0 or self.views > 0:\n            self.materialize |= self.install\n            self.install = set()", "docstring": "Updates current record.\n\nArgs:\nrec (FIMRecord):", "source": "juraj-google-style"}
{"code": "def collapse_addresses(addresses):\n    \n    addrs = []\n    ips = []\n    nets = []\n\n    \n    for ip in addresses:\n        if isinstance(ip, _BaseAddress):\n            if ips and ips[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                                ip, ips[-1]))\n            ips.append(ip)\n        elif ip._prefixlen == ip._max_prefixlen:\n            if ips and ips[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                                ip, ips[-1]))\n            try:\n                ips.append(ip.ip)\n            except AttributeError:\n                ips.append(ip.network_address)\n        else:\n            if nets and nets[-1]._version != ip._version:\n                raise TypeError(\"%s and %s are not of the same version\" % (\n                                ip, nets[-1]))\n            nets.append(ip)\n\n    \n    ips = sorted(set(ips))\n\n    \n    if ips:\n        for first, last in _find_address_range(ips):\n            addrs.extend(summarize_address_range(first, last))\n\n    return _collapse_addresses_internal(addrs + nets)", "docstring": "Collapse a list of IP objects.\n\nExample:\ncollapse_addresses([IPv4Network('192.0.2.0/25'),\nIPv4Network('192.0.2.128/25')]) ->\n[IPv4Network('192.0.2.0/24')]\n\nArgs:\naddresses: An iterator of IPv4Network or IPv6Network objects.\n\nReturns:\nAn iterator of the collapsed IPv(4|6)Network objects.\n\nRaises:\nTypeError: If passed a list of mixed version objects.", "source": "juraj-google-style"}
{"code": "def unenroll_user_from_course(self, username, course_id):\n        \n        enrollment = self.get_course_enrollment(username, course_id)\n        if enrollment and enrollment['is_active']:\n            response = self.client.enrollment.post({\n                'user': username,\n                'course_details': {'course_id': course_id},\n                'is_active': False,\n                'mode': enrollment['mode']\n            })\n            return not response['is_active']\n\n        return False", "docstring": "Call the enrollment API to unenroll the user in the course specified by course_id.\nArgs:\nusername (str): The username by which the user goes on the OpenEdx platform\ncourse_id (str): The string value of the course's unique identifier\nReturns:\nbool: Whether the unenrollment succeeded", "source": "juraj-google-style"}
{"code": "def format_and_is_storage(path):\n    \n    if not hasattr(path, 'read'):\n        path = fsdecode(path).replace('\\\\', '/')\n        return path, is_storage(path)\n    return path, True", "docstring": "Checks if path is storage and format it.\n\nIf path is an opened file-like object, returns is storage as True.\n\nArgs:\npath (path-like object or file-like object):\n\nReturns:\ntuple: str or file-like object (Updated path),\nbool (True if is storage).", "source": "juraj-google-style"}
{"code": "def _Dhcpcd(self, interfaces, logger):\n    for interface in interfaces:\n        dhcpcd = ['/sbin/dhcpcd']\n        try:\n            subprocess.check_call((dhcpcd + ['-x', interface]))\n        except subprocess.CalledProcessError:\n            logger.info('Dhcpcd not yet running for interface %s.', interface)\n        try:\n            subprocess.check_call((dhcpcd + [interface]))\n        except subprocess.CalledProcessError:\n            logger.warning('Could not activate interface %s.', interface)", "docstring": "Use dhcpcd to activate the interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"}
{"code": "def setValues(self, values):\n    ncols = self.getNumCols()\n    nindices = self.getNumIndices()\n    for (key, value) in values.items():\n        key = Utils.convToList(key)\n        assert (len(key) == nindices)\n        value = Utils.convToList(value)\n        assert (len(value) == (ncols - nindices))\n        self.addRow((key + value))", "docstring": "Set the values of a DataFrame from a dictionary.\n\nArgs:\nvalues: Dictionary with the values to set.", "source": "codesearchnet"}
{"code": "def find_newline(self, size=(- 1)):\n    if (size < 0):\n        return self._buffer.find('\\n', self._offset)\n    return self._buffer.find('\\n', self._offset, (self._offset + size))", "docstring": "Search for newline char in buffer starting from current offset.\n\nArgs:\nsize: number of bytes to search. -1 means all.\n\nReturns:\noffset of newline char in buffer. -1 if doesn't exist.", "source": "codesearchnet"}
{"code": "def parse_received(received):\n    values_by_clause = {}\n    for pattern in RECEIVED_COMPILED_LIST:\n        matches = [match for match in pattern.finditer(received)]\n        if (len(matches) == 0):\n            log.debug(('No matches found for %s in %s' % (pattern.pattern, received)))\n            continue\n        elif (len(matches) > 1):\n            msg = ('More than one match found for %s in %s' % (pattern.pattern, received))\n            log.error(msg)\n            raise MailParserReceivedParsingError(msg)\n        else:\n            log.debug(('Found one match for %s in %s' % (pattern.pattern, received)))\n            match = matches[0].groupdict()\n            if six.PY2:\n                values_by_clause[match.keys()[0]] = match.values()[0]\n            elif six.PY3:\n                key = list(match.keys())[0]\n                value = list(match.values())[0]\n                values_by_clause[key] = value\n    if (len(values_by_clause) == 0):\n        msg = ('Unable to match any clauses in %s' % received)\n        log.error(msg)\n        raise MailParserReceivedParsingError(msg)\n    return values_by_clause", "docstring": "Parse a single received header.\nReturn a dictionary of values by clause.\n\nArguments:\nreceived {str} -- single received header\n\nRaises:\nMailParserReceivedParsingError -- Raised when a\nreceived header cannot be parsed\n\nReturns:\ndict -- values by clause", "source": "codesearchnet"}
{"code": "def validate_definition(self, definition_name, dict_to_test, definition=None):\n        \n        if (definition_name not in self.specification['definitions'].keys() and\n                definition is None):\n            \n            return False\n\n        \n        spec_def = definition or self.specification['definitions'][definition_name]\n        all_required_keys_present = all(req in dict_to_test.keys() for req in spec_def.get('required', {}))\n        if 'required' in spec_def and not all_required_keys_present:\n            return False\n\n        \n        properties_dict = spec_def.get('properties', {})\n        for key, value in dict_to_test.items():\n            if value is not None:\n                if key not in properties_dict:  \n                    return False\n                else:  \n                    if not self._validate_type(properties_dict[key], value):\n                        return False\n\n        return True", "docstring": "Validate the given dict according to the given definition.\n\nArgs:\ndefinition_name: name of the the definition.\ndict_to_test: dict to test.\n\nReturns:\nTrue if the given dict match the definition, False otherwise.", "source": "juraj-google-style"}
{"code": "def set_hyperparameters(self, hyperparameters):\n    self._hyperparameters.update(hyperparameters)\n    if self._class:\n        LOGGER.debug('Creating a new primitive instance for %s', self.name)\n        self.instance = self.primitive(**self._hyperparameters)", "docstring": "Set new hyperparameters.\n\nOnly the specified hyperparameters are modified, so any other\nhyperparameter keeps the value that had been previously given.\n\nIf necessary, a new instance of the primitive is created.\n\nArgs:\nhyperparameters (dict): Dictionary containing as keys the name\nof the hyperparameters and as values\nthe values to be used.", "source": "codesearchnet"}
{"code": "def zip_ll_row(params, data_row):\n    \n    l = params[0]\n    pi = params[1]\n    d0 = (data_row==0)\n    likelihood = d0*pi + (1-pi)*poisson.pmf(data_row, l)\n    return -np.log(likelihood+eps).sum()", "docstring": "Returns the negative log-likelihood of a row given ZIP data.\n\nArgs:\nparams (list): [lambda zero-inf]\ndata_row (array): 1d array\n\nReturns:\nnegative log-likelihood", "source": "juraj-google-style"}
{"code": "def load(self, response):\n        \n        self._response = response\n\n        if self.next_location(raw=True):\n            self._num_redirects += 1", "docstring": "Load the response and increment the counter.\n\nArgs:\nresponse (:class:`.http.request.Response`): The response from\na previous request.", "source": "juraj-google-style"}
{"code": "def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    if isinstance(strides, int):\n        strides = (strides,)\n    if isinstance(dilation_rate, int):\n        dilation_rate = (dilation_rate,)\n    x, tf_data_format = _preprocess_conv1d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    if not isinstance(strides, tuple):\n        strides = tuple(strides)\n    if tf_data_format == 'NWC':\n        spatial_start_dim = 1\n        strides = (1,) + strides * 2 + (1,)\n    else:\n        spatial_start_dim = 2\n        strides = (1, 1) + strides * 2\n    x = array_ops.expand_dims(x, spatial_start_dim)\n    depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)\n    pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)\n    dilation_rate = (1,) + dilation_rate\n    x = nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)\n    x = array_ops.squeeze(x, [spatial_start_dim])\n    if data_format == 'channels_first' and tf_data_format == 'NWC':\n        x = array_ops.transpose(x, (0, 2, 1))\n    return x", "docstring": "1D convolution with separable filters.\n\nArgs:\nx: input tensor\ndepthwise_kernel: convolution kernel for the depthwise convolution.\npointwise_kernel: kernel for the 1x1 convolution.\nstrides: stride integer.\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\ndilation_rate: integer dilation rate.\n\nReturns:\nOutput tensor.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.", "source": "github-repos"}
{"code": "def get_rng(obj=None):\n    \n    seed = (id(obj) + os.getpid() +\n            int(datetime.now().strftime(\"%Y%m%d%H%M%S%f\"))) % 4294967295\n    if _RNG_SEED is not None:\n        seed = _RNG_SEED\n    return np.random.RandomState(seed)", "docstring": "Get a good RNG seeded with time, pid and the object.\n\nArgs:\nobj: some object to use to generate random seed.\nReturns:\nnp.random.RandomState: the RNG.", "source": "juraj-google-style"}
{"code": "def add_headers(vcf_obj, nr_cases=None, sv=False):\n    \n\n    vcf_obj.add_info_to_header(\n        {\n            'ID':\"Obs\",\n            'Number': '1',\n            'Type': 'Integer',\n            'Description': \"The number of observations for the variant\"}\n    )\n    if not sv:\n        vcf_obj.add_info_to_header(\n            {\n                'ID':\"Hom\",\n                'Number': '1',\n                'Type': 'Integer',\n                'Description': \"The number of observed homozygotes\"}\n        )\n        vcf_obj.add_info_to_header(\n            {\n                'ID':\"Hem\",\n                'Number': '1',\n                'Type': 'Integer',\n                'Description': \"The number of observed hemizygotes\"}\n        )\n    if nr_cases:\n        case_header = \"\n        vcf_obj.add_to_header(case_header)\n    \n    return", "docstring": "Add loqus specific information to a VCF header\n\nArgs:\nvcf_obj(cyvcf2.VCF)", "source": "juraj-google-style"}
{"code": "def unload(self):\n    unloaded = False\n    if (self._lib is not None):\n        if (self._winlib is not None):\n            ctypes.windll.kernel32.FreeLibrary.argtypes = (ctypes.c_void_p,)\n            ctypes.windll.kernel32.FreeLibrary(self._lib._handle)\n            ctypes.windll.kernel32.FreeLibrary(self._winlib._handle)\n            self._lib = None\n            self._winlib = None\n            unloaded = True\n        else:\n            del self._lib\n            self._lib = None\n            unloaded = True\n    if (self._temp is not None):\n        os.remove(self._temp.name)\n        self._temp = None\n    return unloaded", "docstring": "Unloads the library's DLL if it has been loaded.\n\nThis additionally cleans up the temporary DLL file that was created\nwhen the library was loaded.\n\nArgs:\nself (Library): the ``Library`` instance\n\nReturns:\n``True`` if the DLL was unloaded, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def get_variable_dtype(\n    master_dtype=tf.bfloat16,\n    slice_dtype=tf.float32,\n    activation_dtype=tf.float32):\n  \n  return mtf.VariableDType(\n      master_dtype=tf.as_dtype(master_dtype),\n      slice_dtype=tf.as_dtype(slice_dtype),\n      activation_dtype=tf.as_dtype(activation_dtype))", "docstring": "Datatypes to use for the run.\n\nArgs:\nmaster_dtype: string, datatype for checkpoints\nkeep this the same between training and eval/inference\nslice_dtype: string, datatype for variables in memory\nmust be tf.float32 for training\nactivation_dtype: string, datatype for activations\nless memory usage if tf.bfloat16 but possible numerical issues\nReturns:\na mtf.VariableDtype", "source": "juraj-google-style"}
{"code": "def check_semidefinite_positiveness(A):\n    \n    B = empty_like(A)\n    B[:] = A\n    B[diag_indices_from(B)] += sqrt(finfo(float).eps)\n    try:\n        cholesky(B)\n    except LinAlgError:\n        return False\n    return True", "docstring": "Check if ``A`` is a semi-definite positive matrix.\n\nArgs:\nA (array_like): Matrix.\n\nReturns:\nbool: ``True`` if ``A`` is definite positive; ``False`` otherwise.", "source": "juraj-google-style"}
{"code": "def compile_default_action(self, batch_size: Optional[int]=None) -> Sequence[tf.Tensor]:\n    with self.graph.as_default():\n        with tf.name_scope('default_action'):\n            self._initialize_default_action_fluents()\n            if (batch_size is None):\n                return self.default_action_fluents\n            return self._compile_batch_fluents(self.default_action_fluents, batch_size)", "docstring": "Returns a tuple of tensors representing the default action fluents.\n\nArgs:\nbatch_size (int): The batch size.\n\nReturns:\nSequence[tf.Tensor]: A tuple of tensors.", "source": "codesearchnet"}
{"code": "def single_gate_params(gate, params=None):\n    if (gate in ('U', 'u3')):\n        return (params[0], params[1], params[2])\n    elif (gate == 'u2'):\n        return ((np.pi / 2), params[0], params[1])\n    elif (gate == 'u1'):\n        return (0, 0, params[0])\n    elif (gate == 'id'):\n        return (0, 0, 0)\n    raise QiskitError(('Gate is not among the valid types: %s' % gate))", "docstring": "Apply a single qubit gate to the qubit.\n\nArgs:\ngate(str): the single qubit gate name\nparams(list): the operation parameters op['params']\nReturns:\ntuple: a tuple of U gate parameters (theta, phi, lam)\nRaises:\nQiskitError: if the gate name is not valid", "source": "codesearchnet"}
{"code": "def _GetAPFSVolumeIdentifiers(self, scan_node):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.ScannerError('Invalid scan node.')\n\n    volume_system = apfs_volume_system.APFSVolumeSystem()\n    volume_system.Open(scan_node.path_spec)\n\n    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(\n        volume_system)\n    if not volume_identifiers:\n      return []\n\n    if len(volume_identifiers) > 1:\n      if not self._mediator:\n        raise errors.ScannerError(\n            'Unable to proceed. APFS volumes found but no mediator to '\n            'determine how they should be used.')\n\n      try:\n        volume_identifiers = self._mediator.GetAPFSVolumeIdentifiers(\n            volume_system, volume_identifiers)\n      except KeyboardInterrupt:\n        raise errors.UserAbort('File system scan aborted.')\n\n    return self._NormalizedVolumeIdentifiers(\n        volume_system, volume_identifiers, prefix='apfs')", "docstring": "Determines the APFS volume identifiers.\n\nArgs:\nscan_node (SourceScanNode): scan node.\n\nReturns:\nlist[str]: APFS volume identifiers.\n\nRaises:\nScannerError: if the format of or within the source is not supported\nor the the scan node is invalid.\nUserAbort: if the user requested to abort.", "source": "juraj-google-style"}
{"code": "def setWeekendHolidaySchedules(self, new_wknd, new_hldy, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setWeekendHolidaySchedules\")\n        try:\n            if not self.request(False):\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_wkd = binascii.hexlify(str(new_wknd).zfill(2))\n                    req_hldy = binascii.hexlify(str(new_hldy).zfill(2))\n                    req_str = \"015731023030433028\" + req_wkd + req_hldy + \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success(setWeekendHolidaySchedules): 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial call to set weekend and holiday :class:`~ekmmeters.Schedules`.\n\nArgs:\nnew_wknd (int): :class:`~ekmmeters.Schedules` value to assign.\nnew_hldy (int): :class:`~ekmmeters.Schedules` value to assign.\npassword (str): Optional password..\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def get_git_commit_sha():\n    return os.getenv('GIT_COMMIT')", "docstring": "Get git commit SHA for this build.\n\nAttempt to get the SHA from environment variable GIT_COMMIT, which should\nbe available on Jenkins build agents.\n\nReturns:\nSHA hash of the git commit used for the build, if available", "source": "github-repos"}
{"code": "def _force_float(v):\n    try:\n        return float(v)\n    except Exception as exc:\n        return float('nan')\n        logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))", "docstring": "Converts given argument to float. On fail logs warning and returns 0.0.\n\nArgs:\nv (any): value to convert to float\n\nReturns:\nfloat: converted v or 0.0 if conversion failed.", "source": "codesearchnet"}
{"code": "def output(self):\n    return self._nested_outputs", "docstring": "Retrieves the output tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one output,\ni.e. if it is connected to one incoming layer.\n\nReturns:\nOutput tensor or list of output tensors.\n\nRaises:\nAttributeError: if the layer is connected to more than one incoming\nlayers.\nRuntimeError: if called in Eager mode.", "source": "github-repos"}
{"code": "def local_service(self, name_or_id):\n    if (not self._loop.inside_loop()):\n        self._state_lock.acquire()\n    try:\n        if isinstance(name_or_id, int):\n            if (name_or_id not in self._name_map):\n                raise ArgumentError('Unknown ID used to look up service', id=name_or_id)\n            name = self._name_map[name_or_id]\n        else:\n            name = name_or_id\n        if (name not in self.services):\n            raise ArgumentError('Unknown service name', name=name)\n        return copy(self.services[name])\n    finally:\n        if (not self._loop.inside_loop()):\n            self._state_lock.release()", "docstring": "Get the locally synced information for a service.\n\nThis method is safe to call outside of the background event loop\nwithout any race condition.  Internally it uses a thread-safe mutex to\nprotect the local copies of supervisor data and ensure that it cannot\nchange while this method is iterating over it.\n\nArgs:\nname_or_id (string or int): Either a short name for the service or\na numeric id.\n\nReturns:\nServiceState: the current state of the service synced locally\nat the time of the call.", "source": "codesearchnet"}
{"code": "def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):\n    self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)", "docstring": "Asserts that two numpy arrays have near values.\n\nArgs:\nndarray1: a numpy ndarray.\nndarray2: a numpy ndarray.\nerr: a float. The maximum absolute difference allowed.\nmsg: Optional message to report on failure.", "source": "github-repos"}
{"code": "def get_ip_prefixes_from_bird(filename):\n    prefixes = []\n    with open(filename, 'r') as bird_conf:\n        lines = bird_conf.read()\n    for line in lines.splitlines():\n        line = line.strip(', ')\n        if valid_ip_prefix(line):\n            prefixes.append(line)\n    return prefixes", "docstring": "Build a list of IP prefixes found in Bird configuration.\n\nArguments:\nfilename (str): The absolute path of the Bird configuration file.\n\nNotes:\nIt can only parse a file with the following format\n\ndefine ACAST_PS_ADVERTISE =\n[\n10.189.200.155/32,\n10.189.200.255/32\n];\n\nReturns:\nA list of IP prefixes.", "source": "codesearchnet"}
{"code": "def memory_read16(self, addr, num_halfwords, zone=None):\n        \n        return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)", "docstring": "Reads memory from the target system in units of 16-bits.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_halfwords (int): number of half words to read\nzone (str): memory zone to read from\n\nReturns:\nList of halfwords read from the target system.\n\nRaises:\nJLinkException: if memory could not be read", "source": "juraj-google-style"}
{"code": "def list(\n        self,\n        **kwargs\n    ):\n        \n\n        request = Request(\n            'GET',\n            '/v3/accounts'\n        )\n\n        response = self.ctx.request(request)\n\n\n        if response.content_type is None:\n            return response\n\n        if not response.content_type.startswith(\"application/json\"):\n            return response\n\n        jbody = json.loads(response.raw_body)\n\n        parsed_body = {}\n\n        \n        \n        \n        if str(response.status) == \"200\":\n            if jbody.get('accounts') is not None:\n                parsed_body['accounts'] = [\n                    self.ctx.account.AccountProperties.from_dict(d, self.ctx)\n                    for d in jbody.get('accounts')\n                ]\n\n        elif str(response.status) == \"401\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"405\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        \n        \n        \n        else:\n            parsed_body = jbody\n\n        response.body = parsed_body\n\n        return response", "docstring": "Get a list of all Accounts authorized for the provided token.\n\nArgs:\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "juraj-google-style"}
{"code": "def get_project(self, resource):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get(resource)", "docstring": "Get attributes of the data model object named by the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource): resource.name as well\nas any parents must be identified to succeed.\n\nReturns:\n(intern.resource.boss.BossResource): Returns resource of type\nrequested on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               task_name,\n               queue_name,\n               base_path):\n    \n    self.task_name = task_name\n    self.queue_name = queue_name\n    self.base_path = base_path\n    self.barrier_handler_path = '%s/output' % base_path\n    self.pipeline_handler_path = '%s/run' % base_path\n    self.finalized_handler_path = '%s/finalized' % base_path\n    self.fanout_handler_path = '%s/fanout' % base_path\n    self.abort_handler_path = '%s/abort' % base_path\n    self.fanout_abort_handler_path = '%s/fanout_abort' % base_path\n    self.session_filled_output_names = set()", "docstring": "Initializer.\n\nArgs:\ntask_name: The name of the currently running task or empty if there\nis no task running.\nqueue_name: The queue this pipeline should run on (may not be the\ncurrent queue this request is on).\nbase_path: Relative URL for the pipeline's handlers.", "source": "juraj-google-style"}
{"code": "def multiply(self, other):\n        \n        if not isinstance(other, Number):\n            raise QiskitError(\"other is not a number\")\n        return Operator(other * self.data, self.input_dims(),\n                        self.output_dims())", "docstring": "Return the operator self + other.\n\nArgs:\nother (complex): a complex number.\n\nReturns:\nOperator: the operator other * self.\n\nRaises:\nQiskitError: if other is not a valid complex number.", "source": "juraj-google-style"}
{"code": "def __init__(self, parameter_name, value):\n    \n    super(InvalidParameterError, self).__init__()\n    self.parameter_name = parameter_name\n    self.value = value", "docstring": "Constructor for InvalidParameterError.\n\nArgs:\nparameter_name: String; the name of the parameter which had a value\nrejected.\nvalue: The actual value passed in for the parameter. Usually string.", "source": "juraj-google-style"}
{"code": "def CreateServiceProto(job):\n    service = rdf_client.OSXServiceInformation(label=job.get('Label'), program=job.get('Program'), sessiontype=job.get('LimitLoadToSessionType'), lastexitstatus=int(job['LastExitStatus']), timeout=int(job['TimeOut']), ondemand=bool(job['OnDemand']))\n    for arg in job.get('ProgramArguments', '', stringify=False):\n        service.args.Append(str(arg))\n    mach_dict = job.get('MachServices', {}, stringify=False)\n    for (key, value) in iteritems(mach_dict):\n        service.machservice.Append(('%s:%s' % (key, value)))\n    job_mach_dict = job.get('PerJobMachServices', {}, stringify=False)\n    for (key, value) in iteritems(job_mach_dict):\n        service.perjobmachservice.Append(('%s:%s' % (key, value)))\n    if ('PID' in job):\n        service.pid = job['PID'].value\n    return service", "docstring": "Create the Service protobuf.\n\nArgs:\njob: Launchdjobdict from servicemanagement framework.\n\nReturns:\nsysinfo_pb2.OSXServiceInformation proto", "source": "codesearchnet"}
{"code": "def _copy_file_or_directory(self, source, destination_directory):\n    \n    if os.path.isdir(source):\n      for item in os.listdir(source):\n        full_source = os.path.join(source, item)\n        full_destination = os.path.join(destination_directory, item)\n        shutil.copytree(full_source, full_destination)\n    else:\n      shutil.copy2(source, destination_directory)", "docstring": "Recursively copies files from source to destination_directory.\n\nArgs:\nsource: source file or directory to copy into destination_directory\ndestination_directory: destination directory in which to copy source", "source": "juraj-google-style"}
{"code": "def start_after(self, document_fields):\n    return self._cursor_helper(document_fields, before=False, start=True)", "docstring": "Start query results after a particular document value.\n\nThe result set will **exclude** the document specified by\n``document_fields``.\n\nIf the current query already has specified a start cursor -- either\nvia this method or\n:meth:`~.firestore_v1beta1.query.Query.start_at` -- this will\noverwrite it.\n\nWhen the query is sent to the server, the ``document_fields`` will\nbe used in the order given by fields set by\n:meth:`~.firestore_v1beta1.query.Query.order_by`.\n\nArgs:\ndocument_fields (Union[~.firestore_v1beta1.\\\ndocument.DocumentSnapshot, dict, list, tuple]): a document\nsnapshot or a dictionary/list/tuple of fields representing a\nquery results cursor. A cursor is a collection of values that\nrepresent a position in a query result set.\n\nReturns:\n~.firestore_v1beta1.query.Query: A query with cursor. Acts as\na copy of the current query, modified with the newly added\n\"start after\" cursor.", "source": "codesearchnet"}
{"code": "def update_state_wrapper(update_state_fn):\n\n    def decorated(metric_obj, *args, **kwargs):\n        \n        strategy = distribute_lib.get_strategy()\n        for weight in metric_obj.weights:\n            if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):\n                raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. ')\n        with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):\n            update_op = update_state_fn(*args, **kwargs)\n        if update_op is not None:\n            metric_obj.add_update(update_op)\n        return update_op\n    return tf_decorator.make_decorator(update_state_fn, decorated)", "docstring": "Decorator to wrap metric `update_state()` with `add_update()`.\n\nArgs:\nupdate_state_fn: function that accumulates metric statistics.\n\nReturns:\nDecorated function that wraps `update_state_fn()` with `add_update()`.", "source": "github-repos"}
{"code": "def track_trace(self, name, properties=None, severity=None):\n        \n        data = channel.contracts.MessageData()\n        data.message = name or NULL_CONSTANT_STRING\n        if properties:\n            data.properties = properties\n        if severity is not None:\n            data.severity_level = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS.get(severity)\n\n        self.track(data, self._context)", "docstring": "Sends a single trace statement.\n\nArgs:\nname (str). the trace statement.\\n\nproperties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\nseverity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL", "source": "juraj-google-style"}
{"code": "def automatic_density(cls, structure, kppa, chksymbreak=None, use_symmetries=True, use_time_reversal=True,\n                          shifts=(0.5, 0.5, 0.5)):\n        \n        lattice = structure.lattice\n        lengths = lattice.abc\n        shifts = np.reshape(shifts, (-1, 3))\n        ngrid = kppa / structure.num_sites / len(shifts)\n\n        mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3.)\n\n        num_div = [int(round(1.0 / lengths[i] * mult)) for i in range(3)]\n        \n        num_div = [i if i > 0 else 1 for i in num_div]\n\n        angles = lattice.angles\n        hex_angle_tol = 5      \n        hex_length_tol = 0.01  \n\n        right_angles = [i for i in range(3) if abs(angles[i] - 90) < hex_angle_tol]\n\n        hex_angles = [i for i in range(3)\n                      if abs(angles[i] - 60) < hex_angle_tol or\n                      abs(angles[i] - 120) < hex_angle_tol]\n\n        is_hexagonal = (len(right_angles) == 2 and len(hex_angles) == 1\n                        and abs(lengths[right_angles[0]] -\n                                lengths[right_angles[1]]) < hex_length_tol)\n\n        \n        \n        \n        \n\n        comment = \"pymatge.io.abinit generated KPOINTS with grid density = \" + \"{} / atom\".format(kppa)\n\n        return cls(\n            mode=\"monkhorst\", num_kpts=0, kpts=[num_div], kpt_shifts=shifts,\n            use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,\n            comment=comment)", "docstring": "Returns an automatic Kpoint object based on a structure and a kpoint\ndensity. Uses Gamma centered meshes for hexagonal cells and Monkhorst-Pack grids otherwise.\n\nAlgorithm:\nUses a simple approach scaling the number of divisions along each\nreciprocal lattice vector proportional to its length.\n\nArgs:\nstructure: Input structure\nkppa: Grid density", "source": "juraj-google-style"}
{"code": "def _probe_services(self, handle):\n        \n\n        code = 0x2800\n\n        def event_filter_func(event):\n            if (event.command_class == 4 and event.command == 2):\n                event_handle, = unpack(\"B\", event.payload[0:1])\n                return event_handle == handle\n\n            return False\n\n        def end_filter_func(event):\n            if (event.command_class == 4 and event.command == 1):\n                event_handle, = unpack(\"B\", event.payload[0:1])\n                return event_handle == handle\n\n            return False\n\n        payload = struct.pack('<BHHBH', handle, 1, 0xFFFF, 2, code)\n\n        try:\n            response = self._send_command(4, 1, payload)\n        except InternalTimeoutError:\n            return False, {'reason': 'Timeout waiting for command response'}\n\n        handle, result = unpack(\"<BH\", response.payload)\n        if result != 0:\n            return False, None\n\n        events = self._wait_process_events(0.5, event_filter_func, end_filter_func)\n        gatt_events = [x for x in events if event_filter_func(x)]\n        end_events = [x for x in events if end_filter_func(x)]\n\n        if len(end_events) == 0:\n            return False, None\n\n        \n        end_event = end_events[0]\n        _, result, _ = unpack(\"<BHH\", end_event.payload)\n        if result != 0:\n            self._logger.warn(\"Error enumerating GATT table, protocol error code = %d (0x%X)\" % (result, result))\n            return False, None\n\n        services = {}\n        for event in gatt_events:\n            process_gatt_service(services, event)\n\n        return True, {'services': services}", "docstring": "Probe for all primary services and characteristics in those services\n\nArgs:\nhandle (int): the connection handle to probe", "source": "juraj-google-style"}
{"code": "def assert_no_title(self, title, **kwargs):\n    query = TitleQuery(title, **kwargs)\n\n    @self.synchronize(wait=query.wait)\n    def assert_no_title():\n        if query.resolves_for(self):\n            raise ExpectationNotMet(query.negative_failure_message)\n        return True\n    return assert_no_title()", "docstring": "Asserts that the page doesn't have the given title.\n\nArgs:\ntitle (str | RegexObject): The string that the title should include.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "codesearchnet"}
{"code": "def _do_refresh_request(self, http):\n    body = self._generate_refresh_request_body()\n    headers = self._generate_refresh_request_headers()\n    logger.info('Refreshing access_token')\n    (resp, content) = transport.request(http, self.token_uri, method='POST', body=body, headers=headers)\n    content = _helpers._from_bytes(content)\n    if (resp.status == http_client.OK):\n        d = json.loads(content)\n        self.token_response = d\n        self.access_token = d['access_token']\n        self.refresh_token = d.get('refresh_token', self.refresh_token)\n        if ('expires_in' in d):\n            delta = datetime.timedelta(seconds=int(d['expires_in']))\n            self.token_expiry = (delta + _UTCNOW())\n        else:\n            self.token_expiry = None\n        if ('id_token' in d):\n            self.id_token = _extract_id_token(d['id_token'])\n            self.id_token_jwt = d['id_token']\n        else:\n            self.id_token = None\n            self.id_token_jwt = None\n        self.invalid = False\n        if self.store:\n            self.store.locked_put(self)\n    else:\n        logger.info('Failed to retrieve access token: %s', content)\n        error_msg = 'Invalid response {0}.'.format(resp.status)\n        try:\n            d = json.loads(content)\n            if ('error' in d):\n                error_msg = d['error']\n                if ('error_description' in d):\n                    error_msg += (': ' + d['error_description'])\n                self.invalid = True\n                if (self.store is not None):\n                    self.store.locked_put(self)\n        except (TypeError, ValueError):\n            pass\n        raise HttpAccessTokenRefreshError(error_msg, status=resp.status)", "docstring": "Refresh the access_token using the refresh_token.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\n\nRaises:\nHttpAccessTokenRefreshError: When the refresh fails.", "source": "codesearchnet"}
{"code": "def _initialize_memory(self, policy_params):\n    \n    \n    template = (\n        self._batch_env.observ[0],\n        self._batch_env.action[0],\n        tools.nested.map(lambda x: x[0, 0], policy_params),\n        self._batch_env.reward[0])\n    with tf.variable_scope('ppo_temporary'):\n      self._current_episodes = parts.EpisodeMemory(\n          template, len(self._batch_env), self._config.max_length, 'episodes')\n    self._finished_episodes = parts.EpisodeMemory(\n        template, self._config.update_every, self._config.max_length, 'memory')\n    self._num_finished_episodes = tf.Variable(0, False)", "docstring": "Initialize temporary and permanent memory.\n\nArgs:\npolicy_params: Nested tuple of policy parameters with all dimensions set.\n\nInitializes the attributes `self._current_episodes`,\n`self._finished_episodes`, and `self._num_finished_episodes`. The episodes\nmemory serves to collect multiple episodes in parallel. Finished episodes\nare copied into the next free slot of the second memory. The memory index\npoints to the next free slot.", "source": "juraj-google-style"}
{"code": "def wrap_sequence(sequence, books=None, tensor_shape=None):\n    if (books is None):\n        books = bookkeeper.for_default_graph()\n    my_sequence = [wrap(t, books=books, tensor_shape=tensor_shape) for t in sequence]\n    return Layer(books, sequence=my_sequence, name=my_sequence[0].name)", "docstring": "Creates an input layer representing the given sequence of tensors.\n\nArgs:\nsequence: A sequence of tensors.\nbooks: The bookkeeper.\ntensor_shape: An optional shape that will be set on the Tensor or verified\nto match the tensor.\nReturns:\nA layer.", "source": "codesearchnet"}
{"code": "def stops_when(iterable, condition):\n    \n    \n    if not callable(condition):\n        cond_value = condition\n\n        def condition(x):\n            return x == cond_value\n    return itertools.takewhile(lambda x: not condition(x), iterable)", "docstring": "Stop yielding items when a condition arise.\n\nArgs:\niterable: the iterable to filter.\ncondition: if the callable returns True once, stop yielding\nitems. If it's not a callable, it will be converted\nto one as `lambda condition: condition == item`.\n\nExample:\n\n>>> list(stops_when(range(10), lambda x: x > 5))\n[0, 1, 2, 3, 4, 5]\n>>> list(stops_when(range(10), 7))\n[0, 1, 2, 3, 4, 5, 6]", "source": "juraj-google-style"}
{"code": "def dbclass(self, value):\n    if (not is_valid_dbclass(value)):\n        raise AttributeError(\"'{}' is not a valid database type\".format(value))\n    self._class = value\n    self._connectionXML.set('class', value)", "docstring": "Set the connection's dbclass property.\n\nArgs:\nvalue:  New dbclass value. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def _live_tensors(f, attr_name='inputs'):\n    node, _ = parser.parse_entity(f, ())\n    entity_info = transformer.EntityInfo(name=f.__name__, source_code=None, source_file=None, future_features=(), namespace=sys.modules[f.__module__].__dict__)\n    ctx = transformer.Context(entity_info, None, None)\n    graphs = cfg.build(node)\n    node = qual_names.resolve(node)\n    node = activity.resolve(node, ctx, None)\n    node = reaching_fndefs.resolve(node, ctx, graphs)\n    node = liveness.resolve(node, ctx, graphs)\n    op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)\n    op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)\n    special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name,))\n    node = special_tracker.visit(node)\n    live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)\n    inputs_outputs_used_qns = set()\n    for v in special_tracker.complex_reads:\n        if v == op_inputs_outputs_name:\n            return _ALL\n    for v in live_vars_in:\n        if v in special_tracker.reads:\n            if v.has_subscript() and v.parent == op_inputs_outputs_name:\n                inputs_outputs_used_qns.add(v)\n            elif v == op_inputs_outputs_name:\n                return _ALL\n    function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)\n    node = function_calls_tracker.visit(node)\n    input_output_indices = set()\n    for called_f in function_calls_tracker.calls:\n        child_indices = _live_tensors(called_f, attr_name=attr_name)\n        if child_indices is _ALL:\n            return _ALL\n        input_output_indices |= child_indices\n    for v in inputs_outputs_used_qns:\n        assert v.has_subscript()\n        _, subscript = v.qn\n        if not subscript.is_simple():\n            return _ALL\n        subscript_val, = subscript.qn\n        if not isinstance(subscript_val, qual_names.Literal) and (not isinstance(subscript_val.value, int)):\n            return _ALL\n        input_output_indices.add(subscript_val.value)\n    return input_output_indices", "docstring": "Returns the indices of the used inputs.\n\nNote: This currently only handles direct index accesses e.g. op.inputs[1].\nIf the function has slicing or list comprehension on attr_name then returns\n_ALL. This ensure that this is correct even if inefficient.\n\nArgs:\nf: A grad function, taking the op as first argument.\nattr_name: op attr to track. \"inputs\" or \"outputs\".\n\nReturns:\nEither one of:\n* set of integers representing individual indices of inputs used\n* the value _ALL, if indices are used but cannot be determined which\n* empty set, if no inputs are used", "source": "github-repos"}
{"code": "def run_std_server(self):\n    config = tf.estimator.RunConfig()\n    server = tf.train.Server(config.cluster_spec, job_name=config.task_type, task_index=config.task_id, protocol=config.protocol)\n    server.join()", "docstring": "Starts a TensorFlow server and joins the serving thread.\n\nTypically used for parameter servers.\n\nRaises:\nValueError: if not enough information is available in the estimator's\nconfig to create a server.", "source": "codesearchnet"}
{"code": "def get_extended_surface_mesh(self, repeat=(5, 5, 1)):\n    surf_str = Structure.from_sites(self.surface_sites)\n    surf_str.make_supercell(repeat)\n    return surf_str", "docstring": "Gets an extended surface mesh for to use for adsorption\nsite finding by constructing supercell of surface sites\n\nArgs:\nrepeat (3-tuple): repeat for getting extended surface mesh", "source": "codesearchnet"}
{"code": "def tag(self, resource_id):\n        \n        self._request_uri = '{}/{}'.format(self._request_uri, self.tcex.safetag(resource_id))", "docstring": "Update the request URI to include the Tag for specific retrieval.\n\nArgs:\nresource_id (string): The tag name.", "source": "juraj-google-style"}
{"code": "def AddMemberDefinition(self, member_definition):\n    \n    self._byte_size = None\n    self.members.append(member_definition)\n\n    if self.sections:\n      section_definition = self.sections[-1]\n      section_definition.members.append(member_definition)", "docstring": "Adds a member definition.\n\nArgs:\nmember_definition (DataTypeDefinition): member data type definition.", "source": "juraj-google-style"}
{"code": "def jt_aggregate(func, is_create=False, has_pk=False):\n\n    def helper(kwargs, obj):\n        'The helper function preceding actual function that aggregates\\n        unified jt fields.\\n        '\n        unified_job_template = None\n        for item in UNIFIED_JT:\n            if (kwargs.get(item, None) is not None):\n                jt_id = kwargs.pop(item)\n                if (unified_job_template is None):\n                    unified_job_template = (item, jt_id)\n                else:\n                    raise exc.UsageError('More than one unified job template fields provided, please tighten your criteria.')\n        if (unified_job_template is not None):\n            kwargs['unified_job_template'] = unified_job_template[1]\n            obj.identity = tuple((list(obj.identity) + ['unified_job_template']))\n            return '/'.join([UNIFIED_JT[unified_job_template[0]], str(unified_job_template[1]), 'schedules/'])\n        elif is_create:\n            raise exc.UsageError('You must provide exactly one unified job template field during creation.')\n\n    def decorator_without_pk(obj, *args, **kwargs):\n        old_endpoint = obj.endpoint\n        new_endpoint = helper(kwargs, obj)\n        if is_create:\n            obj.endpoint = new_endpoint\n        result = func(obj, *args, **kwargs)\n        obj.endpoint = old_endpoint\n        return result\n\n    def decorator_with_pk(obj, pk=None, *args, **kwargs):\n        old_endpoint = obj.endpoint\n        new_endpoint = helper(kwargs, obj)\n        if is_create:\n            obj.endpoint = new_endpoint\n        result = func(obj, *args, pk=pk, **kwargs)\n        obj.endpoint = old_endpoint\n        return result\n    decorator = (decorator_with_pk if has_pk else decorator_without_pk)\n    for item in CLICK_ATTRS:\n        setattr(decorator, item, getattr(func, item, []))\n    decorator.__doc__ = func.__doc__\n    return decorator", "docstring": "Decorator to aggregate unified_jt-related fields.\n\nArgs:\nfunc: The CURD method to be decorated.\nis_create: Boolean flag showing whether this method is create.\nhas_pk: Boolean flag showing whether this method uses pk as argument.\n\nReturns:\nA function with necessary click-related attributes whose keyworded\narguments are aggregated.\n\nRaises:\nexc.UsageError: Either more than one unified jt fields are\nprovided, or none is provided when is_create flag is set.", "source": "codesearchnet"}
{"code": "def mount_share_at_path(share_path, mount_path):\n    sh_url = CFURLCreateWithString(None, share_path, None)\n    mo_url = CFURLCreateWithString(None, mount_path, None)\n    open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}\n    mount_options = {NetFS.kNetFSAllowSubMountsKey: True, NetFS.kNetFSMountAtMountDirKey: True}\n    (result, output) = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None, open_options, mount_options, None)\n    if (result != 0):\n        raise Exception(('Error mounting url \"%s\" at path \"%s\": %s' % (share_path, mount_path, output)))\n    return str(output[0])", "docstring": "Mounts a share at the specified path\n\nArgs:\nshare_path: String URL with all auth info to connect to file share.\nmount_path: Path to mount share on.\n\nReturns:\nThe mount point or raises an error", "source": "codesearchnet"}
{"code": "def _from_string(cls, serialized):\n        \n        try:\n            usage_key, aside_type = _split_keys_v1(serialized)\n            return cls(UsageKey.from_string(usage_key), aside_type)\n        except ValueError as exc:\n            raise InvalidKeyError(cls, exc.args)", "docstring": "Return an instance of `cls` parsed from its `serialized` form.\n\nArgs:\ncls: The :class:`OpaqueKey` subclass.\nserialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed.\n\nRaises:\nInvalidKeyError: Should be raised if `serialized` is not a valid serialized key\nunderstood by `cls`.", "source": "juraj-google-style"}
{"code": "def get_raw_mempool(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_RAW_MEMPOOL, id=id, endpoint=endpoint)", "docstring": "Returns the tx that are in the memorypool of the endpoint\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def delete_folder(self, folder_id, recursive=True):\n        \n        return self.__request(\"DELETE\", \"folders/%s\" % (folder_id, ),\n                        querystring={'recursive': unicode(recursive).lower()})", "docstring": "Delete an existing folder\n\nArgs:\nfolder_id (int): ID of the folder to delete.\nrecursive (bool): Delete all subfolder if True.\n\nReturns:\ndict. Response from Box.\n\nRaises:\nBoxError: An error response is returned from Box (status_code >= 400).\n\nBoxHttpResponseError: Response from Box is malformed.\n\nrequests.exceptions.*: Any connection related problem.", "source": "juraj-google-style"}
{"code": "def setup(template, version=None):\n    \n    temple.check.is_git_ssh_path(template)\n    temple.check.not_in_git_repo()\n\n    repo_path = temple.utils.get_repo_path(template)\n    msg = (\n        'You will be prompted for the parameters of your new project.'\n        ' Please read the docs at https:\n    ).format(repo_path)\n    print(msg)\n\n    cc_repo_dir, config = temple.utils.get_cookiecutter_config(template, version=version)\n\n    if not version:\n        with temple.utils.cd(cc_repo_dir):\n            ret = temple.utils.shell('git rev-parse HEAD', stdout=subprocess.PIPE)\n            version = ret.stdout.decode('utf-8').strip()\n\n    _generate_files(repo_dir=cc_repo_dir, config=config, template=template, version=version)", "docstring": "Sets up a new project from a template\n\nNote that the `temple.constants.TEMPLE_ENV_VAR` is set to 'setup' during the duration\nof this function.\n\nArgs:\ntemplate (str): The git SSH path to a template\nversion (str, optional): The version of the template to use when updating. Defaults\nto the latest version", "source": "juraj-google-style"}
{"code": "def __init__(self, name=None):\n    self._name = name\n    self._items = []", "docstring": "Menu constructor.\n\nArgs:\nname: (str or None) name of this menu.", "source": "github-repos"}
{"code": "def _write(self, save_path, options=None):\n    write_start_time = time.time()\n    if not self._initialized:\n        self._ensure_initialized()\n    else:\n        self._queue.join()\n        self._copy_to_cpu()\n    self._check_async_thread_error()\n    context.async_wait()\n    self._save_file_prefix = save_path\n    self._use_checkpoint_save = False\n    self._checkpoint_options = copy.copy(options) if options else None\n    if self._checkpoint_options:\n        self._checkpoint_options.experimental_enable_async_checkpoint = False\n    self._queue.put(True)\n    write_end_time = time.time()\n    metrics.AddCheckpointWriteDuration(api_label=_ASYNC_CHECKPOINT, microseconds=_get_duration_microseconds(write_start_time, write_end_time))\n    return save_path", "docstring": "Save the checkpointed variables.\n\nThis method has exactly the same logic as save(), except it does not\nincrement the underlying save_counter, which is done by the caller, e.g.,\nCheckpointManager.\n\nArgs:\nsave_path: The file prefix of the checkpoint file.\noptions: Optional CheckpointOption instance.\n\nReturns:\nThe full path of the checkpoint file.", "source": "github-repos"}
{"code": "def get_field(self, field_type):\n        \n        for field in self.oxm_match_fields:\n            if field.oxm_field == field_type:\n                return field.oxm_value\n\n        return None", "docstring": "Return the value for the 'field_type' field in oxm_match_fields.\n\nArgs:\nfield_type (~pyof.v0x04.common.flow_match.OxmOfbMatchField,\n~pyof.v0x04.common.flow_match.OxmMatchFields):\nThe type of the OXM field you want the value.\n\nReturns:\nThe integer number of the 'field_type' if it exists. Otherwise\nreturn None.", "source": "juraj-google-style"}
{"code": "def get_bond_order(sp1, sp2, dist, tol=0.2, default_bl=None):\n    all_lengths = obtain_all_bond_lengths(sp1, sp2, default_bl)\n    lengths_list = ([(all_lengths[1] * (1 + tol))] + [all_lengths[(idx + 1)] for idx in range(len(all_lengths))])\n    trial_bond_order = 0\n    while (trial_bond_order < len(lengths_list)):\n        if (lengths_list[trial_bond_order] < dist):\n            if (trial_bond_order == 0):\n                return trial_bond_order\n            else:\n                low_bl = lengths_list[trial_bond_order]\n                high_bl = lengths_list[(trial_bond_order - 1)]\n                return (trial_bond_order - ((dist - low_bl) / (high_bl - low_bl)))\n        trial_bond_order += 1\n    if (dist < (lengths_list[(- 1)] * (1 - tol))):\n        warnings.warn(('%.2f angstrom distance is too short for %s and %s' % (dist, sp1, sp2)))\n    return (trial_bond_order - 1)", "docstring": "Calculate the bond order given the distance of 2 species\n\nArgs:\nsp1 (Specie): First specie.\nsp2 (Specie): Second specie.\ndist: Their distance in angstrom\ntol (float): Relative tolerance to test. Basically, the code\nchecks if the distance between the sites is larger than\n(1 + tol) * the longest bond distance or smaller than\n(1 - tol) * the shortest bond distance to determine if\nthey are bonded or the distance is too short.\nDefaults to 0.2.\ndefault_bl: If a particular type of bond does not exist, use this\nbond length (bond order = 1). If None, a ValueError will be thrown.\n\nReturns:\nFloat value of bond order. For example, for C-C bond in benzene,\nreturn 1.7.", "source": "codesearchnet"}
{"code": "def fit(self, X):\n        \n        LOGGER.debug('Fitting Gaussian Copula')\n        column_names = self.get_column_names(X)\n        distribution_class = import_object(self.distribution)\n\n        for column_name in column_names:\n            self.distribs[column_name] = distribution_class()\n            column = self.get_column(X, column_name)\n            self.distribs[column_name].fit(column)\n\n        self.covariance = self._get_covariance(X)\n        self.fitted = True", "docstring": "Compute the distribution for each variable and then its covariance matrix.\n\nArgs:\nX(numpy.ndarray or pandas.DataFrame): Data to model.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def trace_stop(self):\n        \n        cmd = enums.JLinkTraceCommand.STOP\n        res = self._dll.JLINKARM_TRACE_Control(cmd, 0)\n        if (res == 1):\n            raise errors.JLinkException('Failed to stop trace.')\n        return None", "docstring": "Stops collecting trace data.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def events_from_multifile_logdir(logdir):\n    assert gfile.Exists(logdir)\n    files = [file for file in gfile.ListDirectory(logdir) if 'tfevents' in file]\n    return {file: events_from_file(os.path.join(logdir, file)) for file in files}", "docstring": "Returns map of filename to events for all `tfevents` files in the logdir.\n\nArgs:\nlogdir: The directory from which to load events.\n\nReturns:\nA dict mapping from relative filenames to lists of tf.Event protos.\n\nRaises:\nAssertionError: If logdir does not contain exactly one file.", "source": "github-repos"}
{"code": "def _restore_from_tensors(self, restored_tensors):\n    raise NotImplementedError", "docstring": "Restores checkpointed values to this `Trackable`.\n\nPlease see the documentation for `Trackable._serialize_to_tensors`.\n\nArgs:\nrestored_tensors: A dictionary mapping names to tensors. The keys to this\ndictionary matches the names passed to _serialize_to_tensors.\n\nReturns:\nAn op that runs the restoration.", "source": "github-repos"}
{"code": "def generate_defect_structure(self, supercell=(1, 1, 1)):\n        \n        defect_structure = self.bulk_structure.copy()\n        defect_structure.make_supercell(supercell)\n\n        \n        struct_for_defect_site = Structure( self.bulk_structure.copy().lattice,\n                                             [self.site.specie],\n                                             [self.site.frac_coords],\n                                             to_unit_cell=True)\n        struct_for_defect_site.make_supercell(supercell)\n        defect_site = struct_for_defect_site[0]\n\n        poss_deflist = sorted(\n            defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1])\n        defindex = poss_deflist[0][2]\n        defect_structure.remove_sites([defindex])\n        defect_structure.set_charge(self.charge)\n        return defect_structure", "docstring": "Returns Defective Vacancy structure, decorated with charge\nArgs:\nsupercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix", "source": "juraj-google-style"}
{"code": "def adversary(self, name, **kwargs):\n    group_obj = Adversary(name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Adversary data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Adversary.", "source": "codesearchnet"}
{"code": "def in_top_k(predictions, targets, k):\n    return nn.in_top_k(predictions, targets, k)", "docstring": "Returns whether the `targets` are in the top `k` `predictions`.\n\nArgs:\npredictions: A tensor of shape `(batch_size, classes)` and type `float32`.\ntargets: A 1D tensor of length `batch_size` and type `int32` or `int64`.\nk: An `int`, number of top elements to consider.\n\nReturns:\nA 1D tensor of length `batch_size` and type `bool`.\n`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`\nvalues of `predictions[i]`.", "source": "github-repos"}
{"code": "def status(self, **kwargs):\n        \n        path = '/geo_nodes/%s/status' % self.get_id()\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get the status of the geo node.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\ndict: The status of the geo node", "source": "juraj-google-style"}
{"code": "def forward(self, inputs: torch.Tensor):\n    if 'batch' in self.norm_mlp.lower():\n        inputs_reshaped = torch.reshape(inputs, (inputs.shape[0] * inputs.shape[1], inputs.shape[2], inputs.shape[3]))\n        inputs_reshaped = self.norm(inputs_reshaped)\n        inputs = torch.reshape(inputs_reshaped, inputs.shape)\n    else:\n        inputs = self.norm(inputs)\n    return inputs", "docstring": "Args:\ninputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):\nInput to the normalization layer.\nReturns:\n`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`", "source": "github-repos"}
{"code": "def get_passage(self, offset: int) -> BioCPassage or None:\n        \n        for passage in self.passages:\n            if passage.offset == offset:\n                return passage\n        return None", "docstring": "Gets passage\n\nArgs:\noffset: passage offset\n\nReturn:\nthe passage with specified offset", "source": "juraj-google-style"}
{"code": "def banner_print(msg, color='', width=60, file=sys.stdout, logger=_LOG):\n    if logger:\n        logger.debug(ANSI_ESC_RE.sub('', msg))\n    if CLI_QUIET:\n        return\n    lpad = (int(math.ceil((((width - _printed_len(msg)) - 2) / 2.0))) * '=')\n    rpad = (int(math.floor((((width - _printed_len(msg)) - 2) / 2.0))) * '=')\n    file.write('{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'.format(sep=_linesep_for_file(file), color=color, lpad=lpad, msg=msg, rpad=rpad, reset=colorama.Style.RESET_ALL))\n    file.flush()", "docstring": "Print the message as a banner with a fixed width.\n\nAlso logs the message (un-bannered) to the given logger at the debug level.\n\nArgs:\nmsg: The message to print.\ncolor: Optional colorama color string to be applied to the message. You can\nconcatenate colorama color strings together in order to get any set of\neffects you want.\nwidth: Total width for the resulting banner.\nfile: A file object to which the banner text will be written. Intended for\nuse with CLI output file objects like sys.stdout.\nlogger: A logger to use, or None to disable logging.\n\nExample:\n\n>>> banner_print('Foo Bar Baz')\n\n======================== Foo Bar Baz =======================", "source": "codesearchnet"}
{"code": "def get_items_by_ids(self, item_ids, item_type=None):\n    urls = [urljoin(self.item_url, f'{i}.json') for i in item_ids]\n    result = self._run_async(urls=urls)\n    items = [Item(r) for r in result if r]\n    if item_type:\n        return [item for item in items if (item.item_type == item_type)]\n    else:\n        return items", "docstring": "Given a list of item ids, return all the Item objects\n\nArgs:\nitem_ids (obj): List of item IDs to query\nitem_type (str): (optional) Item type to filter results with\n\nReturns:\nList of `Item` objects for given item IDs and given item type", "source": "codesearchnet"}
{"code": "def remove(self, block_id):\n    with self._mutex:\n        entry = self._block_map[block_id]\n        self._queue.remove(entry)", "docstring": "Remove a Processing Block from the queue.\n\nArgs:\nblock_id (str):", "source": "codesearchnet"}
{"code": "def _start_services_on_ads(ads):\n    \n    running_ads = []\n    for ad in ads:\n        running_ads.append(ad)\n        start_logcat = not getattr(ad, KEY_SKIP_LOGCAT,\n                                   DEFAULT_VALUE_SKIP_LOGCAT)\n        try:\n            ad.services.register(\n                SERVICE_NAME_LOGCAT, logcat.Logcat, start_service=start_logcat)\n        except Exception:\n            is_required = getattr(ad, KEY_DEVICE_REQUIRED,\n                                  DEFAULT_VALUE_DEVICE_REQUIRED)\n            if is_required:\n                ad.log.exception('Failed to start some services, abort!')\n                destroy(running_ads)\n                raise\n            else:\n                ad.log.exception('Skipping this optional device because some '\n                                 'services failed to start.')", "docstring": "Starts long running services on multiple AndroidDevice objects.\n\nIf any one AndroidDevice object fails to start services, cleans up all\nexisting AndroidDevice objects and their services.\n\nArgs:\nads: A list of AndroidDevice objects whose services to start.", "source": "juraj-google-style"}
{"code": "def isconst(cls, val):\n        \n        return isinstance(val, string_types) and \\\n               ((len(val) == 7 and val[0] == \"", "docstring": "Whether the value is a string color literal.\n\nChecks for a well-formed hexadecimal color value or a named color.\n\nArgs:\nval (str) : the value to check\n\nReturns:\nTrue, if the value is a string color literal", "source": "juraj-google-style"}
{"code": "def get_location(self, locations=None):\n    countries = self.data.get('groups', None)\n    if (not countries):\n        return list()\n    return [Locations.get_location_from_HDX_code(x['name'], locations=locations, configuration=self.configuration) for x in countries]", "docstring": "Return the dataset's location\n\nArgs:\nlocations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.\n\nReturns:\nList[str]: list of locations or [] if there are none", "source": "codesearchnet"}
{"code": "def _update_seek(self, offset, whence):\n        \n        with self._seek_lock:\n            if whence == SEEK_SET:\n                self._seek = offset\n            elif whence == SEEK_CUR:\n                self._seek += offset\n            elif whence == SEEK_END:\n                self._seek = offset + self._size\n            else:\n                raise ValueError('whence value %s unsupported' % whence)\n        return self._seek", "docstring": "Update seek value.\n\nArgs:\noffset (int): Offset.\nwhence (int): Whence.\n\nReturns:\nint: Seek position.", "source": "juraj-google-style"}
{"code": "def check_network_connection(server, port):\n    \n    logger = logging.getLogger(__name__)\n    logger.debug(\"Checking network connection to server '%s'...\", server)\n    try:\n        \n        \n        host = socket.gethostbyname(server)\n        \n        \n        sock = socket.create_connection((host, port), 2)\n        sock.close()\n    except Exception:  \n        logger.debug(\"Network connection not working\")\n        return False\n    logger.debug(\"Network connection working\")\n    return True", "docstring": "Checks if jasper can connect a network server.\nArguments:\nserver -- (optional) the server to connect with (Default:\n\"www.google.com\")\nReturns:\nTrue or False", "source": "juraj-google-style"}
{"code": "def Process(self, path):\n    path = re.sub(self.SYSTEMROOT_RE, '%systemroot%', path, count=1)\n    path = re.sub(self.SYSTEM32_RE, '%systemroot%\\\\\\\\system32', path, count=1)\n    matches_iter = self.WIN_ENVIRON_REGEX.finditer(path)\n    var_names = set((m.group(1).lower() for m in matches_iter))\n    results = [path]\n    for var_name in var_names:\n        try:\n            (var_regex, var_value) = self.vars_map[var_name]\n        except KeyError:\n            continue\n        if isinstance(var_value, string_types):\n            replacements = [var_value]\n        else:\n            replacements = var_value\n        processed_results = []\n        for result in results:\n            for repl in replacements:\n                processed_results.append(var_regex.sub((lambda _: repl), result))\n        results = processed_results\n    return results", "docstring": "Processes a given path.\n\nArgs:\npath: Path (as a string) to post-process.\n\nReturns:\nA list of paths with environment variables replaced with their\nvalues. If the mapping had a list of values for a particular variable,\ninstead of just one value, then all possible replacements will be\nreturned.", "source": "codesearchnet"}
{"code": "def getConParams(virtualhost):\n    return pika.ConnectionParameters(host=settings.RABBITMQ_HOST, port=int(settings.RABBITMQ_PORT), virtual_host=virtualhost, credentials=pika.PlainCredentials(settings.RABBITMQ_USER_NAME, settings.RABBITMQ_USER_PASSWORD))", "docstring": "Connection object builder.\n\nArgs:\nvirtualhost (str): selected virtualhost in rabbitmq\n\nReturns:\npika.ConnectionParameters: object filled by `constants` from\n:class:`edeposit.amqp.settings`.", "source": "codesearchnet"}
{"code": "def __init__(\n        self,\n        function_approximator,\n        batch_size=4,\n        map_size=(10, 10),\n        memory_num=4,\n        repeating_penalty=0.5,\n        enemy_num=2,\n        enemy_init_dist=5\n    ):\n        \n        self.__map_arr = self.__create_map(map_size)\n        self.__agent_pos = self.START_POS\n\n        self.__enemy_num = enemy_num\n        self.__enemy_pos_list = [None] * enemy_num\n        self.__enemy_init_dist = enemy_init_dist\n        self.__create_enemy(self.__map_arr)\n\n        self.__reward_list = []\n        self.__route_memory_list = []\n        self.__memory_num = memory_num\n        self.__repeating_penalty = repeating_penalty\n        \n        self.__batch_size = batch_size\n\n        super().__init__(function_approximator)\n        self.__inferencing_flag = False", "docstring": "Init.\n\nArgs:\nfunction_approximator:  is-a `FunctionApproximator`.\nmap_size:               Size of map.\nmemory_num:             The number of step of agent's memory.\nrepeating_penalty:      The value of penalty in the case that agent revisit.\nenemy_num:              The number of enemies.\nenemy_init_dist:        Minimum euclid distance of initial position of agent and enemies.", "source": "juraj-google-style"}
{"code": "def __init__(self, app_id=None):\n        \n        self.valid = Valid(app_id)\n        self.request = RequestBody()\n        self.response = ResponseBody()\n        self.logic = dict()\n        self.launch = self.register('LaunchRequest')\n        self.intent = self.register\n        self.session_ended = self.register('SessionEndedRequest')", "docstring": "Inits a Skill class with proxy request and response.\n\nArgs:\napp_id: str, default None. Skill application ID, declare\nto validate against application ID in the request.", "source": "juraj-google-style"}
{"code": "def __init__(self, func=None, *, animation_gen, step=.1):\n        \n        if not callable(func):\n            raise TypeError(\"argument 'func' for {!r} must be \"\n                            \"callable\".format(self.__class__.__name__))\n        self._raise_if_annotated(func)\n        self._func = func\n        self._animation_gen = animation_gen\n        self._step = step\n        functools.update_wrapper(self, func)", "docstring": "Constructor.\n\nArgs:\nfunc: If Animate is used without kwargs, then the\nfunction it decorates is passed in here. Otherwise, this is None.\nThis argument should NOT be given directly via keyword assignment.\nanimation_gen: A generator that yields strings for the animation.\nstep: Seconds between each animation frame.", "source": "juraj-google-style"}
{"code": "def make_prior(num_topics, initial_value):\n\n    def _softplus_inverse(x):\n        return np.log(np.expm1(x))\n    logit_concentration = tf.compat.v1.get_variable('logit_concentration', shape=[1, num_topics], initializer=tf.compat.v1.initializers.constant(_softplus_inverse(initial_value)))\n    concentration = _clip_dirichlet_parameters(tf.nn.softplus(logit_concentration))\n\n    def prior():\n        return tfd.Dirichlet(concentration=concentration, name='topics_prior')\n    prior_variables = [logit_concentration]\n    return (prior, prior_variables)", "docstring": "Create the prior distribution.\n\nArgs:\nnum_topics: Number of topics.\ninitial_value: The starting value for the prior parameters.\n\nReturns:\nprior: A `callable` that returns a `tf.distribution.Distribution`\ninstance, the prior distribution.\nprior_variables: A `list` of `Variable` objects, the trainable parameters\nof the prior.", "source": "codesearchnet"}
{"code": "def __init__(self, environ, base_paths=None):\n    \n    self.headers = util.get_headers_from_environ(environ)\n    self.http_method = environ['REQUEST_METHOD']\n    self.url_scheme = environ['wsgi.url_scheme']\n    self.server = environ['SERVER_NAME']\n    self.port = environ['SERVER_PORT']\n    self.path = environ['PATH_INFO']\n    self.request_uri = environ.get('REQUEST_URI')\n    if self.request_uri is not None and len(self.request_uri) < len(self.path):\n      self.request_uri = None\n    self.query = environ.get('QUERY_STRING')\n    self.body = environ['wsgi.input'].read()\n    if self.body and self.headers.get('CONTENT-ENCODING') == 'gzip':\n      \n      \n      \n      \n      \n      try:\n        self.body = zlib.decompress(self.body, 16 + zlib.MAX_WBITS)\n      except zlib.error:\n        pass\n    if _METHOD_OVERRIDE in self.headers:\n      \n      self.http_method = self.headers[_METHOD_OVERRIDE]\n      del self.headers[_METHOD_OVERRIDE]  \n    self.source_ip = environ.get('REMOTE_ADDR')\n    self.relative_url = self._reconstruct_relative_url(environ)\n\n    if not base_paths:\n      base_paths = set()\n    elif isinstance(base_paths, list):\n      base_paths = set(base_paths)\n\n    \n    for base_path in base_paths:\n      if self.path.startswith(base_path):\n        self.path = self.path[len(base_path):]\n        if self.request_uri is not None:\n          self.request_uri = self.request_uri[len(base_path):]\n        self.base_path = base_path\n        break\n    else:\n      raise ValueError('Invalid request path: %s' % self.path)\n\n    if self.query:\n      self.parameters = urlparse.parse_qs(self.query, keep_blank_values=True)\n    else:\n      self.parameters = {}\n    self.body_json = self._process_req_body(self.body) if self.body else {}\n    self.request_id = None\n\n    \n    \n    \n    \n    if isinstance(self.body_json, list):\n      if len(self.body_json) != 1:\n        _logger.warning('Batch requests with more than 1 element aren\\'t '\n                        'supported in devappserver2.  Only the first element '\n                        'will be handled.  Found %d elements.',\n                        len(self.body_json))\n      else:\n        _logger.info('Converting batch request to single request.')\n      self.body_json = self.body_json[0]\n      self.body = json.dumps(self.body_json)\n      self._is_batch = True\n    else:\n      self._is_batch = False", "docstring": "Constructor.\n\nArgs:\nenviron: An environ dict for the request as defined in PEP-333.\n\nRaises:\nValueError: If the path for the request is invalid.", "source": "juraj-google-style"}
{"code": "def tag(self, name, formatter=None):\n        \n        tag = Tag(name, formatter)\n        for tag_data in self._tags:\n            if tag_data.name == name:\n                tag = tag_data\n                break\n        else:\n            self._tags.append(tag)\n        return tag", "docstring": "Return instance of Tag.\n\nArgs:\nname (str): The value for this tag.\nformatter (method, optional): A method that take a tag value and returns a\nformatted tag.\n\nReturns:\nobj: An instance of Tag.", "source": "juraj-google-style"}
{"code": "def ticker(self, contract: Contract) -> Ticker:\n        \n        return self.wrapper.tickers.get(id(contract))", "docstring": "Get ticker of the given contract. It must have been requested before\nwith reqMktData with the same contract object. The ticker may not be\nready yet if called directly after :meth:`.reqMktData`.\n\nArgs:\ncontract: Contract to get ticker for.", "source": "juraj-google-style"}
{"code": "def owned_by(self, owner, also_check_group=False):\n    if also_check_group:\n        return ((self.owner == owner) and (self.group == owner))\n    else:\n        return (self.owner == owner)", "docstring": "Checks if the specified user or user and group own the file.\n\nArgs:\nowner (str): the user (or group) name for which we ask about ownership\nalso_check_group (bool): if set to True, both user owner and group owner checked\nif set to False, only user owner checked\n\nReturns:\nbool: True if owner of the file is the specified owner", "source": "codesearchnet"}
{"code": "def get_mock_ads(num):\n    ads = []\n    for i in range(num):\n        ad = mock.MagicMock(name='AndroidDevice', serial=str(i), h_port=None)\n        ad.skip_logcat = False\n        ads.append(ad)\n    return ads", "docstring": "Generates a list of mock AndroidDevice objects.\n\nThe serial number of each device will be integer 0 through num - 1.\n\nArgs:\nnum: An integer that is the number of mock AndroidDevice objects to\ncreate.", "source": "github-repos"}
{"code": "def _request(self, method, resource_uri, **kwargs):\n    data = kwargs.get('data')\n    response = method((self.API_BASE_URL + resource_uri), json=data, headers=self.headers)\n    response.raise_for_status()\n    return response.json()", "docstring": "Perform a method on a resource.\n\nArgs:\nmethod: requests.`method`\nresource_uri: resource endpoint\nRaises:\nHTTPError\nReturns:\nJSON Response", "source": "codesearchnet"}
{"code": "def release_docs_side_effect(content):\n    result = content.replace('{', '{{').replace('}', '}}')\n    result = result.replace('{{version}}', '{version}')\n    result = result.replace('{{circleci_build}}', '{circleci_build}')\n    result = result.replace('{{travis_build}}', '{travis_build}')\n    result = result.replace('{{appveyor_build}}', '{appveyor_build}')\n    result = result.replace('{{coveralls_build}}', '{coveralls_build}')\n    return result", "docstring": "Updates the template so that curly braces are escaped correctly.\n\nArgs:\ncontent (str): The template for ``docs/index.rst.release.template``.\n\nReturns:\nstr: The updated template with properly escaped curly braces.", "source": "codesearchnet"}
{"code": "def rep1(parser: Union[Parser, Sequence[Input]]) -> RepeatedOnceParser:\n    \n    if isinstance(parser, str):\n        parser = lit(parser)\n    return RepeatedOnceParser(parser)", "docstring": "Match a parser one or more times repeatedly.\n\nThis matches ``parser`` multiple times in a row. If it matches as least\nonce, it returns a list of values from each time ``parser`` matched. If it\ndoes not match ``parser`` at all, it fails.\n\nArgs:\nparser: Parser or literal", "source": "juraj-google-style"}
{"code": "def _from_row_partition(cls, values, row_partition, validate=True):\n    if not isinstance(row_partition, RowPartition):\n        raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')\n    if not isinstance(validate, bool):\n        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n    values, row_partition = cls._convert_values_and_partition(values, row_partition, 'partition')\n    if row_partition._has_precomputed_value_rowids():\n        value_rowids_shape = row_partition.value_rowids().shape\n        values.shape[:1].assert_is_compatible_with(value_rowids_shape)\n    if validate:\n        msg = 'Arguments to _from_row_partition do not form a valid RaggedTensor'\n        nvals = _nrows(values, row_partition.dtype)\n        checks = [check_ops.assert_equal(math_ops.cast(row_partition.nvals(), row_partition.dtype), nvals, message=msg)]\n        if not isinstance(values, RaggedTensor):\n            checks.append(check_ops.assert_rank_at_least(values, 1))\n        row_partition = row_partition._with_dependencies(checks)\n    return cls(values=values, internal=True, row_partition=row_partition)", "docstring": "Creates a `RaggedTensor` with a row partition.\n\nThis is used as a way for RaggedTensors to share row partitions.\n\nThe outer dimension of values must be equal to `partition.nvals()`.\n\nArgs:\nvalues: A potentially ragged tensor.\nrow_partition: a `RowPartition`: can be shared between tensors.\nvalidate: If true, then use assertions to check that the arguments form a\nvalid `RaggedTensor`.\n\nReturns:\nA `RaggedTensor`.  `result.rank = values.rank + 1`.\n`result.ragged_rank = values.ragged_rank + 1`.\n\nRaises:\nValueError: If partition.nvals() != _nrows(values)", "source": "github-repos"}
{"code": "def __init__(self, *, allow_partial: bool, accessor_writable: bool, sealed: bool, root_path: Optional[utils.KeyPath], init_super: bool=True):\n    self._set_raw_attr('_allow_partial', allow_partial)\n    self._set_raw_attr('_accessor_writable', accessor_writable)\n    self._set_raw_attr('_sealed', sealed)\n    self._set_raw_attr('_sym_parent', None)\n    self._set_raw_attr('_sym_path', root_path or utils.KeyPath())\n    self._set_raw_attr('_sym_puresymbolic', None)\n    self._set_raw_attr('_sym_missing_values', None)\n    self._set_raw_attr('_sym_nondefault_values', None)\n    origin = Origin(None, '__init__') if flags.is_tracking_origin() else None\n    self._set_raw_attr('_sym_origin', origin)\n    if init_super:\n        super().__init__()\n    else:\n        object.__init__(self)", "docstring": "Constructor.\n\nArgs:\nallow_partial: Whether to allow required fields to be MISSING_VALUE or\npartial.\naccessor_writable: Whether to allow write access via attributes. This flag\nis useful when we want to enforce update of fields using `rebind`\nmethod, which leads to better trackability and batched field update\nnotification.\nsealed: Whether object is sealed that cannot be changed. This flag is\nuseful when we don't want downstream to modify the object.\nroot_path: KeyPath of current object in its context (object tree).\ninit_super: If True, call super.__init__, otherwise short-circuit. This\nflag is useful when user want to explicitly implement `__init__` for\nmulti-inheritance, which is needed to pass different arguments to\ndifferent bases. Please see `symbolic_test.py#testMultiInheritance`\nfor more details.", "source": "github-repos"}
{"code": "def is_scalar(value):\n    \n    return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))", "docstring": "Test if the given value is a scalar.\n\nThis function also works with memory mapped array values, in contrast to the numpy is_scalar method.\n\nArgs:\nvalue: the value to test for being a scalar value\n\nReturns:\nboolean: if the given value is a scalar or not", "source": "juraj-google-style"}
{"code": "def remove_option(self, section, name, value=None):\n    if self._is_live():\n        raise RuntimeError('Submitted units cannot update their options')\n    removed = 0\n    for option in list(self._data['options']):\n        if (option['section'] == section):\n            if (option['name'] == name):\n                if ((value is None) or (option['value'] == value)):\n                    self._data['options'].remove(option)\n                    removed += 1\n    if (removed > 0):\n        return True\n    return False", "docstring": "Remove an option from a unit\n\nArgs:\nsection (str): The section to remove from.\nname (str): The item to remove.\nvalue (str, optional): If specified, only the option matching this value will be removed\nIf not specified, all options with ``name`` in ``section`` will be removed\n\nReturns:\nTrue: At least one item was removed\nFalse: The item requested to remove was not found", "source": "codesearchnet"}
{"code": "def determine_alert(self, action_schedule, issue_creation_time, last_alert):\n        \n        issue_age = time.time() - issue_creation_time\n        alert_schedule_lookup = {pytimeparse.parse(action_time): action_time for action_time in action_schedule}\n        alert_schedule = sorted(alert_schedule_lookup.keys())\n        last_alert_time = pytimeparse.parse(last_alert)\n\n        for alert_time in alert_schedule:\n            if last_alert_time < alert_time <= issue_age and last_alert_time != alert_time:\n                return alert_schedule_lookup[alert_time]\n        else:\n            return None", "docstring": "Determine if we need to trigger an alert\n\nArgs:\naction_schedule (`list`): A list contains the alert schedule\nissue_creation_time (`int`): Time we create the issue\nlast_alert (`str`): Time we sent the last alert\n\nReturns:\n(`None` or `str`)\nNone if no alert should be sent. Otherwise return the alert we should send", "source": "juraj-google-style"}
{"code": "def update(self, identity, params=None, headers=None):\n    path = self._sub_url_params('/payments/:identity', {'identity': identity})\n    if (params is not None):\n        params = {self._envelope_key(): params}\n    response = self._perform_request('PUT', path, params, headers, retry_failures=True)\n    return self._resource_for(response)", "docstring": "Update a payment.\n\nUpdates a payment object. This accepts only the metadata parameter.\n\nArgs:\nidentity (string): Unique identifier, beginning with \"PM\".\nparams (dict, optional): Request body.\n\nReturns:\nListResponse of Payment instances", "source": "codesearchnet"}
{"code": "def parameter_attention(x, total_key_depth, total_value_depth, output_depth, memory_rows, num_heads, dropout_rate, name=None):\n    with tf.variable_scope(name, default_name='parameter_attention', values=[x]):\n        head_size_k = (total_key_depth \n        head_size_v = (total_value_depth \n        var_shape_k = [num_heads, memory_rows, head_size_k]\n        var_shape_v = [num_heads, memory_rows, head_size_v]\n        k = tf.get_variable('k', var_shape_k, initializer=tf.random_normal_initializer(0, ((output_depth ** (- 0.5)) * (num_heads ** 0.5))))\n        v = tf.get_variable('v', var_shape_v, initializer=tf.random_normal_initializer(0, ((output_depth ** (- 0.5)) * (output_depth ** 0.5))))\n        batch_size = common_layers.shape_list(x)[0]\n        length = common_layers.shape_list(x)[1]\n        q = common_layers.dense(x, total_key_depth, use_bias=False, name='q_transform')\n        if dropout_rate:\n            v = tf.nn.dropout(v, (1.0 - dropout_rate), noise_shape=[num_heads, memory_rows, 1])\n        q = tf.reshape(q, [batch_size, length, num_heads, head_size_k])\n        q = tf.transpose(q, [2, 0, 1, 3])\n        q = tf.reshape(q, [num_heads, (batch_size * length), head_size_k])\n        weights = tf.matmul(q, k, transpose_b=True)\n        weights = tf.nn.softmax(weights)\n        y = tf.matmul(weights, v)\n        y = tf.reshape(y, [num_heads, batch_size, length, head_size_v])\n        y = tf.transpose(y, [1, 2, 0, 3])\n        y = tf.reshape(y, [batch_size, length, total_value_depth])\n        y.set_shape([None, None, total_value_depth])\n        y = common_layers.dense(y, output_depth, use_bias=False, name='output_transform')\n        return y", "docstring": "Attention over parameters.\n\nWe use the same multi-headed attention as in the other layers, but the memory\nkeys and values are model parameters. There are no linear transformation on\nthe keys or values.\n\nWe are also a bit more careful about memory usage, since the number of\nmemory positions may be very large.\n\nArgs:\nx: a Tensor with shape [batch, length_q, channels]\ntotal_key_depth: an integer\ntotal_value_depth: an integer\noutput_depth: an integer\nmemory_rows: an integer\nnum_heads: an integer dividing total_key_depth and total_value_depth\ndropout_rate: a floating point number\nname: an optional string\n\nReturns:\nA Tensor with shape [batch, length_q, output_depth].", "source": "codesearchnet"}
{"code": "def fixings(self, date: types.DateTensor, fixing_type: curve_types.RateIndexCurve) -> Tuple[tf.Tensor, daycount_conventions.DayCountConventions]:\n    index_type = fixing_type.index.type.value\n    currency = fixing_type.currency.value\n    if isinstance(date, tf.Tensor):\n        date = dateslib.dates_from_tensor(date)\n    else:\n        date = dateslib.convert_to_date_tensor(date)\n    try:\n        curve_data = self._market_data_dict['rates'][currency][index_type]\n        fixing_dates = curve_data['fixing_dates']\n        fixing_rates = curve_data['fixing_rates']\n    except KeyError:\n        return (tf.zeros(tf.shape(date.ordinal()), dtype=self._dtype, name='fixings'), None)\n    if isinstance(fixing_dates, tf.Tensor):\n        fixing_dates = dateslib.dates_from_tensor(fixing_dates)\n    else:\n        fixing_dates = dateslib.convert_to_date_tensor(fixing_dates)\n    if 'fixing_daycount' not in curve_data:\n        raise ValueError(f'`fixing_daycount` should be specified for {index_type}.')\n    fixing_daycount = curve_data['fixing_daycount']\n    fixing_daycount = daycount_conventions.DayCountConventions(fixing_daycount)\n    fixing_rates = tf.convert_to_tensor(fixing_rates, dtype=self._dtype)\n    fixing_dates_ordinal = fixing_dates.ordinal()\n    date_ordinal = date.ordinal()\n    batch_shape = tf.shape(date_ordinal)[:-1]\n    fixing_dates_ordinal += tf.expand_dims(tf.zeros(batch_shape, dtype=tf.int32), axis=-1)\n    inds = tf.searchsorted(fixing_dates_ordinal, date_ordinal)\n    inds = tf.maximum(inds, 0)\n    inds = tf.minimum(inds, tf.shape(fixing_dates_ordinal)[-1] - 1)\n    return (tf.gather(fixing_rates, inds), fixing_daycount)", "docstring": "Returns past fixings of the market rates at the specified dates.\n\nThe fixings are represented asannualized simple rates. When fixings are not\nprovided for a curve, they are assumed to be zero for any date. Otherwise,\nit is assumed that the fixings are a left-continuous piecewise-constant\nof time with jumps being the supplied fixings.\n\nArgs:\ndate: The dates at which the fixings are computed. Should precede the\nvaluation date. When passed as an integet `Tensor`, should be of shape\n`batch_shape + [3]` and contain `[year, month, day]` for each date.\nfixing_type: Rate index curve type for which the fixings are computed.\n\nReturns:\nA `Tensor` of the same shape of `date` and of `self.dtype` dtype.\nRepresents fixings at the requested `date`.", "source": "github-repos"}
{"code": "def to_las3(self, use_descriptions=False, dlm=\",\", source=\"Striplog\"):\n        \n        data = self.to_csv(use_descriptions=use_descriptions,\n                           dlm=dlm,\n                           header=False)\n\n        return templates.section.format(name='Lithology',\n                                        short=\"LITH\",\n                                        source=source,\n                                        data=data)", "docstring": "Returns an LAS 3.0 section string.\n\nArgs:\nuse_descriptions (bool): Whether to use descriptions instead\nof summaries, if available.\ndlm (str): The delimiter.\nsource (str): The sourse of the data.\n\nReturns:\nstr: A string forming Lithology section of an LAS3 file.", "source": "juraj-google-style"}
{"code": "def next_state_fluent_ordering(self) -> List[str]:\n    key = (lambda x: x.name)\n    return [cpf.name for cpf in sorted(self.state_cpfs, key=key)]", "docstring": "The list of next state-fluent names in canonical order.\n\nReturns:\nList[str]: A list of fluent names.", "source": "codesearchnet"}
{"code": "def items_purchased(self, category=None):\n        \n        return self._items(commerce.Cart.STATUS_PAID, category=category)", "docstring": "Aggregates the items that this user has purchased.\n\nArguments:\ncategory (Optional[models.inventory.Category]): the category\nof items to restrict to.\n\nReturns:\n[ProductAndQuantity, ...]: A list of product-quantity pairs,\naggregating like products from across multiple invoices.", "source": "juraj-google-style"}
{"code": "def repository_tree(self, path='', ref='', recursive=False, **kwargs):\n    gl_path = ('/projects/%s/repository/tree' % self.get_id())\n    query_data = {'recursive': recursive}\n    if path:\n        query_data['path'] = path\n    if ref:\n        query_data['ref'] = ref\n    return self.manager.gitlab.http_list(gl_path, query_data=query_data, **kwargs)", "docstring": "Return a list of files in the repository.\n\nArgs:\npath (str): Path of the top folder (/ by default)\nref (str): Reference to a commit or branch\nrecursive (bool): Whether to get the tree recursively\nall (bool): If True, return all the items, without pagination\nper_page (int): Number of items to retrieve per request\npage (int): ID of the page to return (starts with page 1)\nas_list (bool): If set to False and no pagination option is\ndefined, return a generator instead of a list\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\nlist: The representation of the tree", "source": "codesearchnet"}
{"code": "def _execute(self, command, data=None, unpack=True):\n        \n        if not data:\n            data = {}\n        if self.session_id is not None:\n            data.setdefault('session_id', self.session_id)\n        data = self._wrap_el(data)\n        res = self.remote_invoker.execute(command, data)\n        ret = WebDriverResult.from_object(res)\n        ret.raise_for_status()\n        ret.value = self._unwrap_el(ret.value)\n        if not unpack:\n            return ret\n        return ret.value", "docstring": "Private method to execute command.\n\nArgs:\ncommand(Command): The defined command.\ndata(dict): The uri variable and body.\nuppack(bool): If unpack value from result.\n\nReturns:\nThe unwrapped value field in the json response.", "source": "juraj-google-style"}
{"code": "def to_json(self, **kwargs):\n    from keras.src.saving import serialization_lib\n    model_config = serialization_lib.serialize_keras_object(self)\n    return json.dumps(model_config, **kwargs)", "docstring": "Returns a JSON string containing the network configuration.\n\nTo load a network from a JSON save file, use\n`keras.models.model_from_json(json_string, custom_objects={...})`.\n\nArgs:\n**kwargs: Additional keyword arguments to be passed to\n`json.dumps()`.\n\nReturns:\nA JSON string.", "source": "github-repos"}
{"code": "def search(self, query_term):\n    fedora_search_url = '/'.join([self.base_url, 'rest', 'fcr:search'])\n    fedora_search_url = '{}?{}'.format(fedora_search_url, urllib.parse.urlencode({'q': query_term}))\n    search_request = urllib.request.Request(fedora_search_url, method='GET')\n    search_request.add_header('Accept', 'text/turtle')\n    try:\n        search_response = urllib.request.urlopen(search_request)\n    except urllib.error.URLError as error:\n        raise error\n    fedora_results = rdflib.Graph().parse(data=search_response.read(), format='turtle')\n    return fedora_results", "docstring": "DEPRECIATED\nMethod takes a query term and searches Fedora Repository using SPARQL\nsearch endpoint and returns a RDF graph of the search results.\n\nArgs:\nquery_term(str): String to search repository\n\nReturns:\nrdflib.Graph()", "source": "codesearchnet"}
{"code": "def _select_in_voltage_range(self, min_voltage=None, max_voltage=None):\n    min_voltage = (min_voltage if (min_voltage is not None) else self.min_voltage)\n    max_voltage = (max_voltage if (max_voltage is not None) else self.max_voltage)\n    return list(filter((lambda p: (min_voltage <= p.voltage <= max_voltage)), self.voltage_pairs))", "docstring": "Selects VoltagePairs within a certain voltage range.\n\nArgs:\nmin_voltage (float): The minimum allowable voltage for a given\nstep.\nmax_voltage (float): The maximum allowable voltage allowable for a\ngiven step.\n\nReturns:\nA list of VoltagePair objects", "source": "codesearchnet"}
{"code": "def from_rfc3339(cls, stamp):\n        \n        with_nanos = _RFC3339_NANOS.match(stamp)\n        if with_nanos is None:\n            raise ValueError(\n                \"Timestamp: {}, does not match pattern: {}\".format(\n                    stamp, _RFC3339_NANOS.pattern\n                )\n            )\n        bare = datetime.datetime.strptime(\n            with_nanos.group(\"no_fraction\"), _RFC3339_NO_FRACTION\n        )\n        fraction = with_nanos.group(\"nanos\")\n        if fraction is None:\n            nanos = 0\n        else:\n            scale = 9 - len(fraction)\n            nanos = int(fraction) * (10 ** scale)\n        return cls(\n            bare.year,\n            bare.month,\n            bare.day,\n            bare.hour,\n            bare.minute,\n            bare.second,\n            nanosecond=nanos,\n            tzinfo=pytz.UTC,\n        )", "docstring": "Parse RFC 3339-compliant timestamp, preserving nanoseconds.\n\nArgs:\nstamp (str): RFC 3339 stamp, with up to nanosecond precision\n\nReturns:\n:class:`DatetimeWithNanoseconds`:\nan instance matching the timestamp string\n\nRaises:\nValueError: if `stamp` does not match the expected format", "source": "juraj-google-style"}
{"code": "def tf2():\n    if tf.__version__.startswith('2.'):\n        return tf\n    elif (hasattr(tf, 'compat') and hasattr(tf.compat, 'v2')):\n        return tf.compat.v2\n    raise ImportError('cannot import tensorflow 2.0 API')", "docstring": "Provide the root module of a TF-2.0 API for use within TensorBoard.\n\nReturns:\nThe root module of a TF-2.0 API, if available.\n\nRaises:\nImportError: if a TF-2.0 API is not available.", "source": "codesearchnet"}
{"code": "def _starts_with_drive_letter(self, file_path):\n    colon = self._matching_string(file_path, ':')\n    return (self.is_windows_fs and (len(file_path) >= 2) and file_path[:1].isalpha and (file_path[1:2] == colon))", "docstring": "Return True if file_path starts with a drive letter.\n\nArgs:\nfile_path: the full path to be examined.\n\nReturns:\n`True` if drive letter support is enabled in the filesystem and\nthe path starts with a drive letter.", "source": "codesearchnet"}
{"code": "def add_timeline_to_sketch(self, sketch_id, index_id):\n    resource_url = '{0:s}/sketches/{1:d}/timelines/'.format(self.api_base_url, sketch_id)\n    form_data = {'timeline': [index_id]}\n    self.session.post(resource_url, json=form_data)", "docstring": "Associate the specified timeline and sketch.\n\nArgs:\nsketch_id (int): ID of sketch\nindex_id (int): ID of timeline to add to sketch", "source": "codesearchnet"}
{"code": "def get_most_severe_consequence(transcripts):\n    most_severe_consequence = None\n    most_severe_score = None\n    for transcript in transcripts:\n        for consequence in transcript['consequence'].split('&'):\n            logger.debug('Checking severity score for consequence: {0}'.format(consequence))\n            severity_score = SEVERITY_DICT.get(consequence)\n            logger.debug('Severity score found: {0}'.format(severity_score))\n            if (severity_score != None):\n                if most_severe_score:\n                    if (severity_score < most_severe_score):\n                        most_severe_consequence = consequence\n                        most_severe_score = severity_score\n                else:\n                    most_severe_consequence = consequence\n                    most_severe_score = severity_score\n    return most_severe_consequence", "docstring": "Get the most severe consequence\n\nGo through all transcripts and get the most severe consequence\n\nArgs:\ntranscripts (list): A list of transcripts to evaluate\n\nReturns:\nmost_severe_consequence (str): The most severe consequence", "source": "codesearchnet"}
{"code": "def to_numpy_array(self, image, rescale=None, channel_first=True):\n    self._ensure_format_supported(image)\n    if isinstance(image, PIL.Image.Image):\n        image = np.array(image)\n    if is_torch_tensor(image):\n        image = image.numpy()\n    rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale\n    if rescale:\n        image = self.rescale(image.astype(np.float32), 1 / 255.0)\n    if channel_first and image.ndim == 3:\n        image = image.transpose(2, 0, 1)\n    return image", "docstring": "Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first\ndimension.\n\nArgs:\nimage (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\nThe image to convert to a NumPy array.\nrescale (`bool`, *optional*):\nWhether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will\ndefault to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.\nchannel_first (`bool`, *optional*, defaults to `True`):\nWhether or not to permute the dimensions of the image to put the channel dimension first.", "source": "github-repos"}
{"code": "def SetCodepage(self, codepage):\n    \n    try:\n      codecs.getencoder(codepage)\n      self._codepage = codepage\n    except LookupError:\n      raise ValueError('Unsupported codepage: {0:s}'.format(codepage))", "docstring": "Sets the codepage.\n\nArgs:\ncodepage (str): codepage.\n\nRaises:\nValueError: if the codepage is not supported.", "source": "juraj-google-style"}
{"code": "def parse_page(raw_page):\n    ret = {'title': get_title(raw_page), 'id': get_id(raw_page)}\n    if (':' in ret['title']):\n        return None\n    ret['revisions'] = get_revisions(raw_page)\n    return ret", "docstring": "Create a dictionary with title, id, and list of revisions.\n\nThe dictionary contains:\n\"title\": a string\n\"id\": an integer\n\"revisions\": a list of strings\n\nArgs:\nraw_page: a string\n\nReturns:\na dictionary, or None in the case of an error.", "source": "codesearchnet"}
{"code": "def on_created(self, event):\n        \n        self._logger.debug('Detected create event on watched path: %s', event.src_path)\n\n        self._process_event(event)", "docstring": "Function called everytime a new file is created.\n\nArgs:\nevent: Event to process.", "source": "juraj-google-style"}
{"code": "def getlibversion():\n    \n\n    status, major_v, minor_v, release, info = _C.Hgetlibversion()\n    _checkErr('getlibversion', status, \"cannot get lib version\")\n    return major_v, minor_v, release, info", "docstring": "Get the library version info.\n\nArgs:\nno argument\nReturns:\n4-element tuple with the following components:\n-major version number (int)\n-minor version number (int)\n-complete library version number (int)\n-additional information (string)\n\nC library equivalent : Hgetlibversion", "source": "juraj-google-style"}
{"code": "def _get_batches_of_transformed_samples(self, index_array):\n    batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)\n    filepaths = self.filepaths\n    for i, j in enumerate(index_array):\n        img = image_utils.load_img(filepaths[j], color_mode=self.color_mode, target_size=self.target_size, interpolation=self.interpolation, keep_aspect_ratio=self.keep_aspect_ratio)\n        x = image_utils.img_to_array(img, data_format=self.data_format)\n        if hasattr(img, 'close'):\n            img.close()\n        if self.image_data_generator:\n            params = self.image_data_generator.get_random_transform(x.shape)\n            x = self.image_data_generator.apply_transform(x, params)\n            x = self.image_data_generator.standardize(x)\n        batch_x[i] = x\n    if self.save_to_dir:\n        for i, j in enumerate(index_array):\n            img = image_utils.array_to_img(batch_x[i], self.data_format, scale=True)\n            fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix, index=j, hash=np.random.randint(10000000.0), format=self.save_format)\n            img.save(os.path.join(self.save_to_dir, fname))\n    if self.class_mode == 'input':\n        batch_y = batch_x.copy()\n    elif self.class_mode in {'binary', 'sparse'}:\n        batch_y = np.empty(len(batch_x), dtype=self.dtype)\n        for i, n_observation in enumerate(index_array):\n            batch_y[i] = self.classes[n_observation]\n    elif self.class_mode == 'categorical':\n        batch_y = np.zeros((len(batch_x), len(self.class_indices)), dtype=self.dtype)\n        for i, n_observation in enumerate(index_array):\n            batch_y[i, self.classes[n_observation]] = 1.0\n    elif self.class_mode == 'multi_output':\n        batch_y = [output[index_array] for output in self.labels]\n    elif self.class_mode == 'raw':\n        batch_y = self.labels[index_array]\n    else:\n        return batch_x\n    if self.sample_weight is None:\n        return (batch_x, batch_y)\n    else:\n        return (batch_x, batch_y, self.sample_weight[index_array])", "docstring": "Gets a batch of transformed samples.\n\nArgs:\nindex_array: Array of sample indices to include in batch.\nReturns:\nA batch of transformed samples.", "source": "github-repos"}
{"code": "def logaddexp(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Logaddexp().symbolic_call(x1, x2)\n    return backend.numpy.logaddexp(x1, x2)", "docstring": "Logarithm of the sum of exponentiations of the inputs.\n\nCalculates `log(exp(x1) + exp(x2))`.\n\nArgs:\nx1: Input tensor.\nx2: Input tensor.\n\nReturns:\nOutput tensor, element-wise logarithm of the sum of exponentiations\nof the inputs.", "source": "github-repos"}
{"code": "def __setattr__(self, __key: Hashable, __value: Any) -> None:\n        \n        try:\n            self[__key] = __value\n        except Exception as err:\n            raise AttributeError(str(err))", "docstring": "Support item assignment via dot notation.\n\nArgs:\n__key: Key to set value for\n__value: Value to set key to", "source": "juraj-google-style"}
{"code": "def start_queue_runners(self, sess, queue_runners=None):\n    if context.executing_eagerly():\n        raise RuntimeError('Queues are not compatible with eager execution.')\n    if queue_runners is None:\n        queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)\n    threads = []\n    for qr in queue_runners:\n        threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True, start=True))\n    return threads", "docstring": "Start threads for `QueueRunners`.\n\nNote that the queue runners collected in the graph key `QUEUE_RUNNERS`\nare already started automatically when you create a session with the\nsupervisor, so unless you have non-collected queue runners to start\nyou do not need to call this explicitly.\n\nArgs:\nsess: A `Session`.\nqueue_runners: A list of `QueueRunners`. If not specified, we'll use the\nlist of queue runners gathered in the graph under the key\n`GraphKeys.QUEUE_RUNNERS`.\n\nReturns:\nThe list of threads started for the `QueueRunners`.\n\nRaises:\nRuntimeError: If called with eager execution enabled.\n\n@compatibility(eager)\nQueues are not compatible with eager execution. To ingest data when eager\nexecution is enabled, use the `tf.data` API.\n@end_compatibility", "source": "github-repos"}
{"code": "def __init__(self, api_key):\n        \n        self.session = Session()\n        self.session.auth = HTTPBasicAuth(api_key, 'NoPassBecauseKey!')\n        self._load_apis()", "docstring": "Initialize a new HelpScout client.\n\nArgs:\napi_key (str): The API key to use for this session.", "source": "juraj-google-style"}
{"code": "def _strip_leading_zeros(coeffs, threshold=_COEFFICIENT_THRESHOLD):\n    while (np.abs(coeffs[(- 1)]) < threshold):\n        coeffs = coeffs[:(- 1)]\n    return coeffs", "docstring": "r\"\"\"Strip leading zero coefficients from a polynomial.\n\n.. note::\n\nThis assumes the polynomial :math:`f` defined by ``coeffs``\nhas been normalized (via :func:`.normalize_polynomial`).\n\nArgs:\ncoeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /\npower basis.\nthreshold (Optional[float]): The point :math:`\\tau` below which a\na coefficient will be considered to be numerically zero.\n\nReturns:\nnumpy.ndarray: The same coefficients without any unnecessary zero\nterms.", "source": "codesearchnet"}
{"code": "def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta'):\n    super(AdadeltaOptimizer, self).__init__(use_locking, name)\n    self._lr = learning_rate\n    self._rho = rho\n    self._epsilon = epsilon\n    self._lr_t = None\n    self._rho_t = None\n    self._epsilon_t = None", "docstring": "Construct a new Adadelta optimizer.\n\nArgs:\nlearning_rate: A `Tensor` or a floating point value. The learning rate.\nTo match the exact form in the original paper use 1.0.\nrho: A `Tensor` or a floating point value. The decay rate.\nepsilon: A `Tensor` or a floating point value.  A constant epsilon used\nto better conditioning the grad update.\nuse_locking: If `True` use locks for update operations.\nname: Optional name prefix for the operations created when applying\ngradients.  Defaults to \"Adadelta\".", "source": "github-repos"}
{"code": "def __init__(self, channel):\n        \n        self.CreateTopic = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/CreateTopic\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.FromString,\n        )\n        self.UpdateTopic = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/UpdateTopic\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.UpdateTopicRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.FromString,\n        )\n        self.Publish = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/Publish\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.PublishRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.PublishResponse.FromString,\n        )\n        self.GetTopic = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/GetTopic\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.GetTopicRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.FromString,\n        )\n        self.ListTopics = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/ListTopics\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicsResponse.FromString,\n        )\n        self.ListTopicSubscriptions = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/ListTopicSubscriptions\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSubscriptionsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSubscriptionsResponse.FromString,\n        )\n        self.ListTopicSnapshots = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/ListTopicSnapshots\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSnapshotsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSnapshotsResponse.FromString,\n        )\n        self.DeleteTopic = channel.unary_unary(\n            \"/google.pubsub.v1.Publisher/DeleteTopic\",\n            request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.DeleteTopicRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def load_recipe(self, recipe):\n    self.recipe = recipe\n    for module_description in recipe['modules']:\n        module_name = module_description['name']\n        module = self.config.get_module(module_name)(self)\n        self._module_pool[module_name] = module", "docstring": "Populates the internal module pool with modules declared in a recipe.\n\nArgs:\nrecipe: Dict, recipe declaring modules to load.", "source": "codesearchnet"}
{"code": "def _list_like_func(self, func, axis, *args, **kwargs):\n    func_prepared = self._prepare_method((lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))))\n    new_data = self._map_across_full_axis(axis, func_prepared)\n    new_index = ([(f if isinstance(f, string_types) else f.__name__) for f in func] if (axis == 0) else self.index)\n    new_columns = ([(f if isinstance(f, string_types) else f.__name__) for f in func] if (axis == 1) else self.columns)\n    return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Apply list-like function across given axis.\n\nArgs:\nfunc: The function to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "codesearchnet"}
{"code": "def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:\n    upper_bound1 = abs(up[0]) * abs(reg_scale)\n    upper_bound2 = abs(up[0]) * abs(reg_scale) * 2\n    step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))\n    left_values = [-step ** i + 1 for i in range(max_num_bins \n    right_values = [step ** i - 1 for i in range(1, max_num_bins \n    values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2]\n    values = torch.cat(values, 0)\n    return values", "docstring": "Generates the non-uniform Weighting Function W(n) for bounding box regression.\n\nArgs:\nmax_num_bins (int): Max number of the discrete bins.\nup (Tensor): Controls upper bounds of the sequence,\nwhere maximum offset is ±up * H / W.\nreg_scale (float): Controls the curvature of the Weighting Function.\nLarger values result in flatter weights near the central axis W(max_num_bins/2)=0\nand steeper weights at both ends.\nReturns:\nTensor: Sequence of Weighting Function.", "source": "github-repos"}
{"code": "def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:\n    \n    if not isinstance(classes, list):\n        classes = [classes]  \n\n    for class_ in classes:\n        tag = '!{}'.format(class_.__name__)\n        if issubclass(class_, enum.Enum):\n            loader_cls.add_constructor(tag, EnumConstructor(class_))\n        elif issubclass(class_, str) or issubclass(class_, UserString):\n            loader_cls.add_constructor(tag, UserStringConstructor(class_))\n        else:\n            loader_cls.add_constructor(tag, Constructor(class_))\n\n        if not hasattr(loader_cls, '_registered_classes'):\n            loader_cls._registered_classes = dict()\n        loader_cls._registered_classes[tag] = class_", "docstring": "Registers one or more classes with a YAtiML loader.\n\nOnce a class has been registered, it can be recognized and \\\nconstructed when reading a YAML text.\n\nArgs:\nloader_cls: The loader to register the classes with.\nclasses: The class(es) to register, a plain Python class or a \\\nlist of them.", "source": "juraj-google-style"}
{"code": "def seconds(value: Union[int, float]) -> Duration:\n    return float(value)", "docstring": "Converts input value from seconds to a `Duration` in seconds.\n\nSince the `Duration` object is equivalent to a `float` value in seconds,\nthis method does nothing else than casting the input to `float`. It may be\nused in order to make the code more explicit.\n\nExplicit time units:\n```python\n>>> duration = tp.duration.seconds(3)\n>>> duration\n3.0\n\n>>> # Usage in a window operation\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 6],\n...     features={\"f1\": [1, 5, -5]},\n... )\n>>> a.moving_sum(window_length=duration)\nindexes: ...\ntimestamps: [1. 2. 6.]\n'f1': [ 1 6 -5]\n...\n\n```\n\nArgs:\nvalue: Number of seconds.\n\nReturns:\nSame number of seconds.", "source": "github-repos"}
{"code": "def deprecate_moved_module(deprecated_name, new_module, deletion_version):\n\n    def getter(name):\n        if getter not in _PRINTED_WARNING and _PRINT_DEPRECATION_WARNINGS:\n            _PRINTED_WARNING[getter] = True\n            _log_deprecation('Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s.', deprecated_name, new_module.__name__, deletion_version)\n        return getattr(new_module, name)\n    return getter", "docstring": "Logs a warning when a module that has been moved to a new location is used.\n\nCopy the following code into the old module:\n\n```\nimport deprecation\nimport new_module\n\n__getattr__ = deprecation.deprecate_moved_module(\n__name__, new_module, \"2.9\")  # adjust version number.\n```\n\nArgs:\ndeprecated_name: Name of old module.\nnew_module: Module to replace the old module.\ndeletion_version: Version of TensorFlow in which the old module will be\nremoved.\n\nReturns:\nA function that logs a warning and returns the symbol from the new module.\nSet this function as the module's `__getattr__`.", "source": "github-repos"}
{"code": "def pretty_print_fhir_to_json_string(fhir_proto: message.Message, *, indent_size: int=2) -> str:\n    printer = _json_printer.JsonPrinter.pretty_printer(_PRIMITIVE_HANDLER, indent_size=indent_size)\n    return printer.print(fhir_proto)", "docstring": "Returns a FHIR JSON representation with spaces and newlines.\n\nArgs:\nfhir_proto: The proto to serialize into a \"pretty\" JSON string.\nindent_size: An integer denoting the size of space indentation for lexical\nscoping. Defaults to 2.\n\nReturns:\nA FHIR JSON string representation with spaces and newlines.", "source": "github-repos"}
{"code": "def _create_delegate_handler(delegate):\n\n    @coroutine\n    def handler(*args):\n        (yield)\n        (yield delegate.send(Transition(args, delegate)))\n    return handler", "docstring": "Creates a handler function that creates a co-routine that can yield once with the given\npositional arguments to the delegate as a transition.\n\nArgs:\ndelegate (Coroutine): The co-routine to delegate to.\n\nReturns:\nA :class:`callable` handler that returns a co-routine that ignores the data it receives\nand sends with the arguments given to the handler as a :class:`Transition`.", "source": "codesearchnet"}
{"code": "def maybe_get_static_value(x, dtype=None):\n  \n  if x is None:\n    return x\n  try:\n    \n    x_ = tf.get_static_value(x)\n  except TypeError:\n    x_ = x\n  if x_ is None or dtype is None:\n    return x_\n  return np.array(x_, dtype)", "docstring": "Helper which tries to return a static value.\n\nGiven `x`, extract it's value statically, optionally casting to a specific\ndtype. If this is not possible, None is returned.\n\nArgs:\nx: `Tensor` for which to extract a value statically.\ndtype: Optional dtype to cast to.\n\nReturns:\nStatically inferred value if possible, otherwise None.", "source": "juraj-google-style"}
{"code": "def __init__(self, columns: list[str], min_value: float=0.0, max_value: float=1.0, name: Optional[str]=None):\n    super().__init__(columns)\n    self.min_value = min_value\n    self.max_value = max_value\n    self.name = name\n    if self.max_value <= self.min_value:\n        raise ValueError('max_value must be greater than min_value')", "docstring": "This function applies a scaling transformation on the given columns\nof incoming data. The transformation scales the input values to the\nrange [min_value, max_value].\n\nArgs:\ncolumns: A list of column names to apply the transformation on.\nmin_value: The minimum value of the output range.\nmax_value: The maximum value of the output range.\nname: A name for the operation (optional).", "source": "github-repos"}
{"code": "def SaveTransaction(self, tx):\n    coins = self.GetCoins()\n    changed = []\n    added = []\n    deleted = []\n    found_coin = False\n    for input in tx.inputs:\n        coin = None\n        for coinref in coins:\n            test_coin = coinref.Reference\n            if (test_coin == input):\n                coin = coinref\n        if (coin is None):\n            return False\n        if ((coin.State & CoinState.Spent) > 0):\n            return False\n        elif ((coin.State & CoinState.Confirmed) == 0):\n            return False\n        coin.State |= CoinState.Spent\n        coin.State &= (~ CoinState.Confirmed)\n        changed.append(coin)\n    for (index, output) in enumerate(tx.outputs):\n        state = self.CheckAddressState(output.ScriptHash)\n        key = CoinReference(tx.Hash, index)\n        if ((state & AddressState.InWallet) > 0):\n            newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Unconfirmed)\n            self._coins[key] = newcoin\n            if ((state & AddressState.WatchOnly) > 0):\n                newcoin.State |= CoinState.WatchOnly\n            added.append(newcoin)\n    if isinstance(tx, ClaimTransaction):\n        for claim in tx.Claims:\n            claim_coin = self._coins[claim]\n            claim_coin.State |= CoinState.Claimed\n            claim_coin.State &= (~ CoinState.Confirmed)\n            changed.append(claim_coin)\n    self.OnSaveTransaction(tx, added, changed, deleted)\n    return True", "docstring": "This method is used to after a transaction has been made by this wallet.  It updates the states of the coins\nIn the wallet to reflect the new balance, but the coins remain in a ``CoinState.UNCONFIRMED`` state until\nThe transaction has been processed by the network.\n\nThe results of these updates can be used by overriding the ``OnSaveTransaction`` method, and, for example\npersisting the results to a database.\n\nArgs:\ntx (Transaction): The transaction that has been made by this wallet.\n\nReturns:\nbool: True is successfully processes, otherwise False if input is not in the coin list, already spent or not confirmed.", "source": "codesearchnet"}
{"code": "def __init__(self, metadata=0, metadata_mask=0):\n        \n        super().__init__(InstructionType.OFPIT_WRITE_METADATA)\n        self.metadata = metadata\n        self.metadata_mask = metadata_mask", "docstring": "Create InstructionWriteMetadata with the optional parameters below.\n\nArgs:\nmetadata (int): Metadata value to write.\nmetadata_mask (int): Metadata write bitmask.", "source": "juraj-google-style"}
{"code": "def run_profilers(run_object, prof_config, verbose=False):\n    if (len(prof_config) > len(set(prof_config))):\n        raise AmbiguousConfigurationError(('Profiler configuration %s is ambiguous' % prof_config))\n    available_profilers = {opt for (opt, _) in _PROFILERS}\n    for option in prof_config:\n        if (option not in available_profilers):\n            raise BadOptionError(('Unknown option: %s' % option))\n    run_stats = OrderedDict()\n    present_profilers = ((o, p) for (o, p) in _PROFILERS if (o in prof_config))\n    for (option, prof) in present_profilers:\n        curr_profiler = prof(run_object)\n        if verbose:\n            print(('Running %s...' % curr_profiler.__class__.__name__))\n        run_stats[option] = curr_profiler.run()\n    return run_stats", "docstring": "Runs profilers on run_object.\n\nArgs:\nrun_object: An object (string or tuple) for profiling.\nprof_config: A string with profilers configuration.\nverbose: True if info about running profilers should be shown.\nReturns:\nAn ordered dictionary with collected stats.\nRaises:\nAmbiguousConfigurationError: when prof_config is ambiguous.\nBadOptionError: when unknown options are present in configuration.", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    if token_ids_1 is None:\n        return len(token_ids_0 + sep) * [0]\n    return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet\ndoes not make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def broadcast_structure(to_structure, from_structure):\n    from_parts = tf.nest.flatten(from_structure)\n    if (len(from_parts) == 1):\n        from_structure = tf.nest.map_structure((lambda _: from_parts[0]), to_structure)\n    return from_structure", "docstring": "Broadcasts `from_structure` to `to_structure`.\n\nThis is useful for downstream usage of `zip` or `tf.nest.map_structure`.\n\nIf `from_structure` is a singleton, it is tiled to match the structure of\n`to_structure`. Note that the elements in `from_structure` are not copied if\nthis tiling occurs.\n\nArgs:\nto_structure: A structure.\nfrom_structure: A structure.\n\nReturns:\nnew_from_structure: Same structure as `to_structure`.\n\n#### Example:\n\n```python\na_structure = ['a', 'b', 'c']\nb_structure = broadcast_structure(a_structure, 'd')\n# -> ['d', 'd', 'd']\nc_structure = tf.nest.map_structure(\nlambda a, b: a + b, a_structure, b_structure)\n# -> ['ad', 'bd', 'cd']\n```", "source": "codesearchnet"}
{"code": "def __init__(self, item_cls, data):\n        \n        super(ResourceList, self).__init__()\n        if data is not None:\n\n            data = json.loads(data) if type(data) is not dict else data\n            paging = data['list_info']\n            raw_items = data.get(self.items_keys[item_cls.__name__])\n\n            if raw_items:\n\n                \n                for raw_item in raw_items:\n                    self.append(item_cls(raw_item))\n\n                \n                self.page = paging['page']\n                self.num_pages = paging['num_pages']\n                self.num_results = paging['num_results']\n                self.page_size = paging['page_size']", "docstring": "Initialization of the list\n\nArgs:\nitem_cls (str): Object class matching the list items\ndata (str or dict): A dictionary or raw JSON string that is returned by a request.", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context, file_object=None):\n    \n    if file_object:\n      raise ValueError('File object value set.')\n\n    super(RawFile, self).__init__(resolver_context)\n    self._file_objects = []", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_object (Optional[FileIO]): file-like object.\n\nRaises:\nValueError: when file_object is set.", "source": "juraj-google-style"}
{"code": "def write_bit(self, registeraddress, value, functioncode=5):\n        \n        _checkFunctioncode(functioncode, [5, 15])\n        _checkInt(value, minvalue=0, maxvalue=1, description='input value')\n        self._genericCommand(functioncode, registeraddress, value)", "docstring": "Write one bit to the slave.\n\nArgs:\n* registeraddress (int): The slave register address (use decimal numbers, not hex).\n* value (int): 0 or 1\n* functioncode (int): Modbus function code. Can be 5 or 15.\n\nReturns:\nNone\n\nRaises:\nValueError, TypeError, IOError", "source": "juraj-google-style"}
{"code": "def logical_or(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return LogicalOr().symbolic_call(x1, x2)\n    return backend.numpy.logical_or(x1, x2)", "docstring": "Computes the element-wise logical OR of the given input tensors.\n\nZeros are treated as `False` and non-zeros are treated as `True`.\n\nArgs:\nx1: Input tensor.\nx2: Input tensor.\n\nReturns:\nOutput tensor, element-wise logical OR of the inputs.", "source": "github-repos"}
{"code": "def _call_api(self, verb, url, **request_kwargs):\n        \n        api = 'https:\n        auth_headers = {'Authorization': 'token {}'.format(self.api_token)}\n        headers = {**auth_headers, **request_kwargs.pop('headers', {})}\n        return getattr(requests, verb)(api, headers=headers, **request_kwargs)", "docstring": "Perform a github API call\n\nArgs:\nverb (str): Can be \"post\", \"put\", or \"get\"\nurl (str): The base URL with a leading slash for Github API (v3)\nauth (str or HTTPBasicAuth): A Github API token or a HTTPBasicAuth object", "source": "juraj-google-style"}
{"code": "def _build_insert_compiler(self, rows: List[Dict]):\n        \n\n        \n        \n        \n        \n        \n        objs = []\n        field_count = len(rows[0])\n        for index, row in enumerate(rows):\n            if field_count != len(row):\n                raise SuspiciousOperation((\n                    'In bulk upserts, you cannot have rows with different field '\n                    'configurations. Row {0} has a different field config than '\n                    'the first row.'\n                ).format(index))\n\n            objs.append(self.model(**row))\n\n        \n        self._for_write = True\n\n        \n        insert_fields, update_fields = self._get_upsert_fields(rows[0])\n\n        \n        query = PostgresInsertQuery(self.model)\n        query.conflict_action = self.conflict_action\n        query.conflict_target = self.conflict_target\n        query.index_predicate = self.index_predicate\n        query.values(objs, insert_fields, update_fields)\n\n        \n        \n        connection = django.db.connections[self.db]\n        compiler = PostgresInsertCompiler(query, connection, self.db)\n\n        return compiler", "docstring": "Builds the SQL compiler for a insert query.\n\nArguments:\nrows:\nA list of dictionaries, where each entry\ndescribes a record to insert.\n\nReturns:\nThe SQL compiler for the insert.", "source": "juraj-google-style"}
{"code": "def to_yaml(obj):\n    \n    if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'):\n        if hasattr(obj, 'message'):\n            payload = obj.message\n            header = 'Message'\n        elif hasattr(obj, 'request'):\n            payload = obj.request\n            header = 'Request'\n        elif hasattr(obj, 'response'):\n            payload = obj.response\n            header = 'Response'\n        else:\n            raise ValueError('Cannot generate YAML representation for %r' % type(obj))\n\n        prefix = '\n                 (header,\n                  obj.transfer.source_node_id or 'Anon',\n                  obj.transfer.dest_node_id or 'All',\n                  obj.transfer.ts_monotonic, obj.transfer.ts_real)\n\n        return prefix + _to_yaml_impl(payload)\n    else:\n        return _to_yaml_impl(obj)", "docstring": "This function returns correct YAML representation of a UAVCAN structure (message, request, or response), or\na DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit.\nArgs:\nobj:            Object to convert.\n\nReturns: Unicode string containing YAML representation of the object.", "source": "juraj-google-style"}
{"code": "def densifying_unary(default_value):\n\n    def wrap_densifying_unary(func):\n\n        @functools.wraps(func)\n        def sparse_wrapper(x, *args, **kwargs):\n            if isinstance(x, tf.SparseTensor):\n                sparse_output = sparse_with_values(x, func(x.values, *args, **kwargs))\n                return sparse_to_dense(sparse_output, tf.cast(default_value, sparse_output.values.dtype))\n            elif isinstance(x, tf.IndexedSlices):\n                sparse_output_values = func(x.values, *args, **kwargs)\n                output = tf.fill(x.dense_shape, tf.cast(default_value, sparse_output_values.dtype))\n                return tf.tensor_scatter_nd_update(output, tf.expand_dims(x.indices, 1), sparse_output_values)\n            return func(x, *args, **kwargs)\n        return sparse_wrapper\n    return wrap_densifying_unary", "docstring": "Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to\na non-zero-preserving element-wise unary operator.\n\nThere are requirements on the operator for this decorator to work correctly:\n\n- The operator must be element-wise\n- The operator must be unary (one input tensor and one output tensor)\n- The operator must return a tensor of the same shape.\n\nAdditional arguments to the function (besides the input tensor) are\nsupported. The returned result is a dense tensor and contains\n`default_value` outside of the indices of the input tensor.\n\nArgs:\ndefault_value: The value to use outside of indices. It must be the value\nthat the operator returns for zero values.\nReturns:\nWrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`.", "source": "github-repos"}
{"code": "def query(self, query):\n    \n\n    \n    cursor = self.child_datastore.query(query)\n\n    \n    cursor._iterable = deserialized_gen(self.serializer, cursor._iterable)\n\n    return cursor", "docstring": "Returns an iterable of objects matching criteria expressed in `query`\nDe-serializes values on the way out, using a :ref:`deserialized_gen` to\navoid incurring the cost of de-serializing all data at once, or ever, if\niteration over results does not finish (subject to order generator\nconstraint).\n\nArgs:\nquery: Query object describing the objects to return.\n\nRaturns:\niterable cursor with all objects matching criteria", "source": "juraj-google-style"}
{"code": "def update(self, webhookId, name=None, targetUrl=None, **request_parameters):\n    check_type(webhookId, basestring, may_be_none=False)\n    check_type(name, basestring)\n    check_type(targetUrl, basestring)\n    put_data = dict_from_items_with_values(request_parameters, name=name, targetUrl=targetUrl)\n    json_data = self._session.put(((API_ENDPOINT + '/') + webhookId), json=put_data)\n    return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Update a webhook, by ID.\n\nArgs:\nwebhookId(basestring): The webhook ID.\nname(basestring): A user-friendly name for this webhook.\ntargetUrl(basestring): The URL that receives POST requests for\neach event.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nWebhook: A Webhook object with the updated Webex Teams webhook\ndetails.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "class Partition(PTransformWithSideInputs):\n\n    class ApplyPartitionFnFn(DoFn):\n        \n\n        def process(self, element, partitionfn, n, *args, **kwargs):\n            partition = partitionfn.partition_for(element, n, *args, **kwargs)\n            if not 0 <= partition < n:\n                raise ValueError('PartitionFn specified out-of-bounds partition index: %d not in [0, %d)' % (partition, n))\n            yield pvalue.TaggedOutput(str(partition), element)\n\n    def make_fn(self, fn, has_side_inputs):\n        return fn if isinstance(fn, PartitionFn) else CallableWrapperPartitionFn(fn)\n\n    def expand(self, pcoll):\n        n = int(self.args[0])\n        args, kwargs = util.insert_values_in_args(self.args, self.kwargs, self.side_inputs)\n        return pcoll | ParDo(self.ApplyPartitionFnFn(), self.fn, *args, **kwargs).with_outputs(*[str(t) for t in range(n)])", "docstring": "Split a PCollection into several partitions.\n\nUses the specified PartitionFn to separate an input PCollection into the\nspecified number of sub-PCollections.\n\nWhen apply()d, a Partition() PTransform requires the following:\n\nArgs:\npartitionfn: a PartitionFn, or a callable with the signature described in\nCallableWrapperPartitionFn.\nn: number of output partitions.\n\nThe result of this PTransform is a simple list of the output PCollections\nrepresenting each of n partitions, in order.", "source": "github-repos"}
{"code": "def design_stat_cooling(self, value='Cooling'):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `design_stat_cooling`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `design_stat_cooling`')\n        vals = set()\n        vals.add('Cooling')\n        if (value not in vals):\n            raise ValueError('value {} is not an accepted value for field `design_stat_cooling`'.format(value))\n    self._design_stat_cooling = value", "docstring": "Corresponds to IDD Field `design_stat_cooling`\n\nArgs:\nvalue (str): value for IDD Field `design_stat_cooling`\nAccepted values are:\n- Cooling\nDefault value: Cooling\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def __init__(self, path):\n        \n        self.me = Path(path)\n        self.tag = 'dict'\n        self.value = []", "docstring": "init\n\nArgs:\npath (str): The absolute path of the plist", "source": "juraj-google-style"}
{"code": "def from_json(cls, key, scopes, subject=None):\n    credentials_type = key['type']\n    if (credentials_type != 'service_account'):\n        raise ValueError(('key: expected type service_account (got %s)' % credentials_type))\n    email = key['client_email']\n    key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key['private_key'])\n    return cls(key=key, email=email, scopes=scopes, subject=subject)", "docstring": "Alternate constructor intended for using JSON format of private key.\n\nArgs:\nkey (dict) - Parsed JSON with service account credentials.\nscopes (Union[str, collections.Iterable[str]]) -\nList of permissions that the application requests.\nsubject (str) - The email address of the user for which\nthe application is requesting delegated access.\n\nReturns:\nServiceAccount", "source": "codesearchnet"}
{"code": "def str2fl(x):\n\n    def helper_to_fl(s_):\n        ' deals with odd string imports converts to float'\n        if (s_ == ''):\n            return 'null'\n        elif (',' in s_):\n            s_ = s_.replace(',', '')\n        try:\n            return float(s_)\n        except:\n            return s_\n    fl_lst = []\n    if isinstance(x[0], str):\n        for xi in range(len(x)):\n            fl_lst.append(helper_to_fl(x[xi]))\n    elif isinstance(x[0], list):\n        for xi in range(len(x)):\n            fl_lst.append(str2fl(x[xi]))\n    else:\n        return False\n    return fl_lst", "docstring": "Recurses through lists and converts lists of string to float\n\nArgs:\nx: string or list of strings", "source": "codesearchnet"}
{"code": "def searchFor(page, text, hit_max = 16, quads = False):\n    \n    CheckParent(page)\n    dl = page.getDisplayList()         \n    tp = dl.getTextPage()              \n    \n    rlist = tp.search(text, hit_max = hit_max, quads = quads)\n    dl = None\n    tp = None\n    return rlist", "docstring": "Search for a string on a page.\n\nArgs:\ntext: string to be searched for\nhit_max: maximum hits\nquads: return quads instead of rectangles\nReturns:\na list of rectangles or quads, each containing one occurrence.", "source": "juraj-google-style"}
{"code": "def PureDotProductAttention(dropout=0.0, mode='train'):\n  \n  def init_fun(_, input_shapes):  \n    q_shape, _, v_shape, _ = input_shapes\n    output_shape = q_shape[:-1] + (v_shape[-1],)\n    return output_shape, ()\n  def apply_fun(params, inputs, **kwargs):  \n    del params\n    q, k, v, mask = inputs\n    rng = kwargs.get('rng', None)\n    return DotProductAttention(q, k, v, mask,\n                               dropout=dropout, mode=mode, rng=rng)\n  return init_fun, apply_fun", "docstring": "Pure single-headed self-attention.\n\nArgs:\ndropout: float: dropout rate\nmode: str: 'train' or 'eval'\n\nReturns:\nPure single-headed attention layer. (No Dense transforms on input.)", "source": "juraj-google-style"}
{"code": "def add_region_location(self, region, locations=None, use_live=True):\n    return self.add_country_locations(Country.get_countries_in_region(region, exception=HDXError, use_live=use_live), locations=locations)", "docstring": "Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a\nregion name. If any country is already added, it is ignored.\n\nArgs:\nregion (str): M49 region, intermediate region or subregion to add\nlocations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.\nuse_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True.\n\nReturns:\nbool: True if all countries in region added or False if any already present.", "source": "codesearchnet"}
{"code": "def __init__(self, step):\n        \n        logger.debug(\"starting\")\n\n        \n        self.description = None\n        self.foreach_items = None\n        self.in_parameters = None\n        self.retry_decorator = None\n        self.run_me = True\n        self.skip_me = False\n        self.swallow_me = False\n        self.name = None\n        self.while_decorator = None\n\n        if isinstance(step, dict):\n            self.name = step['name']\n            logger.debug(f\"{self.name} is complex.\")\n\n            self.in_parameters = step.get('in', None)\n\n            \n            self.description = step.get('description', None)\n            if self.description:\n                logger.info(f\"{self.name}: {self.description}\")\n\n            \n            self.foreach_items = step.get('foreach', None)\n\n            \n            retry_definition = step.get('retry', None)\n            if retry_definition:\n                self.retry_decorator = RetryDecorator(retry_definition)\n\n            \n            self.run_me = step.get('run', True)\n\n            \n            self.skip_me = step.get('skip', False)\n\n            \n            self.swallow_me = step.get('swallow', False)\n\n            \n            while_definition = step.get('while', None)\n            if while_definition:\n                self.while_decorator = WhileDecorator(while_definition)\n\n        else:\n            \n            \n            \n            logger.debug(f\"{step} is a simple string.\")\n            self.name = step\n\n        self.module = pypyr.moduleloader.get_module(self.name)\n        try:\n            self.run_step_function = getattr(self.module, 'run_step')\n        except AttributeError:\n            logger.error(f\"The step {self.name} in module {self.module} \"\n                         \"doesn't have a run_step(context) function.\")\n            raise\n\n        logger.debug(\"done\")", "docstring": "Initialize the class. No duh, huh?.\n\nYou can happily expect the initializer to initialize all\nmember attributes.\n\nArgs:\nstep: a string or a dict. This is the actual step as it exists in\nthe pipeline yaml - which is to say it can just be a string\nfor a simple step, or a dict for a complex step.", "source": "juraj-google-style"}
{"code": "def orphan_entry(self, rval: RawObject) -> 'ArrayEntry':\n    val = self.entry_from_raw(rval)\n    return ArrayEntry(0, EmptyList(), EmptyList(), val, None, self, val.timestamp)", "docstring": "Return an isolated entry of the receiver.\n\nArgs:\nrval: Raw object to be used for the returned entry.", "source": "codesearchnet"}
{"code": "def lookup_rest_method(self, orig_request):\n    \n    method_name, method, params = self.config_manager.lookup_rest_method(\n        orig_request.path, orig_request.request_uri, orig_request.http_method)\n    orig_request.method_name = method_name\n    return method, params", "docstring": "Looks up and returns rest method for the currently-pending request.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\n\nReturns:\nA tuple of (method descriptor, parameters), or (None, None) if no method\nwas found for the current request.", "source": "juraj-google-style"}
{"code": "def addSources(self, *sources):\n        \n        self._sources.extend(sources)\n\n        debug.logger & debug.flagCompiler and debug.logger(\n            'current MIB source(s): %s' % ', '.join([str(x) for x in self._sources]))\n\n        return self", "docstring": "Add more ASN.1 MIB source repositories.\n\nMibCompiler.compile will invoke each of configured source objects\nin order of their addition asking each to fetch MIB module specified\nby name.\n\nArgs:\nsources: reader object(s)\n\nReturns:\nreference to itself (can be used for call chaining)", "source": "juraj-google-style"}
{"code": "def _get_snpeff_transcript(self, transcript_info):\n    transcript = Transcript(hgnc_symbol=transcript_info.get('Gene_Name'), transcript_id=transcript_info.get('Feature'), ensembl_id=transcript_info.get('Gene_ID'), biotype=transcript_info.get('Transcript_BioType'), consequence=transcript_info.get('Annotation'), exon=transcript_info.get('Rank'), HGVSc=transcript_info.get('HGVS.c'), HGVSp=transcript_info.get('HGVS.p'))\n    return transcript", "docstring": "Create a transcript based on the snpeff annotation\n\nArgs:\ntranscript_info (dict): A dict with snpeff info\n\nReturns:\ntranscript (puzzle.models.Transcript): A Transcripts", "source": "codesearchnet"}
{"code": "def _CopyFileObjectToTemporaryFile(self, file_object, temporary_file):\n    file_object.seek(0, os.SEEK_SET)\n    data = file_object.read(self._READ_BUFFER_SIZE)\n    while data:\n        temporary_file.write(data)\n        data = file_object.read(self._READ_BUFFER_SIZE)", "docstring": "Copies the contents of the file-like object to a temporary file.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\ntemporary_file (file): temporary file.", "source": "codesearchnet"}
{"code": "def _generate_api_config_with_root(self, request):\n    actual_root = self._get_actual_root(request)\n    generator = api_config.ApiConfigGenerator()\n    api = request.body_json['api']\n    version = request.body_json['version']\n    lookup_key = (api, version)\n    service_factories = self._backend.api_name_version_map.get(lookup_key)\n    if (not service_factories):\n        return None\n    service_classes = [service_factory.service_class for service_factory in service_factories]\n    config_dict = generator.get_config_dict(service_classes, hostname=actual_root)\n    for config in config_dict.get('items', []):\n        lookup_key_with_root = (config.get('name', ''), config.get('version', ''), actual_root)\n        self._config_manager.save_config(lookup_key_with_root, config)\n    return config_dict", "docstring": "Generate an API config with a specific root hostname.\n\nThis uses the backend object and the ApiConfigGenerator to create an API\nconfig specific to the hostname of the incoming request. This allows for\nflexible API configs for non-standard environments, such as localhost.\n\nArgs:\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\n\nReturns:\nA string representation of the generated API config.", "source": "codesearchnet"}
{"code": "def set_back(self, x: int, y: int, r: int, g: int, b: int) -> None:\n        \n        i = self.width * y + x\n        self.back_r[i] = r\n        self.back_g[i] = g\n        self.back_b[i] = b", "docstring": "Set the background color of one cell.\n\nArgs:\nx (int): X position to change.\ny (int): Y position to change.\nr (int): Red background color, from 0 to 255.\ng (int): Green background color, from 0 to 255.\nb (int): Blue background color, from 0 to 255.", "source": "juraj-google-style"}
{"code": "def tanh(x):\n    return nn.tanh(x)", "docstring": "Element-wise tanh.\n\nArgs:\nx: A tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def _slice_single_param(param, param_ndims_to_matrix_ndims, slices, batch_shape):\n    param = _broadcast_parameter_with_batch_shape(param, param_ndims_to_matrix_ndims, array_ops.ones_like(batch_shape))\n    if hasattr(param, 'batch_shape_tensor'):\n        param_batch_shape = param.batch_shape_tensor()\n    else:\n        param_batch_shape = array_ops.shape(param)\n    param_batch_rank = array_ops.size(param_batch_shape)\n    param_batch_shape = param_batch_shape[:param_batch_rank - param_ndims_to_matrix_ndims]\n    if tensor_util.constant_value(array_ops.size(batch_shape)) != 0 and tensor_util.constant_value(array_ops.size(param_batch_shape)) == 0:\n        return param\n    param_slices = _sanitize_slices(slices, intended_shape=batch_shape, deficient_shape=param_batch_shape)\n    if param_ndims_to_matrix_ndims > 0:\n        if Ellipsis not in [slc for slc in slices if not tensor_util.is_tensor(slc)]:\n            param_slices.append(Ellipsis)\n        param_slices += [slice(None)] * param_ndims_to_matrix_ndims\n    return param.__getitem__(tuple(param_slices))", "docstring": "Slices into the batch shape of a single parameter.\n\nArgs:\nparam: The original parameter to slice; either a `Tensor` or an object\nwith batch shape (LinearOperator).\nparam_ndims_to_matrix_ndims: `int` number of right-most dimensions used for\ninferring matrix shape of the `LinearOperator`. For non-Tensor\nparameters, this is the number of this param's batch dimensions used by\nthe matrix shape of the parent object.\nslices: iterable of slices received by `__getitem__`.\nbatch_shape: The parameterized object's batch shape `Tensor`.\n\nReturns:\nnew_param: Instance of the same type as `param`, batch-sliced according to\n`slices`.", "source": "github-repos"}
{"code": "def _find_op(graph: ops.Graph, op_name: Optional[str]) -> Optional[ops.Operation]:\n    if not op_name:\n        return None\n    init_op = graph.get_operation_by_name(op_name)\n    logging.debug('Op found in the graph: %s', op_name)\n    return init_op", "docstring": "Finds the operation with `op_name`.\n\nArgs:\ngraph: The graph to find from.\nop_name: Name of the node.\n\nReturns:\nThe operation that corresponds to `op_name`. Returns None iff op_name is an\nempty string or None.\n\nRaises:\nValueError: `op_name` is malformed.", "source": "github-repos"}
{"code": "def delete_url(self, url, token=''):\n    if (token == ''):\n        token = self._user_token\n    return requests.delete(url, headers={'Authorization': 'Token {}'.format(token)}, verify=False)", "docstring": "Returns a delete resquest object taking in a url and user token.\n\nArguments:\nurl (str): The url to make post to\ntoken (str): The authentication token\n\nReturns:\nobj: Delete request object", "source": "codesearchnet"}
{"code": "def is_mod_class(mod, cls):\n    \n    return inspect.isclass(cls) and inspect.getmodule(cls) == mod", "docstring": "Checks if a class in a module was declared in that module.\n\nArgs:\nmod: the module\ncls: the class", "source": "juraj-google-style"}
{"code": "def extraterrestrial_horizontal_radiation(self, value=9999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `extraterrestrial_horizontal_radiation`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `extraterrestrial_horizontal_radiation`')\n    self._extraterrestrial_horizontal_radiation = value", "docstring": "Corresponds to IDD Field `extraterrestrial_horizontal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `extraterrestrial_horizontal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _init_journal(self, permissive=True):\n        \n        nowstamp = datetime.now().strftime(\"%d-%b-%Y %H:%M:%S.%f\")[:-3]\n        self._add_entry(templates.INIT\n                                 .format(time_stamp=nowstamp))\n        if permissive:\n            self._add_entry(templates.INIT_DEBUG)", "docstring": "Add the initialization lines to the journal.\n\nBy default adds JrnObj variable and timestamp to the journal contents.\n\nArgs:\npermissive (bool): if True most errors in journal will not\ncause Revit to stop journal execution.\nSome still do.", "source": "juraj-google-style"}
{"code": "def _kl_dirichlet_dirichlet(d1, d2, name=None):\n  \n  with tf.name_scope(name or \"kl_dirichlet_dirichlet\"):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    digamma_sum_d1 = tf.math.digamma(\n        tf.reduce_sum(input_tensor=d1.concentration, axis=-1, keepdims=True))\n    digamma_diff = tf.math.digamma(d1.concentration) - digamma_sum_d1\n    concentration_diff = d1.concentration - d2.concentration\n\n    return (\n        tf.reduce_sum(input_tensor=concentration_diff * digamma_diff, axis=-1) -\n        tf.math.lbeta(d1.concentration) + tf.math.lbeta(d2.concentration))", "docstring": "Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.\n\nArgs:\nd1: instance of a Dirichlet distribution object.\nd2: instance of a Dirichlet distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_dirichlet_dirichlet\".\n\nReturns:\nBatchwise KL(d1 || d2)", "source": "juraj-google-style"}
{"code": "def get_analysis_types(adapter, total_cases, institute_id=None, slice_query=None):\n    \n    \n    query = {}\n\n    subquery = {}\n    if institute_id and slice_query:\n        subquery = adapter.cases(owner=institute_id, name_query=slice_query,\n                              yield_query=True)\n    elif institute_id:\n        subquery = adapter.cases(owner=institute_id, yield_query=True)\n    elif slice_query:\n        subquery = adapter.cases(name_query=slice_query, yield_query=True)\n\n    query = {'$match': subquery}\n\n    pipeline = []\n    if query:\n        pipeline.append(query)\n\n    pipeline.append({'$unwind': '$individuals'})\n    pipeline.append({'$group': {'_id': '$individuals.analysis_type', 'count': {'$sum': 1}}})\n    analysis_query = adapter.case_collection.aggregate(pipeline)\n    analysis_types = [{'name': group['_id'], 'count': group['count']} for group in analysis_query]\n\n    return analysis_types", "docstring": "Return information about analysis types.\nGroup cases based on analysis type for the individuals.\nArgs:\nadapter(adapter.MongoAdapter)\ntotal_cases(int): Total number of cases\ninstitute_id(str)\nslice_query(str): Query to filter cases to obtain statistics for.\nReturns:\nanalysis_types array of hashes with name: analysis_type(str), count: count(int)", "source": "juraj-google-style"}
{"code": "def forward_log_det_jacobian(self, x, event_ndims, name='forward_log_det_jacobian'):\n    return self._call_forward_log_det_jacobian(x, event_ndims, name)", "docstring": "Returns both the forward_log_det_jacobian.\n\nArgs:\nx: `Tensor`. The input to the \"forward\" Jacobian determinant evaluation.\nevent_ndims: Number of dimensions in the probabilistic events being\ntransformed. Must be greater than or equal to\n`self.forward_min_event_ndims`. The result is summed over the final\ndimensions to produce a scalar Jacobian determinant for each event,\ni.e. it has shape `x.shape.ndims - event_ndims` dimensions.\nname: The name to give this op.\n\nReturns:\n`Tensor`, if this bijector is injective.\nIf not injective this is not implemented.\n\nRaises:\nTypeError: if `self.dtype` is specified and `y.dtype` is not\n`self.dtype`.\nNotImplementedError: if neither `_forward_log_det_jacobian`\nnor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or\nthis is a non-injective bijector.", "source": "github-repos"}
{"code": "def get_table_map_prompt() -> t.Tuple:\n    table_prompts = []\n    table_map = get_table_map()\n    for k, v in table_map.items():\n        data_str = f\"Table name is {k}.\\n        It's located at {v['uri']} and containing following columns: {', '.join(v['columns'])}\"\n        table_prompts.append(data_str)\n    return ('\\n'.join(table_prompts), table_map)", "docstring": "Generate a prompt containing information about each table in the dataset.\n\nReturns:\ntuple: A tuple containing the prompt string and the table map dictionary.", "source": "github-repos"}
{"code": "def transfer(self, data):\n    if (not isinstance(data, (bytes, bytearray, list))):\n        raise TypeError('Invalid data type, should be bytes, bytearray, or list.')\n    try:\n        buf = array.array('B', data)\n    except OverflowError:\n        raise ValueError('Invalid data bytes.')\n    (buf_addr, buf_len) = buf.buffer_info()\n    spi_xfer = _CSpiIocTransfer()\n    spi_xfer.tx_buf = buf_addr\n    spi_xfer.rx_buf = buf_addr\n    spi_xfer.len = buf_len\n    try:\n        fcntl.ioctl(self._fd, SPI._SPI_IOC_MESSAGE_1, spi_xfer)\n    except OSError as e:\n        raise SPIError(e.errno, ('SPI transfer: ' + e.strerror))\n    if isinstance(data, bytes):\n        return bytes(bytearray(buf))\n    elif isinstance(data, bytearray):\n        return bytearray(buf)\n    elif isinstance(data, list):\n        return buf.tolist()", "docstring": "Shift out `data` and return shifted in data.\n\nArgs:\ndata (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out.\n\nReturns:\nbytes, bytearray, list: data shifted in.\n\nRaises:\nSPIError: if an I/O or OS error occurs.\nTypeError: if `data` type is invalid.\nValueError: if data is not valid bytes.", "source": "codesearchnet"}
{"code": "def save_data(X, y, path):\n    \n    catalog = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5}\n\n    ext = os.path.splitext(path)[1]\n    func = catalog[ext]\n\n    if y is None:\n        y = np.zeros((X.shape[0], ))\n\n    func(X, y, path)", "docstring": "Save data as a CSV, LibSVM or HDF5 file based on the file extension.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector. If None, all zero vector will be saved.\npath (str): Path to the CSV, LibSVM or HDF5 file to save data.", "source": "juraj-google-style"}
{"code": "def node_op_type(self, node_name, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node op types are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    return self._debug_graphs[device_name].node_op_types[node_name]", "docstring": "Get the op type of given node.\n\nArgs:\nnode_name: (`str`) name of the node.\ndevice_name: (`str`) name of the device. If there is only one device or if\nnode_name exists on only one device, this argument is optional.\n\nReturns:\n(`str`) op type of the node.\n\nRaises:\nLookupError: If node op types have not been loaded\nfrom partition graphs yet.", "source": "github-repos"}
{"code": "def make_basket_put_payoff(strike_price, dtype=None, name=None):\n    strike_price = tf.convert_to_tensor(strike_price, dtype=dtype, name='strike_price')\n    put_valuer = functools.partial(_put_valuer, strike_price=strike_price, dtype=dtype, name=name)\n    return put_valuer", "docstring": "Produces a callable from samples to payoff of a simple basket put option.\n\nArgs:\nstrike_price: A `Tensor` of `dtype` consistent with `samples` and shape\n`[num_samples, num_strikes]`.\ndtype: Optional `dtype`. Either `tf.float32` or `tf.float64`. The `dtype`\nIf supplied, represents the `dtype` for the 'strike_price' as well as\nfor the input argument of the output payoff callable.\nDefault value: `None`, which means that the `dtype` inferred by TensorFlow\nis used.\nname: Python `str` name prefixed to Ops created by the callable created\nby this function.\nDefault value: `None` which is mapped to the default name 'put_valuer'\n\nReturns:\nA callable from `Tensor` of shape `[num_samples, num_exercise_times, dim]`\nand a scalar `Tensor` representing current time to a `Tensor` of shape\n`[num_samples, num_strikes]`.", "source": "github-repos"}
{"code": "def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:\n    result_dict: dict[str, Any] = {}\n    _AggModelIdMixin.add_model_id(self, result_dict)\n    _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions)\n    labels = [prediction.label for prediction in predictions if prediction.label is not None and prediction.label != self._missing_label]\n    if len(labels) > 0:\n        result_dict['label'] = self._agg(labels)\n    elif all(map(lambda x: x.label is None, predictions)):\n        result_dict['label'] = None\n    else:\n        result_dict['label'] = self._missing_label\n    return AnomalyPrediction(**result_dict)", "docstring": "Applies the label aggregation function to a list of predictions.\n\nArgs:\npredictions (Iterable[AnomalyPrediction]): A collection of\n`AnomalyPrediction` objects to be aggregated.\n\nReturns:\nAnomalyPrediction: A single `AnomalyPrediction` object with the\naggregated label. The aggregated label is determined as follows:\n\n- If there are any non-missing and non-error labels, the `agg_func` is\napplied to aggregate them.\n- If all labels are error labels (`None`), the aggregated label is also\n`None`.\n- If there are a mix of missing and error labels, the aggregated label\nis the `missing_label`.", "source": "github-repos"}
{"code": "def readSchedules(self, tableset):\n        \n        self.setContext(\"readSchedules\")\n        try:\n            req_table = binascii.hexlify(str(tableset).zfill(1))\n            req_str = \"01523102303037\" + req_table + \"282903\"\n\n            self.request(False)\n            req_crc = self.calc_crc16(req_str[2:].decode(\"hex\"))\n            req_str += req_crc\n            self.m_serial_port.write(req_str.decode(\"hex\"))\n            raw_ret = self.m_serial_port.getResponse(self.getContext())\n            self.serialPostEnd()\n            return_crc = self.calc_crc16(raw_ret[1:-2])\n\n            if tableset == ReadSchedules.Schedules_1_To_4:\n                unpacked_read = self.unpackStruct(raw_ret, self.m_schd_1_to_4)\n                self.convertData(unpacked_read, self.m_schd_1_to_4, self.m_kwh_precision)\n                if str(return_crc) == str(self.m_schd_1_to_4[\"crc16\"][MeterData.StringValue]):\n                    ekm_log(\"Schedules 1 to 4 CRC success (06 return\")\n                    self.setContext(\"\")\n                    return True\n\n            elif tableset == ReadSchedules.Schedules_5_To_6:\n                unpacked_read = self.unpackStruct(raw_ret, self.m_schd_5_to_6)\n                self.convertData(unpacked_read, self.m_schd_5_to_6, self.m_kwh_precision)\n                if str(return_crc) == str(self.m_schd_5_to_6[\"crc16\"][MeterData.StringValue]):\n                    ekm_log(\"Schedules 5 to 8 CRC success (06 return)\")\n                    self.setContext(\"\")\n                    return True\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return False", "docstring": "Serial call to read schedule tariffs buffer\n\nArgs:\ntableset (int): :class:`~ekmmeters.ReadSchedules` buffer to return.\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def should_fire(self, time_domain, timestamp, window, context):\n    pass", "docstring": "Whether this trigger should cause the window to fire.\n\nArgs:\ntime_domain: WATERMARK for event-time timers and REAL_TIME for\nprocessing-time timers.\ntimestamp: for time_domain WATERMARK, it represents the\nwatermark: (a lower bound on) the watermark of the system\nand for time_domain REAL_TIME, it represents the\ntrigger: timestamp of the processing-time timer.\nwindow: the window whose trigger is being considered\ncontext: a context (e.g. a TriggerContext instance) for managing state\nand setting timers\n\nReturns:\nwhether this trigger should cause a firing", "source": "github-repos"}
{"code": "def _validate_path(self, settings, name, value):\n    if (not os.path.exists(value)):\n        raise SettingsInvalidError(\"Path from setting '{name}' does not exists: {value}\".format(name=name, value=value))\n    return value", "docstring": "Validate path exists\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (str): Path to validate.\n\nRaises:\nboussole.exceptions.SettingsInvalidError: If path does not exists.\n\nReturns:\nstr: Validated path.", "source": "codesearchnet"}
{"code": "def _match_elements(dom, matches):\n    out = {}\n    for (key, content) in matches.items():\n        pattern = content['data'].strip()\n        if ('\\n' in pattern):\n            pattern = pattern.split()\n            transformer = (lambda x: x.strip().split())\n        else:\n            transformer = (lambda x: x.strip())\n        matching_elements = _locate_element(dom, pattern, transformer=transformer)\n        not_found_msg = content.get('notfoundmsg', '').replace('$name', key)\n        if (not not_found_msg.strip()):\n            not_found_msg = (\"Can't locate variable '%s' with content '%s'!\" % (key, pattern))\n        content['notfoundmsg'] = not_found_msg\n        tagname = content.get('tagname', '').strip().lower()\n        if tagname:\n            matching_elements = filter((lambda x: (x.getTagName().strip().lower() == tagname)), matching_elements)\n        if (not matching_elements):\n            raise UserWarning(not_found_msg)\n        if (len(matching_elements) > 1):\n            raise UserWarning(((\"Ambigious content '%s'!\" % content) + 'Content was found in multiple elements!'))\n        out[key] = matching_elements[0]\n    return out", "docstring": "Find location of elements matching patterns specified in `matches`.\n\nArgs:\ndom (obj): HTMLElement DOM tree.\nmatches (dict): Structure: ``{\"var\": {\"data\": \"match\", ..}, ..}``.\n\nReturns:\ndict: Structure: ``{\"var\": {\"data\": HTMLElement_obj, ..}, ..}``", "source": "codesearchnet"}
{"code": "def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):\n    \n    return do_ams_get_url(endpoint, access_token, flag)", "docstring": "Get Media Services Final Endpoint URL.\nArgs:\naccess_token (str): A valid Azure authentication token.\nendpoint (str): Azure Media Services Initial Endpoint.\nflag (bol): flag.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def reload_config(self, dockercfg_path=None):\n    self._auth_configs = auth.load_config(dockercfg_path, credstore_env=self.credstore_env)", "docstring": "Force a reload of the auth configuration\n\nArgs:\ndockercfg_path (str): Use a custom path for the Docker config file\n(default ``$HOME/.docker/config.json`` if present,\notherwise``$HOME/.dockercfg``)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def set(self, response: 'requests.Response') -> None:\n        \n        self.data[response.url] = SavedEndpoint(\n            response.json(),\n            self._get_expiration(response.headers)\n        )", "docstring": "Adds a response to the cache.\n\nArgs:\nresponse: response from ESI\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def Where(self, field):\n    where_builder = _WhereBuilder(self, field)\n    self.where_builders.append(where_builder)\n    return where_builder", "docstring": "Creates a WHERE builder using a provided field.\n\nArgs:\nfield: the field to be added as an argument in the WHERE clause.\n\nReturns:\nThe created WHERE builder.", "source": "codesearchnet"}
{"code": "def Allowance(self, wallet, owner_addr, requestor_addr):\n    invoke_args = [self.ScriptHash.ToString(), 'allowance', [PromptUtils.parse_param(owner_addr, wallet), PromptUtils.parse_param(requestor_addr, wallet)]]\n    (tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True)\n    return (tx, fee, results)", "docstring": "Return the amount of tokens that the `requestor_addr` account can transfer from the `owner_addr` account.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\nowner_addr (str): public address of the account to transfer the given amount from.\nrequestor_addr (str): public address of the account that requests the transfer.\n\nReturns:\ntuple:\nInvocationTransaction: the transaction.\nint: the transaction fee.\nlist: the neo VM evaluation stack results.", "source": "codesearchnet"}
{"code": "def get_variant_dict(variant_line, header_line=None):\n    \n    if not header_line:\n        logger.debug(\"No header line, use only first 8 mandatory fields\")\n        header_line = ['CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO']\n    \n    logger.debug(\"Building variant dict from variant line {0} and header\"\\\n    \" line {1}\".format(variant_line, '\\t'.join(header_line)))\n    \n    splitted_line = variant_line.rstrip().split('\\t')\n    if len(splitted_line) < len(header_line):\n        logger.info('\\t'.join(header_line))\n        logger.info('\\t'.join(splitted_line))\n        raise SyntaxError(\"Length of variant line differs from length of\"\\\n                            \" header line\")\n    \n    return dict(zip(header_line, splitted_line))", "docstring": "Parse a variant line\n\nSplit a variant line and map the fields on the header columns\n\nArgs:\nvariant_line (str): A vcf variant line\nheader_line (list): A list with the header columns\nReturns:\nvariant_dict (dict): A variant dictionary", "source": "juraj-google-style"}
{"code": "def get_course_grade(self, course_id, username):\n    results = self.client.courses(course_id).get(username=username)\n    for row in results:\n        if (row.get('username') == username):\n            return row\n    raise HttpNotFoundError('No grade record found for course={}, username={}'.format(course_id, username))", "docstring": "Retrieve the grade for the given username for the given course_id.\n\nArgs:\n* ``course_id`` (str): The string value of the course's unique identifier\n* ``username`` (str): The username ID identifying the user for which to retrieve the grade.\n\nRaises:\n\nHttpNotFoundError if no grade found for the given user+course.\n\nReturns:\n\na dict containing:\n\n* ``username``: A string representation of a user's username passed in the request.\n* ``course_key``: A string representation of a Course ID.\n* ``passed``: Boolean representing whether the course has been passed according the course's grading policy.\n* ``percent``: A float representing the overall grade for the course\n* ``letter_grade``: A letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None", "source": "codesearchnet"}
{"code": "def channels_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"channel\": channel, \"name\": name})\n        return self.api_call(\"channels.rename\", json=kwargs)", "docstring": "Renames a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nname (str): The new channel name. e.g. 'newchannel'", "source": "juraj-google-style"}
{"code": "def ping(self, destination, length=20):\n        \n        print '%s call ping' % self.port\n        print 'destination: %s' %destination\n        try:\n            cmd = 'ping %s -c 1 -s %s -I %s'  % (destination, str(length), WPAN_INTERFACE)\n            if self._is_net:\n                ssh_stdin, ssh_stdout, ssh_stderr = self.handle.exec_command(cmd)\n            else:\n                self._sendline(cmd)\n                self._expect(cmd)\n            \n            time.sleep(1)\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('ping() Error: ' + str(e))", "docstring": "send ICMPv6 echo request with a given length to a unicast destination\naddress\n\nArgs:\ndestination: the unicast destination address of ICMPv6 echo request\nlength: the size of ICMPv6 echo request payload", "source": "juraj-google-style"}
{"code": "def ndcg(truth, recommend, k=None):\n    \n    if k is None:\n        k = len(recommend)\n\n    def idcg(n_possible_truth):\n        res = 0.\n        for n in range(n_possible_truth):\n            res += 1. / np.log2(n + 2)\n        return res\n\n    dcg = 0.\n    for n, r in enumerate(recommend[:k]):\n        if r not in truth:\n            continue\n        dcg += 1. / np.log2(n + 2)\n\n    res_idcg = idcg(np.min([truth.size, k]))\n    if res_idcg == 0.:\n        return 0.\n    return dcg / res_idcg", "docstring": "Normalized Discounted Cumulative Grain (NDCG).\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\nk (int): Top-k items in `recommend` will be recommended.\n\nReturns:\nfloat: NDCG.", "source": "juraj-google-style"}
{"code": "def get_upstream_artifacts_full_paths_per_task_id(context):\n    upstream_artifacts = context.task['payload']['upstreamArtifacts']\n    task_ids_and_relative_paths = [(artifact_definition['taskId'], artifact_definition['paths']) for artifact_definition in upstream_artifacts]\n    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)\n    upstream_artifacts_full_paths_per_task_id = {}\n    failed_paths_per_task_id = {}\n    for (task_id, paths) in task_ids_and_relative_paths:\n        for path in paths:\n            try:\n                path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path)\n                add_enumerable_item_to_dict(dict_=upstream_artifacts_full_paths_per_task_id, key=task_id, item=path_to_add)\n            except ScriptWorkerTaskException:\n                if (path in optional_artifacts_per_task_id.get(task_id, [])):\n                    log.warning('Optional artifact \"{}\" of task \"{}\" not found'.format(path, task_id))\n                    add_enumerable_item_to_dict(dict_=failed_paths_per_task_id, key=task_id, item=path)\n                else:\n                    raise\n    return (upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id)", "docstring": "List the downloaded upstream artifacts.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict, dict: lists of the paths to upstream artifacts, sorted by task_id.\nFirst dict represents the existing upstream artifacts. The second one\nmaps the optional artifacts that couldn't be downloaded\n\nRaises:\nscriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.", "source": "codesearchnet"}
{"code": "def Snapshot(self, request, global_params=None):\n    config = self.GetMethodConfig('Snapshot')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Snapshot the state of a streaming job.\n\nArgs:\nrequest: (DataflowProjectsJobsSnapshotRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Snapshot) The response message.", "source": "github-repos"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n        return\n\n    \n    if server is not None and author != channel.server.me:\n        \n        normal_replies = data[\"discord\"][\"servers\"][server.id][_data.modulename][\"normal\"]\n        tts_replies = data[\"discord\"][\"servers\"][server.id][_data.modulename][\"tts\"]\n\n        \n        for r in normal_replies.keys():\n            if r in content.lower().replace(' ', ''):\n                await client.send_typing(channel)\n                await client.send_message(channel, normal_replies[r])\n\n        \n        for r in tts_replies.keys():\n            if r in content.lower().replace(' ', ''):\n                await client.send_typing(channel)\n                await client.send_message(channel, tts_replies[r])", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def period_length_in_days(self, period_tensor):\n    return (self + period_tensor).ordinal() - self._ordinals", "docstring": "Computes the number of days in each period.\n\nArgs:\nperiod_tensor: A PeriodTensor object broadcastable to the shape of \"self\".\n\nReturns:\nAn int32 tensor with numbers of days each period takes.\n\n#### Example\n\n```python\ndates = tff.datetime.dates_from_tuples([(2020, 2, 25), (2020, 3, 2)])\ndates.period_length_in_days(month())  # [29, 31]\n\nperiods = tff.datetime.months([1, 2])\ndates.period_length_in_days(periods)  # [29, 61]\n```", "source": "github-repos"}
{"code": "def assert_visible(self, selector, testid=None, **kwargs):\n        \n        self.info_log(\n            \"Assert visible selector(%s) testid(%s)\" % (selector, testid)\n        )\n\n        highlight = kwargs.get(\n            'highlight',\n            BROME_CONFIG['highlight']['highlight_on_assertion_success']\n        )\n        self.debug_log(\"effective highlight: %s\" % highlight)\n\n        wait_until_visible = kwargs.get(\n            'wait_until_visible',\n            BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible']  \n        )\n        self.debug_log(\"effective wait_until_visible: %s\" % wait_until_visible)\n\n        if wait_until_visible:\n            self.wait_until_visible(selector, raise_exception=False)\n\n        element = self.find(\n            selector,\n            raise_exception=False,\n            wait_until_visible=False,\n            wait_until_present=False\n        )\n        if element and element.is_displayed(raise_exception=False):\n            if highlight:\n                element.highlight(\n                    style=BROME_CONFIG['highlight']['style_on_assertion_success']  \n                )\n            if testid is not None:\n                self.create_test_result(testid, True)\n\n            return True\n        else:\n            if testid is not None:\n                self.create_test_result(testid, False)\n\n            return False", "docstring": "Assert that the element is visible in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntestid (str): the test_id or a str\n\nKwargs:\nwait_until_visible (bool)\nhighlight (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "juraj-google-style"}
{"code": "def template_file(\n    task: Task,\n    template: str,\n    path: str,\n    jinja_filters: FiltersDict = None,\n    **kwargs: Any\n) -> Result:\n    \n    jinja_filters = jinja_filters or {} or task.nornir.config.jinja2.filters\n    text = jinja_helper.render_from_file(\n        template=template,\n        path=path,\n        host=task.host,\n        jinja_filters=jinja_filters,\n        **kwargs\n    )\n    return Result(host=task.host, result=text)", "docstring": "Renders contants of a file with jinja2. All the host data is available in the template\n\nArguments:\ntemplate: filename\npath: path to dir with templates\njinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters\n**kwargs: additional data to pass to the template\n\nReturns:\nResult object with the following attributes set:\n* result (``string``): rendered string", "source": "juraj-google-style"}
{"code": "def unpack(self, buff, offset=0):\n        \n        begin = offset\n        hexas = []\n        while begin < offset + 8:\n            number = struct.unpack(\"!B\", buff[begin:begin+1])[0]\n            hexas.append(\"%.2x\" % number)\n            begin += 1\n        self._value = ':'.join(hexas)", "docstring": "Unpack a binary message into this object's attributes.\n\nUnpack the binary value *buff* and update this object attributes based\non the results.\n\nArgs:\nbuff (bytes): Binary data package to be unpacked.\noffset (int): Where to begin unpacking.\n\nRaises:\nException: If there is a struct unpacking error.", "source": "juraj-google-style"}
{"code": "def do_check(func, files, status):\n    for file_name in files:\n        with open(file_name, 'r') as f:\n            output = func.parse(f.read(), file_name)\n        if output:\n            status.append('{0}: {1}'.format(file_name, output))\n    return status", "docstring": "Generic do_check helper method\n\nArgs:\nfunc (function): Specific function to call\nfiles (list): list of files to run against\nstatus (list): list of pre-receive check failures to eventually print\nto the user\n\nReturns:\nstatus list of current pre-redeive check failures. Might be an empty\nlist.", "source": "codesearchnet"}
{"code": "def add(self, key, minhash):\n        \n        if len(minhash) < self.k*self.l:\n            raise ValueError(\"The num_perm of MinHash out of range\")\n        if key in self.keys:\n            raise ValueError(\"The given key has already been added\")\n        self.keys[key] = [self._H(minhash.hashvalues[start:end])\n                for start, end in self.hashranges]\n        for H, hashtable in zip(self.keys[key], self.hashtables):\n            hashtable[H].append(key)", "docstring": "Add a unique key, together\nwith a MinHash (or weighted MinHash) of the set referenced by the key.\n\nNote:\nThe key won't be searchbale until the\n:func:`datasketch.MinHashLSHForest.index` method is called.\n\nArgs:\nkey (hashable): The unique identifier of the set.\nminhash (datasketch.MinHash): The MinHash of the set.", "source": "juraj-google-style"}
{"code": "def _rollback(self):\n    if (not self.in_progress):\n        raise ValueError(_CANT_ROLLBACK)\n    try:\n        self._client._firestore_api.rollback(self._client._database_string, self._id, metadata=self._client._rpc_metadata)\n    finally:\n        self._clean_up()", "docstring": "Roll back the transaction.\n\nRaises:\nValueError: If no transaction is in progress.", "source": "codesearchnet"}
{"code": "def _message_received(self, msg):\n    msg = Message.from_node(msg)\n    return self.dispatch(msg)", "docstring": "Callback run when an XMPP Message is reveived.\nThis callback delivers the message to every behaviour\nthat is waiting for it. First, the aioxmpp.Message is\nconverted to spade.message.Message\n\nArgs:\nmsg (aioxmpp.Messagge): the message just received.\n\nReturns:\nlist(asyncio.Future): a list of futures of the append of the message at each matched behaviour.", "source": "codesearchnet"}
{"code": "def remove_attribute(self, attr):\n    update = [fapi._attr_rem(attr)]\n    r = fapi.update_workspace_attributes(self.namespace, self.name, update, self.api_url)\n    self.data['workspace']['attributes'].pop(attr, None)\n    fapi._check_response_code(r, 200)", "docstring": "Remove attribute from a workspace.\n\nArgs:\nattr (str): attribute name", "source": "codesearchnet"}
{"code": "def get_upstream_fork_point(self):\n    possible_relatives = []\n    try:\n        if (not self.repo):\n            return None\n        try:\n            active_branch = self.repo.active_branch\n        except (TypeError, ValueError):\n            logger.debug('git is in a detached head state')\n            return None\n        else:\n            tracking_branch = active_branch.tracking_branch()\n            if tracking_branch:\n                possible_relatives.append(tracking_branch.commit)\n        if (not possible_relatives):\n            for branch in self.repo.branches:\n                tracking_branch = branch.tracking_branch()\n                if (tracking_branch is not None):\n                    possible_relatives.append(tracking_branch.commit)\n        head = self.repo.head\n        most_recent_ancestor = None\n        for possible_relative in possible_relatives:\n            for ancestor in self.repo.merge_base(head, possible_relative):\n                if (most_recent_ancestor is None):\n                    most_recent_ancestor = ancestor\n                elif self.repo.is_ancestor(most_recent_ancestor, ancestor):\n                    most_recent_ancestor = ancestor\n        return most_recent_ancestor\n    except exc.GitCommandError as e:\n        logger.debug('git remote upstream fork point could not be found')\n        logger.debug(e.message)\n        return None", "docstring": "Get the most recent ancestor of HEAD that occurs on an upstream\nbranch.\n\nFirst looks at the current branch's tracking branch, if applicable. If\nthat doesn't work, looks at every other branch to find the most recent\nancestor of HEAD that occurs on a tracking branch.\n\nReturns:\ngit.Commit object or None", "source": "codesearchnet"}
{"code": "def _convert_ddb_list_to_list(conversion_list):\n    ret_list = []\n    for v in conversion_list:\n        for v1 in v:\n            ret_list.append(v[v1])\n    return ret_list", "docstring": "Given a dynamodb list, it will return a python list without the dynamodb\ndatatypes\n\nArgs:\nconversion_list (dict): a dynamodb list which includes the\ndatatypes\n\nReturns:\nlist: Returns a sanitized list without the dynamodb datatypes", "source": "codesearchnet"}
{"code": "def fit(self, X):\n        \n\n        self.constant_value = self._get_constant_value(X)\n\n        if self.constant_value is None:\n            self.model = scipy.stats.gaussian_kde(X)\n\n        else:\n            self._replace_constant_methods()\n\n        self.fitted = True", "docstring": "Fit Kernel density estimation to an list of values.\n\nArgs:\nX: 1-d `np.ndarray` or `pd.Series` or `list` datapoints to be estimated from.\n\nThis function will fit a gaussian_kde model to a list of datapoints\nand store it as a class attribute.", "source": "juraj-google-style"}
{"code": "def ch_start_time(self, *channels: List[Channel]) -> int:\n    intervals = list(itertools.chain(*(self._table[chan] for chan in channels if (chan in self._table))))\n    if intervals:\n        return min((interval.begin for interval in intervals))\n    return 0", "docstring": "Return earliest start time in this collection.\n\nArgs:\n*channels: Channels over which to obtain start_time.", "source": "codesearchnet"}
{"code": "def get_first_content(el_list, alt=None, strip=True):\n    if (not el_list):\n        return alt\n    content = el_list[0].getContent()\n    if strip:\n        content = content.strip()\n    if (not content):\n        return alt\n    return content", "docstring": "Return content of the first element in `el_list` or `alt`. Also return `alt`\nif the content string of first element is blank.\n\nArgs:\nel_list (list): List of HTMLElement objects.\nalt (default None): Value returner when list or content is blank.\nstrip (bool, default True): Call .strip() to content.\n\nReturns:\nstr or alt: String representation of the content of the first element \\\nor `alt` if not found.", "source": "codesearchnet"}
{"code": "def evaluate_stacked_ensemble(path, ensemble_id):\n    \n    with functions.DBContextManager(path) as session:\n        stacked_ensemble = session.query(models.StackedEnsemble).filter_by(\n            id=ensemble_id).first()\n        if not stacked_ensemble:\n            raise exceptions.UserError('Stacked ensemble {} '\n                                       'does not exist'.format(ensemble_id))\n\n        stacked_ensemble.job_id = get_current_job().id\n        stacked_ensemble.job_status = 'started'\n\n        session.add(stacked_ensemble)\n        session.commit()\n\n        try:\n            meta_features_list = []\n            for base_learner in stacked_ensemble.base_learners:\n                mf = np.load(base_learner.meta_features_path(path))\n                if len(mf.shape) == 1:\n                    mf = mf.reshape(-1, 1)\n                meta_features_list.append(mf)\n\n            secondary_features = np.concatenate(meta_features_list, axis=1)\n\n            \n            extraction = session.query(models.Extraction).first()\n            return_splits_iterable = functions.import_object_from_string_code(\n                extraction.meta_feature_generation['source'],\n                'return_splits_iterable'\n            )\n            X, y = extraction.return_train_dataset()\n\n            \n            indices_list = [test_index for train_index, test_index in return_splits_iterable(X, y)]\n            indices = np.concatenate(indices_list)\n            X, y = X[indices], y[indices]\n\n            est = stacked_ensemble.return_secondary_learner()\n\n            return_splits_iterable_stacked_ensemble = functions.import_object_from_string_code(\n                extraction.stacked_ensemble_cv['source'],\n                'return_splits_iterable'\n            )\n            preds = []\n            trues_list = []\n            for train_index, test_index in return_splits_iterable_stacked_ensemble(secondary_features, y):\n                X_train, X_test = secondary_features[train_index], secondary_features[test_index]\n                y_train, y_test = y[train_index], y[test_index]\n                est = est.fit(X_train, y_train)\n                preds.append(\n                    getattr(est, stacked_ensemble.base_learner_origin.\n                            meta_feature_generator)(X_test)\n                )\n                trues_list.append(y_test)\n            preds = np.concatenate(preds, axis=0)\n            y_true = np.concatenate(trues_list)\n\n            for key in stacked_ensemble.base_learner_origin.metric_generators:\n                metric_generator = functions.import_object_from_string_code(\n                    stacked_ensemble.base_learner_origin.metric_generators[key],\n                    'metric_generator'\n                )\n                stacked_ensemble.individual_score[key] = metric_generator(y_true, preds)\n\n            stacked_ensemble.job_status = 'finished'\n            session.add(stacked_ensemble)\n            session.commit()\n\n        except:\n            session.rollback()\n            stacked_ensemble.job_status = 'errored'\n            stacked_ensemble.description['error_type'] = repr(sys.exc_info()[0])\n            stacked_ensemble.description['error_value'] = repr(sys.exc_info()[1])\n            stacked_ensemble.description['error_traceback'] = \\\n                traceback.format_exception(*sys.exc_info())\n            session.add(stacked_ensemble)\n            session.commit()\n            raise", "docstring": "Evaluates the 
ensemble and updates the database when finished/\n\nArgs:\npath (str): Path to Xcessiv notebook\n\nensemble_id (str): Ensemble ID", "source": "juraj-google-style"}
{"code": "def tokenize(self, text, never_split=None):\n    never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n    text = self._clean_text(text)\n    if self.tokenize_chinese_chars:\n        text = self._tokenize_chinese_chars(text)\n    orig_tokens = whitespace_tokenize(text)\n    split_tokens = []\n    for token in orig_tokens:\n        if token not in never_split:\n            if self.do_lower_case:\n                token = token.lower()\n                if self.strip_accents is not False:\n                    token = self._run_strip_accents(token)\n            elif self.strip_accents:\n                token = self._run_strip_accents(token)\n        split_tokens.extend(self._run_split_on_punc(token, never_split))\n    output_tokens = whitespace_tokenize(' '.join(split_tokens))\n    return output_tokens", "docstring": "Basic Tokenization of a piece of text. Split on \"white spaces\" only, for sub-word tokenization, see\nWordPieceTokenizer.\n\nArgs:\nnever_split (`List[str]`, *optional*)\nKept for backward compatibility purposes. Now implemented directly at the base class level (see\n[`PreTrainedTokenizer.tokenize`]) List of token not to split.", "source": "github-repos"}
{"code": "def force_list(val=None):\n    if (val is None):\n        return []\n    if isinstance(val, pd.Series):\n        return val.tolist()\n    return (val if isinstance(val, list) else [val])", "docstring": "Force a list representation of an object\n\nArgs:\nval: object to parse into a list\n\nReturns:", "source": "codesearchnet"}
{"code": "def render_html_report(summary, report_template=None, report_dir=None):\n    if (not report_template):\n        report_template = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'templates', 'report_template.html')\n        logger.log_debug('No html report template specified, use default.')\n    else:\n        logger.log_info('render with html report template: {}'.format(report_template))\n    logger.log_info('Start to render Html report ...')\n    report_dir = (report_dir or os.path.join(os.getcwd(), 'reports'))\n    if (not os.path.isdir(report_dir)):\n        os.makedirs(report_dir)\n    start_at_timestamp = int(summary['time']['start_at'])\n    summary['time']['start_datetime'] = datetime.fromtimestamp(start_at_timestamp).strftime('%Y-%m-%d %H:%M:%S')\n    report_path = os.path.join(report_dir, '{}.html'.format(start_at_timestamp))\n    with io.open(report_template, 'r', encoding='utf-8') as fp_r:\n        template_content = fp_r.read()\n        with io.open(report_path, 'w', encoding='utf-8') as fp_w:\n            rendered_content = Template(template_content, extensions=['jinja2.ext.loopcontrols']).render(summary)\n            fp_w.write(rendered_content)\n    logger.log_info('Generated Html report: {}'.format(report_path))\n    return report_path", "docstring": "render html report with specified report name and template\n\nArgs:\nreport_template (str): specify html report template path\nreport_dir (str): specify html report save directory", "source": "codesearchnet"}
{"code": "def _lower_if_str(item):\n    try:\n        string_type = basestring\n    except NameError:\n        string_type = str\n    if isinstance(item, string_type):\n        return item.lower()\n    return item", "docstring": "Try to convert item to lowercase, if it is string.\n\nArgs:\nitem (obj): Str, unicode or any other object.\n\nReturns:\nobj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just \\\n`item` itself.", "source": "codesearchnet"}
{"code": "def _parse_phone(self, val):\n        \n\n        ret = {\n            'type': None,\n            'value': None\n        }\n\n        try:\n\n            ret['type'] = val[1]['type']\n\n        except (IndexError, KeyError, ValueError, TypeError):\n\n                pass\n\n        ret['value'] = val[3].strip()\n\n        try:\n\n            self.vars['phone'].append(ret)\n\n        except AttributeError:\n\n            self.vars['phone'] = []\n            self.vars['phone'].append(ret)", "docstring": "The function for parsing the vcard phone numbers.\n\nArgs:\nval (:obj:`list`): The value to parse.", "source": "juraj-google-style"}
{"code": "def GetRowCache(self, query):\n    \n    query_hash = hash(query)\n    if query_hash not in self._row_caches:\n      self._row_caches[query_hash] = set()\n    return self._row_caches[query_hash]", "docstring": "Retrieves the row cache for a specific query.\n\nThe row cache is a set that contains hashes of values in a row. The row\ncache is used to find duplicate row when a database and a database with\na WAL file is parsed.\n\nArgs:\nquery (str): query.\n\nReturns:\nset: hashes of the rows that have been parsed.", "source": "juraj-google-style"}
{"code": "def do_usufy(self, query, **kwargs):\n        \n        results = []\n\n        test = self.check_usufy(query, **kwargs)\n\n        if test:\n            r = {\n                \"type\": \"i3visio.profile\",\n                \"value\": self.platformName + \" - \" + query,\n                \"attributes\": []\n            }\n\n            \n            aux = {}\n            aux[\"type\"] = \"i3visio.uri\"\n            aux[\"value\"] = self.createURL(word=query, mode=\"usufy\")\n            aux[\"attributes\"] = []\n            r[\"attributes\"].append(aux)\n            \n            aux = {}\n            aux[\"type\"] = \"i3visio.alias\"\n            aux[\"value\"] = query\n            aux[\"attributes\"] = []\n            r[\"attributes\"].append(aux)\n            \n            aux = {}\n            aux[\"type\"] = \"i3visio.platform\"\n            aux[\"value\"] = self.platformName\n            aux[\"attributes\"] = []\n            r[\"attributes\"].append(aux)\n\n            r[\"attributes\"] += self.process_usufy(test)\n\n            results.append(r)\n        return results", "docstring": "Verifying a usufy query in this platform.\n\nThis might be redefined in any class inheriting from Platform.\n\nArgs:\n-----\nquery: The element to be searched.\n\nReturn:\n-------\nA list of elements to be appended.", "source": "juraj-google-style"}
{"code": "def port_list(br):\n    cmd = 'ovs-vsctl list-ports {0}'.format(br)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    stdout = result['stdout']\n    return _stdout_list_split(retcode, stdout)", "docstring": "Lists all of the ports within bridge.\n\nArgs:\nbr: A string - bridge name.\n\nReturns:\nList of bridges (or empty list), False on failure.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_list br0", "source": "codesearchnet"}
{"code": "def _group_similar(items: List[T], comparer: Callable[([T, T], bool)]) -> List[List[T]]:\n    groups = []\n    used = set()\n    for i in range(len(items)):\n        if (i not in used):\n            group = [items[i]]\n            for j in range((i + 1), len(items)):\n                if ((j not in used) and comparer(items[i], items[j])):\n                    used.add(j)\n                    group.append(items[j])\n            groups.append(group)\n    return groups", "docstring": "Combines similar items into groups.\n\nArgs:\nitems: The list of items to group.\ncomparer: Determines if two items are similar.\n\nReturns:\nA list of groups of items.", "source": "codesearchnet"}
{"code": "def plugin_class_validation(self, plugin_class):\n        \n\n        try:\n            getattr(plugin_class, 'dependencies')\n            getattr(plugin_class, 'execute')\n        except AttributeError:\n            return False\n\n        return True", "docstring": "Plugin validation\n\nEvery workbench plugin must have a dependencies list (even if it's empty).\nEvery workbench plugin must have an execute method.\n\nArgs:\nplugin_class: The loaded plugun class.\n\nReturns:\nTrue if dependencies and execute are present, else False.", "source": "juraj-google-style"}
{"code": "def handle_error(err, halt=True):\n    print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))\n    if halt:\n        sys.exit(1)", "docstring": "Print errors message and optionally exit.\n\nArgs:\nerr (str): The error message to print.\nhalt (bool, optional): Defaults to True. If True the script will exit.", "source": "codesearchnet"}
{"code": "def normalize(tensor, mean, std, inplace=False):\n    if (not _is_tensor_image(tensor)):\n        raise TypeError('tensor is not a torch image.')\n    if (not inplace):\n        tensor = tensor.clone()\n    mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)\n    std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)\n    tensor.sub_(mean[(:, None, None)]).div_(std[(:, None, None)])\n    return tensor", "docstring": "Normalize a tensor image with mean and standard deviation.\n\n.. note::\nThis transform acts out of place by default, i.e., it does not mutates the input tensor.\n\nSee :class:`~torchvision.transforms.Normalize` for more details.\n\nArgs:\ntensor (Tensor): Tensor image of size (C, H, W) to be normalized.\nmean (sequence): Sequence of means for each channel.\nstd (sequence): Sequence of standard deviations for each channel.\n\nReturns:\nTensor: Normalized Tensor image.", "source": "codesearchnet"}
{"code": "def _parse_shape(self, space):\n    \n    if isinstance(space, gym.spaces.Discrete):\n      return ()\n    if isinstance(space, gym.spaces.Box):\n      return space.shape\n    raise NotImplementedError()", "docstring": "Get a tensor shape from a OpenAI Gym space.\n\nArgs:\nspace: Gym space.\n\nRaises:\nNotImplementedError: For spaces other than Box and Discrete.\n\nReturns:\nShape tuple.", "source": "juraj-google-style"}
{"code": "def _handle_client_error():\n    try:\n        (yield)\n    except _ClientError as exception:\n        error = exception.response['Error']\n        if (error['Code'] in _ERROR_CODES):\n            raise _ERROR_CODES[error['Code']](error['Message'])\n        raise", "docstring": "Handle boto exception and convert to class\nIO exceptions\n\nRaises:\nOSError subclasses: IO error.", "source": "codesearchnet"}
{"code": "def play(env, transpose=True, fps=30, nop_=0):\n    \n    \n    assert isinstance(env.observation_space, gym.spaces.box.Box)\n    \n    obs_s = env.observation_space\n    is_bw = len(obs_s.shape) == 2\n    is_rgb = len(obs_s.shape) == 3 and obs_s.shape[2] in [1, 3]\n    assert is_bw or is_rgb\n    \n    if hasattr(env, 'get_keys_to_action'):\n        keys_to_action = env.get_keys_to_action()\n    \n    elif hasattr(env.unwrapped, 'get_keys_to_action'):\n        keys_to_action = env.unwrapped.get_keys_to_action()\n    else:\n        raise ValueError('env has no get_keys_to_action method')\n    relevant_keys = set(sum(map(list, keys_to_action.keys()), []))\n    \n    video_size = env.observation_space.shape[0], env.observation_space.shape[1]\n    if transpose:\n        video_size = tuple(reversed(video_size))\n    \n    pressed_keys = []\n    running = True\n    env_done = True\n    \n    flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF\n    screen = pygame.display.set_mode(video_size, flags)\n    pygame.event.set_blocked(pygame.MOUSEMOTION)\n    \n    if env.spec is not None:\n        pygame.display.set_caption(env.spec.id)\n    \n    else:\n        pygame.display.set_caption('nes-py')\n    \n    clock = pygame.time.Clock()\n    \n    while running:\n        \n        if env_done:\n            env_done = False\n            obs = env.reset()\n        \n        else:\n            \n            action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)\n            obs, rew, env_done, info = env.step(action)\n        \n        if obs is not None:\n            \n            if len(obs.shape) == 2:\n                \n                obs = obs[:, :, None]\n            \n            if obs.shape[2] == 1:\n                \n                obs = obs.repeat(3, axis=2)\n            \n            display_arr(screen, obs, video_size, transpose)\n\n        \n        for event in pygame.event.get():\n            \n            if event.type == pygame.KEYDOWN:\n                \n                if event.key in relevant_keys:\n                    \n                    pressed_keys.append(event.key)\n                \n                elif event.key == 27:\n                    running = False\n                \n                elif event.key == ord('e'):\n                    env.unwrapped._backup()\n                elif event.key == ord('r'):\n                    env.unwrapped._restore()\n            \n            elif event.type == pygame.KEYUP:\n                \n                if event.key in relevant_keys:\n                    \n                    pressed_keys.remove(event.key)\n            \n            elif event.type == pygame.QUIT:\n                running = False\n\n        \n        pygame.display.flip()\n        \n        clock.tick(fps)\n    \n    pygame.quit()", "docstring": "Play the game using the keyboard as a human.\n\nArgs:\nenv (gym.Env): the environment to use for playing\ntranspose (bool): whether to transpose frame before viewing them\nfps (int): number of steps of the environment to execute every second\nnop_ (any): the object to use as a null op action for the environment\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def coordinate_filter(self, query, mongo_query):\n    LOG.debug('Adding genomic coordinates to the query')\n    chromosome = query['chrom']\n    mongo_query['chromosome'] = chromosome\n    if (query.get('start') and query.get('end')):\n        mongo_query['position'] = {'$lte': int(query['end'])}\n        mongo_query['end'] = {'$gte': int(query['start'])}\n    return mongo_query", "docstring": "Adds genomic coordinated-related filters to the query object\n\nArgs:\nquery(dict): a dictionary of query filters specified by the users\nmongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\nmongo_query(dict): returned object contains coordinate filters", "source": "codesearchnet"}
{"code": "def enable_plugin(self, name, timeout=0):\n    url = self._url('/plugins/{0}/enable', name)\n    params = {'timeout': timeout}\n    res = self._post(url, params=params)\n    self._raise_for_status(res)\n    return True", "docstring": "Enable an installed plugin.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\ntimeout (int): Operation timeout (in seconds). Default: 0\n\nReturns:\n``True`` if successful", "source": "codesearchnet"}
{"code": "def PopAttributeContainer(self):\n    try:\n        serialized_data = self._list.pop(0)\n        self.data_size -= len(serialized_data)\n        return serialized_data\n    except IndexError:\n        return None", "docstring": "Pops a serialized attribute container from the list.\n\nReturns:\nbytes: serialized attribute container data.", "source": "codesearchnet"}
{"code": "def files_from_list(*paths):\n    ret = []\n    for path in paths:\n        if isfile(path):\n            ret.append(abspath(path))\n        elif isdir(path):\n            ret += [f for f in ls(path, abspaths=True, recursive=True) if isfile(f)]\n        else:\n            raise File404(path)\n    return ret", "docstring": "Return a list of all file paths from a list of files or directories.\n\nFor each path in the input: if it is a file, return it; if it is a\ndirectory, return a list of files in the directory.\n\nArguments:\npaths (list of str): List of file and directory paths.\n\nReturns:\nlist of str: Absolute file paths.\n\nRaises:\nFile404: If any of the paths do not exist.", "source": "codesearchnet"}
{"code": "def inflate_plugin(self, identifier, definition=None, cls=None):\n    cls = self.get_plugin(identifier, cls)\n    return cls(**(definition or {}))", "docstring": "Inflate a plugin thanks to it's identifier, definition and class.\n\nArgs:\nidentifier (str): the plugin identifier.\ndefinition (dict): the kwargs to instantiate the plugin with.\ncls (str): \"provider\", \"checker\", or None.\n\nReturns:\nProvider/Checker: instance of plugin.", "source": "codesearchnet"}
{"code": "def _CreateStopsFolder(self, schedule, doc):\n    if (not schedule.GetStopList()):\n        return None\n    stop_folder = self._CreateFolder(doc, 'Stops')\n    stop_folder_selection = self._StopFolderSelectionMethod(stop_folder)\n    stop_style_selection = self._StopStyleSelectionMethod(doc)\n    stops = list(schedule.GetStopList())\n    stops.sort(key=(lambda x: x.stop_name))\n    for stop in stops:\n        (folder, pathway_folder) = stop_folder_selection(stop)\n        (style_id, pathway_style_id) = stop_style_selection(stop)\n        self._CreateStopPlacemark(folder, stop, style_id)\n        if (self.show_stop_hierarchy and (stop.location_type != transitfeed.Stop.LOCATION_TYPE_STATION) and stop.parent_station and (stop.parent_station in schedule.stops)):\n            placemark = self._CreatePlacemark(pathway_folder, stop.stop_name, pathway_style_id)\n            parent_station = schedule.stops[stop.parent_station]\n            coordinates = [(stop.stop_lon, stop.stop_lat), (parent_station.stop_lon, parent_station.stop_lat)]\n            self._CreateLineString(placemark, coordinates)\n    return stop_folder", "docstring": "Create a KML Folder containing placemarks for each stop in the schedule.\n\nIf there are no stops in the schedule then no folder is created.\n\nArgs:\nschedule: The transitfeed.Schedule instance.\ndoc: The KML Document ElementTree.Element instance.\n\nReturns:\nThe Folder ElementTree.Element instance or None if there are no stops.", "source": "codesearchnet"}
{"code": "def get_by_contract(self, contract_hash):\n        \n        hash = contract_hash\n        if isinstance(contract_hash, str) and len(contract_hash) == 40:\n            hash = UInt160.ParseString(contract_hash)\n\n        if not isinstance(hash, UInt160):\n            raise Exception(\"Incorrect address format\")\n\n        contractlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT).snapshot()\n        results = []\n\n        for val in contractlist_snapshot.iterator(prefix=bytes(hash.Data), include_key=False):\n            if len(val) > 4:\n                try:\n                    event = SmartContractEvent.FromByteArray(val)\n                    results.append(event)\n                except Exception as e:\n                    logger.error(\"could not parse event: %s %s\" % (e, val))\n        return results", "docstring": "Look up a set of notifications by the contract they are associated with\nArgs:\ncontract_hash (UInt160 or str): hash of contract for notifications to be retreived\n\nReturns:\nlist: a list of notifications", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    self._last_month = 0\n    self._year_use = parser_mediator.GetEstimatedYear()\n    key = 'header'\n    try:\n        structure = self._MAC_WIFI_HEADER.parseString(line)\n    except pyparsing.ParseException:\n        structure = None\n    if (not structure):\n        key = 'turned_over_header'\n        try:\n            structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)\n        except pyparsing.ParseException:\n            structure = None\n    if (not structure):\n        logger.debug('Not a Mac Wifi log file')\n        return False\n    time_elements_tuple = self._GetTimeElementsTuple(key, structure)\n    try:\n        dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple)\n    except ValueError:\n        logger.debug('Not a Mac Wifi log file, invalid date and time: {0!s}'.format(structure.date_time))\n        return False\n    self._last_month = time_elements_tuple[1]\n    return True", "docstring": "Verify that this file is a Mac Wifi log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "codesearchnet"}
{"code": "def non_trainable_weights(self):\n    if self.trainable:\n        children_weights = self._gather_children_attribute('non_trainable_variables')\n        non_trainable_weights = self._non_trainable_weights + children_weights\n    else:\n        children_weights = self._gather_children_attribute('variables')\n        non_trainable_weights = self._trainable_weights + self._non_trainable_weights + children_weights\n    return self._dedup_weights(non_trainable_weights)", "docstring": "List of all non-trainable weights tracked by this layer.\n\nNon-trainable weights are *not* updated during training. They are expected\nto be updated manually in `call()`.\n\nReturns:\nA list of non-trainable variables.", "source": "github-repos"}
{"code": "def defer(target, args=None, kwargs=None, callback=None):\n    \n\n    obj = _defer(target, args, kwargs, callback)\n    obj.finished.connect(lambda: _defer_cleanup(obj))\n    obj.start()\n    _defer_threads.append(obj)\n    return obj", "docstring": "Perform operation in thread with callback\n\nInstances are cached until finished, at which point\nthey are garbage collected. If we didn't do this,\nPython would step in and garbage collect the thread\nbefore having had time to finish, resulting in an\nexception.\n\nArguments:\ntarget (callable): Method or function to call\ncallback (callable, optional): Method or function to call\nonce `target` has finished.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def parse(self, values):\n    type_map = {}\n    for (name, t) in self._hparam_types.items():\n        (param_type, _) = t\n        type_map[name] = param_type\n    values_map = parse_values(values, type_map)\n    return self.override_from_dict(values_map)", "docstring": "Override existing hyperparameter values, parsing new values from a string.\n\nSee parse_values for more detail on the allowed format for values.\n\nArgs:\nvalues: String.  Comma separated list of `name=value` pairs where 'value'\nmust follow the syntax described above.\n\nReturns:\nThe `HParams` instance.\n\nRaises:\nValueError: If `values` cannot be parsed or a hyperparameter in `values`\ndoesn't exist.", "source": "codesearchnet"}
{"code": "def add_member_to_list(self, username, listname, member_type=\"USER\"):\n        \n        return self.client.service.addMemberToList(\n            listname, username, member_type, self.proxy_id\n        )", "docstring": "Add a member to an existing list.\n\nArgs:\nusername (str): The username of the user to add\nlistname (str): The name of the list to add the user to\nmember_type (str): Normally, this should be \"USER\".\nIf you are adding a list as a member of another list,\nset this to \"LIST\", instead.", "source": "juraj-google-style"}
{"code": "def get_enabled_features(self, user_id, attributes=None):\n    enabled_features = []\n    if (not self.is_valid):\n        self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features'))\n        return enabled_features\n    if (not isinstance(user_id, string_types)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n        return enabled_features\n    if (not self._validate_user_inputs(attributes)):\n        return enabled_features\n    for feature in self.config.feature_key_map.values():\n        if self.is_feature_enabled(feature.key, user_id, attributes):\n            enabled_features.append(feature.key)\n    return enabled_features", "docstring": "Returns the list of features that are enabled for the user.\n\nArgs:\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nA list of the keys of the features that are enabled for the user.", "source": "codesearchnet"}
{"code": "def Parse(self, conditions, host_data):\n    \n    processed = []\n    probes = self.triggers.Calls(conditions)\n    for p in probes:\n      \n      \n      \n      artifact_data = host_data.get(p.artifact)\n      if not p.result_context:\n        rdf_data = artifact_data[\"PARSER\"]\n      else:\n        rdf_data = artifact_data.get(str(p.result_context))\n      try:\n        result = p.Parse(rdf_data)\n      except ProcessingError as e:\n        raise ProcessingError(\"Bad artifact %s: %s\" % (p.artifact, e))\n      if result:\n        processed.append(result)\n    \n    return self.matcher.Detect(probes, processed)", "docstring": "Runs probes that evaluate whether collected data has an issue.\n\nArgs:\nconditions: The trigger conditions.\nhost_data: A map of artifacts and rdf data.\n\nReturns:\nAnomalies if an issue exists.", "source": "juraj-google-style"}
{"code": "def fetch(self, pageNum, itemsPerPage):\n        \n        return self.get_all_alerts(self.status, pageNum, itemsPerPage)", "docstring": "Intermediate fetching\n\nArgs:\npageNum (int): Page number\nitemsPerPage (int): Number of Users per Page\n\nReturns:\ndict: Response payload", "source": "juraj-google-style"}
{"code": "def parse_changes(json):\n    \n    changes = []\n    dates = len(json)\n    for date in range(1, dates): \n        last_close = json[date - 1]['close']\n        now_close = json[date]['close']\n        changes.append(now_close - last_close)\n    logger.debug('Market Changes (from JSON):\\n{0}'.format(changes))\n    return changes", "docstring": "Gets price changes from JSON\n\nArgs:\njson: JSON data as a list of dict dates, where the keys are\nthe raw market statistics.\n\nReturns:\nList of floats of price changes between entries in JSON.", "source": "juraj-google-style"}
{"code": "def reset(self, name):\n    message_type = type(self)\n    try:\n        field = message_type.field_by_name(name)\n    except KeyError:\n        if (name not in message_type.__by_name):\n            raise AttributeError(('Message %s has no field %s' % (message_type.__name__, name)))\n    if field.repeated:\n        self.__tags[field.number] = FieldList(field, [])\n    else:\n        self.__tags.pop(field.number, None)", "docstring": "Reset assigned value for field.\n\nResetting a field will return it to its default value or None.\n\nArgs:\nname: Name of field to reset.", "source": "codesearchnet"}
{"code": "def get_entity_group_version(key):\n  \n\n  eg = EntityGroup.key_for_entity_group(key).get()\n  if eg:\n    return eg.version\n  else:\n    return None", "docstring": "Return the version of the entity group containing key.\n\nArgs:\nkey: a key for an entity group whose __entity_group__ key you want.\n\nReturns:\nThe version of the entity group containing key. This version is\nguaranteed to increase on every change to the entity group. The version\nmay increase even in the absence of user-visible changes to the entity\ngroup. May return None if the entity group was never written to.\n\nOn non-HR datatores, this function returns None.", "source": "juraj-google-style"}
{"code": "def lf_polarities(L):\n    polarities = [sorted(list(set(L[(:, i)].data))) for i in range(L.shape[1])]\n    return [(p[0] if (len(p) == 1) else p) for p in polarities]", "docstring": "Return the polarities of each LF based on evidence in a label matrix.\n\nArgs:\nL: an n x m scipy.sparse matrix where L_{i,j} is the label given by the\njth LF to the ith candidate", "source": "codesearchnet"}
{"code": "def assert_raises(ex_type, func, *args, **kwargs):\n    try:\n        func(*args, **kwargs)\n    except Exception as ex:\n        assert isinstance(ex, ex_type), ('Raised %r but type should have been %r' % (ex, ex_type))\n        return True\n    else:\n        raise AssertionError('No error was raised')", "docstring": "r\"\"\"\nChecks that a function raises an error when given specific arguments.\n\nArgs:\nex_type (Exception): exception type\nfunc (callable): live python function\n\nCommandLine:\npython -m utool.util_assert assert_raises --show\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_assert import *  # NOQA\n>>> import utool as ut\n>>> ex_type = AssertionError\n>>> func = len\n>>> # Check that this raises an error when something else does not\n>>> assert_raises(ex_type, assert_raises, ex_type, func, [])\n>>> # Check this does not raise an error when something else does\n>>> assert_raises(ValueError, [].index, 0)", "source": "codesearchnet"}
{"code": "def _expand_to_beam_size(tensor, beam_size):\n    tensor = tf.expand_dims(tensor, axis=1)\n    tile_dims = ([1] * tensor.shape.ndims)\n    tile_dims[1] = beam_size\n    return tf.tile(tensor, tile_dims)", "docstring": "Tiles a given tensor by beam_size.\n\nArgs:\ntensor: tensor to tile [batch_size, ...]\nbeam_size: How much to tile the tensor by.\n\nReturns:\nTiled tensor [batch_size, beam_size, ...]", "source": "codesearchnet"}
{"code": "def create_audit_student_enrollment(self, course_id):\n        \n        audit_enrollment = {\n            \"mode\": \"audit\",\n            \"course_details\": {\"course_id\": course_id}\n        }\n        \n        resp = self.requester.post(\n            urljoin(self.base_url, self.enrollment_url),\n            json=audit_enrollment\n        )\n        resp.raise_for_status()\n        return Enrollment(resp.json())", "docstring": "Creates an audit enrollment for the user in a given course\n\nArgs:\ncourse_id (str): an edX course id\n\nReturns:\nEnrollment: object representing the student enrollment in the provided course", "source": "juraj-google-style"}
{"code": "def add(self, *l):\n        \n        for a in flatten(l):\n            self._add([self.Inner(a)], self.l)", "docstring": "add inner to outer\n\nArgs:\n*l: element that is passed into Inner init", "source": "juraj-google-style"}
{"code": "def eval_math_expression(expression: str) -> Optional[Union[float, int]]:\n    try:\n        return eval_node(ast.parse(expression, mode='eval').body)\n    except TypeError:\n        return", "docstring": "Evaluate (safely) a mathematial expression and returns its value.\n\nArgs:\nexpression (`str`): The expression to evaluate.\n\nReturns:\n`Optional[Union[float, int]]`: Returns `None` if the evaluation fails in any way and the value computed\notherwise.\n\nExample:\n\n```py\n>>> eval_expr('2^6')\n4\n>>> eval_expr('2**6')\n64\n>>> eval_expr('1 + 2*3**(4^5) / (6 + -7)')\n-5.0\n```", "source": "github-repos"}
{"code": "def fts_match_any(self, fts, inv):\n        \n        return any([self.fts_match(fts, s) for s in inv])", "docstring": "Return `True` if any segment in `inv` matches the features in `fts`\n\nArgs:\nfts (list): a collection of (value, feature) tuples\ninv (list): a collection of IPA segments represented as Unicode\nstrings\n\nReturns:\nbool: `True` if any segment in `inv` matches the features in `fts`", "source": "juraj-google-style"}
{"code": "def main():\n    logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s')\n    try:\n        cli()\n        return 0\n    except LocationsError as error:\n        print(error)\n        return 2\n    except RuntimeError as error:\n        print(error)\n        return 255\n    except OSError as error:\n        return error.errno", "docstring": "Main script handler.\n\nReturns:\nint: 0 for success, >1 error code", "source": "codesearchnet"}
{"code": "def map_placement_transcode_configs(self, placement_feed, transcode_configs_feed, pricing_schedule_feed):\n    for placement in placement_feed:\n        placement['pricing_schedule'] = []\n        for pricing_schedule in pricing_schedule_feed:\n            if placement.get(FieldMap.PLACEMENT_ID, '') == pricing_schedule.get(FieldMap.PLACEMENT_ID, None):\n                placement['pricing_schedule'].append(pricing_schedule)\n        transcode_id = placement.get(FieldMap.TRANSCODE_ID, '')\n        placement['transcode_config'] = []\n        if transcode_id:\n            for transcode_config in transcode_configs_feed:\n                if transcode_id == transcode_config.get(FieldMap.TRANSCODE_ID, None):\n                    placement['transcode_config'].append(transcode_config)", "docstring": "Maps sub feeds with the parent feed based on placement id.\n\nArgs:\nplacement_feed: Bulkdozer feed representing the placements configurations.\ntrascode_configs_feed: Bulkdozer feed representing the transcode configs.\npricing_schedule_feed: Bulkdozer feed representing the pricing schedules.", "source": "github-repos"}
{"code": "def CreateCampaign(client, merchant_id, budget_id):\n  \n  campaign_service = client.GetService('CampaignService', 'v201809')\n\n  campaign = {\n      'name': 'Shopping campaign \n      \n      \n      'advertisingChannelType': 'DISPLAY',\n      'status': 'PAUSED',\n      'budget': {\n          'budgetId': budget_id\n      },\n      \n      \n      \n      'biddingStrategyConfiguration': {\n          'biddingStrategyType': 'MANUAL_CPC'\n      },\n      'settings': [{\n          'xsi_type': 'ShoppingSetting',\n          \n          \n          'campaignPriority': 0,\n          'merchantId': merchant_id,\n          \n          \n          \n          \n          'salesCountry': 'ZZ',\n          \n          \n          'enableLocal': True,\n      }]\n  }\n\n  operations = [{\n      'operator': 'ADD',\n      'operand': campaign\n  }]\n\n  return campaign_service.mutate(operations)['value'][0]", "docstring": "Creates a new Display Network campaign.\n\nArgs:\nclient: an AdWordsClient instance.\nmerchant_id: a int merchant center ID.\nbudget_id: a int budget ID.\n\nReturns:\nThe campaign that was successfully created.", "source": "juraj-google-style"}
{"code": "def version(self):\n    if (self._server_version is None):\n        try:\n            data = self.http_get('/version')\n            self._server_version = data['version']\n            self._server_revision = data['revision']\n        except Exception:\n            self._server_version = self._server_revision = 'unknown'\n    return (self._server_version, self._server_revision)", "docstring": "Returns the version and revision of the gitlab server.\n\nNote that self.version and self.revision will be set on the gitlab\nobject.\n\nReturns:\ntuple (str, str): The server version and server revision.\n('unknown', 'unknwown') if the server doesn't\nperform as expected.", "source": "codesearchnet"}
{"code": "def deploy_raiden_contracts(\n            self,\n            max_num_of_token_networks: Optional[int],\n    ) -> DeployedContracts:\n        \n\n        deployed_contracts: DeployedContracts = {\n            'contracts_version': self.contract_version_string(),\n            'chain_id': int(self.web3.version.network),\n            'contracts': {},\n        }\n\n        self._deploy_and_remember(CONTRACT_ENDPOINT_REGISTRY, [], deployed_contracts)\n        secret_registry = self._deploy_and_remember(\n            contract_name=CONTRACT_SECRET_REGISTRY,\n            arguments=[],\n            deployed_contracts=deployed_contracts,\n        )\n        token_network_registry_args = [\n            secret_registry.address,\n            deployed_contracts['chain_id'],\n            DEPLOY_SETTLE_TIMEOUT_MIN,\n            DEPLOY_SETTLE_TIMEOUT_MAX,\n        ]\n        if max_num_of_token_networks:\n            token_network_registry_args.append(max_num_of_token_networks)\n        self._deploy_and_remember(\n            contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,\n            arguments=token_network_registry_args,\n            deployed_contracts=deployed_contracts,\n        )\n\n        return deployed_contracts", "docstring": "Deploy all required raiden contracts and return a dict of contract_name:address\n\nArgs:\nmax_num_of_token_networks (Optional[int]): The max number of tokens that can be\nregistered to the TokenNetworkRegistry. If None, the argument is omitted from\nthe call to the constructor of TokenNetworkRegistry.", "source": "juraj-google-style"}
{"code": "def write(self, dataset):\n    if not isinstance(dataset, data_types.DatasetV2):\n        raise TypeError(f'Invalid `dataset.` Expected a `tf.data.Dataset` object but got {type(dataset)}.')\n    if not dataset_ops.get_structure(dataset).is_compatible_with(tensor_spec.TensorSpec([], dtypes.string)):\n        raise TypeError(f'Invalid `dataset`. Expected a`dataset` that produces scalar `tf.string` elements, but got a dataset which produces elements with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and types {dataset_ops.get_legacy_output_types(dataset)}.')\n    dataset = dataset._apply_debug_options()\n    return gen_experimental_dataset_ops.dataset_to_tf_record(dataset._variant_tensor, self._filename, self._compression_type)", "docstring": "Writes a dataset to a TFRecord file.\n\nAn operation that writes the content of the specified dataset to the file\nspecified in the constructor.\n\nIf the file exists, it will be overwritten.\n\nArgs:\ndataset: a `tf.data.Dataset` whose elements are to be written to a file\n\nReturns:\nIn graph mode, this returns an operation which when executed performs the\nwrite. In eager mode, the write is performed by the method itself and\nthere is no return value.\n\nRaises\nTypeError: if `dataset` is not a `tf.data.Dataset`.\nTypeError: if the elements produced by the dataset are not scalar strings.", "source": "github-repos"}
{"code": "def parse_mmtf_header(infile):\n    \n    infodict = {}\n\n    mmtf_decoder = mmtf.parse(infile)\n    infodict['date'] = mmtf_decoder.deposition_date\n    infodict['release_date'] = mmtf_decoder.release_date\n    try:\n        infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]\n    except AttributeError:\n        infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]\n    infodict['resolution'] = mmtf_decoder.resolution\n    infodict['description'] = mmtf_decoder.title\n\n    group_name_exclude = ['HOH']\n    chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']\n    chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))\n    infodict['chemicals'] = chemicals\n    return infodict", "docstring": "Parse an MMTF file and return basic header-like information.\n\nArgs:\ninfile (str): Path to MMTF file\n\nReturns:\ndict: Dictionary of parsed header\n\nTodo:\n- Can this be sped up by not parsing the 3D coordinate info somehow?\n- OR just store the sequences when this happens since it is already being parsed.", "source": "juraj-google-style"}
{"code": "def add_path_argument(cls, group, argname, dest=None, help_=None):\n    prefixed = ('%s-%s' % (cls.argument_prefix, argname))\n    if (dest is None):\n        dest = prefixed.replace('-', '_')\n        final_dest = dest[(len(cls.argument_prefix) + 1):]\n    else:\n        final_dest = dest\n        dest = ('%s_%s' % (cls.argument_prefix, dest))\n    group.add_argument(('--%s' % prefixed), action='store', dest=dest, help=help_)\n    cls.path_arguments[dest] = final_dest", "docstring": "Subclasses may call this to expose a path argument.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nargname: str, the name of the argument, will be namespaced.\ndest: str, similar to the `dest` argument of\n`argparse.ArgumentParser.add_argument`, will be namespaced.\nhelp_: str, similar to the `help` argument of\n`argparse.ArgumentParser.add_argument`.", "source": "codesearchnet"}
{"code": "def _serialize_container_factory(suffix, container_map):\n    \n    def serialize(ion_event):\n        if not ion_event.ion_type.is_container:\n            raise TypeError('Expected container type')\n        return container_map[ion_event.ion_type]\n    serialize.__name__ = '_serialize_container_' + suffix\n    return serialize", "docstring": "Returns a function that serializes container start/end.\n\nArgs:\nsuffix (str): The suffix to name the function with.\ncontainer_map (Dictionary[core.IonType, bytes]): The\n\nReturns:\nfunction: The closure for serialization.", "source": "juraj-google-style"}
{"code": "def get(self, po):\n        \n        name = po.name\n        typ = po.typ\n        default = po.default\n\n        handler = getattr(self, '_get_{}'.format(typ), None)\n        if handler is None:\n            raise ValueError(typ)\n        self.seen.add(name)\n\n        \n        if not self.parser.has_option(self.section, name):\n            if default is REQUIRED:\n                raise NameError(self.section, name)\n            if isinstance(default, INHERIT_GLOBAL):\n                return handler('global', name, default.default)\n            \n            \n\n        return handler(self.section, name, default)", "docstring": "Lookup value for a PluginOption instance\n\nArgs:\npo: PluginOption\n\nReturns: converted value", "source": "juraj-google-style"}
{"code": "def transform(self, sents):\n\n    def convert(tokens):\n        return torch.tensor([self.vocab.stoi[t] for t in tokens], dtype=torch.long)\n    if (self.vocab is None):\n        raise Exception('Must run .fit() for .fit_transform() before calling .transform().')\n    seqs = sorted([convert(s) for s in sents], key=(lambda x: (- len(x))))\n    X = torch.LongTensor(pad_sequence(seqs, batch_first=True))\n    return X", "docstring": "Converts lists of tokens into a Tensor of embedding indices.\n\nArgs:\nsents: A list of lists of tokens (representing sentences)\nNOTE: These sentences should already be marked using the\nmark_entities() helper.\nReturns:\nX: A Tensor of shape (num_items, max_seq_len)", "source": "codesearchnet"}
{"code": "def compute(self, x, yerr):\n        \n        \n        K = self.kernel.get_value(x)\n        K[np.diag_indices_from(K)] += yerr ** 2\n\n        \n        self._factor = (cholesky(K, overwrite_a=True, lower=False), False)\n        self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0])))\n        self.computed = True", "docstring": "Compute and factorize the covariance matrix.\n\nArgs:\nx (ndarray[nsamples, ndim]): The independent coordinates of the\ndata points.\nyerr (ndarray[nsamples] or float): The Gaussian uncertainties on\nthe data points at coordinates ``x``. These values will be\nadded in quadrature to the diagonal of the covariance matrix.", "source": "juraj-google-style"}
{"code": "def _act(self, utterance: str) -> list:\n    if self.stateful:\n        utterance = [[utterance], [self.key]]\n    else:\n        utterance = [[utterance]]\n    agent_response: list = self.agent(*utterance)\n    return agent_response", "docstring": "Infers DeepPavlov agent with raw user input extracted from Alexa request.\n\nArgs:\nutterance: Raw user input extracted from Alexa request.\nReturns:\nresponse: DeepPavlov agent response.", "source": "codesearchnet"}
{"code": "def _run_function_for_calibration_graph_mode(sess: session.Session, signature_def: meta_graph_pb2.SignatureDef, representative_dataset: rd.RepresentativeDataset) -> None:\n    output_tensor_names = [output_tensor_info.name for output_tensor_info in signature_def.outputs.values()]\n    sample_validator = _create_sample_validator(expected_input_keys=signature_def.inputs.keys())\n    for sample in map(sample_validator, _log_sample_num_for_calibration(representative_dataset)):\n        feed_dict = rd.create_feed_dict_from_input_data(sample, signature_def)\n        sess.run(output_tensor_names, feed_dict=feed_dict)", "docstring": "Runs the representative dataset through a function for calibration.\n\nNOTE: This is intended to be run in graph mode (TF1).\n\nThe function is identified by the SignatureDef.\n\nArgs:\nsess: The Session object to run the function in.\nsignature_def: A SignatureDef that identifies a function by specifying the\ninputs and outputs.\nrepresentative_dataset: The representative dataset to run through the\nfunction.", "source": "github-repos"}
{"code": "def log_deprecated(name='', text='', eos=''):\n    assert (name or text)\n    if eos:\n        eos = ('after ' + datetime(*map(int, eos.split('-'))).strftime('%d %b'))\n    if name:\n        if eos:\n            warn_msg = ('%s will be deprecated %s. %s' % (name, eos, text))\n        else:\n            warn_msg = ('%s was deprecated. %s' % (name, text))\n    else:\n        warn_msg = text\n        if eos:\n            warn_msg += (' Legacy period ends %s' % eos)\n    logger.warn(('[Deprecated] ' + warn_msg))", "docstring": "Log deprecation warning.\n\nArgs:\nname (str): name of the deprecated item.\ntext (str, optional): information about the deprecation.\neos (str, optional): end of service date such as \"YYYY-MM-DD\".", "source": "codesearchnet"}
{"code": "def clear(self, name=None):\n    if name is None:\n        name = '%s_clear' % self._name\n    return self._clear_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)", "docstring": "Clears the staging area.\n\nArgs:\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"}
{"code": "def dimension_values(self, dimension, expanded=True, flat=True):\n        \n        index = self.get_dimension_index(dimension)\n        if index in [0, 1]:\n            return np.array([point[index] for point in self.data[0]])\n        else:\n            return super(Spline, self).dimension_values(dimension)", "docstring": "Return the values along the requested dimension.\n\nArgs:\ndimension: The dimension to return values for\nexpanded (bool, optional): Whether to expand values\nflat (bool, optional): Whether to flatten array\n\nReturns:\nNumPy array of values along the requested dimension", "source": "juraj-google-style"}
{"code": "def CheckOperatorSpacing(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    while True:\n        match = Match('^(.*\\\\boperator\\\\b)(\\\\S+)(\\\\s*\\\\(.*)$', line)\n        if match:\n            line = ((match.group(1) + ('_' * len(match.group(2)))) + match.group(3))\n        else:\n            break\n    if ((Search('[\\\\w.]=', line) or Search('=[\\\\w.]', line)) and (not Search('\\\\b(if|while|for) ', line)) and (not Search('(>=|<=|==|!=|&=|\\\\^=|\\\\|=|\\\\+=|\\\\*=|\\\\/=|\\\\%=)', line)) and (not Search('operator=', line))):\n        error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =')\n    match = Search('[^<>=!\\\\s](==|!=|<=|>=|\\\\|\\\\|)[^<>=!\\\\s,;\\\\)]', line)\n    if match:\n        error(filename, linenum, 'whitespace/operators', 3, ('Missing spaces around %s' % match.group(1)))\n    elif (not Match('\n        match = Match('^(.*[^\\\\s<])<[^\\\\s=<,]', line)\n        if match:\n            (_, _, end_pos) = CloseExpression(clean_lines, linenum, len(match.group(1)))\n            if (end_pos <= (- 1)):\n                error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <')\n        match = Match('^(.*[^-\\\\s>])>[^\\\\s=>,]', line)\n        if match:\n            (_, _, start_pos) = ReverseCloseExpression(clean_lines, linenum, len(match.group(1)))\n            if (start_pos <= (- 1)):\n                error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >')\n    match = Search('(operator|[^\\\\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\\\\s,=<])', line)\n    if (match and (not (match.group(1).isdigit() and match.group(2).isdigit())) and (not ((match.group(1) == 'operator') and (match.group(2) == ';')))):\n        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<')\n    match = Search('>>[a-zA-Z_]', line)\n    if match:\n        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>')\n    match = Search('(!\\\\s|~\\\\s|[\\\\s]--[\\\\s;]|[\\\\s]\\\\+\\\\+[\\\\s;])', line)\n    if match:\n        error(filename, linenum, 'whitespace/operators', 4, ('Extra space for operator %s' % match.group(1)))", "docstring": "Checks for horizontal spacing around operators.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def resume_training(self, train_data, model_path, valid_data=None):\n    restore_state = self.checkpointer.restore(model_path)\n    loss_fn = self._get_loss_fn()\n    self.train()\n    self._train_model(train_data=train_data, loss_fn=loss_fn, valid_data=valid_data, restore_state=restore_state)", "docstring": "This model resume training of a classifier by reloading the appropriate state_dicts for each model\n\nArgs:\ntrain_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\nX (data) and Y (labels) for the train split\nmodel_path: the path to the saved checpoint for resuming training\nvalid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\nX (data) and Y (labels) for the dev split", "source": "codesearchnet"}
{"code": "def transform(self, transform, desc=None):\n    if (desc is None):\n        desc = u'transform({})'.format(getattr(transform, '__name__', ''))\n    return self.replace(transforms=(self.transforms + [transform]), desc_stack=(self.desc_stack + [desc]))", "docstring": "Create a copy of this query, transformed by `transform`.\n\nArgs:\ntransform (callable): Callable that takes an iterable of values and\nreturns an iterable of transformed values.\n\nKeyword Args:\ndesc (str): A description of the transform, to use in log messages.\nDefaults to the name of the `transform` function.\n\nReturns:\nQuery", "source": "codesearchnet"}
{"code": "def resize(self, image: 'torch.Tensor', size: SizeDict, interpolation: 'F.InterpolationMode'=None, antialias: bool=True, **kwargs) -> 'torch.Tensor':\n    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR\n    if size.shortest_edge and size.longest_edge:\n        new_size = get_size_with_aspect_ratio(image.size()[-2:], size.shortest_edge, size.longest_edge)\n    elif size.shortest_edge:\n        new_size = get_resize_output_image_size(image, size=size.shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST)\n    elif size.max_height and size.max_width:\n        new_size = get_image_size_for_max_height_width(image.size()[-2:], size.max_height, size.max_width)\n    elif size.height and size.width:\n        new_size = (size.height, size.width)\n    else:\n        raise ValueError(f\"Size must contain 'height' and 'width' keys, or 'max_height' and 'max_width', or 'shortest_edge' key. Got {size}.\")\n    return F.resize(image, new_size, interpolation=interpolation, antialias=antialias)", "docstring": "Resize an image to `(size[\"height\"], size[\"width\"])`.\n\nArgs:\nimage (`torch.Tensor`):\nImage to resize.\nsize (`SizeDict`):\nDictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\ninterpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):\n`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.\n\nReturns:\n`torch.Tensor`: The resized image.", "source": "github-repos"}
{"code": "def _maybe_add_default_serving_output(export_outputs):\n    if len(export_outputs) == 1:\n        (key, value), = export_outputs.items()\n        if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n            export_outputs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value\n    if len(export_outputs) > 1:\n        if signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in export_outputs:\n            raise ValueError('Multiple `export_outputs` were provided, but none of them are specified as the default. Use`tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY` to specify a default.')\n    return export_outputs", "docstring": "Add a default serving output to the export_outputs if not present.\n\nArgs:\nexport_outputs: Describes the output signatures to be exported to\n`SavedModel` and used during serving. Should be a dict.\n\nReturns:\nexport_outputs dict with default serving signature added if necessary\n\nRaises:\nValueError: if multiple export_outputs were provided without a default\nserving key.", "source": "github-repos"}
{"code": "def register_hook(self, hook, priority='NORMAL'):\n    assert isinstance(hook, Hook)\n    if hasattr(hook, 'priority'):\n        raise ValueError('\"priority\" is a reserved attribute for hooks')\n    priority = get_priority(priority)\n    hook.priority = priority\n    inserted = False\n    for i in range((len(self._hooks) - 1), (- 1), (- 1)):\n        if (priority >= self._hooks[i].priority):\n            self._hooks.insert((i + 1), hook)\n            inserted = True\n            break\n    if (not inserted):\n        self._hooks.insert(0, hook)", "docstring": "Register a hook into the hook list.\n\nArgs:\nhook (:obj:`Hook`): The hook to be registered.\npriority (int or str or :obj:`Priority`): Hook priority.\nLower value means higher priority.", "source": "codesearchnet"}
{"code": "def EnterClassType(self, node):\n    nodes = [node]\n    seen = set()\n    while nodes:\n        cur_node = nodes.pop(0)\n        if cur_node in seen:\n            continue\n        seen.add(cur_node)\n        for prefix, cls in self._Lookup(cur_node):\n            if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.NothingType):\n                continue\n            if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.ClassType):\n                if cls.type.cls:\n                    cls = cls.type.cls\n                else:\n                    nodes.append(cls.type)\n            if isinstance(cls, pytd.Class):\n                node.cls = cls\n                return\n            else:\n                logging.warning(\"Couldn't resolve %s: Not a class: %s\", prefix + node.name, type(cls))", "docstring": "Fills in a class type.\n\nArgs:\nnode: A ClassType. This node will have a name, which we use for lookup.\n\nReturns:\nThe same ClassType. We will have done our best to fill in its \"cls\"\nattribute. Call VerifyLookup() on your tree if you want to be sure that\nall of the cls pointers have been filled in.", "source": "github-repos"}
{"code": "def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir,\n                                     epsilon):\n  \n  dataset_images = [f for f in os.listdir(dataset_batch_dir)\n                    if f.endswith('.png')]\n  image_hashes = {}\n  resize_warning = False\n  for img_name in dataset_images:\n    if not os.path.exists(os.path.join(adv_dir, img_name)):\n      logging.warning('Image %s not found in the output', img_name)\n      continue\n    image = np.array(\n        Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))\n    image = image.astype('int32')\n    image_max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')\n    image_min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')\n    \n    adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')\n    \n    if adv_image.size[::-1] != image.shape[:2]:\n      resize_warning = True\n      adv_image = adv_image.resize((image.shape[1], image.shape[0]),\n                                   Image.BICUBIC)\n    adv_image = np.array(adv_image)\n    clipped_adv_image = np.clip(adv_image,\n                                image_min_clip,\n                                image_max_clip)\n    Image.fromarray(clipped_adv_image).save(os.path.join(output_dir, img_name))\n    \n    image_hashes[img_name[:-4]] = hashlib.sha1(\n        clipped_adv_image.view(np.uint8)).hexdigest()\n  if resize_warning:\n    logging.warning('One or more adversarial images had incorrect size')\n  return image_hashes", "docstring": "Enforces size of perturbation on images, and compute hashes for all images.\n\nArgs:\ndataset_batch_dir: directory with the images of specific dataset batch\nadv_dir: directory with generated adversarial images\noutput_dir: directory where to copy result\nepsilon: size of perturbation\n\nReturns:\ndictionary with mapping form image ID to hash.", "source": "juraj-google-style"}
{"code": "def _decorator(func):\n    opname = func.__name__\n    cap_sym_name = sym_name.capitalize()\n    func.__doc__ = '\\n    Assert the condition `x {sym}` holds element-wise.\\n\\n    When running in graph mode, you should add a dependency on this operation\\n    to ensure that it runs. Example of adding a dependency to an operation:\\n\\n    ```python\\n    with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\\n      output = tf.reduce_sum(x)\\n    ```\\n\\n    {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\\n    If `x` is empty this is trivially satisfied.\\n\\n    Args:\\n      x:  Numeric `Tensor`.\\n      data:  The tensors to print out if the condition is False.  Defaults to\\n        error message and first few entries of `x`.\\n      summarize: Print this many entries of each tensor.\\n      message: A string to prefix to the default message.\\n      name: A name for this operation (optional).  Defaults to \"{opname}\".\\n\\n    Returns:\\n      Op that raises `InvalidArgumentError` if `x {sym}` is False.\\n      @compatibility(eager)\\n        returns None\\n      @end_compatibility\\n\\n    Raises:\\n      InvalidArgumentError: if the check can be performed immediately and\\n        `x {sym}` is False. The check can be performed immediately during\\n        eager execution or if `x` is statically known.\\n    '.format(sym=sym, sym_name=cap_sym_name, opname=opname)\n    return func", "docstring": "Generated decorator that adds the appropriate docstring to the function for symbol `sym`.\n\nArgs:\nfunc: Function for a TensorFlow op\n\nReturns:\nVersion of `func` with documentation attached.", "source": "github-repos"}
{"code": "def build_image(image_path, image_name, build_args=None, dockerfile_path=None):\n    \n    cmd = ['docker', 'build', '-t', image_name, image_path]\n    if dockerfile_path:\n        cmd.extend(['-f', dockerfile_path])\n\n    for k, v in (build_args or {}).items():\n        cmd += ['--build-arg', '{}={}'.format(k, v)]\n    check_call(cmd)", "docstring": "Build an image\n\nArgs:\nimage_path (str): the path to the image directory\nimage_name (str): image 'name:tag' to build\nbuild_args (dict, optional): dict of docker build arguments\ndockerfile_path (str, optional):\npath to dockerfile relative to image_path\nif not `image_path/Dockerfile`.", "source": "juraj-google-style"}
{"code": "def brightness(x, severity=1):\n    c = [0.1, 0.2, 0.3, 0.4, 0.5][(severity - 1)]\n    x = (np.array(x) / 255.0)\n    x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)\n    x[(:, :, 2)] = np.clip((x[(:, :, 2)] + c), 0, 1)\n    x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)\n    x_clip = (np.clip(x, 0, 1) * 255)\n    return around_and_astype(x_clip)", "docstring": "Change brightness of images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Changed brightness.", "source": "codesearchnet"}
{"code": "def get_njobs_in_queue(self, username=None):\n        \n        if username is None: username = getpass.getuser()\n        njobs, process = self._get_njobs_in_queue(username=username)\n\n        if process is not None and process.returncode != 0:\n            \n            err_msg = ('Error trying to get the number of jobs in the queue' +\n                       'The error response reads:\\n {}'.format(process.stderr.read()))\n            logger.critical(err_msg)\n\n        if not isinstance(self, ShellAdapter):\n            logger.info('The number of jobs currently in the queue is: {}'.format(njobs))\n\n        return njobs", "docstring": "returns the number of jobs in the queue, probably using subprocess or shutil to\ncall a command like 'qstat'. returns None when the number of jobs cannot be determined.\n\nArgs:\nusername: (str) the username of the jobs to count (default is to autodetect)", "source": "juraj-google-style"}
{"code": "def value_to_message(self, value):\n    if (not isinstance(value, self.type)):\n        raise EncodeError(('Expected type %s, got %s: %r' % (self.type.__name__, type(value).__name__, value)))\n    return value", "docstring": "Convert a value instance to a message.\n\nUsed by serializers to convert Python user types to underlying\nmessages for transmission.\n\nArgs:\nvalue: A value of type self.type.\n\nReturns:\nAn instance of type self.message_type.", "source": "codesearchnet"}
{"code": "def multilayer_fully_connected(images, labels):\n  \n  \n  \n  images = pt.wrap(images)\n  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):\n    return (images.flatten().fully_connected(100).fully_connected(100)\n            .softmax_classifier(10, labels))", "docstring": "Creates a multi layer network of fully_connected layers.\n\nEach layer is 100 neurons.  Please change this to experiment with\narchitectures.\n\nArgs:\nimages: The input images.\nlabels: The labels as dense one-hot vectors.\nReturns:\nA softmax result.", "source": "juraj-google-style"}
{"code": "def AddSpecification(self, specification):\n    if (specification.identifier in self._format_specifications):\n        raise KeyError('Format specification {0:s} is already defined in store.'.format(specification.identifier))\n    self._format_specifications[specification.identifier] = specification\n    for signature in specification.signatures:\n        signature_index = len(self._signature_map)\n        signature_identifier = '{0:s}:{1:d}'.format(specification.identifier, signature_index)\n        if (signature_identifier in self._signature_map):\n            raise KeyError('Signature {0:s} is already defined in map.'.format(signature_identifier))\n        signature.SetIdentifier(signature_identifier)\n        self._signature_map[signature_identifier] = specification", "docstring": "Adds a format specification.\n\nArgs:\nspecification (FormatSpecification): format specification.\n\nRaises:\nKeyError: if the store already contains a specification with\nthe same identifier.", "source": "codesearchnet"}
{"code": "def delete_attachment(cls, session, attachment):\n    return super(Conversations, cls).delete(session, attachment, endpoint_override=('/attachments/%s.json' % attachment.id), out_type=Attachment)", "docstring": "Delete an attachment.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nattachment (helpscout.models.Attachment): The attachment to\nbe deleted.\n\nReturns:\nNoneType: Nothing.", "source": "codesearchnet"}
{"code": "def frame(self, frame):\n    try:\n        zframe = str(int(frame)).zfill(self._zfill)\n    except ValueError:\n        zframe = frame\n    if (self._zfill == 0):\n        zframe = ''\n    return ''.join((self._dir, self._base, zframe, self._ext))", "docstring": "Return a path go the given frame in the sequence.  Integer or string\ndigits are treated as a frame number and padding is applied, all other\nvalues are passed though.\n\nExamples:\n>>> seq.frame(1)\n/foo/bar.0001.exr\n>>> seq.frame(\"#\")\n/foo/bar.#.exr\n\nArgs:\nframe (int or str): the desired frame number or a char to pass\nthrough (ie. #)\n\nReturns:\nstr:", "source": "codesearchnet"}
{"code": "def row_splits(self):\n    return self._row_partition.row_splits()", "docstring": "The row-split indices for this ragged tensor's `values`.\n\n`rt.row_splits` specifies where the values for each row begin and end in\n`rt.values`.  In particular, the values for row `rt[i]` are stored in\nthe slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.\n\nReturns:\nA 1-D integer `Tensor` with shape `[self.nrows+1]`.\nThe returned tensor is non-empty, and is sorted in ascending order.\n`self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to\n`self.values.shape[0]`.\n\n#### Example:\n\n>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n>>> print(rt.row_splits)  # indices of row splits in rt.values\ntf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)", "source": "github-repos"}
{"code": "def tokens(self, tokenset='internal'):\n        \n        toks = self.get('tokens', {}).get(tokenset)\n        if toks is not None:\n            if isinstance(toks, stringtypes):\n                toks = YyTokenLattice.from_string(toks)\n            elif isinstance(toks, Sequence):\n                toks = YyTokenLattice.from_list(toks)\n        return toks", "docstring": "Deserialize and return a YyTokenLattice object for the\ninitial or internal token set, if provided, from the YY\nformat or the JSON-formatted data; otherwise return the\noriginal string.\n\nArgs:\ntokenset (str): return `'initial'` or `'internal'` tokens\n(default: `'internal'`)\nReturns:\n:class:`YyTokenLattice`", "source": "juraj-google-style"}
{"code": "def _process_update(self, item, feed_item):\n    campaign = self._campaign_dao.get(feed_item, required=True)\n    item['active'] = feed_item.get(FieldMap.AD_ACTIVE, True)\n    if item['active']:\n        self._wait_all_creative_activation(feed_item)\n    self._setup_rotation_strategy(item['creativeRotation'], feed_item)\n    if feed_item['creative_assignment']:\n        item['creativeRotation']['creativeAssignments'] = []\n    item['placementAssignments'] = []\n    item['eventTagOverrides'] = []\n    self._process_assignments(feed_item, item['creativeRotation'].get('creativeAssignments', []), item['placementAssignments'], item['eventTagOverrides'], campaign)\n    if 'deliverySchedule' in item:\n        item['deliverySchedule']['priority'] = feed_item.get(FieldMap.AD_PRIORITY, None)\n    if feed_item.get(FieldMap.AD_HARDCUTOFF, '') != '':\n        if not 'deliverySchedule' in item:\n            item['deliverySchedule'] = {}\n        item['deliverySchedule']['hardCutoff'] = feed_item.get(FieldMap.AD_HARDCUTOFF)\n    item['archived'] = feed_item.get(FieldMap.AD_ARCHIVED, False)\n    if 'T' in feed_item.get(FieldMap.AD_END_DATE, None):\n        item['endTime'] = feed_item.get(FieldMap.AD_END_DATE, None)\n    else:\n        item['endTime'] = StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_END_DATE, None), '23:59:59')\n    if 'T' in feed_item.get(FieldMap.AD_START_DATE, None):\n        item['startTime'] = feed_item.get(FieldMap.AD_START_DATE, None)\n    else:\n        item['startTime'] = StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_START_DATE, None))\n    item['name'] = feed_item.get(FieldMap.AD_NAME, None)\n    self._process_landing_page(item, feed_item)", "docstring": "Updates an ad based on the values from the feed.\n\nArgs:\nitem: Object representing the ad to be updated, this object is updated\ndirectly.\nfeed_item: Feed item representing ad values from the Bulkdozer feed.", "source": "github-repos"}
{"code": "def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n    return self._path_open(path, 'rb', mime_type, compression_type)", "docstring": "Returns a read channel for the given file path.\n\nArgs:\npath: string path of the file object to be read\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def find(self, title):\n        \n        files = backend.iterfiles(self._drive, name=title)\n        try:\n            return next(self[id] for id, _ in files)\n        except StopIteration:\n            raise KeyError(title)", "docstring": "Fetch and return the first spreadsheet with the given title.\n\nArgs:\ntitle(str): title/name of the spreadsheet to return\nReturns:\nSpreadSheet: new SpreadSheet instance\nRaises:\nKeyError: if no spreadsheet with the given ``title`` is found", "source": "juraj-google-style"}
{"code": "def _make_patterns(patterns):\n    \n    field_registry = display_fields.FieldRegistry()\n\n    pattern_list = display_pattern.ScreenPatternList(\n        field_registry=field_registry,\n    )\n    for pattern in patterns:\n        pattern_list.add(pattern.split('\\n'))\n    return pattern_list", "docstring": "Create a ScreenPatternList from a given pattern text.\n\nArgs:\npattern_txt (str list): the patterns\n\nReturns:\nmpdlcd.display_pattern.ScreenPatternList: a list of patterns from the\ngiven entries.", "source": "juraj-google-style"}
{"code": "def launch_batch_workflow(self, batch_workflow):\n    url = ('%(base_url)s/batch_workflows' % {'base_url': self.base_url})\n    try:\n        r = self.gbdx_connection.post(url, json=batch_workflow)\n        batch_workflow_id = r.json()['batch_workflow_id']\n        return batch_workflow_id\n    except TypeError as e:\n        self.logger.debug('Batch Workflow not launched, reason: {0}'.format(e))", "docstring": "Launches GBDX batch workflow.\n\nArgs:\nbatch_workflow (dict): Dictionary specifying batch workflow tasks.\n\nReturns:\nBatch Workflow id (str).", "source": "codesearchnet"}
{"code": "def replace(table, columns, values):\n    rows = len(values)\n    cells = len(columns) * len(values)\n    return _Mutator(mutation=Mutation(replace=batch._make_write_pb(table, columns, values)), operation=WriteMutation._OPERATION_REPLACE, rows=rows, cells=cells, kwargs={'table': table, 'columns': columns, 'values': values})", "docstring": "Replace one or more table rows.\n\nArgs:\ntable: Name of the table to be modified.\ncolumns: Name of the table columns to be modified.\nvalues: Values to be modified.", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A SqueezeBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n    \n    self.Classify = channel.unary_unary(\n        '/tensorflow.serving.PredictionService/Classify',\n        request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,\n        response_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString,\n        )\n    self.Regress = channel.unary_unary(\n        '/tensorflow.serving.PredictionService/Regress',\n        request_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString,\n        response_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString,\n        )\n    self.Predict = channel.unary_unary(\n        '/tensorflow.serving.PredictionService/Predict',\n        request_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString,\n        response_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString,\n        )\n    self.GetModelMetadata = channel.unary_unary(\n        '/tensorflow.serving.PredictionService/GetModelMetadata',\n        request_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString,\n        response_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    value = registry_key.GetValueByName('AppCompatCache')\n    if not value:\n      return\n\n    value_data = value.data\n    value_data_size = len(value.data)\n\n    format_type = self._CheckSignature(value_data)\n    if not format_type:\n      parser_mediator.ProduceExtractionWarning(\n          'Unsupported signature in AppCompatCache key: {0:s}'.format(\n              registry_key.path))\n      return\n\n    header_object = self._ParseHeader(format_type, value_data)\n\n    \n    \n    if value_data_size <= header_object.header_size:\n      return\n\n    cached_entry_offset = header_object.header_size\n\n    self._cached_entry_data_type_map = self._GetCachedEntryDataTypeMap(\n        format_type, value_data, cached_entry_offset)\n    if not self._cached_entry_data_type_map:\n      raise errors.ParseError('Unable to determine cached entry data type.')\n\n    parse_cached_entry_function = None\n    if format_type == self._FORMAT_TYPE_XP:\n      parse_cached_entry_function = self._ParseCachedEntryXP\n    elif format_type == self._FORMAT_TYPE_2003:\n      parse_cached_entry_function = self._ParseCachedEntry2003\n    elif format_type == self._FORMAT_TYPE_VISTA:\n      parse_cached_entry_function = self._ParseCachedEntryVista\n    elif format_type == self._FORMAT_TYPE_7:\n      parse_cached_entry_function = self._ParseCachedEntry7\n    elif format_type == self._FORMAT_TYPE_8:\n      parse_cached_entry_function = self._ParseCachedEntry8\n    elif format_type == self._FORMAT_TYPE_10:\n      parse_cached_entry_function = self._ParseCachedEntry10\n\n    cached_entry_index = 0\n    while cached_entry_offset < value_data_size:\n      cached_entry_object = parse_cached_entry_function(\n          value_data, cached_entry_offset)\n\n      event_data = AppCompatCacheEventData()\n      event_data.entry_index = cached_entry_index + 1\n      event_data.key_path = registry_key.path\n      event_data.offset = cached_entry_offset\n      event_data.path = cached_entry_object.path\n\n      if cached_entry_object.last_modification_time is not None:\n        if not cached_entry_object.last_modification_time:\n          date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n        else:\n          date_time = dfdatetime_filetime.Filetime(\n              timestamp=cached_entry_object.last_modification_time)\n\n        \n        event = time_events.DateTimeValuesEvent(\n            date_time, 'File Last Modification Time')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      if cached_entry_object.last_update_time is not None:\n        if not cached_entry_object.last_update_time:\n          date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n        else:\n          date_time = dfdatetime_filetime.Filetime(\n              timestamp=cached_entry_object.last_update_time)\n\n        \n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      cached_entry_offset += cached_entry_object.cached_entry_size\n      cached_entry_index += 1\n\n      if (header_object.number_of_cached_entries != 0 and\n          cached_entry_index >= header_object.number_of_cached_entries):\n        break", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and 
dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def unban_user(self, room_id, user_id):\n        \n        body = {\n            \"user_id\": user_id\n        }\n        return self._send(\"POST\", \"/rooms/\" + room_id + \"/unban\", body)", "docstring": "Perform POST /rooms/$room_id/unban\n\nArgs:\nroom_id (str): The room ID\nuser_id (str): The user ID of the banee(sic)", "source": "juraj-google-style"}
{"code": "def Read(self, path, length=None, offset=0, fh=None):\n    \n    del fh\n\n    if self._IsDir(path):\n      raise fuse.FuseOSError(errno.EISDIR)\n\n    fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)\n\n    \n    if all((hasattr(fd, \"Read\"), hasattr(fd, \"Seek\"), callable(fd.Read),\n            callable(fd.Seek))):\n      \n      if length is None:\n        length = fd.Get(fd.Schema.SIZE)\n\n      fd.Seek(offset)\n      return fd.Read(length)\n    else:\n      \n      raise fuse.FuseOSError(errno.EIO)", "docstring": "Reads data from a file.\n\nArgs:\npath: The path to the file to read.\nlength: How many bytes to read.\noffset: Offset in bytes from which reading should start.\nfh: A file handler. Not used.\n\nReturns:\nA string containing the file contents requested.\n\nRaises:\nFuseOSError: If we try and read a directory or if we try and read an\nobject that doesn't support reading.", "source": "juraj-google-style"}
{"code": "def remove_by_threshold(self, threshold=5):\n        \n        keys = [x for x in self._dictionary.keys()]\n        for key in keys:\n            if self._dictionary[key] <= threshold:\n                self._dictionary.pop(key)\n        self._update_dictionary()", "docstring": "Remove all words at, or below, the provided threshold\n\nArgs:\nthreshold (int): The threshold at which a word is to be \\\nremoved", "source": "juraj-google-style"}
{"code": "def _bind_topics(self, topics):\n        \n\n        \n        \n\n        self.client.subscribe(topics.status, self._on_status_message)\n        self.client.subscribe(topics.tracing, self._on_trace)\n        self.client.subscribe(topics.streaming, self._on_report)\n        self.client.subscribe(topics.response, self._on_response_message)", "docstring": "Subscribe to all the topics we need to communication with this device\n\nArgs:\ntopics (MQTTTopicValidator): The topic validator for this device that\nwe are connecting to.", "source": "juraj-google-style"}
{"code": "def add_edge(self, source, target):\n    \n    edge = Edge(len(self.edges))\n    self.edges.append(edge)\n    source.out_edges.append(edge.idx)\n    target.in_edges.append(edge.idx)\n    edge.source = source.idx\n    edge.target = target.idx\n    return edge", "docstring": "Returns a new edge connecting source and target vertices.\n\nArgs:\nsource: The source Vertex.\ntarget: The target Vertex.\n\nReturns:\nA new Edge linking source to target.", "source": "juraj-google-style"}
{"code": "def create_grid_samples(order, dim=1):\n    x_data = (numpy.arange(1, (order + 1)) / (order + 1.0))\n    x_data = chaospy.quad.combine(([x_data] * dim))\n    return x_data.T", "docstring": "Create samples from a regular grid.\n\nArgs:\norder (int):\nThe order of the grid. Defines the number of samples.\ndim (int):\nThe number of dimensions in the grid\n\nReturns (numpy.ndarray):\nRegular grid with ``shape == (dim, order)``.", "source": "codesearchnet"}
{"code": "def _InsertEvent(self, event, force_flush=False):\n    \n    if event:\n      event_document = {'index': {\n          '_index': self._index_name, '_type': self._document_type}}\n      event_values = self._GetSanitizedEventValues(event)\n\n      self._event_documents.append(event_document)\n      self._event_documents.append(event_values)\n      self._number_of_buffered_events += 1\n\n    if force_flush or self._number_of_buffered_events > self._flush_interval:\n      self._FlushEvents()", "docstring": "Inserts an event.\n\nEvents are buffered in the form of documents and inserted to Elasticsearch\nwhen either forced to flush or when the flush interval (threshold) has been\nreached.\n\nArgs:\nevent (EventObject): event.\nforce_flush (bool): True if buffered event documents should be inserted\ninto Elasticsearch.", "source": "juraj-google-style"}
{"code": "def ingress(self, envelope, http_headers, operation):\n    if self._logger.isEnabledFor(logging.DEBUG):\n        self._logger.debug(_RESPONSE_XML_LOG_LINE, etree.tostring(envelope, pretty_print=True))\n    if self._logger.isEnabledFor(logging.WARN):\n        warn_data = {}\n        header = envelope.find(_HEADER_XPATH)\n        fault = envelope.find(_FAULT_XPATH)\n        if (fault is not None):\n            warn_data['faultMessage'] = fault.find('faultstring').text\n            if (header is not None):\n                header_data = {re.sub(_REMOVE_NS_REGEXP, '', child.tag): child.text for child in header[0]}\n                warn_data.update(header_data)\n            if ('serviceName' not in warn_data):\n                warn_data['serviceName'] = operation.binding.wsdl.services.keys()[0]\n            if ('methodName' not in warn_data):\n                warn_data['methodName'] = operation.name\n            self._logger.warn('Error summary: %s', warn_data)\n    return (envelope, http_headers)", "docstring": "Overrides the ingress function for response logging.\n\nArgs:\nenvelope: An Element with the SOAP request data.\nhttp_headers: A dict of the current http headers.\noperation: The SoapOperation instance.\n\nReturns:\nA tuple of the envelope and headers.", "source": "codesearchnet"}
{"code": "def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):\n    if (range_min == range_max):\n        return tensor\n    float_tensor = tf.to_float(tensor)\n    scaled_tensor = tf.divide((tf.subtract(float_tensor, range_min) * tf.constant(float((scale_max - scale_min)))), tf.constant(float((range_max - range_min))))\n    shifted_tensor = (scaled_tensor + tf.constant(float(scale_min)))\n    return shifted_tensor", "docstring": "Scale a tensor to scale_min to scale_max.\n\nArgs:\ntensor: input tensor. Should be a numerical tensor.\nrange_min: min expected value for this feature/tensor.\nrange_max: max expected Value.\nscale_min: new expected min value.\nscale_max: new expected max value.\n\nReturns:\nscaled tensor.", "source": "codesearchnet"}
{"code": "def _build_instruction_ds(instructions):\n    tensor_inputs = {k: (np.array(vals, dtype=np.int64) if (k == 'mask_offset') else list(vals)) for (k, vals) in utils.zip_dict(*instructions)}\n    return tf.data.Dataset.from_tensor_slices(tensor_inputs)", "docstring": "Create a dataset containing individual instruction for each shard.\n\nEach instruction is a dict:\n```\n{\n\"filepath\": tf.Tensor(shape=(), dtype=tf.string),\n\"mask_offset\": tf.Tensor(shape=(), dtype=tf.int64),\n\"mask\": tf.Tensor(shape=(100,), dtype=tf.bool),\n}\n```\n\nArgs:\ninstructions: `list[dict]`, the list of instruction dict\n\nReturns:\ninstruction_ds: The dataset containing the instruction. The dataset size is\nthe number of shard.", "source": "codesearchnet"}
{"code": "def add_folder(self, path, title, description=None, language=None, thumbnail=None, source_id=None, **node_data):\n        \n        self._parse_path(path)\n        path = path if path.endswith(title) else \"{}/{}\".format(path, title)\n        self._commit(path, title, description=description, language=language, thumbnail=thumbnail, source_id=source_id)", "docstring": "add_folder: Creates folder in csv\nArgs:\npath: (str) where in zip to write folder\ntitle: (str) content's title\nsource_id: (str) content's original id (optional)\ndescription: (str) description of content (optional)\nlanguage (str): language of content (optional)\nthumbnail (str):  path to thumbnail in zip (optional)\nReturns: None", "source": "juraj-google-style"}
{"code": "def _on_connect(self, sequence, topic, message):\n    try:\n        slug = None\n        parts = topic.split('/')\n        slug = parts[(- 3)]\n        uuid = self._extract_device_uuid(slug)\n    except Exception:\n        self._logger.exception('Error parsing slug from connection request (slug=%s, topic=%s)', slug, topic)\n        return\n    if messages.ConnectCommand.matches(message):\n        key = message['key']\n        client = message['client']\n        self._loop.add_callback(self._connect_to_device, uuid, key, client)\n    else:\n        self._logger.warn('Unknown message received on connect topic=%s, message=%s', topic, message)", "docstring": "Process a request to connect to an IOTile device\n\nA connection message triggers an attempt to connect to a device,\nany error checking is done by the DeviceManager that is actually\nmanaging the devices.\n\nA disconnection message is checked to make sure its key matches\nwhat we except for this device and is either discarded or\nforwarded on to the DeviceManager.\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage_type (string): The type of the packet received\nmessage (dict): The message itself", "source": "codesearchnet"}
{"code": "def parse_range_header(self, header, resource_size):\n        \n        if not header or '=' not in header:\n            return None\n\n        ranges = []\n        units, range_ = header.split('=', 1)\n        units = units.strip().lower()\n\n        if units != 'bytes':\n            return None\n\n        for val in range_.split(','):\n            val = val.strip()\n            if '-' not in val:\n                return None\n\n            if val.startswith('-'):\n                \n                \n                start = resource_size + int(val)\n                if start < 0:\n                    start = 0\n                stop = resource_size\n            else:\n                \n                start, stop = val.split('-', 1)\n                start = int(start)\n                \n                \n                \n                stop = int(stop) + 1 if stop else resource_size\n                if start >= stop:\n                    return None\n\n            ranges.append((start, stop))\n\n        return ranges", "docstring": "Parses a range header into a list of two-tuples (start, stop) where\n`start` is the starting byte of the range (inclusive) and\n`stop` is the ending byte position of the range (exclusive).\n\nArgs:\nheader (str): The HTTP_RANGE request header.\nresource_size (int): The size of the file in bytes.\n\nReturns:\nNone if the value of the header is not syntatically valid.", "source": "juraj-google-style"}
{"code": "def repr(self, changed_widgets=None):\n        \n        if changed_widgets is None:\n            changed_widgets={}\n        return super(Widget, self).repr(changed_widgets)", "docstring": "Represents the widget as HTML format, packs all the attributes, children and so on.\n\nArgs:\nclient (App): Client instance.\nchanged_widgets (dict): A dictionary containing a collection of widgets that have to be updated.\nThe Widget that have to be updated is the key, and the value is its textual repr.", "source": "juraj-google-style"}
{"code": "def get_transaction(self, transaction_id):\n        \n        payload = self._get_data_by_id(\n            transaction_id, 'commit_store_get_transaction')\n\n        txn = Transaction()\n        txn.ParseFromString(payload)\n\n        return txn", "docstring": "Returns a Transaction object from the block store by its id.\n\nParams:\ntransaction_id (str): The header_signature of the desired txn\n\nReturns:\nTransaction: The specified transaction\n\nRaises:\nValueError: The transaction is not in the block store", "source": "juraj-google-style"}
{"code": "def keyword_args_only(func):\n    decorator_utils.validate_callable(func, 'keyword_args_only')\n\n    @functools.wraps(func)\n    def new_func(*args, **kwargs):\n        \n        if args:\n            raise ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. Received the following positional arguments: {args}')\n        return func(**kwargs)\n    return new_func", "docstring": "Decorator for marking specific function accepting keyword args only.\n\nThis decorator raises a `ValueError` if the input `func` is called with any\nnon-keyword args. This prevents the caller from providing the arguments in\nwrong order.\n\nArgs:\nfunc: The function or method needed to be decorated.\n\nReturns:\nDecorated function or method.\n\nRaises:\nValueError: If `func` is not callable.", "source": "github-repos"}
{"code": "def get_frequency_shift(self, grid_points, temperatures=np.arange(0, 1001, 10, dtype='double'), epsilons=None, output_filename=None):\n    if (self._interaction is None):\n        self.set_phph_interaction()\n    if (epsilons is None):\n        _epsilons = [0.1]\n    else:\n        _epsilons = epsilons\n    self._grid_points = grid_points\n    get_frequency_shift(self._interaction, self._grid_points, self._band_indices, _epsilons, temperatures, output_filename=output_filename, log_level=self._log_level)", "docstring": "Frequency shift from lowest order diagram is calculated.\n\nArgs:\nepslins(list of float):\nThe value to avoid divergence. When multiple values are given\nfrequency shifts for those values are returned.", "source": "codesearchnet"}
{"code": "def set(self, name, value):\n        \n        if name not in self._options:\n\n            raise AttributeError(\"Option {0} does not exist.\".format(name))\n\n        return self._options[name].__set__(self, value)", "docstring": "Set an option value.\n\nArgs:\nname (str): The name of the option.\nvalue: The value to set the option to.\n\nRaises:\nAttributeError: If the name is not registered.\nTypeError: If the value is not a string or appropriate native type.\nValueError: If the value is a string but cannot be coerced.", "source": "juraj-google-style"}
{"code": "def init_app(self, app):\n        \n        app.config.setdefault('FEDORA_BASE_URL', 'http:\n        if hasattr(app, 'teardown_appcontext'):\n            app.teardown_appcontext(self.teardown)\n        else:\n            app.teardown_request(self.teardown)", "docstring": "Initializes a Flask app object for the extension.\n\nArgs:\napp(Flask): Flask app", "source": "juraj-google-style"}
{"code": "def get_site_spd_dos(self, site):\n    spd_dos = dict()\n    for (orb, pdos) in self.pdos[site].items():\n        orbital_type = _get_orb_type(orb)\n        if (orbital_type in spd_dos):\n            spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)\n        else:\n            spd_dos[orbital_type] = pdos\n    return {orb: Dos(self.efermi, self.energies, densities) for (orb, densities) in spd_dos.items()}", "docstring": "Get orbital projected Dos of a particular site\n\nArgs:\nsite: Site in Structure associated with CompleteDos.\n\nReturns:\ndict of {orbital: Dos}, e.g. {\"s\": Dos object, ...}", "source": "codesearchnet"}
{"code": "def label_count(self, label_list_ids=None):\n        \n        count = collections.defaultdict(int)\n\n        for label_list in self.label_lists.values():\n            if label_list_ids is None or label_list.idx in label_list_ids:\n                for label_value, label_count in label_list.label_count().items():\n                    count[label_value] += label_count\n\n        return count", "docstring": "Return a dictionary containing the number of times,\nevery label-value in this utterance is occurring.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists\nwith an id contained in this list\nare considered.\n\nReturns:\ndict: A dictionary containing the number of occurrences\nwith the label-value as key.", "source": "juraj-google-style"}
{"code": "def maybe_scheduled_sampling(self, features, logits, losses):\n    hparams = self.hparams\n    problem_hparams = self._problem_hparams\n    if (hparams.scheduled_sampling_prob == 0.0):\n        return (logits, losses)\n    modality = problem_hparams.modality['targets']\n    if (modality != modalities.ModalityType.SYMBOL):\n        assert (hparams.scheduled_sampling_prob == 0), 'Scheduled sampling only applies to ModalityType.SYMBOL. Set hparams.scheduled_sampling_prob == 0.0.'\n        return (logits, losses)\n    is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)\n    if (not is_training):\n        tf.logging.info('Running in %s mode. Not using scheduled sampling.', hparams.mode)\n        return (logits, losses)\n    vocab_size = problem_hparams.vocab_size['targets']\n    assert (vocab_size is not None)\n    assert (hparams.vocab_divisor == 1)\n\n    def sample(x):\n        'Multinomial sampling from a n-dimensional tensor.'\n        samples = tf.multinomial(tf.reshape(x, [(- 1), vocab_size]), 1)\n        reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:(- 1)])\n        return tf.to_int32(reshaped_samples)\n\n    def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob):\n        'Interleave sampled and gold tokens randomly.'\n        return tf.where(tf.less(tf.random_uniform(common_layers.shape_list(sampled_targets)), mixin_prob), sampled_targets, gold_targets)\n\n    def sampled_results(features, logits, mixin_prob):\n        'Generate scheduled sampling results.'\n        sampled_targets = sample(logits)\n        new_targets = mix_gold_sampled(features['targets'], sampled_targets, mixin_prob)\n        new_targets = tf.stop_gradient(new_targets)\n        new_features = copy.copy(features)\n        new_features['targets'] = new_targets\n        with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n            new_transformed_features = self.bottom(new_features)\n            with tf.variable_scope('body'):\n                (new_body_outputs, new_losses) = self._normalize_body_output(self.body(new_transformed_features))\n            assert ('training' not in new_losses)\n            new_logits = self.top(new_body_outputs, new_features)\n            if ((hparams.mode != tf.estimator.ModeKeys.PREDICT) and (hparams.mode != 'attack')):\n                new_losses['training'] = self.loss(new_logits, features)\n            else:\n                new_losses['training'] = 0.0\n        return (new_logits, new_losses)\n    tf.logging.info('Using scheduled sampling.')\n    assert (hparams.scheduled_sampling_prob == 1.0), 'hparams.scheduled_sampling_prob must be 0 or 1.'\n    mixin_prob = (hparams.scheduled_sampling_gold_mixin_prob * common_layers.inverse_exp_decay(hparams.scheduled_sampling_warmup_steps, min_value=0.001))\n    scheduled_sampling_num_passes = getattr(hparams, 'scheduled_sampling_num_passes', 1)\n    assert (scheduled_sampling_num_passes > 0), 'hparams.scheduled_sampling_num_passes must be > 0 if hparams.scheduled_sampling_prob > 0.0'\n    new_logits = logits\n    new_losses = losses\n    for _ in range(scheduled_sampling_num_passes):\n        (new_logits, new_losses) = sampled_results(features, new_logits, mixin_prob)\n    return (new_logits, new_losses)", "docstring": "Scheduled sampling.\n\nPerforms forward inference again with \"targets\" feature replaced with values\nsampled from the model.\n\nThis is the identity unless self.hparams.scheduled_sampling_prob > 0\n(default).\n\n**WARNING**: This is not a faithful implementation of 
scheduled sampling.\nThis implementation samples tokens for timestep t condtioned on gold tokens\n1...t-1. A proper implementation must condition on a mix of gold and\nsampled tokens. Doing so is not efficient for models such like Transformer.\n\nArgs:\nfeatures: {str: Tensor}. Features sharded along batch dimension.\nlogits: Tensor. Logits for each shard of data.\nlosses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor\n\nReturns:\nnew_logits: Tensor.\nnew_losses: {str: loss} where loss is one of (i) a 0-D Tensor or\n(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a\nweighted average.", "source": "codesearchnet"}
{"code": "def shutdown(self, message=None):\n        \n        for name, server in self.servers.items():\n            server.quit(message)", "docstring": "Disconnect all servers with a message.\n\nArgs:\nmessage (str): Quit message to use on each connection.", "source": "juraj-google-style"}
{"code": "def find_file(search_dir, file_pattern):\n    \n    for root, dirnames, fnames in os.walk(search_dir):\n            for fname in fnames:\n                if fnmatch.fnmatch(fname, file_pattern):\n                    return os.path.join(root, fname)\n    return \"\"", "docstring": "Search for a file in a directory, and return the first match.\nIf the file is not found return an empty string\n\nArgs:\nsearch_dir: The root directory to search in\nfile_pattern: A unix-style wildcard pattern representing\nthe file to find\n\nReturns:\nThe path to the file if it was found, otherwise an empty string", "source": "juraj-google-style"}
{"code": "def path_new_using_map(m: tcod.map.Map, dcost: float=1.41) -> tcod.path.AStar:\n    return tcod.path.AStar(m, dcost)", "docstring": "Return a new AStar using the given Map.\n\nArgs:\nm (Map): A Map instance.\ndcost (float): The path-finding cost of diagonal movement.\nCan be set to 0 to disable diagonal movement.\nReturns:\nAStar: A new AStar instance.", "source": "codesearchnet"}
{"code": "def display_as(self, name_type):\n    if (rname_rfc6680 is None):\n        raise NotImplementedError('Your GSSAPI implementation does not support RFC 6680 (the GSSAPI naming extensions)')\n    return rname_rfc6680.display_name_ext(self, name_type).decode(_utils._get_encoding())", "docstring": "Display this name as the given name type.\n\nThis method attempts to display the current :class:`Name`\nusing the syntax of the given :class:`NameType`, if possible.\n\nWarning:\n\nIn MIT krb5 versions below 1.13.3, this method can segfault if\nthe name was not *originally* created with a `name_type` that was\nnot ``None`` (even in cases when a ``name_type``\nis later \"added\", such as via :meth:`canonicalize`).\n**Do not use this method unless you are sure the above\nconditions can never happen in your code.**\n\nWarning:\n\nIn addition to the above warning, current versions of MIT krb5 do\nnot actually fully implement this method, and it may return\nincorrect results in the case of canonicalized names.\n\n:requires-ext:`rfc6680`\n\nArgs:\nname_type (OID): the :class:`NameType` to use to display the given\nname\n\nReturns:\nstr: the displayed name\n\nRaises:\nOperationUnavailableError", "source": "codesearchnet"}
{"code": "def take_indexed_slices_grad(self, num_required, name=None):\n    return_val = gen_data_flow_ops.sparse_accumulator_take_gradient(self._accumulator_ref, num_required, dtype=self._dtype, name=name)\n    return indexed_slices.IndexedSlices(indices=return_val.indices, values=return_val.values, dense_shape=return_val.shape)", "docstring": "Attempts to extract the average gradient from the accumulator.\n\nThe operation blocks until sufficient number of gradients have been\nsuccessfully applied to the accumulator.\n\nOnce successful, the following actions are also triggered:\n- Counter of accumulated gradients is reset to 0.\n- Aggregated gradient is reset to 0 tensor.\n- Accumulator's internal time step is incremented by 1.\n\nArgs:\nnum_required: Number of gradients that needs to have been aggregated\nname: Optional name for the operation\n\nReturns:\nAn `IndexedSlices` holding the value of the average gradient.\n\nRaises:\nInvalidArgumentError: If `num_required` < 1", "source": "github-repos"}
{"code": "def l1_distance(t1, t2, name=None):\n  \n  with tf.name_scope(name, 'l1_distance', [t1, t2]) as scope:\n    t1 = tf.convert_to_tensor(t1, name='t1')\n    t2 = tf.convert_to_tensor(t2, name='t2')\n    sub = tf.subtract(t1, t2)\n    reduction_dim = _last_index(sub, 1)\n    return tf.reduce_sum(tf.abs(sub), reduction_dim, name=scope)", "docstring": "l1 distance between t1 and t2.\n\nArgs:\nt1: A tensor.\nt2: A tensor that is the same size as t1.\nname: Optional name for this op.\nReturns:\nThe l1 distance between t1 and t2.", "source": "juraj-google-style"}
{"code": "def _append_commands(dct,  \n                     module_name,  \n                     commands  \n                     ):\n    \n    \n    for command in commands:\n        entry_point = '{command}{subcommand} = {module}{callable}'.format(\n            command=command.command,\n            subcommand=(':{}'.format(command.subcommand)\n                        if command.subcommand else ''),\n            module=module_name,\n            callable=(':{}'.format(command.callable)\n                      if command.callable else ''),\n        )\n        dct.setdefault(command.command, set()).add(entry_point)", "docstring": "Append entry point strings representing the given Command objects.\n\nArgs:\ndct: The dictionary to append with entry point strings. Each key will\nbe a primary command with a value containing a list of entry point\nstrings representing a Command.\nmodule_name: The name of the module in which the command object\nresides.\ncommands: A list of Command objects to convert to entry point strings.", "source": "juraj-google-style"}
{"code": "def rename(source_file_names, destination_file_names):\n    if len(source_file_names) == 0:\n        return\n    filesystem = FileSystems.get_filesystem(source_file_names[0])\n    return filesystem.rename(source_file_names, destination_file_names)", "docstring": "Rename the files at the source list to the destination list.\nSource and destination lists should be of the same size.\n\nArgs:\nsource_file_names: List of file paths that need to be moved\ndestination_file_names: List of destination_file_names for the files\n\nRaises:\n``BeamIOError``: if any of the rename operations fail", "source": "github-repos"}
{"code": "def list_inputs(self):\n    doc = []\n    for (inp, typ) in self.input_types.items():\n        if isinstance(typ, six.string_types):\n            typ = \"'{}'\".format(typ)\n        doc.append('{}: {}'.format(inp, typ))\n    return '\\n'.join(doc)", "docstring": "Return a string listing all the Step's input names and their types.\n\nThe types are returned in a copy/pastable format, so if the type is\n`string`, `'string'` (with single quotes) is returned.\n\nReturns:\nstr containing all input names and types.", "source": "codesearchnet"}
{"code": "class GaussianNoise(layers.Layer):\n\n    def __init__(self, stddev, seed=None, **kwargs):\n        super().__init__(**kwargs)\n        if not 0 <= stddev <= 1:\n            raise ValueError(f'Invalid value received for argument `stddev`. Expected a float value between 0 and 1. Received: stddev={stddev}')\n        self.stddev = stddev\n        self.seed = seed\n        if stddev > 0:\n            self.seed_generator = backend.random.SeedGenerator(seed)\n        self.supports_masking = True\n        self._build_at_init()\n\n    def call(self, inputs, training=False):\n        if training and self.stddev > 0:\n            return inputs + backend.random.normal(shape=ops.shape(inputs), mean=0.0, stddev=self.stddev, dtype=self.compute_dtype, seed=self.seed_generator)\n        return inputs\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        base_config = super().get_config()\n        config = {'stddev': self.stddev, 'seed': self.seed}\n        return {**base_config, **config}", "docstring": "Apply additive zero-centered Gaussian noise.\n\nThis is useful to mitigate overfitting\n(you could see it as a form of random data augmentation).\nGaussian Noise (GS) is a natural choice as corruption process\nfor real valued inputs.\n\nAs it is a regularization layer, it is only active at training time.\n\nArgs:\nstddev: Float, standard deviation of the noise distribution.\nseed: Integer, optional random seed to enable deterministic behavior.\n\nCall arguments:\ninputs: Input tensor (of any rank).\ntraining: Python boolean indicating whether the layer should behave in\ntraining mode (adding noise) or in inference mode (doing nothing).", "source": "github-repos"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall\ndoes not make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def copy(src, dst):\n    (src, src_is_storage) = format_and_is_storage(src)\n    (dst, dst_is_storage) = format_and_is_storage(dst)\n    if ((not src_is_storage) and (not dst_is_storage)):\n        return shutil_copy(src, dst)\n    with handle_os_exceptions():\n        if (not hasattr(dst, 'read')):\n            try:\n                if isdir(dst):\n                    dst = join(dst, basename(src))\n                elif (not isdir(dirname(dst))):\n                    raise IOError((\"No such file or directory: '%s'\" % dst))\n            except ObjectPermissionError:\n                pass\n        _copy(src, dst, src_is_storage, dst_is_storage)", "docstring": "Copies a source file to a destination file or directory.\n\nEquivalent to \"shutil.copy\".\n\nSource and destination can also be binary opened file-like objects.\n\nArgs:\nsrc (path-like object or file-like object): Source file.\ndst (path-like object or file-like object):\nDestination file or directory.\n\nRaises:\nIOError: Destination directory not found.", "source": "codesearchnet"}
{"code": "def ng(self, wavelength):\n        \n        return self.n(wavelength) - (wavelength*1.e-9)*self.nDer1(wavelength)", "docstring": "The group index with respect to wavelength.\n\nArgs:\nwavelength (float, list, None): The wavelength(s) the group\nindex will be evaluated at.\n\nReturns:\nfloat, list: The group index at the target wavelength(s).", "source": "juraj-google-style"}
{"code": "def _parse_redistribution(self, config):\n        \n        redistributions = list()\n        regexp = r'redistribute .*'\n        matches = re.findall(regexp, config)\n        for line in matches:\n            ospf_redist = line.split()\n            if len(ospf_redist) == 2:\n                \n                protocol = ospf_redist[1]\n                redistributions.append(dict(protocol=protocol))\n            if len(ospf_redist) == 4:\n                \n                protocol = ospf_redist[1]\n                route_map_name = ospf_redist[3]\n                redistributions.append(dict(protocol=protocol,\n                                       route_map=route_map_name))\n        return dict(redistributions=redistributions)", "docstring": "Parses config file for the OSPF router ID\n\nArgs:\nconfig (str):  Running configuration\nReturns:\nlist: dict:\nkeys: protocol (str)\nroute-map (optional) (str)", "source": "juraj-google-style"}
{"code": "def resolve_one_of(tags, at_least_one):\n    \n    if len(tags) < len(at_least_one):\n        return None\n    for possible_resolution in choose_1_from_each(at_least_one):\n        resolution = {}\n        pr = possible_resolution[:]\n        for entity_type in pr:\n            last_end_index = -1\n            if entity_type in resolution:\n                last_end_index = resolution.get[entity_type][-1].get('end_token')\n            tag, value, c = find_first_tag(tags, entity_type, after_index=last_end_index)\n            if not tag:\n                break\n            else:\n                if entity_type not in resolution:\n                    resolution[entity_type] = []\n                resolution[entity_type].append(tag)\n        if len(resolution) == len(possible_resolution):\n            return resolution\n\n    return None", "docstring": "This searches tags for Entites in at_least_one and returns any match\n\nArgs:\ntags(list): List of tags with Entities to search for Entities\nat_least_one(list): List of Entities to find in tags\n\nReturns:\nobject: returns None if no match is found but returns any match as an object", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, registry_key, **kwargs):\n    if (registry_key is None):\n        raise ValueError('Windows Registry key is not set.')\n    super(WindowsRegistryPlugin, self).Process(parser_mediator, **kwargs)\n    self.ExtractEvents(parser_mediator, registry_key, **kwargs)", "docstring": "Processes a Windows Registry key or value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nRaises:\nValueError: If the Windows Registry key is not set.", "source": "codesearchnet"}
{"code": "def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, mecab_dic: Optional[str]='unidic_lite', mecab_option: Optional[str]=None):\n    self.do_lower_case = do_lower_case\n    self.never_split = never_split if never_split is not None else []\n    self.normalize_text = normalize_text\n    try:\n        import fugashi\n    except ModuleNotFoundError as error:\n        raise error.__class__('You need to install fugashi to use MecabTokenizer. See https:\n    mecab_option = mecab_option or ''\n    if mecab_dic is not None:\n        if mecab_dic == 'ipadic':\n            try:\n                import ipadic\n            except ModuleNotFoundError as error:\n                raise error.__class__('The ipadic dictionary is not installed. See https:\n            dic_dir = ipadic.DICDIR\n        elif mecab_dic == 'unidic_lite':\n            try:\n                import unidic_lite\n            except ModuleNotFoundError as error:\n                raise error.__class__('The unidic_lite dictionary is not installed. See https:\n            dic_dir = unidic_lite.DICDIR\n        elif mecab_dic == 'unidic':\n            try:\n                import unidic\n            except ModuleNotFoundError as error:\n                raise error.__class__('The unidic dictionary is not installed. See https:\n            dic_dir = unidic.DICDIR\n            if not os.path.isdir(dic_dir):\n                raise RuntimeError('The unidic dictionary itself is not found. See https:\n        else:\n            raise ValueError('Invalid mecab_dic is specified.')\n        mecabrc = os.path.join(dic_dir, 'mecabrc')\n        mecab_option = f'-d \"{dic_dir}\" -r \"{mecabrc}\" ' + mecab_option\n    self.mecab = fugashi.GenericTagger(mecab_option)", "docstring": "Constructs a MecabTokenizer.\n\nArgs:\n**do_lower_case**: (*optional*) boolean (default True)\nWhether to lowercase the input.\n**never_split**: (*optional*) list of str\nKept for backward compatibility purposes. Now implemented directly at the base class level (see\n[`PreTrainedTokenizer.tokenize`]) List of tokens not to split.\n**normalize_text**: (*optional*) boolean (default True)\nWhether to apply unicode normalization to text before tokenization.\n**mecab_dic**: (*optional*) string (default \"ipadic\")\nName of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary,\nset this option to `None` and modify *mecab_option*.\n**mecab_option**: (*optional*) string\nString passed to MeCab constructor.", "source": "github-repos"}
{"code": "async def populate_projects(self, force=False):\n    if (force or (not self.projects)):\n        with tempfile.TemporaryDirectory() as tmpdirname:\n            self.projects = (await load_json_or_yaml_from_url(self, self.config['project_configuration_url'], os.path.join(tmpdirname, 'projects.yml')))", "docstring": "Download the ``projects.yml`` file and populate ``self.projects``.\n\nThis only sets it once, unless ``force`` is set.\n\nArgs:\nforce (bool, optional): Re-run the download, even if ``self.projects``\nis already defined. Defaults to False.", "source": "codesearchnet"}
{"code": "def _get_bond_data(line):\n    orb_labs = ['s', 'p_y', 'p_z', 'p_x', 'd_xy', 'd_yz', 'd_z^2', 'd_xz', 'd_x^2-y^2', 'f_y(3x^2-y^2)', 'f_xyz', 'f_yz^2', 'f_z^3', 'f_xz^2', 'f_z(x^2-y^2)', 'f_x(x^2-3y^2)']\n    line = line.rsplit('(', 1)\n    length = float(line[(- 1)][:(- 1)])\n    sites = line[0].replace('->', ':').split(':')[1:3]\n    site_indices = tuple(((int(re.split('\\\\D+', site)[1]) - 1) for site in sites))\n    if ('[' in sites[0]):\n        orbs = [re.findall('\\\\[(.*)\\\\]', site)[0] for site in sites]\n        orbitals = [tuple((int(orb[0]), Orbital(orb_labs.index(orb[1:])))) for orb in orbs]\n        orb_label = ('%d%s-%d%s' % (orbitals[0][0], orbitals[0][1].name, orbitals[1][0], orbitals[1][1].name))\n    else:\n        orbitals = None\n        orb_label = None\n    bond_data = {'length': length, 'sites': site_indices, 'orbitals': orbitals, 'orb_label': orb_label}\n    return bond_data", "docstring": "Subroutine to extract bond label, site indices, and length from\na LOBSTER header line. The site indices are zero-based, so they\ncan be easily used with a Structure object.\n\nExample header line: No.4:Fe1->Fe9(2.4524893531900283)\nExample header line for orbtial-resolved COHP:\nNo.1:Fe1[3p_x]->Fe2[3d_x^2-y^2](2.456180552772262)\n\nArgs:\nline: line in the COHPCAR header describing the bond.\n\nReturns:\nDict with the bond label, the bond length, a tuple of the site\nindices, a tuple containing the orbitals (if orbital-resolved),\nand a label for the orbitals (if orbital-resolved).", "source": "codesearchnet"}
{"code": "def MakeSuiteFromDict(d, name=''):\n    \n    suite = Suite(name=name)\n    suite.SetDict(d)\n    suite.Normalize()\n    return suite", "docstring": "Makes a suite from a map from values to probabilities.\n\nArgs:\nd: dictionary that maps values to probabilities\nname: string name for this suite\n\nReturns:\nSuite object", "source": "juraj-google-style"}
{"code": "def convert_clip(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting clip ...')\n\n    if params['min'] == 0:\n        print(\"using ReLU({0})\".format(params['max']))\n        layer = keras.layers.ReLU(max_value=params['max'])\n    else:\n        def target_layer(x, vmin=params['min'], vmax=params['max']):\n            import tensorflow as tf\n            return tf.clip_by_value(x, vmin, vmax)\n        layer = keras.layers.Lambda(target_layer)\n\n    layers[scope_name] = layer(layers[inputs[0]])", "docstring": "Convert clip operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def __init__(self, data_type, default=None, **kwargs):\n    \n    kwargs[\"default\"] = default\n    super(JsonProperty, self).__init__(**kwargs)\n    self.data_type = data_type", "docstring": "Constructor.\n\nArgs:\ndata_type: underlying data type as class.\ndefault: default value for the property. The value is deep copied\nfore each model instance.\n**kwargs: remaining arguments.", "source": "juraj-google-style"}
{"code": "def returnListOfConfigurationValues(util):\n    VALUES = {}\n    configPath = os.path.join(getConfigPath()['appPath'], 'general.cfg')\n    if (not os.path.exists(configPath)):\n        defaultConfigPath = os.path.join(getConfigPath()['appPathDefaults'], 'general.cfg')\n        try:\n            with open(defaultConfigPath) as iF:\n                cont = iF.read()\n                with open(configPath, 'w') as oF:\n                    oF.write(cont)\n        except Exception as e:\n            raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath)\n    config = ConfigParser.ConfigParser()\n    config.read(configPath)\n    LISTS = ['tlds', 'domains', 'platforms', 'extension', 'exclude_platforms', 'exclude_domains']\n    for section in config.sections():\n        incomplete = False\n        if (section.lower() == util.lower()):\n            for (param, value) in config.items(section):\n                if (value == ''):\n                    if (param in LISTS):\n                        value = []\n                    else:\n                        value = ''\n                elif (param in LISTS):\n                    value = value.split(' ')\n                elif (param == 'threads'):\n                    try:\n                        value = int(value)\n                    except Exception as err:\n                        raise errors.ConfigurationParameterNotValidError(configPath, section, param, value)\n                elif (param == 'debug'):\n                    try:\n                        if (int(value) == 0):\n                            value = False\n                        else:\n                            value = True\n                    except Exception as err:\n                        print('Something happened when processing this debug option. Resetting to default.')\n                        defaultConfigPath = os.path.join(getConfigPath()['appPathDefaults'], 'general.cfg')\n                        try:\n                            with open(defaultConfigPath) as iF:\n                                cont = iF.read()\n                                with open(configPath, 'w') as oF:\n                                    oF.write(cont)\n                        except Exception as e:\n                            raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath)\n                VALUES[param] = value\n            break\n    return VALUES", "docstring": "Method that recovers the configuration information about each program\n\nTODO: Grab the default file from the package data instead of storing it in\nthe main folder.\n\nArgs:\n-----\nutil: Any of the utils that are contained in the framework: domainfy,\nentify, mailfy, phonefy, searchfy, usufy.\n\nReturns:\n--------\nA dictionary containing the default configuration.", "source": "codesearchnet"}
{"code": "def ias53(msg):\n    d = hex2bin(data(msg))\n    if (d[12] == '0'):\n        return None\n    ias = bin2int(d[13:23])\n    return ias", "docstring": "Indicated airspeed, DBS 5,3 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message\n\nReturns:\nint: indicated arispeed in knots", "source": "codesearchnet"}
{"code": "def _FilterOutPathInfoDuplicates(path_infos):\n    pi_dict = {}\n    for pi in path_infos:\n        path_key = (pi.path_type, pi.GetPathID())\n        pi_dict.setdefault(path_key, []).append(pi)\n\n    def _SortKey(pi):\n        return (pi.stat_entry.st_ctime, pi.stat_entry.st_mtime, pi.stat_entry.st_atime, pi.stat_entry.st_ino)\n    for pi_values in pi_dict.values():\n        if (len(pi_values) > 1):\n            pi_values.sort(key=_SortKey, reverse=True)\n    return [v[0] for v in pi_dict.values()]", "docstring": "Filters out duplicates from passed PathInfo objects.\n\nArgs:\npath_infos: An iterable with PathInfo objects.\n\nReturns:\nA list of PathInfo objects with duplicates removed. Duplicates are\nremoved following this logic: they're sorted by (ctime, mtime, atime,\ninode number) in the descending order and then the first one is taken\nand the others are dropped.", "source": "codesearchnet"}
{"code": "def tmybasename(usaf):\n    url_file = open((env.SRC_PATH + '/tmy3.csv'))\n    for line in url_file.readlines():\n        if (line.find(usaf) is not (- 1)):\n            return line.rstrip().partition(',')[0]", "docstring": "Basename for USAF base.\n\nArgs:\nusaf (str): USAF code\n\nReturns:\n(str)", "source": "codesearchnet"}
{"code": "def log_optimal_transport(scores: torch.Tensor, reg_param: torch.Tensor, iterations: int) -> torch.Tensor:\n    batch_size, num_rows, num_columns = scores.shape\n    one_tensor = scores.new_tensor(1)\n    num_rows_tensor, num_columns_tensor = ((num_rows * one_tensor).to(scores), (num_columns * one_tensor).to(scores))\n    source_reg_param = reg_param.expand(batch_size, num_rows, 1)\n    target_reg_param = reg_param.expand(batch_size, 1, num_columns)\n    reg_param = reg_param.expand(batch_size, 1, 1)\n    couplings = torch.cat([torch.cat([scores, source_reg_param], -1), torch.cat([target_reg_param, reg_param], -1)], 1)\n    log_normalization = -(num_rows_tensor + num_columns_tensor).log()\n    log_source_distribution = torch.cat([log_normalization.expand(num_rows), num_columns_tensor.log()[None] + log_normalization])\n    log_target_distribution = torch.cat([log_normalization.expand(num_columns), num_rows_tensor.log()[None] + log_normalization])\n    log_source_distribution, log_target_distribution = (log_source_distribution[None].expand(batch_size, -1), log_target_distribution[None].expand(batch_size, -1))\n    log_optimal_transport_matrix = log_sinkhorn_iterations(couplings, log_source_distribution, log_target_distribution, num_iterations=iterations)\n    log_optimal_transport_matrix = log_optimal_transport_matrix - log_normalization\n    return log_optimal_transport_matrix", "docstring": "Perform Differentiable Optimal Transport in Log-space for stability\n\nArgs:\nscores: (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`):\nCost matrix.\nreg_param: (`torch.Tensor` of shape `(batch_size, 1, 1)`):\nRegularization parameter.\niterations: (`int`):\nNumber of Sinkhorn iterations.\n\nReturns:\nlog_optimal_transport_matrix: (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Logarithm of the\noptimal transport matrix.", "source": "github-repos"}
{"code": "def preprocess(self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]]='pt') -> BatchFeature:\n    if return_tensors != 'pt':\n        raise ValueError(f\"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}\")\n    if self._not_supports_tensor_input and isinstance(images, torch.Tensor):\n        images = images.cpu().numpy()\n    if isinstance(images, torch.Tensor):\n        images = self.val_transforms(images)\n        images = images.unsqueeze(0) if images.ndim == 3 else images\n    else:\n        images = make_list_of_images(images)\n        images = [to_pil_image(image) for image in images]\n        images = torch.stack([self.val_transforms(image) for image in images])\n    return BatchFeature({'pixel_values': images}, tensor_type=return_tensors)", "docstring": "Preprocess an image or batch of images.\n\nArgs:\nimages (`ImageInput`):\nImage to preprocess. Expects a single or batch of images\nreturn_tensors (`str` or `TensorType`, *optional*):\nThe type of tensors to return.", "source": "github-repos"}
{"code": "def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]:\n    out_bytes = []\n    b_start = 0\n    b_end = 0\n    while b_start < len(in_bytes):\n        tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree\n        for j in range(b_start, len(in_bytes)):\n            b = in_bytes[j]\n            if b in tree_pointer:\n                tree_pointer = tree_pointer[b]\n            elif j == b_start:\n                cur_leaf = [b]\n                b_end = j\n                break\n            else:\n                break\n            if self.LEAF in tree_pointer:\n                cur_leaf = tree_pointer[self.LEAF]\n                b_end = j\n        out_bytes.extend(cur_leaf)\n        b_start = b_end + 1\n    return out_bytes", "docstring": "Rewrite a sequence of bytes using the hash tree.\n\nArgs:\nin_bytes (`List[str]`): A list of bytes to be rewritten.\nreverse (`bool`): If True, decoding is performed with the reverse hash tree.\nReturns:\n`List[str]`: The rewritten byte sequence.", "source": "github-repos"}
{"code": "def smooth(self, noise, strategy=INVERSE_STRATEGY):\n    if (strategy is INVERSE_STRATEGY):\n        self.points = with_inverse(self.points, noise)\n    elif (strategy is EXTRAPOLATE_STRATEGY):\n        self.points = with_extrapolation(self.points, noise, 30)\n    elif (strategy is NO_STRATEGY):\n        self.points = with_no_strategy(self.points, noise)\n    return self", "docstring": "In-place smoothing\n\nSee smooth_segment function\n\nArgs:\nnoise (float): Noise expected\nstrategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY\nor smooth.EXTRAPOLATE_STRATEGY\nReturns:\n:obj:`Segment`", "source": "codesearchnet"}
{"code": "def retry_loop(self, context, step_method):\n        \n        logger.debug(\"starting\")\n\n        context['retryCounter'] = 0\n\n        sleep = context.get_formatted_as_type(self.sleep, out_type=float)\n        if self.max:\n            max = context.get_formatted_as_type(self.max, out_type=int)\n\n            logger.info(f\"retry decorator will try {max} times at {sleep}s \"\n                        \"intervals.\")\n        else:\n            max = None\n            logger.info(f\"retry decorator will try indefinitely at {sleep}s \"\n                        \"intervals.\")\n\n        \n        \n        \n        \n        if poll.while_until_true(interval=sleep,\n                                 max_attempts=max)(\n                self.exec_iteration)(context=context,\n                                     step_method=step_method\n                                     ):  \n            logger.debug(\"retry loop complete, reporting success.\")\n\n        logger.debug(\"retry loop done\")\n\n        logger.debug(\"done\")", "docstring": "Run step inside a retry loop.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate - after method execution will contain the new\nupdated context.\nstep_method: (method/function) This is the method/function that\nwill execute on every loop iteration. Signature is:\nfunction(context)", "source": "juraj-google-style"}
{"code": "def GetPlasoTimestamp(self):\n    normalized_timestamp = self._GetNormalizedTimestamp()\n    if (normalized_timestamp is None):\n        return None\n    normalized_timestamp *= definitions.MICROSECONDS_PER_SECOND\n    normalized_timestamp = normalized_timestamp.quantize(1, rounding=decimal.ROUND_HALF_UP)\n    return int(normalized_timestamp)", "docstring": "Retrieves a timestamp that is compatible with plaso.\n\nReturns:\nint: a POSIX timestamp in microseconds or None if no timestamp is\navailable.", "source": "codesearchnet"}
{"code": "def projector(state, flatten=False):\n    density_matrix = np.outer(state.conjugate(), state)\n    if flatten:\n        return density_matrix.flatten(order='F')\n    return density_matrix", "docstring": "maps a pure state to a state matrix\n\nArgs:\nstate (ndarray): the number of qubits\nflatten (bool): determine if state matrix of column work\nReturns:\nndarray:  state_mat(2**num, 2**num) if flatten is false\nndarray:  state_mat(4**num) if flatten is true stacked on by the column", "source": "codesearchnet"}
{"code": "def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:\n    \n\n    if len(ref) == 0:\n        raise EmptyReferenceException(\n            \"Cannot calculating word error rate against a length 0 \"\\\n            \"reference sequence.\")\n\n    distance = min_edit_distance(ref, hyp)\n    return 100 * float(distance) / len(ref)", "docstring": "Calculate the word error rate of a sequence against a reference.\n\nArgs:\nref: The gold-standard reference sequence\nhyp: The hypothesis to be evaluated against the reference.\n\nReturns:\nThe word error rate of the supplied hypothesis with respect to the\nreference string.\n\nRaises:\npersephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.", "source": "juraj-google-style"}
{"code": "def WaitUntilNoFlowsToProcess(self, timeout=None):\n    \n    t = self.flow_handler_thread\n    if not t:\n      return\n\n    start_time = time.time()\n    while True:\n      with self.lock:\n        \n        \n        \n        if (not t.isAlive() or\n            (not self._GetFlowRequestsReadyForProcessing() and\n             not self.flow_handler_num_being_processed)):\n          return\n\n      time.sleep(0.2)\n\n      if timeout and time.time() - start_time > timeout:\n        raise TimeOutWhileWaitingForFlowsToBeProcessedError(\n            \"Flow processing didn't finish in time.\")", "docstring": "Waits until flow processing thread is done processing flows.\n\nArgs:\ntimeout: If specified, is a max number of seconds to spend waiting.\n\nRaises:\nTimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.", "source": "juraj-google-style"}
{"code": "def take(x, indices, axis=None):\n    if any_symbolic_tensors((x, indices)):\n        return Take(axis=axis).symbolic_call(x, indices)\n    return backend.numpy.take(x, indices, axis=axis)", "docstring": "Take elements from a tensor along an axis.\n\nArgs:\nx: Source tensor.\nindices: The indices of the values to extract.\naxis: The axis over which to select values. By default, the\nflattened input tensor is used.\n\nReturns:\nThe corresponding tensor of values.", "source": "github-repos"}
{"code": "def get_summary(result):\n    \n    summary = {\n        \"success\": result.wasSuccessful(),\n        \"stat\": {\n            'total': result.testsRun,\n            'failures': len(result.failures),\n            'errors': len(result.errors),\n            'skipped': len(result.skipped),\n            'expectedFailures': len(result.expectedFailures),\n            'unexpectedSuccesses': len(result.unexpectedSuccesses)\n        }\n    }\n    summary[\"stat\"][\"successes\"] = summary[\"stat\"][\"total\"] \\\n        - summary[\"stat\"][\"failures\"] \\\n        - summary[\"stat\"][\"errors\"] \\\n        - summary[\"stat\"][\"skipped\"] \\\n        - summary[\"stat\"][\"expectedFailures\"] \\\n        - summary[\"stat\"][\"unexpectedSuccesses\"]\n\n    summary[\"time\"] = {\n        'start_at': result.start_at,\n        'duration': result.duration\n    }\n    summary[\"records\"] = result.records\n\n    return summary", "docstring": "get summary from test result\n\nArgs:\nresult (instance): HtmlTestResult() instance\n\nReturns:\ndict: summary extracted from result.\n\n{\n\"success\": True,\n\"stat\": {},\n\"time\": {},\n\"records\": []\n}", "source": "juraj-google-style"}
{"code": "def transform_coords(self, width, height):\n    if (self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}):\n        raise AttributeError(_wrong_meth.format(self.type))\n    x = self._libinput.libinput_event_touch_get_x_transformed(self._handle, width)\n    y = self._libinput.libinput_event_touch_get_y_transformed(self._handle, height)\n    return (x, y)", "docstring": "Return the current absolute coordinates of the touch event,\ntransformed to screen coordinates.\n\nFor events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,\n:attr:`~libinput.constant.EventType.TOUCH_MOTION`, this method\nraises :exc:`AttributeError`.\n\nArgs:\nwidth (int): The current output screen width.\nheight (int): The current output screen height.\nReturns:\n(float, float): The current absolute (x, y) coordinates transformed\nto screen coordinates.", "source": "codesearchnet"}
{"code": "class ErrorHandlingConfig(NamedTuple):\n    output: str", "docstring": "This option specifies whether and where to output error rows.\n\nArgs:\noutput (str): Name to use for the output error collection", "source": "github-repos"}
{"code": "def _GetIdentifierFromPath(self, parser_mediator):\n    file_entry = parser_mediator.GetFileEntry()\n    path = file_entry.path_spec.location\n    file_system = file_entry.GetFileSystem()\n    path_segments = file_system.SplitPath(path)\n    return path_segments[(- 2)]", "docstring": "Extracts a container or a graph ID from a JSON file's path.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\n\nReturns:\nstr: container or graph identifier.", "source": "codesearchnet"}
{"code": "def external_ids(self, **kwargs):\n    path = self._get_id_path('external_ids')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the external ids for a specific movie id.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def purview(repertoire):\n    if (repertoire is None):\n        return None\n    return tuple((i for (i, dim) in enumerate(repertoire.shape) if (dim == 2)))", "docstring": "The purview of the repertoire.\n\nArgs:\nrepertoire (np.ndarray): A repertoire\n\nReturns:\ntuple[int]: The purview that the repertoire was computed over.", "source": "codesearchnet"}
{"code": "def setup(self, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):\n    grr_auth = (grr_username, grr_password)\n    self.approvers = []\n    if approvers:\n        self.approvers = [item.strip() for item in approvers.strip().split(',')]\n    self.grr_api = grr_api.InitHttp(api_endpoint=grr_server_url, auth=grr_auth, verify=verify)\n    self.output_path = tempfile.mkdtemp()\n    self.reason = reason", "docstring": "Initializes a GRR hunt result collector.\n\nArgs:\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "codesearchnet"}
{"code": "def _send_success_response(self, response, start_response):\n    headers = [('Content-Type', 'application/json; charset=UTF-8')]\n    return util.send_wsgi_response('200 OK', headers, response, start_response)", "docstring": "Sends an HTTP 200 json success response.\n\nThis calls start_response and returns the response body.\n\nArgs:\nresponse: A string containing the response body to return.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string, the response body.", "source": "codesearchnet"}
{"code": "async def _open_connection_https(self, location):\n        \n        sock = await connect_tcp(location[0],\n                                 location[1],\n                                 ssl_context=self.ssl_context or ssl.SSLContext(),\n                                 bind_host=self.source_address,\n                                 autostart_tls=True)\n        sock._active = True\n        return sock", "docstring": "Creates an async SSL socket, returns it.\nArgs:\nlocation (tuple(str, int)): A tuple of net location (eg\n'127.0.0.1' or 'example.org') and port (eg 80 or 25000).", "source": "juraj-google-style"}
{"code": "def et2roc(et_fo, roc_fo):\n        \n\n        stats_dicts = [\n            {\n                \"q\": q,\n                \"M\": 0,\n                \"w\": 0,\n                \"m\": 0,\n                \"P\": 0,\n                \"U\": 0,\n                \"u\": 0,\n                \"T\": 0,\n                \"t\": 0,\n                \"x\": 0\n            } for q in range(rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1)\n        ]\n\n        for line in et_fo:\n            line = line.strip()\n            if line != \"\" and line[0] != \"\n                (read_tuple_name, tab, info_categories) = line.partition(\"\\t\")\n                intervals = info_categories.split(\",\")\n                for interval in intervals:\n                    category = interval[0]\n                    (left, colon, right) = interval[2:].partition(\"-\")\n                    for q in range(int(left), int(right) + 1):\n                        stats_dicts[q][category] += 1\n\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n        roc_fo.write(\"\n\n        l_numbers = []\n        for line in stats_dicts:\n            numbers = [\n                line[\"M\"], line[\"w\"], line[\"m\"], line[\"P\"], line[\"U\"], line[\"u\"], line[\"T\"], line[\"t\"], line[\"x\"]\n            ]\n            if numbers != l_numbers:\n                roc_fo.write(\"\\t\".join([str(line[\"q\"])] + list(map(str, numbers)) + [str(sum(numbers))]) + os.linesep)\n            l_numbers = numbers", "docstring": "ET to ROC conversion.\n\nArgs:\net_fo (file): File object for the ET file.\nroc_fo (file): File object for the ROC file.\n\nraises: ValueError", "source": "juraj-google-style"}
{"code": "def _flatten_multiplicand_list(kernels):\n  \n  flattened = []\n  for k in kernels:\n    if isinstance(k, _ProductKernel):\n      flattened += k.kernels\n    else:\n      flattened.append(k)\n  return flattened", "docstring": "Flatten a list of kernels which may contain _ProductKernel instances.\n\nArgs:\nkernels: Python list of `PositiveSemidefiniteKernel` instances\n\nReturns:\nPython list containing the elements of kernels, with any _ProductKernel\ninstances replaced by their `kernels` property contents.", "source": "juraj-google-style"}
{"code": "def __directory_list_descriptor(self, configs):\n    \n    descriptor = {\n        'kind': 'discovery\n        'discoveryVersion': 'v1',\n    }\n\n    items = []\n    for config in configs:\n      item_descriptor = self.__item_descriptor(config)\n      if item_descriptor:\n        items.append(item_descriptor)\n\n    if items:\n      descriptor['items'] = items\n\n    return descriptor", "docstring": "Builds a directory list for an API.\n\nArgs:\nconfigs: List of dicts containing the service configurations to list.\n\nReturns:\nA dictionary that can be deserialized into JSON in discovery list format.\n\nRaises:\nApiConfigurationError: If there's something wrong with the API\nconfiguration, such as a multiclass API decorated with different API\ndescriptors (see the docstring for api()), or a repeated method\nsignature.", "source": "juraj-google-style"}
{"code": "def find_files(base_dir, extensions, exclude_dirs=list()):\n    result = []\n    for (root, dir_names, file_names) in os.walk(base_dir):\n        for filename in file_names:\n            candidate = os.path.join(root, filename)\n            if should_include_file_in_search(candidate, extensions, exclude_dirs):\n                result.append(candidate)\n    return result", "docstring": "Find all files matching the given extensions.\n\nArgs:\nbase_dir (str): Path of base directory to search in.\nextensions (list): A list of file extensions to search for.\nexclude_dirs (list): A list of directories to exclude from search.\n\nReturns:\nlist of paths that match the search", "source": "codesearchnet"}
{"code": "def list_container_services_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.ContainerService/ContainerServices', '?api-version=', ACS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container services in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON model.", "source": "codesearchnet"}
{"code": "def timezone(self, value=0.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `timezone`'.format(value))\n            if value < -12.0:\n                raise ValueError('value need to be greater or equal -12.0 '\n                                 'for field `timezone`')\n            if value > 12.0:\n                raise ValueError('value need to be smaller 12.0 '\n                                 'for field `timezone`')\n\n        self._timezone = value", "docstring": "Corresponds to IDD Field `timezone` Time relative to GMT.\n\nArgs:\nvalue (float): value for IDD Field `timezone`\nUnit: hr - not on standard units list???\nDefault value: 0.0\nvalue >= -12.0\nvalue <= 12.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_source_url(obj):\n    \n    source_env_prefix = obj.context.config['source_env_prefix']\n    task = obj.task\n    log.debug(\"Getting source url for {} {}...\".format(obj.name, obj.task_id))\n    repo = get_repo(obj.task, source_env_prefix=source_env_prefix)\n    source = task['metadata']['source']\n    if repo and not verify_repo_matches_url(repo, source):\n        raise CoTError(\"{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!\".format(\n            name=obj.name, task_id=obj.task_id, source_env_prefix=source_env_prefix, repo=repo, source=source\n        ))\n    log.info(\"{} {}: found {}\".format(obj.name, obj.task_id, source))\n    return source", "docstring": "Get the source url for a Trust object.\n\nArgs:\nobj (ChainOfTrust or LinkOfTrust): the trust object to inspect\n\nRaises:\nCoTError: if repo and source are defined and don't match\n\nReturns:\nstr: the source url.", "source": "juraj-google-style"}
{"code": "def ForceRemoveFileObject(self, path_spec):\n    \n    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)\n    if not cache_value:\n      return False\n\n    while not cache_value.IsDereferenced():\n      cache_value.vfs_object.close()\n\n    return True", "docstring": "Forces the removal of a file-like object based on a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file-like object was cached.", "source": "juraj-google-style"}
{"code": "def merge_with(self, other):\n    other = as_shape(other)\n    if (self._dims is None):\n        return other\n    else:\n        try:\n            self.assert_same_rank(other)\n            new_dims = []\n            for (i, dim) in enumerate(self._dims):\n                new_dims.append(dim.merge_with(other[i]))\n            return TensorShape(new_dims)\n        except ValueError:\n            raise ValueError(('Shapes %s and %s are not convertible' % (self, other)))", "docstring": "Returns a `TensorShape` combining the information in `self` and `other`.\n\nThe dimensions in `self` and `other` are merged elementwise,\naccording to the rules defined for `Dimension.merge_with()`.\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` containing the combined information of `self` and\n`other`.\n\nRaises:\nValueError: If `self` and `other` are not convertible.", "source": "codesearchnet"}
{"code": "def get_dG_at_T(seq, temp):\n    \n    \n    r_cal = scipy.constants.R / scipy.constants.calorie\n\n    seq = ssbio.protein.sequence.utils.cast_to_str(seq)\n\n    oobatake = {}\n    for t in range(20, 51):\n        oobatake[t] = calculate_oobatake_dG(seq, t)\n\n    stable = [i for i in oobatake.values() if i > 0]\n\n    if len(stable) == 0:\n        \n        \n        dG = 0.238846 * calculate_dill_dG(len(seq), temp)\n        method='Dill'\n    else:\n        dG = oobatake[temp]\n        method='Oobatake'\n\n    keq = math.exp(-1 * dG / (r_cal * (temp + 273.15)))\n\n    return dG, keq, method", "docstring": "Predict dG at temperature T, using best predictions from Dill or Oobatake methods.\n\nArgs:\nseq (str, Seq, SeqRecord): Amino acid sequence\ntemp (float): Temperature in degrees C\n\nReturns:\n(tuple): tuple containing:\n\ndG (float) Free energy of unfolding dG (cal/mol)\nkeq (float): Equilibrium constant Keq\nmethod (str): Method used to calculate", "source": "juraj-google-style"}
{"code": "def _get_user_agent():\n    client = '{0}/{1}'.format(__name__.split('.')[0], ver.__version__)\n    python_version = 'Python/{v.major}.{v.minor}.{v.micro}'.format(v=sys.version_info)\n    system_info = '{0}/{1}'.format(platform.system(), platform.release())\n    user_agent_string = ' '.join([python_version, client, system_info])\n    return user_agent_string", "docstring": "Construct the user-agent header with the package info,\nPython version and OS version.\n\nReturns:\nThe user agent string.\ne.g. 'Python/3.6.7 slack/2.0.0 Darwin/17.7.0'", "source": "codesearchnet"}
{"code": "def squeeze(x, axis):\n    return array_ops.squeeze(x, [axis])", "docstring": "Removes a 1-dimension from the tensor at index \"axis\".\n\nArgs:\nx: A tensor or variable.\naxis: Axis to drop.\n\nReturns:\nA tensor with the same data as `x` but reduced dimensions.", "source": "github-repos"}
{"code": "def run(self, *args):\n    if (self.prefix_char is None):\n        prefix_char = config.suite_alias_prefix_char\n    else:\n        prefix_char = self.prefix_char\n    if (prefix_char == ''):\n        return self._run_no_args(args)\n    else:\n        return self._run(prefix_char, args)", "docstring": "Invoke the wrapped script.\n\nReturns:\nReturn code of the command, or 0 if the command is not run.", "source": "codesearchnet"}
{"code": "def get_data_path(module_id: str) -> Path:\n    \n    profile = coordinator.profile\n    data_path = get_base_path() / 'profiles' / profile / module_id\n    if not data_path.exists():\n        data_path.mkdir(parents=True)\n    return data_path", "docstring": "Get the path for persistent storage of a module.\n\nThis method creates the queried path if not existing.\n\nArgs:\nmodule_id (str): Module ID\n\nReturns:\nThe data path of indicated module.", "source": "juraj-google-style"}
{"code": "def add_condition(self, observed_arr):\n        \n        condition_arr = self.__image_true_sampler.draw()\n        return np.concatenate((observed_arr, condition_arr), axis=1)", "docstring": "Add condtion.\n\nArgs:\nobserved_arr:       `np.ndarray` of samples.\n\nReturns:\n`np.ndarray` of samples.", "source": "juraj-google-style"}
{"code": "def _instantiate_exception(self, node, exc_type):\n    value = self.ctx.program.NewVariable()\n    types = []\n    stack = list(exc_type.data)\n    while stack:\n        e = stack.pop()\n        if isinstance(e, abstract.Tuple):\n            for sub_exc_type in e.pyval:\n                sub_value, sub_types = self._instantiate_exception(node, sub_exc_type)\n                value.PasteVariable(sub_value)\n                types.extend(sub_types)\n        elif isinstance(e, abstract.Instance) and e.cls.full_name == 'builtins.tuple':\n            sub_exc_type = e.get_instance_type_parameter(abstract_utils.T)\n            sub_value, sub_types = self._instantiate_exception(node, sub_exc_type)\n            value.PasteVariable(sub_value)\n            types.extend(sub_types)\n        elif isinstance(e, abstract.Class) and any((base.full_name == 'builtins.BaseException' or isinstance(base, abstract.AMBIGUOUS_OR_EMPTY) for base in e.mro)):\n            value.PasteVariable(self.init_class(node, e))\n            types.append(e)\n        elif isinstance(e, abstract.Union):\n            stack.extend(e.options)\n        else:\n            if not isinstance(e, abstract.AMBIGUOUS_OR_EMPTY):\n                if isinstance(e, abstract.Class):\n                    mro_seqs = [e.mro] if isinstance(e, abstract.Class) else []\n                    msg = f'{e.name} does not inherit from BaseException'\n                else:\n                    mro_seqs = []\n                    msg = 'Not a class'\n                self.ctx.errorlog.mro_error(self.frames, e.name, mro_seqs, details=msg)\n            value.AddBinding(self.ctx.convert.unsolvable, [], node)\n            types.append(None)\n    return (value, types)", "docstring": "Instantiate an exception type.\n\nArgs:\nnode: The current node.\nexc_type: A cfg.Variable of the exception type.\n\nReturns:\nA tuple of a cfg.Variable of the instantiated type and a list of\nthe flattened exception types in the data of exc_type. None takes the\nplace of invalid types.", "source": "github-repos"}
{"code": "def _validate_version(connection, dsn):\n    try:\n        version = get_stored_version(connection)\n    except VersionIsNotStored:\n        logger.debug('Version not stored in the db: assuming new database creation.')\n        version = SCHEMA_VERSION\n        _update_version(connection, version)\n    assert isinstance(version, int)\n    if ((version > 10) and (version < 100)):\n        raise DatabaseError('You are trying to open an old SQLite database.')\n    if _migration_required(connection):\n        migrate(connection, dsn)", "docstring": "Performs on-the-fly schema updates based on the models version.\n\nRaises:\nDatabaseError: if user uses old sqlite database.", "source": "codesearchnet"}
{"code": "def signUserCsr(self, xcsr, signas, outp=None):\n        \n        pkey = xcsr.get_pubkey()\n        name = xcsr.get_subject().CN\n        return self.genUserCert(name, csr=pkey, signas=signas, outp=outp)", "docstring": "Signs a user CSR with a CA keypair.\n\nArgs:\ncert (OpenSSL.crypto.X509Req): The certificate signing request.\nsignas (str): The CA keypair name to sign the CSR with.\noutp (synapse.lib.output.Output): The output buffer.\n\nExamples:\ncdir.signUserCsr(mycsr, 'myca')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the public key and certificate objects.", "source": "juraj-google-style"}
{"code": "def text_editor(file='', background=False, return_cmd=False):\n    desktop_env = system.get_name()\n    if (desktop_env == 'windows'):\n        editor_cmd_str = system.get_cmd_out(['ftype', 'textfile']).split('=', 1)[1]\n    elif (desktop_env == 'mac'):\n        editor_cmd_str = ('open -a' + system.get_cmd_out(['def', 'read', 'com.apple.LaunchServices', 'LSHandlers-array{LSHandlerContentType=public.plain-text;}']))\n    else:\n        editor_cmd_str = system.get_cmd_out(['xdg-mime', 'query', 'default', 'text/plain'])\n        if ('\\n' in editor_cmd_str):\n            editor_cmd_str = editor_cmd_str.split('\\n')[0]\n    if editor_cmd_str.endswith('.desktop'):\n        editor_cmd_str = desktopfile.parse(desktopfile.locate(editor_cmd_str)[0])['Exec']\n        for i in editor_cmd_str.split():\n            if i.startswith('%'):\n                editor_cmd_str = editor_cmd_str.replace(i, '')\n            if (i == '--new-document'):\n                editor_cmd_str = editor_cmd_str.replace(i, '')\n    if file:\n        editor_cmd_str += ' {}'.format(shlex.quote(file))\n    if return_cmd:\n        return editor_cmd_str\n    text_editor_proc = sp.Popen([editor_cmd_str], shell=True)\n    if (not background):\n        text_editor_proc.wait()", "docstring": "Starts the default graphical text editor.\n\nStart the user's preferred graphical text editor, optionally with a file.\n\nArgs:\nfile\t   (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).\nbackground (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.\nreturn_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.\n\nReturns:\nstr: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.", "source": "codesearchnet"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(KeyWrappingSpecification, self).read(\n            input_stream,\n            kmip_version=kmip_version\n        )\n        local_stream = BytearrayStream(input_stream.read(self.length))\n\n        if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):\n            self._wrapping_method = primitives.Enumeration(\n                enum=enums.WrappingMethod,\n                tag=enums.Tags.WRAPPING_METHOD\n            )\n            self._wrapping_method.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        else:\n            raise ValueError(\n                \"Invalid struct missing the wrapping method attribute.\"\n            )\n\n        if self.is_tag_next(\n                enums.Tags.ENCRYPTION_KEY_INFORMATION,\n                local_stream\n        ):\n            self._encryption_key_information = EncryptionKeyInformation()\n            self._encryption_key_information.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self.is_tag_next(\n                enums.Tags.MAC_SIGNATURE_KEY_INFORMATION,\n                local_stream\n        ):\n            self._mac_signature_key_information = MACSignatureKeyInformation()\n            self._mac_signature_key_information.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        attribute_names = []\n        while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_stream):\n            attribute_name = primitives.TextString(\n                tag=enums.Tags.ATTRIBUTE_NAME\n            )\n            attribute_name.read(local_stream, kmip_version=kmip_version)\n            attribute_names.append(attribute_name)\n        self._attribute_names = attribute_names\n\n        if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):\n            self._encoding_option = primitives.Enumeration(\n                enum=enums.EncodingOption,\n                tag=enums.Tags.ENCODING_OPTION\n            )\n            self._encoding_option.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.is_oversized(local_stream)", "docstring": "Read the data encoding the KeyWrappingSpecification struct and decode\nit into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    pass", "docstring": "Returns a `Tensor`.\n\nThe output of this function will be used by model-builder-functions. For\nexample the pseudo code of `input_layer` will be like:\n\n```python\ndef input_layer(features, feature_columns, ...):\noutputs = [fc.get_dense_tensor(...) for fc in feature_columns]\nreturn tf.concat(outputs)\n```\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.\n\nReturns:\n`Tensor` of shape [batch_size] + `variable_shape`.", "source": "github-repos"}
{"code": "def read(self, filename):\n    try:\n        SafeConfigParser.read(self, filename)\n    except SafeConfigParserError as exc:\n        msg = ('%s: parsing error in eapi conf file: %s' % (type(exc).__name__, filename))\n        debug(msg)\n    self._add_default_connection()\n    for name in self.sections():\n        if (name.startswith('connection:') and ('host' not in dict(self.items(name)))):\n            self.set(name, 'host', name.split(':')[1])\n    self.generate_tags()", "docstring": "Reads the file specified by filename\n\nThis method will load the eapi.conf file specified by filename into\nthe instance object.  It will also add the default connection localhost\nif it was not defined in the eapi.conf file\n\nArgs:\nfilename (str): The full path to the file to load", "source": "codesearchnet"}
{"code": "def to_ising(self):\n    return (dict(self.spin.linear), dict(self.spin.quadratic), self.spin.offset)", "docstring": "Converts a binary quadratic model to Ising format.\n\nIf the binary quadratic model's vartype is not :class:`.Vartype.SPIN`,\nvalues are converted.\n\nReturns:\ntuple: 3-tuple of form (`linear`, `quadratic`, `offset`), where `linear`\nis a dict of linear biases, `quadratic` is a dict of quadratic biases,\nand `offset` is a number that represents the constant offset of the\nbinary quadratic model.\n\nExamples:\nThis example converts a binary quadratic model to an Ising problem.\n\n>>> import dimod\n>>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},\n...                                    {(0, 1): .5, (1, 2): 1.5},\n...                                    1.4,\n...                                    dimod.SPIN)\n>>> model.to_ising()    # doctest: +SKIP\n({0: 1, 1: -1, 2: 0.5}, {(0, 1): 0.5, (1, 2): 1.5}, 1.4)", "source": "codesearchnet"}
{"code": "def use(self, middleware, path=None):\n        \n        self.log.info(\" Using middleware {}\", middleware)\n        if path is None:\n            path = MiddlewareChain.ROOT_PATTERN\n        self.add(HTTPMethod.ALL, path, middleware)\n        return self", "docstring": "Call the provided middleware upon requests matching the path.\nIf path is not provided or None, all requests will match.\n\nArgs:\nmiddleware (callable): Callable with the signature\n``(res, req) -> None``\npath (Optional[str or regex]): a specific path the\nrequest must match for the middleware to be called.\nReturns:\nThis router", "source": "juraj-google-style"}
{"code": "def __init__(self, server, port, project_key=None,\n                 run_asyncore_thread=True):\n        \n        self.server = server\n        self.port = port\n\n        super(ZEOWrapper, self).__init__(\n            project_key=project_key,\n            run_asyncore_thread=run_asyncore_thread,\n        )", "docstring": "Initialize the object.\n\nArgs:\nconf_path (str): See :attr:`conf_path`.\nproject_key (str, default None): See :attr:`project_key`. If not\nset, the root of the database is used (this may cause\nperformace issues).\nrun_asyncore_thread (bool, default True): Run external asyncore\nthread, which handles connections to database? Default True.", "source": "juraj-google-style"}
{"code": "def update_labels(self, node_name: str, labels: dict):\n        \n        \n        if not self._manager:\n            raise RuntimeError('Only the Swarm manager node can update '\n                               'node details.')\n\n        \n        node_spec = {'Availability': 'active',\n                     'Name': node_name,\n                     'Role': 'manager',\n                     'Labels': labels}\n        node = self._client.nodes.get(node_name)\n        node.update(node_spec)", "docstring": "Update label of a node.\n\nArgs:\nnode_name (string): Name of the node.\nlabels (dict): Label to add to the node", "source": "juraj-google-style"}
{"code": "def attrname_to_colname_dict(cls) -> Dict[str, str]:\n    \n    attr_col = {}  \n    for attrname, column in gen_columns(cls):\n        attr_col[attrname] = column.name\n    return attr_col", "docstring": "Asks an SQLAlchemy class how its attribute names correspond to database\ncolumn names.\n\nArgs:\ncls: SQLAlchemy ORM class\n\nReturns:\na dictionary mapping attribute names to database column names", "source": "juraj-google-style"}
{"code": "def write_info_file(tensorboard_info):\n  \n  payload = \"%s\\n\" % _info_to_string(tensorboard_info)\n  with open(_get_info_file_path(), \"w\") as outfile:\n    outfile.write(payload)", "docstring": "Write TensorBoardInfo to the current process's info file.\n\nThis should be called by `main` once the server is ready. When the\nserver shuts down, `remove_info_file` should be called.\n\nArgs:\ntensorboard_info: A valid `TensorBoardInfo` object.\n\nRaises:\nValueError: If any field on `info` is not of the correct type.", "source": "juraj-google-style"}
{"code": "def starts_when(iterable, condition):\n    \n    \n    if not callable(condition):\n        cond_value = condition\n\n        def condition(x):\n            return x == cond_value\n    return itertools.dropwhile(lambda x: not condition(x), iterable)", "docstring": "Start yielding items when a condition arise.\n\nArgs:\niterable: the iterable to filter.\ncondition: if the callable returns True once, start yielding\nitems. If it's not a callable, it will be converted\nto one as `lambda condition: condition == item`.\n\nExample:\n\n>>> list(starts_when(range(10), lambda x: x > 5))\n[6, 7, 8, 9]\n>>> list(starts_when(range(10), 7))\n[7, 8, 9]", "source": "juraj-google-style"}
{"code": "def _run_submission(self, metadata):\n    if self._use_gpu:\n        docker_binary = 'nvidia-docker'\n        container_name = metadata['container_gpu']\n    else:\n        docker_binary = 'docker'\n        container_name = metadata['container']\n    if (metadata['type'] == 'defense'):\n        cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_data'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, ('./' + metadata['entry_point']), '/input_images', '/output_data/result.csv']\n    else:\n        epsilon = np.random.choice(ALLOWED_EPS)\n        cmd = [docker_binary, 'run', '--network=none', '-m=24g', '-v', '{0}:/input_images:ro'.format(self._sample_input_dir), '-v', '{0}:/output_images'.format(self._sample_output_dir), '-v', '{0}:/code'.format(self._extracted_submission_dir), '-w', '/code', container_name, ('./' + metadata['entry_point']), '/input_images', '/output_images', str(epsilon)]\n    logging.info('Command to run submission: %s', ' '.join(cmd))\n    return shell_call(cmd)", "docstring": "Runs submission inside Docker container.\n\nArgs:\nmetadata: dictionary with submission metadata\n\nReturns:\nTrue if status code of Docker command was success (i.e. zero),\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def get_encoder_from_vocab(vocab_filepath):\n    if (not tf.gfile.Exists(vocab_filepath)):\n        raise ValueError('Vocab file does not exist: {}.'.format(vocab_filepath))\n    tf.logging.info('Found vocab file: %s', vocab_filepath)\n    encoder = text_encoder.SubwordTextEncoder(vocab_filepath)\n    return encoder", "docstring": "Get encoder from vocab file.\n\nIf vocab is not found in output dir, it will be copied there by\ncopy_vocab_to_output_dir to clarify the vocab used to generate the data.\n\nArgs:\nvocab_filepath: path to vocab, either local or cns\n\nReturns:\nA SubwordTextEncoder vocabulary object. None if the output_parallel_text\nis set.", "source": "codesearchnet"}
{"code": "def decode_datetime(encoded_datetime):\n    time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)\n    if time_zone_match:\n        time_string = encoded_datetime[:time_zone_match.start(1)].upper()\n    else:\n        time_string = encoded_datetime.upper()\n    if ('.' in time_string):\n        format_string = '%Y-%m-%dT%H:%M:%S.%f'\n    else:\n        format_string = '%Y-%m-%dT%H:%M:%S'\n    decoded_datetime = datetime.datetime.strptime(time_string, format_string)\n    if (not time_zone_match):\n        return decoded_datetime\n    if time_zone_match.group('z'):\n        offset_minutes = 0\n    else:\n        sign = time_zone_match.group('sign')\n        (hours, minutes) = [int(value) for value in time_zone_match.group('hours', 'minutes')]\n        offset_minutes = ((hours * 60) + minutes)\n        if (sign == '-'):\n            offset_minutes *= (- 1)\n    return datetime.datetime(decoded_datetime.year, decoded_datetime.month, decoded_datetime.day, decoded_datetime.hour, decoded_datetime.minute, decoded_datetime.second, decoded_datetime.microsecond, TimeZoneOffset(offset_minutes))", "docstring": "Decode a DateTimeField parameter from a string to a python datetime.\n\nArgs:\nencoded_datetime: A string in RFC 3339 format.\n\nReturns:\nA datetime object with the date and time specified in encoded_datetime.\n\nRaises:\nValueError: If the string is not in a recognized format.", "source": "codesearchnet"}
{"code": "def auth_ping(self):\n    url = (self.rest_url + '/non-existent/location')\n    response = self._get(url)\n    if (response.status_code == 401):\n        return False\n    elif (response.status_code == 404):\n        return True\n    else:\n        return False", "docstring": "Test that application can authenticate to Crowd.\n\nAttempts to authenticate the application user against\nthe Crowd server. In order for user authentication to\nwork, an application must be able to authenticate.\n\nReturns:\nbool:\nTrue if the application authentication succeeded.", "source": "codesearchnet"}
{"code": "def fermi_fourier_trans_inverse_conjugate_4(qubits):\n    \n\n    yield fswap(qubits[1], qubits[2]),\n    yield fermi_fourier_trans_2(qubits[0], qubits[1])\n    yield fermi_fourier_trans_2(qubits[2], qubits[3])\n    yield fswap(qubits[1], qubits[2])\n    yield fermi_fourier_trans_2(qubits[0], qubits[1])\n    yield cirq.S(qubits[2]) ** 3\n    yield fermi_fourier_trans_2(qubits[2], qubits[3])\n    yield fswap(qubits[1], qubits[2])", "docstring": "We will need to map the momentum states in the reversed order for\nspin-down states to the position picture. This transformation can be\nsimply implemented the complex conjugate of the former one. We only\nneed to change the S gate to S* = S ** 3.\n\nArgs:\nqubits: list of four qubits", "source": "juraj-google-style"}
{"code": "def trim(self, len_):\n    other = Version(None)\n    other.tokens = self.tokens[:len_]\n    other.seps = self.seps[:(len_ - 1)]\n    return other", "docstring": "Return a copy of the version, possibly with less tokens.\n\nArgs:\nlen_ (int): New version length. If >= current length, an\nunchanged copy of the version is returned.", "source": "codesearchnet"}
{"code": "def translate_sites(self, indices=None, vector=None):\n        \n        if indices is None:\n            indices = range(len(self))\n        if vector is None:\n            vector == [0, 0, 0]\n        for i in indices:\n            site = self._sites[i]\n            new_site = Site(site.species, site.coords + vector,\n                            properties=site.properties)\n            self._sites[i] = new_site", "docstring": "Translate specific sites by some vector, keeping the sites within the\nunit cell.\n\nArgs:\nindices (list): List of site indices on which to perform the\ntranslation.\nvector (3x1 array): Translation vector for sites.", "source": "juraj-google-style"}
{"code": "def create_server(self, server_name, *args, **kwargs):\n    server = ServerConnection(name=server_name, reactor=self)\n    if (args or kwargs):\n        server.set_connect_info(*args, **kwargs)\n    for (verb, infos) in self._event_handlers.items():\n        for info in infos:\n            server.register_event(info['direction'], verb, info['handler'], priority=info['priority'])\n    self.servers[server_name] = server\n    return server", "docstring": "Create an IRC server connection slot.\n\nThe server will actually be connected to when\n:meth:`girc.client.ServerConnection.connect` is called later.\n\nArgs:\nserver_name (str): Name of the server, to be used for functions and accessing the\nserver later through the reactor.\n\nReturns:\nserver (girc.client.ServerConnection): A not-yet-connected server.", "source": "codesearchnet"}
{"code": "def check_completion(task, mark_incomplete=False, clear=False, return_stats=False):\n    to_clear = dict()\n    (is_complete, stats) = _check_completion(task, mark_incomplete=mark_incomplete, clear=clear, stats={}, visited=dict(), to_clear=to_clear)\n    while to_clear:\n        found_clearable_task = False\n        for task_id in list(to_clear.keys()):\n            v = to_clear[task_id]\n            if (not v['required_by']):\n                found_clearable_task = True\n                task = v['task']\n                if isinstance(task, ORMTask):\n                    task.mark_incomplete()\n                    task.clear()\n                    _increment_stats(stats, 'Cleared')\n                    config.logger.info(('Cleared task: ' + task_id))\n                else:\n                    config.logger.info(('Cannot clear task, not an ORMTask: ' + task_id))\n                del to_clear[task_id]\n                for w in to_clear.values():\n                    w['required_by'].discard(task_id)\n        if (not found_clearable_task):\n            raise RuntimeError('Error in recursive task clearing, no clearable task found')\n    config.logger.info(('Task completion checking, summary:\\n' + str(stats)))\n    if return_stats:\n        return (is_complete, stats)\n    else:\n        return is_complete", "docstring": "Recursively check if a task and all its requirements are complete\n\nArgs:\ntask (derived from luigi.Task): Task to check completion for; check everything 'downstream'\nfrom that task.\n\nmark_incomplete (bool): If ``True`` set any task as incomplete for which a requirement\nis found to be incomplete (checked recursively).\nThis works only for tasks derived from :class:`ORMTask`.\n\nclear (bool): If ``True``, call the :func:`clear()` method of any task for which a requirement\nis found to be incomplete (checked recursively). This implies ``mark_incomplete = True``.\nThis works only for tasks derived from :class:`ORMTask`.\n\nreturn_stats (bool): If ``True``, return task checking statistics in addition to completion status\n\nReturns:\nbool: ``True`` if the task, all its requirements and (recursively) all their requirements\nare complete, ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def setup(import_roots, zip_safe):\n    archive_path = _find_archive()\n    if not archive_path:\n        warnings.warn('Failed to initialize .par file runtime support', UserWarning)\n        return False\n    if os.path.abspath(sys.path[0]) != os.path.abspath(archive_path):\n        warnings.warn('Failed to initialize .par file runtime support. ' + 'archive_path was %r, sys.path was %r' % (archive_path, sys.path), UserWarning)\n        return False\n    if not zip_safe:\n        extract_dir = _extract_files(archive_path)\n        sys.path[0] = extract_dir\n        import_prefix = extract_dir\n    else:\n        extract_dir = None\n        import_prefix = archive_path\n    _initialize_import_path(import_roots, import_prefix)\n    _setup_pkg_resources('pkg_resources')\n    _setup_pkg_resources('pip._vendor.pkg_resources')\n    return True", "docstring": "Initialize subpar run-time support\n\nArgs:\nimport_root (list): subdirs inside .par file to add to the\nmodule import path at runtime.\nzip_safe (bool): If False, extract the .par file contents to a\ntemporary directory, and import everything from\nthat directory.\n\nReturns:\nTrue if setup was successful, else False", "source": "github-repos"}
{"code": "def append_dims_and_file_extension(fname, data_df):\n    if (not fname.endswith('.gct')):\n        out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])\n        return out_fname\n    else:\n        basename = os.path.splitext(fname)[0]\n        out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0])\n        return out_fname", "docstring": "Append dimensions and file extension to output filename.\nN.B. Dimensions are cols x rows.\n\nArgs:\nfname (string): output filename\ndata_df (pandas df)\nReturns:\nout_fname (string): output filename with matrix dims and .gct appended", "source": "codesearchnet"}
{"code": "def normalize(a, new_min=0.0, new_max=1.0):\n    \n\n    n = (a - np.amin(a)) / np.amax(a - np.amin(a))\n    return n * (new_max - new_min) + new_min", "docstring": "From ``bruges``\n\nNormalize an array to [0,1] or to arbitrary new min and max.\n\nArgs:\na (ndarray)\nnew_min (float): the new min, default 0.\nnew_max (float): the new max, default 1.\n\nReturns:\nndarray. The normalized array.", "source": "juraj-google-style"}
{"code": "def _escape_token(token, alphabet):\n    token = token.replace(u'\\\\', u'\\\\\\\\').replace(u'_', u'\\\\u')\n    ret = [(c if ((c in alphabet) and (c != u'\\n')) else ('\\\\%d;' % ord(c))) for c in token]\n    return (u''.join(ret) + '_')", "docstring": "r\"\"\"Replace characters that aren't in the alphabet and append \"_\" to token.\n\nApply three transformations to the token:\n1. Replace underline character \"_\" with \"\\u\", and backslash \"\\\" with \"\\\\\".\n2. Replace characters outside of the alphabet with \"\\###;\", where ### is the\ncharacter's Unicode code point.\n3. Appends \"_\" to mark the end of a token.\n\nArgs:\ntoken: unicode string to be escaped\nalphabet: list of all known characters\n\nReturns:\nescaped string", "source": "codesearchnet"}
{"code": "def update(self, jump):\n    atom = jump.initial_site.atom\n    dr = jump.dr(self.cell_lengths)\n    jump.final_site.occupation = atom.number\n    jump.final_site.atom = atom\n    jump.final_site.is_occupied = True\n    jump.initial_site.occupation = 0\n    jump.initial_site.atom = None\n    jump.initial_site.is_occupied = False\n    atom.site = jump.final_site\n    atom.number_of_hops += 1\n    atom.dr += dr\n    atom.summed_dr2 += np.dot(dr, dr)", "docstring": "Update the lattice state by accepting a specific jump\n\nArgs:\njump (Jump): The jump that has been accepted.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def _AddCredentialConfiguration(self, path_spec, credential_type, credential_data):\n    credential_configuration = configurations.CredentialConfiguration(credential_data=credential_data, credential_type=credential_type, path_spec=path_spec)\n    self._credential_configurations.append(credential_configuration)", "docstring": "Adds a credential configuration.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification.\ncredential_type (str): credential type.\ncredential_data (bytes): credential data.", "source": "codesearchnet"}
{"code": "def generate_output_newline(self, line='0', colorize=True):\n    return generate_output(line=line, is_parent=True, colorize=colorize)", "docstring": "The function for generating a CLI output new line.\n\nArgs:\nline (:obj:`str`): The line number (0-4). Determines indentation.\nDefaults to '0'.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def relocate(self, destination):\n        \n        for activate in self.bin.activates:\n\n            activate.vpath = destination\n\n        for binfile in self.bin.files:\n\n            if binfile.shebang and (\n                    'python' in binfile.shebang or 'pypy' in binfile.shebang\n            ):\n\n                binfile.shebang = '\n                    os.path.join(destination, 'bin', 'python')\n                )", "docstring": "Configure the virtual environment for another path.\n\nArgs:\ndestination (str): The target path of the virtual environment.\n\nNote:\nThis does not actually move the virtual environment. Is only\nrewrites the metadata required to support a move.", "source": "juraj-google-style"}
{"code": "def pythonify_logs(logs):\n    logs = logs or {}\n    result = {}\n    for key, value in sorted(logs.items()):\n        if isinstance(value, dict):\n            result.update(pythonify_logs(value))\n        else:\n            try:\n                value = float(value)\n            except:\n                pass\n            result[key] = value\n    return result", "docstring": "Flatten and convert log values to Python-native types.\n\nThis function attempts to convert dict value by `float(value)` and skips\nthe conversion if it fails.\n\nArgs:\nlogs: A dict containing log values.\n\nReturns:\nA flattened dict with values converted to Python-native types if\npossible.", "source": "github-repos"}
{"code": "def step(self, actions):\n    for (index, (env, action)) in enumerate(zip(self._envs, actions)):\n        if (not env.action_space.contains(action)):\n            message = 'Invalid action at index {}: {}'\n            raise ValueError(message.format(index, action))\n    if self._blocking:\n        transitions = [env.step(action) for (env, action) in zip(self._envs, actions)]\n    else:\n        transitions = [env.step(action, blocking=False) for (env, action) in zip(self._envs, actions)]\n        transitions = [transition() for transition in transitions]\n    (observs, rewards, dones, infos) = zip(*transitions)\n    observ = np.stack(observs)\n    reward = np.stack(rewards)\n    done = np.stack(dones)\n    info = tuple(infos)\n    return (observ, reward, done, info)", "docstring": "Forward a batch of actions to the wrapped environments.\n\nArgs:\nactions: Batched action to apply to the environment.\n\nRaises:\nValueError: Invalid actions.\n\nReturns:\nBatch of observations, rewards, and done flags.", "source": "codesearchnet"}
{"code": "def generate_substitution_structures(self, atom, target_species=[], sub_both_sides=False, range_tol=0.01, dist_from_surf=0):\n    sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()\n\n    def substitute(site, i):\n        slab = self.slab.copy()\n        props = self.slab.site_properties\n        if sub_both_sides:\n            eq_indices = [indices for indices in sym_slab.equivalent_indices if (i in indices)][0]\n            for ii in eq_indices:\n                if (('%.6f' % sym_slab[ii].frac_coords[2]) != ('%.6f' % site.frac_coords[2])):\n                    props['surface_properties'][ii] = 'substitute'\n                    slab.replace(ii, atom)\n                    break\n        props['surface_properties'][i] = 'substitute'\n        slab.replace(i, atom)\n        slab.add_site_property('surface_properties', props['surface_properties'])\n        return slab\n    substituted_slabs = []\n    sorted_sites = sorted(sym_slab, key=(lambda site: site.frac_coords[2]))\n    if (sorted_sites[0].surface_properties == 'surface'):\n        d = (sorted_sites[0].frac_coords[2] + dist_from_surf)\n    else:\n        d = (sorted_sites[(- 1)].frac_coords[2] - dist_from_surf)\n    for (i, site) in enumerate(sym_slab):\n        if ((d - range_tol) < site.frac_coords[2] < (d + range_tol)):\n            if (target_species and (site.species_string in target_species)):\n                substituted_slabs.append(substitute(site, i))\n            elif (not target_species):\n                substituted_slabs.append(substitute(site, i))\n    matcher = StructureMatcher()\n    return [s[0] for s in matcher.group_structures(substituted_slabs)]", "docstring": "Function that performs substitution-type doping on the surface and\nreturns all possible configurations where one dopant is substituted\nper surface. Can substitute one surface or both.\n\nArgs:\natom (str): atom corresponding to substitutional dopant\nsub_both_sides (bool): If true, substitute an equivalent\nsite on the other surface\ntarget_species (list): List of specific species to substitute\nrange_tol (float): Find viable substitution sites at a specific\ndistance from the surface +- this tolerance\ndist_from_surf (float): Distance from the surface to find viable\nsubstitution sites, defaults to 0 to substitute at the surface", "source": "codesearchnet"}
{"code": "def put_vmss(access_token, subscription_id, resource_group, vmss_name, vmss_body):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API])\n    body = json.dumps(vmss_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Put VMSS body.\n\nCan be used to create or update a scale set.\nE.g. call get_vmss(), make changes to the body, call put_vmss().\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the new scale set.\nvmss_body (dictionary): Body containining\n\nReturns:\nHTTP response. JSON body of the virtual machine scale set properties.", "source": "codesearchnet"}
{"code": "def object_table(self, object_id=None):\n    self._check_connected()\n    if (object_id is not None):\n        return self._object_table(object_id)\n    else:\n        object_keys = self._keys((ray.gcs_utils.TablePrefix_OBJECT_string + '*'))\n        object_ids_binary = {key[len(ray.gcs_utils.TablePrefix_OBJECT_string):] for key in object_keys}\n        results = {}\n        for object_id_binary in object_ids_binary:\n            results[binary_to_object_id(object_id_binary)] = self._object_table(binary_to_object_id(object_id_binary))\n        return results", "docstring": "Fetch and parse the object table info for one or more object IDs.\n\nArgs:\nobject_id: An object ID to fetch information about. If this is\nNone, then the entire object table is fetched.\n\nReturns:\nInformation from the object table.", "source": "codesearchnet"}
{"code": "def disambiguate_pdf(self, file, language=None, entities=None):\n    body = {'customisation': 'generic'}\n    if language:\n        body['language'] = {'lang': language}\n    if entities:\n        body['entities'] = entities\n    files = {'query': str(body), 'file': (file, open(file, 'rb'), 'application/pdf', {'Expires': '0'})}\n    (res, status) = self.post(self.disambiguate_service, files=files, headers={'Accept': 'application/json'})\n    if (status != 200):\n        logger.debug(('Disambiguation failed with error ' + str(status)))\n    return (self.decode(res), status)", "docstring": "Call the disambiguation service in order to process a pdf file .\n\nArgs:\npdf (file): PDF file to be disambiguated.\nlanguage (str): language of text (if known)\n\nReturns:\ndict, int: API response and API status.", "source": "codesearchnet"}
{"code": "def _applyInter(finter0, finter1, conflict='ignore'):\n    OPTIONS = ['error', 'ignore', 'me', 'other']\n    assert (conflict in OPTIONS), 'Invalid value in `conflict`.'\n    min_int = (- (2 ** 63))\n    inter0 = tuple([(f.getValue() if f else min_int) for f in finter0])\n    inter1 = tuple([(f.getValue() if f else min_int) for f in finter1])\n    le00 = (inter0[0] <= inter1[0])\n    le01 = ((inter1[1] == min_int) or (inter0[0] <= inter1[1]))\n    le11 = ((inter1[1] == min_int) or ((inter0[1] != min_int) and (inter0[1] <= inter1[1])))\n    ge00 = ((not le00) or (inter0[0] == inter1[0]))\n    ge10 = ((inter0[1] == min_int) or (inter0[1] >= inter1[0]))\n    if (le00 and ge10 and le11):\n        return (finter1[0], finter0[1])\n    elif (le00 and ge10 and (not le11)):\n        return finter1\n    elif (ge00 and le01 and le11):\n        return finter0\n    elif (ge00 and le01 and (not le11)):\n        return (finter0[0], finter1[1])\n    elif (conflict == 'me'):\n        return finter0\n    elif (conflict == 'other'):\n        return finter1\n    elif (conflict == 'error'):\n        raise Exception('Disjoint intervals!')\n    return None", "docstring": "Return the restriction of first interval by the second.\n\nArgs:\n\n- inter0, inter1 (tuple of Feature): intervals\n\nReturn(tuple of Feature): the resulting interval\n- conflict(str): if a property hasn't compatible values/constrains, do:\n- ``\"error\"``: raise exception.\n- ``\"ignore\"``: return None.\n- ``\"me\"``: return finter0.\n- ``\"other\"``: return finter1.", "source": "codesearchnet"}
{"code": "def package_and_copy(package_root_dir, setup_py, output_tar_path):\n    if (not output_tar_path.startswith('gs:\n        raise ValueError('output_tar_path needs to be a GCS path.')\n    if (not os.path.isfile(setup_py)):\n        raise ValueError(('Supplied file \"%s\" does not exist.' % setup_py))\n    dest_setup_py = os.path.join(package_root_dir, 'setup.py')\n    if (dest_setup_py != setup_py):\n        if os.path.isfile(dest_setup_py):\n            os.rename(dest_setup_py, (dest_setup_py + '._bak_'))\n        shutil.copyfile(setup_py, dest_setup_py)\n    tempdir = tempfile.mkdtemp()\n    previous_cwd = os.getcwd()\n    os.chdir(package_root_dir)\n    try:\n        sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]\n        subprocess.check_call(sdist)\n        source = os.path.join(tempdir, '*.tar.gz')\n        gscopy = ['gsutil', 'cp', source, output_tar_path]\n        subprocess.check_call(gscopy)\n        return\n    finally:\n        os.chdir(previous_cwd)\n        if (dest_setup_py != setup_py):\n            os.remove(dest_setup_py)\n        if os.path.isfile((dest_setup_py + '._bak_')):\n            os.rename((dest_setup_py + '._bak_'), dest_setup_py)\n        shutil.rmtree(tempdir)", "docstring": "Repackage an CloudML package and copy it to a staging dir.\n\nArgs:\npackage_root_dir: the root dir to install package from. Usually you can get the path\nfrom inside your module using a relative path to __file__.\nsetup_py: the path to setup.py.\noutput_tar_path: the GCS path of the output tarball package.\nRaises:\nValueError if output_tar_path is not a GCS path, or setup_py does not exist.", "source": "codesearchnet"}
{"code": "def contains(self, rect):\n        \n        return (rect.y >= self.y and \\\n                rect.x >= self.x and \\\n                rect.y+rect.height <= self.y+self.height and \\\n                rect.x+rect.width  <= self.x+self.width)", "docstring": "Tests if another rectangle is contained by this one\n\nArguments:\nrect (Rectangle): The other rectangle\n\nReturns:\nbool: True if it is container, False otherwise", "source": "juraj-google-style"}
{"code": "def find_wells_with_curve(self, mnemonic, alias=None):\n        \n        return Project([w for w in self if w.get_curve(mnemonic, alias=alias) is not None])", "docstring": "Returns a new Project with only the wells which have the named curve.\n\nArgs:\nmenmonic (str): the name of the curve to look for.\nalias (dict): a welly alias dictionary.\n\nReturns:\nproject.", "source": "juraj-google-style"}
{"code": "def PrintExtractionSummary(self, processing_status):\n    if (not processing_status):\n        self._output_writer.Write('WARNING: missing processing status information.\\n')\n    elif (not processing_status.aborted):\n        if processing_status.error_path_specs:\n            self._output_writer.Write('Processing completed with errors.\\n')\n        else:\n            self._output_writer.Write('Processing completed.\\n')\n        number_of_warnings = processing_status.foreman_status.number_of_produced_warnings\n        if number_of_warnings:\n            output_text = '\\n'.join(['', 'Number of warnings generated while extracting events: {0:d}.'.format(number_of_warnings), '', 'Use pinfo to inspect warnings in more detail.', ''])\n            self._output_writer.Write(output_text)\n        if processing_status.error_path_specs:\n            output_text = '\\n'.join(['', 'Path specifications that could not be processed:', ''])\n            self._output_writer.Write(output_text)\n            for path_spec in processing_status.error_path_specs:\n                self._output_writer.Write(path_spec.comparable)\n                self._output_writer.Write('\\n')\n    self._output_writer.Write('\\n')", "docstring": "Prints a summary of the extraction.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "def reqs(amend: bool = False, stage: bool = False):\n    \n    changed_files = CTX.repo.changed_files()\n    if 'requirements.txt' in changed_files or 'requirements-dev.txt' in changed_files:\n        LOGGER.error('Requirements have changed; cannot update them')\n        sys.exit(-1)\n    _write_reqs(amend, stage)", "docstring": "Write requirements files\n\nArgs:\namend: amend last commit with changes\nstage: stage changes", "source": "juraj-google-style"}
{"code": "def find_mapreduce_yaml(status_file=__file__):\n    checked = set()\n    yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked)\n    if (not yaml):\n        yaml = _find_mapreduce_yaml(os.getcwd(), checked)\n    return yaml", "docstring": "Traverse directory trees to find mapreduce.yaml file.\n\nBegins with the location of status.py and then moves on to check the working\ndirectory.\n\nArgs:\nstatus_file: location of status.py, overridable for testing purposes.\n\nReturns:\nthe path of mapreduce.yaml file or None if not found.", "source": "codesearchnet"}
{"code": "def avg_grads(tower_grads):\n    average_grads = []\n    for grad_and_vars in zip(*tower_grads):\n        grads = []\n        for (g, _) in grad_and_vars:\n            expanded_g = tf.expand_dims(g, 0)\n            grads.append(expanded_g)\n        grad = tf.concat(0, grads)\n        grad = tf.reduce_mean(grad, 0)\n        v = grad_and_vars[0][1]\n        grad_and_var = (grad, v)\n        average_grads.append(grad_and_var)\n    return average_grads", "docstring": "Calculate the average gradient for each shared variable across all towers.\n\nNote that this function provides a synchronization point across all towers.\n\nArgs:\ntower_grads: List of lists of (gradient, variable) tuples. The outer list\nis over individual gradients. The inner list is over the gradient\ncalculation for each tower.\nReturns:\nList of pairs of (gradient, variable) where the gradient has been averaged\nacross all towers.", "source": "codesearchnet"}
{"code": "def dframe(self, dimensions=None, multi_index=False):\n        \n        if dimensions:\n            dimensions = [self.get_dimension(d, strict=True) for d in dimensions]\n        else:\n            dimensions = self.kdims\n        vdims = [d for d in dimensions if d in self.vdims]\n        if vdims:\n            raise ValueError('%s element does not hold data for value '\n                             'dimensions. Could not return data for %s '\n                             'dimension(s).' %\n                             (type(self).__name__, ', '.join([d.name for d in vdims])))\n        return super(StatisticsElement, self).dframe(dimensions, False)", "docstring": "Convert dimension values to DataFrame.\n\nReturns a pandas dataframe of columns along each dimension,\neither completely flat or indexed by key dimensions.\n\nArgs:\ndimensions: Dimensions to return as columns\nmulti_index: Convert key dimensions to (multi-)index\n\nReturns:\nDataFrame of columns corresponding to each dimension", "source": "juraj-google-style"}
{"code": "def nan_to_num(x, nan=0.0, posinf=None, neginf=None):\n    if any_symbolic_tensors((x,)):\n        return NanToNum(nan=nan, posinf=posinf, neginf=neginf).symbolic_call(x)\n    return backend.numpy.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)", "docstring": "Replace NaN with zero and infinity with large finite numbers.\n\nArgs:\nx: Input data.\nnan: Optional float or int. Value to replace `NaN` entries with.\nposinf: Optional float or int.\nValue to replace positive infinity with.\nneginf: Optional float or int.\nValue to replace negative infinity with.\n\nReturns:\n`x`, with non-finite values replaced.", "source": "github-repos"}
{"code": "def CheckCasts(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  \n  \n  \n  match = Search(\n      r'(\\bnew\\s+(?:const\\s+)?|\\S<\\s*(?:const\\s+)?)?\\b'\n      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'\n      r'(\\([^)].*)', line)\n  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)\n  if match and not expecting_function:\n    matched_type = match.group(2)\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    matched_new_or_template = match.group(1)\n\n    \n    \n    if Match(r'\\([^()]+\\)\\s*\\[', match.group(3)):\n      return\n\n    \n    \n    \n    \n    \n    matched_funcptr = match.group(3)\n    if (matched_new_or_template is None and\n        not (matched_funcptr and\n             (Match(r'\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\(',\n                    matched_funcptr) or\n              matched_funcptr.startswith('(*)'))) and\n        not Match(r'\\s*using\\s+\\S+\\s*=\\s*' + matched_type, line) and\n        not Search(r'new\\(\\S+\\)\\s*' + matched_type, line)):\n      error(filename, linenum, 'readability/casting', 4,\n            'Using deprecated casting style.  '\n            'Use static_cast<%s>(...) instead' %\n            matched_type)\n\n  if not expecting_function:\n    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',\n                    r'\\((int|float|double|bool|char|u?int(16|32|64))\\)', error)\n\n  \n  \n  \n  \n  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',\n                     r'\\((char\\s?\\*+\\s?)\\)\\s*\"', error):\n    pass\n  else:\n    \n    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',\n                    r'\\((\\w+\\s?\\*+\\s?)\\)', error)\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  match = Search(\n      r'(?:[^\\w]&\\(([^)*][^)]*)\\)[\\w(])|'\n      r'(?:[^\\w]&(static|dynamic|down|reinterpret)_cast\\b)', line)\n  if match:\n    \n    \n    \n    parenthesis_error = False\n    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\\b)<', line)\n    if match:\n      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))\n      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':\n        _, y2, x2 = CloseExpression(clean_lines, y1, x1)\n        if x2 >= 0:\n          extended_line = clean_lines.elided[y2][x2:]\n          if y2 < clean_lines.NumLines() - 1:\n            extended_line += clean_lines.elided[y2 + 1]\n          if Match(r'\\s*(?:->|\\[)', extended_line):\n            parenthesis_error = True\n\n    if parenthesis_error:\n      error(filename, linenum, 'readability/casting', 4,\n            ('Are you taking an address of something dereferenced '\n             'from a cast?  Wrapping the dereferenced expression in '\n             'parentheses will make the binding more obvious'))\n    else:\n      error(filename, linenum, 'runtime/casting', 4,\n            ('Are you taking an address of a cast?  '\n             'This is dangerous: could be a temp var.  '\n             'Take the address before doing the cast, rather than after'))", "docstring": "Various cast related checks.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def write_fingerprint(export_dir: str) -> None:\n    if flags.config().saved_model_fingerprinting.value():\n        fingerprint_path = file_io.join(compat.as_str(export_dir), compat.as_str(constants.FINGERPRINT_FILENAME))\n        logging.info('Writing fingerprint to %s', fingerprint_path)\n        try:\n            fingerprint_serialized = fingerprinting_pywrap.CreateFingerprintDef(export_dir)\n        except FingerprintException as e:\n            raise ValueError(e) from None\n        file_io.atomic_write_string_to_file(fingerprint_path, fingerprint_serialized)\n        metrics.SetWriteFingerprint(fingerprint=fingerprint_serialized)\n        try:\n            metrics.SetWritePathAndSingleprint(path=export_dir, singleprint=singleprint_from_fingerprint_proto(export_dir))\n        except metrics.MetricException:\n            logging.info('path_and_singleprint metric could not be set. Model saving will continue.')", "docstring": "Write fingerprint protobuf, if requested.\n\nWrites a `tf.saved_model.experimental.Fingerprint` object to a\n`fingerprint.pb` file in the `export_dir` using the `saved_model.pb` file\ncontained in `export_dir`.\n\nArgs:\nexport_dir: The directory in which to write the fingerprint.", "source": "github-repos"}
{"code": "def quaternion_from_axis_rotation(angle, axis):\n    out = np.zeros(4, dtype=float)\n    if (axis == 'x'):\n        out[1] = 1\n    elif (axis == 'y'):\n        out[2] = 1\n    elif (axis == 'z'):\n        out[3] = 1\n    else:\n        raise ValueError('Invalid axis input.')\n    out *= math.sin((angle / 2.0))\n    out[0] = math.cos((angle / 2.0))\n    return Quaternion(out)", "docstring": "Return quaternion for rotation about given axis.\n\nArgs:\nangle (float): Angle in radians.\naxis (str): Axis for rotation\n\nReturns:\nQuaternion: Quaternion for axis rotation.\n\nRaises:\nValueError: Invalid input axis.", "source": "codesearchnet"}
{"code": "def run(self, timeout=-1):\n        \n        def target():\n            self.process = subprocess.Popen(self.cmd,\n                                            stdout=self.stdout_dest,\n                                            stderr=self.stderr_dest,\n                                            shell=self.shell)\n            stdout, stderr = self.process.communicate()\n\n            \n            if self.decode_out:\n                if stdout:\n                    self.stdout = stdout.decode(\"utf-8\")\n                if stderr:\n                    self.stderr = stderr.decode(\"utf-8\")\n\n        thread = threading.Thread(target=target)\n        thread.start()\n\n        if timeout > 0:\n            thread.join(timeout)\n            if thread.is_alive():\n                self.process.terminate()\n                thread.join()\n                raise SubprocessError((\"Reached timeout after {t} seconds\"\n                                       .format(t=timeout)))\n        else:\n            thread.join()\n\n        return self.process.returncode, self.stdout, self.stderr", "docstring": "Run the subprocess.\n\nArguments:\ntimeout (optional) If a positive real value, then timout after\nthe given number of seconds.\n\nRaises:\nSubprocessError If subprocess has not completed after \"timeout\"\nseconds.", "source": "juraj-google-style"}
{"code": "def AssertThat(target):\n    if type(target) is type:\n        if issubclass(target, BaseException):\n            return _ExceptionClassSubject(target)\n        return _ClassSubject(target)\n    for super_type, subject_class in six.iteritems(_TYPE_CONSTRUCTORS):\n        if issubclass(type(target), super_type):\n            return subject_class(target)\n    if _IsMock(target):\n        return _MockSubject(target)\n    if _IsNumeric(target):\n        return _NumericSubject(target)\n    if _IsComparable(target) and _IsIterable(target):\n        return _ComparableIterableSubject(target)\n    if _IsComparable(target):\n        return _ComparableSubject(target)\n    if _IsIterable(target):\n        return _IterableSubject(target)\n    return _DefaultSubject(target)", "docstring": "Gateway function that initiates an assertion.\n\nArgs:\ntarget: any object whatsoever, the object under test.\n\nReturns:\nA subject appropriate for the target.", "source": "github-repos"}
{"code": "def _create_delegate_handler(delegate):\n    \n    @coroutine\n    def handler(*args):\n        yield\n        yield delegate.send(Transition(args, delegate))\n\n    return handler", "docstring": "Creates a handler function that creates a co-routine that can yield once with the given\npositional arguments to the delegate as a transition.\n\nArgs:\ndelegate (Coroutine): The co-routine to delegate to.\n\nReturns:\nA :class:`callable` handler that returns a co-routine that ignores the data it receives\nand sends with the arguments given to the handler as a :class:`Transition`.", "source": "juraj-google-style"}
{"code": "def dump(self):\n    results = []\n    for data in self.data():\n        results.append(data)\n    return results", "docstring": "Dump raw JSON output of matching queryset objects.\n\nReturns:\nList of dicts.", "source": "codesearchnet"}
{"code": "def add_how(voevent, descriptions=None, references=None):\n    \n    if not voevent.xpath('How'):\n        etree.SubElement(voevent, 'How')\n    if descriptions is not None:\n        for desc in _listify(descriptions):\n            \n            \n            \n            etree.SubElement(voevent.How, 'Description')\n            voevent.How.Description[-1] = desc\n    if references is not None:\n        voevent.How.extend(_listify(references))", "docstring": "Add descriptions or references to the How section.\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\ndescriptions(str): Description string, or list of description\nstrings.\nreferences(:py:class:`voeventparse.misc.Reference`): A reference element\n(or list thereof).", "source": "juraj-google-style"}
{"code": "def __init__(self, callback):\n        \n        super(Interface, self).__init__(callback)\n        self._mac_address_table = brocade_mac_address_table(\n            callback=pynos.utilities.return_xml\n        )", "docstring": "Interface init function.\nArgs:\ncallback: Callback function that will be called for each action.\nReturns:\nInterface Object\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def has_register(self, register):\n    has_reg = False\n    if (isinstance(register, QuantumRegister) and (register in self.qregs)):\n        has_reg = True\n    elif (isinstance(register, ClassicalRegister) and (register in self.cregs)):\n        has_reg = True\n    return has_reg", "docstring": "Test if this circuit has the register r.\n\nArgs:\nregister (Register): a quantum or classical register.\n\nReturns:\nbool: True if the register is contained in this circuit.", "source": "codesearchnet"}
{"code": "def dataverse_search_doi(doi):\n    \n\n    url = '{}/api/datasets/:persistentId?persistentId=doi:{}'.format(dataverse, doi)\n    r = requests.get(url)\n\n    try:\n        r.raise_for_status()\n    except requests.exceptions.HTTPError as error:\n        print('Error looking up DOI \"{}\" in the Harvard Dataverse.'.format(doi))\n        print(r.text)\n        raise error\n\n    return json.loads(r.text)", "docstring": "Fetches metadata pertaining to a Digital Object Identifier (DOI) in the\nHarvard Dataverse.\n\nArgs:\ndoi (str): The Digital Object Identifier (DOI) of the entry in the\nDataverse.\n\nRaises:\nrequests.exceptions.HTTPError: The given DOI does not exist, or there\nwas a problem connecting to the Dataverse.", "source": "juraj-google-style"}
{"code": "def open(self, callback):\n        \n        if self.is_active:\n            raise ValueError(\"This manager is already open.\")\n\n        if self._closed:\n            raise ValueError(\"This manager has been closed and can not be re-used.\")\n\n        self._callback = functools.partial(_wrap_callback_errors, callback)\n\n        \n        self._rpc = bidi.ResumableBidiRpc(\n            start_rpc=self._client.api.streaming_pull,\n            initial_request=self._get_initial_request,\n            should_recover=self._should_recover,\n        )\n        self._rpc.add_done_callback(self._on_rpc_done)\n\n        \n        self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)\n        self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)\n        self._leaser = leaser.Leaser(self)\n        self._heartbeater = heartbeater.Heartbeater(self)\n\n        \n        self._dispatcher.start()\n\n        \n        self._consumer.start()\n\n        \n        self._leaser.start()\n\n        \n        self._heartbeater.start()", "docstring": "Begin consuming messages.\n\nArgs:\ncallback (Callable[None, google.cloud.pubsub_v1.message.Messages]):\nA callback that will be called for each message received on the\nstream.", "source": "juraj-google-style"}
{"code": "def encode_endian(text, encoding, errors=\"strict\", le=True):\n    \n\n    encoding = codecs.lookup(encoding).name\n\n    if encoding == \"utf-16\":\n        if le:\n            return codecs.BOM_UTF16_LE + text.encode(\"utf-16-le\", errors)\n        else:\n            return codecs.BOM_UTF16_BE + text.encode(\"utf-16-be\", errors)\n    elif encoding == \"utf-32\":\n        if le:\n            return codecs.BOM_UTF32_LE + text.encode(\"utf-32-le\", errors)\n        else:\n            return codecs.BOM_UTF32_BE + text.encode(\"utf-32-be\", errors)\n    else:\n        return text.encode(encoding, errors)", "docstring": "Like text.encode(encoding) but always returns little endian/big endian\nBOMs instead of the system one.\n\nArgs:\ntext (text)\nencoding (str)\nerrors (str)\nle (boolean): if little endian\nReturns:\nbytes\nRaises:\nUnicodeEncodeError\nLookupError", "source": "juraj-google-style"}
{"code": "def _operator(self, op, close_group=False):\n    op = op.upper().strip()\n    if (op not in OP_LIST):\n        raise ValueError(\"Error: '{}' is not a valid operator.\".format(op))\n    else:\n        if close_group:\n            op = ((') ' + op) + ' (')\n        else:\n            op = ((' ' + op) + ' ')\n        self.__query['q'] += op\n    return self", "docstring": "Add an operator between terms.\nThere must be a term added before using this method.\nAll operators have helpers, so this method is usually not necessary to directly invoke.\n\nArguments:\nop (str): The operator to add. Must be in the OP_LIST.\nclose_group (bool): If ``True``, will end the current parenthetical\ngroup and start a new one.\nIf ``False``, will continue current group.\n\nExample::\n\"(foo AND bar)\" is one group.\n\"(foo) AND (bar)\" is two groups.\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):\n\n        \n\n        data_dos = {'total': [], 'partial': {}}\n        \n        \n        with open(os.path.join(path_dir, \"boltztrap.transdos\"), 'r') as f:\n            count_series = 0  \n            for line in f:\n                if line.lstrip().startswith(\"\n                    count_series += 1\n                    if count_series > 1:\n                        break\n                else:\n                    data_dos['total'].append(\n                        [Energy(float(line.split()[0]), \"Ry\").to(\"eV\"),\n                         float(line.split()[1])])\n                    total_elec = float(line.split()[2])\n\n        lw_l = 0\n        hg_l = -len(data_dos['total'])\n        if trim_dos:\n            \n            \n            \n            \n            tmp_data = np.array(data_dos['total'])\n            tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]\n            lw_l = len(tmp_data[:, 1]) - len(tmp_den)\n            tmp_ene = tmp_data[lw_l:, 0]\n            tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]\n            hg_l = len(tmp_ene) - len(tmp_den)\n            tmp_ene = tmp_ene[:-hg_l]\n            tmp_data = np.vstack((tmp_ene, tmp_den)).T\n            data_dos['total'] = tmp_data.tolist()\n\n        \n        for file_name in os.listdir(path_dir):\n            if file_name.endswith(\n                    \"transdos\") and file_name != 'boltztrap.transdos':\n                tokens = file_name.split(\".\")[1].split(\"_\")\n                site = tokens[1]\n                orb = '_'.join(tokens[2:])\n                with open(os.path.join(path_dir, file_name), 'r') as f:\n                    for line in f:\n                        if not line.lstrip().startswith(\" \n                            if site not in data_dos['partial']:\n                                data_dos['partial'][site] = {}\n                            if orb not in data_dos['partial'][site]:\n                                data_dos['partial'][site][orb] = []\n                            data_dos['partial'][site][orb].append(\n                                float(line.split()[1]))\n                data_dos['partial'][site][orb] = data_dos['partial'][site][\n                                                     orb][lw_l:-hg_l]\n\n        dos_full = {'energy': [], 'density': []}\n\n        for t in data_dos['total']:\n            dos_full['energy'].append(t[0])\n            dos_full['density'].append(t[1])\n\n        dos = Dos(efermi, dos_full['energy'],\n                  {Spin(dos_spin): dos_full['density']})\n        dos_partial = data_dos['partial']  \n\n        return dos, dos_partial", "docstring": "Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files\nArgs:\npath_dir: (str) dir containing DOS files\nefermi: (float) Fermi energy\ndos_spin: (int) -1 for spin down, +1 for spin up\ntrim_dos: (bool) whether to post-process / trim DOS\n\nReturns:\ntuple - (DOS, dict of partial DOS)", "source": "juraj-google-style"}
{"code": "def get_plot(self, xlim=None, ylim=None):\n        \n\n        ncolors = max(3, len(self._doses))\n        ncolors = min(9, ncolors)\n\n        import palettable\n\n        colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors\n\n        y = None\n        alldensities = []\n        allenergies = []\n        plt = pretty_plot(12, 8)\n\n        \n        \n        for key, dos in self._doses.items():\n            energies = dos['energies']\n            densities = dos['densities']\n            if not y:\n                y = {Spin.up: np.zeros(energies.shape),\n                     Spin.down: np.zeros(energies.shape)}\n            newdens = {}\n            for spin in [Spin.up, Spin.down]:\n                if spin in densities:\n                    if self.stack:\n                        y[spin] += densities[spin]\n                        newdens[spin] = y[spin].copy()\n                    else:\n                        newdens[spin] = densities[spin]\n            allenergies.append(energies)\n            alldensities.append(newdens)\n\n        keys = list(self._doses.keys())\n        keys.reverse()\n        alldensities.reverse()\n        allenergies.reverse()\n        allpts = []\n        for i, key in enumerate(keys):\n            x = []\n            y = []\n            for spin in [Spin.up, Spin.down]:\n                if spin in alldensities[i]:\n                    densities = list(int(spin) * alldensities[i][spin])\n                    energies = list(allenergies[i])\n                    if spin == Spin.down:\n                        energies.reverse()\n                        densities.reverse()\n                    x.extend(energies)\n                    y.extend(densities)\n            allpts.extend(list(zip(x, y)))\n            if self.stack:\n                plt.fill(x, y, color=colors[i % ncolors],\n                         label=str(key))\n            else:\n                plt.plot(x, y, color=colors[i % ncolors],\n                         label=str(key), linewidth=3)\n            if not self.zero_at_efermi:\n                ylim = plt.ylim()\n                plt.plot([self._doses[key]['efermi'],\n                          self._doses[key]['efermi']], ylim,\n                         color=colors[i % ncolors],\n                         linestyle='--', linewidth=2)\n\n        if xlim:\n            plt.xlim(xlim)\n        if ylim:\n            plt.ylim(ylim)\n        else:\n            xlim = plt.xlim()\n            relevanty = [p[1] for p in allpts\n                         if xlim[0] < p[0] < xlim[1]]\n            plt.ylim((min(relevanty), max(relevanty)))\n\n        if self.zero_at_efermi:\n            ylim = plt.ylim()\n            plt.plot([0, 0], ylim, 'k--', linewidth=2)\n\n        plt.xlabel('Energies (eV)')\n        plt.ylabel('Density of states')\n\n        plt.legend()\n        leg = plt.gca().get_legend()\n        ltext = leg.get_texts()  \n        plt.setp(ltext, fontsize=30)\n        plt.tight_layout()\n        return plt", "docstring": "Get a matplotlib plot showing the DOS.\n\nArgs:\nxlim: Specifies the x-axis limits. Set to None for automatic\ndetermination.\nylim: Specifies the y-axis limits.", "source": "juraj-google-style"}
{"code": "def save(self, filething=None, deleteid3=False, padding=None):\n        \n\n        self._save(filething, self.metadata_blocks, deleteid3, padding)", "docstring": "Save metadata blocks to a file.\n\nArgs:\nfilething (filething)\ndeleteid3 (bool): delete id3 tags while at it\npadding (:obj:`mutagen.PaddingFunction`)\n\nIf no filename is given, the one most recently loaded is used.", "source": "juraj-google-style"}
{"code": "def save(self, path):\n    with open(path, 'w') as out_file:\n        json.dump(self.to_dict(), out_file, indent=4)", "docstring": "Save the specification of this MLPipeline in a JSON file.\n\nThe content of the JSON file is the dict returned by the `to_dict` method.\n\nArgs:\npath (str): Path to the JSON file to write.", "source": "codesearchnet"}
{"code": "def Group(items, key):\n  \n  result = {}\n\n  for item in items:\n    result.setdefault(key(item), []).append(item)\n\n  return result", "docstring": "Groups items by given key function.\n\nArgs:\nitems: An iterable or an iterator of items.\nkey: A function which given each item will return the key.\n\nReturns:\nA dict with keys being each unique key and values being a list of items of\nthat key.", "source": "juraj-google-style"}
{"code": "def get_data(self, label: str) -> Any:\n        \n        return self._get_resource(label, self._data, \"data\")", "docstring": "Get a data resource by label\n\nArgs:\nlabel (str): The labvel for the data resource to fetch\n\nReturns:\nThe requeted data object", "source": "juraj-google-style"}
{"code": "def get_extended_attention_mask(self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]):\n    attention_mask = word_attention_mask\n    if entity_attention_mask is not None:\n        attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)\n    if attention_mask.dim() == 3:\n        extended_attention_mask = attention_mask[:, None, :, :]\n    elif attention_mask.dim() == 2:\n        extended_attention_mask = attention_mask[:, None, None, :]\n    else:\n        raise ValueError(f'Wrong shape for attention_mask (shape {attention_mask.shape})')\n    extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n    extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min\n    return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArguments:\nword_attention_mask (`torch.LongTensor`):\nAttention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.\nentity_attention_mask (`torch.LongTensor`, *optional*):\nAttention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.\n\nReturns:\n`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github-repos"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n        return\n\n    \n    if server is not None and author != channel.server.me:\n        \n        prefix = data[\"discord\"][\"servers\"][server.id][\"prefix\"]\n        if content.startswith(prefix):\n            \n            package = content.split(\" \")\n            command = package[0][len(prefix):]\n            args = package[1:]\n            arg = ' '.join(args)\n\n            \n            if command == 'hex':\n                await client.send_typing(channel)\n\n                \n                hex_strs = api_hexconvert.convert_hex_value(arg)\n                \n                if len(hex_strs) > 0:\n                    for hex_str in hex_strs:\n                        image_url = convert_hex_to_url(hex_str)\n                        embed = ui_embed.success(channel, image_url, hex_str)\n                        await embed.send()\n                else:\n                    embed = ui_embed.fail_api(channel)\n                    await embed.send()\n        else:\n            \n            hex_strs = api_hexconvert.convert_hex_value(content)\n            \n            if len(hex_strs) > 0:\n                for hex_str in hex_strs:\n                    await client.send_typing(channel)\n                    image_url = convert_hex_to_url(hex_str)\n                    embed = ui_embed.success(channel, image_url, hex_str)\n                    await embed.send()", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def keep_artifacts(self, **kwargs):\n    path = ('%s/%s/artifacts/keep' % (self.manager.path, self.get_id()))\n    self.manager.gitlab.http_post(path)", "docstring": "Prevent artifacts from being deleted when expiration is set.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the request could not be performed", "source": "codesearchnet"}
{"code": "def get_train_op(self, loss, learning_rate, optimizer=None, clip_norm=None, learnable_scopes=None, optimizer_scope_name=None, **kwargs):\n    if (optimizer_scope_name is None):\n        opt_scope = tf.variable_scope('Optimizer')\n    else:\n        opt_scope = tf.variable_scope(optimizer_scope_name)\n    with opt_scope:\n        if (learnable_scopes is None):\n            variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n        else:\n            variables_to_train = []\n            for scope_name in learnable_scopes:\n                variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))\n        if (optimizer is None):\n            optimizer = tf.train.AdamOptimizer\n        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n        with tf.control_dependencies(extra_update_ops):\n\n            def clip_if_not_none(grad):\n                if (grad is not None):\n                    return tf.clip_by_norm(grad, clip_norm)\n            opt = optimizer(learning_rate, **kwargs)\n            grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)\n            if (clip_norm is not None):\n                grads_and_vars = [(clip_if_not_none(grad), var) for (grad, var) in grads_and_vars]\n            train_op = opt.apply_gradients(grads_and_vars)\n    return train_op", "docstring": "Get train operation for given loss\n\nArgs:\nloss: loss, tf tensor or scalar\nlearning_rate: scalar or placeholder.\nclip_norm: clip gradients norm by clip_norm.\nlearnable_scopes: which scopes are trainable (None for all).\noptimizer: instance of tf.train.Optimizer, default Adam.\n**kwargs: parameters passed to tf.train.Optimizer object\n(scalars or placeholders).\n\nReturns:\ntrain_op", "source": "codesearchnet"}
{"code": "def _save_and_write_assets(self, meta_graph_def, assets_list=None):\n    write_fn = functools.partial(_add_asset_to_metagraph, meta_graph_def)\n    asset_filename_map = _maybe_save_assets(write_fn, assets_list)\n    if not asset_filename_map:\n        tf_logging.info('No assets to write.')\n        return\n    copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)", "docstring": "Saves asset to the meta graph and writes asset files to disk.\n\nArgs:\nmeta_graph_def: The meta graph def to which the assets will be added.\nassets_list: The list where the asset paths are setup.", "source": "github-repos"}
{"code": "def row_starts(self, name=None):\n    with ops.name_scope(name, 'RaggedRowStarts', [self]):\n        return self._row_partition.row_starts()", "docstring": "Returns the start indices for rows in this ragged tensor.\n\nThese indices specify where the values for each row begin in\n`self.values`.  `rt.row_starts()` is equal to `rt.row_splits[:-1]`.\n\nArgs:\nname: A name prefix for the returned tensor (optional).\n\nReturns:\nA 1-D integer Tensor with shape `[nrows]`.\nThe returned tensor is nonnegative, and is sorted in ascending order.\n\n#### Example:\n\n>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n>>> print(rt.values)\ntf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)\n>>> print(rt.row_starts())  # indices of row starts in rt.values\ntf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)", "source": "github-repos"}
{"code": "def random_restore(rnd: Optional[tcod.random.Random], backup: tcod.random.Random) -> None:\n    lib.TCOD_random_restore((rnd.random_c if rnd else ffi.NULL), backup.random_c)", "docstring": "Restore a random number generator from a backed up copy.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\nbackup (Random): The Random instance which was used as a backup.\n\n.. deprecated:: 8.4\nYou can use the standard library copy and pickle modules to save a\nrandom state.", "source": "codesearchnet"}
{"code": "def __init__(self, reactants_coeffs, products_coeffs):\n        \n        \n        all_reactants = sum([k * v for k, v in reactants_coeffs.items()],\n                            Composition({}))\n        all_products = sum([k * v for k, v in products_coeffs.items()],\n                           Composition({}))\n\n        if not all_reactants.almost_equals(all_products, rtol=0,\n                                           atol=self.TOLERANCE):\n            raise ReactionError(\"Reaction is unbalanced!\")\n\n        self._els = all_reactants.elements\n\n        self.reactants_coeffs = reactants_coeffs\n        self.products_coeffs = products_coeffs\n\n        \n        self._coeffs = []\n        self._els = []\n        self._all_comp = []\n        for c in set(list(reactants_coeffs.keys()) +\n                     list(products_coeffs.keys())):\n            coeff = products_coeffs.get(c, 0) - reactants_coeffs.get(c, 0)\n\n            if abs(coeff) > self.TOLERANCE:\n                self._all_comp.append(c)\n                self._coeffs.append(coeff)", "docstring": "Reactants and products to be specified as dict of {Composition: coeff}.\n\nArgs:\nreactants_coeffs ({Composition: float}): Reactants as dict of\n{Composition: amt}.\nproducts_coeffs ({Composition: float}): Products as dict of\n{Composition: amt}.", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(GetUsageAllocationRequestPayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.USAGE_LIMITS_COUNT, local_stream):\n        self._usage_limits_count = primitives.LongInteger(tag=enums.Tags.USAGE_LIMITS_COUNT)\n        self._usage_limits_count.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the GetUsageAllocation request payload and\ndecode it into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def run_the_target(G, target, settings):\n    \n    sprint = settings[\"sprint\"]\n    sprint(\"Running target {}\".format(target))\n    the_formula = get_the_node_dict(G, target)[\"formula\"]\n    run_commands(the_formula, settings)", "docstring": "Wrapper function that sends to commands in a target's 'formula'\nto run_commands()\n\nArgs:\nThe graph we are going to build\nThe target to run\nThe settings dictionary", "source": "juraj-google-style"}
{"code": "def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):  \n  \n  if tensors_to_log is None:\n    tensors_to_log = _TENSORS_TO_LOG\n\n  return tf.train.LoggingTensorHook(\n      tensors=tensors_to_log,\n      every_n_iter=every_n_iter)", "docstring": "Function to get LoggingTensorHook.\n\nArgs:\nevery_n_iter: `int`, print the values of `tensors` once every N local\nsteps taken on the current worker.\ntensors_to_log: List of tensor names or dictionary mapping labels to tensor\nnames. If not set, log _TENSORS_TO_LOG by default.\n**kwargs: a dictionary of arguments to LoggingTensorHook.\n\nReturns:\nReturns a LoggingTensorHook with a standard set of tensors that will be\nprinted to stdout.", "source": "juraj-google-style"}
{"code": "def min_zoom(self):\n    zoom_levels = [map_layer.min_zoom for map_layer in self.layers]\n    return min(zoom_levels)", "docstring": "Get the minimal zoom level of all layers.\n\nReturns:\nint: the minimum of all zoom levels of all layers\n\nRaises:\nValueError: if no layers exist", "source": "codesearchnet"}
{"code": "def ensure_resource_data(self, update_data=False):\n    if (not any(((key in self.data) for key in self.UNIQUE_IDENTIFIERS))):\n        raise exceptions.HPOneViewMissingUniqueIdentifiers(MISSING_UNIQUE_IDENTIFIERS)\n    if (not update_data):\n        return\n    resource_data = None\n    if (('uri' in self.UNIQUE_IDENTIFIERS) and self.data.get('uri')):\n        resource_data = self._helper.do_get(self.data['uri'])\n    else:\n        for identifier in self.UNIQUE_IDENTIFIERS:\n            identifier_value = self.data.get(identifier)\n            if identifier_value:\n                result = self.get_by(identifier, identifier_value)\n                if (result and isinstance(result, list)):\n                    resource_data = result[0]\n                    break\n    if resource_data:\n        self.data.update(resource_data)\n    else:\n        raise exceptions.HPOneViewResourceNotFound(RESOURCE_DOES_NOT_EXIST)", "docstring": "Retrieves data from OneView and updates resource object.\n\nArgs:\nupdate_data: Flag to update resource data when it is required.", "source": "codesearchnet"}
{"code": "def __init__(self, gcs_dag_bucket, gcs_dag_file_path=None):\n    \n    self._gcs_dag_bucket = gcs_dag_bucket\n    self._gcs_dag_file_path = gcs_dag_file_path or ''", "docstring": "Initializes an instance of a Airflow object.\n\nArgs:\ngcs_dag_bucket: Bucket where Airflow expects dag files to be uploaded.\ngcs_dag_file_path: File path of the Airflow dag files.", "source": "juraj-google-style"}
{"code": "def mp_atan2(y, x):\n    return 'if((x)>0, atan((y)/(x)), if(((x)<0) and ((y)>=0), atan((y)/(x))+pi, if(((x)<0) and ((y)<0), atan((y)/(x))-pi, if(((x)==0) and ((y)>0), pi/2, if(((x)==0) and ((y)<0), -pi/2, 0)))))'.replace('pi', str(math.pi)).replace('y', y).replace('x', x)", "docstring": "muparser atan2 function\n\nImplements an atan2(y,x) function for older muparser versions (<2.1.0);\natan2 was added as a built-in function in muparser 2.1.0\n\nArgs:\ny (str): y argument of the atan2(y,x) function\nx (str): x argument of the atan2(y,x) function\n\nReturns:\nA muparser string that calculates atan2(y,x)", "source": "codesearchnet"}
{"code": "def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):\n        \n        \n        from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n        a = SpacegroupAnalyzer(self, symprec=symprec,\n                               angle_tolerance=angle_tolerance)\n        return a.get_space_group_symbol(), a.get_space_group_number()", "docstring": "Convenience method to quickly get the spacegroup of a structure.\n\nArgs:\nsymprec (float): Same definition as in SpacegroupAnalyzer.\nDefaults to 1e-2.\nangle_tolerance (float): Same definition as in SpacegroupAnalyzer.\nDefaults to 5 degrees.\n\nReturns:\nspacegroup_symbol, international_number", "source": "juraj-google-style"}
{"code": "def get_sql_statement_with_environment(item, args=None):\n    if isinstance(item, basestring):\n        item = _sql_statement.SqlStatement(item)\n    elif (not isinstance(item, _sql_statement.SqlStatement)):\n        item = SqlModule.get_default_query_from_module(item)\n        if (not item):\n            raise Exception(('Expected a SQL statement or module but got %s' % str(item)))\n    env = {}\n    if item.module:\n        env.update(item.module.__dict__)\n        parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)\n        if parser:\n            args = SqlModule._get_sql_args(parser, args=args)\n        else:\n            args = None\n    if isinstance(args, dict):\n        env.update(args)\n    return (item, env)", "docstring": "Given a SQLStatement, string or module plus command line args or a dictionary,\nreturn a SqlStatement and final dictionary for variable resolution.\n\nArgs:\nitem: a SqlStatement, %%sql module, or string containing a query.\nargs: a string of command line arguments or a dictionary of values.\n\nReturns:\nA SqlStatement for the query or module, plus a dictionary of variable values to use.", "source": "codesearchnet"}
{"code": "def dispatch_event(event):\n    try:\n        if (event.http_verb == enums.HTTPVerbs.GET):\n            requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()\n        elif (event.http_verb == enums.HTTPVerbs.POST):\n            requests.post(event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT).raise_for_status()\n    except request_exception.RequestException as error:\n        logging.error(('Dispatch event failed. Error: %s' % str(error)))", "docstring": "Dispatch the event being represented by the Event object.\n\nArgs:\nevent: Object holding information about the request to be dispatched to the Optimizely backend.", "source": "codesearchnet"}
{"code": "def jacobian(func, x, unconnected_gradients=None, parallel_iterations=None, experimental_use_pfor=True, name=None):\n    unconnected_gradients = unconnected_gradients or tf.UnconnectedGradients.NONE\n    x, is_x_batch_size = _prepare_args(x)\n    with tf.name_scope(name or 'jacobian'):\n        if not callable(func):\n            raise ValueError('`func` should be a callable in eager mode or when `tf.GradientTape` is used.')\n        with tf.GradientTape() as tape:\n            tape.watch(x)\n            y = func(x)\n        jac = tape.batch_jacobian(y, x, unconnected_gradients=unconnected_gradients, parallel_iterations=parallel_iterations, experimental_use_pfor=experimental_use_pfor)\n        if is_x_batch_size:\n            return jac\n        return jac[0]", "docstring": "Computes the jacobian of `func` wrt to `x`.\n\nArgs:\nfunc: Python callable accepting one `Tensor` of shape of `x` and returning\na `Tensor` of any shape. The function whose jacobian is to be computed.\nx: A `Tensor` with respect to which the gradient is to be computed.\nunconnected_gradients: An enum `tf.UnconnectedGradients` which specifies\nthe gradient value returned when the given input tensors are\nunconnected. Default value: `None`, which maps to\n`tf.UnconnectedGradients.NONE`.\nparallel_iterations: A knob to control how many iterations are dispatched\nin parallel. This knob can be used to control the total memory usage.\nexperimental_use_pfor: If true, uses pfor for computing the Jacobian.\nElse uses a tf.while_loop.\nname: Python `str` name prefixed to ops created by this function.\nDefault value: `None` (i.e., 'jacobian').\n\nReturns:\nA `Tensor` with the gradient of `y` wrt each of `x`.", "source": "github-repos"}
{"code": "def _checkTensorElementLocations(self, out, a):\n    begin_line_num = 0\n    while not out.lines[begin_line_num].startswith('array'):\n        begin_line_num += 1\n    element_index = 0\n    for line_num in range(begin_line_num, len(out.lines)):\n        line = out.lines[line_num]\n        if '...' in line:\n            raise ValueError('Unexpected found ellipses in line representing array')\n        matches = re.finditer(self._ELEMENT_REGEX, line)\n        for match in matches:\n            subscripts = list(np.unravel_index(element_index, a.shape))\n            is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(out, subscripts)\n            self.assertFalse(is_omitted)\n            self.assertEqual(line_num, row)\n            self.assertEqual(match.start(), start_col)\n            self.assertEqual(match.end(), end_col)\n            element_index += 1\n    self.assertEqual(element_index, np.size(a))", "docstring": "Check the results of locate_tensor_element on an ndarray representation.\n\nthat represents a numpy.ndarray.\n\nArgs:\nout: An instance of RichTextLines representing a numpy.ndarray.\na: The numpy.ndarray being represented.\n\nRaises:\nValueError: if any ellipses (\"...\") are found in the lines representing\nthe array.", "source": "github-repos"}
{"code": "def IsPathSuffix(mod_path, path):\n    return (mod_path.endswith(path) and ((len(mod_path) == len(path)) or mod_path[:(- len(path))].endswith(os.sep)))", "docstring": "Checks whether path is a full path suffix of mod_path.\n\nArgs:\nmod_path: Must be an absolute path to a source file. Must not have\nfile extension.\npath: A relative path. Must not have file extension.\n\nReturns:\nTrue if path is a full path suffix of mod_path. False otherwise.", "source": "codesearchnet"}
{"code": "def scale_stoichiometry( self, scaling ):\n         \n        return { k:v*scaling for k,v in self.stoichiometry.items() }", "docstring": "Scale the Calculation stoichiometry\nReturns the stoichiometry, scaled by the argument scaling.\n\nArgs:\nscaling (float): The scaling factor.\n\nReturns:\n(Counter(Str:Int)): The scaled stoichiometry as a Counter of label: stoichiometry pairs", "source": "juraj-google-style"}
{"code": "def cosine_proximity(y_true, y_pred, axis=-1):\n    y_true = nn.l2_normalize(y_true, axis=axis)\n    y_pred = nn.l2_normalize(y_pred, axis=axis)\n    return math_ops.reduce_sum(y_true * y_pred, axis=axis)", "docstring": "Computes the cosine similarity between labels and predictions.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The prediction values.\naxis: (Optional) Defaults to -1. The dimension along which the cosine\nsimilarity is computed.\n\nReturns:\nCosine similarity value.", "source": "github-repos"}
{"code": "def find_last(self, selector, **kwargs):\n    self.debug_log(('Finding last element with selector: %s' % selector))\n    elements = self.find_all(selector, **kwargs)\n    if len(elements):\n        self.debug_log(('find_last (%s): element found' % selector))\n        return elements[(- 1)]\n    else:\n        self.debug_log(('find_last (%s): No element found' % selector))\n        return None", "docstring": "Return the last element found with a selector\n\nArgs:\nselector (str): the selector used to find the element\n\nKwargs:\nwait_until_present (bool)\nwait_until_visible (bool)\nraise_exception (bool)\n\nReturns:\nNone if no element was found\nproxy_element is an element was found\n\nRaises:\nthis function might raise an exception depending\non the raise_exception kwargs\nor\nthe config proxy_driver:raise_exception", "source": "codesearchnet"}
{"code": "def CheckCasts(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    match = Search('(\\\\bnew\\\\s+(?:const\\\\s+)?|\\\\S<\\\\s*(?:const\\\\s+)?)?\\\\b(int|float|double|bool|char|int32|uint32|int64|uint64)(\\\\([^)].*)', line)\n    expecting_function = ExpectingFunctionArgs(clean_lines, linenum)\n    if (match and (not expecting_function)):\n        matched_type = match.group(2)\n        matched_new_or_template = match.group(1)\n        if Match('\\\\([^()]+\\\\)\\\\s*\\\\[', match.group(3)):\n            return\n        matched_funcptr = match.group(3)\n        if ((matched_new_or_template is None) and (not (matched_funcptr and (Match('\\\\((?:[^() ]+::\\\\s*\\\\*\\\\s*)?[^() ]+\\\\)\\\\s*\\\\(', matched_funcptr) or matched_funcptr.startswith('(*)')))) and (not Match(('\\\\s*using\\\\s+\\\\S+\\\\s*=\\\\s*' + matched_type), line)) and (not Search(('new\\\\(\\\\S+\\\\)\\\\s*' + matched_type), line))):\n            error(filename, linenum, 'readability/casting', 4, ('Using deprecated casting style.  Use static_cast<%s>(...) instead' % matched_type))\n    if (not expecting_function):\n        CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', '\\\\((int|float|double|bool|char|u?int(16|32|64))\\\\)', error)\n    if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', '\\\\((char\\\\s?\\\\*+\\\\s?)\\\\)\\\\s*\"', error):\n        pass\n    else:\n        CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', '\\\\((\\\\w+\\\\s?\\\\*+\\\\s?)\\\\)', error)\n    match = Search('(?:[^\\\\w]&\\\\(([^)*][^)]*)\\\\)[\\\\w(])|(?:[^\\\\w]&(static|dynamic|down|reinterpret)_cast\\\\b)', line)\n    if match:\n        parenthesis_error = False\n        match = Match('^(.*&(?:static|dynamic|down|reinterpret)_cast\\\\b)<', line)\n        if match:\n            (_, y1, x1) = CloseExpression(clean_lines, linenum, len(match.group(1)))\n            if ((x1 >= 0) and (clean_lines.elided[y1][x1] == '(')):\n                (_, y2, x2) = CloseExpression(clean_lines, y1, x1)\n                if (x2 >= 0):\n                    extended_line = clean_lines.elided[y2][x2:]\n                    if (y2 < (clean_lines.NumLines() - 1)):\n                        extended_line += clean_lines.elided[(y2 + 1)]\n                    if Match('\\\\s*(?:->|\\\\[)', extended_line):\n                        parenthesis_error = True\n        if parenthesis_error:\n            error(filename, linenum, 'readability/casting', 4, 'Are you taking an address of something dereferenced from a cast?  Wrapping the dereferenced expression in parentheses will make the binding more obvious')\n        else:\n            error(filename, linenum, 'runtime/casting', 4, 'Are you taking an address of a cast?  This is dangerous: could be a temp var.  Take the address before doing the cast, rather than after')", "docstring": "Various cast related checks.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def delete(self, key):\n    \n    key = self._service_key(key)\n    self._service_ops['delete'](key)", "docstring": "Removes the object named by `key` in `service`.\n\nArgs:\nkey: Key naming the object to remove.", "source": "juraj-google-style"}
{"code": "def publish_time(self):\n    timestamp = self._message.publish_time\n    delta = datetime.timedelta(seconds=timestamp.seconds, microseconds=(timestamp.nanos \n    return (datetime_helpers._UTC_EPOCH + delta)", "docstring": "Return the time that the message was originally published.\n\nReturns:\ndatetime: The date and time that the message was published.", "source": "codesearchnet"}
{"code": "def AddBackpropAccumulator(self, op: ops.Operation, grad):\n    self.Exit()\n    shape = grad.get_shape()\n    if shape.is_fully_defined():\n        if self.outer_context:\n            self.outer_context.Enter()\n        acc = constant_op.constant(0, grad.dtype, shape=shape, name='b_acc')\n        if self.outer_context:\n            self.outer_context.Exit()\n    else:\n        value = op.inputs[0]\n        if isinstance(self.outer_context, WhileContext) and self.outer_context.grad_state is not None:\n            forward_ctxt = self.grad_state.forward_context\n            forward_ctxt.outer_context.Enter()\n            zeros_shape = array_ops.shape_internal(value, optimize=False)\n            forward_ctxt.outer_context.Exit()\n            outer_grad_state = self.grad_state.outer_grad_state\n            history_zeros_shape = outer_grad_state.AddForwardAccumulator(zeros_shape)\n            self.outer_context.Enter()\n            real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_zeros_shape, zeros_shape)\n            acc = array_ops.zeros(real_shape, grad.dtype)\n            self.outer_context.Exit()\n        else:\n            if self.outer_context:\n                self.outer_context.Enter()\n            zeros_shape = array_ops.shape_internal(value, optimize=False)\n            acc = array_ops.zeros(zeros_shape, grad.dtype)\n            if self.outer_context:\n                self.outer_context.Exit()\n    self.Enter()\n    self.AddName(acc.name)\n    enter_acc = _Enter(acc, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='b_acc')\n    self.loop_enters.append(enter_acc)\n    merge_acc = merge([enter_acc, enter_acc], name='b_acc')[0]\n    switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)\n    add_acc = math_ops.add(switch_acc_true, grad)\n    next_acc = _NextIteration(add_acc)\n    merge_acc.op._update_input(1, next_acc)\n    result_acc = exit(switch_acc_false, name='b_acc')\n    self.loop_exits.append(result_acc)\n    self.ExitResult([result_acc])\n    return result_acc", "docstring": "Add an accumulation loop for every loop invariant.\n\nThis is added to the backprop loop. It is used to accumulate partial\ngradients within each loop iteration. Called when in the gradient while\ncontext.\n\nThe pseudocode is:\n```\nacc = 0.0;\nwhile (_pivot) {\nacc += grad;\n}\n```\n\nArgs:\nop: The Enter op for a loop invariant.\ngrad: The partial gradient of an iteration for a loop invariant.\n\nReturns:\nThe gradient for a loop invariant.", "source": "github-repos"}
{"code": "def retrieve_file_from_url(url):\n    try:\n        (alias_source, _) = urlretrieve(url)\n        with open(alias_source, 'r') as f:\n            content = f.read()\n            if content[:3].isdigit():\n                raise CLIError(ALIAS_FILE_URL_ERROR.format(url, content.strip()))\n    except Exception as exception:\n        if isinstance(exception, CLIError):\n            raise\n        raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))\n    return alias_source", "docstring": "Retrieve a file from an URL\n\nArgs:\nurl: The URL to retrieve the file from.\n\nReturns:\nThe absolute path of the downloaded file.", "source": "codesearchnet"}
{"code": "def FormatProblem(self, d=None):\n    if (not d):\n        d = self.GetDictToFormat()\n    output_error_text = (self.__class__.ERROR_TEXT % d)\n    if (('reason' in d) and d['reason']):\n        return ('%s\\n%s' % (output_error_text, d['reason']))\n    else:\n        return output_error_text", "docstring": "Return a text string describing the problem.\n\nArgs:\nd: map returned by GetDictToFormat with  with formatting added", "source": "codesearchnet"}
{"code": "def _receive_signal(self, progress):\n        \n        \n        \n\n        self.progress = progress\n        self.updateProgress.emit(progress)", "docstring": "this function takes care of signals emitted by the subscripts\nthe default behaviour is that it just reemits the signal\nArgs:\nprogress: progress of subscript", "source": "juraj-google-style"}
{"code": "def tournament_number2name(self, number):\n    tournaments = self.get_tournaments()\n    d = {t['tournament']: t['name'] for t in tournaments}\n    return d.get(number, None)", "docstring": "Translate tournament number to tournament name.\n\nArgs:\nnumber (int): tournament number to translate\n\nReturns:\nname (str): name of the tournament or `None` if unknown.\n\nExamples:\n>>> NumerAPI().tournament_number2name(4)\n'delta'\n>>> NumerAPI().tournament_number2name(99)\nNone", "source": "codesearchnet"}
{"code": "def ChunkedDecoderLayer(feature_depth,\n                        feedforward_depth,\n                        num_heads,\n                        dropout,\n                        chunk_selector,\n                        mode):\n  \n  return layers.Serial(\n      layers.Residual(  \n          layers.Map(layers.LayerNorm()),\n          layers.ChunkedCausalMultiHeadedAttention(\n              feature_depth, num_heads=num_heads, dropout=dropout,\n              chunk_selector=chunk_selector, mode=mode),\n          layers.Map(layers.Dropout(rate=dropout, mode=mode)),\n      ),\n      layers.Map(ResidualFeedForward(\n          feature_depth, feedforward_depth, dropout, mode=mode))\n  )", "docstring": "Transformer decoder layer operating on chunks.\n\nArgs:\nfeature_depth: int:  depth of embedding\nfeedforward_depth: int: depth of feed-forward layer\nnum_heads: int: number of attention heads\ndropout: float: dropout rate (how much to drop out)\nchunk_selector: a function from chunk number to list of chunks to attend.\nmode: str: 'train' or 'eval'\n\nReturns:\nthe layer.", "source": "juraj-google-style"}
{"code": "def _VerifyMaxBatchSizeAnnotations(self, expected_engines, original_gdef, converted_gdef, default_max_batch_size, expected_max_batch_sizes=None):\n    if isinstance(expected_max_batch_sizes, collections.abc.Collection):\n        self.assertEqual(len(expected_max_batch_sizes), len(expected_engines))\n    else:\n        self.assertIsNone(expected_max_batch_sizes, \"'expected_max_batch_sizes' shall only be a sequence of integers or `None`.\")\n\n    def _ChainAllNodes(graph_def):\n        return itertools.chain(graph_def.node, itertools.chain(*[func.node_def for func in graph_def.library.function]))\n    old_name_to_node_map = {self._ToString(node.name): node for node in _ChainAllNodes(original_gdef)}\n    new_name_to_func_map = {self._ToString(func.signature.name): func for func in converted_gdef.library.function}\n\n    def _DetectStaticBatchSize(node_def):\n        \n        shapes = node_def.attr['_output_shapes'].list.shape\n        batch_size = set((list(s.dim)[0].size if len(s.dim) >= 2 else None for s in shapes))\n        if len(batch_size) == 1 and list(batch_size)[0] >= 1:\n            return list(batch_size)[0]\n        return None\n    name_to_engines_map = {}\n    actual_max_batch_sizes = []\n    for node in _ChainAllNodes(converted_gdef):\n        if node.op == 'TRTEngineOp':\n            engine = node\n            engine_name = self._RemoveGraphSequenceNumber(self._Canonicalize(self._ToString(engine.name)))\n            self.assertIn(engine_name, expected_engines)\n            name_to_engines_map[engine_name] = engine\n            self.assertIn('max_batch_size', node.attr)\n            engine_max_batch_size = node.attr['max_batch_size'].i\n            self.assertIsInstance(engine_max_batch_size, int)\n            actual_max_batch_sizes.append(engine_max_batch_size)\n            seg_func = node.attr['segment_func'].func\n            self.assertIsNotNone(seg_func)\n            self.assertIn(seg_func.name, new_name_to_func_map)\n            seg_func_def = new_name_to_func_map[seg_func.name]\n            logging.info('Segment function name: %s. Including %d nodes.', seg_func.name, len(seg_func_def.node_def))\n            node_max_batch_size_all_none = True\n            for alternative_node in seg_func_def.node_def:\n                node_name = self._Canonicalize(self._ToString(alternative_node.name))\n                if node_name not in old_name_to_node_map:\n                    continue\n                original_node = old_name_to_node_map[node_name]\n                node_max_batch_size = None\n                if '_tftrt_op_max_batch_size' in original_node.attr:\n                    node_max_batch_size = original_node.attr['_tftrt_op_max_batch_size'].i\n                elif original_node.op != 'Const' and alternative_node.op != 'Const' and ('_output_shapes' in original_node.attr):\n                    node_max_batch_size = _DetectStaticBatchSize(original_node)\n                logging.info(\"'{%s}(%s)'s max batch size annotation is %s. 
'{%s}'s max batch size is %s.\", node_name, original_node.op, str(node_max_batch_size), engine_name, str(engine_max_batch_size))\n                node_max_batch_size_all_none &= node_max_batch_size is None\n                self.assertTrue(engine_max_batch_size == node_max_batch_size or node_max_batch_size is None)\n            logging.info(\"'{%s}'s max batch size is %d.\", engine_name, engine_max_batch_size)\n            self.assertTrue(default_max_batch_size is None or engine_max_batch_size == default_max_batch_size or (not node_max_batch_size_all_none))\n    self.assertCountEqual(expected_engines, tuple(name_to_engines_map.keys()))\n    if expected_max_batch_sizes is not None:\n        self.assertCountEqual(expected_max_batch_sizes, actual_max_batch_sizes)", "docstring": "Verifies the max batch size annotations in the original and converted GraphDef.\n\nArgs:\nexpected_engines: A sequence of engines names.\noriginal_gdef: GraphDef. The graph def before TensorRT conversion.\nconverted_gdef: GraphDef. The graph def after TensorRT conversion.\ndefault_max_batch_size: The default maximum batch size to use if no node\ninside a segment is annotated with a customized max batch size. This\nvalue is None when the graph is converted to TF-TRT with dynamic\nengines.\nexpected_max_batch_sizes: Optional. A sequence of max batch sizes for all\nthe engines. `None` if does not check enforce max batch sizes.", "source": "github-repos"}
{"code": "def profile(self, profile):\n    self._staging_data = None\n    lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON')\n    profile_args = ArgBuilder(lang, self.profile_args(profile.get('args')))\n    self._profile = profile\n    self._profile['profile_args'] = profile_args\n    self.load_tcex()\n    self.reports.profile(profile.get('profile_name'))\n    self._create_tc_dirs()", "docstring": "Set the current profile.\n\nArgs:\nprofile (dict): The profile data.", "source": "codesearchnet"}
{"code": "def get_schema(self, reportId: int=None) -> list:\n    if reportId is None:\n        reportId = self.reportId\n    schema = []\n    report = API_SearchAds(self.config, self.auth).reports().get(reportId=reportId).execute()\n    for column in report['request']['columns']:\n        name = column.get('columnName', column.get('savedColumnName'))\n        schema.append({'name': column_header_sanitize(name), 'type': self.column_type(report['request']['reportScope']['agencyId'], report['request']['reportScope']['advertiserId'], name), 'mode': 'NULLABLE'})\n    return schema", "docstring": "Read columns from report and produce BigQuery compatible schema.\n\nColumns with an unknown type default to STRING.\n\nArgs:\nreportId - optional,  if not given uses prior value from request(...) call.\n\nReturns:\nList of BigQuery schema fields derived from report columns.", "source": "github-repos"}
{"code": "def interpolate(self, date, method=None, order=None):\n    if (not (self.start <= date <= self.stop)):\n        raise ValueError((\"Date '%s' not in range\" % date))\n    prev_idx = 0\n    ephem = self\n    while True:\n        idx = len(ephem)\n        if (idx == 1):\n            break\n        k = (idx \n        if (date > ephem[k].date):\n            prev_idx += k\n            ephem = ephem[k:]\n        else:\n            ephem = ephem[:k]\n    method = (method if (method is not None) else self.method)\n    order = (order if (order is not None) else self.order)\n    if (method == self.LINEAR):\n        y0 = self[prev_idx]\n        y1 = self[(prev_idx + 1)]\n        result = (y0[:] + (((y1[:] - y0[:]) * (date.mjd - y0.date.mjd)) / (y1.date.mjd - y0.date.mjd)))\n    elif (method == self.LAGRANGE):\n        stop = (((prev_idx + 1) + (order \n        start = ((prev_idx - (order \n        if (stop >= len(self)):\n            start -= (stop - len(self))\n        elif (start < 0):\n            stop -= start\n            start = 0\n        subset = self[start:stop]\n        date_subset = np.array([x.date.mjd for x in subset])\n        result = np.zeros(6)\n        for j in range(order):\n            mask = (date_subset != date_subset[j])\n            l_j = ((date.mjd - date_subset[mask]) / (date_subset[j] - date_subset[mask]))\n            result = (result + (l_j.prod() * subset[j]))\n    else:\n        raise ValueError('Unkown interpolation method', method)\n    orb = ephem[0]\n    return orb.__class__(date, result, orb.form, orb.frame, orb.propagator)", "docstring": "Interpolate data at a given date\n\nArgs:\ndate (Date):\nmethod (str): Method of interpolation to use\norder (int): In case of ``LAGRANGE`` method is used\nReturn:\nOrbit:", "source": "codesearchnet"}
{"code": "def AddFileDescriptor(self, file_desc):\n    \n\n    self._AddFileDescriptor(file_desc)\n    \n    \n    for extension in file_desc.extensions_by_name.values():\n      self._file_desc_by_toplevel_extension[\n          extension.full_name] = file_desc", "docstring": "Adds a FileDescriptor to the pool, non-recursively.\n\nIf the FileDescriptor contains messages or enums, the caller must explicitly\nregister them.\n\nArgs:\nfile_desc: A FileDescriptor.", "source": "juraj-google-style"}
{"code": "def _AnalyzeFileObject(self, mediator, file_object):\n    \n    maximum_read_size = max([\n        analyzer_object.SIZE_LIMIT for analyzer_object in self._analyzers])\n\n    hashers_only = True\n    for analyzer_object in self._analyzers:\n      if not isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer):\n        hashers_only = False\n        break\n\n    file_size = file_object.get_size()\n\n    if (hashers_only and self._hasher_file_size_limit and\n        file_size > self._hasher_file_size_limit):\n      return\n\n    file_object.seek(0, os.SEEK_SET)\n\n    data = file_object.read(maximum_read_size)\n    while data:\n      if self._abort:\n        break\n\n      for analyzer_object in self._analyzers:\n        if self._abort:\n          break\n\n        if (not analyzer_object.INCREMENTAL_ANALYZER and\n            file_size > analyzer_object.SIZE_LIMIT):\n          continue\n\n        if (isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer) and\n            self._hasher_file_size_limit and\n            file_size > self._hasher_file_size_limit):\n          continue\n\n        self.processing_status = analyzer_object.PROCESSING_STATUS_HINT\n\n        analyzer_object.Analyze(data)\n\n        self.last_activity_timestamp = time.time()\n\n      data = file_object.read(maximum_read_size)\n\n    display_name = mediator.GetDisplayName()\n    for analyzer_object in self._analyzers:\n      if self._abort:\n        break\n\n      for result in analyzer_object.GetResults():\n        logger.debug((\n            '[AnalyzeFileObject] attribute {0:s}:{1:s} calculated for '\n            'file: {2:s}.').format(\n                result.attribute_name, result.attribute_value, display_name))\n\n        mediator.AddEventAttribute(\n            result.attribute_name, result.attribute_value)\n\n      analyzer_object.Reset()\n\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING", "docstring": "Processes a file-like object with analyzers.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_object (dfvfs.FileIO): file-like object to process.", "source": "juraj-google-style"}
{"code": "def parse_message(message: str) -> str:\n    if message is None:\n        return ''\n    message = message.strip().lower()\n    if not message.startswith(('run-slow', 'run_slow', 'run slow')):\n        return ''\n    message = message[len('run slow'):]\n    while message.strip().startswith(':'):\n        message = message.strip()[1:]\n    return message", "docstring": "Parses a GitHub pull request's comment to find the models specified in it to run slow CI.\n\nArgs:\nmessage (`str`): The body of a GitHub pull request's comment.\n\nReturns:\n`str`: The substring in `message` after `run-slow`, run_slow` or run slow`. If no such prefix is found, the\nempty string is returned.", "source": "github-repos"}
{"code": "def _GetNumberOfSecondsFromElements(self, year, month, day_of_month, hours, minutes, seconds):\n    if ((not year) or (not month) or (not day_of_month)):\n        return None\n    if (hours is None):\n        hours = 0\n    elif (hours not in range(0, 24)):\n        raise ValueError('Hours value: {0!s} out of bounds.'.format(hours))\n    if (minutes is None):\n        minutes = 0\n    elif (minutes not in range(0, 60)):\n        raise ValueError('Minutes value: {0!s} out of bounds.'.format(minutes))\n    if (seconds is None):\n        seconds = 0\n    elif (seconds not in range(0, 60)):\n        raise ValueError('Seconds value: {0!s} out of bounds.'.format(seconds))\n    days_per_month = self._GetDaysPerMonth(year, month)\n    if ((day_of_month < 1) or (day_of_month > days_per_month)):\n        raise ValueError('Day of month value out of bounds.')\n    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n    number_of_seconds = calendar.timegm(time_elements_tuple)\n    return int(number_of_seconds)", "docstring": "Retrieves the number of seconds from the date and time elements.\n\nArgs:\nyear (int): year e.g. 1970.\nmonth (int): month, where 1 represents January.\nday_of_month (int): day of the month, where 1 represents the first day.\nhours (int): hours.\nminutes (int): minutes.\nseconds (int): seconds.\n\nReturns:\nint: number of seconds since January 1, 1970 00:00:00 or None if year,\nmonth or day of month are not set.\n\nRaises:\nValueError: if the time elements are invalid.", "source": "codesearchnet"}
{"code": "def from_path(cls, path, format=None):\n        \n        name = None\n        data = None\n\n        if format is None:\n            formats = (FileFormat.py, FileFormat.yaml)\n        else:\n            formats = (format,)\n\n        try:\n            mode = os.stat(path).st_mode\n        except (IOError, OSError):\n            raise PackageMetadataError(\n                \"Path %r did not exist, or was not accessible\" % path)\n        is_dir = stat.S_ISDIR(mode)\n\n        for name_ in config.plugins.package_repository.filesystem.package_filenames:\n            for format_ in formats:\n                if is_dir:\n                    filepath = os.path.join(path, \"%s.%s\" % (name_,\n                                                             format_.extension))\n                    exists = os.path.isfile(filepath)\n                else:\n                    \n                    \n                    if format is None:\n                        if os.path.splitext(path)[1] != format_.extension:\n                            continue\n                    filepath = path\n                    exists = True\n\n                if exists:\n                    data = load_from_file(filepath, format_, disable_memcache=True)\n                    break\n            if data:\n                name = data.get(\"name\")\n                if name is not None or isinstance(name, basestring):\n                    break\n\n        if data is None:\n            raise PackageMetadataError(\"No package definition file found at %s\" % path)\n\n        if name is None or not isinstance(name, basestring):\n            raise PackageMetadataError(\n                \"Error in %r - missing or non-string field 'name'\" % filepath)\n\n        package = create_package(name, data, package_cls=cls)\n\n        \n        result = package._get_preprocessed(data)\n\n        if result:\n            package, data = result\n\n        package.filepath = filepath\n\n        \n        \n        package.includes = set()\n\n        def visit(d):\n            for k, v in d.iteritems():\n                if isinstance(v, SourceCode):\n                    package.includes |= (v.includes or set())\n                elif isinstance(v, dict):\n                    visit(v)\n\n        visit(data)\n\n        package._validate_includes()\n\n        return package", "docstring": "Load a developer package.\n\nA developer package may for example be a package.yaml or package.py in a\nuser's source directory.\n\nArgs:\npath: Directory containing the package definition file, or file\npath for the package file itself\nformat: which FileFormat to use, or None to check both .py and .yaml\n\nReturns:\n`Package` object.", "source": "juraj-google-style"}
{"code": "def explain_tabular(self, trainset, labels, instance, num_features=5, kernel_width=3):\n    from lime.lime_tabular import LimeTabularExplainer\n    if isinstance(instance, six.string_types):\n        instance = next(csv.DictReader([instance], fieldnames=self._headers))\n    categories = self._get_unique_categories(trainset)\n    np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)\n    predict_fn = self._make_tabular_predict_fn(labels, instance, categories)\n    prediction_df = pd.DataFrame([instance])\n    prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)\n    explainer = LimeTabularExplainer(np_trainset, feature_names=(self._categorical_columns + self._numeric_columns), class_names=labels, categorical_features=range(len(categories)), categorical_names={i: v for (i, v) in enumerate(categories)}, kernel_width=kernel_width)\n    exp = explainer.explain_instance(prediction_instance[0], predict_fn, num_features=num_features, labels=range(len(labels)))\n    return exp", "docstring": "Explain categorical and numeric features for a prediction.\n\nIt analyze the prediction by LIME, and returns a report of the most impactful tabular\nfeatures contributing to certain labels.\n\nArgs:\ntrainset: a DataFrame representing the training features that LIME can use to decide\nvalue distributions.\nlabels: a list of labels to explain.\ninstance: the prediction instance. It needs to conform to model's input. Can be a csv\nline string, or a dict.\nnum_features: maximum number of features to show.\nkernel_width: Passed to LIME LimeTabularExplainer directly.\n\nReturns:\nA LIME's lime.explanation.Explanation.", "source": "codesearchnet"}
{"code": "def _copy_stream_position(position):\n    if isinstance(position, types.StreamPosition):\n        output = types.StreamPosition()\n        output.CopyFrom(position)\n        return output\n    return types.StreamPosition(**position)", "docstring": "Copy a StreamPosition.\n\nArgs:\nposition (Union[ \\\ndict, \\\n~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \\\n]):\nStreamPostion (or dictionary in StreamPosition format) to copy.\n\nReturns:\n~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:\nA copy of the input StreamPostion.", "source": "codesearchnet"}
{"code": "def _update_from_body(self, destination, source):\n    \n    for key, value in source.iteritems():\n      destination_value = destination.get(key)\n      if isinstance(value, dict) and isinstance(destination_value, dict):\n        self._update_from_body(destination_value, value)\n      else:\n        destination[key] = value", "docstring": "Updates the dictionary for an API payload with the request body.\n\nThe values from the body should override those already in the payload, but\nfor nested fields (message objects) the values can be combined\nrecursively.\n\nArgs:\ndestination: A dictionary containing an API payload parsed from the\npath and query parameters in a request.\nsource: A dictionary parsed from the body of the request.", "source": "juraj-google-style"}
{"code": "def upper_diag_self_prodx(list_):\n    \n    return [(item1, item2)\n            for n1, item1 in enumerate(list_)\n            for n2, item2 in enumerate(list_) if n1 < n2]", "docstring": "upper diagnoal of cartesian product of self and self.\nWeird name. fixme\n\nArgs:\nlist_ (list):\n\nReturns:\nlist:\n\nCommandLine:\npython -m utool.util_alg --exec-upper_diag_self_prodx\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_alg import *  # NOQA\n>>> list_ = [1, 2, 3]\n>>> result = upper_diag_self_prodx(list_)\n>>> print(result)\n[(1, 2), (1, 3), (2, 3)]", "source": "juraj-google-style"}
{"code": "def list_from_file(filename, prefix='', offset=0, max_num=0):\n    cnt = 0\n    item_list = []\n    with open(filename, 'r') as f:\n        for _ in range(offset):\n            f.readline()\n        for line in f:\n            if ((max_num > 0) and (cnt >= max_num)):\n                break\n            item_list.append((prefix + line.rstrip('\\n')))\n            cnt += 1\n    return item_list", "docstring": "Load a text file and parse the content as a list of strings.\n\nArgs:\nfilename (str): Filename.\nprefix (str): The prefix to be inserted to the begining of each item.\noffset (int): The offset of lines.\nmax_num (int): The maximum number of lines to be read,\nzeros and negatives mean no limitation.\n\nReturns:\nlist[str]: A list of strings.", "source": "codesearchnet"}
{"code": "def assert_print_equals_golden(self, json_path: str, proto_path: str, proto_cls: Type[message.Message], *, print_f: Callable[..., str], json_delimiter: Optional[str]=None, proto_delimiter: Optional[str]=None, **print_kwargs: Any) -> None:\n    testdata = self._read_json_and_protos(json_path, proto_path, proto_cls, json_delimiter=json_delimiter, proto_delimiter=proto_delimiter)\n    for json_str, proto in zip(testdata.json_strs, testdata.protos):\n        from_json = json.loads(json_str, parse_int=decimal.Decimal, parse_float=decimal.Decimal)\n        orig_proto = copy.deepcopy(proto)\n        raw_json_str = print_f(proto, **print_kwargs)\n        from_proto = json.loads(raw_json_str, parse_int=decimal.Decimal, parse_float=decimal.Decimal)\n        self.assertEqual(proto, orig_proto)\n        self.assertEqual(from_json, from_proto)", "docstring": "Compare printer output against 'golden' file.\n\nNote that we perform a comparison between Python native types after calling\ninto json.loads(...), as diffing raw strings can have minor differences in\nspaces that are inconsequential to the underlying representations.\n\nIf json_delimiter and proto_delimiter are supplied, the cardinality of the\nresulting sequences must match exactly or an error will be thrown.\n\nArgs:\njson_path: The filepath to the .json file (loaded as a 'golden').\nproto_path: The filepath to the .prototxt file (loaded as a 'test case').\nproto_cls: The type of protobuf message to serialize to and print from\n(type under test).\nprint_f: The print function to execute and examine.\njson_delimiter: An optional delimiter for the .json file to load multiple\nrepresentations. Defaults to None.\nproto_delimiter: An optional delimiter for the .prototxt file to load\nmultiple representations. Defaults to None.\n**print_kwargs: An optional list of key/value arguments to supply to the\nprint function.", "source": "github-repos"}
{"code": "def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\n    if resolved_archive_file.endswith('.safetensors'):\n        load_function = load_tf_weights_from_safetensors\n    else:\n        load_function = load_tf_weights_from_h5\n    return load_function(model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix)", "docstring": "Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and\nshapes.\n\nArgs:\nmodel (`keras.models.Model`):\nThe model to load the weights into.\nresolved_archive_file (`str`):\nThe location of the H5 file.\nignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):\nWhether or not to ignore weights with shapes that don't match between the checkpoint of the model.\n\nReturns:\nThree lists, one for the missing layers, another one for the unexpected layers, and a last one for the\nmismatched layers.", "source": "github-repos"}
{"code": "def mark_streamer(self, index):\n        \n\n        self._logger.debug(\"Marking streamer %d manually\", index)\n        if index >= len(self.streamers):\n            raise ArgumentError(\"Invalid streamer index\", index=index, num_streamers=len(self.streamers))\n\n        self._manually_triggered_streamers.add(index)", "docstring": "Manually mark a streamer that should trigger.\n\nThe next time check_streamers is called, the given streamer will be\nmanually marked that it should trigger, which will cause it to trigger\nunless it has no data.\n\nArgs:\nindex (int): The index of the streamer that we should mark as\nmanually triggered.\n\nRaises:\nArgumentError: If the streamer index is invalid.", "source": "juraj-google-style"}
{"code": "def joinpaths(self, *paths):\n    if (sys.version_info >= (3, 6)):\n        paths = [os.fspath(path) for path in paths]\n    if (len(paths) == 1):\n        return paths[0]\n    if self.is_windows_fs:\n        return self._join_paths_with_drive_support(*paths)\n    joined_path_segments = []\n    sep = self._path_separator(paths[0])\n    for path_segment in paths:\n        if self._starts_with_root_path(path_segment):\n            joined_path_segments = [path_segment]\n        else:\n            if (joined_path_segments and (not joined_path_segments[(- 1)].endswith(sep))):\n                joined_path_segments.append(sep)\n            if path_segment:\n                joined_path_segments.append(path_segment)\n    return self._matching_string(paths[0], '').join(joined_path_segments)", "docstring": "Mimic os.path.join using the specified path_separator.\n\nArgs:\n*paths:  (str) Zero or more paths to join.\n\nReturns:\n(str) The paths joined by the path separator, starting with\nthe last absolute path in paths.", "source": "codesearchnet"}
{"code": "def save(self, path):\n        \n\n        data = self.encode()\n\n        with open(path, \"wb\") as out:\n            out.write(data)", "docstring": "Save a binary copy of this report\n\nArgs:\npath (string): The path where we should save the binary copy of the report", "source": "juraj-google-style"}
{"code": "def _parse_book_links(dom):\n    links = []\n    picker = (lambda x: x.params.get('class', '').startswith('boxProKnihy'))\n    for el in dom.find(None, fn=picker):\n        book_ref = el.find('a')\n        if ((not book_ref) or ('href' not in book_ref[0].params)):\n            continue\n        links.append(book_ref[0].params['href'])\n    return links", "docstring": "Parse links to the details about publications from page with book list.\n\nArgs:\ndom (obj): HTMLElement container of the page with book list.\n\nReturns:\nlist: List of strings / absolute links to book details.", "source": "codesearchnet"}
{"code": "def ParseOptions(self, options):\n    \n    \n    \n    self._ParseTimezoneOption(options)\n\n    names = ['analysis_plugins', 'language', 'profiling']\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=names)\n\n    self.list_analysis_plugins = self._analysis_plugins == 'list'\n    self.list_language_identifiers = self._preferred_language == 'list'\n    self.list_profilers = self._profilers == 'list'\n\n    if (self.list_analysis_plugins or self.list_language_identifiers or\n        self.list_profilers or self.list_timezones):\n      return\n\n    \n    \n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=['output_modules'])\n\n    self.list_output_modules = self._output_format == 'list'\n    if self.list_output_modules:\n      return\n\n    self._ParseInformationalOptions(options)\n\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=['data_location'])\n\n    self._ParseLogFileOptions(options)\n\n    self._ParseProcessingOptions(options)\n\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=['event_filters'])\n\n    self._deduplicate_events = getattr(options, 'dedup', True)\n\n    if self._data_location:\n      \n      options.data_location = self._data_location\n    else:\n      logger.warning('Unable to automatically determine data location.')\n\n    self._command_line_arguments = self.GetCommandLineArguments()\n\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=['storage_file'])\n\n    \n    if not self._storage_file_path:\n      raise errors.BadConfigOption('Missing storage file option.')\n\n    if not os.path.isfile(self._storage_file_path):\n      raise errors.BadConfigOption(\n          'No such storage file: {0:s}.'.format(self._storage_file_path))\n\n    self._EnforceProcessMemoryLimit(self._process_memory_limit)\n\n    self._analysis_plugins = self._CreateAnalysisPlugins(options)\n    self._output_module = self._CreateOutputModule(options)", "docstring": "Parses the options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def nltk_stemmer(stemmer, token, i=None, tokens=None):\n\n    def wrapped_stem(token, metadata=None):\n        return stemmer.stem(token)\n    return token.update(wrapped_stem)", "docstring": "Wrapper around a NLTK SnowballStemmer, which includes stop words for\neach language.\n\nArgs:\nstemmer (SnowballStemmer): Stemmer instance that performs the stemming.\ntoken (lunr.Token): The token to stem.\ni (int): The index of the token in a set.\ntokens (list): A list of tokens representing the set.", "source": "codesearchnet"}
{"code": "def _SetPath(self, path):\n    \n    old_path = self._path\n    if old_path and not io_wrapper.IsCloudPath(old_path):\n      try:\n        \n        size = tf.io.gfile.stat(old_path).length\n        logger.debug('Setting latest size of %s to %d', old_path, size)\n        self._finalized_sizes[old_path] = size\n      except tf.errors.OpError as e:\n        logger.error('Unable to get size of %s: %s', old_path, e)\n\n    self._path = path\n    self._loader = self._loader_factory(path)", "docstring": "Sets the current path to watch for new events.\n\nThis also records the size of the old path, if any. If the size can't be\nfound, an error is logged.\n\nArgs:\npath: The full path of the file to watch.", "source": "juraj-google-style"}
{"code": "def aes_encrypt(base64_encryption_key, data):\n    \n    if isinstance(data, text_type):\n        data = data.encode(\"UTF-8\")\n    aes_key_bytes, hmac_key_bytes = _extract_keys(base64_encryption_key)\n    data = _pad(data)\n    iv_bytes = os.urandom(AES_BLOCK_SIZE)\n    cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)\n    data = iv_bytes + cipher.encrypt(data)  \n    hmac_signature = hmac.new(hmac_key_bytes, data, hashlib.sha256).digest()\n    return as_base64(data + hmac_signature)", "docstring": "Encrypt data with AES-CBC and sign it with HMAC-SHA256\n\nArguments:\nbase64_encryption_key (str): a base64-encoded string containing an AES encryption key\nand HMAC signing key as generated by generate_encryption_key()\ndata (str): a byte string containing the data to be encrypted\n\nReturns:\nstr: the encrypted data as a byte string with the HMAC signature appended to the end", "source": "juraj-google-style"}
{"code": "def whilst(coro, coro_test, assert_coro=None, *args, **kw):\n    assert_corofunction(coro=coro, coro_test=coro_test)\n    results = []\n    assert_coro = (assert_coro or assert_true)\n    while (yield from assert_coro((yield from coro_test()))):\n        results.append((yield from coro(*args, **kw)))\n    return results", "docstring": "Repeatedly call `coro` coroutine function while `coro_test` returns `True`.\n\nThis function is the inverse of `paco.until()`.\n\nThis function is a coroutine.\n\nArguments:\ncoro (coroutinefunction): coroutine function to execute.\ncoro_test (coroutinefunction): coroutine function to test.\nassert_coro (coroutinefunction): optional assertion coroutine used\nto determine if the test passed or not.\n*args (mixed): optional variadic arguments to pass to `coro` function.\n\nRaises:\nTypeError: if input arguments are invalid.\n\nReturns:\nlist: result values returned by `coro`.\n\nUsage::\n\ncalls = 0\n\nasync def task():\nnonlocal calls\ncalls += 1\nreturn calls\n\nasync def calls_lt_4():\nreturn calls > 4\n\nawait paco.until(task, calls_lt_4)\n# => [1, 2, 3, 4, 5]", "source": "codesearchnet"}
{"code": "def __len__(self):\n    raise NotImplementedError", "docstring": "Number of batch in the Sequence.\n\nReturns:\nThe number of batches in the Sequence.", "source": "github-repos"}
{"code": "def Add(self, artifact=None, target=None, callback=None):\n    \n    \n    \n    \n    \n    \n    if target is None:\n      target = Target()\n    os_name = target.Get(\"os\") or [None]\n    cpe = target.Get(\"cpe\") or [None]\n    label = target.Get(\"label\") or [None]\n    attributes = itertools.product(os_name, cpe, label)\n    new_conditions = [Condition(artifact, *attr) for attr in attributes]\n    self.conditions.update(new_conditions)\n    self._Register(new_conditions, callback)", "docstring": "Add criteria for a check.\n\nArgs:\nartifact: An artifact name.\ntarget: A tuple of artifact necessary to process the data.\ncallback: Entities that should be called if the condition matches.", "source": "juraj-google-style"}
{"code": "def build_masked_loss(loss_function, mask_value):\n    \n\n    def masked_loss_function(y_true, y_pred):\n        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())\n        return loss_function(y_true * mask, y_pred * mask)\n\n    return masked_loss_function", "docstring": "Builds a loss function that masks based on targets\n\nArgs:\nloss_function: The loss function to mask\nmask_value: The value to mask in the targets\n\nReturns:\nfunction: a loss function that acts like loss_function with masked inputs", "source": "juraj-google-style"}
{"code": "def get_replicas(self, service_id: str) -> str:\n        \n        \n        replicas = []\n\n        \n        if not self._manager:\n            raise RuntimeError('Only the Swarm manager node can retrieve '\n                               'replication level of the service')\n\n        service_tasks = self._client.services.get(service_id).tasks()\n        for task in service_tasks:\n            if task['Status']['State'] == \"running\":\n                replicas.append(task)\n        return len(replicas)", "docstring": "Get the replication level of a service.\n\nArgs:\nservice_id (str): docker swarm service id\n\nReturns:\nstr, replication level of the service", "source": "juraj-google-style"}
{"code": "def send(self, message):\n        \n        if \"call_id\" not in message:\n            message[\"call_id\"] = self.gen_call_id()\n\n        self._ws.send(message.to_json())", "docstring": "Sends a RTMMessage\nShould be called after starting the loop\n\nArgs:\nmessage(RTMMessage): the sending message\n\nRaises:\nWebSocketConnectionClosedException: if the loop is closed", "source": "juraj-google-style"}
{"code": "def supported_cache_type(types):\n    if isinstance(types, str):\n        types = [typ.strip() for typ in types.split(',')]\n    for typ in types:\n        if (typ not in ['reflink', 'hardlink', 'symlink', 'copy']):\n            return False\n    return True", "docstring": "Checks if link type config option has a valid value.\n\nArgs:\ntypes (list/string): type(s) of links that dvc should try out.", "source": "codesearchnet"}
{"code": "def list_features(self, **kwargs):\n        \n        \n        params = {\n            'language': util.language_code(kwargs.get('lang')),\n            'publicData': True\n        }\n\n        \n        result = self.make_request('list_features', {}, **params)\n\n        if not util.check_result(result):\n            return False, result.get('message', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'Data')\n        return True, [emtype.ParkingFeature(**a) for a in values]", "docstring": "Obtain a list of parkings.\n\nArgs:\nlang (str):  Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Parking]), or message\nstring in case of error.", "source": "juraj-google-style"}
{"code": "def _to_enos_networks(networks):\n    nets = []\n    for (roles, network) in networks:\n        nets.append(network.to_enos(roles))\n    logger.debug(nets)\n    return nets", "docstring": "Transform the networks returned by deploy5k.\n\nArgs:\nnetworks (dict): networks returned by\n:py:func:`enoslib.infra.provider.Provider.init`", "source": "codesearchnet"}
{"code": "def from_dir(dirpath: Path, feat_type: str) -> None:\n    logger.info('Extracting features from directory {}'.format(dirpath))\n    dirname = str(dirpath)\n\n    def all_wavs_processed() -> bool:\n        '\\n        True if all wavs in the directory have corresponding numpy feature\\n        file; False otherwise.\\n        '\n        for fn in os.listdir(dirname):\n            (prefix, ext) = os.path.splitext(fn)\n            if (ext == '.wav'):\n                if (not os.path.exists(os.path.join(dirname, ('%s.%s.npy' % (prefix, feat_type))))):\n                    return False\n        return True\n    if all_wavs_processed():\n        logger.info('All WAV files already preprocessed')\n        return\n    if ((feat_type == 'pitch') or (feat_type == 'fbank_and_pitch')):\n        kaldi_pitch(dirname, dirname)\n    for filename in os.listdir(dirname):\n        logger.info('Preparing %s features for %s', feat_type, filename)\n        path = os.path.join(dirname, filename)\n        if path.endswith('.wav'):\n            if empty_wav(path):\n                raise PersephoneException(\"Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.\".format(path))\n            if (feat_type == 'fbank'):\n                fbank(path)\n            elif (feat_type == 'fbank_and_pitch'):\n                fbank(path)\n                prefix = os.path.splitext(filename)[0]\n                combine_fbank_and_pitch(dirname, prefix)\n            elif (feat_type == 'pitch'):\n                pass\n            elif (feat_type == 'mfcc13_d'):\n                mfcc(path)\n            else:\n                logger.warning('Feature type not found: %s', feat_type)\n                raise PersephoneException(('Feature type not found: %s' % feat_type))", "docstring": "Performs feature extraction from the WAV files in a directory.\n\nArgs:\ndirpath: A `Path` to the directory where the WAV files reside.\nfeat_type: The type of features that are being used.", "source": "codesearchnet"}
{"code": "def submit(self, command, blocksize, tasks_per_node, job_name='parsl.auto'):\n    wrapped_cmd = self.launcher(command, tasks_per_node, 1)\n    (instance, name) = self.create_instance(command=wrapped_cmd)\n    self.provisioned_blocks += 1\n    self.resources[name] = {'job_id': name, 'status': translate_table[instance['status']]}\n    return name", "docstring": "The submit method takes the command string to be executed upon\ninstantiation of a resource most often to start a pilot.\n\nArgs :\n- command (str) : The bash command string to be executed.\n- blocksize (int) : Blocksize to be requested\n- tasks_per_node (int) : command invocations to be launched per node\n\nKWargs:\n- job_name (str) : Human friendly name to be assigned to the job request\n\nReturns:\n- A job identifier, this could be an integer, string etc\n\nRaises:\n- ExecutionProviderException or its subclasses", "source": "codesearchnet"}
{"code": "def format_date(self, dl_string):\n    thedate = get_simple_date(dl_string)\n    if ((thedate != 'Failed') and thedate):\n        return thedate\n    day = get_day_of_month(dl_string)\n    month = get_month(dl_string)\n    return (((day + '.') + month) + '.')", "docstring": "Formats various date formats to dd.MM.\n\nExamples\n- January 15th      --> 15.01.\n- 15.01.2017        --> 15.01.\n- 15th of January   --> 15.01.\n- 15.1.             --> 15.01.\n\nKeyword arguments:\ndl_string -- a string to be formatted\n\nReturns:\nDate string in format dd.MM. or \"None.None\"", "source": "codesearchnet"}
{"code": "def suggest_charges(self, tolerance=0.1):\n    recommendations = {}\n    for def_type in self.defect_types:\n        test_charges = np.arange((np.min(self.stable_charges[def_type]) - 1), (np.max(self.stable_charges[def_type]) + 2))\n        test_charges = [charge for charge in test_charges if (charge not in self.finished_charges[def_type])]\n        if len(self.transition_level_map[def_type].keys()):\n            min_tl = min(self.transition_level_map[def_type].keys())\n            if (min_tl < tolerance):\n                max_charge = max(self.transition_level_map[def_type][min_tl])\n                test_charges = [charge for charge in test_charges if (charge < max_charge)]\n            max_tl = max(self.transition_level_map[def_type].keys())\n            if (max_tl > (self.band_gap - tolerance)):\n                min_charge = min(self.transition_level_map[def_type][max_tl])\n                test_charges = [charge for charge in test_charges if (charge > min_charge)]\n        else:\n            test_charges = [charge for charge in test_charges if (charge not in self.stable_charges[def_type])]\n        recommendations[def_type] = test_charges\n    return recommendations", "docstring": "Suggest possible charges for defects to computee based on proximity\nof known transitions from entires to VBM and CBM\n\nArgs:\ntolerance (float): tolerance with respect to the VBM and CBM to\n`          continue to compute new charges", "source": "codesearchnet"}
{"code": "def __init__(self, xml=None, resort=True):\n        \n        self.leader = None\n        self.oai_marc = False\n        self.controlfields = OrderedDict()\n        self.datafields = OrderedDict()\n        self.valid_i_chars = set(list(\" 0123456789*\"))\n\n        \n        self.resorted = tools.resorted if resort else lambda x: x\n\n        \n        if hasattr(xml, \"read\"):\n            xml = xml.read()\n\n        \n        \n        if xml is not None:\n            self._original_xml = xml\n            self._parse_string(xml)", "docstring": "Constructor.\n\nArgs:\nxml (str/file, default None): XML to be parsed. May be file-like\nobject.\nresort (bool, default True): Sort the output alphabetically?", "source": "juraj-google-style"}
{"code": "def run(in_file_nose, out_dir_unitth):\n        \n        suites = Converter.read_nose(in_file_nose)\n        Converter.write_unitth(suites, out_dir_unitth)", "docstring": "Convert nose-style test reports to UnitTH-style test reports by splitting modules into separate XML files\n\nArgs:\nin_file_nose (:obj:`str`): path to nose-style test report\nout_file_unitth (:obj:`str`): path to save UnitTH-style test reports", "source": "juraj-google-style"}
{"code": "def start(args_string):\n    context = _get_context()\n    try:\n        import IPython\n        import IPython.display\n    except ImportError:\n        IPython = None\n    if (context == _CONTEXT_NONE):\n        handle = None\n        print('Launching TensorBoard...')\n    else:\n        handle = IPython.display.display(IPython.display.Pretty('Launching TensorBoard...'), display_id=True)\n\n    def print_or_update(message):\n        if (handle is None):\n            print(message)\n        else:\n            handle.update(IPython.display.Pretty(message))\n    parsed_args = shlex.split(args_string, comments=True, posix=True)\n    start_result = manager.start(parsed_args)\n    if isinstance(start_result, manager.StartLaunched):\n        _display(port=start_result.info.port, print_message=False, display_handle=handle)\n    elif isinstance(start_result, manager.StartReused):\n        template = \"Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. (Use '!kill {pid}' to kill it.)\"\n        message = template.format(port=start_result.info.port, pid=start_result.info.pid, delta=_time_delta_from_info(start_result.info))\n        print_or_update(message)\n        _display(port=start_result.info.port, print_message=False, display_handle=None)\n    elif isinstance(start_result, manager.StartFailed):\n\n        def format_stream(name, value):\n            if (value == ''):\n                return ''\n            elif (value is None):\n                return ('\\n<could not read %s>' % name)\n            else:\n                return ('\\nContents of %s:\\n%s' % (name, value.strip()))\n        message = ('ERROR: Failed to launch TensorBoard (exited with %d).%s%s' % (start_result.exit_code, format_stream('stderr', start_result.stderr), format_stream('stdout', start_result.stdout)))\n        print_or_update(message)\n    elif isinstance(start_result, manager.StartTimedOut):\n        message = ('ERROR: Timed out waiting for TensorBoard to start. It may still be running as pid %d.' % start_result.pid)\n        print_or_update(message)\n    else:\n        raise TypeError(('Unexpected result from `manager.start`: %r.\\nThis is a TensorBoard bug; please report it.' % start_result))", "docstring": "Launch and display a TensorBoard instance as if at the command line.\n\nArgs:\nargs_string: Command-line arguments to TensorBoard, to be\ninterpreted by `shlex.split`: e.g., \"--logdir ./logs --port 0\".\nShell metacharacters are not supported: e.g., \"--logdir 2>&1\" will\npoint the logdir at the literal directory named \"2>&1\".", "source": "codesearchnet"}
{"code": "def _get_short_value(cls, value, type_):\n    if type_ == 'CLASS':\n        return value.__name__\n    return None", "docstring": "Calculates the short value for an item.\n\nArgs:\nvalue: The value of the item that needs to be shortened.\ntype_(string): The type of the value.\n\nReturns:\nThe unqualified name of a class if type_ is 'CLASS'. None otherwise.", "source": "github-repos"}
{"code": "def compare_profiles(profile1, profile2):\n    length = len(profile1)\n    profile1 = np.array(list(profile1))\n    profile2 = np.array(list(profile2))\n    similarity_array = (profile1 == profile2)\n    matches = np.sum(similarity_array)\n    similarity_ratio = (matches / length)\n    return similarity_ratio", "docstring": "Given two profiles, determine the ratio of similarity, i.e.\nthe hamming distance between the strings.\n\nArgs:\nprofile1/2 (str): profile string\nReturns:\nsimilarity_ratio (float): the ratio of similiarity (0-1)", "source": "codesearchnet"}
{"code": "def _orthogonal_kernel(self, ksize, cin, cout):\n    if cin > cout:\n        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n    orth = self._orthogonal_matrix(cout)[0:cin, :]\n    if ksize == 1:\n        return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0)\n    p = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout))\n    for _ in range(ksize - 2):\n        temp = self._block_orth(self._symmetric_projection(cout), self._symmetric_projection(cout))\n        p = self._matrix_conv(p, temp)\n    for i in range(ksize):\n        for j in range(ksize):\n            p[i, j] = math_ops.matmul(orth, p[i, j])\n    return self._dict_to_tensor(p, ksize, ksize)", "docstring": "Construct orthogonal kernel for convolution.\n\nArgs:\nksize: Kernel size.\ncin: Number of input channels.\ncout: Number of output channels.\n\nReturns:\nAn [ksize, ksize, cin, cout] orthogonal kernel.\nRaises:\nValueError: If cin > cout.", "source": "github-repos"}
{"code": "def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=True):\n    print('X_train shape:', X_train.shape)\n    (w, b) = initialize_with_zeros(X_train.shape[0])\n    print('w shape:', w.shape)\n    (parameters, grads, costs) = LR_train(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n    w = parameters['w']\n    b = parameters['b']\n    print('w shape params:', w.shape)\n    Y_prediction_test = LR_predict(w, b, X_test)\n    Y_prediction_train = LR_predict(w, b, X_train)\n    print('train accuracy: {} %'.format((100 - (np.mean(np.abs((Y_prediction_train - Y_train))) * 100))))\n    print('test accuracy: {} %'.format((100 - (np.mean(np.abs((Y_prediction_test - Y_test))) * 100))))\n    d = {'costs': costs, 'Y_prediction_test': Y_prediction_test, 'Y_prediction_train': Y_prediction_train, 'w': w, 'b': b, 'learning_rate': learning_rate, 'num_iterations': num_iterations}\n    return d", "docstring": "Builds the logistic regression model by calling the function\nimplemented above\n\nArguments:\nX_train     training set represented by a numpy array of shape (dim, m_train)\nY_train     training labels represented by a numpy array (vector) of shape (1, m_train)\nX_test      test set represented by a numpy array of shape (dim, m_test)\nY_test      test labels represented by a numpy array (vector) of shape (1, m_test)\nnum_iterations  hyperparameter representing the number of iterations to optimize the parameters\nlearning_rate   hyperparameter representing the learning rate used in the update rule of optimize()\nprint_cost      Set to true to print the cost every 100 iterations\n\nReturns:\nd -- dictionary containing information about the model.", "source": "codesearchnet"}
{"code": "def read(self, path):\n        \n        with open(path, \"r\") as f:\n            for line in f:\n                line = line.strip()\n                match_obj_name = re.search(r\"^([A-Z][A-Z/ \\d]+),\", line)\n                if match_obj_name is not None:\n                    internal_name = match_obj_name.group(1)\n                    if internal_name in self._data:\n                        self._data[internal_name] = self._create_datadict(\n                            internal_name)\n                        data_line = line[len(internal_name) + 1:]\n                        vals = data_line.strip().split(',')\n                        self._data[internal_name].read(vals)\n                else:\n                    wd = WeatherData()\n                    wd.read(line.strip().split(','))\n                    self.add_weatherdata(wd)", "docstring": "Read EPW weather data from path.\n\nArgs:\npath (str): path to read weather data from", "source": "juraj-google-style"}
{"code": "def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs):\n    \n    timeout = kwargs.get(\"timeout\")\n    src = kwargs.get(\"src\")\n    unit = kwargs.setdefault(\"unit\", \"ms\")\n    for i in range(count):\n        output_text = \"ping '{}'\".format(dest_addr)\n        output_text += \" from '{}'\".format(src) if src else \"\"\n        output_text += \" ... \"\n        print(output_text, end=\"\")\n        delay = ping(dest_addr, seq=i, *args, **kwargs)\n        if delay is None:\n            print(\"Timeout > {}s\".format(timeout) if timeout else \"Timeout\")\n        else:\n            print(\"{value}{unit}\".format(value=int(delay), unit=unit))", "docstring": "Send pings to destination address with the given timeout and display the result.\n\nArgs:\ndest_addr: The destination address. Ex. \"192.168.1.1\"/\"example.com\"\ncount: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)\n*args and **kwargs: And all the other arguments available in ping() except `seq`.\n\nReturns:\nFormatted ping results printed.", "source": "juraj-google-style"}
{"code": "def Parse(self, conditions, host_data):\n    \n    result = CheckResult(check_id=self.check_id)\n    methods = self.SelectChecks(conditions)\n    result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])\n    return result", "docstring": "Runs methods that evaluate whether collected host_data has an issue.\n\nArgs:\nconditions: A list of conditions to determine which Methods to trigger.\nhost_data: A map of artifacts and rdf data.\n\nReturns:\nA CheckResult populated with Anomalies if an issue exists.", "source": "juraj-google-style"}
{"code": "def _GetBcastSubshape(subscripts):\n    start = subscripts.find(ellipsis)\n    if start == -1:\n        return (0, 0)\n    remaining = len(subscripts) - (start + len(ellipsis))\n    end = -remaining if remaining > 0 else None\n    return (start, end)", "docstring": "Returns a tuple denoting the slice mapping to ellipsis.\n\nFor a given subscript, returns a tuple (start, end) denoting the start\naxis index and the (negative) end axis index respectively. For any input\nTensor `x` described by the subscript, `x[start:end]` would be the slice\nrepresented by the ellipsis. E.g. For `ab...cd` returns `[1, -2]`.\n\nIf ellipsis is not present in `subscripts`, returns `(0, 0)`.\n\nArgs:\nsubscripts: A string denoting the einsum subscript.", "source": "github-repos"}
{"code": "def get_op(self, id: str, **kwargs: str) -> dict:\n        \n        path = self._get_path_for_op_id(id)\n        return self.get_path(path, kwargs)", "docstring": "Queries the ESI by looking up an operation id.\n\nEndpoints are cached, so calls to this method\nfor the same op and args will return the data\nfrom the cache instead of making the API call.\n\nArgs:\nid: operation id\nkwargs: data to populate the endpoint's URL variables\n\nReturns:\nESI data", "source": "juraj-google-style"}
{"code": "def json_get_fields(recipe, path=[]):\n    fields = {}\n    path = path[:]\n    if isinstance(recipe, dict):\n        if 'field' in recipe:\n            fields[recipe['field']['name']] = recipe['field']\n        else:\n            for key, value in recipe.items():\n                fields.update(json_get_fields(value, path + [key]))\n    elif isinstance(recipe, list) or isinstance(recipe, tuple):\n        for index, value in enumerate(recipe):\n            fields.update(json_get_fields(value, path + [index]))\n    if path == []:\n        return sorted(fields.values(), key=lambda f: f.get('order', 0))\n    else:\n        return fields", "docstring": "Recusrsively finds fields in script JSON and returns them as a list.\n\nField has format: { \"field\":{ \"name\":\"???\", \"kind\":\"???\", \"default\":???,\n\"description\":\"???\" }}\n\nArgs:\nrecipe: (dict) A dictionary representation fo the JSON script.\npath: (list) Stack that keeps track of recursion depth. Not used\nexternally.\n\nReturns:\nfields: (list or dictionary) A list or dictionary representing each field recipe found\nin the JSON.", "source": "github-repos"}
{"code": "def attach(self, droplet_id, region):\n        \n        return self.get_data(\n            \"volumes/%s/actions/\" % self.id,\n            type=POST,\n            params={\"type\": \"attach\",\n                    \"droplet_id\": droplet_id,\n                    \"region\": region}\n        )", "docstring": "Attach a Volume to a Droplet.\n\nArgs:\ndroplet_id: int - droplet id\nregion: string - slug identifier for the region", "source": "juraj-google-style"}
{"code": "def md5sum(string):\n    h = hashlib.new('md5')\n    h.update(string.encode('utf-8'))\n    return h.hexdigest()", "docstring": "Generate the md5 checksum for a string\n\nArgs:\nstring (Str): The string to be checksummed.\n\nReturns:\n(Str): The hex checksum.", "source": "codesearchnet"}
{"code": "def camel_to_snake(name):\n    \n    s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n    return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()", "docstring": "Converts CamelCase to snake_case.\n\nArgs:\nname (string): The name to convert from CamelCase to snake_case.\n\nReturns:\nstring: Converted string.", "source": "juraj-google-style"}
{"code": "def _prepare_socket_file(self, socket_path, default_prefix):\n        \n        if socket_path is not None:\n            if os.path.exists(socket_path):\n                raise Exception(\"Socket file {} exists!\".format(socket_path))\n            socket_dir = os.path.dirname(socket_path)\n            try_to_create_directory(socket_dir)\n            return socket_path\n        return self._make_inc_temp(\n            prefix=default_prefix, directory_name=self._sockets_dir)", "docstring": "Prepare the socket file for raylet and plasma.\n\nThis method helps to prepare a socket file.\n1. Make the directory if the directory does not exist.\n2. If the socket file exists, raise exception.\n\nArgs:\nsocket_path (string): the socket file to prepare.", "source": "juraj-google-style"}
{"code": "def _CompletionsFromArgs(fn_args):\n    completions = []\n    for arg in fn_args:\n        arg = arg.replace('_', '-')\n        completions.append(f'--{arg}')\n    return completions", "docstring": "Takes a list of fn args and returns a list of the fn's completion strings.\n\nArgs:\nfn_args: A list of the args accepted by a function.\nReturns:\nA list of possible completion strings for that function.", "source": "github-repos"}
{"code": "def _split_generators(self, dl_manager):\n    path = dl_manager.download_and_extract(_DOWNLOAD_URL)\n    return [tfds.core.SplitGenerator(name=tfds.Split.TEST, num_shards=1, gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})]", "docstring": "Return the test split of Cifar10.\n\nArgs:\ndl_manager: download manager object.\n\nReturns:\ntest split.", "source": "codesearchnet"}
{"code": "def get_or_create_direct_channel(cls, initiator_key, receiver_key):\n    existing = cls.objects.OR().filter(code_name=('%s_%s' % (initiator_key, receiver_key))).filter(code_name=('%s_%s' % (receiver_key, initiator_key)))\n    receiver_name = UserModel.objects.get(receiver_key).full_name\n    if existing:\n        channel = existing[0]\n    else:\n        channel_name = ('%s_%s' % (initiator_key, receiver_key))\n        channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()\n    with BlockSave(Subscriber):\n        Subscriber.objects.get_or_create(channel=channel, user_id=initiator_key, name=receiver_name)\n        Subscriber.objects.get_or_create(channel=channel, user_id=receiver_key, name=UserModel.objects.get(initiator_key).full_name)\n    return (channel, receiver_name)", "docstring": "Creates a  direct messaging channel between two user\n\nArgs:\ninitiator: User, who want's to make first contact\nreceiver: User, other party\n\nReturns:\n(Channel, receiver_name)", "source": "codesearchnet"}
{"code": "def to_json_string(self, use_diff: bool=True, ignore_metadata: bool=False) -> str:\n    if use_diff is True:\n        config_dict = self.to_diff_dict()\n    else:\n        config_dict = self.to_dict()\n    if ignore_metadata:\n        for metadata_field in METADATA_FIELDS:\n            config_dict.pop(metadata_field, None)\n\n    def convert_keys_to_string(obj):\n        if isinstance(obj, dict):\n            return {str(key): convert_keys_to_string(value) for key, value in obj.items()}\n        elif isinstance(obj, list):\n            return [convert_keys_to_string(item) for item in obj]\n        else:\n            return obj\n\n    def convert_dataclass_to_dict(obj):\n        if isinstance(obj, dict):\n            return {key: convert_dataclass_to_dict(value) for key, value in obj.items()}\n        elif is_dataclass(obj):\n            return obj.to_dict()\n        else:\n            return obj\n    config_dict = convert_keys_to_string(config_dict)\n    config_dict = convert_dataclass_to_dict(config_dict)\n    return json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nArgs:\nuse_diff (`bool`, *optional*, defaults to `True`):\nIf set to `True`, only the difference between the config instance and the default `GenerationConfig()`\nis serialized to JSON string.\nignore_metadata (`bool`, *optional*, defaults to `False`):\nWhether to ignore the metadata fields present in the instance\n\nReturns:\n`str`: String containing all the attributes that make up this configuration instance in JSON format.", "source": "github-repos"}
{"code": "def hertz_to_octave(freq: Union[float, np.ndarray], tuning: Optional[float]=0.0, bins_per_octave: Optional[int]=12):\n    stuttgart_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave)\n    octave = np.log2(freq / (float(stuttgart_pitch) / 16))\n    return octave", "docstring": "Convert frequency from hertz to fractional octave numbers.\nAdapted from *librosa*.\n\nArgs:\nfreq (`float` or `np.ndarray`):\nThe frequency, or multiple frequencies, in hertz (Hz).\ntuning (`float`, defaults to `0.`):\nTuning deviation from the Stuttgart pitch (A440) in (fractional) bins per octave.\nbins_per_octave (`int`, defaults to `12`):\nNumber of bins per octave.\n\nReturns:\n`float` or `np.ndarray`: The frequencies on the octave scale.", "source": "github-repos"}
{"code": "def verify_calling_thread(self, should_be_emulation, message=None):\n    if (should_be_emulation == self._on_emulation_thread()):\n        return\n    if (message is None):\n        message = 'Operation performed on invalid thread'\n    raise InternalError(message)", "docstring": "Verify if the calling thread is or is not the emulation thread.\n\nThis method can be called to make sure that an action is being taken\nin the appropriate context such as not blocking the event loop thread\nor modifying an emulate state outside of the event loop thread.\n\nIf the verification fails an InternalError exception is raised,\nallowing this method to be used to protect other methods from being\ncalled in a context that could deadlock or cause race conditions.\n\nArgs:\nshould_be_emulation (bool): True if this call should be taking place\non the emulation, thread, False if it must not take place on\nthe emulation thread.\nmessage (str): Optional message to include when raising the exception.\nOtherwise a generic message is used.\n\nRaises:\nInternalError: When called from the wrong thread.", "source": "codesearchnet"}
{"code": "def setReplicationPolicy(self, pid, policy, serialVersion, vendorSpecific=None):\n        \n        response = self.setReplicationPolicyResponse(\n            pid, policy, serialVersion, vendorSpecific\n        )\n        return self._read_boolean_response(response)", "docstring": "See Also: setReplicationPolicyResponse()\n\nArgs:\npid:\npolicy:\nserialVersion:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def rate_to_mcs(rate, bw=20, long_gi=True):\n    if (bw not in [20, 40, 80, 160]):\n        raise Exception(('Unknown bandwidth: %d MHz' % bw))\n    idx = int(((math.log((bw / 10), 2) - 1) * 2))\n    if (not long_gi):\n        idx += 1\n    for (mcs, rates) in MCS_TABLE.items():\n        if (abs((rates[idx] - rate)) < 0.001):\n            return mcs\n    for (idx, r) in enumerate(DOT11A_RATES):\n        if (abs((r - rate)) < 0.001):\n            return idx\n    raise Exception(('MCS not found: rate=%f, bw=%d, long_gi=%s' % (rate, bw, long_gi)))", "docstring": "Convert bit rate to MCS index.\n\nArgs:\nrate (float): bit rate in Mbps\nbw (int): bandwidth, 20, 40, 80, ...\nlong_gi (bool): True if long GI is used.\n\nReturns:\nmcs (int): MCS index\n\n>>> rate_to_mcs(120, bw=40, long_gi=False)\n5", "source": "codesearchnet"}
{"code": "def _local_var_name(splittable_dimensions, assignment):\n    assignment_string = []\n    for splittable in sorted(splittable_dimensions):\n        if (splittable in assignment):\n            assignment_string.append('{}:{}'.format(splittable, assignment[splittable]))\n        else:\n            assignment_string.append('{}'.format(splittable))\n    return (('y_(' + ','.join(assignment_string)) + ')')", "docstring": "Name for a local variable.\n\nArgs:\nsplittable_dimensions: frozenset of names of splittable dimensions.\nassignment: dict from names of splittable dimensions to names of mesh\ndimensions.\n\nReturns:\nA string, the variable name.", "source": "codesearchnet"}
{"code": "def MergeAttributeContainers(self, callback=None, maximum_number_of_containers=0):\n    if (maximum_number_of_containers < 0):\n        raise ValueError('Invalid maximum number of containers')\n    if (not self._cursor):\n        self._Open()\n        self._ReadStorageMetadata()\n        self._container_types = self._GetContainerTypes()\n    number_of_containers = 0\n    while (self._active_cursor or self._container_types):\n        if (not self._active_cursor):\n            self._PrepareForNextContainerType()\n        if (maximum_number_of_containers == 0):\n            rows = self._active_cursor.fetchall()\n        else:\n            number_of_rows = (maximum_number_of_containers - number_of_containers)\n            rows = self._active_cursor.fetchmany(size=number_of_rows)\n        if (not rows):\n            self._active_cursor = None\n            continue\n        for row in rows:\n            identifier = identifiers.SQLTableIdentifier(self._active_container_type, row[0])\n            if (self._compression_format == definitions.COMPRESSION_FORMAT_ZLIB):\n                serialized_data = zlib.decompress(row[1])\n            else:\n                serialized_data = row[1]\n            attribute_container = self._DeserializeAttributeContainer(self._active_container_type, serialized_data)\n            attribute_container.SetIdentifier(identifier)\n            if (self._active_container_type == self._CONTAINER_TYPE_EVENT_TAG):\n                event_identifier = identifiers.SQLTableIdentifier(self._CONTAINER_TYPE_EVENT, attribute_container.event_row_identifier)\n                attribute_container.SetEventIdentifier(event_identifier)\n                del attribute_container.event_row_identifier\n            if callback:\n                callback(self._storage_writer, attribute_container)\n            self._add_active_container_method(attribute_container)\n            number_of_containers += 1\n        if ((maximum_number_of_containers != 0) and (number_of_containers >= maximum_number_of_containers)):\n            return False\n    self._Close()\n    os.remove(self._path)\n    return True", "docstring": "Reads attribute containers from a task storage file into the writer.\n\nArgs:\ncallback (function[StorageWriter, AttributeContainer]): function to call\nafter each attribute container is deserialized.\nmaximum_number_of_containers (Optional[int]): maximum number of\ncontainers to merge, where 0 represent no limit.\n\nReturns:\nbool: True if the entire task storage file has been merged.\n\nRaises:\nRuntimeError: if the add method for the active attribute container\ntype is missing.\nOSError: if the task storage file cannot be deleted.\nValueError: if the maximum number of containers is a negative value.", "source": "codesearchnet"}
{"code": "def update_submit_s3_uri(estimator, job_name):\n    \n    if estimator.uploaded_code is None:\n        return\n\n    pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'\n\n    \n    \n    submit_uri = estimator.uploaded_code.s3_prefix\n    submit_uri = re.sub(pattern, job_name, submit_uri)\n    script_name = estimator.uploaded_code.script_name\n    estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name)", "docstring": "Updated the S3 URI of the framework source directory in given estimator.\n\nArgs:\nestimator (sagemaker.estimator.Framework): The Framework estimator to update.\njob_name (str): The new job name included in the submit S3 URI\n\nReturns:\nstr: The updated S3 URI of framework source directory", "source": "juraj-google-style"}
{"code": "def plot_carriers(self, temp=300):\n    import matplotlib.pyplot as plt\n    plt.semilogy(self._bz.mu_steps, abs((self._bz._carrier_conc[temp] / (self._bz.vol * 1e-24))), linewidth=3.0, color='r')\n    self._plot_bg_limits()\n    self._plot_doping(temp)\n    plt.xlim((- 0.5), (self._bz.gap + 0.5))\n    plt.ylim(100000000000000.0, 1e+22)\n    plt.ylabel('carrier concentration (cm-3)', fontsize=30.0)\n    plt.xlabel('E-E$_f$ (eV)', fontsize=30)\n    plt.xticks(fontsize=25)\n    plt.yticks(fontsize=25)\n    return plt", "docstring": "Plot the carrier concentration in function of Fermi level\n\nArgs:\ntemp: the temperature\n\nReturns:\na matplotlib object", "source": "codesearchnet"}
{"code": "def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:\n    return reference_model_inputs", "docstring": "Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq\nmodels which have the encoder and decoder exported as separate ONNX files.\n\nArgs:\nreference_model_inputs ([`Mapping[str, Tensor]`):\nReference inputs for the model.\n\nReturns:\n`Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function", "source": "github-repos"}
{"code": "def check_output_despite_error(args):\n    try:\n        output = subprocess.check_output(args, shell=True, stderr=subprocess.STDOUT)\n    except subprocess.CalledProcessError as e:\n        output = e.output\n    return output.strip()", "docstring": "Get output of args from command line, even if there are errors.\n\nArgs:\nargs: a list of command line args.\n\nReturns:\noutput as string.", "source": "github-repos"}
{"code": "def __new__(cls, *args, **kwargs):\n        \n        instance = super(_AutoFinalizedObjectBase, cls).__new__(cls)\n        instance._finalize_called = False\n        return instance", "docstring": "Creates a new object instance and adds the private finalizer\nattributes to it.\n\nReturns: new object instance\n\nArguments:\n* *args, **kwargs -- ignored", "source": "juraj-google-style"}
{"code": "def from_b58check(private_key):\n        \n        b58dec = base58.b58decode_check(private_key)\n        version = b58dec[0]\n        assert version in [PrivateKey.TESTNET_VERSION,\n                           PrivateKey.MAINNET_VERSION]\n\n        return PrivateKey(int.from_bytes(b58dec[1:], 'big'))", "docstring": "Decodes a Base58Check encoded private-key.\n\nArgs:\nprivate_key (str): A Base58Check encoded private key.\n\nReturns:\nPrivateKey: A PrivateKey object", "source": "juraj-google-style"}
{"code": "def _add_exac(self, variant_obj, info_dict):\n        \n        exac = None\n        exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\n        for key in exac_keys:\n            if info_dict.get(key):\n                exac = float(info_dict[key])\n        \n        if not exac:\n            for transcript in variant_obj.transcripts:\n                exac_raw = transcript.ExAC_MAF\n                if exac_raw:\n                    exac = float(exac_raw.split(':')[-1])\n        \n        if exac:\n            variant_obj.add_frequency('ExAC', exac)", "docstring": "Add the gmaf frequency\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ninfo_dict (dict): A info dictionary", "source": "juraj-google-style"}
{"code": "def create_nic(access_token, subscription_id, resource_group, nic_name, public_ip_id, subnet_id, location, nsg_id=None):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkInterfaces/', nic_name, '?api-version=', NETWORK_API])\n    nic_body = {'location': location}\n    ipconfig = {'name': 'ipconfig1'}\n    ipc_properties = {'privateIPAllocationMethod': 'Dynamic'}\n    ipc_properties['publicIPAddress'] = {'id': public_ip_id}\n    ipc_properties['subnet'] = {'id': subnet_id}\n    ipconfig['properties'] = ipc_properties\n    properties = {'ipConfigurations': [ipconfig]}\n    if (nsg_id is not None):\n        properties['networkSecurityGroup'] = {'id': nsg_id}\n    nic_body['properties'] = properties\n    body = json.dumps(nic_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a network interface with an associated public ip address.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnic_name (str): Name of the new NIC.\npublic_ip_id (str): Public IP address resource id.\nsubnetid (str): Subnet resource id.\nlocation (str): Azure data center location. E.g. westus.\nnsg_id (str): Optional Network Secruity Group resource id.\n\nReturns:\nHTTP response. NIC JSON body.", "source": "codesearchnet"}
{"code": "def value_from_message(self, message):\n    message = super(DateTimeField, self).value_from_message(message)\n    if (message.time_zone_offset is None):\n        return datetime.datetime.utcfromtimestamp((message.milliseconds / 1000.0))\n    milliseconds = (message.milliseconds - (60000 * message.time_zone_offset))\n    timezone = util.TimeZoneOffset(message.time_zone_offset)\n    return datetime.datetime.fromtimestamp((milliseconds / 1000.0), tz=timezone)", "docstring": "Convert DateTimeMessage to a datetime.\n\nArgs:\nA DateTimeMessage instance.\n\nReturns:\nA datetime instance.", "source": "codesearchnet"}
{"code": "def wrap_http_for_jwt_access(credentials, http):\n    orig_request_method = http.request\n    wrap_http_for_auth(credentials, http)\n    authenticated_request_method = http.request\n\n    def new_request(uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):\n        if ('aud' in credentials._kwargs):\n            if ((credentials.access_token is None) or credentials.access_token_expired):\n                credentials.refresh(None)\n            return request(authenticated_request_method, uri, method, body, headers, redirections, connection_type)\n        else:\n            headers = _initialize_headers(headers)\n            _apply_user_agent(headers, credentials.user_agent)\n            uri_root = uri.split('?', 1)[0]\n            (token, unused_expiry) = credentials._create_token({'aud': uri_root})\n            headers['Authorization'] = ('Bearer ' + token)\n            return request(orig_request_method, uri, method, body, clean_headers(headers), redirections, connection_type)\n    http.request = new_request\n    http.request.credentials = credentials", "docstring": "Prepares an HTTP object's request method for JWT access.\n\nWraps HTTP requests with logic to catch auth failures (typically\nidentified via a 401 status code). In the event of failure, tries\nto refresh the token used and then retry the original request.\n\nArgs:\ncredentials: _JWTAccessCredentials, the credentials used to identify\na service account that uses JWT access tokens.\nhttp: httplib2.Http, an http object to be used to make\nauth requests.", "source": "codesearchnet"}
{"code": "def token_validate_with_login(self, **kwargs):\n    path = self._get_path('token_validate_with_login')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Authenticate a user with a TMDb username and password.  The user\nmust have a verified email address and be registered on TMDb.\n\nArgs:\nrequest_token: The token you generated for the user to approve.\nusername: The user's username on TMDb.\npassword: The user's password on TMDb.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def zip_file(self, app_path, app_name, tmp_path):\n        \n        \n        zip_file = os.path.join(app_path, self.args.outdir, app_name)\n        zip_file_zip = '{}.zip'.format(zip_file)\n        zip_file_tcx = '{}.tcx'.format(zip_file)\n        shutil.make_archive(zip_file, 'zip', tmp_path, app_name)\n        shutil.move(zip_file_zip, zip_file_tcx)\n        self._app_packages.append(zip_file_tcx)\n        \n        self.package_data['package'].append({'action': 'App Package:', 'output': zip_file_tcx})", "docstring": "Zip the App with tcex extension.\n\nArgs:\napp_path (str): The path of the current project.\napp_name (str): The name of the App.\ntmp_path (str): The temp output path for the zip.", "source": "juraj-google-style"}
{"code": "def gzip_uncompress(data, truncated=False):\n    \n    decompressor = SimpleGzipDecompressor()\n    inflated_data = decompressor.decompress(data)\n\n    if not truncated:\n        inflated_data += decompressor.flush()\n\n    return inflated_data", "docstring": "Uncompress gzip data.\n\nArgs:\ndata (bytes): The gzip data.\ntruncated (bool): If True, the decompressor is not flushed.\n\nThis is a convenience function.\n\nReturns:\nbytes: The inflated data.\n\nRaises:\nzlib.error", "source": "juraj-google-style"}
{"code": "def _update_from_file(self, filename):\n        \n        if os.path.exists(filename):\n            try:\n                with open(filename, 'r') as config_file:\n                    yaml_dict = yaml.safe_load(config_file.read())\n                    if yaml_dict is not None:\n                        self._update_dict(self._config, yaml_dict)\n            except IsADirectoryError:\n                raise ConfigLoadError(\n                    'The specified configuration file is a directory not a file')\n        else:\n            raise ConfigLoadError('The config file {} does not exist'.format(filename))", "docstring": "Helper method to update an existing configuration with the values from a file.\n\nLoads a configuration file and replaces all values in the existing configuration\ndictionary with the values from the file.\n\nArgs:\nfilename (str): The path and name to the configuration file.", "source": "juraj-google-style"}
{"code": "def update_institute(self, internal_id, sanger_recipient=None, coverage_cutoff=None, frequency_cutoff=None, display_name=None, remove_sanger=None, phenotype_groups=None, group_abbreviations=None, add_groups=None):\n    add_groups = (add_groups or False)\n    institute_obj = self.institute(internal_id)\n    if (not institute_obj):\n        raise IntegrityError('Institute {} does not exist in database'.format(internal_id))\n    updates = {}\n    updated_institute = institute_obj\n    if sanger_recipient:\n        user_obj = self.user(sanger_recipient)\n        if (not user_obj):\n            raise IntegrityError('user {} does not exist in database'.format(sanger_recipient))\n        LOG.info('Updating sanger recipients for institute: {0} with {1}'.format(internal_id, sanger_recipient))\n        updates['$push'] = {'sanger_recipients': remove_sanger}\n    if remove_sanger:\n        LOG.info('Removing sanger recipient {0} from institute: {1}'.format(remove_sanger, internal_id))\n        updates['$pull'] = {'sanger_recipients': remove_sanger}\n    if coverage_cutoff:\n        LOG.info('Updating coverage cutoff for institute: {0} to {1}'.format(internal_id, coverage_cutoff))\n        updates['$set'] = {'coverage_cutoff': coverage_cutoff}\n    if frequency_cutoff:\n        LOG.info('Updating frequency cutoff for institute: {0} to {1}'.format(internal_id, frequency_cutoff))\n        if (not ('$set' in updates)):\n            updates['$set'] = {}\n        updates['$set'] = {'frequency_cutoff': frequency_cutoff}\n    if display_name:\n        LOG.info('Updating display name for institute: {0} to {1}'.format(internal_id, display_name))\n        if (not ('$set' in updates)):\n            updates['$set'] = {}\n        updates['$set'] = {'display_name': display_name}\n    if phenotype_groups:\n        if group_abbreviations:\n            group_abbreviations = list(group_abbreviations)\n        existing_groups = {}\n        if add_groups:\n            existing_groups = institute_obj.get('phenotype_groups', PHENOTYPE_GROUPS)\n        for (i, hpo_term) in enumerate(phenotype_groups):\n            hpo_obj = self.hpo_term(hpo_term)\n            if (not hpo_obj):\n                raise IntegrityError('Term {} does not exist'.format(hpo_term))\n            hpo_id = hpo_obj['hpo_id']\n            description = hpo_obj['description']\n            abbreviation = None\n            if group_abbreviations:\n                abbreviation = group_abbreviations[i]\n            existing_groups[hpo_term] = {'name': description, 'abbr': abbreviation}\n        updates['$set'] = {'phenotype_groups': existing_groups}\n    if updates:\n        if (not ('$set' in updates)):\n            updates['$set'] = {}\n        updates['$set']['updated_at'] = datetime.now()\n        updated_institute = self.institute_collection.find_one_and_update({'_id': internal_id}, updates, return_document=pymongo.ReturnDocument.AFTER)\n        LOG.info('Institute updated')\n    return updated_institute", "docstring": "Update the information for an institute\n\nArgs:\ninternal_id(str): The internal institute id\nsanger_recipient(str): Email adress to add for sanger order\ncoverage_cutoff(int): Update coverage cutoff\nfrequency_cutoff(float): New frequency cutoff\ndisplay_name(str): New display name\nremove_sanger(str): Email adress for sanger user to be removed\nphenotype_groups(iterable(str)): New phenotype groups\ngroup_abbreviations(iterable(str))\nadd_groups: If groups should be added. 
If False replace groups\n\nReturns:\nupdated_institute(dict)", "source": "codesearchnet"}
{"code": "def show(self, app_path, browser=None, new='tab'):\n    if (not app_path.startswith('/')):\n        raise ValueError('app_path must start with a /')\n    address_string = 'localhost'\n    if ((self.address is not None) and (self.address != '')):\n        address_string = self.address\n    url = ('http:\n    from bokeh.util.browser import view\n    view(url, browser=browser, new=new)", "docstring": "Opens an app in a browser window or tab.\n\nThis method is useful for testing or running Bokeh server applications\non a local machine but should not call when running Bokeh server for\nan actual deployment.\n\nArgs:\napp_path (str) : the app path to open\nThe part of the URL after the hostname:port, with leading slash.\n\nbrowser (str, optional) : browser to show with (default: None)\nFor systems that support it, the **browser** argument allows\nspecifying which browser to display in, e.g. \"safari\", \"firefox\",\n\"opera\", \"windows-default\" (see the ``webbrowser`` module\ndocumentation in the standard lib for more details).\n\nnew (str, optional) : window or tab (default: \"tab\")\nIf ``new`` is 'tab', then opens a new tab.\nIf ``new`` is 'window', then opens a new window.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def log_request(\n        self, request: str, trim_log_values: bool = False, **kwargs: Any\n    ) -> None:\n        \n        return log_(request, request_log, \"info\", trim=trim_log_values, **kwargs)", "docstring": "Log a request.\n\nArgs:\nrequest: The JSON-RPC request string.\ntrim_log_values: Log an abbreviated version of the request.", "source": "juraj-google-style"}
{"code": "def left_shift(x, y):\n    if any_symbolic_tensors((x, y)):\n        return LeftShift().symbolic_call(x, y)\n    return backend.numpy.left_shift(x, y)", "docstring": "Shift the bits of an integer to the left.\n\nBits are shifted to the left by appending `y` 0s at the right of `x`.\nSince the internal representation of numbers is in binary format, this\noperation is equivalent to multiplying `x` by `2**y`.\n\nArgs:\nx: Input integer tensor.\ny: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"}
{"code": "def run_from_ufos(self, ufos, output=(), **kwargs):\n    if (set(output) == {'ufo'}):\n        return\n    ufo_paths = []\n    if isinstance(ufos, basestring):\n        ufo_paths = glob.glob(ufos)\n        ufos = [Font(x) for x in ufo_paths]\n    elif isinstance(ufos, list):\n        ufos = [(Font(x) if isinstance(x, basestring) else x) for x in ufos]\n        ufo_paths = [x.path for x in ufos]\n    else:\n        raise FontmakeError('UFOs parameter is neither a defcon.Font object, a path or a glob, nor a list of any of these.', ufos)\n    need_reload = False\n    if ('otf' in output):\n        self.build_otfs(ufos, **kwargs)\n        need_reload = True\n    if ('ttf' in output):\n        if need_reload:\n            ufos = [Font(path) for path in ufo_paths]\n        self.build_ttfs(ufos, **kwargs)\n        need_reload = True", "docstring": "Run toolchain from UFO sources.\n\nArgs:\nufos: List of UFO sources, as either paths or opened objects.\noutput: List of output formats to generate.\nkwargs: Arguments passed along to save_otfs.", "source": "codesearchnet"}
{"code": "def StopTaskStorage(self, abort=False):\n    if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n        raise IOError('Unsupported storage type.')\n    if os.path.isdir(self._merge_task_storage_path):\n        if abort:\n            shutil.rmtree(self._merge_task_storage_path)\n        else:\n            os.rmdir(self._merge_task_storage_path)\n    if os.path.isdir(self._processed_task_storage_path):\n        if abort:\n            shutil.rmtree(self._processed_task_storage_path)\n        else:\n            os.rmdir(self._processed_task_storage_path)\n    if os.path.isdir(self._task_storage_path):\n        if abort:\n            shutil.rmtree(self._task_storage_path)\n        else:\n            os.rmdir(self._task_storage_path)\n    self._merge_task_storage_path = None\n    self._processed_task_storage_path = None\n    self._task_storage_path = None", "docstring": "Removes the temporary path for the task storage.\n\nThe results of tasks will be lost on abort.\n\nArgs:\nabort (bool): True to indicate the stop is issued on abort.\n\nRaises:\nIOError: if the storage type is not supported.\nOSError: if the storage type is not supported.", "source": "codesearchnet"}
{"code": "def extract_cookies(self, response, request, referrer_host=None):\n        \n        new_response = HTTPResponseInfoWrapper(response)\n        new_request = convert_http_request(request, referrer_host)\n\n        self._cookie_jar.extract_cookies(new_response, new_request)", "docstring": "Wrapped ``extract_cookies``.\n\nArgs:\nresponse: An instance of :class:`.http.request.Response`.\nrequest: An instance of :class:`.http.request.Request`.\nreferrer_host (str): An hostname or IP address of the referrer\nURL.", "source": "juraj-google-style"}
{"code": "def with_division(self, division):\n        \n        if division is None:\n            division = ''\n        division = slugify(division)\n        self._validate_division(division)\n        self.division = division\n        return self", "docstring": "Add a division segment\n\nArgs:\ndivision (str): Official name of an electoral division.\n\nReturns:\nIdBuilder\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def select_savename(self, original_filename):\n        \n        if self.edit_filetypes is None:\n            self.edit_filetypes = get_edit_filetypes()\n        if self.edit_filters is None:\n            self.edit_filters = get_edit_filters()\n\n        \n        \n        \n        if is_kde_desktop() and not is_anaconda():\n            filters = ''\n            selectedfilter = ''\n        else:\n            filters = self.edit_filters\n            selectedfilter = get_filter(self.edit_filetypes,\n                                        osp.splitext(original_filename)[1])\n\n        self.redirect_stdio.emit(False)\n        filename, _selfilter = getsavefilename(self, _(\"Save file\"),\n                                    original_filename,\n                                    filters=filters,\n                                    selectedfilter=selectedfilter,\n                                    options=QFileDialog.HideNameFilterDetails)\n        self.redirect_stdio.emit(True)\n        if filename:\n            return osp.normpath(filename)\n        return None", "docstring": "Select a name to save a file.\n\nArgs:\noriginal_filename: Used in the dialog to display the current file\npath and name.\n\nReturns:\nNormalized path for the selected file name or None if no name was\nselected.", "source": "juraj-google-style"}
{"code": "def get_maskformer_resize_output_image_size(image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int]=None, size_divisor: int=0, default_to_square: bool=True, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:\n    output_size = get_resize_output_image_size(input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format)\n    if size_divisor > 0:\n        height, width = output_size\n        height = int(math.ceil(height / size_divisor) * size_divisor)\n        width = int(math.ceil(width / size_divisor) * size_divisor)\n        output_size = (height, width)\n    return output_size", "docstring": "Computes the output size given the desired size.\n\nArgs:\nimage (`np.ndarray`):\nThe input image.\nsize (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):\nThe size of the output image.\nmax_size (`int`, *optional*):\nThe maximum size of the output image.\nsize_divisor (`int`, *optional*, defaults to 0):\nIf `size_divisor` is given, the output image size will be divisible by the number.\ndefault_to_square (`bool`, *optional*, defaults to `True`):\nWhether to default to square if no size is provided.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If unset, will use the inferred format from the input.\n\nReturns:\n`Tuple[int, int]`: The output size.", "source": "github-repos"}
{"code": "def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):\n    plt = pretty_plot(**kwargs)\n    pp = np.polyfit(x, y, deg)\n    xp = np.linspace(min(x), max(x), 200)\n    plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o')\n    if xlabel:\n        plt.xlabel(xlabel)\n    if ylabel:\n        plt.ylabel(ylabel)\n    return plt", "docstring": "Convenience method to plot data with trend lines based on polynomial fit.\n\nArgs:\nx: Sequence of x data.\ny: Sequence of y data.\ndeg (int): Degree of polynomial. Defaults to 1.\nxlabel (str): Label for x-axis.\nylabel (str): Label for y-axis.\n\\\\*\\\\*kwargs: Keyword args passed to pretty_plot.\n\nReturns:\nmatplotlib.pyplot object.", "source": "codesearchnet"}
{"code": "def __new__(cls, obj=None, prop=None, func=None):\n        \n        new = super(SinonBase, cls).__new__(cls)\n        if func:\n            new.__init__(obj, prop, func)\n        else:\n            new.__init__(obj, prop)\n        cls._queue.append(new)\n        return weakref.proxy(new)", "docstring": "Constructor of SinonBase\nIt will new true base but return a proxy of weakref and store it in _queue\nArgs:\nobj: None / function / instance method / module / class\nInspected target\nprop: None / string\nInspected target when obj contains callable things\nfunc: function / instance method\nONLY used by stub, it will replace original target\nReturn:\nweakref", "source": "juraj-google-style"}
{"code": "def GetOutputDir(self, base_dir, config_filename):\n    \n    return os.path.join(base_dir,\n                        os.path.basename(config_filename.replace(\".yaml\", \"\")))", "docstring": "Add the repack config filename onto the base output directory.\n\nThis allows us to repack lots of different configs to the same installer\nname and still be able to distinguish them.\n\nArgs:\nbase_dir: output directory string\nconfig_filename: the secondary config filename string\n\nReturns:\nString to be used as output directory for this repack.", "source": "juraj-google-style"}
{"code": "def __init__(self, resolver_context):\n    \n    super(TSKPartitionFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._tsk_volume = None", "docstring": "Initializes a file system object.\n\nArgs:\nresolver_context (Context): a resolver context.", "source": "juraj-google-style"}
{"code": "def _attend_over_memory(self, memory):\n    attention_mlp = basic.BatchApply(mlp.MLP(([self._mem_size] * self._attention_mlp_layers)))\n    for _ in range(self._num_blocks):\n        attended_memory = self._multihead_attention(memory)\n        memory = basic.BatchApply(layer_norm.LayerNorm())((memory + attended_memory))\n        memory = basic.BatchApply(layer_norm.LayerNorm())((attention_mlp(memory) + memory))\n    return memory", "docstring": "Perform multiheaded attention over `memory`.\n\nArgs:\nmemory: Current relational memory.\n\nReturns:\nThe attended-over memory.", "source": "codesearchnet"}
{"code": "def read_local_config(cfg):\n    try:\n        if os.path.exists(cfg):\n            config = import_file_object(cfg)\n            return config\n        else:\n            logger.warning(('%s: local config file (%s) not found, cannot be read' % (inspect.stack()[0][3], str(cfg))))\n    except IOError as e:\n        logger.warning(('import_file_object: %s error opening %s' % (str(e), str(cfg))))\n    return {}", "docstring": "Parses local config file for override values\n\nArgs:\n:local_file (str):  filename of local config file\n\nReturns:\ndict object of values contained in local config file", "source": "codesearchnet"}
{"code": "def add_dos(self, label, dos):\n        \n        energies = dos.energies - dos.efermi if self.zero_at_efermi \\\n            else dos.energies\n        densities = dos.get_smeared_densities(self.sigma) if self.sigma \\\n            else dos.densities\n        efermi = dos.efermi\n        self._doses[label] = {'energies': energies, 'densities': densities,\n                              'efermi': efermi}", "docstring": "Adds a dos for plotting.\n\nArgs:\nlabel:\nlabel for the DOS. Must be unique.\ndos:\nDos object", "source": "juraj-google-style"}
{"code": "def setitem(self, axis, key, value):\n        \n\n        def setitem(df, internal_indices=[]):\n            def _setitem():\n                if len(internal_indices) == 1:\n                    if axis == 0:\n                        df[df.columns[internal_indices[0]]] = value\n                    else:\n                        df.iloc[internal_indices[0]] = value\n                else:\n                    if axis == 0:\n                        df[df.columns[internal_indices]] = value\n                    else:\n                        df.iloc[internal_indices] = value\n\n            try:\n                _setitem()\n            except ValueError:\n                \n                df = df.copy()\n                _setitem()\n            return df\n\n        if axis == 0:\n            numeric_indices = list(self.columns.get_indexer_for([key]))\n        else:\n            numeric_indices = list(self.index.get_indexer_for([key]))\n        prepared_func = self._prepare_method(setitem)\n        if is_list_like(value):\n            new_data = self.data.apply_func_to_select_indices_along_full_axis(\n                axis, prepared_func, numeric_indices, keep_remaining=True\n            )\n        else:\n            new_data = self.data.apply_func_to_select_indices(\n                axis, prepared_func, numeric_indices, keep_remaining=True\n            )\n        return self.__constructor__(new_data, self.index, self.columns)", "docstring": "Set the column defined by `key` to the `value` provided.\n\nArgs:\nkey: The column name to set.\nvalue: The value to set the column to.\n\nReturns:\nA new QueryCompiler", "source": "juraj-google-style"}
{"code": "def json_dict(json_data):\n    if isinstance(json_data, dict):\n        return json_data\n    elif isinstance(json_data, basestring):\n        return json.loads(json_data, object_hook=OrderedDict)\n    else:\n        raise TypeError(\"'json_data' must be a dictionary or valid JSON string; received: {!r}\".format(json_data))", "docstring": "Given a dictionary or JSON string; return a dictionary.\n\nArgs:\njson_data(dict, str): Input JSON object.\n\nReturns:\nA Python dictionary with the contents of the JSON object.\n\nRaises:\nTypeError: If the input object is not a dictionary or string.", "source": "codesearchnet"}
{"code": "def get_version_from_cache_dir(src_file):\n    \n    if src_file is None:\n        return None\n\n    tmp_dir = local.path(str(CFG[\"tmp_dir\"]))\n    if tmp_dir.exists():\n        cache_file = tmp_dir / src_file\n        dir_hash = get_hash_of_dirs(cache_file)\n        if dir_hash is None:\n            return None\n        if len(str(dir_hash)) <= 7:\n            return str(dir_hash)\n        return str(dir_hash)[:7]\n    return None", "docstring": "Creates a version for a project out of the hash.\n\nThe hash is taken from the directory of the source file.\n\nArgs:\nsrc_file: The source file of the project using this function.\n\nReturns:\nEither returns the first 8 digits of the hash as string,\nthe entire hash as a string if the hash consists out of less\nthan 7 digits or None if the path is incorrect.", "source": "juraj-google-style"}
{"code": "def filter_by_pattern(self, pattern):\n        \n        _filt_values, _filt_datetimes = self._filter_by_pattern(pattern)\n        if self._enumeration is None:\n            self._get_mutable_enumeration()\n        col_obj = self._enumeration['mutable'][self._collection_type]\n        collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)\n        collection._validated_a_period = self._validated_a_period\n        return collection", "docstring": "Filter the Data Collection based on a list of booleans.\n\nArgs:\npattern: A list of True/False values.  Typically, this is a list\nwith a length matching the length of the Data Collections values\nbut it can also be a pattern to be repeated over the Data Collection.\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def __init__(self, filename=None):\n        \n        self.idx = {'http': 0, 'https': 0}\n        self.test_url = {\n            'http': 'http:\n            'https': 'https:\n        }\n        self.proxies = {'http': {}, 'https': {}}\n        self.addr_list = {'http': [], 'https': []}\n        self.dec_ratio = 0.9\n        self.inc_ratio = 1 / self.dec_ratio\n        self.weight_thr = 0.2\n        self.logger = logging.getLogger(__name__)\n        if filename is not None:\n            self.load(filename)", "docstring": "Init the pool from a json file.\n\nArgs:\nfilename (str, optional): if the filename is provided, proxies\nwill be load from it.", "source": "juraj-google-style"}
{"code": "def on_the_air(self, **kwargs):\n    path = self._get_path('on_the_air')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of TV shows that are currently on the air. This query\nlooks for any TV show that has an episode with an air date in the\nnext 7 days.\n\nArgs:\npage: (optional) Minimum 1, maximum 1000.\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def Transfer(self, wallet, from_addr, to_addr, amount, tx_attributes=None):\n    if (not tx_attributes):\n        tx_attributes = []\n    sb = ScriptBuilder()\n    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'transfer', [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)])\n    (tx, fee, results, num_ops, engine_success) = test_invoke(sb.ToArray(), wallet, [], from_addr=from_addr, invoke_attrs=tx_attributes)\n    return (tx, fee, results)", "docstring": "Transfer a specified amount of the NEP5Token to another address.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\nfrom_addr (str): public address of the account to transfer the given amount from.\nto_addr (str): public address of the account to transfer the given amount to.\namount (int): quantity to send.\ntx_attributes (list): a list of TransactionAtribute objects.\n\nReturns:\ntuple:\nInvocationTransaction: the transaction.\nint: the transaction fee.\nlist: the neo VM evaluationstack results.", "source": "codesearchnet"}
{"code": "def axis_shape_dims_for_broadcast_in_dim(axis, input_shape, insert_dims):\n    if axis is None:\n        raise ValueError('Received `None` value for `axis`')\n    if isinstance(axis, int):\n        axis = (axis,)\n    if len(set(axis)) != len(axis):\n        raise ValueError(f'Repeated axis in `axis`: {axis}')\n    result_dims = len(input_shape)\n    if insert_dims:\n        result_dims += len(axis)\n    canonical_axis = []\n    for a in axis:\n        if not -result_dims <= a < result_dims:\n            raise ValueError(f'In `axis`, axis {a} is out of bounds for array of dimension {result_dims}')\n        if a < 0:\n            a = a + result_dims\n        canonical_axis.append(a)\n    if len(set(canonical_axis)) != len(canonical_axis):\n        raise ValueError(f'Repeated axis in `axis`: {canonical_axis}')\n    canonical_axis = sorted(canonical_axis)\n    output_shape = list(input_shape)\n    for i in canonical_axis:\n        if insert_dims:\n            output_shape.insert(i, 1)\n        else:\n            output_shape[i] = 1\n    broadcast_dims = [i for i in range(result_dims) if i not in canonical_axis]\n    return (canonical_axis, output_shape, broadcast_dims)", "docstring": "Turn the `axis` argument to the arguments needed by `broadcast_in_dim`.\n\nArgs:\naxis: single int or a tuple of ints for the axis argument. The list of\ndimensions to reduce or insert.\ninput_shape: the shape of the input as a tuple ints.\ninsert_dims: `False` turns dimensions in `axis` to 1s (use case:\nreduction along `axis` with `keep_dims=True`). `True`, inserts 1s\naccording to `axis` (use case: `expand_dims`).\nReturns:\nA tuple of three lists\n- The canonical value for `axis`: always a list, negative values have\nbeen resolved and values are sorted in ascending order.\n- The output shape: `input_shape` with 1s at the indices in `axis`, for\nuse as the `shape` argument of `broadcast_in_dim`.\n- The broadcast dimensions: list of dimensions not in `axis`, for use as\nthe `broadcast_dimensions` argument of `broadcast_in_dim`.", "source": "github-repos"}
{"code": "def remove(self, key):\n    if self.prepickle:\n        key = pickle.dumps(key)\n    if (key not in self.keys):\n        raise ValueError('The given key does not exist')\n    for (H, hashtable) in zip(self.keys[key], self.hashtables):\n        hashtable.remove_val(H, key)\n        if (not hashtable.get(H)):\n            hashtable.remove(H)\n    self.keys.remove(key)", "docstring": "Remove the key from the index.\n\nArgs:\nkey (hashable): The unique identifier of a set.", "source": "codesearchnet"}
{"code": "def set_y_grid_info(self, y_low, y_high, num_y, yscale, yval_name):\n        \n        self._set_grid_info('y', y_low, y_high, num_y, yscale, yval_name)\n        return", "docstring": "Set the grid values for y.\n\nCreate information for the grid of y values.\n\nArgs:\nnum_y (int): Number of points on axis.\ny_low/y_high (float): Lowest/highest value for the axis.\nyscale (str): Scale of the axis. Choices are 'log' or 'lin'.\nyval_name (str): Name representing the axis. See GenerateContainer documentation\nfor options for the name.", "source": "juraj-google-style"}
{"code": "def setKeySequenceCounter(self, iKeySequenceValue):\n        \n        print '%s call setKeySequenceCounter' % self.port\n        print iKeySequenceValue\n        try:\n            cmd = WPANCTL_CMD + 'setprop Network:KeyIndex %s' % str(iKeySequenceValue)\n            if self.__sendCommand(cmd)[0] != 'Fail':\n                time.sleep(1)\n                return True\n            else:\n                return False\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('setKeySequenceCounter() Error: ' + str(e))", "docstring": "set the Key sequence counter corresponding to Thread Network master key\n\nArgs:\niKeySequenceValue: key sequence value\n\nReturns:\nTrue: successful to set the key sequence\nFalse: fail to set the key sequence", "source": "juraj-google-style"}
{"code": "def convert_persistent_value(self, shift, instruction):\n        \n        command_dict = {\n            'name': 'pv',\n            't0': shift+instruction.start_time,\n            'ch': instruction.channels[0].name,\n            'val': instruction.command.value\n        }\n        return self._qobj_model(**command_dict)", "docstring": "Return converted `PersistentValueInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (PersistentValueInstruction): persistent value instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "juraj-google-style"}
{"code": "def forward(self, input):\n    output = torch.einsum('eoi,bei->beo', self.weight, input)\n    if self.bias is not None:\n        raise RuntimeError()\n    return output", "docstring": "Args:\ninput (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):\nThe input to the layer.", "source": "github-repos"}
{"code": "def Svn(url, fname, to=None):\n    \n    if to is None:\n        to = str(CFG[\"tmp_dir\"])\n\n    src_dir = local.path(to) / fname\n    if not source_required(src_dir):\n        Copy(src_dir, \".\")\n        return\n\n    from benchbuild.utils.cmd import svn\n    svn(\"co\", url, src_dir)\n    update_hash(src_dir)\n    Copy(src_dir, \".\")", "docstring": "Checkout the SVN repo.\n\nArgs:\nurl (str): The SVN SOURCE repo.\nfname (str): The name of the repo on disk.\nto (str): The name of the TARGET folder on disk.\nDefaults to ``CFG[\"tmpdir\"]``", "source": "juraj-google-style"}
{"code": "async def _get_popular_people_page(self, page=1):\n    return (await self.get_data(self.url_builder('person/popular', url_params=OrderedDict(page=page))))", "docstring": "Get a specific page of popular person data.\n\nArguments:\npage (:py:class:`int`, optional): The page to get.\n\nReturns:\n:py:class:`dict`: The page data.", "source": "codesearchnet"}
{"code": "def getConfig(self, section=None):\n    data = {}\n    if (section is None):\n        for s in self.config.sections():\n            if ('/' in s):\n                (parent, _s) = s.split('/')\n                data[parent][_s] = dict(self.config.items(s))\n            else:\n                data[s] = dict(self.config.items(s))\n    else:\n        data = dict(self.config.items(section))\n    return data", "docstring": "Returns a dictionary which contains the current config. If a section is setted,\nonly will returns the section config\n\nArgs:\nsection (str): (Optional) Section name.\n\nReturns:\ndict: Representation of current config", "source": "codesearchnet"}
{"code": "def __init__(self, bucket, prefix, sagemaker_session):\n        \n\n        \n        root_dir = sagemaker.utils.get_config_value('local.container_root', sagemaker_session.config)\n        if root_dir:\n            root_dir = os.path.abspath(root_dir)\n\n        working_dir = tempfile.mkdtemp(dir=root_dir)\n        \n        \n        \n        if root_dir is None and platform.system() == 'Darwin':\n            working_dir = '/private{}'.format(working_dir)\n\n        sagemaker.utils.download_folder(bucket, prefix, working_dir, sagemaker_session)\n        self.files = LocalFileDataSource(working_dir)", "docstring": "Create an S3DataSource instance\n\nArgs:\nbucket (str): S3 bucket name\nprefix (str): S3 prefix path to the data\nsagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the desired settings\nto talk to S3", "source": "juraj-google-style"}
{"code": "def _unshard_from_sc_to_cpu(stacked_table: tensor.Tensor, from_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]) -> Sequence[tensor.Tensor]:\n    logging.vlog(1, 'To unshuffle_from_sc_to_cpu on stacked_table.shape: %s', stacked_table[0].shape)\n    ret_tensors = []\n    for layout in from_shard_layouts:\n        padded_table = tpu_embedding_v3_utils.unshuffle_from_sc_to_cpu(stacked_table[0], num_sparse_cores=layout.num_sparse_cores, offset_in_shard=layout.sparse_core_shard_row_offset, size_in_shard=layout.unsharded_padded_shape[0] \n        orig_table = tpu_embedding_v3_utils.remove_padding_from_sc(padded_table, layout.unsharded_shape)\n        logging.vlog(1, 'orig_tensors.shape[%s]: %s', layout.table_name, orig_table.shape)\n        ret_tensors.append(orig_table)\n    return ret_tensors", "docstring": "Undo the shard the feature tables into SparseCore stacked table.\n\nArgs:\nstacked_table: The value of a SparseCore stacked and sharded table.\nfrom_shard_layouts: The target layouts for the target hardware.\n\nReturns:\nThe unsharded feature tables.", "source": "github-repos"}
{"code": "def get_policies_from_git(self):\n    fldr = mkdtemp()\n    try:\n        url = 'https:\n        policies = {'GLOBAL': {}}\n        if self.dbconfig.get('git_no_ssl_verify', self.ns, False):\n            os.environ['GIT_SSL_NO_VERIFY'] = '1'\n        repo = Repo.clone_from(url, fldr)\n        for obj in repo.head.commit.tree:\n            (name, ext) = os.path.splitext(obj.name)\n            if (ext == '.json'):\n                policies['GLOBAL'][name] = obj.data_stream.read()\n            if ((name == 'roles') and (obj.type == 'tree')):\n                for account in [x for x in obj.trees]:\n                    for role in [x for x in account.trees]:\n                        role_policies = {policy.name.replace('.json', ''): policy.data_stream.read() for policy in role.blobs if policy.name.endswith('.json')}\n                        if (account.name in policies):\n                            if (role.name in policies[account.name]):\n                                policies[account.name][role.name] += role_policies\n                            else:\n                                policies[account.name][role.name] = role_policies\n                        else:\n                            policies[account.name] = {role.name: role_policies}\n        return policies\n    finally:\n        if (os.path.exists(fldr) and os.path.isdir(fldr)):\n            shutil.rmtree(fldr)", "docstring": "Retrieve policies from the Git repo. Returns a dictionary containing all the roles and policies\n\nReturns:\n:obj:`dict` of `str`: `dict`", "source": "codesearchnet"}
{"code": "def compute_digest_response(self, realm, user_name, method, uri, nonce, cnonce, qop, nc, environ):\n\n    def md5h(data):\n        return md5(compat.to_bytes(data)).hexdigest()\n\n    def md5kd(secret, data):\n        return md5h(((secret + ':') + data))\n    A1 = self.domain_controller.digest_auth_user(realm, user_name, environ)\n    if (not A1):\n        return False\n    A2 = ((method + ':') + uri)\n    if qop:\n        res = md5kd(A1, ((((((((nonce + ':') + nc) + ':') + cnonce) + ':') + qop) + ':') + md5h(A2)))\n    else:\n        res = md5kd(A1, ((nonce + ':') + md5h(A2)))\n    return res", "docstring": "Computes digest hash.\n\nCalculation of the A1 (HA1) part is delegated to the dc interface method\n`digest_auth_user()`.\n\nArgs:\nrealm (str):\nuser_name (str):\nmethod (str): WebDAV Request Method\nuri (str):\nnonce (str): server generated nonce value\ncnonce (str): client generated cnonce value\nqop (str): quality of protection\nnc (str) (number), nonce counter incremented by client\nReturns:\nMD5 hash string\nor False if user rejected by domain controller", "source": "codesearchnet"}
{"code": "def _UpdateUserGroups(self, user, groups):\n    \n    groups = ','.join(groups)\n    self.logger.debug('Updating user %s with groups %s.', user, groups)\n    command = self.usermod_cmd.format(user=user, groups=groups)\n    try:\n      subprocess.check_call(command.split(' '))\n    except subprocess.CalledProcessError as e:\n      self.logger.warning('Could not update user %s. %s.', user, str(e))\n      return False\n    else:\n      self.logger.debug('Updated user account %s.', user)\n      return True", "docstring": "Update group membership for a Linux user.\n\nArgs:\nuser: string, the name of the Linux user account.\ngroups: list, the group names to add the user as a member.\n\nReturns:\nbool, True if user update succeeded.", "source": "juraj-google-style"}
{"code": "def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):\n    worker.error_message_pubsub_client = worker.redis_client.pubsub(ignore_subscribe_messages=True)\n    error_pubsub_channel = str(ray.gcs_utils.TablePubsub.ERROR_INFO).encode('ascii')\n    worker.error_message_pubsub_client.subscribe(error_pubsub_channel)\n    try:\n        error_messages = global_state.error_messages(worker.task_driver_id)\n        for error_message in error_messages:\n            logger.error(error_message)\n        while True:\n            if threads_stopped.is_set():\n                return\n            msg = worker.error_message_pubsub_client.get_message()\n            if (msg is None):\n                threads_stopped.wait(timeout=0.01)\n                continue\n            gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(msg['data'], 0)\n            assert (gcs_entry.EntriesLength() == 1)\n            error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(gcs_entry.Entries(0), 0)\n            driver_id = error_data.DriverId()\n            if (driver_id not in [worker.task_driver_id.binary(), DriverID.nil().binary()]):\n                continue\n            error_message = ray.utils.decode(error_data.ErrorMessage())\n            if (ray.utils.decode(error_data.Type()) == ray_constants.TASK_PUSH_ERROR):\n                task_error_queue.put((error_message, time.time()))\n            else:\n                logger.error(error_message)\n    finally:\n        worker.error_message_pubsub_client.close()", "docstring": "Listen to error messages in the background on the driver.\n\nThis runs in a separate thread on the driver and pushes (error, time)\ntuples to the output queue.\n\nArgs:\nworker: The worker class that this thread belongs to.\ntask_error_queue (queue.Queue): A queue used to communicate with the\nthread that prints the errors found by this thread.\nthreads_stopped (threading.Event): A threading event used to signal to\nthe thread that it should exit.", "source": "codesearchnet"}
{"code": "def metadata(self, path):\n    raise NotImplementedError", "docstring": "Fetch metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nThis operation returns metadata as stored in the underlying\nFileSystem. It should not need to read file data to obtain this value.\nFor web based file systems, this method should also incur as few as\npossible requests.\n\nArgs:\npath: string path of a file.\n\nReturns:\n:class:`~apache_beam.io.filesystem.FileMetadata`.\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def newick(self):\n    node_to_str = dict()\n    for node in self.traverse_postorder():\n        if node.is_leaf():\n            if (node.label is None):\n                node_to_str[node] = ''\n            else:\n                node_to_str[node] = str(node.label)\n        else:\n            out = ['(']\n            for c in node.children:\n                out.append(node_to_str[c])\n                if (c.edge_length is not None):\n                    if isinstance(c.edge_length, int):\n                        l_str = str(c.edge_length)\n                    elif (isinstance(c.edge_length, float) and c.edge_length.is_integer()):\n                        l_str = str(int(c.edge_length))\n                    else:\n                        l_str = str(c.edge_length)\n                    out.append((':%s' % l_str))\n                out.append(',')\n                del node_to_str[c]\n            out.pop()\n            out.append(')')\n            if (node.label is not None):\n                out.append(str(node.label))\n            node_to_str[node] = ''.join(out)\n    return node_to_str[self]", "docstring": "Newick string conversion starting at this ``Node`` object\n\nReturns:\n``str``: Newick string conversion starting at this ``Node`` object", "source": "codesearchnet"}
{"code": "def _prefix_from_ip_int(self, ip_int):\n    prefixlen = self._max_prefixlen\n    while prefixlen:\n        if (ip_int & 1):\n            break\n        ip_int >>= 1\n        prefixlen -= 1\n    if (ip_int == ((1 << prefixlen) - 1)):\n        return prefixlen\n    else:\n        raise NetmaskValueError('Bit pattern does not match /1*0*/')", "docstring": "Return prefix length from a bitwise netmask.\n\nArgs:\nip_int: An integer, the netmask in expanded bitwise format.\n\nReturns:\nAn integer, the prefix length.\n\nRaises:\nNetmaskValueError: If the input is not a valid netmask.", "source": "codesearchnet"}
{"code": "def download(url, output_file=None, open_file=True, allow_overwrite=False):\n    filename = url.split('/')[(- 1)]\n    if (output_file is None):\n        cache = os.path.join(get_data_home(), filename)\n    else:\n        cache = output_file\n    if (os.path.exists(cache) and (not allow_overwrite)):\n        logger.info('> {} already exists.'.format(cache))\n        logger.info('> If you have any issue when using this file, ')\n        logger.info('> manually remove the file and try download again.')\n    else:\n        r = request.urlopen(url)\n        try:\n            if six.PY2:\n                content_length = int(r.info().dict['content-length'])\n            elif six.PY3:\n                content_length = int(r.info()['Content-Length'])\n        except:\n            content_length = 0\n        unit = 1000000\n        content = b''\n        with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t:\n            while True:\n                data = r.read(unit)\n                l = len(data)\n                t.update(l)\n                if (l == 0):\n                    break\n                content += data\n        with open(cache, 'wb') as f:\n            f.write(content)\n    if (not open_file):\n        return\n    return open(cache, 'rb')", "docstring": "Download a file from URL.\n\nArgs:\nurl (str): URL.\noutput_file (str, optional): If given, the downloaded file is written to the given path.\nopen_file (bool): If True, it returns an opened file stream of the downloaded file.\nallow_overwrite (bool): If True, it overwrites an existing file.\n\nReturns:\nReturns file object if open_file is True, otherwise None.", "source": "codesearchnet"}
{"code": "async def inspect(self, task_id: str) -> Mapping[str, Any]:\n        \n\n        response = await self.docker._query_json(\n            \"tasks/{task_id}\".format(task_id=task_id), method=\"GET\"\n        )\n        return response", "docstring": "Return info about a task\n\nArgs:\ntask_id: is ID of the task", "source": "juraj-google-style"}
{"code": "def _dump_data(ground_truth_detections, images_folder_path, output_folder_path):\n    if not os.path.exists(output_folder_path):\n        os.makedirs(output_folder_path)\n    output_images_folder = os.path.join(output_folder_path, 'images')\n    if not os.path.exists(output_images_folder):\n        os.makedirs(output_images_folder)\n    output_proto_file = os.path.join(output_folder_path, 'ground_truth.pb')\n    ground_truth_data = evaluation_stages_pb2.ObjectDetectionGroundTruth()\n    for image_dict in ground_truth_detections.values():\n        detection_result = ground_truth_data.detection_results.add()\n        detection_result.image_id = image_dict['id']\n        detection_result.image_name = image_dict['file_name']\n        for detection_dict in image_dict['detections']:\n            object_instance = detection_result.objects.add()\n            object_instance.bounding_box.normalized_top = detection_dict['bbox'][0]\n            object_instance.bounding_box.normalized_left = detection_dict['bbox'][1]\n            object_instance.bounding_box.normalized_bottom = detection_dict['bbox'][2]\n            object_instance.bounding_box.normalized_right = detection_dict['bbox'][3]\n            object_instance.class_id = detection_dict['category_id']\n        shutil.copy2(os.path.join(images_folder_path, image_dict['file_name']), output_images_folder)\n    with open(output_proto_file, 'wb') as proto_file:\n        proto_file.write(ground_truth_data.SerializeToString())", "docstring": "Dumps images & data from ground-truth objects into output_folder_path.\n\nThe following are created in output_folder_path:\nimages/: sub-folder for allowlisted validation images.\nground_truth.pb: A binary proto file containing all ground-truth\nobject-sets.\n\nArgs:\nground_truth_detections: A dict mapping image id to ground truth data.\nOutput of _get_ground_truth_detections.\nimages_folder_path: Validation images folder\noutput_folder_path: folder to output files to.", "source": "github-repos"}
{"code": "def finalize_options(self):\n    self.cwd = os.path.abspath(os.path.dirname(__file__))\n    self.test_dir = os.path.join(self.cwd, 'tests')", "docstring": "Finalizes the command's options.\n\nArgs:\nself (CoverageCommand): the ``CoverageCommand`` instance\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "class MedianTracker(BaseTracker):\n\n    def __init__(self, quantile_tracker: Optional[QuantileTracker]=None):\n        self._quantile_tracker = quantile_tracker or BufferedSlidingQuantileTracker(DEFAULT_WINDOW_SIZE, 0.5)\n        assert self._quantile_tracker._q == 0.5, 'quantile_tracker must be initialized with q = 0.5'\n\n    def push(self, x):\n        \n        self._quantile_tracker.push(x)\n\n    def get(self):\n        \n        return self._quantile_tracker.get()", "docstring": "Tracks the median of a stream of values using a quantile tracker.\n\nThis wrapper class encapsulates a `QuantileTracker` configured specifically\nfor the 0.5 quantile (median).\n\nArgs:\nquantile_tracker: An optional `QuantileTracker` instance. If not provided,\na `BufferedSlidingQuantileTracker` with a default window size 1000 and\nq=0.5 is created.\n\nRaises:\nAssertionError: If the provided quantile_tracker is not initialized with\nq=0.5.", "source": "github-repos"}
{"code": "def get_indent(code: str) -> str:\n    lines = code.split('\\n')\n    idx = 0\n    while idx < len(lines) and len(lines[idx]) == 0:\n        idx += 1\n    if idx < len(lines):\n        return re.search('^(\\\\s*)\\\\S', lines[idx]).groups()[0]\n    return ''", "docstring": "Find the indent in the first non empty line in a code sample.\n\nArgs:\ncode (`str`): The code to inspect.\n\nReturns:\n`str`: The indent looked at (as string).", "source": "github-repos"}
{"code": "def get_weights(model_hparams, vocab_size, hidden_dim=None):\n    if (hidden_dim is None):\n        hidden_dim = model_hparams.hidden_size\n    num_shards = model_hparams.symbol_modality_num_shards\n    shards = []\n    for i in range(num_shards):\n        shard_size = ((vocab_size \n        var_name = ('weights_%d' % i)\n        shards.append(tf.get_variable(var_name, [shard_size, hidden_dim], initializer=tf.random_normal_initializer(0.0, (hidden_dim ** (- 0.5)))))\n    if (num_shards == 1):\n        ret = shards[0]\n    else:\n        ret = tf.concat(shards, 0)\n    if (not tf.executing_eagerly()):\n        ret = common_layers.convert_gradient_to_tensor(ret)\n    return ret", "docstring": "Create or get concatenated embedding or softmax variable.\n\nArgs:\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\nhidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size\n\nReturns:\na list of num_shards Tensors.", "source": "codesearchnet"}
{"code": "def _GetMountpoints(only_physical=True):\n    partitions = psutil.disk_partitions(all=(not only_physical))\n    return set((partition.mountpoint for partition in partitions))", "docstring": "Fetches a list of mountpoints.\n\nArgs:\nonly_physical: Determines whether only mountpoints for physical devices\n(e.g. hard disks) should be listed. If false, mountpoints for things such\nas memory partitions or `/dev/shm` will be returned as well.\n\nReturns:\nA set of mountpoints.", "source": "codesearchnet"}
{"code": "def download_archive(self, id_or_uri, file_path):\n    uri = ((self.URI + '/archive/') + extract_id_from_uri(id_or_uri))\n    return self._client.download(uri, file_path)", "docstring": "Download the details of the Golden Image capture logs, which has been archived based on the specific attribute\nID.\n\nArgs:\nid_or_uri: ID or URI of the Golden Image.\nfile_path (str): File name to save the archive.\n\nReturns:\nbool: Success.", "source": "codesearchnet"}
{"code": "def zopen(filename, *args, **kwargs):\n    \n    if Path is not None and isinstance(filename, Path):\n        filename = str(filename)\n\n    name, ext = os.path.splitext(filename)\n    ext = ext.upper()\n    if ext == \".BZ2\":\n        if PY_VERSION[0] >= 3:\n            return bz2.open(filename, *args, **kwargs)\n        else:\n            args = list(args)\n            if len(args) > 0:\n                args[0] = \"\".join([c for c in args[0] if c != \"t\"])\n            if \"mode\" in kwargs:\n                kwargs[\"mode\"] = \"\".join([c for c in kwargs[\"mode\"]\n                                          if c != \"t\"])\n            return bz2.BZ2File(filename, *args, **kwargs)\n    elif ext in (\".GZ\", \".Z\"):\n        return gzip.open(filename, *args, **kwargs)\n    else:\n        return io.open(filename, *args, **kwargs)", "docstring": "This function wraps around the bz2, gzip and standard python's open\nfunction to deal intelligently with bzipped, gzipped or standard text\nfiles.\n\nArgs:\nfilename (str/Path): filename or pathlib.Path.\n\\*args: Standard args for python open(..). E.g., 'r' for read, 'w' for\nwrite.\n\\*\\*kwargs: Standard kwargs for python open(..).\n\nReturns:\nFile-like object. Supports with context.", "source": "juraj-google-style"}
{"code": "def get_requires(self, build_requires=False, private_build_requires=False):\n    requires = (self.requires or [])\n    if build_requires:\n        requires = (requires + (self.build_requires or []))\n    if private_build_requires:\n        requires = (requires + (self.private_build_requires or []))\n    return requires", "docstring": "Get the requirements of the variant.\n\nArgs:\nbuild_requires (bool): If True, include build requirements.\nprivate_build_requires (bool): If True, include private build\nrequirements.\n\nReturns:\nList of `Requirement` objects.", "source": "codesearchnet"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--append', dest='append', action='store_true', default=False,\n        required=cls._DEFAULT_APPEND, help=(\n            'Defines whether the intention is to append to an already '\n            'existing database or overwrite it. Defaults to overwrite.'))\n    argument_group.add_argument(\n        '--evidence', dest='evidence', type=str,\n        default=cls._DEFAULT_EVIDENCE, action='store', required=False,\n        help='Set the evidence field to a specific value, defaults to empty.')\n    argument_group.add_argument(\n        '--fields', dest='fields', type=str, action='store',\n        default=cls._DEFAULT_FIELDS, help=(\n            'Defines which fields should be indexed in the database.'))\n    argument_group.add_argument(\n        '--additional_fields', dest='additional_fields', type=str,\n        action='store', default='', help=(\n            'Defines extra fields to be included in the output, in addition to'\n            ' the default fields, which are {0:s}.'.format(\n                cls._DEFAULT_FIELDS)))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def df_numeric_column(min_value=0, max_value=1, num_rows=100):\n    \n    \n    return pd.Series(np.random.uniform(min_value, max_value, num_rows))", "docstring": "Generate a numeric column with random data\nArgs:\nmin_value (float): Minimum value (default = 0)\nmax_value (float): Maximum value (default = 1)\nnum_rows (int): The number of rows to generate  (default = 100)", "source": "juraj-google-style"}
{"code": "def SetAndLoadTagFile(self, tagging_file_path):\n    tag_file = tagging_file.TaggingFile(tagging_file_path)\n    self._tagging_rules = tag_file.GetEventTaggingRules()", "docstring": "Sets the tag file to be used by the plugin.\n\nArgs:\ntagging_file_path (str): path of the tagging file.", "source": "codesearchnet"}
{"code": "def validate_bagit_file(bagit_path):\n    _assert_zip_file(bagit_path)\n    bagit_zip = zipfile.ZipFile(bagit_path)\n    manifest_info_list = _get_manifest_info_list(bagit_zip)\n    _validate_checksums(bagit_zip, manifest_info_list)\n    return True", "docstring": "Check if a BagIt file is valid.\n\nRaises:\nServiceFailure\nIf the BagIt zip archive file fails any of the following checks:\n\n- Is a valid zip file.\n- The tag and manifest files are correctly formatted.\n- Contains all the files listed in the manifests.\n- The file checksums match the manifests.", "source": "codesearchnet"}
{"code": "def token_validate_with_login(self, **kwargs):\n        \n        path = self._get_path('token_validate_with_login')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Authenticate a user with a TMDb username and password.  The user\nmust have a verified email address and be registered on TMDb.\n\nArgs:\nrequest_token: The token you generated for the user to approve.\nusername: The user's username on TMDb.\npassword: The user's password on TMDb.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteVarString(self.name)\n        writer.WriteVarString(self.symbol)\n        writer.WriteUInt8(self.decimals)", "docstring": "Serialize this token data to bytes\nArgs:\nwriter (neocore.IO.BinaryWriter): binary writer to write serialization data to", "source": "juraj-google-style"}
{"code": "def slicewise(self, fn, *inputs):\n    \n    if fn == tf.add:\n      assert len(inputs) == 2\n      if isinstance(inputs[0], mtf.LazyAllreduceSum):\n        \n        return inputs[0] + inputs[1]\n    \n    inputs = mtf.convert_args_to_laid_out_tensors(inputs)\n    inputs = [x.tensor_list if isinstance(x, self.LaidOutTensor)\n              else [x] * len(self.devices) for x in inputs]\n    ret = mtf.parallel(self.devices, fn, *inputs)\n    if isinstance(ret[0], tuple):\n      ret = mtf.transpose_list_of_lists(ret)\n      return tuple([self.LaidOutTensor(t) for t in ret])\n    else:\n      return self.LaidOutTensor(ret)", "docstring": "Execute a function in parallel on all slices.\n\nArgs:\nfn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.\n*inputs: a list of inputs.  Each input is either a LaidOutTensor or\nis convertible to a tf.Tensor.\nReturns:\na LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.", "source": "juraj-google-style"}
{"code": "def generate_custom(self, cpu, vcpu_num, fill_topology):\n    try:\n        cpu = utils.dict_to_xml({'cpu': cpu})\n    except:\n        raise LagoInitException(\"conversion of 'cpu' to XML failed\")\n    if ((not cpu.xpath('topology')) and fill_topology):\n        cpu.append(self.generate_topology(vcpu_num))\n    return cpu", "docstring": "Generate custom CPU model. This method attempts to convert the dict to\nXML, as defined by ``xmltodict.unparse`` method.\n\nArgs:\ncpu(dict): CPU spec\nvcpu_num(int): number of virtual cpus\nfill_topology(bool): if topology is not defined in ``cpu`` and\n``vcpu`` was not set, will add CPU topology to the generated\nCPU.\n\nReturns:\nlxml.etree.Element: CPU XML node\n\nRaises:\n:exc:`~LagoInitException`: when failed to convert dict to XML", "source": "codesearchnet"}
{"code": "def import_descriptor_loader(definition_name, importer=__import__):\n    if definition_name.startswith('.'):\n        definition_name = definition_name[1:]\n    if (not definition_name.startswith('.')):\n        leaf = definition_name.split('.')[(- 1)]\n        if definition_name:\n            try:\n                module = importer(definition_name, '', '', [leaf])\n            except ImportError:\n                pass\n            else:\n                return describe(module)\n    try:\n        return describe(messages.find_definition(definition_name, importer=__import__))\n    except messages.DefinitionNotFoundError as err:\n        split_name = definition_name.rsplit('.', 1)\n        if (len(split_name) > 1):\n            (parent, child) = split_name\n            try:\n                parent_definition = import_descriptor_loader(parent, importer=importer)\n            except messages.DefinitionNotFoundError:\n                pass\n            else:\n                if isinstance(parent_definition, EnumDescriptor):\n                    search_list = (parent_definition.values or [])\n                elif isinstance(parent_definition, MessageDescriptor):\n                    search_list = (parent_definition.fields or [])\n                else:\n                    search_list = []\n                for definition in search_list:\n                    if (definition.name == child):\n                        return definition\n        raise err", "docstring": "Find objects by importing modules as needed.\n\nA definition loader is a function that resolves a definition name to a\ndescriptor.\n\nThe import finder resolves definitions to their names by importing modules\nwhen necessary.\n\nArgs:\ndefinition_name: Name of definition to find.\nimporter: Import function used for importing new modules.\n\nReturns:\nAppropriate descriptor for any describable type located by name.\n\nRaises:\nDefinitionNotFoundError when a name does not refer to either a definition\nor a module.", "source": "codesearchnet"}
{"code": "def get_model_hash(model):\n    hash_value = 0\n    for subgraph in model.subgraphs:\n        if subgraph.operators is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.operators))\n            for operator in subgraph.operators:\n                if operator.inputs is not None:\n                    hash_value = update_hash_with_array(hash_value, operator.inputs)\n                if operator.outputs is not None:\n                    hash_value = update_hash_with_array(hash_value, operator.outputs)\n        if subgraph.tensors is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.tensors))\n            for tensor in subgraph.tensors:\n                if tensor.buffer is not None:\n                    buffer = model.buffers[tensor.buffer]\n                    if buffer.data is not None:\n                        hash_value = update_hash_with_primitive_value(hash_value, len(buffer.data))\n                if tensor.shape is not None:\n                    hash_value = update_hash_with_array(hash_value, tensor.shape)\n        if subgraph.inputs is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.inputs))\n        if subgraph.outputs is not None:\n            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.outputs))\n    return hash_value", "docstring": "Calculate a 64-bit integer hash for a TensorFlow Lite model based on its structure.\n\nArgs:\nmodel: A TensorFlow Lite model object.\n\nReturns:\nint: A 64-bit integer hash value representing the model structure.", "source": "github-repos"}
{"code": "def remove_node_by_value(self, value):\n        \n        self.node_list = [node for node in self.node_list\n                          if node.value != value]\n        \n        for node in self.node_list:\n            node.link_list = [link for link in node.link_list if\n                              link.target.value != value]", "docstring": "Delete all nodes in ``self.node_list`` with the value ``value``.\n\nArgs:\nvalue (Any): The value to find and delete owners of.\n\nReturns: None\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> graph.remove_node_by_value('One')\n>>> len(graph.node_list)\n0", "source": "juraj-google-style"}
{"code": "def setup(self, target_directory=None):\n    self._target_directory = target_directory\n    if (not target_directory):\n        self._target_directory = tempfile.mkdtemp()\n    elif (not os.path.exists(target_directory)):\n        try:\n            os.makedirs(target_directory)\n        except OSError as exception:\n            message = 'An unknown error occurred: {0!s}'.format(exception)\n            self.state.add_error(message, critical=True)", "docstring": "Sets up the _target_directory attribute.\n\nArgs:\ntarget_directory: Directory in which collected files will be dumped.", "source": "codesearchnet"}
{"code": "def normalize(p):\n    \n    l = math.sqrt(p[0]**2 + p[1]**2)\n    return [0.0, 0.0] if l == 0 else [p[0]/l, p[1]/l]", "docstring": "Normalizes a point/vector\n\nArgs:\np ([float, float]): x and y coordinates\nReturns:\nfloat", "source": "juraj-google-style"}
{"code": "def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=''):\n    if (flag_id or flag_filt):\n        qry_string += ', '\n        param_str += ', '\n    if (not flag_filt):\n        qry_string += filt_st\n    return (qry_string, param_str)", "docstring": "Dynamically add syntaxtical elements to query.\n\nThis functions adds syntactical elements to the query string, and\nreport title, based on the types and number of items added thus far.\n\nArgs:\nflag_filt (bool): at least one filter item specified.\nqry_string (str): portion of the query constructed thus far.\nparam_str (str): the title to display before the list.\nflag_id (bool): optional - instance-id was specified.\nfilt_st (str): optional - syntax to add on end if filter specified.\nReturns:\nqry_string (str): the portion of the query that was passed in with\nthe appropriate syntactical elements added.\nparam_str (str): the title to display before the list.", "source": "codesearchnet"}
{"code": "def _parse_resources(resource_values: dict, resource_name: str) -> dict:\n        \n        \n        resources = {}\n\n        for r_values in resource_values[resource_name]:\n            if 'limits' in r_values:\n                for r_key, r_value in \\\n                        resource_values[resource_name][r_values].items():\n                    if 'cpu' in r_key:\n                        cpu_value = float(r_value) * 10 ** 9\n                        cpu_key = r_key[:3] + '_limit'\n                        resources[cpu_key] = int(cpu_value)\n                    if 'mem' in r_key:\n                        mem_value = re.sub('M', '', r_value)\n                        mem_key = r_key[:3] + '_limit'\n                        resources[mem_key] = int(mem_value) * 1048576\n        resources_spec = docker.types.Resources(**resources)\n\n        return resources_spec", "docstring": "Parse resources key.\n\nArgs:\nresource_values (dict): resource configurations values\nresource_name (string): Resource name\n\nReturns:\ndict, resources specification", "source": "juraj-google-style"}
{"code": "def _add_parameters(self, parameter_map, parameter_list):\n        \n        for parameter in parameter_list:\n            if parameter.get('$ref'):\n                \n                parameter = self.specification['parameters'].get(parameter.get('$ref').split('/')[-1])\n            parameter_map[parameter['name']] = parameter", "docstring": "Populates the given parameter map with the list of parameters provided, resolving any reference objects encountered.\n\nArgs:\nparameter_map: mapping from parameter names to parameter objects\nparameter_list: list of either parameter objects or reference objects", "source": "juraj-google-style"}
{"code": "def parse_arguments(argv):\n    parser = argparse.ArgumentParser(description='benchmark-runinference')\n    parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')\n    parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)\n    parser.add_argument('-d', '--device', help='Device to run the dataflow job on', choices=['CPU', 'GPU'], default='CPU')\n    args, _ = parser.parse_known_args(args=argv)\n    return args", "docstring": "Parses the arguments passed to the command line and\nreturns them as an object\nArgs:\nargv: The arguments passed to the command line.\nReturns:\nThe arguments that are being passed in.", "source": "github-repos"}
{"code": "def transfer(self, data):\n        \n        if not isinstance(data, (bytes, bytearray, list)):\n            raise TypeError(\"Invalid data type, should be bytes, bytearray, or list.\")\n\n        \n        try:\n            buf = array.array('B', data)\n        except OverflowError:\n            raise ValueError(\"Invalid data bytes.\")\n\n        buf_addr, buf_len = buf.buffer_info()\n\n        \n        spi_xfer = _CSpiIocTransfer()\n        spi_xfer.tx_buf = buf_addr\n        spi_xfer.rx_buf = buf_addr\n        spi_xfer.len = buf_len\n\n        \n        try:\n            fcntl.ioctl(self._fd, SPI._SPI_IOC_MESSAGE_1, spi_xfer)\n        except OSError as e:\n            raise SPIError(e.errno, \"SPI transfer: \" + e.strerror)\n\n        \n        if isinstance(data, bytes):\n            return bytes(bytearray(buf))\n        elif isinstance(data, bytearray):\n            return bytearray(buf)\n        elif isinstance(data, list):\n            return buf.tolist()", "docstring": "Shift out `data` and return shifted in data.\n\nArgs:\ndata (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out.\n\nReturns:\nbytes, bytearray, list: data shifted in.\n\nRaises:\nSPIError: if an I/O or OS error occurs.\nTypeError: if `data` type is invalid.\nValueError: if data is not valid bytes.", "source": "juraj-google-style"}
{"code": "def remove(self, name):\n        \n        try:\n            del self.data[name]\n        except (ValueError, KeyError):\n            import warnings\n            warnings.warn(\"Unable to find column '%s' in data source\" % name)", "docstring": "Remove a column of data.\n\nArgs:\nname (str) : name of the column to remove\n\nReturns:\nNone\n\n.. note::\nIf the column name does not exist, a warning is issued.", "source": "juraj-google-style"}
{"code": "def save_config(self, out_file_name):\n        \n\n        def get_hidden_parameter(item):\n\n            numer_of_sub_elements = item.childCount()\n\n            if numer_of_sub_elements == 0:\n                dictator = {item.name : item.visible}\n            else:\n                dictator = {item.name:{}}\n                for child_id in range(numer_of_sub_elements):\n                    dictator[item.name].update(get_hidden_parameter(item.child(child_id)))\n            return dictator\n\n        out_file_name = str(out_file_name)\n        if not os.path.exists(os.path.dirname(out_file_name)):\n            os.makedirs(os.path.dirname(out_file_name))\n\n        \n        dictator = {}\n        for index in range(self.tree_scripts.topLevelItemCount()):\n            script_item = self.tree_scripts.topLevelItem(index)\n            dictator.update(get_hidden_parameter(script_item))\n\n        dictator = {\"gui_settings\": self.gui_settings, \"scripts_hidden_parameters\":dictator}\n\n        \n        for index in range(self.tree_scripts.topLevelItemCount()):\n            script_item = self.tree_scripts.topLevelItem(index)\n            self.update_script_from_item(script_item)\n\n        dictator.update({'instruments': {}, 'scripts': {}, 'probes': {}})\n\n        for instrument in self.instruments.values():\n            dictator['instruments'].update(instrument.to_dict())\n        for script in self.scripts.values():\n            dictator['scripts'].update(script.to_dict())\n\n        for instrument, probe_dict in self.probes.items():\n            dictator['probes'].update({instrument: ','.join(list(probe_dict.keys()))})\n\n        with open(out_file_name, 'w') as outfile:\n            tmp = json.dump(dictator, outfile, indent=4)", "docstring": "saves gui configuration to out_file_name\nArgs:\nout_file_name: name of file", "source": "juraj-google-style"}
{"code": "def _create_extra_packages(extra_packages, temp_dir) -> List[beam_runner_api_pb2.ArtifactInformation]:\n    resources: List[beam_runner_api_pb2.ArtifactInformation] = []\n    staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)\n    local_packages: List[str] = []\n    for package in extra_packages:\n        if not (os.path.basename(package).endswith('.tar') or os.path.basename(package).endswith('.tar.gz') or os.path.basename(package).endswith('.whl') or os.path.basename(package).endswith('.zip')):\n            raise RuntimeError('The --extra_package option expects a full path ending with \".tar\", \".tar.gz\", \".whl\" or \".zip\" instead of %s' % package)\n        if os.path.basename(package).endswith('.whl'):\n            _LOGGER.warning('The .whl package \"%s\" provided in --extra_package must be binary-compatible with the worker runtime environment.' % package)\n        if not os.path.isfile(package):\n            if Stager._is_remote_path(package):\n                _LOGGER.info('Downloading extra package: %s locally before staging', package)\n                _, last_component = FileSystems.split(package)\n                local_file_path = FileSystems.join(staging_temp_dir, last_component)\n                Stager._download_file(package, local_file_path)\n            else:\n                raise RuntimeError('The file %s cannot be found. It was specified in the --extra_packages command line option.' % package)\n        else:\n            local_packages.append(package)\n    local_packages.extend([FileSystems.join(staging_temp_dir, f) for f in os.listdir(staging_temp_dir)])\n    for package in local_packages:\n        basename = os.path.basename(package)\n        resources.append(Stager._create_file_stage_to_artifact(package, basename))\n    with open(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), 'wt') as f:\n        for package in local_packages:\n            f.write('%s\\n' % os.path.basename(package))\n    resources.append(Stager._create_file_stage_to_artifact(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), EXTRA_PACKAGES_FILE))\n    return resources", "docstring": "Creates a list of local extra packages.\n\nArgs:\nextra_packages: Ordered list of local paths to extra packages to be\nstaged. Only packages on localfile system and GCS are supported.\ntemp_dir: Temporary folder where the resource building can happen.\nCaller is responsible for cleaning up this folder after this function\nreturns.\n\nReturns:\nA list of ArtifactInformation of local file paths and file names\n(no paths) for the resources staged. All the files are assumed to be\nstaged in staging_location.\n\nRaises:\nRuntimeError: If files specified are not found or do not have expected\nname patterns.", "source": "github-repos"}
{"code": "def convert_data_to_dtype(data, data_type, mot_float_type='float'):\n    \n    scalar_dtype = ctype_to_dtype(data_type, mot_float_type)\n\n    if isinstance(data, numbers.Number):\n        data = scalar_dtype(data)\n\n    if is_vector_ctype(data_type):\n        shape = data.shape\n        dtype = ctype_to_dtype(data_type, mot_float_type)\n        ve = np.zeros(shape[:-1], dtype=dtype)\n\n        if len(shape) == 1:\n            for vector_ind in range(shape[0]):\n                ve[0][vector_ind] = data[vector_ind]\n        elif len(shape) == 2:\n            for i in range(data.shape[0]):\n                for vector_ind in range(data.shape[1]):\n                    ve[i][vector_ind] = data[i, vector_ind]\n        elif len(shape) == 3:\n            for i in range(data.shape[0]):\n                for j in range(data.shape[1]):\n                    for vector_ind in range(data.shape[2]):\n                        ve[i, j][vector_ind] = data[i, j, vector_ind]\n\n        return np.require(ve, requirements=['C', 'A', 'O'])\n    return np.require(data, scalar_dtype, ['C', 'A', 'O'])", "docstring": "Convert the given input data to the correct numpy type.\n\nArgs:\ndata (ndarray): The value to convert to the correct numpy type\ndata_type (str): the data type we need to convert the data to\nmot_float_type (str): the data type of the current ``mot_float_type``\n\nReturns:\nndarray: the input data but then converted to the desired numpy data type", "source": "juraj-google-style"}
{"code": "def touch(path, content='', encoding='utf-8', overwrite=False):\n    path = os.path.abspath(path)\n    if ((not overwrite) and os.path.exists(path)):\n        logger.warning('touch: \"%s\" already exists', path)\n        return False\n    try:\n        logger.info('touch: %s', path)\n        with io.open(path, 'wb') as f:\n            if (not isinstance(content, six.binary_type)):\n                content = content.encode(encoding)\n            f.write(content)\n        return True\n    except Exception as e:\n        logger.error('touch: %s failed. Error: %s', path, e)\n        return False", "docstring": "Create a file at the given path if it does not already exists.\n\nArgs:\npath (str): Path to the file.\ncontent (str): Optional content that will be written in the file.\nencoding (str): Encoding in which to write the content.\nDefault: ``utf-8``\noverwrite (bool): Overwrite the file if exists.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "codesearchnet"}
{"code": "def decode_proto(proto):\n    return _map_structure(proto, _get_decoders())", "docstring": "Decodes proto representing a nested structure.\n\nArgs:\nproto: Proto to decode.\n\nReturns:\nDecoded structure.\n\nRaises:\nNotEncodableError: For values for which there are no encoders.", "source": "github-repos"}
{"code": "def scan_devices(self, devnames, timeout=DEF_TIMEOUT, interval=DEF_SCAN_INTERVAL, window=DEF_SCAN_WINDOW):\n        \n\n        \n        logger.debug('configuring scan parameters') \n        self.api.ble_cmd_gap_set_scan_parameters(interval, window, 1)\n        self._set_state(self._STATE_CONFIGURE_SCAN)\n        self.api.ble_cmd_gap_discover(1) \n        self._wait_for_state(self._STATE_CONFIGURE_SCAN)\n\n        logger.debug('starting scan for devices {}'.format(devnames))\n        self.scan_targets = devnames\n        self._set_state(self._STATE_SCANNING)\n        self._wait_for_state(self._STATE_SCANNING, timeout)\n\n        self._set_state(self._STATE_GAP_END)\n        self.api.ble_cmd_gap_end_procedure()\n        self._wait_for_state(self._STATE_GAP_END)\n\n        logger.debug('scanning completed')\n\n        return self.scan_responses", "docstring": "Run a BLE scan for a defined interval and return results.\n\nAlternative to :meth:`begin_scan/:meth:`end_scan`.\n\nArgs:\ntimeout (float): time in seconds to run the scanning process for\ninterval (int): BLE scan interval, in units of 625us\nwindow (int): BLE scan window, in units of 625us\n\nReturns:\na :class:`ScanResults` object containing the scan results.", "source": "juraj-google-style"}
{"code": "def _build_file_writer(cls, session: AppSession):\n    args = session.args\n    if args.delete_after:\n        return session.factory.new('FileWriter')\n    elif args.output_document:\n        session.factory.class_map['FileWriter'] = SingleDocumentWriter\n        return session.factory.new('FileWriter', args.output_document, headers_included=args.save_headers)\n    use_dir = ((len(args.urls) != 1) or args.page_requisites or args.recursive)\n    if (args.use_directories == 'force'):\n        use_dir = True\n    elif (args.use_directories == 'no'):\n        use_dir = False\n    os_type = ('windows' if ('windows' in args.restrict_file_names) else 'unix')\n    ascii_only = ('ascii' in args.restrict_file_names)\n    no_control = ('nocontrol' not in args.restrict_file_names)\n    if ('lower' in args.restrict_file_names):\n        case = 'lower'\n    elif ('upper' in args.restrict_file_names):\n        case = 'upper'\n    else:\n        case = None\n    path_namer = session.factory.new('PathNamer', args.directory_prefix, index=args.default_page, use_dir=use_dir, cut=args.cut_dirs, protocol=args.protocol_directories, hostname=args.host_directories, os_type=os_type, ascii_only=ascii_only, no_control=no_control, case=case, max_filename_length=args.max_filename_length)\n    if (args.recursive or args.page_requisites or args.continue_download):\n        if (args.clobber_method == 'disable'):\n            file_class = OverwriteFileWriter\n        else:\n            file_class = IgnoreFileWriter\n    elif args.timestamping:\n        file_class = TimestampingFileWriter\n    else:\n        file_class = AntiClobberFileWriter\n    session.factory.class_map['FileWriter'] = file_class\n    return session.factory.new('FileWriter', path_namer, file_continuing=args.continue_download, headers_included=args.save_headers, local_timestamping=args.use_server_timestamps, adjust_extension=args.adjust_extension, content_disposition=args.content_disposition, trust_server_names=args.trust_server_names)", "docstring": "Create the File Writer.\n\nReturns:\nFileWriter: An instance of :class:`.writer.BaseFileWriter`.", "source": "codesearchnet"}
{"code": "def get_namespaces(start=None, end=None):\n    q = Namespace.query()\n    if (start is not None):\n        q = q.filter((Namespace.key >= Namespace.key_for_namespace(start)))\n    if (end is not None):\n        q = q.filter((Namespace.key < Namespace.key_for_namespace(end)))\n    return [x.namespace_name for x in q]", "docstring": "Return all namespaces in the specified range.\n\nArgs:\nstart: only return namespaces >= start if start is not None.\nend: only return namespaces < end if end is not None.\n\nReturns:\nA list of namespace names between the (optional) start and end values.", "source": "codesearchnet"}
{"code": "def __init__(self, output_names, force_strict=False):\n    \n    self._after_all_pipelines = set()\n    self._output_dict = {\n      'default': Slot(name='default'),\n    }\n\n    self._strict = len(output_names) > 0 or force_strict\n    if self._strict:\n      for name in output_names:\n        if name in self._output_dict:\n          raise UnexpectedPipelineError('Output name reserved: \"%s\"' % name)\n        self._output_dict[name] = Slot(name=name, strict=True)", "docstring": "Initializer.\n\nArgs:\noutput_names: The list of require output names that will be strictly\nenforced by this class.\nforce_strict: If True, force this future to be in strict mode.", "source": "juraj-google-style"}
{"code": "def depth_february_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_february_average_ground_temperature`'.format(value))\n    self._depth_february_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_february_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_february_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_num_bytes(self, batch: Sequence[np.ndarray]) -> int:\n    return sum((np_array.itemsize for np_array in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch of Tensors.", "source": "github-repos"}
{"code": "def num_records_produced(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_num_records_produced_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_num_records_produced(self._reader_ref, name=name)", "docstring": "Returns the number of records this reader has produced.\n\nThis is the same as the number of Read executions that have\nsucceeded.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nAn int64 Tensor.", "source": "github-repos"}
{"code": "def reset_sequence(self, topic):\n        \n\n        if topic in self.queues:\n            self.queues[topic].reset()", "docstring": "Reset the expected sequence number for a topic\n\nIf the topic is unknown, this does nothing.  This behaviour is\nuseful when you have wildcard topics that only create queues\nonce they receive the first message matching the topic.\n\nArgs:\ntopic (string): The topic to reset the packet queue on", "source": "juraj-google-style"}
{"code": "def write_file(self, filename, file_format=\"xyz\"):\n        \n        mol = pb.Molecule(self._obmol)\n        return mol.write(file_format, filename, overwrite=True)", "docstring": "Uses OpenBabel to output all supported formats.\n\nArgs:\nfilename: Filename of file to output\nfile_format: String specifying any OpenBabel supported formats.", "source": "juraj-google-style"}
{"code": "def intersect(self, other):\n        \n        operation = bool.__and__\n        self.cross_product(other, operation)\n        return  self", "docstring": "Constructs an unminimized DFA recognizing\nthe intersection of the languages of two given DFAs.\nArgs:\nother (DFA): The other DFA that will be used\nfor the intersect operation\nReturns:\nReturns:\nDFA: The resulting DFA", "source": "juraj-google-style"}
{"code": "def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):\n        \n        rst_file = RasterUtilClass.read_raster(tif)\n        nodata = rst_file.noDataValue\n        if change_nodata:\n            if not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA):\n                nodata = DEFAULT_NODATA\n                rst_file.data[rst_file.data == rst_file.noDataValue] = DEFAULT_NODATA\n        gdal_type = rst_file.dataType\n        if change_gdal_type:\n            gdal_type = GDT_Float32\n        RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data,\n                                         rst_file.geotrans, rst_file.srs, nodata,\n                                         gdal_type)", "docstring": "Converting Raster format to GeoTIFF.\n\nArgs:\ntif: source raster file path.\ngeotif: output raster file path.\nchange_nodata: change NoDataValue to -9999 or not.\ngdal_type (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.\nchange_gdal_type: If True, output the Float32 data type.", "source": "juraj-google-style"}
{"code": "def from_tensor(tensor, validate=True):\n    tensor = tf.convert_to_tensor(tensor, dtype=tf.int32)\n    return from_year_month_day(year=tensor[..., 0], month=tensor[..., 1], day=tensor[..., 2], validate=validate)", "docstring": "Creates DateTensor from a single tensor containing years, months and days.\n\nThis function is complementary to DateTensor.to_tensor: given an int32 Tensor\nof shape (..., 3), creates a DateTensor. The three elements of the last\ndimension are years, months and days, in this order.\n\nArgs:\ntensor: Tensor of type int32 and shape (..., 3).\nvalidate: Whether to validate the dates.\n\nReturns:\nDateTensor object.\n\n#### Example\n\n```python\ntensor = tf.constant([[2015, 4, 15], [2017, 12, 30]], dtype=tf.int32)\ndate_tensor = tff.datetime.dates_from_tensor(tensor)\n```", "source": "github-repos"}
{"code": "def _has_tf_decorator_attr(obj):\n    return hasattr(obj, '_tf_decorator') and isinstance(getattr(obj, '_tf_decorator'), TFDecorator)", "docstring": "Checks if object has _tf_decorator attribute.\n\nThis check would work for mocked object as well since it would\ncheck if returned attribute has the right type.\n\nArgs:\nobj: Python object.", "source": "github-repos"}
{"code": "def get_attribute_label(\n        self, main_type, sub_type, unique_id, attribute_id, label, owner=None, params=None\n    ):\n        \n        return self.attribute_label(\n            main_type, sub_type, unique_id, attribute_id, label, owner=owner, params=params\n        )", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nattribute_id:\nlabel:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def _collect_leaf_level_keys(cross):\n    leaf_level_keys = []\n    for k in cross.keys:\n        if isinstance(k, _CrossedColumn):\n            leaf_level_keys.extend(_collect_leaf_level_keys(k))\n        else:\n            leaf_level_keys.append(k)\n    return leaf_level_keys", "docstring": "Collects base keys by expanding all nested crosses.\n\nArgs:\ncross: A `_CrossedColumn`.\n\nReturns:\nA list of strings or `_CategoricalColumn` instances.", "source": "github-repos"}
{"code": "def _wait_time(self, shard_state, secs, now=datetime.datetime.now):\n    \n    assert shard_state.slice_start_time is not None\n    delta = now() - shard_state.slice_start_time\n    duration = datetime.timedelta(seconds=secs)\n    if delta < duration:\n      return util.total_seconds(duration - delta)\n    else:\n      return 0", "docstring": "Time to wait until slice_start_time is secs ago from now.\n\nArgs:\nshard_state: shard state.\nsecs: duration in seconds.\nnow: a func that gets now.\n\nReturns:\n0 if no wait. A positive int in seconds otherwise. Always around up.", "source": "juraj-google-style"}
{"code": "def wavelength_match(a, b):\n        \n        if type(a) == (type(b) or\n                       isinstance(a, numbers.Number) and\n                       isinstance(b, numbers.Number)):\n            return a == b\n        elif a is None or b is None:\n            return False\n        elif isinstance(a, (list, tuple)) and len(a) == 3:\n            return a[0] <= b <= a[2]\n        elif isinstance(b, (list, tuple)) and len(b) == 3:\n            return b[0] <= a <= b[2]\n        else:\n            raise ValueError(\"Can only compare wavelengths of length 1 or 3\")", "docstring": "Return if two wavelengths are equal.\n\nArgs:\na (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl\nb (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl", "source": "juraj-google-style"}
{"code": "async def play(self, author, text_channel, query, index=None, stop_current=False, shuffle=False):\n    if (self.state == 'off'):\n        self.state = 'starting'\n        self.prev_queue = []\n        (await self.set_topic(''))\n        (await self.msetup(text_channel))\n        (await self.enqueue(query, index, stop_current, shuffle))\n        (await self.vsetup(author))\n        self.state = ('ready' if (self.mready and self.vready) else 'off')\n    else:\n        (await self.enqueue(query, index, stop_current, shuffle))\n    if (self.state == 'ready'):\n        if (self.streamer is None):\n            (await self.vplay())", "docstring": "The play command\n\nArgs:\nauthor (discord.Member): The member that called the command\ntext_channel (discord.Channel): The channel where the command was called\nquery (str): The argument that was passed with the command\nindex (str): Whether to play next or at the end of the queue\nstop_current (bool): Whether to stop the currently playing song\nshuffle (bool): Whether to shuffle the queue after starting", "source": "codesearchnet"}
{"code": "def exists(self, path):\n    return self._gcsIO().exists(path)", "docstring": "Check if the provided path exists on the FileSystem.\n\nArgs:\npath: string path that needs to be checked.\n\nReturns: boolean flag indicating if path exists", "source": "github-repos"}
{"code": "def get_label(self, main_type, sub_type, unique_id, label, owner=None, params=None):\n        \n        params = params or {}\n\n        return self.label(\n            main_type, sub_type, unique_id, label, action='GET', owner=owner, params=params\n        )", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nlabel:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def routerify(obj):\n    router = Router()\n    for info in get_routing_attributes(obj):\n        router.add_route(*info)\n    obj.__growler_router = router\n    return router", "docstring": "Scan through attributes of object parameter looking for any which\nmatch a route signature.\nA router will be created and added to the object with parameter.\n\nArgs:\nobj (object): The object (with attributes) from which to\nsetup a router\n\nReturns:\nRouter: The router created from attributes in the object.", "source": "codesearchnet"}
{"code": "def insert(self, optional_root_locations_path):\n    encountered_simple_optional = False\n    parent_location = self._root_location\n    for optional_root_location in optional_root_locations_path:\n        if encountered_simple_optional:\n            raise AssertionError(u'Encountered simple optional root location {} in path, butfurther locations are present. This should not happen: {}'.format(optional_root_location, optional_root_locations_path))\n        if (optional_root_location not in self._location_to_children):\n            encountered_simple_optional = True\n        else:\n            self._location_to_children[parent_location].add(optional_root_location)\n            parent_location = optional_root_location", "docstring": "Insert a path of optional Locations into the tree.\n\nEach OptionalTraversalTree object contains child Location objects as keys mapping to\nother OptionalTraversalTree objects.\n\nArgs:\noptional_root_locations_path: list of optional root Locations all except the last\nof which must be present in complex_optional_roots", "source": "codesearchnet"}
{"code": "def git_checkout(branch_name, create=False):\n    \n    \n    log.info(\"Checking out <33>{}\".format(branch_name))\n    shell.run('git checkout {} {}'.format('-b' if create else '', branch_name))", "docstring": "Checkout or create a given branch\n\nArgs:\nbranch_name (str):\nThe name of the branch to checkout or create.\ncreate (bool):\nIf set to **True** it will create the branch instead of checking it\nout.", "source": "juraj-google-style"}
{"code": "def get_submission_filenames(self, tournament=None, round_num=None):\n    query = '\\n          query {\\n            user {\\n              submissions {\\n                filename\\n                selected\\n                round {\\n                   tournament\\n                   number\\n                }\\n              }\\n            }\\n          }\\n        '\n    data = self.raw_query(query, authorization=True)['data']['user']\n    filenames = [{'round_num': item['round']['number'], 'tournament': item['round']['tournament'], 'filename': item['filename']} for item in data['submissions'] if item['selected']]\n    if (round_num is not None):\n        filenames = [f for f in filenames if (f['round_num'] == round_num)]\n    if (tournament is not None):\n        filenames = [f for f in filenames if (f['tournament'] == tournament)]\n    filenames.sort(key=(lambda f: (f['round_num'], f['tournament'])))\n    return filenames", "docstring": "Get filenames of the submission of the user.\n\nArgs:\ntournament (int): optionally filter by ID of the tournament\nround_num (int): optionally filter round number\n\nReturns:\nlist: list of user filenames (`dict`)\n\nEach filenames in the list as the following structure:\n\n* filename (`str`)\n* round_num (`int`)\n* tournament (`int`)\n\nExample:\n>>> NumerAPI().get_submission_filenames(3, 111)\n[{'filename': 'model57-dMpHpYMPIUAF.csv',\n'round_num': 111,\n'tournament': 3}]", "source": "codesearchnet"}
{"code": "def restore(self, state):\n        \n\n        reading = state.get(u'reading')\n        if reading is not None:\n            reading = IOTileReading.FromDict(reading)\n\n        selector = DataStreamSelector.FromString(state.get(u'selector'))\n        if self.selector != selector:\n            raise ArgumentError(\"Attempted to restore a VirtualStreamWalker with a different selector\",\n                                selector=self.selector, serialized_data=state)\n\n        self.reading = reading", "docstring": "Restore the contents of this virtual stream walker.\n\nArgs:\nstate (dict): The previously serialized state.\n\nRaises:\nArgumentError: If the serialized state does not have\na matching selector.", "source": "juraj-google-style"}
{"code": "def ProcessClientResourcesStats(self, client_id, status):\n    if hasattr(status, 'child_session_id'):\n        flow_path = status.child_session_id\n    else:\n        flow_path = ('aff4:/%s/flows/%s' % (status.client_id, status.flow_id))\n    resources = rdf_client_stats.ClientResources()\n    resources.client_id = client_id\n    resources.session_id = flow_path\n    resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time\n    resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time\n    resources.network_bytes_sent = status.network_bytes_sent\n    self.context.usage_stats.RegisterResources(resources)", "docstring": "Process status message from a client and update the stats.\n\nArgs:\nclient_id: Client id.\nstatus: The status object returned from the client.", "source": "codesearchnet"}
{"code": "def coordinate_filter(self, query, mongo_query):\n        \n        LOG.debug('Adding genomic coordinates to the query')\n        chromosome = query['chrom']\n        mongo_query['chromosome'] = chromosome\n\n        if (query.get('start') and query.get('end')):\n            mongo_query['position'] = {'$lte': int(query['end'])}\n            mongo_query['end'] = {'$gte': int(query['start'])}\n\n        return mongo_query", "docstring": "Adds genomic coordinated-related filters to the query object\n\nArgs:\nquery(dict): a dictionary of query filters specified by the users\nmongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\nmongo_query(dict): returned object contains coordinate filters", "source": "juraj-google-style"}
{"code": "def get_box_files(self, box_key):\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.files_suffix\n\t\t\t\t\t\t])\n\n\t\treturn self._req('get', uri)", "docstring": "Gets to file infos in a single box.\nArgs:\nbox_key\t\tkey for the file\nreturn\t\t(status code, list of file info dicts)", "source": "juraj-google-style"}
{"code": "def set_dimension(tensor, axis, value):\n  \n  shape = tensor.shape.as_list()\n  if shape[axis] not in (value, None):\n    message = 'Cannot set dimension {} of tensor {} to {}; is already {}.'\n    raise ValueError(message.format(axis, tensor.name, value, shape[axis]))\n  shape[axis] = value\n  tensor.set_shape(shape)", "docstring": "Set the length of a tensor along the specified dimension.\n\nArgs:\ntensor: Tensor to define shape of.\naxis: Dimension to set the static shape for.\nvalue: Integer holding the length.\n\nRaises:\nValueError: When the tensor already has a different length specified.", "source": "juraj-google-style"}
{"code": "def send_file(self, file_name, remote_destination=None, **kwargs):\n    if (not remote_destination):\n        remote_destination = file_name\n    return SubprocessTask((self._rsync_cmd() + ['-ut', file_name, ('%s:%s' % (self.hostname, remote_destination))]), **kwargs)", "docstring": "Send a file to a remote host with rsync.\n\nArgs:\nfile_name (str): The relative location of the file on the local\nhost.\n\nremote_destination (str): The destination for the file on the remote\nhost. If `None`, will be assumed to be the same as\n**file_name**. Default `None`.\n\n**kwargs: Passed to ``SubprocessTask``'s init method.\n\nReturn:\n``pyrem.task.SubprocessTask``: The resulting task.", "source": "codesearchnet"}
{"code": "def __len__(self):\n    if not context.executing_eagerly():\n        raise TypeError('`tf.data.Dataset` only supports `len` in eager mode. Use `tf.data.Dataset.cardinality()` instead.')\n    length = self.cardinality()\n    if length.numpy() == INFINITE:\n        raise TypeError('The dataset is infinite.')\n    if length.numpy() == UNKNOWN:\n        raise TypeError('The dataset length is unknown.')\n    return length", "docstring": "Returns the length of the dataset if it is known and finite.\n\nThis method requires that you are running in eager mode, and that the\nlength of the dataset is known and non-infinite. When the length may be\nunknown or infinite, or if you are running in graph mode, use\n`tf.data.Dataset.cardinality` instead.\n\nReturns:\nAn integer representing the length of the dataset.\n\nRaises:\nRuntimeError: If the dataset length is unknown or infinite, or if eager\nexecution is not enabled.", "source": "github-repos"}
{"code": "def column(self, index_or_label):\n    if (isinstance(index_or_label, str) and (index_or_label not in self.labels)):\n        raise ValueError('The column \"{}\" is not in the table. The table contains these columns: {}'.format(index_or_label, ', '.join(self.labels)))\n    if (isinstance(index_or_label, int) and (not (0 <= index_or_label < len(self.labels)))):\n        raise ValueError('The index {} is not in the table. Only indices between 0 and {} are valid'.format(index_or_label, (len(self.labels) - 1)))\n    return self._columns[self._as_label(index_or_label)]", "docstring": "Return the values of a column as an array.\n\ntable.column(label) is equivalent to table[label].\n\n>>> tiles = Table().with_columns(\n...     'letter', make_array('c', 'd'),\n...     'count',  make_array(2, 4),\n... )\n\n>>> list(tiles.column('letter'))\n['c', 'd']\n>>> tiles.column(1)\narray([2, 4])\n\nArgs:\nlabel (int or str): The index or label of a column\n\nReturns:\nAn instance of ``numpy.array``.\n\nRaises:\n``ValueError``: When the ``index_or_label`` is not in the table.", "source": "codesearchnet"}
{"code": "def sample(self, num_samples: int):\n    return self._sample(num_samples)", "docstring": "Returns samples from the EBM corresponding to `self.energy`.\n\nArgs:\nnum_samples: Number of samples to draw from the EBM.", "source": "github-repos"}
{"code": "def minhash(self, v):\n        \n        if not isinstance(v, collections.Iterable):\n            raise TypeError(\"Input vector must be an iterable\")\n        if not len(v) == self.dim:\n            raise ValueError(\"Input dimension mismatch, expecting %d\" % self.dim)\n        if not isinstance(v, np.ndarray):\n            v = np.array(v, dtype=np.float32)\n        elif v.dtype != np.float32:\n            v = v.astype(np.float32)\n        hashvalues = np.zeros((self.sample_size, 2), dtype=np.int)\n        vzeros = (v == 0)\n        if vzeros.all():\n            raise ValueError(\"Input is all zeros\")\n        v[vzeros] = np.nan\n        vlog = np.log(v)\n        for i in range(self.sample_size):\n            t = np.floor((vlog / self.rs[i]) + self.betas[i])\n            ln_y = (t - self.betas[i]) * self.rs[i]\n            ln_a = self.ln_cs[i] - ln_y - self.rs[i]\n            k = np.nanargmin(ln_a)\n            hashvalues[i][0], hashvalues[i][1] = k, int(t[k])\n        return WeightedMinHash(self.seed, hashvalues)", "docstring": "Create a new weighted MinHash given a weighted Jaccard vector.\nEach dimension is an integer\nfrequency of the corresponding element in the multi-set represented\nby the vector.\n\nArgs:\nv (numpy.array): The Jaccard vector.", "source": "juraj-google-style"}
{"code": "def load_pickle(file, encoding=None):\n    if encoding:\n        with open(file, 'rb') as f:\n            return pickle.load(f, encoding=encoding)\n    with open(file, 'rb') as f:\n        return pickle.load(f)", "docstring": "Load a pickle file.\n\nArgs:\nfile (str): Path to pickle file\n\nReturns:\nobject: Loaded object from pickle file", "source": "codesearchnet"}
{"code": "def _construct_adb_cmd(self, raw_name, args, shell):\n    args = (args or '')\n    name = raw_name.replace('_', '-')\n    if shell:\n        args = utils.cli_cmd_to_string(args)\n        if self.serial:\n            adb_cmd = ('\"%s\" -s \"%s\" %s %s' % (ADB, self.serial, name, args))\n        else:\n            adb_cmd = ('\"%s\" %s %s' % (ADB, name, args))\n    else:\n        adb_cmd = [ADB]\n        if self.serial:\n            adb_cmd.extend(['-s', self.serial])\n        adb_cmd.append(name)\n        if args:\n            if isinstance(args, basestring):\n                adb_cmd.append(args)\n            else:\n                adb_cmd.extend(args)\n    return adb_cmd", "docstring": "Constructs an adb command with arguments for a subprocess call.\n\nArgs:\nraw_name: string, the raw unsanitized name of the adb command to\nformat.\nargs: string or list of strings, arguments to the adb command.\nSee subprocess.Proc() documentation.\nshell: bool, True to run this command through the system shell,\nFalse to invoke it directly. See subprocess.Proc() docs.\n\nReturns:\nThe adb command in a format appropriate for subprocess. If shell is\nTrue, then this is a string; otherwise, this is a list of\nstrings.", "source": "codesearchnet"}
{"code": "def __init__(self, graduation_year):\n        \n        if graduation_year is None:\n            self._number = 13\n        else:\n            self._number = settings.SENIOR_GRADUATION_YEAR - int(graduation_year) + 12\n\n        if 9 <= self._number <= 12:\n            self._name = Grade.names[self._number - 9]\n        else:\n            self._name = \"graduate\"", "docstring": "Initialize the Grade object.\n\nArgs:\ngraduation_year\nThe numerical graduation year of the user", "source": "juraj-google-style"}
{"code": "def _expand_place_ids(self, terms):\n    place_vids = []\n    first_type = None\n    for result in self.backend.identifier_index.search(terms):\n        if (not first_type):\n            first_type = result.type\n        if (result.type != first_type):\n            continue\n        place_vids.append(result.vid)\n    if place_vids:\n        all_set = set(itertools.chain.from_iterable((iallval(GVid.parse(x)) for x in place_vids)))\n        place_vids += list((str(x) for x in all_set))\n        return place_vids\n    else:\n        return terms", "docstring": "Lookups all of the place identifiers to get gvids\n\nArgs:\nterms (str or unicode): terms to lookup\n\nReturns:\nstr or list: given terms if no identifiers found, otherwise list of identifiers.", "source": "codesearchnet"}
{"code": "def __init__(self, ports_to_serve):\n        \n        self._port_pool = _PortPool()\n        self._total_allocations = 0\n        self._denied_allocations = 0\n        self._client_request_errors = 0\n        for port in ports_to_serve:\n            self._port_pool.add_port_to_free_pool(port)", "docstring": "Initialize a new port server.\n\nArgs:\nports_to_serve: A sequence of unique port numbers to test and offer\nup to clients.", "source": "juraj-google-style"}
{"code": "def all(self, data={}, **kwargs):\n    return super(Settlement, self).all(data, **kwargs)", "docstring": "Fetch all Settlement entities\n\nReturns:\nDictionary of Settlement data", "source": "codesearchnet"}
{"code": "def CaptureFrameLocals(self, frame):\n    variables = {n: self.CaptureNamedVariable(n, v, 1, self.default_capture_limits) for (n, v) in six.viewitems(frame.f_locals)}\n    nargs = frame.f_code.co_argcount\n    if (frame.f_code.co_flags & inspect.CO_VARARGS):\n        nargs += 1\n    if (frame.f_code.co_flags & inspect.CO_VARKEYWORDS):\n        nargs += 1\n    frame_arguments = []\n    for argname in frame.f_code.co_varnames[:nargs]:\n        if (argname in variables):\n            frame_arguments.append(variables.pop(argname))\n    return (frame_arguments, list(six.viewvalues(variables)))", "docstring": "Captures local variables and arguments of the specified frame.\n\nArgs:\nframe: frame to capture locals and arguments.\n\nReturns:\n(arguments, locals) tuple.", "source": "codesearchnet"}
{"code": "def safe_filename(filename, os_type='unix', no_control=True, ascii_only=True, case=None, encoding='utf8', max_length=None):\n    assert isinstance(filename, str), 'Expect str. Got {}.'.format(type(filename))\n    if (filename in ('.', os.curdir)):\n        new_filename = '%2E'\n    elif (filename in ('.', os.pardir)):\n        new_filename = '%2E%2E'\n    else:\n        unix = (os_type == 'unix')\n        windows = (os_type == 'windows')\n        encoder_args = (unix, no_control, windows, ascii_only)\n        if (encoder_args not in _encoder_cache):\n            _encoder_cache[encoder_args] = PercentEncoder(unix=unix, control=no_control, windows=windows, ascii_=ascii_only)\n        encoder = _encoder_cache[encoder_args]\n        encoded_filename = filename.encode(encoding)\n        new_filename = encoder.quote(encoded_filename).decode(encoding)\n    if (os_type == 'windows'):\n        if (new_filename[(- 1)] in ' .'):\n            new_filename = '{0}{1:02X}'.format(new_filename[:(- 1)], new_filename[(- 1)])\n    if (max_length and (len(new_filename) > max_length)):\n        hash_obj = hashlib.sha1(new_filename.encode(encoding))\n        new_length = max(0, (max_length - 8))\n        new_filename = '{0}{1}'.format(new_filename[:new_length], hash_obj.hexdigest()[:8])\n    if (case == 'lower'):\n        new_filename = new_filename.lower()\n    elif (case == 'upper'):\n        new_filename = new_filename.upper()\n    return new_filename", "docstring": "Return a safe filename or path part.\n\nArgs:\nfilename (str): The filename or path component.\nos_type (str): If ``unix``, escape the slash. If ``windows``, escape\nextra Windows characters.\nno_control (bool): If True, escape control characters.\nascii_only (bool): If True, escape non-ASCII characters.\ncase (str): If ``lower``, lowercase the string. If ``upper``, uppercase\nthe string.\nencoding (str): The character encoding.\nmax_length (int): The maximum length of the filename.\n\nThis function assumes that `filename` has not already been percent-encoded.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def write(self, destination, filename, content):\n        \n        if not os.path.exists(destination):\n            try:\n                os.makedirs(destination)\n            except:  \n                pass\n\n        filepath = \"%s/%s\" % (destination, filename)\n\n        f = open(filepath, \"w+\")\n        f.write(content)\n        f.close()", "docstring": "Write a file at the specific destination with the content.\n\nArgs:\ndestination (string): the destination location\nfilename (string): the filename that will be written\ncontent (string): the content of the filename", "source": "juraj-google-style"}
{"code": "def __diff_internal(self):\n    assert (self.p > 0), 'order of Bspline must be > 0'\n    t = self.knot_vector\n    p = self.p\n    Bi = Bspline(t[:(- 1)], (p - 1))\n    Bip1 = Bspline(t[1:], (p - 1))\n    numer1 = (+ p)\n    numer2 = (- p)\n    denom1 = (t[p:(- 1)] - t[:(- (p + 1))])\n    denom2 = (t[(p + 1):] - t[1:(- p)])\n    with np.errstate(divide='ignore', invalid='ignore'):\n        ci = np.where((denom1 != 0.0), (numer1 / denom1), 0.0)\n        cip1 = np.where((denom2 != 0.0), (numer2 / denom2), 0.0)\n    return ((ci, Bi), (cip1, Bip1))", "docstring": "Differentiate a B-spline once, and return the resulting coefficients and Bspline objects.\n\nThis preserves the Bspline object nature of the data, enabling recursive implementation\nof higher-order differentiation (see `diff`).\n\nThe value of the first derivative of `B` at a point `x` can be obtained as::\n\ndef diff1(B, x):\nterms = B.__diff_internal()\nreturn sum( ci*Bi(x) for ci,Bi in terms )\n\nReturns:\ntuple of tuples, where each item is (coefficient, Bspline object).\n\nSee:\n`diff`: differentiation of any order >= 0", "source": "codesearchnet"}
{"code": "def get_values(self, k, v):\n    \n    metadata = self.metadata\n    values = []\n    if metadata != None:\n      if k in metadata:\n        for metav in metadata[k]:\n          if v in metav:\n            values.append(metav[v])\n    return values", "docstring": "Get a list of values from the key value metadata attribute.\n\nArgs:\nk (str):\nKey in :class:`api.results`.metadata\nv (str):\nValues from each item in the key of :class:`api.results`.metadata\n\nReturns:\nA list containing all the ``v`` values in the ``k`` key for the  :class:`api.results`.metadata attribute.", "source": "juraj-google-style"}
{"code": "def unwrap_arguments(xml_response):\n    xml_response = xml_response.encode('utf-8')\n    try:\n        tree = XML.fromstring(xml_response)\n    except XML.ParseError:\n        filtered = illegal_xml_re.sub('', xml_response.decode('utf-8')).encode('utf-8')\n        tree = XML.fromstring(filtered)\n    action_response = tree.find('{http:\n    return dict(((i.tag, (i.text or '')) for i in action_response))", "docstring": "Extract arguments and their values from a SOAP response.\n\nArgs:\nxml_response (str):  SOAP/xml response text (unicode,\nnot utf-8).\nReturns:\ndict: a dict of ``{argument_name: value}`` items.", "source": "codesearchnet"}
{"code": "def StartService(service_name):\n  \n  try:\n    win32serviceutil.StartService(service_name)\n    logging.info(\"Service '%s' started.\", service_name)\n  except pywintypes.error as e:\n    if getattr(e, \"winerror\", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:\n      logging.debug(\"Tried to start '%s', but the service is not installed.\",\n                    service_name)\n    else:\n      logging.exception(\"Encountered error trying to start '%s':\", service_name)", "docstring": "Start a Windows service with the given name.\n\nArgs:\nservice_name: string The name of the service to be started.", "source": "juraj-google-style"}
{"code": "def local_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)", "docstring": "Returns local variables.\n\nLocal variables - per process variables, usually not saved/restored to\ncheckpoint and used for temporary or intermediate values.\nFor example, they can be used as counters for metrics computation or\nnumber of epochs this machine has read data.\nThe `tf.contrib.framework.local_variable()` function automatically adds the\nnew variable to `GraphKeys.LOCAL_VARIABLES`.\nThis convenience function returns the contents of that collection.\n\nAn alternative to local variables are global variables. See\n`tf.compat.v1.global_variables`\n\nArgs:\nscope: (Optional.) A string. If supplied, the resulting list is filtered to\ninclude only items whose `name` attribute matches `scope` using\n`re.match`. Items without a `name` attribute are never returned if a scope\nis supplied. The choice of `re.match` means that a `scope` without special\ntokens filters by prefix.\n\nReturns:\nA list of local `Variable` objects.", "source": "github-repos"}
{"code": "def get_dialect_name(mixed: Union[(SQLCompiler, Engine, Dialect)]) -> str:\n    dialect = get_dialect(mixed)\n    return dialect.name", "docstring": "Finds the name of the SQLAlchemy dialect in use.\n\nArgs:\nmixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or\n:class:`Dialect` object\n\nReturns: the SQLAlchemy dialect name being used", "source": "codesearchnet"}
{"code": "def are_debian_packages_installed(packages: List[str]) -> Dict[str, bool]:\n    \n    assert len(packages) >= 1\n    require_executable(DPKG_QUERY)\n    args = [\n        DPKG_QUERY,\n        \"-W\",  \n        \n        \"-f=${Package} ${Status}\\n\",  \n    ] + packages\n    completed_process = subprocess.run(args,\n                                       stdout=subprocess.PIPE,\n                                       stderr=subprocess.PIPE,\n                                       check=False)\n    encoding = sys.getdefaultencoding()\n    stdout = completed_process.stdout.decode(encoding)\n    stderr = completed_process.stderr.decode(encoding)\n    present = OrderedDict()\n    for line in stdout.split(\"\\n\"):\n        if line:  \n            words = line.split()\n            assert len(words) >= 2\n            package = words[0]\n            present[package] = \"installed\" in words[1:]\n    for line in stderr.split(\"\\n\"):\n        if line:  \n            words = line.split()\n            assert len(words) >= 2\n            package = words[-1]\n            present[package] = False\n    log.debug(\"Debian package presence: {}\", present)\n    return present", "docstring": "Check which of a list of Debian packages are installed, via ``dpkg-query``.\n\nArgs:\npackages: list of Debian package names\n\nReturns:\ndict: mapping from package name to boolean (\"present?\")", "source": "juraj-google-style"}
{"code": "def load_raw_type(self, typ: type[Any]) -> abstract.BaseValue:\n    if typ is type(None):\n        return self.consts[None]\n    pytd_node = self._pytd_loader.lookup_pytd(typ.__module__, typ.__name__)\n    return self._load_pytd_node(pytd_node)", "docstring": "Converts a raw type to an abstract value.\n\nFor convenience, this method can also be called via ctx.types[typ].\n\nArgs:\ntyp: The type.\n\nReturns:\nThe abstract representation of the type. For example, when passed `int`,\nthis function returns `abstract.SimpleClass(int)`.", "source": "github-repos"}
{"code": "def build_schedule(inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode], verbose: int=0) -> Schedule:\n    graph = infer_graph(inputs, outputs)\n    schedule = Schedule(input_nodes=graph.inputs)\n    if verbose >= 2:\n        print('Graph:\\n', graph, file=sys.stderr)\n    ready_ops: List[Operator] = []\n    ready_ops_set: Set[Operator] = set()\n    node_to_op: Dict[EventSetNode, List[Operator]] = defaultdict(lambda: [])\n    op_to_num_pending_inputs: Dict[Operator, int] = defaultdict(lambda: 0)\n    for op in graph.operators:\n        num_pending_inputs = 0\n        for input_node in op.inputs.values():\n            node_to_op[input_node].append(op)\n            if input_node in graph.inputs:\n                continue\n            num_pending_inputs += 1\n        if num_pending_inputs == 0:\n            ready_ops.append(op)\n            ready_ops_set.add(op)\n        else:\n            op_to_num_pending_inputs[op] = num_pending_inputs\n    ready_ops.sort(key=lambda op: op._internal_ordered_id, reverse=True)\n    while ready_ops:\n        op = ready_ops.pop()\n        ready_ops_set.remove(op)\n        released_nodes = []\n        for input in op.inputs.values():\n            if input in outputs:\n                continue\n            if input not in node_to_op:\n                continue\n            input_usage = node_to_op[input]\n            input_usage.remove(op)\n            if not input_usage:\n                released_nodes.append(input)\n                del node_to_op[input]\n        schedule.steps.append(ScheduleStep(op=op, released_nodes=released_nodes))\n        for output in op.outputs.values():\n            if output not in node_to_op:\n                continue\n            for new_op in node_to_op[output]:\n                assert new_op in op_to_num_pending_inputs\n                num_missing_inputs = op_to_num_pending_inputs[new_op] - 1\n                op_to_num_pending_inputs[new_op] = num_missing_inputs\n                assert num_missing_inputs >= 0\n                if num_missing_inputs == 0:\n                    ready_ops.append(new_op)\n                    ready_ops_set.add(new_op)\n                    del op_to_num_pending_inputs[new_op]\n    assert not op_to_num_pending_inputs\n    return schedule", "docstring": "Calculates which operators need to be executed in which order to compute\na set of output EventSetNodes given a set of input EventSetNodes.\n\nThis implementation is based on Kahn's algorithm.\n\nArgs:\ninputs: Input EventSetNodes.\noutputs: Output EventSetNodes.\nverbose: If >0, prints details about the execution on the standard error\noutput. The larger the number, the more information is displayed.\n\nReturns:\nTuple of:\n- Ordered list of operators, such that the first operator should be\ncomputed before the second, second before the third, etc.\n- Mapping of EventSetNode name inputs to EventSetNodes. The keys are the string\nvalues in the `inputs` argument, and the values are the EventSetNodes\ncorresponding to each one. If a value was already an EventSetNode, it won't\nbe present in the returned dictionary.", "source": "github-repos"}
{"code": "def ParseGenericRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = WindowsTimelineGenericEventData()\n\n    \n    \n    payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))\n    payload_json_string = payload_json_bytes.decode('utf-8')\n    \n    appid_entries_string = self._GetRowValue(query_hash, row, 'AppId')\n\n    payload = json.loads(payload_json_string)\n    appid_entries = json.loads(appid_entries_string)\n\n    \n    \n    package_id_locations = [\n        'packageId', 'x_exe_path', 'windows_win32', 'windows_universal',\n        'alternateId']\n    for location in package_id_locations:\n      for entry in appid_entries:\n        if entry['platform'] == location and entry['application'] != '':\n          event_data.package_identifier = entry['application']\n          break\n      if event_data.package_identifier is None:\n        \n        break\n\n    if 'description' in payload:\n      event_data.description = payload['description']\n    else:\n      event_data.description = ''\n\n    if 'appDisplayName' in payload and payload['appDisplayName'] != '':\n      event_data.application_display_name = payload['appDisplayName']\n    elif 'displayText' in payload and payload['displayText'] != '':\n      \n      event_data.application_display_name = payload['displayText']\n\n    timestamp = self._GetRowValue(query_hash, row, 'StartTime')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_START)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a generic windows timeline row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def description(self, description):\n        \n        self._data['description'] = description\n        request = self._base_request\n        request['description'] = description\n        return self._tc_requests.update(request, owner=self.owner)", "docstring": "Updates the security labels description.\n\nArgs:\ndescription:", "source": "juraj-google-style"}
{"code": "def create_single_token(self, *, payer_id, name, identification_number, payment_method, number, expiration_date):\n        \n        payload = {\n            \"language\": self.client.language.value,\n            \"command\": PaymentCommand.CREATE_TOKEN.value,\n            \"merchant\": {\n                \"apiLogin\": self.client.api_login,\n                \"apiKey\": self.client.api_key\n            },\n            \"creditCardToken\": {\n                \"payerId\": payer_id,\n                \"name\": name,\n                \"identificationNumber\": identification_number,\n                \"paymentMethod\": payment_method,\n                \"number\": number,\n                \"expirationDate\": expiration_date\n            },\n            \"test\": self.client.is_test\n        }\n        return self.client._post(self.url, json=payload)", "docstring": "Using this feature you can register a customer’s credit card data and get a token sequential number.\n\nArgs:\npayer_id:\nname:\nidentification_number:\npayment_method:\nnumber:\nexpiration_date:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def avg(vals, count=None):\n    sum = 0\n    for v in vals:\n        sum += v\n    if (count is None):\n        count = len(vals)\n    return (float(sum) / count)", "docstring": "Returns the average value\n\nArgs:\nvals: List of numbers to calculate average from.\ncount: Int of total count that vals was part of.\n\nReturns:\nFloat average value throughout a count.", "source": "codesearchnet"}
{"code": "def CopyToProto(self, proto):\n    \n    if (self.file is not None and\n        self._serialized_start is not None and\n        self._serialized_end is not None):\n      proto.ParseFromString(self.file.serialized_pb[\n          self._serialized_start:self._serialized_end])\n    else:\n      raise Error('Descriptor does not contain serialization.')", "docstring": "Copies this to the matching proto in descriptor_pb2.\n\nArgs:\nproto: An empty proto instance from descriptor_pb2.\n\nRaises:\nError: If self couldnt be serialized, due to to few constructor arguments.", "source": "juraj-google-style"}
{"code": "def DeserializeForImport(self, reader):\n        \n        super(Block, self).Deserialize(reader)\n\n        self.Transactions = []\n        transaction_length = reader.ReadVarInt()\n\n        for i in range(0, transaction_length):\n            tx = Transaction.DeserializeFrom(reader)\n            self.Transactions.append(tx)\n\n        if len(self.Transactions) < 1:\n            raise Exception('Invalid format %s ' % self.Index)", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def _handle_problem_status(self, message, future):\n        \n        try:\n            _LOGGER.trace(\"Handling response: %r\", message)\n            _LOGGER.debug(\"Handling response for %s with status %s\", message.get('id'), message.get('status'))\n\n            \n            if 'error_code' in message and 'error_msg' in message:\n                raise SolverFailureError(message['error_msg'])\n\n            if 'status' not in message:\n                raise InvalidAPIResponseError(\"'status' missing in problem description response\")\n            if 'id' not in message:\n                raise InvalidAPIResponseError(\"'id' missing in problem description response\")\n\n            future.id = message['id']\n            future.remote_status = status = message['status']\n\n            \n            with future._single_cancel_lock:\n                \n                \n                if future._cancel_requested:\n                    if not future._cancel_sent and status == self.STATUS_PENDING:\n                        \n                        \n                        self._cancel(message['id'], future)\n                    \n                    future._cancel_sent = True\n\n            if not future.time_received and message.get('submitted_on'):\n                future.time_received = parse_datetime(message['submitted_on'])\n\n            if not future.time_solved and message.get('solved_on'):\n                future.time_solved = parse_datetime(message['solved_on'])\n\n            if not future.eta_min and message.get('earliest_estimated_completion'):\n                future.eta_min = parse_datetime(message['earliest_estimated_completion'])\n\n            if not future.eta_max and message.get('latest_estimated_completion'):\n                future.eta_max = parse_datetime(message['latest_estimated_completion'])\n\n            if status == self.STATUS_COMPLETE:\n                \n                \n                \n                \n\n                \n                if 'answer' in message:\n                    future._set_message(message)\n                \n                \n                else:\n                    self._load(future)\n            elif status in self.ANY_STATUS_ONGOING:\n                \n                self._poll(future)\n            elif status == self.STATUS_CANCELLED:\n                \n                raise CanceledFutureError()\n            else:\n                \n                errmsg = message.get('error_message', 'An unknown error has occurred.')\n                if 'solver is offline' in errmsg.lower():\n                    raise SolverOfflineError(errmsg)\n                else:\n                    raise SolverFailureError(errmsg)\n\n        except Exception as error:\n            \n            \n            future._set_error(error, sys.exc_info())", "docstring": "Handle the results of a problem submission or results request.\n\nThis method checks the status of the problem and puts it in the correct queue.\n\nArgs:\nmessage (dict): Update message from the SAPI server wrt. this problem.\nfuture `Future`: future corresponding to the problem\n\nNote:\nThis method is always run inside of a daemon thread.", "source": "juraj-google-style"}
{"code": "def _read_range(self, start, end=0):\n        \n        \n        try:\n            with _handle_client_error():\n                response = self._client.get_object(\n                    Range=self._http_range(start, end), **self._client_kwargs)\n\n        \n        except _ClientError as exception:\n            if exception.response['Error']['Code'] == 'InvalidRange':\n                \n                return bytes()\n            raise\n\n        \n        return response['Body'].read()", "docstring": "Read a range of bytes in stream.\n\nArgs:\nstart (int): Start stream position.\nend (int): End stream position.\n0 To not specify end.\n\nReturns:\nbytes: number of bytes read", "source": "juraj-google-style"}
{"code": "def merge(self, other):\n    if (not isinstance(other, Ontology)):\n        raise TypeError(\"'merge' requires an Ontology as argument, not {}\".format(type(other)))\n    self.terms.update(other.terms)\n    self._empty_cache()\n    self.adopt()\n    self.reference()", "docstring": "Merge another ontology into the current one.\n\nRaises:\nTypeError: When argument is not an Ontology object.\n\nExample:\n>>> from pronto import Ontology\n>>> nmr = Ontology('tests/resources/nmrCV.owl', False)\n>>> po = Ontology('tests/resources/po.obo.gz', False)\n>>> 'NMR:1000271' in nmr\nTrue\n>>> 'NMR:1000271' in po\nFalse\n>>> po.merge(nmr)\n>>> 'NMR:1000271' in po\nTrue", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration\n\n>>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained(\"facebook/blenderbot_small-90M\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot_small-90M\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = 
jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def _QueryHashes(self, digests):\n    \n    url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}\n\n    try:\n      json_response = self.MakeRequestAndDecodeJSON(\n          self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)\n    except errors.ConnectionError as exception:\n      json_response = None\n      logger.error('Unable to query VirusTotal with error: {0!s}.'.format(\n          exception))\n\n    return json_response", "docstring": "Queries VirusTotal for a specfic hashes.\n\nArgs:\ndigests (list[str]): hashes to look up.\n\nReturns:\ndict[str, object]: JSON response or None on error.", "source": "juraj-google-style"}
{"code": "def run(argv=None):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--input', dest='input', default='gs:\n    parser.add_argument('--output', dest='output', help='Output file to write results to.', required=True)\n    known_args, pipeline_args = parser.parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = True\n    with beam.Pipeline(options=pipeline_options) as p:\n        p | ReadFromAvro(known_args.input) | beam.Filter(lambda record: all((record[k] is not None for k in ('hvfhs_license_num', 'trip_miles', 'trip_time', 'base_passenger_fare', 'tips', 'driver_pay'))) and any((record[k] is not None for k in ('request_datetime', 'on_scene_datetime', 'pickup_datetime', 'dropoff_datetime')))) | beam.ParDo(CreateKeyWithServiceAndDay()) | beam.CombinePerKey(CalculatePricePerAttribute()) | beam.Map(flatten_group) | WriteToAvro(known_args.output, SCHEMA, file_name_suffix='.avro')", "docstring": "Runs the New York City trips pipeline.\n\nArgs:\nargv: Pipeline options as a list of arguments.", "source": "github-repos"}
{"code": "def as_dataframe(self, pattern='*', max_rows=None):\n    \n    data = []\n    for i, resource in enumerate(self.list(pattern)):\n      if max_rows is not None and i >= max_rows:\n        break\n      labels = ', '. join([l.key for l in resource.labels])\n      data.append([resource.type, resource.display_name, labels])\n\n    return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)", "docstring": "Creates a pandas dataframe from the descriptors that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the descriptors. This can\ninclude Unix shell-style wildcards. E.g. ``\"aws*\"``, ``\"*cluster*\"``.\nmax_rows: The maximum number of descriptors to return. If None, return\nall.\n\nReturns:\nA pandas dataframe containing matching resource descriptors.", "source": "juraj-google-style"}
{"code": "def setup(self, disk_name, project, turbinia_zone):\n    \n    \n    \n\n    if project is None or turbinia_zone is None:\n      self.state.add_error(\n          'project or turbinia_zone are not all specified, bailing out',\n          critical=True)\n      return\n\n    self.disk_name = disk_name\n    self.project = project\n    self.turbinia_zone = turbinia_zone\n\n    try:\n      turbinia_config.LoadConfig()\n      self.turbinia_region = turbinia_config.TURBINIA_REGION\n      self.instance = turbinia_config.PUBSUB_TOPIC\n      if turbinia_config.PROJECT != self.project:\n        self.state.add_error(\n            'Specified project {0:s} does not match Turbinia configured '\n            'project {1:s}. Use gcp_turbinia_import recipe to copy the disk '\n            'into the same project.'.format(\n                self.project, turbinia_config.PROJECT), critical=True)\n        return\n      self._output_path = tempfile.mkdtemp()\n      self.client = turbinia_client.TurbiniaClient()\n    except TurbiniaException as e:\n      self.state.add_error(e, critical=True)\n      return", "docstring": "Sets up the object attributes.\n\nArgs:\ndisk_name (string): Name of the disk to process\nproject (string): The project containing the disk to process\nturbinia_zone (string): The zone containing the disk to process", "source": "juraj-google-style"}
{"code": "def persist_project(project):\n    from benchbuild.utils.schema import Project, Session\n    session = Session()\n    projects = session.query(Project).filter((Project.name == project.name)).filter((Project.group_name == project.group))\n    name = project.name\n    desc = project.__doc__\n    domain = project.domain\n    group_name = project.group\n    version = (project.version() if callable(project.version) else project.version)\n    try:\n        src_url = project.src_uri\n    except AttributeError:\n        src_url = 'unknown'\n    if (projects.count() == 0):\n        newp = Project()\n        newp.name = name\n        newp.description = desc\n        newp.src_url = src_url\n        newp.domain = domain\n        newp.group_name = group_name\n        newp.version = version\n        session.add(newp)\n    else:\n        newp_value = {'name': name, 'description': desc, 'src_url': src_url, 'domain': domain, 'group_name': group_name, 'version': version}\n        projects.update(newp_value)\n    session.commit()\n    return (projects, session)", "docstring": "Persist this project in the benchbuild database.\n\nArgs:\nproject: The project we want to persist.", "source": "codesearchnet"}
{"code": "def drawdown_details(drawdown, index_type=pd.DatetimeIndex):\n    \n\n    is_zero = drawdown == 0\n    \n    start = ~is_zero & is_zero.shift(1)\n    start = list(start[start == True].index)  \n\n    \n    end = is_zero & (~is_zero).shift(1)\n    end = list(end[end == True].index)  \n\n    if len(start) is 0:\n        return None\n\n    \n    if len(end) is 0:\n        end.append(drawdown.index[-1])\n\n    \n    \n    \n    if start[0] > end[0]:\n        start.insert(0, drawdown.index[0])\n\n    \n    \n    if start[-1] > end[-1]:\n        end.append(drawdown.index[-1])\n\n    result = pd.DataFrame(\n        columns=('Start', 'End', 'Length', 'drawdown'),\n        index=range(0, len(start))\n    )\n\n    for i in range(0, len(start)):\n        dd = drawdown[start[i]:end[i]].min()\n\n        if index_type is pd.DatetimeIndex:\n            result.iloc[i] = (start[i], end[i], (end[i] - start[i]).days, dd)\n        else:\n            result.iloc[i] = (start[i], end[i], (end[i] - start[i]), dd)\n\n    return result", "docstring": "Returns a data frame with start, end, days (duration) and\ndrawdown for each drawdown in a drawdown series.\n\n.. note::\n\ndays are actual calendar days, not trading days\n\nArgs:\n* drawdown (pandas.Series): A drawdown Series\n(can be obtained w/ drawdown(prices).\nReturns:\n* pandas.DataFrame -- A data frame with the following\ncolumns: start, end, days, drawdown.", "source": "juraj-google-style"}
{"code": "def modify_lattice(self, new_lattice):\n        \n        self._lattice = new_lattice\n        for site in self._sites:\n            site.lattice = new_lattice", "docstring": "Modify the lattice of the structure.  Mainly used for changing the\nbasis.\n\nArgs:\nnew_lattice (Lattice): New lattice", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, return_attentions: Optional[bool]=False, interpolate_pos_encoding: Optional[bool]=False):\n    vision_model_output = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)\n    image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])\n    image_embeds = nn.functional.normalize(image_embeds, dim=-1)\n    image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)\n    if return_attentions:\n        return (image_embeds, projection_attentions)\n    return image_embeds", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\nreturn_attentions (`bool`, *optional*, defaults to `False`):\nWhether to return `projection_attentions` or not.\ninterpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\nWhether to interpolate positional embeddings or not.", "source": "github-repos"}
{"code": "def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:\n        \n        with self.graph.as_default():\n            with tf.name_scope('initial_state'):\n                self._initialize_initial_state_fluents()\n                if batch_size is None:\n                    return self.initial_state_fluents\n                return self._compile_batch_fluents(self.initial_state_fluents, batch_size)", "docstring": "Returns a tuple of tensors representing the initial state fluents.\n\nArgs:\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nSequence[tf.Tensor]: A tuple of tensors.", "source": "juraj-google-style"}
{"code": "def extractUnits(self, inp):\n    inp = self._preprocess(inp)\n    units = []\n    description = ''\n    for w in inp.split(' '):\n        if (self.isValidUnit(w) or (w == '/')):\n            if description:\n                description += ' '\n            description += w\n        else:\n            if description:\n                units.append(description)\n            description = ''\n    if description:\n        units.append(description)\n    return units", "docstring": "Collects all the valid units from an inp string. Works by\nappending consecutive words from the string and cross-referncing\nthem with a set of valid units.\n\nArgs:\ninp (str): Some text which hopefully contains descriptions\nof different units.\n\nReturns:\nA list of strings, each entry in which is a valid quantities\nunit.", "source": "codesearchnet"}
{"code": "def _ApproxTopKGradient(op: ops.Operation, grad, _):\n    idx_shape = op.outputs[1].shape\n    lifted_idx_shape = idx_shape + [1]\n    flat_shape_len = functools.reduce(operator.mul, idx_shape)\n    rank = idx_shape.rank\n    reduction_dim = op.get_attr('reduction_dimension')\n    if reduction_dim < 0:\n        reduction_dim = rank + reduction_dim\n\n    def GetLiftedIdx(d):\n        if d == reduction_dim:\n            return array_ops.reshape(op.outputs[1], lifted_idx_shape)\n        iota_len = idx_shape[d]\n        iota_shape = list(itertools.repeat(1, rank + 1))\n        iota_shape[d] = iota_len\n        iota = array_ops.reshape(math_ops.range(iota_len), iota_shape)\n        return array_ops.broadcast_to(iota, lifted_idx_shape)\n    lifted_idx = array_ops.concat(list((GetLiftedIdx(d) for d in range(rank))), axis=rank)\n    flat_idx = array_ops.reshape(lifted_idx, [flat_shape_len, rank])\n    flat_grad = array_ops.reshape(grad, [flat_shape_len])\n    return array_ops.scatter_nd(flat_idx, flat_grad, op.inputs[0].shape)", "docstring": "Return the gradients for ApproxTopK.\n\nArgs:\nop: The ApproxTopK for which we need to generate gradients.\ngrad: The gradients for backprop.\n\nReturns:\nScattered gradient based on the top-k indices.", "source": "github-repos"}
{"code": "def CreateSignatureScanner(cls, specification_store):\n    scanner_object = pysigscan.scanner()\n    for format_specification in specification_store.specifications:\n        for signature in format_specification.signatures:\n            pattern_offset = signature.offset\n            if (pattern_offset is None):\n                signature_flags = pysigscan.signature_flags.NO_OFFSET\n            elif (pattern_offset < 0):\n                pattern_offset *= (- 1)\n                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END\n            else:\n                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START\n            scanner_object.add_signature(signature.identifier, pattern_offset, signature.pattern, signature_flags)\n    return scanner_object", "docstring": "Creates a signature scanner for format specifications with signatures.\n\nArgs:\nspecification_store (FormatSpecificationStore): format specifications\nwith signatures.\n\nReturns:\npysigscan.scanner: signature scanner.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.Alarm = channel.unary_unary(\n        '/etcdserverpb.Maintenance/Alarm',\n        request_serializer=rpc__pb2.AlarmRequest.SerializeToString,\n        response_deserializer=rpc__pb2.AlarmResponse.FromString,\n        )\n    self.Status = channel.unary_unary(\n        '/etcdserverpb.Maintenance/Status',\n        request_serializer=rpc__pb2.StatusRequest.SerializeToString,\n        response_deserializer=rpc__pb2.StatusResponse.FromString,\n        )\n    self.Defragment = channel.unary_unary(\n        '/etcdserverpb.Maintenance/Defragment',\n        request_serializer=rpc__pb2.DefragmentRequest.SerializeToString,\n        response_deserializer=rpc__pb2.DefragmentResponse.FromString,\n        )\n    self.Hash = channel.unary_unary(\n        '/etcdserverpb.Maintenance/Hash',\n        request_serializer=rpc__pb2.HashRequest.SerializeToString,\n        response_deserializer=rpc__pb2.HashResponse.FromString,\n        )\n    self.HashKV = channel.unary_unary(\n        '/etcdserverpb.Maintenance/HashKV',\n        request_serializer=rpc__pb2.HashKVRequest.SerializeToString,\n        response_deserializer=rpc__pb2.HashKVResponse.FromString,\n        )\n    self.Snapshot = channel.unary_stream(\n        '/etcdserverpb.Maintenance/Snapshot',\n        request_serializer=rpc__pb2.SnapshotRequest.SerializeToString,\n        response_deserializer=rpc__pb2.SnapshotResponse.FromString,\n        )\n    self.MoveLeader = channel.unary_unary(\n        '/etcdserverpb.Maintenance/MoveLeader',\n        request_serializer=rpc__pb2.MoveLeaderRequest.SerializeToString,\n        response_deserializer=rpc__pb2.MoveLeaderResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def __init__(self, fname):\n        \n        self.file = fname\n        self.folder = os.path.dirname(fname)\n        self.tree = ET.parse(fname)\n        self.root = self.tree.getroot()\n        self.name = self.root.get(\"model\")\n        self.worldbody = self.create_default_element(\"worldbody\")\n        self.actuator = self.create_default_element(\"actuator\")\n        self.asset = self.create_default_element(\"asset\")\n        self.equality = self.create_default_element(\"equality\")\n        self.contact = self.create_default_element(\"contact\")\n        self.default = self.create_default_element(\"default\")\n        self.resolve_asset_dependency()", "docstring": "Loads a mujoco xml from file.\n\nArgs:\nfname (str): path to the MJCF xml file.", "source": "juraj-google-style"}
{"code": "def dump(self, file, payload):\n    json.dump(payload, file, indent=2, ensure_ascii=False)", "docstring": "Dump json oject to open file output.\n\nWrites json with 2 spaces indentation.\n\nArgs:\nfile: Open file-like object. Must be open for writing.\npayload: The Json object to write to file.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def create_folder(self, name, parent_folder_id=0):\n    return self.__request('POST', 'folders', data={'name': name, 'parent': {'id': unicode(parent_folder_id)}})", "docstring": "Create a folder\n\nIf the folder exists, a BoxError will be raised.\n\nArgs:\nfolder_id (int): Name of the folder.\n\nparent_folder_id (int): ID of the folder where to create the new one.\n\nReturns:\ndict. Response from Box.\n\nRaises:\nBoxError: An error response is returned from Box (status_code >= 400).\n\nBoxHttpResponseError: Response from Box is malformed.\n\nrequests.exceptions.*: Any connection related problem.", "source": "codesearchnet"}
{"code": "def draw_line(self, x1, y1, x2, y2):\n    check_int_err(lib.SDL_RenderDrawLine(self._ptr, x1, y1, x2, y2))", "docstring": "Draw a line on the current rendering target.\n\nArgs:\nx1 (int): The x coordinate of the start point.\ny1 (int): The y coordinate of the start point.\nx2 (int): The x coordinate of the end point.\ny2 (int): The y coordinate of the end point.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def from_rtm(cls, raw_event: MutableMapping) -> 'Event':\n    if raw_event['type'].startswith('message'):\n        return Message(raw_event)\n    else:\n        return Event(raw_event)", "docstring": "Create an event with data coming from the RTM API.\n\nIf the event type is a message a :class:`slack.events.Message` is returned.\n\nArgs:\nraw_event: JSON decoded data from the RTM API\n\nReturns:\n:class:`slack.events.Event` or :class:`slack.events.Message`", "source": "codesearchnet"}
{"code": "def describe_images(self, idaho_image_results):\n        \n\n        results = idaho_image_results['results']\n\n        \n        results = [r for r in results if 'IDAHOImage' in r['type']]\n        self.logger.debug('Describing %s IDAHO images.' % len(results))\n\n        \n        catids = set([r['properties']['catalogID'] for r in results])\n\n        description = {}\n\n        for catid in catids:\n            \n            description[catid] = {}\n            description[catid]['parts'] = {}\n            images = [r for r in results if r['properties']['catalogID'] == catid]\n            for image in images:\n                description[catid]['sensorPlatformName'] = image['properties']['sensorPlatformName']\n                part = int(image['properties']['vendorDatasetIdentifier'].split(':')[1][-3:])\n                color = image['properties']['colorInterpretation']\n                bucket = image['properties']['tileBucketName']\n                identifier = image['identifier']\n                boundstr = image['properties']['footprintWkt']\n\n                try:\n                    description[catid]['parts'][part]\n                except:\n                    description[catid]['parts'][part] = {}\n\n                description[catid]['parts'][part][color] = {}\n                description[catid]['parts'][part][color]['id'] = identifier\n                description[catid]['parts'][part][color]['bucket'] = bucket\n                description[catid]['parts'][part][color]['boundstr'] = boundstr\n\n        return description", "docstring": "Describe the result set of a catalog search for IDAHO images.\n\nArgs:\nidaho_image_results (dict): Result set of catalog search.\nReturns:\nresults (json): The full catalog-search response for IDAHO images\ncorresponding to the given catID.", "source": "juraj-google-style"}
{"code": "def inspect(logdir='', event_file='', tag=''):\n  \n  print(PRINT_SEPARATOR +\n        'Processing event files... (this can take a few minutes)\\n' +\n        PRINT_SEPARATOR)\n  inspection_units = get_inspection_units(logdir, event_file, tag)\n\n  for unit in inspection_units:\n    if tag:\n      print('Event statistics for tag {} in {}:'.format(tag, unit.name))\n    else:\n      \n      \n      print('These tags are in {}:'.format(unit.name))\n      print_dict(get_unique_tags(unit.field_to_obs))\n      print(PRINT_SEPARATOR)\n      print('Event statistics for {}:'.format(unit.name))\n\n    print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))\n    print(PRINT_SEPARATOR)", "docstring": "Main function for inspector that prints out a digest of event files.\n\nArgs:\nlogdir: A log directory that contains event files.\nevent_file: Or, a particular event file path.\ntag: An optional tag name to query for.\n\nRaises:\nValueError: If neither logdir and event_file are given, or both are given.", "source": "juraj-google-style"}
{"code": "def get_output_shape_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape')", "docstring": "Retrieves the output shape(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom which to retrieve the attribute.\nE.g. `node_index=0` will correspond to the\nfirst time the layer was called.\n\nReturns:\nA shape tuple\n(or list of shape tuples if the layer has multiple outputs).\n\nRaises:\nRuntimeError: If called in Eager mode.", "source": "github-repos"}
{"code": "def node_defs(self):\n    return self._node_defs", "docstring": "All the node defs in the graph to be converted.\n\nReturns:\nA map from node name to the NodeDef for all NodeDefs in the graph, as well\nas all control flow NodeDefs in the functions.", "source": "github-repos"}
{"code": "def _get_local_folder(self, root=None):\n    if (root is None):\n        root = Path()\n    for folders in (['.'], [self.user, self.napp]):\n        kytos_json = ((root / Path(*folders)) / 'kytos.json')\n        if kytos_json.exists():\n            with kytos_json.open() as file_descriptor:\n                meta = json.load(file_descriptor)\n                username = meta.get('username', meta.get('author'))\n                if ((username == self.user) and (meta.get('name') == self.napp)):\n                    return kytos_json.parent\n    raise FileNotFoundError('kytos.json not found.')", "docstring": "Return local NApp root folder.\n\nSearch for kytos.json in _./_ folder and _./user/napp_.\n\nArgs:\nroot (pathlib.Path): Where to begin searching.\n\nReturn:\npathlib.Path: NApp root folder.\n\nRaises:\nFileNotFoundError: If there is no such local NApp.", "source": "codesearchnet"}
{"code": "def assert_present(self, selector, testid=None, **kwargs):\n    self.info_log(('Assert present selector(%s) testid(%s)' % (selector, testid)))\n    wait_until_present = kwargs.get('wait_until_present', BROME_CONFIG['proxy_driver']['wait_until_present_before_assert_present'])\n    self.debug_log(('effective wait_until_present: %s' % wait_until_present))\n    if wait_until_present:\n        element = self.wait_until_present(selector, raise_exception=False)\n    else:\n        element = self.is_present(selector)\n    if element:\n        if (testid is not None):\n            self.create_test_result(testid, True)\n        return True\n    else:\n        if (testid is not None):\n            self.create_test_result(testid, False)\n        return False", "docstring": "Assert that the element is present in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntest_id (str): the test_id or a str\n\nKwargs:\nwait_until_present (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "codesearchnet"}
{"code": "def open(self, mode='r', encoding=None):\n        \n        access_type = self._get_access_type(mode)\n\n        if encoding is None:\n            encoding = self.encoding\n\n        \n        if access_type == 'b':\n            if not self._isbytes:\n                content = self._contents.encode(encoding)  \n            else:\n                content = self._contents  \n            return io.BytesIO(content)\n        else:\n            assert access_type == 't'\n            if PYVERSION == 2 and self._isbytes:\n                return io.BytesIO(self._contents)  \n            elif self._isbytes:\n                content = self._contents.decode(encoding)  \n            else:\n                content = self._contents  \n            return io.StringIO(content)", "docstring": "Return file-like object\n\nArgs:\nmode (str): access mode (only reading modes are supported)\nencoding (str): encoding type (only for binary access)\n\nReturns:\nio.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters", "source": "juraj-google-style"}
{"code": "def get_permissions(self, grp_name, resource):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_permissions(grp_name, resource)", "docstring": "Get permissions associated the group has with the given resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data\nmodel object to operate on.\n\nReturns:\n(list): List of permissions.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def patch_mask(patch: dict) -> dict:\n\n    def _patch_mask(body: dict) -> list:\n        \n        mask = set()\n        if isinstance(body, dict):\n            for parent, value in body.items():\n                children = _patch_mask(value)\n                if children and parent not in ('budgetSegments', 'partnerCosts'):\n                    for child in children:\n                        mask.add(parent + '.' + child)\n                else:\n                    mask.add(parent)\n        elif isinstance(body, (list, tuple)):\n            for value in body:\n                mask.update(_patch_mask(value))\n        return list(mask)\n    mask = ','.join(_patch_mask(patch['parameters'].get('body')))\n    if mask:\n        patch['parameters']['updateMask'] = mask\n    return patch", "docstring": "Adds an update mask to a patch based on keys in patch.\n\nOperates under assumption that all fields prsent in update will be updated.\nImmediately wraps a nested function to perform actual patch logic using generator.\n\nArgs:\npatch: {\n\"operation\": IGNORED,\n\"action\": IGNORED,\n\"partner\": IGNORED,\n\"advertiser\": IGNORED,\n\"campaign\": IGNORED,\n\"parameters\": {\n\"body\": { PATCH IS CONSTRUCTED FROM THIS }\n}\n}\n\nReturns:\nPatch with ['parameters']['updateMask'] added.", "source": "github-repos"}
{"code": "def break_before_sequence(chunks: typing.List[str], sequence: str) -> typing.List[str]:\n    chunks = utils.SEP.join(chunks).replace(sequence, utils.SEP + sequence).split(utils.SEP)\n    chunks = [chunk for chunk in chunks if len(chunk) > 0]\n    return chunks", "docstring": "Breaks chunks before a specified character sequence appears.\n\nArgs:\nchunks (List[str]): Chunks to break.\nsequence (str): A character sequence to break chunks before.\n\nReturns:\nProcessed chunks.", "source": "github-repos"}
{"code": "def from_structures(structures, transformations=None, extend_collection=0):\n    tstruct = [TransformedStructure(s, []) for s in structures]\n    return StandardTransmuter(tstruct, transformations, extend_collection)", "docstring": "Alternative constructor from structures rather than\nTransformedStructures.\n\nArgs:\nstructures: Sequence of structures\ntransformations: New transformations to be applied to all\nstructures\nextend_collection: Whether to use more than one output structure\nfrom one-to-many transformations. extend_collection can be a\nnumber, which determines the maximum branching for each\ntransformation.\n\nReturns:\nStandardTransmuter", "source": "codesearchnet"}
{"code": "def get_attribute_list(self, uid=None):\n    batch_item = self._build_get_attribute_list_batch_item(uid)\n    request = self._build_request_message(None, [batch_item])\n    response = self._send_and_receive_message(request)\n    results = self._process_batch_items(response)\n    return results[0]", "docstring": "Send a GetAttributeList request to the server.\n\nArgs:\nuid (string): The ID of the managed object with which the retrieved\nattribute names should be associated.\n\nReturns:\nresult (GetAttributeListResult): A structure containing the results\nof the operation.", "source": "codesearchnet"}
{"code": "def add_severity(self, name, value):\n        \n        logger.debug(\"Adding severity {0} with value {1} to variant {2}\".format(\n            name, value, self['variant_id']))\n        self['severities'].append({name: value})", "docstring": "Add a severity to the variant\n\nArgs:\nname (str): The name of the severity\nvalue : The value of the severity", "source": "juraj-google-style"}
{"code": "def center_of_mass(self, time):\n    if (self.start_time <= time <= self.end_time):\n        diff = (time - self.start_time)\n        valid = np.flatnonzero((self.masks[diff] != 0))\n        if (valid.size > 0):\n            com_x = ((1.0 / self.timesteps[diff].ravel()[valid].sum()) * np.sum((self.timesteps[diff].ravel()[valid] * self.x[diff].ravel()[valid])))\n            com_y = ((1.0 / self.timesteps[diff].ravel()[valid].sum()) * np.sum((self.timesteps[diff].ravel()[valid] * self.y[diff].ravel()[valid])))\n        else:\n            com_x = np.mean(self.x[diff])\n            com_y = np.mean(self.y[diff])\n    else:\n        com_x = None\n        com_y = None\n    return (com_x, com_y)", "docstring": "Calculate the center of mass at a given timestep.\n\nArgs:\ntime: Time at which the center of mass calculation is performed\n\nReturns:\nThe x- and y-coordinates of the center of mass.", "source": "codesearchnet"}
{"code": "def get_block_iter(self, start_block=None, start_block_num=None, reverse=True):\n    start = None\n    if start_block_num:\n        if (len(start_block_num) < 2):\n            raise ValueError('Invalid start block num')\n        if (start_block_num[:2] != '0x'):\n            raise ValueError('Invalid start block num')\n        start = int(start_block_num, 16)\n    elif start_block:\n        start = start_block.block_num\n    return _BlockStoreIter(self.pointer, start, reverse)", "docstring": "Returns an iterator that traverses blocks in block number order.\n\nArgs:\nstart_block (:obj:`BlockWrapper`): the block from which traversal\nbegins\nstart_block_num (str): a starting block number, in hex, from where\ntraversal begins; only used if no starting_block is provided\n\nreverse (bool): If True, traverse the blocks in from most recent\nto oldest block. Otherwise, it traverse the blocks in the\nopposite order.\n\nReturns:\nAn iterator of block wrappers\n\nRaises:\nValueError: If start_block or start_block_num do not specify a\nvalid block", "source": "codesearchnet"}
{"code": "def _infer_mutants_handler(self, request):\n    \n    try:\n      if request.method != 'GET':\n        logger.error('%s requests are forbidden.', request.method)\n        return http_util.Respond(request, {'error': 'invalid non-GET request'},\n                                 'application/json', code=405)\n\n      example_index = int(request.args.get('example_index', '0'))\n      feature_name = request.args.get('feature_name')\n      examples = (self.examples if example_index == -1\n          else [self.examples[example_index]])\n\n      (inference_addresses, model_names, model_versions,\n          model_signatures) = self._parse_request_arguments(request)\n\n      serving_bundles = []\n      for model_num in xrange(len(inference_addresses)):\n        serving_bundles.append(inference_utils.ServingBundle(\n            inference_addresses[model_num],\n            model_names[model_num],\n            request.args.get('model_type'),\n            model_versions[model_num],\n            model_signatures[model_num],\n            request.args.get('use_predict') == 'true',\n            request.args.get('predict_input_tensor'),\n            request.args.get('predict_output_tensor')))\n\n      viz_params = inference_utils.VizParams(\n          request.args.get('x_min'), request.args.get('x_max'),\n          self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,\n          request.args.get('feature_index_pattern'))\n      json_mapping = inference_utils.mutant_charts_for_feature(\n          examples, feature_name, serving_bundles, viz_params)\n      return http_util.Respond(request, json_mapping, 'application/json')\n    except common_utils.InvalidUserInputError as e:\n      return http_util.Respond(request, {'error': e.message},\n                               'application/json', code=400)", "docstring": "Returns JSON for the `vz-line-chart`s for a feature.\n\nArgs:\nrequest: A request that should contain 'feature_name', 'example_index',\n'inference_address', 'model_name', 'model_type', 'model_version', and\n'model_signature'.\n\nReturns:\nA list of JSON objects, one for each chart.", "source": "juraj-google-style"}
{"code": "def persist_time(run, session, timings):\n    \n    from benchbuild.utils import schema as s\n\n    for timing in timings:\n        session.add(\n            s.Metric(name=\"time.user_s\", value=timing[0], run_id=run.id))\n        session.add(\n            s.Metric(name=\"time.system_s\", value=timing[1], run_id=run.id))\n        session.add(\n            s.Metric(name=\"time.real_s\", value=timing[2], run_id=run.id))", "docstring": "Persist the run results in the database.\n\nArgs:\nrun: The run we attach this timing results to.\nsession: The db transaction we belong to.\ntimings: The timing measurements we want to store.", "source": "juraj-google-style"}
{"code": "def set_computer_desc(desc=None):\n    if six.PY2:\n        desc = _to_unicode(desc)\n    system_info = win32net.NetServerGetInfo(None, 101)\n    if (desc is None):\n        return False\n    system_info['comment'] = desc\n    try:\n        win32net.NetServerSetInfo(None, 101, system_info)\n    except win32net.error as exc:\n        (number, context, message) = exc.args\n        log.error('Failed to update system')\n        log.error('nbr: %s', number)\n        log.error('ctx: %s', context)\n        log.error('msg: %s', message)\n        return False\n    return {'Computer Description': get_computer_desc()}", "docstring": "Set the Windows computer description\n\nArgs:\n\ndesc (str):\nThe computer description\n\nReturns:\nstr: Description if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'", "source": "codesearchnet"}
{"code": "def __init__(self, logger, gccxml_cvs_revision=None, castxml_format=None):\n        \n\n        if castxml_format is not None and gccxml_cvs_revision is not None:\n            raise RuntimeError(\"Setting both gccxml_cvs_revision and\"\n                               \"castxml_format is not allowed!\")\n\n        self._is_castxml1 = False\n        self._is_castxml = False\n        self._is_gccxml = False\n\n        if castxml_format is not None:\n            self._xml_generator_version = self.__castxml\n            self._xml_output_version = castxml_format\n            self._is_castxml = True\n            self._is_castxml1 = True\n        elif gccxml_cvs_revision is not None:\n            self._xml_generator_version, self._xml_output_version = \\\n                self.__extract_versions(logger, gccxml_cvs_revision)\n            self._is_gccxml = \"GCC-XML\" in self._xml_generator_version\n            self._is_castxml = \"CastXML\" in self._xml_generator_version\n        else:\n            raise RuntimeError(\"Either castxml_format or gccxml_cvs_revision\"\n                               \"need to be defined!\")", "docstring": "Create a new xml_generators object.\n\nArgs:\nlogger (logging.Logger) : a logger for debugging output\ngccxml_cvs_revision (str|None): the xml output version\ncastxml_format (str|None): the xml output version", "source": "juraj-google-style"}
{"code": "def ParseFileDownloadedRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = ChromeHistoryFileDownloadedEventData()\n    event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.received_bytes = self._GetRowValue(query_hash, row, 'received_bytes')\n    event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    timestamp = self._GetRowValue(query_hash, row, 'start_time')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a file downloaded row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):\n    workbook = create_excel_workbook(data, result_info_key, identifier_keys)\n    workbook.save(output_file_name)\n    print('Saved Excel file to {}'.format(output_file_name))", "docstring": "Creates an Excel file containing data returned by the Analytics API\n\nArgs:\ndata: Analytics API data as a list of dicts\noutput_file_name: File name for output Excel file (use .xlsx extension).", "source": "codesearchnet"}
{"code": "def copy_script(self, filename, id_=(- 1)):\n    for repo in self._children:\n        repo.copy_script(filename, id_)", "docstring": "Copy a script to all repositories.\n\nTakes into account whether a JSS has been migrated. See the\nindividual DistributionPoint types for more information.\n\nArgs:\nfilename: String path to the local file to copy.\nid_: Integer ID you wish to associate script with for a JDS\nor CDP only. Default is -1, which is used for creating\na new script object in the database.", "source": "codesearchnet"}
{"code": "def check_integrity(sakefile, settings):\n    sprint = settings['sprint']\n    error = settings['error']\n    sprint('Call to check_integrity issued', level='verbose')\n    if (not sakefile):\n        error('Sakefile is empty')\n        return False\n    if (len(sakefile.keys()) != len(set(sakefile.keys()))):\n        error('Sakefile contains duplicate targets')\n        return False\n    for target in sakefile:\n        if (target == 'all'):\n            if (not check_target_integrity(target, sakefile['all'], all=True)):\n                error(\"Failed to accept target 'all'\")\n                return False\n            continue\n        if ('formula' not in sakefile[target]):\n            if (not check_target_integrity(target, sakefile[target], meta=True)):\n                errmes = \"Failed to accept meta-target '{}'\".format(target)\n                error(errmes)\n                return False\n            for atom_target in sakefile[target]:\n                if (atom_target == 'help'):\n                    continue\n                if (not check_target_integrity(atom_target, sakefile[target][atom_target], parent=target)):\n                    errmes = \"Failed to accept target '{}'\\n\".format(atom_target)\n                    error(errmes)\n                    return False\n            continue\n        if (not check_target_integrity(target, sakefile[target])):\n            errmes = \"Failed to accept target '{}'\\n\".format(target)\n            error(errmes)\n            return False\n    return True", "docstring": "Checks the format of the sakefile dictionary\nto ensure it conforms to specification\n\nArgs:\nA dictionary that is the parsed Sakefile (from sake.py)\nThe setting dictionary (for print functions)\nReturns:\nTrue if the Sakefile is conformant\nFalse if not", "source": "codesearchnet"}
{"code": "def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n    batch_outputs = {}\n    for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):\n        batch_text_or_text_pair, boxes_example = example\n        outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs", "source": "github-repos"}
{"code": "def do_reset_ids(concatenated_meta_df, data_df, concat_direction):\n    if (concat_direction == 'horiz'):\n        assert concatenated_meta_df.index.equals(data_df.columns), 'cids in concatenated_meta_df do not agree with cids in data_df.'\n        reset_ids_in_meta_df(concatenated_meta_df)\n        data_df.columns = pd.Index(concatenated_meta_df.index.values)\n    elif (concat_direction == 'vert'):\n        assert concatenated_meta_df.index.equals(data_df.index), 'rids in concatenated_meta_df do not agree with rids in data_df.'\n        reset_ids_in_meta_df(concatenated_meta_df)\n        data_df.index = pd.Index(concatenated_meta_df.index.values)", "docstring": "Reset ids in concatenated metadata and data dfs to unique integers and\nsave the old ids in a metadata column.\n\nNote that the dataframes are modified in-place.\n\nArgs:\nconcatenated_meta_df (pandas df)\ndata_df (pandas df)\nconcat_direction (string): 'horiz' or 'vert'\n\nReturns:\nNone (dfs modified in-place)", "source": "codesearchnet"}
{"code": "def ws_db996(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws_db996`'.format(value))\n    self._ws_db996 = value", "docstring": "Corresponds to IDD Field `ws_db996`\nMean wind speed coincident with 99.6% dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `ws_db996`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def Mult(self, x, factor):\n        \n        self.d[x] = self.d.get(x, 0) * factor", "docstring": "Scales the freq/prob associated with the value x.\n\nArgs:\nx: number value\nfactor: how much to multiply by", "source": "juraj-google-style"}
{"code": "def _finish_disconnection_action(self, action):\n        \n\n        success = action.data['success']\n        conn_key = action.data['id']\n\n        if self._get_connection_state(conn_key) != self.Disconnecting:\n            self._logger.error(\"Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s\", str(conn_key))\n            return\n\n        \n        data = self._get_connection(conn_key)\n        callback = data['callback']\n\n        conn_id = data['conn_id']\n        int_id = data['int_id']\n\n        if success is False:\n            reason = action.data['reason']\n            if reason is None:\n                reason = \"No reason was given\"\n\n            data['state'] = self.Idle\n            data['microstate'] = None\n            data['callback'] = None\n            callback(conn_id, self.id, False, reason)\n        else:\n            del self._connections[conn_id]\n            del self._int_connections[int_id]\n            callback(conn_id, self.id, True, None)", "docstring": "Finish a disconnection attempt\n\nThere are two possible outcomes:\n- if we were successful at disconnecting, we transition to disconnected\n- if we failed at disconnecting, we transition back to idle\n\nArgs:\naction (ConnectionAction): the action object describing what we are\ndisconnecting from and what the result of the operation was", "source": "juraj-google-style"}
{"code": "def load_case(self, config_data, update=False):\n        \n        \n        institute_obj = self.institute(config_data['owner'])\n        if not institute_obj:\n            raise IntegrityError(\"Institute '%s' does not exist in database\" % config_data['owner'])\n\n        \n        parsed_case = parse_case(config=config_data)\n        \n        case_obj = build_case(parsed_case, self)\n        \n        old_caseid = '-'.join([case_obj['owner'], case_obj['display_name']])\n        old_case = self.case(old_caseid)\n        if old_case:\n            LOG.info(\"Update case id for existing case: %s -> %s\", old_caseid, case_obj['_id'])\n            self.update_caseid(old_case, case_obj['_id'])\n            update = True\n\n        \n        existing_case = self.case(case_obj['_id'])\n        if existing_case and not update:\n            raise IntegrityError(\"Case %s already exists in database\" % case_obj['_id'])\n\n        files = [\n            {'file_name': 'vcf_snv', 'variant_type': 'clinical', 'category': 'snv'},\n            {'file_name': 'vcf_sv', 'variant_type': 'clinical', 'category': 'sv'},\n            {'file_name': 'vcf_cancer', 'variant_type': 'clinical', 'category': 'cancer'},\n            {'file_name': 'vcf_str', 'variant_type': 'clinical', 'category': 'str'}\n        ]\n\n        try:\n            for vcf_file in files:\n                \n                if not case_obj['vcf_files'].get(vcf_file['file_name']):\n                    LOG.debug(\"didn't find {}, skipping\".format(vcf_file['file_name']))\n                    continue\n\n                variant_type = vcf_file['variant_type']\n                category = vcf_file['category']\n                if update:\n                    self.delete_variants(\n                        case_id=case_obj['_id'],\n                        variant_type=variant_type,\n                        category=category\n                    )\n                self.load_variants(\n                    case_obj=case_obj,\n                    variant_type=variant_type,\n                    category=category,\n                    rank_threshold=case_obj.get('rank_score_threshold', 0),\n                )\n\n        except (IntegrityError, ValueError, ConfigError, KeyError) as error:\n            LOG.warning(error)\n\n        if existing_case and update:\n            self.update_case(case_obj)\n        else:\n            LOG.info('Loading case %s into database', case_obj['display_name'])\n            self._add_case(case_obj)\n\n        return case_obj", "docstring": "Load a case into the database\n\nCheck if the owner and the institute exists.\n\nArgs:\nconfig_data(dict): A dictionary with all the necessary information\nupdate(bool): If existing case should be updated\n\nReturns:\ncase_obj(dict)", "source": "juraj-google-style"}
{"code": "def call(self, url, method=None, args=None):\n    if (not args):\n        args = {}\n    if (sys.version_info.major == 3):\n        data = urllib.parse.urlparse(url)\n        path = (data.path.rstrip('/') + '/')\n        _args = dict(urllib.parse.parse_qs(data.query, keep_blank_values=True))\n    elif (sys.version_info.major == 2):\n        data = urlparse.urlparse(url)\n        path = (data.path.rstrip('/') + '/')\n        _args = dict(urlparse.parse_qs(data.query, keep_blank_values=True))\n    for elem in self._data_store:\n        pattern = elem['pattern']\n        function = elem['function']\n        _method = elem['method']\n        type_cast = elem['type_cast']\n        result = re.match(pattern, path)\n        if (result and (_method == method)):\n            _args = dict(_args, **result.groupdict())\n            for (key, val) in _args.items():\n                if (isinstance(_args[key], list) and (len(_args[key]) == 1)):\n                    _args[key] = _args[key][0]\n            for (key, val) in type_cast.items():\n                if (key not in _args):\n                    continue\n                if (not _args[key]):\n                    continue\n                if isinstance(_args[key], list):\n                    for (i, _val) in enumerate(_args[key]):\n                        _args[key][i] = self._cast(_val, val)\n                else:\n                    _args[key] = self._cast(_args[key], val)\n            requiered_args = self._get_function_args(function)\n            for (key, val) in args.items():\n                if (key in requiered_args):\n                    _args[key] = val\n            return function(**_args)\n    return None", "docstring": "Calls the first function matching the urls pattern and method.\n\nArgs:\nurl (str): Url for which to call a matching function.\nmethod (str, optional): The method used while registering a\nfunction.\nDefaults to None\nargs (dict, optional): Additional args to be passed to the\nmatching function.\n\nReturns:\nThe functions return value or `None` if no function was called.", "source": "codesearchnet"}
{"code": "def get_alpha(self, x: int, y: int) -> int:\n        \n        return lib.TCOD_image_get_alpha(self.image_c, x, y)", "docstring": "Get the Image alpha of the pixel at x, y.\n\nArgs:\nx (int): X pixel of the image.  Starting from the left at 0.\ny (int): Y pixel of the image.  Starting from the top at 0.\n\nReturns:\nint: The alpha value of the pixel.\nWith 0 being fully transparent and 255 being fully opaque.", "source": "juraj-google-style"}
{"code": "def _compute_nfp_real(l, u, counts, sizes):\n    \n    if l > u:\n        raise ValueError(\"l must be less or equal to u\")\n    return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])", "docstring": "Computes the expected number of false positives caused by using\nu to approximate set sizes in the interval [l, u], using the real\nset size distribution.\n\nArgs:\nl: the lower bound on set sizes.\nu: the upper bound on set sizes.\ncounts: the complete distribution of set sizes.\nsizes: the complete domain of set sizes.\n\nReturn (float): the expected number of false positives.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    page_transition_type = event_values.get('page_transition_type', None)\n    if page_transition_type is not None:\n      page_transition, page_transition_long = self._PAGE_TRANSITIONS.get(\n          page_transition_type, self._UNKNOWN_PAGE_TRANSITION)\n\n      if page_transition_long:\n        event_values['page_transition'] = '{0:s} - {1:s}'.format(\n            page_transition, page_transition_long)\n      else:\n        event_values['page_transition'] = page_transition\n\n    visit_source = event_values.get('visit_source', None)\n    if visit_source is not None:\n      event_values['visit_source'] = self._VISIT_SOURCE.get(\n          visit_source, 'UNKNOWN')\n\n    extras = []\n\n    url_hidden = event_values.get('url_hidden', False)\n    if url_hidden:\n      extras.append('(url hidden)')\n\n    typed_count = event_values.get('typed_count', 0)\n    if typed_count == 0:\n      extras.append('(URL not typed directly - no typed count)')\n    elif typed_count == 1:\n      extras.append('(type count {0:d} time)'.format(typed_count))\n    else:\n      extras.append('(type count {0:d} times)'.format(typed_count))\n\n    event_values['extra'] = ' '.join(extras)\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions between\nformatters and other components, such as storage and Windows EventLog\nresources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def validate(cls, job_config):\n    \n    if job_config.input_reader_cls != cls:\n      raise errors.BadReaderParamsError(\n          \"Expect input reader class %r, got %r.\" %\n          (cls, job_config.input_reader_cls))", "docstring": "Validates relevant parameters.\n\nThis method can validate fields which it deems relevant.\n\nArgs:\njob_config: an instance of map_job.JobConfig.\n\nRaises:\nerrors.BadReaderParamsError: required parameters are missing or invalid.", "source": "juraj-google-style"}
{"code": "def register_game(game_name, game_mode=\"NoFrameskip-v4\"):\n  \n  if game_name not in ATARI_GAMES:\n    raise ValueError(\"Game %s not in ATARI_GAMES\" % game_name)\n  if game_mode not in ATARI_GAME_MODES:\n    raise ValueError(\"Unknown ATARI game mode: %s.\" % game_mode)\n  camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode\n  \n  cls = type(\"Gym%sRandom\" % camel_game_name,\n             (T2TGymEnv,), {\"base_env_name\": camel_game_name})\n  registry.register_problem(cls)", "docstring": "Create and register problems for the game.\n\nArgs:\ngame_name: str, one of the games in ATARI_GAMES, e.g. \"bank_heist\".\ngame_mode: the frame skip and sticky keys config.\n\nRaises:\nValueError: if game_name or game_mode are wrong.", "source": "juraj-google-style"}
{"code": "def diff(self, container):\n        \n        return self._result(\n            self._get(self._url(\"/containers/{0}/changes\", container)), True\n        )", "docstring": "Inspect changes on a container's filesystem.\n\nArgs:\ncontainer (str): The container to diff\n\nReturns:\n(str)\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def bogoliubov_trans(p, q, theta):\n    expo = (((- 4) * theta) / np.pi)\n    (yield cirq.X(p))\n    (yield cirq.S(p))\n    (yield (cirq.ISWAP(p, q) ** expo))\n    (yield (cirq.S(p) ** 1.5))\n    (yield cirq.X(p))", "docstring": "r\"\"\"The 2-mode Bogoliubov transformation is mapped to two-qubit operations.\nWe use the identity X S^\\dag X S X = Y X S^\\dag Y S X = X to transform\nthe Hamiltonian XY+YX to XX+YY type. The time evolution of the XX + YY\nHamiltonian can be expressed as a power of the iSWAP gate.\n\nArgs:\np: the first qubit\nq: the second qubit\ntheta: The rotational angle that specifies the Bogoliubov\ntransformation, which is a function of the kinetic energy and\nthe superconducting gap.", "source": "codesearchnet"}
{"code": "def extract(self, text: str, confidence=0.5, filter=['Person', 'Place', 'Organisation']) -> List[Extraction]:\n        \n\n        filter = ','.join(filter)\n        search_data = [('confidence', confidence),\n                       ('text', text),\n                       ('types', filter)]\n        search_headers = {'Accept': 'application/json'}\n        r = requests.post(self._search_url,\n                          data=search_data,\n                          headers=search_headers)\n        results = r.json()\n\n        last_results = self._combiner(results)\n        return last_results", "docstring": "Extract with the input text, confidence and fields filter to be used.\nArgs:\ntext (str): text input to be annotated\nconfidence (float): the confidence of the annotation\nfilter (List[str]): the fields that to be extracted\n\nReturns:\nList[Extraction]", "source": "juraj-google-style"}
{"code": "def _convert_to_ragged_tensor_values(value):\n    if _is_supported_ragged_values_type(value):\n        return value\n    else:\n        return convert_to_tensor_or_ragged_tensor(value, name='values')", "docstring": "Converts value to supported RaggedTensor value.\n\n* If `value` is an object of supported value type, then return it as-is.\n* Otherwise convert it to Tensor or RaggedTensor.\n\nArgs:\nvalue: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor\nvalue types, or an object whose type has a registered `Tensor` conversion\nfunction.\n\nReturns:\nAn object of `Tensor`, `RaggedTensor` or registered RaggedTensor\nvalue types", "source": "github-repos"}
{"code": "def _determine_api_url(self, platform, service, action):\n        \n        base_uri = settings.BASE_PAL_URL.format(platform)\n        if service == \"Recurring\":\n            api_version = settings.API_RECURRING_VERSION\n        elif service == \"Payout\":\n            api_version = settings.API_PAYOUT_VERSION\n        else:\n            api_version = settings.API_PAYMENT_VERSION\n        return '/'.join([base_uri, service, api_version, action])", "docstring": "This returns the Adyen API endpoint based on the provided platform,\nservice and action.\n\nArgs:\nplatform (str): Adyen platform, ie 'live' or 'test'.\nservice (str): API service to place request through.\naction (str): the API action to perform.", "source": "juraj-google-style"}
{"code": "def __init_subclass__(cls, user_cls=None):\n    utils.ensure_explicit_method_override(cls.__init__, '`pg.Object.__init__` is a PyGlove managed method. For setting up the class initialization logic, please override `_on_bound()` or `_on_init()`. If you do have a need to override `__init__` and know the implications, please decorate your overridden method with `@pg.explicit_method_override`.')\n    setattr(cls, '__serialization_key__', cls.__type_name__)\n    super().__init_subclass__()\n    user_cls = user_cls or cls\n    if user_cls.auto_schema:\n        base_schema_list = []\n        for base_cls in user_cls.__bases__:\n            base_schema = getattr(base_cls, '__schema__', None)\n            if isinstance(base_schema, pg_typing.Schema):\n                base_schema_list.append(base_schema)\n        new_fields = user_cls._infer_fields_from_annotations()\n        cls_schema = pg_typing.create_schema(new_fields, base_schema_list=base_schema_list, allow_nonconst_keys=True, metadata={}, for_cls=user_cls)\n        user_cls._update_default_values_from_class_attributes(cls_schema)\n        if new_fields:\n            cls_schema.metadata['init_arg_list'] = None\n        user_cls.apply_schema(cls_schema)", "docstring": "Initializes subclass.\n\n`pg.Object` allows child classes to explicit call\n`pg.Object.__init_subclass__` in their `__init_subclass__`, to bypass other\nclasses' `__init__subclass__` in multi-inheritance use cases.\n\nExample:\n\nclass Subclass(pg.Object, UserClass):\ndef __init_subclass__(cls):\n# This bypasses UserClass.__init_subclass__\npg.Object.__init_subclass__(cls)\n\nArgs:\nuser_cls: The source class that calls this class method.", "source": "github-repos"}
{"code": "def depth_january_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_january_average_ground_temperature`'.format(value))\n\n        self._depth_january_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_january_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_january_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def from_json(cls, raw):\n    bcls = None\n    if ('webLink' in raw):\n        bcls = WebLink\n    elif ('topicCategory' in raw):\n        bcls = Category\n    elif ('taskAssist' in raw):\n        bcls = TaskAssist\n    elif ('context' in raw):\n        bcls = Context\n    if (bcls is None):\n        logger.warning('Unknown annotation type: %s', raw.keys())\n        return None\n    annotation = bcls()\n    annotation.load(raw)\n    return annotation", "docstring": "Helper to construct an annotation from a dict.\n\nArgs:\nraw (dict): Raw annotation representation.\n\nReturns:\nNode: An Annotation object or None.", "source": "codesearchnet"}
{"code": "def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[Tuple, BaseModelOutput]:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    encoder_states = () if output_hidden_states else None\n    all_attentions = () if output_attentions else None\n    hidden_states = inputs_embeds\n    for encoder_layer in self.layers:\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if self.gradient_checkpointing and self.training:\n            layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, position_embeddings, output_attentions)\n        else:\n            layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, **kwargs)\n        hidden_states = layer_outputs[0]\n        if output_attentions:\n            all_attentions = all_attentions + (layer_outputs[1],)\n    if output_hidden_states:\n        encoder_states = encoder_states + (hidden_states,)\n    if not return_dict:\n        return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n    return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\ninputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nEmbeddings which serve as input to the Transformer.\nattention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n- 1 for tokens that are **not masked**,\n- 0 for tokens that are **masked**.\n\n[What are attention masks?](../glossary#attention-mask)\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\nfor more detail.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def _reset_env(self, env: BaseUnityEnvironment):\n    if (self.meta_curriculum is not None):\n        return env.reset(train_mode=self.fast_simulation, config=self.meta_curriculum.get_config())\n    else:\n        return env.reset(train_mode=self.fast_simulation)", "docstring": "Resets the environment.\n\nReturns:\nA Data structure corresponding to the initial reset state of the\nenvironment.", "source": "codesearchnet"}
{"code": "def Run(self, conf, args):\n    try:\n        options, args = self.parser.parse_args(args)\n    except SystemExit as e:\n        return e.code\n    if options.maps:\n        self.log.info('Setting configured maps to %s', options.maps)\n        conf.maps = options.maps\n    warnings, errors = (0, 0)\n    self.log.info('Verifying program and system configuration.')\n    config_warnings, config_errors = config.VerifyConfiguration(conf)\n    warnings += config_warnings\n    errors += config_errors\n    self.log.info('Verifying data sources.')\n    errors += Verify().VerifySources(conf)\n    self.log.info('verification: %d warnings, %d errors', warnings, errors)\n    if errors > 0:\n        self.log.error('Too many errors in verification tests failed; repair aborted!')\n        return 1\n    self.log.info('Rebuilding and verifying caches: %s.', conf.maps)\n    return Update().UpdateMaps(conf=conf, incremental=False)", "docstring": "Run the Repair command.\n\nSee Command.Run() for full documentation on the Run() method.\n\nArgs:\nconf: nss_cache.config.Config object\nargs: list of arguments to be parsed by this command\n\nReturns:\n0 on success, nonzero on error", "source": "github-repos"}
{"code": "def stop(self, **kwargs):\n        \n        path = '%s/%s/stop' % (self.manager.path, self.get_id())\n        self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Stop the environment.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabStopError: If the operation failed", "source": "juraj-google-style"}
{"code": "def exponential(x):\n    return math_ops.exp(x)", "docstring": "Exponential activation function.\n\nFor example:\n\n>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n>>> b = tf.keras.activations.exponential(a)\n>>> b.numpy()\narray([0.04978707,  0.36787945,  1.,  2.7182817 , 20.085537], dtype=float32)\n\nArgs:\nx: Input tensor.\n\nReturns:\nTensor with exponential activation: `exp(x)`.", "source": "github-repos"}
{"code": "def __new__(cls, name, bases, attrs):\n        \n        new_cls = super(DidlMetaClass, cls).__new__(cls, name, bases, attrs)\n        \n        item_class = attrs.get('item_class', None)\n        if item_class is not None:\n            _DIDL_CLASS_TO_CLASS[item_class] = new_cls\n        return new_cls", "docstring": "Create a new instance.\n\nArgs:\nname (str): Name of the class.\nbases (tuple): Base classes.\nattrs (dict): attributes defined for the class.", "source": "juraj-google-style"}
{"code": "def assets(self, asset_type=None):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if not asset_type:\n            return self.tc_requests.adversary_assets(\n                self.api_type, self.api_sub_type, self.unique_id\n            )\n        if asset_type == 'PHONE':\n            return self.tc_requests.adversary_phone_assets(\n                self.api_type, self.api_sub_type, self.unique_id\n            )\n        if asset_type == 'HANDLER':\n            return self.tc_requests.adversary_handle_assets(\n                self.api_type, self.api_sub_type, self.unique_id\n            )\n        if asset_type == 'URL':\n            return self.tc_requests.adversary_url_assets(\n                self.api_type, self.api_sub_type, self.unique_id\n            )\n\n        self._tcex.handle_error(\n            925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n        )\n        return None", "docstring": "Retrieves all of the assets of a given asset_type\n\nArgs:\nasset_type: (str) Either None, PHONE, HANDLER, or URL\n\nReturns:", "source": "juraj-google-style"}
{"code": "def SerializeExclusiveData(self, writer):\n        \n        self.Code.Serialize(writer)\n\n        if self.Version >= 1:\n            writer.WriteBool(self.NeedStorage)\n\n        writer.WriteVarString(self.Name)\n        writer.WriteVarString(self.CodeVersion)\n        writer.WriteVarString(self.Author)\n        writer.WriteVarString(self.Email)\n        writer.WriteVarString(self.Description)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def new_list(iterable=None):\n    if iterable:\n        elements = tuple(iterable)\n    else:\n        elements = ()\n    if elements:\n        return _py_list_new(elements)\n    return tf_tensor_list_new(elements)", "docstring": "The list constructor.\n\nArgs:\niterable: Optional elements to fill the list with.\n\nReturns:\nA list-like object. The exact return value depends on the initial elements.", "source": "github-repos"}
{"code": "def record_factory(app, fields=None):\n    record = Record(app, {'$type': Record._type, 'isNew': True, 'applicationId': app.id, 'comments': {'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Collections.Generic.List`1[[Core.Models.Record.Comments, Core]], mscorlib]], mscorlib'}, 'values': {'$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Object, mscorlib]], mscorlib'}})\n    fields = (fields or {})\n    for (name, value) in six.iteritems(fields):\n        record[name] = value\n    copy_raw = copy.copy(record._raw)\n    values_dict = {}\n    for (key, value) in six.iteritems(copy_raw['values']):\n        if (value is not None):\n            values_dict[key] = value\n    record._raw['values'] = values_dict\n    return record", "docstring": "Return a temporary Record instance to be used for field validation and value parsing\n\nArgs:\napp (App): Target App to create a transient Record instance for\nfields (dict): Optional dict of fields and values to set on new Record instance before returning\n\nReturns:\nRecord: Unsaved Record instance to be used for validation, creation, etc.", "source": "codesearchnet"}
{"code": "def _central_crop(image, crop_height, crop_width):\n    shape = tf.shape(image)\n    (height, width) = (shape[0], shape[1])\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_CENTRAL_CROP, value=[crop_height, crop_width])\n    amount_to_be_cropped_h = (height - crop_height)\n    crop_top = (amount_to_be_cropped_h \n    amount_to_be_cropped_w = (width - crop_width)\n    crop_left = (amount_to_be_cropped_w \n    return tf.slice(image, [crop_top, crop_left, 0], [crop_height, crop_width, (- 1)])", "docstring": "Performs central crops of the given image list.\n\nArgs:\nimage: a 3-D image tensor\ncrop_height: the height of the image following the crop.\ncrop_width: the width of the image following the crop.\n\nReturns:\n3-D tensor with cropped image.", "source": "codesearchnet"}
{"code": "def _OverloadOperator(cls, operator):\n    if operator == '__eq__' or operator == '__ne__':\n        return\n    tensor_oper = getattr(tensor_lib.Tensor, operator)\n\n    def _run_op(a, *args, **kwargs):\n        return tensor_oper(a.value(), *args, **kwargs)\n    functools.update_wrapper(_run_op, tensor_oper)\n    setattr(cls, operator, _run_op)", "docstring": "Defer an operator overload to `tensor_lib.Tensor`.\n\nWe pull the operator out of tensor_lib.Tensor dynamically to avoid ordering\nissues.\n\nArgs:\noperator: string. The operator name.", "source": "github-repos"}
{"code": "def _load_from_hdx(self, object_type, id_field):\n        \n        \n        success, result = self._read_from_hdx(object_type, id_field)\n        if success:\n            self.old_data = self.data\n            self.data = result\n            return True\n        logger.debug(result)\n        return False", "docstring": "Helper method to load the HDX object given by identifier from HDX\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field (str): HDX object identifier\n\nReturns:\nbool: True if loaded, False if not", "source": "juraj-google-style"}
{"code": "def ZerosLikeV1WhileLoop(self, op, index):\n    if util.IsLoopSwitch(op):\n        return None\n    if op.graph.building_function:\n        return array_ops.zeros_like(op.outputs[index])\n    dead_branch = util.IsSwitch(op)\n    forward_ctxt = util.GetWhileContext(op)\n    grad_state = self._map.get(forward_ctxt)\n    if grad_state is None:\n        return ZerosLike(op, index)\n    op_ctxt = op._get_control_flow_context()\n    val = ops.convert_to_tensor(op.outputs[index], name='tensor')\n    shape = val.get_shape()\n    if shape.is_fully_defined():\n        if val.dtype == dtypes.resource:\n            result = array_ops.zeros(resource_variable_ops.variable_shape(val), dtype=default_gradient.get_zeros_dtype(val))\n        else:\n            result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)\n        if dead_branch:\n            pred = grad_state.history_map.get(op_ctxt.pred.name)\n            branch = op_ctxt.branch\n            result = control_flow_ops._SwitchRefOrTensor(result, pred)[1 - branch]\n    else:\n        if dead_branch:\n            pred = op_ctxt.pred\n            branch = op_ctxt.branch\n            op_ctxt.outer_context.Enter()\n            val = control_flow_ops._SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]\n            zeros_shape = array_ops.shape_internal(val, optimize=False)\n            op_ctxt.outer_context.Exit()\n            val.op._set_control_flow_context(op_ctxt)\n            zeros_shape.op._set_control_flow_context(op_ctxt)\n        else:\n            op_ctxt.Enter()\n            zeros_shape = array_ops.shape_internal(val, optimize=False)\n            op_ctxt.Exit()\n        grad_state.grad_context.Exit()\n        history_zeros_shape = grad_state.AddForwardAccumulator(zeros_shape, dead_branch=dead_branch)\n        grad_state.grad_context.Enter()\n        shape = grad_state.AddBackpropAccumulatedValue(history_zeros_shape, zeros_shape, dead_branch)\n        result = array_ops.zeros(shape, val.dtype)\n    return result", "docstring": "Create zeros_like for the specified output of an op.\n\nIf op is in a while loop that is part of gradients(), this method\nmust be called in its grad loop context.\n\nArgs:\nop: A tensorflow operation.\nindex: the index for a specific output of the op.\n\nReturns:\nA zero tensor of the same shape of op.outputs[index].", "source": "github-repos"}
{"code": "def parse_init(init_file) -> Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]:\n    with open(init_file, 'r', encoding='utf-8', newline='\\n') as f:\n        lines = f.readlines()\n    line_index = 0\n    while line_index < len(lines) and (not lines[line_index].startswith('_import_structure = {')):\n        line_index += 1\n    if line_index >= len(lines):\n        return None\n    objects = []\n    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:\n        line = lines[line_index]\n        if _re_one_line_import_struct.search(line):\n            content = _re_one_line_import_struct.search(line).groups()[0]\n            imports = re.findall('\\\\[([^\\\\]]+)\\\\]', content)\n            for imp in imports:\n                objects.extend([obj[1:-1] for obj in imp.split(', ')])\n            line_index += 1\n            continue\n        single_line_import_search = _re_import_struct_key_value.search(line)\n        if single_line_import_search is not None:\n            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]\n            objects.extend(imports)\n        elif line.startswith(' ' * 8 + '\"'):\n            objects.append(line[9:-3])\n        line_index += 1\n    import_dict_objects = {'none': objects}\n    while not lines[line_index].startswith('if TYPE_CHECKING'):\n        backend = find_backend(lines[line_index])\n        if _re_try.search(lines[line_index - 1]) is None:\n            backend = None\n        if backend is not None:\n            line_index += 1\n            while _re_else.search(lines[line_index]) is None:\n                line_index += 1\n            line_index += 1\n            objects = []\n            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):\n                line = lines[line_index]\n                if _re_import_struct_add_one.search(line) is not None:\n                    objects.append(_re_import_struct_add_one.search(line).groups()[0])\n                elif _re_import_struct_add_many.search(line) is not None:\n                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')\n                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]\n                    objects.extend(imports)\n                elif _re_between_brackets.search(line) is not None:\n                    imports = _re_between_brackets.search(line).groups()[0].split(', ')\n                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]\n                    objects.extend(imports)\n                elif _re_quote_object.search(line) is not None:\n                    objects.append(_re_quote_object.search(line).groups()[0])\n                elif line.startswith(' ' * 8 + '\"'):\n                    objects.append(line[9:-3])\n                elif line.startswith(' ' * 12 + '\"'):\n                    objects.append(line[13:-3])\n                line_index += 1\n            import_dict_objects[backend] = objects\n        else:\n            line_index += 1\n    objects = []\n    while line_index < len(lines) and find_backend(lines[line_index]) is None and (not lines[line_index].startswith('else')):\n        line = lines[line_index]\n        single_line_import_search = _re_import.search(line)\n        if single_line_import_search is not None:\n            objects.extend(single_line_import_search.groups()[0].split(', '))\n        elif line.startswith(' ' * 8):\n            
objects.append(line[8:-2])\n        line_index += 1\n    type_hint_objects = {'none': objects}\n    while line_index < len(lines):\n        backend = find_backend(lines[line_index])\n        if _re_try.search(lines[line_index - 1]) is None:\n            backend = None\n        if backend is not None:\n            line_index += 1\n            while _re_else.search(lines[line_index]) is None:\n                line_index += 1\n            line_index += 1\n            objects = []\n            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):\n                line = lines[line_index]\n                single_line_import_search = _re_import.search(line)\n                if single_line_import_search is not None:\n                    objects.extend(single_line_import_search.groups()[0].split(', '))\n                elif line.startswith(' ' * 12):\n                    objects.append(line[12:-2])\n                line_index += 1\n            type_hint_objects[backend] = objects\n        else:\n            line_index += 1\n    return (import_dict_objects, type_hint_objects)", "docstring": "Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects\ndefined.\n\nArgs:\ninit_file (`str`): Path to the init file to inspect.\n\nReturns:\n`Optional[Tuple[Dict[str, List[str]], Dict[str, List[str]]]]`: A tuple of two dictionaries mapping backends to list of\nimported objects, one for the `_import_structure` part of the init and one for the `TYPE_CHECKING` part of the\ninit. Returns `None` if the init is not a custom init.", "source": "github-repos"}
{"code": "def request(self, path, data=None, headers=None, method=None):\n    if isinstance(data, str):\n        data = data.encode('utf-8')\n    response = urlopen(self._request(path, data=data, headers=headers, method=method))\n    self._set_session_cookie(response)\n    return response", "docstring": "Performs a HTTP request to the Go server\n\nArgs:\npath (str): The full path on the Go server to request.\nThis includes any query string attributes.\ndata (str, dict, bool, optional): If any data is present this\nrequest will become a POST request.\nheaders (dict, optional): Headers to set for this particular\nrequest\n\nRaises:\nHTTPError: when the HTTP request fails.\n\nReturns:\nfile like object: The response from a\n:func:`urllib2.urlopen` call", "source": "codesearchnet"}
{"code": "def __new__(cls, orb, values, frame=PARENT_FRAME):\n        \n\n        if isinstance(values, cls):\n            frame = values.frame\n            values = values.base\n\n        obj = np.ndarray.__new__(cls, (6, 6), buffer=np.array(values), dtype=float)\n        obj._frame = frame\n        obj.orb = orb.copy(form=\"cartesian\")\n\n        return obj", "docstring": "Create a covariance matrix\n\nArgs:\norb (Orbit): Covariance from which this is the covariance\nvalues: 2D matrix\nframe (str): Frame in which the covariance is expressed", "source": "juraj-google-style"}
{"code": "def evaluate_inverse(distribution, u_data, cache=None, parameters=None):\n    if (cache is None):\n        cache = {}\n    out = numpy.zeros(u_data.shape)\n    if hasattr(distribution, '_ppf'):\n        parameters = load_parameters(distribution, '_ppf', parameters=parameters, cache=cache)\n        out[:] = distribution._ppf(u_data.copy(), **parameters)\n    else:\n        from .. import approximation\n        parameters = load_parameters(distribution, '_cdf', parameters=parameters, cache=cache)\n        out[:] = approximation.approximate_inverse(distribution, u_data.copy(), cache=cache.copy(), parameters=parameters)\n    cache[distribution] = out\n    return out", "docstring": "Evaluate inverse Rosenblatt transformation.\n\nArgs:\ndistribution (Dist):\nDistribution to evaluate.\nu_data (numpy.ndarray):\nLocations for where evaluate inverse transformation distribution at.\nparameters (:py:data:typing.Any):\nCollection of parameters to override the default ones in the\ndistribution.\ncache (:py:data:typing.Any):\nA collection of previous calculations in case the same distribution\nturns up on more than one occasion.\n\nReturns:\nThe cumulative distribution values of ``distribution`` at location\n``u_data`` using parameters ``parameters``.", "source": "codesearchnet"}
{"code": "def get_pretty_app_names(self):\n    pretty_app_names = set()\n    for app_name in self.get_app_names():\n        pretty_app_names.add(self.get_name(app_name))\n    return pretty_app_names", "docstring": "Return the list of pretty app names that are available in the database.\n\nReturns:\nset of str.", "source": "codesearchnet"}
{"code": "def tuplize(nested):\n  \n  if isinstance(nested, str):\n    return nested\n  try:\n    return tuple(map(tuplize, nested))\n  except TypeError:\n    return nested", "docstring": "Recursively converts iterables into tuples.\n\nArgs:\nnested: A nested structure of items and iterables.\n\nReturns:\nA nested structure of items and tuples.", "source": "juraj-google-style"}
{"code": "def barrier(mesh: layout.Mesh, barrier_name: Optional[str]=None, timeout_in_ms: Optional[int]=None):\n    if barrier_name is None:\n        barrier_name = '(barrier)'\n    logging.info('entering barrier before op: %s', barrier_name)\n    context.async_wait()\n    component = array_ops.reshape(1.0, [1] * len(mesh.shape()))\n    ones = api.pack([component] * mesh.num_local_devices(), layout.Layout(mesh.dim_names, mesh))\n    mesh_size = math_ops.reduce_sum(ones)\n    if mesh_size != mesh.size:\n        raise ValueError('Global barrier produced wrong mesh size : {0} while mesh has actualsize : {1}'.format(mesh_size, mesh.size))\n    context.async_wait()\n    if context.context().coordination_service:\n        if timeout_in_ms is None:\n            timeout_in_ms = 24 * 60 * 60 * 1000\n        num_calls = _BARRIER_DICT.setdefault(barrier_name, 0)\n        _BARRIER_DICT[barrier_name] = num_calls + 1\n        barrier_id = f'{barrier_name}:{num_calls}'\n        context.context().wait_at_barrier(barrier_id, timeout_in_ms)\n    logging.info('finished running barrier across all clients after op: %s', barrier_name)", "docstring": "Runs a barrier on the mesh.\n\nUpon returning from the barrier, all operations run before the barrier\nwould have completed across all clients. Currently we allocate a fully\nsharded tensor with mesh shape and run an all_reduce on it.\n\nExample:\n\nA barrier can be used before application exit to ensure completion of pending\nops.\n\n```python\n\nx = [1, 2, 3]\nx = dtensor.relayout(x, dtensor.Layout.batch_sharded(mesh, 'batch', 1))\ndtensor.barrier(mesh)\n\n# At this point all devices on all clients in the mesh have completed\n# operations before the barrier. Therefore it is OK to tear down the clients.\nsys.exit()\n```\n\nArgs:\nmesh: The mesh to run the barrier on.\nbarrier_name: The name of the barrier. Mainly used for logging purpose.\ntimeout_in_ms: The timeout of the barrier in ms. If omitted, blocks\nindefinitely till the barrier is reached from all clients.", "source": "github-repos"}
{"code": "def __init__(self, iterable, order, func):\n        \n        assert abs(order) == 1, 'order argument must be +1 or -1'\n        super(OrderedQueryable, self).__init__(iterable)\n        self._funcs = [(order, func)]", "docstring": "Create an OrderedIterable.\n\nArgs:\niterable: The iterable sequence to be ordered.\norder: +1 for ascending, -1 for descending.\nfunc: The function to select the sorting key.", "source": "juraj-google-style"}
{"code": "def infer_transportation_mode(self, clf, min_time):\n    self.transportation_modes = speed_clustering(clf, self.points, min_time)\n    return self", "docstring": "In-place transportation mode inferring\n\nSee infer_transportation_mode function\n\nArgs:\nReturns:\n:obj:`Segment`: self", "source": "codesearchnet"}
{"code": "def Update(self, request, global_params=None):\n    config = self.GetMethodConfig('Update')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates information in an existing routine. The update method replaces the entire Routine resource.\n\nArgs:\nrequest: (BigqueryRoutinesUpdateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Routine) The response message.", "source": "github-repos"}
{"code": "def ms_to_mph(value):\n    if value is None:\n        return None\n    return value * 2.237", "docstring": "Converts speed from meters per second to miles per hour\n\nArgs:\nvalue: floating point representing the speed in meters per second\nReturns: speed in miles per hour", "source": "github-repos"}
{"code": "def on_train_begin(self, logs=None):", "docstring": "Called at the beginning of training.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def tuplesorted(items, *keys):\n    \n    \n    tuple_keys = [\n        Key(func=lambda t, i=index, k=key: k.func(t[i]), reverse=key.reverse)\n        for index, key in enumerate(keys)\n    ]\n    return multisorted(items, *tuple_keys)", "docstring": "Sort by tuples with a different key for each item.\n\nArgs:\nitems: An iterable series of sequences (typically tuples)\n\n*keys: Key objects which transform individual elements of\neach tuple into sort keys. The zeroth object\ntransforms the zeroth element of each tuple, the first\nkey object transforms the first element of each tuple,\nand so on.\nReturns:\nA list of items sorted according to keys.", "source": "juraj-google-style"}
{"code": "def controlled_by(self, *control_qubits: Qid) -> 'Operation':\n    from cirq.ops import ControlledOperation\n    if ((control_qubits is None) or (len(control_qubits) is 0)):\n        raise ValueError(\"Can't get controlled operation without control qubit. Op: {}\".format(repr(self)))\n    else:\n        return ControlledOperation(control_qubits, self)", "docstring": "Returns a controlled version of this operation.\n\nArgs:\ncontrol_qubits: Qubits to control the operation by. Required.", "source": "codesearchnet"}
{"code": "def combiplot(self, fontsize=8, **kwargs):\n        \n        ax_list = None\n        for i, (label, cycle) in enumerate(self.items()):\n            fig = cycle.plot(ax_list=ax_list, label=label, fontsize=fontsize,\n                             lw=2.0, marker=\"o\", linestyle=\"-\", show=False)\n            ax_list = fig.axes\n\n        return fig", "docstring": "Compare multiple cycels on a grid: one subplot per quantity,\nall cycles on the same subplot.\n\nArgs:\nfontsize: Legend fontsize.", "source": "juraj-google-style"}
{"code": "def log_batch(self, log_data):\n        \n\n        url = uri_join(self.base_url, \"log\")\n\n        attachments = []\n        for log_item in log_data:\n            log_item[\"item_id\"] = self.stack[-1]\n            attachment = log_item.get(\"attachment\", None)\n\n            if \"attachment\" in log_item:\n                del log_item[\"attachment\"]\n\n            if attachment:\n                if not isinstance(attachment, collections.Mapping):\n                    attachment = {\"data\": attachment}\n\n                name = attachment.get(\"name\", str(uuid.uuid4()))\n                log_item[\"file\"] = {\"name\": name}\n                attachments.append((\"file\", (\n                    name,\n                    attachment[\"data\"],\n                    attachment.get(\"mime\", \"application/octet-stream\")\n                )))\n\n        files = [(\n            \"json_request_part\", (\n                None,\n                json.dumps(log_data),\n                \"application/json\"\n            )\n        )]\n        files.extend(attachments)\n        from reportportal_client import POST_LOGBATCH_RETRY_COUNT\n        for i in range(POST_LOGBATCH_RETRY_COUNT):\n            try:\n                r = self.session.post(\n                    url=url,\n                    files=files,\n                    verify=self.verify_ssl\n                )\n            except KeyError:\n                if i < POST_LOGBATCH_RETRY_COUNT - 1:\n                    continue\n                else:\n                    raise\n            break\n\n        logger.debug(\"log_batch - Stack: %s\", self.stack)\n        logger.debug(\"log_batch response: %s\", r.text)\n\n        return _get_data(r)", "docstring": "Logs batch of messages with attachment.\n\nArgs:\nlog_data: list of log records.\nlog record is a dict of;\ntime, message, level, attachment\nattachment is a dict of:\nname: name of attachment\ndata: fileobj or content\nmime: content type for attachment", "source": "juraj-google-style"}
{"code": "def get_structure_seqs(self, model):\n    dont_overwrite = []\n    chains = list(model.get_chains())\n    for x in chains:\n        if self.chains.has_id(x.id):\n            if self.chains.get_by_id(x.id).seq_record:\n                dont_overwrite.append(x.id)\n    if (len(dont_overwrite) == len(chains)):\n        log.debug('Not writing structure sequences, already stored')\n        return\n    structure_seqs = ssbio.protein.structure.properties.residues.get_structure_seqrecords(model)\n    log.debug('{}: gathered chain sequences'.format(self.id))\n    for seq_record in structure_seqs:\n        log.debug('{}: adding chain sequence to ChainProp'.format(seq_record.id))\n        my_chain = self.chains.get_by_id(seq_record.id)\n        my_chain.seq_record = seq_record", "docstring": "Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute.\n\nArgs:\nmodel (Model): Biopython Model object of the structure you would like to parse", "source": "codesearchnet"}
{"code": "def get_ast(module_path, python_version):\n    \n    with module_path.open(mode='rt', encoding='utf-8') as handle:\n        source = handle.read()\n\n    return parso.parse(source, version=python_version)", "docstring": "Get the AST for the code in a file.\n\nArgs:\nmodule_path: pathlib.Path to the file containing the code.\npython_version: Python version as a \"MAJ.MIN\" string.\n\nReturns: The parso parse tree for the code in `module_path`.", "source": "juraj-google-style"}
{"code": "def get_filename_by_suffixes(dir_src, suffixes):\n        \n        \n        list_files = os.listdir(dir_src)\n        re_files = list()\n        if is_string(suffixes):\n            suffixes = [suffixes]\n        if not isinstance(suffixes, list):\n            return None\n        for i, suf in enumerate(suffixes):\n            if len(suf) >= 1 and suf[0] != '.':\n                suffixes[i] = '.' + suf\n        for f in list_files:\n            name, ext = os.path.splitext(f)\n            if StringClass.string_in_list(ext, suffixes):\n                re_files.append(f)\n        return re_files", "docstring": "get file names with the given suffixes in the given directory\n\nArgs:\ndir_src: directory path\nsuffixes: wanted suffixes list, the suffix in suffixes can with or without '.'\n\nReturns:\nfile names with the given suffixes as list", "source": "juraj-google-style"}
{"code": "def update_refresh_state(self, id_or_uri, refresh_state_data):\n    uri = (self._client.build_uri(id_or_uri) + '/refreshState')\n    return self._client.update(refresh_state_data, uri=uri)", "docstring": "Refreshes a given intelligent power delivery device.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\nrefresh_state_data:\nPower device refresh request\n\nReturns:\nstr: The power state", "source": "codesearchnet"}
{"code": "def WriteSignedBinaryBlobs(binary_urn,\n                           blobs,\n                           token = None):\n  \n  if _ShouldUseLegacyDatastore():\n    aff4.FACTORY.Delete(binary_urn, token=token)\n    with data_store.DB.GetMutationPool() as mutation_pool:\n      with aff4.FACTORY.Create(\n          binary_urn,\n          collects.GRRSignedBlob,\n          mode=\"w\",\n          mutation_pool=mutation_pool,\n          token=token) as fd:\n        for blob in blobs:\n          fd.Add(blob, mutation_pool=mutation_pool)\n\n  if data_store.RelationalDBEnabled():\n    blob_references = rdf_objects.BlobReferences()\n    current_offset = 0\n    for blob in blobs:\n      blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(\n          blob.SerializeToString())\n      blob_references.items.Append(\n          rdf_objects.BlobReference(\n              offset=current_offset, size=len(blob.data), blob_id=blob_id))\n      current_offset += len(blob.data)\n    data_store.REL_DB.WriteSignedBinaryReferences(\n        _SignedBinaryIDFromURN(binary_urn), blob_references)", "docstring": "Saves signed blobs to the datastore.\n\nIf a signed binary with the given URN already exists, its contents will get\noverwritten.\n\nArgs:\nbinary_urn: RDFURN that should serve as a unique identifier for the binary.\nblobs: An Iterable of signed blobs to write to the datastore.\ntoken: ACL token to use with the legacy (non-relational) datastore.", "source": "juraj-google-style"}
{"code": "def GetModifyTimestamp(self):\n    return self._last_modification_timestamp", "docstring": "Return last modification timestamp of this map.\n\nReturns:\nEither an int containing seconds since epoch, or None.", "source": "github-repos"}
{"code": "def find_stages(document):\n    \n    names = []\n    if 'pipeline' in document:\n        for entry in document['pipeline']:\n            \n            key, _ = list(entry.items())[0]\n            if key.startswith(\"stage(\"):\n                names.append(key.replace('stage(', '').replace(')', ''))\n    return names", "docstring": "Find  **stages** in document.\n\nArgs:\ndocument (dict): validated spline document loaded from a yaml file.\n\nReturns:\nlist: stages as a part of the spline document or an empty list if not given.\n\n>>> find_stages({'pipeline': [{'stage(Prepare)':1}, {'stage(Build)':1}, {'stage(Deploy)':2}]})\n['Prepare', 'Build', 'Deploy']", "source": "juraj-google-style"}
{"code": "def _attempt_slice_retry(self, shard_state, tstate):\n    if ((shard_state.slice_retries + 1) < parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):\n        logging.warning('Slice %s %s failed for the %s of up to %s attempts (%s of %s taskqueue execution attempts). Will retry now.', tstate.shard_id, tstate.slice_id, (shard_state.slice_retries + 1), parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS, (self.task_retry_count() + 1), parameters.config.TASK_MAX_ATTEMPTS)\n        sys.exc_clear()\n        self._try_free_lease(shard_state, slice_retry=True)\n        return self._TASK_DIRECTIVE.RETRY_SLICE\n    if (parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0):\n        logging.warning('Slice attempt %s exceeded %s max attempts.', (self.task_retry_count() + 1), parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)\n    return self._TASK_DIRECTIVE.RETRY_SHARD", "docstring": "Attempt to retry this slice.\n\nThis method may modify shard_state and tstate to prepare for retry or fail.\n\nArgs:\nshard_state: model.ShardState for current shard.\ntstate: model.TransientShardState for current shard.\n\nReturns:\nA _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.\nRETRY_SHARD if shard retry should be attempted.", "source": "codesearchnet"}
{"code": "async def article(self, title, description=None, *, url=None, thumb=None, content=None, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None):\n    result = types.InputBotInlineResult(id=(id or ''), type='article', send_message=(await self._message(text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons)), title=title, description=description, url=url, thumb=thumb, content=content)\n    if (id is None):\n        result.id = hashlib.sha256(bytes(result)).hexdigest()\n    return result", "docstring": "Creates new inline result of article type.\n\nArgs:\ntitle (`str`):\nThe title to be shown for this result.\n\ndescription (`str`, optional):\nFurther explanation of what this result means.\n\nurl (`str`, optional):\nThe URL to be shown for this result.\n\nthumb (:tl:`InputWebDocument`, optional):\nThe thumbnail to be shown for this result.\nFor now it has to be a :tl:`InputWebDocument` if present.\n\ncontent (:tl:`InputWebDocument`, optional):\nThe content to be shown for this result.\nFor now it has to be a :tl:`InputWebDocument` if present.", "source": "codesearchnet"}
{"code": "def CleanseRawStrings(raw_lines):\n    delimiter = None\n    lines_without_raw_strings = []\n    for line in raw_lines:\n        if delimiter:\n            end = line.find(delimiter)\n            if (end >= 0):\n                leading_space = Match('^(\\\\s*)\\\\S', line)\n                line = ((leading_space.group(1) + '\"\"') + line[(end + len(delimiter)):])\n                delimiter = None\n            else:\n                line = '\"\"'\n        while (delimiter is None):\n            matched = Match('^(.*?)\\\\b(?:R|u8R|uR|UR|LR)\"([^\\\\s\\\\\\\\()]*)\\\\((.*)$', line)\n            if (matched and (not Match('^([^\\\\\\'\"]|\\\\\\'(\\\\\\\\.|[^\\\\\\'])*\\\\\\'|\"(\\\\\\\\.|[^\"])*\")*\n                delimiter = ((')' + matched.group(2)) + '\"')\n                end = matched.group(3).find(delimiter)\n                if (end >= 0):\n                    line = ((matched.group(1) + '\"\"') + matched.group(3)[(end + len(delimiter)):])\n                    delimiter = None\n                else:\n                    line = (matched.group(1) + '\"\"')\n            else:\n                break\n        lines_without_raw_strings.append(line)\n    return lines_without_raw_strings", "docstring": "Removes C++11 raw strings from lines.\n\nBefore:\nstatic const char kData[] = R\"(\nmulti-line string\n)\";\n\nAfter:\nstatic const char kData[] = \"\"\n(replaced by blank line)\n\"\";\n\nArgs:\nraw_lines: list of raw lines.\n\nReturns:\nlist of lines with C++11 raw strings replaced by empty strings.", "source": "codesearchnet"}
{"code": "def _await_flow(self, client, flow_id):\n    \n    \n    print('{0:s}: Waiting to finish'.format(flow_id))\n    while True:\n      try:\n        status = client.Flow(flow_id).Get().data\n      except grr_errors.UnknownError:\n        msg = 'Unable to stat flow {0:s} for host {1:s}'.format(\n            flow_id, client.data.os_info.fqdn.lower())\n        self.state.add_error(msg)\n        raise DFTimewolfError(\n            'Unable to stat flow {0:s} for host {1:s}'.format(\n                flow_id, client.data.os_info.fqdn.lower()))\n\n      if status.state == flows_pb2.FlowContext.ERROR:\n        \n        message = status.context.backtrace\n        if 'ArtifactNotRegisteredError' in status.context.backtrace:\n          message = status.context.backtrace.split('\\n')[-2]\n        raise DFTimewolfError(\n            '{0:s}: FAILED! Message from GRR:\\n{1:s}'.format(\n                flow_id, message))\n\n      if status.state == flows_pb2.FlowContext.TERMINATED:\n        print('{0:s}: Complete'.format(flow_id))\n        break\n      time.sleep(self._CHECK_FLOW_INTERVAL_SEC)", "docstring": "Awaits flow completion.\n\nArgs:\nclient: GRR Client object in which to await the flow.\nflow_id: string containing ID of flow to await.\n\nRaises:\nDFTimewolfError: if flow error encountered.", "source": "juraj-google-style"}
{"code": "def graph_execution_trace_to_tensor_value(self, trace):\n    debug_event = self._reader.read_graph_execution_traces_event(trace.locator)\n    return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto)", "docstring": "Read full tensor values from an Execution or ExecutionDigest.\n\nArgs:\ntrace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.\n\nReturns:\nA numpy array representing the output tensor value of the intra-graph\ntensor execution event.", "source": "github-repos"}
{"code": "def compute_mu(L_aug, Y, k, p):\n    (n, d) = L_aug.shape\n    assert (Y.shape[0] == n)\n    mu = np.zeros((d, k))\n    for y in range(1, (k + 1)):\n        L_y = L_aug[(Y == y)]\n        mu[(:, (y - 1))] = (L_y.sum(axis=0) / L_y.shape[0])\n    return mu", "docstring": "Given label matrix L_aug and labels Y, compute the true mu params.\n\nArgs:\nL: (np.array {0,1}) [n, d] The augmented (indicator) label matrix\nY: (np.array int) [n] The true labels in {1,...,k}\nk: (int) Cardinality\np: (np.array float) [k] The class balance", "source": "codesearchnet"}
{"code": "def decode(self, ids, strip_extraneous=False):\n    del strip_extraneous\n    (_, tmp_file_path) = tempfile.mkstemp('_decode.png')\n    if ((self._height is None) or (self._width is None)):\n        size = int(math.sqrt((len(ids) / self._channels)))\n        length = ((size * size) * self._channels)\n    else:\n        size = None\n        length = ((self._height * self._width) * self._channels)\n    if (len(ids) != length):\n        raise ValueError(('Length of ids (%d) must be height (%d) x width (%d) x channels (%d); %d != %d.\\n Ids: %s' % (len(ids), self._height, self._width, self._channels, len(ids), length, ' '.join([str(i) for i in ids]))))\n    with tf.Graph().as_default():\n        raw = tf.constant(ids, dtype=tf.uint8)\n        if (size is None):\n            img = tf.reshape(raw, [self._height, self._width, self._channels])\n        else:\n            img = tf.reshape(raw, [size, size, self._channels])\n        png = tf.image.encode_png(img)\n        op = tf.write_file(tmp_file_path, png)\n        with tf.Session() as sess:\n            sess.run(op)\n    return tmp_file_path", "docstring": "Transform a sequence of int ids into an image file.\n\nArgs:\nids: list of integers to be converted.\nstrip_extraneous: unused\n\nReturns:\nPath to the temporary file where the image was saved.\n\nRaises:\nValueError: if the ids are not of the appropriate size.", "source": "codesearchnet"}
{"code": "def fetch_next_page(self):\n    for page in self:\n        return page\n    else:\n        return Page(self._resultset.cursor, iter(()))", "docstring": "Fetch the next Page of results.\n\nReturns:\nPage: The next page of results.", "source": "codesearchnet"}
{"code": "def strip_hidden(key_tuples, visibilities):\n    result = []\n    for key_tuple in key_tuples:\n        if (len(key_tuple) != len(visibilities)):\n            raise ValueError('length of key tuple {} is not equal to length of visibilities {}'.format(key_tuple, visibilities))\n        filtered_tuple = tuple((item for (item, visible) in zip(key_tuple, visibilities) if visible))\n        result.append(filtered_tuple)\n    return result", "docstring": "Filter each tuple according to visibility.\n\nArgs:\nkey_tuples: A sequence of tuples of equal length (i.e. rectangular)\nvisibilities: A sequence of booleans equal in length to the tuples contained in key_tuples.\n\nReturns:\nA sequence equal in length to key_tuples where the items are tuples with a length corresponding\nto the number of items in visibility which are True.", "source": "codesearchnet"}
{"code": "def GetUsername(self, event, default_username='-'):\n    \n    username = getattr(event, 'username', None)\n    if username and username != '-':\n      return username\n\n    session_identifier = event.GetSessionIdentifier()\n    if session_identifier is None:\n      return default_username\n\n    user_sid = getattr(event, 'user_sid', None)\n    username = self._knowledge_base.GetUsernameByIdentifier(\n        user_sid, session_identifier=session_identifier)\n    return username or default_username", "docstring": "Retrieves the username related to the event.\n\nArgs:\nevent (EventObject): event.\ndefault_username (Optional[str]): default username.\n\nReturns:\nstr: username.", "source": "juraj-google-style"}
{"code": "def next_location(self, raw=False):\n    if self._response:\n        location = self._response.fields.get('location')\n        if ((not location) or raw):\n            return location\n        return wpull.url.urljoin(self._response.request.url_info.url, location)", "docstring": "Returns the next location.\n\nArgs:\nraw (bool): If True, the original string contained in the Location\nfield will be returned. Otherwise, the URL will be\nnormalized to a complete URL.\n\nReturns:\nstr, None: If str, the location. Otherwise, no next location.", "source": "codesearchnet"}
{"code": "def GetGroupMap(self, since=None):\n    return GroupUpdateGetter().GetUpdates(self, self.conf['group_url'], since)", "docstring": "Return the group map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of group.GroupMap", "source": "github-repos"}
{"code": "def _parse_vrf(self, config):\n        \n        match = re.search(r'^router ospf \\d+ vrf (\\w+)', config)\n        if match:\n            return dict(vrf=match.group(1))\n        return dict(vrf='default')", "docstring": "Parses config file for the OSPF vrf name\n\nArgs:\nconfig(str):  Running configuration\nReturns:\ndict: key: ospf_vrf (str)", "source": "juraj-google-style"}
{"code": "def memory_usage(self, string=False):\n    if string:\n        n = getsizeof(self)\n        return ' '.join((str(s) for s in convert_bytes(n)))\n    return self.info()['size']", "docstring": "Get the memory usage estimate of the container.\n\nArgs:\nstring (bool): Human readable string (default false)\n\nSee Also:\n:func:`~exa.core.container.Container.info`", "source": "codesearchnet"}
{"code": "def get_cell_length(flow_model):\n        \n        assert flow_model.lower() in FlowModelConst.d8_lens\n        return FlowModelConst.d8_lens.get(flow_model.lower())", "docstring": "Get flow direction induced cell length dict.\nArgs:\nflow_model: Currently, \"TauDEM\", \"ArcGIS\", and \"Whitebox\" are supported.", "source": "juraj-google-style"}
{"code": "def format_delta(__timedelta: datetime.timedelta) -> str:\n    \n    if __timedelta == datetime.timedelta(0):\n        return ''\n    days_s = '{}D'.format(__timedelta.days) if __timedelta.days else ''\n    hours, minutes = divmod(__timedelta.seconds, 3600)\n    minutes, seconds = divmod(minutes, 60)\n    hours_s = '{:02d}H'.format(hours) if hours else ''\n    minutes_s = '{:02d}M'.format(minutes) if minutes else ''\n    seconds_s = '{:02d}S'.format(seconds) if seconds else ''\n    return 'P{}{}{}{}{}'.format(days_s,\n                                'T' if hours or minutes or seconds else '',\n                                hours_s, minutes_s, seconds_s)", "docstring": "Format ISO-8601 duration string.\n\nArgs:\n__timedelta: Duration to process\nReturns:\nISO-8601 representation of duration", "source": "juraj-google-style"}
{"code": "def load_json(raw_json: str) -> Dict[str, Any]:\n    return json.loads(raw_json, parse_float=decimal.Decimal, parse_int=decimal.Decimal)", "docstring": "Load JSON using Decimal objects for numerics.\n\nArgs:\nraw_json: The JSON string to parse.\n\nReturns:\nA dictionary representation of the deserialized JSON.", "source": "github-repos"}
{"code": "def is_special_unitary(\n        matrix: np.ndarray,\n        *,\n        rtol: float = 1e-5,\n        atol: float = 1e-8) -> bool:\n    \n    return (is_unitary(matrix, rtol=rtol, atol=atol) and\n            (matrix.shape[0] == 0 or\n             np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))", "docstring": "Determines if a matrix is approximately unitary with unit determinant.\n\nA matrix is special-unitary if it is square and its adjoint is its inverse\nand its determinant is one.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\nReturns:\nWhether the matrix is unitary with unit determinant within the given\ntolerance.", "source": "juraj-google-style"}
{"code": "def DeregisterDefinition(self, data_type_definition):\n    \n    name = data_type_definition.name.lower()\n    if name not in self._definitions:\n      raise KeyError('Definition not set for name: {0:s}.'.format(\n          data_type_definition.name))\n\n    del self._definitions[name]", "docstring": "Deregisters a data type definition.\n\nThe data type definitions are identified based on their lower case name.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nKeyError: if a data type definition is not set for the corresponding\nname.", "source": "juraj-google-style"}
{"code": "def enable_quantized_dtypes_training(fn: _F) -> _F:\n\n    def wrapper(*args, **kwargs):\n        if flags.config().enable_quantized_dtypes_training.value():\n            return fn(*args, **kwargs)\n        flags.config().enable_quantized_dtypes_training.reset(True)\n        try:\n            return fn(*args, **kwargs)\n        finally:\n            flags.config().enable_quantized_dtypes_training.reset(False)\n    return wrapper", "docstring": "Decorator for enabling quantized_dtypes_training on a test.\n\nThis function returns a decorator intended to be applied to test methods in\na `tf.test.TestCase` class. Doing so will set quantized_dtypes_training,\nreset the context, execute the test, then reset the context to the state\nit was in prior to this test.\n\nExample:\n\nclass MyTest(test.TestCase):\n\n@enable_quantized_dtypes_training\ndef testFoo(self):\n...\n\nArgs:\nfn: the function to be wrapped.\n\nReturns:\nThe wrapped function.", "source": "github-repos"}
{"code": "def Deserialize(self, reader):\n        \n        self.DeserializeUnsigned(reader)\n\n        self.scripts = reader.ReadSerializableArray()\n        self.OnDeserialized()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def _send_request(self, xml_request):\n    if (self._scheme == 'http'):\n        return self._send_http_request(xml_request)\n    else:\n        return self._send_socket_request(xml_request)", "docstring": "Send the prepared XML request block to the CPS using the corect protocol.\n\nArgs:\nxml_request -- A fully formed xml request string for the CPS.\n\nReturns:\nThe raw xml response string.\n\nRaises:\nConnectionError -- Can't establish a connection with the server.", "source": "codesearchnet"}
{"code": "def parse_cron_line(self, line):\n        \n        stripped = line.strip()\n\n        if stripped and stripped.startswith('\n            rexres = self.rex.search(stripped)\n            if rexres:\n                return ' '.join(rexres.group(1).split())\n\n        return None", "docstring": "Parses crontab line and returns only starting time string\n\nArgs:\nline: crontab line\nReturns:\nTime part of cron line", "source": "juraj-google-style"}
{"code": "def getSet(self, name):\n        \n        return lock_and_call(\n            lambda: Set(self._impl.getSet(name)),\n            self._lock\n        )", "docstring": "Get the set with the corresponding name.\n\nArgs:\nname: Name of the set to be found.\n\nRaises:\nTypeError: if the specified set does not exist.", "source": "juraj-google-style"}
{"code": "def get_properties(properties_file='raw.properties.json', env=None, region=None):\n    with open(properties_file, 'rt') as file_handle:\n        properties = json.load(file_handle)\n    env_properties = properties.get(env, properties)\n    contents = env_properties.get(region, env_properties)\n    LOG.debug('Found properties for %s:\\n%s', env, contents)\n    return contents", "docstring": "Get contents of _properties_file_ for the _env_.\n\nArgs:\nproperties_file (str): File name of `create-configs` JSON output.\nenv (str): Environment to read optionally.\nregion (str): Region to get specific configs for.\n\nReturns:\ndict: JSON loaded Application properties for _env_.\nNone: Given _env_ was not found in `create-configs` JSON output.", "source": "codesearchnet"}
{"code": "def _strict_object_meta_fset(_, private_attr, type_):\n    \n    \n\n    def _fset(self, value):  \n        \n        \n        rtype = type_\n        if isinstance(type_, TypeVar):\n            type_map = dict(\n                zip(self.__parameters__, self.__orig_class__.__args__)\n            )\n            rtype = type_map[type_]\n        if not is_instance(value, rtype):\n            raise TypeError(\n                \"Cannot assign type of {} to attribute of type {}.\".format(\n                    _get_type_name(type(value)), _get_type_name(rtype)\n                )\n            )\n        vars(self)[private_attr] = value\n\n    return _fset", "docstring": "Create a property setter method for the attribute.\n\nArgs:\n_: The name of the attribute to set. Unused.\nprivate_attr: The name of the attribute that will store any data\nrelated to the attribute.\ntype_: The annotated type defining what values can be stored in the\nattribute.\n\nReturns:\nA method that takes self and a value and stores that value on self\nin the private attribute iff the value is an instance of type_.", "source": "juraj-google-style"}
{"code": "def _rewrite_grad_indexed_slices_output(old_output_slices, new_input_slices):\n\n    def rewrite(old_output, new_input):\n        assert old_output.type == 'Identity'\n        concat_op = old_output.inputs[0].op\n        assert concat_op.type == 'ConcatV2'\n        old_concat_args = concat_op.inputs[:-1]\n        return array_ops.concat([new_input] + old_concat_args[1:], 0)\n    values = rewrite(old_output_slices.values.op, new_input_slices.values)\n    indices = rewrite(old_output_slices.indices.op, new_input_slices.indices)\n    return indexed_slices.IndexedSlices(values=values, indices=indices, dense_shape=new_input_slices.dense_shape)", "docstring": "Creates a new version of old_output_slices with new_input_slices as input.\n\nThis method assumes that old_output_slices.{values,indices} are produced by\nconcatenating the incoming gradient Tensor input with the IndexedSlices\nproduced by the gradient computation of the while body. See\nbackprop.aggregate_indexed_slices_gradients for where these concats are\nconstructed. We build new concats that use new_input_slices instead of the\noriginal Tensor input.\n\nArgs:\nold_output_slices: original IndexedSlices output of while gradient.\nnew_input_slices: new IndexedSlices to use as input to while gradient.\n\nReturns:\nA new IndexedSlices to replace old_output_slices.", "source": "github-repos"}
{"code": "def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None):\n    \n    if num_splits == 1:\n        return result\n    if length_list is not None:\n        length_list.insert(0, 0)\n        sums = np.cumsum(length_list)\n        if axis == 0:\n            return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]\n        else:\n            return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]\n    \n    chunksize = compute_chunksize(result, num_splits, axis=axis)\n    if axis == 0:\n        return [\n            result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits)\n        ]\n    else:\n        return [\n            result.iloc[:, chunksize * i : chunksize * (i + 1)]\n            for i in range(num_splits)\n        ]", "docstring": "Split the Pandas result evenly based on the provided number of splits.\n\nArgs:\naxis: The axis to split across.\nnum_splits: The number of even splits to create.\nresult: The result of the computation. This should be a Pandas\nDataFrame.\nlength_list: The list of lengths to split this DataFrame into. This is used to\nreturn the DataFrame to its original partitioning schema.\n\nReturns:\nA list of Pandas DataFrames.", "source": "juraj-google-style"}
{"code": "def parse_config(args=sys.argv):\n    parser = argparse.ArgumentParser(description='Read in the config file')\n    parser.add_argument('config_file', help='Configuration file.', metavar='FILE', type=extant_file)\n    return parser.parse_args(args[1:])", "docstring": "Parse the args using the config_file pattern\n\nArgs:\nargs: sys.argv\n\nReturns:\nThe populated namespace object from parser.parse_args().\n\nRaises:\nTBD", "source": "codesearchnet"}
{"code": "def copy_submission_locally(self, cloud_path):\n    \n    local_path = os.path.join(self.download_dir, os.path.basename(cloud_path))\n    cmd = ['gsutil', 'cp', cloud_path, local_path]\n    if subprocess.call(cmd) != 0:\n      logging.error('Can\\'t copy submission locally')\n      return None\n    return local_path", "docstring": "Copies submission from Google Cloud Storage to local directory.\n\nArgs:\ncloud_path: path of the submission in Google Cloud Storage\n\nReturns:\nname of the local file where submission is copied to", "source": "juraj-google-style"}
{"code": "def _dispatch_function(self, event, listener, *args, **kwargs):\n    try:\n        return listener(*args, **kwargs)\n    except Exception as exc:\n        if (event == self.LISTENER_ERROR_EVENT):\n            raise\n        return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)", "docstring": "Execute a sync function.\n\nArgs:\nevent (str): The name of the event that triggered this call.\nlistener (def): The def that needs to be executed.\n*args: Any number of positional arguments.\n**kwargs: Any number of keyword arguments.\n\nThe values of *args and **kwargs are passed, unaltered, to the def\nwhen exceuting. If there is an exception executing the def, such as the\nwrong number of arguments, the emitter's error event is triggered. If\nthe triggering event _is_ the emitter's error event then the exception\nis reraised. The reraised exception may show in debug mode for the\nevent loop but is otherwise silently dropped.", "source": "codesearchnet"}
{"code": "def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:\n    \n    assert parent is not None\n    if isinstance(variables, (int, float)):\n        yield [{parent: i} for i in np.arange(variables)]\n\n    elif isinstance(variables, dict):\n        if variables.get(\"stop\"):\n            yield [{parent: i} for i in arange(**variables)]\n        else:\n            raise ValueError(f\"Stop is a required keyword for the arange iterator.\")\n\n    else:\n        raise ValueError(\n            f\"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}\"\n        )", "docstring": "Create a list of values using the :func:`numpy.arange` function.\n\nArgs:\nvariables: The input variables for the creation of the range\nparent: The variable for which the values are being generated.\n\nReturns: A list of dictionaries mapping the parent to each value.", "source": "juraj-google-style"}
{"code": "def __init__(self, role, train_instance_count, train_instance_type, data_location=None, **kwargs):\n        \n        super(AmazonAlgorithmEstimatorBase, self).__init__(role, train_instance_count, train_instance_type,\n                                                           **kwargs)\n\n        data_location = data_location or \"s3:\n            self.sagemaker_session.default_bucket())\n        self.data_location = data_location", "docstring": "Initialize an AmazonAlgorithmEstimatorBase.\n\nArgs:\ndata_location (str or None): The s3 prefix to upload RecordSet objects to, expressed as an\nS3 url. For example \"s3://example-bucket/some-key-prefix/\". Objects will be\nsaved in a unique sub-directory of the specified location. If None, a default\ndata location will be used.", "source": "juraj-google-style"}
{"code": "def load_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000):\n    global _NOTEBOOK_LOADED\n    from .. import __version__\n    from ..core.templates import NOTEBOOK_LOAD\n    from ..util.serialization import make_id\n    from ..resources import CDN\n    from ..util.compiler import bundle_all_models\n    if (resources is None):\n        resources = CDN\n    if (not hide_banner):\n        if (resources.mode == 'inline'):\n            js_info = 'inline'\n            css_info = 'inline'\n        else:\n            js_info = (resources.js_files[0] if (len(resources.js_files) == 1) else resources.js_files)\n            css_info = (resources.css_files[0] if (len(resources.css_files) == 1) else resources.css_files)\n        warnings = [('Warning: ' + msg['text']) for msg in resources.messages if (msg['type'] == 'warn')]\n        if (_NOTEBOOK_LOADED and verbose):\n            warnings.append('Warning: BokehJS previously loaded')\n        element_id = make_id()\n        html = NOTEBOOK_LOAD.render(element_id=element_id, verbose=verbose, js_info=js_info, css_info=css_info, bokeh_version=__version__, warnings=warnings)\n    else:\n        element_id = None\n    _NOTEBOOK_LOADED = resources\n    custom_models_js = (bundle_all_models() or '')\n    nb_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=True)\n    jl_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=False)\n    if (not hide_banner):\n        publish_display_data({'text/html': html})\n    publish_display_data({JS_MIME_TYPE: nb_js, LOAD_MIME_TYPE: jl_js})", "docstring": "Prepare the IPython notebook for displaying Bokeh plots.\n\nArgs:\nresources (Resource, optional) :\nhow and where to load BokehJS from (default: CDN)\n\nverbose (bool, optional) :\nwhether to report detailed settings (default: False)\n\nhide_banner (bool, optional):\nwhether to hide the Bokeh banner (default: False)\n\nload_timeout (int, optional) :\nTimeout in milliseconds when plots assume load timed out (default: 5000)\n\n.. warning::\nClearing the output cell containing the published BokehJS\nresources HTML code may cause Bokeh CSS styling to be removed.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_node(self, transformer, name=None, children=None, parent=None, parameters={}, return_node=False):\n    node = Node(transformer, name, **parameters)\n    self.nodes[node.id] = node\n    if (parent is None):\n        self.roots.append(node)\n    else:\n        parent = self.nodes[parent.id]\n        parent.add_child(node)\n    if (children is not None):\n        self.add_nodes(children, parent=node)\n    if return_node:\n        return node", "docstring": "Adds a node to the current graph.\n\nArgs:\ntransformer (str, Transformer): The pliers Transformer to use at\nthe to-be-added node. Either a case-insensitive string giving\nthe name of a Transformer class, or an initialized Transformer\ninstance.\nname (str): Optional name to give this Node.\nchildren (list): Optional list of child nodes (i.e., nodes to pass\nthe to-be-added node's Transformer output to).\nparent (Node): Optional node from which the to-be-added Node\nreceives its input.\nparameters (dict): Optional keyword arguments to pass onto the\nTransformer initialized at this Node if a string is passed to\nthe 'transformer' argument. Ignored if an already-initialized\nTransformer is passed.\nreturn_node (bool): If True, returns the initialized Node instance.\n\nReturns:\nThe initialized Node instance if return_node is True,\nNone otherwise.", "source": "codesearchnet"}
{"code": "def sync_trial_info(self, job_path, expr_dir_name):\n        \n        expr_name = expr_dir_name[-8:]\n        expr_path = os.path.join(job_path, expr_dir_name)\n\n        if expr_name not in self._monitored_trials:\n            self._create_trial_info(expr_path)\n            self._monitored_trials.add(expr_name)\n        else:\n            self._update_trial_info(expr_path)", "docstring": "Load information of the trial from the given experiment directory.\n\nCreate or update the trial information, together with the trial\nmeta file.\n\nArgs:\njob_path(str)\nexpr_dir_name(str)", "source": "juraj-google-style"}
{"code": "def _escape_token(token, alphabet):\n  r\n  token = token.replace(u\"\\\\\", u\"\\\\\\\\\").replace(u\"_\", u\"\\\\u\")\n  ret = [c if c in alphabet and c != u\"\\n\" else r\"\\%d;\" % ord(c) for c in token]\n  return u\"\".join(ret) + \"_\"", "docstring": "r\"\"\"Replace characters that aren't in the alphabet and append \"_\" to token.\n\nApply three transformations to the token:\n1. Replace underline character \"_\" with \"\\u\", and backslash \"\\\" with \"\\\\\".\n2. Replace characters outside of the alphabet with \"\\###;\", where ### is the\ncharacter's Unicode code point.\n3. Appends \"_\" to mark the end of a token.\n\nArgs:\ntoken: unicode string to be escaped\nalphabet: list of all known characters\n\nReturns:\nescaped string", "source": "juraj-google-style"}
{"code": "def get(self, name):\n    if name.startswith('\n        return self.tags.get(name[1:])\n    return self.props.get(name)", "docstring": "Return a secondary property value from the Node.\n\nArgs:\nname (str): The name of a secondary property.\n\nReturns:\n(obj): The secondary property value or None.", "source": "codesearchnet"}
{"code": "def swd_read8(self, offset):\n    value = self._dll.JLINK_SWD_GetU8(offset)\n    return ctypes.c_uint8(value).value", "docstring": "Gets a unit of ``8`` bits from the input buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): the offset (in bits) from which to start reading\n\nReturns:\nThe integer read from the input buffer.", "source": "codesearchnet"}
{"code": "def list(self, path, timeout=None):\n    \n    transport = DentFilesyncTransport(self.stream)\n    transport.write_data('LIST', path, timeout)\n    return (DeviceFileStat(dent_msg.name, dent_msg.mode,\n                           dent_msg.size, dent_msg.time) for dent_msg in\n            transport.read_until_done('DENT', timeout))", "docstring": "List directory contents on the device.\n\nArgs:\npath: List the contents of this directory.\ntimeout: Timeout to use for this operation.\n\nReturns:\nGenerator yielding DeviceFileStat tuples representing the contents of\nthe requested path.", "source": "juraj-google-style"}
{"code": "def calc_inv_vol_weights(returns):\n    vol = np.divide(1.0, np.std(returns, ddof=1))\n    vol[np.isinf(vol)] = np.NaN\n    volsum = vol.sum()\n    return np.divide(vol, volsum)", "docstring": "Calculates weights proportional to inverse volatility of each column.\n\nReturns weights that are inversely proportional to the column's\nvolatility resulting in a set of portfolio weights where each position\nhas the same level of volatility.\n\nNote, that assets with returns all equal to NaN or 0 are excluded from\nthe portfolio (their weight is set to NaN).\n\nReturns:\nSeries {col_name: weight}", "source": "codesearchnet"}
{"code": "def take_samples(self, sample_hz, sample_num, sample_offset=0, live=False):\n    sys.stdout.flush()\n    voltage = self.mon.GetVoltage()\n    self.log.info('Taking samples at %dhz for %ds, voltage %.2fv.', sample_hz, (sample_num / sample_hz), voltage)\n    sample_num += sample_offset\n    self.mon.StopDataCollection()\n    status = self.mon.GetStatus()\n    native_hz = (status['sampleRate'] * 1000)\n    self.mon.StartDataCollection()\n    emitted = offset = 0\n    collected = []\n    history_deque = collections.deque()\n    current_values = []\n    timestamps = []\n    try:\n        last_flush = time.time()\n        while ((emitted < sample_num) or (sample_num == (- 1))):\n            need = int(((((native_hz - offset) + sample_hz) - 1) / sample_hz))\n            if (need > len(collected)):\n                samples = self.mon.CollectData()\n                if (not samples):\n                    break\n                collected.extend(samples)\n            else:\n                offset += (need * sample_hz)\n                while (offset >= native_hz):\n                    this_sample = (sum(collected[:need]) / need)\n                    this_time = int(time.time())\n                    timestamps.append(this_time)\n                    if live:\n                        self.log.info('%s %s', this_time, this_sample)\n                    current_values.append(this_sample)\n                    sys.stdout.flush()\n                    offset -= native_hz\n                    emitted += 1\n                collected = collected[need:]\n                now = time.time()\n                if ((now - last_flush) >= 0.99):\n                    sys.stdout.flush()\n                    last_flush = now\n    except Exception as e:\n        pass\n    self.mon.StopDataCollection()\n    try:\n        return MonsoonData(current_values, timestamps, sample_hz, voltage, offset=sample_offset)\n    except:\n        return None", "docstring": "Take samples of the current value supplied by monsoon.\n\nThis is the actual measurement for power consumption. This function\nblocks until the number of samples requested has been fulfilled.\n\nArgs:\nhz: Number of points to take for every second.\nsample_num: Number of samples to take.\noffset: The number of initial data points to discard in MonsoonData\ncalculations. sample_num is extended by offset to compensate.\nlive: Print each sample in console as measurement goes on.\n\nReturns:\nA MonsoonData object representing the data obtained in this\nsampling. None if sampling is unsuccessful.", "source": "codesearchnet"}
{"code": "def groups_setPurpose(self, *, channel: str, purpose: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"channel\": channel, \"purpose\": purpose})\n        return self.api_call(\"groups.setPurpose\", json=kwargs)", "docstring": "Sets the purpose for a private channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'G1234567890'\npurpose (str): The new purpose for the channel. e.g. 'My Purpose'", "source": "juraj-google-style"}
{"code": "def batch_workflow_cancel(self, batch_workflow_id):\n        \n        self.logger.debug('Cancel batch workflow: ' + batch_workflow_id)\n        url = '%(base_url)s/batch_workflows/%(batch_id)s/cancel' % {\n            'base_url': self.base_url, 'batch_id': batch_workflow_id\n        }\n        r = self.gbdx_connection.post(url)\n\n        return r.json()", "docstring": "Cancels GBDX batch workflow.\n\nArgs:\nbatch workflow_id (str): Batch workflow id.\n\nReturns:\nBatch Workflow status (str).", "source": "juraj-google-style"}
{"code": "def _MaybePurgeOrphanedData(self, event):\n    \n    if not self.purge_orphaned_data:\n      return\n    \n    if self.file_version and self.file_version >= 2:\n      \n      \n      self._CheckForRestartAndMaybePurge(event)\n    else:\n      \n      \n      self._CheckForOutOfOrderStepAndMaybePurge(event)\n    \n    if event.HasField('summary'):\n      self.most_recent_step = event.step\n      self.most_recent_wall_time = event.wall_time", "docstring": "Maybe purge orphaned data due to a TensorFlow crash.\n\nWhen TensorFlow crashes at step T+O and restarts at step T, any events\nwritten after step T are now \"orphaned\" and will be at best misleading if\nthey are included in TensorBoard.\n\nThis logic attempts to determine if there is orphaned data, and purge it\nif it is found.\n\nArgs:\nevent: The event to use as a reference, to determine if a purge is needed.", "source": "juraj-google-style"}
{"code": "def make_fn(shared_variable_store, device_id):\n    variable_scope_access_index = {}\n    assert isinstance(device_id, int)\n\n    def create_new_variable(next_creator, **kwargs):\n        \n        canonical_name = _canonicalize_variable_name(kwargs.get('name'))\n        v = next_creator(**kwargs)\n        if canonical_name not in shared_variable_store:\n            shared_variable_store[canonical_name] = []\n        shared_variable_store[canonical_name].append(v)\n        return v\n\n    def reuse_variable(next_creator, **kwargs):\n        \n        del next_creator\n        name = kwargs.get('name')\n        canonical_name = _canonicalize_variable_name(name)\n        try:\n            variable_index = variable_scope_access_index.get(canonical_name, 0)\n            v = shared_variable_store[canonical_name][variable_index]\n            variable_scope_access_index[canonical_name] = variable_index + 1\n            return v\n        except (KeyError, IndexError):\n            raise RuntimeError('Tried to create variable {} with mismatching name on device {}'.format(name, device_id))\n    if device_id == 0:\n        return create_new_variable\n    else:\n        return reuse_variable", "docstring": "Construct the variable creator function for device `device_id`.\n\nConstructs custom variable creator functions for the given device.\nOn first device (device_id == 0), it creates the variable using the\n`next_creator`, and stores it in the provided `shared_variable_store`.\nOn all other devices (device_id > 0), it tries to re-use the variable\nalready created with the same name. If no such variable exists, it throws an\nerror.\nAdditionally, we de-uniquify variable names before checking for matches. This\nhelps re-use variables which are intended to be the same but have different\nnames due to variable uniquification happening upstream. Since this might\nmean we may have multiple variables with the same canonical name, we store\nthem in a list per canonical name and return them in the same order as well.\n\nArgs:\nshared_variable_store: A dictionary that we will use to store variables\ncreated on the first device, and re-used by creators for other devices.\ndevice_id: Integer index of the device whose creator should be\nconstructed.\n\nReturns:\nAn appropriate creator function based on device_id.", "source": "github-repos"}
{"code": "def trace(state: State, fn: TransitionOperator, num_steps: IntTensor, trace_fn: Callable[([State, TensorNest], TensorNest)]) -> Tuple[(State, TensorNest)]:\n\n    def fn_wrapper(args, _):\n        return tf.nest.map_structure(tf.convert_to_tensor, call_fn(fn, args[0]))\n\n    def trace_fn_wrapper(args):\n        return tf.nest.map_structure(tf.convert_to_tensor, call_fn(trace_fn, args))\n    state = call_fn(fn, state)\n    first_trace = trace_fn_wrapper(state)\n    (state, full_trace) = mcmc_util.trace_scan(fn_wrapper, state, tf.ones((num_steps - 1)), trace_fn=trace_fn_wrapper)\n    prepend = (lambda x, y: tf.concat([tf.convert_to_tensor(value=x)[tf.newaxis], y], 0))\n    return (state, tf.nest.map_structure(prepend, first_trace, full_trace))", "docstring": "`TransitionOperator` that runs `fn` repeatedly and traces its outputs.\n\nArgs:\nstate: A nest of `Tensor`s or None.\nfn: A `TransitionOperator`.\nnum_steps: Number of steps to run the function for. Must be greater than 1.\ntrace_fn: Callable that the unpacked outputs of `fn` and returns a nest of\n`Tensor`s. These will be stacked and returned.\n\nReturns:\nstate: The final state returned by `fn`.\ntraces: Stacked outputs of `trace_fn`.", "source": "codesearchnet"}
{"code": "def parse_variant_id(chrom, pos, ref, alt, variant_type):\n    \n    return generate_md5_key([chrom, pos, ref, alt, variant_type])", "docstring": "Parse the variant id for a variant\n\nvariant_id is used to identify variants within a certain type of\nanalysis. It is not human readable since it is a md5 key.\n\nArgs:\nchrom(str)\npos(str)\nref(str)\nalt(str)\nvariant_type(str): 'clinical' or 'research'\n\nReturns:\nvariant_id(str): The variant id converted to md5 string", "source": "juraj-google-style"}
{"code": "def detailed_log_handler(self, handler):\n        \n        if not self.opened():\n            handler = handler or util.noop\n            self._detailed_log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n            self._dll.JLINKARM_EnableLogCom(self._detailed_log_handler)", "docstring": "Setter for the detailed log handler function.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def process_alias_create_namespace(namespace):\n    \n    namespace = filter_alias_create_namespace(namespace)\n    _validate_alias_name(namespace.alias_name)\n    _validate_alias_command(namespace.alias_command)\n    _validate_alias_command_level(namespace.alias_name, namespace.alias_command)\n    _validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)", "docstring": "Validate input arguments when the user invokes 'az alias create'.\n\nArgs:\nnamespace: argparse namespace object.", "source": "juraj-google-style"}
{"code": "def __init__(self, hashes):\n        \n        self.Root = MerkleTree.__Build([MerkleTreeNode(hash) for hash in hashes])\n        depth = 1\n        i = self.Root\n        while i.LeftChild is not None:\n            depth = depth + 1\n            i = i.LeftChild\n        self.Depth = depth", "docstring": "Crease an instance.\nArgs:\nhashes (list): each hash is of bytearray type.", "source": "juraj-google-style"}
{"code": "def generate_rpcs(self, address):\n    rpc_list = []\n    for offset in range(2, len(self.data), 16):\n        rpc = (address, rpcs.SET_CONFIG_VARIABLE, self.var_id, (offset - 2), self.data[offset:(offset + 16)])\n        rpc_list.append(rpc)\n    return rpc_list", "docstring": "Generate the RPCs needed to stream this config variable to a tile.\n\nArgs:\naddress (int): The address of the tile that we should stream to.\n\nReturns:\nlist of tuples: A list of argument tuples for each RPC.\n\nThese tuples can be passed to EmulatedDevice.rpc to actually make\nthe RPCs.", "source": "codesearchnet"}
{"code": "def get_managed_ports(self, id_or_uri, port_id_or_uri=''):\n        \n        if port_id_or_uri:\n            uri = self._client.build_uri(port_id_or_uri)\n            if \"/managedPorts\" not in uri:\n                uri = self._client.build_uri(id_or_uri) + \"/managedPorts\" + \"/\" + port_id_or_uri\n\n        else:\n            uri = self._client.build_uri(id_or_uri) + \"/managedPorts\"\n\n        return self._client.get_collection(uri)", "docstring": "Gets all ports or a specific managed target port for the specified storage system.\n\nArgs:\nid_or_uri: Can be either the storage system id or the storage system uri.\nport_id_or_uri: Can be either the port id or the port uri.\n\nReturns:\ndict: Managed ports.", "source": "juraj-google-style"}
{"code": "def containsParamSubset(self, params):\n        \n        for key in params.keys():\n            if key not in self.params:\n                return False\n\n            if params[key] != self.params[key]:\n                return False\n\n        return True", "docstring": "Test whether this element contains at least all `params`, or more.\n\nArgs:\nparams (dict/SpecialDict): Subset of parameters.\n\nReturns:\nbool: True if all `params` are contained in this element.", "source": "juraj-google-style"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    requirements_dir = os.path.dirname(os.path.realpath(__file__))\n    pipeline_options.view_as(SetupOptions).requirements_file = f'{requirements_dir}/sklearn_examples_requirements.txt'\n    model_loader = KeyedModelHandler(SklearnModelHandlerNumpy(model_file_type=ModelFileType.PICKLE, model_uri=known_args.model_path, large_model=known_args.large_model))\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    label_pixel_tuple = pipeline | 'ReadFromInput' >> beam.io.ReadFromText(known_args.input) | 'PreProcessInputs' >> beam.Map(process_input)\n    predictions = label_pixel_tuple | 'RunInference' >> RunInference(model_loader) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n    _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def aggregate_grads(all_grads, colocation=False, devices=None, average=True):\n    assert (not ((devices is not None) and colocation))\n    if (devices is not None):\n        assert isinstance(devices, list), devices\n    nr_tower = len(all_grads)\n    if (nr_tower == 1):\n        return all_grads[0]\n\n    def aggregate(grads):\n        if average:\n            return tf.multiply(tf.add_n(grads), (1.0 / nr_tower))\n        else:\n            return tf.add_n(grads)\n    ret = []\n    for (idx, grad_and_vars) in enumerate(zip(*all_grads)):\n        v = grad_and_vars[0][1]\n        grads = [g for (g, _) in grad_and_vars]\n        if colocation:\n            with tf.device(v.device):\n                grad = aggregate(grads)\n        elif (devices is None):\n            grad = aggregate(grads)\n        else:\n            dev = devices[(idx % len(devices))]\n            with tf.device(dev):\n                grad = aggregate(grads)\n        ret.append((grad, v))\n    return ret", "docstring": "Average the gradients.\n\nArgs:\nall_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.\nThe variables have to be the same across the K lists.\ncolocation (bool): colocate gradient averaging on the device of the variable.\ndevices (list[str]): assign the averaging to these device in\nround-robin. Cannot be used together with ``colocation``.\naverage (bool): do average or sum\n\nReturns:\n(N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.", "source": "codesearchnet"}
{"code": "def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:\n    x = x[0]\n    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n    ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)\n    return ff * g", "docstring": "gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) +\n0.3989423 * x * torch.exp(-0.5 * x * x)\n\nArgs:\ng (`torch.tensor`):\ngradient output tensor\nx (`torch.tensor`):\ninput tensor", "source": "github-repos"}
{"code": "def list(self, **kwargs):\n    resp = self.client.api.secrets(**kwargs)\n    return [self.prepare_model(obj) for obj in resp]", "docstring": "List secrets. Similar to the ``docker secret ls`` command.\n\nArgs:\nfilters (dict): Server-side list filtering options.\n\nReturns:\n(list of :py:class:`Secret`): The secrets.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _add_qasm_reset(self, qubit):\n    (outcome, probability) = self._get_measure_outcome(qubit)\n    if (outcome == '0'):\n        update = [[(1 / np.sqrt(probability)), 0], [0, 0]]\n        self._add_unitary_single(update, qubit)\n    else:\n        update = [[0, (1 / np.sqrt(probability))], [0, 0]]\n        self._add_unitary_single(update, qubit)", "docstring": "Apply a reset instruction to a qubit.\n\nArgs:\nqubit (int): the qubit being rest\n\nThis is done by doing a simulating a measurement\noutcome and projecting onto the outcome state while\nrenormalizing.", "source": "codesearchnet"}
{"code": "def _convert_args(handler, args):\n    \n\n    args = list(args)\n    params = inspect.signature(handler).parameters\n    for i, (arg, name) in enumerate(zip(args, params)):\n        default = params[name].default\n        annotation = params[name].annotation\n\n        if annotation != inspect.Parameter.empty:\n            if isinstance(annotation, type) and annotation != str:\n                \n                \n                args[i] = annotation(arg)\n        elif default != inspect.Parameter.empty:\n            if default is not None and not isinstance(default, str):\n                \n                \n                args[i] = type(default)(arg)\n\n    return args", "docstring": "Convert a list of command arguments to types specified by the handler.\n\nArgs:\nhandler: a command handler function.\nargs: the list of string arguments to pass to handler.\n\nReturns:\nA new list containing `args` that have been converted to the expected type\nfor `handler`. For each function parameter of `handler` that has either an\nexplicit type annotation or a non-None default value, the corresponding\nelement in `args` is converted to that type.", "source": "juraj-google-style"}
{"code": "def update(self, friendly_name=None, description=None):\n    self._get_info()\n    if self._info:\n        if friendly_name:\n            self._info['friendlyName'] = friendly_name\n        if description:\n            self._info['description'] = description\n        try:\n            self._api.datasets_update(self._name_parts, self._info)\n        except Exception as e:\n            raise e\n        finally:\n            self._info = None", "docstring": "Selectively updates Dataset information.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\n\nReturns:", "source": "codesearchnet"}
{"code": "def get_element_from_tensor_info(tensor_info, graph=None, import_scope=None):\n    graph = graph or ops.get_default_graph()\n    return graph.as_graph_element(ops.prepend_name_scope(tensor_info.name, import_scope=import_scope))", "docstring": "Returns the element in the graph described by a TensorInfo proto.\n\nArgs:\ntensor_info: A TensorInfo proto describing an Op or Tensor by name.\ngraph: The tf.Graph in which tensors are looked up. If None, the current\ndefault graph is used.\nimport_scope: If not None, names in `tensor_info` are prefixed with this\nstring before lookup.\n\nReturns:\nOp or tensor in `graph` described by `tensor_info`.\n\nRaises:\nKeyError: If `tensor_info` does not correspond to an op or tensor in `graph`", "source": "github-repos"}
{"code": "def __init__(self, fetches, feed_dict, run_options, run_metadata, run_call_count, is_callable_runner=False):\n    self.fetches = fetches\n    self.feed_dict = feed_dict\n    self.run_options = run_options\n    self.run_metadata = run_metadata\n    self.run_call_count = run_call_count\n    self.is_callable_runner = is_callable_runner", "docstring": "Constructor of `OnRunStartRequest`.\n\nArgs:\nfetches: Fetch targets of the run() call.\nfeed_dict: The feed dictionary to the run() call.\nrun_options: RunOptions input to the run() call.\nrun_metadata: RunMetadata input to the run() call.\nThe above four arguments are identical to the input arguments to the\nrun() method of a non-wrapped TensorFlow session.\nrun_call_count: 1-based count of how many run calls (including this one)\nhas been invoked.\nis_callable_runner: (bool) whether a runner returned by\nSession.make_callable is being run.", "source": "github-repos"}
{"code": "def create_table_from(self, name, src):\n        \n        \n        query = self.execute(\"SELECT sql FROM sqlite_master WHERE \"\n                             \"type='table' and name=?\", (src,))\n        try:\n            cmd = query.fetchone()[0]\n        except TypeError:\n            raise sql.OperationalError(\"Cannot copy non-existent table '{0}'\"\n                                       .format(src))\n\n        \n        \n        new_cmd = re.sub(\"(CREATE TABLE) \\w+\", \"\\\\1 \" + name, cmd,\n                         re.IGNORECASE)\n\n        \n        self.execute(new_cmd)", "docstring": "Create a new table with same schema as the source.\n\nIf the named table already exists, nothing happens.\n\nArguments:\n\nname (str): The name of the table to create.\nsrc (str): The name of the source table to duplicate.\n\nRaises:\n\nsql.OperationalError: If source table does not exist.", "source": "juraj-google-style"}
{"code": "def count(self, event_str, inc_int=1):\n    self._event_dict.setdefault(event_str, 0)\n    self._event_dict[event_str] += inc_int", "docstring": "Count an event.\n\nArgs:\nevent_str:\nThe name of an event to count. Used as a key in the event dict. The same\nname will also be used in the summary.\n\ninc_int: int\nOptional argument to increase the count for the event by more than 1.", "source": "codesearchnet"}
{"code": "def abs_url(self, url):\n    parsed_url = urllib.parse.urlparse(url)\n    if ((not parsed_url.scheme) and (not parsed_url.netloc)):\n        return urllib.parse.urljoin(str(self.base_url), str(url))\n    else:\n        return url", "docstring": "Given a relative or absolute URL; return an absolute URL.\n\nArgs:\nurl(basestring): A relative or absolute URL.\n\nReturns:\nstr: An absolute URL.", "source": "codesearchnet"}
{"code": "def un(byts):\n    \n    \n    return msgpack.loads(byts, use_list=False, raw=False, unicode_errors='surrogatepass')", "docstring": "Use msgpack to de-serialize a python object.\n\nArgs:\nbyts (bytes): The bytes to de-serialize\n\nNotes:\nString objects are decoded using utf8 encoding.  In order to handle\npotentially malformed input, ``unicode_errors='surrogatepass'`` is set\nto allow decoding bad input strings.\n\nReturns:\nobj: The de-serialized object", "source": "juraj-google-style"}
{"code": "def CreateBiddingStrategy(client):\n    bidding_strategy_service = client.GetService('BiddingStrategyService', version='v201809')\n    shared_bidding_strategy = {'name': ('Maximize Clicks %s' % uuid.uuid4()), 'biddingScheme': {'xsi_type': 'TargetSpendBiddingScheme', 'bidCeiling': {'microAmount': '2000000'}}}\n    operation = {'operator': 'ADD', 'operand': shared_bidding_strategy}\n    response = bidding_strategy_service.mutate([operation])\n    new_bidding_strategy = response['value'][0]\n    print(('Shared bidding strategy with name \"%s\" and ID \"%s\" of type \"%s\"was created.' % (new_bidding_strategy['name'], new_bidding_strategy['id'], new_bidding_strategy['biddingScheme']['BiddingScheme.Type'])))\n    return new_bidding_strategy", "docstring": "Creates a bidding strategy object.\n\nArgs:\nclient: AdWordsClient the client to run the example with.\n\nReturns:\ndict An object representing a bidding strategy.", "source": "codesearchnet"}
{"code": "def random_new_from_seed(seed: Hashable, algo: int=RNG_CMWC) -> tcod.random.Random:\n    return tcod.random.Random(algo, seed)", "docstring": "Return a new Random instance.  Using the given ``seed`` and ``algo``.\n\nArgs:\nseed (Hashable): The RNG seed.  Should be a 32-bit integer, but any\nhashable object is accepted.\nalgo (int): The random number algorithm to use.\n\nReturns:\nRandom: A new Random instance using the given algorithm.", "source": "codesearchnet"}
{"code": "def matches(self, desc):\n    desc_value_type = (desc.valueType or ValueType.STRING)\n    return ((self.label_name == desc.key) and (self.value_type == desc_value_type))", "docstring": "Determines if a given label descriptor matches this enum instance\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):\nthe instance to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "codesearchnet"}
{"code": "def depth_january_average_ground_temperature(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_january_average_ground_temperature`'.format(value))\n    self._depth_january_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_january_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_january_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def __init__(self, app=None, env=None, region=None, prop_path=None):\n        \n        self.app_name = app\n        self.env = env\n        self.region = region\n        self.prop_path = prop_path\n        self.properties = get_properties(properties_file=prop_path, env=env, region=region)", "docstring": "Lambda event object.\n\nArgs:\napp (str): Application name\nenv (str): Environment/Account\nregion (str): AWS Region\nprop_path (str): Path of environment property file", "source": "juraj-google-style"}
{"code": "def get_pipeline(self, name):\n    check.str_param(name, 'name')\n    if (name in self._pipeline_cache):\n        return self._pipeline_cache[name]\n    try:\n        pipeline = self.pipeline_dict[name]()\n    except KeyError:\n        raise DagsterInvariantViolationError('Could not find pipeline \"{name}\". Found: {pipeline_names}.'.format(name=name, pipeline_names=', '.join(['\"{pipeline_name}\"'.format(pipeline_name=pipeline_name) for pipeline_name in self.pipeline_dict.keys()])))\n    check.invariant((pipeline.name == name), 'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format(name=name, pipeline=pipeline))\n    self._pipeline_cache[name] = check.inst(pipeline, PipelineDefinition, 'Function passed into pipeline_dict with key {key} must return a PipelineDefinition'.format(key=name))\n    return pipeline", "docstring": "Get a pipeline by name. Only constructs that pipeline and caches it.\n\nArgs:\nname (str): Name of the pipeline to retriever\n\nReturns:\nPipelineDefinition: Instance of PipelineDefinition with that name.", "source": "codesearchnet"}
{"code": "def parse_module_content(content: str) -> List[str]:\n    objects = []\n    current_object = []\n    lines = content.split('\\n')\n    end_markers = [')', ']', '}', '\"\"\"']\n    for line in lines:\n        is_valid_object = len(current_object) > 0\n        if is_valid_object and len(current_object) == 1:\n            is_valid_object = not current_object[0].startswith('\n        if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object:\n            if line in end_markers:\n                current_object.append(line)\n                objects.append('\\n'.join(current_object))\n                current_object = []\n            else:\n                objects.append('\\n'.join(current_object))\n                current_object = [line]\n        else:\n            current_object.append(line)\n    if len(current_object) > 0:\n        objects.append('\\n'.join(current_object))\n    return objects", "docstring": "Parse the content of a module in the list of objects it defines.\n\nArgs:\ncontent (`str`): The content to parse\n\nReturns:\n`List[str]`: The list of objects defined in the module.", "source": "github-repos"}
{"code": "def lint(filename, lines, config):\n    (_, ext) = os.path.splitext(filename)\n    if (ext in config):\n        output = collections.defaultdict(list)\n        for linter in config[ext]:\n            linter_output = linter(filename, lines)\n            for (category, values) in linter_output[filename].items():\n                output[category].extend(values)\n        if ('comments' in output):\n            output['comments'] = sorted(output['comments'], key=(lambda x: (x.get('line', (- 1)), x.get('column', (- 1)))))\n        return {filename: dict(output)}\n    else:\n        return {filename: {'skipped': [('no linter is defined or enabled for files with extension \"%s\"' % ext)]}}", "docstring": "Lints a file.\n\nArgs:\nfilename: string: filename to lint.\nlines: list[int]|None: list of lines that we want to capture. If None,\nthen all lines will be captured.\nconfig: dict[string: linter]: mapping from extension to a linter\nfunction.\n\nReturns: dict: if there were errors running the command then the field\n'error' will have the reasons in a list. if the lint process was skipped,\nthen a field 'skipped' will be set with the reasons. Otherwise, the field\n'comments' will have the messages.", "source": "codesearchnet"}
{"code": "def alexa(self) -> list:\n    alexa_controls = [control.alexa() for control in self.controls]\n    return alexa_controls", "docstring": "Returns list of Amazon Alexa compatible states of the RichMessage\ninstance nested controls.\n\nReturns:\nalexa_controls: Amazon Alexa representation of RichMessage instance nested\ncontrols.", "source": "codesearchnet"}
{"code": "def apply_modifications(model, custom_objects=None):\n    \n    \n    \n    \n    \n    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')\n    try:\n        model.save(model_path)\n        return load_model(model_path, custom_objects=custom_objects)\n    finally:\n        os.remove(model_path)", "docstring": "Applies modifications to the model layers to create a new Graph. For example, simply changing\n`model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated\nwith modified inbound and outbound tensors because of change in layer building function.\n\nArgs:\nmodel: The `keras.models.Model` instance.\n\nReturns:\nThe modified model with changes applied. Does not mutate the original `model`.", "source": "juraj-google-style"}
{"code": "def _encode_gif(images, fps):\n    writer = WholeVideoWriter(fps)\n    writer.write_multi(images)\n    return writer.finish()", "docstring": "Encodes numpy images into gif string.\n\nArgs:\nimages: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape\n`[time, height, width, channels]` where `channels` is 1 or 3.\nfps: frames per second of the animation\n\nReturns:\nThe encoded gif string.\n\nRaises:\nIOError: If the ffmpeg command returns an error.", "source": "codesearchnet"}
{"code": "def kill(self, signal=None):\n    return self.client.api.kill(self.id, signal=signal)", "docstring": "Kill or send a signal to the container.\n\nArgs:\nsignal (str or int): The signal to send. Defaults to ``SIGKILL``\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def parse_auth(cls, entries, raise_on_error=False):\n        \n\n        conf = {}\n        for registry, entry in six.iteritems(entries):\n            if not isinstance(entry, dict):\n                log.debug(\n                    'Config entry for key {0} is not auth config'.format(\n                        registry\n                    )\n                )\n                \n                \n                \n                \n                if raise_on_error:\n                    raise errors.InvalidConfigFile(\n                        'Invalid configuration for registry {0}'.format(\n                            registry\n                        )\n                    )\n                return {}\n            if 'identitytoken' in entry:\n                log.debug(\n                    'Found an IdentityToken entry for registry {0}'.format(\n                        registry\n                    )\n                )\n                conf[registry] = {\n                    'IdentityToken': entry['identitytoken']\n                }\n                continue  \n\n            if 'auth' not in entry:\n                \n                \n                \n                log.debug(\n                    'Auth data for {0} is absent. Client might be using a '\n                    'credentials store instead.'.format(registry)\n                )\n                conf[registry] = {}\n                continue\n\n            username, password = decode_auth(entry['auth'])\n            log.debug(\n                'Found entry (registry={0}, username={1})'\n                .format(repr(registry), repr(username))\n            )\n\n            conf[registry] = {\n                'username': username,\n                'password': password,\n                'email': entry.get('email'),\n                'serveraddress': registry,\n            }\n        return conf", "docstring": "Parses authentication entries\n\nArgs:\nentries:        Dict of authentication entries.\nraise_on_error: If set to true, an invalid format will raise\nInvalidConfigFile\n\nReturns:\nAuthentication registry.", "source": "juraj-google-style"}
{"code": "def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor):\n    has_gt = gt_classes.numel() > 0\n    if has_gt:\n        gt_classes = gt_classes[matched_idxs]\n        gt_classes[matched_labels == 0] = self.bg_label\n        gt_classes[matched_labels == -1] = -1\n    else:\n        gt_classes = torch.zeros_like(matched_idxs) + self.bg_label\n    sampled_fg_idxs, sampled_bg_idxs = subsample_labels(gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label)\n    sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)\n    return (sampled_idxs, gt_classes[sampled_idxs])", "docstring": "Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification\nlabels.\n\nArgs:\nmatched_idxs (Tensor): a vector of length N, each is the best-matched\ngt index in [0, M) for each proposal.\nmatched_labels (Tensor): a vector of length N, the matcher's label\n(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.\ngt_classes (Tensor): a vector of length M.\n\nReturns:\nTensor: a vector of indices of sampled proposals. Each is in [0, N). Tensor: a vector of the same length,\nthe classification label for\neach sampled proposal. Each sample is labeled as either a category in [0, num_classes) or the\nbackground (num_classes).", "source": "github-repos"}
{"code": "def _GetCurrentControlSet(self, key_path_suffix):\n    select_key_path = 'HKEY_LOCAL_MACHINE\\\\System\\\\Select'\n    select_key = self.GetKeyByPath(select_key_path)\n    if (not select_key):\n        return None\n    control_set = None\n    for value_name in ('Current', 'Default', 'LastKnownGood'):\n        value = select_key.GetValueByName(value_name)\n        if ((not value) or (not value.DataIsInteger())):\n            continue\n        control_set = value.GetDataAsObject()\n        if ((control_set > 0) or (control_set <= 999)):\n            break\n    if ((not control_set) or (control_set <= 0) or (control_set > 999)):\n        return None\n    control_set_path = 'HKEY_LOCAL_MACHINE\\\\System\\\\ControlSet{0:03d}'.format(control_set)\n    key_path = ''.join([control_set_path, key_path_suffix])\n    return self.GetKeyByPath(key_path)", "docstring": "Virtual key callback to determine the current control set.\n\nArgs:\nkey_path_suffix (str): current control set Windows Registry key path\nsuffix with leading path separator.\n\nReturns:\nWinRegistryKey: the current control set Windows Registry key or None\nif not available.", "source": "codesearchnet"}
{"code": "def _wrap_response(self, status=None, **kwargs):\n    kwargs['status'] = (status if (status is not None) else self._status.OK)\n    return kwargs", "docstring": "Convenience method to wrap a status with any key word args.\n\nArgs:\nstatus (enum): enum response status, defaults to OK\n\nReturns:\ndict: inlcudes a 'status' attribute and any key word arguments", "source": "codesearchnet"}
{"code": "def get_oxi_state_decorated_structure(self, structure):\n        \n        s = structure.copy()\n        if s.is_ordered:\n            valences = self.get_valences(s)\n            s.add_oxidation_state_by_site(valences)\n        else:\n            valences = self.get_valences(s)\n            s = add_oxidation_state_by_site_fraction(s, valences)\n        return s", "docstring": "Get an oxidation state decorated structure. This currently works only\nfor ordered structures only.\n\nArgs:\nstructure: Structure to analyze\n\nReturns:\nA modified structure that is oxidation state decorated.\n\nRaises:\nValueError if the valences cannot be determined.", "source": "juraj-google-style"}
{"code": "def _update_example(self, request):\n    if (request.method != 'POST'):\n        return http_util.Respond(request, {'error': 'invalid non-POST request'}, 'application/json', code=405)\n    example_json = request.form['example']\n    index = int(request.form['index'])\n    if (index >= len(self.examples)):\n        return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400)\n    new_example = self.example_class()\n    json_format.Parse(example_json, new_example)\n    self.examples[index] = new_example\n    self.updated_example_indices.add(index)\n    self.generate_sprite([ex.SerializeToString() for ex in self.examples])\n    return http_util.Respond(request, {}, 'application/json')", "docstring": "Updates the specified example.\n\nArgs:\nrequest: A request that should contain 'index' and 'example'.\n\nReturns:\nAn empty response.", "source": "codesearchnet"}
{"code": "def register_to_random_name(grad_f):\n    grad_f_name = ((grad_f.__name__ + '_') + str(uuid.uuid4()))\n    tf.RegisterGradient(grad_f_name)(grad_f)\n    return grad_f_name", "docstring": "Register a gradient function to a random string.\n\nIn order to use a custom gradient in TensorFlow, it must be registered to a\nstring. This is both a hassle, and -- because only one function can every be\nregistered to a string -- annoying to iterate on in an interactive\nenvironemnt.\n\nThis function registers a function to a unique random string of the form:\n\n{FUNCTION_NAME}_{RANDOM_SALT}\n\nAnd then returns the random string. This is a helper in creating more\nconvenient gradient overrides.\n\nArgs:\ngrad_f: gradient function to register. Should map (op, grad) -> grad(s)\n\nReturns:\nString that gradient function was registered to.", "source": "codesearchnet"}
{"code": "def _viscounts2radiance(counts, slope, offset):\n        \n        rad = counts * slope + offset\n        return rad.clip(min=0)", "docstring": "Convert VIS counts to radiance\n\nReferences: [VIS]\n\nArgs:\ncounts: Raw detector counts\nslope: Slope [W m-2 um-1 sr-1]\noffset: Offset [W m-2 um-1 sr-1]\nReturns:\nRadiance [W m-2 um-1 sr-1]", "source": "juraj-google-style"}
{"code": "def _ReadFileHeader(self, file_object):\n    \n    data_type_map = self._GetDataTypeMap('keychain_file_header')\n\n    file_header, _ = self._ReadStructureFromFileObject(\n        file_object, 0, data_type_map)\n\n    if file_header.signature != self._FILE_SIGNATURE:\n      raise errors.ParseError('Unsupported file signature.')\n\n    if (file_header.major_format_version != self._MAJOR_VERSION or\n        file_header.minor_format_version != self._MINOR_VERSION):\n      raise errors.ParseError('Unsupported format version: {0:s}.{1:s}'.format(\n          file_header.major_format_version, file_header.minor_format_version))\n\n    return file_header", "docstring": "Reads the file header.\n\nArgs:\nfile_object (file): file-like object.\n\nReturns:\nkeychain_file_header: file header.\n\nRaises:\nParseError: if the file header cannot be read.", "source": "juraj-google-style"}
{"code": "def to_dict(self):\n    msg_dict = {}\n    msg_dict['level'] = self.level\n    msg_dict['message'] = self.message\n    msg_dict['now_time'] = monotonic()\n    msg_dict['created_time'] = self.created\n    msg_dict['id'] = self.id\n    msg_dict['count'] = self.count\n    return msg_dict", "docstring": "Create a dictionary with the information in this message.\n\nReturns:\ndict: The dictionary with information", "source": "codesearchnet"}
{"code": "def kill_task(self, task_type, task_id):\n    assert self._mpr\n    if not self._start_events[task_type][task_id].is_set() or self._finish_events[task_type][task_id].is_set():\n        raise ValueError(\"The task %s:%d doesn't exist.\" % (task_type, task_id))\n    self._finish_events[task_type][task_id].set()\n    self._mpr._processes[task_type, task_id].join()", "docstring": "Kill a server given task_type and task_id.\n\nArgs:\ntask_type: the type of the task such as \"worker\".\ntask_id: the id the task such as 1.", "source": "github-repos"}
{"code": "def table(self, ref):\n    try:\n        obj_number = ObjectNumber.parse(ref)\n        ds_obj_number = obj_number.as_dataset\n        dataset = self._db.dataset(ds_obj_number)\n        table = dataset.table(ref)\n    except NotObjectNumberError:\n        q = self.database.session.query(Table).filter((Table.name == str(ref))).order_by(Table.vid.desc())\n        table = q.first()\n    if (not table):\n        raise NotFoundError(\"No table for ref: '{}'\".format(ref))\n    return table", "docstring": "Finds table by ref and returns it.\n\nArgs:\nref (str): id, vid (versioned id) or name of the table\n\nRaises:\nNotFoundError: if table with given ref not found.\n\nReturns:\norm.Table", "source": "codesearchnet"}
{"code": "def reset(self, indices=None):\n    \n    if self._store_rollouts and self.current_epoch is None:\n      raise ValueError(\n          \"No current epoch. start_new_epoch() should first be called.\"\n      )\n\n    if indices is None:\n      indices = np.arange(self.batch_size)\n    new_obs = self._reset(indices)\n    if self._should_preprocess_on_reset:\n      new_obs = self._preprocess_observations(new_obs)\n    if self._store_rollouts:\n      encoded_obs = self._encode_observations(new_obs)\n      for (index, ob) in zip(indices, encoded_obs):\n        frame = self._current_batch_frames[index]\n        if frame is not None:\n          rollout = self._current_batch_rollouts[index]\n          rollout.append(frame._replace(action=0))\n          self._current_epoch_rollouts.append(rollout)\n          self._current_batch_rollouts[index] = []\n        self._current_batch_frames[index] = Frame(\n            observation=ob, reward=0, unclipped_reward=0, done=False,\n            action=None\n        )\n    return new_obs", "docstring": "Resets environments at given indices.\n\nDoes any preprocessing and adds rollouts to history.\n\nArgs:\nindices: Indices of environments to reset.\n\nReturns:\nBatch of initial observations of reset environments.\n\nRaises:\nValueError: when there's no current epoch.", "source": "juraj-google-style"}
{"code": "def close(self):\n    if (not self._is_open):\n        return\n    else:\n        try:\n            self.proxy.close()\n            self._is_open = False\n        except Exception as e:\n            self.logger.error('could not close client connection: %s', e)\n            raise", "docstring": "Close the client connection.\n\nRaises:\nException: if an error occurs while trying to close the connection", "source": "codesearchnet"}
{"code": "def to_str(self, *, content_only: bool=False, **kwargs) -> str:\n    if content_only:\n        return self.content\n    return '\\n'.join([v for v in ['<html>', self.head_section, self.body_section, '</html>'] if v])", "docstring": "Returns the HTML str.\n\nArgs:\ncontent_only: If True, only the content will be returned.\n**kwargs: Additional keyword arguments passed from the user that\nwill be ignored.\n\nReturns:\nThe generated HTML str.", "source": "github-repos"}
{"code": "def create_wf_instances(self, roles=None):\n        \n\n        \n        \n\n        if roles:\n            wf_instances = [\n                WFInstance(\n                    wf=self.wf,\n                    current_actor=role,\n                    task=self,\n                    name=self.wf.name\n                ) for role in roles\n                ]\n        else:\n            wf_instances = [\n                WFInstance(\n                    wf=self.wf,\n                    task=self,\n                    name=self.wf.name\n                )\n            ]\n\n        \n        if self.task_type in [\"C\", \"D\"]:\n            return [wfi.save() for wfi in wf_instances]\n\n        \n        else:\n            wf_obj_instances = []\n            for wfi in wf_instances:\n                role = wfi.current_actor if self.task_type == \"A\" else None\n                keys = self.get_object_keys(role)\n                wf_obj_instances.extend(\n                    [WFInstance(\n                        wf=self.wf,\n                        current_actor=role,\n                        task=self,\n                        name=self.wf.name,\n                        wf_object=key,\n                        wf_object_type=self.object_type\n                    ).save() for key in keys]\n                )\n\n            return wf_obj_instances", "docstring": "Creates wf instances.\nArgs:\nroles (list): role list\n\nReturns:\n(list): wf instances", "source": "juraj-google-style"}
{"code": "def __add__(self, period_tensor):\n    period_type = period_tensor.period_type()\n    if period_type == constants.PeriodType.DAY:\n        ordinals = self._ordinals + period_tensor.quantity()\n        return from_ordinals(ordinals)\n    if period_type == constants.PeriodType.WEEK:\n        return self + periods.PeriodTensor(period_tensor.quantity() * 7, constants.PeriodType.DAY)\n\n    def adjust_day(year, month, day):\n        return tf.math.minimum(day, _num_days_in_month(month, year))\n    if period_type == constants.PeriodType.MONTH:\n        m = self._months - 1 + period_tensor.quantity()\n        y = self._years + m \n        m = m % 12 + 1\n        d = adjust_day(y, m, self._days)\n        return from_year_month_day(y, m, d, validate=False)\n    if period_type == constants.PeriodType.YEAR:\n        y = self._years + period_tensor.quantity()\n        m = tf.broadcast_to(self._months, tf.shape(y))\n        d = adjust_day(y, m, self._days)\n        return from_year_month_day(y, m, d, validate=False)\n    raise ValueError('Unrecognized period type: {}'.format(period_type))", "docstring": "Adds a tensor of periods.\n\nWhen adding months or years, the resulting day of the month is decreased\nto the largest valid value if necessary. E.g. 31.03.2020 + 1 month =\n30.04.2020, 29.02.2020 + 1 year = 28.02.2021.\n\nArgs:\nperiod_tensor: A `PeriodTensor` object broadcastable to the shape of\n\"self\".\n\nReturns:\nThe new instance of DateTensor.\n\n#### Example\n```python\ndates = tff.datetime.dates_from_tuples([(2020, 2, 25), (2020, 3, 31)])\nnew_dates = dates + tff.datetime.month()\n# DateTensor([(2020, 3, 25), (2020, 4, 30)])\n\nnew_dates = dates + tff.datetime.month([1, 2])\n# DateTensor([(2020, 3, 25), (2020, 5, 31)])\n```", "source": "github-repos"}
{"code": "def is_attribute_supported(self, attribute):\n    if (attribute not in self._attribute_rule_sets.keys()):\n        return False\n    rule_set = self._attribute_rule_sets.get(attribute)\n    if (self._version >= rule_set.version_added):\n        return True\n    else:\n        return False", "docstring": "Check if the attribute is supported by the current KMIP version.\n\nArgs:\nattribute (string): The name of the attribute\n(e.g., 'Cryptographic Algorithm'). Required.\nReturns:\nbool: True if the attribute is supported by the current KMIP\nversion. False otherwise.", "source": "codesearchnet"}
{"code": "def domain_dimension(self):\n    if self.shape.rank is None:\n        return tensor_shape.Dimension(None)\n    else:\n        return self.shape.dims[-1]", "docstring": "Dimension (in the sense of vector spaces) of the domain of this operator.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.\n\nReturns:\n`Dimension` object.", "source": "github-repos"}
{"code": "def begin_episode(self, agent_indices):\n    \n    with tf.name_scope('begin_episode/'):\n      if self._last_state is None:\n        reset_state = tf.no_op()\n      else:\n        reset_state = utility.reinit_nested_vars(\n            self._last_state, agent_indices)\n      reset_buffer = self._current_episodes.clear(agent_indices)\n      with tf.control_dependencies([reset_state, reset_buffer]):\n        return tf.constant('')", "docstring": "Reset the recurrent states and stored episode.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\n\nReturns:\nSummary tensor.", "source": "juraj-google-style"}
{"code": "def _resource_apply_sparse(self, grad, handle, indices, apply_state):\n    raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Add ops to apply sparse gradients to the variable `handle`.\n\nSimilar to `_apply_sparse`, the `indices` argument to this method has been\nde-duplicated. Optimizers which deal correctly with non-unique indices may\ninstead override `_resource_apply_sparse_duplicate_indices` to avoid this\noverhead.\n\nArgs:\ngrad: a `Tensor` representing the gradient for the affected indices.\nhandle: a `Tensor` of dtype `resource` which points to the variable to be\nupdated.\nindices: a `Tensor` of integral type representing the indices for which\nthe gradient is nonzero. Indices are unique.\napply_state: A dict which is used across multiple apply calls.\n\nReturns:\nAn `Operation` which updates the value of the variable.", "source": "github-repos"}
{"code": "def add_squashed_change(self, path, data):\n    assert self._squashed_count, 'Called while not squashing changes'\n    self._squashed_changes.append([path[1:], data])", "docstring": "Register a squashed change to a particular path\n\nArgs:\npath (list): The path of what has changed, relative from Block\ndata (object): The new data", "source": "codesearchnet"}
{"code": "def _ConvertFieldValuePair(self, js, message):\n    \n    names = []\n    message_descriptor = message.DESCRIPTOR\n    fields_by_json_name = dict((f.json_name, f)\n                               for f in message_descriptor.fields)\n    for name in js:\n      try:\n        field = fields_by_json_name.get(name, None)\n        if not field:\n          field = message_descriptor.fields_by_name.get(name, None)\n        if not field:\n          if self.ignore_unknown_fields:\n            continue\n          raise ParseError(\n              'Message type \"{0}\" has no field named \"{1}\".'.format(\n                  message_descriptor.full_name, name))\n        if name in names:\n          raise ParseError('Message type \"{0}\" should not have multiple '\n                           '\"{1}\" fields.'.format(\n                               message.DESCRIPTOR.full_name, name))\n        names.append(name)\n        \n        if field.containing_oneof is not None:\n          oneof_name = field.containing_oneof.name\n          if oneof_name in names:\n            raise ParseError('Message type \"{0}\" should not have multiple '\n                             '\"{1}\" oneof fields.'.format(\n                                 message.DESCRIPTOR.full_name, oneof_name))\n          names.append(oneof_name)\n\n        value = js[name]\n        if value is None:\n          if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE\n              and field.message_type.full_name == 'google.protobuf.Value'):\n            sub_message = getattr(message, field.name)\n            sub_message.null_value = 0\n          else:\n            message.ClearField(field.name)\n          continue\n\n        \n        if _IsMapEntry(field):\n          message.ClearField(field.name)\n          self._ConvertMapFieldValue(value, message, field)\n        elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n          message.ClearField(field.name)\n          if not isinstance(value, list):\n            raise ParseError('repeated field {0} must be in [] which is '\n                             '{1}.'.format(name, value))\n          if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n            \n            for item in value:\n              sub_message = getattr(message, field.name).add()\n              \n              if (item is None and\n                  sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):\n                raise ParseError('null is not allowed to be used as an element'\n                                 ' in a repeated field.')\n              self.ConvertMessage(item, sub_message)\n          else:\n            \n            for item in value:\n              if item is None:\n                raise ParseError('null is not allowed to be used as an element'\n                                 ' in a repeated field.')\n              getattr(message, field.name).append(\n                  _ConvertScalarFieldValue(item, field))\n        elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n          sub_message = getattr(message, field.name)\n          sub_message.SetInParent()\n          self.ConvertMessage(value, sub_message)\n        else:\n          setattr(message, field.name, _ConvertScalarFieldValue(value, field))\n      except ParseError as e:\n        if field and field.containing_oneof is None:\n          raise ParseError('Failed to parse {0} field: {1}'.format(name, e))\n        else:\n          raise ParseError(str(e))\n      except ValueError as 
e:\n        raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))\n      except TypeError as e:\n        raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))", "docstring": "Convert field value pairs into regular message.\n\nArgs:\njs: A JSON object to convert the field value pairs.\nmessage: A regular protocol message to record the data.\n\nRaises:\nParseError: In case of problems converting.", "source": "juraj-google-style"}
{"code": "def OpenAndRead(relative_path='debugger-blacklist.yaml'):\n  \n\n  \n  try:\n    with open(os.path.join(sys.path[0], relative_path), 'r') as f:\n      return Read(f)\n  except IOError:\n    return None", "docstring": "Attempts to find the yaml configuration file, then read it.\n\nArgs:\nrelative_path: Optional relative path override.\n\nReturns:\nA Config object if the open and read were successful, None if the file\ndoes not exist (which is not considered an error).\n\nRaises:\nError (some subclass): As thrown by the called Read() function.", "source": "juraj-google-style"}
{"code": "def _compute_llama3_parameters(config: PretrainedConfig, device: 'torch.device', seq_len: Optional[int]=None, **rope_kwargs) -> tuple['torch.Tensor', float]:\n    inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n    factor = config.rope_scaling['factor']\n    low_freq_factor = config.rope_scaling['low_freq_factor']\n    high_freq_factor = config.rope_scaling['high_freq_factor']\n    old_context_len = config.rope_scaling['original_max_position_embeddings']\n    low_freq_wavelen = old_context_len / low_freq_factor\n    high_freq_wavelen = old_context_len / high_freq_factor\n    wavelen = 2 * math.pi / inv_freq\n    inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq)\n    smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)\n    smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama\n    is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)\n    inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)\n    return (inv_freq_llama, attention_factor)", "docstring": "Computes the inverse frequencies for llama 3.1.\n\nArgs:\nconfig ([`~transformers.PretrainedConfig`]):\nThe model configuration.\ndevice (`torch.device`):\nThe device to use for initialization of the inverse frequencies.\nseq_len (`int`, *optional*):\nThe current sequence length. Unused for this type of RoPE.\nrope_kwargs (`Dict`, *optional*):\nBC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\nReturns:\nTuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\npost-processing scaling factor applied to the computed cos/sin.", "source": "github-repos"}
{"code": "def merge(self, x=None, y=None, ildj=None, kwargs=None, mapping=None):\n    if (mapping is None):\n        mapping = _Mapping(x=x, y=y, ildj=ildj, kwargs=kwargs)\n    elif any(((arg is not None) for arg in [x, y, ildj, kwargs])):\n        raise ValueError('Cannot simultaneously specify mapping and individual arguments.')\n    return _Mapping(x=self._merge(self.x, mapping.x), y=self._merge(self.y, mapping.y), ildj=self._merge(self.ildj, mapping.ildj), kwargs=self._merge(self.kwargs, mapping.kwargs, use_equals=True))", "docstring": "Returns new _Mapping with args merged with self.\n\nArgs:\nx: `Tensor` or None. Input to forward; output of inverse.\ny: `Tensor` or None. Input to inverse; output of forward.\nildj: `Tensor`. This is the (un-reduce_sum'ed) inverse log det jacobian.\nkwargs: Python dictionary. Extra args supplied to forward/inverse/etc\nfunctions.\nmapping: Instance of _Mapping to merge. Can only be specified if no other\narg is specified.\n\nReturns:\nmapping: New instance of `_Mapping` which has inputs merged with self.\n\nRaises:\nValueError: if mapping and any other arg is not `None`.", "source": "codesearchnet"}
{"code": "def __get_valid_form_data_elements(self, soup):\n        \n\n        elements = []\n\n        for element in soup.find_all([\"input\", \"button\", \"textarea\", \"select\"]):\n            if element.has_attr(\"name\"):\n                elements.append(element)\n\n        return elements", "docstring": "Get all valid form input elements.\n\nNote:\nAn element is valid when the value can be updated client-side\nand the element has a name attribute.\n\nArgs:\nsoup (obj): The BeautifulSoup form.\n\nReturns:\nlist(obj): Soup elements.", "source": "juraj-google-style"}
{"code": "def _div_python2(x, y, name=None):\n    with ops.name_scope(name, 'div', [x, y]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        y = ops.convert_to_tensor(y, name='y', dtype=x.dtype.base_dtype)\n        x_dtype = x.dtype.base_dtype\n        y_dtype = y.dtype.base_dtype\n        if x_dtype != y_dtype:\n            raise TypeError(f'`x` and `y` must have the same dtype, got {x_dtype!r} != {y_dtype!r}.')\n        if x_dtype.is_floating or x_dtype.is_complex:\n            return gen_math_ops.real_div(x, y, name=name)\n        else:\n            return gen_math_ops.floor_div(x, y, name=name)", "docstring": "Divide two values using Python 2 semantics.\n\nUsed for Tensor.__div__.\n\nArgs:\nx: `Tensor` numerator of real numeric type.\ny: `Tensor` denominator of real numeric type.\nname: A name for the operation (optional).\n\nReturns:\n`x / y` returns the quotient of x and y.", "source": "github-repos"}
{"code": "def from_config(cls, config, custom_objects=None):\n    if 'lr' in config:\n        config['learning_rate'] = config.pop('lr')\n    if 'learning_rate' in config:\n        if isinstance(config['learning_rate'], dict):\n            config['learning_rate'] = learning_rate_schedule.deserialize(config['learning_rate'], custom_objects=custom_objects)\n    return cls(**config)", "docstring": "Creates an optimizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same optimizer from the config\ndictionary.\n\nArgs:\nconfig: A Python dictionary, typically the output of get_config.\ncustom_objects: A Python dictionary mapping names to additional Python\nobjects used to create this optimizer, such as a function used for a\nhyperparameter.\n\nReturns:\nAn optimizer instance.", "source": "github-repos"}
{"code": "def _get_elements(self, url, key, eclass, id=None, name=None):\n    if ((id is not None) and (name is not None)):\n        raise ValueError('id and name cannot specified together')\n    json_elements = self.rest_client.make_request(url)[key]\n    return [eclass(element, self.rest_client) for element in json_elements if (_exact_resource(element, id) and _matching_resource(element, name))]", "docstring": "Get elements matching `id` or `name`\n\nArgs:\nurl(str): url of children.\nkey(str): key in the returned JSON.\neclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of.\nid(str, optional): only return resources whose `id` property matches the given `id`\nname(str, optional): only return resources whose `name` property matches the given `name`\n\nReturns:\nlist(_ResourceElement): List of `eclass` instances\n\nRaises:\nValueError: both `id` and `name` are specified together", "source": "codesearchnet"}
{"code": "def outline(self, level=logging.INFO, message=\"\"):\n        \n        steps = 1\n        logger.log(level, \"Plan \\\"%s\\\":\", self.description)\n        for step in self.steps:\n            logger.log(\n                level,\n                \"  - step: %s: target: \\\"%s\\\", action: \\\"%s\\\"\",\n                steps,\n                step.name,\n                step.fn.__name__,\n            )\n            steps += 1\n\n        if message:\n            logger.log(level, message)", "docstring": "Print an outline of the actions the plan is going to take.\nThe outline will represent the rough ordering of the steps that will be\ntaken.\nArgs:\nlevel (int, optional): a valid log level that should be used to log\nthe outline\nmessage (str, optional): a message that will be logged to\nthe user after the outline has been logged.", "source": "juraj-google-style"}
{"code": "def reset_from_key_counter(self, key, counter):\n    counter = _convert_to_state_tensor(counter)\n    key = _convert_to_state_tensor(key)\n    counter.shape.assert_is_compatible_with([_get_state_size(self.algorithm) - 1])\n    key.shape.assert_is_compatible_with([])\n    key = array_ops.reshape(key, [1])\n    state = array_ops.concat([counter, key], 0)\n    self._state_var.assign(state)", "docstring": "Resets the generator by a new key-counter pair.\n\nSee `from_key_counter` for the meaning of \"key\" and \"counter\".\n\nArgs:\nkey: the new key.\ncounter: the new counter.", "source": "github-repos"}
{"code": "def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):\n    return self.client.api.export(self.id, chunk_size)", "docstring": "Export the contents of the container's filesystem as a tar archive.\n\nArgs:\nchunk_size (int): The number of bytes returned by each iteration\nof the generator. If ``None``, data will be streamed as it is\nreceived. Default: 2 MB\n\nReturns:\n(str): The filesystem tar archive\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def get_items(self, page=1, order_by=None, filters=None):\n        \n        start = (page-1)*self.per_page\n        query = self.get_query()\n        if order_by is not None:\n            query = query.order_by(self._get_field(order_by))\n        if filters is not None:\n            query = self._filter(query, filters)\n        return query.offset(start).limit(self.per_page), self.count(query)", "docstring": "Fetch database for items matching.\n\nArgs:\npage (int):\nwhich page will be sliced\nslice size is ``self.per_page``.\norder_by (str):\na field name to order query by.\nfilters (dict):\na ``filter name``: ``value`` dict.\n\nReturns:\ntuple with:\nitems, sliced by page*self.per_page\ntotal items without slice", "source": "juraj-google-style"}
{"code": "def _FindLargestIdPostfixNumber(self, schedule):\n    postfix_number_re = re.compile('(\\\\d+)$')\n\n    def ExtractPostfixNumber(entity_id):\n        'Try to extract an integer from the end of entity_id.\\n\\n      If entity_id is None or if there is no integer ending the id, zero is\\n      returned.\\n\\n      Args:\\n        entity_id: An id string or None.\\n\\n      Returns:\\n        An integer ending the entity_id or zero.\\n      '\n        if (entity_id is None):\n            return 0\n        match = postfix_number_re.search(entity_id)\n        if (match is not None):\n            return int(match.group(1))\n        else:\n            return 0\n    id_data_sets = {'agency_id': schedule.GetAgencyList(), 'stop_id': schedule.GetStopList(), 'route_id': schedule.GetRouteList(), 'trip_id': schedule.GetTripList(), 'service_id': schedule.GetServicePeriodList(), 'fare_id': schedule.GetFareAttributeList(), 'shape_id': schedule.GetShapeList()}\n    max_postfix_number = 0\n    for (id_name, entity_list) in id_data_sets.items():\n        for entity in entity_list:\n            entity_id = getattr(entity, id_name)\n            postfix_number = ExtractPostfixNumber(entity_id)\n            max_postfix_number = max(max_postfix_number, postfix_number)\n    return max_postfix_number", "docstring": "Finds the largest integer used as the ending of an id in the schedule.\n\nArgs:\nschedule: The schedule to check.\n\nReturns:\nThe maximum integer used as an ending for an id.", "source": "codesearchnet"}
{"code": "def find_emails_by_subject(self, subject, limit=50, match_recipient=None):\n        \n        \n        self._mail.select(\"inbox\")\n\n        matching_uids = self.__search_email_by_subject(\n            subject, match_recipient)\n\n        return matching_uids", "docstring": "Searches for Email by Subject.  Returns email's imap message IDs\nas a list if matching subjects is found.\n\nArgs:\nsubject (str) - Subject to search for.\n\nKwargs:\nlimit (int) - Limit search to X number of matches, default 50\nmatch_recipient (str) - Recipient to exactly (don't care if not specified)\n\nReturns:\nlist - List of Integers representing imap message UIDs.", "source": "juraj-google-style"}
{"code": "def handle(self, message):\n        \n\n        logger.debug(message)\n        if Utilities.isNotEmpty(message['metadata']['opts']):\n            target = message['metadata']['opts']['target']\n            self.botThread.create_message(target, message['text'])", "docstring": "Attempts to send a message to the specified destination in Discord.\nExtends Legobot.Lego.handle()\n\nArgs:\nmessage (Legobot.Message): message w/ metadata to send.", "source": "juraj-google-style"}
{"code": "def _add_function(self, func, identify_observed):\n        \n\n        key = self.make_key(func)\n        if key not in self.observers:\n            self.observers[key] = ObserverFunction(\n                func, identify_observed, (key, self.observers))\n            return True\n        else:\n            return False", "docstring": "Add a function as an observer.\n\nArgs:\nfunc: The function to register as an observer.\nidentify_observed: See docstring for add_observer.\n\nReturns:\nTrue if the function is added, otherwise False.", "source": "juraj-google-style"}
{"code": "def set(self, key, value):\n    match = self._get_match(key=key)\n    if (not match):\n        self._log.info('\"%s\" does not exist, so it will be added.', key)\n        if isinstance(value, str):\n            self._log.info('\"%s\" will be added as a PHP string value.', key)\n            value_str = \"'{}'\".format(value)\n        else:\n            self._log.info('\"%s\" will be added as a PHP object value.', key)\n            value_str = str(value).lower()\n        new = \"define('{key}', {value});\".format(key=key, value=value_str)\n        self._log.info('\"%s\" will be added as: %s', key, new)\n        replace_this = '<?php\\n'\n        replace_with = (('<?php\\n' + new) + '\\n')\n        self._content = self._content.replace(replace_this, replace_with)\n        self._log.info('Content string has been updated.')\n        return True\n    if (self._get_value_from_match(key=key, match=match) == value):\n        self._log.info('\"%s\" is already up-to-date.', key)\n        return False\n    self._log.info('\"%s\" exists and will be updated.', key)\n    start_index = match.start(1)\n    end_index = match.end(1)\n    if isinstance(value, bool):\n        value = str(value).lower()\n        self._log.info('\"%s\" will be updated with boolean value: %s', key, value)\n    else:\n        self._log.info('\"%s\" will be updated with string value: %s', key, value)\n    start = self._content[:start_index]\n    end = self._content[end_index:]\n    self._content = ((start + value) + end)\n    return True", "docstring": "Updates the value of the given key in the loaded content.\n\nArgs:\nkey (str): Key of the property to update.\nvalue (str): New value of the property.\n\nReturn:\nbool: Indicates whether or not a change was made.", "source": "codesearchnet"}
{"code": "def authenticate_search_bind(self, username, password):\n    connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'))\n    try:\n        connection.bind()\n        log.debug(\"Successfully bound to LDAP as '{0}' for search_bind method\".format((self.config.get('LDAP_BIND_USER_DN') or 'Anonymous')))\n    except Exception as e:\n        self.destroy_connection(connection)\n        log.error(e)\n        return AuthenticationResponse()\n    user_filter = '({search_attr}={username})'.format(search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'), username=username)\n    search_filter = '(&{0}{1})'.format(self.config.get('LDAP_USER_OBJECT_FILTER'), user_filter)\n    log.debug(\"Performing an LDAP Search using filter '{0}', base '{1}', and scope '{2}'\".format(search_filter, self.full_user_search_dn, self.config.get('LDAP_USER_SEARCH_SCOPE')))\n    connection.search(search_base=self.full_user_search_dn, search_filter=search_filter, search_scope=getattr(ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')), attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'))\n    response = AuthenticationResponse()\n    if ((len(connection.response) == 0) or (self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and (len(connection.response) > 1))):\n        log.debug(\"Authentication was not successful for user '{0}'\".format(username))\n    else:\n        for user in connection.response:\n            if (('type' not in user) or (user.get('type') != 'searchResEntry')):\n                continue\n            user_connection = self._make_connection(bind_user=user['dn'], bind_password=password)\n            log.debug(\"Directly binding a connection to a server with user:'{0}'\".format(user['dn']))\n            try:\n                user_connection.bind()\n                log.debug(\"Authentication was successful for user '{0}'\".format(username))\n                response.status = AuthenticationResponseStatus.success\n                user['attributes']['dn'] = user['dn']\n                response.user_info = user['attributes']\n                response.user_id = username\n                response.user_dn = user['dn']\n                if self.config.get('LDAP_SEARCH_FOR_GROUPS'):\n                    response.user_groups = self.get_user_groups(dn=user['dn'], _connection=connection)\n                self.destroy_connection(user_connection)\n                break\n            except ldap3.core.exceptions.LDAPInvalidCredentialsResult:\n                log.debug(\"Authentication was not successful for user '{0}'\".format(username))\n                response.status = AuthenticationResponseStatus.fail\n            except Exception as e:\n                log.error(e)\n                response.status = AuthenticationResponseStatus.fail\n            self.destroy_connection(user_connection)\n    self.destroy_connection(connection)\n    return response", "docstring": "Performs a search bind to authenticate a user. This is\nrequired when a the login attribute is not the same\nas the RDN, since we cannot string together their DN on\nthe fly, instead we have to find it in the LDAP, then attempt\nto bind with their credentials.\n\nArgs:\nusername (str): Username of the user to bind (the field specified\nas LDAP_BIND_LOGIN_ATTR)\npassword (str): User's password to bind with when we find their dn.\n\nReturns:\nAuthenticationResponse", "source": "codesearchnet"}
{"code": "def delete_template(self, template_id):\n        \n\n        url = self.TEMPLATE_DELETE_URL\n\n        request = self._get_request()\n        response = request.post(url + template_id, get_json=False)\n\n        return response", "docstring": "Deletes the specified template\n\nArgs:\n\ntemplate_id (str): The id of the template to delete\n\nReturns:\nA status code", "source": "juraj-google-style"}
{"code": "def exists_function(function: _evaluation.ExistsFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    if operand_result is None:\n        raise ValueError('exists() cannot be called without an operand.')\n    if params_result:\n        raise ValueError('Unsupported FHIRPath expression: `criteria` parameter for exists() is not currently supported.')\n    sql_alias = 'exists_'\n    sql_data_type = _sql_data_types.Boolean\n    if not _fhir_path_data_types.returns_collection(function.parent_node.return_type) and (not operand_result.where_part):\n        return dataclasses.replace(operand_result, select_part=operand_result.select_part.is_not_null(_sql_alias=sql_alias))\n    else:\n        return _sql_data_types.Select(select_part=_sql_data_types.RawExpression('CASE WHEN COUNT(*) = 0 THEN FALSE ELSE TRUE END', _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=str(operand_result.to_subquery()), where_part=f'{operand_result.sql_alias} IS NOT NULL', sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Generates Spark SQL representing the FHIRPath empty() function.\n\nReturns `TRUE` if the operand has any elements, and `FALSE` otherwise.\n\nThis is the opposite of `_EmptyFunction`. If the operand is empty, then the\nresult is `FALSE`.\n\nThe returned SQL expression is a table of cardinality 1, whose value is of\n`BOOL` type. By default, `_ExistsFunction` will return `FALSE` if given no\noperand.\n\nArgs:\nfunction: The FHIRPath AST `ExistsFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.\n\nRaises:\nValueError: When the function is called without an operand", "source": "github-repos"}
{"code": "def reflect_runtime_member(self, name):\n    for scope in reversed(self.scopes):\n        try:\n            return structured.reflect_runtime_member(scope, name)\n        except (NotImplementedError, KeyError, AttributeError):\n            continue\n    return protocol.AnyType", "docstring": "Reflect 'name' using ONLY runtime reflection.\n\nYou most likely want to use ScopeStack.reflect instead.\n\nReturns:\nType of 'name', or protocol.AnyType.", "source": "codesearchnet"}
{"code": "def get_info_by_tail_number(self, tail_number, page=1, limit=100):\n    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)\n    return self._fr24.get_aircraft_data(url)", "docstring": "Fetch the details of a particular aircraft by its tail number.\n\nThis method can be used to get the details of a particular aircraft by its tail number.\nDetails include the serial number, age etc along with links to the images of the aircraft.\nIt checks the user authentication and returns the data accordingly.\n\nArgs:\ntail_number (str): The tail number, e.g. VT-ANL\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA list of dicts with the data; one dict for each row of data from flightradar24\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_info_by_flight_number('VT-ANL')\nf.get_info_by_flight_number('VT-ANL',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def get_results_as_xarray(self, parameter_space, result_parsing_function, output_labels, runs):\n    np_array = np.array(self.get_space(self.db.get_complete_results(), {}, collections.OrderedDict([(k, v) for (k, v) in parameter_space.items()]), runs, result_parsing_function))\n    clean_parameter_space = collections.OrderedDict([(k, v) for (k, v) in parameter_space.items()])\n    clean_parameter_space['runs'] = range(runs)\n    if isinstance(output_labels, list):\n        clean_parameter_space['metrics'] = output_labels\n    xr_array = xr.DataArray(np_array, coords=clean_parameter_space, dims=list(clean_parameter_space.keys()))\n    return xr_array", "docstring": "Return the results relative to the desired parameter space in the form\nof an xarray data structure.\n\nArgs:\nparameter_space (dict): The space of parameters to export.\nresult_parsing_function (function): user-defined function, taking a\nresult dictionary as argument, that can be used to parse the\nresult files and return a list of values.\noutput_labels (list): a list of labels to apply to the results\ndimensions, output by the result_parsing_function.\nruns (int): the number of runs to export for each parameter\ncombination.", "source": "codesearchnet"}
{"code": "def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return random_ops.truncated_normal(shape, mean, stddev, dtype=dtype, seed=seed)", "docstring": "Returns a tensor with truncated random normal distribution of values.\n\nThe generated values follow a normal distribution\nwith specified mean and standard deviation,\nexcept that values whose magnitude is more than\ntwo standard deviations from the mean are dropped and re-picked.\n\nArgs:\nshape: A tuple of integers, the shape of tensor to create.\nmean: Mean of the values.\nstddev: Standard deviation of the values.\ndtype: String, dtype of returned tensor.\nseed: Integer, random seed.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def freeze(self):\n    self._frozen = True\n    if self._tuple_types is None:\n        raise ValueError(\"Can't freeze an InfeedQueue without setting all tuple types.\")\n    if self._tuple_shapes is None:\n        raise ValueError(\"Can't freeze an InfeedQueue without setting all tuple shapes.\")\n    for shape in self._tuple_shapes:\n        if shape.dims is None:\n            raise ValueError(\"Can't freeze an InfeedQueue without setting all tuple shapes.\")\n    for policy in self._sharding_policies:\n        policy.freeze()\n    self._validate()", "docstring": "Freezes the InfeedQueue so it can no longer be modified.\n\nThe configuration is implicitly frozen before any host-side or\ndevice-side Ops are generated. The configuration cannot be frozen\nuntil the types and shapes of the tuple elements have been set.\n\nRaises:\nValueError: if the types or shapes of the tuple elements have not been\nset.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n        \n        self.SubmitJob = channel.unary_unary(\n            \"/google.cloud.dataproc.v1.JobController/SubmitJob\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,\n        )\n        self.GetJob = channel.unary_unary(\n            \"/google.cloud.dataproc.v1.JobController/GetJob\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,\n        )\n        self.ListJobs = channel.unary_unary(\n            \"/google.cloud.dataproc.v1.JobController/ListJobs\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString,\n        )\n        self.UpdateJob = channel.unary_unary(\n            \"/google.cloud.dataproc.v1.JobController/UpdateJob\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,\n        )\n        self.CancelJob = channel.unary_unary(\n            \"/google.cloud.dataproc.v1.JobController/CancelJob\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,\n        )\n        self.DeleteJob = channel.unary_unary(\n            \"/google.cloud.dataproc.v1.JobController/DeleteJob\",\n            request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def slicewise(self, fn, *inputs):\n    \n    if fn == tf.add:\n      assert len(inputs) == 2\n      if isinstance(inputs[0], mtf.LazyAllreduceSum):\n        \n        return inputs[0] + inputs[1]\n    \n    inputs = mtf.convert_args_to_laid_out_tensors(inputs)\n    ret = fn(*[\n        x.one_slice if isinstance(x, self.LaidOutTensor) else x\n        for x in inputs])\n    if isinstance(ret, tuple):\n      return tuple([self.LaidOutTensor([t]) for t in ret])\n    else:\n      return self.LaidOutTensor([ret])", "docstring": "Execute a function in parallel on all slices.\n\nArgs:\nfn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.\n*inputs: a list of inputs.  Each input is either a LaidOutTensor or\nis convertible to a tf.Tensor.\nReturns:\na LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.", "source": "juraj-google-style"}
{"code": "def atomic_swap(alias_name, new_index_name, index_client):\n    \n    logging.info('Performing atomic index alias swap')\n    if index_client.exists_alias(name=alias_name):\n        old_index_name = get_index_from_alias(alias_name, index_client)\n        logging.info('Removing old as well as adding new')\n        actions = {'actions': [\n            {'remove': {'index': old_index_name, 'alias': alias_name}},\n            {'add': {'index': new_index_name, 'alias': alias_name}}\n        ]}\n        index_client.update_aliases(body=actions)\n        index_client.delete(index=old_index_name)\n    else:\n        logging.info('Old alias not found, only adding new')\n        actions = {'actions': [\n            {'add': {'index': new_index_name, 'alias': alias_name}}\n        ]}\n        index_client.update_aliases(body=actions)", "docstring": "Points an alias to a new index, then delete the old index if needed\n\nUses client.update_aliases to perform this with zero downtime\n\nArgs:\nalias_name (str) Name of the alias\nnew_index_name (str) The new index that the alias should point to\nindex_client (Elasticsearch.IndicesClient) Elasticsearch index client", "source": "juraj-google-style"}
{"code": "def _get_fitnesses(self, problem, population, cache_encoded=True, cache_solution=False, pool=None):\n    fitnesses = ([None] * len(population))\n    if cache_encoded:\n        try:\n            encoded_keys = map(self._get_encoded_key, population)\n            to_decode_indices = []\n            for (i, encoded_key) in enumerate(encoded_keys):\n                try:\n                    fitnesses[i] = self.__encoded_cache[encoded_key]\n                except KeyError:\n                    to_decode_indices.append(i)\n        except UnhashableError:\n            encoded_keys = None\n            to_decode_indices = range(len(population))\n    else:\n        encoded_keys = None\n        to_decode_indices = range(len(population))\n    if (encoded_keys is None):\n        to_decode_keys = None\n    else:\n        to_decode_keys = [encoded_keys[i] for i in to_decode_indices]\n    solutions = ([None] * len(population))\n    for (i, solution) in zip(to_decode_indices, self._pmap(problem.decode_solution, [population[i] for i in to_decode_indices], to_decode_keys, pool)):\n        solutions[i] = solution\n    if cache_solution:\n        try:\n            if problem.hash_solution:\n                hash_solution_func = problem.hash_solution\n            else:\n                hash_solution_func = self._get_solution_key\n            solution_keys = [(hash_solution_func(solution) if (solution is not None) else None) for solution in solutions]\n            to_eval_indices = []\n            for (i, solution_key) in enumerate(solution_keys):\n                if (solution_key is not None):\n                    try:\n                        fitnesses[i] = self.__solution_cache[solution_key]\n                    except KeyError:\n                        to_eval_indices.append(i)\n        except UnhashableError:\n            solution_keys = None\n            to_eval_indices = to_decode_indices[:]\n    else:\n        solution_keys = None\n        to_eval_indices = to_decode_indices[:]\n    if (solution_keys is None):\n        if (encoded_keys is None):\n            to_eval_keys = None\n        else:\n            to_eval_keys = [encoded_keys[i] for i in to_eval_indices]\n    else:\n        to_eval_keys = [solution_keys[i] for i in to_eval_indices]\n    finished = False\n    eval_bookkeeping = {}\n    for (i, fitness_finished) in zip(to_eval_indices, self._pmap(problem.get_fitness, [solutions[i] for i in to_eval_indices], to_eval_keys, pool, bookkeeping_dict=eval_bookkeeping)):\n        try:\n            (fitness, maybe_finished) = fitness_finished\n            if maybe_finished:\n                finished = True\n        except TypeError:\n            fitness = fitness_finished\n        fitnesses[i] = fitness\n    self.fitness_runs += len(eval_bookkeeping['key_indices'])\n    if (cache_encoded and (encoded_keys is not None)):\n        for i in to_decode_indices:\n            self.__encoded_cache[encoded_keys[i]] = fitnesses[i]\n    if (cache_solution and (solution_keys is not None)):\n        for i in to_eval_indices:\n            self.__solution_cache[solution_keys[i]] = fitnesses[i]\n    return (solutions, fitnesses, finished)", "docstring": "Get the fitness for every solution in a population.\n\nArgs:\nproblem: Problem; The problem that defines fitness.\npopulation: list; List of potential solutions.\npool: None/multiprocessing.Pool; Pool of processes for parallel\ndecoding and evaluation.", "source": "codesearchnet"}
{"code": "async def _get_popular_people_page(self, page=1):\n        \n        return await self.get_data(self.url_builder(\n            'person/popular',\n            url_params=OrderedDict(page=page),\n        ))", "docstring": "Get a specific page of popular person data.\n\nArguments:\npage (:py:class:`int`, optional): The page to get.\n\nReturns:\n:py:class:`dict`: The page data.", "source": "juraj-google-style"}
{"code": "def report(self, name, owner=None, **kwargs):\n    return Report(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Report TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def _process_image_files(name, filenames, texts, labels, num_shards):\n  \n  assert len(filenames) == len(texts)\n  assert len(filenames) == len(labels)\n\n  \n  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)\n  ranges = []\n  for i in range(len(spacing) - 1):\n    ranges.append([spacing[i], spacing[i+1]])\n\n  \n  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n  sys.stdout.flush()\n\n  \n  coord = tf.train.Coordinator()\n\n  \n  coder = ImageCoder()\n\n  threads = []\n  for thread_index in range(len(ranges)):\n    args = (coder, thread_index, ranges, name, filenames,\n            texts, labels, num_shards)\n    t = threading.Thread(target=_process_image_files_batch, args=args)\n    t.start()\n    threads.append(t)\n\n  \n  coord.join(threads)\n  print('%s: Finished writing all %d images in data set.' %\n        (datetime.now(), len(filenames)))\n  sys.stdout.flush()", "docstring": "Process and save list of images as TFRecord of Example protos.\n\nArgs:\nname: string, unique identifier specifying the data set\nfilenames: list of strings; each string is a path to an image file\ntexts: list of strings; each string is human readable, e.g. 'dog'\nlabels: list of integer; each integer identifies the ground truth\nnum_shards: integer number of shards for this data set.", "source": "juraj-google-style"}
{"code": "def url_fetch(config, task) -> Iterator[dict]:\n    for url, uri in get_rows(config, task['auth'], task['urls']):\n        if config.verbose:\n            print('URL/URI', url, uri)\n        record = {'URL': url, 'URI': None if uri is None else str(uri)}\n        url_request = request.Request(url, data=task.get('data'))\n        try:\n            url_response = request.urlopen(url_request)\n            if task.get('status', False):\n                record['Status'] = url_response.status\n            if task.get('read', False):\n                record['Read'] = url_response.read()\n        except InvalidURL as error:\n            if task.get('status', False):\n                record['Status'] = 400\n        except HTTPError as error:\n            if task.get('status', False):\n                record['Status'] = error.status\n        except Exception as error:\n            if task.get('status', False):\n                record['Status'] = 500\n        yield record", "docstring": "Fetch URL list and return both status code and/or contents.\n\nTakes no parameters, it operates on recipe JSON directly. Core\nfunction is to call urlopen on each passed in URL.\n\nReturns:\nProduces a dictionary generator with record matching URL_SCHEMA.", "source": "github-repos"}
{"code": "def sites_at_edges( self ):\n        \n        min_x = min( [ s.r[0] for s in self.sites ] )\n        max_x = max( [ s.r[0] for s in self.sites ] )\n        min_y = min( [ s.r[1] for s in self.sites ] )\n        max_y = max( [ s.r[1] for s in self.sites ] )\n        min_z = min( [ s.r[2] for s in self.sites ] )\n        max_z = max( [ s.r[2] for s in self.sites ] )\n        x_max = [ s for s in self.sites if s.r[0] == min_x ]\n        x_min = [ s for s in self.sites if s.r[0] == max_x ]\n        y_max = [ s for s in self.sites if s.r[1] == min_y ]\n        y_min = [ s for s in self.sites if s.r[1] == max_y ]\n        z_max = [ s for s in self.sites if s.r[2] == min_z ]\n        z_min = [ s for s in self.sites if s.r[2] == max_z ]\n        return ( x_max, x_min, y_max, y_min, z_max, z_min )", "docstring": "Finds the six sites with the maximum and minimum coordinates along x, y, and z.\n\nArgs:\nNone\n\nReturns:\n(List(List)): In the order [ +x, -x, +y, -y, +z, -z ]", "source": "juraj-google-style"}
{"code": "def extractDates(inp, tz=None, now=None):\n    service = DateService(tz=tz, now=now)\n    return service.extractDates(inp)", "docstring": "Extract semantic date information from an input string.\nThis is a convenience method which would only be used if\nyou'd rather not initialize a DateService object.\n\nArgs:\ninp (str): The input string to be parsed.\ntz: An optional Pytz timezone. All datetime objects returned will\nbe relative to the supplied timezone, or timezone-less if none\nis supplied.\nnow: The time to which all returned datetime objects should be\nrelative. For example, if the text is \"In 5 hours\", the\ndatetime returned will be now + datetime.timedelta(hours=5).\nUses datetime.datetime.now() if none is supplied.\n\nReturns:\nA list of datetime objects extracted from input.", "source": "codesearchnet"}
{"code": "def smear(self, sigma):\n    diff = [(self.x[(i + 1)] - self.x[i]) for i in range((len(self.x) - 1))]\n    avg_x_per_step = (np.sum(diff) / len(diff))\n    if (len(self.ydim) == 1):\n        self.y = gaussian_filter1d(self.y, (sigma / avg_x_per_step))\n    else:\n        self.y = np.array([gaussian_filter1d(self.y[(:, k)], (sigma / avg_x_per_step)) for k in range(self.ydim[1])]).T", "docstring": "Apply Gaussian smearing to spectrum y value.\n\nArgs:\nsigma: Std dev for Gaussian smear function", "source": "codesearchnet"}
{"code": "def update_utxoset(self, transaction):\n        \n        spent_outputs = [\n            spent_output for spent_output in transaction.spent_outputs\n        ]\n        if spent_outputs:\n            self.delete_unspent_outputs(*spent_outputs)\n        self.store_unspent_outputs(\n            *[utxo._asdict() for utxo in transaction.unspent_outputs]\n        )", "docstring": "Update the UTXO set given ``transaction``. That is, remove\nthe outputs that the given ``transaction`` spends, and add the\noutputs that the given ``transaction`` creates.\n\nArgs:\ntransaction (:obj:`~bigchaindb.models.Transaction`): A new\ntransaction incoming into the system for which the UTXO\nset needs to be updated.", "source": "juraj-google-style"}
{"code": "def _create_and_save_tf1_gather_model(self, saved_model_path: str, signature_key: str, tags: Collection[str], input_key: str, output_key: str, input_type: dtypes.DType, use_variable=False) -> core.Tensor:\n    with ops.Graph().as_default(), session.Session() as sess:\n        in_placeholder, output_tensor = self._create_simple_tf1_gather_model(input_type=input_type, use_variable_for_filter=use_variable)\n        if use_variable:\n            sess.run(variables.global_variables_initializer())\n        self._save_tf1_model(sess, saved_model_path, signature_key, tags, inputs={input_key: in_placeholder}, outputs={output_key: output_tensor})\n        return in_placeholder", "docstring": "Creates and saves a simple gather model.\n\nThis is intended to be used for TF1 (graph mode) tests.\n\nArgs:\nsaved_model_path: Directory to save the model.\nsignature_key: The key to the SignatureDef that inputs & outputs\ncorrespond to.\ntags: Set of tags associated with the model.\ninput_key: The key to the input tensor.\noutput_key: The key to the output tensor.\ninput_type: type of the input index tensor for gather operation.\nuse_variable: Setting this to `True` makes the filter for the gather\noperation a `tf.Variable`.\n\nReturns:\nin_placeholder: The placeholder tensor used as an input to the model.", "source": "github-repos"}
{"code": "def smiles_to_compound(smiles, assign_descriptors=True):\n    it = iter(smiles)\n    mol = molecule()\n    try:\n        for token in it:\n            mol(token)\n        (result, _) = mol(None)\n    except KeyError as err:\n        raise ValueError('Unsupported Symbol: {}'.format(err))\n    result.graph.remove_node(0)\n    logger.debug(result)\n    if assign_descriptors:\n        molutil.assign_descriptors(result)\n    return result", "docstring": "Convert SMILES text to compound object\n\nRaises:\nValueError: SMILES with unsupported format", "source": "codesearchnet"}
{"code": "def deserialize(config, custom_objects=None):\n    return deserialize_keras_object(config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='metric function')", "docstring": "Deserializes a serialized metric class/function instance.\n\nArgs:\nconfig: Metric configuration.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during deserialization.\n\nReturns:\nA Keras `Metric` instance or a metric function.", "source": "github-repos"}
{"code": "def _delete_minibatch(self, bucket, keys):\n    request = messages.DeleteBatchRequest(bucket, keys)\n    results = {}\n    try:\n        response = self.client.delete_batch(request)\n        for key in response.deleted:\n            results[bucket, key] = None\n        for key, error in zip(response.failed, response.errors):\n            results[bucket, key] = error\n    except messages.S3ClientError as e:\n        for key in keys:\n            results[bucket, key] = e\n    return results", "docstring": "A helper method. Boto3 allows batch deletions\nfor files within the same bucket.\n\nArgs:\nbucket: String bucket name\nkeys: List of keys to be deleted in the bucket\n\nReturns: dict of the form {(bucket, key): error}, where error is None if the\noperation succeeded", "source": "github-repos"}
{"code": "def _check_callback(callback):\n    if inspect.isclass(callback):\n        callback_object = callback()\n        if (not callable(callback_object)):\n            raise ValueError('Callback must be a class that implements __call__ or a function.')\n    elif callable(callback):\n        callback_object = callback\n    else:\n        raise ValueError('Callback must be a class that implements __call__ or a function.')\n    return callback_object", "docstring": "Turns a callback that is potentially a class into a callable object.\n\nArgs:\ncallback (object): An object that might be a class, method, or function.\nif the object is a class, this creates an instance of it.\n\nRaises:\nValueError: If an instance can't be created or it isn't a callable object.\nTypeError: If the class requires arguments to be instantiated.\n\nReturns:\ncallable: A callable object suitable for use as the consumer callback.", "source": "codesearchnet"}
{"code": "def filter(self, scored_list):\n        \n        top_n_key = -1 * self.top_n\n        top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]\n        result_list = sorted(top_n_list, key=lambda x: x[0])\n        return result_list", "docstring": "Filtering with top-n ranking.\n\nArgs:\nscored_list:    The list of scoring.\n\nRetruns:\nThe list of filtered result.", "source": "juraj-google-style"}
{"code": "def _unbind_topics(self, topics):\n    self.client.unsubscribe(topics.status)\n    self.client.unsubscribe(topics.tracing)\n    self.client.unsubscribe(topics.streaming)\n    self.client.unsubscribe(topics.response)", "docstring": "Unsubscribe to all of the topics we needed for communication with device\n\nArgs:\ntopics (MQTTTopicValidator): The topic validator for this device that\nwe have connected to.", "source": "codesearchnet"}
{"code": "def write_journal(self, journal_file_path):\n    with open(journal_file_path, 'w') as jrn_file:\n        jrn_file.write(self._journal_contents)", "docstring": "Write the constructed journal in to the provided file.\n\nArgs:\njournal_file_path (str): full path to output journal file", "source": "codesearchnet"}
{"code": "def publishItems(self, items_info):\n        \n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        itemInfo = None\n        item_results = None\n        item_info = None\n        admin = None\n        try:\n            admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n            item_results = []\n            for item_info in items_info:\n                if 'ReplaceTag' in item_info:\n\n                    itemInfo = {\"ReplaceTag\":item_info['ReplaceTag'] }\n                else:\n                    itemInfo = {\"ReplaceTag\":\"{FeatureService}\" }\n\n                itemInfo['ItemInfo']  = self._publishItems(config=item_info)\n\n                if itemInfo['ItemInfo'] is not None and 'name' in itemInfo['ItemInfo']:\n                    print (\"%s created\" % itemInfo['ItemInfo']['name'])\n                    item_results.append(itemInfo)\n                else:\n                    print (str(itemInfo['ItemInfo']))\n\n            return item_results\n\n        except common.ArcRestHelperError as e:\n            raise e\n        except Exception as e:\n\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                \"function\": \"publishItems\",\n                \"line\": line,\n                \"filename\":  filename,\n                \"synerror\": synerror,\n            })\n        finally:\n            itemInfo = None\n            item_results = None\n            item_info = None\n            admin = None\n\n            del itemInfo\n            del item_results\n            del item_info\n            del admin\n\n            gc.collect()", "docstring": "Publishes a list of items.\n\nArgs:\nitems_info (list): A list of JSON configuration items to publish.\n\nReturns:\nlist: A list of results from :py:meth:`arcrest.manageorg._content.User.addItem`.", "source": "juraj-google-style"}
{"code": "def check_arg_in_support(f):\n\n    @functools.wraps(f)\n    def _check_arg_and_apply_f(*args, **kwargs):\n        dist = args[0]\n        x = args[1]\n        with tf.control_dependencies(([assert_util.assert_greater_equal(x, dist.loc, message='x is not in the support of the distribution')] if dist.validate_args else [])):\n            return f(*args, **kwargs)\n    return _check_arg_and_apply_f", "docstring": "Decorator function for argument bounds checking.\n\nThis decorator is meant to be used with methods that require the first\nargument to be in the support of the distribution. If `validate_args` is\n`True`, the method is wrapped with an assertion that the first argument is\ngreater than or equal to `loc`, since the support of the half-Cauchy\ndistribution is given by `[loc, infinity)`.\n\n\nArgs:\nf: method to be decorated.\n\nReturns:\nReturns a decorated method that, when `validate_args` attribute of the class\nis `True`, will assert that all elements in the first argument are within\nthe support of the distribution before executing the original method.", "source": "codesearchnet"}
{"code": "def period(self, value: float):\n    if (value < 0):\n        raise ValueError('Period must be greater or equal than zero.')\n    self._period = timedelta(seconds=value)", "docstring": "Set the period.\n\nArgs:\nvalue (float): seconds", "source": "codesearchnet"}
{"code": "def validate(self, value, model_instance):\n        \n        if not isinstance(value, base.StateWrapper):\n            raise exceptions.ValidationError(self.error_messages['wrong_type'] % value)\n        elif not value.workflow == self.workflow:\n            raise exceptions.ValidationError(self.error_messages['wrong_workflow'] % value.workflow)\n        elif value.state not in self.workflow.states:\n            raise exceptions.ValidationError(self.error_messages['invalid_state'] % value.state)", "docstring": "Validate that a given value is a valid option for a given model instance.\n\nArgs:\nvalue (xworkflows.base.StateWrapper): The base.StateWrapper returned by to_python.\nmodel_instance: A WorkflowEnabled instance", "source": "juraj-google-style"}
{"code": "def _from_config(cls, config, **kwargs):\n    torch_dtype = kwargs.pop('torch_dtype', config.torch_dtype)\n    if isinstance(torch_dtype, str):\n        torch_dtype = getattr(torch, torch_dtype)\n    dtype_orig = None\n    if torch_dtype is not None:\n        dtype_orig = cls._set_default_torch_dtype(torch_dtype)\n    config = copy.deepcopy(config)\n    if config._attn_implementation_internal is not None:\n        attn_implementation = config._attn_implementation_internal\n    else:\n        attn_implementation = None\n    config._attn_implementation = kwargs.pop('attn_implementation', attn_implementation)\n    if not getattr(config, '_attn_implementation_autoset', False):\n        config = cls._autoset_attn_implementation(config, check_device_map=False, torch_dtype=torch_dtype)\n    if is_deepspeed_zero3_enabled() and (not _is_quantized) and (not _is_ds_init_called):\n        logger.info('Detected DeepSpeed ZeRO-3: activating zero.init() for this model')\n        import deepspeed\n        init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]\n        with ContextManagers(init_contexts):\n            model = cls(config, **kwargs)\n    else:\n        model = cls(config, **kwargs)\n    if dtype_orig is not None:\n        torch.set_default_dtype(dtype_orig)\n    return model", "docstring": "All context managers that the model should be initialized under go here.\n\nArgs:\ntorch_dtype (`torch.dtype`, *optional*):\nOverride the default `torch.dtype` and load the model under this dtype.", "source": "github-repos"}
{"code": "def additive_coupling(name, x, mid_channels=512, reverse=False, activation='relu', dropout=0.0):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        output_channels = (common_layers.shape_list(x)[(- 1)] \n        (x1, x2) = tf.split(x, num_or_size_splits=2, axis=(- 1))\n        z1 = x1\n        shift = conv_stack('nn', x1, mid_channels, output_channels=output_channels, activation=activation, dropout=dropout)\n        if (not reverse):\n            z2 = (x2 + shift)\n        else:\n            z2 = (x2 - shift)\n        return (tf.concat([z1, z2], axis=3), 0.0)", "docstring": "Reversible additive coupling layer.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor, shape=(NHWC).\nmid_channels: number of channels in the coupling layer.\nreverse: Forward or reverse operation.\nactivation: \"relu\" or \"gatu\"\ndropout: default, 0.0\nReturns:\noutput: 4-D Tensor, shape=(NHWC)\nobjective: 0.0", "source": "codesearchnet"}
{"code": "def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._binary_op(x, y, tf.maximum, tf.float32)", "docstring": "Returns a TensorFluent for the maximum function.TensorFluent\n\nArgs:\nx: The first operand.\ny: The second operand.\n\nReturns:\nA TensorFluent wrapping the maximum function.", "source": "juraj-google-style"}
{"code": "def exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int:\n    assert (run_infos is not None)\n    if (not hasattr(run_infos, '__iter__')):\n        return run_infos.retcode\n    rcs = [ri.retcode for ri in run_infos]\n    max_rc = max(rcs)\n    min_rc = min(rcs)\n    if (max_rc == 0):\n        return min_rc\n    return max_rc", "docstring": "Generate a single exit code from a list of RunInfo objects.\n\nTakes a list of RunInfos and returns the exit code that is furthest away\nfrom 0.\n\nArgs:\nrun_infos (t.List[RunInfo]): [description]\n\nReturns:\nint: [description]", "source": "codesearchnet"}
{"code": "def __init__(self, x, y=None, **kwargs):\n    if not self.can_handle(x, y):\n        raise ValueError('{} Cannot handle input {}, {}'.format(self.__class__, x, y))", "docstring": "Create a DataAdapter based on data inputs.\n\nThe caller must make sure to call `can_handle()` first before invoking this\nmethod. Provide unsupported data type will result into unexpected behavior.\n\nArgs:\nx: input features.\ny: target labels. Note that y could be None in the case of prediction.\n**kwargs: Other keyword arguments for DataAdapter during the construction\nof the tf.dataset.Dataset. For example:\n- Numpy data might have `sample_weights` which will be used for\nweighting the loss function during training.\n- Numpy data might need to have `batch_size` parameter when constructing\nthe dataset and iterator.\n- Certain input might need to be distribution strategy aware. When\n`distribution_strategy` is passed, the created dataset need to respect\nthe strategy.\nDataAdapter might choose to ignore any keyword argument if it doesn't\nuse it, or raise exception if any required argument is not provide.", "source": "github-repos"}
{"code": "def set_installed_version(vcs, version):\n    \n    version_path = _get_version_path(vcs)\n    with open(version_path, 'w') as f:\n        f.write(version)", "docstring": "Set the installed version for this project.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nversion (str)", "source": "juraj-google-style"}
{"code": "def detect_palette_support(basic_palette=None):\n    result = col_init = win_enabled = None\n    TERM = (env.TERM or '')\n    if (os_name == 'nt'):\n        from .windows import is_ansi_capable, enable_vt_processing, is_colorama_initialized\n        if is_ansi_capable():\n            win_enabled = all(enable_vt_processing())\n        col_init = is_colorama_initialized()\n    if (TERM.startswith('xterm') or (TERM == 'linux') or col_init):\n        result = 'basic'\n    if (('256color' in TERM) or (TERM == 'fbterm') or env.ANSICON):\n        result = 'extended'\n    if ((env.COLORTERM in ('truecolor', '24bit')) or win_enabled):\n        result = 'truecolor'\n    pal_name = 'Unknown'\n    if (result and (not basic_palette)):\n        (result, pal_name, basic_palette) = _find_basic_palette(result)\n    try:\n        import webcolors\n    except ImportError:\n        webcolors = None\n    log.debug(f\"{result!r} ({os_name}, TERM={(env.TERM or '')}, COLORTERM={(env.COLORTERM or '')}, ANSICON={env.ANSICON}, webcolors={bool(webcolors)}, basic_palette={pal_name})\")\n    return (result, basic_palette)", "docstring": "Returns whether we think the terminal supports basic, extended, or\ntruecolor.  None if not able to tell.\n\nReturns:\nNone or str: 'basic', 'extended', 'truecolor'", "source": "codesearchnet"}
{"code": "def _MergeEntities(self, a, b):\n    \n\n    def _MergeAgencyId(a_agency_id, b_agency_id):\n      \n      a_agency_id = a_agency_id or None\n      b_agency_id = b_agency_id or None\n      return self._MergeIdentical(a_agency_id, b_agency_id)\n\n    scheme = {'agency_id': _MergeAgencyId,\n              'agency_name': self._MergeIdentical,\n              'agency_url': self._MergeIdentical,\n              'agency_timezone': self._MergeIdentical}\n    return self._SchemedMerge(scheme, a, b)", "docstring": "Merges two agencies.\n\nTo be merged, they are required to have the same id, name, url and\ntimezone. The remaining language attribute is taken from the new agency.\n\nArgs:\na: The first agency.\nb: The second agency.\n\nReturns:\nThe merged agency.\n\nRaises:\nMergeError: The agencies could not be merged.", "source": "juraj-google-style"}
{"code": "def _ConstructAndTestGradientForConfig(self, pool_func, input_sizes, output_sizes, window, strides, padding, data_format, data_type, use_gpu):\n    jacob_a, jacob_n = self._getJacobians(pool_func, input_sizes, output_sizes, window, strides, padding, data_format, use_gpu, dtype=data_type.as_numpy_dtype)\n    if data_type == dtypes.bfloat16:\n        _, jacob_n = self._getJacobians(pool_func, input_sizes, output_sizes, window, strides, padding, data_format, use_gpu, dtype=np.float32)\n    input_jacob_a, grad_jacob_a = jacob_a\n    input_jacob_n, grad_jacob_n = jacob_n\n    self.assertAllClose(input_jacob_a, input_jacob_n, rtol=0.001, atol=0.001)\n    self.assertAllClose(grad_jacob_a, grad_jacob_n, rtol=0.001, atol=0.001)", "docstring": "Verifies the gradients of a pooling function.\n\nArgs:\npool_func: Function to be called, co.MaxPool, co.AvgPool,\nor the Lua version.\ninput_sizes: Input tensor dimensions.\noutput_sizes: Output tensor dimensions.\nwindow: Tuple of kernel dims: planes, rows, cols.\nstrides: Tuple of strides for dims: planes, rows, cols.\npadding: Padding type.\ndata_format: Data format string.\ndata_type: The data type to use to run the pooling operation.\nuse_gpu: Whether to run on GPU.", "source": "github-repos"}
{"code": "def tf_step(self, x, iteration, conjugate, residual, squared_residual):\n    (x, next_iteration, conjugate, residual, squared_residual) = super(ConjugateGradient, self).tf_step(x, iteration, conjugate, residual, squared_residual)\n    A_conjugate = self.fn_x(conjugate)\n    if (self.damping > 0.0):\n        A_conjugate = [(A_conj + (self.damping * conj)) for (A_conj, conj) in zip(A_conjugate, conjugate)]\n    conjugate_A_conjugate = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for (conj, A_conj) in zip(conjugate, A_conjugate)])\n    alpha = (squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon))\n    next_x = [(t + (alpha * conj)) for (t, conj) in zip(x, conjugate)]\n    next_residual = [(res - (alpha * A_conj)) for (res, A_conj) in zip(residual, A_conjugate)]\n    next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual])\n    beta = (next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon))\n    next_conjugate = [(res + (beta * conj)) for (res, conj) in zip(next_residual, conjugate)]\n    return (next_x, next_iteration, next_conjugate, next_residual, next_squared_residual)", "docstring": "Iteration loop body of the conjugate gradient algorithm.\n\nArgs:\nx: Current solution estimate $x_t$.\niteration: Current iteration counter $t$.\nconjugate: Current conjugate $c_t$.\nresidual: Current residual $r_t$.\nsquared_residual: Current squared residual $r_t^2$.\n\nReturns:\nUpdated arguments for next iteration.", "source": "codesearchnet"}
{"code": "def push(self, stream_id, timestamp, value):\n        \n\n        stream = DataStream.FromEncoded(stream_id)\n        reading = IOTileReading(stream_id, timestamp, value)\n\n        try:\n            self.storage.push(stream, reading)\n\n            return Error.NO_ERROR\n        except StorageFullError:\n            return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.RING_BUFFER_FULL)", "docstring": "Push a value to a stream.\n\nArgs:\nstream_id (int): The stream we want to push to.\ntimestamp (int): The raw timestamp of the value we want to\nstore.\nvalue (int): The 32-bit integer value we want to push.\nReturns:\nint: Packed 32-bit error code.", "source": "juraj-google-style"}
{"code": "def _checkString(inputstring, description, minlength=0, maxlength=None):\n    \n    \n    if not isinstance(description, str):\n        raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n\n    if not isinstance(inputstring, str):\n        raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))\n\n    if not isinstance(maxlength, (int, type(None))):\n        raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))\n\n    \n    _checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')\n\n    if len(inputstring) < minlength:\n        raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format( \\\n            description, len(inputstring), minlength, inputstring))\n\n    if not maxlength is None:\n        if maxlength < 0:\n            raise ValueError('The maxlength must be positive. Given: {0}'.format(maxlength))\n\n        if maxlength < minlength:\n            raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format( \\\n                maxlength, minlength))\n\n        if len(inputstring) > maxlength:\n            raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format( \\\n                description, len(inputstring), maxlength, inputstring))", "docstring": "Check that the given string is valid.\n\nArgs:\n* inputstring (string): The string to be checked\n* description (string): Used in error messages for the checked inputstring\n* minlength (int): Minimum length of the string\n* maxlength (int or None): Maximum length of the string\n\nRaises:\nTypeError, ValueError\n\nUses the function :func:`_checkInt` internally.", "source": "juraj-google-style"}
{"code": "def authorize(self, http):\n    return google_auth_httplib2.AuthorizedHttp(self._google_auth_credentials, http=http)", "docstring": "Return an http client authorized with the google-auth credentials.\n\nArgs:\nhttp: httplib2.Http, an http object to be used to make the refresh\nrequest.\n\nReturns:\ngoogle_auth_httplib2.AuthorizedHttp: An authorized http client.", "source": "github-repos"}
{"code": "def reset(self):\n    if self.running:\n        raise RuntimeError('paco: executor is still running')\n    self.pool.clear()\n    self.observer.clear()\n    self.semaphore = asyncio.Semaphore(self.limit, loop=self.loop)", "docstring": "Resets the executer scheduler internal state.\n\nRaises:\nRuntimeError: is the executor is still running.", "source": "codesearchnet"}
{"code": "def fail(msg, extras=None):\n    raise signals.TestFailure(msg, extras)", "docstring": "Explicitly fail a test.\n\nArgs:\nmsg: A string explaining the details of the failure.\nextras: An optional field for extra information to be included in\ntest result.\n\nRaises:\nsignals.TestFailure: Mark a test as failed.", "source": "github-repos"}
{"code": "def cos(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.cos, tf.float32)", "docstring": "Returns a TensorFluent for the cos function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the cos function.", "source": "juraj-google-style"}
{"code": "def write(self, value):\n    if (not isinstance(value, bool)):\n        raise TypeError('Invalid value type, should be bool.')\n    try:\n        if value:\n            os.write(self._fd, b'1\\n')\n        else:\n            os.write(self._fd, b'0\\n')\n    except OSError as e:\n        raise GPIOError(e.errno, ('Writing GPIO: ' + e.strerror))\n    try:\n        os.lseek(self._fd, 0, os.SEEK_SET)\n    except OSError as e:\n        raise GPIOError(e.errno, ('Rewinding GPIO: ' + e.strerror))", "docstring": "Set the state of the GPIO to `value`.\n\nArgs:\nvalue (bool): ``True`` for high state, ``False`` for low state.\n\nRaises:\nGPIOError: if an I/O or OS error occurs.\nTypeError: if `value` type is not bool.", "source": "codesearchnet"}
{"code": "def consume(self, source):\n    manifest = OrderedDict()\n    rules = parse_stylesheet(source, skip_comments=True, skip_whitespace=True)\n    for rule in rules:\n        name = self.digest_prelude(rule)\n        if (not name.startswith(RULE_BASE_PREFIX)):\n            continue\n        properties = self.digest_content(rule)\n        manifest[name] = properties\n    return manifest", "docstring": "Parse source and consume tokens from tinycss2.\n\nArguments:\nsource (string): Source content to parse.\n\nReturns:\ndict: Retrieved rules.", "source": "codesearchnet"}
{"code": "def unexpected_disconnect(self, conn_or_internal_id):\n    data = {'id': conn_or_internal_id}\n    action = ConnectionAction('force_disconnect', data, sync=False)\n    self._actions.put(action)", "docstring": "Notify that there was an unexpected disconnection of the device.\n\nAny in progress operations are canceled cleanly and the device is transitioned\nto a disconnected state.\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id", "source": "codesearchnet"}
{"code": "def __init__(self, paths=None, separator='/'):\n    \n    if not paths:\n      raise errors.FormatError('Missing paths value.')\n\n    super(FileSourceType, self).__init__()\n    self.paths = paths\n    self.separator = separator", "docstring": "Initializes a source type.\n\nArgs:\npaths (Optional[str]): paths relative to the root of the file system.\nseparator (Optional[str]): path segment separator.\n\nRaises:\nFormatError: when paths is not set.", "source": "juraj-google-style"}
{"code": "def view(self, vleaf, fpath=None, cleanup=True, format=None):\n        \n        graph = self.create_graphviz_digraph(vleaf, format=format)\n        graph.view(fpath, cleanup=cleanup)", "docstring": "View the graph.\n\nArgs:\nvleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.\nfpath (`str`): The file path used to save.\ncleanup (`bool`): Clean up the source file after rendering. Default is True.\nformat (str):\nForce overwrite ``format`` (``'pdf', 'png', ...)``) configuration.", "source": "juraj-google-style"}
{"code": "def __init__(self, srcstate_id, nextstate_id, ilabel=None):\n        \n        self.srcstate = srcstate_id\n        self.nextstate = nextstate_id\n        self.ilabel = ilabel", "docstring": "The initialization function\nArgs:\nsrcstate_id (int): The source state identifier\nnextstate_id (int): The destination state identifier\nilabel (str): The symbol corresponding to character for the transition", "source": "juraj-google-style"}
{"code": "def release_port(upnp, external_port):\n    \n    mapping = upnp.getspecificportmapping(external_port, 'UDP')\n\n    if mapping is None:\n        log.error('could not find a port mapping', external=external_port)\n        return False\n    else:\n        log.debug('found existing port mapping', mapping=mapping)\n\n    if upnp.deleteportmapping(external_port, 'UDP'):\n        log.info('successfully released port mapping', external=external_port)\n        return True\n\n    log.warning(\n        'could not release port mapping, check your router for stale mappings',\n    )\n    return False", "docstring": "Try to release the port mapping for `external_port`.\n\nArgs:\nexternal_port (int): the port that was previously forwarded to.\n\nReturns:\nsuccess (boolean): if the release was successful.", "source": "juraj-google-style"}
{"code": "def _GetMostSignificantPathSegmentIndex(self, paths, similarity_weights, occurrence_weights, value_weights):\n    if (not paths):\n        raise ValueError('Missing paths.')\n    number_of_paths = len(paths)\n    path_segment_index = None\n    if (number_of_paths == 1):\n        path_segment_index = self._GetPathSegmentIndexForValueWeights(value_weights)\n    elif (number_of_paths == 2):\n        path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(occurrence_weights, value_weights)\n    elif (number_of_paths > 2):\n        path_segment_index = self._GetPathSegmentIndexForSimilarityWeights(similarity_weights, occurrence_weights, value_weights)\n    return path_segment_index", "docstring": "Retrieves the index of the most significant path segment.\n\nArgs:\npaths: a list of strings containing the paths.\nsimilarity_weights: the similarity weights object (instance of\n_PathSegmentWeights).\noccurrence_weights: the occurrence weights object (instance of\n_PathSegmentWeights).\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.\n\nRaises:\nValueError: when paths is an empty list.", "source": "codesearchnet"}
{"code": "def standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None):\n    if isinstance(sample_weight, tuple):\n        sample_weight = sample_weight[0]\n    if sample_weight_mode is not None and sample_weight_mode != 'samplewise':\n        if sample_weight_mode != 'temporal':\n            raise ValueError('\"sample_weight_mode should be None or \"temporal\". Found: ' + str(sample_weight_mode))\n        if len(y.shape) < 3:\n            raise ValueError('Found a sample_weight array for an input with shape ' + str(y.shape) + '. Timestep-wise sample weighting (use of sample_weight_mode=\"temporal\") is restricted to outputs that are at least 3D, i.e. that have a time dimension.')\n        if sample_weight is not None and len(sample_weight.shape) != 2:\n            raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. In order to use timestep-wise sample weighting, you should pass a 2D sample_weight array.')\n    elif sample_weight is not None and len(sample_weight.shape) != 1:\n        raise ValueError('Found a sample_weight array with shape {}. In order to use timestep-wise sample weights, you should specify sample_weight_mode=\"temporal\" in compile(); founssd \"{}\" instead. If you just mean to use sample-wise weights, make sure your sample_weight array is 1D.'.format(sample_weight.shape, sample_weight_mode))\n    if sample_weight is not None:\n        if len(sample_weight.shape) > len(y.shape):\n            raise ValueError('Found a sample_weight with shape' + str(sample_weight.shape) + '.Expected sample_weight with rank less than or equal to ' + str(len(y.shape)))\n        if not tensor_util.is_tf_type(sample_weight) and y.shape[:sample_weight.ndim] != sample_weight.shape:\n            raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + ' for an input with shape ' + str(y.shape) + '. sample_weight cannot be broadcast.')\n    class_sample_weight = None\n    if isinstance(class_weight, dict):\n        if len(y.shape) > 2:\n            raise ValueError('`class_weight` not supported for 3+ dimensional targets.')\n        if tensor_util.is_tf_type(y):\n            keys = np.array(sorted(class_weight.keys()))\n            values = np.array([class_weight[i] for i in keys])\n            weight_vector = np.zeros(np.max(keys) + 1)\n            weight_vector[:] = np.nan\n            weight_vector[keys] = values\n            y_classes = smart_cond.smart_cond(len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))\n            class_sample_weight = array_ops.gather(weight_vector, y_classes)\n            gen_array_ops.check_numerics(class_sample_weight, 'Invalid classes or class weights detected. 
NaN values indicate that an appropriate class weight could not be determined.')\n            class_sample_weight = math_ops.cast(class_sample_weight, backend.floatx())\n            if sample_weight is not None:\n                sample_weight = math_ops.cast(tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight), backend.floatx())\n        else:\n            y_classes = y\n            if len(y.shape) == 2:\n                if y.shape[1] > 1:\n                    y_classes = np.argmax(y, axis=1)\n                elif y.shape[1] == 1:\n                    y_classes = np.reshape(y, y.shape[0])\n            class_sample_weight = numpy_compat.np_asarray([class_weight[cls] for cls in y_classes if cls in class_weight])\n            if len(class_sample_weight) != len(y_classes):\n                existing_classes = set(y_classes)\n                existing_class_weight = set(class_weight.keys())\n                raise ValueError('`class_weight` must contain all classes in the data. The classes %s exist in the data but not in `class_weight`.' % (existing_classes - existing_class_weight))\n    if class_sample_weight is not None and sample_weight is not None:\n        return class_sample_weight * sample_weight\n    if sample_weight is not None:\n        return sample_weight\n    if class_sample_weight is not None:\n        return class_sample_weight\n    return None", "docstring": "Performs sample weight validation and standardization.\n\nEverything gets normalized to a single sample-wise (or timestep-wise)\nweight array. If both `sample_weight` and `class_weight` are provided,\nthe weights are multiplied.\n\nArgs:\ny: Numpy array or Tensor of model targets to be weighted.\nsample_weight: User-provided `sample_weight` argument.\nclass_weight: User-provided `class_weight` argument.\nsample_weight_mode: One of `None` or `\"temporal\"`. `\"temporal\"` indicated\nthat we expect 2D weight data that will be applied to the last 2\ndimensions of the targets (i.e. we are weighting timesteps, not\nsamples).\n\nReturns:\nA numpy array of target weights, one entry per sample to weight.\n\nRaises:\nValueError: In case of invalid user-provided arguments.", "source": "github-repos"}
{"code": "def from_known_inputs(cls, logs=None, metric_names=None, label_names=None):\n    if (not metric_names):\n        metric_names = ()\n    if (not label_names):\n        label_names = ()\n    known_labels = []\n    known_metrics = []\n    for l in label_descriptor.KnownLabels.__members__.values():\n        if (l.update_label_func and (l.label_name in label_names)):\n            known_labels.append(l)\n    for m in metric_descriptor.KnownMetrics.__members__.values():\n        if (m.update_op_func and (m.metric_name in metric_names)):\n            known_metrics.append(m)\n    return cls(logs=logs, metrics=known_metrics, labels=known_labels)", "docstring": "An alternate constructor that assumes known metrics and labels.\n\nThis differs from the default constructor in that the metrics and labels\nare iterables of names of 'known' metrics and labels respectively. The\nnames are used to obtain the metrics and labels from\n:class:`endpoints_management.control.metric_descriptor.KnownMetrics` and\n:class:`endpoints_management.control.label_descriptor.KnownLabels` respectively.\n\nnames that don't correspond to a known metric or label are ignored; as\nare metrics or labels that don't yet have a way of updating the\n`ReportRequest` operation.\n\nArgs:\nlogs (iterable[string]): the name of logs to be included in the\n`ReportRequest`\nmetric_names (iterable[string]): the name of a known metric to be\nadded to the `ReportRequest`\nlabel_names (iterable[string]): the name of a known label to be added\nto the `ReportRequest`", "source": "codesearchnet"}
{"code": "def starts_with_prefix_in_list(text, prefixes):\n    \n    for prefix in prefixes:\n        if text.startswith(prefix):\n            return True\n    return False", "docstring": "Return True if the given string starts with one of the prefixes in the given list, otherwise\nreturn False.\n\nArguments:\ntext (str): Text to check for prefixes.\nprefixes (list): List of prefixes to check for.\n\nReturns:\nbool: True if the given text starts with any of the given prefixes, otherwise False.", "source": "juraj-google-style"}
{"code": "def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:\n    original_height, original_width = original_size\n    best_fit = None\n    max_effective_resolution = 0\n    min_wasted_resolution = float('inf')\n    for height, width in possible_resolutions:\n        scale = min(width / original_width, height / original_height)\n        downscaled_width, downscaled_height = (int(original_width * scale), int(original_height * scale))\n        effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)\n        wasted_resolution = width * height - effective_resolution\n        if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):\n            max_effective_resolution = effective_resolution\n            min_wasted_resolution = wasted_resolution\n            best_fit = (height, width)\n    return best_fit", "docstring": "Selects the best resolution from a list of possible resolutions based on the original size.\n\nThis is done by calculating the effective and wasted resolution for each possible resolution.\n\nThe best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.\n\nArgs:\noriginal_size (tuple):\nThe original size of the image in the format (height, width).\npossible_resolutions (list):\nA list of possible resolutions in the format [(height1, width1), (height2, width2), ...].\n\nReturns:\ntuple: The best fit resolution in the format (height, width).", "source": "github-repos"}
{"code": "def _checkString(inputstring, description, minlength=0, maxlength=None):\n    if (not isinstance(description, str)):\n        raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n    if (not isinstance(inputstring, str)):\n        raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))\n    if (not isinstance(maxlength, (int, type(None)))):\n        raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))\n    _checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')\n    if (len(inputstring) < minlength):\n        raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format(description, len(inputstring), minlength, inputstring))\n    if (not (maxlength is None)):\n        if (maxlength < 0):\n            raise ValueError('The maxlength must be positive. Given: {0}'.format(maxlength))\n        if (maxlength < minlength):\n            raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format(maxlength, minlength))\n        if (len(inputstring) > maxlength):\n            raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format(description, len(inputstring), maxlength, inputstring))", "docstring": "Check that the given string is valid.\n\nArgs:\n* inputstring (string): The string to be checked\n* description (string): Used in error messages for the checked inputstring\n* minlength (int): Minimum length of the string\n* maxlength (int or None): Maximum length of the string\n\nRaises:\nTypeError, ValueError\n\nUses the function :func:`_checkInt` internally.", "source": "codesearchnet"}
{"code": "def parse_kegg_gene_metadata(infile):\n    \n    metadata = defaultdict(str)\n\n    with open(infile) as mf:\n        kegg_parsed = bs_kegg.parse(mf.read())\n\n    \n\n    if 'DBLINKS' in kegg_parsed.keys():\n        if 'UniProt' in kegg_parsed['DBLINKS']:\n            unis = str(kegg_parsed['DBLINKS']['UniProt']).split(' ')\n            \n            if isinstance(unis, list):\n                metadata['uniprot'] = unis[0]\n            else:\n                metadata['uniprot'] = unis\n        if 'NCBI-ProteinID' in kegg_parsed['DBLINKS']:\n            metadata['refseq'] = str(kegg_parsed['DBLINKS']['NCBI-ProteinID'])\n    if 'STRUCTURE' in kegg_parsed.keys():\n        metadata['pdbs'] = str(kegg_parsed['STRUCTURE']['PDB']).split(' ')\n    else:\n        metadata['pdbs'] = None\n    if 'ORGANISM' in kegg_parsed.keys():\n        metadata['taxonomy'] = str(kegg_parsed['ORGANISM'])\n\n    return metadata", "docstring": "Parse the KEGG flatfile and return a dictionary of metadata.\n\nDictionary keys are:\nrefseq\nuniprot\npdbs\ntaxonomy\n\nArgs:\ninfile: Path to KEGG flatfile\n\nReturns:\ndict: Dictionary of metadata", "source": "juraj-google-style"}
{"code": "def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int=0) -> nn.Linear:\n    index = index.to(layer.weight.device)\n    W = layer.weight.index_select(dim, index).detach().clone()\n    if layer.bias is not None:\n        if dim == 1:\n            b = layer.bias.detach().clone()\n        else:\n            b = layer.bias[index].detach().clone()\n    new_size = list(layer.weight.size())\n    new_size[dim] = len(index)\n    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)\n    new_layer.weight.requires_grad = False\n    new_layer.weight.copy_(W.contiguous())\n    new_layer.weight.requires_grad = True\n    if layer.bias is not None:\n        new_layer.bias.requires_grad = False\n        new_layer.bias.copy_(b.contiguous())\n        new_layer.bias.requires_grad = True\n    return new_layer", "docstring": "Prune a linear layer to keep only entries in index.\n\nUsed to remove heads.\n\nArgs:\nlayer (`torch.nn.Linear`): The layer to prune.\nindex (`torch.LongTensor`): The indices to keep in the layer.\ndim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.\n\nReturns:\n`torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.", "source": "github-repos"}
{"code": "def hwvtep_add_ve_interface(self, **kwargs):\n        \n        name = kwargs.pop('name')\n        ve_id = kwargs.pop('ve_id')\n        vrrp_id = kwargs.pop('vrrp_id')\n        ve_args = dict(name=name, ve_id=ve_id)\n        method_name = 'overlay_gateway_ip_interface_ve_ve_id'\n        method_class = self._brocade_tunnels\n        ve_attr = getattr(method_class, method_name)\n        config = ve_attr(**ve_args)\n        output = self._callback(config)\n        method_name = 'overlay_gateway_ip_interface_ve_vrrp_extended_group'\n        vrrp_attr = getattr(method_class, method_name)\n        vrrp_args = dict(name=name, vrrp_extended_group=vrrp_id)\n        config = vrrp_attr(**vrrp_args)\n        output = self._callback(config)\n\n        return output", "docstring": "Add virtual ethernet (ve) interface to the overlay-gateway\n\nArgs:\nname  (str): gateway-name\nint_id (int): ve id\nvrrp_id (int): VRPP-E group ID\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def list_group_maintainers(self, name):\n        \n        return self.service.list_group_maintainers(\n            name, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Get the maintainers of a group.\n\nArgs:\nname (string): Name of group to query.\n\nReturns:\n(list[string]): List of maintainer names.", "source": "juraj-google-style"}
{"code": "def write_contents(self, filename, contents, directory=None):\n        \n        filepath = \"{}/{}\".format(directory.rstrip(\"/\"), filename) if directory else filename\n        self._write_to_zipfile(filepath, contents)\n        return filepath", "docstring": "write_contents: Write contents to filename in zip\nArgs:\ncontents: (str) contents of file\nfilename: (str) name of file in zip\ndirectory: (str) directory in zipfile to write file to (optional)\nReturns: path to file in zip", "source": "juraj-google-style"}
{"code": "def to_dict(self):\n    output = copy.deepcopy(self.__dict__)\n    if output['backbone_config'] is not None:\n        output['backbone_config'] = self.backbone_config.to_dict()\n    output['model_type'] = self.__class__.model_type\n    return output", "docstring": "Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\nReturns:\n`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def remove(self, *l):\n        \n        removeList = list(flatten(l))\n        self._remove(removeList, self.value)", "docstring": "remove elements from self.value by matching.\n\nCreate the exactly same single you want to delete and pass it(them) in.\nNormally this method needs to be overwrited by subclass. It only looks inside current instance's value, not recursive. There is no need for a recursive one anyway.\n\nArgs:\n*l: a single element, a bunch of element seperated by comma, or a list of elements, or any combination. Element is what you match with.", "source": "juraj-google-style"}
{"code": "def bulk_lookup(self, api_name, keys):\n        \n        cached_data = {}\n\n        for key in keys:\n            value = self.lookup_value(api_name, key)\n            if value is not None:\n                cached_data[key] = value\n        return cached_data", "docstring": "Perform lookup on an enumerable of keys.\n\nArgs:\napi_name: a string name of the API. Keys and values are segmented by api_name.\nkeys: an enumerable of string keys.", "source": "juraj-google-style"}
{"code": "def transform_to_length(nndata, length):\n        \n\n        if length is None:\n            return nndata\n\n        if length:\n            for cn in range(length):\n                if cn not in nndata.cn_weights:\n                    nndata.cn_weights[cn] = 0\n                    nndata.cn_nninfo[cn] = []\n\n        return nndata", "docstring": "Given NNData, transforms data to the specified fingerprint length\nArgs:\nnndata: (NNData)\nlength: (int) desired length of NNData", "source": "juraj-google-style"}
{"code": "def register_codec(x):\n    _codecs.append(x)", "docstring": "Registers a codec to use for encoding/decoding.\n\nArgs:\nx: The codec object to register. The object must implement can_encode,\ndo_encode, can_decode, and do_decode. See the various _*Codec classes for\nexamples.", "source": "github-repos"}
{"code": "def _parse_email(self, val):\n        \n\n        ret = {\n            'type': None,\n            'value': None\n        }\n\n        try:\n\n            ret['type'] = val[1]['type']\n\n        except (KeyError, ValueError, TypeError):\n\n                pass\n\n        ret['value'] = val[3].strip()\n\n        try:\n\n            self.vars['email'].append(ret)\n\n        except AttributeError:\n\n            self.vars['email'] = []\n            self.vars['email'].append(ret)", "docstring": "The function for parsing the vcard email addresses.\n\nArgs:\nval (:obj:`list`): The value to parse.", "source": "juraj-google-style"}
{"code": "def get_volume_details(self, volume_name: str) -> dict:\n        \n        if volume_name not in self.volumes:\n            raise RuntimeError('No such volume found: ', volume_name)\n\n        volume = self._client.volumes.get(volume_name)\n        return volume.attrs", "docstring": "Get details of the volume.\n\nArgs:\nvolume_name (str): Name of the volume\n\nReturns:\ndict, details of the volume", "source": "juraj-google-style"}
{"code": "def first_timestamp(self, event_key=None):\n    if (event_key is None):\n        timestamps = [self._trackers[key].first_timestamp for key in self._trackers]\n        return min((timestamp for timestamp in timestamps if (timestamp >= 0)))\n    else:\n        return self._trackers[event_key].first_timestamp", "docstring": "Obtain the first timestamp.\n\nArgs:\nevent_key: the type key of the sought events (e.g., constants.NAN_KEY).\nIf None, includes all event type keys.\n\nReturns:\nFirst (earliest) timestamp of all the events of the given type (or all\nevent types if event_key is None).", "source": "codesearchnet"}
{"code": "def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None,\n             dtype=tf.float32):\n  \n  mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER)\n  filenames = get_filenames(is_training, data_dir)\n  dataset = tf.data.Dataset.from_tensor_slices(filenames)\n\n  if is_training:\n    \n    dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES)\n\n  \n  dataset = dataset.flat_map(tf.data.TFRecordDataset)\n\n  return resnet_run_loop.process_record_dataset(\n      dataset=dataset,\n      is_training=is_training,\n      batch_size=batch_size,\n      shuffle_buffer=_SHUFFLE_BUFFER,\n      parse_record_fn=parse_record,\n      num_epochs=num_epochs,\n      num_gpus=num_gpus,\n      examples_per_epoch=_NUM_IMAGES['train'] if is_training else None,\n      dtype=dtype\n  )", "docstring": "Input function which provides batches for train or eval.\n\nArgs:\nis_training: A boolean denoting whether the input is for training.\ndata_dir: The directory containing the input data.\nbatch_size: The number of samples per batch.\nnum_epochs: The number of epochs to repeat the dataset.\nnum_gpus: The number of gpus used for training.\ndtype: Data type to use for images/features\n\nReturns:\nA dataset that can be used for iteration.", "source": "juraj-google-style"}
{"code": "def get_user_info_for_username(self, username, _connection=None):\n    ldap_filter = '(&({0}={1}){2})'.format(self.config.get('LDAP_USER_LOGIN_ATTR'), username, self.config.get('LDAP_USER_OBJECT_FILTER'))\n    return self.get_object(dn=self.full_user_search_dn, filter=ldap_filter, attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'), _connection=_connection)", "docstring": "Gets info about a user at a specified username by searching the\nUsers DN. Username attribute is the same as specified as\nLDAP_USER_LOGIN_ATTR.\n\n\nArgs:\nusername (str): Username of the user to search for.\n_connection (ldap3.Connection): A connection object to use when\nsearching. If not given, a temporary connection will be\ncreated, and destroyed after use.\nReturns:\ndict: A dictionary of the user info from LDAP", "source": "codesearchnet"}
{"code": "def _routing_enabled():\n    return sklearn.get_config().get('enable_metadata_routing', False)", "docstring": "Return whether metadata routing is enabled.\n\nReturns:\nenabled : bool\nWhether metadata routing is enabled. If the config is not set, it\ndefaults to False.\n\nTODO: remove when the config key is no longer available in scikit-learn", "source": "github-repos"}
{"code": "def _CalculateElementsDataSize(self, context):\n    elements_data_size = None\n    if self._HasElementsDataSize():\n        elements_data_size = self._EvaluateElementsDataSize(context)\n    elif self._HasNumberOfElements():\n        element_byte_size = self._element_data_type_definition.GetByteSize()\n        if (element_byte_size is not None):\n            number_of_elements = self._EvaluateNumberOfElements(context)\n            elements_data_size = (number_of_elements * element_byte_size)\n    return elements_data_size", "docstring": "Calculates the elements data size.\n\nArgs:\ncontext (Optional[DataTypeMapContext]): data type map context, used to\ndetermine the size hint.\n\nReturns:\nint: the elements data size or None if not available.", "source": "codesearchnet"}
{"code": "def append_with_data(url, data):\n    if (data is None):\n        return url\n    url_parts = list(urlparse(url))\n    query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True))\n    query.update(data)\n    url_parts[4] = URLHelper.query_dict_to_string(query)\n    return urlunparse(url_parts)", "docstring": "Append the given URL with the given data OrderedDict.\n\nArgs:\nurl (str): The URL to append.\ndata (obj): The key value OrderedDict to append to the URL.\n\nReturns:\nstr: The new URL.", "source": "codesearchnet"}
{"code": "def GetDisplayNameForPathSpec(cls, path_spec, mount_path=None, text_prepend=None):\n    if (not path_spec):\n        return None\n    relative_path = cls.GetRelativePathForPathSpec(path_spec, mount_path=mount_path)\n    if (not relative_path):\n        return path_spec.type_indicator\n    if text_prepend:\n        relative_path = '{0:s}{1:s}'.format(text_prepend, relative_path)\n    parent_path_spec = path_spec.parent\n    if (parent_path_spec and (path_spec.type_indicator in (dfvfs_definitions.TYPE_INDICATOR_BZIP2, dfvfs_definitions.TYPE_INDICATOR_GZIP))):\n        parent_path_spec = parent_path_spec.parent\n    if (parent_path_spec and (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW)):\n        store_index = getattr(path_spec.parent, 'store_index', None)\n        if (store_index is not None):\n            return 'VSS{0:d}:{1:s}:{2:s}'.format((store_index + 1), path_spec.type_indicator, relative_path)\n    return '{0:s}:{1:s}'.format(path_spec.type_indicator, relative_path)", "docstring": "Retrieves the display name of a path specification.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification.\nmount_path (Optional[str]): path where the file system that is used\nby the path specification is mounted, such as \"/mnt/image\". The\nmount path will be stripped from the absolute path defined by\nthe path specification.\ntext_prepend (Optional[str]): text to prepend.\n\nReturns:\nstr: human readable version of the path specification or None.", "source": "codesearchnet"}
{"code": "def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):\n    \n    np.random.seed(seed)\n    n_val = int(val_pct*n)\n    idx_start = cv_idx*n_val\n    idxs = np.random.permutation(n)\n    return idxs[idx_start:idx_start+n_val]", "docstring": "Get a list of index values for Validation set from a dataset\n\nArguments:\nn : int, Total number of elements in the data set.\ncv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)]\nval_pct : (int, float), validation set percentage\nseed : seed value for RandomState\n\nReturns:\nlist of indexes", "source": "juraj-google-style"}
{"code": "def ParseUserEngagedRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = WindowsTimelineUserEngagedEventData()\n    event_data.package_identifier = self._GetRowValue(query_hash, row, 'PackageName')\n    payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))\n    payload_json_string = payload_json_bytes.decode('utf-8')\n    payload = json.loads(payload_json_string)\n    if ('reportingApp' in payload):\n        event_data.reporting_app = payload['reportingApp']\n    if ('activeDurationSeconds' in payload):\n        event_data.active_duration_seconds = int(payload['activeDurationSeconds'])\n    timestamp = self._GetRowValue(query_hash, row, 'StartTime')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a timeline row that describes a user interacting with an app.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def container_type_mismatch(self, stack, cls, mutations, name):\n    details = f'Container: {self._pp.print_generic_type(cls)}\\n'\n    allowed_contained = ''\n    new_contained = ''\n    for formal in cls.formal_type_parameters.keys():\n        if formal in mutations:\n            params, values, _ = mutations[formal]\n            allowed_content = self._pp.print_type_of_instance(cls.get_formal_type_parameter(formal))\n            new_content = self._pp.join_printed_types(sorted((self._pp.print_type(v) for v in set(values.data) - set(params.data))))\n            allowed_contained += f'  {formal}: {allowed_content}\\n'\n            new_contained += f'  {formal}: {new_content}\\n'\n    annotation = self._pp.print_type_of_instance(cls)\n    details += 'Allowed contained types (from annotation %s):\\n%sNew contained types:\\n%s' % (annotation, allowed_contained, new_contained)\n    suffix = '' if name is None else ' for ' + name\n    err_msg = f'New container type{suffix} does not match type annotation'\n    self.error(stack, err_msg, details=details)", "docstring": "Invalid combination of annotation and mutation.\n\nArgs:\nstack: the frame stack\ncls: the container type\nmutations: a dict of {parameter name: (annotated types, new types)}\nname: the variable name (or None)", "source": "github-repos"}
{"code": "def remove_app(name, site):\n    \n    current_apps = list_apps(site)\n\n    if name not in current_apps:\n        log.debug('Application already absent: %s', name)\n        return True\n\n    ps_cmd = ['Remove-WebApplication',\n              '-Name', \"'{0}'\".format(name),\n              '-Site', \"'{0}'\".format(site)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    if cmd_ret['retcode'] != 0:\n        msg = 'Unable to remove application: {0}\\nError: {1}' \\\n              ''.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n\n    new_apps = list_apps(site)\n\n    if name not in new_apps:\n        log.debug('Application removed successfully: %s', name)\n        return True\n\n    log.error('Unable to remove application: %s', name)\n    return False", "docstring": "Remove an IIS application.\n\nArgs:\nname (str): The application name.\nsite (str): The IIS site name.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.remove_app name='app0' site='site0'", "source": "juraj-google-style"}
{"code": "def get_temp_dir():\n    return _googletest.GetTempDir()", "docstring": "Returns a temporary directory for use during tests.\n\nThere is no need to delete the directory after the test.\n\n@compatibility(TF2)\nThis function is removed in TF2. Please use `TestCase.get_temp_dir` instead\nin a test case.\nOutside of a unit test, obtain a temporary directory through Python's\n`tempfile` module.\n@end_compatibility\n\nReturns:\nThe temporary directory.", "source": "github-repos"}
{"code": "def create_as(access_token, subscription_id, resource_group, as_name, update_domains, fault_domains, location):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/availabilitySets/', as_name, '?api-version=', COMP_API])\n    as_body = {'location': location}\n    properties = {'platformUpdateDomainCount': update_domains}\n    properties['platformFaultDomainCount'] = fault_domains\n    as_body['properties'] = properties\n    body = json.dumps(as_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create availability set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nas_name (str): Name of the new availability set.\nupdate_domains (int): Number of update domains.\nfault_domains (int): Number of fault domains.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. JSON body of the availability set properties.", "source": "codesearchnet"}
{"code": "def _handle_join_dags(self, request):\n    if (request.payload['names'] is None):\n        send_response = (len(self._dags_running) <= 1)\n    else:\n        send_response = all([(name not in self._dags_running.keys()) for name in request.payload['names']])\n    if send_response:\n        return Response(success=True, uid=request.uid)\n    else:\n        return None", "docstring": "The handler for the join_dags request.\n\nIf dag names are given in the payload only return a valid Response if none of\nthe dags specified by the names are running anymore. If no dag names are given,\nwait for all dags except one, which by design is the one that issued the request,\nto be finished.\n\nArgs:\nrequest (Request): Reference to a request object containing the\nincoming request.\n\nReturns:\nResponse: A response object containing the following fields:\n- success: True if all dags the request was waiting for have\ncompleted.", "source": "codesearchnet"}
{"code": "def from_file(cls, fp, is_outlook=False):\n        \n        log.debug(\"Parsing email from file {!r}\".format(fp))\n\n        with ported_open(fp) as f:\n            message = email.message_from_file(f)\n\n        if is_outlook:\n            log.debug(\"Removing temp converted Outlook email {!r}\".format(fp))\n            os.remove(fp)\n\n        return cls(message)", "docstring": "Init a new object from a file path.\n\nArgs:\nfp (string): file path of raw email\nis_outlook (boolean): if True is an Outlook email\n\nReturns:\nInstance of MailParser", "source": "juraj-google-style"}
{"code": "def _select_in_voltage_range(self, min_voltage=None, max_voltage=None):\n        \n        min_voltage = min_voltage if min_voltage is not None \\\n            else self.min_voltage\n        max_voltage = max_voltage if max_voltage is not None \\\n            else self.max_voltage\n        return list(filter(lambda p: min_voltage <= p.voltage <= max_voltage,\n                           self.voltage_pairs))", "docstring": "Selects VoltagePairs within a certain voltage range.\n\nArgs:\nmin_voltage (float): The minimum allowable voltage for a given\nstep.\nmax_voltage (float): The maximum allowable voltage allowable for a\ngiven step.\n\nReturns:\nA list of VoltagePair objects", "source": "juraj-google-style"}
{"code": "def LR_predict(w, b, X):\n    \n\n    m = X.shape[1]\n    Y_prediction = np.zeros((1, m))\n    w = w.reshape(X.shape[0], 1)\n\n    A = sigmoid(np.dot(w.T, X) + b)\n\n    for i in range(A.shape[1]):\n        if A[0, i] > 0.5:\n            Y_prediction[0, i] = 1.0\n        else:\n            Y_prediction[0, i] = 0.0\n\n    assert (Y_prediction.shape == (1, m))\n\n    return Y_prediction", "docstring": "Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\nArguments:\nw -- weights, a numpy array of size (num_px * num_px * 3, 1)\nb -- bias, a scalar\nX -- data of size (num_px * num_px * 3, number of examples)\n\nReturns:\nY_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X", "source": "juraj-google-style"}
{"code": "def parse_hpo_diseases(hpo_lines):\n    \n    diseases = {}\n    LOG.info(\"Parsing hpo diseases...\")\n    for index, line in enumerate(hpo_lines):\n        \n        if index == 0:\n            continue\n        \n        if not len(line) > 3:\n            continue\n        \n        disease_info = parse_hpo_disease(line)\n        \n        if not disease_info:\n            continue\n        disease_nr = disease_info['disease_nr']\n        hgnc_symbol = disease_info['hgnc_symbol']\n        hpo_term = disease_info['hpo_term']\n        source = disease_info['source']\n        disease_id = \"{0}:{1}\".format(source, disease_nr)\n        \n        if disease_id not in diseases:\n            diseases[disease_id] = {\n                'disease_nr': disease_nr,\n                'source': source,\n                'hgnc_symbols': set(),\n                'hpo_terms': set(),\n            }\n\n        if hgnc_symbol:\n            diseases[disease_id]['hgnc_symbols'].add(hgnc_symbol)\n        if hpo_term:\n            diseases[disease_id]['hpo_terms'].add(hpo_term)\n\n    LOG.info(\"Parsing done.\")\n    return diseases", "docstring": "Parse hpo disease phenotypes\n\nArgs:\nhpo_lines(iterable(str))\n\nReturns:\ndiseases(dict): A dictionary with mim numbers as keys", "source": "juraj-google-style"}
{"code": "def rotation_matrix(self):\n    self._normalise()\n    product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n    return product_matrix[1:][(:, 1:)]", "docstring": "Get the 3x3 rotation matrix equivalent of the quaternion rotation.\n\nReturns:\nA 3x3 orthogonal rotation matrix as a 3x3 Numpy array\n\nNote:\nThis feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.", "source": "codesearchnet"}
{"code": "def on_predict_begin(self, logs=None):", "docstring": "Called at the beginning of prediction.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this\nmethod but that may change in the future.", "source": "github-repos"}
{"code": "def validate_and_slice_inputs(names_to_saveables):\n    saveables = []\n    seen_ops = object_identity.ObjectIdentitySet()\n    for name, op in sorted(names_to_saveables.items(), key=lambda x: x[0]):\n        for converted_saveable_object in saveable_objects_for_op(op, name):\n            _add_saveable(saveables, seen_ops, converted_saveable_object)\n    return saveables", "docstring": "Returns the variables and names that will be used for a Saver.\n\nArgs:\nnames_to_saveables: A dict (k, v) where k is the name of an operation and\nv is an operation to save or a BaseSaverBuilder.Saver.\n\nReturns:\nA list of SaveableObjects.\n\nRaises:\nTypeError: If any of the keys are not strings or any of the\nvalues are not one of Tensor or Variable or a trackable operation.\nValueError: If the same operation is given in more than one value\n(this also applies to slices of SlicedVariables).", "source": "github-repos"}
{"code": "def read_infile(infile: Union[(Path, str)], from_words=False, word_column: int=WORD_COLUMN, pos_column: int=POS_COLUMN, tag_column: int=TAG_COLUMN, max_sents: int=(- 1), read_only_words: bool=False) -> List[Tuple[(List, Union[(List, None)])]]:\n    (answer, curr_word_sent, curr_tag_sent) = ([], [], [])\n    if from_words:\n        (word_column, read_only_words) = (0, True)\n    with open(infile, 'r', encoding='utf8') as fin:\n        for line in fin:\n            line = line.strip()\n            if line.startswith('\n                continue\n            if (line == ''):\n                if (len(curr_word_sent) > 0):\n                    if read_only_words:\n                        curr_tag_sent = None\n                    answer.append((curr_word_sent, curr_tag_sent))\n                (curr_tag_sent, curr_word_sent) = ([], [])\n                if (len(answer) == max_sents):\n                    break\n                continue\n            splitted = line.split('\\t')\n            index = splitted[0]\n            if ((not from_words) and (not index.isdigit())):\n                continue\n            curr_word_sent.append(splitted[word_column])\n            if (not read_only_words):\n                (pos, tag) = (splitted[pos_column], splitted[tag_column])\n                tag = (pos if (tag == '_') else '{},{}'.format(pos, tag))\n                curr_tag_sent.append(tag)\n        if (len(curr_word_sent) > 0):\n            if read_only_words:\n                curr_tag_sent = None\n            answer.append((curr_word_sent, curr_tag_sent))\n    return answer", "docstring": "Reads input file in CONLL-U format\n\nArgs:\ninfile: a path to a file\nword_column: column containing words (default=1)\npos_column: column containing part-of-speech labels (default=3)\ntag_column: column containing fine-grained tags (default=5)\nmax_sents: maximal number of sents to read\nread_only_words: whether to read only words\n\nReturns:\na list of sentences. Each item contains a word sequence and a tag sequence, which is ``None``\nin case ``read_only_words = True``", "source": "codesearchnet"}
{"code": "def remove(self, dic):\n        \n        for kw in dic:\n            removePair = Pair(kw, dic[kw])\n            self._remove([removePair])", "docstring": "remove the pair by passing a identical dict\n\nArgs:\ndic (dict): key and value", "source": "juraj-google-style"}
{"code": "def get_vulnerability_chains(\n    current_node,\n    sink,\n    def_use,\n    chain=[]\n):\n    \n    for use in def_use[current_node]:\n        if use == sink:\n            yield chain\n        else:\n            vuln_chain = list(chain)\n            vuln_chain.append(use)\n            yield from get_vulnerability_chains(\n                use,\n                sink,\n                def_use,\n                vuln_chain\n            )", "docstring": "Traverses the def-use graph to find all paths from source to sink that cause a vulnerability.\n\nArgs:\ncurrent_node()\nsink()\ndef_use(dict):\nchain(list(Node)): A path of nodes between source and sink.", "source": "juraj-google-style"}
{"code": "def check(self, orb):\n        \n\n        return self.prev is not None and np.sign(self(orb)) != np.sign(self(self.prev))", "docstring": "Method that check whether or not the listener is triggered\n\nArgs:\norb (Orbit):\n\nReturn:\nbool: True if there is a zero-crossing for the parameter watched by the listener", "source": "juraj-google-style"}
{"code": "def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):\n    if (not binary_data_value):\n        return None\n    try:\n        return binary_data_value.decode('utf-8')\n    except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning('invalid binary data string value: {0:s}'.format(repr(binary_data_value)))\n        return None", "docstring": "Parses a binary data value as string\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nbinary_data_value (bytes): binary data value\n(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)\n\nReturns:\nstr: binary data value formatted as a string or None if no string could\nbe extracted or binary data value is None (NULL).", "source": "codesearchnet"}
{"code": "def text(self, value):\n        \n        self._text = value\n        self.timestamps.edited = datetime.datetime.utcnow()\n        self.touch(True)", "docstring": "Set the text value.\n\nArgs:\nvalue (str): Text value.", "source": "juraj-google-style"}
{"code": "def is_outlier(df, item_id, segment_id, price):\n    \n\n    if (segment_id, item_id) not in df.index:\n        return False\n\n    mean = df.loc[(segment_id, item_id)]['mean']\n    std = df.loc[(segment_id, item_id)]['std']\n\n    return gaussian_outlier.is_outlier(\n        x=price, mean=mean, standard_deviation=std\n    )", "docstring": "Verify if a item is an outlier compared to the\nother occurrences of the same item, based on his price.\n\nArgs:\nitem_id: idPlanilhaItens\nsegment_id: idSegmento\nprice: VlUnitarioAprovado", "source": "juraj-google-style"}
{"code": "def reindex(self, kdims=[], force=False):\n        \n        if not isinstance(kdims, list):\n            kdims = [kdims]\n        kdims = [self.get_dimension(kd, strict=True) for kd in kdims]\n        dropped = [kd for kd in self.kdims if kd not in kdims]\n        if dropped:\n            raise ValueError(\"DynamicMap does not allow dropping dimensions, \"\n                             \"reindex may only be used to reorder dimensions.\")\n        return super(DynamicMap, self).reindex(kdims, force)", "docstring": "Reorders key dimensions on DynamicMap\n\nCreate a new object with a reordered set of key dimensions.\nDropping dimensions is not allowed on a DynamicMap.\n\nArgs:\nkdims: List of dimensions to reindex the mapping with\nforce: Not applicable to a DynamicMap\n\nReturns:\nReindexed DynamicMap", "source": "juraj-google-style"}
{"code": "def _run_eager_benchmark(self, iterable, iters, warmup):\n    deltas = []\n    if not context.executing_eagerly():\n        raise RuntimeError('Eager mode benchmarking is not supported in graph mode.')\n    for _ in range(iters):\n        if warmup:\n            iterator = iter(iterable)\n            next(iterator)\n        iterator = iter(iterable)\n        start = time.time()\n        next(iterator)\n        end = time.time()\n        deltas.append(end - start)\n    return np.median(deltas)", "docstring": "Benchmark the iterable in eager mode.\n\nRuns the iterable `iters` times. In each iteration, the benchmark measures\nthe time it takes to go execute the iterable.\n\nArgs:\niterable: The tf op or tf.data Dataset to benchmark.\niters: Number of times to repeat the timing.\nwarmup: If true, warms up the session caches by running an untimed run.\n\nReturns:\nA float, representing the median time (with respect to `iters`)\nit takes for the iterable to be executed `iters` num of times.\n\nRaises:\nRuntimeError: When executed in graph mode.", "source": "github-repos"}
{"code": "def key_validation_check(tweet_keys_list, superset_keys, minset_keys):\n    \n    \n    tweet_keys = set(tweet_keys_list)\n    minset_overlap = tweet_keys & minset_keys\n    if minset_overlap != minset_keys:\n        raise UnexpectedFormatError(\"keys ({}) missing from Tweet (Public API data is not supported)\"\n                                    .format(minset_keys - tweet_keys))\n    \n    unexpected_keys = tweet_keys - superset_keys\n    if len(unexpected_keys) > 0:\n        raise UnexpectedFormatError(\"Unexpected keys ({}) are in this Tweet\"\n                                    .format(unexpected_keys))\n    return 0", "docstring": "Validates the keys present in a Tweet.\n\nArgs:\ntweet_keys_list (list): the keys present in a tweet\nsuperset_keys (set): the set of all possible keys for a tweet\nminset_keys (set): the set of minimal keys expected in a tweet.\n\nReturns:\n0 if no errors\n\nRaises:\nUnexpectedFormatError on any mismatch of keys.", "source": "juraj-google-style"}
{"code": "def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool=False):\n    thresholds = thresholds[:]\n    if thresholds[0] < 0:\n        raise ValueError('Thresholds should be positive')\n    thresholds.insert(0, -float('inf'))\n    thresholds.append(float('inf'))\n    if not all((low <= high for low, high in zip(thresholds[:-1], thresholds[1:]))):\n        raise ValueError('Thresholds should be sorted.')\n    if not all((l in [-1, 0, 1] for l in labels)):\n        raise ValueError('All labels should be either -1, 0 or 1')\n    if len(labels) != len(thresholds) - 1:\n        raise ValueError('Number of labels should be equal to number of thresholds - 1')\n    self.thresholds = thresholds\n    self.labels = labels\n    self.allow_low_quality_matches = allow_low_quality_matches", "docstring": "Args:\nthresholds (`list[float]`):\nA list of thresholds used to stratify predictions into levels.\nlabels (`list[int`):\nA list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1}\nsignifying {ignore, negative class, positive class}, respectively.\nallow_low_quality_matches (`bool`, *optional*, defaults to `False`):\nIf `True`, produce additional matches for predictions with maximum match quality lower than\nhigh_threshold. See `set_low_quality_matches_` for more details.\n\nFor example,\nthresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and\nthus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will\nbe marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and\nthus will be considered as true positives.", "source": "github-repos"}
{"code": "def signature_type(self):\n    if (not self.mardata.signatures):\n        return None\n    for sig in self.mardata.signatures.sigs:\n        if (sig.algorithm_id == 1):\n            return 'sha1'\n        elif (sig.algorithm_id == 2):\n            return 'sha384'\n    else:\n        return 'unknown'", "docstring": "Return the signature type used in this MAR.\n\nReturns:\nOne of None, 'unknown', 'sha1', or 'sha384'", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    max_size = kwargs.pop('max_size', None)\n    size = get_size_dict(size, max_size=max_size, default_to_square=False)\n    if 'shortest_edge' in size and 'longest_edge' in size:\n        size, max_size = (size['shortest_edge'], size['longest_edge'])\n    elif 'height' in size and 'width' in size:\n        size = (size['height'], size['width'])\n        max_size = None\n    else:\n        raise ValueError(f\"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.\")\n    size = get_mask2former_resize_output_image_size(image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, input_data_format=input_data_format)\n    image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n    return image", "docstring": "Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an\nint, smaller edge of the image will be matched to this number.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nThe size of the output image.\nsize_divisor (`int`, *optional*, defaults to 0):\nIf `size_divisor` is given, the output image size will be divisible by the number.\nresample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):\nResampling filter to use when resizing the image.\ndata_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def GetAPFSVolumeByPathSpec(self, path_spec):\n    \n    volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)\n    if volume_index is None:\n      return None\n\n    return self._fsapfs_container.get_volume(volume_index)", "docstring": "Retrieves an APFS volume for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyfsapfs.volume: an APFS volume or None if not available.", "source": "juraj-google-style"}
{"code": "def transform(self, new_frame):\n    steps = self.__class__.steps(new_frame)\n    orbit = self.orbit\n    for (_from, _to) in steps:\n        from_obj = _from(self.date, orbit)\n        direct = ('_to_%s' % _to)\n        if hasattr(from_obj, direct):\n            (rotation, offset) = getattr(from_obj, direct)()\n        else:\n            to_obj = _to(self.date, orbit)\n            inverse = ('_to_%s' % _from)\n            if hasattr(to_obj, inverse):\n                (rotation, offset) = getattr(to_obj, inverse)()\n                rotation = rotation.T\n                offset = (- offset)\n            else:\n                raise NotImplementedError('Unknown transformation {} to {}'.format(_from, _to))\n        if getattr(_from, '_rotation_before_translation', False):\n            orbit = (offset + (rotation @ orbit))\n        else:\n            orbit = (rotation @ (offset + orbit))\n    return orbit", "docstring": "Change the frame of the orbit\n\nArgs:\nnew_frame (str)\nReturn:\nnumpy.ndarray", "source": "codesearchnet"}
{"code": "def get_summed_cohp_by_label_list(self, label_list, divisor=1):\n        \n        \n        first_cohpobject = self.get_cohp_by_label(label_list[0])\n        summed_cohp = first_cohpobject.cohp.copy()\n        summed_icohp = first_cohpobject.icohp.copy()\n        for label in label_list[1:]:\n            cohp_here = self.get_cohp_by_label(label)\n            summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up], cohp_here.cohp[Spin.up]], axis=0)\n            if Spin.down in summed_cohp:\n                summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down], cohp_here.cohp[Spin.down]], axis=0)\n            summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up], cohp_here.icohp[Spin.up]], axis=0)\n            if Spin.down in summed_icohp:\n                summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down], cohp_here.icohp[Spin.down]], axis=0)\n\n        divided_cohp = {}\n        divided_icohp = {}\n        divided_cohp[Spin.up] = np.divide(summed_cohp[Spin.up], divisor)\n        divided_icohp[Spin.up] = np.divide(summed_icohp[Spin.up], divisor)\n        if Spin.down in summed_cohp:\n            divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)\n            divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)\n\n        return Cohp(efermi=first_cohpobject.efermi, energies=first_cohpobject.energies, cohp=divided_cohp,\n                    are_coops=first_cohpobject.are_coops,\n                    icohp=divided_icohp)", "docstring": "Returns a COHP object that includes a summed COHP divided by divisor\n\nArgs:\nlabel_list: list of labels for the COHP that should be included in the summed cohp\ndivisor: float/int, the summed cohp will be divided by this divisor\nReturns:\nReturns a COHP object including a summed COHP", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected):\n    total_size_1 = 1\n    total_size_2 = 1\n    for s in tensor_in_sizes:\n        total_size_1 *= s\n    for s in filter_in_sizes:\n        total_size_2 *= s\n    x1 = np.array([f for f in range(1, total_size_1 + 1)])\n    x1 = x1.astype(np.uint8).reshape(tensor_in_sizes)\n    x1_min = 0.0\n    x1_max = 255.0\n    x2 = np.array([f for f in range(1, total_size_2 + 1)]).astype(np.uint8)\n    x2 = x2.astype(np.uint8).reshape(filter_in_sizes)\n    x2_min = 0.0\n    x2_max = 255.0\n    with self.cached_session(use_gpu=False) as sess:\n        t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtypes.quint8)\n        t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtypes.quint8)\n        conv = nn_ops.quantized_conv2d(t1, t2, out_type=dtypes.qint32, strides=[1, stride, stride, 1], padding=padding, min_input=x1_min, max_input=x1_max, min_filter=x2_min, max_filter=x2_max)\n        value = self.evaluate(conv)\n    quantized_output = value[0]\n    output_min = value[1]\n    output_max = value[2]\n    float_output = self._QuantizedOutputToFloat(quantized_output, output_min, output_max)\n    self.assertArrayNear(expected, float_output.flatten(), 1.0)\n    self.assertEqual(value[0].shape, conv[0].get_shape())", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in\n[kernel_rows, kernel_cols, input_depth, output_depth].\nstride: Stride.\npadding: Padding type.\nexpected: An array containing the expected operation outputs.", "source": "github-repos"}
{"code": "def __eq__(self, other: Any) -> bool:\n    if self is other:\n        return True\n    if isinstance(other, MissingValue):\n        return self._value_spec == other.value_spec\n    return MISSING_VALUE == other", "docstring": "Operator ==.\n\nNOTE: `MissingValue(value_spec) and `utils.MissingValue` are\nconsidered equal, but `MissingValue(value_spec1)` and\n`MissingValue(value_spec2)` are considered different. That being said,\nthe 'eq' operation is not transitive.\n\nHowever in practice this is not a problem, since user always compare\nagainst `schema.MISSING_VALUE` which is `utils.MissingValue`.\nTherefore the `__hash__` function returns the same value with\n`utils.MissingValue`.\n\nArgs:\nother: the value to compare against.\n\nReturns:\nTrue if the other value is a general MissingValue or MissingValue of the\nsame value spec.", "source": "github-repos"}
{"code": "def get_classes(tensors):\n    return nest.pack_sequence_as(tensors, [sparse_tensor.SparseTensor if isinstance(tensor, sparse_tensor.SparseTensor) else tensor_lib.Tensor for tensor in nest.flatten(tensors)])", "docstring": "Gets classes for a structure of tensors.\n\nArgs:\ntensors: the tensor structure to get classes for.\n\nReturns:\na structure matching the nested structure of `tensors`, containing\n`tf.sparse.SparseTensor` at positions where `tensors` contains a sparse\ntensor and `tf.Tensor` otherwise.", "source": "github-repos"}
{"code": "def StatFS(self, path=None):\n    if (platform.system() == 'Windows'):\n        raise RuntimeError('os.statvfs not available on Windows')\n    local_path = client_utils.CanonicalPathToLocalPath((path or self.path))\n    return os.statvfs(local_path)", "docstring": "Call os.statvfs for a given list of rdf_paths.\n\nOS X and Linux only.\n\nNote that a statvfs call for a network filesystem (e.g. NFS) that is\nunavailable, e.g. due to no network, will result in the call blocking.\n\nArgs:\npath: a Unicode string containing the path or None. If path is None the\nvalue in self.path is used.\n\nReturns:\nposix.statvfs_result object\nRaises:\nRuntimeError: if called on windows", "source": "codesearchnet"}
{"code": "def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):\n    super().__init__()\n    in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)\n    out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]\n    layers = []\n    for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):\n        layers.append(PredictionBlock(in_dim, out_dim, activation=nn.ReLU() if i < num_layers - 1 else nn.Identity()))\n    self.layers = nn.Sequential(*layers)", "docstring": "A classic Multi Layer Perceptron (MLP).\n\nArgs:\ninput_dim (`int`):\nThe input dimensions.\nhidden_dim (`int`):\nThe hidden dimensions.\noutput_dim (`int`):\nThe output dimensions.\nnum_layers (int, *optional*, defaults to 3):\nThe number of layers.", "source": "github-repos"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(SignatureVerifyRequestPayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream):\n        self._cryptographic_parameters = attributes.CryptographicParameters()\n        self._cryptographic_parameters.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.DATA, local_stream):\n        self._data = primitives.ByteString(tag=enums.Tags.DATA)\n        self._data.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.DIGESTED_DATA, local_stream):\n        self._digested_data = primitives.ByteString(tag=enums.Tags.DIGESTED_DATA)\n        self._digested_data.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.SIGNATURE_DATA, local_stream):\n        self._signature_data = primitives.ByteString(tag=enums.Tags.SIGNATURE_DATA)\n        self._signature_data.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.CORRELATION_VALUE, local_stream):\n        self._correlation_value = primitives.ByteString(tag=enums.Tags.CORRELATION_VALUE)\n        self._correlation_value.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.INIT_INDICATOR, local_stream):\n        self._init_indicator = primitives.Boolean(tag=enums.Tags.INIT_INDICATOR)\n        self._init_indicator.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.FINAL_INDICATOR, local_stream):\n        self._final_indicator = primitives.Boolean(tag=enums.Tags.FINAL_INDICATOR)\n        self._final_indicator.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the SignatureVerify request payload and decode\nit into its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    return self._fsapfs_file_entry.read(size=size)", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def loads(s, single=False):\n    corpus = etree.fromstring(s)\n    if single:\n        ds = _deserialize_mrs(next(corpus))\n    else:\n        ds = (_deserialize_mrs(mrs_elem) for mrs_elem in corpus)\n    return ds", "docstring": "Deserialize MRX string representations\n\nArgs:\ns (str): a MRX string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of Xmrs objects (unless *single* is `True`)", "source": "codesearchnet"}
{"code": "def _safe_mean(losses, num_present):\n    total_loss = math_ops.reduce_sum(losses)\n    return math_ops.div_no_nan(total_loss, num_present, name='value')", "docstring": "Computes a safe mean of the losses.\n\nArgs:\nlosses: `Tensor` whose elements contain individual loss measurements.\nnum_present: The number of measurable elements in `losses`.\n\nReturns:\nA scalar representing the mean of `losses`. If `num_present` is zero,\nthen zero is returned.", "source": "github-repos"}
{"code": "def _connect_nodes(self, first, second):\n    if isinstance(first, Node):\n        first.next.add(second)\n        second.prev.add(first)\n        self.forward_edges.add((first, second))\n    else:\n        for node in first:\n            self._connect_nodes(node, second)", "docstring": "Connects nodes to signify that control flows from first to second.\n\nArgs:\nfirst: Union[Set[Node, ...], Node]\nsecond: Node", "source": "github-repos"}
{"code": "def as_dict(self, voigt=False):\n    input_array = (self.voigt if voigt else self)\n    d = {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'input_array': input_array.tolist()}\n    if voigt:\n        d.update({'voigt': voigt})\n    return d", "docstring": "Serializes the tensor object\n\nArgs:\nvoigt (bool): flag for whether to store entries in\nvoigt-notation.  Defaults to false, as information\nmay be lost in conversion.\n\nReturns (Dict):\nserialized format tensor object", "source": "codesearchnet"}
{"code": "def _parse_trunk_groups(self, config):\n        \n        values = re.findall(r'switchport trunk group ([^\\s]+)', config, re.M)\n        return dict(trunk_groups=values)", "docstring": "Scans the specified config and parses the trunk group values\n\nArgs:\nconfig (str): The interface configuraiton blcok\n\nReturns:\nA dict object with the trunk group values that can be merged\ninto the resource dict", "source": "juraj-google-style"}
{"code": "def publish_traceback(debug_server_urls, graph, feed_dict, fetches, old_graph_version):\n    from tensorflow.python.debug.lib import source_remote\n    if graph.version > old_graph_version:\n        run_key = common.get_run_key(feed_dict, fetches)\n        source_remote.send_graph_tracebacks(debug_server_urls, run_key, traceback.extract_stack(), graph, send_source=True)\n        return graph.version\n    else:\n        return old_graph_version", "docstring": "Publish traceback and source code if graph version is new.\n\n`graph.version` is compared with `old_graph_version`. If the former is higher\n(i.e., newer), the graph traceback and the associated source code is sent to\nthe debug server at the specified gRPC URLs.\n\nArgs:\ndebug_server_urls: A single gRPC debug server URL as a `str` or a `list` of\ndebug server URLs.\ngraph: A Python `tf.Graph` object.\nfeed_dict: Feed dictionary given to the `Session.run()` call.\nfetches: Fetches from the `Session.run()` call.\nold_graph_version: Old graph version to compare to.\n\nReturns:\nIf `graph.version > old_graph_version`, the new graph version as an `int`.\nElse, the `old_graph_version` is returned.", "source": "github-repos"}
{"code": "def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True, encoding=None):\n    root = get_etree_root(xml_file, encoding=encoding)\n    if check_root:\n        self._check_root_tag(root)\n    if check_version:\n        self._check_version(root)\n    entity_class = self.get_entity_class(root.tag)\n    entity_obj = entity_class._binding_class.factory()\n    entity_obj.build(root)\n    return entity_obj", "docstring": "Creates a STIX binding object from the supplied xml file.\n\nArgs:\nxml_file: A filename/path or a file-like object representing a STIX\ninstance document\ncheck_version: Inspect the version before parsing.\ncheck_root: Inspect the root element before parsing.\nencoding: The character encoding of the input `xml_file`.\n\nRaises:\n.UnknownVersionError: If `check_version` is ``True`` and `xml_file`\ndoes not contain STIX version information.\n.UnsupportedVersionError: If `check_version` is ``False`` and\n`xml_file` contains an unsupported STIX version.\n.UnsupportedRootElement: If `check_root` is ``True`` and `xml_file`\ncontains an invalid root element.", "source": "codesearchnet"}
{"code": "def __init__(self, element_type, dimensions, layout=None):\n    self.message = xla_data_pb2.ShapeProto()\n    self.message.element_type = element_type\n    if element_type == xla_data_pb2.TUPLE:\n        if not all((isinstance(subshape, Shape) for subshape in dimensions)):\n            raise ValueError('XLA tuple requires sequence of Shape objects as dimensions')\n        self._tuple_shapes = tuple(dimensions)\n        for component_shape in self._tuple_shapes:\n            component_message = self.message.tuple_shapes.add()\n            component_message.CopyFrom(component_shape.message)\n    else:\n        self.message.dimensions.extend(dimensions)\n        if layout is None:\n            layout = list(reversed(range(len(dimensions))))\n        self.message.layout.minor_to_major.extend(layout)", "docstring": "Creates a new XLA Shape.\n\nArgs:\nelement_type: element type from xla_data_pb2.\ndimensions: sequence of dimensions sizes (integers), or sequence\nof Shapes in the case of a tuple, i.e. when element_type is\nTUPLE.\nlayout: optional minor_to_major sequence for layout. If not given, the\ndefault major-to-minor layout is used.\n\nRaises:\nValueError: if element_type is TUPLE but dimensions are not Shape objects.", "source": "github-repos"}
{"code": "def parse_GSE(filepath):\n    \n    gpls = {}\n    gsms = {}\n    series_counter = 0\n    database = None\n    metadata = {}\n    gse_name = None\n    with utils.smart_open(filepath) as soft:\n        groupper = groupby(soft, lambda x: x.startswith(\"^\"))\n        for is_new_entry, group in groupper:\n            if is_new_entry:\n                entry_type, entry_name = __parse_entry(next(group))\n                logger.debug(\"%s: %s\" % (entry_type.upper(), entry_name))\n                if entry_type == \"SERIES\":\n                    gse_name = entry_name\n                    series_counter += 1\n                    if series_counter > 1:\n                        raise Exception(\n                            \"GSE file should contain only one series entry!\")\n                    is_data, data_group = next(groupper)\n                    message = (\"The key is not False, probably there is an \"\n                               \"error in the SOFT file\")\n                    assert not is_data, message\n                    metadata = parse_metadata(data_group)\n                elif entry_type == \"SAMPLE\":\n                    is_data, data_group = next(groupper)\n                    gsms[entry_name] = parse_GSM(data_group, entry_name)\n                elif entry_type == \"PLATFORM\":\n                    is_data, data_group = next(groupper)\n                    gpls[entry_name] = parse_GPL(data_group, entry_name)\n                elif entry_type == \"DATABASE\":\n                    is_data, data_group = next(groupper)\n                    database_metadata = parse_metadata(data_group)\n                    database = GEODatabase(name=entry_name,\n                                           metadata=database_metadata)\n                else:\n                    logger.error(\"Cannot recognize type %s\" % entry_type)\n    gse = GSE(name=gse_name,\n              metadata=metadata,\n              gpls=gpls,\n              gsms=gsms,\n              database=database)\n    return gse", "docstring": "Parse GSE SOFT file.\n\nArgs:\nfilepath (:obj:`str`): Path to GSE SOFT file.\n\nReturns:\n:obj:`GEOparse.GSE`: A GSE object.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(self.__class__, self).__init__(*args, **kwargs)\n\n        \n        if not self.name.strip():\n            raise ValueError(\".name property must be set!\")\n        if type(self.sub_trees) not in [list, tuple]:\n            raise ValueError(\".sub_trees property must contain list/tuple!\")\n        if type(self.sub_publications) not in [list, tuple]:\n            raise ValueError(\".sub_trees property must contain list/tuple!\")\n\n        if not self.path:\n            self.path = self.name\n\n        for sub_tree in self.sub_trees:\n            sub_tree.path = os.path.join(self.path, sub_tree.name)", "docstring": "Constructor.\n\nArgs:\nname (str): Name of the periodical.\nsub_trees (list): List of other trees.\nsub_publications (list): List of sub-publication UUID's.\naleph_id (str): ID used in aleph.\nissn (str): ISSN given to the periodical.\nis_public (bool): Is the tree public?\n\nRaises:\nValueError: In case that `name` is not set, or `sub_trees` or\n`sub_publications` is not list/tuple.", "source": "juraj-google-style"}
{"code": "def __init__(self, contents, out=None, prompt=None):\n    self._contents = contents\n    self._out = out or sys.stdout\n    self._search_pattern = None\n    self._search_direction = None\n    self.prev_pos, self.prev_nxt = self.PREV_POS_NXT_REPRINT\n    self._attr = console_attr.GetConsoleAttr()\n    self._width, self._height = self._attr.GetTermSize()\n    if not prompt:\n        prompt = '{bold}--({{percent}}%)--{normal}'.format(bold=self._attr.GetFontCode(bold=True), normal=self._attr.GetFontCode())\n    self._clear = '\\r{0}\\r'.format(' ' * (self._attr.DisplayWidth(prompt) - 6))\n    self._prompt = prompt\n    self._lines = []\n    for line in contents.splitlines():\n        self._lines += self._attr.SplitLine(line, self._width)", "docstring": "Constructor.\n\nArgs:\ncontents: The entire contents of the text lines to page.\nout: The output stream, log.out (effectively) if None.\nprompt: The page break prompt, a default prompt is used if None..", "source": "github-repos"}
{"code": "def get_cache_index_key(resource):\n    \n    if isinstance(resource, APIResource):\n        attr, attr_value = list(resource.get_cache_index_keys().items())[0]\n        key = (type(resource), attr, attr_value)\n    else:\n        key = tuple(resource)\n\n    if len(key) != 3:\n        raise TypeError('Cache key must be tuple of (class, key, value), got `{!r}` instead'.format(key))\n\n    if not issubclass(key[0], APIResource):\n        raise TypeError('First value of cache key must be a subclass of APIResource, got `{!r}` instead'.format(key[0]))\n\n    return key", "docstring": "Return a usable cache lookup key for an already initialized resource\n\nArgs:\nresource (APIResource|tuple): APIResource instance or 3-length tuple key returned from this function\n\nRaises:\nTypeError: If resource is not an APIResource instance or acceptable 3-length tuple cache key", "source": "juraj-google-style"}
{"code": "def roots_in_unit_interval(coeffs):\n    r\n    all_roots = polynomial.polyroots(coeffs)\n    \n    all_roots = all_roots[\n        (_UNIT_INTERVAL_WIGGLE_START < all_roots.real)\n        & (all_roots.real < _UNIT_INTERVAL_WIGGLE_END)\n    ]\n    \n    \n    real_inds = np.abs(all_roots.imag) < _IMAGINARY_WIGGLE\n    return all_roots[real_inds].real", "docstring": "r\"\"\"Compute roots of a polynomial in the unit interval.\n\nArgs:\ncoeffs (numpy.ndarray): A 1D array (size ``d + 1``) of coefficients in\nmonomial / power basis.\n\nReturns:\nnumpy.ndarray: ``N``-array of real values in :math:`\\left[0, 1\\right]`.", "source": "juraj-google-style"}
{"code": "def chrome_tracing_dump(self, filename=None):\n    profile_table = self.profile_table()\n    all_events = []\n    for (component_id_hex, component_events) in profile_table.items():\n        component_type = component_events[0]['component_type']\n        if (component_type not in ['worker', 'driver']):\n            continue\n        for event in component_events:\n            new_event = {'cat': event['event_type'], 'name': event['event_type'], 'pid': event['node_ip_address'], 'tid': ((event['component_type'] + ':') + event['component_id']), 'ts': self._seconds_to_microseconds(event['start_time']), 'dur': self._seconds_to_microseconds((event['end_time'] - event['start_time'])), 'ph': 'X', 'cname': self._default_color_mapping[event['event_type']], 'args': event['extra_data']}\n            if ('cname' in event['extra_data']):\n                new_event['cname'] = event['extra_data']['cname']\n            if ('name' in event['extra_data']):\n                new_event['name'] = event['extra_data']['name']\n            all_events.append(new_event)\n    if (filename is not None):\n        with open(filename, 'w') as outfile:\n            json.dump(all_events, outfile)\n    else:\n        return all_events", "docstring": "Return a list of profiling events that can viewed as a timeline.\n\nTo view this information as a timeline, simply dump it as a json file\nby passing in \"filename\" or using using json.dump, and then load go to\nchrome://tracing in the Chrome web browser and load the dumped file.\nMake sure to enable \"Flow events\" in the \"View Options\" menu.\n\nArgs:\nfilename: If a filename is provided, the timeline is dumped to that\nfile.\n\nReturns:\nIf filename is not provided, this returns a list of profiling\nevents. Each profile event is a dictionary.", "source": "codesearchnet"}
{"code": "def require(builder_name):\n    reg = ComponentRegistry()\n    for (_name, autobuild_func) in reg.load_extensions('iotile.autobuild', name_filter=builder_name):\n        return autobuild_func\n    raise BuildError('Cannot find required autobuilder, make sure the distribution providing it is installed', name=builder_name)", "docstring": "Find an advertised autobuilder and return it\n\nThis function searches through all installed distributions to find\nif any advertise an entry point with group 'iotile.autobuild' and\nname equal to builder_name.  The first one that is found is returned.\n\nThis function raises a BuildError if it cannot find the required\nautobuild function\n\nArgs:\nbuilder_name (string): The name of the builder to find\n\nReturns:\ncallable: the autobuilder function found in the search", "source": "codesearchnet"}
{"code": "def acl_required(permission, context):\n\n    def decorator(func):\n\n        @wraps(func)\n        async def wrapper(*args):\n            request = args[(- 1)]\n            if callable(context):\n                context = context()\n            if (await get_permitted(request, permission, context)):\n                return (await func(*args))\n            raise web.HTTPForbidden()\n        return wrapper\n    return decorator", "docstring": "Returns a decorator that checks if a user has the requested permission\nfrom the passed acl context.\n\nThis function constructs a decorator that can be used to check a aiohttp's\nview for authorization before calling it. It uses the get_permission()\nfunction to check the request against the passed permission and context. If\nthe user does not have the correct permission to run this function, it\nraises HTTPForbidden.\n\nArgs:\npermission: The specific permission requested.\ncontext: Either a sequence of ACL tuples, or a callable that returns a\nsequence of ACL tuples. For more information on ACL tuples, see\nget_permission()\n\nReturns:\nA decorator which will check the request passed has the permission for\nthe given context. The decorator will raise HTTPForbidden if the user\ndoes not have the correct permissions to access the view.", "source": "codesearchnet"}
{"code": "def gbest_idx(swarm):\n    best = 0\n    cmp = comparator(swarm[best].best_fitness)\n    for (idx, particle) in enumerate(swarm):\n        if cmp(particle.best_fitness, swarm[best].best_fitness):\n            best = idx\n    return best", "docstring": "gbest Neighbourhood topology function.\n\nArgs:\nswarm: list: The list of particles.\n\nReturns:\nint: The index of the gbest particle.", "source": "codesearchnet"}
{"code": "def get_posts(self, num=None, tag=None, private=False):\n    posts = self.posts\n    if (not private):\n        posts = [post for post in posts if post.public]\n    if tag:\n        posts = [post for post in posts if (tag in post.tags)]\n    if num:\n        return posts[:num]\n    return posts", "docstring": "Get all the posts added to the blog.\n\nArgs:\nnum (int): Optional. If provided, only return N posts (sorted by date,\nmost recent first).\ntag (Tag): Optional. If provided, only return posts that have a\nspecific tag.\nprivate (bool): By default (if False), private posts are not included.\nIf set to True, private posts will also be included.", "source": "codesearchnet"}
{"code": "def _ReadConstantDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        error_message = 'data type not supported as member'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    value = definition_values.get('value', None)\n    if (value is None):\n        error_message = 'missing value'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object = self._ReadSemanticDataTypeDefinition(definitions_registry, definition_values, data_types.ConstantDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_CONSTANT)\n    definition_object.value = value\n    return definition_object", "docstring": "Reads a constant data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nConstantDataTypeDefinition: constant data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def add_hgnc_id(self, genes):\n    genes_by_alias = self.genes_by_alias()\n    for gene in genes:\n        id_info = genes_by_alias.get(gene['hgnc_symbol'])\n        if (not id_info):\n            LOG.warning('Gene %s does not exist in scout', gene['hgnc_symbol'])\n            continue\n        gene['hgnc_id'] = id_info['true']\n        if (not id_info['true']):\n            if (len(id_info['ids']) > 1):\n                LOG.warning('Gene %s has ambiguous value, please choose one hgnc id in result', gene['hgnc_symbol'])\n            gene['hgnc_id'] = ','.join([str(hgnc_id) for hgnc_id in id_info['ids']])", "docstring": "Add the correct hgnc id to a set of genes with hgnc symbols\n\nArgs:\ngenes(list(dict)): A set of genes with hgnc symbols only", "source": "codesearchnet"}
{"code": "def _make_mail(self, complete=True):\n    mail = {}\n    keys = get_mail_keys(self.message, complete)\n    for i in keys:\n        log.debug('Getting header or part {!r}'.format(i))\n        value = getattr(self, i)\n        if value:\n            mail[i] = value\n    mail['has_defects'] = self.has_defects\n    if self.has_defects:\n        mail['defects'] = self.defects\n        mail['defects_categories'] = list(self.defects_categories)\n    return mail", "docstring": "This method assigns the right values to all tokens of email.\nReturns a parsed object\n\nKeyword Arguments:\ncomplete {bool} -- If True returns all mails parts\n(default: {True})\n\nReturns:\ndict -- Parsed email object", "source": "codesearchnet"}
{"code": "def _check_approval_wrapper(self, grr_object, grr_function, *args, **kwargs):\n    approval_sent = False\n    while True:\n        try:\n            return grr_function(*args, **kwargs)\n        except grr_errors.AccessForbiddenError as exception:\n            print('No valid approval found: {0!s}'.format(exception))\n            if approval_sent:\n                print('Approval not yet granted, waiting {0:d}s'.format(self._CHECK_APPROVAL_INTERVAL_SEC))\n                time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC)\n                continue\n            if (not self.approvers):\n                message = 'GRR needs approval but no approvers specified (hint: use --approvers)'\n                self.state.add_error(message, critical=True)\n                return None\n            grr_object.CreateApproval(reason=self.reason, notified_users=self.approvers)\n            approval_sent = True\n            print('{0!s}: approval request sent to: {1!s} (reason: {2:s})'.format(grr_object, self.approvers, self.reason))", "docstring": "Wraps a call to GRR functions checking for approval.\n\nArgs:\ngrr_object: the GRR object to create the eventual approval on.\ngrr_function: The GRR function requiring approval.\n*args: Positional arguments that are to be passed to `grr_function`.\n**kwargs: Keyword arguments that are to be passed to `grr_function`.\n\nReturns:\nThe return value of the execution of grr_function(*args, **kwargs).", "source": "codesearchnet"}
{"code": "def smart_init_mapping(candidate_mapping, instance1, instance2):\n    \n    random.seed()\n    matched_dict = {}\n    result = []\n    \n    no_word_match = []\n    for i, candidates in enumerate(candidate_mapping):\n        if not candidates:\n            \n            result.append(-1)\n            continue\n        \n        value1 = instance1[i][2]\n        for node_index in candidates:\n            value2 = instance2[node_index][2]\n            \n            \n            if value1 == value2:\n                if node_index not in matched_dict:\n                    result.append(node_index)\n                    matched_dict[node_index] = 1\n                    break\n        if len(result) == i:\n            no_word_match.append(i)\n            result.append(-1)\n    \n    for i in no_word_match:\n        candidates = list(candidate_mapping[i])\n        while candidates:\n            \n            rid = random.randint(0, len(candidates) - 1)\n            candidate = candidates[rid]\n            if candidate in matched_dict:\n                candidates.pop(rid)\n            else:\n                matched_dict[candidate] = 1\n                result[i] = candidate\n                break\n    return result", "docstring": "Initialize mapping based on the concept mapping (smart initialization)\nArguments:\ncandidate_mapping: candidate node match list\ninstance1: instance triples of AMR 1\ninstance2: instance triples of AMR 2\nReturns:\ninitialized node mapping between two AMRs", "source": "juraj-google-style"}
{"code": "def find_block_end(lines: List[str], start_index: int, indent: int) -> int:\n    indent = ' ' * indent\n    line_index = start_index + 1\n    while line_index < len(lines) and _should_continue(lines[line_index], indent):\n        line_index += 1\n    while len(lines[line_index - 1]) <= 1:\n        line_index -= 1\n    return line_index", "docstring": "Find the end of the class/func block starting at `start_index` in a source code (defined by `lines`).\n\nArgs:\nlines (`List[str]`):\nThe source code, represented by a list of lines.\nstart_index (`int`):\nThe starting index of the target class/func block.\nindent (`int`):\nThe indent of the class/func body.\n\nReturns:\n`int`: The index of the block's ending line plus by 1 (i.e. exclusive).", "source": "github-repos"}
{"code": "def lookup_id(self, group):\n        \n        filter = [\"(cn={})\".format(group), \"(objectclass=posixGroup)\"]\n        results = self.client.search(filter, ['gidNumber'])\n\n        if len(results) < 1:\n            raise ldap_tools.exceptions.NoGroupsFound(\n                'No Groups Returned by LDAP')\n        elif len(results) > 1:\n            raise ldap_tools.exceptions.TooManyResults(\n                'Multiple groups found. Please narrow your search.')\n        else:\n            return results[0].gidNumber.value", "docstring": "Lookup GID for the given group.\n\nArgs:\ngroup: Name of group whose ID needs to be looked up\n\nReturns:\nA bytestring representation of the group ID (gid)\nfor the group specified\n\nRaises:\nldap_tools.exceptions.NoGroupsFound:\nNo Groups were returned by LDAP\n\nldap_tools.exceptions.TooManyResults:\nMore than one group was returned by LDAP", "source": "juraj-google-style"}
{"code": "def from_file(cls, filename, constant_lattice=True, **kwargs):\n    fname = os.path.basename(filename)\n    if fnmatch(fname, '*XDATCAR*'):\n        structures = Xdatcar(filename).structures\n    elif fnmatch(fname, 'vasprun*.xml*'):\n        structures = Vasprun(filename).structures\n    else:\n        raise ValueError('Unsupported file')\n    return cls.from_structures(structures, constant_lattice=constant_lattice, **kwargs)", "docstring": "Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file\n\nArgs:\nfilename (str): The filename to read from.\nconstant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD\nsimulation. True results in\n\nReturns:\n(Trajectory)", "source": "codesearchnet"}
{"code": "def kld(d1, d2):\n    \n    d1, d2 = flatten(d1), flatten(d2)\n    return entropy(d1, d2, 2.0)", "docstring": "Return the Kullback-Leibler Divergence (KLD) between two distributions.\n\nArgs:\nd1 (np.ndarray): The first distribution.\nd2 (np.ndarray): The second distribution.\n\nReturns:\nfloat: The KLD of ``d1`` from ``d2``.", "source": "juraj-google-style"}
{"code": "def _get_elements(mol, label):\n    elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]\n    return elements", "docstring": "The the elements of the atoms in the specified order\n\nArgs:\nmol: The molecule. OpenBabel OBMol object.\nlabel: The atom indices. List of integers.\n\nReturns:\nElements. List of integers.", "source": "codesearchnet"}
{"code": "def _IsType(clean_lines, nesting_state, expr):\n    last_word = Match('^.*(\\\\b\\\\S+)$', expr)\n    if last_word:\n        token = last_word.group(1)\n    else:\n        token = expr\n    if _TYPES.match(token):\n        return True\n    typename_pattern = (('\\\\b(?:typename|class|struct)\\\\s+' + re.escape(token)) + '\\\\b')\n    block_index = (len(nesting_state.stack) - 1)\n    while (block_index >= 0):\n        if isinstance(nesting_state.stack[block_index], _NamespaceInfo):\n            return False\n        last_line = nesting_state.stack[block_index].starting_linenum\n        next_block_start = 0\n        if (block_index > 0):\n            next_block_start = nesting_state.stack[(block_index - 1)].starting_linenum\n        first_line = last_line\n        while (first_line >= next_block_start):\n            if (clean_lines.elided[first_line].find('template') >= 0):\n                break\n            first_line -= 1\n        if (first_line < next_block_start):\n            block_index -= 1\n            continue\n        for i in xrange(first_line, (last_line + 1), 1):\n            if Search(typename_pattern, clean_lines.elided[i]):\n                return True\n        block_index -= 1\n    return False", "docstring": "Check if expression looks like a type name, returns true if so.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nexpr: The expression to check.\nReturns:\nTrue, if token looks like a type.", "source": "codesearchnet"}
{"code": "def from_json(cls, json, image_config=None):\n    cls.image_config = image_config\n    return cls(**{attr: json.get((attr if (key is None) else key)) for (attr, key) in cls.JSON_MAPPING.items()})", "docstring": "Create a model instance\n\nArguments:\njson (:py:class:`dict`): The parsed JSON data.\nimage_config (:py:class:`dict`): The API image configuration\ndata.\n\nReturns:\n:py:class:`BaseModel`: The model instance.", "source": "codesearchnet"}
{"code": "def to_jdbc_url(self) -> str:\n    url = f'jdbc:postgresql:\n    properties = {'socketFactory': 'com.google.cloud.alloydb.SocketFactory', 'alloydbInstanceName': self.instance_name, 'alloydbIpType': self.ip_type}\n    if self.enable_iam_auth:\n        properties['alloydbEnableIAMAuth'] = 'true'\n    if self.target_principal:\n        properties['alloydbTargetPrincipal'] = self.target_principal\n    if self.delegates:\n        properties['alloydbDelegates'] = ','.join(self.delegates)\n    if self.admin_service_endpoint:\n        properties['alloydbAdminServiceEndpoint'] = self.admin_service_endpoint\n    if self.quota_project:\n        properties['alloydbQuotaProject'] = self.quota_project\n    if self.additional_properties:\n        properties.update(self.additional_properties)\n    property_string = '&'.join((f'{k}={v}' for k, v in properties.items()))\n    return url + property_string", "docstring": "Convert options to a properly formatted JDBC URL.\n\nReturns:\nJDBC URL string configured with all options.", "source": "github-repos"}
{"code": "def delete_direct(self, addresses):\n    with self._lock:\n        for address in addresses:\n            self._validate_write(address)\n            if (address in self._state):\n                self._state[address].set_deleted()\n            else:\n                fut = _ContextFuture(address=address)\n                self._state[address] = fut\n                fut.set_deleted()", "docstring": "Called in the context manager's delete method to either\nmark an entry for deletion , or create a new future and immediately\nset it for deletion in the future.\n\nArgs:\naddress_list (list of str): The unique full addresses.\n\nRaises:\nAuthorizationException", "source": "codesearchnet"}
{"code": "def vgg13(pretrained=False, **kwargs):\n    \n    if pretrained:\n        kwargs['init_weights'] = False\n    model = VGG(make_layers(cfg['B']), **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n    return model", "docstring": "VGG 13-layer model (configuration \"B\")\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"}
{"code": "def max_sequence_length(self, dataset_split):\n    \n    return {\n        problem.DatasetSplit.TRAIN: 64,\n        problem.DatasetSplit.EVAL: 128,\n        problem.DatasetSplit.TEST: 128\n    }[dataset_split]", "docstring": "Determine the maximum sequence length given a dataset_split.\n\nArgs:\ndataset_split: A problem.DatasetSplit.\n\nReturns:\nThe maximum length that a sequence can be for this dataset_split.", "source": "juraj-google-style"}
{"code": "def summary_dict(self):\n    d = {}\n    d['Requested'] = len(self.requested)\n    d['Executed'] = len(self.executed)\n    d['Passed'] = len(self.passed)\n    d['Failed'] = len(self.failed)\n    d['Skipped'] = len(self.skipped)\n    d['Error'] = len(self.error)\n    return d", "docstring": "Gets a dictionary that summarizes the stats of this test result.\n\nThe summary provides the counts of how many tests fall into each\ncategory, like 'Passed', 'Failed' etc.\n\nReturns:\nA dictionary with the stats of this test result.", "source": "github-repos"}
{"code": "def from_compatible_tensor_list(element_spec, tensor_list):\n    return _from_tensor_list_helper(lambda spec, value: spec._from_compatible_tensor_list(value), element_spec, tensor_list)", "docstring": "Returns an element constructed from the given spec and tensor list.\n\nArgs:\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\ntensor_list: A list of tensors to use for constructing the value.\n\nReturns:\nAn element constructed from the given spec and tensor list.\n\nRaises:\nValueError: If the number of tensors needed to construct an element for\nthe given spec does not match the given number of tensors.", "source": "github-repos"}
{"code": "def sign(self, message):\n        \n        message = _helpers._to_bytes(message, encoding='utf-8')\n        return rsa.pkcs1.sign(message, self._key, 'SHA-256')", "docstring": "Signs a message.\n\nArgs:\nmessage: bytes, Message to be signed.\n\nReturns:\nstring, The signature of the message for the given key.", "source": "juraj-google-style"}
{"code": "def url(self, url, owner=None, **kwargs):\n        \n        return URL(self.tcex, url, owner=owner, **kwargs)", "docstring": "Create the URL TI object.\n\nArgs:\nowner:\nurl:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_out_of_order(list_of_numbers):\n    result = []\n    for i in range(len(list_of_numbers)):\n        if (i == 0):\n            continue\n        if (list_of_numbers[i] < list_of_numbers[(i - 1)]):\n            result.append((list_of_numbers[(i - 1)], list_of_numbers[i]))\n    return result", "docstring": "Returns elements that break the monotonically non-decreasing trend.\n\nThis is used to find instances of global step values that are \"out-of-order\",\nwhich may trigger TensorBoard event discarding logic.\n\nArgs:\nlist_of_numbers: A list of numbers.\n\nReturns:\nA list of tuples in which each tuple are two elements are adjacent, but the\nsecond element is lower than the first.", "source": "codesearchnet"}
{"code": "def update_info(self, custom=None):\n        \n        self.figure.suptitle(self.info_string() if custom is None else custom)", "docstring": "Updates the figure's suptitle.\n\nCalls self.info_string() unless custom is provided.\n\nArgs:\ncustom: Overwrite it with this string, unless None.", "source": "juraj-google-style"}
{"code": "def extract_formats(config_handle):\n    \n    configurations = dict(config_handle)\n    formats = dict(configurations.get('formats', {}))\n    return formats", "docstring": "Get application formats.\n\nSee :class:`gogoutils.Formats` for available options.\n\nArgs:\nconfig_handle (configparser.ConfigParser): Instance of configurations.\n\nReturns:\ndict: Formats in ``{$format_type: $format_pattern}``.", "source": "juraj-google-style"}
{"code": "def get_all_ad_units(inventory_service):\n  \n  \n  statement = (ad_manager.StatementBuilder(version='v201811')\n               .OrderBy('id', ascending=True))\n\n  \n  keep_iterating = True\n  total_results = 0\n  found_ad_units = []\n  while keep_iterating:\n    page = inventory_service.getAdUnitsByStatement(statement.ToStatement())\n    if 'results' in page and len(page['results']):\n      total_results = page['totalResultSetSize']\n      found_ad_units.extend(page['results'])\n\n    statement.offset += statement.limit\n    keep_iterating = statement.offset < total_results\n\n  return found_ad_units", "docstring": "Download all ad units.\n\nArgs:\ninventory_service: An instance of the InventoryService.\n\nReturns:\nA list containing all ad units.", "source": "juraj-google-style"}
{"code": "def string_handle(self, name=None):\n    if name is None:\n        return self._string_handle\n    else:\n        return gen_dataset_ops.iterator_to_string_handle(self._iterator_resource, name=name)", "docstring": "Returns a string-valued `tf.Tensor` that represents this iterator.\n\nArgs:\nname: (Optional.) A name for the created operation.\n\nReturns:\nA scalar `tf.Tensor` of type `tf.string`.", "source": "github-repos"}
{"code": "def set(cls, values):\n    cls.mrc_out_el.text = values.get('mrc', '')\n    cls.oai_out_el.text = values.get('oai', '')\n    cls.dc_out_el.text = values.get('dc', '')\n    cls.filename = values.get('fn', 'fn')\n    cls.values = values", "docstring": "Set the elements from the data obtained from REST API.\n\nArgs:\nvalues (dict): Dict with ``mrc``, ``oai``, ``dc`` and ``fn`` keys.", "source": "codesearchnet"}
{"code": "def getTextBlocks(page, images=False):\n    \n    CheckParent(page)\n    dl = page.getDisplayList()\n    flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE\n    if images:\n        flags |= TEXT_PRESERVE_IMAGES\n    tp = dl.getTextPage(flags)\n    l = tp._extractTextBlocks_AsList()\n    del tp\n    del dl\n    return l", "docstring": "Return the text blocks on a page.\n\nNotes:\nLines in a block are concatenated with line breaks.\nArgs:\nimages: (bool) also return meta data of any images.\nImage data are never returned with this method.\nReturns:\nA list of the blocks. Each item contains the containing rectangle coordinates,\ntext lines, block type and running block number.", "source": "juraj-google-style"}
{"code": "def reset(self, indices, observations):\n    \n\n    \n    \n    \n    assert isinstance(indices, np.ndarray)\n    assert len(indices.shape) == 1\n    assert isinstance(observations, np.ndarray)\n    assert indices.shape[0] == observations.shape[0]\n\n    for index, observation in zip(indices, observations):\n      trajectory = self._trajectories[index]\n\n      \n      if not trajectory.is_active:\n        \n        trajectory.add_time_step(observation=observation)\n        \n        continue\n\n      \n      \n      \n\n      \n      \n      \n\n      \n      self._complete_trajectory(trajectory, index)\n\n      \n      \n      self._trajectories[index].add_time_step(observation=observation)", "docstring": "Resets trajectories at given indices and populates observations.\n\nReset can either be called right at the beginning, when there are no\ntime-steps, or to reset a currently active trajectory.\n\nIf resetting a currently active trajectory then we save it in\nself._completed_trajectories.\n\nArgs:\nindices: 1-D np.ndarray stating the indices to reset.\nobservations: np.ndarray of shape (indices len, obs.shape) of observations", "source": "juraj-google-style"}
{"code": "def authenticate(self, code: str) -> 'Preston':\n        \n        headers = self._get_authorization_headers()\n        data = {\n            'grant_type': 'authorization_code',\n            'code': code\n        }\n        r = self.session.post(self.TOKEN_URL, headers=headers, data=data)\n        if not r.status_code == 200:\n            raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')\n        new_kwargs = dict(self._kwargs)\n        response_data = r.json()\n        new_kwargs['access_token'] = response_data['access_token']\n        new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])\n        new_kwargs['refresh_token'] = response_data['refresh_token']\n        return Preston(**new_kwargs)", "docstring": "Authenticates using the code from the EVE SSO.\n\nA new Preston object is returned; this object is not modified.\n\nThe intended usage is:\n\nauth = preston.authenticate('some_code_here')\n\nArgs:\ncode: SSO code\n\nReturns:\nnew Preston, authenticated", "source": "juraj-google-style"}
{"code": "def assign(self, institute, case, user, link):\n    LOG.info('Creating event for assigning {0} to {1}'.format(user['name'].encode('utf-8'), case['display_name']))\n    self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='assign', subject=case['display_name'])\n    LOG.info('Updating {0} to be assigned with {1}'.format(case['display_name'], user['name']))\n    updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$addToSet': {'assignees': user['_id']}}, return_document=pymongo.ReturnDocument.AFTER)\n    return updated_case", "docstring": "Assign a user to a case.\n\nThis function will create an Event to log that a person has been assigned\nto a case. Also the user will be added to case \"assignees\".\n\nArguments:\ninstitute (dict): A institute\ncase (dict): A case\nuser (dict): A User object\nlink (str): The url to be used in the event\n\nReturns:\nupdated_case(dict)", "source": "codesearchnet"}
{"code": "def groups_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'channel': channel, 'thread_ts': thread_ts})\n    return self.api_call('groups.replies', http_verb='GET', params=kwargs)", "docstring": "Retrieve a thread of messages posted to a private channel\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nthread_ts (str): The timestamp of an existing message with 0 or more replies.\ne.g. '1234567890.123456'", "source": "codesearchnet"}
{"code": "def __init__(self, default: typing.Optional[bool]=MISSING_VALUE, is_noneable: bool=False, frozen: bool=False):\n    super().__init__(bool, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\ndefault: Default value for the value spec.\nis_noneable: If True, None is acceptable.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def ListChildPathInfos(self, client_id, path_type, components,\n                         timestamp=None):\n    \n    return self.ListDescendentPathInfos(\n        client_id, path_type, components, max_depth=1, timestamp=timestamp)", "docstring": "Lists path info records that correspond to children of given path.\n\nArgs:\nclient_id: An identifier string for a client.\npath_type: A type of a path to retrieve path information for.\ncomponents: A tuple of path components of a path to retrieve child path\ninformation for.\ntimestamp: If set, lists only descendants that existed only at that\ntimestamp.\n\nReturns:\nA list of `rdf_objects.PathInfo` instances sorted by path components.", "source": "juraj-google-style"}
{"code": "def users_setPhoto(self, *, image: Union[(str, IOBase)], **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    return self.api_call('users.setPhoto', files={'image': image}, data=kwargs)", "docstring": "Set the user profile photo\n\nArgs:\nimage (str): Supply the path of the image you'd like to upload.\ne.g. 'myimage.png'", "source": "codesearchnet"}
{"code": "def __init__(self, name, segments):\n        \n        self.name = name\n        self.meta = []\n        self.segments = sorted(segments, key=lambda s: s.points[0].time)", "docstring": "Constructor\n\nWhen constructing a track it's not guaranteed that the segments\nhave their properties computed. Call preprocess method over this\nclass, or over each segment to guarantee it.\n\nArgs:\nname (:obj:`str`)\nsegments(:obj:`list` of :obj:`Segment`)", "source": "juraj-google-style"}
{"code": "def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):\n    target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target)\n    output = tensor_conversion.convert_to_tensor_v2_with_dispatch(output)\n    if hasattr(output, '_keras_logits'):\n        output = output._keras_logits\n        if from_logits:\n            warnings.warn('\"`sparse_categorical_crossentropy` received `from_logits=True`, but the `output` argument was produced by a sigmoid or softmax activation and thus does not represent logits. Was this intended?\"')\n        from_logits = True\n    elif (not from_logits and (not isinstance(output, (ops.EagerTensor, variables_module.Variable))) and (output.op.type == 'Softmax')) and (not hasattr(output, '_keras_history')):\n        assert len(output.op.inputs) == 1\n        output = output.op.inputs[0]\n        from_logits = True\n    elif not from_logits:\n        epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n        output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)\n        output = math_ops.log(output)\n    if isinstance(output.shape, (tuple, list)):\n        output_rank = len(output.shape)\n    else:\n        output_rank = output.shape.ndims\n    if output_rank is not None:\n        axis %= output_rank\n        if axis != output_rank - 1:\n            permutation = list(itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))\n            output = array_ops.transpose(output, perm=permutation)\n    elif axis != -1:\n        raise ValueError('Cannot compute sparse categorical crossentropy with `axis={}` on an output tensor with unknown rank'.format(axis))\n    target = cast(target, 'int64')\n    output_shape = array_ops.shape_v2(output)\n    target_rank = target.shape.ndims\n    update_shape = target_rank is not None and output_rank is not None and (target_rank != output_rank - 1)\n    if update_shape:\n        target = flatten(target)\n        output = array_ops.reshape(output, [-1, output_shape[-1]])\n    if py_any((_is_symbolic_tensor(v) for v in [target, output])):\n        with get_graph().as_default():\n            res = nn.sparse_softmax_cross_entropy_with_logits_v2(labels=target, logits=output)\n    else:\n        res = nn.sparse_softmax_cross_entropy_with_logits_v2(labels=target, logits=output)\n    if update_shape and output_rank >= 3:\n        return array_ops.reshape(res, output_shape[:-1])\n    else:\n        return res", "docstring": "Categorical crossentropy with integer targets.\n\nArgs:\ntarget: An integer tensor.\noutput: A tensor resulting from a softmax\n(unless `from_logits` is True, in which\ncase `output` is expected to be the logits).\nfrom_logits: Boolean, whether `output` is the\nresult of a softmax, or is a tensor of logits.\naxis: Int specifying the channels axis. `axis=-1` corresponds to data\nformat `channels_last`, and `axis=1` corresponds to data format\n`channels_first`.\n\nReturns:\nOutput tensor.\n\nRaises:\nValueError: if `axis` is neither -1 nor one of the axes of `output`.", "source": "github-repos"}
{"code": "def parse_results(lines):\n    idx = 0\n    batch, onednn, model = (None, None, None)\n    state = State.FIND_CONFIG_OR_MODEL\n    while idx < len(lines):\n        if state is State.FIND_CONFIG_OR_MODEL:\n            config = re.match(\"\\\\+ echo 'BATCH=(?P<batch>[\\\\d]+), ONEDNN=(?P<onednn>[\\\\d]+)\", lines[idx])\n            if config:\n                batch = int(config.group('batch'))\n                onednn = int(config.group('onednn'))\n                batch_sizes.add(batch)\n            else:\n                model_re = re.search('tf-graphs\\\\/(?P<model>[\\\\w\\\\d_-]+).pb', lines[idx])\n                assert model_re\n                model = model_re.group('model')\n                models.add(model)\n                state = State.FIND_RUNNING_TIME\n        elif state is State.FIND_RUNNING_TIME:\n            match = re.search('no stats: (?P<avg>[\\\\d.]+)', lines[idx])\n            state = State.FIND_CONFIG_OR_MODEL\n            if match:\n                avg = float(match.group('avg'))\n                key = (model, batch, onednn)\n                assert None not in key\n                db[key] = avg\n            else:\n                continue\n        else:\n            raise RuntimeError('Reached the unreachable code.')\n        idx = idx + 1", "docstring": "Parses benchmark results from run_onednn_benchmarks.sh.\n\nStores results in a global dict.\n\nArgs:\nlines: Array of strings corresponding to each line of the output from\nrun_onednn_benchmarks.sh\n\nRaises:\nRuntimeError: If the program reaches an unknown state.", "source": "github-repos"}
{"code": "class _IdentityBlock(tf.keras.Model):\n\n    def __init__(self, kernel_size, filters, stage, block, data_format):\n        super(_IdentityBlock, self).__init__(name='')\n        filters1, filters2, filters3 = filters\n        conv_name_base = 'res' + str(stage) + block + '_branch'\n        bn_name_base = 'bn' + str(stage) + block + '_branch'\n        bn_axis = 1 if data_format == 'channels_first' else 3\n        self.conv2a = layers.Conv2D(filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format)\n        self.bn2a = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')\n        self.conv2b = layers.Conv2D(filters2, kernel_size, padding='same', data_format=data_format, name=conv_name_base + '2b')\n        self.bn2b = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')\n        self.conv2c = layers.Conv2D(filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)\n        self.bn2c = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')\n\n    def call(self, input_tensor, training=False):\n        x = self.conv2a(input_tensor)\n        x = self.bn2a(x, training=training)\n        x = tf.nn.relu(x)\n        x = self.conv2b(x)\n        x = self.bn2b(x, training=training)\n        x = tf.nn.relu(x)\n        x = self.conv2c(x)\n        x = self.bn2c(x, training=training)\n        x += input_tensor\n        return tf.nn.relu(x)", "docstring": "_IdentityBlock is the block that has no conv layer at shortcut.\n\nArgs:\nkernel_size: the kernel size of middle conv layer at main path\nfilters: list of integers, the filters of 3 conv layer at main path\nstage: integer, current stage label, used for generating layer names\nblock: 'a','b'..., current block label, used for generating layer names\ndata_format: data_format for the input ('channels_first' or\n'channels_last').", "source": "github-repos"}
{"code": "def mode(self, **kwargs):\n    axis = kwargs.get('axis', 0)\n\n    def mode_builder(df, **kwargs):\n        result = df.mode(**kwargs)\n        if ((not axis) and (len(df) != len(result))):\n            append_values = pandas.DataFrame(columns=result.columns, index=range(len(result), len(df)))\n            result = pandas.concat([result, append_values], ignore_index=True)\n        elif (axis and (len(df.columns) != len(result.columns))):\n            append_vals = pandas.DataFrame(columns=range(len(result.columns), len(df.columns)), index=result.index)\n            result = pandas.concat([result, append_vals], axis=1)\n        return pandas.DataFrame(result)\n    func = self._prepare_method(mode_builder, **kwargs)\n    new_data = self._map_across_full_axis(axis, func)\n    new_index = (pandas.RangeIndex(len(self.index)) if (not axis) else self.index)\n    new_columns = (self.columns if (not axis) else pandas.RangeIndex(len(self.columns)))\n    new_dtypes = self._dtype_cache\n    if (new_dtypes is not None):\n        new_dtypes.index = new_columns\n    return self.__constructor__(new_data, new_index, new_columns, new_dtypes).dropna(axis=axis, how='all')", "docstring": "Returns a new QueryCompiler with modes calculated for each label along given axis.\n\nReturns:\nA new QueryCompiler with modes calculated.", "source": "codesearchnet"}
{"code": "def pack_tag(field_number, wire_type):\n    \n    if not 0 <= wire_type <= _WIRETYPE_MAX:\n        raise errors.EncodeError('Unknown wire type: %d' % wire_type)\n    return (field_number << TAG_TYPE_BITS) | wire_type", "docstring": "Returns an unsigned 32-bit integer that encodes the field number and\nwire type information in standard protocol message wire format.\n\nArgs:\nfield_number: Expected to be an integer in the range [1, 1 << 29)\nwire_type: One of the WIRETYPE_* constants.", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, cookie_name, cookie_data, url, **kwargs):\n    if ((cookie_name is None) or (cookie_data is None)):\n        raise ValueError('Cookie name or data are not set.')\n    if (cookie_name != self.COOKIE_NAME):\n        raise errors.WrongPlugin('Not the correct cookie plugin for: {0:s} [{1:s}]'.format(cookie_name, self.NAME))\n    super(BaseCookiePlugin, self).Process(parser_mediator)\n    self.GetEntries(parser_mediator, cookie_data=cookie_data, url=url)", "docstring": "Determine if this is the right plugin for this cookie.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncookie_name (str): the name of the cookie value.\ncookie_data (bytes): the cookie data, as a byte sequence.\nurl (str): the full URL or path where the cookie was set.\n\nRaises:\nerrors.WrongPlugin: If the cookie name differs from the one\nsupplied in COOKIE_NAME.\nValueError: If cookie_name or cookie_data are not set.", "source": "codesearchnet"}
{"code": "def remove_instance(self, instance):\n        \n        \n        \n        query = { \"instance_id\" : instance.instance_id, \"binding_id\" : { \"$exists\" : False } }\n        \n        \n        try:\n            result = self.broker.delete_one(query)\n        except:\n            raise ErrStorageMongoConnection(\"Remove Instance\")\n        \n        \n        if result is not None and result.deleted_count == 1:\n            instance.provisioned = False\n        else:\n            raise ErrStorageRemoveInstance(instance.instance_id)", "docstring": "Remove an instance\n\nRemove an object from the MongoDB storage for caching\n\nArgs:\ninstance (AtlasServiceInstance.Instance): instance\n\nRaises:\nErrStorageMongoConnection: Error during MongoDB communication.\nErrStorageRemoveInstance: Failed to remove the instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, filesystem, os_path_module=None):\n        \n        self.filesystem = filesystem\n        self.sep = filesystem.path_separator\n        self.altsep = filesystem.alternative_path_separator\n        self.linesep = filesystem.line_separator()\n        self._os_module = os\n        if os_path_module is None:\n            self.path = FakePathModule(self.filesystem, self)\n        else:\n            warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,\n                          stacklevel=2)\n            self.path = os_path_module\n        if IS_PY2:\n            self.fdopen = self._fdopen_ver2\n        else:\n            self.fdopen = self._fdopen\n        self.__class__.devnull = ('/dev/nul' if filesystem.is_windows_fs\n                                  else '/dev/nul')", "docstring": "Also exposes self.path (to fake os.path).\n\nArgs:\nfilesystem: FakeFilesystem used to provide file system information\nos_path_module: (deprecated) Optional FakePathModule instance", "source": "juraj-google-style"}
{"code": "def connect_to_websocket(self):\n        \n        self.logger.info('Making websocket connection')\n        try:\n            if hasattr(self, '_ws'):\n                self._ws.close()\n        except:\n            self.logger.debug('Couldn\\'t terminate previous websocket connection')\n        self._ws = websocket.WebSocketApp(\n            self._get_websocket_address() + '?v=6&encoding=json',\n            on_message=self._ws_on_message,\n            on_error=self._ws_on_error,\n            on_close=self._ws_on_close\n        )\n        self._ws.on_open = self._ws_on_open\n        self._ws_run_forever_wrapper = WebSocketRunForeverWrapper(self.logger, self._ws)\n        self._ws_run_forever_wrapper.start()", "docstring": "Call this method to make the connection to the Discord websocket\n\nThis method is not blocking, so you'll probably want to call it after\ninitializating your Pycord object, and then move on with your code. When\nyou want to block on just maintaining the websocket connection, then call\n``keep_running``, and it'll block until your application is interrupted.\n\nArgs:\nNone", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[Sequence[OpenAIChatMessage]], model: _VLLMModelServer, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    return asyncio.run(self._async_run_inference(batch, model, inference_args))", "docstring": "Runs inferences on a batch of text strings.\n\nArgs:\nbatch: A sequence of examples as OpenAI messages.\nmodel: A _VLLMModelServer for connecting to the spun up server.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def update_ports(self, ports, id_or_uri):\n        \n        ports = merge_default_values(ports, {'type': 'port'})\n\n        uri = self._client.build_uri(id_or_uri) + \"/update-ports\"\n        return self._client.update(uri=uri, resource=ports)", "docstring": "Updates the switch ports. Only the ports under the management of OneView and those that are unlinked are\nsupported for update.\n\nNote:\nThis method is available for API version 300 or later.\n\nArgs:\nports: List of Switch Ports.\nid_or_uri: Can be either the switch id or the switch uri.\n\nReturns:\ndict: Switch", "source": "juraj-google-style"}
{"code": "def major_complex(network, state):\n    \n    log.info('Calculating major complex...')\n\n    result = complexes(network, state)\n    if result:\n        result = max(result)\n    else:\n        empty_subsystem = Subsystem(network, state, ())\n        result = _null_sia(empty_subsystem)\n\n    log.info(\"Finished calculating major complex.\")\n\n    return result", "docstring": "Return the major complex of the network.\n\nArgs:\nnetwork (Network): The |Network| of interest.\nstate (tuple[int]): The state of the network (a binary tuple).\n\nReturns:\nSystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with\nmaximal |big_phi|.", "source": "juraj-google-style"}
{"code": "def create_binary(self, key, value):\n        \n        data = None\n        if key is not None and value is not None:\n            try:\n                \n                \n                \n                data = self.db.create(\n                    key.strip(), json.dumps(base64.b64encode(bytes(value)).decode('utf-8'))\n                )\n            except TypeError:\n                \n                \n                \n                data = self.db.create(\n                    key.strip(), json.dumps(base64.b64encode(bytes(value, 'utf-8')).decode('utf-8'))\n                )\n        else:\n            self.tcex.log.warning(u'The key or value field was None.')\n        return data", "docstring": "Create method of CRUD operation for binary data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"}
{"code": "def parse_ped(ped_stream, family_type='ped'):\n    pedigree = FamilyParser(ped_stream, family_type=family_type)\n    if (len(pedigree.families) != 1):\n        raise PedigreeError('Only one case per ped file is allowed')\n    family_id = list(pedigree.families.keys())[0]\n    family = pedigree.families[family_id]\n    samples = [{'sample_id': ind_id, 'father': individual.father, 'mother': individual.mother, 'sex': SEX_MAP[individual.sex], 'phenotype': PHENOTYPE_MAP[int(individual.phenotype)]} for (ind_id, individual) in family.individuals.items()]\n    return (family_id, samples)", "docstring": "Parse out minimal family information from a PED file.\n\nArgs:\nped_stream(iterable(str))\nfamily_type(str): Format of the pedigree information\n\nReturns:\nfamily_id(str), samples(list[dict])", "source": "codesearchnet"}
{"code": "def with_start_after(self, after_namespace):\n    \n    namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)\n    return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)", "docstring": "Returns a copy of this NamespaceName with a new namespace_start.\n\nArgs:\nafter_namespace: A namespace string.\n\nReturns:\nA NamespaceRange object whose namespace_start is the lexographically next\nnamespace after the given namespace string.\n\nRaises:\nValueError: if the NamespaceRange includes only a single namespace.", "source": "juraj-google-style"}
{"code": "def __init__(self, cells, state_is_tuple=True):\n    logging.warning('`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class is equivalent as `tf.keras.layers.StackedRNNCells`, and will be replaced by that in Tensorflow 2.0.')\n    super(MultiRNNCell, self).__init__()\n    if not cells:\n        raise ValueError('Must specify at least one cell for MultiRNNCell.')\n    if not nest.is_nested(cells):\n        raise TypeError('cells must be a list or tuple, but saw: %s.' % cells)\n    if len(set((id(cell) for cell in cells))) < len(cells):\n        logging.log_first_n(logging.WARN, 'At least two cells provided to MultiRNNCell are the same object and will share weights.', 1)\n    self._cells = cells\n    for cell_number, cell in enumerate(self._cells):\n        if isinstance(cell, trackable.Trackable):\n            self._track_trackable(cell, name='cell-%d' % (cell_number,))\n    self._state_is_tuple = state_is_tuple\n    if not state_is_tuple:\n        if any((nest.is_nested(c.state_size) for c in self._cells)):\n            raise ValueError('Some cells return tuples of states, but the flag state_is_tuple is not set.  State sizes are: %s' % str([c.state_size for c in self._cells]))", "docstring": "Create a RNN cell composed sequentially of a number of RNNCells.\n\nArgs:\ncells: list of RNNCells that will be composed in this order.\nstate_is_tuple: If True, accepted and returned states are n-tuples, where\n`n = len(cells)`.  If False, the states are all concatenated along the\ncolumn axis.  This latter behavior will soon be deprecated.\n\nRaises:\nValueError: if cells is empty (not allowed), or at least one of the cells\nreturns a state tuple but the flag `state_is_tuple` is `False`.", "source": "github-repos"}
{"code": "def find_log_dir_and_names(program_name=None, log_dir=None):\n    if (not program_name):\n        program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n        program_name = ('py_%s' % program_name)\n    actual_log_dir = find_log_dir(log_dir=log_dir)\n    try:\n        username = getpass.getuser()\n    except KeyError:\n        if hasattr(os, 'getuid'):\n            username = str(os.getuid())\n        else:\n            username = 'unknown'\n    hostname = socket.gethostname()\n    file_prefix = ('%s.%s.%s.log' % (program_name, hostname, username))\n    return (actual_log_dir, file_prefix, program_name)", "docstring": "Computes the directory and filename prefix for log file.\n\nArgs:\nprogram_name: str|None, the filename part of the path to the program that\nis running without its extension.  e.g: if your program is called\n'usr/bin/foobar.py' this method should probably be called with\nprogram_name='foobar' However, this is just a convention, you can\npass in any string you want, and it will be used as part of the\nlog filename. If you don't pass in anything, the default behavior\nis as described in the example.  In python standard logging mode,\nthe program_name will be prepended with py_ if it is the program_name\nargument is omitted.\nlog_dir: str|None, the desired log directory.\n\nReturns:\n(log_dir, file_prefix, symlink_prefix)", "source": "codesearchnet"}
{"code": "def __init__(self, size, dropout=None, lstmcell_args={}, named_tensors=None, scope='internal_lstm', summary_labels=()):\n        \n        self.size = size\n        self.dropout = dropout\n        self.lstmcell_args = lstmcell_args\n        super(InternalLstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "LSTM layer.\n\nArgs:\nsize: LSTM size.\ndropout: Dropout rate.", "source": "juraj-google-style"}
{"code": "def from_api_repr(cls, api_repr):\n        \n        \n        mode = api_repr.get(\"mode\", \"NULLABLE\")\n        description = api_repr.get(\"description\")\n        fields = api_repr.get(\"fields\", ())\n        return cls(\n            field_type=api_repr[\"type\"].upper(),\n            fields=[cls.from_api_repr(f) for f in fields],\n            mode=mode.upper(),\n            description=description,\n            name=api_repr[\"name\"],\n        )", "docstring": "Return a ``SchemaField`` object deserialized from a dictionary.\n\nArgs:\napi_repr (Mapping[str, str]): The serialized representation\nof the SchemaField, such as what is output by\n:meth:`to_api_repr`.\n\nReturns:\ngoogle.cloud.biquery.schema.SchemaField:\nThe ``SchemaField`` object.", "source": "juraj-google-style"}
{"code": "def _on_cancelok(self, cancel_frame):\n        \n        _log.info(\"Consumer canceled; returning all unprocessed messages to the queue\")\n        self._channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)", "docstring": "Called when the server acknowledges a cancel request.\n\nArgs:\ncancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from\nthe server.", "source": "juraj-google-style"}
{"code": "def __init__(self, dataset_fn, coordinator):\n\n    def disallow_variable_creation(next_creator, **kwargs):\n        raise ValueError('Creating variables in `dataset_fn` is not allowed.')\n    if isinstance(dataset_fn, def_function.Function):\n        with variable_scope.variable_creator_scope(disallow_variable_creation):\n            dataset_fn = dataset_fn.get_concrete_function()\n    elif not isinstance(dataset_fn, tf_function.ConcreteFunction):\n        with variable_scope.variable_creator_scope(disallow_variable_creation):\n            dataset_fn = def_function.function(dataset_fn).get_concrete_function()\n    self._dataset_fn = dataset_fn\n    self._coordinator = coordinator\n    self._element_spec = None", "docstring": "Makes an iterable from datasets created by the given function.\n\nArgs:\ndataset_fn: A function that returns a `Dataset`.\ncoordinator: a `ClusterCoordinator` object, used to create dataset\nresources.", "source": "github-repos"}
{"code": "def rewards_to_go(rewards, mask, gamma=0.99):\n  r\n  B, T = rewards.shape  \n\n  masked_rewards = rewards * mask  \n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n\n  \n  r2gs = [masked_rewards[:, -1]]\n\n  \n  for t in reversed(range(T - 1)):\n    r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))\n\n  \n  assert T == len(r2gs)\n\n  \n  \n  return np.flip(np.stack(r2gs, axis=1), axis=1)", "docstring": "r\"\"\"Computes rewards to go.\n\nReward to go is defined as follows, the discounted reward that we have to\nyet collect, going forward from this point, i.e.:\n\nr2g_t = \\sum_{l=0}^{\\infty} (\\gamma^{l} * reward_{t+l})\n\nArgs:\nrewards: np.ndarray of shape (B, T) of rewards.\nmask: np.ndarray of shape (B, T) of mask for the rewards.\ngamma: float, discount factor.\n\nReturns:\nrewards to go, np.ndarray of shape (B, T).", "source": "juraj-google-style"}
{"code": "def add_rec_new(self, k, val):\n    self.rec_new(val)\n    self[k] = val\n    return val", "docstring": "Recursively add a new value and its children to me, and assign a\nvariable to it.\n\nArgs:\nk (str): The name of the variable to assign.\nval (LispVal): The value to be added and assigned.\n\nReturns:\nLispVal: The added value.", "source": "codesearchnet"}
{"code": "def run(self, resources):\n    if (not resources['connection']._port.startswith('jlink')):\n        raise ArgumentError('FlashBoardStep is currently only possible through jlink', invalid_port=args['port'])\n    hwman = resources['connection']\n    debug = hwman.hwman.debug(self._debug_string)\n    debug.flash(self._file)", "docstring": "Runs the flash step\n\nArgs:\nresources (dict): A dictionary containing the required resources that\nwe needed access to in order to perform this step.", "source": "codesearchnet"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1]\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`CLIPSegTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, CLIPSegModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"CIDAS/clipseg-rd64-refined\")\n>>> model = CLIPSegModel.from_pretrained(\"CIDAS/clipseg-rd64-refined\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def _all_gather(self, input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:\n    instance_key = self._next_instance_key()\n    options = self._options.merge(options)\n    ordering_token = self._get_ordering_token()\n    with ops.device(self._device):\n        return collective_ops.all_gather_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)", "docstring": "All-gather a dense tensor.\n\nArgs:\ninput_tensor: a dense tensor. It must have the same shape on all replicas.\noptions: an optional tf.distribute.experimental.CommunicationOptions. If\nprovided, it overrides the default options.\n\nReturns:\nThe reduced tensor.", "source": "github-repos"}
{"code": "def screenshot(path=None):\n    if (not _rootinitialized):\n        raise TDLError('Initialize first with tdl.init')\n    if isinstance(path, str):\n        _lib.TCOD_sys_save_screenshot(_encodeString(path))\n    elif (path is None):\n        filelist = _os.listdir('.')\n        n = 1\n        filename = ('screenshot%.3i.png' % n)\n        while (filename in filelist):\n            n += 1\n            filename = ('screenshot%.3i.png' % n)\n        _lib.TCOD_sys_save_screenshot(_encodeString(filename))\n    else:\n        tmpname = _os.tempnam()\n        _lib.TCOD_sys_save_screenshot(_encodeString(tmpname))\n        with tmpname as tmpfile:\n            path.write(tmpfile.read())\n        _os.remove(tmpname)", "docstring": "Capture the screen and save it as a png file.\n\nIf path is None then the image will be placed in the current\nfolder with the names:\n``screenshot001.png, screenshot002.png, ...``\n\nArgs:\npath (Optional[Text]): The file path to save the screenshot.", "source": "codesearchnet"}
{"code": "def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):\n        \n        if check_lattice and self.lattice != other.lattice:\n            return False\n        if self.species != other.species:\n            return False\n\n        frac_diff = pbc_diff(self.frac_coords, other.frac_coords)\n        return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)", "docstring": "Returns True if sites are periodic images of each other.\n\nArgs:\nother (PeriodicSite): Other site\ntolerance (float): Tolerance to compare fractional coordinates\ncheck_lattice (bool): Whether to check if the two sites have the\nsame lattice.\n\nReturns:\nbool: True if sites are periodic images of each other.", "source": "juraj-google-style"}
{"code": "def from_dir(dirpath: Path, feat_type: str) -> None:\n    \n\n    logger.info(\"Extracting features from directory {}\".format(dirpath))\n\n    dirname = str(dirpath)\n\n    def all_wavs_processed() -> bool:\n        \n\n        for fn in os.listdir(dirname):\n            prefix, ext = os.path.splitext(fn)\n            if ext == \".wav\":\n                if not os.path.exists(\n                        os.path.join(dirname, \"%s.%s.npy\" % (prefix, feat_type))):\n                    return False\n        return True\n\n    if all_wavs_processed():\n        \n        logger.info(\"All WAV files already preprocessed\")\n        return\n    \n\n    \n    if feat_type == \"pitch\" or feat_type == \"fbank_and_pitch\":\n        kaldi_pitch(dirname, dirname)\n\n    \n    for filename in os.listdir(dirname):\n        logger.info(\"Preparing %s features for %s\", feat_type, filename)\n        path = os.path.join(dirname, filename)\n        if path.endswith(\".wav\"):\n            if empty_wav(path):\n                raise PersephoneException(\"Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.\".format(path))\n            if feat_type == \"fbank\":\n                fbank(path)\n            elif feat_type == \"fbank_and_pitch\":\n                fbank(path)\n                prefix = os.path.splitext(filename)[0]\n                combine_fbank_and_pitch(dirname, prefix)\n            elif feat_type == \"pitch\":\n                \n                pass\n            elif feat_type == \"mfcc13_d\":\n                mfcc(path)\n            else:\n                logger.warning(\"Feature type not found: %s\", feat_type)\n                raise PersephoneException(\"Feature type not found: %s\" % feat_type)", "docstring": "Performs feature extraction from the WAV files in a directory.\n\nArgs:\ndirpath: A `Path` to the directory where the WAV files reside.\nfeat_type: The type of features that are being used.", "source": "juraj-google-style"}
{"code": "def GetDefinitionByName(self, name):\n    \n    lookup_name = name.lower()\n    if lookup_name not in self._definitions:\n      lookup_name = self._aliases.get(name, None)\n\n    return self._definitions.get(lookup_name, None)", "docstring": "Retrieves a specific data type definition by name.\n\nArgs:\nname (str): name of the data type definition.\n\nReturns:\nDataTypeDefinition: data type definition or None if not available.", "source": "juraj-google-style"}
{"code": "def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None, domains=EMAIL_DOMAINS, excludeDomains=[]):\n    \n    email_candidates = []\n\n    if emails != None:\n        email_candidates = emails\n    elif emailsFile != None:\n        \n        with open(emailsFile, \"r\") as iF:\n            email_candidates = iF.read().splitlines()\n    elif nicks != None:\n        \n        for n in nicks:\n            \n            for d in domains:\n                if d not in excludeDomains:\n                    email_candidates.append(n+\"@\"+d)\n    elif nicksFile != None:\n        \n        with open(nicksFile, \"r\") as iF:\n            nicks = iF.read().splitlines()\n            \n            for n in nicks:\n                \n                for d in domains:\n                    if d not in excludeDomains:\n                        email_candidates.append(n+\"@\"+d)\n    return email_candidates", "docstring": "Method that generates a list of emails.\n\nArgs:\n-----\nemails: Any premade list of emails.\nemailsFile: Filepath to the emails file (one per line).\nnicks: A list of aliases.\nnicksFile: Filepath to the aliases file (one per line).\ndomains: Domains where the aliases will be tested.\nexcludeDomains: Domains to be excluded from the created list.\n\nReturns:\n--------\nlist: the list of emails that will be verified.", "source": "juraj-google-style"}
{"code": "def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[dict]=None) -> list[str]:\n    if obj.__module__ == '__main__':\n        logger.warning(f\"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put this code in a separate module so we can include it in the saved folder and make it easier to share via the Hub.\")\n        return\n\n    def _set_auto_map_in_config(_config):\n        module_name = obj.__class__.__module__\n        last_module = module_name.split('.')[-1]\n        full_name = f'{last_module}.{obj.__class__.__name__}'\n        if 'Tokenizer' in full_name:\n            slow_tokenizer_class = None\n            fast_tokenizer_class = None\n            if obj.__class__.__name__.endswith('Fast'):\n                fast_tokenizer_class = f'{last_module}.{obj.__class__.__name__}'\n                if getattr(obj, 'slow_tokenizer_class', None) is not None:\n                    slow_tokenizer = getattr(obj, 'slow_tokenizer_class')\n                    slow_tok_module_name = slow_tokenizer.__module__\n                    last_slow_tok_module = slow_tok_module_name.split('.')[-1]\n                    slow_tokenizer_class = f'{last_slow_tok_module}.{slow_tokenizer.__name__}'\n            else:\n                slow_tokenizer_class = f'{last_module}.{obj.__class__.__name__}'\n            full_name = (slow_tokenizer_class, fast_tokenizer_class)\n        if isinstance(_config, dict):\n            auto_map = _config.get('auto_map', {})\n            auto_map[obj._auto_class] = full_name\n            _config['auto_map'] = auto_map\n        elif getattr(_config, 'auto_map', None) is not None:\n            _config.auto_map[obj._auto_class] = full_name\n        else:\n            _config.auto_map = {obj._auto_class: full_name}\n    if isinstance(config, (list, tuple)):\n        for cfg in config:\n            _set_auto_map_in_config(cfg)\n    elif config is not None:\n        _set_auto_map_in_config(config)\n    result = []\n    object_file = sys.modules[obj.__module__].__file__\n    dest_file = Path(folder) / Path(object_file).name\n    shutil.copy(object_file, dest_file)\n    result.append(dest_file)\n    for needed_file in get_relative_import_files(object_file):\n        dest_file = Path(folder) / Path(needed_file).name\n        shutil.copy(needed_file, dest_file)\n        result.append(dest_file)\n    return result", "docstring": "Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally\nadds the proper fields in a config.\n\nArgs:\nobj (`Any`): The object for which to save the module files.\nfolder (`str` or `os.PathLike`): The folder where to save.\nconfig (`PretrainedConfig` or dictionary, `optional`):\nA config in which to register the auto_map corresponding to this custom object.\n\nReturns:\n`List[str]`: The list of files saved.", "source": "github-repos"}
{"code": "def gc_velocity_update(particle, social, state):\n    \n    gbest = state.swarm[gbest_idx(state.swarm)].position\n    if not np.array_equal(gbest, particle.position):\n        return std_velocity(particle, social, state)\n\n    rho = state.params['rho']\n    inertia = state.params['inertia']\n    v_max = state.params['v_max']\n    size = particle.position.size\n\n    r2 = state.rng.uniform(0.0, 1.0, size)\n    velocity = __gc_velocity_equation__(inertia, rho, r2, particle, gbest)\n    return __clamp__(velocity, v_max)", "docstring": "Guaranteed convergence velocity update.\n\nArgs:\nparticle: cipy.algorithms.pso.Particle: Particle to update the velocity\nfor.\nsocial: cipy.algorithms.pso.Particle: The social best for the particle.\nstate: cipy.algorithms.pso.State: The state of the PSO algorithm.\n\nReturns:\nnumpy.ndarray: the calculated velocity.", "source": "juraj-google-style"}
{"code": "def parseTree(self, root, state: ParseState) -> List[Dict]:\n        \n\n        if root.tag in self.AST_TAG_HANDLERS:\n            return self.AST_TAG_HANDLERS[root.tag](root, state)\n\n        elif root.tag in self.libRtns:\n            return self.process_libRtn(root, state)\n\n        else:\n            prog = []\n            for node in root:\n                prog += self.parseTree(node, state)\n            return prog", "docstring": "Parses the XML ast tree recursively to generate a JSON AST\nwhich can be ingested by other scripts to generate Python\nscripts.\n\nArgs:\nroot: The current root of the tree.\nstate: The current state of the tree defined by an object of the\nParseState class.\n\nReturns:\nast: A JSON ast that defines the structure of the Fortran file.", "source": "juraj-google-style"}
{"code": "def ParseArguments(args):\n    try:\n        (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'repository=', 'linelength=', 'extensions=', 'exclude=', 'headers=', 'quiet', 'recursive'])\n    except getopt.GetoptError:\n        PrintUsage('Invalid arguments.')\n    verbosity = _VerboseLevel()\n    output_format = _OutputFormat()\n    filters = ''\n    counting_style = ''\n    recursive = False\n    for (opt, val) in opts:\n        if (opt == '--help'):\n            PrintUsage(None)\n        elif (opt == '--output'):\n            if (val not in ('emacs', 'vs7', 'eclipse', 'junit')):\n                PrintUsage('The only allowed output formats are emacs, vs7, eclipse and junit.')\n            output_format = val\n        elif (opt == '--verbose'):\n            verbosity = int(val)\n        elif (opt == '--filter'):\n            filters = val\n            if (not filters):\n                PrintCategories()\n        elif (opt == '--counting'):\n            if (val not in ('total', 'toplevel', 'detailed')):\n                PrintUsage('Valid counting options are total, toplevel, and detailed')\n            counting_style = val\n        elif (opt == '--root'):\n            global _root\n            _root = val\n        elif (opt == '--repository'):\n            global _repository\n            _repository = val\n        elif (opt == '--linelength'):\n            global _line_length\n            try:\n                _line_length = int(val)\n            except ValueError:\n                PrintUsage('Line length must be digits.')\n        elif (opt == '--exclude'):\n            global _excludes\n            if (not _excludes):\n                _excludes = set()\n            _excludes.update(glob.glob(val))\n        elif (opt == '--extensions'):\n            global _valid_extensions\n            try:\n                _valid_extensions = set(val.split(','))\n            except ValueError:\n                PrintUsage('Extensions must be comma seperated list.')\n        elif (opt == '--headers'):\n            global _header_extensions\n            try:\n                _header_extensions = set(val.split(','))\n            except ValueError:\n                PrintUsage('Extensions must be comma seperated list.')\n        elif (opt == '--recursive'):\n            recursive = True\n        elif (opt == '--quiet'):\n            global _quiet\n            _quiet = True\n    if (not filenames):\n        PrintUsage('No files were specified.')\n    if recursive:\n        filenames = _ExpandDirectories(filenames)\n    if _excludes:\n        filenames = _FilterExcludedFiles(filenames)\n    _SetOutputFormat(output_format)\n    _SetVerboseLevel(verbosity)\n    _SetFilters(filters)\n    _SetCountingStyle(counting_style)\n    return filenames", "docstring": "Parses the command line arguments.\n\nThis may set the output format and verbosity level as side-effects.\n\nArgs:\nargs: The command line arguments:\n\nReturns:\nThe list of filenames to lint.", "source": "codesearchnet"}
{"code": "def return_item_count_on_page(self, page=1, total_items=1):\n        \n        up_to_page = ((page - 1) * self.page_items)\n        \n\n        if total_items > up_to_page:\n            \n            \n            count = total_items - up_to_page\n\n        if count >= self.page_items:\n            \n            \n            return self.page_items\n        else:\n            \n            \n            return count", "docstring": "Return the number of items on page.\n\nArgs:\n* page = The Page to test for\n* total_items = the total item count\n\nReturns:\n* Integer - Which represents the calculated number of items on page.", "source": "juraj-google-style"}
{"code": "def setPANID(self, xPAN):\n        \n        print '%s call setPANID' % self.port\n        print xPAN\n        panid = ''\n        try:\n            if not isinstance(xPAN, str):\n                panid = str(hex(xPAN))\n                print panid\n\n            cmd = WPANCTL_CMD + 'setprop -s Network:PANID %s' % panid\n            datasetCmd = WPANCTL_CMD + 'setprop Dataset:PanId %s' % panid\n            self.hasActiveDatasetToCommit = True\n            return self.__sendCommand(cmd)[0] != 'Fail' and self.__sendCommand(datasetCmd)[0] != 'Fail'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('setPANID() Error: ' + str(e))", "docstring": "set Thread Network PAN ID\n\nArgs:\nxPAN: a given PAN ID in hex format\n\nReturns:\nTrue: successful to set the Thread Network PAN ID\nFalse: fail to set the Thread Network PAN ID", "source": "juraj-google-style"}
{"code": "def _transition_instrumentation_block(self, instrumentation_block, new_state=_InstrumentationBlockStates.UNKNOWN):\n    formatters = self._create_formatters(instrumentation_block, new_state)\n    for formatter in formatters:\n        test_record = formatter.create_test_record(self.TAG)\n        if test_record:\n            self.results.add_record(test_record)\n            self.summary_writer.dump(test_record.to_dict(), records.TestSummaryEntryType.RECORD)\n    return instrumentation_block.transition_state(new_state=new_state)", "docstring": "Transitions and finishes the current instrumentation block.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, the current\ninstrumentation block to finish.\nnew_state: _InstrumentationBlockState, the next state for the\nparser to transition to.\n\nReturns:\nThe new instrumentation block to use for storing parsed\ninstrumentation output.", "source": "github-repos"}
{"code": "def store_to_file(self, filename):\n    with tf.gfile.Open(filename, 'w') as f:\n        for i in range(len(self._id_to_token)):\n            f.write((self._id_to_token[i] + '\\n'))", "docstring": "Write vocab file to disk.\n\nVocab files have one token per line. The file ends in a newline. Reserved\ntokens are written to the vocab file as well.\n\nArgs:\nfilename: Full path of the file to store the vocab to.", "source": "codesearchnet"}
{"code": "def broadcast_to_rank(self, rank):\n    if self.rank is None:\n        raise ValueError('Unable to broadcast: self.rank is unknown')\n    dims_to_add = rank - self.rank\n    if dims_to_add < 0:\n        raise ValueError('Unable to broadcast: rank=%d must be greater than self.rank=%d.' % (rank, self.rank))\n    elif dims_to_add == 0:\n        return self\n    elif self._partitioned_dim_sizes:\n        partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes\n        return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes, self.dim_size_dtype)\n    else:\n        inner_dims = array_ops.concat([array_ops.ones([dims_to_add], self.dim_size_dtype), self.inner_dim_sizes], axis=0)\n        return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)", "docstring": "Adds leading size-1 dimensions to broadcast `self` to the given rank.\n\nE.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)`\nis `[1, 1, 3, (D2), 4]`.\n\nArgs:\nrank: The rank for the returned shape.\n\nReturns:\nA RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions\nhave the same size as `self` and whose outer dimensions have size `1`.\n\nRaises:\nValueError: If `self.rank` is unknown or greater than `rank`.", "source": "github-repos"}
{"code": "def modify_user_power_levels(self, users=None, users_default=None):\n    try:\n        content = self.client.api.get_power_levels(self.room_id)\n        if users_default:\n            content['users_default'] = users_default\n        if users:\n            if ('users' in content):\n                content['users'].update(users)\n            else:\n                content['users'] = users\n            for (user, power_level) in list(content['users'].items()):\n                if (power_level is None):\n                    del content['users'][user]\n        self.client.api.set_power_levels(self.room_id, content)\n        return True\n    except MatrixRequestError:\n        return False", "docstring": "Modify the power level for a subset of users\n\nArgs:\nusers(dict): Power levels to assign to specific users, in the form\n{\"@name0:host0\": 10, \"@name1:host1\": 100, \"@name3:host3\", None}\nA level of None causes the user to revert to the default level\nas specified by users_default.\nusers_default(int): Default power level for users in the room\n\nReturns:\nTrue if successful, False if not", "source": "codesearchnet"}
{"code": "def _InsertNodeAt(new_node, target, after=False):\n    if new_node.parent is not None:\n        raise RuntimeError('inserting node which already has a parent', (new_node, new_node.parent))\n    parent_of_target = target.parent\n    if parent_of_target is None:\n        raise RuntimeError('expected target node to have a parent', (target,))\n    for i, child in enumerate(parent_of_target.children):\n        if child is target:\n            insertion_index = i + 1 if after else i\n            parent_of_target.insert_child(insertion_index, new_node)\n            return\n    raise RuntimeError('unable to find insertion point for target node', (target,))", "docstring": "Underlying implementation for node insertion.\n\nArguments:\nnew_node: a new node to insert (this node should not be in the tree).\ntarget: the target node.\nafter: if True, new_node is inserted after target. Otherwise, it's inserted\nbefore target.\n\nReturns:\nnothing\n\nRaises:\nRuntimeError: if the tree is corrupted, or the insertion would corrupt it.", "source": "github-repos"}
{"code": "def _restore_resources(resources):\n    resources = deepcopy(resources)\n    for resource in resources:\n        schema = resource['schema']\n        for fk in schema.get('foreignKeys', []):\n            (_, name) = _restore_path(fk['reference']['resource'])\n            fk['reference']['resource'] = name\n    return resources", "docstring": "Restore schemas from being compatible with storage schemas.\n\nForeign keys related operations.\n\nArgs:\nlist: resources from storage\n\nReturns:\nlist: restored resources", "source": "codesearchnet"}
{"code": "def _get_sorted_methods(self, methods):\n    if (not methods):\n        return methods\n\n    def _sorted_methods_comparison(method_info1, method_info2):\n        \"Sort method info by path and http_method.\\n\\n      Args:\\n        method_info1: Method name and info for the first method to compare.\\n        method_info2: Method name and info for the method to compare to.\\n\\n      Returns:\\n        Negative if the first method should come first, positive if the\\n        first method should come after the second.  Zero if they're\\n        equivalent.\\n      \"\n\n        def _score_path(path):\n            \"Calculate the score for this path, used for comparisons.\\n\\n        Higher scores have priority, and if scores are equal, the path text\\n        is sorted alphabetically.  Scores are based on the number and location\\n        of the constant parts of the path.  The server has some special handling\\n        for variables with regexes, which we don't handle here.\\n\\n        Args:\\n          path: The request path that we're calculating a score for.\\n\\n        Returns:\\n          The score for the given path.\\n        \"\n            score = 0\n            parts = path.split('/')\n            for part in parts:\n                score <<= 1\n                if ((not part) or (part[0] != '{')):\n                    score += 1\n            score <<= (31 - len(parts))\n            return score\n        path_score1 = _score_path(method_info1[1].get('path', ''))\n        path_score2 = _score_path(method_info2[1].get('path', ''))\n        if (path_score1 != path_score2):\n            return (path_score2 - path_score1)\n        path_result = cmp(method_info1[1].get('path', ''), method_info2[1].get('path', ''))\n        if (path_result != 0):\n            return path_result\n        method_result = cmp(method_info1[1].get('httpMethod', ''), method_info2[1].get('httpMethod', ''))\n        return method_result\n    return sorted(methods.items(), _sorted_methods_comparison)", "docstring": "Get a copy of 'methods' sorted the way they would be on the live server.\n\nArgs:\nmethods: JSON configuration of an API's methods.\n\nReturns:\nThe same configuration with the methods sorted based on what order\nthey'll be checked by the server.", "source": "codesearchnet"}
{"code": "def port_remove(br, port, if_exists=True):\n    \n    param_if_exists = _param_if_exists(if_exists)\n\n    if port and not br:\n        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)\n    else:\n        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Deletes port.\n\nArgs:\nbr: A string - bridge name (If bridge is None, port is removed from  whatever bridge contains it)\nport: A string - port name.\nif_exists: Bool, if False - attempting to delete a por that  does  not exist returns False. (Default True)\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_remove br0 8080", "source": "juraj-google-style"}
{"code": "def file_path(path=None, payload=None, objectInput=None):\n    \n    f = path if path else write_payload(payload, objectInput)\n\n    if not os.path.exists(f):\n        msg = \"File {!r} does not exist\".format(f)\n        log.exception(msg)\n        raise TikaAppFilePathError(msg)\n\n    return f", "docstring": "Given a file path, payload or file object, it writes file on disk and\nreturns the temp path.\n\nArgs:\npath (string): path of real file\npayload(string): payload in base64 of file\nobjectInput (object): file object/standard input to analyze\n\nReturns:\nPath of file", "source": "juraj-google-style"}
{"code": "def load(path):\n    with open(path, 'r') as fobj:\n        analytics = Analytics(info=json.load(fobj))\n    os.unlink(path)\n    return analytics", "docstring": "Loads analytics report from json file specified by path.\n\nArgs:\npath (str): path to json file with analytics report.", "source": "codesearchnet"}
{"code": "def _compute_version_info():\n    ray_version = ray.__version__\n    python_version = '.'.join(map(str, sys.version_info[:3]))\n    pyarrow_version = pyarrow.__version__\n    return (ray_version, python_version, pyarrow_version)", "docstring": "Compute the versions of Python, pyarrow, and Ray.\n\nReturns:\nA tuple containing the version information.", "source": "codesearchnet"}
{"code": "def create_position_ids_from_input_ids(self, input_ids):\n    mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)\n    incremental_indices = tf.math.cumsum(mask, axis=1) * mask\n    return incremental_indices + self.padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding\nsymbols are ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\ninput_ids: tf.Tensor\nReturns: tf.Tensor", "source": "github-repos"}
{"code": "def _parse_field_value(self, field: descriptor.FieldDescriptor, json_value: Any) -> message.Message:\n    if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:\n        raise ValueError(f'Error in FHIR proto definition, field: {field.full_name} is not a message.')\n    if field.message_type.full_name == any_pb2.Any.DESCRIPTOR.full_name:\n        contained = self.primitive_handler.new_contained_resource()\n        self._merge_contained_resource(json_value, contained)\n        any_message = any_pb2.Any()\n        any_message.Pack(contained)\n        return any_message\n    else:\n        target = proto_utils.create_message_from_descriptor(field.message_type)\n        self.merge_value(json_value, target)\n        return target", "docstring": "Returns a new Message described by the FieldDescriptor and json_value.\n\nArgs:\nfield: The FieldDescriptor of the Message instance to create.\njson_value: The JSON value representation to merge into the newly created\nMessage.\n\nReturns:\nA new Message as described by the provided FieldDescriptor merged with the\ncontents of json_value.", "source": "github-repos"}
{"code": "def center_crop(self, image: 'torch.Tensor', crop_size: dict[str, int], size: dict[str, int], **kwargs) -> 'torch.Tensor':\n    if size.height is None or size.width is None:\n        raise ValueError(f\"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}\")\n    height, width = image.shape[-2:]\n    min_dim = min(height, width)\n    cropped_height = int(size.height / crop_size.height * min_dim)\n    cropped_width = int(size.width / crop_size.width * min_dim)\n    return F.center_crop(image, (cropped_height, cropped_width))", "docstring": "Center crop an image to `(size[\"height\"] / crop_size[\"height\"] * min_dim, size[\"width\"] / crop_size[\"width\"] *\nmin_dim)`. Where `min_dim = min(size[\"height\"], size[\"width\"])`.\n\nIf the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then\ncenter cropped.\n\nArgs:\nimage (`\"torch.Tensor\"`):\nImage to center crop.\ncrop_size (`Dict[str, int]`):\nDesired output size after applying the center crop.\nsize (`Dict[str, int]`):\nSize of the output image.\n\nReturns:\n`torch.Tensor`: The center cropped image.", "source": "github-repos"}
{"code": "async def remember(self, request, user_id):\n        \n        ticket = self._new_ticket(request, user_id)\n        await self.remember_ticket(request, ticket)", "docstring": "Called to store the userid for a request.\n\nThis function creates a ticket from the request and user_id, and calls\nthe abstract function remember_ticket() to store the ticket.\n\nArgs:\nrequest: aiohttp Request object.\nuser_id: String representing the user_id to remember", "source": "juraj-google-style"}
{"code": "def read_handler(Model, name=None, **kwds):\n\n    async def action_handler(service, action_type, payload, props, **kwds):\n        if (action_type == get_crud_action('read', (name or Model))):\n            message_props = {}\n            if ('correlation_id' in props):\n                message_props['correlation_id'] = props['correlation_id']\n            try:\n                resolved = service.schema.execute(payload)\n                response = json.dumps({'data': {key: value for (key, value) in resolved.data.items()}, 'errors': resolved.errors})\n                (await service.event_broker.send(payload=response, action_type=change_action_status(action_type, success_status()), **message_props))\n            except Exception as err:\n                (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))\n    return action_handler", "docstring": "This factory returns an action handler that responds to read requests\nby resolving the payload as a graphql query against the internal schema.\n\n\nArgs:\nModel (nautilus.BaseModel): The model to delete when the action\nreceived.\n\nReturns:\nfunction(type, payload): The action handler for this model", "source": "codesearchnet"}
{"code": "def set_contrast(self, contrast):\n    self._contrast = contrast\n    self.x_spread = (2 * (1.0 - contrast))\n    self.y_spread = (2.0 - (2 * (1.0 - contrast)))\n    self._build_cdict()", "docstring": "Adjusts the image contrast.\n\nContrast refers to the rate of change of color with color level.\nAt low contrast, color changes gradually over many intensity\nlevels, while at high contrast it can change rapidly within a\nfew levels\n\nArgs:\ncontrast: float\nA number between 0 and 1.  Note that upon initialization the\ncolormap has a default contrast value of 0.5.\n\nReturns: void", "source": "codesearchnet"}
{"code": "def all_reduce(self, input_tensor: core.TensorLike, control_input: Optional[Union[core.TensorLike, ops.Operation]]=None, options: Optional[collective_util.Options]=None) -> core.Tensor:\n    instance_key = self._next_instance_key()\n    options = self._options.merge(options)\n    ordering_token = self._get_ordering_token()\n    with ops.device(self._device), self._control_input(control_input):\n        return collective_ops.all_reduce_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)", "docstring": "All-reduce a dense tensor.\n\nArgs:\ninput_tensor: a dense tensor. It must have the same shape on all replicas.\ncontrol_input: if not None, add control edges between control_input and\nthe all-reduce.\noptions: an optional tf.distribute.experimental.CommunicationOptions. If\nprovided, it overrides the default options.\n\nReturns:\nThe reduced tensor.", "source": "github-repos"}
{"code": "def _PromptUserForEncryptedVolumeCredential(\n      self, scan_context, locked_scan_node, output_writer):\n    \n    credentials = credentials_manager.CredentialsManager.GetCredentials(\n        locked_scan_node.path_spec)\n\n    \n    if locked_scan_node.type_indicator == (\n        definitions.TYPE_INDICATOR_APFS_CONTAINER):\n      line = 'Found an APFS encrypted volume.'\n    elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:\n      line = 'Found a BitLocker encrypted volume.'\n    elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:\n      line = 'Found a CoreStorage (FVDE) encrypted volume.'\n    else:\n      line = 'Found an encrypted volume.'\n\n    output_writer.WriteLine(line)\n\n    credentials_list = list(credentials.CREDENTIALS)\n    credentials_list.append('skip')\n\n    \n    output_writer.WriteLine('Supported credentials:')\n    output_writer.WriteLine('')\n    for index, name in enumerate(credentials_list):\n      output_writer.WriteLine('  {0:d}. {1:s}'.format(index + 1, name))\n    output_writer.WriteLine('')\n\n    result = False\n    while not result:\n      output_writer.WriteString(\n          'Select a credential to unlock the volume: ')\n      \n      input_line = sys.stdin.readline()\n      input_line = input_line.strip()\n\n      if input_line in credentials_list:\n        credential_identifier = input_line\n      else:\n        try:\n          credential_identifier = int(input_line, 10)\n          credential_identifier = credentials_list[credential_identifier - 1]\n        except (IndexError, ValueError):\n          output_writer.WriteLine(\n              'Unsupported credential: {0:s}'.format(input_line))\n          continue\n\n      if credential_identifier == 'skip':\n        break\n\n      getpass_string = 'Enter credential data: '\n      if sys.platform.startswith('win') and sys.version_info[0] < 3:\n        \n        \n        getpass_string = self._EncodeString(getpass_string)\n\n      credential_data = getpass.getpass(getpass_string)\n      output_writer.WriteLine('')\n\n      result = self._source_scanner.Unlock(\n          scan_context, locked_scan_node.path_spec, credential_identifier,\n          credential_data)\n\n      if not result:\n        output_writer.WriteLine('Unable to unlock volume.')\n        output_writer.WriteLine('')", "docstring": "Prompts the user to provide a credential for an encrypted volume.\n\nArgs:\nscan_context (SourceScannerContext): the source scanner context.\nlocked_scan_node (SourceScanNode): the locked scan node.\noutput_writer (StdoutWriter): the output writer.", "source": "juraj-google-style"}
{"code": "def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, dtype=None, **kwargs) -> None:\n    super().__init__(input_dim=num_embeddings, output_dim=embedding_dim, dtype=dtype, **kwargs)\n    self.num_embeddings = num_embeddings\n    self.num_additional_embeddings = num_additional_embeddings\n    self.partially_freeze = partially_freeze\n    if partially_freeze:\n        self.trainable = False\n    if self.num_additional_embeddings > 0:\n        self.additional_embedding = tf.keras.layers.Embedding(input_dim=self.num_additional_embeddings, output_dim=embedding_dim, dtype=dtype, name='additional_embedding')", "docstring": "Args:\nnum_embeddings (`int`):\nSize of the dictionary of embeddings\nnum_additional_embeddings (`int`):\nNumber of additional embeddings. Only useful when you `partially_freeze=True`.\nembedding_dim (`int`):\nThe size of each embedding vector\npartially_freeze: (`bool`, *optional*, defaults to `False`):\nIf `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.\n\nNote: there are a lot of other parameters to initialize a standard `tf.keras.layers.Embedding` such as `mask_zero`,\n`input_length` or `embeddings_initializer`. We are not supporting these.", "source": "github-repos"}
{"code": "def maybe_do_strip(node: node_def_pb2.NodeDef) -> None:\n    if node.op == 'Assert' or node.op == 'PrintV2':\n        node.op = 'NoOp'\n        erase_regular_node_attributes(node)\n        new_inputs = []\n        for inp in node.input:\n            if not is_control_input(inp):\n                new_inputs.append(as_control_dep(inp))\n            else:\n                new_inputs.append(inp)\n        node.ClearField('input')\n        node.input.extend(new_inputs)\n    elif node.op == 'CheckNumerics' or node.op == 'Print':\n        node.op = 'Identity'\n        prune_all_non_t_attributes(node)\n        for i in range(1, len(node.input)):\n            if not is_control_input(node.input[i]):\n                node.input[i] = as_control_dep(node.input[i])", "docstring": "Strips the graph from Assert and CheckNumerics ops.\n\nFor Assert ops, this function also rewrites all of the inputs to the nodes\nthat were transformed by making them into control dependencies. It also\nremoves all of the regular node attributes, that is all node attributes\nthat do not start with `_`.\n\nFor CheckNumerics ops, this function turns the op into an Identity op,\nwhich will be pruned later (according to the original implementation in\ngrappler's `debug_stripper.cc`. Then, since Identity ops only take one\ninput, it leaves the first input as is while transforming the other ones\ninto control dependencies.\n\nArgs:\nnode: The node to potentally strip.", "source": "github-repos"}
{"code": "def address_to_ip(address):\n    address_parts = address.split(':')\n    ip_address = socket.gethostbyname(address_parts[0])\n    if (ip_address == '127.0.0.1'):\n        ip_address = get_node_ip_address()\n    return ':'.join(([ip_address] + address_parts[1:]))", "docstring": "Convert a hostname to a numerical IP addresses in an address.\n\nThis should be a no-op if address already contains an actual numerical IP\naddress.\n\nArgs:\naddress: This can be either a string containing a hostname (or an IP\naddress) and a port or it can be just an IP address.\n\nReturns:\nThe same address but with the hostname replaced by a numerical IP\naddress.", "source": "codesearchnet"}
{"code": "def args_to_dict(args):\n    \n    \n    arguments = dict()\n    for arg in args.split(','):\n        key, value = arg.split('=')\n        arguments[key] = value\n    return arguments", "docstring": "Convert command line arguments in a comma separated string to a dictionary\n\nArgs:\nargs (str): Command line arguments\n\nReturns:\nDictUpperBound[str,str]: Dictionary of arguments", "source": "juraj-google-style"}
{"code": "def is_valid_embedding(emb, source, target):\n    \n    for _ in diagnose_embedding(emb, source, target):\n        return False\n    return True", "docstring": "A simple (bool) diagnostic for minor embeddings.\n\nSee :func:`diagnose_embedding` for a more detailed diagnostic / more information.\n\nArgs:\nemb (dict): a dictionary mapping source nodes to arrays of target nodes\nsource (graph or edgelist): the graph to be embedded\ntarget (graph or edgelist): the graph being embedded into\n\nReturns:\nbool: True if `emb` is valid.", "source": "juraj-google-style"}
{"code": "def __init__(self, kwargs):\n        \n        PanCloudError.__init__(\n            self, \"{}\".format(\", \".join(kwargs.keys()))\n        )", "docstring": "Convert kwargs to CSV string.\n\nArgs:\nkwargs (dict): Key-word arguments.", "source": "juraj-google-style"}
{"code": "def _get_fbeta_score(true_positives, selected, relevant, beta=1):\n  \n  precision = 1\n  if selected > 0:\n    precision = true_positives / selected\n  if beta == 0:\n    return precision\n  recall = 1\n  if relevant > 0:\n    recall = true_positives / relevant\n  if precision > 0 and recall > 0:\n    beta2 = beta * beta\n    return (1 + beta2) * precision * recall / (beta2 * precision + recall)\n  else:\n    return 0", "docstring": "Compute Fbeta score.\n\nArgs:\ntrue_positives: Number of true positive ngrams.\nselected: Number of selected ngrams.\nrelevant: Number of relevant ngrams.\nbeta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.\n\nReturns:\nFbeta score.", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the ObjectDefaults object.'.format(kmip_version.value))\n    super(ObjectDefaults, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):\n        self._object_type = primitives.Enumeration(enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)\n        self._object_type.read(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidKmipEncoding('The ObjectDefaults encoding is missing the object type enumeration.')\n    if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):\n        self._attributes = Attributes()\n        self._attributes.read(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidKmipEncoding('The ObjectDefaults encoding is missing the attributes structure.')\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the ObjectDefaults structure and decode it into\nits constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the object type or attributes are\nmissing from the encoding.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the ObjectDefaults structure.", "source": "codesearchnet"}
{"code": "def _GetLineNumberPrefix(self, error, show_number=True):\n    prefix_with_number = ' {} | '.format(error.position.line)\n    if show_number:\n        return prefix_with_number\n    return re.sub('\\\\d', ' ', prefix_with_number)", "docstring": "Returns a prefix to annotate a line with a line number.\n\nArgs:\nerror: The ErrorInfo to get the line number from.\nshow_number: Whether to show or hide the number (hiding the number is\nuseful to get a prefix with the same width and formatting).", "source": "github-repos"}
{"code": "def describe_field(field_definition):\n    field_descriptor = FieldDescriptor()\n    field_descriptor.name = field_definition.name\n    field_descriptor.number = field_definition.number\n    field_descriptor.variant = field_definition.variant\n    if isinstance(field_definition, messages.EnumField):\n        field_descriptor.type_name = field_definition.type.definition_name()\n    if isinstance(field_definition, messages.MessageField):\n        field_descriptor.type_name = field_definition.message_type.definition_name()\n    if (field_definition.default is not None):\n        field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[type(field_definition)](field_definition.default)\n    if field_definition.repeated:\n        field_descriptor.label = FieldDescriptor.Label.REPEATED\n    elif field_definition.required:\n        field_descriptor.label = FieldDescriptor.Label.REQUIRED\n    else:\n        field_descriptor.label = FieldDescriptor.Label.OPTIONAL\n    return field_descriptor", "docstring": "Build descriptor for Field instance.\n\nArgs:\nfield_definition: Field instance to provide descriptor for.\n\nReturns:\nInitialized FieldDescriptor instance describing the Field instance.", "source": "codesearchnet"}
{"code": "def _ParseMRUListValue(self, registry_key):\n    mrulist_value = registry_key.GetValueByName('MRUList')\n    if (not mrulist_value):\n        return None\n    mrulist_entries_map = self._GetDataTypeMap('mrulist_entries')\n    context = dtfabric_data_maps.DataTypeMapContext(values={'data_size': len(mrulist_value.data)})\n    return self._ReadStructureFromByteStream(mrulist_value.data, 0, mrulist_entries_map, context=context)", "docstring": "Parses the MRUList value in a given Registry key.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\nthe MRUList value.\n\nReturns:\nmrulist_entries: MRUList entries or None if not available.", "source": "codesearchnet"}
{"code": "def _Execute(statements, context, callback, trace):\n    if trace:\n        trace.exec_depth += 1\n    for (i, statement) in enumerate(statements):\n        if isinstance(statement, six.string_types):\n            callback(statement)\n        else:\n            try:\n                (func, args) = statement\n                func(args, context, callback, trace)\n            except UndefinedVariable as e:\n                start = max(0, (i - 3))\n                end = (i + 3)\n                e.near = statements[start:end]\n                e.trace = trace\n                raise", "docstring": "Execute a bunch of template statements in a ScopedContext.\n\nArgs:\ncallback: Strings are \"written\" to this callback function.\ntrace: Trace object, or None\n\nThis is called in a mutually recursive fashion.", "source": "codesearchnet"}
{"code": "def _AddParentDirectories(self, path):\n    \n    path_segments = self.file_system.SplitPath(path)\n    for segment_index in range(len(path_segments)):\n      parent_path = self.file_system.JoinPath(path_segments[:segment_index])\n      file_entry = self.file_system.GetFileEntryByPath(parent_path)\n      if file_entry and not file_entry.IsDirectory():\n        raise ValueError(\n            'Non-directory parent file entry: {0:s} already exists.'.format(\n                parent_path))\n\n    for segment_index in range(len(path_segments)):\n      parent_path = self.file_system.JoinPath(path_segments[:segment_index])\n      if not self.file_system.FileEntryExistsByPath(parent_path):\n        self.file_system.AddFileEntry(\n            parent_path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)", "docstring": "Adds the parent directories of a path to the fake file system.\n\nArgs:\npath (str): path of the file within the fake file system.\n\nRaises:\nValueError: if a parent directory is already set and is not a directory.", "source": "juraj-google-style"}
{"code": "def _seed2key(a):\n\n    def int32s_to_int64(a):\n        \n        a = math_ops.bitwise_or(math_ops.cast(a[0], dtypes.uint64), math_ops.left_shift(math_ops.cast(a[1], dtypes.uint64), constant_op.constant(32, dtypes.uint64)))\n        a = math_ops.cast(a, dtypes.int64)\n        return a\n    return tf_np.asarray(int32s_to_int64(a))", "docstring": "Converts an RNG seed to an RNG key.\n\nArgs:\na: an RNG seed, a tensor of shape [2] and dtype `tf.int32`.\n\nReturns:\nan RNG key, an ndarray of shape [] and dtype `np.int64`.", "source": "github-repos"}
{"code": "def run_board(args):\n    \n    init_config(args)\n\n    \n    from backend.collector import CollectorService\n\n    service = CollectorService(\n        args.logdir,\n        args.reload_interval,\n        standalone=False,\n        log_level=args.log_level)\n    service.run()\n\n    \n    logger.info(\"Try to start automlboard on port %s\\n\" % args.port)\n    command = [\n        os.path.join(root_path, \"manage.py\"), \"runserver\",\n        \"0.0.0.0:%s\" % args.port, \"--noreload\"\n    ]\n    execute_from_command_line(command)", "docstring": "Run main entry for AutoMLBoard.\n\nArgs:\nargs: args parsed from command line", "source": "juraj-google-style"}
{"code": "def _tokenize_field_path(path):\n    \n    pos = 0\n    get_token = TOKENS_REGEX.match\n    match = get_token(path)\n    while match is not None:\n        type_ = match.lastgroup\n        value = match.group(type_)\n        yield value\n        pos = match.end()\n        match = get_token(path, pos)\n    if pos != len(path):\n        raise ValueError(\"Path {} not consumed, residue: {}\".format(path, path[pos:]))", "docstring": "Lex a field path into tokens (including dots).\n\nArgs:\npath (str): field path to be lexed.\nReturns:\nList(str): tokens", "source": "juraj-google-style"}
{"code": "def __init__(self, shape, dtype, minimum, maximum, name=None):\n    super(BoundedTensorSpec, self).__init__(shape, dtype, name)\n    if minimum is None:\n        raise ValueError('`minimum` can not be None.')\n    if maximum is None:\n        raise ValueError('`maximum` can not be None.')\n    try:\n        minimum_shape = np.shape(minimum)\n        common_shapes.broadcast_shape(tensor_shape.TensorShape(minimum_shape), self.shape)\n    except ValueError as exception:\n        raise ValueError(f'`minimum` {minimum} is not compatible with shape {self.shape}.') from exception\n    try:\n        maximum_shape = np.shape(maximum)\n        common_shapes.broadcast_shape(tensor_shape.TensorShape(maximum_shape), self.shape)\n    except ValueError as exception:\n        raise ValueError(f'`maximum` {maximum} is not compatible with shape {self.shape}.') from exception\n    self._minimum = np.array(minimum, dtype=self.dtype.as_numpy_dtype)\n    self._minimum.setflags(write=False)\n    self._maximum = np.array(maximum, dtype=self.dtype.as_numpy_dtype)\n    self._maximum.setflags(write=False)", "docstring": "Initializes a new `BoundedTensorSpec`.\n\nArgs:\nshape: Value convertible to `tf.TensorShape`. The shape of the tensor.\ndtype: Value convertible to `tf.DType`. The type of the tensor values.\nminimum: Number or sequence specifying the minimum element bounds\n(inclusive). Must be broadcastable to `shape`.\nmaximum: Number or sequence specifying the maximum element bounds\n(inclusive). Must be broadcastable to `shape`.\nname: Optional string containing a semantic name for the corresponding\narray. Defaults to `None`.\n\nRaises:\nValueError: If `minimum` or `maximum` are not provided or not\nbroadcastable to `shape`.\nTypeError: If the shape is not an iterable or if the `dtype` is an invalid\nnumpy dtype.", "source": "github-repos"}
{"code": "def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:\n        \n        pass", "docstring": "Gets a query from the data source, which contains a request for multiple objects.\n\nArgs:\nquery: The query being requested (contains a request for multiple objects).\ncontext: The context for the extraction (mutable).\n\nReturns:\nThe requested objects.", "source": "juraj-google-style"}
{"code": "def Register(self, a, b, migrated_entity):\n    if (a is not None):\n        self.a_merge_map[a] = migrated_entity\n        a._migrated_entity = migrated_entity\n    if (b is not None):\n        self.b_merge_map[b] = migrated_entity\n        b._migrated_entity = migrated_entity", "docstring": "Registers a merge mapping.\n\nIf a and b are both not None, this means that entities a and b were merged\nto produce migrated_entity. If one of a or b are not None, then it means\nit was not merged but simply migrated.\n\nThe effect of a call to register is to update a_merge_map and b_merge_map\naccording to the merge. Also the private attributes _migrated_entity of a\nand b are set to migrated_entity.\n\nArgs:\na: The entity from the old feed or None.\nb: The entity from the new feed or None.\nmigrated_entity: The migrated entity.", "source": "codesearchnet"}
{"code": "def save_summaries_secs(self):\n    return self._save_summaries_secs", "docstring": "Return the delay between summary computations.\n\nReturns:\nA timestamp.", "source": "github-repos"}
{"code": "def maybe_get_static_value(x, dtype=None):\n    if (x is None):\n        return x\n    try:\n        x_ = tf.get_static_value(x)\n    except TypeError:\n        x_ = x\n    if ((x_ is None) or (dtype is None)):\n        return x_\n    return np.array(x_, dtype)", "docstring": "Helper which tries to return a static value.\n\nGiven `x`, extract it's value statically, optionally casting to a specific\ndtype. If this is not possible, None is returned.\n\nArgs:\nx: `Tensor` for which to extract a value statically.\ndtype: Optional dtype to cast to.\n\nReturns:\nStatically inferred value if possible, otherwise None.", "source": "codesearchnet"}
{"code": "def keywords(self):\n    path = self._get_id_path('keywords')\n    response = self._GET(path)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the plot keywords for a specific movie id.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def get(self, key, **ctx_options):\n    \n    options = _make_ctx_options(ctx_options)\n    use_cache = self._use_cache(key, options)\n    if use_cache:\n      self._load_from_cache_if_available(key)\n\n    use_datastore = self._use_datastore(key, options)\n    if (use_datastore and\n        isinstance(self._conn, datastore_rpc.TransactionalConnection)):\n      use_memcache = False\n    else:\n      use_memcache = self._use_memcache(key, options)\n    ns = key.namespace()\n    memcache_deadline = None  \n\n    if use_memcache:\n      mkey = self._memcache_prefix + key.urlsafe()\n      memcache_deadline = self._get_memcache_deadline(options)\n      mvalue = yield self.memcache_get(mkey, for_cas=use_datastore,\n                                       namespace=ns, use_cache=True,\n                                       deadline=memcache_deadline)\n      \n      if use_cache:\n        self._load_from_cache_if_available(key)\n      if mvalue not in (_LOCKED, None):\n        cls = model.Model._lookup_model(key.kind(),\n                                        self._conn.adapter.default_model)\n        pb = entity_pb.EntityProto()\n\n        try:\n          pb.MergePartialFromString(mvalue)\n        except ProtocolBuffer.ProtocolBufferDecodeError:\n          logging.warning('Corrupt memcache entry found '\n                          'with key %s and namespace %s' % (mkey, ns))\n          mvalue = None\n        else:\n          entity = cls._from_pb(pb)\n          \n          entity._key = key\n          if use_cache:\n            \n            self._cache[key] = entity\n          raise tasklets.Return(entity)\n\n      if mvalue is None and use_datastore:\n        yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME, namespace=ns,\n                                use_cache=True, deadline=memcache_deadline)\n        yield self.memcache_gets(mkey, namespace=ns, use_cache=True,\n                                 deadline=memcache_deadline)\n\n    if not use_datastore:\n      \n      \n      raise tasklets.Return(None)\n\n    if use_cache:\n      entity = yield self._get_batcher.add_once(key, options)\n    else:\n      entity = yield self._get_batcher.add(key, options)\n\n    if entity is not None:\n      if use_memcache and mvalue != _LOCKED:\n        \n        pbs = entity._to_pb(set_key=False).SerializePartialToString()\n        \n        \n        \n        \n        \n        if len(pbs) <= memcache.MAX_VALUE_SIZE:\n          timeout = self._get_memcache_timeout(key, options)\n          \n          \n          \n          \n          yield self.memcache_cas(mkey, pbs, time=timeout, namespace=ns,\n                                  deadline=memcache_deadline)\n\n    if use_cache:\n      \n      \n      self._cache[key] = entity\n\n    raise tasklets.Return(entity)", "docstring": "Return a Model instance given the entity key.\n\nIt will use the context cache if the cache policy for the given\nkey is enabled.\n\nArgs:\nkey: Key instance.\n**ctx_options: Context options.\n\nReturns:\nA Model instance if the key exists in the datastore; None otherwise.", "source": "juraj-google-style"}
{"code": "def allreduce_grads(all_grads, average):\n    if (get_tf_version_tuple() <= (1, 12)):\n        from tensorflow.contrib import nccl\n    else:\n        from tensorflow.python.ops import nccl_ops as nccl\n    nr_tower = len(all_grads)\n    if (nr_tower == 1):\n        return all_grads\n    new_all_grads = []\n    for grads in zip(*all_grads):\n        summed = nccl.all_sum(grads)\n        grads_for_devices = []\n        for g in summed:\n            with tf.device(g.device):\n                if average:\n                    g = tf.multiply(g, (1.0 / nr_tower))\n            grads_for_devices.append(g)\n        new_all_grads.append(grads_for_devices)\n    ret = list(zip(*new_all_grads))\n    return ret", "docstring": "All-reduce average the gradients among K devices. Results are broadcasted to all devices.\n\nArgs:\nall_grads (K x N): List of list of gradients. N is the number of variables.\naverage (bool): average gradients or not.\n\nReturns:\nK x N: same as input, but each grad is replaced by the average over K devices.", "source": "codesearchnet"}
{"code": "def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim):\n  \n  layout_rules = convert_to_layout_rules(layout)\n  mesh_shape = convert_to_shape(mesh_shape)\n  mesh_axis = layout_rules.tensor_dimension_to_mesh_axis(tensor_dim, mesh_shape)\n  if mesh_axis is None:\n    return 1\n  else:\n    return mesh_shape.dims[mesh_axis].size", "docstring": "How many ways does a tensor dimension get split.\n\nThis is used to \"cheat\" when building the mtf graph and peek at how a\ntensor dimension will be split.  Returns 1 if the tensor dimension is not\nsplit.\n\nArgs:\nlayout: an input to convert_to_layout_rules\nmesh_shape: an input to convert_to_shape\ntensor_dim: a Dimension\n\nReturns:\nan integer", "source": "juraj-google-style"}
{"code": "def _MakeSavedModelV1(self, run_params):\n    saved_model_dir = trt_test.TfTrtIntegrationTestBase._MakeSavedModelV1(self, run_params)\n    saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)\n    new_saved_model = saved_model_pb2.SavedModel()\n    new_saved_model.CopyFrom(saved_model_proto)\n    new_meta_graph_def = new_saved_model.meta_graphs[0]\n    for func_def in new_meta_graph_def.graph_def.library.function:\n        func_def.attr['_noinline'].CopyFrom(attr_value_pb2.AttrValue(b=True))\n        self._copy_test_attributes_to_func_def(func_def)\n    old_saved_model_file = os.path.join(saved_model_dir, constants.SAVED_MODEL_FILENAME_PB)\n    if os.path.exists(old_saved_model_file):\n        os.remove(old_saved_model_file)\n    path = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n    file_io.write_string_to_file(path, new_saved_model.SerializeToString(deterministic=True))\n    return saved_model_dir", "docstring": "Write the saved model as an input for testing.\n\nIn addition to creating a SavedModel like its parent method, this method\nreplaces this SavedModel by adding TF-TRT conversion parameters as function\nattributes to each function in the SavedModel.\n\nArgs:\nrun_params: The current test run parameters.\n\nReturns:\nThe directory of the saved model.", "source": "github-repos"}
{"code": "def _CreateQueryAccessHelper(self):\n    h = CheckAccessHelper('query')\n    h.Allow('aff4:/users/*', self._IsHomeDir)\n    h.Allow('aff4:/cron')\n    h.Allow('aff4:/cron/*')\n    h.Allow('aff4:/hunts')\n    h.Allow('aff4:/hunts/*')\n    h.Allow('aff4:/ACL')\n    h.Allow('aff4:/ACL/*')\n    h.Allow(self.CLIENT_URN_PATTERN)\n    h.Allow((self.CLIENT_URN_PATTERN + '/*'))\n    h.Allow('aff4:/index')\n    h.Allow('aff4:/index/*')\n    h.Allow('aff4:/config')\n    h.Allow('aff4:/config/*')\n    h.Allow('aff4:/flows/*')\n    h.Allow(('aff4:/files/hash/generic/sha256/' + ('[a-z0-9]' * 64)))\n    h.Allow('aff4:/artifact_store')\n    h.Allow('aff4:/artifact_store/*')\n    h.Allow('aff4:/artifact_store')\n    h.Allow('aff4:/artifact_store/*')\n    h.Allow('aff4:/audit/logs')\n    h.Allow('aff4:/audit/logs/*')\n    return h", "docstring": "Creates a CheckAccessHelper for controlling query access.\n\nThis function and _CreateReadAccessHelper essentially define GRR's ACL\npolicy. Please refer to these 2 functions to either review or modify\nGRR's ACLs.\n\nQuery access gives you the ability to find objects in the tree without\nknowing their URN, using ListChildren.  If you grant query access,\nyou will also need read access.\n\nReturns:\nCheckAccessHelper for controlling query access.", "source": "codesearchnet"}
{"code": "def _parse_ports(port_values: dict) -> dict:\n    endpoints = {}\n    for port_element in port_values:\n        target_port = port_element.split(':')\n        for port in target_port:\n            endpoints[int(port)] = int(port)\n    endpoint_spec = docker.types.EndpointSpec(ports=endpoints)\n    return endpoint_spec", "docstring": "Parse ports key.\n\nArgs:\nport_values (dict): ports configuration values\n\nReturns:\ndict, Ports specification which contains exposed ports", "source": "codesearchnet"}
{"code": "class FuzzedExponentialIntervals(object):\n\n    def __init__(self, initial_delay_secs, num_retries, factor=2, fuzz=0.5, max_delay_secs=60 * 60 * 1, stop_after_secs=None):\n        self._initial_delay_secs = initial_delay_secs\n        if num_retries > 10000:\n            raise ValueError('num_retries parameter cannot exceed 10000.')\n        self._num_retries = num_retries\n        self._factor = factor\n        if not 0 <= fuzz <= 1:\n            raise ValueError('fuzz parameter expected to be in [0, 1] range.')\n        self._fuzz = fuzz\n        self._max_delay_secs = max_delay_secs\n        self._stop_after_secs = stop_after_secs\n\n    def __iter__(self):\n        current_delay_secs = min(self._max_delay_secs, self._initial_delay_secs)\n        total_delay_secs = 0\n        for _ in range(self._num_retries):\n            fuzz_multiplier = 1 - self._fuzz + random.random() * self._fuzz\n            delay_secs = current_delay_secs * fuzz_multiplier\n            total_delay_secs += delay_secs\n            if self._stop_after_secs is not None and total_delay_secs > self._stop_after_secs:\n                break\n            yield delay_secs\n            current_delay_secs = min(self._max_delay_secs, current_delay_secs * self._factor)", "docstring": "Iterable for intervals that are exponentially spaced, with fuzzing.\n\nOn iteration, yields retry interval lengths, in seconds. Every iteration over\nthis iterable will yield differently fuzzed interval lengths, as long as fuzz\nis nonzero.\n\nArgs:\ninitial_delay_secs: The delay before the first retry, in seconds.\nnum_retries: The total number of times to retry.\nfactor: The exponential factor to use on subsequent retries.\nDefault is 2 (doubling).\nfuzz: A value between 0 and 1, indicating the fraction of fuzz. For a\ngiven delay d, the fuzzed delay is randomly chosen between\n[(1 - fuzz) * d, d].\nmax_delay_secs: Maximum delay (in seconds). After this limit is reached,\nfurther tries use max_delay_sec instead of exponentially increasing\nthe time. Defaults to 1 hour.\nstop_after_secs: Places a limit on the sum of intervals returned (in\nseconds), such that the sum is <= stop_after_secs. Defaults to disabled\n(None). You may need to increase num_retries to effectively use this\nfeature.", "source": "github-repos"}
{"code": "def text_pb(tag, data, description=None):\n    try:\n        tensor = tensor_util.make_tensor_proto(data, dtype=np.object)\n    except TypeError as e:\n        raise TypeError('tensor must be of type string', e)\n    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)\n    summary = summary_pb2.Summary()\n    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)\n    return summary", "docstring": "Create a text tf.Summary protobuf.\n\nArguments:\ntag: String tag for the summary.\ndata: A Python bytestring (of type bytes), a Unicode string, or a numpy data\narray of those types.\ndescription: Optional long-form description for this summary, as a `str`.\nMarkdown is supported. Defaults to empty.\n\nRaises:\nTypeError: If the type of the data is unsupported.\n\nReturns:\nA `tf.Summary` protobuf object.", "source": "codesearchnet"}
{"code": "def summarize(self, highlight=None):\n    lines = [RL('Command-line configuration:', 'bold'), RL('')]\n    for name, val in self._config.items():\n        highlight_attr = 'bold' if name == highlight else None\n        line = RL('  ')\n        line += RL(name, ['underline', highlight_attr])\n        line += RL(': ')\n        line += RL(str(val), font_attr=highlight_attr)\n        lines.append(line)\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)", "docstring": "Get a text summary of the config.\n\nArgs:\nhighlight: A property name to highlight in the output.\n\nReturns:\nA `RichTextLines` output.", "source": "github-repos"}
{"code": "class MeanMetricWrapper(Mean):\n\n    def __init__(self, fn, name=None, dtype=None, **kwargs):\n        super().__init__(name=name, dtype=dtype)\n        self._fn = fn\n        self._fn_kwargs = kwargs\n        if self._fn in losses.ALL_OBJECTS or (hasattr(self._fn, '__class__') and self._fn.__class__ in losses.ALL_OBJECTS):\n            self._direction = 'down'\n\n    def update_state(self, y_true, y_pred, sample_weight=None):\n        mask = backend.get_keras_mask(y_pred)\n        values = self._fn(y_true, y_pred, **self._fn_kwargs)\n        if sample_weight is not None and mask is not None:\n            sample_weight = losses.loss.apply_mask(sample_weight, mask, dtype=self.dtype, reduction='sum')\n        return super().update_state(values, sample_weight=sample_weight)\n\n    def get_config(self):\n        base_config = super().get_config()\n        config = {'fn': serialization_lib.serialize_keras_object(self._fn)}\n        config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))\n        return {**base_config, **config}\n\n    @classmethod\n    def from_config(cls, config):\n        if 'fn' in config:\n            config = serialization_lib.deserialize_keras_object(config)\n        return cls(**config)", "docstring": "Wrap a stateless metric function with the `Mean` metric.\n\nYou could use this class to quickly build a mean metric from a function. The\nfunction needs to have the signature `fn(y_true, y_pred)` and return a\nper-sample loss array. `MeanMetricWrapper.result()` will return\nthe average metric value across all samples seen so far.\n\nFor example:\n\n```python\ndef mse(y_true, y_pred):\nreturn (y_true - y_pred) ** 2\n\nmse_metric = MeanMetricWrapper(fn=mse)\n```\n\nArgs:\nfn: The metric function to wrap, with signature\n`fn(y_true, y_pred, **kwargs)`.\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n**kwargs: Keyword arguments to pass on to `fn`.", "source": "github-repos"}
{"code": "def _data_format_resolver(data_format, resolver_dict):\n    \n    try:\n        data_format = DataFormat(data_format)\n    except ValueError:\n        supported_formats = ', '.join(\n            [\"'{}'\".format(f.value) for f in DataFormat])\n        raise ValueError((\"'data_format' must be one of {formats}. Given \"\n                          \"'{value}'.\").format(formats=supported_formats,\n                                               value=data_format))\n    return (resolver_dict.get(data_format) or\n            resolver_dict.get(data_format.value))", "docstring": "Resolve a value from :attr:`resolver_dict` based on the\n:attr:`data_format`.\n\nArgs:\ndata_format (:class:`~.DataFormat` or str): The data format;\nmust be a member of :class:`~.DataFormat` or a string\nequivalent.\nresolver_dict (dict): the resolving dict. Can hold any value\nfor any of the valid :attr:`data_format` strings\n\nReturns:\nThe value of the key in :attr:`resolver_dict` that matches\n:attr:`data_format`", "source": "juraj-google-style"}
{"code": "def assemble_config(partition: Partition, manifest: Manifest) -> Config:\n    name, params, out = partition\n    out.kwargs.update(params)\n    out.subsection_name = name\n    location = prepare_target_name(out)\n    user = out.user_id\n    manifest.schedule(out.config_name, out.dataset, out.selection, location, user)\n    logger.info(f'[{name}] Created partition {location!r}.')\n    beam.metrics.Metrics.counter('Subsection', name).inc()\n    return out", "docstring": "Assemble the configuration for a single partition.\n\nFor each cross product of the 'selection' sections, the output dictionary\nwill overwrite parameters from the extra param subsections, evenly cycling\nthrough each subsection.\n\nFor example:\n{ 'parameters': {... 'api_key': KKKKK1, ... }, ... }\n{ 'parameters': {... 'api_key': KKKKK2, ... }, ... }\n{ 'parameters': {... 'api_key': KKKKK3, ... }, ... }\n{ 'parameters': {... 'api_key': KKKKK1, ... }, ... }\n{ 'parameters': {... 'api_key': KKKKK2, ... }, ... }\n{ 'parameters': {... 'api_key': KKKKK3, ... }, ... }\n...\n\nReturns:\nAn `Config` assembled out of subsection parameters and config shards.", "source": "github-repos"}
{"code": "def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:\n    if (len(ref) == 0):\n        raise EmptyReferenceException('Cannot calculating word error rate against a length 0 reference sequence.')\n    distance = min_edit_distance(ref, hyp)\n    return ((100 * float(distance)) / len(ref))", "docstring": "Calculate the word error rate of a sequence against a reference.\n\nArgs:\nref: The gold-standard reference sequence\nhyp: The hypothesis to be evaluated against the reference.\n\nReturns:\nThe word error rate of the supplied hypothesis with respect to the\nreference string.\n\nRaises:\npersephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.", "source": "codesearchnet"}
{"code": "def pars_in_groups(self):\n    pargp = self.par_groups\n    allpars = dict()\n    for cpg in pargp:\n        allpars[cpg] = [i for i in self.parameter_data.loc[((self.parameter_data.pargp == cpg), 'parnme')]]\n    return allpars", "docstring": "return a dictionary of  parameter names in each parameter group.\n\nReturns:\ndictionary", "source": "codesearchnet"}
{"code": "def update_failover_dns_record(env, zone_id, **kwargs):\n    client = boto3.Session(profile_name=env).client('route53')\n    response = {}\n    hosted_zone_info = client.get_hosted_zone(Id=zone_id)\n    zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')\n    dns_name = kwargs.get('dns_name')\n    failover_state = kwargs.get('failover_state')\n    if (failover_state.lower() != 'primary'):\n        primary_record = find_existing_record(env, zone_id, dns_name, check_key='Failover', check_value='PRIMARY')\n        if (not primary_record):\n            raise PrimaryDNSRecordNotFound('Primary Failover DNS record not found: {}'.format(dns_name))\n    if (dns_name and dns_name.endswith(zone_name)):\n        dns_json = get_template(template_file='infrastructure/dns_failover_upsert.json.j2', **kwargs)\n        LOG.info('Attempting to create DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'], zone_id, zone_name)\n        try:\n            delete_existing_cname(env, zone_id, dns_name)\n            response = client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json))\n            LOG.info('Upserted DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'], zone_id, zone_name)\n        except botocore.exceptions.ClientError as error:\n            LOG.info('Error creating DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'], zone_id, zone_name)\n            LOG.debug(error)\n    else:\n        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)\n    LOG.debug('Route53 JSON Response: \\n%s', pformat(response))", "docstring": "Create a Failover Route53 alias record in _env_ zone.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\n\nKeyword Args:\ndns_name (str): FQDN of application's dns entry to add/update.\ndns_ttl (int): DNS time-to-live (ttl)\nelb_aws_dns (str): DNS A Record of ELB from AWS\nelb_dns_zone_id (str): Zone ID of ELB DNS\nfailover_state (str): if the record is primary or secondary\nprimary_region (str): Primary AWS region for DNS", "source": "codesearchnet"}
{"code": "def GetContainingCondContext(ctxt):\n    while ctxt:\n        if ctxt.IsCondContext():\n            return ctxt\n        ctxt = ctxt.outer_context\n    return None", "docstring": "Returns the first ancestor CondContext of `ctxt`.\n\nReturns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond.\n\nArgs:\nctxt: ControlFlowContext\n\nReturns:\n`ctxt` if `ctxt` is a CondContext, the most nested CondContext containing\n`ctxt`, or None if `ctxt` is not in a cond.", "source": "github-repos"}
{"code": "def space(self, newlines=1):\n    space = Space()\n    for line in range(newlines):\n        space.add_line('\\n')\n    self._container.structure.insert(self._idx, space)\n    self._idx += 1\n    return self", "docstring": "Creates a vertical space of newlines\n\nArgs:\nnewlines (int): number of empty lines\n\nReturns:\nself for chaining", "source": "codesearchnet"}
{"code": "def get_figure(new_fig=True, subplot='111', params=None):\n    _get_plt()\n    if new_fig:\n        fig = plt.figure()\n    else:\n        fig = plt.gcf()\n    params = dict_if_none(params)\n    if isinstance(subplot, (tuple, list)):\n        ax = fig.add_subplot(*subplot, **params)\n    else:\n        ax = fig.add_subplot(subplot, **params)\n    return (fig, ax)", "docstring": "Function to be used for viewing - plotting,\nto initialize the matplotlib figure - axes.\n\nArgs:\nnew_fig(bool): Defines if a new figure will be created, if false current figure is used\nsubplot (tuple or matplolib subplot specifier string): Create axes with these parameters\nparams (dict): extra options passed to add_subplot()\n\nReturns:\nMatplotlib Figure and Axes", "source": "codesearchnet"}
{"code": "def _get_descending_key(gettime=time.time):\n    now_descending = int(((_FUTURE_TIME - gettime()) * 100))\n    request_id_hash = os.environ.get('REQUEST_ID_HASH')\n    if (not request_id_hash):\n        request_id_hash = str(random.getrandbits(32))\n    return ('%d%s' % (now_descending, request_id_hash))", "docstring": "Returns a key name lexically ordered by time descending.\n\nThis lets us have a key name for use with Datastore entities which returns\nrows in time descending order when it is scanned in lexically ascending order,\nallowing us to bypass index building for descending indexes.\n\nArgs:\ngettime: Used for testing.\n\nReturns:\nA string with a time descending key.", "source": "codesearchnet"}
{"code": "def clipped_zoom(img, zoom_factor):\n  \n  h = img.shape[0]\n  ch = int(np.ceil(h / float(zoom_factor)))\n  top_h = (h - ch) \n\n  w = img.shape[1]\n  cw = int(np.ceil(w / float(zoom_factor)))\n  top_w = (w - cw) \n\n  img = tfds.core.lazy_imports.scipy.ndimage.zoom(\n      img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),\n      order=1)\n\n  \n  trim_top_h = (img.shape[0] - h) \n  trim_top_w = (img.shape[1] - w) \n\n  return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]", "docstring": "Zoom image with clipping.\n\nZoom the central part of the image and clip extra pixels.\n\nArgs:\nimg: numpy array, uncorrupted image.\nzoom_factor: numpy array, a sequence of float numbers for zoom factor.\n\nReturns:\nnumpy array, zoomed image after clipping.", "source": "juraj-google-style"}
{"code": "def get_torch_dataloader(self):\n    raise NotImplementedError", "docstring": "Get a Torch `DataLoader` for the `DataAdapter`.\n\nReturns:\nA Torch `DataLoader`.", "source": "github-repos"}
{"code": "def str_to_etree(xml_str, encoding='utf-8'):\n    \n    parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n    return xml.etree.ElementTree.fromstring(xml_str, parser=parser)", "docstring": "Deserialize API XML doc to an ElementTree.\n\nArgs:\nxml_str: bytes\nDataONE API XML doc\n\nencoding: str\nDecoder to use when converting the XML doc ``bytes`` to a Unicode str.\n\nReturns:\nElementTree: Matching the API version of the XML doc.", "source": "juraj-google-style"}
{"code": "def __init__(self, dtensor_components: Tuple[tensor.Tensor], global_element_spec: tensor_spec.TensorSpec, layouts: Any):\n    [self._iterator_resource_dtensor] = dtensor_components\n    self._global_element_spec = global_element_spec\n    self._layouts = layouts\n    self._layouts_str = nest.map_structure(lambda layout: layout.to_string(), layouts)\n    super().__init__(components=dtensor_components, element_spec=global_element_spec)", "docstring": "Initializes a distributed iterator for DTensor datasets.\n\nThis iterator encapsulates tf.data iterators for the underlying devices, and\ntreats it as a packed DTensor of iterator resource tensors.\n\nArgs:\ndtensor_components: a tuple containing the underlying iterator resources\npacked into a DTensor. This is expected to be a tuple with a single\nelement.\nglobal_element_spec: the underlying dataset's element spec from a global\nview.\nlayouts: a structure of DTensor layouts to be applied to the elements\nreturned by the underlying iterators. This can be a single layout or\n(possibly nested) tuples or dictionaries of layouts, and the structure\nmust match the structure of the iterator elements.", "source": "github-repos"}
{"code": "def dict_take(dict_, keys, default=util_const.NoParam):\n    if (default is util_const.NoParam):\n        for key in keys:\n            (yield dict_[key])\n    else:\n        for key in keys:\n            (yield dict_.get(key, default))", "docstring": "r\"\"\"\nGenerates values from a dictionary\n\nArgs:\ndict_ (Mapping): a dictionary to take from\nkeys (Iterable): the keys to take\ndefault (object, optional): if specified uses default if keys are missing\n\nCommandLine:\npython -m ubelt.util_dict dict_take_gen\n\nExample:\n>>> import ubelt as ub\n>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n>>> keys = [1, 2, 3, 4, 5]\n>>> result = list(ub.dict_take(dict_, keys, None))\n>>> assert result == ['a', 'b', 'c', None, None]\n\nExample:\n>>> import ubelt as ub\n>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n>>> keys = [1, 2, 3, 4, 5]\n>>> try:\n>>>     print(list(ub.dict_take(dict_, keys)))\n>>>     raise AssertionError('did not get key error')\n>>> except KeyError:\n>>>     print('correctly got key error')", "source": "codesearchnet"}
{"code": "def Log(self, format_str, *args):\n    format_str = utils.SmartUnicode(format_str)\n    status = format_str\n    if args:\n        try:\n            status = (format_str % args)\n        except TypeError:\n            logging.error('Tried to log a format string with the wrong number of arguments: %s', format_str)\n    logging.info('%s: %s', self.session_id, status)\n    self.context.status = utils.SmartUnicode(status)\n    log_entry = rdf_flows.FlowLog(client_id=None, urn=self.session_id, flow_name=self.hunt_obj.__class__.__name__, log_message=status)\n    logs_collection_urn = self.hunt_obj.logs_collection_urn\n    with data_store.DB.GetMutationPool() as pool:\n        grr_collections.LogCollection.StaticAdd(logs_collection_urn, log_entry, mutation_pool=pool)", "docstring": "Logs the message using the hunt's standard logging.\n\nArgs:\nformat_str: Format string\n*args: arguments to the format string\n\nRaises:\nRuntimeError: on parent missing logs_collection", "source": "codesearchnet"}
{"code": "def sell(self, product_id, order_type, **kwargs):\n    return self.place_order(product_id, 'sell', order_type, **kwargs)", "docstring": "Place a sell order.\n\nThis is included to maintain backwards compatibility with older versions\nof cbpro-Python. For maximum support from docstrings and function\nsignatures see the order type-specific functions place_limit_order,\nplace_market_order, and place_stop_order.\n\nArgs:\nproduct_id (str): Product to order (eg. 'BTC-USD')\norder_type (str): Order type ('limit', 'market', or 'stop')\n**kwargs: Additional arguments can be specified for different order\ntypes.\n\nReturns:\ndict: Order details. See `place_order` for example.", "source": "codesearchnet"}
{"code": "def __init__(self, getter, verbose=False):\n    \n    self._count = 0\n    self._getter = getter\n    self._verbose = verbose", "docstring": "Initializes a contextual switch for a custom getter.\n\nArgs:\ngetter: The custom getter which we may want to switch on.\nverbose: Log out every time a variable is fetched, and whether or not\n`getter` is used.\n\nReturns:\nA custom getter which can also be used as a context manager.\nEntering the context enables the custom getter.", "source": "juraj-google-style"}
{"code": "def get_request_data(self, path, action, body=None):\n        \n        body = body or ''\n        path_name, path_spec = self.get_path_spec(path)\n        response = {}\n\n        \n        if path_spec is not None and action in path_spec.keys():\n            for status_code in path_spec[action]['responses'].keys():\n                resp = path_spec[action]['responses'][status_code]\n                try:\n                    response[int(status_code)] = self.get_response_example(resp)\n                except ValueError:\n                    response[status_code] = self.get_response_example(resp)\n\n        \n        if response == {}:\n            response[400] = ''\n        return response", "docstring": "Get the default data and status code of the given path + action request.\n\nArgs:\npath: path of the request.\naction: action of the request(get, post, delete...)\nbody: body sent, used to sent it back for post request.\n\nReturns:\nA tuple with the default response data and status code\nIn case of default status_code, use 0", "source": "juraj-google-style"}
{"code": "def authenticate(self, user, password):\n        \n\n        request = Request(AUTH_URL)\n        request.add_header('X-Simperium-API-Key', API_KEY)\n        if sys.version_info < (3, 3):\n            request.add_data(json.dumps({'username': user, 'password': password}))\n        else:\n            request.data = json.dumps({'username': user, 'password': password}).encode()\n        try:\n            res = urllib2.urlopen(request).read()\n            token = json.loads(res.decode('utf-8'))[\"access_token\"]\n        except HTTPError:\n            raise SimplenoteLoginFailed('Login to Simplenote API failed!')\n        except IOError: \n            token = None\n        return token", "docstring": "Method to get simplenote auth token\n\nArguments:\n- user (string):     simplenote email address\n- password (string): simplenote password\n\nReturns:\nSimplenote API token as string", "source": "juraj-google-style"}
{"code": "def get_file_diff(tree, files_to_diff):\n    \n    \n    config.LOGGER.info(\"\\nChecking if files exist on Kolibri Studio...\")\n    file_diff = tree.get_file_diff(files_to_diff)\n    return file_diff", "docstring": "get_file_diff: Download files from nodes\nArgs:\ntree (ChannelManager): manager to handle communication to Kolibri Studio\nReturns: list of files that are not on Kolibri Studio", "source": "juraj-google-style"}
{"code": "def read_samples(self, sr=None, offset=0, duration=None):\n    with self.container.open_if_needed(mode='r') as cnt:\n        (samples, native_sr) = cnt.get(self.key)\n        start_sample_index = int((offset * native_sr))\n        if (duration is None):\n            end_sample_index = samples.shape[0]\n        else:\n            end_sample_index = int(((offset + duration) * native_sr))\n        samples = samples[start_sample_index:end_sample_index]\n        if ((sr is not None) and (sr != native_sr)):\n            samples = librosa.core.resample(samples, native_sr, sr, res_type='kaiser_best')\n        return samples", "docstring": "Return the samples from the track in the container.\nUses librosa for resampling, if needed.\n\nArgs:\nsr (int): If ``None``, uses the sampling rate given by the file,\notherwise resamples to the given sampling rate.\noffset (float): The time in seconds, from where to start reading\nthe samples (rel. to the file start).\nduration (float): The length of the samples to read in seconds.\n\nReturns:\nnp.ndarray: A numpy array containing the samples as a\nfloating point (numpy.float32) time series.", "source": "codesearchnet"}
{"code": "def is_tensor_final(self, tensor_name):\n    \n    tensor = self._name_to_tensor(tensor_name)\n    return tensor in self._final_tensors", "docstring": "Whether a tensor is a final output of the computation.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na boolean indicating whether the tensor was a final output.", "source": "juraj-google-style"}
{"code": "def numeric_function_clean_dataframe(self, axis):\n    result = None\n    query_compiler = self\n    if ((not axis) and (len(self.index) == 0)):\n        result = pandas.Series(dtype=np.int64)\n    nonnumeric = [col for (col, dtype) in zip(self.columns, self.dtypes) if (not is_numeric_dtype(dtype))]\n    if (len(nonnumeric) == len(self.columns)):\n        if axis:\n            result = pandas.Series([np.nan for _ in self.index])\n        else:\n            result = pandas.Series([0 for _ in self.index])\n    else:\n        query_compiler = self.drop(columns=nonnumeric)\n    return (result, query_compiler)", "docstring": "Preprocesses numeric functions to clean dataframe and pick numeric indices.\n\nArgs:\naxis: '0' if columns and '1' if rows.\n\nReturns:\nTuple with return value(if any), indices to apply func to & cleaned Manager.", "source": "codesearchnet"}
{"code": "def execute(self, asm_instr):\n    self.ir_emulator.registers[self.ip] = (asm_instr.address + asm_instr.size)\n    if self.arch_info.instr_is_syscall(asm_instr):\n        raise Syscall()\n    return self.__execute(asm_instr)", "docstring": "Execute an assembler instruction.\n\nArgs:\nasm_instr (X86Instruction): A instruction to execute.\n\nReturns:\nA int. The address of the next instruction to execute.", "source": "codesearchnet"}
{"code": "def get_member_slackuid(self, slack):\n        \n        members = self.__con__.search_s(\n            CSHMember.__ldap_user_ou__,\n            ldap.SCOPE_SUBTREE,\n            \"(slackuid=%s)\" % slack,\n            ['ipaUniqueID'])\n        if members:\n            return CSHMember(\n                    self,\n                    members[0][1]['ipaUniqueID'][0].decode('utf-8'),\n                    False)\n        return None", "docstring": "Get a CSHMember object.\n\nArguments:\nslack -- the Slack UID of the member\n\nReturns:\nNone if the Slack UID provided does not correspond to a CSH Member", "source": "juraj-google-style"}
{"code": "def detect(self, text):\n    t = text.encode('utf-8')\n    (reliable, index, top_3_choices) = cld2.detect(t, bestEffort=False)\n    if (not reliable):\n        self.reliable = False\n        (reliable, index, top_3_choices) = cld2.detect(t, bestEffort=True)\n        if (not self.quiet):\n            if (not reliable):\n                raise UnknownLanguage('Try passing a longer snippet of text')\n            else:\n                logger.warning('Detector is not able to detect the language reliably.')\n    self.languages = [Language(x) for x in top_3_choices]\n    self.language = self.languages[0]\n    return self.language", "docstring": "Decide which language is used to write the text.\n\nThe method tries first to detect the language with high reliability. If\nthat is not possible, the method switches to best effort strategy.\n\n\nArgs:\ntext (string): A snippet of text, the longer it is the more reliable we\ncan detect the language used to write the text.", "source": "codesearchnet"}
{"code": "def created(cls, data=None):\n    if cls.expose_status:\n        cls.response.content_type = 'application/json'\n        cls.response._status_line = '201 Created'\n    return cls(201, data=data).to_json", "docstring": "Shortcut API for HTTP 201 `Created` response.\n\nArgs:\ndata (object): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "codesearchnet"}
{"code": "def search_users(self, user):\n        \n        user_url = \"%s/%s/%s\" % (self.url, \"user\", user)\n        response = self.jss.get(user_url)\n        return LDAPUsersResults(self.jss, response)", "docstring": "Search for LDAP users.\n\nArgs:\nuser: User to search for. It is not entirely clear how the\nJSS determines the results- are regexes allowed, or\nglobbing?\n\nReturns:\nLDAPUsersResult object.\n\nRaises:\nWill raise a JSSGetError if no results are found.", "source": "juraj-google-style"}
{"code": "def create_volume(self, name=None, driver=None, driver_opts=None, labels=None):\n    url = self._url('/volumes/create')\n    if ((driver_opts is not None) and (not isinstance(driver_opts, dict))):\n        raise TypeError('driver_opts must be a dictionary')\n    data = {'Name': name, 'Driver': driver, 'DriverOpts': driver_opts}\n    if (labels is not None):\n        if (utils.compare_version('1.23', self._version) < 0):\n            raise errors.InvalidVersion('volume labels were introduced in API 1.23')\n        if (not isinstance(labels, dict)):\n            raise TypeError('labels must be a dictionary')\n        data['Labels'] = labels\n    return self._result(self._post_json(url, data=data), True)", "docstring": "Create and register a named volume\n\nArgs:\nname (str): Name of the volume\ndriver (str): Name of the driver used to create the volume\ndriver_opts (dict): Driver options as a key-value dictionary\nlabels (dict): Labels to set on the volume\n\nReturns:\n(dict): The created volume reference object\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> volume = cli.create_volume(name='foobar', driver='local',\ndriver_opts={'foo': 'bar', 'baz': 'false'},\nlabels={\"key\": \"value\"})\n>>> print(volume)\n{u'Driver': u'local',\nu'Labels': {u'key': u'value'},\nu'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',\nu'Name': u'foobar',\nu'Scope': u'local'}", "source": "codesearchnet"}
{"code": "def add(TargetGroup, NewMember, Config=None, Args=None):\n  r\n  Member = Task(NewMember, Args or {}, Config or {}) if isfunction(NewMember) else Group(NewMember, Config or {})\n  ParentMembers = TargetGroup.__ec_member__.Members\n\n  ParentMembers[Member.Config['name']] = Member\n\n  alias = Member.Config.get('alias')\n\n  if alias:\n    ParentMembers[alias] = Member", "docstring": "r\"\"\"Adds members to an existing group.\n\nArgs:\nTargetGroup (Group): The target group for the addition.\nNewMember (Group / Task): The member to be added.\nConfig (dict): The config for the member.\nArgs (OrderedDict): ArgConfig for the NewMember, if it's a task (optional).", "source": "juraj-google-style"}
{"code": "def blocksearch(block, name):\n    \n    if hasattr(block, 'tokens'):\n        for b in block.tokens[1]:\n            b = (b if hasattr(b, 'raw') and b.raw() == name else blocksearch(\n                b, name))\n            if b:\n                return b\n    return False", "docstring": "Recursive search for name in block (inner blocks)\nArgs:\nname (str): search term\nReturns:\nBlock OR False", "source": "juraj-google-style"}
{"code": "def print_all_configs(configs, missing, warning):\n    print_text = ''\n    llen = 65\n    for i, row in enumerate(configs):\n        if i != 0:\n            print_text += '-' * llen + '\\n'\n        if isinstance(row[1], list):\n            val = ', '.join(row[1])\n        else:\n            val = row[1]\n        print_text += ' {: <28}'.format(row[0]) + '    {: <25}'.format(val) + '\\n'\n    print_text += '=' * llen\n    print('\\n\\n {: ^32}    {: ^25}'.format('Configuration(s)', 'Detected value(s)'))\n    print('=' * llen)\n    print(print_text)\n    if missing:\n        print('\\n * ERROR: The following configurations are missing:')\n        for m in missing:\n            print('   ', *m)\n    if warning:\n        print('\\n * WARNING: The following configurations could cause issues:')\n        for w in warning:\n            print('   ', *w)\n    if not missing and (not warning):\n        print('\\n * INFO: Successfully found all configurations.')\n    print('\\n')", "docstring": "Prints the status and info on all configurations in a table format.\n\nArgs:\nconfigs: List of all configurations found.\nmissing: List of all configurations that are missing.\nwarning: List of all configurations found with warnings.", "source": "github-repos"}
{"code": "def matches(node, pattern):\n    if isinstance(pattern, str):\n        pattern = parser.parse_str(pattern)\n    matcher = PatternMatcher(pattern)\n    matcher.visit(node)\n    return matcher.matches", "docstring": "Basic pattern matcher for AST.\n\nThe pattern may contain wildcards represented by the symbol '_'. A node\nmatches a pattern if for every node in the tree, either there is a node of\nthe same type in pattern, or a Name node with id='_'.\n\nArgs:\nnode: ast.AST\npattern: ast.AST\nReturns:\nbool", "source": "github-repos"}
{"code": "def _set_auditpol_data(option, value):\n    \n    auditpol_values = {'None': 'No Auditing',\n                       '0': 'No Auditing',\n                       '1': 'Success',\n                       '2': 'Failure',\n                       '3': 'Success and Failure'}\n    defaults = _get_audit_defaults(option)\n    return __utils__['auditpol.set_setting'](\n        name=defaults['Auditpol Name'],\n        value=auditpol_values[value])", "docstring": "Helper function that updates the current applied settings to match what has\njust been set in the audit.csv files. We're doing it this way instead of\nrunning `gpupdate`\n\nArgs:\noption (str): The name of the option to set\nvalue (str): The value to set. ['None', '0', '1', '2', '3']\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def xml(self, xml):\n    self._request.xml = xml\n    self.add_matcher(matcher('XMLMatcher', xml))", "docstring": "Defines a XML body value to match.\n\nArguments:\nxml (str|regex): body XML to match.\n\nReturns:\nself: current Mock instance.", "source": "codesearchnet"}
{"code": "def __init__(self, saved_model_dir, saved_model_tags, saved_model_exported_names, experimental_debug_info_func=None):\n    super(TFLiteSavedModelConverter, self).__init__(experimental_debug_info_func)\n    self.saved_model_dir = saved_model_dir\n    self._saved_model_tags = saved_model_tags\n    self._saved_model_exported_names = saved_model_exported_names\n    if len(self._saved_model_exported_names) != 1:\n        raise ValueError('Only supports a single signature key.')\n    signature_key = self._saved_model_exported_names[0]\n    result = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, signature_key)\n    self._graph_def = result[0]\n    self._input_tensors = result[1]\n    self._output_tensors = result[2]\n    self._parse_saved_model_args()", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nsaved_model_dir: Directory of the SavedModel.\nsaved_model_tags: Set of tags identifying the MetaGraphDef within the\nSavedModel to analyze. All tags in the tag set must be present. (default\n{tf.saved_model.SERVING}).\nsaved_model_exported_names: Names to be exported when the saved model\nimport path is on.\nexperimental_debug_info_func: An experimental function to retrieve the\ngraph debug info for a set of nodes from the `graph_def`.\n\nRaises:\nValueError: Invalid arguments.", "source": "github-repos"}
{"code": "class PatchTSMixerBlock(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        num_layers = config.num_layers\n        self.mixers = nn.ModuleList([PatchTSMixerLayer(config=config) for _ in range(num_layers)])\n\n    def forward(self, hidden_state, output_hidden_states: bool=False):\n        \n        all_hidden_states = []\n        embedding = hidden_state\n        for mod in self.mixers:\n            embedding = mod(embedding)\n            if output_hidden_states:\n                all_hidden_states.append(embedding)\n        if output_hidden_states:\n            return (embedding, all_hidden_states)\n        else:\n            return (embedding, None)", "docstring": "The main computing framework of the `PatchTSMixer` model.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def from_text_vision_configs(cls, text_config: XCLIPTextConfig, vision_config: XCLIPVisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`XCLIPConfig`] (or a derived class) from xclip text model configuration and xclip vision model\nconfiguration.\n\nReturns:\n[`XCLIPConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def dimension_values(self, dimension, expanded=True, flat=True):\n        \n        index = self.get_dimension_index(dimension)\n        if index == 0:\n            return np.array([self.x])\n        elif index == 1:\n            return np.array([self.y])\n        else:\n            return super(Arrow, self).dimension_values(dimension)", "docstring": "Return the values along the requested dimension.\n\nArgs:\ndimension: The dimension to return values for\nexpanded (bool, optional): Whether to expand values\nflat (bool, optional): Whether to flatten array\n\nReturns:\nNumPy array of values along the requested dimension", "source": "juraj-google-style"}
{"code": "def from_string(key_pem, is_x509_cert):\n        \n        key_pem = _helpers._to_bytes(key_pem)\n        if is_x509_cert:\n            pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)\n        else:\n            pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)\n        return OpenSSLVerifier(pubkey)", "docstring": "Construct a Verified instance from a string.\n\nArgs:\nkey_pem: string, public key in PEM format.\nis_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\nis expected to be an RSA key in PEM format.\n\nReturns:\nVerifier instance.\n\nRaises:\nOpenSSL.crypto.Error: if the key_pem can't be parsed.", "source": "juraj-google-style"}
{"code": "def write(self, data):\n    self._check_open()\n    if (not isinstance(data, str)):\n        raise TypeError(('Expected str but got %s.' % type(data)))\n    if (not data):\n        return\n    self._buffer.append(data)\n    self._buffered += len(data)\n    self._offset += len(data)\n    if (self._buffered >= self._flushsize):\n        self._flush()", "docstring": "Write some bytes.\n\nArgs:\ndata: data to write. str.\n\nRaises:\nTypeError: if data is not of type str.", "source": "codesearchnet"}
{"code": "def pcoll_to_pcoll_id(pipeline, original_context):\n\n    class PCollVisitor(PipelineVisitor):\n        \n\n        def __init__(self):\n            self.pcoll_to_pcoll_id = {}\n\n        def enter_composite_transform(self, transform_node):\n            self.visit_transform(transform_node)\n\n        def visit_transform(self, transform_node):\n            for pcoll in transform_node.outputs.values():\n                self.pcoll_to_pcoll_id[str(pcoll)] = original_context.pcollections.get_id(pcoll)\n    v = PCollVisitor()\n    pipeline.visit(v)\n    return v.pcoll_to_pcoll_id", "docstring": "Returns a dict mapping PCollections string to PCollection IDs.\n\nUsing a PipelineVisitor to iterate over every node in the pipeline,\nrecords the mapping from PCollections to PCollections IDs. This mapping\nwill be used to query cached PCollections.\n\nReturns:\n(dict from str to str) a dict mapping str(pcoll) to pcoll_id.", "source": "github-repos"}
{"code": "def get_single_upstream_artifact_full_path(context, task_id, path):\n    return os.path.abspath(os.path.join(context.config['work_dir'], 'cot', task_id, path))", "docstring": "Return the full path where an upstream artifact should be located.\n\nArtifact may not exist. If you want to be sure if does, use\n``get_and_check_single_upstream_artifact_full_path()`` instead.\n\nThis function is mainly used to move artifacts to the expected location.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\ntask_id (str): the task id of the task that published the artifact\npath (str): the relative path of the artifact\n\nReturns:\nstr: absolute path to the artifact should be.", "source": "codesearchnet"}
{"code": "def is_lambda(fun):\n    return (isinstance(fun, type(LAMBDA)) and (fun.__name__ == LAMBDA.__name__))", "docstring": "Check whether the given function is a lambda function.\n\n.. testsetup::\n\nfrom proso.func import is_lambda\n\n.. testcode::\n\ndef not_lambda_fun():\nreturn 1\n\nlambda_fun = lambda: 1\n\nprint(\nis_lambda(not_lambda_fun),\nis_lambda(lambda_fun)\n)\n.. testoutput::\n\nFalse True\n\nArgs:\nfun (function)\n\nReturns:\nbool: True if the given function is a lambda function, False otherwise", "source": "codesearchnet"}
{"code": "def upload_files(tree, file_diff):\n    \n    \n    config.LOGGER.info(\"\\nUploading {0} new file(s) to Kolibri Studio...\".format(len(file_diff)))\n    tree.upload_files(file_diff)\n    tree.reattempt_upload_fails()\n    return file_diff", "docstring": "upload_files: Upload files to Kolibri Studio\nArgs:\ntree (ChannelManager): manager to handle communication to Kolibri Studio\nfile_diff ([str]): list of files to upload\nReturns: None", "source": "juraj-google-style"}
{"code": "def validate_task_schema(context, schema_key='schema_file'):\n    \n    schema_path = context.config\n    schema_keys = schema_key.split('.')\n    for key in schema_keys:\n        schema_path = schema_path[key]\n\n    task_schema = load_json_or_yaml(schema_path, is_path=True)\n    log.debug('Task is validated against this schema: {}'.format(task_schema))\n\n    try:\n        validate_json_schema(context.task, task_schema)\n    except ScriptWorkerTaskException as e:\n        raise TaskVerificationError('Cannot validate task against schema. Task: {}.'.format(context.task)) from e", "docstring": "Validate the task definition.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context. It must contain a task and\nthe config pointing to the schema file\nschema_key: the key in `context.config` where the path to the schema file is. Key can contain\ndots (e.g.: 'schema_files.file_a'), in which case\n\nRaises:\nTaskVerificationError: if the task doesn't match the schema", "source": "juraj-google-style"}
{"code": "def _philox_scramble_seed(seed):\n    key = constant_op.constant([163851598941452064], dtypes.uint64)\n    counter = math_ops.cast(seed, dtypes.uint64)\n    mix = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2([4], key=key, counter=counter, dtype=dtypes.uint32, alg=Algorithm.PHILOX.value)\n    key = array_ops.reshape(_uint32s_to_uint64(mix[:2]), [1])\n    counter = array_ops_stack.stack([0, _uint32s_to_uint64(mix[2:])], axis=0)\n    return (key, counter)", "docstring": "Determines the key and counter for Philox PRNG with the given seed.\n\nArgs:\nseed: An integer tensor of shape [2]. The seed to calculate the key and\ncounter from.\n\nReturns:\nA pair (key, counter) suitable for V2 stateless RNG ops like\n`StatelessRandomUniformV2`.", "source": "github-repos"}
{"code": "def getenv(key, value=None):\n    key = path2fsn(key)\n    if (is_win and PY2):\n        return environ.get(key, value)\n    return os.getenv(key, value)", "docstring": "Like `os.getenv` but returns unicode under Windows + Python 2\n\nArgs:\nkey (pathlike): The env var to get\nvalue (object): The value to return if the env var does not exist\nReturns:\n`fsnative` or `object`:\nThe env var or the passed value if it doesn't exist", "source": "codesearchnet"}
{"code": "def composition_prediction(self, composition, to_this_composition=True):\n    preds = self.list_prediction(list(composition.keys()), to_this_composition)\n    output = []\n    for p in preds:\n        if to_this_composition:\n            subs = {v: k for (k, v) in p['substitutions'].items()}\n        else:\n            subs = p['substitutions']\n        charge = 0\n        for (k, v) in composition.items():\n            charge += (subs[k].oxi_state * v)\n        if (abs(charge) < 1e-08):\n            output.append(p)\n    logging.info('{} charge balanced substitutions found'.format(len(output)))\n    return output", "docstring": "Returns charged balanced substitutions from a starting or ending\ncomposition.\n\nArgs:\ncomposition:\nstarting or ending composition\nto_this_composition:\nIf true, substitutions with this as a final composition\nwill be found. If false, substitutions with this as a\nstarting composition will be found (these are slightly\ndifferent)\n\nReturns:\nList of predictions in the form of dictionaries.\nIf to_this_composition is true, the values of the dictionary\nwill be from the list species. If false, the keys will be\nfrom that list.", "source": "codesearchnet"}
{"code": "def GenerateMemoryReport(metagraph, detailed_report=True, cluster=None):\n    if cluster is None:\n        cluster = gcluster.Cluster(disable_detailed_stats=True, disable_timeline=True)\n    item = gitem.Item(metagraph)\n    peak_usage = cluster.DeterminePeakMemoryUsage(item)\n    report = ''\n    for device, snapshot in peak_usage.items():\n        peak_usage = snapshot[0]\n        report += 'Peak usage for device ' + device + ': ' + str(peak_usage) + ' bytes\\n'\n        if detailed_report:\n            live_tensors = snapshot[1]\n            for tensor in live_tensors:\n                op_name = tensor[0]\n                output_id = tensor[1]\n                mem_used = tensor[2]\n                report += '  ' + str(op_name) + ':' + str(output_id) + ' uses ' + str(mem_used) + ' bytes\\n'\n    return report", "docstring": "Analyze the peak memory usage for the provided metagraph.\n\nArgs:\nmetagraph: A TensorFlow MetaGraphDef.\ndetailed_report: print the live tensors in addition to the peak memory\nusage.\ncluster: Analyze the memory using the specified cluster, or the local\nmachine if no cluster was specified.\n\nReturns:\nA string with the formatted memory usage.", "source": "github-repos"}
{"code": "def match_tokens(ast_tokens, ast_types):\n    ast_final_types = ([ast.Module, ast.Expr] + ast_types)\n    return all((isinstance(ast_token, ast_type) for (ast_token, ast_type) in zip(ast_tokens, ast_final_types)))", "docstring": "Verify that each token in order does match the expected types.\n\nThe list provided by `get_tokens` does have three more elements\nat the beginning of the list which should be always the same\nfor a condition (Module and Expr). Those are automatically\nadded first to the final list of expected types so you don't have\nto specify it yourself each time.\n\n>>> tokens = Condition.get_tokens('2 == 3')\n>>> Condition.match_tokens(tokens, [ast.Compare, ast.Num, ast.Eq, ast.Num])\nTrue\n\nArgs:\nast_entries (list): list of AST tokens parsers previously.\nast_types (list): list of expected AST types.\n\nReturns:\nbool: when all tokes match the expected types", "source": "codesearchnet"}
{"code": "def remove_from_queue(self, index):\n        \n        \n        updid = '0'\n        objid = 'Q:0/' + str(index + 1)\n        self.avTransport.RemoveTrackFromQueue([\n            ('InstanceID', 0),\n            ('ObjectID', objid),\n            ('UpdateID', updid),\n        ])", "docstring": "Remove a track from the queue by index. The index number is\nrequired as an argument, where the first index is 0.\n\nArgs:\nindex (int): The (0-based) index of the track to remove", "source": "juraj-google-style"}
{"code": "def _check_stop(self):\n    return False", "docstring": "Hook for subclasses to provide their own stop condition.\n\nReturns:\nTrue if the session should stop, False otherwise.", "source": "github-repos"}
{"code": "def encode_csv(data_dict, column_names):\n    import csv\n    import six\n    values = [str(data_dict[x]) for x in column_names]\n    str_buff = six.StringIO()\n    writer = csv.writer(str_buff, lineterminator='')\n    writer.writerow(values)\n    return str_buff.getvalue()", "docstring": "Builds a csv string.\n\nArgs:\ndata_dict: dict of {column_name: 1 value}\ncolumn_names: list of column names\n\nReturns:\nA csv string version of data_dict", "source": "codesearchnet"}
{"code": "def delete(self, response_choice=1, async=False, callback=None):\n        \n        return self._manage_child_object(nurest_object=self, method=HTTP_METHOD_DELETE, async=async, callback=callback, response_choice=response_choice)", "docstring": "Delete object and call given callback in case of call.\n\nArgs:\nresponse_choice (int): Automatically send a response choice when confirmation is needed\nasync (bool): Boolean to make an asynchronous call. Default is False\ncallback (function): Callback method that will be triggered in case of asynchronous call\n\nExample:\n>>> entity.delete() # will delete the enterprise from the server", "source": "juraj-google-style"}
{"code": "def __get_distribution_tags(self, client, arn):\n        \n        return {\n            t['Key']: t['Value'] for t in client.list_tags_for_resource(\n            Resource=arn\n        )['Tags']['Items']\n        }", "docstring": "Returns a dict containing the tags for a CloudFront distribution\n\nArgs:\nclient (botocore.client.CloudFront): Boto3 CloudFront client object\narn (str): ARN of the distribution to get tags for\n\nReturns:\n`dict`", "source": "juraj-google-style"}
{"code": "def strip_el_text(el, max_depth=0, cur_depth=0):\n    \n    \n    el_text = strip_str(el.text if el.text is not None else \"\")\n\n    if cur_depth < max_depth:\n        for child in el:\n            el_text += \" \"+strip_el_text(child, max_depth=max_depth, cur_depth=cur_depth+1)\n    else:\n        \n        children = list(el)\n        if children is not None and len(children) > 0:\n            if children[-1].tail is not None:\n                el_text += \" \"+strip_str(children[-1].tail)\n\n    \n    if cur_depth > 0:\n        \n        if el.tail is not None:\n            el_text += \" \"+strip_str(el.tail)\n\n    return strip_str(el_text)", "docstring": "Recursively strips the plain text out of the given XML etree element up to the desired depth.\n\nArgs:\nel: The etree element to scan.\nmax_depth: The depth to which to recursively strip text (default: 0).\ncur_depth: The current recursive depth to which we've scanned so far.\n\nReturns:\nThe stripped, plain text from within the element.", "source": "juraj-google-style"}
{"code": "def save_pretrained(self, save_directory: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]]=None, push_to_hub: bool=False, **kwargs):\n    try:\n        self.validate(strict=True)\n    except ValueError as exc:\n        raise ValueError(str(exc) + '\\n\\nFix these issues to save the configuration.')\n    use_auth_token = kwargs.pop('use_auth_token', None)\n    if use_auth_token is not None:\n        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n        if kwargs.get('token', None) is not None:\n            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n        kwargs['token'] = use_auth_token\n    config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME\n    if os.path.isfile(save_directory):\n        raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n    os.makedirs(save_directory, exist_ok=True)\n    if push_to_hub:\n        commit_message = kwargs.pop('commit_message', None)\n        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n        repo_id = self._create_repo(repo_id, **kwargs)\n        files_timestamps = self._get_files_timestamps(save_directory)\n    output_config_file = os.path.join(save_directory, config_file_name)\n    self.to_json_file(output_config_file, use_diff=True)\n    logger.info(f'Configuration saved in {output_config_file}')\n    if push_to_hub:\n        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))", "docstring": "Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the\n[`~GenerationConfig.from_pretrained`] class method.\n\nArgs:\nsave_directory (`str` or `os.PathLike`):\nDirectory where the configuration JSON file will be saved (will be created if it does not exist).\nconfig_file_name (`str` or `os.PathLike`, *optional*, defaults to `\"generation_config.json\"`):\nName of the generation configuration JSON file to be saved in `save_directory`.\npush_to_hub (`bool`, *optional*, defaults to `False`):\nWhether or not to push your model to the Hugging Face model hub after saving it. You can specify the\nrepository you want to push to with `repo_id` (will default to the name of `save_directory` in your\nnamespace).\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.", "source": "github-repos"}
{"code": "def StartsWith(this, that):\n    this_iter = iter(this)\n    that_iter = iter(that)\n    while True:\n        try:\n            this_value = next(that_iter)\n        except StopIteration:\n            return True\n        try:\n            that_value = next(this_iter)\n        except StopIteration:\n            return False\n        if (this_value != that_value):\n            return False", "docstring": "Checks whether an items of one iterable are a prefix of another.\n\nArgs:\nthis: An iterable that needs to be checked.\nthat: An iterable of which items must match the prefix of `this`.\n\nReturns:\n`True` if `that` is a prefix of `this`, `False` otherwise.", "source": "codesearchnet"}
{"code": "def draw(self):\n    for (age, level) in enumerate(self.tree.get_branches()):\n        if (age in self.ages):\n            thickness = self._get_thickness(age)\n            color = self._get_color(age)\n            for branch in level:\n                self._draw_branch(branch, color, thickness, age)", "docstring": "Draws the tree.\n\nArgs:\nages (array): Contains the ages you want to draw.", "source": "codesearchnet"}
{"code": "def get_user_information(self):\n    \n    url = \"https:\n\n    headers = self.__gen_headers()\n    headers[\"Content-Type\"] = \"application/json\"\n\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Gets the current user information, including sensor ID\n\nArgs:\nNone\n\nReturns:\ndictionary object containing information about the current user", "source": "juraj-google-style"}
{"code": "def save(obj, filename, protocol=4):\n    \n\n    with open(filename, 'wb') as f:\n        pickle.dump(obj, f, protocol=protocol)", "docstring": "Serialize an object to disk using pickle protocol.\n\nArgs:\nobj: The object to serialize.\nfilename: Path to the output file.\nprotocol: Version of the pickle protocol.", "source": "juraj-google-style"}
{"code": "def bin(values, bins, labels=None):\n    \n    bins = np.asarray(bins)\n    if labels is None:\n        labels = (bins[:-1] + np.diff(bins)/2.)\n    else:\n        labels = np.asarray(labels)\n    dtype = 'float' if labels.dtype.kind == 'f' else 'O'\n    binned = np.full_like(values, (np.nan if dtype == 'f' else None), dtype=dtype)\n    for lower, upper, label in zip(bins[:-1], bins[1:], labels):\n        condition = (values > lower) & (values <= upper)\n        binned[np.where(condition)[0]] = label\n    return binned", "docstring": "Bins data into declared bins\n\nBins data into declared bins. By default each bin is labelled\nwith bin center values but an explicit list of bin labels may be\ndefined.\n\nArgs:\nvalues: Array of values to be binned\nbins: List or array containing the bin boundaries\nlabels: List of labels to assign to each bin\nIf the bins are length N the labels should be length N-1\n\nReturns:\nArray of binned values", "source": "juraj-google-style"}
{"code": "def __init__(self, module, dropout, weights=['weight_hh_l0']):\n        \n        super().__init__()\n        self.module,self.weights,self.dropout = module,weights,dropout\n        self._setup()", "docstring": "Default constructor for the WeightDrop module\n\nArgs:\nmodule (torch.nn.Module): A pytorch layer being wrapped\ndropout (float): a dropout value to apply\nweights (list(str)): the parameters of the wrapped **module**\nwhich should be fractionally dropped.", "source": "juraj-google-style"}
{"code": "def has_permission(self, perm):\n        \n        return self.user.superuser or self.auth.has_permission(perm)", "docstring": "Checks if current user (or role) has the given permission.\n\nArgs:\nperm: Permmission code or object.\nDepends on the :attr:`~zengine.auth.auth_backend.AuthBackend` implementation.\n\nReturns:\nBoolean.", "source": "juraj-google-style"}
{"code": "def get_angle(self, i: int, j: int, k: int) -> float:\n    v1 = (self[i].coords - self[j].coords)\n    v2 = (self[k].coords - self[j].coords)\n    return get_angle(v1, v2, units='degrees')", "docstring": "Returns angle specified by three sites.\n\nArgs:\ni: Index of first site.\nj: Index of second site.\nk: Index of third site.\n\nReturns:\nAngle in degrees.", "source": "codesearchnet"}
{"code": "def __init__(self, data_type_definition):\n    \n    super(DataTypeMap, self).__init__()\n    self._data_type_definition = data_type_definition", "docstring": "Initializes a data type map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nFormatError: if the data type map cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def send_message_for_lane_change(sender, **kwargs):\n    current = kwargs['current']\n    owners = kwargs['possible_owners']\n    if ('lane_change_invite' in current.task_data):\n        msg_context = current.task_data.pop('lane_change_invite')\n    else:\n        msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG\n    wfi = WFCache(current).get_instance()\n    TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete()\n    today = datetime.today()\n    for recipient in owners:\n        inv = TaskInvitation(instance=wfi, role=recipient, wf_name=wfi.wf.name, progress=30, start_date=today, finish_date=(today + timedelta(15)))\n        inv.title = (current.task_data.get('INVITATION_TITLE') or wfi.wf.title)\n        inv.save()\n        try:\n            recipient.send_notification(title=msg_context['title'], message=('%s %s' % (wfi.wf.title, msg_context['body'])), typ=1, url='', sender=sender)\n        except:\n            pass", "docstring": "Sends a message to possible owners of the current workflows\nnext lane.\n\nArgs:\n**kwargs: ``current`` and ``possible_owners`` are required.\nsender (User): User object", "source": "codesearchnet"}
{"code": "def add_case(self, case, update=False):\n    existing_case = self.case(case)\n    if (existing_case and (not update)):\n        raise CaseError('Case {} already exists'.format(case['case_id']))\n    if existing_case:\n        self.db.case.find_one_and_replace({'case_id': case['case_id']}, case)\n    else:\n        self.db.case.insert_one(case)\n    return case", "docstring": "Add a case to the case collection\n\nIf the case exists and update is False raise error.\n\nArgs:\ndb (MongoClient): A connection to the mongodb\ncase (dict): A case dictionary\nupdate(bool): If existing case should be updated\n\nReturns:\nmongo_case_id(ObjectId)", "source": "codesearchnet"}
{"code": "def as_list(self):\n    if (self._dims is None):\n        raise ValueError('as_list() is not defined on an unknown TensorShape.')\n    return [dim.value for dim in self._dims]", "docstring": "Returns a list of integers or `None` for each dimension.\n\nReturns:\nA list of integers or `None` for each dimension.\n\nRaises:\nValueError: If `self` is an unknown shape with an unknown rank.", "source": "codesearchnet"}
{"code": "def show_plot(plot, width=PREVIEW_WIDTH, height=PREVIEW_HEIGHT):\n    \n    return SVG(data=plot_to_svg(plot, width, height))", "docstring": "Preview a plot in a jupyter notebook.\n\nArgs:\nplot (list): the plot to display (list of layers)\nwidth (int): the width of the preview\nheight (int): the height of the preview\n\nReturns:\nAn object that renders in Jupyter as the provided plot", "source": "juraj-google-style"}
{"code": "def convert_compartment_entry(self, compartment, adjacencies):\n    d = OrderedDict()\n    d['id'] = compartment.id\n    if (adjacencies is not None):\n        d['adjacent_to'] = adjacencies\n    order = {key: i for (i, key) in enumerate(['name'])}\n    prop_keys = set(compartment.properties)\n    for prop in sorted(prop_keys, key=(lambda x: (order.get(x, 1000), x))):\n        if (compartment.properties[prop] is not None):\n            d[prop] = compartment.properties[prop]\n    return d", "docstring": "Convert compartment entry to YAML dict.\n\nArgs:\ncompartment: :class:`psamm.datasource.entry.CompartmentEntry`.\nadjacencies: Sequence of IDs or a single ID of adjacent\ncompartments (or None).", "source": "codesearchnet"}
{"code": "def job_tasks(self, job_name):\n    try:\n        job = self._cluster_spec[job_name]\n    except KeyError:\n        raise ValueError('No such job in cluster: %r' % job_name)\n    ret = [None for _ in range(max(job.keys()) + 1)]\n    for i, task in job.items():\n        ret[i] = task\n    return ret", "docstring": "Returns a mapping from task ID to address in the given job.\n\nNOTE: For backwards compatibility, this method returns a list. If\nthe given job was defined with a sparse set of task indices, the\nlength of this list may not reflect the number of tasks defined in\nthis job. Use the `tf.train.ClusterSpec.num_tasks` method\nto find the number of tasks defined in a particular job.\n\nArgs:\njob_name: The string name of a job in this cluster.\n\nReturns:\nA list of task addresses, where the index in the list\ncorresponds to the task index of each task. The list may contain\n`None` if the job was defined with a sparse set of task indices.\n\nRaises:\nValueError: If `job_name` does not name a job in this cluster.", "source": "github-repos"}
{"code": "def AFF4Path(self, client_urn):\n    if (not self.HasField('pathtype')):\n        raise ValueError(\"Can't determine AFF4 path without a valid pathtype.\")\n    first_component = self[0]\n    dev = first_component.path\n    if first_component.HasField('offset'):\n        dev += ':{}'.format((first_component.offset \n    if ((len(self) > 1) and (first_component.pathtype == PathSpec.PathType.OS) and (self[1].pathtype == PathSpec.PathType.TSK)):\n        result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]\n        start = 1\n    else:\n        result = [self.AFF4_PREFIXES[first_component.pathtype]]\n        start = 0\n    for p in self[start]:\n        component = p.path\n        if p.HasField('offset'):\n            component += ':{}'.format((p.offset \n        if p.HasField('stream_name'):\n            component += (':' + p.stream_name)\n        result.append(component)\n    return client_urn.Add('/'.join(result))", "docstring": "Returns the AFF4 URN this pathspec will be stored under.\n\nArgs:\nclient_urn: A ClientURN.\n\nReturns:\nA urn that corresponds to this pathspec.\n\nRaises:\nValueError: If pathspec is not of the correct type.", "source": "codesearchnet"}
{"code": "def _determine_hpp_url(self, platform, action):\n        \n        base_uri = settings.BASE_HPP_URL.format(platform)\n        service = action + '.shtml'\n        result = '/'.join([base_uri, service])\n        return result", "docstring": "This returns the Adyen HPP endpoint based on the provided platform,\nand action.\n\nArgs:\nplatform (str): Adyen platform, ie 'live' or 'test'.\naction (str):   the HPP action to perform.\npossible actions: select, pay, skipDetails, directory", "source": "juraj-google-style"}
{"code": "def isfunc(x):\n    return any([(inspect.isfunction(x) and (not asyncio.iscoroutinefunction(x))), (inspect.ismethod(x) and (not asyncio.iscoroutinefunction(x)))])", "docstring": "Returns `True` if the given value is a function or method object.\n\nArguments:\nx (mixed): value to check.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def bruteVersionStr(self, valu):\n    try:\n        (valu, info) = self.core.model.type('it:semver').norm(valu)\n        subs = info.get('subs')\n        return (valu, subs)\n    except s_exc.BadTypeValu:\n        subs = s_version.parseVersionParts(valu)\n        if (subs is None):\n            raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr', mesg='Unable to brute force version parts out of the string')\n        if subs:\n            valu = s_version.packVersion(subs.get('major'), subs.get('minor', 0), subs.get('patch', 0))\n            return (valu, subs)", "docstring": "Brute force the version out of a string.\n\nArgs:\nvalu (str): String to attempt to get version information for.\n\nNotes:\nThis first attempts to parse strings using the it:semver normalization\nbefore attempting to extract version parts out of the string.\n\nReturns:\nint, dict: The system normalized version integer and a subs dictionary.", "source": "codesearchnet"}
{"code": "def download(url):\n    \n    filepath = get_file(fname='tmp.zip', origin=url, extract=True)\n    base_dir = os.path.dirname(filepath)\n    weights_file = os.path.join(base_dir, 'weights.h5')\n    params_file = os.path.join(base_dir, 'params.json')\n    preprocessor_file = os.path.join(base_dir, 'preprocessor.pickle')\n\n    return weights_file, params_file, preprocessor_file", "docstring": "Download a trained weights, config and preprocessor.\n\nArgs:\nurl (str): target url.", "source": "juraj-google-style"}
{"code": "def supports_default_grad(t):\n    if t.dtype == dtypes.resource:\n        handle_data = resource_variable_ops.get_eager_safe_handle_data(t)\n        if handle_data is None or not handle_data.is_set or len(handle_data.shape_and_type) != 1:\n            return False\n    return True", "docstring": "Whether tensor `t` supports creating a default gradient.\n\nThis function assumes that `t` is of a trainable type.\n\nArgs:\nt: Tensor\n\nReturns:\nBool", "source": "github-repos"}
{"code": "def process_module(self, mod_info, mod_ast):\n    module_name = mod_info.module_name\n    module = Module(module_name, mod_info.filename, mod_ast)\n    self._resolver.allow_singletons = False\n    module.ast = self._resolver.resolve_builtin_types(module.ast)\n    self._modules[module_name] = module\n    try:\n        self._resolver.allow_singletons = True\n        module.ast = self._resolve_external_and_local_types(module.ast)\n        module.ast = self._resolver.resolve_builtin_types(module.ast)\n        self._resolver.allow_singletons = False\n        module.ast = module.ast.Visit(visitors.AdjustTypeParameters())\n        module_map = {'': module.ast, module_name: module.ast}\n        module.ast.Visit(visitors.FillInLocalPointers(module_map))\n    except:\n        del self._modules[module_name]\n        raise\n    if module_name:\n        self.add_module_prefixes(module_name)\n    return module.ast", "docstring": "Create a module from a loaded ast and save it to the loader cache.\n\nArgs:\nmod_info: The metadata of the module being imported.\nmod_ast: The pytd.TypeDeclUnit representing the module.\n\nReturns:\nThe ast (pytd.TypeDeclUnit) as represented in this loader.", "source": "github-repos"}
{"code": "def round_to_nearest(dt, n_round_sec=1.0):\n    ts = (ts_from_dt(strip_timezone(dt)) + (n_round_sec / 2.0))\n    res = dt_from_ts((ts - (ts % n_round_sec)))\n    return res.replace(tzinfo=dt.tzinfo)", "docstring": "Round datetime up or down to nearest divisor.\n\nRound datetime up or down to nearest number of seconds that divides evenly by\nthe divisor.\n\nAny timezone is preserved but ignored in the rounding.\n\nArgs:\ndt: datetime\n\nn_round_sec : int or float\nDivisor for rounding\n\nExamples:\n- ``n_round_sec`` = 0.1: nearest 10th of a second.\n- ``n_round_sec`` = 1: nearest second.\n- ``n_round_sec`` = 30: nearest half minute.", "source": "codesearchnet"}
{"code": "def simplify_exprs(exprs, result_type, stop_term, skip_term):\n    expr_set = set()\n    for e in exprs:\n        if e is stop_term:\n            return stop_term\n        elif e is skip_term:\n            continue\n        elif isinstance(e, result_type):\n            expr_set = expr_set.union(e.exprs)\n        else:\n            expr_set.add(e)\n    if len(expr_set) > 1:\n        return result_type(expr_set)\n    elif expr_set:\n        return expr_set.pop()\n    else:\n        return skip_term", "docstring": "Simplify a set of subexpressions for a conjunction or disjunction.\n\nArgs:\nexprs: An iterable. The subexpressions.\nresult_type: _And or _Or. The type of result (unless it simplifies down to\nsomething simpler).\nstop_term: FALSE for _And, TRUE for _Or. If this term is encountered, it\nwill be immediately returned.\nskip_term: TRUE for _And, FALSE for _Or. If this term is encountered, it\nwill be ignored.\n\nReturns:\nA BooleanTerm.", "source": "github-repos"}
{"code": "def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1):\n    points = vtk.vtkPoints()\n    points.InsertPoint(0, center.x, center.y, center.z)\n    n = len(neighbors)\n    lines = vtk.vtkCellArray()\n    for i in range(n):\n        points.InsertPoint((i + 1), neighbors[i].coords)\n        lines.InsertNextCell(2)\n        lines.InsertCellPoint(0)\n        lines.InsertCellPoint((i + 1))\n    pd = vtk.vtkPolyData()\n    pd.SetPoints(points)\n    pd.SetLines(lines)\n    tube = vtk.vtkTubeFilter()\n    if (vtk.VTK_MAJOR_VERSION <= 5):\n        tube.SetInputConnection(pd.GetProducerPort())\n    else:\n        tube.SetInputData(pd)\n    tube.SetRadius(radius)\n    mapper = vtk.vtkPolyDataMapper()\n    mapper.SetInputConnection(tube.GetOutputPort())\n    actor = vtk.vtkActor()\n    actor.SetMapper(mapper)\n    if (opacity is not None):\n        actor.GetProperty().SetOpacity(opacity)\n    if (color is not None):\n        actor.GetProperty().SetColor(color)\n    self.ren.AddActor(actor)", "docstring": "Adds bonds for a site.\n\nArgs:\nneighbors: Neighbors of the site.\ncenter: The site in the center for all bonds.\ncolor: Color of the tubes representing the bonds\nopacity: Opacity of the tubes representing the bonds\nradius: Radius of tube s representing the bonds", "source": "codesearchnet"}
{"code": "def _indexOfEndTag(istack):\n    \n    if len(istack) <= 0:\n        return 0\n\n    if not istack[0].isOpeningTag():\n        return 0\n\n    cnt = 0\n    opener = istack[0]\n    for index, el in enumerate(istack[1:]):\n        if el.isOpeningTag() and \\\n           el.getTagName().lower() == opener.getTagName().lower():\n            cnt += 1\n\n        elif el.isEndTagTo(opener):\n            if cnt == 0:\n                return index + 1\n\n            cnt -= 1\n\n    return 0", "docstring": "Go through `istack` and search endtag. Element at first index is considered\nas opening tag.\n\nArgs:\nistack (list): List of :class:`.HTMLElement` objects.\n\nReturns:\nint: Index of end tag or 0 if not found.", "source": "juraj-google-style"}
{"code": "def ListFileEntries(self, base_path_specs, output_writer):\n    for base_path_spec in base_path_specs:\n        file_system = resolver.Resolver.OpenFileSystem(base_path_spec)\n        file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)\n        if (file_entry is None):\n            logging.warning('Unable to open base path specification:\\n{0:s}'.format(base_path_spec.comparable))\n            return\n        self._ListFileEntry(file_system, file_entry, '', output_writer)", "docstring": "Lists file entries in the base path specification.\n\nArgs:\nbase_path_specs (list[dfvfs.PathSpec]): source path specification.\noutput_writer (StdoutWriter): output writer.", "source": "codesearchnet"}
{"code": "def _kl_gamma_gamma(g0, g1, name=None):\n    with ops.name_scope(name, 'kl_gamma_gamma', values=[g0.concentration, g0.rate, g1.concentration, g1.rate]):\n        return (g0.concentration - g1.concentration) * math_ops.digamma(g0.concentration) + math_ops.lgamma(g1.concentration) - math_ops.lgamma(g0.concentration) + g1.concentration * math_ops.log(g0.rate) - g1.concentration * math_ops.log(g1.rate) + g0.concentration * (g1.rate / g0.rate - 1.0)", "docstring": "Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.\n\nArgs:\ng0: instance of a Gamma distribution object.\ng1: instance of a Gamma distribution object.\nname: (optional) Name to use for created operations.\nDefault is \"kl_gamma_gamma\".\n\nReturns:\nkl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).", "source": "github-repos"}
{"code": "def long_description():\n    cwd = os.path.abspath(os.path.dirname(__file__))\n    readme_path = os.path.join(cwd, 'README.md')\n    if (not os.path.exists(readme_path)):\n        return pylink.__long_description__\n    try:\n        import pypandoc\n        return pypandoc.convert(readme_path, 'rst')\n    except (IOError, ImportError):\n        pass\n    return open(readme_path, 'r').read()", "docstring": "Reads and returns the contents of the README.\n\nOn failure, returns the project long description.\n\nReturns:\nThe project's long description.", "source": "codesearchnet"}
{"code": "def elevation(self, value=0.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `elevation`'.format(value))\n            if value < -1000.0:\n                raise ValueError('value need to be greater or equal -1000.0 '\n                                 'for field `elevation`')\n            if value >= 9999.9:\n                raise ValueError('value need to be smaller 9999.9 '\n                                 'for field `elevation`')\n\n        self._elevation = value", "docstring": "Corresponds to IDD Field `elevation`\n\nArgs:\nvalue (float): value for IDD Field `elevation`\nUnit: m\nDefault value: 0.0\nvalue >= -1000.0\nvalue < 9999.9\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _format_line(headers, fields):\n    assert (len(fields) == len(headers)), (fields, headers)\n    fields = [(('%2.4f' % field) if isinstance(field, float) else str(field)) for field in fields]\n    return '  '.join((((' ' * max(0, (len(header) - len(field)))) + field) for (header, field) in zip(headers, fields)))", "docstring": "Format a line of a table.\n\nArguments:\nheaders: A list of strings that are used as the table headers.\nfields: A list of the same length as `headers` where `fields[i]` is\nthe entry for `headers[i]` in this row. Elements can be of\narbitrary types. Pass `headers` to print the header row.\n\nReturns:\nA pretty string.", "source": "codesearchnet"}
{"code": "def __recognize_union(self, node: yaml.Node, expected_type: Type) -> RecResult:\n    logger.debug('Recognizing as a union')\n    recognized_types = []\n    message = ''\n    union_types = generic_type_args(expected_type)\n    logger.debug('Union types {}'.format(union_types))\n    for possible_type in union_types:\n        (recognized_type, msg) = self.recognize(node, possible_type)\n        if (len(recognized_type) == 0):\n            message += msg\n        recognized_types.extend(recognized_type)\n    recognized_types = list(set(recognized_types))\n    if ((bool in recognized_types) and (bool_union_fix in recognized_types)):\n        recognized_types.remove(bool_union_fix)\n    if (len(recognized_types) == 0):\n        return (recognized_types, message)\n    elif (len(recognized_types) > 1):\n        message = '{}{}Could not determine which of the following types this is: {}'.format(node.start_mark, os.linesep, recognized_types)\n        return (recognized_types, message)\n    return (recognized_types, '')", "docstring": "Recognize a node that we expect to be one of a union of types.\n\nArgs:\nnode: The node to recognize.\nexpected_type: Union[...something...]\n\nReturns:\nThe specific type that was recognized, multiple, or none.", "source": "codesearchnet"}
{"code": "def _postprocess_flat_outputs(outputs):\n    if outputs is None:\n        outputs = tuple()\n    if not isinstance(outputs, collections_abc.Sequence):\n        outputs = (outputs,)\n    outputs += (control_flow_ops.no_op(),)\n    try:\n        outputs = [o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o) for o in outputs]\n    except Exception as e:\n        raise ValueError('XLA computation function return values must all either be Operations or convertible to Tensors. Got error: \"%s\"' % str(e))\n    output_operations = [o for o in outputs if isinstance(o, ops.Operation)]\n    output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]\n    if outputs != output_tensors + output_operations:\n        raise ValueError('XLA computation function must return zero or more Tensor values followed by zero or more Operations.')\n    new_output_tensors = []\n    for t in output_tensors:\n        with ops.device(t.device if t.device else ''):\n            new_output_tensors.append(array_ops.identity(t))\n    return (new_output_tensors, output_operations)", "docstring": "Validates flat outputs and adds back device assignments.\n\nArgs:\noutputs: Output from `computation` inside `xla.compile`.\n\nReturns:\nTensors and Operations extracted from outputs.", "source": "github-repos"}
{"code": "def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding, v2):\n    pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad\n    if v2:\n        return pool_func(orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding)\n    else:\n        padding, explicit_paddings = nn_ops.convert_padding(padding)\n        return pool_func(orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding, explicit_paddings)", "docstring": "Max Pooling Gradient.\n\nArgs:\norig_input: A float Tensor. The original input tensor.\norig_output: A float Tensor. The original output tensor.\ngrad: A float Tensor.\nThe 4D (batch x rows x cols x depth) output backprop.\nwindow_rows: integer. Kernel size along rows dimension.\nwindow_cols: integer. Kernel size along cols dimension.\nrow_stride: integer. Stride along rows dimension\ncol_stride: integer. Stride along cols dimension\npadding: PoolingOpDef.Padding.  Padding type.\n\nReturns:\nA Tensor.", "source": "github-repos"}
{"code": "def localize(dt, force_to_local=True):\n  \n  if not isinstance(dt, datetime_tz):\n    if not dt.tzinfo:\n      return datetime_tz(dt, tzinfo=localtz())\n    dt = datetime_tz(dt)\n  if force_to_local:\n    return dt.astimezone(localtz())\n  return dt", "docstring": "Localize a datetime to the local timezone.\n\nIf dt is naive, returns the same datetime with the local timezone, otherwise\nuses astimezone to convert.\n\nArgs:\ndt: datetime object.\nforce_to_local: Force all results to be in local time.\n\nReturns:\nA datetime_tz object.", "source": "juraj-google-style"}
{"code": "def execute_tests(self):\n    with open(self.notebook_path, 'r') as nb_f:\n        nb = nbformat.read(nb_f, as_version=4)\n    ExecutePreprocessor.timeout = self.timeout_secs\n    ep = ExecutePreprocessor(allow_errors=True)\n    exec_nb, _ = ep.preprocess(nb, {'metadata': {'path': self.dir + '/'}})\n    test_count = 0\n    error_count = 0\n    errors = OrderedDict()\n    code_cells = {}\n    for cell in exec_nb['cells']:\n        if cell['cell_type'] == 'code':\n            code_cells[cell['execution_count']] = cell\n    for cell_num in sorted(self.tests.keys()):\n        if cell_num not in code_cells:\n            test_count += 1\n            error_count += 1\n            errors[cell_num, '', ''] = 'Given cell does not exist.'\n        else:\n            cell = code_cells[cell_num]\n            for test in self.tests[cell_num]:\n                cls, setup = list(test.items())[0]\n                test_count += 1\n                try:\n                    getattr(sys.modules['testlib'], cls)(setup).check(cell)\n                except Exception as e:\n                    error_count += 1\n                    errors[cell_num, cls, setup] = str(e)\n    return (test_count, error_count, errors)", "docstring": "Executes notebook and compares to test spec.\n\nReturns:\n# of tests, # of errors, error_dict\nwhere error_dict maps (cell number, test class, expected output) to string", "source": "github-repos"}
{"code": "def lineitem_patch_v1(config, auth, patch, li):\n    return API_DV360(config, auth).advertisers().lineItems().patch(advertiserId=li['advertiserId'], lineItemId=li['lineItemId'], updateMask=patch, body=li).execute()", "docstring": "Patches a DV360 Line Item\n\nArgs:\nauth: StarThinker authentication scheme\npatch: List of field names to patch\nli: Line item with updates to push\nReturns: Updated Line Item", "source": "github-repos"}
{"code": "def update_nsval(\n        self, *, nsval: str = None, ns: str = None, val: str = None\n    ) -> None:\n        \n\n        if not (ns and val) and nsval:\n            (ns, val) = nsval.split(\":\", 1)\n        elif not (ns and val) and not nsval:\n            log.error(\"Did not update NSArg - no ns:val or nsval provided\")\n\n        self.namespace = ns\n        self.value = val", "docstring": "Update Namespace and valueast.\n\nArgs:\nnsval: e.g. HGNC:AKT1\nns: namespace\nval: value of entity", "source": "juraj-google-style"}
{"code": "def closure(self, rules):\n    closure = set()\n    todo = set(rules)\n    while todo:\n        rule = todo.pop()\n        closure.add(rule)\n        if rule.at_end:\n            continue\n        symbol = rule.rhs[rule.pos]\n        for production in self.nonterminals[symbol]:\n            for first in self.first(rule.rest):\n                if (EPSILON in production.rhs):\n                    new_rule = DottedRule(production, 1, first)\n                else:\n                    new_rule = DottedRule(production, 0, first)\n                if (new_rule not in closure):\n                    todo.add(new_rule)\n    return frozenset(closure)", "docstring": "Fills out the entire closure based on some initial dotted rules.\n\nArgs:\nrules - an iterable of DottedRules\n\nReturns: frozenset of DottedRules", "source": "codesearchnet"}
{"code": "def gradient(poly):\n    return differential(poly, chaospy.poly.collection.basis(1, 1, poly.dim))", "docstring": "Gradient of a polynomial.\n\nArgs:\npoly (Poly) : polynomial to take gradient of.\n\nReturns:\n(Poly) : The resulting gradient.\n\nExamples:\n>>> q0, q1, q2 = chaospy.variable(3)\n>>> poly = 2*q0 + q1*q2\n>>> print(chaospy.gradient(poly))\n[2, q2, q1]", "source": "codesearchnet"}
{"code": "def update_profiles(adapter):\n\n    \n\n    for case in adapter.cases():\n\n        \n        \n        if case.get('profile_path'):\n\n            profiles = get_profiles(adapter, case['profile_path'])\n            profiled_individuals = deepcopy(case['individuals'])\n\n            for individual in profiled_individuals:\n                ind_id = individual['ind_id']\n                try:\n                    profile = profiles[ind_id]\n                    individual['profile'] = profile\n\n                except KeyError:\n                    LOG.warning(f\"sample IDs in vcf does not match for case {case['case_id']}\")\n\n            updated_case = deepcopy(case)\n\n            updated_case['individuals'] = profiled_individuals\n\n            adapter.add_case(updated_case, update=True)", "docstring": "For all cases having vcf_path, update the profile string for the samples\n\nArgs:\nadapter (MongoAdapter): Adapter to mongodb", "source": "juraj-google-style"}
{"code": "def plot_pie(self, key=\"wall_time\", minfract=0.05, **kwargs):\n        \n        timers = self.timers()\n        n = len(timers)\n\n        \n        import matplotlib.pyplot as plt\n        from matplotlib.gridspec import GridSpec\n        fig = plt.gcf()\n        gspec = GridSpec(n, 1)\n        for idx, timer in enumerate(timers):\n            ax = plt.subplot(gspec[idx, 0])\n            ax.set_title(str(timer))\n            timer.pie(ax=ax, key=key, minfract=minfract, show=False)\n\n        return fig", "docstring": "Plot pie charts of the different timers.\n\nArgs:\nkey: Keyword used to extract data from timers.\nminfract: Don't show sections whose relative weight is less that minfract.\n\nReturns:\n`matplotlib` figure", "source": "juraj-google-style"}
{"code": "def _raise_if_annotated(self, func):\n    if (hasattr(func, ANNOTATED) and getattr(func, ANNOTATED)):\n        msg = 'Functions decorated with {!r} should not be decorated with {!r}.\\nPlease reverse the order of the decorators!'.format(self.__class__.__name__, Annotate.__name__)\n        raise TypeError(msg)", "docstring": "Raise TypeError if a function is decorated with Annotate, as such\nfunctions cause visual bugs when decorated with Animate.\n\nAnimate should be wrapped by Annotate instead.\n\nArgs:\nfunc (function): Any callable.\nRaises:\nTypeError", "source": "codesearchnet"}
{"code": "def __init__(self, mol):\n        \n        if isinstance(mol, Molecule):\n            if not mol.is_ordered:\n                raise ValueError(\"OpenBabel Molecule only supports ordered \"\n                                 \"molecules.\")\n\n            \n            \n            \n            \n            obmol = ob.OBMol()\n            obmol.BeginModify()\n            for site in mol:\n                coords = [c for c in site.coords]\n                atomno = site.specie.Z\n                obatom = ob.OBAtom()\n                obatom.thisown = 0\n                obatom.SetAtomicNum(atomno)\n                obatom.SetVector(*coords)\n                obmol.AddAtom(obatom)\n                del obatom\n            obmol.ConnectTheDots()\n            obmol.PerceiveBondOrders()\n            obmol.SetTotalSpinMultiplicity(mol.spin_multiplicity)\n            obmol.SetTotalCharge(mol.charge)\n            obmol.Center()\n            obmol.Kekulize()\n            obmol.EndModify()\n            self._obmol = obmol\n        elif isinstance(mol, ob.OBMol):\n            self._obmol = mol", "docstring": "Initializes with pymatgen Molecule or OpenBabel\"s OBMol.\n\nArgs:\nmol: pymatgen's Molecule or OpenBabel OBMol", "source": "juraj-google-style"}
{"code": "def find_elements(driver, elem_path, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):\n    wait = WebDriverWait(driver, timeout, poll_frequency)\n    return wait.until(EC.presence_of_all_elements_located((by, elem_path)))", "docstring": "Find and return all elements once located\n\nfind_elements locates all elements on the page, waiting\nfor up to timeout seconds. The elements, when located,\nare returned. If not located, a TimeoutException is raised.\n\nArgs:\ndriver (selenium webdriver or element): A driver or element\nelem_path (str): String used to located the element\nby (selenium By): Selenium By reference\ntimeout (int): Selenium Wait timeout, in seconds\npoll_frequency (float): Selenium Wait polling frequency, in seconds\n\nReturns:\nlist of elements: Selenium element\n\nRaises:\nTimeoutException: Raised when target element isn't located", "source": "codesearchnet"}
{"code": "def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool:\n    signature = base64.b64decode(signature)\n    try:\n        crypto.verify(amazon_cert, signature, request_body, 'sha1')\n        result = True\n    except crypto.Error:\n        result = False\n    return result", "docstring": "Verifies Alexa request signature.\n\nArgs:\namazon_cert: Pycrypto X509 Amazon certificate.\nsignature: Base64 decoded Alexa request signature from Signature HTTP header.\nrequest_body: full HTTPS request body\nReturns:\nresult: True if verification was successful, False if not.", "source": "codesearchnet"}
{"code": "def get(self, name_or_uri):\n    name_or_uri = quote(name_or_uri)\n    return self._client.get(name_or_uri)", "docstring": "Get the role by its URI or Name.\n\nArgs:\nname_or_uri:\nCan be either the Name or the URI.\n\nReturns:\ndict: Role", "source": "codesearchnet"}
{"code": "def check_status(self, **kwargs):\n        \n        for work in self:\n            work.check_status()\n\n        if kwargs.pop(\"show\", False):\n            self.show_status(**kwargs)", "docstring": "Check the status of the works in self.\n\nArgs:\nshow: True to show the status of the flow.\nkwargs: keyword arguments passed to show_status", "source": "juraj-google-style"}
{"code": "def line_number_above():\n    call_site_lineno = tf_inspect.stack()[1][2]\n    if sys.version_info < (3, 8):\n        return call_site_lineno - 1\n    else:\n        with open(__file__, 'rb') as f:\n            source_text = f.read().decode('utf-8')\n        source_tree = ast.parse(source_text)\n        prev_node = _find_preceding_ast_node(source_tree, call_site_lineno)\n        return prev_node.lineno", "docstring": "Get lineno of the AST node immediately above this function's call site.\n\nIt is assumed that there is no empty line(s) between the call site and the\npreceding AST node.\n\nReturns:\nThe lineno of the preceding AST node, at the same level of the AST.\nIf the preceding AST spans multiple lines:\n- In Python 3.8+, the lineno of the first line is returned.\n- In older Python versions, the lineno of the last line is returned.", "source": "github-repos"}
{"code": "def increment(self, counter_name, delta):\n    \n    current_value = self.counters.get(counter_name, 0)\n    new_value = current_value + delta\n    self.counters[counter_name] = new_value\n    return new_value", "docstring": "Increment counter value.\n\nArgs:\ncounter_name: counter name as String.\ndelta: increment delta as Integer.\n\nReturns:\nnew counter value.", "source": "juraj-google-style"}
{"code": "def synthesize(self, duration, freqs_in_hz=[440.0]):\n    freqs = np.array(freqs_in_hz)\n    scaling = (1 / len(freqs))\n    sr = int(self.samplerate)\n    cps = (freqs / sr)\n    ts = ((duration / Seconds(1)) * sr)\n    ranges = np.array([np.arange(0, (ts * c), c) for c in cps])\n    raw = (np.sin((ranges * (2 * np.pi))) * scaling).sum(axis=0)\n    return AudioSamples(raw, self.samplerate)", "docstring": "Synthesize one or more sine waves\n\nArgs:\nduration (numpy.timdelta64): The duration of the sound to be\nsynthesized\nfreqs_in_hz (list of float): Numbers representing the frequencies\nin hz that should be synthesized", "source": "codesearchnet"}
{"code": "def _decontextualise_connection(self, connection):\n        \n\n        ctx = stack.top\n        if ctx is not None and connection in ctx.ldap3_manager_connections:\n            ctx.ldap3_manager_connections.remove(connection)", "docstring": "Remove a connection from the appcontext.\n\nArgs:\nconnection (ldap3.Connection): connection to remove from the\nappcontext", "source": "juraj-google-style"}
{"code": "def ajax(cls, url, param={}, method='get'):\n        \n        param = urllib.parse.urlencode(param)\n        if method.lower() == 'get':\n            req = urllib.request.Request(url + '?' + param)\n        elif method.lower() == 'post':\n            param = param.encode('utf-8')\n            req = urllib.request.Request(url, data=param)\n        else:\n            raise Exception(\"invalid method '{}' (GET/POST)\".format(method))\n        rsp = urllib.request.urlopen(req)\n        if rsp:\n            rsp_json = rsp.read().decode('utf-8')\n            rsp_dict = json.loads(rsp_json)\n            return rsp_dict\n        return None", "docstring": "Get info by ajax\n\nArgs:\nurl: string\nReturns:\ndict: json decoded into a dict", "source": "juraj-google-style"}
{"code": "def _ParseMRUListExEntryValue(\n      self, parser_mediator, registry_key, entry_index, entry_number,\n      codepage='cp1252', **kwargs):\n    \n    value_string = ''\n\n    value = registry_key.GetValueByName('{0:d}'.format(entry_number))\n    if value is None:\n      parser_mediator.ProduceExtractionWarning(\n          'missing MRUListEx value: {0:d} in key: {1:s}.'.format(\n              entry_number, registry_key.path))\n\n    elif not value.DataIsBinaryData():\n      logger.debug((\n          '[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '\n          '{2:s}.').format(self.NAME, entry_number, registry_key.path))\n\n    elif value.data:\n      utf16le_string_map = self._GetDataTypeMap('utf16le_string')\n\n      context = dtfabric_data_maps.DataTypeMapContext()\n\n      try:\n        path = self._ReadStructureFromByteStream(\n            value.data, 0, utf16le_string_map, context=context)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse MRUListEx entry value: {0:d} with error: '\n            '{1!s}').format(entry_number, exception))\n        return value_string\n\n      path = path.rstrip('\\x00')\n\n      shell_item_data = value.data[context.byte_size:]\n\n      if not shell_item_data:\n        parser_mediator.ProduceExtractionWarning((\n            'missing shell item in MRUListEx value: {0:d} in key: '\n            '{1:s}.').format(entry_number, registry_key.path))\n        value_string = 'Path: {0:s}'.format(path)\n\n      else:\n        shell_items_parser = shell_items.ShellItemsParser(registry_key.path)\n        shell_items_parser.ParseByteStream(\n            parser_mediator, shell_item_data, codepage=codepage)\n\n        value_string = 'Path: {0:s}, Shell item: [{1:s}]'.format(\n            path, shell_items_parser.CopyToPath())\n\n    return value_string", "docstring": "Parses the MRUListEx entry value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\nthe MRUListEx value.\nentry_index (int): MRUListEx entry index.\nentry_number (int): entry number.\ncodepage (Optional[str]): extended ASCII string codepage.\n\nReturns:\nstr: MRUList entry value.", "source": "juraj-google-style"}
{"code": "def _parse_line(line):\n    \n    line, timestamp = line.rsplit(\",\", 1)\n    line, command = line.rsplit(\",\", 1)\n    path, username = line.rsplit(\",\", 1)\n\n    return {\n        \"timestamp\": timestamp.strip(),\n        \"command\": command.strip(),\n        \"username\": username.strip(),\n        \"path\": path,\n    }", "docstring": "Convert one line from the extended log to dict.\n\nArgs:\nline (str): Line which will be converted.\n\nReturns:\ndict: dict with ``timestamp``, ``command``, ``username`` and ``path`` \\\nkeys.\n\nNote:\nTypical line looks like this::\n\n/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777\n\nFilename may contain ``,`` character, so I am ``rsplitting`` the line\nfrom the end to the beginning.", "source": "juraj-google-style"}
{"code": "def get_geostationary_bounding_box(geos_area, nb_points=50):\n    \n    xmax, ymax = get_geostationary_angle_extent(geos_area)\n\n    \n    \n    x = np.cos(np.linspace(-np.pi, 0, nb_points / 2)) * (xmax - 0.001)\n    y = -np.sin(np.linspace(-np.pi, 0, nb_points / 2)) * (ymax - 0.001)\n\n    \n    ll_x, ll_y, ur_x, ur_y = (np.array(geos_area.area_extent) /\n                              geos_area.proj_dict['h'])\n\n    x = np.clip(np.concatenate([x, x[::-1]]), min(ll_x, ur_x), max(ll_x, ur_x))\n    y = np.clip(np.concatenate([y, -y]), min(ll_y, ur_y), max(ll_y, ur_y))\n\n    return _lonlat_from_geos_angle(x, y, geos_area)", "docstring": "Get the bbox in lon/lats of the valid pixels inside *geos_area*.\n\nArgs:\nnb_points: Number of points on the polygon", "source": "juraj-google-style"}
{"code": "def FromString(cls, desc):\n        \n        if language.stream is None:\n            language.get_language()\n\n        parse_exp = Optional(time_interval('time') - Literal(':').suppress()) - language.stream('stream') - Literal('=').suppress() - number('value')\n\n        try:\n            data = parse_exp.parseString(desc)\n            time = 0\n            if 'time' in data:\n                time = data['time'][0]\n\n            return SimulationStimulus(time, data['stream'][0], data['value'])\n        except (ParseException, ParseSyntaxException):\n            raise ArgumentError(\"Could not parse stimulus descriptor\", descriptor=desc)", "docstring": "Create a new stimulus from a description string.\n\nThe string must have the format:\n\n[time: ][system ]input X = Y\nwhere X and Y are integers.  The time, if given must\nbe a time_interval, which is an integer followed by a\ntime unit such as second(s), minute(s), etc.\n\nArgs:\ndesc (str): A string description of the stimulus.\n\nReturns:\nSimulationStimulus: The parsed stimulus object.", "source": "juraj-google-style"}
{"code": "def _write_input(self, input_dir='.'):\n    with open(os.path.join(input_dir, self.input_file), 'wt', encoding='utf-8') as inp:\n        for (k, v) in self.control_params.items():\n            inp.write('{} {}\\n'.format(k, self._format_param_val(v)))\n        for (idx, mol) in enumerate(self.mols):\n            filename = os.path.join(input_dir, '{}.{}'.format(idx, self.control_params['filetype'])).encode('ascii')\n            if (self.control_params['filetype'] == 'pdb'):\n                self.write_pdb(mol, filename, num=(idx + 1))\n            else:\n                a = BabelMolAdaptor(mol)\n                pm = pb.Molecule(a.openbabel_mol)\n                pm.write(self.control_params['filetype'], filename=filename, overwrite=True)\n            inp.write('\\n')\n            inp.write('structure {}.{}\\n'.format(os.path.join(input_dir, str(idx)), self.control_params['filetype']))\n            for (k, v) in self.param_list[idx].items():\n                inp.write('  {} {}\\n'.format(k, self._format_param_val(v)))\n            inp.write('end structure\\n')", "docstring": "Write the packmol input file to the input directory.\n\nArgs:\ninput_dir (string): path to the input directory", "source": "codesearchnet"}
{"code": "async def get_all(self, direction: msg.StreamDirection=msg.StreamDirection.Forward, from_position: Optional[Union[(msg.Position, msg._PositionSentinel)]]=None, max_count: int=100, resolve_links: bool=True, require_master: bool=False, correlation_id: uuid.UUID=None):\n    correlation_id = correlation_id\n    cmd = convo.ReadAllEvents(msg.Position.for_direction(direction, from_position), max_count, resolve_links, require_master, direction=direction, credentials=self.credential)\n    result = (await self.dispatcher.start_conversation(cmd))\n    return (await result)", "docstring": "Read a range of events from the whole database.\n\nArgs:\ndirection (optional): Controls whether to read events forward or backward.\ndefaults to Forward.\nfrom_position (optional): The position to read from.\ndefaults to the beginning of the stream when direction is forward\nand the end of the stream if direction is backward.\nmax_count (optional): The maximum number of events to return.\nresolve_links (optional): True if eventstore should\nautomatically resolve Link Events, otherwise False.\nrequired_master (optional): True if this command must be\nsent direct to the master node, otherwise False.\ncorrelation_id (optional): A unique identifer for this command.\n\nExamples:\n\nRead 5 events\n\n>>> async for event in conn.get_all(max_count=5):\n>>>     print(event)\n\n\nRead 10 most recent events in reverse order\n\n>>> async for event in conn.get_all(\nmax_count=10,\ndirection=StreamDirection.Backward\n):\n>>>     print(event)", "source": "codesearchnet"}
{"code": "def diff(self, **kwargs):\n        \n        path = '%s/%s/diff' % (self.manager.path, self.get_id())\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Generate the commit diff.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the diff could not be retrieved\n\nReturns:\nlist: The changes done in this commit", "source": "juraj-google-style"}
{"code": "def get_colorscale(cmap, levels=None, cmin=None, cmax=None):\n    ncolors = (levels if isinstance(levels, int) else None)\n    if isinstance(levels, list):\n        ncolors = (len(levels) - 1)\n        if (isinstance(cmap, list) and (len(cmap) != ncolors)):\n            raise ValueError(('The number of colors in the colormap must match the intervals defined in the color_levels, expected %d colors found %d.' % (ncolors, len(cmap))))\n    try:\n        palette = process_cmap(cmap, ncolors)\n    except Exception as e:\n        colorscale = colors.PLOTLY_SCALES.get(cmap)\n        if (colorscale is None):\n            raise e\n        return colorscale\n    if isinstance(levels, int):\n        colorscale = []\n        scale = np.linspace(0, 1, (levels + 1))\n        for i in range((levels + 1)):\n            if (i == 0):\n                colorscale.append((scale[0], palette[i]))\n            elif (i == levels):\n                colorscale.append((scale[(- 1)], palette[(- 1)]))\n            else:\n                colorscale.append((scale[i], palette[(i - 1)]))\n                colorscale.append((scale[i], palette[i]))\n        return colorscale\n    elif isinstance(levels, list):\n        (palette, (cmin, cmax)) = color_intervals(palette, levels, clip=(cmin, cmax))\n    return colors.make_colorscale(palette)", "docstring": "Converts a cmap spec to a plotly colorscale\n\nArgs:\ncmap: A recognized colormap by name or list of colors\nlevels: A list or integer declaring the color-levels\ncmin: The lower bound of the color range\ncmax: The upper bound of the color range\n\nReturns:\nA valid plotly colorscale", "source": "codesearchnet"}
{"code": "def _on_change(self, field_updates: Dict[utils.KeyPath, FieldUpdate]):", "docstring": "Event that is triggered when field values in the subtree are updated.\n\nThis event will be called\n* On per-field basis when object is modified via attribute.\n* In batch when multiple fields are modified via `rebind` method.\n\nWhen a field in an object tree is updated, all ancestors' `_on_change` event\nwill be triggered in order, from the nearest one to furthest one.\n\nArgs:\nfield_updates: Updates made to the subtree. Key path is relative to\ncurrent object.", "source": "github-repos"}
{"code": "def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):\n        \n        response = self.setObsoletedByResponse(\n            pid, obsoletedByPid, serialVersion, vendorSpecific\n        )\n        return self._read_boolean_response(response)", "docstring": "See Also: setObsoletedByResponse()\n\nArgs:\npid:\nobsoletedByPid:\nserialVersion:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def add_value(self, value_type, value_min, value_max):\n        \n\n        if len(self._employers) > 0:\n            self._logger.log(\n                'warn',\n                'Adding a value after employers have been created'\n            )\n        value = (value_type,  (value_min, value_max))\n        self._value_ranges.append(value)\n        self._limit = self._num_employers*len(self._value_ranges)\n        self._logger.log(\n            'debug',\n            'Limit set to {}'.format(self._limit)\n        )", "docstring": "Add a tunable value to the ABC (fitness function must be\nconfigured to handle it)\n\nArgs:\nvalue_type (string): type of the value, 'int' or 'float'\nvalue_min (int or float): minimum bound for the value\nvalue_max (int or float): maximum bound for the value\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def Verify(self, mempool):\n        \n\n        for descriptor in self.Descriptors:\n            if not descriptor.Verify():\n                return False\n\n        return super(StateTransaction, self).Verify(mempool)", "docstring": "Verify the transaction.\n\nArgs:\nmempool:\n\nReturns:\nbool: True if verified. False otherwise.", "source": "juraj-google-style"}
{"code": "def stat_float_times(cls, newvalue=None):\n        \n        if newvalue is not None:\n            cls._stat_float_times = bool(newvalue)\n        return cls._stat_float_times", "docstring": "Determine whether a file's time stamps are reported as floats\nor ints.\n\nCalling without arguments returns the current value.\nThe value is shared by all instances of FakeOsModule.\n\nArgs:\nnewvalue: If `True`, mtime, ctime, atime are reported as floats.\nOtherwise, they are returned as ints (rounding down).", "source": "juraj-google-style"}
{"code": "def CollectFromKnowledgeBase(cls, knowledge_base):\n    for preprocess_plugin in cls._knowledge_base_plugins.values():\n        logger.debug('Running knowledge base preprocessor plugin: {0:s}'.format(preprocess_plugin.__class__.__name__))\n        try:\n            preprocess_plugin.Collect(knowledge_base)\n        except errors.PreProcessFail as exception:\n            logger.warning('Unable to collect knowledge base value with error: {0!s}'.format(exception))", "docstring": "Collects values from knowledge base values.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.", "source": "codesearchnet"}
{"code": "def find_iteration(\n    url: Union[methods, str],\n    itermode: Optional[str] = None,\n    iterkey: Optional[str] = None,\n) -> Tuple[str, str]:\n    \n    if isinstance(url, methods):\n        if not itermode:\n            itermode = url.value[1]\n        if not iterkey:\n            iterkey = url.value[2]\n\n    if not iterkey or not itermode:\n        raise ValueError(\"Iteration not supported for: {}\".format(url))\n    elif itermode not in ITERMODE:\n        raise ValueError(\"Iteration not supported for: {}\".format(itermode))\n\n    return itermode, iterkey", "docstring": "Find iteration mode and iteration key for a given :class:`slack.methods`\n\nArgs:\nurl: :class:`slack.methods` or string url\nitermode: Custom iteration mode\niterkey: Custom iteration key\n\nReturns:\n:py:class:`tuple` (itermode, iterkey)", "source": "juraj-google-style"}
{"code": "def init_from_storage_write_to_datastore(self,\n                                           batch_size=100,\n                                           allowed_epsilon=None,\n                                           skip_image_ids=None,\n                                           max_num_images=None):\n    \n    if allowed_epsilon is None:\n      allowed_epsilon = copy.copy(DEFAULT_EPSILON)\n    \n    self._dataset_batches = {}\n    \n    images = self._read_image_list(skip_image_ids)\n    if max_num_images:\n      images = images[:max_num_images]\n    for batch_idx, batch_start in enumerate(range(0, len(images), batch_size)):\n      batch = images[batch_start:batch_start+batch_size]\n      batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)\n      batch_epsilon = allowed_epsilon[batch_idx % len(allowed_epsilon)]\n      self.add_batch(batch_id, {'epsilon': batch_epsilon})\n      for image_id, image_path in batch:\n        self.add_image(batch_id, image_id,\n                       {'dataset_image_id': os.path.basename(image_path)[:-4],\n                        'image_path': image_path})\n    \n    self.write_to_datastore()", "docstring": "Initializes dataset batches from the list of images in the datastore.\n\nArgs:\nbatch_size: batch size\nallowed_epsilon: list of allowed epsilon or None to use default\nskip_image_ids: list of image ids to skip\nmax_num_images: maximum number of images to read", "source": "juraj-google-style"}
{"code": "def orient_directed_graph(self, data, graph):\n        \n        warnings.warn(\"The algorithm is ran on the skeleton of the given graph.\")\n        return self.orient_undirected_graph(data, nx.Graph(graph))", "docstring": "Run the algorithm on a directed_graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.DiGraph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution on the given skeleton.\n\n.. warning::\nThe algorithm is ran on the skeleton of the given graph.", "source": "juraj-google-style"}
{"code": "def write_serializable_array(self, array):\n        \n        if array is None:\n            self.write_byte(0)\n        else:\n            self.write_var_int(len(array))\n            for item in array:\n                item.Serialize(self)", "docstring": "Write an array of serializable objects to the stream.\n\nArgs:\narray(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin", "source": "juraj-google-style"}
{"code": "def get_splitter_instance(split_type):\n    \n    if split_type is None:\n        return NoneSplitter()\n    elif split_type == 'Line':\n        return LineSplitter()\n    elif split_type == 'RecordIO':\n        return RecordIOSplitter()\n    else:\n        raise ValueError('Invalid Split Type: %s' % split_type)", "docstring": "Return an Instance of :class:`sagemaker.local.data.Splitter` according to\nthe specified `split_type`.\n\nArgs:\nsplit_type (str): either 'Line' or 'RecordIO'. Can be left as None to signal no data split\nwill happen.\n\nReturns\n:class:`sagemaker.local.data.Splitter`: an Instance of a Splitter", "source": "juraj-google-style"}
{"code": "def GetArtifacts(self, os_name=None, name_list=None, source_type=None, exclude_dependents=False, provides=None, reload_datastore_artifacts=False):\n    self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts)\n    results = set()\n    for artifact in itervalues(self._artifacts):\n        if (os_name and artifact.supported_os and (os_name not in artifact.supported_os)):\n            continue\n        if (name_list and (artifact.name not in name_list)):\n            continue\n        if source_type:\n            source_types = [c.type for c in artifact.sources]\n            if (source_type not in source_types):\n                continue\n        if (exclude_dependents and GetArtifactPathDependencies(artifact)):\n            continue\n        if (not provides):\n            results.add(artifact)\n        else:\n            for provide_string in artifact.provides:\n                if (provide_string in provides):\n                    results.add(artifact)\n                    break\n    return results", "docstring": "Retrieve artifact classes with optional filtering.\n\nAll filters must match for the artifact to be returned.\n\nArgs:\nos_name: string to match against supported_os\nname_list: list of strings to match against artifact names\nsource_type: rdf_artifacts.ArtifactSource.SourceType to match against\nsource_type\nexclude_dependents: if true only artifacts with no dependencies will be\nreturned\nprovides: return the artifacts that provide these dependencies\nreload_datastore_artifacts: If true, the data store sources are queried\nfor new artifacts.\n\nReturns:\nset of artifacts matching filter criteria", "source": "codesearchnet"}
{"code": "def is_time_included(self, time):\n    if (self._timestamps_data is None):\n        self._calculate_timestamps()\n    return (time.moy in self._timestamps_data)", "docstring": "Check if time is included in analysis period.\n\nReturn True if time is inside this analysis period,\notherwise return False\n\nArgs:\ntime: A DateTime to be tested\n\nReturns:\nA boolean. True if time is included in analysis period", "source": "codesearchnet"}
{"code": "def add_message(self, message_type):\n    name = self.__normalized_name(message_type)\n    if (name not in self.__schemas):\n        self.__schemas[name] = None\n        schema = self.__message_to_schema(message_type)\n        self.__schemas[name] = schema\n    return name", "docstring": "Add a new message.\n\nArgs:\nmessage_type: protorpc.message.Message class to be parsed.\n\nReturns:\nstring, The JSON Schema id.\n\nRaises:\nKeyError if the Schema id for this message_type would collide with the\nSchema id of a different message_type that was already added.", "source": "codesearchnet"}
{"code": "def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):\n        \n        try:\n            flow = self.session.client('ec2', region)\n            flow.create_flow_logs(\n                ResourceIds=[vpc_id],\n                ResourceType='VPC',\n                TrafficType='ALL',\n                LogGroupName=vpc_id,\n                DeliverLogsPermissionArn=iam_role_arn\n            )\n            fvpc = VPC.get(vpc_id)\n            fvpc.set_property('vpc_flow_logs_status', 'ACTIVE')\n\n            self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))\n            auditlog(\n                event='vpc_flow_logs.create_vpc_flow',\n                actor=self.ns,\n                data={\n                    'account': account.account_name,\n                    'region': region,\n                    'vpcId': vpc_id,\n                    'arn': iam_role_arn\n                }\n            )\n        except Exception:\n            self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(\n                account,\n                region,\n                vpc_id\n            ))", "docstring": "Create a new VPC Flow log\n\nArgs:\naccount (:obj:`Account`): Account to create the flow in\nregion (`str`): Region to create the flow in\nvpc_id (`str`): ID of the VPC to create the flow for\niam_role_arn (`str`): ARN of the IAM role used to post logs to the log group\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def _process_counter_example(self, mma, w_string):\n        \n        if len(w_string) == 1:\n            self.observation_table.smi_vector.append(w_string)\n            for exp in self.observation_table.em_vector:\n                self._fill_table_entry(w_string, exp)\n\n        diff = len(w_string)\n        same = 0\n        membership_answer = self._membership_query(w_string)\n        while True:\n            i = (same + diff) / 2\n            access_string = self._run_in_hypothesis(mma, w_string, i)\n            if membership_answer != self._membership_query(access_string + w_string[i:]):\n                diff = i\n            else:\n                same = i\n            if diff - same == 1:\n                break\n\n        \n        access_string = self._run_in_hypothesis(mma, w_string, diff - 1)\n        wrong_transition = access_string + w_string[diff - 1]\n        if wrong_transition not in self.observation_table.smi_vector:\n            \n            \n            self.observation_table.smi_vector.append(wrong_transition)\n            for exp in self.observation_table.em_vector:\n                self._fill_table_entry(wrong_transition, exp)\n            return\n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        exp = w_string[diff:]\n        self.observation_table.em_vector.append(exp)\n        for row in self.observation_table.sm_vector + self.observation_table.smi_vector:\n            self._fill_table_entry(row, exp)", "docstring": "Process a counterexample in the Rivest-Schapire way.\nArgs:\nmma (DFA): The hypothesis automaton\nw_string (str): The examined string to be consumed\nReturn:\nNone", "source": "juraj-google-style"}
{"code": "def set_unrecognized_field(self, key, value, variant):\n        \n        if not isinstance(variant, Variant):\n            raise TypeError('Variant type %s is not valid.' % variant)\n        self.__unrecognized_fields[key] = value, variant", "docstring": "Set an unrecognized field, used when decoding a message.\n\nArgs:\nkey: The name or number used to refer to this unknown value.\nvalue: The value of the field.\nvariant: Type information needed to interpret the value or re-encode\nit.\n\nRaises:\nTypeError: If the variant is not an instance of messages.Variant.", "source": "juraj-google-style"}
{"code": "def post(self, url, headers=None, params=None, **kwargs):\n    if (len(kwargs) > 1):\n        raise InvalidArgumentsError('Too many extra args ({} > 1)'.format(len(kwargs)))\n    if kwargs:\n        kwarg = next(iter(kwargs))\n        if (kwarg not in ('json', 'data')):\n            raise InvalidArgumentsError(('Invalid kwarg: ' + kwarg))\n    resp = self.session.post(url, headers=headers, params=params, **kwargs)\n    resp.raise_for_status()\n    return _to_json(resp)", "docstring": "Send a JSON POST request with the given request headers, additional\nURL query parameters, and the given JSON in the request body.  The\nextra query parameters are merged with any which already exist in the\nURL.  The 'json' and 'data' parameters may not both be given.\n\nArgs:\nurl (str): URL to retrieve\nheaders (dict): Any other headers to be added to the request.\nparams: dictionary or bytes to be sent in the query string for the\nrequest. (optional)\njson: json to send in the body of the Request.  This must be a\nJSON-serializable object. (optional)\ndata: raw request body data.  May be a dictionary, list of tuples,\nbytes, or file-like object to send in the body of the Request.\n(optional)", "source": "codesearchnet"}
{"code": "def _get_key_counter(seed, alg):\n    if alg == Algorithm.AUTO_SELECT.value:\n        key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(seed)\n    elif alg == Algorithm.PHILOX.value:\n        key, counter = _philox_scramble_seed(seed)\n    elif alg == Algorithm.THREEFRY.value:\n        key = array_ops.reshape(_uint32s_to_uint64(math_ops.cast(seed, dtypes.uint32)), [1])\n        counter = array_ops.zeros([1], dtypes.uint64)\n    else:\n        raise ValueError(unsupported_alg_error_msg(alg))\n    return (key, counter)", "docstring": "Calculates the key and counter to pass to raw RNG ops.\n\nThis function calculates the key and counter that will be passed to\nthe raw RNG ops like `StatelessRandomUniformV2`. Depending on the\ninput `alg`, the key and counter may be scrambled or copied from\n`seed`. If `alg` is `\"auto_select\"`, the key and counter will be\ndetermined at runtime based on device type.\n\nArgs:\nseed: An integer tensor of shape [2]. The seed to calculate the key and\ncounter from.\nalg: The RNG algorithm. See `tf.random.stateless_uniform` for an\nexplanation.\n\nReturns:\nA pair (key, counter) suitable for V2 stateless RNG ops like\n`StatelessRandomUniformV2`.", "source": "github-repos"}
{"code": "def export_kml_file(self):\n    kml = create_elem('kml')\n    kml.Document = create_elem('Document')\n    for place in sorted(self.values(), key=(lambda x: x.name)):\n        kml.Document.append(place.tokml())\n    return etree.ElementTree(kml)", "docstring": "Generate KML element tree from ``Placemarks``.\n\nReturns:\netree.ElementTree: KML element tree depicting ``Placemarks``", "source": "codesearchnet"}
{"code": "def reviews(self, **kwargs):\n        \n        path = self._get_id_path('reviews')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the reviews for a particular movie id.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def render_template(cmd_derived_from_alias, pos_args_table):\n    \n    try:\n        cmd_derived_from_alias = normalize_placeholders(cmd_derived_from_alias, inject_quotes=True)\n        template = jinja.Template(cmd_derived_from_alias)\n\n        \n        \n        rendered = shlex.split(template.render(pos_args_table))\n\n        \n        \n        \n        if '' in rendered:\n            check_runtime_errors(cmd_derived_from_alias, pos_args_table)\n\n        return rendered\n    except Exception as exception:\n        \n        if isinstance(exception, CLIError):\n            raise\n\n        \n        split_exception_message = str(exception).split()\n\n        \n        error_index = split_exception_message[-1]\n        if error_index.isdigit():\n            split_exception_message.insert(-1, 'index')\n            error_msg = RENDER_TEMPLATE_ERROR.format(' '.join(split_exception_message), cmd_derived_from_alias)\n\n            \n            \n            \n            error_msg += '\\n{}^'.format(' ' * (len(error_msg) - len(cmd_derived_from_alias) + int(error_index) - 1))\n        else:\n            exception_str = str(exception).replace('\"{{', '}}').replace('}}\"', '}}')\n            error_msg = RENDER_TEMPLATE_ERROR.format(cmd_derived_from_alias, exception_str)\n\n        raise CLIError(error_msg)", "docstring": "Render cmd_derived_from_alias as a Jinja template with pos_args_table as the arguments.\n\nArgs:\ncmd_derived_from_alias: The string to be injected with positional arguemnts.\npos_args_table: The dictionary used to rendered.\n\nReturns:\nA processed string with positional arguments injected.", "source": "juraj-google-style"}
{"code": "def load_template(path_or_buffer):\n    \n\n    from itertools import groupby\n    from operator import itemgetter\n\n    path_or_buffer = _stringify_path(path_or_buffer)\n\n    if is_file_like(path_or_buffer):\n        templates = json.load(path_or_buffer)\n    else:\n        with open(path_or_buffer, 'r') as f:\n            templates = json.load(f)\n\n    options = []\n\n    grouper = itemgetter('page', 'extraction_method')\n\n    for key, grp in groupby(sorted(templates, key=grouper), grouper):\n        tmp_options = [_convert_template_option(e) for e in grp]\n\n        if len(tmp_options) == 1:\n            options.append(tmp_options[0])\n            continue\n\n        option = tmp_options[0]\n        areas = [e.get('area') for e in tmp_options]\n        option['area'] = areas\n        option['multiple_tables'] = True\n        options.append(option)\n\n    return options", "docstring": "Build tabula-py option from template file\n\nArgs:\nfile_like_obj: File like object of Tabula app template\n\nReturns:\n`obj`:dict: tabula-py options", "source": "juraj-google-style"}
{"code": "def set_json(self, reason='', new_page=False):\n        \n        compressed_json = json.dumps(self._compress_json(self.cached_json))\n\n        if len(compressed_json) > self.max_page_size:\n            raise OverflowError(\n                'Usernotes page is too large (>{0} characters)'.\n                format(self.max_page_size)\n            )\n\n        if new_page:\n            self.subreddit.wiki.create(\n                self.page_name,\n                compressed_json,\n                reason\n            )\n            \n            self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)\n        else:\n            self.subreddit.wiki[self.page_name].edit(\n                compressed_json,\n                reason\n            )", "docstring": "Send the JSON from the cache to the usernotes wiki page.\n\nArguments:\nreason: the change reason that will be posted to the wiki changelog\n(str)\nRaises:\nOverflowError if the new JSON data is greater than max_page_size", "source": "juraj-google-style"}
{"code": "def AddForwardLoopCounter(self, outer_grad_state):\n    n = constant_op.constant(0, name='f_count')\n    if outer_grad_state is not None:\n        outer_add_op = outer_grad_state.forward_index.op.inputs[0].op\n        n.op._add_control_input(outer_add_op)\n    self.Enter()\n    self.AddName(n.name)\n    enter_n = _Enter(n, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='f_count')\n    self.loop_enters.append(enter_n)\n    merge_n = merge([enter_n, enter_n])[0]\n    switch_n = switch(merge_n, self._pivot)\n    index = math_ops.add(switch_n[1], 1)\n    next_n = _NextIteration(index)\n    merge_n.op._update_input(1, next_n)\n    total_iterations = exit(switch_n[0], name='f_count')\n    self.loop_exits.append(total_iterations)\n    self.ExitResult([total_iterations])\n    self.Exit()\n    return (total_iterations, next_n)", "docstring": "Adds a loop that counts the number of iterations.\n\nThis is added to the forward loop at the time when we start to\ncreate the loop for backprop gradient computation. Called in\nthe outer context of this forward context.\n\nThe pseudocode is:\n`n = 0; while (_pivot) { n++; }`\n\nNote that a control dependency is added to `n` to ensure the correct\nexecution order of stack push ops.\n\nArgs:\nouter_grad_state: The outer grad state. None if not nested.\n\nReturns:\nThe number of iterations taken by the forward loop and the loop index.", "source": "github-repos"}
{"code": "def AddStorageMediaImageOptions(self, argument_group):\n    \n    argument_group.add_argument(\n        '--partitions', '--partition', dest='partitions', action='store',\n        type=str, default=None, help=(\n            'Define partitions to be processed. A range of '\n            'partitions can be defined as: \"3..5\". Multiple partitions can '\n            'be defined as: \"1,3,5\" (a list of comma separated values). '\n            'Ranges and lists can also be combined as: \"1,3..5\". The first '\n            'partition is 1. All partitions can be specified with: \"all\".'))\n\n    argument_group.add_argument(\n        '--volumes', '--volume', dest='volumes', action='store', type=str,\n        default=None, help=(\n            'Define volumes to be processed. A range of volumes can be defined '\n            'as: \"3..5\". Multiple volumes can be defined as: \"1,3,5\" (a list '\n            'of comma separated values). Ranges and lists can also be combined '\n            'as: \"1,3..5\". The first volume is 1. All volumes can be specified '\n            'with: \"all\".'))", "docstring": "Adds the storage media image options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def listSubjects(\n        self, query, status=None, start=None, count=None, vendorSpecific=None\n    ):\n        \n        response = self.listSubjectsResponse(\n            query, status, start, count, vendorSpecific\n        )\n        return self._read_dataone_type_response(response, 'SubjectInfo')", "docstring": "See Also: listSubjectsResponse()\n\nArgs:\nquery:\nstatus:\nstart:\ncount:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def add_keyword(self, keyword, schema=None, source=None):\n        \n        keyword_dict = self._sourced_dict(source, value=keyword)\n\n        if schema is not None:\n            keyword_dict['schema'] = schema\n\n        self._append_to('keywords', keyword_dict)", "docstring": "Add a keyword.\n\nArgs:\nkeyword(str): keyword to add.\nschema(str): schema to which the keyword belongs.\nsource(str): source for the keyword.", "source": "juraj-google-style"}
{"code": "def validate(self, x: symbolic.Symbolic) -> None:\n    if self._validator is not None:\n        self._validator(x)", "docstring": "Validates an input's integrity.\n\nThis method will be called in :func:`pyglove.patch` when a chain of patchers\nhave been applied, as to validate the patched object in chain still\nconforms to the patcher's plan.\n\nArgs:\nx: The input after modification.", "source": "github-repos"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(EnrollmentTransaction, self).__init__(*args, **kwargs)\n        self.Type = TransactionType.EnrollmentTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def try_pick_piece_of_work(self, worker_id, submission_id=None):\n    client = self._datastore_client\n    unclaimed_work_ids = None\n    if submission_id:\n        unclaimed_work_ids = [k for (k, v) in iteritems(self.work) if (is_unclaimed(v) and (v['submission_id'] == submission_id))]\n    if (not unclaimed_work_ids):\n        unclaimed_work_ids = [k for (k, v) in iteritems(self.work) if is_unclaimed(v)]\n    if unclaimed_work_ids:\n        next_work_id = random.choice(unclaimed_work_ids)\n    else:\n        return None\n    try:\n        with client.transaction() as transaction:\n            work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id, KIND_WORK, next_work_id)\n            work_entity = client.get(work_key, transaction=transaction)\n            if (not is_unclaimed(work_entity)):\n                return None\n            work_entity['claimed_worker_id'] = worker_id\n            work_entity['claimed_worker_start_time'] = get_integer_time()\n            transaction.put(work_entity)\n    except Exception:\n        return None\n    return next_work_id", "docstring": "Tries pick next unclaimed piece of work to do.\n\nAttempt to claim work piece is done using Cloud Datastore transaction, so\nonly one worker can claim any work piece at a time.\n\nArgs:\nworker_id: ID of current worker\nsubmission_id: if not None then this method will try to pick\npiece of work for this submission\n\nReturns:\nID of the claimed work piece", "source": "codesearchnet"}
{"code": "def GetConsensusAddress(validators):\n    vlen = len(validators)\n    script = Contract.CreateMultiSigRedeemScript((vlen - int(((vlen - 1) / 3))), validators)\n    return Crypto.ToScriptHash(script)", "docstring": "Get the script hash of the consensus node.\n\nArgs:\nvalidators (list): of Ellipticcurve.ECPoint's\n\nReturns:\nUInt160:", "source": "codesearchnet"}
{"code": "def do_operation_update(self, info, an_op):\n    self.update_op_func(self.metric_name, info, an_op)", "docstring": "Updates an operation using the assigned update_op_func\n\nArgs:\ninfo: (:class:`endpoints_management.control.report_request.Info`): the\ninfo instance to update\nan_op: (:class:`endpoints_management.control.report_request.Info`):\nthe info instance to update\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "codesearchnet"}
{"code": "def goto(self, iroute: 'InstanceRoute') -> 'InstanceNode':\n    inst = self\n    for sel in iroute:\n        inst = sel.goto_step(inst)\n    return inst", "docstring": "Move the focus to an instance inside the receiver's value.\n\nArgs:\niroute: Instance route (relative to the receiver).\n\nReturns:\nThe instance node corresponding to the target instance.\n\nRaises:\nInstanceValueError: If `iroute` is incompatible with the receiver's\nvalue.\nNonexistentInstance: If the instance node doesn't exist.\nNonDataNode: If an instance route addresses a non-data node\n(rpc/action/notification).", "source": "codesearchnet"}
{"code": "def log_cert_info(logger, msg_str, cert_obj):\n    list(map(logger, (['{}:'.format(msg_str)] + ['  {}'.format(v) for v in ['Subject: {}'.format(_get_val_str(cert_obj, ['subject', 'value'], reverse=True)), 'Issuer: {}'.format(_get_val_str(cert_obj, ['issuer', 'value'], reverse=True)), 'Not Valid Before: {}'.format(cert_obj.not_valid_before.isoformat()), 'Not Valid After: {}'.format(cert_obj.not_valid_after.isoformat()), 'Subject Alt Names: {}'.format(_get_ext_val_str(cert_obj, 'SUBJECT_ALTERNATIVE_NAME', ['value', 'value'])), 'CRL Distribution Points: {}'.format(_get_ext_val_str(cert_obj, 'CRL_DISTRIBUTION_POINTS', ['value', 'full_name', 'value', 'value'])), 'Authority Access Location: {}'.format((extract_issuer_ca_cert_url(cert_obj) or '<not found>'))]])))", "docstring": "Dump basic certificate values to the log.\n\nArgs:\nlogger: Logger\nLogger to which to write the certificate values.\n\nmsg_str: str\nA message to write to the log before the certificate values.\n\ncert_obj: cryptography.Certificate\nCertificate containing values to log.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def reflection(n1, n2):\n    \n    r = abs((n1-n2) / (n1+n2))**2\n    return r", "docstring": "Calculate the power reflection at the interface\nof two refractive index materials.\n\nArgs:\nn1 (float): Refractive index of material 1.\nn2 (float): Refractive index of material 2.\n\nReturns:\nfloat: The percentage of reflected power.", "source": "juraj-google-style"}
{"code": "def update_mp_firware_version(self, timeout=-1):\n        \n        uri = \"{}/mpFirmwareVersion\".format(self.data[\"uri\"])\n        return self._helper.do_put(uri, None, timeout, None)", "docstring": "Updates the iLO firmware on a physical server to a minimum ILO firmware version required by OneView to\nmanage the server.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nReturns:\nResource", "source": "juraj-google-style"}
{"code": "def get_class(schema_name):\n    \n    global _registry_loaded\n    if not _registry_loaded:\n        load_message_classes()\n\n    try:\n        return _schema_name_to_class[schema_name]\n    except KeyError:\n        _log.warning(\n            'The schema \"%s\" is not in the schema registry! Either install '\n            \"the package with its schema definition or define a schema. \"\n            \"Falling back to the default schema...\",\n            schema_name,\n        )\n        return Message", "docstring": "Retrieve the message class associated with the schema name.\n\nIf no match is found, the default schema is returned and a warning is logged.\n\nArgs:\nschema_name (six.text_type): The name of the :class:`Message` sub-class;\nthis is typically the Python path.\n\nReturns:\nMessage: A sub-class of :class:`Message` to create the message from.", "source": "juraj-google-style"}
{"code": "def diagonalize_real_symmetric_matrix(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> np.ndarray:\n    if (np.any((np.imag(matrix) != 0)) or (not predicates.is_hermitian(matrix))):\n        raise ValueError('Input must be real and symmetric.')\n    (_, result) = np.linalg.eigh(matrix)\n    return result", "docstring": "Returns an orthogonal matrix that diagonalizes the given matrix.\n\nArgs:\nmatrix: A real symmetric matrix to diagonalize.\nrtol: float = 1e-5,\natol: float = 1e-8\n\nReturns:\nAn orthogonal matrix P such that P.T @ matrix @ P is diagonal.\n\nRaises:\nValueError: Matrix isn't real symmetric.", "source": "codesearchnet"}
{"code": "def includes(self, lo_freq: float) -> bool:\n        \n        if self._lb <= lo_freq <= self._ub:\n            return True\n        return False", "docstring": "Whether `lo_freq` is within the `LoRange`.\n\nArgs:\nlo_freq: LO frequency to be checked\n\nReturns:\nbool: True if lo_freq is included in this range, otherwise False", "source": "juraj-google-style"}
{"code": "def split_leading_dim(tensor, inputs, n_dims=2):\n  \n  input_shape_static = inputs.get_shape()\n  input_shape_list = input_shape_static.as_list()\n  tensor_shape_static = tensor.get_shape()\n  tensor_shape_list = tensor_shape_static.as_list()\n  if (input_shape_static.is_fully_defined()\n      and tensor_shape_static.is_fully_defined()):\n    new_shape = input_shape_list[:n_dims] + tensor_shape_list[1:]\n    return tf.reshape(tensor, new_shape)\n\n  \n  dims_after_first = tf.shape(tensor)[1:]\n  split_sizes = tf.shape(inputs)[:n_dims]\n  known_split_sizes = input_shape_list[:n_dims]\n  known_dims_after_first = tensor_shape_list[1:]\n  output_size = tf.concat([split_sizes, dims_after_first], 0)\n  result = tf.reshape(tensor, output_size)\n  result.set_shape(known_split_sizes + known_dims_after_first)\n  return result", "docstring": "Split the first dimension of a tensor.\n\nArgs:\ntensor: Tensor to have its first dimension split.\ninputs: Original reference input to look the dimensions of.\nn_dims: Number of dimensions to split.\n\nReturns:\nThe input tensor, with its first dimension split.", "source": "juraj-google-style"}
{"code": "def _get_events_list(object_key: str) -> List[str]:\n    return DB.get_list(_keys.events_list(object_key))", "docstring": "Get list of event ids for the object with the specified key.\n\nArgs:\nobject_key (str): Key of an object in the database.", "source": "codesearchnet"}
{"code": "def get_worfklow_spec(self):\n    if (self.current.workflow_name not in self.workflow_spec_cache):\n        try:\n            self.current.wf_object = BPMNWorkflow.objects.get(name=self.current.workflow_name)\n        except ObjectDoesNotExist:\n            self.current.wf_object = BPMNWorkflow.objects.get(name='not_found')\n            self.current.task_data['non-existent-wf'] = self.current.workflow_name\n            self.current.workflow_name = 'not_found'\n        xml_content = self.current.wf_object.xml.body\n        spec = ZopsSerializer().deserialize_workflow_spec(xml_content, self.current.workflow_name)\n        spec.wf_id = self.current.wf_object.key\n        self.workflow_spec_cache[self.current.workflow_name] = spec\n    return self.workflow_spec_cache[self.current.workflow_name]", "docstring": "Generates and caches the workflow spec package from\nBPMN diagrams that read from disk\n\nReturns:\nSpiffWorkflow Spec object.", "source": "codesearchnet"}
{"code": "def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):\n    text_view_entry_comment = extract_element_internationalized_comment(text_view)\n    if (text_view_entry_comment is None):\n        return\n    if (text_view.hasAttribute('usesAttributedText') and (text_view.attributes['usesAttributedText'].value == 'YES')):\n        add_string_pairs_from_attributed_ui_element(results, text_view, text_view_entry_comment)\n    else:\n        try:\n            text_view_entry_key = text_view.attributes['text'].value\n            results.append((text_view_entry_key, (text_view_entry_comment + ' default text value')))\n        except KeyError:\n            pass\n    warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)", "docstring": "Adds string pairs from a textview element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\ntext_view(element): The textview element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)", "source": "codesearchnet"}
{"code": "def create_parser():\n    parser = argparse_flags.ArgumentParser(description='saved_model_cli: Command-line interface for SavedModel', conflict_handler='resolve')\n    parser.add_argument('-v', '--version', action='version', version='0.1.0')\n    subparsers = parser.add_subparsers(title='commands', description='valid commands', help='additional help')\n    add_show_subparser(subparsers)\n    add_run_subparser(subparsers)\n    add_scan_subparser(subparsers)\n    add_convert_subparser(subparsers)\n    add_aot_compile_cpu_subparser(subparsers)\n    add_freeze_model_subparser(subparsers)\n    return parser", "docstring": "Creates a parser that parse the command line arguments.\n\nReturns:\nA namespace parsed from command line arguments.", "source": "github-repos"}
{"code": "def _HasExpired(self, key):\n    self.logger.debug('Processing key: %s.', key)\n    try:\n        (schema, json_str) = key.split(None, 3)[2:]\n    except (ValueError, AttributeError):\n        self.logger.debug('No schema identifier. Not expiring key.')\n        return False\n    if (schema != 'google-ssh'):\n        self.logger.debug('Invalid schema %s. Not expiring key.', schema)\n        return False\n    try:\n        json_obj = json.loads(json_str)\n    except ValueError:\n        self.logger.debug('Invalid JSON %s. Not expiring key.', json_str)\n        return False\n    if ('expireOn' not in json_obj):\n        self.logger.debug('No expiration timestamp. Not expiring key.')\n        return False\n    expire_str = json_obj['expireOn']\n    format_str = '%Y-%m-%dT%H:%M:%S+0000'\n    try:\n        expire_time = datetime.datetime.strptime(expire_str, format_str)\n    except ValueError:\n        self.logger.warning('Expiration timestamp \"%s\" not in format %s. Not expiring key.', expire_str, format_str)\n        return False\n    return (datetime.datetime.utcnow() > expire_time)", "docstring": "Check whether an SSH key has expired.\n\nUses Google-specific semantics of the OpenSSH public key format's comment\nfield to determine if an SSH key is past its expiration timestamp, and\ntherefore no longer to be trusted. This format is still subject to change.\nReliance on it in any way is at your own risk.\n\nArgs:\nkey: string, a single public key entry in OpenSSH public key file format.\nThis will be checked for Google-specific comment semantics, and if\npresent, those will be analysed.\n\nReturns:\nbool, True if the key has Google-specific comment semantics and has an\nexpiration timestamp in the past, or False otherwise.", "source": "codesearchnet"}
{"code": "def _parse_networks(service_list: dict) -> list:\n        \n        \n        networks = []\n\n        for n_values in service_list['networks'].values():\n            for n_key, n_value in n_values.items():\n                if 'name' in n_key:\n                    networks.append(n_value)\n        return networks", "docstring": "Parse network key.\n\nArgs:\nservice_list (dict): Service configurations\n\nReturns:\nlist, List of networks", "source": "juraj-google-style"}
{"code": "def _get_edge_sentences(\n    G: AnalysisGraph, source: str, target: str\n) -> List[str]:\n    \n\n    return chain.from_iterable(\n        [\n            [repr(e.text) for e in s.evidence]\n            for s in G.edges[source, target][\"InfluenceStatements\"]\n        ]\n    )", "docstring": "Return the sentences that led to the construction of a specified edge.\n\nArgs:\nG\nsource: The source of the edge.\ntarget: The target of the edge.", "source": "juraj-google-style"}
{"code": "def calc_transition_to_state(self, newstate):\n    cached_val = JTAGStateMachine._lookup_cache.get((self.state, newstate))\n    if cached_val:\n        return cached_val\n    if (newstate not in self.states):\n        raise ValueError(('%s is not a valid state for this state machine' % newstate))\n    path = self._find_shortest_path(self._statestr, newstate)\n    if (not path):\n        raise ValueError('No path to the requested state.')\n    res = self._get_steps_from_nodes_path(path)\n    res.reverse()\n    JTAGStateMachine._lookup_cache[(self.state, newstate)] = res\n    return res", "docstring": "Given a target state, generate the sequence of transitions that would move this state machine instance to that target state.\n\nArgs:\nnewstate: A str state name to calculate the path to.\n\nReturns:\nA bitarray containing the bits that would transition this\nstate machine to the target state. The bits read from right\nto left. For efficiency, this retulting bitarray is cached.\nDo not edit this bitarray, or it will cause undefined\nbehavior.", "source": "codesearchnet"}
{"code": "def gates_to_uncompute(self):\n    q = QuantumRegister(self.num_qubits)\n    circuit = QuantumCircuit(q, name='disentangler')\n    remaining_param = self.params\n    for i in range(self.num_qubits):\n        (remaining_param, thetas, phis) = Initialize._rotations_to_disentangle(remaining_param)\n        rz_mult = self._multiplex(RZGate, phis)\n        ry_mult = self._multiplex(RYGate, thetas)\n        circuit.append(rz_mult.to_instruction(), q[i:self.num_qubits])\n        circuit.append(ry_mult.to_instruction(), q[i:self.num_qubits])\n    return circuit", "docstring": "Call to create a circuit with gates that take the\ndesired vector to zero.\n\nReturns:\nQuantumCircuit: circuit to take self.params vector to |00..0>", "source": "codesearchnet"}
{"code": "def setSingleStep(self, singleStep):\n        \n        if not isinstance(singleStep, int):\n            raise TypeError(\"Argument is not of type int\")\n        \n        self._singleStep = abs(singleStep)\n        return self._singleStep", "docstring": "setter to _singleStep. converts negativ values to positiv ones.\n\nArgs:\nsingleStep (int): new _singleStep value. converts negativ values to positiv ones.\n\nRaises:\nTypeError: If the given argument is not an integer.\n\nReturns:\nint or long: the absolute value of the given argument.", "source": "juraj-google-style"}
{"code": "def put(self, block_id, priority, pb_type='offline'):\n    if (pb_type not in ('offline', 'realtime')):\n        raise ValueError('Invalid PB type.')\n    with self._mutex:\n        added_time = datetime.datetime.utcnow().isoformat()\n        entry = (priority, (sys.maxsize - self._index), block_id, pb_type, added_time)\n        self._index += 1\n        if (self._block_map.get(block_id) is not None):\n            raise KeyError('ERROR: Block id \"{}\" already exists in PC PB queue!'.format(block_id))\n        self._block_map[block_id] = entry\n        LOG.debug('Adding PB %s to queue', block_id)\n        self._queue.append(entry)\n        self._queue.sort()\n        self._queue.reverse()", "docstring": "Add a Processing Block to the queue.\n\nWhen a new entry it added, the queue is (re-)sorted by priority\nfollowed by insertion order (older blocks with equal priority are\nfirst).\n\nArgs:\nblock_id (str): Processing Block Identifier\npriority (int): Processing Block scheduling priority\n(higher values = higher priority)\npb_type (str): Processing Block type (offline, realtime)", "source": "codesearchnet"}
{"code": "def rental_report(self, address, zipcode, format_type=\"json\"):\n        \n\n        \n        query_params = {\n            \"format\": format_type,\n            \"address\": address,\n            \"zipcode\": zipcode\n        }\n\n        return self._api_client.fetch_synchronous(\"property/rental_report\", query_params)", "docstring": "Call the rental_report component\n\nRental Report only supports a single address.\n\nArgs:\n- address\n- zipcode\n\nKwargs:\n- format_type - \"json\", \"xlsx\" or \"all\". Default is \"json\".", "source": "juraj-google-style"}
{"code": "def get_user_info(self, token):\n    url = self.get_user_info_url()\n    try:\n        headers = {'Authorization': 'Bearer {}'.format(token)}\n        response = requests.get(url, headers=headers)\n    except requests.RequestException:\n        logger.exception('Failed to retrieve user info due to a request exception.')\n        raise UserInfoRetrievalFailed\n    if (response.status_code == 200):\n        return self.process_user_info_response(response.json())\n    else:\n        msg = 'Failed to retrieve user info. Server [{server}] responded with status [{status}].'.format(server=url, status=response.status_code)\n        raise UserInfoRetrievalFailed(msg)", "docstring": "Retrieves the user info from the OAuth provider.\n\nArguments:\ntoken (str): OAuth2 access token.\n\nReturns:\ndict\n\nRaises:\nUserInfoRetrievalFailed: Retrieval of user info from the remote server failed.", "source": "codesearchnet"}
{"code": "def get_key_counter_alg(seed, alg):\n    if alg is None:\n        alg = Algorithm.AUTO_SELECT.value\n    alg = convert_alg_to_int(alg)\n    key, counter = _get_key_counter(seed, alg)\n    return (key, counter, alg)", "docstring": "Calculates the key, counter and algorithm to pass to raw RNG ops.\n\nThis function calculates the key and counter, and determines the algorithm\nthat will be passed to the raw RNG ops like `StatelessRandomUniformV2`.\nDepending on the input `alg`, the key and counter may be scrambled or copied\nfrom `seed`. If `alg` is `\"auto_select\"`, the key and counter will be\ndetermined at runtime based on device type.\n\nArgs:\nseed: An integer tensor of shape [2]. The seed to calculate the key and\ncounter from.\nalg: The RNG algorithm. See `tf.random.stateless_uniform` for an\nexplanation.\n\nReturns:\nA pair (key, counter, algorithm) suitable for V2 stateless RNG ops like\n`StatelessRandomUniformV2`.", "source": "github-repos"}
{"code": "def console_set_char_foreground(con: tcod.console.Console, x: int, y: int, col: Tuple[(int, int, int)]) -> None:\n    lib.TCOD_console_set_char_foreground(_console(con), x, y, col)", "docstring": "Change the foreground color of x,y to col.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\ncol (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\n\n.. deprecated:: 8.4\nArray access performs significantly faster than using this function.\nSee :any:`Console.fg`.", "source": "codesearchnet"}
{"code": "def append(self, text, afterline=None):\n    if afterline:\n        self._vim.current.buffer.append(text, afterline)\n    else:\n        self._vim.current.buffer.append(text)", "docstring": "Append text to the current buffer.\n\nArgs:\ntext (str or Sequence[str]): One or many lines of text to append.\nafterline (Optional[int]):\nLine number to append after. If 0, text is prepended before the\nfirst line; if ``None``, at end of the buffer.", "source": "codesearchnet"}
{"code": "def import_project(self, file, path, namespace=None, overwrite=False, override_params=None, **kwargs):\n    files = {'file': ('file.tar.gz', file)}\n    data = {'path': path, 'overwrite': overwrite}\n    if override_params:\n        for (k, v) in override_params.items():\n            data[('override_params[%s]' % k)] = v\n    if namespace:\n        data['namespace'] = namespace\n    return self.gitlab.http_post('/projects/import', post_data=data, files=files, **kwargs)", "docstring": "Import a project from an archive file.\n\nArgs:\nfile: Data or file object containing the project\npath (str): Name and path for the new project\nnamespace (str): The ID or path of the namespace that the project\nwill be imported to\noverwrite (bool): If True overwrite an existing project with the\nsame path\noverride_params (dict): Set the specific settings for the project\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the server failed to perform the request\n\nReturns:\ndict: A representation of the import status.", "source": "codesearchnet"}
{"code": "def create_from_json(cls, json_data):\n    prop = Property()\n    address_info = json_data['address_info']\n    prop.address = address_info['address']\n    prop.block_id = address_info['block_id']\n    prop.zipcode = address_info['zipcode']\n    prop.zipcode_plus4 = address_info['zipcode_plus4']\n    prop.address_full = address_info['address_full']\n    prop.city = address_info['city']\n    prop.county_fips = address_info['county_fips']\n    prop.geo_precision = address_info['geo_precision']\n    prop.lat = address_info['lat']\n    prop.lng = address_info['lng']\n    prop.slug = address_info['slug']\n    prop.state = address_info['state']\n    prop.unit = address_info['unit']\n    prop.meta = None\n    if ('meta' in json_data):\n        prop.meta = json_data['meta']\n    prop.component_results = _create_component_results(json_data, 'address_info')\n    return prop", "docstring": "Deserialize property json data into a Property object\n\nArgs:\njson_data (dict): The json data for this property\n\nReturns:\nProperty object", "source": "codesearchnet"}
{"code": "def object_upload(self, bucket, key, content, content_type):\n    args = {'uploadType': 'media', 'name': key}\n    headers = {'Content-Type': content_type}\n    url = (Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, '')))\n    return google.datalab.utils.Http.request(url, args=args, data=content, headers=headers, credentials=self._credentials, raw_response=True)", "docstring": "Writes text content to the object.\n\nArgs:\nbucket: the name of the bucket containing the object.\nkey: the key of the object to be written.\ncontent: the text content to be written.\ncontent_type: the type of text content.\nRaises:\nException if the object could not be written to.", "source": "codesearchnet"}
{"code": "def set_aliases_and_defaults(self, aliases_config=None, default_properties=None):\n    if (aliases_config is None):\n        with open(os.path.join(os.path.dirname(__file__), 'aliases.json')) as f:\n            d = json.load(f)\n            self.aliases = d.get('aliases', {})\n            self.default_criteria = d.get('defaults', {})\n    else:\n        self.aliases = aliases_config.get('aliases', {})\n        self.default_criteria = aliases_config.get('defaults', {})\n    if (default_properties is None):\n        (self._default_props, self._default_prop_dict) = (None, None)\n    else:\n        (self._default_props, self._default_prop_dict) = self._parse_properties(default_properties)", "docstring": "Set the alias config and defaults to use. Typically used when\nswitching to a collection with a different schema.\n\nArgs:\naliases_config:\nAn alias dict to use. Defaults to None, which means the default\naliases defined in \"aliases.json\" is used. See constructor\nfor format.\ndefault_properties:\nList of property names (strings) to use by default, if no\nproperties are given to the 'properties' argument of\nquery().", "source": "codesearchnet"}
{"code": "def run(self, verbose=True):\n        \n        self.results.clear()\n\n        for analysis_group in self.config.analysis_groups:\n            if analysis_group.providers:\n                for provider in analysis_group.providers:\n                    logger.info('Run provider %s', provider.identifier)\n                    provider.run()\n                    for checker in analysis_group.checkers:\n                        result = self._get_checker_result(\n                            analysis_group, checker, provider)\n                        self.results.append(result)\n                        analysis_group.results.append(result)\n                        if verbose:\n                            result.print()\n            else:\n                for checker in analysis_group.checkers:\n                    result = self._get_checker_result(\n                        analysis_group, checker, nd='no-data-')\n                    self.results.append(result)\n                    analysis_group.results.append(result)\n                    if verbose:\n                        result.print()", "docstring": "Run the analysis.\n\nGenerate data from each provider, then check these data with every\nchecker, and store the analysis results.\n\nArgs:\nverbose (bool): whether to immediately print the results or not.", "source": "juraj-google-style"}
{"code": "def _get_transformers(self):\n    transformer_dict = {}\n    for table in self.metadata['tables']:\n        table_name = table['name']\n        for field in table['fields']:\n            transformer_type = field.get('type')\n            if transformer_type:\n                col_name = field['name']\n                transformer_dict[(table_name, col_name)] = transformer_type\n    return transformer_dict", "docstring": "Load the contents of meta_file and extract information about the transformers.\n\nReturns:\ndict: tuple(str, str) -> Transformer.", "source": "codesearchnet"}
{"code": "def order(self, image_catalog_ids, batch_size=100, callback=None):\n\n    def _order_single_batch(url_, ids, results_list):\n        data = (json.dumps(ids) if (callback is None) else json.dumps({'acquisitionIds': ids, 'callback': callback}))\n        r = self.gbdx_connection.post(url_, data=data)\n        r.raise_for_status()\n        order_id = r.json().get('order_id')\n        if order_id:\n            results_list.append(order_id)\n    self.logger.debug('Place order')\n    url = (('%s/order' if (callback is None) else '%s/ordercb') % self.base_url)\n    batch_size = min(100, batch_size)\n    if (not isinstance(image_catalog_ids, list)):\n        image_catalog_ids = [image_catalog_ids]\n    sanitized_ids = list(set((id for id in (_id.strip() for _id in image_catalog_ids) if id)))\n    res = []\n    acq_ids_by_batch = zip(*([iter(sanitized_ids)] * batch_size))\n    for ids_batch in acq_ids_by_batch:\n        _order_single_batch(url, ids_batch, res)\n    remain_count = (len(sanitized_ids) % batch_size)\n    if (remain_count > 0):\n        _order_single_batch(url, sanitized_ids[(- remain_count):], res)\n    if (len(res) == 1):\n        return res[0]\n    elif (len(res) > 1):\n        return res", "docstring": "Orders images from GBDX.\n\nArgs:\nimage_catalog_ids (str or list): A single catalog id or a list of\ncatalog ids.\nbatch_size (int): The image_catalog_ids will be split into\nbatches of batch_size. The ordering API max\nbatch size is 100, if batch_size is greater\nthan 100 it will be truncated.\ncallback (str): A url to call when ordering is completed.\n\nReturns:\norder_ids (str or list): If one batch, returns a string. If more\nthan one batch, returns a list of order ids,\none for each batch.", "source": "codesearchnet"}
{"code": "def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads):\n    with tf.name_scope('apply_gradients'):\n        var_update_ops = []\n        for (vid, (g, v)) in enumerate(ps_var_grads):\n            apply_gradient_op = opt.apply_gradients([(g, v)])\n            barrier = self._add_sync_queues_and_barrier('param_update_barrier_{}'.format(vid), [apply_gradient_op])\n            with tf.control_dependencies([barrier]), tf.device(self.cpu_device):\n                updated_value = v.read_value()\n                for towerid in range(self.nr_gpu):\n                    var_update_ops.append(raw_grad_list[towerid][vid][1].assign(updated_value))\n        return var_update_ops", "docstring": "Apply averaged gradients to ps vars, and then copy the updated\nvariables back to each tower.\n\nArgs:\nraw_grad_list: Ngpu x Nvar x 2 gradient list from all towers\nps_var_grads: Nvar x 2 (grad, ps_var)\n\nReturns:\nlist of copy ops", "source": "codesearchnet"}
{"code": "def call(self, inputs, training=None, mask=None):\n    return self._run_internal_graph(inputs, training=training, mask=mask)", "docstring": "Calls the model on new inputs.\n\nIn this case `call` just reapplies\nall ops in the graph to the new inputs\n(e.g. build a new computational graph from the provided inputs).\n\nArgs:\ninputs: A tensor or list of tensors.\ntraining: Boolean or boolean scalar tensor, indicating whether to run\nthe `Network` in training mode or inference mode.\nmask: A mask or list of masks. A mask can be\neither a tensor or None (no mask).\n\nReturns:\nA tensor if there is a single output, or\na list of tensors if there are more than one outputs.", "source": "github-repos"}
{"code": "def checkUser(self, user):\n    return (not self.conn('POST', '{0}/GetCredentialType.srf'.format(SkypeConnection.API_MSACC), json={'username': user}).json().get('IfExistsResult'))", "docstring": "Query a username or email address to see if a corresponding Microsoft account exists.\n\nArgs:\nuser (str): username or email address of an account\n\nReturns:\nbool: whether the account exists", "source": "codesearchnet"}
{"code": "def get_videos_for_course(course_id, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None):\n    return _get_videos_for_filter({'courses__course_id': six.text_type(course_id), 'courses__is_hidden': False}, sort_field, sort_dir, pagination_conf)", "docstring": "Returns an iterator of videos for the given course id.\n\nArgs:\ncourse_id (String)\nsort_field (VideoSortField)\nsort_dir (SortDirection)\n\nReturns:\nA generator expression that contains the videos found, sorted by the\ngiven field and direction, with ties broken by edx_video_id to ensure a\ntotal order.", "source": "codesearchnet"}
{"code": "def get(cls):\n    if cls.is_twoconspect:\n        return (cls.subconspect_el.value or None)\n    input_value = cls.input_el.value.strip()\n    if (not input_value):\n        return None\n    mdt = conspectus.mdt_by_name.get(input_value)\n    if (not mdt):\n        alert(('Invalid sub-conspect `%s`!' % input_value))\n        return None\n    return mdt", "docstring": "Get code selected by user.\n\nReturns:\nstr: Code or None in case that user didn't selected anything yet.", "source": "codesearchnet"}
{"code": "def lookup_descriptor(self, definition_name):\n        \n        try:\n            return self.__descriptors[definition_name]\n        except KeyError:\n            pass\n\n        if self.__descriptor_loader:\n            definition = self.__descriptor_loader(definition_name)\n            self.__descriptors[definition_name] = definition\n            return definition\n        else:\n            raise messages.DefinitionNotFoundError(\n                'Could not find definition for %s' % definition_name)", "docstring": "Lookup descriptor by name.\n\nGet descriptor from library by name.  If descriptor is not found will\nattempt to find via descriptor loader if provided.\n\nArgs:\ndefinition_name: Definition name to find.\n\nReturns:\nDescriptor that describes definition name.\n\nRaises:\nDefinitionNotFoundError if not descriptor exists for definition name.", "source": "juraj-google-style"}
{"code": "def get(account):\n        \n        account = Account.get(account)\n        if not account:\n            return None\n\n        acct_type = AccountType.get(account.account_type_id).account_type\n        account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type)\n\n        return account_class(account)", "docstring": "Returns the class object identified by `account_id`\n\nArgs:\naccount (`int`, `str`): Unique ID of the account to load from database\n\nReturns:\n`Account` object if found, else None", "source": "juraj-google-style"}
{"code": "def Sleep(self, seconds):\n    time.sleep((seconds - int(seconds)))\n    for _ in range(int(seconds)):\n        time.sleep(1)\n        if (self.GetMemoryUsage() > self.memory_quota):\n            raise MemoryError('Exceeded memory allowance.')\n        if (not self.running):\n            break", "docstring": "Sleep a given time in 1 second intervals.\n\nWhen a machine is suspended during a time.sleep(n) call for more\nthan n seconds, sometimes the sleep is interrupted and all threads\nwake up at the same time. This leads to race conditions between\nthe threads issuing the heartbeat and the one checking for it. By\nsleeping in small intervals, we make sure that even if one sleep\ncall is interrupted, we do not check for the heartbeat too early.\n\nArgs:\nseconds: Number of seconds to sleep.\n\nRaises:\nMemoryError: if the process exceeds memory quota.", "source": "codesearchnet"}
{"code": "def guess_settings(self, major, minor):\n        \n\n        version = major, minor\n\n        if self.vbr_method == 2:\n            if version in ((3, 90), (3, 91), (3, 92)) and self.encoding_flags:\n                if self.bitrate < 255:\n                    return u\"--alt-preset %d\" % self.bitrate\n                else:\n                    return u\"--alt-preset %d+\" % self.bitrate\n            if self.preset_used != 0:\n                return u\"--preset %d\" % self.preset_used\n            elif self.bitrate < 255:\n                return u\"--abr %d\" % self.bitrate\n            else:\n                return u\"--abr %d+\" % self.bitrate\n        elif self.vbr_method == 1:\n            if self.preset_used == 0:\n                if self.bitrate < 255:\n                    return u\"-b %d\" % self.bitrate\n                else:\n                    return u\"-b 255+\"\n            elif self.preset_used == 1003:\n                return u\"--preset insane\"\n            return u\"-b %d\" % self.preset_used\n        elif version in ((3, 90), (3, 91), (3, 92)):\n            preset_key = (self.vbr_quality, self.quality, self.vbr_method,\n                          self.lowpass_filter, self.ath_type)\n            if preset_key == (1, 2, 4, 19500, 3):\n                return u\"--preset r3mix\"\n            if preset_key == (2, 2, 3, 19000, 4):\n                return u\"--alt-preset standard\"\n            if preset_key == (2, 2, 3, 19500, 2):\n                return u\"--alt-preset extreme\"\n\n            if self.vbr_method == 3:\n                return u\"-V %s\" % self.vbr_quality\n            elif self.vbr_method in (4, 5):\n                return u\"-V %s --vbr-new\" % self.vbr_quality\n        elif version in ((3, 93), (3, 94), (3, 95), (3, 96), (3, 97)):\n            if self.preset_used == 1001:\n                return u\"--preset standard\"\n            elif self.preset_used == 1002:\n                return u\"--preset extreme\"\n            elif self.preset_used == 1004:\n                return u\"--preset fast standard\"\n            elif self.preset_used == 1005:\n                return u\"--preset fast extreme\"\n            elif self.preset_used == 1006:\n                return u\"--preset medium\"\n            elif self.preset_used == 1007:\n                return u\"--preset fast medium\"\n\n            if self.vbr_method == 3:\n                return u\"-V %s\" % self.vbr_quality\n            elif self.vbr_method in (4, 5):\n                return u\"-V %s --vbr-new\" % self.vbr_quality\n        elif version == (3, 98):\n            if self.vbr_method == 3:\n                return u\"-V %s --vbr-old\" % self.vbr_quality\n            elif self.vbr_method in (4, 5):\n                return u\"-V %s\" % self.vbr_quality\n        elif version >= (3, 99):\n            if self.vbr_method == 3:\n                return u\"-V %s --vbr-old\" % self.vbr_quality\n            elif self.vbr_method in (4, 5):\n                p = self.vbr_quality\n                adjust_key = (p, self.bitrate, self.lowpass_filter)\n                \n                p = {\n                    (5, 32, 0): 7,\n                    (5, 8, 0): 8,\n                    (6, 8, 0): 9,\n                }.get(adjust_key, p)\n                return u\"-V %s\" % p\n\n        return u\"\"", "docstring": "Gives a guess about the encoder settings used. 
Returns an empty\nstring if unknown.\n\nThe guess is mostly correct in case the file was encoded with\nthe default options (-V --preset --alt-preset --abr -b etc) and no\nother fancy options.\n\nArgs:\nmajor (int)\nminor (int)\nReturns:\ntext", "source": "juraj-google-style"}
{"code": "def get_readrows_iterator(bq_read_client: BigQueryReadClient, table_metadata: TableMetadata, columns: Iterable[str] | None=None, data_format: DataFormat=DataFormat.AVRO) -> Iterable[Mapping]:\n    requested_session = ReadSession(table=table_metadata.table_path, data_format=data_format.value, read_options={'selected_fields': columns})\n    session = bq_read_client.create_read_session(parent=f'projects/{table_metadata.project_id}', read_session=requested_session, max_stream_count=1)\n    stream_name = session.streams[0].name\n    reader = bq_read_client.read_rows(stream_name)\n    rows = reader.rows(session)\n    return cast(Iterable[Mapping], rows)", "docstring": "Get an Iterator of row Mappings with the requested columns of the table,\nusing an authenticated BigQuery Storage API client.\n\nNote: Does NOT support nested columns.\n\nArgs:\n* bq_read_client: BigQuery Storage API Read client\n* table_metadata: TableMetadata object\n* columns (optional): List of columns to select\n* data_format: Format to fetch data in, one of:\n* DataFormat.AVRO\n* DataFormat.ARROW\n\nDefaults:\n* columns: None, i.e. select all columns\n* data_format: AVRO, since it auto-parses to Dict\n\nReturns:\n* Iterator of row Mappings", "source": "github-repos"}
{"code": "def complete(self, stream):\n            \n            assert not self.is_complete()\n            self._marker.addInputPort(outputPort=stream.oport)\n            self.stream.oport.schema = stream.oport.schema\n            \n            \n            \n            self._pending_schema._set(self.stream.oport.schema)\n\n            \n            \n            stream.oport.operator._start_op = True", "docstring": "Complete the pending stream.\n\nAny connections made to :py:attr:`stream` are connected to `stream` once\nthis method returns.\n\nArgs:\nstream(Stream): Stream that completes the connection.", "source": "juraj-google-style"}
{"code": "def add_multiple_to_queue(self, items, container=None):\n    if (container is not None):\n        container_uri = container.resources[0].uri\n        container_metadata = to_didl_string(container)\n    else:\n        container_uri = ''\n        container_metadata = ''\n    chunk_size = 16\n    item_list = list(items)\n    for index in range(0, len(item_list), chunk_size):\n        chunk = item_list[index:(index + chunk_size)]\n        uris = ' '.join([item.resources[0].uri for item in chunk])\n        uri_metadata = ' '.join([to_didl_string(item) for item in chunk])\n        self.avTransport.AddMultipleURIsToQueue([('InstanceID', 0), ('UpdateID', 0), ('NumberOfURIs', len(chunk)), ('EnqueuedURIs', uris), ('EnqueuedURIsMetaData', uri_metadata), ('ContainerURI', container_uri), ('ContainerMetaData', container_metadata), ('DesiredFirstTrackNumberEnqueued', 0), ('EnqueueAsNext', 0)])", "docstring": "Add a sequence of items to the queue.\n\nArgs:\nitems (list): A sequence of items to the be added to the queue\ncontainer (DidlObject, optional): A container object which\nincludes the items.", "source": "codesearchnet"}
{"code": "def feed(self, data_len, feed_time=None):\n        \n        self._bytes_transferred += data_len\n        self._collected_bytes_transferred += data_len\n\n        time_now = feed_time or time.time()\n        time_diff = time_now - self._last_feed_time\n\n        if time_diff < self._sample_min_time:\n            return\n\n        self._last_feed_time = time.time()\n\n        if data_len == 0 and time_diff >= self._stall_time:\n            self._stalled = True\n            return\n\n        self._samples.append((time_diff, self._collected_bytes_transferred))\n        self._collected_bytes_transferred = 0", "docstring": "Update the bandwidth meter.\n\nArgs:\ndata_len (int): The number of bytes transfered since the last\ncall to :func:`feed`.\nfeed_time (float): Current time.", "source": "juraj-google-style"}
{"code": "def user_lists(self, username, member_type=\"USER\"):\n        \n        return self.client.service.getUserLists(username, member_type, self.proxy_id)", "docstring": "Look up all the lists that the user is a member of.\n\nArgs:\nusername (str): The MIT username of the user\nmember_type(str): The type of user, \"USER\" or \"STRING\"\n\nReturns:\nlist of strings: names of the lists that this user is a member of", "source": "juraj-google-style"}
{"code": "def ask_for_approval(full_changeset=None, params_diff=None,\n                     include_verbose=False):\n    \n    approval_options = ['y', 'n']\n    if include_verbose:\n        approval_options.append('v')\n\n    approve = ui.ask(\"Execute the above changes? [{}] \".format(\n        '/'.join(approval_options))).lower()\n\n    if include_verbose and approve == \"v\":\n        if params_diff:\n            logger.info(\n                \"Full changeset:\\n\\n%s\\n%s\",\n                format_params_diff(params_diff),\n                yaml.safe_dump(full_changeset),\n            )\n        else:\n            logger.info(\n                \"Full changeset:\\n%s\",\n                yaml.safe_dump(full_changeset),\n            )\n        return ask_for_approval()\n    elif approve != \"y\":\n        raise exceptions.CancelExecution", "docstring": "Prompt the user for approval to execute a change set.\n\nArgs:\nfull_changeset (list, optional): A list of the full changeset that will\nbe output if the user specifies verbose.\nparams_diff (list, optional): A list of DictValue detailing the\ndifferences between two parameters returned by\n:func:`stacker.actions.diff.diff_dictionaries`\ninclude_verbose (bool, optional): Boolean for whether or not to include\nthe verbose option", "source": "juraj-google-style"}
{"code": "def generate_string(self, initial_logits, initial_state, sequence_length):\n    \n\n    current_logits = initial_logits\n    current_state = initial_state\n\n    generated_letters = []\n    for _ in range(sequence_length):\n      \n      char_index = tf.squeeze(tf.multinomial(current_logits, 1))\n      char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)\n      generated_letters.append(char_one_hot)\n\n      \n      gen_out_seq, current_state = self._core(\n          tf.nn.relu(self._embed_module(char_one_hot)),\n          current_state)\n      current_logits = self._output_module(gen_out_seq)\n\n    generated_string = tf.stack(generated_letters)\n\n    return generated_string", "docstring": "Builds sub-graph to generate a string, sampled from the model.\n\nArgs:\ninitial_logits: Starting logits to sample from.\ninitial_state: Starting state for the RNN core.\nsequence_length: Number of characters to sample.\n\nReturns:\nA Tensor of characters, with dimensions `[sequence_length, batch_size,\noutput_size]`.", "source": "juraj-google-style"}
{"code": "def testDefaultBoundaryConditionsWithInnerTerm(self, default_bc):\n\n    def second_order_coeff_fn(t, coord_grid):\n        del t\n        x = coord_grid[0]\n        return [[-(-x ** 3 + x)]]\n\n    def first_order_coeff_fn(t, coord_grid):\n        del t\n        x = coord_grid[0]\n        return [1 + x]\n\n    def inner_first_order_coeff_fn(t, coord_grid):\n        del t\n        x = coord_grid[0]\n        return [-x ** 2 + 1]\n\n    @dirichlet\n    def lower_boundary_fn(t, x):\n        del x\n        return tf.math.exp(t)\n\n    @dirichlet\n    def upper_boundary_fn(t, x):\n        del x\n        return tf.math.exp(1.0 + t)\n\n    def zeroth_order_coeff_fn(t, coord_grid):\n        del t\n        x = coord_grid[0]\n        return 2 * x ** 2 - 1 + 2 * x - (1 - x ** 2)\n    grid = self.evaluate(grids.uniform_grid(minimums=[0], maximums=[1], sizes=[100], dtype=np.float64))\n    initial_values = tf.math.exp(grid[0])\n    time_step = 0.001\n    final_t = 0.1\n    if default_bc == 'left':\n        boundary_conditions = [(None, upper_boundary_fn)]\n    elif default_bc == 'right':\n        boundary_conditions = [(lower_boundary_fn, None)]\n    else:\n        boundary_conditions = [(None, None)]\n    est_values = fd_solvers.solve_forward(start_time=0, end_time=final_t, coord_grid=grid, values_grid=initial_values, time_step=time_step, boundary_conditions=boundary_conditions, second_order_coeff_fn=second_order_coeff_fn, first_order_coeff_fn=first_order_coeff_fn, inner_first_order_coeff_fn=inner_first_order_coeff_fn, zeroth_order_coeff_fn=zeroth_order_coeff_fn)[0]\n    true_values = tf.math.exp(final_t + grid[0])\n    self.assertAllClose(est_values, true_values, atol=0.01, rtol=0.01)", "docstring": "Test for PDE with default boundary condition with inner term.\n\nTake equation\n`u_{t} - (x - x**3)[u]_{xx} + (1 + x) * [(1 - x**2) u]_{x}\n+ (2 * x**2 - 1 + 2 *x - (1 - x**2))u = 0` with\nboundary conditions `u_{t} + (x - 1) u_{x} = 0` at x = 0\nand `u(t, 1) = exp(t + 1)`, and an initial condition `u(0, x) = exp(x)`.\n\nSolve this equation and compare the result to `u(t, x) = exp(t + x)`.\n\nArgs:\ndefault_bc: A string to indicate which boundary condition is 'default'.\nCan be either 'left', 'right', or 'both'.", "source": "github-repos"}
{"code": "def close_stream_transport(self, stream_transport, timeout):\n    with self._stream_transport_map_lock:\n        if (stream_transport.local_id in self._stream_transport_map):\n            del self._stream_transport_map[stream_transport.local_id]\n            if stream_transport.remote_id:\n                self.transport.write_message(adb_message.AdbMessage('CLSE', stream_transport.local_id, stream_transport.remote_id), timeout)\n            return True\n    return False", "docstring": "Remove the given stream transport's id from our map of id's.\n\nIf the stream id is actually removed, we send a CLSE message to let the\nremote end know (this happens when we are ack'ing a CLSE message we\nreceived).  The ADB protocol doesn't say this is a requirement, but ADB\ndoes it, so we do too.\n\nArgs:\nstream_transport: The stream transport to close.\ntimeout: Timeout on the operation.\n\nReturns:\nTrue if the id was removed and message sent, False if it was already\nmissing from the stream map (already closed).", "source": "codesearchnet"}
{"code": "def _check_mr_state(cls, state, mr_id):\n    if (state is None):\n        logging.warning('Mapreduce State for job %s is missing. Dropping Task.', mr_id)\n        return False\n    if (not state.active):\n        logging.warning('Mapreduce %s is not active. Looks like spurious task execution. Dropping Task.', mr_id)\n        return False\n    return True", "docstring": "Check MapreduceState.\n\nArgs:\nstate: an MapreduceState instance.\nmr_id: mapreduce id.\n\nReturns:\nTrue if state is valid. False if not and this task should be dropped.", "source": "codesearchnet"}
{"code": "def get_compression_type_string(cls, options):\n    if not options:\n        return ''\n    elif isinstance(options, TFRecordOptions):\n        return cls.get_compression_type_string(options.compression_type)\n    elif isinstance(options, TFRecordCompressionType):\n        return cls.compression_type_map[options]\n    elif options in TFRecordOptions.compression_type_map:\n        return cls.compression_type_map[options]\n    elif options in TFRecordOptions.compression_type_map.values():\n        return options\n    else:\n        raise ValueError('Not a valid compression_type: \"{}\"'.format(options))", "docstring": "Convert various option types to a unified string.\n\nArgs:\noptions: `TFRecordOption`, `TFRecordCompressionType`, or string.\n\nReturns:\nCompression type as string (e.g. `'ZLIB'`, `'GZIP'`, or `''`).\n\nRaises:\nValueError: If compression_type is invalid.", "source": "github-repos"}
{"code": "def _render_trajectories(self,\n            trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None:\n        \n        if self._verbose:\n            non_fluents, initial_state, states, actions, interms, rewards = trajectories\n            shape = states[0][1].shape\n            batch_size, horizon, = shape[0], shape[1]\n            states = [(s[0], s[1][0]) for s in states]\n            interms = [(f[0], f[1][0]) for f in interms]\n            actions = [(a[0], a[1][0]) for a in actions]\n            rewards = np.reshape(rewards, [batch_size, horizon])[0]\n            self._render_batch(non_fluents, states, actions, interms, rewards)", "docstring": "Prints the first batch of simulated `trajectories`.\n\nArgs:\ntrajectories: NonFluents, states, actions, interms and rewards.", "source": "juraj-google-style"}
{"code": "def is_supported(cls, desc):\n        \n        for m in cls:\n            if m.matches(desc):\n                return True\n        return False", "docstring": "Determines if the given metric descriptor is supported.\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the\nmetric descriptor to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "juraj-google-style"}
{"code": "def make_group_index(self, groupby_cols, bool_arr):\n    (factor_list, values_list) = self.factorize_groupby_cols(groupby_cols)\n    if (len(factor_list) == 0):\n        tmp_rootdir = self.create_tmp_rootdir()\n        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')\n        carray_values = ['Total']\n    elif (len(factor_list) == 1):\n        carray_factor = factor_list[0]\n        carray_values = values_list[0]\n    elif self.group_cache_valid(col_list=groupby_cols):\n        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))\n        col_factor_rootdir = (col_rootdir + '.factor')\n        carray_factor = bcolz.carray(rootdir=col_factor_rootdir)\n        col_values_rootdir = (col_rootdir + '.values')\n        carray_values = bcolz.carray(rootdir=col_values_rootdir)\n    else:\n        (carray_factor, carray_values) = self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)\n    nr_groups = len(carray_values)\n    skip_key = None\n    if (bool_arr is not None):\n        tmp_rootdir = self.create_tmp_rootdir()\n        carray_factor = bcolz.eval('(factor + 1) * bool - 1', user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')\n        tmp_rootdir = self.create_tmp_rootdir()\n        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')\n        (carray_factor, values) = ctable_ext.factorize(carray_factor, labels)\n        filter_check = [key for (key, value) in values.items() if (value == (- 1))]\n        if filter_check:\n            skip_key = filter_check[0]\n        nr_groups = len(values)\n    if (skip_key is None):\n        skip_key = nr_groups\n    return (carray_factor, nr_groups, skip_key)", "docstring": "Create unique groups for groupby loop\n\nArgs:\nfactor_list:\nvalues_list:\ngroupby_cols:\nbool_arr:\n\nReturns:\ncarray: (carray_factor)\nint: (nr_groups) the number of resulting groups\nint: (skip_key)", "source": "codesearchnet"}
{"code": "def _get_environment_updates(self, display_all_distributions=False):\n        \n        updates = []\n        for distribution in self.pip.get_installed_distributions():\n\n            versions = self.get_available_versions(distribution.project_name)\n            max_version = max(versions.keys()) if versions else UNKNOW_NUM\n\n            update = None\n            distribution_version = self._parse_version(distribution.version)\n            if versions and max_version > distribution_version:\n                update = Update(\n                    distribution.project_name,\n                    distribution.version,\n                    versions[max_version],\n                    prelease=max_version[-1]\n                )\n\n            elif (\n                display_all_distributions and\n                max_version == distribution_version\n            ):\n                update = Update(\n                    distribution.project_name,\n                    distribution.version,\n                    versions[max_version],\n                )\n\n            elif display_all_distributions:\n                update = Update(\n                    distribution.project_name,\n                    distribution.version,\n                    UNKNOWN\n                )\n\n            if update:\n                updates.append(update)\n\n        return sorted(updates, key=lambda x: x.name)", "docstring": "Check all pacakges installed in the environment to see if there are\nany updates availalble.\n\nArgs:\ndisplay_all_distributions (bool): Return distribution even if it is\nup-to-date. Defaults to ``False``.\n\nReturns:\nlist: A list of Update objects ordered based on ``instance.name``.", "source": "juraj-google-style"}
{"code": "def timestampFormat(self, timestampFormat):\n        \n        if not isinstance(timestampFormat, str):\n            raise TypeError('not of type unicode')\n        \n        self._timestampFormat = timestampFormat", "docstring": "Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime\n\nRaises:\nAssertionError: if timestampFormat is not of type unicode.\n\nArgs:\ntimestampFormat (unicode): assign timestampFormat to _timestampFormat.\nFormatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.", "source": "juraj-google-style"}
{"code": "def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):\n        \n        print '%s call joinCommissioned' % self.port\n        self.__sendCommand('ifconfig up')\n        cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl)\n        print cmd\n        if self.__sendCommand(cmd)[0] == \"Done\":\n            maxDuration = 150 \n            self.joinCommissionedStatus = self.joinStatus['ongoing']\n\n            if self.logThreadStatus == self.logStatus['stop']:\n                self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,))\n\n            t_end = time.time() + maxDuration\n            while time.time() < t_end:\n                if self.joinCommissionedStatus == self.joinStatus['succeed']:\n                    break\n                elif self.joinCommissionedStatus == self.joinStatus['failed']:\n                    return False\n\n                time.sleep(1)\n\n            self.__sendCommand('thread start')\n            time.sleep(30)\n            return True\n        else:\n            return False", "docstring": "start joiner\n\nArgs:\nstrPSKd: Joiner's PSKd\n\nReturns:\nTrue: successful to start joiner\nFalse: fail to start joiner", "source": "juraj-google-style"}
{"code": "def ParseForwardedIps(self, forwarded_ips):\n    \n    addresses = []\n    forwarded_ips = forwarded_ips or []\n    for ip in forwarded_ips:\n      if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):\n        addresses.append(ip[:-3] if ip.endswith('/32') else ip)\n      else:\n        self.logger.warning('Could not parse IP address: \"%s\".', ip)\n    return addresses", "docstring": "Parse and validate forwarded IP addresses.\n\nArgs:\nforwarded_ips: list, the IP address strings to parse.\n\nReturns:\nlist, the valid IP address strings.", "source": "juraj-google-style"}
{"code": "def __init__(self, comma_compat=False):\n    \n    self._comma_compat = comma_compat\n    name = 'whitespace or comma' if self._comma_compat else 'whitespace'\n    BaseListParser.__init__(self, None, name)", "docstring": "Initializer.\n\nArgs:\ncomma_compat: bool - Whether to support comma as an additional separator.\nIf false then only whitespace is supported.  This is intended only for\nbackwards compatibility with flags that used to be comma-separated.", "source": "juraj-google-style"}
{"code": "def _print_unhashable(df, columns=None):\n    \n    for c in df.columns if columns is None else columns:\n        if df.dtypes[c] == object:\n            try:\n                df[c].apply(hash)\n            except TypeError:\n                df[c] = df[c].dropna().apply(pformat).ix[df.index]\n\n    return df", "docstring": "Replace unhashable values in a DataFrame with their string repr\nArgs:\ndf: DataFrame\ncolumns: columns to replace, if necessary. Default None replaces all columns.", "source": "juraj-google-style"}
{"code": "def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):\n    try:\n        mrulist = self._ParseMRUListValue(registry_key)\n    except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning('unable to parse MRUList value with error: {0!s}'.format(exception))\n        return\n    if (not mrulist):\n        return\n    values_dict = {}\n    found_terminator = False\n    for (entry_index, entry_letter) in enumerate(mrulist):\n        if (entry_letter == 0):\n            break\n        if found_terminator:\n            parser_mediator.ProduceExtractionWarning('found additional MRUList entries after terminator in key: {0:s}.'.format(registry_key.path))\n            found_terminator = False\n        entry_letter = chr(entry_letter)\n        value_string = self._ParseMRUListEntryValue(parser_mediator, registry_key, entry_index, entry_letter, codepage=codepage)\n        value_text = 'Index: {0:d} [MRU Value {1:s}]'.format((entry_index + 1), entry_letter)\n        values_dict[value_text] = value_string\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n    event_data.source_append = self._SOURCE_APPEND\n    event = time_events.DateTimeValuesEvent(registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract event objects from a MRUList Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "codesearchnet"}
{"code": "def pretty_plot(width=8, height=None, plt=None, dpi=None, color_cycle=('qualitative', 'Set1_9')):\n    ticksize = int((width * 2.5))\n    golden_ratio = ((math.sqrt(5) - 1) / 2)\n    if (not height):\n        height = int((width * golden_ratio))\n    if (plt is None):\n        import matplotlib.pyplot as plt\n        import importlib\n        mod = importlib.import_module(('palettable.colorbrewer.%s' % color_cycle[0]))\n        colors = getattr(mod, color_cycle[1]).mpl_colors\n        from cycler import cycler\n        plt.figure(figsize=(width, height), facecolor='w', dpi=dpi)\n        ax = plt.gca()\n        ax.set_prop_cycle(cycler('color', colors))\n    else:\n        fig = plt.gcf()\n        fig.set_size_inches(width, height)\n    plt.xticks(fontsize=ticksize)\n    plt.yticks(fontsize=ticksize)\n    ax = plt.gca()\n    ax.set_title(ax.get_title(), size=(width * 4))\n    labelsize = int((width * 3))\n    ax.set_xlabel(ax.get_xlabel(), size=labelsize)\n    ax.set_ylabel(ax.get_ylabel(), size=labelsize)\n    return plt", "docstring": "Provides a publication quality plot, with nice defaults for font sizes etc.\n\nArgs:\nwidth (float): Width of plot in inches. Defaults to 8in.\nheight (float): Height of plot in inches. Defaults to width * golden\nratio.\nplt (matplotlib.pyplot): If plt is supplied, changes will be made to an\nexisting plot. Otherwise, a new plot will be created.\ndpi (int): Sets dot per inch for figure. Defaults to 300.\ncolor_cycle (tuple): Set the color cycle for new plots to one of the\ncolor sets in palettable. Defaults to a qualitative Set1_9.\n\nReturns:\nMatplotlib plot object with properly sized fonts.", "source": "codesearchnet"}
{"code": "def desc_from_uri(uri):\n    \n    \n    \n    \n    \n    \n    \n    \n\n    \n    \n    \n    \n    \n    if \":\" in uri:\n        _, uri = uri.split(\":\", 1)\n    query_string = parse_qs(urlparse(uri, 'http').query)\n    \n    if query_string.get('sn'):\n        account_serial_number = query_string['sn'][0]\n        try:\n            account = Account.get_accounts()[account_serial_number]\n            desc = \"SA_RINCON{}_{}\".format(\n                account.service_type, account.username)\n            return desc\n        except KeyError:\n            \n            \n            pass\n    if query_string.get('sid'):\n        service_id = query_string['sid'][0]\n        for service in MusicService._get_music_services_data().values():\n            if service_id == service[\"ServiceID\"]:\n                service_type = service[\"ServiceType\"]\n                account = Account.get_accounts_for_service(service_type)\n                if not account:\n                    break\n                \n                account = account[0]\n                desc = \"SA_RINCON{}_{}\".format(\n                    account.service_type, account.username)\n                return desc\n    \n    \n    desc = 'RINCON_AssociatedZPUDN'\n    return desc", "docstring": "Create the content of DIDL desc element from a uri.\n\nArgs:\nuri (str): A uri, eg:\n``'x-sonos-http:track%3a3402413.mp3?sid=2&amp;flags=32&amp;sn=4'``\n\nReturns:\nstr: The content of a desc element for that uri, eg\n``'SA_RINCON519_email@example.com'``", "source": "juraj-google-style"}
{"code": "def _backspaced_single_line_animation(animation_, *args, **kwargs):\n    \n    animation_gen = animation_(*args, **kwargs)\n    yield next(animation_gen)  \n    yield from util.concatechain(\n        util.BACKSPACE_GEN(kwargs['width']), animation_gen)", "docstring": "Turn an animation into an automatically backspaced animation.\n\nArgs:\nanimation: A function that returns a generator that yields\nstrings for animation frames.\nargs: Arguments for the animation function.\nkwargs: Keyword arguments for the animation function.\nReturns:\nthe animation generator, with backspaces applied to each but the first\nframe.", "source": "juraj-google-style"}
{"code": "def post_headline(self, name, level, message):\n        \n\n        self._client.post_headline(name, level, message)", "docstring": "Asynchronously update the sticky headline for a service.\n\nArgs:\nname (string): The name of the service\nlevel (int): A message level in states.*_LEVEL\nmessage (string): The user facing error message that will be stored\nfor the service and can be queried later.", "source": "juraj-google-style"}
{"code": "def energies(self, samples_like, dtype=np.float):\n        \n        samples, labels = as_samples(samples_like)\n        if labels:\n            idx, label = zip(*enumerate(labels))\n            labeldict = dict(zip(label, idx))\n        else:\n            labeldict = {}\n\n        num_samples = samples.shape[0]\n\n        energies = np.zeros(num_samples, dtype=dtype)\n        for term, bias in self.items():\n            if len(term) == 0:\n                energies += bias\n            else:\n                energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias\n\n        return energies", "docstring": "The energies of the given samples.\n\nArgs:\nsamples_like (samples_like):\nA collection of raw samples. `samples_like` is an extension of\nNumPy's array_like structure. See :func:`.as_samples`.\n\ndtype (:class:`numpy.dtype`, optional):\nThe data type of the returned energies. Defaults to float.\n\nReturns:\n:obj:`numpy.ndarray`: The energies.", "source": "juraj-google-style"}
{"code": "def _init_global_step(self, global_step=USE_DEFAULT):\n    if global_step is Supervisor.USE_DEFAULT:\n        global_step = self._get_first_op_from_collection(ops.GraphKeys.GLOBAL_STEP)\n        if global_step is None:\n            global_step = self._default_global_step_tensor()\n            if global_step is not None:\n                ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)\n    self._global_step = global_step", "docstring": "Initializes global_step.\n\nArgs:\nglobal_step: An integer Tensor of size 1 that counts steps. If set to\nUSE_DEFAULT, creates global_step tensor.", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. The special tokens depend on calling set_lang.\n\nAn SeamlessM4T sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `[src_lang_code] X [eos]`\n- `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def __init__(self, num_labels: int, matcher: MaskFormerHungarianMatcher, weight_dict: Dict[str, float], eos_coef: float):\n    super().__init__()\n    requires_backends(self, ['scipy'])\n    self.num_labels = num_labels\n    self.matcher = matcher\n    self.weight_dict = weight_dict\n    self.eos_coef = eos_coef\n    empty_weight = torch.ones(self.num_labels + 1)\n    empty_weight[-1] = self.eos_coef\n    self.register_buffer('empty_weight', empty_weight)", "docstring": "The MaskFormer Loss. The loss is computed very similar to DETR. The process happens in two steps: 1) we compute\nhungarian assignment between ground truth masks and the outputs of the model 2) we supervise each pair of\nmatched ground-truth / prediction (supervise class and mask)\n\nArgs:\nnum_labels (`int`):\nThe number of classes.\nmatcher (`MaskFormerHungarianMatcher`):\nA torch module that computes the assignments between the predictions and labels.\nweight_dict (`Dict[str, float]`):\nA dictionary of weights to be applied to the different losses.\neos_coef (`float`):\nWeight to apply to the null class.", "source": "github-repos"}
{"code": "def put(f, s3_path, multipart_chunk_size_mb=500, logger=None):\n    \n    if not logger:\n        logger = log.get_logger('s3')\n    fname = os.path.basename(f)\n    target = os.path.join(s3_path, fname)\n    s3cmd_cline = 's3cmd put {} {} --multipart-chunk-size-mb {}'.format(f,\n                                                                        target,\n                                                                        multipart_chunk_size_mb)\n    print_put_info(fname, target, logger)\n    s3cmd = sp.Popen(s3cmd_cline,\n                     stdout=sp.PIPE,\n                     stderr=sp.PIPE,\n                     shell=True)\n    stdout, stderr = s3cmd.communicate()", "docstring": "Uploads a single file to S3, using s3cmd.\n\nArgs:\n\nf (str): Path to a single file.\n\ns3_path (str): The S3 path, with the filename omitted. The S3 filename\nwill be the basename of the ``f``. For example::\n\nput(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')\n\nwill result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz``", "source": "juraj-google-style"}
{"code": "def anchored_pairs(self, anchor):\n\n        \n\n        pairs = OrderedDict()\n\n        for term in self.keys:\n            score = self.get_pair(anchor, term)\n            if score: pairs[term] = score\n\n        return utils.sort_dict(pairs)", "docstring": "Get distances between an anchor term and all other terms.\n\nArgs:\nanchor (str): The anchor term.\n\nReturns:\nOrderedDict: The distances, in descending order.", "source": "juraj-google-style"}
{"code": "def add_chain(self, name, order):\n        \n\n        if name not in self.chains:\n            setattr(self.chains, name, MarkovChain(order=order))\n        else:\n            raise ValueError(\"Chain with this name already exists\")", "docstring": "Add chain to current shelve file\n\nArgs:\nname: chain name\norder: markov chain order", "source": "juraj-google-style"}
{"code": "def update_shared_file(self,\n                           sharekey=None,\n                           title=None,\n                           description=None):\n        \n        if not sharekey:\n            raise Exception(\n                \"You must specify a sharekey for the sharedfile\"\n                \"you wish to update.\")\n\n        if not (title or description):\n            raise Exception(\"You must specify a title or description.\")\n\n        post_data = {}\n\n        if title:\n            post_data['title'] = title\n        if description:\n            post_data['description'] = description\n\n        endpoint = '/api/sharedfile/{0}'.format(sharekey)\n\n        data = self._make_request('POST', endpoint=endpoint, data=post_data)\n\n        return SharedFile.NewFromJSON(data)", "docstring": "Update the editable details (just the title and description) of a\nSharedFile.\n\nArgs:\nsharekey (str): Sharekey of the SharedFile to update.\ntitle (Optional[str]): Title of the SharedFile.\ndescription (Optional[str]): Description of the SharedFile\n\nReturns:\nSharedFile on success, 404 on Sharekey not found, 403 on\nunauthorized.", "source": "juraj-google-style"}
{"code": "def easeInBack(n, s=1.70158):\n    _checkRange(n)\n    return ((n * n) * (((s + 1) * n) - s))", "docstring": "A tween function that backs up first at the start and then goes to the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def _calculate_scores(self, query, key):\n    q_reshaped = ops.expand_dims(query, axis=-2)\n    k_reshaped = ops.expand_dims(key, axis=-3)\n    scale = self.scale if self.use_scale else 1.0\n    return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)", "docstring": "Calculates attention scores as a nonlinear sum of query and key.\n\nArgs:\nquery: Query tensor of shape `(batch_size, Tq, dim)`.\nkey: Key tensor of shape `(batch_size, Tv, dim)`.\n\nReturns:\nTensor of shape `(batch_size, Tq, Tv)`.", "source": "github-repos"}
{"code": "def compute_probab_ratios(p_new, p_old, actions, reward_mask):\n    (B, T) = actions.shape\n    assert ((B, (T + 1)) == p_old.shape[:2])\n    assert ((B, (T + 1)) == p_new.shape[:2])\n    logp_old = chosen_probabs(p_old, actions)\n    logp_new = chosen_probabs(p_new, actions)\n    assert ((B, T) == logp_old.shape)\n    assert ((B, T) == logp_new.shape)\n    probab_ratios = (np.exp((logp_new - logp_old)) * reward_mask)\n    assert ((B, T) == probab_ratios.shape)\n    return probab_ratios", "docstring": "Computes the probability ratios for each time-step in a trajectory.\n\nArgs:\np_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy\nnetwork assigns to all the actions at each time-step in each batch using\nthe old parameters.\np_old: ndarray of shape [B, T+1, A], same as above, but using old policy\nnetwork parameters.\nactions: ndarray of shape [B, T] where each element is from [0, A).\nreward_mask: ndarray of shape [B, T] masking over probabilities.\n\nReturns:\nprobab_ratios: ndarray of shape [B, T], where\nprobab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}", "source": "codesearchnet"}
{"code": "def _SendItem(self, zmq_socket, item, block=True):\n    try:\n        logger.debug('{0:s} sending item'.format(self.name))\n        if block:\n            zmq_socket.send_pyobj(item)\n        else:\n            zmq_socket.send_pyobj(item, zmq.DONTWAIT)\n        logger.debug('{0:s} sent item'.format(self.name))\n        return True\n    except zmq.error.Again:\n        logger.debug('{0:s} could not send an item'.format(self.name))\n    except zmq.error.ZMQError as exception:\n        if (exception.errno == errno.EINTR):\n            logger.error('ZMQ syscall interrupted in {0:s}.'.format(self.name))\n    return False", "docstring": "Attempts to send an item to a ZeroMQ socket.\n\nArgs:\nzmq_socket (zmq.Socket): used to the send the item.\nitem (object): sent on the queue. Will be pickled prior to sending.\nblock (Optional[bool]): whether the push should be performed in blocking\nor non-blocking mode.\n\nReturns:\nbool: whether the item was sent successfully.", "source": "codesearchnet"}
{"code": "def restore(self, file_prefix: tensor_lib.Tensor, options: 'checkpoint_options.CheckpointOptions | None'=None) -> Mapping[str, ops.Operation]:\n    options = options or checkpoint_options.CheckpointOptions()\n\n    def restore_fn() -> Mapping[str, ops.Operation]:\n        restore_fn_inputs = {}\n        restore_fn_input_count = {fn: len(keys) for fn, keys in self._restore_fn_to_keys.items()}\n        restore_ops = {}\n        for task, shard in self._shardable_tensors_by_task.items():\n            with ops.device(task):\n                restored_tensor_dict = _single_shard_restore(file_prefix, shard, options)\n                for ckpt_key, slice_and_tensor in restored_tensor_dict.items():\n                    for slice_spec, tensor in slice_and_tensor.items():\n                        restore_fn = self._keys_to_restore_fn[ckpt_key, slice_spec]\n                        if slice_spec:\n                            restore_fn_inputs.setdefault(restore_fn, {}).setdefault(ckpt_key, {})[slice_spec] = tensor\n                        else:\n                            restore_fn_inputs.setdefault(restore_fn, {})[ckpt_key] = tensor\n                        restore_fn_input_count[restore_fn] -= 1\n                        if restore_fn_input_count[restore_fn] == 0:\n                            restored_tensors = {}\n                            for ckpt_key, tensor in restore_fn_inputs[restore_fn].items():\n                                restored_tensors[trackable_utils.extract_local_name(ckpt_key)] = tensor\n                            ret = restore_fn(restored_tensors)\n                            if isinstance(ret, dict):\n                                restore_ops.update(ret)\n        for _, (_, restore_fn) in self._registered_savers.items():\n            restore_fn(file_prefix)\n        return restore_ops\n    has_custom_device_saver = False\n    for sts in self._shardable_tensors_by_task.values():\n        if any([context.is_custom_device(st.device.to_string()) for st in sts]):\n            has_custom_device_saver = True\n            break\n    if context.executing_eagerly() and (self._num_unique_tasks > 1 or has_custom_device_saver):\n\n        @def_function.function(jit_compile=False, autograph=False)\n        def tf_function_restore() -> Mapping[str, ops.Operation]:\n            restore_fn()\n            return {}\n        restore_ops = tf_function_restore()\n    else:\n        restore_ops = restore_fn()\n    return restore_ops", "docstring": "Restore the saveable objects from a checkpoint with `file_prefix`.\n\nArgs:\nfile_prefix: A string or scalar string Tensor containing the prefix for\nfiles to read from.\noptions: Optional `CheckpointOptions` object.\n\nReturns:\nWhen not run eagerly or when saving on a single device, returns a\ndictionary mapping from SaveableObject names to restore operations;\notherwise, returns an empty dict.", "source": "github-repos"}
{"code": "def _get_latest_eval_step_value(update_ops):\n    if isinstance(update_ops, dict):\n        update_ops = list(update_ops.values())\n    with ops.control_dependencies(update_ops):\n        return array_ops.identity(_get_or_create_eval_step().read_value())", "docstring": "Gets the eval step `Tensor` value after running `update_ops`.\n\nArgs:\nupdate_ops: A list of `Tensors` or a dictionary of names to `Tensors`, which\nare run before reading the eval step value.\n\nReturns:\nA `Tensor` representing the value for the evaluation step.", "source": "github-repos"}
{"code": "def economic_svd(G, epsilon=sqrt(finfo(float).eps)):\n    from scipy.linalg import svd\n    G = asarray(G, float)\n    (U, S, V) = svd(G, full_matrices=False, check_finite=False)\n    ok = (S >= epsilon)\n    S = S[ok]\n    U = U[(:, ok)]\n    V = V[(ok, :)]\n    return (U, S, V)", "docstring": "r\"\"\"Economic Singular Value Decomposition.\n\nArgs:\nG (array_like): Matrix to be factorized.\nepsilon (float): Threshold on the square root of the eigen values.\nDefault is ``sqrt(finfo(float).eps)``.\n\nReturns:\n:class:`numpy.ndarray`: Unitary matrix.\n:class:`numpy.ndarray`: Singular values.\n:class:`numpy.ndarray`: Unitary matrix.\n\nSee Also\n--------\nnumpy.linalg.svd : Cholesky decomposition.\nscipy.linalg.svd : Cholesky decomposition.", "source": "codesearchnet"}
{"code": "def convert(self, vroot, entry_variables):\n        \n        self.graph_info = GraphInfo(vroot)\n        self.entry_variables = entry_variables\n\n        with nn.parameter_scope(self.name):\n            \n            for t, func in enumerate(self.graph_info.funcs):\n                \n\n                \n                if func.name == \"BatchNormalization\":\n                    i0 = func.inputs[0]\n                    bn_func = func\n                    \n                    if bn_func.info.args[\"batch_stat\"] == False:\n                        \n                        if i0.parent.info.type_name in self.inner_prod_functions:\n                            nn.logger.info(\"{} is skipped.\".format(func.name))\n                            continue\n\n                \n                if func.name in self.inner_prod_functions:\n                    inner_prod_func = func\n\n                    o0 = inner_prod_func.outputs[0]\n                    fs = self.graph_info.variable_to_funcs[o0]\n                    \n                    if fs is not None and len(fs) == 1:\n                        \n                        bn_func = fs[0]\n                        if bn_func.name == \"BatchNormalization\":\n\n                            \n                            if bn_func.info.args[\"batch_stat\"] == False:\n\n                                \n                                nn.logger.info(\"BatchNormalization parameters are folded to \"\n                                               \"the preceding convolution.\")\n                                o = self._inner_prod_bn_conversion(\n                                    inner_prod_func, bn_func)\n                                continue\n\n                \n                o = self._identity_conversion(func)\n\n        self.end_variable = o\n        return self.end_variable", "docstring": "All functions are replaced with the same `new` function.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj-google-style"}
{"code": "def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen):\n        \n        q_match_len = abs(qstart - qend) + 1\n        s_max = max(sstart, send)\n        s_min = min(sstart, send)\n        return (q_match_len < qlen) and (s_max >= slen or s_min <= 1)", "docstring": "Check if a query sequence is truncated by the end of a subject sequence\n\nArgs:\nqstart (int): Query sequence start index\nqend (int): Query sequence end index\nsstart (int): Subject sequence start index\nsend (int): Subject sequence end index\nqlen (int): Query sequence length\nslen (int): Subject sequence length\n\nReturns:\nbool: Result truncated by subject sequence end?", "source": "juraj-google-style"}
{"code": "def subscribe(self, clock_name: str=None, clock_slots: Iterable[str]=None, subscriptions: Dict[str, Any]={}):\n\t\t\n\t\tfor area in subscriptions:  \n\t\t\tinit_full(self, area, subscriptions[area])\n\t\t\tsubscriptions[area] = {'slots': subscriptions[area]}\n\n\t\tif clock_name is not None:\n\t\t\tself.clock_name = clock_name\n\t\t\tself.clock_slots = clock_slots\n\t\t\tsubscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1}\n\t\tself.setup(puller=True, subscriptions=subscriptions)", "docstring": "Subscribes this Area to the given Areas and optionally given Slots. Must be called before the Area is run.\n\nArgs:\nclock_name: The name of the Area that is used as synchronizing Clock.\nclock_slots: The slots of the Clock relevant to this Area.\nsubscriptions: A dictionary containing the relevant Areas names as keys and optionally the Slots as values.", "source": "juraj-google-style"}
{"code": "def get_list_of_concatenated_objects(obj, dot_separated_name,\n                                     lst=None):\n    \n    from textx.scoping import Postponed\n    if lst is None:\n        lst = []\n    if not obj:\n        return lst\n    if obj in lst:\n        return lst\n    lst.append(obj)\n    if type(obj) is Postponed:\n        return lst\n    ret = get_referenced_object(None, obj, dot_separated_name)\n    if type(ret) is list:\n        for r in ret:\n            lst = get_list_of_concatenated_objects(r, dot_separated_name, lst)\n    else:\n        lst = get_list_of_concatenated_objects(ret, dot_separated_name, lst)\n    return lst", "docstring": "get a list of the objects consisting of\n- obj\n- obj+\".\"+dot_separated_name\n- (obj+\".\"+dot_separated_name)+\".\"+dot_separated_name (called recursively)\nNote: lists are expanded\n\nArgs:\nobj: the starting point\ndot_separated_name: \"the search path\" (applied recursively)\nlst: the initial list (e.g. [])\n\nReturns:\nthe filled list (if one single object is requested, a list with one\nentry is returned).", "source": "juraj-google-style"}
{"code": "def check_for_empty_defaults(status):\n    dirs_to_check = ('./vars', './handlers', './defaults', './tasks')\n    for (dirpath, dirname, filename) in os.walk('.'):\n        if ((dirpath == './files') or (dirpath == './templates')):\n            if (not any([dirname, filename])):\n                status.append('There are no files in the {0} directory. please remove directory'.format(dirpath))\n        if (dirpath in dirs_to_check):\n            try:\n                joined_filename = os.path.join(dirpath, 'main.yml')\n                with open(joined_filename, 'r') as f:\n                    if re.match('^---\\\\n\n                        status.append('Empty file, please remove file and directory: {0}'.format(joined_filename))\n            except IOError:\n                pass\n    return status", "docstring": "Method to check for empty roles structure.\n\nWhen a role is created using ansible-galaxy it creates a default\nscaffolding structure. Best practice dictates that if any of these are not\nused then they should be removed. For example a bare main.yml with the\nfollowing string is created for a 'defaults' for a role called 'myrole':\n\n---\ndefaults file for myrole\n\nThis should be removed.\n\nArgs:\nstatus (list): list of pre-receive check failures to eventually print\nto the user\nReturns:\nstatus list of current pre-redeive check failures. Might be an empty\nlist.", "source": "codesearchnet"}
{"code": "def _ParseNoHeaderSingleLine(self, parser_mediator, structure):\n    \n    if not self._last_event_data:\n      logger.debug('SkyDrive, found isolated line with no previous events')\n      return\n\n    event_data = SkyDriveOldLogEventData()\n    event_data.offset = self._last_event_data.offset\n    event_data.text = structure.text\n\n    event = time_events.DateTimeValuesEvent(\n        self._last_date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    \n    self._last_date_time = None\n    self._last_event_data = None", "docstring": "Parse an isolated header line and store appropriate attributes.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def kill_log_monitor(self, check_alive=True):\n    self._kill_process_type(ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive)", "docstring": "Kill the log monitor.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "codesearchnet"}
{"code": "def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):\n    \n    text = dumps(graphs, triples=triples, cls=cls, **kwargs)\n\n    if hasattr(file, 'write'):\n        print(text, file=file)\n    else:\n        with open(file, 'w') as fh:\n            print(text, file=fh)", "docstring": "Serialize each graph in *graphs* to PENMAN and write to *file*.\n\nArgs:\ngraphs: an iterable of Graph objects\nfile: a filename or file-like object to write to\ntriples: if True, write graphs as triples instead of as PENMAN\ncls: serialization codec class\nkwargs: keyword arguments passed to the constructor of *cls*", "source": "juraj-google-style"}
{"code": "def apply_to_miz(self, miz):\n        \n\n        report = ['Building mission with weather:']\n\n        miz.mission.weather.wind_at_ground_level_dir = self.wind_at_ground_level_dir\n        miz.mission.weather.wind_at_ground_level_speed = self.wind_at_ground_level_speed\n        miz.mission.weather.wind_at2000_dir = self._randomize_direction(self.wind_dir, 40)\n        miz.mission.weather.wind_at2000_speed = self._randomize_speed(5 + self.wind_at_ground_level_speed * 2)\n        miz.mission.weather.wind_at8000_dir = self._randomize_direction(self.wind_dir, 80)\n        miz.mission.weather.wind_at8000_speed = self._randomize_speed(10 + self.wind_at_ground_level_speed * 3)\n        miz.mission.weather.turbulence_at_ground_level = self.turbulence\n\n        _ground = f'{miz.mission.weather.wind_at_ground_level_dir}/{miz.mission.weather.wind_at_ground_level_speed}'\n        _at2000 = f'{miz.mission.weather.wind_at2000_dir}/{miz.mission.weather.wind_at2000_speed}'\n        _at8000 = f'{miz.mission.weather.wind_at8000_dir}/{miz.mission.weather.wind_at8000_speed}'\n        _turbulence = f'{miz.mission.weather.turbulence_at_ground_level}'\n\n        wind = f'Wind:' \\\n               f'\\n\\tGround: {_ground}' \\\n               f'\\n\\t2000m: {_at2000}' \\\n               f'\\n\\t8000m: {_at8000}' \\\n               f'\\n\\tTurbulence: {_turbulence}'\n\n        report.append(wind)\n\n        miz.mission.weather.atmosphere_type = 0\n        miz.mission.weather.qnh = self.qnh\n\n        report.append(f'Atmosphere type: {miz.mission.weather.atmosphere_type}')\n        report.append(f'QNH: {miz.mission.weather.qnh}')\n\n        miz.mission.weather.visibility = self.visibility\n        if self.fog_vis:\n            miz.mission.weather.fog_thickness = 1000\n            miz.mission.weather.fog_visibility = self.fog_vis\n            miz.mission.weather.fog_enabled = True\n        else:\n            miz.mission.weather.fog_enabled = False\n            miz.mission.weather.fog_visibility = 0\n            miz.mission.weather.fog_thickness = 0\n\n        visibility = f'Visibility: {miz.mission.weather.visibility}' \\\n                     f'\\n\\tFog: {\"yes\" if miz.mission.weather.fog_enabled else \"no\"}' \\\n                     f'\\n\\tFog thickness: {miz.mission.weather.fog_thickness}' \\\n                     f'\\n\\tFog visibility: {miz.mission.weather.fog_visibility}'\n\n        report.append(visibility)\n\n        miz.mission.weather.temperature = self.temperature\n\n        report.append(f'Temperature: {self.temperature}°C')\n\n        miz.mission.weather.cloud_density = max(self.force_cloud_density, self.cloud_density)\n        miz.mission.weather.cloud_thickness = self.cloud_thickness\n        miz.mission.weather.cloud_base = self.cloud_base\n        miz.mission.weather.precipitations = self.precipitations\n\n        clouds = f'Clouds:' \\\n                 f'\\n\\tClouds density: {miz.mission.weather.cloud_density}' \\\n                 f'\\n\\tClouds thickness: {miz.mission.weather.cloud_thickness}' \\\n                 f'\\n\\tClouds base: {miz.mission.weather.cloud_base}' \\\n                 f'\\n\\tPrecipitations: {miz.mission.weather.precipitations}'\n\n        report.append(clouds)\n\n        LOGGER.debug('applying weather: %s', report)\n\n        return True", "docstring": "Applies weather to an opened Miz file (the mission will be mutated)\n\nArgs:\nmiz: source miz\n\nReturns: True", "source": "juraj-google-style"}
{"code": "def sheets_tab_range(sheet_tab, sheet_range):\n    if sheet_range:\n        return '%s!%s' % (sheet_tab, sheet_range)\n    else:\n        return sheet_tab", "docstring": "Helper for creating range format.\n\nArgs:\nsheet_tab - name of tab in sheet\nsheet_range - A1 notation\n\nReturns:\nString containing full sheet range specification.", "source": "github-repos"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError('Invalid current offset value less than zero.')\n\n    if self._current_offset > self._size:\n      return b''\n\n    if size is None or self._current_offset + size > self._size:\n      size = self._size - self._current_offset\n\n    self._tar_ext_file.seek(self._current_offset, os.SEEK_SET)\n\n    data = self._tar_ext_file.read(size)\n\n    \n    \n    \n    self._current_offset += len(data)\n\n    return data", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def Open(self, path=None, read_only=True, **unused_kwargs):\n    if self._is_open:\n        raise IOError('Storage file already opened.')\n    if (not path):\n        raise ValueError('Missing path.')\n    path = os.path.abspath(path)\n    connection = sqlite3.connect(path, detect_types=(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES))\n    cursor = connection.cursor()\n    if (not cursor):\n        return\n    self._connection = connection\n    self._cursor = cursor\n    self._is_open = True\n    self._read_only = read_only\n    if read_only:\n        self._ReadAndCheckStorageMetadata(check_readable_only=True)\n    else:\n        self._cursor.execute('PRAGMA synchronous=OFF')\n        if (not self._HasTable('metadata')):\n            self._WriteStorageMetadata()\n        else:\n            self._ReadAndCheckStorageMetadata()\n        if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB):\n            data_column_type = 'BLOB'\n        else:\n            data_column_type = 'TEXT'\n        for container_type in self._CONTAINER_TYPES:\n            if (not self._HasTable(container_type)):\n                if (container_type == self._CONTAINER_TYPE_EVENT):\n                    query = self._CREATE_EVENT_TABLE_QUERY.format(container_type, data_column_type)\n                else:\n                    query = self._CREATE_TABLE_QUERY.format(container_type, data_column_type)\n                self._cursor.execute(query)\n        self._connection.commit()\n    last_session_start = self._CountStoredAttributeContainers(self._CONTAINER_TYPE_SESSION_START)\n    last_session_completion = self._CountStoredAttributeContainers(self._CONTAINER_TYPE_SESSION_COMPLETION)\n    for container_type in self._REFERENCED_CONTAINER_TYPES:\n        container_list = self._GetSerializedAttributeContainerList(container_type)\n        container_list.next_sequence_number = self._CountStoredAttributeContainers(container_type)\n    if (last_session_start != last_session_completion):\n        logger.warning('Detected unclosed session.')\n    self._last_session = last_session_completion", "docstring": "Opens the storage.\n\nArgs:\npath (Optional[str]): path to the storage file.\nread_only (Optional[bool]): True if the file should be opened in\nread-only mode.\n\nRaises:\nIOError: if the storage file is already opened or if the database\ncannot be connected.\nOSError: if the storage file is already opened or if the database\ncannot be connected.\nValueError: if path is missing.", "source": "codesearchnet"}
{"code": "def get_openapi_dict(self, services, hostname=None, x_google_api_name=False):\n    \n\n    if not isinstance(services, (tuple, list)):\n      services = [services]\n\n    \n    \n    \n    util.check_list_type(services, remote._ServiceClass, 'services',\n                         allow_none=False)\n\n    return self.__api_openapi_descriptor(services, hostname=hostname, x_google_api_name=x_google_api_name)", "docstring": "JSON dict description of a protorpc.remote.Service in OpenAPI format.\n\nArgs:\nservices: Either a single protorpc.remote.Service or a list of them\nthat implements an api/version.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\ndict, The OpenAPI descriptor document as a JSON dict.", "source": "juraj-google-style"}
{"code": "def read_file(self, file: Union[IO, asyncio.StreamWriter]=None):\n        \n        if file:\n            file_is_async = hasattr(file, 'drain')\n\n        while True:\n            data = yield from self._connection.read(4096)\n\n            if not data:\n                break\n\n            if file:\n                file.write(data)\n\n                if file_is_async:\n                    yield from file.drain()\n\n            self._data_event_dispatcher.notify_read(data)", "docstring": "Read from connection to file.\n\nArgs:\nfile: A file object or a writer stream.", "source": "juraj-google-style"}
{"code": "def __call__(self, obj, attr, obj_ref):\n        \n        from textx.const import RULE_COMMON, RULE_ABSTRACT\n        from textx.model import ObjCrossRef\n        from textx.scoping.tools import get_parser\n\n        if obj_ref is None:\n            return None  \n\n        assert type(obj_ref) is ObjCrossRef, type(obj_ref)\n\n        if get_parser(obj).debug:\n            get_parser(obj).dprint(\"Resolving obj crossref: {}:{}\"\n                                   .format(obj_ref.cls, obj_ref.obj_name))\n\n        def _inner_resolve_link_rule_ref(cls, obj_name):\n            \n            if cls._tx_type is RULE_ABSTRACT:\n                for inherited in cls._tx_inh_by:\n                    result = _inner_resolve_link_rule_ref(inherited,\n                                                          obj_name)\n                    if result:\n                        return result\n            elif cls._tx_type == RULE_COMMON:\n                \n                \n                \n                \n                \n                \n                \n                \n                \n                \n                if id(cls) in get_parser(obj)._instances:\n                    objs = get_parser(obj)._instances[id(cls)]\n                    return objs.get(obj_name)\n\n        if self.multi_metamodel_support:\n            from textx import get_model, get_children\n            from textx import textx_isinstance\n            result_lst = get_children(\n                lambda x:\n                hasattr(x, \"name\") and x.name == obj_ref.obj_name\n                and textx_isinstance(x, obj_ref.cls), get_model(obj))\n            if len(result_lst) == 1:\n                result = result_lst[0]\n            elif len(result_lst) > 1:\n                line, col = get_parser(obj).pos_to_linecol(obj_ref.position)\n                raise TextXSemanticError(\n                    \"name {} is not unique.\".format(obj_ref.obj_name),\n                    line=line, col=col, filename=get_model(obj)._tx_filename)\n            else:\n                result = None\n        else:\n            result = _inner_resolve_link_rule_ref(obj_ref.cls,\n                                                  obj_ref.obj_name)\n        if result:\n            return result\n\n        return None", "docstring": "the default scope provider\n\nArgs:\nobj: unused (used for multi_metamodel_support)\nattr: unused\nobj_ref: the cross reference to be resolved\n\nReturns:\nthe resolved reference or None", "source": "juraj-google-style"}
{"code": "def _query(self, path: str, method: str, data: Dict[(str, Any)]=None, expected_status: int=200) -> Union[(List[Dict[(str, Any)]], Dict[(str, Any)], None)]:\n    url = (Pycord.url_base + path)\n    self.logger.debug(f'Making {method} request to \"{url}\"')\n    if (method == 'GET'):\n        r = requests.get(url, headers=self._build_headers())\n    elif (method == 'POST'):\n        r = requests.post(url, headers=self._build_headers(), json=data)\n        r = requests.get(url, headers=self._build_headers())\n    elif (method == 'PATCH'):\n        r = requests.patch(url, headers=self._build_headers(), json=data)\n    else:\n        raise ValueError(f'Unknown HTTP method {method}')\n    self.logger.debug(f'{method} response from \"{url}\" was \"{r.status_code}\"')\n    if (r.status_code != expected_status):\n        raise ValueError(f'Non-{expected_status} {method} response from Discord API ({r.status_code}): {r.text}')\n    if (expected_status == 200):\n        return r.json()\n    return None", "docstring": "Make an HTTP request\n\nArgs:\npath: the URI path (not including the base url, start with\nthe first uri segment, like 'users/...')\nmethod: the HTTP method to use (GET, POST, PATCH, ...)\ndata: the data to send as JSON data\nexpected_status: expected HTTP status; other statuses\nreceived will raise an Exception\n\nReturns:\nData from the endpoint's response", "source": "codesearchnet"}
{"code": "def set_video_pos(self, x1, y1, x2, y2):\n        \n        position = \"%s %s %s %s\" % (str(x1),str(y1),str(x2),str(y2))\n        self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))", "docstring": "Set the video position on the screen\n\nArgs:\nx1 (int): Top left x coordinate (px)\ny1 (int): Top left y coordinate (px)\nx2 (int): Bottom right x coordinate (px)\ny2 (int): Bottom right y coordinate (px)", "source": "juraj-google-style"}
{"code": "def document(self, *document_path):\n    if (len(document_path) == 1):\n        path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)\n    else:\n        path = document_path\n    return DocumentReference(*path, client=self)", "docstring": "Get a reference to a document in a collection.\n\nFor a top-level document:\n\n.. code-block:: python\n\n>>> client.document('collek/shun')\n>>> # is the same as\n>>> client.document('collek', 'shun')\n\nFor a document in a sub-collection:\n\n.. code-block:: python\n\n>>> client.document('mydocs/doc/subcol/child')\n>>> # is the same as\n>>> client.document('mydocs', 'doc', 'subcol', 'child')\n\nDocuments in sub-collections can be nested deeper in a similar fashion.\n\nArgs:\ndocument_path (Tuple[str, ...]): Can either be\n\n* A single ``/``-delimited path to a document\n* A tuple of document path segments\n\nReturns:\n~.firestore_v1beta1.document.DocumentReference: A reference\nto a document in a collection.", "source": "codesearchnet"}
{"code": "def memoizedmethod(method):\n    method_name = method.__name__\n\n    @wraps(method)\n    def patched(self, *args, **kwargs):\n        'Patched method'\n        try:\n            return self._cache[method_name]\n        except KeyError:\n            result = self._cache[method_name] = method(self, *args, **kwargs)\n            return result\n    return patched", "docstring": "Decorator that caches method result.\n\nArgs:\nmethod (function): Method\n\nReturns:\nfunction: Memoized method.\n\nNotes:\nTarget method class needs as \"_cache\" attribute (dict).\n\nIt is the case of \"ObjectIOBase\" and all its subclasses.", "source": "codesearchnet"}
{"code": "def expect_no_raises(message=None, extras=None):\n    try:\n        (yield)\n    except Exception as e:\n        e_record = records.ExceptionRecord(e)\n        if extras:\n            e_record.extras = extras\n        msg = (message or 'Got an unexpected exception')\n        details = ('%s: %s' % (msg, e_record.details))\n        logging.exception(details)\n        e_record.details = details\n        recorder.add_error(e_record)", "docstring": "Expects no exception is raised in a context.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nA default message is added to the exception `details`.\n\nArgs:\nmessage: string, custom message to add to exception's `details`.\nextras: An optional field for extra information to be included in test\nresult.", "source": "codesearchnet"}
{"code": "def submitTemplate(id, data={}):\n        \n        conn = Qubole.agent()\n        path = str(id) + \"/run\"\n        return conn.post(Template.element_path(path), data)", "docstring": "Submit an existing Template.\n\nArgs:\n`id`: ID of the template to submit\n`data`: json data containing the input_vars\nReturns:\nDictionary containing Command Object details.", "source": "juraj-google-style"}
{"code": "def set_tif(self, interface):\n    if (not ((1 << interface) & self.supported_tifs())):\n        raise errors.JLinkException(('Unsupported target interface: %s' % interface))\n    res = self._dll.JLINKARM_TIF_Select(interface)\n    if (res != 0):\n        return False\n    self._tif = interface\n    return True", "docstring": "Selects the specified target interface.\n\nNote that a restart must be triggered for this to take effect.\n\nArgs:\nself (Jlink): the ``JLink`` instance\ninterface (int): integer identifier of the interface\n\nReturns:\n``True`` if target was updated, otherwise ``False``.\n\nRaises:\nJLinkException: if the given interface is invalid or unsupported.", "source": "codesearchnet"}
{"code": "def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):\n        \n        raise NotImplementedError(\"This method should be tested\")\n        \n\n        \n        gswfk_file = os.path.absath(gswfk_file)\n        dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]\n        gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]\n\n        print(\"Will merge %d 1WF files, %d GKK file in output %s\" %\n              (len(dfpt_files), len(gkk_files), out_gkk))\n\n        if self.verbose:\n            for i, f in enumerate(dfpt_files): print(\" [%d] 1WF %s\" % (i, f))\n            for i, f in enumerate(gkk_files): print(\" [%d] GKK %s\" % (i, f))\n\n        self.stdin_fname, self.stdout_fname, self.stderr_fname = \\\n            map(os.path.join, 3 * [workdir], [\"mrggkk.stdin\", \"mrggkk.stdout\", \"mrggkk.stderr\"])\n\n        inp = StringIO()\n        inp.write(out_gkk + \"\\n\")        \n        inp.write(str(binascii) + \"\\n\")  \n        inp.write(gswfk_file + \"\\n\")     \n\n        \n        dims = \" \".join([str(d) for d in dims])\n        inp.write(dims + \"\\n\")             \n\n        \n        for fname in dfpt_files:\n            inp.write(fname + \"\\n\")\n\n        \n        for fname in gkk_files:\n            inp.write(fname + \"\\n\")\n\n        self.stdin_data = [s for s in inp.getvalue()]\n\n        with open(self.stdin_fname, \"w\") as fh:\n            fh.writelines(self.stdin_data)\n            \n            fh.flush()\n            os.fsync(fh.fileno())\n\n        self.execute(workdir)\n\n        return out_gkk", "docstring": "Merge GGK files, return the absolute path of the new database.\n\nArgs:\ngswfk_file: Ground-state WFK filename\ndfpt_files: List of 1WFK files to merge.\ngkk_files: List of GKK files to merge.\nout_gkk: Name of the output GKK file\nbinascii: Integer flat. 0 --> binary output, 1 --> ascii formatted output", "source": "juraj-google-style"}
{"code": "def _set_class_path(cls, module_dict=sys.modules):\n    \n    \n    found = cls.__dict__.get('_class_path')\n    if found is not None:\n      return\n\n    \n    \n    \n    \n    if cls is Pipeline:\n      return\n\n    class_path = '%s.%s' % (cls.__module__, cls.__name__)\n    \n    \n    \n    if cls.__module__ == '__main__':\n      for name, module in module_dict.items():\n        if name == '__main__':\n          continue\n        found = getattr(module, cls.__name__, None)\n        if found is cls:\n          class_path = '%s.%s' % (name, cls.__name__)\n          break\n    cls._class_path = class_path", "docstring": "Sets the absolute path to this class as a string.\n\nUsed by the Pipeline API to reconstruct the Pipeline sub-class object\nat execution time instead of passing around a serialized function.\n\nArgs:\nmodule_dict: Used for testing.", "source": "juraj-google-style"}
{"code": "def get_line_count(fname):\n    \n    i = 0\n    with open(fname) as f:\n        for i, l in enumerate(f):\n            pass\n    return i + 1", "docstring": "Counts the number of lines in a file.\n\nArgs:\nfname: string, name of the file.\n\nReturns:\ninteger, the number of lines in the file.", "source": "juraj-google-style"}
{"code": "def GetFormatStringAttributeNames(self, event):\n    \n    event_formatter = self.GetEventFormatter(event)\n    if not event_formatter:\n      return None\n\n    return event_formatter.GetFormatStringAttributeNames()", "docstring": "Retrieves the attribute names in the format string.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nlist[str]: list containing the attribute names. If no event formatter to\nmatch the event can be found the function returns None.", "source": "juraj-google-style"}
{"code": "def get_seed(seed):\n    seed, seed2 = random_seed.get_seed(seed)\n    if seed is None:\n        seed = constant_op.constant(0, dtype=dtypes.int64, name='seed')\n    else:\n        seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name='seed')\n    if seed2 is None:\n        seed2 = constant_op.constant(0, dtype=dtypes.int64, name='seed2')\n    else:\n        with ops.name_scope('seed2') as scope:\n            seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)\n            seed2 = array_ops.where_v2(math_ops.logical_and(math_ops.equal(seed, 0), math_ops.equal(seed2, 0)), constant_op.constant(2 ** 31 - 1, dtype=dtypes.int64), seed2, name=scope)\n    return (seed, seed2)", "docstring": "Returns the local seeds an operation should use given an op-specific seed.\n\nSee `random_seed.get_seed` for more details. This wrapper adds support for\nthe case where `seed` may be a tensor.\n\nArgs:\nseed: An integer or a `tf.int64` scalar tensor.\n\nReturns:\nA tuple of two `tf.int64` scalar tensors that should be used for the local\nseed of the calling dataset.", "source": "github-repos"}
{"code": "def _scan_two_qubit_ops_into_matrix(self, circuit: circuits.Circuit, index: Optional[int], qubits: Tuple[(ops.Qid, ...)]) -> Tuple[(List[ops.Operation], List[int], np.ndarray)]:\n    product = np.eye(4, dtype=np.complex128)\n    all_operations = []\n    touched_indices = []\n    while (index is not None):\n        operations = list({circuit.operation_at(q, index) for q in qubits})\n        op_data = [self._op_to_matrix(op, qubits) for op in operations if (op is not None)]\n        if any(((e is None) for e in op_data)):\n            break\n        present_ops = [op for op in operations if op]\n        present_op_data = cast(List[np.ndarray], op_data)\n        for op_mat in present_op_data:\n            product = np.dot(op_mat, product)\n        all_operations.extend(present_ops)\n        touched_indices.append(index)\n        index = circuit.next_moment_operating_on(qubits, (index + 1))\n    return (all_operations, touched_indices, product)", "docstring": "Accumulates operations affecting the given pair of qubits.\n\nThe scan terminates when it hits the end of the circuit, finds an\noperation without a known matrix, or finds an operation that interacts\nthe given qubits with other qubits.\n\nArgs:\ncircuit: The circuit to scan for operations.\nindex: The index to start scanning forward from.\nqubits: The pair of qubits we care about.\n\nReturns:\nA tuple containing:\n0. The operations.\n1. The moment indices those operations were on.\n2. A matrix equivalent to the effect of the scanned operations.", "source": "codesearchnet"}
{"code": "def calculate_weighted_avg(bonds):\n    \n    minimum_bond = min(bonds)\n    weighted_sum = 0.0\n    total_sum = 0.0\n    for entry in bonds:\n        weighted_sum += entry * exp(1 - (entry / minimum_bond) ** 6)\n        total_sum += exp(1 - (entry / minimum_bond) ** 6)\n    return weighted_sum / total_sum", "docstring": "Returns the weighted average bond length given by\nHoppe's effective coordination number formula.\n\nArgs:\nbonds (list): list of floats that are the\nbond distances between a cation and its\nperipheral ions", "source": "juraj-google-style"}
{"code": "def is_decomposed(P):\n    \n    if P.shape:\n        return min([is_decomposed(poly) for poly in P])\n    return len(P.keys) <= 1", "docstring": "Check if a polynomial (array) is on component form.\n\nArgs:\nP (Poly):\nInput data.\n\nReturns:\n(bool):\nTrue if all polynomials in ``P`` are on component form.\n\nExamples:\n>>> x,y = cp.variable(2)\n>>> print(cp.is_decomposed(cp.Poly([1,x,x*y])))\nTrue\n>>> print(cp.is_decomposed(cp.Poly([x+1,x*y])))\nFalse", "source": "juraj-google-style"}
{"code": "def setEditorData(self, spinBox, index):\n    if index.isValid():\n        value = index.model().data(index, QtCore.Qt.EditRole)\n        spinBox.setValue(value)", "docstring": "Sets the data to be displayed and edited by the editor from the data model item specified by the model index.\n\nArgs:\nspinBox (BigIntSpinbox): editor widget.\nindex (QModelIndex): model data index.", "source": "codesearchnet"}
{"code": "def stft_magnitude(signal, fft_length,\n                   hop_length=None,\n                   window_length=None):\n  \n  frames = frame(signal, window_length, hop_length)\n  \n  \n  \n  window = periodic_hann(window_length)\n  windowed_frames = frames * window\n  return np.abs(np.fft.rfft(windowed_frames, int(fft_length)))", "docstring": "Calculate the short-time Fourier transform magnitude.\n\nArgs:\nsignal: 1D np.array of the input time-domain signal.\nfft_length: Size of the FFT to apply.\nhop_length: Advance (in samples) between each frame passed to FFT.\nwindow_length: Length of each block of samples to pass to FFT.\n\nReturns:\n2D np.array where each row contains the magnitudes of the fft_length/2+1\nunique values of the FFT for the corresponding frame of input samples.", "source": "juraj-google-style"}
{"code": "def get_pmg_structure(phonopy_structure):\n    \n\n    lattice = phonopy_structure.get_cell()\n    frac_coords = phonopy_structure.get_scaled_positions()\n    symbols = phonopy_structure.get_chemical_symbols()\n    masses = phonopy_structure.get_masses()\n    mms = phonopy_structure.get_magnetic_moments()\n    mms = mms or [0] * len(symbols)\n\n    return Structure(lattice, symbols, frac_coords,\n                     site_properties={\"phonopy_masses\": masses,\n                                      \"magnetic_moments\": mms})", "docstring": "Convert a PhonopyAtoms object to pymatgen Structure object.\n\nArgs:\nphonopy_structure (PhonopyAtoms): A phonopy structure object.", "source": "juraj-google-style"}
{"code": "def refine_rotation(self):\n        \n        new_x, y = get_uvec(self[0]), get_uvec(self[1])\n        \n        new_y = y - np.dot(new_x, y) * new_x\n        new_z = np.cross(new_x, new_y)\n        return SquareTensor([new_x, new_y, new_z])", "docstring": "Helper method for refining rotation matrix by ensuring\nthat second and third rows are perpindicular to the first.\nGets new y vector from an orthogonal projection of x onto y\nand the new z vector from a cross product of the new x and y\n\nArgs:\ntol to test for rotation\n\nReturns:\nnew rotation matrix", "source": "juraj-google-style"}
{"code": "def sanitize_arg_name(name: str) -> str:\n    swapped = ''.join([c if c.isalnum() else '_' for c in name])\n    result = swapped if swapped[0].isalpha() else 'arg_' + swapped\n    global sanitization_warnings_given\n    if name != result and sanitization_warnings_given < MAX_SANITIZATION_WARNINGS:\n        logging.warning('`%s` is not a valid tf.function parameter name. Sanitizing to `%s`.', name, result)\n        sanitization_warnings_given += 1\n    return result", "docstring": "Sanitizes function argument names.\n\nMatches Python symbol naming rules.\n\nWithout sanitization, names that are not legal Python parameter names can be\nset which makes it challenging to represent callables supporting the named\ncalling capability.\n\nArgs:\nname: The name to sanitize.\n\nReturns:\nA string that meets Python parameter conventions.", "source": "github-repos"}
{"code": "def setLCD(self, password='00000000'):\n    result = False\n    self.setContext('setLCD')\n    try:\n        self.clearCmdMsg()\n        if (len(password) != 8):\n            self.writeCmdMsg('Invalid password length.')\n            self.setContext('')\n            return result\n        if (not self.request()):\n            self.writeCmdMsg('Bad read CRC on setting')\n        elif (not self.serialCmdPwdAuth(password)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_table = ''\n            fill_len = (40 - len(self.m_lcd_items))\n            for lcdid in self.m_lcd_items:\n                append_val = binascii.hexlify(str(lcdid).zfill(2))\n                req_table += append_val\n            for i in range(0, fill_len):\n                append_val = binascii.hexlify(str(0).zfill(2))\n                req_table += append_val\n            req_str = (('015731023030443228' + req_table) + '2903')\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success: 06 returned.')\n                result = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return result", "docstring": "Serial call to set LCD using meter object bufer.\n\nUsed with :func:`~ekmmeters.V4Meter.addLcdItem`.\n\nArgs:\npassword (str): Optional password\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"}
{"code": "def PushTask(self, task):\n    storage_file_size = getattr(task, 'storage_file_size', None)\n    if (not storage_file_size):\n        raise ValueError('Task storage file size not set.')\n    if (task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY):\n        weight = 1\n    else:\n        weight = storage_file_size\n    task.merge_priority = weight\n    heap_values = (weight, task)\n    heapq.heappush(self._heap, heap_values)\n    self._task_identifiers.add(task.identifier)", "docstring": "Pushes a task onto the heap.\n\nArgs:\ntask (Task): task.\n\nRaises:\nValueError: if the size of the storage file is not set in the task.", "source": "codesearchnet"}
{"code": "def get_tensors(self, node_name, output_slot, debug_op, device_name=None):\n    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n    try:\n        device_name = self._infer_device_name(device_name, node_name)\n        return [datum.get_tensor() for datum in self._watch_key_to_datum[device_name][watch_key]]\n    except (ValueError, KeyError):\n        raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump of device %s' % (watch_key, device_name))", "docstring": "Get the tensor value from for a debug-dumped tensor.\n\nThe tensor may be dumped multiple times in the dump root directory, so a\nlist of tensors (`numpy.ndarray`) is returned.\n\nArgs:\nnode_name: (`str`) name of the node that the tensor is produced by.\noutput_slot: (`int`) output slot index of tensor.\ndebug_op: (`str`) name of the debug op.\ndevice_name: (`str`) name of the device. If there is only one device or if\nthe specified debug_watch_key exists on only one device, this argument\nis optional.\n\nReturns:\nList of tensors (`numpy.ndarray`) loaded from the debug-dump file(s).\n\nRaises:\nWatchKeyDoesNotExistInDebugDumpDirError: If the tensor does not exist in\nthe debug-dump data.", "source": "github-repos"}
{"code": "def _craft_s3_keys(self):\n        \n        now = time.gmtime()\n        stub = \"templates/{stack_name}/{version}\".format(\n            stack_name=self._config.get('environment', {}).get('stack_name', None),\n            version=self._config.get('codeVersion')\n        )\n\n        stub = stub + \"/\" + str(now.tm_year)\n        stub = stub + \"/\" + str('%02d' % now.tm_mon)\n        stub = stub + \"/\" + str('%02d' % now.tm_mday)\n        stub = stub + \"/\" + str('%02d' % now.tm_hour)\n        stub = stub + \":\" + str('%02d' % now.tm_min)\n        stub = stub + \":\" + str('%02d' % now.tm_sec)\n\n        if self._yaml:\n            template_key = stub + \"/stack.yaml\"\n        else:\n            template_key = stub + \"/stack.json\"\n\n        property_key = stub + \"/stack.properties\"\n        return template_key, property_key", "docstring": "We are putting stuff into S3, were supplied the bucket. Here we\ncraft the key of the elements we are putting up there in the\ninternet clouds.\n\nArgs:\nNone\n\nReturns:\na tuple of teplate file key and property file key", "source": "juraj-google-style"}
{"code": "def _embedding_lookup_for_ragged_tensor(inp: ragged_tensor.RaggedTensor, weight: Optional[ragged_tensor.RaggedTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:\n    if inp.shape.rank != 2:\n        raise ValueError('Only rank 2 ragged tensor is supported, but got rank {}'.format(inp.shape.rank))\n    batch_size = inp.shape[0]\n    if feature.output_shape:\n        output_batch_size = math_ops.reduce_prod(feature.output_shape)\n        if output_batch_size == batch_size:\n            ragged_output = _ragged_embedding_lookup_with_reduce(table, inp, weight, feature.table.combiner)\n            ragged_output = array_ops.reshape(ragged_output, shape=feature.output_shape + [feature.table.dim])\n        elif output_batch_size > batch_size and output_batch_size % batch_size == 0:\n            ragged_output = embedding_ops.embedding_lookup_v2(table, inp)\n            ragged_output = ragged_output.to_tensor(shape=[batch_size, output_batch_size \n            ragged_output = array_ops.reshape(ragged_output, feature.output_shape + [feature.table.dim])\n        else:\n            raise ValueError('Output shape set in the FeatureConfig should be the factor of the input data batch size. But instead got output shape {}, input data batch size {}'.format(feature.output_shape, batch_size))\n    elif feature.max_sequence_length > 0:\n        output_shape = [batch_size, feature.max_sequence_length, feature.table.dim]\n        ragged_lookup = embedding_ops.embedding_lookup_v2(table, inp)\n        ragged_output = ragged_lookup.to_tensor(shape=output_shape)\n    else:\n        ragged_output = _ragged_embedding_lookup_with_reduce(table, inp, weight, feature.table.combiner)\n    return ragged_output", "docstring": "Embedding lookup for ragged tensor based on its feature config.\n\nArgs:\ninp: a single rank 2 RaggedTensor input.\nweight: None or RaggedTensor which has the same shape of the input.\ntable: a table variable.\nfeature: a feature config.\n\nReturns:\nEmbedding lookup result.\n\nRaises:\nValueError: if input ragged tensor is not rank 2 or output shape set in the\nfeature config doesn't match with the first dim size of the input.", "source": "github-repos"}
{"code": "def get_graph(graph, conn, **kwargs):\n    \n    sparql = render_without_request(\"sparqlGraphDataTemplate.rq\",\n                                    prefix=NSM.prefix(),\n                                    graph=graph)\n    return conn.query(sparql, **kwargs)", "docstring": "Returns all the triples for a specific are graph\n\nargs:\ngraph: the URI of the graph to retreive\nconn: the rdfframework triplestore connection", "source": "juraj-google-style"}
{"code": "def xml_to_json(root, tag_prefix=None, on_tag={}):\n\n    def get_key(tag):\n        if (tag_prefix is not None):\n            return tag.split(tag_prefix)[1]\n        return tag\n\n    def parse_element(elmt):\n        key = get_key(elmt.tag)\n        if (key in on_tag):\n            return on_tag[key](elmt)\n        items = dict(elmt.items())\n        if (len(elmt) == 0):\n            if items:\n                return {**items, **{key: elmt.text}}\n            else:\n                return elmt.text\n        else:\n            tags = {child.tag for child in elmt}\n            max_children = max({len(child) for child in elmt})\n            if (len(tags) == 1):\n                value_list = [parse_element(child) for child in elmt]\n                if items:\n                    return {**items, **{key: value_list}}\n                else:\n                    return value_list\n            elif (len(tags) > 1):\n                tag2children = {tag: [] for tag in tags}\n                for child in elmt:\n                    tag2children[child.tag].append(child)\n                if (max_children == 0):\n                    value_dict = {get_key(tag): ([child.text for child in children] if (len(children) > 1) else children[0].text) for (tag, children) in tag2children.items()}\n                else:\n                    value_dict = {get_key(tag): ([parse_element(child) for child in children] if (len(children) > 1) else parse_element(children[0])) for (tag, children) in tag2children.items()}\n                if items:\n                    return {**items, **value_dict}\n                else:\n                    return value_dict\n    return parse_element(root)", "docstring": "Parses a XML element to JSON format.\n\nThis is a relatively generic function parsing a XML element\nto JSON format. It does not guarantee any specific formal\nbehaviour but is empirically known to \"work well\" with respect\nto the author's needs. External verification of the returned\nresults by the user is therefore instrumental.\n\nFor bigger XML elements the whole procedure may take a while,\nso the philosophy should be to save the laboriously mapped\nJSON data structure to a file once you have it. This of course\nalso means that this functions is probably of little value\nwhen you have to constantly JSONify big XMLs. In summary,\nthis function is mostly useful for one-time parsing of XML to\nJSON for subsequent use of the resulting JSON data instead of\nthe XML-formated data.\n\nArgs:\nroot: A XML element\ntag_prefix: A tag prefix which will be cut from the keys\non_tag: User-defined parsing for elements identified by tag\n\nReturns:\nA Python data structure corresponding to the JSON mapping\nof the supplied XML element", "source": "codesearchnet"}
{"code": "def unpack(container, path):\n    from benchbuild.utils.run import run\n    from benchbuild.utils.uchroot import no_args\n    path = local.path(path)\n    c_filename = local.path(container.filename)\n    name = c_filename.basename\n    if (not path.exists()):\n        path.mkdir()\n    with local.cwd(path):\n        Wget(container.remote, name)\n        uchroot = no_args()\n        uchroot = uchroot[('-E', '-A', '-C', '-r', '/', '-w', os.path.abspath('.'), '--')]\n        has_erlent = bash[('-c', \"tar --list -f './{0}' | grep --silent '.erlent'\".format(name))]\n        has_erlent = (has_erlent & TF)\n        untar = local['/bin/tar'][('xf', ('./' + name))]\n        if (not has_erlent):\n            untar = uchroot[untar]\n        run(untar['--exclude=dev/*'])\n        if (not os.path.samefile(name, container.filename)):\n            rm(name)\n        else:\n            LOG.warning('File contents do not match: %s != %s', name, container.filename)\n        cp((container.filename + '.hash'), path)", "docstring": "Unpack a container usable by uchroot.\n\nMethod that checks if a directory for the container exists,\nchecks if erlent support is needed and then unpacks the\ncontainer accordingly.\n\nArgs:\npath: The location where the container is, that needs to be unpacked.", "source": "codesearchnet"}
{"code": "def get_compatible_systems(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri) + '/compatible-systems')\n    return self._client.get(uri)", "docstring": "Retrieves a collection of all storage systems that is applicable to this storage volume template.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\n\nReturns:\nlist: Storage systems.", "source": "codesearchnet"}
{"code": "def _dataset_partition(self, mode, config, params):\n    if ((mode != tf.estimator.ModeKeys.TRAIN) or (not hasattr(config, 'tpu_config'))):\n        self._next_partition_id = 0\n        return (0, 1)\n    phift = config.tpu_config.per_host_input_for_training\n    if (hasattr(tpu_config.InputPipelineConfig, 'BROADCAST') and (phift == tpu_config.InputPipelineConfig.BROADCAST)):\n        return (0, 1)\n    if phift:\n        num_hosts = (params['context'].num_hosts if ('context' in params) else (config.tpu_config.num_shards \n        num_partitions = max(num_hosts, 1)\n    else:\n        num_partitions = config.tpu_config.num_shards\n    partition_id = getattr(self, '_next_partition_id', 0)\n    self._next_partition_id = (partition_id + 1)\n    tf.logging.info(('num_partitions = %d partition_id = %d' % (num_partitions, partition_id)))\n    assert (partition_id < num_partitions)\n    return (partition_id, num_partitions)", "docstring": "Which part of the training data to read.\n\nIf there are multiple parallel calls to input_fn (multiple TPU hosts),\nthen we want each one to read from a separate partition of the training\ndata.\n\nArgs:\nmode: tf.estimator.ModeKeys\nconfig: RunConfig\nparams: A dict that contains parameters.\nReturns:\npartition_id: an integer\nnum_partitions: an integer", "source": "codesearchnet"}
{"code": "def StatEntryFromStat(stat,\n                      pathspec,\n                      ext_attrs = True):\n  \n  result = rdf_client_fs.StatEntry(pathspec=pathspec)\n\n  for attr in _STAT_ATTRS:\n    value = getattr(stat.GetRaw(), attr, None)\n    if value is None:\n      continue\n\n    \n    value = int(value)\n    if value < 0:\n      value &= 0xFFFFFFFF\n\n    setattr(result, attr, value)\n\n  result.st_flags_linux = stat.GetLinuxFlags()\n  result.st_flags_osx = stat.GetOsxFlags()\n  if ext_attrs:\n    \n    \n    \n    result.ext_attrs = list(GetExtAttrs(stat.GetPath()))\n\n  return result", "docstring": "Build a stat entry object from a given stat object.\n\nArgs:\nstat: A `Stat` object.\npathspec: A `PathSpec` from which `stat` was obtained.\next_attrs: Whether to include extended file attributes in the result.\n\nReturns:\n`StatEntry` object.", "source": "juraj-google-style"}
{"code": "def victim(self, name, owner=None, **kwargs):\n        \n        return Victim(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Victim TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def tar_archive(context):\n    logger.debug('start')\n    mode = get_file_mode_for_writing(context)\n    for item in context['tar']['archive']:\n        destination = context.get_formatted_string(item['out'])\n        source = context.get_formatted_string(item['in'])\n        with tarfile.open(destination, mode) as archive_me:\n            logger.debug(f\"Archiving '{source}' to '{destination}'\")\n            archive_me.add(source, arcname='.')\n            logger.info(f\"Archived '{source}' to '{destination}'\")\n    logger.debug('end')", "docstring": "Archive specified path to a tar archive.\n\nArgs:\ncontext: dictionary-like. context is mandatory.\ncontext['tar']['archive'] must exist. It's a dictionary.\nkeys are the paths to archive.\nvalues are the destination output paths.\n\nExample:\ntar:\narchive:\n- in: path/to/dir\nout: path/to/destination.tar.xs\n- in: another/my.file\nout: ./my.tar.xs\n\nThis will archive directory path/to/dir to path/to/destination.tar.xs,\nand also archive file another/my.file to ./my.tar.xs", "source": "codesearchnet"}
{"code": "def from_dict(event_dict):\n    return SnippetEvent(callback_id=event_dict['callbackId'], name=event_dict['name'], creation_time=event_dict['time'], data=event_dict['data'])", "docstring": "Create a SnippetEvent object from a dictionary.\n\nArgs:\nevent_dict: a dictionary representing an event.\n\nReturns:\nA SnippetEvent object.", "source": "codesearchnet"}
{"code": "def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: Optional[PretrainedConfig]=None, **kwargs):\n    return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict() if text_config is not None else None, **kwargs)", "docstring": "Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model\nconfigurations.\n\nArgs:\nvision_config (`dict`):\nDictionary of configuration options used to initialize [`Blip2VisionConfig`].\nqformer_config (`dict`):\nDictionary of configuration options used to initialize [`Blip2QFormerConfig`].\ntext_config (`dict`, *optional*):\nDictionary of configuration options used to initialize any [`PretrainedConfig`].\n\nReturns:\n[`Blip2Config`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def get_data(__pkg: str, __name: str) -> str:\n    \n    for dname in get_data_dirs(__pkg):\n        test_path = path.join(dname, __name)\n        if path.exists(test_path):\n            return test_path\n    raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))", "docstring": "Return top-most data file for given package.\n\nArgs:\n__pkg: Package name\n__name: Data file name", "source": "juraj-google-style"}
{"code": "def parents(self, sourcepath, recursive=True):\n    return self._get_recursive_dependancies(self._PARENTS_MAP, sourcepath, recursive=True)", "docstring": "Recursively find all parents that import the given source path.\n\nArgs:\nsourcepath (str): Source file path to search for.\n\nKeyword Arguments:\nrecursive (bool): Switch to enabled recursive finding (if True).\nDefault to True.\n\nReturns:\nset: List of finded parents path.", "source": "codesearchnet"}
{"code": "def get_rooms(self, sort=True):\n    rooms = self._connection.get('rooms')\n    if sort:\n        rooms.sort(key=operator.itemgetter('name'))\n    return rooms", "docstring": "Get rooms list.\n\nKwargs:\nsort (bool): If True, sort rooms by name\n\nReturns:\narray. List of rooms (each room is a dict)", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    version = match.get('LastAttemptSystemVersion', 'N/A')\n    pending = match.get('LastUpdatesAvailable', None)\n    event_data = plist_event.PlistTimeEventData()\n    event_data.desc = 'Last MacOS {0:s} full update.'.format(version)\n    event_data.key = ''\n    event_data.root = '/'\n    datetime_value = match.get('LastFullSuccessfulDate', None)\n    if datetime_value:\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    datetime_value = match.get('LastSuccessfulDate', None)\n    if (datetime_value and pending):\n        software = []\n        for update in match.get('RecommendedUpdates', []):\n            identifier = update.get('Identifier', '<IDENTIFIER>')\n            product_key = update.get('Product Key', '<PRODUCT_KEY>')\n            software.append('{0:s}({1:s})'.format(identifier, product_key))\n        if (not software):\n            return\n        software = ','.join(software)\n        event_data.desc = 'Last Mac OS {0!s} partially update, pending {1!s}: {2:s}.'.format(version, pending, software)\n        event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant MacOS update entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def inspect_container(self, container):\n        \n        return self._result(\n            self._get(self._url(\"/containers/{0}/json\", container)), True\n        )", "docstring": "Identical to the `docker inspect` command, but only for containers.\n\nArgs:\ncontainer (str): The container to inspect\n\nReturns:\n(dict): Similar to the output of `docker inspect`, but as a\nsingle dict\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def get_help(func):\n    \n\n    help_text = \"\"\n    if isinstance(func, dict):\n        name = context_name(func)\n\n        help_text = \"\\n\" + name + \"\\n\\n\"\n        doc = inspect.getdoc(func)\n        if doc is not None:\n            doc = inspect.cleandoc(doc)\n            help_text += doc + '\\n'\n\n        return help_text\n\n    sig = func.metadata.signature()\n    doc = inspect.getdoc(func)\n    if doc is not None:\n        doc = inspect.cleandoc(doc)\n\n    help_text += \"\\n\" + sig + \"\\n\\n\"\n    if doc is not None:\n        help_text += doc + '\\n'\n\n    if inspect.isclass(func):\n        func = func.__init__\n\n    \n    \n    \n    if func.metadata.load_from_doc:\n        return help_text\n\n    help_text += \"\\nArguments:\\n\"\n    for key, info in func.metadata.annotated_params.items():\n        type_name = info.type_name\n        desc = \"\"\n        if info.desc is not None:\n            desc = info.desc\n\n        help_text += \"  - %s (%s): %s\\n\" % (key, type_name, desc)\n\n    return help_text", "docstring": "Return usage information about a context or function.\n\nFor contexts, just return the context name and its docstring\nFor functions, return the function signature as well as its\nargument types.\n\nArgs:\nfunc (callable): An annotated callable function\n\nReturns:\nstr: The formatted help text", "source": "juraj-google-style"}
{"code": "def load_new_checkpoint_when_available(\n      self, sess, current_checkpoint, sleep_seconds=10):\n    \n    \n    while True:\n      next_checkpoint = self.load_from_checkpoint(sess)\n      if not next_checkpoint or next_checkpoint == current_checkpoint:\n        print('Model not yet available, sleeping for %d seconds: '\n              'path %s; found: %s' %\n              (sleep_seconds,\n               os.path.dirname(self._save_path), current_checkpoint))\n        sys.stdout.flush()\n        time.sleep(sleep_seconds)\n      else:\n        return next_checkpoint", "docstring": "Waits for a new checkpoint to be available and then loads it.\n\nArgs:\nsess: The current session.\ncurrent_checkpoint: The current checkpoint or None to just load the next\none.\nsleep_seconds: How long to sleep between checks.\n\nReturns:\nThe next checkpoint to use.", "source": "juraj-google-style"}
{"code": "def constant(duration: int, amp: complex, name: str = None) -> SamplePulse:\n    \n    return _sampled_constant_pulse(duration, amp, name=name)", "docstring": "Generates constant-sampled `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Complex pulse amplitude.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def get_by_name(self, name):\n    try:\n        spec = self._dom.get('templates', {})[name]\n    except KeyError:\n        raise LagoMissingTemplateError(name, self._path)\n    return Template(name=name, versions={ver_name: TemplateVersion(name=('%s:%s:%s' % (self.name, name, ver_name)), source=self._providers[ver_spec['source']], handle=ver_spec['handle'], timestamp=ver_spec['timestamp']) for (ver_name, ver_spec) in spec['versions'].items()})", "docstring": "Retrieve a template by it's name\n\nArgs:\nname (str): Name of the template to retrieve\n\nRaises:\nLagoMissingTemplateError: if no template is found", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    output_format = getattr(options, 'output_format', 'dynamic')\n    output_filename = getattr(options, 'write', None)\n\n    if output_format != 'list':\n      if not output_manager.OutputManager.HasOutputClass(output_format):\n        raise errors.BadConfigOption(\n            'Unsupported output format: {0:s}.'.format(output_format))\n\n    if output_manager.OutputManager.IsLinearOutputModule(output_format):\n      if not output_filename:\n        raise errors.BadConfigOption((\n            'Output format: {0:s} requires an output file').format(\n                output_format))\n\n      if os.path.exists(output_filename):\n        raise errors.BadConfigOption(\n            'Output file already exists: {0:s}.'.format(output_filename))\n\n    setattr(configuration_object, '_output_format', output_format)\n    setattr(configuration_object, '_output_filename', output_filename)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.send = channel.stream_stream(\n        '/predix.eventhub.Publisher/send',\n        request_serializer=EventHub__pb2.PublishRequest.SerializeToString,\n        response_deserializer=EventHub__pb2.PublishResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def assert_not_visible(self, selector, testid=None, **kwargs):\n    self.info_log(('Assert not visible selector(%s) testid(%s)' % (selector, testid)))\n    highlight = kwargs.get('highlight', BROME_CONFIG['highlight']['highlight_on_assertion_failure'])\n    self.debug_log(('effective highlight: %s' % highlight))\n    wait_until_not_visible = kwargs.get('wait_until_not_visible', BROME_CONFIG['proxy_driver']['wait_until_not_visible_before_assert_not_visible'])\n    self.debug_log(('effective wait_until_not_visible: %s' % wait_until_not_visible))\n    if wait_until_not_visible:\n        self.wait_until_not_visible(selector, raise_exception=False)\n    element = self.find(selector, raise_exception=False, wait_until_visible=False, wait_until_present=False)\n    if (element and element.is_displayed(raise_exception=False)):\n        data = self.execute_script('return arguments[0].getBoundingClientRect();', element._element)\n        if highlight:\n            element.highlight(style=BROME_CONFIG['highlight']['style_on_assertion_failure'])\n        if (testid is not None):\n            self.create_test_result(testid, False, extra_data={'bounding_client_rect': data, 'video_x_offset': self.browser_config.get('video_x_offset', 0), 'video_y_offset': self.browser_config.get('video_y_offset', 0)})\n        return False\n    else:\n        if (testid is not None):\n            self.create_test_result(testid, True)\n        return True", "docstring": "Assert that the element is not visible in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntest_id (str): the test_id or a str\n\nKwargs:\nwait_until_not_visible (bool)\nhighlight (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "codesearchnet"}
{"code": "def ast_dict_to_objects(ast_dict: Mapping[(str, Any)], bel_obj) -> BELAst:\n    ast_subject = ast_dict.get('subject', None)\n    ast_object = ast_dict.get('object', None)\n    bel_subject = None\n    bel_object = None\n    bel_relation = ast_dict.get('relation')\n    if ast_subject:\n        bel_subject = function_ast_to_objects(ast_subject, bel_obj)\n    if ast_object:\n        bel_object = function_ast_to_objects(ast_object, bel_obj)\n    ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec)\n    return ast_obj", "docstring": "Convert Tatsu AST dictionary to BEL AST object\n\nArgs:\nast_dict (Mapping[str, Any])\n\nReturns:\nBELAst: object representing the BEL Statement AST", "source": "codesearchnet"}
{"code": "def __init__(self, metrics_namespace: Optional[str]=None, is_streaming: bool=False, gpu: Optional[costs.Accelerator]=None, pcollection: str='ProcessOutput.out0'):\n    self.is_streaming = is_streaming\n    self.gpu = gpu\n    self.pcollection = pcollection\n    super().__init__(metrics_namespace=metrics_namespace)\n    self.dataflow_client = DataflowApplicationClient(self.pipeline.get_pipeline_options())\n    self.monitoring_client = monitoring_v3.MetricServiceClient()", "docstring": "Initializes DataflowCostBenchmark.\n\nArgs:\nmetrics_namespace (Optional[str]): Namespace for metrics.\nis_streaming (bool): Whether the pipeline is streaming or batch.\ngpu (Optional[costs.Accelerator]): Optional GPU type.\npcollection (str): PCollection name to monitor throughput.", "source": "github-repos"}
{"code": "def compute_cost_graph(self, devices=None):\n    cost_graph_def = cost_graph_pb2.CostGraphDef()\n    for (i, operation_name) in enumerate(self.get_all_operation_names()):\n        node = cost_graph_def.node.add(name=operation_name, device=self.get_operation_device(operation_name), id=i)\n        for input_name in self.get_operation_input_names(operation_name):\n            (id1, id2) = self._tensor_name_to_ids[input_name]\n            node.input_info.add(preceding_node=id1, preceding_port=id2)\n        for output_name in self.get_operation_output_names(operation_name):\n            tensor_device = self.get_tensor_device(output_name)\n            if ((devices is None) or (tensor_device is None) or (tensor_device in devices)):\n                node.output_info.add(size=self.get_tensor_num_entries(output_name), alias_input_port=(- 1), dtype=self.get_tensor_dtype(output_name).as_datatype_enum, shape=self.get_tensor_shape(output_name).as_proto())\n            else:\n                node.output_info.add(size=0, alias_input_port=(- 1), dtype=self.get_tensor_dtype(output_name).as_datatype_enum)\n            if self.is_tensor_final(output_name):\n                node.is_final = True\n    return cost_graph_def", "docstring": "Computes a CostGraphDef protobuf based on this graph.\n\nDefined in tensorflow/core/framework/cost_graph.proto.\n\nArgs:\ndevices: optional [string], the names of devices to consider. If\nspecified, any tensor on a device not listed is given a size of zero.\nAny device-less tensor (e.g. Mesh TensorFlow tensor) is not affected.\n\nReturns:\na CostGraphDef protobuf with a Node for every operation in the graph, each\nof which is populated with size/dtype information for its inputs and\noutputs (which match the input/output order of the operation).", "source": "codesearchnet"}
{"code": "def prepare(path, name):  \n    \n    setup_path = os.path.join(path, 'setup.py')\n    if not os.path.exists(setup_path):\n        data = textwrap.dedent( % name)\n\n        logger.info('Module %s does not provide a setup.py. \\nGenerating setup.py' % name)\n\n        _files.write_file(setup_path, data)\n\n        data = textwrap.dedent()\n\n        logger.info('Generating setup.cfg')\n\n        _files.write_file(os.path.join(path, 'setup.cfg'), data)\n\n        data = textwrap.dedent()\n\n        logger.info('Generating MANIFEST.in')\n\n        _files.write_file(os.path.join(path, 'MANIFEST.in'), data)", "docstring": "Prepare a Python script (or module) to be imported as a module.\nIf the script does not contain a setup.py file, it creates a minimal setup.\nArgs:\npath (str): path to directory with the script or module.\nname (str): name of the script or module.", "source": "juraj-google-style"}
{"code": "def insertImage(page, rect, filename=None, pixmap=None, stream=None, rotate=0,\n                keep_proportion = True,\n                overlay=True):\n    \n\n    def calc_matrix(fw, fh, tr, rotate=0):\n        \n        \n        tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.)\n\n        rot = Matrix(rotate)  \n\n        \n        m = Matrix(1, 0, 0, 1, -0.5, -0.5) * rot\n\n        \n\n        \n        \n        \n        small = min(fw, fh)  \n\n        if rotate not in (0, 180):\n            fw, fh = fh, fw  \n\n        if fw < 1: \n            if (float(tr.width) / fw) > (float(tr.height) / fh):\n                w = tr.height * small\n                h = tr.height\n            else:\n                w = tr.width\n                h = tr.width / small\n\n        elif fw != fh:  \n            if (float(tr.width) / fw) > (float(tr.height) / fh):\n                w = tr.height / small\n                h = tr.height\n            else:\n                w = tr.width\n                h = tr.width * small\n\n        else: \n            w = tr.width\n            h = tr.height\n\n        m *= Matrix(w, h)  \n\n        m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y)  \n\n        return m\n    \n\n    CheckParent(page)\n    doc = page.parent\n    if not doc.isPDF:\n        raise ValueError(\"not a PDF\")\n    if bool(filename) + bool(stream) + bool(pixmap) != 1:\n        raise ValueError(\"need exactly one of filename, pixmap, stream\")\n\n    if filename and not os.path.exists(filename):\n        raise FileNotFoundError(\"No such file: '%s'\" % filename)\n    elif stream and type(stream) not in (bytes, bytearray, io.BytesIO):\n        raise ValueError(\"stream must be bytes-like or BytesIO\")\n    elif pixmap and type(pixmap) is not Pixmap:\n        raise ValueError(\"pixmap must be a Pixmap\")\n\n    while rotate < 0:\n        rotate += 360\n    while rotate > 360:\n        rotate -= 360\n    if rotate not in (0, 90, 180, 270):\n        raise ValueError(\"bad rotate value\")\n\n    r = page.rect & rect\n    if r.isEmpty or r.isInfinite:\n        raise ValueError(\"rect must be finite and not empty\")\n    _imgpointer = None\n\n    if keep_proportion is True:  \n        if pixmap:  \n            w = pixmap.width\n            h = pixmap.height\n        elif stream:  \n            \n            img_size = TOOLS.image_size(stream, keep_image=True)\n            w, h = img_size[:2]\n            stream = None  \n            _imgpointer = img_size[-1]  \n        else:  \n            img = open(filename, \"rb\")\n            stream = img.read()\n            img_size = TOOLS.image_size(stream, keep_image=True)\n            w, h = img_size[:2]\n            _imgpointer = img_size[-1]  \n            stream = None  \n            filename = None  \n            img.close()  \n\n        maxf = max(w, h).__float__()\n        fw = w / maxf\n        fh = h / maxf\n    else:\n        fw = fh = 1.0\n\n    clip = r * ~page._getTransformation()  \n\n    matrix = calc_matrix(fw, fh, clip, rotate=rotate)\n\n    ilst = [i[7] for i in doc.getPageImageList(page.number)]\n    n = \"fzImg\"\n    i = 0\n    _imgname = n + \"0\"\n    while _imgname in ilst:\n        i += 1\n        _imgname = n + str(i)\n\n    page._insertImage(\n            filename=filename,  \n            pixmap=pixmap,  \n            stream=stream,  \n            matrix=matrix,  \n            overlay=overlay,\n            _imgname=_imgname,  \n            _imgpointer=_imgpointer,  \n        )", "docstring": "Insert an image in a rectangle on the 
current page.\n\nNotes:\nExactly one of filename, pixmap or stream must be provided.\nArgs:\nrect: (rect-like) where to place the source image\nfilename: (str) name of an image file\npixmap: (obj) a Pixmap object\nstream: (bytes) an image in memory\nrotate: (int) degrees (multiple of 90)\nkeep_proportion: (bool) whether to maintain aspect ratio\noverlay: (bool) put in foreground", "source": "juraj-google-style"}
{"code": "def set_ocha_url(cls, url=None):\n    if (url is None):\n        url = cls._ochaurl_int\n    cls._ochaurl = url", "docstring": "Set World Bank url from which to retrieve countries data\n\nArgs:\nurl (str): World Bank url from which to retrieve countries data. Defaults to internal value.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _determine_timeout(default_timeout, specified_timeout, retry):\n    if (specified_timeout is DEFAULT):\n        specified_timeout = default_timeout\n    if (specified_timeout is default_timeout):\n        if (retry and (retry is not DEFAULT) and isinstance(default_timeout, timeout.ExponentialTimeout)):\n            return default_timeout.with_deadline(retry._deadline)\n        else:\n            return default_timeout\n    if isinstance(specified_timeout, (int, float)):\n        return timeout.ConstantTimeout(specified_timeout)\n    else:\n        return specified_timeout", "docstring": "Determines how timeout should be applied to a wrapped method.\n\nArgs:\ndefault_timeout (Optional[Timeout]): The default timeout specified\nat method creation time.\nspecified_timeout (Optional[Timeout]): The timeout specified at\ninvocation time. If :attr:`DEFAULT`, this will be set to\nthe ``default_timeout``.\nretry (Optional[Retry]): The retry specified at invocation time.\n\nReturns:\nOptional[Timeout]: The timeout to apply to the method or ``None``.", "source": "codesearchnet"}
{"code": "def testConcreteFunctionStructuredSignatureError(self, conc_args=(), conc_kwargs=None, call_args=(), call_kwargs=None, error='.*', exception=TypeError):\n    conc_args = conc_args() if callable(conc_args) else conc_args\n    conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}\n    call_args = call_args() if callable(call_args) else call_args\n    call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}\n    self.assertIsInstance(conc_args, tuple)\n    self.assertIsInstance(call_args, tuple)\n    self.assertIsInstance(conc_kwargs, dict)\n    self.assertIsInstance(call_kwargs, dict)\n\n    @polymorphic_function.function\n    def func(x, y=5, *varargs, **kwargs):\n        del y, varargs, kwargs\n        return x\n    conc = func.get_concrete_function(*conc_args, **conc_kwargs)\n    with self.assertRaisesRegex(exception, error):\n        self.evaluate(conc(*call_args, **call_kwargs))", "docstring": "Tests for errors in the structured signature.\n\nArgs:\nconc_args: Positional arguments used for get_concrete_function.\nconc_kwargs: Keyword arguments used for get_concrete_function.\ncall_args: Positional arguments used to call the function.\ncall_kwargs: Keyword arguments used to call the function.\nerror: Expected exception message.\nexception: Expected exception type.", "source": "github-repos"}
{"code": "def load_img(path, grayscale=False, target_size=None):\n    img = io.imread(path, grayscale)\n    if target_size:\n        img = transform.resize(img, target_size, preserve_range=True).astype('uint8')\n    return img", "docstring": "Utility function to load an image from disk.\n\nArgs:\npath: The image file path.\ngrayscale: True to convert to grayscale image (Default value = False)\ntarget_size: (w, h) to resize. (Default value = None)\n\nReturns:\nThe loaded numpy image.", "source": "codesearchnet"}
{"code": "def get_index(self, prefix=''):\n        \n        if prefix:\n            prefixed = '%s_index' % prefix\n        else:\n            prefixed = 'index'\n\n        if prefixed in self.__cli and self.__cli[prefixed]:\n            index = self.__cli.get(prefixed)\n            from_conf = False\n        else:\n            index = self.__config.get(prefixed)\n            from_conf = True\n\n        return self.__abspath(index, from_conf)", "docstring": "Retrieve the absolute path to an index, according to\n`prefix`.\n\nArgs:\nprefix: str, the desired prefix or `None`.\n\nReturns:\nstr: An absolute path, or `None`", "source": "juraj-google-style"}
{"code": "def build_error_response(self, version, reason, message):\n    batch_item = messages.ResponseBatchItem(result_status=contents.ResultStatus(enums.ResultStatus.OPERATION_FAILED), result_reason=contents.ResultReason(reason), result_message=contents.ResultMessage(message))\n    return self._build_response(version, [batch_item])", "docstring": "Build a simple ResponseMessage with a single error result.\n\nArgs:\nversion (ProtocolVersion): The protocol version the response\nshould be addressed with.\nreason (ResultReason): An enumeration classifying the type of\nerror occurred.\nmessage (str): A string providing additional information about\nthe error.\n\nReturns:\nResponseMessage: The simple ResponseMessage containing a\nsingle error result.", "source": "codesearchnet"}
{"code": "def autobuild_release(family=None):\n    \n\n    if family is None:\n        family = utilities.get_family('module_settings.json')\n\n    env = Environment(tools=[])\n    env['TILE'] = family.tile\n\n    target = env.Command(['\n                         action=env.Action(create_release_settings_action, \"Creating release manifest\"))\n    env.AlwaysBuild(target)\n\n    \n    if os.path.exists('RELEASE.md'):\n        env.Command(['build/output/RELEASE.md'], ['RELEASE.md'], Copy(\"$TARGET\", \"$SOURCE\"))\n\n    \n    copy_include_dirs(family.tile)\n    copy_tilebus_definitions(family.tile)\n    copy_dependency_docs(family.tile)\n    copy_linker_scripts(family.tile)\n\n    \n    if not family.tile.settings.get('hide_dependency_images', False):\n        copy_dependency_images(family.tile)\n\n    copy_extra_files(family.tile)\n    build_python_distribution(family.tile)", "docstring": "Copy necessary files into build/output so that this component can be used by others\n\nArgs:\nfamily (ArchitectureGroup): The architecture group that we are targeting.  If not\nprovided, it is assumed that we are building in the current directory and the\nmodule_settings.json file is read to create an ArchitectureGroup", "source": "juraj-google-style"}
{"code": "def _dereference_args(pipeline_name, args, kwargs):\n    lookup_slots = set()\n    for arg in itertools.chain(args, kwargs.itervalues()):\n        if (arg['type'] == 'slot'):\n            lookup_slots.add(db.Key(arg['slot_key']))\n    slot_dict = {}\n    for (key, slot_record) in zip(lookup_slots, db.get(lookup_slots)):\n        if ((slot_record is None) or (slot_record.status != _SlotRecord.FILLED)):\n            raise SlotNotFilledError(('Slot \"%s\" missing its value. From %s(*args=%s, **kwargs=%s)' % (key, pipeline_name, _short_repr(args), _short_repr(kwargs))))\n        slot_dict[key] = slot_record.value\n    arg_list = []\n    for current_arg in args:\n        if (current_arg['type'] == 'slot'):\n            arg_list.append(slot_dict[db.Key(current_arg['slot_key'])])\n        elif (current_arg['type'] == 'value'):\n            arg_list.append(current_arg['value'])\n        else:\n            raise UnexpectedPipelineError(('Unknown parameter type: %r' % current_arg))\n    kwarg_dict = {}\n    for (key, current_arg) in kwargs.iteritems():\n        if (current_arg['type'] == 'slot'):\n            kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])]\n        elif (current_arg['type'] == 'value'):\n            kwarg_dict[key] = current_arg['value']\n        else:\n            raise UnexpectedPipelineError(('Unknown parameter type: %r' % current_arg))\n    return (arg_list, kwarg_dict)", "docstring": "Dereference a Pipeline's arguments that are slots, validating them.\n\nEach argument value passed in is assumed to be a dictionary with the format:\n{'type': 'value', 'value': 'serializable'}  # A resolved value.\n{'type': 'slot', 'slot_key': 'str() on a db.Key'}  # A pending Slot.\n\nArgs:\npipeline_name: The name of the pipeline class; used for debugging.\nargs: Iterable of positional arguments.\nkwargs: Dictionary of keyword arguments.\n\nReturns:\nTuple (args, kwargs) where:\nArgs: A list of positional arguments values that are all dereferenced.\nKwargs: A list of keyword arguments values that are all dereferenced.\n\nRaises:\nSlotNotFilledError if any of the supplied 'slot_key' records are not\npresent in the Datastore or have not yet been filled.\nUnexpectedPipelineError if an unknown parameter type was passed.", "source": "codesearchnet"}
{"code": "def flash_from_file(self, partition, source_file, source_len=0,\n                      info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None,\n                      timeout_ms=None):\n    \n    if source_len == 0:\n      \n      source_len = os.stat(source_file).st_size\n    download_response = self.download(\n        source_file, source_len=source_len, info_cb=info_cb,\n        progress_callback=progress_callback)\n    flash_response = self.flash(partition, info_cb=info_cb,\n                                timeout_ms=timeout_ms)\n    return download_response + flash_response", "docstring": "Flashes a partition from the file on disk.\n\nArgs:\npartition: Partition name to flash to.\nsource_file: Filename to download to the device.\nsource_len: Optional length of source_file, uses os.stat if not provided.\ninfo_cb: See Download.\nprogress_callback: See Download.\ntimeout_ms: The amount of time to wait on okay after flashing.\n\nReturns:\nDownload and flash responses, normally nothing.", "source": "juraj-google-style"}
{"code": "def __init__(self, vendor_id=9583, product_id=50735):\n        \n\n        print(\"Opening SpaceMouse device\")\n        self.device = hid.device()\n        self.device.open(vendor_id, product_id)  \n\n        print(\"Manufacturer: %s\" % self.device.get_manufacturer_string())\n        print(\"Product: %s\" % self.device.get_product_string())\n\n        self._display_controls()\n\n        self.single_click_and_hold = False\n\n        self._control = [0., 0., 0., 0., 0., 0.]\n        self._reset_state = 0\n        self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])\n        self._enabled = False\n\n        \n        self.thread = threading.Thread(target=self.run)\n        self.thread.daemon = True\n        self.thread.start()", "docstring": "Initialize a SpaceMouse handler.\n\nArgs:\nvendor_id: HID device vendor id\nproduct_id: HID device product id\n\nNote:\nUse hid.enumerate() to view all USB human interface devices (HID).\nMake sure SpaceMouse is detected before running the script.\nYou can look up its vendor/product id from this method.", "source": "juraj-google-style"}
{"code": "def constant_time_string_compare(a, b):\n    try:\n        return hmac.compare_digest(a, b)\n    except AttributeError:\n        if (len(a) != len(b)):\n            return False\n        result = 0\n        for (x, y) in zip(a, b):\n            result |= (ord(x) ^ ord(y))\n        return (result == 0)", "docstring": "Helper for comparing string in constant time, independent\nof the python version being used.\n\nArgs:\na (str): A string to compare\nb (str): A string to compare", "source": "codesearchnet"}
{"code": "def contains_saved_model(export_dir):\n    if isinstance(export_dir, os.PathLike):\n        export_dir = os.fspath(export_dir)\n    return maybe_saved_model_directory(export_dir)", "docstring": "Checks whether the provided export directory could contain a SavedModel.\n\nNote that the method does not load any data by itself. If the method returns\n`false`, the export directory definitely does not contain a SavedModel. If the\nmethod returns `true`, the export directory may contain a SavedModel but\nprovides no guarantee that it can be loaded.\n\nArgs:\nexport_dir: Absolute path to possible export location. For example,\n'/my/foo/model'.\n\nReturns:\nTrue if the export directory contains SavedModel files, False otherwise.", "source": "github-repos"}
{"code": "def write(self, data):\n    if (not isinstance(data, (bytes, bytearray, list))):\n        raise TypeError('Invalid data type, should be bytes, bytearray, or list.')\n    if isinstance(data, list):\n        data = bytearray(data)\n    try:\n        return os.write(self._fd, data)\n    except OSError as e:\n        raise SerialError(e.errno, ('Writing serial port: ' + e.strerror))", "docstring": "Write `data` to the serial port and return the number of bytes\nwritten.\n\nArgs:\ndata (bytes, bytearray, list): a byte array or list of 8-bit integers to write.\n\nReturns:\nint: number of bytes written.\n\nRaises:\nSerialError: if an I/O or OS error occurs.\nTypeError: if `data` type is invalid.\nValueError: if data is not valid bytes.", "source": "codesearchnet"}
{"code": "def Progress(self):\n    now = time.time()\n    if ((now - self.last_progress_time) <= 2):\n        return\n    self.last_progress_time = now\n    client_utils.KeepAlive()\n    self.grr_worker.Heartbeat()\n    user_start = self.cpu_start.user\n    system_start = self.cpu_start.system\n    cpu_times = self.proc.cpu_times()\n    user_end = cpu_times.user\n    system_end = cpu_times.system\n    used_cpu = (((user_end - user_start) + system_end) - system_start)\n    if (used_cpu > self.cpu_limit):\n        self.grr_worker.SendClientAlert('Cpu limit exceeded.')\n        raise CPUExceededError('Action exceeded cpu limit.')", "docstring": "Indicate progress of the client action.\n\nThis function should be called periodically during client actions that do\nnot finish instantly. It will notify the nanny that the action is not stuck\nand avoid the timeout and it will also check if the action has reached its\ncpu limit.\n\nRaises:\nCPUExceededError: CPU limit exceeded.", "source": "codesearchnet"}
{"code": "def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True, syntax=None):\n    if ((api_implementation.Type() == 'cpp') and build_file_if_cpp):\n        from typy.google.protobuf import descriptor_pb2\n        file_descriptor_proto = descriptor_pb2.FileDescriptorProto()\n        file_descriptor_proto.message_type.add().MergeFrom(desc_proto)\n        proto_name = str(uuid.uuid4())\n        if package:\n            file_descriptor_proto.name = os.path.join(package.replace('.', '/'), (proto_name + '.proto'))\n            file_descriptor_proto.package = package\n        else:\n            file_descriptor_proto.name = (proto_name + '.proto')\n        _message.default_pool.Add(file_descriptor_proto)\n        result = _message.default_pool.FindFileByName(file_descriptor_proto.name)\n        if _USE_C_DESCRIPTORS:\n            return result.message_types_by_name[desc_proto.name]\n    full_message_name = [desc_proto.name]\n    if package:\n        full_message_name.insert(0, package)\n    enum_types = {}\n    for enum_proto in desc_proto.enum_type:\n        full_name = '.'.join((full_message_name + [enum_proto.name]))\n        enum_desc = EnumDescriptor(enum_proto.name, full_name, None, [EnumValueDescriptor(enum_val.name, ii, enum_val.number) for (ii, enum_val) in enumerate(enum_proto.value)])\n        enum_types[full_name] = enum_desc\n    nested_types = {}\n    for nested_proto in desc_proto.nested_type:\n        full_name = '.'.join((full_message_name + [nested_proto.name]))\n        nested_desc = MakeDescriptor(nested_proto, package='.'.join(full_message_name), build_file_if_cpp=False, syntax=syntax)\n        nested_types[full_name] = nested_desc\n    fields = []\n    for field_proto in desc_proto.field:\n        full_name = '.'.join((full_message_name + [field_proto.name]))\n        enum_desc = None\n        nested_desc = None\n        if field_proto.HasField('type_name'):\n            type_name = field_proto.type_name\n            full_type_name = '.'.join((full_message_name + [type_name[(type_name.rfind('.') + 1):]]))\n            if (full_type_name in nested_types):\n                nested_desc = nested_types[full_type_name]\n            elif (full_type_name in enum_types):\n                enum_desc = enum_types[full_type_name]\n        field = FieldDescriptor(field_proto.name, full_name, (field_proto.number - 1), field_proto.number, field_proto.type, FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type), field_proto.label, None, nested_desc, enum_desc, None, False, None, options=field_proto.options, has_default_value=False)\n        fields.append(field)\n    desc_name = '.'.join(full_message_name)\n    return Descriptor(desc_proto.name, desc_name, None, None, fields, list(nested_types.values()), list(enum_types.values()), [], options=desc_proto.options)", "docstring": "Make a protobuf Descriptor given a DescriptorProto protobuf.\n\nHandles nested descriptors. Note that this is limited to the scope of defining\na message inside of another message. Composite fields can currently only be\nresolved if the message is defined in the same scope as the field.\n\nArgs:\ndesc_proto: The descriptor_pb2.DescriptorProto protobuf message.\npackage: Optional package name for the new message Descriptor (string).\nbuild_file_if_cpp: Update the C++ descriptor pool if api matches.\nSet to False on recursion, so no duplicates are created.\nsyntax: The syntax/semantics that should be used.  
Set to \"proto3\" to get\nproto3 field presence semantics.\nReturns:\nA Descriptor for protobuf messages.", "source": "codesearchnet"}
{"code": "def assign_methods(self, resource_class):\n        \n        assert all([\n            x.upper() in VALID_METHODS for x in resource_class.Meta.methods])\n        for method in resource_class.Meta.methods:\n\n            self._assign_method(\n                resource_class,\n                method.upper()\n            )", "docstring": "Given a resource_class and it's Meta.methods tuple,\nassign methods for communicating with that resource.\n\nArgs:\nresource_class: A single resource class", "source": "juraj-google-style"}
{"code": "def update_box(self, box):\n\t\t\n\t\t\n\t\tpayload = None\n\t\tif  type(box) is not StreakBox:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tpayload = box.to_dict(rw = True)\n\n\t\ttry:\n\t\t\turi = self.box_root_uri + '/' + box.attributes['boxKey']\n\t\texcept KeyError:\n\t\t\treturn requests.codes.bad_request, None\n\t\n\t\tcode, data = self._req('post', uri , json.dumps(payload))\n\n\t\treturn code, data", "docstring": "Updates a box with the provided attributes.\nArgs:\nbox \tStreakBox object with updated info\nreturn\t(status code, box in dict form)", "source": "juraj-google-style"}
{"code": "def _setBitOn(x, bitNum):\n    \n    _checkInt(x, minvalue=0, description='input value')\n    _checkInt(bitNum, minvalue=0, description='bitnumber')\n\n    return x | (1 << bitNum)", "docstring": "Set bit 'bitNum' to True.\n\nArgs:\n* x (int): The value before.\n* bitNum (int): The bit number that should be set to True.\n\nReturns:\nThe value after setting the bit. This is an integer.\n\nFor example:\nFor x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).", "source": "juraj-google-style"}
{"code": "async def process_response(self, request, response):\n    (await super().process_response(request, response))\n    if (COOKIE_AUTH_KEY in request):\n        if response.started:\n            raise RuntimeError('Cannot save cookie into started response')\n        cookie = request[COOKIE_AUTH_KEY]\n        if (cookie == ''):\n            response.del_cookie(self.cookie_name)\n        else:\n            response.set_cookie(self.cookie_name, cookie)", "docstring": "Called to perform any processing of the response required.\n\nThis function stores any cookie data in the COOKIE_AUTH_KEY as a\ncookie in the response object. If the value is a empty string, the\nassociated cookie is deleted instead.\n\nThis function requires the response to be a aiohttp Response object,\nand assumes that the response has not started if the remember or\nforget functions are called during the request.\n\nArgs:\nrequest: aiohttp Request object.\nresponse: response object returned from the handled view\n\nRaises:\nRuntimeError: Raised if response has already started.", "source": "codesearchnet"}
{"code": "def _CalculateNTFSTimeHash(self, file_entry):\n    \n    date_time_values = []\n\n    access_time = getattr(file_entry, 'access_time', None)\n    if access_time:\n      date_time_string = access_time.CopyToDateTimeString()\n      date_time_values.append('atime:{0:s}'.format(date_time_string))\n\n    creation_time = getattr(file_entry, 'creation_time', None)\n    if creation_time:\n      date_time_string = creation_time.CopyToDateTimeString()\n      date_time_values.append('crtime:{0:s}'.format(date_time_string))\n\n    modification_time = getattr(file_entry, 'modification_time', None)\n    if modification_time:\n      date_time_string = modification_time.CopyToDateTimeString()\n      date_time_values.append('mtime:{0:s}'.format(date_time_string))\n\n    \n    change_time = getattr(file_entry, 'change_time', None)\n    if change_time:\n      date_time_string = change_time.CopyToDateTimeString()\n      date_time_values.append('ctime:{0:s}'.format(date_time_string))\n\n    date_time_values = ''.join(date_time_values)\n    date_time_values = date_time_values.encode('ascii')\n\n    hash_value = hashlib.md5()\n    hash_value.update(date_time_values)\n    return hash_value.hexdigest()", "docstring": "Calculates an MD5 from the date and time value of a NTFS file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry.\n\nReturns:\nstr: hexadecimal representation of the MD5 hash value of the date and\ntime values of the file entry.", "source": "juraj-google-style"}
{"code": "def locator(self, value):\n    self._locator = value\n    (self._latitude, self._longitude) = utils.from_grid_locator(value)", "docstring": "Update the locator, and trigger a latitude and longitude update.\n\nArgs:\nvalue (str): New Maidenhead locator string", "source": "codesearchnet"}
{"code": "def _process_dataset(name, directory, num_shards, synset_to_human,\n                     image_to_bboxes):\n  \n  filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)\n  humans = _find_human_readable_labels(synsets, synset_to_human)\n  bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)\n  _process_image_files(name, filenames, synsets, labels,\n                       humans, bboxes, num_shards)", "docstring": "Process a complete data set and save it as a TFRecord.\n\nArgs:\nname: string, unique identifier specifying the data set.\ndirectory: string, root path to the data set.\nnum_shards: integer number of shards for this data set.\nsynset_to_human: dict of synset to human labels, e.g.,\n'n02119022' --> 'red fox, Vulpes vulpes'\nimage_to_bboxes: dictionary mapping image file names to a list of\nbounding boxes. This list contains 0+ bounding boxes.", "source": "juraj-google-style"}
{"code": "def create_detector(model_uri: str, **kwargs) -> OfflineDetector:\n    model_handler = KeyedModelHandler(PyODModelHandler(model_uri=model_uri)).with_postprocess_fn(OfflineDetector.score_prediction_adapter)\n    m = model_handler.load_model()\n    assert isinstance(m, PyODBaseDetector)\n    threshold = float(m.threshold_)\n    detector = OfflineDetector(model_handler, threshold_criterion=FixedThreshold(threshold), **kwargs)\n    return detector", "docstring": "A utility function to create OfflineDetector for a PyOD model.\n\n**NOTE:** This API and its implementation are currently under active\ndevelopment and may not be backward compatible.\n\nArgs:\nmodel_uri: The URI specifying the location of the pickled PyOD model.\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def restore(self, save_path, options=None):\n    self._checkpoint_options = copy.copy(options) if options else self._checkpoint_options\n    if self._checkpoint_options:\n        self._checkpoint_options.experimental_enable_async_checkpoint = False\n    self._queue.join()\n    status = self.checkpointer().restore(save_path, self._checkpoint_options)\n    return status", "docstring": "Restore the checkpointed variables.\n\nArgs:\nsave_path: The full name of the checkpoint file to be restored.\noptions: CheckpointOption instance.\n\nReturns:\nA load status object, which can be used to make assertions about the\nstatus of a checkpoint restoration. See tf.train.Checkpoint.restore()\nfor more details.", "source": "github-repos"}
{"code": "def add_response(self, req, resp):\n        \n        if self._cache is None:\n            return\n        signature = sign(req.allocateQuotaRequest)\n        with self._cache as c:\n            now = self._timer()\n            item = c.get(signature)\n            if item is None:\n                c[signature] = CachedItem(\n                    req, resp, self.service_name, now)\n            else:\n                \n                item.last_check_time = now\n                item.response = resp\n                item.is_in_flight = False\n                c[signature] = item", "docstring": "Adds the response from sending to `req` to this instance's cache.\n\nArgs:\nreq (`ServicecontrolServicesAllocateQuotaRequest`): the request\nresp (AllocateQuotaResponse): the response from sending the request", "source": "juraj-google-style"}
{"code": "def get_all(self, include_archived=False):\n    return [conv for conv in self._conv_dict.values() if ((not conv.is_archived) or include_archived)]", "docstring": "Get all the conversations.\n\nArgs:\ninclude_archived (bool): (optional) Whether to include archived\nconversations. Defaults to ``False``.\n\nReturns:\nList of all :class:`.Conversation` objects.", "source": "codesearchnet"}
{"code": "def to_dict(self, remove_nones=False):\n    content = {'uri': self.uri, 'protocol_info': self.protocol_info, 'import_uri': self.import_uri, 'size': self.size, 'duration': self.duration, 'bitrate': self.bitrate, 'sample_frequency': self.sample_frequency, 'bits_per_sample': self.bits_per_sample, 'nr_audio_channels': self.nr_audio_channels, 'resolution': self.resolution, 'color_depth': self.color_depth, 'protection': self.protection}\n    if remove_nones:\n        nones = [k for k in content if (content[k] is None)]\n        for k in nones:\n            del content[k]\n    return content", "docstring": "Return a dict representation of the `DidlResource`.\n\nArgs:\nremove_nones (bool, optional): Optionally remove dictionary\nelements when their value is `None`.\n\nReturns:\ndict: a dict representing the `DidlResource`", "source": "codesearchnet"}
{"code": "def run_defense_work(self, work_id):\n    \n    class_batch_id = (\n        self.defense_work.work[work_id]['output_classification_batch_id'])\n    class_batch = self.class_batches.read_batch_from_datastore(class_batch_id)\n    adversarial_batch_id = class_batch['adversarial_batch_id']\n    submission_id = class_batch['submission_id']\n    cloud_result_path = class_batch['result_path']\n    logging.info('Defense work piece: '\n                 'adversarial_batch_id=\"%s\" submission_id=\"%s\"',\n                 adversarial_batch_id, submission_id)\n    if submission_id in self.blacklisted_submissions:\n      raise WorkerError('Blacklisted submission')\n    \n    defense = DefenseSubmission(submission_id, self.submissions,\n                                self.storage_bucket)\n    defense.download()\n    \n    input_dir = os.path.join(LOCAL_INPUT_DIR, adversarial_batch_id)\n    if os.path.exists(input_dir):\n      sudo_remove_dirtree(input_dir)\n    os.makedirs(input_dir)\n    try:\n      shell_call([\n          'gsutil', '-m', 'cp',\n          \n          \n          os.path.join('gs:\n                       'adversarial_images', adversarial_batch_id, '*'),\n          input_dir\n      ])\n      adv_images_files = os.listdir(input_dir)\n      if (len(adv_images_files) == 1) and adv_images_files[0].endswith('.zip'):\n        logging.info('Adversarial batch is in zip archive %s',\n                     adv_images_files[0])\n        shell_call([\n            'unzip', os.path.join(input_dir, adv_images_files[0]),\n            '-d', input_dir\n        ])\n        os.remove(os.path.join(input_dir, adv_images_files[0]))\n        adv_images_files = os.listdir(input_dir)\n      logging.info('%d adversarial images copied', len(adv_images_files))\n    except (subprocess.CalledProcessError, IOError) as e:\n      raise WorkerError('Can''t copy adversarial batch locally', e)\n    \n    if os.path.exists(LOCAL_OUTPUT_DIR):\n      sudo_remove_dirtree(LOCAL_OUTPUT_DIR)\n    os.mkdir(LOCAL_OUTPUT_DIR)\n    output_filname = os.path.join(LOCAL_OUTPUT_DIR, 'result.csv')\n    \n    elapsed_time_sec = defense.run(input_dir, output_filname)\n    \n    batch_result = eval_lib.analyze_one_classification_result(\n        storage_client=None,\n        file_path=output_filname,\n        adv_batch=self.adv_batches.data[adversarial_batch_id],\n        dataset_batches=self.dataset_batches,\n        dataset_meta=self.dataset_meta)\n    \n    try:\n      shell_call([\n          'gsutil', 'cp', output_filname,\n          os.path.join('gs:\n      ])\n    except subprocess.CalledProcessError as e:\n      raise WorkerError('Can''t result to Cloud Storage', e)\n    return elapsed_time_sec, submission_id, batch_result", "docstring": "Runs one defense work.\n\nArgs:\nwork_id: ID of the piece of work to run\n\nReturns:\nelapsed_time_sec, submission_id - elapsed time and id of the submission\n\nRaises:\nWorkerError: if error occurred during execution.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, tx, skip_schema_validation=True):\n        \n        operation = tx.get('operation', Transaction.CREATE) if isinstance(tx, dict) else Transaction.CREATE\n        cls = Transaction.resolve_class(operation)\n\n        if not skip_schema_validation:\n            cls.validate_id(tx)\n            cls.validate_schema(tx)\n\n        inputs = [Input.from_dict(input_) for input_ in tx['inputs']]\n        outputs = [Output.from_dict(output) for output in tx['outputs']]\n        return cls(tx['operation'], tx['asset'], inputs, outputs,\n                   tx['metadata'], tx['version'], hash_id=tx['id'], tx_dict=tx)", "docstring": "Transforms a Python dictionary to a Transaction object.\n\nArgs:\ntx_body (dict): The Transaction to be transformed.\n\nReturns:\n:class:`~bigchaindb.common.transaction.Transaction`", "source": "juraj-google-style"}
{"code": "def sign(self, byts):\n        \n        chosen_hash = c_hashes.SHA256()\n        hasher = c_hashes.Hash(chosen_hash, default_backend())\n        hasher.update(byts)\n        digest = hasher.finalize()\n        return self.priv.sign(digest,\n                              c_ec.ECDSA(c_utils.Prehashed(chosen_hash))\n                              )", "docstring": "Compute the ECC signature for the given bytestream.\n\nArgs:\nbyts (bytes): The bytes to sign.\n\nReturns:\nbytes: The RSA Signature bytes.", "source": "juraj-google-style"}
{"code": "def show_stories(self, raw=False, limit=None):\n    show_stories = self._get_stories('showstories', limit)\n    if raw:\n        show_stories = [story.raw for story in show_stories]\n    return show_stories", "docstring": "Returns list of item ids of latest Show HN stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of Show HN stories.", "source": "codesearchnet"}
{"code": "def get_path_list(self, type_str=None):\n    return list(reversed([v.label_str for v in self.parent_gen if (type_str in (None, v.type_str))]))", "docstring": "Get list of the labels of the nodes leading up to this node from the root.\n\nArgs:\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include\ninformation from nodes of that type.\n\nReturns:\nlist of str: The labels of the nodes leading up to this node from the root.", "source": "codesearchnet"}
{"code": "def check(self, dsm, **kwargs):\n        \n        \n        med_matrix = CompleteMediation.generate_mediation_matrix(dsm)\n        return CompleteMediation.matrices_compliance(dsm, med_matrix)", "docstring": "Check if matrix and its mediation matrix are compliant.\n\nIt means that number of dependencies for each (line, column) is either\n0 if the mediation matrix (line, column) is 0, or >0 if the mediation\nmatrix (line, column) is 1.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\n\nReturns:\nbool: True if compliant, else False", "source": "juraj-google-style"}
{"code": "def find_element_by_name(self, name, update=False) -> Elements:\n        \n        return self.find_element(by=By.NAME, value=name, update=update)", "docstring": "Finds an element by name.\n\nArgs:\nname: The name of the element to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nThe element if it was found.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelement = driver.find_element_by_name('foo')", "source": "juraj-google-style"}
{"code": "def _linear_interp(curve, test_x, round_result=False):\n    index = 0\n    for index in range((len(curve) - 1)):\n        if (curve[index][0] == curve[(index + 1)][0]):\n            continue\n        if (curve[index][0] <= test_x <= curve[(index + 1)][0]):\n            slope = ((curve[(index + 1)][1] - curve[index][1]) / (curve[(index + 1)][0] - curve[index][0]))\n            y_intercept = (curve[index][1] - (slope * curve[index][0]))\n            result = ((slope * test_x) + y_intercept)\n            if round_result:\n                return int(round(result))\n            elif result.is_integer():\n                return int(result)\n            else:\n                return result\n    else:\n        raise ProbabilityUndefinedError", "docstring": "Take a series of points and interpolate between them at ``test_x``.\n\nArgs:\ncurve (list[tuple]): A list of ``(x, y)`` points sorted in\nnondecreasing ``x`` value. If multiple points have the same\n``x`` value, all but the last will be ignored.\ntest_x (float): The ``x`` value to find the ``y`` value of\n\nReturns:\nfloat: The ``y`` value of the curve at ``test_x``\nif ``round_result is False``\n\nint: if ``round_result is True`` or the result is a whole number,\nthe ``y`` value of the curve at ``test_x`` rounded to the\nnearest whole number.\n\nRaises:\nProbabilityUndefinedError: if ``test_x`` is out of the\ndomain of ``curve``\n\nExample:\n>>> curve = [(0, 0), (2, 1)]\n>>> _linear_interp(curve, 0.5)\n0.25\n>>> _linear_interp(curve, 0.5, round_result=True)\n0", "source": "codesearchnet"}
{"code": "def prep_parallel(self, binary_args, other_args):\n    if (self.length < 100):\n        raise Exception('Run this across 1 processor by setting num_processors kwarg to None.')\n    if (self.num_processors == (- 1)):\n        self.num_processors = mp.cpu_count()\n    split_val = int(np.ceil((self.length / self.num_splits)))\n    split_inds = [(self.num_splits * i) for i in np.arange(1, split_val)]\n    inds_split_all = np.split(np.arange(self.length), split_inds)\n    self.args = []\n    for (i, ind_split) in enumerate(inds_split_all):\n        trans_args = []\n        for arg in binary_args:\n            try:\n                trans_args.append(arg[ind_split])\n            except TypeError:\n                trans_args.append(arg)\n        self.args.append(((i, tuple(trans_args)) + other_args))\n    return", "docstring": "Prepare the parallel calculations\n\nPrepares the arguments to be run in parallel.\nIt will divide up arrays according to num_splits.\n\nArgs:\nbinary_args (list): List of binary arguments for input into the SNR function.\nother_args (tuple of obj): tuple of other args for input into parallel snr function.", "source": "codesearchnet"}
{"code": "def write_sample(binary, payload, path, filename):  \n    \n    if not os.path.exists(path):\n        os.makedirs(path)\n\n    sample = os.path.join(path, filename)\n\n    if binary:\n        with open(sample, \"wb\") as f:\n            f.write(base64.b64decode(payload))\n    else:\n        with open(sample, \"w\") as f:\n            f.write(payload)", "docstring": "This function writes a sample on file system.\n\nArgs:\nbinary (bool): True if it's a binary file\npayload: payload of sample, in base64 if it's a binary\npath (string): path of file\nfilename (string): name of file\nhash_ (string): file hash", "source": "juraj-google-style"}
{"code": "def pre_fetch(self, feed):\n    pass", "docstring": "Pre-fetches all required items to be update into the cache.\n\nThis increases performance for update operations.\n\nArgs:\nfeed: List of feed items to retrieve", "source": "github-repos"}
{"code": "def bottom(self, features):\n    \n    if not self._problem_hparams:\n      log_warn(\"Without a Problem, T2TModel.bottom is a passthrough.\")\n      return features\n\n    transformed_features = collections.OrderedDict()\n    all_previous_modalities = []\n    target_modality = _create_target_modality(self._problem_hparams.modality)\n\n    \n    for feature_name, modality in sorted(\n        six.iteritems(self._problem_hparams.modality)):\n      if feature_name not in features:\n        tf.logging.warning(\"Missing feature %s - ignoring.\" % feature_name)\n        continue\n      vocab_size = self._problem_hparams.vocab_size[feature_name]\n      if vocab_size is not None and hasattr(self._hparams, \"vocab_divisor\"):\n        vocab_size += (-vocab_size) % self._hparams.vocab_divisor\n      modality_name = self._hparams.name.get(\n          feature_name,\n          modalities.get_name(modality))(self._hparams, vocab_size)\n      \n      \n      \n      if feature_name in target_modality:\n        if len(target_modality) > 1:\n          variable_scope_name = \"%s/%s\" % (modality_name, feature_name)\n        else:\n          variable_scope_name = modality_name\n        bottom = self._hparams.bottom.get(\n            feature_name,\n            modalities.get_targets_bottom(modality))\n        \n        with tf.variable_scope(variable_scope_name) as vs:\n          self._add_variable_scope(variable_scope_name, vs)\n          log_info(\"Transforming feature '%s' with %s.targets_bottom\",\n                   feature_name,\n                   modality_name)\n          transformed_features[feature_name] = bottom(features[feature_name],\n                                                      self._hparams,\n                                                      vocab_size)\n      else:\n        bottom = self._hparams.bottom.get(feature_name,\n                                          modalities.get_bottom(modality))\n        do_reuse = modality_name in all_previous_modalities\n        with tf.variable_scope(modality_name, reuse=do_reuse) as vs:\n          self._add_variable_scope(modality_name, vs)\n          log_info(\"Transforming feature '%s' with %s.bottom\",\n                   feature_name,\n                   modality_name)\n          transformed_features[feature_name] = bottom(features[feature_name],\n                                                      self._hparams,\n                                                      vocab_size)\n        all_previous_modalities.append(modality_name)\n\n    for key in features:\n      if key not in transformed_features:\n        \n        transformed_features[key] = features[key]\n      else:\n        \n        transformed_features[key + \"_raw\"] = features[key]\n\n    return transformed_features", "docstring": "Transforms features to feed into body.\n\nArgs:\nfeatures: dict of str to Tensor. Typically it is the preprocessed data\nbatch after Problem's preprocess_example().\n\nReturns:\ntransformed_features: dict of same key-value pairs as features. The value\nTensors are newly transformed.", "source": "juraj-google-style"}
{"code": "def enter_diff_mode(self, context_model=None):\n    assert (not self.diff_mode)\n    self.diff_mode = True\n    if (context_model is None):\n        self.diff_from_source = True\n        self.diff_context_model = self.context_model.copy()\n    else:\n        self.diff_from_source = False\n        self.diff_context_model = context_model\n    self.clear()\n    self.setColumnCount(5)\n    self.refresh()", "docstring": "Enter diff mode.\n\nArgs:\ncontext_model (`ContextModel`): Context to diff against. If None, a\ncopy of the current context is used.", "source": "codesearchnet"}
{"code": "def JobDueToRun(self, job):\n    \n    if not job.enabled:\n      return False\n\n    if job.forced_run_requested:\n      return True\n\n    now = rdfvalue.RDFDatetime.Now()\n\n    if (job.last_run_time is not None and\n        job.last_run_time + job.frequency > now):\n      return False\n\n    \n    if not job.current_run_id:\n      return True\n\n    \n    if job.allow_overruns:\n      return True\n\n    return False", "docstring": "Determines if the given job is due for another run.\n\nArgs:\njob: The cron job rdfvalue object.\n\nReturns:\nTrue if it is time to run based on the specified frequency.", "source": "juraj-google-style"}
{"code": "def __copyfile2(source, destination):\n    \n    logger.info(\"copyfile2: %s -> %s\" % (source, destination))\n    try:\n        __create_destdir(destination)\n        shutil.copy2(source, destination)\n        return True\n    except Exception as e:\n        logger.error(\n            \"copyfile2: %s -> %s failed! Error: %s\", source, destination, e\n        )\n        return False", "docstring": "Copy data and all stat info (\"cp -p source destination\").\n\nThe destination may be a directory.\n\nArgs:\nsource (str): Source file (file to copy).\ndestination (str): Destination file or directory (where to copy).\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def flow2rgb(flow, color_wheel=None, unknown_thr=1000000.0):\n    assert ((flow.ndim == 3) and (flow.shape[(- 1)] == 2))\n    if (color_wheel is None):\n        color_wheel = make_color_wheel()\n    assert ((color_wheel.ndim == 2) and (color_wheel.shape[1] == 3))\n    num_bins = color_wheel.shape[0]\n    dx = flow[(:, :, 0)].copy()\n    dy = flow[(:, :, 1)].copy()\n    ignore_inds = (((np.isnan(dx) | np.isnan(dy)) | (np.abs(dx) > unknown_thr)) | (np.abs(dy) > unknown_thr))\n    dx[ignore_inds] = 0\n    dy[ignore_inds] = 0\n    rad = np.sqrt(((dx ** 2) + (dy ** 2)))\n    if np.any((rad > np.finfo(float).eps)):\n        max_rad = np.max(rad)\n        dx /= max_rad\n        dy /= max_rad\n    [h, w] = dx.shape\n    rad = np.sqrt(((dx ** 2) + (dy ** 2)))\n    angle = (np.arctan2((- dy), (- dx)) / np.pi)\n    bin_real = (((angle + 1) / 2) * (num_bins - 1))\n    bin_left = np.floor(bin_real).astype(int)\n    bin_right = ((bin_left + 1) % num_bins)\n    w = (bin_real - bin_left.astype(np.float32))[(..., None)]\n    flow_img = (((1 - w) * color_wheel[(bin_left, :)]) + (w * color_wheel[(bin_right, :)]))\n    small_ind = (rad <= 1)\n    flow_img[small_ind] = (1 - (rad[(small_ind, None)] * (1 - flow_img[small_ind])))\n    flow_img[np.logical_not(small_ind)] *= 0.75\n    flow_img[(ignore_inds, :)] = 0\n    return flow_img", "docstring": "Convert flow map to RGB image.\n\nArgs:\nflow (ndarray): Array of optical flow.\ncolor_wheel (ndarray or None): Color wheel used to map flow field to\nRGB colorspace. Default color wheel will be used if not specified.\nunknown_thr (str): Values above this threshold will be marked as\nunknown and thus ignored.\n\nReturns:\nndarray: RGB image that can be visualized.", "source": "codesearchnet"}
{"code": "def find(self, selector, **kwargs):\n    self.debug_log(('Finding element with selector: %s' % selector))\n    elements = self.find_all(selector, **kwargs)\n    if len(elements):\n        self.debug_log(('find (%s): Element found' % selector))\n        return elements[0]\n    else:\n        self.debug_log(('find (%s): No element found' % selector))\n        return None", "docstring": "Find an element with a selector\n\nArgs:\nselector (str): the selector used to find the element\n\nKwargs:\nwait_until_present (bool)\nwait_until_visible (bool)\nraise_exception (bool)\n\nReturns:\nNone if no element was found\nproxy_element is an element was found\n\nRaises:\nthis function might raise an exception\ndepending on the raise_exception kwargs\nor\nthe config proxy_driver:raise_exception", "source": "codesearchnet"}
{"code": "def multiply(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Multiply().symbolic_call(x1, x2)\n    return backend.numpy.multiply(x1, x2)", "docstring": "Multiply arguments element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, element-wise product of `x1` and `x2`.", "source": "github-repos"}
{"code": "def add_inner_graph_id(self, inner_graph_id):\n    assert isinstance(inner_graph_id, str)\n    self._inner_graph_ids.append(inner_graph_id)", "docstring": "Add the debugger-generated ID of a graph nested within this graph.\n\nArgs:\ninner_graph_id: The debugger-generated ID of the nested inner graph.", "source": "github-repos"}
{"code": "def get_service(self, uuid):\n        \n        if uuid in self.services:\n            return self.services[uuid]\n\n        if pp_hex(uuid) in self.services:\n            return self.services[pp_hex(uuid)]\n\n        return None", "docstring": "Lookup information about a given GATT service.\n\nArgs:\nuuid (str): a string containing the hex-encoded service UUID\n\nReturns:\nNone if an error occurs, otherwise a :class:`Service` object.", "source": "juraj-google-style"}
{"code": "def _call_post_with_user_override(self, sap_user_id, url, payload):\n    SAPSuccessFactorsEnterpriseCustomerConfiguration = apps.get_model('sap_success_factors', 'SAPSuccessFactorsEnterpriseCustomerConfiguration')\n    (oauth_access_token, _) = SAPSuccessFactorsAPIClient.get_oauth_access_token(self.enterprise_configuration.sapsf_base_url, self.enterprise_configuration.key, self.enterprise_configuration.secret, self.enterprise_configuration.sapsf_company_id, sap_user_id, SAPSuccessFactorsEnterpriseCustomerConfiguration.USER_TYPE_USER)\n    response = requests.post(url, data=payload, headers={'Authorization': 'Bearer {}'.format(oauth_access_token), 'content-type': 'application/json'})\n    return (response.status_code, response.text)", "docstring": "Make a post request with an auth token acquired for a specific user to a SuccessFactors endpoint.\n\nArgs:\nsap_user_id (str): The user to use to retrieve an auth token.\nurl (str): The url to post to.\npayload (str): The json encoded payload to post.", "source": "codesearchnet"}
{"code": "def pipe(self, format=None, renderer=None, formatter=None):\n    if (format is None):\n        format = self._format\n    data = text_type(self.source).encode(self._encoding)\n    out = backend.pipe(self._engine, format, data, renderer, formatter)\n    return out", "docstring": "Return the source piped through the Graphviz layout command.\n\nArgs:\nformat: The output format used for rendering (``'pdf'``, ``'png'``, etc.).\nrenderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).\nformatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).\nReturns:\nBinary (encoded) stdout of the layout command.\nRaises:\nValueError: If ``format``, ``renderer``, or ``formatter`` are not known.\ngraphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.\ngraphviz.ExecutableNotFound: If the Graphviz executable is not found.\nsubprocess.CalledProcessError: If the exit status is non-zero.", "source": "codesearchnet"}
{"code": "def zipf_distribution(nbr_symbols, alpha):\n  \n  tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)\n  zeta = np.r_[0.0, np.cumsum(tmp)]\n  return [x / zeta[-1] for x in zeta]", "docstring": "Helper function: Create a Zipf distribution.\n\nArgs:\nnbr_symbols: number of symbols to use in the distribution.\nalpha: float, Zipf's Law Distribution parameter. Default = 1.5.\nUsually for modelling natural text distribution is in\nthe range [1.1-1.6].\n\nReturns:\ndistr_map: list of float, Zipf's distribution over nbr_symbols.", "source": "juraj-google-style"}
{"code": "def _ScanVolume(self, scan_context, scan_node, base_path_specs):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.ScannerError('Invalid or missing scan node.')\n\n    if scan_context.IsLockedScanNode(scan_node.path_spec):\n      \n      \n      self._ScanEncryptedVolume(scan_context, scan_node)\n\n      if scan_context.IsLockedScanNode(scan_node.path_spec):\n        return\n\n    if scan_node.IsVolumeSystemRoot():\n      self._ScanVolumeSystemRoot(scan_context, scan_node, base_path_specs)\n\n    elif scan_node.IsFileSystem():\n      self._ScanFileSystem(scan_node, base_path_specs)\n\n    elif scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:\n      \n\n      \n      \n      path_spec = path_spec_factory.Factory.NewPathSpec(\n          definitions.TYPE_INDICATOR_TSK, location='/',\n          parent=scan_node.path_spec)\n\n      base_path_specs.append(path_spec)\n\n    else:\n      for sub_scan_node in scan_node.sub_nodes:\n        self._ScanVolume(scan_context, sub_scan_node, base_path_specs)", "docstring": "Scans a volume scan node for volume and file systems.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): volume scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the format of or within the source\nis not supported or the scan node is invalid.", "source": "juraj-google-style"}
{"code": "def ParsePathItem(item, opts=None):\n    if (item == os.path.curdir):\n        return CurrentComponent()\n    if (item == os.path.pardir):\n        return ParentComponent()\n    recursion = PATH_RECURSION_REGEX.search(item)\n    if (recursion is None):\n        return GlobComponent(item, opts)\n    (start, end) = recursion.span()\n    if (not ((start == 0) and (end == len(item)))):\n        raise ValueError('malformed recursive component')\n    if recursion.group('max_depth'):\n        max_depth = int(recursion.group('max_depth'))\n    else:\n        max_depth = None\n    return RecursiveComponent(max_depth=max_depth, opts=opts)", "docstring": "Parses string path component to an `PathComponent` instance.\n\nArgs:\nitem: A path component string to be parsed.\nopts: A `PathOpts` object.\n\nReturns:\n`PathComponent` instance corresponding to given path fragment.\n\nRaises:\nValueError: If the path item contains a recursive component fragment but\ncannot be parsed as such.", "source": "codesearchnet"}
{"code": "def _CreateFolder(self, parent, name, visible=True, description=None):\n    \n    folder = ET.SubElement(parent, 'Folder')\n    name_tag = ET.SubElement(folder, 'name')\n    name_tag.text = name\n    if description is not None:\n      desc_tag = ET.SubElement(folder, 'description')\n      desc_tag.text = description\n    if not visible:\n      visibility = ET.SubElement(folder, 'visibility')\n      visibility.text = '0'\n    return folder", "docstring": "Create a KML Folder element.\n\nArgs:\nparent: The parent ElementTree.Element instance.\nname: The folder name as a string.\nvisible: Whether the folder is initially visible or not.\ndescription: A description string or None.\n\nReturns:\nThe folder ElementTree.Element instance.", "source": "juraj-google-style"}
{"code": "def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):\n        \n        default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}\n        try:\n            return get_edx_api_data(\n                api_config=CatalogIntegration.current(),\n                resource=resource,\n                api=self.client,\n                **kwargs\n            ) or default_val\n        except (SlumberBaseException, ConnectionError, Timeout) as exc:\n            LOGGER.exception(\n                'Failed to load data from resource [%s] with kwargs [%s] due to: [%s]',\n                resource, kwargs, str(exc)\n            )\n            return default_val", "docstring": "Load data from API client.\n\nArguments:\nresource(string): type of resource to load\ndefault(any): value to return if API query returned empty result. Sensible values: [], {}, None etc.\n\nReturns:\ndict: Deserialized response from Course Catalog API", "source": "juraj-google-style"}
{"code": "def extend_webfont_settings(webfont_settings):\n    \n    if not webfont_settings.get('fontdir_path', False):\n        raise IcomoonSettingsError((\"Webfont settings miss the required key \"\n                                    \"item 'fontdir_path'\"))\n\n    if not webfont_settings.get('csspart_path', False):\n        webfont_settings['csspart_path'] = None\n\n    return webfont_settings", "docstring": "Validate a webfont settings and optionally fill missing ``csspart_path``\noption.\n\nArgs:\nwebfont_settings (dict): Webfont settings (an item value from\n``settings.ICOMOON_WEBFONTS``).\n\nReturns:\ndict: Webfont settings", "source": "juraj-google-style"}
{"code": "def d_hkl(self, miller_index: Vector3Like) -> float:\n    gstar = self.reciprocal_lattice_crystallographic.metric_tensor\n    hkl = np.array(miller_index)\n    return (1 / (dot(dot(hkl, gstar), hkl.T) ** (1 / 2)))", "docstring": "Returns the distance between the hkl plane and the origin\n\nArgs:\nmiller_index ([h,k,l]): Miller index of plane\n\nReturns:\nd_hkl (float)", "source": "codesearchnet"}
{"code": "def list_users():\n    res = 0\n    user_list = []\n    dowhile = True\n    try:\n        while (res or dowhile):\n            dowhile = False\n            (users, _, res) = win32net.NetUserEnum(None, 0, win32netcon.FILTER_NORMAL_ACCOUNT, res, win32netcon.MAX_PREFERRED_LENGTH)\n            for user in users:\n                user_list.append(user['name'])\n        return user_list\n    except win32net.error:\n        pass", "docstring": "Return a list of all users on Windows\n\nReturns:\nlist: A list of all users on the system\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.list_users", "source": "codesearchnet"}
{"code": "def register_controller(self, module, required=True, min_number=1):\n    verify_controller_module(module)\n    module_ref_name = module.__name__.split('.')[-1]\n    if module_ref_name in self._controller_objects:\n        raise signals.ControllerError('Controller module %s has already been registered. It cannot be registered again.' % module_ref_name)\n    module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME\n    if module_config_name not in self.controller_configs:\n        if required:\n            raise signals.ControllerError('No corresponding config found for %s' % module_config_name)\n        logging.warning('No corresponding config found for optional controller %s', module_config_name)\n        return None\n    try:\n        original_config = self.controller_configs[module_config_name]\n        controller_config = copy.deepcopy(original_config)\n        objects = module.create(controller_config)\n    except Exception:\n        logging.exception('Failed to initialize objects for controller %s, abort!', module_config_name)\n        raise\n    if not isinstance(objects, list):\n        raise signals.ControllerError('Controller module %s did not return a list of objects, abort.' % module_ref_name)\n    actual_number = len(objects)\n    if actual_number < min_number:\n        module.destroy(objects)\n        raise signals.ControllerError('Expected to get at least %d controller objects, got %d.' % (min_number, actual_number))\n    self._controller_objects[module_ref_name] = copy.copy(objects)\n    logging.debug('Found %d objects for controller %s', len(objects), module_config_name)\n    self._controller_modules[module_ref_name] = module\n    return objects", "docstring": "Loads a controller module and returns its loaded devices.\n\nThis is to be used in a mobly test class.\n\nArgs:\nmodule: A module that follows the controller module interface.\nrequired: A bool. If True, failing to register the specified\ncontroller module raises exceptions. If False, the objects\nfailed to instantiate will be skipped.\nmin_number: An integer that is the minimum number of controller\nobjects to be created. Default is one, since you should not\nregister a controller module without expecting at least one\nobject.\n\nReturns:\nA list of controller objects instantiated from controller_module, or\nNone if no config existed for this controller and it was not a\nrequired controller.\n\nRaises:\nControllerError:\n* The controller module has already been registered.\n* The actual number of objects instantiated is less than the\n* `min_number`.\n* `required` is True and no corresponding config can be found.\n* Any other error occurred in the registration process.", "source": "github-repos"}
{"code": "def find_all_build_files(dir: str) -> List[Tuple[str, str]]:\n    build_file_dirs = []\n    for root, _, files in os.walk(dir):\n        for file in files:\n            if file in BUILD_FILENAMES:\n                root = root.strip('./')\n                build_file_dirs.append((root, file))\n    return build_file_dirs", "docstring": "List all the BUILD files.\n\nReturns:\nThe list of (directory, filename) of all BUILD files.", "source": "github-repos"}
{"code": "def __init__(self, config: PretrainedConfig, generation_config: GenerationConfig, device: torch.device, dtype: torch.dtype=torch.float16, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]]=None, initial_prompt_shapes: Optional[List[List[int]]]=None) -> None:\n    self.num_key_value_heads = config.num_attention_heads if getattr(config, 'num_key_value_heads', None) is None else config.num_key_value_heads\n    self.head_dim = config.head_dim if hasattr(config, 'head_dim') else config.hidden_size \n    self.num_hidden_layers = config.num_hidden_layers\n    num_blocks = getattr(generation_config, 'num_blocks', None)\n    block_size = getattr(generation_config, 'block_size', None)\n    if num_blocks is None or block_size is None:\n        logger.info('Calculating optimal block size and number...')\n        num_blocks, block_size = compute_optimal_blocks(device, config, generation_config, initial_prompt_shapes or [], dtype, median_prefill_length=200)\n        logger.info(f'Using calculated num_blocks={num_blocks}, block_size={block_size}')\n    self.block_size = block_size\n    self.num_blocks = num_blocks\n    self.cache_shape = (self.num_key_value_heads, num_blocks, self.block_size, self.head_dim)\n    self.dtype = dtype\n    self.device = device\n    self.key_cache: List[torch.Tensor] = []\n    self.value_cache: List[torch.Tensor] = []\n    for idx in range(config.num_hidden_layers):\n        layer_device = layer_device_map[idx] if layer_device_map is not None else device\n        new_layer_key_cache = torch.zeros(self.cache_shape, dtype=self.dtype, device=layer_device)\n        new_layer_value_cache = torch.zeros(self.cache_shape, dtype=self.dtype, device=layer_device)\n        torch._dynamo.mark_static_address(new_layer_key_cache)\n        torch._dynamo.mark_static_address(new_layer_value_cache)\n        self.key_cache.append(new_layer_key_cache)\n        self.value_cache.append(new_layer_value_cache)\n    self._free_blocks = deque(range(num_blocks))\n    self._block_tables: Dict[str, List[int]] = {}", "docstring": "Initialize a paged attention cache for efficient memory usage.\n\nArgs:\nconfig: Model configuration\ngeneration_config: Generation configuration containing cache parameters\ndevice: Device for the cache tensors\ndtype: Data type for the cache tensors\nlayer_device_map: Optional mapping of layer indices to devices\ninitial_prompt_shapes: Optional sample prompts to help calculate optimal cache size", "source": "github-repos"}
{"code": "def percent_point(self, y, V):\n        \n        self.check_fit()\n\n        if self.theta < 0:\n            return V\n\n        else:\n            a = np.power(y, self.theta / (-1 - self.theta))\n            b = np.power(V, self.theta)\n            u = np.power((a + b - 1) / b, -1 / self.theta)\n            return u", "docstring": "Compute the inverse of conditional cumulative distribution :math:`C(u|v)^-1`\n\nArgs:\ny: `np.ndarray` value of :math:`C(u|v)`.\nv: `np.ndarray` given value of v.", "source": "juraj-google-style"}
{"code": "def potentially_ragged_concat(tensors):\n    if len(tensors) == 1:\n        return tensors[0]\n    elif isinstance(tensors[0], tf.SparseTensor):\n        return tf.sparse.concat(axis=0, sp_inputs=tensors)\n    elif isinstance(tensors[0], tf.RaggedTensor):\n        return tf.concat(tensors, axis=0)\n    non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors])\n    constant_dims = tf.math.reduce_all(non_batch_shapes == non_batch_shapes[:1], axis=0)\n    if tf.math.reduce_all(constant_dims).numpy().item():\n        if _is_scalar(tensors[0]):\n            return tf.stack(tensors, axis=0)\n        else:\n            return tf.concat(tensors, axis=0)\n    constant_inner_dimensions = constant_dims.numpy().tolist()[::-1].index(False)\n    if constant_inner_dimensions == 0:\n        constant_inner_shape = None\n    else:\n        constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:]\n    return tf.ragged.constant([tensor.numpy() for tensor in tensors], inner_shape=constant_inner_shape).merge_dims(0, 1)", "docstring": "Concats `Tensor`s along their first dimension.\n\nArgs:\ntensors: List of `Tensor`s.\n\nReturns:\nConcatenation of the inputs along the first dimension -- of type\n`np.ndarray` if all input shapes are compatible, or `tf.RaggedTensor`\nif not.", "source": "github-repos"}
{"code": "def ScanForWindowsVolume(self, source_path):\n    \n    windows_path_specs = self.GetBasePathSpecs(source_path)\n    if (not windows_path_specs or\n        self._source_type == definitions.SOURCE_TYPE_FILE):\n      return False\n\n    file_system_path_spec = windows_path_specs[0]\n    self._file_system = resolver.Resolver.OpenFileSystem(file_system_path_spec)\n\n    if file_system_path_spec.type_indicator == definitions.TYPE_INDICATOR_OS:\n      mount_point = file_system_path_spec\n    else:\n      mount_point = file_system_path_spec.parent\n\n    self._path_resolver = windows_path_resolver.WindowsPathResolver(\n        self._file_system, mount_point)\n\n    \n    if not self._windows_directory:\n      self._ScanFileSystemForWindowsDirectory(self._path_resolver)\n\n    if not self._windows_directory:\n      return False\n\n    self._path_resolver.SetEnvironmentVariable(\n        'SystemRoot', self._windows_directory)\n    self._path_resolver.SetEnvironmentVariable(\n        'WinDir', self._windows_directory)\n\n    return True", "docstring": "Scans for a Windows volume.\n\nArgs:\nsource_path (str): source path.\n\nReturns:\nbool: True if a Windows volume was found.\n\nRaises:\nScannerError: if the source path does not exists, or if the source path\nis not a file or directory, or if the format of or within the source\nfile is not supported.", "source": "juraj-google-style"}
{"code": "def match(self, path):\n    this = self.segments\n    that = path.split('/')\n    current_var = None\n    bindings = {}\n    segment_count = self.segment_count\n    j = 0\n    for i in range(0, len(this)):\n        if (j >= len(that)):\n            break\n        if (this[i].kind == _TERMINAL):\n            if (this[i].literal == '*'):\n                bindings[current_var] = that[j]\n                j += 1\n            elif (this[i].literal == '**'):\n                until = (((j + len(that)) - segment_count) + 1)\n                segment_count += (len(that) - segment_count)\n                bindings[current_var] = '/'.join(that[j:until])\n                j = until\n            elif (this[i].literal != that[j]):\n                raise ValidationException((\"mismatched literal: '%s' != '%s'\" % (this[i].literal, that[j])))\n            else:\n                j += 1\n        elif (this[i].kind == _BINDING):\n            current_var = this[i].literal\n    if ((j != len(that)) or (j != segment_count)):\n        raise ValidationException('match error: could not render from the path template: {}'.format(path))\n    return bindings", "docstring": "Matches a fully qualified path template string.\n\nArgs:\npath (str): A fully qualified path template string.\n\nReturns:\ndict: Var names to matched binding values.\n\nRaises:\nValidationException: If path can't be matched to the template.", "source": "codesearchnet"}
{"code": "async def receive(self, timeout: float=None) -> Union[(Message, None)]:\n    if timeout:\n        coro = self.queue.get()\n        try:\n            msg = (await asyncio.wait_for(coro, timeout=timeout))\n        except asyncio.TimeoutError:\n            msg = None\n    else:\n        try:\n            msg = self.queue.get_nowait()\n        except asyncio.QueueEmpty:\n            msg = None\n    return msg", "docstring": "Receives a message for this behaviour.\nIf timeout is not None it returns the message or \"None\"\nafter timeout is done.\n\nArgs:\ntimeout (float): number of seconds until return\n\nReturns:\nspade.message.Message: a Message or None", "source": "codesearchnet"}
{"code": "def _create_regexp_filter(regex):\n    compiled_regex = re.compile(regex)\n\n    def filter_fn(value):\n        if (not isinstance(value, six.string_types)):\n            raise error.HParamsError(('Cannot use a regexp filter for a value of type %s. Value: %s' % (type(value), value)))\n        return (re.search(compiled_regex, value) is not None)\n    return filter_fn", "docstring": "Returns a boolean function that filters strings based on a regular exp.\n\nArgs:\nregex: A string describing the regexp to use.\nReturns:\nA function taking a string and returns True if any of its substrings\nmatches regex.", "source": "codesearchnet"}
{"code": "def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n    success_url = self.URLS['dns'].format(self.domain_id)\n    record_ids = self._get_matching_dns_entry_ids(identifier, rtype, name, content)\n    LOGGER.debug('Record IDs to delete: %s', record_ids)\n    success = True\n    for rec_id in record_ids:\n        delete_response = self.session.get(self.URLS['dns_delete_entry'].format(self.domain_id, rec_id))\n        self._invalidate_records_cache()\n        self._log('Delete DNS entry {}'.format(rec_id), delete_response)\n        success = (success and (delete_response.url == success_url))\n    return success", "docstring": "Delete one or more DNS entries in the domain zone that match the given\ncriteria.\n\nArgs:\n[identifier] (str): An ID to match against DNS entry easyname IDs.\n[rtype] (str): A DNS rtype (e.g. A, TXT, MX, etc) to match against DNS\nentry types.\n[name] (str): A name to match against DNS entry names.\n[content] (str): A content to match against a DNS entry contents.\n\nReturns:\nbool: True if the record(s) were deleted successfully, False\notherwise.", "source": "codesearchnet"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n        self.Type = TransactionType.ClaimTransaction\n        if self.Version != 0:\n            raise Exception('Format Exception')\n\n        numrefs = reader.ReadVarInt()\n\n        claims = []\n        for i in range(0, numrefs):\n            c = CoinReference()\n            c.Deserialize(reader)\n            claims.append(c)\n\n        self.Claims = claims\n        if len(self.Claims) == 0:\n            raise Exception('Format Exception')", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):\n\nRaises:\nException: If the transaction type is incorrect or if there are no claims.", "source": "juraj-google-style"}
{"code": "def items_sort(cls, items):\n        \n        class t(tuple):\n            \n            def __cmp__(self, other):\n                for a, b in six.moves.zip_longest(self, other):\n                    if a != b:\n                        if a is None:\n                            return 1\n                        if b is None:\n                            return -1\n                        return a - b\n                return 0\n\n            def __lt__(self, other):\n                return self.__cmp__(other) < 0\n            def __gt_(self, other):\n                return self.__cmp__(other) > 0\n            def __le__(self, other):\n                return self.__cmp__(other) <= 0\n            def __ge_(self, other):\n                return self.__cmp__(other) >= 0\n            def __eq__(self, other):\n                return self.__cmp__(other) == 0\n            def __ne__(self, other):\n                return self.__cmp__(other) != 0\n\n        def key_func(x):\n            if x.indented:\n                return t((int(x.parent_item.sort), int(x.sort)))\n            return t((int(x.sort), ))\n\n        return sorted(items, key=key_func, reverse=True)", "docstring": "Sort list items, taking into account parent items.\n\nArgs:\nitems (list[gkeepapi.node.ListItem]): Items to sort.\nReturns:\nlist[gkeepapi.node.ListItem]: Sorted items.", "source": "juraj-google-style"}
{"code": "async def sync_services(self):\n    services = {}\n    servs = (await self.list_services())\n    for (i, serv) in enumerate(servs):\n        info = (await self.service_info(serv))\n        status = (await self.service_status(serv))\n        messages = (await self.get_messages(serv))\n        headline = (await self.get_headline(serv))\n        services[serv] = states.ServiceState(info['short_name'], info['long_name'], info['preregistered'], i)\n        services[serv].state = status['numeric_status']\n        for message in messages:\n            services[serv].post_message(message.level, message.message, message.count, message.created)\n        if (headline is not None):\n            services[serv].set_headline(headline.level, headline.message, headline.created)\n    return services", "docstring": "Poll the current state of all services.\n\nReturns:\ndict: A dictionary mapping service name to service status", "source": "codesearchnet"}
{"code": "def _build_projection_expression(clean_table_keys):\n    projection_expression = ''\n    for key in clean_table_keys[:(- 1)]:\n        projection_expression += '{},'.format(key)\n    projection_expression += clean_table_keys[(- 1)]\n    return projection_expression", "docstring": "Given cleaned up keys, this will return a projection expression for\nthe dynamodb lookup.\n\nArgs:\nclean_table_keys (dict): keys without the data types attached\n\nReturns:\nstr: A projection expression for the dynamodb lookup.", "source": "codesearchnet"}
{"code": "def blit(self, console: tcod.console.Console, x: float, y: float, bg_blend: int, scale_x: float, scale_y: float, angle: float) -> None:\n    lib.TCOD_image_blit(self.image_c, _console(console), x, y, bg_blend, scale_x, scale_y, angle)", "docstring": "Blit onto a Console using scaling and rotation.\n\nArgs:\nconsole (Console): Blit destination Console.\nx (float): Console X position for the center of the Image blit.\ny (float): Console Y position for the center of the Image blit.\nThe Image blit is centered on this position.\nbg_blend (int): Background blending mode to use.\nscale_x (float): Scaling along Image x axis.\nSet to 1 for no scaling.  Must be over 0.\nscale_y (float): Scaling along Image y axis.\nSet to 1 for no scaling.  Must be over 0.\nangle (float): Rotation angle in radians. (Clockwise?)", "source": "codesearchnet"}
{"code": "def _collect_classes(m):\n    \n    from f311 import filetypes as ft\n    from f311 import explorer as ex\n\n    def _extend(classes, newclasses):\n        \n        classes.extend([class_ for class_ in newclasses if class_ not in classes])\n        \n\n    file_classes = [class_ for class_ in a99.get_classes_in_module(m, ft.DataFile) if class_.flag_collect]\n\n    \n    _extend(_classes_txt, [class_ for class_ in file_classes if class_.flag_txt])\n\n    \n    _extend(_classes_bin, [class_ for class_ in file_classes if not class_.flag_txt])\n    \n    _extend(_classes_sp, [class_ for class_ in file_classes if issubclass(class_, ft.FileSpectrum)])\n    \n    _extend(_classes_file, file_classes)\n    \n\n    _extend(_classes_vis, a99.get_classes_in_module(m, ex.Vis))\n\n    global _classes_file_superclass\n    _classes_file_superclass = [cls.__bases__[0] for cls in _classes_file]", "docstring": "Adds entries to _classes_*\n\nArgs:\nm: module object that must contain the following sub-modules: datatypes, vis", "source": "juraj-google-style"}
{"code": "def as_list_data(self):\n    element = ElementTree.Element(self.list_type)\n    id_ = ElementTree.SubElement(element, 'id')\n    id_.text = self.id\n    name = ElementTree.SubElement(element, 'name')\n    name.text = self.name\n    return element", "docstring": "Return an Element to be used in a list.\n\nMost lists want an element with tag of list_type, and\nsubelements of id and name.\n\nReturns:\nElement: list representation of object.", "source": "codesearchnet"}
{"code": "def to_dict(self, rw = False):\n\t\t\n\t\treturn {k:v for (k,v) in self.attributes.items() \n\t\t\t\tif (v is not None and (not rw or (k in self.rw_attr_keys)))}", "docstring": "Returns relevant attributes as a dict.\nArgs:\nrw \t\t\tif True only returns the read/write enabled object attributes", "source": "juraj-google-style"}
{"code": "def ChangePassword(self, password_old, password_new):\n    if (not self.ValidatePassword(password_old)):\n        return False\n    if isinstance(password_new, str):\n        password_new = password_new.encode('utf-8')\n    password_key = hashlib.sha256(password_new)\n    self.SaveStoredData('PasswordHash', password_key)\n    self.SaveStoredData('MasterKey', AES.new(self._master_key, AES.MODE_CBC, self._iv))\n    return True", "docstring": "Change the password used to protect the private key.\n\nArgs:\npassword_old (str): the current password used to encrypt the private key.\npassword_new (str): the new to be used password to encrypt the private key.\n\nReturns:\nbool: whether the password has been changed", "source": "codesearchnet"}
{"code": "def __init__(self, endpoint_name, sagemaker_session=None):\n        \n        super(MXNetPredictor, self).__init__(endpoint_name, sagemaker_session, json_serializer, json_deserializer)", "docstring": "Initialize an ``MXNetPredictor``.\n\nArgs:\nendpoint_name (str): The name of the endpoint to perform inference on.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one\nusing the default AWS configuration chain.", "source": "juraj-google-style"}
{"code": "def bbox_clip(bboxes, img_shape):\n    assert ((bboxes.shape[(- 1)] % 4) == 0)\n    clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)\n    clipped_bboxes[(..., 0::2)] = np.maximum(np.minimum(bboxes[(..., 0::2)], (img_shape[1] - 1)), 0)\n    clipped_bboxes[(..., 1::2)] = np.maximum(np.minimum(bboxes[(..., 1::2)], (img_shape[0] - 1)), 0)\n    return clipped_bboxes", "docstring": "Clip bboxes to fit the image shape.\n\nArgs:\nbboxes (ndarray): Shape (..., 4*k)\nimg_shape (tuple): (height, width) of the image.\n\nReturns:\nndarray: Clipped bboxes.", "source": "codesearchnet"}
{"code": "def get_pb_ids(self) -> List[str]:\n    values = DB.get_hash_value(self._key, 'processing_block_ids')\n    return ast.literal_eval(values)", "docstring": "Return the list of PB ids associated with the SBI.\n\nReturns:\nlist, Processing block ids", "source": "codesearchnet"}
{"code": "def get_gruneisen_parameter(self, temperature=None, structure=None, quad=None):\n    return (np.trace(self.get_tgt(temperature, structure, quad)) / 3.0)", "docstring": "Gets the single average gruneisen parameter from the TGT.\n\nArgs:\ntemperature (float): Temperature in kelvin, if not specified\nwill return non-cv-normalized value\nstructure (float): Structure to be used in directional heat\ncapacity determination, only necessary if temperature\nis specified\nquad (dict): quadrature for integration, should be\ndictionary with \"points\" and \"weights\" keys defaults\nto quadpy.sphere.Lebedev(19) as read from file", "source": "codesearchnet"}
{"code": "def items_sort(cls, items):\n\n    class t(tuple):\n        'Tuple with element-based sorting'\n\n        def __cmp__(self, other):\n            for (a, b) in six.moves.zip_longest(self, other):\n                if (a != b):\n                    if (a is None):\n                        return 1\n                    if (b is None):\n                        return (- 1)\n                    return (a - b)\n            return 0\n\n        def __lt__(self, other):\n            return (self.__cmp__(other) < 0)\n\n        def __gt_(self, other):\n            return (self.__cmp__(other) > 0)\n\n        def __le__(self, other):\n            return (self.__cmp__(other) <= 0)\n\n        def __ge_(self, other):\n            return (self.__cmp__(other) >= 0)\n\n        def __eq__(self, other):\n            return (self.__cmp__(other) == 0)\n\n        def __ne__(self, other):\n            return (self.__cmp__(other) != 0)\n\n    def key_func(x):\n        if x.indented:\n            return t((int(x.parent_item.sort), int(x.sort)))\n        return t((int(x.sort),))\n    return sorted(items, key=key_func, reverse=True)", "docstring": "Sort list items, taking into account parent items.\n\nArgs:\nitems (list[gkeepapi.node.ListItem]): Items to sort.\nReturns:\nlist[gkeepapi.node.ListItem]: Sorted items.", "source": "codesearchnet"}
{"code": "def get_dependency_graph(self):\n    from rez.vendor.pygraph.classes.digraph import digraph\n    nodes = {}\n    edges = set()\n    for variant in self._resolved_packages:\n        nodes[variant.name] = variant.qualified_package_name\n        for request in variant.get_requires():\n            if (not request.conflict):\n                edges.add((variant.name, request.name))\n    g = digraph()\n    node_color = '\n    node_fontsize = 10\n    attrs = [('fontsize', node_fontsize), ('fillcolor', node_color), ('style', 'filled')]\n    for (name, qname) in nodes.iteritems():\n        g.add_node(name, attrs=(attrs + [('label', qname)]))\n    for edge in edges:\n        g.add_edge(edge)\n    return g", "docstring": "Generate the dependency graph.\n\nThe dependency graph is a simpler subset of the resolve graph. It\ncontains package name nodes connected directly to their dependencies.\nWeak references and conflict requests are not included in the graph.\nThe dependency graph does not show conflicts.\n\nReturns:\n`pygraph.digraph` object.", "source": "codesearchnet"}
{"code": "def list_container_services(access_token, subscription_id, resource_group):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerService/ContainerServices', '?api-version=', ACS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container services in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON model.", "source": "codesearchnet"}
{"code": "def _update_workflow_definition(pb_config: dict):\n    known_workflows = get_workflows()\n    workflow_id = pb_config['workflow']['id']\n    workflow_version = pb_config['workflow']['version']\n    if ((workflow_id not in known_workflows) or (workflow_version not in known_workflows[workflow_id])):\n        raise RuntimeError('Unknown workflow definition: {}:{}'.format(workflow_id, workflow_version))\n    workflow = get_workflow(workflow_id, workflow_version)\n    for stage in workflow['stages']:\n        stage['status'] = 'none'\n    pb_config['workflow_parameters'] = pb_config['workflow']['parameters']\n    pb_config['workflow_id'] = pb_config['workflow']['id']\n    pb_config['workflow_version'] = pb_config['workflow']['version']\n    pb_config['workflow_stages'] = workflow['stages']\n    pb_config.pop('workflow', None)", "docstring": "Update the PB configuration workflow definition.\n\nArgs:\npb_config (dict): PB configuration dictionary\n\nRaises:\nRunTimeError, if the workflow definition (id, version)\nspecified in the sbi_config is not known.", "source": "codesearchnet"}
{"code": "def add_connection(self, name, **kwargs):\n    name = 'connection:{}'.format(name)\n    self.add_section(name)\n    for (key, value) in list(kwargs.items()):\n        self.set(name, key, value)\n    self.generate_tags()", "docstring": "Adds a connection to the configuration\n\nThis method will add a connection to the configuration.  The connection\nadded is only available for the lifetime of the object and is not\npersisted.\n\nNote:\nIf a call is made to load() or reload(), any connections added\nwith this method must be re-added to the config instance\n\nArgs:\nname (str): The name of the connection to add to the config.  The\nname provided will automatically be prepended with the string\nconnection:\n**kwargs (dict); The set of properties used to provide the node\nconfiguration", "source": "codesearchnet"}
{"code": "def _getargspec(target):\n    fullargspecs = getfullargspec(target)\n    defaults = fullargspecs.defaults or ()\n    if fullargspecs.kwonlydefaults:\n        defaults += tuple(fullargspecs.kwonlydefaults.values())\n    if not defaults:\n        defaults = None\n    argspecs = ArgSpec(args=fullargspecs.args + fullargspecs.kwonlyargs, varargs=fullargspecs.varargs, keywords=fullargspecs.varkw, defaults=defaults)\n    return argspecs", "docstring": "A python3 version of getargspec.\n\nCalls `getfullargspec` and assigns args, varargs,\nvarkw, and defaults to a python 2/3 compatible `ArgSpec`.\n\nThe parameter name 'varkw' is changed to 'keywords' to fit the\n`ArgSpec` struct.\n\nArgs:\ntarget: the target object to inspect.\n\nReturns:\nAn ArgSpec with args, varargs, keywords, and defaults parameters\nfrom FullArgSpec.", "source": "github-repos"}
{"code": "def from_preset(preset):\n        \n        if preset == 'vesta_2019':\n            cut_offs = loadfn(os.path.join(_directory, 'vesta_cutoffs.yaml'))\n            return CutOffDictNN(cut_off_dict=cut_offs)\n        else:\n            raise ValueError(\"Unrecognised preset: {}\".format(preset))", "docstring": "Initialise a CutOffDictNN according to a preset set of cut-offs.\n\nArgs:\npreset (str): A preset name. The list of supported presets are:\n\n- \"vesta_2019\": The distance cut-offs used by the VESTA\nvisualisation program.\n\nReturns:\nA CutOffDictNN using the preset cut-off dictionary.", "source": "juraj-google-style"}
{"code": "def gradients(ys, xs, grad_ys=None):\n  \n  graph = ys[0].graph\n  if not grad_ys:\n    grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys]\n  \n  downstream = set(xs)\n  for op in graph.operations:\n    if op.has_gradient:\n      if set(op.inputs) & downstream:\n        downstream |= set(op.outputs)\n  tensor_to_gradient = dict(zip(ys, grad_ys))\n  for op in graph.operations[::-1]:\n    grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs]\n    if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream):\n      with tf.variable_scope(op.name + \"/gradients\"):\n        input_grads = op.gradient(grad_outputs)\n        for inp, grad in zip(op.inputs, input_grads):\n          if inp in downstream and grad is not None:\n            if inp in tensor_to_gradient:\n              tensor_to_gradient[inp] += grad\n            else:\n              tensor_to_gradient[inp] = grad\n  return [tensor_to_gradient.get(x, None) for x in xs]", "docstring": "Compute gradients in dtf.\n\nArgs:\nys: a list of Tensors\nxs: a list of Tensors\ngrad_ys: an optional list of Tensors\n\nReturns:\ngrad_xs: a list of Tensors", "source": "juraj-google-style"}
{"code": "def _build_vocab(filename, vocab_dir, vocab_name):\n    vocab_path = os.path.join(vocab_dir, vocab_name)\n    if (not tf.gfile.Exists(vocab_path)):\n        with tf.gfile.GFile(filename, 'r') as f:\n            data = f.read().split()\n        counter = collections.Counter(data)\n        count_pairs = sorted(counter.items(), key=(lambda x: ((- x[1]), x[0])))\n        (words, _) = list(zip(*count_pairs))\n        encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)\n        encoder.store_to_file(vocab_path)\n    else:\n        encoder = text_encoder.TokenTextEncoder(vocab_path)\n    return encoder", "docstring": "Reads a file to build a vocabulary.\n\nArgs:\nfilename: file to read list of words from.\nvocab_dir: directory where to save the vocabulary.\nvocab_name: vocab file name.\n\nReturns:\ntext encoder.", "source": "codesearchnet"}
{"code": "def dump(self, content, filepath, indent=4):\n        \n        with open(filepath, 'w') as fp:\n            pyaml.dump(content, dst=fp, indent=indent)", "docstring": "Dump settings content to filepath.\n\nArgs:\ncontent (str): Settings content.\nfilepath (str): Settings file location.", "source": "juraj-google-style"}
{"code": "def __init__(self, ctx):\n    if ctx.options.use_fiddle_overlay:\n        member_map = {'Config': overlay.add_name('Config', BuildableBuilder), 'Partial': overlay.add_name('Partial', BuildableBuilder)}\n    else:\n        member_map = {}\n    ast = ctx.loader.import_name('fiddle')\n    super().__init__(ctx, 'fiddle', member_map, ast)", "docstring": "Initializes the FiddleOverlay.\n\nThis function loads the AST for the fiddle module, which is used to\naccess type information for any members that are not explicitly provided by\nthe overlay. See get_attribute in attribute.py for how it's used.\n\nArgs:\nctx: An instance of context.Context.", "source": "github-repos"}
{"code": "def _FormatInode(self, event):\n    inode = event.inode\n    if (inode is None):\n        if (hasattr(event, 'pathspec') and hasattr(event.pathspec, 'image_inode')):\n            inode = event.pathspec.image_inode\n    if (inode is None):\n        inode = '-'\n    return inode", "docstring": "Formats the inode.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: inode field.", "source": "codesearchnet"}
{"code": "def torque_on(self):\n        \n        data = []\n        data.append(0x0A)\n        data.append(self.servoid)\n        data.append(RAM_WRITE_REQ)\n        data.append(TORQUE_CONTROL_RAM)\n        data.append(0x01)\n        data.append(0x60)\n        send_data(data)", "docstring": "Enable the torques of Herkulex\n\nIn this mode, position control and velocity control\nwill work.\n\nArgs:\nnone", "source": "juraj-google-style"}
{"code": "def _std(self):\n    variance = tf.cond((self._count > 1), (lambda : (self._var_sum / tf.cast((self._count - 1), tf.float32))), (lambda : (tf.ones_like(self._var_sum) * float('nan'))))\n    return tf.sqrt((variance + 0.0001))", "docstring": "Computes the current estimate of the standard deviation.\n\nNote that the standard deviation is not defined until at least two samples\nwere seen.\n\nReturns:\nTensor of current variance.", "source": "codesearchnet"}
{"code": "def split_by_proportionally_distribute_labels(self, proportions={}, use_lengths=True):\n    identifiers = {}\n    for utterance in self.corpus.utterances.values():\n        if use_lengths:\n            identifiers[utterance.idx] = {l: int((d * 100)) for (l, d) in utterance.label_total_duration().items()}\n        else:\n            identifiers[utterance.idx] = utterance.label_count()\n    splits = utils.get_identifiers_splitted_by_weights(identifiers, proportions)\n    return self._subviews_from_utterance_splits(splits)", "docstring": "Split the corpus into subsets, so the occurrence of the labels is distributed amongst the\nsubsets according to the given proportions.\n\nArgs:\nproportions (dict): A dictionary containing the relative size of the target subsets.\nThe key is an identifier for the subset.\nuse_lengths (bool): If True the lengths of the labels are considered for splitting proportionally,\notherwise only the number of occurrences is taken into account.\n\nReturns:\n(dict): A dictionary containing the subsets with the identifier from the input as key.", "source": "codesearchnet"}
{"code": "def getprop(self, prop_name):\n        \n        return self.shell(\n            ['getprop', prop_name],\n            timeout=DEFAULT_GETPROP_TIMEOUT_SEC).decode('utf-8').strip()", "docstring": "Get a property of the device.\n\nThis is a convenience wrapper for \"adb shell getprop xxx\".\n\nArgs:\nprop_name: A string that is the name of the property to get.\n\nReturns:\nA string that is the value of the property, or None if the property\ndoesn't exist.", "source": "juraj-google-style"}
{"code": "def _save_states(self, state, serialized_readers_entity):\n    mr_id = state.key().id_or_name()\n    fresh_state = model.MapreduceState.get_by_job_id(mr_id)\n    if (not self._check_mr_state(fresh_state, mr_id)):\n        return False\n    if (fresh_state.active_shards != 0):\n        logging.warning('Mapreduce %s already has active shards. Looks like spurious task execution.', mr_id)\n        return None\n    config = util.create_datastore_write_config(state.mapreduce_spec)\n    db.put([state, serialized_readers_entity], config=config)\n    return True", "docstring": "Run transaction to save state.\n\nArgs:\nstate: a model.MapreduceState entity.\nserialized_readers_entity: a model._HugeTaskPayload entity containing\njson serialized input readers.\n\nReturns:\nFalse if a fatal error is encountered and this task should be dropped\nimmediately. True if transaction is successful. None if a previous\nattempt of this same transaction has already succeeded.", "source": "codesearchnet"}
{"code": "def as_graph(self, depth=0):\n    if (depth in self._graph_cache):\n        return self._graph_cache[depth]\n    self._graph_cache[depth] = graph = Graph(self, depth=depth)\n    return graph", "docstring": "Create a graph with self as node, cache it, return it.\n\nArgs:\ndepth (int): depth of the graph.\n\nReturns:\nGraph: an instance of Graph.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):\n    if already_has_special_tokens:\n        if token_ids_1 is not None:\n            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]\n    return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `encode` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids of the first sequence.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`str`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\nReturns:\n`List[int]`:\nThe list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def update_video_status(edx_video_id, status):\n    try:\n        video = _get_video(edx_video_id)\n    except Video.DoesNotExist:\n        error_message = u'Video not found when trying to update video status with edx_video_id: {0}'.format(edx_video_id)\n        raise ValVideoNotFoundError(error_message)\n    video.status = status\n    video.save()", "docstring": "Update status for an existing video.\n\nArgs:\nedx_video_id: ID of the video\nstatus: video status\n\nRaises:\nRaises ValVideoNotFoundError if the video cannot be retrieved.", "source": "codesearchnet"}
{"code": "def VisitNamedType(self, node):\n    return pytd.ClassType(node.name)", "docstring": "Converts a named type to a class type, to be filled in later.\n\nArgs:\nnode: The NamedType. This type only has a name.\n\nReturns:\nA ClassType. This ClassType will (temporarily) only have a name.", "source": "github-repos"}
{"code": "def quadratic_jacobian_polynomial(nodes):\n    jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER)\n    jac_at_nodes = np.empty((1, 6), order='F')\n    jac_at_nodes[(0, 0)] = two_by_two_det(jac_parts[(:, :2)])\n    jac_at_nodes[(0, 1)] = two_by_two_det(jac_parts[(:, 2:4)])\n    jac_at_nodes[(0, 2)] = two_by_two_det(jac_parts[(:, 4:6)])\n    jac_at_nodes[(0, 3)] = two_by_two_det(jac_parts[(:, 6:8)])\n    jac_at_nodes[(0, 4)] = two_by_two_det(jac_parts[(:, 8:10)])\n    jac_at_nodes[(0, 5)] = two_by_two_det(jac_parts[(:, 10:)])\n    bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN)\n    return bernstein", "docstring": "r\"\"\"Compute the Jacobian determinant of a quadratic surface.\n\n.. note::\n\nThis is used **only** by :meth:`Surface._compute_valid` (which is\nin turn used to compute / cache the :attr:`Surface.is_valid`\nproperty).\n\nConverts :math:`\\det(J(s, t))` to a polynomial on the reference\ntriangle and represents it as a surface object.\n\n.. note::\n\nThis assumes that ``nodes`` is ``2 x 6`` but doesn't verify this.\n(However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER``\nwould fail if ``nodes`` wasn't ``R x 6`` and then the ensuing\ndeterminants would fail if there weren't 2 rows.)\n\nArgs:\nnodes (numpy.ndarray): A 2 x 6 array of nodes in a surface.\n\nReturns:\nnumpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.", "source": "codesearchnet"}
{"code": "def Match(self, event):\n    \n    if not self._matcher:\n      return True\n\n    self._decision = self._matcher.Matches(event)\n    return self._decision", "docstring": "Determines if an event matches the filter.\n\nArgs:\nevent (EventObject): an event.\n\nReturns:\nbool: True if the event matches the filter.", "source": "juraj-google-style"}
{"code": "def day_of_year(self):\n    if self._day_of_year is None:\n        cumul_days_in_month_nonleap = tf.math.cumsum(_DAYS_IN_MONTHS_NON_LEAP, exclusive=True)\n        cumul_days_in_month_leap = tf.math.cumsum(_DAYS_IN_MONTHS_LEAP, exclusive=True)\n        days_before_month_non_leap = tf.gather(cumul_days_in_month_nonleap, self.month() - 1)\n        days_before_month_leap = tf.gather(cumul_days_in_month_leap, self.month() - 1)\n        days_before_month = tf.where(date_utils.is_leap_year(self.year()), days_before_month_leap, days_before_month_non_leap)\n        self._day_of_year = days_before_month + self.day()\n    return self._day_of_year", "docstring": "Calculates the number of days since the beginning of the year.\n\nReturns:\nTensor of int32 type with elements in range [1, 366]. January 1st yields\n\"1\".\n\n#### Example\n\n```python\ndt = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)])\ndt.day_of_year()  # [25, 62]\n```", "source": "github-repos"}
{"code": "def next(self) -> 'ArrayEntry':\n    try:\n        (newval, naft) = self.after.pop()\n    except IndexError:\n        raise NonexistentInstance(self.json_pointer(), 'next of last') from None\n    return ArrayEntry((self.index + 1), self.before.cons(self.value), naft, newval, self.parinst, self.schema_node, self.timestamp)", "docstring": "Return an instance node corresponding to the next entry.\n\nRaises:\nNonexistentInstance: If the receiver is the last entry of the parent array.", "source": "codesearchnet"}
{"code": "def gene_filter(self, query, mongo_query):\n    LOG.debug('Adding panel and genes-related parameters to the query')\n    gene_query = []\n    if (query.get('hgnc_symbols') and query.get('gene_panels')):\n        gene_query.append({'hgnc_symbols': {'$in': query['hgnc_symbols']}})\n        gene_query.append({'panels': {'$in': query['gene_panels']}})\n        mongo_query['$or'] = gene_query\n    else:\n        if query.get('hgnc_symbols'):\n            hgnc_symbols = query['hgnc_symbols']\n            mongo_query['hgnc_symbols'] = {'$in': hgnc_symbols}\n            LOG.debug(('Adding hgnc_symbols: %s to query' % ', '.join(hgnc_symbols)))\n        if query.get('gene_panels'):\n            gene_panels = query['gene_panels']\n            mongo_query['panels'] = {'$in': gene_panels}\n    return gene_query", "docstring": "Adds gene-related filters to the query object\n\nArgs:\nquery(dict): a dictionary of query filters specified by the users\nmongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\nmongo_query(dict): returned object contains gene and panel-related filters", "source": "codesearchnet"}
{"code": "def Named(self, name):\n    self._name = name\n    return self", "docstring": "Adds a prefix to the subject, when it is displayed in error messages.\n\nThis is especially useful in the context of types that have no helpful\nstring representation (e.g., boolean). Writing\nAssertThat(foo).Named('foo').IsTrue()\nthen results in a more reasonable error.\n\nArgs:\nname: string, the name to display along with the actual value.\n\nReturns:\nself", "source": "github-repos"}
{"code": "def add_cookie_header(self, request, referrer_host=None):\n        \n        new_request = convert_http_request(request, referrer_host)\n        self._cookie_jar.add_cookie_header(new_request)\n\n        request.fields.clear()\n\n        for name, value in new_request.header_items():\n            request.fields.add(name, value)", "docstring": "Wrapped ``add_cookie_header``.\n\nArgs:\nrequest: An instance of :class:`.http.request.Request`.\nreferrer_host (str): An hostname or IP address of the referrer\nURL.", "source": "juraj-google-style"}
{"code": "def persist_as_png(structure_dict, filepath):\n    \n    graph = _create_graph(structure_dict)\n    graph.write(filepath, format='png')", "docstring": "Saves pipeline diagram to disk as png file.\n\nArgs:\nstructure_dict (dict): dict returned by\n:func:`~steppy.base.Step.upstream_structure`\nfilepath (str): filepath to which the png with pipeline visualization should be persisted", "source": "juraj-google-style"}
{"code": "def download(self, url_or_urls):\n    \n    \n    with self._downloader.tqdm():\n      return _map_promise(self._download, url_or_urls)", "docstring": "Download given url(s).\n\nArgs:\nurl_or_urls: url or `list`/`dict` of urls to download and extract. Each\nurl can be a `str` or `tfds.download.Resource`.\n\nReturns:\ndownloaded_path(s): `str`, The downloaded paths matching the given input\nurl_or_urls.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_state):\n    num_channels = hidden_state.shape[-1]\n    x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()\n    q, ctx, gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)\n    ctx_all = 0\n    for level in range(self.focal_level):\n        ctx = self.focal_layers[level](ctx)\n        ctx_all = ctx_all + ctx * gates[:, level:level + 1]\n    ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))\n    ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:]\n    if self.normalize_modulator:\n        ctx_all = ctx_all / (self.focal_level + 1)\n    modulator = self.projection_context(ctx_all)\n    x_out = q * modulator\n    x_out = x_out.permute(0, 2, 3, 1).contiguous()\n    if self.use_post_layernorm_in_modulation:\n        x_out = self.layernorm(x_out)\n    x_out = self.projection_out(x_out)\n    x_out = self.projection_dropout(x_out)\n    return x_out", "docstring": "Args:\nhidden_state:\nInput features with shape of (batch_size, height, width, num_channels)", "source": "github-repos"}
{"code": "class BayesianDetectorConfig(PretrainedConfig):\n\n    def __init__(self, watermarking_depth: Optional[int]=None, base_rate: float=0.5, **kwargs):\n        self.watermarking_depth = watermarking_depth\n        self.base_rate = base_rate\n        self.model_name = None\n        self.watermarking_config = None\n        super().__init__(**kwargs)\n\n    def set_detector_information(self, model_name, watermarking_config):\n        self.model_name = model_name\n        self.watermarking_config = watermarking_config", "docstring": "This is the configuration class to store the configuration of a [`BayesianDetectorModel`]. It is used to\ninstantiate a Bayesian Detector model according to the specified arguments.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\nwatermarking_depth (`int`, *optional*):\nThe number of tournament layers.\nbase_rate (`float1`, *optional*, defaults to 0.5):\nPrior probability P(w) that a text is watermarked.", "source": "github-repos"}
{"code": "def predict_proba(self, a, b, **kwargs):\n        \n        return self.cds_score(b, a) - self.cds_score(a, b)", "docstring": "Infer causal relationships between 2 variables using the CDS statistic\n\nArgs:\na (numpy.ndarray): Variable 1\nb (numpy.ndarray): Variable 2\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "juraj-google-style"}
{"code": "def get_parameters(params=None, path='', grad_only=True):\n    \n\n    global current_scope\n    if params is None:\n        params = OrderedDict()\n    for k, v in iteritems(current_scope):\n        if isinstance(v, dict):\n            with parameter_scope(k):\n                params = get_parameters(\n                    params, '/'.join([path, k]) if path else k, grad_only=grad_only)\n        else:\n            assert isinstance(v, nn.Variable)\n            if not grad_only or v.need_grad:\n                params['/'.join([path, k]) if path else k] = v\n    return params", "docstring": "Get parameter Variables under the current parameter scope.\n\nArgs:\nparams (dict): Internal use. User doesn't set it manually.\npath (str): Internal use.  User doesn't set it manually.\ngrad_only (bool): Retrieve all parameters under the current scope if\nFalse, while only parameters with need_grad=True are retrieved\nif True.\n\nReturns:\ndict: {:obj:`str` : :obj:`~nnabla.Variable`}", "source": "juraj-google-style"}
{"code": "def move(self, x, y):\n        \n        self._cursor = self._normalizePoint(x, y)", "docstring": "Move the virtual cursor.\n\nArgs:\nx (int): x-coordinate to place the cursor.\ny (int): y-coordinate to place the cursor.\n\n.. seealso:: :any:`get_cursor`, :any:`print_str`, :any:`write`", "source": "juraj-google-style"}
{"code": "def parse_str_to_expression(fiql_str):\n    nesting_lvl = 0\n    last_element = None\n    expression = Expression()\n    for (preamble, selector, comparison, argument) in iter_parse(fiql_str):\n        if preamble:\n            for char in preamble:\n                if (char == '('):\n                    if isinstance(last_element, BaseExpression):\n                        raise FiqlFormatException(('%s can not be followed by %s' % (last_element.__class__, Expression)))\n                    expression = expression.create_nested_expression()\n                    nesting_lvl += 1\n                elif (char == ')'):\n                    expression = expression.get_parent()\n                    last_element = expression\n                    nesting_lvl -= 1\n                else:\n                    if (not expression.has_constraint()):\n                        raise FiqlFormatException(('%s proceeding initial %s' % (Operator, Constraint)))\n                    if isinstance(last_element, Operator):\n                        raise FiqlFormatException(('%s can not be followed by %s' % (Operator, Operator)))\n                    last_element = Operator(char)\n                    expression = expression.add_operator(last_element)\n        if selector:\n            if isinstance(last_element, BaseExpression):\n                raise FiqlFormatException(('%s can not be followed by %s' % (last_element.__class__, Constraint)))\n            last_element = Constraint(selector, comparison, argument)\n            expression.add_element(last_element)\n    if (nesting_lvl != 0):\n        raise FiqlFormatException('At least one nested expression was not correctly closed')\n    if (not expression.has_constraint()):\n        raise FiqlFormatException((\"Parsed string '%s' contained no constraint\" % fiql_str))\n    return expression", "docstring": "Parse a FIQL formatted string into an ``Expression``.\n\nArgs:\nfiql_str (string): The FIQL formatted string we want to parse.\n\nReturns:\nExpression: An ``Expression`` object representing the parsed FIQL\nstring.\n\nRaises:\nFiqlFormatException: Unable to parse string due to incorrect\nformatting.\n\nExample:\n\n>>> expression = parse_str_to_expression(\n...         \"name==bar,dob=gt=1990-01-01\")", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n\t\t\n\t\tif kwargs:\n\t\t\tself.attributes = {}\n\t\t\tself.attributes.update(**kwargs)\n\t\telse:\n\t\t\tself.attributes = dict.fromkeys(self.__class__.disp_attr_keys)", "docstring": "Initializes class attributes\nArgs:\nkwargs\t\t\ttakes kwargs or a dict", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):\n    residual = hidden_states\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nInput to the layer.\nattention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nAttention mask.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings, to be added to `hidden_states`.\nreference_points (`torch.FloatTensor`, *optional*):\nReference points.\nspatial_shapes (`torch.LongTensor`, *optional*):\nSpatial shapes of the backbone feature maps.\nlevel_start_index (`torch.LongTensor`, *optional*):\nLevel start index.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def insert(self,\n               entity_id,\n               property_uri,\n               value):\n        \n        if not entity_id.startswith(\"http\"):\n            entity_uri = urllib.parse.urljoin(self.base_url, entity_id)\n        else:\n            entity_uri = entity_id\n        if entity_uri.endswith(\"/\"):\n            entity_uri = entity_uri[:-1]\n        if not entity_id.endswith(\"fcr:metadata\"):\n            entity_uri = \"/\".join([entity_uri, \"fcr:metadata\"])\n        if not self.exists(entity_id):\n            self.create(entity_id)\n        sparql_template = Template()\n        sparql = sparql_template.substitute(\n            prefix=build_prefixes(self.namespaces),\n            entity=entity_uri,\n            prop_uri=property_uri,\n            value_str=self.__value_format__(value))\n        update_request = urllib.request.Request(\n            entity_uri,\n            data=sparql.encode(),\n            method='PATCH',\n            headers={'Content-Type': 'application/sparql-update'})\n        try:\n            response = urllib.request.urlopen(update_request)\n        except urllib.error.HTTPError:\n            print(\"Error trying patch {}, sparql=\\n{}\".format(entity_uri,\n                sparql))\n            return False\n        if response.code < 400:\n            return True\n        return False", "docstring": "Method inserts a new entity's property in Fedora4 Repository\n\nArgs:\nentity_id(string): Unique ID of Fedora object\nproperty_uri(string): URI of property\nvalue: Value of the property, can be literal or URI reference\n\nReturns:\nboolean: True if successful changed in Fedora, False otherwise", "source": "juraj-google-style"}
{"code": "def _recurse(self, matrix, m_list, indices, output_m_list=[]):\n        \n        \n        if self._finished:\n            return\n\n        \n        while m_list[-1][1] == 0:\n            m_list = copy(m_list)\n            m_list.pop()\n            \n            if not m_list:\n                matrix_sum = np.sum(matrix)\n                if matrix_sum < self._current_minimum:\n                    self.add_m_list(matrix_sum, output_m_list)\n                return\n\n        \n        if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):\n            return\n\n        if len(m_list) == 1 or m_list[-1][1] > 1:\n            if self.best_case(matrix, m_list, indices) > self._current_minimum:\n                return\n\n        index = self.get_next_index(matrix, m_list[-1], indices)\n\n        m_list[-1][2].remove(index)\n\n        \n        \n        matrix2 = np.copy(matrix)\n        m_list2 = deepcopy(m_list)\n        output_m_list2 = copy(output_m_list)\n\n        matrix2[index, :] *= m_list[-1][0]\n        matrix2[:, index] *= m_list[-1][0]\n        output_m_list2.append([index, m_list[-1][3]])\n        indices2 = copy(indices)\n        indices2.remove(index)\n        m_list2[-1][1] -= 1\n\n        \n\n        self._recurse(matrix2, m_list2, indices2, output_m_list2)\n        self._recurse(matrix, m_list, indices, output_m_list)", "docstring": "This method recursively finds the minimal permutations using a binary\ntree search strategy.\n\nArgs:\nmatrix: The current matrix (with some permutations already\nperformed).\nm_list: The list of permutations still to be performed\nindices: Set of indices which haven't had a permutation\nperformed on them.", "source": "juraj-google-style"}
{"code": "def tpu_conv1d(inputs, filters, kernel_size, padding=\"SAME\", name=\"tpu_conv1d\"):\n  \n  if kernel_size == 1:\n    return dense(inputs, filters, name=name, use_bias=True)\n  if padding == \"SAME\":\n    assert kernel_size % 2 == 1\n    first_offset = -((kernel_size - 1) \n  else:\n    assert padding == \"LEFT\"\n    first_offset = -(kernel_size - 1)\n  last_offset = first_offset + kernel_size - 1\n  results = []\n  padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]])\n  for i in range(kernel_size):\n    shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs\n    shifted.set_shape(inputs.get_shape())\n    results.append(\n        dense(shifted, filters, use_bias=(i == 0), name=name + \"_%d\" % i))\n  ret = tf.add_n(results)\n  ret *= kernel_size**-0.5\n  return ret", "docstring": "Version of conv1d that works on TPU (as of 11/2017).\n\nArgs:\ninputs: a Tensor with shape [batch, length, input_depth].\nfilters: an integer.\nkernel_size: an integer.\npadding: a string - \"SAME\" or \"LEFT\".\nname: a string.\n\nReturns:\na Tensor with shape [batch, length, filters].", "source": "juraj-google-style"}
{"code": "def report_proto_path(self, trace_dir, summary_tag_name):\n    filename = _TT_REPORT_PROTO + '.' + summary_tag_name.replace('/', '_')\n    return os.path.join(trace_dir, filename)", "docstring": "Returns the path where report proto should be written.\n\nArgs:\ntrace_dir: String denoting the trace directory.\nsummary_tag_name: Name of the unique tag that relates to\nthe report.\nReturns:\nA string denoting the path to the report proto.", "source": "github-repos"}
{"code": "def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):\n    total_cm = metric_variable([num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')\n    predictions = math_ops.cast(predictions, dtypes.int64)\n    labels = math_ops.cast(labels, dtypes.int64)\n    num_classes = math_ops.cast(num_classes, dtypes.int64)\n    if predictions.get_shape().ndims > 1:\n        predictions = array_ops.reshape(predictions, [-1])\n    if labels.get_shape().ndims > 1:\n        labels = array_ops.reshape(labels, [-1])\n    if weights is not None and weights.get_shape().ndims > 1:\n        weights = array_ops.reshape(weights, [-1])\n    current_cm = confusion_matrix.confusion_matrix(labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)\n    update_op = state_ops.assign_add(total_cm, current_cm)\n    return (total_cm, update_op)", "docstring": "Calculate a streaming confusion matrix.\n\nCalculates a confusion matrix. For estimation over a stream of data,\nthe function creates an  `update_op` operation.\n\nArgs:\nlabels: A `Tensor` of ground truth labels with shape [batch size] and of\ntype `int32` or `int64`. The tensor will be flattened if its rank > 1.\npredictions: A `Tensor` of prediction results for semantic labels, whose\nshape is [batch size] and type `int32` or `int64`. The tensor will be\nflattened if its rank > 1.\nnum_classes: The possible number of labels the prediction task can\nhave. This value must be provided, since a confusion matrix of\ndimension = [num_classes, num_classes] will be allocated.\nweights: Optional `Tensor` whose rank is either 0, or the same rank as\n`labels`, and must be broadcastable to `labels` (i.e., all dimensions must\nbe either `1`, or the same as the corresponding `labels` dimension).\n\nReturns:\ntotal_cm: A `Tensor` representing the confusion matrix.\nupdate_op: An operation that increments the confusion matrix.", "source": "github-repos"}
{"code": "def __init__(self, json, Api):\n        \n        self.json = json\n        self.Api = Api\n\n        self.type = json[\"type\"]\n        self.content = json[\"content\"]\n        self.timestamp = json[\"origin_server_ts\"]\n        self.id = json[\"room_id\"]\n        if \"sender\" in json:\n            self.mxid = json[\"sender\"]\n        else:\n            self.mxid = json[\"user_id\"]", "docstring": "Instantiates Event instance.\n\nArgs:\njson(dict): Event json from homeserver.\nApi(func): Creates api for calling homeserver.", "source": "juraj-google-style"}
{"code": "def run(self):\n    target = getattr(self, '_Thread__target', getattr(self, '_target', None))\n    args = getattr(self, '_Thread__args', getattr(self, '_args', None))\n    kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None))\n    if (target is not None):\n        self._return = target(*args, **kwargs)\n    return None", "docstring": "Runs the thread.\n\nArgs:\nself (ThreadReturn): the ``ThreadReturn`` instance\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def CreateClass(cls, data_type_definition):\n    cls._ValidateDataTypeDefinition(data_type_definition)\n    class_definition = cls._CreateClassTemplate(data_type_definition)\n    namespace = {'__builtins__': {'object': builtins.object, 'super': builtins.super}, '__name__': '{0:s}'.format(data_type_definition.name)}\n    if (sys.version_info[0] >= 3):\n        namespace['__builtins__']['__build_class__'] = builtins.__build_class__\n    exec(class_definition, namespace)\n    return namespace[data_type_definition.name]", "docstring": "Creates a new structure values class.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nclass: structure values class.", "source": "codesearchnet"}
{"code": "def declaration_path(decl):\n    \n\n    if not decl:\n        return []\n    if not decl.cache.declaration_path:\n        result = [decl.name]\n        parent = decl.parent\n        while parent:\n            if parent.cache.declaration_path:\n                result.reverse()\n                decl.cache.declaration_path = parent.cache.declaration_path + \\\n                    result\n                return decl.cache.declaration_path\n            else:\n                result.append(parent.name)\n                parent = parent.parent\n        result.reverse()\n        decl.cache.declaration_path = result\n        return result\n\n    return decl.cache.declaration_path", "docstring": "Returns a list of parent declarations names.\n\nArgs:\ndecl (declaration_t): declaration for which declaration path\nshould be calculated.\n\nReturns:\nlist[(str | basestring)]: list of names, where first item is the top\nparent name and last item the inputted\ndeclaration name.", "source": "juraj-google-style"}
{"code": "def _collect_unused(self, start: GridQubit,\n                        used: Set[GridQubit]) -> Set[GridQubit]:\n        \n\n        def collect(n: GridQubit, visited: Set[GridQubit]):\n            visited.add(n)\n            for m in self._c_adj[n]:\n                if m not in used and m not in visited:\n                    collect(m, visited)\n\n        visited = set()  \n        collect(start, visited)\n        return visited", "docstring": "Lists all the qubits that are reachable from given qubit.\n\nArgs:\nstart: The first qubit for which connectivity should be calculated.\nMight be a member of used set.\nused: Already used qubits, which cannot be used during the\ncollection.\n\nReturns:\nSet of qubits that are reachable from starting qubit without\ntraversing any of the used qubits.", "source": "juraj-google-style"}
{"code": "def scaled_dot_product_attention_simple(q, k, v, bias, name=None):\n  \n  with tf.variable_scope(\n      name, default_name=\"scaled_dot_product_attention_simple\"):\n    scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2]))\n    logits = tf.matmul(q * scalar, k, transpose_b=True)\n    if bias is not None:\n      logits += bias\n    weights = tf.nn.softmax(logits, name=\"attention_weights\")\n    if common_layers.should_generate_summaries():\n      tf.summary.image(\n          \"attention\", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1)\n    return tf.matmul(weights, v)", "docstring": "Scaled dot-product attention. One head. One spatial dimension.\n\nArgs:\nq: a Tensor with shape [batch, length_q, depth_k]\nk: a Tensor with shape [batch, length_kv, depth_k]\nv: a Tensor with shape [batch, length_kv, depth_v]\nbias: optional Tensor broadcastable to [batch, length_q, length_kv]\nname: an optional string\n\nReturns:\nA Tensor.", "source": "juraj-google-style"}
{"code": "def _validated_config_filename(self, name):\n    dir_name = self._make_config_dir()\n    filename = os.path.join(dir_name, (name.split('.json')[0] + '.json'))\n    return filename", "docstring": "Make config dir and return full file path and extension\n\nArgs:\nname (str): Filename without dir or extension\n\nReturns:\nstr: Full path including extension", "source": "codesearchnet"}
{"code": "def select_by_value(self, value):\n    self._selected_key = None\n    self._selected_item = None\n    for k in self.children:\n        item = self.children[k]\n        item.attributes['selected'] = False\n        if (value == item.get_value()):\n            self._selected_key = k\n            self._selected_item = item\n            self._selected_item.attributes['selected'] = True", "docstring": "Selects an item by the text content of the child.\n\nArgs:\nvalue (str): Text content of the item that have to be selected.", "source": "codesearchnet"}
{"code": "def market_close(self, session, mins) -> Session:\n    if (session not in self.exch):\n        return SessNA\n    end_time = self.exch[session][(- 1)]\n    return Session(shift_time(end_time, ((- int(mins)) + 1)), end_time)", "docstring": "Time intervals for market close\n\nArgs:\nsession: [allday, day, am, pm, night]\nmins: mintues before close\n\nReturns:\nSession of start_time and end_time", "source": "codesearchnet"}
{"code": "def HandleGetBlocksMessageReceived(self, payload):\n        \n        if not self.leader.ServiceEnabled:\n            return\n\n        inventory = IOHelper.AsSerializableWithType(payload, 'neo.Network.Payloads.GetBlocksPayload.GetBlocksPayload')\n        if not inventory:\n            return\n\n        blockchain = BC.Default()\n        hash = inventory.HashStart[0]\n        if not blockchain.GetHeader(hash):\n            return\n\n        hashes = []\n        hcount = 0\n        while hash != inventory.HashStop and hcount < 500:\n            hash = blockchain.GetNextBlockHash(hash)\n            if hash is None:\n                break\n            hashes.append(hash)\n            hcount += 1\n        if hcount > 0:\n            self.SendSerializedMessage(Message('inv', InvPayload(type=InventoryType.Block, hashes=hashes)))", "docstring": "Process a GetBlocksPayload payload.\n\nArgs:\npayload (neo.Network.Payloads.GetBlocksPayload):", "source": "juraj-google-style"}
{"code": "def setup_formatters(self, *args):\n        \n        formatters = []\n        col_offset = 0\n        \n        if self.rownum:\n            formatters.append(fmt.RowNumberFormatter.setup(0))\n            col_offset += 1\n        if self.timestamp:\n            formatters.append(fmt.DatetimeFormatter.setup(\n                datetime.datetime.now(),\n                fmt='{:%Y-%m-%d %H:%M:%S.%f}'.format,\n                col_width=26))\n            col_offset += 1\n        if self.time_diff:\n            formatters.append(fmt.TimeDeltaFormatter.setup(0))\n            col_offset += 1\n\n        \n        for coli, value in enumerate(args):\n            fmt_class = type2fmt.get(type(value), fmt.GenericFormatter)\n            kwargs = {}\n\n            \n            if self.default_colwidth is not None:\n                kwargs['col_width'] = self.default_colwidth\n            if coli in self.column_widths:\n                kwargs['col_width'] = self.column_widths[coli]\n            elif self.columns and self.columns[coli + col_offset] in self.column_widths:\n                kwargs['col_width'] = self.column_widths[self.columns[coli + col_offset]]\n\n            \n            if fmt_class == fmt.FloatFormatter and self.float_format is not None:\n                kwargs['fmt'] = self.float_format\n            if coli in self.column_formatters:\n                kwargs['fmt'] = self.column_formatters[coli]\n            elif self.columns and self.columns[coli + col_offset] in self.column_formatters:\n                kwargs['fmt'] = self.column_formatters[self.columns[coli + col_offset]]\n\n            formatter = fmt_class.setup(value, **kwargs)\n            formatters.append(formatter)\n\n        self.formatters = formatters", "docstring": "Setup formatters by observing the first row.\n\nArgs:\n*args: row cells", "source": "juraj-google-style"}
{"code": "def _is_sequence_right_padded(mask):\n    max_seq_length = tf.shape(mask)[1]\n    count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)\n    right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length)\n    return tf.reduce_all(tf.equal(tf.cast(mask, dtype='bool'), tf.cast(right_padded_mask, dtype='bool')))", "docstring": "Check the mask tensor and see if it right padded.\n\ncuDNN uses the sequence length param to skip the tailing\ntimestep. If the data is left padded, or not a strict right padding (has\nmasked value in the middle of the sequence), then cuDNN won't work\nproperly in those cases.\n\nLeft padded data: [[False, False, True, True, True]].\nRight padded data: [[True, True, True, False, False]].\nMixture of mask/unmasked data: [[True, False, True, False, False]].\n\nNote that for the mixed data example above, the actually data RNN should see\nare those 2 Trues (index 0 and 2), the index 1 False should be ignored and\nnot pollute the internal states.\n\nArgs:\nmask: the Boolean tensor with shape [batch, timestep]\n\nReturns:\nboolean scalar tensor, whether the mask is strictly right padded.", "source": "github-repos"}
{"code": "def transform(self, tables, table_metas=None, missing=None):\n    if (missing is None):\n        missing = self.missing\n    else:\n        self.missing = missing\n        warnings.warn(DEPRECATION_MESSAGE.format('transform'), DeprecationWarning)\n    transformed = {}\n    for table_name in tables:\n        table = tables[table_name]\n        if (table_metas is None):\n            table_meta = self.table_dict[table_name][1]\n        else:\n            table_meta = table_metas[table_name]\n        transformed[table_name] = self.transform_table(table, table_meta)\n    return transformed", "docstring": "Apply all the saved transformers to `tables`.\n\nArgs:\ntables(dict):   mapping of table names to `tuple` where each tuple is on the form\n(`pandas.DataFrame`, `dict`). The `DataFrame` contains the table data\nand the `dict` the corresponding meta information.\nIf not specified, the tables will be retrieved using the meta_file.\n\ntable_metas(dict):  Full metadata file for the dataset.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\ndict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).", "source": "codesearchnet"}
{"code": "def file_digest(source):\n    \n    hash_sha256 = hashlib.sha256()\n\n    should_close = False\n\n    if isinstance(source, six.string_types):\n        should_close = True\n        source = open(source, 'rb')\n\n    for chunk in iter(lambda: source.read(_BUFFER_SIZE), b''):\n        hash_sha256.update(chunk)\n\n    if should_close:\n        source.close()\n\n    return hash_sha256.hexdigest()", "docstring": "Calculates SHA256 digest of a file.\n\nArgs:\nsource: either a file-like object or a path to file", "source": "juraj-google-style"}
{"code": "def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):\n  \n  def get_gpu_info():\n    \n    gpu_info = subprocess.check_output([\"nvidia-smi\", \"--format=csv,noheader,nounits\", \"--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu\"]).decode()\n    gpu_info = gpu_info.split('\\n')\n\n    gpu_info_array = []\n\n    \n    for line in gpu_info:\n      if len(line) > 0:\n        gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split(',')\n        gpu_memory_util = float(used_memory) / float(total_memory)\n        gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))\n\n    return(gpu_info_array)\n\n  \n  num_times_to_average = 5\n  current_array = []\n  for ind in range(num_times_to_average):\n    current_array.append(get_gpu_info())\n    time.sleep(1)\n\n  \n  num_gpus = len(current_array[0])\n\n  \n  avg_array = [(0, 0, str(x)) for x in range(num_gpus)]\n  for ind in range(num_times_to_average):\n    for gpu_ind in range(num_gpus):\n      avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2])\n\n  for gpu_ind in range(num_gpus):\n    avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2])\n\n  avg_array.sort()\n\n  gpus_found = 0\n  gpus_to_use = \"\"\n  free_memory = 1.0\n  \n  \n  for current_gpu in avg_array:\n    if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory:\n      if gpus_found == 0:\n        gpus_to_use = current_gpu[2]\n        free_memory = 1 - current_gpu[1]\n      else:\n        gpus_to_use = gpus_to_use + \",\" + current_gpu[2]\n        free_memory = min(free_memory, 1 - current_gpu[1])\n\n      gpus_found = gpus_found + 1\n\n    if gpus_found == num_gpu:\n      break\n\n  return gpus_to_use, free_memory", "docstring": "Get available GPUs according to utilization thresholds.\n\nArgs:\n:max_gpu_utilization: percent utilization threshold to consider a GPU \"free\"\n:min_free_memory: percent free memory to consider a GPU \"free\"\n:num_gpu: number of requested GPUs\n\nReturns:\nA tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory\nis the lowest amount of free memory available on the available_gpus.", "source": "juraj-google-style"}
{"code": "def buid(valu=None):\n    \n    if valu is None:\n        return os.urandom(32)\n\n    byts = s_msgpack.en(valu)\n    return hashlib.sha256(byts).digest()", "docstring": "A binary GUID like sequence of 32 bytes.\n\nArgs:\nvalu (object): Optional, if provided, the hash of the msgpack\nencoded form of the object is returned. This can be used to\ncreate stable buids.\n\nNotes:\nBy default, this returns a random 32 byte value.\n\nReturns:\nbytes: A 32 byte value.", "source": "juraj-google-style"}
{"code": "def daylight_saving_start_day(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `daylight_saving_start_day`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `daylight_saving_start_day`')\n    self._daylight_saving_start_day = value", "docstring": "Corresponds to IDD Field `daylight_saving_start_day`\n\nArgs:\nvalue (str): value for IDD Field `daylight_saving_start_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_images_by_catid(self, catid):\n        \n\n        self.logger.debug('Retrieving IDAHO metadata')\n\n        \n        footprint = self.catalog.get_strip_footprint_wkt(catid)\n\n        \n        try:\n            footprint = from_wkt(footprint).geoms[0].wkt\n        except:\n            pass\n\n        if not footprint:\n            self.logger.debug( % catid)\n            return None\n\n        return self.get_images_by_catid_and_aoi(catid=catid,\n                                                aoi_wkt=footprint)", "docstring": "Retrieves the IDAHO image records associated with a given catid.\nArgs:\ncatid (str): The source catalog ID from the platform catalog.\nReturns:\nresults (json): The full catalog-search response for IDAHO images\nwithin the catID.", "source": "juraj-google-style"}
{"code": "def _indent(lines, prefix=\"  \"):\n    \n    indented = []\n    for line in lines.split(\"\\n\"):\n        indented.append(prefix + line)\n    return \"\\n\".join(indented)", "docstring": "Indent some text.\n\nNote that this is present as ``textwrap.indent``, but not in Python 2.\n\nArgs:\nlines (str): The newline delimited string to be indented.\nprefix (Optional[str]): The prefix to indent each line with. Default\nto two spaces.\n\nReturns:\nstr: The newly indented content.", "source": "juraj-google-style"}
{"code": "def getDelOps(self, buid):\n    return (('prop:del', (buid, self.form.name, self.name, self.storinfo)),)", "docstring": "Get a list of storage operations to delete this property from the buid.\n\nArgs:\nbuid (bytes): The node buid.\n\nReturns:\n(tuple): The storage operations", "source": "codesearchnet"}
{"code": "def IsBlockInNameSpace(nesting_state, is_forward_declaration):\n    if is_forward_declaration:\n        return ((len(nesting_state.stack) >= 1) and isinstance(nesting_state.stack[(- 1)], _NamespaceInfo))\n    return ((len(nesting_state.stack) > 1) and nesting_state.stack[(- 1)].check_namespace_indentation and isinstance(nesting_state.stack[(- 2)], _NamespaceInfo))", "docstring": "Checks that the new block is directly in a namespace.\n\nArgs:\nnesting_state: The _NestingState object that contains info about our state.\nis_forward_declaration: If the class is a forward declared class.\nReturns:\nWhether or not the new block is directly in a namespace.", "source": "codesearchnet"}
{"code": "def read_binary(self, key, b64decode=True, decode=False):\n        \n        data = None\n        if key is not None:\n            data = self.db.read(key.strip())\n            if data is not None:\n                data = json.loads(data)\n                if b64decode:\n                    \n                    data = base64.b64decode(data)\n                    if decode:\n                        try:\n                            \n                            data = data.decode('utf-8')\n                        except UnicodeDecodeError:\n                            \n                            data = data.decode('latin-1')\n        else:\n            self.tcex.log.warning(u'The key field was None.')\n        return data", "docstring": "Read method of CRUD operation for binary data.\n\nArgs:\nkey (string): The variable to read from the DB.\nb64decode (bool): If true the data will be base64 decoded.\ndecode (bool): If true the data will be decoded to a String.\n\nReturns:\n(bytes|string): Results retrieved from DB.", "source": "juraj-google-style"}
{"code": "def setup_spline(self, spline_options=None):\n        \n        self.spline_options = spline_options\n        relative_energies = self.energies - self.energies[0]\n        if scipy_old_piecewisepolynomial:\n            if self.spline_options:\n                raise RuntimeError('Option for saddle point not available with'\n                                   'old scipy implementation')\n            self.spline = PiecewisePolynomial(\n                self.r, np.array([relative_energies, -self.forces]).T,\n                orders=3)\n        else:\n            \n            if self.spline_options.get('saddle_point', '') == 'zero_slope':\n                imax = np.argmax(relative_energies)\n                self.spline = CubicSpline(x=self.r[:imax + 1],\n                                          y=relative_energies[:imax + 1],\n                                          bc_type=((1, 0.0), (1, 0.0)))\n                cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],\n                                       bc_type=((1, 0.0), (1, 0.0)))\n                self.spline.extend(c=cspline2.c, x=cspline2.x[1:])\n            else:\n                self.spline = CubicSpline(x=self.r, y=relative_energies,\n                                          bc_type=((1, 0.0), (1, 0.0)))", "docstring": "Setup of the options for the spline interpolation\n\nArgs:\nspline_options (dict): Options for cubic spline. For example,\n{\"saddle_point\": \"zero_slope\"} forces the slope at the saddle to\nbe zero.", "source": "juraj-google-style"}
{"code": "def _parse_pages_binding(details):\n    \n    pages = _get_td_or_none(\n        details,\n        \"ctl00_ContentPlaceHolder1_tblRowRozsahVazba\"\n    )\n\n    if not pages:\n        return None, None\n\n    binding = None  \n    if \"/\" in pages:\n        binding = pages.split(\"/\")[1].strip()\n        pages = pages.split(\"/\")[0].strip()\n\n    if not pages:\n        pages = None\n\n    return pages, binding", "docstring": "Parse number of pages and binding of the book.\n\nArgs:\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\n(pages, binding): Tuple with two string or two None.", "source": "juraj-google-style"}
{"code": "def ebalance(sdat, tstart=None, tend=None):\n    tseries = sdat.tseries_between(tstart, tend)\n    (rbot, rtop) = misc.get_rbounds(sdat.steps.last)\n    if (rbot != 0):\n        coefsurf = ((rtop / rbot) ** 2)\n        volume = ((rbot * (((rtop / rbot) ** 3) - 1)) / 3)\n    else:\n        coefsurf = 1.0\n        volume = 1.0\n    (dtdt, time) = dt_dt(sdat, tstart, tend)\n    ftop = (tseries['ftop'].values * coefsurf)\n    fbot = tseries['fbot'].values\n    radio = tseries['H_int'].values\n    ebal = ((ftop[1:] - fbot[1:]) + (volume * (dtdt - radio[1:])))\n    return (ebal, time)", "docstring": "Energy balance.\n\nCompute Nu_t - Nu_b + V*dT/dt as a function of time using an explicit\nEuler scheme. This should be zero if energy is conserved.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): time at which the computation should start. Use the\nbeginning of the time series data if set to None.\ntend (float): time at which the computation should end. Use the\nend of the time series data if set to None.\nReturns:\ntuple of :class:`numpy.array`: energy balance and time arrays.", "source": "codesearchnet"}
{"code": "def profile_name_scope(self, options):\n    opts = _build_options(options)\n    tfprof_node = tfprof_output_pb2.GraphNodeProto()\n    try:\n        tfprof_node.ParseFromString(print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))\n    except message.DecodeError as e:\n        sys.stderr.write('Cannot parse returned proto: %s.\\n' % e)\n    return tfprof_node", "docstring": "Profile the statistics of graph nodes, organized by name scope.\n\nArgs:\noptions: A dict of options. See core/profiler/g3doc/options.md.\n\nReturns:\na GraphNodeProto that records the results.", "source": "github-repos"}
{"code": "def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct\nvision model configuration.\n\nReturns:\n[`Pix2StructConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def tokenize(self, text):\n    if self.normalize_text:\n        text = unicodedata.normalize('NFKC', text)\n    output_tokens = []\n    for char in text:\n        if char not in self.vocab:\n            output_tokens.append(self.unk_token)\n            continue\n        output_tokens.append(char)\n    return output_tokens", "docstring": "Tokenizes a piece of text into characters.\n\nFor example, `input = \"apple\"\"` will return as output `[\"a\", \"p\", \"p\", \"l\", \"e\"]`.\n\nArgs:\ntext: A single token or whitespace separated tokens.\nThis should have already been passed through *BasicTokenizer*.\n\nReturns:\nA list of characters.", "source": "github-repos"}
{"code": "def list_vmss_skus(access_token, subscription_id, resource_group, vmss_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/skus', '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List the VM skus available for a VM Scale Set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\n\nReturns:\nHTTP response. JSON body of VM skus.", "source": "codesearchnet"}
{"code": "def encode(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, None]=None, max_length: Optional[int]=None, stride: int=0, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:\n    encoded_inputs = self.encode_plus(text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, padding_side=padding_side, return_tensors=return_tensors, **kwargs)\n    return encoded_inputs['input_ids']", "docstring": "Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.\n\nSame as doing `self.convert_tokens_to_ids(self.tokenize(text))`.\n\nArgs:\ntext (`str`, `List[str]` or `List[int]`):\nThe first sequence to be encoded. This can be a string, a list of strings (tokenized string using the\n`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`\nmethod).\ntext_pair (`str`, `List[str]` or `List[int]`, *optional*):\nOptional second sequence to be encoded. This can be a string, a list of strings (tokenized string using\nthe `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`\nmethod).", "source": "github-repos"}
{"code": "def _request_reports(self, resource_param_name, resources, endpoint_name):\n        \n        params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources]\n        return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)", "docstring": "Sends multiples requests for the resources to a particular endpoint.\n\nArgs:\nresource_param_name: a string name of the resource parameter.\nresources: list of of the resources.\nendpoint_name: VirusTotal endpoint URL suffix.\nReturns:\nA list of the responses.", "source": "juraj-google-style"}
{"code": "def get_members(self, name):\n    grpid = re.search('(\\\\d+)', name).group()\n    command = ('show port-channel %s all-ports' % grpid)\n    config = self.node.enable(command, 'text')\n    return re.findall('\\\\b(?!Peer)Ethernet[\\\\d/]*\\\\b', config[0]['result']['output'])", "docstring": "Returns the member interfaces for the specified Port-Channel\n\nArgs:\nname(str): The Port-channel interface name to return the member\ninterfaces for\n\nReturns:\nA list of physical interface names that belong to the specified\ninterface", "source": "codesearchnet"}
{"code": "def parse(self, arguments):\n    \n    if not isinstance(arguments, list):\n      \n      \n      \n      arguments = [arguments]\n\n    if self.present:\n      \n      values = self.value\n    else:\n      \n      values = []\n\n    for item in arguments:\n      \n      Flag.Parse(self, item)  \n      values.append(self.value)\n\n    \n    self.value = values", "docstring": "Parses one or more arguments with the installed parser.\n\nArgs:\narguments: a single argument or a list of arguments (typically a\nlist of default values); a single argument is converted\ninternally into a list containing one item.", "source": "juraj-google-style"}
{"code": "def _get_config():\n    conf_file = os.path.join(_get_config_dir(), 'log_config.toml')\n    if os.path.exists(conf_file):\n        with open(conf_file) as fd:\n            raw_config = fd.read()\n        log_config = toml.loads(raw_config)\n        return log_config\n    conf_file = os.path.join(_get_config_dir(), 'log_config.yaml')\n    if os.path.exists(conf_file):\n        with open(conf_file) as fd:\n            raw_config = fd.read()\n        log_config = yaml.safe_load(raw_config)\n        return log_config\n    return None", "docstring": "Determines if there is a log config in the config directory\nand returns it. If it does not exist, return None.\n\nReturns:\nlog_config (dict): The dictionary to pass to logging.config.dictConfig", "source": "codesearchnet"}
{"code": "def _add_node(self, node):\n        \n        node_id = len(self.node_list)\n        self.node_to_id[node] = node_id\n        self.node_list.append(node)\n        self.adj_list[node_id] = []\n        self.reverse_adj_list[node_id] = []\n        return node_id", "docstring": "Add a new node to node_list and give the node an ID.\nArgs:\nnode: An instance of Node.\nReturns:\nnode_id: An integer.", "source": "juraj-google-style"}
{"code": "def _GetComparable(self, sub_comparable_string=''):\n    string_parts = []\n    string_parts.append(getattr(self.parent, 'comparable', ''))\n    string_parts.append('type: {0:s}'.format(self.type_indicator))\n    if sub_comparable_string:\n        string_parts.append(', {0:s}'.format(sub_comparable_string))\n    string_parts.append('\\n')\n    return ''.join(string_parts)", "docstring": "Retrieves the comparable representation.\n\nThis is a convenience function for constructing comparables.\n\nArgs:\nsub_comparable_string (str): sub comparable string.\n\nReturns:\nstr: comparable representation of the path specification.", "source": "codesearchnet"}
{"code": "def extract_labels(self, f, one_hot=False, num_classes=10):\n        \n        print('Extracting', f.name)\n        with gzip.GzipFile(fileobj=f) as bytestream:\n            magic = self._read32(bytestream)\n            if magic != 2049:\n                raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n                                 (magic, f.name))\n            num_items = self._read32(bytestream)\n            buf = bytestream.read(num_items)\n            labels = np.frombuffer(buf, dtype=np.uint8)\n            if one_hot:\n                return self.dense_to_one_hot(labels, num_classes)\n            return labels", "docstring": "Extract the labels into a 1D uint8 numpy array [index].\nArgs:\nf: A file object that can be passed into a gzip reader.\none_hot: Does one hot encoding for the result.\nnum_classes: Number of classes for the one hot encoding.\nReturns:\nlabels: a 1D unit8 numpy array.\nRaises:\nValueError: If the bystream doesn't start with 2049.", "source": "juraj-google-style"}
{"code": "def extract_simple_optional_location_info(ir_blocks, complex_optional_roots, location_to_optional_roots):\n    location_to_preceding_optional_root_iteritems = six.iteritems({location: optional_root_locations_stack[(- 1)] for (location, optional_root_locations_stack) in six.iteritems(location_to_optional_roots)})\n    simple_optional_root_to_inner_location = {optional_root_location: inner_location for (inner_location, optional_root_location) in location_to_preceding_optional_root_iteritems if (optional_root_location not in complex_optional_roots)}\n    simple_optional_root_locations = set(simple_optional_root_to_inner_location.keys())\n    (_, non_folded_ir_blocks) = extract_folds_from_ir_blocks(ir_blocks)\n    simple_optional_root_info = {}\n    preceding_location = None\n    for current_block in non_folded_ir_blocks:\n        if isinstance(current_block, MarkLocation):\n            preceding_location = current_block.location\n        elif (isinstance(current_block, Traverse) and current_block.optional):\n            if (preceding_location in simple_optional_root_locations):\n                inner_location = simple_optional_root_to_inner_location[preceding_location]\n                (inner_location_name, _) = inner_location.get_location_name()\n                simple_optional_info_dict = {'inner_location_name': inner_location_name, 'edge_field': current_block.get_field_name()}\n                simple_optional_root_info[preceding_location] = simple_optional_info_dict\n    return simple_optional_root_info", "docstring": "Construct a map from simple optional locations to their inner location and traversed edge.\n\nArgs:\nir_blocks: list of IR blocks to extract optional data from\ncomplex_optional_roots: list of @optional locations (location immmediately preceding\nan @optional traverse) that expand vertex fields\nlocation_to_optional_roots: dict mapping from location -> optional_roots where location is\nwithin some number of @optionals and optional_roots is a list\nof optional root locations preceding the successive @optional\nscopes within which the location resides\n\nReturns:\ndict mapping from simple_optional_root_location -> dict containing keys\n- 'inner_location_name': Location object correspoding to the unique MarkLocation present\nwithin a simple optional (one that does not expand vertex fields)\nscope\n- 'edge_field': string representing the optional edge being traversed\nwhere simple_optional_root_to_inner_location is the location preceding the @optional scope", "source": "codesearchnet"}
{"code": "def _configure_tls_parameters(parameters):\n    cert = config.conf['tls']['certfile']\n    key = config.conf['tls']['keyfile']\n    if (cert and key):\n        _log.info('Authenticating with server using x509 (certfile: %s, keyfile: %s)', cert, key)\n        parameters.credentials = pika.credentials.ExternalCredentials()\n    else:\n        (cert, key) = (None, None)\n    if (SSLOptions is None):\n        parameters.ssl = True\n        parameters.ssl_options = {'keyfile': key, 'certfile': cert, 'ca_certs': config.conf['tls']['ca_cert'], 'cert_reqs': ssl.CERT_REQUIRED, 'ssl_version': ssl.PROTOCOL_TLSv1_2}\n    else:\n        ssl_context = ssl.create_default_context()\n        if config.conf['tls']['ca_cert']:\n            try:\n                ssl_context.load_verify_locations(cafile=config.conf['tls']['ca_cert'])\n            except ssl.SSLError as e:\n                raise ConfigurationException('The \"ca_cert\" setting in the \"tls\" section is invalid ({})'.format(e))\n        ssl_context.options |= ssl.OP_NO_SSLv2\n        ssl_context.options |= ssl.OP_NO_SSLv3\n        ssl_context.options |= ssl.OP_NO_TLSv1\n        ssl_context.options |= ssl.OP_NO_TLSv1_1\n        ssl_context.verify_mode = ssl.CERT_REQUIRED\n        ssl_context.check_hostname = True\n        if (cert and key):\n            try:\n                ssl_context.load_cert_chain(cert, key)\n            except ssl.SSLError as e:\n                raise ConfigurationException('The \"keyfile\" setting in the \"tls\" section is invalid ({})'.format(e))\n        parameters.ssl_options = SSLOptions(ssl_context, server_hostname=parameters.host)", "docstring": "Configure the pika connection parameters for TLS based on the configuration.\n\nThis modifies the object provided to it. This accounts for whether or not\nthe new API based on the standard library's SSLContext is available for\npika.\n\nArgs:\nparameters (pika.ConnectionParameters): The connection parameters to apply\nTLS connection settings to.", "source": "codesearchnet"}
{"code": "def cancel(self, subscription_id, data={}, **kwargs):\n        \n        url = \"{}/{}/cancel\".format(self.base_url, subscription_id)\n        return self.post_url(url, data, **kwargs)", "docstring": "Cancel subscription given by subscription_id\n\nArgs:\nsubscription_id : Id for which subscription has to be cancelled\n\nReturns:\nSubscription Dict for given subscription id", "source": "juraj-google-style"}
{"code": "def save_to_object(self):\n    tmpdir = tempfile.mkdtemp('save_to_object', dir=self.logdir)\n    checkpoint_prefix = self.save(tmpdir)\n    data = {}\n    base_dir = os.path.dirname(checkpoint_prefix)\n    for path in os.listdir(base_dir):\n        path = os.path.join(base_dir, path)\n        if path.startswith(checkpoint_prefix):\n            with open(path, 'rb') as f:\n                data[os.path.basename(path)] = f.read()\n    out = io.BytesIO()\n    data_dict = pickle.dumps({'checkpoint_name': os.path.basename(checkpoint_prefix), 'data': data})\n    if (len(data_dict) > 10000000.0):\n        logger.info('Checkpoint size is {} bytes'.format(len(data_dict)))\n    out.write(data_dict)\n    shutil.rmtree(tmpdir)\n    return out.getvalue()", "docstring": "Saves the current model state to a Python object. It also\nsaves to disk but does not return the checkpoint path.\n\nReturns:\nObject holding checkpoint data.", "source": "codesearchnet"}
{"code": "def fix_reference_url(url):\n    new_url = url\n    new_url = fix_url_bars_instead_of_slashes(new_url)\n    new_url = fix_url_add_http_if_missing(new_url)\n    new_url = fix_url_replace_tilde(new_url)\n    try:\n        rfc3987.parse(new_url, rule='URI')\n        return new_url\n    except ValueError:\n        return url", "docstring": "Used to parse an incorect url to try to fix it with the most common ocurrences for errors.\nIf the fixed url is still incorrect, it returns ``None``.\n\nReturns:\nString containing the fixed url or the original one if it could not be fixed.", "source": "codesearchnet"}
{"code": "def validate_slicing_string(slicing_string):\n    return bool(re.search('^\\\\[(\\\\d|,|\\\\s|:)+\\\\]$', slicing_string))", "docstring": "Validate a slicing string.\n\nCheck if the input string contains only brackets, digits, commas and\ncolons that are valid characters in numpy-style array slicing.\n\nArgs:\nslicing_string: (str) Input slicing string to be validated.\n\nReturns:\n(bool) True if and only if the slicing string is valid.", "source": "github-repos"}
{"code": "async def set(self, name, valu, init=False):\n    with s_editatom.EditAtom(self.snap.core.bldgbuids) as editatom:\n        retn = (await self._setops(name, valu, editatom, init))\n        if (not retn):\n            return False\n        (await editatom.commit(self.snap))\n        return True", "docstring": "Set a property on the node.\n\nArgs:\nname (str): The name of the property.\nvalu (obj): The value of the property.\ninit (bool): Set to True to disable read-only enforcement\n\nReturns:\n(bool): True if the property was changed.", "source": "codesearchnet"}
{"code": "def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):\n    matches_by_order = ([0] * max_order)\n    possible_matches_by_order = ([0] * max_order)\n    reference_length = 0\n    translation_length = 0\n    for (references, translation) in zip(reference_corpus, translation_corpus):\n        reference_length += min((len(r) for r in references))\n        translation_length += len(translation)\n        merged_ref_ngram_counts = collections.Counter()\n        for reference in references:\n            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)\n        translation_ngram_counts = _get_ngrams(translation, max_order)\n        overlap = (translation_ngram_counts & merged_ref_ngram_counts)\n        for ngram in overlap:\n            matches_by_order[(len(ngram) - 1)] += overlap[ngram]\n        for order in range(1, (max_order + 1)):\n            possible_matches = ((len(translation) - order) + 1)\n            if (possible_matches > 0):\n                possible_matches_by_order[(order - 1)] += possible_matches\n    precisions = ([0] * max_order)\n    for i in range(0, max_order):\n        if smooth:\n            precisions[i] = ((matches_by_order[i] + 1.0) / (possible_matches_by_order[i] + 1.0))\n        elif (possible_matches_by_order[i] > 0):\n            precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])\n        else:\n            precisions[i] = 0.0\n    if (min(precisions) > 0):\n        p_log_sum = sum((((1.0 / max_order) * math.log(p)) for p in precisions))\n        geo_mean = math.exp(p_log_sum)\n    else:\n        geo_mean = 0\n    ratio = (float(translation_length) / reference_length)\n    if (ratio > 1.0):\n        bp = 1.0\n    else:\n        bp = math.exp((1 - (1.0 / ratio)))\n    bleu = (geo_mean * bp)\n    return (bleu, precisions, bp, ratio, translation_length, reference_length)", "docstring": "Computes BLEU score of translated segments against one or more references.\n\nArgs:\nreference_corpus: list of lists of references for each translation. Each\nreference should be tokenized into a list of tokens.\ntranslation_corpus: list of translations to score. Each translation\nshould be tokenized into a list of tokens.\nmax_order: Maximum n-gram order to use when computing BLEU score.\nsmooth: Whether or not to apply Lin et al. 2004 smoothing.\n\nReturns:\n3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram\nprecisions and brevity penalty.", "source": "codesearchnet"}
{"code": "def CheckFile(self, filename):\n    result = True\n    artifact_reader = reader.YamlArtifactsReader()\n    try:\n        for artifact_definition in artifact_reader.ReadFile(filename):\n            try:\n                self._artifact_registry.RegisterDefinition(artifact_definition)\n            except KeyError:\n                logging.warning('Duplicate artifact definition: {0:s} in file: {1:s}'.format(artifact_definition.name, filename))\n                result = False\n            artifact_definition_supports_macos = (definitions.SUPPORTED_OS_DARWIN in artifact_definition.supported_os)\n            artifact_definition_supports_windows = (definitions.SUPPORTED_OS_WINDOWS in artifact_definition.supported_os)\n            for source in artifact_definition.sources:\n                if (source.type_indicator in (definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH)):\n                    if ((definitions.SUPPORTED_OS_DARWIN in source.supported_os) or (artifact_definition_supports_macos and (not source.supported_os))):\n                        if (not self._CheckMacOSPaths(filename, artifact_definition, source, source.paths)):\n                            result = False\n                    elif (artifact_definition_supports_windows or (definitions.SUPPORTED_OS_WINDOWS in source.supported_os)):\n                        for path in source.paths:\n                            if (not self._CheckWindowsPath(filename, artifact_definition, source, path)):\n                                result = False\n                elif (source.type_indicator == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n                    if ((filename != self.LEGACY_PATH) and self._HasDuplicateRegistryKeyPaths(filename, artifact_definition, source)):\n                        result = False\n                    for key_path in source.keys:\n                        if (not self._CheckWindowsRegistryKeyPath(filename, artifact_definition, key_path)):\n                            result = False\n                elif (source.type_indicator == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):\n                    for key_value_pair in source.key_value_pairs:\n                        if (not self._CheckWindowsRegistryKeyPath(filename, artifact_definition, key_value_pair['key'])):\n                            result = False\n    except errors.FormatError as exception:\n        logging.warning('Unable to validate file: {0:s} with error: {1!s}'.format(filename, exception))\n        result = False\n    return result", "docstring": "Validates the artifacts definition in a specific file.\n\nArgs:\nfilename (str): name of the artifacts definition file.\n\nReturns:\nbool: True if the file contains valid artifacts definitions.", "source": "codesearchnet"}
{"code": "def to_tensor_4x4(self) -> torch.Tensor:\n    tensor = self._trans.new_zeros((*self.shape, 4, 4))\n    tensor[..., :3, :3] = self._rots.get_rot_mats()\n    tensor[..., :3, 3] = self._trans\n    tensor[..., 3, 3] = 1\n    return tensor", "docstring": "Converts a transformation to a homogeneous transformation tensor.\n\nReturns:\nA [*, 4, 4] homogeneous transformation tensor", "source": "github-repos"}
{"code": "def _validate_snapshot(path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, element_spec: Any, compression: str) -> None:\n    error_file = _pywrap_snapshot_utils.TF_DATA_SnapshotErrorFilePath(path)\n    if gfile.Exists(error_file):\n        with gfile.GFile(error_file, 'r') as f:\n            raise ValueError(f'Failed to load tf.data snapshot at {path}. The save job failed to write it. Status: {f.read()}')\n    snapshot_element_spec = _parse_element_spec(metadata.element_spec)\n    if element_spec and element_spec != snapshot_element_spec:\n        raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified element_spec {element_spec}, but the actual element_spec is {snapshot_element_spec}.')\n    if compression and compression != metadata.compression:\n        raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified compression {compression}, but the actual compression is {metadata.compression}.')", "docstring": "Validates a tf.data distributed snapshot.\n\nArgs:\npath: Root path of the distributed snapshot.\nmetadata: The DistributedSnapshotMetadata of the snapshot.\nelement_spec: Dataset element_spec.\ncompression: Compression method used for saving.\n\nRaises:\nValueError if the snapshot is invalid.", "source": "github-repos"}
{"code": "def get_ordered_params(url):\n        \n\n        if url not in URLHelper.__cache:\n            URLHelper.__cache[url] = urlparse(url)\n\n        params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)\n\n        return OrderedDict(sorted(params.items()))", "docstring": "Get the query parameters of the given URL in alphabetical order.\n\nArgs:\nurl (str): The URL to get the query parameters from.\n\nReturns:\nstr: The query parameters", "source": "juraj-google-style"}
{"code": "def generate_dequeue_op(self, tpu_device=0):\n    self.freeze()\n    if self._generated_dequeue_op and (not ops.inside_function()):\n        raise ValueError(\"Can't generate two dequeue Ops from the same queue\")\n    self._generated_dequeue_op = True\n    full_name = '%s/dequeue' % self._name\n    sharded_shapes = [policy.get_sharded_shape(shape) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n    with ops.device(tpu_name_util.core(tpu_device)):\n        values = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n    return tag_sharding_attribute_for_dequeued_tensors(values, self._input_partition_dims)", "docstring": "Generate TPU dequeue ops.\n\nArgs:\ntpu_device: The TPU device ordinal where the infeed instruction should be\nplaced.\n\nReturns:\nA list of Outputs corresponding to a partition of infeed dequeued\ninto XLA, suitable for use within a replicated block.\n\nRaises:\nValueError: if the types or shapes of the tuple elements have not been\nset; or if a dequeue op has already been generated.", "source": "github-repos"}
{"code": "def jsonRender(self, def_buf):\n        \n        try:\n            ret_dict = SerialBlock()\n            ret_dict[Field.Meter_Address] = self.getMeterAddress()\n            for fld in def_buf:\n                compare_fld = fld.upper()\n                if not \"RESERVED\" in compare_fld and not \"CRC\" in compare_fld:\n                    ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n            return \"\"\n        return json.dumps(ret_dict, indent=4)", "docstring": "Translate the passed serial block into string only JSON.\n\nArgs:\ndef_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.\n\nReturns:\nstr: JSON rendering of meter record.", "source": "juraj-google-style"}
{"code": "def design_stat_cooling(self, value=\"Cooling\"):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `design_stat_cooling`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `design_stat_cooling`')\n            vals = set()\n            vals.add(\"Cooling\")\n            if value not in vals:\n                raise ValueError('value {} is not an accepted value for '\n                                 'field `design_stat_cooling`'.format(value))\n\n        self._design_stat_cooling = value", "docstring": "Corresponds to IDD Field `design_stat_cooling`\n\nArgs:\nvalue (str): value for IDD Field `design_stat_cooling`\nAccepted values are:\n- Cooling\nDefault value: Cooling\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def change_numbering(self, rename_dict, inplace=False):\n    output = (self if inplace else self.copy())\n    new_index = [rename_dict.get(key, key) for key in self.index]\n    output.index = new_index\n    if (not inplace):\n        return output", "docstring": "Return the reindexed version of Cartesian.\n\nArgs:\nrename_dict (dict): A dictionary mapping integers on integers.\n\nReturns:\nCartesian: A renamed copy according to the dictionary passed.", "source": "codesearchnet"}
{"code": "def getall(self):\n    vlans_re = re.compile('(?<=^vlan\\\\s)(\\\\d+)', re.M)\n    response = dict()\n    for vid in vlans_re.findall(self.config):\n        response[vid] = self.get(vid)\n    return response", "docstring": "Returns a dict object of all Vlans in the running-config\n\nReturns:\nA dict object of Vlan attributes", "source": "codesearchnet"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = RTDetrFrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `RTDetrFrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def init_config_json(config_file):\n    json_data = None\n    try:\n        if os.path.exists(config_file):\n            with open(config_file) as json_file:\n                json_data = json.load(json_file)\n                return unicode_convert(json_data)\n        else:\n            return None\n    except:\n        (line, filename, synerror) = trace()\n        raise ArcRestHelperError({'function': 'init_config_json', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        json_data = None\n        del json_data\n        gc.collect()", "docstring": "Deserializes a JSON configuration file.\n\nArgs:\nconfig_file (str): The path to the JSON file.\nReturns:\ndict: A dictionary object containing the JSON data. If ``config_file`` does not exist, returns ``None``.", "source": "codesearchnet"}
{"code": "def corpus_token_counts(text_filepattern, corpus_max_lines, split_on_newlines=True):\n    counts = collections.Counter()\n    for doc in _read_filepattern(text_filepattern, max_lines=corpus_max_lines, split_on_newlines=split_on_newlines):\n        counts.update(encode(_native_to_unicode(doc)))\n    mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts))\n    return counts", "docstring": "Read the corpus and compute a dictionary of token counts.\n\nArgs:\ntext_filepattern: A pattern matching one or more files.\ncorpus_max_lines: An integer; maximum total lines to read.\nsplit_on_newlines: A boolean. If true, then split files by lines and strip\nleading and trailing whitespace from each line. Otherwise, treat each\nfile as a single string.\n\nReturns:\na dictionary mapping token to count.", "source": "codesearchnet"}
{"code": "def _add_pos_constrain(token_lst: List[Dict], pos_tags: List) -> List[Dict]:\n        \n\n        result = []\n        for a_token in token_lst:\n            for pos in pos_tags:\n                a_token[attrs.POS] = POS_MAP[pos]\n                result.append(copy.deepcopy(a_token))\n        return result", "docstring": "Add pos tag constrain for some token type, create cross production\nArgs:\ntoken_lst: List[Dict]\npos_tags: List\n\nReturns: List[Dict]", "source": "juraj-google-style"}
{"code": "def popular(self, **kwargs):\n        \n        path = self._get_path('popular')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the list of popular movies on The Movie Database. This list\nrefreshes every day.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def json_to_params(fn=None, return_json=True):\n    \n    def json_to_params_decorator(fn):\n        @handle_type_error\n        @wraps(fn)\n        def json_to_params_wrapper(*args, **kwargs):\n            data = decode_json_body()\n\n            if type(data) in [tuple, list]:\n                args = list(args) + data\n            elif type(data) == dict:\n                \n                allowed_keys = set(data.keys()) - set(kwargs.keys())\n                for key in allowed_keys:\n                    kwargs[key] = data[key]\n            elif type(data) in PRIMITIVE_TYPES:\n                args = list(args)\n                args.append(data)\n\n            if not return_json:\n                return fn(*args, **kwargs)\n\n            return encode_json_body(\n                fn(*args, **kwargs)\n            )\n\n        return json_to_params_wrapper\n\n    if fn:  \n        return json_to_params_decorator(fn)\n\n    return json_to_params_decorator", "docstring": "Convert JSON in the body of the request to the parameters for the wrapped\nfunction.\n\nIf the JSON is list, add it to ``*args``.\n\nIf dict, add it to ``**kwargs`` in non-rewrite mode (no key in ``**kwargs``\nwill be overwritten).\n\nIf single value, add it to ``*args``.\n\nArgs:\nreturn_json (bool, default True): Should the decorator automatically\nconvert returned value to JSON?", "source": "juraj-google-style"}
{"code": "def is_coord_subset(subset, superset, atol=1e-08):\n    c1 = np.array(subset)\n    c2 = np.array(superset)\n    is_close = np.all((np.abs((c1[(:, None, :)] - c2[(None, :, :)])) < atol), axis=(- 1))\n    any_close = np.any(is_close, axis=(- 1))\n    return np.all(any_close)", "docstring": "Tests if all coords in subset are contained in superset.\nDoesn't use periodic boundary conditions\n\nArgs:\nsubset, superset: List of coords\n\nReturns:\nTrue if all of subset is in superset.", "source": "codesearchnet"}
{"code": "def decode(self, encoded):\n    if self.enforce_reversible:\n        self.enforce_reversible = False\n        if (self.encode(self.decode(encoded)) != encoded):\n            raise ValueError(('Decoding is not reversible for \"%s\"' % encoded))\n        self.enforce_reversible = True\n    return encoded", "docstring": "Decodes an object.\n\nArgs:\nobject_ (object): Encoded object.\n\nReturns:\nobject: Object decoded.", "source": "codesearchnet"}
{"code": "def __init__(self, flags: Optional[Sequence[str]]=None, **kwargs) -> None:\n    logging.basicConfig()\n    if isinstance(flags, str):\n        raise ValueError('Flags must be an iterable of of strings, not a single string.')\n    self._flags = flags\n    parser = _BeamArgumentParser(allow_abbrev=False)\n    for cls in type(self).mro():\n        if cls == PipelineOptions:\n            break\n        elif '_add_argparse_args' in cls.__dict__:\n            cls._add_argparse_args(parser)\n    self._visible_options, _ = parser.parse_known_args(flags)\n    self._all_options = kwargs\n    for option_name in self._visible_option_list():\n        if option_name not in self._all_options:\n            self._all_options[option_name] = getattr(self._visible_options, option_name)", "docstring": "Initialize an options class.\n\nThe initializer will traverse all subclasses, add all their argparse\narguments and then parse the command line specified by flags or by default\nthe one obtained from sys.argv.\n\nThe subclasses of PipelineOptions do not need to redefine __init__.\n\nArgs:\nflags: An iterable of command line arguments to be used. If not specified\nthen sys.argv will be used as input for parsing arguments.\n\n**kwargs: Add overrides for arguments passed in flags. For overrides\nof arguments, please pass the `option names` instead of\nflag names.\nOption names: These are defined as dest in the\nparser.add_argument() for each flag. Passing flags\nlike {no_use_public_ips: True}, for which the dest is\ndefined to a different flag name in the parser,\nwould be discarded. Instead, pass the dest of\nthe flag (dest of no_use_public_ips is use_public_ips).", "source": "github-repos"}
{"code": "def check_email_exists_by_subject(self, subject, match_recipient=None):\n    self._mail.select('inbox')\n    try:\n        matches = self.__search_email_by_subject(subject, match_recipient)\n        if (len(matches) <= 0):\n            return False\n        else:\n            return True\n    except Exception as e:\n        raise e", "docstring": "Searches for Email by Subject.  Returns True or False.\n\nArgs:\nsubject (str): Subject to search for.\n\nKwargs:\nmatch_recipient (str) : Recipient to match exactly. (don't care if not specified)\n\nReturns:\nTrue - email found, False - email not found", "source": "codesearchnet"}
{"code": "def get_all(cls, keyvals, key='id', user_id=None):\n    if (len(keyvals) == 0):\n        return []\n    original_keyvals = keyvals\n    keyvals_set = list(set(keyvals))\n    resultset = cls.query.filter(getattr(cls, key).in_(keyvals_set))\n    key_result_mapping = {getattr(result, key): result for result in resultset.all()}\n    return [key_result_mapping.get(kv) for kv in original_keyvals]", "docstring": "Works like a map function from keyvals to instances.\n\n\nArgs:\n\nkeyvals(list):  The list of values of the attribute.\n\nkey (str, optional): The attribute to search by. By default, it is\n'id'.\n\n\nReturns:\n\nlist: A list of model instances, in the same order as the list of\nkeyvals.\n\n\nExamples:\n\n\n>>> User.get_all([2,5,7, 8000, 11])\nuser2@i.com, user5@i.com, user7@i.com, None, user11@i.com\n\n>>> User.get_all(['user35@i.com', 'user5@i.com'], key='email')\nuser35@i.com, user5@i.com", "source": "codesearchnet"}
{"code": "def mean_area_distance(item_a, item_b, max_value):\n    \n    mean_area_a = np.mean([item_a.size(t) for t in item_a.times])\n    mean_area_b = np.mean([item_b.size(t) for t in item_b.times])\n    return np.abs(mean_area_a - mean_area_b) / float(max_value)", "docstring": "Absolute difference in the means of the areas of each track over time.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"}
{"code": "def raw_decrypt(self, ciphertext):\n        \n        if not isinstance(ciphertext, int):\n            raise TypeError('Expected ciphertext to be an int, not: %s' %\n                type(ciphertext))\n\n        decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p\n        decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q\n        return self.crt(decrypt_to_p, decrypt_to_q)", "docstring": "Decrypt raw ciphertext and return raw plaintext.\n\nArgs:\nciphertext (int): (usually from :meth:`EncryptedNumber.ciphertext()`)\nthat is to be Paillier decrypted.\n\nReturns:\nint: Paillier decryption of ciphertext. This is a positive\ninteger < :attr:`public_key.n`.\n\nRaises:\nTypeError: if ciphertext is not an int.", "source": "juraj-google-style"}
{"code": "def capture_update_from_model(cls, table_name, record_id, *, update_fields=()):\n    include_cols = ()\n    if update_fields:\n        model_cls = get_connected_model_for_table_name(table_name)\n        include_cols = cls._fieldnames_to_colnames(model_cls, update_fields)\n    raw_query = sql.SQL('\\n            SELECT {schema}.hc_capture_update_from_row(\\n              hstore({schema}.{table_name}.*),\\n              %(table_name)s,\\n              ARRAY[{include_cols}]::text[]  -- cast to type expected by stored procedure\\n            ) AS id\\n            FROM {schema}.{table_name}\\n            WHERE id = %(record_id)s\\n        ').format(schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA), table_name=sql.Identifier(table_name), include_cols=sql.SQL(', ').join((sql.Identifier(col) for col in include_cols)))\n    params = {'record_id': record_id, 'table_name': table_name}\n    result_qs = TriggerLog.objects.raw(raw_query, params)\n    return list(result_qs)", "docstring": "Create a fresh update record from the current model state in the database.\n\nFor read-write connected models, this will lead to the attempted update of the values of\na corresponding object in Salesforce.\n\nArgs:\ntable_name (str): The name of the table backing the connected model (without schema)\nrecord_id (int): The primary id of the connected model\nupdate_fields (Iterable[str]): If given, the names of fields that will be included in\nthe write record\n\nReturns:\nA list of the created TriggerLog entries (usually one).\n\nRaises:\nLookupError: if ``table_name`` does not belong to a connected model", "source": "codesearchnet"}
{"code": "def _copy(self, filename, destination):   \n        \n        full_filename = os.path.abspath(os.path.expanduser(filename))\n\n        if os.path.isdir(full_filename):\n            shutil.copytree(full_filename, destination)\n        elif os.path.isfile(full_filename):\n            shutil.copyfile(full_filename, destination)", "docstring": "Copy a file or folder to the repository.\n\nWill mount if needed.\n\nArgs:\nfilename: Path to copy.\ndestination: Remote path to copy file to.", "source": "juraj-google-style"}
{"code": "def load_spacy_rule(file_path: str) -> Dict:\n    with open(file_path) as fp:\n        return json.load(fp)", "docstring": "A spacy rule file is a json file.\n\nArgs:\nfile_path (str): path to a text file containing a spacy rule sets.\n\nReturns: Dict as the representation of spacy rules", "source": "codesearchnet"}
{"code": "def make_connection(self, bind_user=None, bind_password=None, **kwargs):\n    return self._make_connection(bind_user, bind_password, contextualise=False, **kwargs)", "docstring": "Make a connection to the LDAP Directory.\n\nArgs:\nbind_user (str): User to bind with. If `None`, AUTH_ANONYMOUS is\nused, otherwise authentication specified with\nconfig['LDAP_BIND_AUTHENTICATION_TYPE'] is used.\nbind_password (str): Password to bind to the directory with\n**kwargs (dict): Additional arguments to pass to the\n``ldap3.Connection``\n\nReturns:\nldap3.Connection: An unbound ldap3.Connection. You should handle exceptions\nupon bind if you use this internal method.", "source": "codesearchnet"}
{"code": "def run_repair_pdb(self, silent=False, force_rerun=False):\n    foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file)\n    foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])\n    ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent, outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir)\n    self.repaired_pdb_outfile = foldx_repair_outfile", "docstring": "Run FoldX RepairPDB on this PDB file.\n\nOriginal command::\n\nfoldx --command=RepairPDB --pdb=4bxi.pdb\n\nArgs:\nsilent (bool): If FoldX output should be silenced from printing to the shell.\nforce_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.", "source": "codesearchnet"}
{"code": "def CreateStorageReaderForFile(cls, path):\n    \n    if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(\n        path, check_readable_only=True):\n      return sqlite_reader.SQLiteStorageFileReader(path)\n\n    return None", "docstring": "Creates a storage reader based on the file.\n\nArgs:\npath (str): path to the storage file.\n\nReturns:\nStorageReader: a storage reader or None if the storage file cannot be\nopened or the storage format is not supported.", "source": "juraj-google-style"}
{"code": "def timestamp(self):\n    return self._timestamp", "docstring": "Timestamp of when this tensor value was dumped.\n\nReturns:\n(`int`) The timestamp in microseconds.", "source": "github-repos"}
{"code": "def add_untagged_ok(self, text: MaybeBytes, code: Optional[ResponseCode]=None) -> None:\n    response = ResponseOk(b'*', text, code)\n    self.add_untagged(response)", "docstring": "Add an untagged ``OK`` response.\n\nSee Also:\n:meth:`.add_untagged`, :class:`ResponseOk`\n\nArgs:\ntext: The response text.\ncode: Optional response code.", "source": "codesearchnet"}
{"code": "def get(self, recipe_id):\n    self.logger.debug(('Retrieving recipe by id: ' + recipe_id))\n    url = ('%(base_url)s/recipe/%(recipe_id)s' % {'base_url': self.base_url, 'recipe_id': recipe_id})\n    r = self.gbdx_connection.get(url)\n    r.raise_for_status()\n    return r.json()", "docstring": "Retrieves an AnswerFactory Recipe by id\n\nArgs:\nrecipe_id The id of the recipe\n\nReturns:\nA JSON representation of the recipe", "source": "codesearchnet"}
{"code": "async def send_code_request(self, phone, *, force_sms=False):\n    phone = (utils.parse_phone(phone) or self._phone)\n    phone_hash = self._phone_code_hash.get(phone)\n    if (not phone_hash):\n        try:\n            result = (await self(functions.auth.SendCodeRequest(phone, self.api_id, self.api_hash, types.CodeSettings())))\n        except errors.AuthRestartError:\n            return self.send_code_request(phone, force_sms=force_sms)\n        self._tos = result.terms_of_service\n        self._phone_code_hash[phone] = phone_hash = result.phone_code_hash\n    else:\n        force_sms = True\n    self._phone = phone\n    if force_sms:\n        result = (await self(functions.auth.ResendCodeRequest(phone, phone_hash)))\n        self._phone_code_hash[phone] = result.phone_code_hash\n    return result", "docstring": "Sends a code request to the specified phone number.\n\nArgs:\nphone (`str` | `int`):\nThe phone to which the code will be sent.\n\nforce_sms (`bool`, optional):\nWhether to force sending as SMS.\n\nReturns:\nAn instance of :tl:`SentCode`.", "source": "codesearchnet"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    preferred_encoding = locale.getpreferredencoding()\n\n    if not input_reader:\n      input_reader = StdinInputReader(encoding=preferred_encoding)\n    if not output_writer:\n      output_writer = StdoutOutputWriter(encoding=preferred_encoding)\n\n    super(CLIVolumeScannerMediator, self).__init__()\n    self._encode_errors = 'strict'\n    self._input_reader = input_reader\n    self._output_writer = output_writer\n    self._preferred_encoding = locale.getpreferredencoding()\n    self._textwrapper = textwrap.TextWrapper()", "docstring": "Initializes a volume scanner mediator.\n\nArgs:\ninput_reader (Optional[CLIInputReader]): input reader, where None\nindicates that the stdin input reader should be used.\noutput_writer (Optional[CLIOutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def get_many(self, type: Type[T], query: Mapping[(str, Any)], streaming: bool=False) -> Iterable[T]:\n    LOGGER.info('Getting SourceHandlers for \"{type}\"'.format(type=type.__name__))\n    try:\n        handlers = self._get_types[type]\n    except KeyError:\n        try:\n            LOGGER.info('Building new SourceHandlers for \"{type}\"'.format(type=type.__name__))\n            handlers = self._get_handlers(type)\n        except NoConversionError:\n            handlers = None\n        self._get_types[type] = handlers\n    if (handlers is None):\n        raise NoConversionError('No source can provide \"{type}\"'.format(type=type.__name__))\n    LOGGER.info('Creating new PipelineContext')\n    context = self._new_context()\n    LOGGER.info('Querying SourceHandlers for \"{type}\"'.format(type=type.__name__))\n    for handler in handlers:\n        try:\n            return handler.get_many(query, context, streaming)\n        except NotFoundError:\n            pass\n    raise NotFoundError('No source returned a query result!')", "docstring": "Gets a query from the data pipeline, which contains a request for multiple objects.\n\n1) Extracts the query the sequence of data sources.\n2) Inserts the results into the data sinks (if appropriate).\n3) Transforms the results into the requested type if it wasn't already.\n4) Inserts the transformed result into any data sinks.\n\nArgs:\nquery: The query being requested (contains a request for multiple objects).\ncontext: The context for the extraction (mutable).\nstreaming: Specifies whether the results should be returned as a generator (default False).\n\nReturns:\nThe requested objects or a generator of the objects if streaming is True.", "source": "codesearchnet"}
{"code": "def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):\n    if (type(hosts) != list):\n        hosts = [hosts]\n    conn_params = {'hosts': hosts, 'timeout': 20}\n    if use_ssl:\n        conn_params['use_ssl'] = True\n        if ssl_cert_path:\n            conn_params['verify_certs'] = True\n            conn_params['ca_certs'] = ssl_cert_path\n        else:\n            conn_params['verify_certs'] = False\n    connections.create_connection(**conn_params)", "docstring": "Sets the Elasticsearch hosts to use\n\nArgs:\nhosts (str): A single hostname or URL, or list of hostnames or URLs\nuse_ssl (bool): Use a HTTPS connection to the server\nssl_cert_path (str): Path to the certificate chain", "source": "codesearchnet"}
{"code": "def _start_event_client(self):", "docstring": "Starts a separate JsonRpc client to the same session for propagating\nevents.\n\nThis is an optional function that should only implement if the client\nutilizes the snippet event mechanism.\n\nReturns:\nA JsonRpc Client object that connects to the same session as the\none on which this function is called.", "source": "github-repos"}
{"code": "def push(self, x):\n    if not math.isnan(x):\n        self._n += 1\n        delta = x - self._mean\n    else:\n        delta = 0\n    if self._window_mode == WindowMode.SLIDING:\n        if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):\n            self._n -= 1\n            delta += self._mean - old_x\n        super().push(x)\n    if self._n > 0:\n        self._mean += delta / self._n\n    else:\n        self._mean = 0", "docstring": "Pushes a new value and updates the incremental mean.\n\nArgs:\nx: The new value to be pushed.", "source": "github-repos"}
{"code": "def set_preferred_prefix_for_namespace(self, ns_uri, prefix, add_if_not_exist=False):\n    ni = self.__lookup_uri(ns_uri)\n    if (not prefix):\n        ni.preferred_prefix = None\n    elif (prefix in ni.prefixes):\n        ni.preferred_prefix = prefix\n    elif add_if_not_exist:\n        self.add_prefix(ns_uri, prefix, set_as_preferred=True)\n    else:\n        raise PrefixNotFoundError(prefix)", "docstring": "Sets the preferred prefix for ns_uri.  If add_if_not_exist is True,\nthe prefix is added if it's not already registered.  Otherwise,\nsetting an unknown prefix as preferred is an error.  The default\nis False.  Setting to None always works, and indicates a preference\nto use the namespace as a default.  The given namespace must already\nbe in this set.\n\nArgs:\nns_uri (str): the namespace URI whose prefix is to be set\nprefix (str): the preferred prefix to set\nadd_if_not_exist (bool): Whether to add the prefix if it is not\nalready set as a prefix of ``ns_uri``.\n\nRaises:\nNamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set.\nDuplicatePrefixError: If ``prefix`` already maps to a different\nnamespace.", "source": "codesearchnet"}
{"code": "def __init__(self, is_found, key, value):\n        \n        self.key = key\n        self.is_found = is_found\n        if self.is_found:\n            self.value = value", "docstring": "Creates a cached response object.\n\nArgs:\nis_found (bool): True if the key was found in the cache, False\notherwise.\nkey (string): The key originally used to retrieve the value.\nvalue (object)", "source": "juraj-google-style"}
{"code": "def explore(config, mutations, resample_probability, custom_explore_fn):\n    \n    new_config = copy.deepcopy(config)\n    for key, distribution in mutations.items():\n        if isinstance(distribution, dict):\n            new_config.update({\n                key: explore(config[key], mutations[key], resample_probability,\n                             None)\n            })\n        elif isinstance(distribution, list):\n            if random.random() < resample_probability or \\\n                    config[key] not in distribution:\n                new_config[key] = random.choice(distribution)\n            elif random.random() > 0.5:\n                new_config[key] = distribution[max(\n                    0,\n                    distribution.index(config[key]) - 1)]\n            else:\n                new_config[key] = distribution[min(\n                    len(distribution) - 1,\n                    distribution.index(config[key]) + 1)]\n        else:\n            if random.random() < resample_probability:\n                new_config[key] = distribution()\n            elif random.random() > 0.5:\n                new_config[key] = config[key] * 1.2\n            else:\n                new_config[key] = config[key] * 0.8\n            if type(config[key]) is int:\n                new_config[key] = int(new_config[key])\n    if custom_explore_fn:\n        new_config = custom_explore_fn(new_config)\n        assert new_config is not None, \\\n            \"Custom explore fn failed to return new config\"\n    logger.info(\"[explore] perturbed config from {} -> {}\".format(\n        config, new_config))\n    return new_config", "docstring": "Return a config perturbed as specified.\n\nArgs:\nconfig (dict): Original hyperparameter configuration.\nmutations (dict): Specification of mutations to perform as documented\nin the PopulationBasedTraining scheduler.\nresample_probability (float): Probability of allowing resampling of a\nparticular variable.\ncustom_explore_fn (func): Custom explore fn applied after built-in\nconfig perturbations are.", "source": "juraj-google-style"}
{"code": "def HasCustomStr(component):\n    if hasattr(component, '__str__'):\n        class_attrs = inspectutils.GetClassAttrsDict(type(component)) or {}\n        str_attr = class_attrs.get('__str__')\n        if str_attr and str_attr.defining_class is not object:\n            return True\n    return False", "docstring": "Determines if a component has a custom __str__ method.\n\nUses inspect.classify_class_attrs to determine the origin of the object's\n__str__ method, if one is present. If it defined by `object` itself, then\nit is not considered custom. Otherwise it is. This means that the __str__\nmethods of primitives like ints and floats are considered custom.\n\nObjects with custom __str__ methods are treated as values and can be\nserialized in places where more complex objects would have their help screen\nshown instead.\n\nArgs:\ncomponent: The object to check for a custom __str__ method.\nReturns:\nWhether `component` has a custom __str__ method.", "source": "github-repos"}
{"code": "def _is_variant(self, gemini_variant, ind_objs):\n        \n\n        indexes = (ind.ind_index for ind in ind_objs)\n        \n        for index in indexes:\n            gt_call = gemini_variant['gt_types'][index]\n            if (gt_call == 1 or gt_call == 3):\n                return True\n\n        return False", "docstring": "Check if the variant is a variation in any of the individuals\n\nArgs:\ngemini_variant (GeminiQueryRow): The gemini variant\nind_objs (list(puzzle.models.individual)): A list of individuals to check\n\nReturns:\nbool : If any of the individuals has the variant", "source": "juraj-google-style"}
{"code": "def get_attr_value(self, attr_key, el_idx=0):\n        \n        return self.get_element_by_attr_key(attr_key, el_idx).attrib[attr_key]", "docstring": "Return the value of the selected attribute in the selected element.\n\nArgs:\nattr_key : str\nName of attribute for which to search\n\nel_idx : int\nIndex of element to use in the event that there are multiple sibling\nelements with the same name.\n\nReturns:\nstr : Value of the selected attribute in the selected element.", "source": "juraj-google-style"}
{"code": "def check_time(timer_id):\n    \n    if timer_id not in _g_timers:\n        _g_timers[timer_id] = Timer()\n        return 0\n    else:\n        return _g_timers[timer_id].since_last_check()", "docstring": "Add check points in a single line.\n\nThis method is suitable for running a task on a list of items. A timer will\nbe registered when the method is called for the first time.\n\n:Example:\n\n>>> import time\n>>> import mmcv\n>>> for i in range(1, 6):\n>>>     # simulate a code block\n>>>     time.sleep(i)\n>>>     mmcv.check_time('task1')\n2.000\n3.000\n4.000\n5.000\n\nArgs:\ntimer_id (str): Timer identifier.", "source": "juraj-google-style"}
{"code": "def package_and_copy(package_root_dir, setup_py, output_tar_path):\n  \n  if not output_tar_path.startswith('gs:\n    raise ValueError('output_tar_path needs to be a GCS path.')\n  if not os.path.isfile(setup_py):\n    raise ValueError('Supplied file \"%s\" does not exist.' % setup_py)\n\n  dest_setup_py = os.path.join(package_root_dir, 'setup.py')\n  if dest_setup_py != setup_py:\n    \n    \n    if os.path.isfile(dest_setup_py):\n      os.rename(dest_setup_py, dest_setup_py + '._bak_')\n    shutil.copyfile(setup_py, dest_setup_py)\n\n  tempdir = tempfile.mkdtemp()\n  previous_cwd = os.getcwd()\n  os.chdir(package_root_dir)\n  try:\n    \n    sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]\n    subprocess.check_call(sdist)\n\n    \n    source = os.path.join(tempdir, '*.tar.gz')\n    gscopy = ['gsutil', 'cp', source, output_tar_path]\n    subprocess.check_call(gscopy)\n    return\n  finally:\n    os.chdir(previous_cwd)\n    if dest_setup_py != setup_py:\n      os.remove(dest_setup_py)\n    if os.path.isfile(dest_setup_py + '._bak_'):\n      os.rename(dest_setup_py + '._bak_', dest_setup_py)\n    shutil.rmtree(tempdir)", "docstring": "Repackage an CloudML package and copy it to a staging dir.\n\nArgs:\npackage_root_dir: the root dir to install package from. Usually you can get the path\nfrom inside your module using a relative path to __file__.\nsetup_py: the path to setup.py.\noutput_tar_path: the GCS path of the output tarball package.\nRaises:\nValueError if output_tar_path is not a GCS path, or setup_py does not exist.", "source": "juraj-google-style"}
{"code": "def _prepare_for_training(self, records, mini_batch_size=None, job_name=None):\n        \n        super(AmazonAlgorithmEstimatorBase, self)._prepare_for_training(job_name=job_name)\n\n        feature_dim = None\n\n        if isinstance(records, list):\n            for record in records:\n                if record.channel == 'train':\n                    feature_dim = record.feature_dim\n                    break\n            if feature_dim is None:\n                raise ValueError('Must provide train channel.')\n        else:\n            feature_dim = records.feature_dim\n\n        self.feature_dim = feature_dim\n        self.mini_batch_size = mini_batch_size", "docstring": "Set hyperparameters needed for training.\n\nArgs:\n* records (:class:`~RecordSet`): The records to train this ``Estimator`` on.\n* mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a\ndefault value will be used.\n* job_name (str): Name of the training job to be created. If not specified, one is generated,\nusing the base name given to the constructor if applicable.", "source": "juraj-google-style"}
{"code": "def Call(self, position, function_call):\n    self.EnsureGdbPosition(position[0], None, None)\n    if (not gdb.selected_thread().is_stopped()):\n        self.Interrupt(position)\n    result_value = gdb.parse_and_eval(function_call)\n    return self._UnpackGdbVal(result_value)", "docstring": "Perform a function call in the inferior.\n\nWARNING: Since Gdb's concept of threads can't be directly identified with\npython threads, the function call will be made from what has to be assumed\nis an arbitrary thread. This *will* interrupt the inferior. Continuing it\nafter the call is the responsibility of the caller.\n\nArgs:\nposition: the context of the inferior to call the function from.\nfunction_call: A string corresponding to a function call. Format:\n'foo(0,0)'\nReturns:\nThre return value of the called function.", "source": "codesearchnet"}
{"code": "def Query(self, queue, limit=1):\n    \n    \n    \n    if isinstance(queue, rdf_client.ClientURN):\n      queue = queue.Queue()\n\n    return self.data_store.QueueQueryTasks(queue, limit=limit)", "docstring": "Retrieves tasks from a queue without leasing them.\n\nThis is good for a read only snapshot of the tasks.\n\nArgs:\nqueue: The task queue that this task belongs to, usually client.Queue()\nwhere client is the ClientURN object you want to schedule msgs on.\nlimit: Number of values to fetch.\n\nReturns:\nA list of Task() objects.", "source": "juraj-google-style"}
{"code": "def _GetTimeElementsTuple(self, timestamp):\n    (year, month, day_of_month, hours, minutes, seconds) = (int((hexdigit[0] + hexdigit[1]), 16) for hexdigit in zip(timestamp[::2], timestamp[1::2]))\n    return ((year + 1970), (month + 1), day_of_month, hours, minutes, seconds)", "docstring": "Retrieves a time elements tuple from the timestamp.\n\nA Symantec log timestamp consist of six hexadecimal octets, that represent:\nFirst octet: Number of years since 1970\nSecond octet: Month, where January is represented by 0\nThird octet: Day of the month\nFourth octet: Number of hours\nFifth octet: Number of minutes\nSixth octet: Number of seconds\n\nFor example, 200A13080122 represents November 19, 2002, 8:01:34 AM.\n\nArgs:\ntimestamp (str): hexadecimal encoded date and time values.\n\nReturns:\ntuple: containing:\nyear (int): year.\nmonth (int): month, where 1 represents January.\nday_of_month (int): day of month, where 1 is the first day of the month.\nhours (int): hours.\nminutes (int): minutes.\nseconds (int): seconds.", "source": "codesearchnet"}
{"code": "def format_speech_generation_kwargs(kwargs):\n    kwargs_text = {}\n    kwargs_speech = {}\n    for key, value in kwargs.items():\n        if key.startswith('text_'):\n            key = key[len('text_'):]\n            kwargs_text[key] = value\n        elif key.startswith('speech_'):\n            key = key[len('speech_'):]\n            kwargs_speech[key] = value\n        elif key == 'generation_config':\n            kwargs_text[key] = value\n        else:\n            if key not in kwargs_text:\n                kwargs_text[key] = value\n            if key not in kwargs_speech:\n                kwargs_speech[key] = value\n    return (kwargs_text, kwargs_speech)", "docstring": "Format kwargs for SeamlessM4Tv2 models that generate speech, attribute kwargs to either the text generation or the\nspeech generation models.\n\nArgs:\nkwargs (`dict`)`:\nKeyword arguments are of two types:\n\n- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,\nexcept for `decoder_input_ids` which will only be passed through the text components.\n- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the\ntext model and speech model respectively. It has the priority over the keywords without a prefix.\n\nThis means you can, for example, specify a generation strategy for one generation but not for the\nother.", "source": "github-repos"}
{"code": "def _prep_binary_mimetype(self):\n\n\t\t\n\n\t\t\n\t\tif not self.mimetype and 'Content-Type' not in self.resource.headers.keys():\n\t\t\traise Exception('to create/update NonRDFSource, mimetype or Content-Type header is required')\n\n\t\t\n\t\telif self.mimetype and 'Content-Type' not in self.resource.headers.keys():\n\t\t\tlogger.debug('setting Content-Type header with provided mimetype: %s'\n\t\t\t\t% self.mimetype)\n\t\t\tself.resource.headers['Content-Type'] = self.mimetype", "docstring": "Sets Content-Type header based on headers and/or self.binary.mimetype values\nImplicitly favors Content-Type header if set\n\nArgs:\nNone\n\nReturns:\nNone: sets attributes in self.binary and headers", "source": "juraj-google-style"}
{"code": "def execute_no_wait(self, cmd, walltime, envs={}):\n        \n        current_env = copy.deepcopy(self._envs)\n        current_env.update(envs)\n\n        try:\n            proc = subprocess.Popen(\n                cmd,\n                stdout=subprocess.PIPE,\n                stderr=subprocess.PIPE,\n                cwd=self.userhome,\n                env=current_env,\n                shell=True,\n                preexec_fn=os.setpgrp\n            )\n            pid = proc.pid\n\n        except Exception as e:\n            print(\"Caught exception : {0}\".format(e))\n            logger.warn(\"Execution of command [%s] failed due to \\n %s \", (cmd, e))\n\n        return pid, proc", "docstring": "Synchronously execute a commandline string on the shell.\n\nArgs:\n- cmd (string) : Commandline string to execute\n- walltime (int) : walltime in seconds, this is not really used now.\n\nReturns:\n\n- retcode : Return code from the execution, -1 on fail\n- stdout  : stdout string\n- stderr  : stderr string\n\nRaises:\nNone.", "source": "juraj-google-style"}
{"code": "def _AddUser(self, user):\n    \n    self.logger.info('Creating a new user account for %s.', user)\n\n    command = self.useradd_cmd.format(user=user)\n    try:\n      subprocess.check_call(command.split(' '))\n    except subprocess.CalledProcessError as e:\n      self.logger.warning('Could not create user %s. %s.', user, str(e))\n      return False\n    else:\n      self.logger.info('Created user account %s.', user)\n      return True", "docstring": "Configure a Linux user account.\n\nArgs:\nuser: string, the name of the Linux user account to create.\n\nReturns:\nbool, True if user creation succeeded.", "source": "juraj-google-style"}
{"code": "def serve(args):\n    port = (args.serve_port or PORT)\n    host = '0.0.0.0'\n    dir_path = Path().absolute()\n    web_dir = (dir_path / 'site')\n    utils.set_routes()\n    if args.offline:\n        os.environ['MKINX_OFFLINE'] = 'true'\n        _ = subprocess.check_output('mkdocs build > /dev/null', shell=True)\n        utils.make_offline()\n\n    class MkinxHTTPHandler(SimpleHTTPRequestHandler):\n        'Class routing urls (paths) to projects (resources)\\n        '\n\n        def translate_path(self, path):\n            location = str(web_dir)\n            route = location\n            if ((len(path) != 0) and (path != '/')):\n                for (key, loc) in utils.get_routes():\n                    if path.startswith(key):\n                        location = loc\n                        path = path[len(key):]\n                        break\n            if ((location[(- 1)] == '/') or (not path) or (path[0] == '/')):\n                route = (location + path)\n            else:\n                route = ((location + '/') + path)\n            return route.split('?')[0]\n    success = False\n    count = 0\n    print('Waiting for server port...')\n    try:\n        while (not success):\n            try:\n                httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)\n                success = True\n            except OSError:\n                count += 1\n            finally:\n                if ((not success) and (count > 20)):\n                    s = 'port {} seems occupied. Try with {} ? (y/n)'\n                    if ('y' in input(s.format(port, (port + 1)))):\n                        port += 1\n                        count = 0\n                    else:\n                        print('You can specify a custom port with mkinx serve -s')\n                        return\n                time.sleep(0.5)\n    except KeyboardInterrupt:\n        print('Aborting.')\n        return\n    httpd.allow_reuse_address = True\n    print('\\nServing at http:\n    thread = threading.Thread(target=httpd.serve_forever)\n    thread.daemon = True\n    thread.start()\n    event_handler = utils.MkinxFileHandler(patterns=['*.rst', '*.md', '*.yml', '*.yaml'])\n    observer = Observer()\n    observer.schedule(event_handler, path=str(dir_path), recursive=True)\n    observer.start()\n    try:\n        while True:\n            time.sleep(1)\n    except KeyboardInterrupt:\n        observer.stop()\n        httpd.server_close()\n    observer.join()", "docstring": "Start a server which will watch .md and .rst files for changes.\nIf a md file changes, the Home Documentation is rebuilt. If a .rst\nfile changes, the updated sphinx project is rebuilt\n\nArgs:\nargs (ArgumentParser): flags from the CLI", "source": "codesearchnet"}
{"code": "def get(self, key, value):\n    if (key == 'id'):\n        response = self._swimlane.request('get', 'app/{}'.format(value))\n        if (response.status_code == 204):\n            raise ValueError('No app with id \"{}\"'.format(value))\n        return App(self._swimlane, response.json())\n    else:\n        for app in self.list():\n            if (value and (value == app.name)):\n                return app\n        raise ValueError('No app with name \"{}\"'.format(value))", "docstring": "Get single app by one of id or name\n\nSupports resource cache\n\nKeyword Args:\nid (str): Full app id\nname (str): App name\n\nReturns:\nApp: Corresponding App resource instance\n\nRaises:\nTypeError: No or multiple keyword arguments provided\nValueError: No matching app found on server", "source": "codesearchnet"}
{"code": "def erf(x):\n    if any_symbolic_tensors((x,)):\n        return Erf().symbolic_call(x)\n    x = backend.convert_to_tensor(x)\n    return backend.math.erf(x)", "docstring": "Computes the error function of `x`, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same dtype as `x`.\n\nExample:\n\n>>> x = np.array([-3.0, -2.0, -1.0, 0.0, 1.0])\n>>> keras.ops.erf(x)\narray([-0.99998 , -0.99532, -0.842701,  0.,  0.842701], dtype=float32)", "source": "github-repos"}
{"code": "def get_nic(access_token, subscription_id, resource_group, nic_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkInterfaces/', nic_name, '?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about a network interface.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnic_name (str): Name of the NIC.\n\nReturns:\nHTTP response. NIC JSON body.", "source": "codesearchnet"}
{"code": "def _get_by_id(cls, id, parent=None, **ctx_options):\n    return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()", "docstring": "Returns an instance of Model class by ID.\n\nThis is really just a shorthand for Key(cls, id, ...).get().\n\nArgs:\nid: A string or integer key ID.\nparent: Optional parent key of the model to get.\nnamespace: Optional namespace.\napp: Optional app ID.\n**ctx_options: Context options.\n\nReturns:\nA model instance or None if not found.", "source": "codesearchnet"}
{"code": "def enable_streaming(self):\n    if (not self.connected):\n        raise HardwareError('Cannot enable streaming if we are not in a connected state')\n    if (self._reports is not None):\n        _clear_queue(self._reports)\n        return self._reports\n    self._reports = queue.Queue()\n    self._loop.run_coroutine(self.adapter.open_interface(0, 'streaming'))\n    return self._reports", "docstring": "Open the streaming interface and accumute reports in a queue.\n\nThis method is safe to call multiple times in a single device\nconnection. There is no way to check if the streaming interface is\nopened or to close it once it is opened (apart from disconnecting from\nthe device).\n\nThe first time this method is called, it will open the streaming\ninterface and return a queue that will be filled asynchronously with\nreports as they are received.  Subsequent calls will just empty the\nqueue and return the same queue without interacting with the device at\nall.\n\nReturns:\nqueue.Queue: A queue that will be filled with reports from the device.", "source": "codesearchnet"}
{"code": "def songs(self):\n    song_list = []\n    for chunk in self.songs_iter(page_size=49995):\n        song_list.extend(chunk)\n    return song_list", "docstring": "Get a listing of library songs.\n\nReturns:\nlist: Song dicts.", "source": "codesearchnet"}
{"code": "def _data_from_dotnotation(self, key, default=None):\n        \n        if key is None:\n            raise KeyError('NoneType is not a valid key!')\n\n        doc = self._collection.find_one({\"_id\": ObjectId(self._workflow_id)})\n        if doc is None:\n            return default\n\n        for k in key.split('.'):\n            doc = doc[k]\n\n        return doc", "docstring": "Returns the MongoDB data from a key using dot notation.\n\nArgs:\nkey (str): The key to the field in the workflow document. Supports MongoDB's\ndot notation for embedded fields.\ndefault (object): The default value that is returned if the key\ndoes not exist.\n\nReturns:\nobject: The data for the specified key or the default value.", "source": "juraj-google-style"}
{"code": "def load_spectrum(filename):\n    import f311\n    f = load_with_classes(filename, f311.classes_sp())\n    if f:\n        return f.spectrum\n    return None", "docstring": "Attempts to load spectrum as one of the supported types.\n\nReturns:\na Spectrum, or None", "source": "codesearchnet"}
{"code": "def _get_grouped_variables(vars_to_warm_start):\n    if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None:\n        logging.info('Warm-starting variables only in TRAINABLE_VARIABLES.')\n        list_of_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start)\n    elif isinstance(vars_to_warm_start, list):\n        if all((isinstance(v, str) for v in vars_to_warm_start)):\n            list_of_vars = []\n            for v in vars_to_warm_start:\n                list_of_vars += ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope=v)\n        elif all((checkpoint_utils._is_variable(v) for v in vars_to_warm_start)):\n            list_of_vars = vars_to_warm_start\n        else:\n            raise ValueError('If `vars_to_warm_start` is a list, it must be all `Variable` or all `str`.  Given types are {}'.format([type(v) for v in vars_to_warm_start]))\n    else:\n        raise ValueError('`vars_to_warm_start must be a `list` or `str`.  Given type is {}'.format(type(vars_to_warm_start)))\n    grouped_variables = {}\n    for v in list_of_vars:\n        t = [v] if not isinstance(v, list) else v\n        var_name = _infer_var_name(t)\n        grouped_variables.setdefault(var_name, []).append(v)\n    return grouped_variables", "docstring": "Collects and groups (possibly partitioned) variables into a dictionary.\n\nThe variables can be provided explicitly through vars_to_warm_start, or they\nare retrieved from collections (see below).\n\nArgs:\nvars_to_warm_start: One of the following:\n\n- A regular expression (string) that captures which variables to\nwarm-start (see tf.compat.v1.get_collection).  This expression will\nonly consider variables in the TRAINABLE_VARIABLES collection.\n- A list of strings, each representing a full variable name to warm-start.\nThese will consider variables in GLOBAL_VARIABLES collection.\n- A list of Variables to warm-start.\n- `None`, in which case all variables in TRAINABLE_VARIABLES will be used.\nReturns:\nA dictionary mapping variable names (strings) to lists of Variables.\nRaises:\nValueError: If vars_to_warm_start is not a string, `None`, a list of\n`Variables`, or a list of strings.", "source": "github-repos"}
{"code": "def get_workflow(workflow_id: str, workflow_version: str) -> dict:\n    \n    name = \"workflow_definitions:{}:{}\".format(workflow_id, workflow_version)\n    workflow = DB.get_hash_dict(name)\n    workflow['stages'] = ast.literal_eval(workflow['stages'])\n    return workflow", "docstring": "Get a workflow definition from the Configuration Database.\n\nArgs:\nworkflow_id (str): Workflow identifier\nworkflow_version (str): Workflow version\n\nReturns:\ndict, Workflow definition dictionary", "source": "juraj-google-style"}
{"code": "def _module_info_from_proto(module_info_def, import_scope=None):\n  \n  graph = tf.get_default_graph()\n  def prepend_name_scope(name_scope):\n    return ops.prepend_name_scope(name_scope, import_scope)\n  def process_leafs(name):\n    return _path_to_graph_element(prepend_name_scope(name), graph)\n  connected_subgraphs = []\n  module_info = ModuleInfo(\n      module_name=module_info_def.module_name,\n      scope_name=prepend_name_scope(module_info_def.scope_name),\n      class_name=module_info_def.class_name,\n      connected_subgraphs=connected_subgraphs)\n  for connected_subgraph_def in module_info_def.connected_subgraphs:\n    connected_subgraph = ConnectedSubGraph(\n        module=module_info,\n        name_scope=prepend_name_scope(connected_subgraph_def.name_scope),\n        inputs=_nested_from_proto(\n            connected_subgraph_def.inputs, process_leafs),\n        outputs=_nested_from_proto(\n            connected_subgraph_def.outputs, process_leafs))\n    connected_subgraphs.append(connected_subgraph)\n  return module_info", "docstring": "Deserializes `module_info_def` proto.\n\nArgs:\nmodule_info_def: An instance of `module_pb2.SonnetModule`.\nimport_scope: Optional `string`. Name scope to use.\n\nReturns:\nAn instance of `ModuleInfo`.\n\nRaises:\nbase_errors.ModuleInfoError: If the probobuf is of the wrong type or\nif some of its fields are missing.", "source": "juraj-google-style"}
{"code": "def log_softmax_v2(logits, axis=None, name=None):\n    if axis is None:\n        axis = -1\n    return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)", "docstring": "Computes log softmax activations.\n\nFor each batch `i` and class `j` we have\n\nlogsoftmax = logits - log(reduce_sum(exp(logits), axis))\n\nArgs:\nlogits: A non-empty `Tensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\naxis: The dimension softmax would be performed on. The default is -1 which\nindicates the last dimension.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`. Has the same type as `logits`. Same shape as `logits`.\n\nRaises:\nInvalidArgumentError: if `logits` is empty or `axis` is beyond the last\ndimension of `logits`.", "source": "github-repos"}
{"code": "def describe_file_set(modules):\n    \n    descriptor = FileSet()\n    file_descriptors = []\n    for module in modules:\n        file_descriptors.append(describe_file(module))\n\n    if file_descriptors:\n        descriptor.files = file_descriptors\n\n    return descriptor", "docstring": "Build a file set from a specified Python modules.\n\nArgs:\nmodules: Iterable of Python module to describe.\n\nReturns:\nInitialized FileSet instance describing the modules.", "source": "juraj-google-style"}
{"code": "def list_vdirs(site, app=_DEFAULT_APP):\n    \n    ret = dict()\n\n    ps_cmd = ['Get-WebVirtualDirectory',\n              '-Site', r\"'{0}'\".format(site),\n              '-Application', r\"'{0}'\".format(app),\n              '|', \"Select-Object PhysicalPath, @{ Name = 'name';\",\n              r\"Expression = { $_.path.Split('/')[-1] } }\"]\n\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n\n    for item in items:\n        ret[item['name']] = {'sourcepath': item['physicalPath']}\n\n    if not ret:\n        log.warning('No vdirs found in output: %s', cmd_ret)\n\n    return ret", "docstring": "Get all configured IIS virtual directories for the specified site, or for\nthe combination of site and application.\n\nArgs:\nsite (str): The IIS site name.\napp (str): The IIS application.\n\nReturns:\ndict: A dictionary of the virtual directory names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_vdirs site", "source": "juraj-google-style"}
{"code": "def match(obj, matchers=TYPES):\n    \n    buf = get_bytes(obj)\n\n    for matcher in matchers:\n        if matcher.match(buf):\n            return matcher\n\n    return None", "docstring": "Matches the given input againts the available\nfile type matchers.\n\nArgs:\nobj: path to file, bytes or bytearray.\n\nReturns:\nType instance if type matches. Otherwise None.\n\nRaises:\nTypeError: if obj is not a supported type.", "source": "juraj-google-style"}
{"code": "def minimum(station_code):\n    \n    temp = None\n    fin = None\n    try:\n        fin = open('%s/%s' % (env.WEATHER_DATA_PATH,\n                              _basename(station_code, 'ddy')))\n    except IOError:\n        logger.info(\"File not found\")\n        download_extract(_eere_url(station_code))\n        fin = open('%s/%s' % (env.WEATHER_DATA_PATH,\n                              _basename(station_code, 'ddy')))\n    for line in fin:\n        value = re.search('Max Drybulb=(-?\\\\d+\\\\.\\\\d*)', line)\n        if value:\n            temp = float(value.groups()[0])\n    if not temp:\n        try:\n            fin = open('%s/%s' % (env.WEATHER_DATA_PATH,\n                                  _basename(station_code, 'stat')))\n            for line in fin:\n                if line.find('Minimum Dry Bulb') is not -1:\n                    return float(line[37:-1].split('\\xb0')[0])\n        except IOError:\n            pass\n    if temp:\n        return temp\n    else:\n        raise Exception(\"Error: Minimum Temperature not found\")", "docstring": "Extreme Minimum Design Temperature for a location.\n\nDegrees in Celcius\n\nArgs:\nstation_code (str): Weather Station Code\n\nReturns:\nfloat degrees Celcius", "source": "juraj-google-style"}
{"code": "def rtm(  \n        self, url: Optional[str] = None, bot_id: Optional[str] = None\n    ) -> Iterator[events.Event]:\n        \n        while True:\n            bot_id = bot_id or self._find_bot_id()\n            url = url or self._find_rtm_url()\n            for event in self._incoming_from_rtm(url, bot_id):\n                yield event\n            url = None", "docstring": "Iterate over event from the RTM API\n\nArgs:\nurl: Websocket connection url\nbot_id: Connecting bot ID\n\nReturns:\n:class:`slack.events.Event` or :class:`slack.events.Message`", "source": "juraj-google-style"}
{"code": "def connect(self, chip_name, speed='auto', verbose=False):\n        \n\n        if verbose:\n            self.exec_command('EnableRemarks = 1')\n\n        \n        \n        self.exec_command('Device = %s' % chip_name)\n\n        \n        \n        if speed == 'auto':\n            self.set_speed(auto=True)\n        elif speed == 'adaptive':\n            self.set_speed(adaptive=True)\n        else:\n            self.set_speed(speed)\n\n        result = self._dll.JLINKARM_Connect()\n        if result < 0:\n            raise errors.JLinkException(result)\n\n        try:\n            \n            self.halted()\n        except errors.JLinkException:\n            pass\n\n        \n        \n        for index in range(self.num_supported_devices()):\n            device = self.supported_device(index)\n            if device.name.lower() == chip_name.lower():\n                self._device = device\n                break\n        else:\n            raise errors.JLinkException('Unsupported device was connected to.')\n\n        return None", "docstring": "Connects the J-Link to its target.\n\nArgs:\nself (JLink): the ``JLink`` instance\nchip_name (str): target chip name\nspeed (int): connection speed, one of ``{5-12000, 'auto', 'adaptive'}``\nverbose (bool): boolean indicating if connection should be verbose in logging\n\nReturns:\n``None``\n\nRaises:\nJLinkException: if connection fails to establish.\nTypeError: if given speed is invalid", "source": "juraj-google-style"}
{"code": "def _get_match(self, key):\n    return (self._get_string_match(key=key) or self._get_non_string_match(key=key))", "docstring": "Gets a MatchObject for the given key.\n\nArgs:\nkey (str): Key of the property to look-up.\n\nReturn:\nMatchObject: The discovered match.", "source": "codesearchnet"}
{"code": "def getRegisterUserInfo(self, svctype = \"Android NDrive App ver\", auth = 0):\n        \n        data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth}\n        r = self.session.get(nurls['getRegisterUserInfo'], params = data)\n\n        j = json.loads(r.text)\n\n        if j['message'] != 'success':\n            print \"[*] Error getRegisterUserInfo: \" + j['message']\n            return False\n\n        else:\n            self.useridx = j['resultvalue']['useridx']\n            return True", "docstring": "Get registerUserInfo\n\nArgs:\nsvctype: Platform information\nauth: ???\n\nReturns:\nTrue: Success\nFalse: Failed", "source": "juraj-google-style"}
{"code": "def create_model(self, model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT):\n    role = (role or self.role)\n    return ChainerModel(self.model_data, role, self.entry_point, source_dir=self._model_source_dir(), enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, name=self._current_job_name, container_log_level=self.container_log_level, code_location=self.code_location, py_version=self.py_version, framework_version=self.framework_version, model_server_workers=model_server_workers, image=self.image_name, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override), dependencies=self.dependencies)", "docstring": "Create a SageMaker ``ChainerModel`` object that can be deployed to an ``Endpoint``.\n\nArgs:\nrole (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during\ntransform jobs. If not specified, the role from the Estimator will be used.\nmodel_server_workers (int): Optional. The number of worker processes used by the inference server.\nIf None, server will use one worker per vCPU.\nvpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.\nDefault: use subnets and security groups from this Estimator.\n* 'Subnets' (list[str]): List of subnet ids.\n* 'SecurityGroupIds' (list[str]): List of security group ids.\n\nReturns:\nsagemaker.chainer.model.ChainerModel: A SageMaker ``ChainerModel`` object.\nSee :func:`~sagemaker.chainer.model.ChainerModel` for full details.", "source": "codesearchnet"}
{"code": "def make_decorator(target, decorator_func, decorator_name=None, decorator_doc='', decorator_argspec=None):\n    if decorator_name is None:\n        decorator_name = inspect.currentframe().f_back.f_code.co_name\n    decorator = TFDecorator(decorator_name, target, decorator_doc, decorator_argspec)\n    setattr(decorator_func, '_tf_decorator', decorator)\n    if hasattr(target, '__name__'):\n        decorator_func.__name__ = target.__name__\n    if hasattr(target, '__qualname__'):\n        decorator_func.__qualname__ = target.__qualname__\n    if hasattr(target, '__module__'):\n        decorator_func.__module__ = target.__module__\n    if hasattr(target, '__dict__'):\n        for name in target.__dict__:\n            if name not in decorator_func.__dict__:\n                decorator_func.__dict__[name] = target.__dict__[name]\n    if hasattr(target, '__doc__'):\n        decorator_func.__doc__ = decorator.__doc__\n    decorator_func.__wrapped__ = target\n    decorator_func.__original_wrapped__ = target\n    if decorator_argspec:\n        decorator_func.__signature__ = fullargspec_to_signature(decorator_argspec)\n    elif callable(target):\n        try:\n            signature = inspect.signature(target)\n        except (TypeError, ValueError):\n            pass\n        else:\n            bound_instance = _get_bound_instance(target)\n            if bound_instance and 'self' in signature.parameters:\n                signature = inspect.Signature(list(signature.parameters.values())[1:])\n                decorator_func.__self__ = bound_instance\n            decorator_func.__signature__ = signature\n    return decorator_func", "docstring": "Make a decorator from a wrapper and a target.\n\nArgs:\ntarget: The final callable to be wrapped.\ndecorator_func: The wrapper function.\ndecorator_name: The name of the decorator. If `None`, the name of the\nfunction calling make_decorator.\ndecorator_doc: Documentation specific to this application of\n`decorator_func` to `target`.\ndecorator_argspec: Override the signature using FullArgSpec.\n\nReturns:\nThe `decorator_func` argument with new metadata attached.", "source": "github-repos"}
{"code": "def adjust_contrast(img, contrast_factor):\n    \n    if not _is_pil_image(img):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n    enhancer = ImageEnhance.Contrast(img)\n    img = enhancer.enhance(contrast_factor)\n    return img", "docstring": "Adjust contrast of an Image.\n\nArgs:\nimg (PIL Image): PIL Image to be adjusted.\ncontrast_factor (float): How much to adjust the contrast. Can be any\nnon negative number. 0 gives a solid gray image, 1 gives the\noriginal image while 2 increases the contrast by a factor of 2.\n\nReturns:\nPIL Image: Contrast adjusted image.", "source": "juraj-google-style"}
{"code": "def debye_temperature(self, structure):\n    v0 = ((structure.volume * 1e-30) / structure.num_sites)\n    (vl, vt) = (self.long_v(structure), self.trans_v(structure))\n    vm = ((3 ** (1.0 / 3.0)) * (((1 / (vl ** 3)) + (2 / (vt ** 3))) ** ((- 1.0) / 3.0)))\n    td = (((1.05457e-34 / 1.38065e-23) * vm) * (((6 * (np.pi ** 2)) / v0) ** (1.0 / 3.0)))\n    return td", "docstring": "Estimates the debye temperature from longitudinal and\ntransverse sound velocities\n\nArgs:\nstructure: pymatgen structure object\n\nReturns: debye temperature (in SI units)", "source": "codesearchnet"}
{"code": "def call_rpc(self, rpc_id, payload=bytes()):\n        \n\n        \n        \n        if super(ServiceDelegateTile, self).has_rpc(rpc_id):\n            return super(ServiceDelegateTile, self).call_rpc(rpc_id, payload)\n\n        async def _awaitable_wrapper():\n\n            \n            \n            \n            \n            \n            resp = await self._client.send_rpc(self._service, rpc_id, payload, timeout=120.0)\n            result = resp['result']\n\n            if result == 'success':\n                return resp['response']\n            elif result == 'service_not_found':\n                raise TileNotFoundError(\"Could not find service by name\", name=self._service)\n            elif result == 'rpc_not_found':\n                raise RPCNotFoundError(\"Could not find RPC on service\", name=self._service, rpc_id=rpc_id)\n            elif result == 'invalid_arguments':\n                raise RPCInvalidArgumentsError(\"Invalid arguments to RPC\", name=self._service, rpc_id=rpc_id)\n            elif result == 'invalid_response':\n                raise RPCInvalidReturnValueError(\"Invalid response from RPC\", name=self._service, rpc_id=rpc_id)\n            elif result == 'execution_exception':\n                raise InternalError(\"Exception raised during processing RPC\", name=self._service, rpc_id=rpc_id)\n            else:\n                raise InternalError(\"Unknown response received from delegated RPC\", name=self._service, rpc_id=rpc_id, result=result)\n\n        return _awaitable_wrapper()", "docstring": "Call an RPC by its ID.\n\nArgs:\nrpc_id (int): The number of the RPC\npayload (bytes): A byte string of payload parameters up to 20 bytes\n\nReturns:\nstr: The response payload from the RPC", "source": "juraj-google-style"}
{"code": "def add_output(self, output):\n    if (not isinstance(output, Output)):\n        raise TypeError('`output` must be an Output instance or None')\n    self.outputs.append(output)", "docstring": "Adds an output to a Transaction's list of outputs.\n\nArgs:\noutput (:class:`~bigchaindb.common.transaction.\nOutput`): An Output to be added to the\nTransaction.", "source": "codesearchnet"}
{"code": "def __init__(self, tcex):\n        \n        self._tcex = tcex\n        self._data = {}\n\n        self._type = 'Owner'\n        self._api_type = 'owners'\n        self._api_entity = 'owner'\n\n        self._utils = TcExUtils()\n        self._tc_requests = TiTcRequest(self._tcex)", "docstring": "Initialize Class Properties.\n\nArgs:\ntcex:", "source": "juraj-google-style"}
{"code": "def _xray_clean_up_entries_for_driver(self, driver_id):\n        \n\n        xray_task_table_prefix = (\n            ray.gcs_utils.TablePrefix_RAYLET_TASK_string.encode(\"ascii\"))\n        xray_object_table_prefix = (\n            ray.gcs_utils.TablePrefix_OBJECT_string.encode(\"ascii\"))\n\n        task_table_objects = self.state.task_table()\n        driver_id_hex = binary_to_hex(driver_id)\n        driver_task_id_bins = set()\n        for task_id_hex, task_info in task_table_objects.items():\n            task_table_object = task_info[\"TaskSpec\"]\n            task_driver_id_hex = task_table_object[\"DriverID\"]\n            if driver_id_hex != task_driver_id_hex:\n                \n                continue\n            driver_task_id_bins.add(hex_to_binary(task_id_hex))\n\n        \n        object_table_objects = self.state.object_table()\n        driver_object_id_bins = set()\n        for object_id, _ in object_table_objects.items():\n            task_id_bin = ray._raylet.compute_task_id(object_id).binary()\n            if task_id_bin in driver_task_id_bins:\n                driver_object_id_bins.add(object_id.binary())\n\n        def to_shard_index(id_bin):\n            return binary_to_object_id(id_bin).redis_shard_hash() % len(\n                self.state.redis_clients)\n\n        \n        sharded_keys = [[] for _ in range(len(self.state.redis_clients))]\n        for task_id_bin in driver_task_id_bins:\n            sharded_keys[to_shard_index(task_id_bin)].append(\n                xray_task_table_prefix + task_id_bin)\n        for object_id_bin in driver_object_id_bins:\n            sharded_keys[to_shard_index(object_id_bin)].append(\n                xray_object_table_prefix + object_id_bin)\n\n        \n        for shard_index in range(len(sharded_keys)):\n            keys = sharded_keys[shard_index]\n            if len(keys) == 0:\n                continue\n            redis = self.state.redis_clients[shard_index]\n            num_deleted = redis.delete(*keys)\n            logger.info(\"Monitor: \"\n                        \"Removed {} dead redis entries of the \"\n                        \"driver from redis shard {}.\".format(\n                            num_deleted, shard_index))\n            if num_deleted != len(keys):\n                logger.warning(\"Monitor: \"\n                               \"Failed to remove {} relevant redis \"\n                               \"entries from redis shard {}.\".format(\n                                   len(keys) - num_deleted, shard_index))", "docstring": "Remove this driver's object/task entries from redis.\n\nRemoves control-state entries of all tasks and task return\nobjects belonging to the driver.\n\nArgs:\ndriver_id: The driver id.", "source": "juraj-google-style"}
{"code": "def __init__(self, communication=collective_util.CommunicationImplementation.AUTO, cluster_resolver=None):\n    communication_options = collective_util.Options(implementation=communication)\n    super(_CollectiveAllReduceStrategyExperimental, self).__init__(cluster_resolver, communication_options)", "docstring": "Creates the strategy.\n\nArgs:\ncommunication: optional\n`tf.distribute.experimental.CommunicationImplementation`. This is a hint\non the preferred collective communication implementation. Possible\nvalues include `AUTO`, `RING`, and `NCCL`.\ncluster_resolver: optional\n`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,\n`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.", "source": "github-repos"}
{"code": "def write_data(num_lines, no_data=False, directory=None, prefix=tempfile.template, eol=EOL.LF, custom_delimiter=None, line_value=b'line'):\n    all_data = []\n    with tempfile.NamedTemporaryFile(delete=False, dir=directory, prefix=prefix) as f:\n        sep_values = [b'\\n', b'\\r\\n']\n        for i in range(num_lines):\n            data = b'' if no_data else line_value + str(i).encode()\n            all_data.append(data)\n            if eol == EOL.LF:\n                sep = sep_values[0]\n            elif eol == EOL.CRLF:\n                sep = sep_values[1]\n            elif eol == EOL.MIXED:\n                sep = sep_values[i % len(sep_values)]\n            elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:\n                sep = b'' if i == num_lines - 1 else sep_values[0]\n            elif eol == EOL.CUSTOM_DELIMITER:\n                if custom_delimiter is None or len(custom_delimiter) == 0:\n                    raise ValueError('delimiter can not be null or empty')\n                else:\n                    sep = custom_delimiter\n            else:\n                raise ValueError('Received unknown value %s for eol.' % eol)\n            f.write(data + sep)\n        return (f.name, [line.decode('utf-8') for line in all_data])", "docstring": "Writes test data to a temporary file.\n\nArgs:\nnum_lines (int): The number of lines to write.\nno_data (bool): If :data:`True`, empty lines will be written, otherwise\neach line will contain a concatenation of b'line' and the line number.\ndirectory (str): The name of the directory to create the temporary file in.\nprefix (str): The prefix to use for the temporary file.\neol (int): The line ending to use when writing.\n:class:`~apache_beam.io.textio_test.EOL` exposes attributes that can be\nused here to define the eol.\ncustom_delimiter (bytes): The custom delimiter.\nline_value (bytes): Default value for test data, default b'line'\n\nReturns:\nTuple[str, List[str]]: A tuple of the filename and a list of the\nutf-8 decoded written data.", "source": "github-repos"}
{"code": "def write_data(msg_type, profile_name, data, cfg):\n    if (profile_name not in cfg.data):\n        cfg.data[profile_name] = {}\n    cfg.data[profile_name][msg_type] = data", "docstring": "Write the settings into the data portion of the cfg.\n\nArgs:\n:msg_type: (str) message type to create config entry.\n:profile_name: (str) name of the profile entry\n:data: (dict) dict values for the 'settings'\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def __init__(self, observ_shape, action_shape, min_duration, max_duration):\n    \n    self._observ_shape = observ_shape\n    self._action_shape = action_shape\n    self._min_duration = min_duration\n    self._max_duration = max_duration\n    self._random = np.random.RandomState(0)\n    self.steps = []\n    self.durations = []", "docstring": "Generate random agent input and keep track of statistics.\n\nArgs:\nobserv_shape: Shape for the random observations.\naction_shape: Shape for the action space.\nmin_duration: Minimum number of steps per episode.\nmax_duration: Maximum number of steps per episode.\n\nAttributes:\nsteps: List of actual simulated lengths for all episodes.\ndurations: List of decided lengths for all episodes.", "source": "juraj-google-style"}
{"code": "def set_xlim(self, xlims, dx, xscale, reverse=False):\n        \n        self._set_axis_limits('x', xlims, dx, xscale, reverse)\n        return", "docstring": "Set x limits for plot.\n\nThis will set the limits for the x axis\nfor the specific plot.\n\nArgs:\nxlims (len-2 list of floats): The limits for the axis.\ndx (float): Amount to increment by between the limits.\nxscale (str): Scale of the axis. Either `log` or `lin`.\nreverse (bool, optional): If True, reverse the axis tick marks. Default is False.", "source": "juraj-google-style"}
{"code": "def get(self, path, params=None, headers=None):\n        \n        response = requests.get(\n            self._url_for(path),\n            params=params,\n            headers=self._headers(headers)\n        )\n        self._handle_errors(response)\n        return response", "docstring": "Perform a GET request, optionally providing query-string params.\n\nArgs:\npath (str): A path that gets appended to ``base_url``.\nparams (dict, optional): Dictionary of param names to values.\n\nExample:\napi_client.get('/users', params={'active': True})\n\nReturns:\nA requests ``Response`` object.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 log_dir=DEFAULT_RESULTS_DIR,\n                 reload_interval=30,\n                 standalone=True,\n                 log_level=\"INFO\"):\n        \n        self.logger = self.init_logger(log_level)\n        self.standalone = standalone\n        self.collector = Collector(\n            reload_interval=reload_interval,\n            logdir=log_dir,\n            logger=self.logger)", "docstring": "Initialize the collector service.\n\nArgs:\nlog_dir (str): Directory of the logs about trials' information.\nreload_interval (int): Sleep time period after each polling round.\nstandalone (boolean): The service will not stop and if True.\nlog_level (str): Level of logging.", "source": "juraj-google-style"}
{"code": "def _filter_exception(self, ex):\n    if isinstance(ex, tuple):\n        ex2 = ex[1]\n    else:\n        ex2 = ex\n    if isinstance(ex2, self._clean_stop_exception_types):\n        ex = None\n    return ex", "docstring": "Check if the exception indicated in 'ex' should be ignored.\n\nThis method examines `ex` to check if it is an exception that should be\nreported to the users.  If yes, it returns `ex` as is, otherwise it returns\nNone.\n\nThe code returns None for exception types listed in\n`_clean_stop_exception_types`.\n\nArgs:\nex: None, an `Exception`, or a Python `exc_info` tuple as returned by\n`sys.exc_info()`.\n\nReturns:\nex or None.", "source": "github-repos"}
{"code": "def moments_of_masked_time_series(time_series_tensor, broadcast_mask):\n  \n  num_unmasked_entries = tf.cast(\n      tf.reduce_sum(input_tensor=tf.cast(~broadcast_mask, tf.int32), axis=-1),\n      time_series_tensor.dtype)\n\n  \n  mean = (tf.reduce_sum(input_tensor=tf.where(\n      broadcast_mask,\n      tf.zeros_like(time_series_tensor),\n      time_series_tensor), axis=-1) / num_unmasked_entries)\n  variance = (tf.reduce_sum(input_tensor=tf.where(\n      broadcast_mask,\n      tf.zeros_like(time_series_tensor),\n      (time_series_tensor - mean[..., tf.newaxis]) ** 2), axis=-1)\n              / num_unmasked_entries)\n  return mean, variance", "docstring": "Compute mean and variance, accounting for a mask.\n\nArgs:\ntime_series_tensor: float `Tensor` time series of shape\n`concat([batch_shape, [num_timesteps]])`.\nbroadcast_mask: bool `Tensor` of the same shape as `time_series`.\nReturns:\nmean: float `Tensor` of shape `batch_shape`.\nvariance: float `Tensor` of shape `batch_shape`.", "source": "juraj-google-style"}
{"code": "def _transpile_circuit(circuit_config_tuple):\n    \n    circuit, transpile_config = circuit_config_tuple\n\n    \n    if transpile_config.pass_manager:\n        pass_manager = transpile_config.pass_manager\n\n    elif transpile_config.coupling_map:\n        pass_manager = default_pass_manager(transpile_config.basis_gates,\n                                            transpile_config.coupling_map,\n                                            transpile_config.initial_layout,\n                                            transpile_config.seed_transpiler)\n    else:\n        pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)\n\n    return pass_manager.run(circuit)", "docstring": "Select a PassManager and run a single circuit through it.\n\nArgs:\ncircuit_config_tuple (tuple):\ncircuit (QuantumCircuit): circuit to transpile\ntranspile_config (TranspileConfig): configuration dictating how to transpile\n\nReturns:\nQuantumCircuit: transpiled circuit", "source": "juraj-google-style"}
{"code": "def instantiate_resolver(self, name, args):\n    if (name not in self._known_resolvers):\n        raise ArgumentError('Attempting to instantiate unknown dependency resolver', name=name)\n    return self._known_resolvers[name](args)", "docstring": "Directly instantiate a dependency resolver by name with the given arguments\n\nArgs:\nname (string): The name of the class that we want to instantiate\nargs (dict): The arguments to pass to the resolver factory\n\nReturns:\nDependencyResolver", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    self._last_charset_attribute = 'ascii'\n\n    self._ParseHeader(parser_mediator, file_object)\n\n    data_dict = {}\n    time_dict = {}\n\n    try:\n      for name, value in self._ParseAttributesGroup(file_object):\n        name = self._ATTRIBUTE_NAME_TRANSLATION.get(name, name)\n\n        if name in self._DATE_TIME_VALUE_NAMES:\n          time_dict.setdefault(name, []).append(value)\n        else:\n          data_dict.setdefault(name, []).append(value)\n\n    except (ValueError, errors.ParseError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse attributes with error: {0!s}'.format(exception))\n      return\n\n    event_data = CupsIppEventData()\n    event_data.application = self._GetStringValue(data_dict, 'application')\n    event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')\n    event_data.copies = data_dict.get('copies', [0])[0]\n    event_data.data_dict = data_dict\n    event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')\n    event_data.job_id = self._GetStringValue(data_dict, 'job_id')\n    event_data.job_name = self._GetStringValue(data_dict, 'job_name')\n    event_data.user = self._GetStringValue(data_dict, 'user')\n    event_data.owner = self._GetStringValue(data_dict, 'owner')\n    event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')\n    event_data.uri = self._GetStringValue(data_dict, 'uri')\n\n    for name, usage in iter(self._DATE_TIME_VALUES.items()):\n      for date_time in time_dict.get(name, []):\n        event = time_events.DateTimeValuesEvent(date_time, usage)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    for name, usage in iter(self._POSIX_TIME_VALUES.items()):\n      for time_value in time_dict.get(name, []):\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)\n        event = time_events.DateTimeValuesEvent(date_time, usage)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a CUPS IPP file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def write_to_hdf5(self, filename_out, *args, **kwargs):\n        \n\n        print(\"[Filterbank] Warning: Non-standard function to write in HDF5 (.h5) format. Please use Waterfall.\")\n\n        if not HAS_HDF5:\n            raise RuntimeError(\"h5py package required for HDF5 output.\")\n\n        with h5py.File(filename_out, 'w') as h5:\n\n            dset = h5.create_dataset(b'data',\n                              data=self.data,\n                              compression='lzf')\n\n            dset_mask = h5.create_dataset(b'mask',\n                                     shape=self.data.shape,\n                                     compression='lzf',\n                                     dtype='uint8')\n\n            dset.dims[0].label = b\"frequency\"\n            dset.dims[1].label = b\"feed_id\"\n            dset.dims[2].label = b\"time\"\n\n            dset_mask.dims[0].label = b\"frequency\"\n            dset_mask.dims[1].label = b\"feed_id\"\n            dset_mask.dims[2].label = b\"time\"\n\n            \n            for key, value in self.header.items():\n                dset.attrs[key] = value", "docstring": "Write data to HDF5 file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "juraj-google-style"}
{"code": "def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):\n    \n    ax, ay = item_a.center_of_mass(time_a)\n    bx, by = item_b.center_of_mass(time_b)\n    if time_a < time_b:\n        bx = bx - item_b.u\n        by = by - item_b.v\n    else:\n        ax = ax - item_a.u\n        ay = ay - item_a.v\n    return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)", "docstring": "Centroid distance with motion corrections.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"}
{"code": "def insert_paulis(self, indices=None, paulis=None, pauli_labels=None):\n    if (pauli_labels is not None):\n        if (paulis is not None):\n            raise QiskitError('Please only provide either `paulis` or `pauli_labels`')\n        if isinstance(pauli_labels, str):\n            pauli_labels = list(pauli_labels)\n        paulis = Pauli.from_label(pauli_labels[::(- 1)])\n    if (indices is None):\n        self._z = np.concatenate((self._z, paulis.z))\n        self._x = np.concatenate((self._x, paulis.x))\n    else:\n        if (not isinstance(indices, list)):\n            indices = [indices]\n        self._z = np.insert(self._z, indices, paulis.z)\n        self._x = np.insert(self._x, indices, paulis.x)\n    return self", "docstring": "Insert or append pauli to the targeted indices.\n\nIf indices is None, it means append at the end.\n\nArgs:\nindices (list[int]): the qubit indices to be inserted\npaulis (Pauli): the to-be-inserted or appended pauli\npauli_labels (list[str]): the to-be-inserted or appended pauli label\n\nNote:\nthe indices refers to the localion of original paulis,\ne.g. if indices = [0, 2], pauli_labels = ['Z', 'I'] and original pauli = 'ZYXI'\nthe pauli will be updated to ZY'I'XI'Z'\n'Z' and 'I' are inserted before the qubit at 0 and 2.\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: provide both `paulis` and `pauli_labels` at the same time", "source": "codesearchnet"}
{"code": "def __init__(self, name, func):\n    \n    self._func = func\n    if name:\n      self._var_scope = None\n      self._name = name\n    else:\n      self._var_scope = tf.get_variable_scope()\n      self._name = None\n    self._reuse = None\n    self._stacktrace = traceback.format_stack()[:-3]", "docstring": "Creates a template for the given function.\n\nArgs:\nname: The variable_scope to use, if None the current scope is captured.\nfunc: The function to apply each time.", "source": "juraj-google-style"}
{"code": "def wait_for_capture(self, timeout=None):\n    raise NotImplementedError('Base class should not be called directly!')", "docstring": "This function waits for a capture to terminate and guarantees that\nthe capture is saved to the capture file configured during the\nstart_capture() method. Depending on the type of the sniffer the file\nmay previously contain partial results (e.g. for a local sniffer) or\nmay not exist until the stop_capture() method is executed (e.g. for a\nremote sniffer).\n\nDepending on the type/subtype and configuration of the sniffer the\ncapture may terminate on its own without requiring a call to this\nfunction. In such a case it is still necessary to call either this\nfunction or the stop_capture() function to make sure that the capture\nfile is moved to the correct location.\n\nArgs:\ntimeout: An integer specifying the number of seconds to wait for\nthe capture to terminate on its own. On expiration of the\ntimeout the sniffer is stopped explicitly using the\nstop_capture() function.\n\nRaises:\nNoPermissionError: No permission when trying to stop a capture and\nsave the capture file.", "source": "github-repos"}
{"code": "def execute(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n        alloc = parent.allocator\n\n        trigger_stream, trigger_cond = parent.trigger_chain()\n        rpc_const = alloc.allocate_stream(DataStream.ConstantType, attach=True)\n        rpc_val = (self.slot_id.address << 16) | self.rpc_id\n\n        stream = self.stream\n        if stream is None:\n            stream = alloc.allocate_stream(DataStream.UnbufferedType)\n\n        sensor_graph.add_node(u\"({} {} && {} always) => {} using call_rpc\".format(trigger_stream, trigger_cond, rpc_const, stream))\n        sensor_graph.add_constant(rpc_const, rpc_val)", "docstring": "Execute this statement on the sensor_graph given the current scope tree.\n\nThis adds a single node to the sensor graph with the call_rpc function\nas is processing function.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def recursive_chmod(path, mode=0755):\n    \n    passwd_reader.set_permissions(path, mode=mode)\n    if os.path.isfile(path):\n        return\n\n    \n    for root, dirs, files in os.walk(path):\n        for fn in files + dirs:\n            passwd_reader.set_permissions(os.path.join(root, fn), mode=mode)", "docstring": "Recursively change ``mode`` for given ``path``. Same as ``chmod -R mode``.\n\nArgs:\npath (str): Path of the directory/file.\nmode (octal int, default 0755): New mode of the file.\n\nWarning:\nDon't forget to add ``0`` at the beginning of the numbers of `mode`, or\n`Unspeakable hOrRoRs` will be awaken from their unholy sleep outside of\nthe reality and they WILL eat your soul (and your files).", "source": "juraj-google-style"}
{"code": "def densifying_unary(func):\n\n    @functools.wraps(func)\n    def sparse_wrapper(x, *args, **kwargs):\n        if isinstance(x, jax_sparse.JAXSparse):\n            x = x.todense()\n        return func(x, *args, **kwargs)\n    return sparse_wrapper", "docstring": "Decorator to add support for `JAXSparse` tensors (including `BCOO`) to a\nnon-zero-preserving element-wise unary operator.\n\nThere are requirements on the operator for this decorator to work correctly:\n\n- The operator must be element-wise\n- The operator must be unary (one input tensor and one output tensor)\n- The operator must return a tensor of the same shape.\n\nAdditional arguments to the function (besides the input tensor) are\nsupported. The returned result is a dense tensor.\n\nArgs:\nfunc: The unary operator to wrap.\nReturns:\nWrapped function that supports `JAXSparse` tensors.", "source": "github-repos"}
{"code": "def _compile_function_expression(self,\n                                     expr: Expression,\n                                     scope: Dict[str, TensorFluent],\n                                     batch_size: Optional[int] = None,\n                                     noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:\n        \n        etype = expr.etype\n        args = expr.args\n\n        if len(args) == 1:\n\n            etype2func = {\n                'abs':    TensorFluent.abs,\n                'exp':    TensorFluent.exp,\n                'log':    TensorFluent.log,\n                'sqrt':   TensorFluent.sqrt,\n                'cos':    TensorFluent.cos,\n                'sin':    TensorFluent.sin,\n                'tan':    TensorFluent.tan,\n                'acos':   TensorFluent.acos,\n                'arccos': TensorFluent.acos,\n                'asin':   TensorFluent.asin,\n                'arcsin': TensorFluent.asin,\n                'atan':   TensorFluent.atan,\n                'arctan': TensorFluent.atan,\n                'round':  TensorFluent.round,\n                'ceil':   TensorFluent.ceil,\n                'floor':  TensorFluent.floor\n            }\n\n            if etype[1] not in etype2func:\n                raise ValueError('Invalid unary function expression:\\n{}'.format(expr))\n\n            op = etype2func[etype[1]]\n            x = self._compile_expression(args[0], scope, batch_size, noise)\n            fluent = op(x)\n\n        else:\n            etype2func = {\n                'pow': TensorFluent.pow,\n                'max': TensorFluent.max,\n                'min': TensorFluent.min\n            }\n\n            if etype[1] not in etype2func:\n                raise ValueError('Invalid binary function expression:\\n{}'.format(expr))\n\n            op = etype2func[etype[1]]\n            x = self._compile_expression(args[0], scope, batch_size, noise)\n            y = self._compile_expression(args[1], scope, batch_size, noise)\n            fluent = op(x, y)\n\n        return fluent", "docstring": "Compile a function expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "juraj-google-style"}
{"code": "def _compare_versions(v1, v2):\n    if v1 == 'inf' and v2 == 'inf':\n        raise RuntimeError('Cannot compare `inf` to `inf`.')\n    rtn_dict = {'smaller': None, 'larger': None}\n    v1_list = v1.split('.')\n    v2_list = v2.split('.')\n    if v1_list[0] == 'inf':\n        v1_list[0] = str(int(v2_list[0]) + 1)\n    if v2_list[0] == 'inf':\n        v2_list[0] = str(int(v1_list[0]) + 1)\n    v_long = v1_list if len(v1_list) >= len(v2_list) else v2_list\n    v_short = v1_list if len(v1_list) < len(v2_list) else v2_list\n    larger, smaller = (None, None)\n    for i, ver in enumerate(v_short, start=0):\n        if int(ver) > int(v_long[i]):\n            larger = _list_to_string(v_short, '.')\n            smaller = _list_to_string(v_long, '.')\n        elif int(ver) < int(v_long[i]):\n            larger = _list_to_string(v_long, '.')\n            smaller = _list_to_string(v_short, '.')\n        elif i == len(v_short) - 1:\n            if v_long[i + 1:] == ['0'] * (len(v_long) - 1 - i):\n                larger = 'equal'\n                smaller = 'equal'\n            else:\n                larger = _list_to_string(v_long, '.')\n                smaller = _list_to_string(v_short, '.')\n        else:\n            pass\n        if larger:\n            break\n    rtn_dict['smaller'] = smaller\n    rtn_dict['larger'] = larger\n    return rtn_dict", "docstring": "Compare two versions and return information on which is smaller vs. larger.\n\nArgs:\nv1: String that is a version to be compared against `v2`.\nv2: String that is a version to be compared against `v1`.\n\nReturns:\nDict that stores larger version with key `larger` and smaller version with\nkey `smaller`.\ne.g. {`larger`: `1.5.0`, `smaller`: `1.2.0`}\n\nRaises:\nRuntimeError: If asked to compare `inf` to `inf`.", "source": "github-repos"}
{"code": "def set_extana_led(self, r, g, b, check_state=True):\n    (r, g, b) = map(int, [r, g, b])\n    if ((min([r, g, b]) < LED_MIN) or (max([r, g, b]) > LED_MAX)):\n        logger.warn('RGB channel values must be {}-{}'.format(LED_MIN, LED_MAX))\n        return False\n    if (check_state and ((r, g, b) == self.led_state)):\n        return True\n    (ir, ig, ib) = map((lambda x: int((x * (INT_LED_MAX / LED_MAX)))), [r, g, b])\n    val = struct.pack('<HHH', ir, ig, ib)\n    extana_led = self.get_characteristic_handle_from_uuid(UUID_EXTANA_LED)\n    if (extana_led is None):\n        logger.warn('Failed to find handle for ExtAna LED')\n        return None\n    if (not self.dongle._write_attribute(self.conn_handle, extana_led, val)):\n        return False\n    self.led_state = (r, g, b)\n    return True", "docstring": "Update the colour of the RGB LED on the SK8-ExtAna board.\n\nArgs:\nr (int): red channel, 0-255\ng (int): green channel, 0-255\nb (int): blue channel, 0-255\ncheck_state (bool): if True (default) and the locally cached LED state matches\nthe given (r, g, b) triplet, pysk8 will NOT send any LED update command to\nthe SK8. If you want to force the command to be sent even if the local state\nmatches the new colour, set this to False.\n\nReturns:\nTrue on success, False if an error occurred.", "source": "codesearchnet"}
{"code": "def __init__(self, lookup_map, fallback=None):\n    super().__init__()\n    if fallback is not None:\n        lookup_map['*'] = fallback\n    self._lookup_map = lookup_map", "docstring": "Create this visitor.\n\nYou're expected to then pass this instance to node.Visit().\n\nArgs:\nlookup_map: A map from names to symbol tables (i.e., objects that have a\n\"Lookup\" function).\nfallback: A symbol table to be tried if lookup otherwise fails.", "source": "github-repos"}
{"code": "def is_location(v) -> (bool, str):\n        \n\n        def convert2float(value):\n            try:\n                float_num = float(value)\n                return float_num\n            except ValueError:\n                return False\n\n        if not isinstance(v, str):\n            return False, v\n        split_lst = v.split(\":\")\n        if len(split_lst) != 5:\n            return False, v\n        if convert2float(split_lst[3]):\n            longitude = abs(convert2float(split_lst[3]))\n            if longitude > 90:\n                return False, v\n        if convert2float(split_lst[4]):\n            latitude = abs(convert2float(split_lst[3]))\n            if latitude > 180:\n                return False, v\n        return True, v", "docstring": "Boolean function for checking if v is a location format\n\nArgs:\nv:\nReturns: bool", "source": "juraj-google-style"}
{"code": "def _ParseDateTimeValue(self, byte_stream, file_offset):\n    datetime_value_map = self._GetDataTypeMap('cups_ipp_datetime_value')\n    try:\n        value = self._ReadStructureFromByteStream(byte_stream, file_offset, datetime_value_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse datetime value with error: {0!s}'.format(exception))\n    direction_from_utc = chr(value.direction_from_utc)\n    rfc2579_date_time_tuple = (value.year, value.month, value.day_of_month, value.hours, value.minutes, value.seconds, value.deciseconds, direction_from_utc, value.hours_from_utc, value.minutes_from_utc)\n    return dfdatetime_rfc2579_date_time.RFC2579DateTime(rfc2579_date_time_tuple=rfc2579_date_time_tuple)", "docstring": "Parses a CUPS IPP RFC2579 date-time value from a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nfile_offset (int): offset of the attribute data relative to the start of\nthe file-like object.\n\nReturns:\ndfdatetime.RFC2579DateTime: RFC2579 date-time stored in the value.\n\nRaises:\nParseError: when the RFC2579 date-time value cannot be parsed.", "source": "codesearchnet"}
{"code": "def return_estimator(self):\n    estimator = self.base_learner_origin.return_estimator()\n    estimator = estimator.set_params(**self.hyperparameters)\n    return estimator", "docstring": "Returns base learner using its origin and the given hyperparameters\n\nReturns:\nest (estimator): Estimator object", "source": "codesearchnet"}
{"code": "def get_sailthru_client(site_code):\n    \n    \n    config = get_sailthru_configuration(site_code)\n\n    \n    if not config.get('SAILTHRU_ENABLE'):\n        msg = 'Sailthru is not enabled for site {}'.format(site_code)\n        log.debug(msg)\n        raise SailthruNotEnabled(msg)\n\n    \n    key = config.get('SAILTHRU_KEY')\n    secret = config.get('SAILTHRU_SECRET')\n\n    if not (key and secret):\n        msg = 'Both key and secret are required for site {}'.format(site_code)\n        log.error(msg)\n        raise ConfigurationError(msg)\n\n    return SailthruClient(key, secret)", "docstring": "Returns a Sailthru client for the specified site.\n\nArgs:\nsite_code (str): Site for which the client should be configured.\n\nReturns:\nSailthruClient\n\nRaises:\nSailthruNotEnabled: If Sailthru is not enabled for the specified site.\nConfigurationError: If either the Sailthru API key or secret are not set for the site.", "source": "juraj-google-style"}
{"code": "async def send_command(self, command, args, validator, timeout=10.0):\n    if (self._con is None):\n        raise ExternalError('No websock connection established')\n    cmd_uuid = str(uuid.uuid4())\n    msg = dict(type='command', operation=command, uuid=cmd_uuid, payload=args)\n    packed = pack(msg)\n    response_future = self._manager.wait_for(type='response', uuid=cmd_uuid, timeout=timeout)\n    (await self._con.send(packed))\n    response = (await response_future)\n    if (response.get('success') is False):\n        self._raise_error(command, response)\n    if (validator is None):\n        return response.get('payload')\n    return validator.verify(response.get('payload'))", "docstring": "Send a command and synchronously wait for a single response.\n\nArgs:\ncommand (string): The command name\nargs (dict): Optional arguments.\nvalidator (Verifier): A SchemaVerifier to verify the response\npayload.\ntimeout (float): The maximum time to wait for a response.\nDefaults to 10 seconds.\n\nReturns:\ndict: The response payload\n\nRaises:\nExternalError: If the server is not connected or the command\nfails.\nasyncio.TimeoutError: If the command times out.\nValidationError: If the response payload does not match the\ngiven validator.", "source": "codesearchnet"}
{"code": "def create_transfer_learning_tuner(parent, additional_parents=None, estimator=None, sagemaker_session=None):\n    parent_tuner = HyperparameterTuner.attach(tuning_job_name=parent, sagemaker_session=sagemaker_session)\n    return parent_tuner.transfer_learning_tuner(additional_parents=additional_parents, estimator=estimator)", "docstring": "Creates a new ``HyperParameterTuner`` by copying the request fields from the provided parent to the new instance\nof ``HyperparameterTuner`` followed by addition of warm start configuration with the type as \"TransferLearning\"\nand ``parents`` as the union of provided list of ``additional_parents`` and the ``parent``.\n\nArgs:\nparent (str): Primary parent tuning job's name from which the Tuner and Estimator configuration has to be copied\nadditional_parents (set{str}): Set of additional parent tuning job's names along with the primary parent tuning\njob name to be used in warm starting the identical dataset and algorithm tuner.\nestimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with\nthe desired configuration. There does not need to be a training job associated with this instance.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, one is created\nusing the default AWS configuration chain.\n\nReturns:\nsagemaker.tuner.HyperparameterTuner: New instance of warm started HyperparameterTuner", "source": "codesearchnet"}
{"code": "def _ExtractFileEntry(self, path_spec, destination_path, output_writer, skip_duplicates=True):\n    file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)\n    if (not file_entry):\n        logger.warning('Unable to open file entry for path spec: {0:s}'.format(path_spec.comparable))\n        return\n    if (not self._filter_collection.Matches(file_entry)):\n        return\n    file_entry_processed = False\n    for data_stream in file_entry.data_streams:\n        if self._abort:\n            break\n        self._ExtractDataStream(file_entry, data_stream.name, destination_path, output_writer, skip_duplicates=skip_duplicates)\n        file_entry_processed = True\n    if (not file_entry_processed):\n        self._ExtractDataStream(file_entry, '', destination_path, output_writer, skip_duplicates=skip_duplicates)", "docstring": "Extracts a file entry.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification of the source file.\ndestination_path (str): path where the extracted files should be stored.\noutput_writer (CLIOutputWriter): output writer.\nskip_duplicates (Optional[bool]): True if files with duplicate content\nshould be skipped.", "source": "codesearchnet"}
{"code": "def traverse_pagination(response, endpoint, content_filter_query, query_params):\n    results = response.get('results', [])\n    page = 1\n    while response.get('next'):\n        page += 1\n        response = endpoint().post(content_filter_query, **dict(query_params, page=page))\n        results += response.get('results', [])\n    return results", "docstring": "Traverse a paginated API response and extracts and concatenates \"results\" returned by API.\n\nArguments:\nresponse (dict): API response object.\nendpoint (Slumber.Resource): API endpoint object.\ncontent_filter_query (dict): query parameters used to filter catalog results.\nquery_params (dict): query parameters used to paginate results.\n\nReturns:\nlist: all the results returned by the API.", "source": "codesearchnet"}
{"code": "def _tf_assert_stmt(expression1, expression2):\n    expression2_tensors = expression2()\n    if not isinstance(expression2_tensors, list):\n        expression2_tensors = [expression2_tensors]\n    return control_flow_assert.Assert(expression1, expression2_tensors)", "docstring": "Overload of assert_stmt that stages a TF Assert.\n\nThis implementation deviates from Python semantics as follows:\n(1) the assertion is verified regardless of the state of __debug__\n(2) on assertion failure, the graph execution will fail with\ntensorflow.errors.ValueError, rather than AssertionError.\n\nArgs:\nexpression1: tensorflow.Tensor, must evaluate to a tf.bool scalar\nexpression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]]\n\nReturns:\ntensorflow.Operation", "source": "github-repos"}
{"code": "def _do_policy_eval(tf_sess, to_eval, policies, active_episodes):\n    eval_results = {}\n    if tf_sess:\n        builder = TFRunBuilder(tf_sess, 'policy_eval')\n        pending_fetches = {}\n    else:\n        builder = None\n    if log_once('compute_actions_input'):\n        logger.info('Inputs to compute_actions():\\n\\n{}\\n'.format(summarize(to_eval)))\n    for (policy_id, eval_data) in to_eval.items():\n        rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])\n        policy = _get_or_raise(policies, policy_id)\n        if (builder and (policy.compute_actions.__code__ is TFPolicyGraph.compute_actions.__code__)):\n            pending_fetches[policy_id] = policy._build_compute_actions(builder, [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data])\n        else:\n            eval_results[policy_id] = policy.compute_actions([t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data], info_batch=[t.info for t in eval_data], episodes=[active_episodes[t.env_id] for t in eval_data])\n    if builder:\n        for (k, v) in pending_fetches.items():\n            eval_results[k] = builder.get(v)\n    if log_once('compute_actions_result'):\n        logger.info('Outputs of compute_actions():\\n\\n{}\\n'.format(summarize(eval_results)))\n    return eval_results", "docstring": "Call compute actions on observation batches to get next actions.\n\nReturns:\neval_results: dict of policy to compute_action() outputs.", "source": "codesearchnet"}
{"code": "def start_upsert(ini_data):\n    stack_driver = CloudStackUtility(ini_data)\n    poll_stack = (not ini_data.get('no_poll', False))\n    if stack_driver.upsert():\n        logging.info('stack create/update was started successfully.')\n        if poll_stack:\n            stack_tool = None\n            try:\n                profile = ini_data.get('environment', {}).get('profile')\n                if profile:\n                    boto3_session = boto3.session.Session(profile_name=profile)\n                else:\n                    boto3_session = boto3.session.Session()\n                region = ini_data['environment']['region']\n                stack_name = ini_data['environment']['stack_name']\n                cf_client = stack_driver.get_cloud_formation_client()\n                if (not cf_client):\n                    cf_client = boto3_session.client('cloudformation', region_name=region)\n                stack_tool = stack_tool = StackTool(stack_name, region, cf_client)\n            except Exception as wtf:\n                logging.warning('there was a problems creating stack tool: {}'.format(wtf))\n            if stack_driver.poll_stack():\n                try:\n                    logging.info('stack create/update was finished successfully.')\n                    stack_tool.print_stack_info()\n                except Exception as wtf:\n                    logging.warning('there was a problems printing stack info: {}'.format(wtf))\n                sys.exit(0)\n            else:\n                try:\n                    logging.error('stack create/update was did not go well.')\n                    stack_tool.print_stack_events()\n                except Exception as wtf:\n                    logging.warning('there was a problems printing stack events: {}'.format(wtf))\n                sys.exit(1)\n    else:\n        logging.error('start of stack create/update did not go well.')\n        sys.exit(1)", "docstring": "Helper function to facilitate upsert.\n\nArgs:\nini_date - the dictionary of info to run upsert\n\nExit:\n0 - good\n1 - bad", "source": "codesearchnet"}
{"code": "def clean_code(content: str) -> str:\n    splits = content.split('\"\"\"')\n    content = ''.join(splits[::2])\n    splits = content.split(\"'''\")\n    content = ''.join(splits[::2])\n    lines_to_keep = []\n    for line in content.split('\\n'):\n        line = re.sub('\n        if len(line) != 0 and (not line.isspace()):\n            lines_to_keep.append(line)\n    return '\\n'.join(lines_to_keep)", "docstring": "Remove docstrings, empty line or comments from some code (used to detect if a diff is real or only concern\ncomments or docstings).\n\nArgs:\ncontent (`str`): The code to clean\n\nReturns:\n`str`: The cleaned code.", "source": "github-repos"}
{"code": "def map_fn(fn, elems, name=None, dtype=None):\n    return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)", "docstring": "Map the function fn over the elements elems and return the outputs.\n\nArgs:\nfn: Callable that will be called upon each element in elems\nelems: tensor\nname: A string name for the map node in the graph\ndtype: Output data type.\n\nReturns:\nTensor with dtype `dtype`.", "source": "github-repos"}
{"code": "def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:\n        if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:\n            return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n        return _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n    elif sorted(reduction_axes) == list(range(ndim(x)))[:-1]:\n        return _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)\n    else:\n        return _broadcast_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=epsilon)", "docstring": "Computes mean and std for batch then apply batch_normalization on batch.\n\nArgs:\nx: Input tensor or variable.\ngamma: Tensor by which to scale the input.\nbeta: Tensor with which to center the input.\nreduction_axes: iterable of integers,\naxes over which to normalize.\nepsilon: Fuzz factor.\n\nReturns:\nA tuple length of 3, `(normalized_tensor, mean, variance)`.", "source": "github-repos"}
{"code": "def _partitioner(shape, dtype):\n    if not isinstance(shape, tensor_shape.TensorShape):\n        raise ValueError(f'shape is not a TensorShape: {shape}')\n    if not shape.is_fully_defined():\n        raise ValueError(f'shape is not fully defined: {shape}')\n    dtype = dtypes.as_dtype(dtype)\n    if dtype.base_dtype == dtypes.string:\n        element_size = bytes_per_string_element\n    else:\n        element_size = dtype.size\n    partitions = [1] * shape.ndims\n    bytes_per_slice = 1.0 * (shape.num_elements() / shape.dims[axis].value) * element_size\n    slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n    axis_shards = int(math.ceil(1.0 * shape.dims[axis].value / slices_per_shard))\n    if max_shards:\n        axis_shards = min(max_shards, axis_shards)\n    partitions[axis] = axis_shards\n    return partitions", "docstring": "Partitioner that partitions shards to have max_shard_bytes total size.\n\nArgs:\nshape: A `TensorShape`.\ndtype: A `DType`.\n\nReturns:\nA tuple representing how much to slice each axis in shape.\n\nRaises:\nValueError: If shape is not a fully defined `TensorShape` or dtype is not\na `DType`.", "source": "github-repos"}
{"code": "def while_loop_op(op):\n    return control_flow_util.IsLoopSwitch(op) or control_flow_util.IsLoopMerge(op) or control_flow_util.IsLoopEnter(op) or control_flow_util.IsLoopExit(op) or TensorTracer.loop_cond_op(op) or (op.type in ('RefNextIteration', 'NextIteration'))", "docstring": "Returns true if op is one of the special ops of in a while loop.\n\nArgs:\nop: A tf.Operation.\n\nReturns:\nTrue if the given op is one of [Switch, Merge, Enter, Exit,\nNextIteration, LoopCond], which are all building blocks for TF while\nloops.", "source": "github-repos"}
{"code": "def get_subgraph_for_concept_pair(\n        self, source: str, target: str, cutoff: Optional[int] = None\n    ):\n        \n        paths = nx.all_simple_paths(self, source, target, cutoff=cutoff)\n        return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths))))", "docstring": "Get subgraph comprised of simple paths between the source and the\ntarget.\n\nArgs:\nsource\ntarget\ncutoff", "source": "juraj-google-style"}
{"code": "def get(self, addresses):\n    with self._lock:\n        results = []\n        for add in addresses:\n            self.validate_read(add)\n            results.append(self._get(add))\n        return results", "docstring": "Returns the value in this context, or None, for each address in\naddresses. Useful for gets on the context manager.\n\nArgs:\naddresses (list of str): The addresses to return values for, if\nwithin this context.\n\nReturns:\nresults (list of bytes): The values in state for these addresses.", "source": "codesearchnet"}
{"code": "def fit_transform_table(self, table, table_meta, transformer_dict=None, transformer_list=None, missing=None):\n    if (missing is None):\n        missing = self.missing\n    else:\n        self.missing = missing\n        warnings.warn(DEPRECATION_MESSAGE.format('fit_transform_table'), DeprecationWarning)\n    result = pd.DataFrame()\n    table_name = table_meta['name']\n    for field in table_meta['fields']:\n        col_name = field['name']\n        if transformer_list:\n            for transformer_name in transformer_list:\n                if (field['type'] == self.get_class(transformer_name).type):\n                    transformed = self._fit_transform_column(table, field, transformer_name, table_name)\n                    result = pd.concat([result, transformed], axis=1)\n        elif ((table_name, col_name) in transformer_dict):\n            transformer_name = TRANSFORMERS[transformer_dict[(table_name, col_name)]]\n            transformed = self._fit_transform_column(table, field, transformer_name, table_name)\n            result = pd.concat([result, transformed], axis=1)\n    return result", "docstring": "Create, apply and store the specified transformers for `table`.\n\nArgs:\ntable(pandas.DataFrame):    Contents of the table to be transformed.\n\ntable_meta(dict):   Metadata for the given table.\n\ntransformer_dict(dict):     Mapping  `tuple(str, str)` -> `str` where the tuple in the\nkeys represent the (table_name, column_name) and the value\nthe name of the assigned transformer.\n\ntransformer_list(list):     List of transformers to use. Overrides the transformers in\nthe meta_file.\n\nmissing(bool):      Wheter or not use NullTransformer to handle missing values.\n\nReturns:\npandas.DataFrame: Transformed table.", "source": "codesearchnet"}
{"code": "def check_filepath(self, path, filename):\n        \n        settings_path = os.path.join(path, filename)\n\n        if not os.path.exists(settings_path) or \\\n           not os.path.isfile(settings_path):\n            msg = \"Unable to find settings file: {}\"\n            raise SettingsBackendError(msg.format(settings_path))\n\n        return settings_path", "docstring": "Check and return the final filepath to settings\n\nArgs:\npath (str): Directory path where to search for settings file.\nfilename (str): Filename to use to search for settings file.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If determined filepath\ndoes not exists or is a directory.\n\nReturns:\nstring: Settings file path, joining given path and filename.", "source": "juraj-google-style"}
{"code": "def getParameter(self, name):\n    return lock_and_call((lambda : Parameter(self._impl.getParameter(name))), self._lock)", "docstring": "Get the parameter with the corresponding name.\n\nArgs:\nname: Name of the parameter to be found.\n\nRaises:\nTypeError: if the specified parameter does not exist.", "source": "codesearchnet"}
{"code": "def predict_image(img, model_func):\n    \n\n    orig_shape = img.shape[:2]\n    resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)\n    resized_img = resizer.augment(img)\n    scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])\n    boxes, probs, labels, *masks = model_func(resized_img)\n    boxes = boxes / scale\n    \n    boxes = clip_boxes(boxes, orig_shape)\n\n    if masks:\n        \n        full_masks = [_paste_mask(box, mask, orig_shape)\n                      for box, mask in zip(boxes, masks[0])]\n        masks = full_masks\n    else:\n        \n        masks = [None] * len(boxes)\n\n    results = [DetectionResult(*args) for args in zip(boxes, probs, labels.tolist(), masks)]\n    return results", "docstring": "Run detection on one image, using the TF callable.\nThis function should handle the preprocessing internally.\n\nArgs:\nimg: an image\nmodel_func: a callable from the TF model.\nIt takes image and returns (boxes, probs, labels, [masks])\n\nReturns:\n[DetectionResult]", "source": "juraj-google-style"}
{"code": "def std(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Std(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.std(x, axis=axis, keepdims=keepdims)", "docstring": "Compute the standard deviation along the specified axis.\n\nArgs:\nx: Input tensor.\naxis: Axis along which to compute standard deviation.\nDefault is to compute the standard deviation of the\nflattened tensor.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one.\n\nReturns:\nOutput tensor containing the standard deviation values.", "source": "github-repos"}
{"code": "def affine_transform(self, image: np.array, center: Tuple[float], scale: Tuple[float], rotation: float, size: Dict[str, int], data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:\n    data_format = input_data_format if data_format is None else data_format\n    size = (size['width'], size['height'])\n    transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0)\n    image = image if input_data_format == ChannelDimension.LAST else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)\n    image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0]))\n    image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST)\n    return image", "docstring": "Apply an affine transformation to an image.\n\nArgs:\nimage (`np.array`):\nImage to transform.\ncenter (`Tuple[float]`):\nCenter of the bounding box (x, y).\nscale (`Tuple[float]`):\nScale of the bounding box with respect to height/width.\nrotation (`float`):\nRotation angle in degrees.\nsize (`Dict[str, int]`):\nSize of the destination image.\ndata_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):\nThe channel dimension format of the output image.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image.", "source": "github-repos"}
{"code": "def received(self, messages):\n    if messages:\n        if self._queue:\n            self._queue.put_nowait(messages)\n        if self._callback:\n            self._callback(messages)", "docstring": "Called when new messages arrive.\n\nArgs:\nmessages (tuple): Messages", "source": "codesearchnet"}
{"code": "def __init__(self, channel, pin):\n        \n\n        self._channel = None\n        self._pin = None\n        self._open(channel, pin)", "docstring": "Instantiate a PWM object and open the sysfs PWM corresponding to the\nspecified channel and pin.\n\nArgs:\nchannel (int): Linux channel number.\npin (int): Linux pin number.\n\nReturns:\nPWM: PWM object.\n\nRaises:\nPWMError: if an I/O or OS error occurs.\nTypeError: if `channel` or `pin` types are invalid.\nValueError: if PWM channel does not exist.", "source": "juraj-google-style"}
{"code": "def make_innermost_setter(setter):\n  \n\n  @functools.wraps(setter)\n  def _new_setter(kernel_results, *args, **kwargs):\n    \n    results_stack = []\n    while hasattr(kernel_results, 'inner_results'):\n      results_stack.append(kernel_results)\n      kernel_results = kernel_results.inner_results\n\n    new_kernel_results = setter(kernel_results, *args, **kwargs)\n    for outer_results in reversed(results_stack):\n      new_kernel_results = outer_results._replace(\n          inner_results=new_kernel_results)\n\n    return new_kernel_results\n\n  return _new_setter", "docstring": "Wraps a setter so it applies to the inner-most results in `kernel_results`.\n\nThe wrapped setter unwraps `kernel_results` and applies `setter` to the first\nresults without an `inner_results` attribute.\n\nArgs:\nsetter: A callable that takes the kernel results as well as some `*args` and\n`**kwargs` and returns a modified copy of those kernel results.\n\nReturns:\nnew_setter: A wrapped `setter`.", "source": "juraj-google-style"}
{"code": "def _CreateShapePointFolder(self, shapes_folder, shape):\n    folder_name = (shape.shape_id + ' Shape Points')\n    folder = self._CreateFolder(shapes_folder, folder_name, visible=False)\n    for (index, (lat, lon, dist)) in enumerate(shape.points):\n        placemark = self._CreatePlacemark(folder, str((index + 1)))\n        point = ET.SubElement(placemark, 'Point')\n        coordinates = ET.SubElement(point, 'coordinates')\n        coordinates.text = ('%.6f,%.6f' % (lon, lat))\n    return folder", "docstring": "Create a KML Folder containing all the shape points in a shape.\n\nThe folder contains placemarks for each shapepoint.\n\nArgs:\nshapes_folder: A KML Shape Folder ElementTree.Element instance\nshape: The shape to plot.\n\nReturns:\nThe Folder ElementTree.Element instance or None.", "source": "codesearchnet"}
{"code": "def plot_brillouin_zone(bz_lattice, lines=None, labels=None, kpoints=None, fold=False, coords_are_cartesian=False, ax=None, **kwargs):\n    (fig, ax) = plot_lattice_vectors(bz_lattice, ax=ax)\n    plot_wigner_seitz(bz_lattice, ax=ax)\n    if (lines is not None):\n        for line in lines:\n            plot_path(line, bz_lattice, coords_are_cartesian=coords_are_cartesian, ax=ax)\n    if (labels is not None):\n        plot_labels(labels, bz_lattice, coords_are_cartesian=coords_are_cartesian, ax=ax)\n        plot_points(labels.values(), bz_lattice, coords_are_cartesian=coords_are_cartesian, fold=False, ax=ax)\n    if (kpoints is not None):\n        plot_points(kpoints, bz_lattice, coords_are_cartesian=coords_are_cartesian, ax=ax, fold=fold)\n    ax.set_xlim3d((- 1), 1)\n    ax.set_ylim3d((- 1), 1)\n    ax.set_zlim3d((- 1), 1)\n    ax.set_aspect('equal')\n    ax.axis('off')\n    return fig", "docstring": "Plots a 3D representation of the Brillouin zone of the structure.\nCan add to the plot paths, labels and kpoints\n\nArgs:\nbz_lattice: Lattice object of the Brillouin zone\nlines: list of lists of coordinates. Each list represent a different path\nlabels: dict containing the label as a key and the coordinates as value.\nkpoints: list of coordinates\nfold: whether the points should be folded inside the first Brillouin Zone.\nDefaults to False. Requires lattice if True.\ncoords_are_cartesian: Set to True if you are providing\ncoordinates in cartesian coordinates. Defaults to False.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: provided by add_fig_kwargs decorator\n\nReturns:\nmatplotlib figure", "source": "codesearchnet"}
{"code": "def is_user_in_group(self, user, group):\n    search_url = ('%s/%s/%s/%s/%s' % (self.url, 'group', group, 'user', user))\n    response = self.jss.get(search_url)\n    length = len(response)\n    result = False\n    if (length == 1):\n        pass\n    elif (length == 2):\n        if (response.findtext('ldap_user/username') == user):\n            if (response.findtext('ldap_user/is_member') == 'Yes'):\n                result = True\n    elif (len(response) >= 2):\n        raise JSSGetError('Unexpected response.')\n    return result", "docstring": "Test for whether a user is in a group.\n\nThere is also the ability in the API to test for whether\nmultiple users are members of an LDAP group, but you should just\ncall is_user_in_group over an enumerated list of users.\n\nArgs:\nuser: String username.\ngroup: String group name.\n\nReturns bool.", "source": "codesearchnet"}
{"code": "def as_bytes(bytes_or_text, encoding='utf-8'):\n    encoding = codecs.lookup(encoding).name\n    if isinstance(bytes_or_text, bytearray):\n        return bytes(bytes_or_text)\n    elif isinstance(bytes_or_text, str):\n        return bytes_or_text.encode(encoding)\n    elif isinstance(bytes_or_text, bytes):\n        return bytes_or_text\n    else:\n        raise TypeError('Expected binary or unicode string, got %r' % (bytes_or_text,))", "docstring": "Converts `bytearray`, `bytes`, or unicode python input types to `bytes`.\n\nUses utf-8 encoding for text by default.\n\nArgs:\nbytes_or_text: A `bytearray`, `bytes`, `str`, or `unicode` object.\nencoding: A string indicating the charset for encoding unicode.\n\nReturns:\nA `bytes` object.\n\nRaises:\nTypeError: If `bytes_or_text` is not a binary or unicode string.", "source": "github-repos"}
{"code": "def resize_image(image, tuple_wh, preserve_aspect=True):\n    if preserve_aspect:\n        img_cpy = image.copy()\n        img_cpy.thumbnail(tuple_wh)\n        return img_cpy\n    else:\n        return image.resize(tuple_wh)", "docstring": "Resizes an instance of a PIL Image.\n\nIn order to prevent un-intended side effects,\nthis function always returns a copy of the image,\nas the resize function from PIL returns a copy\nbut the thumbnail function does not.\n\nArgs:\nimage: An instance of a PIL Image.\ntuple_wh: A tuple containing the (width, height) for resizing.\npreserve_aspect: A boolean that determines whether or not the\nresizing should preserve the image's aspect ratio.\n\nReturns: A resized copy of the provided PIL image.", "source": "codesearchnet"}
{"code": "def _get_nn_shell_info(self, structure, all_nn_info, site_idx, shell, _previous_steps=frozenset(), _cur_image=(0, 0, 0)):\n    if (shell <= 0):\n        raise ValueError('Shell must be positive')\n    _previous_steps = _previous_steps.union({(site_idx, _cur_image)})\n    possible_steps = list(all_nn_info[site_idx])\n    for (i, step) in enumerate(possible_steps):\n        step = dict(step)\n        step['image'] = tuple(np.add(step['image'], _cur_image).tolist())\n        possible_steps[i] = step\n    allowed_steps = [x for x in possible_steps if ((x['site_index'], x['image']) not in _previous_steps)]\n    if (shell == 1):\n        return allowed_steps\n    else:\n        terminal_neighbors = [self._get_nn_shell_info(structure, all_nn_info, x['site_index'], (shell - 1), _previous_steps, x['image']) for x in allowed_steps]\n        all_sites = dict()\n        for (first_site, term_sites) in zip(allowed_steps, terminal_neighbors):\n            for term_site in term_sites:\n                key = (term_site['site_index'], tuple(term_site['image']))\n                term_site['weight'] *= first_site['weight']\n                value = all_sites.get(key)\n                if (value is not None):\n                    value['weight'] += term_site['weight']\n                else:\n                    value = term_site\n                all_sites[key] = value\n    return list(all_sites.values())", "docstring": "Private method for computing the neighbor shell information\n\nArgs:\nstructure (Structure) - Structure being assessed\nall_nn_info ([[dict]]) - Results from `get_all_nn_info`\nsite_idx (int) - index of site for which to determine neighbor\ninformation.\nshell (int) - Which neighbor shell to retrieve (1 == 1st NN shell)\n_previous_step ({(site_idx, image}) - Internal use only: Set of\nsites that have already been traversed.\n_cur_image (tuple) - Internal use only Image coordinates of current atom\nReturns:\nlist of dictionaries. Each entry in the list is information about\na certain neighbor in the structure, in the same format as\n`get_nn_info`. Does not update the site positions", "source": "codesearchnet"}
{"code": "def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):\n    shape = (shape if isinstance(shape, (tuple, list)) else [shape])\n    x = tf.get_variable(name, shape, dtype=dtype, initializer=tf.constant_initializer(value), regularizer=regularizer, trainable=trainable)\n    if summary:\n        tf.sg_summary_param(x)\n    return x", "docstring": "r\"\"\"Creates a tensor variable of which initial values are `value` and shape is `shape`.\n\nArgs:\nname: The name of new variable.\nshape: A tuple/list of integers or an integer.\nIf shape is an integer, it is converted to a list.\nvalue: A Python scalar. All elements of the initialized variable\nwill be set to this value. Default is 0.\ndtype: The data type. Only floating point types are supported. Default is float32.\nsummary: If True, add this constant to tensor board summary.\nregularizer:  A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable\nwill be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization\ntrainable: If True, add this constant to trainable collection. Default is True.\n\nReturns:\nA `Variable`.", "source": "codesearchnet"}
{"code": "def get_package_install_path(self, path):\n        \n        from rez.package_repository import package_repository_manager\n\n        pkg_repo = package_repository_manager.get_repository(path)\n\n        return pkg_repo.get_package_payload_path(\n            package_name=self.package.name,\n            package_version=self.package.version\n        )", "docstring": "Return the installation path for a package (where its payload goes).\n\nArgs:\npath (str): Package repository path.", "source": "juraj-google-style"}
{"code": "def memory_read32(self, addr, num_words, zone=None):\n        \n        return self.memory_read(addr, num_words, zone=zone, nbits=32)", "docstring": "Reads memory from the target system in units of 32-bits.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_words (int): number of words to read\nzone (str): memory zone to read from\n\nReturns:\nList of words read from the target system.\n\nRaises:\nJLinkException: if memory could not be read", "source": "juraj-google-style"}
{"code": "def send_update(url_id, dataset):\n    data = _convert_to_seeder_format(dataset)\n    if (not data):\n        return\n    try:\n        _send_request(url_id, json=data, req_type=requests.patch)\n    except Exception as e:\n        sys.stderr.write('Seeder PATCH error: ')\n        sys.stderr.write(str(e.message))\n        return None", "docstring": "Send request to Seeder's API with data changed by user.\n\nArgs:\nurl_id (str): ID used as identification in Seeder.\ndataset (dict): WA-KAT dataset sent from frontend.", "source": "codesearchnet"}
{"code": "def add_tags(self, ID3=None):\n    if (ID3 is None):\n        ID3 = self.ID3\n    if (self.tags is None):\n        self.ID3 = ID3\n        self.tags = ID3()\n    else:\n        raise error('an ID3 tag already exists')", "docstring": "Add an empty ID3 tag to the file.\n\nArgs:\nID3 (ID3): An ID3 subclass to use or `None` to use the one\nthat used when loading.\n\nA custom tag reader may be used in instead of the default\n`ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.", "source": "codesearchnet"}
{"code": "def self_adjoint_eigvals(tensor, name=None):\n    e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name)\n    return e", "docstring": "Computes the eigenvalues of one or more self-adjoint matrices.\n\nNote: If your program backpropagates through this function, you should replace\nit with a call to tf.linalg.eigh (possibly ignoring the second output) to\navoid computing the eigen decomposition twice. This is because the\neigenvectors are used to compute the gradient w.r.t. the eigenvalues. See\n_SelfAdjointEigV2Grad in linalg_grad.py.\n\nArgs:\ntensor: `Tensor` of shape `[..., N, N]`.\nname: string, optional name of the operation.\n\nReturns:\ne: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`\neigenvalues of `tensor[..., :, :]`.", "source": "github-repos"}
{"code": "def _benchmarkFetch(self, name, target, size, iters):\n    times = []\n    with ops.Graph().as_default():\n        v = variables.Variable(random_ops.random_normal([size]))\n        with session.Session(target) as sess:\n            sess.run(v.initializer)\n            sess.run(v)\n            for _ in range(iters):\n                start_time = time.time()\n                sess.run(v)\n                end_time = time.time()\n                times.append(end_time - start_time)\n    print('%s %d %f' % (name, size, np.median(times)))\n    self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", "docstring": "Runs a microbenchmark to measure the cost of fetching a tensor.\n\nReports the median cost of fetching a tensor of `size` * `sizeof(float)`\nbytes.\n\nArgs:\nname: A human-readable name for logging the output.\ntarget: The session target to use for the benchmark.\nsize: The number of floating-point numbers to be fetched.\niters: The number of iterations to perform.", "source": "github-repos"}
{"code": "def update_configuration(self, did, wid, eid, payload):\n    req_headers = {'Accept': 'application/vnd.onshape.v1+json', 'Content-Type': 'application/json'}\n    res = self._api.request('post', (((((('/api/partstudios/d/' + did) + '/w/') + wid) + '/e/') + eid) + '/configuration'), body=payload, headers=req_headers)\n    return res", "docstring": "Update the configuration specified in the payload\n\nArgs:\n- did (str): Document ID\n- eid (str): Element ID\n- payload (json): the request body\nReturns:\n- configuration (str): the url-ready configuration string.", "source": "codesearchnet"}
{"code": "def patch(make_pool=_default_make_pool):\n    setattr(httplib2, '_HttpOriginal', httplib2.Http)\n    httplib2.Http = Http\n    Http._make_pool = make_pool", "docstring": "Monkey-patches httplib2.Http to be httplib2shim.Http.\n\nThis effectively makes all clients of httplib2 use urlilb3. It's preferable\nto specify httplib2shim.Http explicitly where you can, but this can be\nuseful in situations where you do not control the construction of the http\nobject.\n\nArgs:\nmake_pool: A function that returns a urllib3.Pool-like object. This\nallows you to specify special arguments to your connection pool if\nneeded. By default, this will create a urllib3.PoolManager with\nSSL verification enabled using the certifi certificates.", "source": "codesearchnet"}
{"code": "def __init__(self, namespace: str, prefix: str=''):\n    if prefix:\n        prefix = f'{prefix}_'\n    self._inference_counter = beam.metrics.Metrics.counter(namespace, prefix + 'num_inferences')\n    self.failed_batches_counter = beam.metrics.Metrics.counter(namespace, prefix + 'failed_batches_counter')\n    self._inference_request_batch_size = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_request_batch_size')\n    self._inference_request_batch_byte_size = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_request_batch_byte_size')\n    self._inference_batch_latency_micro_secs = beam.metrics.Metrics.distribution(namespace, prefix + 'inference_batch_latency_micro_secs')\n    self._model_byte_size = beam.metrics.Metrics.distribution(namespace, prefix + 'model_byte_size')\n    self._load_model_latency_milli_secs = beam.metrics.Metrics.distribution(namespace, prefix + 'load_model_latency_milli_secs')\n    self._load_model_latency_milli_secs_cache = None\n    self._model_byte_size_cache = None", "docstring": "Args:\nnamespace: Namespace for the metrics.\nprefix: Unique identifier for metrics, used when models\nare updated using side input.", "source": "github-repos"}
{"code": "def csv(self, ondemand=False):\n        \n        self._request_uri = '{}/{}'.format(self._api_uri, 'csv')\n        self._stream = True\n        if ondemand:\n            self._request.add_payload('runNow', True)", "docstring": "Update request URI to return CSV data.\n\nFor onDemand bulk generation to work it must first be enabled in the\nThreatConnect platform under System settings.\n\nArgs:\nondemand (boolean): Enable on demand bulk generation.", "source": "juraj-google-style"}
{"code": "def _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev):\n    num_devices = len(chunks_by_dev)\n    num_chunks = len(chunks_by_dev[0])\n    if 0 != num_chunks % num_devices:\n        raise ValueError('Expect number of chunks per device to be divisible by num_devices')\n    num_subchunks = int(num_chunks / num_devices)\n    num_ticks = num_devices - 1\n    for tick in range(0, num_ticks):\n        passed_values = [None for _ in range(0, num_chunks)]\n        for d in range(0, num_devices):\n            with ops.colocate_with(chunks_by_dev[d][0]):\n                for s in range(0, num_subchunks):\n                    rank = rank_by_s_d[s][d]\n                    seg_index = (rank + num_devices - (1 + tick)) % num_devices\n                    pred_dev = pred_by_s_d[s][d]\n                    chunk_index = seg_index * num_subchunks + s\n                    passed_values[chunk_index] = array_ops.identity(chunks_by_dev[pred_dev][chunk_index])\n        for d in range(0, num_devices):\n            for s in range(0, num_subchunks):\n                rank = rank_by_s_d[s][d]\n                seg_index = (rank + num_devices - (1 + tick)) % num_devices\n                chunk_index = seg_index * num_subchunks + s\n                chunks_by_dev[d][chunk_index] = passed_values[chunk_index]\n    output = []\n    for x in chunks_by_dev:\n        with ops.colocate_with(x[0]):\n            output.append(array_ops.concat(x, 0))\n    return output", "docstring": "Construct subgraph for second (scatter) pass of ring all-reduce.\n\nArgs:\npred_by_s_d: as produced by _ring_permutations\nrank_by_s_d: as produced by _ring_permutations\nchunks_by_dev: list of list of `tf.Tensor` indexed by ints\n(device, chunk)\n\nRaises:\nValueError: chunks_by_dev is not well-formed\n\nReturns:\nlist of `tf.Tensor` which are the fully reduced tensors, one\nat each device corresponding to the outer dimension of chunks_by_dev.", "source": "github-repos"}
{"code": "def dump_in_memory_result(self, result, output_path):\n        \n        file_count = 0\n        logger.debug(\"Dumping in-memory processing results to output folder: %s\", output_path)\n        for k, v in iteritems(result):\n            cur_output_path = os.path.join(output_path, k)\n\n            if isinstance(v, dict):\n                file_count += self.dump_in_memory_result(v, cur_output_path)\n            else:\n                if not os.path.isdir(output_path):\n                    os.makedirs(output_path)\n\n                filename = os.path.join(output_path, k)\n                logger.debug(\"Writing output file: %s\", filename)\n                \n                with open(filename, 'wt', encoding=self.config.encoding) as f:\n                    f.write(v)\n\n                file_count += 1\n\n        return file_count", "docstring": "Recursively dumps the result of our processing into files within the\ngiven output path.\n\nArgs:\nresult: The in-memory result of our processing.\noutput_path: Full path to the folder into which to dump the files.\n\nReturns:\nThe number of files generated (integer).", "source": "juraj-google-style"}
{"code": "def from_string(input_str) -> 'MissionTime':\n        \n        \n        match = RE_INPUT_STRING.match(input_str)\n        if not match:\n            raise ValueError(f'badly formatted date/time: {input_str}')\n\n        return MissionTime(\n            datetime.datetime(\n                int(match.group('year')),\n                int(match.group('month')),\n                int(match.group('day')),\n                int(match.group('hour')),\n                int(match.group('minute')),\n                int(match.group('second')),\n            )\n        )", "docstring": "Creates a MissionTime instance from a string\n\nFormat: YYYYMMDDHHMMSS\n\nArgs:\ninput_str: string to parse\n\nReturns: MissionTime instance", "source": "juraj-google-style"}
{"code": "class MaxLengthCriteria(StoppingCriteria):\n\n    def __init__(self, max_length: int, max_position_embeddings: Optional[int]=None):\n        self.max_length = max_length\n        self.max_position_embeddings = max_position_embeddings\n\n    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:\n        cur_len = input_ids.shape[1]\n        is_done = cur_len >= self.max_length\n        if self.max_position_embeddings is not None and (not is_done) and (cur_len >= self.max_position_embeddings):\n            logger.warning_once(f\"This is a friendly reminder - the current text generation call will exceed the model's predefined maximum length ({self.max_position_embeddings}). Depending on the model, you may observe exceptions, performance degradation, or nothing at all.\")\n        return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)", "docstring": "This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep\nin mind for decoder-only type of transformers, this will include the initial prompted tokens.\n\nArgs:\nmax_length (`int`):\nThe maximum length that the output sequence can have in number of tokens.\nmax_position_embeddings (`int`, *optional*):\nThe maximum model length, as defined by the model's `config.max_position_embeddings` attribute.", "source": "github-repos"}
{"code": "def get_course_track_selection_url(course_run, query_parameters):\n    try:\n        course_root = reverse('course_modes_choose', kwargs={'course_id': course_run['key']})\n    except KeyError:\n        LOGGER.exception('KeyError while parsing course run data.\\nCourse Run: \\n[%s]', course_run)\n        raise\n    url = '{}{}'.format(settings.LMS_ROOT_URL, course_root)\n    course_run_url = update_query_parameters(url, query_parameters)\n    return course_run_url", "docstring": "Return track selection url for the given course.\n\nArguments:\ncourse_run (dict): A dictionary containing course run metadata.\nquery_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\nRaises:\n(KeyError): Raised when course run dict does not have 'key' key.\n\nReturns:\n(str): Course track selection url.", "source": "codesearchnet"}
{"code": "def create_degrees(input_dim, hidden_dims, input_order='left-to-right', hidden_order='left-to-right'):\n    if (isinstance(input_order, str) and (input_order not in ('random', 'left-to-right', 'right-to-left'))):\n        raise ValueError('Input order is not valid.')\n    if (hidden_order not in ('random', 'left-to-right')):\n        raise ValueError('Hidden order is not valid.')\n    degrees = []\n    if isinstance(input_order, str):\n        input_degrees = np.arange(1, (input_dim + 1))\n        if (input_order == 'right-to-left'):\n            input_degrees = np.flip(input_degrees, 0)\n        elif (input_order == 'random'):\n            np.random.shuffle(input_degrees)\n    else:\n        input_order = np.array(input_order)\n        if np.all((np.sort(input_order) != np.arange(1, (input_dim + 1)))):\n            raise ValueError('invalid input order')\n        input_degrees = input_order\n    degrees.append(input_degrees)\n    for units in hidden_dims:\n        if (hidden_order == 'random'):\n            min_prev_degree = min(np.min(degrees[(- 1)]), (input_dim - 1))\n            hidden_degrees = np.random.randint(low=min_prev_degree, high=input_dim, size=units)\n        elif (hidden_order == 'left-to-right'):\n            hidden_degrees = ((np.arange(units) % max(1, (input_dim - 1))) + min(1, (input_dim - 1)))\n        degrees.append(hidden_degrees)\n    return degrees", "docstring": "Returns a list of degree vectors, one for each input and hidden layer.\n\nA unit with degree d can only receive input from units with degree < d. Output\nunits always have the same degree as their associated input unit.\n\nArgs:\ninput_dim: Number of inputs.\nhidden_dims: list with the number of hidden units per layer. It does not\ninclude the output layer. Each hidden unit size must be at least the size\nof length (otherwise autoregressivity is not possible).\ninput_order: Order of degrees to the input units: 'random', 'left-to-right',\n'right-to-left', or an array of an explicit order. For example,\n'left-to-right' builds an autoregressive model\np(x) = p(x1) p(x2 | x1) ... p(xD | x<D).\nhidden_order: Order of degrees to the hidden units: 'random',\n'left-to-right'. If 'left-to-right', hidden units are allocated equally\n(up to a remainder term) to each degree.", "source": "codesearchnet"}
{"code": "def __init__(self, config_block):\n        \n        if config_block:\n            self._config = config_block\n        else:\n            logging.error('config block was garbage')\n            raise SystemError", "docstring": "Cloud stack utility init method.\n\nArgs:\nconfig_block - a dictionary creates from the CLI driver. See that\nscript for the things that are required and\noptional.\n\nReturns:\nnot a damn thing\n\nRaises:\nSystemError - if everything isn't just right", "source": "juraj-google-style"}
{"code": "def get_service_details(self, service_id: str) -> dict:\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve all the services details.')\n    service = self._client.services.get(service_id)\n    return service.attrs", "docstring": "Get details of a service.\n\nOnly the manager nodes can retrieve service details\n\nArgs:\nservice_id (string): List of service id\n\nReturns:\ndict, details of the service", "source": "codesearchnet"}
{"code": "def __call__(self, name, value):\n        \n        if not isinstance(value, self.base_type):\n            raise ValueError(\"%s must be %s, not %s\" % (name, self.base_type, value.__class__))", "docstring": "Call method.\n\nArgs:\nname (str): the value's name.\nvalue (object): the value to check.\n\nRaises:\nValueError: if value is not type base_type.", "source": "juraj-google-style"}
{"code": "def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:\n    out = F.dropout(x, p=prob, training=training)\n    out = residual + out\n    return out", "docstring": "Dropout add function\n\nArgs:\nx (`torch.tensor`):\ninput tensor\nresidual (`torch.tensor`):\nresidual tensor\nprob (`float`):\ndropout probability\ntraining (`bool`):\ntraining mode", "source": "github-repos"}
{"code": "def lease(queue_name, owner, count=1, timeout_seconds=60):\n    now = datetime.datetime.utcnow()\n    query = WorkQueue.query.filter_by(queue_name=queue_name, status=WorkQueue.LIVE).filter((WorkQueue.eta <= now)).order_by(WorkQueue.eta).with_lockmode('update').limit(count)\n    task_list = query.all()\n    if (not task_list):\n        return None\n    next_eta = (now + datetime.timedelta(seconds=timeout_seconds))\n    for task in task_list:\n        task.eta = next_eta\n        task.lease_attempts += 1\n        task.last_owner = owner\n        task.last_lease = now\n        task.heartbeat = None\n        task.heartbeat_number = 0\n        db.session.add(task)\n    return [_task_to_dict(task) for task in task_list]", "docstring": "Leases a work item from a queue, usually the oldest task available.\n\nArgs:\nqueue_name: Name of the queue to lease work from.\nowner: Who or what is leasing the task.\ncount: Lease up to this many tasks. Return value will never have more\nthan this many items present.\ntimeout_seconds: Number of seconds to lock the task for before\nallowing another owner to lease it.\n\nReturns:\nList of dictionaries representing the task that was leased, or\nan empty list if no tasks are available to be leased.", "source": "codesearchnet"}
{"code": "def get_by_name(self, name):\n    scopes = self._client.get_all()\n    result = [x for x in scopes if (x['name'] == name)]\n    return (result[0] if result else None)", "docstring": "Gets a Scope by name.\n\nArgs:\nname: Name of the Scope\n\nReturns:\ndict: Scope.", "source": "codesearchnet"}
{"code": "def _run_static_range_qat(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, signature_def_map: _SignatureDefMap) -> None:\n    logging.info('Running static-range quantization for QAT model.')\n    pywrap_quantize_model.quantize_qat_model(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())", "docstring": "Runs static-range quantization for a Quantization-Aware Trained model.\n\nRuns the quantization for a model trained using QAT.\n\nArgs:\nsrc_saved_model_path: Path to the source SavedModel directory.\ndst_saved_model_path: Path to the destination SavedModel directory.\nquant_opts: Quantization options.\nsignature_def_map: Signature def key -> SignatureDef mapping.", "source": "github-repos"}
{"code": "def replace_batch_norm(model):\n    for name, module in model.named_children():\n        if isinstance(module, nn.BatchNorm2d):\n            new_module = RTDetrV2FrozenBatchNorm2d(module.num_features)\n            if not module.weight.device == torch.device('meta'):\n                new_module.weight.data.copy_(module.weight)\n                new_module.bias.data.copy_(module.bias)\n                new_module.running_mean.data.copy_(module.running_mean)\n                new_module.running_var.data.copy_(module.running_var)\n            model._modules[name] = new_module\n        if len(list(module.children())) > 0:\n            replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `RTDetrV2FrozenBatchNorm2d`.\n\nArgs:\nmodel (torch.nn.Module):\ninput model", "source": "github-repos"}
{"code": "def segment_ids_to_row_splits(segment_ids, num_segments=None, out_type=None, name=None):\n    from tensorflow.python.ops import bincount_ops\n    if out_type is None:\n        if isinstance(segment_ids, tensor.Tensor):\n            out_type = segment_ids.dtype\n        elif isinstance(num_segments, tensor.Tensor):\n            out_type = num_segments.dtype\n        else:\n            out_type = dtypes.int64\n    else:\n        out_type = dtypes.as_dtype(out_type)\n    with ops.name_scope(name, 'SegmentIdsToRaggedSplits', [segment_ids]) as name:\n        segment_ids = ragged_util.convert_to_int_tensor(segment_ids, 'segment_ids', dtype=dtypes.int32)\n        segment_ids.shape.assert_has_rank(1)\n        if num_segments is not None:\n            num_segments = ragged_util.convert_to_int_tensor(num_segments, 'num_segments', dtype=dtypes.int32)\n            num_segments.shape.assert_has_rank(0)\n        row_lengths = bincount_ops.bincount(segment_ids, minlength=num_segments, maxlength=num_segments, dtype=out_type)\n        splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)\n        if num_segments is not None:\n            const_num_segments = tensor_util.constant_value(num_segments)\n            if const_num_segments is not None:\n                splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1]))\n        return splits", "docstring": "Generates the RaggedTensor `row_splits` corresponding to a segmentation.\n\nReturns an integer vector `splits`, where `splits[0] = 0` and\n`splits[i] = splits[i-1] + count(segment_ids==i)`.  Example:\n\n>>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]))\ntf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64)\n\nArgs:\nsegment_ids: A 1-D integer Tensor.\nnum_segments: A scalar integer indicating the number of segments.  Defaults\nto `max(segment_ids) + 1` (or zero if `segment_ids` is empty).\nout_type: The dtype for the return value.  Defaults to `segment_ids.dtype`,\nor `tf.int64` if `segment_ids` does not have a dtype.\nname: A name prefix for the returned tensor (optional).\n\nReturns:\nA sorted 1-D integer Tensor, with `shape=[num_segments + 1]`.", "source": "github-repos"}
{"code": "def get_book_links(links):\n    \n    book_links = []\n\n    for link in links:\n        data = DOWNER.download(link + \"1\")\n        dom = dhtmlparser.parseString(data)\n\n        book_links.extend(_parse_book_links(dom))\n\n        max_page = _get_max_page(dom)\n        if max_page == 1:\n            continue\n\n        for i in range(max_page - 1):\n            data = DOWNER.download(link + str(i + 2))\n\n            book_links.extend(\n                _parse_book_links(\n                    dhtmlparser.parseString(data)\n                )\n            )\n\n    return book_links", "docstring": "Go thru `links` to categories and return list to all publications in all\ngiven categories.\n\nArgs:\nlinks (list): List of strings (absolute links to categories).\n\nReturns:\nlist: List of strings / absolute links to book details.", "source": "juraj-google-style"}
{"code": "def download_kegg_gene_metadata(gene_id, outdir=None, force_rerun=False):\n    if (not outdir):\n        outdir = ''\n    outfile = op.join(outdir, '{}.kegg'.format(custom_slugify(gene_id)))\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        raw_text = bs_kegg.get('{}'.format(gene_id))\n        if (raw_text == 404):\n            return\n        with io.open(outfile, mode='wt', encoding='utf-8') as f:\n            f.write(raw_text)\n        log.debug('{}: downloaded KEGG metadata file'.format(outfile))\n    else:\n        log.debug('{}: KEGG metadata file already exists'.format(outfile))\n    return outfile", "docstring": "Download the KEGG flatfile for a KEGG ID and return the path.\n\nArgs:\ngene_id: KEGG gene ID (with organism code), i.e. \"eco:1244\"\noutdir: optional output directory of metadata\n\nReturns:\nPath to metadata file", "source": "codesearchnet"}
{"code": "def Serialize(self, writer):\n        \n        super(StorageItem, self).Serialize(writer)\n        writer.WriteVarBytes(self.Value)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates true positive and false negative statistics.\n\nArgs:\ny_true: The ground truth values, with the same dimensions as `y_pred`.\nWill be cast to `bool`.\ny_pred: The predicted values. Each element must be in the range `[0, 1]`.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def __init__(self, apps):\n        \n        try:\n            apps = list(apps.items())\n        except AttributeError:\n            pass\n\n        \n        def by_path_len(app):\n            return len(app[0])\n        apps.sort(key=by_path_len, reverse=True)\n\n        \n        \n        self.apps = [(p.rstrip('/'), a) for p, a in apps]", "docstring": "Initialize path info WSGI app dispatcher.\n\nArgs:\napps (dict[str,object]|list[tuple[str,object]]): URI prefix\nand WSGI app pairs", "source": "juraj-google-style"}
{"code": "def _FormatIPToken(self, token_data):\n    \n    data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])\n    return {'IPv4_Header': data}", "docstring": "Formats an IPv4 packet header token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_ip): AUT_IP token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def compare_jsone_task_definition(parent_link, rebuilt_definitions):\n    \n    diffs = []\n    for compare_definition in rebuilt_definitions['tasks']:\n        \n        if 'taskId' in compare_definition:\n            del(compare_definition['taskId'])\n        \n        \n        compare_definition = remove_empty_keys(compare_definition)\n        runtime_definition = remove_empty_keys(parent_link.task)\n\n        diff = list(dictdiffer.diff(compare_definition, runtime_definition))\n        if diff:\n            diffs.append(pprint.pformat(diff))\n            continue\n        log.info(\"{}: Good.\".format(parent_link.name))\n        break\n    else:\n        error_msg = \"{} {}: the runtime task doesn't match any rebuilt definition!\\n{}\".format(\n            parent_link.name, parent_link.task_id, pprint.pformat(diffs)\n        )\n        log.critical(error_msg)\n        raise CoTError(error_msg)", "docstring": "Compare the json-e rebuilt task definition vs the runtime definition.\n\nArgs:\nparent_link (LinkOfTrust): the parent link to test.\nrebuilt_definitions (dict): the rebuilt task definitions.\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def _GetSectionNames(self, pefile_object):\n    \n    section_names = []\n    for section in pefile_object.sections:\n      section_name = getattr(section, 'Name', b'')\n      \n      try:\n        section_name = '{0:s}'.format(section_name.decode('unicode_escape'))\n      except UnicodeDecodeError:\n        section_name = '{0:s}'.format(repr(section_name))\n      section_names.append(section_name)\n\n    return section_names", "docstring": "Retrieves all PE section names.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\nlist[str]: names of the sections.", "source": "juraj-google-style"}
{"code": "def assert_equal(first, second, msg=None, extras=None):\n    my_msg = None\n    try:\n        _pyunit_proxy.assertEqual(first, second)\n    except AssertionError as e:\n        my_msg = str(e)\n        if msg:\n            my_msg = ('%s %s' % (my_msg, msg))\n    if (my_msg is not None):\n        raise signals.TestFailure(my_msg, extras=extras)", "docstring": "Assert the equality of objects, otherwise fail the test.\n\nError message is \"first != second\" by default. Additional explanation can\nbe supplied in the message.\n\nArgs:\nfirst: The first object to compare.\nsecond: The second object to compare.\nmsg: A string that adds additional info about the failure.\nextras: An optional field for extra information to be included in\ntest result.", "source": "codesearchnet"}
{"code": "def _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config):\n    if config.use_gumbel_for_cells:\n        gumbel_dist = tfp.distributions.RelaxedBernoulli(config.temperature, logits=dist_per_cell.logits_parameter() * config.temperature)\n        scaled_probability_per_cell = gumbel_dist.sample()\n    else:\n        scaled_probability_per_cell = dist_per_cell.probs_parameter()\n    scaled_probability_per_cell = scaled_probability_per_cell / numeric_values_scale * input_mask_float\n    count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1)\n    numeric_values_masked = tf.where(tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values)\n    sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1)\n    avg_approximation = config.average_approximation_function\n    if avg_approximation == AverageApproximationFunction.RATIO:\n        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)\n    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:\n        ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1\n        average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1)\n    elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:\n        ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1\n        pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)\n        var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var\n        multiplier = (var / tf.math.square(ex) + 1) / ex\n        average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1)\n    else:\n        raise ValueError('Invalid average_approximation_function: %s', config.average_approximation_function)\n    if config.use_gumbel_for_aggregation:\n        gumbel_dist = tfp.distributions.RelaxedOneHotCategorical(config.aggregation_temperature, logits=logits_aggregation[:, 1:])\n        aggregation_op_only_probs = gumbel_dist.sample()\n    else:\n        aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1)\n    all_results = tf.concat([tf.expand_dims(sum_result, axis=1), tf.expand_dims(average_result, axis=1), tf.expand_dims(count_result, axis=1)], axis=1)\n    expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1)\n    return expected_result", "docstring": "Calculates the expected result given cell and aggregation probabilities.\n\nArgs:\ndist_per_cell (`tfp.distributions.Bernoulli`):\nCell selection distribution for each cell.\nnumeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):\nNumeric values of every token. 
Nan for tokens which are not numeric values.\nnumeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):\nScale of the numeric values of every token.\ninput_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):\nMask for the table, without question tokens and table headers.\nlogits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):\nLogits per aggregation operation.\nconfig ([`TapasConfig`]):\nModel configuration class with all the hyperparameters of the model\n\nReturns:\nexpected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example.", "source": "github-repos"}
{"code": "def load_resource(resource_url: str, forceupdate: bool=False):\n    log.info(f'Loading resource {resource_url}')\n    try:\n        fo = bel.utils.download_file(resource_url)\n        if (not fo):\n            log.error(f'Could not download and open file {resource_url}')\n            return 'Failed to download resource_url'\n        fo.seek(0)\n        with gzip.open(fo, 'rt') as f:\n            metadata = json.loads(f.__next__())\n        if ('metadata' not in metadata):\n            log.error(f'Missing metadata entry for {resource_url}')\n            return 'Cannot load resource file - missing metadata object in first line of file'\n        if (metadata['metadata']['type'] == 'namespace'):\n            bel.resources.namespace.load_terms(fo, metadata, forceupdate)\n        elif (metadata['metadata']['type'] == 'ortholog'):\n            bel.resources.ortholog.load_orthologs(fo, metadata)\n    finally:\n        fo.close()", "docstring": "Load BEL Resource file\n\nForceupdate will create a new index in Elasticsearch regardless of whether\nan index with the resource version already exists.\n\nArgs:\nresource_url: URL from which to download the resource to load into the BEL API\nforceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches", "source": "codesearchnet"}
{"code": "def repository_contributors(self, **kwargs):\n    path = ('/projects/%s/repository/contributors' % self.get_id())\n    return self.manager.gitlab.http_list(path, **kwargs)", "docstring": "Return a list of contributors for the project.\n\nArgs:\nall (bool): If True, return all the items, without pagination\nper_page (int): Number of items to retrieve per request\npage (int): ID of the page to return (starts with page 1)\nas_list (bool): If set to False and no pagination option is\ndefined, return a generator instead of a list\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\nlist: The contributors", "source": "codesearchnet"}
{"code": "def parse(cls, data: bytes) -> 'MessageContent':\n    lines = cls._find_lines(data)\n    view = memoryview(data)\n    return cls._parse(data, view, lines)", "docstring": "Parse the bytestring into message content.\n\nArgs:\ndata: The bytestring to parse.", "source": "codesearchnet"}
{"code": "def convert_bytes_to_c_source(data, array_name, max_line_width=80, include_guard=None, include_path=None, use_tensorflow_license=False):\n    starting_pad = '   '\n    array_lines = []\n    array_line = starting_pad\n    for value in bytearray(data):\n        if len(array_line) + 4 > max_line_width:\n            array_lines.append(array_line + '\\n')\n            array_line = starting_pad\n        array_line += ' 0x%02x,' % (value,)\n    if len(array_line) > len(starting_pad):\n        array_lines.append(array_line + '\\n')\n    array_values = ''.join(array_lines)\n    if include_guard is None:\n        include_guard = 'TENSORFLOW_LITE_UTIL_' + array_name.upper() + '_DATA_H_'\n    if include_path is not None:\n        include_line = '\n    else:\n        include_line = ''\n    if use_tensorflow_license:\n        license_text = '\\n/* Copyright {year} The TensorFlow Authors. All Rights Reserved.\\n\\nLicensed under the Apache License, Version 2.0 (the \"License\");\\nyou may not use this file except in compliance with the License.\\nYou may obtain a copy of the License at\\n\\n    http:\n    else:\n        license_text = ''\n    source_template = \"{license_text}\\n\n    source_text = source_template.format(array_name=array_name, array_length=len(data), array_values=array_values, license_text=license_text, include_line=include_line)\n    header_template = \"\\n{license_text}\\n\\n\n    header_text = header_template.format(array_name=array_name, include_guard=include_guard, license_text=license_text)\n    return (source_text, header_text)", "docstring": "Returns strings representing a C constant array containing `data`.\n\nArgs:\ndata: Byte array that will be converted into a C constant.\narray_name: String to use as the variable name for the constant array.\nmax_line_width: The longest line length, for formatting purposes.\ninclude_guard: Name to use for the include guard macro definition.\ninclude_path: Optional path to include in the source file.\nuse_tensorflow_license: Whether to include the standard TensorFlow Apache2\nlicense in the generated files.\n\nReturns:\nText that can be compiled as a C source file to link in the data as a\nliteral array of values.\nText that can be used as a C header file to reference the literal array.", "source": "github-repos"}
{"code": "def _MergeTaskStorage(self, storage_writer):\n    if self._processing_profiler:\n        self._processing_profiler.StartTiming('merge_check')\n    for task_identifier in storage_writer.GetProcessedTaskIdentifiers():\n        try:\n            task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)\n            self._task_manager.SampleTaskStatus(task, 'processed')\n            to_merge = self._task_manager.CheckTaskToMerge(task)\n            if (not to_merge):\n                storage_writer.RemoveProcessedTaskStorage(task)\n                self._task_manager.RemoveTask(task)\n                self._task_manager.SampleTaskStatus(task, 'removed_processed')\n            else:\n                storage_writer.PrepareMergeTaskStorage(task)\n                self._task_manager.UpdateTaskAsPendingMerge(task)\n        except KeyError:\n            logger.error('Unable to retrieve task: {0:s} to prepare it to be merged.'.format(task_identifier))\n            continue\n    if self._processing_profiler:\n        self._processing_profiler.StopTiming('merge_check')\n    task = None\n    if (not self._storage_merge_reader_on_hold):\n        task = self._task_manager.GetTaskPendingMerge(self._merge_task)\n    if (task or self._storage_merge_reader):\n        self._status = definitions.STATUS_INDICATOR_MERGING\n        if self._processing_profiler:\n            self._processing_profiler.StartTiming('merge')\n        if task:\n            if self._storage_merge_reader:\n                self._merge_task_on_hold = self._merge_task\n                self._storage_merge_reader_on_hold = self._storage_merge_reader\n                self._task_manager.SampleTaskStatus(self._merge_task_on_hold, 'merge_on_hold')\n            self._merge_task = task\n            try:\n                self._storage_merge_reader = storage_writer.StartMergeTaskStorage(task)\n                self._task_manager.SampleTaskStatus(task, 'merge_started')\n            except IOError as exception:\n                logger.error('Unable to merge results of task: {0:s} with error: {1!s}'.format(task.identifier, exception))\n                self._storage_merge_reader = None\n        if self._storage_merge_reader:\n            fully_merged = self._storage_merge_reader.MergeAttributeContainers(maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)\n        else:\n            fully_merged = True\n        if self._processing_profiler:\n            self._processing_profiler.StopTiming('merge')\n        if fully_merged:\n            try:\n                self._task_manager.CompleteTask(self._merge_task)\n            except KeyError as exception:\n                logger.error('Unable to complete task: {0:s} with error: {1!s}'.format(self._merge_task.identifier, exception))\n            if (not self._storage_merge_reader_on_hold):\n                self._merge_task = None\n                self._storage_merge_reader = None\n            else:\n                self._merge_task = self._merge_task_on_hold\n                self._storage_merge_reader = self._storage_merge_reader_on_hold\n                self._merge_task_on_hold = None\n                self._storage_merge_reader_on_hold = None\n                self._task_manager.SampleTaskStatus(self._merge_task, 'merge_resumed')\n        self._status = definitions.STATUS_INDICATOR_RUNNING\n        self._number_of_produced_events = storage_writer.number_of_events\n        self._number_of_produced_sources = storage_writer.number_of_event_sources\n        
self._number_of_produced_warnings = storage_writer.number_of_warnings", "docstring": "Merges a task storage with the session storage.\n\nThis function checks all task stores that are ready to merge and updates\nthe scheduled tasks. Note that to prevent this function holding up\nthe task scheduling loop only the first available task storage is merged.\n\nArgs:\nstorage_writer (StorageWriter): storage writer for a session storage used\nto merge task storage.", "source": "codesearchnet"}
{"code": "def _create_formatters(self, instrumentation_block, new_state):\n    formatters = []\n    if self._previous_block_never_completed(current_block=instrumentation_block, previous_block=instrumentation_block.previous_instrumentation_block, new_state=new_state):\n        instrumentation_block.previous_instrumentation_block.set_error_message(self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)\n        formatters.append(_InstrumentationBlockFormatter(instrumentation_block.previous_instrumentation_block))\n    if not instrumentation_block.is_empty:\n        formatters.append(_InstrumentationBlockFormatter(instrumentation_block))\n    return formatters", "docstring": "Creates the _InstrumentationBlockFormatters for outputting the\ninstrumentation method block that have finished parsing.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, the current\ninstrumentation method block to create formatters based upon.\nnew_state: _InstrumentationBlockState, the next state that the\nparser will transition to.\n\nReturns:\nA list of the formatters tha need to create and add\nTestResultRecords to the test results.", "source": "github-repos"}
{"code": "def delta_E( self ):\n        \n        site_delta_E = self.final_site.energy - self.initial_site.energy\n        if self.nearest_neighbour_energy:\n            site_delta_E += self.nearest_neighbour_delta_E()\n        if self.coordination_number_energy:\n            site_delta_E += self.coordination_number_delta_E()\n        return site_delta_E", "docstring": "The change in system energy if this jump were accepted.\n\nArgs:\nNone\n\nReturns:\n(Float): delta E", "source": "juraj-google-style"}
{"code": "def create_bulk(self, resource, timeout=(- 1)):\n    uri = (self.URI + '/bulk')\n    default_values = self._get_default_values(self.BULK_DEFAULT_VALUES)\n    updated_data = self._helper.update_resource_fields(resource, default_values)\n    self._helper.create(updated_data, uri=uri, timeout=timeout)\n    return self.get_range(resource['namePrefix'], resource['vlanIdRange'])", "docstring": "Creates bulk Ethernet networks.\n\nArgs:\nresource (dict): Specifications to create in bulk.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nlist: List of created Ethernet Networks.", "source": "codesearchnet"}
{"code": "def set_y_grid_info(self, y_low, y_high, num_y, yscale, yval_name):\n    self._set_grid_info('y', y_low, y_high, num_y, yscale, yval_name)\n    return", "docstring": "Set the grid values for y.\n\nCreate information for the grid of y values.\n\nArgs:\nnum_y (int): Number of points on axis.\ny_low/y_high (float): Lowest/highest value for the axis.\nyscale (str): Scale of the axis. Choices are 'log' or 'lin'.\nyval_name (str): Name representing the axis. See GenerateContainer documentation\nfor options for the name.", "source": "codesearchnet"}
{"code": "def Wget(src_url, tgt_name, tgt_root=None):\n    \n    if tgt_root is None:\n        tgt_root = str(CFG[\"tmp_dir\"])\n\n    from benchbuild.utils.cmd import wget\n\n    tgt_file = local.path(tgt_root) / tgt_name\n    if not source_required(tgt_file):\n        Copy(tgt_file, \".\")\n        return\n\n    wget(src_url, \"-O\", tgt_file)\n    update_hash(tgt_file)\n    Copy(tgt_file, \".\")", "docstring": "Download url, if required.\n\nArgs:\nsrc_url (str): Our SOURCE url.\ntgt_name (str): The filename we want to have on disk.\ntgt_root (str): The TARGET directory for the download.\nDefaults to ``CFG[\"tmpdir\"]``.", "source": "juraj-google-style"}
{"code": "def length(self, rows=None):\n    \n    rows = tf.range(self._capacity) if rows is None else rows\n    return tf.gather(self._length, rows)", "docstring": "Tensor holding the current length of episodes.\n\nArgs:\nrows: Episodes to select length from, defaults to all.\n\nReturns:\nBatch tensor of sequence lengths.", "source": "juraj-google-style"}
{"code": "def make_scheduler(self, **kwargs):\n        \n        from .launcher import PyFlowScheduler\n        if not kwargs:\n            \n            sched = PyFlowScheduler.from_user_config()\n        else:\n            \n            filepath = kwargs.pop(\"filepath\", None)\n            if filepath is not None:\n                assert not kwargs\n                sched = PyFlowScheduler.from_file(filepath)\n            else:\n                sched = PyFlowScheduler(**kwargs)\n\n        sched.add_flow(self)\n        return sched", "docstring": "Build a return a :class:`PyFlowScheduler` to run the flow.\n\nArgs:\nkwargs: if empty we use the user configuration file.\nif `filepath` in kwargs we init the scheduler from filepath.\nelse pass **kwargs to :class:`PyFlowScheduler` __init__ method.", "source": "juraj-google-style"}
{"code": "def run_conditional_decorators(self, context):\n        \n        logger.debug(\"starting\")\n\n        \n        \n        \n        run_me = context.get_formatted_as_type(self.run_me, out_type=bool)\n        skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)\n        swallow_me = context.get_formatted_as_type(self.swallow_me,\n                                                   out_type=bool)\n\n        if run_me:\n            if not skip_me:\n                try:\n                    if self.retry_decorator:\n                        self.retry_decorator.retry_loop(context,\n                                                        self.invoke_step)\n                    else:\n                        self.invoke_step(context=context)\n                except Exception as ex_info:\n                    if swallow_me:\n                        logger.error(\n                            f\"{self.name} Ignoring error because swallow \"\n                            \"is True for this step.\\n\"\n                            f\"{type(ex_info).__name__}: {ex_info}\")\n                    else:\n                        raise\n            else:\n                logger.info(\n                    f\"{self.name} not running because skip is True.\")\n        else:\n            logger.info(f\"{self.name} not running because run is False.\")\n\n        logger.debug(\"done\")", "docstring": "Evaluate the step decorators to decide whether to run step or not.\n\nUse pypyr.dsl.Step.run_step if you intend on executing the step the\nsame way pypyr does.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "juraj-google-style"}
{"code": "def num_mode_groups(self):\n    num = self._libinput.libinput_device_tablet_pad_get_num_mode_groups(self._handle)\n    if (num < 0):\n        raise AttributeError('This device is not a tablet pad device')\n    return num", "docstring": "Most devices only provide a single mode group, however devices\nsuch as the Wacom Cintiq 22HD provide two mode groups.\n\nIf multiple mode groups are available, a caller should use\n:meth:`~libinput.define.TabletPadModeGroup.has_button`,\n:meth:`~libinput.define.TabletPadModeGroup.has_ring`\nand :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate\neach button, ring and strip with the correct mode group.\n\nReturns:\nint: The number of mode groups available on this device.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def validate_config_value(value, possible_values):\n  \n  if value not in possible_values:\n    raise Exception('Invalid config value \"%s\". Possible values are '\n                    '%s' % (value, ', '.join(e for e in possible_values)))", "docstring": "Validate a config value to make sure it is one of the possible values.\n\nArgs:\nvalue: the config value to validate.\npossible_values: the possible values the value can be\n\nRaises:\nException if the value is not one of possible values.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_path):\n    \n    super(WindowsRegistryKeyPathFilter, self).__init__()\n\n    key_path.rstrip('\\\\')\n    self._key_path = key_path\n\n    key_path = key_path.upper()\n    self._key_path_upper = key_path\n\n    self._wow64_key_path = None\n    self._wow64_key_path_upper = None\n\n    if key_path.startswith(self._CONTROL_SET_PREFIX.upper()):\n      self._key_path_prefix, _, self._key_path_suffix = key_path.partition(\n          'CurrentControlSet'.upper())\n\n    else:\n      self._key_path_prefix = None\n      self._key_path_suffix = None\n\n      \n      \n      \n      \n      \n      \n      wow64_prefix = None\n      for key_path_prefix in self._WOW64_PREFIXES:\n        if key_path.startswith(key_path_prefix.upper()):\n          wow64_prefix = key_path_prefix\n          break\n\n      if wow64_prefix:\n        key_path_suffix = self._key_path[len(wow64_prefix):]\n        if key_path_suffix.startswith('\\\\'):\n          key_path_suffix = key_path_suffix[1:]\n        self._wow64_key_path = '\\\\'.join([\n            wow64_prefix, 'Wow6432Node', key_path_suffix])\n        self._wow64_key_path_upper = self._wow64_key_path.upper()", "docstring": "Initializes a Windows Registry key filter.\n\nArgs:\nkey_path (str): key path.", "source": "juraj-google-style"}
{"code": "def replace_composites_with_components(structure):\n    if isinstance(structure, CompositeTensor):\n        return replace_composites_with_components(structure._type_spec._to_components(structure))\n    elif not nest.is_nested(structure):\n        return structure\n    else:\n        return nest.map_structure(replace_composites_with_components, structure, expand_composites=False)", "docstring": "Recursively replaces CompositeTensors with their components.\n\nArgs:\nstructure: A `nest`-compatible structure, possibly containing composite\ntensors.\n\nReturns:\nA copy of `structure`, where each composite tensor has been replaced by\nits components.  The result will contain no composite tensors.\nNote that `nest.flatten(replace_composites_with_components(structure))`\nreturns the same value as `nest.flatten(structure)`.", "source": "github-repos"}
{"code": "def _get_localized_fn(path, root_dir):\n    local_fn = path\n    if path.startswith(root_dir):\n        local_fn = path.replace(root_dir, '', 1)\n    if (not local_fn.startswith('/')):\n        return ('/' + local_fn)\n    return local_fn", "docstring": "Return absolute `path` relative to `root_dir`.\n\nWhen `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``,\nreturned path will be ``/xex/somefile.txt``.\n\nArgs:\npath (str): Absolute path beginning in `root_dir`.\nroot_dir (str): Absolute path containing `path` argument.\n\nReturns:\nstr: Local `path` when `root_dir` is considered as root of FS.", "source": "codesearchnet"}
{"code": "def _PrintProcessingTime(self, processing_status):\n    \n    if not processing_status:\n      processing_time = '00:00:00'\n    else:\n      processing_time = time.time() - processing_status.start_time\n      time_struct = time.gmtime(processing_time)\n      processing_time = time.strftime('%H:%M:%S', time_struct)\n\n    self._output_writer.Write(\n        'Processing time\\t\\t: {0:s}\\n'.format(processing_time))", "docstring": "Prints the processing time.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"}
{"code": "def get_graphs(self, run_key, debug=False):\n    graph_dict = (self._run_key_to_debug_graphs if debug else self._run_key_to_original_graphs)\n    graph_wrappers = graph_dict.get(run_key, {})\n    graph_defs = dict()\n    for (device_name, wrapper) in graph_wrappers.items():\n        graph_defs[device_name] = wrapper.graph_def\n    return graph_defs", "docstring": "Get the runtime GraphDef protos associated with a run key.\n\nArgs:\nrun_key: A Session.run kay.\ndebug: Whether the debugger-decoratedgraph is to be retrieved.\n\nReturns:\nA `dict` mapping device name to `GraphDef` protos.", "source": "codesearchnet"}
{"code": "def get_all_existing(self, server_group):\n    self.log.info('Checking for existing scaling policy')\n    url = '{0}/applications/{1}/clusters/{2}/{1}/serverGroups'.format(API_URL, self.app, self.env)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert response.ok, 'Error looking for existing Autoscaling Policy for {0}: {1}'.format(self.app, response.text)\n    scalingpolicies = []\n    for servergroup in response.json():\n        if (servergroup['scalingPolicies'] and (servergroup['asg']['autoScalingGroupName'] == server_group)):\n            self.log.info('Found policies on %s', server_group)\n            scalingpolicies.append(servergroup['scalingPolicies'])\n    self.log.debug('Scaling policies: %s', scalingpolicies)\n    return scalingpolicies", "docstring": "Finds all existing scaling policies for an application\n\nReturns:\nscalingpolicies (list): List of all existing scaling policies for the application", "source": "codesearchnet"}
{"code": "def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None):\n    del feature_column, name, shape, dtype, trainable, use_resource, initializer\n    raise NotImplementedError('StateManager.create_variable')", "docstring": "Creates a new variable.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nname: variable name.\nshape: variable shape.\ndtype: The type of the variable. Defaults to `self.dtype` or `float32`.\ntrainable: Whether this variable is trainable or not.\nuse_resource: If true, we use resource variables. Otherwise we use\nRefVariable.\ninitializer: initializer instance (callable).\n\nReturns:\nThe created variable.", "source": "github-repos"}
{"code": "def _update_dict(self, to_dict, from_dict):\n    for (key, value) in from_dict.items():\n        if ((key in to_dict) and isinstance(to_dict[key], dict) and isinstance(from_dict[key], dict)):\n            self._update_dict(to_dict[key], from_dict[key])\n        else:\n            to_dict[key] = from_dict[key]", "docstring": "Recursively merges the fields for two dictionaries.\n\nArgs:\nto_dict (dict): The dictionary onto which the merge is executed.\nfrom_dict (dict): The dictionary merged into to_dict", "source": "codesearchnet"}
{"code": "def __init__(self, num_agents, observation_spec, action_spec):\n    \n    self._num_agents = num_agents\n    self._observation_spec = observation_spec\n    self._action_spec = action_spec\n    self._episode_steps = 0\n\n    self.next_timestep = [\n        environment.TimeStep(\n            step_type=environment.StepType.MID,\n            reward=0.,\n            discount=1.,\n            observation=self._default_observation(obs_spec, agent_index))\n        for agent_index, obs_spec in enumerate(observation_spec)]\n\n    self.episode_length = float('inf')", "docstring": "Initializes the TestEnvironment.\n\nThe `next_observation` is initialized to be reward = 0., discount = 1.,\nand an appropriately sized observation of all zeros. `episode_length` is set\nto `float('inf')`.\n\nArgs:\nnum_agents: The number of agents.\nobservation_spec: The observation specs for each player.\naction_spec: The action specs for each player.", "source": "juraj-google-style"}
{"code": "def write_uint64(self, value, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.pack(('%sQ' % endian), value)", "docstring": "Pack the value as an unsigned integer and write 8 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def end_of_chunk(prev_tag, tag, prev_type, type_):\n    \n    chunk_end = False\n\n    if prev_tag == 'E': chunk_end = True\n    if prev_tag == 'S': chunk_end = True\n\n    if prev_tag == 'B' and tag == 'B': chunk_end = True\n    if prev_tag == 'B' and tag == 'S': chunk_end = True\n    if prev_tag == 'B' and tag == 'O': chunk_end = True\n    if prev_tag == 'I' and tag == 'B': chunk_end = True\n    if prev_tag == 'I' and tag == 'S': chunk_end = True\n    if prev_tag == 'I' and tag == 'O': chunk_end = True\n\n    if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:\n        chunk_end = True\n\n    return chunk_end", "docstring": "Checks if a chunk ended between the previous and current word.\n\nArgs:\nprev_tag: previous chunk tag.\ntag: current chunk tag.\nprev_type: previous type.\ntype_: current type.\n\nReturns:\nchunk_end: boolean.", "source": "juraj-google-style"}
{"code": "def ParseFileEntry(self, parser_mediator, file_entry):\n    index_file_parser = ChromeCacheIndexFileParser()\n    file_object = file_entry.GetFileObject()\n    try:\n        index_file_parser.ParseFileObject(parser_mediator, file_object)\n    except (IOError, errors.ParseError) as exception:\n        file_object.close()\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile('[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format(self.NAME, display_name, exception))\n    try:\n        file_system = file_entry.GetFileSystem()\n        self._ParseIndexTable(parser_mediator, file_system, file_entry, index_file_parser.index_table)\n    finally:\n        file_object.close()", "docstring": "Parses Chrome Cache files.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_entry (dfvfs.FileEntry): file entry.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def python_value(self, value):\n    value = super(ArrowDateTimeField, self).python_value(value)\n    if isinstance(value, (datetime.datetime, datetime.date, string_types)):\n        return arrow.get(value)\n    return value", "docstring": "Return the value in the data base as an arrow object.\n\nReturns:\narrow.Arrow: An instance of arrow with the field filled in.", "source": "codesearchnet"}
{"code": "def recipe_sheets_clear(config, auth_read, sheets_sheet, sheets_tab, sheets_range):\n    sheets(config, {'auth': auth_read, 'sheet': sheets_sheet, 'tab': sheets_tab, 'range': sheets_range, 'clear': True})", "docstring": "Clear data from a sheet.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nsheets_sheet (string) - NA\nsheets_tab (string) - NA\nsheets_range (string) - NA", "source": "github-repos"}
{"code": "def size(self, path):\n    try:\n        return os.path.getsize(path)\n    except Exception as e:\n        raise BeamIOError('Size operation failed', {path: e})", "docstring": "Get size of path on the FileSystem.\n\nArgs:\npath: string path in question.\n\nReturns: int size of path according to the FileSystem.\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"}
{"code": "async def _on_trace_notification(self, trace_event):\n        \n\n        conn_string = trace_event.get('connection_string')\n        payload = trace_event.get('payload')\n\n        await self.notify_event(conn_string, 'trace', payload)", "docstring": "Callback function called when a trace chunk is received.\n\nArgs:\ntrace_chunk (dict): The received trace chunk information", "source": "juraj-google-style"}
{"code": "def _expand_terms(self, terms):\n        \n\n        ret = {\n            'keywords': list(),\n            'doc': list()}\n\n        if not isinstance(terms, dict):\n            stp = SearchTermParser()\n            terms = stp.parse(terms, term_join=self.backend._and_join)\n\n        if 'about' in terms:\n            ret['doc'].append(terms['about'])\n\n        if 'source' in terms:\n            ret['keywords'].append(terms['source'])\n        return ret", "docstring": "Expands terms of the dataset to the appropriate fields. It will parse the search phrase\nand return only the search term components that are applicable to a Dataset query.\n\nArgs:\nterms (dict or str):\n\nReturns:\ndict: keys are field names, values are query strings", "source": "juraj-google-style"}
{"code": "def tf_baseline_loss(self, states, internals, reward, update, reference=None):\n        \n        if self.baseline_mode == 'states':\n            loss = self.baseline.loss(\n                states=states,\n                internals=internals,\n                reward=reward,\n                update=update,\n                reference=reference\n            )\n\n        elif self.baseline_mode == 'network':\n            loss = self.baseline.loss(\n                states=self.network.apply(x=states, internals=internals, update=update),\n                internals=internals,\n                reward=reward,\n                update=update,\n                reference=reference\n            )\n\n        regularization_loss = self.baseline.regularization_loss()\n        if regularization_loss is not None:\n            loss += regularization_loss\n\n        return loss", "docstring": "Creates the TensorFlow operations for calculating the baseline loss of a batch.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nreward: Reward tensor.\nupdate: Boolean tensor indicating whether this call happens during an update.\nreference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\nLoss tensor.", "source": "juraj-google-style"}
{"code": "def setModelData(self, spinBox, model, index):\n        \n        spinBox.interpretText()\n        value = spinBox.value()\n        model.setData(index, value, QtCore.Qt.EditRole)", "docstring": "Gets data from the editor widget and stores it in the specified model at the item index.\n\nArgs:\nspinBox (QDoubleSpinBox): editor widget.\nmodel (QAbstractItemModel): parent model.\nindex (QModelIndex): model data index.", "source": "juraj-google-style"}
{"code": "def _CheckIsDirectory(self, file_entry):\n    \n    if definitions.FILE_ENTRY_TYPE_DIRECTORY not in self._file_entry_types:\n      return False\n    return file_entry.IsDirectory()", "docstring": "Checks the is_directory find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "juraj-google-style"}
{"code": "def sin(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.sin, tf.float32)", "docstring": "Returns a TensorFluent for the sin function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the sin function.", "source": "codesearchnet"}
{"code": "def grid_deploy(site, nodes, options):\n    \n    gk = get_api_client()\n    environment = options.pop(\"env_name\")\n    options.update(environment=environment)\n    options.update(nodes=nodes)\n    key_path = DEFAULT_SSH_KEYFILE\n    options.update(key=key_path.read_text())\n    logger.info(\"Deploying %s with options %s\" % (nodes, options))\n    deployment = gk.sites[site].deployments.create(options)\n    while deployment.status not in [\"terminated\", \"error\"]:\n        deployment.refresh()\n        print(\"Waiting for the end of deployment [%s]\" % deployment.uid)\n        time.sleep(10)\n\n    deploy = []\n    undeploy = []\n    if deployment.status == \"terminated\":\n        deploy = [node for node, v in deployment.result.items()\n                  if v[\"state\"] == \"OK\"]\n        undeploy = [node for node, v in deployment.result.items()\n                    if v[\"state\"] == \"KO\"]\n    elif deployment.status == \"error\":\n        undeploy = nodes\n    return deploy, undeploy", "docstring": "Deploy and wait for the deployment to be finished.\n\nArgs:\nsite(str): the site\nnodes(list): list of nodes (str) to depoy\noptions(dict): option of the deployment (refer to the Grid'5000 API\nSpecifications)\n\nReturns:\ntuple of deployed(list), undeployed(list) nodes.", "source": "juraj-google-style"}
{"code": "def testBroadcastDimension(self, axis, row_length, original_dim_sizes, broadcast_dim_sizes):\n    original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)\n    bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)\n    self.assertEqual(original_shape.rank, bcast_shape.rank)\n    bcast1 = original_shape.broadcast_dimension(axis, row_length)\n    bcast2 = bcast_shape.broadcast_dimension(axis, row_length)\n    bcast3 = bcast_shape.broadcast_dimension(axis, 1)\n    self.assertShapeEq(bcast1, bcast_shape)\n    self.assertShapeEq(bcast2, bcast_shape)\n    self.assertShapeEq(bcast3, bcast_shape)", "docstring": "Tests for the broadcast_dimension method.\n\nVerifies that:\n\n* `original.broadcast_dimension(axis, row_length) == broadcast`\n* `broadcast.broadcast_dimension(axis, row_length) == broadcast`\n* `broadcast.broadcast_dimension(axis, 1) == broadcast`\n\nArgs:\naxis: The axis to broadcast\nrow_length: The slice lengths to broadcast to.\noriginal_dim_sizes: The dimension sizes before broadcasting.\noriginal_dim_sizes[axis] should be equal to `1` or `row_length`.\nbroadcast_dim_sizes: THe dimension sizes after broadcasting.", "source": "github-repos"}
{"code": "def __init__(self, granularity: Granularity) -> None:\n    super().__init__()\n    self.chunks = ['']\n    self.row = 0\n    self.col = 0\n    self.current_word = ''\n    self.on_split_row = False\n    self.granularity = granularity", "docstring": "Initializes the HTML parser for the KNBC corpus.\n\nArgs:\ngranularity: Granularity of the output chunks.", "source": "github-repos"}
{"code": "def get_tensor_by_name(self, name) -> tensor_lib.Tensor:\n    if not isinstance(name, str):\n        raise TypeError('Tensor names are strings (or similar), not %s.' % type(name).__name__)\n    tensor = cast(tensor_lib.Tensor, self.as_graph_element(name, allow_tensor=True, allow_operation=False))\n    return tensor", "docstring": "Returns the `Tensor` with the given `name`.\n\nThis method may be called concurrently from multiple threads.\n\nArgs:\nname: The name of the `Tensor` to return.\n\nReturns:\nThe `Tensor` with the given `name`.\n\nRaises:\nTypeError: If `name` is not a string.\nKeyError: If `name` does not correspond to a tensor in this graph.", "source": "github-repos"}
{"code": "def columns(self, dimensions=None):\n        \n        if dimensions is None:\n            dimensions = self.dimensions()\n        else:\n            dimensions = [self.get_dimension(d, strict=True) for d in dimensions]\n        return OrderedDict([(d.name, self.dimension_values(d)) for d in dimensions])", "docstring": "Convert dimension values to a dictionary.\n\nReturns a dictionary of column arrays along each dimension\nof the element.\n\nArgs:\ndimensions: Dimensions to return as columns\n\nReturns:\nDictionary of arrays for each dimension", "source": "juraj-google-style"}
{"code": "def encode(self, s):\n    \n    try:\n      import matplotlib.image as im  \n    except ImportError as e:\n      tf.logging.warning(\n          \"Reading an image requires matplotlib to be installed: %s\", e)\n      raise NotImplementedError(\"Image reading not implemented.\")\n    return im.imread(s)", "docstring": "Transform a string with a filename into a list of RGB integers.\n\nArgs:\ns: path to the file with an image.\n\nReturns:\nids: list of integers", "source": "juraj-google-style"}
{"code": "def acos(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.acos, tf.float32)", "docstring": "Returns a TensorFluent for the arccos function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the arccos function.", "source": "codesearchnet"}
{"code": "def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr):\n        \n        \n\n        ip = sequence.address\n        next_ip = None\n\n        while ip:\n            \n            try:\n                instr = sequence.fetch(ip)\n            except ReilSequenceInvalidAddressError:\n                \n                \n                assert split_address(ip)[1] == 0x0\n                next_ip = ip\n                break\n\n            try:\n                target_addr = sequence.get_next_address(ip)\n            except ReilSequenceInvalidAddressError:\n                \n                \n                target_addr = next_addr\n\n            next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current)\n\n            \n            try:\n                ip = next_ip if next_ip else sequence.get_next_address(ip)\n            except ReilSequenceInvalidAddressError:\n                break\n\n        return next_ip", "docstring": "Process a REIL sequence.\n\nArgs:\nsequence (ReilSequence): A REIL sequence to process.\navoid (list): List of address to avoid.\ninitial_state: Initial state.\nexecution_state: Execution state queue.\ntrace_current (list): Current trace.\nnext_addr: Address of the next instruction following the current one.\n\nReturns:\nReturns the next instruction to execute in case there is one, otherwise returns None.", "source": "juraj-google-style"}
{"code": "def ParseTable(table):\n  \n  precondition.AssertIterableType(table, dict)\n\n  result = rdf_osquery.OsqueryTable()\n  result.header = ParseHeader(table)\n  for row in table:\n    result.rows.append(ParseRow(result.header, row))\n  return result", "docstring": "Parses table of osquery output.\n\nArgs:\ntable: A table in a \"parsed JSON\" representation.\n\nReturns:\nA parsed `rdf_osquery.OsqueryTable` instance.", "source": "juraj-google-style"}
{"code": "def parse_statement(self, statement, orig_contents):\n        \n\n        children = []\n        is_block = False\n        name = statement.getName()\n\n        \n        \n        \n        \n        \n        \n        if name == 'block':\n            children_statements = statement[1]\n            for child in children_statements:\n                parsed = self.parse_statement(child, orig_contents=orig_contents)\n                children.append(parsed)\n\n            locn = statement[0]['location']\n            statement = statement[0][1]\n            name = statement.getName()\n            is_block = True\n        else:\n            stmt_language = get_statement()\n            locn = statement['location']\n            statement = statement['match']\n            statement_string = str(u\"\".join(statement.asList()))\n\n            \n            \n            \n            try:\n                statement = stmt_language.parseString(statement_string)[0]\n            except (pyparsing.ParseException, pyparsing.ParseSyntaxException) as exc:\n                raise SensorGraphSyntaxError(\"Error parsing statement in sensor graph file\", message=exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), column=pyparsing.col(locn, orig_contents))\n            except SensorGraphSemanticError as exc:\n                \n                raise SensorGraphSemanticError(exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), **exc.params)\n\n            name = statement.getName()\n\n        if name not in statement_map:\n            raise ArgumentError(\"Unknown statement in sensor graph file\", parsed_statement=statement, name=name)\n\n        \n        line = pyparsing.line(locn, orig_contents).strip()\n        line_number = pyparsing.lineno(locn, orig_contents)\n        column = pyparsing.col(locn, orig_contents)\n        location_info = LocationInfo(line, line_number, column)\n\n        if is_block:\n            return statement_map[name](statement, children=children, location=location_info)\n\n        return statement_map[name](statement, location_info)", "docstring": "Parse a statement, possibly called recursively.\n\nArgs:\nstatement (int, ParseResult): The pyparsing parse result that\ncontains one statement prepended with the match location\norig_contents (str): The original contents of the file that we're\nparsing in case we need to convert an index into a line, column\npair.\n\nReturns:\nSensorGraphStatement: The parsed statement.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, config_dict: dict[str, Any], **kwargs) -> 'PretrainedConfig':\n    return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n    kwargs.pop('_from_auto', None)\n    kwargs.pop('_from_pipeline', None)\n    if '_commit_hash' in kwargs and '_commit_hash' in config_dict:\n        kwargs['_commit_hash'] = config_dict['_commit_hash']\n    config_dict['attn_implementation'] = kwargs.pop('attn_implementation', None)\n    config = cls(**config_dict)\n    if hasattr(config, 'pruned_heads'):\n        config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}\n    if 'num_labels' in kwargs and 'id2label' in kwargs:\n        num_labels = kwargs['num_labels']\n        id2label = kwargs['id2label'] if kwargs['id2label'] is not None else []\n        if len(id2label) != num_labels:\n            raise ValueError(f'You passed along `num_labels={num_labels}` with an incompatible id to label map: {kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove one of them.')\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(config, key):\n            current_attr = getattr(config, key)\n            if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):\n                value = current_attr.__class__(**value)\n            setattr(config, key, value)\n            if key != 'torch_dtype':\n                to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    logger.info(f'Model config {config}')\n    if return_unused_kwargs:\n        return (config, kwargs)\n    else:\n        return config", "docstring": "Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.\n\nArgs:\nconfig_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the configuration object. Such a dictionary can be\nretrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the configuration object.\n\nReturns:\n[`PretrainedConfig`]: The configuration object instantiated from those parameters.", "source": "github-repos"}
{"code": "def numeric_task_id(task_id):\n  \n\n  \n  \n  \n  \n\n  if task_id is not None:\n    if task_id.startswith('task-'):\n      return int(task_id[len('task-'):])\n    else:\n      return int(task_id)", "docstring": "Converts a task-id to the numeric task-id.\n\nArgs:\ntask_id: task-id in either task-n or n format\n\nReturns:\nn", "source": "juraj-google-style"}
{"code": "def _get_fans(shape):\n    r\n    if len(shape) == 2:\n        fan_in = shape[0]\n        fan_out = shape[1]\n    elif len(shape) == 4 or len(shape) == 5:\n        \n        kernel_size = np.prod(shape[:2])\n        fan_in = shape[-2] * kernel_size\n        fan_out = shape[-1] * kernel_size\n    else:\n        \n        fan_in = np.sqrt(np.prod(shape))\n        fan_out = np.sqrt(np.prod(shape))\n    return fan_in, fan_out", "docstring": "r\"\"\"Returns the size of input dimension and output dimension, given `shape`.\n\nArgs:\nshape: A list of integers.\n\nReturns:\nfan_in: An int. The value of input dimension.\nfan_out: An int. The value of output dimension.", "source": "juraj-google-style"}
{"code": "def upsert_run(self, id=None, name=None, project=None, host=None, group=None, tags=None, config=None, description=None, entity=None, state=None, repo=None, job_type=None, program_path=None, commit=None, sweep_name=None, summary_metrics=None, num_retries=None):\n    mutation = gql('\\n        mutation UpsertBucket(\\n            $id: String, $name: String,\\n            $project: String,\\n            $entity: String!,\\n            $groupName: String,\\n            $description: String,\\n            $commit: String,\\n            $config: JSONString,\\n            $host: String,\\n            $debug: Boolean,\\n            $program: String,\\n            $repo: String,\\n            $jobType: String,\\n            $state: String,\\n            $sweep: String,\\n            $tags: [String!],\\n            $summaryMetrics: JSONString,\\n        ) {\\n            upsertBucket(input: {\\n                id: $id,\\n                name: $name,\\n                groupName: $groupName,\\n                modelName: $project,\\n                entityName: $entity,\\n                description: $description,\\n                config: $config,\\n                commit: $commit,\\n                host: $host,\\n                debug: $debug,\\n                jobProgram: $program,\\n                jobRepo: $repo,\\n                jobType: $jobType,\\n                state: $state,\\n                sweep: $sweep,\\n                tags: $tags,\\n                summaryMetrics: $summaryMetrics,\\n            }) {\\n                bucket {\\n                    id\\n                    name\\n                    description\\n                    config\\n                    project {\\n                        id\\n                        name\\n                        entity {\\n                            id\\n                            name\\n                        }\\n                    }\\n                }\\n            }\\n        }\\n        ')\n    if (config is not None):\n        config = json.dumps(config)\n    if (not description):\n        description = None\n    kwargs = {}\n    if (num_retries is not None):\n        kwargs['num_retries'] = num_retries\n    variable_values = {'id': id, 'entity': (entity or self.settings('entity')), 'name': name, 'project': project, 'groupName': group, 'tags': tags, 'description': description, 'config': config, 'commit': commit, 'host': host, 'debug': env.is_debug(), 'repo': repo, 'program': program_path, 'jobType': job_type, 'state': state, 'sweep': sweep_name, 'summaryMetrics': summary_metrics}\n    response = self.gql(mutation, variable_values=variable_values, **kwargs)\n    run = response['upsertBucket']['bucket']\n    project = run.get('project')\n    if project:\n        self.set_setting('project', project['name'])\n        entity = project.get('entity')\n        if entity:\n            self.set_setting('entity', entity['name'])\n    return response['upsertBucket']['bucket']", "docstring": "Update a run\n\nArgs:\nid (str, optional): The existing run to update\nname (str, optional): The name of the run to create\ngroup (str, optional): Name of the group this run is a part of\nproject (str, optional): The name of the project\nconfig (dict, optional): The latest config params\ndescription (str, optional): A description of this project\nentity (str, optional): The entity to scope this project to.\nrepo (str, optional): Url of the program's repository.\nstate (str, optional): State of the program.\njob_type (str, optional): Type of job, 
e.g 'train'.\nprogram_path (str, optional): Path to the program.\ncommit (str, optional): The Git SHA to associate the run with\nsummary_metrics (str, optional): The JSON summary metrics", "source": "codesearchnet"}
{"code": "def last(series, order_by=None):\n    if (order_by is not None):\n        series = order_series_by(series, order_by)\n    last_s = series.iloc[(series.size - 1)]\n    return last_s", "docstring": "Returns the last value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.\n\nKwargs:\norder_by: a pandas.Series or list of series (can be symbolic) to order\nthe input series by before summarization.", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):\n    vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)\n    image_embeds = vision_outputs[0]\n    image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n    query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n    query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)\n    if qformer_attention_mask is None:\n        qformer_attention_mask = torch.ones_like(qformer_input_ids)\n    qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)\n    query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)\n    query_output = query_outputs[0][:, :query_tokens.size(1), :]\n    language_model_inputs = self.language_projection(query_output)\n    if return_dict:\n        return (language_model_inputs, vision_outputs, query_outputs)\n    return language_model_inputs", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def distance_between(self, u, v):\n        \n        if not isinstance(u, Node):\n            raise TypeError(\"u must be a Node\")\n        if not isinstance(v, Node):\n            raise TypeError(\"v must be a Node\")\n        if u == v:\n            return 0.\n        u_dists = {u:0.}; v_dists = {v:0.}\n        c = u; p = u.parent \n        while p is not None:\n            u_dists[p] = u_dists[c]\n            if c.edge_length is not None:\n                u_dists[p] += c.edge_length\n            c = p; p = p.parent\n        c = v; p = v.parent \n        while p is not None:\n            v_dists[p] = v_dists[c]\n            if c.edge_length is not None:\n                v_dists[p] += c.edge_length\n            if p in u_dists:\n                return u_dists[p] + v_dists[p]\n            c = p; p = p.parent\n        raise RuntimeError(\"u and v are not in the same Tree\")", "docstring": "Return the distance between nodes ``u`` and ``v`` in this ``Tree``\n\nArgs:\n``u`` (``Node``): Node ``u``\n\n``v`` (``Node``): Node ``v``\n\nReturns:\n``float``: The distance between nodes ``u`` and ``v``", "source": "juraj-google-style"}
{"code": "def construct(parent=None, defaults=None, **kwargs):\n    for key in kwargs:\n        assert (key in LEGAL_ATTRS), '{} is not legal input'.format(key)\n    if (parent is not None):\n        for (key, value) in LEGAL_ATTRS.items():\n            if ((key not in kwargs) and hasattr(parent, value)):\n                kwargs[key] = getattr(parent, value)\n    assert ('cdf' in kwargs), 'cdf function must be defined'\n    assert ('bnd' in kwargs), 'bnd function must be defined'\n    if (('str' in kwargs) and isinstance(kwargs['str'], str)):\n        string = kwargs.pop('str')\n        kwargs['str'] = (lambda *args, **kwargs: string)\n    defaults = (defaults if defaults else {})\n    for key in defaults:\n        assert (key in LEGAL_ATTRS), 'invalid default value {}'.format(key)\n\n    def custom_distribution(**kws):\n        prm = defaults.copy()\n        prm.update(kws)\n        dist = Dist(**prm)\n        for (key, function) in kwargs.items():\n            attr_name = LEGAL_ATTRS[key]\n            setattr(dist, attr_name, types.MethodType(function, dist))\n        return dist\n    if ('doc' in kwargs):\n        custom_distribution.__doc__ = kwargs['doc']\n    return custom_distribution", "docstring": "Random variable constructor.\n\nArgs:\ncdf:\nCumulative distribution function. Optional if ``parent`` is used.\nbnd:\nBoundary interval. Optional if ``parent`` is used.\nparent (Dist):\nDistribution used as basis for new distribution. Any other argument\nthat is omitted will instead take is function from ``parent``.\ndoc (str]):\nDocumentation for the distribution.\nstr (str, :py:data:typing.Callable):\nPretty print of the variable.\npdf:\nProbability density function.\nppf:\nPoint percentile function.\nmom:\nRaw moment generator.\nttr:\nThree terms recursion coefficient generator.\ninit:\nCustom initialiser method.\ndefaults (dict):\nDefault values to provide to initialiser.\n\nReturns:\n(Dist):\nNew custom distribution.", "source": "codesearchnet"}
{"code": "def postings(self, quarter, stats_counter=None):\n        \n        logging.info('Finding postings for %s', quarter)\n        for posting in self._iter_postings(quarter):\n            transformed = self._transform(posting)\n            transformed['id'] = '{}_{}'.format(\n                self.partner_id,\n                self._id(posting)\n            )\n            if stats_counter:\n                stats_counter.track(\n                    input_document=posting,\n                    output_document=transformed\n                )\n            yield transformed", "docstring": "Yield job postings in common schema format\n\nArgs:\nquarter (str) The quarter, in format '2015Q1'\nstats_counter (object, optional) A counter that can track both\ninput and output documents using a 'track' method.", "source": "juraj-google-style"}
{"code": "def mounts(prefix, __mounts):\n    \n    i = 0\n    mntpoints = []\n    for mount in __mounts:\n        if not isinstance(mount, dict):\n            mntpoint = \"{0}/{1}\".format(prefix, str(i))\n            mntpoints.append(mntpoint)\n            i = i + 1\n    return mntpoints", "docstring": "Compute the mountpoints of the current user.\n\nArgs:\nprefix: Define where the job was running if it ran on a cluster.\nmounts: All mounts the user currently uses in his file system.\nReturn:\nmntpoints", "source": "juraj-google-style"}
{"code": "def FormatCode(unformatted_source, filename='<unknown>', style_config=None, lines=None, print_diff=False):\n    try:\n        tree = pytree_utils.ParseCodeToTree(unformatted_source)\n    except Exception as e:\n        e.filename = filename\n        raise errors.YapfError(errors.FormatErrorMsg(e))\n    reformatted_source = FormatTree(tree, style_config=style_config, lines=lines)\n    if unformatted_source == reformatted_source:\n        return ('' if print_diff else reformatted_source, False)\n    if print_diff:\n        code_diff = _GetUnifiedDiff(unformatted_source, reformatted_source, filename=filename)\n        return (code_diff, code_diff.strip() != '')\n    return (reformatted_source, True)", "docstring": "Format a string of Python code.\n\nThis provides an alternative entry point to YAPF.\n\nArguments:\nunformatted_source: (unicode) The code to format.\nfilename: (unicode) The name of the file being reformatted.\nstyle_config: (string) Either a style name or a path to a file that contains\nformatting style settings. If None is specified, use the default style\nas set in style.DEFAULT_STYLE_FACTORY\nlines: (list of tuples of integers) A list of tuples of lines, [start, end],\nthat we want to format. The lines are 1-based indexed. It can be used by\nthird-party code (e.g., IDEs) when reformatting a snippet of code rather\nthan a whole file.\nprint_diff: (bool) Instead of returning the reformatted source, return a\ndiff that turns the formatted source into reformatter source.\n\nReturns:\nTuple of (reformatted_source, changed). reformatted_source conforms to the\ndesired formatting style. changed is True if the source changed.", "source": "github-repos"}
{"code": "def __init__(self, map_task, counter_factory, state_sampler, test_shuffle_source=None, test_shuffle_sink=None):\n    self._map_task = map_task\n    self._counter_factory = counter_factory\n    self._ops = []\n    self._state_sampler = state_sampler\n    self._test_shuffle_source = test_shuffle_source\n    self._test_shuffle_sink = test_shuffle_sink", "docstring": "Initializes SimpleMapTaskExecutor.\n\nArgs:\nmap_task: The map task we are to run. The maptask contains a list of\noperations, and aligned lists for step_names, original_names,\nsystem_names of pipeline steps.\ncounter_factory: The CounterFactory instance for the work item.\nstate_sampler: The StateSampler tracking the execution step.\ntest_shuffle_source: Used during tests for dependency injection into\nshuffle read operation objects.\ntest_shuffle_sink: Used during tests for dependency injection into\nshuffle write operation objects.", "source": "github-repos"}
{"code": "def get_credentials_for_url(url, opts, force_user=None):\n    creds = None\n    verbose = int(opts.get('verbose'))\n    force_prompt = opts.get('prompt', False)\n    allow_prompt = (not opts.get('no_prompt', True))\n    allow_keyring = ((not opts.get('no_keyring', False)) and (not force_user))\n    allow_netrc = ((not opts.get('no_netrc', False)) and (not force_user))\n    if (force_user and (not allow_prompt)):\n        raise RuntimeError('Cannot get credentials for a distinct user ({}) from keyring or .netrc and prompting is disabled.'.format(force_user))\n    home_path = os.path.expanduser('~')\n    file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE)\n    if os.path.isfile(file_path):\n        raise RuntimeError('Custom password files are no longer supported. Delete {} and use .netrc instead.'.format(file_path))\n    if ((creds is None) and keyring and allow_keyring):\n        try:\n            c = keyring.get_password('pyftpsync', url)\n            if (c is not None):\n                creds = c.split(':', 1)\n                write(\"Using credentials from keyring('pyftpsync', '{}'): {}:***.\".format(url, creds[0]))\n            elif (verbose >= 4):\n                write(\"No credentials found in keyring('pyftpsync', '{}').\".format(url))\n        except Exception as e:\n            write_error('Could not get password from keyring {}'.format(e))\n    if ((creds is None) and allow_netrc):\n        try:\n            authenticators = None\n            authenticators = netrc.netrc().authenticators(url)\n        except CompatFileNotFoundError:\n            if (verbose >= 4):\n                write('Could not get password (no .netrc file).')\n        except Exception as e:\n            write_error('Could not read .netrc: {}.'.format(e))\n        if authenticators:\n            creds = (authenticators[0], authenticators[2])\n            write('Using credentials from .netrc file: {}:***.'.format(creds[0]))\n        elif (verbose >= 4):\n            write(\"Could not find entry for '{}' in .netrc file.\".format(url))\n    if allow_prompt:\n        if (creds is None):\n            creds = prompt_for_password(url)\n        elif force_prompt:\n            creds = prompt_for_password(url, default_user=creds[0])\n    return creds", "docstring": "Lookup credentials for a given target in keyring and .netrc.\n\nOptionally prompts for credentials if not found.\n\nReturns:\n2-tuple (username, password) or None", "source": "codesearchnet"}
{"code": "def inferred_steps(self):\n    return self._inferred_steps", "docstring": "The inferred steps per epoch of the created `Dataset`.\n\nThis will be `None` in the case where:\n\n(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and\n(2) `steps_per_epoch` was not provided, and\n(3) The first epoch of iteration has not yet completed.\n\nReturns:\nThe inferred steps per epoch of the created `Dataset`.", "source": "github-repos"}
{"code": "def forward(self, input_ids: torch.Tensor, cache_position: torch.Tensor) -> torch.Tensor:\n    return self.model.forward(input_ids, cache_position)", "docstring": "Forward pass of the module, which is compatible with the ExecuTorch llm runner.\n\nArgs:\ninput_ids (`torch.Tensor`): Tensor representing current input token id to the module.\ncache_position (`torch.Tensor`): Tensor representing current input position in the cache.\n\nReturns:\ntorch.Tensor: Logits output from the model.", "source": "github-repos"}
{"code": "def get_node(self, index: int) -> Optional[Node]:\n        \n        return self._nodes.get(index)", "docstring": "Returns the node with the given index if such a node currently exists in the node list.\n\nArguments:\nindex (int): The index of the queried node.\n\nReturns:\nThe node with the given index if such a node currently exists in the node list,\n`None` otherwise.", "source": "juraj-google-style"}
{"code": "def list_container_instance_groups_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.ContainerInstance/ContainerGroups',\n                        '?api-version=', CONTAINER_API])\n    return do_get(endpoint, access_token)", "docstring": "List the container groups in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON list of container groups and their properties.", "source": "juraj-google-style"}
{"code": "def _AlignDecryptedDataOffset(self, decrypted_data_offset):\n    self._file_object.seek(0, os.SEEK_SET)\n    self._decrypter = self._GetDecrypter()\n    self._decrypted_data = b''\n    encrypted_data_offset = 0\n    encrypted_data_size = self._file_object.get_size()\n    while (encrypted_data_offset < encrypted_data_size):\n        read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)\n        if (read_count == 0):\n            break\n        encrypted_data_offset += read_count\n        if (decrypted_data_offset < self._decrypted_data_size):\n            self._decrypted_data_offset = decrypted_data_offset\n            break\n        decrypted_data_offset -= self._decrypted_data_size", "docstring": "Aligns the encrypted file with the decrypted data offset.\n\nArgs:\ndecrypted_data_offset (int): decrypted data offset.", "source": "codesearchnet"}
{"code": "def satellites_used(feed):\n    \n    total_satellites = 0\n    used_satellites = 0\n\n    if not isinstance(feed, list):\n        return 0, 0\n\n    for satellites in feed:\n        total_satellites += 1\n        if satellites['used'] is True:\n            used_satellites += 1\n    return total_satellites, used_satellites", "docstring": "Counts number of satellites used in calculation from total visible satellites\nArguments:\nfeed feed=data_stream.TPV['satellites']\nReturns:\ntotal_satellites(int):\nused_satellites (int):", "source": "juraj-google-style"}
{"code": "def check_symmetry(A):\n    A = asanyarray(A)\n    if (A.ndim != 2):\n        raise ValueError('Checks symmetry only for bi-dimensional arrays.')\n    if (A.shape[0] != A.shape[1]):\n        return False\n    return (abs((A - A.T)).max() < sqrt(finfo(float).eps))", "docstring": "Check if ``A`` is a symmetric matrix.\n\nArgs:\nA (array_like): Matrix.\n\nReturns:\nbool: ``True`` if ``A`` is symmetric; ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def linear(x):\n    return x", "docstring": "Linear activation function (pass-through).\n\nFor example:\n\n>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)\n>>> b = tf.keras.activations.linear(a)\n>>> b.numpy()\narray([-3., -1.,  0.,  1.,  3.], dtype=float32)\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe input, unmodified.", "source": "github-repos"}
{"code": "def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget, parser_type='html'):\n    if encoding:\n        lxml_encoding = (to_lxml_encoding(encoding) or 'latin1')\n    else:\n        lxml_encoding = encoding\n    elements = []\n    callback_func = elements.append\n    target = target_class(callback_func)\n    if (parser_type == 'html'):\n        parser = lxml.html.HTMLParser(encoding=lxml_encoding, target=target)\n    elif (parser_type == 'xhtml'):\n        parser = lxml.html.XHTMLParser(encoding=lxml_encoding, target=target, recover=True)\n    else:\n        parser = lxml.etree.XMLParser(encoding=lxml_encoding, target=target, recover=True)\n    if (parser_type == 'html'):\n        for dummy in range(3):\n            parser.feed('<html>'.encode(encoding))\n    while True:\n        data = file.read(self.BUFFER_SIZE)\n        if (not data):\n            break\n        parser.feed(data)\n        for element in elements:\n            (yield element)\n        del elements[:]\n    parser.close()\n    for element in elements:\n        (yield element)", "docstring": "Return an iterator of elements found in the document.\n\nArgs:\nfile: A file object containing the document.\nencoding (str): The encoding of the document.\ntarget_class: A class to be used for target parsing.\nparser_type (str): The type of parser to use. Accepted values:\n``html``, ``xhtml``, ``xml``.\n\nReturns:\niterator: Each item is an element from\n:mod:`.document.htmlparse.element`", "source": "codesearchnet"}
{"code": "def get_mutations(aln_df):\n    \n    mutation_df = aln_df[aln_df['type'] == 'mutation']\n    tuples = []\n    if not mutation_df.empty:\n        subset = mutation_df[['id_a_aa', 'id_a_pos', 'id_b_aa']]\n        subset['id_a_pos'] = subset['id_a_pos'].astype(int)\n        tuples = [tuple(x) for x in subset.values]\n    return tuples", "docstring": "Get a list of residue numbers (in the original sequence's numbering) that are mutated\n\nArgs:\naln_df (DataFrame): Alignment DataFrame\njust_resnums: If only the residue numbers should be returned, instead of a list of tuples of\n(original_residue, resnum, mutated_residue)\n\nReturns:\nlist: Residue mutations", "source": "juraj-google-style"}
{"code": "def verifymessage(self, address, signature, message):\n        \n        verified = self.rpc.call(\"verifymessage\", address, signature, message)\n        self.logger.debug(\"Signature verified: %s\" % str(verified))\n        return verified", "docstring": "Verifies that a message has been signed by an address.\n\nArgs:\naddress (str): address claiming to have signed the message\nsignature (str): ECDSA signature\nmessage (str): plaintext message which was signed\n\nReturns:\nbool: True if the address signed the message, False otherwise", "source": "juraj-google-style"}
{"code": "def estimate_cpdag(skel_graph, sep_set):\n    \n    dag = skel_graph.to_directed()\n    node_ids = skel_graph.nodes()\n    for (i, j) in combinations(node_ids, 2):\n        adj_i = set(dag.successors(i))\n        if j in adj_i:\n            continue\n        adj_j = set(dag.successors(j))\n        if i in adj_j:\n            continue\n        if sep_set[i][j] is None:\n            continue\n        common_k = adj_i & adj_j\n        for k in common_k:\n            if k not in sep_set[i][j]:\n                if dag.has_edge(k, i):\n                    _logger.debug('S: remove edge (%s, %s)' % (k, i))\n                    dag.remove_edge(k, i)\n                if dag.has_edge(k, j):\n                    _logger.debug('S: remove edge (%s, %s)' % (k, j))\n                    dag.remove_edge(k, j)\n\n    def _has_both_edges(dag, i, j):\n        return dag.has_edge(i, j) and dag.has_edge(j, i)\n\n    def _has_any_edge(dag, i, j):\n        return dag.has_edge(i, j) or dag.has_edge(j, i)\n\n    def _has_one_edge(dag, i, j):\n        return ((dag.has_edge(i, j) and (not dag.has_edge(j, i))) or\n                (not dag.has_edge(i, j)) and dag.has_edge(j, i))\n\n    def _has_no_edge(dag, i, j):\n        return (not dag.has_edge(i, j)) and (not dag.has_edge(j, i))\n\n    \n    \n    old_dag = dag.copy()\n    while True:\n        for (i, j) in combinations(node_ids, 2):\n            \n            \n            \n            \n            if _has_both_edges(dag, i, j):\n                \n                for k in dag.predecessors(i):\n                    \n                    if dag.has_edge(i, k):\n                        continue\n                    \n                    if _has_any_edge(dag, k, j):\n                        continue\n                    \n                    _logger.debug('R1: remove edge (%s, %s)' % (j, i))\n                    dag.remove_edge(j, i)\n                    break\n\n            \n            \n            \n            \n            if _has_both_edges(dag, i, j):\n                \n                succs_i = set()\n                for k in dag.successors(i):\n                    if not dag.has_edge(k, i):\n                        succs_i.add(k)\n                \n                preds_j = set()\n                for k in dag.predecessors(j):\n                    if not dag.has_edge(j, k):\n                        preds_j.add(k)\n                \n                if len(succs_i & preds_j) > 0:\n                    \n                    _logger.debug('R2: remove edge (%s, %s)' % (j, i))\n                    dag.remove_edge(j, i)\n\n            \n            \n            \n            \n            if _has_both_edges(dag, i, j):\n                \n                adj_i = set()\n                for k in dag.successors(i):\n                    if dag.has_edge(k, i):\n                        adj_i.add(k)\n                \n                for (k, l) in combinations(adj_i, 2):\n                    \n                    if _has_any_edge(dag, k, l):\n                        continue\n                    \n                    if dag.has_edge(j, k) or (not dag.has_edge(k, j)):\n                        continue\n                    \n                    if dag.has_edge(j, l) or (not dag.has_edge(l, j)):\n                        continue\n                    \n                    _logger.debug('R3: remove edge (%s, %s)' % (j, i))\n                    dag.remove_edge(j, i)\n                    break\n\n            \n            \n            \n            \n            \n\n   
     if nx.is_isomorphic(dag, old_dag):\n            break\n        old_dag = dag.copy()\n\n    return dag", "docstring": "Estimate a CPDAG from the skeleton graph and separation sets\nreturned by the estimate_skeleton() function.\n\nArgs:\nskel_graph: A skeleton graph (an undirected networkx.Graph).\nsep_set: An 2D-array of separation set.\nThe contents look like something like below.\nsep_set[i][j] = set([k, l, m])\n\nReturns:\nAn estimated DAG.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    page_header_map = self._GetDataTypeMap('dls_page_header')\n    try:\n        (page_header, file_offset) = self._ReadStructureFromFileObject(file_object, 0, page_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse page header with error: {0!s}'.format(exception))\n    if (page_header.signature not in self._DLS_SIGNATURES):\n        raise errors.UnableToParseFile('Invalid file signature')\n    current_page_end = page_header.page_size\n    file_entry = parser_mediator.GetFileEntry()\n    date_time = self._GetParentModificationTime(file_entry)\n    if date_time:\n        timestamp_description = definitions.TIME_DESCRIPTION_RECORDED\n    else:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n        timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n    file_size = file_object.get_size()\n    while (file_offset < file_size):\n        if (file_offset >= current_page_end):\n            try:\n                (page_header, header_size) = self._ParseDLSPageHeader(file_object, file_offset)\n            except errors.ParseError as exception:\n                parser_mediator.ProduceExtractionWarning('Unable to parse page header with error: {0!s}'.format(exception))\n                break\n            current_page_end += page_header.page_size\n            file_offset += header_size\n            continue\n        if (page_header.signature == self._DLS_V1_SIGNATURE):\n            record_map = self._GetDataTypeMap('dls_record_v1')\n        else:\n            record_map = self._GetDataTypeMap('dls_record_v2')\n        try:\n            (record, record_length) = self._ReadStructureFromFileObject(file_object, file_offset, record_map)\n            file_offset += record_length\n        except (ValueError, errors.ParseError) as exception:\n            parser_mediator.ProduceExtractionWarning('Unable to parse page record with error: {0!s}'.format(exception))\n            break\n        event_data = self._BuildEventData(record)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an fseventsd file.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the header cannot be parsed.", "source": "codesearchnet"}
{"code": "def GetDefaultContract(self):\n    try:\n        return self.GetContracts()[0]\n    except Exception as e:\n        logger.error(('Could not find default contract: %s' % str(e)))\n        raise", "docstring": "Get the default contract.\n\nReturns:\ncontract (Contract): if Successful, a contract of type neo.SmartContract.Contract, otherwise an Exception.\n\nRaises:\nException: if no default contract is found.\n\nNote:\nPrints a warning to the console if the default contract could not be found.", "source": "codesearchnet"}
{"code": "def port_add(br, port, may_exist=False, internal=False):\n    \n    param_may_exist = _param_may_exist(may_exist)\n    cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)\n    if internal:\n        cmd += ' -- set interface {0} type=internal'.format(port)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Creates on bridge a new port named port.\n\nReturns:\nTrue on success, else False.\n\nArgs:\nbr: A string - bridge name\nport: A string - port name\nmay_exist: Bool, if False - attempting to create a port that exists returns False.\ninternal: A boolean to create an internal interface if one does not exist.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_add br0 8080", "source": "juraj-google-style"}
{"code": "def __init__(self, keys=None):\n    \n    if not keys:\n      raise errors.FormatError('Missing keys value.')\n\n    if not isinstance(keys, list):\n      raise errors.FormatError('keys must be a list')\n\n    for key in keys:\n      self.ValidateKey(key)\n\n    super(WindowsRegistryKeySourceType, self).__init__()\n    self.keys = keys", "docstring": "Initializes a source type.\n\nArgs:\nkeys (Optional[list[str]]): key paths relative to the root of\nthe Windows Registry.\n\nRaises:\nFormatError: when keys is not set.", "source": "juraj-google-style"}
{"code": "def send_message(self, message):\n        \n        try:\n            if _message_test_port is not None:\n                _message_test_port.sent.append(message)\n            yield message.send(self)\n        except (WebSocketClosedError, StreamClosedError): \n            \n            log.warning(\"Failed sending message as connection was closed\")\n        raise gen.Return(None)", "docstring": "Send a Bokeh Server protocol message to the connected client.\n\nArgs:\nmessage (Message) : a message to send", "source": "juraj-google-style"}
{"code": "def build_graph(self):\n    import tensorflow as tf\n    input_jpeg = tf.placeholder(tf.string, shape=None)\n    image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)\n    image = tf.expand_dims(image, 0)\n    image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n    image = tf.image.resize_bilinear(image, [self.HEIGHT, self.WIDTH], align_corners=False)\n    image = tf.subtract(image, 0.5)\n    inception_input = tf.multiply(image, 2.0)\n    with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):\n        (_, end_points) = _inceptionlib.inception_v3(inception_input, is_training=False)\n    embedding = end_points['PreLogits']\n    return (input_jpeg, embedding)", "docstring": "Forms the core by building a wrapper around the inception graph.\n\nHere we add the necessary input & output tensors, to decode jpegs,\nserialize embeddings, restore from checkpoint etc.\n\nTo use other Inception models modify this file. Note that to use other\nmodels beside Inception, you should make sure input_shape matches\ntheir input. Resizing or other modifications may be necessary as well.\nSee tensorflow/contrib/slim/python/slim/nets/inception_v3.py for\ndetails about InceptionV3.\n\nReturns:\ninput_jpeg: A tensor containing raw image bytes as the input layer.\nembedding: The embeddings tensor, that will be materialized later.", "source": "codesearchnet"}
{"code": "def show_warning_messages(self, title=_(u\"Incorrect Operation\"), box_type='warning'):\n        \n        msg = self.current.task_data['msg']\n        self.current.output['msgbox'] = {'type': box_type, \"title\": title, \"msg\": msg}\n        del self.current.task_data['msg']", "docstring": "It shows incorrect operations or successful operation messages.\n\nArgs:\ntitle (string): title of message box\nbox_type (string): type of message box (warning, info)", "source": "juraj-google-style"}
{"code": "def start_site(name):\n    \n    ps_cmd = ['Start-WebSite', r\"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    return cmd_ret['retcode'] == 0", "docstring": "Start a Web Site in IIS.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the website to start.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.start_site name='My Test Site'", "source": "juraj-google-style"}
{"code": "def get_metalpdb_info(metalpdb_lig_file):\n    pdb_metals = ['CU', 'ZN', 'MN', 'FE', 'MG', 'CO', 'SE', 'YB', 'SF4', 'FES', 'F3S', 'NI', 'FE2']\n    coordination_number = 0\n    endogenous_ligands = []\n    exogenous_ligands = []\n    ss = StructProp(ident='metalpdb', structure_path=metalpdb_lig_file, file_type='pdb')\n    chain_id = op.basename(metalpdb_lig_file)[5]\n    metal_id = (op.basename(metalpdb_lig_file).split('_')[2], op.basename(metalpdb_lig_file).split('_')[3])\n    for r in ss.parse_structure().first_model.get_residues():\n        return_id = (r.get_id(), r.get_resname())\n        if (r.get_id()[0] != ' '):\n            if ((not (r.resname.strip() in pdb_metals)) and (r.resname != 'HOH')):\n                exogenous_ligands.append(return_id)\n        else:\n            endogenous_ligands.append(return_id)\n        for a in r.get_atom():\n            if (not (a.element in pdb_metals)):\n                coordination_number += 1\n    infodict = {metal_id: {'endogenous_ligands': endogenous_ligands, 'exogenous_ligands': exogenous_ligands, 'coordination_number': coordination_number}}\n    return (chain_id, infodict)", "docstring": "Parse a MetalPDB .lig file and return a tuple of the chain ID it represents, along with metal binding information.\n\nArgs:\nmetalpdb_lig_file (str): Path to .lig file\n\nReturns:\ntuple: (str, dict) of the chain ID and the parsed metal binding site information", "source": "codesearchnet"}
{"code": "def single_offset(self, shape):\n    single_slice_dim = self.single_slice_dim(shape)\n    if single_slice_dim is None:\n        return 0\n    return self.var_offset[single_slice_dim]", "docstring": "Returns the offset when the variable is partitioned in at most one dim.\n\nArgs:\nshape: Tuple or list of `int` indicating the shape of one specific\nvariable partition.\n\nReturns:\n`int` representing the offset in the dimension along which the variable is\npartitioned. Returns 0 if the variable is not being partitioned.\n\nRaises:\nValueError: Depending on self.single_slice_dim().", "source": "github-repos"}
{"code": "def get_filelikeobject(filename: str=None, blob: bytes=None) -> BinaryIO:\n    if ((not filename) and (not blob)):\n        raise ValueError('no filename and no blob')\n    if (filename and blob):\n        raise ValueError('specify either filename or blob')\n    if filename:\n        return open(filename, 'rb')\n    else:\n        return io.BytesIO(blob)", "docstring": "Open a file-like object.\n\nGuard the use of this function with ``with``.\n\nArgs:\nfilename: for specifying via a filename\nblob: for specifying via an in-memory ``bytes`` object\n\nReturns:\na :class:`BinaryIO` object", "source": "codesearchnet"}
{"code": "def process_buffer(buffer, n_channels):\n    \n    samples = np.concatenate(buffer)\n\n    if n_channels > 1:\n        samples = samples.reshape((-1, n_channels)).T\n        samples = librosa.to_mono(samples)\n\n    return samples", "docstring": "Merge the read blocks and resample if necessary.\n\nArgs:\nbuffer (list): A list of blocks of samples.\nn_channels (int): The number of channels of the input data.\n\nReturns:\nnp.array: The samples", "source": "juraj-google-style"}
{"code": "async def get_person(self, id_):\n    data = (await self._get_person_json(id_, OrderedDict(append_to_response='movie_credits')))\n    return Person.from_json(data, self.config['data'].get('images'))", "docstring": "Retrieve person data by ID.\n\nArguments:\nid_ (:py:class:`int`): The person's TMDb ID.\n\nReturns:\n:py:class:`~.Person`: The requested person.", "source": "codesearchnet"}
{"code": "def CompleteTask(self, task):\n    with self._lock:\n        if (task.identifier not in self._tasks_merging):\n            raise KeyError('Task {0:s} was not merging.'.format(task.identifier))\n        self.SampleTaskStatus(task, 'completed')\n        del self._tasks_merging[task.identifier]\n        logger.debug('Completed task {0:s}.'.format(task.identifier))", "docstring": "Completes a task.\n\nThe task is complete and can be removed from the task manager.\n\nArgs:\ntask (Task): task.\n\nRaises:\nKeyError: if the task was not merging.", "source": "codesearchnet"}
{"code": "def convert_softmax(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting softmax ...')\n\n    if names == 'short':\n        tf_name = 'SMAX' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    def target_layer(x, dim=params['dim']):\n        import keras\n        return keras.activations.softmax(x, axis=dim)\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert softmax layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def __init__(self, structure, element):\n        \n        self.structure = structure\n        self.element = element\n\n        \n        sga = SpacegroupAnalyzer(self.structure)\n        self.symm_structure = sga.get_symmetrized_structure()\n\n        self.equiv_sub = []\n        for equiv_site_set in list(self.symm_structure.equivalent_sites):\n            vac_site = equiv_site_set[0]\n            if isinstance(element, str):  \n                vac_specie = vac_site.specie.symbol\n            else:\n                vac_specie = vac_site.specie\n            if element != vac_specie:\n                defect_site = PeriodicSite(element, vac_site.coords, structure.lattice, coords_are_cartesian=True)\n                sub = Substitution(structure, defect_site)\n                self.equiv_sub.append(sub)", "docstring": "Initializes a Substitution Generator\nnote: an Antisite is considered a type of substitution\nArgs:\nstructure(Structure): pymatgen structure object\nelement (str or Element or Specie): element for the substitution", "source": "juraj-google-style"}
{"code": "def _ufunc_dispatch(ufunc, method, i, inputs, **kwargs):\n    \n    \n    if 'out' in kwargs and kwargs['out'] is not None:\n        raise Error('for distributed ufuncs `out=` is not yet implemented')\n    nin = 2 if ufunc is np.dot else ufunc.nin\n    if nin is 1 and method == '__call__':\n        return vectorize(ufunc.__call__)(inputs[0], **kwargs)\n    elif nin is 2 and method == '__call__':\n        from distob import engine\n        here = engine.eid\n        \n        locs, weights = zip(*[_engine_affinity(a) for a in inputs])\n        \n        bshape = _broadcast_shape(*inputs)\n        locs = list(locs)\n        for i, loc in enumerate(locs):\n            if isinstance(loc, _TupleType):\n                num_new_axes = len(bshape) - inputs[i].ndim\n                if num_new_axes > 0:\n                    locs[i] = (locs[i][0], locs[i][1] + num_new_axes)\n        if ufunc is np.dot:\n            locs = [here if isinstance(m, _TupleType) else m for m in locs]\n        if locs[0] == locs[1]:\n            location = locs[0]\n        else:\n            \n            \n            smallest = 0 if weights[0] <= weights[1] else 1\n            largest = 1 - smallest\n            if locs[0] is here or locs[1] is here:\n                location = here if weights[0] == weights[1] else locs[largest]\n            else:\n                \n                \n                \n                if weights[smallest]*2 < weights[largest] + weights[smallest]:\n                    location = locs[largest]\n                else:\n                    location = here\n        \n        inputs = [_ufunc_move_input(a, location, bshape) for a in inputs]\n        \n        if location is here:\n            return ufunc.__call__(inputs[0], inputs[1], **kwargs)\n        else:\n            if isinstance(location, numbers.Integral):\n                \n                return call(ufunc.__call__, inputs[0], inputs[1], **kwargs)\n            else:\n                \n                \n                engine_ids, distaxis = location\n                n = len(engine_ids)\n                is_dist = tuple(isinstance(a, DistArray) for a in inputs)\n                assert(is_dist[0] or is_dist[1])\n                for i in 0, 1:\n                    if is_dist[i]:\n                        ndim = inputs[i].ndim\n                        assert(inputs[i]._distaxis == distaxis)\n                        assert(inputs[i]._n == n)\n                def _remote_ucall(inputs, **kwargs):\n                    \n                    return ufunc.__call__(inputs[0], inputs[1], **kwargs)\n                results = []\n                kwargs = kwargs.copy()\n                kwargs['block'] = False\n                kwargs['prefer_local'] = False\n                for j in range(n):\n                    subinputs = tuple(inputs[i]._subarrays[j] if \n                            is_dist[i] else inputs[i] for i in (0, 1))\n                    results.append(call(_remote_ucall, subinputs, **kwargs))\n                results = [convert_result(ar) for ar in results]\n                return DistArray(results, distaxis)\n    elif ufunc.nin > 2:\n        raise Error(u'Distributing ufuncs with >2 inputs is not yet supported')\n    else:\n        raise Error(u'Distributed ufunc.%s() is not yet implemented' % method)", "docstring": "Route ufunc execution intelligently to local host or remote engine(s)\ndepending on where the inputs are, to minimize the need to move data.\nArgs:\nsee numpy documentation for __numpy_ufunc__", "source": 
"juraj-google-style"}
{"code": "def _is_injective(self):\n    return True", "docstring": "Returns true iff the forward map `g` is injective (one-to-one function).\n\n**WARNING** This hidden property and its behavior are subject to change.\n\nNote:  Non-injective maps `g` are supported, provided their domain `D` can\nbe partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,\nignoring sets of measure zero, the restriction of `g` to each subset is a\ndifferentiable bijection onto `g(D)`.\n\nReturns:\nis_injective: Python `bool`.", "source": "github-repos"}
{"code": "def accumulate_dict_from_superclasses(cls, propname):\n    \n    cachename = \"__cached_all\" + propname\n    \n    \n    if cachename not in cls.__dict__:\n        d = dict()\n        for c in inspect.getmro(cls):\n            if issubclass(c, HasProps) and hasattr(c, propname):\n                base = getattr(c, propname)\n                for k,v in base.items():\n                    if k not in d:\n                        d[k] = v\n        setattr(cls, cachename, d)\n    return cls.__dict__[cachename]", "docstring": "Traverse the class hierarchy and accumulate the special dicts\n``MetaHasProps`` stores on classes:\n\nArgs:\nname (str) : name of the special attribute to collect.\n\nTypically meaningful values are: ``__dataspecs__``,\n``__overridden_defaults__``", "source": "juraj-google-style"}
{"code": "def match_objects(self, set_a, set_b, time_a, time_b):\n    costs = (self.cost_matrix(set_a, set_b, time_a, time_b) * 100)\n    min_row_costs = costs.min(axis=1)\n    min_col_costs = costs.min(axis=0)\n    good_rows = np.where((min_row_costs < 100))[0]\n    good_cols = np.where((min_col_costs < 100))[0]\n    assignments = []\n    if ((len(good_rows) > 0) and (len(good_cols) > 0)):\n        munk = Munkres()\n        initial_assignments = munk.compute(costs[tuple(np.meshgrid(good_rows, good_cols, indexing='ij'))].tolist())\n        initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]\n        for a in initial_assignments:\n            if (costs[(a[0], a[1])] < 100):\n                assignments.append(a)\n    return assignments", "docstring": "Match two sets of objects at particular times.\n\nArgs:\nset_a: list of STObjects\nset_b: list of STObjects\ntime_a: time at which set_a is being evaluated for matching\ntime_b: time at which set_b is being evaluated for matching\n\nReturns:\nList of tuples containing (set_a index, set_b index) for each match", "source": "codesearchnet"}
{"code": "def process_arguments(self, func, args):\n    pos_args = []\n    kw_args = {}\n    while (len(args) > 0):\n        if (func.metadata.spec_filled(pos_args, kw_args) and (not self._is_flag(args[0]))):\n            break\n        arg = args.pop(0)\n        if (arg == '--'):\n            break\n        elif self._is_flag(arg):\n            arg_value = None\n            arg_name = None\n            if (len(arg) == 2):\n                arg_name = func.metadata.match_shortname(arg[1:], filled_args=pos_args)\n            else:\n                if (not arg.startswith('--')):\n                    raise ArgumentError('Invalid method of specifying keyword argument that did not start with --', argument=arg)\n                arg = arg[2:]\n                if ('=' in arg):\n                    (arg, arg_value) = arg.split('=', 1)\n                arg_name = func.metadata.match_shortname(arg, filled_args=pos_args)\n            arg_type = func.metadata.param_type(arg_name)\n            if (arg_type is None):\n                raise ArgumentError('Attempting to set a parameter from command line that does not have type information', argument=arg_name)\n            if (arg_value is None):\n                arg_value = self._extract_arg_value(arg_name, arg_type, args)\n            kw_args[arg_name] = arg_value\n        else:\n            pos_args.append(arg)\n    if ((len(args) > 0) and (args[0] == '--')):\n        args.pop(0)\n    return (pos_args, kw_args, args)", "docstring": "Process arguments from the command line into positional and kw args.\n\nArguments are consumed until the argument spec for the function is filled\nor a -- is found or there are no more arguments.  Keyword arguments can be\nspecified using --field=value, -f value or --field value.  Positional\narguments are specified just on the command line itself.\n\nIf a keyword argument (`field`) is a boolean, it can be set to True by just passing\n--field or -f without needing to explicitly pass True unless this would cause\nambiguity in parsing since the next expected positional argument is also a boolean\nor a string.\n\nArgs:\nfunc (callable): A function previously annotated with type information\nargs (list): A list of all of the potential arguments to this function.\n\nReturns:\n(args, kw_args, unused args): A tuple with a list of args, a dict of\nkeyword args and a list of any unused args that were not processed.", "source": "codesearchnet"}
{"code": "def long_click(self, pos, duration=2.0):\n        \n\n        try:\n            duration = float(duration)\n        except ValueError:\n            raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))\n\n        if not (0 <= pos[0] <= 1) or not (0 <= pos[1] <= 1):\n            raise InvalidOperationException('Click position out of screen. {}'.format(repr(pos)))\n        return self.agent.input.longClick(pos[0], pos[1], duration)", "docstring": "Similar to click but press the screen for the given time interval and then release\n\nArgs:\npos (:obj:`2-list/2-tuple`): coordinates (x, y) in range from 0 to 1\nduration: duration of press the screen", "source": "juraj-google-style"}
{"code": "def _build(self, inputs):\n    \n    shape_inputs = inputs.get_shape().as_list()\n    rank = len(shape_inputs)\n\n    \n    full_multiples = [1] * rank\n\n    \n    for dim, multiple in zip(self._dims, self._multiples):\n      full_multiples[dim] = multiple\n\n    return tf.tile(inputs, multiples=full_multiples)", "docstring": "Connects the `TileByDim` module into the graph.\n\nArgs:\ninputs: `Tensor` to tile.\n\nReturns:\nThe tiled tensor.", "source": "juraj-google-style"}
{"code": "def load(self,cache_genotype=False,cache_phenotype=True):\n        \n        self.f = h5py.File(self.file_name,'r')\n        self.pheno = self.f['phenotype']\n        self.geno  = self.f['genotype']\n        \n\n        \n        self.genoM = self.geno['matrix']\n\n        self.phenoM = self.pheno['matrix']\n        self.sample_ID = self.geno['row_header']['sample_ID'][:]\n        self.genoChrom = self.geno['col_header']['chrom'][:]\n        self.genoPos   = self.geno['col_header']['pos'][:]\n        if 'pos_cum' in list(self.geno['col_header'].keys()):\n            self.genoPos_cum   = self.geno['col_header']['pos_cum'][:]\n        else:\n            self.genoPos_cum = None\n        self.phenotype_ID = self.pheno['col_header']['phenotype_ID'][:]\n\n\n        \n        if cache_genotype:\n            self.genoM = self.genoM[:]\n        if cache_phenotype:\n            self.phenoM = self.phenoM[:]\n    \n        \n        headers = list(self.pheno['col_header'].keys())\n        if 'gene_ID' in headers:\n            self.eqtl = True\n            self.geneID = self.pheno['col_header']['gene_ID'][:]\n            self.gene_pos = SP.array([self.pheno['col_header']['gene_chrom'][:],self.pheno['col_header']['gene_start'][:],self.pheno['col_header']['gene_end']],dtype='int').T\n            self.geneIDs= list(set(self.geneID))\n        else:\n            self.eqtl = False\n        if 'environment' in headers:\n            self.E  = self.pheno['col_header/environment'][:]\n            self.Es = list(set(self.E))\n        else:\n            self.E = None\n\n        \n        self.N = self.genoM.shape[0]\n        self.S = self.genoM.shape[1]\n        self.P = self.phenoM.shape[1]\n        assert (self.genoM.shape[0]==self.phenoM.shape[0]), 'dimension missmatch'", "docstring": "load data file\n\nArgs:\ncache_genotype:     load genotypes fully into memory (default: False)\ncache_phenotype:    load phentopyes fully intro memry (default: True)", "source": "juraj-google-style"}
{"code": "def _receive_signal(self, progress_subscript):\n        \n\n        self.progress = self._estimate_progress()\n        self.updateProgress.emit(int(self.progress))", "docstring": "this function takes care of signals emitted by the subscripts\nArgs:\nprogress_subscript: progress of subscript", "source": "juraj-google-style"}
{"code": "def from_json(cls, json):\n    \n    \n    params = dict((str(k), v) for k, v in json.iteritems()\n                  if k in cls._PARAMS)\n\n    \n    \n    \n    if cls._OFFSET_PARAM in params:\n      params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])\n    return cls(**params)", "docstring": "Creates an instance of the InputReader for the given input shard's state.\n\nArgs:\njson: The InputReader state as a dict-like object.\n\nReturns:\nAn instance of the InputReader configured using the given JSON parameters.", "source": "juraj-google-style"}
{"code": "def ignore():\n\n    def parse_line(line):\n        if (not isinstance(line, string_types)):\n            line = line.decode('utf-8')\n        line = line.split('\n        return line\n    ignore_files = [conf.proj_path('.gitignore'), conf.proj_path('.git/info/exclude'), config().get('core.excludesfile')]\n    result = []\n    for ignore_file in ignore_files:\n        if (not (ignore_file and os.path.exists(ignore_file))):\n            continue\n        with open(ignore_file) as fp:\n            parsed = (parse_line(l) for l in fp.readlines())\n            result += [x for x in parsed if x]\n    return result", "docstring": "Return a list of patterns in the project .gitignore\n\nReturns:\nlist[str]: List of patterns set to be ignored by git.", "source": "codesearchnet"}
{"code": "def register_gpt_plugin(self, fs_guid, plugin):\n        \n        key = uuid.UUID(fs_guid.lower())\n\n        self.logger.debug('GPT: {}, GUID: {}'\n                          .format(self.__get_plugin_name(plugin), fs_guid))\n        self.__gpt_plugins[key].append(plugin)", "docstring": "Used in plugin's registration routine,\nto associate it's detection method with given filesystem guid\n\nArgs:\nfs_guid: filesystem guid that is read from GPT partition entry\nplugin: plugin that supports this filesystem", "source": "juraj-google-style"}
{"code": "def write_libraries(dir, libraries):\n  \n  files = [open(os.path.join(dir, k), \"w\") for k, _ in libraries]\n  \n  for f, (_, v) in zip(files, libraries):\n    v.write_markdown_to_file(f)\n  \n  \n  \n  for f, (_, v) in zip(files, libraries):\n    v.write_other_members(f)\n    f.close()", "docstring": "Write a list of libraries to disk.\n\nArgs:\ndir: Output directory.\nlibraries: List of (filename, library) pairs.", "source": "juraj-google-style"}
{"code": "def schedCoro(self, coro):\n    import synapse.lib.provenance as s_provenance\n    if __debug__:\n        assert s_coro.iscoro(coro)\n        import synapse.lib.threads as s_threads\n        assert (s_threads.iden() == self.tid)\n    task = self.loop.create_task(coro)\n    if asyncio.current_task():\n        s_provenance.dupstack(task)\n\n    def taskDone(task):\n        self._active_tasks.remove(task)\n        try:\n            task.result()\n        except asyncio.CancelledError:\n            pass\n        except Exception:\n            logger.exception('Task scheduled through Base.schedCoro raised exception')\n    self._active_tasks.add(task)\n    task.add_done_callback(taskDone)\n    return task", "docstring": "Schedules a free-running coroutine to run on this base's event loop.  Kills the coroutine if Base is fini'd.\nIt does not pend on coroutine completion.\n\nPrecondition:\nThis function is *not* threadsafe and must be run on the Base's event loop\n\nReturns:\nasyncio.Task: An asyncio.Task object.", "source": "codesearchnet"}
{"code": "def convert(self):\n    return super(TFLiteSavedModelConverter, self).convert()", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nNote that in the converted TensorFlow Lite model, the input tensor's order\nmight be changed each time `convert` is called. To access input tensor\ninformation, please consider using the `SignatureRunner` API\n(`interpreter.get_signature_runner`).\n\nReturns:\nThe converted data in serialized format, either a TFLite Flatbuffer or\na Graphviz graph depending on value in `output_format`.\n\nRaises:\nValueError:\nInput shape is not specified.\nNone value for dimension in input_tensor.", "source": "github-repos"}
{"code": "def Current(os_override=None, arch_override=None):\n    return Platform(os_override if os_override else OperatingSystem.Current(), arch_override if arch_override else Architecture.Current())", "docstring": "Determines the current platform you are running on.\n\nArgs:\nos_override: OperatingSystem, A value to use instead of the current.\narch_override: Architecture, A value to use instead of the current.\n\nReturns:\nPlatform, The platform tuple of operating system and architecture.  Either\ncan be None if it could not be determined.", "source": "github-repos"}
{"code": "def Verify(self, public_key):\n    \n    if self.digest_type != self.HashType.SHA256:\n      raise rdfvalue.DecodeError(\"Unsupported digest.\")\n    if self.signature_type not in [\n        self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS\n    ]:\n      raise rdfvalue.DecodeError(\"Unsupported signature type.\")\n\n    try:\n      public_key.Verify(self.data, self.signature)\n    except InvalidSignature as e:\n      raise rdfvalue.DecodeError(\"Could not verify blob. Error: %s\" % e)\n\n    return True", "docstring": "Verify the data in this blob.\n\nArgs:\npublic_key: The public key to use for verification.\n\nReturns:\nTrue when verification succeeds.\n\nRaises:\nrdfvalue.DecodeError if the data is not suitable verified.", "source": "juraj-google-style"}
{"code": "def _get_responses_list(self, raw_output, stream):\n        \n        responses = []\n\n        raw_output, self._incomplete_output[stream] = _buffer_incomplete_responses(\n            raw_output, self._incomplete_output.get(stream)\n        )\n\n        if not raw_output:\n            return responses\n\n        response_list = list(\n            filter(lambda x: x, raw_output.decode(errors=\"replace\").split(\"\\n\"))\n        )  \n\n        \n        for response in response_list:\n            if gdbmiparser.response_is_finished(response):\n                pass\n            else:\n                parsed_response = gdbmiparser.parse_response(response)\n                parsed_response[\"stream\"] = stream\n\n                self.logger.debug(\"%s\", pformat(parsed_response))\n\n                responses.append(parsed_response)\n\n        return responses", "docstring": "Get parsed response list from string output\nArgs:\nraw_output (unicode): gdb output to parse\nstream (str): either stdout or stderr", "source": "juraj-google-style"}
{"code": "def lazy_load(name):\n\n    def wrapper(load_fn):\n\n        @_memoize\n        def load_once(self):\n            if load_once.loading:\n                raise ImportError(('Circular import when resolving LazyModule %r' % name))\n            load_once.loading = True\n            try:\n                module = load_fn()\n            finally:\n                load_once.loading = False\n            self.__dict__.update(module.__dict__)\n            load_once.loaded = True\n            return module\n        load_once.loading = False\n        load_once.loaded = False\n\n        class LazyModule(types.ModuleType):\n\n            def __getattr__(self, attr_name):\n                return getattr(load_once(self), attr_name)\n\n            def __dir__(self):\n                return dir(load_once(self))\n\n            def __repr__(self):\n                if load_once.loaded:\n                    return ('<%r via LazyModule (loaded)>' % load_once(self))\n                return ('<module %r via LazyModule (not yet loaded)>' % self.__name__)\n        return LazyModule(name)\n    return wrapper", "docstring": "Decorator to define a function that lazily loads the module 'name'.\n\nThis can be used to defer importing troublesome dependencies - e.g. ones that\nare large and infrequently used, or that cause a dependency cycle -\nuntil they are actually used.\n\nArgs:\nname: the fully-qualified name of the module; typically the last segment\nof 'name' matches the name of the decorated function\n\nReturns:\nDecorator function that produces a lazy-loading module 'name' backed by the\nunderlying decorated function.", "source": "codesearchnet"}
{"code": "def sigmoid_recall_one_hot(logits, labels, weights_fn=None):\n    with tf.variable_scope('sigmoid_recall_one_hot', values=[logits, labels]):\n        del weights_fn\n        num_classes = logits.shape[(- 1)]\n        predictions = tf.nn.sigmoid(logits)\n        predictions = tf.argmax(predictions, (- 1))\n        predictions = tf.one_hot(predictions, num_classes)\n        (_, recall) = tf.metrics.recall(labels=labels, predictions=predictions)\n        return (recall, tf.constant(1.0))", "docstring": "Calculate recall for a set, given one-hot labels and logits.\n\nPredictions are converted to one-hot,\nas predictions[example][arg-max(example)] = 1\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\nrecall (scalar), weights", "source": "codesearchnet"}
{"code": "def _parse_url(self, url):\n    if not self._full_urls:\n        m = _URL_RE.match(url)\n        if m is None:\n            raise ValueError('Could not parse url: %s' % url)\n        return ('', m.group(1))\n    else:\n        m = _FULL_URL_RE.match(url)\n        if m is None:\n            raise ValueError('Could not parse url: %s' % url)\n        return (m.group(1), m.group(2) or '/')", "docstring": "Verifies that url begins with hdfs:// prefix, strips it and adds a\nleading /.\n\nParsing behavior is determined by HadoopFileSystemOptions.hdfs_full_urls.\n\nArgs:\nurl: (str) A URL in the form hdfs://path/...\nor in the form hdfs://server/path/...\n\nRaises:\nValueError if the URL doesn't match the expect format.\n\nReturns:\n(str, str) If using hdfs_full_urls, for an input of\n'hdfs://server/path/...' will return (server, '/path/...').\nOtherwise, for an input of 'hdfs://path/...', will return\n('', '/path/...').", "source": "github-repos"}
{"code": "def try_checkpoint_metadata(self, trial):\n        \n        if trial._checkpoint.storage == Checkpoint.MEMORY:\n            logger.debug(\"Not saving data for trial w/ memory checkpoint.\")\n            return\n        try:\n            logger.debug(\"Saving trial metadata.\")\n            self._cached_trial_state[trial.trial_id] = trial.__getstate__()\n        except Exception:\n            logger.exception(\"Error checkpointing trial metadata.\")", "docstring": "Checkpoints metadata.\n\nArgs:\ntrial (Trial): Trial to checkpoint.", "source": "juraj-google-style"}
{"code": "def percent_point(self, y, V):\n        \n        self.check_fit()\n\n        if self.theta < 0:\n            return V\n\n        else:\n            result = []\n            for _y, _V in zip(y, V):\n                minimum = fminbound(self.partial_derivative_scalar, EPSILON, 1.0, args=(_y, _V))\n                if isinstance(minimum, np.ndarray):\n                    minimum = minimum[0]\n\n                result.append(minimum)\n\n            return np.array(result)", "docstring": "Compute the inverse of conditional cumulative distribution :math:`C(u|v)^-1`\n\nArgs:\ny: `np.ndarray` value of :math:`C(u|v)`.\nv: `np.ndarray` given value of v.", "source": "juraj-google-style"}
{"code": "def Create(self, urn, aff4_type, mode='w', token=None, age=NEWEST_TIME, force_new_version=True, object_exists=False, mutation_pool=None, transaction=None):\n    if (not data_store.AFF4Enabled()):\n        raise NotImplementedError('AFF4 data store has been disabled.')\n    if (mode not in ['w', 'r', 'rw']):\n        raise AttributeError(('Invalid mode %s' % mode))\n    if (token is None):\n        token = data_store.default_token\n    if (urn is not None):\n        urn = rdfvalue.RDFURN(urn)\n    _ValidateAFF4Type(aff4_type)\n    if ('r' in mode):\n        try:\n            existing = self.Open(urn, mode=mode, token=token, age=age, transaction=transaction)\n            result = existing.Upgrade(aff4_type)\n            if aff4_type:\n                result.aff4_type = aff4_type.__name__\n            if (force_new_version and (existing.Get(result.Schema.TYPE) != aff4_type.__name__)):\n                result.ForceNewVersion()\n            return result\n        except IOError:\n            pass\n    result = aff4_type(urn, mode=mode, token=token, age=age, aff4_type=aff4_type.__name__, object_exists=object_exists, mutation_pool=mutation_pool, transaction=transaction)\n    result.Initialize()\n    if force_new_version:\n        result.ForceNewVersion()\n    return result", "docstring": "Creates the urn if it does not already exist, otherwise opens it.\n\nIf the urn exists and is of a different type, this will also promote it to\nthe specified type.\n\nArgs:\nurn: The object to create.\naff4_type: The desired type for this object.\nmode: The desired mode for this object.\ntoken: The Security Token to use for opening this item.\nage: The age policy used to build this object. Only makes sense when mode\nhas \"r\".\nforce_new_version: Forces the creation of a new object in the data_store.\nobject_exists: If we know the object already exists we can skip index\ncreation.\nmutation_pool: An optional MutationPool object to write to. If not given,\nthe data_store is used directly.\ntransaction: For locked objects, a lock is passed to the object.\n\nReturns:\nAn AFF4 object of the desired type and mode.\n\nRaises:\nAttributeError: If the mode is invalid.", "source": "codesearchnet"}
{"code": "def get_configured_consensus_module(block_id, state_view):\n    settings_view = SettingsView(state_view)\n    default_consensus = ('genesis' if (block_id == NULL_BLOCK_IDENTIFIER) else 'devmode')\n    consensus_module_name = settings_view.get_setting('sawtooth.consensus.algorithm', default_value=default_consensus)\n    return ConsensusFactory.get_consensus_module(consensus_module_name)", "docstring": "Returns the consensus_module based on the consensus module set by\nthe \"sawtooth_settings\" transaction family.\n\nArgs:\nblock_id (str): the block id associated with the current state_view\nstate_view (:obj:`StateView`): the current state view to use for\nsetting values\nRaises:\nUnknownConsensusModuleError: Thrown when an invalid consensus\nmodule has been configured.", "source": "codesearchnet"}
{"code": "def create_channel(cls, address=\"firestore.googleapis.com:443\", credentials=None):\n        \n        return google.api_core.grpc_helpers.create_channel(\n            address, credentials=credentials, scopes=cls._OAUTH_SCOPES\n        )", "docstring": "Create and return a gRPC channel object.\n\nArgs:\naddress (str): The host for the channel to use.\ncredentials (~.Credentials): The\nauthorization credentials to attach to requests. These\ncredentials identify this application to the service. If\nnone are specified, the client will attempt to ascertain\nthe credentials from the environment.\n\nReturns:\ngrpc.Channel: A gRPC channel object.", "source": "juraj-google-style"}
{"code": "def get_help(self, cmd_prefix=None):\n    if not cmd_prefix:\n        help_info = RichTextLines([])\n        if self._help_intro:\n            help_info.extend(self._help_intro)\n        sorted_prefixes = sorted(self._handlers)\n        for cmd_prefix in sorted_prefixes:\n            lines = self._get_help_for_command_prefix(cmd_prefix)\n            lines.append('')\n            lines.append('')\n            help_info.extend(RichTextLines(lines))\n        return help_info\n    else:\n        return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))", "docstring": "Compile help information into a RichTextLines object.\n\nArgs:\ncmd_prefix: Optional command prefix. As the prefix itself or one of its\naliases.\n\nReturns:\nA RichTextLines object containing the help information. If cmd_prefix\nis None, the return value will be the full command-line help. Otherwise,\nit will be the help information for the specified command.", "source": "github-repos"}
{"code": "def triangular(logu, name=None):\n    with tf.compat.v1.name_scope(name, 'triangular', [logu]):\n        logu = tf.convert_to_tensor(value=logu, name='logu')\n        return (pearson(logu) / (1.0 + tf.exp(logu)))", "docstring": "The Triangular Csiszar-function in log-space.\n\nA Csiszar-function is a member of,\n\n```none\nF = { f:R_+ to R : f convex }.\n```\n\nThe Triangular Csiszar-function is:\n\n```none\nf(u) = (u - 1)**2 / (1 + u)\n```\n\nThis Csiszar-function induces a symmetric f-Divergence, i.e.,\n`D_f[p, q] = D_f[q, p]`.\n\nWarning: this function makes non-log-space calculations and may therefore be\nnumerically unstable for `|logu| >> 0`.\n\nArgs:\nlogu: `float`-like `Tensor` representing `log(u)` from above.\nname: Python `str` name prefixed to Ops created by this function.\n\nReturns:\ntriangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated\nat `u = exp(logu)`.", "source": "codesearchnet"}
{"code": "def get_polypeptide_within(self, chain_id, resnum, angstroms, only_protein=True, use_ca=False, custom_coord=None, return_resnums=False):\n    if self.structure:\n        parsed = self.structure\n    else:\n        parsed = self.parse_structure()\n    residue_list = ssbio.protein.structure.properties.residues.within(resnum=resnum, chain_id=chain_id, model=parsed.first_model, angstroms=angstroms, use_ca=use_ca, custom_coord=custom_coord)\n    if only_protein:\n        filtered_residue_list = [x for x in residue_list if (x.id[0] == ' ')]\n    else:\n        filtered_residue_list = residue_list\n    residue_list_combined = Polypeptide(filtered_residue_list)\n    if return_resnums:\n        resnums = [int(x.id[1]) for x in filtered_residue_list]\n        return (residue_list_combined, resnums)\n    return residue_list_combined", "docstring": "Get a Polypeptide object of the amino acids within X angstroms of the specified chain + residue number.\n\nArgs:\nresnum (int): Residue number of the structure\nchain_id (str): Chain ID of the residue number\nangstroms (float): Radius of the search sphere\nonly_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence\nuse_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used)\ncustom_coord (list): custom XYZ coord\nreturn_resnums (bool): if list of resnums should be returned\n\nReturns:\nBio.PDB.Polypeptide.Polypeptide: Biopython Polypeptide object", "source": "codesearchnet"}
{"code": "def Collect(self, knowledge_base):\n    \n    environment_variable = knowledge_base.GetEnvironmentVariable('programdata')\n    allusersappdata = getattr(environment_variable, 'value', None)\n\n    if not allusersappdata:\n      environment_variable = knowledge_base.GetEnvironmentVariable(\n          'allusersprofile')\n      allusersdata = getattr(environment_variable, 'value', None)\n\n      if allusersdata:\n        allusersappdata = '\\\\'.join([allusersdata, 'Application Data'])\n\n    if allusersappdata:\n      environment_variable = artifacts.EnvironmentVariableArtifact(\n          case_sensitive=False, name='allusersappdata', value=allusersappdata)\n\n      try:\n        logger.debug('setting environment variable: {0:s} to: \"{1:s}\"'.format(\n            'allusersappdata', allusersappdata))\n        knowledge_base.AddEnvironmentVariable(environment_variable)\n      except KeyError:\n        \n        pass", "docstring": "Collects values from the knowledge base.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def get_month(datestring):\n    convert_written = re.compile('jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec', re.IGNORECASE)\n    month = convert_written.search(datestring)\n    month_number = None\n    if month:\n        month_number = strptime(month.group(), '%b').tm_mon\n        if (month_number < 10):\n            month_number = add_zero(month_number)\n    return str(month_number)", "docstring": "Transforms a written month into corresponding month number.\n\nE.g. November -> 11, or May -> 05.\n\nKeyword arguments:\ndatestring -- a string\n\nReturns:\nString, or None if the transformation fails", "source": "codesearchnet"}
{"code": "def ground_temperature_depth(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ground_temperature_depth`'.format(value))\n    self._ground_temperature_depth = value", "docstring": "Corresponds to IDD Field `ground_temperature_depth`\n\nArgs:\nvalue (float): value for IDD Field `ground_temperature_depth`\nUnit: m\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _RunScript(self, script):\n    \n    process = subprocess.Popen(\n        script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n    while True:\n      for line in iter(process.stdout.readline, b''):\n        self.logger.info(line.decode('utf-8').rstrip('\\n'))\n      if process.poll() is not None:\n        break", "docstring": "Run a script and log the streamed script output.\n\nArgs:\nscript: string, the file location of an executable script.", "source": "juraj-google-style"}
{"code": "def p(value, bits=None, endian=None, target=None):\n    return globals()[('p%d' % _get_bits(bits, target))](value, endian=endian, target=target)", "docstring": "Pack a signed pointer for a given target.\n\nArgs:\nvalue(int): The value to pack.\nbits(:class:`pwnypack.target.Target.Bits`): Override the default\nword size. If ``None`` it will look at the word size of\n``target``.\nendian(:class:`~pwnypack.target.Target.Endian`): Override the default\nbyte order. If ``None``, it will look at the byte order of\nthe ``target`` argument.\ntarget(:class:`~pwnypack.target.Target`): Override the default byte\norder. If ``None``, it will look at the byte order of\nthe global :data:`~pwnypack.target.target`.", "source": "codesearchnet"}
{"code": "def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:\n        \n        return any(q in qubits for q in self.qubits)", "docstring": "Determines if the moment has operations touching the given qubits.\n\nArgs:\nqubits: The qubits that may or may not be touched by operations.\n\nReturns:\nWhether this moment has operations involving the qubits.", "source": "juraj-google-style"}
{"code": "def get_barycenter(self):\n        \n        try:\n            mass = self['mass'].values\n        except KeyError:\n            mass = self.add_data('mass')['mass'].values\n        pos = self.loc[:, ['x', 'y', 'z']].values\n        return (pos * mass[:, None]).sum(axis=0) / self.get_total_mass()", "docstring": "Return the mass weighted average location.\n\nArgs:\nNone\n\nReturns:\n:class:`numpy.ndarray`:", "source": "juraj-google-style"}
{"code": "def nack(self, channel_id=None, **kwargs):  \n        \n        path = \"/event-service/v1/channels/{}/nack\".format(channel_id)\n        r = self._httpclient.request(\n            method=\"POST\",\n            url=self.url,\n            path=path,\n            **kwargs\n        )\n        return r", "docstring": "Send a negative read-acknowledgement to the service.\n\nCauses the channel's read point to move to its previous position\nprior to the last poll.\n\nArgs:\nchannel_id (str): The channel ID.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nRefer to ``event_nack.py`` example.", "source": "juraj-google-style"}
{"code": "def pre_run_cell(self, cellno, code):\n    self.cellid = cellno\n    import ast\n    if findloop(ast.parse(code)):\n        from acorn.logging.decoration import set_streamlining\n        set_streamlining(True)\n        from time import time\n        self.pre = {'m': 'loop', 'a': None, 's': time(), 'r': None, 'c': code}", "docstring": "Executes before the user-entered code in `ipython` is run. This\nintercepts loops and other problematic code that would produce lots of\ndatabase entries and streamlines it to produce only a single entry.\n\nArgs:\ncellno (int): the cell number that is about to be executed.\ncode (str): python source code that is about to be executed.", "source": "codesearchnet"}
{"code": "def decode(self, ids, strip_extraneous=False):\n    \n    if strip_extraneous:\n      ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))\n    return unicode_to_native(\n        tokenizer.decode(self._subtoken_ids_to_tokens(ids)))", "docstring": "Converts a sequence of subtoken ids to a native string.\n\nArgs:\nids: a list of integers in the range [0, vocab_size)\nstrip_extraneous: bool, whether to strip off extraneous tokens\n(EOS and PAD).\n\nReturns:\na native string", "source": "juraj-google-style"}
{"code": "def delete_endpoint(self, delete_endpoint_config=True):\n        \n        if delete_endpoint_config:\n            self._delete_endpoint_config()\n\n        self.sagemaker_session.delete_endpoint(self.endpoint)", "docstring": "Delete the Amazon SageMaker endpoint backing this predictor. Also delete the endpoint configuration attached\nto it if delete_endpoint_config is True.\n\nArgs:\ndelete_endpoint_config (bool, optional): Flag to indicate whether to delete endpoint configuration together\nwith endpoint. Defaults to True. If True, both endpoint and endpoint configuration will be deleted. If\nFalse, only endpoint will be deleted.", "source": "juraj-google-style"}
{"code": "def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None):\n        \n        if canvas.__module__ in SUPPORTED_CANVAS:\n            drawer = SUPPORTED_CANVAS[canvas.__module__]\n            drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()", "docstring": "Draw the tree on a canvas.\n\nArgs:\ncanvas (object): The canvas, you want to draw the tree on. Supported canvases: svgwrite.Drawing and PIL.Image (You can also add your custom libraries.)\nstem_color (tupel): Color or gradient for the stem of the tree.\nleaf_color (tupel): Color for the leaf (= the color for last iteration).\nthickness (int): The start thickness of the tree.", "source": "juraj-google-style"}
{"code": "def live(self, url, resource_class, resource_args, params=None):\n    return self.adapter.live(self, url, resource_class, resource_args, params=params)", "docstring": "Get a live endpoint.\n\nArgs:\n\nurl(string): URL for the request\n\nresource_class(class): The class to use for entries coming\nfrom the live endpoint.\n\nresource_args(dict): Additional arguments to pass to the\n`resource_class` constructor\n\nKeyword Args:\n\nparams(dict): Request parameters for the live url\n\nReturns:\n\nAn iterator over the live endpoint. Depending on the\nadapter the iterator will allow asynchronous\nbehavior. The default adapter will block while\niterating over the response of this method.", "source": "codesearchnet"}
{"code": "def create_message_from_descriptor(desc: descriptor.Descriptor, **kwargs: Any) -> message.Message:\n    return get_message_class_from_descriptor(desc)(**kwargs)", "docstring": "Instantiates a new Message based on a provided Descriptor.\n\nArgs:\ndesc: The Descriptor that describes the Message to instantiate.\n**kwargs: An optional list of key/value pairs to initialize with.\n\nReturns:\nA new Message based on the provided Descriptor.", "source": "github-repos"}
{"code": "def _GetGradWrt(output_grad, other_operand, input_shape, input_subs, other_subs, output_subs):\n    reduced_label_set = set(input_subs).difference(set(output_subs + other_subs + '.'))\n    left_subs = ''.join((s for s in input_subs if s not in reduced_label_set))\n    grad_reduced = gen_linalg_ops.einsum([output_grad, other_operand], '{},{}->{}'.format(output_subs, other_subs, left_subs))\n    if not reduced_label_set:\n        return grad_reduced\n    return _GetGradReduced(grad_reduced, left_subs, input_subs, input_shape, reduced_label_set)", "docstring": "Returns the gradient wrt an input operand for a binary einsum.\n\nThis function does not handle (un)broadcasting. This must be done separately\non the returned gradient.\n\nArgs:\noutput_grad: The gradient wrt the output of a binary einsum operation.\nother_operand: The complementary `Tensor` operand i.e. which is not the\ninput operand.\ninput_shape: A `Tensor` representing the shape of input operand.\ninput_subs: The subscripts of the input operand.\nother_subs: The subscripts of the complementary operand.\noutput_subs: The output subscripts.", "source": "github-repos"}
{"code": "def step(self, action):\n        \n        \n        if self.done:\n            raise ValueError('cannot step in a done environment! call `reset`')\n        \n        self.controllers[0][:] = action\n        \n        _LIB.Step(self._env)\n        \n        reward = self._get_reward()\n        \n        self.done = self._get_done()\n        \n        info = self._get_info()\n        \n        self._did_step(self.done)\n        \n        if reward < self.reward_range[0]:\n            reward = self.reward_range[0]\n        elif reward > self.reward_range[1]:\n            reward = self.reward_range[1]\n        \n        return self.screen, reward, self.done, info", "docstring": "Run one frame of the NES and return the relevant observation data.\n\nArgs:\naction (byte): the bitmap determining which buttons to press\n\nReturns:\na tuple of:\n- state (np.ndarray): next frame as a result of the given action\n- reward (float) : amount of reward returned after given action\n- done (boolean): whether the episode has ended\n- info (dict): contains auxiliary diagnostic information", "source": "juraj-google-style"}
{"code": "async def rewind(self, query=\"1\"):\n        \n\n        if not self.state == 'ready':\n            logger.debug(\"Trying to rewind from wrong state '{}'\".format(self.state))\n            return\n\n        if query == \"\":\n            query = \"1\"\n\n        try:\n            num = int(query)\n        except TypeError:\n            self.statuslog.error(\"Rewind argument must be a number\")\n        except ValueError:\n            self.statuslog.error(\"Rewind argument must be a number\")\n        else:\n            if len(self.prev_queue) == 0:\n                self.statuslog.error(\"No songs to rewind\")\n                return\n\n            if num < 0:\n                self.statuslog.error(\"Rewind must be postitive or 0\")\n                return\n            elif num > len(self.prev_queue):\n                self.statuslog.warning(\"Rewinding to start\")\n            else:\n                self.statuslog.info(\"Rewinding\")\n\n            for i in range(num + 1):\n                if len(self.prev_queue) > 0:\n                    self.queue.insert(0, self.prev_queue.pop())\n\n            try:\n                self.streamer.stop()\n            except Exception as e:\n                logger.exception(e)", "docstring": "The rewind command\n\nArgs:\nquery (str): The number of items to skip", "source": "juraj-google-style"}
{"code": "def inverse(self, y, name='inverse'):\n    return self._call_inverse(y, name)", "docstring": "Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).\n\nArgs:\ny: `Tensor`. The input to the \"inverse\" evaluation.\nname: The name to give this op.\n\nReturns:\n`Tensor`, if this bijector is injective.\nIf not injective, returns the k-tuple containing the unique\n`k` points `(x1, ..., xk)` such that `g(xi) = y`.\n\nRaises:\nTypeError: if `self.dtype` is specified and `y.dtype` is not\n`self.dtype`.\nNotImplementedError: if `_inverse` is not implemented.", "source": "github-repos"}
{"code": "def fit(self, x, y):\n    train = np.vstack((np.array([self.featurize_row(row.iloc[0], row.iloc[1]) for (idx, row) in x.iterrows()]), np.array([self.featurize_row(row.iloc[1], row.iloc[0]) for (idx, row) in x.iterrows()])))\n    labels = np.vstack((y, (- y))).ravel()\n    verbose = (1 if self.verbose else 0)\n    self.clf = CLF(verbose=verbose, min_samples_leaf=self.L, n_estimators=self.E, max_depth=self.max_depth, n_jobs=self.n_jobs).fit(train, labels)", "docstring": "Train the model.\n\nArgs:\nx_tr (pd.DataFrame): CEPC format dataframe containing the pairs\ny_tr (pd.DataFrame or np.ndarray): labels associated to the pairs", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(GzipFile, self).__init__(resolver_context)\n    self._compressed_data_size = -1\n    self._current_offset = 0\n    self._gzip_file_object = None\n    self._members_by_end_offset = collections.OrderedDict()\n\n    self.uncompressed_data_size = 0", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.\n\nRaises:\nValueError: when file_object is set.", "source": "juraj-google-style"}
{"code": "def potential(self, value):\n    if value:\n        self._potential = True\n    else:\n        self._potential = False", "docstring": "Setter for 'potential' property\n\nArgs:\nvalue (bool): True if a potential is required. False else", "source": "codesearchnet"}
{"code": "def create_masks(input_dim, hidden_dims, input_order='left-to-right', hidden_order='left-to-right'):\n    degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)\n    masks = []\n    for (input_degrees, output_degrees) in zip(degrees[:(- 1)], degrees[1:]):\n        mask = tf.cast((input_degrees[(:, np.newaxis)] <= output_degrees), tf.float32)\n        masks.append(mask)\n    mask = tf.cast((degrees[(- 1)][(:, np.newaxis)] < degrees[0]), tf.float32)\n    masks.append(mask)\n    return masks", "docstring": "Returns a list of binary mask matrices respecting autoregressive ordering.\n\nArgs:\ninput_dim: Number of inputs.\nhidden_dims: list with the number of hidden units per layer. It does not\ninclude the output layer; those number of units will always be set to\ninput_dim downstream. Each hidden unit size must be at least the size of\nlength (otherwise autoregressivity is not possible).\ninput_order: Order of degrees to the input units: 'random', 'left-to-right',\n'right-to-left', or an array of an explicit order. For example,\n'left-to-right' builds an autoregressive model\np(x) = p(x1) p(x2 | x1) ... p(xD | x<D).\nhidden_order: Order of degrees to the hidden units: 'random',\n'left-to-right'. If 'left-to-right', hidden units are allocated equally\n(up to a remainder term) to each degree.", "source": "codesearchnet"}
{"code": "def bcoo_add_indices(x1, x2, sum_duplicates):\n    x2_zeros = jnp.zeros(x2.data.shape, x1.data.dtype)\n    concat_axis = len(x1.indices.shape) - 2\n    output_indices = jnp.concatenate([x1.indices, x2.indices], axis=concat_axis)\n    output_data = jnp.concatenate([x1.data, x2_zeros], axis=concat_axis)\n    output = jax_sparse.BCOO((output_data, output_indices), shape=x1.shape)\n    if sum_duplicates:\n        output = jax_sparse.bcoo_sum_duplicates(output)\n    return output", "docstring": "Add the indices of `x2` to `x1` with zero values.\n\nArgs:\nx1: `BCOO` tensor to add indices to.\nx2: `BCOO` tensor to take the indices to add to x1.\nsum_duplicates: if `True` calls `bcoo_sum_duplicates` on the output.\nReturns:\na `BCOO` tensor equal to `x1` but with extra zeros at indices in `x2`\nthat were missing in `x1`.", "source": "github-repos"}
{"code": "def sg_producer_func(func):\n    r\n    @wraps(func)\n    def wrapper(**kwargs):\n        r\n\n        \n        opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1)\n\n        \n        assert opt.source is not None, 'source is mandatory.'\n        if type(opt.source) is not list and type(opt.source) is not tuple:\n            opt.source = [opt.source]\n        if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple:\n            opt.dtypes = [opt.dtypes]\n        \n        if opt.out_dtypes is None:\n            opt.out_dtypes = opt.dtypes\n        if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple:\n            opt.out_dtypes = [opt.out_dtypes]\n        assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.'\n\n        \n        def enqueue_func(sess, op):\n            \n            data = func(sess.run(opt.source))\n            \n            feed_dict = {}\n            for ph, col in zip(placeholders, data):\n                feed_dict[ph] = col\n            \n            sess.run(op, feed_dict=feed_dict)\n\n        \n        placeholders = []\n        for dtype in opt.dtypes:\n            placeholders.append(tf.placeholder(dtype=dtype))\n\n        \n        queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes)\n\n        \n        enqueue_op = queue.enqueue(placeholders)\n\n        \n        runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads)\n\n        \n        tf.train.add_queue_runner(runner)\n\n        \n        return queue.dequeue()\n\n    return wrapper", "docstring": "r\"\"\"Decorates a function `func` as sg_producer_func.\n\nArgs:\nfunc: A function to decorate.", "source": "juraj-google-style"}
{"code": "def update_internal_networks(self, network_uri_list, force=False, timeout=(- 1)):\n    uri = '{}/internalNetworks'.format(self.data['uri'])\n    return self._helper.update(network_uri_list, uri=uri, force=force, timeout=timeout)", "docstring": "Updates internal networks on the logical interconnect.\n\nArgs:\nnetwork_uri_list: List of Ethernet network uris.\nforce: If set to true, the operation completes despite any problems with network connectivity or errors\non the resource itself. The default is false.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Logical Interconnect.", "source": "codesearchnet"}
{"code": "def build_uri(self, id_or_uri):\n    if (not id_or_uri):\n        logger.exception(RESOURCE_CLIENT_INVALID_ID)\n        raise ValueError(RESOURCE_CLIENT_INVALID_ID)\n    if ('/' in id_or_uri):\n        self.validate_resource_uri(id_or_uri)\n        return id_or_uri\n    else:\n        return ((self._base_uri + '/') + id_or_uri)", "docstring": "Helps to build the URI from resource id and validate the URI.\n\nArgs:\nid_or_uri: ID/URI of the resource.\n\nReturns:\nReturns a valid resource URI", "source": "codesearchnet"}
{"code": "def _call_validators(self):\n    msg = []\n    msg.extend(self._validate_keyfile())\n    msg.extend(self._validate_dns_zone())\n    msg.extend(self._validate_retries())\n    msg.extend(self._validate_project())\n    return msg", "docstring": "Actually run all the validations.\n\nReturns:\nlist(str): Error messages from the validators.", "source": "codesearchnet"}
{"code": "def array(x, dtype=None):\n    if any_symbolic_tensors((x,)):\n        return Array(dtype=dtype).symbolic_call(x)\n    return backend.numpy.array(x, dtype=dtype)", "docstring": "Create a tensor.\n\nArgs:\nx: Input tensor.\ndtype: The desired data-type for the tensor.\n\nReturns:\nA tensor.\n\nExamples:\n>>> keras.ops.array([1, 2, 3])\narray([1, 2, 3], dtype=int32)\n\n>>> keras.ops.array([1, 2, 3], dtype=\"float32\")\narray([1., 2., 3.], dtype=float32)", "source": "github-repos"}
{"code": "def _save_to_hdx(self, action, id_field_name, file_to_upload=None):\n        \n        \n        result = self._write_to_hdx(action, self.data, id_field_name, file_to_upload)\n        self.old_data = self.data\n        self.data = result", "docstring": "Creates or updates an HDX object in HDX, saving current data and replacing with returned HDX object data\nfrom HDX\n\nArgs:\naction (str): Action to perform: 'create' or 'update'\nid_field_name (str): Name of field containing HDX object identifier\nfile_to_upload (Optional[str]): File to upload to HDX\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_well(self, uwi):\n        \n        if uwi is None:\n            raise ValueError('a UWI must be provided')\n        matching_wells = [w for w in self if w.uwi == uwi]\n        return matching_wells[0] if len(matching_wells) >= 1 else None", "docstring": "Returns a Well object identified by UWI\n\nArgs:\nuwi (string): the UWI string for the well.\n\nReturns:\nwell", "source": "juraj-google-style"}
{"code": "def supply(self, issuer):\n    issuer_uri_config = self._issuer_uri_configs.get(issuer)\n    if (not issuer_uri_config):\n        return\n    jwks_uri = issuer_uri_config.jwks_uri\n    if jwks_uri:\n        return jwks_uri\n    open_id_valid = issuer_uri_config.open_id_valid\n    if open_id_valid:\n        discovered_jwks_uri = _discover_jwks_uri(issuer)\n        self._issuer_uri_configs[issuer] = IssuerUriConfig(False, discovered_jwks_uri)\n        return discovered_jwks_uri", "docstring": "Supplies the `jwks_uri` for the given issuer.\n\nArgs:\nissuer: the issuer.\n\nReturns:\nThe `jwks_uri` that is either statically configured or retrieved via\nOpenId discovery. None is returned when the issuer is unknown or the\nOpenId discovery fails.", "source": "codesearchnet"}
{"code": "def shuffle_sparse_coo_matrix(sparse_matrix, dropout_rate=0.0, min_dropout_rate=None, max_dropout_rate=None):\n    if ((dropout_rate < 0.0) or (dropout_rate >= 1.0)):\n        raise ValueError(('Dropout rate should be in [0, 1) but is %f' % dropout_rate))\n    (num_rows, num_cols) = sparse_matrix.shape\n    shuffled_rows = shuffle(np.arange(num_rows))\n    shuffled_cols = shuffle(np.arange(num_cols))\n    if (dropout_rate > 0.0):\n        sparse_matrix = _dropout_sparse_coo_matrix(sparse_matrix, dropout_rate, min_dropout_rate, max_dropout_rate)\n    new_row = np.take(shuffled_rows, sparse_matrix.row)\n    new_col = np.take(shuffled_cols, sparse_matrix.col)\n    return sparse.csr_matrix((sparse_matrix.data, (new_row, new_col)), shape=(num_rows, num_cols))", "docstring": "Shuffle sparse matrix encoded as a SciPy coo matrix.\n\nArgs:\nsparse_matrix: a SciPy coo sparse matrix.\ndropout_rate: if dropout_rate > 0 then non-zero elements of the input matrix\nwill be droped uniformly at random.\nmin_dropout_rate: minimum value for the dropout rate. If None\nFLAGS.min_dropout_rate is used.\nmax_dropout_rate: minimum value for the dropout rate. If None\nFLAGS.max_dropout_rate is used.\n\nReturns:\nA SciPy csr_matrix entailing the randomized interactions.", "source": "codesearchnet"}
{"code": "def backslashcase(string):\n    str1 = re.sub('_', '\\\\\\\\', snakecase(string))\n    return str1", "docstring": "Convert string into spinal case.\nJoin punctuation with backslash.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Spinal cased string.", "source": "codesearchnet"}
{"code": "def LoadFromStorage(cls, path=None):\n    if (path is None):\n        path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')\n    return cls(**googleads.common.LoadFromStorage(path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES, cls._OPTIONAL_INIT_VALUES))", "docstring": "Creates an AdWordsClient with information stored in a yaml file.\n\nArgs:\n[optional]\npath: The path string to the file containing cached AdWords data.\n\nReturns:\nAn AdWordsClient initialized with the values cached in the file.\n\nRaises:\nA GoogleAdsValueError if the given yaml file does not contain the\ninformation necessary to instantiate a client object - either a\nrequired key was missing or an OAuth2 key was missing.", "source": "codesearchnet"}
{"code": "def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:\n        \n\n        return self.get_queryset().upsert(conflict_target, fields, index_predicate)", "docstring": "Creates a new record or updates the existing one\nwith the specified data.\n\nArguments:\nconflict_target:\nFields to pass into the ON CONFLICT clause.\n\nfields:\nFields to insert/update.\n\nindex_predicate:\nThe index predicate to satisfy an arbiter partial index.\n\nReturns:\nThe primary key of the row that was created/updated.", "source": "juraj-google-style"}
{"code": "def find_structure(self, filename_or_structure):\n        \n        try:\n            if isinstance(filename_or_structure, str):\n                s = Structure.from_file(filename_or_structure)\n            elif isinstance(filename_or_structure, Structure):\n                s = filename_or_structure\n            else:\n                raise MPRestError(\"Provide filename or Structure object.\")\n            payload = {'structure': json.dumps(s.as_dict(), cls=MontyEncoder)}\n            response = self.session.post(\n                '{}/find_structure'.format(self.preamble), data=payload\n            )\n            if response.status_code in [200, 400]:\n                resp = json.loads(response.text, cls=MontyDecoder)\n                if resp['valid_response']:\n                    return resp['response']\n                else:\n                    raise MPRestError(resp[\"error\"])\n            raise MPRestError(\"REST error with status code {} and error {}\"\n                              .format(response.status_code, response.text))\n        except Exception as ex:\n            raise MPRestError(str(ex))", "docstring": "Finds matching structures on the Materials Project site.\n\nArgs:\nfilename_or_structure: filename or Structure object\n\nReturns:\nA list of matching structures.\n\nRaises:\nMPRestError", "source": "juraj-google-style"}
{"code": "def _list_objects(self, client_kwargs, path, max_request_entries):\n        \n        kwargs = dict(prefix=path)\n        if max_request_entries:\n            kwargs['limit'] = max_request_entries\n        else:\n            kwargs['full_listing'] = True\n\n        with _handle_client_exception():\n            response = self.client.get_container(\n                client_kwargs['container'], **kwargs)\n\n        for obj in response[1]:\n            yield obj.pop('name'), obj", "docstring": "Lists objects.\n\nargs:\nclient_kwargs (dict): Client arguments.\npath (str): Path relative to current locator.\nmax_request_entries (int): If specified, maximum entries returned\nby request.\n\nReturns:\ngenerator of tuple: object name str, object header dict", "source": "juraj-google-style"}
{"code": "def attention_bias_batch(batch_coordinates_q, batch_coordinates_k=None, condition_fn=None):\n    if (batch_coordinates_k is None):\n        batch_coordinates_k = batch_coordinates_q\n\n    def to_float(bc):\n        bc = tf.squeeze(bc, 1)\n        bc = tf.to_float(bc)\n        return bc\n    bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1)\n    bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0)\n    bias_batch = (bc_h - bc_v)\n    bias_batch = condition_fn(bias_batch)\n    bias_batch *= (- 1000000000.0)\n    return bias_batch", "docstring": "Generate a mask to prevent the batch to attend to each others.\n\nArgs:\nbatch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the\ncoordinates of the batches\nbatch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the\ncoordinates of the batches. If None, do self-attention.\ncondition_fn: Callable defining the attention mask.\n\nReturns:\nFloat-like Tensor of shape [length_q, length_k] containing either 0 or\n-infinity (-1e9).", "source": "codesearchnet"}
{"code": "def _is_every_steps(self, phase_step, batch, every):\n    \n    if not every:\n      return False\n    covered_steps = range(phase_step, phase_step + batch)\n    return any((step + 1) % every == 0 for step in covered_steps)", "docstring": "Determine whether a periodic event should happen at this step.\n\nArgs:\nphase_step: The incrementing step.\nbatch: The number of steps progressed at once.\nevery: The interval of the period.\n\nReturns:\nBoolean of whether the event should happen.", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(DerivationParameters, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream):\n        self._cryptographic_parameters = CryptographicParameters()\n        self._cryptographic_parameters.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.INITIALIZATION_VECTOR, local_stream):\n        self._initialization_vector = ByteString(tag=enums.Tags.INITIALIZATION_VECTOR)\n        self._initialization_vector.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.DERIVATION_DATA, local_stream):\n        self._derivation_data = ByteString(tag=enums.Tags.DERIVATION_DATA)\n        self._derivation_data.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.SALT, local_stream):\n        self._salt = ByteString(tag=enums.Tags.SALT)\n        self._salt.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(Tags.ITERATION_COUNT, local_stream):\n        self._iteration_count = Integer(tag=Tags.ITERATION_COUNT)\n        self._iteration_count.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the DerivationParameters struct and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def get_duration(self, matrix_name):\n    duration = 0.0\n    if (matrix_name in self.data):\n        duration = sum([stage.duration() for stage in self.data[matrix_name]])\n    return duration", "docstring": "Get duration for a concrete matrix.\n\nArgs:\nmatrix_name (str): name of the Matrix.\n\nReturns:\nfloat: duration of concrete matrix in seconds.", "source": "codesearchnet"}
{"code": "def get(self,key,default=None):\n        \n\n        retval = self.__getitem__(key)\n        if not retval:\n            retval = default\n\n        return retval", "docstring": "Get a value from the dictionary.\n\nArgs:\nkey (str): The dictionary key.\ndefault (any): The default to return if the key is not in the\ndictionary. Defaults to None.\n\nReturns:\nstr or any: The dictionary value or the default if the key is not\nin the dictionary.", "source": "juraj-google-style"}
{"code": "def pop(self, key, default=None):\n    return self._dictionary.pop(key.lower(), default)", "docstring": "Remove the key and return the associated value or default if not\nfound\n\nArgs:\nkey (str): The key to remove\ndefault (obj): The value to return if key is not present", "source": "codesearchnet"}
{"code": "def get_object_from_normpath(self, file_path):\n        \n        file_path = make_string_path(file_path)\n        if file_path == self.root.name:\n            return self.root\n        if file_path == self.dev_null.name:\n            return self.dev_null\n\n        file_path = self._original_path(file_path)\n        path_components = self._path_components(file_path)\n        target_object = self.root\n        try:\n            for component in path_components:\n                if S_ISLNK(target_object.st_mode):\n                    target_object = self.resolve(target_object.contents)\n                if not S_ISDIR(target_object.st_mode):\n                    if not self.is_windows_fs:\n                        self.raise_io_error(errno.ENOTDIR, file_path)\n                    self.raise_io_error(errno.ENOENT, file_path)\n                target_object = target_object.get_entry(component)\n        except KeyError:\n            self.raise_io_error(errno.ENOENT, file_path)\n        return target_object", "docstring": "Search for the specified filesystem object within the fake\nfilesystem.\n\nArgs:\nfile_path: Specifies target FakeFile object to retrieve, with a\npath that has already been normalized/resolved.\n\nReturns:\nThe FakeFile object corresponding to file_path.\n\nRaises:\nIOError: if the object is not found.", "source": "juraj-google-style"}
{"code": "def get_sqlalchemy_url(database=None, host=None, port=None, username=None, password=None, driver='postgres'):\n    strings = [('%s:\n    if username:\n        strings.append(username)\n        if password:\n            strings.append((':%s@' % password))\n        else:\n            strings.append('@')\n    if host:\n        strings.append(host)\n    if (port is not None):\n        strings.append((':%d' % int(port)))\n    if database:\n        strings.append(('/%s' % database))\n    return ''.join(strings)", "docstring": "Gets SQLAlchemy url from database connection parameters\n\nArgs:\ndatabase (Optional[str]): Database name\nhost (Optional[str]): Host where database is located\nport (Union[int, str, None]): Database port\nusername (Optional[str]): Username to log into database\npassword (Optional[str]): Password to log into database\ndriver (str): Database driver. Defaults to 'postgres'.\n\nReturns:\ndb_url (str): SQLAlchemy url", "source": "codesearchnet"}
{"code": "def to_md_file(string, filename, out_path='.'):\n    md_file = ('%s.md' % filename)\n    with open(os.path.join(out_path, md_file), 'w') as f:\n        f.write(string)\n    print('wrote {}.'.format(md_file))", "docstring": "Import a module path and create an api doc from it\n\nArgs:\nstring (str): string with line breaks to write to file.\nfilename (str): filename without the .md\nout_path (str): The output directory", "source": "codesearchnet"}
{"code": "def UpdateChainAndProcess(self, parser_mediator, registry_key, **kwargs):\n    \n    parser_mediator.AppendToParserChain(self)\n    try:\n      self.Process(parser_mediator, registry_key, **kwargs)\n    finally:\n      parser_mediator.PopFromParserChain()", "docstring": "Updates the parser chain and processes a Windows Registry key or value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nRaises:\nValueError: If the Windows Registry key is not set.", "source": "juraj-google-style"}
{"code": "def s3_app_bucket(self, include_region=False):\n    if include_region:\n        s3_app_bucket = self.format['s3_app_region_bucket'].format(**self.data)\n    else:\n        s3_app_bucket = self.format['s3_app_bucket'].format(**self.data)\n    return s3_app_bucket", "docstring": "Generate s3 application bucket name.\n\nArgs:\ninclude_region (bool): Include region in the name generation.", "source": "codesearchnet"}
{"code": "def _anonymize_table(cls, table_data, pii_fields):\n        \n        for pii_field in pii_fields:\n            field_name = pii_field['name']\n            transformer = cls.get_class(TRANSFORMERS['categorical'])(pii_field)\n            table_data[field_name] = transformer.anonymize_column(table_data)\n\n        return table_data", "docstring": "Anonymize in `table_data` the fields in `pii_fields`.\n\nArgs:\ntable_data (pandas.DataFrame): Original dataframe/table.\npii_fields (list[dict]): Metadata for the fields to transform.\n\nResult:\npandas.DataFrame: Anonymized table.", "source": "juraj-google-style"}
{"code": "def assign(self, value, use_locking=None, name=None, read_value=True):\n    with _handle_graph(self.handle):\n        value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)\n        if not self._shape.is_compatible_with(value_tensor.shape):\n            if self.name is None:\n                tensor_name = ''\n            else:\n                tensor_name = ' ' + str(self.name)\n            raise ValueError(f\"Cannot assign value to variable '{tensor_name}': Shape mismatch.The variable shape {self._shape}, and the assigned value shape {value_tensor.shape} are incompatible.\")\n        kwargs = {}\n        if forward_compat.forward_compatible(2022, 3, 23):\n            validate_shape = self._validate_shape and self._shape.is_fully_defined()\n            kwargs['validate_shape'] = validate_shape\n        assign_op = gen_resource_variable_ops.assign_variable_op(self.handle, value_tensor, name=name, **kwargs)\n        if read_value:\n            return self._lazy_read(assign_op)\n    return assign_op", "docstring": "Assigns a new value to this variable.\n\nArgs:\nvalue: A `Tensor`. The new value for this variable.\nuse_locking: If `True`, use locking during the assignment.\nname: The name to use for the assignment.\nread_value: A `bool`. Whether to read and return the new value of the\nvariable or not.\n\nReturns:\nIf `read_value` is `True`, this method will return the new value of the\nvariable after the assignment has completed. Otherwise, when in graph mode\nit will return the `Operation` that does the assignment, and when in eager\nmode it will return `None`.", "source": "github-repos"}
{"code": "def _process_update(self, item, feed_item):\n    campaign = self.campaign_dao.get(feed_item, required=True)\n    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n    item['name'] = feed_item.get(FieldMap.PLACEMENT_GROUP_NAME, None)\n    item['placementGroupType'] = feed_item.get(FieldMap.PLACEMENT_GROUP_TYPE, None)\n    item['pricingSchedule']['startDate'] = feed_item.get(FieldMap.PLACEMENT_GROUP_START_DATE, None)\n    item['pricingSchedule']['endDate'] = feed_item.get(FieldMap.PLACEMENT_GROUP_END_DATE, None)\n    item['pricingSchedule']['pricingType'] = feed_item.get(FieldMap.PLACEMENT_GROUP_PRICING_TYPE, None)", "docstring": "Updates a placement group based on the values from the feed.\n\nArgs:\nitem: Object representing the placement group to be updated, this object\nis updated directly.\nfeed_item: Feed item representing placement group values from the\nBulkdozer feed.", "source": "github-repos"}
{"code": "def _MergeEntities(self, a, b):\n    if (a.shape_id != b.shape_id):\n        raise MergeError('shape_id must be the same')\n    distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2], b.points[0][:2]), ApproximateDistanceBetweenPoints(a.points[(- 1)][:2], b.points[(- 1)][:2]))\n    if (distance > self.largest_shape_distance):\n        raise MergeError(('The shape endpoints are too far away: %.1fm (largest_shape_distance is %.1fm)' % (distance, self.largest_shape_distance)))\n    return self._Migrate(b, self.feed_merger.b_schedule, False)", "docstring": "Merges the shapes by taking the new shape.\n\nArgs:\na: The first transitfeed.Shape instance.\nb: The second transitfeed.Shape instance.\n\nReturns:\nThe merged shape.\n\nRaises:\nMergeError: If the ids are different or if the endpoints are further\nthan largest_shape_distance apart.", "source": "codesearchnet"}
{"code": "def timestamp_fmt(value, fmt):\n    \n    return int(calendar.timegm(\n        datetime.datetime.strptime(value, fmt).utctimetuple()\n    ))", "docstring": "Convert timestamp string to time in seconds since epoch.\n\nWraps the datetime.datetime.strptime(). This is slow use the other\ntimestamp_*() functions if possible.\n\nArgs:\nvalue: A timestamp string.\nfmt: A timestamp format string.\n\nReturns:\nThe time in seconds since epoch as an integer.", "source": "juraj-google-style"}
{"code": "def GetAllClientLabels(token, include_catchall=False):\n    labels_index = aff4.FACTORY.Create(standard.LabelSet.CLIENT_LABELS_URN, standard.LabelSet, mode='r', token=token)\n    labels = set(labels_index.ListLabels())\n    if include_catchall:\n        labels.add(ALL_CLIENTS_LABEL)\n    return labels", "docstring": "Get the set of all label names applied to all clients.\n\nArgs:\ntoken: token to use when opening the index.\ninclude_catchall: If true, we include ALL_CLIENTS_LABEL in the results.\n\nReturns:\nset of label name strings, including the catchall \"All\"", "source": "codesearchnet"}
{"code": "def NewPathSpec(cls, type_indicator, **kwargs):\n    \n    if type_indicator not in cls._path_spec_types:\n      raise KeyError(\n          'Path specification type: {0:s} not set.'.format(type_indicator))\n\n    \n    \n    if 'parent' in kwargs and kwargs['parent'] is None:\n      del kwargs['parent']\n\n    path_spec_type = cls._path_spec_types[type_indicator]\n    return path_spec_type(**kwargs)", "docstring": "Creates a new path specification for the specific type indicator.\n\nArgs:\ntype_indicator (str): type indicator.\nkwargs (dict): keyword arguments depending on the path specification.\n\nReturns:\nPathSpec: path specification.\n\nRaises:\nKeyError: if path specification is not registered.", "source": "juraj-google-style"}
{"code": "def query(self, connection, query, fetch=True):\n    self.install_module(connection)\n    statements = sqlparse.parse(sqlparse.format(query, strip_comments=True))\n    logger.debug('Finding and installing all partitions from query. \\n    query: {}'.format(query))\n    new_query = []\n    if (len(statements) > 1):\n        raise BadSQLError('Can only query a single statement')\n    if (len(statements) == 0):\n        raise BadSQLError(\"DIdn't get any statements in '{}'\".format(query))\n    statement = statements[0]\n    logger.debug('Searching statement for partition ref.\\n    statement: {}'.format(statement.to_unicode()))\n    logger.debug('Executing updated query after partition install.\\n    query before update: {}\\n    query to execute (updated query): {}'.format(statement, new_query))\n    return self._execute(connection, statement.to_unicode(), fetch=fetch)", "docstring": "Creates virtual tables for all partitions found in the query and executes query.\n\nArgs:\nquery (str): sql query\nfetch (bool): fetch result from database if True, do not fetch overwise.", "source": "codesearchnet"}
{"code": "def create_group(self, name):\n        \n        self.service.create_group(\n            name, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Create a new group.\n\nArgs:\nname (string): Name of the group to create.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def splitext2(filepath):\n    \n    root, filename = os.path.split(safepath(filepath))\n    filename, ext = os.path.splitext(safepath(filename))\n    return root, filename, ext", "docstring": "Split filepath into root, filename, ext\n\nArgs:\nfilepath (str, path): file path\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def extract_backup_bundle(self, resource, timeout=(- 1)):\n    return self._client.update(resource, uri=self.BACKUP_ARCHIVE_PATH, timeout=timeout)", "docstring": "Extracts the existing backup bundle on the appliance and creates all the artifacts.\n\nArgs:\nresource (dict): Deployment Group to extract.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation in\nOneView, it just stops waiting for its completion.\n\nReturns:\ndict: A Deployment Group associated with the Artifact Bundle backup.", "source": "codesearchnet"}
{"code": "def _is_subclass(self, cls, class_spec):\n    if isinstance(cls, abstract.AMBIGUOUS_OR_EMPTY):\n        return None\n    return abstract_utils.check_against_mro(self.ctx, cls, class_spec)", "docstring": "Check if the given class is a subclass of a class specification.\n\nArgs:\ncls: A BaseValue, the first argument to an issubclass call.\nclass_spec: A BaseValue, the second issubclass argument.\n\nReturns:\nTrue if the class is a subclass (or is a class) in the class_spec, False\nif not, and None if it is ambiguous.", "source": "github-repos"}
{"code": "def read_to_offset(self, offset):\n    \n    assert offset >= self._offset\n    result = self._buffer[self._offset: offset]\n    self._offset += len(result)\n    return result", "docstring": "Returns bytes from self._buffer and update related offsets.\n\nArgs:\noffset: read from current offset to this offset, exclusive.\n\nReturns:\nRequested bytes from buffer.", "source": "juraj-google-style"}
{"code": "def _parse_v_parameters(val_type, val, filename, param_name):\n    if (val_type == 'logical'):\n        val = [(i == 'T') for i in val.split()]\n    elif (val_type == 'int'):\n        try:\n            val = [int(i) for i in val.split()]\n        except ValueError:\n            val = _parse_from_incar(filename, param_name)\n            if (val is None):\n                raise IOError('Error in parsing vasprun.xml')\n    elif (val_type == 'string'):\n        val = val.split()\n    else:\n        try:\n            val = [float(i) for i in val.split()]\n        except ValueError:\n            val = _parse_from_incar(filename, param_name)\n            if (val is None):\n                raise IOError('Error in parsing vasprun.xml')\n    return val", "docstring": "Helper function to convert a Vasprun array-type parameter into the proper\ntype. Boolean, int and float types are converted.\n\nArgs:\nval_type: Value type parsed from vasprun.xml.\nval: Actual string value parsed for vasprun.xml.\nfilename: Fullpath of vasprun.xml. Used for robust error handling.\nE.g., if vasprun.xml contains \\\\*\\\\*\\\\* for some Incar parameters,\nthe code will try to read from an INCAR file present in the same\ndirectory.\nparam_name: Name of parameter.\n\nReturns:\nParsed value.", "source": "codesearchnet"}
{"code": "def __recognize_dict(self, node: yaml.Node, expected_type: Type) -> RecResult:\n    logger.debug('Recognizing as a dict')\n    if (not issubclass(generic_type_args(expected_type)[0], str)):\n        raise RuntimeError('YAtiML only supports dicts with strings as keys')\n    if (not isinstance(node, yaml.MappingNode)):\n        message = '{}{}Expected a dict/mapping here'.format(node.start_mark, os.linesep)\n        return ([], message)\n    value_type = generic_type_args(expected_type)[1]\n    for (_, value) in node.value:\n        (recognized_value_types, message) = self.recognize(value, value_type)\n        if (len(recognized_value_types) == 0):\n            return ([], message)\n        if (len(recognized_value_types) > 1):\n            return ([Dict[(str, t)] for t in recognized_value_types], message)\n    return ([expected_type], '')", "docstring": "Recognize a node that we expect to be a dict of some kind.\n\nArgs:\nnode: The node to recognize.\nexpected_type: Dict[str, ...something...]\n\nReturns:\nexpected_type if it was recognized, [] otherwise.", "source": "codesearchnet"}
{"code": "def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):\n    return", "docstring": "Updates the candidate generation strategy based on the outcomes.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\nscores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):\nPrediction scores of a language modeling head. These can be logits for each vocabulary when not using\nbeam search or log softmax for each vocabulary token when using beam search\nnum_matches (`int`):\nThe number of matches between the candidate sequences and the model predictions.", "source": "github-repos"}
{"code": "def _group_device_list(devices):\n    assert not _is_device_list_single_worker(devices)\n    device_dict = {}\n    for d in devices:\n        d_spec = tf_device.DeviceSpec.from_string(d)\n        if d_spec.job not in device_dict:\n            device_dict[d_spec.job] = []\n        while len(device_dict[d_spec.job]) <= d_spec.task:\n            device_dict[d_spec.job].append([])\n        device_dict[d_spec.job][d_spec.task].append(d)\n    return device_dict", "docstring": "Groups the devices list by task_type and task_id.\n\nArgs:\ndevices: a list of device strings for remote devices.\n\nReturns:\na dict of list of device strings mapping from task_type to a list of devices\nfor the task_type in the ascending order of task_id.", "source": "github-repos"}
{"code": "def check_config_file(msg):\n    with jsonconfig.Config('messages', indent=4) as cfg:\n        verify_profile_name(msg, cfg)\n        retrieve_data_from_config(msg, cfg)\n        if (msg._auth is None):\n            retrieve_pwd_from_config(msg, cfg)\n        if msg.save:\n            update_config_data(msg, cfg)\n            update_config_pwd(msg, cfg)", "docstring": "Checks the config.json file for default settings and auth values.\n\nArgs:\n:msg: (Message class) an instance of a message class.", "source": "codesearchnet"}
{"code": "def find_contour_yaml(config_file=__file__, names=None):\n    checked = set()\n    contour_yaml = _find_countour_yaml(os.path.dirname(config_file), checked, names=names)\n    if (not contour_yaml):\n        contour_yaml = _find_countour_yaml(os.getcwd(), checked, names=names)\n    return contour_yaml", "docstring": "Traverse directory trees to find a contour.yaml file\n\nBegins with the location of this file then checks the\nworking directory if not found\n\nArgs:\nconfig_file: location of this file, override for\ntesting\nReturns:\nthe path of contour.yaml or None if not found", "source": "codesearchnet"}
{"code": "def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule:\n    \n    if name is None:\n        name = schedule.name\n    return union((time, schedule), name=name)", "docstring": "Return schedule shifted by `time`.\n\nArgs:\nschedule: The schedule to shift\ntime: The time to shift by\nname: Name of shifted schedule. Defaults to name of `schedule`", "source": "juraj-google-style"}
{"code": "def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n    return self._path_open(path, 'rb', mime_type, compression_type)", "docstring": "Returns a read channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"}
{"code": "def publish(self, object_id: str, event_type: str,\n                event_data: dict = None):\n        \n        object_key = SchedulingObject.get_key(self.type, object_id)\n        publish(event_type=event_type,\n                event_data=event_data,\n                object_type=self.type,\n                object_id=object_id,\n                object_key=object_key,\n                origin=None)", "docstring": "Publish a scheduling object event.\n\nArgs:\nobject_id (str): ID of the scheduling object\nevent_type (str): Type of event.\nevent_data (dict, optional): Event data.", "source": "juraj-google-style"}
{"code": "def hardware_version(self):\n    res = self.rpc(0, 2, result_type=(0, True))\n    binary_version = res['buffer']\n    ver = ''\n    for x in binary_version:\n        if (x != 0):\n            ver += chr(x)\n    return ver", "docstring": "Return the embedded hardware version string for this tile.\n\nThe hardware version is an up to 10 byte user readable string that is\nmeant to encode any necessary information about the specific hardware\nthat this tile is running on.  For example, if you have multiple\nassembly variants of a given tile, you could encode that information\nhere.\n\nReturns:\nstr: The hardware version read from the tile.", "source": "codesearchnet"}
{"code": "def DeleteIndex(self, index):\n        \n        to_remove = None\n        for i in self.Items:\n            if i.index == index:\n                to_remove = i\n\n        if to_remove:\n            self.Items.remove(to_remove)", "docstring": "Remove a spent coin based on its index.\n\nArgs:\nindex (int):", "source": "juraj-google-style"}
{"code": "def _brent_loop_body(state, params, constants):\n    best_estimate = state.best_estimate\n    last_estimate = state.last_estimate\n    contrapoint = state.contrapoint\n    value_at_best_estimate = state.value_at_best_estimate\n    value_at_last_estimate = state.value_at_last_estimate\n    value_at_contrapoint = state.value_at_contrapoint\n    step_to_best_estimate = state.step_to_best_estimate\n    step_to_last_estimate = state.step_to_last_estimate\n    num_iterations = state.num_iterations\n    finished = state.finished\n    replace_contrapoint = ~finished & (value_at_last_estimate * value_at_best_estimate < constants.zero_value)\n    contrapoint = tf.where(replace_contrapoint, last_estimate, contrapoint)\n    value_at_contrapoint = tf.where(replace_contrapoint, value_at_last_estimate, value_at_contrapoint)\n    step_to_last_estimate = tf.where(replace_contrapoint, best_estimate - last_estimate, step_to_last_estimate)\n    step_to_best_estimate = tf.where(replace_contrapoint, step_to_last_estimate, step_to_best_estimate)\n    replace_best_estimate = tf.where(finished, constants.false, tf.math.abs(value_at_contrapoint) < tf.math.abs(value_at_best_estimate))\n    last_estimate = tf.where(replace_best_estimate, best_estimate, last_estimate)\n    best_estimate = tf.where(replace_best_estimate, contrapoint, best_estimate)\n    contrapoint = tf.where(replace_best_estimate, last_estimate, contrapoint)\n    value_at_last_estimate = tf.where(replace_best_estimate, value_at_best_estimate, value_at_last_estimate)\n    value_at_best_estimate = tf.where(replace_best_estimate, value_at_contrapoint, value_at_best_estimate)\n    value_at_contrapoint = tf.where(replace_best_estimate, value_at_last_estimate, value_at_contrapoint)\n    root_tolerance = 0.5 * (params.absolute_root_tolerance + params.relative_root_tolerance * tf.math.abs(best_estimate))\n    bisection_step = 0.5 * (contrapoint - best_estimate)\n    finished |= (num_iterations >= params.max_iterations) | (tf.math.abs(bisection_step) < root_tolerance) | ~tf.math.is_finite(value_at_best_estimate) | (tf.math.abs(value_at_best_estimate) <= params.function_tolerance)\n    compute_short_step = tf.where(finished, constants.false, (root_tolerance < tf.math.abs(step_to_last_estimate)) & (tf.math.abs(value_at_best_estimate) < tf.math.abs(value_at_last_estimate)))\n    short_step = tf.where(compute_short_step, tf.where(tf.equal(last_estimate, contrapoint), _secant_step(best_estimate, last_estimate, value_at_best_estimate, value_at_last_estimate), _quadratic_interpolation_step(value_at_best_estimate, value_at_last_estimate, value_at_contrapoint, best_estimate, last_estimate, contrapoint)), constants.zero)\n    use_short_step = tf.where(compute_short_step, 2 * tf.math.abs(short_step) < tf.minimum(3 * tf.math.abs(bisection_step) - root_tolerance, tf.math.abs(step_to_last_estimate)), constants.false)\n    step_to_last_estimate = tf.where(use_short_step, step_to_best_estimate, bisection_step)\n    step_to_best_estimate = tf.where(finished, constants.zero, tf.where(use_short_step, short_step, bisection_step))\n    last_estimate = tf.where(finished, last_estimate, best_estimate)\n    best_estimate += tf.where(finished, constants.zero, tf.where(root_tolerance < tf.math.abs(step_to_best_estimate), step_to_best_estimate, tf.where(bisection_step > 0, root_tolerance, -root_tolerance)))\n    value_at_last_estimate = tf.where(finished, value_at_last_estimate, value_at_best_estimate)\n    value_at_best_estimate = tf.where(finished, value_at_best_estimate, 
params.objective_fn(best_estimate))\n    num_iterations = tf.where(finished, num_iterations, num_iterations + 1)\n    return [_BrentSearchState(best_estimate=best_estimate, last_estimate=last_estimate, contrapoint=contrapoint, value_at_best_estimate=value_at_best_estimate, value_at_last_estimate=value_at_last_estimate, value_at_contrapoint=value_at_contrapoint, step_to_best_estimate=step_to_best_estimate, step_to_last_estimate=step_to_last_estimate, num_iterations=num_iterations, finished=finished)]", "docstring": "Performs one iteration of the Brent root-finding algorithm.\n\nArgs:\nstate: A Python `_BrentSearchState` namedtuple.\nparams: A Python `_BrentSearchParams` namedtuple.\nconstants: A Python `_BrentSearchConstants` namedtuple.\n\nReturns:\nThe `Tensor`s to use for the next iteration of the algorithm.", "source": "github-repos"}
{"code": "def encode_request(request_line, **headers):\n    lines = [request_line]\n    lines.extend([('%s: %s' % kv) for kv in headers.items()])\n    return ('\\r\\n'.join(lines) + '\\r\\n\\r\\n').encode('utf-8')", "docstring": "Creates the data for a SSDP request.\n\nArgs:\nrequest_line (string): The request line for the request (e.g.\n``\"M-SEARCH * HTTP/1.1\"``).\nheaders (dict of string -> string): Dictionary of header name - header\nvalue pairs to present in the request.\n\nReturns:\nbytes: The encoded request.", "source": "codesearchnet"}
{"code": "def get_or_create_from_ip(ip):\n        \n        data = ip_api_handler.get(ip)\n        if data and any(v for v in data.values()):\n            if data.get('ip_address', None) is None or not data['ip_address']:\n                data['ip_address'] = ip\n            return IPInfo.objects.get_or_create(**data)\n        return None, False", "docstring": "Get or create an entry using obtained information from an IP.\n\nArgs:\nip (str): IP address xxx.xxx.xxx.xxx.\n\nReturns:\nip_info: an instance of IPInfo.", "source": "juraj-google-style"}
{"code": "def get_without(self, fragments,\n                    use_lookup=None):\n        \n        if use_lookup is None:\n            use_lookup = settings['defaults']['use_lookup']\n\n        if pd.api.types.is_list_like(fragments):\n            for fragment in fragments:\n                try:\n                    index_of_all_fragments |= fragment.index\n                except NameError:\n                    index_of_all_fragments = fragment.index\n        else:\n            index_of_all_fragments = fragments.index\n        missing_part = self.loc[self.index.difference(index_of_all_fragments)]\n        missing_part = missing_part.fragmentate(use_lookup=use_lookup)\n        return sorted(missing_part, key=len, reverse=True)", "docstring": "Return self without the specified fragments.\n\nArgs:\nfragments: Either a list of :class:`~chemcoord.Cartesian` or a\n:class:`~chemcoord.Cartesian`.\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\nlist: List containing :class:`~chemcoord.Cartesian`.", "source": "juraj-google-style"}
{"code": "def gcd(*numbers):\n    \n    n = numbers[0]\n    for i in numbers:\n        n = pygcd(n, i)\n    return n", "docstring": "Returns the greatest common divisor for a sequence of numbers.\n\nArgs:\n\\*numbers: Sequence of numbers.\n\nReturns:\n(int) Greatest common divisor of numbers.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, indicator):\n    \n    super(ThreatIntelligence, self).__init__()\n    self.name = name\n    self.indicator = indicator", "docstring": "Initializes the Threat Intelligence container.\n\nArgs:\nname (string): name of the threat\nindicator (string): regular expression relevant to a threat", "source": "juraj-google-style"}
{"code": "def has_key(self, key):\n\t\t\n\n\t\trc = self._libinput.libinput_device_keyboard_has_key(self._handle, key)\n\t\tassert rc >= 0, 'This device is not a keyboard device'\n\t\treturn bool(rc)", "docstring": "Check if a :attr:`~libinput.constant.DeviceCapability.KEYBOARD`\ndevice has a given key.\n\nArgs:\nkey (int): Key to check for, see ``input.h`` for key definitions.\nReturns:\nbool: :obj:`True` if the device has this key, :obj:`False` if\nit does not.\nRaises:\nAssertionError", "source": "juraj-google-style"}
{"code": "def parse_conservations(variant):\n    conservations = {}\n    conservations['gerp'] = parse_conservation(variant, 'dbNSFP_GERP___RS')\n    conservations['phast'] = parse_conservation(variant, 'dbNSFP_phastCons100way_vertebrate')\n    conservations['phylop'] = parse_conservation(variant, 'dbNSFP_phyloP100way_vertebrate')\n    return conservations", "docstring": "Parse the conservation predictors\n\nArgs:\nvariant(dict): A variant dictionary\n\nReturns:\nconservations(dict): A dictionary with the conservations", "source": "codesearchnet"}
{"code": "def _flatten_location_translations(location_translations):\n    \n    sources_to_process = set(six.iterkeys(location_translations))\n\n    def _update_translation(source):\n        \n        destination = location_translations[source]\n        if destination not in location_translations:\n            \n            return destination\n        else:\n            \n            \n            sources_to_process.discard(destination)\n            final_destination = _update_translation(destination)\n            location_translations[source] = final_destination\n            return final_destination\n\n    while sources_to_process:\n        _update_translation(sources_to_process.pop())", "docstring": "If location A translates to B, and B to C, then make A translate directly to C.\n\nArgs:\nlocation_translations: dict of Location -> Location, where the key translates to the value.\nMutated in place for efficiency and simplicity of implementation.", "source": "juraj-google-style"}
{"code": "def _call(self, utterances_batch: List[str], utterances_ids: List[int] = None) -> List[RichMessage]:\n        \n\n        rich_message = RichMessage()\n        for utt_id, utt in enumerate(utterances_batch):\n\n            if utterances_ids:\n                id_ = utterances_ids[utt_id]\n\n            log.debug(f'Utterance: {utt}')\n\n            if utt == \"/start\":\n                welcome = \"I am a new e-commerce bot. I will help you to find products that you are looking for. Please type your request in plain text.\"\n                rich_message.add_control(PlainText(welcome))\n                continue\n\n            if utt[0] == \"@\":\n                command, *parts = utt.split(\":\")\n                log.debug(f'Actions: {parts}')\n\n                if command == \"@details\":\n                    batch_index = int(parts[0])  \n                    item_index = int(parts[1])  \n                    rich_message.add_control(PlainText(show_details(\n                        self.history[id_][batch_index][item_index])))\n                    continue\n\n                if command == \"@entropy\":\n                    state = self.history[id_][int(parts[0])]\n                    state[parts[1]] = parts[2]\n                    state[\"start\"] = 0\n                    state[\"stop\"] = 5\n                    utt = state['query']\n                    self.states[id_] = state\n\n                if command == \"@next\":\n                    state = self.history[id_][int(parts[0])]\n                    state['start'] = state['stop']\n                    state['stop'] = state['stop'] + 5\n                    utt = state['query']\n                    self.states[id_] = state\n            else:\n                if id_ not in self.states:\n                    self.states[id_] = {}\n\n                self.states[id_][\"start\"] = 0\n                self.states[id_][\"stop\"] = 5\n\n            responses_batch, confidences_batch, state_batch = self.skills[0](\n                [utt], self.history[id_], [self.states[id_]])\n\n            \n            self.states[id_] = state_batch[0]\n            self.states[id_][\"query\"] = utt\n\n            items_batch, entropy_batch = responses_batch\n\n            for batch_idx, items in enumerate(items_batch):\n\n                self.history[id_].append(items)\n                self.history[id_].append(self.states[id_])\n\n                for idx, item in enumerate(items):\n                    rich_message.add_control(_draw_item(item, idx, self.history[id_]))\n\n                if len(items) == self.states[id_]['stop'] - self.states[id_]['start']:\n                    buttons_frame = _draw_tail(entropy_batch[batch_idx], self.history[id_])\n                    rich_message.add_control(buttons_frame)\n\n        return [rich_message]", "docstring": "Processes batch of utterances and returns corresponding responses batch.\n\nArgs:\nutterances_batch: Batch of incoming utterances.\nutterances_ids: Batch of dialog IDs corresponding to incoming utterances.\n\nReturns:\nresponses: A batch of responses corresponding to the\nutterance batch received by agent.", "source": "juraj-google-style"}
{"code": "def _CreateFeed(client):\n  \n  \n  feed_service = client.GetService('FeedService', version='v201809')\n\n  \n  operation = {\n      \n      'operand': {\n          'name': 'DSA Feed %s' % uuid.uuid4(),\n          \n          'attributes': [\n              {'type': 'URL_LIST', 'name': 'Page URL'},\n              {'type': 'STRING_LIST', 'name': 'Label'}\n          ],\n          'origin': 'USER'\n      },\n      'operator': 'ADD'\n  }\n\n  \n  feed = feed_service.mutate([operation])['value'][0]\n  return _DSAFeedDetails(feed['id'], feed['attributes'][0]['id'],\n                         feed['attributes'][1]['id'])", "docstring": "Creates the feed for DSA page URLs.\n\nArgs:\nclient: an AdWordsClient instance.\n\nReturns:\nA _DSAFeedDetails instance containing details about the created feed.", "source": "juraj-google-style"}
{"code": "def set_peer_address(self, value=None, default=False, disable=False):\n    return self._configure_mlag('peer-address', value, default, disable)", "docstring": "Configures the mlag peer-address value\n\nArgs:\nvalue (str): The value to configure the peer-address\ndefault (bool): Configures the peer-address using the\ndefault keyword\ndisable (bool): Negates the peer-address using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "codesearchnet"}
{"code": "def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):\n    \n    workbook = create_excel_workbook(data, result_info_key, identifier_keys)\n    workbook.save(output_file_name)\n    print('Saved Excel file to {}'.format(output_file_name))", "docstring": "Creates an Excel file containing data returned by the Analytics API\n\nArgs:\ndata: Analytics API data as a list of dicts\noutput_file_name: File name for output Excel file (use .xlsx extension).", "source": "juraj-google-style"}
{"code": "def register_custom_opdefs(custom_opdefs_list):\n    return wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list)", "docstring": "Register the given custom opdefs to the TensorFlow global op registry.\n\nArgs:\ncustom_opdefs_list: String representing the custom ops OpDefs that are\nincluded in the GraphDef.\n\nReturns:\nTrue if the registration is successfully completed.", "source": "github-repos"}
{"code": "def _ParseLeak(\n      self, parser_mediator, cache_directories, msiecf_item, recovered=False):\n    \n    \n    date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n\n    event_data = MSIECFLeakEventData()\n    event_data.cached_filename = msiecf_item.filename\n    event_data.cached_file_size = msiecf_item.cached_file_size\n    event_data.cache_directory_index = msiecf_item.cache_directory_index\n    event_data.offset = msiecf_item.offset\n    event_data.recovered = recovered\n\n    if (event_data.cache_directory_index >= 0 and\n        event_data.cache_directory_index < len(cache_directories)):\n      event_data.cache_directory_name = (\n          cache_directories[event_data.cache_directory_index])\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract data from a MSIE Cache Files (MSIECF) leak item.\n\nEvery item is stored as an event object, one for each timestamp.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache_directories (list[str]): cache directory names.\nmsiecf_item (pymsiecf.leak): MSIECF leak item.\nrecovered (Optional[bool]): True if the item was recovered.", "source": "juraj-google-style"}
{"code": "def from_config(cls, config_dict: dict, schema_path: str = None):\n        \n        \n        if schema_path is None:\n            schema_path = join(dirname(__file__), 'schema',\n                               'configure_sbi.json')\n        with open(schema_path, 'r') as file:\n            schema = json.loads(file.read())\n            validate(config_dict, schema)\n\n        \n        config_dict['status'] = 'created'\n\n        \n        if 'subarray_id' not in config_dict:\n            config_dict['subarray_id'] = 'None'\n\n        \n        timestamp = datetime.datetime.utcnow().isoformat()\n        config_dict['created'] = timestamp\n        config_dict['updated'] = timestamp\n\n        \n        pb_list = copy.deepcopy(config_dict['processing_blocks'])\n\n        \n        config_dict.pop('processing_blocks', None)\n\n        \n        config_dict['processing_block_ids'] = []\n        for pb in pb_list:\n            config_dict['processing_block_ids'].append(pb['id'])\n\n        \n        key = SchedulingObject.get_key(SBI_KEY, config_dict['id'])\n        DB.save_dict(key, config_dict, hierarchical=False)\n        \n\n        \n        key = '{}:active'.format(SBI_KEY)\n        DB.append_to_list(key, config_dict['id'])\n\n        \n        sbi = SchedulingObject(SBI_KEY, config_dict['id'])\n        sbi.set_status('created')\n\n        for pb in pb_list:\n            pb['sbi_id'] = config_dict['id']\n            cls._add_pb(pb)\n\n        return cls(config_dict['id'])", "docstring": "Create an SBI object from the specified configuration dict.\n\nNOTE(BM) This should really be done as a single atomic db transaction.\n\nArgs:\nconfig_dict(dict): SBI configuration dictionary\nschema_path(str, optional): Path to the SBI config schema.", "source": "juraj-google-style"}
{"code": "def _expand_terms(self, terms):\n    ret = {'keywords': list(), 'doc': list()}\n    if (not isinstance(terms, dict)):\n        stp = SearchTermParser()\n        terms = stp.parse(terms, term_join=self.backend._and_join)\n    if ('about' in terms):\n        ret['doc'].append(terms['about'])\n    if ('source' in terms):\n        ret['keywords'].append(terms['source'])\n    return ret", "docstring": "Expands terms of the dataset to the appropriate fields. It will parse the search phrase\nand return only the search term components that are applicable to a Dataset query.\n\nArgs:\nterms (dict or str):\n\nReturns:\ndict: keys are field names, values are query strings", "source": "codesearchnet"}
{"code": "def _ParseLogLine(self, parser_mediator, structure, key):\n    \n    time_elements_tuple = self._GetTimeElementsTuple(structure)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    self._last_month = time_elements_tuple[1]\n\n    if key == 'logline':\n      self._previous_structure = structure\n      message = structure.message\n    else:\n      message = 'Repeated {0:d} times: {1:s}'.format(\n          structure.times, self._previous_structure.message)\n      structure = self._previous_structure\n\n    \n    \n\n    event_data = MacOSSecuritydLogEventData()\n    event_data.caller = structure.caller.strip() or 'unknown'\n    event_data.facility = structure.facility\n    event_data.level = structure.level\n    event_data.message = message\n    event_data.security_api = structure.security_api or 'unknown'\n    event_data.sender_pid = structure.sender_pid\n    event_data.sender = structure.sender.strip()\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parse a single log line and produce an event object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\nkey (str): name of the parsed structure.", "source": "juraj-google-style"}
{"code": "def write(self, *pb2_obj):\n        \n        base = len(self._write_buff)\n\n        for idx, obj in enumerate(pb2_obj):\n            if self._buffer_size > 0 and \\\n                    (idx + base) != 0 and \\\n                    (idx + base) % self._buffer_size == 0:\n                self.flush()\n            self._write_buff.append(obj)\n\n        if self._buffer_size == 0:\n            self.flush()", "docstring": "Write a group of one or more protobuf objects to the file. Multiple\nobject groups can be written by calling this method several times\nbefore closing stream or exiting the runtime context.\n\nThe input protobuf objects get buffered and will be written down when\nthe number of buffered objects exceed the `self._buffer_size`.\n\nArgs:\npb2_obj (*protobuf.message.Message): list of protobuf messages.", "source": "juraj-google-style"}
{"code": "def file(path, format='csv', csv_delimiter=',', csv_header=True, compress=False, use_cache=True):\n    output = QueryOutput()\n    output._output_type = 'file'\n    output._file_path = path\n    output._file_format = format\n    output._csv_delimiter = csv_delimiter\n    output._csv_header = csv_header\n    output._compress_file = compress\n    return output", "docstring": "Construct a query output object where the result is either a local file or a GCS path\n\nNote that there are two jobs that may need to be run sequentially, one to run the query,\nand the second to extract the resulting table. These are wrapped by a single outer Job.\n\nIf the query has already been executed and you would prefer to get a Job just for the\nextract, you can can call extract[_async] on the QueryResultsTable returned by the query\n\nArgs:\npath: the destination path. Can either be a local or GCS URI (starting with gs://)\nformat: the format to use for the exported data; one of 'csv', 'json', or 'avro'\n(default 'csv').\ncsv_delimiter: for CSV exports, the field delimiter to use (default ',').\ncsv_header: for CSV exports, whether to include an initial header line (default True).\ncompress: whether to compress the data on export. Compression is not supported for\nAVRO format (default False). Applies only to GCS URIs.\nuse_cache: whether to use cached results or not (default True).", "source": "codesearchnet"}
{"code": "def post_process_travis_macos(journal_filename):\n    \n    travis_build_dir = os.environ.get(\"TRAVIS_BUILD_DIR\", \"\")\n    with open(journal_filename, \"r\") as file_obj:\n        content = file_obj.read()\n    processed = content.replace(travis_build_dir, \"${TRAVIS_BUILD_DIR}\")\n    with open(journal_filename, \"w\") as file_obj:\n        file_obj.write(processed)", "docstring": "Post-process a generated journal file on Travis macOS.\n\nArgs:\njournal_filename (str): The name of the journal file.", "source": "juraj-google-style"}
{"code": "def ldu(load_v, name):\n    try:\n        return load_v()\n    except (KeyError, AttributeError, NameError):\n        return Undefined(name)", "docstring": "Load variable operator that returns Undefined when failing to evaluate.\n\nNote: the name (\"load or return undefined\") is abbreviated to minimize\nthe amount of clutter in generated code.\n\nThis variant of `ld` is useful when loading symbols that may be undefined at\nruntime, such as composite symbols, and whether they are defined or not cannot\nbe determined statically. For example `d['a']` is undefined when `d` is an\nempty dict.\n\nArgs:\nload_v: Lambda that executes the actual read.\nname: Human-readable name of the symbol being read.\nReturns:\nEither the value of the symbol, or Undefined, if the symbol is not fully\ndefined.", "source": "github-repos"}
{"code": "def _get_mtime():\n    return ((os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH))) or 0)", "docstring": "Get the modified time of the RPM Database.\n\nReturns:\nUnix ticks", "source": "codesearchnet"}
{"code": "def evaluate_model_predictions(y_true, y_pred, weights=None):\n    \n    if isinstance(y_pred[0], np.ndarray):\n        y_pred = np.concatenate(y_pred)\n    if isinstance(y_true[0], np.ndarray):\n        y_true = np.concatenate(y_true)\n    if (weights is not None) and (isinstance(weights[0], np.ndarray)):\n        weights = np.concatenate(weights)\n\n    accuracy = accuracy_score(\n        y_true, y_pred, normalize=True, sample_weight=weights)\n    precision = precision_score(\n        y_true, y_pred, average='binary', pos_label=1, sample_weight=weights)\n    recall = recall_score(\n        y_true, y_pred, average='binary', pos_label=1, sample_weight=weights)\n    f1 = f1_score(\n        y_true, y_pred, average='binary', pos_label=1, sample_weight=weights)\n    return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}", "docstring": "Evaluate the performance of an extractor model's binary classification\npredictions, typically at the block level, of whether a block is content\nor not.\n\nArgs:\ny_true (``np.ndarray``)\ny_pred (``np.ndarray``)\nweights (``np.ndarray``)\n\nReturns:\nDict[str, float]", "source": "juraj-google-style"}
{"code": "def atan(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.atan2, tf.float32)", "docstring": "Returns a TensorFluent for the arctan function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the arctan function.", "source": "codesearchnet"}
{"code": "def get_available_references(self, datas):\n        \n        names = []\n\n        for k, v in datas.items():\n            if k.startswith(RULE_REFERENCE):\n                names.append(k[len(RULE_REFERENCE)+1:])\n\n        return names", "docstring": "Get available manifest reference names.\n\nEvery rules starting with prefix from ``nomenclature.RULE_REFERENCE``\nare available references.\n\nOnly name validation is performed on these references.\n\nArguments:\ndatas (dict): Data where to search for reference declarations.\n\nReturns:\nlist: List of every available reference names. This is the real\nname unprefixed.", "source": "juraj-google-style"}
{"code": "def flush_all(self, delay=0, noreply=None):\n        \n        if noreply is None:\n            noreply = self.default_noreply\n        cmd = b'flush_all ' + six.text_type(delay).encode('ascii')\n        if noreply:\n            cmd += b' noreply'\n        cmd += b'\\r\\n'\n        results = self._misc_cmd([cmd], b'flush_all', noreply)\n        if noreply:\n            return True\n        return results[0] == b'OK'", "docstring": "The memcached \"flush_all\" command.\n\nArgs:\ndelay: optional int, the number of seconds to wait before flushing,\nor zero to flush immediately (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue.", "source": "juraj-google-style"}
{"code": "def delete_user(self, user):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.delete_user(user)", "docstring": "Delete the given user.\n\nArgs:\nuser (string): User name.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:\n        \n        raise NotImplementedError(\"This method must be defined for each subclass.\")", "docstring": "Return a dataframe of validation results for the appropriate series vs the vector of validators.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply validation logic.\nfailed_only (bool): If ``True``: return only the indexes that failed to validate.", "source": "juraj-google-style"}
{"code": "def Parse(self, value):\n    \n\n    value_line = value.split(' ')\n    if len(value_line) < 3:\n      raise TextFSMTemplateError('Expect at least 3 tokens on line.')\n\n    if not value_line[2].startswith('('):\n      \n      options = value_line[1]\n      for option in options.split(','):\n        self._AddOption(option)\n      \n      _ = [option.OnCreateOptions() for option in self.options]\n\n      self.name = value_line[2]\n      self.regex = ' '.join(value_line[3:])\n    else:\n      \n      \n      self.name = value_line[1]\n      self.regex = ' '.join(value_line[2:])\n\n    if len(self.name) > self.max_name_len:\n      raise TextFSMTemplateError(\n          \"Invalid Value name '%s' or name too long.\" % self.name)\n\n    if (not re.match(r'^\\(.*\\)$', self.regex) or\n        self.regex.count('(') != self.regex.count(')')):\n      raise TextFSMTemplateError(\n          \"Value '%s' must be contained within a '()' pair.\" % self.regex)\n\n    self.template = re.sub(r'^\\(', '(?P<%s>' % self.name, self.regex)\n\n    \n    if any(map(lambda x: isinstance(x, TextFSMOptions.List), self.options)):\n        try:\n            self.compiled_regex = re.compile(self.regex)\n        except re.error as e:\n            raise TextFSMTemplateError(str(e))", "docstring": "Parse a 'Value' declaration.\n\nArgs:\nvalue: String line from a template file, must begin with 'Value '.\n\nRaises:\nTextFSMTemplateError: Value declaration contains an error.", "source": "juraj-google-style"}
{"code": "def execute(self, method, **kwargs):\n    payload = {'id': 1, 'jsonrpc': '2.0', 'method': method, 'params': kwargs}\n    credentials = base64.b64encode('{}:{}'.format(self._username, self._password).encode())\n    auth_header_prefix = ('Basic ' if (self._auth_header == DEFAULT_AUTH_HEADER) else '')\n    headers = {self._auth_header: (auth_header_prefix + credentials.decode()), 'Content-Type': 'application/json'}\n    return self._do_request(headers, payload)", "docstring": "Call remote API procedure\n\nArgs:\nmethod: Procedure name\nkwargs: Procedure named arguments\n\nReturns:\nProcedure result\n\nRaises:\nurllib2.HTTPError: Any HTTP error (Python 2)\nurllib.error.HTTPError: Any HTTP error (Python 3)", "source": "codesearchnet"}
{"code": "def process_extra_vars(extra_vars_list, force_json=True):\n    extra_vars = {}\n    extra_vars_yaml = ''\n    for extra_vars_opt in extra_vars_list:\n        if extra_vars_opt.startswith('@'):\n            with open(extra_vars_opt[1:], 'r') as f:\n                extra_vars_opt = f.read()\n            opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)\n        else:\n            opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)\n        if any((line.startswith('\n            extra_vars_yaml += (extra_vars_opt + '\\n')\n        elif (extra_vars_opt != ''):\n            extra_vars_yaml += (yaml.dump(opt_dict, default_flow_style=False) + '\\n')\n        extra_vars.update(opt_dict)\n    if (not force_json):\n        try:\n            try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)\n            assert (type(try_dict) is dict)\n            debug.log('Using unprocessed YAML', header='decision', nl=2)\n            return extra_vars_yaml.rstrip()\n        except Exception:\n            debug.log('Failed YAML parsing, defaulting to JSON', header='decison', nl=2)\n    if (extra_vars == {}):\n        return ''\n    return json.dumps(extra_vars, ensure_ascii=False)", "docstring": "Returns a string that is valid JSON or YAML and contains all the\nvariables in every extra_vars_opt inside of extra_vars_list.\n\nArgs:\nparse_kv (bool): whether to allow key=value syntax.\nforce_json (bool): if True, always output json.", "source": "codesearchnet"}
{"code": "def reminders_complete(self, *, reminder: str, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"reminder\": reminder})\n        return self.api_call(\"reminders.complete\", json=kwargs)", "docstring": "Marks a reminder as complete.\n\nArgs:\nreminder (str): The ID of the reminder to be marked as complete.\ne.g. 'Rm12345678'", "source": "juraj-google-style"}
{"code": "def parse_blob_info(field_storage):\n  \n  if field_storage is None:\n    return None\n\n  field_name = field_storage.name\n\n  def get_value(dct, name):\n    value = dct.get(name, None)\n    if value is None:\n      raise BlobInfoParseError(\n          'Field %s has no %s.' % (field_name, name))\n    return value\n\n  filename = get_value(field_storage.disposition_options, 'filename')\n  blob_key_str = get_value(field_storage.type_options, 'blob-key')\n  blob_key = BlobKey(blob_key_str)\n\n  upload_content = email.message_from_file(field_storage.file)\n  content_type = get_value(upload_content, 'content-type')\n  size = get_value(upload_content, 'content-length')\n  creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)\n  md5_hash_encoded = get_value(upload_content, 'content-md5')\n  md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)\n\n  try:\n    size = int(size)\n  except (TypeError, ValueError):\n    raise BlobInfoParseError(\n        '%s is not a valid value for %s size.' % (size, field_name))\n\n  try:\n    creation = blobstore._parse_creation(creation_string, field_name)\n  except blobstore._CreationFormatError, err:\n    raise BlobInfoParseError(str(err))\n\n  return BlobInfo(id=blob_key_str,\n                  content_type=content_type,\n                  creation=creation,\n                  filename=filename,\n                  size=size,\n                  md5_hash=md5_hash,\n                 )", "docstring": "Parse a BlobInfo record from file upload field_storage.\n\nArgs:\nfield_storage: cgi.FieldStorage that represents uploaded blob.\n\nReturns:\nBlobInfo record as parsed from the field-storage instance.\nNone if there was no field_storage.\n\nRaises:\nBlobInfoParseError when provided field_storage does not contain enough\ninformation to construct a BlobInfo object.", "source": "juraj-google-style"}
{"code": "def save_config(self, lookup_key, config):\n    \n    with self._config_lock:\n      self._configs[lookup_key] = config", "docstring": "Save a configuration to the cache of configs.\n\nArgs:\nlookup_key: A string containing the cache lookup key.\nconfig: The dict containing the configuration to save to the cache.", "source": "juraj-google-style"}
{"code": "def unique_row_id(self):\n    self._unique_row_id += 1\n    return '%s_%d' % (self._row_id_prefix, self._unique_row_id)", "docstring": "Returns a unique row ID (str) used to avoid multiple insertions.\n\nIf the row ID is provided, BigQuery will make a best effort to not insert\nthe same row multiple times for fail and retry scenarios in which the insert\nrequest may be issued several times. This comes into play for sinks executed\nin a local runner.\n\nReturns:\na unique row ID string", "source": "github-repos"}
{"code": "def load(self) -> RepresentativeDatasetMapping:\n    raise NotImplementedError('Method \"load\" is not implemented.')", "docstring": "Loads the representative datasets.\n\nReturns:\nrepresentative dataset mapping: A loaded signature def key ->\nrepresentative mapping.", "source": "github-repos"}
{"code": "def generate_combinations_with_testcase_name(**kwargs) -> list[OrderedDict[str, Any]]:\n    combinations = _combine_named_parameters(**kwargs)\n    named_combinations: list[OrderedDict[str, Any]] = []\n    for combination in combinations:\n        assert isinstance(combination, OrderedDict)\n        name = ''.join(['_{}_{}'.format(''.join(filter(str.isalnum, key)), ''.join(filter(str.isalnum, str(value)))) for key, value in combination.items()])\n        named_combinations.append(OrderedDict(list(combination.items()) + [('testcase_name', '_test{}'.format(name))]))\n    return named_combinations", "docstring": "Generate combinations based on its keyword arguments using combine().\n\nThis function calls combine() and appends a testcase name to the list of\ndictionaries returned. The 'testcase_name' key is a required for named\nparameterized tests.\n\nArgs:\n**kwargs: keyword arguments of form `option=[possibilities, ...]` or\n`option=the_only_possibility`.\n\nReturns:\na list of dictionaries for each combination. Keys in the dictionaries are\nthe keyword argument names.  Each key has one value - one of the\ncorresponding keyword argument values.", "source": "github-repos"}
{"code": "def iterator_chain(variables: VarType, parent: str=None) -> Iterable[VarMatrix]:\n    logger.debug('Yielding from append iterator')\n    if (not isinstance(variables, list)):\n        raise ValueError(f'Append keyword only takes a list of arguments, got {variables} of type {type(variables)}')\n    (yield list(chain.from_iterable((variable_matrix(item, parent, 'product') for item in variables))))", "docstring": "This successively appends each element of an array to a single list of values.\n\nThis takes a list of values and puts all the values generated for each element in\nthe list into a single list of values. It uses the :func:`itertools.chain` function to\nachieve this. This function is particularly useful for specifying multiple types of\nsimulations with different parameters.\n\nArgs:\nvariables: The variables object\nparent: Unused", "source": "codesearchnet"}
{"code": "def allreduce_grads(all_grads, average):\n    \n\n    if get_tf_version_tuple() <= (1, 12):\n        from tensorflow.contrib import nccl\n    else:\n        from tensorflow.python.ops import nccl_ops as nccl\n    nr_tower = len(all_grads)\n    if nr_tower == 1:\n        return all_grads\n    new_all_grads = []  \n    for grads in zip(*all_grads):\n        summed = nccl.all_sum(grads)\n\n        grads_for_devices = []  \n        for g in summed:\n            with tf.device(g.device):\n                \n                if average:\n                    g = tf.multiply(g, 1.0 / nr_tower)\n            grads_for_devices.append(g)\n        new_all_grads.append(grads_for_devices)\n\n    \n    ret = list(zip(*new_all_grads))\n    return ret", "docstring": "All-reduce average the gradients among K devices. Results are broadcasted to all devices.\n\nArgs:\nall_grads (K x N): List of list of gradients. N is the number of variables.\naverage (bool): average gradients or not.\n\nReturns:\nK x N: same as input, but each grad is replaced by the average over K devices.", "source": "juraj-google-style"}
{"code": "def get_license(name):\n    filenames = os.listdir((cwd + licenses_loc))\n    licenses = dict(zip(filenames, ([(- 1)] * len(filenames))))\n    for l in licenses:\n        licenses[l] = compute_distance(name, l)\n    return min(licenses, key=(lambda k: licenses[k]))", "docstring": "Returns the closest match to the requested license.\n\nArguments:\n- name (str) License to use\n\nReturns:\n- (str) License that most closely matches the 'name' parameter", "source": "codesearchnet"}
{"code": "def joint(node):\n    (node, _, _) = _fix(node)\n    body = (node.body[0].body[:(- 1)] + node.body[1].body)\n    func = gast.Module(body=[gast.FunctionDef(name=node.body[0].name, args=node.body[1].args, body=body, decorator_list=[], returns=None)])\n    anno.clearanno(func)\n    return func", "docstring": "Merge the bodies of primal and adjoint into a single function.\n\nArgs:\nnode: A module with the primal and adjoint function definitions as returned\nby `reverse_ad`.\n\nReturns:\nfunc: A `Module` node with a single function definition containing the\ncombined primal and adjoint.", "source": "codesearchnet"}
{"code": "def _find_initialized_value_for_variable(variable_op):\n    try:\n        var_names = [variable_op.node_def.name, variable_op.node_def.name + ':0']\n        for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES):\n            for var in variable_op.graph.get_collection(collection_name):\n                if var.name in var_names:\n                    return var.initialized_value()\n    except AttributeError:\n        return None\n    return None", "docstring": "Find the initialized value for a variable op.\n\nTo do so, lookup the variable op in the variables collection.\n\nArgs:\nvariable_op: A variable `Operation`.\n\nReturns:\nA `Tensor` representing the initialized value for the variable or `None`\nif the initialized value could not be found.", "source": "github-repos"}
{"code": "def lineitem_get_v1(config, auth, advertiser_id, lineitem_id):\n    return API_DV360(config, auth).advertisers().lineItems().get(advertiserId=advertiser_id, lineItemId=lineitem_id).execute()", "docstring": "Gets a DV360 Line Item\n\nArgs:\nauth: StarThinker authentication scheme\nadvertiser_id: ID of the advertiser of the line item\nlineitem_id: ID of the line item\nReturns: Line Item from the DV360 API", "source": "github-repos"}
{"code": "def en010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `en010`'.format(value))\n    self._en010 = value", "docstring": "Corresponds to IDD Field `en010`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `en010`\nUnit: kJ/kg\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_subscriber_queue(self, event_types=None):\n    try:\n        self.started_queue.get(timeout=1)\n        raise RuntimeError('Cannot create a new subscriber queue while Exchange is running.')\n    except Empty:\n        pass\n    if (event_types is None):\n        event_types = EventTypes.ALL\n    queue = Queue()\n    self.queues[event_types].append(queue)\n    return queue", "docstring": "Create a new queue for a specific combination of event types\nand return it.\n\nReturns:\na :class:`multiprocessing.Queue`.\nRaises:\nRuntimeError if called after `run`", "source": "codesearchnet"}
{"code": "def __parameter_default(self, field):\n    \n    if field.default:\n      if isinstance(field, messages.EnumField):\n        return field.default.name\n      else:\n        return field.default", "docstring": "Returns default value of field if it has one.\n\nArgs:\nfield: A simple field.\n\nReturns:\nThe default value of the field, if any exists, with the exception of an\nenum field, which will have its value cast to a string.", "source": "juraj-google-style"}
{"code": "def set_probe_file_name(self, checked):\n        \n        if checked:\n            file_name = os.path.join(self.gui_settings['probes_log_folder'], '{:s}_probes.csv'.format(datetime.datetime.now().strftime('%y%m%d-%H_%M_%S')))\n            if os.path.isfile(file_name) == False:\n                self.probe_file = open(file_name, 'a')\n                new_values = self.read_probes.probes_values\n                header = ','.join(list(np.array([['{:s} ({:s})'.format(p, instr) for p in list(p_dict.keys())] for instr, p_dict in new_values.items()]).flatten()))\n                self.probe_file.write('{:s}\\n'.format(header))\n        else:\n            self.probe_file.close()", "docstring": "sets the filename to which the probe logging function will write\nArgs:\nchecked: boolean (True: opens file) (False: closes file)", "source": "juraj-google-style"}
{"code": "def create_module_graph(module_spec):\n    (height, width) = hub.get_expected_image_size(module_spec)\n    with tf.Graph().as_default() as graph:\n        resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])\n        m = hub.Module(module_spec)\n        bottleneck_tensor = m(resized_input_tensor)\n        wants_quantization = any(((node.op in FAKE_QUANT_OPS) for node in graph.as_graph_def().node))\n    return (graph, bottleneck_tensor, resized_input_tensor, wants_quantization)", "docstring": "Creates a graph and loads Hub Module into it.\n\nArgs:\nmodule_spec: the hub.ModuleSpec for the image module being used.\n\nReturns:\ngraph: the tf.Graph that was created.\nbottleneck_tensor: the bottleneck values output by the module.\nresized_input_tensor: the input images, resized as expected by the module.\nwants_quantization: a boolean, whether the module has been instrumented\nwith fake quantization ops.", "source": "codesearchnet"}
{"code": "def mark_experimental(fn):\n    \n    \n    @wraps(fn)\n    def wrapper(*args, **kw):   \n        from peltak.core import shell\n\n        if shell.is_tty:\n            warnings.warn(\"This command is has experimental status. The \"\n                          \"interface is not yet stable and might change \"\n                          \"without notice within with a patch version update. \"\n                          \"Use at your own risk\")\n        return fn(*args, **kw)\n\n    return wrapper", "docstring": "Mark function as experimental.\n\nArgs:\nfn (FunctionType):\nThe command function to decorate.", "source": "juraj-google-style"}
{"code": "def are_genes_in_api(my_clue_api_client, gene_symbols):\n    if (len(gene_symbols) > 0):\n        query_gene_symbols = (gene_symbols if (type(gene_symbols) is list) else list(gene_symbols))\n        query_result = my_clue_api_client.run_filter_query(resource_name, {'where': {'gene_symbol': {'inq': query_gene_symbols}}, 'fields': {'gene_symbol': True}})\n        logger.debug('query_result:  {}'.format(query_result))\n        r = set([x['gene_symbol'] for x in query_result])\n        return r\n    else:\n        logger.warning('provided gene_symbols was empty, cannot run query')\n        return set()", "docstring": "determine if genes are present in the API\n\nArgs:\nmy_clue_api_client:\ngene_symbols: collection of gene symbols to query the API with\n\nReturns: set of the found gene symbols", "source": "codesearchnet"}
{"code": "def get_local_variable_from_name(self, variable_name):\n        \n        return next((v for v in self.variables if v.name == variable_name), None)", "docstring": "Return a local variable from a name\nArgs:\nvarible_name (str): name of the variable\nReturns:\nLocalVariable", "source": "juraj-google-style"}
{"code": "def StartFlowAndWorker(client_id, flow_name, **kwargs):\n  \n  \n  queue = rdfvalue.RDFURN(\"DEBUG-%s-\" % getpass.getuser())\n  if \"token\" in kwargs:\n    token = kwargs.pop(\"token\")\n  else:\n    token = access_control.ACLToken(username=\"GRRConsole\")\n\n  session_id = flow.StartAFF4Flow(\n      client_id=client_id,\n      flow_name=flow_name,\n      queue=queue,\n      token=token,\n      **kwargs)\n  worker_thrd = worker_lib.GRRWorker(\n      queues=[queue], token=token, threadpool_size=1)\n  while True:\n    try:\n      worker_thrd.RunOnce()\n    except KeyboardInterrupt:\n      print(\"exiting\")\n      worker_thrd.thread_pool.Join()\n      break\n\n    time.sleep(2)\n    with aff4.FACTORY.Open(session_id, token=token) as flow_obj:\n      if not flow_obj.GetRunner().IsRunning():\n        break\n\n  \n  worker_thrd.thread_pool.Join()\n\n  return session_id", "docstring": "Launches the flow and worker and waits for it to finish.\n\nArgs:\nclient_id: The client common name we issue the request.\nflow_name: The name of the flow to launch.\n**kwargs: passthrough to flow.\n\nReturns:\nA flow session id.\n\nNote: you need raw access to run this flow as it requires running a worker.", "source": "juraj-google-style"}
{"code": "def unique_parameter_values(self) -> 'list[Collection[cfg.Binding]]':\n\n    def _get_values(parameter):\n        return {b.data.get_type_key(): b for b in parameter.bindings}.values()\n    return [_get_values(parameter) for parameter in self._unique_parameters()]", "docstring": "Get unique parameter subtypes as bindings.\n\nLike _unique_parameters, but returns bindings instead of variables.\n\nReturns:\nA list of list of bindings.", "source": "github-repos"}
{"code": "def install(self, connection, partition, table_name = None, index_columns=None, materialize=False,\n                logger = None):\n        \n        virtual_table = partition.vid\n\n        table = partition.vid if not table_name else table_name\n\n        if self._relation_exists(connection, table):\n            if logger:\n                logger.debug(\"Skipping '{}'; already installed\".format(table))\n            return\n        else:\n            if logger:\n                logger.info(\"Installing '{}'\".format(table))\n\n        partition.localize()\n\n\n        virtual_table = partition.vid + '_vt'\n\n        self._add_partition(connection, partition)\n\n        if materialize:\n\n            if self._relation_exists(connection, table):\n                debug_logger.debug(\n                    'Materialized table of the partition already exists.\\n partition: {}, table: {}'\n                    .format(partition.name, table))\n            else:\n                cursor = connection.cursor()\n\n                \n                create_query = self.__class__._get_create_query(partition, table)\n                debug_logger.debug(\n                    'Creating new materialized view for partition mpr.'\n                    '\\n    partition: {}, view: {}, query: {}'\n                    .format(partition.name, table, create_query))\n\n                cursor.execute(create_query)\n\n                \n                copy_query = .format(table, virtual_table)\n                debug_logger.debug(\n                    'Populating sqlite table with rows from partition mpr.'\n                    '\\n    partition: {}, view: {}, query: {}'\n                    .format(partition.name, table, copy_query))\n                cursor.execute(copy_query)\n\n                cursor.close()\n\n        else:\n            cursor = connection.cursor()\n            view_q = \"CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} \".format(partition.vid, virtual_table)\n            cursor.execute(view_q)\n            cursor.close()\n\n        if index_columns is not None:\n            self.index(connection,table, index_columns)\n\n        return table", "docstring": "Creates virtual table or read-only table for gion.\n\nArgs:\nref (str): id, vid, name or versioned name of the partition.\nmaterialize (boolean): if True, create read-only table. If False create virtual table.\n\nReturns:\nstr: name of the created table.", "source": "juraj-google-style"}
{"code": "def as_dict(self, verbosity=0):\n    species_list = []\n    for (spec, occu) in self._species.items():\n        d = spec.as_dict()\n        del d['@module']\n        del d['@class']\n        d['occu'] = occu\n        species_list.append(d)\n    d = {'species': species_list, 'abc': [float(c) for c in self._frac_coords], 'lattice': self._lattice.as_dict(verbosity=verbosity), '@module': self.__class__.__module__, '@class': self.__class__.__name__}\n    if (verbosity > 0):\n        d['xyz'] = [float(c) for c in self.coords]\n        d['label'] = self.species_string\n    d['properties'] = self.properties\n    return d", "docstring": "Json-serializable dict representation of PeriodicSite.\n\nArgs:\nverbosity (int): Verbosity level. Default of 0 only includes the\nmatrix representation. Set to 1 for more details such as\ncartesian coordinates, etc.", "source": "codesearchnet"}
{"code": "def require_params(self, req):\n    params = {}\n    for (name, param) in self.params.items():\n        if ((name not in req.params) and param.required):\n            missing = (set((p for p in self.params if self.params[p].required)) - set(req.params.keys()))\n            raise errors.HTTPMissingParam(', '.join(missing))\n        elif ((name in req.params) or param.default):\n            try:\n                if param.many:\n                    values = (req.get_param_as_list(name, param.validated_value) or [(param.default and param.validated_value(param.default))])\n                    params[name] = param.container(values)\n                else:\n                    params[name] = param.validated_value(req.get_param(name, default=param.default))\n            except ValidationError as err:\n                raise err.as_invalid_param(name)\n            except ValueError as err:\n                raise errors.HTTPInvalidParam(str(err), name)\n    return params", "docstring": "Require all defined parameters from request query string.\n\nRaises ``falcon.errors.HTTPMissingParam`` exception if any of required\nparameters is missing and ``falcon.errors.HTTPInvalidParam`` if any\nof parameters could not be understood (wrong format).\n\nArgs:\nreq (falcon.Request): request object", "source": "codesearchnet"}
{"code": "def tokens(cls, tokens):\n    return cls(Lnk.TOKENS, tuple(map(int, tokens)))", "docstring": "Create a Lnk object for a token range.\n\nArgs:\ntokens: a list of token identifiers", "source": "codesearchnet"}
{"code": "def __init__(self, p_range, ns_range, query_spec):\n    \n    self._property_range = p_range\n    self._ns_range = ns_range\n    self._query_spec = query_spec\n    self._cursor = None\n    self._query = None", "docstring": "Init.\n\nArgs:\np_range: a property_range.PropertyRange object that defines the\nconditions entities should safisfy.\nns_range: a namesrange.NamespaceRange object that defines the namespaces\nto examine.\nquery_spec: a model.QuerySpec object that defines how to retrieve\nentities from datastore.", "source": "juraj-google-style"}
{"code": "def rank(input: ragged_tensor.Ragged, name=None):\n    with ops.name_scope(name, 'RaggedRank', [input]) as name:\n        if not ragged_tensor.is_ragged(input):\n            return array_ops.rank(input, name)\n        return input.ragged_rank + array_ops.rank(input.flat_values)", "docstring": "Returns the rank of a RaggedTensor.\n\nReturns a 0-D `int32` `Tensor` representing the rank of `input`.\n\n#### Example:\n\n>>> # shape of tensor 't' is [2, None, None]\n>>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])\n>>> tf.rank(t).numpy().item()\n3\n\nArgs:\ninput: A `RaggedTensor`\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `int32`.", "source": "github-repos"}
{"code": "def get_dialect(mixed: Union[(SQLCompiler, Engine, Dialect)]) -> Dialect:\n    if isinstance(mixed, Dialect):\n        return mixed\n    elif isinstance(mixed, Engine):\n        return mixed.dialect\n    elif isinstance(mixed, SQLCompiler):\n        return mixed.dialect\n    else:\n        raise ValueError(\"get_dialect: 'mixed' parameter of wrong type\")", "docstring": "Finds the SQLAlchemy dialect in use.\n\nArgs:\nmixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or\n:class:`Dialect` object\n\nReturns: the SQLAlchemy :class:`Dialect` being used", "source": "codesearchnet"}
{"code": "def get_campaign_name_list(self):\n    campaigns = self.find('campaigns', {})\n    campaign_names = []\n    for campaign in campaigns:\n        if ('name' in campaign):\n            campaign_names.append(campaign['name'])\n    return campaign_names", "docstring": "Returns a list of all valid campaign names\n\nReturns:\nList of strings containing all valid campaign names", "source": "codesearchnet"}
{"code": "def cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n    \n    return amp*np.cos(2*np.pi*freq*times+phase).astype(np.complex_)", "docstring": "Continuous cosine wave.\n\nArgs:\ntimes: Times to output wave for.\namp: Pulse amplitude.\nfreq: Pulse frequency, units of 1/dt.\nphase: Pulse phase.", "source": "juraj-google-style"}
{"code": "def extract_wavs(utterances: List[Utterance], tgt_dir: Path,\n                 lazy: bool) -> None:\n    \n    tgt_dir.mkdir(parents=True, exist_ok=True)\n    for utter in utterances:\n        wav_fn = \"{}.{}\".format(utter.prefix, \"wav\")\n        out_wav_path = tgt_dir / wav_fn\n        if lazy and out_wav_path.is_file():\n            logger.info(\"File {} already exists and lazy == {}; not \" \\\n                         \"writing.\".format(out_wav_path, lazy))\n            continue\n        logger.info(\"File {} does not exist and lazy == {}; creating \" \\\n                     \"it.\".format(out_wav_path, lazy))\n        trim_wav_ms(utter.org_media_path, out_wav_path,\n                    utter.start_time, utter.end_time)", "docstring": "Extracts WAVs from the media files associated with a list of Utterance\nobjects and stores it in a target directory.\n\nArgs:\nutterances: A list of Utterance objects, which include information\nabout the source media file, and the offset of the utterance in the\nmedia_file.\ntgt_dir: The directory in which to write the output WAVs.\nlazy: If True, then existing WAVs will not be overwritten if they have\nthe same name", "source": "juraj-google-style"}
{"code": "def _flatten_multiplicand_list(kernels):\n    flattened = []\n    for k in kernels:\n        if isinstance(k, _ProductKernel):\n            flattened += k.kernels\n        else:\n            flattened.append(k)\n    return flattened", "docstring": "Flatten a list of kernels which may contain _ProductKernel instances.\n\nArgs:\nkernels: Python list of `PositiveSemidefiniteKernel` instances\n\nReturns:\nPython list containing the elements of kernels, with any _ProductKernel\ninstances replaced by their `kernels` property contents.", "source": "codesearchnet"}
{"code": "def to_raw_op(f: types.FunctionType) -> Callable[..., Any]:\n    f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__)\n    return kwarg_only(f)", "docstring": "Make a given op wrapper function `f` raw.\n\nRaw op wrappers can only be called with keyword arguments.\n\nArgs:\nf: An op wrapper function to make raw.\n\nReturns:\nRaw `f`.", "source": "github-repos"}
{"code": "def _map_args(call_node, function):\n    args = call_node.args\n    kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}\n    call_args = tf_inspect.getcallargs(function, *args, **kwds)\n    unexpected_defaults = []\n    for k in call_args:\n        if k not in kwds and call_args[k] not in args and (call_args[k] is not directives.UNSPECIFIED):\n            unexpected_defaults.append(k)\n    if unexpected_defaults:\n        raise ValueError('Unexpected keyword argument values, %s, for function %s' % (zip(unexpected_defaults, [call_args[k] for k in unexpected_defaults]), function))\n    return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}", "docstring": "Maps AST call nodes to the actual function's arguments.\n\nArgs:\ncall_node: ast.Call\nfunction: Callable[..., Any], the actual function matching call_node\nReturns:\nDict[Text, ast.AST], mapping each of the function's argument names to\nthe respective AST node.\nRaises:\nValueError: if the default arguments are not correctly set", "source": "github-repos"}
{"code": "def touch(self, key, expire=0, noreply=None):\n    if (noreply is None):\n        noreply = self.default_noreply\n    key = self.check_key(key)\n    cmd = (((b'touch ' + key) + b' ') + six.text_type(expire).encode('ascii'))\n    if noreply:\n        cmd += b' noreply'\n    cmd += b'\\r\\n'\n    results = self._misc_cmd([cmd], b'touch', noreply)\n    if noreply:\n        return True\n    return (results[0] == b'TOUCHED')", "docstring": "The memcached \"touch\" command.\n\nArgs:\nkey: str, see class docs for details.\nexpire: optional int, number of seconds until the item is expired\nfrom the cache, or zero for no expiry (the default).\nnoreply: optional bool, True to not wait for the reply (defaults to\nself.default_noreply).\n\nReturns:\nTrue if the expiration time was updated, False if the key wasn't\nfound.", "source": "codesearchnet"}
{"code": "def transform(self, program: moderngl.Program, buffer: moderngl.Buffer,\n                  mode=None, vertices=-1, first=0, instances=1):\n        \n        vao = self.instance(program)\n\n        if mode is None:\n            mode = self.mode\n\n        vao.transform(buffer, mode=mode, vertices=vertices, first=first, instances=instances)", "docstring": "Transform vertices. Stores the output in a single buffer.\n\nArgs:\nprogram: The ``moderngl.Program``\nbuffer: The ``moderngl.buffer`` to store the output\n\nKeyword Args:\nmode: Draw mode (for example ``moderngl.POINTS``)\nvertices (int): The number of vertices to transform\nfirst (int): The index of the first vertex to start with\ninstances (int): The number of instances", "source": "juraj-google-style"}
{"code": "def _init_index(root_dir, schema, index_name):\n    \n\n    index_dir = os.path.join(root_dir, index_name)\n    try:\n        if not os.path.exists(index_dir):\n            os.makedirs(index_dir)\n            return create_in(index_dir, schema), index_dir\n        else:\n            return open_dir(index_dir), index_dir\n    except Exception as e:\n        logger.error(\"Init error: failed to open search index at: '{}': {} \".format(index_dir, e))\n        raise", "docstring": "Creates new index or opens existing.\n\nArgs:\nroot_dir (str): root dir where to find or create index.\nschema (whoosh.fields.Schema): schema of the index to create or open.\nindex_name (str): name of the index.\n\nReturns:\ntuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory.", "source": "juraj-google-style"}
{"code": "def build_request_body(type, id, attributes=None, relationships=None):\n    result = {'data': {'type': type}}\n    data = result['data']\n    if (attributes is not None):\n        data['attributes'] = attributes\n    if (relationships is not None):\n        data['relationships'] = relationships\n    if (id is not None):\n        data['id'] = id\n    return result", "docstring": "Build a request body  object.\n\nA body JSON object is used for any of the ``update`` or ``create``\nmethods on :class:`Resource` subclasses. In normal library use you\nshould not have to use this function directly.\n\nArgs:\n\ntype(string): The resource type for the attribute\nid(uuid): The id of the object to update. This may be ``None``\n\nKeyword Args:\n\nattributes(dict): A JSON dictionary of the attributes to set\nrelationships(dict) A JSON dictionary of relationships to set\n\nReturns:\n\nA valid attribute dictionary. Often used in the ``update`` or\n``create`` :class:`Resource`` methods.", "source": "codesearchnet"}
{"code": "def add(self, pattern, function, method=None, type_cast=None):\n    if (not type_cast):\n        type_cast = {}\n    with self._lock:\n        self._data_store.append({'pattern': pattern, 'function': function, 'method': method, 'type_cast': type_cast})", "docstring": "Function for registering a path pattern.\n\nArgs:\npattern (str): Regex pattern to match a certain path.\nfunction (function): Function to associate with this path.\nmethod (str, optional): Usually used to define one of GET, POST,\nPUT, DELETE. You may use whatever fits your situation though.\nDefaults to None.\ntype_cast (dict, optional): Mapping between the param name and\none of `int`, `float` or `bool`. The value reflected by the\nprovided param name will than be casted to the given type.\nDefaults to None.", "source": "codesearchnet"}
{"code": "def tf_initialize(self, x_init, b):\n        \n        if x_init is None:\n            \n            x_init = [tf.zeros(shape=util.shape(t)) for t in b]\n\n        initial_args = super(ConjugateGradient, self).tf_initialize(x_init)\n\n        \n        \n        conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))]\n\n        \n        squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual])\n\n        return initial_args + (conjugate, residual, squared_residual)", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body:\n$x_0, 0, p_0, r_0, r_0^2$.\n\nArgs:\nx_init: Initial solution guess $x_0$, zero vector if None.\nb: The right-hand side $b$ of the system of linear equations.\n\nReturns:\nInitial arguments for tf_step.", "source": "juraj-google-style"}
{"code": "def error_messages(self, driver_id=None):\n        \n        if driver_id is not None:\n            assert isinstance(driver_id, ray.DriverID)\n            return self._error_messages(driver_id)\n\n        error_table_keys = self.redis_client.keys(\n            ray.gcs_utils.TablePrefix_ERROR_INFO_string + \"*\")\n        driver_ids = [\n            key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):]\n            for key in error_table_keys\n        ]\n\n        return {\n            binary_to_hex(driver_id): self._error_messages(\n                ray.DriverID(driver_id))\n            for driver_id in driver_ids\n        }", "docstring": "Get the error messages for all drivers or a specific driver.\n\nArgs:\ndriver_id: The specific driver to get the errors for. If this is\nNone, then this method retrieves the errors for all drivers.\n\nReturns:\nA dictionary mapping driver ID to a list of the error messages for\nthat driver.", "source": "juraj-google-style"}
{"code": "def compute_metrics_cv(self, X, y, **kwargs):\n    results = self.cv_score_mean(X, y)\n    return results", "docstring": "Compute cross-validated metrics.\n\nTrains this model on data X with labels y.\nReturns a list of dict with keys name, scoring_name, value.\n\nArgs:\nX (Union[np.array, pd.DataFrame]): data\ny (Union[np.array, pd.DataFrame, pd.Series]): labels", "source": "codesearchnet"}
{"code": "def get_pipeline_id(app='', name=''):\n    \n    return_id = None\n\n    pipelines = get_all_pipelines(app=app)\n\n    for pipeline in pipelines:\n        LOG.debug('ID of %(name)s: %(id)s', pipeline)\n\n        if pipeline['name'] == name:\n            return_id = pipeline['id']\n            LOG.info('Pipeline %s found, ID: %s', name, return_id)\n            break\n\n    return return_id", "docstring": "Get the ID for Pipeline _name_.\n\nArgs:\napp (str): Name of Spinnaker Application to search.\nname (str): Name of Pipeline to get ID for.\n\nReturns:\nstr: ID of specified Pipeline.\nNone: Pipeline or Spinnaker Appliation not found.", "source": "juraj-google-style"}
{"code": "def __init__(self, executable_path: _PATH = 'default', port: Union[int, str] = 5037, env: Dict = None, service_args: Union[list, tuple] = None) -> None:\n        \n\n        self._service_args = service_args or []\n\n        super(Service, self).__init__(executable_path, port=port, env=env)", "docstring": "Creates a new instance of the Service.\n\nArgs:\nexecutable_path: Path to the AndroidDriver.\nport: Port the service is running on.\nenv: Environment variables.\nservice_args: List of args to pass to the androiddriver service.", "source": "juraj-google-style"}
{"code": "def get_original(self):\n    pk_value = self._get_pk_value()\n    if (isinstance(pk_value, int) and (not self._original)):\n        self._original = self.select().where((self.__class__.id == pk_value)).get()\n    return self._original", "docstring": "Get the original instance of this instance before it's updated.\n\nReturns:\nfleaker.peewee.EventMixin:\nThe original instance of the model.", "source": "codesearchnet"}
{"code": "def __init__(self, table_id=None, active_count=None, lookup_count=None,\n                 matched_count=None):\n        \n        super().__init__()\n        self.table_id = table_id\n        self.active_count = active_count\n        self.lookup_count = lookup_count\n        self.matched_count = matched_count", "docstring": "Create a TableStats with the optional parameters below.\n\nArgs:\ntable_id (int): Identifier of table.  Lower numbered tables are\nconsulted first.\nactive_count (int): Number of active entries.\nlookup_count (int): Number of packets looked up in table.\nmatched_count (int): Number of packets that hit table.", "source": "juraj-google-style"}
{"code": "def press(keys, presses=1, interval=0.0, pause=None, _pause=True):\n    if (type(keys) == str):\n        keys = [keys]\n    else:\n        lowerKeys = []\n        for s in keys:\n            if (len(s) > 1):\n                lowerKeys.append(s.lower())\n            else:\n                lowerKeys.append(s)\n    interval = float(interval)\n    for i in range(presses):\n        for k in keys:\n            _failSafeCheck()\n            platformModule._keyDown(k)\n            platformModule._keyUp(k)\n        time.sleep(interval)\n    _autoPause(pause, _pause)", "docstring": "Performs a keyboard key press down, followed by a release.\n\nArgs:\nkey (str, list): The key to be pressed. The valid names are listed in\nKEYBOARD_KEYS. Can also be a list of such strings.\npresses (integer, optiional): the number of press repetition\n1 by default, for just one press\ninterval (float, optional): How many seconds between each press.\n0.0 by default, for no pause between presses.\npause (float, optional): How many seconds in the end of function process.\nNone by default, for no pause in the end of function process.\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _force_disconnect_action(self, action):\n        \n\n        conn_key = action.data['id']\n        if self._get_connection_state(conn_key) == self.Disconnected:\n            return\n\n        data = self._get_connection(conn_key)\n\n        \n        if data['state'] == self.Connecting:\n            callback = data['action'].data['callback']\n            callback(data['connection_id'], self.id, False, 'Unexpected disconnection')\n        elif data['state'] == self.Disconnecting:\n            callback = data['action'].data['callback']\n            callback(data['connection_id'], self.id, True, None)\n        elif data['state'] == self.InProgress:\n            callback = data['action'].data['callback']\n            if data['microstate'] == 'rpc':\n                callback(False, 'Unexpected disconnection', 0xFF, None)\n            elif data['microstate'] == 'open_interface':\n                callback(False, 'Unexpected disconnection')\n            elif data['microstate'] == 'close_interface':\n                callback(False, 'Unexpected disconnection')\n\n        connection_id = data['connection_id']\n        internal_id = data['internal_id']\n        del self._connections[connection_id]\n        del self._int_connections[internal_id]", "docstring": "Forcibly disconnect a device.\n\nArgs:\naction (ConnectionAction): the action object describing what we are\nforcibly disconnecting", "source": "juraj-google-style"}
{"code": "def update(self, *args, **kwargs):\n        \n        for next_dict in chain(args, (kwargs, )):\n            for k, v in next_dict.items():\n                self[k] = v", "docstring": "Equivalent to the python dict update method.\n\nUpdate the dictionary with the key/value pairs from other, overwriting\nexisting keys.\n\nArgs:\nother (dict): The source of key value pairs to add to headers\nKeyword Args:\nAll keyword arguments are stored in header directly\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def Exponential(cls, mean: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:\n    rate = (1 / mean.tensor)\n    dist = tf.distributions.Exponential(rate)\n    batch = mean.batch\n    if ((not batch) and (batch_size is not None)):\n        t = dist.sample(batch_size)\n        batch = True\n    else:\n        t = dist.sample()\n    scope = mean.scope.as_list()\n    return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Exponential sampling op with given mean parameter.\n\nArgs:\nmean: The mean parameter of the Exponential distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Exponential distribution and a TensorFluent sample drawn from the distribution.", "source": "codesearchnet"}
{"code": "def _read_range(self, start, end=0):\n        \n        try:\n            with _handle_client_exception():\n                return self._client.get_object(*self._client_args, headers=dict(\n                    Range=self._http_range(start, end)))[1]\n\n        except _ClientException as exception:\n            if exception.http_status == 416:\n                \n                return b''\n            raise", "docstring": "Read a range of bytes in stream.\n\nArgs:\nstart (int): Start stream position.\nend (int): End stream position.\n0 To not specify end.\n\nReturns:\nbytes: number of bytes read", "source": "juraj-google-style"}
{"code": "def copy(self, source, dest):\n        \n        if not self.copyable:\n            raise IOError('Driver does not support raster copying')\n        if not isinstance(source, Raster):\n            source = Raster(source)\n            should_close = True\n        else:\n            should_close = False\n        if source.name == dest:\n            raise ValueError(\n                'Input and output are the same location: %s' % source.name)\n        settings = driverdict_tolist(self.settings)\n        ds = self.CreateCopy(dest, source.ds, self.strictmode,\n                             options=settings)\n        if should_close:\n            source.close()\n        return Raster(ds)", "docstring": "Returns a copied Raster instance.\n\nArguments:\nsource -- the source Raster instance or filepath as str\ndest -- destination filepath as str", "source": "juraj-google-style"}
{"code": "def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):\n        \n        super(ListTypeChecker, self).__init__(\n            iter_type=list, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty\n        )", "docstring": "Initialization method.\n\nArgs:\nitem_type (type): the type of the items inside the list.\nmin_length (int): minimum length of the list (included).\nmax_length (int): maximum length of the list (included).\nempty (bool): whether empty list is allowed.", "source": "juraj-google-style"}
{"code": "def get_unconditional_inputs(self, num_samples=1):\n    input_ids = torch.ones((num_samples, 1), device=self.device, dtype=torch.int64) * self.config.vocab_size\n    user_audio_codes = torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64) * self.config.audio_vocab_size\n    moshi_audio_codes = torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64) * self.config.audio_vocab_size\n    attention_mask = torch.ones((num_samples, 1), device=self.device, dtype=torch.long)\n    return MoshiUnconditionalInput(input_ids=input_ids, user_audio_codes=user_audio_codes, moshi_audio_codes=moshi_audio_codes, attention_mask=attention_mask)", "docstring": "Helper function to get null inputs for unconditional generation, enabling the model to be used without the\nfeature extractor or tokenizer.\n\nArgs:\nnum_samples (int, *optional*):\nNumber of audio samples to unconditionally generate.\nmax_new_tokens (int, *optional*):\nNumber of tokens to generate for each sample. More tokens means longer audio samples, at the expense of\nlonger inference (since more audio tokens need to be generated per sample).\n\nExample:\n```python\n>>> from transformers import MoshiForConditionalGeneration\n\n>>> model = MoshiForConditionalGeneration.from_pretrained(\"kmhf/hf-moshiko-pytorch-bf16\")\n\n>>> # get the unconditional (or 'null') inputs for the model\n>>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)\n>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)\n```", "source": "github-repos"}
{"code": "def call_api(self, method, url, headers=None, params=None, data=None, files=None, timeout=None):\n    method = method.upper()\n    headers = (deepcopy(headers) or {})\n    headers['Accept'] = self.accept_type\n    params = (deepcopy(params) or {})\n    data = (data or {})\n    files = (files or {})\n    if (self.username and self.api_key):\n        params.update(self.get_credentials())\n    url = urljoin(self.base_url, url)\n    r = requests.request(method, url, headers=headers, params=params, files=files, data=data, timeout=timeout)\n    return (r, r.status_code)", "docstring": "Call API.\n\nThis returns object containing data, with error details if applicable.\n\nArgs:\nmethod (str): The HTTP method to use.\nurl (str): Resource location relative to the base URL.\nheaders (dict or None): Extra request headers to set.\nparams (dict or None): Query-string parameters.\ndata (dict or None): Request body contents for POST or PUT requests.\nfiles (dict or None: Files to be passed to the request.\ntimeout (int): Maximum time before timing out.\n\nReturns:\nResultParser or ErrorParser.", "source": "codesearchnet"}
{"code": "def zero_fraction(value, name=None):\n    with ops.name_scope(name, 'zero_fraction', [value]):\n        value = ops.convert_to_tensor(value, name='value')\n        size = array_ops.size(value, out_type=dtypes.int64)\n        num_nonzero = tf_cond.cond(size <= dtypes.int32.max, true_fn=lambda: math_ops.cast(_count_nonzero(value, dtype=dtypes.int32), dtype=dtypes.int64), false_fn=lambda: _count_nonzero(value, dtype=dtypes.int64))\n        with ops.name_scope('counts_to_fraction'):\n            num_zero = size - num_nonzero\n            num_zero_float32 = math_ops.cast(num_zero, dtype=dtypes.float32)\n            size_float32 = math_ops.cast(size, dtype=dtypes.float32)\n            zero_fraction_float32 = num_zero_float32 / size_float32\n        return array_ops.identity(zero_fraction_float32, 'fraction')", "docstring": "Returns the fraction of zeros in `value`.\n\nIf `value` is empty, the result is `nan`.\n\nThis is useful in summaries to measure and report sparsity.  For example,\n\n```python\nz = tf.nn.relu(...)\nsumm = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z))\n```\n\nArgs:\nvalue: A tensor of numeric type.\nname: A name for the operation (optional).\n\nReturns:\nThe fraction of zeros in `value`, with type `float32`.", "source": "github-repos"}
{"code": "def extract_resources_from_bundle(bundle: message.Message, *, resource_type: Type[_T]) -> List[_T]:\n    if not fhir_types.is_type_or_profile_of('http:\n        raise TypeError(f'{bundle.DESCRIPTOR.name} is not a type or profile of Bundle.')\n    contained_resource_field = path_utils.camel_case_to_snake_case(resource_type.DESCRIPTOR.name)\n    return [getattr(entry.resource, contained_resource_field) for entry in cast(Any, bundle).entry if entry.resource.HasField(contained_resource_field)]", "docstring": "Returns a list of resources of type `resource_type` from `bundle`.\n\nArgs:\nbundle: The FHIR Bundle to examine.\nresource_type: The message type of the resource to return.\n\nReturns:\nA list of resources of type `resource_type` belonging to the bundle.\n\nRaises:\nTypeError: In the event that `bundle` is not of type \"Bundle\".\nValueError: In the event that a field corresponding to the \"snake_case\" name\nof `resource_type` does not exist on `Bundle.Entry`.", "source": "github-repos"}
{"code": "def __init__(self, statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions):\n    super().__init__(statistics, calib_opts)\n    hist_stats = statistics.histogram_statistics\n    self._bin_width = hist_stats.bin_width\n    self._lower_bound = hist_stats.lower_bound\n    self._hist_freq = np.array(hist_stats.hist_freq)\n    self._num_bins = len(self._hist_freq)\n    self._num_bits = 8\n    first_mid = self._lower_bound + self._bin_width / 2\n    last_mid = first_mid + (self._num_bins - 1) * self._bin_width\n    self._hist_mids = np.linspace(first_mid, last_mid, self._num_bins)", "docstring": "Builds histogram using statistics.histogram_statistics.\n\nlower_bound                                    hist_mid\nv                                            v\n|=========|=========|=========|=========|=========|\nbin width\n\nArgs:\nstatistics: Collected calibration statistics.\ncalib_opts: Calibration options used for calculating min and max.", "source": "github-repos"}
{"code": "def fixup_for_packaged():\n    if exists(join(ROOT, 'PKG-INFO')):\n        if (('--build-js' in sys.argv) or ('--install-js' in sys.argv)):\n            print(SDIST_BUILD_WARNING)\n            if ('--build-js' in sys.argv):\n                sys.argv.remove('--build-js')\n            if ('--install-js' in sys.argv):\n                sys.argv.remove('--install-js')\n        if ('--existing-js' not in sys.argv):\n            sys.argv.append('--existing-js')", "docstring": "If we are installing FROM an sdist, then a pre-built BokehJS is\nalready installed in the python source tree.\n\nThe command line options ``--build-js`` or ``--install-js`` are\nremoved from ``sys.argv``, with a warning.\n\nAlso adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is\nalready packaged.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_parent_of_type(typ, obj):\n    \n    if type(typ) is not text:\n        typ = typ.__name__\n\n    while hasattr(obj, 'parent'):\n        obj = obj.parent\n        if obj.__class__.__name__ == typ:\n            return obj", "docstring": "Finds first object up the parent chain of the given type.\nIf no parent of the given type exists None is returned.\n\nArgs:\ntyp(str or python class): The type of the model object we are\nlooking for.\nobj (model object): Python model object which is the start of the\nsearch process.", "source": "juraj-google-style"}
{"code": "def _common_args(self):\n    return {'metadata': self._metadata.SerializeToString(), 'output_shapes': self._flat_shapes, 'output_types': self._flat_types}", "docstring": "Helper for generating arguments that are common across most dataset ops.\n\nMost dataset op constructors expect `output_shapes` and `output_types`\narguments that represent the flattened structure of an element, as well as a\n`metadata` argument for additional metadata such as user-defined dataset\nname. This helper function generates common attributes as a keyword argument\ndictionary, allowing `Dataset._variant_tensor` implementations to pass\n`**self._common_args` to the op constructor.\n\nReturns:\nA dictionary of keyword arguments that can be passed to a dataset op\nconstructor.", "source": "github-repos"}
{"code": "def config_from_url(u, **kwargs):\n\n    \n\n    path = u.path.lstrip(\"/\").split(\"/\")\n    if len(path) > 2 or not path:\n        raise AssertionError(\"zmq url format: zmq:\n\n    typ = path[0].upper()\n\n    try:\n        topic = path[1]\n    except IndexError as _:\n        topic = ''\n\n    param = dict(urllib.parse.parse_qsl(u.query))\n\n    \n    transport = param.get(\"transport\", \"tcp\")\n\n    _id = \"%s-%s-%s-%s\" % (typ, topic, transport, u.netloc)\n    if kwargs.get(\"prefix\") is not None:\n        _id = \"%s-%s\" % (kwargs.get(\"prefix\"), _id)\n\n    return {\n        \"id\" : _id,\n        \"typ_str\" : typ,\n        \"typ\" : getattr(zmq, typ),\n        \"topic\" : topic,\n        \"transport\" : transport,\n        \"url\" : \"%s:\n    }", "docstring": "Returns dict containing zmq configuration arguments\nparsed from xbahn url\n\nArguments:\n\n- u (urlparse.urlparse result)\n\nReturns:\n\ndict:\n- id (str): connection index key\n- typ_str (str): string representation of zmq socket type\n- typ (int): zmq socket type (PUB, SUB, REQ, REP, PUSH, PULL)\n- topic (str): subscription topic\n- url (str): url to use with zmq's bind function", "source": "juraj-google-style"}
{"code": "def broadcast_to(input: ragged_tensor.RaggedOrDense, shape: dynamic_ragged_shape.DynamicRaggedShape) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]:\n    return dynamic_ragged_shape.broadcast_to(input, shape)", "docstring": "Broadcasts a potentially ragged tensor to a ragged shape.\n\nTiles `input` as necessary to match the given shape.\n\nBehavior is undefined if `input` is not broadcast-compatible with `shape`.\n\nArgs:\ninput: The potentially ragged tensor to broadcast.\nshape: A `DynamicRaggedShape`\n\nReturns:\nA potentially ragged tensor whose values are taken from\n`input`, and whose shape matches `shape`.", "source": "github-repos"}
{"code": "def convert_inference_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:\n    mapping = {dtypes.float32: _types_pb2.FLOAT, dtypes.uint8: _types_pb2.QUANTIZED_UINT8, dtypes.int8: _types_pb2.QUANTIZED_INT8, dtypes.int16: _types_pb2.QUANTIZED_INT16}\n    tflite_type = mapping.get(tf_type)\n    if tflite_type is None:\n        raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))\n    return tflite_type", "docstring": "Convert inference type from tf type to tflite type.\n\nArgs:\ntf_type: TensorFlow type.\nusage: Text describing the reason for invoking this function.\n\nRaises:\nValueError: If `tf_type` is unsupported.\n\nReturns:\ntflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.", "source": "github-repos"}
{"code": "def set_global(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):\n    cls.user_agent = cls._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)", "docstring": "Set global user agent string\n\nArgs:\nuser_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.\nuser_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.\nuser_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def list_knowledge_bases(project_id):\n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.KnowledgeBasesClient()\n    project_path = client.project_path(project_id)\n    print('Knowledge Bases for: {}'.format(project_id))\n    for knowledge_base in client.list_knowledge_bases(project_path):\n        print(' - Display Name: {}'.format(knowledge_base.display_name))\n        print(' - Knowledge ID: {}\\n'.format(knowledge_base.name))", "docstring": "Lists the Knowledge bases belonging to a project.\n\nArgs:\nproject_id: The GCP project linked with the agent.", "source": "codesearchnet"}
{"code": "def db_en020(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_en020`'.format(value))\n\n        self._db_en020 = value", "docstring": "Corresponds to IDD Field `db_en020`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_en020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def apply_regression(input_, regression_fn, target, regression_args=(), regression_kwargs=None, name=PROVIDED, loss_weight=None, per_example_weights=None):\n    if (regression_kwargs is None):\n        regression_kwargs = {}\n    if ((name is not None) and ('name' not in regression_kwargs)):\n        regression_kwargs['name'] = name\n    elif (name is None):\n        name = input_.tensor.op.name\n    tensor = input_.tensor\n    loss = regression_fn(tensor, target, *regression_args, **regression_kwargs)\n    if (loss_weight is not None):\n        loss *= loss_weight\n    if (per_example_weights is not None):\n        per_example_weights = _convert_and_assert_per_example_weights_compatible(input_, per_example_weights, dtype=loss.dtype)\n        loss *= per_example_weights\n    if (name is None):\n        name = loss.op.name\n    if (tensor.get_shape()[0].value is not None):\n        avg_loss = (tf.reduce_sum(loss) / tensor.get_shape()[0].value)\n    else:\n        avg_loss = tf.reduce_mean(loss)\n    return input_.add_loss(avg_loss, name=name)", "docstring": "Applies the given regression and adds the loss to the bookkeeper.\n\nThis does not change tensor.\nArgs:\ninput_: A Tensor or a Pretty Tensor holding the input.\nregression_fn: A function that takes (in order) tensor, labels.\ntarget: The targe of the regression.\nregression_args: Other arguments for the regression.\nregression_kwargs: Keyword args for the regression.\nname: The name, also added to regression_kwargs.\nloss_weight: A scalar multiplier for the loss.\nper_example_weights: A Tensor with a weight per example.\nReturns:\nThe loss tensor's name.\nRaises:\nValueError: If the target is not a compatible shape with input_.", "source": "codesearchnet"}
{"code": "def _GetParsersFromPresetCategory(cls, category):\n    preset_definition = cls._presets.GetPresetByName(category)\n    if (preset_definition is None):\n        return []\n    preset_names = cls._presets.GetNames()\n    parser_names = set()\n    for element_name in preset_definition.parsers:\n        if (element_name in preset_names):\n            category_parser_names = cls._GetParsersFromPresetCategory(element_name)\n            parser_names.update(category_parser_names)\n        else:\n            parser_names.add(element_name)\n    return sorted(parser_names)", "docstring": "Retrieves the parser names of specific preset category.\n\nArgs:\ncategory (str): parser preset categories.\n\nReturns:\nlist[str]: parser names in alphabetical order.", "source": "codesearchnet"}
{"code": "def flat_values(self):\n    rt_values = self.values\n    while isinstance(rt_values, RaggedTensor):\n        rt_values = rt_values.values\n    return rt_values", "docstring": "The innermost `values` tensor for this ragged tensor.\n\nConcretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is\n`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.\n\nConceptually, `flat_values` is the tensor formed by flattening the\noutermost dimension and all of the ragged dimensions into a single\ndimension.\n\n`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`\n(where `nvals` is the number of items in the flattened dimensions).\n\nReturns:\nA `Tensor`.\n\n#### Example:\n\n>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])\n>>> print(rt.flat_values)\ntf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)", "source": "github-repos"}
{"code": "def __register_notifiers(self):\n    notifiers = {}\n    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins']:\n        cls = entry_point.load()\n        notifiers[cls.notifier_type] = cls.validation\n    return notifiers", "docstring": "Lists all notifiers to be able to provide metadata for the frontend\n\nReturns:\n`list` of `dict`", "source": "codesearchnet"}
{"code": "def __init__(self, connections, picker_class=RoundRobinPicker):\n        \n        self.connections = connections\n        self.picker = picker_class()", "docstring": "Initializes a :class:`~bigchaindb_driver.pool.Pool` instance.\n\nArgs:\nconnections (list): List of\n:class:`~bigchaindb_driver.connection.Connection` instances.", "source": "juraj-google-style"}
{"code": "def connection_delay(self, node_id):\n        \n        conn = self._conns.get(node_id)\n        if conn is None:\n            return 0\n        return conn.connection_delay()", "docstring": "Return the number of milliseconds to wait, based on the connection\nstate, before attempting to send data. When disconnected, this respects\nthe reconnect backoff time. When connecting, returns 0 to allow\nnon-blocking connect to finish. When connected, returns a very large\nnumber to handle slow/stalled connections.\n\nArguments:\nnode_id (int): The id of the node to check\n\nReturns:\nint: The number of milliseconds to wait.", "source": "juraj-google-style"}
{"code": "def get(self, profile_id):\n        \n        if profile_id not in self._profiles:\n            try:\n                self._profiles[profile_id] = self._get_profile(profile_id)\n            except (ValueError,\n                    IOError) as e:\n                six.raise_from(RegistryError(e), e)\n        return self._profiles[profile_id]", "docstring": "Returns the profile with the received ID as a dict\n\nIf a local copy of the profile exists, it'll be returned. If not, it'll\nbe downloaded from the web. The results are cached, so any subsequent\ncalls won't hit the filesystem or the web.\n\nArgs:\nprofile_id (str): The ID of the profile you want.\n\nRaises:\nRegistryError: If there was some problem opening the profile file\nor its format was incorrect.", "source": "juraj-google-style"}
{"code": "def run(self, group_x=1, group_y=1, group_z=1) -> None:\n        \n\n        return self.mglo.run(group_x, group_y, group_z)", "docstring": "Run the compute shader.\n\nArgs:\ngroup_x (int): The number of work groups to be launched in the X dimension.\ngroup_y (int): The number of work groups to be launched in the Y dimension.\ngroup_z (int): The number of work groups to be launched in the Z dimension.", "source": "juraj-google-style"}
{"code": "def _colourise(text: str, colour: str) -> str:\n    \n    if COLOUR:\n        text = style(text, fg=colour, bold=True)\n    return text", "docstring": "Colour text, if possible.\n\nArgs:\ntext: Text to colourise\ncolour: Colour to display text in\nReturns:\nColourised text, if possible", "source": "juraj-google-style"}
{"code": "def connect(self, component):\n        \n        if not isinstance(component, ThreadPool):\n            raise TypeError('\"component\" must be a ThreadPool object')\n        component.in_queue = self.out_queue\n        return component", "docstring": "Connect two ThreadPools.\n\nThe ``in_queue`` of the second pool will be set as the ``out_queue`` of\nthe current pool, thus all the output will be input to the second pool.\n\nArgs:\ncomponent (ThreadPool): the ThreadPool to be connected.\nReturns:\nThreadPool: the modified second ThreadPool.", "source": "juraj-google-style"}
{"code": "def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs):\n    capture_handler_conf = kwargs\n    if (not log_dir_path):\n        log_dir_path = self._mngr_conf['root_log_directory']\n    log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path))\n    capture_handler_conf['log_dir'] = log_dir_path\n    capture_handler_conf['name'] = name\n    if ('rotate_log' not in capture_handler_conf):\n        capture_handler_conf['rotate_log'] = True\n    transforms = []\n    if ('pre_write_transforms' in capture_handler_conf):\n        for transform in capture_handler_conf['pre_write_transforms']:\n            if isinstance(transform, str):\n                if globals().has_key(transform):\n                    transforms.append(globals().get(transform))\n                else:\n                    msg = 'Unable to load data transformation \"{}\" for handler \"{}\"'.format(transform, capture_handler_conf['name'])\n                    log.warn(msg)\n            elif hasattr(transform, '__call__'):\n                transforms.append(transform)\n            else:\n                msg = 'Unable to determine how to load data transform \"{}\"'.format(transform)\n                log.warn(msg)\n    capture_handler_conf['pre_write_transforms'] = transforms\n    address_key = str(address)\n    if (address_key in self._stream_capturers):\n        capturer = self._stream_capturers[address_key][0]\n        capturer.add_handler(capture_handler_conf)\n        return\n    socket_logger = SocketStreamCapturer(capture_handler_conf, address, conn_type)\n    greenlet = gevent.spawn(socket_logger.socket_monitor_loop)\n    self._stream_capturers[address_key] = (socket_logger, greenlet)\n    self._pool.add(greenlet)", "docstring": "Add a new stream capturer to the manager.\n\nAdd a new stream capturer to the manager with the provided configuration\ndetails. If an existing capturer is monitoring the same address the\nnew handler will be added to it.\n\nArgs:\nname:\nA string defining the new capturer's name.\n\naddress:\nA tuple containing address data for the capturer. Check the\n:class:`SocketStreamCapturer` documentation for what is\nrequired.\n\nconn_type:\nA string defining the connection type. Check the\n:class:`SocketStreamCapturer` documentation for a list of valid\noptions.\n\nlog_dir_path:\nAn optional path defining the directory where the\ncapturer should write its files. If this isn't provided the root\nlog directory from the manager configuration is used.", "source": "codesearchnet"}
{"code": "def with_rank_at_least(self, rank):\n    if ((self.ndims is not None) and (self.ndims < rank)):\n        raise ValueError(('Shape %s must have rank at least %d' % (self, rank)))\n    else:\n        return self", "docstring": "Returns a shape based on `self` with at least the given rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with at least the given\nrank.\n\nRaises:\nValueError: If `self` does not represent a shape with at least the given\n`rank`.", "source": "codesearchnet"}
{"code": "def _make_estimator_serving_session(estimator, serving_input_fn, checkpoint_path):\n    with tf.Graph().as_default() as g:\n        mode = tf_v1.estimator.ModeKeys.PREDICT\n        tf_v1.train.create_global_step(g)\n        tf_v1.set_random_seed(estimator.config.tf_random_seed)\n        serving_input_receiver = serving_input_fn()\n        estimator_spec = estimator.model_fn(features=serving_input_receiver.features, labels=None, mode=mode, config=estimator.config)\n        session = tf_v1.Session(config=estimator._session_config)\n        with session.as_default():\n            saver_for_restore = (estimator_spec.scaffold.saver or tf_v1.train.Saver(sharded=True))\n            saver_for_restore.restore(session, checkpoint_path)\n        return session", "docstring": "Returns a session constructed using `estimator` and `serving_input_fn`.\n\nThe Estimator API does not provide an API to construct a graph and session,\nmaking it necessary for this function to replicate how an estimator builds\na graph.\n\nThis code is based on `Estimator.export_savedmodel` (another function that\nhas to replicate how an estimator builds a graph).\n\nArgs:\nestimator: tf.Estimator to use when constructing the session.\nserving_input_fn: A function that takes no arguments and returns a\n`ServingInputReceiver`. It is used to construct the session.\ncheckpoint_path: The checkpoint path to restore in the session. Must not\nbe None.", "source": "codesearchnet"}
{"code": "def median(data):\n    ordered = sorted(data)\n    length = len(ordered)\n    if ((length % 2) == 0):\n        return ((ordered[(math.floor((length / 2)) - 1)] + ordered[math.floor((length / 2))]) / 2.0)\n    elif ((length % 2) != 0):\n        return ordered[math.floor((length / 2))]", "docstring": "Calculates  the median of a list of integers or floating point numbers.\n\nArgs:\ndata: A list of integers or floating point numbers\n\nReturns:\nSorts the list numerically and returns the middle number if the list has an odd number\nof items. If the list contains an even number of items the mean of the two middle numbers\nis returned.", "source": "codesearchnet"}
{"code": "def reset_port_protection(self, id_or_uri, timeout=-1):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/resetportprotection\"\n        return self._client.update_with_zero_body(uri, timeout)", "docstring": "Triggers a reset of port protection.\n\nCause port protection to be reset on all the interconnects of the logical interconnect that matches ID.\n\nArgs:\nid_or_uri: Can be either the interconnect id or the interconnect uri.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: The interconnect.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A Big Bird sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def ungzip(file_path, extract_path: Path = None):\n    \n    CHUNK = 16 * 1024\n    file_path = Path(file_path)\n    extract_path = extract_path or file_path.with_suffix('')\n\n    with gzip.open(file_path, 'rb') as fin, extract_path.open('wb') as fout:\n        while True:\n            block = fin.read(CHUNK)\n            if not block:\n                break\n            fout.write(block)", "docstring": "Simple .gz archive extractor\n\nArgs:\nfile_path: path to the gzip file to be extracted\nextract_path: path where the file will be extracted", "source": "juraj-google-style"}
{"code": "def _illegal_character(c, ctx, message=''):\n    container_type = (((ctx.container.ion_type is None) and 'top-level') or ctx.container.ion_type.name)\n    value_type = (((ctx.ion_type is None) and 'unknown') or ctx.ion_type.name)\n    if (c is None):\n        header = 'Illegal token'\n    else:\n        c = ('EOF' if BufferQueue.is_eof(c) else _chr(c))\n        header = ('Illegal character %s' % (c,))\n    raise IonException(('%s at position %d in %s value contained in %s. %s Pending value: %s' % (header, ctx.queue.position, value_type, container_type, message, ctx.value)))", "docstring": "Raises an IonException upon encountering the given illegal character in the given context.\n\nArgs:\nc (int|None): Ordinal of the illegal character.\nctx (_HandlerContext):  Context in which the illegal character was encountered.\nmessage (Optional[str]): Additional information, as necessary.", "source": "codesearchnet"}
{"code": "def ctype_to_dtype(cl_type, mot_float_type='float'):\n    if is_vector_ctype(cl_type):\n        (raw_type, vector_length) = split_vector_ctype(cl_type)\n        if (raw_type == 'mot_float_type'):\n            if is_vector_ctype(mot_float_type):\n                (raw_type, _) = split_vector_ctype(mot_float_type)\n            else:\n                raw_type = mot_float_type\n        vector_type = (raw_type + str(vector_length))\n        return getattr(cl_array.vec, vector_type)\n    else:\n        if (cl_type == 'mot_float_type'):\n            cl_type = mot_float_type\n        data_types = [('char', np.int8), ('uchar', np.uint8), ('short', np.int16), ('ushort', np.uint16), ('int', np.int32), ('uint', np.uint32), ('long', np.int64), ('ulong', np.uint64), ('float', np.float32), ('double', np.float64)]\n        for (ctype, dtype) in data_types:\n            if (ctype == cl_type):\n                return dtype", "docstring": "Get the numpy dtype of the given cl_type string.\n\nArgs:\ncl_type (str): the CL data type to match, for example 'float' or 'float4'.\nmot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively.\n\nReturns:\ndtype: the numpy datatype", "source": "codesearchnet"}
{"code": "def decode_body(headers: MutableMapping, body: bytes) -> dict:\n    \n\n    type_, encoding = parse_content_type(headers)\n    decoded_body = body.decode(encoding)\n\n    \n    if type_ == \"application/json\":\n        payload = json.loads(decoded_body)\n    else:\n        if decoded_body == \"ok\":\n            payload = {\"ok\": True}\n        else:\n            payload = {\"ok\": False, \"data\": decoded_body}\n\n    return payload", "docstring": "Decode the response body\n\nFor 'application/json' content-type load the body as a dictionary\n\nArgs:\nheaders: Response headers\nbody: Response body\n\nReturns:\ndecoded body", "source": "juraj-google-style"}
{"code": "def top_prior(name, z_shape, learn_prior='normal', temperature=1.0):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        h = tf.zeros(z_shape, dtype=tf.float32)\n        if (learn_prior == 'normal'):\n            prior_dist = tfp.distributions.Normal(h, tf.exp(h))\n        elif (learn_prior == 'single_conv'):\n            prior_dist = single_conv_dist('top_learn_prior', h)\n        else:\n            raise ValueError(('Expected learn_prior to be normal or single_conv got %s' % learn_prior))\n        return TemperedNormal(prior_dist.loc, prior_dist.scale, temperature)", "docstring": "Unconditional prior distribution.\n\nArgs:\nname: variable scope\nz_shape: Shape of the mean / scale of the prior distribution.\nlearn_prior: Possible options are \"normal\" and \"single_conv\".\nIf set to \"single_conv\", the gaussian is parametrized by a\nsingle convolutional layer whose input are an array of zeros\nand initialized such that the mean and std are zero and one.\nIf set to \"normal\", the prior is just a Gaussian with zero\nmean and unit variance.\ntemperature: Temperature with which to sample from the Gaussian.\nReturns:\nobjective: 1-D Tensor shape=(batch_size,) summed across spatial components.\nRaises:\nValueError: If learn_prior not in \"normal\" or \"single_conv\"", "source": "codesearchnet"}
{"code": "def main(argv):\n    parser = _BuildParser()\n    args = parser.parse_args(argv[1:])\n    style_config = args.style\n    if args.style_help:\n        _PrintHelp(args)\n        return 0\n    if args.lines and len(args.files) > 1:\n        parser.error('cannot use -l/--lines with more than one file')\n    lines = _GetLines(args.lines) if args.lines is not None else None\n    if not args.files:\n        if args.in_place or args.diff:\n            parser.error('cannot use --in-place or --diff flags when reading from stdin')\n        original_source = []\n        while True:\n            if hasattr(sys.stdin, 'closed') and sys.stdin.closed:\n                break\n            try:\n                original_source.append(_raw_input())\n            except EOFError:\n                break\n            except KeyboardInterrupt:\n                return 1\n        if style_config is None and (not args.no_local_style):\n            style_config = file_resources.GetDefaultStyleForDir(os.getcwd())\n        source = [line.rstrip() for line in original_source]\n        source[0] = _removeBOM(source[0])\n        try:\n            reformatted_source, _ = yapf_api.FormatCode(str('\\n'.join(source).replace('\\r\\n', '\\n') + '\\n'), filename='<stdin>', style_config=style_config, lines=lines)\n        except errors.YapfError:\n            raise\n        except Exception as e:\n            raise errors.YapfError(errors.FormatErrorMsg(e))\n        file_resources.WriteReformattedCode('<stdout>', reformatted_source)\n        return 0\n    exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir(os.getcwd())\n    files = file_resources.GetCommandLineFiles(args.files, args.recursive, (args.exclude or []) + exclude_patterns_from_ignore_file)\n    if not files:\n        raise errors.YapfError('input filenames did not match any python files')\n    changed = FormatFiles(files, lines, style_config=args.style, no_local_style=args.no_local_style, in_place=args.in_place, print_diff=args.diff, parallel=args.parallel, quiet=args.quiet, verbose=args.verbose, print_modified=args.print_modified)\n    return 1 if changed and (args.diff or args.quiet) else 0", "docstring": "Main program.\n\nArguments:\nargv: command-line arguments, such as sys.argv (including the program name\nin argv[0]).\n\nReturns:\nZero on successful program termination, non-zero otherwise.\nWith --diff: zero if there were no changes, non-zero otherwise.\n\nRaises:\nYapfError: if none of the supplied files were Python files.", "source": "github-repos"}
{"code": "def update_pos(self, pos_id, name, pos_type, location=None):\n        \n        arguments = {'name': name,\n                     'type': pos_type,\n                     'location': location}\n        return self.do_req('PUT',\n                           self.merchant_api_base_url + '/pos/' +\n                           pos_id + '/', arguments)", "docstring": "Update POS resource. Returns the raw response object.\n\nArguments:\npos_id:\nPOS id as chosen on registration\nname:\nHuman-readable name of the POS, used for displaying payment\nrequest origin to end user\npos_type:\nPOS type\nlocation:\nMerchant location", "source": "juraj-google-style"}
{"code": "def defaultannotator(self, annotationtype, set=None):\n    if (inspect.isclass(annotationtype) or isinstance(annotationtype, AbstractElement)):\n        annotationtype = annotationtype.ANNOTATIONTYPE\n    if (not set):\n        set = self.defaultset(annotationtype)\n    try:\n        return self.annotationdefaults[annotationtype][set]['annotator']\n    except KeyError:\n        raise NoDefaultError", "docstring": "Obtain the default annotator for the specified annotation type and set.\n\nArguments:\nannotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.\nset (str): the set, should formally be a URL pointing to the set definition\n\nReturns:\nthe set (str)\n\nRaises:\n:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)", "source": "codesearchnet"}
{"code": "def from_args_and_dict(cls, args, processor_dict: dict[str, Any], **kwargs):\n    processor_dict = processor_dict.copy()\n    return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n    if 'processor_class' in processor_dict:\n        del processor_dict['processor_class']\n    if 'auto_map' in processor_dict:\n        del processor_dict['auto_map']\n    processor_dict.update(kwargs)\n    accepted_args_and_kwargs = cls.__init__.__code__.co_varnames[:cls.__init__.__code__.co_argcount][1:]\n    unused_kwargs, valid_kwargs = cls.validate_init_kwargs(processor_config=processor_dict, valid_kwargs=accepted_args_and_kwargs)\n    args_to_remove = [i for i, arg in enumerate(accepted_args_and_kwargs) if arg in processor_dict]\n    args = [arg for i, arg in enumerate(args) if i not in args_to_remove]\n    processor = cls(*args, **valid_kwargs)\n    logger.info(f'Processor {processor}')\n    if return_unused_kwargs:\n        return (processor, unused_kwargs)\n    else:\n        return processor", "docstring": "Instantiates a type of [`~processing_utils.ProcessingMixin`] from a Python dictionary of parameters.\n\nArgs:\nprocessor_dict (`Dict[str, Any]`):\nDictionary that will be used to instantiate the processor object. Such a dictionary can be\nretrieved from a pretrained checkpoint by leveraging the\n[`~processing_utils.ProcessingMixin.to_dict`] method.\nkwargs (`Dict[str, Any]`):\nAdditional parameters from which to initialize the processor object.\n\nReturns:\n[`~processing_utils.ProcessingMixin`]: The processor object instantiated from those\nparameters.", "source": "github-repos"}
{"code": "def lines(start=None, end=None, reverse=False, selection=False):\n    if selection:\n        (start, end) = get_selection()\n    else:\n        (start, end) = fix_addresses(start, end)\n    if (not reverse):\n        item = idaapi.get_item_head(start)\n        while (item < end):\n            (yield Line(item))\n            item += idaapi.get_item_size(item)\n    else:\n        item = idaapi.get_item_head((end - 1))\n        while (item >= start):\n            (yield Line(item))\n            item = idaapi.get_item_head((item - 1))", "docstring": "Iterate lines in range.\n\nArgs:\nstart: Starting address, start of IDB if `None`.\nend: End address, end of IDB if `None`.\nreverse: Set to true to iterate in reverse order.\nselection: If set to True, replaces start and end with current selection.\n\nReturns:\niterator of `Line` objects.", "source": "codesearchnet"}
{"code": "def transfer(self, payment_id, data={}, **kwargs):\n    url = '{}/{}/transfers'.format(self.base_url, payment_id)\n    return self.post_url(url, data, **kwargs)", "docstring": "Create Transfer for given Payment Id\n\nArgs:\npayment_id : Id for which payment object has to be transfered\n\nReturns:\nPayment dict after getting transfered", "source": "codesearchnet"}
{"code": "def _gradient(self, diff, d, coords):\n        \n        denom = np.copy(d)\n        denom[denom == 0] = 1e-5\n\n        with np.errstate(divide='ignore', invalid='ignore'):\n            K = -2 * diff / denom\n\n        K[np.isnan(K)] = 0\n\n        g = np.empty_like(coords)\n        for n in range(self.n):\n            for i in range(self.m):\n                \n                \n                \n                \n                g[i, n] = ((coords[i, n] - coords[:, n]) * K[i, :]).sum()\n\n        return g", "docstring": "Compute the gradient.\n\nArgs:\ndiff (`array-like`): [`m`, `m`] matrix. `D` - `d`\nd (`array-like`): [`m`, `m`] matrix.\ncoords (`array-like`): [`m`, `n`] matrix.\n\nReturns:\n`np.array`: Gradient, shape [`m`, `n`].", "source": "juraj-google-style"}
{"code": "def capture(self, data_buffer=None, log_time=False, debug_print=False, retry_reset=True):\n    start = time.time()\n    if (data_buffer is None):\n        data_buffer = np.ndarray((Lepton.ROWS, Lepton.COLS, 1), dtype=np.uint16)\n    elif ((data_buffer.ndim < 2) or (data_buffer.shape[0] < Lepton.ROWS) or (data_buffer.shape[1] < Lepton.COLS) or (data_buffer.itemsize < 2)):\n        raise Exception('Provided input array not large enough')\n    while True:\n        Lepton.capture_segment(self.__handle, self.__xmit_buf, self.__msg_size, self.__capture_buf[0])\n        if (retry_reset and ((self.__capture_buf[(20, 0)] & 65295) != 5120)):\n            if debug_print:\n                print('Garbage frame number reset waiting...')\n            time.sleep(0.185)\n        else:\n            break\n    self.__capture_buf.byteswap(True)\n    data_buffer[(:, :)] = self.__capture_buf[(:, 2:)]\n    end = time.time()\n    if debug_print:\n        print('---')\n        for i in range(Lepton.ROWS):\n            fid = self.__capture_buf[(i, 0, 0)]\n            crc = self.__capture_buf[(i, 1, 0)]\n            fnum = (fid & 4095)\n            print('0x{0:04x} 0x{1:04x} : Row {2:2} : crc={1}'.format(fid, crc, fnum))\n        print('---')\n    if log_time:\n        print('frame processed int {0}s, {1}hz'.format((end - start), (1.0 / (end - start))))\n    return (data_buffer, data_buffer.sum())", "docstring": "Capture a frame of data.\n\nCaptures 80x60 uint16 array of non-normalized (raw 12-bit) data. Returns that frame and a frame_id (which\nis currently just the sum of all pixels). The Lepton will return multiple, identical frames at a rate of up\nto ~27 Hz, with unique frames at only ~9 Hz, so the frame_id can help you from doing additional work\nprocessing duplicate frames.\n\nArgs:\ndata_buffer (numpy.ndarray): Optional. If specified, should be ``(60,80,1)`` with `dtype`=``numpy.uint16``.\n\nReturns:\ntuple consisting of (data_buffer, frame_id)", "source": "codesearchnet"}
{"code": "def are_equal(self, sp1, sp2):\n    set1 = set(sp1.elements)\n    set2 = set(sp2.elements)\n    return (set1.issubset(set2) or set2.issubset(set1))", "docstring": "True if there is some overlap in composition between the species\n\nArgs:\nsp1: First species. A dict of {specie/element: amt} as per the\ndefinition in Site and PeriodicSite.\nsp2: Second species. A dict of {specie/element: amt} as per the\ndefinition in Site and PeriodicSite.\n\nReturns:\nTrue always", "source": "codesearchnet"}
{"code": "def open(self, mode='r', buffering=(- 1), encoding=None, errors=None, newline=None):\n    if self._closed:\n        self._raise_closed()\n    return FakeFileOpen(self.filesystem, use_io=True)(self._path(), mode, buffering, encoding, errors, newline)", "docstring": "Open the file pointed by this path and return a fake file object.\n\nRaises:\nIOError: if the target object is a directory, the path is invalid\nor permission is denied.", "source": "codesearchnet"}
{"code": "def assert_title(self, title, **kwargs):\n        \n\n        query = TitleQuery(title, **kwargs)\n\n        @self.synchronize(wait=query.wait)\n        def assert_title():\n            if not query.resolves_for(self):\n                raise ExpectationNotMet(query.failure_message)\n\n            return True\n\n        return assert_title()", "docstring": "Asserts that the page has the given title.\n\nArgs:\ntitle (str | RegexObject): The string or regex that the title should match.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "juraj-google-style"}
{"code": "def _min_max_filter(t: List, min_v: str, max_v: str) -> bool:\n        \n\n        def tofloat(value):\n            try:\n                float(value)\n                return float(value)\n            except ValueError:\n                return False\n\n        for a_token in t:\n            if not tofloat(a_token.text):\n                return False\n            else:\n                if min_v and tofloat(min_v):\n                    this_v = tofloat(a_token.text)\n                    if this_v < tofloat(min_v):\n                        return False\n                if max_v and tofloat(max_v):\n                    this_v = tofloat(a_token.text)\n                    if this_v > tofloat(max_v):\n                        return False\n\n        return True", "docstring": "Min and Max filter\nArgs:\nt: List, list of tokens\nmin_v: str\nmax_v: str\n\nReturns: bool", "source": "juraj-google-style"}
{"code": "def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):\n    source_paths = _get_pdf_filenames_at(source_directory)\n    (yield len(source_paths))\n    for source_path in source_paths:\n        output = os.path.join(output_directory, os.path.basename(source_path))\n        compress_pdf(source_path, output, ghostscript_binary)\n        (yield output)", "docstring": "Compress all PDF files in the current directory and place the output in the\ngiven output directory. This is a generator function that first yields the amount\nof files to be compressed, and then yields the output path of each file.\n\nArgs:\nsource_directory (str): Filepath to the source directory.\noutput_directory (str): Filepath to the output directory.\nghostscript_binary (str): Name of the Ghostscript binary.\n\nReturns:\nlist(str): paths to outputs.", "source": "codesearchnet"}
{"code": "def mean_minimum_centroid_distance(item_a, item_b, max_value):\n    centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])\n    centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])\n    distance_matrix = (((centroids_a[(:, 0:1)] - centroids_b.T[0:1]) ** 2) + ((centroids_a[(:, 1:)] - centroids_b.T[1:]) ** 2))\n    mean_min_distances = np.sqrt((distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean()))\n    return (np.minimum(mean_min_distances, max_value) / float(max_value))", "docstring": "RMS difference in the minimum distances from the centroids of one track to the centroids of another track\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def random_dna(self, random_generator: Union[types.ModuleType, random.Random, None]=None, attach_spec: bool=True, previous_dna: Optional['DNA']=None) -> 'DNA':\n    random_generator = random_generator or random\n    dna = self._random_dna(random_generator, previous_dna)\n    if attach_spec:\n        dna.use_spec(self)\n    return dna", "docstring": "Returns a random DNA based on current spec.\n\nArgs:\nrandom_generator: An optional Random object. If None, the global random\nmodule will be used.\nattach_spec: If True, current spec will be attached to the returned DNA.\nprevious_dna: An optional DNA representing previous DNA. This field might\nbe useful for generating stateful random DNAs.\n\nReturns:\nA random DNA based on current spec.", "source": "github-repos"}
{"code": "def _CheckStatusWorkerProcess(self, pid):\n    \n    \n    \n    self._RaiseIfNotRegistered(pid)\n\n    process = self._processes_per_pid[pid]\n\n    process_status = self._QueryProcessStatus(process)\n    if process_status is None:\n      process_is_alive = False\n    else:\n      process_is_alive = True\n\n    process_information = self._process_information_per_pid[pid]\n    used_memory = process_information.GetUsedMemory() or 0\n\n    if self._worker_memory_limit and used_memory > self._worker_memory_limit:\n      logger.warning((\n          'Process: {0:s} (PID: {1:d}) killed because it exceeded the '\n          'memory limit: {2:d}.').format(\n              process.name, pid, self._worker_memory_limit))\n      self._KillProcess(pid)\n\n    if isinstance(process_status, dict):\n      self._rpc_errors_per_pid[pid] = 0\n      status_indicator = process_status.get('processing_status', None)\n\n    else:\n      rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1\n      self._rpc_errors_per_pid[pid] = rpc_errors\n\n      if rpc_errors > self._MAXIMUM_RPC_ERRORS:\n        process_is_alive = False\n\n      if process_is_alive:\n        rpc_port = process.rpc_port.value\n        logger.warning((\n            'Unable to retrieve process: {0:s} (PID: {1:d}) status via '\n            'RPC socket: http:\n                process.name, pid, rpc_port))\n\n        processing_status_string = 'RPC error'\n        status_indicator = definitions.STATUS_INDICATOR_RUNNING\n      else:\n        processing_status_string = 'killed'\n        status_indicator = definitions.STATUS_INDICATOR_KILLED\n\n      process_status = {\n          'processing_status': processing_status_string}\n\n    self._UpdateProcessingStatus(pid, process_status, used_memory)\n\n    \n    \n    for worker_status in self._processing_status.workers_status:\n      if worker_status.pid == pid:\n        status_indicator = worker_status.status\n        break\n\n    if status_indicator in definitions.ERROR_STATUS_INDICATORS:\n      logger.error((\n          'Process {0:s} (PID: {1:d}) is not functioning correctly. '\n          'Status code: {2!s}.').format(process.name, pid, status_indicator))\n\n      self._TerminateProcessByPid(pid)\n\n      replacement_process = None\n      for replacement_process_attempt in range(\n          self._MAXIMUM_REPLACEMENT_RETRIES):\n        logger.info((\n            'Attempt: {0:d} to start replacement worker process for '\n            '{1:s}').format(replacement_process_attempt + 1, process.name))\n\n        replacement_process = self._StartWorkerProcess(\n            process.name, self._storage_writer)\n        if replacement_process:\n          break\n\n        time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)\n\n      if not replacement_process:\n        logger.error(\n            'Unable to create replacement worker process for: {0:s}'.format(\n                process.name))", "docstring": "Checks the status of a worker process.\n\nIf a worker process is not responding the process is terminated and\na replacement process is started.\n\nArgs:\npid (int): process ID (PID) of a registered worker process.\n\nRaises:\nKeyError: if the process is not registered with the engine.", "source": "juraj-google-style"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration\n\n>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\"facebook/blenderbot-400M-distill\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot-400M-distill\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = 
jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def _get_tags(self):\n    tags = self.data.get('tags', None)\n    if (not tags):\n        return list()\n    return [x['name'] for x in tags]", "docstring": "Return the dataset's list of tags\n\nReturns:\nList[str]: list of tags or [] if there are none", "source": "codesearchnet"}
{"code": "def obj_with_unit(obj, unit):\n    \n    unit_type = _UNAME2UTYPE[unit]\n\n    if isinstance(obj, numbers.Number):\n        return FloatWithUnit(obj, unit=unit, unit_type=unit_type)\n    elif isinstance(obj, collections.Mapping):\n        return {k: obj_with_unit(v, unit) for k,v in obj.items()}\n    else:\n        return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)", "docstring": "Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of\nobjects with units if obj is a dict, else an instance of\n`ArrayWithFloatWithUnit`.\n\nArgs:\nunit: Specific units (eV, Ha, m, ang, etc.).", "source": "juraj-google-style"}
{"code": "def power(self, n):\n        \n        if n > 0:\n            return super().power(n)\n        return Kraus(SuperOp(self).power(n))", "docstring": "The matrix power of the channel.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nKraus: the matrix power of the SuperOp converted to a Kraus channel.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "juraj-google-style"}
{"code": "def _compare(self, other: Any, comparison: Callable[[Any, Any], bool]) -> bool:\n    if isinstance(other, str):\n        return comparison(self.path, other)\n    if isinstance(other, KeyPath):\n        return comparison(tuple(map(KeyPath._KeyComparisonWrapper, self.keys)), tuple(map(KeyPath._KeyComparisonWrapper, other.keys)))\n    raise TypeError(f\"Comparison is not supported between instances of 'KeyPath' and {type(other).__name__!r}.\")", "docstring": "Compare to another KeyPath or a string.\n\nArgs:\nother: A Keypath or a string.\ncomparison: A comparison operator.\n\nReturns:\nWhether or not the comparison holds true.\n\nRaises:\nTypeError: The other object is neither a Keypath nor a string.", "source": "github-repos"}
{"code": "def WriteFileHash(self, path, hash_value):\n    \n    string = '{0:s}\\t{1:s}'.format(hash_value, path)\n\n    encoded_string = self._EncodeString(string)\n    print(encoded_string)", "docstring": "Writes the file path and hash to stdout.\n\nArgs:\npath (str): path of the file.\nhash_value (str): message digest hash calculated over the file data.", "source": "juraj-google-style"}
{"code": "def get_current_user(with_domain=True):\n    \n    try:\n        user_name = win32api.GetUserNameEx(win32api.NameSamCompatible)\n        if user_name[-1] == '$':\n            \n            \n            test_user = win32api.GetUserName()\n            if test_user == 'SYSTEM':\n                user_name = 'SYSTEM'\n            elif get_sid_from_name(test_user) == 'S-1-5-18':\n                user_name = 'SYSTEM'\n        elif not with_domain:\n            user_name = win32api.GetUserName()\n    except pywintypes.error as exc:\n        raise CommandExecutionError(\n            'Failed to get current user: {0}'.format(exc))\n\n    if not user_name:\n        return False\n\n    return user_name", "docstring": "Gets the user executing the process\n\nArgs:\n\nwith_domain (bool):\n``True`` will prepend the user name with the machine name or domain\nseparated by a backslash\n\nReturns:\nstr: The user name", "source": "juraj-google-style"}
{"code": "def filter_collections_by_statement(data_collections, statement):\n    pattern = BaseCollection.pattern_from_collections_and_statement(data_collections, statement)\n    collections = [coll.filter_by_pattern(pattern) for coll in data_collections]\n    return collections", "docstring": "Generate a filtered data collections according to a conditional statement.\n\nArgs:\ndata_collections: A list of aligned Data Collections to be evaluated\nagainst the statement.\nstatement: A conditional statement as a string (e.g. a>25 and a%5==0).\nThe variable should always be named as 'a' (without quotations).\n\nReturn:\ncollections: A list of Data Collections that have been filtered based\non the statement.", "source": "codesearchnet"}
{"code": "def walknset_vars(self, task_class=None, *args, **kwargs):\n\n    def change_task(task):\n        if ((task_class is not None) and (task.__class__ is not task_class)):\n            return False\n        return True\n    if self.is_work:\n        for task in self:\n            if (not change_task(task)):\n                continue\n            task.set_vars(*args, **kwargs)\n    elif self.is_flow:\n        for task in self.iflat_tasks():\n            if (not change_task(task)):\n                continue\n            task.set_vars(*args, **kwargs)\n    else:\n        raise TypeError((\"Don't know how to set variables for object class %s\" % self.__class__.__name__))", "docstring": "Set the values of the ABINIT variables in the input files of the nodes\n\nArgs:\ntask_class: If not None, only the input files of the tasks belonging\nto class `task_class` are modified.\n\nExample:\n\nflow.walknset_vars(ecut=10, kptopt=4)", "source": "codesearchnet"}
{"code": "def format_rpc(data):\n    (address, rpc_id, args, resp, _status) = data\n    name = rpc_name(rpc_id)\n    if isinstance(args, (bytes, bytearray)):\n        arg_str = hexlify(args)\n    else:\n        arg_str = repr(args)\n    if isinstance(resp, (bytes, bytearray)):\n        resp_str = hexlify(resp)\n    else:\n        resp_str = repr(resp)\n    return ('%s called on address %d, payload=%s, response=%s' % (name, address, arg_str, resp_str))", "docstring": "Format an RPC call and response.\n\nArgs:\ndata (tuple): A tuple containing the address, rpc_id, argument and\nresponse payloads and any error code.\n\nReturns:\nstr: The formated RPC string.", "source": "codesearchnet"}
{"code": "def random_dna(dna_spec: DNASpec, random_generator: Union[None, types.ModuleType, random.Random]=None, attach_spec: bool=True, previous_dna: Optional[DNA]=None) -> DNA:\n    return dna_spec.random_dna(random_generator or random, attach_spec, previous_dna)", "docstring": "Generates a random DNA from a DNASpec.\n\nExample::\n\nspec = pg.geno.space([\npg.geno.oneof([\npg.geno.constant(),\npg.geno.constant(),\npg.geno.constant()\n]),\npg.geno.floatv(0.1, 0.2)\n])\n\nprint(pg.random_dna(spec))\n# DNA([2, 0.1123])\n\nArgs:\ndna_spec: a DNASpec object.\nrandom_generator: a Python random generator.\nattach_spec: If True, attach the DNASpec to generated DNA.\nprevious_dna: An optional DNA representing previous DNA. This field might\nbe useful for generating stateful random DNAs.\n\nReturns:\nA DNA object.", "source": "github-repos"}
{"code": "def dummy_signatures(self):\n    if (not self.signing_algorithm):\n        return []\n    algo_id = {'sha1': 1, 'sha384': 2}[self.signing_algorithm]\n    signature = make_dummy_signature(algo_id)\n    return [(algo_id, signature)]", "docstring": "Create a dummy signature.\n\nThis is used when initially writing the MAR header and we don't know\nwhat the final signature data will be.\n\nReturns:\nFake signature data suitable for writing to the header with\n.write_signatures()", "source": "codesearchnet"}
{"code": "def get_image_data(self, ids=None, voxels=None, dense=True):\n        \n        return self.image_table.get_image_data(ids, voxels=voxels, dense=dense)", "docstring": "A convenience wrapper for ImageTable.get_image_data().\n\nArgs:\nids (list, array): A list or 1D numpy array of study ids to\nreturn. If None, returns data for all studies.\nvoxels (list, array): A list or 1D numpy array of voxel indices\n(i.e., rows) to return. If None, returns data for all voxels.", "source": "juraj-google-style"}
{"code": "def ReplaceAll(pattern, rep, s):\n  \n  if pattern not in _regexp_compile_cache:\n    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n  return _regexp_compile_cache[pattern].sub(rep, s)", "docstring": "Replaces instances of pattern in a string with a replacement.\n\nThe compiled regex is kept in a cache shared by Match and Search.\n\nArgs:\npattern: regex pattern\nrep: replacement text\ns: search string\n\nReturns:\nstring with replacements made (or original string if no replacements)", "source": "juraj-google-style"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1]\n    text_features = self.text_projection(pooled_output)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`GroupViTTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import CLIPTokenizer, GroupViTModel\n\n>>> model = GroupViTModel.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n>>> tokenizer = CLIPTokenizer.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def crypto_withdraw(self, amount, currency, crypto_address):\n        \n        params = {'amount': amount,\n                  'currency': currency,\n                  'crypto_address': crypto_address}\n        return self._send_message('post', '/withdrawals/crypto',\n                                  data=json.dumps(params))", "docstring": "Withdraw funds to a crypto address.\n\nArgs:\namount (Decimal): The amount to withdraw\ncurrency (str): The type of currency (eg. 'BTC')\ncrypto_address (str): Crypto address to withdraw to.\n\nReturns:\ndict: Withdraw details. Example::\n{\n\"id\":\"593533d2-ff31-46e0-b22e-ca754147a96a\",\n\"amount\":\"10.00\",\n\"currency\": \"BTC\",\n}", "source": "juraj-google-style"}
{"code": "def __init__(self, fetches):\n    self._fetch_type = type(fetches)\n    if isinstance(fetches, collections.defaultdict):\n        self._type_ctor = functools.partial(collections.defaultdict, fetches.default_factory)\n    else:\n        self._type_ctor = self._fetch_type\n    self._keys = fetches.keys()\n    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches.values()]\n    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)", "docstring": "Creates a _DictFetchMapper.\n\nArgs:\nfetches: Dict of fetches.", "source": "github-repos"}
{"code": "def get_estimated_size_and_observables(self, value, nested=False):\n    return (self.estimate_size(value, nested), [])", "docstring": "Returns estimated size of value along with any nested observables.\n\nThe list of nested observables is returned as a list of 2-tuples of\n(obj, coder_impl), where obj is an instance of observable.ObservableMixin,\nand coder_impl is the CoderImpl that can be used to encode elements sent by\nobj to its observers.\n\nArguments:\nvalue: the value whose encoded size is to be estimated.\nnested: whether the value is nested.\n\nReturns:\nThe estimated encoded size of the given value and a list of observables\nwhose elements are 2-tuples of (obj, coder_impl) as described above.", "source": "github-repos"}
{"code": "def _handle_skip_feature(self, test_dict):\n        \n        \n        skip_reason = None\n\n        if \"skip\" in test_dict:\n            skip_reason = test_dict[\"skip\"]\n\n        elif \"skipIf\" in test_dict:\n            skip_if_condition = test_dict[\"skipIf\"]\n            if self.session_context.eval_content(skip_if_condition):\n                skip_reason = \"{} evaluate to True\".format(skip_if_condition)\n\n        elif \"skipUnless\" in test_dict:\n            skip_unless_condition = test_dict[\"skipUnless\"]\n            if not self.session_context.eval_content(skip_unless_condition):\n                skip_reason = \"{} evaluate to False\".format(skip_unless_condition)\n\n        if skip_reason:\n            raise SkipTest(skip_reason)", "docstring": "handle skip feature for test\n- skip: skip current test unconditionally\n- skipIf: skip current test if condition is true\n- skipUnless: skip current test unless condition is true\n\nArgs:\ntest_dict (dict): test info\n\nRaises:\nSkipTest: skip test", "source": "juraj-google-style"}
{"code": "def __add__(self, other: Self | Processor) -> PartProcessor | Processor:\n    if isinstance(other, _ChainPartProcessor):\n        return _ChainPartProcessor([self] + other._processor_list)\n    elif isinstance(other, _ChainProcessor):\n        return _ChainProcessor([self.to_processor().call] + other._processor_list)\n    elif isinstance(other, Processor):\n        return _ChainProcessor([self.to_processor().call, other])\n    else:\n        return _ChainPartProcessor([self, other])", "docstring": "Adds `other` to this processor.\n\nArgs:\nother: a processor to add to `self`.\n\nReturns:\nThe chain of this process with `other`.", "source": "github-repos"}
{"code": "def _pearson_correlation(self, imgs_to_decode):\n        \n        x, y = imgs_to_decode.astype(float), self.feature_images.astype(float)\n        return self._xy_corr(x, y)", "docstring": "Decode images using Pearson's r.\n\nComputes the correlation between each input image and each feature\nimage across voxels.\n\nArgs:\nimgs_to_decode: An ndarray of images to decode, with voxels in rows\nand images in columns.\n\nReturns:\nAn n_features x n_images 2D array, with each cell representing the\npearson correlation between the i'th feature and the j'th image\nacross all voxels.", "source": "juraj-google-style"}
{"code": "def replace_in_file(filename: str, text_from: str, text_to: str) -> None:\n    \n    log.info(\"Amending {}: {} -> {}\",\n             filename, repr(text_from), repr(text_to))\n    with open(filename) as infile:\n        contents = infile.read()\n    contents = contents.replace(text_from, text_to)\n    with open(filename, 'w') as outfile:\n        outfile.write(contents)", "docstring": "Replaces text in a file.\n\nArgs:\nfilename: filename to process (modifying it in place)\ntext_from: original text to replace\ntext_to: replacement text", "source": "juraj-google-style"}
{"code": "def __init__(self, config: interfaces.Config | None=None):\n    self._config = config or interfaces.Config()", "docstring": "Initializes the TopicVerbalizer.\n\nArgs:\nconfig: The agent configuration.", "source": "github-repos"}
{"code": "def _lease_owned(self, lease, current_uuid_path):\n    (prev_uuid_path, prev_uuid) = lease.metadata\n    with open(current_uuid_path) as f:\n        current_uuid = f.read()\n    return ((current_uuid_path == prev_uuid_path) and (prev_uuid == current_uuid))", "docstring": "Checks if the given lease is owned by the prefix whose uuid is in\nthe given path\n\nNote:\nThe prefix must be also in the same path it was when it took the\nlease\n\nArgs:\npath (str): Path to the lease\ncurrent_uuid_path (str): Path to the uuid to check ownership of\n\nReturns:\nbool: ``True`` if the given lease in owned by the prefix,\n``False`` otherwise", "source": "codesearchnet"}
{"code": "def are_equal(self, sp1, sp2):\n        \n        comp1 = Composition(sp1)\n        comp2 = Composition(sp2)\n        return comp1.get_el_amt_dict() == comp2.get_el_amt_dict()", "docstring": "True if element:amounts are exactly the same, i.e.,\noxidation state is not considered.\n\nArgs:\nsp1: First species. A dict of {specie/element: amt} as per the\ndefinition in Site and PeriodicSite.\nsp2: Second species. A dict of {specie/element: amt} as per the\ndefinition in Site and PeriodicSite.\n\nReturns:\nBoolean indicating whether species are the same based on element\nand amounts.", "source": "juraj-google-style"}
{"code": "def reply(self, user, msg, errors_as_replies=True):\n    return self._brain.reply(user, msg, errors_as_replies)", "docstring": "Fetch a reply from the RiveScript brain.\n\nArguments:\nuser (str): A unique user ID for the person requesting a reply.\nThis could be e.g. a screen name or nickname. It's used internally\nto store user variables (including topic and history), so if your\nbot has multiple users each one should have a unique ID.\nmsg (str): The user's message. This is allowed to contain\npunctuation and such, but any extraneous data such as HTML tags\nshould be removed in advance.\nerrors_as_replies (bool): When errors are encountered (such as a\ndeep recursion error, no reply matched, etc.) this will make the\nreply be a text representation of the error message. If you set\nthis to ``False``, errors will instead raise an exception, such as\na ``DeepRecursionError`` or ``NoReplyError``. By default, no\nexceptions are raised and errors are set in the reply instead.\n\nReturns:\nstr: The reply output.", "source": "codesearchnet"}
{"code": "async def execute_method(self, method, **params):\n    url = self.url_builder(method, url_params=params)\n    logger.info('Executing method %r', method)\n    response = (await aiohttp.get(url))\n    logger.info('Status: %r', response.status)\n    if (response.status == 200):\n        json = (await response.json())\n        logger.debug('...with JSON %r', json)\n        if json.get('ok'):\n            return json\n        raise SlackApiError(json['error'])\n    else:\n        raise_for_status(response)", "docstring": "Execute a specified Slack Web API method.\n\nArguments:\nmethod (:py:class:`str`): The name of the method.\n**params (:py:class:`dict`): Any additional parameters\nrequired.\n\nReturns:\n:py:class:`dict`: The JSON data from the response.\n\nRaises:\n:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP\nrequest returns a code other than 200 (OK).\nSlackApiError: If the Slack API is reached but the response\ncontains an error message.", "source": "codesearchnet"}
{"code": "def _scan(self, fs, dir_path, namespaces=None):\n    try:\n        for info in fs.scandir(dir_path, namespaces=namespaces):\n            (yield info)\n    except FSError as error:\n        if (not self.on_error(dir_path, error)):\n            six.reraise(type(error), error)", "docstring": "Get an iterator of `Info` objects for a directory path.\n\nArguments:\nfs (FS): A filesystem instance.\ndir_path (str): A path to a directory on the filesystem.\nnamespaces (list): A list of additional namespaces to\ninclude in the `Info` objects.\n\nReturns:\n~collections.Iterator: iterator of `Info` objects for\nresources within the given path.", "source": "codesearchnet"}
{"code": "def IsDerivedFunction(clean_lines, linenum):\n    for i in xrange(linenum, max((- 1), (linenum - 10)), (- 1)):\n        match = Match('^([^()]*\\\\w+)\\\\(', clean_lines.elided[i])\n        if match:\n            (line, _, closing_paren) = CloseExpression(clean_lines, i, len(match.group(1)))\n            return ((closing_paren >= 0) and Search('\\\\boverride\\\\b', line[closing_paren:]))\n    return False", "docstring": "Check if current line contains an inherited function.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if current line contains a function with \"override\"\nvirt-specifier.", "source": "codesearchnet"}
{"code": "def oauth_aware(self, method):\n\n    def setup_oauth(request_handler, *args, **kwargs):\n        if self._in_error:\n            self._display_error_message(request_handler)\n            return\n        user = users.get_current_user()\n        if (not user):\n            request_handler.redirect(users.create_login_url(request_handler.request.uri))\n            return\n        self._create_flow(request_handler)\n        self.flow.params['state'] = _build_state_value(request_handler, user)\n        self.credentials = self._storage_class(self._credentials_class, None, self._credentials_property_name, user=user).get()\n        try:\n            resp = method(request_handler, *args, **kwargs)\n        finally:\n            self.credentials = None\n        return resp\n    return setup_oauth", "docstring": "Decorator that sets up for OAuth 2.0 dance, but doesn't do it.\n\nDoes all the setup for the OAuth dance, but doesn't initiate it.\nThis decorator is useful if you want to create a page that knows\nwhether or not the user has granted access to this application.\nFrom within a method decorated with @oauth_aware the has_credentials()\nand authorize_url() methods can be called.\n\nArgs:\nmethod: callable, to be decorated method of a webapp.RequestHandler\ninstance.", "source": "codesearchnet"}
{"code": "def matches(self, applied_ptransform):\n    raise NotImplementedError", "docstring": "Determines whether the given AppliedPTransform matches.\n\nNote that the matching will happen *after* Runner API proto translation.\nIf matching is done via type checks, to/from_runner_api[_parameter] methods\nmust be implemented to preserve the type (and other data) through proto\nserialization.\n\nConsider URN-based translation instead.\n\nArgs:\napplied_ptransform: AppliedPTransform to be matched.\n\nReturns:\na bool indicating whether the given AppliedPTransform is a match.", "source": "github-repos"}
{"code": "def enroll_users_in_program(cls, enterprise_customer, program_details, course_mode, emails, cohort=None):\n    (existing_users, unregistered_emails) = cls.get_users_by_email(emails)\n    course_ids = get_course_runs_from_program(program_details)\n    successes = []\n    pending = []\n    failures = []\n    for user in existing_users:\n        succeeded = cls.enroll_user(enterprise_customer, user, course_mode, *course_ids)\n        if succeeded:\n            successes.append(user)\n        else:\n            failures.append(user)\n    for email in unregistered_emails:\n        pending_user = enterprise_customer.enroll_user_pending_registration(email, course_mode, *course_ids, cohort=cohort)\n        pending.append(pending_user)\n    return (successes, pending, failures)", "docstring": "Enroll existing users in all courses in a program, and create pending enrollments for nonexisting users.\n\nArgs:\nenterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\nprogram_details: The details of the program in which we're enrolling\ncourse_mode (str): The mode with which we're enrolling in the program\nemails: An iterable of email addresses which need to be enrolled\n\nReturns:\nsuccesses: A list of users who were successfully enrolled in all courses of the program\npending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had\npending enrollments created for them in the database\nfailures: A list of users who could not be enrolled in the program", "source": "codesearchnet"}
{"code": "def import_image(self, src=None, repository=None, tag=None, image=None, changes=None, stream_src=False):\n    if (not (src or image)):\n        raise errors.DockerException('Must specify src or image to import from')\n    u = self._url('/images/create')\n    params = _import_image_params(repository, tag, image, src=(src if isinstance(src, six.string_types) else None), changes=changes)\n    headers = {'Content-Type': 'application/tar'}\n    if (image or (params.get('fromSrc') != '-')):\n        return self._result(self._post(u, data=None, params=params))\n    elif isinstance(src, six.string_types):\n        with open(src, 'rb') as f:\n            return self._result(self._post(u, data=f, params=params, headers=headers, timeout=None))\n    else:\n        if stream_src:\n            headers['Transfer-Encoding'] = 'chunked'\n        return self._result(self._post(u, data=src, params=params, headers=headers))", "docstring": "Import an image. Similar to the ``docker import`` command.\n\nIf ``src`` is a string or unicode string, it will first be treated as a\npath to a tarball on the local system. If there is an error reading\nfrom that file, ``src`` will be treated as a URL instead to fetch the\nimage from. You can also pass an open file handle as ``src``, in which\ncase the data will be read from that file.\n\nIf ``src`` is unset but ``image`` is set, the ``image`` parameter will\nbe taken as the name of an existing image to import from.\n\nArgs:\nsrc (str or file): Path to tarfile, URL, or file-like object\nrepository (str): The repository to create\ntag (str): The tag to apply\nimage (str): Use another image like the ``FROM`` Dockerfile\nparameter", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False):\n    residual = hidden_states\n    attn_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions)\n    hidden_states = attn_outputs[0]\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):\n        clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    return (hidden_states,) + attn_outputs[1:]", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*\nattention_mask (`torch.FloatTensor`): attention mask of size\n*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n*(encoder_attention_heads,)*.", "source": "github-repos"}
{"code": "def Rsync(url, tgt_name, tgt_root=None):\n    if (tgt_root is None):\n        tgt_root = str(CFG['tmp_dir'])\n    from benchbuild.utils.cmd import rsync\n    tgt_dir = (local.path(tgt_root) / tgt_name)\n    if (not source_required(tgt_dir)):\n        Copy(tgt_dir, '.')\n        return\n    rsync('-a', url, tgt_dir)\n    update_hash(tgt_dir)\n    Copy(tgt_dir, '.')", "docstring": "RSync a folder.\n\nArgs:\nurl (str): The url of the SOURCE location.\nfname (str): The name of the TARGET.\nto (str): Path of the target location.\nDefaults to ``CFG[\"tmpdir\"]``.", "source": "codesearchnet"}
{"code": "def cat_adb_log(self, tag, begin_time):\n    if (not self.adb_logcat_file_path):\n        raise Error(self._ad, 'Attempting to cat adb log when none has been collected.')\n    end_time = mobly_logger.get_log_line_timestamp()\n    self._ad.log.debug('Extracting adb log from logcat.')\n    adb_excerpt_path = os.path.join(self._ad.log_path, 'AdbLogExcerpts')\n    utils.create_dir(adb_excerpt_path)\n    f_name = os.path.basename(self.adb_logcat_file_path)\n    out_name = f_name.replace('adblog,', '').replace('.txt', '')\n    out_name = (',%s,%s.txt' % (begin_time, out_name))\n    out_name = out_name.replace(':', '-')\n    tag_len = (utils.MAX_FILENAME_LEN - len(out_name))\n    tag = tag[:tag_len]\n    out_name = (tag + out_name)\n    full_adblog_path = os.path.join(adb_excerpt_path, out_name)\n    with io.open(full_adblog_path, 'w', encoding='utf-8') as out:\n        in_file = self.adb_logcat_file_path\n        with io.open(in_file, 'r', encoding='utf-8', errors='replace') as f:\n            in_range = False\n            while True:\n                line = None\n                try:\n                    line = f.readline()\n                    if (not line):\n                        break\n                except:\n                    continue\n                line_time = line[:mobly_logger.log_line_timestamp_len]\n                if (not mobly_logger.is_valid_logline_timestamp(line_time)):\n                    continue\n                if self._is_timestamp_in_range(line_time, begin_time, end_time):\n                    in_range = True\n                    if (not line.endswith('\\n')):\n                        line += '\\n'\n                    out.write(line)\n                elif in_range:\n                    break", "docstring": "Takes an excerpt of the adb logcat log from a certain time point to\ncurrent time.\n\nArgs:\ntag: An identifier of the time period, usualy the name of a test.\nbegin_time: Logline format timestamp of the beginning of the time\nperiod.", "source": "codesearchnet"}
{"code": "def address(self, num):\n    url_root = 'company/{}/registered-office-address'\n    baseuri = (self._BASE_URI + url_root.format(num))\n    res = self.session.get(baseuri)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for company addresses by company number.\n\nArgs:\nnum (str): Company number to search on.", "source": "codesearchnet"}
{"code": "def point_probability(self, threshold):\n        \n        point_prob = np.zeros(self.data.shape[1:])\n        for t in range(self.data.shape[1]):\n            point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)\n        return EnsembleConsensus(point_prob, \"point_probability\", self.ensemble_name,\n                                 self.run_date, self.variable + \"_{0:0.2f}_{1}\".format(threshold,\n                                                                                       self.units.replace(\" \", \"_\")),\n                                 self.start_date, self.end_date, \"\")", "docstring": "Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at\nthat point.\n\nArgs:\nthreshold: If >= threshold assigns a 1 to member, otherwise 0.\n\nReturns:\nEnsembleConsensus", "source": "juraj-google-style"}
{"code": "def disassemble(self, start=None, end=None, arch_mode=None):\n        \n        if arch_mode is None:\n            arch_mode = self.binary.architecture_mode\n\n        curr_addr = start if start else self.binary.ea_start\n        end_addr = end if end else self.binary.ea_end\n\n        while curr_addr < end_addr:\n            \n            encoding = self.__fetch_instr(curr_addr)\n\n            \n            asm_instr = self.disassembler.disassemble(encoding, curr_addr, architecture_mode=arch_mode)\n\n            if not asm_instr:\n                return\n\n            yield curr_addr, asm_instr, asm_instr.size\n\n            \n            curr_addr += asm_instr.size", "docstring": "Disassemble native instructions.\n\nArgs:\nstart (int): Start address.\nend (int): End address.\narch_mode (int): Architecture mode.\n\nReturns:\n(int, Instruction, int): A tuple of the form (address, assembler instruction, instruction size).", "source": "juraj-google-style"}
{"code": "def assert_splits_match(nested_splits_lists):\n    error_msg = 'Inputs must have identical ragged splits'\n    for splits_list in nested_splits_lists:\n        if len(splits_list) != len(nested_splits_lists[0]):\n            raise ValueError(error_msg)\n    return [check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for s1, s2 in zip(nested_splits_lists[0], splits_list)]", "docstring": "Checks that the given splits lists are identical.\n\nPerforms static tests to ensure that the given splits lists are identical,\nand returns a list of control dependency op tensors that check that they are\nfully identical.\n\nArgs:\nnested_splits_lists: A list of nested_splits_lists, where each split_list is\na list of `splits` tensors from a `RaggedTensor`, ordered from outermost\nragged dimension to innermost ragged dimension.\n\nReturns:\nA list of control dependency op tensors.\nRaises:\nValueError: If the splits are not identical.", "source": "github-repos"}
{"code": "def _starts_with(field, filter_value):\n        \n        valid = False\n        if field.startswith(filter_value):\n            valid = True\n        return valid", "docstring": "Validate field starts with provided value.\n\nArgs:\nfilter_value (string): A string or list of values.\n\nReturns:\n(boolean): Results of validation", "source": "juraj-google-style"}
{"code": "def get_min_max_value(self) -> tuple[float, float]:\n    total_freq = sum(self._hist_freq)\n    hist_freq_cumsum = np.cumsum(self._hist_freq) / total_freq\n    min_quantile, max_quantile = (self._calib_opts.calibration_parameters.min_percentile / 100.0, self._calib_opts.calibration_parameters.max_percentile / 100.0)\n    min_quantile_idx, max_quantile_idx = (np.searchsorted(hist_freq_cumsum, min_quantile, side='right'), np.searchsorted(hist_freq_cumsum, max_quantile, side='left'))\n    min_value, max_value = (self._hist_mids[min_quantile_idx], self._hist_mids[max_quantile_idx])\n    return (min_value, max_value)", "docstring": "Calculates min and max from statistics using calibration options.\n\nA \"percentile\" is a statistical concept that represents the value below\nwhich a given percentage of data falls in a dataset. It involves sorting the\ndata from smallest to largest and then finding the value at a specified\npercentage position. For example, the 0.01 percentile represents the value\nin a given data set that corresponds to the lowest 0.01% of the data.\n\nHistogramPercentile calibration uses min_percentile and max_percentile to\nfind min and max.\n\nmin_percentile and max_percentile must be in range [0, 100].\nmin_percentile is 0.001 by default.\nmax_percentile is 99.999 by default.\n\nReturns:\n(min_value, max_value): Min and max calculated using HistogramPercentile", "source": "github-repos"}
{"code": "def get_bin_test(self, hashes):\n        \n        all_responses = {}\n\n        if self._cache:\n            api_name = 'shadowserver-bin-test'\n            all_responses = self._cache.bulk_lookup(api_name, hashes)\n            hashes = [key for key in hashes if key not in all_responses.keys()]\n            all_responses = dict([(key, val) for key, val in all_responses.items() if len(val) >= 2])\n\n        HASHES_PER_REQ = 25\n        hash_chunks = ['\\n'.join(hashes[pos:pos + HASHES_PER_REQ]) for pos in range(0, len(hashes), HASHES_PER_REQ)]\n\n        responses = self._requests.multi_post(self.BINTEST_URL, data=hash_chunks, to_json=False, send_as_file=True)\n        for response in responses:\n            if response is not None and 200 == response.status_code:\n                response_lines = response.text.split('\\n')\n                for line in response_lines:\n                    \n                    val = {}\n\n                    \n                    index_of_first_space = line.find(' ')\n                    if -1 == index_of_first_space:\n                        index_of_first_space = len(line)\n                    key = line[:index_of_first_space].lower()\n\n                    \n                    json_text = line[index_of_first_space + 1:]\n                    if len(json_text):\n                        try:\n                            val = simplejson.loads(json_text)\n                            \n                            if len(val.keys()) >= 2:\n                                all_responses[key] = val\n\n                        except ValueError:\n                            \n                            pass\n\n                    if self._cache:\n                        self._cache.cache_value(api_name, key, val)\n\n        return all_responses", "docstring": "Test hashes against a list of known software applications.\n\nKnown hashes will return a dictionary of information.\nUnknown hashes will return nothing.\n\nArgs:\nhashes: list of string hashes.\nReturns:\nA dict with the hash as key and the shadowserver report as value.", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, cache=None, database=None, **kwargs):\n    if (database is None):\n        raise ValueError('Invalid database.')\n    super(ESEDBPlugin, self).Process(parser_mediator)\n    self.GetEntries(parser_mediator, cache=cache, database=database, **kwargs)", "docstring": "Determines if this is the appropriate plugin for the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache.\ndatabase (Optional[pyesedb.file]): ESE database.\n\nRaises:\nValueError: If the database attribute is not valid.", "source": "codesearchnet"}
{"code": "def get_logfile_name(tags):\n    if (not os.path.exists(sd.LOG_DIR)):\n        os.mkdir(sd.LOG_DIR)\n    filename = 'log'\n    for tag in tags:\n        filename += '_{}'.format(tag)\n    filename += '.txt'\n    filename = os.path.join(sd.LOG_DIR, filename)\n    return filename", "docstring": "Formulates a log file name that incorporates the provided tags.\n\nThe log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.\n\nArgs:\ntags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag\nwill be added in the same order as provided.", "source": "codesearchnet"}
{"code": "def restore(self, state):\n        \n\n\n        selector = DataStreamSelector.FromString(state.get(u'selector'))\n        if self.selector != selector:\n            raise ArgumentError(\"Attempted to restore an InvalidStreamWalker with a different selector\",\n                                selector=self.selector, serialized_data=state)\n\n        if state.get(u'type') != u'invalid':\n            raise ArgumentError(\"Invalid serialized state for InvalidStreamWalker\", serialized_data=state)", "docstring": "Restore the contents of this virtual stream walker.\n\nArgs:\nstate (dict): The previously serialized state.\n\nRaises:\nArgumentError: If the serialized state does not have\na matching selector.", "source": "juraj-google-style"}
{"code": "def _get_cert_expiration_time(headers):\n  \n  \n  cache_control = headers.get('Cache-Control', '')\n  \n  \n  \n  for entry in cache_control.split(','):\n    match = _MAX_AGE_REGEX.match(entry)\n    if match:\n      cache_time_seconds = int(match.group(1))\n      break\n  else:\n    return 0\n\n  \n  age = headers.get('Age')\n  if age is not None:\n    try:\n      age = int(age)\n    except ValueError:\n      age = 0\n    cache_time_seconds -= age\n\n  return max(0, cache_time_seconds)", "docstring": "Get the expiration time for a cert, given the response headers.\n\nGet expiration time from the headers in the result.  If we can't get\na time from the headers, this returns 0, indicating that the cert\nshouldn't be cached.\n\nArgs:\nheaders: A dict containing the response headers from the request to get\ncerts.\n\nReturns:\nAn integer with the number of seconds the cert should be cached.  This\nvalue is guaranteed to be >= 0.", "source": "juraj-google-style"}
{"code": "def IsCppString(line):\n  \n\n  line = line.replace(r'\\\\', 'XX')  \n  return ((line.count('\"') - line.count(r'\\\"') - line.count(\"'\\\"'\")) & 1) == 1", "docstring": "Does line terminate so, that the next symbol is in string constant.\n\nThis function does not consider single-line nor multi-line comments.\n\nArgs:\nline: is a partial line of code starting from the 0..n.\n\nReturns:\nTrue, if next character appended to 'line' is inside a\nstring constant.", "source": "juraj-google-style"}
{"code": "def leaky_relu(x, name=None):\n  \n  with tf.name_scope(name, 'leaky_relu', [x]) as scope:\n    x = tf.convert_to_tensor(x, name='x')\n    return tf.where(tf.less(x, 0.0), 0.01 * x, x, name=scope)", "docstring": "Creates a leaky_relu.\n\nThis is an alternate non-linearity to relu. The leaky part of the relu may\nprevent dead Neurons in a model since the gradient doesn't go completely to\n0.\n\nArgs:\nx: The input tensor.\nname: Optional name for this op.\nReturns:\nx if x > 0 otherwise 0.01 * x.", "source": "juraj-google-style"}
{"code": "def r_edges(step):\n    \n    rbot, rtop = misc.get_rbounds(step)\n    centers = step.rprof.loc[:, 'r'].values + rbot\n    \n    \n    edges = (centers[:-1] + centers[1:]) / 2\n    edges = np.insert(edges, 0, rbot)\n    edges = np.append(edges, rtop)\n    return edges, edges", "docstring": "Cell border.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the position of the bottom and top walls\nof the cells. The two elements of the tuple are identical.", "source": "juraj-google-style"}
{"code": "def process_usufy(self, data):\n    mode = 'usufy'\n    info = []\n    try:\n        verifier = self.modes.get(mode, {}).get('extra_fields', {})\n        for field in verifier.keys():\n            regexp = verifier[field]\n            values = re.findall(regexp, data)\n            for val in values:\n                aux = {}\n                aux['type'] = field\n                aux['value'] = val\n                aux['attributes'] = []\n                if (aux not in info):\n                    info.append(aux)\n    except AttributeError as e:\n        for field in self.fieldsRegExp[mode].keys():\n            try:\n                regexp = ((self.fieldsRegExp[mode][field]['start'] + '([^\\\\)]+)') + self.fieldsRegExp[mode][field]['end'])\n                tmp = re.findall(regexp, data)\n                values = []\n                for t in tmp:\n                    if (self.fieldsRegExp[mode][field]['end'] in t):\n                        values.append(t.split(self.fieldsRegExp[mode][field]['end'])[0])\n                    else:\n                        values.append(t)\n            except:\n                regexp = self.fieldsRegExp[mode][field]\n                values = re.findall(regexp, data)\n            for val in values:\n                aux = {}\n                aux['type'] = field\n                aux['value'] = val\n                aux['attributes'] = []\n                if (aux not in info):\n                    info.append(aux)\n    return info", "docstring": "Method to process and extract the entities of a usufy\n\nArgs:\n-----\ndata: The information from which the info will be extracted.\n\nReturn:\n-------\nA list of the entities found.", "source": "codesearchnet"}
{"code": "def weights_to_cpu(state_dict):\n    state_dict_cpu = OrderedDict()\n    for (key, val) in state_dict.items():\n        state_dict_cpu[key] = val.cpu()\n    return state_dict_cpu", "docstring": "Copy a model state_dict to cpu.\n\nArgs:\nstate_dict (OrderedDict): Model weights on GPU.\n\nReturns:\nOrderedDict: Model weights on GPU.", "source": "codesearchnet"}
{"code": "def ParseReportDescriptor(rd, desc):\n    rd = bytearray(rd)\n    pos = 0\n    report_count = None\n    report_size = None\n    usage_page = None\n    usage = None\n    while (pos < len(rd)):\n        key = rd[pos]\n        (key_size, value_length) = GetValueLength(rd, pos)\n        if ((key & REPORT_DESCRIPTOR_KEY_MASK) == INPUT_ITEM):\n            if (report_count and report_size):\n                byte_length = ((report_count * report_size) \n                desc.internal_max_in_report_len = max(desc.internal_max_in_report_len, byte_length)\n                report_count = None\n                report_size = None\n        elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == OUTPUT_ITEM):\n            if (report_count and report_size):\n                byte_length = ((report_count * report_size) \n                desc.internal_max_out_report_len = max(desc.internal_max_out_report_len, byte_length)\n                report_count = None\n                report_size = None\n        elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == COLLECTION_ITEM):\n            if usage_page:\n                desc.usage_page = usage_page\n            if usage:\n                desc.usage = usage\n        elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == REPORT_COUNT):\n            if (len(rd) >= ((pos + 1) + value_length)):\n                report_count = ReadLsbBytes(rd, (pos + 1), value_length)\n        elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == REPORT_SIZE):\n            if (len(rd) >= ((pos + 1) + value_length)):\n                report_size = ReadLsbBytes(rd, (pos + 1), value_length)\n        elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == USAGE_PAGE):\n            if (len(rd) >= ((pos + 1) + value_length)):\n                usage_page = ReadLsbBytes(rd, (pos + 1), value_length)\n        elif ((key & REPORT_DESCRIPTOR_KEY_MASK) == USAGE):\n            if (len(rd) >= ((pos + 1) + value_length)):\n                usage = ReadLsbBytes(rd, (pos + 1), value_length)\n        pos += (value_length + key_size)\n    return desc", "docstring": "Parse the binary report descriptor.\n\nParse the binary report descriptor into a DeviceDescriptor object.\n\nArgs:\nrd: The binary report descriptor\ndesc: The DeviceDescriptor object to update with the results\nfrom parsing the descriptor.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_env_spec(self, filters=None):\n        \n        spec = {\n            'domains':\n                {\n                    vm_name: deepcopy(vm_object.spec)\n                    for vm_name, vm_object in self._vms.viewitems()\n                },\n            'nets':\n                {\n                    net_name: deepcopy(net_object.spec)\n                    for net_name, net_object in self._nets.viewitems()\n                }\n        }\n\n        if filters:\n            utils.filter_spec(spec, filters)\n\n        return spec", "docstring": "Get the spec of the current env.\nThe spec will hold the info about all the domains and\nnetworks associated with this env.\nArgs:\nfilters (list): list of paths to keys that should be removed from\nthe init file\nReturns:\ndict: the spec of the current env", "source": "juraj-google-style"}
{"code": "def get_checklists(self, **query_params):\n    checklists = self.get_checklist_json(self.base_uri, query_params=query_params)\n    checklists_list = []\n    for checklist_json in checklists:\n        checklists_list.append(self.create_checklist(checklist_json))\n    return checklists_list", "docstring": "Get the checklists for this card. Returns a list of Checklist objects.\n\nReturns:\nlist(Checklist): The checklists attached to this card", "source": "codesearchnet"}
{"code": "def update(self, information, timeout=(- 1)):\n    return self._client.update(information, timeout=timeout)", "docstring": "Edit an IPv4 Range.\n\nArgs:\ninformation (dict): Information to update.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated IPv4 range.", "source": "codesearchnet"}
{"code": "def serialize_workflow(self):\n    self.workflow.refresh_waiting_tasks()\n    return CompactWorkflowSerializer().serialize_workflow(self.workflow, include_spec=False)", "docstring": "Serializes the current WF.\n\nReturns:\nWF state data.", "source": "codesearchnet"}
{"code": "def with_dtype(self, dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if dtype not in (dtypes.int32, dtypes.int64):\n        raise ValueError('dtype must be int32 or int64')\n    if self.dtype == dtype:\n        return self\n    return RowPartition(row_splits=_cast_if_not_none(self._row_splits, dtype), row_lengths=_cast_if_not_none(self._row_lengths, dtype), value_rowids=_cast_if_not_none(self._value_rowids, dtype), nrows=_cast_if_not_none(self._nrows, dtype), uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype), internal=_row_partition_factory_key)", "docstring": "Returns a copy of this RowPartition with the given encoding dtype.\n\nArgs:\ndtype: The dtype for encoding tensors, such as `row_splits` and `nrows`.\nOne of `tf.int32` or `tf.int64`.\n\nReturns:\nA copy of this RowPartition, with the encoding tensors cast to the given\ntype.", "source": "github-repos"}
{"code": "def is_predecessor_of_other(self, predecessor, others):\n    return any(((predecessor in self._predecessors_by_id[o]) for o in others))", "docstring": "Returns whether the predecessor is a predecessor or a predecessor\nof a predecessor...of any of the others.\n\nArgs:\npredecessor (str): The txn id of the predecessor.\nothers (list(str)): The txn id of the successor.\n\nReturns:\n(bool)", "source": "codesearchnet"}
{"code": "def inspect(obj: object) -> None:\n    root = nodes.Node.from_obj(obj)\n    html_content = IPython.display.HTML(f'\\n      {resource_utils.resource_import('theme.css')}\\n      {pyjs_com.js_import()}\\n      {resource_utils.resource_import('main.js')}\\n\\n      {main_inspect_html(root)}\\n      <script>\\n        load_content(\"{root.id}\");\\n      </script>\\n      ')\n    IPython.display.display(html_content)", "docstring": "Inspect all attributes of a Python object interactivelly.\n\nArgs:\nobj: Any object to inspect (module, class, dict,...).", "source": "github-repos"}
{"code": "def recompose(src: Path, target_file: Path):\n    (mission_folder, assets_folder) = NewMiz._get_subfolders(src)\n    base_info = ujson.loads(Path(mission_folder, 'base_info.json').read_text(encoding=ENCODING))\n    version = base_info['__version__']\n    with Miz(target_file) as miz:\n        LOGGER.info('re-composing mission table from folder: \"%s\"', mission_folder)\n        miz.mission.d = NewMiz._recreate_dict_from_folder(mission_folder, version)\n        for item in assets_folder.iterdir():\n            target = Path(miz.temp_dir, item.name).absolute()\n            if item.is_dir():\n                if target.exists():\n                    shutil.rmtree(target)\n                shutil.copytree(item.absolute(), target)\n            elif item.is_file():\n                shutil.copy(item.absolute(), target)\n        miz.zip(target_file, encode=False)", "docstring": "Recompose a Miz from json object\n\nArgs:\nsrc: folder containing the json structure\ntarget_file: target Miz file", "source": "codesearchnet"}
{"code": "def get_variables_in_module(module, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n    return module.get_variables(collection=collection)", "docstring": "Returns tuple of `tf.Variable`s declared inside an `snt.Module`.\n\nNote that this operates by searching the variable scope a module contains,\nand so does not know about any modules which were constructed elsewhere but\nused inside this module.\n\nArgs:\nmodule: `snt.Module` instance to query the scope of.\ncollection: Collection to restrict query to. By default this is\n`tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable\nvariables such as moving averages.\n\nReturns:\nA tuple of `tf.Variable` objects.\n\nRaises:\nNotConnectedError: If the module is not connected to the Graph.", "source": "codesearchnet"}
{"code": "def put(self, item):\n        \n        if not item:\n            return\n        self._queue.put(item)\n        if self._queue.qsize() >= self._max_queue_length:\n            self.flush()", "docstring": "Adds the passed in item object to the queue and calls :func:`flush` if the size of the queue is larger\nthan :func:`max_queue_length`. This method does nothing if the passed in item is None.\n\nArgs:\nitem (:class:`contracts.Envelope`) item the telemetry envelope object to send to the service.", "source": "juraj-google-style"}
{"code": "def create(cls, name, config=None, kind='spark'):\n    conn = Qubole.agent()\n    return conn.post(cls.rest_entity_path, data={'name': name, 'config': config, 'kind': kind})", "docstring": "Create a new app.\n\nArgs:\n`name`: the name of the app\n\n`config`: a dictionary of key-value pairs\n\n`kind`: kind of the app (default=spark)", "source": "codesearchnet"}
{"code": "def _address_content(self, x):\n    mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name='mem_key')\n    mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name='mem_query')\n    norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True)\n    dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)\n    cos_dist = tf.div(dot_product, (norm + 1e-07), name='cos_dist')\n    access_logits = (self.sharpen_factor * cos_dist)\n    return access_logits", "docstring": "Address the memory based on content similarity.\n\nArgs:\nx: a tensor in the shape of [batch_size, length, depth].\nReturns:\nthe logits for each memory entry [batch_size, length, memory_size].", "source": "codesearchnet"}
{"code": "def set_mtu(self, name, value=None, default=False, disable=False):\n    if (value is not None):\n        value = int(value)\n        if (not (68 <= value <= 65535)):\n            raise ValueError('invalid mtu value')\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('mtu', value=value, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the interface IP MTU\n\nArgs:\nname (string): The interface identifier to apply the interface\nconfig to\n\nvalue (integer): The MTU value to set the interface to.  Accepted\nvalues include 68 to 65535\n\ndefault (bool): Configures the mtu parameter to its default\nvalue using the EOS CLI default command\n\ndisable (bool); Negate the mtu parameter value using the EOS\nCLI no command\n\nReturns:\nTrue if the operation succeeds otherwise False.\n\nRaises:\nValueError: If the value for MTU is not an integer value or\noutside of the allowable range", "source": "codesearchnet"}
{"code": "def get_tensor_dependencies(tensor):\n    \n    dependencies = set()\n    dependencies.update(tensor.op.inputs)\n    for sub_op in tensor.op.inputs:\n        dependencies.update(get_tensor_dependencies(sub_op))\n    return dependencies", "docstring": "Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).\n\nArgs:\ntensor (tf.Tensor): The input tensor.\n\nReturns: Set of all dependencies (including needed placeholders) for the input tensor.", "source": "juraj-google-style"}
{"code": "def get_forced_variation(self, experiment, user_id):\n    \n\n    forced_variations = experiment.forcedVariations\n    if forced_variations and user_id in forced_variations:\n      variation_key = forced_variations.get(user_id)\n      variation = self.config.get_variation_from_key(experiment.key, variation_key)\n      if variation:\n        self.logger.info('User \"%s\" is forced in variation \"%s\".' % (user_id, variation_key))\n      return variation\n\n    return None", "docstring": "Determine if a user is forced into a variation for the given experiment and return that variation.\n\nArgs:\nexperiment: Object representing the experiment for which user is to be bucketed.\nuser_id: ID for the user.\n\nReturns:\nVariation in which the user with ID user_id is forced into. None if no variation.", "source": "juraj-google-style"}
{"code": "def __init__(self, vocab, unk_token, normalize_text=True):\n    self.vocab = vocab\n    self.unk_token = unk_token\n    self.normalize_text = normalize_text", "docstring": "Constructs a CharacterTokenizer.\n\nArgs:\n**vocab**:\nVocabulary object.\n**unk_token**: str\nA special symbol for out-of-vocabulary token.\n**normalize_text**: (`optional`) boolean (default True)\nWhether to apply unicode normalization to text before tokenization.", "source": "github-repos"}
{"code": "def rectify_acquaintance_strategy(\n        circuit: circuits.Circuit,\n        acquaint_first: bool=True\n        ) -> None:\n    \n\n    if not is_acquaintance_strategy(circuit):\n        raise TypeError('not is_acquaintance_strategy(circuit)')\n\n    rectified_moments = []\n    for moment in circuit:\n        gate_type_to_ops = collections.defaultdict(list\n                ) \n        for op in moment.operations:\n            gate_type_to_ops[isinstance(op.gate, AcquaintanceOpportunityGate)\n                    ].append(op)\n        if len(gate_type_to_ops) == 1:\n            rectified_moments.append(moment)\n            continue\n        for acquaint_first in sorted(gate_type_to_ops.keys(),\n                                     reverse=acquaint_first):\n            rectified_moments.append(\n                    ops.Moment(gate_type_to_ops[acquaint_first]))\n    circuit._moments = rectified_moments", "docstring": "Splits moments so that they contain either only acquaintance gates\nor only permutation gates. Orders resulting moments so that the first one\nis of the same type as the previous one.\n\nArgs:\ncircuit: The acquaintance strategy to rectify.\nacquaint_first: Whether to make acquaintance moment first in when\nsplitting the first mixed moment.", "source": "juraj-google-style"}
{"code": "def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):\n        \n        if domain not in self.domains:\n            self.register_domain(domain=domain)\n        self.domains[domain].register_entity(entity_value=entity_value,\n                                             entity_type=entity_type,\n                                             alias_of=alias_of)", "docstring": "Register an entity to be tagged in potential parse results.\n\nArgs:\nentity_value(str): the value/proper name of an entity instance\n(Ex: \"The Big Bang Theory\")\nentity_type(str): the type/tag of an entity instance (Ex: \"Television Show\")\ndomain(str): a string representing the domain you wish to add the entity to", "source": "juraj-google-style"}
{"code": "class ActivityRegularization(Layer):\n\n    def __init__(self, l1=0.0, l2=0.0, **kwargs):\n        super(ActivityRegularization, self).__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n        self.supports_masking = True\n        self.l1 = l1\n        self.l2 = l2\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = {'l1': self.l1, 'l2': self.l2}\n        base_config = super(ActivityRegularization, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "Layer that applies an update to the cost function based input activity.\n\nArgs:\nl1: L1 regularization factor (positive float).\nl2: L2 regularization factor (positive float).\n\nInput shape:\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n\nOutput shape:\nSame shape as input.", "source": "github-repos"}
{"code": "def SvelteComponent(name, path):\n    if (path[(- 3):] == '.js'):\n        js_path = path\n    elif (path[(- 5):] == '.html'):\n        print('Trying to build svelte component from html...')\n        js_path = build_svelte(path)\n    js_content = read(js_path, mode='r')\n\n    def inner(data):\n        id_str = js_id(name)\n        html = _template.replace('$js', js_content).replace('$name', name).replace('$data', json.dumps(data)).replace('$id', id_str)\n        _display_html(html)\n    return inner", "docstring": "Display svelte components in iPython.\n\nArgs:\nname: name of svelte component (must match component filename when built)\npath: path to compile svelte .js file or source svelte .html file.\n(If html file, we try to call svelte and build the file.)\n\nReturns:\nA function mapping data to a rendered svelte component in ipython.", "source": "codesearchnet"}
{"code": "def get_path(url):\n    url = urlsplit(url)\n    path = url.path\n    if url.query:\n        path += '?{}'.format(url.query)\n    return path", "docstring": "Get the path from a given url, including the querystring.\n\nArgs:\nurl (str)\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def clips(self, **kwargs):\n    path = self._get_id_path('clips')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get related clips and trailers for a movie specified by id\nfrom the API.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def parse(self, line):\n    tree = list(self.parser.raw_parse(line))[0]\n    tree = tree[0]\n    return tree", "docstring": "Returns tree objects from a sentence\n\nArgs:\nline: Sentence to be parsed into a tree\n\nReturns:\nTree object representing parsed sentence\nNone if parse fails", "source": "codesearchnet"}
{"code": "def splitext(path):\n    (parent_path, pathname) = split(path)\n    if (pathname.startswith('.') and (pathname.count('.') == 1)):\n        return (path, '')\n    if ('.' not in pathname):\n        return (path, '')\n    (pathname, ext) = pathname.rsplit('.', 1)\n    path = join(parent_path, pathname)\n    return (path, ('.' + ext))", "docstring": "Split the extension from the path.\n\nArguments:\npath (str): A path to split.\n\nReturns:\n(str, str): A tuple containing the path and the extension.\n\nExample:\n>>> splitext('baz.txt')\n('baz', '.txt')\n>>> splitext('foo/bar/baz.txt')\n('foo/bar/baz', '.txt')\n>>> splitext('foo/bar/.foo')\n('foo/bar/.foo', '')", "source": "codesearchnet"}
{"code": "def fill_wildcards(self, field=None, value=0):\n        \n        if field in [None, 'wildcards'] or isinstance(value, Pad):\n            return\n\n        default_value = getattr(Match, field)\n        if isinstance(default_value, IPAddress):\n            if field == 'nw_dst':\n                shift = FlowWildCards.OFPFW_NW_DST_SHIFT\n                base_mask = FlowWildCards.OFPFW_NW_DST_MASK\n            else:\n                shift = FlowWildCards.OFPFW_NW_SRC_SHIFT\n                base_mask = FlowWildCards.OFPFW_NW_SRC_MASK\n\n            \n            \n            \n            self.wildcards &= FlowWildCards.OFPFW_ALL ^ base_mask\n\n            \n            \n            \n            \n            \n            wildcard = (value.max_prefix - value.netmask) << shift\n            self.wildcards |= wildcard\n        else:\n            wildcard_field = \"OFPFW_{}\".format(field.upper())\n            wildcard = getattr(FlowWildCards, wildcard_field)\n\n            if value == default_value and not (self.wildcards & wildcard) or \\\n               value != default_value and (self.wildcards & wildcard):\n                self.wildcards ^= wildcard", "docstring": "Update wildcards attribute.\n\nThis method update a wildcards considering the attributes of the\ncurrent instance.\n\nArgs:\nfield (str): Name of the updated field.\nvalue (GenericType): New value used in the field.", "source": "juraj-google-style"}
{"code": "def size(self, name=None):\n    if name is None:\n        name = '%s_size' % self._name\n    return gen_data_flow_ops.stage_size(name=name, shared_name=self._name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)", "docstring": "Returns the number of elements in the staging area.\n\nArgs:\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"}
{"code": "def report_fhir_path_error(self, element_path: str, fhir_path_constraint: str, msg: str) -> None:", "docstring": "Reports a FHIRPath constraint error during validation and/or encoding.\n\nThe base implementation logs to the `error` context and raises `e` by\ndefault. Subclasses should override this behavior as necessary.\n\nArgs:\nelement_path: The path to the field that the constraint is defined on.\nfhir_path_constraint: The FHIRPath constraint expression.\nmsg: The error message produced.", "source": "github-repos"}
{"code": "def result_to_dict(raw_result):\n    result = {}\n    for (channel_index, channel) in enumerate(raw_result):\n        (channel_id, channel_name) = (channel[0], channel[1])\n        channel_result = {'id': channel_id, 'name': channel_name, 'movies': []}\n        for movie in channel[2]:\n            channel_result['movies'].append({'title': movie[1], 'start_time': datetime.fromtimestamp(movie[2]), 'end_time': datetime.fromtimestamp((movie[2] + movie[3])), 'inf': (True if movie[3] else False)})\n        result[channel_id] = channel_result\n    return result", "docstring": "Parse raw result from fetcher into readable dictionary\n\nArgs:\nraw_result (dict) - raw data from `fetcher`\n\nReturns:\ndict - readable dictionary", "source": "codesearchnet"}
{"code": "def filter_by_moys(self, moys):\n        \n        t_s = 60 / self.header.analysis_period.timestep\n        st_ind = self.header.analysis_period.st_time.moy / t_s\n        if self.header.analysis_period.is_reversed is False:\n            _filt_indices = [int(moy / t_s - st_ind) for moy in moys]\n        else:\n            if self.header.analysis_period.is_leap_year is False:\n                eoy_ind = 8759 * self.header.analysis_period.timestep - st_ind\n            else:\n                eoy_ind = 8783 * self.header.analysis_period.timestep - st_ind\n            _filt_indices = []\n            for moy in moys:\n                ind = moy / t_s\n                if ind > st_ind:\n                    _filt_indices.append(int(ind - st_ind))\n                else:\n                    _filt_indices.append(int(ind + eoy_ind))\n\n        _filt_values = [self._values[i] for i in _filt_indices]\n        _filt_datetimes = [self.datetimes[i] for i in _filt_indices]\n        _filt_header = self.header.duplicate()\n        coll = HourlyDiscontinuousCollection(_filt_header, _filt_values, _filt_datetimes)\n        coll._validated_a_period = True\n        return coll", "docstring": "Filter the Data Collection based on a list of minutes of the year.\n\nArgs:\nmoys: A List of minutes of the year [0..8759 * 60]\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def CsvToTable(self, buf, header=True, separator=','):\n    self.Reset()\n    header_row = self.row_class()\n    if header:\n        line = buf.readline()\n        header_str = ''\n        while (not header_str):\n            header_str = line.split('\n            if (not header_str):\n                line = buf.readline()\n        header_list = header_str.split(separator)\n        header_length = len(header_list)\n        for entry in header_list:\n            entry = entry.strip()\n            if (entry in header_row):\n                raise TableError(('Duplicate header entry %r.' % entry))\n            header_row[entry] = entry\n        header_row.row = 0\n        self._table[0] = header_row\n    for line in buf:\n        if line.startswith('\n            continue\n        lst = line.split(separator)\n        lst = [l.strip() for l in lst]\n        if (header and (len(lst) != header_length)):\n            continue\n        if (not header):\n            header_row = self.row_class()\n            header_length = len(lst)\n            header_row.values = dict(zip(range(header_length), range(header_length)))\n            self._table[0] = header_row\n            header = True\n            continue\n        new_row = self.NewRow()\n        new_row.values = lst\n        header_row.row = (self.size + 1)\n        self._table.append(new_row)\n    return self.size", "docstring": "Parses buffer into tabular format.\n\nStrips off comments (preceded by '#').\nOptionally parses and indexes by first line (header).\n\nArgs:\nbuf: String file buffer containing CSV data.\nheader: Is the first line of buffer a header.\nseparator: String that CSV is separated by.\n\nReturns:\nint, the size of the table created.\n\nRaises:\nTableError: A parsing error occurred.", "source": "codesearchnet"}
{"code": "def are_equivalent_xml(a_xml, b_xml):\n    return are_equivalent_pyxb(d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml))", "docstring": "Check if two ReplicationPolicy XML docs are semantically equivalent.\n\nThe ReplicationPolicy XML docs are normalized before comparison.\n\nArgs:\na_xml, b_xml: ReplicationPolicy XML docs to compare\n\nReturns:\nbool: ``True`` if the resulting policies for the two objects are semantically\nequivalent.", "source": "codesearchnet"}
{"code": "def add_prefix(self, ns_uri, prefix, set_as_preferred=False):\n    assert prefix\n    ni = self.__lookup_uri(ns_uri)\n    self.__check_prefix_conflict(ni, prefix)\n    ni.prefixes.add(prefix)\n    self.__prefix_map[prefix] = ni\n    if set_as_preferred:\n        ni.preferred_prefix = prefix", "docstring": "Adds prefix for the given namespace URI.  The namespace must already\nexist in this set.  If set_as_preferred is True, also set this\nnamespace as the preferred one.\n\n``prefix`` must be non-None; a default preference can't be set this way.\nSee :meth:`set_preferred_prefix_for_namespace` for that.\n\nArgs:\nns_uri (str): The namespace URI to add the prefix to\nprefix (str): The prefix to add (not None)\nset_as_preferred (bool): Whether to set the new prefix as preferred\n\nRaises:\nNamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set", "source": "codesearchnet"}
{"code": "def offsets(self, group=None):\n    if (not group):\n        return {'fetch': self.offsets('fetch'), 'commit': self.offsets('commit'), 'task_done': self.offsets('task_done'), 'highwater': self.offsets('highwater')}\n    else:\n        return dict(deepcopy(getattr(self._offsets, group)))", "docstring": "Get internal consumer offset values\n\nKeyword Arguments:\ngroup: Either \"fetch\", \"commit\", \"task_done\", or \"highwater\".\nIf no group specified, returns all groups.\n\nReturns:\nA copy of internal offsets struct", "source": "codesearchnet"}
{"code": "def _get_dtype_and_weakness(x):\n    if isinstance(x, weak_tensor.WeakTensor):\n        return (x.dtype, True)\n    if isinstance(x, dtypes.DType):\n        return (x, False)\n    tf_dtype = getattr(x, 'dtype', None)\n    if isinstance(tf_dtype, dtypes.DType):\n        return (tf_dtype, False)\n    if isinstance(x, (np.ndarray, np.generic)) or isinstance(tf_dtype, np.dtype):\n        infer_dtype = dtypes.as_dtype(tf_dtype)\n        return (infer_dtype, False)\n    if isinstance(x, (bytes, str)) or tf_dtype in _all_str_dtypes:\n        return _str\n    try:\n        if x in _NP_TO_TF:\n            return (_NP_TO_TF[x], False)\n    except TypeError:\n        pass\n    if isinstance(x, bool) or x == bool:\n        return _b8\n    if isinstance(x, _pi):\n        if x < np.iinfo(np.int32).min or x > np.iinfo(np.int32).max:\n            raise OverflowError(f'Python int {x} too large to convert to np.int32')\n        return _i32w\n    if x == int:\n        return _i32w\n    if isinstance(x, _pf) or x == float:\n        return _f32w\n    if isinstance(x, _pc) or x == complex:\n        return _c128w\n    if isinstance(x, tensor_shape.TensorShape):\n        return _i32\n    if isinstance(x, np.dtype):\n        try:\n            np_dtype = dtypes.as_dtype(x)\n            return (np_dtype, False)\n        except TypeError as exc:\n            raise NotImplementedError(f'Auto dtype conversion semantics does not support {x}. Try using a NumPy built-in dtype objects or cast them explicitly.') from exc\n    raise NotImplementedError(f'Auto dtype conversion semantics does not support {type(x)} type.')", "docstring": "Returns a TF type and weak type information from x.\n\nArgs:\nx: an input scalar, array or a NumPy/TF/Python dtype.\n\nRaises:\nOverflowError: if Python int x is too large to convert to int32.\nNotImplementedError: when x is an unsupported input type.\n\nReturns:\nTF type and weak type information inferred from x in the form of\n(dtype, bool).", "source": "github-repos"}
{"code": "def napoleon_to_sphinx(docstring, **config_params):\n    if ('napoleon_use_param' not in config_params):\n        config_params['napoleon_use_param'] = False\n    if ('napoleon_use_rtype' not in config_params):\n        config_params['napoleon_use_rtype'] = False\n    config = Config(**config_params)\n    return str(GoogleDocstring(docstring, config))", "docstring": "Convert napoleon docstring to plain sphinx string.\n\nArgs:\ndocstring (str): Docstring in napoleon format.\n**config_params (dict): Whatever napoleon doc configuration you want.\n\nReturns:\nstr: Sphinx string.", "source": "codesearchnet"}
{"code": "def enter_cond_section(self, section_id):\n    assert section_id not in self.cond_entry\n    assert section_id not in self.cond_leaves\n    self.cond_leaves[section_id] = []", "docstring": "Enters a conditional section.\n\nConditional sections define an entry node, and one or more branches.\n\nArgs:\nsection_id: Hashable, the same node that will be used in calls to the\nsection_id arg passed to new_cond_branch", "source": "github-repos"}
{"code": "def run(self, text):\n    for regex in self.regexes:\n        text = regex.sub(self.repl, text)\n    return text", "docstring": "Run each regex substitution on ``text``.\n\nArgs:\ntext (string): the input text.\n\nReturns:\nstring: text after all substitutions have been sequentially\napplied.", "source": "codesearchnet"}
{"code": "def emit_tid(self, name, pid, tid):\n    event = {}\n    event['name'] = 'thread_name'\n    event['ph'] = 'M'\n    event['pid'] = pid\n    event['tid'] = tid\n    event['args'] = {'name': name}\n    self._metadata.append(event)", "docstring": "Adds a thread metadata event to the trace.\n\nArgs:\nname:  The thread name as a string.\npid:  Identifier of the process as an integer.\ntid:  Identifier of the thread as an integer.", "source": "github-repos"}
{"code": "def make_df_from_batch(batch_name, batch_col='b01', reader=None, reader_label=None):\n    batch_name = batch_name\n    batch_col = batch_col\n    logger.debug(f'batch_name, batch_col: {batch_name}, {batch_col}')\n    if (reader is None):\n        reader_obj = get_db_reader(reader_label)\n        reader = reader_obj()\n    srnos = reader.select_batch(batch_name, batch_col)\n    logger.debug(('srnos:' + str(srnos)))\n    info_dict = _create_info_dict(reader, srnos)\n    info_df = pd.DataFrame(info_dict)\n    info_df = info_df.sort_values(['groups', 'filenames'])\n    info_df = _make_unique_groups(info_df)\n    info_df['labels'] = info_df['filenames'].apply(create_labels)\n    info_df.set_index('filenames', inplace=True)\n    return info_df", "docstring": "Create a pandas DataFrame with the info needed for ``cellpy`` to load\nthe runs.\n\nArgs:\nbatch_name (str): Name of the batch.\nbatch_col (str): The column where the batch name is in the db.\nreader (method): the db-loader method.\nreader_label (str): the label for the db-loader (if db-loader method is\nnot given)\n\nReturns: info_df (pandas DataFrame)", "source": "codesearchnet"}
{"code": "def from_file(cls, filename):\n    with open(filename, 'r') as stream:\n        data = yaml.load(stream, Loader=yaml.SafeLoader)\n        notes = data.get('notes')\n        v_type = data.get('type')\n        track = data.get('track')\n        xargs = {}\n        if track:\n            if (type(track) is str):\n                track = [track]\n            xargs['track'] = track\n        vaspmeta = VASPMeta(data['title'], data['description'], data['status'], notes=notes, type=v_type, **xargs)\n    return vaspmeta", "docstring": "Create a VASPMeta object by reading a `vaspmeta.yaml` file\n\nArgs:\nfilename (Str): filename to read in.\n\nReturns:\n(vasppy.VASPMeta): the VASPMeta object", "source": "codesearchnet"}
{"code": "def getsize(self, path):\n        \n        try:\n            file_obj = self.filesystem.resolve(path)\n            if (self.filesystem.ends_with_path_separator(path) and\n                    S_IFMT(file_obj.st_mode) != S_IFDIR):\n                error_nr = (errno.EINVAL if self.filesystem.is_windows_fs\n                            else errno.ENOTDIR)\n                self.filesystem.raise_os_error(error_nr, path)\n            return file_obj.st_size\n        except IOError as exc:\n            raise os.error(exc.errno, exc.strerror)", "docstring": "Return the file object size in bytes.\n\nArgs:\npath:  path to the file object.\n\nReturns:\nfile size in bytes.", "source": "juraj-google-style"}
{"code": "def from_config(cls, config, custom_objects=None):\n    if 'initial_accumulator_value' not in config:\n        config['initial_accumulator_value'] = 0.1\n    if 'lr' in config:\n        config['learning_rate'] = config.pop('lr')\n    return cls(**config)", "docstring": "Creates an optimizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same optimizer from the config\ndictionary.\n\nArgs:\nconfig: A Python dictionary, typically the output of get_config.\ncustom_objects: A Python dictionary mapping names to additional Python\nobjects used to create this optimizer, such as a function used for a\nhyperparameter.\n\nReturns:\nAn optimizer instance.", "source": "github-repos"}
{"code": "def fit_freq_std_dev(self, training_signal):\n        \n\n        window_length = len(self.window)\n        window_weight = sum(self.window)\n        num_of_windows = len(training_signal) - window_length - 1\n        mean = np.zeros(int(window_length / 2) + 1)\n        pow = np.zeros(int(window_length / 2) + 1)\n        temp = np.zeros(int(window_length / 2) + 1)\n        rfft = np.fft.rfft(training_signal[0:0 + window_length] * self.window)\n        max = np.abs(rfft) / window_weight\n        min = max\n\n        for i in range(0, num_of_windows):\n            rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window)\n            temp = np.abs(rfft) / window_weight\n            max = np.maximum(temp, max)\n            min = np.minimum(temp, min)\n            mean = mean + temp\n            pow = pow + np.power(temp, 2)\n\n        mean = mean / num_of_windows\n        pow = pow / num_of_windows\n        std_dev = np.sqrt(pow - np.power(mean, 2))\n        self.mask_top = mean + self.gain * std_dev\n        self.mask_bottom = np.maximum(mean - self.gain * std_dev,\n                                      np.zeros(int(window_length / 2) + 1))", "docstring": "Defines a spectral mask based on training data using the standard deviation values of\neach frequency component\n\nArgs:\ntraining_signal: Training data", "source": "juraj-google-style"}
{"code": "def GetBudget(self, client_customer_id, budget_id):\n    \n    self.client.SetClientCustomerId(client_customer_id)\n    selector = {\n        'fields': ['BudgetId', 'BudgetName', 'BudgetStatus', 'Amount',\n                   'DeliveryMethod', 'BudgetReferenceCount',\n                   'IsBudgetExplicitlyShared'],\n        'predicates': [\n            {\n                'field': 'BudgetId',\n                'operator': 'EQUALS',\n                'values': [budget_id]\n            }\n        ]\n    }\n    budgets = self.client.GetService('BudgetService').get(selector)\n\n    if int(budgets['totalNumEntries']) > 0:\n      return budgets['entries'][0]\n    else:\n      return None", "docstring": "Return a Budget with the associated budgetId.\n\nArgs:\nclient_customer_id: str Client Customer Id to which the budget belongs.\nbudget_id: str id of the budget we want to examine.\n\nReturns:\nBudget A Budget data object.", "source": "juraj-google-style"}
{"code": "def update_connection_public_key(self, connection_id, public_key):\n    if (connection_id in self._connections):\n        connection_info = self._connections[connection_id]\n        self._connections[connection_id] = ConnectionInfo(connection_info.connection_type, connection_info.connection, connection_info.uri, connection_info.status, public_key)\n    else:\n        LOGGER.debug('Could not update the public key %s for connection_id %s. The connection does not exist.', public_key, connection_id)", "docstring": "Adds the public_key to the connection definition.\n\nArgs:\nconnection_id (str): The identifier for the connection.\npublic_key (str): The public key used to enforce permissions on\nconnections.", "source": "codesearchnet"}
{"code": "def _total_variation_np(self, x_np):\n    dim = len(x_np.shape)\n    if dim == 3:\n        dif1 = x_np[1:, :, :] - x_np[:-1, :, :]\n        dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]\n        sum_axis = None\n    elif dim == 4:\n        dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]\n        dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]\n        sum_axis = (1, 2, 3)\n    else:\n        pass\n    tot_var = np.sum(np.abs(dif1), axis=sum_axis) + np.sum(np.abs(dif2), axis=sum_axis)\n    return tot_var", "docstring": "Calculate the total variation of x_np using numpy.\nThis implements the same function as TensorFlow but\nusing numpy instead.\n\nArgs:\nx_np: Numpy array with 3 or 4 dimensions.", "source": "github-repos"}
{"code": "def label_TM_tmhmm_residue_numbers_and_leaflets(tmhmm_seq):\n    \n\n    TM_number_dict = {}\n    T_index = []\n    T_residue = []\n\n    residue_count = 1\n    for residue_label in tmhmm_seq:\n        if residue_label == 'T':\n            T_residue.append(residue_count)\n\n        residue_count = residue_count + 1\n    TM_number_dict.update({'T_residue': T_residue})\n\n    \n    T_residue_list = TM_number_dict['T_residue']\n\n    count = 0\n    max_count = len(T_residue_list) - 1\n    TM_helix_count = 0\n    TM_boundary_dict = {}\n\n    while count <= max_count:\n        \n        if count == 0:\n            TM_start = T_residue_list[count]\n            count = count + 1\n            continue\n        \n        elif count == max_count:\n            TM_end = T_residue_list[count]\n            TM_helix_count = TM_helix_count + 1\n            TM_boundary_dict.update({'TM_helix_' + str(TM_helix_count): [TM_start, TM_end]})\n            break\n        \n        elif T_residue_list[count] != T_residue_list[count + 1] - 1:\n            TM_end = T_residue_list[count]\n            TM_helix_count = TM_helix_count + 1\n            TM_boundary_dict.update({'TM_helix_' + str(TM_helix_count): [TM_start, TM_end]})\n            \n            TM_start = T_residue_list[count + 1]\n        count = count + 1\n    \n    leaflet_dict = {}\n    for leaflet in ['O', 'I']:\n        leaflet_list = []\n        for TM_helix, TM_residues in TM_boundary_dict.items():\n            for residue_num in TM_residues:\n                tmhmm_seq_index = residue_num - 1\n                previous_residue = tmhmm_seq_index - 1\n                next_residue = tmhmm_seq_index + 1\n                \n                if tmhmm_seq[previous_residue] == leaflet or tmhmm_seq[next_residue] == leaflet:\n                    leaflet_list.append(residue_num)\n        leaflet_dict.update({'tmhmm_leaflet_' + leaflet: leaflet_list})\n\n    return TM_boundary_dict, leaflet_dict", "docstring": "Determine the residue numbers of the TM-helix residues that cross the membrane and label them by leaflet.\n\nArgs:\ntmhmm_seq: g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm']\n\nReturns:\nleaflet_dict: a dictionary with leaflet_variable : [residue list] where the variable is inside or outside\nTM_boundary dict: outputs a dictionar with : TM helix number : [TM helix residue start , TM helix residue end]\n\nTODO:\nuntested method!", "source": "juraj-google-style"}
{"code": "def fit_circular_gaussian(samples, high=np.pi, low=0):\n    \n    cl_func = SimpleCLFunction.from_string()\n\n    def run_cl(samples):\n        data = {'samples': Array(samples, 'mot_float_type'),\n                'means': Zeros(samples.shape[0], 'mot_float_type'),\n                'stds': Zeros(samples.shape[0], 'mot_float_type'),\n                'nmr_samples': Scalar(samples.shape[1]),\n                'low': Scalar(low),\n                'high': Scalar(high),\n                }\n\n        cl_func.evaluate(data, samples.shape[0])\n        return data['means'].get_data(), data['stds'].get_data()\n\n    if len(samples.shape) == 1:\n        mean, std = run_cl(samples[None, :])\n        return mean[0], std[0]\n    return run_cl(samples)", "docstring": "Compute the circular mean for samples in a range\n\nArgs:\nsamples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all\nvalues. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.\nhigh (float): The maximum wrap point\nlow (float): The minimum wrap point", "source": "juraj-google-style"}
{"code": "def modutf7_encode(data: str) -> bytes:\n    ret = bytearray()\n    is_usascii = True\n    encode_start = None\n    for (i, symbol) in enumerate(data):\n        charpoint = ord(symbol)\n        if is_usascii:\n            if (charpoint == 38):\n                ret.extend(b'&-')\n            elif (32 <= charpoint <= 126):\n                ret.append(charpoint)\n            else:\n                encode_start = i\n                is_usascii = False\n        elif (32 <= charpoint <= 126):\n            to_encode = data[encode_start:i]\n            encoded = _modified_b64encode(to_encode)\n            ret.append(38)\n            ret.extend(encoded)\n            ret.extend((45, charpoint))\n            is_usascii = True\n    if (not is_usascii):\n        to_encode = data[encode_start:]\n        encoded = _modified_b64encode(to_encode)\n        ret.append(38)\n        ret.extend(encoded)\n        ret.append(45)\n    return bytes(ret)", "docstring": "Encode the string using modified UTF-7.\n\nArgs:\ndata: The input string to encode.", "source": "codesearchnet"}
{"code": "def get_merged_env(self, include_os=False):\n    env = {}\n    if include_os:\n        env.update(os.environ.copy())\n    for level in range(3):\n        env.update(self.pipeline.data.env_list[level].copy())\n    return env", "docstring": "Copying and merging environment variables.\n\nArgs:\ninclude_os (bool): when true then include the environment variables (default: False)\n\nReturns:\ndict: environment variables as defined in the pipeline\n(optional including system environment variables).", "source": "codesearchnet"}
{"code": "def sync_model(self, comment='', compact_central=False, release_borrowed=True, release_workset=True, save_local=False):\n    self._add_entry(templates.FILE_SYNC_START)\n    if compact_central:\n        self._add_entry(templates.FILE_SYNC_COMPACT)\n    if release_borrowed:\n        self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)\n    if release_workset:\n        self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)\n    if save_local:\n        self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)\n    self._add_entry(templates.FILE_SYNC_COMMENT_OK.format(sync_comment=comment))", "docstring": "Append a sync model entry to the journal.\n\nThis instructs Revit to sync the currently open workshared model.\n\nArgs:\ncomment (str): comment to be provided for the sync step\ncompact_central (bool): if True compacts the central file\nrelease_borrowed (bool): if True releases the borrowed elements\nrelease_workset (bool): if True releases the borrowed worksets\nsave_local (bool): if True saves the local file as well", "source": "codesearchnet"}
{"code": "def paint(self):\n    snippet = {'line-opacity': VectorStyle.get_style_value(self.opacity), 'line-color': VectorStyle.get_style_value(self.color), 'line-width': VectorStyle.get_style_value(self.width)}\n    if self.translate:\n        snippet['line-translate'] = self.translate\n    if self.dasharray:\n        snippet['line-dasharray'] = VectorStyle.get_style_value(self.dasharray)\n    return snippet", "docstring": "Renders a javascript snippet suitable for use as a mapbox-gl line paint entry\n\nReturns:\nA dict that can be converted to a mapbox-gl javascript paint snippet", "source": "codesearchnet"}
{"code": "def get_properties_of_kind(kind, start=None, end=None):\n  \n  q = Property.query(ancestor=Property.key_for_kind(kind))\n  if start is not None and start != '':\n    q = q.filter(Property.key >= Property.key_for_property(kind, start))\n  if end is not None:\n    if end == '':\n      return []\n    q = q.filter(Property.key < Property.key_for_property(kind, end))\n\n  return [Property.key_to_property(k) for k in q.iter(keys_only=True)]", "docstring": "Return all properties of kind in the specified range.\n\nNOTE: This function does not return unindexed properties.\n\nArgs:\nkind: name of kind whose properties you want.\nstart: only return properties >= start if start is not None.\nend: only return properties < end if end is not None.\n\nReturns:\nA list of property names of kind between the (optional) start and end\nvalues.", "source": "juraj-google-style"}
{"code": "def merge(self, obj):\n    if (obj.id in self.cache):\n        self.cache[obj.id].merge(obj)\n    else:\n        self.cache[obj.id] = obj\n    return self.cache[obj.id]", "docstring": "Add a given object to the cache, or update an existing entry to include more fields.\n\nArgs:\nobj (SkypeObj): object to add to the cache", "source": "codesearchnet"}
{"code": "def ContainsAll(self, *values):\n    \n    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ALL')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains all\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def _detect(self):\n    results = []\n    for contract in self.contracts:\n        shadows = self.detect_shadowing_definitions(contract)\n        if shadows:\n            for shadow in shadows:\n                local_parent_name = shadow[1]\n                local_variable = shadow[2]\n                overshadowed = shadow[3]\n                info = '{}.{}.{} (local variable @ {}) shadows:\\n'.format(contract.name, local_parent_name, local_variable.name, local_variable.source_mapping_str)\n                for overshadowed_entry in overshadowed:\n                    info += '\\t- {}.{} ({} @ {})\\n'.format(overshadowed_entry[1], overshadowed_entry[2], overshadowed_entry[0], overshadowed_entry[2].source_mapping_str)\n                json = self.generate_json_result(info)\n                self.add_variable_to_json(local_variable, json)\n                for overshadowed_entry in overshadowed:\n                    if (overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION, self.OVERSHADOWED_MODIFIER, self.OVERSHADOWED_EVENT]):\n                        self.add_function_to_json(overshadowed_entry[2], json)\n                    elif (overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE):\n                        self.add_variable_to_json(overshadowed_entry[2], json)\n                results.append(json)\n    return results", "docstring": "Detect shadowing local variables\n\nRecursively visit the calls\nReturns:\nlist: {'vuln', 'filename,'contract','func', 'shadow'}", "source": "codesearchnet"}
{"code": "def CheckCompletedBlocks(self, filename, error):\n    \n    \n    \n    \n    for obj in self.stack:\n      if isinstance(obj, _ClassInfo):\n        error(filename, obj.starting_linenum, 'build/class', 5,\n              'Failed to find complete declaration of class %s' %\n              obj.name)\n      elif isinstance(obj, _NamespaceInfo):\n        error(filename, obj.starting_linenum, 'build/namespaces', 5,\n              'Failed to find complete declaration of namespace %s' %\n              obj.name)", "docstring": "Checks that all classes and namespaces have been completely parsed.\n\nCall this when all lines in a file have been processed.\nArgs:\nfilename: The name of the current file.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def update_asset(self, asset, asset_id, asset_name, asset_type):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if asset == 'PHONE':\n            return self.tc_requests.update_victim_phone_asset(self.unique_id, asset_id, asset_name)\n        if asset == 'EMAIL':\n            return self.tc_requests.update_victim_email_asset(\n                self.unique_id, asset_id, asset_name, asset_type\n            )\n        if asset == 'NETWORK':\n            return self.tc_requests.update_victim_network_asset(\n                self.unique_id, asset_id, asset_name, asset_type\n            )\n        if asset == 'SOCIAL':\n            return self.tc_requests.update_victim_social_asset(\n                self.unique_id, asset_id, asset_name, asset_type\n            )\n        if asset == 'WEB':\n            return self.tc_requests.update_victim_web_asset(self.unique_id, asset_id, asset_name)\n\n        self._tcex.handle_error(\n            925, ['asset_type', 'update_asset', 'asset_type', 'asset_type', asset_type]\n        )\n        return None", "docstring": "Update a asset of a Victim\n\nValid asset_type:\n+ PHONE\n+ EMAIL\n+ NETWORK\n+ SOCIAL\n+ WEB\n\nArgs:\nasset:\nasset_name:\nasset_id:\nasset_type: PHONE, EMAIL, NETWORK, SOCIAL, or WEB\n\nReturns:", "source": "juraj-google-style"}
{"code": "def set_cellpydata(self, cellpydata, cycle):\n        \n        self.data = cellpydata\n        self.step_table = self.data.dataset  \n        time_voltage = self.data.get_ocv(direction='up',\n                                         cycles=cycle)\n        time = time_voltage.Step_Time\n        voltage = time_voltage.Voltage\n\n        self.time = np.array(time)\n        self.voltage = np.array(voltage)", "docstring": "Performing fit of the OCV steps in the cycles set by set_cycles()\nfrom the data set by set_data()\n\nr is found by calculating v0 / i_start --> err(r)= err(v0) + err(i_start).\nc is found from using tau / r --> err(c) = err(r) + err(tau)\n\nArgs:\ncellpydata (CellpyData): data object from cellreader\ncycle (int): cycle number to get from CellpyData object\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def call(self, name, *args, **kwargs):\n    \n    payload = name, args, kwargs\n    self._conn.send((self._CALL, payload))\n    return self._receive", "docstring": "Asynchronously call a method of the external environment.\n\nArgs:\nname: Name of the method to call.\n*args: Positional arguments to forward to the method.\n**kwargs: Keyword arguments to forward to the method.\n\nReturns:\nPromise object that blocks and provides the return value when called.", "source": "juraj-google-style"}
{"code": "def record(self, ekey, entry, diff=False):\n        \n        if ekey not in self.entities:\n            self.entities[ekey] = []\n            \n        \n        if diff and len(self.entities[ekey]) > 0:\n            \n            from acorn.logging.diff import cascade, compress\n            sequence = [e[\"c\"] for e in self.entities[ekey]\n                        if e[\"m\"] == entry[\"m\"]]\n            original = cascade(sequence)\n            difference = compress(original, entry[\"c\"])\n            \n            entry[\"c\"] = difference\n\n        self.entities[ekey].append(entry)\n\n        \n        \n        from uuid import UUID\n        uid = None\n        if entry[\"r\"] is not None:\n            uid = entry[\"r\"]\n        elif isinstance(ekey, str):\n            \n            \n            try:\n                uid = str(UUID(ekey))\n            except ValueError: \n                pass\n\n        if uid is not None and isinstance(uid, str):\n            self.log_uuid(uid)\n\n        \n        \n        if entry[\"a\"] is None:\n            return\n                    \n        for larg in entry[\"a\"][\"_\"]:\n            \n            \n            \n            if not isinstance(larg, str):\n                continue\n            \n            try:\n                uid = str(UUID(larg))\n                self.log_uuid(uid)\n            except ValueError:\n                \n                \n                pass\n\n        \n        for key, karg in entry[\"a\"].items():\n            if key == \"_\" or not isinstance(karg, str):\n                \n                continue\n            try:\n                uid = str(UUID(karg))\n                self.log_uuid(uid)\n            except ValueError:\n                pass", "docstring": "Records the specified entry to the key-value store under the specified\nentity key.\n\nArgs:\nekey (str): fqdn/uuid of the method/object to store the entry for.\nentry (dict): attributes and values gleaned from the execution.\ndiff (bool): when True, the \"c\" element of `entry` will be diffed\nagainst previous entries under the same `ekey` if their method\n(attribute \"m\") matches.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               map_values,\n               validate_args=False,\n               name='categorical_to_discrete'):\n    \n    with tf.name_scope(name):\n      map_values = tf.convert_to_tensor(value=map_values, name='map_values')\n      assertions = _maybe_check_valid_map_values(map_values, validate_args)\n      if assertions:\n        with tf.control_dependencies(assertions):\n          map_values = tf.identity(map_values)\n      self._map_values = map_values\n      super(CategoricalToDiscrete, self).__init__(\n          graph_parents=[map_values],\n          forward_min_event_ndims=0,\n          is_constant_jacobian=True,\n          validate_args=validate_args,\n          name=name)", "docstring": "Instantiates `CategoricalToDiscrete` bijector.\n\nArgs:\nmap_values: 1D numerical tensor of discrete values to map to, sorted in\nstrictly increasing order.\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"}
{"code": "def load(self, filename, bs=512):\n    with open(filename, 'rb') as f:\n        f.seek((GPT_HEADER_OFFSET + 12))\n        header_size = struct.unpack('<I', f.read(4))[0]\n        f.seek(GPT_HEADER_OFFSET)\n        header_data = f.read(header_size)\n        self.header = GPT_HEADER(header_data)\n        if (self.header.signature != GPT_SIGNATURE):\n            raise Exception('Invalid GPT signature')\n        self.__load_partition_entries(f, bs)", "docstring": "Loads GPT partition table.\n\nArgs:\nfilename (str): path to file or device to open for reading\nbs (uint): Block size of the volume, default: 512\n\nRaises:\nIOError: If file does not exist or not readable", "source": "codesearchnet"}
{"code": "def create_stack(self, fqn, template, parameters, tags, force_change_set=False, stack_policy=None, **kwargs):\n    logger.debug('Attempting to create stack %s:.', fqn)\n    logger.debug('    parameters: %s', parameters)\n    logger.debug('    tags: %s', tags)\n    if template.url:\n        logger.debug('    template_url: %s', template.url)\n    else:\n        logger.debug('    no template url, uploading template directly.')\n    if force_change_set:\n        logger.debug('force_change_set set to True, creating stack with changeset.')\n        (_changes, change_set_id) = create_change_set(self.cloudformation, fqn, template, parameters, tags, 'CREATE', service_role=self.service_role, **kwargs)\n        self.cloudformation.execute_change_set(ChangeSetName=change_set_id)\n    else:\n        args = generate_cloudformation_args(fqn, parameters, tags, template, service_role=self.service_role, stack_policy=stack_policy)\n        try:\n            self.cloudformation.create_stack(**args)\n        except botocore.exceptions.ClientError as e:\n            if (e.response['Error']['Message'] == 'TemplateURL must reference a valid S3 object to which you have access.'):\n                s3_fallback(fqn, template, parameters, tags, self.cloudformation.create_stack, self.service_role)\n            else:\n                raise", "docstring": "Create a new Cloudformation stack.\n\nArgs:\nfqn (str): The fully qualified name of the Cloudformation stack.\ntemplate (:class:`stacker.providers.base.Template`): A Template\nobject to use when creating the stack.\nparameters (list): A list of dictionaries that defines the\nparameter list to be applied to the Cloudformation stack.\ntags (list): A list of dictionaries that defines the tags\nthat should be applied to the Cloudformation stack.\nforce_change_set (bool): Whether or not to force change set use.\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.", "source": "codesearchnet"}
{"code": "def parse_conf(self, keys=[]):\n    confs = self.app.config.get('WAFFLE_CONFS', {})\n    if (not keys):\n        keys = confs.keys()\n    result = {}\n    for key in keys:\n        if key.startswith('WAFFLE_'):\n            continue\n        if (key not in confs.keys()):\n            continue\n        stored_conf = self.configstore.get(key)\n        if (not stored_conf):\n            value = confs[key].get('default', '')\n            stored_conf = self.configstore.put(key, util.serialize(value))\n            self.configstore.commit()\n        else:\n            value = util.deserialize(stored_conf.get_value())\n        result[stored_conf.get_key()] = value\n    return result", "docstring": "Parse configuration values from the database.\n\nThe extension must have been previously initialized.\n\nIf a key is not found in the database, it will be created with the\ndefault value specified.\n\nArguments:\nkeys (list[str]): list of keys to parse. If the list is empty, then\nall the keys known to the application will be used.\n\nReturns:\ndict of the parsed config values.", "source": "codesearchnet"}
{"code": "class EarlyStoppingCallback(TrainerCallback, ExportableState):\n\n    def __init__(self, early_stopping_patience: int=1, early_stopping_threshold: Optional[float]=0.0):\n        self.early_stopping_patience = early_stopping_patience\n        self.early_stopping_threshold = early_stopping_threshold\n        self.early_stopping_patience_counter = 0\n\n    def check_metric_value(self, args, state, control, metric_value):\n        operator = np.greater if args.greater_is_better else np.less\n        if state.best_metric is None or (operator(metric_value, state.best_metric) and abs(metric_value - state.best_metric) > self.early_stopping_threshold):\n            self.early_stopping_patience_counter = 0\n        else:\n            self.early_stopping_patience_counter += 1\n\n    def on_train_begin(self, args, state, control, **kwargs):\n        if not args.load_best_model_at_end:\n            logger.warning('Using EarlyStoppingCallback without load_best_model_at_end=True. Once training is finished, the best model will not be loaded automatically.')\n        assert args.metric_for_best_model is not None, 'EarlyStoppingCallback requires metric_for_best_model to be defined'\n        assert args.eval_strategy != IntervalStrategy.NO, 'EarlyStoppingCallback requires IntervalStrategy of steps or epoch'\n\n    def on_evaluate(self, args, state, control, metrics, **kwargs):\n        metric_to_check = args.metric_for_best_model\n        if not metric_to_check.startswith('eval_'):\n            metric_to_check = f'eval_{metric_to_check}'\n        metric_value = metrics.get(metric_to_check)\n        if metric_value is None:\n            logger.warning(f'early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping is disabled')\n            return\n        self.check_metric_value(args, state, control, metric_value)\n        if self.early_stopping_patience_counter >= self.early_stopping_patience:\n            control.should_training_stop = True\n\n    def state(self) -> dict:\n        return {'args': {'early_stopping_patience': self.early_stopping_patience, 'early_stopping_threshold': self.early_stopping_threshold}, 'attributes': {'early_stopping_patience_counter': self.early_stopping_patience_counter}}", "docstring": "A [`TrainerCallback`] that handles early stopping.\n\nArgs:\nearly_stopping_patience (`int`):\nUse with `metric_for_best_model` to stop training when the specified metric worsens for\n`early_stopping_patience` evaluation calls.\nearly_stopping_threshold(`float`, *optional*):\nUse with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the\nspecified metric must improve to satisfy early stopping conditions. `\n\nThis callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric\nin [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the\nearly stopping will not occur until the next save step.", "source": "github-repos"}
{"code": "def connect(self, address, port):\n        \n        self.peeraddress = socket.gethostbyname(address)\n        self.peerport = port\n        self.buffer = buffer.LineBuffer()\n        self.handlers = {}\n        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            self.socket.connect((self.peeraddress, self.peerport))\n        except socket.error as x:\n            raise DCCConnectionError(\"Couldn't connect to socket: %s\" % x)\n        self.connected = True\n        self.reactor._on_connect(self.socket)\n        return self", "docstring": "Connect/reconnect to a DCC peer.\n\nArguments:\naddress -- Host/IP address of the peer.\n\nport -- The port number to connect to.\n\nReturns the DCCConnection object.", "source": "juraj-google-style"}
{"code": "def SetRowsCustomProperties(self, rows, custom_properties):\n    \n    if not hasattr(rows, \"__iter__\"):\n      rows = [rows]\n    for row in rows:\n      self.__data[row] = (self.__data[row][0], custom_properties)", "docstring": "Sets the custom properties for given row(s).\n\nCan accept a single row or an iterable of rows.\nSets the given custom properties for all specified rows.\n\nArgs:\nrows: The row, or rows, to set the custom properties for.\ncustom_properties: A string to string dictionary of custom properties to\nset for all rows.", "source": "juraj-google-style"}
{"code": "def _tidy_names(names, nnames, extra_names=None):\n    if ((len(names) < nnames) and (extra_names is not None)):\n        names.extend(extra_names)\n    names.extend(range((nnames - len(names))))\n    del names[nnames:]", "docstring": "Truncate or extend names so that its len is nnames.\n\nThe list is modified, this function returns nothing.\n\nArgs:\nnames (list): list of names.\nnnames (int): desired number of names.\nextra_names (list of str): list of names to be used to extend the list\nif needed. If this list isn't provided, a range is used instead.", "source": "codesearchnet"}
{"code": "def append(self, instruction, qargs=None, cargs=None):\n    qargs = (qargs or [])\n    cargs = (cargs or [])\n    if ((not isinstance(instruction, Instruction)) and hasattr(instruction, 'to_instruction')):\n        instruction = instruction.to_instruction()\n    if (not isinstance(instruction, Instruction)):\n        raise QiskitError('object is not an Instruction.')\n    self._check_dups(qargs)\n    self._check_qargs(qargs)\n    self._check_cargs(cargs)\n    if ((instruction.num_qubits != len(qargs)) or (instruction.num_clbits != len(cargs))):\n        raise QiskitError(('instruction %s with %d qubits and %d clbits cannot be appended onto %d qubits and %d clbits.' % (instruction.name, instruction.num_qubits, instruction.num_clbits, len(qargs), len(cargs))))\n    instruction_context = (instruction, qargs, cargs)\n    self.data.append(instruction_context)\n    for (param_index, param) in enumerate(instruction.params):\n        if isinstance(param, Parameter):\n            current_symbols = self.parameters\n            if (param in current_symbols):\n                self._parameter_table[param].append((instruction, param_index))\n            else:\n                self._parameter_table[param] = [(instruction, param_index)]\n    return instruction", "docstring": "Append an instruction to the end of the circuit, modifying\nthe circuit in place.\n\nArgs:\ninstruction (Instruction or Operator): Instruction instance to append\nqargs (list(tuple)): qubits to attach instruction to\ncargs (list(tuple)): clbits to attach instruction to\n\nReturns:\nInstruction: a handle to the instruction that was just added\n\nRaises:\nQiskitError: if the gate is of a different shape than the wires\nit is being attached to.", "source": "codesearchnet"}
{"code": "def from_petrel(cls, filename, stop=None, points=False, null=None, function=None, include=None, exclude=None, remap=None, ignore=None):\n    result = utils.read_petrel(filename, function=function, remap=remap)\n    data = cls._clean_longitudinal_data(result, null=null)\n    list_of_Intervals = cls._build_list_of_Intervals(data, stop=stop, points=points, include=include, exclude=exclude, ignore=ignore)\n    if list_of_Intervals:\n        return cls(list_of_Intervals)\n    return None", "docstring": "Makes a striplog from a Petrel text file.\n\nReturns:\nstriplog.", "source": "codesearchnet"}
{"code": "def disconnect_sync(self, conn_id):\n        \n\n        done = threading.Event()\n        result = {}\n\n        def disconnect_done(conn_id, adapter_id, status, reason):\n            result['success'] = status\n            result['failure_reason'] = reason\n            done.set()\n\n        self.disconnect_async(conn_id, disconnect_done)\n        done.wait()\n\n        return result", "docstring": "Synchronously disconnect from a connected device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\n\nReturns:\ndict: A dictionary with two elements\n'success': a bool with the result of the connection attempt\n'failure_reason': a string with the reason for the failure if we failed", "source": "juraj-google-style"}
{"code": "def distribute_equally(daily_data, divide=False):\n    \n\n    index = hourly_index(daily_data.index)\n    hourly_data = daily_data.reindex(index)\n    hourly_data = hourly_data.groupby(hourly_data.index.day).transform(\n        lambda x: x.fillna(method='ffill', limit=23))\n\n    if divide:\n        hourly_data /= 24\n\n    return hourly_data", "docstring": "Obtains hourly values by equally distributing the daily values.\n\nArgs:\ndaily_data: daily values\ndivide: if True, divide resulting values by the number of hours in\norder to preserve the daily sum (required e.g. for precipitation).\n\nReturns:\nEqually distributed hourly values.", "source": "juraj-google-style"}
{"code": "def Convert(self, metadata, stat_entry, token=None):\n    return self.BatchConvert([(metadata, stat_entry)], token=token)", "docstring": "Converts StatEntry to ExportedFile.\n\nDoes nothing if StatEntry corresponds to a registry entry and not to a file.\n\nArgs:\nmetadata: ExportedMetadata to be used for conversion.\nstat_entry: StatEntry to be converted.\ntoken: Security token.\n\nReturns:\nList or generator with resulting RDFValues. Empty list if StatEntry\ncorresponds to a registry entry and not to a file.", "source": "codesearchnet"}
{"code": "def ascii_tree(node, get_children, get_description=None):\n    out = io.StringIO()\n    _ascii_tree(out, node, '', '', set(), get_children, get_description)\n    return out.getvalue()", "docstring": "Draw a graph, starting at a given position.\n\nArgs:\nnode: The node from where to draw.\nget_children: The function to call to retrieve children.\nget_description: Optional. A function to call to describe a node.\n\nReturns:\nA string.", "source": "github-repos"}
{"code": "def enable_auto_login(name, password):\n    \n    \n    cmd = ['defaults',\n           'write',\n           '/Library/Preferences/com.apple.loginwindow.plist',\n           'autoLoginUser',\n           name]\n    __salt__['cmd.run'](cmd)\n    current = get_auto_login()\n\n    \n    o_password = _kcpassword(password=password)\n    with salt.utils.files.set_umask(0o077):\n        with salt.utils.files.fopen('/etc/kcpassword', 'w' if six.PY2 else 'wb') as fd:\n            fd.write(o_password)\n\n    return current if isinstance(current, bool) else current.lower() == name.lower()", "docstring": ".. versionadded:: 2016.3.0\n\nConfigures the machine to auto login with the specified user\n\nArgs:\n\nname (str): The user account use for auto login\n\npassword (str): The password to user for auto login\n\n.. versionadded:: 2017.7.3\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.enable_auto_login stevej", "source": "juraj-google-style"}
{"code": "def gather_nd(self, indices, name=None):\n    raise AttributeError", "docstring": "Gather slices from `params` into a Tensor with shape specified by `indices`.\n\nSee tf.gather_nd for details.\n\nArgs:\nindices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\nIndex tensor.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`. Has the same type as `params`.", "source": "github-repos"}
{"code": "def _retrieve_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):\n    for host_id, host in enumerate(hosts):\n        with ops.device(host):\n            for table in table_config:\n                retrieved = table.optimizer._retrieve()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config)\n                if not isinstance(retrieved, tuple):\n                    retrieved = (retrieved,)\n                for i, slot in enumerate(['parameters'] + table.optimizer._slot_names()):\n                    sharded_var = variables[table.name][slot]\n                    if host_id < len(sharded_var.variables):\n                        sharded_var.variables[host_id].assign(retrieved[i])\n                config = None", "docstring": "Retrieve embedding tables from TPU to host memory.\n\nArgs:\nconfig: A serialized TPUEmbeddingConfiguration proto.\nhosts: A list of all the host CPU devices.\nvariables: A dictionary of dictionaries of TPUEmbeddingVariables. First key\nis the table name, second key is 'parameters' or the optimizer slot name.\ntable_config: A list of tf.tpu.experimental.embedding.TableConfig objects.", "source": "github-repos"}
{"code": "def __init__(self, experimenter=None, data=None):\n        \n        super().__init__()\n        self.experimenter = experimenter\n        self.data = data", "docstring": "Create a QueuePropExperimenter with the optional parameters below.\n\nArgs:\nexperimenter (int): Experimenter ID which takes the same form as in\nstruct ofp_experimenter_header.\ndata (bytes): Experimenter defined data.", "source": "juraj-google-style"}
{"code": "def subset(self, ns_uris):\n        \n        sub_ns = NamespaceSet()\n\n        for ns_uri in ns_uris:\n            ni = self.__lookup_uri(ns_uri)\n            new_ni = copy.deepcopy(ni)\n\n            \n            \n            \n            \n            sub_ns._NamespaceSet__add_namespaceinfo(new_ni)\n\n        return sub_ns", "docstring": "Return a subset of this NamespaceSet containing only data for the\ngiven namespaces.\n\nArgs:\nns_uris (iterable): An iterable of namespace URIs which select the\nnamespaces for the subset.\n\nReturns:\nThe subset\n\nRaises:\nNamespaceNotFoundError: If any namespace URIs in `ns_uris` don't\nmatch any namespaces in this set.", "source": "juraj-google-style"}
{"code": "def Head(num_classes=1000, classifier_activation=None, name=None):\n    if name is None:\n        name = str(backend.get_uid('head'))\n\n    def apply(x):\n        x = layers.GlobalAveragePooling2D(name=name + '_head_gap')(x)\n        x = layers.LayerNormalization(epsilon=1e-06, name=name + '_head_layernorm')(x)\n        x = layers.Dense(num_classes, activation=classifier_activation, name=name + '_head_dense')(x)\n        return x\n    return apply", "docstring": "Implementation of classification head of ConvNeXt.\n\nArgs:\nnum_classes: number of classes for Dense layer\nclassifier_activation: activation function for the Dense layer\nname: name prefix\n\nReturns:\nClassification head function.", "source": "github-repos"}
{"code": "def main(argv=None):\n    if argv is None:\n        argv = sys.argv\n    args = parse_args(argv)\n    logging.basicConfig(level=50 - args.verbosity * 10)\n    backup = args.backup or None\n    verbose = args.verbosity > 0\n    changed, errors = merge_pyi.merge_tree(py_path=args.py, pyi_path=args.pyi, backup=backup, verbose=verbose)\n    if changed:\n        print()\n        print('Changed files:')\n        for f in changed:\n            print('  ', f)\n    if errors:\n        print()\n        print('Errors:')\n        for f, err in errors:\n            print()\n            print('File: ', f, err)", "docstring": "Merge source files and a pyi files in a project tree.\n\nArgs:\nargv: Flags and files to process.", "source": "github-repos"}
{"code": "def write(self, face, data, viewport=None, *, alignment=1) -> None:\n        \n\n        if type(data) is Buffer:\n            data = data.mglo\n\n        self.mglo.write(face, data, viewport, alignment)", "docstring": "Update the content of the texture.\n\nArgs:\nface (int): The face to update.\ndata (bytes): The pixel data.\nviewport (tuple): The viewport.\n\nKeyword Args:\nalignment (int): The byte alignment of the pixels.", "source": "juraj-google-style"}
{"code": "def get_items(self, page=1, order_by=None, filters=None):\n    start = ((page - 1) * self.per_page)\n    query = self.get_query()\n    if (order_by is not None):\n        query = query.order_by(self._get_field(order_by))\n    if (filters is not None):\n        query = self._filter(query, filters)\n    return (query.offset(start).limit(self.per_page), self.count(query))", "docstring": "Fetch database for items matching.\n\nArgs:\npage (int):\nwhich page will be sliced\nslice size is ``self.per_page``.\norder_by (str):\na field name to order query by.\nfilters (dict):\na ``filter name``: ``value`` dict.\n\nReturns:\ntuple with:\nitems, sliced by page*self.per_page\ntotal items without slice", "source": "codesearchnet"}
{"code": "def find_contacts(self, geoms_1, geoms_2):\n        \n        for contact in self.sim.data.contact[0 : self.sim.data.ncon]:\n            \n            c1_in_g1 = self.sim.model.geom_id2name(contact.geom1) in geoms_1\n            c2_in_g2 = self.sim.model.geom_id2name(contact.geom2) in geoms_2\n            \n            c2_in_g1 = self.sim.model.geom_id2name(contact.geom2) in geoms_1\n            c1_in_g2 = self.sim.model.geom_id2name(contact.geom1) in geoms_2\n            if (c1_in_g1 and c2_in_g2) or (c1_in_g2 and c2_in_g1):\n                yield contact", "docstring": "Finds contact between two geom groups.\n\nArgs:\ngeoms_1: a list of geom names (string)\ngeoms_2: another list of geom names (string)\n\nReturns:\niterator of all contacts between @geoms_1 and @geoms_2", "source": "juraj-google-style"}
{"code": "def from_json_keyfile_name(cls, filename, scopes='', token_uri=None, revoke_uri=None):\n    with open(filename, 'r') as file_obj:\n        client_credentials = json.load(file_obj)\n    return cls._from_parsed_json_keyfile(client_credentials, scopes, token_uri=token_uri, revoke_uri=revoke_uri)", "docstring": "Factory constructor from JSON keyfile by name.\n\nArgs:\nfilename: string, The location of the keyfile.\nscopes: List or string, (Optional) Scopes to use when acquiring an\naccess token.\ntoken_uri: string, URI for OAuth 2.0 provider token endpoint.\nIf unset and not present in the key file, defaults\nto Google's endpoints.\nrevoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\nIf unset and not present in the key file, defaults\nto Google's endpoints.\n\nReturns:\nServiceAccountCredentials, a credentials object created from\nthe keyfile.\n\nRaises:\nValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\nKeyError, if one of the expected keys is not present in\nthe keyfile.", "source": "codesearchnet"}
{"code": "def is_valid_geometry(geometry):\n    \n    if isinstance(geometry, Polygon) or isinstance(geometry, MultiPolygon):\n        return True\n    else:\n        return False", "docstring": "Confirm that the geometry type is of type Polygon or MultiPolygon.\n\nArgs:\ngeometry (BaseGeometry): BaseGeometry instance (e.g. Polygon)\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def _any_log_contains(self, substring: str, log_record_list: List['logging.LogRecord']) -> bool:\n    return any(map(lambda log_record: substring in str(log_record.message), log_record_list))", "docstring": "Returns True if any of the log contains a given substring.\n\nArgs:\nsubstring: A piece of string to check whether it exists in the log\nmessage.\nlog_record_list: A list of `absl.logging.LogRecord`s.\n\nReturns:\nTrue if and only if the substring exists in any of the log in\n`log_record_list`.", "source": "github-repos"}
{"code": "def load_assistant_model(model: 'PreTrainedModel', assistant_model: Optional[Union[str, 'PreTrainedModel']], assistant_tokenizer: Optional[PreTrainedTokenizer]) -> Tuple[Optional['PreTrainedModel'], Optional[PreTrainedTokenizer]]:\n    if not model.can_generate() or assistant_model is None:\n        return (None, None)\n    if getattr(model, 'framework') != 'pt' or not isinstance(model, PreTrainedModel):\n        raise ValueError('Assisted generation, triggered by the `assistant_model` argument, is only available for `PreTrainedModel` model instances. For instance, TF or JAX models are not supported.')\n    if isinstance(assistant_model, str):\n        assistant_config = AutoConfig.from_pretrained(assistant_model)\n        _, loaded_assistant_model = infer_framework_load_model(assistant_model, config=assistant_config)\n        loaded_assistant_model = loaded_assistant_model.to(device=model.device, dtype=model.dtype)\n        loaded_assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_model)\n    else:\n        loaded_assistant_model = assistant_model\n        loaded_assistant_tokenizer = assistant_tokenizer\n    same_vocab_size = model.config.vocab_size == loaded_assistant_model.config.vocab_size\n    same_special_tokens = all((getattr(model.config, token) == getattr(loaded_assistant_model.config, token) for token in ('eos_token_id', 'pad_token_id', 'bos_token_id')))\n    if same_vocab_size and same_special_tokens:\n        loaded_assistant_tokenizer = None\n    elif loaded_assistant_tokenizer is None:\n        raise ValueError('The assistant model has a different tokenizer than the main model. You should pass the assistant tokenizer.')\n    return (loaded_assistant_model, loaded_assistant_tokenizer)", "docstring": "Prepares the assistant model and the assistant tokenizer for a pipeline whose model that can call `generate`.\n\nArgs:\nmodel ([`PreTrainedModel`]):\nThe main model that will be used by the pipeline to make predictions.\nassistant_model (`str` or [`PreTrainedModel`], *optional*):\nThe assistant model that will be used by the pipeline to make predictions.\nassistant_tokenizer ([`PreTrainedTokenizer`], *optional*):\nThe assistant tokenizer that will be used by the pipeline to encode data for the model.\n\nReturns:\nTuple: The loaded assistant model and (optionally) the loaded tokenizer.", "source": "github-repos"}
{"code": "def marcxml2mods(marc_xml, uuid, url):\n    \n    marc_xml = _read_content_or_path(marc_xml)\n\n    return type_decisioner(\n        marc_xml,\n        lambda: transform_to_mods_mono(marc_xml, uuid, url),\n        lambda: transform_to_mods_multimono(marc_xml, uuid, url),\n        lambda: transform_to_mods_periodical(marc_xml, uuid, url),\n    )", "docstring": "Convert `marc_xml` to MODS. Decide type of the record and what template to\nuse (monograph, multi-monograph, periodical).\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "juraj-google-style"}
{"code": "def getDocPath(fn, root=None):\n    cwd = pathlib.Path(os.getcwd())\n    if root:\n        cwd = pathlib.Path(root)\n    while True:\n        dpath = cwd.joinpath('docdata')\n        if dpath.is_dir():\n            break\n        parent = cwd.parent\n        if (parent == cwd):\n            raise ValueError(f'Unable to find data directory from {os.getcwd()}.')\n        cwd = parent\n    fpath = os.path.abspath(os.path.join(dpath.as_posix(), fn))\n    if (not fpath.startswith(dpath.as_posix())):\n        raise ValueError(f'Path escaping detected: {fn}')\n    if (not os.path.isfile(fpath)):\n        raise ValueError(f'File does not exist: {fn}')\n    return fpath", "docstring": "Helper for getting a documentation data file paths.\n\nArgs:\nfn (str): Name of the file to retrieve the full path for.\nroot (str): Optional root path to look for a docdata in.\n\nNotes:\nDefaults to looking for the ``docdata`` directory in the current\nworking directory. This behavior works fine for notebooks nested\nin the docs directory of synapse; but this root directory that\nis looked for may be overridden by providing an alternative root.\n\nReturns:\nstr: A file path.\n\nRaises:\nValueError if the file does not exist or directory traversal attempted..", "source": "codesearchnet"}
{"code": "def send_offer_update_email(self, user_email, subject, email_body, site_code=None):\n    \n    config = get_sailthru_configuration(site_code)\n    _send_offer_assignment_notification_email(config, user_email, subject, email_body, site_code, self)", "docstring": "Sends the offer emails after assignment, either for revoking or reminding.\nArgs:\nself: Ignore.\nuser_email (str): Recipient's email address.\nsubject (str): Email subject.\nemail_body (str): The body of the email.\nsite_code (str): Identifier of the site sending the email.", "source": "juraj-google-style"}
{"code": "def init_cache(self, batch_size, max_length, encoder_outputs):\n    decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n    decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)\n    init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward)\n    return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.\nencoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):\n`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:\n`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)\nis a sequence of hidden-states at the output of the last layer of the encoder. Used in the\ncross-attention of the decoder.", "source": "github-repos"}
{"code": "def timeit(hosts=None, stmt=None, warmup=30, repeat=None, duration=None, concurrency=1, output_fmt=None, fail_if=None, sample_mode='reservoir'):\n    num_lines = 0\n    log = Logger(output_fmt)\n    with Runner(hosts, concurrency, sample_mode) as runner:\n        version_info = aio.run(runner.client.get_server_version)\n        for line in as_statements(lines_from_stdin(stmt)):\n            runner.warmup(line, warmup)\n            timed_stats = runner.run(line, iterations=repeat, duration=duration)\n            r = Result(version_info=version_info, statement=line, timed_stats=timed_stats, concurrency=concurrency)\n            log.result(r)\n            if fail_if:\n                eval_fail_if(fail_if, r)\n        num_lines += 1\n    if (num_lines == 0):\n        raise SystemExit('No SQL statements provided. Use --stmt or provide statements via stdin')", "docstring": "Run the given statement a number of times and return the runtime stats\n\nArgs:\nfail-if: An expression that causes cr8 to exit with a failure if it\nevaluates to true.\nThe expression can contain formatting expressions for:\n- runtime_stats\n- statement\n- meta\n- concurrency\n- bulk_size\nFor example:\n--fail-if \"{runtime_stats.mean} > 1.34\"", "source": "codesearchnet"}
{"code": "def get_yield_stress(self, n):\n        \n        \n        comp = root(self.get_stability_criteria, -1, args=n)\n        tens = root(self.get_stability_criteria, 1, args=n)\n        return (comp.x, tens.x)", "docstring": "Gets the yield stress for a given direction\n\nArgs:\nn (3x1 array-like): direction for which to find the\nyield stress", "source": "juraj-google-style"}
{"code": "def get_osx_config(browser: str) -> dict:\n    \n    \n    if browser.lower() == 'chrome':\n        cookie_file = ('~/Library/Application Support/Google/Chrome/Default/'\n                       'Cookies')\n    elif browser.lower() == \"chromium\":\n        cookie_file = '~/Library/Application Support/Chromium/Default/Cookies'\n    else:\n        raise ValueError(\"Browser must be either Chrome or Chromium.\")\n\n    config = {\n        'my_pass': keyring.get_password(\n            '{} Safe Storage'.format(browser), browser),\n        'iterations': 1003,\n        'cookie_file': cookie_file,\n        }\n    return config", "docstring": "Get settings for getting Chrome/Chromium cookies on OSX.\n\nArgs:\nbrowser: Either \"Chrome\" or \"Chromium\"\nReturns:\nConfig dictionary for Chrome/Chromium cookie decryption", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the DefaultsInformation object.'.format(kmip_version.value))\n    super(DefaultsInformation, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    object_defaults = []\n    while self.is_tag_next(enums.Tags.OBJECT_DEFAULTS, local_buffer):\n        object_default = ObjectDefaults()\n        object_default.read(local_buffer, kmip_version=kmip_version)\n        object_defaults.append(object_default)\n    if (len(object_defaults) == 0):\n        raise exceptions.InvalidKmipEncoding('The DefaultsInformation encoding is missing the object defaults structure.')\n    else:\n        self._object_defaults = object_defaults\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the DefaultsInformation structure and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the object defaults are missing\nfrom the encoding.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the DefaultsInformation structure.", "source": "codesearchnet"}
{"code": "def eval(self, expr, **kwargs):\n    columns = (self.index if self._is_transposed else self.columns)\n    index = (self.columns if self._is_transposed else self.index)\n    columns_copy = pandas.DataFrame(columns=self.columns)\n    columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)\n    expect_series = isinstance(columns_copy, pandas.Series)\n\n    def eval_builder(df, **kwargs):\n        kwargs.pop('axis', None)\n        df.columns = columns\n        result = df.eval(expr, inplace=False, **kwargs)\n        return result\n    func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)\n    new_data = self._map_across_full_axis(1, func)\n    if expect_series:\n        new_columns = [columns_copy.name]\n        new_index = index\n    else:\n        new_columns = columns_copy.columns\n        new_index = self.index\n    return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Returns a new QueryCompiler with expr evaluated on columns.\n\nArgs:\nexpr: The string expression to evaluate.\n\nReturns:\nA new QueryCompiler with new columns after applying expr.", "source": "codesearchnet"}
{"code": "def _parseDOM(istack):\n    ostack = []\n    end_tag_index = 0\n\n    def neither_nonpair_or_end_or_comment(el):\n        return (not (el.isNonPairTag() or el.isEndTag() or el.isComment()))\n    index = 0\n    while (index < len(istack)):\n        el = istack[index]\n        end_tag_index = _indexOfEndTag(istack[index:])\n        if ((end_tag_index == 0) and neither_nonpair_or_end_or_comment(el)):\n            el.isNonPairTag(True)\n        if (end_tag_index == 0):\n            if (not el.isEndTag()):\n                ostack.append(el)\n        else:\n            el.childs = _parseDOM(istack[(index + 1):(end_tag_index + index)])\n            el.endtag = istack[(end_tag_index + index)]\n            el.endtag.openertag = el\n            ostack.append(el)\n            ostack.append(el.endtag)\n            index = (end_tag_index + index)\n        index += 1\n    return ostack", "docstring": "Recursively go through element array and create DOM.\n\nArgs:\nistack (list): List of :class:`.HTMLElement` objects.\n\nReturns:\nlist: DOM tree as list.", "source": "codesearchnet"}
{"code": "def test_gradient(self, shape, rt_value, rt_grad, default_value, default_grad, output_value, output_grad, ragged_rank=None):\n    rt_value = ragged_factory_ops.constant(rt_value, dtype=dtypes.float32, ragged_rank=ragged_rank)\n    rt_grad = ragged_factory_ops.constant(rt_grad, dtype=dtypes.float32, ragged_rank=ragged_rank)\n    default_value = constant_op.constant(default_value, dtype=dtypes.float32)\n    default_grad = constant_op.constant(default_grad, dtype=dtypes.float32)\n    output_value = constant_op.constant(output_value, dtype=dtypes.float32, shape=shape)\n    output_grad = constant_op.constant(output_grad, dtype=dtypes.float32, shape=shape)\n    shape = tensor_shape.as_shape(shape)\n    for partition_type in ['row_splits', 'value_rowids']:\n        rt_val = self.rt_with_partition_type(rt_value, partition_type)\n        if context.executing_eagerly():\n            self._test_gradient_helper(rt_val, default_value, shape, output_grad, output_value, rt_grad, default_grad)\n        else:\n            for shape_info in ['known', 'unknown_dims', 'unknown_rank']:\n                rt_val = self.wrap_in_placeholder(rt_val, shape_info)\n                default_val = self.wrap_in_placeholder(default_value, shape_info)\n                shape_val = self.wrap_in_placeholder(shape, shape_info)\n                self._test_gradient_helper(rt_val, default_val, shape_val, output_grad, output_value, rt_grad, default_grad)", "docstring": "Tests that ragged_to_dense generates the right gradient.\n\nArgs:\nshape: The `shape` arg for `ragged_to_dense`.\nrt_value: The `rt_input` arg for `ragged_to_dense`.\nrt_grad: The expected gradient for `rt_value`.  Corresponds 1:1 with\n`rt_value`.\ndefault_value: The `default_value` arg for `ragged_to_dense`.\ndefault_grad: The expected gradient for `default_value`.  Corresponds 1:1\nwith `default_value`.\noutput_value: The expected output of `ragged_to_dense`.\noutput_grad: The gradient for the output (used to generate the gradients\n`rt_grad` and `default_grad`).  Corresponds 1:1 with `output_value`.\nragged_rank: Ragged rank for `rt_value`.", "source": "github-repos"}
{"code": "def remove_waiter(self, waiter_handle):\n    (spec, waiter) = waiter_handle\n    self._remove_waiter(spec, waiter)", "docstring": "Remove a message callback.\n\nThis call will remove a callback previously registered using\nevery_match.\n\nArgs:\nwaiter_handle (object): The opaque handle returned by the\nprevious call to every_match().", "source": "codesearchnet"}
{"code": "def get_object(cls, api_token, ip):\n        \n        floating_ip = cls(token=api_token, ip=ip)\n        floating_ip.load()\n        return floating_ip", "docstring": "Class method that will return a FloatingIP object by its IP.\n\nArgs:\napi_token: str - token\nip: str - floating ip address", "source": "juraj-google-style"}
{"code": "def scale_stoichiometry(self, scaling):\n    return {k: (v * scaling) for (k, v) in self.stoichiometry.items()}", "docstring": "Scale the Calculation stoichiometry\nReturns the stoichiometry, scaled by the argument scaling.\n\nArgs:\nscaling (float): The scaling factor.\n\nReturns:\n(Counter(Str:Int)): The scaled stoichiometry as a Counter of label: stoichiometry pairs", "source": "codesearchnet"}
{"code": "def delete_entity(self, etype, entity_id):\n        \n        r = fapi.delete_entity(self.namespace, self.name, etype,\n                                  entity_id, self.api_url)\n        fapi._check_response_code(r, 202)", "docstring": "Delete an entity in this workspace.\n\nArgs:\netype (str): Entity type\nentity_id (str): Entity name/unique id", "source": "juraj-google-style"}
{"code": "def test_sample_paths_2d(self, random_type, seed):\n    mu = np.array([0.2, 0.7])\n    a = np.array([[0.4, 0.1], [0.3, 0.2]])\n    b = np.array([[0.33, -0.03], [0.21, 0.5]])\n\n    def drift_fn(t, x):\n        return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)\n\n    def vol_fn(t, x):\n        del x\n        return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)\n    num_samples = 10000\n    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])\n    x0 = np.array([0.1, -1.1])\n    paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, time_step=0.01, random_type=random_type, seed=seed))\n    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)\n    means = np.mean(paths, axis=0)\n    times = np.reshape(times, [-1, 1])\n    expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)\n    self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)", "docstring": "Tests path properties for 2-dimentional Ito process.\n\nWe construct the following Ito processes.\n\ndX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2\ndX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2\n\nmu_1, mu_2 are constants.\ns_ij = a_ij t + b_ij\n\nFor this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5.\n\nArgs:\nrandom_type: Random number type defined by tff.math.random.RandomType\nenum.\nseed: Random seed.", "source": "github-repos"}
{"code": "def verify_loop_init_vars(init_vars, symbol_names, first_iter_vars=None, extra_message=None):\n    if not symbol_names:\n        return\n    if first_iter_vars is None:\n        first_iter_vars = (None,) * len(symbol_names)\n    assert len(symbol_names) == len(init_vars)\n    assert len(symbol_names) == len(first_iter_vars)\n    for name, val, fi_val in zip(symbol_names, init_vars, first_iter_vars):\n        if isinstance(val, variables.UndefinedReturnValue):\n            if fi_val:\n                raise ValueError('the return value from a TensorFlow loop may only be a {}; got {}'.format(LEGAL_LOOP_TYPES, type(fi_val)))\n            else:\n                raise NotImplementedError('a return statement cannot be placed inside this TensorFlow loop; this may happen if a return statement depends on a static Python condition such as a hyperparameter')\n        error_msg = None\n        if val is None:\n            error_msg = \"'{}' is not allowed to be None before the loop\".format(name)\n        elif isinstance(val, variables.Undefined):\n            error_msg = \"'{}' must be defined before the loop\".format(name)\n            if extra_message:\n                error_msg += '\\n' + extra_message\n        if error_msg is not None:\n            raise ValueError(error_msg)", "docstring": "Ensures that all values in the state are valid to use in a TF loop.\n\nThe init_vars may contain placeholder values derived from first_iter_vars.\n\nArgs:\ninit_vars: initial loop variables (as taken before entering the loop)\nsymbol_names: corresponding names of the initial loop variables\nfirst_iter_vars: loop variables after one iteration of the loop\nextra_message: an extra string to append to the error message, in case of\n\"undefined variable\" errors (see variables.Undefined)", "source": "github-repos"}
{"code": "def _build_map(outputs):\n    finished_nodes = set()\n    nodes_in_progress = set()\n    nodes_in_decreasing_depth = []\n    layer_indices = {}\n    for output in nest.flatten(outputs):\n        _build_map_helper(output, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, layer_indices)\n    return (nodes_in_decreasing_depth, layer_indices)", "docstring": "This method topologically sorts nodes in order from inputs to outputs.\n\nIt uses a depth-first search to topologically sort nodes that appear in the\n_keras_history connectivity metadata of `outputs`.\n\nArgs:\noutputs: the output tensors whose _keras_history metadata should be walked.\nThis may be an arbitrary nested structure.\n\nReturns:\nA tuple like (ordered_nodes, layer_to_first_traversal_index)\nordered_nodes: list of nodes appearing in the keras history, topologically\nsorted from original inputs to the `outputs`.\n(If outputs have different sets of ancestors, the inputs to one output\nmay appear after a different output).\nlayer_to_first_traversal_index:\nA dict mapping layer to the traversal index in the DFS where it is\nseen. Note: if a layer is shared by several nodes, the dict will only\nstore the index corresponding to the *first* time the layer seen.", "source": "github-repos"}
{"code": "def _load_and_verify_metadata(self, submission_type):\n    metadata_filename = os.path.join(self._extracted_submission_dir, 'metadata.json')\n    if (not os.path.isfile(metadata_filename)):\n        logging.error('metadata.json not found')\n        return None\n    try:\n        with open(metadata_filename, 'r') as f:\n            metadata = json.load(f)\n    except IOError as e:\n        logging.error('Failed to load metadata: %s', e)\n        return None\n    for field_name in REQUIRED_METADATA_JSON_FIELDS:\n        if (field_name not in metadata):\n            logging.error('Field %s not found in metadata', field_name)\n            return None\n    if (submission_type != metadata['type']):\n        logging.error('Invalid submission type in metadata, expected \"%s\", actual \"%s\"', submission_type, metadata['type'])\n        return None\n    entry_point = metadata['entry_point']\n    if (not os.path.isfile(os.path.join(self._extracted_submission_dir, entry_point))):\n        logging.error('Entry point not found: %s', entry_point)\n        return None\n    if (not entry_point.endswith('.sh')):\n        logging.warning('Entry point is not an .sh script. This is not necessarily a problem, but if submission wont run double check entry point first: %s', entry_point)\n    return metadata", "docstring": "Loads and verifies metadata.\n\nArgs:\nsubmission_type: type of the submission\n\nReturns:\ndictionaty with metadata or None if metadata not found or invalid", "source": "codesearchnet"}
{"code": "async def _async_wait_for_process(\n        future_process: Any,\n        out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout,\n        err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr\n) -> CommandOutput:\n    \n    process = await future_process\n    future_output = _async_forward(process.stdout, out)\n    future_err_output = _async_forward(process.stderr, err)\n    output, err_output = await asyncio.gather(future_output, future_err_output)\n    await process.wait()\n\n    return CommandOutput(output, err_output, process.returncode)", "docstring": "Awaits the creation and completion of an asynchronous process.\n\nArgs:\nfuture_process: The eventually created process.\nout: Where to write stuff emitted by the process' stdout.\nerr: Where to write stuff emitted by the process' stderr.\n\nReturns:\nA (captured output, captured error output, return code) triplet.", "source": "juraj-google-style"}
{"code": "def get_database_info(db_uri):\n    if (not db_uri):\n        return (None, None)\n    scheme = urlparse.urlparse(db_uri).scheme\n    if (scheme == 'sqlite'):\n        return (sqlite3, create_sqlite_connection_provider(db_uri))\n    else:\n        raise ValueError(('Only sqlite DB URIs are supported now: ' + db_uri))", "docstring": "Returns TBContext fields relating to SQL database.\n\nArgs:\ndb_uri: A string URI expressing the DB file, e.g. \"sqlite:~/tb.db\".\n\nReturns:\nA tuple with the db_module and db_connection_provider TBContext fields. If\ndb_uri was empty, then (None, None) is returned.\n\nRaises:\nValueError: If db_uri scheme is not supported.", "source": "codesearchnet"}
{"code": "def __init__(self, s):\n        \n        self.txt = s\n        self._family = self._extract_family(s)\n        self.regex = re.compile(s)", "docstring": "Create a regex rule.\n\nArgs:\ns (str): Regex pattern. Eg '.*\\\\.beta$'.", "source": "juraj-google-style"}
{"code": "def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]]=None) -> List[Dict[str, TensorType]]:\n    requires_backends(self, 'torch')\n    predicted_depth = outputs.predicted_depth\n    if target_sizes is not None and len(predicted_depth) != len(target_sizes):\n        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')\n    results = []\n    target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes\n    for depth, target_size in zip(predicted_depth, target_sizes):\n        if target_size is not None:\n            depth = torch.nn.functional.interpolate(depth.unsqueeze(0).unsqueeze(1), size=target_size, mode='bicubic', align_corners=False).squeeze()\n        results.append({'predicted_depth': depth})\n    return results", "docstring": "Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.\nOnly supports PyTorch.\n\nArgs:\noutputs ([`DepthEstimatorOutput`]):\nRaw outputs of the model.\ntarget_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\n\nReturns:\n`List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth\npredictions.", "source": "github-repos"}
{"code": "def get_list_store(data_frame):\n    \n    df_py_dtypes = get_py_dtypes(data_frame)\n    list_store = gtk.ListStore(*df_py_dtypes.dtype)\n    for i, row_i in data_frame.iterrows():\n        list_store.append(row_i.tolist())\n    return df_py_dtypes, list_store", "docstring": "Return a `pandas.DataFrame` containing Python type information for the\ncolumns in `data_frame` and a `gtk.ListStore` matching the contents of the\ndata frame.\n\nArgs:\n\ndata_frame (pandas.DataFrame) : Data frame containing data columns.\n\nReturns:\n\n(tuple) : The first element is a data frame as returned by\n`get_py_dtypes` and the second element is a `gtk.ListStore`\nmatching the contents of the data frame.", "source": "juraj-google-style"}
{"code": "def make_mixture_prior(latent_size, mixture_components):\n  \n  if mixture_components == 1:\n    \n    return tfd.MultivariateNormalDiag(\n        loc=tf.zeros([latent_size]),\n        scale_identity_multiplier=1.0)\n\n  loc = tf.compat.v1.get_variable(\n      name=\"loc\", shape=[mixture_components, latent_size])\n  raw_scale_diag = tf.compat.v1.get_variable(\n      name=\"raw_scale_diag\", shape=[mixture_components, latent_size])\n  mixture_logits = tf.compat.v1.get_variable(\n      name=\"mixture_logits\", shape=[mixture_components])\n\n  return tfd.MixtureSameFamily(\n      components_distribution=tfd.MultivariateNormalDiag(\n          loc=loc,\n          scale_diag=tf.nn.softplus(raw_scale_diag)),\n      mixture_distribution=tfd.Categorical(logits=mixture_logits),\n      name=\"prior\")", "docstring": "Creates the mixture of Gaussians prior distribution.\n\nArgs:\nlatent_size: The dimensionality of the latent representation.\nmixture_components: Number of elements of the mixture.\n\nReturns:\nrandom_prior: A `tfd.Distribution` instance representing the distribution\nover encodings in the absence of any evidence.", "source": "juraj-google-style"}
{"code": "def obtain_token(self, config=None):\n    client_application = CLIENT_APPLICATION\n    if (self.config and ('client_application' in self.config)):\n        client_application += (':' + self.config['client_application'])\n    headers = {'x-qx-client-application': client_application}\n    if self.token_unique:\n        try:\n            response = requests.post(str((self.config.get('url') + '/users/loginWithToken')), data={'apiToken': self.token_unique}, verify=self.verify, headers=headers, **self.extra_args)\n        except requests.RequestException as e:\n            raise ApiError(('error during login: %s' % str(e)))\n    elif (config and ('email' in config) and ('password' in config)):\n        email = config.get('email', None)\n        password = config.get('password', None)\n        credentials = {'email': email, 'password': password}\n        try:\n            response = requests.post(str((self.config.get('url') + '/users/login')), data=credentials, verify=self.verify, headers=headers, **self.extra_args)\n        except requests.RequestException as e:\n            raise ApiError(('error during login: %s' % str(e)))\n    else:\n        raise CredentialsError('invalid token')\n    if (response.status_code == 401):\n        error_message = None\n        try:\n            error_message = response.json()['error']['message']\n        except:\n            pass\n        if error_message:\n            raise CredentialsError(('error during login: %s' % error_message))\n        else:\n            raise CredentialsError('invalid token')\n    try:\n        response.raise_for_status()\n        self.data_credentials = response.json()\n    except (requests.HTTPError, ValueError) as e:\n        raise ApiError(('error during login: %s' % str(e)))\n    if (self.get_token() is None):\n        raise CredentialsError('invalid token')", "docstring": "Obtain the token to access to QX Platform.\n\nRaises:\nCredentialsError: when token is invalid or the user has not\naccepted the license.\nApiError: when the response from the server couldn't be parsed.", "source": "codesearchnet"}
{"code": "def expects_none(options):\n    \n\n    if any(options.get(key) is not None for key in [\"count\", \"maximum\", \"minimum\", \"between\"]):\n        return matches_count(0, options)\n    else:\n        return False", "docstring": "Returns whether the given query options expect a possible count of zero.\n\nArgs:\noptions (Dict[str, int | Iterable[int]]): A dictionary of query options.\n\nReturns:\nbool: Whether a possible count of zero is expected.", "source": "juraj-google-style"}
{"code": "def reverse_axis(self, axis_to_reverse):\n        \n        if axis_to_reverse.lower() == 'x':\n            self.general.reverse_x_axis = True\n        if axis_to_reverse.lower() == 'y':\n            self.general.reverse_y_axis = True\n        if axis_to_reverse.lower() != 'x' or axis_to_reverse.lower() != 'y':\n            raise ValueError('Axis for reversing needs to be either x or y.')\n        return", "docstring": "Reverse an axis in all figure plots.\n\nThis will reverse the tick marks on an axis for each plot in the figure.\nIt can be overridden in SinglePlot class.\n\nArgs:\naxis_to_reverse (str): Axis to reverse. Supports `x` and `y`.\n\nRaises:\nValueError: The string representing the axis to reverse is not `x` or `y`.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, files):\n    \n\n    result = {}\n    for f in files:\n      result.update(self._symbols_by_file[f])\n    return result", "docstring": "Gets all the messages from a specified file.\n\nThis will find and resolve dependencies, failing if they are not registered\nin the symbol database.\n\n\nArgs:\nfiles: The file names to extract messages from.\n\nReturns:\nA dictionary mapping proto names to the message classes. This will include\nany dependent messages as well as any messages defined in the same file as\na specified message.\n\nRaises:\nKeyError: if a file could not be found.", "source": "juraj-google-style"}
{"code": "def delete_user(self, email):\n        \n        LOG.info(\"Deleting user %s\", email)\n        user_obj = self.user_collection.delete_one({'_id': email})\n        \n        return user_obj", "docstring": "Delete a user from the database\n\nArgs:\nemail(str)\n\nReturns:\nuser_obj(dict)", "source": "juraj-google-style"}
{"code": "def default(self, value):\n        \n        if isinstance(value, messages.Enum):\n            return str(value)\n\n        if six.PY3 and isinstance(value, bytes):\n            return value.decode('utf8')\n\n        if isinstance(value, messages.Message):\n            result = {}\n            for field in value.all_fields():\n                item = value.get_assigned_value(field.name)\n                if item not in (None, [], ()):\n                    result[field.name] = (\n                        self.__protojson_protocol.encode_field(field, item))\n            \n            \n            for unknown_key in value.all_unrecognized_fields():\n                unrecognized_field, _ = value.get_unrecognized_field_info(\n                    unknown_key)\n                \n                \n                result[unknown_key] = unrecognized_field\n            return result\n\n        return super(MessageJSONEncoder, self).default(value)", "docstring": "Return dictionary instance from a message object.\n\nArgs:\nvalue: Value to get dictionary for.  If not encodable, will\ncall superclasses default method.", "source": "juraj-google-style"}
{"code": "def ConfigureLogging(debug_output=False, filename=None, mode='w', quiet_mode=False):\n    for handler in logging.root.handlers:\n        logging.root.removeHandler(handler)\n    logger = logging.getLogger()\n    if (filename and filename.endswith('.gz')):\n        handler = CompressedFileHandler(filename, mode=mode)\n    elif filename:\n        handler = logging.FileHandler(filename, mode=mode)\n    else:\n        handler = logging.StreamHandler()\n    format_string = '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d <%(module)s> %(message)s'\n    formatter = logging.Formatter(format_string)\n    handler.setFormatter(formatter)\n    if debug_output:\n        level = logging.DEBUG\n    elif quiet_mode:\n        level = logging.WARNING\n    else:\n        level = logging.INFO\n    logger.setLevel(level)\n    handler.setLevel(level)\n    logger.addHandler(handler)", "docstring": "Configures the logging root logger.\n\nArgs:\ndebug_output (Optional[bool]): True if the logging should include debug\noutput.\nfilename (Optional[str]): log filename.\nmode (Optional[str]): log file access mode.\nquiet_mode (Optional[bool]): True if the logging should not include\ninformation output. Note that debug_output takes precedence over\nquiet_mode.", "source": "codesearchnet"}
{"code": "def add_to_query(self, query):\n        \n        self.handle = win32pdh.AddCounter(query, self.path)", "docstring": "Add the current path to the query\n\nArgs:\nquery (obj):\nThe handle to the query to add the counter", "source": "juraj-google-style"}
{"code": "def search(self, filters):\n        \n        records = self.__model__.search(self.__five9__, filters)\n        return self.__class__(\n            self.__five9__, self.__model__, records,\n        )", "docstring": "Search Five9 given a filter.\n\nArgs:\nfilters (dict): A dictionary of search strings, keyed by the name\nof the field to search.\n\nReturns:\nEnvironment: An environment representing the recordset.", "source": "juraj-google-style"}
{"code": "def start_apppool(name):\n    \n    ps_cmd = ['Start-WebAppPool', r\"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    return cmd_ret['retcode'] == 0", "docstring": "Start an IIS application pool.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the App Pool to start.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.start_apppool name='MyTestPool'", "source": "juraj-google-style"}
{"code": "def get_pull_request_number(task, source_env_prefix):\n    pull_request = _extract_from_env_in_payload(task, (source_env_prefix + '_PULL_REQUEST_NUMBER'))\n    if (pull_request is not None):\n        pull_request = int(pull_request)\n    return pull_request", "docstring": "Get what Github pull request created the graph.\n\nArgs:\nobj (ChainOfTrust or LinkOfTrust): the trust object to inspect\nsource_env_prefix (str): The environment variable prefix that is used\nto get repository information.\n\nReturns:\nint: the pull request number.\nNone: if not defined for this task.", "source": "codesearchnet"}
{"code": "def info(self, **kwargs):\n    path = self._get_id_path('info')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the basic movie information for a specific movie id.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def PureMultiHeadedAttention(x, params, num_heads=8, dropout=0.0, mode='train', **kwargs):\n    del params\n    rng = kwargs.get('rng', None)\n    ((q, k, v), mask) = x\n    feature_depth = q.shape[(- 1)]\n    assert ((feature_depth % num_heads) == 0)\n    head_depth = (feature_depth \n    nbatch = np.shape(q)[0]\n\n    def SplitHeads(x):\n        return np.transpose(np.reshape(x, (nbatch, (- 1), num_heads, head_depth)), (0, 2, 1, 3))\n\n    def JoinHeads(x):\n        return np.reshape(np.transpose(x, (0, 2, 1, 3)), (nbatch, (- 1), (num_heads * head_depth)))\n    return JoinHeads(DotProductAttention(SplitHeads(q), SplitHeads(k), SplitHeads(v), mask, dropout=dropout, mode=mode, rng=rng))", "docstring": "Pure transformer-style multi-headed attention.\n\nArgs:\nx: inputs ((q, k, v), mask)\nparams: parameters (none)\nnum_heads: int: number of attention heads\ndropout: float: dropout rate\nmode: str: 'train' or 'eval'\n**kwargs: other arguments including the rng\n\nReturns:\nPure Multi-headed attention layer (no Dense transforms on input).", "source": "codesearchnet"}
{"code": "def controlled_by(self, *control_qubits: Qid) -> 'Gate':\n    from cirq.ops import ControlledGate\n    return ControlledGate(self, control_qubits, (len(control_qubits) if (control_qubits is not None) else 1))", "docstring": "Returns a controlled version of this gate.\n\nArgs:\ncontrol_qubits: Optional qubits to control the gate by.", "source": "codesearchnet"}
{"code": "def to_diff_dict(self) -> Dict[str, Any]:\n    config_dict = self.to_dict()\n    default_config_dict = GenerationConfig().to_dict()\n    serializable_config_dict = {}\n    for key, value in config_dict.items():\n        if key not in default_config_dict or key == 'transformers_version' or value != default_config_dict[key]:\n            serializable_config_dict[key] = value\n    self.dict_torch_dtype_to_str(serializable_config_dict)\n    return serializable_config_dict", "docstring": "Removes all attributes from config which correspond to the default config attributes for better readability and\nserializes to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def fetcher(date=datetime.today(), url_pattern=URL_PATTERN):\n    \n    api_url = url_pattern % date.strftime('%Y-%m-%d')\n\n    headers = {'Referer': 'http:\n    raw_result = requests.get(api_url, headers=headers).json()\n    return raw_result", "docstring": "Fetch json data from n.pl\n\nArgs:\ndate (date) - default today\nurl_patter (string) - default URL_PATTERN\n\nReturns:\ndict - data from api", "source": "juraj-google-style"}
{"code": "def query(self,\n              sparql,\n              mode=\"get\",\n              namespace=None,\n              rtn_format=\"json\",\n              **kwargs):\n        \n        namespace = pick(namespace, self.namespace)\n        if kwargs.get(\"log_level\"):\n            log.setLevel(kwargs['log_level'])\n        if kwargs.get(\"debug\"):\n            log.setLevel(logging.DEBUG)\n        if rtn_format not in self.qry_formats:\n            raise KeyError(\"rtn_format was '%s'. Allowed values are %s\" % \\\n                           (rtn_format, self.qry_results_formats))\n        url = self._make_url(namespace)\n        if 'prefix' not in sparql.lower():\n            sparql = \"%s\\n%s\" % (NSM.prefix(), sparql)\n\n\n        if mode == \"get\":\n\n            data = {\"query\": sparql} \n        elif mode == \"update\":\n            data = {\"update\": sparql}\n        else:\n            raise NotImplementedError(\"'mode' != to ['get', 'update']\")\n\n        headers = {'Accept': self.qry_formats[rtn_format]}\n        start = datetime.datetime.now()\n        try:\n            result = requests.post(url, data=data, headers=headers)\n        except requests.exceptions.ConnectionError:\n            result = requests.post(self._make_url(namespace, self.local_url),\n                                   data=data,\n                                   headers=headers)\n        log.debug(format_multiline([\"\",\n                                    \"url='{url}'\",\n                                    ,\n                                    \"**** SPAQRL QUERY ****\",\n                                    \"\",\n                                    \"{sparql}\",\n                                    \"Query Time: {q_time}\"],\n                                   url=url,\n                                   mode=mode,\n                                   namespace=namespace,\n                                   rtn_format=rtn_format,\n                                   sparql=sparql,\n                                   q_time=(datetime.datetime.now()-start),\n                                   **kwargs))\n\n        if result.status_code == 200:\n            try:\n                if rtn_format == \"json\":\n                    bindings = result.json().get('results',\n                                                 {}).get('bindings', [])\n                elif rtn_format == 'xml':\n                    xml_doc = etree.XML(result.text)\n                    bindings = xml_doc.findall(\"results/bindings\")\n                else:\n                    bindings = result.text\n                try:\n                    log.debug(\"result count: %s\", len(bindings))\n                except TypeError:\n                    pass\n                return bindings\n            except json.decoder.JSONDecodeError:\n                if mode == 'update':\n                    return BeautifulSoup(result.text, 'lxml').get_text()\n                return result.text\n        else:\n            raise SyntaxError(\"%s\\n\\n%s\\n\\n%s\" % (sparql,\n                    add_sparql_line_nums(sparql),\n                    result.text[result.text.find(\"java.\"):]))", "docstring": "Runs a sparql query and returns the results\n\nArgs:\n-----\nsparql: the sparql query to run\nnamespace: the namespace to run the sparql query against\nmode: ['get'(default), 'update'] the type of sparql query\nrtn_format: ['json'(default), 'xml'] format of query results\n\nKwargs:\n-------\ndebug(bool): If True sets logging level to debug", 
"source": "juraj-google-style"}
{"code": "def _sample(self, initial_states: tf.Tensor, counts: tf.Tensor):\n    circuits = self.circuit(initial_states)\n    num_circuits = tf.shape(circuits)[0]\n    tiled_values = tf.tile(tf.expand_dims(self.circuit.symbol_values, 0), [num_circuits, 1])\n    num_samples_mask = tf.cast((tf.ragged.range(counts) + 1).to_tensor(), tf.bool)\n    num_samples_mask = tf.map_fn(tf.random.shuffle, num_samples_mask)\n    samples = self._sample_layer(circuits, symbol_names=self.circuit.symbol_names, symbol_values=tiled_values, repetitions=tf.expand_dims(tf.math.reduce_max(counts), 0))\n    return tf.ragged.boolean_mask(samples, num_samples_mask)", "docstring": "Returns bitstring samples from the QNN.\n\nArgs:\ninitial_states: Shape [batch_size, num_qubits] of dtype `tf.int8`.\nThese are the initial states of each qubit in the circuit.\ncounts: Shape [batch_size] of dtype `tf.int32` such that `counts[i]` is\nthe number of samples to draw from `(qnn)|initial_states[i]>`.\n\nReturns:\nragged_samples: `tf.RaggedTensor` of DType `tf.int8` structured such\nthat `ragged_samples[i]` contains `counts[i]` bitstrings drawn from\n`(qnn)|initial_states[i]>`.", "source": "github-repos"}
{"code": "def __init__(self, identifier, value):\n    \n    super(VolumeAttribute, self).__init__()\n    self.identifier = identifier\n    self.value = value", "docstring": "Initializes the volume attribute object.\n\nArgs:\nidentifier (str): identifier of the attribute within the volume.\nvalue (object): value of the attribute.", "source": "juraj-google-style"}
{"code": "def concatenate(samplesets, defaults=None):\n    itertup = iter(samplesets)\n    try:\n        first = next(itertup)\n    except StopIteration:\n        raise ValueError('samplesets must contain at least one SampleSet')\n    vartype = first.vartype\n    variables = first.variables\n    records = [first.record]\n    records.extend(_iter_records(itertup, vartype, variables))\n    record = recfunctions.stack_arrays(records, defaults=defaults, asrecarray=True, usemask=False)\n    return SampleSet(record, variables, {}, vartype)", "docstring": "Combine SampleSets.\n\nArgs:\nsamplesets (iterable[:obj:`.SampleSet`):\nAn iterable of sample sets.\n\ndefaults (dict, optional):\nDictionary mapping data vector names to the corresponding default values.\n\nReturns:\n:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first\ngiven in `samplesets`.\n\nExamples:\n>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)\n>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)\n>>> ab = dimod.concatenate((a, b))\n>>> ab.record.sample\narray([[-1,  1],\n[ 1, -1]], dtype=int8)", "source": "codesearchnet"}
{"code": "def _create_topk_unique(inputs, k):\n    height = inputs.shape[0]\n    width = inputs.shape[1]\n    neg_inf_r0 = tf.constant((- np.inf), dtype=tf.float32)\n    ones = tf.ones([height, width], dtype=tf.float32)\n    neg_inf_r2 = (ones * neg_inf_r0)\n    inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)\n    tmp = inputs\n    topk_r2 = tf.zeros([height, k], dtype=tf.float32)\n    for i in range(k):\n        kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)\n        k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0), [height, 1])\n        topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)\n        ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))\n        tmp = tf.where(ge_r2, neg_inf_r2, inputs)\n    log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))\n    next_power_of_two = (1 << log2_ceiling)\n    count_mask = (next_power_of_two - 1)\n    mask_r0 = tf.constant(count_mask)\n    mask_r2 = tf.fill([height, k], mask_r0)\n    topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)\n    topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)\n    return (topk_r2, topk_indices_r2)", "docstring": "Creates the top k values in sorted order with indices.\n\nArgs:\ninputs: A tensor with rank of 2. [batch_size, original_size].\nk: An integer, number of top elements to select.\n\nReturns:\ntopk_r2: A tensor, the k largest elements. [batch_size, k].\ntopk_indices_r2: A tensor, indices of the top k values. [batch_size, k].", "source": "codesearchnet"}
{"code": "def place_line(self,\n                   device: 'cirq.google.XmonDevice',\n                   length: int) -> GridQubitLineTuple:\n        \n        seqs = AnnealSequenceSearch(device, self.seed).search(self.trace_func)\n        return GridQubitLineTuple.best_of(seqs, length)", "docstring": "Runs line sequence search.\n\nArgs:\ndevice: Chip description.\nlength: Required line length.\n\nReturns:\nList of linear sequences on the chip found by simulated annealing\nmethod.", "source": "juraj-google-style"}
{"code": "def relative_entropy(rho, sigma):\n    log_rho = tf.linalg.logm(tf.cast(rho, tf.complex128))\n    log_sigma = tf.linalg.logm(tf.cast(sigma, tf.complex128))\n    return optimized_trace_matmul(rho, tf.subtract(log_rho, log_sigma))", "docstring": "Calculate the relative entropy between the two given density matrices.\nD(rho||sigma) = Tr[rho(log(rho) - log(sigma))]\n= tf.linalg.trace(\ntf.matmul(rho,\ntf.linalg.logm(rho) - tf.linalg.logm(sigma)))\nArgs:\nrho: 2-D `tf.Tensor` of dtype `complex64` representing the left density\nmatrix in the fidelity calculation.\nsigma: 2-D `tf.Tensor` of dtype `complex64` representing the right density\nmatrix in the fidelity calculation.\nReturns:\nA tf.Tensor float64 fidelity scalar between the two given density\nmatrices.", "source": "github-repos"}
{"code": "def _unbatch(self) -> TypeSpec:\n    raise NotImplementedError(f'{type(self).__name__}._unbatch')", "docstring": "Returns a TypeSpec representing a single element this TypeSpec.\n\nReturns:\nA `TypeSpec` representing a single element of objects with this TypeSpec.", "source": "github-repos"}
{"code": "def max_variance_genes(data, nbins=5, frac=0.2):\n    indices = []\n    if sparse.issparse(data):\n        (means, var) = sparse_mean_var(data)\n    else:\n        means = data.mean(1)\n        var = data.var(1)\n    mean_indices = means.argsort()\n    n_elements = int((data.shape[0] / nbins))\n    frac_elements = int((n_elements * frac))\n    for i in range(nbins):\n        bin_i = mean_indices[(i * n_elements):((i + 1) * n_elements)]\n        if (i == (nbins - 1)):\n            bin_i = mean_indices[(i * n_elements):]\n        var_i = var[bin_i]\n        var_sorted = var_i.argsort()\n        top_var_indices = var_sorted[(len(bin_i) - frac_elements):]\n        ind = bin_i[top_var_indices]\n        ind = [index for index in ind if (var[index] > 0)]\n        indices.extend(ind)\n    return indices", "docstring": "This function identifies the genes that have the max variance\nacross a number of bins sorted by mean.\n\nArgs:\ndata (array): genes x cells\nnbins (int): number of bins to sort genes by mean expression level. Default: 10.\nfrac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1\n\nReturns:\nlist of gene indices (list of ints)", "source": "codesearchnet"}
{"code": "def eval_algorithm(closing, low, high):\n    if ((high - low) == 0):\n        return (100 * (closing - low))\n    else:\n        return ((100 * (closing - low)) / (high - low))", "docstring": "Evaluates the SO algorithm\n\nArgs:\nclosing: Float of current closing price.\nlow: Float of lowest low closing price throughout some duration.\nhigh: Float of highest high closing price throughout some duration.\n\nReturns:\nFloat SO between 0 and 100.", "source": "codesearchnet"}
{"code": "def assert_image_exists(self, pattern, timeout=20.0, **kwargs):\n        \n        pattern = self.d.pattern_open(pattern)\n        match_kwargs = kwargs.copy()\n        match_kwargs.pop('safe', None)\n        match_kwargs.update({\n            'timeout': timeout,\n            'safe': True,\n        })\n        res = self.d.wait(pattern, **match_kwargs)\n        is_success = res is not None\n        message = 'assert image exists'\n        if res:\n            x, y = res.pos\n            kwargs['position'] = {'x': x, 'y': y}\n            message = 'image exists\\npos %s\\nconfidence=%.2f\\nmethod=%s' % (res.pos, res.confidence, res.method)\n        else:\n            res = self.d.match(pattern)\n            if res is None:\n                message = 'Image not found'\n            else:\n                th = kwargs.get('threshold') or pattern.threshold or self.image_match_threshold\n                message = 'Matched: %s\\nPosition: %s\\nConfidence: %.2f\\nThreshold: %.2f' % (\n                    res.matched, res.pos, res.confidence, th)\n\n        kwargs['target'] = self._save_screenshot(pattern, name_prefix='target')\n        kwargs['screenshot'] = self.last_screenshot\n        kwargs.update({\n            'action': 'assert_image_exists',\n            'message': message,\n            'success': is_success,\n        })\n        self._add_assert(**kwargs)", "docstring": "Assert if image exists\nArgs:\n- pattern: image filename # not support pattern for now\n- timeout (float): seconds\n- safe (bool): not raise assert error even throung failed.", "source": "juraj-google-style"}
{"code": "def transform_feature(self, transformation_cache, state_manager):\n    pass", "docstring": "Returns intermediate representation (usually a `Tensor`).\n\nUses `transformation_cache` to create an intermediate representation\n(usually a `Tensor`) that other feature columns can use.\n\nExample usage of `transformation_cache`:\nLet's say a Feature column depends on raw feature ('raw') and another\n`FeatureColumn` (input_fc). To access corresponding `Tensor`s,\ntransformation_cache will be used as follows:\n\n```python\nraw_tensor = transformation_cache.get('raw', state_manager)\nfc_tensor = transformation_cache.get(input_fc, state_manager)\n```\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.\n\nReturns:\nTransformed feature `Tensor`.", "source": "github-repos"}
{"code": "def _parse_alt_url(html_chunk):\n    \n    url_list = html_chunk.find(\"a\", fn=has_param(\"href\"))\n    url_list = map(lambda x: x.params[\"href\"], url_list)\n    url_list = filter(lambda x: not x.startswith(\"autori/\"), url_list)\n\n    if not url_list:\n        return None\n\n    return normalize_url(BASE_URL, url_list[0])", "docstring": "Parse URL from alternative location if not found where it should be.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr: Book's URL.", "source": "juraj-google-style"}
{"code": "def plotallanvar(data, dt, tmax=10, ax=None, **kwargs):\n    if (ax is None):\n        ax = plt.gca()\n    (tk, allanvar) = allan_variance(data, dt, tmax)\n    ax.loglog(tk, allanvar, **kwargs)\n    ax.set_xlabel('Time [s]')\n    ax.set_ylabel('Allan Variance')\n    ax.legend()", "docstring": "Plot Allan variance.\n\nArgs:\ndata (np.ndarray): Input data.\ndt (float): Time between each data.\ntmax (float): Maximum time.\nax (matplotlib.axes): Axis the figure is plotted on.\nkwargs (optional): Plot options passed to ax.plot().", "source": "codesearchnet"}
{"code": "def hwvtep_attach_vlan_vid(self, **kwargs):\n    name = kwargs.pop('name')\n    mac = kwargs.pop('mac')\n    vlan = kwargs.pop('vlan')\n    name_args = dict(name=name, vid=vlan, mac=mac)\n    method_name = 'overlay_gateway_attach_vlan_mac'\n    method_class = self._brocade_tunnels\n    gw_attr = getattr(method_class, method_name)\n    config = gw_attr(**name_args)\n    output = self._callback(config)\n    return output", "docstring": "Identifies exported VLANs in VXLAN gateway configurations.\n\nArgs:\nname (str): overlay_gateway name\nvlan(str):  vlan_id range\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "class InputFeatures:\n    input_ids: List[int]\n    attention_mask: Optional[List[int]] = None\n    token_type_ids: Optional[List[int]] = None\n    label: Optional[Union[int, float]] = None\n\n    def to_json_string(self):\n        \n        return json.dumps(dataclasses.asdict(self)) + '\\n'", "docstring": "A single set of features of data. Property names are the same names as the corresponding inputs to a model.\n\nArgs:\ninput_ids: Indices of input sequence tokens in the vocabulary.\nattention_mask: Mask to avoid performing attention on padding token indices.\nMask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)\ntokens.\ntoken_type_ids: (Optional) Segment token indices to indicate first and second\nportions of the inputs. Only some models use them.\nlabel: (Optional) Label corresponding to the input. Int for classification problems,\nfloat for regression problems.", "source": "github-repos"}
{"code": "def close(self):\n        \n        self._dll.JLINKARM_Close()\n\n        if self._lock is not None:\n            del self._lock\n            self._lock = None\n\n        return None", "docstring": "Closes the open J-Link.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``\n\nRaises:\nJLinkException: if there is no connected JLink.", "source": "juraj-google-style"}
{"code": "def normalize_words(self, ord=2, inplace=False):\n    if (ord == 2):\n        ord = None\n    vectors = (self.vectors.T / np.linalg.norm(self.vectors, ord, axis=1))\n    if inplace:\n        self.vectors = vectors.T\n        return self\n    return Embedding(vectors=vectors.T, vocabulary=self.vocabulary)", "docstring": "Normalize embeddings matrix row-wise.\n\nArgs:\nord: normalization order. Possible values {1, 2, 'inf', '-inf'}", "source": "codesearchnet"}
{"code": "def make_id():\n    global _simple_id\n    if settings.simple_ids(True):\n        with _simple_id_lock:\n            _simple_id += 1\n            return str(_simple_id)\n    else:\n        return make_globally_unique_id()", "docstring": "Return a new unique ID for a Bokeh object.\n\nNormally this function will return simple monotonically increasing integer\nIDs (as strings) for identifying Bokeh objects within a Document. However,\nif it is desirable to have globally unique for every object, this behavior\ncan be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]\n    return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel\nTransformer sequence pair mask has the following format:\n\n```\n2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n| first sequence    | second sequence |\n```\n\nIf `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).", "source": "github-repos"}
{"code": "def _GetAuthToken(self, email, password):\n\t\t\n\t\taccount_type = \"GOOGLE\"\n\t\tif self.host.endswith(\".google.com\") and not force_google_account:\n\t\t\t\n\t\t\taccount_type = \"HOSTED\"\n\t\treq = self._CreateRequest(\n\t\t\t\turl=\"https:\n\t\t\t\tdata=urllib.urlencode({\n\t\t\t\t\t\t\"Email\": email,\n\t\t\t\t\t\t\"Passwd\": password,\n\t\t\t\t\t\t\"service\": \"ah\",\n\t\t\t\t\t\t\"source\": \"rietveld-codereview-upload\",\n\t\t\t\t\t\t\"accountType\": account_type,\n\t\t\t\t}),\n\t\t)\n\t\ttry:\n\t\t\tresponse = self.opener.open(req)\n\t\t\tresponse_body = response.read()\n\t\t\tresponse_dict = dict(x.split(\"=\") for x in response_body.split(\"\\n\") if x)\n\t\t\treturn response_dict[\"Auth\"]\n\t\texcept urllib2.HTTPError, e:\n\t\t\tif e.code == 403:\n\t\t\t\tbody = e.read()\n\t\t\t\tresponse_dict = dict(x.split(\"=\", 1) for x in body.split(\"\\n\") if x)\n\t\t\t\traise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)\n\t\t\telse:\n\t\t\t\traise", "docstring": "Uses ClientLogin to authenticate the user, returning an auth token.\n\nArgs:\nemail:    The user's email address\npassword: The user's password\n\nRaises:\nClientLoginError: If there was an error authenticating with ClientLogin.\nHTTPError: If there was some other form of HTTP error.\n\nReturns:\nThe authentication token returned by ClientLogin.", "source": "juraj-google-style"}
{"code": "def get_dimension_index(self, name, value):\n        \n\n        if 'index' not in self.get('dimension', {}). \\\n                get(name, {}).get('category', {}):\n            return 0\n        ndx = self['dimension'][name]['category']['index']\n\n        if isinstance(ndx, list):\n            return ndx.index(value)\n        else:\n            return ndx[value]", "docstring": "Converts a dimension ID string and a categody ID string into the \\\nnumeric index of that category in that dimension\nArgs:\nname(string): ID string of the dimension.\nvalue(string): ID string of the category.\n\nReturns:\nndx[value](int): index of the category in the dimension.", "source": "juraj-google-style"}
{"code": "def _get_course_content_from_ecommerce(course_id, site_code=None):\n    api = get_ecommerce_client(site_code=site_code)\n    try:\n        api_response = api.courses(course_id).get()\n    except Exception:\n        logger.exception('An error occurred while retrieving data for course run [%s] from the Catalog API.', course_id, exc_info=True)\n        return {}\n    return {'title': api_response.get('name'), 'verification_deadline': api_response.get('verification_deadline')}", "docstring": "Get course information using the Ecommerce course api.\n\nIn case of error returns empty response.\nArguments:\ncourse_id (str): course key of the course\nsite_code (str): site code\n\nReturns:\ncourse information from Ecommerce", "source": "codesearchnet"}
{"code": "def create_bird_config_files(bird_configuration):\n    for ip_version in bird_configuration:\n        config_file = bird_configuration[ip_version]['config_file']\n        try:\n            touch(config_file)\n        except OSError as exc:\n            raise ValueError('failed to create {f}:{e}'.format(f=config_file, e=exc))\n        if bird_configuration[ip_version]['keep_changes']:\n            history_dir = os.path.join(os.path.dirname(config_file), 'history')\n            try:\n                os.mkdir(history_dir)\n            except FileExistsError:\n                pass\n            except OSError as exc:\n                raise ValueError('failed to make directory {d} for keeping a history of changes for {b}:{e}'.format(d=history_dir, b=config_file, e=exc))\n            else:\n                print('{d} is created'.format(d=history_dir))", "docstring": "Create bird configuration files per IP version.\n\nCreates bird configuration files if they don't exist. It also creates the\ndirectories where we store the history of changes, if this functionality is\nenabled.\n\nArguments:\nbird_configuration (dict): A dictionary with settings for bird.\n\nReturns:\nNone\n\nRaises:\nValueError if we can't create bird configuration files and the\ndirectory to store the history of changes in bird configuration file.", "source": "codesearchnet"}
{"code": "def interleave(args):\n    arg_iters = list(map(iter, args))\n    cycle_iter = it.cycle(arg_iters)\n    for iter_ in cycle_iter:\n        (yield six.next(iter_))", "docstring": "r\"\"\"\nzip followed by flatten\n\nArgs:\nargs (tuple): tuple of lists to interleave\n\nSeeAlso:\nYou may actually be better off doing something like this:\na, b, = args\nut.flatten(ut.bzip(a, b))\n\nut.flatten(ut.bzip([1, 2, 3], ['-']))\n[1, '-', 2, '-', 3, '-']\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_iter import *  # NOQA\n>>> import utool as ut\n>>> args = ([1, 2, 3, 4, 5], ['A', 'B', 'C', 'D', 'E', 'F', 'G'])\n>>> genresult = interleave(args)\n>>> result = ut.repr4(list(genresult), nl=False)\n>>> print(result)\n[1, 'A', 2, 'B', 3, 'C', 4, 'D', 5, 'E']", "source": "codesearchnet"}
{"code": "def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):\n    with tf.variable_scope('sigmoid_accuracy_one_hot', values=[logits, labels]):\n        del weights_fn\n        predictions = tf.nn.sigmoid(logits)\n        labels = tf.argmax(labels, (- 1))\n        predictions = tf.argmax(predictions, (- 1))\n        (_, accuracy) = tf.metrics.accuracy(labels=labels, predictions=predictions)\n        return (accuracy, tf.constant(1.0))", "docstring": "Calculate accuracy for a set, given one-hot labels and logits.\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\naccuracy (scalar), weights", "source": "codesearchnet"}
{"code": "def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:\n    if labels is not None:\n        labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n        use_cache = False\n        if decoder_input_ids is None and decoder_inputs_embeds is None:\n            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n    lm_logits = self.bias_layer(lm_logits)\n    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n    if not return_dict:\n        output = (lm_logits,) + outputs[1:]\n        return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the masked language modeling loss. Indices should either be in `[0, ...,\nconfig.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:", "source": "github-repos"}
{"code": "def remove_unused_links(self, used):\n        \n        unused = []\n\n        self._execute(\"SELECT * FROM {}\".format(self.LINK_STATE_TABLE))\n        for row in self.cursor:\n            relpath, inode, mtime = row\n            inode = self._from_sqlite(inode)\n            path = os.path.join(self.root_dir, relpath)\n\n            if path in used:\n                continue\n\n            if not os.path.exists(path):\n                continue\n\n            actual_inode = get_inode(path)\n            actual_mtime, _ = get_mtime_and_size(path)\n\n            if inode == actual_inode and mtime == actual_mtime:\n                logger.debug(\"Removing '{}' as unused link.\".format(path))\n                remove(path)\n                unused.append(relpath)\n\n        for relpath in unused:\n            cmd = 'DELETE FROM {} WHERE path = \"{}\"'\n            self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))", "docstring": "Removes all saved links except the ones that are used.\n\nArgs:\nused (list): list of used links that should not be removed.", "source": "juraj-google-style"}
{"code": "def data_period_start_day(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `data_period_start_day`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `data_period_start_day`')\n\n        self._data_period_start_day = value", "docstring": "Corresponds to IDD Field `data_period_start_day`\n\nArgs:\nvalue (str): value for IDD Field `data_period_start_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def get_asset_url(self, path):\n\t\t\n\t\turl = self.root_url + '/assets/' + path\n\t\tif path in self.asset_hash:\n\t\t\turl += '?' + self.asset_hash[path]\n\t\treturn url", "docstring": "Get the URL of an asset. If asset hashes are added and one exists for\nthe path, it will be appended as a query string.\n\nArgs:\npath (str): Path to the file, relative to your \"assets\" directory.", "source": "juraj-google-style"}
{"code": "def _container_strategy(self):\n    container_strategy = self._container_strategy_weakref()\n    assert container_strategy is not None\n    return container_strategy", "docstring": "Get the containing `tf.distribute.Strategy`.\n\nThis should not generally be needed except when creating a new\n`ReplicaContext` and to validate that the caller is in the correct\n`scope()`.\n\nReturns:\nThe `tf.distribute.Strategy` such that `strategy.extended` is `self`.", "source": "github-repos"}
{"code": "def get_project_name(project_id, projects):\n    \n    for project in projects:\n        if project_id == project.id:\n            return project.name", "docstring": "Retrieves project name for given project id\n\nArgs:\nprojects: List of projects\nproject_id: project id\n\nReturns: Project name or None if there is no match", "source": "juraj-google-style"}
{"code": "def files_info(self, *, id: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"id\": id})\n        return self.api_call(\"files.info\", http_verb=\"GET\", params=kwargs)", "docstring": "Gets information about a team file.\n\nArgs:\nid (str): The file id. e.g. 'F1234467890'", "source": "juraj-google-style"}
{"code": "def get_equivalent_qpoints(self, index):\n        \n        \n        \n\n        if self.qpoints[index].label is None:\n            return [index]\n\n        list_index_qpoints = []\n        for i in range(self.nb_qpoints):\n            if self.qpoints[i].label == self.qpoints[index].label:\n                list_index_qpoints.append(i)\n\n        return list_index_qpoints", "docstring": "Returns the list of qpoint indices equivalent (meaning they are the\nsame frac coords) to the given one.\n\nArgs:\nindex: the qpoint index\n\nReturns:\na list of equivalent indices\n\nTODO: now it uses the label we might want to use coordinates instead\n(in case there was a mislabel)", "source": "juraj-google-style"}
{"code": "def parse(self, text, key=None):\n        \n        try:\n            data = json.loads(text)\n        except ValueError as e:\n            raise ValueError(\"%s: Value: [%s]\" % (e, text))\n\n        if data and key:\n            if key not in data:\n                raise ValueError(\"Invalid response (key %s not found): %s\" % (key, data))\n            data = data[key]\n        return data", "docstring": "Parses a response.\n\nArgs:\ntext (str): Text to parse\n\nKwargs:\nkey (str): Key to look for, if any\n\nReturns:\nParsed value\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def _extract_response_xml(self, domain, response):\n        \n        attributes = {}\n        alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'}\n        try:\n            xml_root = ET.fromstring(response._content)\n            for xml_child in xml_root.findall('SD\n                if xml_child.tag in alexa_keys and \\\n                        alexa_keys[xml_child.tag] in xml_child.attrib:\n                    attributes[xml_child.tag.lower(\n                    )] = xml_child.attrib[alexa_keys[xml_child.tag]]\n        except ParseError:\n            \n            pass\n        attributes['domain'] = domain\n        return {'attributes': attributes}", "docstring": "Extract XML content of an HTTP response into dictionary format.\n\nArgs:\nresponse: HTML Response objects\nReturns:\nA dictionary: {alexa-ranking key : alexa-ranking value}.", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists existing `BuildTrigger`s. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsTriggersListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListBuildTriggersResponse) The response message.", "source": "github-repos"}
{"code": "def add_tensor_filter(self, filter_name, tensor_filter):\n    if self._session_wrapper:\n        self._session_wrapper.add_tensor_filter(filter_name, tensor_filter)\n    else:\n        self._pending_tensor_filters[filter_name] = tensor_filter", "docstring": "Add a tensor filter.\n\nSee doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.\nOverride default behavior to accommodate the possibility of this method\nbeing\ncalled prior to the initialization of the underlying\n`LocalCLIDebugWrapperSession` object.\n\nArgs:\nfilter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`\nfor details.\ntensor_filter: See doc of\n`LocalCLIDebugWrapperSession.add_tensor_filter()` for details.", "source": "github-repos"}
{"code": "def render_template(template, out_dir='.', context=None):\n    \n    \n    template_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n                                      '..',\n                                      'templates',\n                                      template\n                                     )\n\n    \n    files = []\n    empty_dirs = []\n\n    for (dirpath, _, filenames) in os.walk(template_directory):\n        \n        if len(filenames) == 0:\n            \n            empty_dirs.append(os.path.relpath(dirpath, template_directory))\n        \n        else:\n            \n            files.extend([os.path.join(dirpath, filepath) for filepath in filenames])\n\n    \n    for source_file in files:\n        \n        with open(source_file, 'r') as file:\n            \n            template = Template(file.read())\n            \n            template_rendered = template.render(**(context or {}))\n\n            \n            source_relpath = os.path.relpath(source_file, template_directory)\n\n            \n            filename = os.path.join(out_dir, source_relpath)\n            \n            filename_rendered = Template(filename).render(**context)\n\n            \n            source_dir = os.path.dirname(filename_rendered)\n            \n            if not os.path.exists(source_dir):\n                \n                os.makedirs(source_dir)\n\n            \n            with open(filename_rendered, 'w') as target_file:\n                \n                target_file.write(template_rendered)\n\n    \n    for dirpath in empty_dirs:\n        try:\n            \n            dirname = os.path.join(out_dir, dirpath)\n            \n            dirname_rendered = Template(dirname).render(**context)\n\n            \n            if not os.path.exists(dirname_rendered):\n                \n                os.makedirs(dirname_rendered)\n        except OSError as exc:\n            \n            if exc.errno == errno.EEXIST and os.path.isdir(dirpath):\n                \n                pass\n            \n            else:\n                \n                raise", "docstring": "This function renders the template desginated by the argument to the\ndesignated directory using the given context.\n\nArgs:\ntemplate (string) : the source template to use (relative to ./templates)\nout_dir (string) : the name of the output directory\ncontext (dict) : the template rendering context", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, session, storage_type=definitions.STORAGE_TYPE_SESSION, task=None):\n    \n    super(StorageWriter, self).__init__()\n    self._first_written_event_source_index = 0\n    self._serializers_profiler = None\n    self._session = session\n    self._storage_profiler = None\n    self._storage_type = storage_type\n    self._task = task\n    self._written_event_source_index = 0\n    self.number_of_analysis_reports = 0\n    self.number_of_event_sources = 0\n    self.number_of_event_tags = 0\n    self.number_of_events = 0\n    self.number_of_warnings = 0", "docstring": "Initializes a storage writer.\n\nArgs:\nsession (Session): session the storage changes are part of.\nstorage_type (Optional[str]): storage type.\ntask(Optional[Task]): task.", "source": "juraj-google-style"}
{"code": "def get_string(self, significant_figures=6):\n        \n        ph = \"{:.%df}\" % significant_figures\n        lines = []\n        for bound, d in zip(self.bounds, \"xyz\"):\n            fillers = bound + [d] * 2\n            bound_format = \" \".join([ph] * 2 + [\" {}lo {}hi\"])\n            lines.append(bound_format.format(*fillers))\n        if self.tilt:\n            tilt_format = \" \".join([ph] * 3 + [\" xy xz yz\"])\n            lines.append(tilt_format.format(*self.tilt))\n        return \"\\n\".join(lines)", "docstring": "Returns the string representation of simulation box in LAMMPS\ndata file format.\n\nArgs:\nsignificant_figures (int): No. of significant figures to\noutput for box settings. Default to 6.\n\nReturns:\nString representation", "source": "juraj-google-style"}
{"code": "def _init_from_proto(self, context_def, import_scope=None):\n    assert isinstance(context_def, control_flow_pb2.CondContextDef)\n    g = ops.get_default_graph()\n    self._name = ops.prepend_name_scope(context_def.context_name, import_scope)\n    self._pred = g.as_graph_element(ops.prepend_name_scope(context_def.pred_name, import_scope))\n    self._pivot = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_name, import_scope))\n    self._branch = context_def.branch\n    super(CondContext, self).__init__(values_def=context_def.values_def, import_scope=import_scope)", "docstring": "Creates a new `CondContext` from protocol buffer.\n\nArgs:\ncontext_def: `CondContextDef` protocol buffer.\nimport_scope: Optional `string`. Name scope to add.", "source": "github-repos"}
{"code": "def filter(self, nodes):\n        \n\n        filtered_dag = DAG()\n\n        \n        for node in nodes:\n            filtered_dag.add_node_if_not_exists(node)\n            for edge in self.all_downstreams(node):\n                filtered_dag.add_node_if_not_exists(edge)\n\n        \n        for node, edges in self.graph.items():\n            if node in filtered_dag.graph:\n                filtered_dag.graph[node] = edges\n\n        return filtered_dag", "docstring": "Returns a new DAG with only the given nodes and their\ndependencies.\n\nArgs:\nnodes (list): The nodes you are interested in.\n\nReturns:\n:class:`stacker.dag.DAG`: The filtered graph.", "source": "juraj-google-style"}
{"code": "def get(self, ldap_dn):\n        \n        self.base_dn = ldap_dn\n        self.sub_tree = BASE\n        return self.first()", "docstring": "Return an LDAP entry by DN\n\nArgs:\nldap_dn (str): LDAP DN", "source": "juraj-google-style"}
{"code": "def is_copy_constructor(constructor):\n    \n    assert isinstance(constructor, calldef_members.constructor_t)\n    args = constructor.arguments\n    parent = constructor.parent\n\n    \n    if len(args) != 1:\n        return False\n\n    \n    arg = args[0]\n\n    if not isinstance(arg.decl_type, cpptypes.compound_t):\n        \n        \n        \n        \n        \n        \n        \n        \n        return False\n\n    \n    if not type_traits.is_reference(arg.decl_type):\n        return False\n\n    \n    if not type_traits.is_const(arg.decl_type.base):\n        return False\n\n    un_aliased = type_traits.remove_alias(arg.decl_type.base)\n    \n    if not isinstance(un_aliased.base, cpptypes.declarated_t):\n        \n        \n        \n        \n        return False\n\n    \n    \n    return id(un_aliased.base.declaration) == id(parent)", "docstring": "Check if the declaration is a copy constructor,\n\nArgs:\nconstructor (declarations.constructor_t): the constructor\nto be checked.\n\nReturns:\nbool: True if this is a copy constructor, False instead.", "source": "juraj-google-style"}
{"code": "def _read_mptcp_prio(self, bits, size):\n    temp = (self._read_unpack(1) if size else None)\n    data = dict(subtype='MP_PRIO', prio=dict(res=(b'\\x00' * 3), backup=(True if int(bits[3]) else False), addrid=temp))\n    return data", "docstring": "Read Change Subflow Priority option.\n\nPositional arguments:\n* bits - str, 4-bit data\n* size - int, length of option\n\nReturns:\n* dict -- extracted Change Subflow Priority (MP_PRIO) option\n\nStructure of MP_PRIO [RFC 6824]:\n1                   2                   3\n0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+---------------+---------------+-------+-----+-+--------------+\n|     Kind      |     Length    |Subtype|     |B| AddrID (opt) |\n+---------------+---------------+-------+-----+-+--------------+\n\nOctets      Bits        Name                    Description\n0           0     tcp.opt.kind            Kind (30)\n1           8     tcp.opt.length          Length (3/4)\n2          16     tcp.opt.mp.subtype      Subtype (5)\n2          23     tcp.opt.mp.prio.backup  Backup Path (B)\n3          24     tcp.opt.mp.prio.addrid  Address ID (optional)", "source": "codesearchnet"}
{"code": "def fermi_fourier_trans_inverse_4(qubits):\n    \n\n    yield fswap(qubits[1], qubits[2]),\n    yield fermi_fourier_trans_2(qubits[0], qubits[1])\n    yield fermi_fourier_trans_2(qubits[2], qubits[3])\n    yield fswap(qubits[1], qubits[2])\n    yield fermi_fourier_trans_2(qubits[0], qubits[1])\n    yield cirq.S(qubits[2])\n    yield fermi_fourier_trans_2(qubits[2], qubits[3])\n    yield fswap(qubits[1], qubits[2])", "docstring": "The reverse fermionic Fourier transformation implemented on 4 qubits\non a line, which maps the momentum picture to the position picture.\nUsing the fast Fourier transformation algorithm, the circuit can be\ndecomposed into 2-mode fermionic Fourier transformation, the fermionic\nSWAP gates, and single-qubit rotations.\n\nArgs:\nqubits: list of four qubits", "source": "juraj-google-style"}
{"code": "def load_variables_from_checkpoint(sess, start_checkpoint):\n    saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())\n    saver.restore(sess, start_checkpoint)", "docstring": "Utility function to centralize checkpoint restoration.\n\nArgs:\nsess: TensorFlow session.\nstart_checkpoint: Path to saved checkpoint on disk.", "source": "github-repos"}
{"code": "def do_get(self, uri):\n    self.validate_resource_uri(uri)\n    return self._connection.get(uri)", "docstring": "Helps to make get requests\n\nArgs:\nuri: URI of the resource\n\nReturns:\nReturns: Returns the resource data", "source": "codesearchnet"}
{"code": "def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params):\n    nmf = NMF(k)\n    if (len(W_list) == 0):\n        W_list = []\n        for i in range(n_runs):\n            W = nmf.fit_transform(data)\n            W_list.append(W)\n    W_stacked = np.hstack(W_list)\n    nmf_w = nmf.fit_transform(W_stacked)\n    nmf_h = nmf.components_\n    H_new = data.T.dot(nmf_w).T\n    nmf2 = NMF(k, init='custom')\n    nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)\n    H_new = nmf2.components_\n    return (nmf_w, H_new)", "docstring": "Runs an ensemble method on the list of NMF W matrices...\n\nArgs:\ndata: genes x cells array (should be log + cell-normalized)\nk: number of classes\nn_runs (optional): number of random initializations of state estimation\nM_list (optional): list of M arrays from state estimation\nse_params (optional): optional poisson_estimate_state params\n\nReturns:\nW_new\nH_new", "source": "codesearchnet"}
{"code": "def _create_query(node, context):\n    \n    visited_nodes = [node]\n    output_columns = _get_output_columns(visited_nodes, context)\n    filters = _get_filters(visited_nodes, context)\n    selectable = sql_context_helpers.get_node_selectable(node, context)\n    query = select(output_columns).select_from(selectable).where(and_(*filters))\n    return query", "docstring": "Create a query from a SqlNode.\n\nArgs:\nnode: SqlNode, the current node.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nSelectable, selectable of the generated query.", "source": "juraj-google-style"}
{"code": "def __init__(self, run_object):\n        \n        run_obj_type = self.get_run_object_type(run_object)\n        if run_obj_type == 'module':\n            self.init_module(run_object)\n        elif run_obj_type == 'package':\n            self.init_package(run_object)\n        else:\n            self.init_function(run_object)", "docstring": "Initializes profiler.\n\nArgs:\nrun_object: object to be profiled.", "source": "juraj-google-style"}
{"code": "def get_topics_strings(topics_words, alpha, vocabulary, topics_to_print=10, words_per_topic=10):\n    alpha = np.squeeze(alpha, axis=0)\n    highest_weight_topics = np.argsort((- alpha), kind='mergesort')\n    top_words = np.argsort((- topics_words), axis=1)\n    res = []\n    for topic_idx in highest_weight_topics[:topics_to_print]:\n        l = ['index={} alpha={:.2f}'.format(topic_idx, alpha[topic_idx])]\n        l += [vocabulary[word] for word in top_words[(topic_idx, :words_per_topic)]]\n        res.append(' '.join(l))\n    return np.array(res)", "docstring": "Returns the summary of the learned topics.\n\nArguments:\ntopics_words: KxV tensor with topics as rows and words as columns.\nalpha: 1xK tensor of prior Dirichlet concentrations for the\ntopics.\nvocabulary: A mapping of word's integer index to the corresponding string.\ntopics_to_print: The number of topics with highest prior weight to\nsummarize.\nwords_per_topic: Number of wodrs per topic to return.\n\nReturns:\nsummary: A np.array with strings.", "source": "codesearchnet"}
{"code": "def successful_request(self, now):\n    self._successful_requests.add(now, 1)", "docstring": "Notifies the throttler of a successful request.\n\nMust be called once for each request (for which throttle_request was\npreviously called) that succeeded.\n\nArgs:\nnow: int, time in ms since the epoch", "source": "github-repos"}
{"code": "def update(cls, session, record):\n        \n        cls._check_implements('update')\n        data = record.to_api()\n        del data['id']\n        data['reload'] = True\n        return cls(\n            '/%s/%s.json' % (cls.__endpoint__, record.id),\n            data=data,\n            request_type=RequestPaginator.PUT,\n            singleton=True,\n            session=session,\n        )", "docstring": "Update a record.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nrecord (helpscout.BaseModel): The record to\nbe updated.\n\nReturns:\nhelpscout.BaseModel: Freshly updated record.", "source": "juraj-google-style"}
{"code": "def encipher_vigenere(plaintext, plain_vocab, key):\n    ciphertext = []\n    layers = [ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))]\n    for (i, sentence) in enumerate(plaintext):\n        cipher_sentence = []\n        for (j, character) in enumerate(sentence):\n            key_idx = key[(j % len(key))]\n            encrypted_char = layers[key_idx].encrypt_character(character)\n            cipher_sentence.append(encrypted_char)\n        ciphertext.append(cipher_sentence)\n    return ciphertext", "docstring": "Encrypt plain text with given key.\n\nArgs:\nplaintext (list of list of Strings): a list of plain text to encrypt.\nplain_vocab (list of Integer): unique vocabularies being used.\nkey (list of Integer): key to encrypt cipher using Vigenere table.\n\nReturns:\nciphertext (list of Strings): encrypted plain text.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, datetime_value, date_time_description, data_type=None,\n      time_zone=None):\n    \n    year, month, day_of_month, hours, minutes, seconds, _, _, _ = (\n        datetime_value.utctimetuple())\n\n    time_elements_tuple = (\n        year, month, day_of_month, hours, minutes, seconds,\n        datetime_value.microsecond)\n\n    date_time = dfdatetime_time_elements.TimeElementsInMicroseconds(\n        time_elements_tuple=time_elements_tuple)\n\n    super(PythonDatetimeEvent, self).__init__(\n        date_time, date_time_description, data_type=data_type,\n        time_zone=time_zone)", "docstring": "Initializes an event.\n\nArgs:\ndatetime_value (datetime.datetime): date and time values.\ndate_time_description (str): description of the meaning of the date and\ntime values.\ndata_type (Optional[str]): event data type. If the data type is not set\nit is derived from the DATA_TYPE class attribute.\ntime_zone (Optional[datetime.tzinfo]): time zone.", "source": "juraj-google-style"}
{"code": "def serialize_to_normalized_compact_json(py_obj):\n    return json.dumps(py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes)", "docstring": "Serialize a native object to normalized, compact JSON.\n\nThe JSON string is normalized by sorting any dictionary keys. It will be on a single\nline without whitespace between elements.\n\nArgs:\npy_obj: object\nAny object that can be represented in JSON. Some types, such as datetimes are\nautomatically converted to strings.\n\nReturns:\nstr: normalized, compact JSON string.", "source": "codesearchnet"}
{"code": "def assert_integer_form(x, data=None, summarize=None, message=None, int_dtype=None, name='assert_integer_form'):\n    with ops.name_scope(name, values=[x, data]):\n        x = ops.convert_to_tensor(x, name='x')\n        if x.dtype.is_integer:\n            return control_flow_ops.no_op()\n        message = message or '{} has non-integer components'.format(x)\n        if int_dtype is None:\n            try:\n                int_dtype = {dtypes.float16: dtypes.int16, dtypes.float32: dtypes.int32, dtypes.float64: dtypes.int64}[x.dtype.base_dtype]\n            except KeyError:\n                raise TypeError('Unrecognized type {}'.format(x.dtype.name))\n        return check_ops.assert_equal(x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype), data=data, summarize=summarize, message=message, name=name)", "docstring": "Assert that x has integer components (or floats equal to integers).\n\nArgs:\nx: Floating-point `Tensor`\ndata: The tensors to print out if the condition is `False`. Defaults to\nerror message and first few entries of `x` and `y`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nint_dtype: A `tf.dtype` used to cast the float to. The default (`None`)\nimplies the smallest possible signed int will be used for casting.\nname: A name for this operation (optional).\n\nReturns:\nOp raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.", "source": "github-repos"}
{"code": "def get_input_shape_and_dtype(layer):\n\n    def _is_graph_model(layer):\n        return hasattr(layer, '_is_graph_network') and layer._is_graph_network or layer.__class__.__name__ == 'Sequential'\n    while _is_graph_model(layer):\n        if not layer.layers:\n            raise ValueError('An empty Model cannot be used as a Layer.')\n        layer = layer.layers[0]\n    if getattr(layer, '_batch_input_shape', None):\n        return (layer._batch_input_shape, layer.dtype)\n    return (None, None)", "docstring": "Retrieves input shape and input dtype of layer if applicable.\n\nArgs:\nlayer: Layer (or model) instance.\n\nReturns:\nTuple (input_shape, input_dtype). Both could be None if the layer\ndoes not have a defined input shape.\n\nRaises:\nValueError: in case an empty Sequential or Functional model is passed.", "source": "github-repos"}
{"code": "def post(self, path, params=None, timeout=None, event_timeout=None):\n        \n        future = self.post_async(path, params)\n        self.wait_all_futures(\n            future, timeout=timeout, event_timeout=event_timeout)\n        return future.result()", "docstring": "Synchronously calls a method\n\nArgs:\npath (list): The path to post to\nparams (dict): parameters for the call\ntimeout (float): time in seconds to wait for responses, wait\nforever if None\nevent_timeout: maximum time in seconds to wait between each response\nevent, wait forever if None\n\nReturns:\nthe result from 'method'", "source": "juraj-google-style"}
{"code": "def override_binary_operator_helper(func, op_name, clazz_object=tensor_lib.Tensor):\n\n    @traceback_utils.filter_traceback\n    def binary_op_wrapper(x, y):\n        with ops.name_scope(None, op_name, [x, y]) as name:\n            try:\n                x, y = maybe_promote_tensors(x, y)\n                return func(x, y, name=name)\n            except (TypeError, ValueError) as e:\n                if hasattr(type(y), '__r%s__' % op_name):\n                    try:\n                        r_op = getattr(y, '__r%s__' % op_name)\n                        out = r_op(x)\n                        if out is NotImplemented:\n                            raise\n                        return out\n                    except (TypeError, ValueError):\n                        raise e\n                else:\n                    raise\n\n    @traceback_utils.filter_traceback\n    def binary_op_wrapper_sparse(sp_x, y):\n        with ops.name_scope(None, op_name, [sp_x, y]) as name:\n            y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name='y')\n            return clazz_object(sp_x.indices, func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name), sp_x.dense_shape)\n\n    @traceback_utils.filter_traceback\n    def r_binary_op_wrapper(y, x):\n        with ops.name_scope(None, op_name, [x, y]) as name:\n            y, x = maybe_promote_tensors(y, x, force_same_dtype=True)\n            return func(x, y, name=name)\n    try:\n        doc = func.__doc__\n    except AttributeError:\n        doc = None\n    binary_op_wrapper.__doc__ = doc\n    r_binary_op_wrapper.__doc__ = doc\n    binary_op_wrapper_sparse.__doc__ = doc\n    if clazz_object is tensor_lib.Tensor:\n        clazz_object._override_operator('__%s__' % op_name, binary_op_wrapper)\n        del binary_op_wrapper\n        clazz_object._override_operator('__r%s__' % op_name, r_binary_op_wrapper)\n        del r_binary_op_wrapper\n    else:\n        clazz_object._override_operator('__%s__' % op_name, binary_op_wrapper_sparse)\n        del binary_op_wrapper_sparse", "docstring": "Register operators with different tensor and scalar versions.\n\nIf `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,\nsp_values, sp_shape, dense)` and outputs `(new_sp_values)`.\n\nArgs:\nfunc: the operator\nop_name: name of the operator being overridden\nclazz_object: class to override for.  Either `Tensor` or `SparseTensor`.", "source": "github-repos"}
{"code": "def _EnforceProcessMemoryLimit(self, memory_limit):\n    \n    \n    if resource:\n      if memory_limit is None:\n        memory_limit = 4 * 1024 * 1024 * 1024\n      elif memory_limit == 0:\n        memory_limit = resource.RLIM_INFINITY\n\n      resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))", "docstring": "Enforces a process memory limit.\n\nArgs:\nmemory_limit (int): maximum number of bytes the process is allowed\nto allocate, where 0 represents no limit and None a default of\n4 GiB.", "source": "juraj-google-style"}
{"code": "def ws010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `ws010`'.format(value))\n\n        self._ws010 = value", "docstring": "Corresponds to IDD Field `ws010`\nWind speed corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `ws010`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size):\n    num_windows = from_seq_length \n    rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])\n    rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)\n    rand_mask = torch.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask)\n    return rand_mask", "docstring": "Create 3D attention mask from a 2D tensor mask.\n\nArgs:\nfrom_blocked_mask: 2D Tensor of shape [batch_size,\nfrom_seq_length//from_block_size, from_block_size].\nto_blocked_mask: int32 Tensor of shape [batch_size,\nto_seq_length//to_block_size, to_block_size].\nrand_attn: [batch_size, num_attention_heads,\nfrom_seq_length//from_block_size-2, num_rand_blocks]\nnum_attention_heads: int. Number of attention heads.\nnum_rand_blocks: int. Number of random chunks per row.\nbatch_size: int. Batch size for computation.\nfrom_seq_length: int. length of from sequence.\nfrom_block_size: int. size of block in from sequence.\n\nReturns:\nfloat Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,\nfrom_block_size, num_rand_blocks*to_block_size].", "source": "github-repos"}
{"code": "def insert_data(self, remove_data=False, db_type='sqlite'):\n        \n        if self.update_source:\n            \n            import msp2db\n            self.c.execute(\n                \"INSERT INTO library_spectra_source (id, name, parsing_software) VALUES\"\n                \" ({a}, '{b}', 'msp2db-v{c}')\".format(a=self.current_id_origin, b=self.source, c=msp2db.__version__))\n            self.conn.commit()\n\n        if self.compound_info_all:\n            self.compound_info_all = _make_sql_compatible(self.compound_info_all)\n\n            cn = ', '.join(self.compound_info.keys()) + ',created_at,updated_at'\n\n            insert_query_m(self.compound_info_all, columns=cn, conn=self.conn, table='metab_compound',\n                           db_type=db_type)\n\n        self.meta_info_all = _make_sql_compatible(self.meta_info_all)\n\n        cn = 'id,' + ', '.join(self.meta_info.keys()) + ',library_spectra_source_id, inchikey_id'\n\n        insert_query_m(self.meta_info_all, columns=cn, conn=self.conn, table='library_spectra_meta',\n                       db_type=db_type)\n\n\n        cn = \"id, mz, i, other, library_spectra_meta_id\"\n        insert_query_m(self.spectra_all, columns=cn, conn=self.conn, table='library_spectra', db_type=db_type)\n        if self.spectra_annotation_all:\n            cn = \"id, mz, tentative_formula, mass_error, library_spectra_meta_id\"\n            insert_query_m(self.spectra_annotation_all, columns=cn, conn=self.conn,\n                           table='library_spectra_annotation', db_type=db_type)\n\n\n        \n        if remove_data:\n            self.meta_info_all = []\n            self.spectra_all = []\n            self.spectra_annotation_all = []\n            self.compound_info_all = []\n            self._get_current_ids(source=False)", "docstring": "Insert data stored in the current chunk of parsing into the selected database\n\n\nArgs:\nremove_data (boolean): Remove the data stored within the LibraryData object for the current chunk of\nprocessing\ndb_type (str): The type of database to submit to\neither 'sqlite', 'mysql' or 'django_mysql' [default sqlite]", "source": "juraj-google-style"}
{"code": "def write_config_file(self, parsed_namespace, output_file_paths, exit_after=False):\n    for output_file_path in output_file_paths:\n        try:\n            with open(output_file_path, 'w') as output_file:\n                pass\n        except IOError as e:\n            raise ValueError((\"Couldn't open %s for writing: %s\" % (output_file_path, e)))\n    if output_file_paths:\n        config_items = self.get_items_for_config_file_output(self._source_to_settings, parsed_namespace)\n        file_contents = self._config_file_parser.serialize(config_items)\n        for output_file_path in output_file_paths:\n            with open(output_file_path, 'w') as output_file:\n                output_file.write(file_contents)\n        message = ('Wrote config file to ' + ', '.join(output_file_paths))\n        if exit_after:\n            self.exit(0, message)\n        else:\n            print(message)", "docstring": "Write the given settings to output files.\n\nArgs:\nparsed_namespace: namespace object created within parse_known_args()\noutput_file_paths: any number of file paths to write the config to\nexit_after: whether to exit the program after writing the config files", "source": "codesearchnet"}
{"code": "def from_epsg_code(code):\n    code = str(code)\n    proj4 = utils.crscode_to_string('epsg', code, 'proj4')\n    crs = from_proj4(proj4)\n    return crs", "docstring": "Load crs object from epsg code, via spatialreference.org.\nParses based on the proj4 representation.\n\nArguments:\n\n- *code*: The EPSG code as an integer.\n\nReturns:\n\n- A CS instance of the indicated type.", "source": "codesearchnet"}
{"code": "def delete_variants(self, case_id, variant_type, category=None):\n    category = (category or '')\n    LOG.info('Deleting old {0} {1} variants for case {2}'.format(variant_type, category, case_id))\n    query = {'case_id': case_id, 'variant_type': variant_type}\n    if category:\n        query['category'] = category\n    result = self.variant_collection.delete_many(query)\n    LOG.info('{0} variants deleted'.format(result.deleted_count))", "docstring": "Delete variants of one type for a case\n\nThis is used when a case is reanalyzed\n\nArgs:\ncase_id(str): The case id\nvariant_type(str): 'research' or 'clinical'\ncategory(str): 'snv', 'sv' or 'cancer'", "source": "codesearchnet"}
{"code": "def GetForwardedIps(self, interface, interface_ip=None):\n    \n    args = ['ls', 'table', 'local', 'type', 'local']\n    options = self._CreateRouteOptions(dev=interface)\n    result = self._RunIpRoute(args=args, options=options)\n    result = re.sub(r'local\\s', r'', result)\n    return self.ParseForwardedIps(result.split())", "docstring": "Retrieve the list of configured forwarded IP addresses.\n\nArgs:\ninterface: string, the output device to query.\ninterface_ip: string, current interface ip address.\n\nReturns:\nlist, the IP address strings.", "source": "juraj-google-style"}
{"code": "def get_fractional_coords(self, cart_coords: Vector3Like) -> np.ndarray:\n    return dot(cart_coords, self.inv_matrix)", "docstring": "Returns the fractional coordinates given cartesian coordinates.\n\nArgs:\ncart_coords (3x1 array): Cartesian coords.\n\nReturns:\nFractional coordinates.", "source": "codesearchnet"}
{"code": "def image_summary(seqs, name, num=None):\n    seqs = tf.clip_by_value(seqs, 0.0, 1.0)\n    seqs = tf.unstack(seqs[:num])\n    joined_seqs = [tf.concat(tf.unstack(seq), 1) for seq in seqs]\n    joined_seqs = tf.expand_dims(tf.concat(joined_seqs, 0), 0)\n    tf.compat.v2.summary.image(name, joined_seqs, max_outputs=1, step=tf.compat.v1.train.get_or_create_global_step())", "docstring": "Visualizes sequences as TensorBoard summaries.\n\nArgs:\nseqs: A tensor of shape [n, t, h, w, c].\nname: String name of this summary.\nnum: Integer for the number of examples to visualize. Defaults to\nall examples.", "source": "codesearchnet"}
{"code": "def bqm_index_labelled_input(var_labels_arg_name, samples_arg_names):\n\n    def index_label_decorator(f):\n\n        @wraps(f)\n        def _index_label(sampler, bqm, **kwargs):\n            if (not hasattr(bqm, 'linear')):\n                raise TypeError('expected input to be a BinaryQuadraticModel')\n            linear = bqm.linear\n            var_labels = kwargs.get(var_labels_arg_name, None)\n            has_samples_input = any(((kwargs.get(arg_name, None) is not None) for arg_name in samples_arg_names))\n            if (var_labels is None):\n                if all(((v in linear) for v in range(len(bqm)))):\n                    return f(sampler, bqm, **kwargs)\n                if has_samples_input:\n                    err_str = 'Argument `{}` must be provided if any of the samples arguments {} are provided and the bqm is not already index-labelled'.format(var_labels_arg_name, samples_arg_names)\n                    raise ValueError(err_str)\n                try:\n                    inverse_mapping = dict(enumerate(sorted(linear)))\n                except TypeError:\n                    inverse_mapping = dict(enumerate(linear))\n                var_labels = {v: i for (i, v) in iteritems(inverse_mapping)}\n            else:\n                inverse_mapping = {i: v for (v, i) in iteritems(var_labels)}\n            response = f(sampler, bqm.relabel_variables(var_labels, inplace=False), **kwargs)\n            return response.relabel_variables(inverse_mapping, inplace=True)\n        return _index_label\n    return index_label_decorator", "docstring": "Returns a decorator which ensures bqm variable labeling and all other\nspecified sample-like inputs are index labeled and consistent.\n\nArgs:\nvar_labels_arg_name (str):\nThe name of the argument that the user should use to pass in an\nindex labeling for the bqm.\n\nsamples_arg_names (list[str]):\nThe names of the expected sample-like inputs which should be\nindexed according to the labels passed to the argument\n`var_labels_arg_name`.\n\nReturns:\nFunction decorator.", "source": "codesearchnet"}
{"code": "def has_no_narrow_neurite_section(neuron, neurite_filter, radius_threshold=0.05, considered_section_min_length=50):\n    considered_sections = (sec for sec in iter_sections(neuron, neurite_filter=neurite_filter) if (sec.length > considered_section_min_length))\n\n    def narrow_section(section):\n        'Select narrow sections'\n        return (section.points[(:, COLS.R)].mean() < radius_threshold)\n    bad_ids = [(section.id, section.points[1]) for section in considered_sections if narrow_section(section)]\n    return CheckResult((len(bad_ids) == 0), bad_ids)", "docstring": "Check if the neuron has dendrites with narrow sections\n\nArguments:\nneuron(Neuron): The neuron object to test\nneurite_filter(callable): filter the neurites by this callable\nradius_threshold(float): radii below this are considered narro\nconsidered_section_min_length(float): sections with length below\nthis are not taken into account\n\nReturns:\nCheckResult with result. result.info contains the narrow section ids and their\nfirst point", "source": "codesearchnet"}
{"code": "def assertNear(self, f1, f2, err, msg=None):\n    self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err, '%f != %f +/- %f%s' % (f1, f2, err, ' (%s)' % msg if msg is not None else ''))", "docstring": "Asserts that two floats are near each other.\n\nChecks that |f1 - f2| < err and asserts a test failure\nif not.\n\nArgs:\nf1: A float value.\nf2: A float value.\nerr: A float value.\nmsg: An optional string message to append to the failure message.", "source": "github-repos"}
{"code": "def split(state, num):\n    state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)\n    state = _key2seed(state)\n    try:\n        states = stateless_random_ops.stateless_split(state, num)\n    except AttributeError as e:\n        states = stateless_split(state, num)\n    states = array_ops_stack.unstack(states, num)\n    states = nest.map_structure(_seed2key, states)\n    return states", "docstring": "Creates new independent RNG states from an existing state.\n\nArgs:\nstate: the existing state.\nnum: the number of the new states.\n\nReturns:\nA tuple of new states.", "source": "github-repos"}
{"code": "def from_cif_file(cif_file, source='', comment=''):\n    r = CifParser(cif_file)\n    structure = r.get_structures()[0]\n    return Header(structure, source, comment)", "docstring": "Static method to create Header object from cif_file\n\nArgs:\ncif_file: cif_file path and name\nsource: User supplied identifier, i.e. for Materials Project this\nwould be the material ID number\ncomment: User comment that goes in header\n\nReturns:\nHeader Object", "source": "codesearchnet"}
{"code": "def get_bin_edges_from_axis(axis) -> np.ndarray:\n    \n    \n    bins = range(1, axis.GetNbins() + 1)\n    \n    bin_edges = np.empty(len(bins) + 1)\n    bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins]\n    bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins())\n\n    return bin_edges", "docstring": "Get bin edges from a ROOT hist axis.\n\nNote:\nDoesn't include over- or underflow bins!\n\nArgs:\naxis (ROOT.TAxis): Axis from which the bin edges should be extracted.\nReturns:\nArray containing the bin edges.", "source": "juraj-google-style"}
{"code": "def resolution(self, indicator=None):\n    self._request_entity = 'dnsResolution'\n    self._request_uri = '{}/dnsResolutions'.format(self._request_uri)\n    if (indicator is not None):\n        self._request_uri = '{}/{}/dnsResolutions'.format(self._api_uri, indicator)", "docstring": "Update the URI to retrieve host resolutions for the provided indicator.\n\nArgs:\nindicator (string): The indicator to retrieve resolutions.", "source": "codesearchnet"}
{"code": "def download_url(self, url, **kwargs):\n    if (self.baseurl and (':\n        url = join(self.baseurl, url)\n    return self.resolver.download_to_directory(self.directory, url, **kwargs)", "docstring": "Download a URL to the workspace.\n\nArgs:\nurl (string): URL to download to directory\n**kwargs : See :py:mod:`ocrd.resolver.Resolver`\n\nReturns:\nThe local filename of the downloaded file", "source": "codesearchnet"}
{"code": "def decode(data):\n    \n    decoded = None\n    try:\n        decoded = yaml.load(data)\n    except Exception, e:\n        e = e.message if e.message else str(e)\n        raise MetaParsingException(\"Can't parse your YAML data: %s\" % e)\n\n    decoded = validator.check_structure(decoded)\n\n    return decoded", "docstring": "Handles decoding of the YAML `data`.\n\nArgs:\ndata (str): Data which will be decoded.\n\nReturns:\ndict: Dictionary with decoded data.", "source": "juraj-google-style"}
{"code": "def getListOfBases():\n    downer = Downloader()\n    data = downer.download((ALEPH_URL + '/F/?func=file&file_name=base-list'))\n    dom = dhtmlparser.parseString(data.lower())\n    base_links = filter((lambda x: (('href' in x.params) and ('local_base' in x.params['href']))), dom.find('a'))\n    base_links = map((lambda x: x.params['href'].replace('?', '&', 1).split('&')), base_links)\n    bases = map((lambda link: filter((lambda base: ('local_base=' in base)), link)[0]), base_links)\n    bases = map((lambda x: x.split('=')[1].strip()), bases)\n    return list(set(bases))", "docstring": "This function is here mainly for purposes of unittest\n\nReturns:\nlist of str: Valid bases as they are used as URL parameters in links at\nAleph main page.", "source": "codesearchnet"}
{"code": "def get_padding_bias(x):\n    with tf.name_scope('attention_bias'):\n        padding = get_padding(x)\n        attention_bias = (padding * _NEG_INF)\n        attention_bias = tf.expand_dims(tf.expand_dims(attention_bias, axis=1), axis=1)\n    return attention_bias", "docstring": "Calculate bias tensor from padding values in tensor.\n\nBias tensor that is added to the pre-softmax multi-headed attention logits,\nwhich has shape [batch_size, num_heads, length, length]. The tensor is zero at\nnon-padding locations, and -1e9 (negative infinity) at padding locations.\n\nArgs:\nx: int tensor with shape [batch_size, length]\n\nReturns:\nAttention bias tensor of shape [batch_size, 1, 1, length].", "source": "codesearchnet"}
{"code": "def report_filter(config, auth, body, filters):\n    new_body = body.copy()\n    for f, d in filters.items():\n        for v in get_rows(config, auth, d):\n            if f == 'accountId':\n                new_body['accountId'] = v\n            elif f == 'activity':\n                new_body['reachCriteria']['activities'].setdefault('filters', []).append({'kind': 'dfareporting\n            else:\n                new_body.setdefault('criteria', {}).setdefault('dimensionFilters', []).append({'kind': 'dfareporting\n    return new_body", "docstring": "Adds filters to a report body\n\nFilters cannot be easily added to the reports without templateing, this allows\nfilters to be passed as lists.\nValues are specified using get_rows(...) helper, see\nstarthinker/util/data/__init__.py.\nTo specify a filter, use the official filter name and a list of values.\n\nFor exmaple:\n\n```\nfilters = {\n\"accountId\": {\n\"values\": 789\n},\n\"advertiser\": {\n\"values\":[1234, 5678, 91011]\n}\n}\n```\n\nArgs:\n* auth: (string) Either user or service.\n* body: (json) the report body ( with or without filters )\n* filters: (json) a dictionary of filters to apply ( see above examples )\n\nReturns:\n* body: ( json ) modified report body", "source": "github-repos"}
{"code": "def _FindFileContainingSymbolInDb(self, symbol):\n    try:\n        file_proto = self._internal_db.FindFileContainingSymbol(symbol)\n    except KeyError as error:\n        if self._descriptor_db:\n            file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)\n        else:\n            raise error\n    if (not file_proto):\n        raise KeyError(('Cannot find a file containing %s' % symbol))\n    return self._ConvertFileProtoToFileDescriptor(file_proto)", "docstring": "Finds the file in descriptor DB containing the specified symbol.\n\nArgs:\nsymbol: The name of the symbol to search for.\n\nReturns:\nA FileDescriptor that contains the specified symbol.\n\nRaises:\nKeyError: if the file cannot be found in the descriptor database.", "source": "codesearchnet"}
{"code": "def _PrintSessionsDetails(self, storage_reader):\n    \n    for session_number, session in enumerate(storage_reader.GetSessions()):\n      session_identifier = uuid.UUID(hex=session.identifier)\n      session_identifier = '{0!s}'.format(session_identifier)\n\n      start_time = 'N/A'\n      if session.start_time is not None:\n        start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)\n\n      completion_time = 'N/A'\n      if session.completion_time is not None:\n        completion_time = timelib.Timestamp.CopyToIsoFormat(\n            session.completion_time)\n\n      enabled_parser_names = 'N/A'\n      if session.enabled_parser_names:\n        enabled_parser_names = ', '.join(sorted(session.enabled_parser_names))\n\n      command_line_arguments = session.command_line_arguments or 'N/A'\n      parser_filter_expression = session.parser_filter_expression or 'N/A'\n      preferred_encoding = session.preferred_encoding or 'N/A'\n      \n      \n      if isinstance(preferred_encoding, py2to3.BYTES_TYPE):\n        preferred_encoding = preferred_encoding.decode('utf-8')\n      if session.artifact_filters:\n        artifact_filters_string = ', '.join(session.artifact_filters)\n      else:\n        artifact_filters_string = 'N/A'\n      filter_file = session.filter_file or 'N/A'\n\n      title = 'Session: {0:s}'.format(session_identifier)\n      table_view = views.ViewsFactory.GetTableView(\n          self._views_format_type, title=title)\n\n      table_view.AddRow(['Start time', start_time])\n      table_view.AddRow(['Completion time', completion_time])\n      table_view.AddRow(['Product name', session.product_name])\n      table_view.AddRow(['Product version', session.product_version])\n      table_view.AddRow(['Command line arguments', command_line_arguments])\n      table_view.AddRow(['Parser filter expression', parser_filter_expression])\n      table_view.AddRow(['Enabled parser and plugins', enabled_parser_names])\n      table_view.AddRow(['Preferred encoding', preferred_encoding])\n      table_view.AddRow(['Debug mode', session.debug_mode])\n      table_view.AddRow(['Artifact filters', artifact_filters_string])\n      table_view.AddRow(['Filter file', filter_file])\n\n      table_view.Write(self._output_writer)\n\n      if self._verbose:\n        self._PrintPreprocessingInformation(storage_reader, session_number + 1)\n\n        self._PrintParsersCounter(\n            session.parsers_counter, session_identifier=session_identifier)\n\n        self._PrintAnalysisReportCounter(\n            session.analysis_reports_counter,\n            session_identifier=session_identifier)\n\n        self._PrintEventLabelsCounter(\n            session.event_labels_counter,\n            session_identifier=session_identifier)", "docstring": "Prints the details of the sessions.\n\nArgs:\nstorage_reader (BaseStore): storage.", "source": "juraj-google-style"}
{"code": "def eigh(x):\n    if any_symbolic_tensors((x,)):\n        return Eigh().symbolic_call(x)\n    return _eigh(x)", "docstring": "Computes the eigenvalues and eigenvectors of a complex Hermitian.\n\nArgs:\nx: Input tensor of shape `(..., M, M)`.\n\nReturns:\nA tuple of two tensors: a tensor of shape `(..., M)` containing\neigenvalues and a tensor of shape `(..., M, M)` containing eigenvectors.", "source": "github-repos"}
{"code": "def restore(self, fade=False):\n        \n\n        if self.is_coordinator:\n            \n            \n            \n            transport_info = self.device.get_current_transport_info()\n            if transport_info is not None:\n                if transport_info['current_transport_state'] == 'PLAYING':\n                    self.device.pause()\n\n            \n            self._restore_queue()\n\n            \n\n            if self.is_playing_queue and self.playlist_position > 0:\n                \n\n                if self.playlist_position is not None:\n                    \n                    \n                    \n                    \n                    self.playlist_position -= 1\n                    self.device.play_from_queue(self.playlist_position, False)\n\n                if self.track_position is not None:\n                    if self.track_position != \"\":\n                        self.device.seek(self.track_position)\n\n                \n                \n                self.device.play_mode = self.play_mode\n                self.device.cross_fade = self.cross_fade\n\n            elif self.is_playing_cloud_queue:\n                \n                \n                pass\n\n            else:\n                \n                \n                if self.media_uri != \"\":\n                    self.device.play_uri(\n                        self.media_uri, self.media_metadata, start=False)\n\n        \n        \n        self.device.mute = self.mute\n        self.device.bass = self.bass\n        self.device.treble = self.treble\n        self.device.loudness = self.loudness\n\n        \n        \n        \n        \n        \n        if self.volume == 100:\n            fixed_vol = self.device.renderingControl.GetOutputFixed(\n                [('InstanceID', 0)])['CurrentFixed']\n        else:\n            fixed_vol = False\n\n        \n        if not fixed_vol:\n            if fade:\n                \n                \n                self.device.volume = 0\n                self.device.ramp_to_volume(self.volume)\n            else:\n                \n                self.device.volume = self.volume\n\n        \n        \n        if self.is_coordinator:\n            if self.transport_state == 'PLAYING':\n                self.device.play()\n            elif self.transport_state == 'STOPPED':\n                self.device.stop()", "docstring": "Restore the state of a device to that which was previously saved.\n\nFor coordinator devices restore everything. For slave devices\nonly restore volume etc., not transport info (transport info\ncomes from the slave's coordinator).\n\nArgs:\nfade (bool): Whether volume should be faded up on restore.", "source": "juraj-google-style"}
{"code": "def _ConvertScalarFieldValue(value, field, require_str=False):\n  \n  if field.cpp_type in _INT_TYPES:\n    return _ConvertInteger(value)\n  elif field.cpp_type in _FLOAT_TYPES:\n    return _ConvertFloat(value)\n  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:\n    return _ConvertBool(value, require_str)\n  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:\n    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n      return base64.b64decode(value)\n    else:\n      \n      \n      if _UNPAIRED_SURROGATE_PATTERN.search(value):\n        raise ParseError('Unpaired surrogate')\n      return value\n  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:\n    \n    enum_value = field.enum_type.values_by_name.get(value, None)\n    if enum_value is None:\n      try:\n        number = int(value)\n        enum_value = field.enum_type.values_by_number.get(number, None)\n      except ValueError:\n        raise ParseError('Invalid enum value {0} for enum type {1}.'.format(\n            value, field.enum_type.full_name))\n      if enum_value is None:\n        raise ParseError('Invalid enum value {0} for enum type {1}.'.format(\n            value, field.enum_type.full_name))\n    return enum_value.number", "docstring": "Convert a single scalar field value.\n\nArgs:\nvalue: A scalar value to convert the scalar field value.\nfield: The descriptor of the field to convert.\nrequire_str: If True, the field value must be a str.\n\nReturns:\nThe converted scalar field value\n\nRaises:\nParseError: In case of convert problems.", "source": "juraj-google-style"}
{"code": "def record_markdown(text, cellid):\n    \n    from acorn.logging.database import record\n    from time import time\n    ekey = \"nb-{}\".format(cellid)\n    \n    global _cellid_map\n    if cellid not in _cellid_map:\n        from acorn.logging.database import active_db\n        from difflib import SequenceMatcher\n        from acorn.logging.diff import cascade\n        taskdb = active_db()\n        \n        if ekey not in taskdb.entities:\n            \n            \n            possible = [k for k in taskdb.entities if k[0:3] == \"nb-\"]\n            maxkey, maxvalue = None, 0.\n            for pkey in possible:\n                sequence = [e[\"c\"] for e in taskdb.entities[pkey]]\n                state = ''.join(cascade(sequence))\n                matcher = SequenceMatcher(a=state, b=text)\n                ratio = matcher.quick_ratio()\n                if ratio > maxvalue and ratio > 0.5:\n                    maxkey, maxvalue = pkey, ratio\n\n            \n            \n            if maxkey is not None:\n                ekey = pkey\n                            \n        _cellid_map[cellid] = ekey\n        \n    ekey = _cellid_map[cellid]        \n    entry = {\n        \"m\": \"md\",\n        \"a\": None,\n        \"s\": time(),\n        \"r\": None,\n        \"c\": text,\n    }\n    record(ekey, entry, diff=True)", "docstring": "Records the specified markdown text to the acorn database.\n\nArgs:\ntext (str): the *raw* markdown text entered into the cell in the ipython\nnotebook.", "source": "juraj-google-style"}
{"code": "def _parse_plugin_data_as(content, data_oneof_field):\n  \n  plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)\n  if plugin_data.version != PLUGIN_DATA_VERSION:\n    raise error.HParamsError(\n        'Only supports plugin_data version: %s; found: %s in: %s' %\n        (PLUGIN_DATA_VERSION, plugin_data.version, plugin_data))\n  if not plugin_data.HasField(data_oneof_field):\n    raise error.HParamsError(\n        'Expected plugin_data.%s to be set. Got: %s' %\n        (data_oneof_field, plugin_data))\n  return getattr(plugin_data, data_oneof_field)", "docstring": "Returns a data oneof's field from plugin_data.content.\n\nRaises HParamsError if the content doesn't have 'data_oneof_field' set or\nthis file is incompatible with the version of the metadata stored.\n\nArgs:\ncontent: The SummaryMetadata.plugin_data.content to use.\ndata_oneof_field: string. The name of the data oneof field to return.", "source": "juraj-google-style"}
{"code": "def _send_unary_request(self, request):\n        \n        if request.ack_ids:\n            self._client.acknowledge(\n                subscription=self._subscription, ack_ids=list(request.ack_ids)\n            )\n\n        if request.modify_deadline_ack_ids:\n            \n            deadline_to_ack_ids = collections.defaultdict(list)\n\n            for n, ack_id in enumerate(request.modify_deadline_ack_ids):\n                deadline = request.modify_deadline_seconds[n]\n                deadline_to_ack_ids[deadline].append(ack_id)\n\n            for deadline, ack_ids in six.iteritems(deadline_to_ack_ids):\n                self._client.modify_ack_deadline(\n                    subscription=self._subscription,\n                    ack_ids=ack_ids,\n                    ack_deadline_seconds=deadline,\n                )\n\n        _LOGGER.debug(\"Sent request(s) over unary RPC.\")", "docstring": "Send a request using a separate unary request instead of over the\nstream.\n\nArgs:\nrequest (types.StreamingPullRequest): The stream request to be\nmapped into unary requests.", "source": "juraj-google-style"}
{"code": "def forward(self, music_tokens, raw_audio_conditioning=None):\n    if raw_audio_conditioning is None:\n        raw_audio_conditioning = 0.0\n    music_tokens = music_tokens.long()\n    hidden_states = self.embed_tokens(music_tokens)\n    hidden_states = hidden_states + raw_audio_conditioning\n    hidden_states = hidden_states.permute(0, 2, 1)\n    hidden_states = self.upsampler(hidden_states)\n    hidden_states = hidden_states.permute(0, 2, 1)\n    hidden_states = self.layer_norm(hidden_states)\n    return hidden_states", "docstring": "Args:\nmusic_tokens (`torch.LongTensor`):\nMusic tokens form the upper level in range(nb_discrete_codes)\nraw_audio_conditioning (`torch.LongTensor`, *optional*):\nAudio used when primed sampling, raw audio information that conditions the generation", "source": "github-repos"}
{"code": "def airborne_velocity(msg):\n    \n\n    if common.typecode(msg) != 19:\n        raise RuntimeError(\"%s: Not a airborne velocity message, expecting TC=19\" % msg)\n\n    mb = common.hex2bin(msg)[32:]\n\n    subtype = common.bin2int(mb[5:8])\n\n    if common.bin2int(mb[14:24]) == 0 or common.bin2int(mb[25:35]) == 0:\n        return None\n\n    if subtype in (1, 2):\n        v_ew_sign = -1 if mb[13]=='1' else 1\n        v_ew = common.bin2int(mb[14:24]) - 1       \n\n        v_ns_sign = -1 if mb[24]=='1' else 1\n        v_ns = common.bin2int(mb[25:35]) - 1       \n\n        v_we = v_ew_sign * v_ew\n        v_sn = v_ns_sign * v_ns\n\n        spd = math.sqrt(v_sn*v_sn + v_we*v_we)  \n        spd = int(spd)\n\n        trk = math.atan2(v_we, v_sn)\n        trk = math.degrees(trk)                 \n        trk = trk if trk >= 0 else trk + 360    \n\n        tag = 'GS'\n        trk_or_hdg = round(trk, 2)\n\n    else:\n        if mb[13] == '0':\n            hdg = None\n        else:\n            hdg = common.bin2int(mb[14:24]) / 1024.0 * 360.0\n            hdg = round(hdg, 2)\n\n        trk_or_hdg = hdg\n\n        spd = common.bin2int(mb[25:35])\n        spd = None if spd==0 else spd-1\n\n        if mb[24]=='0':\n            tag = 'IAS'\n        else:\n            tag = 'TAS'\n\n    vr_sign = -1 if mb[36]=='1' else 1\n    vr = common.bin2int(mb[37:46])\n    rocd = None if vr==0 else int(vr_sign*(vr-1)*64)\n\n    return spd, trk_or_hdg, rocd, tag", "docstring": "Calculate the speed, track (or heading), and vertical rate\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\n(int, float, int, string): speed (kt), ground track or heading (degree),\nrate of climb/descend (ft/min), and speed type\n('GS' for ground speed, 'AS' for airspeed)", "source": "juraj-google-style"}
{"code": "def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):\n    \n    return os.path.join(appdirs.user_config_dir, file_name)", "docstring": "Return the path where the config file is stored.\n\nArgs:\napp_name (text_type, optional): Name of the application, defaults to\n``'projecthamster``. Allows you to use your own application specific\nnamespace if you wish.\nfile_name (text_type, optional): Name of the config file. Defaults to\n``config.conf``.\n\nReturns:\nstr: Fully qualified path (dir & filename) where we expect the config file.", "source": "juraj-google-style"}
{"code": "def create_report_proto(self, tt_config, tt_parameters, tensor_trace_order, tensor_trace_points, collected_signature_types):\n    report = tensor_tracer_pb2.TensorTracerReport()\n    report.config.version = tt_config.version\n    report.config.device = tt_config.device_type\n    report.config.num_cores = tt_config.num_replicas\n    report.config.num_hosts = tt_config.num_hosts\n    report.config.num_cores_per_host = tt_config.num_replicas_per_host\n    report.config.submode = tt_parameters.submode\n    report.config.trace_mode = tt_parameters.trace_mode\n    for signature_name, _ in sorted(collected_signature_types.items(), key=lambda x: x[1]):\n        report.config.signatures.append(signature_name)\n    for tensor in tensor_trace_order.graph_order.tensors:\n        tensor_def = tensor_tracer_pb2.TensorTracerReport.TracedTensorDef()\n        tensor_def.name = tensor.name\n        if tensor.name in tensor_trace_order.tensorname_to_cache_idx:\n            tensor_def.is_traced = True\n            tensor_def.cache_index = tensor_trace_order.tensorname_to_cache_idx[tensor.name]\n        else:\n            if tt_parameters.use_fingerprint_subdir:\n                continue\n            tensor_def.is_traced = False\n        if tensor.name in tensor_trace_points:\n            tensor_def.trace_point_name = tensor_trace_points[tensor.name]\n        if tensor.name in self.instrument_records:\n            tensor_def.explanation = self.instrument_records[tensor.name]\n        elif tensor.op.name in self.instrument_records:\n            tensor_def.explanation = self.instrument_records[tensor.op.name]\n        report.tensordef[tensor.name].CopyFrom(tensor_def)\n    report.fingerprint = proto_fingerprint(report)\n    logging.info('TensorTracerProto fingerprint is %s.', report.fingerprint)\n    tf_graph = tensor_trace_order.graph_order.graph\n    report.graphdef.CopyFrom(tf_graph.as_graph_def())\n    return report", "docstring": "Creates and returns a proto that stores tensor tracer configuration.\n\nArgs:\ntt_config: TensorTracerConfig object holding information about the run\nenvironment (device, # cores, # hosts), and tensor tracer version\ninformation.\ntt_parameters: TTParameters objects storing the user provided parameters\nfor tensor tracer.\ntensor_trace_order: TensorTraceOrder object storing a topological order of\nthe graph.\ntensor_trace_points: Progromatically added trace_points/checkpoints.\ncollected_signature_types: The signature types collected, e,g, norm,\nmax, min, mean...\nReturns:\nTensorTracerReport proto.", "source": "github-repos"}
{"code": "def js_link(self, attr, other, other_attr):\n    if (attr not in self.properties()):\n        raise ValueError(('%r is not a property of self (%r)' % (attr, self)))\n    if (not isinstance(other, Model)):\n        raise ValueError((\"'other' is not a Bokeh model: %r\" % other))\n    if (other_attr not in other.properties()):\n        raise ValueError(('%r is not a property of other (%r)' % (other_attr, other)))\n    from bokeh.models.callbacks import CustomJS\n    cb = CustomJS(args=dict(other=other), code=('other.%s = this.%s' % (other_attr, attr)))\n    self.js_on_change(attr, cb)", "docstring": "Link two Bokeh model properties using JavaScript.\n\nThis is a convenience method that simplifies adding a CustomJS callback\nto update one Bokeh model property whenever another changes value.\n\nArgs:\n\nattr (str) :\nThe name of a Bokeh property on this model\n\nother (Model):\nA Bokeh model to link to self.attr\n\nother_attr (str) :\nThe property on ``other`` to link together\n\nAdded in version 1.1\n\nRaises:\n\nValueError\n\nExamples:\n\nThis code with ``js_link``:\n\n.. code :: python\n\nselect.js_link('value', plot, 'sizing_mode')\n\nis equivalent to the following:\n\n.. code:: python\n\nfrom bokeh.models import CustomJS\nselect.js_on_change('value',\nCustomJS(args=dict(other=plot),\ncode=\"other.sizing_mode = this.value\"\n)\n)", "source": "codesearchnet"}
{"code": "def _ParseShellItem(self, parser_mediator, shell_item):\n    path_segment = self._ParseShellItemPathSegment(shell_item)\n    self._path_segments.append(path_segment)\n    event_data = shell_item_events.ShellItemFileEntryEventData()\n    event_data.origin = self._origin\n    event_data.shell_item_path = self.CopyToPath()\n    if isinstance(shell_item, pyfwsi.file_entry):\n        event_data.name = shell_item.name\n        for extension_block in shell_item.extension_blocks:\n            if isinstance(extension_block, pyfwsi.file_entry_extension):\n                long_name = extension_block.long_name\n                localized_name = extension_block.localized_name\n                file_reference = extension_block.file_reference\n                if file_reference:\n                    file_reference = '{0:d}-{1:d}'.format((file_reference & 281474976710655), (file_reference >> 48))\n                event_data.file_reference = file_reference\n                event_data.localized_name = localized_name\n                event_data.long_name = long_name\n                fat_date_time = extension_block.get_creation_time_as_integer()\n                if (fat_date_time != 0):\n                    date_time = dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)\n                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n                    parser_mediator.ProduceEventWithEventData(event, event_data)\n                fat_date_time = extension_block.get_access_time_as_integer()\n                if (fat_date_time != 0):\n                    date_time = dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)\n                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n                    parser_mediator.ProduceEventWithEventData(event, event_data)\n        fat_date_time = shell_item.get_modification_time_as_integer()\n        if (fat_date_time != 0):\n            date_time = dfdatetime_fat_date_time.FATDateTime(fat_date_time=fat_date_time)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a shell item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nshell_item (pyfwsi.item): shell item.", "source": "codesearchnet"}
{"code": "def _GetCachedFileByPath(self, key_path_upper):\n    longest_key_path_prefix_upper = ''\n    longest_key_path_prefix_length = len(longest_key_path_prefix_upper)\n    for key_path_prefix_upper in self._registry_files:\n        if key_path_upper.startswith(key_path_prefix_upper):\n            key_path_prefix_length = len(key_path_prefix_upper)\n            if (key_path_prefix_length > longest_key_path_prefix_length):\n                longest_key_path_prefix_upper = key_path_prefix_upper\n                longest_key_path_prefix_length = key_path_prefix_length\n    if (not longest_key_path_prefix_upper):\n        return (None, None)\n    registry_file = self._registry_files.get(longest_key_path_prefix_upper, None)\n    return (longest_key_path_prefix_upper, registry_file)", "docstring": "Retrieves a cached Windows Registry file for a key path.\n\nArgs:\nkey_path_upper (str): Windows Registry key path, in upper case with\na resolved root key alias.\n\nReturns:\ntuple: consist:\n\nstr: key path prefix\nWinRegistryFile: corresponding Windows Registry file or None if not\navailable.", "source": "codesearchnet"}
{"code": "def __getitem__(self, slice_spec):\n    if isinstance(slice_spec, bool) or (isinstance(slice_spec, tensor_lib.Tensor) and slice_spec.dtype == dtypes.bool) or (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):\n        tensor = _var_to_tensor(self)\n        return array_ops.boolean_mask(tensor=tensor, mask=slice_spec)\n    if not isinstance(slice_spec, (list, tuple)):\n        slice_spec = (slice_spec,)\n    s = slice_spec[0]\n    if isinstance(s, slice):\n        first_dim_slice_specs = self._decompose_slice_spec(s)\n        values = []\n        for i, var in enumerate(self._variables):\n            if first_dim_slice_specs[i] is not None:\n                all_dim_slice_spec = (first_dim_slice_specs[i],) + slice_spec[1:]\n                values.append(var[all_dim_slice_spec])\n        if s.step is not None and s.step < 0:\n            values.reverse()\n        if not values:\n            return constant_op.constant([], dtype=self._dtype, shape=(0,) + self._shape[1:])\n        return array_ops.concat(values, axis=0)\n    elif s is Ellipsis:\n        return array_ops.concat([var[slice_spec] for var in self._variables], axis=0)\n    elif s is array_ops.newaxis:\n        return array_ops.concat([var[slice_spec[1:]] for var in self._variables], axis=0)[array_ops.newaxis]\n    else:\n        if isinstance(s, tensor_lib.Tensor):\n            raise TypeError('ShardedVariable: using Tensor for indexing is not allowed.')\n        if s < 0:\n            s += self._shape[0]\n        if s < 0 or s >= self._shape[0]:\n            raise IndexError(f'ShardedVariable: slice index {s} of dimension 0 out of bounds.')\n        for i in range(len(self._variables)):\n            if i == len(self._variables) - 1 or (s >= self._var_offsets[i][0] and s < self._var_offsets[i + 1][0]):\n                return self._variables[i][(s - self._var_offsets[i][0],) + slice_spec[1:]]", "docstring": "Extracts the specified region as a Tensor from the sharded variable.\n\nThe API contract is identical to `Tensor.__getitem__`. Assignment to the\nsliced range is not yet supported.\n\nArgs:\nslice_spec: The arguments to __getitem__, specifying the global slicing of\nthe sharded variable.\n\nReturns:\nThe appropriate slice of tensor based on `slice_spec`.\n\nRaises:\nIndexError: If a slice index is out of bound.\nTypeError: If `spec_spec` contains Tensor.", "source": "github-repos"}
{"code": "def choose_1_from_each(lists):\n    \n    if len(lists) == 0:\n        yield []\n    else:\n        for el in lists[0]:\n            for next_list in choose_1_from_each(lists[1:]):\n                yield [el] + next_list", "docstring": "Takes a list of lists and returns a list of lists with one item\nfrom each list.  This new list should be the length of each list multiplied\nby the others.  18 for an list with lists of 3, 2 and 3.  Also the lenght\nof each sub list should be same as the length of lists passed in.\n\nArgs:\nlists(list of Lists):  A list of lists\n\nReturns:\nlist of lists: returns a list of lists constructions of one item from each\nlist in lists.", "source": "juraj-google-style"}
{"code": "def print_table(col_tuple, row_tuples):\n    col_widths = [max((len(str(row[col])) for row in ([col_tuple] + row_tuples))) for col in range(len(col_tuple))]\n    format_str = ' '.join(('{{:<{}}}'.format(col_width) for col_width in col_widths))\n    header_border = ' '.join((('=' * col_width) for col_width in col_widths))\n    print(header_border)\n    print(format_str.format(*col_tuple))\n    print(header_border)\n    for row_tuple in row_tuples:\n        print(format_str.format(*row_tuple))\n    print(header_border)\n    print()", "docstring": "Print column headers and rows as a reStructuredText table.\n\nArgs:\ncol_tuple: Tuple of column name strings.\nrow_tuples: List of tuples containing row data.", "source": "codesearchnet"}
{"code": "def is_periodic_image(self, other, tolerance=1e-08, check_lattice=True):\n    if (check_lattice and (self.lattice != other.lattice)):\n        return False\n    if (self.species != other.species):\n        return False\n    frac_diff = pbc_diff(self.frac_coords, other.frac_coords)\n    return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)", "docstring": "Returns True if sites are periodic images of each other.\n\nArgs:\nother (PeriodicSite): Other site\ntolerance (float): Tolerance to compare fractional coordinates\ncheck_lattice (bool): Whether to check if the two sites have the\nsame lattice.\n\nReturns:\nbool: True if sites are periodic images of each other.", "source": "codesearchnet"}
{"code": "def take(self, count, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import take_op\n    return take_op._take(self, count, name=name)", "docstring": "Creates a `Dataset` with at most `count` elements from this dataset.\n\n>>> dataset = tf.data.Dataset.range(10)\n>>> dataset = dataset.take(3)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[0, 1, 2]\n\nArgs:\ncount: A `tf.int64` scalar `tf.Tensor`, representing the number of\nelements of this dataset that should be taken to form the new dataset.\nIf `count` is -1, or if `count` is greater than the size of this\ndataset, the new dataset will contain all elements of this dataset.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA new `Dataset` with the transformation applied as described above.", "source": "github-repos"}
{"code": "def _ParseDataObject(self, file_object, file_offset):\n    \n    data_object_map = self._GetDataTypeMap('systemd_journal_data_object')\n\n    try:\n      data_object, _ = self._ReadStructureFromFileObject(\n          file_object, file_offset, data_object_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to parse data object at offset: 0x{0:08x} with error: '\n          '{1!s}').format(file_offset, exception))\n\n    if data_object.object_type != self._OBJECT_TYPE_DATA:\n      raise errors.ParseError('Unsupported object type: {0:d}.'.format(\n          data_object.object_type))\n\n    if data_object.object_flags not in (\n        0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4):\n      raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(\n          data_object.object_flags))\n\n    \n    data_size = data_object.data_size - 64\n    data = file_object.read(data_size)\n\n    if data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ:\n      data = lzma.decompress(data)\n\n    elif data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4:\n      uncompressed_size_map = self._GetDataTypeMap('uint32le')\n\n      try:\n        uncompressed_size = self._ReadStructureFromByteStream(\n            data, file_offset + 64, uncompressed_size_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError((\n            'Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with '\n            'error: {1!s}').format(file_offset + 64, exception))\n\n      data = lz4.block.decompress(\n          data[8:], uncompressed_size=uncompressed_size)\n\n    return data", "docstring": "Parses a data object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the data object relative to the start\nof the file-like object.\n\nReturns:\nbytes: data.\n\nRaises:\nParseError: if the data object cannot be parsed.", "source": "juraj-google-style"}
{"code": "def _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n    if list(reduction_axes) == [0, 1, 2]:\n        normalization_axis = 3\n        tf_data_format = 'NHWC'\n    else:\n        normalization_axis = 1\n        tf_data_format = 'NCHW'\n    if gamma is None:\n        gamma = constant_op.constant(1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])\n    if beta is None:\n        beta = constant_op.constant(0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])\n    return nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)", "docstring": "Fused version of `normalize_batch_in_training`.\n\nArgs:\nx: Input tensor or variable.\ngamma: Tensor by which to scale the input.\nbeta: Tensor with which to center the input.\nreduction_axes: iterable of integers,\naxes over which to normalize.\nepsilon: Fuzz factor.\n\nReturns:\nA tuple length of 3, `(normalized_tensor, mean, variance)`.", "source": "github-repos"}
{"code": "def update_if_absent(self, **kwargs):\n        \n        for arg in kwargs:\n            if hasattr(self, arg):\n                if getattr(self, arg) is None:\n                    setattr(self, arg, kwargs[arg])\n            else:\n                raise ValueError(\"Invalid RayParams parameter in\"\n                                 \" update_if_absent: %s\" % arg)\n\n        self._check_usage()", "docstring": "Update the settings when the target fields are None.\n\nArgs:\nkwargs: The keyword arguments to set corresponding fields.", "source": "juraj-google-style"}
{"code": "def RunMetadata(self, tag):\n    \n    if tag not in self._tagged_metadata:\n      raise ValueError('There is no run metadata with this tag name')\n\n    run_metadata = config_pb2.RunMetadata()\n    run_metadata.ParseFromString(self._tagged_metadata[tag])\n    return run_metadata", "docstring": "Given a tag, return the associated session.run() metadata.\n\nArgs:\ntag: A string tag associated with the event.\n\nRaises:\nValueError: If the tag is not found.\n\nReturns:\nThe metadata in form of `RunMetadata` proto.", "source": "juraj-google-style"}
{"code": "def evaluate_period_forecasts(self):\n    score_columns = ['Run_Date', 'Ensemble Name', 'Model_Name', 'Forecast_Variable', 'Neighbor_Radius', 'Smoothing_Radius', 'Size_Threshold', 'ROC', 'Reliability']\n    all_scores = pd.DataFrame(columns=score_columns)\n    if (self.coordinate_file is not None):\n        coord_mask = np.where((((((self.coordinates['lon'] >= self.lon_bounds[0]) & (self.coordinates['lon'] <= self.lon_bounds[1])) & (self.coordinates['lat'] >= self.lat_bounds[0])) & (self.coordinates['lat'] <= self.lat_bounds[1])) & (self.period_obs[self.mask_variable] > 0)))\n    else:\n        coord_mask = None\n    for neighbor_radius in self.neighbor_radii:\n        n_filter = disk(neighbor_radius)\n        for (s, size_threshold) in enumerate(self.size_thresholds):\n            period_obs = fftconvolve((self.period_obs[self.mrms_variable] >= self.obs_thresholds[s]), n_filter, mode='same')\n            period_obs[(period_obs > 1)] = 1\n            if (self.obs_mask and (self.coordinate_file is None)):\n                period_obs = period_obs[(self.period_obs[self.mask_variable] > 0)]\n            elif (self.obs_mask and (self.coordinate_file is not None)):\n                period_obs = period_obs[(coord_mask[0], coord_mask[1])]\n            else:\n                period_obs = period_obs.ravel()\n            for smoothing_radius in self.smoothing_radii:\n                print('Eval period forecast {0} {1} {2} {3} {4} {5}'.format(self.model_name, self.forecast_variable, self.run_date, neighbor_radius, size_threshold, smoothing_radius))\n                period_var = 'neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}'.format(((self.end_hour - self.start_hour) + 1), neighbor_radius, smoothing_radius, self.forecast_variable, size_threshold)\n                if (self.obs_mask and (self.coordinate_file is None)):\n                    period_forecast = self.period_forecasts[period_var][(self.period_obs[self.mask_variable] > 0)]\n                elif (self.obs_mask and (self.coordinate_file is not None)):\n                    period_forecast = self.period_forecasts[period_var][(coord_mask[0], coord_mask[1])]\n                else:\n                    period_forecast = self.period_forecasts[period_var].ravel()\n                roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)\n                roc.update(period_forecast, period_obs)\n                rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)\n                rel.update(period_forecast, period_obs)\n                row = [self.run_date, self.ensemble_name, self.model_name, self.forecast_variable, neighbor_radius, smoothing_radius, size_threshold, roc, rel]\n                all_scores.loc[period_var] = row\n    return all_scores", "docstring": "Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour\n\nReturns:\nA pandas DataFrame with full-period metadata and verification statistics", "source": "codesearchnet"}
{"code": "def get_keras_mask(x):\n    return get_tensor_attr(x, '_keras_mask')", "docstring": "Gets the Keras mask attribute from the given tensor.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe mask tensor associated with the input tensor, or `None` if no mask\nhas been set.", "source": "github-repos"}
{"code": "def BuildFindSpecs(self, environment_variables=None):\n    \n    path_attributes = {}\n    if environment_variables:\n      for environment_variable in environment_variables:\n        attribute_name = environment_variable.name.lower()\n        attribute_value = environment_variable.value\n        if not isinstance(attribute_value, py2to3.STRING_TYPES):\n          continue\n\n        \n        if len(attribute_value) > 2 and attribute_value[1] == ':':\n          _, _, attribute_value = attribute_value.rpartition(':')\n\n        if attribute_value.startswith('\\\\'):\n          attribute_value = attribute_value.replace('\\\\', '/')\n\n        path_attributes[attribute_name] = attribute_value\n\n    find_specs = []\n    with open(self._path, 'r') as file_object:\n      for line in file_object:\n        line = line.strip()\n        if line.startswith('\n          continue\n\n        if path_attributes:\n          try:\n            line = line.format(**path_attributes)\n          except KeyError as exception:\n            logger.error((\n                'Unable to expand path filter: {0:s} with error: '\n                '{1!s}').format(line, exception))\n            continue\n\n        if not line.startswith('/'):\n          logger.warning((\n              'The path filter must be defined as an absolute path: '\n              '{0:s}').format(line))\n          continue\n\n        \n        \n        path_segments = line.split('/')\n        path_segments.pop(0)\n\n        if not path_segments[-1]:\n          logger.warning(\n              'Empty last path segment in path filter: {0:s}'.format(line))\n          continue\n\n        find_spec = file_system_searcher.FindSpec(\n            location_regex=path_segments, case_sensitive=False)\n        find_specs.append(find_spec)\n\n    return find_specs", "docstring": "Build find specification from a filter file.\n\nArgs:\nenvironment_variables (Optional[list[EnvironmentVariableArtifact]]):\nenvironment variables.\n\nReturns:\nlist[dfvfs.FindSpec]: find specification.", "source": "juraj-google-style"}
{"code": "def generic_type_args(type_: Type) -> List[Type]:\n    if hasattr(type_, '__union_params__'):\n        return list(type_.__union_params__)\n    return list(type_.__args__)", "docstring": "Gets the type argument list for the given generic type.\n\nIf you give this function List[int], it will return [int], and\nif you give it Union[int, str] it will give you [int, str]. Note\nthat on Python < 3.7, Union[int, bool] collapses to Union[int] and\nthen to int; this is already done by the time this function is\ncalled, so it does not help with that.\n\nArgs:\ntype_: The type to get the arguments list of.\n\nReturns:\nA list of Type objects.", "source": "codesearchnet"}
{"code": "def sca_intensity(scatterer, h_pol=True):\n    \n    Z = scatterer.get_Z()\n    return (Z[0,0] - Z[0,1]) if h_pol else (Z[0,0] + Z[0,1])", "docstring": "Scattering intensity (phase function) for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe differential scattering cross section.", "source": "juraj-google-style"}
{"code": "def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):\n    key_notation = '.'.join([section, key])\n    try:\n        return self._decode_value(self._data_from_dotnotation(key_notation, default))\n    except KeyError:\n        return None", "docstring": "Return the field specified by its key from the specified section.\n\nThis method access the specified section of the workflow document and returns the\nvalue for the given key.\n\nArgs:\nkey (str): The key pointing to the value that should be retrieved. It supports\nMongoDB's dot notation for nested fields.\ndefault: The default value that is returned if the key does not exist.\nsection (DataStoreDocumentSection): The section from which the data should\nbe retrieved.\n\nReturns:\nobject: The value from the field that the specified key is pointing to. If the\nkey does not exist, the default value is returned. If no default value\nis provided and the key does not exist ``None`` is returned.", "source": "codesearchnet"}
{"code": "def add_affiliation(self, value, curated_relation=None, record=None):\n    if value:\n        affiliation = {'value': value}\n        if record:\n            affiliation['record'] = record\n        if (curated_relation is not None):\n            affiliation['curated_relation'] = curated_relation\n        self._ensure_list_field('affiliations', affiliation)", "docstring": "Add an affiliation.\n\nArgs:\nvalue (string): affiliation value\ncurated_relation (bool): is relation curated\nrecord (dict): affiliation JSON reference", "source": "codesearchnet"}
{"code": "def copy_framebuffer(self, dst, src) -> None:\n        \n\n        self.mglo.copy_framebuffer(dst.mglo, src.mglo)", "docstring": "Copy framebuffer content.\n\nUse this method to:\n\n- blit framebuffers.\n- copy framebuffer content into a texture.\n- downsample framebuffers. (it will allow to read the framebuffer's content)\n- downsample a framebuffer directly to a texture.\n\nArgs:\ndst (Framebuffer or Texture): Destination framebuffer or texture.\nsrc (Framebuffer): Source framebuffer.", "source": "juraj-google-style"}
{"code": "def before_request(self, request, method, url, headers):\n        \n        \n        \n        parts = urllib.parse.urlsplit(url)\n        \n        audience = urllib.parse.urlunsplit(\n            (parts.scheme, parts.netloc, parts.path, \"\", \"\"))\n        token = self._get_jwt_for_audience(audience)\n        self.apply(headers, token=token)", "docstring": "Performs credential-specific before request logic.\n\nArgs:\nrequest (Any): Unused. JWT credentials do not need to make an\nHTTP request to refresh.\nmethod (str): The request's HTTP method.\nurl (str): The request's URI. This is used as the audience claim\nwhen generating the JWT.\nheaders (Mapping): The request's headers.", "source": "juraj-google-style"}
{"code": "def _new_ass_hierarchy(self, file_ass):\n        \n        ret_struct = {'source': '',\n                      'subhierarchy': {},\n                      'attrs': {},\n                      'snippets': {}}\n        ret_struct['source'] = file_ass['source']\n        self._ass_refresh_attrs(ret_struct, file_ass)\n\n        for name, subhierarchy in file_ass['subhierarchy'].items():\n            ret_struct['subhierarchy'][name] = self._new_ass_hierarchy(subhierarchy)\n\n        return ret_struct", "docstring": "Returns a completely new cache hierarchy for given assistant file.\n\nArgs:\nfile_ass: the assistant from filesystem hierarchy to create cache hierarchy for\n(for format see what refresh_role accepts)\nReturns:\nthe newly created cache hierarchy", "source": "juraj-google-style"}
{"code": "def encode_plus(self, table: 'pd.DataFrame', query: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, answer_coordinates: Optional[List[Tuple]]=None, answer_text: Optional[List[TextInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n    if return_token_type_ids is not None and (not add_special_tokens):\n        raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')\n    if answer_coordinates and (not answer_text) or (not answer_coordinates and answer_text):\n        raise ValueError('In case you provide answers, both answer_coordinates and answer_text should be provided')\n    if 'is_split_into_words' in kwargs:\n        raise NotImplementedError('Currently TapasTokenizer only supports questions as strings.')\n    if return_offsets_mapping:\n        raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.')\n    return self._encode_plus(table=table, query=query, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, truncation=truncation, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Prepare a table and a string for the model.\n\nArgs:\ntable (`pd.DataFrame`):\nTable containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas\ndataframe to convert it to string.\nquery (`str` or `List[str]`):\nQuestion related to a table to be encoded.\nanswer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):\nAnswer coordinates of each table-question pair in the batch. The answer_coordinates must be a single\nlist of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row\n(not the column header row) has index 0. The first column has index 0.\nanswer_text (`List[str]` or `List[List[str]]`, *optional*):\nAnswer text of each table-question pair in the batch. The answer_text must be a single list of one or\nmore strings. Each string must be the answer text of a corresponding answer coordinate.", "source": "github-repos"}
{"code": "def _freezeModel(self, func):\n    root = autotrackable.AutoTrackable()\n    root.f = func\n    input_func = root.f.get_concrete_function()\n    output_func = convert_to_constants.convert_variables_to_constants_v2(input_func, lower_control_flow=False)\n    return (root, output_func)", "docstring": "Freezes the function.\n\nArgs:\nfunc: Function.\n\nReturns:\nroot: AutoTrackable object with original ConcreteFunction.\noutput_func: frozen ConcreteFunction.", "source": "github-repos"}
{"code": "def invoke_string(self, line):\n    line = str(line)\n    if (len(line) == 0):\n        return True\n    if (line[0] == u'\n        return True\n    args = self._split_line(line)\n    return self.invoke(args)", "docstring": "Parse and invoke a string line.\n\nArgs:\nline (str): The line that we want to parse and invoke.\n\nReturns:\nbool: A boolean specifying if the last function created a new context\n(False if a new context was created) and a list with the remainder of the\ncommand line if this function did not consume all arguments.)", "source": "codesearchnet"}
{"code": "def get_go_server(settings=None):\n    \n    if not settings:\n        settings = get_settings()\n\n    return gocd.Server(\n        settings.get('server'),\n        user=settings.get('user'),\n        password=settings.get('password'),\n    )", "docstring": "Returns a `gocd.Server` configured by the `settings`\nobject.\n\nArgs:\nsettings: a `gocd_cli.settings.Settings` object.\nDefault: if falsey calls `get_settings`.\n\nReturns:\ngocd.Server: a configured gocd.Server instance", "source": "juraj-google-style"}
{"code": "def upload(cls, file_obj, store=None):\n        \n        if store is None:\n            store = 'auto'\n        elif store:\n            store = '1'\n        else:\n            store = '0'\n\n        data = {\n            'UPLOADCARE_STORE': store,\n        }\n\n        files = uploading_request('POST', 'base/', data=data,\n                                  files={'file': file_obj})\n        file_ = cls(files['file'])\n        return file_", "docstring": "Uploads a file and returns ``File`` instance.\n\nArgs:\n- file_obj: file object to upload to\n- store (Optional[bool]): Should the file be automatically stored\nupon upload. Defaults to None.\n- False - do not store file\n- True - store file (can result in error if autostore\nis disabled for project)\n- None - use project settings\n\nReturns:\n``File`` instance", "source": "juraj-google-style"}
{"code": "def select(self, field_paths):\n    field_paths = list(field_paths)\n    for field_path in field_paths:\n        field_path_module.split_field_path(field_path)\n    new_projection = query_pb2.StructuredQuery.Projection(fields=[query_pb2.StructuredQuery.FieldReference(field_path=field_path) for field_path in field_paths])\n    return self.__class__(self._parent, projection=new_projection, field_filters=self._field_filters, orders=self._orders, limit=self._limit, offset=self._offset, start_at=self._start_at, end_at=self._end_at)", "docstring": "Project documents matching query to a limited set of fields.\n\nSee :meth:`~.firestore_v1beta1.client.Client.field_path` for\nmore information on **field paths**.\n\nIf the current query already has a projection set (i.e. has already\ncalled :meth:`~.firestore_v1beta1.query.Query.select`), this\nwill overwrite it.\n\nArgs:\nfield_paths (Iterable[str, ...]): An iterable of field paths\n(``.``-delimited list of field names) to use as a projection\nof document fields in the query results.\n\nReturns:\n~.firestore_v1beta1.query.Query: A \"projected\" query. Acts as\na copy of the current query, modified with the newly added\nprojection.\nRaises:\nValueError: If any ``field_path`` is invalid.", "source": "codesearchnet"}
{"code": "def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=-1):\n    lr_lambda = partial(_get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles)\n    return LambdaLR(optimizer, lr_lambda, last_epoch)", "docstring": "Create a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases\nlinearly between 0 and the initial lr set in the optimizer.\n\nArgs:\noptimizer ([`~torch.optim.Optimizer`]):\nThe optimizer for which to schedule the learning rate.\nnum_warmup_steps (`int`):\nThe number of steps for the warmup phase.\nnum_training_steps (`int`):\nThe total number of training steps.\nnum_cycles (`int`, *optional*, defaults to 1):\nThe number of hard restarts to use.\nlast_epoch (`int`, *optional*, defaults to -1):\nThe index of the last epoch when resuming training.\n\nReturn:\n`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.", "source": "github-repos"}
{"code": "def __call__(self, inputs, *args, **kwargs):\n    scope = kwargs.pop('scope', None)\n    if self._keras_style:\n        if scope is not None:\n            raise ValueError('scope argument not allowed when keras style layers are enabled, but saw: {}'.format(scope))\n        return super(Layer, self).__call__(inputs, *args, **kwargs)\n    self._set_scope(scope)\n    if self.built:\n        try:\n            scope_context_manager = self._always_reuse_variable_scope\n        except AttributeError:\n            scope_context_manager = None\n        if scope_context_manager is None:\n            scope_context_manager = vs.variable_scope(self._scope, reuse=True, auxiliary_name_scope=False)\n            if not ops.executing_eagerly_outside_functions():\n                self._always_reuse_variable_scope = scope_context_manager\n    else:\n        scope_context_manager = vs.variable_scope(self._scope, reuse=self._reuse, auxiliary_name_scope=False)\n    with scope_context_manager as scope:\n        self._current_scope = scope\n        try:\n            call_has_scope_arg = self._call_has_scope_arg\n        except AttributeError:\n            self._call_fn_args = variable_scope_shim.fn_args(self.call)\n            self._call_has_scope_arg = 'scope' in self._call_fn_args\n            call_has_scope_arg = self._call_has_scope_arg\n        if call_has_scope_arg:\n            kwargs['scope'] = scope\n        outputs = super(Layer, self).__call__(inputs, *args, **kwargs)\n    if not context.executing_eagerly():\n        _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)\n    return outputs", "docstring": "Wraps `call`, applying pre- and post-processing steps.\n\nArgs:\ninputs: input tensor(s).\n*args: additional positional arguments to be passed to `self.call`.\n**kwargs: additional keyword arguments to be passed to `self.call`.\n**Note**: kwarg `scope` is reserved for use by the layer.\n\nReturns:\nOutput tensor(s).\n\nNote:\n- If the layer's `call` method takes a `scope` keyword argument,\nthis argument will be automatically set to the current variable scope.\n- If the layer's `call` method takes a `mask` argument (as some Keras\nlayers do), its default value will be set to the mask generated\nfor `inputs` by the previous layer (if `input` did come from\na layer that generated a corresponding mask, i.e. if it came from\na Keras layer with masking support.\n\nRaises:\nValueError: if the layer's `call` method returns None (an invalid value).", "source": "github-repos"}
{"code": "def getFingerprint(self, text):\n        \n        fp = self._fullClient.getFingerprintForText(text)\n        return fp.positions", "docstring": "Get the semantic fingerprint of the input text.\nArgs:\ntext, str: The text to be evaluated\nReturns:\nlist of str: the positions of the semantic fingerprint\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def match(self, other_version):\n    \n    major, minor, patch = _str_to_version(other_version, allow_wildcard=True)\n    return (major in [self.major, \"*\"] and minor in [self.minor, \"*\"]\n            and patch in [self.patch, \"*\"])", "docstring": "Returns True if other_version matches.\n\nArgs:\nother_version: string, of the form \"x[.y[.x]]\" where {x,y,z} can be a\nnumber or a wildcard.", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    filter_collection = getattr(\n        configuration_object, '_filter_collection', None)\n    if not filter_collection:\n      raise errors.BadConfigObject(\n          'Filter collection missing from configuration object')\n\n    date_filters = getattr(options, 'date_filters', None)\n    if not date_filters:\n      return\n\n    file_entry_filter = file_entry_filters.DateTimeFileEntryFilter()\n\n    for date_filter in date_filters:\n      date_filter_pieces = date_filter.split(',')\n      if len(date_filter_pieces) != 3:\n        raise errors.BadConfigOption(\n            'Badly formed date filter: {0:s}'.format(date_filter))\n\n      time_value, start_time_string, end_time_string = date_filter_pieces\n      time_value = time_value.strip()\n      start_time_string = start_time_string.strip()\n      end_time_string = end_time_string.strip()\n\n      try:\n        file_entry_filter.AddDateTimeRange(\n            time_value, start_time_string=start_time_string,\n            end_time_string=end_time_string)\n      except ValueError:\n        raise errors.BadConfigOption(\n            'Badly formed date filter: {0:s}'.format(date_filter))\n\n    filter_collection.AddFilter(file_entry_filter)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def initialize(graph=None, session=None):\n    if context.executing_eagerly():\n        return\n    if _summary_state.writer is None:\n        raise RuntimeError('No default tf.contrib.summary.SummaryWriter found')\n    if session is None:\n        session = ops.get_default_session()\n        if session is None:\n            raise ValueError('Argument `session must be passed if no default session exists')\n    session.run(summary_writer_initializer_op())\n    if graph is not None:\n        data = _serialize_graph(graph)\n        x = array_ops.placeholder(dtypes.string)\n        session.run(graph_v1(x, 0), feed_dict={x: data})", "docstring": "Initializes summary writing for graph execution mode.\n\nThis operation is a no-op when executing eagerly.\n\nThis helper method provides a higher-level alternative to using\n`tf.contrib.summary.summary_writer_initializer_op` and\n`tf.contrib.summary.graph`.\n\nMost users will also want to call `tf.compat.v1.train.create_global_step`\nwhich can happen before or after this function is called.\n\nArgs:\ngraph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.\nThis function will not write the default graph by default. When\nwriting to an event log file, the associated step will be zero.\nsession: So this method can call `tf.Session.run`. This defaults\nto `tf.compat.v1.get_default_session`.\n\nRaises:\nRuntimeError: If  the current thread has no default\n`tf.contrib.summary.SummaryWriter`.\nValueError: If session wasn't passed and no default session.", "source": "github-repos"}
{"code": "def FormatSOAPDateTime(value):\n    value_date = value['date']\n    return ('%s-%s-%s %s:%s:%s (%s)' % (value_date['year'], value_date['month'], value_date['day'], value['hour'], value['minute'], value['second'], value['timeZoneId']))", "docstring": "Format a SOAP DateTime object for printing.\n\nArgs:\nvalue: The DateTime object to format.\n\nReturns:\nA string representing the value.", "source": "codesearchnet"}
{"code": "def items(self, section=_UNSET):\n        \n        if section is _UNSET:\n            return [(sect.name, sect) for sect in self.sections_blocks()]\n\n        section = self.__getitem__(section)\n        return [(opt.key, opt) for opt in section.option_blocks()]", "docstring": "Return a list of (name, value) tuples for options or sections.\n\nIf section is given, return a list of tuples with (name, value) for\neach option in the section. Otherwise, return a list of tuples with\n(section_name, section_type) for each section.\n\nArgs:\nsection (str): optional section name, default UNSET\n\nReturns:\nlist: list of :class:`Section` or :class:`Option` objects", "source": "juraj-google-style"}
{"code": "def get_gradients(self, loss, params):\n    params = nest.flatten(params)\n    with backend.get_graph().as_default(), backend.name_scope(self._name + '/gradients'):\n        grads = gradients.gradients(loss, params)\n        for grad, param in zip(grads, params):\n            if grad is None:\n                raise ValueError('Variable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.'.format(param))\n    return grads", "docstring": "Returns gradients of `loss` with respect to `params`.\n\nShould be used only in legacy v1 graph mode.\n\nArgs:\nloss: Loss tensor.\nparams: List of variables.\n\nReturns:\nList of gradient tensors.\n\nRaises:\nValueError: In case any gradient cannot be computed (e.g. if gradient\nfunction not implemented).", "source": "github-repos"}
{"code": "def in_builddir(sub='.'):\n    \n    from functools import wraps\n\n    def wrap_in_builddir(func):\n        \n\n        @wraps(func)\n        def wrap_in_builddir_func(self, *args, **kwargs):\n            \n            p = local.path(self.builddir) / sub\n            if not p.exists():\n                LOG.error(\"%s does not exist.\", p)\n\n            if p == local.cwd:\n                LOG.debug(\"CWD already is %s\", p)\n                return func(self, *args, *kwargs)\n            with local.cwd(p):\n                return func(self, *args, **kwargs)\n\n        return wrap_in_builddir_func\n\n    return wrap_in_builddir", "docstring": "Decorate a project phase with a local working directory change.\n\nArgs:\nsub: An optional subdirectory to change into.", "source": "juraj-google-style"}
{"code": "def remote(self, *args, **kwargs):\n        \n        return self._remote(args=args, kwargs=kwargs)", "docstring": "Create an actor.\n\nArgs:\nargs: These arguments are forwarded directly to the actor\nconstructor.\nkwargs: These arguments are forwarded directly to the actor\nconstructor.\n\nReturns:\nA handle to the newly created actor.", "source": "juraj-google-style"}
{"code": "def convert(self):\n    self._validate_inputs(self._input_tensors, self.quantized_input_stats)\n    quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, self._graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n    optimized_graph = self._optimize_tf_model(self._graph_def, self._input_tensors, self._output_tensors, quant_mode)\n    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)\n    converter_kwargs = self._get_base_converter_args()\n    converter_kwargs.update(quant_mode.converter_flags(self.inference_type, self.inference_input_type))\n    converter_kwargs.update({'output_format': self.output_format, 'quantized_input_stats': self._quantized_stats, 'default_ranges_stats': self.default_ranges_stats, 'drop_control_dependency': self.drop_control_dependency, 'reorder_across_fake_quant': self.reorder_across_fake_quant, 'change_concat_input_ranges': self.change_concat_input_ranges, 'dump_graphviz_dir': self.dump_graphviz_dir, 'dump_graphviz_video': self.dump_graphviz_video, 'conversion_summary_dir': self.conversion_summary_dir})\n    self._validate_quantized_input_stats(converter_kwargs, quant_mode)\n    if not self.experimental_new_converter:\n        logging.warning('Please consider switching to the new converter by setting experimental_new_converter=True. The old converter is deprecated.')\n    else:\n        logging.info('Using experimental converter: If you encountered a problem please file a bug. You can opt-out by setting experimental_new_converter=False')\n    if self._has_valid_tensors():\n        result = _convert_graphdef(input_data=optimized_graph, input_tensors=self._input_tensors, output_tensors=self._output_tensors, **converter_kwargs)\n    else:\n        result = _convert_graphdef_with_arrays(input_data=optimized_graph, input_arrays_with_shape=self._input_arrays_with_shape, output_arrays=self._output_arrays, control_output_arrays=self._control_output_arrays, **converter_kwargs)\n    return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nReturns:\nThe converted data in serialized format, either a TFLite Flatbuffer or\na Graphviz graph depending on value in `output_format`.\n\nRaises:\nValueError:\nInput shape is not specified.\nNone value for dimension in input_tensor.", "source": "github-repos"}
{"code": "def _normalize_string(raw_str):\n    return ' '.join((token.strip() for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str))))", "docstring": "Normalizes the string using tokenizer.encode.\n\nArgs:\nraw_str: the input string\n\nReturns:\nA string which is ready to be tokenized using split()", "source": "codesearchnet"}
{"code": "class RowwiseParallel(TensorParallelLayer):\n\n    def __init__(self, *, input_layouts: Optional[Placement]=None, output_layouts: Optional[Placement]=None, use_local_output: bool=True, use_dtensor=True):\n        super().__init__()\n        self.input_layouts = (input_layouts or Shard(-1),)\n        self.output_layouts = (output_layouts or Replicate(),)\n        self.use_local_output = use_local_output\n        self.use_dtensor = use_dtensor\n\n    def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh):\n        if param_type != 'bias':\n            parameter = get_tensor_shard(param, empty_param, device_mesh, rank, -1)\n            shard = [Shard(-1)]\n        else:\n            shard = [Replicate()]\n            parameter = param[:]\n        parameter = parameter.to(param_casting_dtype)\n        if to_contiguous:\n            parameter = parameter.contiguous()\n        if self.use_dtensor:\n            parameter = DTensor.from_local(parameter, device_mesh, shard, run_check=False)\n        return nn.Parameter(parameter, requires_grad=parameter.is_floating_point())\n\n    @staticmethod\n    def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):\n        if hasattr(mod, 'bias') and mod.bias is not None:\n            mod._bias = mod.bias\n            mod.bias = None\n        input_tensor = inputs[0]\n        if not isinstance(input_tensor, DTensor):\n            input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False)\n        if input_layouts != desired_input_layouts:\n            input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=True)\n        return input_tensor\n\n    @staticmethod\n    def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):\n        if outputs.placements != output_layouts:\n            outputs = outputs.redistribute(placements=output_layouts, async_op=True)\n        if hasattr(mod, '_bias'):\n            outputs += mod._bias\n        return outputs.to_local() if use_local_output else outputs\n\n    def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:\n        module._distribute_module_applied = True\n        if self.use_dtensor:\n            if isinstance(module, nn.Linear):\n                self.desired_input_layouts: Tuple[Placement, ...] = (Shard(-1),)\n            elif isinstance(module, nn.Embedding):\n                self.desired_input_layouts = (Replicate(),)\n            elif isinstance(module, nn.Parameter):\n                self.desired_input_layouts = (Shard(-1),)\n            else:\n                raise NotImplementedError('RowwiseParallel currently only support nn.Linear and nn.Embedding!')\n            distribute_module(module, device_mesh, partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), partial(self._prepare_output_fn, self.output_layouts, self.use_local_output))", "docstring": "Partition a compatible nn.Module in a row-wise fashion. Currently supports nn.Linear and nn.Embedding.\nUsers can compose it with ColwiseParallel to achieve the sharding of more complicated modules.\n(i.e. MLP, Attention)\n\nKeyword Args:\ninput_layouts (Placement, optional):\nThe DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to\nbecome a DTensor. 
If not specified, we assume the input tensor to be sharded on the last dimension.\noutput_layouts (Placement, optional):\nThe DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module\nwith the user desired layout. If not specified, the output tensor is replicated.\nuse_local_output (bool, optional):\nWhether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True.\nReturns:\nA :class:`ParallelStyle` object that represents Rowwise sharding of the nn.Module.", "source": "github-repos"}
{"code": "def sg_regularizer_loss(scale=1.0):\n    r\n    return scale * tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))", "docstring": "r\"\"\" Get regularizer losss\n\nArgs:\nscale: A scalar. A weight applied to regularizer loss", "source": "juraj-google-style"}
{"code": "def get_community_names():\n    ret = dict()\n    if __utils__['reg.key_exists'](_HKEY, _COMMUNITIES_GPO_KEY):\n        _LOG.debug('Loading communities from Group Policy settings')\n        current_values = __utils__['reg.list_values'](_HKEY, _COMMUNITIES_GPO_KEY, include_default=False)\n        if isinstance(current_values, list):\n            for current_value in current_values:\n                if (not isinstance(current_value, dict)):\n                    continue\n                ret[current_value['vdata']] = 'Managed by GPO'\n    if (not ret):\n        _LOG.debug('Loading communities from SNMP settings')\n        current_values = __utils__['reg.list_values'](_HKEY, _COMMUNITIES_KEY, include_default=False)\n        if isinstance(current_values, list):\n            for current_value in current_values:\n                if (not isinstance(current_value, dict)):\n                    continue\n                permissions = six.text_type()\n                for permission_name in _PERMISSION_TYPES:\n                    if (current_value['vdata'] == _PERMISSION_TYPES[permission_name]):\n                        permissions = permission_name\n                        break\n                ret[current_value['vname']] = permissions\n    if (not ret):\n        _LOG.debug('Unable to find existing communities.')\n    return ret", "docstring": "Get the current accepted SNMP community names and their permissions.\n\nIf community names are being managed by Group Policy, those values will be\nreturned instead like this:\n\n.. code-block:: bash\n\nTestCommunity:\nManaged by GPO\n\nCommunity names managed normally will denote the permission instead:\n\n.. code-block:: bash\n\nTestCommunity:\nRead Only\n\nReturns:\ndict: A dictionary of community names and permissions.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_snmp.get_community_names", "source": "codesearchnet"}
{"code": "def GetSuperClasses():\n    return SUPERCLASSES.copy()", "docstring": "Get a Python type hierarchy mapping.\n\nThis generates a dictionary that can be used to look up the bases of\na type in the abstract base class hierarchy.\n\nReturns:\nA dictionary mapping a type, as string, to a list of base types (also\nas strings). E.g. \"float\" -> [\"Real\"].", "source": "github-repos"}
{"code": "def payoff(spots):\n    return tf.nn.relu((spots - strikes) * option_signs)", "docstring": "Computes payff for the specified options given the spot grid.\n\nArgs:\nspots: Tensor of shape [batch_size, grid_size, 1]. The spot values at some\ntime.\n\nReturns:\nPayoffs for exercise at the specified strikes.", "source": "github-repos"}
{"code": "def insert_column(table, insert_column, col_name=None, default_value=None):\n    \n    column_labels = table[0]\n    following_index = 0\n\n    def set_cell(row, column_index, value):\n        \n        if hasattr(value, '__call__'):\n            row[column_index] = value(column_labels, row, column_index)\n        else:\n            row[column_index] = value\n\n    if isinstance(insert_column, basestring):\n        insert_column = insert_column.strip()\n        for column_index in range(len(column_labels)):\n            if column_labels[column_index] == insert_column:\n                following_index = column_index\n                break\n    else:\n        following_index = insert_column\n\n    col_data_start = 0\n    if col_name != None:\n        table[0].insert(following_index, col_name.strip())\n        col_data_start = 1\n    for row in table[col_data_start:]:\n        row.insert(following_index, None)\n        if default_value:\n            set_cell(row, min(following_index, len(row)-1), default_value)", "docstring": "Inserts a new column before another specified column (by name or index).\n\nArgs:\ninsert_column: The column index or first row name where the insertion should occur\ncol_name: The name to insert into the first row of the column. Leaving this argument\nto the default of None will apply the default_value to that row's cell.\ndefault_value: Can be a value or function which takes (row, index, value) as\narguments to return a value.", "source": "juraj-google-style"}
{"code": "def create_halton_samples(order, dim=1, burnin=(- 1), primes=()):\n    primes = list(primes)\n    if (not primes):\n        prime_order = (10 * dim)\n        while (len(primes) < dim):\n            primes = create_primes(prime_order)\n            prime_order *= 2\n    primes = primes[:dim]\n    assert (len(primes) == dim), 'not enough primes'\n    if (burnin < 0):\n        burnin = max(primes)\n    out = numpy.empty((dim, order))\n    indices = [(idx + burnin) for idx in range(order)]\n    for dim_ in range(dim):\n        out[dim_] = create_van_der_corput_samples(indices, number_base=primes[dim_])\n    return out", "docstring": "Create Halton sequence.\n\nFor ``dim == 1`` the sequence falls back to Van Der Corput sequence.\n\nArgs:\norder (int):\nThe order of the Halton sequence. Defines the number of samples.\ndim (int):\nThe number of dimensions in the Halton sequence.\nburnin (int):\nSkip the first ``burnin`` samples. If negative, the maximum of\n``primes`` is used.\nprimes (tuple):\nThe (non-)prime base to calculate values along each axis. If\nempty, growing prime values starting from 2 will be used.\n\nReturns (numpy.ndarray):\nHalton sequence with ``shape == (dim, order)``.", "source": "codesearchnet"}
{"code": "def __init__(self, config, in_channels, out_channels, bottleneck_channels):\n    super().__init__()\n    self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False)\n    self.norm1 = VitDetLayerNorm(bottleneck_channels)\n    self.act1 = ACT2FN[config.hidden_act]\n    self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False)\n    self.norm2 = VitDetLayerNorm(bottleneck_channels)\n    self.act2 = ACT2FN[config.hidden_act]\n    self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False)\n    self.norm3 = VitDetLayerNorm(out_channels)", "docstring": "Args:\nconfig (`VitDetConfig`):\nModel configuration.\nin_channels (`int`):\nNumber of input channels.\nout_channels (`int`):\nNumber of output channels.\nbottleneck_channels (`int`):\nNumber of output channels for the 3x3 \"bottleneck\" conv layers.", "source": "github-repos"}
{"code": "def generate_example(config, ext='json'):\n    template_name = 'example.{0}'.format(ext.lower())\n    template = ENV.get_template(template_name)\n    return template.render(config=config)", "docstring": "Generate an example file based on the given Configuration object.\n\nArgs:\nconfig (confpy.core.configuration.Configuration): The configuration\nobject on which to base the example.\next (str): The file extension to render. Choices: JSON and INI.\n\nReturns:\nstr: The text of the example file.", "source": "codesearchnet"}
{"code": "def set_zones_device_assignment(self, internal_devices, external_devices) -> dict:\n        \n        internal = [x.id for x in internal_devices]\n        external = [x.id for x in external_devices]\n        data = {\"zonesDeviceAssignment\": {\"INTERNAL\": internal, \"EXTERNAL\": external}}\n        return self._restCall(\n            \"home/security/setZonesDeviceAssignment\", body=json.dumps(data)\n        )", "docstring": "sets the devices for the security zones\nArgs:\ninternal_devices(List[Device]): the devices which should be used for the internal zone\nexternal_devices(List[Device]):  the devices which should be used for the external(hull) zone\n\nReturns:\nthe result of _restCall", "source": "juraj-google-style"}
{"code": "def distinct(l):\n    \n    seen = set()\n    seen_add = seen.add\n    return (_ for _ in l if not (_ in seen or seen_add(_)))", "docstring": "Return a list where the duplicates have been removed.\n\nArgs:\nl (list): the list to filter.\n\nReturns:\nlist: the same list without duplicates.", "source": "juraj-google-style"}
{"code": "def add(self, dic):\n    for kw in dic:\n        checkKey(kw, self.keyWord)\n        self._add([Pair(kw, StringSingle(dic[kw]))], self.d)", "docstring": "adds a dict as pair\n\nArgs:\ndic (dict): key and value", "source": "codesearchnet"}
{"code": "def get_templates(self, id_or_uri, start=0, count=(- 1), filter='', query='', sort=''):\n    uri = (self._client.build_uri(id_or_uri) + '/templates')\n    return self._client.get(self._client.build_query_uri(start=start, count=count, filter=filter, query=query, sort=sort, uri=uri))", "docstring": "Gets a list of volume templates. Returns a list of storage templates belonging to the storage system.\n\nReturns:\nlist: Storage Template List.", "source": "codesearchnet"}
{"code": "def site_specific_nn_occupation(self):\n    to_return = {l: 0 for l in set((site.label for site in self.p_neighbours))}\n    for site in self.p_neighbours:\n        if site.is_occupied:\n            to_return[site.label] += 1\n    return to_return", "docstring": "Returns the number of occupied nearest neighbour sites, classified by site type.\n\nArgs:\nNone\n\nReturns:\n(Dict(Str:Int)): Dictionary of nearest-neighbour occupied site numbers, classified by site label, e.g. { 'A' : 2, 'B' : 1 }.", "source": "codesearchnet"}
{"code": "def count_lines(self):\n    lines = 0\n    non_blank = 0\n    for (path, info) in self._make_iter():\n        if info.is_file:\n            for line in self.fs.open(path, 'rb'):\n                lines += 1\n                if line.rstrip():\n                    non_blank += 1\n    return LineCounts(lines=lines, non_blank=non_blank)", "docstring": "Count the lines in the matched files.\n\nReturns:\n`~LineCounts`: A named tuple containing line counts.\n\nExample:\n>>> import fs\n>>> fs.open_fs('~/projects').glob('**/*.py').count_lines()\nLineCounts(lines=5767102, non_blank=4915110)", "source": "codesearchnet"}
{"code": "def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray:\n    waveform = np.pad(waveform, (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)), mode='reflect')\n    complex_spectrogram = spectrogram(waveform, window=self.window, frame_length=self.n_fft, hop_length=self.hop_length, fft_length=self.n_fft, power=None, center=self.center, mel_filters=None, mel_floor=None)\n    amplitude_spectrogram = np.sqrt(np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor)\n    mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram)\n    log_mel_spectrogram = np.log(np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor)\n    return log_mel_spectrogram.T", "docstring": "Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by\n`int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode.\n\nArgs:\nwaveform (`np.ndarray` of shape `(length,)`):\nThe input waveform. This must be a single real-valued, mono waveform.\n\nReturns:\n`numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`.", "source": "github-repos"}
{"code": "def _render_normalized_cost_bar(self, cost, max_cost, length):\n    num_ticks = int(np.ceil(float(cost) / max_cost * length))\n    num_ticks = num_ticks or 1\n    output = RL('[', font_attr=self._LINE_COST_ATTR)\n    output += RL('|' * num_ticks + ' ' * (length - num_ticks), font_attr=['bold', self._LINE_COST_ATTR])\n    output += RL(']', font_attr=self._LINE_COST_ATTR)\n    return output", "docstring": "Render a text bar representing a normalized cost.\n\nArgs:\ncost: the absolute value of the cost.\nmax_cost: the maximum cost value to normalize the absolute cost with.\nlength: (int) length of the cost bar, in number of characters, excluding\nthe brackets on the two ends.\n\nReturns:\nAn instance of debugger_cli_common.RichTextLine.", "source": "github-repos"}
{"code": "def get_validators(self, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)", "docstring": "Returns the current NEO consensus nodes information and voting status.\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def getColor(name):\n    try:\n        c = getColorInfoList()[getColorList().index(name.upper())]\n        return ((c[1] / 255.0), (c[2] / 255.0), (c[3] / 255.0))\n    except:\n        return (1, 1, 1)", "docstring": "Retrieve RGB color in PDF format by name.\n\nReturns:\na triple of floats in range 0 to 1. In case of name-not-found, \"white\" is returned.", "source": "codesearchnet"}
{"code": "def get_table(self, table, retry=DEFAULT_RETRY):\n    table_ref = _table_arg_to_table_ref(table, default_project=self.project)\n    api_response = self._call_api(retry, method='GET', path=table_ref.path)\n    return Table.from_api_repr(api_response)", "docstring": "Fetch the table referenced by ``table``.\n\nArgs:\ntable (Union[ \\\n:class:`~google.cloud.bigquery.table.Table`, \\\n:class:`~google.cloud.bigquery.table.TableReference`, \\\nstr, \\\n]):\nA reference to the table to fetch from the BigQuery API.\nIf a string is passed in, this method attempts to create a\ntable reference from a string using\n:func:`google.cloud.bigquery.table.TableReference.from_string`.\nretry (:class:`google.api_core.retry.Retry`):\n(Optional) How to retry the RPC.\n\nReturns:\ngoogle.cloud.bigquery.table.Table:\nA ``Table`` instance.", "source": "codesearchnet"}
{"code": "def funds(self, term, field=None, **kwargs):\n    params = kwargs\n    params['q'] = term\n    if field:\n        params['f'] = field\n    else:\n        params['f'] = 'fu.org.n'\n    baseuri = (self._BASE_URI + 'funds')\n    res = self.session.get(baseuri, params=params)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for funds matching a search term.\n\nArgs:\nterm (str): Fund id to search on\nfield (str): The field to search on.\nOptions are title, amount, org_name and type.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "codesearchnet"}
{"code": "def expression_filter(self, name, **kwargs):\n\n    def decorator(func):\n        self.filters[name] = ExpressionFilter(name, func, **kwargs)\n    return decorator", "docstring": "Returns a decorator function for adding an expression filter.\n\nArgs:\nname (str): The name of the filter.\n**kwargs: Variable keyword arguments for the filter.\n\nReturns:\nCallable[[Callable[[AbstractExpression, Any], AbstractExpression]]]: A decorator\nfunction for adding an expression filter.", "source": "codesearchnet"}
{"code": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):\n    out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n    if target_sizes is not None:\n        if len(out_logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n    prob = out_logits.sigmoid()\n    prob = prob.view(out_logits.shape[0], -1)\n    k_value = min(top_k, prob.size(1))\n    topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)\n    scores = topk_values\n    topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')\n    labels = topk_indexes % out_logits.shape[2]\n    boxes = center_to_corners_format(out_bbox)\n    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n    if target_sizes is not None:\n        if isinstance(target_sizes, List):\n            img_h = torch.Tensor([i[0] for i in target_sizes])\n            img_w = torch.Tensor([i[1] for i in target_sizes])\n        else:\n            img_h, img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n        boxes = boxes * scale_fct[:, None, :]\n    results = []\n    for s, l, b in zip(scores, labels, boxes):\n        score = s[s > threshold]\n        label = l[s > threshold]\n        box = b[s > threshold]\n        results.append({'scores': score, 'labels': label, 'boxes': box})\n    return results", "docstring": "Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,\ntop_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`ConditionalDetrObjectDetectionOutput`]):\nRaw outputs of the model.\nthreshold (`float`, *optional*):\nScore threshold to keep object detection predictions.\ntarget_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\ntop_k (`int`, *optional*, defaults to 100):\nKeep only top k bounding boxes before filtering by thresholding.\n\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"}
{"code": "def VerifyRow(self, parser_mediator, row):\n    \n    try:\n      time_elements_tuple = self._GetTimeElementsTuple(row['time'])\n    except (TypeError, ValueError):\n      return False\n\n    try:\n      dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      return False\n\n    try:\n      my_event = int(row['event'], 10)\n    except (TypeError, ValueError):\n      return False\n\n    if my_event < 1 or my_event > 77:\n      return False\n\n    try:\n      category = int(row['cat'], 10)\n    except (TypeError, ValueError):\n      return False\n\n    if category < 1 or category > 4:\n      return False\n\n    return True", "docstring": "Verifies if a line of the file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def get_options_as(op: Union[schema_fb.Operator, schema_fb.OperatorT], opts_type: Type[OptsT]) -> Optional[OptsT]:\n    err = ValueError(f'Unsupported options type: {opts_type}')\n    type_name: str = opts_type.__name__\n    if not type_name.endswith('T'):\n        raise err\n    base_type_name = type_name.removesuffix('T')\n    is_opt_1_type = hasattr(schema_fb.BuiltinOptions, base_type_name)\n    if not is_opt_1_type and (not hasattr(schema_fb.BuiltinOptions2, base_type_name)):\n        raise err\n    if isinstance(op, schema_fb.Operator):\n        if not is_opt_1_type:\n            enum_val = getattr(schema_fb.BuiltinOptions2, base_type_name)\n            opts_creator = schema_fb.BuiltinOptions2Creator\n            raw_ops = op.BuiltinOptions2()\n            actual_enum_val = op.BuiltinOptions2Type()\n        else:\n            enum_val = getattr(schema_fb.BuiltinOptions, base_type_name)\n            opts_creator = schema_fb.BuiltinOptionsCreator\n            raw_ops = op.BuiltinOptions()\n            actual_enum_val = op.BuiltinOptionsType()\n        if raw_ops is None or actual_enum_val != enum_val:\n            return None\n        return opts_creator(enum_val, raw_ops)\n    elif isinstance(op, schema_fb.OperatorT):\n        if is_opt_1_type:\n            raw_ops_t = op.builtinOptions\n        else:\n            raw_ops_t = op.builtinOptions2\n        if raw_ops_t is None or not isinstance(raw_ops_t, opts_type):\n            return None\n        return raw_ops_t\n    else:\n        return None", "docstring": "Get the options of an operator as the specified type.\n\nRequested type must be an object-api type (ends in 'T').\n\nArgs:\nop: The operator to get the options from.\nopts_type: The type of the options to get.\n\nReturns:\nThe options as the specified type, or None if the options are not of the\nspecified type.\n\nRaises:\nValueError: If the specified type is not a valid options type.", "source": "github-repos"}
{"code": "def remove(self, value):\n\t\t\n\t\ttry:\n\t\t\tindex = self._dict[value]\n\t\texcept KeyError:\n\t\t\traise ValueError('Value \"%s\" is not present.')\n\t\telse:\n\t\t\tdel self[index]", "docstring": "Remove value from self.\n\nArgs:\nvalue: Element to remove from self\nRaises:\nValueError: if element is already present", "source": "juraj-google-style"}
{"code": "def log_sigmoid(x):\n    if any_symbolic_tensors((x,)):\n        return LogSigmoid().symbolic_call(x)\n    return backend.nn.log_sigmoid(x)", "docstring": "Logarithm of the sigmoid activation function.\n\nIt is defined as `f(x) = log(1 / (1 + exp(-x)))`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0])\n>>> keras.ops.log_sigmoid(x)\narray([-1.0000418, -0.6931472, -0.474077, -0.00671535], dtype=float32)", "source": "github-repos"}
{"code": "def fit(self, X, y, X_val=None, y_val=None):\n        \n        y = y.reshape((len(y), 1))\n\n        if sparse.issparse(X):\n            X = X.tocsr()\n\n        if X_val is not None:\n            n_val = len(y_val)\n            y_val = y_val.reshape((n_val, 1))\n\n        \n        self.i = X.shape[1]\n        self.l1 = self.l1 / self.i\n        self.w = (np.random.rand((self.i + 2) * self.h + 1) - .5) * 1e-6\n        self.w_opt = self.w\n        self.n_opt = 0\n\n        logger.info('training ...')\n        n_obs = X.shape[0]\n        batch = self.b\n        n_epoch = self.n\n        idx = range(n_obs)\n        self.auc_opt = .5\n\n        start = time.time()\n        print('\\tEPOCH TRAIN     VALID     BEST      TIME (m)')\n        print('\\t--------------------------------------------')\n\n        \n        p = self.predict_raw(X)\n        auc = roc_auc_score(y, p)\n        auc_val = auc\n        if X_val is not None:\n            p_val = self.predict_raw(X_val)\n            auc_val = roc_auc_score(y_val, p_val)\n\n        print('\\t{:3d}:  {:.6f}  {:.6f}  {:.6f}  {:.2f}'.format(\n              0, auc, auc_val, self.auc_opt,\n              (time.time() - start) / SEC_PER_MIN))\n     \n        \n        \n        epoch = 1\n        while epoch <= n_epoch:\n            \n            \n            np.random.shuffle(idx)\n\n            \n            \n            \n            \n            \n            \n            for i in range(int(n_obs / batch) + 1):\n                if (i + 1) * batch > n_obs:\n                    sub_idx = idx[batch * i:n_obs]\n                else:\n                    sub_idx = idx[batch * i:batch * (i + 1)]\n\n                x = X[sub_idx]\n                neg_idx = [n_idx for n_idx, n_y in enumerate(y[sub_idx]) if n_y == 0.]\n                pos_idx = [p_idx for p_idx, p_y in enumerate(y[sub_idx]) if p_y == 1.]\n                x0 = x[neg_idx]\n                x1 = x[pos_idx]\n                \n                \n                \n                \n                \n                ret = minimize(self.func,\n                               self.w,\n                               args=(x0, x1),\n                               method='L-BFGS-B',\n                               jac=self.fprime,\n                               options={'maxiter': 5})\n                self.w = ret.x\n\n            p = self.predict_raw(X)\n            auc = roc_auc_score(y, p)\n            auc_val = auc\n\n            if X_val is not None:\n                p_val = self.predict_raw(X_val)\n                auc_val = roc_auc_score(y_val, p_val)\n\n                if auc_val > self.auc_opt:\n                    self.auc_opt = auc_val\n                    self.w_opt = self.w\n                    self.n_opt = epoch\n\n                    \n                    \n                    if epoch == n_epoch:\n                        n_epoch += 5\n\n            print('\\t{:3d}:  {:.6f}  {:.6f}  {:.6f}  {:.2f}'.format(\n                  epoch, auc, auc_val, self.auc_opt,\n                  (time.time() - start) / SEC_PER_MIN))\n\n            epoch += 1\n\n        if X_val is not None:\n            print('Optimal epoch is {0} ({1:.6f})'.format(self.n_opt,\n                                                          self.auc_opt))\n            self.w = self.w_opt\n\n        logger.info('done training')", "docstring": "Train a network with the quasi-Newton method.\n\nArgs:\nX (np.array of float): feature matrix for training\ny (np.array of float): target values for training\nX_val (np.array 
of float): feature matrix for validation\ny_val (np.array of float): target values for validation", "source": "juraj-google-style"}
{"code": "def add_trunk_group(self, intf, value):\n    string = 'switchport trunk group {}'.format(value)\n    return self.configure_interface(intf, string)", "docstring": "Adds the specified trunk group to the interface\n\nArgs:\nintf (str): The interface name to apply the trunk group to\nvalue (str): The trunk group value to apply to the interface\n\nReturns:\nTrue if the operation as successfully applied otherwise false", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return token_ids_0 + [self.sep_token_id]\n    sep = [self.sep_token_id]\n    return token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A XLMProphetNet sequence has the following format:\n\n- single sequence: `X [SEP]`\n- pair of sequences: `A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def _load_and_verify_metadata(self, submission_type):\n    \n    metadata_filename = os.path.join(self._extracted_submission_dir,\n                                     'metadata.json')\n    if not os.path.isfile(metadata_filename):\n      logging.error('metadata.json not found')\n      return None\n    try:\n      with open(metadata_filename, 'r') as f:\n        metadata = json.load(f)\n    except IOError as e:\n      logging.error('Failed to load metadata: %s', e)\n      return None\n    for field_name in REQUIRED_METADATA_JSON_FIELDS:\n      if field_name not in metadata:\n        logging.error('Field %s not found in metadata', field_name)\n        return None\n    \n    if submission_type != metadata['type']:\n      logging.error('Invalid submission type in metadata, expected \"%s\", '\n                    'actual \"%s\"', submission_type, metadata['type'])\n      return None\n    \n    entry_point = metadata['entry_point']\n    if not os.path.isfile(os.path.join(self._extracted_submission_dir,\n                                       entry_point)):\n      logging.error('Entry point not found: %s', entry_point)\n      return None\n    if not entry_point.endswith('.sh'):\n      logging.warning('Entry point is not an .sh script. '\n                      'This is not necessarily a problem, but if submission '\n                      'won''t run double check entry point first: %s',\n                      entry_point)\n    \n    return metadata", "docstring": "Loads and verifies metadata.\n\nArgs:\nsubmission_type: type of the submission\n\nReturns:\ndictionaty with metadata or None if metadata not found or invalid", "source": "juraj-google-style"}
{"code": "def get_group_by_name(self, group_name: str) -> typing.Optional['Group']:\n        \n        VALID_STR.validate(group_name, 'get_group_by_name')\n        for group in self.groups:\n\n            if group.group_name == group_name:\n                return group\n        return None", "docstring": "Gets a group from its name\n\nArgs:\ngroup_name:\n\nReturns: Group", "source": "juraj-google-style"}
{"code": "def volatility_fn(self):\n    pass", "docstring": "Python callable calculating the instantaneous volatility matrix.\n\nThe callable should accept two real `Tensor` arguments of the same dtype and\nshape `times_shape`. The first argument is the scalar time t, the second\nargument is the value of Ito process X - `Tensor` of shape `batch_shape +\n[dim]`. Here `batch_shape` is an arbitrary shape. The result is value of\nvolatility `S_ij`(t, X). The return value of the callable is a real `Tensor`\nof the same dtype as the input arguments and of shape\n`batch_shape + [dim, dim]`.\n\nReturns:\nThe instantaneous volatility callable.", "source": "github-repos"}
{"code": "def feed(self, url_template, keyword, offset, max_num, page_step):\n        \n        for i in range(offset, offset + max_num, page_step):\n            url = url_template.format(keyword, i)\n            self.out_queue.put(url)\n            self.logger.debug('put url to url_queue: {}'.format(url))", "docstring": "Feed urls once\n\nArgs:\nurl_template: A string with parameters replaced with \"{}\".\nkeyword: A string indicating the searching keyword.\noffset: An integer indicating the starting index.\nmax_num: An integer indicating the max number of images to be crawled.\npage_step: An integer added to offset after each iteration.", "source": "juraj-google-style"}
{"code": "def normalize_cell_value(value):\n    \n    if isinstance(value, dict) or isinstance(value, list):\n        return json.dumps(value)\n    return value", "docstring": "Process value for writing into a cell.\n\nArgs:\nvalue: any type of variable\n\nReturns:\njson serialized value if value is list or dict, else value", "source": "juraj-google-style"}
{"code": "def parse_genes(gene_lines):\n    genes = []\n    header = []\n    hgnc_identifiers = set()\n    delimiter = '\\t'\n    delimiters = ['\\t', ' ', ';']\n    for (i, line) in enumerate(gene_lines):\n        line = line.rstrip()\n        if (not (len(line) > 0)):\n            continue\n        if line.startswith('\n            if (not line.startswith('\n                line_length = 0\n                delimiter = None\n                for alt in delimiters:\n                    head_line = line.split(alt)\n                    if (len(head_line) > line_length):\n                        line_length = len(head_line)\n                        delimiter = alt\n                header = [word.lower() for word in line[1:].split(delimiter)]\n        else:\n            if (i == 0):\n                line_length = 0\n                for alt in delimiters:\n                    head_line = line.split(alt)\n                    if (len(head_line) > line_length):\n                        line_length = len(head_line)\n                        delimiter = alt\n                if (('hgnc' in line) or ('HGNC' in line)):\n                    header = [word.lower() for word in line.split(delimiter)]\n                    continue\n                if line.split(delimiter)[0].isdigit():\n                    header = ['hgnc_id']\n                else:\n                    header = ['hgnc_symbol']\n            splitted_line = line.split(delimiter)\n            gene_info = dict(zip(header, splitted_line))\n            info_found = False\n            for key in gene_info:\n                if gene_info[key]:\n                    info_found = True\n                    break\n            if (not info_found):\n                continue\n            try:\n                gene = parse_gene(gene_info)\n            except Exception as e:\n                LOG.warning(e)\n                raise SyntaxError('Line {0} is malformed'.format((i + 1)))\n            identifier = gene.pop('identifier')\n            if (not (identifier in hgnc_identifiers)):\n                hgnc_identifiers.add(identifier)\n                genes.append(gene)\n    return genes", "docstring": "Parse a file with genes and return the hgnc ids\n\nArgs:\ngene_lines(iterable(str)): Stream with genes\n\nReturns:\ngenes(list(dict)): Dictionaries with relevant gene info", "source": "codesearchnet"}
{"code": "def build_graph(device, input_shape, variable, num_inputs, axis, grad):\n    with ops.device('/%s:0' % device):\n        if not variable:\n            inputs = [array_ops.zeros(input_shape) for _ in range(num_inputs)]\n        elif axis == 1:\n            inputs = [array_ops.zeros([input_shape[0], random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5)]) for _ in range(num_inputs)]\n        else:\n            inputs = [array_ops.zeros([random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5), input_shape[1]]) for _ in range(num_inputs)]\n        outputs = [array_ops.concat(inputs, axis) for _ in range(100)]\n        if grad:\n            return control_flow_ops.group(*list(itertools.chain.from_iterable([gradients_impl.gradients(output, inputs) for output in outputs])))\n        else:\n            return control_flow_ops.group(*outputs)", "docstring": "Build a graph containing a sequence of concat operations.\n\nArgs:\ndevice: string, the device to run on.\ninput_shape: shape of the input tensors.\nvariable: whether or not to randomize the input shape\nnum_inputs: the number of inputs to concat\naxis: axis to be concat'ed\ngrad: if True compute the gradient\n\nReturns:\nAn array of tensors to run()", "source": "github-repos"}
{"code": "def get_feature_from_key(self, feature_key):\n    feature = self.feature_key_map.get(feature_key)\n    if feature:\n        return feature\n    self.logger.error(('Feature \"%s\" is not in datafile.' % feature_key))\n    return None", "docstring": "Get feature for the provided feature key.\n\nArgs:\nfeature_key: Feature key for which feature is to be fetched.\n\nReturns:\nFeature corresponding to the provided feature key.", "source": "codesearchnet"}
{"code": "def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    bvsum = 0\n    for specie1, occu1 in site.species.items():\n        el1 = Element(specie1.symbol)\n        for (nn, dist) in nn_list:\n            for specie2, occu2 in nn.species.items():\n                el2 = Element(specie2.symbol)\n                if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:\n                    r1 = BV_PARAMS[el1][\"r\"]\n                    r2 = BV_PARAMS[el2][\"r\"]\n                    c1 = BV_PARAMS[el1][\"c\"]\n                    c2 = BV_PARAMS[el2][\"c\"]\n                    R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \\\n                        (c1 * r1 + c2 * r2)\n                    vij = exp((R - dist * scale_factor) / 0.31)\n                    bvsum += occu1 * occu2 * vij * (1 if el1.X < el2.X else -1)\n    return bvsum", "docstring": "Calculates the BV sum of a site for unordered structures.\n\nArgs:\nsite:\nThe site\nnn_list:\nList of nearest neighbors in the format [(nn_site, dist), ...].\nscale_factor:\nA scale factor to be applied. This is useful for scaling distance,\nesp in the case of calculation-relaxed structures which may tend\nto under (GGA) or over bind (LDA).", "source": "juraj-google-style"}
{"code": "def component_mget(self, zip_data, components):\n    if (not isinstance(components, list)):\n        print('Components param must be a list')\n        return\n    query_params = {'components': ','.join(components)}\n    return self.fetch_identifier_component('zip/component_mget', zip_data, query_params)", "docstring": "Call the zip component_mget endpoint\n\nArgs:\n- zip_data - As described in the class docstring.\n- components - A list of strings for each component to include in the request.\nExample: [\"zip/details\", \"zip/volatility\"]", "source": "codesearchnet"}
{"code": "def _get_document_path(client, path):\n    parts = ((client._database_string, 'documents') + path)\n    return _helpers.DOCUMENT_PATH_DELIMITER.join(parts)", "docstring": "Convert a path tuple into a full path string.\n\nOf the form:\n\n``projects/{project_id}/databases/{database_id}/...\ndocuments/{document_path}``\n\nArgs:\nclient (~.firestore_v1beta1.client.Client): The client that holds\nconfiguration details and a GAPIC client object.\npath (Tuple[str, ...]): The components in a document path.\n\nReturns:\nstr: The fully-qualified document path.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        if token_ids_1 is not None:\n            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n    return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._query_functions:\n        for query_function in self._query_functions:\n            query_function.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The Query request payload is missing the query functions field.')\n    self.length = local_buffer.length()\n    super(QueryRequestPayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the QueryRequestPayload object to a stream.\n\nArgs:\noutput_buffer (Stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidField: Raised if the query functions are not defined.", "source": "codesearchnet"}
{"code": "def on_enter(__msg: Optional[Union[(Callable, str)]]=None) -> Callable:\n\n    def decorator(__func):\n\n        @wraps(__func)\n        def wrapper(*args, **kwargs):\n            if __msg:\n                print(__msg)\n            else:\n                print('Entering {!r}({!r})'.format(__func.__name__, __func))\n            return __func(*args, **kwargs)\n        return wrapper\n    if callable(__msg):\n        return on_enter()(__msg)\n    return decorator", "docstring": "Decorator to display a message when entering a function.\n\nArgs:\n__msg: Message to display\nReturns:\nWrapped function", "source": "codesearchnet"}
{"code": "def cv_score_mean(self, X, y):\n    (X, y) = self._format_inputs(X, y)\n    if self.problem_type.binary_classification:\n        kf = StratifiedKFold(shuffle=True, random_state=(RANDOM_STATE + 3))\n    elif self.problem_type.multi_classification:\n        self.target_type_transformer.inverse_transform(y)\n        transformer = self.target_type_transformer\n        kf = StratifiedKFoldMultiClassIndicator(transformer, shuffle=True, n_splits=3, random_state=(RANDOM_STATE + 3))\n    elif self.problem_type.regression:\n        kf = KFold(shuffle=True, n_splits=3, random_state=(RANDOM_STATE + 4))\n    else:\n        raise NotImplementedError\n    scoring = {scorer_info.name: scorer_info.scorer for scorer_info in self.scorers_info}\n    cv_results = cross_validate(self.estimator, X, y, scoring=scoring, cv=kf, return_train_score=False)\n    results = self._process_cv_results(cv_results)\n    return results", "docstring": "Compute mean score across cross validation folds.\n\nSplit data and labels into cross validation folds and fit the model for\neach fold. Then, for each scoring type in scorings, compute the score.\nFinally, average the scores across folds. Returns a dictionary mapping\nscoring to score.\n\nArgs:\nX (np.array): data\ny (np.array): labels\nscorings (List[str]): scoring types", "source": "codesearchnet"}
{"code": "def schedule(self, callback, *args, **kwargs):\n    self._executor.submit(callback, *args, **kwargs)", "docstring": "Schedule the callback to be called asynchronously in a thread pool.\n\nArgs:\ncallback (Callable): The function to call.\nargs: Positional arguments passed to the function.\nkwargs: Key-word arguments passed to the function.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def UpdateBudget(self, client_customer_id, budget_id, micro_amount, delivery_method):\n    self.client.SetClientCustomerId(client_customer_id)\n    operations = [{'operator': 'SET', 'operand': {'budgetId': budget_id, 'amount': {'microAmount': micro_amount}, 'deliveryMethod': delivery_method}}]\n    self.client.GetService('BudgetService').mutate(operations)", "docstring": "Update a Budget with the given budgetId.\n\nArgs:\nclient_customer_id: str Client Customer Id used to update Budget.\nbudget_id: str Id of the budget to be updated.\nmicro_amount: str New value for the microAmount field.\ndelivery_method: str New value for the deliveryMethod field.", "source": "codesearchnet"}
{"code": "def allconcat_ring(xs, devices, concat_axis):\n    n = len(xs)\n    if (n == 1):\n        return xs\n    parts = [[(xs[target] if (target == source) else None) for source in xrange(n)] for target in xrange(n)]\n    for distance in xrange(1, ((n \n        for target in xrange(n):\n            source = ((target + distance) % n)\n            if (parts[target][source] is None):\n                with tf.device(devices[target]):\n                    parts[target][source] = tf.identity(parts[((target + 1) % n)][source])\n            source = ((target - distance) % n)\n            if (parts[target][source] is None):\n                with tf.device(devices[target]):\n                    parts[target][source] = tf.identity(parts[((target - 1) % n)][source])\n    return mtf.parallel(devices, tf.concat, parts, axis=([concat_axis] * n))", "docstring": "Concatenate all Tensors everywhere.\n\nPerformance-optimized for a ring of devices.\n\nArgs:\nxs: a list of n tf.Tensors\ndevices: a list of n strings\nconcat_axis: an integer\n\nReturns:\na list of n Tensors", "source": "codesearchnet"}
{"code": "def __init__(self, cmd='gulp'):\n        \n        def is_exe(f):\n            return os.path.isfile(f) and os.access(f, os.X_OK)\n\n        fpath, fname = os.path.split(cmd)\n        if fpath:\n            if is_exe(cmd):\n                self._gulp_cmd = cmd\n                return\n        else:\n            for path in os.environ['PATH'].split(os.pathsep):\n                path = path.strip('\"')\n                file = os.path.join(path, cmd)\n                if is_exe(file):\n                    self._gulp_cmd = file\n                    return\n        raise GulpError(\"Executable not found\")", "docstring": "Initialize with the executable if not in the standard path\n\nArgs:\ncmd: Command. Defaults to gulp.", "source": "juraj-google-style"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(GetAttributeListResponsePayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidKmipEncoding('The GetAttributeList response payload encoding is missing the unique identifier.')\n    names = list()\n    if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n        while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_buffer):\n            name = primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)\n            name.read(local_buffer, kmip_version=kmip_version)\n            names.append(name)\n        if (len(names) == 0):\n            raise exceptions.InvalidKmipEncoding('The GetAttributeList response payload encoding is missing the attribute names.')\n        self._attribute_names = names\n    else:\n        while self.is_tag_next(enums.Tags.ATTRIBUTE_REFERENCE, local_buffer):\n            if self.is_type_next(enums.Types.STRUCTURE, local_buffer):\n                reference = objects.AttributeReference()\n                reference.read(local_buffer, kmip_version=kmip_version)\n                names.append(primitives.TextString(value=reference.attribute_name, tag=enums.Tags.ATTRIBUTE_NAME))\n            elif self.is_type_next(enums.Types.ENUMERATION, local_buffer):\n                reference = primitives.Enumeration(enums.Tags, tag=enums.Tags.ATTRIBUTE_REFERENCE)\n                reference.read(local_buffer, kmip_version=kmip_version)\n                name = enums.convert_attribute_tag_to_name(reference.value)\n                names.append(primitives.TextString(value=name, tag=enums.Tags.ATTRIBUTE_NAME))\n            else:\n                raise exceptions.InvalidKmipEncoding('The GetAttributeList response payload encoding contains an invalid AttributeReference type.')\n        self._attribute_names = names\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the GetAttributeList response payload and\ndecode it into its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the unique identifier or attribute\nnames are missing from the encoded payload.", "source": "codesearchnet"}
{"code": "def task_ids(self):\n    if (not self.id):\n        raise WorkflowError('Workflow is not running.  Cannot get task IDs.')\n    if self.batch_values:\n        raise NotImplementedError('Query Each Workflow Id within the Batch Workflow for task IDs.')\n    wf = self.workflow.get(self.id)\n    return [task['id'] for task in wf['tasks']]", "docstring": "Get the task IDs of a running workflow\n\nArgs:\nNone\n\nReturns:\nList of task IDs", "source": "codesearchnet"}
{"code": "def script_dir_plus_file(filename, pyobject, follow_symlinks=True):\n    return join(script_dir(pyobject, follow_symlinks), filename)", "docstring": "Get current script's directory and then append a filename\n\nArgs:\nfilename (str): Filename to append to directory path\npyobject (Any): Any Python object in the script\nfollow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.\n\nReturns:\nstr: Current script's directory and with filename appended", "source": "codesearchnet"}
{"code": "def remove_pardir_symbols(path, sep=os.sep, pardir=os.pardir):\n    bits = path.split(sep)\n    bits = (x for x in bits if (x != pardir))\n    return sep.join(bits)", "docstring": "Remove relative path symobls such as '..'\n\nArgs:\npath (str): A target path string\nsep (str): A strint to refer path delimiter (Default: `os.sep`)\npardir (str): A string to refer parent directory (Default: `os.pardir`)\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def which(cmd):\n\n    def is_exe(fp):\n        return (os.path.isfile(fp) and os.access(fp, os.X_OK))\n    (fpath, fname) = os.path.split(cmd)\n    if fpath:\n        if is_exe(cmd):\n            return cmd\n    else:\n        for path in os.environ['PATH'].split(os.pathsep):\n            exe_file = os.path.join(path, cmd)\n            if is_exe(exe_file):\n                return exe_file\n    return None", "docstring": "Returns full path to a executable.\n\nArgs:\ncmd (str): Executable command to search for.\n\nReturns:\n(str) Full path to command. None if it is not found.\n\nExample::\n\nfull_path_to_python = which(\"python\")", "source": "codesearchnet"}
{"code": "def update_container(self, container, blkio_weight=None, cpu_period=None, cpu_quota=None, cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None, mem_reservation=None, memswap_limit=None, kernel_memory=None, restart_policy=None):\n    url = self._url('/containers/{0}/update', container)\n    data = {}\n    if blkio_weight:\n        data['BlkioWeight'] = blkio_weight\n    if cpu_period:\n        data['CpuPeriod'] = cpu_period\n    if cpu_shares:\n        data['CpuShares'] = cpu_shares\n    if cpu_quota:\n        data['CpuQuota'] = cpu_quota\n    if cpuset_cpus:\n        data['CpusetCpus'] = cpuset_cpus\n    if cpuset_mems:\n        data['CpusetMems'] = cpuset_mems\n    if mem_limit:\n        data['Memory'] = utils.parse_bytes(mem_limit)\n    if mem_reservation:\n        data['MemoryReservation'] = utils.parse_bytes(mem_reservation)\n    if memswap_limit:\n        data['MemorySwap'] = utils.parse_bytes(memswap_limit)\n    if kernel_memory:\n        data['KernelMemory'] = utils.parse_bytes(kernel_memory)\n    if restart_policy:\n        if utils.version_lt(self._version, '1.23'):\n            raise errors.InvalidVersion('restart policy update is not supported for API version < 1.23')\n        data['RestartPolicy'] = restart_policy\n    res = self._post_json(url, data=data)\n    return self._result(res, True)", "docstring": "Update resource configs of one or more containers.\n\nArgs:\ncontainer (str): The container to inspect\nblkio_weight (int): Block IO (relative weight), between 10 and 1000\ncpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period\ncpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota\ncpu_shares (int): CPU shares (relative weight)\ncpuset_cpus (str): CPUs in which to allow execution\ncpuset_mems (str): MEMs in which to allow execution\nmem_limit (int or str): Memory limit\nmem_reservation (int or str): Memory soft limit\nmemswap_limit (int or str): Total memory (memory + swap), -1 to\ndisable swap\nkernel_memory (int or str): Kernel memory limit\nrestart_policy (dict): Restart policy dictionary\n\nReturns:\n(dict): Dictionary containing a ``Warnings`` key.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def __get_scope(cls, expr: Union[('Expression', Tuple)]) -> Set[str]:\n    scope = set()\n    for (i, atom) in enumerate(expr):\n        if isinstance(atom, Expression):\n            scope.update(cls.__get_scope(atom._expr))\n        elif (type(atom) in [tuple, list]):\n            scope.update(cls.__get_scope(atom))\n        elif (atom == 'pvar_expr'):\n            (functor, params) = expr[(i + 1)]\n            arity = (len(params) if (params is not None) else 0)\n            name = '{}/{}'.format(functor, arity)\n            scope.add(name)\n            break\n    return scope", "docstring": "Returns the set of fluents in the expression's scope.\n\nArgs:\nexpr: Expression object or nested tuple of Expressions.\n\nReturns:\nThe set of fluents in the expression's scope.", "source": "codesearchnet"}
{"code": "def _lease_owned(self, lease, current_uuid_path):\n        \n\n        prev_uuid_path, prev_uuid = lease.metadata\n\n        with open(current_uuid_path) as f:\n            current_uuid = f.read()\n\n        return \\\n            current_uuid_path == prev_uuid_path and \\\n            prev_uuid == current_uuid", "docstring": "Checks if the given lease is owned by the prefix whose uuid is in\nthe given path\n\nNote:\nThe prefix must be also in the same path it was when it took the\nlease\n\nArgs:\npath (str): Path to the lease\ncurrent_uuid_path (str): Path to the uuid to check ownership of\n\nReturns:\nbool: ``True`` if the given lease in owned by the prefix,\n``False`` otherwise", "source": "juraj-google-style"}
{"code": "def links(res: requests.models.Response,\n          search: str = None,\n          pattern: str = None) -> list:\n    \n    hrefs = [link.to_text() for link in find_all_links(res.text)]\n    if search:\n        hrefs = [href for href in hrefs if search in href]\n    if pattern:\n        hrefs = [href for href in hrefs if re.findall(pattern, href)]\n    return list(set(hrefs))", "docstring": "Get the links of the page.\n\nArgs:\nres (requests.models.Response): The response of the page.\nsearch (str, optional): Defaults to None. Search the links you want.\npattern (str, optional): Defaults to None. Search the links use a regex pattern.\n\nReturns:\nlist: All the links of the page.", "source": "juraj-google-style"}
{"code": "def _pycurl_post(self, url, json=None, data=None, username='', password='', headers={}, timeout=30):\n    response_headers = {}\n    curl = pycurl.Curl()\n    curl.setopt(curl.URL, url)\n    if (sys.version_info[0] >= 3):\n        stringbuffer = BytesIO()\n    else:\n        stringbuffer = StringIO()\n    curl.setopt(curl.WRITEDATA, stringbuffer)\n    headers['User-Agent'] = self.user_agent\n    if (sys.version_info[0] >= 3):\n        header_list = [('%s:%s' % (k, v)) for (k, v) in headers.items()]\n    else:\n        header_list = [('%s:%s' % (k, v)) for (k, v) in headers.iteritems()]\n    if json:\n        header_list.append('Content-Type:application/json')\n    curl.setopt(pycurl.HTTPHEADER, header_list)\n    raw_store = json\n    raw_request = (json_lib.dumps(json) if json else urlencode(data))\n    curl.setopt(curl.POSTFIELDS, raw_request)\n    if (username and password):\n        curl.setopt(curl.USERPWD, ('%s:%s' % (username, password)))\n    curl.setopt(curl.TIMEOUT, timeout)\n    curl.perform()\n    result = stringbuffer.getvalue()\n    status_code = curl.getinfo(curl.RESPONSE_CODE)\n    curl.close()\n    raw_request = raw_store\n    return (result, raw_request, status_code, response_headers)", "docstring": "This function will POST to the url endpoint using pycurl. returning\nan AdyenResult object on 200 HTTP responce. Either json or data has to\nbe provided. If username and password are provided, basic auth will be\nused.\n\n\nArgs:\nurl (str): url to send the POST\njson (dict, optional): Dict of the JSON to POST\ndata (dict, optional): Dict, presumed flat structure\nof key/value of request to place\nusername (str, optional): Username for basic auth. Must be included\nas part of password.\npassword (str, optional): Password for basic auth. Must be included\nas part of username.\nheaders (dict, optional): Key/Value pairs of headers to include\ntimeout (int, optional): Default 30. Timeout for the request.\n\nReturns:\nstr:    Raw response received\nstr:    Raw request placed\nint:    HTTP status code, eg 200,404,401\ndict:   Key/Value pairs of the headers received.", "source": "codesearchnet"}
{"code": "def load_pickle(file, encoding=None):\n    \n    \n    if encoding:\n        with open(file, 'rb') as f:\n            return pickle.load(f, encoding=encoding)\n\n    with open(file, 'rb') as f:\n       return pickle.load(f)", "docstring": "Load a pickle file.\n\nArgs:\nfile (str): Path to pickle file\n\nReturns:\nobject: Loaded object from pickle file", "source": "juraj-google-style"}
{"code": "def delete_contexts(self, context_id_list):\n        \n        for c_id in context_id_list:\n            if c_id in self._contexts:\n                del self._contexts[c_id]", "docstring": "Delete contexts from the ContextManager.\n\nArgs:\ncontext_id_list (list): a list of context ids\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _save_function_alias(saved_model_dir: str, tags: Collection[str], function_aliases: Mapping[str, str]) -> None:\n    loader = saved_model_loader.SavedModelLoader(saved_model_dir)\n    meta_graph_def = loader.get_meta_graph_def_from_tags(tags)\n    for function_name, function_alias in function_aliases.items():\n        meta_graph_def.meta_info_def.function_aliases[function_name] = function_alias\n    saved_model_proto_serialized = loader.saved_model.SerializeToString()\n    path = file_io.join(saved_model_dir, saved_model_constants.SAVED_MODEL_FILENAME_PB)\n    file_io.atomic_write_string_to_file(path, saved_model_proto_serialized)", "docstring": "Saves the function alias to the SavedModel.\n\nSavedModelBuilder (TF1 saved model saver) does not support saving function\naliases, so this function loads the SavedModel proto and adds the\n`function_aliases` field.\n\nArgs:\nsaved_model_dir: Path to the saved model directory.\ntags: A collection of tags to specify the meta graph.\nfunction_aliases: Function name -> function alias mapping.", "source": "github-repos"}
{"code": "def CheckEmptyBlockBody(filename, clean_lines, linenum, error):\n  \n\n  \n  \n  \n  \n  \n  \n  line = clean_lines.elided[linenum]\n  matched = Match(r'\\s*(for|while|if)\\s*\\(', line)\n  if matched:\n    \n    (end_line, end_linenum, end_pos) = CloseExpression(\n        clean_lines, linenum, line.find('('))\n\n    \n    \n    \n    if end_pos >= 0 and Match(r';', end_line[end_pos:]):\n      if matched.group(1) == 'if':\n        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,\n              'Empty conditional bodies should use {}')\n      else:\n        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,\n              'Empty loop bodies should use {} or continue')\n\n    \n    \n    if end_pos >= 0 and matched.group(1) == 'if':\n      \n      \n      opening_linenum = end_linenum\n      opening_line_fragment = end_line[end_pos:]\n      \n      while not Search(r'^\\s*\\{', opening_line_fragment):\n        if Search(r'^(?!\\s*$)', opening_line_fragment):\n          \n          return\n        opening_linenum += 1\n        if opening_linenum == len(clean_lines.elided):\n          \n          return\n        opening_line_fragment = clean_lines.elided[opening_linenum]\n      \n      opening_line = clean_lines.elided[opening_linenum]\n\n      \n      opening_pos = opening_line_fragment.find('{')\n      if opening_linenum == end_linenum:\n        \n        opening_pos += end_pos\n      (closing_line, closing_linenum, closing_pos) = CloseExpression(\n          clean_lines, opening_linenum, opening_pos)\n      if closing_pos < 0:\n        return\n\n      \n      \n      \n      if (clean_lines.raw_lines[opening_linenum] !=\n          CleanseComments(clean_lines.raw_lines[opening_linenum])):\n        \n        return\n      if closing_linenum > opening_linenum:\n        \n        bodylist = list(opening_line[opening_pos+1:])\n        \n        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])\n        \n        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])\n        body = '\\n'.join(bodylist)\n      else:\n        \n        body = opening_line[opening_pos+1:closing_pos-1]\n\n      \n      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):\n        return\n      \n      current_linenum = closing_linenum\n      current_line_fragment = closing_line[closing_pos:]\n      \n      while Search(r'^\\s*$|^(?=\\s*else)', current_line_fragment):\n        if Search(r'^(?=\\s*else)', current_line_fragment):\n          \n          return\n        current_linenum += 1\n        if current_linenum == len(clean_lines.elided):\n          break\n        current_line_fragment = clean_lines.elided[current_linenum]\n\n      \n      error(filename, end_linenum, 'whitespace/empty_if_body', 4,\n            ('If statement had no body and no else clause'))", "docstring": "Look for empty loop/conditional body with only a single semicolon.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def is_clockwise(vertices):\n    \n    it = iterator.consecutive(cycle(vertices), 3)\n    clockwise = 0\n    counter = 0\n    for _ in range(len(vertices)):\n        p0, p1, p2 = next(it)\n        cross = cross_product(p1, p2, p0)\n        int_angle = interior_angle(p0, p2, p1)  \n        if cross < 0:\n            clockwise += int_angle\n            counter += 2 * pi - int_angle\n        else:\n            clockwise += 2 * pi - int_angle\n            counter += int_angle\n    if round(clockwise / pi) == len(vertices) - 2:\n        return True\n    elif round(counter / pi) == len(vertices) - 2:\n        return False\n    else:\n        raise ValueError(\"the polygon is complex or overlapped\")", "docstring": "Evaluate whether vertices are in clockwise order.\nArgs:\nvertices: list of vertices (x, y) in polygon.\nReturns:\nTrue: clockwise, False: counter-clockwise\nRaises:\nValueError: the polygon is complex or overlapped.", "source": "juraj-google-style"}
{"code": "def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):\n    with tf.variable_scope(name, default_name='edge_vectors'):\n        att_adj_vectors_shape = [num_edge_types, depth]\n        adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)\n        adj_vectors = (tf.get_variable('adj_vectors', att_adj_vectors_shape, initializer=tf.random_normal_initializer(0, (depth ** (- 0.5)))) * (depth ** 0.5))\n        adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types)\n        att_adj_vectors = tf.matmul(tf.reshape(tf.to_float(adjacency_matrix_one_hot), [(- 1), num_edge_types]), adj_vectors)\n        return tf.reshape(att_adj_vectors, [adjacency_matrix_shape[0], adjacency_matrix_shape[1], adjacency_matrix_shape[2], depth])", "docstring": "Gets edge vectors for the edge types in the adjacency matrix.\n\nArgs:\nadjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.\nnum_edge_types: Number of different edge types\ndepth: Number of channels\nname: a string\nReturns:\nA [batch, num_nodes, num_nodes, depth] vector of tensors", "source": "codesearchnet"}
{"code": "def ms_bot_framework(self) -> dict:\n    card_action = {}\n    card_action['type'] = 'postBack'\n    card_action['title'] = self.name\n    card_action['value'] = self.callback = self.callback\n    return card_action", "docstring": "Returns MS Bot Framework compatible state of the Button instance.\n\nCreates MS Bot Framework CardAction (button) with postBack value return.\n\nReturns:\ncontrol_json: MS Bot Framework representation of Button state.", "source": "codesearchnet"}
{"code": "def _combine_named_parameters(**kwargs) -> list[OrderedDict[str, Any]]:\n    sort_by_key = lambda k: k[0]\n    combinations: list[list[tuple[str, Any]]] = []\n    for key, values in sorted(kwargs.items(), key=sort_by_key):\n        if not isinstance(values, list):\n            values = [values]\n        combinations.append([(key, value) for value in values])\n    return [OrderedDict(result) for result in itertools.product(*combinations)]", "docstring": "Generate combinations based on its keyword arguments.\n\nTwo sets of returned combinations can be concatenated using +.  Their product\ncan be computed using `times()`.\n\nArgs:\n**kwargs: keyword arguments of form `option=[possibilities, ...]` or\n`option=the_only_possibility`.\n\nReturns:\na list of dictionaries for each combination. Keys in the dictionaries are\nthe keyword argument names.  Each key has one value - one of the\ncorresponding keyword argument values.", "source": "github-repos"}
{"code": "def copy_foreign_keys(self, event):\n        \n        event_keys = set(event._meta.fields.keys())\n        obj_keys = self._meta.fields.keys()\n        matching_keys = event_keys.intersection(obj_keys)\n\n        for key in matching_keys:\n            \n            \n            if key == 'created_by':\n                continue\n\n            \n            if not isinstance(self._meta.fields[key], peewee.ForeignKeyField):\n                continue\n\n            setattr(event, key, getattr(self, key))\n\n        \n        \n        \n        \n        \n        possible_key = self.__class__.__name__.lower()\n\n        if possible_key in event_keys and event.code != 'AUDIT_DELETE':\n            setattr(event, possible_key, self)", "docstring": "Copies possible foreign key values from the object into the Event,\nskipping common keys like modified and created.\n\nArgs:\nevent (Event): The Event instance to copy the FKs into\nobj (fleaker.db.Model): The object to pull the values from", "source": "juraj-google-style"}
{"code": "def _draw_breakpoint_icon(self, top, painter, icon_name):\n        \n        rect = QRect(0, top, self.sizeHint().width(),\n                     self.sizeHint().height())\n        try:\n            icon = self.icons[icon_name]\n        except KeyError as e:\n            debug_print(\"Breakpoint icon doen't exist, {}\".format(e))\n        else:\n            icon.paint(painter, rect)", "docstring": "Draw the given breakpoint pixmap.\n\nArgs:\ntop (int): top of the line to draw the breakpoint icon.\npainter (QPainter)\nicon_name (srt): key of icon to draw (see: self.icons)", "source": "juraj-google-style"}
{"code": "def MakePmfFromHist(hist, name=None):\n    if (name is None):\n        name = hist.name\n    d = dict(hist.GetDict())\n    pmf = Pmf(d, name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a normalized PMF from a Hist object.\n\nArgs:\nhist: Hist object\nname: string name\n\nReturns:\nPmf object", "source": "codesearchnet"}
{"code": "def most_specific_common_supertype(self, others):\n    if not all((isinstance(other, TensorArraySpec) for other in others)):\n        return False\n    common_shape = self._element_shape.most_specific_common_supertype((other._element_shape for other in others))\n    if common_shape is None:\n        return None\n    if not all((self._dtype == other._dtype for other in others)):\n        return None\n    if not all((self._dynamic_size == other._dynamic_size for other in others)):\n        return None\n    infer_shape = self._infer_shape and all((other._infer_shape for other in others))\n    return TensorArraySpec(common_shape, self._dtype, self._dynamic_size, infer_shape)", "docstring": "Returns the most specific supertype of `self` and `others`.\n\nArgs:\nothers: A Sequence of `TypeSpec`.\n\nReturns `None` if a supertype does not exist.", "source": "github-repos"}
{"code": "def imag(self):\n\n    def im(val):\n        if hasattr(val, 'imag'):\n            return val.imag\n        elif hasattr(val, 'as_real_imag'):\n            return val.as_real_imag()[1]\n        elif hasattr(val, 'conjugate'):\n            return ((val.conjugate() - val) / (2 * I))\n        else:\n            raise NoConjugateMatrix(('Matrix entry %s contains has no defined conjugate' % str(val)))\n    return self.element_wise(im)", "docstring": "Element-wise imaginary part\n\nRaises:\nNoConjugateMatrix: if entries have no `conjugate` method and no\nother way to determine the imaginary part\n\nNote:\nA mathematically equivalent way to obtain an imaginary matrix from\na complex matrix ``M`` is::\n\n(M.conjugate() - M) / (I * 2)\n\nwith same same caveats as :attr:`real`.", "source": "codesearchnet"}
{"code": "def get(self):\n    return self._diff_median_tracker.get()", "docstring": "Retrieves the current MAD value.\n\nReturns:\nfloat: The MAD of the values within the defined window. Returns `NaN` if\nthe window is empty.", "source": "github-repos"}
{"code": "def convert_dict_to_compatible_tensor(values, targets):\n  \n  result = {}\n  for key, value in sorted(values.items()):\n    result[key] = _convert_to_compatible_tensor(\n        value, targets[key], error_prefix=\"Can't convert %r\" % key)\n  return result", "docstring": "Converts dict `values` in tensors that are compatible with `targets`.\n\nArgs:\nvalues: A dict to objects to convert with same keys as `targets`.\ntargets: A dict returned by `parse_tensor_info_map`.\n\nReturns:\nA map with the same keys as `values` but values converted into\nTensor/SparseTensors that can be fed into `protomap`.\n\nRaises:\nTypeError: If it fails to convert.", "source": "juraj-google-style"}
{"code": "def pad_image(self, image: 'torch.Tensor', size: SizeDict, random_padding: bool=False) -> 'torch.Tensor':\n    output_height, output_width = (size.height, size.width)\n    input_height, input_width = image.shape[-2:]\n    delta_width = output_width - input_width\n    delta_height = output_height - input_height\n    if random_padding:\n        pad_top = torch.random.randint(low=0, high=delta_height + 1)\n        pad_left = torch.random.randint(low=0, high=delta_width + 1)\n    else:\n        pad_top = delta_height \n        pad_left = delta_width \n    pad_bottom = delta_height - pad_top\n    pad_right = delta_width - pad_left\n    padding = (pad_left, pad_top, pad_right, pad_bottom)\n    return F.pad(image, padding)", "docstring": "Pad the image to the specified size.\n\nArgs:\nimage (`torch.Tensor`):\nThe image to be padded.\nsize (`Dict[str, int]`):\nThe size `{\"height\": h, \"width\": w}` to pad the image to.\nrandom_padding (`bool`, *optional*, defaults to `False`):\nWhether to use random padding or not.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe data format of the output image. If unset, the same format as the input image is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def resolve_variables(self, provided_variables):\n        \n        \n        self.resolved_variables = {}\n        variable_dict = dict((var.name, var) for var in provided_variables)\n        for var_name, _var_def in variable_dict.items():\n            value = resolve_variable(\n                variable_dict.get(var_name),\n                self.name\n            )\n            if value is not None:\n                self.resolved_variables[var_name] = value\n\n        \n        \n        defined_variables = self.get_parameter_definitions()\n        self.resolved_variables = {}\n        variable_dict = dict((var.name, var) for var in provided_variables)\n        for var_name, _var_def in defined_variables.items():\n            value = resolve_variable(\n                variable_dict.get(var_name),\n                self.name\n            )\n            if value is not None:\n                self.resolved_variables[var_name] = value", "docstring": "Resolve the values of the blueprint variables.\n\nThis will resolve the values of the template parameters with values\nfrom the env file, the config, and any lookups resolved. The\nresolution is run twice, in case the blueprint is jinja2 templated\nand requires provided variables to render.\n\nArgs:\nprovided_variables (list of :class:`stacker.variables.Variable`):\nlist of provided variables", "source": "juraj-google-style"}
{"code": "def _Extract(\n      self, source_path_specs, destination_path, output_writer,\n      skip_duplicates=True):\n    \n    output_writer.Write('Extracting file entries.\\n')\n    path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(\n        source_path_specs, resolver_context=self._resolver_context)\n\n    for path_spec in path_spec_generator:\n      self._ExtractFileEntry(\n          path_spec, destination_path, output_writer,\n          skip_duplicates=skip_duplicates)", "docstring": "Extracts files.\n\nArgs:\nsource_path_specs (list[dfvfs.PathSpec]): path specifications to extract.\ndestination_path (str): path where the extracted files should be stored.\noutput_writer (CLIOutputWriter): output writer.\nskip_duplicates (Optional[bool]): True if files with duplicate content\nshould be skipped.", "source": "juraj-google-style"}
{"code": "def reset(self, indices, observations):\n    assert isinstance(indices, np.ndarray)\n    assert (len(indices.shape) == 1)\n    assert isinstance(observations, np.ndarray)\n    assert (indices.shape[0] == observations.shape[0])\n    for (index, observation) in zip(indices, observations):\n        trajectory = self._trajectories[index]\n        if (not trajectory.is_active):\n            trajectory.add_time_step(observation=observation)\n            continue\n        self._complete_trajectory(trajectory, index)\n        self._trajectories[index].add_time_step(observation=observation)", "docstring": "Resets trajectories at given indices and populates observations.\n\nReset can either be called right at the beginning, when there are no\ntime-steps, or to reset a currently active trajectory.\n\nIf resetting a currently active trajectory then we save it in\nself._completed_trajectories.\n\nArgs:\nindices: 1-D np.ndarray stating the indices to reset.\nobservations: np.ndarray of shape (indices len, obs.shape) of observations", "source": "codesearchnet"}
{"code": "def find_yang_file(profile, filename, path):\n    module_dir = os.path.dirname(__file__)\n    full_path = os.path.join(module_dir, 'mappings', profile, path, filename)\n    if os.path.exists(full_path):\n        return full_path\n    else:\n        msg = \"Couldn't find parsing file: {}\".format(full_path)\n        logger.error(msg)\n        raise IOError(msg)", "docstring": "Find the necessary file for the given test case.\n\nArgs:\ndevice(napalm device connection): for which device\nfilename(str): file to find\npath(str): where to find it relative to where the module is installed", "source": "codesearchnet"}
{"code": "def _ConvertFieldValuePair(js, message):\n  \n  names = []\n  message_descriptor = message.DESCRIPTOR\n  for name in js:\n    try:\n      field = message_descriptor.fields_by_camelcase_name.get(name, None)\n      if not field:\n        raise ParseError(\n            'Message type \"{0}\" has no field named \"{1}\".'.format(\n                message_descriptor.full_name, name))\n      if name in names:\n        raise ParseError(\n            'Message type \"{0}\" should not have multiple \"{1}\" fields.'.format(\n                message.DESCRIPTOR.full_name, name))\n      names.append(name)\n      \n      if field.containing_oneof is not None:\n        oneof_name = field.containing_oneof.name\n        if oneof_name in names:\n          raise ParseError('Message type \"{0}\" should not have multiple \"{1}\" '\n                           'oneof fields.'.format(\n                               message.DESCRIPTOR.full_name, oneof_name))\n        names.append(oneof_name)\n\n      value = js[name]\n      if value is None:\n        message.ClearField(field.name)\n        continue\n\n      \n      if _IsMapEntry(field):\n        message.ClearField(field.name)\n        _ConvertMapFieldValue(value, message, field)\n      elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n        message.ClearField(field.name)\n        if not isinstance(value, list):\n          raise ParseError('repeated field {0} must be in [] which is '\n                           '{1}.'.format(name, value))\n        if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n          \n          for item in value:\n            sub_message = getattr(message, field.name).add()\n            \n            if (item is None and\n                sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):\n              raise ParseError('null is not allowed to be used as an element'\n                               ' in a repeated field.')\n            _ConvertMessage(item, sub_message)\n        else:\n          \n          for item in value:\n            if item is None:\n              raise ParseError('null is not allowed to be used as an element'\n                               ' in a repeated field.')\n            getattr(message, field.name).append(\n                _ConvertScalarFieldValue(item, field))\n      elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n        sub_message = getattr(message, field.name)\n        _ConvertMessage(value, sub_message)\n      else:\n        setattr(message, field.name, _ConvertScalarFieldValue(value, field))\n    except ParseError as e:\n      if field and field.containing_oneof is None:\n        raise ParseError('Failed to parse {0} field: {1}'.format(name, e))\n      else:\n        raise ParseError(str(e))\n    except ValueError as e:\n      raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))\n    except TypeError as e:\n      raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))", "docstring": "Convert field value pairs into regular message.\n\nArgs:\njs: A JSON object to convert the field value pairs.\nmessage: A regular protocol message to record the data.\n\nRaises:\nParseError: In case of problems converting.", "source": "juraj-google-style"}
{"code": "def resolve_workdir_path(cls, start_path=os.curdir):\n        \n        if start_path == 'auto':\n            start_path = os.curdir\n\n        cur_path = start_path\n\n        LOGGER.debug(\n            'Checking if %s is a workdir',\n            os.path.abspath(cur_path),\n        )\n        if cls.is_workdir(cur_path):\n            return os.path.abspath(cur_path)\n\n        \n        cur_path = os.path.join(start_path, '.lago')\n        while not cls.is_workdir(cur_path):\n            LOGGER.debug('%s is not a workdir', cur_path)\n            cur_path = os.path.normpath(\n                os.path.join(cur_path, '..', '..', '.lago')\n            )\n            LOGGER.debug('Checking %s for a workdir', cur_path)\n            if os.path.realpath(os.path.join(cur_path, '..')) == '/':\n                \n                \n                candidates = []\n                for path in os.listdir(os.curdir):\n                    if os.path.isdir(path):\n                        dirs = os.listdir(path)\n                        if 'current' in dirs:\n                            candidates.append(\n                                os.path.abspath(os.path.join(os.curdir, path))\n                            )\n                        elif '.lago' in dirs:\n                            candidates.append(\n                                os.path.abspath(\n                                    os.path.join(os.curdir, path, '.lago')\n                                )\n                            )\n                candidates = filter(Workdir.is_possible_workdir, candidates)\n                for idx in range(len(candidates)):\n                    if os.path.split(candidates[idx])[1] == '.lago':\n                        candidates[idx] = os.path.dirname(candidates[idx])\n\n                msg = 'Unable to find workdir in {0}'.format(\n                    os.path.abspath(start_path)\n                )\n                if candidates:\n                    msg += '\\nFound possible workdirs in: {0}'.format(\n                        ', '.join(candidates)\n                    )\n                raise LagoUserException(msg)\n\n        return os.path.abspath(cur_path)", "docstring": "Look for an existing workdir in the given path, in a path/.lago dir,\nor in a .lago dir under any of it's parent directories\n\nArgs:\nstart_path (str): path to start the search from, if None passed, it\nwill use the current dir\n\nReturns:\nstr: path to the found prefix\n\nRaises:\nLagoUserException: if no prefix was found", "source": "juraj-google-style"}
{"code": "def save_image(byteio, imgfmt):\n    from os import path, mkdir\n    ptdir = '{}.{}'.format(project, task)\n    uuid = str(uuid4())\n    idir = path.join(dbdir, ptdir)\n    if (not path.isdir(idir)):\n        mkdir(idir)\n    ipath = path.join(idir, '{}.{}'.format(uuid, imgfmt))\n    with open(ipath, 'wb') as f:\n        f.write(byteio)\n    return uuid", "docstring": "Saves the specified image to disk.\n\nArgs:\nbyteio (bytes): image bytes to save to disk.\nimgfmt (str): used as the extension of the saved file.\n\nReturns:\nstr: a uuid for the saved image that can be added to the database entry.", "source": "codesearchnet"}
{"code": "def get_service_credentials(pipeline_options):\n    return _Credentials.get_service_credentials(pipeline_options)", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nGet credentials to access Azure services.\nArgs:\npipeline_options: Pipeline options, used in creating credentials\nlike managed identity credentials.\n\nReturns:\nA ``azure.identity.*Credential`` object or None if credentials\nnot found. Returned object is thread-safe.", "source": "github-repos"}
{"code": "def filepath(self):\n    if hasattr(self, 'local_path'):\n        return self.local_path\n    if (self.scheme in ['ftp', 'http', 'https', 'globus']):\n        return self.filename\n    elif (self.scheme in ['file']):\n        return self.path\n    else:\n        raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))", "docstring": "Return the resolved filepath on the side where it is called from.\n\nThe appropriate filepath will be returned when called from within\nan app running remotely as well as regular python on the client side.\n\nArgs:\n- self\nReturns:\n- filepath (string)", "source": "codesearchnet"}
{"code": "def rules(self):\n    list_of_rules = []\n    for main_row in self.dict_rules:\n        if ('rules' in main_row):\n            for rule_row in main_row['rules']:\n                if ('grants' in rule_row):\n                    for grant_row in rule_row['grants']:\n                        if ('group_id' in grant_row):\n                            group_id = grant_row['group_id']\n                            if ('name' in grant_row):\n                                row_name = grant_row['name']\n                            else:\n                                row_name = None\n                            fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_group_id=group_id, rules_grants_name=row_name, rules_description=grant_row['description'])\n                            list_of_rules.append(fr)\n                        elif ('cidr_ip' in grant_row):\n                            fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_cidr_ip=grant_row['cidr_ip'], rules_description=grant_row['description'])\n                            list_of_rules.append(fr)\n                        else:\n                            raise ValueError('Unsupported grant:', grant_row)\n                else:\n                    fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'])\n                    list_of_rules.append(fr)\n        else:\n            fr = FirewallRule(main_row['id'], main_row['name'], main_row['description'])\n            list_of_rules.append(fr)\n    sorted_list = sorted(list_of_rules, key=(lambda fr: (str(fr.id), str(fr.name), str(fr.description), str(fr.rules_direction), str(fr.rules_ip_protocol), str(fr.rules_from_port), str(fr.rules_to_port), str(fr.rules_grants_group_id), str(fr.rules_grants_name), str(fr.rules_grants_cidr_ip))))\n    return sorted_list", "docstring": "Returns a sorted list of firewall rules.\n\nReturns:\nlist", "source": "codesearchnet"}
{"code": "def metadata(self, path):\n    try:\n        file_metadata = self._gcsIO()._status(path)\n        return FileMetadata(path, file_metadata['size'], file_metadata['updated'])\n    except Exception as e:\n        raise BeamIOError('Metadata operation failed', {path: e})", "docstring": "Fetch metadata fields of a file on the FileSystem.\n\nArgs:\npath: string path of a file.\n\nReturns:\n:class:`~apache_beam.io.filesystem.FileMetadata`.\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def get_existing_test_names(self):\n    test_names = []\n    for name, _ in inspect.getmembers(type(self), callable):\n        if name.startswith('test_'):\n            test_names.append(name)\n    return test_names + list(self._generated_test_table.keys())", "docstring": "Gets the names of existing tests in the class.\n\nA method in the class is considered a test if its name starts with\n'test_*'.\n\nNote this only gets the names of tests that already exist. If\n`generate_tests` has not happened when this was called, the\ngenerated tests won't be listed.\n\nReturns:\nA list of strings, each is a test method name.", "source": "github-repos"}
{"code": "def save_lines(lines, filename):\n    with open(filename, 'w', encoding='utf-8') as f:\n        f.write('\\n'.join(lines))", "docstring": "Save an array of lines to a file.\n\nArgs:\nlines: An array of strings that will be saved as individual lines.\nfilename: Path to the output file.", "source": "codesearchnet"}
{"code": "def _ParseValueData(self, knowledge_base, value_data):\n    \n    if not isinstance(value_data, py2to3.UNICODE_TYPE):\n      raise errors.PreProcessFail(\n          'Unsupported Windows Registry value type: {0:s} for '\n          'artifact: {1:s}.'.format(\n              type(value_data), self.ARTIFACT_DEFINITION_NAME))\n\n    if not knowledge_base.GetValue('operating_system_product'):\n      knowledge_base.SetValue('operating_system_product', value_data)", "docstring": "Parses Windows Registry value data for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nvalue_data (object): Windows Registry value data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def sequential_spherical(xyz):\n    \n    d_xyz = np.diff(xyz,axis=0)\n    \n    r = np.linalg.norm(d_xyz,axis=1)\n    theta = np.arctan2(d_xyz[:,1], d_xyz[:,0])\n    hyp = d_xyz[:,0]**2 + d_xyz[:,1]**2\n    phi = np.arctan2(np.sqrt(hyp), d_xyz[:,2])\n    \n    return (r,theta,phi)", "docstring": "Converts sequence of cartesian coordinates into a sequence of\nline segments defined by spherical coordinates.\n\nArgs:\nxyz = 2d numpy array, each row specifies a point in\ncartesian coordinates (x,y,z) tracing out a\npath in 3D space.\n\nReturns:\nr = lengths of each line segment (1D array)\ntheta = angles of line segments in XY plane (1D array)\nphi = angles of line segments down from Z axis (1D array)", "source": "juraj-google-style"}
{"code": "def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):\n        \n        if topology_only:\n            self.points = drp(self.points, eps)\n        else:\n            self.points = spt(self.points, max_dist_error, max_speed_error)\n        return self", "docstring": "In-place segment simplification\n\nSee `drp` and `compression` modules\n\nArgs:\neps (float): Distance threshold for the `drp` function\nmax_dist_error (float): Max distance error, in meters\nmax_speed_error (float): Max speed error, in km/h\ntopology_only (bool, optional): True to only keep topology, not considering\ntimes when simplifying. Defaults to False.\nReturns:\n:obj:`Segment`", "source": "juraj-google-style"}
{"code": "def simple_lmdb_settings(path, map_size=1000000000.0, user_supplied_id=False):\n\n    def decorator(cls):\n        provider = (ff.UserSpecifiedIdProvider(key='_id') if user_supplied_id else ff.UuidProvider())\n\n        class Settings(ff.PersistenceSettings):\n            id_provider = provider\n            key_builder = ff.StringDelimitedKeyBuilder('|')\n            database = ff.LmdbDatabase(path, key_builder=key_builder, map_size=map_size)\n\n        class Model(cls, Settings):\n            pass\n        Model.__name__ = cls.__name__\n        Model.__module__ = cls.__module__\n        return Model\n    return decorator", "docstring": "Creates a decorator that can be used to configure sane default LMDB\npersistence settings for a model\n\nArgs:\npath (str): The path where the LMDB database files will be created\nmap_size (int): The amount of space to allot for the database", "source": "codesearchnet"}
{"code": "def jwt_is_expired(self, access_token=None, leeway=0):\n        \n        if access_token is not None:\n            exp = self._decode_exp(access_token)\n        else:\n            exp = self.jwt_exp\n        now = time()\n        if exp < (now - leeway):\n            return True\n        return False", "docstring": "Validate JWT access token expiration.\n\nArgs:\naccess_token (str): Access token to validate. Defaults to ``None``.\nleeway (float): Time in seconds to adjust for local clock skew. Defaults to 0.\n\nReturns:\nbool: ``True`` if expired, otherwise ``False``.", "source": "juraj-google-style"}
{"code": "def convert(cls, content, input_format, output_format):\n    assert (input_format in ('srt', 'sjson'))\n    assert (output_format in ('srt', 'sjson'))\n    content = content.decode('utf-8-sig')\n    if (input_format == output_format):\n        return content\n    if (input_format == 'srt'):\n        if (output_format == 'sjson'):\n            try:\n                srt_subs = SubRipFile.from_string(content, error_handling=SubRipFile.ERROR_RAISE)\n            except Error as ex:\n                raise TranscriptsGenerationException(text_type(ex))\n            return json.dumps(cls.generate_sjson_from_srt(srt_subs))\n    if (input_format == 'sjson'):\n        if (output_format == 'srt'):\n            return cls.generate_srt_from_sjson(json.loads(content))", "docstring": "Convert transcript `content` from `input_format` to `output_format`.\n\nArguments:\ncontent: Transcript content byte-stream.\ninput_format: Input transcript format.\noutput_format: Output transcript format.\n\nAccepted input formats: sjson, srt.\nAccepted output format: srt, sjson.\n\nRaises:\nTranscriptsGenerationException: On parsing the invalid srt\ncontent during conversion from srt to sjson.", "source": "codesearchnet"}
{"code": "def optimize(node):\n  \n  node = dead_code_elimination(node)\n  node = constant_folding(node)\n  node = assignment_propagation(node)\n  return node", "docstring": "Perform a series of optimization passes.\n\nThis function performs a series of optimizations (dead code elimination,\nconstant folding, variable folding) on the given AST.\nIt optimizes the code repeatedly until reaching a fixed point. The fixed\npoint is determine roughly by checking whether the number of lines of\ngenerated source code changed after the latest pass.\n\nArgs:\nnode: The AST to optimize.\nReturns:\nThe optimized AST.", "source": "juraj-google-style"}
{"code": "def _add_genotype_calls(self, variant_obj, variant_line, case_obj):\n    variant_line = variant_line.split('\\t')\n    if (len(variant_line) > 8):\n        gt_format = variant_line[8].split(':')\n        for individual in case_obj.individuals:\n            sample_id = individual.ind_id\n            index = individual.ind_index\n            gt_call = variant_line[(9 + index)].split(':')\n            raw_call = dict(zip(gt_format, gt_call))\n            genotype = Genotype(**raw_call)\n            variant_obj.add_individual(puzzle_genotype(sample_id=sample_id, genotype=genotype.genotype, case_id=case_obj.name, phenotype=individual.phenotype, ref_depth=genotype.ref_depth, alt_depth=genotype.alt_depth, genotype_quality=genotype.genotype_quality, depth=genotype.depth_of_coverage, supporting_evidence=genotype.supporting_evidence, pe_support=genotype.pe_support, sr_support=genotype.sr_support))", "docstring": "Add the genotype calls for the variant\n\nArgs:\nvariant_obj (puzzle.models.Variant)\nvariant_dict (dict): A variant dictionary\ncase_obj (puzzle.models.Case)", "source": "codesearchnet"}
{"code": "def reset_state(self, reset_state):\n    if isinstance(reset_state, int):\n        self._pool.map(_reset_state, self._shard_num_args({'reset_state': reset_state}))\n    elif isinstance(reset_state, np.ndarray):\n        sim.validate_normalized_state(reset_state, self._num_qubits)\n        args = []\n        for kwargs in self._shard_num_args():\n            shard_num = kwargs['shard_num']\n            shard_size = (1 << kwargs['num_shard_qubits'])\n            start = (shard_num * shard_size)\n            end = (start + shard_size)\n            kwargs['reset_state'] = reset_state[start:end]\n            args.append(kwargs)\n        self._pool.map(_reset_state, args)", "docstring": "Reset the state to the given initial state.\n\nArgs:\nreset_state: If this is an int, then this is the state to reset\nthe stepper to, expressed as an integer of the computational\nbasis. Integer to bitwise indices is little endian. Otherwise\nif this is a np.ndarray this must be the correct size, be\nnormalized (L2 norm of 1), and have dtype of np.complex64.\n\nRaises:\nValueError if the state is incorrectly sized or not of the correct\ndtype.", "source": "codesearchnet"}
{"code": "def assertAllCloseAccordingToType(self, a, b, rtol=1e-06, atol=1e-06, float_rtol=1e-06, float_atol=1e-06, half_rtol=0.001, half_atol=0.001, bfloat16_rtol=0.01, bfloat16_atol=0.01, msg=None):\n    a, b = self.evaluate_if_both_tensors(a, b)\n    a = self._GetNdArray(a)\n    b = self._GetNdArray(b)\n    if a.dtype == np.float32 or b.dtype == np.float32 or a.dtype == np.complex64 or (b.dtype == np.complex64):\n        rtol = max(rtol, float_rtol)\n        atol = max(atol, float_atol)\n    if a.dtype == np.float16 or b.dtype == np.float16:\n        rtol = max(rtol, half_rtol)\n        atol = max(atol, half_atol)\n    if a.dtype == dtypes.bfloat16.as_numpy_dtype or b.dtype == dtypes.bfloat16.as_numpy_dtype:\n        rtol = max(rtol, bfloat16_rtol)\n        atol = max(atol, bfloat16_atol)\n    self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)", "docstring": "Like assertAllClose, but also suitable for comparing fp16 arrays.\n\nIn particular, the tolerance is reduced to 1e-3 if at least\none of the arguments is of type float16.\n\nArgs:\na: the expected numpy ndarray or anything can be converted to one.\nb: the actual numpy ndarray or anything can be converted to one.\nrtol: relative tolerance.\natol: absolute tolerance.\nfloat_rtol: relative tolerance for float32.\nfloat_atol: absolute tolerance for float32.\nhalf_rtol: relative tolerance for float16.\nhalf_atol: absolute tolerance for float16.\nbfloat16_rtol: relative tolerance for bfloat16.\nbfloat16_atol: absolute tolerance for bfloat16.\nmsg: Optional message to report on failure.", "source": "github-repos"}
{"code": "def _cancel_http(api_request, operation_name):\n    \n    path = \"operations/{}:cancel\".format(operation_name)\n    api_request(method=\"POST\", path=path)", "docstring": "Cancel an operation using a JSON/HTTP client.\n\nArgs:\napi_request (Callable): A callable used to make an API request. This\nshould generally be\n:meth:`google.cloud._http.Connection.api_request`.\noperation_name (str): The name of the operation.", "source": "juraj-google-style"}
{"code": "def embedding_lookup(params, ids: ragged_tensor.Ragged, partition_strategy='mod', name=None, validate_indices=True, max_norm=None):\n    if params is None:\n        raise ValueError('params must be specified.')\n    if isinstance(params, (list, tuple)) and (not params):\n        raise ValueError('params should not be empty.')\n    if ids.dtype != dtypes.int32 and ids.dtype != dtypes.int64:\n        raise ValueError(f'The values contained by the inputs have type {str(ids.dtype)} and cannot be processed. All values should be indices, either of type `int32` or `int64`.')\n    with ops.name_scope(name, 'embedding_lookup_ragged') as name:\n        looked_up_ragged = ragged_functional_ops.map_flat_values(embedding_ops.embedding_lookup, params=params, ids=ids, partition_strategy=partition_strategy, max_norm=max_norm)\n        return looked_up_ragged", "docstring": "Look up the ragged ids in a list of embedding tensors.\n\nArgs:\nparams: A tensor representing the complete embedding tensor having the shape\n[e1, ...eM]\nragged_ids: A 'RaggedTensor' with type 'int32' or 'int64' containing the ids\nto be looked up in 'params' of shape [r0, ..rN]. Values must be in the\nrange '[0, params.shape[0]]'.\npartition_strategy: A string specifying the partitioning strategy.\nmax_norm: If not `None`, each embedding is clipped if its l2-norm is larger\nthan this value.\nname: A name for the operation (optional)\n\nReturns:\nA ragged tensor of shape [r0, r1, ...rN, e1, ...eM].\n\nRaises:\nValueError: When params is empty or the type of the ids is not int32 or\nint64.", "source": "github-repos"}
{"code": "def _flatten_subsection(subsection, _type, offset, parent):\n    \n    for row in subsection:\n        \n        if row in ('Low', 'Generated', 'High', ):\n            continue\n        elif isinstance(row[0], StringType):\n            if len(row) in (4, 5, ):\n                if len(row) == 5:\n                    assert row[4][0] == 'S', \\\n                        'Only known usage of a fifth member is Sn, found: %s' % row[4][0]\n                yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,\n                       _type, offset, parent)\n                parent = offset\n                offset += 1\n        elif isinstance(row[0], list):\n            split_parent = offset - 1\n            start_offset = 0\n\n            slices = []\n            start = 0\n            for i, value in enumerate(row):\n                if value == '|':\n                    slices.append(slice(start + start_offset, i))\n                    start = i + 1\n            slices.append(slice(start + start_offset, len(row)))\n\n            for split_slice in slices:\n                for _row in _flatten_subsection(row[split_slice], _type, offset,\n                                                split_parent):\n                    offset += 1\n                    yield _row", "docstring": "Flatten a subsection from its nested version\n\nArgs:\nsubsection: Nested subsection as produced by _parse_section, except one level in\n_type: type of section, ie: AXON, etc\nparent: first element has this as it's parent\noffset: position in the final array of the first element\n\nReturns:\nGenerator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]", "source": "juraj-google-style"}
{"code": "def tv_credits(self, **kwargs):\n    path = self._get_id_path('tv_credits')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the TV credits for a specific person id.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any person method.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def _method_url(self, method_name):\n        \n        return \"{base_url}/api/{api}/{method}\".format(\n            base_url=self._base_url(),\n            api=self.api_version,\n            method=method_name\n        )", "docstring": "Generate the URL for the requested method\n\nArgs:\nmethod_name (str): Name of the method\n\nReturns:\nA string containing the URL of the method", "source": "juraj-google-style"}
{"code": "def add_to_queue(self, queueable_item, position=0, as_next=False):\n    metadata = to_didl_string(queueable_item)\n    response = self.avTransport.AddURIToQueue([('InstanceID', 0), ('EnqueuedURI', queueable_item.resources[0].uri), ('EnqueuedURIMetaData', metadata), ('DesiredFirstTrackNumberEnqueued', position), ('EnqueueAsNext', int(as_next))])\n    qnumber = response['FirstTrackNumberEnqueued']\n    return int(qnumber)", "docstring": "Add a queueable item to the queue.\n\nArgs:\nqueueable_item (DidlObject or MusicServiceItem): The item to be\nadded to the queue\nposition (int): The index (1-based) at which the URI should be\nadded. Default is 0 (add URI at the end of the queue).\nas_next (bool): Whether this URI should be played as the next\ntrack in shuffle mode. This only works if `play_mode=SHUFFLE`.\n\nReturns:\nint: The index of the new item in the queue.", "source": "codesearchnet"}
{"code": "def tokenize(self, vector_list):\n        \n        if self.computable_distance is None:\n            self.computable_distance = EuclidDistance()\n        vector_arr = np.array(vector_list)\n        distance_arr = np.empty_like(vector_arr)\n        feature_arr = self.__dbm.get_feature_point(layer_number=0)\n        key_arr = np.empty(vector_arr.shape[0], dtype=int)\n        for i in range(vector_arr.shape[0]):\n            distance_arr = self.computable_distance.compute(\n                np.expand_dims(vector_arr[i], axis=0).repeat(feature_arr.shape[0], axis=0),\n                feature_arr\n            )\n            key_arr[i] = distance_arr.argmin(axis=0)\n        return self.token_arr[key_arr]", "docstring": "Tokenize vector.\n\nArgs:\nvector_list:    The list of vector of one token.\n\nReturns:\ntoken", "source": "juraj-google-style"}
{"code": "def _get_authorization_headers(self, context):\n    headers = {}\n    self._credentials.before_request(self._request, context.method_name, context.service_url, headers)\n    return list(six.iteritems(headers))", "docstring": "Gets the authorization headers for a request.\n\nReturns:\nSequence[Tuple[str, str]]: A list of request headers (key, value)\nto add to the request.", "source": "codesearchnet"}
{"code": "def _extract_response_chunks(self, all_responses, response_chunks, api_name):\n        \n        for response_chunk in response_chunks:\n            if not isinstance(response_chunk, list):\n                response_chunk = [response_chunk]\n            for response in response_chunk:\n                if not response:\n                    continue\n\n                if self._cache:\n                    self._cache.cache_value(api_name, response['resource'], response)\n                all_responses[response['resource']] = response", "docstring": "Extracts and caches the responses from the response chunks in case\nof the responses for the requests containing multiple concatenated\nresources. Extracted responses are added to the already cached\nresponses passed in the all_responses parameter.\n\nArgs:\nall_responses: a list containing already cached responses.\nresponse_chunks: a list with response chunks.\napi_name: a string name of the API.", "source": "juraj-google-style"}
{"code": "def write(self, name, **data):\n    data['name'] = name\n    if (not ('timestamp' in data)):\n        data['timestamp'] = datetime.utcnow()\n    try:\n        self.client.index(index=self.get_index(), doc_type=self.doc_type, id=None, body=data)\n    except TransportError as exc:\n        logger.warning('writing metric %r failure %r', data, exc)", "docstring": "Write the metric to elasticsearch\n\nArgs:\nname (str): The name of the metric to write\ndata (dict): Additional data to store with the metric", "source": "codesearchnet"}
{"code": "def get_datas(callback, macs=[], run_flag=RunFlag(), bt_device=''):\n        \n\n        log.info('Get latest data for sensors. Stop with Ctrl+C.')\n        log.info('MACs: %s', macs)\n\n        for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, None, run_flag, bt_device):\n            callback(new_data)", "docstring": "Get data for all ruuvitag sensors or sensors in the MAC's list.\n\nArgs:\ncallback (func): callback funcion to be called when new data is received\nmacs (list): MAC addresses\nrun_flag (object): RunFlag object. Function executes while run_flag.running\nbt_device (string): Bluetooth device id", "source": "juraj-google-style"}
{"code": "def identifiers(config):\n    \n    ids = []\n    if (config.klass_name == 'gen'):\n        for generator in os.listdir(config.generator_dir):\n            if (generator == '__init__.py'):\n                continue\n            (gid, ext) = os.path.splitext(generator)\n            if (ext == '.py' and\n                    os.path.isfile(os.path.join(config.generator_dir, generator))):\n                ids.append(gid)\n    else:\n        for image_file in os.listdir(config.image_dir):\n            (iid, ext) = os.path.splitext(image_file)\n            if (ext in ['.jpg', '.png', '.tif'] and\n                    os.path.isfile(os.path.join(config.image_dir, image_file))):\n                ids.append(iid)\n    return ids", "docstring": "Show list of identifiers for this prefix.\n\nHandles both the case of local file based identifiers and\nalso image generators.\n\nArguments:\nconfig - configuration object in which:\nconfig.klass_name - 'gen' if a generator function\nconfig.generator_dir - directory for generator code\nconfig.image_dir - directory for images\n\nReturns:\nids - a list of ids", "source": "juraj-google-style"}
{"code": "def send_async(self, transaction, headers=None):\n    return self.transport.forward_request(method='POST', path=self.path, json=transaction, params={'mode': 'async'}, headers=headers)", "docstring": "Submit a transaction to the Federation with the mode `async`.\n\nArgs:\ntransaction (dict): the transaction to be sent\nto the Federation node(s).\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: The transaction sent to the Federation node(s).", "source": "codesearchnet"}
{"code": "def migrate(self, id_or_uri, timeout=-1):\n        \n\n        \n        migrationInformation = {\n            'migrationState': 'Migrated',\n            'type': 'migratable-vc-domains',\n            'category': 'migratable-vc-domains'\n        }\n\n        \n        \n        complete_uri = self._client.build_uri(id_or_uri)\n\n        return self._client.update(migrationInformation, uri=complete_uri, timeout=timeout)", "docstring": "Initiates a migration of an enclosure specified by the ID or URI of a migration report.\n\nArgs:\nid_or_uri: ID or URI of the migration report.\ntimeout: Timeout in seconds.  Waits for task completion by default.  The timeout does not abort the task in\nOneView; just stops waiting for its completion.\n\nReturns: dict: a migration report.", "source": "juraj-google-style"}
{"code": "def __check_no_missing_attributes(self, node: yaml.Node,\n                                      mapping: CommentedMap) -> None:\n        \n        logger.debug('Checking presence of required attributes')\n        for name, type_, required in class_subobjects(self.class_):\n            if required and name not in mapping:\n                raise RecognitionError(('{}{}Missing attribute {} needed for'\n                                        ' constructing a {}').format(\n                                            node.start_mark, os.linesep, name,\n                                            self.class_.__name__))\n            if name in mapping and not self.__type_matches(\n                    mapping[name], type_):\n                raise RecognitionError(('{}{}Attribute {} has incorrect type'\n                                        ' {}, expecting a {}').format(\n                                            node.start_mark, os.linesep, name,\n                                            type(mapping[name]), type_))", "docstring": "Checks that all required attributes are present.\n\nAlso checks that they're of the correct type.\n\nArgs:\nmapping: The mapping with subobjects of this object.\n\nRaises:\nRecognitionError: if an attribute is missing or the type \\\nis incorrect.", "source": "juraj-google-style"}
{"code": "def tomography_data(results, name, tomoset):\n    labels = tomography_circuit_names(tomoset, name)\n    circuits = tomoset['circuits']\n    data = []\n    prep = None\n    for (j, _) in enumerate(labels):\n        counts = marginal_counts(results.get_counts(labels[j]), tomoset['qubits'])\n        shots = sum(counts.values())\n        meas = circuits[j]['meas']\n        prep = circuits[j].get('prep', None)\n        meas_qubits = sorted(meas.keys())\n        if prep:\n            prep_qubits = sorted(prep.keys())\n        circuit = {}\n        for c in counts.keys():\n            circuit[c] = {}\n            circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[((- 1) - k)])) for k in range(len(meas_qubits))]\n            if prep:\n                circuit[c]['prep'] = [prep[prep_qubits[k]] for k in range(len(prep_qubits))]\n        data.append({'counts': counts, 'shots': shots, 'circuit': circuit})\n    ret = {'data': data, 'meas_basis': tomoset['meas_basis']}\n    if prep:\n        ret['prep_basis'] = tomoset['prep_basis']\n    return ret", "docstring": "Return a results dict for a state or process tomography experiment.\n\nArgs:\nresults (Result): Results from execution of a process tomography\ncircuits on a backend.\nname (string): The name of the circuit being reconstructed.\ntomoset (tomography_set): the dict of tomography configurations.\n\nReturns:\nlist: A list of dicts for the outcome of each process tomography\nmeasurement circuit.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, name, aliases=None, description=None, maximum_value=None,\n      minimum_value=None, urls=None):\n    \n    super(IntegerDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.format = definitions.FORMAT_SIGNED\n    self.maximum_value = maximum_value\n    self.minimum_value = minimum_value", "docstring": "Initializes an integer data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nmaximum_value (Optional[int]): maximum allowed value of the integer\ndata type.\nminimum_value (Optional[int]): minimum allowed value of the integer\ndata type.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def get_what_follows_raw(s: str,\n                         prefix: str,\n                         onlyatstart: bool = True,\n                         stripwhitespace: bool = True) -> Tuple[bool, str]:\n    \n    prefixstart = s.find(prefix)\n    if ((prefixstart == 0 and onlyatstart) or\n            (prefixstart != -1 and not onlyatstart)):\n        \n        resultstart = prefixstart + len(prefix)\n        result = s[resultstart:]\n        if stripwhitespace:\n            result = result.strip()\n        return True, result\n    return False, \"\"", "docstring": "Find the part of ``s`` that is after ``prefix``.\n\nArgs:\ns: string to analyse\nprefix: prefix to find\nonlyatstart: only accept the prefix if it is right at the start of\n``s``\nstripwhitespace: remove whitespace from the result\n\nReturns:\ntuple: ``(found, result)``", "source": "juraj-google-style"}
{"code": "def FilterItem(self, launchditem):\n    for regex in self.blacklist_regex:\n        if regex.match(launchditem.get('Label', '')):\n            return True\n    return False", "docstring": "Should this job be filtered.\n\nArgs:\nlaunchditem: job NSCFDictionary\nReturns:\nTrue if the item should be filtered (dropped)", "source": "codesearchnet"}
{"code": "def __init__(self, fsntfs_data_stream):\n    \n    super(NTFSDataStream, self).__init__()\n    self._fsntfs_data_stream = fsntfs_data_stream", "docstring": "Initializes the data stream object.\n\nArgs:\nfsntfs_data_stream (pyfsntfs.data_stream): NTFS data stream.", "source": "juraj-google-style"}
{"code": "def create_course_completion(self, user_id, payload):  \n        \n        return self._post(\n            urljoin(\n                self.enterprise_configuration.degreed_base_url,\n                self.global_degreed_config.completion_status_api_path\n            ),\n            payload,\n            self.COMPLETION_PROVIDER_SCOPE\n        )", "docstring": "Send a completion status payload to the Degreed Completion Status endpoint\n\nArgs:\nuser_id: Unused.\npayload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)\ncontaining completion status fields per Degreed documentation.\n\nReturns:\nA tuple containing the status code and the body of the response.\nRaises:\nHTTPError: if we received a failure response code from Degreed", "source": "juraj-google-style"}
{"code": "def fill_rects(self, *rects):\n    rect_array = ffi.new('SDL_Rect[]', len(rects))\n    for (i, r) in enumerate(rects):\n        rect_array[i] = r._ptr[0]\n    check_int_err(lib.SDL_RenderFillRects(self._ptr, rect_array, len(rects)))", "docstring": "Fill some number of rectangles on the current rendering target with the drawing color.\n\nArgs:\n*rects (Rect): The destination rectangles.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def compose(self, r: Rigid) -> Rigid:\n    new_rot = self._rots.compose_r(r._rots)\n    new_trans = self._rots.apply(r._trans) + self._trans\n    return Rigid(new_rot, new_trans)", "docstring": "Composes the current rigid object with another.\n\nArgs:\nr:\nAnother Rigid object\nReturns:\nThe composition of the two transformations", "source": "github-repos"}
{"code": "def encode_request(request_line, **headers):\n    \n    lines = [request_line]\n    lines.extend(['%s: %s' % kv for kv in headers.items()])\n    return ('\\r\\n'.join(lines) + '\\r\\n\\r\\n').encode('utf-8')", "docstring": "Creates the data for a SSDP request.\n\nArgs:\nrequest_line (string): The request line for the request (e.g.\n``\"M-SEARCH * HTTP/1.1\"``).\nheaders (dict of string -> string): Dictionary of header name - header\nvalue pairs to present in the request.\n\nReturns:\nbytes: The encoded request.", "source": "juraj-google-style"}
{"code": "def __init__(self, cell, device, **kwargs):\n    super(DeviceWrapperBase, self).__init__(cell, **kwargs)\n    self._device = device", "docstring": "Construct a `DeviceWrapper` for `cell` with device `device`.\n\nEnsures the wrapped `cell` is called with `tf.device(device)`.\n\nArgs:\ncell: An instance of `RNNCell`.\ndevice: A device string or function, for passing to `tf.device`.\n**kwargs: dict of keyword arguments for base layer.", "source": "github-repos"}
{"code": "def checksum1(data, stringlength):\n    value_buffer = 0\n    for count in range(0, stringlength):\n        value_buffer = (value_buffer ^ data[count])\n    return (value_buffer & 254)", "docstring": "Calculate Checksum 1\n\nCalculate the ckecksum 1 required for the herkulex data packet\n\nArgs:\ndata (list): the data of which checksum is to be calculated\nstringlength (int): the length of the data\n\nReturns:\nint:  The calculated checksum 1", "source": "codesearchnet"}
{"code": "def get_error_name(error):\n    error_type = type(error)\n    if (error_type.__module__ in ['__main__', 'builtins']):\n        return error_type.__name__\n    else:\n        return f'{error_type.__module__}.{error_type.__name__}'", "docstring": "Return canonical error name as string.\n\nFor builtin errors like ValueError or Exception, will return the bare\nname, like ValueError or Exception.\n\nFor all other exceptions, will return modulename.errorname, such as\narbpackage.mod.myerror\n\nArgs:\nerror: Exception object.\n\nReturns:\nstr. Canonical error name.", "source": "codesearchnet"}
{"code": "def mark_typed_list(self, name, type_object):\n    if (not hasattr(type_object, 'dump')):\n        raise ArgumentError(('The passed type object %s is missing required method: dump()' % type_object))\n    if (not hasattr(type_object, 'Restore')):\n        raise ArgumentError(('The passed type object %s is missing required method: Restore()' % type_object))\n\n    def _dump_list(obj):\n        if (obj is None):\n            return None\n        if (not isinstance(obj, list)):\n            raise DataError(('Property %s marked as list was not a list: %s' % (name, repr(obj))))\n        return [x.dump() for x in obj]\n\n    def _restore_list(obj):\n        if (obj is None):\n            return obj\n        return [type_object.Restore(x) for x in obj]\n    self.mark_complex(name, _dump_list, _restore_list)", "docstring": "Mark a property as containing serializable objects of a given type.\n\nThis convenience method allows you to avoid having to call\n``mark_complex()`` whenever you need to serialize a list of objects.\nThis method requires that all members of the given list be of a single\nclass that contains a dump() method and a Restore() class method where\ntype_object.Restore(x.dump()) == x.\n\nArgs:\nname (str): The name of the complex property.\ntype_object: The class object that will be contained inside\nthis list.", "source": "codesearchnet"}
{"code": "def dq_argument(self) -> str:\n\n    def escape():\n        self._escape = True\n        return 1\n    self._escape = False\n    self.offset += 1\n    start = self.offset\n    self.dfa([{'': (lambda : 0), '\"': (lambda : (- 1)), '\\\\': escape}, {'': (lambda : 0)}])\n    self._arg += (self.unescape(self.input[start:self.offset]) if self._escape else self.input[start:self.offset])\n    self.offset += 1", "docstring": "Parse double-quoted argument.\n\nRaises:\nEndOfInput: If past the end of input.", "source": "codesearchnet"}
{"code": "def __call__(self, fn):\n        \n\n        def exception(app, *args, **kwargs):\n            \n\n            try:\n                return fn(app, *args, **kwargs)\n            except Exception as e:\n                app.tcex.log.error('method failure ({})'.format(e))\n                app.tcex.exit(1, self.msg)\n\n        return exception", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def get_feature(w1: str, w2: str, w3: str, w4: str, w5: str, w6: str) -> typing.List[str]:\n    raw_feature = {'UW1': w1, 'UW2': w2, 'UW3': w3, 'UW4': w4, 'UW5': w5, 'UW6': w6, 'BW1': w2 + w3, 'BW2': w3 + w4, 'BW3': w4 + w5, 'TW1': w1 + w2 + w3, 'TW2': w2 + w3 + w4, 'TW3': w3 + w4 + w5, 'TW4': w4 + w5 + w6}\n    for key, value in list(raw_feature.items()):\n        if INVALID in value:\n            del raw_feature[key]\n    return [f'{item[0]}:{item[1]}' for item in raw_feature.items()]", "docstring": "Generates a feature from characters around (w1-6).\n\nArgs:\nw1 (str): The character 3 characters before the break point.\nw2 (str): The character 2 characters before the break point.\nw3 (str): The character right before the break point.\nw4 (str): The character right after the break point.\nw5 (str): The character 2 characters after the break point.\nw6 (str): The character 3 characters after the break point.\n\nReturns:\nThe feature (list[str]).", "source": "github-repos"}
{"code": "def plugin_method(*plugin_names):\n    \n    def wrapper(callable_obj):\n        for plugin_name in plugin_names:\n            if not hasattr(callable_obj, plugin_name):\n                setattr(callable_obj, plugin_name, True)\n        return callable_obj\n    return wrapper", "docstring": "Plugin Method decorator.\nSigns a web handler function with the plugins to be applied as attributes.\n\nArgs:\nplugin_names (list): A list of plugin callable names\n\nReturns:\nA wrapped handler callable.\n\nExamples:\n>>> @plugin_method('json', 'bill')\n... def method():\n...     return \"Hello!\"\n...\n>>> print method.json\nTrue\n>>> print method.bill\nTrue", "source": "juraj-google-style"}
{"code": "def longestNumber(self, inp):\n        \n        split = inp.split(' ')\n\n        \n        numStart = None\n        numEnd = None\n        for i, w in enumerate(split):\n            if self.isValid(w):\n                if numStart is None:\n                    numStart = i\n                numEnd = i\n            else:\n                \n                w = re.sub(r'(\\w+)s(\\b)', '\\g<1>\\g<2>', w)\n                if w in self.__ordinals__:\n                    if self.isValid(' '.join(split[numStart:i + 1])):\n                        numEnd = i\n                        break\n        description = ' '.join(split[numStart:numEnd + 1])\n        return self.parse(description)", "docstring": "Extracts the longest valid numerical description from a string.\nNot guaranteed to return a result even if some valid numerical\ndescription exists (i.e., method is not particularly advanced).\n\nArgs:\ninp (str): An arbitrary string, hopefully containing a number.\n\nReturns:\nThe number with the longest string description in input,\nor None if not found.", "source": "juraj-google-style"}
{"code": "def _ParseCachedEntryXP(self, value_data, cached_entry_offset):\n    \n    try:\n      cached_entry = self._ReadStructureFromByteStream(\n          value_data[cached_entry_offset:], cached_entry_offset,\n          self._cached_entry_data_type_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse cached entry value with error: {0!s}'.format(\n              exception))\n\n    \n    string_size = 0\n    for string_index in range(0, 528, 2):\n      if (cached_entry.path[string_index] == 0 and\n          cached_entry.path[string_index + 1] == 0):\n        break\n      string_size += 2\n\n    try:\n      path = bytearray(cached_entry.path[0:string_size]).decode('utf-16-le')\n    except UnicodeDecodeError:\n      raise errors.ParseError('Unable to decode cached entry path to string')\n\n    cached_entry_object = AppCompatCacheCachedEntry()\n    cached_entry_object.cached_entry_size = (\n        self._cached_entry_data_type_map.GetByteSize())\n    cached_entry_object.file_size = cached_entry.file_size\n    cached_entry_object.last_modification_time = (\n        cached_entry.last_modification_time)\n    cached_entry_object.last_update_time = cached_entry.last_update_time\n    cached_entry_object.path = path\n\n    return cached_entry_object", "docstring": "Parses a Windows XP cached entry.\n\nArgs:\nvalue_data (bytes): value data.\ncached_entry_offset (int): offset of the first cached entry data\nrelative to the start of the value data.\n\nReturns:\nAppCompatCacheCachedEntry: cached entry.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"}
{"code": "def get_signatures_from_saved_model(saved_model_path: str, signature_keys: Optional[Sequence[str]]=None, tags: Optional[Collection[str]]=None) -> Dict[str, meta_graph_pb2.SignatureDef]:\n    if tags is None:\n        tags = {tag_constants.SERVING}\n    loader = saved_model_loader.SavedModelLoader(saved_model_path)\n    meta_graphdef = loader.get_meta_graph_def_from_tags(tags)\n    signatures = {}\n    for key, signature_def in meta_graphdef.signature_def.items():\n        if key == saved_model_constants.INIT_OP_SIGNATURE_KEY:\n            continue\n        if signature_keys is not None and key not in signature_keys:\n            continue\n        signatures[key] = signature_def\n    return signatures", "docstring": "Gets a map from signature keys to their SignatureDef.\n\nArgs:\nsaved_model_path: Path to the saved model.\nsignature_keys: List of keys identifying SignatureDef to retrieve. If None,\nretrieve all except the init signature.\ntags: Set of tags identifying the MetaGraphDef within the SavedModel.\n\nReturns:\nA map from signature_key to its SignatureDef.", "source": "github-repos"}
{"code": "def select_embedding_from_tag(cur, embedding_tag, target_nodelist, target_edgelist):\n    encoded_data = {'num_nodes': len(target_nodelist), 'num_edges': len(target_edgelist), 'edges': json.dumps(target_edgelist, separators=(',', ':')), 'tag': embedding_tag}\n    select = '\\n        SELECT\\n            source_node,\\n            chain\\n        FROM\\n            embedding_component_view\\n        WHERE\\n            embedding_tag = :tag AND\\n            target_edges = :edges AND\\n            target_num_nodes = :num_nodes AND\\n            target_num_edges = :num_edges\\n        '\n    embedding = {v: json.loads(chain) for (v, chain) in cur.execute(select, encoded_data)}\n    return embedding", "docstring": "Select an embedding from the given tag and target graph.\n\nArgs:\ncur (:class:`sqlite3.Cursor`):\nAn sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.\n\nsource_nodelist (list):\nThe nodes in the source graph. Should be integer valued.\n\nsource_edgelist (list):\nThe edges in the source graph.\n\ntarget_nodelist (list):\nThe nodes in the target graph. Should be integer valued.\n\ntarget_edgelist (list):\nThe edges in the target graph.\n\nReturns:\ndict: The mapping from the source graph to the target graph.\nIn the form {v: {s, ...}, ...} where v is a variable in the\nsource model and s is a variable in the target model.", "source": "codesearchnet"}
{"code": "def mkzip(archive, items, mode=\"w\", save_full_paths=False):\n    \n    close = False\n    try:\n        if not isinstance(archive, zipfile.ZipFile):\n            archive = zipfile.ZipFile(archive, mode, allowZip64=True)\n            close = True\n        logger.info(\"mkdzip: Creating %s, from: %s\", archive.filename, items)\n        if isinstance(items, str):\n            items = [items]\n        for item in items:\n            item = os.path.abspath(item)\n            basename = os.path.basename(item)\n            if os.path.isdir(item):\n                for root, directoires, filenames in os.walk(item):\n                    for filename in filenames:\n                        path = os.path.join(root, filename)\n                        if save_full_paths:\n                            archive_path = path.encode(\"utf-8\")\n                        else:\n                            archive_path = os.path.join(\n                                basename, path.replace(item, \"\").strip(\"\\\\/\")\n                            ).encode(\"utf-8\")\n                        archive.write(path, archive_path)\n            elif os.path.isfile(item):\n                if save_full_paths:\n                    archive_name = item.encode(\"utf-8\")\n                else:\n                    archive_name = basename.encode(\"utf-8\")\n                archive.write(item, archive_name)  \n        return True\n    except Exception as e:\n        logger.error(\"Error occurred during mkzip: %s\" % e)\n        return False\n    finally:\n        if close:\n            archive.close()", "docstring": "Recursively zip a directory.\n\nArgs:\narchive (zipfile.ZipFile or str): ZipFile object add to or path to the\noutput zip archive.\nitems (str or list of str): Single item or list of items (files and\ndirectories) to be added to zipfile.\nmode (str): w for create new and write a for append to.\nsave_full_paths (bool): Preserve full paths.", "source": "juraj-google-style"}
{"code": "def list(self,params=None, headers=None):\n        \n        path = '/creditor_bank_accounts'\n        \n\n        response = self._perform_request('GET', path, params, headers,\n                                         retry_failures=True)\n        return self._resource_for(response)", "docstring": "List creditor bank accounts.\n\nReturns a [cursor-paginated](#api-usage-cursor-pagination) list of your\ncreditor bank accounts.\n\nArgs:\nparams (dict, optional): Query string parameters.\n\nReturns:\nCreditorBankAccount", "source": "juraj-google-style"}
{"code": "def get(path, objectType, user=None):\n    \n    ret = {'Path': path,\n           'ACLs': []}\n\n    sidRet = _getUserSid(user)\n\n    if path and objectType:\n        dc = daclConstants()\n        objectTypeBit = dc.getObjectTypeBit(objectType)\n        path = dc.processPath(path, objectTypeBit)\n        tdacl = _get_dacl(path, objectTypeBit)\n        if tdacl:\n            for counter in range(0, tdacl.GetAceCount()):\n                tAce = tdacl.GetAce(counter)\n                if not sidRet['sid'] or (tAce[2] == sidRet['sid']):\n                    ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))\n    return ret", "docstring": "Get the ACL of an object. Will filter by user if one is provided.\n\nArgs:\npath: The path to the object\nobjectType: The type of object (FILE, DIRECTORY, REGISTRY)\nuser: A user name to filter by\n\nReturns (dict): A dictionary containing the ACL\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' win_dacl.get c:\\temp directory", "source": "juraj-google-style"}
{"code": "def attribute(self, name):\n        \n        return super(Map, self).attribute(self._inputs[0], name)", "docstring": "Expression for an input attribute.\n\nAn input attribute is an attribute on the input\nport of the operator invocation.\n\nArgs:\nname(str): Name of the attribute.\n\nReturns:\nExpression: Expression representing the input attribute.", "source": "juraj-google-style"}
{"code": "def ParseGshadowEntry(self, line):\n    \n    fields = (\"name\", \"passwd\", \"administrators\", \"members\")\n    if line:\n      rslt = dict(zip(fields, line.split(\":\")))\n      \n      name = rslt[\"name\"]\n      pw_entry = self.shadow.setdefault(name, rdf_client.PwEntry())\n      pw_entry.store = self.shadow_store\n      pw_entry.hash_type = self.GetHashType(rslt[\"passwd\"])\n      \n      members = self.gshadow_members.setdefault(name, set())\n      for accts in rslt[\"administrators\"], rslt[\"members\"]:\n        if accts:\n          members.update(accts.split(\",\"))", "docstring": "Extract the members of each group from /etc/gshadow.\n\nIdentifies the groups in /etc/gshadow and several attributes of the group,\nincluding how the password is crypted (if set).\n\ngshadow files have the format group_name:passwd:admins:members\nadmins are both group members and can manage passwords and memberships.\n\nArgs:\nline: An entry in gshadow.", "source": "juraj-google-style"}
{"code": "def decode(self):\n    if (self.encoding >= self.public_key.n):\n        raise ValueError('Attempted to decode corrupted number')\n    elif (self.encoding <= self.public_key.max_int):\n        mantissa = self.encoding\n    elif (self.encoding >= (self.public_key.n - self.public_key.max_int)):\n        mantissa = (self.encoding - self.public_key.n)\n    else:\n        raise OverflowError('Overflow detected in decrypted number')\n    if (self.exponent >= 0):\n        return (mantissa * (self.BASE ** self.exponent))\n    else:\n        try:\n            return (mantissa / (self.BASE ** (- self.exponent)))\n        except OverflowError as e:\n            raise OverflowError('decoded result too large for a float') from e", "docstring": "Decode plaintext and return the result.\n\nReturns:\nan int or float: the decoded number. N.B. if the number\nreturned is an integer, it will not be of type float.\n\nRaises:\nOverflowError: if overflow is detected in the decrypted number.", "source": "codesearchnet"}
{"code": "def validate(cls, mapper_spec):\n    \n    if mapper_spec.input_reader_class() != cls:\n      raise errors.BadReaderParamsError(\"Input reader class mismatch\")\n\n    params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)\n    if (cls.VERSION_IDS_PARAM not in params and\n        cls.MODULE_VERSIONS_PARAM not in params):\n      raise errors.BadReaderParamsError(\"Must specify a list of version ids or \"\n                                        \"module/version ids for mapper input\")\n    if (cls.VERSION_IDS_PARAM in params and\n        cls.MODULE_VERSIONS_PARAM in params):\n      raise errors.BadReaderParamsError(\"Can not supply both version ids or \"\n                                        \"module/version ids. Use only one.\")\n    if (cls.START_TIME_PARAM not in params or\n        params[cls.START_TIME_PARAM] is None):\n      raise errors.BadReaderParamsError(\"Must specify a starting time for \"\n                                        \"mapper input\")\n    if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:\n      params[cls.END_TIME_PARAM] = time.time()\n\n    if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:\n      raise errors.BadReaderParamsError(\"The starting time cannot be later \"\n                                        \"than or the same as the ending time.\")\n\n    if cls._PROTOTYPE_REQUEST_PARAM in params:\n      try:\n        params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(\n            params[cls._PROTOTYPE_REQUEST_PARAM])\n      except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):\n        raise errors.BadReaderParamsError(\"The prototype request must be \"\n                                          \"parseable as a LogReadRequest.\")\n\n    \n    \n    \n    try:\n      logservice.fetch(**params)\n    except logservice.InvalidArgumentError, e:\n      raise errors.BadReaderParamsError(\"One or more parameters are not valid \"\n                                        \"inputs to logservice.fetch(): %s\" % e)", "docstring": "Validates the mapper's specification and all necessary parameters.\n\nArgs:\nmapper_spec: The MapperSpec to be used with this InputReader.\n\nRaises:\nBadReaderParamsError: If the user fails to specify both a starting time\nand an ending time, or if the starting time is later than the ending\ntime.", "source": "juraj-google-style"}
{"code": "def get_servo_angle(self):\n    servoposition = self.get_servo_position()\n    if ((self.servomodel == 6) or (self.servomodel == 4)):\n        return scale(servoposition, 10627, 22129, (- 159.9), 159.6)\n    else:\n        return scale(servoposition, 21, 1002, (- 150), 150)", "docstring": "Gets the current angle of the servo in degrees\n\nArgs:\nnone\nReturns:\nint : the current servo angle", "source": "codesearchnet"}
{"code": "def neighborhood_probability(self, threshold, radius):\n        \n        weights = disk(radius, dtype=np.uint8)\n        thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)\n        neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)\n        for t in np.arange(self.data.shape[0]):\n            thresh_data[self.data[t] >= threshold] = 1\n            maximized = fftconvolve(thresh_data, weights, mode=\"same\")\n            maximized[maximized > 1] = 1\n            maximized[maximized < 1] = 0\n            neighbor_prob[t] = fftconvolve(maximized, weights, mode=\"same\")\n            thresh_data[:] = 0\n        neighbor_prob[neighbor_prob < 1] = 0\n        neighbor_prob /= weights.sum()\n        return neighbor_prob", "docstring": "Calculate a probability based on the number of grid points in an area that exceed a threshold.\n\nArgs:\nthreshold:\nradius:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _FormatIPCPermToken(self, token_data):\n    \n    return {\n        'user_id': token_data.user_identifier,\n        'group_id': token_data.group_identifier,\n        'creator_user_id': token_data.creator_user_identifier,\n        'creator_group_id': token_data.creator_group_identifier,\n        'access': token_data.access_mode}", "docstring": "Formats an IPC permissions token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def abort_all(reason, extras=None):\n    raise signals.TestAbortAll(reason, extras)", "docstring": "Abort all subsequent tests, including the ones not in this test class or\niteration.\n\nArgs:\nreason: The reason to abort.\nextras: An optional field for extra information to be included in\ntest result.\n\nRaises:\nsignals.TestAbortAll: Abort all subsequent tests.", "source": "github-repos"}
{"code": "def extend(self, elts):\n    elts = elts[:]\n    self._in_deque.append(elts)\n    event = self._event_for(elts)\n    self._event_deque.append(event)\n    return event", "docstring": "Adds elts to the tasks.\n\nArgs:\nelts (Sequence): a iterable of elements that can be appended to the\ntask's bundle_field.\n\nReturns:\nEvent: an event that can be used to wait on the response.", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._fsntfs_data_stream:\n      return self._fsntfs_data_stream.read(size=size)\n    return self._fsntfs_file_entry.read(size=size)", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def _process_update(self, item, feed_item):\n    lp = self.landing_page_dao.get(feed_item, required=True)\n    feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_ID] = lp['id']\n    feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_NAME] = lp['name']\n    item['startDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_START_DATE, None))\n    item['endDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_END_DATE, None))\n    item['name'] = feed_item.get(FieldMap.CAMPAIGN_NAME, None)\n    item['defaultLandingPageId'] = lp['id']", "docstring": "Updates a campaign based on the values from the feed.\n\nArgs:\nitem: Object representing the campaign to be updated, this object is\nupdated directly.\nfeed_item: Feed item representing campaign values from the Bulkdozer feed.", "source": "github-repos"}
{"code": "def ParseOptions(cls, options, analysis_plugin):\n    \n    if not isinstance(analysis_plugin, nsrlsvr.NsrlsvrAnalysisPlugin):\n      raise errors.BadConfigObject(\n          'Analysis plugin is not an instance of NsrlsvrAnalysisPlugin')\n\n    label = cls._ParseStringOption(\n        options, 'nsrlsvr_label', default_value=cls._DEFAULT_LABEL)\n    analysis_plugin.SetLabel(label)\n\n    lookup_hash = cls._ParseStringOption(\n        options, 'nsrlsvr_hash', default_value=cls._DEFAULT_HASH)\n    analysis_plugin.SetLookupHash(lookup_hash)\n\n    host = cls._ParseStringOption(\n        options, 'nsrlsvr_host', default_value=cls._DEFAULT_HOST)\n    analysis_plugin.SetHost(host)\n\n    port = cls._ParseNumericOption(\n        options, 'nsrlsvr_port', default_value=cls._DEFAULT_PORT)\n    analysis_plugin.SetPort(port)\n\n    if not analysis_plugin.TestConnection():\n      raise errors.BadConfigOption(\n          'Unable to connect to nsrlsvr {0:s}:{1:d}'.format(host, port))", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options object.\nanalysis_plugin (NsrlsvrAnalysisPlugin): analysis plugin to configure.\n\nRaises:\nBadConfigObject: when the analysis plugin is the wrong type.\nBadConfigOption: when unable to connect to nsrlsvr instance.", "source": "juraj-google-style"}
{"code": "def CreateSubdivision(self, parent=None, value=None):\n    division = {'xsi_type': 'ProductPartition', 'partitionType': 'SUBDIVISION', 'id': str(self.next_id)}\n    if (parent is not None):\n        division['parentCriterionId'] = parent['id']\n        division['caseValue'] = value\n    adgroup_criterion = {'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': self.adgroup_id, 'criterion': division}\n    self.CreateAddOperation(adgroup_criterion)\n    self.next_id -= 1\n    return division", "docstring": "Creates a subdivision node.\n\nArgs:\nparent: The node that should be this node's parent.\nvalue: The value being partitioned on.\nReturns:\nA new subdivision node.", "source": "codesearchnet"}
{"code": "def _send_offset_fetch_request(self, partitions):\n        \n        assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'\n        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))\n        if not partitions:\n            return Future().success({})\n\n        node_id = self.coordinator()\n        if node_id is None:\n            return Future().failure(Errors.GroupCoordinatorNotAvailableError)\n\n        \n        if not self._client.ready(node_id):\n            log.debug(\"Node %s not ready -- failing offset fetch request\",\n                      node_id)\n            return Future().failure(Errors.NodeNotReadyError)\n\n        log.debug(\"Group %s fetching committed offsets for partitions: %s\",\n                  self.group_id, partitions)\n        \n        topic_partitions = collections.defaultdict(set)\n        for tp in partitions:\n            topic_partitions[tp.topic].add(tp.partition)\n\n        if self.config['api_version'] >= (0, 8, 2):\n            request = OffsetFetchRequest[1](\n                self.group_id,\n                list(topic_partitions.items())\n            )\n        else:\n            request = OffsetFetchRequest[0](\n                self.group_id,\n                list(topic_partitions.items())\n            )\n\n        \n        future = Future()\n        _f = self._client.send(node_id, request)\n        _f.add_callback(self._handle_offset_fetch_response, future)\n        _f.add_errback(self._failed_request, node_id, request, future)\n        return future", "docstring": "Fetch the committed offsets for a set of partitions.\n\nThis is a non-blocking call. The returned future can be polled to get\nthe actual offsets returned from the broker.\n\nArguments:\npartitions (list of TopicPartition): the partitions to fetch\n\nReturns:\nFuture: resolves to dict of offsets: {TopicPartition: int}", "source": "juraj-google-style"}
{"code": "def word_error_rate(raw_predictions,\n                    labels,\n                    lookup=None,\n                    weights_fn=common_layers.weights_nonzero):\n  \n\n  def from_tokens(raw, lookup_):\n    gathered = tf.gather(lookup_, tf.cast(raw, tf.int32))\n    joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b\"<EOS>.*\", b\"\")\n    cleaned = tf.regex_replace(joined, b\"_\", b\" \")\n    tokens = tf.string_split(cleaned, \" \")\n    return tokens\n\n  def from_characters(raw, lookup_):\n    \n    corrected = tf.bitcast(\n        tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8)\n\n    gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0]\n    joined = tf.reduce_join(gathered, axis=1)\n    cleaned = tf.regex_replace(joined, b\"\\0\", b\"\")\n    tokens = tf.string_split(cleaned, \" \")\n    return tokens\n\n  if lookup is None:\n    lookup = tf.constant([chr(i) for i in range(256)])\n    convert_fn = from_characters\n  else:\n    convert_fn = from_tokens\n\n  if weights_fn is not common_layers.weights_nonzero:\n    raise ValueError(\"Only weights_nonzero can be used for this metric.\")\n\n  with tf.variable_scope(\"word_error_rate\", values=[raw_predictions, labels]):\n\n    raw_predictions = tf.squeeze(\n        tf.argmax(raw_predictions, axis=-1), axis=(2, 3))\n    labels = tf.squeeze(labels, axis=(2, 3))\n\n    reference = convert_fn(labels, lookup)\n    predictions = convert_fn(raw_predictions, lookup)\n\n    distance = tf.reduce_sum(\n        tf.edit_distance(predictions, reference, normalize=False))\n    reference_length = tf.cast(\n        tf.size(reference.values, out_type=tf.int32), dtype=tf.float32)\n\n    return distance / reference_length, reference_length", "docstring": "Calculate word error rate.\n\nArgs:\nraw_predictions: The raw predictions.\nlabels: The actual labels.\nlookup: A tf.constant mapping indices to output tokens.\nweights_fn: Weighting function.\n\nReturns:\nThe word error rate.", "source": "juraj-google-style"}
{"code": "def ends_with(self, suffix):\n    suffix = suffix.lower()\n    found_words = []\n    res = cgaddag.gdg_ends_with(self.gdg, suffix.encode(encoding='ascii'))\n    tmp = res\n    while tmp:\n        word = tmp.contents.str.decode('ascii')\n        found_words.append(word)\n        tmp = tmp.contents.next\n    cgaddag.gdg_destroy_result(res)\n    return found_words", "docstring": "Find all words ending with a suffix.\n\nArgs:\nsuffix: A suffix to be searched for.\n\nReturns:\nA list of all words found.", "source": "codesearchnet"}
{"code": "def load_metadata_for_topics(self, *topics, **kwargs):\n    if ('ignore_leadernotavailable' in kwargs):\n        ignore_leadernotavailable = kwargs['ignore_leadernotavailable']\n    else:\n        ignore_leadernotavailable = False\n    if topics:\n        self.reset_topic_metadata(*topics)\n    else:\n        self.reset_all_metadata()\n    resp = self.send_metadata_request(topics)\n    log.debug('Updating broker metadata: %s', resp.brokers)\n    log.debug('Updating topic metadata: %s', [topic for (_, topic, _) in resp.topics])\n    self.brokers = dict([(nodeId, BrokerMetadata(nodeId, host, port, None)) for (nodeId, host, port) in resp.brokers])\n    for (error, topic, partitions) in resp.topics:\n        if error:\n            error_type = kafka.errors.kafka_errors.get(error, UnknownError)\n            if (error_type in (UnknownTopicOrPartitionError, LeaderNotAvailableError)):\n                log.error('Error loading topic metadata for %s: %s (%s)', topic, error_type, error)\n                if (topic not in topics):\n                    continue\n                elif ((error_type is LeaderNotAvailableError) and ignore_leadernotavailable):\n                    continue\n            raise error_type(topic)\n        self.topic_partitions[topic] = {}\n        for (error, partition, leader, _, _) in partitions:\n            self.topic_partitions[topic][partition] = leader\n            topic_part = TopicPartition(topic, partition)\n            if error:\n                error_type = kafka.errors.kafka_errors.get(error, UnknownError)\n                if (error_type is LeaderNotAvailableError):\n                    log.error('No leader for topic %s partition %d', topic, partition)\n                    self.topics_to_brokers[topic_part] = None\n                    continue\n                elif (error_type is ReplicaNotAvailableError):\n                    log.debug('Some (non-leader) replicas not available for topic %s partition %d', topic, partition)\n                else:\n                    raise error_type(topic_part)\n            if (leader in self.brokers):\n                self.topics_to_brokers[topic_part] = self.brokers[leader]\n            else:\n                self.topics_to_brokers[topic_part] = BrokerMetadata(leader, None, None, None)", "docstring": "Fetch broker and topic-partition metadata from the server.\n\nUpdates internal data: broker list, topic/partition list, and\ntopic/partition -> broker map. This method should be called after\nreceiving any error.\n\nNote: Exceptions *will not* be raised in a full refresh (i.e. no topic\nlist). In this case, error codes will be logged as errors.\nPartition-level errors will also not be raised here (a single partition\nw/o a leader, for example).\n\nArguments:\n*topics (optional): If a list of topics is provided,\nthe metadata refresh will be limited to the specified topics\nonly.\nignore_leadernotavailable (bool): suppress LeaderNotAvailableError\nso that metadata is loaded correctly during auto-create.\nDefault: False.\n\nRaises:\nUnknownTopicOrPartitionError: Raised for topics that do not exist,\nunless the broker is configured to auto-create topics.\nLeaderNotAvailableError: Raised for topics that do not exist yet,\nwhen the broker is configured to auto-create topics. Retry\nafter a short backoff (topics/partitions are initializing).", "source": "codesearchnet"}
{"code": "def dict_to_pyxb(rp_dict):\n    \n    rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy()\n    rp_pyxb.replicationAllowed = rp_dict['allowed']\n    rp_pyxb.numberReplicas = rp_dict['num']\n    rp_pyxb.blockedMemberNode = rp_dict['block']\n    rp_pyxb.preferredMemberNode = rp_dict['pref']\n    normalize(rp_pyxb)\n    return rp_pyxb", "docstring": "Convert dict to ReplicationPolicy PyXB object.\n\nArgs:\nrp_dict: Native Python structure representing a Replication Policy.\n\nExample::\n\n{\n'allowed': True,\n'num': 3,\n'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},\n'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},\n}\n\nReturns:\nReplicationPolicy PyXB object.", "source": "juraj-google-style"}
{"code": "def all_label_values(self, label_list_ids=None):\n    values = set()\n    for utterance in self.utterances.values():\n        values = values.union(utterance.all_label_values(label_list_ids=label_list_ids))\n    return values", "docstring": "Return a set of all label-values occurring in this corpus.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists with an id contained in this list\nare considered.\n\nReturns:\n:class:`set`: A set of distinct label-values.", "source": "codesearchnet"}
{"code": "def console_set_default_foreground(con: tcod.console.Console, col: Tuple[(int, int, int)]) -> None:\n    lib.TCOD_console_set_default_foreground(_console(con), col)", "docstring": "Change the default foreground color for a console.\n\nArgs:\ncon (Console): Any Console instance.\ncol (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\n\n.. deprecated:: 8.5\nUse :any:`Console.default_fg` instead.", "source": "codesearchnet"}
{"code": "def projector(state, flatten=False):\n    \n    density_matrix = np.outer(state.conjugate(), state)\n    if flatten:\n        return density_matrix.flatten(order='F')\n    return density_matrix", "docstring": "maps a pure state to a state matrix\n\nArgs:\nstate (ndarray): the number of qubits\nflatten (bool): determine if state matrix of column work\nReturns:\nndarray:  state_mat(2**num, 2**num) if flatten is false\nndarray:  state_mat(4**num) if flatten is true stacked on by the column", "source": "juraj-google-style"}
{"code": "def expand_dims(self, axis):\n        \n        if axis <= self._distaxis:\n            subaxis = axis\n            new_distaxis = self._distaxis + 1\n        else:\n            subaxis = axis - 1\n            new_distaxis = self._distaxis\n        new_subts = [rts.expand_dims(subaxis) for rts in self._subarrays]\n        if axis == 0:\n            \n            return distob.DistArray(new_subts, new_distaxis)\n        else:\n            axislabels = self.labels[self._distaxis]\n            return DistTimeseries(new_subts, new_distaxis, axislabels)", "docstring": "Insert a new axis, at a given position in the array shape\nArgs:\naxis (int): Position (amongst axes) where new axis is to be inserted.", "source": "juraj-google-style"}
{"code": "def example_number_for_non_geo_entity(country_calling_code):\n    \n    metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)\n    if metadata is not None:\n        \n        \n        \n        \n        for desc in (metadata.mobile, metadata.toll_free, metadata.shared_cost, metadata.voip,\n                     metadata.voicemail, metadata.uan, metadata.premium_rate):\n            try:\n                if (desc is not None and desc.example_number is not None):\n                    return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)\n            except NumberParseException:\n                pass\n    return None", "docstring": "Gets a valid number for the specified country calling code for a non-geographical entity.\n\nArguments:\ncountry_calling_code -- The country calling code for a non-geographical entity.\n\nReturns a valid number for the non-geographical entity. Returns None when\nthe metadata does not contain such information, or the country calling\ncode passed in does not belong to a non-geographical entity.", "source": "juraj-google-style"}
{"code": "def _execute_adb_install(device: AndroidDevice, install_args: Iterable[str], timeout: int) -> None:\n    stderr_buffer = io.BytesIO()\n    stdout = device.adb.install(install_args, stderr=stderr_buffer, timeout=timeout)\n    stderr = stderr_buffer.getvalue().decode('utf-8').strip()\n    if not _is_apk_install_success(stdout, stderr):\n        adb_cmd = 'adb -s %s install %s' % (device.serial, ' '.join(install_args))\n        raise adb.AdbError(cmd=adb_cmd, stdout=stdout, stderr=stderr, ret_code=0)", "docstring": "Executes the adb install command.\n\nArgs:\ndevice: AndroidDevice, Mobly's Android controller object.\ninstall_args: list of strings, the args to be added to `adb install` cmd.\ntimeout: int, the number of seconds to wait before timing out.\n\nRaises:\nAdbError: installation failed.", "source": "github-repos"}
{"code": "def _image_channel_compress_bottom(inputs, model_hparams, name='bottom'):\n    num_channels = 3\n    with tf.variable_scope(name):\n        inputs = tf.to_float(inputs)\n        hp = model_hparams\n        if (hp.mode != tf.estimator.ModeKeys.PREDICT):\n            tf.summary.image('inputs', common_layers.tpu_safe_image_summary(inputs), max_outputs=2)\n        inputs = common_layers.convert_rgb_to_symmetric_real(inputs)\n        inputs_shape = common_layers.shape_list(inputs)\n        inputs = tf.reshape(inputs, [(- 1), inputs_shape[1], (inputs_shape[2] * inputs_shape[3]), 1])\n        outputs = tf.layers.conv2d(inputs, model_hparams.hidden_size, kernel_size=(1, num_channels), padding='VALID', strides=(1, num_channels), activation=tf.nn.relu, name='conv_input')\n        return outputs", "docstring": "Compresses channel-wise input pixels into whole pixel representions.\n\nPerform conversion of RGB pixel values to a real number in the range -1 to\n1. This combines pixel channels to form a representation of shape\n[img_len, img_len].\n\nArgs:\ninputs: Tensor representing RGB pixel intensities as integers, of shape\n[batch, img_len, img_len, channels].\nmodel_hparams: HParams, model hyperparmeters.\nname: string, scope.\n\nReturns:\nbody_input: Tensor of shape\n[batch, img_len, img_len, model_hparams.hidden_size].", "source": "codesearchnet"}
{"code": "def DeserializeFromDB(buffer):\n        \n        m = StreamManager.GetStream(buffer)\n        reader = BinaryReader(m)\n        v = StorageItem()\n        v.Deserialize(reader)\n        StreamManager.ReleaseStream(m)\n        return v", "docstring": "Deserialize full object.\n\nArgs:\nbuffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.\n\nReturns:\nStorageItem:", "source": "juraj-google-style"}
{"code": "def set_step_input_context(self, context):\n    logger.debug('starting')\n    if (self.in_parameters is not None):\n        parameter_count = len(self.in_parameters)\n        if (parameter_count > 0):\n            logger.debug(f\"Updating context with {parameter_count} 'in' parameters.\")\n            context.update(self.in_parameters)\n    logger.debug('done')", "docstring": "Append step's 'in' parameters to context, if they exist.\n\nAppend the[in] dictionary to the context. This will overwrite\nexisting values if the same keys are already in there. I.e if\nin_parameters has {'eggs': 'boiled'} and key 'eggs' already\nexists in context, context['eggs'] hereafter will be 'boiled'.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate - after method execution will contain the new\nupdated context.", "source": "codesearchnet"}
{"code": "def process(self, feed_item):\n    item = self.get(feed_item)\n    if item:\n        self._process_update(item, feed_item)\n        self._clean(item)\n        self._update(item, feed_item)\n    else:\n        new_item = self._process_new(feed_item)\n        self._clean(new_item)\n        item = self._insert(new_item, feed_item)\n        if self._id_field and feed_item.get(self._id_field, '').startswith('ext'):\n            store.map(self._entity, feed_item.get(self._id_field), item['id'])\n            store.set(self._entity, [feed_item[self._id_field]], item)\n        if self._search_field and feed_item.get(self._search_field, ''):\n            store.map(self._entity, feed_item.get(self._search_field), item['id'])\n            store.set(self._entity, [feed_item[self._search_field]], item)\n    if item:\n        feed_item[self._id_field] = item['id']\n        store.set(self._entity, [item['id']], item)\n    self._post_process(feed_item, item)\n    return item", "docstring": "Processes a Bulkdozer feed item.\n\nThis method identifies if the item needs to be inserted or updated, cleans\nit, performs the CM operations required, and update the feed item with newly\ncreated ids and name lookups so that the feed can be updated.\n\nArgs:\nfeed_item: Bulkdozer feed item to process.\n\nReturns:\nNewly created or updated CM object.", "source": "github-repos"}
{"code": "class OfflineDetector(AnomalyDetector):\n\n    @staticmethod\n    def score_prediction_adapter(keyed_prediction: tuple[KeyT, PredictionResult]) -> tuple[KeyT, AnomalyPrediction]:\n        \n        key, prediction = keyed_prediction\n        score = prediction.inference\n        assert isinstance(score, SupportsFloat)\n        return (key, AnomalyPrediction(score=float(score)))\n\n    @staticmethod\n    def label_prediction_adapter(keyed_prediction: tuple[KeyT, PredictionResult]) -> tuple[KeyT, AnomalyPrediction]:\n        \n        key, prediction = keyed_prediction\n        label = prediction.inference\n        assert isinstance(label, SupportsInt)\n        return (key, AnomalyPrediction(label=int(label)))\n\n    def __init__(self, keyed_model_handler: KeyedModelHandler[Any, beam.Row, PredictionT, Any], run_inference_args: Optional[dict[str, Any]]=None, **kwargs):\n        super().__init__(**kwargs)\n        self._keyed_model_handler = keyed_model_handler\n        self._run_inference_args = run_inference_args or {}\n        self._run_inference_args['model_identifier'] = self._model_id\n\n    def learn_one(self, x: beam.Row) -> None:\n        \n        raise NotImplementedError\n\n    def score_one(self, x: beam.Row) -> Optional[float]:\n        \n        raise NotImplementedError", "docstring": "A offline anomaly detector that uses a provided model handler for scoring.\n\nArgs:\nkeyed_model_handler: The model handler to use for inference.\nRequires a `KeyModelHandler[Any, Row, PredictionT, Any]` instance.\nrun_inference_args: Optional arguments to pass to RunInference\n**kwargs: Additional keyword arguments to pass to the base\nAnomalyDetector class.", "source": "github-repos"}
{"code": "def get(self, key, default=None):\n\t\t\n\t\tif key in self._nodes: return self._nodes[key]\n\t\telse: return default", "docstring": "Get\n\nReturns the node of a specific key from the parent\n\nArguments:\nkey {str} -- The key to get\ndefault {mixed} Value to return if the key does not exist\n\nReturns:\nmixed", "source": "juraj-google-style"}
{"code": "def register_extensions(self, exts, force=False):\n        \n        for ext_in, ext_out in exts.items():\n            self.register_extension(ext_in, ext_out, force)", "docstring": "Add/register extensions.\n\nArgs:\nexts (dict):\nforce (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing.\nIf the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.", "source": "juraj-google-style"}
{"code": "def matches(self, new, old):\n    if (isinstance(new, np.ndarray) or isinstance(old, np.ndarray)):\n        return np.array_equal(new, old)\n    if pd:\n        if (isinstance(new, pd.Series) or isinstance(old, pd.Series)):\n            return np.array_equal(new, old)\n        if (isinstance(new, pd.Index) or isinstance(old, pd.Index)):\n            return np.array_equal(new, old)\n    try:\n        if (isinstance(new, dict) and isinstance(old, dict)):\n            if (set(new.keys()) != set(old.keys())):\n                return False\n            return all((self.matches(new[k], old[k]) for k in new))\n        return (new == old)\n    except ValueError:\n        return False", "docstring": "Whether two parameters match values.\n\nIf either ``new`` or ``old`` is a NumPy array or Pandas Series or Index,\nthen the result of ``np.array_equal`` will determine if the values match.\n\nOtherwise, the result of standard Python equality will be returned.\n\nReturns:\nTrue, if new and old match, False otherwise", "source": "codesearchnet"}
{"code": "def _get_args_name_from_parser(parser):\n    return [action.dest for action in parser._actions if (not isinstance(action, argparse._HelpAction))]", "docstring": "Retrieve the name of the function argument linked to the given parser.\n\nArgs:\nparser: a function parser", "source": "codesearchnet"}
{"code": "def maximum(self, vars_list: List[str]) -> 'TensorFluent':\n    return self._aggregation_op(tf.reduce_max, self, vars_list)", "docstring": "Returns the TensorFluent for the maximum aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the maximum aggregation function.", "source": "codesearchnet"}
{"code": "def to_datetime(arg):\n    \n\n\n    if isinstance(arg, datetime.datetime):\n        return arg\n    elif arg == 0:\n        return datetime.datetime.now()\n    elif isinstance(arg, str):\n        if arg == \"now\":\n            arg = datetime.datetime.now()\n        elif arg == \"?\":\n            arg = datetime.datetime(1970, 1, 1)\n        else:\n            arg = str2dt(arg)\n    elif isinstance(arg, datetime.date):\n        arg = date2datetime(arg)\n    elif isinstance(arg, (int, float)):\n        \n        arg = ts2dt(arg)\n    else:\n        raise TypeError(\"Wrong type for argument 'arg': {}\".format(arg.__class__.__name__))\n\n    return arg", "docstring": "Tries to convert any type of argument to datetime\n\nArgs:\narg: datetime, date, or str. If \"?\", will be converted to 1970-1-1.\nif 0 or \"now\", will be converted to datetime.datetime.now()", "source": "juraj-google-style"}
{"code": "def gt(self, other, axis=\"columns\", level=None):\n        \n        return self._binary_op(\"gt\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is greater than other.\n\nArgs:\nother: A DataFrame or Series or scalar to compare to.\naxis: The axis to perform the gt over.\nlevel: The Multilevel index level to apply gt over.\n\nReturns:\nA new DataFrame filled with Booleans.", "source": "juraj-google-style"}
{"code": "def unparse_headers(hdrs):\n    \n    return \"\".join([unparse_header(n, v) for n, v in hdrs.items()]) + \"\\r\\n\"", "docstring": "Parse a dictionary of headers to a string.\n\nArgs:\nhdrs: A dictionary of headers.\n\nReturns:\nThe headers as a string that can be used in an NNTP POST.", "source": "juraj-google-style"}
{"code": "def _probe_characteristics(self, conn, services, timeout=5.0):\n    for service in services.values():\n        (success, result) = self._enumerate_handles(conn, service['start_handle'], service['end_handle'])\n        if (not success):\n            return (False, None)\n        attributes = result['attributes']\n        service['characteristics'] = {}\n        last_char = None\n        for (handle, attribute) in attributes.items():\n            if (attribute['uuid'].hex[(- 4):] == '0328'):\n                (success, result) = self._read_handle(conn, handle, timeout)\n                if (not success):\n                    return (False, None)\n                value = result['data']\n                char = parse_characteristic_declaration(value)\n                service['characteristics'][char['uuid']] = char\n                last_char = char\n            elif (attribute['uuid'].hex[(- 4):] == '0229'):\n                if (last_char is None):\n                    return (False, None)\n                (success, result) = self._read_handle(conn, handle, timeout)\n                if (not success):\n                    return (False, None)\n                value = result['data']\n                assert (len(value) == 2)\n                (value,) = unpack('<H', value)\n                last_char['client_configuration'] = {'handle': handle, 'value': value}\n    return (True, {'services': services})", "docstring": "Probe gatt services for all associated characteristics in a BLE device\n\nArgs:\nconn (int): the connection handle to probe\nservices (dict): a dictionary of services produced by probe_services()\ntimeout (float): the maximum number of seconds to spend in any single task", "source": "codesearchnet"}
{"code": "def flush(self):\n    try:\n        termios.tcdrain(self._fd)\n    except termios.error as e:\n        raise SerialError(e.errno, ('Flushing serial port: ' + e.strerror))", "docstring": "Flush the write buffer of the serial port, blocking until all bytes\nare written.\n\nRaises:\nSerialError: if an I/O or OS error occurs.", "source": "codesearchnet"}
{"code": "def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:\n    return device_map", "docstring": "Override this method if you want to pass a override the existing device map with a new\none. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is\npassed, the device_map is set to `\"auto\"``\n\nArgs:\ndevice_map (`Union[dict, str]`, *optional*):\nThe device_map that is passed through the `from_pretrained` method.", "source": "github-repos"}
{"code": "def retry(retries=0, delay=timedelta(), conditions=[]):\n    \n    delay_in_seconds = delay.total_seconds()\n\n    def decorator(function):\n        \n        @wraps(function)\n        def wrapper(*args, **kwargs):\n            \n            func = partial(function, *args, **kwargs)\n            return retry_loop(retries, delay_in_seconds, conditions, func)\n\n        return wrapper\n\n    return decorator", "docstring": "A decorator for making a function that retries on failure.\n\nArgs:\nretries (Integral): The number of times to retry if a failure occurs.\ndelay (timedelta, optional, 0 seconds): A timedelta representing\nthe amount of time to delay between retries.\nconditions (list): A list of retry conditions.", "source": "juraj-google-style"}
{"code": "def build_cfg(cls, node):\n    if (not isinstance(node, gast.FunctionDef)):\n        raise TypeError('input must be a function definition')\n    cfg = cls()\n    cfg.entry = Node(node.args)\n    cfg.head = [cfg.entry]\n    cfg.visit_statements(node.body)\n    cfg.exit = Node(None)\n    cfg.set_head(cfg.exit)\n    cfg.backlink(cfg.entry)\n    return cfg", "docstring": "Build a CFG for a function.\n\nArgs:\nnode: A function definition the body of which to analyze.\n\nReturns:\nA CFG object.\n\nRaises:\nTypeError: If the input is not a function definition.", "source": "codesearchnet"}
{"code": "def _find_current_phase(self, global_step):\n    epoch_size = sum((phase.steps for phase in self._phases))\n    epoch = int((global_step \n    steps_in = (global_step % epoch_size)\n    for phase in self._phases:\n        if (steps_in < phase.steps):\n            return (phase, epoch, steps_in)\n        steps_in -= phase.steps", "docstring": "Determine the current phase based on the global step.\n\nThis ensures continuing the correct phase after restoring checkoints.\n\nArgs:\nglobal_step: The global number of steps performed across all phases.\n\nReturns:\nTuple of phase object, epoch number, and phase steps within the epoch.", "source": "codesearchnet"}
{"code": "def decode_value(value, client):\n    value_type = value.WhichOneof('value_type')\n    if (value_type == 'null_value'):\n        return None\n    elif (value_type == 'boolean_value'):\n        return value.boolean_value\n    elif (value_type == 'integer_value'):\n        return value.integer_value\n    elif (value_type == 'double_value'):\n        return value.double_value\n    elif (value_type == 'timestamp_value'):\n        return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)\n    elif (value_type == 'string_value'):\n        return value.string_value\n    elif (value_type == 'bytes_value'):\n        return value.bytes_value\n    elif (value_type == 'reference_value'):\n        return reference_value_to_document(value.reference_value, client)\n    elif (value_type == 'geo_point_value'):\n        return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude)\n    elif (value_type == 'array_value'):\n        return [decode_value(element, client) for element in value.array_value.values]\n    elif (value_type == 'map_value'):\n        return decode_dict(value.map_value.fields, client)\n    else:\n        raise ValueError('Unknown ``value_type``', value_type)", "docstring": "Converts a Firestore protobuf ``Value`` to a native Python value.\n\nArgs:\nvalue (google.cloud.firestore_v1beta1.types.Value): A\nFirestore protobuf to be decoded / parsed / converted.\nclient (~.firestore_v1beta1.client.Client): A client that has\na document factory.\n\nReturns:\nUnion[NoneType, bool, int, float, datetime.datetime, \\\nstr, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native\nPython value converted from the ``value``.\n\nRaises:\nNotImplementedError: If the ``value_type`` is ``reference_value``.\nValueError: If the ``value_type`` is unknown.", "source": "codesearchnet"}
{"code": "def add_signature_block(src_fileobj, dest_fileobj, signing_algorithm, signature=None):\n    algo_id = {'sha1': 1, 'sha384': 2}[signing_algorithm]\n    if (not signature):\n        signature = make_dummy_signature(algo_id)\n    src_fileobj.seek(0)\n    mardata = mar.parse_stream(src_fileobj)\n    header = mardata.header\n    dest_fileobj.write(mar_header.build(header))\n    sig = dict(algorithm_id=algo_id, size=len(signature), signature=signature)\n    filesize = 0\n    sigs_offset = dest_fileobj.tell()\n    sigs = sigs_header.build(dict(filesize=filesize, count=1, sigs=[sig]))\n    dest_fileobj.write(sigs)\n    dest_fileobj.write(extras_header.build(mardata.additional))\n    data_offset = dest_fileobj.tell()\n    src_fileobj.seek(mardata.data_offset)\n    write_to_file(takeexactly(src_fileobj, mardata.data_length), dest_fileobj)\n    index_offset = dest_fileobj.tell()\n    index = mardata.index\n    data_offset_delta = (data_offset - mardata.data_offset)\n    for e in index.entries:\n        e.offset += data_offset_delta\n    dest_fileobj.write(index_header.build(index))\n    filesize = dest_fileobj.tell()\n    dest_fileobj.seek(0)\n    header.index_offset = index_offset\n    dest_fileobj.write(mar_header.build(header))\n    dest_fileobj.seek(sigs_offset)\n    sigs = sigs_header.build(dict(filesize=filesize, count=1, sigs=[sig]))\n    dest_fileobj.write(sigs)", "docstring": "Add a signature block to marfile, a MarReader object.\n\nProductversion and channel are preserved, but any existing signatures are overwritten.\n\nArgs:\nsrc_fileobj (file object): The input MAR file to add a signature to\ndest_fileobj (file object): File object to write new MAR file to. Must be open in w+b mode.\nsigning_algorithm (str): One of 'sha1', or 'sha384'\nsignature (bytes): Signature to write, or None to use a dummy signature", "source": "codesearchnet"}
{"code": "def GetAPIScope(api_name):\n    try:\n        return SCOPES[api_name]\n    except KeyError:\n        raise googleads.errors.GoogleAdsValueError(('Invalid API name \"%s\" provided. Acceptable values are: %s' % (api_name, SCOPES.keys())))", "docstring": "Retrieves the scope for the given API name.\n\nArgs:\napi_name: A string identifying the name of the API we want to retrieve a\nscope for.\n\nReturns:\nA string that is the scope for the given API name.\n\nRaises:\nGoogleAdsValueError: If the given api_name is invalid; accepted values are\n\"adwords\" and \"ad_manager\".", "source": "codesearchnet"}
{"code": "def set_type(self, weather_type):\n        \n        weather_type.lower()\n        exists = self.has_type(weather_type)\n        if exists:\n            self.add_string_parameters(weather_type)", "docstring": "Set the weather type.\n\nArgs:\nweather_type (str): The weather type.", "source": "juraj-google-style"}
{"code": "def _FormatOpaqueToken(self, token_data):\n    \n    data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])\n    return {'data': data}", "docstring": "Formats an opaque token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_opaque): AUT_OPAQUE token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def _on_action(self, sequence, topic, message):\n    try:\n        slug = None\n        parts = topic.split('/')\n        slug = parts[(- 3)]\n        uuid = self._extract_device_uuid(slug)\n    except Exception as exc:\n        self._logger.warn('Error parsing slug in action handler (slug=%s, topic=%s)', slug, topic)\n        return\n    if messages.DisconnectCommand.matches(message):\n        self._logger.debug('Received disconnect command for device 0x%X', uuid)\n        key = message['key']\n        client = message['client']\n        self._loop.add_callback(self._disconnect_from_device, uuid, key, client)\n    elif (messages.OpenInterfaceCommand.matches(message) or messages.CloseInterfaceCommand.matches(message)):\n        self._logger.debug('Received %s command for device 0x%X', message['operation'], uuid)\n        key = message['key']\n        client = message['client']\n        oper = message['operation']\n        if (oper == 'open_interface'):\n            self._loop.add_callback(self._open_interface, client, uuid, message['interface'], key)\n        else:\n            self._loop.add_callback(self._close_interface, client, uuid, message['interface'], key)\n    elif messages.RPCCommand.matches(message):\n        rpc_msg = messages.RPCCommand.verify(message)\n        client = rpc_msg['client']\n        address = rpc_msg['address']\n        rpc = rpc_msg['rpc_id']\n        payload = rpc_msg['payload']\n        key = rpc_msg['key']\n        timeout = rpc_msg['timeout']\n        self._loop.add_callback(self._send_rpc, client, uuid, address, rpc, payload, timeout, key)\n    elif messages.ScriptCommand.matches(message):\n        script_msg = messages.ScriptCommand.verify(message)\n        key = script_msg['key']\n        client = script_msg['client']\n        script = script_msg['script']\n        self._loop.add_callback(self._send_script, client, uuid, script, key, (script_msg['fragment_index'], script_msg['fragment_count']))\n    else:\n        self._logger.error('Unsupported message received (topic=%s) (message=%s)', topic, str(message))", "docstring": "Process a command action that we received on behalf of a device.\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "codesearchnet"}
{"code": "def header_present(self, *names):\n    for name in names:\n        headers = {name: re.compile('(.*)')}\n        self.add_matcher(matcher('HeadersMatcher', headers))", "docstring": "Defines a new header matcher expectation that must be present in the\noutgoing request in order to be satisfied, no matter what value it\nhosts.\n\nHeader keys are case insensitive.\n\nArguments:\n*names (str): header or headers names to match.\n\nReturns:\nself: current Mock instance.\n\nExample::\n\n(pook.get('server.com/api')\n.header_present('content-type'))", "source": "codesearchnet"}
{"code": "def datastore(self, domain, data_type, mapping=None):\n    from .tcex_datastore import TcExDataStore\n    return TcExDataStore(self, domain, data_type, mapping)", "docstring": "Get instance of the DataStore module.\n\nArgs:\ndomain (str): The domain can be either \"system\", \"organization\", or \"local\". When using\n\"organization\" the data store can be accessed by any Application in the entire org,\nwhile \"local\" access is restricted to the App writing the data. The \"system\" option\nshould not be used in almost all cases.\ndata_type (str): The data type descriptor (e.g., tc:whois:cache).\n\nReturns:\nobject: An instance of the DataStore Class.", "source": "codesearchnet"}
{"code": "def get_browser_controller(browser=None):\n    browser = settings.browser(browser)\n    if (browser is not None):\n        if (browser == 'none'):\n            controller = DummyWebBrowser()\n        else:\n            controller = webbrowser.get(browser)\n    else:\n        controller = webbrowser\n    return controller", "docstring": "Return a browser controller.\n\nArgs:\nbrowser (str or None) : browser name, or ``None`` (default: ``None``)\nIf passed the string ``'none'``, a dummy web browser controller\nis returned\n\nOtherwise, use the value to select an appropriate controller using\nthe ``webbrowser`` standard library module. In the value is\n``None`` then a system default is used.\n\n.. note::\nIf the environment variable ``BOKEH_BROWSER`` is set, it will take\nprecedence.\n\nReturns:\ncontroller : a web browser controller", "source": "codesearchnet"}
{"code": "def from_any_pb(pb_type, any_pb):\n    msg = pb_type()\n    if callable(getattr(pb_type, 'pb', None)):\n        msg_pb = pb_type.pb(msg)\n    else:\n        msg_pb = msg\n    if (not any_pb.Unpack(msg_pb)):\n        raise TypeError('Could not convert {} to {}'.format(any_pb.__class__.__name__, pb_type.__name__))\n    return msg", "docstring": "Converts an ``Any`` protobuf to the specified message type.\n\nArgs:\npb_type (type): the type of the message that any_pb stores an instance\nof.\nany_pb (google.protobuf.any_pb2.Any): the object to be converted.\n\nReturns:\npb_type: An instance of the pb_type message.\n\nRaises:\nTypeError: if the message could not be converted.", "source": "codesearchnet"}
{"code": "def mirror(self):\n    if (not self._definition):\n        return self.copy()\n    reverse_inst = self.copy(name=(self.name + '_mirror'))\n    reverse_inst.definition = []\n    for (inst, qargs, cargs) in reversed(self._definition):\n        reverse_inst._definition.append((inst.mirror(), qargs, cargs))\n    return reverse_inst", "docstring": "For a composite instruction, reverse the order of sub-gates.\n\nThis is done by recursively mirroring all sub-instructions.\nIt does not invert any gate.\n\nReturns:\nInstruction: a fresh gate with sub-gates reversed", "source": "codesearchnet"}
{"code": "def _MaybeColocateWith(inputs):\n    if not inputs:\n        yield\n    else:\n        with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):\n            yield", "docstring": "A context manager for (maybe) colocating with a list of input tensors.\n\nArgs:\ninputs: A list of `Tensor` or `Operation` objects.\n\nReturns:\nA context manager.", "source": "github-repos"}
{"code": "def example_serving_receiver_fn(tf_transform_output, schema):\n    raw_feature_spec = taxi.get_raw_feature_spec(schema)\n    raw_feature_spec.pop(taxi.LABEL_KEY)\n    raw_input_fn = tf_estimator.export.build_parsing_serving_input_receiver_fn(raw_feature_spec, default_batch_size=None)\n    serving_input_receiver = raw_input_fn()\n    transformed_features = tf_transform_output.transform_raw_features(serving_input_receiver.features)\n    return tf_estimator.export.ServingInputReceiver(transformed_features, serving_input_receiver.receiver_tensors)", "docstring": "Build the serving in inputs.\n\nArgs:\ntf_transform_output: A TFTransformOutput.\nschema: the schema of the input data.\n\nReturns:\nTensorflow graph which parses examples, applying tf-transform to them.", "source": "github-repos"}
{"code": "def subcomponents(self, subcomponents):\n        \n\n        for arg in self.args:\n            if arg.__class__.__name__ == \"Function\":\n                subcomponents.append(arg.to_string())\n                if arg.function_type == \"primary\":\n                    arg.subcomponents(subcomponents)\n            else:\n                subcomponents.append(arg.to_string())\n\n        return subcomponents", "docstring": "Generate subcomponents of the BEL subject or object\n\nThese subcomponents are used for matching parts of a BEL\nsubject or Object in the Edgestore.\n\nArgs:\nAST\nsubcomponents:  Pass an empty list to start a new subcomponents request\n\nReturns:\nList[str]: subcomponents of BEL subject or object", "source": "juraj-google-style"}
{"code": "def get_header(vcf_file_path):\n    logger.info('Parsing header of file {0}'.format(vcf_file_path))\n    head = HeaderParser()\n    handle = get_vcf_handle(infile=vcf_file_path)\n    for line in handle:\n        line = line.rstrip()\n        if line.startswith('\n            if line.startswith('\n                head.parse_meta_data(line)\n            else:\n                head.parse_header_line(line)\n        else:\n            break\n    handle.close()\n    return head", "docstring": "Parse the header and return a header object\n\nArgs:\nvcf_file_path(str): Path to vcf\n\nReturns:\nhead: A HeaderParser object", "source": "codesearchnet"}
{"code": "def sampler_to_iterator(dataset, sampler):\n    \n    for sample in sampler:\n        if isinstance(sample, (list, tuple)):\n            \n            yield [dataset[i] for i in sample]\n        else:\n            \n            yield dataset[sample]", "docstring": "Given a batch sampler or sampler returns examples instead of indices\n\nArgs:\ndataset (torch.utils.data.Dataset): Dataset to sample from.\nsampler (torch.utils.data.sampler.Sampler): Sampler over the dataset.\n\nReturns:\ngenerator over dataset examples", "source": "juraj-google-style"}
{"code": "def get_readonly_field_data(field_name, instance, view=None, fun_kwargs=None):\n    \n    fun_kwargs = fun_kwargs or {}\n\n    if view:\n        view_readonly_data = _get_view_readonly_data(field_name, view, fun_kwargs)\n        if view_readonly_data is not None:\n            return view_readonly_data\n\n    field_data = _get_model_readonly_data(field_name, instance, fun_kwargs)\n    if field_data is not None:\n        return field_data\n\n    raise FieldOrMethodDoesNotExist('Field or method with name {} not found'.format(field_name))", "docstring": "Returns field humanized value, label and widget which are used to display of instance or view readonly data.\nArgs:\nfield_name: name of the field which will be displayed\ninstance: model instance\nview: view instance\nfun_kwargs: kwargs that can be used inside method call\n\nReturns:\nfield humanized value, label and widget which are used to display readonly data", "source": "juraj-google-style"}
{"code": "def __init__(self, offset):\n        \n        super(TimeZoneOffset, self).__init__()\n        if isinstance(offset, datetime.timedelta):\n            offset = total_seconds(offset) / 60\n        self.__offset = offset", "docstring": "Initialize a time zone offset.\n\nArgs:\noffset: Integer or timedelta time zone offset, in minutes from UTC.\nThis can be negative.", "source": "juraj-google-style"}
{"code": "def get_submission_and_student(uuid, read_replica=False):\n    submission = get_submission(uuid, read_replica=read_replica)\n    cache_key = 'submissions.student_item.{}'.format(submission['student_item'])\n    try:\n        cached_student_item = cache.get(cache_key)\n    except Exception:\n        logger.exception('Error occurred while retrieving student item from the cache')\n        cached_student_item = None\n    if (cached_student_item is not None):\n        submission['student_item'] = cached_student_item\n    else:\n        try:\n            student_item_qs = StudentItem.objects\n            if read_replica:\n                student_item_qs = _use_read_replica(student_item_qs)\n            student_item = student_item_qs.get(id=submission['student_item'])\n            submission['student_item'] = StudentItemSerializer(student_item).data\n            cache.set(cache_key, submission['student_item'])\n        except Exception as ex:\n            err_msg = 'Could not get submission due to error: {}'.format(ex)\n            logger.exception(err_msg)\n            raise SubmissionInternalError(err_msg)\n    return submission", "docstring": "Retrieve a submission by its unique identifier, including the associated student item.\n\nArgs:\nuuid (str): the unique identifier of the submission.\n\nKwargs:\nread_replica (bool): If true, attempt to use the read replica database.\nIf no read replica is available, use the default database.\n\nReturns:\nSerialized Submission model (dict) containing a serialized StudentItem model\n\nRaises:\nSubmissionNotFoundError: Raised if the submission does not exist.\nSubmissionRequestError: Raised if the search parameter is not a string.\nSubmissionInternalError: Raised for unknown errors.", "source": "codesearchnet"}
{"code": "def energies(self, samples_like, dtype=np.float):\n        \n        samples, labels = as_samples(samples_like)\n\n        if all(v == idx for idx, v in enumerate(labels)):\n            ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(dtype=dtype)\n        else:\n            ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(variable_order=labels, dtype=dtype)\n\n        energies = samples.dot(ldata) + (samples[:, irow]*samples[:, icol]).dot(qdata) + offset\n        return np.asarray(energies, dtype=dtype)", "docstring": "Determine the energies of the given samples.\n\nArgs:\nsamples_like (samples_like):\nA collection of raw samples. `samples_like` is an extension of NumPy's array_like\nstructure. See :func:`.as_samples`.\n\ndtype (:class:`numpy.dtype`):\nThe data type of the returned energies.\n\nReturns:\n:obj:`numpy.ndarray`: The energies.", "source": "juraj-google-style"}
{"code": "def start_log_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, redis_password=None):\n    log_monitor_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'log_monitor.py')\n    command = [sys.executable, '-u', log_monitor_filepath, '--redis-address={}'.format(redis_address), '--logs-dir={}'.format(logs_dir)]\n    if redis_password:\n        command += ['--redis-password', redis_password]\n    process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_LOG_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file)\n    return process_info", "docstring": "Start a log monitor process.\n\nArgs:\nredis_address (str): The address of the Redis instance.\nlogs_dir (str): The directory of logging files.\nstdout_file: A file handle opened for writing to redirect stdout to. If\nno redirection should happen, then this should be None.\nstderr_file: A file handle opened for writing to redirect stderr to. If\nno redirection should happen, then this should be None.\nredis_password (str): The password of the redis server.\n\nReturns:\nProcessInfo for the process that was started.", "source": "codesearchnet"}
{"code": "def attention_mask_same_segment(\n    query_segment, memory_segment=None, dtype=tf.float32):\n  \n  memory_segment = rename_length_to_memory_length(\n      memory_segment or query_segment)\n  return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9", "docstring": "Bias for attention where attention between segments is disallowed.\n\nArgs:\nquery_segment: a mtf.Tensor with shape [..., length_dim]\nmemory_segment: a mtf.Tensor with shape [..., memory_length_dim]\ndtype: a tf.dtype\n\nReturns:\na mtf.Tensor with shape [..., length_dim, memory_length_dim]", "source": "juraj-google-style"}
{"code": "def get_aws_unique_id(timeout=DEFAULT_AWS_TIMEOUT):\n    try:\n        resp = requests.get(AWS_ID_URL, timeout=timeout).json()\n    except requests.exceptions.ConnectTimeout:\n        _logger.warning('Connection timeout when determining AWS unique ID. Not using AWS unique ID.')\n        return None\n    else:\n        aws_id = '{0}_{1}_{2}'.format(resp['instanceId'], resp['region'], resp['accountId'])\n        _logger.debug('Using AWS unique ID %s.', aws_id)\n        return aws_id", "docstring": "Determine the current AWS unique ID\n\nArgs:\ntimeout (int): How long to wait for a response from AWS metadata IP", "source": "codesearchnet"}
{"code": "def set_client_cmd(self, *args):\n        \n        self.client_cmd.update(args)\n        self.output['client_cmd'] = list(self.client_cmd)", "docstring": "Adds given cmd(s) to ``self.output['client_cmd']``\n\nArgs:\n*args: Client commands.", "source": "juraj-google-style"}
{"code": "def _get_tensorrt_rewriter_config(conversion_params, is_dynamic_op=None, max_batch_size=None, is_v2=False, disable_non_trt_optimizers=False, use_implicit_batch=True, profile_strategy=PROFILE_STRATEGY_RANGE):\n    _check_conversion_params(conversion_params, is_v2=is_v2)\n    if is_v2 and is_dynamic_op is not None and (not is_dynamic_op):\n        raise ValueError('is_dynamic_op is either None or True for TF2')\n    if not is_v2 and is_dynamic_op is None:\n        raise ValueError(\"is_dynamic_op can't be None for TF1\")\n    if (is_dynamic_op is None or is_dynamic_op) and max_batch_size is not None:\n        raise ValueError('max_batch_size has to be None for TF2 or when is_dynamic_op == True in TF1')\n    if is_dynamic_op is not None and (not is_dynamic_op) and (not isinstance(max_batch_size, int)):\n        raise ValueError('max_batch_size has to be an integer for is_dynamic_op==False in TF1')\n    rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig()\n    rewriter_config_with_trt.remapping = False\n    rewriter_config_with_trt.experimental_disable_folding_quantization_emulation = trt_utils.is_linked_tensorrt_version_greater_equal(8, 0, 0) or trt_utils.is_loaded_tensorrt_version_greater_equal(8, 0, 0)\n    if not disable_non_trt_optimizers:\n        rewriter_config_with_trt.optimizers.extend(['pruning', 'debug_stripper', 'layout', 'dependency', 'constfold', 'common_subgraph_elimination'])\n    rewriter_config_with_trt.meta_optimizer_iterations = rewriter_config_pb2.RewriterConfig.ONE\n    optimizer = rewriter_config_with_trt.custom_optimizers.add()\n    if not disable_non_trt_optimizers:\n        rewriter_config_with_trt.custom_optimizers.add().name = 'constfold'\n    optimizer.name = 'TensorRTOptimizer'\n    optimizer.parameter_map['minimum_segment_size'].i = conversion_params.minimum_segment_size\n    optimizer.parameter_map['max_workspace_size_bytes'].i = conversion_params.max_workspace_size_bytes\n    optimizer.parameter_map['precision_mode'].s = _to_bytes(conversion_params.precision_mode)\n    optimizer.parameter_map['maximum_cached_engines'].i = conversion_params.maximum_cached_engines\n    optimizer.parameter_map['use_calibration'].b = conversion_params.use_calibration\n    optimizer.parameter_map['is_dynamic_op'].b = is_dynamic_op\n    optimizer.parameter_map['allow_build_at_runtime'].b = conversion_params.allow_build_at_runtime\n    if max_batch_size is not None:\n        optimizer.parameter_map['max_batch_size'].i = max_batch_size\n    optimizer.parameter_map['use_implicit_batch'].b = use_implicit_batch\n    if not use_implicit_batch:\n        optimizer.parameter_map['profile_strategy'].s = _to_bytes(profile_strategy.lower())\n    if disable_non_trt_optimizers:\n        trt_utils.disable_non_trt_optimizers_in_rewriter_config(rewriter_config_with_trt)\n    return rewriter_config_with_trt", "docstring": "Returns a RewriterConfig proto for TRT transformation.\n\nArgs:\nconversion_params: a TrtConversionParams instance.\nis_dynamic_op: whether to use dynamic engines.\nmax_batch_size: maximum batch size for static engines.\nis_v2: whether we're getting a RewriterConfig for TF 2.0.\ndisable_non_trt_optimizers: Turn off all default Grappler optimizers.\nuse_implicit_batch: Whether to use implicit batch or explicit batch.\nprofile_strategy: dynamic shape optimization profile strategy.\n\nReturns:\nA RewriterConfig proto which sets a TensorRTOptimizer to run Grappler.\n\nRaises:\nTypeError: if any of the parameters are of unexpected type.\nValueError: if any of the 
parameters are of unexpected value.", "source": "github-repos"}
{"code": "def create_asset_delivery_policy(access_token, ams_account, key_delivery_url):\n    \n    path = '/AssetDeliveryPolicies'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = '{ \\\n\t\t\"Name\":\"AssetDeliveryPolicy\", \\\n\t\t\"AssetDeliveryProtocol\":\"4\", \\\n\t\t\"AssetDeliveryPolicyType\":\"3\", \\\n\t\t\"AssetDeliveryConfiguration\":\"[{ \\\n\t\t\t\\\\\"Key\\\\\":\\\\\"2\\\\\", \\\n\t\t\t\\\\\"Value\\\\\":\\\\\"' + key_delivery_url + '\\\\\"}]\" \\\n\t}'\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Asset Delivery Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nams_account (str): Media Service Account.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def __new__(cls, input_array, vscale=None, check_rank=None):\n        \n        obj = np.asarray(input_array).view(cls)\n        obj.rank = len(obj.shape)\n\n        if check_rank and check_rank != obj.rank:\n            raise ValueError(\"{} input must be rank {}\".format(\n                obj.__class__.__name__, check_rank))\n\n        vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank \n        obj._vscale = np.ones(vshape)\n        if vscale is not None:\n            obj._vscale = vscale\n        if obj._vscale.shape != vshape:\n            raise ValueError(\"Voigt scaling matrix must be the shape of the \"\n                             \"voigt notation matrix or vector.\")\n        if not all([i == 3 for i in obj.shape]):\n            raise ValueError(\"Pymatgen only supports 3-dimensional tensors, \"\n                             \"and default tensor constructor uses standard \"\n                             \"notation.  To construct from voigt notation, use\"\n                             \" {}.from_voigt\".format(obj.__class__.__name__))\n        return obj", "docstring": "Create a Tensor object.  Note that the constructor uses __new__\nrather than __init__ according to the standard method of\nsubclassing numpy ndarrays.\n\nArgs:\ninput_array: (array-like with shape 3^N): array-like representing\na tensor quantity in standard (i. e. non-voigt) notation\nvscale: (N x M array-like): a matrix corresponding\nto the coefficients of the voigt-notation tensor", "source": "juraj-google-style"}
{"code": "def delete_if_exists(self, **kwargs):\n        \n        try:\n            self.get(**kwargs).blocking_delete()\n            return True\n        except ObjectDoesNotExist:\n            return False", "docstring": "Deletes an object if it exists in database according to given query\nparameters and returns True otherwise does nothing and returns False.\n\nArgs:\n**kwargs: query parameters\n\nReturns(bool): True or False", "source": "juraj-google-style"}
{"code": "def encode(request, data):\n    if (data is None):\n        return request\n    request.add_header('Content-Type', 'application/json')\n    request.data = json.dumps(data)\n    return request", "docstring": "Add request content data to request body, set Content-type header.\n\nShould be overridden by subclasses if not using JSON encoding.\n\nArgs:\nrequest (HTTPRequest): The request object.\ndata (dict, None): Data to be encoded.\n\nReturns:\nHTTPRequest: The request object.", "source": "codesearchnet"}
{"code": "def log_prob(self, value, name='log_prob'):\n    return self._call_log_prob(value, name)", "docstring": "Log probability density/mass function.\n\nArgs:\nvalue: `float` or `double` `Tensor`.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nlog_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with\nvalues of type `self.dtype`.", "source": "github-repos"}
{"code": "def consume(self, data):\n        \n        if not self._started:\n            self.fire(JSONStreamer.DOC_START_EVENT)\n            self._started = True\n        self._file_like.write(data)\n        try:\n            self._parser.parse(self._file_like)\n        except YajlError as ye:\n            raise JSONStreamerException(ye.value)", "docstring": "Takes input that must be parsed\n\nNote:\nAttach all your listeners before calling this method\n\nArgs:\ndata (str): input json string", "source": "juraj-google-style"}
{"code": "def get_overlaps(self, offset, length):\n    \n    \n    if ''.join([chunk.word for chunk in self])[offset] == ' ':\n      offset += 1\n    index = 0\n    result = ChunkList()\n    for chunk in self:\n      if offset < index + len(chunk.word) and index < offset + length:\n        result.append(chunk)\n      index += len(chunk.word)\n    return result", "docstring": "Returns chunks overlapped with the given range.\n\nArgs:\noffset (int): Begin offset of the range.\nlength (int): Length of the range.\n\nReturns:\nOverlapped chunks. (:obj:`budou.chunk.ChunkList`)", "source": "juraj-google-style"}
{"code": "def Parse(text):\n    precondition.AssertType(text, Text)\n    if compatibility.PY2:\n        text = text.encode('utf-8')\n    return yaml.safe_load(text)", "docstring": "Parses a YAML source into a Python object.\n\nArgs:\ntext: A YAML source to parse.\n\nReturns:\nA Python data structure corresponding to the YAML source.", "source": "codesearchnet"}
{"code": "def insert_meta_fields_into_existing_schema(graphql_schema):\n    root_type_name = graphql_schema.get_query_type().name\n    for (type_name, type_obj) in six.iteritems(graphql_schema.get_type_map()):\n        if (type_name.startswith('__') or (type_name == root_type_name)):\n            continue\n        if (not isinstance(type_obj, (GraphQLObjectType, GraphQLInterfaceType))):\n            continue\n        for (meta_field_name, meta_field) in six.iteritems(EXTENDED_META_FIELD_DEFINITIONS):\n            if (meta_field_name in type_obj.fields):\n                raise AssertionError(u'Unexpectedly encountered an existing field named {} while attempting to add a meta-field of the same name. Make sure you are not attempting to add meta-fields twice.'.format(meta_field_name))\n            type_obj.fields[meta_field_name] = meta_field", "docstring": "Add compiler-specific meta-fields into all interfaces and types of the specified schema.\n\nIt is preferable to use the EXTENDED_META_FIELD_DEFINITIONS constant above to directly inject\nthe meta-fields during the initial process of building the schema, as that approach\nis more robust. This function does its best to not mutate unexpected definitions, but\nmay break unexpectedly as the GraphQL standard is extended and the underlying\nGraphQL library is updated.\n\nUse this function at your own risk. Don't say you haven't been warned.\n\nProperties added include:\n- \"_x_count\", which allows filtering folds based on the number of elements they capture.\n\nArgs:\ngraphql_schema: GraphQLSchema object describing the schema that is going to be used with\nthe compiler. N.B.: MUTATED IN-PLACE in this method.", "source": "codesearchnet"}
{"code": "def download_folder(bucket_name, prefix, target, sagemaker_session):\n    \n    boto_session = sagemaker_session.boto_session\n\n    s3 = boto_session.resource('s3')\n    bucket = s3.Bucket(bucket_name)\n\n    prefix = prefix.lstrip('/')\n\n    \n    \n    objects = list(bucket.objects.filter(Prefix=prefix))\n\n    if len(objects) > 0 and objects[0].key == prefix and prefix[-1] != '/':\n        s3.Object(bucket_name, prefix).download_file(os.path.join(target, os.path.basename(prefix)))\n        return\n\n    \n    for obj_sum in bucket.objects.filter(Prefix=prefix):\n        \n        if obj_sum.key != '' and obj_sum.key[-1] == '/':\n            continue\n        obj = s3.Object(obj_sum.bucket_name, obj_sum.key)\n        s3_relative_path = obj_sum.key[len(prefix):].lstrip('/')\n        file_path = os.path.join(target, s3_relative_path)\n\n        try:\n            os.makedirs(os.path.dirname(file_path))\n        except OSError as exc:\n            \n            \n            if exc.errno != errno.EEXIST:\n                raise\n        obj.download_file(file_path)", "docstring": "Download a folder from S3 to a local path\n\nArgs:\nbucket_name (str): S3 bucket name\nprefix (str): S3 prefix within the bucket that will be downloaded. Can be a single file.\ntarget (str): destination path where the downloaded items will be placed\nsagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.", "source": "juraj-google-style"}
{"code": "def session_new(self, **kwargs):\n    path = self._get_path('session_new')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Generate a session id for user based authentication.\n\nA session id is required in order to use any of the write methods.\n\nArgs:\nrequest_token: The token you generated for the user to approve.\nThe token needs to be approved before being\nused here.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def convert_to_python_type(typ):\n    if isinstance(typ, typehints.TypeVariable):\n        if id(typ) not in _type_var_cache:\n            new_type_variable = typing.TypeVar(typ.name)\n            _type_var_cache[id(typ)] = new_type_variable\n            _type_var_cache[id(new_type_variable)] = typ\n        return _type_var_cache[id(typ)]\n    elif not getattr(typ, '__module__', None).endswith('typehints'):\n        return typ\n    if isinstance(typ, typehints.AnyTypeConstraint):\n        return typing.Any\n    if isinstance(typ, typehints.DictConstraint):\n        return dict[convert_to_python_type(typ.key_type), convert_to_python_type(typ.value_type)]\n    if isinstance(typ, typehints.ListConstraint):\n        return list[convert_to_python_type(typ.inner_type)]\n    if isinstance(typ, typehints.IterableTypeConstraint):\n        return collections.abc.Iterable[convert_to_python_type(typ.inner_type)]\n    if isinstance(typ, typehints.UnionConstraint):\n        if not typ.union_types:\n            return typing.Any\n        return typing.Union[tuple(convert_to_python_types(typ.union_types))]\n    if isinstance(typ, typehints.SetTypeConstraint):\n        return set[convert_to_python_type(typ.inner_type)]\n    if isinstance(typ, typehints.FrozenSetTypeConstraint):\n        return frozenset[convert_to_python_type(typ.inner_type)]\n    if isinstance(typ, typehints.TupleConstraint):\n        return tuple[tuple(convert_to_python_types(typ.tuple_types))]\n    if isinstance(typ, typehints.TupleSequenceConstraint):\n        return tuple[convert_to_python_type(typ.inner_type), ...]\n    if isinstance(typ, typehints.ABCSequenceTypeConstraint):\n        return collections.abc.Sequence[convert_to_python_type(typ.inner_type)]\n    if isinstance(typ, typehints.IteratorTypeConstraint):\n        return collections.abc.Iterator[convert_to_python_type(typ.yielded_type)]\n    if isinstance(typ, typehints.MappingTypeConstraint):\n        return collections.abc.Mapping[convert_to_python_type(typ.key_type), convert_to_python_type(typ.value_type)]\n    raise ValueError('Failed to convert Beam type: %s' % typ)", "docstring": "Converts a given Beam type to a python type.\n\nThis is the reverse of convert_to_beam_type.\n\nArgs:\ntyp: If a typehints.TypeConstraint, the type to convert. Otherwise, typ\nwill be unchanged.\n\nReturns:\nConverted version of typ, or unchanged.\n\nRaises:\nValueError: The type was malformed or could not be converted.", "source": "github-repos"}
{"code": "def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n    padding = _preprocess_padding(padding)\n    x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n        x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n    return x", "docstring": "3D convolution.\n\nArgs:\nx: Tensor or variable.\nkernel: kernel tensor.\nstrides: strides tuple.\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\ndilation_rate: tuple of 3 integers.\n\nReturns:\nA tensor, result of 3D convolution.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.", "source": "github-repos"}
{"code": "def watch_statuses(self, observer, batch_ids):\n        \n        with self._lock:\n            statuses = self.get_statuses(batch_ids)\n            if self._has_no_pendings(statuses):\n                observer.notify_batches_finished(statuses)\n            else:\n                self._observers[observer] = statuses", "docstring": "Allows a component to register to be notified when a set of\nbatches is no longer PENDING. Expects to be able to call the\n\"notify_batches_finished\" method on the registered component, sending\nthe statuses of the batches.\n\nArgs:\nobserver (object): Must implement \"notify_batches_finished\" method\nbatch_ids (list of str): The ids of the batches to watch", "source": "juraj-google-style"}
{"code": "def _get_bucket(self, client_kwargs):\n    return _oss.Bucket(self.client, endpoint=self._endpoint, bucket_name=client_kwargs['bucket_name'])", "docstring": "Get bucket object.\n\nReturns:\noss2.Bucket", "source": "codesearchnet"}
{"code": "def _add(self, frame, strict):\n    if (not isinstance(frame, Frame)):\n        raise TypeError(('%r not a Frame instance' % frame))\n    orig_frame = frame\n    frame = frame._upgrade_frame()\n    if (frame is None):\n        if (not strict):\n            return\n        raise TypeError((\"Can't upgrade %r frame\" % type(orig_frame).__name__))\n    hash_key = frame.HashKey\n    if (strict or (hash_key not in self)):\n        self[hash_key] = frame\n        return\n    while True:\n        old_frame = self[hash_key]\n        new_frame = old_frame._merge_frame(frame)\n        new_hash = new_frame.HashKey\n        if (new_hash == hash_key):\n            self[hash_key] = new_frame\n            break\n        else:\n            assert (new_frame is frame)\n            if (new_hash not in self):\n                self[new_hash] = new_frame\n                break\n            hash_key = new_hash", "docstring": "Add a frame.\n\nArgs:\nframe (Frame): the frame to add\nstrict (bool): if this should raise in case it can't be added\nand frames shouldn't be merged.", "source": "codesearchnet"}
{"code": "def create_indexes(names, settings=None):\n    for name in names:\n        index = Index(name)\n        try:\n            if (not index.exists()):\n                logger.debug('Creating Elasticsearch index: {0}'.format(name))\n                if (settings is None):\n                    index.settings(number_of_shards=1, number_of_replicas=1)\n                else:\n                    index.settings(**settings)\n                index.create()\n        except Exception as e:\n            raise ElasticsearchError('Elasticsearch error: {0}'.format(e.__str__()))", "docstring": "Create Elasticsearch indexes\n\nArgs:\nnames (list): A list of index names\nsettings (dict): Index settings", "source": "codesearchnet"}
{"code": "def generate_row(fake: Faker, config: Config) -> Row:\n    row: Row = {}\n    for column in config:\n        row[column.bq_name] = column.value(fake)\n    return row", "docstring": "Generates a Row of Faker data, conforming to the config.\n\nArgs:\n* fake: Faker instance\n* config: List of Columns\n\nReturns:\n* Row of Faker data", "source": "github-repos"}
{"code": "def transform(self, df):\n        \n        for name, function in self.outputs:\n            df[name] = function(df)", "docstring": "Transforms a DataFrame in place. Computes all outputs of the DataFrame.\n\nArgs:\ndf (pandas.DataFrame): DataFrame to transform.", "source": "juraj-google-style"}
{"code": "def _get_block_sizes(resnet_size):\n  \n  choices = {\n      18: [2, 2, 2, 2],\n      34: [3, 4, 6, 3],\n      50: [3, 4, 6, 3],\n      101: [3, 4, 23, 3],\n      152: [3, 8, 36, 3],\n      200: [3, 24, 36, 3]\n  }\n\n  try:\n    return choices[resnet_size]\n  except KeyError:\n    err = ('Could not find layers for selected Resnet size.\\n'\n           'Size received: {}; sizes allowed: {}.'.format(\n               resnet_size, choices.keys()))\n    raise ValueError(err)", "docstring": "Retrieve the size of each block_layer in the ResNet model.\n\nThe number of block layers used for the Resnet model varies according\nto the size of the model. This helper grabs the layer set we want, throwing\nan error if a non-standard size has been selected.\n\nArgs:\nresnet_size: The number of convolutional layers needed in the model.\n\nReturns:\nA list of block sizes to use in building the model.\n\nRaises:\nKeyError: if invalid resnet_size is received.", "source": "juraj-google-style"}
{"code": "def ParseNetworkConnectivityUsage(self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs):\n    self._ParseGUIDTable(parser_mediator, cache, database, table, self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP, SRUMNetworkConnectivityUsageEventData)", "docstring": "Parses the network connectivity usage monitor table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache, which contains information about\nthe identifiers stored in the SruDbIdMapTable table.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.", "source": "codesearchnet"}
{"code": "def event_type(self, event, cameo_code) -> List[str]:\n        \n        key = self.event_name[event]\n        entry = self.mapping.get(cameo_code)\n        result = None\n        if entry:\n            result = entry[key]\n            if result is None or result == \"\":\n                return None\n            elif not isinstance(result, list):\n                result = [result]\n        return result", "docstring": "Look up the event tupe of an event\nArgs:\nevent: one of \"event1\", \"event2\" or \"event3\"\ncameo_code: one of the cameo codes\n\nReturns: a list of the event types or None if the event is not relevant.", "source": "juraj-google-style"}
{"code": "def has_axon(neuron, treefun=_read_neurite_type):\n    return CheckResult((NeuriteType.axon in (treefun(n) for n in neuron.neurites)))", "docstring": "Check if a neuron has an axon\n\nArguments:\nneuron(Neuron): The neuron object to test\ntreefun: Optional function to calculate the tree type of\nneuron's neurites\n\nReturns:\nCheckResult with result", "source": "codesearchnet"}
{"code": "def report_get(config, auth, report_id=None, name=None):\n    if name:\n        for query in API_DBM(config, auth, iterate=True).queries().list().execute():\n            if query['metadata']['title'] == name:\n                return query\n    else:\n        return API_DBM(config, auth).queries().get(queryId=report_id).execute()", "docstring": "Returns the DBM JSON definition of a report based on name or ID.\n\nArgs:\n* auth: (string) Either user or service.\n* report_id: (int) ID of DCm report to fetch ( either or name ).\n* name: (string) Name of report to fetch ( either or report_id ).\n\nReturns:\n* JSON definition of report.", "source": "github-repos"}
{"code": "def convert_x_www_form_urlencoded_to_dict(post_data):\n    \n    if isinstance(post_data, str):\n        converted_dict = {}\n        for k_v in post_data.split(\"&\"):\n            try:\n                key, value = k_v.split(\"=\")\n            except ValueError:\n                raise Exception(\n                    \"Invalid x_www_form_urlencoded data format: {}\".format(post_data)\n                )\n            converted_dict[key] = unquote(value)\n        return converted_dict\n    else:\n        return post_data", "docstring": "convert x_www_form_urlencoded data to dict\n\nArgs:\npost_data (str): a=1&b=2\n\nReturns:\ndict: {\"a\":1, \"b\":2}", "source": "juraj-google-style"}
{"code": "def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):\n    image_queries = contrastive_queries_logits.float()\n    image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1)\n    text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1)\n    logit_scale = torch.clamp(self.logit_scale.exp(), max=100)\n    logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale\n    logits_per_img = logits_per_text.t()\n    loss_img = nn.functional.cross_entropy(logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device))\n    loss_text = nn.functional.cross_entropy(logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device))\n    loss_contrastive = loss_img + loss_text\n    losses = {'loss_contrastive': loss_contrastive}\n    return losses", "docstring": "Compute the query-text contrastive loss.\n\nArgs:\ncontrastive_queries_logits (`torch.Tensor`):\nA tensor of shape `batch_size, num_queries, hidden_dim`\ntext_queries (`torch.Tensor`):\nA tensor of shape `batch_size, num_queries, hidden_dim`\nReturns:\n`Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:\n- **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries\nand text queries derived from input text list.", "source": "github-repos"}
{"code": "def read_value(self, varname, path='/', cmode=None, default=NO_DEFAULT):\n    try:\n        var = self.read_variable(varname, path=path)\n    except self.Error:\n        if (default is NO_DEFAULT):\n            raise\n        return default\n    if (cmode is None):\n        try:\n            return (var.getValue()[0] if (not var.shape) else var[:])\n        except IndexError:\n            return (var.getValue() if (not var.shape) else var[:])\n    else:\n        assert (var.shape[(- 1)] == 2)\n        if (cmode == 'c'):\n            return (var[(..., 0)] + (1j * var[(..., 1)]))\n        else:\n            raise ValueError(('Wrong value for cmode %s' % cmode))", "docstring": "Returns the values of variable with name varname in the group specified by path.\n\nArgs:\nvarname: Name of the variable\npath: path to the group.\ncmode: if cmode==\"c\", a complex ndarrays is constructed and returned\n(netcdf does not provide native support from complex datatype).\ndefault: returns default if varname is not present.\nself.Error is raised if default is default is NO_DEFAULT\n\nReturns:\nnumpy array if varname represents an array, scalar otherwise.", "source": "codesearchnet"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    return data_range_file_entry.DataRangeFileEntry(\n        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nDataRangeFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def isfinite(x):\n    if any_symbolic_tensors((x,)):\n        return Isfinite().symbolic_call(x)\n    return backend.numpy.isfinite(x)", "docstring": "Return whether a tensor is finite, element-wise.\n\nReal values are finite when they are not NaN, not positive infinity, and\nnot negative infinity. Complex values are finite when both their real\nand imaginary parts are finite.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput boolean tensor.", "source": "github-repos"}
{"code": "def create_string_array(self, key, value):\n    data = None\n    if ((key is not None) and (value is not None)):\n        if isinstance(value, list):\n            data = self.db.create(key.strip(), json.dumps(value))\n        else:\n            data = self.db.create(key.strip(), value)\n    else:\n        self.tcex.log.warning(u'The key or value field was None.')\n    return data", "docstring": "Create method of CRUD operation for string array data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"}
{"code": "def assertProtoEqual(self, a, b, check_initialized=True, normalize_numbers=False, msg=None, relative_tolerance=None):\n    pool = descriptor_pool.Default()\n    if isinstance(a, str):\n        a = text_format.Parse(a, b.__class__(), descriptor_pool=pool)\n    for pb in (a, b):\n        if check_initialized:\n            errors = pb.FindInitializationErrors()\n            if errors:\n                self.fail('Initialization errors: %s\\n%s' % (errors, pb))\n        if normalize_numbers:\n            NormalizeNumberFields(pb)\n    if relative_tolerance is not None:\n        checkFloatEqAndReplace(self, expected=b, actual=a, relative_tolerance=relative_tolerance)\n    a_str = text_format.MessageToString(a, descriptor_pool=pool)\n    b_str = text_format.MessageToString(b, descriptor_pool=pool)\n    if len(a_str) < 2 ** 16 and len(b_str) < 2 ** 16:\n        self.assertMultiLineEqual(a_str, b_str, msg=msg)\n    else:\n        diff = ''.join(difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True)))\n        if diff:\n            self.fail('%s :\\n%s' % (msg, diff))", "docstring": "Fails with a useful error if a and b aren't equal.\n\nComparison of repeated fields matches the semantics of\nunittest.TestCase.assertEqual(), ie order and extra duplicates fields matter.\n\nArgs:\nself: googletest.TestCase\na: proto2 PB instance, or text string representing one.\nb: proto2 PB instance -- message.Message or subclass thereof.\ncheck_initialized: boolean, whether to fail if either a or b isn't\ninitialized.\nnormalize_numbers: boolean, whether to normalize types and precision of\nnumbers before comparison.\nmsg: if specified, is used as the error message on failure.\nrelative_tolerance: float, relative tolerance. If this is not provided, then\nall floats are compared using string comparison otherwise, floating point\ncomparisons are done using the relative tolerance provided.", "source": "github-repos"}
{"code": "def iter_acgt_geno_marker(self, markers):\n        \n        \n        for snp, geno, s in self.iter_geno_marker(markers, return_index=True):\n            \n            yield snp, self._allele_encoding[s][geno]", "docstring": "Iterates over genotypes for a list of markers (ACGT format).\n\nArgs:\nmarkers (list): The list of markers to iterate onto.\n\nReturns:\ntuple: The name of the marker as a string, and its genotypes as a\n:py:class:`numpy.ndarray` (ACGT format).", "source": "juraj-google-style"}
{"code": "def value_report(self, address, zipcode, report_type='full', format_type='json'):\n    query_params = {'report_type': report_type, 'format': format_type, 'address': address, 'zipcode': zipcode}\n    return self._api_client.fetch_synchronous('property/value_report', query_params)", "docstring": "Call the value_report component\n\nValue Report only supports a single address.\n\nArgs:\n- address\n- zipcode\n\nKwargs:\n- report_type - \"full\" or \"summary\". Default is \"full\".\n- format_type - \"json\", \"pdf\", \"xlsx\" or \"all\". Default is \"json\".", "source": "codesearchnet"}
{"code": "def fft_mesh(self, kpoint, band, spin=0, shift=True):\n    mesh = np.zeros(tuple(self.ng), dtype=np.complex)\n    tcoeffs = (self.coeffs[spin][kpoint][band] if (self.spin == 2) else self.coeffs[kpoint][band])\n    for (gp, coeff) in zip(self.Gpoints[kpoint], tcoeffs):\n        t = tuple((gp.astype(np.int) + (self.ng / 2).astype(np.int)))\n        mesh[t] = coeff\n    if shift:\n        return np.fft.ifftshift(mesh)\n    else:\n        return mesh", "docstring": "Places the coefficients of a wavefunction onto an fft mesh.\n\nOnce the mesh has been obtained, a discrete fourier transform can be\nused to obtain real-space evaluation of the wavefunction. The output\nof this function can be passed directly to numpy's fft function. For\nexample:\n\nmesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)\nevals = np.fft.ifftn(mesh)\n\nArgs:\nkpoint (int): the index of the kpoint where the wavefunction\nwill be evaluated\nband (int): the index of the band where the wavefunction will be\nevaluated\nspin (int):  the spin of the wavefunction for the desired\nwavefunction (only for ISPIN = 2, default = 0)\nshift (bool): determines if the zero frequency coefficient is\nplaced at index (0, 0, 0) or centered\nReturns:\na numpy ndarray representing the 3D mesh of coefficients", "source": "codesearchnet"}
{"code": "def embed(self, url, format='json', **opt):\n    if (format not in ['json', 'xml']):\n        raise OEmbedInvalidRequest('Format must be json or xml')\n    opt['format'] = format\n    return self._request(url, **opt)", "docstring": "Get an OEmbedResponse from one of the providers configured in this\nconsumer according to the resource url.\n\nArgs:\nurl: The url of the resource to get.\nformat: Desired response format.\n**opt: Optional parameters to pass in the url to the provider.\n\nReturns:\nOEmbedResponse object.", "source": "codesearchnet"}
{"code": "def _streaming_request_iterable(self, config, requests):\n    (yield self.types.StreamingRecognizeRequest(streaming_config=config))\n    for request in requests:\n        (yield request)", "docstring": "A generator that yields the config followed by the requests.\n\nArgs:\nconfig (~.speech_v1.types.StreamingRecognitionConfig): The\nconfiguration to use for the stream.\nrequests (Iterable[~.speech_v1.types.StreamingRecognizeRequest]):\nThe input objects.\n\nReturns:\nIterable[~.speech_v1.types.StreamingRecognizeRequest]): The\ncorrectly formatted input for\n:meth:`~.speech_v1.SpeechClient.streaming_recognize`.", "source": "codesearchnet"}
{"code": "def __init__(self, code):\n        \n        message = code\n\n        self.code = None\n\n        if util.is_integer(code):\n            message = self.to_string(code)\n            self.code = code\n\n        super(JLinkException, self).__init__(message)\n        self.message = message", "docstring": "Generates an exception by coercing the given ``code`` to an error\nstring if is a number, otherwise assumes it is the message.\n\nArgs:\nself (JLinkException): the 'JLinkException' instance\ncode (object): message or error code\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def path_new_using_function(\n    w: int,\n    h: int,\n    func: Callable[[int, int, int, int, Any], float],\n    userData: Any = 0,\n    dcost: float = 1.41,\n) -> tcod.path.AStar:\n    \n    return tcod.path.AStar(\n        tcod.path._EdgeCostFunc((func, userData), (w, h)), dcost\n    )", "docstring": "Return a new AStar using the given callable function.\n\nArgs:\nw (int): Clipping width.\nh (int): Clipping height.\nfunc (Callable[[int, int, int, int, Any], float]):\nuserData (Any):\ndcost (float): A multiplier for the cost of diagonal movement.\nCan be set to 0 to disable diagonal movement.\nReturns:\nAStar: A new AStar instance.", "source": "juraj-google-style"}
{"code": "def peek_native(make):\n    \n    def peek(service, container, _stack=None):\n        return make(service.peekNative(container))\n    return peek", "docstring": "Deserializer factory for types which state can be natively serialized.\n\nArguments:\n\nmake (callable): type constructor.\n\nReturns:\n\ncallable: deserializer (`peek` routine)", "source": "juraj-google-style"}
{"code": "def read_records(self, file_name, offset_range_tracker):\n    raise NotImplementedError", "docstring": "Returns a generator of records created by reading file 'file_name'.\n\nArgs:\nfile_name: a ``string`` that gives the name of the file to be read. Method\n``FileBasedSource.open_file()`` must be used to open the file\nand create a seekable file object.\noffset_range_tracker: a object of type ``OffsetRangeTracker``. This\ndefines the byte range of the file that should be\nread. See documentation in\n``iobase.BoundedSource.read()`` for more information\non reading records while complying to the range\ndefined by a given ``RangeTracker``.\n\nReturns:\nan iterator that gives the records read from the given file.", "source": "github-repos"}
{"code": "def get_config_parameter_loglevel(config: ConfigParser,\n                                  section: str,\n                                  param: str,\n                                  default: int) -> int:\n    \n    try:\n        value = config.get(section, param).lower()\n        if value == \"debug\":\n            return logging.DEBUG  \n        elif value == \"info\":\n            return logging.INFO\n        elif value in [\"warn\", \"warning\"]:\n            return logging.WARN\n        elif value == \"error\":\n            return logging.ERROR\n        elif value in [\"critical\", \"fatal\"]:\n            return logging.CRITICAL  \n        else:\n            raise ValueError\n    except (TypeError, ValueError, NoOptionError, AttributeError):\n        log.warning(\n            \"Configuration variable {} not found or improper in section [{}]; \"\n            \"using default of {!r}\", param, section, default)\n        return default", "docstring": "Get ``loglevel`` parameter from ``configparser`` ``.INI`` file, e.g.\nmapping ``'debug'`` to ``logging.DEBUG``.\n\nArgs:\nconfig: :class:`ConfigParser` object\nsection: section name within config file\nparam: name of parameter within section\ndefault: default value\nReturns:\nparameter value, or default", "source": "juraj-google-style"}
{"code": "def exit(tensor, name=None):\n    tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)\n    if isinstance(tensor, tensor_lib.Tensor):\n        if tensor.dtype._is_ref_dtype:\n            return gen_control_flow_ops.ref_exit(tensor, name)\n        else:\n            return gen_control_flow_ops._exit(tensor, name)\n    elif isinstance(tensor, composite_tensor.CompositeTensor):\n        return nest.map_structure(exit, tensor, expand_composites=True)\n    else:\n        raise TypeError(f\"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.\")", "docstring": "Exits the current frame to its parent frame.\n\nExit makes its input `tensor` available to the parent frame.\n\nArgs:\ntensor: The tensor to be made available to the parent frame.\nname: A name for this operation (optional).\n\nReturns:\nThe same tensor as `tensor`.", "source": "github-repos"}
{"code": "def __expand_meta_datas(meta_datas, meta_datas_expanded):\n    \n    if isinstance(meta_datas, dict):\n        meta_datas_expanded.append(meta_datas)\n    elif isinstance(meta_datas, list):\n        for meta_data in meta_datas:\n            __expand_meta_datas(meta_data, meta_datas_expanded)", "docstring": "expand meta_datas to one level\n\nArgs:\nmeta_datas (dict/list): maybe in nested format\n\nReturns:\nlist: expanded list in one level\n\nExamples:\n>>> meta_datas = [\n[\ndict1,\ndict2\n],\ndict3\n]\n>>> meta_datas_expanded = []\n>>> __expand_meta_datas(meta_datas, meta_datas_expanded)\n>>> print(meta_datas_expanded)\n[dict1, dict2, dict3]", "source": "juraj-google-style"}
{"code": "def DecodeMessages(self, response_comms):\n    cipher_verified = False\n    try:\n        cipher = self.encrypted_cipher_cache.Get(response_comms.encrypted_cipher)\n        stats_collector_instance.Get().IncrementCounter('grr_encrypted_cipher_cache', fields=['hits'])\n        cipher.VerifyReceivedHMAC(response_comms)\n        cipher_verified = True\n        source = cipher.GetSource()\n        remote_public_key = self._GetRemotePublicKey(source)\n    except KeyError:\n        stats_collector_instance.Get().IncrementCounter('grr_encrypted_cipher_cache', fields=['misses'])\n        cipher = ReceivedCipher(response_comms, self.private_key)\n        source = cipher.GetSource()\n        try:\n            remote_public_key = self._GetRemotePublicKey(source)\n            if cipher.VerifyCipherSignature(remote_public_key):\n                self.encrypted_cipher_cache.Put(response_comms.encrypted_cipher, cipher)\n                cipher_verified = True\n        except UnknownClientCertError:\n            remote_public_key = None\n    plain = cipher.Decrypt(response_comms.encrypted, response_comms.packet_iv)\n    try:\n        packed_message_list = rdf_flows.PackedMessageList.FromSerializedString(plain)\n    except rdfvalue.DecodeError as e:\n        raise DecryptionError(e)\n    message_list = self.DecompressMessageList(packed_message_list)\n    auth_state = self.VerifyMessageSignature(response_comms, packed_message_list, cipher, cipher_verified, response_comms.api_version, remote_public_key)\n    for msg in message_list.job:\n        msg.auth_state = auth_state\n        msg.source = cipher.cipher_metadata.source\n    return (message_list.job, cipher.cipher_metadata.source, packed_message_list.timestamp)", "docstring": "Extract and verify server message.\n\nArgs:\nresponse_comms: A ClientCommunication rdfvalue\n\nReturns:\nlist of messages and the CN where they came from.\n\nRaises:\nDecryptionError: If the message failed to decrypt properly.", "source": "codesearchnet"}
{"code": "def _wordcount_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n    all_words = []\n    for element in expected:\n        word = element.split('=')[1].split(',')[0].replace(\"'\", '')\n        count = int(element.split('=')[2].replace(')', ''))\n        all_words += [word] * count\n    random.shuffle(all_words)\n    lines = []\n    while all_words:\n        line_length = random.randint(1, min(10, len(all_words)))\n        line = ' '.join((all_words.pop(random.randrange(len(all_words))) for _ in range(line_length)))\n        lines.append(line)\n    return replace_recursive(test_spec, 'ReadFromText', 'path', env.input_file('kinglear.txt', '\\n'.join(lines)))", "docstring": "Preprocessor for the wordcount_minimal.yaml test.\n\nThis preprocessor generates a random input file based on the expected output\nof the wordcount example. This allows the test to verify the pipeline's\ncorrectness without relying on a fixed input file.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with the input file path replaced.", "source": "github-repos"}
{"code": "class MllamaVisionEncoder(nn.Module):\n\n    def __init__(self, config: MllamaVisionConfig, num_layers=32, is_gated=False):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([MllamaVisionEncoderLayer(config, is_gated) for _ in range(num_layers)])\n        self.gradient_checkpointing = False\n        self.config = config\n\n    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_state=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n            hidden_states = layer_outputs[0]\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`MllamaEncoderLayer`].\n\nArgs:\nconfig: MllamaConfig", "source": "github-repos"}
{"code": "def icon_description(self, **kwargs):\n        \n        \n        params = {'language': util.language_code(kwargs.get('lang'))}\n\n        \n        result = self.make_request('icon_description', {}, **params)\n\n        if not util.check_result(result):\n            return False, result.get('message', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'Data')\n        return True, [emtype.IconDescription(**a) for a in values]", "docstring": "Obtain a list of elements that have an associated icon.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[IconDescription]), or\nmessage string in case of error.", "source": "juraj-google-style"}
{"code": "def convert_dicts(d, to_class=AttrDictWrapper, from_class=dict):\n    \n    d_ = to_class()\n    for key, value in d.iteritems():\n        if isinstance(value, from_class):\n            d_[key] = convert_dicts(value, to_class=to_class,\n                                    from_class=from_class)\n        else:\n            d_[key] = value\n    return d_", "docstring": "Recursively convert dict and UserDict types.\n\nNote that `d` is unchanged.\n\nArgs:\nto_class (type): Dict-like type to convert values to, usually UserDict\nsubclass, or dict.\nfrom_class (type): Dict-like type to convert values from. If a tuple,\nmultiple types are converted.\n\nReturns:\nConverted data as `to_class` instance.", "source": "juraj-google-style"}
{"code": "def parse_args(args):\n    \n    parser = argparse.ArgumentParser(\n        description=\"Just a Fibonnaci demonstration\")\n    parser.add_argument(\n        '--version',\n        action='version',\n        version='nlpia {ver}'.format(ver=__version__))\n    parser.add_argument(\n        dest=\"n\",\n        help=\"n-th Fibonacci number\",\n        type=int,\n        metavar=\"INT\")\n    parser.add_argument(\n        '-v',\n        '--verbose',\n        dest=\"loglevel\",\n        help=\"set loglevel to INFO\",\n        action='store_const',\n        const=logging.INFO)\n    parser.add_argument(\n        '-vv',\n        '--very-verbose',\n        dest=\"loglevel\",\n        help=\"set loglevel to DEBUG\",\n        action='store_const',\n        const=logging.DEBUG)\n    return parser.parse_args(args)", "docstring": "Parse command line parameters\n\nArgs:\nargs ([str]): command line parameters as list of strings\n\nReturns:\n:obj:`argparse.Namespace`: command line parameters namespace", "source": "juraj-google-style"}
{"code": "def add_module(self, module_name, module_ui):\n        \n        m_button = tk.Label(self.module_selection, text=module_name, bg=\"white\", anchor=\"w\")\n        m_button.grid(column=0, row=len(self.module_selection.winfo_children()), padx=0, pady=0, sticky=\"W E N S\")\n\n        self.module_buttons[module_name] = m_button\n        m_button.bind(\"<Button-1>\", lambda e: self.module_selected(module_name, module_ui))", "docstring": "Adds a module to the list\n\nArgs:\nmodule_name (str): The name of the module\nmodule_ui: The function to call to create the module's UI", "source": "juraj-google-style"}
{"code": "def _PrintExtractionStatusUpdateLinear(self, processing_status):\n    for worker_status in processing_status.workers_status:\n        status_line = '{0:s} (PID: {1:d}) - events produced: {2:d} - file: {3:s} - running: {4!s}\\n'.format(worker_status.identifier, worker_status.pid, worker_status.number_of_produced_events, worker_status.display_name, (worker_status.status not in definitions.ERROR_STATUS_INDICATORS))\n        self._output_writer.Write(status_line)", "docstring": "Prints an extraction status update in linear mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "def status(self, job_ids):\n        \n        statuses = []\n        for job_id in job_ids:\n            instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()\n            self.resources[job_id]['status'] = translate_table[instance['status']]\n            statuses.append(translate_table[instance['status']])\n        return statuses", "docstring": "Get the status of a list of jobs identified by the job identifiers\nreturned from the submit request.\n\nArgs:\n- job_ids (list) : A list of job identifiers\n\nReturns:\n- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\nRaises:\n- ExecutionProviderException or its subclasses", "source": "juraj-google-style"}
{"code": "def _compute_inside_group(df):\n    \n    inside_group = df.copy()\n    inside_group['type'] = 'child'\n    inside_group['variation'] = inside_group['value'] / inside_group[\n        'value_start']\n    inside_group.drop(['upperGroup_label', 'insideGroup', 'value_start'],\n                      axis=1, inplace=True)\n    inside_group.rename(columns={'insideGroup_label': 'label'},\n                        inplace=True)\n    return inside_group", "docstring": "Compute inside Group\nArgs:\ndf(dataframe):\n\nReturns: Dataframe", "source": "juraj-google-style"}
{"code": "def encode(self, builder: expressions.Builder, select_scalars_as_array: bool=True, use_resource_alias: bool=False) -> str:\n    self._use_resource_alias = use_resource_alias\n    result = self.visit(builder.node)\n    if select_scalars_as_array or _fhir_path_data_types.returns_collection(builder.node.return_type):\n        return f'ARRAY(SELECT {result.sql_alias}\\nFROM {result.to_subquery()}\\nWHERE {result.sql_alias} IS NOT NULL)'\n    else:\n        return f'{result.to_subquery()}'", "docstring": "Returns a Standard SQL encoding of a FHIRPath expression.\n\nIf select_scalars_as_array is True, the resulting Standard SQL encoding\nalways returns a top-level `ARRAY`, whose elements are non-`NULL`. Otherwise\nthe resulting SQL will attempt to return a scalar when possible and only\nreturn an `ARRAY` for actual collections.\n\nArgs:\nbuilder: The FHIR Path builder to encode as a SQL string.\nselect_scalars_as_array: When True, always builds SQL selecting results in\nan array. When False, attempts to build SQL returning scalars where\npossible.\nuse_resource_alias: Determines whether it is necessary to call the\nresource table directly through an alias.\n\nReturns:\nA Standard SQL representation of the provided FHIRPath expression.", "source": "github-repos"}
{"code": "def _is_node_return_ended(self, node):\n        \n        \n        if isinstance(node, astroid.Return):\n            return True\n        if isinstance(node, astroid.Call):\n            try:\n                funcdef_node = node.func.inferred()[0]\n                if self._is_function_def_never_returning(funcdef_node):\n                    return True\n            except astroid.InferenceError:\n                pass\n        \n        \n        if isinstance(node, astroid.While):\n            return True\n        if isinstance(node, astroid.Raise):\n            \n            \n            \n            if not node.exc:\n                \n                return True\n            if not utils.is_node_inside_try_except(node):\n                \n                \n                \n                return True\n            exc = utils.safe_infer(node.exc)\n            if exc is None or exc is astroid.Uninferable:\n                return False\n            exc_name = exc.pytype().split(\".\")[-1]\n            handlers = utils.get_exception_handlers(node, exc_name)\n            handlers = list(handlers) if handlers is not None else []\n            if handlers:\n                \n                \n                return any(\n                    self._is_node_return_ended(_handler) for _handler in handlers\n                )\n            \n            return True\n        if isinstance(node, astroid.If):\n            \n            \n            \n            is_orelse_returning = any(\n                self._is_node_return_ended(_ore)\n                for _ore in node.orelse\n                if not isinstance(_ore, astroid.FunctionDef)\n            )\n            is_if_returning = any(\n                self._is_node_return_ended(_ifn)\n                for _ifn in node.body\n                if not isinstance(_ifn, astroid.FunctionDef)\n            )\n            return is_if_returning and is_orelse_returning\n        \n        \n        return any(\n            self._is_node_return_ended(_child)\n            for _child in node.get_children()\n            if not isinstance(_child, astroid.ExceptHandler)\n        )", "docstring": "Check if the node ends with an explicit return statement.\n\nArgs:\nnode (astroid.NodeNG): node to be checked.\n\nReturns:\nbool: True if the node ends with an explicit statement, False otherwise.", "source": "juraj-google-style"}
{"code": "def xw_plus_b_v1(x, weights, biases, name=None):\n    with ops.name_scope(name, 'xw_plus_b_v1', [x, weights, biases]) as name:\n        x = ops.convert_to_tensor(x, name='x')\n        weights = ops.convert_to_tensor(weights, name='weights')\n        biases = ops.convert_to_tensor(biases, name='biases')\n        mm = math_ops.matmul(x, weights)\n        return bias_add_v1(mm, biases, name=name)", "docstring": "Computes matmul(x, weights) + biases.\n\nThis is a deprecated version of that will soon be removed.\n\nArgs:\nx: a 2D tensor.  Dimensions typically: batch, in_units\nweights: a 2D tensor.  Dimensions typically: in_units, out_units\nbiases: a 1D tensor.  Dimensions: out_units\nname: A name for the operation (optional).  If not specified\n\"xw_plus_b_v1\" is used.\n\nReturns:\nA 2-D Tensor computing matmul(x, weights) + biases.\nDimensions typically: batch, out_units.", "source": "github-repos"}
{"code": "def serializable_value(self, obj):\n    value = self.__get__(obj, obj.__class__)\n    return self.property.serialize_value(value)", "docstring": "Produce the value as it should be serialized.\n\nSometimes it is desirable for the serialized value to differ from\nthe ``__get__`` in order for the ``__get__`` value to appear simpler\nfor user or developer convenience.\n\nArgs:\nobj (HasProps) : the object to get the serialized attribute for\n\nReturns:\nJSON-like", "source": "codesearchnet"}
{"code": "def is_control(input, model_file=None, model_proto=None, name=None):\n  \n\n  return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(\n      input, model_file=model_file, model_proto=model_proto, name=name,\n      piece_type=1)", "docstring": "Returns true if input id is control piece.\n\nArgs:\ninput: An arbitrary tensor of int32.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of bool with the same shape as input.", "source": "juraj-google-style"}
{"code": "def set_hash_value(self, key, field, value, pipeline=False):\n    if pipeline:\n        self._pipeline.hset(key, field, str(value))\n    else:\n        self._db.hset(key, field, str(value))", "docstring": "Set the value of field in a hash stored at key.\n\nArgs:\nkey (str): key (name) of the hash\nfield (str): Field within the hash to set\nvalue: Value to set\npipeline (bool): True, start a transaction block. Default false.", "source": "codesearchnet"}
{"code": "def get_command_from_result(script, result, debug=False):\n    if (not debug):\n        command = (((('python waf --run \"' + script) + ' ') + ' '.join([('--%s=%s' % (param, value)) for (param, value) in result['params'].items()])) + '\"')\n    else:\n        command = ((((('python waf --run ' + script) + ' --command-template=\"') + 'gdb --args %s ') + ' '.join([('--%s=%s' % (param, value)) for (param, value) in result['params'].items()])) + '\"')\n    return command", "docstring": "Return the command that is needed to obtain a certain result.\n\nArgs:\nparams (dict): Dictionary containing parameter: value pairs.\ndebug (bool): Whether the command should include the debugging\ntemplate.", "source": "codesearchnet"}
{"code": "async def debug(self, conn_id, name, cmd_args):\n        \n\n\n        device = self._get_property(conn_id, 'device')\n\n        retval = None\n\n        try:\n            if name == 'dump_state':\n                retval = device.dump_state()\n            elif name == 'restore_state':\n                state = cmd_args['snapshot']\n                device.restore_state(state)\n            elif name == 'load_scenario':\n                scenario = cmd_args['scenario']\n                device.load_metascenario(scenario)\n            elif name == 'track_changes':\n                if cmd_args['enabled']:\n                    device.state_history.enable()\n                else:\n                    device.state_history.disable()\n            elif name == 'dump_changes':\n                outpath = cmd_args['path']\n                device.state_history.dump(outpath)\n            else:\n                reason = \"Unknown command %s\" % name\n                raise DeviceAdapterError(conn_id, 'debug {}'.format(name), reason)\n        except Exception as exc:\n            self._logger.exception(\"Error processing debug command %s: args=%s\", name, cmd_args)\n            reason = \"Exception %s occurred during processing\" % str(exc)\n            raise DeviceAdapterError(conn_id, 'debug {}'.format(name), reason) from exc\n\n        return retval", "docstring": "Asynchronously complete a named debug command.\n\nThe command name and arguments are passed to the underlying device adapter\nand interpreted there.\n\nArgs:\nconn_id (int): A unique identifer that will refer to this connection\nname (string): the name of the debug command we want to invoke\ncmd_args (dict): any arguments that we want to send with this command.", "source": "juraj-google-style"}
{"code": "async def update_flags(self, messages: Sequence[MessageT],\n                           flag_set: FrozenSet[Flag], mode: FlagOp) -> None:\n        \n        ...", "docstring": "Update the permanent flags of each messages.\n\nArgs:\nmessages: The message objects.\nflag_set: The set of flags for the update operation.\nflag_op: The mode to change the flags.", "source": "juraj-google-style"}
{"code": "def parse(self, filepath, content):\n        \n        try:\n            parsed = json.loads(content)\n        except ValueError:\n            msg = \"No JSON object could be decoded from file: {}\"\n            raise SettingsBackendError(msg.format(filepath))\n        return parsed", "docstring": "Parse opened settings content using JSON parser.\n\nArgs:\nfilepath (str): Settings object, depends from backend\ncontent (str): Settings content from opened file, depends from\nbackend.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If parser can not decode\na valid JSON object.\n\nReturns:\ndict: Dictionnary containing parsed setting elements.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    scca_file = pyscca.file()\n\n    try:\n      scca_file.open_file_object(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open file with error: {0!s}'.format(exception))\n      return\n\n    format_version = scca_file.format_version\n    executable_filename = scca_file.executable_filename\n    prefetch_hash = scca_file.prefetch_hash\n    run_count = scca_file.run_count\n    number_of_volumes = scca_file.number_of_volumes\n\n    volume_serial_numbers = []\n    volume_device_paths = []\n    path = ''\n\n    for volume_information in iter(scca_file.volumes):\n      volume_serial_number = volume_information.serial_number\n      volume_device_path = volume_information.device_path\n\n      volume_serial_numbers.append(volume_serial_number)\n      volume_device_paths.append(volume_device_path)\n\n      timestamp = volume_information.get_creation_time_as_integer()\n      if timestamp:\n        event_data = windows_events.WindowsVolumeEventData()\n        event_data.device_path = volume_device_path\n        event_data.origin = parser_mediator.GetFilename()\n        event_data.serial_number = volume_serial_number\n\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      for filename in iter(scca_file.filenames):\n        if not filename:\n          continue\n\n        if (filename.startswith(volume_device_path) and\n            filename.endswith(executable_filename)):\n          _, _, path = filename.partition(volume_device_path)\n\n    mapped_files = []\n    for entry_index, file_metrics in enumerate(scca_file.file_metrics_entries):\n      mapped_file_string = file_metrics.filename\n      if not mapped_file_string:\n        parser_mediator.ProduceExtractionWarning(\n            'missing filename for file metrics entry: {0:d}'.format(\n                entry_index))\n        continue\n\n      file_reference = file_metrics.file_reference\n      if file_reference:\n        mapped_file_string = (\n            '{0:s} [MFT entry: {1:d}, sequence: {2:d}]').format(\n                mapped_file_string, file_reference & 0xffffffffffff,\n                file_reference >> 48)\n\n      mapped_files.append(mapped_file_string)\n\n    event_data = WinPrefetchExecutionEventData()\n    event_data.executable = executable_filename\n    event_data.mapped_files = mapped_files\n    event_data.number_of_volumes = number_of_volumes\n    event_data.path = path\n    event_data.prefetch_hash = prefetch_hash\n    event_data.run_count = run_count\n    event_data.version = format_version\n    event_data.volume_device_paths = volume_device_paths\n    event_data.volume_serial_numbers = volume_serial_numbers\n\n    timestamp = scca_file.get_last_run_time_as_integer(0)\n    if not timestamp:\n      parser_mediator.ProduceExtractionWarning('missing last run time')\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n      date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    \n    \n    if format_version >= 26:\n      for last_run_time_index in range(1, 8):\n        timestamp = 
scca_file.get_last_run_time_as_integer(last_run_time_index)\n        if not timestamp:\n          continue\n\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n        date_time_description = 'Previous {0:s}'.format(\n            definitions.TIME_DESCRIPTION_LAST_RUN)\n        event = time_events.DateTimeValuesEvent(\n            date_time, date_time_description)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    scca_file.close()", "docstring": "Parses a Windows Prefetch file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def export_model(self, export_formats, export_dir=None):\n    export_dir = (export_dir or self.logdir)\n    return self._export_model(export_formats, export_dir)", "docstring": "Exports model based on export_formats.\n\nSubclasses should override _export_model() to actually\nexport model to local directory.\n\nArgs:\nexport_formats (list): List of formats that should be exported.\nexport_dir (str): Optional dir to place the exported model.\nDefaults to self.logdir.\n\nReturn:\nA dict that maps ExportFormats to successfully exported models.", "source": "codesearchnet"}
{"code": "def map_texture_to_surface(texture, surface):\n    \n    texture_x, texture_y = texture\n    surface_h, surface_w = surface.shape\n\n    surface_x = np.clip(\n        np.int32(surface_w * texture_x - 1e-9), 0, surface_w - 1)\n    surface_y = np.clip(\n        np.int32(surface_h * texture_y - 1e-9), 0, surface_h - 1)\n\n    surface_z = surface[surface_y, surface_x]\n    return surface_z", "docstring": "Returns values on a surface for points on a texture.\n\nArgs:\ntexture (texture): the texture to trace over the surface\nsurface (surface): the surface to trace along\n\nReturns:\nan array of surface heights for each point in the\ntexture. Line separators (i.e. values that are ``nan`` in\nthe texture) will be ``nan`` in the output, so the output\nwill have the same dimensions as the x/y axes in the\ninput texture.", "source": "juraj-google-style"}
{"code": "def _is_definition_section(source):\n    \n    try:\n        definitions = textwrap.dedent(source).split('\\n', 1)[1].splitlines()\n        return all(\n            re.match(r'\\s\\s+((?!\\s\\s).+)\\s\\s+.+', s) for s in definitions)\n    except IndexError:\n        return False", "docstring": "Determine if the source is a definition section.\n\nArgs:\nsource: The usage string source that may be a section.\n\nReturns:\nTrue if the source describes a definition section; otherwise, False.", "source": "juraj-google-style"}
{"code": "def broadcast_row_partition(self, rp):\n    if not rp.is_uniform():\n        return RowPartition.from_row_lengths(self.broadcast_tensor(rp.row_lengths()))\n    else:\n        return RowPartition.from_uniform_row_length(rp.uniform_row_length(), nvals=rp.uniform_row_length() * self.dest_nrows(), nrows=self.dest_nrows())", "docstring": "Return a new shape where the rows are broadcasted.\n\n*--self--->*\n|          |\nrp       result\n|          |\nV          V\n*--------->*\n\nThis is equivalent to:\nreturn RowPartition.from_row_lengths(self.broadcast(rp.row_lengths()))\n\nHowever, if the shape has uniform row length, then that property is\nmaintained.\n\nArgs:\nrp: a row partition.\n\nReturns:\na RowPartition representing a broadcast version of this row partition.", "source": "github-repos"}
{"code": "def add_pending(self, panel_obj, hgnc_gene, action, info=None):\n    valid_actions = ['add', 'delete', 'edit']\n    if (action not in valid_actions):\n        raise ValueError('Invalid action {0}'.format(action))\n    info = (info or {})\n    pending_action = {'hgnc_id': hgnc_gene['hgnc_id'], 'action': action, 'info': info, 'symbol': hgnc_gene['hgnc_symbol']}\n    updated_panel = self.panel_collection.find_one_and_update({'_id': panel_obj['_id']}, {'$addToSet': {'pending': pending_action}}, return_document=pymongo.ReturnDocument.AFTER)\n    return updated_panel", "docstring": "Add a pending action to a gene panel\n\nStore the pending actions in panel.pending\n\nArgs:\npanel_obj(dict): The panel that is about to be updated\nhgnc_gene(dict)\naction(str): choices=['add','delete','edit']\ninfo(dict): additional gene info (disease_associated_transcripts,\nreduced_penetrance, mosaicism, database_entry_version ,\ninheritance_models, comment)\n\nReturns:\nupdated_panel(dict):", "source": "codesearchnet"}
{"code": "def embed(self, x):\n    \n    shape_x = common_layers.shape_list(x)\n    x_flat = tf.reshape(x, [-1, 1])\n    c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2)\n    shape = common_layers.shape_list(c)\n    new_shape = shape\n    new_shape.append(self.hparams.num_blocks)\n    new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks))\n    c = tf.to_int32(tf.reshape(c, shape=new_shape))\n    h1_shape = shape_x\n    h1_shape.append(self.hparams.hidden_size)\n    h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)\n    c_int = self.bit_to_int(\n        c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2)\n    c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1)\n    c_hot_flat = tf.reshape(\n        c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size])\n    h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means)\n    h1 = tf.transpose(h1, perm=[1, 0, 2])\n    h1 = tf.reshape(h1, shape=h1_shape)\n    h1_shape[0] = self.hparams.batch_size\n    h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name=\"vch2\")\n    res = tf.layers.dense(\n        tf.nn.relu(h2), self.hparams.hidden_size, name=\"vcfin\")\n    return res", "docstring": "Embedding function that takes discrete latent and returns embedding.\n\nArgs:\nx: Input to the discretization bottleneck.\nReturns:\nContinuous embedding to be passed on to the decoder.\n\nRaises:\nValueError: For unknown or missing arguments.", "source": "juraj-google-style"}
{"code": "def download_aspera(self, user, host, silent=False):\n    aspera_home = os.environ.get('ASPERA_HOME', None)\n    if (not aspera_home):\n        raise ValueError('environment variable $ASPERA_HOME not set')\n    if (not os.path.exists(aspera_home)):\n        raise ValueError('$ASPERA_HOME directory {} does not exist'.format(aspera_home))\n    ascp = os.path.join(aspera_home, 'connect/bin/ascp')\n    key = os.path.join(aspera_home, 'connect/etc/asperaweb_id_dsa.openssh')\n    if (not os.path.exists(ascp)):\n        raise ValueError('could not find ascp binary')\n    if (not os.path.exists(key)):\n        raise ValueError('could not find openssh key')\n    parsed_url = urlparse(self.url)\n    cmd = '{} -i {} -k1 -T -l400m {}@{}:{} {}'.format(ascp, key, user, host, parsed_url.path, self._temp_file_name)\n    logger.debug(cmd)\n    try:\n        pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n        (stdout, stderr) = pr.communicate()\n        if (not silent):\n            logger.debug(('Aspera stdout: ' + str(stdout)))\n            logger.debug(('Aspera stderr: ' + str(stderr)))\n        if (pr.returncode == 0):\n            logger.debug(('Moving %s to %s' % (self._temp_file_name, self.destination)))\n            shutil.move(self._temp_file_name, self.destination)\n            logger.debug(('Successfully downloaded %s' % self.url))\n        else:\n            logger.error(('Failed to download %s using Aspera Connect' % self.url))\n    finally:\n        try:\n            os.remove(self._temp_file_name)\n        except OSError:\n            pass", "docstring": "Download file with Aspera Connect.\n\nFor details see the documentation ov Aspera Connect\n\nArgs:\nuser (:obj:`str`): FTP user.\nhost (:obj:`str`): FTP host. Defaults to \"ftp-trace.ncbi.nlm.nih.gov\".", "source": "codesearchnet"}
{"code": "def build_as_function_and_v1_graph(func: Callable[..., Any]) -> Callable[..., None]:\n    if tf_inspect.isclass(func):\n        raise ValueError('`run_in_graph_mode_and_function` only supports test methods.')\n\n    @parameterized.named_parameters(('_v1_graph', 'v1_graph'), ('_function', 'function'))\n    @functools.wraps(func)\n    def decorated(self: 'TensorFlowTestCase', run_mode: str, *args, **kwargs) -> None:\n        if run_mode == 'v1_graph':\n            with ops.Graph().as_default():\n                func(self, *args, **kwargs)\n        elif run_mode == 'function':\n\n            @def_function.function\n            def function_in_eager():\n                func(self, *args, **kwargs)\n            graph_for_eager_test = ops.Graph()\n            with graph_for_eager_test.as_default(), context.eager_mode():\n                function_in_eager()\n            ops.dismantle_graph(graph_for_eager_test)\n        else:\n            raise ValueError('Unknown run mode %s' % run_mode)\n    return decorated", "docstring": "Run a test case in v1 graph mode and inside tf.function in eager mode.\n\nWARNING: This decorator can only be used in test cases that statically checks\ngenerated graph. Attempting to evaluate graph or function results via.\nsession.run() or self.evaluate() will fail.\n\nWARNING: This decorator can only be used for test cases that inherit from\nabsl.testing.parameterized.TestCase.\n\nArgs:\nfunc: Test case function to be decorated.\n\nReturns:\nDecorated test case function.", "source": "github-repos"}
{"code": "def _ScheduleTasks(self, storage_writer):\n    logger.debug('Task scheduler started')\n    self._status = definitions.STATUS_INDICATOR_RUNNING\n    event_source_heap = _EventSourceHeap()\n    self._FillEventSourceHeap(storage_writer, event_source_heap, start_with_first=True)\n    event_source = event_source_heap.PopEventSource()\n    task = None\n    while (event_source or self._task_manager.HasPendingTasks()):\n        if self._abort:\n            break\n        try:\n            if (not task):\n                task = self._task_manager.CreateRetryTask()\n            if ((not task) and event_source):\n                task = self._task_manager.CreateTask(self._session_identifier)\n                task.file_entry_type = event_source.file_entry_type\n                task.path_spec = event_source.path_spec\n                event_source = None\n                self._number_of_consumed_sources += 1\n                if self._guppy_memory_profiler:\n                    self._guppy_memory_profiler.Sample()\n            if task:\n                if self._ScheduleTask(task):\n                    logger.debug('Scheduled task {0:s} for path specification {1:s}'.format(task.identifier, task.path_spec.comparable))\n                    self._task_manager.SampleTaskStatus(task, 'scheduled')\n                    task = None\n                else:\n                    self._task_manager.SampleTaskStatus(task, 'schedule_attempted')\n            self._MergeTaskStorage(storage_writer)\n            if (not event_source_heap.IsFull()):\n                self._FillEventSourceHeap(storage_writer, event_source_heap)\n            if ((not task) and (not event_source)):\n                event_source = event_source_heap.PopEventSource()\n        except KeyboardInterrupt:\n            self._abort = True\n            self._processing_status.aborted = True\n            if self._status_update_callback:\n                self._status_update_callback(self._processing_status)\n    for task in self._task_manager.GetFailedTasks():\n        warning = warnings.ExtractionWarning(message='Worker failed to process path specification', path_spec=task.path_spec)\n        self._storage_writer.AddWarning(warning)\n        self._processing_status.error_path_specs.append(task.path_spec)\n    self._status = definitions.STATUS_INDICATOR_IDLE\n    if self._abort:\n        logger.debug('Task scheduler aborted')\n    else:\n        logger.debug('Task scheduler stopped')", "docstring": "Schedules tasks.\n\nArgs:\nstorage_writer (StorageWriter): storage writer for a session storage.", "source": "codesearchnet"}
{"code": "def _open_ring_2d(x_size: int, y_size: int, z_coord: int) -> List[Tuple[int, int, int]]:\n    ret = []\n    for i in range(y_size \n        for j in range(1, x_size):\n            ret.append((j, 2 * i, z_coord))\n        for j in range(x_size - 1, 0, -1):\n            ret.append((j, 2 * i + 1, z_coord))\n    for i in range(y_size - 1, 0, -1):\n        ret.append((0, i, z_coord))\n    return ret", "docstring": "Ring-order of a X by Y mesh, with a fixed Z coordinate.\n\nFor example, in a 4x4 mesh, this returns the following order.\n0 -- 1 -- 2 -- 3\n|    |    |    |\n15-- 6 -- 5 -- 4\n|    |    |    |\n14-- 7 -- 8 -- 9\n|    |    |    |\n13-- 12-- 11-- 10\n\nNote that chip 0 is not included in the output.\n\nArgs:\nx_size: An integer represents the mesh size in the x-dimension. Must be\nlarger than 1.\ny_size: An integer represents the mesh size in the y-dimension. Must be\nlarger than 1.\nz_coord: An integer represents the z-coordinate to use for the chips in the\nring.\n\nReturns:\nA list of (x,y,z) triples in ring order.", "source": "github-repos"}
{"code": "def parse_sv_frequencies(variant):\n    \n    frequency_keys = [\n        'clingen_cgh_benignAF',\n        'clingen_cgh_benign',\n        'clingen_cgh_pathogenicAF',\n        'clingen_cgh_pathogenic',\n        'clingen_ngi',\n        'clingen_ngiAF',\n        'swegen',\n        'swegenAF',\n        'decipherAF',\n        'decipher'\n    ]\n    sv_frequencies = {}\n\n    for key in frequency_keys:\n        value = variant.INFO.get(key, 0)\n        if 'AF' in key:\n            value = float(value)\n        else:\n            value = int(value)\n        if value > 0:\n            sv_frequencies[key] = value\n    return sv_frequencies", "docstring": "Parsing of some custom sv frequencies\n\nThese are very specific at the moment, this will hopefully get better over time when the\nfield of structural variants is more developed.\n\nArgs:\nvariant(cyvcf2.Variant)\n\nReturns:\nsv_frequencies(dict)", "source": "juraj-google-style"}
{"code": "def has_no_title(self, title, **kwargs):\n        \n\n        try:\n            self.assert_no_title(title, **kwargs)\n            return True\n        except ExpectationNotMet:\n            return False", "docstring": "Checks if the page doesn't have the given title.\n\nArgs:\ntitle (str | RegexObject): The string that the title should include.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nbool: Whether it doesn't match.", "source": "juraj-google-style"}
{"code": "def namespace_for_prefix(self, prefix):\n        \n        try:\n            ni = self.__lookup_prefix(prefix)\n        except PrefixNotFoundError:\n            return None\n        else:\n            return ni.uri", "docstring": "Get the namespace the given prefix maps to.\n\nArgs:\nprefix (str): The prefix\n\nReturns:\nstr: The namespace, or None if the prefix isn't mapped to\nanything in this set.", "source": "juraj-google-style"}
{"code": "def schema_from_json(self, file_or_path):\n    if isinstance(file_or_path, io.IOBase):\n        return self._schema_from_json_file_object(file_or_path)\n    with open(file_or_path) as file_obj:\n        return self._schema_from_json_file_object(file_obj)", "docstring": "Takes a file object or file path that contains json that describes\na table schema.\n\nReturns:\nList of schema field objects.", "source": "codesearchnet"}
{"code": "def video_augmentation(features, hue=False, saturate=False, contrast=False):\n    (inputs, targets) = (features['inputs'], features['targets'])\n    in_steps = common_layers.shape_list(inputs)[0]\n    video = tf.concat((inputs, targets), axis=0)\n    if hue:\n        video = tf.image.random_hue(video, max_delta=0.2)\n    if saturate:\n        video = tf.image.random_saturation(video, lower=0.5, upper=1.5)\n    if contrast:\n        video = tf.image.random_contrast(video, lower=0.5, upper=1.5)\n    (features['inputs'], features['targets']) = (video[:in_steps], video[in_steps:])\n    return features", "docstring": "Augments video with optional hue, saturation and constrast.\n\nArgs:\nfeatures: dict, with keys \"inputs\", \"targets\".\nfeatures[\"inputs\"], 4-D Tensor, shape=(THWC)\nfeatures[\"targets\"], 4-D Tensor, shape=(THWC)\nhue: bool, apply hue_transform.\nsaturate: bool, apply saturation transform.\ncontrast: bool, apply constrast transform.\nReturns:\naugment_features: dict with transformed \"inputs\" and \"targets\".", "source": "codesearchnet"}
{"code": "def minimize(self, minimize):\n        \n\n        self._minimize = minimize\n        self._logger.log('debug', 'Minimize set to {}'.format(minimize))", "docstring": "Configures the ABC to minimize fitness function return value or\nderived score\n\nArgs:\nminimize (bool): if True, minimizes fitness function return value;\nif False, minimizes derived score", "source": "juraj-google-style"}
{"code": "def dump(o, f):\n    if (not f.write):\n        raise TypeError('You can only dump an object to a file descriptor')\n    d = dumps(o)\n    f.write(d)\n    return d", "docstring": "Writes out dict as toml to a file\n\nArgs:\no: Object to dump into toml\nf: File descriptor where the toml should be stored\n\nReturns:\nString containing the toml corresponding to dictionary\n\nRaises:\nTypeError: When anything other than file descriptor is passed", "source": "codesearchnet"}
{"code": "def get_axis_value(self, axis):\n    if (self.type != EventType.POINTER_AXIS):\n        raise AttributeError(_wrong_meth.format(self.type))\n    return self._libinput.libinput_event_pointer_get_axis_value(self._handle, axis)", "docstring": "Return the axis value of the given axis.\n\nThe interpretation of the value depends on the axis. For the two\nscrolling axes :attr:`~libinput.constant.PointerAxis.SCROLL_VERTICAL`\nand :attr:`~libinput.constant.PointerAxis.SCROLL_HORIZONTAL`, the value\nof the event is in relative scroll units, with the positive direction\nbeing down or right, respectively. For the interpretation of the value,\nsee :attr:`axis_source`.\n\nIf :meth:`has_axis` returns False for an axis, this method returns 0\nfor that axis.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.Event.POINTER_AXIS`, this method raises\n:exc:`AttributeError`.\n\nArgs:\naxis (~libinput.constant.PointerAxis): The axis who's value to get.\nReturns:\nfloat: The axis value of this event.\nRaises:\nAttributeError", "source": "codesearchnet"}
{"code": "def list_pull_requests(self, username, page, status=None):\n    request_url = '{}/api/0/user/{}/requests/filed'.format(self.instance, username)\n    payload = {}\n    if (username is not None):\n        payload['username'] = username\n    if (page is not None):\n        payload['page'] = page\n    if (status is not None):\n        payload['status'] = status\n    return_value = self._call_api(request_url, params=payload)\n    return return_value['requests']", "docstring": "List pull-requests filed by user.\n\nParams:\nusername (string): filters the username of the user whose activity you are interested in.\npage (integer): the page requested. Defaults to 1.\nstatus (string): filter the status of pull requests. Default: Open,\ncan be Closed, Merged, All.\n\nReturns:\nlist: A list of Pull-Requests filed by a given user for all the\nprojects for given Pagure instance.", "source": "codesearchnet"}
{"code": "def find_primitive(self):\n    (lattice, scaled_positions, numbers) = spglib.find_primitive(self._cell, symprec=self._symprec)\n    species = [self._unique_species[(i - 1)] for i in numbers]\n    return Structure(lattice, species, scaled_positions, to_unit_cell=True).get_reduced_structure()", "docstring": "Find a primitive version of the unit cell.\n\nReturns:\nA primitive cell in the input cell is searched and returned\nas an Structure object. If no primitive cell is found, None is\nreturned.", "source": "codesearchnet"}
{"code": "def _hard_upsample(self, hidden_states, durations):\n    if hidden_states.size(0) == 1:\n        hidden_states = torch.repeat_interleave(hidden_states, durations.view(-1), dim=1)\n    else:\n        if hidden_states.shape[0] > 1 and self.training:\n            logger.warning_once('`self.training=True` and you use batching. You lose parallelism during the hifigan\\n                               forward pass because the samples are interleaved.')\n        hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=0) for hidden_state, duration in zip(hidden_states, durations)]\n        hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True)\n    return hidden_states", "docstring": "Repeats the time dimension of each sample in the batch based on the corresponding duration.\n\nArgs:\nhidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, *)`, *optional*):\nThe sequence to repeat, where `*` is any number of sequence-specific dimensions including none.\ndurations (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nIndicates how many times to repeat time segments.", "source": "github-repos"}
{"code": "def _update_inplace(self, new_query_compiler):\n        \n        old_query_compiler = self._query_compiler\n        self._query_compiler = new_query_compiler\n        old_query_compiler.free()", "docstring": "Updates the current DataFrame inplace.\n\nArgs:\nnew_query_compiler: The new QueryCompiler to use to manage the data", "source": "juraj-google-style"}
{"code": "def write_config_file(config_instance, appdirs=DEFAULT_APPDIRS,\n        file_name=DEFAULT_CONFIG_FILENAME):\n    \n\n    path = get_config_path(appdirs, file_name)\n    with open(path, 'w') as fobj:\n        config_instance.write(fobj)\n    return config_instance", "docstring": "Write a ConfigParser instance to file at the correct location.\n\nArgs:\nconfig_instance: Config instance to safe to file.\nappdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific\npath information.\nfile_name (text_type, optional): Name of the config file. Defaults to\n``DEFAULT_CONFIG_FILENAME``.\n\nReturns:\nSafeConfigParser: Instance written to file.", "source": "juraj-google-style"}
{"code": "def _get_corrupted_example(self, x):\n    corruption_type = self.builder_config.corruption_type\n    severity = self.builder_config.severity\n    return {'gaussian_noise': corruptions.gaussian_noise, 'shot_noise': corruptions.shot_noise, 'impulse_noise': corruptions.impulse_noise, 'defocus_blur': corruptions.defocus_blur, 'frosted_glass_blur': corruptions.frosted_glass_blur, 'zoom_blur': corruptions.zoom_blur, 'fog': corruptions.fog, 'brightness': corruptions.brightness, 'contrast': corruptions.contrast, 'elastic': corruptions.elastic, 'pixelate': corruptions.pixelate, 'jpeg_compression': corruptions.jpeg_compression}[corruption_type](x, severity)", "docstring": "Return corrupted images.\n\nArgs:\nx: numpy array, uncorrupted image.\n\nReturns:\nnumpy array, corrupted images.", "source": "codesearchnet"}
{"code": "def cos(times: np.ndarray, amp: complex, freq: float, phase: float=0) -> np.ndarray:\n    return (amp * np.cos(((((2 * np.pi) * freq) * times) + phase)).astype(np.complex_))", "docstring": "Continuous cosine wave.\n\nArgs:\ntimes: Times to output wave for.\namp: Pulse amplitude.\nfreq: Pulse frequency, units of 1/dt.\nphase: Pulse phase.", "source": "codesearchnet"}
{"code": "def CreateUnit(self, parent=None, value=None, bid_amount=None):\n    unit = {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}\n    if (parent is not None):\n        unit['parentCriterionId'] = parent['id']\n        unit['caseValue'] = value\n    if ((bid_amount is not None) and (bid_amount > 0)):\n        bidding_strategy_configuration = {'bids': [{'xsi_type': 'CpcBid', 'bid': {'xsi_type': 'Money', 'microAmount': str(bid_amount)}}]}\n        adgroup_criterion = {'xsi_type': 'BiddableAdGroupCriterion', 'biddingStrategyConfiguration': bidding_strategy_configuration}\n    else:\n        adgroup_criterion = {'xsi_type': 'NegativeAdGroupCriterion'}\n    adgroup_criterion['adGroupId'] = self.adgroup_id\n    adgroup_criterion['criterion'] = unit\n    self.CreateAddOperation(adgroup_criterion)\n    return unit", "docstring": "Creates a unit node.\n\nArgs:\nparent: The node that should be this node's parent.\nvalue: The value being partitioned on.\nbid_amount: The amount to bid for matching products, in micros.\nReturns:\nA new unit node.", "source": "codesearchnet"}
{"code": "def normalize(code):\n    if (len(code) == 3):\n        return code\n    normalized = translate(code)\n    if normalized:\n        return normalized\n    country = countries.get(code, None)\n    if country:\n        return country.alpha3.lower()\n    return code", "docstring": "Normalize language codes to ISO 639-2. If all conversions fails, return the\n`code` as it was given.\n\nArgs:\ncode (str): Language / country code.\n\nReturns:\nstr: ISO 639-2 country code.", "source": "codesearchnet"}
{"code": "def unpack_small_tensors(tower_grads, packing):\n    \n    if not packing:\n        return tower_grads\n    new_tower_grads = []\n    num_devices = len(tower_grads)\n    num_packed = len(packing.keys()) \n    for dev_idx, gv_list in enumerate(tower_grads):\n        new_gv_list = gv_list[num_packed:]\n        for i in xrange(0, num_packed):\n            k = \"%d:%d\" % (dev_idx, i)\n            gpt = packing[k]\n            gv = unpack_grad_tuple(gv_list[i], gpt)\n            for gi, idx in enumerate(gpt.indices):\n                assert idx == gpt.indices[gi]\n                new_gv_list.insert(idx, gv[gi])\n        new_tower_grads.append(new_gv_list)\n    return new_tower_grads", "docstring": "Undo the structure alterations to tower_grads done by pack_small_tensors.\n\nArgs:\ntower_grads: List of List of (grad, var) tuples.\npacking: A dict generated by pack_small_tensors describing the changes\nit made to tower_grads.\n\nReturns:\nnew_tower_grads: identical to tower_grads except that concatentations\nof small tensors have been split apart and returned to their original\npositions, paired with their original variables.", "source": "juraj-google-style"}
{"code": "def vcf_records(self, qualified=False):\n        \n        if qualified:\n            sample_names = self.qualified_sample_names\n        else:\n            sample_names = self.sample_names\n\n        for line in self._file_reader.read_lines():\n            if line.startswith(\"\n                continue\n            yield VcfRecord.parse_record(line, sample_names)", "docstring": "Generates parsed VcfRecord objects.\n\nTypically called in a for loop to process each vcf record in a\nVcfReader. VcfReader must be opened in advanced and closed when\ncomplete. Skips all headers.\n\nArgs:\nqualified: When True, sample names are prefixed with file name\n\nReturns:\nParsed VcfRecord\n\nRaises:\nStopIteration: when reader is exhausted.\nTypeError: if reader is closed.", "source": "juraj-google-style"}
{"code": "def convert(self, vroot, entry_variables):\n        \n        self.graph_info = GraphInfo(vroot)\n        self.entry_variables = entry_variables\n\n        cnt = 0\n        with nn.parameter_scope(self.name):\n            \n            for t, func in enumerate(self.graph_info.funcs):\n                if func.name == \"BatchNormalization\":\n                    bn_func = func\n                    \n                    if bn_func.info.args[\"batch_stat\"] == False:\n                        o = self._bn_linear_conversion(bn_func, cnt)\n                        cnt += 1\n                        continue\n                \n                o = self._identity_conversion(func)\n\n        self.end_variable = o\n        return self.end_variable", "docstring": "All functions are replaced with the same `new` function.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj-google-style"}
{"code": "def _compute_upper_group(df):\n    \n    upper_group = df.groupby(['groups']).agg({\n        'value': sum,\n        'value_start': sum,\n        'upperGroup_label': 'first',\n        'upperGroup_order': 'first'\n    }).reset_index()\n    upper_group['type'] = 'parent'\n    upper_group['variation'] = upper_group['value'] / upper_group[\n        'value_start']\n    upper_group.drop(['value_start'], axis=1, inplace=True)\n    upper_group.rename(columns={'upperGroup_label': 'label'}, inplace=True)\n    return upper_group", "docstring": "Compute upperGroup\nArgs:\ndf (Dataframe):\n\nReturns: Dataframe", "source": "juraj-google-style"}
{"code": "def CreateBiddingStrategy(client):\n  \n  \n  bidding_strategy_service = client.GetService(\n      'BiddingStrategyService', version='v201809')\n\n  \n  shared_bidding_strategy = {\n      'name': 'Maximize Clicks %s' % uuid.uuid4(),\n      'biddingScheme': {\n          'xsi_type': 'TargetSpendBiddingScheme',\n          \n          'bidCeiling': {\n              'microAmount': '2000000'\n          }\n      }\n  }\n\n  \n  operation = {\n      'operator': 'ADD',\n      'operand': shared_bidding_strategy\n  }\n\n  response = bidding_strategy_service.mutate([operation])\n  new_bidding_strategy = response['value'][0]\n\n  print ('Shared bidding strategy with name \"%s\" and ID \"%s\" of type \"%s\"'\n         'was created.' %\n         (new_bidding_strategy['name'], new_bidding_strategy['id'],\n          new_bidding_strategy['biddingScheme']['BiddingScheme.Type']))\n\n  return new_bidding_strategy", "docstring": "Creates a bidding strategy object.\n\nArgs:\nclient: AdWordsClient the client to run the example with.\n\nReturns:\ndict An object representing a bidding strategy.", "source": "juraj-google-style"}
{"code": "def apply2(self, func, *args, **kwargs):\n    ret = func(args[0], self._t, *args[1:], **kwargs)\n    return LinearWrap(ret)", "docstring": "Apply a function on the wrapped tensor. The tensor\nwill be the second argument of func.\n\nThis is because many symbolic functions\n(such as tensorpack's layers) takes 'scope' as the first argument.\n\nReturns:\nLinearWrap: ``LinearWrap(func(args[0], self.tensor(), *args[1:], **kwargs))``.", "source": "codesearchnet"}
{"code": "def _avro_rows(block, avro_schema):\n    \n    blockio = six.BytesIO(block.avro_rows.serialized_binary_rows)\n    while True:\n        \n        \n        try:\n            \n            \n            yield fastavro.schemaless_reader(blockio, avro_schema)\n        except StopIteration:\n            break", "docstring": "Parse all rows in a stream block.\n\nArgs:\nblock ( \\\n~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \\\n):\nA block containing Avro bytes to parse into rows.\navro_schema (fastavro.schema):\nA parsed Avro schema, used to deserialized the bytes in the\nblock.\n\nReturns:\nIterable[Mapping]:\nA sequence of rows, represented as dictionaries.", "source": "juraj-google-style"}
{"code": "def execute_show(args, root_dir):\n    key = None\n    if args.get('key'):\n        key = args['key']\n        status = command_factory('status')({}, root_dir=root_dir)\n        if ((key not in status['data']) or (status['data'][key]['status'] != 'running')):\n            print('No running process with this key, use `log` to show finished processes.')\n            return\n    else:\n        status = command_factory('status')({}, root_dir=root_dir)\n        if isinstance(status['data'], str):\n            print(status['data'])\n            return\n        for k in sorted(status['data'].keys()):\n            if (status['data'][k]['status'] == 'running'):\n                key = k\n                break\n        if (key is None):\n            print('No running process, use `log` to show finished processes.')\n            return\n    config_dir = os.path.join(root_dir, '.config/pueue')\n    stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))\n    stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))\n    stdoutDescriptor = open(stdoutFile, 'r')\n    stderrDescriptor = open(stderrFile, 'r')\n    running = True\n    if args['watch']:\n        stdscr = curses.initscr()\n        curses.noecho()\n        curses.cbreak()\n        curses.curs_set(2)\n        stdscr.keypad(True)\n        stdscr.refresh()\n        try:\n            while running:\n                stdscr.clear()\n                stdoutDescriptor.seek(0)\n                message = stdoutDescriptor.read()\n                stdscr.addstr(0, 0, message)\n                stdscr.refresh()\n                time.sleep(2)\n        except Exception:\n            curses.nocbreak()\n            stdscr.keypad(False)\n            curses.echo()\n            curses.endwin()\n    else:\n        print('Stdout output:\\n')\n        stdoutDescriptor.seek(0)\n        print(get_descriptor_output(stdoutDescriptor, key))\n        print('\\n\\nStderr output:\\n')\n        stderrDescriptor.seek(0)\n        print(get_descriptor_output(stderrDescriptor, key))", "docstring": "Print stderr and stdout of the current running process.\n\nArgs:\nargs['watch'] (bool): If True, we open a curses session and tail\nthe output live in the console.\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "codesearchnet"}
{"code": "def compute_loss(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], return_outputs: bool=False, num_items_in_batch: Optional[torch.Tensor]=None):\n    if (self.label_smoother is not None or self.compute_loss_func is not None) and 'labels' in inputs:\n        labels = inputs.pop('labels')\n    else:\n        labels = None\n    if self.model_accepts_loss_kwargs:\n        loss_kwargs = {}\n        if num_items_in_batch is not None:\n            loss_kwargs['num_items_in_batch'] = num_items_in_batch\n        inputs = {**inputs, **loss_kwargs}\n    outputs = model(**inputs)\n    if self.args.past_index >= 0:\n        self._past = outputs[self.args.past_index]\n    if labels is not None:\n        unwrapped_model = self.accelerator.unwrap_model(model)\n        if _is_peft_model(unwrapped_model):\n            model_name = unwrapped_model.base_model.model._get_name()\n        else:\n            model_name = unwrapped_model._get_name()\n        if self.compute_loss_func is not None:\n            loss = self.compute_loss_func(outputs, labels, num_items_in_batch=num_items_in_batch)\n        elif model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():\n            loss = self.label_smoother(outputs, labels, shift_labels=True)\n        else:\n            loss = self.label_smoother(outputs, labels)\n    else:\n        if isinstance(outputs, dict) and 'loss' not in outputs:\n            raise ValueError(f'The model did not return a loss from the inputs, only the following keys: {','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}.')\n        loss = outputs['loss'] if isinstance(outputs, dict) else outputs[0]\n    if self.args.average_tokens_across_devices and (self.model_accepts_loss_kwargs or self.compute_loss_func) and (num_items_in_batch is not None):\n        loss *= self.accelerator.num_processes\n    return (loss, outputs) if return_outputs else loss", "docstring": "How the loss is computed by Trainer. By default, all models return the loss in the first element.\n\nArgs:\nmodel (`nn.Module`):\nThe model to compute the loss for.\ninputs (`Dict[str, Union[torch.Tensor, Any]]`):\nThe input data for the model.\nreturn_outputs (`bool`, *optional*, defaults to `False`):\nWhether to return the model outputs along with the loss.\nnum_items_in_batch (Optional[torch.Tensor], *optional*):\nThe number of items in the batch. If num_items_in_batch is not passed,\n\nReturns:\nThe loss of the model along with its output if return_outputs was set to True\n\nSubclass and override for custom behavior. If you are not using `num_items_in_batch` when computing your loss,\nmake sure to overwrite `self.model_accepts_loss_kwargs` to `False`. Otherwise, the loss calculationg might be slightly inacurate when performing gradient accumulation.", "source": "github-repos"}
{"code": "def proxy_num(self, protocol=None):\n    http_num = len(self.proxies['http'])\n    https_num = len(self.proxies['https'])\n    if (protocol == 'http'):\n        return http_num\n    elif (protocol == 'https'):\n        return https_num\n    else:\n        return (http_num + https_num)", "docstring": "Get the number of proxies in the pool\n\nArgs:\nprotocol (str, optional): 'http' or 'https' or None. (default None)\n\nReturns:\nIf protocol is None, return the total number of proxies, otherwise,\nreturn the number of proxies of corresponding protocol.", "source": "codesearchnet"}
{"code": "def get_njobs_in_queue(self, username=None):\n    if (username is None):\n        username = getpass.getuser()\n    (njobs, process) = self._get_njobs_in_queue(username=username)\n    if ((process is not None) and (process.returncode != 0)):\n        err_msg = ('Error trying to get the number of jobs in the queue' + 'The error response reads:\\n {}'.format(process.stderr.read()))\n        logger.critical(err_msg)\n    if (not isinstance(self, ShellAdapter)):\n        logger.info('The number of jobs currently in the queue is: {}'.format(njobs))\n    return njobs", "docstring": "returns the number of jobs in the queue, probably using subprocess or shutil to\ncall a command like 'qstat'. returns None when the number of jobs cannot be determined.\n\nArgs:\nusername: (str) the username of the jobs to count (default is to autodetect)", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False):\n    residual = hidden_states\n    query = key = self.with_pos_embed(hidden_states, position_embeddings)\n    hidden_states = self.self_attn(queries=query, keys=key, values=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states, attentions = hidden_states if output_attentions else (hidden_states[0], None)\n    hidden_states = self.dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = self.encoder_feedforward_dropout(hidden_states)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = self.dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    hidden_states = self.final_layer_norm(hidden_states)\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    if output_attentions:\n        return (hidden_states, attentions)\n    return (hidden_states,)", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nObject queries (also called content embeddings), to be added to the hidden states.\noutput_attentions (`bool`, *optional*, defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def CacheFileSystem(self, path_spec, file_system):\n    identifier = self._GetFileSystemCacheIdentifier(path_spec)\n    self._file_system_cache.CacheObject(identifier, file_system)", "docstring": "Caches a file system object based on a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nfile_system (FileSystem): file system object.", "source": "codesearchnet"}
{"code": "def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):\n    if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:\n        return\n    with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'r', encoding='utf-8') as f:\n        content = f.read()\n    pattern_tokenizer = re.compile('^\\\\s*TOKENIZER_MAPPING_NAMES\\\\s*=\\\\s*OrderedDict\\\\b')\n    lines = content.split('\\n')\n    idx = 0\n    while not pattern_tokenizer.search(lines[idx]):\n        idx += 1\n    idx += 1\n    while not lines[idx].startswith('TOKENIZER_MAPPING = _LazyAutoMapping'):\n        if lines[idx].endswith(','):\n            block = lines[idx]\n        else:\n            block = []\n            while not lines[idx].startswith('        ),'):\n                block.append(lines[idx])\n                idx += 1\n            block.append(lines[idx])\n            block = '\\n'.join(block)\n        idx += 1\n        if f'\"{old_model_patterns.model_type}\"' in block and old_model_patterns.tokenizer_class in block:\n            break\n    new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)\n    new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)\n    new_lines = lines[:idx] + [new_block] + lines[idx:]\n    with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'w', encoding='utf-8') as f:\n        f.write('\\n'.join(new_lines))", "docstring": "Add a tokenizer to the relevant mappings in the auto module.\n\nArgs:\nold_model_patterns (`ModelPatterns`): The patterns for the old model.\nnew_model_patterns (`ModelPatterns`): The patterns for the new model.", "source": "github-repos"}
{"code": "def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig:\n    if isinstance(gen_config_arg, GenerationConfig):\n        gen_config = deepcopy(gen_config_arg)\n    else:\n        pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg\n        config_file_name = None\n        if pretrained_model_name.is_file():\n            config_file_name = pretrained_model_name.name\n            pretrained_model_name = pretrained_model_name.parent\n        elif pretrained_model_name.is_dir():\n            pass\n        else:\n            pretrained_model_name = gen_config_arg\n        gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name)\n    try:\n        gen_config.validate(strict=True)\n    except ValueError as exc:\n        raise ValueError(str(exc) + '\\n\\nFix these issues to train your model.')\n    return gen_config", "docstring": "Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments.\n\nArgs:\ngen_config_arg (`str` or [`~generation.GenerationConfig]`):\n`Seq2SeqTrainingArguments.generation_config` argument.\n\nReturns:\nA `~generation.GenerationConfig`.", "source": "github-repos"}
{"code": "def from_linearized(first, second, intersections):\n    (s, t, success) = segment_intersection(first.start_node, first.end_node, second.start_node, second.end_node)\n    bad_parameters = False\n    if success:\n        if (not (_helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(t, 0.0, 1.0))):\n            bad_parameters = True\n    else:\n        if ((first.error == 0.0) and (second.error == 0.0)):\n            raise ValueError(_UNHANDLED_LINES)\n        bad_parameters = True\n        s = 0.5\n        t = 0.5\n    if bad_parameters:\n        if (not convex_hull_collide(first.curve.nodes, second.curve.nodes)):\n            return\n    orig_s = (((1 - s) * first.curve.start) + (s * first.curve.end))\n    orig_t = (((1 - t) * second.curve.start) + (t * second.curve.end))\n    (refined_s, refined_t) = _intersection_helpers.full_newton(orig_s, first.curve.original_nodes, orig_t, second.curve.original_nodes)\n    (refined_s, success) = _helpers.wiggle_interval(refined_s)\n    if (not success):\n        return\n    (refined_t, success) = _helpers.wiggle_interval(refined_t)\n    if (not success):\n        return\n    add_intersection(refined_s, refined_t, intersections)", "docstring": "Determine curve-curve intersection from pair of linearizations.\n\n.. note::\n\nThis assumes that at least one of ``first`` and ``second`` is\nnot a line. The line-line case should be handled \"early\"\nby :func:`check_lines`.\n\n.. note::\n\nThis assumes the caller has verified that the bounding boxes\nfor ``first`` and ``second`` actually intersect.\n\nIf there is an intersection along the segments, adds that intersection\nto ``intersections``. Otherwise, returns without doing anything.\n\nArgs:\nfirst (Linearization): First curve being intersected.\nsecond (Linearization): Second curve being intersected.\nintersections (list): A list of existing intersections.\n\nRaises:\nValueError: If ``first`` and ``second`` both have linearization error\nof ``0.0`` (i.e. they are both lines). This is because this\nfunction expects the caller to have used :func:`check_lines`\nalready.", "source": "codesearchnet"}
{"code": "def transform(self, path):\n    if ((path is None) or (not path)):\n        return None\n    obj_parent_modules = path.split('.')\n    objects = [obj_parent_modules.pop((- 1))]\n    while True:\n        try:\n            parent_module_path = '.'.join(obj_parent_modules)\n            parent_module = importlib.import_module(parent_module_path)\n            break\n        except ImportError:\n            if (len(obj_parent_modules) == 1):\n                raise ImportError((\"No module named '%s'\" % obj_parent_modules[0]))\n            objects.insert(0, obj_parent_modules.pop((- 1)))\n    current_object = parent_module\n    for obj in objects:\n        current_object = getattr(current_object, obj)\n    return current_object", "docstring": "Transform a path into an actual Python object.\n\nThe path can be arbitrary long. You can pass the path to a package,\na module, a class, a function or a global variable, as deep as you\nwant, as long as the deepest module is importable through\n``importlib.import_module`` and each object is obtainable through\nthe ``getattr`` method. Local objects will not work.\n\nArgs:\npath (str): the dot-separated path of the object.\n\nReturns:\nobject: the imported module or obtained object.", "source": "codesearchnet"}
{"code": "def list_leases(self, uuid=None):\n    try:\n        lease_files = os.listdir(self.path)\n    except OSError as e:\n        raise_from(LagoSubnetLeaseBadPermissionsException(self.path, e.strerror), e)\n    leases = [self.create_lease_object_from_idx(lease_file.split('.')[0]) for lease_file in lease_files if (lease_file != LOCK_NAME)]\n    if (not uuid):\n        return leases\n    else:\n        return [lease for lease in leases if (lease.uuid == uuid)]", "docstring": "List current subnet leases\n\nArgs:\nuuid(str): Filter the leases by uuid\n\nReturns:\nlist of :class:~Lease: current leases", "source": "codesearchnet"}
{"code": "async def download_cot_artifact(chain, task_id, path):\n    link = chain.get_link(task_id)\n    log.debug('Verifying {} is in {} cot artifacts...'.format(path, task_id))\n    if (not link.cot):\n        log.warning('Chain of Trust for \"{}\" in {} does not exist. See above log for more details. Skipping download of this artifact'.format(path, task_id))\n        return\n    if (path not in link.cot['artifacts']):\n        raise CoTError('path {} not in {} {} chain of trust artifacts!'.format(path, link.name, link.task_id))\n    url = get_artifact_url(chain.context, task_id, path)\n    loggable_url = get_loggable_url(url)\n    log.info('Downloading Chain of Trust artifact:\\n{}'.format(loggable_url))\n    (await download_artifacts(chain.context, [url], parent_dir=link.cot_dir, valid_artifact_task_ids=[task_id]))\n    full_path = link.get_artifact_full_path(path)\n    for (alg, expected_sha) in link.cot['artifacts'][path].items():\n        if (alg not in chain.context.config['valid_hash_algorithms']):\n            raise CoTError('BAD HASH ALGORITHM: {}: {} {}!'.format(link.name, alg, full_path))\n        real_sha = get_hash(full_path, hash_alg=alg)\n        if (expected_sha != real_sha):\n            raise CoTError('BAD HASH on file {}: {}: Expected {} {}; got {}!'.format(full_path, link.name, alg, expected_sha, real_sha))\n        log.debug('{} matches the expected {} {}'.format(full_path, alg, expected_sha))\n    return full_path", "docstring": "Download an artifact and verify its SHA against the chain of trust.\n\nArgs:\nchain (ChainOfTrust): the chain of trust object\ntask_id (str): the task ID to download from\npath (str): the relative path to the artifact to download\n\nReturns:\nstr: the full path of the downloaded artifact\n\nRaises:\nCoTError: on failure.", "source": "codesearchnet"}
{"code": "def _format_line(headers, fields):\n  \n  assert len(fields) == len(headers), (fields, headers)\n  fields = [\"%2.4f\" % field if isinstance(field, float) else str(field)\n            for field in fields]\n  return '  '.join(' ' * max(0, len(header) - len(field)) + field\n                   for (header, field) in zip(headers, fields))", "docstring": "Format a line of a table.\n\nArguments:\nheaders: A list of strings that are used as the table headers.\nfields: A list of the same length as `headers` where `fields[i]` is\nthe entry for `headers[i]` in this row. Elements can be of\narbitrary types. Pass `headers` to print the header row.\n\nReturns:\nA pretty string.", "source": "juraj-google-style"}
{"code": "def eig_one_step(current_vector, learning_rate, vector_prod_fn):\n  \n  grad = 2*vector_prod_fn(current_vector)\n  \n  \n  current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),\n                                           grad) / 2., shape=())\n\n  \n  \n  \n  grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)\n  grad_norm = tf.norm(grad)\n  grad_norm_sq = tf.square(grad_norm)\n\n  \n  norm_grad = grad / grad_norm\n\n  \n  \n  directional_second_derivative = (\n      tf.reshape(2*tf.matmul(tf.transpose(norm_grad),\n                             vector_prod_fn(norm_grad)),\n                 shape=()))\n\n  \n  \n  \n  grad_m_grad = directional_second_derivative*grad_norm_sq / 2\n\n  \n  \n  if directional_second_derivative / 2. < current_objective:\n    return norm_grad\n\n  \n  if directional_second_derivative > 0.:\n    step = -1. * grad_norm / directional_second_derivative\n  else:\n    \n    if grad_norm_sq <= 1e-16:\n      step = 0.0\n    else:\n      \n      step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq\n      \n      gain = -(2 * tf.reduce_sum(current_vector*grad) +\n               (step*step) * grad_m_grad)\n\n      \n      if gain < 0.:\n        step = -learning_rate * grad_norm\n  current_vector = current_vector + step * norm_grad\n  return tf.nn.l2_normalize(current_vector)", "docstring": "Function that performs one step of gd (variant) for min eigen value.\n\nArgs:\ncurrent_vector: current estimate of the eigen vector with minimum eigen\nvalue.\nlearning_rate: learning rate.\nvector_prod_fn: function which returns product H*x, where H is a matrix for\nwhich we computing eigenvector.\n\nReturns:\nupdated vector after one step", "source": "juraj-google-style"}
{"code": "def sg_regularizer_loss(scale=1.0):\n    return (scale * tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))", "docstring": "r\"\"\" Get regularizer losss\n\nArgs:\nscale: A scalar. A weight applied to regularizer loss", "source": "codesearchnet"}
{"code": "def RegisterDefinition(self, artifact_definition):\n    \n    artifact_definition_name = artifact_definition.name.lower()\n    if artifact_definition_name in self._artifact_definitions:\n      raise KeyError(\n          'Artifact definition already set for name: {0:s}.'.format(\n              artifact_definition.name))\n\n    self._artifact_definitions[artifact_definition_name] = artifact_definition\n    self._defined_artifact_names.add(artifact_definition.name)\n\n    for source in artifact_definition.sources:\n      if source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP:\n        self._artifact_name_references.update(source.names)", "docstring": "Registers an artifact definition.\n\nArtifact definitions are identified based on their lower case name.\n\nArgs:\nartifact_definition (ArtifactDefinition): an artifact definition.\n\nRaises:\nKeyError: if artifact definition is already set for the corresponding\nname.", "source": "juraj-google-style"}
{"code": "def __init__(self, unicodeHexValue, block):\n        \n        if unicodeHexValue < 0 or unicodeHexValue > 0x10FFFF:\n            raise (ValueError, \"numeric value outside Unicode range\")\n        self.unicodeHexValue = unicodeHexValue\n        \n        self.chr = chr(self.unicodeHexValue)\n        self.name = unicodedata.name(self.chr)\n        self.equivalents = {}\n        self._block = block", "docstring": "Set up a unicode character.\n\nArguments:\nunicodeHexValue -- an integer that should correspond to a\nUnicode code point.\nblock -- the CharacterBlock this character belongs to.\n\nRaises:\nValueError -- if unicodeHexValue is not a valid code point.", "source": "juraj-google-style"}
{"code": "def list_json_files(directory, recursive=False):\n    json_files = []\n    for (top, dirs, files) in os.walk(directory):\n        dirs.sort()\n        paths = (os.path.join(top, f) for f in sorted(files))\n        json_files.extend((x for x in paths if is_json(x)))\n        if (not recursive):\n            break\n    return json_files", "docstring": "Return a list of file paths for JSON files within `directory`.\n\nArgs:\ndirectory: A path to a directory.\nrecursive: If ``True``, this function will descend into all\nsubdirectories.\n\nReturns:\nA list of JSON file paths directly under `directory`.", "source": "codesearchnet"}
{"code": "def run(self):\n    accounts = list(AWSAccount.get_all(include_disabled=False).values())\n    for account in accounts:\n        self.log.debug('Updating VPC Flow Logs for {}'.format(account))\n        self.session = get_aws_session(account)\n        role_arn = self.confirm_iam_role(account)\n        for aws_region in AWS_REGIONS:\n            try:\n                vpc_list = VPC.get_all(account, aws_region).values()\n                need_vpc_flow_logs = [x for x in vpc_list if (x.vpc_flow_logs_status != 'ACTIVE')]\n                for vpc in need_vpc_flow_logs:\n                    if self.confirm_cw_log(account, aws_region, vpc.id):\n                        self.create_vpc_flow_logs(account, aws_region, vpc.id, role_arn)\n                    else:\n                        self.log.info('Failed to confirm log group for {}/{}'.format(account, aws_region))\n            except Exception:\n                self.log.exception('Failed processing VPCs for {}/{}.'.format(account, aws_region))\n        db.session.commit()", "docstring": "Main entry point for the auditor worker.\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def multiply(x1, x2, output_shape=None, name=None):\n    if (not isinstance(x2, Tensor)):\n        return ScalarMultiplyOperation(x1, x2).outputs[0]\n    with tf.name_scope(name, default_name='mul'):\n        (x1, x2) = binary_arguments_to_tensors(x1, x2)\n        return einsum([x1, x2], output_shape=_infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape))", "docstring": "Binary multiplication with broadcasting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def relu(x):\n    if any_symbolic_tensors((x,)):\n        return Relu().symbolic_call(x)\n    return backend.nn.relu(x)", "docstring": "Rectified linear unit activation function.\n\nIt is defined as `f(x) = max(0, x)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x1 = keras.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2])\n>>> keras.ops.relu(x1)\narray([0.0, 0.0, 1.0, 0.2], dtype=float32)", "source": "github-repos"}
{"code": "def replace_characters(self, text, characters, replacement=''):\n    if (not characters):\n        return text\n    characters = ''.join(sorted(characters))\n    if (characters in self._characters_regexes):\n        characters_regex = self._characters_regexes[characters]\n    else:\n        characters_regex = re.compile(('[%s]' % re.escape(characters)))\n        self._characters_regexes[characters] = characters_regex\n    return characters_regex.sub(replacement, text)", "docstring": "Remove characters from text.\n\nRemoves custom characters from input text or replaces them\nwith a string if specified.\n\nArgs:\ntext: The text to be processed.\ncharacters: Characters that will be replaced.\nreplacement: New text that will replace the custom characters.\n\nReturns:\nThe text without the given characters.", "source": "codesearchnet"}
{"code": "def regroup(values, wrap_class=values_lib.PerReplica, always_wrap=False):\n    v0 = values[0]\n    if isinstance(v0, list):\n        for v in values[1:]:\n            assert isinstance(v, list)\n            assert len(v) == len(v0), 'len(v) == %d, len(v0) == %d, v: %s, v0: %s' % (len(v), len(v0), v, v0)\n        return [regroup(tuple((v[i] for v in values)), wrap_class, always_wrap) for i in range(len(v0))]\n    if isinstance(v0, tuple):\n        for v in values[1:]:\n            assert isinstance(v, tuple)\n            assert len(v) == len(v0), f'Values to regroup had different lengths: len(v) == {len(v)}, len(v0) == {len(v0)}, v: {v}, v0: {v0}'\n        regrouped_tuple = tuple((regroup(tuple((v[i] for v in values)), wrap_class, always_wrap) for i in range(len(v0))))\n        if hasattr(v0, '_fields'):\n            assert hasattr(v0, '_make')\n            return v0._make(regrouped_tuple)\n        else:\n            return regrouped_tuple\n    if isinstance(v0, abc.Mapping):\n        v0keys = v0.keys()\n        for v in values[1:]:\n            assert isinstance(v, abc.Mapping), 'v[0]: %r  v[i]: %r' % (v0, v)\n            assert set(v.keys()) == set(v0keys), 'v[0].keys: %s  v[i].keys: %s' % (set(v0keys), set(v.keys()))\n        return type(v0)({key: regroup(tuple((v[key] for v in values)), wrap_class, always_wrap) for key in v0keys})\n    same_id = True\n    for v in values[1:]:\n        if v is not v0:\n            same_id = False\n            break\n    if same_id and isinstance(v0, values_lib.DistributedVariable):\n        return v0\n    if same_id and (not always_wrap) and (value_container(v0) is v0):\n        return v0\n    if not isinstance(v0, resource_variable_ops._UnreadVariable) and value_container(v0) is not v0:\n        assert not isinstance(v0, values_lib.MirroredVariable), 'ids = %s, values = %s' % ([id(v) for v in values], values)\n        distributed_container = value_container(v0)\n        assert distributed_container is not None\n        for v in values[1:]:\n            assert distributed_container is value_container(v)\n        return distributed_container\n    return wrap_class(values)", "docstring": "Makes a nest per-replica into a nest of PerReplica/Mirrored values.\n\nArgs:\nvalues: Values to regroup\nwrap_class: Class that `values` be wrapped in.\nalways_wrap: Always wrap the `values` in `wrap_class` even if the values\nare the same except for DistributeVariable.\nReturns:\nWrapped `values`.", "source": "github-repos"}
{"code": "def authenticate(self, username, password):\n        \n        if self.config.get('LDAP_BIND_DIRECT_CREDENTIALS'):\n            result = self.authenticate_direct_credentials(username, password)\n\n        elif not self.config.get('LDAP_ALWAYS_SEARCH_BIND') and \\\n                self.config.get('LDAP_USER_RDN_ATTR') == \\\n                self.config.get('LDAP_USER_LOGIN_ATTR'):\n            \n            \n            result = self.authenticate_direct_bind(username, password)\n        else:\n            \n            \n            result = self.authenticate_search_bind(username, password)\n\n        return result", "docstring": "An abstracted authentication method. Decides whether to perform a\ndirect bind or a search bind based upon the login attribute configured\nin the config.\n\nArgs:\nusername (str): Username of the user to bind\npassword (str): User's password to bind with.\n\nReturns:\nAuthenticationResponse", "source": "juraj-google-style"}
{"code": "def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metrics?api-version=', INSIGHTS_PREVIEW_API])\n    return do_get(endpoint, access_token)", "docstring": "Get the monitoring metrics for a resource.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nresource_type (str): Type of resource.\nresource_name (str): Name of resource.\n\nReturns:\nHTTP response. JSON body of resource metrics.", "source": "codesearchnet"}
{"code": "def get_posts(self, num=None, tag=None, private=False):\n\t\t\n\t\tposts = self.posts\n\n\t\tif not private:\n\t\t\tposts = [post for post in posts if post.public]\n\n\t\tif tag:\n\t\t\tposts = [post for post in posts if tag in post.tags]\n\n\t\tif num:\n\t\t\treturn posts[:num]\n\t\treturn posts", "docstring": "Get all the posts added to the blog.\n\nArgs:\nnum (int): Optional. If provided, only return N posts (sorted by date,\nmost recent first).\ntag (Tag): Optional. If provided, only return posts that have a\nspecific tag.\nprivate (bool): By default (if False), private posts are not included.\nIf set to True, private posts will also be included.", "source": "juraj-google-style"}
{"code": "def load_steps(working_dir=None, steps_dir=None, step_file=None,\n               step_list=None):\n    \n    if steps_dir is not None:\n        step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))\n    elif step_file is not None:\n        step_files = [step_file]\n    elif step_list is not None:\n        step_files = []\n        for path in step_list:\n            if os.path.isdir(path):\n                step_files += glob.glob(os.path.join(path, '*.cwl'))\n            else:\n                step_files.append(path)\n    else:\n        step_files = []\n\n    if working_dir is not None:\n        step_files = sort_loading_order(step_files)\n\n    steps = {}\n    for f in step_files:\n        if working_dir is not None:\n            \n            if not working_dir == os.path.dirname(f) and not is_url(f):\n                copied_file = os.path.join(working_dir, os.path.basename(f))\n                shutil.copy2(f, copied_file)\n                f = copied_file\n\n        \n        try:\n            s = Step(f)\n            steps[s.name] = s\n        except (NotImplementedError, ValidationException,\n                PackedWorkflowException) as e:\n            logger.warning(e)\n\n    return steps", "docstring": "Return a dictionary containing Steps read from file.\n\nArgs:\nsteps_dir (str, optional): path to directory containing CWL files.\nstep_file (str, optional): path or http(s) url to a single CWL file.\nstep_list (list, optional): a list of directories, urls or local file\npaths to CWL files or directories containing CWL files.\n\nReturn:\ndict containing (name, Step) entries.", "source": "juraj-google-style"}
{"code": "def moveRel(xOffset=None, yOffset=None, duration=0.0, tween=linear, pause=None, _pause=True):\n    _failSafeCheck()\n    (xOffset, yOffset) = _unpackXY(xOffset, yOffset)\n    _mouseMoveDrag('move', None, None, xOffset, yOffset, duration, tween)\n    _autoPause(pause, _pause)", "docstring": "Moves the mouse cursor to a point on the screen, relative to its current\nposition.\n\nThe x and y parameters detail where the mouse event happens. If None, the\ncurrent mouse position is used. If a float value, it is rounded down. If\noutside the boundaries of the screen, the event happens at edge of the\nscreen.\n\nArgs:\nx (int, float, None, tuple, optional): How far left (for negative values) or\nright (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.\ny (int, float, None, optional): How far up (for negative values) or\ndown (for positive values) to move the cursor. 0 by default.\nduration (float, optional): The amount of time it takes to move the mouse\ncursor to the new xy coordinates. If 0, then the mouse cursor is moved\ninstantaneously. 0.0 by default.\ntween (func, optional): The tweening function used if the duration is not\n0. A linear tween is used by default. See the tweens.py file for\ndetails.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Kdp(scatterer):\n    \n    if (scatterer.thet0 != scatterer.thet) or \\\n        (scatterer.phi0 != scatterer.phi):\n        \n        raise ValueError(\"A forward scattering geometry is needed to \" + \\\n            \"compute the specific differential phase.\")\n\n    S = scatterer.get_S()\n    return 1e-3 * (180.0/np.pi) * scatterer.wavelength * (S[1,1]-S[0,0]).real", "docstring": "Specific differential phase (K_dp) for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\n\nReturns:\nK_dp [deg/km].\n\nNOTE: This only returns the correct value if the particle diameter and\nwavelength are given in [mm]. The scatterer object should be set to\nforward scattering geometry before calling this function.", "source": "juraj-google-style"}
{"code": "def _ircounts2radiance(counts, scale, offset):\n    rad = ((counts - offset) / scale)\n    return rad.clip(min=0)", "docstring": "Convert IR counts to radiance\n\nReference: [IR].\n\nArgs:\ncounts: Raw detector counts\nscale: Scale [mW-1 m2 cm sr]\noffset: Offset [1]\n\nReturns:\nRadiance [mW m-2 cm-1 sr-1]", "source": "codesearchnet"}
{"code": "def random_hermitian_matrix(num_qubits):\n    dim = 2 ** num_qubits\n    val_range = 2\n    random_real = tf.cast(tf.random.uniform([dim, dim], -val_range, val_range), tf.complex128)\n    random_imag = 1j * tf.cast(tf.random.uniform([dim, dim], -val_range, val_range), tf.complex128)\n    random_matrix = random_real + random_imag\n    return random_matrix + tf.linalg.adjoint(random_matrix)", "docstring": "Returns a random Hermitian matrix.\n\nUses the property that A + A* is Hermitian for any matrix A.\n\nArgs:\nnum_qubits: Number of qubits on which the matrix acts.", "source": "github-repos"}
{"code": "def select_char_code_table(self, table):\n        \n        tables = {'standard': 0,\n                  'eastern european': 1,\n                  'western european': 2,\n                  'spare': 3\n                  }\n        if table in tables:\n            self.send(chr(27)+'t'+chr(tables[table]))\n        else:\n            raise RuntimeError('Invalid char table.')", "docstring": "Select character code table, from tree built in ones.\n\nArgs:\ntable: The desired character code table. Choose from 'standard', 'eastern european', 'western european', and 'spare'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid chartable.", "source": "juraj-google-style"}
{"code": "def mols_to_file(mols, path):\n    with open(path, 'w') as f:\n        f.write(mols_to_text(mols))", "docstring": "Save molecules to the SDFile format file\n\nArgs:\nmols: list of molecule objects\npath: file path to save", "source": "codesearchnet"}
{"code": "def create(labels=None, **kw):\n    \n    if labels is not None:\n        kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue,\n                                                  labels)\n    return MetricValue(**kw)", "docstring": "Constructs a new metric value.\n\nThis acts as an alternate to MetricValue constructor which\nsimplifies specification of labels.  Rather than having to create\na MetricValue.Labels instance, all that's necessary to specify the\nrequired string.\n\nArgs:\nlabels (dict([string, [string]]):\n**kw: any other valid keyword args valid in the MetricValue constructor\n\nReturns\n:class:`MetricValue`: the created instance", "source": "juraj-google-style"}
{"code": "def __init__(self, batch_env, step, is_training, should_log, config):\n    \n    self._batch_env = batch_env\n    self._step = step\n    self._is_training = is_training\n    self._should_log = should_log\n    self._config = config\n    self._observ_filter = parts.StreamingNormalize(\n        self._batch_env.observ[0], center=True, scale=True, clip=5,\n        name='normalize_observ')\n    self._reward_filter = parts.StreamingNormalize(\n        self._batch_env.reward[0], center=False, scale=True, clip=10,\n        name='normalize_reward')\n    self._use_gpu = self._config.use_gpu and utility.available_gpus()\n    policy_params, state = self._initialize_policy()\n    self._initialize_memory(policy_params)\n    \n    with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):\n      self._optimizer = self._config.optimizer(self._config.learning_rate)\n    self._penalty = tf.Variable(\n        self._config.kl_init_penalty, False, dtype=tf.float32)\n    \n    with tf.variable_scope('ppo_temporary'):\n      with tf.device('/gpu:0'):\n        if state is None:\n          self._last_state = None\n        else:\n          var_like = lambda x: tf.Variable(lambda: tf.zeros_like(x), False)\n          self._last_state = tools.nested.map(var_like, state)\n    \n    with tf.variable_scope('ppo_temporary'):\n      self._last_action = tf.Variable(\n          tf.zeros_like(self._batch_env.action), False, name='last_action')\n      self._last_policy = tools.nested.map(\n          lambda x: tf.Variable(tf.zeros_like(x[:, 0], False)), policy_params)", "docstring": "Create an instance of the PPO algorithm.\n\nArgs:\nbatch_env: In-graph batch environment.\nstep: Integer tensor holding the current training step.\nis_training: Boolean tensor for whether the algorithm should train.\nshould_log: Boolean tensor for whether summaries should be returned.\nconfig: Object containing the agent configuration as attributes.", "source": "juraj-google-style"}
{"code": "def __init__(self, filename, content_generator=None, content_length=None):\n    \n    precondition.AssertType(filename, Text)\n    self.filename = filename\n    self.content_length = content_length\n\n    if content_generator is None:\n      raise ValueError(\"content_generator can't be None\")\n    self.content_generator = content_generator", "docstring": "ApiBinaryStream constructor.\n\nArgs:\nfilename: A file name to be used by the browser when user downloads the\nfile.\ncontent_generator: A generator that yields byte chunks (of any size) to\nbe streamed to the user.\ncontent_length: The length of the stream, if known upfront.\n\nRaises:\nValueError: if content_generator is None.", "source": "juraj-google-style"}
{"code": "def _create_w_objective(m, X, R):\n    \n    genes, clusters = m.shape\n    cells = X.shape[1]\n    R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))\n    def objective(w):\n        \n        \n        w = w.reshape((m.shape[1], X.shape[1]))\n        d = m.dot(w)+eps\n        return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes\n    def deriv(w):\n        \n        \n        \n        w2 = w.reshape((m.shape[1], X.shape[1]))\n        d = m.dot(w2)+eps\n        temp = X/d\n        temp2 = (X+R1)/(d+R1)\n        m1 = m.T.dot(temp2)\n        m2 = m.T.dot(temp)\n        deriv = m1 - m2\n        return deriv.flatten()/genes\n    return objective, deriv", "docstring": "Creates an objective function and its derivative for W, given M and X (data)\n\nArgs:\nm (array): genes x clusters\nX (array): genes x cells\nR (array): 1 x genes", "source": "juraj-google-style"}
{"code": "def all_distances(coords1, coords2):\n    \n    c1 = np.array(coords1)\n    c2 = np.array(coords2)\n    z = (c1[:, None, :] - c2[None, :, :]) ** 2\n    return np.sum(z, axis=-1) ** 0.5", "docstring": "Returns the distances between two lists of coordinates\n\nArgs:\ncoords1: First set of cartesian coordinates.\ncoords2: Second set of cartesian coordinates.\n\nReturns:\n2d array of cartesian distances. E.g the distance between\ncoords1[i] and coords2[j] is distances[i,j]", "source": "juraj-google-style"}
{"code": "def write_index(self, overwrite: bool=False, mock: bool=False) -> None:\n    write_if_allowed(self.index_filename, self.index_content(), overwrite=overwrite, mock=mock)", "docstring": "Writes the index file, if permitted.\n\nArgs:\noverwrite: allow existing files to be overwritten?\nmock: pretend to write, but don't", "source": "codesearchnet"}
{"code": "def tournament_number2name(self, number):\n        \n        tournaments = self.get_tournaments()\n        d = {t['tournament']: t['name'] for t in tournaments}\n        return d.get(number, None)", "docstring": "Translate tournament number to tournament name.\n\nArgs:\nnumber (int): tournament number to translate\n\nReturns:\nname (str): name of the tournament or `None` if unknown.\n\nExamples:\n>>> NumerAPI().tournament_number2name(4)\n'delta'\n>>> NumerAPI().tournament_number2name(99)\nNone", "source": "juraj-google-style"}
{"code": "def build_ellipse(X, Y):\n    x_mean = np.mean(X)\n    y_mean = np.mean(Y)\n    cov_matrix = np.cov(np.vstack((X, Y)))\n    (U, s, V) = linalg.svd(cov_matrix, full_matrices=False)\n    chi_95 = np.sqrt(4.61)\n    width = ((np.sqrt(cov_matrix[0][0]) * chi_95) * 2)\n    height = ((np.sqrt(cov_matrix[1][1]) * chi_95) * 2)\n    eigenvector = V.T[0]\n    angle = np.arctan((eigenvector[1] / eigenvector[0]))\n    return (x_mean, y_mean, width, height, angle)", "docstring": "Construct ellipse coordinates from two arrays of numbers.\n\nArgs:\nX (1D array_like)\nY (1D array_like)\n\nReturns:\nfloat: The mean of `X`.\nfloat: The mean of `Y`.\nfloat: The width of the ellipse.\nfloat: The height of the ellipse.\nfloat: The angle of orientation of the ellipse.", "source": "codesearchnet"}
{"code": "def record(*fields):\n\n    @six.add_metaclass(_RecordMetaClass)\n    class RecordType(object):\n        _record_sentinel = True\n        _record_fields = fields\n    return RecordType", "docstring": "Constructs a type that can be extended to create immutable, value types.\n\nExamples:\nA typical declaration looks like::\n\nclass MyRecord(record('a', ('b', 1))):\npass\n\nThe above would make a sub-class of ``collections.namedtuple`` that was named ``MyRecord`` with\na constructor that had the ``b`` field set to 1 by default.\n\nNote:\nThis uses meta-class machinery to rewrite the inheritance hierarchy.\nThis is done in order to make sure that the underlying ``namedtuple`` instance is\nbound to the right type name and to make sure that the synthetic class that is generated\nto enable this machinery is not enabled for sub-classes of a user's record class.\n\nArgs:\nfields (list[str | (str, any)]): A sequence of str or pairs that", "source": "codesearchnet"}
{"code": "def remove_site(name):\n    \n    current_sites = list_sites()\n\n    if name not in current_sites:\n        log.debug('Site already absent: %s', name)\n        return True\n\n    ps_cmd = ['Remove-WebSite', '-Name', r\"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    if cmd_ret['retcode'] != 0:\n        msg = 'Unable to remove site: {0}\\nError: {1}' \\\n              ''.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n\n    log.debug('Site removed successfully: %s', name)\n    return True", "docstring": "Delete a website from IIS.\n\nArgs:\nname (str): The IIS site name.\n\nReturns:\nbool: True if successful, otherwise False\n\n.. note::\n\nThis will not remove the application pool used by the site.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.remove_site name='My Test Site'", "source": "juraj-google-style"}
{"code": "def get_likelihood(self, uni_matrix):\n        \n        uni_dim = uni_matrix.shape[1]\n        num_edge = len(self.edges)\n        values = np.zeros([1, num_edge])\n        new_uni_matrix = np.empty([uni_dim, uni_dim])\n\n        for i in range(num_edge):\n            edge = self.edges[i]\n            value, left_u, right_u = edge.get_likelihood(uni_matrix)\n            new_uni_matrix[edge.L, edge.R] = left_u\n            new_uni_matrix[edge.R, edge.L] = right_u\n            values[0, i] = np.log(value)\n\n        return np.sum(values), new_uni_matrix", "docstring": "Compute likelihood of the tree given an U matrix.\n\nArgs:\nuni_matrix(numpy.array): univariate matrix to evaluate likelihood on.\n\nReturns:\ntuple[float, numpy.array]:\nlikelihood of the current tree, next level conditional univariate matrix", "source": "juraj-google-style"}
{"code": "def updateParams(self, newvalues):\n        \n        for (param, value) in newvalues.items():\n            if param not in self.model.freeparams:\n                raise RuntimeError(\"Can't handle param: {0}\".format(\n                        param))\n        if newvalues:\n            self.model.updateParams(newvalues)\n            self._updateInternals()\n            self._paramsarray = None", "docstring": "Update model parameters and re-compute likelihoods.\n\nThis method is the **only** acceptable way to update model\nparameters. The likelihood is re-computed as needed\nby this method.\n\nArgs:\n`newvalues` (dict)\nA dictionary keyed by param name and with value as new\nvalue to set. Each parameter name must either be a\nvalid model parameter (in `model.freeparams`).", "source": "juraj-google-style"}
{"code": "class ByteRewriter:\n    LEAF = '[LEAF]'\n\n    def __init__(self, rewriting_rules: Union[str, Dict[str, str]]):\n        if isinstance(rewriting_rules, str):\n            with open(rewriting_rules, 'r') as f:\n                rewriting_rules = json.load(f)\n        elif not isinstance(rewriting_rules, dict):\n            raise ValueError(f'rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}')\n        self.hash_tree = self.construct_hash_tree(rewriting_rules)\n        reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()}\n        self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules)\n\n    def add_leaf(self, hash_tree: Dict[str, Union[dict, List[str]]], byte_in_sequence: str, byte_out_sequence: str):\n        \n        byte_in_list = byte_in_sequence.split(' ')\n        byte_out_list = byte_out_sequence.split(' ')\n        tree_pointer = hash_tree\n        for b in byte_in_list:\n            if b not in tree_pointer:\n                tree_pointer[b] = {}\n            tree_pointer = tree_pointer[b]\n        tree_pointer[self.LEAF] = byte_out_list\n\n    def construct_hash_tree(self, rewriting_rules: Dict[str, str]) -> Dict[str, Union[dict, List[str]]]:\n        \n        hash_tree = defaultdict(dict)\n        for b in (f'{x:02x}' for x in range(256)):\n            hash_tree[b][self.LEAF] = [b]\n        for in_sequence, out_sequence in rewriting_rules.items():\n            self.add_leaf(hash_tree, in_sequence, out_sequence)\n        return hash_tree\n\n    def search_hash_tree(self, byte_sequence: List[str]) -> Union[None, List[str]]:\n        \n        tree_pointer = self.hash_tree\n        for b in byte_sequence:\n            if b in tree_pointer:\n                tree_pointer = tree_pointer[b]\n            else:\n                return None\n        return tree_pointer[self.LEAF]\n\n    def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]:\n        \n        out_bytes = []\n        b_start = 0\n        b_end = 0\n        while b_start < len(in_bytes):\n            tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree\n            for j in range(b_start, len(in_bytes)):\n                b = in_bytes[j]\n                if b in tree_pointer:\n                    tree_pointer = tree_pointer[b]\n                elif j == b_start:\n                    cur_leaf = [b]\n                    b_end = j\n                    break\n                else:\n                    break\n                if self.LEAF in tree_pointer:\n                    cur_leaf = tree_pointer[self.LEAF]\n                    b_end = j\n            out_bytes.extend(cur_leaf)\n            b_start = b_end + 1\n        return out_bytes", "docstring": "Byte rewriter class for MyT5 tokenizer.\nThis class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules.\n\nArgs:\nrewriting_rules (`str` or `Dict[str, str]`):\nA path to a json file containing the rewriting rules or a dictionary containing the rewriting rules.", "source": "github-repos"}
{"code": "def load_compositors(self, sensor_names):\n    comps = {}\n    mods = {}\n    for sensor_name in sensor_names:\n        if (sensor_name not in self.compositors):\n            self.load_sensor_composites(sensor_name)\n        if (sensor_name in self.compositors):\n            comps[sensor_name] = DatasetDict(self.compositors[sensor_name].copy())\n            mods[sensor_name] = self.modifiers[sensor_name].copy()\n    return (comps, mods)", "docstring": "Load all compositor configs for the provided sensors.\n\nArgs:\nsensor_names (list of strings): Sensor names that have matching\n``sensor_name.yaml`` config files.\n\nReturns:\n(comps, mods): Where `comps` is a dictionary:\n\nsensor_name -> composite ID -> compositor object\n\nAnd `mods` is a dictionary:\n\nsensor_name -> modifier name -> (modifier class,\nmodifiers options)\n\nNote that these dictionaries are copies of those cached in\nthis object.", "source": "codesearchnet"}
{"code": "def DeleteSignedBinary(binary_urn,\n                       token = None):\n  \n  if _ShouldUseLegacyDatastore():\n    try:\n      aff4.FACTORY.Open(\n          binary_urn, aff4_type=aff4.AFF4Stream, mode=\"r\", token=token)\n    except aff4.InstantiationError:\n      raise SignedBinaryNotFoundError(binary_urn)\n    aff4.FACTORY.Delete(binary_urn, token=token)\n\n  if data_store.RelationalDBEnabled():\n    try:\n      data_store.REL_DB.ReadSignedBinaryReferences(\n          _SignedBinaryIDFromURN(binary_urn))\n    except db.UnknownSignedBinaryError:\n      if _ShouldUseLegacyDatastore():\n        \n        \n        return\n      else:\n        raise SignedBinaryNotFoundError(binary_urn)\n    data_store.REL_DB.DeleteSignedBinaryReferences(\n        _SignedBinaryIDFromURN(binary_urn))", "docstring": "Deletes the binary with the given urn from the datastore.\n\nArgs:\nbinary_urn: RDFURN that serves as a unique identifier for the binary.\ntoken: ACL token to use with the legacy (non-relational) datastore.\n\nRaises:\nSignedBinaryNotFoundError: If the signed binary does not exist.", "source": "juraj-google-style"}
{"code": "def appliance_device_snmp_v3_trap_destinations(self):\n    if (not self.__appliance_device_snmp_v3_trap_destinations):\n        self.__appliance_device_snmp_v3_trap_destinations = ApplianceDeviceSNMPv3TrapDestinations(self.__connection)\n    return self.__appliance_device_snmp_v3_trap_destinations", "docstring": "Gets the ApplianceDeviceSNMPv3TrapDestinations API client.\n\nReturns:\nApplianceDeviceSNMPv3TrapDestinations:", "source": "codesearchnet"}
{"code": "def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=_flagvalues.FLAGS):\n    for flag_name in flag_names:\n        if (flag_values[flag_name].default is not None):\n            warnings.warn('Flag --{} has a non-None default value. That does not make sense with mark_flags_as_mutual_exclusive, which checks whether the listed flags have a value other than None.'.format(flag_name))\n\n    def validate_mutual_exclusion(flags_dict):\n        flag_count = sum((1 for val in flags_dict.values() if (val is not None)))\n        if ((flag_count == 1) or ((not required) and (flag_count == 0))):\n            return True\n        raise _exceptions.ValidationError('{} one of ({}) must have a value other than None.'.format(('Exactly' if required else 'At most'), ', '.join(flag_names)))\n    register_multi_flags_validator(flag_names, validate_mutual_exclusion, flag_values=flag_values)", "docstring": "Ensures that only one flag among flag_names is not None.\n\nImportant note: This validator checks if flag values are None, and it does not\ndistinguish between default and explicit values. Therefore, this validator\ndoes not make sense when applied to flags with default values other than None,\nincluding other false values (e.g. False, 0, '', []). That includes multi\nflags with a default value of [] instead of None.\n\nArgs:\nflag_names: [str], names of the flags.\nrequired: bool. If true, exactly one of the flags must have a value other\nthan None. Otherwise, at most one of the flags can have a value other\nthan None, and it is valid for all of the flags to be None.\nflag_values: flags.FlagValues, optional FlagValues instance where the flags\nare defined.", "source": "codesearchnet"}
{"code": "def __init__(self, client=None, workingdir='/workingdir'):\n        \n\n        self.client = self.connect_to_docker(client)\n        self.default_wdir = workingdir\n        self.hostname = self.client.base_url", "docstring": "Initialization:\n\nArgs:\nclient (docker.Client): a docker-py client. If not passed, we will try to create the\nclient from the job's environmental varaibles\nworkingdir (str): default working directory to create in the containers", "source": "juraj-google-style"}
{"code": "def find_priority_list(py_files):\n    dependencies = map_dependencies(py_files)\n    ordered_files = topological_sort(dependencies)\n    return (ordered_files, dependencies)", "docstring": "Given a list of modular files, sorts them by topological order. Modular models that DON'T depend on other modular\nmodels will be higher in the topological order.\n\nArgs:\npy_files: List of paths to the modular files\n\nReturns:\nA tuple with the ordered files (list) and their dependencies (dict)", "source": "github-repos"}
{"code": "def _assert_same_base_type(items, expected_type=None):\n    original_expected_type = expected_type\n    mismatch = False\n    for item in items:\n        if item is not None:\n            item_type = item.dtype.base_dtype\n            if not expected_type:\n                expected_type = item_type\n            elif expected_type != item_type:\n                mismatch = True\n                break\n    if mismatch:\n        expected_type = original_expected_type\n        original_item_str = None\n        for item in items:\n            if item is not None:\n                item_type = item.dtype.base_dtype\n                if not expected_type:\n                    expected_type = item_type\n                    original_item_str = item.name if hasattr(item, 'name') else str(item)\n                elif expected_type != item_type:\n                    raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (item.name if hasattr(item, 'name') else str(item), item_type, expected_type, ' as %s' % original_item_str if original_item_str else ''))\n        return expected_type\n    else:\n        return expected_type", "docstring": "Asserts all items are of the same base type.\n\nArgs:\nitems: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,\n`Operation`, or `IndexedSlices`). Can include `None` elements, which\nwill be ignored.\nexpected_type: Expected type. If not specified, assert all items are\nof the same base type.\n\nReturns:\nValidated type, or none if neither expected_type nor items provided.\n\nRaises:\nValueError: If any types do not match.", "source": "github-repos"}
{"code": "def get(self, blocking=True):\n    \n    if self.closed:\n      raise PoolAlreadyClosedError(\"Connection pool is already closed.\")\n\n    \n    \n    \n    if not self.limiter.acquire(blocking=blocking):\n      return None\n    c = None\n    \n    \n    try:\n      c = self.idle_conns.pop()\n    except IndexError:\n      \n      try:\n        c = self.connect_func()\n      except Exception:\n        self.limiter.release()\n        raise\n    return _ConnectionProxy(self, c)", "docstring": "Gets a connection.\n\nArgs:\nblocking: Whether to block when max_size connections are already in use.\nIf false, may return None.\n\nReturns:\nA connection to the database.\n\nRaises:\nPoolAlreadyClosedError: if close() method was already called on\nthis pool.", "source": "juraj-google-style"}
{"code": "def ts_to_str(jwt_dict):\n    d = ts_to_dt(jwt_dict)\n    for (k, v) in list(d.items()):\n        if isinstance(v, datetime.datetime):\n            d[k] = v.isoformat().replace('T', ' ')\n    return d", "docstring": "Convert timestamps in JWT to human readable dates.\n\nArgs:\njwt_dict: dict\nJWT with some keys containing timestamps.\n\nReturns:\ndict: Copy of input dict where timestamps have been replaced with human readable\ndates.", "source": "codesearchnet"}
{"code": "def _RegisterCredentialsMethod(method, position=None):\n    if (position is None):\n        position = len(_CREDENTIALS_METHODS)\n    else:\n        position = min(position, len(_CREDENTIALS_METHODS))\n    _CREDENTIALS_METHODS.insert(position, method)\n    return method", "docstring": "Register a new method for fetching credentials.\n\nThis new method should be a function with signature:\nclient_info, **kwds -> Credentials or None\nThis method can be used as a decorator, unless position needs to\nbe supplied.\n\nNote that method must *always* accept arbitrary keyword arguments.\n\nArgs:\nmethod: New credential-fetching method.\nposition: (default: None) Where in the list of methods to\nadd this; if None, we append. In all but rare cases,\nthis should be either 0 or None.\nReturns:\nmethod, for use as a decorator.", "source": "codesearchnet"}
{"code": "def find_backend(line: str) -> Optional[str]:\n    if _re_test_backend.search(line) is None:\n        return None\n    backends = [b[0] for b in _re_backend.findall(line)]\n    backends.sort()\n    return '_and_'.join(backends)", "docstring": "Find one (or multiple) backend in a code line of the init.\n\nArgs:\nline (`str`): A code line in an init file.\n\nReturns:\nOptional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line\ncontains `if is_xxx_available() and `is_yyy_available()`) returns all backends joined on `_and_` (so\n`xxx_and_yyy` for instance).", "source": "github-repos"}
{"code": "def create(self, reference, document_data):\n        \n        write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)\n        self._add_write_pbs(write_pbs)", "docstring": "Add a \"change\" to this batch to create a document.\n\nIf the document given by ``reference`` already exists, then this\nbatch will fail when :meth:`commit`-ed.\n\nArgs:\nreference (~.firestore_v1beta1.document.DocumentReference): A\ndocument reference to be created in this batch.\ndocument_data (dict): Property names and values to use for\ncreating a document.", "source": "juraj-google-style"}
{"code": "def gray2bgr(img):\n    img = (img[(..., None)] if (img.ndim == 2) else img)\n    out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n    return out_img", "docstring": "Convert a grayscale image to BGR image.\n\nArgs:\nimg (ndarray or str): The input image.\n\nReturns:\nndarray: The converted BGR image.", "source": "codesearchnet"}
{"code": "def associated_stream(self):\n    if (not self.important):\n        raise InternalError('You may only call autocopied_stream on when DataStream.important is True', stream=self)\n    if (self.stream_id >= DataStream.ImportantSystemStorageStart):\n        stream_type = DataStream.BufferedType\n    else:\n        stream_type = DataStream.OutputType\n    return DataStream(stream_type, self.stream_id, True)", "docstring": "Return the corresponding output or storage stream for an important system input.\n\nCertain system inputs are designed as important and automatically\ncopied to output streams without requiring any manual interaction.\n\nThis method returns the corresponding stream for an important system\ninput.  It will raise an InternalError unlesss the self.important\nproperty is True.\n\nReturns:\nDataStream: The corresponding output or storage stream.\n\nRaises:\nInternalError: If this stream is not marked as an important system input.", "source": "codesearchnet"}
{"code": "def from_bytes(b):\n    if (len(b) != 64):\n        raise ValueError('from_bytes: Signature length != 64.')\n    r = int.from_bytes(b[0:32], 'big')\n    s = int.from_bytes(b[32:64], 'big')\n    return Signature(r, s)", "docstring": "Extracts the r and s components from a byte string.\n\nArgs:\nb (bytes): A 64-byte long string. The first 32 bytes are\nextracted as the r component and the second 32 bytes\nare extracted as the s component.\n\nReturns:\nSignature: A Signature object.\n\nRaises:\nValueError: If signature is incorrect length", "source": "codesearchnet"}
{"code": "def remote_command(task: Task, command: str) -> Result:\n    \n    client = task.host.get_connection(\"paramiko\", task.nornir.config)\n    connection_state = task.host.get_connection_state(\"paramiko\")\n\n    chan = client.get_transport().open_session()\n\n    if connection_state[\"ssh_forward_agent\"]:\n        AgentRequestHandler(chan)\n\n    chan.exec_command(command)\n\n    with chan.makefile() as f:\n        stdout = f.read().decode()\n    with chan.makefile_stderr() as f:\n        stderr = f.read().decode()\n\n    exit_status_code = chan.recv_exit_status()\n\n    if exit_status_code:\n        raise CommandError(command, exit_status_code, stdout, stderr)\n\n    result = stderr if stderr else stdout\n    return Result(result=result, host=task.host, stderr=stderr, stdout=stdout)", "docstring": "Executes a command remotely on the host\n\nArguments:\ncommand (``str``): command to execute\n\nReturns:\nResult object with the following attributes set:\n* result (``str``): stderr or stdout\n* stdout (``str``): stdout\n* stderr (``str``): stderr\n\nRaises:\n:obj:`nornir.core.exceptions.CommandError`: when there is a command error", "source": "juraj-google-style"}
{"code": "def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None, width=8, height=None, dpi=300):\n    import palettable.colorbrewer.diverging\n    colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors\n    c1 = colors[0]\n    c2 = colors[(- 1)]\n    golden_ratio = ((math.sqrt(5) - 1) / 2)\n    if (not height):\n        height = int((width * golden_ratio))\n    import matplotlib.pyplot as plt\n    width = 12\n    labelsize = int((width * 3))\n    ticksize = int((width * 2.5))\n    styles = ['-', '--', '-.', '.']\n    (fig, ax1) = plt.subplots()\n    fig.set_size_inches((width, height))\n    if dpi:\n        fig.set_dpi(dpi)\n    if isinstance(y1, dict):\n        for (i, (k, v)) in enumerate(y1.items()):\n            ax1.plot(x, v, c=c1, marker='s', ls=styles[(i % len(styles))], label=k)\n        ax1.legend(fontsize=labelsize)\n    else:\n        ax1.plot(x, y1, c=c1, marker='s', ls='-')\n    if xlabel:\n        ax1.set_xlabel(xlabel, fontsize=labelsize)\n    if y1label:\n        ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)\n    ax1.tick_params('x', labelsize=ticksize)\n    ax1.tick_params('y', colors=c1, labelsize=ticksize)\n    ax2 = ax1.twinx()\n    if isinstance(y2, dict):\n        for (i, (k, v)) in enumerate(y2.items()):\n            ax2.plot(x, v, c=c2, marker='o', ls=styles[(i % len(styles))], label=k)\n        ax2.legend(fontsize=labelsize)\n    else:\n        ax2.plot(x, y2, c=c2, marker='o', ls='-')\n    if y2label:\n        ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)\n    ax2.tick_params('y', colors=c2, labelsize=ticksize)\n    return plt", "docstring": "Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib\nexamples. Makes it easier to create plots with different axes.\n\nArgs:\nx (np.ndarray/list): Data for x-axis.\ny1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will\nbe interpreted as a {label: sequence}.\ny2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will\nbe interpreted as a {label: sequence}.\nxlabel (str): If not None, this will be the label for the x-axis.\ny1label (str): If not None, this will be the label for the y1-axis.\ny2label (str): If not None, this will be the label for the y2-axis.\nwidth (float): Width of plot in inches. Defaults to 8in.\nheight (float): Height of plot in inches. Defaults to width * golden\nratio.\ndpi (int): Sets dot per inch for figure. Defaults to 300.\n\nReturns:\nmatplotlib.pyplot", "source": "codesearchnet"}
{"code": "def FromTXOutputsConfirmed(outputs):\n    uns = UnspentCoinState()\n    uns.Items = ([0] * len(outputs))\n    for i in range(0, len(outputs)):\n        uns.Items[i] = int(CoinState.Confirmed)\n    return uns", "docstring": "Get unspent outputs from a list of transaction outputs.\n\nArgs:\noutputs (list): of neo.Core.TX.Transaction.TransactionOutput items.\n\nReturns:\nUnspentCoinState:", "source": "codesearchnet"}
{"code": "def helper_add(access_token, ck_id, path, body):\n    \n    full_path = ''.join([path, \"('\", ck_id, \"')\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    return do_ams_put(endpoint, full_path_encoded, body, access_token, \"json_only\", \"1.0;NetFx\")", "docstring": "Helper Function to add strings to a URL path.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nck_id (str): A CK ID.\npath (str): A URL Path.\nbody (str): A Body.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def set_status(self, name: str = None):\n        \n        game = None\n        if name:\n            game = {\n                'name': name\n            }\n        payload = {\n            'op': WebSocketEvent.STATUS_UPDATE.value,\n            'd': {\n                'game': game,\n                'status': 'online',\n                'afk': False,\n                'since': 0.0\n            }\n        }\n        data = json.dumps(payload, indent=2)\n        self.logger.debug(f'Sending status update payload: {data}')\n        self._ws.send(data)", "docstring": "Updates the bot's status\n\nThis is used to get the game that the bot is \"playing\" or to clear it.\nIf you want to set a game, pass a name; if you want to clear it, either\ncall this method without the optional ``name`` parameter or explicitly\npass ``None``.\n\nArgs:\nname: the game's name, or None", "source": "juraj-google-style"}
{"code": "def convert_image_to_example_proto(tensor: tf.Tensor) -> tf.train.Example:\n    serialized_non_scalar = tf.io.serialize_tensor(tensor)\n    feature_of_bytes = tf.train.Feature(bytes_list=tf.train.BytesList(value=[serialized_non_scalar.numpy()]))\n    features_for_example = {'image': feature_of_bytes}\n    example_proto = tf.train.Example(features=tf.train.Features(feature=features_for_example))\n    return example_proto", "docstring": "This method performs the following:\n1. Accepts the tensor as input\n2. Serializes the tensor into bytes and pass it through\ntf.train.Feature\n3. Pass the serialized tensor feature using tf.train.Example\nProto to the RunInference transform.\n\nArgs:\ntensor: A TF tensor.\nReturns:\nexample_proto: A tf.train.Example containing serialized tensor.", "source": "github-repos"}
{"code": "def lint(self, content, **kwargs):\n    post_data = {'content': content}\n    data = self.http_post('/ci/lint', post_data=post_data, **kwargs)\n    return ((data['status'] == 'valid'), data['errors'])", "docstring": "Validate a gitlab CI configuration.\n\nArgs:\ncontent (txt): The .gitlab-ci.yml content\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabVerifyError: If the validation could not be done\n\nReturns:\ntuple: (True, []) if the file is valid, (False, errors(list))\notherwise", "source": "codesearchnet"}
{"code": "def indent(self, space=4):\n    if (not isinstance(space, int)):\n        raise TypeError('space must be an int')\n    if (space < 0):\n        raise ValueError('space must be a non-negative integer')\n    space = (' ' * space)\n    o = []\n    l = 0\n    for c in self.newick():\n        if (c == '('):\n            o.append('(\\n')\n            l += 1\n            o.append((space * l))\n        elif (c == ')'):\n            o.append('\\n')\n            l -= 1\n            o.append((space * l))\n            o.append(')')\n        elif (c == ','):\n            o.append(',\\n')\n            o.append((space * l))\n        else:\n            o.append(c)\n    return ''.join(o)", "docstring": "Return an indented Newick string, just like ``nw_indent`` in Newick Utilities\n\nArgs:\n``space`` (``int``): The number of spaces a tab should equal\n\nReturns:\n``str``: An indented Newick string", "source": "codesearchnet"}
{"code": "def __init__(self, file_pattern, batch_size=1, buffer_size=1, parallelism=1, shift_ratio=0, seed=0, name=None, batches=None, compression_type=None):\n    self._batch_size = batch_size\n    if batches is not None:\n        self._batch_size *= batches\n    self._batches = batches\n    self._file_pattern = file_pattern\n    self._buffer_size = buffer_size\n    self._parallelism = parallelism\n    self._shift_ratio = shift_ratio\n    self._seed = seed\n    self._name = name\n    self._compression_type = python_io.TFRecordCompressionType.NONE\n    if compression_type is not None:\n        self._compression_type = compression_type", "docstring": "Constructs a RecordInput Op.\n\nArgs:\nfile_pattern: File path to the dataset, possibly containing wildcards.\nAll matching files will be iterated over each epoch.\nbatch_size: How many records to return at a time.\nbuffer_size: The maximum number of records the buffer will contain.\nparallelism: How many reader threads to use for reading from files.\nshift_ratio: What percentage of the total number files to move the start\nfile forward by each epoch.\nseed: Specify the random number seed used by generator that randomizes\nrecords.\nname: Optional name for the operation.\nbatches: None by default, creating a single batch op. Otherwise specifies\nhow many batches to create, which are returned as a list when\n`get_yield_op()` is called. An example use case is to split processing\nbetween devices on one computer.\ncompression_type: The type of compression for the file. Currently ZLIB and\nGZIP are supported. Defaults to none.\n\nRaises:\nValueError: If one of the arguments is invalid.", "source": "github-repos"}
{"code": "def write_json(self, path, contents, message):\n        \n        log.debug(message.format(path=path))\n        makedirs(os.path.dirname(path))\n        with open(path, \"w\") as fh:\n            json.dump(contents, fh, indent=2, sort_keys=True)", "docstring": "Write json to disk.\n\nArgs:\npath (str): the path to write to\ncontents (dict): the contents of the json blob\nmessage (str): the message to log", "source": "juraj-google-style"}
{"code": "def start_app(self, bundle_id):\n        \n        idevicedebug = must_look_exec('idevicedebug')\n\n        \n        kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}\n        if sys.platform != 'darwin':\n            kwargs['close_fds'] = True\n        return subprocess.Popen([idevicedebug, \"--udid\", self.udid, 'run', bundle_id], **kwargs)", "docstring": "Start app by bundle_id\nArgs:\n- bundle_id(string): ex com.netease.my\nReturns:\nidevicedebug subprocess instance", "source": "juraj-google-style"}
{"code": "def get_student_enrollments(self):\n    resp = self.requester.get(urljoin(self.base_url, self.enrollment_url))\n    resp.raise_for_status()\n    return Enrollments(resp.json())", "docstring": "Returns an Enrollments object with the user enrollments\n\nReturns:\nEnrollments: object representing the student enrollments", "source": "codesearchnet"}
{"code": "def read(self, vals):\n        \n        i = 0\n        if len(vals[i]) == 0:\n            self.typical_or_extreme_period_name = None\n        else:\n            self.typical_or_extreme_period_name = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.typical_or_extreme_period_type = None\n        else:\n            self.typical_or_extreme_period_type = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.period_start_day = None\n        else:\n            self.period_start_day = vals[i]\n        i += 1\n        if len(vals[i]) == 0:\n            self.period_end_day = None\n        else:\n            self.period_end_day = vals[i]\n        i += 1", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"}
{"code": "def play_random(env, steps):\n    \n    try:\n        done = True\n        progress = tqdm(range(steps))\n        for _ in progress:\n            if done:\n                _ = env.reset()\n            action = env.action_space.sample()\n            _, reward, done, info = env.step(action)\n            progress.set_postfix(reward=reward, info=info)\n            env.render()\n    except KeyboardInterrupt:\n        pass\n    \n    env.close()", "docstring": "Play the environment making uniformly random decisions.\n\nArgs:\nenv (gym.Env): the initialized gym environment to play\nsteps (int): the number of random steps to take\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def ToTsvExcel(self, columns_order=None, order_by=()):\n    csv_result = self.ToCsv(columns_order, order_by, separator='\\t')\n    if (not isinstance(csv_result, six.text_type)):\n        csv_result = csv_result.decode('utf-8')\n    return csv_result.encode('UTF-16LE')", "docstring": "Returns a file in tab-separated-format readable by MS Excel.\n\nReturns a file in UTF-16 little endian encoding, with tabs separating the\nvalues.\n\nArgs:\ncolumns_order: Delegated to ToCsv.\norder_by: Delegated to ToCsv.\n\nReturns:\nA tab-separated little endian UTF16 file representing the table.", "source": "codesearchnet"}
{"code": "def require_debian_packages(packages: List[str]) -> None:\n    \n    present = are_debian_packages_installed(packages)\n    missing_packages = [k for k, v in present.items() if not v]\n    if missing_packages:\n        missing_packages.sort()\n        msg = (\n            \"Debian packages are missing, as follows. Suggest:\\n\\n\"\n            \"sudo apt install {}\".format(\" \".join(missing_packages))\n        )\n        log.critical(msg)\n        raise ValueError(msg)", "docstring": "Ensure specific packages are installed under Debian.\n\nArgs:\npackages: list of packages\n\nRaises:\nValueError: if any are missing", "source": "juraj-google-style"}
{"code": "def dispatch_event(event):\n    \n\n    try:\n      if event.http_verb == enums.HTTPVerbs.GET:\n        requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()\n      elif event.http_verb == enums.HTTPVerbs.POST:\n        requests.post(\n          event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT\n        ).raise_for_status()\n\n    except request_exception.RequestException as error:\n      logging.error('Dispatch event failed. Error: %s' % str(error))", "docstring": "Dispatch the event being represented by the Event object.\n\nArgs:\nevent: Object holding information about the request to be dispatched to the Optimizely backend.", "source": "juraj-google-style"}
{"code": "def __init__(self, linter_name, path, msg, line_nr=None, col=None):\n        \n        \n        \n        if line_nr:\n            line_nr = int(line_nr)\n        if col:\n            col = int(col)\n        self._linter_name = linter_name\n        self.path = path\n        self.line_nr = line_nr\n        self.msg = msg\n        self.col = col", "docstring": "Optionally set all attributes.\n\nArgs:\npath (str): Relative file path.\nline (int): Line number.\nmsg (str): Explanation of what is wrong.\ncol (int): Column where the problem begins.", "source": "juraj-google-style"}
{"code": "def get_configuration(head, update, head_source=None):\n    head_source = (head_source or get_head_source(head))\n    update_source = get_acquisition_source(update)\n    if ((not is_arxiv_and_publisher(head_source, update_source)) and is_manual_merge(head, update)):\n        return ManualMergeOperations\n    if (head_source == 'arxiv'):\n        if (update_source == 'arxiv'):\n            return ArxivOnArxivOperations\n        else:\n            return PublisherOnArxivOperations\n    elif (update_source == 'arxiv'):\n        return ArxivOnPublisherOperations\n    else:\n        return PublisherOnPublisherOperations", "docstring": "This function return the right configuration for the inspire_merge\nfunction in according to the given sources. Both parameters can not be None.\n\nParams:\nhead(dict): the HEAD record\nupdate(dict): the UPDATE record\nhead_source(string): the source of the HEAD record\n\nReturns:\nMergerConfigurationOperations: an object containing\nthe rules needed to merge HEAD and UPDATE", "source": "codesearchnet"}
{"code": "def _get_segments(self, start, request_size):\n    if (not request_size):\n        return []\n    end = (start + request_size)\n    futures = []\n    while (request_size > self._max_request_size):\n        futures.append(self._get_segment(start, self._max_request_size))\n        request_size -= self._max_request_size\n        start += self._max_request_size\n    if (start < end):\n        futures.append(self._get_segment(start, (end - start)))\n    return [fut.get_result() for fut in futures]", "docstring": "Get segments of the file from Google Storage as a list.\n\nA large request is broken into segments to avoid hitting urlfetch\nresponse size limit. Each segment is returned from a separate urlfetch.\n\nArgs:\nstart: start offset to request. Inclusive. Have to be within the\nrange of the file.\nrequest_size: number of bytes to request.\n\nReturns:\nA list of file segments in order", "source": "codesearchnet"}
{"code": "def _serve_plugins_listing(self, request):\n    response = {}\n    for plugin in self._plugins:\n        start = time.time()\n        response[plugin.plugin_name] = plugin.is_active()\n        elapsed = (time.time() - start)\n        logger.info('Plugin listing: is_active() for %s took %0.3f seconds', plugin.plugin_name, elapsed)\n    return http_util.Respond(request, response, 'application/json')", "docstring": "Serves an object mapping plugin name to whether it is enabled.\n\nArgs:\nrequest: The werkzeug.Request object.\n\nReturns:\nA werkzeug.Response object.", "source": "codesearchnet"}
{"code": "def review_score(self, reviewer, product):\n        \n        return self._g.retrieve_review(reviewer, product).score", "docstring": "Find a review score from a given reviewer to a product.\n\nArgs:\nreviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`.\nproduct: Product i.e. an instance of :class:`ria.bipartite.Product`.\n\nReturns:\nA review object representing the review from the reviewer to the product.", "source": "juraj-google-style"}
{"code": "def create_attachment(cls, session, attachment):\n    return super(Conversations, cls).create(session, attachment, endpoint_override='/attachments.json', out_type=Attachment)", "docstring": "Create an attachment.\n\nAn attachment must be sent to the API before it can be used in a\nthread. Use this method to create the attachment, then use the\nresulting hash when creating a thread.\n\nNote that HelpScout only supports attachments of 10MB or lower.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nattachment (helpscout.models.Attachment): The attachment to be\ncreated.\n\nReturns:\nhelpscout.models.Attachment: The newly created attachment (hash\nproperty only). Use this hash when associating the attachment with\na new thread.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states):\n    hidden_states = hidden_states.transpose(1, 2)\n    hidden_states = self.pointwise_conv1(hidden_states)\n    hidden_states = nn.functional.glu(hidden_states, dim=1)\n    hidden_states = self.depthwise_conv(hidden_states)\n    hidden_states = self.norm(hidden_states)\n    hidden_states = hidden_states * torch.sigmoid(hidden_states)\n    hidden_states = self.pointwise_conv2(hidden_states)\n    return hidden_states.transpose(1, 2)", "docstring": "Compute convolution module.\n\nArgs:\nhidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.\n\nReturns:\n`torch.Tensor`: Output tensor of shape `(batch, time, channels)`.", "source": "github-repos"}
{"code": "def are_debian_packages_installed(packages: List[str]) -> Dict[(str, bool)]:\n    assert (len(packages) >= 1)\n    require_executable(DPKG_QUERY)\n    args = ([DPKG_QUERY, '-W', '-f=${Package} ${Status}\\n'] + packages)\n    completed_process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n    encoding = sys.getdefaultencoding()\n    stdout = completed_process.stdout.decode(encoding)\n    stderr = completed_process.stderr.decode(encoding)\n    present = OrderedDict()\n    for line in stdout.split('\\n'):\n        if line:\n            words = line.split()\n            assert (len(words) >= 2)\n            package = words[0]\n            present[package] = ('installed' in words[1:])\n    for line in stderr.split('\\n'):\n        if line:\n            words = line.split()\n            assert (len(words) >= 2)\n            package = words[(- 1)]\n            present[package] = False\n    log.debug('Debian package presence: {}', present)\n    return present", "docstring": "Check which of a list of Debian packages are installed, via ``dpkg-query``.\n\nArgs:\npackages: list of Debian package names\n\nReturns:\ndict: mapping from package name to boolean (\"present?\")", "source": "codesearchnet"}
{"code": "class PatchingSpec:\n    o: Any\n    name: str\n    custom_op: Callable\n    orig_op: Optional[Callable] = None\n    op_wrapper: Optional[Callable] = None", "docstring": "Data class that holds patching specifications.\n\nArgs:\no: Module / object where the op to patch is located\nname: Name of the op to monkey patch\ncustom_op: Custom op that patches the original op\norig_op: Original op that is being patched\nop_wrapper: Wrapper (optional) that wraps both the original and custom ops.\nIt is useful for ops that are class or static methods for instance.", "source": "github-repos"}
{"code": "def make_noise_surface(dims=DEFAULT_DIMS, blur=10, seed=None):\n    \n    if seed is not None:\n        np.random.seed(seed)\n\n    return gaussian_filter(np.random.normal(size=dims), blur)", "docstring": "Makes a surface by generating random noise and blurring it.\n\nArgs:\ndims (pair): the dimensions of the surface to create\nblur (float): the amount of Gaussian blur to apply\nseed (int): a random seed to use (optional)\n\nReturns:\nsurface: A surface.", "source": "juraj-google-style"}
{"code": "def _open_tracing_interface(self, conn_id, callback):\n    try:\n        handle = self._find_handle(conn_id)\n        services = self._connections[handle]['services']\n    except (ValueError, KeyError):\n        callback(conn_id, self.id, False, 'Connection closed unexpectedly before we could open the streaming interface')\n        return\n    self._command_task.async_command(['_enable_tracing', handle, services], self._on_interface_finished, {'connection_id': conn_id, 'callback': callback})", "docstring": "Enable the debug tracing interface for this IOTile device\n\nArgs:\nconn_id (int): the unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "codesearchnet"}
{"code": "def get_sequence_properties(self, clean_seq=False, representatives_only=True):\n        \n        for g in tqdm(self.genes):\n            g.protein.get_sequence_properties(clean_seq=clean_seq, representative_only=representatives_only)", "docstring": "Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.\nResults are stored in the protein's respective SeqProp objects at ``.annotations``\n\nArgs:\nrepresentative_only (bool): If analysis should only be run on the representative sequences", "source": "juraj-google-style"}
{"code": "def members(self):\n    resp = self._rtm_client.get('v1/current_team.members?all=true')\n    if resp.is_fail():\n        raise RTMServiceError('Failed to get members of current team', resp)\n    return resp.data['result']", "docstring": "Gets members of current team\n\nReturns:\nlist of User\n\nThrows:\nRTMServiceError when request failed", "source": "codesearchnet"}
{"code": "def _find_docstring_line_for_no_body(self, start):\n        \n        tracked = sorted(list(self._tokenized_triple_quotes.keys()))\n\n        for i in tracked:\n            if min(start, i) == start:\n                return i\n        return None", "docstring": "Find the docstring associated with a definition with no body\nin the node.\n\nIn these cases, the provided start and end line number for that\nelement are the same, so we must get the docstring based on the\nsequential position of known docstrings.\n\nArgs:\nstart: the row where the class / function starts.\n\nReturns:\nint: the row number where the docstring is found.", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, output_module):\n    \n    if not hasattr(output_module, 'SetServerInformation'):\n      raise errors.BadConfigObject('Unable to set server information.')\n\n    server = cls._ParseStringOption(\n        options, 'server', default_value=cls._DEFAULT_SERVER)\n    port = cls._ParseNumericOption(\n        options, 'port', default_value=cls._DEFAULT_PORT)\n\n    output_module.SetServerInformation(server, port)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (OutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object does not have the\nSetServerInformation method.", "source": "juraj-google-style"}
{"code": "def _split_ir_into_match_steps(pruned_ir_blocks):\n    output = []\n    current_tuple = None\n    for block in pruned_ir_blocks:\n        if isinstance(block, OutputSource):\n            continue\n        elif isinstance(block, root_block_types):\n            if (current_tuple is not None):\n                output.append(current_tuple)\n            current_tuple = (block,)\n        elif isinstance(block, (CoerceType, Filter, MarkLocation)):\n            current_tuple += (block,)\n        else:\n            raise AssertionError(u'Unexpected block type when converting to MATCH query: {} {}'.format(block, pruned_ir_blocks))\n    if (current_tuple is None):\n        raise AssertionError(u'current_tuple was unexpectedly None: {}'.format(pruned_ir_blocks))\n    output.append(current_tuple)\n    return [_per_location_tuple_to_step(x) for x in output]", "docstring": "Split a list of IR blocks into per-location MATCH steps.\n\nArgs:\npruned_ir_blocks: list of IR basic block objects that have gone through a lowering step.\n\nReturns:\nlist of MatchStep namedtuples, each of which contains all basic blocks that correspond\nto a single MATCH step.", "source": "codesearchnet"}
{"code": "def setup(self, disk_name, project, turbinia_zone):\n    if ((project is None) or (turbinia_zone is None)):\n        self.state.add_error('project or turbinia_zone are not all specified, bailing out', critical=True)\n        return\n    self.disk_name = disk_name\n    self.project = project\n    self.turbinia_zone = turbinia_zone\n    try:\n        turbinia_config.LoadConfig()\n        self.turbinia_region = turbinia_config.TURBINIA_REGION\n        self.instance = turbinia_config.PUBSUB_TOPIC\n        if (turbinia_config.PROJECT != self.project):\n            self.state.add_error('Specified project {0:s} does not match Turbinia configured project {1:s}. Use gcp_turbinia_import recipe to copy the disk into the same project.'.format(self.project, turbinia_config.PROJECT), critical=True)\n            return\n        self._output_path = tempfile.mkdtemp()\n        self.client = turbinia_client.TurbiniaClient()\n    except TurbiniaException as e:\n        self.state.add_error(e, critical=True)\n        return", "docstring": "Sets up the object attributes.\n\nArgs:\ndisk_name (string): Name of the disk to process\nproject (string): The project containing the disk to process\nturbinia_zone (string): The zone containing the disk to process", "source": "codesearchnet"}
{"code": "def PyParseIntCast(string, location, tokens):\n  \n  \n  for index, token in enumerate(tokens):\n    try:\n      tokens[index] = int(token)\n    except ValueError:\n      logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(\n          token))\n      tokens[index] = 0\n\n  \n  for key in tokens.keys():\n    try:\n      tokens[key] = int(tokens[key], 10)\n    except ValueError:\n      logger.error(\n          'Unable to cast [{0:s} = {1:d}] to an int, setting to 0'.format(\n              key, tokens[key]))\n      tokens[key] = 0", "docstring": "Return an integer from a string.\n\nThis is a pyparsing callback method that converts the matched\nstring into an integer.\n\nThe method modifies the content of the tokens list and converts\nthem all to an integer value.\n\nArgs:\nstring (str): original string.\nlocation (int): location in the string where the match was made.\ntokens (list[str]): extracted tokens, where the string to be converted\nis stored.", "source": "juraj-google-style"}
{"code": "def encode_chunk(dataframe):\n    \n    csv_buffer = six.StringIO()\n    dataframe.to_csv(\n        csv_buffer,\n        index=False,\n        header=False,\n        encoding=\"utf-8\",\n        float_format=\"%.15g\",\n        date_format=\"%Y-%m-%d %H:%M:%S.%f\",\n    )\n\n    \n    \n    body = csv_buffer.getvalue()\n    if isinstance(body, bytes):\n        body = body.decode(\"utf-8\")\n    body = body.encode(\"utf-8\")\n    return six.BytesIO(body)", "docstring": "Return a file-like object of CSV-encoded rows.\n\nArgs:\ndataframe (pandas.DataFrame): A chunk of a dataframe to encode", "source": "juraj-google-style"}
{"code": "def __init__(self, level, message, message_id, timestamp=None, now_reference=None):\n        \n\n        self.level = level\n        self.message = message\n        self.count = 1\n        self.id = message_id\n\n        if timestamp is None:\n            self.created = monotonic()\n        elif now_reference is None:\n            self.created = timestamp\n        else:\n            \n            \n            \n            now = monotonic()\n            adj = now - now_reference\n            self.created = timestamp + adj\n\n            \n            if self.created > now:\n                self.created = now", "docstring": "Constructor.\n\nArgs:\nlevel (int): The message importance\nmessage (string): The message contents\nmessage_id (int): A unique id for the message\ntimestamp (float): An optional monotonic value in seconds for when the message was created\nnow_reference (float): If timestamp is not relative to monotonic() as called from this\nmodule then this should be now() as seen by whoever created the timestamp.", "source": "juraj-google-style"}
{"code": "def complete_acquaintance_strategy(qubit_order: Sequence[ops.Qid],\n                                   acquaintance_size: int=0,\n                                   ) -> circuits.Circuit:\n    \n    if acquaintance_size < 0:\n        raise ValueError('acquaintance_size must be non-negative.')\n    elif acquaintance_size == 0:\n        return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)\n\n    if acquaintance_size > len(qubit_order):\n        return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)\n    if acquaintance_size == len(qubit_order):\n        return circuits.Circuit.from_ops(\n                acquaint(*qubit_order), device=UnconstrainedAcquaintanceDevice)\n\n    strategy = circuits.Circuit.from_ops(\n            (acquaint(q) for q in qubit_order),\n            device=UnconstrainedAcquaintanceDevice)\n    for size_to_acquaint in range(2, acquaintance_size + 1):\n        expose_acquaintance_gates(strategy)\n        replace_acquaintance_with_swap_network(\n                strategy, qubit_order, size_to_acquaint)\n    return strategy", "docstring": "Returns an acquaintance strategy capable of executing a gate corresponding\nto any set of at most acquaintance_size qubits.\n\nArgs:\nqubit_order: The qubits on which the strategy should be defined.\nacquaintance_size: The maximum number of qubits to be acted on by\nan operation.\n\nReturns:\nAn circuit capable of implementing any set of k-local\noperation.", "source": "juraj-google-style"}
{"code": "def softplus(x):\n    return ops.softplus(x)", "docstring": "Softplus activation function.\n\nIt is defined as: `softplus(x) = log(exp(x) + 1)`.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def get_dag(nodes, downstream_fn) -> Tuple[Dict, Dict]:\n    \n    dag = {}\n    node_by_ids = {}\n    for node in nodes:\n        downstream_ops = downstream_fn(node)\n        dag[node.id] = set(downstream_ops)\n        node_by_ids[node.id] = node\n\n    return dag, node_by_ids", "docstring": "Return a dag representation of the nodes passed.\n\nThis is equally used for pipelines and pipeline runs.\n\nParams:\nnodes: an instance of `Operation` | `OperationRun` the nodes to represent en dag.\ndownstream_fn: a function that returns the downstream nodes of the a node.\n\nReturns:\ntuple: (dag, dict(node_id: node))", "source": "juraj-google-style"}
{"code": "def GetDisplayName(self, file_entry=None):\n    \n    if file_entry is None:\n      file_entry = self._file_entry\n\n    if file_entry is None:\n      raise ValueError('Missing file entry')\n\n    path_spec = getattr(file_entry, 'path_spec', None)\n\n    relative_path = path_helper.PathHelper.GetRelativePathForPathSpec(\n        path_spec, mount_path=self._mount_path)\n    if not relative_path:\n      return file_entry.name\n\n    return self.GetDisplayNameForPathSpec(path_spec)", "docstring": "Retrieves the display name for a file entry.\n\nArgs:\nfile_entry (Optional[dfvfs.FileEntry]): file entry object, where None\nwill return the display name of self._file_entry.\n\nReturns:\nstr: human readable string that describes the path to the file entry.\n\nRaises:\nValueError: if the file entry is missing.", "source": "juraj-google-style"}
{"code": "def create_frames(until=None):\n    now = Date.now()\n    if until:\n        get_orbit(until, now)\n    else:\n        for body in list_bodies():\n            get_orbit(body.name, now)", "docstring": "Create frames available in the JPL files\n\nArgs:\nuntil (str): Name of the body you want to create the frame of, and all frames in between.\nIf ``None`` all the frames available in the .bsp files will be created\n\nExample:\n\n.. code-block:: python\n\n# All frames between Earth and Mars are created (Earth, EarthBarycenter,\n# SolarSystemBarycenter, MarsBarycenter and Mars)\ncreate_frames(until='Mars')\n\n# All frames between Earth and Phobos are created (Earth, EarthBarycenter,\n# SolarSystemBarycenter, MarsBarycenter and Phobos)\ncreate_frames(until='Phobos')\n\n# All frames available in the .bsp files are created\ncreate_frames()", "source": "codesearchnet"}
{"code": "def uniprot_reviewed_checker_batch(uniprot_ids):\n    \n    uniprot_ids = ssbio.utils.force_list(uniprot_ids)\n\n    invalid_ids = [i for i in uniprot_ids if not is_valid_uniprot_id(i)]\n    uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)]\n\n    if invalid_ids:\n        warnings.warn(\"Invalid UniProt IDs {} will be ignored\".format(invalid_ids))\n\n    \n    Nmax = 200\n    N, rest = divmod(len(uniprot_ids), Nmax)\n\n    uni_rev_dict = {}\n\n    if rest > 0:\n        N += 1\n    for i in range(0, N):\n        i1 = i * Nmax\n        i2 = (i + 1) * Nmax\n        if i2 > len(uniprot_ids):\n            i2 = len(uniprot_ids)\n\n        query = uniprot_ids[i1:i2]\n\n        query_string = ''\n        for x in query:\n            query_string += 'id:' + x + '+OR+'\n        query_string = query_string.strip('+OR+')\n\n        uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))\n        uni_rev_df = pd.read_table(uni_rev_raw, sep='\\t', index_col=0)\n        uni_rev_df = uni_rev_df.fillna(False)\n\n        \n        \n        \n        uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]\n\n        uni_rev_df = uni_rev_df.replace(to_replace=\"reviewed\", value=True)\n        uni_rev_df = uni_rev_df.replace(to_replace=\"unreviewed\", value=False)\n        uni_rev_dict_adder = uni_rev_df.to_dict()['Status']\n        uni_rev_dict.update(uni_rev_dict_adder)\n\n    return uni_rev_dict", "docstring": "Batch check if uniprot IDs are reviewed or not\n\nArgs:\nuniprot_ids: UniProt ID or list of UniProt IDs\n\nReturns:\nA dictionary of {UniProtID: Boolean}", "source": "juraj-google-style"}
{"code": "def probability_density(self, X):\n    self.check_fit()\n    return norm.pdf(X, loc=self.mean, scale=self.std)", "docstring": "Compute probability density.\n\nArguments:\nX: `np.ndarray` of shape (n, 1).\n\nReturns:\nnp.ndarray", "source": "codesearchnet"}
{"code": "def get_reference(root):\n    \n    reference = {}\n    elem = root.find('bibliographyLink')\n    if elem is None:\n        raise MissingElementError('bibliographyLink')\n\n    \n    ref_doi = elem.get('doi', None)\n    ref_key = elem.get('preferredKey', None)\n\n    if ref_doi is not None:\n        try:\n            ref = crossref_api.works(ids=ref_doi)['message']\n        except (HTTPError, habanero.RequestError, ConnectionError):\n            if ref_key is None:\n                raise KeywordError('DOI not found and preferredKey attribute not set')\n            else:\n                warn('Missing doi attribute in bibliographyLink or lookup failed. '\n                     'Setting \"detail\" key as a fallback; please update to the appropriate fields.'\n                     )\n                reference['detail'] = ref_key\n                if reference['detail'][-1] != '.':\n                    reference['detail'] += '.'\n        else:\n            if ref_key is not None:\n                warn('Using DOI to obtain reference information, rather than preferredKey.')\n            reference['doi'] = elem.attrib['doi']\n            \n            \n            reference['journal'] = ref.get('container-title')[0]\n            ref_year = ref.get('published-print') or ref.get('published-online')\n            reference['year'] = int(ref_year['date-parts'][0][0])\n            reference['volume'] = int(ref.get('volume'))\n            reference['pages'] = ref.get('page')\n            reference['authors'] = []\n            for author in ref['author']:\n                auth = {}\n                auth['name'] = ' '.join([author['given'], author['family']])\n                \n                orcid = author.get('ORCID')\n                if orcid:\n                    auth['ORCID'] = orcid.lstrip('http:\n                reference['authors'].append(auth)\n\n    elif ref_key is not None:\n        warn('Missing doi attribute in bibliographyLink. '\n             'Setting \"detail\" key as a fallback; please update to the appropriate fields.'\n             )\n        reference['detail'] = ref_key\n        if reference['detail'][-1] != '.':\n            reference['detail'] += '.'\n    else:\n        \n        raise MissingAttributeError('preferredKey', 'bibliographyLink')\n\n    return reference", "docstring": "Read reference info from root of ReSpecTh XML file.\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with reference information", "source": "juraj-google-style"}
{"code": "def __valueKeyWithHeaderIndex(self, values):\n    machingIndexes = {}\n    for (index, name) in enumerate(self.header):\n        if (name in values):\n            machingIndexes[index] = values[name]\n    return machingIndexes", "docstring": "This is hellper function, so that we can mach decision values with row index\nas represented in header index.\n\nArgs:\nvalues (dict): Normaly this will have dict of header values and values from decision\n\nReturn:\n>>> return()\n{\nvalues[headerName] : int(headerName index in header array),\n...\n}", "source": "codesearchnet"}
{"code": "def set_record(self, name, record_id, record):\n        \n        if name not in self._cache:\n            self._cache[name] = {}\n        self._cache[name][record_id] = record", "docstring": "Save a record into the cache.\n\nArgs:\nname (string): The name to save the model under.\nrecord_id (int): The record id.\nrecord (:class:`cinder_data.model.CinderModel`): The model", "source": "juraj-google-style"}
{"code": "def cost(self, t_node, branch_length, multiplicity=2.0):\n    merger_time = (t_node + branch_length)\n    return ((self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)) - ((np.log(self.total_merger_rate(merger_time)) * (multiplicity - 1.0)) / multiplicity))", "docstring": "returns the cost associated with a branch starting at t_node\nt_node is time before present, the branch goes back in time\n\nArgs:\n- t_node:           time of the node\n- branch_length:    branch length, determines when this branch merges with sister\n- multiplicity:     2 if merger is binary, higher if this is a polytomy", "source": "codesearchnet"}
{"code": "def has_implicit_access_to_catalog(user, obj):\n    request = get_request_or_stub()\n    decoded_jwt = get_decoded_jwt_from_request(request)\n    return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_CATALOG_ADMIN_ROLE, obj)", "docstring": "Check that if request user has implicit access to `ENTERPRISE_CATALOG_ADMIN_ROLE` feature role.\n\nReturns:\nboolean: whether the request user has access or not", "source": "codesearchnet"}
{"code": "def dispatch(self, inp):\n    \n    inp = tf.reshape(inp, [self._batch * self._length, -1])\n    \n    ret = tf.gather(inp, self._flat_indices)\n    return ret", "docstring": "Send the inputs to the experts.\n\nArgs:\ninp: a `Tensor` of shape \"[batch, length, depth]`\nReturns:\na tensor with shape [batch, num_experts, expert_capacity, depth]", "source": "juraj-google-style"}
{"code": "def __rmtree(path):\n    \n    logger.info(\"rmtree: %s\" % path)\n    try:\n        shutil.rmtree(path)\n        return True\n    except Exception as e:\n        logger.error(\"rmtree: %s failed! Error: %s\" % (path, e))\n        return False", "docstring": "Recursively delete a directory tree.\n\nArgs:\npath (str): Path to the directory that needs to be deleted.\n\nReturns:\nbool: True if the operation is successful, False otherwise.", "source": "juraj-google-style"}
{"code": "def pop_events(self, regex_pattern, timeout):\n    if (not self.started):\n        raise IllegalStateError('Dispatcher needs to be started before popping.')\n    deadline = (time.time() + timeout)\n    while True:\n        results = self._match_and_pop(regex_pattern)\n        if ((len(results) != 0) or (time.time() > deadline)):\n            break\n        time.sleep(1)\n    if (len(results) == 0):\n        raise queue.Empty('Timeout after {}s waiting for event: {}'.format(timeout, regex_pattern))\n    return sorted(results, key=(lambda event: event['time']))", "docstring": "Pop events whose names match a regex pattern.\n\nIf such event(s) exist, pop one event from each event queue that\nsatisfies the condition. Otherwise, wait for an event that satisfies\nthe condition to occur, with timeout.\n\nResults are sorted by timestamp in ascending order.\n\nArgs:\nregex_pattern: The regular expression pattern that an event name\nshould match in order to be popped.\ntimeout: Number of seconds to wait for events in case no event\nmatching the condition exits when the function is called.\n\nReturns:\nEvents whose names match a regex pattern.\nEmpty if none exist and the wait timed out.\n\nRaises:\nIllegalStateError: Raised if pop is called before the dispatcher\nstarts polling.\nqueue.Empty: Raised if no event was found before time out.", "source": "codesearchnet"}
{"code": "def __init__(self, cronfile):\n        \n        options = Options()\n        options.day_of_week_start_index_zero = False\n        options.use_24hour_time_format = True\n        with open(cronfile) as f:\n            for line in f.readlines():\n                parsed_line = self.parse_cron_line(line)\n                if parsed_line:\n                    print(\"{} -> {}\".format(parsed_line, ExpressionDescriptor(parsed_line, options)))", "docstring": "Initialize CrontabReader\n\nArgs:\ncronfile: Path to cronfile\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def is_variable_initialized(variable):\n    from tensorflow.python.ops import state_ops\n    return state_ops.is_variable_initialized(variable)", "docstring": "Tests if a variable has been initialized.\n\nArgs:\nvariable: A `Variable`.\n\nReturns:\nReturns a scalar boolean Tensor, `True` if the variable has been\ninitialized, `False` otherwise.", "source": "github-repos"}
{"code": "def get_region_vcf(self, case_obj, chrom=None, start=None, end=None, gene_obj=None, variant_type='clinical', category='snv', rank_threshold=None):\n    rank_threshold = (rank_threshold or (- 100))\n    variant_file = None\n    if (variant_type == 'clinical'):\n        if (category == 'snv'):\n            variant_file = case_obj['vcf_files'].get('vcf_snv')\n        elif (category == 'sv'):\n            variant_file = case_obj['vcf_files'].get('vcf_sv')\n        elif (category == 'str'):\n            variant_file = case_obj['vcf_files'].get('vcf_str')\n    elif (variant_type == 'research'):\n        if (category == 'snv'):\n            variant_file = case_obj['vcf_files'].get('vcf_snv_research')\n        elif (category == 'sv'):\n            variant_file = case_obj['vcf_files'].get('vcf_sv_research')\n    if (not variant_file):\n        raise SyntaxError('Vcf file does not seem to exist')\n    vcf_obj = VCF(variant_file)\n    region = ''\n    if gene_obj:\n        chrom = gene_obj['chromosome']\n        start = gene_obj['start']\n        end = gene_obj['end']\n    if chrom:\n        if (start and end):\n            region = '{0}:{1}-{2}'.format(chrom, start, end)\n        else:\n            region = '{0}'.format(chrom)\n    else:\n        rank_threshold = (rank_threshold or 5)\n    with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:\n        file_name = str(pathlib.Path(temp.name))\n        for header_line in vcf_obj.raw_header.split('\\n'):\n            if (len(header_line) > 3):\n                temp.write((header_line + '\\n'))\n        for variant in vcf_obj(region):\n            temp.write(str(variant))\n    return file_name", "docstring": "Produce a reduced vcf with variants from the specified coordinates\nThis is used for the alignment viewer.\n\nArgs:\ncase_obj(dict): A case from the scout database\nvariant_type(str): 'clinical' or 'research'. Default: 'clinical'\ncategory(str): 'snv' or 'sv'. Default: 'snv'\nrank_threshold(float): Only load variants above this score. Default: 5\nchrom(str): Load variants from a certain chromosome\nstart(int): Specify the start position\nend(int): Specify the end position\ngene_obj(dict): A gene object from the database\n\nReturns:\nfile_name(str): Path to the temporary file", "source": "codesearchnet"}
{"code": "def parse_text(text):\n    span_dict = collections.defaultdict(list)\n    for match in _NUMBER_PATTERN.finditer(text):\n        span_text = text[match.start():match.end()]\n        number = _parse_number(span_text)\n        if number is not None:\n            span_dict[match.span()].append(_get_numeric_value_from_float(number))\n    for begin_index, end_index in get_all_spans(text, max_ngram_length=1):\n        if (begin_index, end_index) in span_dict:\n            continue\n        span_text = text[begin_index:end_index]\n        number = _parse_number(span_text)\n        if number is not None:\n            span_dict[begin_index, end_index].append(_get_numeric_value_from_float(number))\n        for number, word in enumerate(_NUMBER_WORDS):\n            if span_text == word:\n                span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))\n                break\n        for number, word in enumerate(_ORDINAL_WORDS):\n            if span_text == word:\n                span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))\n                break\n    for begin_index, end_index in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE):\n        span_text = text[begin_index:end_index]\n        date = _parse_date(span_text)\n        if date is not None:\n            span_dict[begin_index, end_index].append(date)\n    spans = sorted(span_dict.items(), key=lambda span_value: _get_span_length_key(span_value[0]), reverse=True)\n    selected_spans = []\n    for span, value in spans:\n        for selected_span, _ in selected_spans:\n            if selected_span[0] <= span[0] and span[1] <= selected_span[1]:\n                break\n        else:\n            selected_spans.append((span, value))\n    selected_spans.sort(key=lambda span_value: span_value[0][0])\n    numeric_value_spans = []\n    for span, values in selected_spans:\n        numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values))\n    return numeric_value_spans", "docstring": "Extracts longest number and date spans.\n\nArgs:\ntext: text to annotate\n\nReturns:\nList of longest numeric value spans.", "source": "github-repos"}
{"code": "def cos(duration: int, amp: complex, freq: float = None,\n        phase: float = 0, name: str = None) -> SamplePulse:\n    \n    if freq is None:\n        freq = 1/duration\n\n    return _sampled_cos_pulse(duration, amp, freq, phase=phase, name=name)", "docstring": "Generates cosine wave `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude.\nfreq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def parse_key(key):\n    \n    hkey, lkey = struct.unpack('<II',key[0:UBIFS_SK_LEN])\n    ino_num = hkey & UBIFS_S_KEY_HASH_MASK\n    key_type = lkey >> UBIFS_S_KEY_BLOCK_BITS\n    khash = lkey\n\n    \n    return {'type':key_type, 'ino_num':ino_num, 'khash': khash}", "docstring": "Parse node key\n\nArguments:\nStr:key    -- Hex string literal of node key.\n\nReturns:\nInt:key_type   -- Type of key, data, ino, dent, etc.\nInt:ino_num    -- Inode number.\nInt:khash      -- Key hash.", "source": "juraj-google-style"}
{"code": "def fetch(self, subscription_id, data={}, **kwargs):\n        \n        return super(Subscription, self).fetch(subscription_id, data, **kwargs)", "docstring": "Fetch Subscription for given Id\n\nArgs:\nsubscription_id : Id for which subscription object is retrieved\n\nReturns:\nSubscription dict for given subscription Id", "source": "juraj-google-style"}
{"code": "def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n    hasher = resolve_hasher(algorithm, file_hash)\n    if str(hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n        return True\n    else:\n        return False", "docstring": "Validates a file against a sha256 or md5 hash.\n\nArgs:\nfpath: path to the file being validated\nfile_hash:  The expected hash string of the file.\nThe sha256 and md5 hash algorithms are both supported.\nalgorithm: Hash algorithm, one of `\"auto\"`, `\"sha256\"`, or `\"md5\"`.\nThe default `\"auto\"` detects the hash algorithm in use.\nchunk_size: Bytes to read at a time, important for large files.\n\nReturns:\nBoolean, whether the file is valid.", "source": "github-repos"}
{"code": "def group_systems(self, group_name, systems):\n        \n        api_group_id = None\n        headers = {'Content-Type': 'application/json'}\n        group_path = self.api_url + '/v1/groups'\n        group_get_path = group_path + ('?display_name=%s' % quote(group_name))\n\n        logger.debug(\"GET group: %s\", group_get_path)\n        net_logger.info(\"GET %s\", group_get_path)\n        get_group = self.session.get(group_get_path)\n        logger.debug(\"GET group status: %s\", get_group.status_code)\n        if get_group.status_code == 200:\n            api_group_id = get_group.json()['id']\n\n        if get_group.status_code == 404:\n            \n            logger.debug(\"POST group\")\n            data = json.dumps({'display_name': group_name})\n            net_logger.info(\"POST\", group_path)\n            post_group = self.session.post(group_path,\n                                           headers=headers,\n                                           data=data)\n            logger.debug(\"POST group status: %s\", post_group.status_code)\n            logger.debug(\"POST Group: %s\", post_group.json())\n            self.handle_fail_rcs(post_group)\n            api_group_id = post_group.json()['id']\n\n        logger.debug(\"PUT group\")\n        data = json.dumps(systems)\n        net_logger.info(\"PUT %s\", group_path + ('/%s/systems' % api_group_id))\n        put_group = self.session.put(group_path +\n                                     ('/%s/systems' % api_group_id),\n                                     headers=headers,\n                                     data=data)\n        logger.debug(\"PUT group status: %d\", put_group.status_code)\n        logger.debug(\"PUT Group: %s\", put_group.json())", "docstring": "Adds an array of systems to specified group\n\nArgs:\ngroup_name: Display name of group\nsystems: Array of {'machine_id': machine_id}", "source": "juraj-google-style"}
{"code": "def setDataFrame(self, dataFrame, copyDataFrame=False, filePath=None):\n    if (not isinstance(dataFrame, pandas.core.frame.DataFrame)):\n        raise TypeError('not of type pandas.core.frame.DataFrame')\n    self.layoutAboutToBeChanged.emit()\n    if copyDataFrame:\n        self._dataFrame = dataFrame.copy()\n    else:\n        self._dataFrame = dataFrame\n    self._columnDtypeModel = ColumnDtypeModel(dataFrame)\n    self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)\n    self._columnDtypeModel.changeFailed.connect((lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)))\n    if (filePath is not None):\n        self._filePath = filePath\n    self.layoutChanged.emit()\n    self.dataChanged.emit()\n    self.dataFrameChanged.emit()", "docstring": "Setter function to _dataFrame. Holds all data.\n\nNote:\nIt's not implemented with python properties to keep Qt conventions.\n\nRaises:\nTypeError: if dataFrame is not of type pandas.core.frame.DataFrame.\n\nArgs:\ndataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.\ncopyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.\nIf you use it as is, you can change it from outside otherwise you have to reset the dataFrame\nafter external changes.", "source": "codesearchnet"}
{"code": "def make_serializable(json):\n    new_dict = dict()\n    for (key, value) in iteritems(json):\n        if is_valid_json(value):\n            new_dict[key] = value\n    return new_dict", "docstring": "This function ensures that the dictionary is JSON serializable. If not,\nkeys with non-serializable values are removed from the return value.\n\nArgs:\njson (dict): Dictionary to convert to serializable\n\nReturns:\nnew_dict (dict): New dictionary with non JSON serializable values removed", "source": "codesearchnet"}
{"code": "def predictPhenos(self,use_fixed=None,use_random=None):\n        \n        assert self.noisPos is not None,      'No noise element'\n        assert self.init,               'GP not initialised'\n        assert self.Ntest is not None,        'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'\n\n        use_fixed  = list(range(self.n_fixedEffs))\n        use_random = list(range(self.n_randEffs))\n\n        KiY = self.gp.agetKEffInvYCache()\n\n        if self.fast==False:\n            KiY = KiY.reshape(self.P,self.N).T\n\n        Ypred = sp.zeros((self.Ntest,self.P))\n\n        \n        for term_i in use_random:\n            if term_i!=self.noisPos:\n                Kstar = self.Kstar[term_i]\n                if Kstar is None:\n                    warnings.warn('warning: random effect term %d not used for predictions as it has None cross covariance'%term_i)\n                    continue\n                term  = sp.dot(Kstar.T,KiY)\n                if self.P>1:\n                    C    = self.getTraitCovar(term_i)\n                    term = sp.dot(term,C)\n                else:\n                    term *= self.getVarianceComps()[0,term_i]\n                Ypred += term\n\n        \n        weights = self.getWeights()\n        w_i = 0\n        for term_i in use_fixed:\n            Fstar = self.Fstar[term_i]\n            if Fstar is None:\n                warnings.warn('warning: fixed effect term %d not used for predictions as it has None test sample design'%term_i)\n                continue\n            if self.P==1:    A = sp.eye(1)\n            else:            A = self.vd.getDesign(term_i)\n            Fstar = self.Fstar[term_i]\n            W = weights[w_i:w_i+A.shape[0],0:1].T\n            term = sp.dot(Fstar,sp.dot(W,A))\n            w_i += A.shape[0]\n            Ypred += term\n\n        return Ypred", "docstring": "predict the conditional mean (BLUP)\n\nArgs:\nuse_fixed:        list of fixed effect indeces to use for predictions\nuse_random:        list of random effect indeces to use for predictions\nReturns:\npredictions (BLUP)", "source": "juraj-google-style"}
{"code": "def score_one(self, x: beam.Row) -> Optional[float]:\n    if len(x.__dict__) != 1:\n        raise ValueError('ZScore.score_one expected univariate input, but got %s', str(x))\n    v = next(iter(x))\n    if v is None or math.isnan(v):\n        return None\n    sub_stat = self._sub_stat_tracker.get()\n    stdev = self._stdev_tracker.get()\n    if math.isnan(stdev) or math.isnan(sub_stat):\n        return float('NaN')\n    if abs(stdev) < EPSILON:\n        return 0.0\n    return abs((v - sub_stat) / stdev)", "docstring": "Scores a data point using the Z-Score.\n\nArgs:\nx: A `beam.Row` containing a single numerical value.\n\nReturns:\nfloat | None: The Z-Score.", "source": "github-repos"}
{"code": "def set_logfile(self, filename, max_bytes=0, backup_count=0):\n        \n        _logger = logging.getLogger(\"neo-python\")\n\n        if not filename and not self.rotating_filehandler:\n            _logger.removeHandler(self.rotating_filehandler)\n        else:\n            self.rotating_filehandler = RotatingFileHandler(filename, mode='a', maxBytes=max_bytes, backupCount=backup_count, encoding=None)\n            self.rotating_filehandler.setLevel(logging.DEBUG)\n            self.rotating_filehandler.setFormatter(LogFormatter(color=False))\n            _logger.addHandler(self.rotating_filehandler)", "docstring": "Setup logging to a (rotating) logfile.\n\nArgs:\nfilename (str): Logfile. If filename is None, disable file logging\nmax_bytes (int): Maximum number of bytes per logfile. If used together with backup_count,\nlogfile will be rotated when it reaches this amount of bytes.\nbackup_count (int): Number of rotated logfiles to keep", "source": "juraj-google-style"}
{"code": "def enum_to_yaml(cls: Type[T_EnumToYAML], representer: Representer, data: T_EnumToYAML) -> ruamel.yaml.nodes.ScalarNode:\n    return representer.represent_scalar(f'!{cls.__name__}', f'{str(data)}')", "docstring": "Encodes YAML representation.\n\nThis is a mixin method for writing enum values to YAML. It needs to be added to the enum\nas a classmethod. See the module docstring for further information on this approach and how\nto implement it.\n\nThis method writes whatever is used in the string representation of the YAML value.\nUsually, this will be the unique name of the enumeration value. If the name is used,\nthe corresponding ``EnumFromYAML`` mixin can be used to recreate the value. If the name\nisn't used, more care may be necessary, so a ``from_yaml`` method for that particular\nenumeration may be necessary.\n\nNote:\nThis method assumes that the name of the enumeration value should be stored as a scalar node.\n\nArgs:\nrepresenter: Representation from YAML.\ndata: Enumeration value to be encoded.\nReturns:\nScalar representation of the name of the enumeration value.", "source": "codesearchnet"}
{"code": "def children(cls, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):\n    obj._maybe_initialize_trackable()\n    children = {}\n    for name, ref in obj._trackable_children(save_type, **kwargs).items():\n        ref = converter.convert_to_trackable(ref, parent=obj)\n        children[name] = ref\n    return children", "docstring": "Returns all child trackables attached to obj.\n\nArgs:\nobj: A `Trackable` object.\nsave_type: A string, can be 'savedmodel' or 'checkpoint'.\n**kwargs: kwargs to use when retrieving the object's children.\n\nReturns:\nDictionary of all children attached to the object with name to trackable.", "source": "github-repos"}
{"code": "def kill_raylet_monitor(self, check_alive=True):\n        \n        self._kill_process_type(\n            ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive)", "docstring": "Kill the raylet monitor.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "juraj-google-style"}
{"code": "def get_general_case_info(adapter, institute_id=None, slice_query=None):\n    general = {}\n    name_query = slice_query\n    cases = adapter.cases(owner=institute_id, name_query=name_query)\n    phenotype_cases = 0\n    causative_cases = 0\n    pinned_cases = 0\n    cohort_cases = 0\n    pedigree = {1: {'title': 'Single', 'count': 0}, 2: {'title': 'Duo', 'count': 0}, 3: {'title': 'Trio', 'count': 0}, 'many': {'title': 'Many', 'count': 0}}\n    case_ids = set()\n    total_cases = 0\n    for (total_cases, case) in enumerate(cases, 1):\n        if institute_id:\n            case_ids.add(case['_id'])\n        if case.get('phenotype_terms'):\n            phenotype_cases += 1\n        if case.get('causatives'):\n            causative_cases += 1\n        if case.get('suspects'):\n            pinned_cases += 1\n        if case.get('cohorts'):\n            cohort_cases += 1\n        nr_individuals = len(case.get('individuals', []))\n        if (nr_individuals == 0):\n            continue\n        if (nr_individuals > 3):\n            pedigree['many']['count'] += 1\n        else:\n            pedigree[nr_individuals]['count'] += 1\n    general['total_cases'] = total_cases\n    general['phenotype_cases'] = phenotype_cases\n    general['causative_cases'] = causative_cases\n    general['pinned_cases'] = pinned_cases\n    general['cohort_cases'] = cohort_cases\n    general['pedigree'] = pedigree\n    general['case_ids'] = case_ids\n    return general", "docstring": "Return general information about cases\n\nArgs:\nadapter(adapter.MongoAdapter)\ninstitute_id(str)\nslice_query(str):   Query to filter cases to obtain statistics for.\n\n\nReturns:\ngeneral(dict)", "source": "codesearchnet"}
{"code": "def merge(self, other):\n    if ((self.m != other.m) or (self.p != other.p)):\n        raise ValueError('Cannot merge HyperLogLog with different                    precisions.')\n    self.reg = np.maximum(self.reg, other.reg)", "docstring": "Merge the other HyperLogLog with this one, making this the union of the\ntwo.\n\nArgs:\nother (datasketch.HyperLogLog):", "source": "codesearchnet"}
{"code": "def SelectComponent(ds, idxs):\n    \n    return MapData(ds, lambda dp: [dp[i] for i in idxs])", "docstring": "Select / reorder components from datapoints.\n\nArgs:\nds (DataFlow): input DataFlow.\nidxs (list[int]): a list of component indices.\n\nExample:\n\n.. code-block:: none\n\noriginal df produces: [c1, c2, c3]\nidxs: [2,1]\nthis df: [c3, c2]", "source": "juraj-google-style"}
{"code": "def __contains__(self, temp_ver):\n        \n        return os.path.exists(self._prefixed(temp_ver.name))", "docstring": "Checks if a given version is in this store\n\nArgs:\ntemp_ver (TemplateVersion): Version to look for\n\nReturns:\nbool: ``True`` if the version is in this store", "source": "juraj-google-style"}
{"code": "def append(self, *values):\n        \n\n        for value in values:\n            list.append(self, value)\n        return self", "docstring": "Append values at the end of the list\n\nAllow chaining.\n\nArgs:\nvalues: values to be appened at the end.\n\nExample:\n\n>>> from ww import l\n>>> lst = l([])\n>>> lst.append(1)\n[1]\n>>> lst\n[1]\n>>> lst.append(2, 3).append(4,5)\n[1, 2, 3, 4, 5]\n>>> lst\n[1, 2, 3, 4, 5]", "source": "juraj-google-style"}
{"code": "def propagate(cls, date):\n    date = date.change_scale('TDB')\n    t_tdb = date.julian_century\n\n    def cos(angle):\n        'cosine in degrees'\n        return np.cos(np.radians(angle))\n\n    def sin(angle):\n        'sine in degrees'\n        return np.sin(np.radians(angle))\n    lambda_el = (((((((218.32 + (481267.8813 * t_tdb)) + (6.29 * sin((134.9 + (477198.85 * t_tdb))))) - (1.27 * sin((259.2 - (413335.38 * t_tdb))))) + (0.66 * sin((235.7 + (890534.23 * t_tdb))))) + (0.21 * sin((269.9 + (954397.7 * t_tdb))))) - (0.19 * sin((357.5 + (35999.05 * t_tdb))))) - (0.11 * sin((186.6 + (966404.05 * t_tdb)))))\n    phi_el = ((((5.13 * sin((93.3 + (483202.03 * t_tdb)))) + (0.28 * sin((228.2 + (960400.87 * t_tdb))))) - (0.28 * sin((318.3 + (6003.18 * t_tdb))))) - (0.17 * sin((217.6 - (407332.2 * t_tdb)))))\n    p = ((((0.9508 + (0.0518 * cos((134.9 + (477198.85 * t_tdb))))) + (0.0095 * cos((259.2 - (413335.38 * t_tdb))))) + (0.0078 * cos((235.7 + (890534.23 * t_tdb))))) + (0.0028 * cos((269.9 + (954397.7 * t_tdb)))))\n    e_bar = (((23.439291 - (0.0130042 * t_tdb)) - (1.64e-07 * (t_tdb ** 2))) + (5.04e-07 * (t_tdb ** 3)))\n    r_moon = (Earth.r / sin(p))\n    state_vector = (r_moon * np.array([(cos(phi_el) * cos(lambda_el)), (((cos(e_bar) * cos(phi_el)) * sin(lambda_el)) - (sin(e_bar) * sin(phi_el))), (((sin(e_bar) * cos(phi_el)) * sin(lambda_el)) + (cos(e_bar) * sin(phi_el))), 0, 0, 0]))\n    return Orbit(date, state_vector, 'cartesian', 'EME2000', cls())", "docstring": "Compute the Moon position at a given date\n\nArgs:\ndate (~beyond.utils.date.Date)\nReturn:\n~beyond.orbits.orbit.Orbit: Position of the Moon in EME2000 frame\nExample:\n\n.. code-block:: python\n\nfrom beyond.utils.date import Date\nMoonPropagator.propagate(Date(1994, 4, 28))\n# Orbit =\n#   date = 1994-04-28T00:00:00 UTC\n#   form = Cartesian\n#   frame = EME2000\n#   propag = MoonPropagator\n#   coord =\n#     x = -134181157.317\n#     y = -311598171.54\n#     z = -126699062.437\n#     vx = 0.0\n#     vy = 0.0\n#     vz = 0.0", "source": "codesearchnet"}
{"code": "def get_path_str(self, sep=os.path.sep, type_str=None):\n        \n        return sep.join(\n            list(\n                reversed(\n                    [\n                        v.label_str\n                        for v in self.parent_gen\n                        if type_str in (None, v.type_str)\n                    ]\n                )\n            )\n        )", "docstring": "Get path from root to this node.\n\nArgs:\nsep: str\nOne or more characters to insert between each element in the path.\nDefaults to \"/\" on Unix and \"\\\" on Windows.\n\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include\ninformation from nodes of that type.\n\nReturns:\nstr: String describing the path from the root to this node.", "source": "juraj-google-style"}
{"code": "def shannon_entropy(pvec, base=2):\n    \n    \n    if base == 2:\n        def logfn(x):\n            return - x * np.log2(x)\n    elif base == np.e:\n        def logfn(x):\n            return - x * np.log(x)\n    else:\n        def logfn(x):\n            return -x * np.log(x) / np.log(base)\n\n    h = 0.\n    for x in pvec:\n        if 0 < x < 1:\n            h += logfn(x)\n    return h", "docstring": "Compute the Shannon entropy of a probability vector.\n\nThe shannon entropy of a probability vector pv is defined as\n$H(pv) = - \\\\sum_j pv[j] log_b (pv[j])$ where $0 log_b 0 = 0$.\n\nArgs:\npvec (array_like): a probability vector.\nbase (int): the base of the logarith\n\nReturns:\nfloat: The Shannon entropy H(pvec).", "source": "juraj-google-style"}
{"code": "def ShowUnspentCoins(wallet, asset_id=None, from_addr=None, watch_only=False, do_count=False):\n    if (wallet is None):\n        print('Please open a wallet.')\n        return\n    watch_only_flag = (64 if watch_only else 0)\n    if asset_id:\n        unspents = wallet.FindUnspentCoinsByAsset(asset_id, from_addr=from_addr, watch_only_val=watch_only_flag)\n    else:\n        unspents = wallet.FindUnspentCoins(from_addr=from_addr, watch_only_val=watch_only)\n    if do_count:\n        print('\\n-----------------------------------------------')\n        print(('Total Unspent: %s' % len(unspents)))\n        return unspents\n    for unspent in unspents:\n        print('\\n-----------------------------------------------')\n        print(json.dumps(unspent.ToJson(), indent=4))\n    if (not unspents):\n        print('No unspent assets matching the arguments.')\n    return unspents", "docstring": "Show unspent coin objects in the wallet.\n\nArgs:\nwallet (neo.Wallet): wallet to show unspent coins from.\nasset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain.\nfrom_addr (UInt160): a bytearray (len 20) representing an address.\nwatch_only (bool): indicate if this shows coins that are in 'watch only' addresses.\ndo_count (bool): if True only show a count of unspent assets.\n\nReturns:\nlist: a list of unspent ``neo.Wallet.Coin`` in the wallet", "source": "codesearchnet"}
{"code": "def _encode_value(self, value):\n        \n        if isinstance(value, (int, float, str, bool, datetime)):\n            return value\n        elif isinstance(value, list):\n            return [self._encode_value(item) for item in value]\n        elif isinstance(value, dict):\n            result = {}\n            for key, item in value.items():\n                result[key] = self._encode_value(item)\n            return result\n        else:\n            return self._gridfs.put(Binary(pickle.dumps(value)),\n                                    workflow_id=self._workflow_id)", "docstring": "Encodes the value such that it can be stored into MongoDB.\n\nAny primitive types are stored directly into MongoDB, while non-primitive types\nare pickled and stored as GridFS objects. The id pointing to a GridFS object\nreplaces the original value.\n\nArgs:\nvalue (object): The object that should be encoded for storing in MongoDB.\n\nReturns:\nobject: The encoded value ready to be stored in MongoDB.", "source": "juraj-google-style"}
{"code": "def update_metadata(self, resource, keys_vals):\n    self.metadata_service.set_auth(self._token_metadata)\n    self.metadata_service.update(resource, keys_vals)", "docstring": "Updates key-value pairs with the given resource.\n\nWill attempt to update all key-value pairs even if some fail.\nKeys must already exist.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys_vals (dictionary): Collection of key-value pairs to update on\nthe given resource.\n\nRaises:\nHTTPErrorList on failure.", "source": "codesearchnet"}
{"code": "def set_device_state(self, device, state, id_override=None, type_override=None):\n    _LOGGER.info('Setting state via online API')\n    object_id = (id_override or device.object_id())\n    object_type = (type_override or device.object_type())\n    url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)\n    if ((state is None) or (object_type == 'group')):\n        url_string += '/activate'\n        if (state is None):\n            arequest = requests.post(url_string, headers=API_HEADERS)\n        else:\n            arequest = requests.post(url_string, data=json.dumps(state), headers=API_HEADERS)\n    else:\n        arequest = requests.put(url_string, data=json.dumps(state), headers=API_HEADERS)\n    if (arequest.status_code == 401):\n        new_token = refresh_access_token()\n        if new_token:\n            arequest = requests.put(url_string, data=json.dumps(state), headers=API_HEADERS)\n        else:\n            raise WinkAPIException('Failed to refresh access token.')\n    response_json = arequest.json()\n    _LOGGER.debug('%s', response_json)\n    return response_json", "docstring": "Set device state via online API.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nstate (Dict): The state being requested.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\nresponse_json (Dict): The API's response in dictionary format", "source": "codesearchnet"}
{"code": "def is_initialized(self, name=None):\n    if values_util.is_saving_non_distributed():\n        return self._primary.is_initialized()\n    if self._use_packed_variable():\n        return self._packed_var.is_initialized()\n    result = self._primary.is_initialized()\n    for v in self._values[1:-1]:\n        result = math_ops.logical_and(result, v.is_initialized())\n    result = math_ops.logical_and(result, self._values[-1].is_initialized(), name=name)\n    return result", "docstring": "Identifies if all the component variables are initialized.\n\nArgs:\nname: Name of the final `logical_and` op.\n\nReturns:\nThe op that evaluates to True or False depending on if all the\ncomponent variables are initialized.", "source": "github-repos"}
{"code": "def __init__(self, pred=None, pivot=None, branch=None, name='cond_text', context_def=None, import_scope=None):\n    self._name = ops.get_default_graph().unique_name(name)\n    if context_def:\n        self._init_from_proto(context_def, import_scope=import_scope)\n    else:\n        ControlFlowContext.__init__(self)\n        self._pred = pred\n        self._pivot = pivot\n        self._branch = branch\n        self._values.add(pred.name)\n        self._external_values[pred.name] = pred\n        self._values.add(pivot.name)\n        pivot.op._set_control_flow_context(self)", "docstring": "Creates a `CondContext`.\n\nArgs:\npred: The `boolean` tensor for the conditional predicate.\npivot: The predicate tensor in this branch.\nbranch: 0 or 1 representing this branch.\nname: Name of the `CondContext` python object.\ncontext_def: Optional `ContextDef` protocol buffer to initialize the\n`CondContext` object from.\nimport_scope: Optional `string`. Name scope to add. Only used when\ninitialing from protocol buffer.", "source": "github-repos"}
{"code": "def get_states(self, n):\n    return self.states[len(self.new_states):(len(self.new_states) + n)]", "docstring": "Get the next n recurrent states.\n\nCalled by layers in \"incremental\" mode.\n\nArgs:\nn: an integer\nReturns:\na list of n Tensors", "source": "codesearchnet"}
{"code": "def _unflatten_dict(flat_dict, prefixes):\n  \n  original_dict = {}\n  for key, value in flat_dict.items():\n    prefix_found = False\n    for prefix in prefixes:\n      full_prefix = \"__\" + prefix + \"_\"\n      if key.startswith(full_prefix):\n        \n        if prefix not in original_dict:\n          original_dict[prefix] = {}\n        original_dict[prefix][key[len(full_prefix):]] = value\n        prefix_found = True\n        break\n    if not prefix_found:\n      \n      original_dict[key] = value\n\n  return original_dict", "docstring": "Returns a dict of dicts if any prefixes match keys in the flat dict.\n\nThe function handles the case where the prefix may not be a dict.\n\nArgs:\nflat_dict: A dict without any nesting.\nprefixes: A list of strings which may have been dicts in the\noriginal structure.", "source": "juraj-google-style"}
{"code": "def __init__(self, filter=None):\n    \n    self._filter = filter\n    self._context = datalab.Context.default()\n    self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)\n    self._page_size = 0", "docstring": "Initializes an instance of a CloudML Job list that is iteratable (\"for job in jobs()\").\n\nArgs:\nfilter: filter string for retrieving jobs, such as \"state=FAILED\"\ncontext: an optional Context object providing project_id and credentials.\napi: an optional CloudML API client.", "source": "juraj-google-style"}
{"code": "def _fulfillment_to_details(fulfillment):\n    if (fulfillment.type_name == 'ed25519-sha-256'):\n        return {'type': 'ed25519-sha-256', 'public_key': base58.b58encode(fulfillment.public_key).decode()}\n    if (fulfillment.type_name == 'threshold-sha-256'):\n        subconditions = [_fulfillment_to_details(cond['body']) for cond in fulfillment.subconditions]\n        return {'type': 'threshold-sha-256', 'threshold': fulfillment.threshold, 'subconditions': subconditions}\n    raise UnsupportedTypeError(fulfillment.type_name)", "docstring": "Encode a fulfillment as a details dictionary\n\nArgs:\nfulfillment: Crypto-conditions Fulfillment object", "source": "codesearchnet"}
{"code": "def __init__(self, report_type):\n    \n    self._report_type = report_type\n    self.categories = dict([(x, {}) for x in self.active_days])", "docstring": "Constructor.\n\nArgs:\nreport_type: rdf_stats.ClientGraphSeries.ReportType for the client stats\nto track.", "source": "juraj-google-style"}
{"code": "def __init__(self, port=2223, task_type=None, task_id=None, rpc_layer=None, environment=None):\n    self._task_type = task_type\n    self._task_id = task_id\n    self._rpc_layer = rpc_layer\n    self._environment = environment\n    self._port = str(port)", "docstring": "Creates a new SageMakerClusterResolver.\n\nArgs:\nport: (integer, optional) Override default port usage of 2223\ntask_type: (String, optional) Overrides the task type.\ntask_id: (Integer, optional) Overrides the task index.\nrpc_layer: (String, optional) Overrides the rpc layer TensorFlow uses.\nenvironment: (String, optional) Overrides the environment TensorFlow\noperates in.", "source": "github-repos"}
{"code": "def do_batch(args):\n    if (args.subcommand == 'list'):\n        do_batch_list(args)\n    if (args.subcommand == 'show'):\n        do_batch_show(args)\n    if (args.subcommand == 'status'):\n        do_batch_status(args)\n    if (args.subcommand == 'submit'):\n        do_batch_submit(args)", "docstring": "Runs the batch list, batch show or batch status command, printing output\nto the console\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "codesearchnet"}
{"code": "def stitch_images(images, margin=5, cols=5):\n    if (len(images) == 0):\n        return None\n    (h, w, c) = images[0].shape\n    n_rows = int(math.ceil((len(images) / cols)))\n    n_cols = min(len(images), cols)\n    out_w = ((n_cols * w) + ((n_cols - 1) * margin))\n    out_h = ((n_rows * h) + ((n_rows - 1) * margin))\n    stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)\n    for row in range(n_rows):\n        for col in range(n_cols):\n            img_idx = ((row * cols) + col)\n            if (img_idx >= len(images)):\n                break\n            stitched_images[(((h + margin) * row):(((h + margin) * row) + h), ((w + margin) * col):(((w + margin) * col) + w), :)] = images[img_idx]\n    return stitched_images", "docstring": "Utility function to stitch images together with a `margin`.\n\nArgs:\nimages: The array of 2D images to stitch.\nmargin: The black border margin size between images (Default value = 5)\ncols: Max number of image cols. New row is created when number of images exceed the column size.\n(Default value = 5)\n\nReturns:\nA single numpy image array comprising of input images.", "source": "codesearchnet"}
{"code": "def _open_file(filename):\n    if (filename is None):\n        raise DataSourceError('Trace filename is not defined')\n    try:\n        trace_file = open(filename, 'r')\n    except IOError as e:\n        raise DataSourceError(('Unable to open trace file %s' % filename), e)\n    else:\n        LOG.debug('Opened trace file %s', filename)\n        return trace_file", "docstring": "Attempt to open the the file at ``filename`` for reading.\n\nRaises:\nDataSourceError, if the file cannot be opened.", "source": "codesearchnet"}
{"code": "def init_log(logger, loglevel=0):\n    \n    \n    global log_tmp_dir, log_tmp_fn\n    log_tmp_dir = tempfile.mkdtemp()\n    log_tmp_fn = os.path.join(log_tmp_dir, 'multiqc.log')\n\n    \n    debug_template = '[%(asctime)s] %(name)-50s [%(levelname)-7s]  %(message)s'\n    info_template = '[%(levelname)-7s] %(module)15s : %(message)s'\n\n    \n    logger.setLevel(getattr(logging, 'DEBUG'))\n\n    \n    console = logging.StreamHandler()\n    console.setLevel(getattr(logging, loglevel))\n    if loglevel == 'DEBUG':\n        console.setFormatter(logging.Formatter(debug_template))\n    else:\n        console.setFormatter(logging.Formatter(info_template))\n    logger.addHandler(console)\n\n    \n    file_handler = logging.FileHandler(log_tmp_fn, encoding='utf-8')\n    file_handler.setLevel(getattr(logging, 'DEBUG')) \n    file_handler.setFormatter(logging.Formatter(debug_template))\n    logger.addHandler(file_handler)", "docstring": "Initializes logging.\nPrints logs to console with level defined by loglevel\nAlso prints verbose log to the multiqc data directory if available.\n(multiqc_data/multiqc.log)\n\nArgs:\nloglevel (str): Determines the level of the log output.", "source": "juraj-google-style"}
{"code": "def post_process_generation(self, generation: Union[str, List[str]], fix_markdown: bool=True, num_workers: Optional[int]=None) -> Union[str, List[str]]:\n    requires_backends(self, ['nltk', 'levenshtein'])\n    if isinstance(generation, list):\n        if num_workers is not None and isinstance(num_workers, int):\n            with Pool(num_workers) as p:\n                return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation)\n        else:\n            return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation]\n    else:\n        return self.post_process_single(generation, fix_markdown=fix_markdown)", "docstring": "Postprocess a generated text or a list of generated texts.\n\nThis function can be used to perform postprocessing on generated text, such as fixing Markdown formatting.\n\nPostprocessing is quite slow so it is recommended to use multiprocessing to speed up the process.\n\nArgs:\ngeneration (Union[str, List[str]]):\nThe generated text or a list of generated texts.\nfix_markdown (`bool`, *optional*, defaults to `True`):\nWhether to perform Markdown formatting fixes.\nnum_workers (`int`, *optional*):\nOptional number of workers to pass to leverage multiprocessing (postprocessing several texts in\nparallel).\n\nReturns:\nUnion[str, List[str]]: The postprocessed text or list of postprocessed texts.", "source": "github-repos"}
{"code": "def SCM(root_dir, repo=None):  \n    \n    if Git.is_repo(root_dir) or Git.is_submodule(root_dir):\n        return Git(root_dir, repo=repo)\n\n    return NoSCM(root_dir, repo=repo)", "docstring": "Returns SCM instance that corresponds to a repo at the specified\npath.\n\nArgs:\nroot_dir (str): path to a root directory of the repo.\nrepo (dvc.repo.Repo): dvc repo instance that root_dir belongs to.\n\nReturns:\ndvc.scm.base.Base: SCM instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, filename,f_start=None, f_stop=None,t_start=None, t_stop=None, load_data=True, max_load=1.):\n        \n        super(FilReader, self).__init__()\n\n        self.header_keywords_types = sigproc.header_keyword_types\n\n        if filename and os.path.isfile(filename):\n            self.filename = filename\n            self.load_data = load_data\n            self.header = self.read_header()\n            self.file_size_bytes = os.path.getsize(self.filename)\n            self.idx_data = sigproc.len_header(self.filename)\n            self.n_channels_in_file  = self.header[b'nchans']\n            self.n_beams_in_file = self.header[b'nifs'] \n            self.n_pols_in_file = 1 \n            self._n_bytes = int(self.header[b'nbits'] / 8)  \n            self._d_type = self._setup_dtype()\n            self._setup_n_ints_in_file()\n            self.file_shape = (self.n_ints_in_file,self.n_beams_in_file,self.n_channels_in_file)\n\n            if self.header[b'foff'] < 0:\n                self.f_end  = self.header[b'fch1']\n                self.f_begin  = self.f_end + self.n_channels_in_file*self.header[b'foff']\n            else:\n                self.f_begin  = self.header[b'fch1']\n                self.f_end  = self.f_begin + self.n_channels_in_file*self.header[b'foff']\n\n            self.t_begin = 0\n            self.t_end = self.n_ints_in_file\n\n            \n            self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, init=True)\n            \n            self._setup_chans()\n            \n            self._setup_freqs()\n\n            self.freq_axis = 2\n            self.time_axis = 0\n            self.beam_axis = 1  \n\n\n\n            \n\n\n            \n            if max_load is not None:\n                if max_load > 1.0:\n                    logger.warning('Setting data limit != 1GB, please handle with care!')\n                self.MAX_DATA_ARRAY_SIZE = max_load * MAX_DATA_ARRAY_SIZE_UNIT\n            else:\n                self.MAX_DATA_ARRAY_SIZE = MAX_DATA_ARRAY_SIZE_UNIT\n\n            if self.file_size_bytes > self.MAX_DATA_ARRAY_SIZE:\n                self.large_file = True\n            else:\n                self.large_file = False\n\n            if self.load_data:\n                if self.large_file:\n                    if self.f_start or self.f_stop or self.t_start or self.t_stop:\n                        if self.isheavy():\n                            logger.warning(\"Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection.\" % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))\n                            self._init_empty_selection()\n                        else:\n                            self.read_data()\n                    else:\n                        logger.warning(\"The file is of size %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded. 
You could try another (t,v) selection.\"%(self.file_size_bytes/(1024.**3), self.MAX_DATA_ARRAY_SIZE/(1024.**3)))\n                        self._init_empty_selection()\n                else:\n                    self.read_data()\n            else:\n                logger.info(\"Skipping loading data ...\")\n                self._init_empty_selection()\n        else:\n            raise IOError(\"Need a file to open, please give me one!\")", "docstring": "Constructor.\n\nArgs:\nfilename (str): filename of blimpy file.\nf_start (float): start frequency, in MHz\nf_stop (float): stop frequency, in MHz\nt_start (int): start time bin\nt_stop (int): stop time bin", "source": "juraj-google-style"}
{"code": "def mkdir(path):\n    try:\n        os.makedirs(path)\n        if (not os.path.isdir(path)):\n            raise IOError('path is not a directory')\n    except OSError as e:\n        if ((e.errno == 17) and os.path.isdir(path)):\n            return\n        raise", "docstring": "Make a directory and its parents.\n\nArgs:\npath (str): path to create\n\nReturns:\nNone\n\nRaises:\nOSError if the directory cannot be created.", "source": "codesearchnet"}
{"code": "def delete_bq_table(project, dataset_id, table_id):\n    _LOGGER.info('Clean up a BigQuery table with project: %s, dataset: %s, table: %s.', project, dataset_id, table_id)\n    client = bigquery.Client(project=project)\n    table_ref = client.dataset(dataset_id).table(table_id)\n    try:\n        client.delete_table(table_ref)\n    except gexc.NotFound:\n        raise GcpTestIOError('BigQuery table does not exist: %s' % table_ref)", "docstring": "Delete a BiqQuery table.\n\nArgs:\nproject: Name of the project.\ndataset_id: Name of the dataset where table is.\ntable_id: Name of the table.", "source": "github-repos"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Instantiates a `Loss` from its config (output of `get_config()`).\n\nArgs:\nconfig: Output of `get_config()`.\n\nReturns:\nA `Loss` instance.", "source": "github-repos"}
{"code": "def get_percentage_lattice_parameter_changes(self):\n    initial_latt = self.initial.lattice\n    final_latt = self.final.lattice\n    d = {l: ((getattr(final_latt, l) / getattr(initial_latt, l)) - 1) for l in ['a', 'b', 'c']}\n    return d", "docstring": "Returns the percentage lattice parameter changes.\n\nReturns:\nA dict of the percentage change in lattice parameter, e.g.,\n{'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,\n2.1% and -3.1% in the a, b and c lattice parameters respectively.", "source": "codesearchnet"}
{"code": "def execute_only_once():\n    f = inspect.currentframe().f_back\n    ident = (f.f_code.co_filename, f.f_lineno)\n    if (ident in _EXECUTE_HISTORY):\n        return False\n    _EXECUTE_HISTORY.add(ident)\n    return True", "docstring": "Each called in the code to this function is guaranteed to return True the\nfirst time and False afterwards.\n\nReturns:\nbool: whether this is the first time this function gets called from this line of code.\n\nExample:\n.. code-block:: python\n\nif execute_only_once():\n# do something only once", "source": "codesearchnet"}
{"code": "def iterator_product(variables: VarType, parent: str=None) -> Iterable[VarMatrix]:\n    logger.debug('Yielding from product iterator')\n    if isinstance(variables, list):\n        raise ValueError(f'Product only takes mappings of values, got {variables} of type {type(variables)}')\n    (yield list(variable_matrix(variables, parent, 'product')))", "docstring": "Apply the product operator to a set of variables.\n\nThis uses the python itertools.product iterator to combine multiple variables\nsuch that all possible combinations are generated. This is the default iterator\nhowever this is a method of manually specifying the option.\n\nArgs:\nvariables: The variables object\nparent: Unused", "source": "codesearchnet"}
{"code": "def __init__(self, points, add_bounding_box=False):\n        \n        self.points = list(points)\n        dim = [len(i) for i in self.points]\n        if max(dim) != min(dim):\n            raise ValueError(\"Input points must all have the same dimension!\")\n        self.dim = dim[0]\n        if add_bounding_box:\n            coord_ranges = zip(np.amin(points, 0), np.amax(points, 0))\n            for coord in itertools.product(*coord_ranges):\n                self.points.append(coord)\n        output = qvoronoi(\"o Fv\", self.points)\n        output.pop(0)\n        nvertices, nregions, i = [int(i) for i in output.pop(0).split()]\n        self.vertices = [[float(j) for j in output.pop(0).split()]\n                         for i in range(nvertices)]\n        self.regions = [[int(j) for j in output.pop(0).split()[1:]]\n                        for i in range(nregions)]\n\n        output.pop(0)\n        ridges = {}\n        for line in output:\n            val = [int(i) for i in line.split()]\n            ridges[tuple(val[1:3])] = val[3:]\n        self.ridges = ridges", "docstring": "Initializes a VoronoiTess from points.\n\nArgs:\npoints ([[float]]): All the points as a sequence of sequences.\ne.g., [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]]\nadd_bounding_box (bool): If True, a hypercube corresponding to\nthe extremes of each coordinate will be added to the list of\npoints.", "source": "juraj-google-style"}
{"code": "def setMAC(self, xEUI):\n        \n        print '%s call setMAC' % self.port\n\n        address64 = ''\n        try:\n            if not xEUI:\n                address64 = self.mac\n\n            if not isinstance(xEUI, str):\n                address64 = self.__convertLongToString(xEUI)\n\n                \n                if len(address64) < 16:\n                    address64 = address64.zfill(16)\n                    print address64\n            else:\n                address64 = xEUI\n\n            cmd = WPANCTL_CMD + 'setprop NCP:MACAddress %s' % address64\n\n            if self.__sendCommand(cmd)[0] != 'Fail':\n                self.mac = address64\n                return True\n            else:\n                return False\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('setMAC() Error: ' + str(e))", "docstring": "set the extended addresss of Thread device\n\nArgs:\nxEUI: extended address in hex format\n\nReturns:\nTrue: successful to set the extended address\nFalse: fail to set the extended address", "source": "juraj-google-style"}
{"code": "def _build_late_dispatcher(func_name):\n\n    def _late_dynamic_dispatcher(obj, *args):\n        method = getattr(obj, func_name, None)\n        if (not callable(method)):\n            raise NotImplementedError(('Instance method %r is not implemented by %r.' % (func_name, obj)))\n        return method(*args)\n    return _late_dynamic_dispatcher", "docstring": "Return a function that calls method 'func_name' on objects.\n\nThis is useful for building late-bound dynamic dispatch.\n\nArguments:\nfunc_name: The name of the instance method that should be called.\n\nReturns:\nA function that takes an 'obj' parameter, followed by *args and\nreturns the result of calling the instance method with the same\nname as the contents of 'func_name' on the 'obj' object with the\narguments from *args.", "source": "codesearchnet"}
{"code": "def rebuild(self, image_id=None, return_dict=True):\n        \n        if not image_id:\n            image_id = self.image['id']\n\n        return self._perform_action(\n            {\"type\": \"rebuild\", \"image\": image_id},\n            return_dict\n        )", "docstring": "Restore the droplet to an image ( snapshot or backup )\n\nArgs:\nimage_id (int): id of image\n\nOptional Args:\nreturn_dict (bool): Return a dict when True (default),\notherwise return an Action.\n\nReturns dict or Action", "source": "juraj-google-style"}
{"code": "def visit_Import(self, node):\n    for import_alias in node.names:\n        full_import = (import_alias.name, import_alias.asname)\n        detection = self._api_analysis_spec.imports_to_detect.get(full_import, None)\n        if detection:\n            self.add_result(detection)\n            self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message)\n    self.generic_visit(node)", "docstring": "Handle visiting an import node in the AST.\n\nArgs:\nnode: Current Node", "source": "github-repos"}
{"code": "def append(self, event, category=None):\n    date = datetime.datetime.now()\n    self.store.insert(0, (date, event, category))\n    if (len(self.store) > self.size):\n        del self.store[(- 1)]", "docstring": "Adds a new event to the trace store.\nThe event may hava a category\n\nArgs:\nevent (spade.message.Message): the event to be stored\ncategory (str, optional): a category to classify the event (Default value = None)", "source": "codesearchnet"}
{"code": "def get_programs_dict(pkgname_only=None, flag_protected=False):\n    \n\n    ___ret = _get_programs_dict()\n    __ret = ___ret if pkgname_only is None else OrderedDict(((pkgname_only, ___ret[pkgname_only]),))\n    if flag_protected:\n        _ret = __ret\n    else:\n        _ret = copy.deepcopy(__ret)\n        for value in _ret.values():\n            value[\"exeinfo\"] = [exeinfo for exeinfo in value[\"exeinfo\"] if not exeinfo.filename.startswith(\"_\")]\n\n    \n    ret = _ret if pkgname_only is None and flag_protected is None else \\\n        OrderedDict(((key, value) for key, value in _ret.items() if len(value[\"exeinfo\"]) > 0))\n\n    return ret", "docstring": "Scans COLLABORATORS_S packages for scripts, eventually filtering if arguments passed\n\nArgs:\npkgname_only: name of single package within COLLABORATORS_S\nflag_protected: include scripts starting with \"_\"?\n\nReturns:\ndictionary: {\"packagename0\": {\"exeinfo\": [ExeInfo00, ...], \"description\": description0}, ...}", "source": "juraj-google-style"}
{"code": "def transform_absolute_coords(self, width, height):\n\t\t\n\n\t\tif self.type != EventType.POINTER_MOTION_ABSOLUTE:\n\t\t\traise AttributeError(_wrong_meth.format(self.type))\n\t\tabs_x = self._libinput \\\n\t\t\t.libinput_event_pointer_get_absolute_x_transformed(\n\t\t\t\tself._handle, width)\n\t\tabs_y = self._libinput \\\n\t\t\t.libinput_event_pointer_get_absolute_y_transformed(\n\t\t\t\tself._handle, height)\n\t\treturn abs_x, abs_y", "docstring": "Return the current absolute coordinates of the pointer event,\ntransformed to screen coordinates.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,\nthis method raises :exc:`AttributeError`.\n\nArgs:\nwidth (int): The current output screen width.\nheight (int): The current output screen height.\nReturns:\n(float, float): The current absolute (x, y) coordinates transformed\nto a screen coordinates.\nRaises:\nAttributeError", "source": "juraj-google-style"}
{"code": "def get_structure_from_id(self, task_id, final_structure=True):\n        \n        args = {'task_id': task_id}\n        field = 'output.crystal' if final_structure else 'input.crystal'\n        results = tuple(self.query([field], args))\n\n        if len(results) > 1:\n            raise QueryError(\"More than one result found for task_id {}!\".format(task_id))\n        elif len(results) == 0:\n            raise QueryError(\"No structure found for task_id {}!\".format(task_id))\n        c = results[0]\n        return Structure.from_dict(c[field])", "docstring": "Returns a structure from the database given the task id.\n\nArgs:\ntask_id:\nThe task_id to query for.\nfinal_structure:\nWhether to obtain the final or initial structure. Defaults to\nTrue.", "source": "juraj-google-style"}
{"code": "def split(cls, tensor, split_dimension, num_devices, input_shape=None):\n    if input_shape:\n        shape = input_shape\n    else:\n        shape = tensor.shape.as_list()\n    if shape[split_dimension] is not None and shape[split_dimension] < num_devices:\n        raise ValueError('Split dimension was smaller than the required number of splits: shape=%r, dimension=%r, num_devices=%r' % (shape, split_dimension, num_devices))\n    tile_assignment_dims = [1] * len(shape)\n    tile_assignment_dims[split_dimension] = num_devices\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=tile_assignment_dims, tile_assignment_devices=range(num_devices)))", "docstring": "Returns a Sharding that splits a tensor across a dimension.\n\nThis creates a Tiled attribute, similar to tile(), but easier to use for the\ncommon case of tiling a tensor N ways in one dimension.\n\nArgs:\ntensor: A tf.Tensor to split.\nsplit_dimension: The dimension number to split.\nnum_devices: The number of cores to split `tensor` over.\ninput_shape: The shape of the original tensor.\n\nRaises:\nValueError: The tensor to split was smaller in the split dimension than\nthe number of devices to split over.", "source": "github-repos"}
{"code": "def locate(desktop_filename_or_name):\n\t\n\n\tpaths = [\n\t\tos.path.expanduser('~/.local/share/applications'),\n\t\t'/usr/share/applications']\n\n\tresult = []\n\n\tfor path in paths:\n\t\tfor file in os.listdir(path):\n\t\t\tif desktop_filename_or_name in file.split(\n\t\t\t\t\t'.') or desktop_filename_or_name == file:\n\t\t\t\t\n\t\t\t\tresult.append(os.path.join(path, file))\n\n\t\t\telse:\n\t\t\t\tfile_parsed = parse(os.path.join(path, file))\n\n\t\t\t\ttry:\n\t\t\t\t\tif desktop_filename_or_name.lower() == file_parsed[\n\t\t\t\t\t\t\t'Name'].lower():\n\t\t\t\t\t\tresult.append(file)\n\t\t\t\t\telif desktop_filename_or_name.lower() == file_parsed[\n\t\t\t\t\t\t\t'Exec'].split(' ')[0]:\n\t\t\t\t\t\tresult.append(file)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n\n\tfor res in result:\n\t\tif not res.endswith('.desktop'):\n\t\t\tresult.remove(res)\n\n\tif not result and not result.endswith('.desktop'):\n\t\tresult.extend(locate(desktop_filename_or_name + '.desktop'))\n\n\treturn result", "docstring": "Locate a .desktop from the standard locations.\nFind the path to the .desktop file of a given .desktop filename or application name.\nStandard locations:\n- ``~/.local/share/applications/``\n- ``/usr/share/applications``\nArgs:\ndesktop_filename_or_name (str): Either the filename of a .desktop file or the name of an application.\nReturns:\nlist: A list of all matching .desktop files found.", "source": "juraj-google-style"}
{"code": "def record_operation_forwardprop_only(op_type, output_tensors, input_tensors, backward_function, forwardprop_output_indices):\n    pywrap_tfe.TFE_Py_TapeSetRecordOperationForwardprop(op_type, output_tensors, input_tensors, backward_function, forwardprop_output_indices)", "docstring": "Records the operation on all forward accumulators in the stack.\n\nArgs:\nop_type: a string for the operation type, used in the backprop code\noutput_tensors: a list of Python Tensor objects output by the operation\ninput_tensors: a list of input Tensors to the recorded operation\nbackward_function: the function to be called to, given the gradients of the\noutput tensors, produce the gradients of the input tensors. This function\nis automatically transposed to produce output gradients given input\ngradients.\nforwardprop_output_indices: indicates any output_tensors which contain JVPs.\nTypically these will have come from TFE_Py_PackForwardGradients. May be\nNone or an empty sequence if there are no JVP outputs from the operation.", "source": "github-repos"}
{"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n        \n        init_params = super(Framework, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)\n\n        init_params['entry_point'] = json.loads(init_params['hyperparameters'].get(SCRIPT_PARAM_NAME))\n        init_params['source_dir'] = json.loads(init_params['hyperparameters'].get(DIR_PARAM_NAME))\n        init_params['enable_cloudwatch_metrics'] = json.loads(\n            init_params['hyperparameters'].get(CLOUDWATCH_METRICS_PARAM_NAME))\n        init_params['container_log_level'] = json.loads(\n            init_params['hyperparameters'].get(CONTAINER_LOG_LEVEL_PARAM_NAME))\n\n        hyperparameters = {}\n        for k, v in init_params['hyperparameters'].items():\n            \n            if k == '_tuning_objective_metric':\n                if v.startswith('\"') and v.endswith('\"'):\n                    v = v.strip('\"')\n                hyperparameters[k] = v\n            else:\n                hyperparameters[k] = json.loads(v)\n\n        init_params['hyperparameters'] = hyperparameters\n\n        return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be downloaded\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"}
{"code": "def implicit_static(cls, for_type=None, for_types=None):\n    for type_ in cls.__get_type_args(for_type, for_types):\n        implementations = {}\n        for function in cls.required():\n            method = getattr(type_, function.__name__, None)\n            if (not callable(method)):\n                raise TypeError(('%s.implicit invokation on type %r is missing instance method %r.' % (cls.__name__, type_, function.__name__)))\n            implementations[function] = method\n        for function in cls.optional():\n            method = getattr(type_, function.__name__, None)\n            if callable(method):\n                implementations[function] = method\n        return cls.implement(for_type=type_, implementations=implementations)", "docstring": "Automatically generate implementations for a type.\n\nImplement the protocol for the 'for_type' type by dispatching each\nmember function of the protocol to an instance method of the same name\ndeclared on the type 'for_type'.\n\nArguments:\nfor_type: The type to implictly implement the protocol with.\n\nRaises:\nTypeError if not all implementations are provided by 'for_type'.", "source": "codesearchnet"}
{"code": "def sh(cmd, ignore_error=False, cwd=None, shell=False, **kwargs):\n    kwargs.update({'shell': shell, 'cwd': cwd, 'stderr': subprocess.STDOUT, 'stdout': subprocess.PIPE})\n    log.debug((('cmd', cmd), ('kwargs', kwargs)))\n    p = subprocess.Popen(cmd, universal_newlines=True, **kwargs)\n    p_stdout = p.communicate()[0]\n    if (p.returncode and (not ignore_error)):\n        raise subprocess.CalledProcessError(p.returncode, cmd, p_stdout)\n    return p_stdout", "docstring": "Execute a command with subprocess.Popen and block until output\n\nArgs:\ncmd (tuple or str): same as subprocess.Popen args\n\nKeyword Arguments:\nignore_error (bool): if False, raise an Exception if p.returncode is\nnot 0\ncwd (str): current working directory path to run cmd with\nshell (bool): subprocess.Popen ``shell`` kwarg\n\nReturns:\nstr: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)\n\nRaises:\nException: if ignore_error is true and returncode is not zero\n\n.. note:: this executes commands with ``shell=True``: careful with\nshell-escaping.", "source": "codesearchnet"}
{"code": "def subscribe_sns_topic_to_sqs(self, region):\n        \n        sns = self.session.resource('sns', region_name=region)\n        topic = sns.Topic('arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name))\n\n        topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)\n\n        auditlog(\n            event='cloudtrail.subscribe_sns_topic_to_sqs',\n            actor=self.ns,\n            data={\n                'account': self.account.account_name,\n                'region': region\n            }\n        )\n\n        return topic.attributes['TopicArn']", "docstring": "Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed\n\nArgs:\nregion (`str`): Name of the AWS region\n\nReturns:\n`str`", "source": "juraj-google-style"}
{"code": "def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):\n    if max_length is None and len(ids) > self.model_max_length and verbose and (self.model_max_length != 0):\n        if not self.deprecation_warnings.get('sequence-length-is-longer-than-the-specified-maximum', False):\n            logger.warning(f'Token indices sequence length is longer than the specified maximum sequence length for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model will result in indexing errors')\n        self.deprecation_warnings['sequence-length-is-longer-than-the-specified-maximum'] = True", "docstring": "Depending on the input and internal state we might trigger a warning about a sequence that is too long for its\ncorresponding model\n\nArgs:\nids (`List[str]`): The ids produced by the tokenization\nmax_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)\nverbose (`bool`): Whether or not to print more information and warnings.", "source": "github-repos"}
{"code": "def __call__(self, fn):\n        \n\n        def output(app, *args, **kwargs):\n            \n            data = fn(app, *args, **kwargs)\n            index = '{}-{}'.format(self.key, self.variable_type)\n            if self.value is not None:\n                \n                app.tcex.playbook.add_output(self.key, self.value, self.variable_type)\n            elif app.tcex.playbook.output_data.get(index) and not self.overwrite:\n                \n                pass\n            else:\n                \n                app.tcex.playbook.add_output(self.key, data, self.variable_type)\n            return data\n\n        return output", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "async def verify_docker_image_task(chain, link):\n    \n    errors = []\n    \n    worker_type = get_worker_type(link.task)\n    if worker_type not in chain.context.config['valid_docker_image_worker_types']:\n        errors.append(\"{} is not a valid docker-image workerType!\".format(worker_type))\n    raise_on_errors(errors)", "docstring": "Verify the docker image Link.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on.\nlink (LinkOfTrust): the task link we're checking.", "source": "juraj-google-style"}
{"code": "def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid='__unsupported__'):\n    if len(grad_ys) != len(ys):\n        raise ValueError(f'Length mismatch. Passed {len(grad_ys)} grad_ys for {len(ys)} ys')\n    grad_ys = indexed_slices.convert_n_to_tensor_or_indexed_slices(grad_ys, name='grad_y')\n    new_grad_ys = []\n    for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):\n        with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):\n            if grad_y is None:\n                if y.dtype.is_complex:\n                    raise TypeError(f'Gradients of complex tensors ({y}) must set grad_ys (y.dtype = {dtypes.as_dtype(y.dtype).name})')\n                new_grad_ys.append(array_ops.ones(array_ops.shape(y), dtype=y.dtype, name='grad_ys_%d' % i))\n                continue\n            if y.dtype.is_floating or y.dtype.is_integer:\n                if not grad_y.dtype.is_floating and (not grad_y.dtype.is_integer):\n                    raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for real or integer-valued tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be real or integer')\n            elif y.dtype.is_complex:\n                if not grad_y.dtype.is_complex:\n                    raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for complex-valued tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be real')\n            elif y.dtype == dtypes.variant:\n                if grad_y.dtype != dtypes.variant:\n                    raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for variant tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be variant')\n            elif y.dtype == dtypes.resource:\n                if grad_y.dtype == dtypes.resource:\n                    raise TypeError(f'Input gradient {grad_y} for resource tensor {y} should not be a resource')\n            else:\n                raise TypeError(f'Tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be numeric to obtain a default gradient')\n            if isinstance(grad_y, indexed_slices.IndexedSlices):\n                new_grad_ys.append(indexed_slices.IndexedSlices(indices=array_ops.identity(grad_y.indices, name='grad_ys_%d_indices' % i) if isinstance(grad_y.indices, tensor_lib.Tensor) else grad_y.indices, values=array_ops.identity(grad_y.values, name='grad_ys_%d_values' % i) if isinstance(grad_y.values, tensor_lib.Tensor) else grad_y.values, dense_shape=array_ops.identity(grad_y.dense_shape, name='grad_ys_%d_shape' % i) if isinstance(grad_y.dense_shape, tensor_lib.Tensor) else grad_y.dense_shape))\n            else:\n                new_grad_ys.append(array_ops.identity(grad_y, name='grad_ys_%d' % i))\n    return new_grad_ys", "docstring": "Fill in default values for grad_ys.\n\nArgs:\ngrad_ys: List of gradients, can contain None.\nys: List of tensors.\ncolocate_gradients_with_ops: If True, try colocating gradients with\nthe corresponding op.\ngradient_uid: A unique identifier within the graph indicating\nwhich invocation of gradients is being executed. Used to cluster\nops for compilation.\n\nReturns:\nA list of gradients to use, without None.\n\nRaises:\nValueError: If sizes of gradients and inputs don't match\nTypeError: If type of any gradient is not valid for its input.", "source": "github-repos"}
{"code": "def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):\n    crc = 0\n    with FileIO(filename, mode='rb') as f:\n        chunk = f.read(n=block_size)\n        while chunk:\n            crc = binascii.crc32(chunk, crc)\n            chunk = f.read(n=block_size)\n    return hex(crc & 4294967295)", "docstring": "Get the crc32 of the passed file.\n\nThe crc32 of a file can be used for error checking; two files with the same\ncrc32 are considered equivalent. Note that the entire file must be read\nto produce the crc32.\n\nArgs:\nfilename: string, path to a file\nblock_size: Integer, process the file by reading blocks of `block_size`\nbytes. Use -1 to read the file at once.\n\nReturns:\nhexadecimal as string, the crc32 of the passed file.", "source": "github-repos"}
{"code": "def convertTime(self, time):\n        # Only include minutes in the output when they are non-zero.\n        m_format = \"\"\n        if time.minute:\n            m_format = \":%M\"\n\n        timeString = time.strftime(\"%I\" + m_format + \" %p\")\n\n        # Drop a leading zero from the hour, e.g. \"08 PM\" becomes \"8 PM\".\n        if not int(timeString[0]):\n            timeString = timeString[1:]\n\n        return timeString", "docstring": "Convert a datetime object representing a time into a human-ready\nstring that can be read, spoken aloud, etc.\n\nArgs:\ntime (datetime.datetime): A datetime object to be converted into text.\n\nReturns:\nA string representation of the input time, ignoring any day-related\ninformation.", "source": "juraj-google-style"}
{"code": "def create_mutation_file(self, list_of_tuples):\n    self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')\n    idx = 1\n    with open(self.mutation_infile, 'w') as f:\n        for mutant_group in list_of_tuples:\n            mutstring = ''.join(list(map((lambda x: '{}{}{}{};'.format(x[0], x[1], x[2], x[3])), mutant_group)))\n            f.write((mutstring + '\\n'))\n            self.mutation_index_to_group[idx] = mutant_group\n            idx += 1", "docstring": "Create the FoldX file 'individual_list.txt' to run BuildModel upon.\n\nArgs:\nlist_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example::\n\n[\n(('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')),  # Mutation group 1\n(('S', 'A', 321, 'R'), ('T', 'A', 345, 'S'))  # Mutation group 2\n]", "source": "codesearchnet"}
{"code": "def get_backend(backend_class=None):\n    cache_name = '_backend_instance'\n    if (not hasattr(get_backend, cache_name)):\n        backend_class = (backend_class or settings.ROUGHPAGES_BACKEND)\n        if isinstance(backend_class, basestring):\n            (module_path, class_name) = backend_class.rsplit('.', 1)\n            module = import_module(module_path)\n            backend_class = getattr(module, class_name)\n        setattr(get_backend, cache_name, backend_class())\n    return getattr(get_backend, cache_name)", "docstring": "Get backend instance\n\nIf no `backend_class` is specified, the backend class is determined from\nthe value of `settings.ROUGHPAGES_BACKEND`.\n`backend_class` can be a class object or dots separated python import path\n\nReturns:\nbackend instance", "source": "codesearchnet"}
{"code": "def api_server(api_services, **kwargs):\n    if ('protocols' in kwargs):\n        raise TypeError(\"__init__() got an unexpected keyword argument 'protocols'\")\n    from . import _logger as endpoints_logger\n    from . import __version__ as endpoints_version\n    endpoints_logger.info('Initializing Endpoints Framework version %s', endpoints_version)\n    apis_app = _ApiServer(api_services, **kwargs)\n    dispatcher = endpoints_dispatcher.EndpointsDispatcherMiddleware(apis_app)\n    service_name = os.environ.get('ENDPOINTS_SERVICE_NAME')\n    if (not service_name):\n        _logger.warn('Did not specify the ENDPOINTS_SERVICE_NAME environment variable so service control is disabled.  Please specify the name of service in ENDPOINTS_SERVICE_NAME to enable it.')\n        return dispatcher\n    if control_wsgi.running_on_devserver():\n        _logger.warn('Running on local devserver, so service control is disabled.')\n        return dispatcher\n    from endpoints_management import _logger as management_logger\n    from endpoints_management import __version__ as management_version\n    management_logger.info('Initializing Endpoints Management Framework version %s', management_version)\n    controller = control_client.Loaders.DEFAULT.load(service_name)\n    control_client.use_gae_thread()\n    controller.start()\n    return control_wsgi.add_all(dispatcher, app_identity.get_application_id(), controller)", "docstring": "Create an api_server.\n\nThe primary function of this method is to set up the WSGIApplication\ninstance for the service handlers described by the services passed in.\nAdditionally, it registers each API in ApiConfigRegistry for later use\nin the BackendService.getApiConfigs() (API config enumeration service).\nIt also configures service control.\n\nArgs:\napi_services: List of protorpc.remote.Service classes implementing the API\nor a list of _ApiDecorator instances that decorate the service classes\nfor an API.\n**kwargs: Passed through to protorpc.wsgi.service.service_handlers except:\nprotocols - ProtoRPC protocols are not supported, and are disallowed.\n\nReturns:\nA new WSGIApplication that serves the API backend and config registry.\n\nRaises:\nTypeError: if protocols are configured (this feature is not supported).", "source": "codesearchnet"}
{"code": "def restore_saveables(self, tensor_saveables, python_positions, registered_savers=None, reader=None):\n    if reader is None:\n        reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)\n    restore_ops = []\n    for position in python_positions:\n        key = position.object_proto.attributes[0].checkpoint_key\n        position.trackable.deserialize(reader.get_tensor(key))\n    if tensor_saveables or registered_savers:\n        flat_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)\n        new_restore_ops = functional_saver.MultiDeviceSaver.from_saveables(flat_saveables, registered_savers).restore(self.save_path_tensor, self.options)\n        if not context.executing_eagerly():\n            for name, restore_op in sorted(new_restore_ops.items()):\n                restore_ops.append(restore_op)\n                assert name not in self.restore_ops_by_name\n                self.restore_ops_by_name[name] = restore_op\n    return restore_ops", "docstring": "Run or build restore operations for SaveableObjects.\n\nArgs:\ntensor_saveables: `SaveableObject`s which correspond to Tensors.\npython_positions: List of CheckpointPositions bound to `PythonState`\nobjects which must be restored eagerly.\nregistered_savers: a dict mapping saver names-> object name -> Trackable.\nreader: A `CheckpointReader`. If None, a new instance will be created.\n\nReturns:\nWhen graph building, a list of restore operations, either cached or newly\ncreated, to restore `tensor_saveables`.", "source": "github-repos"}
{"code": "def merge_with(self, other):\n        \n        other = as_shape(other)\n        if self._dims is None:\n            return other\n        else:\n            try:\n                self.assert_same_rank(other)\n                new_dims = []\n                for i, dim in enumerate(self._dims):\n                    new_dims.append(dim.merge_with(other[i]))\n                return TensorShape(new_dims)\n            except ValueError:\n                raise ValueError(\"Shapes %s and %s are not convertible\" % (self, other))", "docstring": "Returns a `TensorShape` combining the information in `self` and `other`.\n\nThe dimensions in `self` and `other` are merged elementwise,\naccording to the rules defined for `Dimension.merge_with()`.\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` containing the combined information of `self` and\n`other`.\n\nRaises:\nValueError: If `self` and `other` are not convertible.", "source": "juraj-google-style"}
{"code": "def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):\n    if data_format == 'channels_first':\n        output = repeat_elements(x, depth_factor, axis=2)\n        output = repeat_elements(output, height_factor, axis=3)\n        output = repeat_elements(output, width_factor, axis=4)\n        return output\n    elif data_format == 'channels_last':\n        output = repeat_elements(x, depth_factor, axis=1)\n        output = repeat_elements(output, height_factor, axis=2)\n        output = repeat_elements(output, width_factor, axis=3)\n        return output\n    else:\n        raise ValueError('Invalid data_format: ' + str(data_format))", "docstring": "Resizes the volume contained in a 5D tensor.\n\nArgs:\nx: Tensor or variable to resize.\ndepth_factor: Positive integer.\nheight_factor: Positive integer.\nwidth_factor: Positive integer.\ndata_format: One of `\"channels_first\"`, `\"channels_last\"`.\n\nReturns:\nA tensor.\n\nRaises:\nValueError: if `data_format` is neither\n`channels_last` or `channels_first`.", "source": "github-repos"}
{"code": "def IsRValueAllowed(clean_lines, linenum, typenames):\n  \n  \n  for i in xrange(linenum, 0, -1):\n    line = clean_lines.elided[i]\n    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):\n      if not line.endswith('PUSH'):\n        return False\n      for j in xrange(linenum, clean_lines.NumLines(), 1):\n        line = clean_lines.elided[j]\n        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):\n          return line.endswith('POP')\n\n  \n  line = clean_lines.elided[linenum]\n  if Search(r'\\boperator\\s*=\\s*\\(', line):\n    return IsDeletedOrDefault(clean_lines, linenum)\n\n  \n  match = Match(r'\\s*(?:[\\w<>]+::)*([\\w<>]+)\\s*::\\s*([\\w<>]+)\\s*\\(', line)\n  if match and match.group(1) == match.group(2):\n    return IsDeletedOrDefault(clean_lines, linenum)\n  if Search(r'\\b(?:explicit|inline)\\s+[\\w<>]+\\s*\\(', line):\n    return IsDeletedOrDefault(clean_lines, linenum)\n\n  if Match(r'\\s*[\\w<>]+\\s*\\(', line):\n    previous_line = 'ReturnType'\n    if linenum > 0:\n      previous_line = clean_lines.elided[linenum - 1]\n    if Match(r'^\\s*$', previous_line) or Search(r'[{}:;]\\s*$', previous_line):\n      return IsDeletedOrDefault(clean_lines, linenum)\n\n  \n  while line:\n    match = Match(r'^.*?(\\w+)\\s*&&(.*)$', line)\n    if not match:\n      break\n    if match.group(1) not in typenames:\n      return False\n    line = match.group(2)\n\n  \n  \n  \n  \n  \n  \n  return line.find('&&') < 0", "docstring": "Check if RValue reference is allowed on a particular line.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\ntypenames: set of type names from template-argument-list.\nReturns:\nTrue if line is within the region where RValue references are allowed.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, type, description=''):\n        \n        self._name = name\n        self._type = type\n        self._description = description", "docstring": "Filter constructor.\n\nArgs:\nname (str): Filter name.\ntype (str): Type of the filter (boolean, int, etc.).\ndescription (str): Filter description.", "source": "juraj-google-style"}
{"code": "def __init__(self, lat=None, lng=None, name=None, stop_id=None,\n               field_dict=None, stop_code=None):\n    \n    self._schedule = None\n    if field_dict:\n      if isinstance(field_dict, self.__class__):\n        \n        \n        for k, v in field_dict.iteritems():\n          self.__dict__[k] = v\n      else:\n        self.__dict__.update(field_dict)\n    else:\n      if lat is not None:\n        self.stop_lat = lat\n      if lng is not None:\n        self.stop_lon = lng\n      if name is not None:\n        self.stop_name = name\n      if stop_id is not None:\n        self.stop_id = stop_id\n      if stop_code is not None:\n        self.stop_code = stop_code", "docstring": "Initialize a new Stop object.\n\nArgs:\nfield_dict: A dictionary mapping attribute name to unicode string\nlat: a float, ignored when field_dict is present\nlng: a float, ignored when field_dict is present\nname: a string, ignored when field_dict is present\nstop_id: a string, ignored when field_dict is present\nstop_code: a string, ignored when field_dict is present", "source": "juraj-google-style"}
{"code": "def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):\n    raise NotImplementedError('subclasses must implement')", "docstring": "Resolves the type of a (possibly annotated) function argument.\n\nArgs:\nns: namespace\ntypes_ns: types namespace\nf_name: str, the function name\nname: str, the argument name\ntype_anno: the type annotating the argument, if any\nf_is_local: bool, whether the function is a local function\nReturns:\nSet of the argument types.", "source": "github-repos"}
{"code": "def visualize(logdir, outdir, num_agents, num_episodes, checkpoint=None, env_processes=True):\n    config = utility.load_config(logdir)\n    with tf.device('/cpu:0'):\n        batch_env = utility.define_batch_env((lambda : _create_environment(config, outdir)), num_agents, env_processes)\n        graph = utility.define_simulation_graph(batch_env, config.algorithm, config)\n        total_steps = (num_episodes * config.max_length)\n        loop = _define_loop(graph, total_steps)\n    saver = utility.define_saver(exclude=('.*_temporary.*', 'global_step'))\n    sess_config = tf.ConfigProto(allow_soft_placement=True)\n    sess_config.gpu_options.allow_growth = True\n    with tf.Session(config=sess_config) as sess:\n        utility.initialize_variables(sess, saver, config.logdir, checkpoint, resume=True)\n        for unused_score in loop.run(sess, saver, total_steps):\n            pass\n    batch_env.close()", "docstring": "Recover checkpoint and render videos from it.\n\nArgs:\nlogdir: Logging directory of the trained algorithm.\noutdir: Directory to store rendered videos in.\nnum_agents: Number of environments to simulate in parallel.\nnum_episodes: Total number of episodes to simulate.\ncheckpoint: Checkpoint name to load; defaults to most recent.\nenv_processes: Whether to step environments in separate processes.", "source": "codesearchnet"}
{"code": "def incr(self, counter_name, delta=1):\n    \n    self._state.counters_map.increment(counter_name, delta)", "docstring": "Changes counter by delta.\n\nArgs:\ncounter_name: the name of the counter to change. str.\ndelta: int.", "source": "juraj-google-style"}
{"code": "def _comparison_functions(cls, partial=False):\n        \n\n        def prerelease_cmp(a, b):\n            \n            if a and b:\n                return identifier_list_cmp(a, b)\n            elif a:\n                \n                return -1\n            elif b:\n                return 1\n            else:\n                return 0\n\n        def build_cmp(a, b):\n            \n            if a == b:\n                return 0\n            else:\n                return NotImplemented\n\n        def make_optional(orig_cmp_fun):\n            \n            @functools.wraps(orig_cmp_fun)\n            def alt_cmp_fun(a, b):\n                if a is None or b is None:\n                    return 0\n                return orig_cmp_fun(a, b)\n\n            return alt_cmp_fun\n\n        if partial:\n            return [\n                base_cmp,  \n                make_optional(base_cmp),\n                make_optional(base_cmp),\n                make_optional(prerelease_cmp),\n                make_optional(build_cmp),\n            ]\n        else:\n            return [\n                base_cmp,\n                base_cmp,\n                base_cmp,\n                prerelease_cmp,\n                build_cmp,\n            ]", "docstring": "Retrieve comparison methods to apply on version components.\n\nThis is a private API.\n\nArgs:\npartial (bool): whether to provide 'partial' or 'strict' matching.\n\nReturns:\n5-tuple of cmp-like functions.", "source": "juraj-google-style"}
{"code": "def _process(self, input):\n        # Strip HTML tags from the body text.\n        input = re.sub(\"<[^>]*>\", \" \", input)\n        # Surround punctuation with spaces so each symbol becomes its own token.\n        punct = list(string.punctuation)\n        for symbol in punct:\n            input = input.replace(symbol, \" %s \" % symbol)\n        input = filter(lambda x: x != u'', input.lower().split(' '))\n        return input", "docstring": "Takes in html-mixed body text as a string and returns a list of strings,\nlower case and with punctuation given spacing.\n\nCalled by self._gen_sentence()\n\nArgs:\ninput (string): body text", "source": "juraj-google-style"}
{"code": "async def get_event(self, stream: str, event_number: int, resolve_links=True, require_master=False, correlation_id: uuid.UUID=None) -> msg.Event:\n    correlation_id = (correlation_id or uuid.uuid4())\n    cmd = convo.ReadEvent(stream, event_number, resolve_links, require_master, conversation_id=correlation_id)\n    result = (await self.dispatcher.start_conversation(cmd))\n    return (await result)", "docstring": "Get a single event by stream and event number.\n\nArgs:\nstream: The name of the stream containing the event.\nevent_number: The sequence number of the event to read.\nresolve_links (optional): True if eventstore should\nautomatically resolve Link Events, otherwise False.\nrequire_master (optional): True if this command must be\nsent directly to the master node, otherwise False.\ncorrelation_id (optional): A unique identifier for this\ncommand.\n\nReturns:\nThe resolved event if found, else None.\n\nExamples:\n\n>>> async with connection() as conn:\n>>>     await conn.publish(\"inventory_item-1\", \"item_created\")\n>>>     event = await conn.get_event(\"inventory_item-1\", 1)\n>>>     print(event)", "source": "codesearchnet"}
{"code": "def visit_comparison(self, comparison: _evaluation.ComparisonNode) -> _sql_data_types.Select:\n    lhs_result = self.visit(comparison.left)\n    rhs_result = self.visit(comparison.right)\n    lhs_subquery = lhs_result.as_operand()\n    rhs_subquery = rhs_result.as_operand()\n    sql_value = f'{lhs_subquery} {comparison.op} {rhs_subquery}'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias='comparison_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath comparison to Spark SQL.\n\nEach operand is expected to be a collection of a single element. Operands\ncan be strings, integers, decimals, dates, datetimes, and times. Comparison\nwill perform implicit conversion between applicable types.\n\nArgs:\ncomparison: The `Comparison` Expression node.\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def _psum(tensor, axis_name=None):\n    if axis_name != _pmap_config.axis_name():\n        raise ValueError('axis_name (%s) is not equal to that of the surrounding pmap (%s)' % (axis_name, _pmap_config.axis_name()))\n    devices = _pmap_config.devices()\n    if devices is None:\n        raise ValueError(\"Can't retrieve the device list from the surrounding pmap\")\n    tensor = tf_np.asarray(tensor)\n    if tpu_devices(devices):\n        is_int64 = False\n        is_float64 = False\n        if tensor.dtype == np.int64:\n            is_int64 = True\n            tensor = tensor.astype(np.int32)\n        elif tensor.dtype == np.float64:\n            is_float64 = True\n            tensor = tensor.astype(np.float32)\n        tensor = tpu_ops.cross_replica_sum(tensor)\n        if is_int64:\n            tensor = math_ops.cast(tensor, dtypes.int64)\n        elif is_float64:\n            tensor = math_ops.cast(tensor, dtypes.float64)\n    else:\n        tensor = gen_collective_ops.collective_reduce(input=tensor, group_size=len(devices), group_key=_GROUP_KEY, instance_key=_get_instance_key(), merge_op='Add', final_op='Id', subdiv_offsets=(0,))\n    return tf_np.asarray(tensor)", "docstring": "Sum all-reduction.\n\nArgs:\ntensor: A tensor.\naxis_name: The axis name to reduce. Must equal to that of the surrounding\npmap.\n\nReturns:\nThe sum of the `tensor` replicas on each participating devices.", "source": "github-repos"}
{"code": "def add_arguments(self, parser):\n        \n        group = parser.add_mutually_exclusive_group(required=True)\n        group.add_argument('-d', '--downgrade', action='store_true',\n                           help='downgrade the J-Link firmware')\n        group.add_argument('-u', '--upgrade', action='store_true',\n                           help='upgrade the J-Link firmware')\n        return self.add_common_arguments(parser, False)", "docstring": "Adds the arguments for the firmware command.\n\nArgs:\nself (FirmwareCommand): the ``FirmwareCommand`` instance\nparser (argparse.ArgumentParser): parser to add the commands to\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def find_log_dir(log_dir=None):\n  \n  \n  if log_dir:\n    \n    dirs = [log_dir]\n  elif FLAGS['log_dir'].value:\n    \n    \n    dirs = [FLAGS['log_dir'].value]\n  else:\n    dirs = ['/tmp/', './']\n\n  \n  for d in dirs:\n    if os.path.isdir(d) and os.access(d, os.W_OK):\n      return d\n  _absl_logger.fatal(\"Can't find a writable directory for logs, tried %s\", dirs)", "docstring": "Returns the most suitable directory to put log files into.\n\nArgs:\nlog_dir: str|None, if specified, the logfile(s) will be created in that\ndirectory.  Otherwise if the --log_dir command-line flag is provided,\nthe logfile will be created in that directory.  Otherwise the logfile\nwill be created in a standard location.", "source": "juraj-google-style"}
{"code": "def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        data = self._validate_dtypes_min_max(axis, numeric_only)\n        return data._reduce_dimension(\n            data._query_compiler.min(\n                axis=axis,\n                skipna=skipna,\n                level=level,\n                numeric_only=numeric_only,\n                **kwargs\n            )\n        )", "docstring": "Perform min across the DataFrame.\n\nArgs:\naxis (int): The axis to take the min on.\nskipna (bool): True to skip NA values, false otherwise.\n\nReturns:\nThe min of the DataFrame.", "source": "juraj-google-style"}
{"code": "def atomic_write_string_to_file(filename, contents, overwrite):\n    temp_pathname = ((tf.compat.as_bytes(filename) + tf.compat.as_bytes('.tmp')) + tf.compat.as_bytes(uuid.uuid4().hex))\n    with tf_v1.gfile.GFile(temp_pathname, mode='w') as f:\n        f.write(contents)\n    try:\n        tf_v1.gfile.Rename(temp_pathname, filename, overwrite)\n    except tf.errors.OpError:\n        tf_v1.gfile.Remove(temp_pathname)\n        raise", "docstring": "Writes to `filename` atomically.\n\nThis means that when `filename` appears in the filesystem, it will contain\nall of `contents`. With write_string_to_file, it is possible for the file\nto appear in the filesystem with `contents` only partially written.\n\nAccomplished by writing to a temp file and then renaming it.\n\nArgs:\nfilename: string, pathname for a file\ncontents: string, contents that need to be written to the file\noverwrite: boolean, if false it's an error for `filename` to be occupied by\nan existing file.", "source": "codesearchnet"}
{"code": "def push_file(self, source, dest_dir):\n    local_dest = ((dest_dir + '/') + os.path.basename(source))\n    if (os.path.dirname(source) != dest_dir):\n        try:\n            shutil.copyfile(source, local_dest)\n            os.chmod(local_dest, 511)\n        except OSError as e:\n            raise FileCopyException(e, self.hostname)\n    return local_dest", "docstring": "If the source file's dirpath is the same as dest_dir, a copy\nis not necessary, and nothing is done. Else a copy is made.\n\nArgs:\n- source (string) : Path to the source file\n- dest_dir (string) : Path to the directory to which the file is to be copied\n\nReturns:\n- destination_path (String) : Absolute path of the destination file\n\nRaises:\n- FileCopyException : If file copy failed.", "source": "codesearchnet"}
{"code": "def add_subassistants_to(cls, parser, assistant_tuple, level, alias=None):\n    name = (alias or assistant_tuple[0].name)\n    p = parser.add_parser(name, description=assistant_tuple[0].description, argument_default=argparse.SUPPRESS)\n    for arg in assistant_tuple[0].args:\n        arg.add_argument_to(p)\n    if (len(assistant_tuple[1]) > 0):\n        subparsers = cls._add_subparsers_required(p, dest=settings.SUBASSISTANT_N_STRING.format(level), title=cls.subparsers_str, description=cls.subparsers_desc)\n        for subas_tuple in sorted(assistant_tuple[1], key=(lambda x: x[0].name)):\n            cls.add_subassistants_to(subparsers, subas_tuple, (level + 1))\n    elif (level == 1):\n        subparsers = cls._add_subparsers_required(p, dest=settings.SUBASSISTANT_N_STRING.format(level), title=cls.subparsers_str, description=devassistant_argparse.ArgumentParser.no_assistants_msg)", "docstring": "Adds assistant from given part of assistant tree and all its subassistants to\na given argument parser.\n\nArgs:\nparser: instance of devassistant_argparse.ArgumentParser\nassistant_tuple: part of assistant tree (see generate_argument_parser doc)\nlevel: level of subassistants that given assistant is at", "source": "codesearchnet"}
{"code": "def get_schema_node(self, path: SchemaPath) -> Optional[SchemaNode]:\n    return self.schema.get_schema_descendant(self.schema_data.path2route(path))", "docstring": "Return the schema node addressed by a schema path.\n\nArgs:\npath: Schema path.\n\nReturns:\nSchema node if found in the schema, or ``None``.\n\nRaises:\nInvalidSchemaPath: If the schema path is invalid.", "source": "codesearchnet"}
{"code": "def _create(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):\n    kwargs = UserAgent._environment_variables(**kwargs)\n    if ('user_agent' in kwargs):\n        user_agent = kwargs['user_agent']\n        del kwargs['user_agent']\n    prefix = kwargs.get('prefix')\n    if prefix:\n        del kwargs['prefix']\n    else:\n        prefix = ('HDXPythonUtilities/%s' % get_utils_version())\n    if (not user_agent):\n        ua = cls._load(prefix, user_agent_config_yaml, user_agent_lookup)\n    else:\n        ua = cls._construct(kwargs, prefix, user_agent)\n    return ua", "docstring": "Get full user agent string\n\nArgs:\nuser_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed.\nuser_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.\nuser_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.\n\nReturns:\nstr: Full user agent string", "source": "codesearchnet"}
{"code": "def connect(self):\n    if self.is_connected():\n        raise tornado.gen.Return(True)\n    cb1 = self._read_callback\n    cb2 = self._close_callback\n    self.__callback_queue = collections.deque()\n    self._reply_list = []\n    self.__reader = hiredis.Reader(replyError=ClientError)\n    kwargs = self.connection_kwargs\n    self.__connection = Connection(cb1, cb2, **kwargs)\n    connection_status = (yield self.__connection.connect())\n    if (connection_status is not True):\n        raise tornado.gen.Return(False)\n    if (self.password is not None):\n        authentication_status = (yield self._call('AUTH', self.password))\n        if (authentication_status != b'OK'):\n            LOG.warning('impossible to connect: bad password')\n            self.__connection.disconnect()\n            raise tornado.gen.Return(False)\n    if (self.db != 0):\n        db_status = (yield self._call('SELECT', self.db))\n        if (db_status != b'OK'):\n            LOG.warning(\"can't select db %s\", self.db)\n            raise tornado.gen.Return(False)\n    raise tornado.gen.Return(True)", "docstring": "Connects the client object to redis.\n\nIt's safe to use this method even if you are already connected.\nNote: this method is useless with autoconnect mode (default).\n\nReturns:\na Future object with True as result if the connection was ok.", "source": "codesearchnet"}
{"code": "def get_regularization_loss(scope=None, name='total_regularization_loss'):\n    losses = get_regularization_losses(scope)\n    if losses:\n        return math_ops.add_n(losses, name=name)\n    else:\n        return constant_op.constant(0.0)", "docstring": "Gets the total regularization loss.\n\nArgs:\nscope: An optional scope name for filtering the losses to return.\nname: The name of the returned tensor.\n\nReturns:\nA scalar regularization loss.", "source": "github-repos"}
{"code": "def restore_saveables(self, tensor_saveables: Dict[str, saveable_object.SaveableObject], python_positions: List[restore_lib.CheckpointPosition], registered_savers: Optional[Dict[str, Dict[str, base.Trackable]]]=None, reader: py_checkpoint_reader.NewCheckpointReader=None) -> Optional[List[ops.Operation]]:\n    del registered_savers\n    restore_ops = []\n    if python_positions:\n        if reader is None:\n            reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)\n        for position in python_positions:\n            key = position.object_proto.attributes[0].checkpoint_key\n            position.trackable.deserialize(reader.get_tensor(key))\n    if tensor_saveables:\n        validated_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)\n        validated_names = set((saveable.name for saveable in validated_saveables))\n        if set(tensor_saveables.keys()) != validated_names:\n            raise AssertionError('Saveable keys changed when validating. Got back %s, was expecting %s' % (tensor_saveables.keys(), validated_names))\n        new_restore_ops = _DSaver(self._mesh, validated_saveables).restore(self.save_path_tensor, self.options)\n        if not context.executing_eagerly():\n            for name, restore_op in sorted(new_restore_ops.items()):\n                restore_ops.append(restore_op)\n                assert name not in self.restore_ops_by_name\n                self.restore_ops_by_name[name] = restore_op\n    return restore_ops", "docstring": "Run or build restore operations for SaveableObjects.\n\nArgs:\ntensor_saveables: `SaveableObject`s which correspond to Tensors.\npython_positions: `CheckpointPosition`s which correspond to `PythonState`\nTrackables bound to the checkpoint.\nregistered_savers: a dict mapping saver names-> object name -> Trackable.\nThis argument is not implemented for DTensorCheckpoint.\nreader: A CheckpointReader. Creates one lazily if None.\n\nReturns:\nWhen graph building, a list of restore operations, either cached or newly\ncreated, to restore `tensor_saveables`.", "source": "github-repos"}
{"code": "def set_targets(x, delta=10):\n    data = []\n    for (row, _) in x.iterrows():\n        if (row == (x.shape[0] - 1)):\n            break\n        curr_close = x.close[row]\n        next_close = x.close[(row + 1)]\n        high_close = (next_close + (delta / 2))\n        low_close = (next_close - (delta / 2))\n        if (curr_close < low_close):\n            target = TARGET_CODES['bearish']\n        elif (curr_close > high_close):\n            target = TARGET_CODES['bullish']\n        else:\n            target = TARGET_CODES['neutral']\n        data.append(target)\n    return pd.Series(data=data, dtype=np.int32, name='target')", "docstring": "Sets target market trend for a date\n\nArgs:\nx: Pandas DataFrame of market features\ndelta: Positive number defining a price buffer between what is\nclassified as a bullish/bearish market for the training set.\ndelta is equivalent to the total size of the neutral price zone.\ndelta / 2 is equivalent to either the positive or negative\nthreshold of the neutral price zone.\n\nReturns:\nPandas Series of numpy int32 market trend targets", "source": "codesearchnet"}
{"code": "def scan(self, func=operator.add):\n    if self.closed():\n        raise ValueError('Attempt to call scan() on a closed Queryable.')\n    if (not is_callable(func)):\n        raise TypeError('scan() parameter func={0} is not callable'.format(repr(func)))\n    return self._create(self._generate_scan_result(func))", "docstring": "An inclusive prefix sum which returns the cumulative application of the\nsupplied function up to and including the current element.\n\nArgs:\nfunc: An optional binary function which is commutative - that is,\nthe order of the arguments is unimportant.  Defaults to a\nsumming operator.\n\nReturns:\nA Queryable such that the nth element is the sum of the first n\nelements of the source sequence.\n\nRaises:\nValueError: If the Queryable has been closed.\nTypeError: If func is not callable.", "source": "codesearchnet"}
{"code": "def all_elements_equal(value):\n    if is_scalar(value):\n        return True\n    return np.array((value == value.flatten()[0])).all()", "docstring": "Checks if all elements in the given value are equal to each other.\n\nIf the input is a single value the result is trivial. If not, we compare all the values to see\nif they are exactly the same.\n\nArgs:\nvalue (ndarray or number): a numpy array or a single number.\n\nReturns:\nbool: true if all elements are equal to each other, false otherwise", "source": "codesearchnet"}
{"code": "def _wait_for_any_event(events, timeout_s):\n\n    def any_event_set():\n        return any((event.is_set() for event in events))\n    result = timeouts.loop_until_timeout_or_true(timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)\n    return (result or any_event_set())", "docstring": "Wait for any in a list of threading.Event's to be set.\n\nArgs:\nevents: List of threading.Event's.\ntimeout_s: Max duration in seconds to wait before returning.\n\nReturns:\nTrue if at least one event was set before the timeout expired, else False.", "source": "codesearchnet"}
{"code": "def _ParseGUIDTable(self, parser_mediator, cache, database, esedb_table, values_map, event_data_class):\n    if (cache is None):\n        raise ValueError('Missing cache value.')\n    if (database is None):\n        raise ValueError('Missing database value.')\n    if (esedb_table is None):\n        raise ValueError('Missing table value.')\n    identifier_mappings = self._GetIdentifierMappings(parser_mediator, cache, database)\n    for esedb_record in esedb_table.records:\n        if parser_mediator.abort:\n            break\n        record_values = self._GetRecordValues(parser_mediator, esedb_table.name, esedb_record, value_mappings=self._GUID_TABLE_VALUE_MAPPINGS)\n        event_data = event_data_class()\n        for (attribute_name, column_name) in values_map.items():\n            record_value = record_values.get(column_name, None)\n            if (attribute_name in ('application', 'user_identifier')):\n                record_value = identifier_mappings.get(record_value, record_value)\n            setattr(event_data, attribute_name, record_value)\n        timestamp = record_values.get('TimeStamp')\n        if timestamp:\n            date_time = dfdatetime_ole_automation_date.OLEAutomationDate(timestamp=timestamp)\n            timestamp_description = definitions.TIME_DESCRIPTION_SAMPLE\n        else:\n            date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n            timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n        event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        timestamp = record_values.get('ConnectStartTime')\n        if timestamp:\n            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FIRST_CONNECTED)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a table with a GUID as name.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (ESEDBCache): cache, which contains information about\nthe identifiers stored in the SruDbIdMapTable table.\ndatabase (pyesedb.file): ESE database.\nesedb_table (pyesedb.table): table.\nvalues_map (dict[str, str]): mapping of table columns to event data\nattribute names.\nevent_data_class (type): event data class.\n\nRaises:\nValueError: if the cache, database or table value is missing.", "source": "codesearchnet"}
{"code": "def diff(self, periods=1, axis=0):\n        \n        axis = self._get_axis_number(axis)\n        return self.__constructor__(\n            query_compiler=self._query_compiler.diff(periods=periods, axis=axis)\n        )", "docstring": "Finds the difference between elements on the axis requested\n\nArgs:\nperiods: Periods to shift for forming difference\naxis: Take difference over rows or columns\n\nReturns:\nDataFrame with the diff applied", "source": "juraj-google-style"}
{"code": "def _ParseEventData(self, variable_length_section):\n    \n    event_data = WinJobEventData()\n    event_data.application = (\n        variable_length_section.application_name.rstrip('\\x00'))\n    event_data.comment = variable_length_section.comment.rstrip('\\x00')\n    event_data.parameters = (\n        variable_length_section.parameters.rstrip('\\x00'))\n    event_data.username = variable_length_section.author.rstrip('\\x00')\n    event_data.working_directory = (\n        variable_length_section.working_directory.rstrip('\\x00'))\n\n    return event_data", "docstring": "Parses the event data from a variable-length data section.\n\nArgs:\nvariable_length_section (job_variable_length_data_section): a\nWindows Scheduled Task job variable-length data section.\n\nReturns:\nWinJobEventData: event data of the job file.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    shutdown_value = registry_key.GetValueByName('ShutdownTime')\n    if (not shutdown_value):\n        return\n    try:\n        date_time = self._ParseFiletime(shutdown_value.data)\n    except errors.ParseError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to determine shutdown timestamp with error: {0!s}'.format(exception))\n        return\n    if (not date_time):\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    event_data = ShutdownWindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = shutdown_value.offset\n    event_data.value_name = shutdown_value.name\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a ShutdownTime Windows Registry value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "codesearchnet"}
{"code": "def _load_metadata_files(self):\n    metadata_paths = file_io.get_matching_files(os.path.join(self._dump_root, '*%s' % self._METADATA_SUFFIX))\n    if not metadata_paths:\n        raise ValueError('Cannot find any tfdbg metadata file in directory: %s' % self._dump_root)\n    wall_times = []\n    run_ids = []\n    tensorflow_versions = []\n    file_versions = []\n    for metadata_path in metadata_paths:\n        reader = tf_record.tf_record_random_reader(metadata_path)\n        try:\n            record = reader.read(0)[0]\n            debug_event = debug_event_pb2.DebugEvent.FromString(record)\n            wall_times.append(debug_event.wall_time)\n            run_ids.append(debug_event.debug_metadata.tfdbg_run_id)\n            tensorflow_versions.append(debug_event.debug_metadata.tensorflow_version)\n            file_versions.append(debug_event.debug_metadata.file_version)\n        except Exception as e:\n            raise errors.DataLossError(None, None, 'Error reading tfdbg metadata from paths %s' % metadata_paths) from e\n        finally:\n            reader.close()\n    self._starting_wall_time = wall_times[0]\n    self._tfdbg_run_id = run_ids[0]\n    self._tensorflow_version = tensorflow_versions[0]\n    self._file_version = file_versions[0]\n    if len(metadata_paths) == 1:\n        return metadata_paths\n    num_no_id = len([run_id for run_id in run_ids if not run_id])\n    if num_no_id:\n        paths_without_run_id = [metadata_path for metadata_path, run_id in zip(metadata_paths, run_ids) if not run_id]\n        raise ValueError('Found %d tfdbg metadata files and %d of them do not have tfdbg run ids. The metadata files without run ids are: %s' % (len(run_ids), num_no_id, paths_without_run_id))\n    elif len(set(run_ids)) != 1:\n        raise ValueError('Unexpected: Found multiple (%d) tfdbg2 runs in directory %s' % (len(set(run_ids)), self._dump_root))\n    paths_and_timestamps = sorted(zip(metadata_paths, wall_times), key=lambda t: t[1])\n    self._starting_wall_time = paths_and_timestamps[0][1]\n    return [path[0] for path in paths_and_timestamps]", "docstring": "Load and parse metadata files in the dump root.\n\nCheck that all metadata files have a common tfdbg_run_id, and raise\na ValueError if their tfdbg_run_ids differ.\n\nReturns:\nA list of metadata file paths in ascending order of their starting\nwall_time timestamp.", "source": "github-repos"}
{"code": "def map_across_blocks(self, map_func):\n        \n        preprocessed_map_func = self.preprocess_func(map_func)\n        new_partitions = np.array(\n            [\n                [part.apply(preprocessed_map_func) for part in row_of_parts]\n                for row_of_parts in self.partitions\n            ]\n        )\n        return self.__constructor__(new_partitions)", "docstring": "Applies `map_func` to every partition.\n\nArgs:\nmap_func: The function to apply.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "juraj-google-style"}
{"code": "def cmRecall(cm, average=True):\n    \n\n    \n    cm = cm.type(torch.float64)\n    recall = cm.diag() / (cm.sum(dim=1) + 1e-15)\n    if average:\n        return recall.mean()\n    return recall", "docstring": "Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.\nArgs:\ncm (ConfusionMatrix): instance of confusion matrix metric\naverage (bool, optional): if True metric value is averaged over all classes\nReturns:\nMetricsLambda", "source": "juraj-google-style"}
{"code": "def SmartBroadcastGradientArgs(x, y, grad=None):\n    del grad\n    x_shape = array_ops.shape(x)\n    y_shape = array_ops.shape(y)\n    if not context.executing_eagerly() and isinstance(x, tensor.Tensor) and isinstance(y, tensor.Tensor):\n        x_axes, y_axes = _InferGradientReductionAxes(x.shape, y.shape)\n    else:\n        x_axes, y_axes = (None, None)\n    if x_axes is None or y_axes is None:\n        x_axes, y_axes = gen_array_ops.broadcast_gradient_args(x_shape, y_shape)\n        x_must_reduce = True\n        y_must_reduce = True\n    else:\n        x_must_reduce = x_axes or x.shape.rank < y.shape.rank\n        y_must_reduce = y_axes or y.shape.rank < x.shape.rank\n    return ((x_shape, x_axes, x_must_reduce), (y_shape, y_axes, y_must_reduce))", "docstring": "Version of `BroadcastGradientArgs` optimized for partially-known shapes.\n\nArgs:\nx: The first argument of a broadcasting binary op.\ny: The second argument of a broadcasting binary op.\ngrad: Deprecated.\n\nReturns:\nA pair of triples, one per argument with\n* Shape of the argument (tensor);\n* Reduction axes for the argument (list or tensor);\n* Boolean indicating whether the reduction must be applied.", "source": "github-repos"}
{"code": "def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int, outliers_removal_axis: OutliersRemovalAxis) -> None:\n    if (outliers_start_index > 0):\n        x = ctypes.c_int(0)\n        y = ctypes.c_int(0)\n        z = ctypes.c_int(0)\n        outliers_removal_axis_values: Dict[(OutliersRemovalAxis, ctypes.c_int)] = {projectors.TH1AxisType.x_axis: x, projectors.TH1AxisType.y_axis: y, projectors.TH1AxisType.z_axis: z}\n        for index in range(0, hist.GetNcells()):\n            hist.GetBinXYZ(index, x, y, z)\n            if (hist.GetBinContent(index) < hist.GetBinError(index)):\n                logger.warning(f'Bin content < error. Name: {hist.GetName()}, Bin content: {hist.GetBinContent(index)}, Bin error: {hist.GetBinError(index)}, index: {index}, ({x.value}, {y.value})')\n            if (outliers_removal_axis_values[outliers_removal_axis].value >= outliers_start_index):\n                hist.SetBinContent(index, 0)\n                hist.SetBinError(index, 0)\n    else:\n        logger.info(f'Hist {hist.GetName()} did not have any outliers to cut')", "docstring": "Remove outliers from a given histogram.\n\nArgs:\nhist: Histogram to check for outliers.\noutliers_start_index: Index in the truth axis where outliers begin.\noutliers_removal_axis: Axis along which outlier removal will be performed. Usually\nthe particle level axis.\nReturns:\nNone. The histogram is modified in place.", "source": "codesearchnet"}
{"code": "def site_occupation_statistics(self):\n    if (self.time == 0.0):\n        return None\n    occupation_stats = {label: 0.0 for label in self.site_labels}\n    for site in self.sites:\n        occupation_stats[site.label] += site.time_occupied\n    for label in self.site_labels:\n        occupation_stats[label] /= self.time\n    return occupation_stats", "docstring": "Average site occupation for each site type\n\nArgs:\nNone\n\nReturns:\n(Dict(Str:Float)): Dictionary of occupation statistics, e.g.::\n\n{ 'A' : 2.5, 'B' : 25.3 }", "source": "codesearchnet"}
{"code": "def combine(a1, a2):\n    if (not isinstance(a1, list)):\n        a1 = [a1]\n    if (not isinstance(a2, list)):\n        a2 = [a2]\n    return (a1 + a2)", "docstring": "Combine two arguments into a single flat list\n\nIt is used when you are not sure whether arguments are lists but want to combine them into one flat list\n\nArgs:\na1: list or other thing\na2: list or other thing\n\nReturns:\nlist: a flat list containing a1 and a2", "source": "codesearchnet"}
{"code": "def get_common_properties(root):\n    \n    properties = {}\n\n    for elem in root.iterfind('commonProperties/property'):\n        name = elem.attrib['name']\n\n        if name == 'initial composition':\n            properties['composition'] = {'species': [], 'kind': None}\n\n            for child in elem.iter('component'):\n                spec = {}\n                spec['species-name'] = child.find('speciesLink').attrib['preferredKey']\n                units = child.find('amount').attrib['units']\n\n                \n                try:\n                    spec['InChI'] = child.find('speciesLink').attrib['InChI']\n                except KeyError:\n                    \n                    warn('Missing InChI for species ' + spec['species-name'])\n                    pass\n\n                \n                if units in ['mole fraction', 'mass fraction', 'mole percent']:\n                    spec['amount'] = [float(child.find('amount').text)]\n                elif units == 'percent':\n                    \n                    warn('Assuming percent in composition means mole percent')\n                    spec['amount'] = [float(child.find('amount').text)]\n                    units = 'mole percent'\n                elif units == 'ppm':\n                    \n                    warn('Assuming molar ppm in composition and converting to mole fraction')\n                    spec['amount'] = [float(child.find('amount').text) * 1.e-6]\n                    units = 'mole fraction'\n                elif units == 'ppb':\n                    \n                    warn('Assuming molar ppb in composition and converting to mole fraction')\n                    spec['amount'] = [float(child.find('amount').text) * 1.e-9]\n                    units = 'mole fraction'\n                else:\n                    raise KeywordError('Composition units need to be one of: mole fraction, '\n                                       'mass fraction, mole percent, percent, ppm, or ppb.'\n                                       )\n\n                properties['composition']['species'].append(spec)\n\n                \n                if properties['composition']['kind'] is None:\n                    properties['composition']['kind'] = units\n                elif properties['composition']['kind'] != units:\n                    raise KeywordError('composition units ' + units +\n                                       ' not consistent with ' +\n                                       properties['composition']['kind']\n                                       )\n\n        elif name in datagroup_properties:\n            field = name.replace(' ', '-')\n            units = elem.attrib['units']\n            if units == 'Torr':\n                units = 'torr'\n            quantity = 1.0 * unit_registry(units)\n            try:\n                quantity.to(property_units[field])\n            except pint.DimensionalityError:\n                raise KeywordError('units incompatible for property ' + name)\n\n            properties[field] = [' '.join([elem.find('value').text, units])]\n\n        else:\n            raise KeywordError('Property ' + name + ' not supported as common property')\n\n    return properties", "docstring": "Read common properties from root of ReSpecTh XML file.\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with common properties", "source": "juraj-google-style"}
{"code": "def convert_flatten(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting flatten ...')\n\n    if names == 'short':\n        tf_name = 'R' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    reshape = keras.layers.Reshape([-1], name=tf_name)\n    layers[scope_name] = reshape(layers[inputs[0]])", "docstring": "Convert reshape(view).\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def __message_to_schema(self, message_type):\n    name = self.__normalized_name(message_type)\n    schema = {'id': name, 'type': 'object'}\n    if message_type.__doc__:\n        schema['description'] = message_type.__doc__\n    properties = {}\n    for field in message_type.all_fields():\n        descriptor = {}\n        type_info = {}\n        if (type(field) == messages.MessageField):\n            field_type = field.type().__class__\n            type_info['$ref'] = self.add_message(field_type)\n            if field_type.__doc__:\n                descriptor['description'] = field_type.__doc__\n        else:\n            schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(type(field), self.__DEFAULT_SCHEMA_TYPE)\n            if isinstance(schema_type, dict):\n                variant_map = schema_type\n                variant = getattr(field, 'variant', None)\n                if (variant in variant_map):\n                    schema_type = variant_map[variant]\n                else:\n                    schema_type = variant_map[None]\n            type_info['type'] = schema_type[0]\n            if schema_type[1]:\n                type_info['format'] = schema_type[1]\n        if (type(field) == messages.EnumField):\n            sorted_enums = sorted([enum_info for enum_info in field.type], key=(lambda enum_info: enum_info.number))\n            type_info['enum'] = [enum_info.name for enum_info in sorted_enums]\n        if field.required:\n            descriptor['required'] = True\n        if field.default:\n            if (type(field) == messages.EnumField):\n                descriptor['default'] = str(field.default)\n            else:\n                descriptor['default'] = field.default\n        if field.repeated:\n            descriptor['items'] = type_info\n            descriptor['type'] = 'array'\n        else:\n            descriptor.update(type_info)\n        properties[field.name] = descriptor\n    schema['properties'] = properties\n    return schema", "docstring": "Parse a single message into JSON Schema.\n\nWill recursively descend the message structure\nand also parse other messages references via MessageFields.\n\nArgs:\nmessage_type: protorpc.messages.Message class to parse.\n\nReturns:\nAn object representation of the schema.", "source": "codesearchnet"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        tstream = BytearrayStream()\n\n        self.revocation_code.write(tstream, kmip_version=kmip_version)\n        if self.revocation_message is not None:\n            self.revocation_message.write(tstream, kmip_version=kmip_version)\n\n        \n        self.length = tstream.length()\n        super(RevocationReason, self).write(ostream, kmip_version=kmip_version)\n        ostream.write(tstream.buffer)", "docstring": "Write the data encoding the RevocationReason object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def start_reporter(redis_address, stdout_file=None, stderr_file=None, redis_password=None):\n    reporter_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'reporter.py')\n    command = [sys.executable, '-u', reporter_filepath, '--redis-address={}'.format(redis_address)]\n    if redis_password:\n        command += ['--redis-password', redis_password]\n    try:\n        import psutil\n    except ImportError:\n        logger.warning(\"Failed to start the reporter. The reporter requires 'pip install psutil'.\")\n        return None\n    process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_REPORTER, stdout_file=stdout_file, stderr_file=stderr_file)\n    return process_info", "docstring": "Start a reporter process.\n\nArgs:\nredis_address (str): The address of the Redis instance.\nstdout_file: A file handle opened for writing to redirect stdout to. If\nno redirection should happen, then this should be None.\nstderr_file: A file handle opened for writing to redirect stderr to. If\nno redirection should happen, then this should be None.\nredis_password (str): The password of the redis server.\n\nReturns:\nProcessInfo for the process that was started.", "source": "codesearchnet"}
{"code": "def _MergeIdenticalCaseInsensitive(self, a, b):\n    \n    if a.lower() != b.lower():\n      raise MergeError(\"values must be the same (case insensitive) \"\n                       \"('%s' vs '%s')\" % (transitfeed.EncodeUnicode(a),\n                                           transitfeed.EncodeUnicode(b)))\n    return b", "docstring": "Tries to merge two strings.\n\nThe strings are required to be the same ignoring case. The second string is\nalways used as the merged value.\n\nArgs:\na: The first string.\nb: The second string.\n\nReturns:\nThe merged string. This is equal to the second string.\n\nRaises:\nMergeError: The strings were not the same ignoring case.", "source": "juraj-google-style"}
{"code": "def add(self, other):\n        \n        if isinstance(other, SeriesWeld):\n            other = other.expr\n        return SeriesWeld(\n            grizzly_impl.element_wise_op(\n                self.expr,\n                other,\n                \"+\",\n                self.weld_type\n            ),\n            self.weld_type,\n            self.df,\n            self.column_name\n        )", "docstring": "Element-wise addition of this series with another series or scalar.\n\nArgs:\nother (SeriesWeld or scalar): Values to add to this series.\n\nReturns:\nSeriesWeld: A lazily evaluated series representing the element-wise sum.", "source": "juraj-google-style"}
{"code": "def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None):\n    variable_type = entities.Variable.Type.BOOLEAN\n    return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)", "docstring": "Returns value for a certain boolean variable attached to a feature flag.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nBoolean value of the variable. None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "codesearchnet"}
{"code": "def center_of_mass(self, time):\n        \n        if self.start_time <= time <= self.end_time:\n            diff = time - self.start_time\n            valid = np.flatnonzero(self.masks[diff] != 0)\n            if valid.size > 0:\n                com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *\n                                                                                 self.x[diff].ravel()[valid])\n                com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *\n                                                                                 self.y[diff].ravel()[valid])\n            else:\n                com_x = np.mean(self.x[diff])\n                com_y = np.mean(self.y[diff])\n        else:\n            com_x = None\n            com_y = None\n        return com_x, com_y", "docstring": "Calculate the center of mass at a given timestep.\n\nArgs:\ntime: Time at which the center of mass calculation is performed\n\nReturns:\nThe x- and y-coordinates of the center of mass.", "source": "juraj-google-style"}
{"code": "def dict_to_csv(orig_dict, file_name, field_names_tuple, file_location):\n    \n    file = __os.path.join(file_location, file_name)\n    csv_write = open(file, 'a')\n    writer = __csv.DictWriter(csv_write, fieldnames=field_names_tuple, lineterminator='\\n')\n    headers = dict((n, n) for n in field_names_tuple)\n    writer.writerow(headers)\n    for dict_key, a in list(orig_dict.items()):\n        writer.writerow(orig_dict[dict_key])\n    csv_write.close()\n    return file_name", "docstring": "Function to export a dictionary to a csv file\nArgs:\norig_dict: The dictionary you want exported\nfile_name: The name of the exported file\nfield_names_tuple: The fieldnames in a tuple\nfile_location: The location of the file, derive from the os module\n\nReturns: returns the filename info", "source": "juraj-google-style"}
{"code": "def controlled_by(self, *control_qubits: Qid) -> 'Gate':\n        \n        \n        from cirq.ops import ControlledGate\n        return ControlledGate(self, control_qubits,\n                              len(control_qubits) if control_qubits is not None\n                                                  else 1)", "docstring": "Returns a controlled version of this gate.\n\nArgs:\ncontrol_qubits: Optional qubits to control the gate by.", "source": "juraj-google-style"}
{"code": "def ls(root='.', abspaths=False, recursive=False):\n\n    def _expand_subdirs(file):\n        if isdir(path(root, file)):\n            return ([file] + [path(file, x) for x in ls(path(root, file), recursive=True)])\n        else:\n            return [file]\n    if isfile(root):\n        return ([abspath(root)] if abspaths else [basename(root)])\n    elif abspaths:\n        relpaths = ls(root, recursive=recursive, abspaths=False)\n        base = abspath(root)\n        return [path(base, relpath) for relpath in relpaths]\n    elif recursive:\n        paths = ls(root, abspaths=abspaths, recursive=False)\n        return labtypes.flatten([_expand_subdirs(file) for file in paths])\n    else:\n        return list(sorted(os.listdir(root)))", "docstring": "Return a list of files in directory.\n\nDirectory listings are sorted alphabetically. If the named\ndirectory is a file, return it's path.\n\nExamples:\n\n>>> fs.ls(\"foo\")\n[\"a\", \"b\", \"c\"]\n\n>>> fs.ls(\"foo/a\")\n[\"foo/a\"]\n\n>>> fs.ls(\"foo\", abspaths=True)\n[\"/home/test/foo/a\", \"/home/test/foo/b\", \"/home/test/foo/c\"]\n\n>>> fs.ls(\"foo\", recursive=True)\n[\"a\", \"b\", \"b/d\", \"b/d/e\", \"c\"]\n\nArguments:\n\nroot (str): Path to directory. Can be relative or absolute.\nabspaths (bool, optional): Return absolute paths if true.\nrecursive (bool, optional): Recursively list subdirectories if\ntrue.\n\nReturns:\n\nlist of str: A list of paths.\n\nRaises:\n\nOSError: If root directory does not exist.", "source": "codesearchnet"}
{"code": "def _add_genetic_models(self, variant_obj, info_dict):\n        \n        genetic_models_entry = info_dict.get('GeneticModels')\n        if genetic_models_entry:\n            genetic_models = []\n            for family_annotation in genetic_models_entry.split(','):\n                for genetic_model in family_annotation.split(':')[-1].split('|'):\n                    genetic_models.append(genetic_model)\n            logger.debug(\"Updating genetic models to: {0}\".format(\n                ', '.join(genetic_models)))\n                \n            variant_obj.genetic_models = genetic_models", "docstring": "Add the genetic models found\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ninfo_dict (dict): A info dictionary", "source": "juraj-google-style"}
{"code": "def get(self, block_id):\n    pool = current_app.config['bigchain_pool']\n    with pool() as bigchain:\n        block = bigchain.get_block(block_id=block_id)\n    if (not block):\n        return make_error(404)\n    return block", "docstring": "API endpoint to get details about a block.\n\nArgs:\nblock_id (str): the id of the block.\n\nReturn:\nA JSON string containing the data about the block.", "source": "codesearchnet"}
{"code": "def __init__(self, uri='http:\n        \n\n        \n        try:\n            \n            self.graph_db = neo4j.GraphDatabaseService(uri)\n            version = self.graph_db.neo4j_version\n            print '\\t- Neo4j GraphDB connected: %s %s' % (str(uri), version)\n        except packages.httpstream.http.SocketError:\n            print '\\t- Neo4j connection failed! Is your Neo4j server running? $ neo4j start'\n            raise RuntimeError('Could not connect to Neo4j')", "docstring": "Initialization for NeoDB indexer.\n\nArgs:\nuri: The uri to connect NeoDB.\n\nRaises:\nRuntimeError: When connection to NeoDB failed.", "source": "juraj-google-style"}
{"code": "def project(self, project, entity=None):\n        \n        query = gql()\n        return self.gql(query, variable_values={\n            'entity': entity, 'project': project})['model']", "docstring": "Retrive project\n\nArgs:\nproject (str): The project to get details for\nentity (str, optional): The entity to scope this project to.\n\nReturns:\n[{\"id\",\"name\",\"repo\",\"dockerImage\",\"description\"}]", "source": "juraj-google-style"}
{"code": "def format_params_diff(parameter_diff):\n    \n\n    params_output = '\\n'.join([line for v in parameter_diff\n                               for line in v.changes()])\n    return  % params_output", "docstring": "Handles the formatting of differences in parameters.\n\nArgs:\nparameter_diff (list): A list of DictValues detailing the\ndifferences between two dicts returned by\n:func:`stacker.actions.diff.diff_dictionaries`\nReturns:\nstring: A formatted string that represents a parameter diff", "source": "juraj-google-style"}
{"code": "def imwrite(img, file_path, params=None, auto_mkdir=True):\n    \n    if auto_mkdir:\n        dir_name = osp.abspath(osp.dirname(file_path))\n        mkdir_or_exist(dir_name)\n    return cv2.imwrite(file_path, img, params)", "docstring": "Write image to file\n\nArgs:\nimg (ndarray): Image array to be written.\nfile_path (str): Image file path.\nparams (None or list): Same as opencv's :func:`imwrite` interface.\nauto_mkdir (bool): If the parent folder of `file_path` does not exist,\nwhether to create it automatically.\n\nReturns:\nbool: Successful or not.", "source": "juraj-google-style"}
{"code": "def loop_until_timeout_or_not_none(timeout_s, function, sleep_s=1):  \n  \n  return loop_until_timeout_or_valid(\n      timeout_s, function, lambda x: x is not None, sleep_s)", "docstring": "Loops until the specified function returns non-None or until a timeout.\n\nArgs:\ntimeout_s: The number of seconds to wait until a timeout condition is\nreached. As a convenience, this accepts None to mean never timeout.  Can\nalso be passed a PolledTimeout object instead of an integer.\nfunction: The function to call each iteration.\nsleep_s: The number of seconds to wait after calling the function.\n\nReturns:\nWhatever the function returned last.", "source": "juraj-google-style"}
{"code": "def update(self, other, **kwargs):\n        \n        assert isinstance(\n            other, type(self)\n        ), \"Must have the same DataManager subclass to perform this operation\"\n\n        def update_builder(df, other, **kwargs):\n            \n            df = df.copy()\n            df.update(other, **kwargs)\n            return df\n\n        return self._inter_df_op_handler(update_builder, other, **kwargs)", "docstring": "Uses other manager to update corresponding values in this manager.\n\nArgs:\nother: The other manager.\n\nReturns:\nNew DataManager with updated data and index.", "source": "juraj-google-style"}
{"code": "def compare_files(path1, path2):\n    \n    \n    diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())\n    return [x for x in diff if x[0] in ['-', '+', '?']]", "docstring": "Returns the delta between two files using -, ?, + format excluding\nlines that are the same\n\nArgs:\npath1 (str): Path to first file\npath2 (str): Path to second file\n\nReturns:\nList[str]: Delta between the two files", "source": "juraj-google-style"}
{"code": "def load_data(path, dense=False):\n    catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}\n    ext = os.path.splitext(path)[1]\n    func = catalog[ext]\n    (X, y) = func(path)\n    if (dense and sparse.issparse(X)):\n        X = X.todense()\n    return (X, y)", "docstring": "Load data from a CSV, LibSVM or HDF5 file based on the file extension.\n\nArgs:\npath (str): A path to the CSV, LibSVM or HDF5 format file containing data.\ndense (boolean): An optional variable indicating if the return matrix\nshould be dense.  By default, it is false.\n\nReturns:\nData matrix X and target vector y", "source": "codesearchnet"}
{"code": "def validate_string(string, options=None):\n    \n    output.info(\"Performing JSON schema validation on input string: \" + string)\n    stream = io.StringIO(string)\n    return validate(stream, options)", "docstring": "Validate the input `string` according to the options passed in.\n\nIf any exceptions are raised during validation, no further validation\nwill take place.\n\nArgs:\nstring: The string containing the JSON to be validated.\noptions: An instance of ``ValidationOptions``.\n\nReturns:\nAn ObjectValidationResults instance, or a list of such.", "source": "juraj-google-style"}
{"code": "def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,\n                                               moderngl.Texture3D, moderngl.TextureCube]:\n        \n        return self._project.get_texture(label)", "docstring": "Get a texture by its label\n\nArgs:\nlabel (str): The Label for the texture\n\nReturns:\nThe py:class:`moderngl.Texture` instance", "source": "juraj-google-style"}
{"code": "def FilterRange(self, start_time=None, stop_time=None):\n    \n\n    start_time = self._NormalizeTime(start_time)\n    stop_time = self._NormalizeTime(stop_time)\n    self.data = [\n        p for p in self.data\n        if (start_time is None or p[1] >= start_time) and\n        (stop_time is None or p[1] < stop_time)\n    ]", "docstring": "Filter the series to lie between start_time and stop_time.\n\nRemoves all values of the series which are outside of some time range.\n\nArgs:\nstart_time: If set, timestamps before start_time will be dropped.\nstop_time: If set, timestamps at or past stop_time will be dropped.", "source": "juraj-google-style"}
{"code": "def init_app(self, app):\n    self._key = (app.config.get(CONF_KEY) or getenv(CONF_KEY))\n    if (not self._key):\n        return\n    self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)\n    sender = AsynchronousSender(self._endpoint_uri)\n    queue = AsynchronousQueue(sender)\n    self._channel = TelemetryChannel(None, queue)\n    self._init_request_logging(app)\n    self._init_trace_logging(app)\n    self._init_exception_logging(app)", "docstring": "Initializes the extension for the provided Flask application.\n\nArgs:\napp (flask.Flask). the Flask application for which to initialize the extension.", "source": "codesearchnet"}
{"code": "def eval(self, data, data_store, *, exclude=None):\n    exclude = ([] if (exclude is None) else exclude)\n    result = {}\n    for (key, value) in self.items():\n        if (key in exclude):\n            continue\n        if ((value is not None) and callable(value)):\n            result[key] = value(data, data_store)\n        else:\n            result[key] = value\n    return TaskParameters(result)", "docstring": "Return a new object in which callable parameters have been evaluated.\n\nNative types are not touched and simply returned, while callable methods are\nexecuted and their return value is returned.\n\nArgs:\ndata (MultiTaskData): The data object that has been passed from the\npredecessor task.\ndata_store (DataStore): The persistent data store object that allows the task\nto store data for access across the current workflow\nrun.\nexclude (list): List of key names as strings that should be excluded from\nthe evaluation.\n\nReturns:\nTaskParameters: A new TaskParameters object with the callable parameters\nreplaced by their return value.", "source": "codesearchnet"}
{"code": "def shell_call(command, **kwargs):\n  \n  command = list(command)\n  for i in range(len(command)):\n    m = CMD_VARIABLE_RE.match(command[i])\n    if m:\n      var_id = m.group(1)\n      if var_id in kwargs:\n        command[i] = kwargs[var_id]\n  return subprocess.call(command) == 0", "docstring": "Calls shell command with parameter substitution.\n\nArgs:\ncommand: command to run as a list of tokens\n**kwargs: dirctionary with substitutions\n\nReturns:\nwhether command was successful, i.e. returned 0 status code\n\nExample of usage:\nshell_call(['cp', '${A}', '${B}'], A='src_file', B='dst_file')\nwill call shell command:\ncp src_file dst_file", "source": "juraj-google-style"}
{"code": "def init(self, game_info, static_data):\n    \n    self._game_info = game_info\n    self._static_data = static_data\n\n    if not game_info.HasField(\"start_raw\"):\n      raise ValueError(\"Raw observations are required for the renderer.\")\n\n    self._map_size = point.Point.build(game_info.start_raw.map_size)\n\n    if game_info.options.HasField(\"feature_layer\"):\n      fl_opts = game_info.options.feature_layer\n      self._feature_screen_px = point.Point.build(fl_opts.resolution)\n      self._feature_minimap_px = point.Point.build(fl_opts.minimap_resolution)\n      self._feature_camera_width_world_units = fl_opts.width\n      self._render_rgb = False\n    else:\n      self._feature_screen_px = self._feature_minimap_px = None\n    if game_info.options.HasField(\"render\"):\n      render_opts = game_info.options.render\n      self._rgb_screen_px = point.Point.build(render_opts.resolution)\n      self._rgb_minimap_px = point.Point.build(render_opts.minimap_resolution)\n      self._render_rgb = True\n    else:\n      self._rgb_screen_px = self._rgb_minimap_px = None\n\n    if not self._feature_screen_px and not self._rgb_screen_px:\n      raise ValueError(\"Nothing to render.\")\n\n    try:\n      self.init_window()\n      self._initialized = True\n    except pygame.error as e:\n      self._initialized = False\n      logging.error(\"-\" * 60)\n      logging.error(\"Failed to initialize pygame: %s\", e)\n      logging.error(\"Continuing without pygame.\")\n      logging.error(\"If you're using ssh and have an X server, try ssh -X.\")\n      logging.error(\"-\" * 60)\n\n    self._obs = sc_pb.ResponseObservation()\n    self._queued_action = None\n    self._queued_hotkey = \"\"\n    self._select_start = None\n    self._alerts = {}\n    self._past_actions = []\n    self._help = False", "docstring": "Take the game info and the static data needed to set up the game.\n\nThis must be called before render or get_actions for each game or restart.\n\nArgs:\ngame_info: A `sc_pb.ResponseGameInfo` object for this game.\nstatic_data: A `StaticData` object for this game.\n\nRaises:\nValueError: if there is nothing to render.", "source": "juraj-google-style"}
{"code": "def GetDataStreamByPathSpec(self, path_spec):\n    \n    file_entry = self.GetFileEntryByPathSpec(path_spec)\n    if not file_entry:\n      return None\n\n    data_stream_name = getattr(path_spec, 'data_stream', None)\n    return file_entry.GetDataStream(data_stream_name)", "docstring": "Retrieves a data stream for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nDataStream: a data stream or None if not available.", "source": "juraj-google-style"}
{"code": "def query(self, coords, **kwargs):\n        \n        return super(Lenz2017Query, self).query(coords, **kwargs)", "docstring": "Returns E(B-V), in mags, at the specified location(s) on the sky.\n\nArgs:\ncoords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.\n\nReturns:\nA float array of the reddening, in magnitudes of E(B-V), at the\nselected coordinates.", "source": "juraj-google-style"}
{"code": "def __message_to_schema(self, message_type):\n    \n    name = self.__normalized_name(message_type)\n    schema = {\n        'id': name,\n        'type': 'object',\n        }\n    if message_type.__doc__:\n      schema['description'] = message_type.__doc__\n    properties = {}\n    for field in message_type.all_fields():\n      descriptor = {}\n      \n      \n      \n      type_info = {}\n\n      if type(field) == messages.MessageField:\n        field_type = field.type().__class__\n        type_info['$ref'] = self.add_message(field_type)\n        if field_type.__doc__:\n          descriptor['description'] = field_type.__doc__\n      else:\n        schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(\n            type(field), self.__DEFAULT_SCHEMA_TYPE)\n        \n        \n        if isinstance(schema_type, dict):\n          variant_map = schema_type\n          variant = getattr(field, 'variant', None)\n          if variant in variant_map:\n            schema_type = variant_map[variant]\n          else:\n            \n            schema_type = variant_map[None]\n        type_info['type'] = schema_type[0]\n        if schema_type[1]:\n          type_info['format'] = schema_type[1]\n\n      if type(field) == messages.EnumField:\n        sorted_enums = sorted([enum_info for enum_info in field.type],\n                              key=lambda enum_info: enum_info.number)\n        type_info['enum'] = [enum_info.name for enum_info in sorted_enums]\n\n      if field.required:\n        descriptor['required'] = True\n\n      if field.default:\n        if type(field) == messages.EnumField:\n          descriptor['default'] = str(field.default)\n        else:\n          descriptor['default'] = field.default\n\n      if field.repeated:\n        descriptor['items'] = type_info\n        descriptor['type'] = 'array'\n      else:\n        descriptor.update(type_info)\n\n      properties[field.name] = descriptor\n\n    schema['properties'] = properties\n\n    return schema", "docstring": "Parse a single message into JSON Schema.\n\nWill recursively descend the message structure\nand also parse other messages references via MessageFields.\n\nArgs:\nmessage_type: protorpc.messages.Message class to parse.\n\nReturns:\nAn object representation of the schema.", "source": "juraj-google-style"}
{"code": "class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):\n\n    def __init__(self, suppress_tokens: list):\n        self.suppress_tokens = list(suppress_tokens)\n\n    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n        scores = scores.at[..., self.suppress_tokens].set(-float('inf'))\n        return scores", "docstring": "[`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs\nto be `-inf` so they are not sampled.\n\nArgs:\nsuppress_tokens (`list`):\nTokens to not sample.", "source": "github-repos"}
{"code": "def children(self, as_resources=False):\n\n\t\t\n\n\t\tchildren = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]\n\n\t\t\n\t\tif as_resources:\n\t\t\tlogger.debug('retrieving children as resources')\n\t\t\tchildren = [ self.repo.get_resource(child) for child in children ]\n\n\t\treturn children", "docstring": "method to return hierarchical  children of this resource\n\nArgs:\nas_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n(list): list of resources", "source": "juraj-google-style"}
{"code": "def emit(self, **kwargs):\n        \n        self._ensure_emit_kwargs(kwargs)\n        for slot in self.slots:\n            slot(**kwargs)", "docstring": "Emit signal by calling all connected slots.\n\nThe arguments supplied have to match the signal definition.\n\nArgs:\nkwargs: Keyword arguments to be passed to connected slots.\n\nRaises:\n:exc:`InvalidEmit`: If arguments don't match signal specification.", "source": "juraj-google-style"}
{"code": "def _parse_line_entry(self, line, type):\n    name = None\n    key_values = {}\n    if (type == 'vars'):\n        key_values = self._parse_line_vars(line)\n    else:\n        tokens = shlex.split(line.strip())\n        name = tokens.pop(0)\n        try:\n            key_values = self._parse_vars(tokens)\n        except ValueError:\n            self.log.warning('Unsupported vars syntax. Skipping line: {0}'.format(line))\n            return (name, {})\n    return (name, key_values)", "docstring": "Parse a section entry line into its components. In case of a 'vars'\nsection, the first field will be None. Otherwise, the first field will\nbe the unexpanded host or group name the variables apply to.\n\nFor example:\n[production:children]\nfrontend  purpose=\"web\"    # The line we process\nReturns:\n('frontend', {'purpose': 'web'})\n\nFor example:\n[production:vars]\npurpose=\"web\"              # The line we process\nReturns:\n(None, {'purpose': 'web'})\n\nUndocumented feature:\n[prod:vars]\njson_like_vars=[{'name': 'htpasswd_auth'}]\nReturns:\n(None, {'name': 'htpasswd_auth'})", "source": "codesearchnet"}
{"code": "def AddClass(self, class_name, gtfs_class):\n    \n    if class_name in self._class_mapping:\n      raise problems.DuplicateMapping(class_name)\n    self._class_mapping[class_name] = gtfs_class", "docstring": "Adds an entry to the list of known classes.\n\nArgs:\nclass_name: A string with name through which gtfs_class is to be made\naccessible.\ngtfs_class: The class to be added.\nRaises:\nDuplicateMapping if class_name is already present in the class mapping.", "source": "juraj-google-style"}
{"code": "def GenerateLabels(self, hash_information):\n    \n    response_code = hash_information['response_code']\n    if response_code == self._VIRUSTOTAL_NOT_PRESENT_RESPONSE_CODE:\n      return ['virustotal_not_present']\n\n    if response_code == self._VIRUSTOTAL_PRESENT_RESPONSE_CODE:\n      positives = hash_information['positives']\n      if positives > 0:\n        return ['virustotal_detections_{0:d}'.format(positives)]\n\n      return ['virsutotal_no_detections']\n\n    if response_code == self._VIRUSTOTAL_ANALYSIS_PENDING_RESPONSE_CODE:\n      return ['virustotal_analysis_pending']\n\n    logger.error(\n        'VirusTotal returned unknown response code {0!s}'.format(\n            response_code))\n    return ['virustotal_unknown_response_code_{0:d}'.format(response_code)]", "docstring": "Generates a list of strings that will be used in the event tag.\n\nArgs:\nhash_information (dict[str, object]): the JSON decoded contents of the\nresult of a VirusTotal lookup, as produced by the VirusTotalAnalyzer.\n\nReturns:\nlist[str]: strings describing the results from VirusTotal.", "source": "juraj-google-style"}
{"code": "def _parse_version(version):\n        \n        parsed_version = parse_version(version)\n        return tuple(\n            int(dot_version)\n            for dot_version in parsed_version.base_version.split('.')\n        ) + (parsed_version.is_prerelease,)", "docstring": "Parse a version string.\n\nArgs:\nversion (str): A string representing a version e.g. '1.9rc2'\n\nReturns:\ntuple: major, minor, patch parts cast as integer and whether or not\nit was a pre-release version.", "source": "juraj-google-style"}
{"code": "def get_all_keywords(self, term_so_far='', current_dict=None):\n    terms_present = {}\n    if (not term_so_far):\n        term_so_far = ''\n    if (current_dict is None):\n        current_dict = self.keyword_trie_dict\n    for key in current_dict:\n        if (key == '_keyword_'):\n            terms_present[term_so_far] = current_dict[key]\n        else:\n            sub_values = self.get_all_keywords((term_so_far + key), current_dict[key])\n            for key in sub_values:\n                terms_present[key] = sub_values[key]\n    return terms_present", "docstring": "Recursively builds a dictionary of keywords present in the dictionary\nAnd the clean name mapped to those keywords.\n\nArgs:\nterm_so_far : string\nterm built so far by adding all previous characters\ncurrent_dict : dict\ncurrent recursive position in dictionary\n\nReturns:\nterms_present : dict\nA map of key and value where each key is a term in the keyword_trie_dict.\nAnd value mapped to it is the clean name mapped to it.\n\nExamples:\n>>> keyword_processor = KeywordProcessor()\n>>> keyword_processor.add_keyword('j2ee', 'Java')\n>>> keyword_processor.add_keyword('Python', 'Python')\n>>> keyword_processor.get_all_keywords()\n>>> {'j2ee': 'Java', 'python': 'Python'}\n>>> # NOTE: for case_insensitive all keys will be lowercased.", "source": "codesearchnet"}
{"code": "def __init__(self, client, method, url, query=None):\n    \n    self.client = client\n    self.method = method\n    self.url = url\n    self.query = query if query is not None else {}", "docstring": "Construct request for the Retsly API\n\nArgs:\nclient (dict):          Retsly client\nmethod (string):        method\nurl (string):           url\nquery (list):           query", "source": "juraj-google-style"}
{"code": "def extra(name: str, desc: str) -> Callable:\n    \n\n    def attr_dec(f):\n        f.__setattr__(\"extra_fn\", True)\n        f.__setattr__(\"name\", name)\n        f.__setattr__(\"desc\", desc)\n        return f\n\n    return attr_dec", "docstring": "Decorator for slave channel's \"additional features\" interface.\n\nArgs:\nname (str): A human readable name for the function.\ndesc (str): A short description and usage of it. Use\n``{function_name}`` in place of the function name\nin the description.\n\nReturns:\nThe decorated method.", "source": "juraj-google-style"}
{"code": "def open(cls, filename):\n        \n\n        asarfile = open(filename, 'rb')\n\n        \n        \n        \n        asarfile.seek(4)\n\n        header_size = struct.unpack('I', asarfile.read(4))\n        if len(header_size) <= 0:\n            raise IndexError()\n\n        \n        \n        header_size = header_size[0] - 8\n\n        \n        \n        asarfile.seek(asarfile.tell() + 8)\n        header = asarfile.read(header_size).decode('utf-8')\n\n        files = json.loads(header)\n        return cls(filename, asarfile, files, asarfile.tell())", "docstring": "Opens a *.asar file and constructs a new :see AsarArchive instance.\n\nArgs:\nfilename (str):\nPath to the *.asar file to open for reading.\n\nReturns (AsarArchive):\nAn insance of of the :AsarArchive class or None if reading failed.", "source": "juraj-google-style"}
{"code": "def render(self, tmpl_name, request_env):\n        \n        return super(WebApplication, self).render(tmpl_name, request_env)", "docstring": "Render the specified template and return the output.\n\nArgs:\ntmpl_name (str): file name of the template\nrequest_env (dict): request environment\n\n\nReturns:\nstr - the rendered template", "source": "juraj-google-style"}
{"code": "def softplus(x):\n    return math_ops.softplus(x)", "docstring": "Softplus of a tensor.\n\nArgs:\nx: A tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def tooltip(self, value: Any, *, parent: Any=None, root_path: Optional[KeyPath]=None, css_classes: Optional[Sequence[str]]=None, id: Optional[str]=None, content: Union[str, Html, None]=None, **kwargs) -> Html:\n    del parent, kwargs\n    if content is None:\n        content = Html.escape(utils.format(value, root_path=root_path, compact=False, verbose=False, python_format=True, max_bytes_len=64, max_str_len=256))\n    return Html.element('span', [content], id=id, css_classes=['tooltip', css_classes]).add_style('\\n        \\n        span.tooltip {\\n          visibility: hidden;\\n          white-space: pre-wrap;\\n          font-weight: normal;\\n          background-color:", "docstring": "Renders a tooltip for the value.\n\nArgs:\nvalue: The value to render.\nparent: The parent of the value.\nroot_path: The root path of the value.\ncss_classes: CSS classes to add to the HTML element.\nid: The ID of the tooltip span element. If None, no ID will be added.\ncontent: The content to render. If None, the value will be rendered.\n**kwargs: Additional keyword arguments passed from the user that\nwill be ignored.\n\nReturns:\nThe rendered HTML as the tooltip of the value.", "source": "github-repos"}
{"code": "def get_history(self, filters=(), pagesize=15, offset=0):\n        \n        response = None\n        try:\n            response = requests.get(\n                urls.history(self._giid),\n                headers={\n                    'Accept': 'application/json, text/javascript, */*; q=0.01',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                params={\n                    \"offset\": int(offset),\n                    \"pagesize\": int(pagesize),\n                    \"notificationCategories\": filters})\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Get recent events\n\nArgs:\nfilters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',\n'TECHNICAL', 'SOS', 'WARNING', 'LOCK',\n'UNLOCK'\npagesize (int): Number of events to display\noffset (int): Skip pagesize * offset first events", "source": "juraj-google-style"}
{"code": "def _get_required_params_for_impression(self, experiment, variation_id):\n    \n    snapshot = {}\n\n    snapshot[self.EventParams.DECISIONS] = [{\n      self.EventParams.EXPERIMENT_ID: experiment.id,\n      self.EventParams.VARIATION_ID: variation_id,\n      self.EventParams.CAMPAIGN_ID: experiment.layerId\n    }]\n\n    snapshot[self.EventParams.EVENTS] = [{\n      self.EventParams.EVENT_ID: experiment.layerId,\n      self.EventParams.TIME: self._get_time(),\n      self.EventParams.KEY: 'campaign_activated',\n      self.EventParams.UUID: str(uuid.uuid4())\n    }]\n\n    return snapshot", "docstring": "Get parameters that are required for the impression event to register.\n\nArgs:\nexperiment: Experiment for which impression needs to be recorded.\nvariation_id: ID for variation which would be presented to user.\n\nReturns:\nDict consisting of decisions and events info for impression event.", "source": "juraj-google-style"}
{"code": "def ReconcileShadow(self, store_type):\n    for (k, v) in iteritems(self.entry):\n        if (v.pw_entry.store == store_type):\n            shadow_entry = self.shadow.get(k)\n            if (shadow_entry is not None):\n                v.pw_entry = shadow_entry\n            else:\n                v.pw_entry.store = 'UNKNOWN'", "docstring": "Verify that entries that claim to use shadow files have a shadow entry.\n\nIf the entries of the non-shadowed file indicate that a shadow file is used,\ncheck that there is actually an entry for that file in shadow.\n\nArgs:\nstore_type: The type of password store that should be used (e.g.\n/etc/shadow or /etc/gshadow)", "source": "codesearchnet"}
{"code": "def epoch_to_human_time(epoch_time):\n    \n    if isinstance(epoch_time, int):\n        try:\n            d = datetime.datetime.fromtimestamp(epoch_time / 1000)\n            return d.strftime(\"%m-%d-%Y %H:%M:%S \")\n        except ValueError:\n            return None", "docstring": "Converts an epoch timestamp to human readable time.\n\nThis essentially converts an output of get_current_epoch_time to an output\nof get_current_human_time\n\nArgs:\nepoch_time: An integer representing an epoch timestamp in milliseconds.\n\nReturns:\nA time string representing the input time.\nNone if input param is invalid.", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        super(UnspentCoinState, self).Serialize(writer)\n\n        writer.WriteVarInt(len(self.Items))\n\n        for item in self.Items:\n            byt = item.to_bytes(1, 'little')\n            writer.WriteByte(byt)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def set_setting(name, value):\n    if (name.lower() not in _get_valid_names()):\n        raise KeyError('Invalid name: {0}'.format(name))\n    for setting in settings:\n        if (value.lower() == setting.lower()):\n            cmd = '/set /subcategory:\"{0}\" {1}'.format(name, settings[setting])\n            break\n    else:\n        raise KeyError('Invalid setting value: {0}'.format(value))\n    _auditpol_cmd(cmd)\n    return True", "docstring": "Set the configuration for the named audit setting\n\nArgs:\n\nname (str):\nThe name of the setting to configure\n\nvalue (str):\nThe configuration for the named value. Valid options are:\n\n- No Auditing\n- Success\n- Failure\n- Success and Failure\n\nReturns:\nbool: True if successful\n\nRaises:\nKeyError: On invalid ``name`` or ``value``\nCommandExecutionError: If an error is encountered modifying the setting\n\nUsage:\n\n.. code-block:: python\n\nimport salt.utils.win_lgpo_auditpol\n\n# Set the state of the \"Credential Validation\" setting to Success and\n# Failure\nsalt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',\nvalue='Success and Failure')\n\n# Set the state of the \"Credential Validation\" setting to No Auditing\nsalt.utils.win_lgpo_auditpol.set_setting(name='Credential Validation',\nvalue='No Auditing')", "source": "codesearchnet"}
{"code": "def angle(self, deg=False):\n        \n        if self.dtype.str[1] != 'c':\n            warnings.warn('angle() is intended for complex-valued timeseries',\n                          RuntimeWarning, 1)\n        da = distob.vectorize(np.angle)(self, deg)\n        return _dts_from_da(da, self.tspan, self.labels)", "docstring": "Return the angle of a complex Timeseries\n\nArgs:\ndeg (bool, optional):\nReturn angle in degrees if True, radians if False (default).\n\nReturns:\nangle (Timeseries):\nThe counterclockwise angle from the positive real axis on\nthe complex plane, with dtype as numpy.float64.", "source": "juraj-google-style"}
{"code": "def _ed25519_key_from_file(fn, path):\n    try:\n        return fn(read_from_file(path, exception=ScriptWorkerEd25519Error))\n    except ScriptWorkerException as exc:\n        raise ScriptWorkerEd25519Error('Failed calling {} for {}: {}!'.format(fn, path, str(exc)))", "docstring": "Create an ed25519 key from the contents of ``path``.\n\n``path`` is a filepath containing a base64-encoded ed25519 key seed.\n\nArgs:\nfn (callable): the function to call with the contents from ``path``\npath (str): the file path to the base64-encoded key seed.\n\nReturns:\nobj: the appropriate key type from ``path``\n\nRaises:\nScriptWorkerEd25519Error", "source": "codesearchnet"}
{"code": "def _make_assert_msg_data(sym, x, y, summarize, test_op):\n    data = []\n    data.append('Condition x %s y did not hold.' % sym)\n    if summarize > 0:\n        if x.shape == y.shape and x.shape.as_list():\n            mask = math_ops.logical_not(test_op)\n            indices = array_ops.where(mask)\n            indices_np = indices.numpy()\n            x_vals = array_ops.boolean_mask(x, mask)\n            y_vals = array_ops.boolean_mask(y, mask)\n            num_vals = min(summarize, indices_np.shape[0])\n            data.append('Indices of first %d different values:' % num_vals)\n            data.append(indices_np[:num_vals])\n            data.append('Corresponding x values:')\n            data.append(x_vals.numpy().reshape((-1,))[:num_vals])\n            data.append('Corresponding y values:')\n            data.append(y_vals.numpy().reshape((-1,))[:num_vals])\n        x_np = x.numpy().reshape((-1,))\n        y_np = y.numpy().reshape((-1,))\n        x_sum = min(x_np.size, summarize)\n        y_sum = min(y_np.size, summarize)\n        data.append('First %d elements of x:' % x_sum)\n        data.append(x_np[:x_sum])\n        data.append('First %d elements of y:' % y_sum)\n        data.append(y_np[:y_sum])\n    return data", "docstring": "Subroutine of _binary_assert that generates the components of the default error message when running in eager mode.\n\nArgs:\nsym: Mathematical symbol for the test to apply to pairs of tensor elements,\ni.e. \"==\"\nx: First input to the assertion after applying `convert_to_tensor()`\ny: Second input to the assertion\nsummarize: Value of the \"summarize\" parameter to the original assert_* call;\ntells how many elements of each tensor to print.\ntest_op: TensorFlow op that returns a Boolean tensor with True in each\nposition where the assertion is satisfied.\n\nReturns:\nList of tensors and scalars that, when stringified and concatenated,\nwill produce the error message string.", "source": "github-repos"}
{"code": "def bridge_exists(br):\n    cmd = 'ovs-vsctl br-exists {0}'.format(br)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Tests whether bridge exists as a real or fake  bridge.\n\nReturns:\nTrue if Bridge exists, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_exists br0", "source": "codesearchnet"}
{"code": "def drop_scored_calls(self,names):\n        \n        def _remove(calls,names):\n            d = dict([(k,v) for k,v in calls.items() if k not in names])\n            return d\n        if isinstance(names, str):\n            names = [names]\n        output = self.copy()\n        output['scored_calls'] = output['scored_calls'].\\\n            apply(lambda x: _remove(x,names))\n        return output", "docstring": "Take a name or list of scored call names and drop those from the scored calls\n\nArgs:\nnames (list): list of names to drop or a single string name to drop\n\nReturns:\nCellDataFrame: The CellDataFrame modified.", "source": "juraj-google-style"}
{"code": "class WindowedTracker(BaseTracker):\n\n    def __init__(self, window_mode, **kwargs):\n        if window_mode == WindowMode.SLIDING:\n            self._window_size = kwargs.get('window_size', 100)\n            self._queue = deque(maxlen=self._window_size)\n        self._n = 0\n        self._window_mode = window_mode\n\n    def push(self, x):\n        \n        self._queue.append(x)\n\n    def pop(self):\n        \n        return self._queue.popleft()", "docstring": "Abstract base class for trackers that operate on a data window.\n\nThis class provides a foundation for trackers that maintain a window of data,\neither as a landmark window or a sliding window. It provides basic push and\npop operations.\n\nArgs:\nwindow_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`\nor `SLIDING`.\n**kwargs: Keyword arguments.\nFor `SLIDING` window mode, `window_size` can be specified to set the\nmaximum size of the sliding window. Defaults to 100.", "source": "github-repos"}
{"code": "def set_position(self, position):\n        \n        self._player_interface.SetPosition(ObjectPath(\"/not/used\"), Int64(position * 1000.0 * 1000))\n        self.positionEvent(self, position)", "docstring": "Set the video to playback position to `position` seconds from the start of the video\n\nArgs:\nposition (float): The position in seconds.", "source": "juraj-google-style"}
{"code": "def move_to(self, folder):\n        \n        if isinstance(folder, Folder):\n            self.move_to(folder.id)\n        else:\n            self._move_to(folder)", "docstring": "Moves the email to the folder specified by the folder parameter.\n\nArgs:\nfolder: A string containing the folder ID the message should be moved to, or a Folder instance", "source": "juraj-google-style"}
{"code": "def run_plugins(context_obj, boto3_clients):\n  \n\n  def print_if_verbose(message):\n    if context_obj.verbose:\n      print(message)\n\n  service_name = os.path.basename(sys.argv[0]).replace(\".py\", \"\")\n  try:\n    import plugins\n  except ImportError:\n    print_if_verbose(\"no plugins detected.\")\n    return\n  else:\n    for plugin_importer, plugin_name, plugin_ispkg in pkgutil.iter_modules(plugins.__path__):\n      if plugin_ispkg:\n        plugin_package = importlib.import_module(\"plugins.{}\".format(plugin_name))\n        for importer, modname, ispkg in pkgutil.iter_modules(plugin_package.__path__):\n          plugin_module = importlib.import_module(\"plugins.{}.{}\".format(plugin_name, modname))\n          for name, obj in inspect.getmembers(plugin_module):\n            if inspect.isclass(obj) and obj.__name__ == \"EFPlugin\":\n              plugin_class = getattr(plugin_module, name)\n              plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)\n              if plugin_instance.service == service_name:\n                print_if_verbose(\"plugin '{}' loaded\".format(plugin_name))\n                if not context_obj.commit:\n                  print_if_verbose(\"dryrun: skipping plugin execution.\")\n                else:\n                  try:\n                    plugin_instance.run()\n                  except AttributeError:\n                    print(\"error executing plugin '{}'\".format(modname))", "docstring": "Executes all loaded plugins designated for the service calling the function.\n\nArgs:\ncontext_obj (obj:EFContext): The EFContext object created by the service.\nboto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()", "source": "juraj-google-style"}
{"code": "def apply(self, score: Optional[float]) -> Optional[int]:\n    if score is None:\n        return None\n    if math.isnan(score):\n        return self._missing_label\n    if score < self.threshold:\n        return self._normal_label\n    return self._outlier_label", "docstring": "Applies the fixed threshold to an anomaly score.\n\nClassifies the given anomaly score as normal or outlier based on the\npredefined cutoff.\n\nArgs:\nscore (Optional[float]): The input anomaly score.\n\nReturns:\nOptional[int]: The anomaly label:\n- `normal_label` if the score is less than the threshold.\n- `outlier_label` if the score is at or above the threshold.\n- `missing_label` if the score is `NaN` (detector not ready).\n- `None` if the score is `None` (detector ready, but unable to produce\nscore).", "source": "github-repos"}
{"code": "def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None:\n        \n        \n        self._check_and_create_process(server_info.hostname)\n\n        \n        self._queued_tasks_nb += 1\n        if scan_command.is_aggressive:\n            \n            \n            self._hostname_queues_dict[server_info.hostname].put((server_info, scan_command))\n        else:\n            \n            self._task_queue.put((server_info, scan_command))", "docstring": "Queue a scan command targeting a specific server.\n\nArgs:\nserver_info: The server's connectivity information. The test_connectivity_to_server() method must have been\ncalled first to ensure that the server is online and accessible.\nscan_command: The scan command to run against this server.", "source": "juraj-google-style"}
{"code": "def _restore_and_convert(self, elem: tuple[tuple[Any, Any, beam.Row], Any]) -> NestedKeyedOutputT:\n    (orig_key, temp_key, row), prediction = elem\n    assert isinstance(prediction, AnomalyPrediction), 'Wrong model handler output type.' + f\"Expected: 'AnomalyPrediction', but got '{type(prediction).__name__}'. \" + 'Consider adding a post-processing function via `with_postprocess_fn` ' + f\"to convert from '{type(prediction).__name__}' to 'AnomalyPrediction', \" + 'or use `score_prediction_adapter` or `label_prediction_adapter` to ' + 'perform the conversion.'\n    result = AnomalyResult(example=row, predictions=[dataclasses.replace(prediction, model_id=self._offline_detector._model_id)])\n    return (orig_key, (temp_key, result))", "docstring": "Converts the model output to AnomalyResult.\n\nArgs:\nelem: A tuple containing the combined key (original key, temp key, row)\nand the output from RunInference.\n\nReturns:\nA tuple containing the keyed AnomalyResult.", "source": "github-repos"}
{"code": "def console_set_char(\n    con: tcod.console.Console, x: int, y: int, c: Union[int, str]\n) -> None:\n    \n    lib.TCOD_console_set_char(_console(con), x, y, _int(c))", "docstring": "Change the character at x,y to c, keeping the current colors.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\nc (Union[int, AnyStr]): Character to draw, can be an integer or string.\n\n.. deprecated:: 8.4\nArray access performs significantly faster than using this function.\nSee :any:`Console.ch`.", "source": "juraj-google-style"}
{"code": "def to_tuple(param, low=None, bias=None):\n    \n    if low is not None and bias is not None:\n        raise ValueError('Arguments low and bias are mutually exclusive')\n\n    if param is None:\n        return param\n\n    if isinstance(param, (int, float)):\n        if low is None:\n            param = - param, + param\n        else:\n            param = (low, param) if low < param else (param, low)\n    elif isinstance(param, (list, tuple)):\n        param = tuple(param)\n    else:\n        raise ValueError('Argument param must be either scalar (int,float) or tuple')\n\n    if bias is not None:\n        return tuple([bias + x for x in param])\n\n    return tuple(param)", "docstring": "Convert input argument to min-max tuple\nArgs:\nparam (scalar, tuple or list of 2+ elements): Input value.\nIf value is scalar, return value would be (offset - value, offset + value).\nIf value is tuple, return value would be value + offset (broadcasted).\nlow:  Second element of tuple can be passed as optional argument\nbias: An offset factor added to each element", "source": "juraj-google-style"}
{"code": "def format_terminal_row(headers, example_row):\n\n    def format_column(col):\n        if isinstance(col, str):\n            return '{{:{w}.{w}}}'\n        return '{{:<{w}}}'\n    widths = [max(len(h), len(str(d))) for (h, d) in zip(headers, example_row)]\n    original_last_width = widths[(- 1)]\n    if sys.stdout.isatty():\n        widths[(- 1)] = max(len(headers[(- 1)]), ((tty.width() - sum(((w + 2) for w in widths[0:(- 1)]))) - 3))\n    cols = [format_column(c).format(w=w) for (c, w) in zip(example_row, widths)]\n    format_string = '  '.join(cols)\n    if (original_last_width > widths[(- 1)]):\n        format_string += '...'\n    return format_string", "docstring": "Uses headers and a row of example data to generate a format string\nfor printing a single row of data.\n\nArgs:\nheaders (tuple of strings): The headers for each column of data\nexample_row (tuple): A representative tuple of strings or ints\n\nReturns\nstring: A format string with a size for each column", "source": "codesearchnet"}
{"code": "def has_resource(self, feature_column, name):\n    del feature_column, name\n    raise NotImplementedError('StateManager.has_resource')", "docstring": "Returns true iff a resource with same name exists.\n\nResources can be things such as tables, variables, trackables, etc.\n\nArgs:\nfeature_column: A `FeatureColumn` object this variable corresponds to.\nname: Name of the resource.", "source": "github-repos"}
{"code": "def check(self, read_tuple_name):\n    parts = read_tuple_name.split('__')\n    if ((len(parts[0]) != self.prefix_width) or (len(parts[1]) != self.read_tuple_id_width)):\n        return False\n    segments = parts[2][1:(- 1)].split('),(')\n    for segment in segments:\n        int_widths = list(map(len, segment.split(',')))\n        if (self.genome_id_width != int_widths[0]):\n            return False\n        if (self.chr_id_width != int_widths[1]):\n            return False\n        if ((self.coor_width != int_widths[3]) or (self.coor_width != int_widths[4])):\n            return False\n    return True", "docstring": "Check if the given read tuple name satisfies this profile.\n\nArgs:\nread_tuple_name (str): Read tuple name.", "source": "codesearchnet"}
{"code": "def on_state_changed(self, state):\n        \n        if state:\n            self.editor.sig_breakpoints_changed.connect(self.repaint)\n            self.editor.sig_debug_stop.connect(self.set_current_line_arrow)\n            self.editor.sig_debug_stop[()].connect(self.stop_clean)\n            self.editor.sig_debug_start.connect(self.start_clean)\n        else:\n            self.editor.sig_breakpoints_changed.disconnect(self.repaint)\n            self.editor.sig_debug_stop.disconnect(self.set_current_line_arrow)\n            self.editor.sig_debug_stop[()].disconnect(self.stop_clean)\n            self.editor.sig_debug_start.disconnect(self.start_clean)", "docstring": "Change visibility and connect/disconnect signal.\n\nArgs:\nstate (bool): Activate/deactivate.", "source": "juraj-google-style"}
{"code": "def mp2q(p, q):\n    \n    p, q = flatten(p), flatten(q)\n    entropy_dist = 1 / len(p)\n    return sum(entropy_dist * np.nan_to_num((p ** 2) / q * np.log(p / q)))", "docstring": "Compute the MP2Q measure.\n\nArgs:\np (np.ndarray): The unpartitioned repertoire\nq (np.ndarray): The partitioned repertoire", "source": "juraj-google-style"}
{"code": "def load_addon(username, package_name, _globals):\n    addon_module = get_or_create_module_r(username)\n    package_module = __import__(package_name)\n    add_tasks_r(addon_module, package_module, package_name)\n    _globals.update({username: addon_module})\n    del package_module\n    del addon_module", "docstring": "Load an fabsetup addon given by 'package_name' and hook it in the\nbase task namespace 'username'.\n\nArgs:\nusername(str)\npackage_name(str)\n_globals(dict): the globals() namespace of the fabric script.\n\nReturn: None", "source": "codesearchnet"}
{"code": "def Gradient(inputs, f, name=None):\n    tlist = [_.type for _ in f.definition.signature.input_arg]\n    return symbolic_gradient(input=inputs, Tout=tlist, f=f, name=name)", "docstring": "Computes the gradient function for function f via backpropagation.\n\nArgs:\ninputs: A list of tensors of size N + M.\nf: The function we want to compute the gradient for.  The function 'f' must\nbe a numerical function which takes N inputs and produces M outputs. Its\ngradient function 'g', which is  a function taking N + M inputs and\nproduces N outputs.  I.e. if we have (y1, y2, ..., yM) = f(x1, x2, ...,\nxN), then, g is (dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN, dL/dy1,\ndL/dy2, ..., dL/dyM),  where L is a scalar-value function of (x1, x2, ...,\nxN) (e.g., the loss function). dL/dxi is the partial derivative of L with\nrespect to xi.\nname: A name for the operation (optional).\n\nReturns:\nA list of tensors of size N.", "source": "github-repos"}
{"code": "def _ParseVolumeIdentifiersString(self, volume_identifiers_string, prefix='v'):\n    prefix_length = 0\n    if prefix:\n        prefix_length = len(prefix)\n    if (not volume_identifiers_string):\n        return []\n    if (volume_identifiers_string == 'all'):\n        return ['all']\n    volume_identifiers = set()\n    for identifiers_range in volume_identifiers_string.split(','):\n        if ('..' in identifiers_range):\n            (first_identifier, last_identifier) = identifiers_range.split('..')\n            if first_identifier.startswith(prefix):\n                first_identifier = first_identifier[prefix_length:]\n            if last_identifier.startswith(prefix):\n                last_identifier = last_identifier[prefix_length:]\n            try:\n                first_identifier = int(first_identifier, 10)\n                last_identifier = int(last_identifier, 10)\n            except ValueError:\n                raise ValueError('Invalid volume identifiers range: {0:s}.'.format(identifiers_range))\n            for volume_identifier in range(first_identifier, (last_identifier + 1)):\n                if (volume_identifier not in volume_identifiers):\n                    volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n                    volume_identifiers.add(volume_identifier)\n        else:\n            identifier = identifiers_range\n            if identifier.startswith(prefix):\n                identifier = identifiers_range[prefix_length:]\n            try:\n                volume_identifier = int(identifier, 10)\n            except ValueError:\n                raise ValueError('Invalid volume identifier range: {0:s}.'.format(identifiers_range))\n            volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)\n            volume_identifiers.add(volume_identifier)\n    return sorted(volume_identifiers)", "docstring": "Parses a user specified volume identifiers string.\n\nArgs:\nvolume_identifiers_string (str): user specified volume identifiers. A\nrange of volumes can be defined as: \"3..5\". Multiple volumes can be\ndefined as: \"1,3,5\" (a list of comma separated values). Ranges and\nlists can also be combined as: \"1,3..5\". The first volume is 1. All\nvolumes can be defined as: \"all\".\nprefix (Optional[str]): volume identifier prefix.\n\nReturns:\nlist[str]: volume identifiers with prefix or the string \"all\".\n\nRaises:\nValueError: if the volume identifiers string is invalid.", "source": "codesearchnet"}
{"code": "def to_dict(self, remove_nones=False):\n    content = {}\n    for key in self._translation:\n        if hasattr(self, key):\n            content[key] = getattr(self, key)\n    content['parent_id'] = self.parent_id\n    content['item_id'] = self.item_id\n    content['restricted'] = self.restricted\n    content['title'] = self.title\n    if (self.resources != []):\n        content['resources'] = [resource.to_dict(remove_nones=remove_nones) for resource in self.resources]\n    content['desc'] = self.desc\n    return content", "docstring": "Return the dict representation of the instance.\n\nArgs:\nremove_nones (bool, optional): Optionally remove dictionary\nelements when their value is `None`.\n\nReturns:\ndict: a dict representation of the `DidlObject`.", "source": "codesearchnet"}
{"code": "def AddCampaign(self, client_customer_id, campaign_name, ad_channel_type,\n                  budget):\n    \n    self.client.SetClientCustomerId(client_customer_id)\n    campaign_service = self.client.GetService('CampaignService')\n    budget_id = self.AddBudget(client_customer_id, budget)\n\n    operations = [{\n        'operator': 'ADD',\n        'operand': {\n            'name': campaign_name,\n            'status': 'PAUSED',\n            'biddingStrategyConfiguration': {\n                'biddingStrategyType': 'MANUAL_CPC',\n                'biddingScheme': {\n                    'xsi_type': 'ManualCpcBiddingScheme',\n                    'enhancedCpcEnabled': 'false'\n                }\n            },\n            'budget': {\n                'budgetId': budget_id\n            },\n            'advertisingChannelType': ad_channel_type\n        }\n    }]\n\n    campaign_service.mutate(operations)", "docstring": "Add a Campaign to the client account.\n\nArgs:\nclient_customer_id: str Client Customer Id to use when creating Campaign.\ncampaign_name: str Name of the campaign to be added.\nad_channel_type: str Primary serving target the campaign's ads.\nbudget: str a budget amount (in micros) to use.", "source": "juraj-google-style"}
{"code": "def docker_list(registry_pass):\n    registry = conf.get('docker.registry', None)\n    if (registry is None):\n        log.err('You must define docker.registry conf variable to list images')\n        sys.exit((- 1))\n    registry_user = conf.get('docker.registry_user', None)\n    if (registry_user is None):\n        registry_user = click.prompt('Username')\n    rc = client.RegistryClient(registry, registry_user, registry_pass)\n    images = {x: rc.list_tags(x) for x in rc.list_images()}\n    shell.cprint('<32>Images in <34>{} <32>registry:', registry)\n    for (image, tags) in images.items():\n        shell.cprint('  <92>{}', image)\n        for tag in tags:\n            shell.cprint('      <90>{}:<35>{}', image, tag)", "docstring": "List docker images stored in the remote registry.\n\nArgs:\nregistry_pass (str):\nRemote docker registry password.", "source": "codesearchnet"}
{"code": "def from_array(arr, name=None):\n    tensor = TensorProto()\n    tensor.dims.extend(arr.shape)\n    if name:\n        tensor.name = name\n    if (arr.dtype == np.object):\n        tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]\n        flat_array = arr.flatten()\n        for e in flat_array:\n            if isinstance(e, text_type):\n                tensor.string_data.append(e.encode('utf-8'))\n            elif isinstance(e, np.ndarray):\n                for s in e:\n                    if isinstance(s, text_type):\n                        tensor.string_data.append(s.encode('utf-8'))\n            else:\n                raise NotImplementedError('Unrecognized object in the object array, expect a string, or array of bytes: ', str(type(e)))\n        return tensor\n    try:\n        dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]\n    except KeyError:\n        raise RuntimeError('Numpy data type not understood yet: {}'.format(str(arr.dtype)))\n    tensor.data_type = dtype\n    tensor.raw_data = arr.tobytes()\n    return tensor", "docstring": "Converts a numpy array to a tensor def.\n\nInputs:\narr: a numpy array.\nname: (optional) the name of the tensor.\nReturns:\ntensor_def: the converted tensor def.", "source": "codesearchnet"}
{"code": "def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):\n    if (kpath_bounds is None):\n        from pymatgen.symmetry.bandstructure import HighSymmKpath\n        sp = HighSymmKpath(structure)\n        kpath_labels = []\n        for labels in sp.kpath['path']:\n            kpath_labels.extend(labels)\n        kpath_bounds = []\n        for label in kpath_labels:\n            red_coord = sp.kpath['kpoints'][label]\n            kpath_bounds.append(red_coord)\n    return cls(mode=KSamplingModes.path, num_kpts=ndivsm, kpts=kpath_bounds, comment=(comment if comment else 'K-Path scheme'))", "docstring": "Static constructor for path in k-space.\n\nArgs:\nstructure: :class:`Structure` object.\nkpath_bounds: List with the reduced coordinates of the k-points defining the path.\nndivsm: Number of division for the smallest segment.\ncomment: Comment string.\n\nReturns:\n:class:`KSampling` object.", "source": "codesearchnet"}
{"code": "def cholesky(x):\n    if any_symbolic_tensors((x,)):\n        return Cholesky().symbolic_call(x)\n    return _cholesky(x)", "docstring": "Computes the Cholesky decomposition of a positive semi-definite matrix.\n\nArgs:\nx: Input tensor of shape `(..., M, M)`.\n\nReturns:\nA tensor of shape `(..., M, M)` representing the lower triangular\nCholesky factor of `x`.", "source": "github-repos"}
{"code": "def pauli_single(cls, num_qubits, index, pauli_label):\n        \n        tmp = Pauli.from_label(pauli_label)\n        z = np.zeros(num_qubits, dtype=np.bool)\n        x = np.zeros(num_qubits, dtype=np.bool)\n\n        z[index] = tmp.z[0]\n        x[index] = tmp.x[0]\n\n        return cls(z, x)", "docstring": "Generate single qubit pauli at index with pauli_label with length num_qubits.\n\nArgs:\nnum_qubits (int): the length of pauli\nindex (int): the qubit index to insert the single qubii\npauli_label (str): pauli\n\nReturns:\nPauli: single qubit pauli", "source": "juraj-google-style"}
{"code": "def AddStopTime(self, stop, problems=None, schedule=None, **kwargs):\n    \n    if problems is None:\n      \n      \n      problems = problems_module.default_problem_reporter\n    stoptime = self.GetGtfsFactory().StopTime(\n        problems=problems, stop=stop, **kwargs)\n    self.AddStopTimeObject(stoptime, schedule)", "docstring": "Add a stop to this trip. Stops must be added in the order visited.\n\nArgs:\nstop: A Stop object\nkwargs: remaining keyword args passed to StopTime.__init__\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def exists(self, **kwargs):\n    requests_params = self._handle_requests_params(kwargs)\n    self._check_load_parameters(**kwargs)\n    kwargs['uri_as_parts'] = True\n    session = self._meta_data['bigip']._meta_data['icr_session']\n    uri = self._meta_data['container']._meta_data['uri']\n    endpoint = kwargs.pop('id', '')\n    kwargs.pop('name', '')\n    base_uri = ((uri + endpoint) + '/')\n    kwargs.update(requests_params)\n    try:\n        session.get(base_uri, **kwargs)\n    except HTTPError as err:\n        if (err.response.status_code == 404):\n            return False\n        else:\n            raise\n    return True", "docstring": "r\"\"\"Check for the existence of the ASM object on the BIG-IP\n\nSends an HTTP GET to the URI of the ASM object and if it fails with\na :exc:~requests.HTTPError` exception it checks the exception for\nstatus code of 404 and returns :obj:`False` in that case.\n\nIf the GET is successful it returns :obj:`True`.\n\nFor any other errors are raised as-is.\n\nArgs:\n\\*\\*kwargs (dict): Arbitrary number of keyword arguments.\nKeyword arguments required to get objects\n\nIf kwargs has a ``requests_param`` key the corresponding dict will\nbe passed to the underlying ``requests.session.get`` method where it will\nbe handled according to that API.\n\nReturns:\nbool: True is the object exists: False otherwise.\n\nRaises:\nrequests.HTTPError: Any HTTP error that was not status code 404.", "source": "codesearchnet"}
{"code": "def replace_with_quanto_layers(model, quantization_config=None, modules_to_not_convert=None, current_key_name=None, has_been_replaced=False):\n    from accelerate import init_empty_weights\n    if is_optimum_quanto_available():\n        from optimum.quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8\n    w_mapping = {'float8': qfloat8, 'int8': qint8, 'int4': qint4, 'int2': qint2}\n    a_mapping = {None: None, 'float8': qfloat8, 'int8': qint8}\n    if modules_to_not_convert is None:\n        modules_to_not_convert = []\n    for name, module in model.named_children():\n        if current_key_name is None:\n            current_key_name = []\n        current_key_name.append(name)\n        if not any((key in '.'.join(current_key_name) for key in modules_to_not_convert)):\n            with init_empty_weights():\n                if isinstance(module, torch.nn.Linear):\n                    model._modules[name] = QLinear(in_features=module.in_features, out_features=module.out_features, bias=module.bias is not None, dtype=module.weight.dtype, weights=w_mapping[quantization_config.weights], activations=a_mapping[quantization_config.activations])\n                    model._modules[name].requires_grad_(False)\n                    has_been_replaced = True\n                elif isinstance(module, torch.nn.LayerNorm):\n                    if quantization_config.activations is not None:\n                        model._modules[name] = QLayerNorm(module.normalized_shape, module.eps, module.elementwise_affine, module.bias is not None, activations=a_mapping[quantization_config.activations])\n                        has_been_replaced = True\n        if len(list(module.children())) > 0:\n            _, has_been_replaced = replace_with_quanto_layers(module, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, has_been_replaced=has_been_replaced)\n        current_key_name.pop(-1)\n    return (model, has_been_replaced)", "docstring": "Public method that recursively replaces the Linear layers of the given model with Quanto quantized layers.\nReturns the converted model and a boolean that indicates if the conversion has been successful or not.\n\nArgs:\nmodel (`torch.nn.Module`):\nThe model to convert, can be any `torch.nn.Module` instance.\nquantization_config (`AqlmConfig`, defaults to `None`):\nThe quantization config object that contains the quantization parameters.\nmodules_to_not_convert (`list`, *optional*, defaults to `None`):\nA list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be\nconverted.\ncurrent_key_name (`list`, *optional*, defaults to `None`):\nA list that contains the current key name. This is used for recursion and should not be passed by the user.\nhas_been_replaced (`bool`, *optional*, defaults to `None`):\nA boolean that indicates if the conversion has been successful or not. This is used for recursion and\nshould not be passed by the user.", "source": "github-repos"}
{"code": "def populate_sites(self, number_of_atoms, selected_sites=None):\n    if (number_of_atoms > self.number_of_sites):\n        raise ValueError\n    if selected_sites:\n        atoms = [atom.Atom(initial_site=site) for site in random.sample([s for s in self.sites if (s.label in selected_sites)], number_of_atoms)]\n    else:\n        atoms = [atom.Atom(initial_site=site) for site in random.sample(self.sites, number_of_atoms)]\n    self.number_of_occupied_sites = number_of_atoms\n    return atoms", "docstring": "Populate the lattice sites with a specific number of atoms.\n\nArgs:\nnumber_of_atoms (Int): The number of atoms to populate the lattice sites with.\nselected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def active_qubits(linear, quadratic):\n    active = {idx for (idx, bias) in uniform_iterator(linear)}\n    for (edge, _) in six.iteritems(quadratic):\n        active.update(edge)\n    return active", "docstring": "Calculate a set of all active qubits. Qubit is \"active\" if it has\nbias or coupling attached.\n\nArgs:\nlinear (dict[variable, bias]/list[variable, bias]):\nLinear terms of the model.\n\nquadratic (dict[(variable, variable), bias]):\nQuadratic terms of the model.\n\nReturns:\nset:\nActive qubits' indices.", "source": "codesearchnet"}
{"code": "def __get_bindings__(self, sparql, output_format):\n        \n        return self.ext_conn.query(sparql,\n                                   rtn_format=output_format,\n                                   debug=False)", "docstring": "Internal method queries triplestore or remote\nsparql endpont and returns the bindings\n\nArgs:\n\n----\nsparql: String of SPARQL query\noutput_format: String of type of outputform", "source": "juraj-google-style"}
{"code": "def get_cur_rot(self) -> torch.Tensor:\n    if self._rot_mats is not None:\n        return self._rot_mats\n    elif self._quats is not None:\n        return self._quats\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Return the underlying rotation in its current form\n\nReturns:\nThe stored rotation", "source": "github-repos"}
{"code": "def wb010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `wb010`'.format(value))\n    self._wb010 = value", "docstring": "Corresponds to IDD Field `wb010`\nWet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `wb010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def changed(dirname, filename='.md5', args=None, glob=None):\n    root = Path(dirname)\n    if (not root.exists()):\n        return True\n    cachefile = (root / filename)\n    current_digest = (cachefile.open().read() if cachefile.exists() else '')\n    _digest = digest(dirname, glob=glob)\n    if (args and args.verbose):\n        print('md5:', _digest)\n    has_changed = (current_digest != _digest)\n    if has_changed:\n        with open(os.path.join(dirname, filename), 'w') as fp:\n            fp.write(_digest)\n    return has_changed", "docstring": "Has `glob` changed in `dirname`\n\nArgs:\ndirname: directory to measure\nfilename: filename to store checksum", "source": "codesearchnet"}
{"code": "def remove_exit(self):\n    if self.items:\n        if (self.items[(- 1)] is self.exit_item):\n            del self.items[(- 1)]\n            return True\n    return False", "docstring": "Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else.\n\nReturns:\nbool: True if item needed to be removed, False otherwise.", "source": "codesearchnet"}
{"code": "def GetPlatformRestrictions(campaign_feed):\n    platform_restrictions = None\n    if (campaign_feed['matchingFunction']['operator'] == 'AND'):\n        for argument in campaign_feed['matchingFunction']['lhsOperand']:\n            if (argument['value']['operator'] == 'EQUALS'):\n                request_context_operand = argument['value']['lhsOperand'][0]\n                if (request_context_operand and (request_context_operand == 'DEVICE_PLATFORM')):\n                    platform_restrictions = argument['value']['rhsOperand'][0].upper()\n    return platform_restrictions", "docstring": "Get the Platform Restrictions for a given Campaign Feed.\n\nArgs:\ncampaign_feed: the Campaign Feed we are retrieving Platform Restrictons for.\n\nReturns:\nThe Platform Restrictions for the given feed.", "source": "codesearchnet"}
{"code": "async def update(\n        self, *, node_id: str, version: int, spec: Mapping[str, Any]\n    ) -> Mapping[str, Any]:\n        \n\n        params = {\"version\": version}\n\n        if \"Role\" in spec:\n            assert spec[\"Role\"] in {\"worker\", \"manager\"}\n\n        if \"Availability\" in spec:\n            assert spec[\"Availability\"] in {\"active\", \"pause\", \"drain\"}\n\n        response = await self.docker._query_json(\n            \"nodes/{node_id}/update\".format(node_id=node_id),\n            method=\"POST\",\n            params=params,\n            data=spec,\n        )\n        return response", "docstring": "Update the spec of a node.\n\nArgs:\nnode_id: The ID or name of the node\nversion: version number of the node being updated\nspec: fields to be updated", "source": "juraj-google-style"}
{"code": "def ones_comp_sum16(num1: int, num2: int) -> int:\n    carry = (1 << 16)\n    result = (num1 + num2)\n    return (result if (result < carry) else ((result + 1) - carry))", "docstring": "Calculates the 1's complement sum for 16-bit numbers.\n\nArgs:\nnum1: 16-bit number.\nnum2: 16-bit number.\n\nReturns:\nThe calculated result.", "source": "codesearchnet"}
{"code": "def UpdateNumberOfEventTags(self, number_of_consumed_event_tags, number_of_produced_event_tags):\n    consumed_event_tags_delta = 0\n    if (number_of_consumed_event_tags is not None):\n        if (number_of_consumed_event_tags < self.number_of_consumed_event_tags):\n            raise ValueError('Number of consumed event tags smaller than previous update.')\n        consumed_event_tags_delta = (number_of_consumed_event_tags - self.number_of_consumed_event_tags)\n        self.number_of_consumed_event_tags = number_of_consumed_event_tags\n        self.number_of_consumed_event_tags_delta = consumed_event_tags_delta\n    produced_event_tags_delta = 0\n    if (number_of_produced_event_tags is not None):\n        if (number_of_produced_event_tags < self.number_of_produced_event_tags):\n            raise ValueError('Number of produced event tags smaller than previous update.')\n        produced_event_tags_delta = (number_of_produced_event_tags - self.number_of_produced_event_tags)\n        self.number_of_produced_event_tags = number_of_produced_event_tags\n        self.number_of_produced_event_tags_delta = produced_event_tags_delta\n    return ((consumed_event_tags_delta > 0) or (produced_event_tags_delta > 0))", "docstring": "Updates the number of event tags.\n\nArgs:\nnumber_of_consumed_event_tags (int): total number of event tags consumed\nby the process.\nnumber_of_produced_event_tags (int): total number of event tags produced\nby the process.\n\nReturns:\nbool: True if either number of event tags has increased.\n\nRaises:\nValueError: if the consumed or produced number of event tags is smaller\nthan the value of the previous update.", "source": "codesearchnet"}
{"code": "def receive(self,message_type):\n        \n        topic = None\n        message = None\n        if message_type == RAW:\n            message = self._sock.recv(flags=zmq.NOBLOCK)\n        elif message_type == PYOBJ:\n            message = self._sock.recv_pyobj(flags=zmq.NOBLOCK)\n        elif message_type == JSON:\n            message = self._sock.recv_json(flags=zmq.NOBLOCK)\n        elif message_type == MULTIPART:\n            data = self._sock.recv_multipart(flags=zmq.NOBLOCK)\n            message = data[1]\n            topic = data[0]\n        elif message_type == STRING:\n            message = self._sock.recv_string(flags=zmq.NOBLOCK)\n        elif message_type == UNICODE:\n            message = self._sock.recv_unicode(flags=zmq.NOBLOCK)\n        else:\n            raise Exception(\"Unknown message type %s\"%(self._message_type,))\n            \n        return (topic, message)", "docstring": "Receive the message of the specified type and retun\n\nArgs:\n- message_type: the type of the message to receive\n\nReturns:\n- the topic of the message\n- the message received from the socket", "source": "juraj-google-style"}
{"code": "def show_status(self, **kwargs):\n        \n        stream = kwargs.pop(\"stream\", sys.stdout)\n        nids = as_set(kwargs.pop(\"nids\", None))\n        wslice = kwargs.pop(\"wslice\", None)\n        verbose = kwargs.pop(\"verbose\", 0)\n        wlist = None\n        if wslice is not None:\n            \n            wlist = list(range(wslice.start, wslice.step, wslice.stop))\n\n        \n        has_colours = True\n        red = \"red\" if has_colours else None\n\n        for i, work in enumerate(self):\n            if nids and work.node_id not in nids: continue\n            print(\"\", file=stream)\n            cprint_map(\"Work \n            if wlist is not None and i in wlist: continue\n            if verbose == 0 and work.finalized:\n                print(\"  Finalized works are not shown. Use verbose > 0 to force output.\", file=stream)\n                continue\n\n            headers = [\"Task\", \"Status\", \"Queue\", \"MPI|Omp|Gb\",\n                       \"Warn|Com\", \"Class\", \"Sub|Rest|Corr\", \"Time\",\n                       \"Node_ID\"]\n            table = []\n            tot_num_errors = 0\n            for task in work:\n                if nids and task.node_id not in nids: continue\n                task_name = os.path.basename(task.name)\n\n                \n                \n                \n                report = task.get_event_report()\n\n                \n                stime = None\n                timedelta = task.datetimes.get_runtime()\n                if timedelta is not None:\n                    stime = str(timedelta) + \"R\"\n                else:\n                    timedelta = task.datetimes.get_time_inqueue()\n                    if timedelta is not None:\n                        stime = str(timedelta) + \"Q\"\n\n                events = \"|\".join(2*[\"NA\"])\n                if report is not None:\n                    events = '{:>4}|{:>3}'.format(*map(str, (\n                       report.num_warnings, report.num_comments)))\n\n                para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (\n                   task.mpi_procs, task.omp_threads, \"%.1f\" % task.mem_per_proc.to(\"Gb\"))))\n\n                task_info = list(map(str, [task.__class__.__name__,\n                                 (task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))\n\n                qinfo = \"None\"\n                if task.queue_id is not None:\n                    qname = str(task.qname)\n                    if not verbose:\n                        qname = qname[:min(5, len(qname))]\n                    qinfo = str(task.queue_id) + \"@\" + qname\n\n                if task.status.is_critical:\n                    tot_num_errors += 1\n                    task_name = colored(task_name, red)\n\n                if has_colours:\n                    table.append([task_name, task.status.colored, qinfo,\n                                  para_info, events] + task_info)\n                else:\n                    table.append([task_name, str(task.status), qinfo, events,\n                                  para_info] + task_info)\n\n            \n            print(tabulate(table, headers=headers, tablefmt=\"grid\"), file=stream)\n            if tot_num_errors:\n                cprint(\"Total number of errors: %d\" % tot_num_errors, \"red\", file=stream)\n            print(\"\", file=stream)\n\n        if self.all_ok:\n            cprint(\"\\nall_ok reached\\n\", \"green\", file=stream)", "docstring": "Report the status of the works 
and the status  of the different tasks on the specified stream.\n\nArgs:\nstream: File-like object, Default: sys.stdout\nnids:  List of node identifiers. By defaults all nodes are shown\nwslice: Slice object used to select works.\nverbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.", "source": "juraj-google-style"}
{"code": "def transfers(self, payment_id, data={}, **kwargs):\n    url = '{}/{}/transfers'.format(self.base_url, payment_id)\n    return self.get_url(url, data, **kwargs)", "docstring": "Fetches all transfer for given Payment Id\n\nArgs:\npayment_id : Id for which payment object has to be refunded\nAmount : Amount for which the payment has to be refunded\n\nReturns:\nPayment dict after getting refunded", "source": "codesearchnet"}
{"code": "def __init__(self, config: FastSpeech2ConformerConfig, module_config):\n    super().__init__()\n    input_channels = config.hidden_size\n    hidden_channels = module_config['linear_units']\n    kernel_size = config.positionwise_conv_kernel_size\n    self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) \n    self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) \n    self.dropout = nn.Dropout(module_config['dropout_rate'])", "docstring": "Initialize FastSpeech2ConformerMultiLayeredConv1d module.\n\nArgs:\ninput_channels (`int`): Number of input channels.\nhidden_channels (`int`): Number of hidden channels.\nkernel_size (`int`): Kernel size of conv1d.\ndropout_rate (`float`): Dropout rate.", "source": "github-repos"}
{"code": "def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):\n    input_length = input.shape[0]\n    lower_index = round(input_length * (1 - lower_percentile * 0.01))\n    upper_index = round(input_length * upper_percentile * 0.01)\n    upper_bound = torch.kthvalue(input, k=upper_index).values\n    if lower_percentile == 0:\n        lower_bound = upper_bound * 0\n    else:\n        lower_bound = -torch.kthvalue(-input, k=lower_index).values\n    if not output_tensor:\n        lower_bound = lower_bound.item()\n        upper_bound = upper_bound.item()\n    return (lower_bound, upper_bound)", "docstring": "Calculate the percentile max and min values in a given tensor\n\nArgs:\ninput (`torch.Tensor`):\nThe target tensor to calculate percentile max and min.\nlower_percentile (`float`):\nIf 0.1, means we return the value of the smallest 0.1% value in the tensor as percentile min.\nupper_percentile (`float`):\nIf 99.9, means we return the value of the largest 0.1% value in the tensor as percentile max.\noutput_tensor (`bool`, *optional*, defaults to `False`):\nIf True, this function returns tensors, otherwise it returns values.\n\nReturns:\n`Tuple(torch.Tensor, torch.Tensor)`: Percentile min and max value of *input*", "source": "github-repos"}
{"code": "def to_json(self, indent=None, separators=None, sort_keys=False):\n\n    def remove_callables(x):\n        'Omit callable elements from input with arbitrary nesting.'\n        if isinstance(x, dict):\n            return {k: remove_callables(v) for (k, v) in six.iteritems(x) if (not callable(v))}\n        elif isinstance(x, list):\n            return [remove_callables(i) for i in x if (not callable(i))]\n        return x\n    return json.dumps(remove_callables(self.values()), indent=indent, separators=separators, sort_keys=sort_keys)", "docstring": "Serializes the hyperparameters into JSON.\n\nArgs:\nindent: If a non-negative integer, JSON array elements and object members\nwill be pretty-printed with that indent level. An indent level of 0, or\nnegative, will only insert newlines. `None` (the default) selects the\nmost compact representation.\nseparators: Optional `(item_separator, key_separator)` tuple. Default is\n`(', ', ': ')`.\nsort_keys: If `True`, the output dictionaries will be sorted by key.\n\nReturns:\nA JSON string.", "source": "codesearchnet"}
{"code": "def nb_fit(data, P_init=None, R_init=None, epsilon=1e-08, max_iters=100):\n    means = data.mean(1)\n    variances = data.var(1)\n    if (means > variances).any():\n        raise ValueError('For NB fit, means must be less than variances')\n    (genes, cells) = data.shape\n    P = (1.0 - (means / variances))\n    R = ((means * (1 - P)) / P)\n    for i in range(genes):\n        result = minimize(nb_ll_row, [P[i], R[i]], args=(data[(i, :)],), bounds=[(0, 1), (eps, None)])\n        params = result.x\n        P[i] = params[0]\n        R[i] = params[1]\n    return (P, R)", "docstring": "Fits the NB distribution to data using method of moments.\n\nArgs:\ndata (array): genes x cells\nP_init (array, optional): NB success prob param - genes x 1\nR_init (array, optional): NB stopping param - genes x 1\n\nReturns:\nP, R - fit to data", "source": "codesearchnet"}
{"code": "def _run(self, sess, enqueue_op, coord=None):\n    decremented = False\n    try:\n        enqueue_callable = sess.make_callable(enqueue_op)\n        while True:\n            if coord and coord.should_stop():\n                break\n            try:\n                enqueue_callable()\n            except self._queue_closed_exception_types:\n                with self._lock:\n                    self._runs_per_session[sess] -= 1\n                    decremented = True\n                    if self._runs_per_session[sess] == 0:\n                        try:\n                            sess.run(self._close_op)\n                        except Exception as e:\n                            logging.vlog(1, 'Ignored exception: %s', str(e))\n                    return\n    except Exception as e:\n        if coord:\n            coord.request_stop(e)\n        else:\n            logging.error('Exception in QueueRunner: %s', str(e))\n            with self._lock:\n                self._exceptions_raised.append(e)\n            raise\n    finally:\n        if not decremented:\n            with self._lock:\n                self._runs_per_session[sess] -= 1", "docstring": "Execute the enqueue op in a loop, close the queue in case of error.\n\nArgs:\nsess: A Session.\nenqueue_op: The Operation to run.\ncoord: Optional Coordinator object for reporting errors and checking\nfor stop conditions.", "source": "github-repos"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if whence == os.SEEK_CUR:\n      offset += self._current_offset\n    elif whence == os.SEEK_END:\n      offset += self._range_size\n    elif whence != os.SEEK_SET:\n      raise IOError('Unsupported whence.')\n    if offset < 0:\n      raise IOError('Invalid offset value less than zero.')\n    self._current_offset = offset", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def load_ipython_extension(shell):\n\n    def _request(self, uri, method='GET', body=None, headers=None, redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):\n        if (headers is None):\n            headers = {}\n        headers['user-agent'] = 'GoogleCloudDataLab/1.0'\n        return _orig_request(self, uri, method=method, body=body, headers=headers, redirections=redirections, connection_type=connection_type)\n    _httplib2.Http.request = _request\n\n    def _init_session(self):\n        _orig_init(self)\n        self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'\n    _requests.Session.__init__ = _init_session\n\n    def _run_line_magic(self, magic_name, line):\n        fn = self.find_line_magic(magic_name)\n        if (fn is None):\n            cm = self.find_cell_magic(magic_name)\n            if cm:\n                return _run_cell_magic(self, magic_name, line, None)\n        return _orig_run_line_magic(self, magic_name, line)\n\n    def _run_cell_magic(self, magic_name, line, cell):\n        if ((cell is None) or (len(cell) == 0) or cell.isspace()):\n            fn = self.find_line_magic(magic_name)\n            if fn:\n                return _orig_run_line_magic(self, magic_name, line)\n            cell = None\n        return _orig_run_cell_magic(self, magic_name, line, cell)\n    _shell.InteractiveShell.run_cell_magic = _run_cell_magic\n    _shell.InteractiveShell.run_line_magic = _run_line_magic\n\n    def _get_project_id():\n        try:\n            return google.datalab.Context.default().project_id\n        except Exception:\n            return None\n\n    def _set_project_id(project_id):\n        context = google.datalab.Context.default()\n        context.set_project_id(project_id)\n        try:\n            from datalab.context import Context as _old_context\n            _old_context.default().set_project_id(project_id)\n        except ImportError:\n            pass\n    try:\n        if ('datalab_project_id' not in _IPython.get_ipython().user_ns):\n            _IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id\n            _IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id\n    except TypeError:\n        pass", "docstring": "Called when the extension is loaded.\n\nArgs:\nshell - (NotebookWebApplication): handle to the Notebook interactive shell instance.", "source": "codesearchnet"}
{"code": "def sari_score(predictions, labels, features, **unused_kwargs):\n  \n  if \"inputs\" not in features:\n    raise ValueError(\"sari_score requires inputs feature\")\n\n  \n  inputs = tf.squeeze(features[\"inputs\"], axis=[-1, -2])\n  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n  outputs = tf.squeeze(outputs, axis=[-1, -2])\n\n  \n  labels = tf.squeeze(labels, axis=[-1, -2])\n  labels = tf.expand_dims(labels, axis=1)\n\n  score, _, _, _ = get_sari(inputs, outputs, labels)\n  return score, tf.constant(1.0)", "docstring": "Computes the SARI scores from the given source, prediction and targets.\n\nAn approximate SARI scoring method since we do not glue word pieces or\ndecode the ids and tokenize the output. By default, we use ngram order of 4.\nAlso, this does not have beam search.\n\nArgs:\npredictions: tensor, model predictions.\nlabels: tensor, gold output.\nfeatures: dict, containing inputs.\n\nReturns:\nsari: int, approx sari score", "source": "juraj-google-style"}
{"code": "def get_headers_from_environ(environ):\n    headers = wsgiref.headers.Headers([])\n    for (header, value) in environ.iteritems():\n        if header.startswith('HTTP_'):\n            headers[header[5:].replace('_', '-')] = value\n    if ('CONTENT_TYPE' in environ):\n        headers['CONTENT-TYPE'] = environ['CONTENT_TYPE']\n    return headers", "docstring": "Get a wsgiref.headers.Headers object with headers from the environment.\n\nHeaders in environ are prefixed with 'HTTP_', are all uppercase, and have\nhad dashes replaced with underscores.  This strips the HTTP_ prefix and\nchanges underscores back to dashes before adding them to the returned set\nof headers.\n\nArgs:\nenviron: An environ dict for the request as defined in PEP-333.\n\nReturns:\nA wsgiref.headers.Headers object that's been filled in with any HTTP\nheaders found in environ.", "source": "codesearchnet"}
{"code": "def join_global_room(client: GMatrixClient, name: str, servers: Sequence[str]=()) -> Room:\n    our_server_name = urlparse(client.api.base_url).netloc\n    assert our_server_name, \"Invalid client's homeserver url\"\n    servers = ([our_server_name] + [urlparse(s).netloc for s in servers if (urlparse(s).netloc not in {None, '', our_server_name})])\n    our_server_global_room_alias_full = f'\n    for server in servers:\n        global_room_alias_full = f'\n        try:\n            global_room = client.join_room(global_room_alias_full)\n        except MatrixRequestError as ex:\n            if (ex.code not in (403, 404, 500)):\n                raise\n            log.debug('Could not join global room', room_alias_full=global_room_alias_full, _exception=ex)\n        else:\n            if (our_server_global_room_alias_full not in global_room.aliases):\n                global_room.add_room_alias(our_server_global_room_alias_full)\n                global_room.aliases.append(our_server_global_room_alias_full)\n            break\n    else:\n        log.debug('Could not join any global room, trying to create one')\n        for _ in range(JOIN_RETRIES):\n            try:\n                global_room = client.create_room(name, is_public=True)\n            except MatrixRequestError as ex:\n                if (ex.code not in (400, 409)):\n                    raise\n                try:\n                    global_room = client.join_room(our_server_global_room_alias_full)\n                except MatrixRequestError as ex:\n                    if (ex.code not in (404, 403)):\n                        raise\n                else:\n                    break\n            else:\n                break\n        else:\n            raise TransportError('Could neither join nor create a global room')\n    return global_room", "docstring": "Join or create a global public room with given name\n\nFirst, try to join room on own server (client-configured one)\nIf can't, try to join on each one of servers, and if able, alias it in our server\nIf still can't, create a public room with name in our server\n\nParams:\nclient: matrix-python-sdk client instance\nname: name or alias of the room (without #-prefix or server name suffix)\nservers: optional: sequence of known/available servers to try to find the room in\nReturns:\nmatrix's Room instance linked to client", "source": "codesearchnet"}
{"code": "def push(self, stream_id, timestamp, value):\n    stream = DataStream.FromEncoded(stream_id)\n    reading = IOTileReading(stream_id, timestamp, value)\n    try:\n        self.storage.push(stream, reading)\n        return Error.NO_ERROR\n    except StorageFullError:\n        return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.RING_BUFFER_FULL)", "docstring": "Push a value to a stream.\n\nArgs:\nstream_id (int): The stream we want to push to.\ntimestamp (int): The raw timestamp of the value we want to\nstore.\nvalue (int): The 32-bit integer value we want to push.\nReturns:\nint: Packed 32-bit error code.", "source": "codesearchnet"}
{"code": "def hget(self, key):\n    data = self.r.hget(self.hash, key)\n    if ((data is not None) and (not isinstance(data, str))):\n        data = str(self.r.hget(self.hash, key), 'utf-8')\n    return data", "docstring": "Read data from Redis for the provided key.\n\nArgs:\nkey (string): The key to read in Redis.\n\nReturns:\n(any): The response data from Redis.", "source": "codesearchnet"}
{"code": "def trunc(x):\n    if any_symbolic_tensors((x,)):\n        return Trunc().symbolic_call(x)\n    return backend.numpy.trunc(x)", "docstring": "Return the truncated value of the input, element-wise.\n\nThe truncated value of the scalar `x` is the nearest integer `i` which is\ncloser to zero than `x` is. In short, the fractional part of the signed\nnumber `x` is discarded.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe truncated value of each element in `x`.\n\nExample:\n>>> x = ops.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n>>> ops.trunc(x)\narray([-1.0, -1.0, -0.0, 0.0, 1.0, 1.0, 2.0])", "source": "github-repos"}
{"code": "def is_hermitian(\n        matrix: np.ndarray,\n        *,\n        rtol: float = 1e-5,\n        atol: float = 1e-8) -> bool:\n    \n    return (matrix.shape[0] == matrix.shape[1] and\n            np.allclose(matrix, np.conj(matrix.T), rtol=rtol, atol=atol))", "docstring": "Determines if a matrix is approximately Hermitian.\n\nA matrix is Hermitian if it's square and equal to its adjoint.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is Hermitian within the given tolerance.", "source": "juraj-google-style"}
{"code": "def UpdateUserCredentials(client_id, client_secret, refresh_token,\n                          adwords_manager_cid, developer_token):\n  \n  app_user = AppUser.query(AppUser.user == users.get_current_user()).fetch()[0]\n\n  app_user.client_id = client_id\n  app_user.client_secret = client_secret\n  app_user.refresh_token = refresh_token\n  app_user.adwords_manager_cid = adwords_manager_cid\n  app_user.developer_token = developer_token\n\n  app_user.put()", "docstring": "Update the credentials associated with application user.\n\nArgs:\nclient_id: str Client Id retrieved from the developer's console.\nclient_secret: str Client Secret retrieved from the developer's console.\nrefresh_token: str Refresh token generated with the above client id/secret.\nadwords_manager_cid: str Customer Id for the AdWords manager account.\ndeveloper_token: str Developer Token for the AdWords account.", "source": "juraj-google-style"}
{"code": "def _get_single_block_row_attention(block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1):\n    to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)\n    perm_block = np.random.permutation(to_block_list)\n    illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))\n    illegal_blocks.extend(list(range(global_block_left)))\n    illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))\n    if block_id == 1:\n        illegal_blocks.append(to_end_block_id - 2)\n    if block_id == to_end_block_id - 2:\n        illegal_blocks.append(1)\n    selected_random_blokcs = []\n    for i in range(to_end_block_id - to_start_block_id):\n        if perm_block[i] not in illegal_blocks:\n            selected_random_blokcs.append(perm_block[i])\n        if len(selected_random_blokcs) == num_rand_blocks:\n            break\n    return np.array(selected_random_blokcs, dtype=np.int32)", "docstring": "For a single row block get random row attention.\n\nArgs:\nblock_id: int. block id of row.\nto_start_block_id: int. random attention column start id.\nto_end_block_id: int. random attention column end id.\nnum_rand_blocks: int. number of random blocks to be selected.\nwindow_block_left: int. number of blocks of window to left of a block.\nwindow_block_right: int. number of blocks of window to right of a block.\nglobal_block_left: int. Number of blocks globally used to the left.\nglobal_block_right: int. Number of blocks globally used to the right.\n\nReturns:\nrow containing the random attention vector of size num_rand_blocks.", "source": "github-repos"}
{"code": "def compose_object(self, file_list, destination_file, content_type):\n    xml_setting_list = ['<ComposeRequest>']\n    for meta_data in file_list:\n        xml_setting_list.append('<Component>')\n        for (key, val) in meta_data.iteritems():\n            xml_setting_list.append(('<%s>%s</%s>' % (key, val, key)))\n        xml_setting_list.append('</Component>')\n    xml_setting_list.append('</ComposeRequest>')\n    xml = ''.join(xml_setting_list)\n    if (content_type is not None):\n        headers = {'Content-Type': content_type}\n    else:\n        headers = None\n    (status, resp_headers, content) = self.put_object((api_utils._quote_filename(destination_file) + '?compose'), payload=xml, headers=headers)\n    errors.check_status(status, [200], destination_file, resp_headers, body=content)", "docstring": "COMPOSE multiple objects together.\n\nUsing the given list of files, calls the put object with the compose flag.\nThis call merges all the files into the destination file.\n\nArgs:\nfile_list: list of dicts with the file name.\ndestination_file: Path to the destination file.\ncontent_type: Content type for the destination file.", "source": "codesearchnet"}
{"code": "def _build_attention(self, rank):\n    if self._attention_axes is None:\n        self._attention_axes = tuple(range(1, rank - 2))\n    else:\n        self._attention_axes = tuple(self._attention_axes)\n    self._dot_product_equation, self._combine_equation, attn_scores_rank = _build_attention_equation(rank, attn_axes=self._attention_axes)\n    norm_axes = tuple(range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))\n    self._softmax = Softmax(axis=norm_axes, dtype=self.dtype_policy)\n    self._dropout_layer = Dropout(rate=self._dropout, dtype=self.dtype_policy, seed=self.seed)", "docstring": "Builds multi-head dot-product attention computations.\n\nThis function builds attributes necessary for `_compute_attention` to\ncustomize attention computation to replace the default dot-product\nattention.\n\nArgs:\nrank: the rank of query, key, value tensors.", "source": "github-repos"}
{"code": "def enhex(d, separator=''):\n    v = binascii.hexlify(d).decode('ascii')\n    if separator:\n        return separator.join((v[i:(i + 2)] for i in range(0, len(v), 2)))\n    else:\n        return v", "docstring": "Convert bytes to their hexadecimal representation, optionally joined by a\ngiven separator.\n\nArgs:\nd(bytes): The data to convert to hexadecimal representation.\nseparator(str): The separator to insert between hexadecimal tuples.\n\nReturns:\nstr: The hexadecimal representation of ``d``.\n\nExamples:\n>>> from pwny import *\n>>> enhex(b'pwnypack')\n'70776e797061636b'\n>>> enhex(b'pwnypack', separator=' ')\n'70 77 6e 79 70 61 63 6b'", "source": "codesearchnet"}
{"code": "def upload_timeline(self, timeline_name, plaso_storage_path):\n    \n    resource_url = '{0:s}/upload/'.format(self.api_base_url)\n    files = {'file': open(plaso_storage_path, 'rb')}\n    data = {'name': timeline_name}\n    response = self.session.post(resource_url, files=files, data=data)\n    try:\n      response_dict = response.json()\n    except ValueError:\n      raise RuntimeError(\n          'Could not decode JSON response from Timesketch'\n          ' (Status {0:d}):\\n{1:s}'.format(\n              response.status_code, response.content))\n\n    index_id = response_dict['objects'][0]['id']\n    return index_id", "docstring": "Create a timeline with the specified name from the given plaso file.\n\nArgs:\ntimeline_name (str): Name of timeline\nplaso_storage_path (str): Local path of plaso file to be uploaded\n\nReturns:\nint: ID of uploaded timeline\n\nRaises:\nRuntimeError: When the JSON response from Timesketch cannot be decoded.", "source": "juraj-google-style"}
{"code": "def path_to_text(self, path):\n    rsrcmgr = PDFResourceManager()\n    retstr = StringIO()\n    codec = 'utf-8'\n    laparams = LAParams()\n    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n    fp = open(path, 'rb')\n    interpreter = PDFPageInterpreter(rsrcmgr, device)\n    password = ''\n    maxpages = 0\n    caching = True\n    pagenos = set()\n    pages_data = PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True)\n    for page in pages_data:\n        interpreter.process_page(page)\n    text = retstr.getvalue()\n    text = text.replace('\\n', '')\n    fp.close()\n    device.close()\n    retstr.close()\n    return text", "docstring": "Transform local PDF file to string.\n\nArgs:\npath:   path to PDF file.\n\nReturns:\nstring.", "source": "codesearchnet"}
{"code": "def copy(self, datasets=None):\n        \n        new_scn = self.__class__()\n        new_scn.attrs = self.attrs.copy()\n        new_scn.dep_tree = self.dep_tree.copy()\n\n        for ds_id in (datasets or self.keys()):\n            \n            \n            new_scn.datasets[ds_id] = self[ds_id]\n\n        if not datasets:\n            new_scn.wishlist = self.wishlist.copy()\n        else:\n            new_scn.wishlist = set([DatasetID.from_dict(ds.attrs)\n                                    for ds in new_scn])\n        return new_scn", "docstring": "Create a copy of the Scene including dependency information.\n\nArgs:\ndatasets (list, tuple): `DatasetID` objects for the datasets\nto include in the new Scene object.", "source": "juraj-google-style"}
{"code": "def is_erc20(self):\n    full_names = [f.full_name for f in self.functions]\n    return (('transfer(address,uint256)' in full_names) and ('transferFrom(address,address,uint256)' in full_names) and ('approve(address,uint256)' in full_names))", "docstring": "Check if the contract is an erc20 token\n\nNote: it does not check for correct return values\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def __init__(self, job_id, context):\n    \n    super(Job, self).__init__(job_id, context)", "docstring": "Initializes an instance of a Job.\n\nArgs:\njob_id: the BigQuery job ID corresponding to this job.\ncontext: a Context object providing project_id and credentials.", "source": "juraj-google-style"}
{"code": "def rep(obj, *attrs, **kwargs):\n    \n    s = obj.__class__.__name__\n    args = chain(((attr, getattr(obj, attr)) for attr in attrs), kwargs.items())\n    s += '(%s)' % ','.join('{}={!r}'.format(k, v) for k, v in args)\n    return s", "docstring": "Create a repr of a property based class quickly\nArgs:\nobj      -- instance of class\n*attrs   -- list of attrs to add to the representation\n**kwargs -- Extra arguments to add that are not captured as attributes\n\nReturns: A string representing the class", "source": "juraj-google-style"}
{"code": "def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    encoder_states = () if output_hidden_states else None\n    all_attentions = () if output_attentions else None\n    hidden_states = inputs_embeds\n    for idx, encoder_layer in enumerate(self.layers):\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n        hidden_states = layer_outputs[0]\n        if output_attentions:\n            all_attentions = all_attentions + (layer_outputs[1],)\n    if output_hidden_states:\n        encoder_states = encoder_states + (hidden_states,)\n    if not return_dict:\n        return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n    return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\ninputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nEmbedded representation of the inputs. Should be float, not int tokens.\nattention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n- 1 for tokens that are **not masked**,\n- 0 for tokens that are **masked**.\n\n[What are attention masks?](../glossary#attention-mask)\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\nfor more detail.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:\n    return self._run(self.whatIfOrderAsync(contract, order))", "docstring": "Retrieve commission and margin impact without actually\nplacing the order. The given order will not be modified in any way.\n\nThis method is blocking.\n\nArgs:\ncontract: Contract to test.\norder: Order to test.", "source": "codesearchnet"}
{"code": "def from_dict(cls, d, ignore=()):\n    filtered = {}\n    for (k, v) in d.items():\n        if (k == 'typeid'):\n            assert (v == cls.typeid), ('Dict has typeid %s but %s has typeid %s' % (v, cls, cls.typeid))\n        elif (k not in ignore):\n            filtered[k] = v\n    try:\n        inst = cls(**filtered)\n    except TypeError as e:\n        raise TypeError(('%s raised error: %s' % (cls.typeid, str(e))))\n    return inst", "docstring": "Create an instance from a serialized version of cls\n\nArgs:\nd(dict): Endpoints of cls to set\nignore(tuple): Keys to ignore\n\nReturns:\nInstance of this class", "source": "codesearchnet"}
{"code": "def GetNumberOfRows(self):\n    if (not self._database_object):\n        raise IOError('Not opened.')\n    if (self._number_of_rows is None):\n        self._number_of_rows = self._database_object.GetNumberOfRows(self._table_name)\n    return self._number_of_rows", "docstring": "Retrieves the number of rows of the table.\n\nReturns:\nint: number of rows.\n\nRaises:\nIOError: if the file-like object has not been opened.\nOSError: if the file-like object has not been opened.", "source": "codesearchnet"}
{"code": "def squeeze_batch_dims(inp, op, inner_rank, name=None):\n    with ops.name_scope(name, 'squeeze_batch_dims', [inp]):\n        inp = ops.convert_to_tensor(inp, name='input')\n        shape = inp.shape\n        inner_shape = shape[-inner_rank:]\n        if not inner_shape.is_fully_defined():\n            inner_shape = array_ops.shape(inp)[-inner_rank:]\n        batch_shape = shape[:-inner_rank]\n        if not batch_shape.is_fully_defined():\n            batch_shape = array_ops.shape(inp)[:-inner_rank]\n        if isinstance(inner_shape, tensor_shape.TensorShape):\n            inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n        else:\n            inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))\n        out_reshaped = op(inp_reshaped)\n        out_inner_shape = out_reshaped.shape[-inner_rank:]\n        if not out_inner_shape.is_fully_defined():\n            out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n        out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n        out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n        return out", "docstring": "Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.\n\nWhere `squeeze_batch` reshapes `inp` to shape\n`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`\nand `unsqueeze_batch` does the reverse reshape but on the output.\n\nArgs:\ninp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`\nis length `inner_rank`.\nop: A callable that takes a single input tensor and returns a single.\noutput tensor.\ninner_rank: A python integer.\nname: A string.\n\nReturns:\n`unsqueeze_batch_op(squeeze_batch(inp))`.", "source": "github-repos"}
{"code": "def _SetCredentials(self, **kwds):\n    args = {'api_key': self._API_KEY, 'client': self, 'client_id': self._CLIENT_ID, 'client_secret': self._CLIENT_SECRET, 'package_name': self._PACKAGE, 'scopes': self._SCOPES, 'user_agent': self._USER_AGENT}\n    args.update(kwds)\n    from apitools.base.py import credentials_lib\n    self._credentials = credentials_lib.GetCredentials(**args)", "docstring": "Fetch credentials, and set them for this client.\n\nNote that we can't simply return credentials, since creating them\nmay involve side-effecting self.\n\nArgs:\n**kwds: Additional keyword arguments are passed on to GetCredentials.\n\nReturns:\nNone. Sets self._credentials.", "source": "codesearchnet"}
{"code": "def setColumn(self, header, values):\n        \n        if any(isinstance(value, basestring) for value in values):\n            values = list(map(str, values))\n            self._impl.setColumnStr(header, values, len(values))\n        elif all(isinstance(value, Real) for value in values):\n            values = list(map(float, values))\n            self._impl.setColumnDbl(header, values, len(values))\n        else:\n            print(values)\n            raise NotImplementedError", "docstring": "Set the values of a column.\n\nArgs:\nheader: The header of the column to be set.\n\nvalues: The values to set.", "source": "juraj-google-style"}
{"code": "def list_local_devices(session_config=None):\n\n    def _convert(pb_str):\n        m = device_attributes_pb2.DeviceAttributes()\n        m.ParseFromString(pb_str)\n        return m\n    serialized_config = None\n    if session_config is not None:\n        serialized_config = session_config.SerializeToString()\n    return [_convert(s) for s in _pywrap_device_lib.list_devices(serialized_config)]", "docstring": "List the available devices available in the local process.\n\nArgs:\nsession_config: a session config proto or None to use the default config.\n\nReturns:\nA list of `DeviceAttribute` protocol buffers.", "source": "github-repos"}
{"code": "def edges(self, tail_head_iter):\n    edge = self._edge_plain\n    quote = self._quote_edge\n    lines = ((edge % (quote(t), quote(h))) for (t, h) in tail_head_iter)\n    self.body.extend(lines)", "docstring": "Create a bunch of edges.\n\nArgs:\ntail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.", "source": "codesearchnet"}
{"code": "def write_json(self, fh, pretty=True):\n        \n        sjson = json.JSONEncoder().encode(self.json())\n        if pretty:\n            json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)\n        else:\n            json.dump(json.loads(sjson), fh)\n        return", "docstring": "Write composite object to file handle in JSON format.\n\nArgs:\nfh (file): File handle to write to.\npretty (bool): Sort keys and indent in output.", "source": "juraj-google-style"}
{"code": "def encode_dict(values_dict):\n    return {key: encode_value(value) for (key, value) in six.iteritems(values_dict)}", "docstring": "Encode a dictionary into protobuf ``Value``-s.\n\nArgs:\nvalues_dict (dict): The dictionary to encode as protobuf fields.\n\nReturns:\nDict[str, ~google.cloud.firestore_v1beta1.types.Value]: A\ndictionary of string keys and ``Value`` protobufs as dictionary\nvalues.", "source": "codesearchnet"}
{"code": "def metadata(self, path):\n    if not self.exists(path):\n        raise BeamIOError('Path does not exist: %s' % path)\n    return FileMetadata(path, os.path.getsize(path), os.path.getmtime(path))", "docstring": "Fetch metadata fields of a file on the FileSystem.\n\nArgs:\npath: string path of a file.\n\nReturns:\n:class:`~apache_beam.io.filesystem.FileMetadata`.\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def delete_media_service_rg(access_token, subscription_id, rgname, msname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', rgname, '/providers/microsoft.media/mediaservices/', msname, '?api-version=', MEDIA_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a media service.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nmsname (str): Media service name.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def _skip_tensor(self, op_id, out_tensor, report_handler):\n    non_numeric_tensor_types = set([dtypes.variant, dtypes.resource, dtypes.string])\n    if out_tensor.dtype in non_numeric_tensor_types:\n        report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))\n        return True\n    if [consumer for consumer in out_tensor.consumers() if TensorTracer.while_loop_op(consumer)]:\n        report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))\n        return True\n    if self._is_user_included_op(out_tensor.op):\n        report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))\n        if tensor_tracer_flags.TT_CHECK_FILTER.value:\n            logging.info('USER_INCLUDED tensor %s', out_tensor.name)\n        return False\n    if self._is_user_excluded_op(out_tensor.op):\n        report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))\n        if tensor_tracer_flags.TT_CHECK_FILTER.value:\n            logging.info('USER_EXCLUDED tensor %s', out_tensor.name)\n        return True\n    if not out_tensor.get_shape().is_fully_defined():\n        if self._parameters.trace_mode in (tensor_tracer_flags.TRACE_MODE_NAN_INF, tensor_tracer_flags.TRACE_MODE_NORM, tensor_tracer_flags.TRACE_MODE_HISTORY, tensor_tracer_flags.TRACE_MODE_MAX_ABS, tensor_tracer_flags.TRACE_MODE_SUMMARY):\n            report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))\n            return False\n        else:\n            report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))\n            return True\n    rank = len(out_tensor.shape)\n    if rank < 1:\n        if self._parameters.trace_scalar_ops:\n            if TensorTracer.unsafe_scalar_trace(out_tensor.op):\n                report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))\n                return True\n            else:\n                report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))\n                return False\n        else:\n            report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))\n            return True\n    else:\n        report_handler.instrument_tensor(out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))\n        return False", "docstring": "Returns True if we should not trace out_tensor.\n\nArgs:\nop_id: Topological index of the op producing tensor.\nout_tensor: tf.Tensor\nreport_handler: An instance of tensor_tracer_report.TTReportHandle.\nReturns:\nTrue if the tensor should not be traced, false otherwise.", "source": "github-repos"}
{"code": "def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO:\n    \n    pos = numpy.array([\n        xpos - width / 2.0, ypos + height / 2.0, 0.0,\n        xpos - width / 2.0, ypos - height / 2.0, 0.0,\n        xpos + width / 2.0, ypos - height / 2.0, 0.0,\n        xpos - width / 2.0, ypos + height / 2.0, 0.0,\n        xpos + width / 2.0, ypos - height / 2.0, 0.0,\n        xpos + width / 2.0, ypos + height / 2.0, 0.0,\n    ], dtype=numpy.float32)\n\n    normals = numpy.array([\n        0.0, 0.0, 1.0,\n        0.0, 0.0, 1.0,\n        0.0, 0.0, 1.0,\n        0.0, 0.0, 1.0,\n        0.0, 0.0, 1.0,\n        0.0, 0.0, 1.0,\n    ], dtype=numpy.float32)\n\n    uvs = numpy.array([\n        0.0, 1.0,\n        0.0, 0.0,\n        1.0, 0.0,\n        0.0, 1.0,\n        1.0, 0.0,\n        1.0, 1.0,\n    ], dtype=numpy.float32)\n\n    vao = VAO(\"geometry:quad\", mode=moderngl.TRIANGLES)\n    vao.buffer(pos, '3f', [\"in_position\"])\n    vao.buffer(normals, '3f', [\"in_normal\"])\n    vao.buffer(uvs, '2f', [\"in_uv\"])\n\n    return vao", "docstring": "Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.\n\nArgs:\nwidth (float): Width of the quad\nheight (float): Height of the quad\n\nKeyword Args:\nxpos (float): Center position x\nypos (float): Center position y\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance.", "source": "juraj-google-style"}
{"code": "def get_backdoor(self, name, version=''):\n    params = {}\n    params['or'] = 1\n    params['c-name'] = name\n    params['c-aliases__in'] = name\n    r = requests.get('{0}/backdoors/'.format(self.url), params=params, verify=self.verify, proxies=self.proxies)\n    if (r.status_code == 200):\n        result_data = json.loads(r.text)\n        if ('meta' not in result_data):\n            return None\n        if ('total_count' not in result_data['meta']):\n            return None\n        if (result_data['meta']['total_count'] <= 0):\n            return None\n        if ('objects' not in result_data):\n            return None\n        for backdoor in result_data['objects']:\n            if ('version' in backdoor):\n                if (backdoor['version'] == version):\n                    return backdoor\n    else:\n        log.error('Non-200 status code: {}'.format(r.status_code))\n    return None", "docstring": "Searches for the backdoor based on name and version.\n\nArgs:\nname: The name of the backdoor. This can be an alias.\nversion: The version.\nReturns:\nReturns a JSON object contain one or more backdoor results or\nNone if not found.", "source": "codesearchnet"}
{"code": "def save(self, target, format=None,  encoding=None, **options):\n        \n\n        \n        if encoding is None:\n            encoding = config.DEFAULT_ENCODING\n        if format is None:\n            _, format = helpers.detect_scheme_and_format(target)\n\n        \n        writer_class = self.__custom_writers.get(format)\n        if writer_class is None:\n            if format not in config.WRITERS:\n                message = 'Format \"%s\" is not supported' % format\n                raise exceptions.FormatError(message)\n            writer_class = helpers.import_attribute(config.WRITERS[format])\n\n        \n        writer_options = helpers.extract_options(options, writer_class.options)\n        if options:\n            message = 'Not supported options \"%s\" for format \"%s\"'\n            message = message % (', '.join(options), format)\n            raise exceptions.TabulatorException(message)\n\n        \n        writer = writer_class(**writer_options)\n        writer.write(self.iter(), target, headers=self.headers, encoding=encoding)", "docstring": "Save stream to the local filesystem.\n\nArgs:\ntarget (str): Path where to save the stream.\nformat (str, optional): The format the stream will be saved as. If\nNone, detects from the ``target`` path. Defaults to None.\nencoding (str, optional): Saved file encoding. Defaults to\n``config.DEFAULT_ENCODING``.\n**options: Extra options passed to the writer.", "source": "juraj-google-style"}
{"code": "def _GetInode(self, inode_value):\n    if isinstance(inode_value, py2to3.INTEGER_TYPES):\n        return inode_value\n    if isinstance(inode_value, float):\n        return int(inode_value)\n    if (not isinstance(inode_value, py2to3.STRING_TYPES)):\n        return (- 1)\n    if (b'-' in inode_value):\n        (inode_value, _, _) = inode_value.partition(b'-')\n    try:\n        return int(inode_value, 10)\n    except ValueError:\n        return (- 1)", "docstring": "Retrieves the inode from the inode value.\n\nArgs:\ninode_value (int|str): inode, such as 1 or '27-128-1'.\n\nReturns:\nint: inode or -1 if the inode value cannot be converted to an integer.", "source": "codesearchnet"}
{"code": "def convert_gguf_tokenizer(architecture, tokenizer_dict) -> Tokenizer:\n    tokenizer_class_name = architecture\n    converter = GGUF_TO_FAST_CONVERTERS[tokenizer_class_name](tokenizer_dict)\n    fast_tokenizer = converter.converted()\n    return (fast_tokenizer, converter.additional_kwargs)", "docstring": "Utilities to convert a slow tokenizer instance in a fast tokenizer instance.\n\nArgs:\narchitecture (`str`): The model architecture derived from gguf file.\ntransformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]):\nInstance of a slow tokenizer to convert in the backend tokenizer for\n[`~tokenization_utils_base.PreTrainedTokenizerFast`].\n\nReturn:\nA instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a\n[`~tokenization_utils_base.PreTrainedTokenizerFast`]", "source": "github-repos"}
{"code": "def rm(path):\n    if (path and os.path.exists(path)):\n        if os.path.isdir(path):\n            shutil.rmtree(path)\n        else:\n            os.remove(path)", "docstring": "Equivalent to rm -rf.\n\nMake sure ``path`` doesn't exist after this call.  If it's a dir,\nshutil.rmtree(); if it's a file, os.remove(); if it doesn't exist,\nignore.\n\nArgs:\npath (str): the path to nuke.", "source": "codesearchnet"}
{"code": "def PushSection(self, name, pre_formatters):\n    if (name == '@'):\n        value = self.stack[(- 1)].context\n    else:\n        value = self.stack[(- 1)].context.get(name)\n    for (i, (f, args, formatter_type)) in enumerate(pre_formatters):\n        if (formatter_type == ENHANCED_FUNC):\n            value = f(value, self, args)\n        elif (formatter_type == SIMPLE_FUNC):\n            value = f(value)\n        else:\n            assert False, ('Invalid formatter type %r' % formatter_type)\n    self.stack.append(_Frame(value))\n    return value", "docstring": "Given a section name, push it on the top of the stack.\n\nReturns:\nThe new section, or None if there is no such section.", "source": "codesearchnet"}
{"code": "def dd2dms(dd):\n    \n    m, s = divmod(dd * 3600, 60)\n    d, m = divmod(m, 60)\n    return int(d), int(m), s", "docstring": "Decimal degrees to DMS.\n\nArgs:\ndd (float). Decimal degrees.\n\nReturn:\ntuple. Degrees, minutes, and seconds.", "source": "juraj-google-style"}
{"code": "def _open_repo(args, path_key='<path>'):\n    path = (pathlib.Path(args[path_key]) if args[path_key] else None)\n    try:\n        repo = open_repository(path)\n    except ValueError as exc:\n        raise ExitError(ExitCode.DATA_ERR, str(exc))\n    return repo", "docstring": "Open and return the repository containing the specified file.\n\nThe file is specified by looking up `path_key` in `args`. This value or\n`None` is passed to `open_repository`.\n\nReturns: A `Repository` instance.\n\nRaises:\nExitError: If there is a problem opening the repo.", "source": "codesearchnet"}
{"code": "def __init__(self, property_type=TableFeaturePropType.OFPTFPT_MATCH,\n                 oxm_ids=None):\n        \n        super().__init__(property_type)\n        self.oxm_ids = ListOfOxmHeader() if oxm_ids is None else oxm_ids\n        self.update_length()", "docstring": "Create an OxmProperty with the optional parameters below.\n\nArgs:\ntype(|TableFeaturePropType_v0x04|):\nProperty Type value of this instance.\noxm_ids(|ListOfOxmHeader_v0x04|):\nList of OxmHeader instances.", "source": "juraj-google-style"}
{"code": "async def close_interface(self, client_id, conn_string, interface):\n    conn_id = self._client_connection(client_id, conn_string)\n    (await self.adapter.close_interface(conn_id, interface))\n    self._hook_close_interface(conn_string, interface, client_id)", "docstring": "Close a device interface on behalf of a client.\n\nSee :meth:`AbstractDeviceAdapter.close_interface`.\n\nArgs:\nclient_id (str): The client we are working for.\nconn_string (str): A connection string that will be\npassed to the underlying device adapter.\ninterface (str): The name of the interface to close.\n\nRaises:\nDeviceServerError: There is an issue with your client_id such\nas not being connected to the device.\nDeviceAdapterError: The adapter had an issue closing the interface.", "source": "codesearchnet"}
{"code": "def index_last_dim_with_indices(x, indices):\n  \n  assert len(x.shape) == len(indices.shape) + 1\n\n  x_shape = shape_list(x)\n  vocab_size = x_shape[-1]\n\n  flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])\n  flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])\n\n  idx = tf.stack(\n      [\n          tf.range(tf.to_int64(shape_list(flat_indices)[0])),\n          tf.to_int64(flat_indices)\n      ],\n      axis=1)\n  flat_x_idx = tf.gather_nd(flat_x, idx)\n\n  x_idx = tf.reshape(flat_x_idx, x_shape[:-1])\n\n  return x_idx", "docstring": "Use indices to index into the last axis of x.\n\nThis can be useful for recovering the actual probabilities of a sample from a\nprobability distribution.\n\nArgs:\nx: Tensor, n-d.\nindices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)\ndimensions of x. The values of indices will be used to index into the last\naxis of x.\n\nReturns:\nTensor, (n-1)-d.", "source": "juraj-google-style"}
{"code": "def _ord_to_namespace(n, _max_length=None):\n    if (_max_length is None):\n        _max_length = MAX_NAMESPACE_LENGTH\n    length = _LEX_DISTANCE[(_max_length - 1)]\n    if (n == 0):\n        return ''\n    n -= 1\n    return (NAMESPACE_CHARACTERS[(n / length)] + _ord_to_namespace((n % length), (_max_length - 1)))", "docstring": "Convert a namespace ordinal to a namespace string.\n\nConverts an int, representing the sequence number of a namespace ordered\nlexographically, into a namespace string.\n\n>>> _ord_to_namespace(0)\n''\n>>> _ord_to_namespace(1)\n'-'\n>>> _ord_to_namespace(2)\n'--'\n>>> _ord_to_namespace(3)\n'---'\n\nArgs:\nn: A number representing the lexographical ordering of a namespace.\n_max_length: The maximum namespace length.\nReturns:\nA string representing the nth namespace in lexographical order.", "source": "codesearchnet"}
{"code": "def rouge_l_fscore(predictions, labels, **unused_kwargs):\n  \n  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n  \n  outputs = tf.squeeze(outputs, axis=[-1, -2])\n  labels = tf.squeeze(labels, axis=[-1, -2])\n  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),\n                               tf.float32)\n  return rouge_l_f_score, tf.constant(1.0)", "docstring": "ROUGE scores computation between labels and predictions.\n\nThis is an approximate ROUGE scoring method since we do not glue word pieces\nor decode the ids and tokenize the output.\n\nArgs:\npredictions: tensor, model predictions\nlabels: tensor, gold output.\n\nReturns:\nrouge_l_fscore: approx rouge-l f1 score.", "source": "juraj-google-style"}
{"code": "def __init__(self, field, **kwargs):\n\t\t\n\t\t\n\t\tself.attrs = kwargs\n\t\tself.attrs.update(field.field.widget.attrs)\n\n\t\t\n\t\tself.field = field\n\t\tself.widget = field.field.widget\n\n\t\t\n\t\tself.values = {\"class\": [], \"label\": \"\", \"help\": \"\", \"errors\": \"\"}", "docstring": "Initializer for Field class.\n\nArgs:\nfield (BoundField): Form field\n**kwargs (dict): Field attributes", "source": "juraj-google-style"}
{"code": "def provide(self, cls):\n        \n        support.verify_class_type(cls, 'cls')\n        if not self._is_injectable_fn(cls):\n            provide_loc = locations.get_back_frame_loc()\n            raise errors.NonExplicitlyBoundClassError(provide_loc, cls)\n        try:\n            return self._obj_provider.provide_class(\n                cls, self._injection_context_factory.new(cls.__init__),\n                direct_init_pargs=[], direct_init_kwargs={})\n        except errors.Error as e:\n            if self._use_short_stack_traces:\n                raise e\n            else:\n                raise", "docstring": "Provides an instance of the given class.\n\nArgs:\ncls: a class (not an instance)\nReturns:\nan instance of cls\nRaises:\nError: an instance of cls is not providable", "source": "juraj-google-style"}
{"code": "def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None):\n    if (not user_agent_config_yaml):\n        user_agent_config_yaml = cls.default_user_agent_config_yaml\n        logger.info(('No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml))\n    if (not isfile(user_agent_config_yaml)):\n        raise UserAgentError(\"User_agent should be supplied in a YAML config file. It can be your project's name for example.\")\n    logger.info(('Loading user agent config from: %s' % user_agent_config_yaml))\n    user_agent_config_dict = load_yaml(user_agent_config_yaml)\n    if user_agent_lookup:\n        user_agent_config_dict = user_agent_config_dict.get(user_agent_lookup)\n    if (not user_agent_config_dict):\n        raise UserAgentError(('No user agent information read from: %s' % user_agent_config_yaml))\n    ua = user_agent_config_dict.get('user_agent')\n    return cls._construct(user_agent_config_dict, prefix, ua)", "docstring": "Load user agent YAML file\n\nArgs:\nprefix (str): Text to put at start of user agent\nuser_agent_config_yaml (str): Path to user agent YAML file\nuser_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.\n\nReturns:\nstr: user agent", "source": "codesearchnet"}
{"code": "def format_argspec_plus(fn, grouped=True):\n    spec = ((callable(fn) and inspect.getargspec(fn)) or fn)\n    args = inspect.formatargspec(*spec)\n    if spec[0]:\n        self_arg = spec[0][0]\n    elif spec[1]:\n        self_arg = ('%s[0]' % spec[1])\n    else:\n        self_arg = None\n    apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])\n    defaulted_vals = (((spec[3] is not None) and spec[0][(0 - len(spec[3])):]) or ())\n    apply_kw = inspect.formatargspec(spec[0], spec[1], spec[2], defaulted_vals, formatvalue=(lambda x: ('=' + x)))\n    if grouped:\n        return dict(args=args, self_arg=self_arg, apply_pos=apply_pos, apply_kw=apply_kw)\n    else:\n        return dict(args=args[1:(- 1)], self_arg=self_arg, apply_pos=apply_pos[1:(- 1)], apply_kw=apply_kw[1:(- 1)])", "docstring": "Returns a dictionary of formatted, introspected function arguments.\n\nA enhanced variant of inspect.formatargspec to support code generation.\n\nfn\nAn inspectable callable or tuple of inspect getargspec() results.\ngrouped\nDefaults to True; include (parens, around, argument) lists\n\nReturns:\n\nargs\nFull inspect.formatargspec for fn\nself_arg\nThe name of the first positional argument, varargs[0], or None\nif the function defines no positional arguments.\napply_pos\nargs, re-written in calling rather than receiving syntax.  Arguments are\npassed positionally.\napply_kw\nLike apply_pos, except keyword-ish args are passed as keywords.\n\nExample::\n\n>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)\n{'args': '(self, a, b, c=3, **d)',\n'self_arg': 'self',\n'apply_kw': '(self, a, b, c=c, **d)',\n'apply_pos': '(self, a, b, c, **d)'}", "source": "codesearchnet"}
{"code": "def list_channels(self, collection_name, experiment_name):\n    dont_care = 'image'\n    chan = ChannelResource(name='', collection_name=collection_name, experiment_name=experiment_name, type=dont_care)\n    return self._list_resource(chan)", "docstring": "List all channels belonging to the named experiment that is part\nof the named collection.\n\nArgs:\ncollection_name (string): Name of the parent collection.\nexperiment_name (string): Name of the parent experiment.\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def get_plot(self, ylim=None, units=\"thz\"):\n        \n\n        u = freq_units(units)\n\n        plt = pretty_plot(12, 8)\n\n        band_linewidth = 1\n\n        data = self.bs_plot_data()\n        for d in range(len(data['distances'])):\n            for i in range(self._nb_bands):\n                plt.plot(data['distances'][d],\n                         [data['frequency'][d][i][j] * u.factor\n                          for j in range(len(data['distances'][d]))], 'b-',\n                         linewidth=band_linewidth)\n\n        self._maketicks(plt)\n\n        \n        plt.axhline(0, linewidth=1, color='k')\n\n        \n        plt.xlabel(r'$\\mathrm{Wave\\ Vector}$', fontsize=30)\n        ylabel = r'$\\mathrm{{Frequencies\\ ({})}}$'.format(u.label)\n        plt.ylabel(ylabel, fontsize=30)\n\n        \n        \n        x_max = data['distances'][-1][-1]\n        plt.xlim(0, x_max)\n\n        if ylim is not None:\n            plt.ylim(ylim)\n\n        plt.tight_layout()\n\n        return plt", "docstring": "Get a matplotlib object for the bandstructure plot.\n\nArgs:\nylim: Specify the y-axis (frequency) limits; by default None let\nthe code choose.\nunits: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.", "source": "juraj-google-style"}
{"code": "def _get_best(values: List[float], losses: List[float], max_loss_div: float=0.9, min_val_div: float=10.0) -> float:\n    assert (len(values) == len(losses)), 'lengths of values and losses should be equal'\n    min_ind = np.argmin(losses)\n    for i in range((min_ind - 1), 0, (- 1)):\n        if (((losses[i] * max_loss_div) > losses[min_ind]) or ((values[i] * min_val_div) < values[min_ind])):\n            return values[(i + 1)]\n    return (values[min_ind] / min_val_div)", "docstring": "Find the best value according to given losses\n\nArgs:\nvalues: list of considered values\nlosses: list of obtained loss values corresponding to `values`\nmax_loss_div: maximal divergence of loss to be considered significant\nmin_val_div: minimum divergence of loss to be considered significant\n\nReturns:\nbest value divided by `min_val_div`", "source": "codesearchnet"}
{"code": "def correlation_matrix(df):\n    \n    columns = df.columns.tolist()\n    corr = pd.DataFrame(\n        np.corrcoef(df, rowvar=0), columns=columns, index=columns)\n    return corr", "docstring": "Returns a pandas DataFrame with the pair-wise correlations of the columns.\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "juraj-google-style"}
{"code": "def file_crc32(filePath):\n    \n    crc = 0\n    with open(filePath, 'rb') as f:\n        for block in _file_iter(f, _BLOCK_SIZE):\n            crc = binascii.crc32(block, crc) & 0xFFFFFFFF\n    return crc", "docstring": "计算文件的crc32检验码:\n\nArgs:\nfilePath: 待计算校验码的文件路径\n\nReturns:\n文件内容的crc32校验码。", "source": "juraj-google-style"}
{"code": "def remove(self, cluster_id):\n        \n        cluster = self._storage.pop(cluster_id)\n        cluster.cleanup()", "docstring": "remove cluster and data stuff\nArgs:\ncluster_id - cluster identity", "source": "juraj-google-style"}
{"code": "def FromTimeString(cls, time_string, dayfirst=False, gmt_as_timezone=True, timezone=pytz.UTC):\n    if ((not gmt_as_timezone) and time_string.endswith(' GMT')):\n        time_string = '{0:s}UTC'.format(time_string[:(- 3)])\n    try:\n        datetime_object = dateutil.parser.parse(time_string, dayfirst=dayfirst)\n    except (TypeError, ValueError) as exception:\n        raise errors.TimestampError('Unable to convert time string: {0:s} in to a datetime object with error: {1!s}'.format(time_string, exception))\n    if datetime_object.tzinfo:\n        datetime_object = datetime_object.astimezone(pytz.UTC)\n    else:\n        datetime_object = timezone.localize(datetime_object)\n    posix_time = int(calendar.timegm(datetime_object.utctimetuple()))\n    timestamp = (posix_time * definitions.MICROSECONDS_PER_SECOND)\n    return (timestamp + datetime_object.microsecond)", "docstring": "Converts a string containing a date and time value into a timestamp.\n\nArgs:\ntime_string: String that contains a date and time value.\ndayfirst: An optional boolean argument. If set to true then the\nparser will change the precedence in which it parses timestamps\nfrom MM-DD-YYYY to DD-MM-YYYY (and YYYY-MM-DD will be\nYYYY-DD-MM, etc).\ngmt_as_timezone: Sometimes the dateutil parser will interpret GMT and UTC\nthe same way, that is not make a distinction. By default\nthis is set to true, that is GMT can be interpreted\ndifferently than UTC. If that is not the expected result\nthis attribute can be set to false.\ntimezone: Optional timezone object (instance of pytz.timezone) that\nthe data and time value in the string represents. This value\nis used when the timezone cannot be determined from the string.\n\nReturns:\nThe timestamp which is an integer containing the number of micro seconds\nsince January 1, 1970, 00:00:00 UTC or 0 on error.\n\nRaises:\nTimestampError: if the time string could not be parsed.", "source": "codesearchnet"}
{"code": "def wait_ssh(roles, retries=100, interval=30):\n    utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')\n    options = {'enos_action': 'ping'}\n    for i in range(0, retries):\n        try:\n            run_ansible([utils_playbook], roles=roles, extra_vars=options, on_error_continue=False)\n            break\n        except EnosUnreachableHostsError as e:\n            logger.info(('Hosts unreachable: %s ' % e.hosts))\n            logger.info(('Retrying... %s/%s' % ((i + 1), retries)))\n            time.sleep(interval)\n    else:\n        raise EnosSSHNotReady('Maximum retries reached')", "docstring": "Wait for all the machines to be ssh-reachable\n\nLet ansible initiates a communication and retries if needed.\n\nArgs:\ninventory (string): path to the inventoy file to test\nretries (int): Number of time we'll be retrying an SSH connection\ninterval (int): Interval to wait in seconds between two retries", "source": "codesearchnet"}
{"code": "def set_quickchart_resource(self, resource):\n        \n        \n        if isinstance(resource, int) and not isinstance(resource, bool):\n            resource = self.get_resources()[resource]\n        if isinstance(resource, hdx.data.resource.Resource) or isinstance(resource, dict):\n            res = resource.get('id')\n            if res is None:\n                resource = resource['name']\n            else:\n                resource = res\n        elif not isinstance(resource, str):\n            raise hdx.data.hdxobject.HDXError('Resource id cannot be found in type %s!' % type(resource).__name__)\n        if is_valid_uuid(resource) is True:\n            search = 'id'\n        else:\n            search = 'name'\n        changed = False\n        for dataset_resource in self.resources:\n            if dataset_resource[search] == resource:\n                dataset_resource.enable_dataset_preview()\n                self.preview_resource()\n                changed = True\n            else:\n                dataset_resource.disable_dataset_preview()\n        return changed", "docstring": "Set the resource that will be used for displaying QuickCharts in dataset preview\n\nArgs:\nresource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position\n\nReturns:\nbool: Returns True if resource for QuickCharts in dataset preview set or False if not", "source": "juraj-google-style"}
{"code": "def refactor_string(self, data, name):\n    features = _detect_future_features(data)\n    if ('print_function' in features):\n        self.driver.grammar = pygram.python_grammar_no_print_statement\n    try:\n        tree = self.driver.parse_string(data)\n    except Exception as err:\n        self.log_error(\"Can't parse %s: %s: %s\", name, err.__class__.__name__, err)\n        return\n    finally:\n        self.driver.grammar = self.grammar\n    tree.future_features = features\n    self.log_debug('Refactoring %s', name)\n    self.refactor_tree(tree, name)\n    return tree", "docstring": "Refactor a given input string.\n\nArgs:\ndata: a string holding the code to be refactored.\nname: a human-readable name for use in error/log messages.\n\nReturns:\nAn AST corresponding to the refactored input stream; None if\nthere were errors during the parse.", "source": "codesearchnet"}
{"code": "def sort_variant_file(infile):\n    \n    command = [\n            'sort',\n            ]\n    command.append('-n')\n    command.append('-k1')\n    command.append('-k3')\n\n    command = command + [infile, '-o', infile]\n\n    logger.info(\"Start sorting variants...\")\n    logger.info(\"Sort command: {0}\".format(' '.join(command)))\n    sort_start = datetime.now()\n    \n    try:\n        call(command)\n    except OSError as e:\n        logger.warning(\"unix command sort does not seem to exist on your system...\")\n        logger.warning(\"genmod needs unix sort to provide a sorted output.\")\n        logger.warning(\"Output VCF will not be sorted since genmod can not find\"\\\n                        \"unix sort\")\n        raise e\n\n    logger.info(\"Sorting done. Time to sort: {0}\".format(datetime.now()-sort_start))\n    \n    return", "docstring": "Sort a modified variant file.\nSorting is based on the first column and the POS.\n\nUses unix sort to sort the variants and overwrites the infile.\n\nArgs:\ninfile : A string that is the path to a file\nmode : 'chromosome' or 'rank'\noutfile : The path to an outfile where the variants should be printed\n\nReturns:\n0 if sorting was performed\n1 if variants where not sorted", "source": "juraj-google-style"}
{"code": "def selfSignCert(self, cert, pkey):\n    cert.set_issuer(cert.get_subject())\n    cert.sign(pkey, self.signing_digest)", "docstring": "Self-sign a certificate.\n\nArgs:\ncert (OpenSSL.crypto.X509): The certificate to sign.\npkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.\n\nExamples:\nSign a given certificate with a given private key:\n\ncdir.selfSignCert(mycert, myotherprivatekey)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def GetCharacterDisplayWidth(char):\n    if not isinstance(char, str):\n        return 1\n    char = unicodedata.normalize('NFC', char)\n    if unicodedata.combining(char) != 0:\n        return 0\n    elif unicodedata.category(char) == 'Cf':\n        return 0\n    elif unicodedata.east_asian_width(char) in 'FW':\n        return 2\n    else:\n        return 1", "docstring": "Returns the monospaced terminal display width of char.\n\nAssumptions:\n- monospaced display\n- ambiguous or unknown chars default to width 1\n- ASCII control char width is 1 => don't use this for control chars\n\nArgs:\nchar: The character to determine the display width of.\n\nReturns:\nThe monospaced terminal display width of char: either 0, 1, or 2.", "source": "github-repos"}
{"code": "def min(x, axis=None, keepdims=False):\n    return math_ops.reduce_min(x, axis, keepdims)", "docstring": "Minimum value in a tensor.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to find minimum values.\nkeepdims: A boolean, whether to keep the dimensions or not.\nIf `keepdims` is `False`, the rank of the tensor is reduced\nby 1. If `keepdims` is `True`,\nthe reduced dimension is retained with length 1.\n\nReturns:\nA tensor with minimum values of `x`.", "source": "github-repos"}
{"code": "def get_size(fileobj):\n    \n\n    old_pos = fileobj.tell()\n    try:\n        fileobj.seek(0, 2)\n        return fileobj.tell()\n    finally:\n        fileobj.seek(old_pos, 0)", "docstring": "Returns the size of the file.\nThe position when passed in will be preserved if no error occurs.\n\nArgs:\nfileobj (fileobj)\nReturns:\nint: The size of the file\nRaises:\nIOError", "source": "juraj-google-style"}
{"code": "def branches():\n    out = shell.run('git branch', capture=True, never_pretend=True).stdout.strip()\n    return [x.strip('* \\t\\n') for x in out.splitlines()]", "docstring": "Return a list of branches in the current repo.\n\nReturns:\nlist[str]: A list of branches in the current repo.", "source": "codesearchnet"}
{"code": "def create_datastore_for_topline(self, delete_first=0, path=None):\n    data = load_yaml(script_dir_plus_file(join('..', 'hdx_datasource_topline.yml'), Resource))\n    self.create_datastore_from_dict_schema(data, delete_first, path=path)", "docstring": "For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the built in\nYAML definition for a topline. If path is not supplied, the file is first downloaded from HDX.\n\nArgs:\ndelete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.\npath (Optional[str]): Local path to file that was uploaded. Defaults to None.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _ParseCString(self, page_data, string_offset):\n    \n    cstring_map = self._GetDataTypeMap('cstring')\n\n    try:\n      value_string = self._ReadStructureFromByteStream(\n          page_data[string_offset:], string_offset, cstring_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to map string data at offset: 0x{0:08x} with error: '\n          '{1!s}').format(string_offset, exception))\n\n    return value_string.rstrip('\\x00')", "docstring": "Parses a C string from the page data.\n\nArgs:\npage_data (bytes): page data.\nstring_offset (int): offset of the string relative to the start\nof the page.\n\nReturns:\nstr: string.\n\nRaises:\nParseError: when the string cannot be parsed.", "source": "juraj-google-style"}
{"code": "def fetch(self, customer_id, token_id, data={}, **kwargs):\n    url = '{}/{}/tokens/{}'.format(self.base_url, customer_id, token_id)\n    return self.get_url(url, data, **kwargs)", "docstring": "Fetch Token for given Id and given customer Id\n\nArgs:\ncustomer_id : Customer Id for which tokens have to be fetched\ntoken_id    : Id for which TOken object has to be fetched\n\nReturns:\nToken dict for given token Id", "source": "codesearchnet"}
{"code": "def remove_alias(type_):\n    \n    if isinstance(type_, cpptypes.type_t):\n        type_ref = type_\n    elif isinstance(type_, typedef.typedef_t):\n        type_ref = type_.decl_type\n    else:\n        \n        return type_\n    if type_ref.cache.remove_alias:\n        return type_ref.cache.remove_alias\n    no_alias = __remove_alias(type_ref.clone())\n    type_ref.cache.remove_alias = no_alias\n    return no_alias", "docstring": "Returns `type_t` without typedef\n\nArgs:\ntype_ (type_t | declaration_t): type or declaration\n\nReturns:\ntype_t: the type associated to the inputted declaration", "source": "juraj-google-style"}
{"code": "def body(self, features):\n    features['targets'] = features['inputs']\n    is_training = (self.hparams.mode == tf.estimator.ModeKeys.TRAIN)\n    inputs = tf.to_float(features['targets_raw'])\n    z = tf.random_uniform([self.hparams.batch_size, self.hparams.bottleneck_bits], minval=(- 1), maxval=1, name='z')\n    out_shape = common_layers.shape_list(inputs)[1:4]\n    g = self.generator(z, is_training, out_shape)\n    losses = self.losses(inputs, g)\n    summary_g_image = tf.reshape(g[(0, :)], ([1] + common_layers.shape_list(inputs)[1:]))\n    tf.summary.image('generated', summary_g_image, max_outputs=1)\n    if is_training:\n        return (tf.zeros_like(inputs), losses)\n    return (tf.reshape(g, tf.shape(inputs)), losses)", "docstring": "Body of the model.\n\nArgs:\nfeatures: a dictionary with the tensors.\n\nReturns:\nA pair (predictions, losses) where predictions is the generated image\nand losses is a dictionary of losses (that get added for the final loss).", "source": "codesearchnet"}
{"code": "def dump(self, filename, encoding=\"utf8\"):\n        \n        with open(filename, mode='w', encoding=encoding) as text_file:\n            text_file.write(self.single_string())", "docstring": "Dumps the ascii art in the file.\nArgs:\nfilename (str): File to dump the ascii art.\nencoding (str): Optional. Default \"utf-8\".", "source": "juraj-google-style"}
{"code": "def recipe_salesforce_to_bigquery(config, domain, client, secret, username, password, query, auth_read, dataset, table, schema):\n    salesforce(config, {'auth': auth_read, 'domain': domain, 'client': client, 'secret': secret, 'username': username, 'password': password, 'query': query, 'out': {'bigquery': {'dataset': dataset, 'table': table, 'schema': schema}}})", "docstring": "Move query results into a BigQuery table.\n\nArgs:\ndomain (string) - Retrieve from a Salesforce Domain.\nclient (string) - Retrieve from a Salesforce App.\nsecret (string) - Retrieve from a Salesforce App.\nusername (email) - Your Salesforce user email.\npassword (password) - Your Salesforce login password.\nquery (string) - The query to run in Salesforce.\nauth_read (authentication) - Credentials used for reading data.\ndataset (string) - Existing BigQuery dataset.\ntable (string) - Table to create from this report.\nschema (json) - Schema provided in JSON list format or empty list.", "source": "github-repos"}
{"code": "def get_messages(self, name):\n        \n\n        return self._loop.run_coroutine(self._client.get_messages(name))", "docstring": "Get stored messages for a service.\n\nArgs:\nname (string): The name of the service to get messages from.\n\nReturns:\nlist(ServiceMessage): A list of the messages stored for this service", "source": "juraj-google-style"}
{"code": "def convert(self, vroot, entry_variables):\n        \n        self.graph_info = GraphInfo(vroot)\n        self.entry_variables = entry_variables\n\n        with nn.parameter_scope(self.name):\n            \n            for t, func in enumerate(self.graph_info.funcs):\n                \n                if func.name in self.activation_functions:\n                    activation_func = func\n                    o = self._fixed_point_activation_conversion(\n                        activation_func)\n                    continue\n                \n                o = self._identity_conversion(func)\n\n        self.end_variable = o\n        return self.end_variable", "docstring": "All functions are replaced with the same `new` function.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj-google-style"}
{"code": "def get_box_folder_location():\n    box_prefs_path = 'Library/Application Support/Box/Box Sync/sync_root_folder.txt'\n    box_home = None\n    box_prefs = os.path.join(os.environ['HOME'], box_prefs_path)\n    try:\n        with open(box_prefs, 'r') as sync_path:\n            data = sync_path.read()\n            box_home = data\n    except IOError:\n        error('Unable to find your Box prefs =(')\n    return box_home", "docstring": "Try to locate the Box folder.\n\nReturns:\n(str) Full path to the current Box folder", "source": "codesearchnet"}
{"code": "def GetEventData(self, data_type):\n    event_data = events.EventData(data_type=data_type)\n    for (property_name, property_value) in iter(self._properties.items()):\n        if isinstance(property_value, py2to3.BYTES_TYPE):\n            property_value = repr(property_value)\n        setattr(event_data, property_name, property_value)\n    return event_data", "docstring": "Retrieves the properties as event data.\n\nArgs:\ndata_type (str): event data type.\n\nReturns:\nEventData: event data.", "source": "codesearchnet"}
{"code": "def read_user_data(self, user_data_path):\n    raw_user_data = read_value_from_path(user_data_path)\n    variables = self.get_variables()\n    return parse_user_data(variables, raw_user_data, self.name)", "docstring": "Reads and parses a user_data file.\n\nArgs:\nuser_data_path (str):\npath to the userdata file\n\nReturns:\nstr: the parsed user data file", "source": "codesearchnet"}
{"code": "def flatten(schedule: ScheduleComponent, name: str=None) -> Schedule:\n    if (name is None):\n        name = schedule.name\n    return Schedule(*schedule.instructions, name=name)", "docstring": "Create a flattened schedule.\n\nArgs:\nschedule: Schedules to flatten\nname: Name of the new schedule. Defaults to first element of `schedules`", "source": "codesearchnet"}
{"code": "def sample(self, sample_shape=(), seed=None, name='sample'):\n    return self._call_sample_n(sample_shape, seed, name)", "docstring": "Generate samples of the specified shape.\n\nNote that a call to `sample()` without arguments will generate a single\nsample.\n\nArgs:\nsample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.\nseed: Python integer seed for RNG\nname: name to give to the op.\n\nReturns:\nsamples: a `Tensor` with prepended dimensions `sample_shape`.", "source": "github-repos"}
{"code": "def parse_log(file_path):\n    if (not os.path.isfile(file_path)):\n        return elements.error('Output Log', ('Could not open file: ' + file_path.split(os.sep)[(- 1)]))\n    headers = ['Converged Iterations', 'Avg. Iterations to Converge', 'Processor Count', 'Dycore Type']\n    with open(file_path, 'r') as f:\n        dycore_types = {'0': 'Glide', '1': 'Glam', '2': 'Glissade', '3': 'Albany_felix', '4': 'BISICLES'}\n        curr_step = 0\n        proc_count = 0\n        iter_number = 0\n        converged_iters = []\n        iters_to_converge = []\n        for line in f:\n            split = line.split()\n            if ('CISM dycore type' in line):\n                if (line.split()[(- 1)] == '='):\n                    dycore_type = dycore_types[next(f).strip()]\n                else:\n                    dycore_type = dycore_types[line.split()[(- 1)]]\n            elif ('total procs' in line):\n                proc_count += int(line.split()[(- 1)])\n            elif ('Nonlinear Solver Step' in line):\n                curr_step = int(line.split()[4])\n            elif ('Compute ice velocities, time = ' in line):\n                converged_iters.append(curr_step)\n                curr_step = float(line.split()[(- 1)])\n            elif ('\"SOLVE_STATUS_CONVERGED\"' in line):\n                split = line.split()\n                iters_to_converge.append(int(split[(split.index('\"SOLVE_STATUS_CONVERGED\"') + 2)]))\n            elif ('Compute dH/dt' in line):\n                iters_to_converge.append(int(iter_number))\n            elif ((len(split) > 0) and split[0].isdigit()):\n                iter_number = split[0]\n        if (iters_to_converge == []):\n            iters_to_converge.append(int(iter_number))\n    data = {'Dycore Type': dycore_type, 'Processor Count': proc_count, 'Converged Iterations': len(converged_iters), 'Avg. Iterations to Converge': np.mean(iters_to_converge)}\n    return elements.table('Output Log', headers, data)", "docstring": "Parse a CISM output log and extract some information.\n\nArgs:\nfile_path: absolute path to the log file\n\nReturn:\nA dictionary created by the elements object corresponding to\nthe results of the bit for bit testing", "source": "codesearchnet"}
{"code": "def create_handlers_map(prefix='.*'):\n  \n  return [\n      (prefix + '/output', _BarrierHandler),\n      (prefix + '/run', _PipelineHandler),\n      (prefix + '/finalized', _PipelineHandler),\n      (prefix + '/cleanup', _CleanupHandler),\n      (prefix + '/abort', _PipelineHandler),\n      (prefix + '/fanout', _FanoutHandler),\n      (prefix + '/fanout_abort', _FanoutAbortHandler),\n      (prefix + '/callback', _CallbackHandler),\n      (prefix + '/rpc/tree', status_ui._TreeStatusHandler),\n      (prefix + '/rpc/class_paths', status_ui._ClassPathListHandler),\n      (prefix + '/rpc/list', status_ui._RootListHandler),\n      (prefix + '(/.+)', status_ui._StatusUiHandler),\n      ]", "docstring": "Create new handlers map.\n\nArgs:\nprefix: url prefix to use.\n\nReturns:\nlist of (regexp, handler) pairs for WSGIApplication constructor.", "source": "juraj-google-style"}
{"code": "def with_doc(fn_with_doc_to_copy):\n\n    def decorator(wrapper_init):\n\n        @wrapt.decorator\n        def wrapping_fn(unused_wrapped, instance, args, kwargs):\n            wrapper_init(instance, *args, **kwargs)\n        return wrapping_fn(fn_with_doc_to_copy)\n    return decorator", "docstring": "Returns a decorator to copy documentation from the given function.\n\nDocstring is copied, including *args and **kwargs documentation.\n\nArgs:\nfn_with_doc_to_copy: Function whose docstring, including *args and\n**kwargs documentation, is to be copied.\n\nReturns:\nDecorated version of `wrapper_init` with documentation copied from\n`fn_with_doc_to_copy`.", "source": "codesearchnet"}
{"code": "def update_from_string(self, update_str: str):\n    d = dict((x.split('=') for x in update_str.split(',')))\n    for k, v in d.items():\n        if not hasattr(self, k):\n            raise ValueError(f\"key {k} isn't in the original config dict\")\n        old_v = getattr(self, k)\n        if isinstance(old_v, bool):\n            if v.lower() in ['true', '1', 'y', 'yes']:\n                v = True\n            elif v.lower() in ['false', '0', 'n', 'no']:\n                v = False\n            else:\n                raise ValueError(f\"can't derive true or false from {v} (key {k})\")\n        elif isinstance(old_v, int):\n            v = int(v)\n        elif isinstance(old_v, float):\n            v = float(v)\n        elif not isinstance(old_v, str):\n            raise TypeError(f'You can only update int, float, bool or string values in the config, got {v} for key {k}')\n        setattr(self, k, v)", "docstring": "Updates attributes of this class with attributes from `update_str`.\n\nThe expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:\n\"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\"\n\nThe keys to change have to already exist in the config object.\n\nArgs:\nupdate_str (`str`): String with attributes that should be updated for this class.", "source": "github-repos"}
{"code": "def createEditor(self, parent, option, index):\n    combo = QtGui.QComboBox(parent)\n    combo.addItems(SupportedDtypes.names())\n    combo.currentIndexChanged.connect(self.currentIndexChanged)\n    return combo", "docstring": "Creates an Editor Widget for the given index.\n\nEnables the user to manipulate the displayed data in place. An editor\nis created, which performs the change.\nThe widget used will be a `QComboBox` with all available datatypes in the\n`pandas` project.\n\nArgs:\nparent (QtCore.QWidget): Defines the parent for the created editor.\noption (QtGui.QStyleOptionViewItem): contains all the information\nthat QStyle functions need to draw the items.\nindex (QtCore.QModelIndex): The item/index which shall be edited.\n\nReturns:\nQtGui.QWidget: he widget used to edit the item specified by index\nfor editing.", "source": "codesearchnet"}
{"code": "def _example_short_number_for_cost(region_code, cost):\n    \n    metadata = PhoneMetadata.short_metadata_for_region(region_code)\n    if metadata is None:\n        return U_EMPTY_STRING\n    desc = None\n    if cost == ShortNumberCost.TOLL_FREE:\n        desc = metadata.toll_free\n    elif cost == ShortNumberCost.STANDARD_RATE:\n        desc = metadata.standard_rate\n    elif cost == ShortNumberCost.PREMIUM_RATE:\n        desc = metadata.premium_rate\n    else:\n        \n        \n        pass\n    if desc is not None and desc.example_number is not None:\n        return desc.example_number\n    return U_EMPTY_STRING", "docstring": "Gets a valid short number for the specified cost category.\n\nArguments:\nregion_code -- the region for which an example short number is needed.\ncost -- the cost category of number that is needed.\n\nReturns a valid short number for the specified region and cost\ncategory. Returns an empty string when the metadata does not contain such\ninformation, or the cost is UNKNOWN_COST.", "source": "juraj-google-style"}
{"code": "def tas53(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[33] == '0':\n        return None\n\n    tas = bin2int(d[34:46]) * 0.5   \n    return round(tas, 1)", "docstring": "Aircraft true airspeed, BDS 5,3 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message\n\nReturns:\nfloat: true airspeed in knots", "source": "juraj-google-style"}
{"code": "class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel):\n\n    def __init__(self, config: ConditionalDetrConfig):\n        super().__init__(config)\n        self.dropout = config.dropout\n        self.layerdrop = config.encoder_layerdrop\n        self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)])\n        self.post_init()\n\n    def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        hidden_states = inputs_embeds\n        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n        if attention_mask is not None:\n            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for i, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            to_drop = False\n            if self.training:\n                dropout_probability = torch.rand([])\n                if dropout_probability < self.layerdrop:\n                    to_drop = True\n            if to_drop:\n                layer_outputs = (None, None)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n                hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n[`ConditionalDetrEncoderLayer`].\n\nThe encoder updates the flattened feature map through multiple self-attention layers.\n\nSmall tweak for ConditionalDETR:\n\n- object_queries are added to the forward pass.\n\nArgs:\nconfig: ConditionalDetrConfig", "source": "github-repos"}
{"code": "def split(x, axis=0):\n    from .function_bases import split as split_base\n    return split_base(x, axis, x.shape[axis])", "docstring": "Split arrays at the specified axis.\n\nIt returns a number corresponding the size of the given\naxis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.\n\nArgs:\nx(~nnabla.Variable): N-D array\naxis(int): Axis\n\nReturns: A :obj:`tuple` of :obj:`~nnabla.Variable` s\n\nSee Also:\n:func:`nnabla.function_bases.split`.", "source": "codesearchnet"}
{"code": "def _GetUncompressedStreamSize(self):\n    self._file_object.seek(0, os.SEEK_SET)\n    self._decompressor = self._GetDecompressor()\n    self._uncompressed_data = b''\n    compressed_data_offset = 0\n    compressed_data_size = self._file_object.get_size()\n    uncompressed_stream_size = 0\n    while (compressed_data_offset < compressed_data_size):\n        read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)\n        if (read_count == 0):\n            break\n        compressed_data_offset += read_count\n        uncompressed_stream_size += self._uncompressed_data_size\n    return uncompressed_stream_size", "docstring": "Retrieves the uncompressed stream size.\n\nReturns:\nint: uncompressed stream size.", "source": "codesearchnet"}
{"code": "def calculate_expiration(self, token):\n    if (not token):\n        return None\n    now = datetime.utcnow()\n    time_to_live = self.config['expiration']\n    if ('exp' not in token):\n        return (now + timedelta(seconds=time_to_live))\n    elif self.config['refresh']:\n        exp = datetime.utcfromtimestamp(token['exp'])\n        if ((exp - now) < timedelta(seconds=(0.5 * time_to_live))):\n            return (now + timedelta(seconds=time_to_live))\n    return None", "docstring": "Calculate token expiration\n\nreturn expiration if the token need to set expiration or refresh,\notherwise return None.\n\nArgs:\ntoken (dict): a decoded token", "source": "codesearchnet"}
{"code": "def get_month_list(to_date, from_date):\n    num_months = get_months_apart(to_date, from_date)\n    month_offset = from_date.month\n    month_list = []\n    for month in range((month_offset - 1), (month_offset + num_months)):\n        year = (from_date.year + (month / 12))\n        real_month = ((month % 12) + 1)\n        month_list.append((year, real_month))\n    return month_list", "docstring": "Generate a list containing year+month between two dates.\n\nReturns:\n[(2013, 11), (2013, 12), (2014, 1)]", "source": "codesearchnet"}
{"code": "def __init__(self, num_evals, steps_per_run=1):\n    self._num_evals = num_evals\n    self._evals_completed = None\n    self._steps_per_run_initial_value = steps_per_run", "docstring": "Constructs the run hook.\n\nArgs:\nnum_evals: The number of evaluations to run for. if set to None, will\niterate the dataset until all inputs are exhausted.\nsteps_per_run: Number of steps executed per run call.", "source": "github-repos"}
{"code": "def generate(self, output_path=None, in_memory=False):\n    result = (dict() if in_memory else 0)\n    logger.info('Generating Statik build...')\n    try:\n        if ((output_path is None) and (not in_memory)):\n            raise InternalError('If project is not to be generated in-memory, an output path must be specified')\n        self.error_context.update(filename=self.config_file_path)\n        self.config = (self.config or StatikConfig(self.config_file_path))\n        if (self.config.encoding is not None):\n            logger.debug('Using encoding: %s', self.config.encoding)\n        else:\n            logger.debug('Using encoding: %s', self.config.encoding)\n        self.error_context.clear()\n        self.models = self.load_models()\n        self.template_engine = StatikTemplateEngine(self)\n        if (self.config.external_database is not None):\n            self.config.external_database.write_files(output_path, self.models)\n        self.views = self.load_views()\n        if (not self.views):\n            raise NoViewsError()\n        self.db = self.load_db_data(self.models)\n        self.project_context = self.load_project_context()\n        in_memory_result = self.process_views()\n        if in_memory:\n            result = in_memory_result\n        else:\n            file_count = self.dump_in_memory_result(in_memory_result, output_path)\n            logger.info('Wrote %d output file(s) to folder: %s', file_count, output_path)\n            self.copy_assets(output_path)\n            result = file_count\n        logger.info('Success!')\n    except StatikError as exc:\n        logger.debug(traceback.format_exc())\n        logger.error(exc.render())\n        raise exc\n    except Exception as exc:\n        logger.debug(traceback.format_exc())\n        _exc = StatikError(message=('Failed to build project. Run Statik in verbose mode (-v) to see ' + 'additional traceback information about this error.'), orig_exc=exc, context=self.error_context)\n        logger.error(_exc.render())\n        raise _exc\n    finally:\n        try:\n            if (self.db is not None):\n                self.db.shutdown()\n        except Exception as e:\n            logger.exception('Unable to clean up properly: %s', e)\n    return result", "docstring": "Executes the Statik project generator.\n\nArgs:\noutput_path: The path to which to write output files.\nin_memory: Whether or not to generate the results in memory. If True, this will\ngenerate the output result as a dictionary. If False, this will write the output\nto files in the output_path.\n\nReturns:\nIf in_memory is True, this returns a dictionary containing the actual generated static\ncontent. If in_memory is False, this returns an integer indicating the number of files\ngenerated in the output path.", "source": "codesearchnet"}
{"code": "def period_max_neighborhood_probability(self, threshold, radius, sigmas=None):\n    if (sigmas is None):\n        sigmas = [0]\n    weights = disk(radius)\n    neighborhood_prob = np.zeros(self.data.shape[2:], dtype=np.float32)\n    thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)\n    for m in range(self.data.shape[0]):\n        thresh_data[(self.data[m].max(axis=0) >= threshold)] = 1\n        maximized = fftconvolve(thresh_data, weights, mode='same')\n        maximized[(maximized > 1)] = 1\n        neighborhood_prob += fftconvolve(maximized, weights, mode='same')\n    neighborhood_prob[(neighborhood_prob < 1)] = 0\n    neighborhood_prob /= (self.data.shape[0] * float(weights.sum()))\n    consensus_probs = []\n    for sigma in sigmas:\n        if (sigma > 0):\n            filtered_prob = gaussian_filter(neighborhood_prob, sigma=sigma)\n        else:\n            filtered_prob = neighborhood_prob\n        ec = EnsembleConsensus(filtered_prob, 'neighbor_prob_{0:02d}-hour_r_{1:d}_s_{2:d}'.format(self.data.shape[1], radius, sigma), self.ensemble_name, self.run_date, (self.variable + '_{0:0.2f}'.format(float(threshold))), self.start_date, self.end_date, '')\n        consensus_probs.append(ec)\n    return consensus_probs", "docstring": "Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.\n\nArgs:\nthreshold (float): splitting threshold for probability calculatations\nradius (int): distance from point in number of grid points to include in neighborhood calculation.\nsigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.\n\nReturns:\nlist of EnsembleConsensus objects", "source": "codesearchnet"}
{"code": "def calculate(self, token_list_x, token_list_y):\n        \n\n        x, y = self.unique(token_list_x, token_list_y)\n        try:\n            result = 2 * len(x & y) / float(sum(map(len, (x, y))))\n        except ZeroDivisionError:\n            result = 0.0\n        return result", "docstring": "Calculate similarity with the Dice coefficient.\n\nConcrete method.\n\nArgs:\ntoken_list_x:    [token, token, token, ...]\ntoken_list_y:    [token, token, token, ...]\n\nReturns:\nSimilarity.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        super(IssueTransaction, self).__init__(*args, **kwargs)\n        self.Type = TransactionType.IssueTransaction", "docstring": "Create an instance.\n\nArgs:\n*args:\n**kwargs:", "source": "juraj-google-style"}
{"code": "def get_tag(filepath: PurePath) -> Optional[Tag]:\n    with open(filepath, encoding='utf-8') as parsed_file:\n        lines = parsed_file.readlines()\n    line_start: Optional[int] = None\n    line_finish: Optional[int] = None\n    tag_prefix: Optional[str] = ''\n    for idx, line in enumerate(lines):\n        if line_start is None and line.endswith(Config.BEAM_PLAYGROUND_TITLE):\n            line_start = idx\n            prefix_len = len(line) - len(Config.BEAM_PLAYGROUND_TITLE)\n            tag_prefix = line[:prefix_len]\n        elif line_start and (not line.startswith(tag_prefix)):\n            line_finish = idx\n            break\n    if not line_start or not line_finish:\n        return None\n    embdedded_yaml_content = ''.join((line[len(tag_prefix):] for line in lines[line_start:line_finish]))\n    yml = yaml.load(embdedded_yaml_content, Loader=yaml.SafeLoader)\n    try:\n        return Tag(filepath=str(filepath), line_start=line_start, line_finish=line_finish, **yml[Config.BEAM_PLAYGROUND])\n    except pydantic.ValidationError as err:\n        if len(err.errors()) == 1 and err.errors()[0]['msg'] == 'multifile is True but no files defined':\n            logging.warning('incomplete multifile example ignored %s', filepath)\n            return None\n        raise", "docstring": "Parse file by filepath and find beam tag\n\nArgs:\nfilepath: path of the file\n\nReturns:\nIf file contains tag, returns Tag object\nIf file doesn't contain tag, returns None", "source": "github-repos"}
{"code": "def single_gate_matrix(gate, params=None):\n    \n\n    \n    \n    (theta, phi, lam) = map(float, single_gate_params(gate, params))\n\n    return np.array([[np.cos(theta / 2),\n                      -np.exp(1j * lam) * np.sin(theta / 2)],\n                     [np.exp(1j * phi) * np.sin(theta / 2),\n                      np.exp(1j * phi + 1j * lam) * np.cos(theta / 2)]])", "docstring": "Get the matrix for a single qubit.\n\nArgs:\ngate(str): the single qubit gate name\nparams(list): the operation parameters op['params']\nReturns:\narray: A numpy array representing the matrix", "source": "juraj-google-style"}
{"code": "def on_deleted(self, event):\n        \n        if not self._event_error:\n            self.logger.info(u\"Change detected from deletion of: %s\",\n                             event.src_path)\n            \n            self.compile_dependencies(event.src_path, include_self=False)", "docstring": "Called when a file or directory is deleted.\n\nTodo:\nMay be bugged with inspector and sass compiler since the file does not\nexist anymore.\n\nArgs:\nevent: Watchdog event, ``watchdog.events.DirDeletedEvent`` or\n``watchdog.events.FileDeletedEvent``.", "source": "juraj-google-style"}
{"code": "def mark_point(img, x, y):\n    \n    overlay = img.copy()\n    output = img.copy()\n\n    alpha = 0.5\n    # Scale factor assumed; the original radius expression was left unclosed in the source.\n    radius = max(5, min(img.shape[:2]) // 30)\n    center = int(x), int(y)\n    color = (0, 0, 255)\n\n    cv2.circle(overlay, center, radius, color, -1)\n    cv2.addWeighted(overlay, alpha, output, 1-alpha, 0, output)\n    return output", "docstring": "Mark a point\n\nArgs:\n- img(numpy): the source image\n- x, y(int): position", "source": "juraj-google-style"}
{"code": "def decrypt(self, message):\n    message = json.loads(message)\n    unencrypted_msg = []\n    for line in message:\n        enc_line = binascii.a2b_base64(line)\n        unencrypted_line = rsa.decrypt(enc_line, self.private_key)\n        unencrypted_msg.append(unencrypted_line)\n    unencrypted_msg = ''.join(unencrypted_msg)\n    return unencrypted_msg", "docstring": "Decrypts a string using our own private key object.\n\nArgs:\nmessage (string): The string of the message to decrypt.\n\nReturns:\nThe unencrypted string.", "source": "codesearchnet"}
{"code": "def moses_pipeline(self, text: str) -> List[str]:\n    text = self.moses_punct_norm(text)\n    text = self.moses_tokenize(text)\n    text = tokenize_numbers(text)\n    return text", "docstring": "Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with\n*aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large\ncomma-separated numbers and floating point values are split. E.g. \"23,000 people are 1.80m tall\" -> \"23 @,@ 000\npeople are 1 @.@ 80m tall\"\n\nArgs:\ntext: Text to be tokenize\n\nReturns:\nA list of tokenized string\n\nExample:\n\n```python\n>>> tokenizer = TransfoXLTokenizer.from_pretrained(\"transfo-xl/transfo-xl-wt103\")\n>>> tokenizer.moses_pipeline(\"23,000 people are 1.80 m tall\")\n['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']\n```", "source": "github-repos"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        if token_ids_1 is not None:\n            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]\n    if token_ids_1 is None:\n        return [1] + [0] * len(token_ids_0) + [1]\n    return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nSet to True if the token list is already formatted with special tokens for the model\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def action_range_type(self) -> Sequence[str]:\n    fluents = self.domain.action_fluents\n    ordering = self.domain.action_fluent_ordering\n    return self._fluent_range_type(fluents, ordering)", "docstring": "The range type of each action fluent in canonical order.\n\nReturns:\nSequence[str]: A tuple of range types representing\nthe range of each fluent.", "source": "codesearchnet"}
{"code": "def enable(self, information, id_or_uri, timeout=-1):\n        \n\n        uri = self._client.build_uri(id_or_uri)\n\n        return self._client.update(information, uri, timeout=timeout)", "docstring": "Enables or disables a range.\n\nArgs:\ninformation (dict): Information to update.\nid_or_uri: ID or URI of range.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "juraj-google-style"}
{"code": "def read_from_hdx(identifier, configuration=None):\n    organization = Organization(configuration=configuration)\n    result = organization._load_from_hdx('organization', identifier)\n    if result:\n        return organization\n    return None", "docstring": "Reads the organization given by identifier from HDX and returns Organization object\n\nArgs:\nidentifier (str): Identifier of organization\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Organization]: Organization object if successful read, None if not", "source": "codesearchnet"}
{"code": "def get_default_configfile_path():\n    base = homebase.user_config_dir(app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False, use_virtualenv=False, create=False)\n    path = os.path.join(base, CONF_FILENAME)\n    return path", "docstring": "Return the default configuration-file path.\n\nTypically returns a user-local configuration file; e.g:\n``~/.config/dwave/dwave.conf``.\n\nReturns:\nstr:\nConfiguration file path.\n\nExamples:\nThis example displays the default configuration file on an Ubuntu Unix system\nrunning IPython 2.7.\n\n>>> import dwave.cloud as dc\n>>> # Display paths\n>>> dc.config.get_configfile_paths(only_existing=False)   # doctest: +SKIP\n['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',\n'/usr/share/upstart/xdg/dwave/dwave.conf',\n'/etc/xdg/dwave/dwave.conf',\n'/home/mary/.config/dwave/dwave.conf',\n'./dwave.conf']\n>>> # Find default configuration path\n>>> dc.config.get_default_configfile_path()   # doctest: +SKIP\n'/home/mary/.config/dwave/dwave.conf'", "source": "codesearchnet"}
{"code": "def get_config_value(self, overrides, skip_environment=False):\n    (label, override, key) = self._search_overrides(overrides, skip_environment)\n    if ((override is None) and (self.default is None) and self.required):\n        raise YapconfItemNotFound('Could not find config value for {0}'.format(self.fq_name), self)\n    if (override is None):\n        self.logger.debug('Config value not found for {0}, falling back to default.'.format(self.name))\n        value = self.default\n    else:\n        value = override[key]\n    if (value is None):\n        return value\n    converted_value = self.convert_config_value(value, label)\n    self._validate_value(converted_value)\n    return converted_value", "docstring": "Get the configuration value from all overrides.\n\nIterates over all overrides given to see if a value can be pulled\nout from them. It will convert each of these values to ensure they\nare the correct type.\n\nArgs:\noverrides: A list of tuples where each tuple is a label and a\ndictionary representing a configuration.\nskip_environment: Skip looking through the environment.\n\nReturns:\nThe converted configuration value.\n\nRaises:\nYapconfItemNotFound: If an item is required but could not be found\nin the configuration.\nYapconfItemError: If a possible value was found but the type\ncannot be determined.\nYapconfValueError: If a possible value is found but during\nconversion, an exception was raised.", "source": "codesearchnet"}
{"code": "def _parse_property(cls, name, value):\n    prop = cls._props.get(name)\n    return_value = value\n    if (not prop):\n        logger.debug(('\"%s\" with value \"%s\" is not a valid property for \"%s\".' % (name, value, cls)))\n        return_value = None\n    elif isinstance(prop, properties.Instance):\n        return_value = prop.instance_class.from_api(**value)\n    elif isinstance(prop, properties.List):\n        return_value = cls._parse_property_list(prop, value)\n    elif isinstance(prop, properties.Color):\n        return_value = cls._parse_property_color(value)\n    return return_value", "docstring": "Parse a property received from the API into an internal object.\n\nArgs:\nname (str): Name of the property on the object.\nvalue (mixed): The unparsed API value.\n\nRaises:\nHelpScoutValidationException: In the event that the property name\nis not found.\n\nReturns:\nmixed: A value compatible with the internal models.", "source": "codesearchnet"}
{"code": "def _parse_batch_lastlog(last_log):\n    regexp = re.compile('(-?[0-9]\\\\d*):\\\\W+(.*)')\n    wrong_commands = list()\n    for line in last_log:\n        result = regexp.match(line)\n        if (result is not None):\n            status_code = result.group(1)\n            command = result.group(2)\n            if (int(status_code) < 0):\n                wrong_commands.append((status_code, command))\n    return wrong_commands", "docstring": "This static method will help reading the result of the commit, command by command.\n\nArgs:\nlast_log(list): A list containing, line by line, the result of committing the changes.\n\nReturns:\nA list of tuples that went wrong. The tuple will contain (*status_code*, *command*)", "source": "codesearchnet"}
{"code": "def create_prefetch(self, addresses):\n        \n\n        with self._lock:\n            for add in addresses:\n                self._state[add] = _ContextFuture(address=add,\n                                                  wait_for_tree=True)", "docstring": "Create futures needed before starting the process of reading the\naddress's value from the merkle tree.\n\nArgs:\naddresses (list of str): addresses in the txn's inputs that\naren't in any base context (or any in the chain).", "source": "juraj-google-style"}
{"code": "def get_auth_token(self, user_payload):\n        \n        now = datetime.utcnow()\n        payload = {\n            'user': user_payload\n        }\n        if 'iat' in self.verify_claims:\n            payload['iat'] = now\n\n        if 'nbf' in self.verify_claims:\n            payload['nbf'] = now + self.leeway\n\n        if 'exp' in self.verify_claims:\n            payload['exp'] = now + self.expiration_delta\n\n        if self.audience is not None:\n            payload['aud'] = self.audience\n\n        if self.issuer is not None:\n            payload['iss'] = self.issuer\n\n        return jwt.encode(\n            payload,\n            self.secret_key,\n            algorithm=self.algorithm,\n            json_encoder=ExtendedJSONEncoder).decode('utf-8')", "docstring": "Create a JWT authentication token from ``user_payload``\n\nArgs:\nuser_payload(dict, required): A `dict` containing required information\nto create authentication token", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = BytearrayStream()\n\n        if self._cryptographic_parameters:\n            self._cryptographic_parameters.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._initialization_vector:\n            self._initialization_vector.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._derivation_data:\n            self._derivation_data.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._salt:\n            self._salt.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._iteration_count:\n            self._iteration_count.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(DerivationParameters, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the DerivationParameters struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def get_bond_lengths(self, indices):\n    coords = ['x', 'y', 'z']\n    if isinstance(indices, pd.DataFrame):\n        i_pos = self.loc[(indices.index, coords)].values\n        b_pos = self.loc[(indices.loc[:, 'b'], coords)].values\n    else:\n        indices = np.array(indices)\n        if (len(indices.shape) == 1):\n            indices = indices[None, :]\n        i_pos = self.loc[(indices[:, 0], coords)].values\n        b_pos = self.loc[(indices[:, 1], coords)].values\n    return np.linalg.norm((i_pos - b_pos), axis=1)", "docstring": "Return the distances between given atoms.\n\nCalculates the distance between the atoms with\nindices ``i`` and ``b``.\nThe indices can be given in three ways:\n\n* As simple list ``[i, b]``\n* As list of lists: ``[[i1, b1], [i2, b2]...]``\n* As :class:`pd.DataFrame` where ``i`` is taken from the index and\n``b`` from the respective column ``'b'``.\n\nArgs:\nindices (list):\n\nReturns:\n:class:`numpy.ndarray`: Vector of distances.", "source": "codesearchnet"}
{"code": "def matrix_worker(data):\n    matrix = data['matrix']\n    Logger.get_logger((__name__ + '.worker')).info(\"Processing pipeline for matrix entry '%s'\", matrix['name'])\n    env = matrix['env'].copy()\n    env.update({'PIPELINE_MATRIX': matrix['name']})\n    pipeline = Pipeline(model=data['model'], env=env, options=data['options'])\n    pipeline.hooks = data['hooks']\n    return pipeline.process(data['pipeline'])", "docstring": "Run pipelines in parallel.\n\nArgs:\ndata(dict): parameters for the pipeline (model, options, ...).\nReturns:\ndict: with two fields: success True/False and captured output (list of str).", "source": "codesearchnet"}
{"code": "def default_value(self):\n    if (callable(self.default) and self.call_default):\n        return self.default()\n    return self.default", "docstring": "Property to return the default value.\n\nIf the default value is callable and call_default is True, return\nthe result of default(). Else return default.\n\nReturns:\nobject: the default value.", "source": "codesearchnet"}
{"code": "class IncStdevTracker(WindowedTracker, StdevTracker):\n\n    def __init__(self, window_mode, **kwargs):\n        super().__init__(window_mode, **kwargs)\n        self._mean = 0\n        self._m2 = 0\n\n    def push(self, x):\n        \n        if not math.isnan(x):\n            self._n += 1\n            delta1 = x - self._mean\n        else:\n            delta1 = 0\n        if self._window_mode == WindowMode.SLIDING:\n            if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):\n                self._n -= 1\n                delta2 = self._mean - old_x\n            else:\n                delta2 = 0\n            super().push(x)\n        else:\n            delta2 = 0\n        if self._n > 0:\n            self._mean += (delta1 + delta2) / self._n\n            if delta1 != 0:\n                self._m2 += delta1 * (x - self._mean)\n            if delta2 != 0:\n                self._m2 += delta2 * (old_x - self._mean)\n        else:\n            self._mean = 0\n            self._m2 = 0\n\n    def get(self):\n        \n        if self._n < 2:\n            return float('nan')\n        dof = self._n - 1\n        return math.sqrt(self._m2 / dof)", "docstring": "Abstract base class for incremental standard deviation trackers.\n\nThis class implements an online algorithm for calculating standard deviation,\nupdating the standard deviation incrementally as new data points arrive.\n\nArgs:\nwindow_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`\nor `SLIDING`.\n**kwargs: Keyword arguments passed to the parent class constructor.", "source": "github-repos"}
{"code": "def on_the_air(self, **kwargs):\n        \n        path = self._get_path('on_the_air')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the list of TV shows that are currently on the air. This query\nlooks for any TV show that has an episode with an air date in the\nnext 7 days.\n\nArgs:\npage: (optional) Minimum 1, maximum 1000.\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _ResizeBicubicGrad(op: ops.Operation, grad):\n    allowed_types = [dtypes.float32, dtypes.float64]\n    grad0 = None\n    if op.inputs[0].dtype in allowed_types:\n        grad0 = gen_image_ops.resize_bicubic_grad(grad, op.inputs[0], align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))\n    return [grad0, None]", "docstring": "The derivatives for bicubic resizing.\n\nArgs:\nop: The ResizeBicubic op.\ngrad: The tensor representing the gradient w.r.t. the output.\n\nReturns:\nThe gradients w.r.t. the input.", "source": "github-repos"}
{"code": "def _get_accepted(self, graph):\n        \n        accepted = []\n        for state in graph.states:\n            if state.final != TropicalWeight(float('inf')):\n                accepted.append(state)\n        return accepted", "docstring": "Find the accepted states\nArgs:\ngraph (DFA): The DFA states\nReturn:\nlist: Returns the list of the accepted states", "source": "juraj-google-style"}
{"code": "def __reg_query_value(handle, value_name):\n        \n        \n        item_value, item_type = win32api.RegQueryValueEx(handle, value_name)\n        if six.PY2 and isinstance(item_value, six.string_types) and not isinstance(item_value, six.text_type):\n            try:\n                item_value = six.text_type(item_value, encoding='mbcs')\n            except UnicodeError:\n                pass\n        if item_type == win32con.REG_EXPAND_SZ:\n            \n            item_value = win32api.ExpandEnvironmentStrings(item_value)\n            item_type = win32con.REG_SZ\n        return item_value, item_type", "docstring": "Calls RegQueryValueEx\n\nIf PY2 ensure unicode string and expand REG_EXPAND_SZ before returning\nRemember to catch not found exceptions when calling.\n\nArgs:\nhandle (object): open registry handle.\nvalue_name (str): Name of the value you wish returned\n\nReturns:\ntuple: value, type", "source": "juraj-google-style"}
{"code": "def set_peer_address(self, value=None, default=False, disable=False):\n        \n        return self._configure_mlag('peer-address', value, default, disable)", "docstring": "Configures the mlag peer-address value\n\nArgs:\nvalue (str): The value to configure the peer-address\ndefault (bool): Configures the peer-address using the\ndefault keyword\ndisable (bool): Negates the peer-address using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "juraj-google-style"}
{"code": "def isregex(value):\n    if (not value):\n        return False\n    return any((isregex_expr(value), isinstance(value, retype)))", "docstring": "Returns ``True`` if the input argument object is a native\nregular expression object, otherwise ``False``.\n\nArguments:\nvalue (mixed): input value to test.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def populate(self, filename):\n        \n\n        if os.path.isfile(filename):\n            fid_st = os.stat(filename)\n            self.name = os.path.abspath(filename)\n            self.full_name = filename\n            self.size = fid_st.st_size\n            self.last_modified = fid_st.st_mtime\n            self.last_accessed = fid_st.st_atime\n            self.last_info_changed = fid_st.st_ctime\n            self.location = os.path.dirname(filename)", "docstring": "Finds the file-stats and populates the class with stat values.\n\nArgs:\nfilename (str): name of the file.", "source": "juraj-google-style"}
{"code": "def __init__(self, shape=None, dtype=dtypes.float32, indices_dtype=dtypes.int64, dense_shape_dtype=None, indices_shape=None):\n    self._shape = tensor_shape.as_shape(shape)\n    self._values_dtype = dtypes.as_dtype(dtype)\n    self._indices_dtype = dtypes.as_dtype(indices_dtype)\n    if dense_shape_dtype is None:\n        self._dense_shape_dtype = None\n    else:\n        self._dense_shape_dtype = dtypes.as_dtype(dense_shape_dtype)\n    self._indices_shape = tensor_shape.as_shape(indices_shape).with_rank(1)", "docstring": "Constructs a type specification for a `tf.IndexedSlices`.\n\nArgs:\nshape: The dense shape of the `IndexedSlices`, or `None` to allow any\ndense shape.\ndtype: `tf.DType` of values in the `IndexedSlices`.\nindices_dtype: `tf.DType` of the `indices` in the `IndexedSlices`.  One\nof `tf.int32` or `tf.int64`.\ndense_shape_dtype: `tf.DType` of the `dense_shape` in the `IndexedSlices`.\nOne of `tf.int32`, `tf.int64`, or `None` (if the `IndexedSlices` has\nno `dense_shape` tensor).\nindices_shape: The shape of the `indices` component, which indicates\nhow many slices are in the `IndexedSlices`.", "source": "github-repos"}
{"code": "def _GetClientLib(service_class_names, language, output_path, build_system, hostname=None, application_path=None):\n    client_libs = []\n    service_configs = GenApiConfig(service_class_names, hostname=hostname, config_string_generator=discovery_generator.DiscoveryGenerator(), application_path=application_path)\n    for (api_name_version, config) in service_configs.iteritems():\n        client_name = (api_name_version + '.zip')\n        client_libs.append(_GenClientLibFromContents(config, language, output_path, build_system, client_name))\n    return client_libs", "docstring": "Fetch client libraries from a cloud service.\n\nArgs:\nservice_class_names: A list of fully qualified ProtoRPC service names.\nlanguage: The client library language to generate. (java)\noutput_path: The directory to output the discovery docs to.\nbuild_system: The target build system for the client library language.\nhostname: A string hostname which will be used as the default version\nhostname. If no hostname is specificied in the @endpoints.api decorator,\nthis value is the fallback. Defaults to None.\napplication_path: A string containing the path to the AppEngine app.\n\nReturns:\nA list of paths to client libraries.", "source": "codesearchnet"}
{"code": "def init_test_variables(self, variables_mapping=None):\n        \n        variables_mapping = variables_mapping or {}\n        variables_mapping = utils.ensure_mapping_format(variables_mapping)\n        variables_mapping.update(self.session_variables_mapping)\n        parsed_variables_mapping = parser.parse_variables_mapping(variables_mapping)\n\n        self.test_variables_mapping = {}\n        \n        self.test_variables_mapping.update(parsed_variables_mapping)\n        self.test_variables_mapping.update(self.session_variables_mapping)", "docstring": "init test variables, called when each test(api) starts.\nvariables_mapping will be evaluated first.\n\nArgs:\nvariables_mapping (dict)\n{\n\"random\": \"${gen_random_string(5)}\",\n\"authorization\": \"${gen_md5($TOKEN, $data, $random)}\",\n\"data\": '{\"name\": \"user\", \"password\": \"123456\"}',\n\"TOKEN\": \"debugtalk\",\n}", "source": "juraj-google-style"}
{"code": "def create_analyzer_cli(dump):\n    analyzer = analyzer_cli.DebugAnalyzer(dump, _cli_config_from_temp_file())\n    registry = debugger_cli_common.CommandHandlerRegistry()\n    registry.register_command_handler('list_tensors', analyzer.list_tensors, analyzer.get_help('list_tensors'), prefix_aliases=['lt'])\n    registry.register_command_handler('node_info', analyzer.node_info, analyzer.get_help('node_info'), prefix_aliases=['ni'])\n    registry.register_command_handler('list_inputs', analyzer.list_inputs, analyzer.get_help('list_inputs'), prefix_aliases=['li'])\n    registry.register_command_handler('list_outputs', analyzer.list_outputs, analyzer.get_help('list_outputs'), prefix_aliases=['lo'])\n    registry.register_command_handler('print_tensor', analyzer.print_tensor, analyzer.get_help('print_tensor'), prefix_aliases=['pt'])\n    registry.register_command_handler('print_source', analyzer.print_source, analyzer.get_help('print_source'), prefix_aliases=['ps'])\n    registry.register_command_handler('list_source', analyzer.list_source, analyzer.get_help('list_source'), prefix_aliases=['ls'])\n    registry.register_command_handler('eval', analyzer.evaluate_expression, analyzer.get_help('eval'), prefix_aliases=['ev'])\n    return (analyzer, registry)", "docstring": "Create an analyzer CLI.\n\nArgs:\ndump: A `DebugDumpDir` object to base the analyzer CLI on.\n\nReturns:\n1) A `DebugAnalyzer` object created based on `dump`.\n2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object\nand has the common tfdbg commands, e.g., lt, ni, li, lo, registered.", "source": "github-repos"}
{"code": "def _fire_timers(self):\n    transform_fired_timers, _ = self._executor.evaluation_context.extract_all_timers()\n    for applied_ptransform, fired_timers in transform_fired_timers:\n        empty_bundle = self._executor.evaluation_context.create_empty_committed_bundle(applied_ptransform.inputs[0])\n        timer_completion_callback = _CompletionCallback(self._executor.evaluation_context, self._executor.all_updates, timer_firings=fired_timers)\n        self._executor.schedule_consumption(applied_ptransform, empty_bundle, fired_timers, timer_completion_callback)\n    return bool(transform_fired_timers)", "docstring": "Schedules triggered consumers if any timers fired.\n\nReturns:\nTrue if timers fired.", "source": "github-repos"}
{"code": "def has_event(self, event, cameo_code):\n        \n        if self.has_cameo_code(cameo_code):\n            entry = self.mapping.get(cameo_code)\n            if entry:\n                return entry[self.event_name[event]]\n        return False", "docstring": "Test whether there is an \"event2\" or \"event3\" entry for the given cameo code\nArgs:\nevent: event key, either \"event2\" or \"event3\"\ncameo_code: CAMEO code to look up\n\nReturns:\nTrue if an entry exists for the given event and cameo code, False otherwise.", "source": "juraj-google-style"}
{"code": "def fresnel_sin(x, name=None):\n    with ops.name_scope(name, 'fresnel_sin', [x]):\n        return gen_special_math_ops.fresnel_sin(x)", "docstring": "Computes Fresnel's sine integral of `x` element-wise.\n\nThe Fresnel sine integral is defined as the integral of `sin(t^2)` from\n`0` to `x`, with the domain of definition all real numbers.\n\n>>> tf.math.special.fresnel_sin([-1., -0.1, 0.1, 1.]).numpy()\narray([-0.43825912, -0.00052359,  0.00052359,  0.43825912], dtype=float32)\n\nThis implementation is based off of the Cephes math library.\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types:\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.fresnel first output.\n@end_compatibility", "source": "github-repos"}
{"code": "def _validate_pos_args_syntax(alias_name, alias_command):\n    \n    pos_args_from_alias = get_placeholders(alias_name)\n    \n    \n    pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]\n\n    if set(pos_args_from_alias) != set(pos_args_from_command):\n        arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)\n        raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',\n                                                     arg_diff,\n                                                     'is' if len(arg_diff) == 1 else 'are'))", "docstring": "Check if the positional argument syntax is valid in alias name and alias command.\n\nArgs:\nalias_name: The name of the alias to validate.\nalias_command: The command to validate.", "source": "juraj-google-style"}
{"code": "def ProcessMessages(self, active_notifications, queue_manager, time_limit=0):\n    now = time.time()\n    processed = 0\n    for notification in active_notifications:\n        if (notification.session_id not in self.queued_flows):\n            if (time_limit and ((time.time() - now) > time_limit)):\n                break\n            processed += 1\n            self.queued_flows.Put(notification.session_id, 1)\n            self.thread_pool.AddTask(target=self._ProcessMessages, args=(notification, queue_manager.Copy()), name=self.__class__.__name__)\n    return processed", "docstring": "Processes all the flows in the messages.\n\nPrecondition: All tasks come from the same queue.\n\nNote that the server actually completes the requests in the\nflow when receiving the messages from the client. We do not really\nlook at the messages here at all any more - we just work from the\ncompleted messages in the flow RDFValue.\n\nArgs:\nactive_notifications: The list of notifications.\nqueue_manager: QueueManager object used to manage notifications,\nrequests and responses.\ntime_limit: If set return as soon as possible after this many seconds.\n\nReturns:\nThe number of processed flows.", "source": "codesearchnet"}
{"code": "def build_this_graph(G, settings, dont_update_shas_of=None):\n    \n    verbose = settings[\"verbose\"]\n    quiet = settings[\"quiet\"]\n    force = settings[\"force\"]\n    recon = settings[\"recon\"]\n    parallel = settings[\"parallel\"]\n    error = settings[\"error\"]\n    sprint = settings[\"sprint\"]\n\n    if not dont_update_shas_of:\n        dont_update_shas_of = []\n    sprint(\"Checking that graph is directed acyclic\", level=\"verbose\")\n    if not nx.is_directed_acyclic_graph(G):\n        errmes = \"Dependency resolution is impossible; \"\n        errmes += \"graph is not directed and acyclic\"\n        errmes += \"\\nCheck the Sakefile\\n\"\n        error(errmes)\n        sys.exit(1)\n    sprint(\"Dependency resolution is possible\", level=\"verbose\")\n    in_mem_shas = take_shas_of_all_files(G, settings)\n    from_store = {}\n    if not os.path.isfile(\".shastore\"):\n        write_shas_to_shastore(in_mem_shas)\n        in_mem_shas = {}\n        in_mem_shas['files'] = {}\n    with io.open(\".shastore\", \"r\") as fh:\n        shas_on_disk = fh.read()\n    from_store = yaml.load(shas_on_disk)\n    check_shastore_version(from_store, settings)\n    if not from_store:\n        write_shas_to_shastore(in_mem_shas)\n        in_mem_shas = {}\n        in_mem_shas['files'] = {}\n        with io.open(\".shastore\", \"r\") as fh:\n            shas_on_disk = fh.read()\n        from_store = yaml.load(shas_on_disk)\n    \n    if parallel:\n        for line in parallel_sort(G):\n            line = sorted(line)\n            out = \"Checking if targets '{}' need to be run\"\n            sprint(out.format(\", \".join(line)), level=\"verbose\")\n            to_build = []\n            for item in line:\n                if needs_to_run(G, item, in_mem_shas, from_store, settings):\n                    to_build.append(item)\n            if to_build:\n                if recon:\n                    if len(to_build) == 1:\n                        out = \"Would run target '{}'\"\n                        sprint(out.format(to_build[0]))\n                    else:\n                        out = \"Would run targets '{}' in parallel\"\n                        sprint(out.format(\", \".join(to_build)))\n                    continue\n                parallel_run_these(G, to_build, in_mem_shas, from_store,\n                                   settings, dont_update_shas_of)\n    \n    else:\n        \n        \n        targets = []\n        for line in parallel_sort(G):\n            for item in sorted(line):\n                targets.append(item)\n        for target in targets:\n            outstr = \"Checking if target '{}' needs to be run\"\n            sprint(outstr.format(target), level=\"verbose\")\n            if needs_to_run(G, target, in_mem_shas, from_store, settings):\n                if recon:\n                    sprint(\"Would run target: {}\".format(target))\n                    continue\n                run_the_target(G, target, settings)\n                node_dict = get_the_node_dict(G, target)\n                if \"output\" in node_dict:\n                    for output in acts.get_all_outputs(node_dict):\n                        if output not in dont_update_shas_of:\n                            in_mem_shas['files'][output] = {\"sha\": get_sha(output,\n                                                                           settings)}\n                            write_shas_to_shastore(in_mem_shas)\n                if \"dependencies\" in node_dict:\n                    for dep in acts.get_all_dependencies(node_dict):\n                        if dep not in dont_update_shas_of:\n                            in_mem_shas['files'][dep] = {\"sha\": get_sha(dep,\n                                                                        settings)}\n                            write_shas_to_shastore(in_mem_shas)\n\n    if recon:\n        return 0\n    in_mem_shas = take_shas_of_all_files(G, settings)\n    if in_mem_shas:\n        in_mem_shas = merge_from_store_and_in_mems(from_store, in_mem_shas,\n                                                   dont_update_shas_of)\n        write_shas_to_shastore(in_mem_shas)\n    sprint(\"Done\", color=True)\n    return 0", "docstring": "This is the master function that performs the building.\n\nArgs:\nA graph (often a subgraph)\nThe settings dictionary\nAn optional list of files to not update the shas of\n(needed when building specific targets)\n\nReturns:\n0 if successful\nUN-success results in a fatal error so it will return 0 or nothing", "source": "juraj-google-style"}
{"code": "def CreateJob(self, cron_args=None, job_id=None, token=None, enabled=True):\n    \n    if not job_id:\n      uid = random.UInt16()\n      job_id = \"%s_%s\" % (cron_args.flow_name, uid)\n\n    flow_runner_args = rdf_flow_runner.FlowRunnerArgs(\n        flow_name=\"CreateAndRunGenericHuntFlow\")\n\n    flow_args = rdf_hunts.CreateGenericHuntFlowArgs()\n    flow_args.hunt_args.flow_args = cron_args.flow_args\n    flow_args.hunt_args.flow_runner_args.flow_name = cron_args.flow_name\n    flow_args.hunt_runner_args = cron_args.hunt_runner_args\n    flow_args.hunt_runner_args.hunt_name = \"GenericHunt\"\n\n    create_cron_args = rdf_cronjobs.CreateCronJobFlowArgs(\n        description=cron_args.description,\n        periodicity=cron_args.frequency,\n        flow_runner_args=flow_runner_args,\n        flow_args=flow_args,\n        allow_overruns=cron_args.allow_overruns,\n        lifetime=cron_args.lifetime)\n\n    cron_job_urn = self.CRON_JOBS_PATH.Add(job_id)\n    with aff4.FACTORY.Create(\n        cron_job_urn,\n        aff4_type=CronJob,\n        mode=\"rw\",\n        token=token,\n        force_new_version=False) as cron_job:\n\n      \n      \n      existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)\n      if existing_cron_args and existing_cron_args.start_time:\n        create_cron_args.start_time = existing_cron_args.start_time\n\n      if create_cron_args != existing_cron_args:\n        cron_job.Set(cron_job.Schema.CRON_ARGS(create_cron_args))\n\n      cron_job.Set(cron_job.Schema.DISABLED(not enabled))\n\n    return job_id", "docstring": "Creates a cron job that runs given flow with a given frequency.\n\nArgs:\ncron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.\njob_id: Use this job_id instead of an autogenerated unique name (used for\nsystem cron jobs - we want them to have well-defined persistent name).\ntoken: Security token used for data store access.\nenabled: If False, the job object will be created, but will be disabled.\n\nReturns:\nName of the cron job created.", "source": "juraj-google-style"}
{"code": "def heartbeat(self, status_info):\n    for field in ('role', 'ttl', 'load'):\n        if (not (field in status_info)):\n            raise Exception('status_info is missing required field %s', repr(field))\n    val = status_info['ttl']\n    if ((not (isinstance(val, float) or isinstance(val, int))) or (val <= 0)):\n        raise Exception('ttl must be a number > 0')\n    updated_status_info = dict(status_info)\n    updated_status_info['last_heartbeat'] = r.now()\n    if (not ('first_heartbeat' in updated_status_info)):\n        updated_status_info['first_heartbeat'] = updated_status_info['last_heartbeat']\n    if (not ('host' in updated_status_info)):\n        updated_status_info['host'] = socket.gethostname()\n    if (not ('pid' in updated_status_info)):\n        updated_status_info['pid'] = os.getpid()\n    try:\n        result = self.rr.table(self.table).insert(updated_status_info, conflict='replace', return_changes=True).run()\n        return result['changes'][0]['new_val']\n    except:\n        self.logger.error('error updating service registry', exc_info=True)\n        return status_info", "docstring": "Update service status, indicating \"up\"-ness.\n\nArgs:\nstatus_info (dict): a dictionary representing the status of the\nservice\n\n`status_info` must have at least the fields 'role', 'load', and\n'ttl'. Some additional fields are populated automatically by this\nmethod. If the field 'id' is absent, it will be generated by rethinkdb.\n\nSee the ServiceRegistry class-level documentation for more information\nabout the various fields.\n\nReturns:\nOn success, returns the modified status info dict. On failure\ncommunicating with rethinkdb, returns `status_info` unmodified.\n\nRaises:\nException: if `status_info` is missing a required field, or a\n`status_info['ttl']` is not a number greater than zero", "source": "codesearchnet"}
{"code": "def calculate_heading(locator1, locator2):\n    (lat1, long1) = locator_to_latlong(locator1)\n    (lat2, long2) = locator_to_latlong(locator2)\n    r_lat1 = radians(lat1)\n    r_lon1 = radians(long1)\n    r_lat2 = radians(lat2)\n    r_lon2 = radians(long2)\n    d_lon = radians((long2 - long1))\n    b = atan2((sin(d_lon) * cos(r_lat2)), ((cos(r_lat1) * sin(r_lat2)) - ((sin(r_lat1) * cos(r_lat2)) * cos(d_lon))))\n    bd = degrees(b)\n    (br, bn) = divmod((bd + 360), 360)\n    return bn", "docstring": "calculates the heading from the first to the second locator\n\nArgs:\nlocator1 (string): Locator, either 4 or 6 characters\nlocator2 (string): Locator, either 4 or 6 characters\n\nReturns:\nfloat: Heading in deg\n\nRaises:\nValueError: When called with wrong or invalid input arg\nAttributeError: When args are not a string\n\nExample:\nThe following calculates the heading from locator1 to locator2\n\n>>> from pyhamtools.locator import calculate_heading\n>>> calculate_heading(\"JN48QM\", \"QF67bf\")\n74.3136", "source": "codesearchnet"}
{"code": "def make_calls(self, num_calls=1):\n    self._cull()\n    while ((self._outstanding_calls + num_calls) > self._max_calls_per_second):\n        time.sleep(0)\n        self._cull()\n    self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls))\n    self._outstanding_calls += num_calls", "docstring": "Adds appropriate sleep to avoid making too many calls.\n\nArgs:\nnum_calls: int the number of calls which will be made", "source": "codesearchnet"}
{"code": "def get_decor(self, c, match_only=None):\n    if isinstance(c, Component):\n        if c:\n            if match_only:\n                c = Component({k: getattr(c, k, None) for k in match_only})\n            for decor in self.__list:\n                try:\n                    if (c == decor.component):\n                        return decor\n                except AttributeError:\n                    continue\n    else:\n        for decor in self.__list:\n            try:\n                if (getattr(c, 'mnemonic').lower() == decor.curve.mnemonic):\n                    return decor\n            except AttributeError:\n                continue\n    # Fallback Decor; the colour literal was truncated in the source, so this hex value is assumed.\n    return Decor({'colour': '#eeeeee', 'component': c})", "docstring": "Get the decor for a component.\n\nArgs:\nc (component): The component to look up.\nmatch_only (list of str): The component attributes to include in the\ncomparison. Default: All of them.\n\nReturns:\nDecor. The matching Decor from the Legend, or None if not found.", "source": "codesearchnet"}
{"code": "def operate_magmom(self, magmom):\n    magmom = Magmom(magmom)\n    transformed_moment = ((self.apply_rotation_only(magmom.global_moment) * np.linalg.det(self.rotation_matrix)) * self.time_reversal)\n    return Magmom.from_global_moment_and_saxis(transformed_moment, magmom.saxis)", "docstring": "Apply time reversal operator on the magnetic moment. Note that\nmagnetic moments transform as axial vectors, not polar vectors.\n\nSee 'Symmetry and magnetic structures', Rodríguez-Carvajal and\nBourée for a good discussion. DOI: 10.1051/epjconf/20122200010\n\nArgs:\nmagmom: Magnetic moment as electronic_structure.core.Magmom\nclass or as list or np array-like\n\nReturns:\nMagnetic moment after operator applied as Magmom class", "source": "codesearchnet"}
{"code": "def assert_false(expr, msg, extras=None):\n    if expr:\n        fail(msg, extras)", "docstring": "Assert an expression evaluates to false, otherwise fail the test.\n\nArgs:\nexpr: The expression that is evaluated.\nmsg: A string explaining the details in case of failure.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def main(target_device):\n    jlink = pylink.JLink()\n    print('connecting to JLink...')\n    jlink.open()\n    print(('connecting to %s...' % target_device))\n    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)\n    jlink.connect(target_device)\n    print('connected, starting RTT...')\n    jlink.rtt_start()\n    while True:\n        try:\n            num_up = jlink.rtt_get_num_up_buffers()\n            num_down = jlink.rtt_get_num_down_buffers()\n            print(('RTT started, %d up bufs, %d down bufs.' % (num_up, num_down)))\n            break\n        except pylink.errors.JLinkRTTException:\n            time.sleep(0.1)\n    try:\n        thread.start_new_thread(read_rtt, (jlink,))\n        thread.start_new_thread(write_rtt, (jlink,))\n        while jlink.connected():\n            time.sleep(1)\n        print('JLink disconnected, exiting...')\n    except KeyboardInterrupt:\n        print('ctrl-c detected, exiting...')\n        pass", "docstring": "Creates an interactive terminal to the target via RTT.\n\nThe main loop opens a connection to the JLink, and then connects\nto the target device. RTT is started, the number of buffers is presented,\nand then two worker threads are spawned: one for read, and one for write.\n\nThe main loops sleeps until the JLink is either disconnected or the\nuser hits ctrl-c.\n\nArgs:\ntarget_device (string): The target CPU to connect to.\n\nReturns:\nAlways returns ``0`` or a JLinkException.\n\nRaises:\nJLinkException on error.", "source": "codesearchnet"}
{"code": "def __init__(self, op, specs, name):\n    self.op = op\n    self.specs = specs\n    self.name = name", "docstring": "Creates a `SaveableObject` object.\n\nArgs:\nop: the \"producer\" object that this class wraps; it produces a list of\ntensors to save.  E.g., a \"Variable\" object saving its backing tensor.\nspecs: a list of SaveSpec, each element of which describes one tensor to\nsave under this object. All Tensors must be on the same device.\nname: the name to save the object under.", "source": "github-repos"}
{"code": "def receiveds_parsing(receiveds):\n    \n\n    parsed = []\n    receiveds = [re.sub(JUNK_PATTERN, \" \", i).strip() for i in receiveds]\n    n = len(receiveds)\n    log.debug(\"Nr. of receiveds. {}\".format(n))\n\n    for idx, received in enumerate(receiveds):\n        log.debug(\"Parsing received {}/{}\".format(idx + 1, n))\n        log.debug(\"Try to parse {!r}\".format(received))\n        try:\n            \n            values_by_clause = parse_received(received)\n        except MailParserReceivedParsingError:\n            \n            parsed.append({'raw': received})\n        else:\n            \n            parsed.append(values_by_clause)\n\n    log.debug(\"len(receiveds) %s, len(parsed) %s\" % (\n        len(receiveds), len(parsed)))\n\n    if len(receiveds) != len(parsed):\n        \n        \n        log.error(\"len(receiveds): %s, len(parsed): %s, receiveds: %s, \\\n            parsed: %s\" % (len(receiveds), len(parsed), receiveds, parsed))\n        return receiveds_not_parsed(receiveds)\n\n    else:\n        \n        return receiveds_format(parsed)", "docstring": "This function parses the receiveds headers.\n\nArgs:\nreceiveds (list): list of raw receiveds headers\n\nReturns:\na list of parsed receiveds headers with first hop in first position", "source": "juraj-google-style"}
{"code": "def insert(self, name, entry_type, filename):\n        \n        if self.cursor is None:\n            raise RuntimeError(\n                'Open DB connection before attempting to call insert!')\n\n        db_entry = (name, entry_type, filename)\n\n        if self.verbose:\n            print('Inserting %s \"%s\" -> %s' % db_entry, file=sys.stderr)\n\n        # The SQL statement was missing in the source; this assumes the\n        # standard Dash/Zeal docset searchIndex schema.\n        self.cursor.execute(\n            'INSERT OR IGNORE INTO searchIndex(name, type, path) '\n            'VALUES (?, ?, ?)', db_entry)", "docstring": "Insert an entry into the Zeal database.\n\nArgs:\nname: A string representing the name of the entry.\nentry_type: A string representing the entry type.\nfilename: A string representing the filename of the documentation\nfor the entry.\n\nRaises:\nRuntimeError: a database connection was not established before\ncalling insert()", "source": "juraj-google-style"}
{"code": "def find_pruneable_heads_and_indices(heads: list[int], n_heads: int, head_size: int, already_pruned_heads: set[int]) -> tuple[set[int], torch.LongTensor]:\n    mask = torch.ones(n_heads, head_size)\n    heads = set(heads) - already_pruned_heads\n    for head in heads:\n        head = head - sum((1 if h < head else 0 for h in already_pruned_heads))\n        mask[head] = 0\n    mask = mask.view(-1).contiguous().eq(1)\n    index: torch.LongTensor = torch.arange(len(mask))[mask].long()\n    return (heads, index)", "docstring": "Finds the heads and their indices taking `already_pruned_heads` into account.\n\nArgs:\nheads (`List[int]`): List of the indices of heads to prune.\nn_heads (`int`): The number of heads in the model.\nhead_size (`int`): The size of each head.\nalready_pruned_heads (`Set[int]`): A set of already pruned heads.\n\nReturns:\n`Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads`\ninto account and the indices of rows/columns to keep in the layer weight.", "source": "github-repos"}
{"code": "def find_and_replace_channel_refs(self, text):\n        \n\n        match = True\n        # Slack channel references look like <#C024BE7LR|channel-name>.\n        # The original pattern and replacement strings were truncated in the\n        # source; the values below are a reconstruction.\n        pattern = re.compile('<#\\\\w+\\\\|([^>]+)>')\n        while match:\n            match = pattern.search(text)\n            if match:\n                text = text.replace(match.group(0), '#' + match.group(1))\n\n        return text", "docstring": "Find occurrences of Slack channel references and attempt to\nreplace them with just channel names.\n\nArgs:\ntext (string): The message text\nReturns:\nstring: The message text with channel references replaced.", "source": "juraj-google-style"}
{"code": "def add_plot_boundary(ax, padding=0.125):\n    \n    nodes = np.asfortranarray(\n        np.vstack([line.get_xydata() for line in ax.lines]).T\n    )\n    left, right, bottom, top = _helpers.bbox(nodes)\n    center_x = 0.5 * (right + left)\n    delta_x = right - left\n    center_y = 0.5 * (top + bottom)\n    delta_y = top - bottom\n    multiplier = (1.0 + padding) * 0.5\n    ax.set_xlim(\n        center_x - multiplier * delta_x, center_x + multiplier * delta_x\n    )\n    ax.set_ylim(\n        center_y - multiplier * delta_y, center_y + multiplier * delta_y\n    )", "docstring": "Add a buffer of empty space around a plot boundary.\n\n.. note::\n\nThis only uses ``line`` data from the axis. It **could**\nuse ``patch`` data, but doesn't at this time.\n\nArgs:\nax (matplotlib.artist.Artist): A matplotlib axis.\npadding (Optional[float]): Amount (as a fraction of width and height)\nof padding to add around data. Defaults to ``0.125``.", "source": "juraj-google-style"}
{"code": "def DownloadPqlResultToList(self, pql_query, values=None):\n    \n    results = []\n    self._PageThroughPqlSet(pql_query, results.append, values)\n    return results", "docstring": "Downloads the results of a PQL query to a list.\n\nArgs:\npql_query: str a statement filter to apply (the query should not include\nthe limit or the offset)\n[optional]\nvalues: A dict of python objects or a list of raw SOAP values to bind\nto the pql_query.\n\nReturns:\na list of lists with the first being the header row and each subsequent\nlist being a row of results.", "source": "juraj-google-style"}
{"code": "def _resolve_attribute(self, attribute):\n    value = self.attributes[attribute]\n    if (not value):\n        return None\n    resolved_value = re.sub('\\\\$\\\\((.*?)\\\\)', self._resolve_attribute_match, value)\n    return resolved_value", "docstring": "Recursively replaces references to other attributes with their value.\n\nArgs:\nattribute (str): The name of the attribute to resolve.\n\nReturns:\nstr: The resolved value of 'attribute'.", "source": "codesearchnet"}
{"code": "def collection(self, **kwargs):\n    path = self._get_path('collection')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Search for collections by name.\n\nArgs:\nquery: CGI escaped string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def delete_meta_features(self, path):\n    if os.path.exists(self.meta_features_path(path)):\n        os.remove(self.meta_features_path(path))", "docstring": "Deletes meta-features of base learner if it exists\n\nArgs:\npath (str): Absolute/local path of xcessiv folder", "source": "codesearchnet"}
{"code": "def fetch_git_package(self, config):\n        \n        \n        \n        from git import Repo\n\n        ref = self.determine_git_ref(config)\n        dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)\n        cached_dir_path = os.path.join(self.package_cache_dir, dir_name)\n\n        \n        if not os.path.isdir(cached_dir_path):\n            logger.debug(\"Remote repo %s does not appear to have been \"\n                         \"previously downloaded - starting clone to %s\",\n                         config['uri'],\n                         cached_dir_path)\n            tmp_dir = tempfile.mkdtemp(prefix='stacker')\n            try:\n                tmp_repo_path = os.path.join(tmp_dir, dir_name)\n                with Repo.clone_from(config['uri'], tmp_repo_path) as repo:\n                    repo.head.reference = ref\n                    repo.head.reset(index=True, working_tree=True)\n                shutil.move(tmp_repo_path, self.package_cache_dir)\n            finally:\n                shutil.rmtree(tmp_dir)\n        else:\n            logger.debug(\"Remote repo %s appears to have been previously \"\n                         \"cloned to %s -- bypassing download\",\n                         config['uri'],\n                         cached_dir_path)\n\n        \n        self.update_paths_and_config(config=config,\n                                     pkg_dir_name=dir_name)", "docstring": "Make a remote git repository available for local use.\n\nArgs:\nconfig (dict): git config dictionary", "source": "juraj-google-style"}
{"code": "def get_config(self, key_name):\n        \n        if key_name in self.config:\n            return self.config.get(key_name)\n        return self.Configuration.default(key_name, inst=self)", "docstring": "Return configuration value\n\nArgs:\nkey_name (str): configuration key\n\nReturns:\nThe value for the specified configuration key, or if not found\nin the config the default value specified in the Configuration Handler\nclass specified inside this component", "source": "juraj-google-style"}
{"code": "def timeparse(sval):\n    \n    match = re.match(r'\\s*' + TIMEFORMAT + r'\\s*$', sval, re.I)\n    if not match or not match.group(0).strip():\n        return\n\n    mdict = match.groupdict()\n    return sum(\n        MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None)", "docstring": "Parse a time expression, returning it as a number of seconds.  If\npossible, the return value will be an `int`; if this is not\npossible, the return will be a `float`.  Returns `None` if a time\nexpression cannot be parsed from the given string.\nArguments:\n- `sval`: the string value to parse\n>>> timeparse('1m24s')\n84\n>>> timeparse('1.2 minutes')\n72\n>>> timeparse('1.2 seconds')\n1.2", "source": "juraj-google-style"}
{"code": "def _ReadCacheEntry(self, file_object, display_name, block_size):\n    file_offset = file_object.get_offset()\n    cache_entry_header_map = self._GetDataTypeMap('firefox_cache1_entry_header')\n    try:\n        (cache_entry_header, header_data_size) = self._ReadStructureFromFileObject(file_object, file_offset, cache_entry_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse Firefox cache entry header with error: {0!s}'.format(exception))\n    if (not self._ValidateCacheEntryHeader(cache_entry_header)):\n        file_offset = (block_size - header_data_size)\n        file_object.seek(file_offset, os.SEEK_CUR)\n        raise IOError('Not a valid Firefox cache record.')\n    body_data_size = (cache_entry_header.request_size + cache_entry_header.information_size)\n    cache_entry_body_data = self._ReadData(file_object, (file_offset + header_data_size), body_data_size)\n    url = cache_entry_body_data[:cache_entry_header.request_size].decode('ascii').rstrip('\\x00')\n    (request_method, response_code) = self._ParseHTTPHeaders(cache_entry_body_data[cache_entry_header.request_size:], file_offset, display_name)\n    cache_entry_data_size = (header_data_size + body_data_size)\n    (_, remaining_data_size) = divmod(cache_entry_data_size, block_size)\n    if (remaining_data_size > 0):\n        file_object.seek((block_size - remaining_data_size), os.SEEK_CUR)\n    event_data = FirefoxCacheEventData()\n    event_data.data_size = cache_entry_header.cached_data_size\n    event_data.fetch_count = cache_entry_header.fetch_count\n    event_data.info_size = cache_entry_header.information_size\n    event_data.location = cache_entry_header.location\n    event_data.request_method = request_method\n    event_data.request_size = cache_entry_header.request_size\n    event_data.response_code = response_code\n    event_data.url = url\n    event_data.version = '{0:d}.{1:d}'.format(cache_entry_header.major_format_version, cache_entry_header.minor_format_version)\n    return (cache_entry_header, event_data)", "docstring": "Reads a cache entry.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\ndisplay_name (str): display name.\nblock_size (int): block size.\n\nReturns:\ntuple: containing:\n\nfirefox_cache1_entry_header: cache record header structure.\nFirefoxCacheEventData: event data.\n\nRaises:\nIOError: if the cache record header cannot be validated.\nOSError: if the cache record header cannot be validated.\nParseError: if the cache record header cannot be parsed.", "source": "codesearchnet"}
{"code": "def input_elements(self, instruction_id, expected_inputs, abort_callback=None):\n    raise NotImplementedError(type(self))", "docstring": "Returns an iterable of all Element.Data and Element.Timers bundles for\ninstruction_id.\n\nThis iterable terminates only once the full set of data has been received\nfor each of the expected transforms. It may block waiting for more data.\n\nArgs:\ninstruction_id: which instruction the results must belong to\nexpected_inputs: which transforms to wait on for completion\nabort_callback: a callback to invoke if blocking returning whether\nto abort before consuming all the data", "source": "github-repos"}
{"code": "def __init__(self, template, capacity, max_length, scope):\n    \n    self._capacity = capacity\n    self._max_length = max_length\n    with tf.variable_scope(scope) as var_scope:\n      self._scope = var_scope\n      self._length = tf.Variable(tf.zeros(capacity, tf.int32), False)\n      self._buffers = tools.nested.map(\n          lambda x: tf.Variable(tf.zeros(\n              [capacity, max_length] + x.shape.as_list(), x.dtype), False),\n          template)", "docstring": "Create a memory that stores episodes.\n\nEach transition tuple consists of quantities specified by the template.\nThese quantities would typically be be observations, actions, rewards, and\ndone indicators.\n\nArgs:\ntemplate: Nested tensors to derive shapes and dtypes of each transition.\ncapacity: Number of episodes, or rows, hold by the memory.\nmax_length: Allocated sequence length for the episodes.\nscope: Variable scope to use for internal variables.", "source": "juraj-google-style"}
{"code": "def is_valid(self):\n    if self.key is None:\n        raise ValueError('Invalid DisplayDataItem %s. Key must not be None.' % self)\n    if self.namespace is None:\n        raise ValueError('Invalid DisplayDataItem %s. Namespace must not be None' % self)\n    if self.value is None:\n        raise ValueError('Invalid DisplayDataItem %s. Value must not be None' % self)\n    if self.type is None:\n        raise ValueError('Invalid DisplayDataItem. Value {} is of an unsupported type.'.format(self.value))", "docstring": "Checks that all the necessary fields of the :class:`DisplayDataItem`\nare filled in. It checks that neither key, namespace, value or type are\n:data:`None`.\n\nRaises:\nValueError: If the item does not have a key, namespace,\nvalue or type.", "source": "github-repos"}
{"code": "def _autodetect_num_gpus():\n    proc_gpus_path = '/proc/driver/nvidia/gpus'\n    if os.path.isdir(proc_gpus_path):\n        return len(os.listdir(proc_gpus_path))\n    return 0", "docstring": "Attempt to detect the number of GPUs on this machine.\n\nTODO(rkn): This currently assumes Nvidia GPUs and Linux.\n\nReturns:\nThe number of GPUs if any were detected, otherwise 0.", "source": "codesearchnet"}
{"code": "def event(self, **kwargs):\n        \n        if self.callback.noargs and self.streams == []:\n            self.param.warning(\n                'No streams declared. To update a DynamicMaps using '\n                'generators (or callables without arguments) use streams=[Next()]')\n            return\n        if self.streams == []:\n            self.param.warning('No streams on DynamicMap, calling event '\n                               'will have no effect')\n            return\n\n        stream_params = set(util.stream_parameters(self.streams))\n        invalid = [k for k in kwargs.keys() if k not in stream_params]\n        if invalid:\n            msg = 'Key(s) {invalid} do not correspond to stream parameters'\n            raise KeyError(msg.format(invalid = ', '.join('%r' % i for i in invalid)))\n\n        streams = []\n        for stream in self.streams:\n            contents = stream.contents\n            applicable_kws = {k:v for k,v in kwargs.items()\n                              if k in set(contents.keys())}\n            if not applicable_kws and contents:\n                continue\n            streams.append(stream)\n            rkwargs = util.rename_stream_kwargs(stream, applicable_kws, reverse=True)\n            stream.update(**rkwargs)\n\n        Stream.trigger(streams)", "docstring": "Updates attached streams and triggers events\n\nAutomatically find streams matching the supplied kwargs to\nupdate and trigger events on them.\n\nArgs:\n**kwargs: Events to update streams with", "source": "juraj-google-style"}
{"code": "def get_input_embeddings(self) -> keras.layers.Layer:\n    main_layer = getattr(self, self.base_model_prefix, self)\n    if main_layer is not self:\n        return main_layer.get_input_embeddings()\n    else:\n        raise NotImplementedError", "docstring": "Returns the model's input embeddings layer.\n\nReturns:\n`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.", "source": "github-repos"}
{"code": "def SampleMemoryUsage(self, parser_name):\n    if self._memory_profiler:\n        used_memory = (self._process_information.GetUsedMemory() or 0)\n        self._memory_profiler.Sample(parser_name, used_memory)", "docstring": "Takes a sample of the memory usage for profiling.\n\nArgs:\nparser_name (str): name of the parser.", "source": "codesearchnet"}
{"code": "def hours(value: Union[int, float]) -> Duration:\n    return float(value * 60 * 60)", "docstring": "Converts input value from hours to a `Duration` in seconds.\n\nExample:\n```python\n>>> timestamps = [tp.duration.hours(i) for i in [1, 2, 10]]\n>>> timestamps\n[3600.0, 7200.0, 36000.0]\n\n>>> # Usage in a window operation\n>>> a = tp.event_set(timestamps=timestamps, features={\"f1\": [1, 5, -5]})\n>>> a.moving_sum(window_length=tp.duration.hours(2))\nindexes: ...\ntimestamps: [ 3600. 7200. 36000.]\n'f1': [ 1 6 -5]\n...\n\n```\n\nArgs:\nvalue: Number of hours.\n\nReturns:\nEquivalent number of seconds.", "source": "github-repos"}
{"code": "def not_equal(x, y):\n    return math_ops.not_equal(x, y)", "docstring": "Element-wise inequality between two tensors.\n\nArgs:\nx: Tensor or variable.\ny: Tensor or variable.\n\nReturns:\nA bool tensor.", "source": "github-repos"}
{"code": "def get_errors(self):\n    return [{cr.component_name: cr.get_error()} for cr in self.component_results if cr.has_error()]", "docstring": "If there were any business errors fetching data for this property,\nreturns the error messages.\n\nReturns:\nstring - the error message, or None if there was no error.", "source": "codesearchnet"}
{"code": "def polymorph_response(response, poly, bqm, penalty_strength=None, keep_penalty_variables=True, discard_unsatisfied=False):\n    record = response.record\n    penalty_vector = penalty_satisfaction(response, bqm)\n    original_variables = bqm.variables\n    if discard_unsatisfied:\n        samples_to_keep = list(map(bool, list(penalty_vector)))\n        penalty_vector = np.array(([True] * np.sum(samples_to_keep)))\n    else:\n        samples_to_keep = list(map(bool, ([1] * len(record.sample))))\n    samples = record.sample[samples_to_keep]\n    energy_vector = poly.energies((samples, response.variables))\n    if (not keep_penalty_variables):\n        original_variables = poly.variables\n        idxs = [response.variables.index[v] for v in original_variables]\n        samples = np.asarray(samples[(:, idxs)])\n    (num_samples, num_variables) = np.shape(samples)\n    datatypes = [('sample', np.dtype(np.int8), (num_variables,)), ('energy', energy_vector.dtype), ('penalty_satisfaction', penalty_vector.dtype)]\n    datatypes.extend(((name, record[name].dtype, record[name].shape[1:]) for name in record.dtype.names if (name not in {'sample', 'energy'})))\n    data = np.rec.array(np.empty(num_samples, dtype=datatypes))\n    data.sample = samples\n    data.energy = energy_vector\n    for name in record.dtype.names:\n        if (name not in {'sample', 'energy'}):\n            data[name] = record[name][samples_to_keep]\n    data['penalty_satisfaction'] = penalty_vector\n    response.info['reduction'] = bqm.info['reduction']\n    if (penalty_strength is not None):\n        response.info['penalty_strength'] = penalty_strength\n    return SampleSet(data, original_variables, response.info, response.vartype)", "docstring": "Transforms the sampleset for the higher order problem.\n\nGiven a response of a penalized HUBO, this function creates a new sampleset\nobject, taking into account penalty information and calculates the\nenergies of samples for the higherorder problem.\n\nArgs:\nresponse (:obj:`.SampleSet`): response for a penalized hubo.\n\npoly (:obj:`.BinaryPolynomial`):\nA binary polynomial.\n\nbqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the\nreduced problem.\n\npenalty_strength (float, optional): default is None, if provided,\nwill be added to the info field of the returned sampleSet object.\n\nkeep_penalty_variables (bool, optional): default is True. if False\nwill remove the variables used for penalty from the samples\n\ndiscard_unsatisfied (bool, optional): default is False. If True\nwill discard samples that do not satisfy the penalty conditions.\n\nReturns:\n(:obj:`.SampleSet'): A sampleSet object that has additional penalty\ninformation. The energies of samples are calculated for the HUBO\nignoring the penalty variables.", "source": "codesearchnet"}
{"code": "def extract_code(end_mark, current_str, str_array, line_num):\n    \n    if end_mark not in current_str:\n        reached_end = False\n        line_num += 1\n        while reached_end is False:\n            next_line = str_array[line_num]\n            if end_mark in next_line:\n                reached_end = True\n            else:\n                line_num += 1\n            current_str += next_line\n    clean_str = current_str.split(end_mark)[0]\n    return {'current_str': clean_str, 'line_num': line_num}", "docstring": "Extract a multi-line string from a string array, up to a specified end marker.\n\nArgs:\nend_mark (str): The end mark string to match for.\ncurrent_str (str): The first line of the string array.\nstr_array (list): An array of strings (lines).\nline_num (int): The current offset into the array.\n\nReturns:\nExtended string up to line with end marker.", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    analysis_plugins = cls._ParseStringOption(options, 'analysis_plugins')\n\n    if analysis_plugins and analysis_plugins.lower() != 'list':\n      plugin_names = analysis_manager.AnalysisPluginManager.GetPluginNames()\n      analysis_plugins = [name.strip() for name in analysis_plugins.split(',')]\n\n      difference = set(analysis_plugins).difference(plugin_names)\n      if difference:\n        raise errors.BadConfigOption(\n            'Non-existent analysis plugins specified: {0:s}'.format(\n                ' '.join(difference)))\n\n    setattr(configuration_object, '_analysis_plugins', analysis_plugins)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def _SetHashers(self, hasher_names_string):\n    \n    if not hasher_names_string or hasher_names_string == 'none':\n      return\n\n    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(\n        'hashing')\n    analyzer_object.SetHasherNames(hasher_names_string)\n    self._analyzers.append(analyzer_object)", "docstring": "Sets the hasher names.\n\nArgs:\nhasher_names_string (str): comma separated names of the hashers\nto enable, where 'none' disables the hashing analyzer.", "source": "juraj-google-style"}
{"code": "def get_v2_constants(module: Any) -> Sequence[str]:\n    constants_v2 = []\n    tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants\n    if hasattr(module, tensorflow_constants_attr):\n        constants_v2.extend(getattr(module, tensorflow_constants_attr))\n    return constants_v2", "docstring": "Get a list of TF 2.0 constants in this module.\n\nArgs:\nmodule: TensorFlow module.\n\nReturns:\nList of all API constants under the given module.", "source": "github-repos"}
{"code": "def __init__(self, name):\n    \n    super(DependencyDefinition, self).__init__()\n    self.dpkg_name = None\n    self.is_optional = False\n    self.l2tbinaries_macos_name = None\n    self.l2tbinaries_name = None\n    self.maximum_version = None\n    self.minimum_version = None\n    self.name = name\n    self.pypi_name = None\n    self.python2_only = False\n    self.python3_only = False\n    self.rpm_name = None\n    self.version_property = None", "docstring": "Initializes a dependency configuration.\n\nArgs:\nname (str): name of the dependency.", "source": "juraj-google-style"}
{"code": "def range(self, dimension, data_range=True, dimension_range=True):\n        \n        dimension = self.get_dimension(dimension)\n        if dimension is None or (not data_range and not dimension_range):\n            return (None, None)\n        elif all(util.isfinite(v) for v in dimension.range) and dimension_range:\n            return dimension.range\n        elif data_range:\n            if dimension in self.kdims+self.vdims:\n                dim_vals = self.dimension_values(dimension.name)\n                lower, upper = util.find_range(dim_vals)\n            else:\n                dname = dimension.name\n                match_fn = lambda x: dname in x.kdims + x.vdims\n                range_fn = lambda x: x.range(dname)\n                ranges = self.traverse(range_fn, [match_fn])\n                lower, upper = util.max_range(ranges)\n        else:\n            lower, upper = (np.NaN, np.NaN)\n        if not dimension_range:\n            return lower, upper\n        return util.dimension_range(lower, upper, dimension.range, dimension.soft_range)", "docstring": "Return the lower and upper bounds of values along dimension.\n\nArgs:\ndimension: The dimension to compute the range on.\ndata_range (bool): Compute range from data values\ndimension_range (bool): Include Dimension ranges\nWhether to include Dimension range and soft_range\nin range calculation\n\nReturns:\nTuple containing the lower and upper bound", "source": "juraj-google-style"}
{"code": "def set_attribute(self, obj, attr, value):\n        \n        \n        if isinstance(obj, MutableMapping):\n            obj[attr] = value\n        else:\n            setattr(obj, attr, value)", "docstring": "Set value of attribute in given object instance.\n\nReason for existence of this method is the fact that 'attribute' can\nbe also a object's key if it is a dict or any other kind of mapping.\n\nArgs:\nobj (object): object instance to modify\nattr (str): attribute (or key) to change\nvalue: value to set", "source": "juraj-google-style"}
{"code": "def _atoms(atoms_string):\n    atoms = {}\n    for split in atoms_string.split(','):\n        sites = split.split('.')\n        el = sites.pop(0)\n        sites = list(map(int, sites))\n        atoms[el] = (np.array(sites) - 1)\n    return atoms", "docstring": "Parse the atom string.\n\nArgs:\natoms_string (str): The atoms to plot, in the form ``\"C.1.2.3,\"``.\n\nReturns:\ndict: The atomic indices over which to sum the DOS. Formatted as::\n\n{Element: [atom_indices]}.\n\nIndices are zero indexed for each atomic species. If an element symbol\nis included with an empty list, then all sites for that species are\nconsidered.", "source": "codesearchnet"}
{"code": "def which(cmd):\n    \n    def is_exe(fp):\n        return os.path.isfile(fp) and os.access(fp, os.X_OK)\n\n    fpath, fname = os.path.split(cmd)\n    if fpath:\n        if is_exe(cmd):\n            return cmd\n    else:\n        for path in os.environ[\"PATH\"].split(os.pathsep):\n            exe_file = os.path.join(path, cmd)\n            if is_exe(exe_file):\n                return exe_file\n    return None", "docstring": "Returns full path to a executable.\n\nArgs:\ncmd (str): Executable command to search for.\n\nReturns:\n(str) Full path to command. None if it is not found.\n\nExample::\n\nfull_path_to_python = which(\"python\")", "source": "juraj-google-style"}
{"code": "def _associate_click_tags(self, feed_item, creative):\n    click_tags = []\n    for click_tag in feed_item.get('click_tags', []):\n        lp = self.landing_page_dao.get(click_tag, column_name=FieldMap.CLICK_TAG_LANDING_PAGE_ID)\n        ct = {'eventName': click_tag.get(FieldMap.CLICK_TAG_EVENT, None), 'name': click_tag.get(FieldMap.CLICK_TAG_NAME, None), 'clickThroughUrl': {}}\n        if click_tag.get(FieldMap.CLICK_TAG_LANDING_PAGE_ID):\n            ct['clickThroughUrl']['landingPageId'] = click_tag.get(FieldMap.CLICK_TAG_LANDING_PAGE_ID) if not lp else lp['id']\n        elif click_tag.get(FieldMap.CLICK_TAG_CUSTOM_CLICK_THROUGH_URL):\n            ct['clickThroughUrl']['customClickThroughUrl'] = click_tag.get(FieldMap.CLICK_TAG_CUSTOM_CLICK_THROUGH_URL)\n        click_tags.append(ct)\n    if click_tags:\n        creative['clickTags'] = click_tags", "docstring": "Associate click tags with the respective creative DCM object.\n\nThis method transforms all child feed mapped earlier into DCM formatted\nassociations within the creative object so it can be pushed to the API.\n\nArgs:\nfeed_item: Feed item representing the creative.\ncreative: DCM creative object being created or updated.", "source": "github-repos"}
{"code": "def PrivateKeyFromWIF(wif):\n    if ((wif is None) or (len(wif) is not 52)):\n        raise ValueError('Please provide a wif with a length of 52 bytes (LEN: {0:d})'.format(len(wif)))\n    data = base58.b58decode(wif)\n    length = len(data)\n    if ((length is not 38) or (data[0] is not 128) or (data[33] is not 1)):\n        raise ValueError('Invalid format!')\n    checksum = Crypto.Hash256(data[0:34])[0:4]\n    if (checksum != data[34:]):\n        raise ValueError('Invalid WIF Checksum!')\n    return data[1:33]", "docstring": "Get the private key from a WIF key\n\nArgs:\nwif (str): The wif key\n\nReturns:\nbytes: The private key", "source": "codesearchnet"}
{"code": "def temporal_latent_to_dist(name, x, hparams, output_channels=None):\n    (_, _, width, _, res_channels) = common_layers.shape_list(x)\n    if (output_channels is None):\n        output_channels = res_channels\n    dilation_rates = get_dilation_rates(hparams, width)\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        h = x\n        for i in range(hparams.latent_encoder_depth):\n            if hparams.latent_apply_dilations:\n                h2 = dilated_conv_stack(('dil_latent_3d_res_%d' % i), h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout)\n            else:\n                h2 = conv_stack(('latent_3d_res_%d' % i), h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout)\n            h += h2\n        h = h[(:, (- 1), :, :, :)]\n        h = conv('res_final', h, apply_actnorm=False, conv_init='zeros', output_channels=(2 * output_channels), filter_size=[1, 1])\n        (mean, log_scale) = (h[(:, :, :, 0::2)], h[(:, :, :, 1::2)])\n    return tfp.distributions.Normal(mean, tf.exp(log_scale))", "docstring": "Network that maps a time-indexed list of 3-D latents to a gaussian.\n\nArgs:\nname: variable scope.\nx: List of 4-D Tensors indexed by time, (NHWC)\nhparams: tf.contrib.training.Hparams.\noutput_channels: int, Number of channels of the output gaussian mean.\nReturns:\ndist: tfp.distributions.Normal", "source": "codesearchnet"}
{"code": "def _auth(f):\n        \n        @wraps(f)\n        def method(self, *args, **kwargs):\n            if not self._auth_token or datetime.utcnow() >= self._last_auth + timedelta(minutes=10):\n                \n                self.auth_refresh()\n\n            return f(self, *args, **kwargs)\n        return method", "docstring": "Makes sure the request has a valid authorization jwt before calling the wrapped function.\nIt does this by checking the timestamp of the last jwt and if > 10 minutes have elapsed,\nit refreshes it's existing jwt from the server.\nArgs:\nf: Function to wrap\n\nReturns:\nFunction, f", "source": "juraj-google-style"}
{"code": "def _infer_all_output_dims(self, inputs):\n    batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)\n    out_channels = (self.output_channels,)\n    if (self._n == 1):\n        out_shape = ((1,) + self.output_shape)\n    else:\n        out_shape = self.output_shape\n    if self._data_format.startswith('NC'):\n        out_shape_tuple = (out_channels + out_shape)\n    elif (self._data_format.startswith('N') and self._data_format.endswith('C')):\n        out_shape_tuple = (out_shape + out_channels)\n    output_shape = tf.concat([batch_size, out_shape_tuple], 0)\n    return output_shape", "docstring": "Calculate the output shape for `inputs` after a deconvolution.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\n\nReturns:\noutput_shape: A tensor of shape (`batch_size`, `conv_output_shape`).", "source": "codesearchnet"}
{"code": "def remove_op_callback(op_callback):\n    ctx = context.context()\n    ctx.remove_op_callback(op_callback)\n    if ctx.executing_eagerly() and (not ctx.op_callbacks):\n        execute.execute = execute.quick_execute", "docstring": "Remove an already-added op callback.\n\nArgs:\nop_callback: The op callback to be removed.\n\nRaises:\nKeyError: If `op_callback` has not been registered using `add_op_callback()`\nbefore.", "source": "github-repos"}
{"code": "def add_step_timing_signal(x, step, hparams):\n  \n  if hparams.recurrence_type == \"act\":\n    num_steps = hparams.act_max_steps\n  else:\n    num_steps = hparams.num_rec_steps\n  channels = common_layers.shape_list(x)[-1]\n\n  if hparams.step_timing_signal_type == \"learned\":\n    signal = common_attention.get_layer_timing_signal_learned_1d(\n        channels, step, num_steps)\n\n  elif hparams.step_timing_signal_type == \"sinusoid\":\n    signal = common_attention.get_layer_timing_signal_sinusoid_1d(\n        channels, step, num_steps)\n\n  if hparams.add_or_concat_timing_signal == \"add\":\n    x_with_timing = x + common_layers.cast_like(signal, x)\n\n  elif hparams.add_or_concat_timing_signal == \"concat\":\n    batch_size = common_layers.shape_list(x)[0]\n    length = common_layers.shape_list(x)[1]\n    signal_tiled = tf.tile(signal, [batch_size, length, 1])\n    x_with_timing = tf.concat((x, signal_tiled), axis=-1)\n\n  return x_with_timing", "docstring": "Add n-dimensional embedding as the step (vertical) timing signal.\n\nArgs:\nx: a tensor with shape [batch, length, depth]\nstep: step\nhparams: model hyper parameters\n\nReturns:\na Tensor with the same shape as x.", "source": "juraj-google-style"}
{"code": "def AppendFlagValues(self, flag_values):\n    \n    for flag_name, flag in six.iteritems(flag_values.FlagDict()):\n      \n      \n      \n      \n      \n      if flag_name == flag.name:\n        try:\n          self[flag_name] = flag\n        except exceptions.DuplicateFlagError:\n          raise exceptions.DuplicateFlagError.from_flag(\n              flag_name, self, other_flag_values=flag_values)", "docstring": "Appends flags registered in another FlagValues instance.\n\nArgs:\nflag_values: registry to copy from", "source": "juraj-google-style"}
{"code": "def _eval_using_default_session(tensors, feed_dict, graph, session=None):\n    if session is None:\n        session = stack.get_default_session()\n        if session is None:\n            raise ValueError('Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`')\n        if session.graph is not graph:\n            raise ValueError(\"Cannot use the default session to evaluate tensor: the tensor's graph is different from the session's graph. Pass an explicit session to `eval(session=sess)`.\")\n    elif session.graph is not graph:\n        raise ValueError(\"Cannot use the given session to evaluate tensor: the tensor's graph is different from the session's graph.\")\n    return session.run(tensors, feed_dict)", "docstring": "Uses the default session to evaluate one or more tensors.\n\nArgs:\ntensors: A single Tensor, or a list of Tensor objects.\nfeed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\nnumpy ndarrays, TensorProtos, or strings.\ngraph: The graph in which the tensors are defined.\nsession: (Optional) A different session to use to evaluate \"tensors\".\n\nReturns:\nEither a single numpy ndarray if \"tensors\" is a single tensor; or a list\nof numpy ndarrays that each correspond to the respective element in\n\"tensors\".\n\nRaises:\nValueError: If no default session is available; the default session\ndoes not have \"graph\" as its graph; or if \"session\" is specified,\nand it does not have \"graph\" as its graph.", "source": "github-repos"}
{"code": "def read(alias_name, allow_none=False):\n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    return core.read('{0}_PORT'.format(alias_name), default=None, allow_none=allow_none)", "docstring": "Get the raw docker link value.\n\nGet the raw environment variable for the docker link\n\nArgs:\nalias_name: The environment variable name\ndefault: The default value if the link isn't available\nallow_none: If the return value can be `None` (i.e. optional)", "source": "codesearchnet"}
{"code": "async def from_api_token(cls, token=None, api_cls=SlackBotApi):\n    api = (api_cls.from_env() if (token is None) else api_cls(api_token=token))\n    data = (await api.execute_method(cls.API_AUTH_ENDPOINT))\n    return cls(data['user_id'], data['user'], api)", "docstring": "Create a new instance from the API token.\n\nArguments:\ntoken (:py:class:`str`, optional): The bot's API token\n(defaults to ``None``, which means looking in the\nenvironment).\napi_cls (:py:class:`type`, optional): The class to create\nas the ``api`` argument for API access (defaults to\n:py:class:`aslack.slack_api.SlackBotApi`).\n\nReturns:\n:py:class:`SlackBot`: The new instance.", "source": "codesearchnet"}
{"code": "def push(self, value):\n        \n\n        stream = DataStream.FromEncoded(value.stream)\n\n        if stream.stream_type == DataStream.OutputType:\n            if len(self.streaming_data) == self.streaming_length:\n                raise StorageFullError('Streaming buffer full')\n\n            self.streaming_data.append(value)\n        else:\n            if len(self.storage_data) == self.storage_length:\n                raise StorageFullError('Storage buffer full')\n\n            self.storage_data.append(value)", "docstring": "Store a new value for the given stream.\n\nArgs:\nvalue (IOTileReading): The value to store.  The stream\nparameter must have the correct value", "source": "juraj-google-style"}
{"code": "def parse(cls, op):\n    for event in cls:\n        if (event.value == int(op)):\n            return event\n    return None", "docstring": "Gets the enum for the op code\n\nArgs:\nop: value of the op code (will be casted to int)\n\nReturns:\nThe enum that matches the op code", "source": "codesearchnet"}
{"code": "def find_repo_by_name(name, repo_dir=None):\n    \n    if repo_dir is None:\n        repo_dir = config.get('template_repos')\n\n    ret, out, _ = utils.run_command([\n        'find',\n        repo_dir,\n        '-name',\n        '*.json',\n    ], )\n\n    repos = [\n        TemplateRepository.from_url(line.strip()) for line in out.split('\\n')\n        if len(line.strip())\n    ]\n\n    for repo in repos:\n        if repo.name == name:\n            return repo\n    raise RuntimeError('Could not find repo %s' % name)", "docstring": "Searches the given repo name inside the repo_dir (will use the config value\n'template_repos' if no repo dir passed), will rise an exception if not\nfound\n\nArgs:\nname (str): Name of the repo to search\nrepo_dir (str): Directory where to search the repo\n\nReturn:\nstr: path to the repo\n\nRaises:\nRuntimeError: if not found", "source": "juraj-google-style"}
{"code": "def kill_test_logger(logger):\n    \n    for h in list(logger.handlers):\n        logger.removeHandler(h)\n        if isinstance(h, logging.FileHandler):\n            h.close()", "docstring": "Cleans up a test logger object by removing all of its handlers.\n\nArgs:\nlogger: The logging object to clean up.", "source": "juraj-google-style"}
{"code": "def run_matrix(self, matrix_definition, document):\n    matrix = Matrix(matrix_definition, ('matrix(parallel)' in document))\n    process_data = MatrixProcessData()\n    process_data.options = self.options\n    process_data.pipeline = document['pipeline']\n    process_data.model = ({} if ('model' not in document) else document['model'])\n    process_data.hooks = Hooks(document)\n    return matrix.process(process_data)", "docstring": "Running pipeline via a matrix.\n\nArgs:\nmatrix_definition (dict): one concrete matrix item.\ndocument (dict): spline document (complete) as loaded from yaml file.", "source": "codesearchnet"}
{"code": "def eval(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=(- 1)):\n    machine = execute(source, optimize=optimize, output=output, input=input, steps=steps)\n    ds = machine.stack\n    if (len(ds) == 0):\n        return None\n    elif (len(ds) == 1):\n        return ds[(- 1)]\n    else:\n        return ds", "docstring": "Compiles and runs program, returning the values on the stack.\n\nTo return the machine instead, see execute().\n\nArgs:\noptimize: Whether to optimize the code after parsing it.\noutput: Stream which program can write output to.\ninput: Stream which program can read input from.\nsteps: An optional maximum number of instructions to execute on the\nvirtual machine.  Set to -1 for no limit.\n\nReturns:\nNone: If the stack is empty\nobj: If the stack contains a single value\n[obj, obj, ...]: If the stack contains many values", "source": "codesearchnet"}
{"code": "def message_upperbound(self, tree, spins, subtheta):\n    energy_sources = set()\n    for (v, subtree) in tree.items():\n        assert all(((u in spins) for u in self._ancestors[v]))\n\n        def energy_contributions():\n            (yield subtheta.linear[v])\n            for (u, bias) in subtheta.adj[v].items():\n                if (u in spins):\n                    (yield Times(limitReal(spins[u]), bias))\n        energy = Plus(energy_contributions())\n        if subtree:\n            spins[v] = 1.0\n            plus = self.message_upperbound(subtree, spins, subtheta)\n            spins[v] = (- 1.0)\n            minus = self.message_upperbound(subtree, spins, subtheta)\n            del spins[v]\n        else:\n            plus = minus = limitReal(0.0)\n        m = FreshSymbol(REAL)\n        self.assertions.update({LE(m, Plus(energy, plus)), LE(m, Plus(Times(energy, limitReal((- 1.0))), minus))})\n        energy_sources.add(m)\n    return Plus(energy_sources)", "docstring": "Determine an upper bound on the energy of the elimination tree.\n\nArgs:\ntree (dict): The current elimination tree\nspins (dict): The current fixed spins\nsubtheta (dict): Theta with spins fixed.\n\nReturns:\nThe formula for the energy of the tree.", "source": "codesearchnet"}
{"code": "def get_random_url(ltd='com'):\n    url = ['https:\n    return ''.join(url)", "docstring": "Get a random url with the given ltd.\n\nArgs:\nltd (str): The ltd to use (e.g. com).\n\nReturns:\nstr: The random url.", "source": "codesearchnet"}
{"code": "def tomography_basis(basis, prep_fun=None, meas_fun=None):\n    ret = TomographyBasis(basis)\n    ret.prep_fun = prep_fun\n    ret.meas_fun = meas_fun\n    return ret", "docstring": "Generate a TomographyBasis object.\n\nSee TomographyBasis for further details.abs\n\nArgs:\nprep_fun (callable) optional: the function which adds preparation\ngates to a circuit.\nmeas_fun (callable) optional: the function which adds measurement\ngates to a circuit.\n\nReturns:\nTomographyBasis: A tomography basis.", "source": "codesearchnet"}
{"code": "def traverse_pagination(response, endpoint):\n    results = response.get('results', [])\n    next_page = response.get('next')\n    while next_page:\n        querystring = parse_qs(urlparse(next_page).query, keep_blank_values=True)\n        response = endpoint.get(**querystring)\n        results += response.get('results', [])\n        next_page = response.get('next')\n    return results", "docstring": "Traverse a paginated API response.\n\nExtracts and concatenates \"results\" (list of dict) returned by DRF-powered\nAPIs.\n\nArguments:\nresponse (Dict): Current response dict from service API\nendpoint (slumber Resource object): slumber Resource object from edx-rest-api-client\n\nReturns:\nlist of dict.", "source": "codesearchnet"}
{"code": "def repeat(element, count):\n    \n    if count < 0:\n        raise ValueError(\"repeat() count cannot be negative\")\n    return query(itertools.repeat(element, count))", "docstring": "Generate a sequence with one repeated value.\n\nNote: This method uses deferred execution.\n\nArgs:\nelement: The value to be repeated.\ncount: The number of times to repeat the value.\n\nRaises:\nValueError: If the count is negative.", "source": "juraj-google-style"}
{"code": "def get_output_details(self):\n    result = {}\n    for output_name, tensor_index in self._outputs:\n        result[output_name] = self._interpreter._get_tensor_details(tensor_index, self._subgraph_index)\n    return result", "docstring": "Gets output tensor details.\n\nReturns:\nA dictionary from input name to tensor details where each item is a\ndictionary with details about an output tensor. The dictionary contains\nthe same fields as described for `get_input_details()`.", "source": "github-repos"}
{"code": "def send_event(self, event_type, category=None, dimensions=None, properties=None, timestamp=None):\n    if (category and (category not in SUPPORTED_EVENT_CATEGORIES)):\n        raise ValueError(((('Event category is not one of the supported' + 'types: {') + ', '.join(SUPPORTED_EVENT_CATEGORIES)) + '}'))\n    data = {'eventType': event_type, 'category': category, 'dimensions': (dimensions or {}), 'properties': (properties or {}), 'timestamp': (int(timestamp) if timestamp else None)}\n    _logger.debug('Sending event to SignalFx: %s', data)\n    self._add_extra_dimensions(data)\n    return self._send_event(event_data=data, url='{0}/{1}'.format(self._endpoint, self._INGEST_ENDPOINT_EVENT_SUFFIX), session=self._session)", "docstring": "Send an event to SignalFx.\n\nArgs:\nevent_type (string): the event type (name of the event time\nseries).\ncategory (string): the category of the event.\ndimensions (dict): a map of event dimensions.\nproperties (dict): a map of extra properties on that event.\ntimestamp (float): timestamp when the event has occured", "source": "codesearchnet"}
{"code": "def gate_nodes(self):\n    nodes = []\n    for node in self.op_nodes():\n        if isinstance(node.op, Gate):\n            nodes.append(node)\n    return nodes", "docstring": "Get the list of gate nodes in the dag.\n\nReturns:\nlist: the list of node ids that represent gates.", "source": "codesearchnet"}
{"code": "def terminate(self, uuid):\n    request_url = (self._client.base_api_url + self.terminate_url.format(id=uuid))\n    response = self._client.session.post(request_url)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_202_ACCEPTED)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Terminate the task instance with given UUID.\n\nArgs:\nuuid (str): The UUID of the task instance to terminate.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nA task instance model instance representing the task\ninstance that was told to terminate.", "source": "codesearchnet"}
{"code": "def dependency_of_fetches(fetches, op):\n    try:\n        from tensorflow.python.client.session import _FetchHandler as FetchHandler\n        handler = FetchHandler(op.graph, fetches, {})\n        targets = tuple((handler.fetches() + handler.targets()))\n    except ImportError:\n        if isinstance(fetches, list):\n            targets = tuple(fetches)\n        elif isinstance(fetches, dict):\n            raise ValueError(\"Don't know how to parse dictionary to fetch list! This is a bug of tensorpack.\")\n        else:\n            targets = (fetches,)\n    return dependency_of_targets(targets, op)", "docstring": "Check that op is in the subgraph induced by the dependencies of fetches.\nfetches may have more general structure.\n\nArgs:\nfetches: An argument to `sess.run`. Nested structure will affect performance.\nop (tf.Operation or tf.Tensor):\n\nReturns:\nbool: True if any of `fetches` depend on `op`.", "source": "codesearchnet"}
{"code": "def ParseBookmarkRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    rev_host = self._GetRowValue(query_hash, row, 'rev_host')\n    bookmark_type = self._GetRowValue(query_hash, row, 'type')\n\n    event_data = FirefoxPlacesBookmarkEventData()\n    event_data.host = rev_host or 'N/A'\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.places_title = self._GetRowValue(query_hash, row, 'places_title')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')\n    event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')\n\n    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_ADDED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'lastModified')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a bookmark row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def recipe_bigquery_view(config, auth_read, query, dataset, view, legacy):\n    bigquery(config, {'auth': auth_read, 'from': {'query': query, 'legacy': legacy}, 'to': {'dataset': dataset, 'view': view}})", "docstring": "Create a BigQuery view.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nquery (text) - SQL with newlines and all.\ndataset (string) - Existing BigQuery dataset.\nview (string) - View to create from this query.\nlegacy (boolean) - Query type must match source tables.", "source": "github-repos"}
{"code": "def unnest_primitive_type(beam_type: schema_pb2.FieldType):\n    avro_type = beam_type_to_avro_type(beam_type)\n    return avro_type['type'] if beam_type.WhichOneof('type_info') == 'atomic_type' else avro_type", "docstring": "unnests beam types that map to avro primitives or unions.\n\nif mapping to a avro primitive or a union, don't nest the field type\nfor complex types, like arrays, we need to nest the type.\nExample: { 'type': 'string' } -> 'string'\n{ 'type': 'array', 'items': 'string' }\n-> { 'type': 'array', 'items': 'string' }\n\nArgs:\nbeam_type: the beam type to map to avro.\n\nReturns:\nthe converted avro type with the primitive or union type unnested.", "source": "github-repos"}
{"code": "def list_vmss_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets',\n                        '?api-version=', COMP_API])\n    return do_get_next(endpoint, access_token)", "docstring": "List VM Scale Sets in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of VM scale sets.", "source": "juraj-google-style"}
{"code": "def variational_dropout(units, keep_prob, fixed_mask_dims=(1,)):\n    \n    units_shape = tf.shape(units)\n    noise_shape = [units_shape[n] for n in range(len(units.shape))]\n    for dim in fixed_mask_dims:\n        noise_shape[dim] = 1\n    return tf.nn.dropout(units, keep_prob, noise_shape)", "docstring": "Dropout with the same drop mask for all fixed_mask_dims\n\nArgs:\nunits: a tensor, usually with shapes [B x T x F], where\nB - batch size\nT - tokens dimension\nF - feature dimension\nkeep_prob: keep probability\nfixed_mask_dims: in these dimensions the mask will be the same\n\nReturns:\ndropped units tensor", "source": "juraj-google-style"}
{"code": "def remove_segments(self, segments_to_remove):\n        \n        v_ind = self.vertex_indices_in_segments(segments_to_remove)\n        self.segm = {name: faces for name, faces in self.segm.iteritems() if name not in segments_to_remove}\n        self.remove_vertices(v_ind)", "docstring": "Remove the faces and vertices for given segments, keeping all others.\n\nArgs:\nsegments_to_remove: a list of segnments whose vertices will be removed", "source": "juraj-google-style"}
{"code": "def AddScanNode(self, path_spec, parent_scan_node):\n    \n    scan_node = self._scan_nodes.get(path_spec, None)\n    if scan_node:\n      raise KeyError('Scan node already exists.')\n\n    scan_node = SourceScanNode(path_spec)\n    if parent_scan_node:\n      if parent_scan_node.path_spec not in self._scan_nodes:\n        raise RuntimeError('Parent scan node not present.')\n      scan_node.parent_node = parent_scan_node\n      parent_scan_node.sub_nodes.append(scan_node)\n\n    if not self._root_path_spec:\n      self._root_path_spec = path_spec\n\n    self._scan_nodes[path_spec] = scan_node\n\n    if path_spec.IsFileSystem():\n      self._file_system_scan_nodes[path_spec] = scan_node\n\n    self.updated = True\n    return scan_node", "docstring": "Adds a scan node for a certain path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nparent_scan_node (SourceScanNode): parent scan node or None.\n\nReturns:\nSourceScanNode: scan node.\n\nRaises:\nKeyError: if the scan node already exists.\nRuntimeError: if the parent scan node is not present.", "source": "juraj-google-style"}
{"code": "def deleted(self, deleted_since, filters=None, params=None):\n        \n\n        return self.tc_requests.deleted(\n            self.api_type,\n            self.api_sub_type,\n            deleted_since,\n            owner=self.owner,\n            filters=filters,\n            params=params,\n        )", "docstring": "Gets the indicators deleted.\n\nArgs:\nparams:\nfilters:\ndeleted_since: Date since its been deleted", "source": "juraj-google-style"}
{"code": "def slice_begin(self, tensor_shape, pnum):\n    \n    tensor_layout = self.tensor_layout(tensor_shape)\n    coordinates = pnum_to_processor_coordinates(self.shape, pnum)\n    ret = []\n    for dim_size, mesh_axis in zip(\n        tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):\n      if mesh_axis is None:\n        ret.append(0)\n      else:\n        ret.append(\n            dim_size \n    return ret", "docstring": "Begin position for the tensor slice for the given processor.\n\nArgs:\ntensor_shape: Shape.\npnum: int <= self.size.\n\nReturns:\nlist of integers with length tensor_shape.ndims.", "source": "juraj-google-style"}
{"code": "def dnd_setSnooze(self, *, num_minutes: int, **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        kwargs.update({\"num_minutes\": num_minutes})\n        return self.api_call(\"dnd.setSnooze\", http_verb=\"GET\", params=kwargs)", "docstring": "Turns on Do Not Disturb mode for the current user, or changes its duration.\n\nArgs:\nnum_minutes (int): The snooze duration. e.g. 60", "source": "juraj-google-style"}
{"code": "def get_structure_property_dict(self, structure, include_base_props=True, ignore_errors=False):\n    s_props = ['trans_v', 'long_v', 'snyder_ac', 'snyder_opt', 'snyder_total', 'clarke_thermalcond', 'cahill_thermalcond', 'debye_temperature']\n    if (ignore_errors and ((self.k_vrh < 0) or (self.g_vrh < 0))):\n        sp_dict = {prop: None for prop in s_props}\n    else:\n        sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}\n    sp_dict['structure'] = structure\n    if include_base_props:\n        sp_dict.update(self.property_dict)\n    return sp_dict", "docstring": "returns a dictionary of properties derived from the elastic tensor\nand an associated structure\n\nArgs:\nstructure (Structure): structure object for which to calculate\nassociated properties\ninclude_base_props (bool): whether to include base properties,\nlike k_vrh, etc.\nignore_errors (bool): if set to true, will set problem properties\nthat depend on a physical tensor to None, defaults to False", "source": "codesearchnet"}
{"code": "def random_uniform(mesh, shape, **kwargs):\n    shape = convert_to_shape(shape)\n    return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]", "docstring": "Random uniform.\n\nArgs:\nmesh: a Mesh\nshape: a Shape\n**kwargs: keyword args for tf.random.uniform, except seed\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def __init__(self, fsntfs_attribute):\n    \n    if not fsntfs_attribute:\n      raise errors.BackEndError('Missing pyfsntfs attribute.')\n\n    super(NTFSAttribute, self).__init__()\n    self._fsntfs_attribute = fsntfs_attribute", "docstring": "Initializes the attribute object.\n\nArgs:\nfsntfs_attribute (pyfsntfs.attribute): NTFS attribute.\n\nRaises:\nBackEndError: if the pyfsntfs attribute is missing.", "source": "juraj-google-style"}
{"code": "def recipe_bigquery_query(config, auth_write, query, dataset, table, legacy):\n    bigquery(config, {'auth': auth_write, 'from': {'query': query, 'legacy': legacy}, 'to': {'dataset': dataset, 'table': table}})", "docstring": "Save query results into a BigQuery table.\n\nArgs:\nauth_write (authentication) - Credentials used for writing data.\nquery (text) - SQL with newlines and all.\ndataset (string) - Existing BigQuery dataset.\ntable (string) - Table to create from this query.\nlegacy (boolean) - Query type must match source tables.", "source": "github-repos"}
{"code": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    last_hidden_state = text_outputs[0][:, 0, :]\n    text_features = self.text_projection(last_hidden_state)\n    return text_features", "docstring": "Returns:\ntext_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\napplying the projection layer to the pooled output of [`AlignTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, AlignModel\n\n>>> model = AlignModel.from_pretrained(\"kakaobrain/align-base\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"kakaobrain/align-base\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        res = False\n        if type(self) is type(other) and \\\n                self.name == other.name and \\\n                self.size == other.size:\n            res = True\n        return res", "docstring": "Two Registers are the same if they are of the same type\n(i.e. quantum/classical), and have the same name and size.\n\nArgs:\nother (Register): other Register\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def label_contains(node, triggers):\n    for trigger in triggers:\n        if (trigger.trigger_word in node.label):\n            (yield TriggerNode(trigger, node))", "docstring": "Determine if node contains any of the trigger_words provided.\n\nArgs:\nnode(Node): CFG node to check.\ntrigger_words(list[Union[Sink, Source]]): list of trigger words to look for.\n\nReturns:\nIterable of TriggerNodes found. Can be multiple because multiple\ntrigger_words can be in one node.", "source": "codesearchnet"}
{"code": "def __call__(self, x: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = nn_ops.conv2d(x, self.filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')\n    return {'output': out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\nx: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def _has_valid_abs_ref(self, i, construction_table):\n    c_table = construction_table\n    abs_refs = constants.absolute_refs\n    A = np.empty((3, 3))\n    row = c_table.index.get_loc(i)\n    if (row > 2):\n        message = 'The index {i} is not from the first three, rows'.format\n        raise ValueError(message(i=i))\n    for k in range(3):\n        if (k < row):\n            A[k] = self.loc[(c_table.iloc[(row, k)], ['x', 'y', 'z'])]\n        else:\n            A[k] = abs_refs[c_table.iloc[(row, k)]]\n    (v1, v2) = ((A[2] - A[1]), (A[1] - A[0]))\n    K = np.cross(v1, v2)\n    zero = np.full(3, 0.0)\n    return (not (np.allclose(K, zero) or np.allclose(v1, zero) or np.allclose(v2, zero)))", "docstring": "Checks, if ``i`` uses valid absolute references.\n\nChecks for each index from first to third row of the\n``construction_table``, if the references are colinear.\nThis case has to be specially treated, because the references\nare not only atoms (to fix internal degrees of freedom) but also points\nin cartesian space called absolute references.\n(to fix translational and rotational degrees of freedom)\n\nArgs:\ni (label): The label has to be in the first three rows.\nconstruction_table (pd.DataFrame):\n\nReturns:\nbool:", "source": "codesearchnet"}
{"code": "def score_braycurtis(self, term1, term2, **kwargs):\n    t1_kde = self.kde(term1, **kwargs)\n    t2_kde = self.kde(term2, **kwargs)\n    return (1 - distance.braycurtis(t1_kde, t2_kde))", "docstring": "Compute a weighting score based on the \"City Block\" distance between\nthe kernel density estimates of two terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns: float", "source": "codesearchnet"}
{"code": "def tag_sharding_attribute_for_dequeued_tensors(dequeues, dims):\n    nest.assert_shallow_structure(dequeues, dims)\n    return nest.map_structure_up_to(dequeues, _tag_sharding_attribute_for_dequeued_tensor, dequeues, dims)", "docstring": "Tags appropriate XLA sharding attribute to the dequeued tensors.\n\nArgs:\ndequeues: A list of dequeued tensors on TPU.\ndims: A list of integer describes how the tensor is partitioned.\n\nReturns:\nThe same dequeues with appropriate xla_sharding attribute.", "source": "github-repos"}
{"code": "def Delete(self, queue, tasks, mutation_pool=None):\n    if (queue is None):\n        return\n    if (mutation_pool is None):\n        raise ValueError(\"Mutation pool can't be none.\")\n    mutation_pool.QueueDeleteTasks(queue, tasks)", "docstring": "Removes the tasks from the queue.\n\nNote that tasks can already have been removed. It is not an error\nto re-delete an already deleted task.\n\nArgs:\nqueue: A queue to clear.\ntasks: A list of tasks to remove. Tasks may be Task() instances or integers\nrepresenting the task_id.\nmutation_pool: A MutationPool object to schedule deletions on.\n\nRaises:\nValueError: Mutation pool was not passed in.", "source": "codesearchnet"}
{"code": "def getcallargs_forhints(func, *type_args, **type_kwargs):\n    try:\n        signature = get_signature(func)\n    except ValueError as e:\n        logging.warning('Could not get signature for function: %s: %s', func, e)\n        return {}\n    try:\n        bindings = signature.bind(*type_args, **type_kwargs)\n    except TypeError as e:\n        raise TypeCheckError(e)\n    bound_args = bindings.arguments\n    for param in signature.parameters.values():\n        if param.name in bound_args:\n            if param.kind == param.VAR_POSITIONAL:\n                bound_args[param.name] = _normalize_var_positional_hint(bound_args[param.name])\n            elif param.kind == param.VAR_KEYWORD:\n                bound_args[param.name] = _normalize_var_keyword_hint(bound_args[param.name], param.name)\n        elif param.annotation != param.empty:\n            bound_args[param.name] = param.annotation\n        elif param.kind == param.VAR_POSITIONAL:\n            bound_args[param.name] = _ANY_VAR_POSITIONAL\n        elif param.kind == param.VAR_KEYWORD:\n            bound_args[param.name] = _ANY_VAR_KEYWORD\n        elif param.default is not param.empty:\n            bound_args[param.name] = typehints.Any\n        else:\n            raise ValueError('Unexpected unbound parameter: %s' % param.name)\n    return dict(bound_args)", "docstring": "Bind type_args and type_kwargs to func.\n\nWorks like inspect.getcallargs, with some modifications to support type hint\nchecks.\nFor unbound args, will use annotations and fall back to Any (or variants of\nAny).\n\nReturns:\nA mapping from parameter name to argument.", "source": "github-repos"}
{"code": "def poll(self, timeout=None):\n    p = select.poll()\n    p.register(self._fd, (select.POLLIN | select.POLLPRI))\n    events = p.poll(int((timeout * 1000)))\n    if (len(events) > 0):\n        return True\n    return False", "docstring": "Poll for data available for reading from the serial port.\n\n`timeout` can be positive for a timeout in seconds, 0 for a\nnon-blocking poll, or negative or None for a blocking poll. Default is\na blocking poll.\n\nArgs:\ntimeout (int, float, None): timeout duration in seconds.\n\nReturns:\nbool: ``True`` if data is available for reading from the serial port, ``False`` if not.", "source": "codesearchnet"}
{"code": "def _SetExtractionPreferredTimeZone(self, knowledge_base):\n    \n    \n    \n    if self._preferred_time_zone:\n      try:\n        knowledge_base.SetTimeZone(self._preferred_time_zone)\n      except ValueError:\n        \n        logger.warning(\n            'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(\n                self._preferred_time_zone, knowledge_base._time_zone.zone))", "docstring": "Sets the preferred time zone before extraction.\n\nArgs:\nknowledge_base (KnowledgeBase): contains information from the source\ndata needed for parsing.", "source": "juraj-google-style"}
{"code": "def __init__(self, breakpoints_func=None):\n    \n    \n    self._run_key_to_original_graphs = dict()\n    self._run_key_to_debug_graphs = dict()\n\n    if breakpoints_func:\n      assert callable(breakpoints_func)\n      self._breakpoints_func = breakpoints_func", "docstring": "Constructor of RunStates.\n\nArgs:\nbreakpoint_func: A callable of the signatuer:\ndef breakpoint_func():\nwhich returns all the currently activated breakpoints.", "source": "juraj-google-style"}
{"code": "def set_s3_bucket(self, region, name, bucketName):\n        \n        ct = self.session.client('cloudtrail', region_name=region)\n        ct.update_trail(Name=name, S3BucketName=bucketName)\n\n        auditlog(\n            event='cloudtrail.set_s3_bucket',\n            actor=self.ns,\n            data={\n                'account': self.account.account_name,\n                'region': region\n            }\n        )\n        self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(\n            bucketName,\n            name,\n            self.account.account_name,\n            region\n        ))", "docstring": "Sets the S3 bucket location for logfile delivery\n\nArgs:\nregion (`str`): Name of the AWS region\nname (`str`): Name of the CloudTrail Trail\nbucketName (`str`): Name of the S3 bucket to deliver log files to\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def search_variants(self, variant_ids):\n        \n        query = {'_id': {'$in': variant_ids}}\n        \n        return self.db.variant.find(query)", "docstring": "Make a batch search for variants in the database\n\nArgs:\nvariant_ids(list(str)): List of variant ids\n\nReturns:\nres(pymngo.Cursor(variant_obj)): The result", "source": "juraj-google-style"}
{"code": "def verify_json(self, json, user_key, user_id, device_id):\n    try:\n        signatures = json.pop('signatures')\n    except KeyError:\n        return False\n    key_id = 'ed25519:{}'.format(device_id)\n    try:\n        signature_base64 = signatures[user_id][key_id]\n    except KeyError:\n        json['signatures'] = signatures\n        return False\n    unsigned = json.pop('unsigned', None)\n    try:\n        olm.ed25519_verify(user_key, encode_canonical_json(json), signature_base64)\n        success = True\n    except olm.utility.OlmVerifyError:\n        success = False\n    json['signatures'] = signatures\n    if unsigned:\n        json['unsigned'] = unsigned\n    return success", "docstring": "Verifies a signed key object's signature.\n\nThe object must have a 'signatures' key associated with an object of the form\n`user_id: {key_id: signature}`.\n\nArgs:\njson (dict): The JSON object to verify.\nuser_key (str): The public ed25519 key which was used to sign the object.\nuser_id (str): The user who owns the device.\ndevice_id (str): The device who owns the key.\n\nReturns:\nTrue if the verification was successful, False if not.", "source": "codesearchnet"}
{"code": "def _FlushCache(cls, format_categories):\n    \n    if definitions.FORMAT_CATEGORY_ARCHIVE in format_categories:\n      cls._archive_remainder_list = None\n      cls._archive_scanner = None\n      cls._archive_store = None\n\n    if definitions.FORMAT_CATEGORY_COMPRESSED_STREAM in format_categories:\n      cls._compressed_stream_remainder_list = None\n      cls._compressed_stream_scanner = None\n      cls._compressed_stream_store = None\n\n    if definitions.FORMAT_CATEGORY_FILE_SYSTEM in format_categories:\n      cls._file_system_remainder_list = None\n      cls._file_system_scanner = None\n      cls._file_system_store = None\n\n    if definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE in format_categories:\n      cls._storage_media_image_remainder_list = None\n      cls._storage_media_image_scanner = None\n      cls._storage_media_image_store = None\n\n    if definitions.FORMAT_CATEGORY_VOLUME_SYSTEM in format_categories:\n      cls._volume_system_remainder_list = None\n      cls._volume_system_scanner = None\n      cls._volume_system_store = None", "docstring": "Flushes the cached objects for the specified format categories.\n\nArgs:\nformat_categories (set[str]): format categories.", "source": "juraj-google-style"}
{"code": "def get_country_by_name(self, country_name) -> 'Country':\n        \n        VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError)\n        if country_name not in self._countries_by_name.keys():\n            for country in self.countries:\n\n                if country.country_name == country_name:\n                    return country\n            raise ValueError(country_name)\n        else:\n            return self._countries_by_name[country_name]", "docstring": "Gets a country in this coalition by its name\n\nArgs:\ncountry_name: country name\n\nReturns: Country", "source": "juraj-google-style"}
{"code": "def close(self, virtual_account_id, data={}, **kwargs):\n    url = '{}/{}'.format(self.base_url, virtual_account_id)\n    data['status'] = 'closed'\n    return self.patch_url(url, data, **kwargs)", "docstring": "Close Virtual Account from given Id\n\nArgs:\nvirtual_account_id :\nId for which Virtual Account objects has to be Closed", "source": "codesearchnet"}
{"code": "def check_subword_sampling(tokenizer: PreTrainedTokenizer, text: Optional[str]=None, test_sentencepiece_ignore_case: bool=True) -> None:\n    text = 'This is a test for subword regularization.' if text is None else text\n    if test_sentencepiece_ignore_case:\n        text = text.lower()\n    tokens_list = []\n    for _ in range(5):\n        tokens_list.append(tokenizer.tokenize(text))\n    combinations = itertools.combinations(tokens_list, 2)\n    subword_sampling_found = False\n    for combination in combinations:\n        if combination[0] != combination[1]:\n            subword_sampling_found = True\n    unittest.TestCase().assertTrue(subword_sampling_found)\n    for tokens in tokens_list:\n        if test_sentencepiece_ignore_case:\n            unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower())\n        else:\n            unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens))", "docstring": "Check if the tokenizer generates different results when subword regularization is enabled.\n\nSubword regularization augments training data with subword sampling.\nThis has a random component.\n\nArgs:\ntokenizer: The tokenizer to check.\ntext: The text to use for the checks.\ntest_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`.", "source": "github-repos"}
{"code": "def __init__(self, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog', event_store: str='default_event_store'):\n    self.project = project\n    self.retry = retry\n    self.timeout = timeout\n    self.metadata = metadata\n    self.catalog_name = catalog_name\n    self.event_store = event_store", "docstring": "Initializes a :class:`WriteUserEvent` transform.\n\nArgs:\nproject (str): Optional. GCP project name in which the catalog\ndata will be imported.\nretry: Optional. Designation of what\nerrors, if any, should be retried.\ntimeout (float): Optional. The amount of time, in seconds, to wait\nfor the request to complete.\nmetadata: Optional. Strings which\nshould be sent along with the request as metadata.\ncatalog_name (str): Optional. Name of the catalog.\nDefault: 'default_catalog'\nevent_store (str): Optional. Name of the event store.\nDefault: 'default_event_store'", "source": "github-repos"}
{"code": "def get_path_from_query_string(req):\n    \n    if req.args.get('path') is None:\n        raise exceptions.UserError('Path not found in query string')\n    return req.args.get('path')", "docstring": "Gets path from query string\n\nArgs:\nreq (flask.request): Request object from Flask\n\nReturns:\npath (str): Value of \"path\" parameter from query string\n\nRaises:\nexceptions.UserError: If \"path\" is not found in query string", "source": "juraj-google-style"}
{"code": "def CheckInvalidIncrement(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n  if _RE_PATTERN_INVALID_INCREMENT.match(line):\n    error(filename, linenum, 'runtime/invalid_increment', 5,\n          'Changing pointer instead of value (or unused value of operator*).')", "docstring": "Checks for invalid increment *count++.\n\nFor example following function:\nvoid increment_counter(int* count) {\n*count++;\n}\nis invalid, because it effectively does count++, moving pointer, and should\nbe replaced with ++*count, (*count)++ or *count += 1.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def update_panel(self, panel_obj, version=None, date_obj=None):\n    LOG.info('Updating panel %s', panel_obj['panel_name'])\n    date = panel_obj['date']\n    if version:\n        LOG.info('Updating version from {0} to version {1}'.format(panel_obj['version'], version))\n        panel_obj['version'] = version\n        if date_obj:\n            date = date_obj\n    else:\n        date = (date_obj or dt.datetime.now())\n    panel_obj['date'] = date\n    updated_panel = self.panel_collection.find_one_and_replace({'_id': panel_obj['_id']}, panel_obj, return_document=pymongo.ReturnDocument.AFTER)\n    return updated_panel", "docstring": "Replace a existing gene panel with a new one\n\nKeeps the object id\n\nArgs:\npanel_obj(dict)\nversion(float)\ndate_obj(datetime.datetime)\n\nReturns:\nupdated_panel(dict)", "source": "codesearchnet"}
{"code": "def list(sandbox_name, results=15, start=0):\n    \n    result = util.callm(\"%s/%s\" % ('sandbox', 'list'), {'sandbox':sandbox_name, 'results': results, 'start': start})\n    assets = result['response']['assets']\n    start = result['response']['start']\n    total = result['response']['total']\n\n    return ResultList(assets, start, total)", "docstring": "Returns a list of all assets available in this sandbox\n\nArgs:\nsandbox_name (str): A string representing the name of the sandbox\n\nKwargs:\nresults (int): An integer number of results to return\n\nstart (int): An integer starting value for the result set\n\nReturns:\nA list of asset dictionaries\n\nExample:\n\n>>> sandbox.list('bluenote')\n[{}, {}]\n>>>", "source": "juraj-google-style"}
{"code": "def get_contract_data(self, contract_name):\n        \n\n        contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)\n        with open(contract_data_path, 'r') as contract_data_file:\n            contract_data = json.load(contract_data_file)\n\n        abi = contract_data['abi']\n        bytecode = contract_data['evm']['bytecode']['object']\n\n        return abi, bytecode", "docstring": "Returns the contract data for a given contract\n\nArgs:\ncontract_name (str): Name of the contract to return.\n\nReturns:\nstr, str: ABI and bytecode of the contract", "source": "juraj-google-style"}
{"code": "def __init__(self, batch_env):\n    \n    self._batch_env = batch_env\n    batch_dims = (len(self._batch_env),)\n    observ_shape = self._parse_shape(self._batch_env.observation_space)\n    observ_dtype = self._parse_dtype(self._batch_env.observation_space)\n    action_shape = self._parse_shape(self._batch_env.action_space)\n    action_dtype = self._parse_dtype(self._batch_env.action_space)\n    with tf.variable_scope('env_temporary'):\n      self._observ = tf.Variable(\n          lambda: tf.zeros(batch_dims + observ_shape, observ_dtype),\n          name='observ', trainable=False)\n      self._action = tf.Variable(\n          lambda: tf.zeros(batch_dims + action_shape, action_dtype),\n          name='action', trainable=False)\n      self._reward = tf.Variable(\n          lambda: tf.zeros(batch_dims, tf.float32),\n          name='reward', trainable=False)\n      self._done = tf.Variable(\n          lambda: tf.cast(tf.ones(batch_dims), tf.bool),\n          name='done', trainable=False)", "docstring": "Batch of environments inside the TensorFlow graph.\n\nArgs:\nbatch_env: Batch environment.", "source": "juraj-google-style"}
{"code": "def create_temp_grad(node, namer, tangent=False):\n  \n  if not isinstance(node, (gast.Subscript, gast.Name)):\n    raise TypeError\n\n  def _name_temp_grad(node):\n    name = namer.temp_grad(node.id, tangent)\n    temp_node = gast.Name(id=name, annotation=None, ctx=None)\n    return temp_node\n  if isinstance(node, gast.Subscript):\n    temp_node = _name_temp_grad(node.value)\n  else:\n    temp_node = _name_temp_grad(node)\n  anno.setanno(temp_node, 'temp_adjoint_var', node)\n  return temp_node", "docstring": "Create a variable to store partial gradients.\n\nArgs:\nnode: See `create_grad`.\nnamer: See `create_grad`.\ntangent: See `create_grad`.\n\nReturns:\nnode: See `create_grad`. Returns a node representing the partial gradient.\nNote that this is always a simple variable e.g. the temporary partial\nof `x[i]` can be something like `_dxi`.\n\nNodes are given an annotation `temp_adjoint_var`.", "source": "juraj-google-style"}
{"code": "def SetConfiguredUsers(self, users):\n    \n    prefix = self.logger.name + '-'\n    with tempfile.NamedTemporaryFile(\n        mode='w', prefix=prefix, delete=True) as updated_users:\n      updated_users_file = updated_users.name\n      for user in users:\n        updated_users.write(user + '\\n')\n      updated_users.flush()\n      if not os.path.exists(self.google_users_dir):\n        os.makedirs(self.google_users_dir)\n      shutil.copy(updated_users_file, self.google_users_file)\n\n    file_utils.SetPermissions(self.google_users_file, mode=0o600, uid=0, gid=0)", "docstring": "Set the list of configured Google user accounts.\n\nArgs:\nusers: list, the username strings of the Linux accounts.", "source": "juraj-google-style"}
{"code": "def call_remoteckan(self, *args, **kwargs):\n    requests_kwargs = kwargs.get('requests_kwargs', dict())\n    credentials = self._get_credentials()\n    if credentials:\n        requests_kwargs['auth'] = credentials\n    kwargs['requests_kwargs'] = requests_kwargs\n    apikey = kwargs.get('apikey', self.get_api_key())\n    kwargs['apikey'] = apikey\n    return self.remoteckan().call_action(*args, **kwargs)", "docstring": "Calls the remote CKAN\n\nArgs:\n*args: Arguments to pass to remote CKAN call_action method\n**kwargs: Keyword arguments to pass to remote CKAN call_action method\n\nReturns:\nDict: The response from the remote CKAN call_action method", "source": "codesearchnet"}
{"code": "def filepaths_in_dir(path):\n    \n    filepaths = []\n    for root, directories, filenames in os.walk(path):\n        for filename in filenames:\n            filepath = os.path.join(root, filename)\n            filepath = filepath.replace(path, '').lstrip('/')\n            filepaths.append(filepath)\n    return filepaths", "docstring": "Find all files in a directory, and return the relative paths to those files.\n\nArgs:\npath (str): the directory path to walk\n\nReturns:\nlist: the list of relative paths to all files inside of ``path`` or its\nsubdirectories.", "source": "juraj-google-style"}
{"code": "def DeregisterCredentials(cls, credentials):\n    \n    if credentials.type_indicator not in cls._credentials:\n      raise KeyError(\n          'Credential object not set for type indicator: {0:s}.'.format(\n              credentials.type_indicator))\n\n    del cls._credentials[credentials.type_indicator]", "docstring": "Deregisters a path specification credentials.\n\nArgs:\ncredentials (Credentials): credentials.\n\nRaises:\nKeyError: if credential object is not set for the corresponding\ntype indicator.", "source": "juraj-google-style"}
{"code": "def get_unscaled_gradients(self, grads):\n    loss_scale_reciprocal = 1.0 / self.loss_scale\n    return [_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None for g in grads]", "docstring": "Unscales the gradients by the loss scale.\n\nThis method is only needed if you compute gradients manually, e.g. with\n`tf.GradientTape`. In that case, call this method to unscale the gradients\nafter computing them with `tf.GradientTape`. If you use\n`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss\nscaling is automatically applied and this method is unneeded.\n\nIf this method is called, `get_scaled_loss` should also be called. See\nthe `tf.keras.mixed_precision.LossScaleOptimizer` doc for an\nexample.\n\nArgs:\ngrads: A list of tensors, each which will be divided by the loss scale.\nCan have None values, which are ignored.\n\nReturns:\nA new list the same size as `grads`, where every non-None value in `grads`\nis divided by `LossScaleOptimizer.loss_scale`.", "source": "github-repos"}
{"code": "def get_fieldset_index(fieldsets, index_or_name):\n    if isinstance(index_or_name, six.integer_types):\n        return index_or_name\n    for (key, value) in enumerate(fieldsets):\n        if (value[0] == index_or_name):\n            return key\n    raise KeyError(\"Key not found: '{}'.\".format(index_or_name))", "docstring": "Return the index of a fieldset in the ``fieldsets`` list.\n\nArgs:\nfieldsets (list): The original ``fieldsets`` list.\nindex_or_name (int or str): The value of the reference element, or directly its numeric index.\n\nReturns:\n(int) The index of the fieldset in the ``fieldsets`` list.", "source": "codesearchnet"}
{"code": "def make_ordered_column_names(include_label=True):\n    result = ['clicked'] if include_label else []\n    for name in _INTEGER_COLUMN_NAMES:\n        result.append(name)\n    for name in _CATEGORICAL_COLUMN_NAMES:\n        result.append(name)\n    return result", "docstring": "Returns the column names in the dataset in the order as they appear.\n\nArgs:\ninclude_label: Indicates whether the label feature should be included.\nReturns:\nA list of column names in the dataset.", "source": "github-repos"}
{"code": "def input_defs(self, transitive: bool=True) -> List['SymbolDefinition']:\n    parent_func = self.parent_func()\n    var_producers: Dict[str, Set[SymbolDefinition]] = {arg: set() for arg in parent_func.args}\n    var_producers[parent_func.name] = set()\n\n    def analyze_var_producers(k: pg.KeyPath, v: Any, p: pg.Symbolic):\n        del k, p\n        if v is self:\n            return pg.TraverseAction.STOP\n        if isinstance(v, SymbolDefinition):\n            var_entry = set([v])\n            if transitive:\n                for var_name in v.input_vars():\n                    var_entry.update(var_producers[var_name])\n            var_producers[v.name] = var_entry\n        if v is not parent_func and isinstance(v, Function):\n            return pg.TraverseAction.CONTINUE\n        return pg.TraverseAction.ENTER\n    pg.traverse(parent_func, analyze_var_producers)\n    dependencies: Set[SymbolDefinition] = set()\n    for var_name in self.input_vars():\n        if var_name not in var_producers:\n            raise ValueError(f\"Undefined variable {repr(var_name)} found in function '{parent_func.name}' line\n        dependencies.update(var_producers[var_name])\n    return sorted(dependencies, key=lambda x: x.line_number())", "docstring": "Returns the symbol definitions for the inputs of this code entity.\n\nArgs:\ntransitive: If True, transitive inputs will be included.\nOtherwise, only the direct dependencies will be included.\n\nReturns:\nA list of `SymbolDefinition` in their declaration order that produce\nthe inputs required for current code entity.", "source": "github-repos"}
{"code": "def replace_case(self, case_obj):\n    LOG.info('Saving case %s', case_obj['_id'])\n    case_obj['updated_at'] = (datetime.datetime.now(),)\n    updated_case = self.case_collection.find_one_and_replace({'_id': case_obj['_id']}, case_obj, return_document=pymongo.ReturnDocument.AFTER)\n    return updated_case", "docstring": "Replace a existing case with a new one\n\nKeeps the object id\n\nArgs:\ncase_obj(dict)\n\nReturns:\nupdated_case(dict)", "source": "codesearchnet"}
{"code": "def register_extension(self, group, name, extension):\n        \n\n        if isinstance(extension, str):\n            name, extension = self.load_extension(extension)[0]\n\n        if group not in self._registered_extensions:\n            self._registered_extensions[group] = []\n\n        self._registered_extensions[group].append((name, extension))", "docstring": "Register an extension.\n\nArgs:\ngroup (str): The type of the extension\nname (str): A name for the extension\nextension (str or class): If this is a string, then it will be\ninterpreted as a path to import and load.  Otherwise it\nwill be treated as the extension object itself.", "source": "juraj-google-style"}
{"code": "def serve(self, server=None):\n        \n        if server is None:\n            from wsgiref.simple_server import make_server\n            server = lambda app: make_server('', 8000, app).serve_forever()\n            print('Listening on 0.0.0.0:8000')\n        try:\n            server(self)\n        finally:\n            server.socket.close()", "docstring": "Serve app using wsgiref or provided server.\n\nArgs:\n- server (callable): An callable", "source": "juraj-google-style"}
{"code": "def _log_submission(submission, student_item):\n    \n    logger.info(\n        u\"Created submission uuid={submission_uuid} for \"\n        u\"(course_id={course_id}, item_id={item_id}, \"\n        u\"anonymous_student_id={anonymous_student_id})\"\n        .format(\n            submission_uuid=submission[\"uuid\"],\n            course_id=student_item[\"course_id\"],\n            item_id=student_item[\"item_id\"],\n            anonymous_student_id=student_item[\"student_id\"]\n        )\n    )", "docstring": "Log the creation of a submission.\n\nArgs:\nsubmission (dict): The serialized submission model.\nstudent_item (dict): The serialized student item model.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def db_en020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_en020`'.format(value))\n    self._db_en020 = value", "docstring": "Corresponds to IDD Field `db_en020`\nmean coincident dry-bulb temperature to\nEnthalpy corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_en020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def total_covariance_fn(self):\n    return self._total_covariance_fn", "docstring": "The total covariance of the process between two times.\n\nReturns:\nA Python callable returning the integrated covariances between two times.\nThe callable accepts two real `Tensor` arguments. The first argument\nis the left end point and the second is the right end point of the time\ninterval for which the total covariance is needed.\n\nThe shape of the two input arguments and their dtypes must match.\nThe output of the callable is a `Tensor` of shape\n`times_shape + [dim, dim]` containing the integrated covariance matrix\nbetween the start times and end times.", "source": "github-repos"}
{"code": "def run(self, dag):\n    num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])\n    if (num_dag_qubits > self.coupling_map.size()):\n        raise TranspilerError('Number of qubits greater than device.')\n    self.property_set['layout'] = Layout.generate_trivial_layout(*dag.qregs.values())", "docstring": "Pick a layout by assigning n circuit qubits to device qubits 0, .., n-1.\n\nArgs:\ndag (DAGCircuit): DAG to find layout for.\n\nRaises:\nTranspilerError: if dag wider than self.coupling_map", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 input_file=\"mol.qin\",\n                 output_file=\"mol.qout\",\n                 rca_gdm_thresh=1.0E-3,\n                 scf_max_cycles=200):\n        \n        self.input_file = input_file\n        self.output_file = output_file\n        self.scf_max_cycles = scf_max_cycles\n        self.geom_max_cycles = geom_max_cycles\n        self.qcinp = QCInput.from_file(self.input_file)\n        self.outdata = None\n        self.errors = None\n        self.qchem_job = qchem_job", "docstring": "Initializes the error handler from a set of input and output files.\n\nArgs:\ninput_file (str): Name of the QChem input file.\noutput_file (str): Name of the QChem output file.\nrca_gdm_thresh (float): The threshold for the prior scf algorithm.\nIf last deltaE is larger than the threshold try RCA_DIIS\nfirst, else, try DIIS_GDM first.\nscf_max_cycles (int): The max iterations to set to fix SCF failure.", "source": "juraj-google-style"}
{"code": "def from_file(filename, file_format=\"xyz\"):\n        \n        mols = list(pb.readfile(str(file_format), str(filename)))\n        return BabelMolAdaptor(mols[0].OBMol)", "docstring": "Uses OpenBabel to read a molecule from a file in all supported formats.\n\nArgs:\nfilename: Filename of input file\nfile_format: String specifying any OpenBabel supported formats.\n\nReturns:\nBabelMolAdaptor object", "source": "juraj-google-style"}
{"code": "def output(self, value):\n        \n        return super(Source, self).output(self.stream, value)", "docstring": "SPL output port assignment expression.\n\nArguments:\nvalue(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.\n\nReturns:\nExpression: Output assignment expression that is valid as a the context of this operator.", "source": "juraj-google-style"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '--name', '--timeline_name', '--timeline-name',\n        dest='timeline_name', type=str, action='store',\n        default=cls._DEFAULT_NAME, required=False, help=(\n            'The name of the timeline in Timesketch. Default: '\n            'hostname if present in the storage file. If no hostname '\n            'is found then manual input is used.'))\n\n    argument_group.add_argument(\n        '--index', dest='index', type=str, action='store',\n        default=cls._DEFAULT_UUID, required=False, help=(\n            'The name of the Elasticsearch index. Default: Generate a random '\n            'UUID'))\n\n    argument_group.add_argument(\n        '--flush_interval', '--flush-interval', dest='flush_interval',\n        type=int, action='store', default=cls._DEFAULT_FLUSH_INTERVAL,\n        required=False, help=(\n            'The number of events to queue up before sent in bulk '\n            'to Elasticsearch.'))\n\n    argument_group.add_argument(\n        '--doc_type', dest='document_type', type=str,\n        action='store', default=cls._DEFAULT_DOCUMENT_TYPE, help=(\n            'Name of the document type that will be used in ElasticSearch.'))\n\n    argument_group.add_argument(\n        '--username', dest='username', type=str,\n        action='store', default=cls._DEFAULT_USERNAME, help=(\n            'Username of a Timesketch user that will own the timeline.'))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def get(self, vrf=None):\n    match = '^router ospf .*'\n    if vrf:\n        match += (' vrf %s' % vrf)\n    config = self.get_block(match)\n    if (not config):\n        return None\n    response = dict()\n    response.update(self._parse_router_id(config))\n    response.update(self._parse_vrf(config))\n    response.update(self._parse_networks(config))\n    response.update(self._parse_ospf_process_id(config))\n    response.update(self._parse_redistribution(config))\n    response.update(self._parse_shutdown(config))\n    return response", "docstring": "Returns the OSPF routing configuration\n\nArgs:\nvrf (str): VRF name to return OSPF routing config for\nReturns:\ndict:\nkeys: router_id (int): OSPF router-id\nvrf (str): VRF of the OSPF process\nnetworks (dict): All networks that\nare advertised in OSPF\nospf_process_id (int): OSPF proc id\nredistribution (dict): All protocols that\nare configured to be\nredistributed in OSPF\nshutdown (bool): Gives the current shutdown\noff the process", "source": "codesearchnet"}
{"code": "def MapFile(self, key_path_prefix, registry_file):\n    self._registry_files[key_path_prefix.upper()] = registry_file\n    registry_file.SetKeyPathPrefix(key_path_prefix)", "docstring": "Maps the Windows Registry file to a specific key path prefix.\n\nArgs:\nkey_path_prefix (str): key path prefix.\nregistry_file (WinRegistryFile): Windows Registry file.", "source": "codesearchnet"}
{"code": "def _shape_union(shapes):\n    return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))", "docstring": "A shape containing the union of all dimensions in the input shapes.\n\nArgs:\nshapes: a list of Shapes\n\nReturns:\na Shape", "source": "codesearchnet"}
{"code": "def get_problem_name(base_name, was_reversed=False, was_copy=False):\n  \n  if any(base_name.endswith(suffix) for suffix in (\"_rev\", \"_copy\")):\n    raise ValueError(\"`base_name` cannot end in '_rev' or '_copy'\")\n  name = base_name\n  if was_copy:\n    name = \"%s_copy\" % name\n  if was_reversed:\n    name = \"%s_rev\" % name\n  return name", "docstring": "Construct a problem name from base and reversed/copy options.\n\nInverse of `parse_problem_name`.\n\nArgs:\nbase_name: base problem name. Should not end in \"_rev\" or \"_copy\"\nwas_reversed: if the problem is to be reversed\nwas_copy: if the problem is to be copied\n\nReturns:\nstring name consistent with use with `parse_problem_name`.\n\nRaises:\nValueError if `base_name` ends with \"_rev\" or \"_copy\"", "source": "juraj-google-style"}
{"code": "def raw_dict_factory(cursor, row):\n        \n        d = {}\n        for idx, col in enumerate(cursor.description):\n            val = row[idx]\n            name = col[0]\n            if name == Field.Time_Stamp or name == Field.Meter_Address:\n                d[name] = str(val)\n                continue\n            if name == \"Raw_A\" or name == \"Raw_B\":\n                d[name] = str(val)\n                continue\n        return d", "docstring": "Sqlite callback accepting the cursor and the original row as a tuple.\n\nSimple return of JSON safe types, including raw read hex strings.\n\nArgs:\ncursor (sqlite cursor):  Original cursory\nrow (sqlite row tuple): Original row.\n\nReturns:\ndict: modified row.", "source": "juraj-google-style"}
{"code": "def fetch(args: List[str], env: Dict[str, str] = None,\n          encoding: str = sys.getdefaultencoding()) -> str:\n    \n    stdout, _ = run(args, env=env, capture_stdout=True,\n                    echo_stdout=False, encoding=encoding)\n    log.debug(stdout)\n    return stdout", "docstring": "Run a command and returns its stdout.\n\nArgs:\nargs: the command-line arguments\nenv: the operating system environment to use\nencoding: the encoding to use for ``stdout``\n\nReturns:\nthe command's ``stdout`` output", "source": "juraj-google-style"}
{"code": "def SetUpdateTimestamp(self, value):\n    if value is None or isinstance(value, int):\n        self._last_update_timestamp = value\n    else:\n        raise TypeError('timestamp can only be int or None, not %r', value)", "docstring": "Set the last update timestamp of this map.\n\nArgs:\nvalue:  An int containing seconds since epoch, or None.\n\nRaises:\nTypeError: The argument is not an int or None.", "source": "github-repos"}
{"code": "def repl_member_add(self, params):\n        \n        repl_config = self.config\n        member_id = max([member['_id'] for member in repl_config['members']]) + 1\n        member_config = self.member_create(params, member_id)\n        repl_config['members'].append(member_config)\n        if not self.repl_update(repl_config):\n            self.member_del(member_id, reconfig=True)\n            raise ReplicaSetError(\"Could not add member to ReplicaSet.\")\n        return member_id", "docstring": "create new mongod instances and add it to the replica set.\nArgs:\nparams - mongod params\nreturn True if operation success otherwise False", "source": "juraj-google-style"}
{"code": "async def pull(self, from_image: str, *, auth: Optional[Union[(MutableMapping, str, bytes)]]=None, tag: str=None, repo: str=None, stream: bool=False) -> Mapping:\n    image = from_image\n    params = {'fromImage': image}\n    headers = {}\n    if repo:\n        params['repo'] = repo\n    if tag:\n        params['tag'] = tag\n    if (auth is not None):\n        (registry, has_registry_host, _) = image.partition('/')\n        if (not has_registry_host):\n            raise ValueError('Image should have registry host when auth information is provided')\n        headers['X-Registry-Auth'] = compose_auth_header(auth, registry)\n    response = (await self.docker._query('images/create', 'POST', params=params, headers=headers))\n    return (await json_stream_result(response, stream=stream))", "docstring": "Similar to `docker pull`, pull an image locally\n\nArgs:\nfromImage: name of the image to pull\nrepo: repository name given to an image when it is imported\ntag: if empty when pulling an image all tags\nfor the given image to be pulled\nauth: special {'auth': base64} pull private repo", "source": "codesearchnet"}
{"code": "def from_filenames(filenames, transformations=None, primitive=True, extend_collection=False):\n    allcifs = []\n    for fname in filenames:\n        with open(fname, 'r') as f:\n            allcifs.append(f.read())\n    return CifTransmuter('\\n'.join(allcifs), transformations, primitive=primitive, extend_collection=extend_collection)", "docstring": "Generates a TransformedStructureCollection from a cif, possibly\ncontaining multiple structures.\n\nArgs:\nfilenames: List of strings of the cif files\ntransformations: New transformations to be applied to all\nstructures\nprimitive: Same meaning as in __init__.\nextend_collection: Same meaning as in __init__.", "source": "codesearchnet"}
{"code": "async def run_commentator(video_mode: str) -> None:\n    pya = pyaudio.PyAudio()\n    video_mode_enum = video.VideoMode(video_mode)\n    input_processor = video.VideoIn(video_mode=video_mode_enum) + audio_io.PyAudioIn(pya)\n\n    async def input_stream():\n        \n        try:\n            while True:\n                await asyncio.sleep(1)\n        finally:\n            yield content_api.ProcessorPart('Ending the stream')\n    commentator_processor = commentator.create_live_commentator(API_KEY)\n    consume_output = audio_io.PyAudioOut(pya)\n    live_commentary_agent = input_processor + commentator_processor + consume_output\n    async for _ in live_commentary_agent(input_stream()):\n        pass", "docstring": "Runs a live commentator in a CLI environment.\n\nThe commentator is run from a CLI environment. The audio and video input and\noutput are connected to the local machine's default input and output devices.\n\n\nArgs:\nvideo_mode: The video mode to use for the video. Can be CAMERA or SCREEN.", "source": "github-repos"}
{"code": "def _CheckIsFile(self, file_entry):\n    if (definitions.FILE_ENTRY_TYPE_FILE not in self._file_entry_types):\n        return False\n    return file_entry.IsFile()", "docstring": "Checks the is_file find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"}
{"code": "def attribute_string(self):\n    escape_map = {ord('='): '%3D', ord(','): '%2C', ord(';'): '%3B', ord('&'): '%26', ord('\\t'): '%09'}\n    list_type = type(list())\n    attrs = self.attributes\n    if (type(attrs) is OrderedDict):\n        reserved_attrs = []\n        other_attrs = []\n        for (name, value) in attrs.items():\n            name = name.translate(escape_map)\n            if (type(value) == list_type):\n                value = ','.join([i.translate(escape_map) for i in value])\n            else:\n                value = value.translate(escape_map)\n            out_attr = '{0}={1}'.format(name, value)\n            if name[0].isupper():\n                reserved_attrs.append(out_attr)\n            else:\n                other_attrs.append(out_attr)\n        out_attrs = ';'.join((reserved_attrs + other_attrs))\n    else:\n        out_attrs = attrs\n    return out_attrs", "docstring": "Restore an entries attributes in original format, escaping reserved\ncharacters when necessary\n\nReturns:\nstr: escaped attributes as tag=value pairs, separated by semi-colon", "source": "codesearchnet"}
{"code": "def _deserialization_helper(self, state, ray_forking):\n        \n        worker = ray.worker.get_global_worker()\n        worker.check_connected()\n\n        if state[\"ray_forking\"]:\n            actor_handle_id = state[\"actor_handle_id\"]\n        else:\n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            actor_handle_id = compute_actor_handle_id_non_forked(\n                state[\"actor_handle_id\"], worker.current_task_id)\n\n        self.__init__(\n            state[\"actor_id\"],\n            state[\"module_name\"],\n            state[\"class_name\"],\n            state[\"actor_cursor\"],\n            state[\"actor_method_names\"],\n            state[\"method_signatures\"],\n            state[\"method_num_return_vals\"],\n            state[\"actor_creation_dummy_object_id\"],\n            state[\"actor_method_cpus\"],\n            \n            \n            state[\"actor_driver_id\"],\n            actor_handle_id=actor_handle_id)", "docstring": "This is defined in order to make pickling work.\n\nArgs:\nstate: The serialized state of the actor handle.\nray_forking: True if this is being called because Ray is forking\nthe actor handle and false if it is being called by pickling.", "source": "juraj-google-style"}
{"code": "def samefile(path1, path2):\n    \n    \n    path1, path1_is_storage = format_and_is_storage(path1)\n    path2, path2_is_storage = format_and_is_storage(path2)\n\n    \n    if not path1_is_storage and not path2_is_storage:\n        return os_path_samefile(path1, path2)\n\n    \n    if not path1_is_storage or not path2_is_storage:\n        return False\n\n    with handle_os_exceptions():\n        \n        system = get_instance(path1)\n        if system is not get_instance(path2):\n            return False\n\n        \n        elif system.relpath(path1) != system.relpath(path2):\n            return False\n\n    \n    return True", "docstring": "Return True if both pathname arguments refer to the same file or directory.\n\nEquivalent to \"os.path.samefile\".\n\nArgs:\npath1 (path-like object): Path or URL.\npath2 (path-like object): Path or URL.\n\nReturns:\nbool: True if same file or directory.", "source": "juraj-google-style"}
{"code": "def main(jlink_serial, device):\n    buf = StringIO.StringIO()\n    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)\n    jlink.open(serial_no=jlink_serial)\n    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)\n    jlink.connect(device, verbose=True)\n    sys.stdout.write(('ARM Id: %d\\n' % jlink.core_id()))\n    sys.stdout.write(('CPU Id: %d\\n' % jlink.core_cpu()))\n    sys.stdout.write(('Core Name: %s\\n' % jlink.core_name()))\n    sys.stdout.write(('Device Family: %d\\n' % jlink.device_family()))", "docstring": "Prints the core's information.\n\nArgs:\njlink_serial (str): the J-Link serial number\ndevice (str): the target CPU\n\nReturns:\nAlways returns ``0``.\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def from_graph(cls, graph, linear_energy_ranges, quadratic_energy_ranges):\n    get_env().enable_infix_notation = True\n    theta = cls.empty(dimod.SPIN)\n    theta.add_offset(Symbol('offset', REAL))\n\n    def Linear(v):\n        'Create a Symbol for the linear bias including the energy range\\n            constraints.'\n        bias = Symbol('h_{}'.format(v), REAL)\n        (min_, max_) = linear_energy_ranges[v]\n        theta.assertions.add(LE(bias, limitReal(max_)))\n        theta.assertions.add(GE(bias, limitReal(min_)))\n        return bias\n\n    def Quadratic(u, v):\n        'Create a Symbol for the quadratic bias including the energy range\\n            constraints.'\n        bias = Symbol('J_{},{}'.format(u, v), REAL)\n        if ((v, u) in quadratic_energy_ranges):\n            (min_, max_) = quadratic_energy_ranges[(v, u)]\n        else:\n            (min_, max_) = quadratic_energy_ranges[(u, v)]\n        theta.assertions.add(LE(bias, limitReal(max_)))\n        theta.assertions.add(GE(bias, limitReal(min_)))\n        return bias\n    for v in graph.nodes:\n        theta.add_variable(v, Linear(v))\n    for (u, v) in graph.edges:\n        theta.add_interaction(u, v, Quadratic(u, v))\n    return theta", "docstring": "Create Theta from a graph and energy ranges.\n\nArgs:\ngraph (:obj:`networkx.Graph`):\nProvides the structure for Theta.\n\nlinear_energy_ranges (dict):\nA dict of the form {v: (min, max), ...} where min and max are the\nrange of values allowed to v.\nquadratic_energy_ranges (dict):\nA dict of the form {(u, v): (min, max), ...} where min and max\nare the range of values allowed to (u, v).\n\nReturns:\n:obj:`.Theta`", "source": "codesearchnet"}
{"code": "def from_dict(cls, pwinput_dict):\n        \n        pwinput = cls(structure=Structure.from_dict(pwinput_dict['structure']),\n                          pseudo=pwinput_dict['pseudo'],\n                          control=pwinput_dict['sections']['control'],\n                          system=pwinput_dict['sections']['system'],\n                          electrons=pwinput_dict['sections']['electrons'],\n                          ions=pwinput_dict['sections']['ions'],\n                          cell=pwinput_dict['sections']['cell'],\n                          kpoints_mode=pwinput_dict['kpoints_mode'],\n                          kpoints_grid=pwinput_dict['kpoints_grid'],\n                          kpoints_shift=pwinput_dict['kpoints_shift'])\n        return pwinput", "docstring": "Load a PWInput object from a dictionary.\n\nArgs:\npwinput_dict (dict): dictionary with PWInput data\n\nReturns:\nPWInput object", "source": "juraj-google-style"}
{"code": "def _check_positional_parameters(method_signature, base_signature, is_subtype, ctx):\n    check_types = True\n    for base_param_pos, base_param_name in enumerate(base_signature.param_names):\n        if base_param_pos == 0 or base_param_pos < base_signature.posonly_count:\n            continue\n        if base_param_name == '_':\n            continue\n        if base_param_pos < method_signature.posonly_count:\n            return SignatureError(SignatureErrorType.POSITIONAL_PARAMETER_COUNT_MISMATCH, 'Too many positional-only parameters in overriding method.')\n        elif base_param_pos < len(method_signature.param_names):\n            method_param_name = method_signature.param_names[base_param_pos]\n        else:\n            if method_signature.varargs_name:\n                break\n            return SignatureError(SignatureErrorType.POSITIONAL_PARAMETER_COUNT_MISMATCH, 'Not enough positional parameters in overriding method.')\n        method_param_name = method_signature.param_names[base_param_pos]\n        if method_param_name not in (base_param_name, '_'):\n            log.warning('Name mismatch for parameter %r.', base_param_name)\n            if not ctx.options.overriding_renamed_parameter_count_checks:\n                return None\n            check_types = False\n    remaining_method_params = method_signature.param_names[len(base_signature.param_names):] if not base_signature.varargs_name else []\n    for method_param_name in remaining_method_params:\n        if method_param_name in base_signature.kwonly_params:\n            continue\n        if method_param_name not in method_signature.defaults:\n            return SignatureError(SignatureErrorType.DEFAULT_PARAMETER_MISMATCH, f\"Parameter '{method_param_name}' must have a default value.\")\n    if not check_types:\n        return None\n    return _check_positional_parameter_annotations(method_signature, base_signature, is_subtype)", "docstring": "Checks that the positional parameters of the overriding method match.\n\nArgs:\nmethod_signature: signature of the overriding method.\nbase_signature: signature of the overridden method.\nis_subtype: a binary function to compare types.\nctx: Context\n\nReturns:\nSignatureError if a mismatch is detected. Otherwise returns None.", "source": "github-repos"}
{"code": "def new_space(self, name=None, bases=None, formula=None, refs=None):\n    space = self._impl.model.currentspace = self._impl.new_space(name=name, bases=get_impls(bases), formula=formula, refs=refs)\n    return space.interface", "docstring": "Create a child space.\n\nArgs:\nname (str, optional): Name of the space. Defaults to ``SpaceN``,\nwhere ``N`` is a number determined automatically.\nbases (optional): A space or a sequence of spaces to be the base\nspace(s) of the created space.\nformula (optional): Function to specify the parameters of\ndynamic child spaces. The signature of this function is used\nfor setting parameters for dynamic child spaces.\nThis function should return a mapping of keyword arguments\nto be passed to this method when the dynamic child spaces\nare created.\n\nReturns:\nThe new child space.", "source": "codesearchnet"}
{"code": "def stream_matching(self, address, name):\n        \n\n        matching = [x for x in self.entries if x.valid and x.target.matches(address, name)]\n\n        rpc_list = []\n        for var in matching:\n            rpc_list.extend(var.generate_rpcs(address))\n\n        return rpc_list", "docstring": "Return the RPCs needed to stream matching config variables to the given tile.\n\nThis function will return a list of tuples suitable for passing to\nEmulatedDevice.deferred_rpc.\n\nArgs:\naddress (int): The address of the tile that we wish to stream to\nname (str or bytes): The 6 character name of the target tile.\n\nReturns:\nlist of tuple: The list of RPCs to send to stream these variables to a tile.", "source": "juraj-google-style"}
{"code": "def DeleteFeaturesFromFeatureLayer(self, url, sql, chunksize=0):\n    fl = None\n    try:\n        fl = FeatureLayer(url=url, securityHandler=self._securityHandler)\n        totalDeleted = 0\n        if (chunksize > 0):\n            qRes = fl.query(where=sql, returnIDsOnly=True)\n            if ('error' in qRes):\n                print(qRes)\n                return qRes\n            elif ('objectIds' in qRes):\n                oids = qRes['objectIds']\n                total = len(oids)\n                if (total == 0):\n                    return {'success': True, 'message': 'No features matched the query'}\n                i = 0\n                print(('%s features to be deleted' % total))\n                while (i <= len(oids)):\n                    oidsDelete = ','.join((str(e) for e in oids[i:(i + chunksize)]))\n                    if (oidsDelete == ''):\n                        continue\n                    else:\n                        results = fl.deleteFeatures(objectIds=oidsDelete)\n                    if ('deleteResults' in results):\n                        totalDeleted += len(results['deleteResults'])\n                        print(('%s%% Completed: %s/%s ' % (int(((totalDeleted / float(total)) * 100)), totalDeleted, total)))\n                        i += chunksize\n                    else:\n                        print(results)\n                        return {'success': True, 'message': ('%s deleted' % totalDeleted)}\n                qRes = fl.query(where=sql, returnIDsOnly=True)\n                if ('objectIds' in qRes):\n                    oids = qRes['objectIds']\n                    if (len(oids) > 0):\n                        print(('%s features to be deleted' % len(oids)))\n                        results = fl.deleteFeatures(where=sql)\n                        if ('deleteResults' in results):\n                            totalDeleted += len(results['deleteResults'])\n                            return {'success': True, 'message': ('%s deleted' % totalDeleted)}\n                        else:\n                            return results\n                return {'success': True, 'message': ('%s deleted' % totalDeleted)}\n            else:\n                print(qRes)\n        else:\n            results = fl.deleteFeatures(where=sql)\n            if (results is not None):\n                if ('deleteResults' in results):\n                    return {'success': True, 'message': (totalDeleted + len(results['deleteResults']))}\n                else:\n                    return results\n    except:\n        (line, filename, synerror) = trace()\n        raise common.ArcRestHelperError({'function': 'DeleteFeaturesFromFeatureLayer', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        fl = None\n        del fl\n        gc.collect()", "docstring": "Removes features from a hosted feature service layer by SQL query.\n\nArgs:\nurl (str): The URL of the feature service layer.\nsql (str): The SQL query to apply against the feature service.\nThose features that satisfy the query will be deleted.\nchunksize (int): The maximum amount of features to remove at a time. Defaults to 0.\nReturns:\nThe result from :py:func:`arcrest.agol.services.FeatureLayer.deleteFeatures`.\nNotes:\nIf you want to delete all features, it is suggested to use the SQL query ``\"1=1\"``.", "source": "codesearchnet"}
{"code": "def convert_upsample_bilinear(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting upsample...')\n    if (names == 'short'):\n        tf_name = ('UPSL' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    output_size = params['output_size']\n    align_corners = (params['align_corners'] > 0)\n\n    def target_layer(x, size=output_size, align_corners=align_corners):\n        import tensorflow as tf\n        x = tf.transpose(x, [0, 2, 3, 1])\n        x = tf.image.resize_images(x, size, align_corners=align_corners)\n        x = tf.transpose(x, [0, 3, 1, 2])\n        return x\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert upsample_bilinear2d layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def xmlstring(self, pretty_print=False):\n    s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')\n    if (sys.version < '3'):\n        if isinstance(s, str):\n            s = unicode(s, 'utf-8')\n    elif isinstance(s, bytes):\n        s = str(s, 'utf-8')\n    s = s.replace('ns0:', '')\n    s = s.replace(':ns0', '')\n    return s", "docstring": "Serialises this FoLiA element and all its contents to XML.\n\nReturns:\nstr: a string with XML representation for this element and all its children", "source": "codesearchnet"}
{"code": "def from_json(cls, json, image_config=None):\n        \n        cls.image_config = image_config\n        return cls(**{\n            attr: json.get(attr if key is None else key)\n            for attr, key in cls.JSON_MAPPING.items()\n        })", "docstring": "Create a model instance\n\nArguments:\njson (:py:class:`dict`): The parsed JSON data.\nimage_config (:py:class:`dict`): The API image configuration\ndata.\n\nReturns:\n:py:class:`BaseModel`: The model instance.", "source": "juraj-google-style"}
{"code": "def _update_context_field_binary_composition(present_locations, expression):\n    if (not any((isinstance(expression.left, ContextField), isinstance(expression.right, ContextField)))):\n        raise AssertionError(u'Received a BinaryComposition {} without any ContextField operands. This should never happen.'.format(expression))\n    if isinstance(expression.left, ContextField):\n        context_field = expression.left\n        (location_name, _) = context_field.location.get_location_name()\n        if (location_name not in present_locations):\n            return TrueLiteral\n    if isinstance(expression.right, ContextField):\n        context_field = expression.right\n        (location_name, _) = context_field.location.get_location_name()\n        if (location_name not in present_locations):\n            return TrueLiteral\n    return expression", "docstring": "Lower BinaryCompositions involving non-existent ContextFields to True.\n\nArgs:\npresent_locations: set of all locations in the current MatchQuery that have not been pruned\nexpression: BinaryComposition with at least one ContextField operand\n\nReturns:\nTrueLiteral iff either ContextField operand is not in `present_locations`,\nand the original expression otherwise", "source": "codesearchnet"}
{"code": "def wind_speed(self, value=999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `wind_speed`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `wind_speed`')\n        if (value > 40.0):\n            raise ValueError('value need to be smaller 40.0 for field `wind_speed`')\n    self._wind_speed = value", "docstring": "Corresponds to IDD Field `wind_speed`\n\nArgs:\nvalue (float): value for IDD Field `wind_speed`\nUnit: m/s\nvalue >= 0.0\nvalue <= 40.0\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def __new__(cls, name, bases, attrs):\n        \n        new_class = super(PipelineFormMediaMetaClass, cls).__new__(\n            cls, name, bases, attrs)\n\n        \n        \n        \n        \n        \n        \n        \n        if 'css_packages' in attrs:\n            new_class.css = PipelineFormMediaProperty(\n                cls._get_css_files, new_class, attrs.get('css') or {})\n\n        if 'js_packages' in attrs:\n            new_class.js = PipelineFormMediaProperty(\n                cls._get_js_files, new_class, attrs.get('js') or [])\n\n        return new_class", "docstring": "Construct the class.\n\nArgs:\nname (bytes):\nThe name of the class.\n\nbases (tuple):\nThe base classes for the class.\n\nattrs (dict):\nThe attributes going into the class.\n\nReturns:\ntype:\nThe new class.", "source": "juraj-google-style"}
{"code": "def google_api_build_errors(config, auth, api_call, errors):\n    if 'bigquery' in errors:\n        errors['bigquery']['schema'] = ERROR_SCHEMA\n        errors['bigquery']['format'] = 'JSON'\n        errors['bigquery']['skip_rows'] = 0\n        errors['bigquery']['disposition'] = 'WRITE_TRUNCATE'\n        table_create(config, errors['bigquery'].get('auth', auth), config.project, errors['bigquery']['dataset'], errors['bigquery']['table'], errors['bigquery']['schema'], overwrite=False)\n    return errors", "docstring": "Builds the BigQuery table to house the Google API call errors.\n\nOptional piece of the recipe, will create a BigQuery table for errors.\nTakes errors, which defines a bigquery endpoint, and adds fields.\n\nArgs:\nauth (string): either \"user\" or \"service\" to make the BigQuery call.\napi_call (dict): the JSON for the API call as defined in recipe.\nerrors (dict): defines where the data will be written\n\nReturns (dict):\nA modified results JSON with additional API values added.\n\nRaises:\nValueError: If a required key in the recipe is missing.", "source": "github-repos"}
{"code": "def logical_and(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return LogicalAnd().symbolic_call(x1, x2)\n    return backend.numpy.logical_and(x1, x2)", "docstring": "Computes the element-wise logical AND of the given input tensors.\n\nZeros are treated as `False` and non-zeros are treated as `True`.\n\nArgs:\nx1: Input tensor.\nx2: Input tensor.\n\nReturns:\nOutput tensor, element-wise logical AND of the inputs.", "source": "github-repos"}
{"code": "def _update_state_from_shard_states(self, state, shard_states, control):\n    \n    \n    state.active_shards, state.aborted_shards, state.failed_shards = 0, 0, 0\n    total_shards = 0\n    processed_counts = []\n    processed_status = []\n    state.counters_map.clear()\n\n    \n    for s in shard_states:\n      total_shards += 1\n      status = 'unknown'\n      if s.active:\n        state.active_shards += 1\n        status = 'running'\n      if s.result_status == model.ShardState.RESULT_SUCCESS:\n        status = 'success'\n      elif s.result_status == model.ShardState.RESULT_ABORTED:\n        state.aborted_shards += 1\n        status = 'aborted'\n      elif s.result_status == model.ShardState.RESULT_FAILED:\n        state.failed_shards += 1\n        status = 'failed'\n\n      \n      state.counters_map.add_map(s.counters_map)\n      processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))\n      processed_status.append(status)\n\n    state.set_processed_counts(processed_counts, processed_status)\n    state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())\n\n    spec = state.mapreduce_spec\n\n    if total_shards != spec.mapper.shard_count:\n      logging.error(\"Found %d shard states. Expect %d. \"\n                    \"Issuing abort command to job '%s'\",\n                    total_shards, spec.mapper.shard_count,\n                    spec.mapreduce_id)\n      \n      model.MapreduceControl.abort(spec.mapreduce_id)\n\n    \n    \n    state.active = bool(state.active_shards)\n    if not control and (state.failed_shards or state.aborted_shards):\n      \n      model.MapreduceControl.abort(spec.mapreduce_id)\n\n    if not state.active:\n      \n      if state.failed_shards or not total_shards:\n        state.result_status = model.MapreduceState.RESULT_FAILED\n      \n      \n      elif state.aborted_shards:\n        state.result_status = model.MapreduceState.RESULT_ABORTED\n      else:\n        state.result_status = model.MapreduceState.RESULT_SUCCESS\n      self._finalize_outputs(spec, state)\n      self._finalize_job(spec, state)\n    else:\n      @db.transactional(retries=5)\n      def _put_state():\n        \n        fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)\n        \n        \n        if not fresh_state.active:\n          logging.warning(\n              \"Job %s is not active. Looks like spurious task execution. \"\n              \"Dropping controller task.\", spec.mapreduce_id)\n          return\n        config = util.create_datastore_write_config(spec)\n        state.put(config=config)\n\n      _put_state()", "docstring": "Update mr state by examing shard states.\n\nArgs:\nstate: current mapreduce state as MapreduceState.\nshard_states: an iterator over shard states.\ncontrol: model.MapreduceControl entity.", "source": "juraj-google-style"}
{"code": "def check_and_update_resources(num_cpus, num_gpus, resources):\n    \n    if resources is None:\n        resources = {}\n    resources = resources.copy()\n    assert \"CPU\" not in resources\n    assert \"GPU\" not in resources\n    if num_cpus is not None:\n        resources[\"CPU\"] = num_cpus\n    if num_gpus is not None:\n        resources[\"GPU\"] = num_gpus\n\n    if \"CPU\" not in resources:\n        \n        \n        resources[\"CPU\"] = multiprocessing.cpu_count()\n\n    \n    gpu_ids = ray.utils.get_cuda_visible_devices()\n\n    \n    \n    if (\"GPU\" in resources and gpu_ids is not None\n            and resources[\"GPU\"] > len(gpu_ids)):\n        raise Exception(\"Attempting to start raylet with {} GPUs, \"\n                        \"but CUDA_VISIBLE_DEVICES contains {}.\".format(\n                            resources[\"GPU\"], gpu_ids))\n\n    if \"GPU\" not in resources:\n        \n        resources[\"GPU\"] = _autodetect_num_gpus()\n        \n        if gpu_ids is not None:\n            resources[\"GPU\"] = min(resources[\"GPU\"], len(gpu_ids))\n\n    resources = {\n        resource_label: resource_quantity\n        for resource_label, resource_quantity in resources.items()\n        if resource_quantity != 0\n    }\n\n    \n    for _, resource_quantity in resources.items():\n        assert (isinstance(resource_quantity, int)\n                or isinstance(resource_quantity, float))\n        if (isinstance(resource_quantity, float)\n                and not resource_quantity.is_integer()):\n            raise ValueError(\n                \"Resource quantities must all be whole numbers. Received {}.\".\n                format(resources))\n        if resource_quantity < 0:\n            raise ValueError(\n                \"Resource quantities must be nonnegative. Received {}.\".format(\n                    resources))\n        if resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY:\n            raise ValueError(\"Resource quantities must be at most {}.\".format(\n                ray_constants.MAX_RESOURCE_QUANTITY))\n\n    return resources", "docstring": "Sanity check a resource dictionary and add sensible defaults.\n\nArgs:\nnum_cpus: The number of CPUs.\nnum_gpus: The number of GPUs.\nresources: A dictionary mapping resource names to resource quantities.\n\nReturns:\nA new resource dictionary.", "source": "juraj-google-style"}
{"code": "def _UpdateSudoer(self, user, sudoer=False):\n    if sudoer:\n        self.logger.info('Adding user %s to the Google sudoers group.', user)\n        command = self.gpasswd_add_cmd.format(user=user, group=self.google_sudoers_group)\n    else:\n        self.logger.info('Removing user %s from the Google sudoers group.', user)\n        command = self.gpasswd_remove_cmd.format(user=user, group=self.google_sudoers_group)\n    try:\n        subprocess.check_call(command.split(' '))\n    except subprocess.CalledProcessError as e:\n        self.logger.warning('Could not update user %s. %s.', user, str(e))\n        return False\n    else:\n        self.logger.debug('Removed user %s from the Google sudoers group.', user)\n        return True", "docstring": "Update sudoer group membership for a Linux user account.\n\nArgs:\nuser: string, the name of the Linux user account.\nsudoer: bool, True if the user should be a sudoer.\n\nReturns:\nbool, True if user update succeeded.", "source": "codesearchnet"}
{"code": "def stage_tc_batch(self, owner, staging_data):\n        \n        batch = self.tcex.batch(owner)\n        for group in staging_data.get('group') or []:\n            \n            variable = group.pop('variable', None)\n            path = group.pop('path', None)\n            data = self.path_data(group, path)\n            \n            if group.get('xid') is None:\n                \n                group['xid'] = self.stage_tc_batch_xid(group.get('type'), group.get('name'), owner)\n            \n            group['ownerName'] = owner\n            \n            batch.add_group(group)\n            \n            if variable is not None and data is not None:\n                self.stage_redis(variable, self.stage_tc_group_entity(data))\n        for indicator in staging_data.get('indicator') or []:\n            \n            variable = indicator.pop('variable', None)\n            path = indicator.pop('path', None)\n            if indicator.get('xid') is None:\n                indicator['xid'] = self.stage_tc_batch_xid(\n                    indicator.get('type'), indicator.get('summary'), owner\n                )\n            indicator['ownerName'] = owner\n            \n            batch.add_indicator(indicator)\n            data = self.path_data(dict(indicator), path)\n            if variable is not None and data is not None:\n                \n                \n                \n                self.stage_redis(variable, self.stage_tc_indicator_entity(data))\n        \n        batch_results = batch.submit()\n        self.log.debug('[stage] Batch Results: {}'.format(batch_results))\n        for error in batch_results.get('errors') or []:\n            self.log.error('[stage] {}'.format(error))", "docstring": "Stage data in ThreatConnect Platform using batch API.\n\nArgs:\nowner (str): The ThreatConnect owner to submit batch job.\nstaging_data (dict): A dict of ThreatConnect batch data.", "source": "juraj-google-style"}
{"code": "def __new__(mcls, name, parents, attributes):\n    \n    return type.__new__(mcls, name, parents, attributes)", "docstring": "Creates a new Type object (an instance of TypeMetaclass).\nArgs:\nname (str): the name of the new type.\nparents (list(str)): a list of superclasses.\nattributes: (???): a map from name to value for \"parameters\" for defining\nthe new type.", "source": "juraj-google-style"}
{"code": "def get_video_transcript_url(video_id, language_code):\n    \n    video_transcript = VideoTranscript.get_or_none(video_id, language_code)\n    if video_transcript:\n        return video_transcript.url()", "docstring": "Returns course video transcript url or None if no transcript\n\nArguments:\nvideo_id: it can be an edx_video_id or an external_id extracted from external sources in a video component.\nlanguage_code: language code of a video transcript", "source": "juraj-google-style"}
{"code": "def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:\n        \n        return IGraphNode(graph=self._graph, index=index, name=name, external_id=external_id)", "docstring": "Returns a new `IGraphNode` instance with the given index and name.\n\nArguments:\nindex (int): The index of the node to create.\nname (str): The name of the node to create.\nexternal_id (Optional[str]): The external ID of the node.", "source": "juraj-google-style"}
{"code": "def sym_jsonify(self, compact: bool=True, type_info: bool=True, **kwargs) -> Any:\n    if not compact:\n        json_value = super().sym_jsonify(**kwargs)\n        assert isinstance(json_value, dict), json_value\n        if self._cloneable_metadata_keys:\n            json_value['_cloneable_metadata_keys'] = list(self._cloneable_metadata_keys)\n        return json_value\n    if self.children:\n        child_nodes = [c.sym_jsonify(compact, type_info=False, **kwargs) for c in self.children]\n        if self.value is not None:\n            if len(child_nodes) == 1:\n                single_choice = child_nodes[0]\n                if isinstance(single_choice, tuple):\n                    value = (self.value,) + single_choice\n                else:\n                    value = (self.value, single_choice)\n            else:\n                value = (self.value, child_nodes)\n        else:\n            value = child_nodes\n    else:\n        value = self.value\n    if type_info:\n        json_value = {utils.JSONConvertible.TYPE_NAME_KEY: self.__class__.__serialization_key__, 'format': 'compact', 'value': symbolic.to_json(value)}\n        if self.metadata:\n            json_value['metadata'] = symbolic.to_json(self.metadata)\n        if self._cloneable_metadata_keys:\n            json_value['_cloneable_metadata_keys'] = list(self._cloneable_metadata_keys)\n        return json_value\n    else:\n        return value", "docstring": "Convert DNA to JSON object.\n\nArgs:\ncompact: Whether use compact form. If compact, the nested number structure\nin DNA.parse will be used, otherwise members will be rendered out as\nregular symbolic Object.\ntype_info: If True, type information will be included in output, otherwise\ntype information will not be included. Applicable when compact is set\nto True.\n**kwargs: Keyword arguments that will be passed to symbolic.Object if\ncompact is False.\n\nReturns:\nJSON representation of DNA.", "source": "github-repos"}
{"code": "def make_graph_def_with_constant_nodes(node_sizes: Sequence[int], dtype: Optional[dtypes.DType]=None, **function_node_sizes) -> graph_pb2.GraphDef:\n    dtype = dtypes.float32\n    graph_def = graph_pb2.GraphDef()\n    n = 0\n\n    def add_nodes(node_list, sizes):\n        nonlocal n\n        for s in sizes:\n            node = node_list.add(name=f'Const_{n}', op='Const')\n            node.attr['value'].tensor.MergeFrom(tensor_util.make_tensor_proto(np.ones([]), dtype=dtype))\n            remaining_size = s - node.ByteSize()\n            if remaining_size < 0:\n                raise ValueError(f'Unable to create node of size {s} bytes.')\n            constant_size = [math.ceil(remaining_size / dtype.size)]\n            node.attr['value'].tensor.Clear()\n            node.attr['value'].tensor.MergeFrom(tensor_util.make_tensor_proto(np.random.random_sample(constant_size), dtype=dtype))\n            n += 1\n    add_nodes(graph_def.node, node_sizes)\n    for fn_name, fn_sizes in function_node_sizes.items():\n        fn = graph_def.library.function.add()\n        fn.signature.name = fn_name\n        add_nodes(fn.node_def, fn_sizes)\n    return graph_def", "docstring": "Creates a GraphDef with approximate node sizes.\n\nArgs:\nnode_sizes: list of ints, the approximate desired sizes of the nodes in the\nGraphDef.\ndtype: Dtype of encoded constant values (float32 or float64).\n**function_node_sizes: Map of function name to FunctionDef node sizes (see\n`node_sizes`).\n\nReturns:\nA GraphDef proto.", "source": "github-repos"}
{"code": "def split(self, amount):\n    split_objs = list(self.all())\n    if (not split_objs):\n        raise NoSplitsFoundForRecurringCost()\n    portions = [split_obj.portion for split_obj in split_objs]\n    split_amounts = ratio_split(amount, portions)\n    return [(split_objs[i], split_amount) for (i, split_amount) in enumerate(split_amounts)]", "docstring": "Split the value given by amount according to the RecurringCostSplit's portions\n\nArgs:\namount (Decimal):\n\nReturns:\nlist[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)", "source": "codesearchnet"}
{"code": "def save_model(self, fname, pretty=False):\n    with open(fname, 'w') as f:\n        xml_str = ET.tostring(self.root, encoding='unicode')\n        if pretty:\n            parsed_xml = xml.dom.minidom.parseString(xml_str)\n            xml_str = parsed_xml.toprettyxml(newl='')\n        f.write(xml_str)", "docstring": "Saves the xml to file.\n\nArgs:\nfname: output file location\npretty: attempts!! to pretty print the output", "source": "codesearchnet"}
{"code": "def add_execution_event(self, context_id, event):\n        \n        if context_id not in self._contexts:\n            LOGGER.warning(\"Context_id not in contexts, %s\", context_id)\n            return False\n\n        context = self._contexts.get(context_id)\n        context.add_execution_event(event)\n        return True", "docstring": "Within a context, append data to the execution result.\n\nArgs:\ncontext_id (str): the context id returned by create_context\ndata_type (str): type of data to append\ndata (bytes): data to append\n\nReturns:\n(bool): True if the operation is successful, False if\nthe context_id doesn't reference a known context.", "source": "juraj-google-style"}
{"code": "class EosTokenCriteria(StoppingCriteria):\n\n    def __init__(self, eos_token_id: Union[int, List[int], torch.Tensor]):\n        if not isinstance(eos_token_id, torch.Tensor):\n            if isinstance(eos_token_id, int):\n                eos_token_id = [eos_token_id]\n            eos_token_id = torch.tensor(eos_token_id)\n        self.eos_token_id = eos_token_id\n\n    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:\n        self.eos_token_id = self.eos_token_id.to(input_ids.device)\n        is_done = isin_mps_friendly(input_ids[:, -1], self.eos_token_id)\n        return is_done", "docstring": "This class can be used to stop generation whenever the \"end-of-sequence\" token is generated.\nBy default, it uses the `model.generation_config.eos_token_id`.\n\nArgs:\neos_token_id (`Union[int, List[int], torch.Tensor]`):\nThe id(s) of the *end-of-sequence* token.", "source": "github-repos"}
{"code": "def AssertIterableType(iterable, expected_item_type):\n  \n  \n  \n  \n  \n  \n  \n  if isinstance(iterable, collections.Iterator):\n    message = \"Expected iterable container but got iterator `%s` instead\"\n    message %= iterable\n    raise TypeError(message)\n\n  AssertType(iterable, collections.Iterable)\n  for item in iterable:\n    AssertType(item, expected_item_type)", "docstring": "Ensures that given iterable container has certain type.\n\nArgs:\niterable: An iterable container to assert the type for.\nexpected_item_type: An expected type of the container items.\n\nRaises:\nTypeError: If given container does is not an iterable or its items do not\nhave the expected type.", "source": "juraj-google-style"}
{"code": "def filecmp(filename_a, filename_b):\n    size_a = FileIO(filename_a, 'rb').size()\n    size_b = FileIO(filename_b, 'rb').size()\n    if size_a != size_b:\n        return False\n    crc_a = file_crc32(filename_a)\n    crc_b = file_crc32(filename_b)\n    return crc_a == crc_b", "docstring": "Compare two files, returning True if they are the same, False otherwise.\n\nWe check size first and return False quickly if the files are different sizes.\nIf they are the same size, we continue to generating a crc for the whole file.\n\nYou might wonder: why not use Python's `filecmp.cmp()` instead? The answer is\nthat the builtin library is not robust to the many different filesystems\nTensorFlow runs on, and so we here perform a similar comparison with\nthe more robust FileIO.\n\nArgs:\nfilename_a: string path to the first file.\nfilename_b: string path to the second file.\n\nReturns:\nTrue if the files are the same, False otherwise.", "source": "github-repos"}
{"code": "def registration_info_request(self, registration_id):\n        \n        return self.requests_session.get(\n            self.INFO_END_POINT + registration_id,\n            params={'details': 'true'}\n        )", "docstring": "Makes a request for registration info and returns the response object\n\nArgs:\nregistration_id: id to be checked\n\nReturns:\nresponse of registration info request", "source": "juraj-google-style"}
{"code": "def json(self) -> dict:\n    content = {}\n    content['name'] = self.name\n    content['callback'] = self.callback\n    self.control_json['content'] = content\n    return self.control_json", "docstring": "Returns json compatible state of the Button instance.\n\nReturns:\ncontrol_json: Json representation of Button state.", "source": "codesearchnet"}
{"code": "def __init__(self, dns_ip):\n        \n        self._dns_ip = dns_ip\n        self._resolver = ProxyResolver()\n        try:\n            self._resolver.set_proxies([self._dns_ip])\n        except async_dns.address.InvalidHost as e:\n            msg = f'RecordChecker got invalid DNS server IP: {e}.'\n            raise exceptions.InvalidDNSHost(msg)", "docstring": "Setup RecordChecker object.\n\nArgs:\ndns_ip: DNS server IP to query.", "source": "juraj-google-style"}
{"code": "def extract_object_files(archive_file: io.BufferedIOBase, dest_dir: str) -> None:\n    if not os.path.exists(dest_dir):\n        os.makedirs(dest_dir)\n    _check_archive_signature(archive_file)\n    extracted_files = dict()\n    for name, file_content in _extract_next_file(archive_file):\n        digest = hashlib.md5(file_content).digest()\n        for final_name in _generate_modified_filenames(name):\n            if final_name not in extracted_files:\n                extracted_files[final_name] = digest\n                with open(os.path.join(dest_dir, final_name), 'wb') as object_file:\n                    object_file.write(file_content)\n                break\n            elif extracted_files[final_name] == digest:\n                break", "docstring": "Extracts object files from the archive path to the destination directory.\n\nExtracts object files from the given BSD variant archive file. The extracted\nfiles are written to the destination directory, which will be created if the\ndirectory does not exist.\n\nColliding object file names are automatically renamed upon extraction in order\nto avoid unintended overwriting.\n\nArgs:\narchive_file: The archive file object pointing at its beginning.\ndest_dir: The destination directory path in which the extracted object files\nwill be written. The directory will be created if it does not exist.", "source": "github-repos"}
{"code": "def check_valid_values(function):\n    \n    def decorated(self, X, *args, **kwargs):\n\n        if isinstance(X, pd.DataFrame):\n            W = X.values\n\n        else:\n            W = X\n\n        if not len(W):\n            raise ValueError('Your dataset is empty.')\n\n        if W.dtype not in [np.dtype('float64'), np.dtype('int64')]:\n            raise ValueError('There are non-numerical values in your data.')\n\n        if np.isnan(W).any().any():\n            raise ValueError('There are nan values in your data.')\n\n        return function(self, X, *args, **kwargs)\n\n    return decorated", "docstring": "Raises an exception if the given values are not supported.\n\nArgs:\nfunction(callable): Method whose unique argument is a numpy.array-like object.\n\nReturns:\ncallable: Decorated function\n\nRaises:\nValueError: If there are missing or invalid values or if the dataset is empty.", "source": "juraj-google-style"}
{"code": "def _convert_as_saved_model(self):\n    temp_dir = tempfile.mkdtemp()\n    try:\n        self._freeze_keras_model(temp_dir)\n        if self.saved_model_dir:\n            return super(TFLiteKerasModelConverter, self).convert()\n    finally:\n        shutil.rmtree(temp_dir, True)", "docstring": "Converts a Keras model as a saved model.\n\nReturns:\nThe converted data in serialized format.", "source": "github-repos"}
{"code": "def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop, swap_memory, name):\n    if not isinstance(parallel_iterations, int) or parallel_iterations <= 0:\n        raise ValueError(\"'parallel_iterations' must be a positive integer: %s\" % parallel_iterations)\n    self._name = ops.get_default_graph().unique_name(name)\n    self._maximum_iterations = maximum_iterations\n    self._parallel_iterations = parallel_iterations\n    self._back_prop = back_prop\n    self._swap_memory = swap_memory\n    self._pivot_for_pred = None\n    self._pivot_for_body = None\n    self._pivot = None\n    self._loop_exits = []\n    self._loop_enters = []\n    self._graph = ops.get_default_graph()", "docstring": "Creates a new `WhileContext` from arguments.\n\nArgs:\nmaximum_iterations: Optional upper bound on number of loop iterations.\nparallel_iterations: The number of iterations allowed to run in parallel.\nback_prop: Whether backprop is enabled for this while loop.\nswap_memory: Whether GPU-CPU memory swap is enabled for this loop.\nname: Optional name prefix for the returned tensors.\n\nRaises:\nValueError: If `parallel_iterations` has invalid value.", "source": "github-repos"}
{"code": "def setup(app):\n    for (name, (default, rebuild, _)) in ref.CONFIG_VALUES.iteritems():\n        app.add_config_value(name, default, rebuild)\n    app.add_directive('javaimport', ref.JavarefImportDirective)\n    app.add_role('javaref', ref.JavarefRole(app))\n    app.connect('builder-inited', initialize_env)\n    app.connect('env-purge-doc', ref.purge_imports)\n    app.connect('env-merge-info', ref.merge_imports)\n    app.connect('build-finished', ref.cleanup)", "docstring": "Register the extension with Sphinx.\n\nArgs:\napp: The Sphinx application.", "source": "codesearchnet"}
{"code": "def average(self, selector=identity):\n    if self.closed():\n        raise ValueError('Attempt to call average() on a closed Queryable.')\n    if (not is_callable(selector)):\n        raise TypeError('average() parameter selector={0} is not callable'.format(repr(selector)))\n    total = 0\n    count = 0\n    for item in self.select(selector):\n        total += item\n        count += 1\n    if (count == 0):\n        raise ValueError('Cannot compute average() of an empty sequence.')\n    return (total / count)", "docstring": "Return the arithmetic mean of the values in the sequence..\n\nAll of the source sequence will be consumed.\n\nNote: This method uses immediate execution.\n\nArgs:\nselector: An optional single argument function which will be used\nto project the elements of the sequence. If omitted, the\nidentity function is used.\n\nReturns:\nThe arithmetic mean value of the projected sequence.\n\nRaises:\nValueError: If the Queryable has been closed.\nValueError: I the source sequence is empty.", "source": "codesearchnet"}
{"code": "def sas_interconnects(self):\n    if (not self.__sas_interconnects):\n        self.__sas_interconnects = SasInterconnects(self.__connection)\n    return self.__sas_interconnects", "docstring": "Gets the SAS Interconnects API client.\n\nReturns:\nSasInterconnects:", "source": "codesearchnet"}
{"code": "def keras_model_summary(name, data, step=None):\n    import tensorflow.summary as summary\n    from tensorflow.compat.v1 import SummaryMetadata\n    summary_metadata = SummaryMetadata()\n    summary_metadata.plugin_data.plugin_name = 'graph_keras_model'\n    summary_metadata.plugin_data.content = b'1'\n    try:\n        json_string = data.to_json()\n    except Exception as exc:\n        warnings.warn(f'Model failed to serialize as JSON. Ignoring... {exc}')\n        return False\n    with summary.experimental.summary_scope(name, 'graph_keras_model', [data, step]) as (tag, _):\n        return summary.write(tag=tag, tensor=json_string, step=step, metadata=summary_metadata)", "docstring": "Writes a Keras model as JSON to as a Summary.\n\nWriting the Keras model configuration allows the TensorBoard graph plugin to\nrender a conceptual graph, as opposed to graph of ops. In case the model\nfails to serialize as JSON, it ignores and returns False.\n\nArgs:\nname: A name for this summary. The summary tag used for TensorBoard will\nbe this name prefixed by any active name scopes.\ndata: A Keras Model to write.\nstep: Explicit `int64`-castable monotonic step value for this summary.\nIf omitted, this defaults to `tf.summary.experimental.get_step()`,\nwhich must not be `None`.\n\nReturns:\nTrue on success, or False if no summary was written because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is `None`.", "source": "github-repos"}
{"code": "def to_soft(self, path_or_handle, as_gzip=False):\n        \n        if isinstance(path_or_handle, str):\n            if as_gzip:\n                with gzip.open(path_or_handle, 'wt') as outfile:\n                    outfile.write(self._get_object_as_soft())\n            else:\n                with open(path_or_handle, 'w') as outfile:\n                    outfile.write(self._get_object_as_soft())\n        else:\n            path_or_handle.write(self._get_object_as_soft())", "docstring": "Save the object in a SOFT format.\n\nArgs:\npath_or_handle (:obj:`str` or :obj:`file`): Path or handle to\noutput file\nas_gzip (:obj:`bool`): Save as gzip", "source": "juraj-google-style"}
{"code": "def do_labels_update(self, info, labels):\n        \n        if self.update_label_func:\n            self.update_label_func(self.label_name, info, labels)", "docstring": "Updates a dictionary of labels using the assigned update_op_func\n\nArgs:\ninfo (:class:`endpoints_management.control.report_request.Info`): the\ninfo instance to update\nlabels (dict[string[string]]): the labels dictionary\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "juraj-google-style"}
{"code": "def set_organization(self, organization):\n        \n        \n        if isinstance(organization, hdx.data.organization.Organization) or isinstance(organization, dict):\n            if 'id' not in organization:\n                organization = hdx.data.organization.Organization.read_from_hdx(organization['name'], configuration=self.configuration)\n            organization = organization['id']\n        elif not isinstance(organization, str):\n            raise HDXError('Type %s cannot be added as a organization!' % type(organization).__name__)\n        if is_valid_uuid(organization) is False and organization != 'hdx':\n            raise HDXError('%s is not a valid organization id!' % organization)\n        self.data['owner_org'] = organization", "docstring": "Set the dataset's organization.\n\nArgs:\norganization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary.\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def namespace(self, mid: ModuleId) -> YangIdentifier:\n        \n        try:\n            mdata = self.modules[mid]\n        except KeyError:\n            raise ModuleNotRegistered(*mid) from None\n        return mdata.main_module[0]", "docstring": "Return the namespace corresponding to a module or submodule.\n\nArgs:\nmid: Module identifier.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.", "source": "juraj-google-style"}
{"code": "def __init__(self, src, raw_traces, trace_factory, filename):\n    self.text = src\n    self.traces = _collect_traces(raw_traces, trace_factory)\n    self.filename = filename\n    self._lines = src.split('\\n')\n    self._offsets = []\n    self._init_byte_offsets()", "docstring": "Initializer.\n\nArgs:\nsrc: The source text.\nraw_traces: Raw (opcode, symbol, types) values.\ntrace_factory: A subclass of source.AbstractTrace that will be used to\ninstantiate traces from raw values.\nfilename: The filename.", "source": "github-repos"}
{"code": "def to_html(self):\n    if (self.items is None):\n        return\n    else:\n        html = ('<ol%s>\\n' % self.html_attributes())\n        for item in self.items:\n            html += ('<li>%s</li>\\n' % item.to_html())\n        html += '</ol>'\n        return html", "docstring": "Render a Text MessageElement as html\n\nArgs:\nNone\n\nReturns:\nStr the html representation of the Text MessageElement\n\nRaises:\nErrors are propagated", "source": "codesearchnet"}
{"code": "def call_rpc(self, address, rpc_id, payload=b\"\"):\n        \n\n        return self.emulator.call_rpc_external(address, rpc_id, payload)", "docstring": "Call an RPC by its address and ID.\n\nThis will send the RPC to the background rpc dispatch thread and\nsynchronously wait for the response.\n\nArgs:\naddress (int): The address of the mock tile this RPC is for\nrpc_id (int): The number of the RPC\npayload (bytes): A byte string of payload parameters up to 20 bytes\n\nReturns:\nbytes: The response payload from the RPC", "source": "juraj-google-style"}
{"code": "def getitem_column_array(self, key):\n        \n        \n        numeric_indices = list(self.columns.get_indexer_for(key))\n\n        \n        \n        def getitem(df, internal_indices=[]):\n            return df.iloc[:, internal_indices]\n\n        result = self.data.apply_func_to_select_indices(\n            0, getitem, numeric_indices, keep_remaining=False\n        )\n        \n        \n        new_columns = self.columns[numeric_indices]\n        new_dtypes = self.dtypes[numeric_indices]\n        return self.__constructor__(result, self.index, new_columns, new_dtypes)", "docstring": "Get column data for target labels.\n\nArgs:\nkey: Target labels by which to retrieve data.\n\nReturns:\nA new QueryCompiler.", "source": "juraj-google-style"}
{"code": "def compile_files(raw_dir, raw_files, tag):\n    tf.logging.info(('Compiling files with tag %s.' % tag))\n    filename = ('%s-%s' % (_PREFIX, tag))\n    input_compiled_file = os.path.join(raw_dir, (filename + '.lang1'))\n    target_compiled_file = os.path.join(raw_dir, (filename + '.lang2'))\n    with tf.gfile.Open(input_compiled_file, mode='w') as input_writer:\n        with tf.gfile.Open(target_compiled_file, mode='w') as target_writer:\n            for i in range(len(raw_files['inputs'])):\n                input_file = raw_files['inputs'][i]\n                target_file = raw_files['targets'][i]\n                tf.logging.info(('Reading files %s and %s.' % (input_file, target_file)))\n                write_file(input_writer, input_file)\n                write_file(target_writer, target_file)\n    return (input_compiled_file, target_compiled_file)", "docstring": "Compile raw files into a single file for each language.\n\nArgs:\nraw_dir: Directory containing downloaded raw files.\nraw_files: Dict containing filenames of input and target data.\n{\"inputs\": list of files containing data in input language\n\"targets\": list of files containing corresponding data in target language\n}\ntag: String to append to the compiled filename.\n\nReturns:\nFull path of compiled input and target files.", "source": "codesearchnet"}
{"code": "def _represent_argument(directive_location, context, argument, inferred_type):\n    argument_name = argument[1:]\n    validate_safe_string(argument_name)\n    if is_variable_argument(argument):\n        existing_type = context['inputs'].get(argument_name, inferred_type)\n        if (not inferred_type.is_same_type(existing_type)):\n            raise GraphQLCompilationError(u'Incompatible types inferred for argument {}. The argument cannot simultaneously be {} and {}.'.format(argument, existing_type, inferred_type))\n        context['inputs'][argument_name] = inferred_type\n        return (expressions.Variable(argument, inferred_type), None)\n    elif is_tag_argument(argument):\n        argument_context = context['tags'].get(argument_name, None)\n        if (argument_context is None):\n            raise GraphQLCompilationError(u'Undeclared argument used: {}'.format(argument))\n        location = argument_context['location']\n        optional = argument_context['optional']\n        tag_inferred_type = argument_context['type']\n        if (location is None):\n            raise AssertionError(u'Argument declared without location: {}'.format(argument_name))\n        if (location.field is None):\n            raise AssertionError(u'Argument location is not a property field: {}'.format(location))\n        if (not inferred_type.is_same_type(tag_inferred_type)):\n            raise GraphQLCompilationError(u'The inferred type of the matching @tag directive does not match the inferred required type for this filter: {} vs {}'.format(tag_inferred_type, inferred_type))\n        field_is_local = (directive_location.at_vertex() == location.at_vertex())\n        non_existence_expression = None\n        if optional:\n            if field_is_local:\n                non_existence_expression = expressions.FalseLiteral\n            else:\n                non_existence_expression = expressions.BinaryComposition(u'=', expressions.ContextFieldExistence(location.at_vertex()), expressions.FalseLiteral)\n        if field_is_local:\n            representation = expressions.LocalField(argument_name)\n        else:\n            representation = expressions.ContextField(location, tag_inferred_type)\n        return (representation, non_existence_expression)\n    else:\n        raise GraphQLCompilationError(u'Non-argument type found: {}'.format(argument))", "docstring": "Return a two-element tuple that represents the argument to the directive being processed.\n\nArgs:\ndirective_location: Location where the directive is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nargument: string, the name of the argument to the directive\ninferred_type: GraphQL type object specifying the inferred type of the argument\n\nReturns:\n(argument_expression, non_existence_expression)\n- argument_expression: an Expression object that captures the semantics of the argument\n- non_existence_expression: None or Expression object;\nIf the current block is not optional, this is set to None. Otherwise, it is an\nexpression that will evaluate to True if the argument is skipped as optional and\ntherefore not present, and False otherwise.", "source": "codesearchnet"}
{"code": "def _get_example_from_basic_type(type):\n        \n        if type == 'integer':\n            return [42, 24]\n        elif type == 'number':\n            return [5.5, 5.5]\n        elif type == 'string':\n            return ['string', 'string2']\n        elif type == 'datetime':\n            return ['2015-08-28T09:02:57.481Z', '2015-08-28T09:02:57.481Z']\n        elif type == 'boolean':\n            return [False, True]\n        elif type == 'null':\n            return ['null', 'null']", "docstring": "Get example from the given type.\n\nArgs:\ntype: the type you want an example of.\n\nReturns:\nAn array with two example values of the given type.", "source": "juraj-google-style"}
{"code": "def add_function(self, call_fn, name, match_layer_training_arg):\n    fn = LayerCall(self, self._maybe_wrap_with_training_arg(call_fn, match_layer_training_arg), name, input_signature=self.fn_input_signature)\n    self._functions[name] = fn.wrapped_call\n    return fn", "docstring": "Adds a layer call function to the collection.\n\nArgs:\ncall_fn: a python function\nname: Name of call function\nmatch_layer_training_arg: If True, removes the `training` from the\nfunction arguments when calling `call_fn`.\n\nReturns:\nLayerCall (tf.function)", "source": "github-repos"}
{"code": "def non_fluents_scope(self) -> Dict[(str, TensorFluent)]:\n    if (self.__dict__.get('non_fluents') is None):\n        self._initialize_non_fluents()\n    return dict(self.non_fluents)", "docstring": "Returns a partial scope with non-fluents.\n\nReturns:\nA mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"}
{"code": "def sg_all(tensor, opt):\n    r\n    return tf.reduce_all(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the \"logical and\" of elements across axis of a tensor.\n\nSee `tf.reduce_all()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\naxis : A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def _urllib_post(self, url, json='', data='', username='', password='', headers=None, timeout=30):\n    if (headers is None):\n        headers = {}\n    raw_store = json\n    raw_request = (json_lib.dumps(json) if json else urlencode(data))\n    url_request = Request(url, data=raw_request.encode('utf8'))\n    if json:\n        url_request.add_header('Content-Type', 'application/json')\n    elif (not data):\n        raise ValueError('Please provide either a json or a data field.')\n    headers['User-Agent'] = self.user_agent\n    raw_request = raw_store\n    if (username and password):\n        if (sys.version_info[0] >= 3):\n            basic_authstring = base64.encodebytes(('%s:%s' % (username, password)).encode()).decode().replace('\\n', '')\n        else:\n            basic_authstring = base64.encodestring(('%s:%s' % (username, password))).replace('\\n', '')\n        url_request.add_header('Authorization', ('Basic %s' % basic_authstring))\n    for (key, value) in headers.items():\n        url_request.add_header(key, str(value))\n    try:\n        response = urlopen(url_request, timeout=timeout)\n    except HTTPError as e:\n        raw_response = e.read()\n        return (raw_response, raw_request, e.getcode(), e.headers)\n    else:\n        raw_response = response.read()\n        response.close()\n        return (raw_response, raw_request, response.getcode(), dict(response.info()))", "docstring": "This function will POST to the url endpoint using urllib2. returning\nan AdyenResult object on 200 HTTP responce. Either json or data has to\nbe provided. If username and password are provided, basic auth will be\nused.\n\nArgs:\nurl (str):                  url to send the POST\njson (dict, optional):      Dict of the JSON to POST\ndata (dict, optional):      Dict, presumed flat structure of\nkey/value of request to place as\nwww-form\nusername (str, optional):    Username for basic auth. Must be\nuncluded as part of password.\npassword (str, optional):   Password for basic auth. Must be\nincluded as part of username.\nheaders (dict, optional):   Key/Value pairs of headers to include\ntimeout (int, optional): Default 30. Timeout for the request.\n\nReturns:\nstr:    Raw response received\nstr:    Raw request placed\nint:    HTTP status code, eg 200,404,401\ndict:   Key/Value pairs of the headers received.", "source": "codesearchnet"}
{"code": "def diff(self, other):\n    similar_param = {}\n    different_param = {}\n    for (k1, v1) in self.items():\n        if (k1 not in other):\n            different_param[k1] = {'INCAR1': v1, 'INCAR2': None}\n        elif (v1 != other[k1]):\n            different_param[k1] = {'INCAR1': v1, 'INCAR2': other[k1]}\n        else:\n            similar_param[k1] = v1\n    for (k2, v2) in other.items():\n        if ((k2 not in similar_param) and (k2 not in different_param)):\n            if (k2 not in self):\n                different_param[k2] = {'INCAR1': None, 'INCAR2': v2}\n    return {'Same': similar_param, 'Different': different_param}", "docstring": "Diff function for Incar.  Compares two Incars and indicates which\nparameters are the same and which are not. Useful for checking whether\ntwo runs were done using the same parameters.\n\nArgs:\nother (Incar): The other Incar object to compare to.\n\nReturns:\nDict of the following format:\n{\"Same\" : parameters_that_are_the_same,\n\"Different\": parameters_that_are_different}\nNote that the parameters are return as full dictionaries of values.\nE.g. {\"ISIF\":3}", "source": "codesearchnet"}
{"code": "def destination(self, bearing, distance):\n        \n        return (segment.destination(bearing, distance) for segment in self)", "docstring": "Calculate destination locations for given distance and bearings.\n\nArgs:\nbearing (float): Bearing to move on in degrees\ndistance (float): Distance in kilometres\n\nReturns:\nlist of list of Point: Groups of points shifted by ``distance``\nand ``bearing``", "source": "juraj-google-style"}
{"code": "def matches(self, desc):\n        \n        desc_value_type = desc.valueType or ValueType.STRING  \n        return (self.label_name == desc.key and\n                self.value_type == desc_value_type)", "docstring": "Determines if a given label descriptor matches this enum instance\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicemanagement_v1_messages.LabelDescriptor`):\nthe instance to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "juraj-google-style"}
{"code": "def compute_metrics(self, previous):\n    delta_t = self.time_difference(previous)\n    delta_x = self.distance(previous)\n    vel = 0\n    delta_v = 0\n    acc = 0\n    if (delta_t != 0):\n        vel = (delta_x / delta_t)\n        delta_v = (vel - previous.vel)\n        acc = (delta_v / delta_t)\n    self.dt = delta_t\n    self.dx = delta_x\n    self.acc = acc\n    self.vel = vel\n    return self", "docstring": "Computes the metrics of this point\n\nComputes and updates the dt, vel and acc attributes.\n\nArgs:\nprevious (:obj:`Point`): Point before\nReturns:\n:obj:`Point`: Self", "source": "codesearchnet"}
{"code": "def without_operations_touching(self, qubits: Iterable[raw_types.Qid]):\n    qubits = frozenset(qubits)\n    if (not self.operates_on(qubits)):\n        return self\n    return Moment((operation for operation in self.operations if qubits.isdisjoint(frozenset(operation.qubits))))", "docstring": "Returns an equal moment, but without ops on the given qubits.\n\nArgs:\nqubits: Operations that touch these will be removed.\n\nReturns:\nThe new moment.", "source": "codesearchnet"}
{"code": "def get_authorization_url(self, client_id=None, instance_id=None, redirect_uri=None, region=None, scope=None, state=None):\n    client_id = (client_id or self.client_id)\n    instance_id = (instance_id or self.instance_id)\n    redirect_uri = (redirect_uri or self.redirect_uri)\n    region = (region or self.region)\n    scope = (scope or self.scope)\n    state = (state or str(uuid.uuid4()))\n    self.state = state\n    return (Request('GET', self.auth_base_url, params={'client_id': client_id, 'instance_id': instance_id, 'redirect_uri': redirect_uri, 'region': region, 'response_type': 'code', 'scope': scope, 'state': state}).prepare().url, state)", "docstring": "Generate authorization URL.\n\nArgs:\nclient_id (str): OAuth2 client ID. Defaults to ``None``.\ninstance_id (str): App Instance ID. Defaults to ``None``.\nredirect_uri (str): Redirect URI. Defaults to ``None``.\nregion (str): App Region. Defaults to ``None``.\nscope (str): Permissions. Defaults to ``None``.\nstate (str): UUID to detect CSRF. Defaults to ``None``.\n\nReturns:\nstr, str: Auth URL, state", "source": "codesearchnet"}
{"code": "def from_api_repr(cls, resource):\n        \n        this = cls(None)\n\n        \n        \n        resource = copy.deepcopy(resource)\n        for training_run in resource.get(\"trainingRuns\", ()):\n            start_time = training_run.get(\"startTime\")\n            if not start_time or \"-\" in start_time:  \n                continue\n            start_time = datetime_helpers.from_microseconds(1e3 * float(start_time))\n            training_run[\"startTime\"] = datetime_helpers.to_rfc3339(start_time)\n\n        this._proto = json_format.ParseDict(resource, types.Model())\n        for key in six.itervalues(cls._PROPERTY_TO_API_FIELD):\n            \n            \n            if key in resource:\n                this._properties[key] = resource[key]\n        return this", "docstring": "Factory: construct a model resource given its API representation\n\nArgs:\nresource (Dict[str, object]):\nModel resource representation from the API\n\nReturns:\ngoogle.cloud.bigquery.model.Model: Model parsed from ``resource``.", "source": "juraj-google-style"}
{"code": "def create_in_hdx(self):\n    capacity = self.data.get('capacity')\n    if (capacity is not None):\n        del self.data['capacity']\n    self._create_in_hdx('user', 'id', 'name')\n    if (capacity is not None):\n        self.data['capacity'] = capacity", "docstring": "Check if user exists in HDX and if so, update it, otherwise create user\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def execute(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n\n        try:\n            slot = parent.resolve_identifier('current_slot', SlotIdentifier)\n        except UnresolvedIdentifierError:\n            raise SensorGraphSemanticError(\"set config statement used outside of config block\")\n\n        if self.explicit_type is None or not isinstance(self.identifier, int):\n            raise SensorGraphSemanticError(\"Config variable type definitions are not yet supported\")\n\n        if isinstance(self.value, (bytes, bytearray)) and not self.explicit_type == 'binary':\n            raise SensorGraphSemanticError(\"You must pass the binary variable type when using encoded binary data\")\n\n        if not isinstance(self.value, (bytes, bytearray)) and self.explicit_type == 'binary':\n            raise SensorGraphSemanticError(\"You must pass an encoded binary value with binary type config variables\")\n\n        sensor_graph.add_config(slot, self.identifier, self.explicit_type, self.value)", "docstring": "Execute this statement on the sensor_graph given the current scope tree.\n\nThis adds a single config variable assignment to the current sensor graph\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def checkAndRaise(pageNum, itemsPerPage):\n        \n        if pageNum < 1:\n            raise ErrPaginationLimits(ErrPaginationLimits.ERR_PAGE_NUM)\n        \n        if itemsPerPage < Settings.itemsPerPageMin or itemsPerPage > Settings.itemsPerPageMax:\n            raise ErrPaginationLimits(ErrPaginationLimits.ERR_ITEMS_PER_PAGE)", "docstring": "Check and Raise an Exception if needed\n\nArgs:\npageNum (int): Page number\nitemsPerPage (int): Number of items per Page\n\nRaises:\nErrPaginationLimits: If we are out of limits", "source": "juraj-google-style"}
{"code": "def get(self, key, default_value=__NoDefaultSpecified__):\n        \n        \n        os_env_string = ConfigReader.ENV_PREFIX + key\n        os_env_string = os_env_string.replace(\".\", \"_\")\n        if type(os.getenv(os_env_string)) != NoneType:\n            return os.getenv(os_env_string)\n\n        \n        for data_map in self._dataMaps:\n            try:\n                if \".\" in key:\n                    \n                    namespaces = key.split(\".\")\n                    temp_var = data_map\n                    for name in namespaces:\n                        temp_var = temp_var[name]\n                    return temp_var\n                else:\n                    value = data_map[key]\n                    return value\n            except (AttributeError, TypeError, KeyError):\n                pass\n\n        if default_value == self.__NoDefaultSpecified__:\n            raise KeyError(u(\"Key '{0}' does not exist\").format(key))\n        else:\n            return default_value", "docstring": "Gets the value from the yaml config based on the key.\n\nNo type casting is performed, any type casting should be\nperformed by the caller.\n\nArgs:\nkey (str) - Config setting key.\n\nKwargs:\ndefault_value - Default value to return if config is not specified.\n\nReturns:\nReturns value stored in config file.", "source": "juraj-google-style"}
{"code": "def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):\n    geojson = load_wkt(wkt).__geo_interface__\n    vector = {'type': 'Feature', 'geometry': geojson, 'properties': {'item_type': item_type, 'ingest_source': ingest_source, 'attributes': attributes}}\n    return self.create(vector)[0]", "docstring": "Create a single vector in the vector service\n\nArgs:\nwkt (str): wkt representation of the geometry\nitem_type (str): item_type of the vector\ningest_source (str): source of the vector\nattributes: a set of key-value pairs of attributes\n\nReturns:\nid (str): string identifier of the vector created", "source": "codesearchnet"}
{"code": "def __init__(self, input_circuit: circuit.QuantumCircuit, name: Union[None, str]=None):\n    super().__init__(name=name)\n    input_circuit.build([])\n    self._circuit = input_circuit", "docstring": "Initializes a generic QuantumInference layer.\n\nArgs:\ninput_circuit: The parameterized quantum circuit on which to do inference.\nname: Identifier for this inference engine.", "source": "github-repos"}
{"code": "def load_file_to_base64_str(f_path):\n    \n    path = abs_path(f_path)\n    with io.open(path, 'rb') as f:\n        f_bytes = f.read()\n        base64_str = base64.b64encode(f_bytes).decode(\"utf-8\")\n        return base64_str", "docstring": "Loads the content of a file into a base64 string.\n\nArgs:\nf_path: full path to the file including the file name.\n\nReturns:\nA base64 string representing the content of the file in utf-8 encoding.", "source": "juraj-google-style"}
{"code": "def overwrite_view_source(project, dir_path):\n    project_html_location = ((dir_path / project) / HTML_LOCATION)\n    if (not project_html_location.exists()):\n        return\n    files_to_overwrite = [f for f in project_html_location.iterdir() if ('html' in f.suffix)]\n    for html_file in files_to_overwrite:\n        with open(html_file, 'r') as f:\n            html = f.readlines()\n        for (i, l) in enumerate(html):\n            if (TO_REPLACE_WITH_HOME in l):\n                html[i] = NEW_HOME_LINK\n                break\n        with open(html_file, 'w') as f:\n            f.writelines(html)", "docstring": "In the project's index.html built file, replace the top \"source\"\nlink with a link to the documentation's home, which is mkdoc's home\n\nArgs:\nproject (str): project to update\ndir_path (pathlib.Path): this file's path", "source": "codesearchnet"}
{"code": "def draw_text(img, pos, text, color, font_scale=0.4):\n    \n    img = img.astype(np.uint8)\n    x0, y0 = int(pos[0]), int(pos[1])\n    \n    font = cv2.FONT_HERSHEY_SIMPLEX\n    ((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, 1)\n    \n    if x0 + text_w > img.shape[1]:\n        x0 = img.shape[1] - text_w\n    if y0 - int(1.15 * text_h) < 0:\n        y0 = int(1.15 * text_h)\n    back_topleft = x0, y0 - int(1.3 * text_h)\n    back_bottomright = x0 + text_w, y0\n    cv2.rectangle(img, back_topleft, back_bottomright, color, -1)\n    \n    text_bottomleft = x0, y0 - int(0.25 * text_h)\n    cv2.putText(img, text, text_bottomleft, font, font_scale, (222, 222, 222), lineType=cv2.LINE_AA)\n    return img", "docstring": "Draw text on an image.\n\nArgs:\npos (tuple): x, y; the position of the text\ntext (str):\nfont_scale (float):\ncolor (tuple): a 3-tuple BGR color in [0, 255]", "source": "juraj-google-style"}
{"code": "def add_value(self, line):\n    if line.strip():\n        self._empty = False\n    if self.current_key in self.known_keys:\n        self.known_keys[self.current_key].append(line)\n    else:\n        self.unknown_keys[self.current_key].append(line)", "docstring": "Adds unstructured or multi-line value output to the current parsed\ninstrumentation block for outputting later.\n\nUsually, this will add extra lines to the value list for the current\nkey-value pair. However, sometimes, such as when instrumentation\nfailed to start, output does not follow the structured prefix format.\nIn this case, adding all of the output is still useful so that a user\ncan debug the issue.\n\nArgs:\nline: string, the raw instrumentation line to append to the value\nlist.", "source": "github-repos"}
{"code": "def _ParseRecordString(\n      self, record_strings_data, record_strings_data_offset, string_offset):\n    \n    if string_offset == 0:\n      return None\n\n    if string_offset & self._STRING_OFFSET_MSB:\n      if (string_offset >> 60) != 8:\n        raise errors.ParseError('Invalid inline record string flag.')\n\n      string_size = (string_offset >> 56) & 0x0f\n      if string_size >= 8:\n        raise errors.ParseError('Invalid inline record string size.')\n\n      string_data = bytes(bytearray([\n          string_offset >> (8 * byte_index) & 0xff\n          for byte_index in range(6, -1, -1)]))\n\n      try:\n        return string_data[:string_size].decode('utf-8')\n      except UnicodeDecodeError as exception:\n        raise errors.ParseError(\n            'Unable to decode inline record string with error: {0!s}.'.format(\n                exception))\n\n    data_offset = string_offset - record_strings_data_offset\n    record_string_map = self._GetDataTypeMap('asl_record_string')\n\n    try:\n      record_string = self._ReadStructureFromByteStream(\n          record_strings_data[data_offset:], string_offset, record_string_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError((\n          'Unable to parse record string at offset: 0x{0:08x} with error: '\n          '{1!s}').format(string_offset, exception))\n\n    return record_string.string.rstrip('\\x00')", "docstring": "Parses a record string.\n\nArgs:\nrecord_strings_data (bytes): record strings data.\nrecord_strings_data_offset (int): offset of the record strings data\nrelative to the start of the file.\nstring_offset (int): offset of the string relative to the start of\nthe file.\n\nReturns:\nstr: record string or None if string offset is 0.\n\nRaises:\nParseError: if the record string cannot be parsed.", "source": "juraj-google-style"}
{"code": "def fftn(x):\n    out = x\n    for axis in reversed(range(x.ndim)[1:]):\n        out = torch.fft.fft(out, axis=axis)\n    return out", "docstring": "Applies n-dimensional Fast Fourier Transform (FFT) to input array.\n\nArgs:\nx: Input n-dimensional array.\n\nReturns:\nn-dimensional Fourier transform of input n-dimensional array.", "source": "github-repos"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, **kwargs):\n    patch_embeddings = [self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)).squeeze(0) for patch in pixel_values]\n    return patch_embeddings", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def DownloadReportToFile(self, report_job_id, export_format, outfile, include_report_properties=False, include_totals_row=None, use_gzip_compression=True):\n    service = self._GetReportService()\n    if (include_totals_row is None):\n        include_totals_row = (True if (export_format != 'CSV_DUMP') else False)\n    opts = {'exportFormat': export_format, 'includeReportProperties': include_report_properties, 'includeTotalsRow': include_totals_row, 'useGzipCompression': use_gzip_compression}\n    report_url = service.getReportDownloadUrlWithOptions(report_job_id, opts)\n    _data_downloader_logger.info('Request Summary: Report job ID: %s, %s', report_job_id, opts)\n    response = self.url_opener.open(report_url)\n    _data_downloader_logger.debug('Incoming response: %s %s REDACTED REPORT DATA', response.code, response.msg)\n    while True:\n        chunk = response.read(_CHUNK_SIZE)\n        if (not chunk):\n            break\n        outfile.write(chunk)", "docstring": "Downloads report data and writes it to a file.\n\nThe report job must be completed before calling this function.\n\nArgs:\nreport_job_id: The ID of the report job to wait for, as a string.\nexport_format: The export format for the report file, as a string.\noutfile: A writeable, file-like object to write to.\ninclude_report_properties: Whether or not to include the report\nproperties (e.g. network, user, date generated...)\nin the generated report.\ninclude_totals_row: Whether or not to include the totals row.\nuse_gzip_compression: Whether or not to use gzip compression.", "source": "codesearchnet"}
{"code": "def show_qouts(self, nids=None, stream=sys.stdout):\n    lines = []\n    for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):\n        header = (('=== ' + task.qout_file.path) + '===')\n        lines.append(header)\n        if task.qout_file.exists:\n            with open(task.qout_file.path, 'rt') as fh:\n                lines += fh.readlines()\n        else:\n            lines.append('File does not exist!')\n        lines.append((('=' * len(header)) + (2 * '\\n')))\n    return stream.writelines(lines)", "docstring": "Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.\n\nArgs:\nnids: optional list of node identifiers used to filter the tasks.\nstream: File-like object. Default: sys.stdout", "source": "codesearchnet"}
{"code": "def _should_stop(state, stopping_policy_fn):\n    return tf.convert_to_tensor(stopping_policy_fn(state.finished), name='should_stop', dtype=tf.bool)", "docstring": "Indicates whether the overall Brent search should continue.\n\nArgs:\nstate: A Python `_BrentSearchState` namedtuple.\nstopping_policy_fn: Python `callable` controlling the algorithm termination.\n\nReturns:\nA boolean value indicating whether the overall search should continue.", "source": "github-repos"}
{"code": "def restore_server_connection(self, port=None):", "docstring": "Reconnects to the server after the device was disconnected.\n\nInstead of creating a new instance of the client:\n- Uses the given port (or finds a new available host port if 0 or None is\ngiven).\n- Tries to connect to the remote server with the selected port.\n\nArgs:\nport: int, if given, this is the host port from which to connect to the\nremote device port. Otherwise, finds a new available port as host\nport.\n\nRaises:\nerrors.ServerRestoreConnectionError: when failed to restore the connection\nto the snippet server.", "source": "github-repos"}
{"code": "def build_position_encoding(position_encoding_type, out_channels=None, project_pos_dim=-1, trainable_position_encoding_kwargs=None, fourier_position_encoding_kwargs=None):\n    if position_encoding_type == 'trainable':\n        if not trainable_position_encoding_kwargs:\n            raise ValueError('Make sure to pass trainable_position_encoding_kwargs')\n        output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs)\n    elif position_encoding_type == 'fourier':\n        if not fourier_position_encoding_kwargs:\n            raise ValueError('Make sure to pass fourier_position_encoding_kwargs')\n        output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs)\n    else:\n        raise ValueError(f'Unknown position encoding type: {position_encoding_type}.')\n    positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity()\n    return (output_pos_enc, positions_projection)", "docstring": "Builds the position encoding.\n\nArgs:\n- out_channels: refers to the number of channels of the position encodings.\n- project_pos_dim: if specified, will project the position encodings to this dimension.", "source": "github-repos"}
{"code": "def get_nn(self, structure, n):\n    return [e['site'] for e in self.get_nn_info(structure, n)]", "docstring": "Get near neighbors of site with index n in structure.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site in structure for which to determine\nneighbors.\nReturns:\nsites (list of Site objects): near neighbors.", "source": "codesearchnet"}
{"code": "def has_axis(self, axis):\n\t\t\n\n\t\tif self.type != EventType.POINTER_AXIS:\n\t\t\traise AttributeError(_wrong_meth.format(self.type))\n\t\treturn self._libinput.libinput_event_pointer_has_axis(\n\t\t\tself._handle, axis)", "docstring": "Check if the event has a valid value for the given axis.\n\nIf this method returns True for an axis and :meth:`get_axis_value`\nreturns a value of 0, the event is a scroll stop event.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises\n:exc:`AttributeError`.\n\nArgs:\naxis (~libinput.constant.PointerAxis): The axis to check.\nReturns:\nbool: True if this event contains a value for this axis.\nRaises:\nAttributeError", "source": "juraj-google-style"}
{"code": "def move_to_destination(source, destination, job_name, sagemaker_session):\n    parsed_uri = urlparse(destination)\n    if (parsed_uri.scheme == 'file'):\n        recursive_copy(source, parsed_uri.path)\n        final_uri = destination\n    elif (parsed_uri.scheme == 's3'):\n        bucket = parsed_uri.netloc\n        path = ('%s%s' % (parsed_uri.path.lstrip('/'), job_name))\n        final_uri = ('s3:\n        sagemaker_session.upload_data(source, bucket, path)\n    else:\n        raise ValueError(('Invalid destination URI, must be s3:\n    shutil.rmtree(source)\n    return final_uri", "docstring": "move source to destination. Can handle uploading to S3\n\nArgs:\nsource (str): root directory to move\ndestination (str): file:// or s3:// URI that source will be moved to.\njob_name (str): SageMaker job name.\nsagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed\n\nReturns:\n(str): destination URI", "source": "codesearchnet"}
{"code": "def Open(self):\n    if (not self._filename):\n        raise ValueError('Missing filename.')\n    if os.path.isfile(self._filename):\n        raise IOError('Unable to use an already existing file for output [{0:s}]'.format(self._filename))\n    options = {'constant_memory': True, 'strings_to_urls': False, 'strings_to_formulas': False, 'default_date_format': self._timestamp_format}\n    self._workbook = xlsxwriter.Workbook(self._filename, options)\n    self._sheet = self._workbook.add_worksheet('Sheet')\n    self._current_row = 0", "docstring": "Creates a new workbook.\n\nRaises:\nIOError: if the specified output file already exists.\nOSError: if the specified output file already exists.\nValueError: if the filename is not set.", "source": "codesearchnet"}
{"code": "def alias_tool(self, context_name, tool_name, tool_alias):\n        \n        data = self._context(context_name)\n        aliases = data[\"tool_aliases\"]\n        if tool_name in aliases:\n            raise SuiteError(\"Tool %r in context %r is already aliased to %r\"\n                             % (tool_name, context_name, aliases[tool_name]))\n        self._validate_tool(context_name, tool_name)\n        aliases[tool_name] = tool_alias\n        self._flush_tools()", "docstring": "Register an alias for a specific tool.\n\nNote that a tool alias takes precedence over a context prefix/suffix.\n\nArgs:\ncontext_name (str): Context containing the tool.\ntool_name (str): Name of tool to alias.\ntool_alias (str): Alias to give the tool.", "source": "juraj-google-style"}
{"code": "def add_snippet_client(self, name, package, config=None):\n    if name in self._snippet_clients:\n        raise Error(self, 'Name \"%s\" is already registered with package \"%s\", it cannot be used again.' % (name, self._snippet_clients[name].client.package))\n    for snippet_name, client in self._snippet_clients.items():\n        if package == client.package:\n            raise Error(self, 'Snippet package \"%s\" has already been loaded under name \"%s\".' % (package, snippet_name))\n    client = snippet_client_v2.SnippetClientV2(package=package, ad=self._device, config=config)\n    client.initialize()\n    self._snippet_clients[name] = client", "docstring": "Adds a snippet client to the management.\n\nArgs:\nname: string, the attribute name to which to attach the snippet\nclient. E.g. `name='maps'` attaches the snippet client to\n`ad.maps`.\npackage: string, the package name of the snippet apk to connect to.\nconfig: snippet_client_v2.Config, the configuration object for\ncontrolling the snippet behaviors. See the docstring of the `Config`\nclass for supported configurations.\n\nRaises:\nError, if a duplicated name or package is passed in.", "source": "github-repos"}
{"code": "def _orthogonal_kernel(self, ksize, cin, cout):\n    if cin > cout:\n        raise ValueError(f'The number of input channels (cin={cin}) cannot exceed the number of output channels (cout={cout}).')\n    orth = self._orthogonal_matrix(cout)[0:cin, :]\n    if ksize == 1:\n        return array_ops.expand_dims(orth, 0)\n    p = self._block_orth(self._symmetric_projection(cout))\n    for _ in range(ksize - 2):\n        temp = self._block_orth(self._symmetric_projection(cout))\n        p = self._matrix_conv(p, temp)\n    for i in range(ksize):\n        p[i] = math_ops.matmul(orth, p[i])\n    return self._dict_to_tensor(p, ksize)", "docstring": "Construct orthogonal kernel for convolution.\n\nArgs:\nksize: Kernel size.\ncin: Number of input channels.\ncout: Number of output channels.\n\nReturns:\nAn [ksize, ksize, cin, cout] orthogonal kernel.\nRaises:\nValueError: If cin > cout.", "source": "github-repos"}
{"code": "def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):\n    batch_size_generator = (batch_size_generator or SquareRootSingleBatch())\n    compute_method = (compute_method or BatchMeansMCSE())\n    batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))\n    return np.min(list((compute_method.compute_standard_error(chain, b) for b in batch_sizes)))", "docstring": "Compute Monte Carlo standard errors for the expectations\n\nThis is a convenience function that calls the compute method for each batch size and returns the lowest ESS\nover the used batch sizes.\n\nArgs:\nchain (ndarray): the Markov chain\nbatch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes\nwe will use. Per default it uses the :class:`SquareRootSingleBatch` method.\ncompute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.\nBy default we will use the :class:`BatchMeansMCSE` method", "source": "codesearchnet"}
{"code": "def __init__(self, port_no=PortNo.OFPP_ANY):\n        \n        super().__init__()\n        self.port_no = port_no", "docstring": "Create a PortStatsRequest with the optional parameters below.\n\nArgs:\nport_no (:class:`int`, :class:`~pyof.v0x04.common.port.PortNo`):\n:attr:`StatsType.OFPST_PORT` message must request statistics\neither for a single port (specified in ``port_no``) or for all\nports (if ``port_no`` == :attr:`.PortNo.OFPP_ANY`).", "source": "juraj-google-style"}
{"code": "def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):\n        \n        kwargs.update({\n            'shell': shell,\n            'cwd': cwd or self.fpath,\n            'stderr': subprocess.STDOUT,\n            'stdout': subprocess.PIPE,\n            'ignore_error': ignore_error})\n        log.debug((('cmd', cmd), ('kwargs', kwargs)))\n        return sh(cmd, **kwargs)", "docstring": "Run a command with the current working directory set to self.fpath\n\nArgs:\ncmd (str or tuple): cmdstring or listlike\n\nKeyword Arguments:\nignore_error (bool): if False, raise an Exception if p.returncode is\nnot 0\ncwd (str): current working dir to run cmd with\nshell (bool): subprocess.Popen ``shell`` kwarg\n\nReturns:\nstr: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)", "source": "juraj-google-style"}
{"code": "def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags,\n                             units, parens_as_neg=True):\n    \n    conversion = cell_str.strip()\n\n    \n    if re.search(allregex.control_wrapping_regex, cell_str):\n        \n        stripped_cell = cell_str.strip()\n        mod_cell_str = stripped_cell[1:][:-1].strip()\n        neg_mult = False\n        \n        \n        if (stripped_cell[0] == '(' and stripped_cell[-1] == ')' and\n                re.search(allregex.contains_numerical_regex, mod_cell_str)):\n            \n            neg_mult = True\n        flagable.flag_change(flags, 'interpreted', position, worksheet,\n                            flagable.FLAGS['removed-wrapping'])\n        \n        converted_value = auto_convert_cell(flagable, mod_cell_str, position,\n                                            worksheet, flags, units)\n        neg_mult = neg_mult and check_cell_type(converted_value, get_cell_type(0))\n        if neg_mult and parens_as_neg:\n            flagable.flag_change(flags, 'interpreted', position, worksheet,\n                                 flagable.FLAGS['converted-wrapping-to-neg'])\n        return -converted_value if neg_mult else converted_value\n    \n    elif re.search(allregex.contains_numerical_regex, cell_str):\n        conversion = auto_convert_numeric_string_cell(flagable, conversion, position,\n                                                      worksheet, flags, units)\n    elif re.search(allregex.bool_regex, cell_str):\n        flagable.flag_change(flags, 'interpreted', position, worksheet,\n                             flagable.FLAGS['bool-to-int'])\n        conversion = 1 if re.search(allregex.true_bool_regex, cell_str) else 0\n\n    return conversion", "docstring": "Handles the string case of cell and attempts auto-conversion\nfor auto_convert_cell.\n\nArgs:\nparens_as_neg: Converts numerics surrounded by parens to negative values", "source": "juraj-google-style"}
{"code": "def deserialize_skycoord(d):\n    \n    if 'distance' in d:\n        args = (d['lon'], d['lat'], d['distance'])\n    else:\n        args = (d['lon'], d['lat'])\n\n    return coords.SkyCoord(\n        *args,\n        frame=d['frame'],\n        representation='spherical')", "docstring": "Deserializes a JSONified :obj:`astropy.coordinates.SkyCoord`.\n\nArgs:\nd (:obj:`dict`): A dictionary representation of a :obj:`SkyCoord` object.\n\nReturns:\nA :obj:`SkyCoord` object.", "source": "juraj-google-style"}
{"code": "def reset_sequence(self, topic):\n    if (topic in self.queues):\n        self.queues[topic].reset()", "docstring": "Reset the expected sequence number for a topic\n\nIf the topic is unknown, this does nothing.  This behaviour is\nuseful when you have wildcard topics that only create queues\nonce they receive the first message matching the topic.\n\nArgs:\ntopic (string): The topic to reset the packet queue on", "source": "codesearchnet"}
{"code": "def describe(self, req=None, resp=None, **kwargs):\n    description = {'params': OrderedDict([(name, param.describe()) for (name, param) in self.params.items()]), 'details': inspect.cleandoc((self.__class__.__doc__ or 'This resource does not have description yet')), 'name': self.__class__.__name__, 'methods': self.allowed_methods()}\n    if req:\n        description['path'] = req.path\n    description.update(**kwargs)\n    return description", "docstring": "Describe API resource using resource introspection.\n\nAdditional description on derrived resource class can be added using\nkeyword arguments and calling ``super().decribe()`` method call\nlike following:\n\n.. code-block:: python\n\nclass SomeResource(BaseResource):\ndef describe(req, resp, **kwargs):\nreturn super().describe(\nreq, resp, type='list', **kwargs\n)\n\nArgs:\nreq (falcon.Request): request object\nresp (falcon.Response): response object\nkwargs (dict): dictionary of values created from resource url\ntemplate\n\nReturns:\ndict: dictionary with resource descritpion information\n\n.. versionchanged:: 0.2.0\nThe `req` and `resp` parameters became optional to ease the\nimplementation of application-level documentation generators.", "source": "codesearchnet"}
{"code": "def call(self, inputs, state, **kwargs):\n    return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.call, **kwargs)", "docstring": "Runs the RNN cell step computation.\n\nWhen `call` is being used, we assume that the wrapper object has been built,\nand therefore the wrapped cells has been built via its `build` method and\nits `call` method can be used directly.\n\nThis allows to use the wrapped cell and the non-wrapped cell equivalently\nwhen using `call` and `build`.\n\nArgs:\ninputs: A tensor with wrapped cell's input.\nstate: A tensor or tuple of tensors with wrapped cell's state.\n**kwargs: Additional arguments passed to the wrapped cell's `call`.\n\nReturns:\nA pair containing:\n\n- Output: A tensor with cell's output.\n- New state: A tensor or tuple of tensors with new wrapped cell's state.", "source": "github-repos"}
{"code": "def convert_to_beam_type(typ):\n    if (sys.version_info.major == 3 and sys.version_info.minor >= 10) and isinstance(typ, types.UnionType):\n        typ = typing.Union[typ]\n    if getattr(typ, '__module__', None) == 'typing':\n        typ = convert_typing_to_builtin(typ)\n    typ_module = getattr(typ, '__module__', None)\n    if isinstance(typ, typing.TypeVar):\n        if id(typ) not in _type_var_cache:\n            new_type_variable = typehints.TypeVariable(typ.__name__)\n            _type_var_cache[id(typ)] = new_type_variable\n            _type_var_cache[id(new_type_variable)] = typ\n        return _type_var_cache[id(typ)]\n    elif isinstance(typ, str):\n        _LOGGER.info('Converting string literal type hint to Any: \"%s\"', typ)\n        return typehints.Any\n    elif sys.version_info >= (3, 10) and isinstance(typ, typing.NewType):\n        _LOGGER.info('Converting NewType type hint to Any: \"%s\"', typ)\n        return typehints.Any\n    elif typ_module == 'apache_beam.typehints.native_type_compatibility' and getattr(typ, '__name__', typ.__origin__.__name__) == 'TypedWindowedValue':\n        pass\n    elif is_typeddict(typ):\n        return typehints.Dict[str, typehints.Any]\n    elif typ_module not in _CONVERTED_MODULES and (not is_builtin(typ)):\n        return typ\n    if typ_module == 'collections.abc' and getattr(typ, '__origin__', typ) not in _CONVERTED_COLLECTIONS:\n        return typ\n    type_map = [_TypeMapEntry(match=is_new_type, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=is_forward_ref, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=is_any, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=_match_is_dict, arity=2, beam_type=typehints.Dict), _TypeMapEntry(match=_match_is_exactly_iterable, arity=1, beam_type=typehints.Iterable), _TypeMapEntry(match=_match_is_primitive(list), arity=1, beam_type=typehints.List), _TypeMapEntry(match=_match_is_primitive(frozenset), arity=1, beam_type=typehints.FrozenSet), _TypeMapEntry(match=_match_is_set, arity=1, beam_type=typehints.Set), _TypeMapEntry(match=match_is_named_tuple, arity=0, beam_type=typehints.Any), _TypeMapEntry(match=_match_is_primitive(tuple), arity=-1, beam_type=typehints.Tuple), _TypeMapEntry(match=_match_is_union, arity=-1, beam_type=typehints.Union), _TypeMapEntry(match=_match_issubclass(collections.abc.Generator), arity=3, beam_type=typehints.Generator), _TypeMapEntry(match=_match_issubclass(collections.abc.Iterator), arity=1, beam_type=typehints.Iterator), _TypeMapEntry(match=_match_is_exactly_collection, arity=1, beam_type=typehints.Collection), _TypeMapEntry(match=_match_issubclass(TypedWindowedValue), arity=1, beam_type=typehints.WindowedValue), _TypeMapEntry(match=_match_is_exactly_sequence, arity=1, beam_type=typehints.Sequence), _TypeMapEntry(match=_match_is_exactly_mapping, arity=2, beam_type=typehints.Mapping)]\n    matched_entry = next((entry for entry in type_map if entry.match(typ)), None)\n    if not matched_entry:\n        _LOGGER.info('Using Any for unsupported type: %s', typ)\n        return typehints.Any\n    args = _get_args(typ)\n    len_args = len(args)\n    if len_args == 0 and len_args != matched_entry.arity:\n        arity = matched_entry.arity\n        if _match_issubclass(typing.Tuple)(typ):\n            args = (typehints.TypeVariable('T'), Ellipsis)\n        elif _match_is_union(typ):\n            raise ValueError('Unsupported Union with no arguments.')\n        elif _match_issubclass(typing.Generator)(typ):\n            args = 
(typehints.TypeVariable('T_co'), type(None), type(None))\n        elif _match_issubclass(typing.Dict)(typ):\n            args = (typehints.TypeVariable('KT'), typehints.TypeVariable('VT'))\n        elif _match_issubclass(typing.Iterator)(typ) or _match_is_exactly_iterable(typ):\n            args = (typehints.TypeVariable('T_co'),)\n        else:\n            args = (typehints.TypeVariable('T'),) * arity\n    elif matched_entry.arity == -1:\n        arity = len_args\n    elif len_args == 1 and _safe_issubclass(getattr(typ, '__origin__', typ), collections.Counter):\n        args = (args[0], int)\n        len_args = 2\n        arity = matched_entry.arity\n    else:\n        arity = matched_entry.arity\n        if len_args != arity:\n            raise ValueError('expecting type %s to have arity %d, had arity %d instead' % (str(typ), arity, len_args))\n    typs = convert_to_beam_types(args)\n    if arity == 0:\n        return matched_entry.beam_type\n    elif arity == 1:\n        return matched_entry.beam_type[typs[0]]\n    else:\n        return matched_entry.beam_type[tuple(typs)]", "docstring": "Convert a given typing type to a Beam type.\n\nArgs:\ntyp (`typing.Union[type, str]`): typing type or string literal representing\na type.\n\nReturns:\ntype: The given type converted to a Beam type as far as we can do the\nconversion.\n\nRaises:\nValueError: The type was malformed.", "source": "github-repos"}
{"code": "def vmstat(stat):\n    \n    out = subprocess.check_output([\"vmstat\", \"-s\"])\n    stat = stat.encode(\"ascii\")\n    for line in out.split(b\"\\n\"):\n        line = line.strip()\n        if stat in line:\n            return int(line.split(b\" \")[0])\n    raise ValueError(\"Can't find {} in 'vmstat' output.\".format(stat))", "docstring": "Run vmstat and get a particular statistic.\n\nArgs:\nstat: The statistic that we are interested in retrieving.\n\nReturns:\nThe parsed output.", "source": "juraj-google-style"}
{"code": "def get_factors_iterative1(n):\n    \n\n    todo, res = [(n, 2, [])], []\n    while todo:\n        n, i, combi = todo.pop()\n        while i * i <= n:\n            if n % i == 0:\n                res += combi + [i, n\n                todo.append((n\n            i += 1\n    return res", "docstring": "[summary]\nComputes all factors of n.\nTranslated the function get_factors(...) in\na call-stack modell.\n\nArguments:\nn {[int]} -- [to analysed number]\n\nReturns:\n[list of lists] -- [all factors]", "source": "juraj-google-style"}
{"code": "def _create_make_unique(inputs):\n  \n  if inputs.shape.ndims != 2:\n    raise ValueError(\"Input of top_k_with_unique must be rank-2 \"\n                     \"but got: %s\" % inputs.shape)\n\n  height = inputs.shape[0]\n  width = inputs.shape[1]\n  zeros = tf.zeros([height, width], dtype=tf.int32)\n\n  \n  \n  log2_ceiling = int(math.ceil(math.log(int(width), 2)))\n  next_power_of_two = 1 << log2_ceiling\n  count_mask = ~(next_power_of_two - 1)\n  count_mask_r0 = tf.constant(count_mask)\n  count_mask_r2 = tf.fill([height, width], count_mask_r0)\n\n  \n  \n  \n  smallest_normal = 1 << 23\n  smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32)\n  smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0)\n\n  \n  \n  low_bit_mask = ~(1 << 31)\n  low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32)\n  low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0)\n\n  iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0),\n                 [height, 1])\n\n  \n  input_r2 = tf.bitcast(inputs, tf.int32)\n  abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2)\n  if_zero_r2 = tf.equal(abs_r2, zeros)\n  smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or(\n      input_r2, smallest_normal_r2)\n  input_no_zeros_r2 = tf.where(\n      if_zero_r2, smallest_normal_preserving_sign_r2, input_r2)\n\n  \n  and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2)\n  or_r2 = tf.bitwise.bitwise_or(and_r2, iota)\n  return tf.bitcast(or_r2, tf.float32)", "docstring": "Replaces the lower bits of each element with iota.\n\nThe iota is used to derive the index, and also serves the purpose to\nmake each element unique to break ties.\n\nArgs:\ninputs: A tensor with rank of 2 and dtype of tf.float32.\n[batch_size, original_size].\n\nReturns:\nA tensor after element wise transformation, with dtype the same as inputs.\n[batch_size, original_size].\n\nRaises:\nValueError: If the rank of the input tensor does not equal 2.", "source": "juraj-google-style"}
{"code": "def from_rfc3339(rfc3339_text, with_nanos=False):\n    timestamp = strict_rfc3339.rfc3339_to_timestamp(rfc3339_text)\n    result = datetime.datetime.utcfromtimestamp(timestamp)\n    if with_nanos:\n        return (result, int(((timestamp - int(timestamp)) * 1000000000.0)))\n    else:\n        return result", "docstring": "Parse a RFC 3339 date string format to datetime.date.\n\nExample of accepted format: '1972-01-01T10:00:20.021-05:00'\n\n- By default, the result is a datetime.datetime\n- If with_nanos is true, the result is a 2-tuple, (datetime.datetime,\nnanos), where the second field represents the possible nanosecond\nresolution component of the second field.\n\nArgs:\nrfc3339_text (string): An rfc3339 formatted date string\nwith_nanos (bool): Determines if nanoseconds should be parsed from the\nstring\n\nRaises:\nValueError: if ``rfc3339_text`` is invalid\n\nReturns:\n:class:`datetime.datetime`: when with_nanos is False\ntuple(:class:`datetime.datetime`, int): when with_nanos is True", "source": "codesearchnet"}
{"code": "def fn(x: int) -> None:\n    pass", "docstring": "Test function\n\nArgs:\nx: The first input", "source": "github-repos"}
{"code": "def assert_present(self, selector, testid=None, **kwargs):\n        \n        self.info_log(\n            \"Assert present selector(%s) testid(%s)\" % (selector, testid)\n        )\n\n        wait_until_present = kwargs.get(\n            'wait_until_present',\n            BROME_CONFIG['proxy_driver']['wait_until_present_before_assert_present']  \n        )\n        self.debug_log(\n            \"effective wait_until_present: %s\" % wait_until_present\n        )\n\n        if wait_until_present:\n            element = self.wait_until_present(selector, raise_exception=False)\n        else:\n            element = self.is_present(selector)\n\n        if element:\n            if testid is not None:\n                self.create_test_result(testid, True)\n\n            return True\n        else:\n            if testid is not None:\n                self.create_test_result(testid, False)\n\n            return False", "docstring": "Assert that the element is present in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntest_id (str): the test_id or a str\n\nKwargs:\nwait_until_present (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "juraj-google-style"}
{"code": "def bootstrap_results(self, state):\n\n    def loss():\n        q = self._flattened_variational_distribution()\n        samples = q.sample(self.train_batch_size)\n        return tf.reduce_mean(input_tensor=(q.log_prob(samples) - self._flattened_target_log_prob(samples)), axis=(- 1))\n    lr = tf.convert_to_tensor(value=self.learning_rate, dtype=self._dtype)\n    dtype = lr.dtype\n    learning_rate = tf.compat.v2.optimizers.schedules.PiecewiseConstantDecay(list((self.num_train_steps * np.array([0.2, 0.8]).astype(dtype.as_numpy_dtype()))), [lr, (lr * 0.1), (lr * 0.01)])\n    opt = tf.compat.v2.optimizers.Adam(learning_rate)\n\n    @tf.function(autograph=False)\n    def train_step():\n        with tf.GradientTape() as tape:\n            loss_val = loss()\n        vals = tape.watched_variables()\n        grads = tape.gradient(loss_val, vals)\n        grads_and_vals = list(zip(grads, vals))\n        opt.apply_gradients(grads_and_vals)\n        return loss_val\n    for step in range(self.num_train_steps):\n        loss_val = train_step()\n        tf.debugging.assert_all_finite(loss_val, 'NeuTra loss is NaN at step {}'.format(step))\n        if self.train_debug_fn:\n            self.train_debug_fn(self, step, loss_val)\n    state_parts = tf.nest.flatten(state)\n    flat_state_shapes = tf.nest.flatten(self.state_shape)\n    batch_shape = tf.shape(input=state_parts[0])[:(- flat_state_shapes[0].ndims)]\n    return self._kernel.bootstrap_results(self._flattened_variational_distribution().sample(batch_shape, seed=self.seed))", "docstring": "Trains the bijector and creates initial `previous_kernel_results`.\n\nThe supplied `state` is only used to determine the number of chains to run\nin parallel_iterations\n\nArgs:\nstate: `Tensor` or Python `list` of `Tensor`s representing the initial\nstate(s) of the Markov chain(s). The first `r` dimensions index\nindependent chains, `r = tf.rank(target_log_prob_fn(*state))`.\n\nReturns:\nkernel_results: Instance of\n`UncalibratedHamiltonianMonteCarloKernelResults` inside\n`MetropolisHastingsResults` inside `TransformedTransitionKernelResults`\ninside `SimpleStepSizeAdaptationResults`.", "source": "codesearchnet"}
{"code": "def __init__(self, tokenizer=None, trie=None):\n        \n        pyee.EventEmitter.__init__(self)\n        self.tokenizer = tokenizer or EnglishTokenizer()\n        self.trie = trie or Trie()\n        self.regular_expressions_entities = []\n        self._regex_strings = set()\n        self.tagger = EntityTagger(self.trie, self.tokenizer, self.regular_expressions_entities)\n        self.intent_parsers = []", "docstring": "Initialize the IntentDeterminationEngine\n\nArgs:\ntokenizer(tokenizer) : tokenizer used to break up spoken text\nexample EnglishTokenizer()\ntrie(Trie): tree of matches to Entites", "source": "juraj-google-style"}
{"code": "def _GetNetworkInfo(self, signatures_key):\n    network_info = {}\n    for category in signatures_key.GetSubkeys():\n        for signature in category.GetSubkeys():\n            profile_guid_value = signature.GetValueByName('ProfileGuid')\n            if profile_guid_value:\n                profile_guid = profile_guid_value.GetDataAsObject()\n            else:\n                continue\n            default_gateway_mac_value = signature.GetValueByName('DefaultGatewayMac')\n            if default_gateway_mac_value:\n                default_gateway_mac = ':'.join(['{0:02x}'.format(octet) for octet in bytearray(default_gateway_mac_value.data)])\n            else:\n                default_gateway_mac = None\n            dns_suffix_value = signature.GetValueByName('DnsSuffix')\n            if dns_suffix_value:\n                dns_suffix = dns_suffix_value.GetDataAsObject()\n            else:\n                dns_suffix = None\n            network_info[profile_guid] = (default_gateway_mac, dns_suffix)\n    return network_info", "docstring": "Retrieves the network info within the signatures subkey.\n\nArgs:\nsignatures_key (dfwinreg.WinRegistryKey): a Windows Registry key.\n\nReturns:\ndict[str, tuple]: a tuple of default_gateway_mac and dns_suffix per\nprofile identifier (GUID).", "source": "codesearchnet"}
{"code": "def get_op_consumers(self, src_op_name):\n    return self._op_consumers[src_op_name]", "docstring": "Get all the downstream consumers of this op.\n\nOnly data (non-control) edges are tracked.\n\nArgs:\nsrc_op_name: Name of the op providing the tensor being consumed.\n\nReturns:\nA list of (src_slot, dst_op_name, dst_slot) tuples. In each item of\nthe list:\nsrc_slot: 0-based output slot of the op of which the output tensor\nis being consumed.\ndst_op_name: Name of the consuming op (e.g., \"Conv2D_3/BiasAdd\")\ndst_slot: 0-based input slot of the consuming op that receives\nthe tensor from this op.", "source": "github-repos"}
{"code": "def loads(string, triples=False, cls=PENMANCodec, **kwargs):\n    \n    codec = cls(**kwargs)\n    return list(codec.iterdecode(string, triples=triples))", "docstring": "Deserialize a list of PENMAN-encoded graphs from *string*.\n\nArgs:\nstring: a string containing graph data\ntriples: if True, read graphs as triples instead of as PENMAN\ncls: serialization codec class\nkwargs: keyword arguments passed to the constructor of *cls*\nReturns:\na list of Graph objects", "source": "juraj-google-style"}
{"code": "def _build(self, ids):\n    if (self._existing_vocab is None):\n        if (self.EMBEDDINGS not in self._initializers):\n            self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal()\n        self._embeddings = tf.get_variable('embeddings', shape=[self._vocab_size, self._embed_dim], dtype=tf.float32, initializer=self._initializers[self.EMBEDDINGS], partitioner=self._partitioners.get(self.EMBEDDINGS, None), regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable)\n    else:\n        self._embeddings = tf.get_variable('embeddings', dtype=tf.float32, initializer=self._existing_vocab, regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable)\n    if self._densify_gradients:\n        embeddings = util.convert_gradient_to_tensor(self._embeddings)\n    else:\n        embeddings = self._embeddings\n    return tf.nn.embedding_lookup(embeddings, ids, name='embedding_lookup')", "docstring": "Lookup embeddings.\n\nLooks up an embedding vector for each value in `ids`. All ids must be within\n[0, vocab_size), else an `InvalidArgumentError` is raised at runtime.\n\nArgs:\nids: Tensor of dtype int64.\n\nReturns:\nTensor of tf.shape(ids) + [embedding_dim] and dtype float32.", "source": "codesearchnet"}
{"code": "def Detect(self, str_in):\n    \n\n    components = SplitIntoComponents(str_in)\n\n    extracted_paths = set()\n    for extractor in self.extractors:\n      extracted_paths.update(extractor.Extract(components))\n\n    results = set(extracted_paths)\n\n    for post_processor in self.post_processors:\n      processed_results = set()\n      for result in results:\n        processed_results.update(post_processor.Process(result))\n      results = processed_results\n\n    return results", "docstring": "Detects paths in a given string.\n\nArgs:\nstr_in: String where the paths should be detected.\n\nReturns:\nA list of paths (as strings) detected inside the given string.", "source": "juraj-google-style"}
{"code": "def get_target(self, target):\n    if (target not in self._target_cache):\n        self._target_cache[target] = self._get_target(target)\n    return self._target_cache[target]", "docstring": "Get the result of _get_target, cache it and return it.\n\nArgs:\ntarget (str): target to find.\n\nReturns:\nPackage/Module: package containing target or corresponding module.", "source": "codesearchnet"}
{"code": "def GetUserinfo(credentials, http=None):\n    http = (http or httplib2.Http())\n    url = _GetUserinfoUrl(credentials)\n    (response, content) = http.request(url)\n    if (response.status == http_client.BAD_REQUEST):\n        credentials.refresh(http)\n        url = _GetUserinfoUrl(credentials)\n        (response, content) = http.request(url)\n    return json.loads((content or '{}'))", "docstring": "Get the userinfo associated with the given credentials.\n\nThis is dependent on the token having either the userinfo.email or\nuserinfo.profile scope for the given token.\n\nArgs:\ncredentials: (oauth2client.client.Credentials) incoming credentials\nhttp: (httplib2.Http, optional) http instance to use\n\nReturns:\nThe email address for this token, or None if the required scopes\naren't available.", "source": "codesearchnet"}
{"code": "def moment_by_moment_schedule(device: Device, circuit: Circuit):\n    schedule = Schedule(device)\n    t = Timestamp()\n    for moment in circuit:\n        if (not moment.operations):\n            continue\n        for op in moment.operations:\n            scheduled_op = ScheduledOperation.op_at_on(op, t, device)\n            schedule.include(scheduled_operation=scheduled_op)\n            device.validate_scheduled_operation(schedule, scheduled_op)\n        max_duration = max((device.duration_of(op) for op in moment.operations))\n        t += max_duration\n    return schedule", "docstring": "Returns a schedule aligned with the moment structure of the Circuit.\n\nThis method attempts to create a schedule in which each moment of a circuit\nis scheduled starting at the same time. Given the constraints of the\ngiven device, such a schedule may not be possible, in this case the\nthe method will raise a ValueError with a description of the conflict.\n\nThe schedule that is produced will take each moments and schedule the\noperations in this moment in a time slice of length equal to the maximum\ntime of an operation in the moment.\n\nReturns:\nA Schedule for the circuit.\n\nRaises:\nValueError: if the scheduling cannot be done.", "source": "codesearchnet"}
{"code": "def retry_api_check(exception):\n    if isinstance(exception, apiclient.errors.HttpError):\n        if (exception.resp.status in TRANSIENT_HTTP_ERROR_CODES):\n            _print_error('Retrying...')\n            return True\n    if isinstance(exception, socket.error):\n        if (exception.errno in TRANSIENT_SOCKET_ERROR_CODES):\n            _print_error('Retrying...')\n            return True\n    if isinstance(exception, oauth2client.client.AccessTokenRefreshError):\n        _print_error('Retrying...')\n        return True\n    if isinstance(exception, SSLError):\n        _print_error('Retrying...')\n        return True\n    if isinstance(exception, ServerNotFoundError):\n        _print_error('Retrying...')\n        return True\n    return False", "docstring": "Return True if we should retry. False otherwise.\n\nArgs:\nexception: An exception to test for transience.\n\nReturns:\nTrue if we should retry. False otherwise.", "source": "codesearchnet"}
{"code": "def pickle_load(cls, filepath):\n        \n        if os.path.isdir(filepath):\n            \n            for dirpath, dirnames, filenames in os.walk(filepath):\n                fnames = [f for f in filenames if f == cls.PICKLE_FNAME]\n                if fnames:\n                    if len(fnames) == 1:\n                        filepath = os.path.join(dirpath, fnames[0])\n                        break  \n                    else:\n                        err_msg = \"Found multiple databases:\\n %s\" % str(fnames)\n                        raise RuntimeError(err_msg)\n            else:\n                err_msg = \"Cannot find %s inside directory %s\" % (cls.PICKLE_FNAME, filepath)\n                raise ValueError(err_msg)\n\n        with open(filepath, \"rb\") as fh:\n            new = pickle.load(fh)\n\n        \n        \n        \n        from .flows import Flow\n        flow_workdirs, new.flows = new.flows, []\n        for flow in map(Flow.pickle_load, flow_workdirs):\n            new.add_flow(flow)\n\n        return new", "docstring": "Loads the object from a pickle file.\n\nArgs:\nfilepath: Filename or directory name. It filepath is a directory, we\nscan the directory tree starting from filepath and we\nread the first pickle database. Raise RuntimeError if multiple\ndatabases are found.", "source": "juraj-google-style"}
{"code": "def PublishEvent(cls, event_name, msg, token=None):\n    cls.PublishMultipleEvents({event_name: [msg]}, token=token)", "docstring": "Publish the message into all listeners of the event.\n\nWe send the message to all event handlers which contain this\nstring in their EVENT static member. This allows the event to be\nsent to multiple interested listeners.\n\nArgs:\nevent_name: An event name.\nmsg: The message to send to the event handler.\ntoken: ACL token.\n\nRaises:\nValueError: If the message is invalid. The message must be a Semantic\nValue (instance of RDFValue) or a full GrrMessage.", "source": "codesearchnet"}
{"code": "def reshard(self, checkpoint_values: List[tensor.Tensor], shape_and_slice_spec: List[str]) -> tensor.Tensor:\n    del shape_and_slice_spec\n    if len(checkpoint_values) != 1:\n        raise ValueError('Default reshard expects a single checkpoint value.')\n    return checkpoint_values[0]", "docstring": "Reshards the checkpoint values as read from the checkpoint file.\n\nOverride this to reshard/modify the restored values\nArgs:\ncheckpoint_values: The values returned by the restore op, as read from\nfile.\nshape_and_slice_spec: The shape and slice spec required by the caller.\n\nReturns:\nList of restored Tensor values after being resharded.", "source": "github-repos"}
{"code": "def consume(self, msg):\n    msg['body'] = crypto.sign(msg['body'], **self.hub.config)\n    super(SigningRelayConsumer, self).consume(msg)", "docstring": "Sign the message prior to sending the message.\n\nArgs:\nmsg (dict): The message to sign and relay.", "source": "codesearchnet"}
{"code": "def Validate(self, value):\n    if (value is None):\n        return\n    if (not isinstance(value, self.rdfclass)):\n        try:\n            return self.rdfclass(value)\n        except rdfvalue.InitializeError:\n            raise TypeValueError(('Value for arg %s should be an %s' % (self.name, self.rdfclass.__name__)))\n    return value", "docstring": "Validate an RDFValue instance.\n\nArgs:\nvalue: An RDFValue instance or something which may be used to instantiate\nthe correct instance.\n\nRaises:\nTypeValueError: If the value is not a valid RDFValue instance or the\nrequired type.\n\nReturns:\nA Valid RDFValue instance.", "source": "codesearchnet"}
{"code": "def compile_state_invariants(self,\n            state: Sequence[tf.Tensor]) -> List[TensorFluent]:\n        \n        scope = self.state_invariant_scope(state)\n        invariants = []\n        with self.graph.as_default():\n            with tf.name_scope('state_invariants'):\n                for p in self.rddl.domain.invariants:\n                    fluent = self._compile_expression(p, scope)\n                    invariants.append(fluent)\n                return invariants", "docstring": "Compiles the state invarints given current `state` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\n\nReturns:\nA list of :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray:\n    n_classes = len(classes)\n    y = []\n    for sample in labels:\n        curr = np.zeros(n_classes)\n        if isinstance(sample, list):\n            for intent in sample:\n                if (intent not in classes):\n                    log.warning('Unknown intent {} detected. Assigning no class'.format(intent))\n                else:\n                    curr[np.where((np.array(classes) == intent))[0]] = 1\n        else:\n            curr[np.where((np.array(classes) == sample))[0]] = 1\n        y.append(curr)\n    y = np.asarray(y)\n    return y", "docstring": "Convert labels to one-hot vectors for multi-class multi-label classification\n\nArgs:\nlabels: list of samples where each sample is a class or a list of classes which sample belongs with\nclasses: array of classes' names\n\nReturns:\n2d array with one-hot representation of given samples", "source": "codesearchnet"}
{"code": "def db_wb020(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_wb020`'.format(value))\n\n        self._db_wb020 = value", "docstring": "Corresponds to IDD Field `db_wb020`\nmean coincident dry-bulb temperature to\nWet-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_wb020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _log_normalization(self, name='log_normalization'):\n    with tf.name_scope((name or 'log_normalization_lkj')):\n        logpi = np.log(np.pi)\n        ans = tf.zeros_like(self.concentration)\n        for k in range(1, self.dimension):\n            ans += (logpi * (k / 2.0))\n            ans += tf.math.lgamma((self.concentration + (((self.dimension - 1) - k) / 2.0)))\n            ans -= tf.math.lgamma((self.concentration + ((self.dimension - 1) / 2.0)))\n        return ans", "docstring": "Returns the log normalization of an LKJ distribution.\n\nArgs:\nname: Python `str` name prefixed to Ops created by this function.\n\nReturns:\nlog_z: A Tensor of the same shape and dtype as `concentration`, containing\nthe corresponding log normalizers.", "source": "codesearchnet"}
{"code": "def add_behaviour(self, behaviour, template=None):\n        \n        behaviour.set_agent(self)\n        if issubclass(type(behaviour), FSMBehaviour):\n            for _, state in behaviour.get_states().items():\n                state.set_agent(self)\n        behaviour.set_template(template)\n        self.behaviours.append(behaviour)\n        if self.is_alive():\n            behaviour.start()", "docstring": "Adds and starts a behaviour to the agent.\nIf template is not None it is used to match\nnew messages and deliver them to the behaviour.\n\nArgs:\nbehaviour (spade.behaviour.CyclicBehaviour): the behaviour to be started\ntemplate (spade.template.Template, optional): the template to match messages with (Default value = None)", "source": "juraj-google-style"}
{"code": "def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:\n        \n        X, Y = self._transform_batch(data, labels)\n        self.model_.train_on_batch(X, Y)", "docstring": "Trains model on a single batch\n\nArgs:\ndata: a batch of word sequences\nlabels: a batch of correct tag sequences\nReturns:\nthe trained model", "source": "juraj-google-style"}
{"code": "def qubo_circuit(\n        graph: nx.Graph,\n        steps: int,\n        beta: Sequence,\n        gamma: Sequence) -> Circuit:\n    \n\n    qubits = list(graph.nodes())\n\n    \n    circ = Circuit()\n    for q0 in qubits:\n        circ += H(q0)\n\n    \n    for p in range(0, steps):\n\n        \n        for q0, q1 in graph.edges():\n            weight = graph[q0][q1].get('weight', 1.0)\n            \n            circ += ZZ(-weight * gamma[p] / np.pi, q0, q1)\n\n        for q0 in qubits:\n            node_weight = graph.nodes[q0].get('weight', None)\n            if node_weight is not None:\n                circ += RZ(node_weight, q0)\n\n        \n        for q0 in qubits:\n            circ += RX(beta[p], q0)\n\n    return circ", "docstring": "A QAOA circuit for the Quadratic Unconstrained Binary Optimization\nproblem (i.e. an Ising model).\n\nArgs:\ngraph : a networkx graph instance with optional edge and node weights\nsteps : number of QAOA steps\nbeta  : driver parameters (One per step)\ngamma : cost parameters (One per step)", "source": "juraj-google-style"}
{"code": "def support_set(self):\n    roots = set()\n    if self.has_attr():\n        roots.update(self.parent.support_set)\n    elif self.has_subscript():\n        roots.update(self.parent.support_set)\n        roots.update(self.qn[1].support_set)\n    else:\n        roots.add(self)\n    return roots", "docstring": "Returns the set of simple symbols that this QN relies on.\n\nThis would be the smallest set of symbols necessary for the QN to\nstatically resolve (assuming properties and index ranges are verified\nat runtime).\n\nExamples:\n'a.b' has only one support symbol, 'a'\n'a[i]' has two support symbols, 'a' and 'i'", "source": "github-repos"}
{"code": "def set_sleep_timer(self, sleep_time_seconds):\n        \n        \n        \n        \n        \n        try:\n            if sleep_time_seconds is None:\n                sleep_time = ''\n            else:\n                sleep_time = format(\n                    datetime.timedelta(seconds=int(sleep_time_seconds))\n                )\n            self.avTransport.ConfigureSleepTimer([\n                ('InstanceID', 0),\n                ('NewSleepTimerDuration', sleep_time),\n            ])\n        except SoCoUPnPException as err:\n            if 'Error 402 received' in str(err):\n                raise ValueError('invalid sleep_time_seconds, must be integer \\\n                    value between 0 and 86399 inclusive or None')\n            raise\n        except ValueError:\n            raise ValueError('invalid sleep_time_seconds, must be integer \\\n                value between 0 and 86399 inclusive or None')", "docstring": "Sets the sleep timer.\n\nArgs:\nsleep_time_seconds (int or NoneType): How long to wait before\nturning off speaker in seconds, None to cancel a sleep timer.\nMaximum value of 86399\n\nRaises:\nSoCoException: Upon errors interacting with Sonos controller\nValueError: Argument/Syntax errors", "source": "juraj-google-style"}
{"code": "def save(self, path):\n    data = self.encode()\n    with open(path, 'wb') as out:\n        out.write(data)", "docstring": "Save a binary copy of this report\n\nArgs:\npath (string): The path where we should save the binary copy of the report", "source": "codesearchnet"}
{"code": "def debase64(byte_str):\n    \n    \n    if isinstance(byte_str, str) and not PYTHON2:\n        byte_str = bytes(byte_str, 'utf-8')\n    return base64.b64decode(byte_str)", "docstring": "Decode base64 encoded bytes/strings.\n\nArgs:\n- ``byte_str``:  The string or bytes to base64 encode.\n\nReturns:\n- decoded string as type str for python2 and type byte for python3.", "source": "juraj-google-style"}
{"code": "def validate_seeded_answers_simple(answers, options, algo):\n    \n    seen_options = {}\n    for answer in answers:\n        if answer:\n            key = options[answer['answer']].get('text')\n            if options[answer['answer']].get('image_url'):\n                key += options[answer['answer']].get('image_url')\n            seen_options.setdefault(key, 0)\n            seen_options[key] += 1\n\n    missing_options = []\n    index = 1\n    for option in options:\n        key = option.get('text') + option.get('image_url') if option.get('image_url') else option.get('text')\n        if option.get('text') != 'n/a':\n            if seen_options.get(key, 0) == 0:\n                missing_options.append(_('Option ') + str(index))\n            index += 1\n\n    if missing_options:\n        return {'seed_error': _('Missing option seed(s): ') + ', '.join(missing_options)}\n\n    return None", "docstring": "This validator checks if the answers includes all possible options\n\nArgs:\nanswers (str): the answers to be checked\noptions (dict): all options that should exist in the answers\nalgo (str): selection algorithm\n\nReturns:\nNone if everything is good. Otherwise, the missing option error message.", "source": "juraj-google-style"}
{"code": "def block_matrix(A, B, C, D):\n    return vstackm((hstackm((A, B)), hstackm((C, D))))", "docstring": "r\"\"\"Generate the operator matrix with quadrants\n\n.. math::\n\n\\begin{pmatrix} A B \\\\ C D \\end{pmatrix}\n\nArgs:\nA (Matrix): Matrix of shape ``(n, m)``\nB (Matrix): Matrix of shape ``(n, k)``\nC (Matrix): Matrix of shape ``(l, m)``\nD (Matrix): Matrix of shape ``(l, k)``\n\nReturns:\nMatrix: The combined block matrix ``[[A, B], [C, D]]``.", "source": "codesearchnet"}
{"code": "def _get_elmt_amt_in_rxt(self, rxt):\n    return sum([rxt.get_el_amount(e) for e in self.pd.elements])", "docstring": "Computes total number of atoms in a reaction formula for elements\nnot in external reservoir. This method is used in the calculation\nof reaction energy per mol of reaction formula.\n\nArgs:\nrxt (Reaction): a reaction.\n\nReturns:\nTotal number of atoms for non_reservoir elements.", "source": "codesearchnet"}
{"code": "def set_size(a, validate_indices=True):\n    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')\n    if not isinstance(a, sparse_tensor.SparseTensor):\n        raise TypeError('Expected `SparseTensor`, got %s.' % a)\n    if a.values.dtype.base_dtype not in _VALID_DTYPES:\n        raise TypeError(f'Invalid dtype `{a.values.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.')\n    return gen_set_ops.set_size(a.indices, a.values, a.dense_shape, validate_indices)", "docstring": "Compute number of unique elements along last dimension of `a`.\n\nArgs:\na: `SparseTensor`, with indices sorted in row-major order.\nvalidate_indices: Whether to validate the order and range of sparse indices\nin `a`. Note that setting this to `false` allows for undefined behavior\nwhen calling this function with invalid indices.\n\nReturns:\n`int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with\nrank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the\nnumber of unique elements in the corresponding `[0...n-1]` dimension of `a`.\n\nRaises:\nTypeError: If `a` is an invalid types.", "source": "github-repos"}
{"code": "def search(self, terms):\n        \n        messages = self._connection.get(\"search/%s\" % urllib.quote_plus(terms), key=\"messages\")\n        if messages:\n            messages = [Message(self, message) for message in messages]\n        return messages", "docstring": "Search transcripts.\n\nArgs:\nterms (str): Terms for search\n\nReturns:\narray. Messages", "source": "juraj-google-style"}
{"code": "def iuptri(items, diago=True, with_inds=False):\n    \n    for (ii, item1) in enumerate(items):\n        for (jj, item2) in enumerate(items):\n            do_yield = (jj >= ii) if diago else (jj > ii)\n            if do_yield:\n                if with_inds:\n                    yield (ii, jj), (item1, item2)\n                else:\n                    yield item1, item2", "docstring": "A generator that yields the upper triangle of the matrix (items x items)\n\nArgs:\nitems: Iterable object with elements [e0, e1, ...]\ndiago: False if diagonal matrix elements should be excluded\nwith_inds: If True, (i,j) (e_i, e_j) is returned else (e_i, e_j)\n\n>>> for (ij, mate) in iuptri([0,1], with_inds=True):\n...     print(\"ij:\", ij, \"mate:\", mate)\nij: (0, 0) mate: (0, 0)\nij: (0, 1) mate: (0, 1)\nij: (1, 1) mate: (1, 1)", "source": "juraj-google-style"}
{"code": "def __init__(self, hostname, auth=AnonymousAuth()):\n        \n        self._hostname = self._construct_full_hostname(hostname)\n        _logger.debug(\"Hostname is %s\" % self._hostname)\n        self._auth_info = auth", "docstring": "Initializer for the base class.\n\nSave the hostname to use for all requests as well as any\nauthentication info needed.\n\nArgs:\nhostname: The host for the requests.\nauth: The authentication info needed for any requests.", "source": "juraj-google-style"}
{"code": "def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8):\n\t\n\t\n\t\n\tassert maf>=0.0 and maf<=0.5, \"maf needs to be between 0.0 and 0.5, got %f\" % maf\n\tif beta<0.0:\n\t\tbeta=-beta\n\tstd_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))\n\tnon_centrality = beta\n\tbeta_samples = np.random.normal(loc=non_centrality, scale=std_beta)\n\tn_grid = 100000\n\tbeta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0)) \n\tbeta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)\n\tpvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0) \n\t\n\tpower = (pvals<cutoff).mean()\n\treturn power, pvals", "docstring": "estimate power for a given allele frequency, effect size beta and sample size N\n\nAssumption:\n\nz-score = beta_ML distributed as p(0) = N(0,1.0(maf*(1-maf)*N))) under the null hypothesis\nthe actual beta_ML is distributed as p(alt) = N( beta , 1.0/(maf*(1-maf)N) )\n\n\n\nArguments:\nmaf:\tminor allele frequency of the SNP\nbeta:\teffect size of the SNP\nN:\t\tsample size (number of individuals)\nReturns:\npower:\tprobability to detect a SNP in that study with the given parameters", "source": "juraj-google-style"}
{"code": "def price(self, valuation_date, market, model=None, pricing_context=None, name=None):\n    model = model or rc.InterestRateModelType.LOGNORMAL_RATE\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        swap_annuity = self._swap.annuity(valuation_date, market, model)\n        forward_swap_rate = self._swap.par_rate(valuation_date, market, model)\n        strike = self._swap.fixed_rate\n        expiry_time = dates.daycount_actual_365_fixed(start_date=valuation_date, end_date=self._expiry_date, dtype=self._dtype)\n        if model == rc.InterestRateModelType.LOGNORMAL_RATE:\n            option_value = self._price_lognormal_rate(market, pricing_context, forward_swap_rate, strike, expiry_time)\n        else:\n            raise ValueError('Unsupported model.')\n        return self._swap.notional[-1] * swap_annuity * option_value", "docstring": "Returns the present value of the swaption on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the FRA instrument.\nmodel: An optional input of type `InterestRateModelType` to specify which\nmodel to use for pricing.\nDefault value: `None` in which case LOGNORMAL_RATE model is used.\npricing_context: An optional input to provide additional parameters (such\nas model parameters) relevant for pricing.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the modeled price of each IRS\ncontract based on the input market data.\n\nRaises:\nValueError: If an unsupported model is supplied to the function.", "source": "github-repos"}
{"code": "def ingress(self, envelope, http_headers, operation):\n    \n    if self._logger.isEnabledFor(logging.DEBUG):\n      self._logger.debug(_RESPONSE_XML_LOG_LINE,\n                         etree.tostring(envelope, pretty_print=True))\n\n    if self._logger.isEnabledFor(logging.WARN):\n      warn_data = {}\n      header = envelope.find(_HEADER_XPATH)\n      fault = envelope.find(_FAULT_XPATH)\n      if fault is not None:\n        warn_data['faultMessage'] = fault.find('faultstring').text\n\n        if header is not None:\n          header_data = {\n              re.sub(_REMOVE_NS_REGEXP, '', child.tag): child.text\n              for child in header[0]}\n          warn_data.update(header_data)\n\n        if 'serviceName' not in warn_data:\n          warn_data['serviceName'] = operation.binding.wsdl.services.keys()[0]\n\n        if 'methodName' not in warn_data:\n          warn_data['methodName'] = operation.name\n\n        self._logger.warn('Error summary: %s', warn_data)\n\n    return envelope, http_headers", "docstring": "Overrides the ingress function for response logging.\n\nArgs:\nenvelope: An Element with the SOAP request data.\nhttp_headers: A dict of the current http headers.\noperation: The SoapOperation instance.\n\nReturns:\nA tuple of the envelope and headers.", "source": "juraj-google-style"}
{"code": "def add_sample_tag_value(self, tag_name, new_sample_values):\n        \n        if tag_name in self.format_tags:\n            msg = \"New format value [{}] already exists.\".format(tag_name)\n            raise KeyError(msg)\n\n        if not self._samples_match(new_sample_values):\n            raise KeyError(\"Sample name values must match \"\n                           \"existing sample names\")\n        for sample in self.sample_tag_values.keys():\n            value = str(new_sample_values[sample])\n            self.sample_tag_values[sample][tag_name] = value", "docstring": "Appends a new format tag-value for all samples.\n\nArgs:\ntag_name: string tag name; must not already exist\nnew_sample\n\nRaises:\nKeyError: if tag_name to be added already exists", "source": "juraj-google-style"}
{"code": "def update_restore_inputs(self, checkpoint_key: str, shape_and_slice_spec: str) -> tuple[Sequence[str], Sequence[str]]:\n    keys = []\n    slices = []\n    logging.info('Updating restore v2 inputs for %s: %s', checkpoint_key, shape_and_slice_spec)\n    for i, layout in enumerate(self._to_shard_layout):\n        sub_checkpoint_key = checkpoint_key.replace(self._main_checkpoint_name, self._checkpoint_local_names[i])\n        logging.info('Will read sub key %s: %s', sub_checkpoint_key, layout.unsharded_shape)\n        keys.append(sub_checkpoint_key)\n        slices.append(_shard_info_str(layout.unsharded_shape, trackable_base.ShardInfo(offset=[0, 0], shape=layout.unsharded_shape)))\n    return (keys, slices)", "docstring": "Updates checkpoint key and slice spec acorrding to the resharding plan.\n\nArgs:\ncheckpoint_key: The input checkpoint key to be read.\nshape_and_slice_spec: The shape and slice spec of the checkpoint key to be\nread.\n\nReturns:\nA tuple of (keys, slices) that should be passed to restore_v2 inorder to\nreshard according to the resharding plan. The restored tensors from\nrestore_v2 op will usually be passed to reshard method of this class to\nget the final resharded value.", "source": "github-repos"}
{"code": "def eulers_totient(n):\n\n    \n\n    if not isinstance(n, int):\n        raise TypeError(\"Expecting a strictly positive integer\")\n    if n <= 0:\n        raise ValueError(\"Expecting a strictly positive integer\")\n\n    if n == 1:\n        return 1\n\n    result = 0\n    for i in range(1, n):\n        if gcd(i, n) == 1:\n            result += 1\n    return result", "docstring": "Calculate the value of Euler's totient for a given integer\n\nArgs:\nn (int): strictly positive integer\n\nReturns:\nThe value of Euler's totient for n\n\nRaises:\nTypeError: If either n or k is not an integer\nValueError: If either n or k is negative, or if k is strictly greater than n", "source": "juraj-google-style"}
{"code": "def write_uint8(self, value, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.pack(('%sB' % endian), value)", "docstring": "Pack the value as an unsigned byte and write 1 byte to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def check_copies(overwrite: bool=False, file: Optional[str]=None):\n    buffer = {}\n    if file is None:\n        all_files = glob.glob(os.path.join(TRANSFORMERS_PATH, '***.py'), recursive=True)\n        all_files = list(all_files) + list(all_test_files)\n    else:\n        all_files = [file]\n    diffs = []\n    for filename in all_files:\n        new_diffs = is_copy_consistent(filename, overwrite, buffer)\n        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]\n    if not overwrite and len(diffs) > 0:\n        diff = '\\n'.join(diffs)\n        raise Exception('Found the following copy inconsistencies:\\n' + diff + '\\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')", "docstring": "Check every file is copy-consistent with the original. Also check the model list in the main README and other\nREADMEs are consistent.\n\nArgs:\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to overwrite the copies when they don't match.\nfile (`bool`, *optional*):\nThe path to a specific file to check and/or fix.", "source": "github-repos"}
{"code": "def _is_in_control_flow(self, op):\n    return control_flow_util.IsInCond(op)", "docstring": "Returns true if the given op is inside a tf.cond or in tf.while_loop.\n\nArgs:\nop: A tensorflow op that should be checked whether in control flow or not.\nReturns:\nA boolean value whether the op is in control flow or not.", "source": "github-repos"}
{"code": "def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor:\n    probs = inputs.sigmoid().flatten(1)\n    numerator = 2 * (probs * labels).sum(-1)\n    denominator = probs.sum(-1) + labels.sum(-1)\n    loss = 1 - (numerator + 1) / (denominator + 1)\n    loss = loss.sum() / num_masks\n    return loss", "docstring": "Compute the DICE loss, similar to generalized IOU for masks as follows:\n\n$$ \\mathcal{L}_{\\text{dice}(x, y) = 1 - \\frac{2 * x \\cap y }{x \\cup y + 1}} $$\n\nIn practice, since `labels` is a binary mask, (only 0s and 1s), dice can be computed as follow\n\n$$ \\mathcal{L}_{\\text{dice}(x, y) = 1 - \\frac{2 * x * y }{x + y + 1}} $$\n\nArgs:\ninputs (`torch.Tensor`):\nA tensor representing a mask.\nlabels (`torch.Tensor`):\nA tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs\n(0 for the negative class and 1 for the positive class).\nnum_masks (`int`):\nThe number of masks present in the current batch, used for normalization.\n\nReturns:\n`torch.Tensor`: The computed loss.", "source": "github-repos"}
{"code": "def start_new_feature(**cc_kwargs):\n    \n    project = Project.from_path(pathlib.Path.cwd().resolve())\n    contrib_dir = project.get('contrib', 'module_path')\n\n    with tempfile.TemporaryDirectory() as tempdir:\n        \n        output_dir = tempdir\n        cc_kwargs['output_dir'] = output_dir\n        rendered_dir = render_feature_template(**cc_kwargs)\n\n        \n        src = rendered_dir\n        dst = contrib_dir\n        synctree(src, dst, onexist=_fail_if_feature_exists)\n\n    logger.info('Start new feature successful.')", "docstring": "Start a new feature within a ballet project\n\nRenders the feature template into a temporary directory, then copies the\nfeature files into the proper path within the contrib directory.\n\nArgs:\n**cc_kwargs: options for the cookiecutter template\n\nRaises:\nballet.exc.BalletError: the new feature has the same name as an\nexisting one", "source": "juraj-google-style"}
{"code": "def get_data(self,\n                 file_path=sys.stdin,\n                 delimiter=',',\n                 categories_delimiter=None):\n        \n        if file_path == sys.stdin:\n            logger.info('Read data from standard input')\n            lines = [line.replace('\\n', '') for line in file_path]\n        else:\n            logger.info('Read data from file ' + file_path)\n            with open(file_path) as file:\n                lines = list(file)\n        columns = lines[0].rstrip('\\n').split(delimiter)[1:]\n        categories = None\n        if categories_delimiter:\n            columns, categories = zip(*[c.split(categories_delimiter, 1)\n                                        for c in columns])\n        size = len(columns)\n        data = [list(map(int, l.split(delimiter)[1:]))\n                for l in lines[1:size + 1]]\n        return DesignStructureMatrix(data, columns, categories)", "docstring": "Implement get_dsm method from Provider class.\n\nParse CSV to return an instance of DSM.\n\nArgs:\nfile_path (str/fd): path or file descriptor.\ndelimiter (str): character(s) used as delimiter for columns.\ncategories_delimiter (str):\ncharacter(s) used as delimiter for categories and keys\n(first column).\n\nReturns:\nDSM: instance of DSM.", "source": "juraj-google-style"}
{"code": "def expand_source_files(filenames, cwd=None):\n    out = []\n    for f in expand_globpaths(filenames.split(), cwd):\n        if path_utils.isdir(f):\n            out += recursive_glob(path_utils.join(f, '**', '*.py'))\n        elif f.endswith('.py'):\n            out.append(f)\n        elif is_file_script(f, cwd):\n            out.append(f)\n    return set(out)", "docstring": "Expand a space-separated string of filenames passed in as sources.\n\nThis is a helper function for handling command line arguments that specify a\nlist of source files and directories.\n\nAny directories in filenames will be scanned recursively for .py files.\nAny files that do not end with \".py\" will be dropped.\n\nArgs:\nfilenames: A space-separated string of filenames to process.\ncwd: An optional working directory to expand relative paths\n\nReturns:\nA set of full paths to .py files", "source": "github-repos"}
{"code": "def _maybe_create_attribute(self, name, default_value):\n    if not hasattr(self, name):\n        self.__setattr__(name, default_value)", "docstring": "Create the attribute with the default value if it hasn't been created.\n\nThis is useful for fields that is used for tracking purpose,\n_trainable_weights, or _layers. Note that user could create a layer subclass\nand assign an internal field before invoking the Layer.__init__(), the\n__setattr__() need to create the tracking fields and __init__() need to not\noverride them.\n\nArgs:\nname: String, the name of the attribute.\ndefault_value: Object, the default value of the attribute.", "source": "github-repos"}
{"code": "def survey_basis(self, keys=None, alias=None, step=None):\n        \n        if keys is None:\n            keys = [k for k, v in self.data.items() if isinstance(v, Curve)]\n        else:\n            keys = utils.flatten_list(keys)\n\n        starts, stops, steps = [], [], []\n        for k in keys:\n            d = self.get_curve(k, alias=alias)\n            if keys and (d is None):\n                continue\n            try:\n                starts.append(d.basis[0])\n                stops.append(d.basis[-1])\n                steps.append(d.basis[1] - d.basis[0])\n            except Exception as e:\n                pass\n        if starts and stops and steps:\n            step = step or min(steps)\n            return np.arange(min(starts), max(stops)+1e-9, step)\n        else:\n            return None", "docstring": "Look at the basis of all the curves in ``well.data`` and return a\nbasis with the minimum start, maximum depth, and minimum step.\n\nArgs:\nkeys (list): List of strings: the keys of the data items to\nsurvey, if not all of them.\nalias (dict): a dictionary mapping mnemonics to lists of mnemonics.\nstep (float): a new step, if you want to change it.\n\nReturns:\nndarray. The most complete common basis.", "source": "juraj-google-style"}
{"code": "def to_molden(cartesian_list, buf=None, sort_index=True, overwrite=True, float_format='{:.6f}'.format):\n    if sort_index:\n        cartesian_list = [molecule.sort_index() for molecule in cartesian_list]\n    give_header = (((((((('[MOLDEN FORMAT]\\n' + '[N_GEO]\\n') + str(len(cartesian_list))) + '\\n') + '[GEOCONV]\\n') + 'energy\\n{energy}') + 'max-force\\n{max_force}') + 'rms-force\\n{rms_force}') + '[GEOMETRIES] (XYZ)\\n').format\n    values = (len(cartesian_list) * '1\\n')\n    energy = [str(m.metadata.get('energy', 1)) for m in cartesian_list]\n    energy = ('\\n'.join(energy) + '\\n')\n    header = give_header(energy=energy, max_force=values, rms_force=values)\n    coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format) for x in cartesian_list]\n    output = (header + '\\n'.join(coordinates))\n    if (buf is not None):\n        if overwrite:\n            with open(buf, mode='w') as f:\n                f.write(output)\n        else:\n            with open(buf, mode='x') as f:\n                f.write(output)\n    else:\n        return output", "docstring": "Write a list of Cartesians into a molden file.\n\n.. note:: Since it permamently writes a file, this function\nis strictly speaking **not sideeffect free**.\nThe list to be written is of course not changed.\n\nArgs:\ncartesian_list (list):\nbuf (str): StringIO-like, optional buffer to write to\nsort_index (bool): If sort_index is true, the Cartesian\nis sorted by the index before writing.\noverwrite (bool): May overwrite existing files.\nfloat_format (one-parameter function): Formatter function\nto apply to column’s elements if they are floats.\nThe result of this function must be a unicode string.\n\nReturns:\nformatted : string (or unicode, depending on data and options)", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    priority_level = event_values.get('level', None)\n    if isinstance(priority_level, py2to3.INTEGER_TYPES):\n      event_values['level'] = '{0:s} ({1:d})'.format(\n          self._PRIORITY_LEVELS.get(priority_level, 'UNKNOWN'), priority_level)\n\n    \n    read_uid = event_values.get('read_uid', None)\n    if read_uid == -1:\n      event_values['read_uid'] = 'ALL'\n\n    \n    read_gid = event_values.get('read_gid', None)\n    if read_gid == -1:\n      event_values['read_gid'] = 'ALL'\n\n    \n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def markdown_to_html_with_extensions(text, options=0, extensions=None):\n    \n    if extensions is None:\n        extensions = []\n\n    core_extensions_ensure_registered()\n\n    cmark_extensions = []\n    for extension_name in extensions:\n        extension = find_syntax_extension(extension_name)\n        if extension is None:\n            raise ValueError('Unknown extension {}'.format(extension_name))\n        cmark_extensions.append(extension)\n\n    parser = parser_new(options=options)\n\n    try:\n        for extension in cmark_extensions:\n            parser_attach_syntax_extension(parser, extension)\n\n        parser_feed(parser, text)\n\n        root = parser_finish(parser)\n\n        if _cmark.lib.cmark_node_get_type(root) == _cmark.lib.CMARK_NODE_NONE:\n            raise ValueError('Error parsing markdown!')\n\n        extensions_ll = parser_get_syntax_extensions(parser)\n\n        output = render_html(root, options=options, extensions=extensions_ll)\n\n    finally:\n        parser_free(parser)\n\n    return output", "docstring": "Render the given text to Markdown, using extensions.\n\nThis is a high-level wrapper over the various functions needed to enable\nextensions, attach them to a parser, and render html.\n\nArgs:\ntext (str): The text to render to Markdown.\noptions (int): The cmark options.\nextensions (Sequence[str]): The list of extension names to use.\n\nReturns:\nstr: The rendered markdown.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n        \n        if \"widget\" not in kwargs:\n            kwargs[\"widget\"] = PasswordConfirmationInput(\n                confirm_with=kwargs.pop('confirm_with', None))\n\n        super(PasswordConfirmationField, self).__init__(*args, **kwargs)", "docstring": "Init method.\n\nArgs:\n*args (): Django's args for a form field.\n**kwargs (): Django's kwargs for a form field. Should contain a\nconfirm_with keyword argument to point to the password field.", "source": "juraj-google-style"}
{"code": "def add(self, term):\n    if isinstance(term, Conjunction):\n        for term_ in term.terms:\n            self.add(term_)\n    elif isinstance(term, Term):\n        self._terms.append(term)\n    else:\n        raise TypeError('Not a Term or Conjunction')", "docstring": "Add a term to the conjunction.\n\nArgs:\nterm (:class:`Term`, :class:`Conjunction`): term to add;\nif a :class:`Conjunction`, all of its terms are added\nto the current conjunction.\nRaises:\n:class:`TypeError`: when *term* is an invalid type", "source": "codesearchnet"}
{"code": "def set_domain_workgroup(workgroup):\n    if six.PY2:\n        workgroup = _to_unicode(workgroup)\n    with salt.utils.winapi.Com():\n        conn = wmi.WMI()\n        comp = conn.Win32_ComputerSystem()[0]\n        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())\n    return (True if (not res[0]) else False)", "docstring": "Set the domain or workgroup the computer belongs to.\n\n.. versionadded:: 2019.2.0\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_domain_workgroup LOCAL", "source": "codesearchnet"}
{"code": "def get_simulated_data(nmr_problems):\n    \n    \n    nmr_observed_tanks = 10\n\n    \n    nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint')\n\n    \n    observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint')\n\n    return observations, nmr_tanks_ground_truth", "docstring": "Simulate some data.\n\nThis returns the simulated tank observations and the corresponding ground truth maximum number of tanks.\n\nArgs:\nnmr_problems (int): the number of problems\n\nReturns:\ntuple: (observations, nmr_tanks_ground_truth)", "source": "juraj-google-style"}
{"code": "def l2_distance_sq(t1, t2, name=None):\n    with tf.name_scope(name, 'l2_distance_sq', [t1, t2]) as scope:\n        t1 = tf.convert_to_tensor(t1, name='t1')\n        t2 = tf.convert_to_tensor(t2, name='t2')\n        return length_squared(tf.subtract(t1, t2), name=scope)", "docstring": "Square of l2 distance between t1 and t2.\n\nArgs:\nt1: A tensor.\nt2: A tensor that is the same size as t1.\nname: Optional name for this op.\nReturns:\nThe l2 distance between t1 and t2.", "source": "codesearchnet"}
{"code": "def RunStateMethod(self, method_name, request=None, responses=None):\n    \n    if self.rdf_flow.pending_termination:\n      self.Error(error_message=self.rdf_flow.pending_termination.reason)\n      return\n\n    client_id = self.rdf_flow.client_id\n\n    deadline = self.rdf_flow.processing_deadline\n    if deadline and rdfvalue.RDFDatetime.Now() > deadline:\n      raise flow.FlowError(\"Processing time for flow %s on %s expired.\" %\n                           (self.rdf_flow.flow_id, self.rdf_flow.client_id))\n\n    self.rdf_flow.current_state = method_name\n    if request and responses:\n      logging.debug(\"Running %s for flow %s on %s, %d responses.\", method_name,\n                    self.rdf_flow.flow_id, client_id, len(responses))\n    else:\n      logging.debug(\"Running %s for flow %s on %s\", method_name,\n                    self.rdf_flow.flow_id, client_id)\n\n    try:\n      try:\n        method = getattr(self, method_name)\n      except AttributeError:\n        raise ValueError(\"Flow %s has no state method %s\" %\n                         (self.__class__.__name__, method_name))\n\n      \n      responses = flow_responses.Responses.FromResponses(\n          request=request, responses=responses)\n\n      if responses.status is not None:\n        self.SaveResourceUsage(responses.status)\n\n      stats_collector_instance.Get().IncrementCounter(\"grr_worker_states_run\")\n\n      if method_name == \"Start\":\n        stats_collector_instance.Get().IncrementCounter(\n            \"flow_starts\", fields=[self.rdf_flow.flow_class_name])\n        method()\n      else:\n        method(responses)\n\n      if self.replies_to_process:\n        if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:\n          self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process)\n        else:\n          self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process)\n\n        self.replies_to_process = []\n\n    \n    \n    except Exception as e:  \n      \n      stats_collector_instance.Get().IncrementCounter(\n          \"flow_errors\", fields=[self.rdf_flow.flow_class_name])\n      logging.exception(\"Flow %s on %s raised %s.\", self.rdf_flow.flow_id,\n                        client_id, utils.SmartUnicode(e))\n\n      self.Error(\n          error_message=utils.SmartUnicode(e), backtrace=traceback.format_exc())", "docstring": "Completes the request by calling the state method.\n\nArgs:\nmethod_name: The name of the state method to call.\nrequest: A RequestState protobuf.\nresponses: A list of FlowMessages responding to the request.", "source": "juraj-google-style"}
{"code": "def _pick_or_create_inserted_op_moment_index(\n            self, splitter_index: int, op: ops.Operation,\n            strategy: InsertStrategy) -> int:\n        \n\n        if (strategy is InsertStrategy.NEW or\n                strategy is InsertStrategy.NEW_THEN_INLINE):\n            self._moments.insert(splitter_index, ops.Moment())\n            return splitter_index\n\n        if strategy is InsertStrategy.INLINE:\n            if (0 <= splitter_index - 1 < len(self._moments) and\n                    self._can_add_op_at(splitter_index - 1, op)):\n                return splitter_index - 1\n\n            return self._pick_or_create_inserted_op_moment_index(\n                splitter_index, op, InsertStrategy.NEW)\n\n        if strategy is InsertStrategy.EARLIEST:\n            if self._can_add_op_at(splitter_index, op):\n                p = self._prev_moment_available(op, splitter_index)\n                return p or 0\n\n            return self._pick_or_create_inserted_op_moment_index(\n                splitter_index, op, InsertStrategy.INLINE)\n\n        raise ValueError('Unrecognized append strategy: {}'.format(strategy))", "docstring": "Determines and prepares where an insertion will occur.\n\nArgs:\nsplitter_index: The index to insert at.\nop: The operation that will be inserted.\nstrategy: The insertion strategy.\n\nReturns:\nThe index of the (possibly new) moment where the insertion should\noccur.\n\nRaises:\nValueError: Unrecognized append strategy.", "source": "juraj-google-style"}
{"code": "def CleanVacuousVersions(clients=None, dry_run=True):\n    if (not clients):\n        index = client_index.CreateClientIndex()\n        clients = index.LookupClients(['.'])\n    clients.sort()\n    with data_store.DB.GetMutationPool() as pool:\n        logging.info('checking %d clients', len(clients))\n        for batch in collection.Batch(clients, 10000):\n            client_infos = data_store.DB.MultiResolvePrefix(batch, ['aff4:', 'aff4:'], data_store.DB.ALL_TIMESTAMPS)\n            for (client, type_list) in client_infos:\n                cleared = 0\n                kept = 0\n                updates = []\n                for (a, _, ts) in type_list:\n                    if (ts != 0):\n                        updates.append((ts, a))\n                updates = sorted(updates)\n                dirty = True\n                for (ts, a) in updates:\n                    if (a == 'aff4:type'):\n                        if dirty:\n                            kept += 1\n                            dirty = False\n                        else:\n                            cleared += 1\n                            if (not dry_run):\n                                pool.DeleteAttributes(client, ['aff4:type'], start=ts, end=ts)\n                                if (pool.Size() > 1000):\n                                    pool.Flush()\n                    else:\n                        dirty = True\n                logging.info('%s: kept %d and cleared %d', client, kept, cleared)", "docstring": "A script to remove no-op client versions.\n\nThis script removes versions of a client when it is identical to the previous,\nin the sense that no versioned attributes were changed since the previous\nclient version.\n\nArgs:\nclients: A list of ClientURN, if empty cleans all clients.\ndry_run: whether this is a dry run", "source": "codesearchnet"}
{"code": "def _populate(cls, as_of=None, delete=False):\n    billing_cycle_helper = get_billing_cycle()\n    billing_cycles_exist = BillingCycle.objects.exists()\n    try:\n        current_billing_cycle = BillingCycle.objects.as_of(date=as_of)\n    except BillingCycle.DoesNotExist:\n        current_billing_cycle = None\n    if (not billing_cycles_exist):\n        delete = False\n    if (billing_cycles_exist and (not current_billing_cycle)):\n        raise CannotPopulateForDateOutsideExistingCycles()\n    omit_current = (current_billing_cycle and delete)\n    stop_date = (as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS))\n    date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)\n    date_ranges = list(date_ranges)\n    beginning_date = date_ranges[0][0]\n    with db_transaction.atomic():\n        if delete:\n            cls.objects.filter(start_date__gte=beginning_date).delete()\n        for (start_date, end_date) in date_ranges:\n            exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()\n            if exists:\n                if delete:\n                    raise Exception('It should not be possible to get here as future billing cycles have just been deleted')\n                else:\n                    pass\n            else:\n                BillingCycle.objects.create(date_range=(start_date, end_date))", "docstring": "Populate the table with billing cycles starting from `as_of`\n\nArgs:\nas_of (date): The date at which to begin the populating\ndelete (bool): Should future billing cycles be deleted?", "source": "codesearchnet"}
{"code": "def from_string(cls, data, sigfigs=8):\n        \n        lines = data.split(\"\\n\")[:-1]\n        struc_lines = {\"HEADER\": [], \"VERS\": [], \"SYMGRP\": [],\n                       \"STRUC\": [], \"CLASS\": [], \"SITE\": []}\n        for line in lines:\n            if line != \"\" and not line.isspace():\n                if not line[0].isspace():\n                    cat = line.split()[0]\n                if cat in struc_lines:\n                    struc_lines[cat].append(line)\n                else:\n                    pass\n        for cat in struc_lines:\n            struc_lines[cat] = \" \".join(struc_lines[cat]).replace(\"= \", \"=\")\n\n        structure_tokens = {\"ALAT\": None,\n                            \"PLAT\": [],\n                            \"CLASS\": [],\n                            \"SITE\": []}\n\n        for cat in [\"STRUC\", \"CLASS\", \"SITE\"]:\n            fields = struc_lines[cat].split(\"=\")\n            for f, field in enumerate(fields):\n                token = field.split()[-1]\n                if token == \"ALAT\":\n                    alat = round(float(fields[f+1].split()[0]), sigfigs)\n                    structure_tokens[\"ALAT\"] = alat\n                elif token == \"ATOM\":\n                    atom = fields[f+1].split()[0]\n                    if not bool(re.match(\"E[0-9]*$\", atom)):\n                        if cat == \"CLASS\":\n                            structure_tokens[\"CLASS\"].append(atom)\n                        else:\n                            structure_tokens[\"SITE\"].append({\"ATOM\": atom})\n                    else:\n                        pass\n                elif token in [\"PLAT\", \"POS\"]:\n                    try:\n                        arr = np.array([round(float(i), sigfigs)\n                                       for i in fields[f+1].split()])\n                    except ValueError:\n                        arr = np.array([round(float(i), sigfigs)\n                                       for i in fields[f+1].split()[:-1]])\n                    if token == \"PLAT\":\n                        structure_tokens[\"PLAT\"] = arr.reshape([3, 3])\n                    elif not bool(re.match(\"E[0-9]*$\", atom)):\n                        structure_tokens[\"SITE\"][-1][\"POS\"] = arr\n                    else:\n                        pass\n                else:\n                    pass\n        try:\n            spcgrp_index = struc_lines[\"SYMGRP\"].index(\"SPCGRP\")\n            spcgrp = struc_lines[\"SYMGRP\"][spcgrp_index:spcgrp_index+12]\n            structure_tokens[\"SPCGRP\"] = spcgrp.split(\"=\")[1].split()[0]\n        except ValueError:\n            pass\n\n        for token in [\"HEADER\", \"VERS\"]:\n                try:\n                    value = re.split(token + r\"\\s*\", struc_lines[token])[1]\n                    structure_tokens[token] = value.strip()\n                except IndexError:\n                    pass\n        return LMTOCtrl.from_dict(structure_tokens)", "docstring": "Creates a CTRL file object from a string. This will mostly be\nused to read an LMTOCtrl object from a CTRL file. Empty spheres\nare ignored.\n\nArgs:\ndata: String representation of the CTRL file.\n\nReturns:\nAn LMTOCtrl object.", "source": "juraj-google-style"}
{"code": "def _call_post_with_user_override(self, sap_user_id, url, payload):\n        \n        SAPSuccessFactorsEnterpriseCustomerConfiguration = apps.get_model(  \n            'sap_success_factors',\n            'SAPSuccessFactorsEnterpriseCustomerConfiguration'\n        )\n        oauth_access_token, _ = SAPSuccessFactorsAPIClient.get_oauth_access_token(\n            self.enterprise_configuration.sapsf_base_url,\n            self.enterprise_configuration.key,\n            self.enterprise_configuration.secret,\n            self.enterprise_configuration.sapsf_company_id,\n            sap_user_id,\n            SAPSuccessFactorsEnterpriseCustomerConfiguration.USER_TYPE_USER\n        )\n\n        response = requests.post(\n            url,\n            data=payload,\n            headers={\n                'Authorization': 'Bearer {}'.format(oauth_access_token),\n                'content-type': 'application/json'\n            }\n        )\n\n        return response.status_code, response.text", "docstring": "Make a post request with an auth token acquired for a specific user to a SuccessFactors endpoint.\n\nArgs:\nsap_user_id (str): The user to use to retrieve an auth token.\nurl (str): The url to post to.\npayload (str): The json encoded payload to post.", "source": "juraj-google-style"}
{"code": "def apply(self, flag_set: AbstractSet[Flag], operand: AbstractSet[Flag]) \\\n            -> FrozenSet[Flag]:\n        \n        if self == FlagOp.ADD:\n            return frozenset(flag_set | operand)\n        elif self == FlagOp.DELETE:\n            return frozenset(flag_set - operand)\n        else:  \n            return frozenset(operand)", "docstring": "Apply the flag operation on the two sets, returning the result.\n\nArgs:\nflag_set: The flag set being operated on.\noperand: The flags to use as the operand.", "source": "juraj-google-style"}
{"code": "def parse_time_indices(s):\n  \n  if not s.startswith('['):\n    s = '[' + s + ']'\n  parsed = command_parser._parse_slices(s)\n  if len(parsed) != 1:\n    raise ValueError(\n        'Invalid number of slicing objects in time indices (%d)' % len(parsed))\n  else:\n    return parsed[0]", "docstring": "Parse a string as time indices.\n\nArgs:\ns: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10'\n\nReturns:\nA slice object.\n\nRaises:\nValueError: If `s` does not represent valid time indices.", "source": "juraj-google-style"}
{"code": "def delete(table, keyset):\n    delete = Mutation.Delete(table=table, key_set=keyset._to_pb())\n    return _Mutator(mutation=Mutation(delete=delete), rows=0, cells=0, operation=WriteMutation._OPERATION_DELETE, kwargs={'table': table, 'keyset': keyset})", "docstring": "Delete one or more table rows.\n\nArgs:\ntable: Name of the table to be modified.\nkeyset: Keys/ranges identifying rows to delete.", "source": "github-repos"}
{"code": "def build_global(self, global_node):\n    config_block_lines = self.__build_config_block(global_node.config_block)\n    return config.Global(config_block=config_block_lines)", "docstring": "parse `global` section, and return the config.Global\n\nArgs:\nglobal_node (TreeNode):  `global` section treenode\n\nReturns:\nconfig.Global: an object", "source": "codesearchnet"}
{"code": "def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):\n\n    def get_gpu_info():\n        gpu_info = subprocess.check_output(['nvidia-smi', '--format=csv,noheader,nounits', '--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu']).decode()\n        gpu_info = gpu_info.split('\\n')\n        gpu_info_array = []\n        for line in gpu_info:\n            if (len(line) > 0):\n                (gpu_id, total_memory, free_memory, used_memory, gpu_util) = line.split(',')\n                gpu_memory_util = (float(used_memory) / float(total_memory))\n                gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))\n        return gpu_info_array\n    num_times_to_average = 5\n    current_array = []\n    for ind in range(num_times_to_average):\n        current_array.append(get_gpu_info())\n        time.sleep(1)\n    num_gpus = len(current_array[0])\n    avg_array = [(0, 0, str(x)) for x in range(num_gpus)]\n    for ind in range(num_times_to_average):\n        for gpu_ind in range(num_gpus):\n            avg_array[gpu_ind] = ((avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0]), (avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1]), avg_array[gpu_ind][2])\n    for gpu_ind in range(num_gpus):\n        avg_array[gpu_ind] = ((float(avg_array[gpu_ind][0]) / num_times_to_average), (float(avg_array[gpu_ind][1]) / num_times_to_average), avg_array[gpu_ind][2])\n    avg_array.sort()\n    gpus_found = 0\n    gpus_to_use = ''\n    free_memory = 1.0\n    for current_gpu in avg_array:\n        if ((current_gpu[0] < max_gpu_utilization) and ((1 - current_gpu[1]) > min_free_memory)):\n            if (gpus_found == 0):\n                gpus_to_use = current_gpu[2]\n                free_memory = (1 - current_gpu[1])\n            else:\n                gpus_to_use = ((gpus_to_use + ',') + current_gpu[2])\n                free_memory = min(free_memory, (1 - current_gpu[1]))\n            gpus_found = (gpus_found + 1)\n        if (gpus_found == num_gpu):\n            break\n    return (gpus_to_use, free_memory)", "docstring": "Get available GPUs according to utilization thresholds.\n\nArgs:\n:max_gpu_utilization: percent utilization threshold to consider a GPU \"free\"\n:min_free_memory: percent free memory to consider a GPU \"free\"\n:num_gpu: number of requested GPUs\n\nReturns:\nA tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory\nis the lowest amount of free memory available on the available_gpus.", "source": "codesearchnet"}
{"code": "def parent(self) -> 'KeyPath':\n    if self.is_root:\n        raise KeyError('Parent of a root KeyPath does not exist.')\n    return KeyPath(self._keys[:-1])", "docstring": "The ``KeyPath`` object for current node's parent.\n\nExample::\n\npath = pg.KeyPath.parse('a.b.c.')\nassert path.parent == 'a.b'\n\nReturns:\nA ``KeyPath`` object for the parent of current node.\n\nRaises:\nKeyError: If current path is the root.", "source": "github-repos"}
{"code": "def _CopyDateFromString(self, date_string):\n    date_string_length = len(date_string)\n    if (date_string_length < 10):\n        raise ValueError('Date string too short.')\n    if ((date_string[4] != '-') or (date_string[7] != '-')):\n        raise ValueError('Invalid date string.')\n    try:\n        year = int(date_string[0:4], 10)\n    except ValueError:\n        raise ValueError('Unable to parse year.')\n    try:\n        month = int(date_string[5:7], 10)\n    except ValueError:\n        raise ValueError('Unable to parse month.')\n    try:\n        day_of_month = int(date_string[8:10], 10)\n    except ValueError:\n        raise ValueError('Unable to parse day of month.')\n    days_per_month = self._GetDaysPerMonth(year, month)\n    if ((day_of_month < 1) or (day_of_month > days_per_month)):\n        raise ValueError('Day of month value out of bounds.')\n    return (year, month, day_of_month)", "docstring": "Copies a date from a string.\n\nArgs:\ndate_string (str): date value formatted as: YYYY-MM-DD\n\nReturns:\ntuple[int, int, int]: year, month, day of month.\n\nRaises:\nValueError: if the date string is invalid or not supported.", "source": "codesearchnet"}
{"code": "def deserialize_block(value):\n    block = Block()\n    block.ParseFromString(value)\n    return BlockWrapper(block=block)", "docstring": "Deserialize a byte string into a BlockWrapper\n\nArgs:\nvalue (bytes): the byte string to deserialze\n\nReturns:\nBlockWrapper: a block wrapper instance", "source": "codesearchnet"}
{"code": "def OR(self):\n    clone = copy.deepcopy(self)\n    clone.adapter._QUERY_GLUE = ' OR '\n    return clone", "docstring": "Switches default query joiner from \" AND \" to \" OR \"\n\nReturns:\nSelf. Queryset object.", "source": "codesearchnet"}
{"code": "def build(self, spec, reset=True):\n        \n        if reset:\n            self.reset()\n\n        with self.model:\n\n            self.mu = 0.\n\n            for t in spec.terms.values():\n\n                data = t.data\n                label = t.name\n                dist_name = t.prior.name\n                dist_args = t.prior.args\n\n                n_cols = t.data.shape[1]\n\n                coef = self._build_dist(spec, label, dist_name,\n                                        shape=n_cols, **dist_args)\n\n                if t.random:\n                    self.mu += coef[t.group_index][:, None] * t.predictor\n                else:\n                    self.mu += pm.math.dot(data, coef)[:, None]\n\n            y = spec.y.data\n            y_prior = spec.family.prior\n            link_f = spec.family.link\n            if isinstance(link_f, string_types):\n                link_f = self.links[link_f]\n            else:\n                link_f = link_f\n            y_prior.args[spec.family.parent] = link_f(self.mu)\n            y_prior.args['observed'] = y\n            y_like = self._build_dist(spec, spec.y.name, y_prior.name,\n                                      **y_prior.args)\n\n            self.spec = spec", "docstring": "Compile the PyMC3 model from an abstract model specification.\nArgs:\nspec (Model): A bambi Model instance containing the abstract\nspecification of the model to compile.\nreset (bool): if True (default), resets the PyMC3BackEnd instance\nbefore compiling.", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        num_image_tokens = []\n        for height, width in image_sizes:\n            height, width = smart_resize(height, width, self.image_processor.spatial_factor, self.image_processor.min_pixels, self.image_processor.max_pixels)\n            height = height \n            width = width \n            image_seq_length = height * (width + 1)\n            num_image_tokens.append(image_seq_length)\n        num_image_patches = [1] * len(image_sizes)\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def print_run_bidirectional_blast(reference, other_genome, dbtype, outdir):\n    \n    \n\n    if dbtype == 'nucl':\n        command = 'blastn'\n    elif dbtype == 'prot':\n        command = 'blastp'\n    else:\n        raise ValueError('dbtype must be \"nucl\" or \"prot\"')\n\n    r_folder, r_name, r_ext = utils.split_folder_and_path(reference)\n    g_folder, g_name, g_ext = utils.split_folder_and_path(other_genome)\n\n    \n    r_vs_g_name = r_name + '_vs_' + g_name\n    r_vs_g = r_vs_g_name + '_blast.out'\n    if op.exists(op.join(outdir, r_vs_g)) and os.stat(op.join(outdir, r_vs_g)).st_size != 0:\n        log.debug('{} vs {} BLAST already run'.format(r_name, g_name))\n    else:\n        cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, g_name, r_vs_g)\n        utils.write_torque_script(command=cmd, err=r_vs_g_name, out=r_vs_g_name, name=r_vs_g_name,\n                                  outfile=op.join(outdir, r_vs_g_name) + '.sh',\n                                  walltime='00:15:00', queue='regular')\n\n    \n    g_vs_r_name = g_name + '_vs_' + r_name\n    g_vs_r = g_vs_r_name + '_blast.out'\n    if op.exists(op.join(outdir, g_vs_r)) and os.stat(op.join(outdir, g_vs_r)).st_size != 0:\n        log.debug('{} vs {} BLAST already run'.format(g_name, r_name))\n    else:\n        cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, r_name, g_vs_r)\n        utils.write_torque_script(command=cmd, err=g_vs_r_name, out=g_vs_r_name, name=g_vs_r_name,\n                                  outfile=op.join(outdir, g_vs_r_name) + '.sh',\n                                  walltime='00:15:00', queue='regular')", "docstring": "Write torque submission files for running bidirectional blast on a server and print execution command.\n\nArgs:\nreference (str): Path to \"reference\" genome, aka your \"base strain\"\nother_genome (str): Path to other genome which will be BLASTed to the reference\ndbtype (str): \"nucl\" or \"prot\" - what format your genome files are in\noutdir (str): Path to folder where Torque scripts should be placed", "source": "juraj-google-style"}
{"code": "def write_reactions(self, stream, reactions, properties=None):\n        \n        self._write_entries(\n            stream, reactions, self.convert_reaction_entry, properties)", "docstring": "Write iterable of reactions as YAML object to stream.\n\nArgs:\nstream: File-like object.\ncompounds: Iterable of reaction entries.\nproperties: Set of reaction properties to output (or None to output\nall).", "source": "juraj-google-style"}
{"code": "def Cleanse(obj, encoding='utf-8'):\n    if isinstance(obj, int):\n        return obj\n    elif isinstance(obj, float):\n        if (obj == _INFINITY):\n            return 'Infinity'\n        elif (obj == _NEGATIVE_INFINITY):\n            return '-Infinity'\n        elif math.isnan(obj):\n            return 'NaN'\n        else:\n            return obj\n    elif isinstance(obj, bytes):\n        return tf.compat.as_text(obj, encoding)\n    elif isinstance(obj, (list, tuple)):\n        return [Cleanse(i, encoding) for i in obj]\n    elif isinstance(obj, set):\n        return [Cleanse(i, encoding) for i in sorted(obj)]\n    elif isinstance(obj, dict):\n        return {Cleanse(k, encoding): Cleanse(v, encoding) for (k, v) in obj.items()}\n    else:\n        return obj", "docstring": "Makes Python object appropriate for JSON serialization.\n\n- Replaces instances of Infinity/-Infinity/NaN with strings.\n- Turns byte strings into unicode strings.\n- Turns sets into sorted lists.\n- Turns tuples into lists.\n\nArgs:\nobj: Python data structure.\nencoding: Charset used to decode byte strings.\n\nReturns:\nUnicode JSON data structure.", "source": "codesearchnet"}
{"code": "def _ConsumeSingleByteString(self):\n    text = self.token\n    if ((len(text) < 1) or (text[0] not in _QUOTES)):\n        raise self._ParseError(('Expected string but found: %r' % (text,)))\n    if ((len(text) < 2) or (text[(- 1)] != text[0])):\n        raise self._ParseError(('String missing ending quote: %r' % (text,)))\n    try:\n        result = text_encoding.CUnescape(text[1:(- 1)])\n    except ValueError as e:\n        raise self._ParseError(str(e))\n    self.NextToken()\n    return result", "docstring": "Consume one token of a string literal.\n\nString literals (whether bytes or text) can come in multiple adjacent\ntokens which are automatically concatenated, like in C or Python.  This\nmethod only consumes one token.\n\nReturns:\nThe token parsed.\nRaises:\nParseError: When the wrong format data is found.", "source": "codesearchnet"}
{"code": "def fulfill_transaction(transaction, *, private_keys):\n    \n    if not isinstance(private_keys, (list, tuple)):\n        private_keys = [private_keys]\n\n    \n    \n    if isinstance(private_keys, tuple):\n        private_keys = list(private_keys)\n\n    transaction_obj = Transaction.from_dict(transaction)\n    try:\n        signed_transaction = transaction_obj.sign(private_keys)\n    except KeypairMismatchException as exc:\n        raise MissingPrivateKeyError('A private key is missing!') from exc\n\n    return signed_transaction.to_dict()", "docstring": "Fulfills the given transaction.\n\nArgs:\ntransaction (dict): The transaction to be fulfilled.\nprivate_keys (:obj:`str` | :obj:`list` | :obj:`tuple`): One or\nmore private keys to be used for fulfilling the\ntransaction.\n\nReturns:\ndict: The fulfilled transaction payload, ready to be sent to a\nBigchainDB federation.\n\nRaises:\n:exc:`~.exceptions.MissingPrivateKeyError`: If a private\nkey is missing.", "source": "juraj-google-style"}
{"code": "def _ExpandDirectories(filenames):\n    expanded = set()\n    for filename in filenames:\n        if (not os.path.isdir(filename)):\n            expanded.add(filename)\n            continue\n        for (root, _, files) in os.walk(filename):\n            for loopfile in files:\n                fullname = os.path.join(root, loopfile)\n                if fullname.startswith(('.' + os.path.sep)):\n                    fullname = fullname[len(('.' + os.path.sep)):]\n                expanded.add(fullname)\n    filtered = []\n    for filename in expanded:\n        if (os.path.splitext(filename)[1][1:] in GetAllExtensions()):\n            filtered.append(filename)\n    return filtered", "docstring": "Searches a list of filenames and replaces directories in the list with\nall files descending from those directories. Files with extensions not in\nthe valid extensions list are excluded.\n\nArgs:\nfilenames: A list of files or directories\n\nReturns:\nA list of all files that are members of filenames or descended from a\ndirectory in filenames", "source": "codesearchnet"}
{"code": "def raw_search(self, *args, **kwargs):\n        \n        limit = 50\n        try:\n            limit = kwargs['limit']\n        except KeyError:\n            pass\n\n        \n        self._mail.select(\"inbox\")\n\n        \n        try:\n            date = kwargs['date']\n            date_str = date.strftime(\"%d-%b-%Y\")\n            _, email_ids = self._mail.search(None, '(SINCE \"%s\")' % date_str)\n        except KeyError:\n            _, email_ids = self._mail.search(None, 'ALL')\n\n        \n        email_ids = email_ids[0].split()\n\n        matching_uids = []\n        for _ in range(1, min(limit, len(email_ids))):\n            email_id = email_ids.pop()\n            rfc_body = self._mail.fetch(email_id, \"(RFC822)\")[1][0][1]\n\n            match = True\n            for expr in args:\n                if re.search(expr, rfc_body) is None:\n                    match = False\n                    break\n\n            if match:\n                uid = re.search(\n                    \"UID\\\\D*(\\\\d+)\\\\D*\", self._mail.fetch(email_id, 'UID')[1][0]).group(1)\n                matching_uids.append(uid)\n\n        return matching_uids", "docstring": "Find the a set of emails matching each regular expression passed in against the (RFC822) content.\n\nArgs:\n*args: list of regular expressions.\n\nKwargs:\nlimit (int) - Limit to how many of the most resent emails to search through.\ndate (datetime) - If specified, it will filter avoid checking messages older\nthan this date.", "source": "juraj-google-style"}
{"code": "def __init__(self, value_type: typing.Optional[typing.Union[typing.Type[typing.Any], typing.Tuple[typing.Type[typing.Any], ...]]], default: typing.Any=MISSING_VALUE, transform: typing.Optional[typing.Callable[[typing.Any], typing.Any]]=None, is_noneable: bool=False, frozen: bool=False):\n    super().__init__()\n    self._value_type = value_type\n    self._is_noneable = is_noneable\n    self._frozen = False\n    self._default = MISSING_VALUE\n    self._transform = transform\n    self.set_default(default)\n    self._frozen = frozen", "docstring": "Constructor of ValueSpecBase.\n\nThis class provides common facilities for implementing ValueSpec,\nincluding type check, default value assignment, noneable handling,\nmissing value handling, and etc. Subclasses only need to handle value\nspecific logics in `apply`, `extend`, and `is_compatible`.\n\nArgs:\nvalue_type: Type or tuples of type or None. When a not-none value_type is\npresent, type check will be performed.\ndefault: (Optional) Default value. If not specified, it always require\nuser to provide. Or it can be any value that can be accepted by this\nspec, or None, which automatically add Noneable property to the spec.\ntransform: (Optional) user-defined function to be called on the input\nof `apply`. It could be used as a type converter or a custom\nvalidator which may raise errors.\nis_noneable: (Optional) If True, None is acceptable for this spec.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def Print(self, output_writer):\n    \n    if self._extensions:\n      output_writer.Write('\\textensions: {0:s}\\n'.format(\n          ', '.join(self._extensions)))", "docstring": "Prints a human readable version of the filter.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def compute_expand_dims_output_shape(input_shape, axis):\n    input_shape = list(input_shape)\n    if axis is None:\n        axis = len(input_shape)\n    axis = to_tuple_or_list(axis)\n    out_ndim = len(axis) + len(input_shape)\n    axis = [canonicalize_axis(a, out_ndim) for a in axis]\n    shape_iter = iter(input_shape)\n    new_shape = [1 if ax in axis else next(shape_iter) for ax in range(out_ndim)]\n    return tuple(new_shape)", "docstring": "Compute the output shape for the `expand_dims` operation.\n\nArgs:\ninput_shape: Input shape.\naxis: int or sequence of ints for the axis to expand.\n\nReturns:\nTuple of ints: The output shape after the `expand_dims` operation.", "source": "github-repos"}
{"code": "def take_node_screenshot(self, element, screenshot_path):\n        from PIL import Image\n        \n\n        temp_path = os.path.join(tempdir, screenshot_path)\n\n        el_x = int(element.location['x'])\n        el_y = int(element.location['y'])\n        el_height = int(element.size['height'])\n        el_width = int(element.size['width'])\n\n        if el_height == 0 or el_width == 0:\n            self.debug_log(\"take_node_screenshot cannot be taken because element width or height equal zero\")  \n            return False\n\n        bounding_box = (\n            el_x,\n            el_y,\n            (el_x + el_width),\n            (el_y + el_height)\n        )\n\n        self._driver.save_screenshot(temp_path)\n\n        base_image = Image.open(temp_path)\n\n        cropped_image = base_image.crop(bounding_box)\n\n        base_image = base_image.resize(cropped_image.size)\n\n        base_image.paste(cropped_image, (0, 0))\n\n        base_image.save(screenshot_path)", "docstring": "Take a screenshot of a node\n\nArgs:\nelement (object): the proxy_element\nscreenshot_path (str): the path where the screenshot will be saved", "source": "juraj-google-style"}
{"code": "def generated_tag_data(tags):\n    \n    generated_tags = []\n    for key, value in tags.items():\n        generated_tags.append({\n            'Key': key,\n            'Value': value,\n        })\n    return generated_tags", "docstring": "Convert :obj:`dict` to S3 Tag list.\n\nArgs:\ntags (dict): Dictonary of tag key and tag value passed.\n\nReturns:\nlist: List of dictionaries.", "source": "juraj-google-style"}
{"code": "def __init__(self, port=None, queue_id=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_ENQUEUE, length=16)\n        self.port = port\n        self.queue_id = queue_id", "docstring": "Create an ActionEnqueue with the optional parameters below.\n\nArgs:\nport (physical port or :attr:`.Port.OFPP_IN_PORT`): Queue's port.\nqueue_id (int): Where to enqueue the packets.", "source": "juraj-google-style"}
{"code": "def get_data(name, train_batch_size, test_batch_size):\n    if (name not in ['mnist', 'cifar10']):\n        raise ValueError((\"Expected dataset 'mnist' or 'cifar10', but got %s\" % name))\n    dataset = getattr(tf.keras.datasets, name)\n    num_classes = 10\n    raw_data = dataset.load_data()\n    ((images_train, labels_train), (images_test, labels_test)) = raw_data\n    images_train = (images_train.astype(np.float32) / 255.0)\n    images_test = (images_test.astype(np.float32) / 255.0)\n    labels_train = labels_train.astype(np.int32).squeeze()\n    labels_test = labels_test.astype(np.int32).squeeze()\n    if (images_train.ndim == 3):\n        images_train = np.expand_dims(images_train, (- 1))\n        images_test = np.expand_dims(images_test, (- 1))\n    train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train))\n    test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test))\n    train_iterator = train_data.shuffle(buffer_size=len(images_train)).batch(train_batch_size).repeat().make_one_shot_iterator()\n    test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()\n    return dict(train_iterator=train_iterator, test_iterator=test_iterator, num_classes=num_classes)", "docstring": "Gets training and testing dataset iterators.\n\nArgs:\nname: String. Name of dataset, either 'mnist' or 'cifar10'.\ntrain_batch_size: Integer. Batch size for training.\ntest_batch_size: Integer. Batch size for testing.\n\nReturns:\nDict containing:\ntrain_iterator: A tf.data.Iterator, over training data.\ntest_iterator: A tf.data.Iterator, over test data.\nnum_classes: Integer. Number of class labels.", "source": "codesearchnet"}
{"code": "def get_timestamp(self, url, xpath=None):\n    if (not path.exists(self.db_path)):\n        return None\n    if (self._query(url, xpath).count() > 0):\n        return self._query(url, xpath).one().queried_on", "docstring": "Get time stamp of cached query result.\n\nIf DB has not yet been initialized or url/xpath has not been queried yet, return None.\n\nArgs:\nurl (str): If given, clear specific item only. Otherwise remove the DB file.\nxpath (str): xpath to search (may be ``None``)\n\nReturns:\ndatetime.datetime: cached response timestamp, None if not available", "source": "codesearchnet"}
{"code": "def random_walk_uniform_fn(scale=1.0, name=None):\n\n    def _fn(state_parts, seed):\n        'Adds a uniform perturbation to the input state.\\n\\n    Args:\\n      state_parts: A list of `Tensor`s of any shape and real dtype representing\\n        the state parts of the `current_state` of the Markov chain.\\n      seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\\n        applied.\\n        Default value: `None`.\\n\\n    Returns:\\n      perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same\\n        shape and type as the `state_parts`.\\n\\n    Raises:\\n      ValueError: if `scale` does not broadcast with `state_parts`.\\n    '\n        with tf.compat.v1.name_scope(name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]):\n            scales = (scale if mcmc_util.is_list_like(scale) else [scale])\n            if (len(scales) == 1):\n                scales *= len(state_parts)\n            if (len(state_parts) != len(scales)):\n                raise ValueError('`scale` must broadcast with `state_parts`.')\n            seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn')\n            next_state_parts = [tf.random.uniform(minval=(state_part - scale_part), maxval=(state_part + scale_part), shape=tf.shape(input=state_part), dtype=state_part.dtype.base_dtype, seed=seed_stream()) for (scale_part, state_part) in zip(scales, state_parts)]\n            return next_state_parts\n    return _fn", "docstring": "Returns a callable that adds a random uniform perturbation to the input.\n\nFor more details on `random_walk_uniform_fn`, see\n`random_walk_normal_fn`. `scale` might\nbe a `Tensor` or a list of `Tensor`s that should broadcast with state parts\nof the `current_state`. The generated uniform perturbation is sampled as a\nuniform point on the rectangle `[-scale, scale]`.\n\nArgs:\nscale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`\ncontrolling the upper and lower bound of the uniform proposal\ndistribution.\nname: Python `str` name prefixed to Ops created by this function.\nDefault value: 'random_walk_uniform_fn'.\n\nReturns:\nrandom_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s\nrepresenting the state parts of the `current_state` and an `int`\nrepresenting the random seed used to generate the proposal. The callable\nreturns the same-type `list` of `Tensor`s as the input and represents the\nproposal for the RWM algorithm.", "source": "codesearchnet"}
{"code": "def export_gpx_file(self):\n    gpx = create_elem('gpx', GPX_ELEM_ATTRIB)\n    if (not self.metadata.bounds):\n        self.metadata.bounds = self[:]\n    gpx.append(self.metadata.togpx())\n    for place in self:\n        gpx.append(place.togpx())\n    return etree.ElementTree(gpx)", "docstring": "Generate GPX element tree from ``Waypoints`` object.\n\nReturns:\netree.ElementTree: GPX element tree depicting ``Waypoints`` object", "source": "codesearchnet"}
{"code": "def l2_regression_loss(y, target, name=None):\n    with tf.name_scope(name, 'l2_regression', [y, target]) as scope:\n        y = tf.convert_to_tensor(y, name='y')\n        target = tf.convert_to_tensor(target, name='target')\n        return tf.sqrt(l2_regression_sq_loss(y, target, name=scope))", "docstring": "Calculates the square root of the SSE between y and target.\n\nArgs:\ny: the calculated values.\ntarget: the desired values.\nname: the name for this op, defaults to l2_regression\nReturns:\nA tensorflow op.", "source": "codesearchnet"}
{"code": "def random_weights(n, bounds=(0., 1.), total=1.0):\n    \n    low = bounds[0]\n    high = bounds[1]\n\n    if high < low:\n        raise ValueError('Higher bound must be greater or '\n                         'equal to lower bound')\n\n    if n * high < total or n * low > total:\n        raise ValueError('solution not possible with given n and bounds')\n\n    w = [0] * n\n    tgt = -float(total)\n\n    for i in range(n):\n        rn = n - i - 1\n        rhigh = rn * high\n        rlow = rn * low\n\n        lowb = max(-rhigh - tgt, low)\n        highb = min(-rlow - tgt, high)\n\n        rw = random.uniform(lowb, highb)\n        w[i] = rw\n\n        tgt += rw\n\n    random.shuffle(w)\n    return w", "docstring": "Generate pseudo-random weights.\n\nReturns a list of random weights that is of length\nn, where each weight is in the range bounds, and\nwhere the weights sum up to total.\n\nUseful for creating random portfolios when benchmarking.\n\nArgs:\n* n (int): number of random weights\n* bounds ((low, high)): bounds for each weight\n* total (float): total sum of the weights", "source": "juraj-google-style"}
{"code": "def send_message(self):\n    start = time.time()\n    message = None\n    if (not self.initialized):\n        message = self.construct_start_message()\n        self.initialized = True\n    else:\n        message = self.construct_end_message()\n    self.send_UDP_message(message)\n    end = time.time()\n    return (end - start)", "docstring": "Send message over UDP.\n\nIf tracking is disables, the bytes_sent will always be set to -1\n\nReturns:\n(bytes_sent, time_taken)", "source": "codesearchnet"}
{"code": "def debug_object(obj, log_level: int = logging.DEBUG) -> None:\n    \n    msgs = [\"For {o!r}:\".format(o=obj)]\n    for attrname in dir(obj):\n        attribute = getattr(obj, attrname)\n        msgs.append(\"- {an!r}: {at!r}, of type {t!r}\".format(\n            an=attrname, at=attribute, t=type(attribute)))\n    log.log(log_level, \"{}\", \"\\n\".join(msgs))", "docstring": "Sends details about a Python to the log, specifically its ``repr()``\nrepresentation, and all of its attributes with their name, value, and type.\n\nArgs:\nobj: object to debug\nlog_level: log level to use; default is ``logging.DEBUG``", "source": "juraj-google-style"}
{"code": "def accepts(self, tp, converter):\n        \n\n        tp = ParameterizedProperty._validate_type_param(tp)\n        self.alternatives.append((tp, converter))\n        return self", "docstring": "Declare that other types may be converted to this property type.\n\nArgs:\ntp (Property) :\nA type that may be converted automatically to this property\ntype.\n\nconverter (callable) :\nA function accepting ``value`` to perform conversion of the\nvalue to this property type.\n\nReturns:\nself", "source": "juraj-google-style"}
{"code": "def add_curves_from_lasio(self, l, remap=None, funcs=None):\n        \n        params = {}\n        for field, (sect, code) in LAS_FIELDS['data'].items():\n            params[field] = utils.lasio_get(l,\n                                            sect,\n                                            code,\n                                            remap=remap,\n                                            funcs=funcs)\n\n        curves = {c.mnemonic: Curve.from_lasio_curve(c, **params)\n                  for c in l.curves}\n\n        \n        self.data.update(curves)\n\n        return None", "docstring": "Given a LAS file, add curves from it to the current well instance.\nEssentially just wraps ``add_curves_from_lasio()``.\n\nArgs:\nfname (str): The path of the LAS file to read curves from.\nremap (dict): Optional. A dict of 'old': 'new' LAS field names.\nfuncs (dict): Optional. A dict of 'las field': function() for\nimplementing a transform before loading. Can be a lambda.\n\nReturns:\nNone. Works in place.", "source": "juraj-google-style"}
{"code": "def distances(self, word, words):\n    \n\n    point = self[word]\n    vectors = np.asarray([self[w] for w in words])\n    diff = vectors - point\n    distances = np.linalg.norm(diff, axis=1)\n    return distances", "docstring": "Calculate eucledean pairwise distances between `word` and `words`.\n\nArgs:\nword (string): single word.\nwords (list): list of strings.\n\nReturns:\nnumpy array of the distances.\n\nNote:\nL2 metric is used to calculate distances.", "source": "juraj-google-style"}
{"code": "def get_cookie_header(queue_item):\n    header = []\n    path = URLHelper.get_path(queue_item.request.url)\n    for cookie in queue_item.request.cookies:\n        root_path = ((cookie.path == '') or (cookie.path == '/'))\n        if (path.startswith(cookie.path) or root_path):\n            header.append(((cookie.name + '=') + cookie.value))\n    return '&'.join(header)", "docstring": "Convert a requests cookie jar to a HTTP request cookie header value.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.\n\nReturns:\nstr: The HTTP cookie header value.", "source": "codesearchnet"}
{"code": "def _render(self):\n        \n        message = Message()\n        message.add(Heading(tr('Problem'), **ORANGE_LEVEL_4_STYLE))\n        message.add(Paragraph(tr(\n            'The following problem(s) were encountered whilst running the '\n            'analysis.')))\n        items = BulletedList()\n        for p in reversed(self.problems):\n            \n            items.add(p)\n        message.add(items)\n\n        message.add(Heading(tr('Suggestion'), **GREEN_LEVEL_4_STYLE))\n        message.add(Paragraph(tr(\n            'You can try the following to resolve the issue:')))\n        if len(self.suggestions) < 1:\n            suggestions = self.standard_suggestions()\n            message.add(suggestions)\n        else:\n            items = BulletedList()\n            for s in reversed(self.suggestions):\n                if s is not None:\n                    items.add(s)\n            message.add(items)\n\n        if len(self.details) > 0:\n            items = BulletedList()\n            message.add(Heading(\n                tr('Details'), **ORANGE_LEVEL_5_STYLE))\n            message.add(Paragraph(tr(\n                'These additional details were reported when the problem '\n                'occurred.')))\n            for d in self.details:\n                if d is not None:\n                    items.add(d)\n            message.add(items)\n\n        message.add(Heading(tr(\n            'Diagnostics'), **TRACEBACK_STYLE))\n        message.add(self.tracebacks)\n        return message", "docstring": "Create a Message version of this ErrorMessage\n\nArgs:\nnone\n\nReturns:\nthe Message instance of this ErrorMessage\n\nRaises:\nErrors are propagated", "source": "juraj-google-style"}
{"code": "def set_inter_op_parallelism_threads(num_threads):\n    context.context().inter_op_parallelism_threads = num_threads", "docstring": "Set number of threads used for parallelism between independent operations.\n\nDetermines the number of threads used by independent non-blocking operations.\n0 means the system picks an appropriate number.\n\nArgs:\nnum_threads: Number of parallel threads", "source": "github-repos"}
{"code": "def create_timer(cb: Callable[[float], None], interval: float,\n                 delay_policy: TimerDelayPolicy = TimerDelayPolicy.DEFAULT,\n                 loop: Optional[asyncio.BaseEventLoop] = None) -> asyncio.Task:\n    \n    if not loop:\n        loop = asyncio.get_event_loop()\n\n    async def _timer():\n        fired_tasks = []\n        try:\n            while True:\n                if delay_policy == TimerDelayPolicy.CANCEL:\n                    for t in fired_tasks:\n                        if not t.done():\n                            t.cancel()\n                            await t\n                    fired_tasks.clear()\n                else:\n                    fired_tasks[:] = [t for t in fired_tasks if not t.done()]\n                t = loop.create_task(cb(interval=interval))\n                fired_tasks.append(t)\n                await asyncio.sleep(interval)\n        except asyncio.CancelledError:\n            for t in fired_tasks:\n                t.cancel()\n            await asyncio.gather(*fired_tasks)\n\n    return loop.create_task(_timer())", "docstring": "Schedule a timer with the given callable and the interval in seconds.\nThe interval value is also passed to the callable.\nIf the callable takes longer than the timer interval, all accumulated\ncallable's tasks will be cancelled when the timer is cancelled.\n\nArgs:\ncb: TODO - fill argument descriptions\n\nReturns:\nYou can stop the timer by cancelling the returned task.", "source": "juraj-google-style"}
{"code": "def dbmin_mean(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dbmin_mean`'.format(value))\n    self._dbmin_mean = value", "docstring": "Corresponds to IDD Field `dbmin_mean`\nMean of extreme annual minimum dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin_mean`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def gunzip_file(gz_path, new_path):\n  \n  if tf.gfile.Exists(new_path):\n    tf.logging.info(\"File %s already exists, skipping unpacking\" % new_path)\n    return\n  tf.logging.info(\"Unpacking %s to %s\" % (gz_path, new_path))\n  \n  mode = stat.S_IRWXU or stat.S_IXGRP or stat.S_IRGRP or stat.S_IROTH\n  os.chmod(os.path.dirname(new_path), mode)\n  with gzip.open(gz_path, \"rb\") as gz_file:\n    with tf.gfile.GFile(new_path, mode=\"wb\") as new_file:\n      for line in gz_file:\n        new_file.write(line)", "docstring": "Unzips from gz_path into new_path.\n\nArgs:\ngz_path: path to the zipped file.\nnew_path: path to where the file will be unzipped.", "source": "juraj-google-style"}
{"code": "def script(experiment, projects):\n    \n    benchbuild_c = local[local.path(sys.argv[0])]\n    slurm_script = local.cwd / experiment.name + \"-\" + str(\n        CFG['slurm']['script'])\n\n    srun = local[\"srun\"]\n    srun_args = []\n    if not CFG[\"slurm\"][\"multithread\"]:\n        srun_args.append(\"--hint=nomultithread\")\n    if not CFG[\"slurm\"][\"turbo\"]:\n        srun_args.append(\"--pstate-turbo=off\")\n\n    srun = srun[srun_args]\n    srun = srun[benchbuild_c[\"run\"]]\n\n    return __save__(slurm_script, srun, experiment, projects)", "docstring": "Prepare a slurm script that executes the experiment for a given project.\n\nArgs:\nexperiment: The experiment we want to execute\nprojects: All projects we generate an array job for.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ReadRows = channel.unary_stream(\n            \"/google.bigtable.v2.Bigtable/ReadRows\",\n            request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString,\n        )\n        self.SampleRowKeys = channel.unary_stream(\n            \"/google.bigtable.v2.Bigtable/SampleRowKeys\",\n            request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString,\n        )\n        self.MutateRow = channel.unary_unary(\n            \"/google.bigtable.v2.Bigtable/MutateRow\",\n            request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString,\n        )\n        self.MutateRows = channel.unary_stream(\n            \"/google.bigtable.v2.Bigtable/MutateRows\",\n            request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString,\n        )\n        self.CheckAndMutateRow = channel.unary_unary(\n            \"/google.bigtable.v2.Bigtable/CheckAndMutateRow\",\n            request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString,\n        )\n        self.ReadModifyWriteRow = channel.unary_unary(\n            \"/google.bigtable.v2.Bigtable/ReadModifyWriteRow\",\n            request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def extend(*args):\n    if (not args):\n        return {}\n    first = args[0]\n    rest = args[1:]\n    out = type(first)(first)\n    for each in rest:\n        out.update(each)\n    return out", "docstring": "shallow dictionary merge\n\nArgs:\na: dict to extend\nb: dict to apply to a\n\nReturns:\nnew instance of the same type as _a_, with _a_ and _b_ merged.", "source": "codesearchnet"}
{"code": "def from_config(cls, config):\n    config.pop('dtype', None)\n    return cls(**config)", "docstring": "Instantiates an initializer from a configuration dictionary.\n\nExample:\n\n```python\ninitializer = RandomUniform(-1, 1)\nconfig = initializer.get_config()\ninitializer = RandomUniform.from_config(config)\n```\n\nArgs:\nconfig: A Python dictionary, the output of `get_config`.\n\nReturns:\nA `tf.keras.initializers.Initializer` instance.", "source": "github-repos"}
{"code": "def opensearch(self, query, results=10, redirect=True):\n    self._check_query(query, 'Query must be specified')\n    query_params = {'action': 'opensearch', 'search': query, 'limit': (100 if (results > 100) else results), 'redirects': ('resolve' if redirect else 'return'), 'warningsaserror': True, 'namespace': ''}\n    results = self.wiki_request(query_params)\n    self._check_error_response(results, query)\n    res = list()\n    for (i, item) in enumerate(results[1]):\n        res.append((item, results[2][i], results[3][i]))\n    return res", "docstring": "Execute a MediaWiki opensearch request, similar to search box\nsuggestions and conforming to the OpenSearch specification\n\nArgs:\nquery (str): Title to search for\nresults (int): Number of pages within the radius to return\nredirect (bool): If **False** return the redirect itself, \\\notherwise resolve redirects\nReturns:\nList: List of results that are stored in a tuple \\\n(Title, Summary, URL)", "source": "codesearchnet"}
{"code": "def has_same_sumformula(self, other):\n    same_atoms = True\n    for atom in set(self['atom']):\n        own_atom_number = len(self[(self['atom'] == atom)])\n        other_atom_number = len(other[(other['atom'] == atom)])\n        same_atoms = (own_atom_number == other_atom_number)\n        if (not same_atoms):\n            break\n    return same_atoms", "docstring": "Determines if ``other``  has the same sumformula\n\nArgs:\nother (molecule):\n\nReturns:\nbool:", "source": "codesearchnet"}
{"code": "def get_oauth_data(self, code, client_id, client_secret, state):\n        \n        request = self._get_request()\n        response = request.post(self.OAUTH_TOKEN_URL, {\n            \"state\": state,\n            \"code\": code,\n            \"grant_type\": \"authorization_code\",\n            \"client_id\": client_id,\n            \"client_secret\": client_secret\n        })\n        return HSAccessTokenAuth.from_response(response)", "docstring": "Get Oauth data from HelloSign\n\nArgs:\n\ncode (str):             Code returned by HelloSign for our callback url\n\nclient_id (str):        Client id of the associated app\n\nclient_secret (str):    Secret token of the associated app\n\nReturns:\nA HSAccessTokenAuth object", "source": "juraj-google-style"}
{"code": "def random_int_generator(maxrange):\n    \n    try:\n        return random.randint(0,maxrange)\n    except:\n        line, filename, synerror = trace()\n        raise ArcRestHelperError({\n                    \"function\": \"random_int_generator\",\n                    \"line\": line,\n                    \"filename\":  filename,\n                    \"synerror\": synerror,\n                                    }\n                                    )\n    finally:\n        pass", "docstring": "Generates a random integer from 0 to `maxrange`, inclusive.\n\nArgs:\nmaxrange (int): The upper range of integers to randomly choose.\n\nReturns:\nint: The randomly generated integer from :py:func:`random.randint`.\n\nExamples:\n>>> arcresthelper.common.random_int_generator(15)\n9", "source": "juraj-google-style"}
{"code": "def get_airport_metars_hist(self, iata):\n    url = (AIRPORT_BASE.format(iata) + '/weather')\n    return self._fr24.get_airport_metars_hist(url)", "docstring": "Retrieve the metar data for past 72 hours. The data will not be parsed to readable format.\n\nGiven the IATA code of an airport, this method returns the metar information for last 72 hours.\n\nArgs:\niata (str): The IATA code for an airport, e.g. HYD\n\nReturns:\nThe metar data for the airport\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_airport_metars_hist('HYD')", "source": "codesearchnet"}
{"code": "def packVersion(major, minor=0, patch=0):\n    \n\n    ret = patch & mask20\n    ret = ret | (minor & mask20) << 20\n    ret = ret | (major & mask20) << 20 * 2\n    return ret", "docstring": "Pack a set of major/minor/patch integers into a single integer for storage.\n\nArgs:\nmajor (int): Major version level integer.\nminor (int): Minor version level integer.\npatch (int): Patch version level integer.\n\nReturns:\nint:  System normalized integer value to represent a software version.", "source": "juraj-google-style"}
{"code": "def bin_hash160(string):\n    \n    intermed = hashlib.sha256(string).digest()\n    return hashlib.new('ripemd160', intermed).hexdigest()", "docstring": "Get a hash of the provided message using the ripemd160 algorithm.\n\nArgs:\nstring (str): message to hash.\n\nReturns:\nstr: hash as a double digit hex string.", "source": "juraj-google-style"}
{"code": "def get_coords(variant):\n    \n    coordinates = {\n        'chrom': None,\n        'end_chrom': None,\n        'sv_length': None,\n        'sv_type': None,\n        'pos': None,\n        'end': None,\n    }\n    chrom = variant.CHROM\n    if chrom.startswith(('chr', 'CHR', 'Chr')):\n        chrom = chrom[3:]\n    coordinates['chrom'] = chrom\n    end_chrom = chrom\n    \n    pos = int(variant.POS)\n    alt = variant.ALT[0]\n\n    \n    \n    end_pos = variant.INFO.get('END')\n    if end_pos:\n        end = int(end_pos)\n    else:\n        end = int(variant.end)\n    coordinates['end'] = end\n    \n    sv_type = variant.INFO.get('SVTYPE')\n    length = variant.INFO.get('SVLEN')\n    if length:\n        sv_len = abs(length)\n    else:\n        sv_len = end - pos\n\n    \n    if sv_type == 'BND':\n        other_coordinates = alt.strip('ACGTN[]').split(':')\n        end_chrom = other_coordinates[0]\n        if end_chrom.startswith(('chr', 'CHR', 'Chr')):\n            end_chrom = end_chrom[3:]\n\n        end = int(other_coordinates[1])\n\n        \n        sv_len = float('inf')\n\n    \n    if (sv_len == 0 and alt != '<INS>'):\n        sv_len = len(alt)\n\n    if (pos == end) and (sv_len > 0):\n        end = pos + sv_len\n\n    position = Position(chrom, pos)\n    end_position = Position(end_chrom, end)\n    \n    \n    if is_greater(position, end_position):\n        end_chrom = position.chrom\n        end = position.pos\n        \n        chrom = end_position.chrom\n        pos = end_position.pos\n    \n    coordinates['end_chrom'] = end_chrom\n    coordinates['pos'] = pos\n    coordinates['end'] = end\n    coordinates['sv_length'] = sv_len\n    coordinates['sv_type'] = sv_type\n    \n    return coordinates", "docstring": "Returns a dictionary with position information\n\nArgs:\nvariant(cyvcf2.Variant)\n\nReturns:\ncoordinates(dict)", "source": "juraj-google-style"}
{"code": "def get_instance(name, cls='system', storage=None, storage_parameters=None, unsecure=None, *args, **kwargs):\n    system_parameters = _system_parameters(unsecure=unsecure, storage_parameters=storage_parameters)\n    with _MOUNT_LOCK:\n        for root in MOUNTED:\n            if ((isinstance(root, Pattern) and root.match(name)) or ((not isinstance(root, Pattern)) and name.startswith(root))):\n                info = MOUNTED[root]\n                stored_parameters = (info.get('system_parameters') or dict())\n                if (not system_parameters):\n                    same_parameters = True\n                    system_parameters = stored_parameters\n                elif (system_parameters == stored_parameters):\n                    same_parameters = True\n                else:\n                    same_parameters = False\n                    system_parameters.update({key: value for (key, value) in stored_parameters.items() if (key not in system_parameters)})\n                break\n        else:\n            mount_info = mount(storage=storage, name=name, **system_parameters)\n            info = mount_info[tuple(mount_info)[0]]\n            same_parameters = True\n    if (cls == 'system'):\n        if same_parameters:\n            return info['system_cached']\n        else:\n            return info['system'](roots=info['roots'], **system_parameters)\n    if same_parameters:\n        if ('storage_parameters' not in system_parameters):\n            system_parameters['storage_parameters'] = dict()\n        system_parameters['storage_parameters']['pycosio.system_cached'] = info['system_cached']\n    kwargs.update(system_parameters)\n    return info[cls](*args, name=name, **kwargs)", "docstring": "Get a cloud object storage instance.\n\nArgs:\nname (str): File name, path or URL.\ncls (str): Type of class to instantiate.\n'raw', 'buffered' or 'system'.\nstorage (str): Storage name.\nstorage_parameters (dict): Storage configuration parameters.\nGenerally, client configuration and credentials.\nunsecure (bool): If True, disables TLS/SSL to improves\ntransfer performance. But makes connection unsecure.\nDefault to False.\nargs, kwargs: Instance arguments\n\nReturns:\npycosio._core.io_base.ObjectIOBase subclass: Instance", "source": "codesearchnet"}
{"code": "def run_program(self, src, filename, maximum_depth):\n    self.filename = filename\n    self._maximum_depth = maximum_depth\n    src = preprocess.augment_annotations(src)\n    src_tree = directors.parse_src(src, self.ctx.python_version)\n    code = self.compile_src(src, filename=filename, store_blockgraph=True)\n    director = directors.Director(src_tree, self.ctx.errorlog, filename, self.ctx.options.disable)\n    self.ctx.errorlog.set_error_filter(director.filter_error)\n    self._director = director\n    self.ctx.options.set_feature_flags(director.features)\n    self._branch_tracker = pattern_matching.BranchTracker(director.matches, self.ctx)\n    code = process_blocks.merge_annotations(code, self._director.annotations, self._director.param_annotations)\n    visitor = vm_utils.FindIgnoredTypeComments(self._director.type_comments)\n    pyc.visit(code, visitor)\n    for line in visitor.ignored_lines():\n        self.ctx.errorlog.ignored_type_comment(self.filename, line, self._director.type_comments[line])\n    if self.ctx.options.debug_constant_folding:\n        before = _bytecode_to_string(code)\n        code = constant_folding.fold_constants(code)\n        after = _bytecode_to_string(code)\n        print('\\n'.join(difflib.unified_diff(before.splitlines(), after.splitlines())))\n    else:\n        code = constant_folding.fold_constants(code)\n    process_blocks.adjust_returns(code, self._director.block_returns)\n    node, f_globals, f_locals, _ = self.run_bytecode(self.ctx.root_node, code)\n    logging.info('Done running bytecode, postprocessing globals')\n    for annot in itertools.chain.from_iterable(self.late_annotations.values()):\n        annot.resolve(node, f_globals, f_locals)\n        self.flatten_late_annotation(node, annot, f_globals)\n    self.late_annotations = None\n    assert not self.frames, 'Frames left over!'\n    log.info('Final node: <%d>%s', node.id, node.name)\n    return (node, f_globals.members)", "docstring": "Run the code and return the CFG nodes.\n\nArgs:\nsrc: The program source code.\nfilename: The filename the source is from.\nmaximum_depth: Maximum depth to follow call chains.\n\nReturns:\nA tuple (CFGNode, set) containing the last CFGNode of the program as\nwell as all the top-level names defined by it.", "source": "github-repos"}
{"code": "def asn(self, as_number, **kwargs):\n    indicator_obj = ASN(as_number, **kwargs)\n    return self._indicator(indicator_obj)", "docstring": "Add ASN data to Batch object.\n\nArgs:\nas_number (str): The value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of ASN.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, item_class, offset=0):\n        \n        begin = offset\n        limit_buff = len(buff)\n\n        while begin < limit_buff:\n            item = item_class()\n            item.unpack(buff, begin)\n            self.append(item)\n            begin += item.get_size()", "docstring": "Unpack the elements of the list.\n\nArgs:\nbuff (bytes): The binary data to be unpacked.\nitem_class (:obj:`type`): Class of the expected items on this list.\noffset (int): If we need to shift the beginning of the data.", "source": "juraj-google-style"}
{"code": "def add_paths_argument(cls, group, argname, dest=None, help_=None):\n        \n        prefixed = '%s-%s' % (cls.argument_prefix, argname)\n        if dest is None:\n            dest = prefixed.replace('-', '_')\n            final_dest = dest[len(cls.argument_prefix) + 1:]\n        else:\n            final_dest = dest\n            dest = '%s_%s' % (cls.argument_prefix, dest)\n\n        group.add_argument('--%s' % prefixed, action='store', nargs='+',\n                           dest=dest, help=help_)\n        cls.paths_arguments[dest] = final_dest", "docstring": "Subclasses may call this to expose a paths argument.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nargname: str, the name of the argument, will be namespaced.\ndest: str, similar to the `dest` argument of\n`argparse.ArgumentParser.add_argument`, will be namespaced.\nhelp_: str, similar to the `help` argument of\n`argparse.ArgumentParser.add_argument`.", "source": "juraj-google-style"}
{"code": "def inflate_nd_checker(identifier, definition):\n        \n        if isinstance(definition, bool):\n            return Checker(name=identifier, passes=definition)\n        elif isinstance(definition, dict):\n            return Checker(definition.pop('name', identifier), **definition)\n        else:\n            raise ValueError('%s type is not supported for no-data checkers, '\n                             'use bool or dict' % type(definition))", "docstring": "Inflate a no-data checker from a basic definition.\n\nArgs:\nidentifier (str): the no-data checker identifier / name.\ndefinition (bool/dict): a boolean acting as \"passes\" or a full\ndict definition with \"passes\" and \"allow_failure\".\n\nReturns:\nChecker: a checker instance.\n\nRaises:\nValueError: when the definition type is not bool or dict.", "source": "juraj-google-style"}
{"code": "def write_json(data, path, file_name):\n    \n    if os.path.exists(path) and not os.path.isdir(path):\n        return\n    elif not os.path.exists(path):\n        mkdir_p(path)\n    with open(os.path.join(path, file_name), 'w') as f:\n        json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)", "docstring": "Write out data to a json file.\n\nArgs:\ndata: A dictionary representation of the data to write out\npath: The directory to output the file in\nfile_name: The name of the file to write out", "source": "juraj-google-style"}
{"code": "def __init__(self, metadata,\n               registry):\n    \n    self.metadata = metadata\n    self.fields = stats_utils.FieldDefinitionTuplesFromProtos(\n        metadata.fields_defs)\n    field_names = [name for name, _ in self.fields]\n\n    if metadata.metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:\n      self.metric = prometheus_client.Counter(\n          metadata.varname,\n          metadata.docstring,\n          labelnames=field_names,\n          registry=registry)\n    elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:\n      bins = metadata.bins or [\n          0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8,\n          9, 10, 15, 20, 50, 100\n      ]\n      self.metric = prometheus_client.Histogram(\n          metadata.varname,\n          metadata.docstring,\n          labelnames=field_names,\n          buckets=bins,\n          registry=registry)\n    elif metadata.metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:\n      self.metric = prometheus_client.Gauge(\n          metadata.varname,\n          metadata.docstring,\n          labelnames=field_names,\n          registry=registry)\n    else:\n      raise ValueError(\"Unknown metric type: {!r}\".format(metadata.metric_type))", "docstring": "Instantiates a new _Metric.\n\nArgs:\nmetadata: An rdf_stats.MetricMetadata instance describing this _Metric.\nregistry: A prometheus_client.Registry instance.\n\nRaises:\nValueError: metadata contains an unknown metric_type.", "source": "juraj-google-style"}
{"code": "def convert_to_numpy(cls, x):\n    return x", "docstring": "Convert a tensor to a NumPy array.\n\nOnly called after slicing using `__getitem__`.\n\nArgs:\nx: the tensor to convert.\nReturns: the converted tensor.", "source": "github-repos"}
{"code": "def _add_write_pbs(self, write_pbs):\n        \n        if self._read_only:\n            raise ValueError(_WRITE_READ_ONLY)\n\n        super(Transaction, self)._add_write_pbs(write_pbs)", "docstring": "Add `Write`` protobufs to this transaction.\n\nArgs:\nwrite_pbs (List[google.cloud.proto.firestore.v1beta1.\\\nwrite_pb2.Write]): A list of write protobufs to be added.\n\nRaises:\nValueError: If this transaction is read-only.", "source": "juraj-google-style"}
{"code": "def get_mapping_function(function_name, functions_mapping):\n    \n    if function_name in functions_mapping:\n        return functions_mapping[function_name]\n\n    elif function_name in [\"parameterize\", \"P\"]:\n        from httprunner import loader\n        return loader.load_csv_file\n\n    elif function_name in [\"environ\", \"ENV\"]:\n        return utils.get_os_environ\n\n    try:\n        \n        from httprunner import loader\n        built_in_functions = loader.load_builtin_functions()\n        return built_in_functions[function_name]\n    except KeyError:\n        pass\n\n    try:\n        \n        item_func = eval(function_name)\n        if callable(item_func):\n            \n            return item_func\n    except (NameError, TypeError):\n        \n        raise exceptions.FunctionNotFound(\"{} is not found.\".format(function_name))", "docstring": "get function from functions_mapping,\nif not found, then try to check if builtin function.\n\nArgs:\nvariable_name (str): variable name\nvariables_mapping (dict): variables mapping\n\nReturns:\nmapping function object.\n\nRaises:\nexceptions.FunctionNotFound: function is neither defined in debugtalk.py nor builtin.", "source": "juraj-google-style"}
{"code": "def setup_keyword(dist, _, value):\n    if (value is not True):\n        return\n    dist.entry_points = _ensure_entry_points_is_dict(dist.entry_points)\n    for (command, subcommands) in six.iteritems(_get_commands(dist)):\n        entry_point = '{command} = rcli.dispatcher:main'.format(command=command)\n        entry_points = dist.entry_points.setdefault('console_scripts', [])\n        if (entry_point not in entry_points):\n            entry_points.append(entry_point)\n        dist.entry_points.setdefault('rcli', []).extend(subcommands)", "docstring": "Add autodetected commands as entry points.\n\nArgs:\ndist: The distutils Distribution object for the project being\ninstalled.\n_: The keyword used in the setup function. Unused.\nvalue: The value set to the keyword in the setup function. If the value\nis not True, this function will do nothing.", "source": "codesearchnet"}
{"code": "def plot_pie(self, key='wall_time', minfract=0.05, **kwargs):\n    timers = self.timers()\n    n = len(timers)\n    import matplotlib.pyplot as plt\n    from matplotlib.gridspec import GridSpec\n    fig = plt.gcf()\n    gspec = GridSpec(n, 1)\n    for (idx, timer) in enumerate(timers):\n        ax = plt.subplot(gspec[(idx, 0)])\n        ax.set_title(str(timer))\n        timer.pie(ax=ax, key=key, minfract=minfract, show=False)\n    return fig", "docstring": "Plot pie charts of the different timers.\n\nArgs:\nkey: Keyword used to extract data from timers.\nminfract: Don't show sections whose relative weight is less that minfract.\n\nReturns:\n`matplotlib` figure", "source": "codesearchnet"}
{"code": "def _RemoveFromPool(self):\n    with self.pool.lock:\n        if (not self.pool.started):\n            return False\n        if (len(self.pool) <= self.pool.min_threads):\n            return False\n        self.pool._RemoveWorker(self.name)\n        return True", "docstring": "Remove ourselves from the pool.\n\nReturns:\nTrue if removal was possible, and False if it was not possible.", "source": "codesearchnet"}
{"code": "def partial_derivative_mu(mu, sigma, low, high, data):\n        \n        pd_mu = np.sum(data - mu) / sigma ** 2\n        pd_mu -= len(data) * ((norm.pdf(low, mu, sigma) - norm.pdf(high, mu, sigma))\n                              / (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))\n        return -pd_mu", "docstring": "The partial derivative with respect to the mean.\n\nArgs:\nmu (float): the mean of the truncated normal\nsigma (float): the std of the truncated normal\nlow (float): the lower truncation bound\nhigh (float): the upper truncation bound\ndata (ndarray): the one dimension list of data points for which we want to calculate the likelihood\n\nReturns:\nfloat: the partial derivative evaluated at the given point", "source": "juraj-google-style"}
{"code": "def _distance_graph(cls, inputs, clusters, distance_metric):\n    assert isinstance(inputs, list)\n    if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:\n        return cls._compute_euclidean_distance(inputs, clusters)\n    elif distance_metric == COSINE_DISTANCE:\n        return cls._compute_cosine_distance(inputs, clusters, inputs_normalized=True)\n    else:\n        assert False, str(distance_metric)", "docstring": "Computes distance between each input and each cluster center.\n\nArgs:\ninputs: list of input Tensors.\nclusters: cluster Tensor.\ndistance_metric: distance metric used for clustering\n\nReturns:\nlist of Tensors, where each element corresponds to each element in inputs.\nThe value is the distance of each row to all the cluster centers.\nCurrently only Euclidean distance and cosine distance are supported.", "source": "github-repos"}
{"code": "def throw(self, exception_class, should_throw):\n    return self.__copy_and_set('throws', (self._throws + [(exception_class, should_throw)]))", "docstring": "Defines if the an exception should be thrown after the request is sent\n\nArgs:\nexception_class (class): The class of the exception to instantiate\nshould_throw (function): The predicate that should indicate if the exception\nshould be thrown. This function will be called with the response as a parameter\n\nReturns:\nThe request builder instance in order to chain calls", "source": "codesearchnet"}
{"code": "def discount_bond_price(self, short_rate: types.RealTensor, times: types.RealTensor, maturities: types.RealTensor, name: str=None) -> types.RealTensor:\n    name = name or self._name + '_discount_bond_prices'\n    with tf.name_scope(name):\n        short_rate = tf.convert_to_tensor(short_rate, self._dtype)\n        times = tf.convert_to_tensor(times, self._dtype)\n        maturities = tf.convert_to_tensor(maturities, self._dtype)\n        input_shape_times = times.shape.as_list()\n        times_flat = tf.reshape(times, shape=[-1])\n        mean_reversion = self._mean_reversion(times_flat)\n        volatility = self._volatility(times_flat)\n        y_t = self._compute_yt(times_flat, mean_reversion, volatility)\n        mean_reversion = tf.reshape(tf.transpose(mean_reversion), input_shape_times + [self._dim])\n        y_t = tf.reshape(tf.transpose(y_t), input_shape_times + [self._dim])\n        values = self._bond_reconstitution(times, maturities, mean_reversion, short_rate, y_t)\n        return values", "docstring": "Returns zero-coupon bond prices `P(t,T)` conditional on `r(t)`.\n\nArgs:\nshort_rate: A `Tensor` of real dtype and shape `batch_shape + [dim]`\nspecifying the short rate `r(t)`.\ntimes: A `Tensor` of real dtype and shape `batch_shape`. The time `t`\nat which discount bond prices are computed.\nmaturities: A `Tensor` of real dtype and shape `batch_shape`. The time\nto maturity of the discount bonds.\nname: Str. The name to give this op.\nDefault value: `discount_bond_prices`.\n\nReturns:\nA `Tensor` of real dtype and the same shape as `batch_shape + [dim]`\ncontaining the price of zero-coupon bonds.", "source": "github-repos"}
{"code": "def _add_variable_proxy_methods(var, proxy_tensor):\n    proxy_tensor.read_value = (lambda : tf.identity(proxy_tensor))\n    proxy_tensor.assign_sub = var.assign_sub\n    proxy_tensor.assign = var.assign\n    proxy_tensor.initialized_value = var.initialized_value", "docstring": "Proxy methods of underlying variable.\n\nThis enables our custom getters to still work with, e.g., batch norm.\n\nArgs:\nvar: Variable to proxy\nproxy_tensor: Tensor that is identity of var", "source": "codesearchnet"}
{"code": "def DeserializeFrom(reader):\n        \n        ttype = reader.ReadByte()\n        tx = None\n\n        from neo.Core.TX.RegisterTransaction import RegisterTransaction\n        from neo.Core.TX.IssueTransaction import IssueTransaction\n        from neo.Core.TX.ClaimTransaction import ClaimTransaction\n        from neo.Core.TX.MinerTransaction import MinerTransaction\n        from neo.Core.TX.PublishTransaction import PublishTransaction\n        from neo.Core.TX.InvocationTransaction import InvocationTransaction\n        from neo.Core.TX.EnrollmentTransaction import EnrollmentTransaction\n        from neo.Core.TX.StateTransaction import StateTransaction\n\n        if ttype == int.from_bytes(TransactionType.RegisterTransaction, 'little'):\n            tx = RegisterTransaction()\n        elif ttype == int.from_bytes(TransactionType.MinerTransaction, 'little'):\n            tx = MinerTransaction()\n        elif ttype == int.from_bytes(TransactionType.IssueTransaction, 'little'):\n            tx = IssueTransaction()\n        elif ttype == int.from_bytes(TransactionType.ClaimTransaction, 'little'):\n            tx = ClaimTransaction()\n        elif ttype == int.from_bytes(TransactionType.PublishTransaction, 'little'):\n            tx = PublishTransaction()\n        elif ttype == int.from_bytes(TransactionType.InvocationTransaction, 'little'):\n            tx = InvocationTransaction()\n        elif ttype == int.from_bytes(TransactionType.EnrollmentTransaction, 'little'):\n            tx = EnrollmentTransaction()\n        elif ttype == int.from_bytes(TransactionType.StateTransaction, 'little'):\n            tx = StateTransaction()\n        else:\n            tx = Transaction()\n            tx.Type = ttype\n\n        tx.DeserializeUnsignedWithoutType(reader)\n\n        tx.scripts = []\n        byt = reader.ReadVarInt()\n\n        if byt > 0:\n            for i in range(0, byt):\n                witness = Witness()\n                witness.Deserialize(reader)\n\n                tx.scripts.append(witness)\n\n        tx.OnDeserialized()\n\n        return tx", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):\n\nReturns:\nTransaction:", "source": "juraj-google-style"}
{"code": "def buy(self, product_id, order_type, **kwargs):\n    return self.place_order(product_id, 'buy', order_type, **kwargs)", "docstring": "Place a buy order.\n\nThis is included to maintain backwards compatibility with older versions\nof cbpro-Python. For maximum support from docstrings and function\nsignatures see the order type-specific functions place_limit_order,\nplace_market_order, and place_stop_order.\n\nArgs:\nproduct_id (str): Product to order (eg. 'BTC-USD')\norder_type (str): Order type ('limit', 'market', or 'stop')\n**kwargs: Additional arguments can be specified for different order\ntypes.\n\nReturns:\ndict: Order details. See `place_order` for example.", "source": "codesearchnet"}
{"code": "def download_image(self, handle, dest):\n    with log_utils.LogTask(('Download image %s' % handle), logger=LOGGER):\n        self.open_url(url=handle, dest=dest)\n    self.extract_image_xz(dest)", "docstring": "Downloads the image from the http server\n\nArgs:\nhandle (str): url from the `self.baseurl` to the remote template\ndest (str): Path to store the downloaded url to, must be a file\npath\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def discover(package, cls_match_func):\n    \n    matched_classes = set()\n\n    for _, module_name, _ in pkgutil.walk_packages(\n            package.__path__,\n            prefix=package.__name__ + '.',\n    ):\n        module = __import__(module_name, fromlist=[str('__trash')], level=0)\n\n        \n        for _, imported_class in inspect.getmembers(module, inspect.isclass):\n            \n            \n            if imported_class.__module__ != module.__name__:\n                continue\n\n            if cls_match_func(imported_class):\n                matched_classes.add(imported_class)\n\n    return matched_classes", "docstring": "Returns a set of classes in the directory matched by cls_match_func\n\nArgs:\npath - A Python package\ncls_match_func - Function taking a class and returning true if the\nclass is to be included in the output.", "source": "juraj-google-style"}
{"code": "def _get_row_partition_type_tensor_pairs(rt_input):\n    partitions = rt_input._nested_row_partitions\n    tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]\n    if partitions[0]._value_rowids is not None:\n        return [('FIRST_DIM_SIZE', partitions[0].nrows()), ('VALUE_ROWIDS', partitions[0].value_rowids())] + tail\n    else:\n        return [('ROW_SPLITS', partitions[0].row_splits())] + tail", "docstring": "Gets a list of the row partitions for rt_input.\n\nIf value_rowids are defined, then they are used. Otherwise, row_splits\nare used. If the outermost level has value_rowids defind, then nrows is\nalso added.\n\nArgs:\nrt_input: a ragged tensor.\n\nReturns:\nA list of (row_partition_type, row_partition_tensor) pairs.", "source": "github-repos"}
{"code": "def get_details(self, ids):\n    if isinstance(ids, list):\n        if (len(ids) > 5):\n            ids = ids[:5]\n        id_param = (';'.join(ids) + '/')\n    else:\n        ids = str(ids)\n        id_param = (ids + '/')\n    (header, content) = self._http_request(id_param)\n    resp = json.loads(content)\n    if (not self._is_http_response_ok(header)):\n        error = resp.get('error_message', 'Unknown Error')\n        raise HttpException(header.status, header.reason, error)\n    return resp", "docstring": "Locu Venue Details API Call Wrapper\n\nArgs:\nlist of ids : ids of a particular venues to get insights about. Can process up to 5 ids", "source": "codesearchnet"}
{"code": "def __init__(self, scaffold=None, master='', config=None, max_wait_secs=30 * 60):\n    self._scaffold = scaffold or Scaffold()\n    self._session_manager = None\n    self._master = master\n    self._config = config\n    self._max_wait_secs = max_wait_secs", "docstring": "Initializes a worker session creator.\n\nArgs:\nscaffold: A `Scaffold` used for gathering or building supportive ops. If\nnot specified a default one is created. It's used to finalize the graph.\nmaster: `String` representation of the TensorFlow master to use.\nconfig: `ConfigProto` proto used to configure the session.\nmax_wait_secs: Maximum time to wait for the session to become available.", "source": "github-repos"}
{"code": "def stringize(\n        self,\n        rnf_profile=RnfProfile(),\n    ):\n        \n\n        sorted_segments = sorted(self.segments,\n         key=lambda x: (\n          x.genome_id * (10 ** 23) +\n          x.chr_id * (10 ** 21) +\n          (x.left + (int(x.left == 0) * x.right - 1)) * (10 ** 11) +\n          x.right * (10 ** 1) +\n          int(x.direction == \"F\")\n         )\n        )\n\n        segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]\n\n        read_tuple_name = \"__\".join(\n            [\n                self.prefix,\n                format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width),\n                \",\".join(segments_strings),\n                self.suffix,\n            ]\n        )\n\n        return read_tuple_name", "docstring": "Create RNF representation of this read.\n\nArgs:\nread_tuple_id_width (int): Maximal expected string length of read tuple ID.\ngenome_id_width (int): Maximal expected string length of genome ID.\nchr_id_width (int): Maximal expected string length of chromosome ID.\ncoor_width (int): Maximal expected string length of a coordinate.", "source": "juraj-google-style"}
{"code": "def implicit_static(cls, for_type=None, for_types=None):\n        \n        for type_ in cls.__get_type_args(for_type, for_types):\n            implementations = {}\n            for function in cls.required():\n                method = getattr(type_, function.__name__, None)\n                if not callable(method):\n                    raise TypeError(\n                        \"%s.implicit invokation on type %r is missing instance \"\n                        \"method %r.\"\n                        % (cls.__name__, type_, function.__name__))\n\n                implementations[function] = method\n\n            for function in cls.optional():\n                method = getattr(type_, function.__name__, None)\n\n                if callable(method):\n                    implementations[function] = method\n\n            return cls.implement(for_type=type_,\n                                 implementations=implementations)", "docstring": "Automatically generate implementations for a type.\n\nImplement the protocol for the 'for_type' type by dispatching each\nmember function of the protocol to an instance method of the same name\ndeclared on the type 'for_type'.\n\nArguments:\nfor_type: The type to implictly implement the protocol with.\n\nRaises:\nTypeError if not all implementations are provided by 'for_type'.", "source": "juraj-google-style"}
{"code": "def generate_pb_config(pb_id: str, pb_config: dict=None, workflow_config: dict=None) -> dict:\n    if (workflow_config is None):\n        workflow_config = dict()\n    if (pb_config is None):\n        pb_config = dict()\n    pb_type = pb_config.get('type', choice(PB_TYPES))\n    workflow_id = workflow_config.get('id')\n    if (workflow_id is None):\n        if (pb_type == 'offline'):\n            workflow_id = choice(OFFLINE_WORKFLOWS)\n        else:\n            workflow_id = choice(REALTIME_WORKFLOWS)\n    workflow_version = workflow_config.get('version', generate_version())\n    workflow_parameters = workflow_config.get('parameters', dict())\n    pb_data = dict(id=pb_id, version=__pb_version__, type=pb_type, priority=pb_config.get('priority', randint(0, 10)), dependencies=pb_config.get('dependencies', []), resources_required=pb_config.get('resources_required', []), workflow=dict(id=workflow_id, version=workflow_version, parameters=workflow_parameters))\n    return pb_data", "docstring": "Generate a PB configuration dictionary.\n\nArgs:\npb_id (str): Processing Block Id\npb_config (dict, optional) PB configuration.\nworkflow_config (dict, optional): Workflow configuration\n\nReturns:\ndict, PB configuration dictionary.", "source": "codesearchnet"}
{"code": "class InputExample:\n    example_id: str\n    question: str\n    contexts: list[str]\n    endings: list[str]\n    label: Optional[str]", "docstring": "A single training/test example for multiple choice\n\nArgs:\nexample_id: Unique id for the example.\nquestion: string. The untokenized text of the second sequence (question).\ncontexts: list of str. The untokenized text of the first sequence (context of corresponding question).\nendings: list of str. multiple choice's options. Its length must be equal to contexts' length.\nlabel: (Optional) string. The label of the example. This should be\nspecified for train and dev examples, but not for test examples.", "source": "github-repos"}
{"code": "def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):\n  \n  trial_hp_overrides = scoped_overrides.values()\n\n  \n  loop_hp = create_loop_hparams()\n  model_hp_name = trial_hp_overrides.get(\n      \"loop.generative_model_params\", loop_hp.generative_model_params)\n  model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)\n  base_algo_params_name = trial_hp_overrides.get(\n      \"loop.base_algo_params\", loop_hp.base_algo_params)\n  algo_hp = registry.hparams(base_algo_params_name)\n\n  \n  combined_hp = merge_unscoped_hparams(\n      zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))\n  combined_hp.override_from_dict(trial_hp_overrides)\n\n  \n  loop_hp, model_hp, algo_hp = (\n      split_scoped_hparams(HP_SCOPES, combined_hp))\n\n  \n  model_hp_name = \"model_hp_%s\" % str(trial_id)\n  dynamic_register_hparams(model_hp_name, model_hp)\n  loop_hp.generative_model_params = model_hp_name\n\n  \n  algo_hp_name = \"algo_hp_%s\" % str(trial_id)\n  dynamic_register_hparams(algo_hp_name, algo_hp)\n  loop_hp.base_algo_params = algo_hp_name\n\n  return loop_hp", "docstring": "Create HParams suitable for training loop from scoped HParams.\n\nArgs:\nscoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These\nparameters are overrides for the base HParams created by\ncreate_loop_hparams.\ntrial_id: str, trial identifier. This is used to register unique HParams\nnames for the underlying model and ppo HParams.\n\nReturns:\nHParams suitable for passing to training_loop.", "source": "juraj-google-style"}
{"code": "def _is_apk_install_success(stdout: bytes, stderr: str) -> bool:\n    if utils.grep('Failure', stdout):\n        return False\n    return any([not stderr, stderr == 'Success', 'waiting for device' in stderr])", "docstring": "Checks output of the adb install command and decides if install succeeded.\n\nArgs:\nstdout: string, the standard out output of an adb install command.\nstderr: string, the standard error output of an adb install command.\n\nReturns:\nTrue if the installation succeeded; False otherwise.", "source": "github-repos"}
{"code": "def remove_duplicate_sg(security_groups):\n    \n    for each_sg, duplicate_sg_name in SECURITYGROUP_REPLACEMENTS.items():\n        if each_sg in security_groups and duplicate_sg_name in security_groups:\n            LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate_sg_name, each_sg)\n            security_groups.remove(duplicate_sg_name)\n\n    return security_groups", "docstring": "Removes duplicate Security Groups that share a same name alias\n\nArgs:\nsecurity_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS\n\nReturns:\nsecurity_groups (list): A list of security groups with duplicate aliases removed", "source": "juraj-google-style"}
{"code": "def running_instances(self, context, process_name):\n        \n        handle = (id(context), process_name)\n        it = self.processes.get(handle, {}).itervalues()\n        entries = [x for x in it if x[0].poll() is None]\n        return entries", "docstring": "Get a list of running instances.\n\nArgs:\ncontext (`ResolvedContext`): Context the process is running in.\nprocess_name (str): Name of the process.\n\nReturns:\nList of (`subprocess.Popen`, start-time) 2-tuples, where start_time\nis the epoch time the process was added.", "source": "juraj-google-style"}
{"code": "def _FormatSocketUnixToken(self, token_data):\n    protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')\n    return {'protocols': protocol, 'family': token_data.socket_family, 'path': token_data.socket_path}", "docstring": "Formats an Unix socket token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    backup_alias_map = self._GetDataTypeMap('timemachine_backup_alias')\n\n    destinations = match.get('Destinations', [])\n    for destination in destinations:\n      backup_alias_data = destination.get('BackupAlias', b'')\n      try:\n        backup_alias = self._ReadStructureFromByteStream(\n            backup_alias_data, 0, backup_alias_map)\n        alias = backup_alias.string\n\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse backup alias value with error: {0!s}'.format(\n                exception))\n        alias = 'Unknown alias'\n\n      destination_identifier = (\n          destination.get('DestinationID', None) or 'Unknown device')\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.desc = 'TimeMachine Backup in {0:s} ({1:s})'.format(\n          alias, destination_identifier)\n      event_data.key = 'item/SnapshotDates'\n      event_data.root = '/Destinations'\n\n      snapshot_dates = destination.get('SnapshotDates', [])\n      for datetime_value in snapshot_dates:\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant TimeMachine entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad, num_iters):\n    graph = ops.Graph()\n    with graph.as_default():\n        outputs = build_graph(device, input_shape, variable, num_inputs, axis, grad)\n    config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)))\n    with session_lib.Session(graph=graph, config=config) as session:\n        variables.global_variables_initializer().run()\n        _ = session.run(outputs)\n        start_time = time.time()\n        for _ in range(num_iters):\n            _ = session.run(outputs)\n        duration = time.time() - start_time\n        print('%s shape:%d/%d var: %r \n    name_template = 'concat_bench_{device}_input_shape_{shape}_variable_{variable}_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}'\n    self.report_benchmark(name=name_template.format(device=device, num_inputs=num_inputs, variable=variable, grad=grad, shape=str(input_shape).replace(' ', ''), axis=str(axis), iters=num_iters))\n    return duration", "docstring": "Run the graph and print its execution time.\n\nArgs:\ndevice: string, the device to run on.\ninput_shape: shape of the input tensors.\nvariable: whether or not the input shape should be fixed\nnum_inputs: the number of inputs to concat\naxis: axis to be concat'ed\ngrad: if True compute the gradient\nnum_iters: number of steps to run.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def wait_for_boot_completion(self, timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):\n    timeout_start = time.time()\n    self.adb.wait_for_device(timeout=timeout)\n    while (time.time() < (timeout_start + timeout)):\n        try:\n            if self.is_boot_completed():\n                return\n        except adb.AdbError:\n            pass\n        time.sleep(5)\n    raise DeviceError(self, 'Booting process timed out')", "docstring": "Waits for Android framework to broadcast ACTION_BOOT_COMPLETED.\n\nThis function times out after 15 minutes.\n\nArgs:\ntimeout: float, the number of seconds to wait before timing out.\nIf not specified, no timeout takes effect.", "source": "codesearchnet"}
{"code": "def Open(self, file_object):\n    \n    if not file_object:\n      raise ValueError('Missing file-like object.')\n\n    \n    \n    \n\n    file_object.seek(0, os.SEEK_SET)\n    data = file_object.read(len(self._HEADER_SIGNATURE))\n\n    if data != self._HEADER_SIGNATURE:\n      file_object.close()\n      raise IOError('Unsupported SQLite database signature.')\n\n    with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n      self._temp_file_path = temp_file.name\n      while data:\n        temp_file.write(data)\n        data = file_object.read(self._COPY_BUFFER_SIZE)\n\n    self._connection = sqlite3.connect(self._temp_file_path)\n    self._connection.text_factory = bytes\n    self._cursor = self._connection.cursor()", "docstring": "Opens the database file object.\n\nArgs:\nfile_object (FileIO): file-like object.\n\nRaises:\nIOError: if the SQLite database signature does not match.\nOSError: if the SQLite database signature does not match.\nValueError: if the file-like object is invalid.", "source": "juraj-google-style"}
{"code": "def work_model_factory(*, validator=validators.is_work_model, **kwargs):\n    kwargs['ld_type'] = 'AbstractWork'\n    return _model_factory(validator=validator, **kwargs)", "docstring": "Generate a Work model.\n\nExpects ``data``, ``validator``, ``model_cls``, and ``ld_context``\nas keyword arguments.\n\nRaises:\n:exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword\nargument is given.", "source": "codesearchnet"}
{"code": "def _get_head_block(self, request):\n    if request.head_id:\n        if (self._id_regex.fullmatch(request.head_id) is None):\n            LOGGER.debug('Invalid head id requested: %s', request.head_id)\n            raise _ResponseFailed(self._status.NO_ROOT)\n        try:\n            return self._block_store[request.head_id]\n        except KeyError as e:\n            LOGGER.debug('Unable to find block \"%s\" in store', e)\n            raise _ResponseFailed(self._status.NO_ROOT)\n    else:\n        return self._get_chain_head()", "docstring": "Fetches the request specified head block, or the chain head.\n\nNote:\nThis method will fail if `_block_store` has not been set\n\nArgs:\nrequest (object): The parsed protobuf request object\n\nReturns:\nBlock: the block object at the head of the requested chain\n\nRaises:\nResponseFailed: Failed to retrieve a head block", "source": "codesearchnet"}
{"code": "def non_deterministic_ints(shape, dtype=dtypes.int64):\n    return gen_stateful_random_ops.non_deterministic_ints(shape=shape, dtype=dtype)", "docstring": "Non-deterministically generates some integers.\n\nThis op may use some OS-provided source of non-determinism (e.g. an RNG), so\neach execution will give different results.\n\nArgs:\nshape: the shape of the result.\ndtype: (optional) the dtype of the result.\n\nReturns:\na tensor whose element values are non-deterministically chosen.", "source": "github-repos"}
{"code": "def from_file_obj(cls, fp):\n    log.debug('Parsing email from file object')\n    try:\n        fp.seek(0)\n    except IOError:\n        pass\n    finally:\n        s = fp.read()\n    return cls.from_string(s)", "docstring": "Init a new object from a file-like object.\nNot for Outlook msg.\n\nArgs:\nfp (file-like object): file-like object of raw email\n\nReturns:\nInstance of MailParser", "source": "codesearchnet"}
{"code": "def find_library_windows(cls):\n        \n        dll = cls.get_appropriate_windows_sdk_name() + '.dll'\n        root = 'C:\\\\'\n        for d in os.listdir(root):\n            dir_path = os.path.join(root, d)\n\n            \n            if d.startswith('Program Files') and os.path.isdir(dir_path):\n                dir_path = os.path.join(dir_path, 'SEGGER')\n                if not os.path.isdir(dir_path):\n                    continue\n\n                \n                ds = filter(lambda x: x.startswith('JLink'), os.listdir(dir_path))\n                for jlink_dir in ds:\n                    \n                    \n                    lib_path = os.path.join(dir_path, jlink_dir, dll)\n                    if os.path.isfile(lib_path):\n                        yield lib_path", "docstring": "Loads the SEGGER DLL from the windows installation directory.\n\nOn Windows, these are found either under:\n- ``C:\\\\Program Files\\\\SEGGER\\\\JLink``\n- ``C:\\\\Program Files (x86)\\\\SEGGER\\\\JLink``.\n\nArgs:\ncls (Library): the ``Library`` class\n\nReturns:\nThe paths to the J-Link library files in the order that they are\nfound.", "source": "juraj-google-style"}
{"code": "def decrypt(key, ciphertext):\n    key = ''.join(key)\n    alphabet = string.ascii_letters\n    cipher_alphabet = (key.lower() + key.upper())\n    return ciphertext.translate(str.maketrans(cipher_alphabet, alphabet))", "docstring": "Decrypt Simple Substitution enciphered ``ciphertext`` using ``key``.\n\nExample:\n>>> decrypt(\"PQSTUVWXYZCODEBRAKINGFHJLM\", \"XUOOB\")\nHELLO\n\nArgs:\nkey (iterable): The key to use\nciphertext (str): The text to decrypt\n\nReturns:\nDecrypted ciphertext", "source": "codesearchnet"}
{"code": "def upgrade_code(self):\n    if (not self.__squid):\n        return ''\n    have_scan_key = '{0}\\\\{1}\\\\{2}'.format(self.__reg_hive, self.__reg_upgradecode_path, self.__reg_32bit)\n    if ((not self.__upgrade_codes) or (self.__reg_key_guid not in self.__upgrade_codes)):\n        try:\n            uc_handle = win32api.RegOpenKeyEx(getattr(win32con, self.__reg_hive), self.__reg_upgradecode_path, 0, (win32con.KEY_READ | self.__reg_32bit_access))\n        except pywintypes.error as exc:\n            if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):\n                log.warning('Not Found %s\\\\%s 32bit %s', self.__reg_hive, self.__reg_upgradecode_path, self.__reg_32bit)\n                return ''\n            raise\n        (squid_upgrade_code_all, _, _, suc_pytime) = zip(*win32api.RegEnumKeyEx(uc_handle))\n        if ((have_scan_key in self.__upgrade_code_have_scan) and (self.__upgrade_code_have_scan[have_scan_key] == (squid_upgrade_code_all, suc_pytime))):\n            log.debug('Scan skipped for upgrade codes, no changes (%s)', have_scan_key)\n            return ''\n        log.debug('Scan for upgrade codes (%s) for product codes', have_scan_key)\n        for upgrade_code_squid in squid_upgrade_code_all:\n            upgrade_code_guid = self.__squid_to_guid(upgrade_code_squid)\n            pc_handle = win32api.RegOpenKeyEx(uc_handle, upgrade_code_squid, 0, (win32con.KEY_READ | self.__reg_32bit_access))\n            (_, pc_val_count, _) = win32api.RegQueryInfoKey(pc_handle)\n            for item_index in range(pc_val_count):\n                product_code_guid = self.__squid_to_guid(win32api.RegEnumValue(pc_handle, item_index)[0])\n                if product_code_guid:\n                    self.__upgrade_codes[product_code_guid] = upgrade_code_guid\n            win32api.RegCloseKey(pc_handle)\n        win32api.RegCloseKey(uc_handle)\n        self.__upgrade_code_have_scan[have_scan_key] = (squid_upgrade_code_all, suc_pytime)\n    return self.__upgrade_codes.get(self.__reg_key_guid, '')", "docstring": "For installers which follow the Microsoft Installer standard, returns\nthe ``Upgrade code``.\n\nReturns:\nvalue (str): ``Upgrade code`` GUID for installed software.", "source": "codesearchnet"}
{"code": "def get_arguments(context):\n    context.assert_key_has_value(key='pype', caller=__name__)\n    pype = context.get_formatted('pype')\n    try:\n        pipeline_name = pype['name']\n        if (pipeline_name is None):\n            raise KeyInContextHasNoValueError(\"pypyr.steps.pype ['pype']['name'] exists but is empty.\")\n    except KeyError as err:\n        raise KeyNotInContextError(\"pypyr.steps.pype missing 'name' in the 'pype' context item. You need to specify the pipeline name to run another pipeline.\") from err\n    use_parent_context = pype.get('useParentContext', True)\n    pipe_arg = pype.get('pipeArg', None)\n    skip_parse = pype.get('skipParse', True)\n    raise_error = pype.get('raiseError', True)\n    loader = pype.get('loader', None)\n    return (pipeline_name, use_parent_context, pipe_arg, skip_parse, raise_error, loader)", "docstring": "Parse arguments for pype from context and assign default values.\n\nArgs:\ncontext: pypyr.context.Context. context is mandatory.\n\nReturns:\ntuple (pipeline_name, #str\nuse_parent_context, #bool\npipe_arg, #str\nskip_parse, #bool\nraise_error #bool\n)\n\nRaises:\npypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.\npypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but\nis None.", "source": "codesearchnet"}
{"code": "def get_protocol_version(protocol=None, target=None):\n    \n\n    target = get_py_internals(target)\n\n    if protocol is None:\n        protocol = target['pickle_default_protocol']\n\n    if protocol > cPickle.HIGHEST_PROTOCOL:\n        warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)\n        protocol = cPickle.HIGHEST_PROTOCOL\n\n    target_highest_protocol = target['pickle_highest_protocol']\n    if protocol > target_highest_protocol:\n        warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol)\n        protocol = target_highest_protocol\n\n    return protocol", "docstring": "Return a suitable pickle protocol version for a given target.\n\nArguments:\ntarget: The internals description of the targeted python\nversion. If this is ``None`` the specification of the currently\nrunning python version will be used.\nprotocol(None or int): The requested protocol version (or None for the\ndefault of the target python version).\n\nReturns:\nint: A suitable pickle protocol version.", "source": "juraj-google-style"}
{"code": "def preprocess_input(x, data_format=None):\n    return x", "docstring": "A placeholder method for backward compatibility.\n\nThe preprocessing logic has been included in the convnext model\nimplementation. Users are no longer required to call this method to\nnormalize the input data. This method does nothing and only kept as a\nplaceholder to align the API surface between old and new version of model.\n\nArgs:\nx: A floating point `numpy.array` or a tensor.\ndata_format: Optional data format of the image tensor/array. Defaults to\nNone, in which case the global setting\n`keras.backend.image_data_format()` is used\n(unless you changed it, it defaults to `\"channels_last\"`).{mode}\n\nReturns:\nUnchanged `numpy.array` or tensor.", "source": "github-repos"}
{"code": "def generate_packer_filename(provider, region, builder):\n    filename = '{0}_{1}_{2}.json'.format(provider, region, builder)\n    return filename", "docstring": "Generate a filename to be used by packer.\n\nArgs:\nprovider (str): Name of Spinnaker provider.\nregion (str): Name of provider region to use.\nbuilder (str): Name of builder process type.\n\nReturns:\nstr: Generated filename based on parameters.", "source": "codesearchnet"}
{"code": "def get_ip_address_country(ip_address, parallel=False):\n    \n    def download_country_database(location=\"GeoLite2-Country.mmdb\"):\n        \n        if parallel:\n            logging.warning(\"Cannot download GeoIP database in parallel mode\")\n            return\n        url = \"https:\n              \"GeoLite2-Country.tar.gz\"\n        \n        headers = {\"User-Agent\": USER_AGENT}\n        original_filename = \"GeoLite2-Country.mmdb\"\n        try:\n            response = requests.get(url, headers=headers)\n            response.raise_for_status()\n            tar_bytes = response.content\n            tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode=\"r:gz\")\n            tar_dir = tar_file.getnames()[0]\n            tar_path = \"{0}/{1}\".format(tar_dir, original_filename)\n            tar_file.extract(tar_path)\n            shutil.move(tar_path, location)\n            shutil.rmtree(tar_dir)\n        except Exception as e:\n            logger.warning(\"Error downloading {0}: {1}\".format(url,\n                                                               e.__str__()))\n\n    system_paths = [\n        \"GeoLite2-Country.mmdb\",\n        \"/usr/local/share/GeoIP/GeoLite2-Country.mmdb\",\n        \"/usr/share/GeoIP/GeoLite2-Country.mmdb\",\n        \"/var/lib/GeoIP/GeoLite2-Country.mmdb\",\n        \"/var/local/lib/GeoIP/GeoLite2-Country.mmdb\",\n        \"C:\\\\GeoIP\\\\GeoLite2-Country.mmdb\"\n    ]\n\n    db_path = None\n\n    for system_path in system_paths:\n        if os.path.exists(system_path):\n            db_path = system_path\n            break\n\n    if db_path is None:\n        db_path = os.path.join(tempdir, \"GeoLite2-Country.mmdb\")\n        if not os.path.exists(db_path):\n            download_country_database(db_path)\n            if not os.path.exists(db_path):\n                return None\n        else:\n            db_age = datetime.now() - datetime.fromtimestamp(\n                os.stat(db_path).st_mtime)\n            if db_age > timedelta(days=7):\n                download_country_database()\n        db_path = db_path\n\n    db_reader = geoip2.database.Reader(db_path)\n\n    country = None\n\n    try:\n        country = db_reader.country(ip_address).country.iso_code\n    except geoip2.errors.AddressNotFoundError:\n        pass\n\n    return country", "docstring": "Uses the MaxMind Geolite2 Country database to return the ISO code for the\ncountry associated with the given IPv4 or IPv6 address\n\nArgs:\nip_address (str): The IP address to query for\nparallel (bool): Parallel processing\n\nReturns:\nstr: And ISO country code associated with the given IP address", "source": "juraj-google-style"}
{"code": "def as_money(self, number, **options):\n    if isinstance(number, list):\n        return map((lambda val: self.as_money(val, **options)))\n    decimal = options.get('decimal')\n    number = self.parse(number, decimal)\n    if check_type(options, 'dict'):\n        options = self.settings['currency'].update(options)\n    formats = self._check_currency_format(options['format'])\n    use_format = (lambda num: (formats['pos'] if (num > 0) else (formats['neg'] if (num < 0) else formats['zero'])))(number)\n    precision = self._change_precision(number, options['precision'])\n    thousands = options['thousand']\n    decimal = options['decimal']\n    formater = self.format(abs(number), precision, thousands, decimal)\n    amount = use_format.replace('%s', options['symbol']).replace('%v', formater)\n    return amount", "docstring": "Format a number into currency.\n\nUsage: accounting.formatMoney(number, symbol, precision, thousandsSep,\ndecimalSep, format)\ndefaults: (0, \"$\", 2, \",\", \".\", \"%s%v\")\nLocalise by overriding the symbol, precision,\nthousand / decimal separators and format\nSecond param can be an object matching `settings.currency`\nwhich is the easiest way.\n\nArgs:\nnumber (TYPE): Description\nprecision (TYPE): Description\nthousand (TYPE): Description\ndecimal (TYPE): Description\n\nReturns:\nname (TYPE): Description", "source": "codesearchnet"}
{"code": "def sun_events(latitude, longitude, date, timezone=0, zenith=None):\n    return (sun_rise_set(latitude, longitude, date, 'rise', timezone, zenith), sun_rise_set(latitude, longitude, date, 'set', timezone, zenith))", "docstring": "Convenience function for calculating sunrise and sunset.\n\nCivil twilight starts/ends when the Sun's centre is 6 degrees below\nthe horizon.\n\nNautical twilight starts/ends when the Sun's centre is 12 degrees\nbelow the horizon.\n\nAstronomical twilight starts/ends when the Sun's centre is 18 degrees below\nthe horizon.\n\nArgs:\nlatitude (float): Location's latitude\nlongitude (float): Location's longitude\ndate (datetime.date): Calculate rise or set for given date\ntimezone (int): Offset from UTC in minutes\nzenith (str): Calculate rise/set events, or twilight times\n\nReturns:\ntuple of datetime.time: The time for the given events in the specified\ntimezone", "source": "codesearchnet"}
{"code": "def parse_functions(\n    bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors\n) -> Tuple[Parsed, Errors]:\n    \n    parens = char_locs[\"parens\"]\n\n    \n    if not parens:\n        bels_len = len(bels) - 1\n        span = (0, bels_len)\n        parsed[span] = {\n            \"name\": \"\".join(bels),\n            \"type\": \"Function\",\n            \"span\": span,\n            \"name_span\": (span),\n            \"function_level\": \"top\",\n        }\n        return parsed, errors\n\n    for sp in sorted(parens):  \n        ep, function_level = parens[sp]\n\n        \n        if bels[sp - 1] == \" \":\n            continue\n\n        \n        for i in range(sp - 1, 0, -1):\n            if bels[i] in [\" \", \",\", \"(\"]:  \n                if i < sp - 1:\n                    if ep == -1:\n                        span = (i + 1, len(bels) - 1)\n                    else:\n                        span = (i + 1, ep)\n\n                    parsed[span] = {\n                        \"name\": \"\".join(bels[i + 1 : sp]),\n                        \"type\": \"Function\",\n                        \"span\": span,\n                        \"name_span\": (i + 1, sp - 1),\n                        \"parens_span\": (sp, ep),\n                        \"function_level\": function_level,\n                    }\n                break\n        else:\n            if ep == -1:\n                span = (0, len(bels) - 1)\n            else:\n                span = (0, ep)\n\n            parsed[span] = {\n                \"name\": \"\".join(bels[0:sp]),\n                \"type\": \"Function\",\n                \"span\": span,\n                \"name_span\": (0, sp - 1),\n                \"parens_span\": (sp, ep),\n                \"function_level\": function_level,\n            }\n\n    return parsed, errors", "docstring": "Parse functions from BEL using paren, comma, quote character locations\n\nArgs:\nbels: BEL string as list of chars\nchar_locs: paren, comma, quote character locations\nerrors: Any error messages generated during the parse\n\nReturns:\n(functions, errors): function names and locations and error messages", "source": "juraj-google-style"}
{"code": "def url(self, pattern, method=None, name=None):\n\n    def _inner(call):\n        self._url_manager.add(pattern, method, call, name)\n        return call\n    return _inner", "docstring": "Decorator to map url pattern to the callable.\n\nArgs:\npattern (:obj:`str`): URL pattern to add. This is usually '/'\nseparated path. Parts of the URL can be parameterised using\ncurly braces.\nExamples: \"/\", \"/path/to/resource\", \"/resoures/{param}\"\nmethod (:obj:`str`, :obj:`list` of :obj:`str`, optional): HTTP\nmethods for the path specied. By default, GET method is added.\nValue can be either a single method, by passing a string, or\nmultiple methods, by passing a list of strings.\nname (:obj:`str`): Name for the pattern that can be used for\nreverse matching\n\nNote:\nA trailing '/' is always assumed in the pattern.\n\nExample:\n>>> @app.url(pattern='/path/to/resource', method='GET')\n>>> def function(ctx):\n>>>     return 'Hello world'\n\nSee Also:\n:func:`drongo.managers.url.UrlManager.add`", "source": "codesearchnet"}
{"code": "def _get_colordata(bs, elements, bs_projection):\n        \n        contribs = {}\n        if bs_projection and bs_projection.lower() == \"elements\":\n            projections = bs.get_projection_on_elements()\n\n        for spin in (Spin.up, Spin.down):\n            if spin in bs.bands:\n                contribs[spin] = []\n                for band_idx in range(bs.nb_bands):\n                    colors = []\n                    for k_idx in range(len(bs.kpoints)):\n                        if bs_projection and bs_projection.lower() == \"elements\":\n                            c = [0, 0, 0]\n                            projs = projections[spin][band_idx][k_idx]\n                            \n                            \n                            projs = dict(\n                                [(k, v ** 2) for k, v in projs.items()])\n                            total = sum(projs.values())\n                            if total > 0:\n                                for idx, e in enumerate(elements):\n                                    c[idx] = math.sqrt(projs[\n                                                           e] / total)  \n\n                            c = [c[1], c[2],\n                                 c[0]]  \n\n                        else:\n                            c = [0, 0, 0] if spin == Spin.up \\\n                                else [0, 0,\n                                      1]  \n\n                        colors.append(c)\n\n                    contribs[spin].append(colors)\n                contribs[spin] = np.array(contribs[spin])\n\n        return contribs", "docstring": "Get color data, including projected band structures\nArgs:\nbs: Bandstructure object\nelements: elements (in desired order) for setting to blue, red, green\nbs_projection: None for no projection, \"elements\" for element projection\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _alter_code(code, **attrs):\n    PyCode_New = ctypes.pythonapi.PyCode_New\n    PyCode_New.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.py_object, ctypes.c_int, ctypes.py_object)\n    PyCode_New.restype = ctypes.py_object\n    args = [[code.co_argcount, 'co_argcount'], [code.co_kwonlyargcount, 'co_kwonlyargcount'], [code.co_nlocals, 'co_nlocals'], [code.co_stacksize, 'co_stacksize'], [code.co_flags, 'co_flags'], [code.co_code, 'co_code'], [code.co_consts, 'co_consts'], [code.co_names, 'co_names'], [code.co_varnames, 'co_varnames'], [code.co_freevars, 'co_freevars'], [code.co_cellvars, 'co_cellvars'], [code.co_filename, 'co_filename'], [code.co_name, 'co_name'], [code.co_firstlineno, 'co_firstlineno'], [code.co_lnotab, 'co_lnotab']]\n    for arg in args:\n        if (arg[1] in attrs):\n            arg[0] = attrs[arg[1]]\n    return PyCode_New(args[0][0], args[1][0], args[2][0], args[3][0], args[4][0], args[5][0], args[6][0], args[7][0], args[8][0], args[9][0], args[10][0], args[11][0], args[12][0], args[13][0], args[14][0])", "docstring": "Create a new code object by altering some of ``code`` attributes\n\nArgs:\ncode: code objcect\nattrs: a mapping of names of code object attrs to their values", "source": "codesearchnet"}
{"code": "def all_cities():\n    cities = []\n    fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')\n    with open(fname, 'rU') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        for row in reader:\n            cities.append(row[0])\n    cities.sort()\n    return cities", "docstring": "Get a list of all Backpage city names.\n\nReturns:\nlist of city names as Strings", "source": "codesearchnet"}
{"code": "def BuildParamsWithMask(self, graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, extra_inputs, extra_outputs):\n\n    def _ValidateShapes(shapes):\n        for shape in shapes:\n            assert all(shape), f'Shape unspecified: {shape}'\n    _ValidateShapes(input_shapes)\n    _ValidateShapes(output_shapes)\n    assert len(input_mask) == len(input_shapes), f'Inconsistent input_mask and input_shapes: len({input_mask}) != len({input_shapes}).'\n    assert len(output_mask) == len(output_shapes), f'Inconsistent output_mask and output_shapes: len({output_mask}) != len({output_shapes}).'\n    for extra_in_shape, extra_out_shape in zip(extra_inputs, extra_outputs):\n        assert len(input_shapes) == len(extra_in_shape), f'Inconsistent input_shapes and extra_in_shape: len({input_shapes}) != len({extra_in_shape}).'\n        assert len(output_shapes) == len(extra_out_shape), f'Inconsistent output_shapes and extra_out_shape: len({output_shapes}) != len({extra_out_shape}).'\n    return TfTrtIntegrationTestParams(graph_fn=graph_fn, input_specs=[self._GetTensorSpec(shape, mask, dtype, 'input_%d' % i) for i, (shape, mask) in enumerate(zip(input_shapes, input_mask))], output_specs=[self._GetTensorSpec(shape, mask, dtype, 'output_%d' % i) for i, (shape, mask) in enumerate(zip(output_shapes, output_mask))], input_dims=[input_shapes] + extra_inputs, expected_output_dims=[output_shapes] + extra_outputs)", "docstring": "Build test parameters with static or dynamic input shapes.\n\nTo define dynamic shapes give a boolean mask that describes which\ndimensions to treat as known. The values in input_mask are interpreted the\nfollowing way:\n- True: known dim (use the corresponding value from input_shapes)\n- False: unknown dim (replace the corresponding value from input_shapes\nwith None)\nFor example, to define the first two dimension with unknown size use\ninput_shapes=[[1,2,1,8]], input_mask=[[False, False, True, True]].\n\nArgs:\ngraph_fn: The function to build the graph.\ndtype: The element type.\ninput_shapes: The input shapes.\noutput_shapes: The output shapes.\ninput_mask: The input shape masks.\noutput_mask: the output shape masks.\nextra_inputs: list of additional input shapes\nextra_outputs: list of additional outputs shapes\n\nReturns:\nThe test parameters.", "source": "github-repos"}
{"code": "def process_file(filename: str,\n                 filetypes: List[str],\n                 move_to: str,\n                 delete_if_not_specified_file_type: bool,\n                 show_zip_output: bool) -> None:\n    \n    \n    try:\n        reader = CorruptedOpenXmlReader(filename,\n                                        show_zip_output=show_zip_output)\n        if reader.file_type in filetypes:\n            log.info(\"Found {}: {}\", reader.description, filename)\n            if move_to:\n                dest_file = os.path.join(move_to, os.path.basename(filename))\n                _, ext = os.path.splitext(dest_file)\n                if ext != reader.suggested_extension():\n                    dest_file += reader.suggested_extension()\n                reader.move_to(destination_filename=dest_file)\n        else:\n            log.info(\"Unrecognized or unwanted contents: \" + filename)\n            if delete_if_not_specified_file_type:\n                log.info(\"Deleting: \" + filename)\n                os.remove(filename)\n    except Exception as e:\n        \n        \n        log.critical(\"Uncaught error in subprocess: {!r}\\n{}\", e,\n                     traceback.format_exc())\n        raise", "docstring": "Deals with an OpenXML, including if it is potentially corrupted.\n\nArgs:\nfilename: filename to process\nfiletypes: list of filetypes that we care about, e.g.\n``['docx', 'pptx', 'xlsx']``.\nmove_to: move matching files to this directory\ndelete_if_not_specified_file_type: if ``True``, and the file is **not**\na type specified in ``filetypes``, then delete the file.\nshow_zip_output: show the output from the external ``zip`` tool?", "source": "juraj-google-style"}
{"code": "def get_book_progress(self, asin):\n    kbp = self._get_api_call('get_book_progress', ('\"%s\"' % asin))\n    return KindleCloudReaderAPI._kbp_to_progress(kbp)", "docstring": "Returns the progress data available for a book.\n\nNOTE: A summary of the two progress formats can be found in the\ndocstring for `ReadingProgress`.\n\nArgs:\nasin: The asin of the book to be queried.\n\nReturns:\nA `ReadingProgress` instance corresponding to the book associated with\n`asin`.", "source": "codesearchnet"}
{"code": "def add_toolkit(topology, location):\n    \n    import streamsx.topology.topology\n    assert isinstance(topology, streamsx.topology.topology.Topology)\n    tkinfo = dict()\n    tkinfo['root'] = os.path.abspath(location)\n    topology.graph._spl_toolkits.append(tkinfo)", "docstring": "Add an SPL toolkit to a topology.\n\nArgs:\ntopology(Topology): Topology to include toolkit in.\nlocation(str): Location of the toolkit directory.", "source": "juraj-google-style"}
{"code": "def get_obj(self, objpath, metahash, dst_path):\n        \n        incachepath = self.path_in_cache(objpath, metahash)\n        if not os.path.exists(incachepath):\n            raise CacheMiss('%s not in cache.' % incachepath)\n        else:\n            log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())\n            if not os.path.exists(os.path.dirname(dst_path)):\n                os.makedirs(os.path.dirname(dst_path))\n            os.link(incachepath, dst_path)", "docstring": "Get object from cache, write it to dst_path.\n\nArgs:\nobjpath: filename relative to buildroot\n(example: mini-boot/blahblah/somefile.bin)\nmetahash: metahash. See targets/base.py\ndst_path: Absolute path where the file should be written.\nRaises:\nCacheMiss: if the item is not in the cache", "source": "juraj-google-style"}
{"code": "def is_generator_function(obj):\n    CO_GENERATOR = 32\n    return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and (obj.func_code.co_flags & CO_GENERATOR)))", "docstring": "Return true if the object is a user-defined generator function.\n\nGenerator function objects provides same attributes as functions.\nSee isfunction.__doc__ for attributes listing.\n\nAdapted from Python 2.6.\n\nArgs:\nobj: an object to test.\n\nReturns:\ntrue if the object is generator function.", "source": "codesearchnet"}
{"code": "def _log_submission(submission, student_item):\n    logger.info(u'Created submission uuid={submission_uuid} for (course_id={course_id}, item_id={item_id}, anonymous_student_id={anonymous_student_id})'.format(submission_uuid=submission['uuid'], course_id=student_item['course_id'], item_id=student_item['item_id'], anonymous_student_id=student_item['student_id']))", "docstring": "Log the creation of a submission.\n\nArgs:\nsubmission (dict): The serialized submission model.\nstudent_item (dict): The serialized student item model.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def GetFileEntryByPath(self, path):\n    \n    if path is None:\n      return None\n\n    file_entry_type, _ = self._paths.get(path, (None, None))\n    if not file_entry_type:\n      return None\n\n    path_spec = fake_path_spec.FakePathSpec(location=path)\n    return fake_file_entry.FakeFileEntry(\n        self._resolver_context, self, path_spec,\n        file_entry_type=file_entry_type)", "docstring": "Retrieves a file entry for a path.\n\nArgs:\npath (str): path of the file entry.\n\nReturns:\nFakeFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def pmean(tensor, axis_name=None):\n    if axis_name != _pmap_config.axis_name():\n        raise ValueError('axis_name (%s) is not equal to that of the surrounding pmap (%s)' % (axis_name, _pmap_config.axis_name()))\n    devices = _pmap_config.devices()\n    if devices is None:\n        raise ValueError(\"Can't retrieve the device list from the surrounding pmap\")\n    if tpu_devices(devices):\n        raise ValueError('pmean for TPU is not supported yet.')\n    else:\n        return gen_collective_ops.collective_reduce(input=tensor, group_size=len(devices), group_key=_GROUP_KEY, instance_key=_get_instance_key(), merge_op='Add', final_op='Div', subdiv_offsets=(0,))", "docstring": "Mean all-reduction.\n\nArgs:\ntensor: A tensor.\naxis_name: The axis name to reduce. Must equal to that of the surrounding\npmap.\n\nReturns:\nThe mean of the `tensor` replicas on each participating devices.", "source": "github-repos"}
{"code": "def defaults(cls, *options, **kwargs):\n        \n        if kwargs and len(kwargs) != 1 and list(kwargs.keys())[0] != 'backend':\n            raise Exception('opts.defaults only accepts \"backend\" keyword argument')\n\n        cls._linemagic(cls._expand_options(merge_options_to_dict(options)), backend=kwargs.get('backend'))", "docstring": "Set default options for a session.\n\nSet default options for a session. whether in a Python script or\na Jupyter notebook.\n\nArgs:\n*options: Option objects used to specify the defaults.\nbackend:  The plotting extension the options apply to", "source": "juraj-google-style"}
{"code": "def abort_expired_batches(self, request_timeout_ms, cluster):\n    expired_batches = []\n    to_remove = []\n    count = 0\n    for tp in list(self._batches.keys()):\n        assert (tp in self._tp_locks), 'TopicPartition not in locks dict'\n        if (tp in self.muted):\n            continue\n        with self._tp_locks[tp]:\n            dq = self._batches[tp]\n            for batch in dq:\n                is_full = bool((bool((batch != dq[(- 1)])) or batch.records.is_full()))\n                if batch.maybe_expire(request_timeout_ms, self.config['retry_backoff_ms'], self.config['linger_ms'], is_full):\n                    expired_batches.append(batch)\n                    to_remove.append(batch)\n                    count += 1\n                    self.deallocate(batch)\n                else:\n                    break\n            if to_remove:\n                for batch in to_remove:\n                    dq.remove(batch)\n                to_remove = []\n    if expired_batches:\n        log.warning('Expired %d batches in accumulator', count)\n    return expired_batches", "docstring": "Abort the batches that have been sitting in RecordAccumulator for\nmore than the configured request_timeout due to metadata being\nunavailable.\n\nArguments:\nrequest_timeout_ms (int): milliseconds to timeout\ncluster (ClusterMetadata): current metadata for kafka cluster\n\nReturns:\nlist of ProducerBatch that were expired", "source": "codesearchnet"}
{"code": "def on_snapshot(self, proto):\n    TargetChange = firestore_pb2.TargetChange\n    target_changetype_dispatch = {TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change, TargetChange.ADD: self._on_snapshot_target_change_add, TargetChange.REMOVE: self._on_snapshot_target_change_remove, TargetChange.RESET: self._on_snapshot_target_change_reset, TargetChange.CURRENT: self._on_snapshot_target_change_current}\n    target_change = proto.target_change\n    if str(target_change):\n        target_change_type = target_change.target_change_type\n        _LOGGER.debug(('on_snapshot: target change: ' + str(target_change_type)))\n        meth = target_changetype_dispatch.get(target_change_type)\n        if (meth is None):\n            _LOGGER.info(('on_snapshot: Unknown target change ' + str(target_change_type)))\n            self.close(reason=('Unknown target change type: %s ' % str(target_change_type)))\n        else:\n            try:\n                meth(proto)\n            except Exception as exc2:\n                _LOGGER.debug(('meth(proto) exc: ' + str(exc2)))\n                raise\n    elif str(proto.document_change):\n        _LOGGER.debug('on_snapshot: document change')\n        target_ids = (proto.document_change.target_ids or [])\n        removed_target_ids = (proto.document_change.removed_target_ids or [])\n        changed = False\n        removed = False\n        if (WATCH_TARGET_ID in target_ids):\n            changed = True\n        if (WATCH_TARGET_ID in removed_target_ids):\n            removed = True\n        if changed:\n            _LOGGER.debug('on_snapshot: document change: CHANGED')\n            document_change = proto.document_change\n            document = document_change.document\n            data = _helpers.decode_dict(document.fields, self._firestore)\n            document_name = document.name\n            db_str = self._firestore._database_string\n            db_str_documents = (db_str + '/documents/')\n            if document_name.startswith(db_str_documents):\n                document_name = document_name[len(db_str_documents):]\n            document_ref = self._firestore.document(document_name)\n            snapshot = self.DocumentSnapshot(reference=document_ref, data=data, exists=True, read_time=None, create_time=document.create_time, update_time=document.update_time)\n            self.change_map[document.name] = snapshot\n        elif removed:\n            _LOGGER.debug('on_snapshot: document change: REMOVED')\n            document = proto.document_change.document\n            self.change_map[document.name] = ChangeType.REMOVED\n    elif str(proto.document_delete):\n        _LOGGER.debug('on_snapshot: document change: DELETE')\n        name = proto.document_delete.document\n        self.change_map[name] = ChangeType.REMOVED\n    elif str(proto.document_remove):\n        _LOGGER.debug('on_snapshot: document change: REMOVE')\n        name = proto.document_remove.document\n        self.change_map[name] = ChangeType.REMOVED\n    elif proto.filter:\n        _LOGGER.debug('on_snapshot: filter update')\n        if (proto.filter.count != self._current_size()):\n            self._reset_docs()\n    else:\n        _LOGGER.debug('UNKNOWN TYPE. UHOH')\n        self.close(reason=ValueError(('Unknown listen response type: %s' % proto)))", "docstring": "Called everytime there is a response from listen. 
Collect changes\nand 'push' the changes in a batch to the customer when we receive\n'current' from the listen response.\n\nArgs:\nlisten_response(`google.cloud.firestore_v1beta1.types.ListenResponse`):\nCallback method that receives a object to", "source": "codesearchnet"}
{"code": "def GetLastHealthyElement(self):\n    for element in reversed(self.elements):\n        if not element.HasError():\n            return element\n    return self.elements[0]", "docstring": "Returns the last element of the trace that is not an error.\n\nThis element will contain the final component indicated by the trace.\n\nReturns:\nThe last element of the trace that is not an error.", "source": "github-repos"}
{"code": "def __init__(self, args):\n        \n        self.args = args.args\n        self.varargs = args.vararg\n        self.kwarg = args.kwarg\n        self.kwonlyargs = args.kwonlyargs\n        self.defaults = args.defaults\n        self.kw_defaults = args.kw_defaults\n\n        self.arguments = list()\n        if self.args:\n            self.arguments.extend([x.arg for x in self.args])\n        if self.varargs:\n            self.arguments.extend(self.varargs.arg)\n        if self.kwarg:\n            self.arguments.extend(self.kwarg.arg)\n        if self.kwonlyargs:\n            self.arguments.extend([x.arg for x in self.kwonlyargs])", "docstring": "Argument container class.\n\nArgs:\nargs(list(ast.args): The arguments in a function AST node.", "source": "juraj-google-style"}
{"code": "def build_cfg(cls, node):\n    \n    if not isinstance(node, gast.FunctionDef):\n      raise TypeError('input must be a function definition')\n    cfg = cls()\n    cfg.entry = Node(node.args)\n    cfg.head = [cfg.entry]\n    cfg.visit_statements(node.body)\n    cfg.exit = Node(None)\n    cfg.set_head(cfg.exit)\n    cfg.backlink(cfg.entry)\n    return cfg", "docstring": "Build a CFG for a function.\n\nArgs:\nnode: A function definition the body of which to analyze.\n\nReturns:\nA CFG object.\n\nRaises:\nTypeError: If the input is not a function definition.", "source": "juraj-google-style"}
{"code": "def pairwise_intersection(boxlist1, boxlist2):\n    \n    x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)\n    x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)\n    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))\n    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))\n    intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)\n    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))\n    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))\n    intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)\n    return intersect_heights * intersect_widths", "docstring": "Compute pairwise intersection areas between boxes.\n\nArgs:\nboxlist1: Nx4 floatbox\nboxlist2: Mx4\n\nReturns:\na tensor with shape [N, M] representing pairwise intersections", "source": "juraj-google-style"}
{"code": "def transformer_revnet_decoder(decoder_input,\n                               encoder_output,\n                               decoder_self_attention_bias,\n                               encoder_decoder_attention_bias,\n                               hparams,\n                               name=\"decoder\"):\n  \n\n  def f(x, side_input):\n    \n    decoder_self_attention_bias = side_input[0]\n    encoder_decoder_attention_bias = side_input[1]\n    encoder_output = side_input[2]\n\n    old_hid_size = hparams.hidden_size\n    hparams.hidden_size = old_hid_size \n\n    with tf.variable_scope(\"self_attention\"):\n      y = common_attention.multihead_attention(\n          common_layers.layer_preprocess(\n              x, hparams), None, decoder_self_attention_bias,\n          hparams.attention_key_channels or hparams.hidden_size,\n          hparams.attention_value_channels or hparams.hidden_size,\n          hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n      y = common_layers.layer_postprocess(x, y, hparams)\n      if encoder_output is not None:\n        with tf.variable_scope(\"encdec_attention\"):\n          y = common_attention.multihead_attention(\n              common_layers.layer_preprocess(\n                  x, hparams), encoder_output, encoder_decoder_attention_bias,\n              hparams.attention_key_channels or hparams.hidden_size,\n              hparams.attention_value_channels or hparams.hidden_size,\n              hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n          y = common_layers.layer_postprocess(x, y, hparams)\n    hparams.hidden_size = old_hid_size\n    return y\n\n  def g(x):\n    \n    old_hid_size = hparams.hidden_size\n    hparams.hidden_size = old_hid_size \n    with tf.variable_scope(\"ffn\"):\n      y = transformer.transformer_ffn_layer(\n          common_layers.layer_preprocess(x, hparams), hparams)\n      y = common_layers.layer_postprocess(x, y, hparams)\n    hparams.hidden_size = old_hid_size\n    return y\n\n  x1, x2 = tf.split(decoder_input, 2, axis=-1)\n\n  with tf.variable_scope(name):\n    y1, y2 = tf.contrib.layers.rev_block(\n        x1,\n        x2,\n        f,\n        g,\n        num_layers=hparams.num_hidden_layers,\n        f_side_input=[\n            decoder_self_attention_bias, encoder_decoder_attention_bias,\n            encoder_output\n        ],\n        is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)\n    y = tf.concat([y1, y2], axis=-1)\n    return common_layers.layer_preprocess(y, hparams)", "docstring": "A stack of transformer layers.\n\nArgs:\ndecoder_input: a Tensor\nencoder_output: a Tensor\ndecoder_self_attention_bias: bias Tensor for self-attention\n(see common_attention.attention_bias())\nencoder_decoder_attention_bias: bias Tensor for encoder-decoder attention\n(see common_attention.attention_bias())\nhparams: hyperparameters for model\nname: a string\n\nReturns:\ny: a Tensors", "source": "juraj-google-style"}
{"code": "def gather_available_device_info():\n    device_info_list = []\n    devices = device_lib.list_local_devices()\n    for d in devices:\n        device_info = test_log_pb2.AvailableDeviceInfo()\n        device_info.name = d.name\n        device_info.type = d.device_type\n        device_info.memory_limit = d.memory_limit\n        device_info.physical_description = d.physical_device_desc\n        device_info_list.append(device_info)\n    return device_info_list", "docstring": "Gather list of devices available to TensorFlow.\n\nReturns:\nA list of test_log_pb2.AvailableDeviceInfo messages.", "source": "github-repos"}
{"code": "def restore(self, state):\n        \n\n        own_properties = set(self.get_properties())\n        state_properties = set(state)\n\n        to_restore = own_properties.intersection(state_properties)\n\n        for name in to_restore:\n            value = state.get(name)\n\n            if name in self._complex_properties:\n                value = self._complex_properties[name][1](value)\n\n            setattr(self, name, value)", "docstring": "Restore this state from the output of a previous call to dump().\n\nOnly those properties in this object and listed in state will be\nupdated.  Other properties will not be modified and state may contain\nkeys that do not correspond with properties in this object.\n\nArgs:\nstate (dict): A serialized representation of this object.", "source": "juraj-google-style"}
{"code": "def set(self, **kwargs):\n    for (port_name, port_value) in kwargs.items():\n        if hasattr(port_value, 'value'):\n            port_value = port_value.value\n        self.inputs.__setattr__(port_name, port_value)", "docstring": "Set input values on task\n\nArgs:\narbitrary_keys: values for the keys\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def matches(x, y, regex_expr=False):\n    \n    \n    x = strip_regex(x) if regex_expr and isregex_expr(x) else x\n\n    \n    if PY_3:\n        \n        x = x.pattern if isregex(x) else x\n        \n        return test_case().assertRegex(y, x) or True\n\n    \n    if isinstance(x, str):\n        x = re.compile(x, re.IGNORECASE)\n\n    assert x.match(y) is not None", "docstring": "Tries to match a regular expression value ``x`` against ``y``.\nAliast``unittest.TestCase.assertEqual()``\n\nArguments:\nx (regex|str): regular expression to test.\ny (str): value to match.\nregex_expr (bool): enables regex string based expression matching.\n\nRaises:\nAssertionError: in case of mismatching.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def local_reduction_attention(x, block_length, multihead_params):\n\n    @expert_utils.add_name_scope()\n    def dot_product_self_local_attention_flattened(q, k, v):\n        'Strided block local self-attention.\\n\\n    No overlap between the blocks.\\n\\n    Args:\\n      q (tf.Tensor): shape [batch, heads, length, depth_k]\\n      k (tf.Tensor): shape [batch, heads, length, depth_k]\\n      v (tf.Tensor): shape [batch, heads, length, depth_v]\\n\\n    Returns:\\n      tf.Tensor: shape [batch, heads, length, depth_v]\\n    '\n        (_, num_head, _, depth) = q.get_shape().as_list()\n\n        def pad_and_reshape(x):\n            'Split the length dim into [num_block, block_length].'\n            length_x = common_layers.shape_list(x)[2]\n            x = tf.pad(x, [[0, 0], [0, 0], [0, ((- length_x) % block_length)], [0, 0]])\n            x = tf.reshape(x, [common_layers.shape_list(x)[0], num_head, (common_layers.shape_list(x)[2] \n            return x\n        (q, k, v) = [pad_and_reshape(t) for t in (q, k, v)]\n        logits = tf.matmul(q, k, transpose_b=True)\n        logits = tf.reshape(logits, [common_layers.shape_list(logits)[0], num_head, common_layers.shape_list(logits)[2], (block_length ** 2)])\n        weights = tf.nn.softmax(logits)\n        weights = tf.reshape(weights, [common_layers.shape_list(weights)[0], num_head, common_layers.shape_list(weights)[2], block_length, block_length])\n        weights = tf.reduce_sum(weights, axis=3, keep_dims=True)\n        v_out = tf.matmul(weights, v)\n        v_out = tf.squeeze(v_out, axis=3)\n        return v_out\n    return multihead_attention(x, None, bias=None, output_depth=x.get_shape().as_list()[(- 1)], attention_type=dot_product_self_local_attention_flattened, **multihead_params)", "docstring": "Reduce the length dimension using self attention.\n\nArgs:\nx (tf.Tensor): float32 of shape [batch, length, depth]\nblock_length (int): Block length for local attention (Compression factor)\nmultihead_params (dict): parameters for multihead attention\n\nReturns:\ntf.Tensor: Compressed tensor of shape [batch, length // factor, depth]", "source": "codesearchnet"}
{"code": "def _restore_checkpoint(self, master: str, saver: saver_lib.Saver=None, checkpoint_dir: str=None, checkpoint_filename_with_path: str=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None) -> Tuple[session.Session, bool]:\n    self._target = master\n    strategy = distribute_lib.get_strategy()\n    if strategy and hasattr(strategy.extended, '_experimental_initialize_system'):\n        strategy.extended._experimental_initialize_system()\n    sess = session.Session(self._target, graph=self._graph, config=config)\n    if checkpoint_dir and checkpoint_filename_with_path:\n        raise ValueError('Can not provide both checkpoint_dir and checkpoint_filename_with_path.')\n    if not saver or not (checkpoint_dir or checkpoint_filename_with_path):\n        return (sess, False)\n    if checkpoint_filename_with_path:\n        _restore_checkpoint_and_maybe_run_saved_model_initializers(sess, saver, checkpoint_filename_with_path)\n        return (sess, True)\n    wait_time = 0\n    ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)\n    while not ckpt or not ckpt.model_checkpoint_path:\n        if wait_for_checkpoint and wait_time < max_wait_secs:\n            logging.info('Waiting for checkpoint to be available.')\n            time.sleep(self._recovery_wait_secs)\n            wait_time += self._recovery_wait_secs\n            ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)\n        else:\n            return (sess, False)\n    _restore_checkpoint_and_maybe_run_saved_model_initializers(sess, saver, ckpt.model_checkpoint_path)\n    saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)\n    return (sess, True)", "docstring": "Creates a `Session`, and tries to restore a checkpoint.\n\n\nArgs:\nmaster: `String` representation of the TensorFlow master to use.\nsaver: A `Saver` object used to restore a model.\ncheckpoint_dir: Path to the checkpoint files. The latest checkpoint in the\ndir will be used to restore.\ncheckpoint_filename_with_path: Full file name path to the checkpoint file.\nwait_for_checkpoint: Whether to wait for checkpoint to become available.\nmax_wait_secs: Maximum time to wait for checkpoints to become available.\nconfig: Optional `ConfigProto` proto used to configure the session.\n\nReturns:\nA pair (sess, is_restored) where 'is_restored' is `True` if\nthe session could be restored, `False` otherwise.\n\nRaises:\nValueError: If both checkpoint_dir and checkpoint_filename_with_path are\nset.", "source": "github-repos"}
{"code": "def GetModifyTimestamp(self):\n    if self.modify_time is None:\n        self.modify_time = self._ReadTimestamp(self.modify_file)\n    return self.modify_time", "docstring": "Return the timestamp of the last cache modification.\n\nArgs: None\n\nReturns:\nAn int with the number of seconds since epoch, or None if the timestamp\nfile doesn't exist or has errors.", "source": "github-repos"}
{"code": "def decode_metar(self, metar):\n        \n        try:\n            from metar import Metar\n        except:\n            return \"Unable to parse metars. Please install parser from https:\n        m = Metar.Metar(metar)\n        return m.string()", "docstring": "Simple method that decodes a given metar string.\n\nArgs:\nmetar (str): The metar data\n\nReturns:\nThe metar data in readable format\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\nf.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')", "source": "juraj-google-style"}
{"code": "def touch(self, mode=438, exist_ok=True):\n    if self._closed:\n        self._raise_closed()\n    if self.exists():\n        if exist_ok:\n            self.filesystem.utime(self._path(), None)\n        else:\n            self.filesystem.raise_os_error(errno.EEXIST, self._path())\n    else:\n        fake_file = self.open('w')\n        fake_file.close()\n        self.chmod(mode)", "docstring": "Create a fake file for the path with the given access mode,\nif it doesn't exist.\n\nArgs:\nmode: the file mode for the file if it does not exist\nexist_ok: if the file already exists and this is True, nothing\nhappens, otherwise FileExistError is raised\n\nRaises:\nOSError: (Python 2 only) if the file exists and exits_ok is False.\nFileExistsError: (Python 3 only) if the file exists and exits_ok is\nFalse.", "source": "codesearchnet"}
{"code": "def GetClientURNsForHostnames(hostnames, token=None):\n  \n\n  if data_store.RelationalDBEnabled():\n    index = ClientIndex()\n  else:\n    index = CreateClientIndex(token=token)\n\n  keywords = set()\n  for hostname in hostnames:\n    if hostname.startswith(\"host:\"):\n      keywords.add(hostname)\n    else:\n      keywords.add(\"host:%s\" % hostname)\n  results = index.ReadClientPostingLists(keywords)\n\n  result = {}\n  for keyword, hits in iteritems(results):\n    result[keyword[len(\"host:\"):]] = hits\n  return result", "docstring": "Gets all client_ids for a given list of hostnames or FQDNS.\n\nArgs:\nhostnames: A list of hostnames / FQDNs.\ntoken: An ACL token.\n\nReturns:\nA dict with a list of all known GRR client_ids for each hostname.", "source": "juraj-google-style"}
{"code": "def _process_has_edge_degree_filter_directive(filter_operation_info, location, context, parameters):\n    if isinstance(filter_operation_info.field_ast, InlineFragment):\n        raise AssertionError(u'Received InlineFragment AST node in \"has_edge_degree\" filter handler. This should have been caught earlier: {}'.format(filter_operation_info.field_ast))\n    filtered_field_name = filter_operation_info.field_name\n    if ((filtered_field_name is None) or (not is_vertex_field_name(filtered_field_name))):\n        raise AssertionError(u'Invalid value for \"filtered_field_name\" in \"has_edge_degree\" filter: {}'.format(filtered_field_name))\n    if (not is_vertex_field_type(filter_operation_info.field_type)):\n        raise AssertionError(u'Invalid value for \"filter_operation_info.field_type\" in \"has_edge_degree\" filter: {}'.format(filter_operation_info))\n    argument = parameters[0]\n    if (not is_variable_argument(argument)):\n        raise GraphQLCompilationError(u'The \"has_edge_degree\" filter only supports runtime variable arguments. Tagged values are not supported.Argument name: {}'.format(argument))\n    argument_inferred_type = GraphQLInt\n    (argument_expression, non_existence_expression) = _represent_argument(location, context, argument, argument_inferred_type)\n    if (non_existence_expression is not None):\n        raise AssertionError(u'Since we do not support tagged values, non_existence_expression should have been None. However, it was: {}'.format(non_existence_expression))\n    argument_is_zero = expressions.BinaryComposition(u'=', argument_expression, expressions.ZeroLiteral)\n    edge_field_is_null = expressions.BinaryComposition(u'=', expressions.LocalField(filtered_field_name), expressions.NullLiteral)\n    edge_degree_is_zero = expressions.BinaryComposition(u'&&', argument_is_zero, edge_field_is_null)\n    edge_field_is_not_null = expressions.BinaryComposition(u'!=', expressions.LocalField(filtered_field_name), expressions.NullLiteral)\n    edge_degree = expressions.UnaryTransformation(u'size', expressions.LocalField(filtered_field_name))\n    edge_degree_matches_argument = expressions.BinaryComposition(u'=', edge_degree, argument_expression)\n    edge_degree_is_non_zero = expressions.BinaryComposition(u'&&', edge_field_is_not_null, edge_degree_matches_argument)\n    filter_predicate = expressions.BinaryComposition(u'||', edge_degree_is_zero, edge_degree_is_non_zero)\n    return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks the degree of the edge to the given vertex field.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nparameters: list of 1 element, containing the value to check the edge degree against;\nif the parameter is optional and missing, the check will return True\n\nReturns:\na Filter basic block that performs the check", "source": "codesearchnet"}
{"code": "def sg_inject(path, mod_name):\n    import sys\n    if (path not in list(sys.path)):\n        sys.path.append(path)\n    globals()[mod_name] = importlib.import_module(mod_name)\n    for func_name in dir(globals()[mod_name]):\n        if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):\n            if (not func_name.startswith('_')):\n                exec(('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name)))\n                exec(('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name)))", "docstring": "r\"\"\"Converts all functions in the given Python module to sugar functions\nso that they can be used in a chainable manner.\n\nArgs:\npath: A string. Path to the Python module\nmod_name: A string. The name of the Python module to inject.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def serialize_cert_to_der(cert_obj):\n    \n    return cert_obj.public_bytes(\n        cryptography.hazmat.primitives.serialization.Encoding.DER\n    )", "docstring": "Serialize certificate to DER.\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nbytes: DER encoded certificate", "source": "juraj-google-style"}
{"code": "def set_key_color(self, color: Tuple[(int, int, int)]) -> None:\n    lib.TCOD_image_set_key_color(self.image_c, color)", "docstring": "Set a color to be transparent during blitting functions.\n\nArgs:\ncolor (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.", "source": "codesearchnet"}
{"code": "def all_sample_md5s(self, type_tag=None):\n        \n\n        if type_tag:\n            cursor = self.database[self.sample_collection].find({'type_tag': type_tag}, {'md5': 1, '_id': 0})\n        else:\n            cursor = self.database[self.sample_collection].find({}, {'md5': 1, '_id': 0})\n        return [match.values()[0] for match in cursor]", "docstring": "Return a list of all md5 matching the type_tag ('exe','pdf', etc).\n\nArgs:\ntype_tag: the type of sample.\n\nReturns:\na list of matching samples.", "source": "juraj-google-style"}
{"code": "def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):\n    key_ranges_by_ns = []\n    for namespace in namespaces:\n        ranges = cls._split_ns_by_scatter(shard_count, namespace, query_spec.entity_kind, app)\n        random.shuffle(ranges)\n        key_ranges_by_ns.append(ranges)\n    ranges_by_shard = [[] for _ in range(shard_count)]\n    for ranges in key_ranges_by_ns:\n        for (i, k_range) in enumerate(ranges):\n            if k_range:\n                ranges_by_shard[i].append(k_range)\n    key_ranges_by_shard = []\n    for ranges in ranges_by_shard:\n        if ranges:\n            key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(ranges))\n    return key_ranges_by_shard", "docstring": "Get a list of key_ranges.KeyRanges objects, one for each shard.\n\nThis method uses scatter index to split each namespace into pieces\nand assign those pieces to shards.\n\nArgs:\napp: app_id in str.\nnamespaces: a list of namespaces in str.\nshard_count: number of shards to split.\nquery_spec: model.QuerySpec.\n\nReturns:\na list of key_ranges.KeyRanges objects.", "source": "codesearchnet"}
{"code": "def run_simulations(self, parameter_list, data_folder):\n        \n        self.data_folder = data_folder\n        with Pool(processes=MAX_PARALLEL_PROCESSES) as pool:\n            for result in pool.imap_unordered(self.launch_simulation,\n                                              parameter_list):\n                yield result", "docstring": "This function runs multiple simulations in parallel.\n\nArgs:\nparameter_list (list): list of parameter combinations to simulate.\ndata_folder (str): folder in which to create output folders.", "source": "juraj-google-style"}
{"code": "def filter_cold_days(input_data, month_filter):\n    projection_fields = ['year', 'month', 'day', 'mean_temp']\n    fields_of_interest = input_data | 'Projected' >> beam.Map(lambda row: {f: row[f] for f in projection_fields})\n    global_mean = AsSingleton(fields_of_interest | 'ExtractMean' >> beam.Map(lambda row: row['mean_temp']) | 'GlobalMean' >> beam.combiners.Mean.Globally())\n    return fields_of_interest | 'DesiredMonth' >> beam.Filter(lambda row: row['month'] == month_filter) | 'BelowMean' >> beam.Filter(lambda row, mean: row['mean_temp'] < mean, global_mean)", "docstring": "Workflow computing rows in a specific month with low temperatures.\n\nArgs:\ninput_data: a PCollection of dictionaries representing table rows. Each\ndictionary must have the keys ['year', 'month', 'day', and 'mean_temp'].\nmonth_filter: an int representing the month for which colder-than-average\ndays should be returned.\n\nReturns:\nA PCollection of dictionaries with the same keys described above. Each\nrow represents a day in the specified month where temperatures were\ncolder than the global mean temperature in the entire dataset.", "source": "github-repos"}
{"code": "def to_routing_header(params):\n    if (sys.version_info[0] < 3):\n        return urlencode(params).replace('%2F', '/')\n    return urlencode(params, safe='/')", "docstring": "Returns a routing header string for the given request parameters.\n\nArgs:\nparams (Mapping[str, Any]): A dictionary containing the request\nparameters used for routing.\n\nReturns:\nstr: The routing header string.", "source": "codesearchnet"}
{"code": "def insert_and_get(self, **fields):\n    if ((not self.conflict_target) and (not self.conflict_action)):\n        return super().create(**fields)\n    compiler = self._build_insert_compiler([fields])\n    rows = compiler.execute_sql(return_id=False)\n    columns = rows[0]\n    model_columns = {}\n    for field in self.model._meta.local_concrete_fields:\n        model_columns[field.column] = field.attname\n    model_init_fields = {}\n    for (column_name, column_value) in columns.items():\n        try:\n            model_init_fields[model_columns[column_name]] = column_value\n        except KeyError:\n            pass\n    return self.model(**model_init_fields)", "docstring": "Creates a new record in the database and then gets\nthe entire row.\n\nThis allows specifying custom conflict behavior using .on_conflict().\nIf no special behavior was specified, this uses the normal Django create(..)\n\nArguments:\nfields:\nThe fields of the row to create.\n\nReturns:\nThe model instance representing the row that was created.", "source": "codesearchnet"}
{"code": "def get_hyperparameters(self):\n    hyperparameters = {}\n    for (block_name, block) in self.blocks.items():\n        hyperparameters[block_name] = block.get_hyperparameters()\n    return hyperparameters", "docstring": "Get the current hyperparamters of each block.\n\nReturns:\ndict:\nA dictionary containing the block names as keys and\nthe current block hyperparameters dictionary as values.", "source": "codesearchnet"}
{"code": "def adapt_logger(logger):\n  \n  if isinstance(logger, logging.Logger):\n    return logger\n\n  \n  if isinstance(logger, (SimpleLogger, NoOpLogger)):\n    return logger.logger\n\n  \n  return logger", "docstring": "Adapt our custom logger.BaseLogger object into a standard logging.Logger object.\n\nAdaptations are:\n- NoOpLogger turns into a logger with a single NullHandler.\n- SimpleLogger turns into a logger with a StreamHandler and level.\n\nArgs:\nlogger: Possibly a logger.BaseLogger, or a standard python logging.Logger.\n\nReturns: a standard python logging.Logger.", "source": "juraj-google-style"}
{"code": "def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):\n        \n\n        def checkNoNested(mod):\n            try:\n                all = mod.__all__\n            except AttributeError:\n                return False\n            mems = inspect.getmembers(mod, inspect.ismodule)\n            mems = [m for m in mems if m[0] in mod.__all__]\n\n            if len(mems) > 0:\n                return False\n            return True\n\n        \n        mods = inspect.getmembers(package, inspect.ismodule)\n        \n        nmods, pvt, npkgs = [], [], []\n        for mod in mods:\n            \n            if checkNoNested(mod[1]):\n                if mod[0][0] == '_': pvt.append(mod)\n                else: nmods.append(mod)\n            else: npkgs.append(mod)\n        if showprivate: nmods += pvt\n\n\n        \n            \n        files = []\n        ignore = []\n        for pkg in npkgs:\n            pt = '%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[-1])\n            if os.path.exists(pt): shutil.rmtree(pt)\n            os.makedirs(pt)\n            ignore += inspect.getmembers(pkg[1])\n            f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)\n            files.append(f.split(package.__name__.replace('.', '/')+'/')[1])\n\n        if nested:\n            try:\n                name = package.__displayname__\n            except AttributeError:\n                name = package.__name__\n            \n            index = r % (name, '*' * len(name))\n            \n            index += '\\n   '.join(files)\n            \n            index += '\\n   ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh)\n            findex = 'content/%s/index.rst' % (package.__name__.replace('.', '/'))\n\n            \n            with open(findex, 'w') as f:\n                if package.__doc__: f.write(package.__doc__)\n                f.write(index)\n\n            \n            return '\\n   ' + findex\n\n        \n        names = '\\n   %s/%s/' % ( self.path, package.__name__.replace('.', '/'))\n        nmods = [m for m in nmods if m not in ignore]\n        return names.join(self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\\n   ')+files)", "docstring": "An internal helper to generate all of the pages for a given package\n\nArgs:\npackage (module): The top-level package to document\nshowprivate (bool): A flag for whether or not to display private members\nnested (bool): Foor internal use ONLY\n\nReturns:\nstr: The file names ready to be appended to a top-level toctree", "source": "juraj-google-style"}
{"code": "def assertAllDifferent(self, tensors):\n    tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors]\n    ls = array_ops.concat(tensors, axis=0).numpy().tolist()\n    self.assertAllEqual(len(ls), len(set(ls)))", "docstring": "Checks that there are no duplicate elements anywhere among the tensors.\n\nArgs:\ntensors: a list of tensors. They can have different shapes.", "source": "github-repos"}
{"code": "def email(self, subject, text_body, html_body=None, sender=None, **kwargs):\n    self.configuration.emailer().send([self.data['email']], subject, text_body, html_body=html_body, sender=sender, **kwargs)", "docstring": "Emails a user.\n\nArgs:\nsubject (str): Email subject\ntext_body (str): Plain text email body\nhtml_body (str): HTML email body\nsender (Optional[str]): Email sender. Defaults to SMTP username.\n**kwargs: See below\nmail_options (List): Mail options (see smtplib documentation)\nrcpt_options (List): Recipient options (see smtplib documentation)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def delete(self, webhookId):\n    check_type(webhookId, basestring, may_be_none=False)\n    self._session.delete(((API_ENDPOINT + '/') + webhookId))", "docstring": "Delete a webhook, by ID.\n\nArgs:\nwebhookId(basestring): The ID of the webhook to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def ApprovalRevokeRaw(aff4_path, token):\n  \n  try:\n    urn = rdf_client.ClientURN(aff4_path)\n  except type_info.TypeValueError:\n    urn = rdfvalue.RDFURN(aff4_path)\n\n  approval_urn = aff4.ROOT_URN.Add(\"ACL\").Add(urn.Path()).Add(\n      token.username).Add(utils.EncodeReasonString(token.reason))\n\n  super_token = access_control.ACLToken(username=\"raw-approval-superuser\")\n  super_token.supervisor = True\n\n  approval_request = aff4.FACTORY.Open(\n      approval_urn, mode=\"rw\", token=super_token)\n  approval_request.DeleteAttribute(approval_request.Schema.APPROVER)\n  approval_request.Close()", "docstring": "Revokes an approval for a given token.\n\nThis method requires raw datastore access to manipulate approvals directly.\n\nArgs:\naff4_path: The aff4_path or client id the approval should be created for.\ntoken: The token that should be revoked.", "source": "juraj-google-style"}
{"code": "def parse_from_xml(root):\n    if (root.tag != 'ubcpi'):\n        raise UpdateFromXmlError(_('Every peer instruction tool must contain an \"ubcpi\" element.'))\n    display_name_el = root.find('display_name')\n    if (display_name_el is None):\n        raise UpdateFromXmlError(_('Every peer instruction tool must contain a \"display_name\" element.'))\n    else:\n        display_name = _safe_get_text(display_name_el)\n    rationale_size_min = (int(root.attrib['rationale_size_min']) if ('rationale_size_min' in root.attrib) else None)\n    rationale_size_max = (int(root.attrib['rationale_size_max']) if ('rationale_size_max' in root.attrib) else None)\n    question_el = root.find('question')\n    if (question_el is None):\n        raise UpdateFromXmlError(_('Every peer instruction must tool contain a \"question\" element.'))\n    else:\n        question = parse_question_xml(question_el)\n    options_el = root.find('options')\n    if (options_el is None):\n        raise UpdateFromXmlError(_('Every peer instruction must tool contain a \"options\" element.'))\n    else:\n        (options, correct_answer, correct_rationale) = parse_options_xml(options_el)\n    seeds_el = root.find('seeds')\n    if (seeds_el is None):\n        raise UpdateFromXmlError(_('Every peer instruction must tool contain a \"seeds\" element.'))\n    else:\n        seeds = parse_seeds_xml(seeds_el)\n    algo = (unicode(root.attrib['algorithm']) if ('algorithm' in root.attrib) else None)\n    num_responses = (unicode(root.attrib['num_responses']) if ('num_responses' in root.attrib) else None)\n    return {'display_name': display_name, 'question_text': question, 'options': options, 'rationale_size': {'min': rationale_size_min, 'max': rationale_size_max}, 'correct_answer': correct_answer, 'correct_rationale': correct_rationale, 'seeds': seeds, 'algo': {'name': algo, 'num_responses': num_responses}}", "docstring": "Update the UBCPI XBlock's content from an XML definition.\n\nWe need to be strict about the XML we accept, to avoid setting\nthe XBlock to an invalid state (which will then be persisted).\n\nArgs:\nroot (lxml.etree.Element): The XML definition of the XBlock's content.\n\nReturns:\nA dictionary of all of the XBlock's content.\n\nRaises:\nUpdateFromXmlError: The XML definition is invalid", "source": "codesearchnet"}
{"code": "def __init__(self, access_token, access_token_type, refresh_token=None, expires_in=None, state=None):\n        \n\n        self.access_token = access_token\n        self.access_token_type = access_token_type\n        self.refresh_token = refresh_token\n        self.expires_in = expires_in\n        self.state = state", "docstring": "Initialziation of the object\n\nArgs:\naccess_token (str): Access token\naccess_token_type (str): Access token type\nrefresh_token (str):\nexpires_in (int): Seconds after which the token will expire\nstate (str):", "source": "juraj-google-style"}
{"code": "def __new__(cls, month=1, day=1, hour=0, minute=0, leap_year=False):\n        \n        year = 2016 if leap_year else 2017\n        hour, minute = cls._calculate_hour_and_minute(hour + minute / 60.0)\n        try:\n            return datetime.__new__(cls, year, month, day, hour, minute)\n        except ValueError as e:\n            raise ValueError(\"{}:\\n\\t({}/{}@{}:{})(m/d@h:m)\".format(\n                e, month, day, hour, minute\n            ))", "docstring": "Create Ladybug datetime.\n\nArgs:\nmonth: A value for month between 1-12 (Defualt: 1).\nday: A value for day between 1-31 (Defualt: 1).\nhour: A value for hour between 0-23 (Defualt: 0).\nminute: A value for month between 0-59 (Defualt: 0).\nleap_year: A boolean to indicate if datetime is for a leap year\n(Default: False).", "source": "juraj-google-style"}
{"code": "def plot_densities(self, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax_fig_plt(ax)\n    ax.grid(True)\n    ax.set_xlabel('r [Bohr]')\n    for (i, den_name) in enumerate(['ae_core_density', 'pseudo_core_density']):\n        rden = getattr(self, den_name)\n        label = ('$n_c$' if (i == 1) else '$\\\\tilde{n}_c$')\n        ax.plot(rden.mesh, (rden.mesh * rden.values), label=label, lw=2)\n    ax.legend(loc='best')\n    return fig", "docstring": "Plot the PAW densities.\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\n\nReturns:\n`matplotlib` figure", "source": "codesearchnet"}
{"code": "def GetArtifactCollectorArgs(flow_args, knowledge_base):\n    args = rdf_artifacts.ClientArtifactCollectorArgs()\n    args.knowledge_base = knowledge_base\n    args.apply_parsers = flow_args.apply_parsers\n    args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors\n    args.max_file_size = flow_args.max_file_size\n    args.use_tsk = flow_args.use_tsk\n    if (not flow_args.recollect_knowledge_base):\n        artifact_names = flow_args.artifact_list\n    else:\n        artifact_names = GetArtifactsForCollection(knowledge_base.os, flow_args.artifact_list)\n    expander = ArtifactExpander(knowledge_base, flow_args.path_type, flow_args.max_file_size)\n    for artifact_name in artifact_names:\n        rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)\n        if (not MeetsConditions(knowledge_base, rdf_artifact)):\n            continue\n        if (artifact_name in expander.processed_artifacts):\n            continue\n        requested_by_user = (artifact_name in flow_args.artifact_list)\n        for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user):\n            args.artifacts.append(expanded_artifact)\n    return args", "docstring": "Prepare bundle of artifacts and their dependencies for the client.\n\nArgs:\nflow_args: An `ArtifactCollectorFlowArgs` instance.\nknowledge_base: contains information about the client\n\nReturns:\nrdf value object containing a list of extended artifacts and the\nknowledge base", "source": "codesearchnet"}
{"code": "def is_uniform(self):\n    return self._uniform_row_length is not None", "docstring": "Returns true if the partition is known to be uniform statically.\n\nThis is based upon the existence of self._uniform_row_length. For example:\nRowPartition.from_row_lengths([3,3,3]).is_uniform()==false\nRowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true\nRowPartition.from_row_lengths([2,0,2]).is_uniform()==false\n\nReturns:\nWhether a RowPartition is known to be uniform statically.", "source": "github-repos"}
{"code": "def visualize_reconstruction(inputs, reconstruct, num=3, name='reconstruction'):\n    reconstruct = tf.clip_by_value(reconstruct, 0.0, 1.0)\n    inputs_and_reconstruct = tf.concat((inputs[:num], reconstruct[:num]), axis=0)\n    image_summary(inputs_and_reconstruct, name)", "docstring": "Visualizes the reconstruction of inputs in TensorBoard.\n\nArgs:\ninputs: A tensor of the original inputs, of shape [batch, timesteps,\nh, w, c].\nreconstruct: A tensor of a reconstruction of inputs, of shape\n[batch, timesteps, h, w, c].\nnum: Integer for the number of examples to visualize.\nname: String name of this summary.", "source": "codesearchnet"}
{"code": "def Detect(self, baseline, host_data):\n    result = CheckResult()\n    for detector in self.detectors:\n        finding = detector(baseline, host_data)\n        if finding:\n            result.ExtendAnomalies([finding])\n    if result:\n        return result", "docstring": "Run host_data through detectors and return them if a detector triggers.\n\nArgs:\nbaseline: The base set of rdf values used to evaluate whether an issue\nexists.\nhost_data: The rdf values passed back by the filters.\n\nReturns:\nA CheckResult message containing anomalies if any detectors identified an\nissue, None otherwise.", "source": "codesearchnet"}
{"code": "def GetEnabledInterfaces():\n    interfaces = []\n    show_args = ['/c', 'netsh', 'show', 'interface']\n    res = client_utils_common.Execute('cmd', show_args, time_limit=(- 1), bypass_whitelist=True)\n    pattern = re.compile('\\\\s*')\n    for line in res[0].split('\\r\\n'):\n        interface_info = pattern.split(line)\n        if ('Enabled' in interface_info):\n            interfaces.extend(interface_info[(- 1):])\n    return interfaces", "docstring": "Gives a list of enabled interfaces. Should work on all windows versions.\n\nReturns:\ninterfaces: Names of interfaces found enabled.", "source": "codesearchnet"}
{"code": "def get_port_monitor(self):\n    uri = '{}{}'.format(self.data['uri'], self.PORT_MONITOR_PATH)\n    return self._helper.do_get(uri)", "docstring": "Gets the port monitor configuration of a logical interconnect.\n\nReturns:\ndict: The Logical Interconnect.", "source": "codesearchnet"}
{"code": "def get_countries_in_region(cls, region, use_live=True, exception=None):\n        \n        \n        countriesdata = cls.countriesdata(use_live=use_live)\n        if isinstance(region, int):\n            regioncode = region\n        else:\n            regionupper = region.upper()\n            regioncode = countriesdata['regionnames2codes'].get(regionupper)\n\n        if regioncode is not None:\n            return countriesdata['regioncodes2countries'][regioncode]\n\n        if exception is not None:\n            raise exception\n        return list()", "docstring": "Get countries (ISO3 codes) in region\n\nArgs:\nregion (Union[int,str]): Three digit UNStats M49 region code or region name\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.\n\nReturns:\nList(str): Sorted list of ISO3 country names", "source": "juraj-google-style"}
{"code": "def get_updates(\n        self,\n        display_all_distributions=False,\n        verbose=False\n    ):  \n        \n        if verbose:\n            logging.basicConfig(\n                stream=sys.stdout,\n                level=logging.INFO,\n                format='%(message)s',\n            )\n            logging.info('Checking installed packages for updates...')\n\n        updates = self._get_environment_updates(\n            display_all_distributions=display_all_distributions\n        )\n\n        if updates:\n            for update in updates:\n                logging.info(update)\n\n        if updates and self._csv_file_name:\n            self.write_updates_to_csv(updates)\n\n        if updates and self._new_config:\n            self.write_new_config(updates)\n\n        return updates", "docstring": "When called, get the environment updates and write updates to a CSV\nfile and if a new config has been provided, write a new configuration\nfile.\n\nArgs:\ndisplay_all_distributions (bool): Return distribution even if it is\nup-to-date.\nverbose (bool): If ``True``, log to terminal to terminal.", "source": "juraj-google-style"}
{"code": "def get_data(name, train_batch_size, test_batch_size):\n  \n  if name not in ['mnist', 'cifar10']:\n    raise ValueError(\n        'Expected dataset \\'mnist\\' or \\'cifar10\\', but got %s' % name)\n  dataset = getattr(tf.keras.datasets, name)\n  num_classes = 10\n\n  \n  raw_data = dataset.load_data()\n  (images_train, labels_train), (images_test, labels_test) = raw_data\n\n  \n  images_train = images_train.astype(np.float32) / 255.\n  images_test = images_test.astype(np.float32) / 255.\n  labels_train = labels_train.astype(np.int32).squeeze()\n  labels_test = labels_test.astype(np.int32).squeeze()\n\n  \n  if images_train.ndim == 3:\n    images_train = np.expand_dims(images_train, -1)\n    images_test = np.expand_dims(images_test, -1)\n\n  \n  train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train))\n  test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test))\n\n  \n  train_iterator = (\n      train_data\n      \n      \n      .shuffle(buffer_size=len(images_train))\n      .batch(train_batch_size)\n      .repeat()\n      .make_one_shot_iterator()\n  )\n  test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()\n  return dict(\n      train_iterator=train_iterator,\n      test_iterator=test_iterator,\n      num_classes=num_classes)", "docstring": "Gets training and testing dataset iterators.\n\nArgs:\nname: String. Name of dataset, either 'mnist' or 'cifar10'.\ntrain_batch_size: Integer. Batch size for training.\ntest_batch_size: Integer. Batch size for testing.\n\nReturns:\nDict containing:\ntrain_iterator: A tf.data.Iterator, over training data.\ntest_iterator: A tf.data.Iterator, over test data.\nnum_classes: Integer. Number of class labels.", "source": "juraj-google-style"}
{"code": "def QueryAllFeatures(self, url=None, where='1=1', out_fields='*', timeFilter=None, geometryFilter=None, returnFeatureClass=False, out_fc=None, outSR=None, chunksize=1000, printIndent=''):\n    if (url is None):\n        return\n    fl = None\n    try:\n        fl = FeatureLayer(url=url, securityHandler=self._securityHandler)\n        qRes = fl.query(where=where, returnIDsOnly=True, timeFilter=timeFilter, geometryFilter=geometryFilter)\n        if ('error' in qRes):\n            print((printIndent + qRes))\n            return []\n        elif ('objectIds' in qRes):\n            oids = qRes['objectIds']\n            total = len(oids)\n            if (total == 0):\n                return fl.query(where=where, returnGeometry=True, out_fields=out_fields, timeFilter=timeFilter, geometryFilter=geometryFilter, outSR=outSR)\n            print((printIndent + ('%s features to be downloaded' % total)))\n            chunksize = min(chunksize, fl.maxRecordCount)\n            combinedResults = None\n            totalQueried = 0\n            for chunk in chunklist(l=oids, n=chunksize):\n                oidsQuery = ','.join(map(str, chunk))\n                if (not oidsQuery):\n                    continue\n                else:\n                    results = fl.query(objectIds=oidsQuery, returnGeometry=True, out_fields=out_fields, timeFilter=timeFilter, geometryFilter=geometryFilter, outSR=outSR)\n                    if isinstance(results, FeatureSet):\n                        if (combinedResults is None):\n                            combinedResults = results\n                        else:\n                            for feature in results.features:\n                                combinedResults.features.append(feature)\n                        totalQueried += len(results.features)\n                        print((printIndent + '{:.0%} Completed: {}/{}'.format((totalQueried / float(total)), totalQueried, total)))\n                    else:\n                        print((printIndent + results))\n            if (returnFeatureClass == True):\n                return combinedResults.save(*os.path.split(out_fc))\n            else:\n                return combinedResults\n        else:\n            print((printIndent + qRes))\n    except:\n        (line, filename, synerror) = trace()\n        raise common.ArcRestHelperError({'function': 'QueryAllFeatures', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        fl = None\n        del fl\n        gc.collect()", "docstring": "Performs an SQL query against a hosted feature service layer\nand returns all features regardless of service limit.\n\nArgs:\nurl (str): The URL of the feature service layer.\nwhere - the selection sql statement\nout_fields - the attribute fields to return\ntimeFilter - a TimeFilter object where either the start time\nor start and end time are defined to limit the\nsearch results for a given time.  The values in\nthe timeFilter should be as UTC timestampes in\nmilliseconds.  No checking occurs to see if they\nare in the right format.\ngeometryFilter - a GeometryFilter object to parse down a given\nquery by another spatial dataset.\nreturnFeatureClass - Default False. If true, query will be\nreturned as feature class\nchunksize (int): The maximum amount of features to query at a time. 
Defaults to 1000.\nout_fc - only valid if returnFeatureClass is set to True.\nOutput location of query.\n\nOutput:\nA list of Feature Objects (default) or a path to the output featureclass if\nreturnFeatureClass is set to True.", "source": "codesearchnet"}
{"code": "def description(self, force_refresh=False):\n    if force_refresh:\n        self.clear_cache()\n    if (not self._tuning_job_describe_result):\n        self._tuning_job_describe_result = self._sage_client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=self.name)\n    return self._tuning_job_describe_result", "docstring": "Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job.\n\nArgs:\nforce_refresh (bool): Set to True to fetch the latest data from SageMaker API.\n\nReturns:\ndict: The Amazon SageMaker response for ``DescribeHyperParameterTuningJob``.", "source": "codesearchnet"}
{"code": "def __init__(self, parent):\n        \n\n        super(ModuleUIFrame, self).__init__(parent)\n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(1, weight=1)\n\n        \n        from ....datatools import get_data\n        data = get_data()\n\n        \n        api_frame = ttk.LabelFrame(self, padding=8, text=\"Google API\")\n        api_frame.grid(row=0, column=0, sticky=\"W E N S\")\n        api_frame.columnconfigure(0, weight=1)\n        \n        self.google_api_key = tk.StringVar()\n        ttk.Label(api_frame, text=\"Google API Key\").grid(column=0, row=0, sticky=\"W E N S\")\n        ttk.Entry(api_frame, textvariable=self.google_api_key).grid(\n            column=0, row=1, padx=0, pady=4, sticky=\"W E N S\")\n        self.soundcloud_client_id = tk.StringVar()\n        ttk.Label(api_frame, text=\"SoundCloud Client ID\").grid(column=0, row=2, sticky=\"W E N S\")\n        ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(\n            column=0, row=3, padx=0, pady=4, sticky=\"W E N S\")\n        ttk.Button(api_frame, command=lambda: self.update_keys(), text=\"Update API Data\").grid(\n            column=0, row=4, padx=0, pady=4, sticky=\"W E N S\")\n\n        if \"google_api_key\" in data[\"discord\"][\"keys\"]:\n            self.google_api_key.set(data[\"discord\"][\"keys\"][\"google_api_key\"])\n        if \"soundcloud_client_id\" in data[\"discord\"][\"keys\"]:\n            self.soundcloud_client_id.set(data[\"discord\"][\"keys\"][\"soundcloud_client_id\"])", "docstring": "Create a new UI for the module\n\nArgs:\nparent: A tk or ttk object", "source": "juraj-google-style"}
{"code": "def get_barycenter(self):\n    try:\n        mass = self['mass'].values\n    except KeyError:\n        mass = self.add_data('mass')['mass'].values\n    pos = self.loc[(:, ['x', 'y', 'z'])].values\n    return ((pos * mass[(:, None)]).sum(axis=0) / self.get_total_mass())", "docstring": "Return the mass weighted average location.\n\nArgs:\nNone\n\nReturns:\n:class:`numpy.ndarray`:", "source": "codesearchnet"}
{"code": "def createList(self, title=None, items=None):\n        \n        if items is None:\n            items = []\n\n        node = _node.List()\n        if title is not None:\n            node.title = title\n        for text, checked in items:\n            node.add(text, checked)\n        self.add(node)\n        return node", "docstring": "Create a new list and populate it. Any changes to the note will be uploaded when :py:meth:`sync` is called.\n\nArgs:\ntitle (str): The title of the list.\nitems (List[(str, bool)]): A list of tuples. Each tuple represents the text and checked status of the listitem.\n\nReturns:\ngkeepapi.node.List: The new list.", "source": "juraj-google-style"}
{"code": "def save_imgs(x, fname):\n  \n  n = x.shape[0]\n  fig = figure.Figure(figsize=(n, 1), frameon=False)\n  canvas = backend_agg.FigureCanvasAgg(fig)\n  for i in range(n):\n    ax = fig.add_subplot(1, n, i+1)\n    ax.imshow(x[i].squeeze(),\n              interpolation=\"none\",\n              cmap=cm.get_cmap(\"binary\"))\n    ax.axis(\"off\")\n  canvas.print_figure(fname, format=\"png\")\n  print(\"saved %s\" % fname)", "docstring": "Helper method to save a grid of images to a PNG file.\n\nArgs:\nx: A numpy array of shape [n_images, height, width].\nfname: The filename to write to (including extension).", "source": "juraj-google-style"}
{"code": "def wait_for(self, pattern, timeout=None):\n    should_continue = True\n    if self.block:\n        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)\n\n    def stop(signum, frame):\n        nonlocal should_continue\n        if should_continue:\n            raise TimeoutError()\n    if timeout:\n        signal.signal(signal.SIGALRM, stop)\n        signal.alarm(timeout)\n    while should_continue:\n        output = (self.poll_output() + self.poll_error())\n        filtered = [line for line in output if re.match(pattern, line)]\n        if filtered:\n            should_continue = False", "docstring": "Block until a pattern have been found in stdout and stderr\n\nArgs:\npattern(:class:`~re.Pattern`): The pattern to search\ntimeout(int): Maximum number of second to wait. If None, wait infinitely\n\nRaises:\nTimeoutError: When timeout is reach", "source": "codesearchnet"}
{"code": "async def forget(request):\n    \n    auth_policy = request.get(POLICY_KEY)\n    if auth_policy is None:\n        raise RuntimeError('auth_middleware not installed')\n\n    return await auth_policy.forget(request)", "docstring": "Called to forget the userid for a request\n\nArgs:\nrequest: aiohttp Request object\n\nRaises:\nRuntimeError: Middleware is not installed", "source": "juraj-google-style"}
{"code": "def __init__(self, path):\n    \n    super(FilterFile, self).__init__()\n    self._path = path", "docstring": "Initializes a filter file.\n\nArgs:\npath (str): path to a file that contains one or more path filters.", "source": "juraj-google-style"}
{"code": "def duration_to_string(duration):\n    \n\n    m, s = divmod(duration, 60)\n    h, m = divmod(m, 60)\n    return \"%d:%02d:%02d\" % (h, m, s)", "docstring": "Converts a duration to a string\n\nArgs:\nduration (int): The duration in seconds to convert\n\nReturns s (str): The duration as a string", "source": "juraj-google-style"}
{"code": "def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting Linear ...')\n\n    if names == 'short':\n        tf_name = 'FC' + random_string(6)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    bias_name = '{0}.bias'.format(w_name)\n    weights_name = '{0}.weight'.format(w_name)\n\n    W = weights[weights_name].numpy().transpose()\n    input_channels, output_channels = W.shape\n\n    keras_weights = [W]\n    has_bias = False\n    if bias_name in weights:\n        bias = weights[bias_name].numpy()\n        keras_weights = [W, bias]\n        has_bias = True\n\n    dense = keras.layers.Dense(\n        output_channels,\n        weights=keras_weights, use_bias=has_bias, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros',\n    )\n\n    layers[scope_name] = dense(layers[inputs[0]])", "docstring": "Convert Linear.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def get_student_item_dict(self, anonymous_user_id=None):\n        \n\n        item_id = self._serialize_opaque_key(self.scope_ids.usage_id)\n\n        \n        \n        if hasattr(self, \"xmodule_runtime\"):\n            course_id = self.get_course_id()  \n\n            if anonymous_user_id:\n                student_id = anonymous_user_id\n            else:\n                student_id = self.xmodule_runtime.anonymous_student_id  \n        else:\n            course_id = \"edX/Enchantment_101/April_1\"\n            if self.scope_ids.user_id is None:\n                student_id = ''\n            else:\n                student_id = unicode(self.scope_ids.user_id)\n\n        student_item_dict = dict(\n            student_id=student_id,\n            item_id=item_id,\n            course_id=course_id,\n            item_type='ubcpi'\n        )\n        return student_item_dict", "docstring": "Create a student_item_dict from our surrounding context.\n\nSee also: submissions.api for details.\n\nArgs:\nanonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.\nReturns:\n(dict): The student item associated with this XBlock instance. This\nincludes the student id, item id, and course id.", "source": "juraj-google-style"}
{"code": "def sym_getattr(self, key: Union[str, int], default: Any=RAISE_IF_NOT_FOUND) -> Any:\n    if not self.sym_hasattr(key):\n        if default is RAISE_IF_NOT_FOUND:\n            raise AttributeError(self._error_message(f'{self.__class__!r} object has no symbolic attribute {key!r}.'))\n        return default\n    return self._sym_getattr(key)", "docstring": "Gets a symbolic attribute.\n\nArgs:\nkey: Key of symbolic attribute.\ndefault: Default value if attribute does not exist. If absent,\n\nReturns:\nValue of symbolic attribute if found, otherwise the default value\nif it's specified.\n\nRaises:\nAttributeError if `key` does not exist and `default` is not provided.", "source": "github-repos"}
{"code": "def slice(array, start, size, ty):\n    weld_obj = WeldObject(encoder_, decoder_)\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n    weld_template = '\\n       map(\\n         %(array)s,\\n         |array: %(ty)s| slice(array, %(start)dL, %(size)dL)\\n       )\\n    '\n    weld_obj.weld_code = (weld_template % {'array': array_var, 'start': start, 'ty': ty, 'size': size})\n    return weld_obj", "docstring": "Returns a new array-of-arrays with each array truncated, starting at\nindex `start` for `length` characters.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nstart (int): starting index\nsize (int): length to truncate at\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "codesearchnet"}
{"code": "def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None:\n        \n        if not seed_list:\n            seed_list = settings.SEED_LIST\n\n        logger.debug(\"Starting up nodeleader\")\n        if not skip_seeds:\n            logger.debug(\"Attempting to connect to seed list...\")\n            for bootstrap in seed_list:\n                if not is_ip_address(bootstrap):\n                    host, port = bootstrap.split(':')\n                    bootstrap = f\"{hostname_to_ip(host)}:{port}\"\n                addr = Address(bootstrap)\n                self.KNOWN_ADDRS.append(addr)\n                self.SetupConnection(addr)\n\n        logger.debug(\"Starting up nodeleader: starting peer, mempool, and blockheight check loops\")\n        \n        self.start_peer_check_loop()\n        self.start_memcheck_loop()\n        self.start_blockheight_loop()\n\n        if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running:\n            class OneShotFactory(Factory):\n                def __init__(self, leader):\n                    self.leader = leader\n\n                def buildProtocol(self, addr):\n                    print(f\"building new protocol for addr: {addr}\")\n                    self.leader.AddKnownAddress(Address(f\"{addr.host}:{addr.port}\"))\n                    p = NeoNode(incoming_client=True)\n                    p.factory = self\n                    return p\n\n            def listen_err(err):\n                print(f\"Failed start listening server for reason: {err.value}\")\n\n            def listen_ok(value):\n                self.incoming_server_running = True\n\n            logger.debug(f\"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}\")\n            server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)\n            listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))\n            listenport_deferred.addCallback(listen_ok)\n            listenport_deferred.addErrback(listen_err)", "docstring": "Start connecting to the seed list.\n\nArgs:\nseed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json`\nskip_seeds: skip connecting to seed list", "source": "juraj-google-style"}
{"code": "def list_storage_accounts_sub(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Storage/storageAccounts',\n                        '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "List the storage accounts in the specified subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body list of storage accounts.", "source": "juraj-google-style"}
{"code": "def save_shared_file(self, sharekey=None):\n        \n        endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)\n        data = self._make_request(\"POST\", endpoint=endpoint, data=None)\n\n        try:\n            sf = SharedFile.NewFromJSON(data)\n            sf.saved = True\n            return sf\n        except:\n            raise Exception(\"{0}\".format(data['error']))", "docstring": "Save a SharedFile to your Shake.\n\nArgs:\nsharekey (str): Sharekey for the file to save.\n\nReturns:\nSharedFile saved to your shake.", "source": "juraj-google-style"}
{"code": "def Query(self, query):\n    \n    cursor = self._database.cursor()\n    cursor.execute(query)\n    return cursor", "docstring": "Queries the database.\n\nArgs:\nquery (str): SQL query.\n\nReturns:\nsqlite3.Cursor: results.\n\nRaises:\nsqlite3.DatabaseError: if querying the database fails.", "source": "juraj-google-style"}
{"code": "def animate_cli(animation_, step, event):\n    while True:\n        time.sleep(step)\n        frame = next(animation_)\n        sys.stdout.write(frame)\n        sys.stdout.flush()\n        if event.is_set():\n            break\n    sys.stdout.write(animation_.get_erase_frame())\n    sys.stdout.flush()\n    animation_.reset()", "docstring": "Print out the animation cycle to stdout. This function is for use with\nsynchronous functions and must be run in a thread.\n\nArgs:\nanimation_ (generator): A generator that produces strings for the\nanimation. Should be endless.\nstep (float): Seconds between each animation frame.", "source": "codesearchnet"}
{"code": "def _create_service_api(credentials, service_name, version, developer_key=None, cache_discovery=False, http=None):\n    if (log.getEffectiveLevel() > logging.DEBUG):\n        logging.getLogger(discovery.__name__).setLevel(logging.WARNING)\n    discovery_kwargs = {'serviceName': service_name, 'version': version, 'developerKey': developer_key, 'cache_discovery': cache_discovery}\n    if http:\n        discovery_kwargs['http'] = http\n    else:\n        discovery_kwargs['credentials'] = credentials\n    return discovery.build(**discovery_kwargs)", "docstring": "Builds and returns a cloud API service object.\n\nArgs:\ncredentials (OAuth2Credentials): Credentials that will be used to\nauthenticate the API calls.\nservice_name (str): The name of the API.\nversion (str): The version of the API to use.\ndeveloper_key (str): The api key to use to determine the project\nassociated with the API call, most API services do not require\nthis to be set.\ncache_discovery (bool): Whether or not to cache the discovery doc.\n\nReturns:\nobject: A Resource object with methods for interacting with the service.", "source": "codesearchnet"}
{"code": "def get_events(self) -> List[Event]:\n    LOG.debug('Getting events for %s', self.key)\n    return get_events(self.key)", "docstring": "Get events associated with the scheduling object.\n\nReturns:\nlist of Event objects", "source": "codesearchnet"}
{"code": "def mounts(prefix, __mounts):\n    i = 0\n    mntpoints = []\n    for mount in __mounts:\n        if (not isinstance(mount, dict)):\n            mntpoint = '{0}/{1}'.format(prefix, str(i))\n            mntpoints.append(mntpoint)\n            i = (i + 1)\n    return mntpoints", "docstring": "Compute the mountpoints of the current user.\n\nArgs:\nprefix: Define where the job was running if it ran on a cluster.\nmounts: All mounts the user currently uses in his file system.\nReturn:\nmntpoints", "source": "codesearchnet"}
{"code": "def merge_input_csv_forecast_json(input_csv_file, forecast_json_path, condition_models, dist_models):\n    \n    try:\n        run_date = input_csv_file[:-4].split(\"_\")[-1]\n        print(run_date)\n        ens_member = \"_\".join(input_csv_file.split(\"/\")[-1][:-4].split(\"_\")[3:-1])\n        ens_name = input_csv_file.split(\"/\")[-1].split(\"_\")[2]\n        input_data = pd.read_csv(input_csv_file, index_col=\"Step_ID\")\n        full_json_path = forecast_json_path + \"{0}/{1}/\".format(run_date, ens_member)\n        track_ids = sorted(input_data[\"Track_ID\"].unique())\n        model_pred_cols = []\n        condition_models_ns = []\n        dist_models_ns = []\n        gamma_params = [\"Shape\", \"Location\", \"Scale\"]\n        for condition_model in condition_models:\n            model_pred_cols.append(condition_model.replace(\" \", \"-\") + \"_Condition\")\n            condition_models_ns.append(condition_model.replace(\" \", \"-\"))\n        for dist_model in dist_models:\n            dist_models_ns.append(dist_model.replace(\" \", \"-\"))\n            for param in gamma_params:\n                model_pred_cols.append(dist_model.replace(\" \", \"-\") + \"_\" + param)\n        pred_data = pd.DataFrame(index=input_data.index, columns=model_pred_cols,\n                                dtype=float)\n        for track_id in track_ids:\n            track_id_num = track_id.split(\"_\")[-1]\n            json_filename = full_json_path + \"{0}_{1}_{2}_model_track_{3}.json\".format(ens_name,\n                                                                                    run_date,\n                                                                                    ens_member,\n                                                                                    track_id_num)\n            json_file = open(json_filename)\n            json_data = json.load(json_file)\n            json_file.close()\n            for s, step in enumerate(json_data[\"features\"]):\n                step_id = track_id + \"_{0:02d}\".format(s)\n                for cond_model in condition_models_ns:\n                    pred_data.loc[step_id, cond_model + \"_Condition\"]  = step[\"properties\"][\"condition_\" + cond_model]\n                for dist_model in dist_models_ns:\n                    pred_data.loc[step_id, [dist_model + \"_\" + p\n                                            for p in gamma_params]] = step[\"properties\"][\"dist_\" + dist_model]\n        out_data = input_data.merge(pred_data, left_index=True, right_index=True)\n        return out_data, ens_name, ens_member\n    except Exception as e:\n        print(traceback.format_exc())\n        raise e", "docstring": "Reads forecasts from json files and merges them with the input data from the step csv files.\n\nArgs:\ninput_csv_file: Name of the input data csv file being processed\nforecast_json_path: Path to the forecast json files toplevel directory\ncondition_models: List of models used to forecast hail or no hail\ndist_models: List of models used to forecast the hail size distribution\n\nReturns:", "source": "juraj-google-style"}
{"code": "def list_resource_groups(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/',\n                        '?api-version=', RESOURCE_API])\n    return do_get(endpoint, access_token)", "docstring": "List the resource groups in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def stop(self, timeout=None):\n    self.stop_signal.set()\n    with self.queue.mutex:\n        self.queue.queue.clear()\n        self.queue.unfinished_tasks = 0\n        self.queue.not_full.notify()\n    self.run_thread.join(timeout)\n    _SHARED_SEQUENCES[self.uid] = None", "docstring": "Stops running threads and wait for them to exit, if necessary.\n\nShould be called by the same thread which called `start()`.\n\nArgs:\ntimeout: maximum time to wait on `thread.join()`", "source": "github-repos"}
{"code": "def _create_gates(self, inputs, memory):\n    num_gates = (2 * self._calculate_gate_size())\n    memory = tf.tanh(memory)\n    inputs = basic.BatchFlatten()(inputs)\n    gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)\n    gate_inputs = tf.expand_dims(gate_inputs, axis=1)\n    gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)\n    gates = tf.split((gate_memory + gate_inputs), num_or_size_splits=2, axis=2)\n    (input_gate, forget_gate) = gates\n    input_gate = tf.sigmoid((input_gate + self._input_bias))\n    forget_gate = tf.sigmoid((forget_gate + self._forget_bias))\n    return (input_gate, forget_gate)", "docstring": "Create input and forget gates for this step using `inputs` and `memory`.\n\nArgs:\ninputs: Tensor input.\nmemory: The current state of memory.\n\nReturns:\ninput_gate: A LSTM-like insert gate.\nforget_gate: A LSTM-like forget gate.", "source": "codesearchnet"}
{"code": "def sender(self, jid: str):\n        \n        if jid is not None and not isinstance(jid, str):\n            raise TypeError(\"'sender' MUST be a string\")\n        self._sender = aioxmpp.JID.fromstr(jid) if jid is not None else None", "docstring": "Set jid of the sender\n\nArgs:\njid (str): jid of the sender", "source": "juraj-google-style"}
{"code": "def preprocess(self, dataset, mode, hparams, interleave=True):\n\n    def _preprocess(example):\n        examples = self.preprocess_example(example, mode, hparams)\n        if (not isinstance(examples, tf.data.Dataset)):\n            examples = tf.data.Dataset.from_tensors(examples)\n        return examples\n    if interleave:\n        dataset = dataset.apply(tf.data.experimental.parallel_interleave(_preprocess, sloppy=True, cycle_length=8))\n    else:\n        dataset = dataset.flat_map(_preprocess)\n    return dataset", "docstring": "Runtime preprocessing on the whole dataset.\n\nReturn a tf.data.Datset -- the preprocessed version of the given one.\nBy default this function calls preprocess_example.\n\nArgs:\ndataset: the Dataset of already decoded but not yet preprocessed features.\nmode: tf.estimator.ModeKeys\nhparams: HParams, model hyperparameters\ninterleave: bool, whether to use parallel_interleave, which is faster\nbut will alter the order of samples non-deterministically, or flat_map,\nwhich is slower but will preserve the sample order.\n\nReturns:\na Dataset", "source": "codesearchnet"}
{"code": "def getfutureimports(entity):\n    if not (tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity)):\n        return tuple()\n    return tuple(sorted((name for name, value in entity.__globals__.items() if getattr(value, '__module__', None) == '__future__')))", "docstring": "Detects what future imports are necessary to safely execute entity source.\n\nArgs:\nentity: Any object\n\nReturns:\nA tuple of future strings", "source": "github-repos"}
{"code": "def ValidateDependencies(rdf_artifact):\n  \n  for dependency in GetArtifactDependencies(rdf_artifact):\n    try:\n      dependency_obj = REGISTRY.GetArtifact(dependency)\n    except rdf_artifacts.ArtifactNotRegisteredError as e:\n      raise rdf_artifacts.ArtifactDependencyError(\n          rdf_artifact, \"missing dependency\", cause=e)\n\n    message = dependency_obj.error_message\n    if message:\n      raise rdf_artifacts.ArtifactDependencyError(\n          rdf_artifact, \"dependency error\", cause=message)", "docstring": "Validates artifact dependencies.\n\nThis method checks whether all dependencies of the artifact are present\nand contain no errors.\n\nThis method can be called only after all other artifacts have been loaded.\n\nArgs:\nrdf_artifact: RDF object artifact.\n\nRaises:\nArtifactDependencyError: If a dependency is missing or contains errors.", "source": "juraj-google-style"}
{"code": "def __init__(self, row_partitions: Tuple[RowPartitionSpec, ...], static_inner_shape: tensor_shape.TensorShape, dtype: dtypes.DType):\n    if not isinstance(row_partitions, Iterable):\n        raise TypeError('row_partitions should be an Iterable')\n    row_partitions = tuple(row_partitions)\n    static_inner_shape = tensor_shape.as_shape(static_inner_shape)\n    dtype = dtypes.as_dtype(dtype)\n    if not all((isinstance(rp, RowPartitionSpec) for rp in row_partitions)):\n        raise TypeError('row_partitions should be an Iterable of RowPartitionSpecs')\n    if dtype != dtypes.int32 and dtype != dtypes.int64:\n        raise ValueError('dtype must be tf.int32 or tf.int64')\n    for spec in row_partitions:\n        if spec.dtype != dtype:\n            raise ValueError(f'dtype of {spec!r} is {spec.dtype!r}: expected {dtype!r}')\n    row_partitions = tuple(row_partitions)\n    inner_rank = static_inner_shape.rank\n    if inner_rank == 0:\n        if row_partitions:\n            raise ValueError('If row_partitions are provided, must have inner_rank > 0')\n    else:\n        num_slices_in_dimension = []\n        for i in range(len(row_partitions)):\n            rp = row_partitions[i]\n            result = tensor_shape.Dimension(rp.nrows)\n            if i > 0:\n                previous_rp = row_partitions[i - 1]\n                result = result.merge_with(previous_rp.nvals)\n                result = result.merge_with(num_slices_in_dimension[-1] * previous_rp.uniform_row_length)\n            num_slices_in_dimension.append(result)\n        if row_partitions:\n            last_rp = row_partitions[-1]\n            result = (num_slices_in_dimension[-1] * last_rp.uniform_row_length).merge_with(last_rp.nvals)\n            if inner_rank is not None:\n                result = result.merge_with(tensor_shape.dimension_at_index(static_inner_shape, 0))\n                static_inner_shape = result + static_inner_shape[1:]\n            num_slices_in_dimension.append(result)\n        for i in range(len(num_slices_in_dimension) - 1, 0, -1):\n            num_slices_in_dimension[i - 1] = num_slices_in_dimension[i - 1].merge_with(_safe_floor_div(num_slices_in_dimension[i], row_partitions[i - 1].uniform_row_length))\n        row_partitions = [RowPartitionSpec(nrows=num_slices_in_dimension[i].value, uniform_row_length=rp.uniform_row_length, nvals=num_slices_in_dimension[i + 1].value, dtype=rp.dtype) for i, rp in enumerate(row_partitions)]\n    self._static_inner_shape = static_inner_shape\n    self._inner_shape = tensor_lib.TensorSpec([inner_rank], dtype=dtype)\n    self._row_partitions = row_partitions", "docstring": "Create a Spec given row partitions, a static inner shape, and a dtype.\n\nArgs:\nrow_partitions: A sequence of `RowPartitionSpec`s describing how the\nragged shape is partitioned.\nstatic_inner_shape: The static shape of the flat_values.\ndtype: The DType used to encode the shape (tf.int64 or tf.int32).", "source": "github-repos"}
{"code": "def _initialize_slots(self, seed, hashvalues):\n        \n        self.seed = seed\n        self.hashvalues = self._parse_hashvalues(hashvalues)", "docstring": "Initialize the slots of the LeanMinHash.\n\nArgs:\nseed (int): The random seed controls the set of random\npermutation functions generated for this LeanMinHash.\nhashvalues: The hash values is the internal state of the LeanMinHash.", "source": "juraj-google-style"}
{"code": "def CallHwclock(logger):\n    command = ['/sbin/hwclock', '--hctosys']\n    try:\n        subprocess.check_call(command)\n    except subprocess.CalledProcessError:\n        logger.warning('Failed to sync system time with hardware clock.')\n    else:\n        logger.info('Synced system time with hardware clock.')", "docstring": "Sync clock using hwclock.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"}
{"code": "def retry_loop(self, context, step_method):\n    logger.debug('starting')\n    context['retryCounter'] = 0\n    sleep = context.get_formatted_as_type(self.sleep, out_type=float)\n    if self.max:\n        max = context.get_formatted_as_type(self.max, out_type=int)\n        logger.info(f'retry decorator will try {max} times at {sleep}s intervals.')\n    else:\n        max = None\n        logger.info(f'retry decorator will try indefinitely at {sleep}s intervals.')\n    if poll.while_until_true(interval=sleep, max_attempts=max)(self.exec_iteration)(context=context, step_method=step_method):\n        logger.debug('retry loop complete, reporting success.')\n    logger.debug('retry loop done')\n    logger.debug('done')", "docstring": "Run step inside a retry loop.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate - after method execution will contain the new\nupdated context.\nstep_method: (method/function) This is the method/function that\nwill execute on every loop iteration. Signature is:\nfunction(context)", "source": "codesearchnet"}
{"code": "def get_dataset_end_date(self, date_format=None):\n        \n        \n        dataset_date = self.get_dataset_end_date_as_datetime()\n        return self._get_formatted_date(dataset_date, date_format)", "docstring": "Get dataset date as string in specified format. For range returns start date.\nIf no format is supplied, an ISO 8601 string is returned.\n\nArgs:\ndate_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.\n\nReturns:\nOptional[str]: Dataset date string or None if no date is set", "source": "juraj-google-style"}
{"code": "def checkPermissions(permissions=[], obj=None):\n    if (not obj):\n        return False\n    sm = getSecurityManager()\n    for perm in permissions:\n        if (not sm.checkPermission(perm, obj)):\n            return ''\n    return True", "docstring": "Checks if a user has permissions for a given object.\n\nArgs:\npermissions: The permissions the current user must be compliant with\nobj: The object for which the permissions apply\n\nReturns:\n1 if the user complies with all the permissions for the given object.\nOtherwise, it returns empty.", "source": "codesearchnet"}
{"code": "def _convert_tf1_model(flags):\n    if flags.custom_opdefs:\n        register_custom_opdefs(_parse_array(flags.custom_opdefs))\n    converter = _get_tflite_converter(flags)\n    if flags.inference_type:\n        converter.inference_type = _parse_inference_type(flags.inference_type, 'inference_type')\n    if flags.inference_input_type:\n        converter.inference_input_type = _parse_inference_type(flags.inference_input_type, 'inference_input_type')\n    if flags.output_format:\n        converter.output_format = _toco_flags_pb2.FileFormat.Value(flags.output_format)\n    if flags.mean_values and flags.std_dev_values:\n        input_arrays = converter.get_input_arrays()\n        std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)\n        if converter.inference_type == dtypes.float32:\n            mean_values = _parse_array(flags.mean_values, type_fn=float)\n        else:\n            mean_values = _parse_array(flags.mean_values, type_fn=int)\n        quant_stats = list(zip(mean_values, std_dev_values))\n        if not flags.input_arrays and len(input_arrays) > 1 or len(input_arrays) != len(quant_stats):\n            raise ValueError(\"Mismatching --input_arrays, --std_dev_values, and --mean_values. The flags must have the same number of items. The current input arrays are '{0}'. --input_arrays must be present when specifying --std_dev_values and --mean_values with multiple input tensors in order to map between names and values.\".format(','.join(input_arrays)))\n        converter.quantized_input_stats = dict(list(zip(input_arrays, quant_stats)))\n    if flags.default_ranges_min is not None and flags.default_ranges_max is not None:\n        converter.default_ranges_stats = (flags.default_ranges_min, flags.default_ranges_max)\n    if flags.drop_control_dependency:\n        converter.drop_control_dependency = flags.drop_control_dependency\n    if flags.reorder_across_fake_quant:\n        converter.reorder_across_fake_quant = flags.reorder_across_fake_quant\n    if flags.change_concat_input_ranges:\n        converter.change_concat_input_ranges = flags.change_concat_input_ranges == 'TRUE'\n    if flags.allow_custom_ops:\n        converter.allow_custom_ops = flags.allow_custom_ops\n    if flags.target_ops:\n        ops_set_options = lite.OpsSet.get_options()\n        converter.target_spec.supported_ops = set()\n        for option in flags.target_ops.split(','):\n            if option not in ops_set_options:\n                raise ValueError('Invalid value for --target_ops. Options: {0}'.format(','.join(ops_set_options)))\n            converter.target_spec.supported_ops.add(lite.OpsSet(option))\n    if flags.experimental_select_user_tf_ops:\n        if lite.OpsSet.SELECT_TF_OPS not in converter.target_spec.supported_ops:\n            raise ValueError('--experimental_select_user_tf_ops can only be set if --target_ops contains SELECT_TF_OPS.')\n        user_op_set = set()\n        for op_name in flags.experimental_select_user_tf_ops.split(','):\n            user_op_set.add(op_name)\n        converter.target_spec.experimental_select_user_tf_ops = list(user_op_set)\n    if flags.post_training_quantize:\n        converter.optimizations = [lite.Optimize.DEFAULT]\n        if converter.inference_type != dtypes.float32:\n            print('--post_training_quantize quantizes a graph of inference_type FLOAT. 
Overriding inference_type to FLOAT.')\n            converter.inference_type = dtypes.float32\n    if flags.quantize_to_float16:\n        converter.target_spec.supported_types = [dtypes.float16]\n        if not flags.post_training_quantize:\n            print('--quantize_to_float16 will only take effect with the --post_training_quantize flag enabled.')\n    if flags.dump_graphviz_dir:\n        converter.dump_graphviz_dir = flags.dump_graphviz_dir\n    if flags.dump_graphviz_video:\n        converter.dump_graphviz_vode = flags.dump_graphviz_video\n    if flags.conversion_summary_dir:\n        converter.conversion_summary_dir = flags.conversion_summary_dir\n    converter.experimental_new_converter = flags.experimental_new_converter\n    if flags.experimental_new_quantizer is not None:\n        converter.experimental_new_quantizer = flags.experimental_new_quantizer\n    output_data = converter.convert()\n    with gfile.GFile(flags.output_file, 'wb') as f:\n        f.write(output_data)", "docstring": "Calls function to convert the TensorFlow 1.X model into a TFLite model.\n\nArgs:\nflags: argparse.Namespace object.\n\nRaises:\nValueError: Invalid flags.", "source": "github-repos"}
{"code": "def _get_read_preference(read_preference):\n    read_preference = getattr(pymongo.ReadPreference, read_preference, None)\n    if (read_preference is None):\n        raise ValueError(('Invalid read preference: %s' % read_preference))\n    return read_preference", "docstring": "Converts read_preference from string to pymongo.ReadPreference value.\n\nArgs:\nread_preference: string containig the read_preference from the\nconfig file\nReturns:\nA value from the pymongo.ReadPreference enum\n\nRaises:\nException: Invalid read preference", "source": "codesearchnet"}
{"code": "def blend(self, other, percent=0.5):\n    dest = (1.0 - percent)\n    rgb = tuple((((u * percent) + (v * dest)) for (u, v) in zip(self.__rgb, other.__rgb)))\n    a = ((self.__a * percent) + (other.__a * dest))\n    return Color(rgb, 'rgb', a, self.__wref)", "docstring": "blend this color with the other one.\n\nArgs:\n:other:\nthe grapefruit.Color to blend with this one.\n\nReturns:\nA grapefruit.Color instance which is the result of blending\nthis color on the other one.\n\n>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)\n>>> c2 = Color.from_rgb(1, 1, 1, 0.6)\n>>> c3 = c1.blend(c2)\n>>> c3\nColor(1.0, 0.75, 0.5, 0.4)", "source": "codesearchnet"}
{"code": "def merge_dims(value, outer_axis, inner_axis):\n    if outer_axis == inner_axis:\n        return value\n    while outer_axis == 0 and isinstance(value, RaggedTensor):\n        value = value.values\n        inner_axis -= 1\n        if inner_axis == 0:\n            return value\n    if not isinstance(value, RaggedTensor):\n        if value.shape.is_fully_defined():\n            old_shape = value.shape.as_list()\n            new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]\n        else:\n            old_shape = array_ops.shape(value)\n            new_shape = array_ops.concat([old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)\n        return array_ops.reshape(value, new_shape)\n    if outer_axis > 1:\n        return value.with_values(merge_dims(value.values, outer_axis - 1, inner_axis - 1))\n    new_values = value.values\n    new_splits = value.row_splits\n    for axis in range(outer_axis, inner_axis):\n        if isinstance(new_values, RaggedTensor):\n            new_splits = array_ops.gather(new_values.row_splits, new_splits)\n            new_values = new_values.values\n        else:\n            shape_split = inner_axis - axis + 1\n            if new_values.shape.is_fully_defined():\n                old_shape = new_values.shape.as_list()\n                new_shape = [-1] + old_shape[shape_split:]\n                flat_size = _prod(old_shape[1:shape_split])\n            else:\n                old_shape = array_ops.shape(new_values)\n                new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)\n                flat_size = math_ops.cast(math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)\n            new_values = array_ops.reshape(new_values, new_shape)\n            new_splits = new_splits * flat_size\n            break\n    return RaggedTensor.from_row_splits(new_values, new_splits)", "docstring": "Merges value[outer_axis...inner_axis] into a single dimension.\n\nSee `RaggedTensor.merge_dims()` for more details.  This helper differs from\n`RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor.\n\nArgs:\nvalue: A `RaggedTensor` or `Tensor`\nouter_axis: `int`\ninner_axis: `int`\n\nReturns:\nA flattened `RaggedTensor` or `Tensor`.", "source": "github-repos"}
{"code": "def write(gmt, out_path):\n    with open(out_path, 'w') as f:\n        for (_, each_dict) in enumerate(gmt):\n            f.write((each_dict[SET_IDENTIFIER_FIELD] + '\\t'))\n            f.write((each_dict[SET_DESC_FIELD] + '\\t'))\n            f.write('\\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]]))\n            f.write('\\n')", "docstring": "Write a GMT to a text file.\n\nArgs:\ngmt (GMT object): list of dicts\nout_path (string): output path\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "class QuantEmbedding(nn.Module):\n\n    def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False):\n        super().__init__()\n        self.num_ = num_embeddings\n        self.dim = embedding_dim\n        self.padding_idx = padding_idx\n        self.max_norm = max_norm\n        self.norm_type = norm_type\n        self.scale_grad_by_freq = scale_grad_by_freq\n        self.sparse = sparse\n        self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))\n        self.register_buffer('weight_scaling_factor', torch.zeros(1))\n        self.register_buffer('weight_integer', torch.zeros_like(self.weight))\n        self.weight_bit = weight_bit\n        self.momentum = momentum\n        self.quant_mode = quant_mode\n        self.percentile_mode = False\n        self.weight_function = SymmetricQuantFunction.apply\n\n    def forward(self, x, positions=None, incremental_state=None):\n        if not self.quant_mode:\n            return (nn.functional.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse), None)\n        w = self.weight\n        w_transform = w.data.detach()\n        w_min = w_transform.min().expand(1)\n        w_max = w_transform.max().expand(1)\n        self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)\n        self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor)\n        emb_int = nn.functional.embedding(x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)\n        return (emb_int * self.weight_scaling_factor, self.weight_scaling_factor)", "docstring": "Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.\n\nArgs:\nweight_bit (`int`, *optional*, defaults to `8`):\nBitwidth for the quantized weight.\nmomentum (`float`, *optional*, defaults to `0.95`):\nMomentum for updating the activation quantization range.\nquant_mode (`bool`, *optional*, defaults to `False`):\nWhether or not the layer is quantized.", "source": "github-repos"}
{"code": "def validate_additional_properties(self, valid_response, response):\n    assert isinstance(valid_response, dict)\n    assert isinstance(response, dict)\n    first_value = valid_response[list(valid_response)[0]]\n    if isinstance(first_value, dict):\n        definition = None\n        definition_name = self.get_dict_definition(first_value)\n        if (definition_name is None):\n            definition = self._definition_from_example(first_value)\n            definition_name = 'self generated'\n        for item in response.values():\n            if (not self.validate_definition(definition_name, item, definition=definition)):\n                return False\n        return True\n    if isinstance(first_value, list):\n        raise Exception('Not implemented yet')\n    try:\n        assert all((isinstance(y, type(first_value)) for (_, y) in response.items()))\n        assert all((isinstance(y, type(first_value)) for (_, y) in valid_response.items()))\n        return True\n    except Exception:\n        return False", "docstring": "Validates additional properties. In additional properties, we only\nneed to compare the values of the dict, not the keys\n\nArgs:\nvalid_response: An example response (for example generated in\n_get_example_from_properties(self, spec))\nType is DICT\nresponse: The actual dict coming from the response\nType is DICT\n\nReturns:\nA boolean - whether the actual response validates against the given example", "source": "codesearchnet"}
{"code": "def remove_volume(self, name, force=False):\n        \n        params = {}\n        if force:\n            if utils.version_lt(self._version, '1.25'):\n                raise errors.InvalidVersion(\n                    'force removal was introduced in API 1.25'\n                )\n            params = {'force': force}\n\n        url = self._url('/volumes/{0}', name, params=params)\n        resp = self._delete(url)\n        self._raise_for_status(resp)", "docstring": "Remove a volume. Similar to the ``docker volume rm`` command.\n\nArgs:\nname (str): The volume's name\nforce (bool): Force removal of volumes that were already removed\nout of band by the volume driver plugin.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf volume failed to remove.", "source": "juraj-google-style"}
{"code": "def metrics_format(self, metrics: dict[str, float]) -> dict[str, float]:\n    metrics_copy = metrics.copy()\n    for k, v in metrics_copy.items():\n        if '_mem_' in k:\n            metrics_copy[k] = f'{v >> 20}MB'\n        elif '_runtime' in k:\n            metrics_copy[k] = _secs2timedelta(v)\n        elif k == 'total_flos':\n            metrics_copy[k] = f'{int(v) >> 30}GF'\n        elif isinstance(metrics_copy[k], float):\n            metrics_copy[k] = round(v, 4)\n    return metrics_copy", "docstring": "Reformat Trainer metrics values to a human-readable format.\n\nArgs:\nmetrics (`Dict[str, float]`):\nThe metrics returned from train/evaluate/predict\n\nReturns:\nmetrics (`Dict[str, float]`): The reformatted metrics", "source": "github-repos"}
{"code": "def __init__(self, funcs, trackable_obj=None):\n    super(TFLiteFrozenGraphConverterV2, self).__init__()\n    self._funcs = funcs\n    self._trackable_obj = trackable_obj\n    self.experimental_lower_to_saved_model = True", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nfuncs: List of TensorFlow ConcreteFunctions. The list should not contain\nduplicate elements.\ntrackable_obj: tf.AutoTrackable object associated with `funcs`. A\nreference to this object needs to be maintained so that Variables do not\nget garbage collected since functions have a weak reference to\nVariables. This is only required when the tf.AutoTrackable object is not\nmaintained by the user (e.g. `from_saved_model`).", "source": "github-repos"}
{"code": "async def get(self, key):\n    log.info('Looking up key %s', key)\n    dkey = digest(key)\n    if (self.storage.get(dkey) is not None):\n        return self.storage.get(dkey)\n    node = Node(dkey)\n    nearest = self.protocol.router.find_neighbors(node)\n    if (not nearest):\n        log.warning('There are no known neighbors to get key %s', key)\n        return None\n    spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n    return (await spider.find())", "docstring": "Get a key if the network has it.\n\nReturns:\n:class:`None` if not found, the value otherwise.", "source": "codesearchnet"}
{"code": "def SetExtractionConfiguration(self, configuration):\n    self._hasher_file_size_limit = configuration.hasher_file_size_limit\n    self._SetHashers(configuration.hasher_names_string)\n    self._process_archives = configuration.process_archives\n    self._process_compressed_streams = configuration.process_compressed_streams\n    self._SetYaraRules(configuration.yara_rules_string)", "docstring": "Sets the extraction configuration settings.\n\nArgs:\nconfiguration (ExtractionConfiguration): extraction configuration.", "source": "codesearchnet"}
{"code": "def update(self, task_name, result):\n        \n        with open(self.filepath, 'rb') as f:\n            existing_results = pickle.load(f)\n        if task_name not in self.tasks:\n            self._add_task(task_name)\n            existing_results['tasks'].append(task_name)\n            existing_results['results'].append([])\n        task_name_idx = existing_results['tasks'].index(task_name)\n        results = existing_results['results'][task_name_idx]\n        results.append(result)\n        with open(self.filepath, 'wb') as f:\n            pickle.dump(existing_results, f)", "docstring": "Update the results file with new information.\n\nArgs:\ntask_name (str): Name of the currently running task. A previously unseen\n``task_name`` will create a new entry in both :attr:`tasks`\nand :attr:`results`.\nresult: This will be appended to the list in :attr:`results` which\ncorresponds to the ``task_name`` in ``task_name``:attr:`tasks`.", "source": "juraj-google-style"}
{"code": "def load_resource(resource_url: str, forceupdate: bool = False):\n    \n\n    log.info(f\"Loading resource {resource_url}\")\n\n    try:\n        \n        fo = bel.utils.download_file(resource_url)\n\n        if not fo:\n            log.error(f\"Could not download and open file {resource_url}\")\n            return \"Failed to download resource_url\"\n\n        \n        fo.seek(0)\n        with gzip.open(fo, \"rt\") as f:\n            metadata = json.loads(f.__next__())\n\n        if \"metadata\" not in metadata:\n            log.error(f\"Missing metadata entry for {resource_url}\")\n            return \"Cannot load resource file - missing metadata object in first line of file\"\n\n        \n        if metadata[\"metadata\"][\"type\"] == \"namespace\":\n            bel.resources.namespace.load_terms(fo, metadata, forceupdate)\n\n        elif metadata[\"metadata\"][\"type\"] == \"ortholog\":\n            bel.resources.ortholog.load_orthologs(fo, metadata)\n\n    finally:\n        fo.close()", "docstring": "Load BEL Resource file\n\nForceupdate will create a new index in Elasticsearch regardless of whether\nan index with the resource version already exists.\n\nArgs:\nresource_url: URL from which to download the resource to load into the BEL API\nforceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches", "source": "juraj-google-style"}
{"code": "def load(self, path):\n        \n        path = os.path.expandvars(os.path.expanduser(path))\n\n        gdg = cgaddag.gdg_load(path.encode(\"ascii\"))\n        if not gdg:\n            errno = ctypes.c_int.in_dll(ctypes.pythonapi, \"errno\").value\n            raise OSError(errno, os.strerror(errno), path)\n\n        self.__del__()\n        self.gdg = gdg.contents", "docstring": "Load a GADDAG from file, replacing the words currently in this GADDAG.\n\nArgs:\npath: path to saved GADDAG to be loaded.", "source": "juraj-google-style"}
{"code": "def _wrap_el(self, value):\n        \n        if isinstance(value, dict):\n            return {k: self._wrap_el(v) for k, v in value.items()}\n        elif isinstance(value, WebElement):\n            return {'ELEMENT': value.element_id}\n        elif isinstance(value, list) and not isinstance(value, str):\n            return [self._wrap_el(item) for item in value]\n        else:\n            return value", "docstring": "Convert WebElement Object to {'Element': 1234}\n\nArgs:\nvalue(str|list|dict): The local value.\n\nReturns:\nThe wrapped value.", "source": "juraj-google-style"}
{"code": "def recent_all_projects(self, limit=30, offset=0):\n        \n        method = 'GET'\n        url = ('/recent-builds?circle-token={token}&limit={limit}&'\n               'offset={offset}'.format(token=self.client.api_token,\n                                        limit=limit,\n                                        offset=offset))\n        json_data = self.client.request(method, url)\n        return json_data", "docstring": "Return information about recent builds across all projects.\n\nArgs:\nlimit (int), Number of builds to return, max=100, defaults=30.\noffset (int): Builds returned from this point, default=0.\n\nReturns:\nA list of dictionaries.", "source": "juraj-google-style"}
{"code": "def _process_dataset(name, directory, num_shards, labels_file):\n  \n  filenames, texts, labels = _find_image_files(directory, labels_file)\n  _process_image_files(name, filenames, texts, labels, num_shards)", "docstring": "Process a complete data set and save it as a TFRecord.\n\nArgs:\nname: string, unique identifier specifying the data set.\ndirectory: string, root path to the data set.\nnum_shards: integer number of shards for this data set.\nlabels_file: string, path to the labels file.", "source": "juraj-google-style"}
{"code": "def sort_recursive(data):\n    newdict = {}\n    for i in data.items():\n        if (type(i[1]) is dict):\n            newdict[i[0]] = sort_recursive(i[1])\n        else:\n            newdict[i[0]] = i[1]\n    return OrderedDict(sorted(newdict.items(), key=(lambda item: (compare_type(type(item[1])), item[0]))))", "docstring": "Recursively sorts all elements in a dictionary\n\nArgs:\ndata (dict): The dictionary to sort\n\nReturns:\nsorted_dict (OrderedDict): The sorted data dict", "source": "codesearchnet"}
{"code": "def remove_triple(self, subj: URIRef, pred: URIRef, obj: Union[(URIRef, Literal)]) -> None:\n    self.g.remove((subj, pred, obj))", "docstring": "Removes triple from rdflib Graph\n\nYou must input the triple in its URIRef or Literal form for each node exactly the way it\nwas inputed or it will not delete the triple.\n\nArgs:\nsubj: Entity subject to be removed it its the only node with this subject; else this is\njust going to delete a desciption I.E. predicate_object of this entity.\npred: Entity predicate to be removed\nobj: Entity object to be removed", "source": "codesearchnet"}
{"code": "def remove(self, processor_identity):\n        \n        with self._condition:\n            processor_types = self._identities.get(processor_identity)\n            if processor_types is None:\n                LOGGER.warning(\"transaction processor with identity %s tried \"\n                               \"to unregister but was not registered\",\n                               processor_identity)\n                return\n            for processor_type in processor_types:\n                if processor_type not in self._processors:\n                    LOGGER.warning(\"processor type %s not a known processor \"\n                                   \"type but is associated with identity %s\",\n                                   processor_type,\n                                   processor_identity)\n                    continue\n                self._processors[processor_type].remove_processor(\n                    processor_identity=processor_identity)\n                if not self._processors[processor_type]:\n                    del self._processors[processor_type]", "docstring": "Removes all of the Processors for\na particular transaction processor zeromq identity.\n\nArgs:\nprocessor_identity (str): The zeromq identity of the transaction\nprocessor.", "source": "juraj-google-style"}
{"code": "def postprocess(x, n_bits_x=8):\n  \n  x = tf.where(tf.is_finite(x), x, tf.ones_like(x))\n  x = tf.clip_by_value(x, -0.5, 0.5)\n  x += 0.5\n  x = x * 2**n_bits_x\n  return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8)", "docstring": "Converts x from [-0.5, 0.5], to [0, 255].\n\nArgs:\nx: 3-D or 4-D Tensor normalized between [-0.5, 0.5]\nn_bits_x: Number of bits representing each pixel of the output.\nDefaults to 8, to default to 256 possible values.\nReturns:\nx: 3-D or 4-D Tensor representing images or videos.", "source": "juraj-google-style"}
{"code": "def uses_star_kwargs_in_call(node):\n    if sys.version_info[:2] >= (3, 5):\n        for keyword in node.keywords:\n            if keyword.arg is None:\n                return True\n    elif node.kwargs:\n        return True\n    return False", "docstring": "Check if an ast.Call node uses arbitrary-length **kwargs.\n\nThis function works with the AST call node format of Python3.5+\nas well as the different AST format of earlier versions of Python.\n\nArgs:\nnode: The ast.Call node to check arg values for.\n\nReturns:\nTrue if the node uses starred variadic positional args or keyword args.\nFalse if it does not.", "source": "github-repos"}
{"code": "def findContours(*args, **kwargs):\n    if cv2.__version__.startswith('4'):\n        (contours, hierarchy) = cv2.findContours(*args, **kwargs)\n    elif cv2.__version__.startswith('3'):\n        (_, contours, hierarchy) = cv2.findContours(*args, **kwargs)\n    else:\n        raise AssertionError('cv2 must be either version 3 or 4 to call this method')\n    return (contours, hierarchy)", "docstring": "Wraps cv2.findContours to maintain compatiblity between versions\n3 and 4\n\nReturns:\ncontours, hierarchy", "source": "codesearchnet"}
{"code": "def pprint_value(self, value):\n        \n        own_type = type(value) if self.type is None else self.type\n        formatter = (self.value_format if self.value_format\n                     else self.type_formatters.get(own_type))\n        if formatter:\n            if callable(formatter):\n                return formatter(value)\n            elif isinstance(formatter, basestring):\n                if isinstance(value, (dt.datetime, dt.date)):\n                    return value.strftime(formatter)\n                elif isinstance(value, np.datetime64):\n                    return util.dt64_to_dt(value).strftime(formatter)\n                elif re.findall(r\"\\{(\\w+)\\}\", formatter):\n                    return formatter.format(value)\n                else:\n                    return formatter % value\n        return unicode(bytes_to_unicode(value))", "docstring": "Applies the applicable formatter to the value.\n\nArgs:\nvalue: Dimension value to format\n\nReturns:\nFormatted dimension value", "source": "juraj-google-style"}
{"code": "def safe_indicator(self, indicator, errors='strict'):\n        \n        if indicator is not None:\n            try:\n                indicator = quote(self.s(str(indicator), errors=errors), safe='~')\n            except KeyError:\n                indicator = quote(bytes(indicator), safe='~')\n        return indicator", "docstring": "Indicator encode value for safe HTTP request.\n\nArgs:\nindicator (string): Indicator to URL Encode\nerrors (string): The error handler type.\n\nReturns:\n(string): The urlencoded string", "source": "juraj-google-style"}
{"code": "def get_cuda_compute_capability(source_from_url=False):\n    if not GPU_TYPE:\n        if FLAGS.debug:\n            print('Warning: GPU_TYPE is empty. Make sure to call `get_gpu_type()` first.')\n    elif GPU_TYPE == 'unknown':\n        if FLAGS.debug:\n            print('Warning: Unknown GPU is detected. Skipping CUDA compute capability retrieval.')\n    else:\n        if source_from_url:\n            cuda_compute_capa = cuda_compute_capability.retrieve_from_web()\n        else:\n            cuda_compute_capa = cuda_compute_capability.retrieve_from_golden()\n        return cuda_compute_capa[GPU_TYPE]\n    return", "docstring": "Retrieves CUDA compute capability based on the detected GPU type.\n\nThis function uses the `cuda_compute_capability` module to retrieve the\ncorresponding CUDA compute capability for the given GPU type.\n\nArgs:\nsource_from_url: Boolean deciding whether to source compute capability\nfrom NVIDIA website or from a local golden file.\n\nReturns:\nList of all supported CUDA compute capabilities for the given GPU type.\ne.g. ['3.5', '3.7']", "source": "github-repos"}
{"code": "def GetPathSegmentAndSuffix(self, base_path, path):\n    \n    if path is None or base_path is None or not path.startswith(base_path):\n      return None, None\n\n    path_index = len(base_path)\n    if base_path and not base_path.endswith(self.PATH_SEPARATOR):\n      path_index += 1\n\n    if path_index == len(path):\n      return '', ''\n\n    path_segment, _, suffix = path[path_index:].partition(self.PATH_SEPARATOR)\n    return path_segment, suffix", "docstring": "Determines the path segment and suffix of the path.\n\nNone is returned if the path does not start with the base path and\nan empty string if the path exactly matches the base path.\n\nArgs:\nbase_path (str): base path.\npath (str): path.\n\nReturns:\ntuple[str, str]: path segment and suffix string.", "source": "juraj-google-style"}
{"code": "def post_comment(self, sharekey=None, comment=None):\n    endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)\n    post_data = {'body': comment}\n    data = self._make_request('POST', endpoint=endpoint, data=post_data)\n    return Comment.NewFromJSON(data)", "docstring": "Post a comment on behalf of the current user to the\nSharedFile with the given sharekey.\n\nArgs:\nsharekey (str): Sharekey of the SharedFile to which you'd like\nto post a comment.\ncomment (str): Text of the comment to post.\n\nReturns:\nComment object.", "source": "codesearchnet"}
{"code": "def checkout_commit(repo: Repo, commit_id: str):\n    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref\n    try:\n        repo.git.checkout(commit_id)\n        yield\n    finally:\n        repo.git.checkout(current_head)", "docstring": "Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.\n\nArgs:\nrepo (`git.Repo`): A git repository (for instance the Transformers repo).\ncommit_id (`str`): The commit reference to checkout inside the context manager.", "source": "github-repos"}
{"code": "def migrate(connection, dsn):\n    \n    all_migrations = _get_all_migrations()\n    logger.debug('Collected migrations: {}'.format(all_migrations))\n\n    for version, modname in all_migrations:\n        if _is_missed(connection, version) and version <= SCHEMA_VERSION:\n            logger.info('Missed migration: {} migration is missed. Migrating...'.format(version))\n            module = __import__(modname, fromlist='dummy')\n\n            \n            \n            trans = connection.begin()\n            try:\n                module.Migration().migrate(connection)\n                _update_version(connection, version)\n                trans.commit()\n            except:\n                trans.rollback()\n                logger.error(\"Failed to migrate '{}'  on {} \".format(version, dsn))\n                raise", "docstring": "Collects all migrations and applies missed.\n\nArgs:\nconnection (sqlalchemy connection):", "source": "juraj-google-style"}
{"code": "def host_impl(self, run, tool):\n    hosts = {}\n    run_dir = self._run_dir(run)\n    if (not run_dir):\n        logger.warn('Cannot find asset directory for: %s', run)\n        return hosts\n    tool_pattern = ('*' + TOOLS[tool])\n    try:\n        files = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))\n        hosts = [os.path.basename(f).replace(TOOLS[tool], '') for f in files]\n    except tf.errors.OpError as e:\n        logger.warn('Cannot read asset directory: %s, OpError %s', run_dir, e)\n    return hosts", "docstring": "Returns available hosts for the run and tool in the log directory.\n\nIn the plugin log directory, each directory contains profile data for a\nsingle run (identified by the directory name), and files in the run\ndirectory contains data for different tools and hosts. The file that\ncontains profile for a specific tool \"x\" will have a prefix name TOOLS[\"x\"].\n\nExample:\nlog/\nrun1/\nplugins/\nprofile/\nhost1.trace\nhost2.trace\nrun2/\nplugins/\nprofile/\nhost1.trace\nhost2.trace\n\nReturns:\nA list of host names e.g.\n{\"host1\", \"host2\", \"host3\"} for the example.", "source": "codesearchnet"}
{"code": "def add_streamer(self, binary_descriptor):\n        \n\n        streamer = streamer_descriptor.parse_binary_descriptor(binary_descriptor)\n\n        try:\n            self.graph.add_streamer(streamer)\n            self.streamer_status[len(self.graph.streamers) - 1] = StreamerStatus()\n\n            return Error.NO_ERROR\n        except ResourceUsageError:\n            return _pack_sgerror(SensorGraphError.NO_MORE_STREAMER_RESOURCES)", "docstring": "Add a streamer to the sensor_graph using a binary streamer descriptor.\n\nArgs:\nbinary_descriptor (bytes): An encoded binary streamer descriptor.\n\nReturns:\nint: A packed error code", "source": "juraj-google-style"}
{"code": "def get_config(self, key, default=MISSING):\n        \n\n        keyname = \"config:\" + key\n\n        try:\n            return self.kvstore.get(keyname)\n        except KeyError:\n            if default is MISSING:\n                raise ArgumentError(\"No config value found for key\", key=key)\n\n            return default", "docstring": "Get the value of a persistent config key from the registry\n\nIf no default is specified and the key is not found ArgumentError is raised.\n\nArgs:\nkey (string): The key name to fetch\ndefault (string): an optional value to be returned if key cannot be found\n\nReturns:\nstring: the key's value", "source": "juraj-google-style"}
{"code": "def logical_enclosures(self):\n    if (not self.__logical_enclosures):\n        self.__logical_enclosures = LogicalEnclosures(self.__connection)\n    return self.__logical_enclosures", "docstring": "Gets the LogicalEnclosures API client.\n\nReturns:\nLogicalEnclosures:", "source": "codesearchnet"}
{"code": "def pnum_to_processor_coordinates(mesh_shape, pnum):\n    ret = []\n    for dimsize in mesh_shape.to_integer_list[::(- 1)]:\n        ret.append((pnum % dimsize))\n        pnum \n    return ret[::(- 1)]", "docstring": "Coordinates of a processor in the mesh.\n\nArgs:\nmesh_shape: a Shape\npnum: an integer less than len(mesh_shape)\n\nReturns:\na list of integers with length len(mesh_shape)", "source": "codesearchnet"}
{"code": "def diff_toDelta(self, diffs):\n    \n    text = []\n    for (op, data) in diffs:\n      if op == self.DIFF_INSERT:\n        \n        data = data.encode(\"utf-8\")\n        text.append(\"+\" + urllib.quote(data, \"!~*'();/?:@&=+$,\n      elif op == self.DIFF_DELETE:\n        text.append(\"-%d\" % len(data))\n      elif op == self.DIFF_EQUAL:\n        text.append(\"=%d\" % len(data))\n    return \"\\t\".join(text)", "docstring": "Crush the diff into an encoded string which describes the operations\nrequired to transform text1 into text2.\nE.g. =3\\t-2\\t+ing  -> Keep 3 chars, delete 2 chars, insert 'ing'.\nOperations are tab-separated.  Inserted text is escaped using %xx notation.\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nDelta text.", "source": "juraj-google-style"}
{"code": "def _get_sqlite_columns(connection, table):\n    \n    \n    \n    SQL_TO_PYTHON_TYPES = {\n        'INT': int,\n        'INTEGER': int,\n        'TINYINT': int,\n        'SMALLINT': int,\n        'MEDIUMINT': int,\n        'BIGINT': int,\n        'UNSIGNED BIG INT': int,\n        'INT': int,\n        'INT8': int,\n        'NUMERIC': float,\n        'REAL': float,\n        'FLOAT': float,\n        'DOUBLE': float,\n        'BOOLEAN': bool,\n        'CHARACTER': str,\n        'VARCHAR': str,\n        'TEXT': str\n    }\n    query = 'PRAGMA table_info(\\'{}\\');'\n    result = connection.execute(query.format(table))\n    ret = []\n\n    for row in result:\n        position = row[0] + 1\n        name = row[1]\n        datatype = row[2]\n        try:\n            datatype = SQL_TO_PYTHON_TYPES[datatype]\n        except KeyError:\n            raise Exception(\n                'Do not know how to convert {} sql datatype to python data type.'\n                .format(datatype))\n        ret.append((name, datatype, position))\n    return ret", "docstring": "Returns list of tuple containg columns of the table.\n\nArgs:\nconnection: sqlalchemy connection to sqlite database.\ntable (str): name of the table\n\nReturns:\nlist of (name, datatype, position): where name is column name, datatype is\npython type of the column, position is ordinal position of the column.", "source": "juraj-google-style"}
{"code": "def _create_deployment_object(self, job_name, job_image,\n                                  deployment_name, port=80,\n                                  replicas=1,\n                                  cmd_string=None,\n                                  engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',\n                                  engine_dir='.',\n                                  volumes=[]):\n        \n\n        \n        \n        security_context = None\n        if self.user_id and self.group_id:\n            security_context = client.V1SecurityContext(run_as_group=self.group_id,\n                                                        run_as_user=self.user_id,\n                                                        run_as_non_root=self.run_as_non_root)\n\n        \n        environment_vars = client.V1EnvVar(name=\"TEST\", value=\"SOME DATA\")\n\n        launch_args = [\"-c\", \"{0}; /app/deploy.sh;\".format(cmd_string)]\n\n        volume_mounts = []\n        \n        for volume in volumes:\n            volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],\n                                                      name=volume[0]))\n        \n        container = None\n        if security_context:\n            container = client.V1Container(\n                name=job_name,\n                image=job_image,\n                ports=[client.V1ContainerPort(container_port=port)],\n                volume_mounts=volume_mounts,\n                command=['/bin/bash'],\n                args=launch_args,\n                env=[environment_vars],\n                security_context=security_context)\n        else:\n            container = client.V1Container(\n                name=job_name,\n                image=job_image,\n                ports=[client.V1ContainerPort(container_port=port)],\n                volume_mounts=volume_mounts,\n                command=['/bin/bash'],\n                args=launch_args,\n                env=[environment_vars])\n        \n        secret = None\n        if self.secret:\n            secret = client.V1LocalObjectReference(name=self.secret)\n\n        \n        volume_defs = []\n        for volume in volumes:\n            volume_defs.append(client.V1Volume(name=volume[0],\n                                               persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(\n                                                   claim_name=volume[0])))\n\n        \n        template = client.V1PodTemplateSpec(\n            metadata=client.V1ObjectMeta(labels={\"app\": job_name}),\n            spec=client.V1PodSpec(containers=[container],\n                                  image_pull_secrets=[secret],\n                                  volumes=volume_defs\n                                  ))\n\n        \n        spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,\n                                                      template=template)\n\n        \n        deployment = client.ExtensionsV1beta1Deployment(\n            api_version=\"extensions/v1beta1\",\n            kind=\"Deployment\",\n            metadata=client.V1ObjectMeta(name=deployment_name),\n            spec=spec)\n\n        return deployment", "docstring": "Create a kubernetes deployment for the job.\nArgs:\n- job_name (string) : Name of the job and deployment\n- job_image (string) : Docker image to launch\nKWargs:\n- port (integer) : Container port\n- replicas : Number of replica containers to maintain\nReturns:\n- True: The 
deployment object to launch", "source": "juraj-google-style"}
{"code": "def resolve_import(self, item):\n    name = item.name\n    short_name = None\n    if (item.is_from and (not item.is_star)):\n        if ('.' in name.lstrip('.')):\n            rindex = name.rfind('.')\n        else:\n            rindex = (name.rfind('.') + 1)\n        short_name = name[:rindex]\n    if import_finder.is_builtin(name):\n        filename = (name + '.so')\n        return Builtin(filename, name)\n    (filename, level) = convert_to_path(name)\n    if level:\n        filename = os.path.normpath(os.path.join(self.current_directory, filename))\n    files = [(name, filename)]\n    if short_name:\n        short_filename = os.path.dirname(filename)\n        files.append((short_name, short_filename))\n    for (module_name, path) in files:\n        for fs in self.fs_path:\n            f = self._find_file(fs, path)\n            if ((not f) or (f == self.current_module.path)):\n                continue\n            if item.is_relative():\n                package_name = self.current_module.package_name\n                if (package_name is None):\n                    raise ImportException(name)\n                module_name = get_absolute_name(package_name, module_name)\n                if isinstance(self.current_module, System):\n                    return System(f, module_name)\n            return Local(f, module_name, fs)\n    if item.source:\n        (prefix, ext) = os.path.splitext(item.source)\n        mod_name = name\n        if short_name:\n            mod = prefix.replace(os.path.sep, '.')\n            mod = utils.strip_suffix(mod, '.__init__')\n            if ((not mod.endswith(name)) and mod.endswith(short_name)):\n                mod_name = short_name\n        if (ext == '.pyc'):\n            pyfile = (prefix + '.py')\n            if os.path.exists(pyfile):\n                return System(pyfile, mod_name)\n        elif (not ext):\n            pyfile = os.path.join(prefix, '__init__.py')\n            if os.path.exists(pyfile):\n                return System(pyfile, mod_name)\n        return System(item.source, mod_name)\n    raise ImportException(name)", "docstring": "Simulate how Python resolves imports.\n\nReturns the filename of the source file Python would load\nwhen processing a statement like 'import name' in the module\nwe're currently under.\n\nArgs:\nitem: An instance of ImportItem\n\nReturns:\nA filename\n\nRaises:\nImportException: If the module doesn't exist.", "source": "codesearchnet"}
{"code": "def is_valid_geometry(geometry):\n    if (isinstance(geometry, Polygon) or isinstance(geometry, MultiPolygon)):\n        return True\n    else:\n        return False", "docstring": "Confirm that the geometry type is of type Polygon or MultiPolygon.\n\nArgs:\ngeometry (BaseGeometry): BaseGeometry instance (e.g. Polygon)\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def test_sample_sabr(self, supply_grad_vol_fn):\n    dtype = np.float64\n    drift_fn = lambda _, x: tf.zeros_like(x)\n    beta = tf.constant(0.5, dtype=dtype)\n    volvol = tf.constant(1.0, dtype=dtype)\n    rho = tf.constant(0.2, dtype=dtype)\n\n    def vol_fn(t, x):\n        \n        del t\n        f = x[..., 0]\n        v = x[..., 1]\n        fb = f ** beta\n        m11 = v * fb * tf.math.sqrt(1 - tf.square(rho))\n        m12 = v * fb * rho\n        m21 = tf.zeros_like(m11)\n        m22 = volvol * v\n        mc1 = tf.concat([tf.expand_dims(m11, -1), tf.expand_dims(m21, -1)], -1)\n        mc2 = tf.concat([tf.expand_dims(m12, -1), tf.expand_dims(m22, -1)], -1)\n        should_be_zero = tf.expand_dims(tf.expand_dims((beta != 0) & (f <= 0.0), -1), -1)\n        vol_matrix = tf.concat([tf.expand_dims(mc1, -1), tf.expand_dims(mc2, -1)], -1)\n        return tf.where(should_be_zero, tf.zeros_like(vol_matrix), vol_matrix)\n    if supply_grad_vol_fn:\n\n        def _grad_volatility_fn(current_time, current_state, input_gradients):\n            return gradient.fwd_gradient(functools.partial(vol_fn, current_time), current_state, input_gradients=input_gradients, unconnected_gradients=tf.UnconnectedGradients.ZERO)\n        grad_volatility_fn = _grad_volatility_fn\n    else:\n        grad_volatility_fn = None\n    times = np.array([0.0, 0.1, 0.21, 0.32, 0.43, 0.55])\n    x0 = np.array([0.1, 0.2])\n    paths = self.evaluate(milstein_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=1000, initial_state=x0, grad_volatility_fn=grad_volatility_fn, random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC, time_step=0.01, seed=[1, 42]))\n    mean = np.average(paths)\n    stddev = np.std(paths)\n    euler_paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, time_step=0.01, num_samples=10000, initial_state=x0, random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC, seed=[1, 42]))\n    euler_mean = np.average(euler_paths)\n    euler_stddev = np.std(euler_paths)\n    self.assertAllClose((mean, stddev), (euler_mean, euler_stddev), rtol=0.05, atol=0.05)", "docstring": "Tests path properties for SABR.\n\nWe construct the following Ito process.\n\n```\ndF_t = v_t * F_t ^ beta * dW_{F,t}\ndv_t = volvol * v_t * dW_{v,t}\ndW_{F,t} * dW_{v,t} = rho * dt\n```\n\n`F_t` is the forward. `v_t` is volatility. `beta` is the CEV parameter.\n`volvol` is volatility of volatility. `W_{F,t}` and `W_{v,t}` are two\ncorrelated Wiener processes with instantaneous correlation `rho`.\n\nArgs:\nsupply_grad_vol_fn: A bool. Whether or not to supply a grad_volatility_fn.", "source": "github-repos"}
{"code": "def assert_same_rank(self, other):\n        \n        other = as_shape(other)\n        if self.ndims is not None and other.ndims is not None:\n            if self.ndims != other.ndims:\n                raise ValueError(\n                    \"Shapes %s and %s must have the same rank\" % (self, other)\n                )", "docstring": "Raises an exception if `self` and `other` do not have convertible ranks.\n\nArgs:\nother: Another `TensorShape`.\n\nRaises:\nValueError: If `self` and `other` do not represent shapes with the\nsame rank.", "source": "juraj-google-style"}
{"code": "def is_kdump_iommu_enabled(self):\n    for line in self._boot_entries:\n        if (line.cmdline and (IOMMU in line.cmdline)):\n            return True\n    return False", "docstring": "Does any kernel have 'intel_iommu=on' set?\n\nReturns:\n(bool): ``True`` when 'intel_iommu=on' is set, otherwise returns ``False``", "source": "codesearchnet"}
{"code": "def add_data(self, data):\n        \n\n        if self.data_size - self.data_index < len(data):\n            return Error.DESTINATION_BUFFER_TOO_SMALL\n\n        if self.in_progress is not None:\n            self.in_progress.data += data\n\n        return Error.NO_ERROR", "docstring": "Add data to the currently in progress entry.\n\nArgs:\ndata (bytes): The data that we want to add.\n\nReturns:\nint: An error code", "source": "juraj-google-style"}
{"code": "def _apply_discount(values, discount_factors, exercise_index):\n    return discount_factors[exercise_index + 1] / discount_factors[exercise_index] * values", "docstring": "Returns discounted values at the exercise time.\n\nArgs:\nvalues: A real `Tensor` of shape `[num_samples, batch_size]`. Tracks the\noptimal cashflow of each sample path for each payoff dimension at\n`exercise_index`.\ndiscount_factors: A `Tensor` of shape\n`[num_exercise_times + 1, num_samples, batch_size]`. The `dtype` should be\nthe same as of `samples`.\nexercise_index: An integer scalar `Tensor` representing the index of the\nexercise time of interest. Should be less than `num_exercise_times`.\n\nReturns:\nA `[num_samples, batch_size]` `Tensor` whose entries represent the sum of\nthose elements to the right of `exercise_index` in `cashflow`, discounted to\nthe time indexed by `exercise_index`. When `exercise_index` is zero, the\nreturn represents the sum of the cashflow discounted to present value for\neach sample path.", "source": "github-repos"}
{"code": "def Resolve(self, env, resolved_params):\n    raise NotImplementedError('Resolve() is not implemented: ' + self.name)", "docstring": "Resolve object.\n\nIt resolves any internal attributes with unresolved values, then returns\nthe resolved values which can be used for graph.\n\nArgs:\nenv: Environment with all information necessary to resolve internal\nattributes.\nresolved_params: Resolved values which will possibly be referenced by\ninternal attributes.\n\nRaises:\nNotImplementedError", "source": "github-repos"}
{"code": "def pack(self, value=None):\n    if (value is None):\n        output = self.header.pack()\n        output += self.value.pack()\n        return output\n    elif isinstance(value, type(self)):\n        return value.pack()\n    else:\n        msg = '{} is not an instance of {}'.format(value, type(self).__name__)\n        raise PackException(msg)", "docstring": "Pack the TLV in a binary representation.\n\nReturns:\nbytes: Binary representation of the struct object.\n\nRaises:\n:exc:`~.exceptions.ValidationError`: If validation fails.", "source": "codesearchnet"}
{"code": "def experimental_make_numpy_dataset(self, numpy_input, session=None):\n    return self.extended.experimental_make_numpy_dataset(numpy_input, session=session)", "docstring": "Makes a tf.data.Dataset for input provided via a numpy array.\n\nThis avoids adding `numpy_input` as a large constant in the graph,\nand copies the data to the machine or machines that will be processing\nthe input.\n\nNote that you will likely need to use\ntf.distribute.Strategy.experimental_distribute_dataset\nwith the returned dataset to further distribute it with the strategy.\n\nExample:\n```\nnumpy_input = np.ones([10], dtype=np.float32)\ndataset = strategy.experimental_make_numpy_dataset(numpy_input)\ndist_dataset = strategy.experimental_distribute_dataset(dataset)\n```\n\nArgs:\nnumpy_input: A nest of NumPy input arrays that will be converted into a\ndataset. Note that lists of Numpy arrays are stacked, as that is normal\n`tf.data.Dataset` behavior.\nsession: (TensorFlow v1.x graph execution only) A session used for\ninitialization.\n\nReturns:\nA `tf.data.Dataset` representing `numpy_input`.", "source": "github-repos"}
{"code": "def __init__(self, initial_learning_rate, decay_steps, alpha=0.0, name=None):\n    super(CosineDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.alpha = alpha\n    self.name = name", "docstring": "Applies cosine decay to the learning rate.\n\nArgs:\ninitial_learning_rate: A scalar `float32` or `float64` Tensor or a\nPython number. The initial learning rate.\ndecay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\nNumber of steps to decay over.\nalpha: A scalar `float32` or `float64` Tensor or a Python number.\nMinimum learning rate value as a fraction of initial_learning_rate.\nname: String. Optional name of the operation.  Defaults to 'CosineDecay'.", "source": "github-repos"}
{"code": "def _ParseInformationalOptions(self, options):\n    \n    self._debug_mode = getattr(options, 'debug', False)\n    self._quiet_mode = getattr(options, 'quiet', False)\n\n    if self._debug_mode and self._quiet_mode:\n      logger.warning(\n          'Cannot use debug and quiet mode at the same time, defaulting to '\n          'debug output.')", "docstring": "Parses the informational options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.", "source": "juraj-google-style"}
{"code": "def add_section(self, section):\n        \n        if section in self.sections():\n            raise DuplicateSectionError(section)\n        if isinstance(section, str):\n            \n            section = Section(section, container=self)\n        elif not isinstance(section, Section):\n            raise ValueError(\"Parameter must be a string or Section type!\")\n        self._structure.append(section)", "docstring": "Create a new section in the configuration.\n\nRaise DuplicateSectionError if a section by the specified name\nalready exists. Raise ValueError if name is DEFAULT.\n\nArgs:\nsection (str or :class:`Section`): name or Section type", "source": "juraj-google-style"}
{"code": "def get_flat_neurites(neuron, tol=0.1, method='ratio'):\n    return [n for n in neuron.neurites if is_flat(n, tol, method)]", "docstring": "Check if a neuron has neurites that are flat within a tolerance\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): the tolerance or the ratio\nmethod(string): 'tolerance' or 'ratio' described in :meth:`is_flat`\n\nReturns:\nBool list corresponding to the flatness check for each neurite\nin neuron neurites with respect to the given criteria", "source": "codesearchnet"}
{"code": "def sample(reader, writer, n, start=None, stop=None, tsCol=None, writeSampleOnly=True):\n    rows = list(reader)\n    if (tsCol is not None):\n        ts = rows[0][tsCol]\n        inc = (rows[1][tsCol] - ts)\n    if (start is None):\n        start = 0\n    if (stop is None):\n        stop = (len(rows) - 1)\n    initialN = ((stop - start) + 1)\n    numDeletes = (initialN - n)\n    for i in xrange(numDeletes):\n        delIndex = random.randint(start, (stop - i))\n        del rows[delIndex]\n    if writeSampleOnly:\n        rows = rows[start:(start + n)]\n    if (tsCol is not None):\n        ts = rows[0][tsCol]\n    for row in rows:\n        if (tsCol is not None):\n            row[tsCol] = ts\n            ts += inc\n        writer.appendRecord(row)", "docstring": "Samples n rows.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream object to write output data to.\nn: The number of elements to sample.\nstart: The first row in the range to sample from.\nstop: The last row in the range to sample from.\ntsCol: If specified, the timestamp column to update.\nwriteSampleOnly: If False, the rows before start are written before the\nsample and the rows after stop are written after the sample.", "source": "codesearchnet"}
{"code": "def clipped_zoom(img, zoom_factor):\n    h = img.shape[0]\n    ch = int(np.ceil((h / float(zoom_factor))))\n    top_h = ((h - ch) \n    w = img.shape[1]\n    cw = int(np.ceil((w / float(zoom_factor))))\n    top_w = ((w - cw) \n    img = tfds.core.lazy_imports.scipy.ndimage.zoom(img[(top_h:(top_h + ch), top_w:(top_w + cw))], (zoom_factor, zoom_factor, 1), order=1)\n    trim_top_h = ((img.shape[0] - h) \n    trim_top_w = ((img.shape[1] - w) \n    return img[(trim_top_h:(trim_top_h + h), trim_top_w:(trim_top_w + w))]", "docstring": "Zoom image with clipping.\n\nZoom the central part of the image and clip extra pixels.\n\nArgs:\nimg: numpy array, uncorrupted image.\nzoom_factor: numpy array, a sequence of float numbers for zoom factor.\n\nReturns:\nnumpy array, zoomed image after clipping.", "source": "codesearchnet"}
{"code": "def recipe_dv360_data_warehouse(config, auth_bigquery, auth_dv, recipe_slug, partners):\n    dataset(config, {'description': 'Create a dataset for bigquery tables.', 'auth': auth_bigquery, 'dataset': recipe_slug})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'partners.get', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'legacy': False, 'query': 'SELECT CAST(partnerId AS STRING) partnerId FROM (SELECT DISTINCT * FROM UNNEST({partners}) AS partnerId)', 'parameters': {'partners': partners}}}, 'iterate': False, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Partners'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(partnerId\\nAS STRING) partnerId FROM `DV360_Partners`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Advertisers'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.insertionOrders.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_InsertionOrders'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.lineItems.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_LineItems'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.campaigns.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Campaigns'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.channels.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Channels'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'advertisers.creatives.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Creatives'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'inventorySources.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT 
CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Inventory_Sources'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'googleAudiences.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Google_Audiences'}}})\n    google_api(config, {'auth': auth_dv, 'api': 'displayvideo', 'version': 'v1', 'function': 'combinedAudiences.list', 'kwargs_remote': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`', 'legacy': False}}, 'iterate': True, 'results': {'bigquery': {'auth': auth_bigquery, 'dataset': recipe_slug, 'table': 'DV360_Combined_Audiences'}}})", "docstring": "Deploy a BigQuery dataset mirroring DV360 account structure. Foundation for\nsolutions on top.\n\nArgs:\nauth_bigquery (authentication) - Credentials used for writing data.\nauth_dv (authentication) - Credentials used for reading data.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.\npartners (integer_list) - List of account ids to pull.", "source": "github-repos"}
{"code": "def subscribe(self, devices_to_bind=[]):\n    if (self.entity_api_key == ''):\n        return {'status': 'failure', 'response': 'No API key found in request'}\n    self.bind(devices_to_bind)\n    loop = asyncio.new_event_loop()\n    t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,))\n    t1.daemon = True\n    t1.start()", "docstring": "This function allows an entity to subscribe for data from the devices specified in the bind operation. It\ncreates a thread with an event loop to manager the tasks created in start_subscribe_worker.\n\nArgs:\ndevices_to_bind (list): an array of devices to listen to", "source": "codesearchnet"}
{"code": "def bootstrap(score_objs, n_boot=1000):\n    \n    all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)\n    return all_samples.sum(axis=1)", "docstring": "Given a set of DistributedROC or DistributedReliability objects, this function performs a\nbootstrap resampling of the objects and returns n_boot aggregations of them.\n\nArgs:\nscore_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method\nn_boot (int): Number of bootstrap samples\n\nReturns:\nAn array of DistributedROC or DistributedReliability", "source": "juraj-google-style"}
{"code": "def request_with_retry(func, *args, **kwargs):\n    max_retries = kwargs.pop('max_retries', 30)\n    sleep = 2\n    retry_count = 0\n    while True:\n        try:\n            response = func(*args, **kwargs)\n            response.raise_for_status()\n            return response\n        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e:\n            if (retry_count == max_retries):\n                return e\n            retry_count += 1\n            delay = (sleep + ((random.random() * 0.25) * sleep))\n            if (isinstance(e, requests.exceptions.HTTPError) and (e.response.status_code == 429)):\n                logger.info(('Rate limit exceeded, retrying in %s seconds' % delay))\n            else:\n                logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s', e, args, kwargs)\n            time.sleep(delay)\n            sleep *= 2\n            if (sleep > MAX_SLEEP_SECONDS):\n                sleep = MAX_SLEEP_SECONDS\n        except requests.exceptions.RequestException as e:\n            logger.error(response.json()['error'])\n            logger.exception('requests_with_retry encountered unretryable exception: %s', e)\n            return e", "docstring": "Perform a requests http call, retrying with exponential backoff.\n\nArgs:\nfunc: An http-requesting function to call, like requests.post\nmax_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk\n*args: passed through to func\n**kwargs: passed through to func", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(ZipFile, self).__init__(resolver_context)\n    self._compressed_data = b''\n    self._current_offset = 0\n    self._file_system = None\n    self._realign_offset = True\n    self._uncompressed_data = b''\n    self._uncompressed_data_offset = 0\n    self._uncompressed_data_size = 0\n    self._uncompressed_stream_size = None\n    self._zip_ext_file = None\n    self._zip_file = None\n    self._zip_info = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "class Blip2Encoder(nn.Module):\n\n    def __init__(self, config: Blip2Config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Blip2EncoderLayer`].\n\nArgs:\nconfig (`Blip2Config`):\nThe corresponding vision configuration for the `Blip2Encoder`.", "source": "github-repos"}
{"code": "def assign_add(self, delta, use_locking=None, name=None, read_value=True):\n    with _handle_graph(self.handle), self._assign_dependencies():\n        assign_add_op = gen_resource_variable_ops.assign_add_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)\n    if read_value:\n        return self._lazy_read(assign_add_op)\n    return assign_add_op", "docstring": "Adds a value to this variable.\n\nArgs:\ndelta: A `Tensor`. The value to add to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: The name to use for the operation.\nread_value: A `bool`. Whether to read and return the new value of the\nvariable or not.\n\nReturns:\nIf `read_value` is `True`, this method will return the new value of the\nvariable after the assignment has completed. Otherwise, when in graph mode\nit will return the `Operation` that does the assignment, and when in eager\nmode it will return `None`.", "source": "github-repos"}
{"code": "def execute_plan(plan):\n    results = [action() for action in plan]\n    return [result for result in results if actns.step_has_failed(result)]", "docstring": "Execute the plan.\n\nArgs:\nplan (:obj:`list` of :obj:`actions.Step`): The plan we want to execute.\n\nReturns:\n(:obj:`list` of :obj:`actions.Step`): A list of failed actions.", "source": "codesearchnet"}
{"code": "def __init__(self, functions):\n    self._functions = functions\n    self._location_key_to_location = {}", "docstring": "Constructor.\n\nArgs:\nfunctions: A `Functions` object.", "source": "github-repos"}
{"code": "def process(self, element: Entity) -> Optional[Iterable[Text]]:\n    text_line = element.properties.get('content', '')\n    if not text_line:\n        self.empty_line_counter.inc()\n        return None\n    words = re.findall(\"[A-Za-z\\\\']+\", text_line)\n    for w in words:\n        self.word_length_counter.inc(len(w))\n        self.word_lengths_dist.update(len(w))\n        self.word_counter.inc()\n    return words", "docstring": "Extract words from the 'content' property of Cloud Datastore entities.\n\nThe element is a line of text.  If the line is blank, note that, too.\nArgs:\nelement: the input entity to be processed\nReturns:\nA list of words found.", "source": "github-repos"}
{"code": "def from_python_value(cls, value: Any) -> 'DType':\n    try:\n        return PY_TYPE_TO_DTYPE[type(value)]\n    except KeyError as e:\n        raise ValueError(f\"Couldn't find a dtype to store a value of type {type(value)}. Value is: {value}\") from e", "docstring": "Returns the corresponding DType for the given python-native value.\n\nArgs:\nvalue: A python variable to infer DType from (e.g: str, float).\n\nReturns:\nThe corresponding DType.\n\nRaises:\nValueError: If there's no DType implemented for this type of value.", "source": "github-repos"}
{"code": "def transformer_image_decoder(targets, encoder_output, ed_attention_bias, hparams, name=None):\n    with tf.variable_scope(name, default_name='transformer_dec'):\n        batch_size = common_layers.shape_list(targets)[0]\n        targets = tf.reshape(targets, [batch_size, hparams.img_len, hparams.img_len, (hparams.num_channels * hparams.hidden_size)])\n        (decoder_input, _, _) = cia.prepare_decoder(targets, hparams)\n        decoder_output = cia.transformer_decoder_layers(decoder_input, encoder_output, (hparams.num_decoder_layers or hparams.num_hidden_layers), hparams, attention_type=hparams.dec_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name='decoder')\n        decoder_output = tf.reshape(decoder_output, [batch_size, hparams.img_len, (hparams.img_len * hparams.num_channels), hparams.hidden_size])\n        return decoder_output", "docstring": "Transformer image decoder over targets with local attention.\n\nArgs:\ntargets: Tensor of shape [batch, ...], and whose size is batch * height *\nwidth * hparams.num_channels * hparams.hidden_size.\nencoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].\ned_attention_bias: Tensor which broadcasts with shape [batch,\nhparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.\nhparams: HParams.\nname: string, variable scope.\n\nReturns:\nTensor of shape [batch, height, width * hparams.num_channels,\nhparams.hidden_size].", "source": "codesearchnet"}
{"code": "def set_action_env_var(environ_cp, var_name, query_item, enabled_by_default, question=None, yes_reply=None, no_reply=None, bazel_config_name=None):\n    var = int(get_var(environ_cp, var_name, query_item, enabled_by_default, question, yes_reply, no_reply))\n    if not bazel_config_name:\n        write_action_env_to_bazelrc(var_name, var)\n    elif var:\n        write_to_bazelrc('build --config=%s' % bazel_config_name)\n    environ_cp[var_name] = str(var)", "docstring": "Set boolean action_env variable.\n\nAsk user if query_item will be enabled. Default is used if no input is given.\nSet environment variable and write to .bazelrc.\n\nArgs:\nenviron_cp: copy of the os.environ.\nvar_name: string for name of environment variable, e.g. \"TF_NEED_CUDA\".\nquery_item: string for feature related to the variable, e.g. \"CUDA for\nNvidia GPUs\".\nenabled_by_default: boolean for default behavior.\nquestion: optional string for how to ask for user input.\nyes_reply: optional string for reply when feature is enabled.\nno_reply: optional string for reply when feature is disabled.\nbazel_config_name: adding config to .bazelrc instead of action_env.", "source": "github-repos"}
{"code": "def add_key_value(self, key, value):\n        \n        key = self._metadata_map.get(key, key)\n        if key in ['dateAdded', 'eventDate', 'firstSeen', 'publishDate']:\n            self._group_data[key] = self._utils.format_datetime(\n                value, date_format='%Y-%m-%dT%H:%M:%SZ'\n            )\n        elif key == 'file_content':\n            \n            pass\n        else:\n            self._group_data[key] = value", "docstring": "Add custom field to Group object.\n\n.. note:: The key must be the exact name required by the batch schema.\n\nExample::\n\ndocument = tcex.batch.group('Document', 'My Document')\ndocument.add_key_value('fileName', 'something.pdf')\n\nArgs:\nkey (str): The field key to add to the JSON batch data.\nvalue (str): The field value to add to the JSON batch data.", "source": "juraj-google-style"}
{"code": "def __add_kickoff_task(cls, job_config, mapreduce_spec):\n    \n    params = {\"mapreduce_id\": job_config.job_id}\n    \n    kickoff_task = taskqueue.Task(\n        \n        url=job_config._base_path + \"/kickoffjob_callback/\" + job_config.job_id,\n        headers=util._get_task_headers(job_config.job_id),\n        params=params)\n    if job_config._hooks_cls:\n      hooks = job_config._hooks_cls(mapreduce_spec)\n      try:\n        hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)\n        return\n      except NotImplementedError:\n        pass\n    kickoff_task.add(job_config.queue_name, transactional=True)", "docstring": "Add kickoff task to taskqueue.\n\nArgs:\njob_config: map_job.JobConfig.\nmapreduce_spec: model.MapreduceSpec,", "source": "juraj-google-style"}
{"code": "def update(self, data):\n        \n        for key, value in data.items():\n            setattr(self, key, value)", "docstring": "Update the current memory record with the given data dict.\n\nArgs:\ndata (dict): Data dictionary to update the record attributes with.", "source": "juraj-google-style"}
{"code": "def InternalSendApdu(self, apdu_to_send):\n    \n    response = None\n    if not self.use_legacy_format:\n      response = apdu.ResponseApdu(self.transport.SendMsgBytes(\n          apdu_to_send.ToByteArray()))\n      if response.sw1 == 0x67 and response.sw2 == 0x00:\n        \n        \n        self.use_legacy_format = True\n        return self.InternalSendApdu(apdu_to_send)\n    else:\n      response = apdu.ResponseApdu(self.transport.SendMsgBytes(\n          apdu_to_send.ToLegacyU2FByteArray()))\n    return response", "docstring": "Send an APDU to the device.\n\nSends an APDU to the device, possibly falling back to the legacy\nencoding format that is not ISO7816-4 compatible.\n\nArgs:\napdu_to_send: The CommandApdu object to send\n\nReturns:\nThe ResponseApdu object constructed out of the devices reply.", "source": "juraj-google-style"}
{"code": "def from_string(rxn_string):\n        \n        rct_str, prod_str = rxn_string.split(\"->\")\n\n        def get_comp_amt(comp_str):\n            return {Composition(m.group(2)): float(m.group(1) or 1)\n                    for m in re.finditer(r\"([\\d\\.]*(?:[eE]-?[\\d\\.]+)?)\\s*([A-Z][\\w\\.\\(\\)]*)\",\n                                         comp_str)}\n        return BalancedReaction(get_comp_amt(rct_str), get_comp_amt(prod_str))", "docstring": "Generates a balanced reaction from a string. The reaction must\nalready be balanced.\n\nArgs:\nrxn_string:\nThe reaction string. For example, \"4 Li + O2-> 2Li2O\"\n\nReturns:\nBalancedReaction", "source": "juraj-google-style"}
{"code": "def assert_not_present(self, selector, testid=None, **kwargs):\n    self.info_log(('Assert not present selector(%s) testid(%s)' % (selector, testid)))\n    wait_until_not_present = kwargs.get('wait_until_not_present', BROME_CONFIG['proxy_driver']['wait_until_not_present_before_assert_not_present'])\n    self.debug_log(('effective wait_until_not_present: %s' % wait_until_not_present))\n    if wait_until_not_present:\n        ret = self.wait_until_not_present(selector, raise_exception=False)\n    else:\n        ret = (not self.is_present(selector))\n    if ret:\n        if (testid is not None):\n            self.create_test_result(testid, True)\n        return True\n    else:\n        if (testid is not None):\n            self.create_test_result(testid, False)\n        return False", "docstring": "Assert that the element is not present in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntest_id (str): the test_id or a str\n\nKwargs:\nwait_until_not_present (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "codesearchnet"}
{"code": "def get_maybe_abstract_instance(self, data):\n    if data.is_concrete:\n        data_type = type(data.pyval)\n        if data_type in self.primitive_instances:\n            return self.primitive_instances[data_type]\n    return data", "docstring": "Get an instance of the same type as the given data, abstract if possible.\n\nGet an abstract instance of primitive data stored as a\nConcreteValue. Return any other data as-is. This is used by\nconstant_to_var to discard concrete values that have been kept\naround for InterpreterFunction.\n\nThis method intentionally does not descend into containers, as doing so\ncauses new timeouts. If you need to discard concrete values inside\ncontainers, use abstract_utils.abstractify_variable instead.\n\nArguments:\ndata: The data.\n\nReturns:\nAn instance of the same type as the data, abstract if possible.", "source": "github-repos"}
{"code": "def _get_api_version(self):\n    url = '{base_url}/api/server_info'.format(base_url=self._base_url())\n    server_info = self._make_request(url=url, method='get')\n    return server_info['latest_api_version']", "docstring": "Fetches the most recent API version\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _ConvertValueBinaryDataToUBInt64(self, value):\n    \n    if not value:\n      return None\n\n    integer_map = self._GetDataTypeMap('uint64be')\n\n    try:\n      return self._ReadStructureFromByteStream(value, 0, integer_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse integer value with error: {0!s}'.format(\n              exception))", "docstring": "Converts a binary data value into an integer.\n\nArgs:\nvalue (bytes): binary data value containing an unsigned 64-bit big-endian\ninteger.\n\nReturns:\nint: integer representation of binary data value or None if value is\nnot set.\n\nRaises:\nParseError: if the integer value cannot be parsed.", "source": "juraj-google-style"}
{"code": "def get_opt_val(obj_pyxb, attr_str, default_val=None):\n    try:\n        return get_req_val(getattr(obj_pyxb, attr_str))\n    except (ValueError, AttributeError):\n        return default_val", "docstring": "Get an optional Simple Content value from a PyXB element.\n\nThe attributes for elements that are optional according to the schema and\nnot set in the PyXB object are present and set to None.\n\nPyXB validation will fail if required elements are missing.\n\nArgs:\nobj_pyxb: PyXB object\n\nattr_str: str\nName of an attribute that the PyXB object may contain.\n\ndefault_val: any object\nValue to return if the attribute is not present.\n\nReturns:\nstr : Value of the attribute if present, else ``default_val``.", "source": "codesearchnet"}
{"code": "def output(self, _filename):\n        \n\n        txt = \"\"\n        for contract in self.contracts:\n            print('Contract {}'.format(contract.name))\n            for function in contract.functions:\n                if function.contract == contract:\n                    print('\\tFunction {}'.format(function.full_name))\n                    for node in function.nodes:\n                        if node.expression:\n                            print('\\t\\tExpression: {}'.format(node.expression))\n                            print('\\t\\tIRs:')\n                            for ir in node.irs:\n                                print('\\t\\t\\t{}'.format(ir))\n                        elif node.irs:\n                            print('\\t\\tIRs:')\n                            for ir in node.irs:\n                                print('\\t\\t\\t{}'.format(ir))\n            for modifier in contract.modifiers:\n                if modifier.contract == contract:\n                    print('\\tModifier {}'.format(modifier.full_name))\n                    for node in modifier.nodes:\n                        print(node)\n                        if node.expression:\n                            print('\\t\\tExpression: {}'.format(node.expression))\n                            print('\\t\\tIRs:')\n                            for ir in node.irs:\n                                print('\\t\\t\\t{}'.format(ir))\n        self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def write_to_file(path, contents, file_type='text'):\n    FILE_TYPES = ('json', 'text', 'binary')\n    if (file_type not in FILE_TYPES):\n        raise ScriptWorkerException('Unknown file_type {} not in {}!'.format(file_type, FILE_TYPES))\n    if (file_type == 'json'):\n        contents = format_json(contents)\n    if (file_type == 'binary'):\n        with open(path, 'wb') as fh:\n            fh.write(contents)\n    else:\n        with open(path, 'w') as fh:\n            print(contents, file=fh, end='')", "docstring": "Write ``contents`` to ``path`` with optional formatting.\n\nSmall helper function to write ``contents`` to ``file`` with optional formatting.\n\nArgs:\npath (str): the path to write to\ncontents (str, object, or bytes): the contents to write to the file\nfile_type (str, optional): the type of file. Currently accepts\n``text`` or ``binary`` (contents are unchanged) or ``json`` (contents\nare formatted). Defaults to ``text``.\n\nRaises:\nScriptWorkerException: with an unknown ``file_type``\nTypeError: if ``file_type`` is ``json`` and ``contents`` isn't JSON serializable", "source": "codesearchnet"}
{"code": "def get_fleet(self, airline_key):\n    url = AIRLINE_FLEET_BASE.format(airline_key)\n    return self._fr24.get_airline_fleet_data(url, (self.AUTH_TOKEN != ''))", "docstring": "Get the fleet for a particular airline.\n\nGiven a airline code form the get_airlines() method output, this method returns the fleet for the airline.\n\nArgs:\nairline_key (str): The code for the airline on flightradar24\n\nReturns:\nA list of dicts, one for each aircraft in the airlines fleet\n\nExample::\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_fleet('ai-aic')", "source": "codesearchnet"}
{"code": "def HasExactlyCalls(self, *calls):\n    if len(calls) == 1 and _IsIterable(calls[0]) and (not isinstance(calls[0], mock._Call)):\n        calls = calls[0]\n    return AssertThat(self._actual.mock_calls).ContainsExactlyElementsIn(calls)", "docstring": "Assert that the mocked function was called with exactly the given calls.\n\nArgs:\n*calls: iterable of mock.call objects. Developers may also pass a single\niterable of mock.call objects, for compatibility with mock's\nassert_has_calls() method, although this form is not preferred.\n\nReturns:\nIf the mocked function was called exactly with the expected calls, returns\nan _Ordered predicate on which .InOrder() can be subsequently called.\n\nRaises:\nTruthAssertionError: the mocked function is missing any of the expected\ncalls, or it contains any call not in the expected calls.", "source": "github-repos"}
{"code": "def main(argv=None):\n    \n    args = None\n    cmd = None\n\n    try:\n        args = parse_args(argv)\n\n        if args.quiet:\n            logger.setLevel(logging.CRITICAL)\n\n        elif args.verbose:\n            logger.setLevel(logging.DEBUG)\n\n        cmd = args.func(args)\n        ret = cmd.run_cmd()\n    except KeyboardInterrupt:\n        logger.exception(\"interrupted by the user\")\n        ret = 252\n    except NotDvcRepoError:\n        logger.exception(\"\")\n        ret = 253\n    except DvcParserError:\n        ret = 254\n    except Exception:  \n        logger.exception(\"unexpected error\")\n        ret = 255\n\n    Analytics().send_cmd(cmd, args, ret)\n\n    return ret", "docstring": "Run dvc CLI command.\n\nArgs:\nargv: optional list of arguments to parse. sys.argv is used by default.\n\nReturns:\nint: command's return code.", "source": "juraj-google-style"}
{"code": "def generate_sb(date: datetime.datetime, project: str,\n                programme_block: str) -> dict:\n    \n    date = date.strftime('%Y%m%d')\n    instance_id = randint(0, 9999)\n    sb_id = 'SB-{}-{}-{:04d}'.format(date, project, instance_id)\n    return dict(id=sb_id, project=project, programme_block=programme_block)", "docstring": "Generate a Scheduling Block data object.\n\nArgs:\ndate (datetime.datetime): UTC date of the SBI\nproject (str): Project Name\nprogramme_block (str): Programme\n\nReturns:\nstr, Scheduling Block Instance (SBI) ID.", "source": "juraj-google-style"}
{"code": "def get_releasenotes(repo_path, from_commit=None, bugtracker_url=''):\n    repo = dulwich.repo.Repo(repo_path)\n    tags = get_tags(repo)\n    refs = get_refs(repo)\n    maj_version = 0\n    feat_version = 0\n    fix_version = 0\n    start_including = False\n    release_notes_per_major = OrderedDict()\n    cur_line = ''\n    if (from_commit is None):\n        start_including = True\n    prev_version = (maj_version, feat_version, fix_version)\n    prev_version_str = ('%s.%s.%s' % prev_version)\n    bugs = []\n    features = []\n    api_break_changes = []\n    for (commit_sha, children) in reversed(get_children_per_first_parent(repo_path).items()):\n        commit = get_repo_object(repo, commit_sha)\n        (maj_version, feat_version, fix_version) = get_version(commit=commit, tags=tags, maj_version=maj_version, feat_version=feat_version, fix_version=fix_version, children=children)\n        version = (maj_version, feat_version, fix_version)\n        version_str = ('%s.%s.%s' % version)\n        if (start_including or commit_sha.startswith(from_commit) or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))):\n            start_including = True\n            parent_commit_type = get_commit_type(commit=commit, children=children, tags=tags, prev_version=prev_version)\n            cur_line = pretty_commit(commit=commit, version=version_str, bugtracker_url=bugtracker_url, commit_type=parent_commit_type)\n            for child in children:\n                commit_type = get_commit_type(commit=commit, tags=tags, prev_version=prev_version)\n                cur_line += pretty_commit(commit=child, version=None, commit_type=commit_type, bugtracker_url=bugtracker_url)\n            if (parent_commit_type == 'api_break'):\n                release_notes_per_major[prev_version_str] = (api_break_changes, features, bugs)\n                (bugs, features, api_break_changes) = ([], [], [])\n                api_break_changes.append(cur_line)\n            elif (parent_commit_type == 'feature'):\n                features.append(cur_line)\n            else:\n                bugs.append(cur_line)\n        prev_version = version\n        prev_version_str = version_str\n    release_notes_per_major[prev_version_str] = (api_break_changes, features, bugs)\n    releasenotes = ''\n    for (major_version, lines) in reversed(release_notes_per_major.items()):\n        (api_break_changes, features, bugs) = lines\n        releasenotes += (u'New changes for version %s\\n=================================\\n\\nAPI Breaking changes\\n--------------------\\n%s\\nNew features\\n------------\\n%s\\nBugfixes and minor changes\\n--------------------------\\n%s\\n\\n' % (major_version, ('\\n'.join(reversed(api_break_changes)) or 'No new API breaking changes\\n'), ('\\n'.join(reversed(features)) or 'No new features\\n'), ('\\n'.join(reversed(bugs)) or 'No new bugs\\n')))\n    return releasenotes.strip()", "docstring": "Given a repo and optionally a base revision to start from, will return\na text suitable for the relase notes announcement, grouping the bugs, the\nfeatures and the api-breaking changes.\n\nArgs:\nrepo_path(str): Path to the code git repository.\nfrom_commit(str): Refspec of the commit to start aggregating the\nauthors from.\nbugtracker_url(str): URL to be prepended to any bug ids found in the\ncommits.\n\nReturns:\nstr: Release notes text.", "source": "codesearchnet"}
{"code": "def set_custom_getter_compose(custom_getter):\n  \n  tf.get_variable_scope().set_custom_getter(\n      _compose_custom_getters(tf.get_variable_scope().custom_getter,\n                              custom_getter))", "docstring": "Set a custom getter in the current variable scope.\n\nDo not overwrite the existing custom getter - rather compose with it.\n\nArgs:\ncustom_getter: a custom getter.", "source": "juraj-google-style"}
{"code": "def swap(self, left, right):\n        \n        if type(left) is not type(right):\n            raise LayoutError('The method swap only works with elements of the same type.')\n        temp = self[left]\n        self[left] = self[right]\n        self[right] = temp", "docstring": "Swaps the map between left and right.\nArgs:\nleft (tuple or int): Item to swap with right.\nright (tuple or int): Item to swap with left.\nRaises:\nLayoutError: If left and right have not the same type.", "source": "juraj-google-style"}
{"code": "def get_mask(self, layers=None, output='vector', in_global_mask=True):\n        \n        if in_global_mask:\n            output = 'vector'\n\n        if layers is None:\n            layers = self.layers.keys()\n        elif not isinstance(layers, list):\n            layers = [layers]\n\n        layers = map(lambda x: x if isinstance(x, string_types)\n                     else self.stack[x], layers)\n        layers = [self.layers[l] for l in layers if l in self.layers]\n\n        \n        layers.append(self.full)\n        layers = np.vstack(layers).T.astype(bool)\n        mask = layers.all(axis=1)\n        mask = self.get_image(mask, output)\n        return mask[self.global_mask] if in_global_mask else mask", "docstring": "Set the current mask by taking the conjunction of all specified\nlayers.\n\nArgs:\nlayers: Which layers to include. See documentation for add() for\nformat.\ninclude_global_mask: Whether or not to automatically include the\nglobal mask (i.e., self.volume) in the conjunction.", "source": "juraj-google-style"}
{"code": "def CreateTaskCompletion(self):\n    self.completion_time = int((time.time() * definitions.MICROSECONDS_PER_SECOND))\n    task_completion = TaskCompletion()\n    task_completion.aborted = self.aborted\n    task_completion.identifier = self.identifier\n    task_completion.session_identifier = self.session_identifier\n    task_completion.timestamp = self.completion_time\n    return task_completion", "docstring": "Creates a task completion.\n\nReturns:\nTaskCompletion: task completion attribute container.", "source": "codesearchnet"}
{"code": "def fetch_url(self, url):\n        \n        url_path = urlparse.urlsplit(url).path\n        dst_path = os.path.basename(url_path)\n        dst_path = self.paths.prefixed(dst_path)\n        with LogTask('Downloading %s' % url):\n            urllib.urlretrieve(url=os.path.expandvars(url), filename=dst_path)\n\n        return dst_path", "docstring": "Retrieves the given url to the prefix\n\nArgs:\nurl(str): Url to retrieve\n\nReturns:\nstr: path to the downloaded file", "source": "juraj-google-style"}
{"code": "def override_from_dict(self, values_dict):\n    for (name, value) in values_dict.items():\n        self.set_hparam(name, value)\n    return self", "docstring": "Override existing hyperparameter values, parsing new values from a dictionary.\n\nArgs:\nvalues_dict: Dictionary of name:value pairs.\n\nReturns:\nThe `HParams` instance.\n\nRaises:\nKeyError: If a hyperparameter in `values_dict` doesn't exist.\nValueError: If `values_dict` cannot be parsed.", "source": "codesearchnet"}
{"code": "def score_and_learn(self, data):\n    assert self._underlying\n    if self._underlying._features is not None:\n        x = beam.Row(**{f: getattr(data, f) for f in self._underlying._features})\n    else:\n        x = beam.Row(**data._asdict())\n    y_pred = self._underlying.score_one(x)\n    self._underlying.learn_one(x)\n    return y_pred", "docstring": "Scores and learns from a single data point.\n\nArgs:\ndata: A `beam.Row` representing the input data point.\n\nReturns:\nfloat: The anomaly score predicted by the model.", "source": "github-repos"}
{"code": "def _rapply(input_layer, operation, *op_args, **op_kwargs):\n  \n  op_args = list(op_args)\n  op_args.append(input_layer.tensor)\n  return input_layer.with_tensor(operation(*op_args, **op_kwargs))", "docstring": "Applies the given operation to this after expanding op_args.\n\nArgs:\ninput_layer: The input layer for this op.\noperation: An operation that takes a tensor and the supplied args.\n*op_args: Extra arguments for operation.\n**op_kwargs: Keyword arguments for the operation.\nReturns:\nA new layer with operation applied.", "source": "juraj-google-style"}
{"code": "def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec):\n    \n    ast_type = ast.__class__.__name__\n    trigger_functions = rule.get(\"trigger_function\", [])\n    trigger_types = rule.get(\"trigger_type\", [])\n    rule_subject = rule.get(\"subject\")\n    rule_relation = rule.get(\"relation\")\n    rule_object = rule.get(\"object\")\n\n    log.debug(f\"Running {rule_relation}  Type: {ast_type}\")\n\n    if isinstance(ast, Function):\n        function_name = ast.name\n        args = ast.args\n        parent_function = ast.parent_function\n\n        if function_name in trigger_functions:\n            if rule_subject == \"trigger_value\":\n                subject = ast\n\n            if rule_object == \"args\":\n                for arg in args:\n                    log.debug(f\"1: {subject} {arg}\")\n                    edge_ast = BELAst(subject, rule_relation, arg, spec)\n                    edges.append(edge_ast)\n            elif rule_object == \"parent_function\" and parent_function:\n                log.debug(f\"2: {subject} {parent_function}\")\n                edge_ast = BELAst(subject, rule_relation, parent_function, spec)\n                edges.append(edge_ast)\n\n        elif ast_type in trigger_types:\n            if rule_subject == \"trigger_value\":\n                subject = ast\n\n            if rule_object == \"args\":\n                for arg in args:\n                    log.debug(f\"3: {subject} {arg}\")\n                    edge_ast = BELAst(subject, rule_relation, arg, spec)\n                    edges.append(edge_ast)\n            elif rule_object == \"parent_function\" and parent_function:\n                log.debug(f\"4: {subject} {parent_function}\")\n                edge_ast = BELAst(subject, rule_relation, parent_function, spec)\n                edges.append(edge_ast)\n\n    if isinstance(ast, NSArg):\n        term = \"{}:{}\".format(ast.namespace, ast.value)\n        parent_function = ast.parent_function\n\n        if ast_type in trigger_types:\n            if rule_subject == \"trigger_value\":\n                subject = term\n\n            if rule_object == \"args\":\n                for arg in args:\n                    log.debug(f\"5: {subject} {arg}\")\n                    edge_ast = BELAst(subject, rule_relation, arg, spec)\n                    edges.append(edge_ast)\n            elif rule_object == \"parent_function\" and parent_function:\n                log.debug(f\"6: {subject} {parent_function}\")\n                edge_ast = BELAst(subject, rule_relation, parent_function, spec)\n                edges.append(edge_ast)\n\n    \n    if hasattr(ast, \"args\"):\n        for arg in ast.args:\n            process_rule(edges, arg, rule, spec)", "docstring": "Process computed edge rule\n\nRecursively processes BELAst versus a single computed edge rule\n\nArgs:\nedges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs\nast (Function): BEL Function AST\nrule (Mapping[str, Any]: computed edge rule", "source": "juraj-google-style"}
{"code": "def _update_services_target_state(sdp_target_state: str):\n    service_states = get_service_state_list()\n    for service in service_states:\n        if (service.current_state != sdp_target_state):\n            LOG.debug('Setting the target state of %s to be %s', service.id, sdp_target_state)\n            service.update_target_state(sdp_target_state)", "docstring": "Update the target states of services based on SDP target state.\n\nWhen we get a new target state this function is called to ensure\ncomponents receive the target state(s) and/or act on them.\n\nArgs:\nsdp_target_state (str): Target state of SDP", "source": "codesearchnet"}
{"code": "def save(self, response_choice=None, async=False, callback=None):\n        \n        return self._manage_child_object(nurest_object=self, method=HTTP_METHOD_PUT, async=async, callback=callback, response_choice=response_choice)", "docstring": "Update object and call given callback in case of async call\n\nArgs:\nasync (bool): Boolean to make an asynchronous call. Default is False\ncallback (function): Callback method that will be triggered in case of asynchronous call\n\nExample:\n>>> entity.name = \"My Super Object\"\n>>> entity.save() # will save the new name in the server", "source": "juraj-google-style"}
{"code": "def split_window(self, fpath, vertical=False, size=None, bufopts=None):\n        \n        command = 'split {}'.format(fpath) if fpath else 'new'\n        if vertical:\n            command = 'v' + command\n        if size:\n            command = str(size) + command\n\n        self._vim.command(command)\n\n        if bufopts:\n            self.set_buffer_options(bufopts)", "docstring": "Open file in a new split window.\n\nArgs:\nfpath (str): Path of the file to open. If ``None``, a new empty\nsplit is created.\nvertical (bool): Whether to open a vertical split.\nsize (Optional[int]): The height (or width) to set for the new window.\nbufopts (Optional[dict]): Buffer-local options to set in the split window.\nSee :func:`.set_buffer_options`.", "source": "juraj-google-style"}
{"code": "def load_parameters(path, proto=None, needs_proto=False):\n    \n    _, ext = os.path.splitext(path)\n\n    if ext == '.h5':\n        \n        import warnings\n        warnings.simplefilter('ignore', category=FutureWarning)\n        import h5py\n        with h5py.File(path, 'r') as hd:\n            keys = []\n\n            def _get_keys(name):\n                ds = hd[name]\n                if not isinstance(ds, h5py.Dataset):\n                    \n                    return\n                \n                keys.append((ds.attrs.get('index', None), name))\n            hd.visit(_get_keys)\n            for _, key in sorted(keys):\n                ds = hd[key]\n\n                var = get_parameter_or_create(\n                    key, ds.shape, need_grad=ds.attrs['need_grad'])\n                var.data.cast(ds.dtype)[...] = ds[...]\n\n                if needs_proto:\n                    if proto is None:\n                        proto = nnabla_pb2.NNablaProtoBuf()\n                    parameter = proto.parameter.add()\n                    parameter.variable_name = key\n                    parameter.shape.dim.extend(ds.shape)\n                    parameter.data.extend(\n                        numpy.array(ds[...]).flatten().tolist())\n                    parameter.need_grad = False\n                    if ds.attrs['need_grad']:\n                        parameter.need_grad = True\n\n    else:\n        if proto is None:\n            proto = nnabla_pb2.NNablaProtoBuf()\n\n        if ext == '.protobuf':\n            with open(path, 'rb') as f:\n                proto.MergeFromString(f.read())\n                set_parameter_from_proto(proto)\n        elif ext == '.nntxt' or ext == '.prototxt':\n            with open(path, 'r') as f:\n                text_format.Merge(f.read(), proto)\n                set_parameter_from_proto(proto)\n\n        elif ext == '.nnp':\n            try:\n                tmpdir = tempfile.mkdtemp()\n                with zipfile.ZipFile(path, 'r') as nnp:\n                    for name in nnp.namelist():\n                        nnp.extract(name, tmpdir)\n                        _, ext = os.path.splitext(name)\n                        if ext in ['.protobuf', '.h5']:\n                            proto = load_parameters(os.path.join(\n                                tmpdir, name), proto, needs_proto)\n            finally:\n                shutil.rmtree(tmpdir)\n                logger.info(\"Parameter load ({}): {}\".format(format, path))\n        else:\n            pass  \n    return proto", "docstring": "Load parameters from a file with the specified format.\n\nArgs:\npath : path or file object", "source": "juraj-google-style"}
{"code": "def InsertMessage(self, message, timeout=None):\n    if (not isinstance(message, common_pb2.Message)):\n        raise InvalidArgument(('Attempt to send unexpected message type: %s' % message.__class__.__name__))\n    if (not message.HasField('source')):\n        message.source.service_name = self._service_name\n    if (not message.message_id):\n        message.message_id = os.urandom(32)\n    return self._RetryLoop((lambda t: self._stub.InsertMessage(message, timeout=t)))", "docstring": "Inserts a message into the Fleetspeak server.\n\nSets message.source, if unset.\n\nArgs:\nmessage: common_pb2.Message\nThe message to send.\n\ntimeout: How many seconds to try for.\n\nRaises:\ngrpc.RpcError: if the RPC fails.\nInvalidArgument: if message is not a common_pb2.Message.", "source": "codesearchnet"}
{"code": "def _client_send(self, msg):\n    try:\n        self._client.write(msg.encode('utf8') + b'\\n')\n        self._client.flush()\n        self.log.debug('Snippet sent %s.', msg)\n    except socket.error as e:\n        raise Error(self._ad, 'Encountered socket error \"%s\" sending RPC message \"%s\"' % (e, msg))", "docstring": "Sends an Rpc message through the connection.\n\nArgs:\nmsg: string, the message to send.\n\nRaises:\nError: a socket error occurred during the send.", "source": "github-repos"}
{"code": "def create_message(self, channel_id, text):\n    baseurl = (self.rest_baseurl + '/channels/{}/messages'.format(channel_id))\n    requests.post(baseurl, headers=self.headers, data=json.dumps({'content': text}))", "docstring": "Sends a message to a Discord channel or user via REST API\n\nArgs:\nchannel_id (string): ID of destingation Discord channel\ntext (string): Content of message", "source": "codesearchnet"}
{"code": "def _open_interface(self, client, uuid, iface, key):\n    conn_id = self._validate_connection('open_interface', uuid, key)\n    if (conn_id is None):\n        return\n    conn_data = self._connections[uuid]\n    conn_data['last_touch'] = monotonic()\n    slug = self._build_device_slug(uuid)\n    try:\n        resp = (yield self._manager.open_interface(conn_id, iface))\n    except Exception as exc:\n        self._logger.exception('Error in manager open interface')\n        resp = {'success': False, 'reason': ('Internal error: %s' % str(exc))}\n    message = {'type': 'response', 'operation': 'open_interface', 'client': client}\n    message['success'] = resp['success']\n    if (not message['success']):\n        message['failure_reason'] = resp['reason']\n    self._publish_response(slug, message)", "docstring": "Open an interface on a connected device.\n\nArgs:\nclient (string): The client id who is requesting this operation\nuuid (int): The id of the device we're opening the interface on\niface (string): The name of the interface that we're opening\nkey (string): The key to authenticate the caller", "source": "codesearchnet"}
{"code": "def get_cloudflare_records(self, *, account):\n    zones = []\n    for zobj in self.__cloudflare_list_zones(account=account):\n        try:\n            self.log.debug('Processing DNS zone CloudFlare/{}'.format(zobj['name']))\n            zone = {'zone_id': get_resource_id('cfz', zobj['name']), 'name': zobj['name'], 'source': 'CloudFlare', 'comment': None, 'tags': {}, 'records': []}\n            for record in self.__cloudflare_list_zone_records(account=account, zoneID=zobj['id']):\n                zone['records'].append({'id': get_resource_id('cfr', zobj['id'], ['{}={}'.format(k, v) for (k, v) in record.items()]), 'zone_id': zone['zone_id'], 'name': record['name'], 'value': record['value'], 'type': record['type']})\n            if (len(zone['records']) > 0):\n                zones.append(zone)\n        except CloudFlareError:\n            self.log.exception('Failed getting records for CloudFlare zone {}'.format(zobj['name']))\n    return zones", "docstring": "Return a `list` of `dict`s containing the zones and their records, obtained from the CloudFlare API\n\nReturns:\naccount (:obj:`CloudFlareAccount`): A CloudFlare Account object\n:obj:`list` of `dict`", "source": "codesearchnet"}
{"code": "def create_repository(cls, repository_data):\n    location = ('memory{%s}' % hex(id(repository_data)))\n    resource_pool = ResourcePool(cache_size=None)\n    repo = MemoryPackageRepository(location, resource_pool)\n    repo.data = repository_data\n    return repo", "docstring": "Create a standalone, in-memory repository.\n\nUsing this function bypasses the `package_repository_manager` singleton.\nThis is usually desired however, since in-memory repositories are for\ntemporarily storing programmatically created packages, which we do not\nwant to cache and that do not persist.\n\nArgs:\nrepository_data (dict): Repository data, see class docstring.\n\nReturns:\n`MemoryPackageRepository` object.", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration\n\n>>> model = FlaxPegasusForConditionalGeneration.from_pretrained(\"google/pegasus-large\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/pegasus-large\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = 
jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def all_label_values(self, label_list_ids=None):\n        \n        values = set()\n\n        for utterance in self.utterances.values():\n            values = values.union(utterance.all_label_values(label_list_ids=label_list_ids))\n\n        return values", "docstring": "Return a set of all label-values occurring in this corpus.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists with an id contained in this list\nare considered.\n\nReturns:\n:class:`set`: A set of distinct label-values.", "source": "juraj-google-style"}
{"code": "def ListFileEntries(self, base_path_specs, output_writer):\n    \n    for base_path_spec in base_path_specs:\n      file_system = resolver.Resolver.OpenFileSystem(base_path_spec)\n      file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)\n      if file_entry is None:\n        logging.warning(\n            'Unable to open base path specification:\\n{0:s}'.format(\n                base_path_spec.comparable))\n        return\n\n      self._ListFileEntry(file_system, file_entry, '', output_writer)", "docstring": "Lists file entries in the base path specification.\n\nArgs:\nbase_path_specs (list[dfvfs.PathSpec]): source path specification.\noutput_writer (StdoutWriter): output writer.", "source": "juraj-google-style"}
{"code": "def random_unitary(dim, seed=None):\n    if ((dim == 0) or (not math.log2(dim).is_integer())):\n        raise QiskitError('Desired unitary dimension not a positive power of 2.')\n    matrix = np.zeros([dim, dim], dtype=complex)\n    for j in range(dim):\n        if (j == 0):\n            a = random_state(dim, seed)\n        else:\n            a = random_state(dim)\n        matrix[(:, j)] = np.copy(a)\n        i = (j - 1)\n        while (i >= 0):\n            dc = np.vdot(matrix[(:, i)], a)\n            matrix[(:, j)] = (matrix[(:, j)] - (dc * matrix[(:, i)]))\n            i = (i - 1)\n        matrix[(:, j)] = (matrix[(:, j)] * (1.0 / np.sqrt(np.vdot(matrix[(:, j)], matrix[(:, j)]))))\n    return Operator(matrix)", "docstring": "Return a random dim x dim unitary Operator from the Haar measure.\n\nArgs:\ndim (int): the dim of the state space.\nseed (int): Optional. To set a random seed.\n\nReturns:\nOperator: (dim, dim) unitary operator.\n\nRaises:\nQiskitError: if dim is not a positive power of 2.", "source": "codesearchnet"}
{"code": "def __init__(self, tcex, domain, data_type, ttl_minutes=None, mapping=None):\n        \n        self.tcex = tcex\n\n        \n        self.ttl = None\n        if ttl_minutes is not None:\n            self.ttl = self._dt_to_epoch(datetime.now() - timedelta(minutes=int(ttl_minutes)))\n        self.ds = self.tcex.datastore(domain, data_type, mapping)", "docstring": "Initialize class properties.\n\nArgs:\ntcex (object): An instance of TcEx.\ndomain (): [description]\ndata_type ([type]): [description]\nttl_minutes (int, optional): Defaults to None. Number of minutes the cache is valid.\nmapping ([type], optional): Defaults to None. [description]", "source": "juraj-google-style"}
{"code": "def to_api(self):\n    vals = {}\n    for (attribute, attribute_type) in self._props.items():\n        prop = getattr(self, attribute)\n        vals[self._to_camel_case(attribute)] = self._to_api_value(attribute_type, prop)\n    return vals", "docstring": "Return a dictionary to send to the API.\n\nReturns:\ndict: Mapping representing this object that can be sent to the\nAPI.", "source": "codesearchnet"}
{"code": "def set_category(self, category):\n    pcategory = self.find('general/category')\n    pcategory.clear()\n    name = ElementTree.SubElement(pcategory, 'name')\n    if isinstance(category, Category):\n        id_ = ElementTree.SubElement(pcategory, 'id')\n        id_.text = category.id\n        name.text = category.name\n    elif isinstance(category, basestring):\n        name.text = category", "docstring": "Set the policy's category.\n\nArgs:\ncategory: A category object.", "source": "codesearchnet"}
{"code": "def get_revisions(page):\n  \n  start_string = \"    <revision>\\n\"\n  end_string = \"    </revision>\\n\"\n  ret = []\n  current_pos = 0\n  while True:\n    start_pos = page.find(start_string, current_pos)\n    if start_pos == -1:\n      break\n    end_pos = page.find(end_string, start_pos)\n    assert end_pos != -1\n    ret.append(page[start_pos + len(start_string):end_pos])\n    current_pos = end_pos + len(end_string)\n  return ret", "docstring": "Extract the revisions of a page.\n\nArgs:\npage: a string\nReturns:\na list of strings", "source": "juraj-google-style"}
{"code": "def _CaptureExpression(self, frame, expression):\n    \n    rc, value = _EvaluateExpression(frame, expression)\n    if not rc:\n      return {'name': expression, 'status': value}\n\n    return self.CaptureNamedVariable(expression, value, 0,\n                                     self.expression_capture_limits)", "docstring": "Evalutes the expression and captures it into a Variable object.\n\nArgs:\nframe: evaluation context.\nexpression: watched expression to compile and evaluate.\n\nReturns:\nVariable object (which will have error status if the expression fails\nto evaluate).", "source": "juraj-google-style"}
{"code": "def in_test_phase(x, alt, training=None):\n    return in_train_phase(alt, x, training=training)", "docstring": "Selects `x` in test phase, and `alt` otherwise.\n\nNote that `alt` should have the *same shape* as `x`.\n\nArgs:\nx: What to return in test phase\n(tensor or callable that returns a tensor).\nalt: What to return otherwise\n(tensor or callable that returns a tensor).\ntraining: Optional scalar tensor\n(or Python boolean, or Python integer)\nspecifying the learning phase.\n\nReturns:\nEither `x` or `alt` based on `K.learning_phase`.", "source": "github-repos"}
{"code": "def iaf_hparams(hidden_size=512, filter_size=4096):\n    hparams = common_hparams.basic_params1()\n    hparams.hidden_size = hidden_size\n    hparams.add_hparam('attention_key_channels', None)\n    hparams.add_hparam('attention_value_channels', None)\n    hparams.add_hparam('num_heads', 4)\n    hparams.add_hparam('attention_dropout', 0.1)\n    hparams.add_hparam('shared_rel', False)\n    hparams.add_hparam('block_width', 1)\n    hparams.add_hparam('block_length', 1)\n    hparams.add_hparam('q_filter_width', 1)\n    hparams.add_hparam('kv_filter_width', 1)\n    hparams.layer_preprocess_sequence = 'n'\n    hparams.layer_prepostprocess_dropout = 0.1\n    hparams.norm_type = 'layer'\n    hparams.norm_epsilon = 1e-06\n    hparams.layer_prepostprocess_dropout_broadcast_dims = ''\n    hparams.layer_postprocess_sequence = 'da'\n    hparams.add_hparam('filter_size', filter_size)\n    hparams.add_hparam('ffn_layer', 'conv_hidden_relu')\n    hparams.add_hparam('relu_dropout', 0.1)\n    return hparams", "docstring": "Create hyperpameters for inverse autoregressive flows.\n\nArgs:\nhidden_size: Width of attention layers and neural network output layer.\nfilter_size: Hidden layer width for neural network.\n\nReturns:\nhparams: Hyperpameters with basic presets for inverse autoregressive flows.", "source": "codesearchnet"}
{"code": "def UploadFile(self, fd, offset=0, amount=None):\n    return self._UploadChunkStream(self._streamer.StreamFile(fd, offset=offset, amount=amount))", "docstring": "Uploads chunks of a given file descriptor to the transfer store flow.\n\nArgs:\nfd: A file descriptor to upload.\noffset: An integer offset at which the file upload should start on.\namount: An upper bound on number of bytes to stream. If it is `None` then\nthe whole file is uploaded.\n\nReturns:\nA `BlobImageDescriptor` object.", "source": "codesearchnet"}
{"code": "def clone(self, callable=None, **overrides):\n    old = {k: v for (k, v) in self.get_param_values() if (k not in ['callable', 'name'])}\n    params = dict(old, **overrides)\n    callable = (self.callable if (callable is None) else callable)\n    return self.__class__(callable, **params)", "docstring": "Clones the Callable optionally with new settings\n\nArgs:\ncallable: New callable function to wrap\n**overrides: Parameter overrides to apply\n\nReturns:\nCloned Callable object", "source": "codesearchnet"}
{"code": "def _ParseFValue(self, registry_key):\n    registry_value = registry_key.GetValueByName('F')\n    if (not registry_value):\n        raise errors.ParseError('missing value: \"F\" in Windows Registry key: {0:s}.'.format(registry_key.name))\n    f_value_map = self._GetDataTypeMap('f_value')\n    try:\n        return self._ReadStructureFromByteStream(registry_value.data, 0, f_value_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError(exception)", "docstring": "Parses an F value.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nReturns:\nf_value: F value stored in the Windows Registry key.\n\nRaises:\nParseError: if the Windows Registry key does not contain an F value or\nF value cannot be parsed.", "source": "codesearchnet"}
{"code": "def remove_acl(path):\n    if ((platform.system() == constants.PLATFORM_DARWIN) and os.path.isfile('/bin/chmod')):\n        subprocess.call(['/bin/chmod', '-R', '-N', path])\n    elif ((platform.system() == constants.PLATFORM_LINUX) and os.path.isfile('/bin/setfacl')):\n        subprocess.call(['/bin/setfacl', '-R', '-b', path])", "docstring": "Remove the ACL of the file or folder located on the given path.\n\nAlso remove the ACL of any file and folder below the given one,\nrecursively.\n\nArgs:\npath (str): Path to the file or folder to remove the ACL for,\nrecursively.", "source": "codesearchnet"}
{"code": "def dump_database_as_insert_sql(engine: Engine,\n                                fileobj: TextIO = sys.stdout,\n                                include_ddl: bool = False,\n                                multirow: bool = False) -> None:\n    \n    for tablename in get_table_names(engine):\n        dump_table_as_insert_sql(\n            engine=engine,\n            table_name=tablename,\n            fileobj=fileobj,\n            include_ddl=include_ddl,\n            multirow=multirow\n        )", "docstring": "Reads an entire database and writes SQL to replicate it to the output\nfile-like object.\n\nArgs:\nengine: SQLAlchemy :class:`Engine`\nfileobj: file-like object to write to\ninclude_ddl: if ``True``, include the DDL to create the table as well\nmultirow: write multi-row ``INSERT`` statements", "source": "juraj-google-style"}
{"code": "def enum_value_descriptor_to_code_string(enum_value_descriptor: descriptor.EnumValueDescriptor) -> str:\n    original_code = annotation_utils.get_enum_value_original_code(enum_value_descriptor)\n    return original_code if original_code is not None else enum_value_descriptor.name.lower().replace('_', '-')", "docstring": "Returns the code string describing the enum value.\n\nArgs:\nenum_value_descriptor: The EnumValueDescriptor to convert.\n\nReturns:\nThe code string describing the enum value.", "source": "github-repos"}
{"code": "def rollaxis(a, axis, start=0):\n    if isinstance(a, np.ndarray):\n        return np.rollaxis(a, axis, start)\n    if (axis not in range(a.ndim)):\n        raise ValueError(('rollaxis: axis (%d) must be >=0 and < %d' % (axis, a.ndim)))\n    if (start not in range((a.ndim + 1))):\n        raise ValueError(('rollaxis: start (%d) must be >=0 and < %d' % (axis, (a.ndim + 1))))\n    axes = list(range(a.ndim))\n    axes.remove(axis)\n    axes.insert(start, axis)\n    return transpose(a, axes)", "docstring": "Roll the specified axis backwards, until it lies in a given position.\n\nArgs:\na (array_like): Input array.\naxis (int): The axis to roll backwards.  The positions of the other axes\ndo not change relative to one another.\nstart (int, optional): The axis is rolled until it lies before this\nposition.  The default, 0, results in a \"complete\" roll.\n\nReturns:\nres (ndarray)", "source": "codesearchnet"}
{"code": "def issubset(self, other):\n    other = self._cast_to_frameset(other)\n    if (other is NotImplemented):\n        return NotImplemented\n    return (self.items <= other.items)", "docstring": "Check if the contents of `self` is a subset of the contents of\n`other.`\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "codesearchnet"}
{"code": "def compute_jaccard_index(x_set, y_set):\n    if ((not x_set) or (not y_set)):\n        return 0.0\n    intersection_cardinal = len((x_set & y_set))\n    union_cardinal = len((x_set | y_set))\n    return (intersection_cardinal / float(union_cardinal))", "docstring": "Return the Jaccard similarity coefficient of 2 given sets.\n\nArgs:\nx_set (set): first set.\ny_set (set): second set.\n\nReturns:\nfloat: Jaccard similarity coefficient.", "source": "codesearchnet"}
{"code": "def create_symmetric_key(self, algorithm, length):\n    if (algorithm not in self._symmetric_key_algorithms.keys()):\n        raise exceptions.InvalidField('The cryptographic algorithm {0} is not a supported symmetric key algorithm.'.format(algorithm))\n    cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)\n    if (length not in cryptography_algorithm.key_sizes):\n        raise exceptions.InvalidField('The cryptographic length ({0}) is not valid for the cryptographic algorithm ({1}).'.format(length, algorithm.name))\n    self.logger.info('Generating a {0} symmetric key with length: {1}'.format(algorithm.name, length))\n    key_bytes = os.urandom((length \n    try:\n        cryptography_algorithm(key_bytes)\n    except Exception as e:\n        self.logger.exception(e)\n        raise exceptions.CryptographicFailure('Invalid bytes for the provided cryptographic algorithm.')\n    return {'value': key_bytes, 'format': enums.KeyFormatType.RAW}", "docstring": "Create a symmetric key.\n\nArgs:\nalgorithm(CryptographicAlgorithm): An enumeration specifying the\nalgorithm for which the created key will be compliant.\nlength(int): The length of the key to be created. This value must\nbe compliant with the constraints of the provided algorithm.\n\nReturns:\ndict: A dictionary containing the key data, with the following\nkey/value fields:\n* value - the bytes of the key\n* format - a KeyFormatType enumeration for the bytes format\n\nRaises:\nInvalidField: Raised when the algorithm is unsupported or the\nlength is incompatible with the algorithm.\nCryptographicFailure: Raised when the key generation process\nfails.\n\nExample:\n>>> engine = CryptographyEngine()\n>>> key = engine.create_symmetric_key(\n...     CryptographicAlgorithm.AES, 256)", "source": "codesearchnet"}
{"code": "def set_el(cls, el, value):\n    if (not el):\n        return\n    tag_name = el.elt.tagName.lower()\n    if (tag_name == 'textarea'):\n        cls._set_textarea(el, value)\n    elif (tag_name == 'input'):\n        if ('typeahead' in el.class_name.lower()):\n            cls._set_typeahead(el, value)\n        else:\n            cls._set_input(el, value)\n    elif (tag_name == 'select'):\n        el.value = value\n    else:\n        raise ValueError(('Setter for %s (%s) not implemented!' % (tag_name, el.id)))", "docstring": "Set given `el` tag element to `value`.\n\nAutomatically choose proper method to set the `value` based on the type\nof the `el`.\n\nArgs:\nel (obj): Element reference to the input you want to convert to\ntypeahead.\nvalue (list): List of dicts with two keys: ``source`` and ``val``.", "source": "codesearchnet"}
{"code": "def submit(self):\n    if (self._future is not None):\n        raise JobError('We have already submitted the job!')\n    validate_qobj_against_schema(self._qobj)\n    self._future = self._executor.submit(self._fn, self._job_id, self._qobj)", "docstring": "Submit the job to the backend for execution.\n\nRaises:\nQobjValidationError: if the JSON serialization of the Qobj passed\nduring construction does not validate against the Qobj schema.\n\nJobError: if trying to re-submit the job.", "source": "codesearchnet"}
{"code": "def _ExtractWithFilter(self, source_path_specs, destination_path, output_writer, artifact_filters, filter_file, artifact_definitions_path, custom_artifacts_path, skip_duplicates=True):\n    extraction_engine = engine.BaseEngine()\n    if (self._source_type in self._SOURCE_TYPES_TO_PREPROCESS):\n        self._PreprocessSources(extraction_engine)\n    for source_path_spec in source_path_specs:\n        (file_system, mount_point) = self._GetSourceFileSystem(source_path_spec, resolver_context=self._resolver_context)\n        display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(source_path_spec)\n        output_writer.Write('Extracting file entries from: {0:s}\\n'.format(display_name))\n        filter_find_specs = extraction_engine.BuildFilterFindSpecs(artifact_definitions_path, custom_artifacts_path, extraction_engine.knowledge_base, artifact_filters, filter_file)\n        searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)\n        for path_spec in searcher.Find(find_specs=filter_find_specs):\n            self._ExtractFileEntry(path_spec, destination_path, output_writer, skip_duplicates=skip_duplicates)\n        file_system.Close()", "docstring": "Extracts files using a filter expression.\n\nThis method runs the file extraction process on the image and\npotentially on every VSS if that is wanted.\n\nArgs:\nsource_path_specs (list[dfvfs.PathSpec]): path specifications to extract.\ndestination_path (str): path where the extracted files should be stored.\noutput_writer (CLIOutputWriter): output writer.\nartifact_definitions_path (str): path to artifact definitions file.\ncustom_artifacts_path (str): path to custom artifact definitions file.\nartifact_filters (list[str]): names of artifact definitions that are\nused for filtering file system and Windows Registry key paths.\nfilter_file (str): path of the file that contains the filter file path\nfilters.\nskip_duplicates (Optional[bool]): True if files with duplicate content\nshould be skipped.", "source": "codesearchnet"}
{"code": "def _UpdateLatestProcessingTime(self, task):\n    \n    self._latest_task_processing_time = max(\n        self._latest_task_processing_time, task.last_processing_time)", "docstring": "Updates the latest processing time of the task manager from the task.\n\nThis method does not lock the manager and should be called by a method\nholding the manager lock.\n\nArgs:\ntask (Task): task to update the processing time of.", "source": "juraj-google-style"}
{"code": "def read_int64(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sq' % endian), 8)", "docstring": "Read 8 bytes as a signed integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def _maybe_commit(self, transaction):\n        \n        try:\n            transaction._commit()\n            return True\n        except exceptions.GoogleAPICallError as exc:\n            if transaction._read_only:\n                raise\n\n            if isinstance(exc, exceptions.Aborted):\n                \n                return False\n            else:\n                raise", "docstring": "Try to commit the transaction.\n\nIf the transaction is read-write and the ``Commit`` fails with the\n``ABORTED`` status code, it will be retried. Any other failure will\nnot be caught.\n\nArgs:\ntransaction (~.firestore_v1beta1.transaction.Transaction): The\ntransaction to be ``Commit``-ed.\n\nReturns:\nbool: Indicating if the commit succeeded.", "source": "juraj-google-style"}
{"code": "def allconcat_ring(xs, devices, concat_axis):\n  \n  n = len(xs)\n  if n == 1:\n    return xs\n  \n  parts = [[xs[target] if target == source else None for source in xrange(n)]\n           for target in xrange(n)]\n  for distance in xrange(1, n \n    for target in xrange(n):\n      source = (target + distance) % n\n      if parts[target][source] is None:\n        with tf.device(devices[target]):\n          parts[target][source] = tf.identity(parts[(target + 1) % n][source])\n      source = (target - distance) % n\n      if parts[target][source] is None:\n        with tf.device(devices[target]):\n          parts[target][source] = tf.identity(parts[(target - 1) % n][source])\n  return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)", "docstring": "Concatenate all Tensors everywhere.\n\nPerformance-optimized for a ring of devices.\n\nArgs:\nxs: a list of n tf.Tensors\ndevices: a list of n strings\nconcat_axis: an integer\n\nReturns:\na list of n Tensors", "source": "juraj-google-style"}
{"code": "def get_slab(self, shift=0, tol=0.1, energy=None):\n    h = self._proj_height\n    p = (h / self.parent.lattice.d_hkl(self.miller_index))\n    if self.in_unit_planes:\n        nlayers_slab = int(math.ceil((self.min_slab_size / p)))\n        nlayers_vac = int(math.ceil((self.min_vac_size / p)))\n    else:\n        nlayers_slab = int(math.ceil((self.min_slab_size / h)))\n        nlayers_vac = int(math.ceil((self.min_vac_size / h)))\n    nlayers = (nlayers_slab + nlayers_vac)\n    species = self.oriented_unit_cell.species_and_occu\n    props = self.oriented_unit_cell.site_properties\n    props = {k: (v * nlayers_slab) for (k, v) in props.items()}\n    frac_coords = self.oriented_unit_cell.frac_coords\n    frac_coords = (np.array(frac_coords) + np.array([0, 0, (- shift)])[(None, :)])\n    frac_coords -= np.floor(frac_coords)\n    (a, b, c) = self.oriented_unit_cell.lattice.matrix\n    new_lattice = [a, b, (nlayers * c)]\n    frac_coords[(:, 2)] = (frac_coords[(:, 2)] / nlayers)\n    all_coords = []\n    for i in range(nlayers_slab):\n        fcoords = frac_coords.copy()\n        fcoords[(:, 2)] += (i / nlayers)\n        all_coords.extend(fcoords)\n    slab = Structure(new_lattice, (species * nlayers_slab), all_coords, site_properties=props)\n    scale_factor = self.slab_scale_factor\n    if self.lll_reduce:\n        lll_slab = slab.copy(sanitize=True)\n        mapping = lll_slab.lattice.find_mapping(slab.lattice)\n        scale_factor = np.dot(mapping[2], scale_factor)\n        slab = lll_slab\n    if self.center_slab:\n        avg_c = np.average([c[2] for c in slab.frac_coords])\n        slab.translate_sites(list(range(len(slab))), [0, 0, (0.5 - avg_c)])\n    if self.primitive:\n        prim = slab.get_primitive_structure(tolerance=tol)\n        if (energy is not None):\n            energy = ((prim.volume / slab.volume) * energy)\n        slab = prim\n    ouc = self.oriented_unit_cell.copy()\n    if self.primitive:\n        slab_l = slab.lattice\n        ouc = ouc.get_primitive_structure(constrain_latt={'a': slab_l.a, 'b': slab_l.b, 'alpha': slab_l.alpha, 'beta': slab_l.beta, 'gamma': slab_l.gamma})\n    return Slab(slab.lattice, slab.species_and_occu, slab.frac_coords, self.miller_index, ouc, shift, scale_factor, energy=energy, site_properties=slab.site_properties, reorient_lattice=self.reorient_lattice)", "docstring": "This method takes in shift value for the c lattice direction and\ngenerates a slab based on the given shift. You should rarely use this\nmethod. Instead, it is used by other generation algorithms to obtain\nall slabs.\n\nArg:\nshift (float): A shift value in Angstrom that determines how much a\nslab should be shifted.\ntol (float): Tolerance to determine primitive cell.\nenergy (float): An energy to assign to the slab.\n\nReturns:\n(Slab) A Slab object with a particular shifted oriented unit cell.", "source": "codesearchnet"}
{"code": "def _DecodeURL(self, url):\n    \n    if not url:\n      return ''\n\n    decoded_url = urlparse.unquote(url)\n    if isinstance(decoded_url, py2to3.BYTES_TYPE):\n      try:\n        decoded_url = decoded_url.decode('utf-8')\n      except UnicodeDecodeError as exception:\n        decoded_url = decoded_url.decode('utf-8', errors='replace')\n        logger.warning(\n            'Unable to decode URL: {0:s} with error: {1!s}'.format(\n                url, exception))\n\n    return decoded_url", "docstring": "Decodes the URL, replaces %XX to their corresponding characters.\n\nArgs:\nurl (str): encoded URL.\n\nReturns:\nstr: decoded URL.", "source": "juraj-google-style"}
{"code": "def recall_at_precision(y_true, y_pred, precision):\n    (y_true, y_pred) = _mask_value_nan(y_true, y_pred)\n    (precision, recall, _) = skm.precision_recall_curve(y_true, y_pred)\n    return recall[np.searchsorted((precision - precision), 0)]", "docstring": "Recall at a certain precision threshold\n\nArgs:\ny_true: true labels\ny_pred: predicted labels\nprecision: resired precision level at which where to compute the recall", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.AnalyzeSentiment = channel.unary_unary(\n            \"/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment\",\n            request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSentimentRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSentimentResponse.FromString,\n        )\n        self.AnalyzeEntities = channel.unary_unary(\n            \"/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities\",\n            request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitiesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitiesResponse.FromString,\n        )\n        self.AnalyzeEntitySentiment = channel.unary_unary(\n            \"/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment\",\n            request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentResponse.FromString,\n        )\n        self.AnalyzeSyntax = channel.unary_unary(\n            \"/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax\",\n            request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSyntaxRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSyntaxResponse.FromString,\n        )\n        self.ClassifyText = channel.unary_unary(\n            \"/google.cloud.language.v1beta2.LanguageService/ClassifyText\",\n            request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.ClassifyTextRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.ClassifyTextResponse.FromString,\n        )\n        self.AnnotateText = channel.unary_unary(\n            \"/google.cloud.language.v1beta2.LanguageService/AnnotateText\",\n            request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnnotateTextRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnnotateTextResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def reqHeadTimeStamp(self, contract: Contract, whatToShow: str, useRTH: bool, formatDate: int=1) -> datetime.datetime:\n    return self._run(self.reqHeadTimeStampAsync(contract, whatToShow, useRTH, formatDate))", "docstring": "Get the datetime of earliest available historical data\nfor the contract.\n\nArgs:\ncontract: Contract of interest.\nuseRTH: If True then only show data from within Regular\nTrading Hours, if False then show all data.\nformatDate: If set to 2 then the result is returned as a\ntimezone-aware datetime.datetime with UTC timezone.", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    file_entry_type = event_values.get('file_entry_type', None)\n    if file_entry_type is not None:\n      event_values['file_entry_type'] = self._FILE_ENTRY_TYPES.get(\n          file_entry_type, 'UNKNOWN')\n\n    \n    \n    if (not event_values.get('allocated', False) and\n        not event_values.get('is_allocated', False)):\n      event_values['unallocated'] = 'unallocated'\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def save(self, recipe):\n        \n        \n        if 'id' in recipe and recipe['id'] is not None:\n            \n            self.logger.debug(\"Updating existing recipe: \" + json.dumps(recipe))\n            url = '%(base_url)s/recipe/json/%(recipe_id)s' % {\n                'base_url': self.base_url, 'recipe_id': recipe['id']\n            }\n            r = self.gbdx_connection.put(url, json=recipe)\n            try:\n                r.raise_for_status()\n            except:\n                print(r.text)\n                raise\n            return recipe['id']\n        else:\n            \n            self.logger.debug(\"Creating new recipe: \" + json.dumps(recipe))\n            url = '%(base_url)s/recipe/json' % {\n                'base_url': self.base_url\n            }\n            r = self.gbdx_connection.post(url, json=recipe)\n            try:\n                r.raise_for_status()\n            except:\n                print(r.text)\n                raise\n            recipe_json = r.json()\n            return recipe_json['id']", "docstring": "Saves an AnswerFactory Recipe\n\nArgs:\nrecipe (dict): Dictionary specifying a recipe\n\nReturns:\nAnswerFactory Recipe id", "source": "juraj-google-style"}
{"code": "def _inf_or_operator_handler_factory(c_start, is_delegate=True):\n    \n    @coroutine\n    def inf_or_operator_handler(c, ctx):\n        next_ctx = None\n        if not is_delegate:\n            ctx.value.append(c_start)\n            c, self = yield\n        else:\n            assert ctx.value[0] == c_start\n            assert c not in _DIGITS\n            ctx.queue.unread(c)\n            next_ctx = ctx\n            _, self = yield\n            assert c == _\n        maybe_inf = True\n        ctx.set_ion_type(IonType.FLOAT)\n        match_index = 0\n        trans = ctx.immediate_transition(self)\n        while True:\n            if maybe_inf:\n                if match_index < len(_INF_SUFFIX):\n                    maybe_inf = c == _INF_SUFFIX[match_index]\n                else:\n                    if _ends_value(c) or (ctx.container.ion_type is IonType.SEXP and c in _OPERATORS):\n                        yield ctx.event_transition(\n                            IonEvent, IonEventType.SCALAR, IonType.FLOAT, c_start == _MINUS and _NEG_INF or _POS_INF\n                        )\n                    else:\n                        maybe_inf = False\n            if maybe_inf:\n                match_index += 1\n            else:\n                ctx.set_unicode()\n                if match_index > 0:\n                    next_ctx = ctx.derive_child_context(ctx.whence)\n                    for ch in _INF_SUFFIX[0:match_index]:\n                        next_ctx.value.append(ch)\n                break\n            c, self = yield trans\n        if ctx.container is not _C_SEXP:\n            _illegal_character(c, next_ctx is None and ctx or next_ctx,\n                               'Illegal character following %s.' % (_chr(c_start),))\n        if match_index == 0:\n            if c in _OPERATORS:\n                yield ctx.immediate_transition(_operator_symbol_handler(c, ctx))\n            yield ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, ctx.value.as_symbol())\n        yield _CompositeTransition(\n            ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, ctx.value.as_symbol()),\n            ctx,\n            partial(_unquoted_symbol_handler, c),\n            next_ctx\n        )\n    return inf_or_operator_handler", "docstring": "Generates handler co-routines for values that may be `+inf` or `-inf`.\n\nArgs:\nc_start (int): The ordinal of the character that starts this token (either `+` or `-`).\nis_delegate (bool): True if a different handler began processing this token; otherwise, False. This will only\nbe true for `-inf`, because it is not the only value that can start with `-`; `+inf` is the only value\n(outside of a s-expression) that can start with `+`.", "source": "juraj-google-style"}
{"code": "def _create_filters(col_params, extractors):\n  \n  result = []\n  for col_param, extractor in zip(col_params, extractors):\n    a_filter = _create_filter(col_param, extractor)\n    if a_filter:\n      result.append(a_filter)\n  return result", "docstring": "Creates filters for the given col_params.\n\nArgs:\ncol_params: List of ListSessionGroupsRequest.ColParam protobufs.\nextractors: list of extractor functions of the same length as col_params.\nEach element should extract the column described by the corresponding\nelement of col_params.\nReturns:\nA list of filter functions. Each corresponding to a single\ncol_params.filter oneof field of _request", "source": "juraj-google-style"}
{"code": "def add(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Add().symbolic_call(x1, x2)\n    return backend.numpy.add(x1, x2)", "docstring": "Add arguments element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nThe tensor containing the element-wise sum of `x1` and `x2`.\n\nExamples:\n>>> x1 = keras.ops.convert_to_tensor([1, 4])\n>>> x2 = keras.ops.convert_to_tensor([5, 6])\n>>> keras.ops.add(x1, x2)\narray([6, 10], dtype=int32)\n\n`keras.ops.add` also broadcasts shapes:\n>>> x1 = keras.ops.convert_to_tensor(\n...     [[5, 4],\n...      [5, 6]]\n... )\n>>> x2 = keras.ops.convert_to_tensor([5, 6])\n>>> keras.ops.add(x1, x2)\narray([[10 10]\n[10 12]], shape=(2, 2), dtype=int32)", "source": "github-repos"}
{"code": "def _buckets_nearly_equal(a_dist, b_dist):\n    (a_type, a_buckets) = _detect_bucket_option(a_dist)\n    (b_type, b_buckets) = _detect_bucket_option(b_dist)\n    if (a_type != b_type):\n        return False\n    elif (a_type == u'linearBuckets'):\n        return _linear_buckets_nearly_equal(a_buckets, b_buckets)\n    elif (a_type == u'exponentialBuckets'):\n        return _exponential_buckets_nearly_equal(a_buckets, b_buckets)\n    elif (a_type == u'explicitBuckets'):\n        return _explicit_buckets_nearly_equal(a_buckets, b_buckets)\n    else:\n        return False", "docstring": "Determines whether two `Distributions` are nearly equal.\n\nArgs:\na_dist (:class:`Distribution`): an instance\nb_dist (:class:`Distribution`): another instance\n\nReturn:\nboolean: `True` if the two instances are approximately equal, otherwise\nFalse", "source": "codesearchnet"}
{"code": "def handle_triple(self, lhs, relation, rhs):\n    relation = relation.replace(':', '', 1)\n    if self.is_relation_inverted(relation):\n        (source, target, inverted) = (rhs, lhs, True)\n        relation = self.invert_relation(relation)\n    else:\n        (source, target, inverted) = (lhs, rhs, False)\n    source = _default_cast(source)\n    target = _default_cast(target)\n    if (relation == ''):\n        relation = None\n    return Triple(source, relation, target, inverted)", "docstring": "Process triples before they are added to the graph.\n\nNote that *lhs* and *rhs* are as they originally appeared, and\nmay be inverted. Inversions are detected by\nis_relation_inverted() and de-inverted by invert_relation().\n\nBy default, this function:\n* removes initial colons on relations\n* de-inverts all inverted relations\n* sets empty relations to `None`\n* casts numeric string sources and targets to their numeric\ntypes (e.g. float, int)\n\nArgs:\nlhs: the left hand side of an observed triple\nrelation: the triple relation (possibly inverted)\nrhs: the right hand side of an observed triple\nReturns:\nThe processed (source, relation, target) triple. By default,\nit is returned as a Triple object.", "source": "codesearchnet"}
{"code": "def get_http_raw(self, url=None, retry_count=3, headers=None, request_type='GET', form_data=None):\n    if (headers is None):\n        headers = {'Accept': 'text/html'}\n    enc_form_data = None\n    if form_data:\n        enc_form_data = urlencode(form_data)\n        try:\n            enc_form_data = bytes(enc_form_data, encoding='ascii')\n        except TypeError:\n            pass\n    try:\n        log.debug('HTTP query for {0} at {1}'.format(self.address_str, url))\n        try:\n            conn = Request(url=url, data=enc_form_data, headers=headers, **{'method': request_type})\n        except TypeError:\n            conn = Request(url=url, data=enc_form_data, headers=headers)\n        data = self.opener.open(conn, timeout=self.timeout)\n        try:\n            d = data.readall().decode('ascii', 'ignore')\n        except AttributeError:\n            d = data.read().decode('ascii', 'ignore')\n        return str(d)\n    except (URLError, socket.timeout, socket.error) as e:\n        log.debug('HTTP query socket error: {0}'.format(e))\n        if (retry_count > 0):\n            log.debug('HTTP query retrying (count: {0})'.format(str(retry_count)))\n            return self.get_http_raw(url=url, retry_count=(retry_count - 1), headers=headers, request_type=request_type, form_data=form_data)\n        else:\n            raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))\n    except HTTPLookupError as e:\n        raise e\n    except Exception:\n        raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))", "docstring": "The function for retrieving a raw HTML result via HTTP.\n\nArgs:\nurl (:obj:`str`): The URL to retrieve (required).\nretry_count (:obj:`int`): The number of times to retry in case\nsocket errors, timeouts, connection resets, etc. are\nencountered. Defaults to 3.\nheaders (:obj:`dict`): The HTTP headers. The Accept header\ndefaults to 'text/html'.\nrequest_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults\nto 'GET'.\nform_data (:obj:`dict`): Optional form POST data.\n\nReturns:\nstr: The raw data.\n\nRaises:\nHTTPLookupError: The HTTP lookup failed.", "source": "codesearchnet"}
{"code": "def get_json_files(files, recursive=False):\n    \n    json_files = []\n\n    if not files:\n        return json_files\n\n    for fn in files:\n        if os.path.isdir(fn):\n            children = list_json_files(fn, recursive)\n            json_files.extend(children)\n        elif is_json(fn):\n            json_files.append(fn)\n        else:\n            continue\n\n    if not json_files:\n        raise NoJSONFileFoundError(\"No JSON files found!\")\n    return json_files", "docstring": "Return a list of files to validate from `files`. If a member of `files`\nis a directory, its children with a ``.json`` extension will be added to\nthe return value.\n\nArgs:\nfiles: A list of file paths and/or directory paths.\nrecursive: If ``true``, this will descend into any subdirectories\nof input directories.\n\nReturns:\nA list of file paths to validate.", "source": "juraj-google-style"}
{"code": "def _GetLinkedPath(self, event):\n    if hasattr(event, 'local_path'):\n        return event.local_path\n    if hasattr(event, 'network_path'):\n        return event.network_path\n    if hasattr(event, 'relative_path'):\n        paths = []\n        if hasattr(event, 'working_directory'):\n            paths.append(event.working_directory)\n        paths.append(event.relative_path)\n        return '\\\\'.join(paths)\n    return 'Unknown'", "docstring": "Determines the linked path.\n\nArgs:\nevent (EventObject): event that contains a linked path.\n\nReturns:\nstr: linked path.", "source": "codesearchnet"}
{"code": "def query_google(point, max_distance, key):\n    if (not key):\n        return []\n    if from_cache(GG_CACHE, point, max_distance):\n        return from_cache(GG_CACHE, point, max_distance)\n    req = requests.get((GOOGLE_PLACES_URL % (point.lat, point.lon, max_distance, key)))\n    if (req.status_code != 200):\n        return []\n    response = req.json()\n    results = response['results']\n    final_results = []\n    for local in results:\n        final_results.append({'label': local['name'], 'distance': Point(local['geometry']['location']['lat'], local['geometry']['location']['lng'], None).distance(point), 'types': local['types'], 'suggestion_type': 'GOOGLE'})\n    google_insert_cache(point, final_results)\n    return final_results", "docstring": "Queries google maps API for a location\n\nArgs:\npoint (:obj:`Point`): Point location to query\nmax_distance (float): Search radius, in meters\nkey (str): Valid google maps api key\nReturns:\n:obj:`list` of :obj:`dict`: List of locations with the following format:\n{\n'label': 'Coffee house',\n'types': 'Commerce',\n'suggestion_type': 'GOOGLE'\n}", "source": "codesearchnet"}
{"code": "def find_proxy_plugin(component, plugin_name):\n    reg = ComponentRegistry()\n    plugins = reg.load_extensions('iotile.proxy_plugin', comp_filter=component, class_filter=TileBusProxyPlugin, product_name='proxy_plugin')\n    for (_name, plugin) in plugins:\n        if (plugin.__name__ == plugin_name):\n            return plugin\n    raise DataError('Could not find proxy plugin module in registered components or installed distributions', component=component, name=plugin_name)", "docstring": "Attempt to find a proxy plugin provided by a specific component\n\nArgs:\ncomponent (string): The name of the component that provides the plugin\nplugin_name (string): The name of the plugin to load\n\nReturns:\nTileBuxProxyPlugin: The plugin, if found, otherwise raises DataError", "source": "codesearchnet"}
{"code": "def create_alias(target_path, alias_path):\n    if ((platform.system() == 'Windows') and (not alias_path.endswith('.lnk'))):\n        alias_path += '.lnk'\n    if os.path.lexists(alias_path):\n        os.remove(alias_path)\n    if (platform.system() == 'Windows'):\n        from win32com import client\n        shell = client.Dispatch('WScript.Shell')\n        shortcut = shell.CreateShortCut(alias_path)\n        shortcut.Targetpath = target_path\n        shortcut.save()\n    else:\n        os.symlink(target_path, alias_path)", "docstring": "Creates an alias at 'alias_path' pointing to the file 'target_path'.\n\nOn Unix, this is implemented via symlink. On Windows, this is done by\ncreating a Windows shortcut file.\n\nArgs:\ntarget_path: Destination path that the alias should point to.\nalias_path: Path at which to create the new alias.", "source": "codesearchnet"}
{"code": "def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None):\n    if (not isinstance(scf_task, ScfTask)):\n        raise TypeError(('task `%s` does not inherit from ScfTask' % scf_task))\n    new = cls(manager=manager)\n    multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)\n    ddk_tasks = []\n    for ddk_inp in multi_ddk:\n        ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: 'WFK'})\n        ddk_tasks.append(ddk_task)\n    multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)\n    dde_tasks = []\n    dde_deps = {ddk_task: 'DDK' for ddk_task in ddk_tasks}\n    dde_deps.update({scf_task: 'WFK'})\n    for dde_inp in multi_dde:\n        dde_task = new.register_dde_task(dde_inp, deps=dde_deps)\n        dde_tasks.append(dde_task)\n    dte_deps = {scf_task: 'WFK DEN'}\n    dte_deps.update({dde_task: '1WF 1DEN' for dde_task in dde_tasks})\n    multi_dte = scf_task.input.make_dte_inputs()\n    dte_tasks = []\n    for dte_inp in multi_dte:\n        dte_task = new.register_dte_task(dte_inp, deps=dte_deps)\n        dte_tasks.append(dte_task)\n    return new", "docstring": "Build a DteWork from a ground-state task.\n\nArgs:\nscf_task: ScfTask object.\nddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.\nmanager: :class:`TaskManager` object.", "source": "codesearchnet"}
{"code": "def __init__(self, action, chunk_size=None):\n    \n    chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE\n\n    self._action = action\n    self._streamer = streaming.Streamer(chunk_size=chunk_size)", "docstring": "Initializes the uploader.\n\nArgs:\naction: A parent action that creates the uploader. Used to communicate\nwith the parent flow.\nchunk_size: A number of (uncompressed) bytes per a chunk.", "source": "juraj-google-style"}
{"code": "def shift(self, time: int) -> 'Interval':\n        \n        return Interval(self._begin + time, self._end + time)", "docstring": "Return a new interval shifted by `time` from self\n\nArgs:\ntime: time to be shifted\n\nReturns:\nInterval: interval shifted by `time`", "source": "juraj-google-style"}
{"code": "def explicit(fixed_qubits: Iterable[raw_types.Qid], fallback: Optional['QubitOrder']=None) -> 'QubitOrder':\n    result = tuple(fixed_qubits)\n    if (len(set(result)) < len(result)):\n        raise ValueError('Qubits appear in fixed_order twice: {}.'.format(result))\n\n    def func(qubits):\n        remaining = (set(qubits) - set(fixed_qubits))\n        if (not remaining):\n            return result\n        if (not fallback):\n            raise ValueError('Unexpected extra qubits: {}.'.format(remaining))\n        return (result + fallback.order_for(remaining))\n    return QubitOrder(func)", "docstring": "A basis that contains exactly the given qubits in the given order.\n\nArgs:\nfixed_qubits: The qubits in basis order.\nfallback: A fallback order to use for extra qubits not in the\nfixed_qubits list. Extra qubits will always come after the\nfixed_qubits, but will be ordered based on the fallback. If no\nfallback is specified, a ValueError is raised when extra qubits\nare specified.\n\nReturns:\nA Basis instance that forces the given qubits in the given order.", "source": "codesearchnet"}
{"code": "def compare_modules(file_, imports):\n    modules = parse_requirements(file_)\n    imports = [imports[i]['name'] for i in range(len(imports))]\n    modules = [modules[i]['name'] for i in range(len(modules))]\n    modules_not_imported = (set(modules) - set(imports))\n    return modules_not_imported", "docstring": "Compare modules in a file to imported modules in a project.\n\nArgs:\nfile_ (str): File to parse for modules to be compared.\nimports (tuple): Modules being imported in the project.\n\nReturns:\ntuple: The modules not imported in the project, but do exist in the\nspecified file.", "source": "codesearchnet"}
{"code": "def __init__(self, funcs, trackable_obj=None):\n    super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nfuncs: List of TensorFlow ConcreteFunctions. The list should not contain\nduplicate elements.\ntrackable_obj: tf.AutoTrackable object associated with `funcs`. A\nreference to this object needs to be maintained so that Variables do not\nget garbage collected since functions have a weak reference to\nVariables. This is only required when the tf.AutoTrackable object is not\nmaintained by the user (e.g. `from_saved_model`).", "source": "github-repos"}
{"code": "def __parse_tostr(self, text, **kwargs):\n        \n        n = self.options.get('nbest', 1)\n\n        if self._KW_BOUNDARY in kwargs:\n            patt = kwargs.get(self._KW_BOUNDARY, '.')\n            tokens = list(self.__split_pattern(text, patt))\n            text = ''.join([t[0] for t in tokens])\n\n            btext = self.__str2bytes(text)\n            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n\n            bpos = 0\n            self.__mecab.mecab_lattice_set_boundary_constraint(\n                self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)\n\n            for (token, match) in tokens:\n                bpos += 1\n                if match:\n                    mark = self.MECAB_INSIDE_TOKEN\n                else:\n                    mark = self.MECAB_ANY_BOUNDARY\n\n                for _ in range(1, len(self.__str2bytes(token))):\n                    self.__mecab.mecab_lattice_set_boundary_constraint(\n                        self.lattice, bpos, mark)\n                    bpos += 1\n                self.__mecab.mecab_lattice_set_boundary_constraint(\n                    self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)\n        elif self._KW_FEATURE in kwargs:\n            features = kwargs.get(self._KW_FEATURE, ())\n            fd = {morph: self.__str2bytes(feat) for morph, feat in features}\n\n            tokens = self.__split_features(text, [e[0] for e in features])\n            text = ''.join([t[0] for t in tokens])\n\n            btext = self.__str2bytes(text)\n            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n\n            bpos = 0\n            for chunk, match in tokens:\n                c = len(self.__str2bytes(chunk))\n                if match == True:\n                    self.__mecab.mecab_lattice_set_feature_constraint(\n                        self.lattice, bpos, bpos+c, fd[chunk])\n                bpos += c\n        else:\n            btext = self.__str2bytes(text)\n            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)\n\n        self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)\n\n        if n > 1:\n            res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n)\n        else:\n            res = self.__mecab.mecab_lattice_tostr(self.lattice)\n\n        if res != self.__ffi.NULL:\n            raw = self.__ffi.string(res)\n            return self.__bytes2str(raw).strip()\n        else:\n            err = self.__mecab.mecab_lattice_strerror(self.lattice)\n            logger.error(self.__bytes2str(self.__ffi.string(err)))\n            raise MeCabError(self.__bytes2str(self.__ffi.string(err)))", "docstring": "Builds and returns the MeCab function for parsing Unicode text.\n\nArgs:\nfn_name: MeCab function name that determines the function\nbehavior, either 'mecab_sparse_tostr' or\n'mecab_nbest_sparse_tostr'.\n\nReturns:\nA function definition, tailored to parsing Unicode text and\nreturning the result as a string suitable for display on stdout,\nusing either the default or N-best behavior.", "source": "juraj-google-style"}
{"code": "def pytest_terminal_summary_main(tr, id):\n    from _pytest.config import create_terminal_writer\n    if not len(id):\n        id = 'tests'\n    config = tr.config\n    orig_writer = config.get_terminal_writer()\n    orig_tbstyle = config.option.tbstyle\n    orig_reportchars = tr.reportchars\n    dir = f'reports/{id}'\n    Path(dir).mkdir(parents=True, exist_ok=True)\n    report_files = {k: f'{dir}/{k}.txt' for k in ['durations', 'errors', 'failures_long', 'failures_short', 'failures_line', 'passes', 'stats', 'summary_short', 'warnings']}\n    dlist = []\n    for replist in tr.stats.values():\n        for rep in replist:\n            if hasattr(rep, 'duration'):\n                dlist.append(rep)\n    if dlist:\n        dlist.sort(key=lambda x: x.duration, reverse=True)\n        with open(report_files['durations'], 'w') as f:\n            durations_min = 0.05\n            f.write('slowest durations\\n')\n            for i, rep in enumerate(dlist):\n                if rep.duration < durations_min:\n                    f.write(f'{len(dlist) - i} durations < {durations_min} secs were omitted')\n                    break\n                f.write(f'{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\\n')\n\n    def summary_failures_short(tr):\n        reports = tr.getreports('failed')\n        if not reports:\n            return\n        tr.write_sep('=', 'FAILURES SHORT STACK')\n        for rep in reports:\n            msg = tr._getfailureheadline(rep)\n            tr.write_sep('_', msg, red=True, bold=True)\n            longrepr = re.sub('.*_ _ _ (_ ){10,}_ _ ', '', rep.longreprtext, 0, re.M | re.S)\n            tr._tw.line(longrepr)\n    config.option.tbstyle = 'auto'\n    with open(report_files['failures_long'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        tr.summary_failures()\n    with open(report_files['failures_short'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        summary_failures_short(tr)\n    config.option.tbstyle = 'line'\n    with open(report_files['failures_line'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        tr.summary_failures()\n    with open(report_files['errors'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        tr.summary_errors()\n    with open(report_files['warnings'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        tr.summary_warnings()\n        tr.summary_warnings()\n    tr.reportchars = 'wPpsxXEf'\n    with open(report_files['summary_short'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        tr.short_test_summary()\n    with open(report_files['stats'], 'w') as f:\n        tr._tw = create_terminal_writer(config, f)\n        tr.summary_stats()\n    tr._tw = orig_writer\n    tr.reportchars = orig_reportchars\n    config.option.tbstyle = orig_tbstyle", "docstring": "Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current\ndirectory. 
The report files are prefixed with the test suite name.\n\nThis function emulates --duration and -rA pytest arguments.\n\nThis function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined\nthere.\n\nArgs:\n- tr: `terminalreporter` passed from `conftest.py`\n- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is\nneeded as some jobs have multiple runs of pytest, so we can't have them overwrite each other.\n\nNB: this functions taps into a private _pytest API and while unlikely, it could break should pytest do internal\nchanges - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`\nplugins and interfere.", "source": "github-repos"}
{"code": "def with_claims(self, additional_claims):\n        \n        new_additional_claims = copy.deepcopy(self._additional_claims)\n        new_additional_claims.update(additional_claims or {})\n\n        return self.__class__(\n            self._signer,\n            service_account_email=self._service_account_email,\n            scopes=self._scopes,\n            token_uri=self._token_uri,\n            subject=self._subject,\n            project_id=self._project_id,\n            additional_claims=new_additional_claims)", "docstring": "Returns a copy of these credentials with modified claims.\n\nArgs:\nadditional_claims (Mapping[str, str]): Any additional claims for\nthe JWT payload. This will be merged with the current\nadditional claims.\n\nReturns:\ngoogle.auth.service_account.Credentials: A new credentials\ninstance.", "source": "juraj-google-style"}
{"code": "def add_variable(self, feature_column, var):\n    del feature_column, var\n    raise NotImplementedError('StateManager.add_variable')", "docstring": "Adds an existing variable to the state.\n\nArgs:\nfeature_column: A `FeatureColumn` object to associate this variable with.\nvar: The variable.", "source": "github-repos"}
{"code": "def create_exception_by_name(name, detailCode='0', description='', traceInformation=None, identifier=None, nodeId=None):\n    try:\n        dataone_exception = globals()[name]\n    except LookupError:\n        dataone_exception = ServiceFailure\n    return dataone_exception(detailCode, description, traceInformation, identifier, nodeId)", "docstring": "Create a DataONEException based object by name.\n\nArgs:\nname: str\nThe type name of a DataONE Exception. E.g. NotFound.\n\nIf an unknown type name is used, it is automatically set to ServiceFailure. As\nthe XML Schema for DataONE Exceptions does not restrict the type names, this\nmay occur when deserializing an exception not defined by DataONE.\n\ndetailCode: int\nOptional index into a table of predefined error conditions.\n\nSee Also:\nFor remaining args, see: ``DataONEException()``", "source": "codesearchnet"}
{"code": "def evaluate(conditions, leaf_evaluator):\n  \n\n  if isinstance(conditions, list):\n    if conditions[0] in list(EVALUATORS_BY_OPERATOR_TYPE.keys()):\n      return EVALUATORS_BY_OPERATOR_TYPE[conditions[0]](conditions[1:], leaf_evaluator)\n    else:\n      \n      return EVALUATORS_BY_OPERATOR_TYPE[ConditionOperatorTypes.OR](conditions, leaf_evaluator)\n\n  leaf_condition = conditions\n  return leaf_evaluator(leaf_condition)", "docstring": "Top level method to evaluate conditions.\n\nArgs:\nconditions: Nested array of and/or conditions, or a single leaf condition value of any type.\nExample: ['and', '0', ['or', '1', '2']]\nleaf_evaluator: Function which will be called to evaluate leaf condition values.\n\nReturns:\nBoolean: Result of evaluating the conditions using the operator rules and the leaf evaluator.\nNone: if conditions couldn't be evaluated.", "source": "juraj-google-style"}
{"code": "def optional(name, default) -> 'Wildcard':\n    return Wildcard(min_count=1, fixed_size=True, variable_name=name, optional=default)", "docstring": "Create a `Wildcard` that matches a single argument with a default value.\n\nIf the wildcard does not match, the substitution will contain the\ndefault value instead.\n\nArgs:\nname:\nThe name for the wildcard.\ndefault:\nThe default value of the wildcard.\n\nReturns:\nA n optional wildcard.", "source": "codesearchnet"}
{"code": "def select(self, selector):\n        \n        if self._is_single_string_selector(selector, 'name'):\n            \n            return self._all_models_by_name.get_all(selector['name'])\n        else:\n            return find(self._all_models.values(), selector)", "docstring": "Query this document for objects that match the given selector.\n\nArgs:\nselector (JSON-like query dictionary) : you can query by type or by\nname, e.g. ``{\"type\": HoverTool}``, ``{\"name\": \"mycircle\"}``\n\nReturns:\nseq[Model]", "source": "juraj-google-style"}
{"code": "def _extract_type_spec_recursively(value):\n    if isinstance(value, composite_tensor.CompositeTensor):\n        return value._type_spec\n    if isinstance(value, variables.Variable):\n        return resource_variable_ops.VariableSpec(value.shape, dtype=value.dtype, trainable=value.trainable)\n    if tensor_util.is_tensor(value):\n        return tensor_spec.TensorSpec(value.shape, value.dtype)\n    if isinstance(value, list):\n        return list((_extract_type_spec_recursively(v) for v in value))\n    if isinstance(value, data_structures.TrackableDataStructure):\n        return _extract_type_spec_recursively(value.__wrapped__)\n    if isinstance(value, tuple):\n        return type(value)((_extract_type_spec_recursively(x) for x in value))\n    if isinstance(value, dict):\n        return type(value)(((k, _extract_type_spec_recursively(v)) for k, v in value.items()))\n    return value", "docstring": "Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.\n\nIf `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If\n`value` is a collection containing `Tensor` values, recursively supplant them\nwith their respective `TypeSpec`s in a collection of parallel stucture.\n\nIf `value` is none of the above, return it unchanged.\n\nArgs:\nvalue: a Python `object` to (possibly) turn into a (collection of)\n`tf.TypeSpec`(s).\n\nReturns:\nspec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`\nor `value`, if no `Tensor`s are found.", "source": "github-repos"}
{"code": "def track_event(self, name, properties=None, measurements=None):\n    data = channel.contracts.EventData()\n    data.name = (name or NULL_CONSTANT_STRING)\n    if properties:\n        data.properties = properties\n    if measurements:\n        data.measurements = measurements\n    self.track(data, self._context)", "docstring": "Send information about a single event that has occurred in the context of the application.\n\nArgs:\nname (str). the data to associate to this event.\\n\nproperties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\nmeasurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)", "source": "codesearchnet"}
{"code": "def head(self, n=10):\n        \n        r = self.__repr__().split('\\n')\n        print('\\n'.join(r[:n]), end=' ')", "docstring": "Display the top of the file.\n\nArgs:\nn (int): Number of lines to display", "source": "juraj-google-style"}
{"code": "def __getitem__(self, key):\n    if key in self._layout_map:\n        return self._layout_map[key]\n    matching_keys = []\n    for k in self._layout_map:\n        if re.search(k, key):\n            matching_keys.append(k)\n    if len(matching_keys) > 1:\n        raise ValueError(f\"Path '{key}' matches multiple layout specification keys: {matching_keys}. Please make sure each tensor/variable path only matches at most one layout specification key in the LayoutMap.\")\n    elif len(matching_keys) == 1:\n        return self._layout_map[matching_keys[0]]\n    return None", "docstring": "Retrieves the corresponding layout by the string key.\n\nWhen there isn't an exact match, all the existing keys in the layout map\nwill be treated as a regex and map against the input key again. When\nthere are multiple matches for the regex, an `ValueError` will be\nraised. Returns `None` if there isn't any match found.\n\nArgs:\nkey: String key to query a layout.\n\nReturns:\nCorresponding layout based on the query.", "source": "github-repos"}
{"code": "def run(self, test_config, ref_dir, tmp_dir, mode, heartbeat=None, num_attempts=0):\n        \n        assert 'name' in test_config\n        name = test_config['name']\n\n        if 'ref' in test_config:\n            \n            assert 'run' in test_config\n            arm_config = { 'name': name }\n            if mode == 'test':\n                arm_config.update(test_config['run'])\n            elif mode == 'update':\n                arm_config.update(test_config['ref'])\n            test_config = arm_config\n\n        assert 'url' in test_config\n\n        test_dir = tempfile.mkdtemp(dir=tmp_dir)\n        log_file = os.path.join(test_dir, 'log.txt')\n        output_path = os.path.join(test_dir, 'screenshot.png')\n\n        logging.info('Test config:\\n%s', json.dumps(test_config, indent=2))\n\n        capture_config = copy.deepcopy(test_config.get('config', {}))\n        capture_config['targetUrl'] = test_config['url']\n        config_file = os.path.join(test_dir, 'config.json')\n        json.dump(capture_config, open(config_file, 'w'), indent=2)\n\n        ref_path = os.path.join(ref_dir, '%s.png' % name)\n        if mode == 'test':\n            assert os.path.exists(ref_path), (\n                'Reference image %s does not exist. '\n                'Try running in update mode.' % ref_path)\n        elif mode == 'update':\n            output_path = ref_path\n            ref_path = None\n        else:\n            raise ValueError('Invalid mode %s' % mode)\n\n        class NamedHeartbeat(workers.WorkflowItem):\n            def run(self, message):\n                yield heartbeat('%s: %s' % (name, message))\n\n        try:\n            yield CaptureAndDiffWorkflowItem(\n                    name, log_file, config_file, output_path, ref_path,\n                    heartbeat=NamedHeartbeat)\n        except capture_worker.CaptureFailedError, e:\n            if num_attempts >= e.max_attempts:\n                yield heartbeat('Unable to capture screenshot after %d tries.' % num_attempts)\n                raise e\n            else:\n                num_attempts += 1\n                yield heartbeat('Capture failed, retrying (%d)' % num_attempts)\n                yield OneTestWorkflowItem(test_config, ref_dir, tmp_dir, mode,\n                        heartbeat=heartbeat, num_attempts=num_attempts)", "docstring": "Build a CaptureAndDiffWorkflowItem for a test.\n\nArgs:\ntest_config: See test.yaml for structure of test_config.\nReturns: A CaptureAndDiffWorkflowItem", "source": "juraj-google-style"}
{"code": "def create_symlink(self, file_path, link_target, create_missing_dirs=True):\n    if (not self._is_link_supported()):\n        raise OSError('Symbolic links are not supported on Windows before Python 3.2')\n    file_path = self.make_string_path(file_path)\n    link_target = self.make_string_path(link_target)\n    file_path = self.normcase(file_path)\n    if self.ends_with_path_separator(file_path):\n        if self.exists(file_path):\n            self.raise_os_error(errno.EEXIST, file_path)\n        if self.exists(link_target):\n            if (not self.is_windows_fs):\n                self.raise_os_error(errno.ENOENT, file_path)\n        else:\n            if self.is_windows_fs:\n                self.raise_os_error(errno.EINVAL, link_target)\n            if (not self.exists(self._path_without_trailing_separators(file_path), check_link=True)):\n                self.raise_os_error(errno.ENOENT, link_target)\n            if self.is_macos:\n                if self.exists(file_path, check_link=True):\n                    self.remove_object(file_path)\n            else:\n                self.raise_os_error(errno.EEXIST, link_target)\n    if (not self.islink(file_path)):\n        file_path = self.resolve_path(file_path)\n    link_target = make_string_path(link_target)\n    return self.create_file_internally(file_path, st_mode=(S_IFLNK | PERM_DEF), contents=link_target, create_missing_dirs=create_missing_dirs, raw_io=True)", "docstring": "Create the specified symlink, pointed at the specified link target.\n\nArgs:\nfile_path:  path to the symlink to create\nlink_target:  the target of the symlink\ncreate_missing_dirs: If `True`, any missing parent directories of\nfile_path will be created\n\nReturns:\nThe newly created FakeFile object.\n\nRaises:\nOSError: if the symlink could not be created\n(see :py:meth:`create_file`).\nOSError: if on Windows before Python 3.2.", "source": "codesearchnet"}
{"code": "def _parse_resource(self, uri: str, json_obj: Dict[str, Any]) -> Optional[_T]:\n    json_parser = _json_parser.JsonParser(self.handler, self.resource_time_zone)\n    resource_type = json_obj.get('resourceType')\n    if resource_type is None:\n        raise ValueError(f'JSON for URI {uri} does not have a resource type.')\n    if resource_type == 'Bundle':\n        json_value = _find_resource_in_bundle(uri, json_obj)\n        if json_value is None:\n            return None\n        else:\n            target = self.proto_cls()\n            json_parser.merge_value(json_value, target)\n            return target\n    else:\n        target = self.proto_cls()\n        json_parser.merge_value(json_obj, target)\n        return target", "docstring": "Parses a protocol buffer for the given JSON object.\n\nArgs:\nuri: The URI of the resource to parse.\njson_obj: The JSON object to parse into a proto.\n\nReturns:\nThe protocol buffer for the resource or `None` if it can not be found.", "source": "github-repos"}
{"code": "def get_subdomain(url):\n        \n\n        if url not in URLHelper.__cache:\n            URLHelper.__cache[url] = urlparse(url)\n\n        return \".\".join(URLHelper.__cache[url].netloc.split(\".\")[:-2])", "docstring": "Get the subdomain of the given URL.\n\nArgs:\nurl (str): The URL to get the subdomain from.\n\nReturns:\nstr: The subdomain(s)", "source": "juraj-google-style"}
{"code": "def macro_tpm_sbs(self, state_by_state_micro_tpm):\n        \n        validate.tpm(state_by_state_micro_tpm, check_independence=False)\n\n        mapping = self.make_mapping()\n\n        num_macro_states = 2 ** len(self.macro_indices)\n        macro_tpm = np.zeros((num_macro_states, num_macro_states))\n\n        micro_states = range(2 ** len(self.micro_indices))\n        micro_state_transitions = itertools.product(micro_states, repeat=2)\n\n        \n        \n        \n        for previous_state, current_state in micro_state_transitions:\n            macro_tpm[mapping[previous_state], mapping[current_state]] += (\n                state_by_state_micro_tpm[previous_state, current_state])\n\n        \n        return np.array([distribution.normalize(row) for row in macro_tpm])", "docstring": "Create a state-by-state coarse-grained macro TPM.\n\nArgs:\nmicro_tpm (nd.array): The state-by-state TPM of the micro-system.\n\nReturns:\nnp.ndarray: The state-by-state TPM of the macro-system.", "source": "juraj-google-style"}
{"code": "def get_dataset(self, name):\n    url = (self.url() + '/resource/dataset/{}'.format(name))\n    req = self.remote_utils.get_url(url)\n    if (req.status_code is not 200):\n        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n    else:\n        return req.json()", "docstring": "Returns info regarding a particular dataset.\n\nArugments:\nname (str): Dataset name\n\nReturns:\ndict: Dataset information", "source": "codesearchnet"}
{"code": "def _process_tensorlike(inputs):\n\n    def _convert_numpy_and_scipy(x):\n        if isinstance(x, np.ndarray):\n            dtype = None\n            if issubclass(x.dtype.type, np.floating):\n                dtype = backend.floatx()\n            return tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)\n        elif _is_scipy_sparse(x):\n            return _scipy_sparse_to_sparse_tensor(x)\n        return x\n    inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)\n    return nest.list_to_tuple(inputs)", "docstring": "Process tensor-like inputs.\n\nThis function:\n\n(1) Converts `Numpy` arrays to `Tensor`s.\n(2) Converts `Scipy` sparse matrices to `SparseTensor`s.\n(2) Converts `list`s to `tuple`s (for `tf.data` support).\n\nArgs:\ninputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.\n\nReturns:\nStructure of `Tensor`s or tensor-like.", "source": "github-repos"}
{"code": "def convert_gather(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting embedding ...')\n\n    if names == 'short':\n        tf_name = 'EMBD' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    weights_name = '{0}.weight'.format(w_name)\n\n    W = weights[weights_name].numpy()\n    input_channels, output_channels = W.shape\n\n    keras_weights = [W]\n\n    dense = keras.layers.Embedding(\n        input_channels,\n        weights=keras_weights, output_dim=output_channels, name=tf_name\n    )\n    layers[scope_name] = dense(layers[inputs[1]])", "docstring": "Convert gather (embedding) layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def aggregate_field(self, field, combine_fn, dest):\n    return _GroupAndAggregate(self, ()).aggregate_field(field, combine_fn, dest)", "docstring": "Returns a grouping operation that also aggregates grouped values.\n\nArgs:\nfield: indicates the field to be aggregated\ncombine_fn: indicates the aggregation function to be used\ndest: indicates the name that will be used for the aggregate in the output\n\nMay be called repeatedly to aggregate multiple fields, e.g.\n\nGroupBy('key')\n.aggregate_field('some_attr', sum, 'sum_attr')\n.aggregate_field(lambda v: ..., MeanCombineFn, 'mean')", "source": "github-repos"}
{"code": "def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None):\n    dtype = dtypes.as_dtype(dtype)\n    with ops.name_scope(name, 'stateful_uniform_full_int', [shape]) as name:\n        shape = _shape_tensor(shape)\n        return self._uniform_full_int(shape=shape, dtype=dtype, name=name)", "docstring": "Uniform distribution on an integer type's entire range.\n\nThis method is the same as setting `minval` and `maxval` to `None` in the\n`uniform` method.\n\nArgs:\nshape: the shape of the output.\ndtype: (optional) the integer type, default to uint64.\nname: (optional) the name of the node.\n\nReturns:\nA tensor of random numbers of the required shape.", "source": "github-repos"}
{"code": "def connection_made(self, transport):\n        \n        self.transport = transport\n        self.transport.sendto(self.message)\n        self.transport.close()", "docstring": "Create connection, use to send message and close.\n\nArgs:\ntransport (asyncio.DatagramTransport): Transport used for sending.", "source": "juraj-google-style"}
{"code": "def _get_bradcrack_data(bravais):\n        r\n        json_file = pkg_resources.resource_filename(__name__, 'bradcrack.json')\n        with open(json_file, 'r') as f:\n            bradcrack_data = load_json(f)\n            return bradcrack_data[bravais]", "docstring": "r\"\"\"Read Bradley--Cracknell k-points path from data file\n\nArgs:\nbravais (str): Lattice code including orientation e.g. 'trig_p_c'\n\nReturns:\ndict: kpoint path and special point locations, formatted as e.g.::\n\n{'kpoints': {'\\Gamma': [0., 0., 0.], 'X': [0., 0.5, 0.], ...},\n'path': [['\\Gamma', 'X', ..., 'P'], ['H', 'N', ...]]}", "source": "juraj-google-style"}
{"code": "def replace_tensors_by_numpy_ndarrays(repr_ds: RepresentativeDataset, sess: session.Session) -> RepresentativeDataset:\n    new_repr_ds = []\n    for sample in repr_ds:\n        new_sample = {}\n        for input_key, input_data in sample.items():\n            if isinstance(input_data, core.Tensor):\n                input_data = input_data.eval(session=sess)\n            new_sample[input_key] = input_data\n        new_repr_ds.append(new_sample)\n    return new_repr_ds", "docstring": "Replaces tf.Tensors in samples by their evaluated numpy arrays.\n\nNote: This should be run in graph mode (default in TF1) only.\n\nArgs:\nrepr_ds: Representative dataset to replace the tf.Tensors with their\nevaluated values. `repr_ds` is iterated through, so it may not be reusable\n(e.g. if it is a generator object).\nsess: Session instance used to evaluate tf.Tensors.\n\nReturns:\nThe new representative dataset where each tf.Tensor is replaced by its\nevaluated numpy ndarrays.", "source": "github-repos"}
{"code": "def _BuildScanTreeNode(self, path_filter_table, ignore_list):\n    paths_list = list(path_filter_table.paths)\n    ignore_list = list(ignore_list)\n    similarity_weights = _PathSegmentWeights()\n    occurrence_weights = _PathSegmentWeights()\n    value_weights = _PathSegmentWeights()\n    for path_segment_index in path_filter_table.path_segments_per_index.keys():\n        if (not path_filter_table.path_segments_per_index[path_segment_index]):\n            continue\n        similarity_weights.AddIndex(path_segment_index)\n        occurrence_weights.AddIndex(path_segment_index)\n        value_weights.AddIndex(path_segment_index)\n        path_segments = path_filter_table.GetPathSegments(path_segment_index)\n        number_of_path_segments = len(path_segments.keys())\n        if (number_of_path_segments > 1):\n            occurrence_weights.SetWeight(path_segment_index, number_of_path_segments)\n        for paths_per_segment_list in path_segments.values():\n            path_segment_weight = len(paths_per_segment_list)\n            if (path_segment_weight > 1):\n                similarity_weights.AddWeight(path_segment_index, path_segment_weight)\n    path_segment_index = self._GetMostSignificantPathSegmentIndex(paths_list, similarity_weights, occurrence_weights, value_weights)\n    ignore_list.append(path_segment_index)\n    if (path_segment_index < 0):\n        raise ValueError('Invalid path segment index value out of bounds.')\n    scan_tree_node = PathFilterScanTreeNode(path_segment_index)\n    path_segments = path_filter_table.GetPathSegments(path_segment_index)\n    for (path_segment, paths_per_segment_list) in path_segments.items():\n        if (not paths_per_segment_list):\n            raise ValueError('Invalid number of paths value out of bounds.')\n        if (len(paths_per_segment_list) == 1):\n            for path in paths_per_segment_list:\n                scan_tree_node.AddPathSegment(path_segment, path)\n        else:\n            sub_path_filter_table = _PathFilterTable(paths_per_segment_list, ignore_list, path_segment_separator=self._path_segment_separator)\n            scan_sub_node = self._BuildScanTreeNode(sub_path_filter_table, ignore_list)\n            scan_tree_node.AddPathSegment(path_segment, scan_sub_node)\n        for path in paths_per_segment_list:\n            paths_list.remove(path)\n    number_of_paths = len(paths_list)\n    if (number_of_paths == 1):\n        scan_tree_node.SetDefaultValue(paths_list[0])\n    elif (number_of_paths > 1):\n        path_filter_table = _PathFilterTable(paths_list, ignore_list, path_segment_separator=self._path_segment_separator)\n        scan_sub_node = self._BuildScanTreeNode(path_filter_table, ignore_list)\n        scan_tree_node.SetDefaultValue(scan_sub_node)\n    return scan_tree_node", "docstring": "Builds a scan tree node.\n\nArgs:\npath_filter_table: a path filter table object (instance of\n_PathFilterTable).\nignore_list: a list of path segment indexes to ignore, where 0 is the\nindex of the first path segment relative from the root.\n\nReturns:\nA scan tree node (instance of PathFilterScanTreeNode).\n\nRaises:\nValueError: if the path segment index value or the number of paths\nsegments value is out of bounds.", "source": "codesearchnet"}
{"code": "def closest_distance(item_a, time_a, item_b, time_b, max_value):\n    return (np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value))", "docstring": "Euclidean distance between the pixels in item_a and item_b closest to each other.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def predict_next_action(self, state_key, next_action_list):\n        \n        if self.q_df is not None:\n            next_action_q_df = self.q_df[self.q_df.state_key == state_key]\n            next_action_q_df = next_action_q_df[next_action_q_df.action_key.isin(next_action_list)]\n            if next_action_q_df.shape[0] == 0:\n                return random.choice(next_action_list)\n            else:\n                if next_action_q_df.shape[0] == 1:\n                    max_q_action = next_action_q_df[\"action_key\"].values[0]\n                else:\n                    next_action_q_df = next_action_q_df.sort_values(by=[\"q_value\"], ascending=False)\n                    max_q_action = next_action_q_df.iloc[0, :][\"action_key\"]\n                return max_q_action\n        else:\n            return random.choice(next_action_list)", "docstring": "Predict next action by Q-Learning.\n\nArgs:\nstate_key:          The key of state in `self.t+1`.\nnext_action_list:   The possible action in `self.t+1`.\n\nReturns:\nThe key of action.", "source": "juraj-google-style"}
{"code": "def member_of(self, group):\n        \n        if isinstance(group, Group):\n            group = group.name\n        return self.groups.filter(name=group).exists()", "docstring": "Returns whether a user is a member of a certain group.\n\nArgs:\ngroup\nThe name of a group (string) or a group object\n\nReturns:\nBoolean", "source": "juraj-google-style"}
{"code": "def maybe_copy_file_to_directory(source_filepath, target_directory):\n  \n  if not tf.gfile.Exists(target_directory):\n    tf.logging.info(\"Creating directory %s\" % target_directory)\n    os.mkdir(target_directory)\n  target_filepath = os.path.join(target_directory,\n                                 os.path.basename(source_filepath))\n  if not tf.gfile.Exists(target_filepath):\n    tf.logging.info(\"Copying %s to %s\" % (source_filepath, target_filepath))\n    tf.gfile.Copy(source_filepath, target_filepath)\n    statinfo = os.stat(target_filepath)\n    tf.logging.info(\"Successfully copied %s, %s bytes.\" % (target_filepath,\n                                                           statinfo.st_size))\n  else:\n    tf.logging.info(\"Not copying, file already found: %s\" % target_filepath)\n  return target_filepath", "docstring": "Copy a file to a directory if it is not already there.\n\nReturns the target filepath.\n\nArgs:\nsource_filepath: a string\ntarget_directory: a string\n\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def from_path(cls, path, suffix=''):\n\n    def _get_filepath(filename):\n        name_pattern = (((filename + suffix) + '*') if (filename != 'POTCAR') else (filename + '*'))\n        paths = glob.glob(os.path.join(path, name_pattern))\n        fpath = None\n        if (len(paths) >= 1):\n            paths.sort(reverse=True)\n            warning_msg = (('Multiple files detected, using %s' % os.path.basename(paths[0])) if (len(paths) > 1) else None)\n            fpath = paths[0]\n        else:\n            warning_msg = ('Could not find %s' % filename)\n            if (filename in ['AECCAR0', 'AECCAR2']):\n                warning_msg += ', cannot calculate charge transfer.'\n            elif (filename == 'POTCAR'):\n                warning_msg += ', interpret Bader results with caution.'\n        if warning_msg:\n            warnings.warn(warning_msg)\n        return fpath\n    chgcar_filename = _get_filepath('CHGCAR')\n    if (chgcar_filename is None):\n        raise IOError('Could not find CHGCAR!')\n    potcar_filename = _get_filepath('POTCAR')\n    aeccar0 = _get_filepath('AECCAR0')\n    aeccar2 = _get_filepath('AECCAR2')\n    if (aeccar0 and aeccar2):\n        chgref = (Chgcar.from_file(aeccar0) + Chgcar.from_file(aeccar2))\n        chgref_filename = 'CHGREF'\n        chgref.write_file(chgref_filename)\n    else:\n        chgref_filename = None\n    return cls(chgcar_filename, potcar_filename=potcar_filename, chgref_filename=chgref_filename)", "docstring": "Convenient constructor that takes in the path name of VASP run\nto perform Bader analysis.\n\nArgs:\npath (str): Name of directory where VASP output files are\nstored.\nsuffix (str): specific suffix to look for (e.g. '.relax1'\nfor 'CHGCAR.relax1.gz').", "source": "codesearchnet"}
{"code": "def _get_file_names(file_pattern, shuffle):\n    if isinstance(file_pattern, list):\n        if not file_pattern:\n            raise ValueError('Argument `file_pattern` should not be empty.')\n        file_names = []\n        for entry in file_pattern:\n            file_names.extend(gfile.Glob(entry))\n    else:\n        file_names = list(gfile.Glob(file_pattern))\n    if not file_names:\n        raise ValueError(f'No files match `file_pattern` {file_pattern}.')\n    if not shuffle:\n        file_names = sorted(file_names)\n    return file_names", "docstring": "Parse list of file names from pattern, optionally shuffled.\n\nArgs:\nfile_pattern: File glob pattern, or list of glob patterns.\nshuffle: Whether to shuffle the order of file names.\n\nReturns:\nList of file names matching `file_pattern`.\n\nRaises:\nValueError: If `file_pattern` is empty, or pattern matches no files.", "source": "github-repos"}
{"code": "def normalize(inputs, epsilon=1e-08, scope='ln'):\n    with tf.variable_scope(scope):\n        inputs_shape = inputs.get_shape()\n        params_shape = inputs_shape[(- 1):]\n        (mean, variance) = tf.nn.moments(inputs, [(- 1)], keep_dims=True)\n        beta = tf.Variable(tf.zeros(params_shape))\n        gamma = tf.Variable(tf.ones(params_shape))\n        normalized = ((inputs - mean) / ((variance + epsilon) ** 0.5))\n        outputs = ((gamma * normalized) + beta)\n    return outputs", "docstring": "Applies layer normalization.\n\nArgs:\ninputs: A tensor with 2 or more dimensions, where the first dimension has\n`batch_size`.\nepsilon: A floating number. A very small number for preventing ZeroDivision Error.\nscope: Optional scope for `variable_scope`.\nreuse: Boolean, whether to reuse the weights of a previous layer\nby the same name.\n\nReturns:\nA tensor with the same shape and data dtype as `inputs`.", "source": "codesearchnet"}
{"code": "def version(self, api_version=True):\n    url = self._url('/version', versioned_api=api_version)\n    return self._result(self._get(url), json=True)", "docstring": "Returns version information from the server. Similar to the ``docker\nversion`` command.\n\nReturns:\n(dict): The server version information\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def latlong_to_locator(latitude, longitude):\n    if ((longitude >= 180) or (longitude <= (- 180))):\n        raise ValueError\n    if ((latitude >= 90) or (latitude <= (- 90))):\n        raise ValueError\n    longitude += 180\n    latitude += 90\n    locator = chr((ord('A') + int((longitude / 20))))\n    locator += chr((ord('A') + int((latitude / 10))))\n    locator += chr((ord('0') + int(((longitude % 20) / 2))))\n    locator += chr((ord('0') + int((latitude % 10))))\n    locator += chr((ord('A') + int(((longitude - (int((longitude / 2)) * 2)) / (2 / 24)))))\n    locator += chr((ord('A') + int(((latitude - (int((latitude / 1)) * 1)) / (1 / 24)))))\n    return locator", "docstring": "converts WGS84 coordinates into the corresponding Maidenhead Locator\n\nArgs:\nlatitude (float): Latitude\nlongitude (float): Longitude\n\nReturns:\nstring: Maidenhead locator\n\nRaises:\nValueError: When called with wrong or invalid input args\nTypeError: When args are non float values\n\nExample:\nThe following example converts latitude and longitude into the Maidenhead locator\n\n>>> from pyhamtools.locator import latlong_to_locator\n>>> latitude = 48.5208333\n>>> longitude = 9.375\n>>> latlong_to_locator(latitude, longitude)\n'JN48QM'\n\nNote:\nLatitude (negative = West, positive = East)\nLongitude (negative = South, positive = North)", "source": "codesearchnet"}
{"code": "def set_status(self, status: Status, increment_try_count: bool=True, filename: str=None):\n    url = self.url_record.url\n    assert (not self._try_count_incremented), (url, status)\n    if increment_try_count:\n        self._try_count_incremented = True\n    _logger.debug(__('Marking URL {0} status {1}.', url, status))\n    url_result = URLResult()\n    url_result.filename = filename\n    self.app_session.factory['URLTable'].check_in(url, status, increment_try_count=increment_try_count, url_result=url_result)\n    self._processed = True", "docstring": "Mark the item with the given status.\n\nArgs:\nstatus: a value from :class:`Status`.\nincrement_try_count: if True, increment the ``try_count``\nvalue", "source": "codesearchnet"}
{"code": "def Environ(variable, default):\n    precondition.AssertType(variable, Text)\n    value = os.environ.get(variable, default)\n    if (value is None):\n        return default\n    if PY2:\n        value = value.decode('utf-8')\n    return value", "docstring": "A wrapper for `os.environ.get` that works the same way in both Pythons.\n\nArgs:\nvariable: A name of the variable to get the value of.\ndefault: A default value to return in case no value for the given variable\nis set.\n\nReturns:\nAn environment value of the given variable.", "source": "codesearchnet"}
{"code": "def template_instance(self):\n    ofs = self.offset()\n    if ((self.unpack_byte(0) & 15) == 15):\n        ofs += 4\n    return TemplateInstanceNode(self._buf, ofs, self._chunk, self)", "docstring": "parse the template instance node.\nthis is used to compute the location of the template definition structure.\n\nReturns:\nTemplateInstanceNode: the template instance.", "source": "codesearchnet"}
{"code": "def next(self):\n    if (self._mode != 'r'):\n        raise UnsupportedOperation(\"not available in 'w' mode\")\n    self._n += 1\n    if (self._n > self._nb_markers):\n        raise StopIteration()\n    return (self._bim.index[(self._n - 1)], self._read_current_marker())", "docstring": "Returns the next marker.\n\nReturns:\ntuple: The marker name as a string and its genotypes as a\n:py:class:`numpy.ndarray`.", "source": "codesearchnet"}
{"code": "def cdnode(self, astr_path):\n    l_absPath = []\n    (b_valid, l_absPath) = self.b_pathInTree(astr_path)\n    if b_valid:\n        self.l_cwd = l_absPath[:]\n        self.snode_current = self.snode_root\n        self.sbranch_current = self.sbranch_root\n        for node in l_absPath[1:]:\n            self.snode_current = self.snode_current.d_nodes[node]\n        self.sbranch_current.dict_branch = self.snode_current.snode_parent.d_nodes\n        return {'status': True, 'path': self.l_cwd}\n    return {'status': False, 'path': []}", "docstring": "Change working node to astr_path.\n\nThe path is converted to a list, split on '/'. By performing a 'cd'\nall parent and derived nodes need to be updated relative to\nnew location.\n\nArgs:\nastr_path (string): The path to cd to.\n\nReturns:\n{\"status\" : True/False , \"path\": l_cwd -- the path as list}", "source": "codesearchnet"}
{"code": "def delete_vnet(access_token, subscription_id, resource_group, name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/virtualNetworks/', name, '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a virtual network.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nname (str): Name of the VNet.\n\nReturns:\nHTTP response. VNet JSON body.", "source": "codesearchnet"}
{"code": "def next_moments_operating_on(self, qubits: Iterable[ops.Qid], start_moment_index: int=0) -> Dict[(ops.Qid, int)]:\n    next_moments = {}\n    for q in qubits:\n        next_moment = self.next_moment_operating_on([q], start_moment_index)\n        next_moments[q] = (len(self._moments) if (next_moment is None) else next_moment)\n    return next_moments", "docstring": "Finds the index of the next moment that touches each qubit.\n\nArgs:\nqubits: The qubits to find the next moments acting on.\nstart_moment_index: The starting point of the search.\n\nReturns:\nThe index of the next moment that touches each qubit. If there\nis no such moment, the next moment is specified as the number of\nmoments in the circuit. Equivalently, can be characterized as one\nplus the index of the last moment after start_moment_index\n(inclusive) that does *not* act on a given qubit.", "source": "codesearchnet"}
{"code": "def rename(self, container, name):\n    url = self._url('/containers/{0}/rename', container)\n    params = {'name': name}\n    res = self._post(url, params=params)\n    self._raise_for_status(res)", "docstring": "Rename a container. Similar to the ``docker rename`` command.\n\nArgs:\ncontainer (str): ID of the container to rename\nname (str): New name for the container\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def find_faces(self, image, draw_box=False):\n    frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n    faces = self.cascade.detectMultiScale(frame_gray, scaleFactor=1.3, minNeighbors=5, minSize=(50, 50), flags=0)\n    if draw_box:\n        for (x, y, w, h) in faces:\n            cv2.rectangle(image, (x, y), ((x + w), (y + h)), (0, 255, 0), 2)\n    return faces", "docstring": "Uses a haarcascade to detect faces inside an image.\n\nArgs:\nimage: The image.\ndraw_box: If True, the image will be marked with a rectangle.\n\nReturn:\nThe faces as returned by OpenCV's detectMultiScale method for\ncascades.", "source": "codesearchnet"}
{"code": "def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n    temperatures = np.linspace(tmin, tmax, ntemp)\n    mol = ('' if self.structure else '-c')\n    fig = self._plot_thermo(self.dos.cv, temperatures, ylabel='Thermodynamic properties', ylim=ylim, label='$C_v$ (J/K/mol{})'.format(mol), **kwargs)\n    self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0], label='$S$ (J/K/mol{})'.format(mol), **kwargs)\n    self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=0.001, label='$\\\\Delta E$ (kJ/K/mol{})'.format(mol), **kwargs)\n    self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=0.001, label='$\\\\Delta F$ (kJ/K/mol{})'.format(mol), **kwargs)\n    fig.axes[0].legend(loc='best')\n    return fig", "docstring": "Plots all the thermodynamic properties in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "codesearchnet"}
{"code": "def get_item(self, name, bootstrap=False):\n    for item in self._get_items(bootstrap):\n        if (item.name == name):\n            return item\n    return None", "docstring": "Get a particular item in the specification.\n\nArgs:\nname (str): The name of the item to retrieve.\nbootstrap (bool): Only search bootstrap items\n\nReturns (YapconfItem):\nA YapconfItem if it is found, None otherwise.", "source": "codesearchnet"}
{"code": "def gradient_summaries(grad_vars, groups=None, scope='gradients'):\n    groups = (groups or {'all': '.*'})\n    grouped = collections.defaultdict(list)\n    for (grad, var) in grad_vars:\n        if (grad is None):\n            continue\n        for (name, pattern) in groups.items():\n            if re.match(pattern, var.name):\n                name = re.sub(pattern, name, var.name)\n                grouped[name].append(grad)\n    for name in groups:\n        if (name not in grouped):\n            tf.logging.warn(\"No variables matching '{}' group.\".format(name))\n    summaries = []\n    for (name, grads) in grouped.items():\n        grads = [tf.reshape(grad, [(- 1)]) for grad in grads]\n        grads = tf.concat(grads, 0)\n        summaries.append(tf.summary.histogram(((scope + '/') + name), grads))\n    return tf.summary.merge(summaries)", "docstring": "Create histogram summaries of the gradient.\n\nSummaries can be grouped via regexes matching variables names.\n\nArgs:\ngrad_vars: List of (gradient, variable) tuples as returned by optimizers.\ngroups: Mapping of name to regex for grouping summaries.\nscope: Name scope for this operation.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def process_messages(self, max_messages=10000):\n        \n        subscribe_clients = [self.primary_subscribe_client]\n        for subscribe_client in subscribe_clients:\n            for _ in range(max_messages):\n                message = subscribe_client.get_message()\n                if message is None:\n                    \n                    break\n\n                \n                channel = message[\"channel\"]\n                data = message[\"data\"]\n\n                \n                if channel == ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL:\n                    \n                    message_handler = self.xray_heartbeat_batch_handler\n                elif channel == ray.gcs_utils.XRAY_DRIVER_CHANNEL:\n                    \n                    message_handler = self.xray_driver_removed_handler\n                else:\n                    raise Exception(\"This code should be unreachable.\")\n\n                \n                message_handler(channel, data)", "docstring": "Process all messages ready in the subscription channels.\n\nThis reads messages from the subscription channels and calls the\nappropriate handlers until there are no messages left.\n\nArgs:\nmax_messages: The maximum number of messages to process before\nreturning.", "source": "juraj-google-style"}
{"code": "def signHostCsr(self, xcsr, signas, outp=None, sans=None):\n    pkey = xcsr.get_pubkey()\n    name = xcsr.get_subject().CN\n    return self.genHostCert(name, csr=pkey, signas=signas, outp=outp, sans=sans)", "docstring": "Signs a host CSR with a CA keypair.\n\nArgs:\ncert (OpenSSL.crypto.X509Req): The certificate signing request.\nsignas (str): The CA keypair name to sign the CSR with.\noutp (synapse.lib.output.Output): The output buffer.\nsans (list): List of subject alternative names.\n\nExamples:\nSign a host key with the CA \"myca\":\n\ncdir.signHostCsr(mycsr, 'myca')\n\nReturns:\n((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)):  Tuple containing the public key and certificate objects.", "source": "codesearchnet"}
{"code": "def cctop_check_status(jobid):\n    status = 'http:\n    status_text = requests.post(status)\n    return status_text.text", "docstring": "Check the status of a CCTOP job ID.\n\nArgs:\njobid (str): Job ID obtained when job was submitted\n\nReturns:\nstr: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress,\n'Invalid' for any errors.", "source": "codesearchnet"}
{"code": "def _add_task(cls,\n                worker_task,\n                mapreduce_spec,\n                queue_name):\n    \n    if not _run_task_hook(mapreduce_spec.get_hooks(),\n                          \"enqueue_worker_task\",\n                          worker_task,\n                          queue_name):\n      try:\n        \n        \n        worker_task.add(queue_name)\n      except (taskqueue.TombstonedTaskError,\n              taskqueue.TaskAlreadyExistsError), e:\n        logging.warning(\"Task %r already exists. %s: %s\",\n                        worker_task.name,\n                        e.__class__,\n                        e)", "docstring": "Schedule slice scanning by adding it to the task queue.\n\nArgs:\nworker_task: a model.HugeTask task for slice. This is NOT a taskqueue\ntask.\nmapreduce_spec: an instance of model.MapreduceSpec.\nqueue_name: Optional queue to run on; uses the current queue of\nexecution or the default queue if unspecified.", "source": "juraj-google-style"}
{"code": "def write_files(dos, pdos, prefix=None, directory=None, zero_to_efermi=True):\n    if (len(dos.densities) == 1):\n        sdata = [[Spin.up, 1, '']]\n    else:\n        sdata = [[Spin.up, 1, '(up)'], [Spin.down, (- 1), '(down)']]\n    header = ['energy']\n    eners = ((dos.energies - dos.efermi) if zero_to_efermi else dos.energies)\n    tdos_data = [eners]\n    for (spin, sign, label) in sdata:\n        header.append('dos{}'.format(label))\n        tdos_data.append((dos.densities[spin] * sign))\n    tdos_data = np.stack(tdos_data, axis=1)\n    filename = ('{}_total_dos.dat'.format(prefix) if prefix else 'total_dos.dat')\n    if directory:\n        filename = os.path.join(directory, filename)\n    np.savetxt(filename, tdos_data, header=' '.join(header))\n    spin = len(dos.densities)\n    for (el, el_pdos) in pdos.items():\n        header = ['energy']\n        pdos_data = [eners]\n        for orb in sort_orbitals(el_pdos):\n            for (spin, sign, label) in sdata:\n                header.append('{}{}'.format(orb, label))\n                pdos_data.append((el_pdos[orb].densities[spin] * sign))\n        pdos_data = np.stack(pdos_data, axis=1)\n        if prefix:\n            filename = '{}_{}_dos.dat'.format(prefix, el)\n        else:\n            filename = '{}_dos.dat'.format(el)\n        if directory:\n            filename = os.path.join(directory, filename)\n        np.savetxt(filename, pdos_data, header=' '.join(header))", "docstring": "Write the density of states data to disk.\n\nArgs:\ndos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \\\n:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total\ndensity of states.\npdos (dict): The projected density of states. Formatted as a\n:obj:`dict` of :obj:`dict` mapping the elements and their orbitals\nto :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For\nexample::\n\n{\n'Bi': {'s': Dos, 'p': Dos},\n'S': {'s': Dos}\n}\n\nprefix (:obj:`str`, optional): A prefix for file names.\ndirectory (:obj:`str`, optional): The directory in which to save files.\nzero_to_efermi (:obj:`bool`, optional): Normalise the energy such\nthat the Fermi level is set as 0 eV.", "source": "codesearchnet"}
{"code": "def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):\n    \n    types = [treefun(n) for n in neuron.neurites]\n    return CheckResult(types.count(NeuriteType.apical_dendrite) >= min_number)", "docstring": "Check if a neuron has apical dendrites\n\nArguments:\nneuron(Neuron): The neuron object to test\nmin_number: minimum number of apical dendrites required\ntreefun: Optional function to calculate the tree type of neuron's\nneurites\n\nReturns:\nCheckResult with result", "source": "juraj-google-style"}
{"code": "def cursor_event(self, x, y, dx, dy):\n    self.sys_camera.rot_state(x, y)", "docstring": "The standard mouse movement event method.\nCan be overriden to add new functionality.\nBy default this feeds the system camera with new values.\n\nArgs:\nx: The current mouse x position\ny: The current mouse y position\ndx: Delta x postion (x position difference from the previous event)\ndy: Delta y postion (y position difference from the previous event)", "source": "codesearchnet"}
{"code": "def _get_default_retry_params():\n    default = getattr(_thread_local_settings, 'default_retry_params', None)\n    if ((default is None) or (not default.belong_to_current_request())):\n        return RetryParams()\n    else:\n        return copy.copy(default)", "docstring": "Get default RetryParams for current request and current thread.\n\nReturns:\nA new instance of the default RetryParams.", "source": "codesearchnet"}
{"code": "def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True):\n    if (parallelism.n == 1):\n        return x\n    if maybe_reduce:\n        original_parallelism = parallelism\n        (parallelism, x) = reduce_by_device(parallelism, x, tf.add_n)\n    if (parallelism.n == 1):\n        y = x\n    else:\n        x_flat = parallelism(tf.reshape, x, ([[(- 1)]] * parallelism.n))\n        x_split = parallelism(common_layers.approximate_split, x_flat, parallelism.n, 0)\n\n        def _step(source_replica, target_replica, x_split, op='plus_eq'):\n            'Helper function - one step of summing or copying.\\n\\n      If op == \"plus_eq\", then adds source_replica into target_replica\\n      If op == \"copy\", then copies source_replica onto target_replica\\n\\n      These operations happen for all shards.  The replica numbers are offset\\n      by the shard numbers to keep all physical links busy.\\n\\n      Args:\\n        source_replica: an integer\\n        target_replica: an integer\\n        x_split: a list of lists of tensors\\n        op: a string\\n      '\n            for shard in range(parallelism.n):\n                source_device = ((shard + source_replica) % parallelism.n)\n                target_device = ((shard + target_replica) % parallelism.n)\n                source = x_split[source_device][shard]\n                if use_bfloat16:\n                    with tf.device(parallelism.devices[source_device]):\n                        source = tf.to_bfloat16(source)\n                with tf.device(parallelism.devices[target_device]):\n                    source = tf.to_float(source)\n                    if (op == 'plus_eq'):\n                        x_split[target_device][shard] += source\n                    else:\n                        assert (op == 'copy')\n                        x_split[target_device][shard] = tf.identity(source)\n        center = (parallelism.n \n        for i in reversed(range(center, (parallelism.n - 1))):\n            _step((i + 1), i, x_split, op='plus_eq')\n        for i in range(center):\n            _step(i, (i + 1), x_split, op='plus_eq')\n        for i in range(center, (parallelism.n - 1)):\n            _step(i, (i + 1), x_split, op='copy')\n        for i in reversed(range(center)):\n            _step((i + 1), i, x_split, op='copy')\n        x_concat = parallelism(tf.concat, x_split, 0)\n        y = parallelism(common_layers.reshape_like_all_dims, x_concat, x)\n    if maybe_reduce:\n        y = expand_by_device(original_parallelism, parallelism, y)\n    return y", "docstring": "Compute the sum of all Tensors and put the result everywhere.\n\nAssumes that the devices are connected in a ring.\n\nArgs:\nx: a list of Tensors with length parallelism.n\nparallelism: a expert_utils.Parallelism object.\nmaybe_reduce: a boolean - first reduce per device.\nuse_bfloat16: a boolean - saves bandwidth but loses precision\n\nReturns:\na list of Tensors with length parallelism.n", "source": "codesearchnet"}
{"code": "def run(self, env: env_tools.PreparedEnv, verbose: bool, previous_failures: Set['Check']) -> CheckResult:\n    if previous_failures.intersection(self.dependencies):\n        print(shell_tools.highlight(('Skipped ' + self.command_line_switch()), shell_tools.YELLOW))\n        return CheckResult(self, False, 'Skipped due to dependency failing.', None)\n    print(shell_tools.highlight(('Running ' + self.command_line_switch()), shell_tools.GREEN))\n    try:\n        (success, message) = self.perform_check(env, verbose=verbose)\n        result = CheckResult(self, success, message, None)\n    except Exception as ex:\n        result = CheckResult(self, False, 'Unexpected error.', ex)\n    print(shell_tools.highlight(('Finished ' + self.command_line_switch()), (shell_tools.GREEN if result.success else shell_tools.RED)))\n    if verbose:\n        print(result)\n    return result", "docstring": "Evaluates this check.\n\nArgs:\nenv: The prepared python environment to run the check in.\nverbose: When set, more progress output is produced.\nprevious_failures: Checks that have already run and failed.\n\nReturns:\nA CheckResult instance.", "source": "codesearchnet"}
{"code": "def tag(self, name, action='ADD', params=None):\n        \n        if not name:\n            self._tcex.handle_error(925, ['name', 'tag', 'name', 'name', name])\n\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if action in ['GET', 'ADD', 'DELETE']:\n            return self.tc_requests.tag(\n                self.api_type,\n                self.api_sub_type,\n                self.unique_id,\n                name,\n                action=action,\n                owner=self.owner,\n                params=params,\n            )\n        self._tcex.handle_error(925, ['action', 'tag', 'action', 'action', action])\n        return None", "docstring": "Adds a tag to a Indicator/Group/Victim/Security Label\nArgs:\nparams:\naction:\nname: The name of the tag", "source": "juraj-google-style"}
{"code": "def connect_with(self, wire_char):\n        \n\n        if len([qbit for qbit in self.qubit_layer if qbit is not None]) == 1:\n            \n            return\n\n        for label, affected_bits in self.connections:\n\n            if not affected_bits:\n                continue\n\n            affected_bits[0].connect(wire_char, ['bot'])\n            for affected_bit in affected_bits[1:-1]:\n                affected_bit.connect(wire_char, ['bot', 'top'])\n\n            affected_bits[-1].connect(wire_char, ['top'], label)\n\n            if label:\n                for affected_bit in affected_bits:\n                    affected_bit.right_fill = len(label) + len(affected_bit.mid)", "docstring": "Connects the elements in the layer using wire_char.\nArgs:\nwire_char (char): For example '║' or '│'.", "source": "juraj-google-style"}
{"code": "def step(self, action, blocking=True):\n    \n    promise = self.call('step', action)\n    if blocking:\n      return promise()\n    else:\n      return promise", "docstring": "Step the environment.\n\nArgs:\naction: The action to apply to the environment.\nblocking: Whether to wait for the result.\n\nReturns:\nTransition tuple when blocking, otherwise callable that returns the\ntransition tuple.", "source": "juraj-google-style"}
{"code": "def random_string():\n    numpy_state = np.random.get_state()\n    np.random.seed(None)\n    random_id = np.random.bytes(ray_constants.ID_SIZE)\n    np.random.set_state(numpy_state)\n    return random_id", "docstring": "Generate a random string to use as an ID.\n\nNote that users may seed numpy, which could cause this function to generate\nduplicate IDs. Therefore, we need to seed numpy ourselves, but we can't\ninterfere with the state of the user's random number generator, so we\nextract the state of the random number generator and reset it after we are\ndone.\n\nTODO(rkn): If we want to later guarantee that these are generated in a\ndeterministic manner, then we will need to make some changes here.\n\nReturns:\nA random byte string of length ray_constants.ID_SIZE.", "source": "codesearchnet"}
{"code": "def verify_task_in_task_graph(task_link, graph_defn, level=logging.CRITICAL):\n    ignore_keys = ('created', 'deadline', 'expires', 'dependencies', 'schedulerId')\n    errors = []\n    runtime_defn = deepcopy(task_link.task)\n    bad_deps = (set(runtime_defn['dependencies']) - set(graph_defn['task']['dependencies']))\n    bad_deps = (bad_deps - {task_link.decision_task_id})\n    if bad_deps:\n        errors.append(\"{} {} dependencies don't line up!\\n{}\".format(task_link.name, task_link.task_id, bad_deps))\n    runtime_defn['payload'] = _take_expires_out_from_artifacts_in_payload(runtime_defn['payload'])\n    graph_defn['task']['payload'] = _take_expires_out_from_artifacts_in_payload(graph_defn['task']['payload'])\n    for (key, value) in graph_defn['task'].items():\n        if (key in ignore_keys):\n            continue\n        if (value != runtime_defn[key]):\n            errors.append('{} {} {} differs!\\n graph: {}\\n task: {}'.format(task_link.name, task_link.task_id, key, format_json(value), format_json(runtime_defn[key])))\n    raise_on_errors(errors, level=level)", "docstring": "Verify a given task_link's task against a given graph task definition.\n\nThis is a helper function for ``verify_link_in_task_graph``; this is split\nout so we can call it multiple times when we fuzzy match.\n\nArgs:\ntask_link (LinkOfTrust): the link to try to match\ngraph_defn (dict): the task definition from the task-graph.json to match\n``task_link`` against\nlevel (int, optional): the logging level to use on errors. Defaults to logging.CRITICAL\n\nRaises:\nCoTError: on failure", "source": "codesearchnet"}
{"code": "def _validate_state_root(self, state_root):\n    if (self._state_root_regex.fullmatch(state_root) is None):\n        LOGGER.debug('Invalid state root: %s', state_root)\n        raise _ResponseFailed(self._status.INVALID_ROOT)", "docstring": "Validates a state root, raising a ResponseFailed error if invalid.\n\nArgs:\nstate_root (str): The state_root to validate\n\nRaises:\nResponseFailed: The state_root was invalid, and a status of\nINVALID_ROOT will be sent with the response.", "source": "codesearchnet"}
{"code": "def _get_node_dependencies(self, proto):\n    dependencies = {ref.local_name: ref.node_id for ref in proto.dependencies}\n    kind = proto.WhichOneof('kind')\n    if kind == 'function':\n        concrete_functions = proto.function.concrete_functions\n        for fn_name in concrete_functions:\n            for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:\n                dependencies[bound_input] = bound_input\n    elif kind == 'bare_concrete_function':\n        fn_name = proto.bare_concrete_function.concrete_function_name\n        for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:\n            dependencies[bound_input] = bound_input\n    elif kind == 'resource':\n        for child in proto.children:\n            if child.local_name == '_create_resource':\n                dependencies['_create_resource'] = child.node_id\n    return dependencies", "docstring": "Returns a dictionary of all dependencies of an object.\n\nArgs:\nproto: A SavedObject proto.\n\nReturns:\nDict mapping string dependency name *or* int node id to the node id.\nThe int node id key is used for mapping function captures.", "source": "github-repos"}
{"code": "def mnist_generator(tmp_dir, training, how_many, start_from=0):\n  \n  _get_mnist(tmp_dir)\n  d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME\n  l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME\n  return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)", "docstring": "Image generator for MNIST.\n\nArgs:\ntmp_dir: path to temporary storage directory.\ntraining: a Boolean; if true, we use the train set, otherwise the test set.\nhow_many: how many images and labels to generate.\nstart_from: from which image to start.\n\nReturns:\nAn instance of image_generator that produces MNIST images.", "source": "juraj-google-style"}
{"code": "def _get_function_inputs(f, src_kwargs):\n    if hasattr(f, '_func'):\n        f = f._func\n    try:\n        argspec = inspect.getfullargspec(f)\n    except AttributeError:\n        argspec = inspect.getargspec(f)\n    fkwargs = {k: v for (k, v) in six.iteritems(src_kwargs) if (k in argspec.args)}\n    return fkwargs", "docstring": "Filters inputs to be compatible with function `f`'s signature.\n\nArgs:\nf: Function according to whose input signature we filter arguments.\nsrc_kwargs: Keyword arguments to filter according to `f`.\n\nReturns:\nkwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s\nsignature.", "source": "codesearchnet"}
{"code": "def to_pil_image(self, image, rescale=None):\n    self._ensure_format_supported(image)\n    if is_torch_tensor(image):\n        image = image.numpy()\n    if isinstance(image, np.ndarray):\n        if rescale is None:\n            rescale = isinstance(image.flat[0], np.floating)\n        if image.ndim == 3 and image.shape[0] in [1, 3]:\n            image = image.transpose(1, 2, 0)\n        if rescale:\n            image = image * 255\n        image = image.astype(np.uint8)\n        return PIL.Image.fromarray(image)\n    return image", "docstring": "Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if\nneeded.\n\nArgs:\nimage (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):\nThe image to convert to the PIL Image format.\nrescale (`bool`, *optional*):\nWhether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will\ndefault to `True` if the image type is a floating type, `False` otherwise.", "source": "github-repos"}
{"code": "def index_filename_rel_other_index(self, other: str) -> str:\n        \n        return relpath(self.index_filename, start=dirname(other))", "docstring": "Returns the filename of this index, relative to the director of another\nindex. (For inserting a reference to this index into ``other``.)\n\nArgs:\nother: the other index\n\nReturns:\nrelative filename of our index", "source": "juraj-google-style"}
{"code": "def delete_device(self, auth_body, device_id):\n        \n        content = {\n            \"auth\": auth_body\n        }\n        return self._send(\"DELETE\", \"/devices/%s\" % device_id, content=content)", "docstring": "Deletes the given device, and invalidates any access token associated with it.\n\nNOTE: This endpoint uses the User-Interactive Authentication API.\n\nArgs:\nauth_body (dict): Authentication params.\ndevice_id (str): The device ID of the device to delete.", "source": "juraj-google-style"}
{"code": "def __init__(self, columns: list[str], hub_url: str, **kwargs):\n    super().__init__(columns=columns, **kwargs)\n    self.model_uri = hub_url", "docstring": "Embedding config for tensorflow hub models. This config can be used with\nMLTransform to embed image data. Models are loaded using the RunInference\nPTransform with the help of a ModelHandler.\n\nArgs:\ncolumns: The columns containing the images to be embedded.\nhub_url: The url of the tensorflow hub model.\nmin_batch_size: The minimum batch size to be used for inference.\nmax_batch_size: The maximum batch size to be used for inference.\nlarge_model: Whether to share the model across processes.", "source": "github-repos"}
{"code": "def set_default_backend(self, backend_name):\n    if (backend_name not in BACKENDS):\n        raise ValueError(f\"Unknown backend '{backend_name}'.\")\n    self._default_backend = backend_name", "docstring": "Set the default backend of this circuit.\n\nThis setting is only applied for this circuit.\nIf you want to change the default backend of all gates,\nuse `BlueqatGlobalSetting.set_default_backend()`.\n\nAfter set the default backend by this method,\nglobal setting is ignored even if `BlueqatGlobalSetting.set_default_backend()` is called.\nIf you want to use global default setting, call this method with backend_name=None.\n\nArgs:\nbackend_name (str or None): new default backend name.\nIf None is given, global setting is applied.\n\nRaises:\nValueError: If `backend_name` is not registered backend.", "source": "codesearchnet"}
{"code": "def _accept(random_sample: float, cost_diff: float, temp: float) -> Tuple[(bool, float)]:\n    exponent = ((- cost_diff) / temp)\n    if (exponent >= 0.0):\n        return (True, 1.0)\n    else:\n        probability = math.exp(exponent)\n    return ((probability > random_sample), probability)", "docstring": "Calculates probability and draws if solution should be accepted.\n\nBased on exp(-Delta*E/T) formula.\n\nArgs:\nrandom_sample: Uniformly distributed random number in the range [0, 1).\ncost_diff: Cost difference between new and previous solutions.\ntemp: Current temperature.\n\nReturns:\nTuple of boolean and float, with boolean equal to True if solution is\naccepted, and False otherwise. The float value is acceptance\nprobability.", "source": "codesearchnet"}
{"code": "def locked_get(self):\n    credential = self._backend.locked_get(self._key)\n    if (credential is not None):\n        credential.set_store(self)\n    return credential", "docstring": "Retrieves the current credentials from the store.\n\nReturns:\nAn instance of :class:`oauth2client.client.Credentials` or `None`.", "source": "codesearchnet"}
{"code": "def target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n    return self._target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Prepare a answer string for the model.\n\nArgs:\nanswer `str`:\nCorresponding answer supervision to the queries for training the model.", "source": "github-repos"}
{"code": "def software_breakpoint(self):\n    software_types = [enums.JLinkBreakpoint.SW_RAM, enums.JLinkBreakpoint.SW_FLASH, enums.JLinkBreakpoint.SW]\n    return any(((self.Type & stype) for stype in software_types))", "docstring": "Returns whether this is a software breakpoint.\n\nArgs:\nself (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance\n\nReturns:\n``True`` if the breakpoint is a software breakpoint, otherwise\n``False``.", "source": "codesearchnet"}
{"code": "def get_program_by_title(self, program_title):\n    all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[])\n    matching_programs = [program for program in all_programs if (program.get('title') == program_title)]\n    if (len(matching_programs) > 1):\n        raise MultipleProgramMatchError(len(matching_programs))\n    elif (len(matching_programs) == 1):\n        return matching_programs[0]\n    else:\n        return None", "docstring": "Return single program by name, or None if not found.\n\nArguments:\nprogram_title(string): Program title as seen by students and in Course Catalog Admin\n\nReturns:\ndict: Program data provided by Course Catalog API", "source": "codesearchnet"}
{"code": "def to_element(self):\n    if (not self.protocol_info):\n        raise DIDLMetadataError('Could not create Element for thisresource:protocolInfo not set (required).')\n    root = XML.Element('res')\n    root.attrib['protocolInfo'] = self.protocol_info\n    if (self.import_uri is not None):\n        root.attrib['importUri'] = self.import_uri\n    if (self.size is not None):\n        root.attrib['size'] = str(self.size)\n    if (self.duration is not None):\n        root.attrib['duration'] = self.duration\n    if (self.bitrate is not None):\n        root.attrib['bitrate'] = str(self.bitrate)\n    if (self.sample_frequency is not None):\n        root.attrib['sampleFrequency'] = str(self.sample_frequency)\n    if (self.bits_per_sample is not None):\n        root.attrib['bitsPerSample'] = str(self.bits_per_sample)\n    if (self.nr_audio_channels is not None):\n        root.attrib['nrAudioChannels'] = str(self.nr_audio_channels)\n    if (self.resolution is not None):\n        root.attrib['resolution'] = self.resolution\n    if (self.color_depth is not None):\n        root.attrib['colorDepth'] = str(self.color_depth)\n    if (self.protection is not None):\n        root.attrib['protection'] = self.protection\n    root.text = self.uri\n    return root", "docstring": "Return an ElementTree Element based on this resource.\n\nReturns:\n~xml.etree.ElementTree.Element: an Element.", "source": "codesearchnet"}
{"code": "def _findSourceLine(self, annotated_source, line_number):\n    index = None\n    for i, line in enumerate(annotated_source.lines):\n        if line.startswith('L%d ' % line_number):\n            index = i\n            break\n    return index", "docstring": "Find line of given line number in annotated source.\n\nArgs:\nannotated_source: (debugger_cli_common.RichTextLines) the annotated source\nline_number: (int) 1-based line number\n\nReturns:\n(int) If line_number is found, 0-based line index in\nannotated_source.lines. Otherwise, None.", "source": "github-repos"}
{"code": "def loadfn(fname):\n    if ((fnmatch(fname, '*POSCAR*') or fnmatch(fname, '*CONTCAR*') or ('.cif' in fname.lower())) or fnmatch(fname, '*.vasp')):\n        return Structure.from_file(fname)\n    elif fnmatch(fname, '*vasprun*'):\n        from pymatgen.io.vasp import Vasprun\n        return Vasprun(fname)\n    elif fnmatch(fname, '*.json*'):\n        from monty.serialization import loadfn\n        return loadfn(fname)", "docstring": "Convenience method to perform quick loading of data from a filename. The\ntype of object returned depends the file type.\n\nArgs:\nfname (string): A filename.\n\nReturns:\nNote that fname is matched using unix-style, i.e., fnmatch.\n(Structure) if *POSCAR*/*CONTCAR*/*.cif\n(Vasprun) *vasprun*\n(obj) if *json* (passthrough to monty.serialization.loadfn)", "source": "codesearchnet"}
{"code": "def create_extended_model(model, db_penalty=None, ex_penalty=None, tp_penalty=None, penalties=None):\n    model_extended = model.create_metabolic_model()\n    extra_compartment = model.extracellular_compartment\n    compartment_ids = set((c.id for c in model.compartments))\n    if (len(compartment_ids) > 0):\n        logger.info('Using all database reactions in compartments: {}...'.format(', '.join(('{}'.format(c) for c in compartment_ids))))\n        db_added = add_all_database_reactions(model_extended, compartment_ids)\n    else:\n        logger.warning('No compartments specified in the model; database reactions will not be used! Add compartment specification to model to include database reactions for those compartments.')\n        db_added = set()\n    logger.info('Using artificial exchange reactions for compartment: {}...'.format(extra_compartment))\n    ex_added = add_all_exchange_reactions(model_extended, extra_compartment, allow_duplicates=True)\n    boundaries = model.compartment_boundaries\n    if (len(boundaries) > 0):\n        logger.info('Using artificial transport reactions for the compartment boundaries: {}...'.format('; '.join(('{}<->{}'.format(c1, c2) for (c1, c2) in boundaries))))\n        tp_added = add_all_transport_reactions(model_extended, boundaries, allow_duplicates=True)\n    else:\n        logger.warning('No compartment boundaries specified in the model; artificial transport reactions will not be used!')\n        tp_added = set()\n    weights = {}\n    if (db_penalty is not None):\n        weights.update(((rxnid, db_penalty) for rxnid in db_added))\n    if (tp_penalty is not None):\n        weights.update(((rxnid, tp_penalty) for rxnid in tp_added))\n    if (ex_penalty is not None):\n        weights.update(((rxnid, ex_penalty) for rxnid in ex_added))\n    if (penalties is not None):\n        for (rxnid, penalty) in iteritems(penalties):\n            weights[rxnid] = penalty\n    return (model_extended, weights)", "docstring": "Create an extended model for gap-filling.\n\nCreate a :class:`psamm.metabolicmodel.MetabolicModel` with\nall reactions added (the reaction database in the model is taken\nto be the universal database) and also with artificial exchange\nand transport reactions added. Return the extended\n:class:`psamm.metabolicmodel.MetabolicModel`\nand a weight dictionary for added reactions in that model.\n\nArgs:\nmodel: :class:`psamm.datasource.native.NativeModel`.\ndb_penalty: penalty score for database reactions, default is `None`.\nex_penalty: penalty score for exchange reactions, default is `None`.\ntb_penalty: penalty score for transport reactions, default is `None`.\npenalties: a dictionary of penalty scores for database reactions.", "source": "codesearchnet"}
{"code": "def query(self, coords):\n        \n        \n        gal = coords\n        l = gal.l.deg\n        b = gal.b.deg\n\n        \n        scalar_input = not hasattr(l, '__len__')\n        if scalar_input:\n            l = np.array([l])\n            b = np.array([b])\n\n        \n        ebv = np.empty(l.shape, dtype='f8')\n        ebv[:] = np.nan\n\n        \n        idx = (b >= 65.) & (b <= 90.)\n        ebv[idx] = self._lb2ebv_northcap(l[idx], b[idx])\n\n        \n        idx = (b <= -65.) & (b >= -90.)\n        ebv[idx] = self._lb2ebv_southcap(l[idx], b[idx])\n\n        \n        idx = (b < 65.) & (b >= 10.)\n        ebv[idx] = self._lb2ebv_midnorth(l[idx], b[idx])\n\n        \n        idx = (b > -65.) & (b <= -10.)\n        ebv[idx] = self._lb2ebv_midsouth(l[idx], b[idx])\n\n        if scalar_input:\n            ebv = ebv[0]\n\n        return ebv", "docstring": "Returns E(B-V) at the specified location(s) on the sky.\n\nArgs:\ncoords (`astropy.coordinates.SkyCoord`): The coordinates to query.\n\nReturns:\nA float array of reddening, in units of E(B-V), at the given\ncoordinates. The shape of the output is the same as the shape of the\ncoordinates stored by `coords`.", "source": "juraj-google-style"}
{"code": "def tabulate_filetypes_rest(attrnames=None, header=None, flag_wrap_description=True, description_width=40, flag_leaf=True):\n    infos = get_filetypes_info(editor_quote='``', flag_leaf=flag_leaf)\n    (rows, header) = filetypes_info_to_rows_header(infos, attrnames, header, flag_wrap_description, description_width)\n    ret = a99.rest_table(rows, header)\n    return ret", "docstring": "Generates a reST multirow table\n\nArgs:\nattrnames: list of attribute names (keys of FILE_TYPE_INFO_ATTRS).\nDefaults to all attributes\nheader: list of strings containing headers. If not passed, uses default names\nflag_wrap_description: whether to wrap the description text\ndescription_width: width to wrap the description text (effective only if\nflag_wrap_description is True)\nflag_leaf: returns only classes that do not have subclasses\n(\"leaf\" nodes as in a class tree graph)", "source": "codesearchnet"}
{"code": "def swo_set_host_buffer_size(self, buf_size):\n    buf = ctypes.c_uint32(buf_size)\n    res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_HOST, ctypes.byref(buf))\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return None", "docstring": "Sets the size of the buffer used by the host to collect SWO data.\n\nArgs:\nself (JLink): the ``JLink`` instance\nbuf_size (int): the new size of the host buffer\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "codesearchnet"}
{"code": "def peek(init, exposes, debug=False):\n\n    def _peek(store, container, _stack=None):\n        args = [store.peek(objname, container, _stack=_stack) for objname in exposes]\n        if debug:\n            print(args)\n        return init(*args)\n    return _peek", "docstring": "Default deserializer factory.\n\nArguments:\n\ninit (callable): type constructor.\n\nexposes (iterable): attributes to be peeked and passed to `init`.\n\nReturns:\n\ncallable: deserializer (`peek` routine).", "source": "codesearchnet"}
{"code": "def authenticate(self, email=None, password=None, source=None):\n    from gdata.service import BadAuthentication\n    Api.yt_service.email = (email if email else settings.YOUTUBE_AUTH_EMAIL)\n    Api.yt_service.password = (password if password else settings.YOUTUBE_AUTH_PASSWORD)\n    Api.yt_service.source = (source if source else settings.YOUTUBE_CLIENT_ID)\n    try:\n        Api.yt_service.ProgrammaticLogin()\n        self.authenticated = True\n    except BadAuthentication:\n        raise ApiError(_('Incorrect username or password'))", "docstring": "Authenticates the user and sets the GData Auth token.\nAll params are optional, if not set, we will use the ones on the settings, if no settings found, raises AttributeError\nparams are email, password and source. Source is the app id\n\nRaises:\ngdata.service.exceptions.BadAuthentication", "source": "codesearchnet"}
{"code": "def export_mt_variants(variants, sample_id):\n    \n    document_lines = []\n    for variant in variants:\n        line = []\n        position = variant.get('position')\n        change = '>'.join([variant.get('reference'),variant.get('alternative')])\n        line.append(position)\n        line.append(change)\n        line.append(str(position)+change)\n        genes = []\n        prot_effect = []\n        for gene in variant.get('genes'):\n            genes.append(gene.get('hgnc_symbol',''))\n            for transcript in gene.get('transcripts'):\n                if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):\n                    prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))\n        line.append(','.join(prot_effect))\n        line.append(','.join(genes))\n        ref_ad = ''\n        alt_ad = ''\n        for sample in variant['samples']:\n            if sample.get('sample_id') == sample_id:\n                ref_ad = sample['allele_depths'][0]\n                alt_ad = sample['allele_depths'][1]\n        line.append(ref_ad)\n        line.append(alt_ad)\n        document_lines.append(line)\n    return document_lines", "docstring": "Export mitochondrial variants for a case to create a MT excel report\n\nArgs:\nvariants(list): all MT variants for a case, sorted by position\nsample_id(str) : the id of a sample within the case\n\nReturns:\ndocument_lines(list): list of lines to include in the document", "source": "juraj-google-style"}
{"code": "def step(self, actions):\n    \n\n    observations, raw_rewards, dones, infos = self._step(actions)\n\n    \n    raw_rewards = raw_rewards.astype(np.float32)\n    processed_rewards = self.process_rewards(raw_rewards)\n\n    \n    processed_observations = self.process_observations(observations)\n\n    \n    self.trajectories.step(processed_observations, raw_rewards,\n                           processed_rewards, dones, actions)\n\n    return processed_observations, processed_rewards, dones, infos", "docstring": "Takes a step in all environments.\n\nSubclasses should override _step to do the actual reset if something other\nthan the default implementation is desired.\n\nArgs:\nactions: Batch of actions.\n\nReturns:\n(preprocessed_observations, processed_rewards, dones, infos).", "source": "juraj-google-style"}
{"code": "def set_local_interface(self, value=None, default=False, disable=False):\n    return self._configure_mlag('local-interface', value, default, disable)", "docstring": "Configures the mlag local-interface value\n\nArgs:\nvalue (str): The value to configure the local-interface\ndefault (bool): Configures the local-interface using the\ndefault keyword\ndisable (bool): Negates the local-interface using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "codesearchnet"}
{"code": "def getSlicesForText(self, body, getFingerprint=None, startIndex=0, maxResults=10):\n        \n        return self._text.getSlicesForText(self._retina, body, getFingerprint, startIndex, maxResults)", "docstring": "Get a list of slices of the text\nArgs:\nbody, str: The text to be evaluated (required)\ngetFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)\nstartIndex, int: The start-index for pagination (optional)\nmaxResults, int: Max results per page (optional)\nReturns:\nlist of Text\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def __call__(self, parser, namespace, value, option_string=None, **kwargs):\n        \n\n        handle = copen(value, mode=self.mode, **self.kwargs)\n\n        setattr(namespace, self.dest, handle)", "docstring": "Detects and opens compressed files\n\nArgs:\nparser (ArgumentParser): parser used to generate values\n\nnamespace (Namespace): namespace to set values for\n\nvalue (str): actual value specified by user\n\noption_string (str): argument flag used to call this function\n\n**kwargs (various): optional arguments later passed to the\ncompression algorithm", "source": "juraj-google-style"}
{"code": "def send(self, message):\n    body = {'notificationType': self._notification_type, 'priority': self._priority, 'isOrganization': self._is_organization, 'message': message}\n    if self._recipients:\n        body['recipients'] = self._recipients\n    self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))\n    resource = resource = self._tcex.resource('Notification')\n    resource.http_method = 'POST'\n    resource.body = json.dumps(body)\n    results = resource.request()\n    if (results.get('response').status_code == 200):\n        response = results.get('response').json()\n    elif (results.get('response').status_code == 400):\n        err = 'Failed to send notification ({})'.format(results.get('response').text)\n        self._tcex.log.error(err)\n        response = results.get('response').json()\n    else:\n        err = 'Failed to send notification ({})'.format(results.get('response').text)\n        self._tcex.log.error(err)\n        raise RuntimeError(err)\n    return response", "docstring": "Send our message\n\nArgs:\nmessage (str): The message to be sent.\n\nReturns:\nrequests.models.Response: The response from the request.", "source": "codesearchnet"}
{"code": "def from_filename(filename, require=None):\n    \n    with io.open(filename, 'r', encoding='utf-8') as json_file:\n        data = json.load(json_file)\n        return data, from_dict(data, require=require)", "docstring": "Reads a Google service account JSON file and returns its parsed info.\n\nArgs:\nfilename (str): The path to the service account .json file.\nrequire (Sequence[str]): List of keys required to be present in the\ninfo.\n\nReturns:\nTuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified\ninfo and a signer instance.", "source": "juraj-google-style"}
{"code": "def add_showcases(self, showcases, showcases_to_check=None):\n    if (showcases_to_check is None):\n        showcases_to_check = self.get_showcases()\n    allshowcasesadded = True\n    for showcase in showcases:\n        if (not self.add_showcase(showcase, showcases_to_check=showcases_to_check)):\n            allshowcasesadded = False\n    return allshowcasesadded", "docstring": "Add dataset to multiple showcases\n\nArgs:\nshowcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries\nshowcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.\n\nReturns:\nbool: True if all showcases added or False if any already present", "source": "codesearchnet"}
{"code": "def _any(objs, query):\n    for obj in objs:\n        if isinstance(obj, Document):\n            if _any(obj.roots, query):\n                return True\n        elif any((query(ref) for ref in obj.references())):\n            return True\n    else:\n        return False", "docstring": "Whether any of a collection of objects satisfies a given query predicate\n\nArgs:\nobjs (seq[Model or Document]) :\n\nquery (callable)\n\nReturns:\nTrue, if ``query(obj)`` is True for some object in ``objs``, else False", "source": "codesearchnet"}
{"code": "def get_first_model_with_rest_name(cls, rest_name):\n    models = cls.get_models_with_rest_name(rest_name)\n    if (len(models) > 0):\n        return models[0]\n    return None", "docstring": "Get the first model corresponding to a rest_name\n\nArgs:\nrest_name: the rest name", "source": "codesearchnet"}
{"code": "def nuc_v(msg):\n    \n    tc = typecode(msg)\n\n    if tc != 19:\n        raise RuntimeError(\"%s: Not an airborne velocity message, expecting TC = 19\" % msg)\n\n\n    msgbin = common.hex2bin(msg)\n    NUCv = common.bin2int(msgbin[42:45])\n\n    try:\n        HVE = uncertainty.NUCv[NUCv]['HVE']\n        VVE = uncertainty.NUCv[NUCv]['VVE']\n    except KeyError:\n        HVE, VVE = uncertainty.NA, uncertainty.NA\n\n    return HVE, VVE", "docstring": "Calculate NUCv, Navigation Uncertainty Category - Velocity (ADS-B version 1)\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string,\n\nReturns:\nint or string: 95% Horizontal Velocity Error\nint or string: 95% Vertical Velocity Error", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n    self.layout = (self.layout or self.property_set['layout'])\n    if (self.layout is None):\n        raise TranspilerError('EnlargeWithAncilla requires property_set[\"layout\"] or \"layout\" parameter to run')\n    layout_virtual_qubits = self.layout.get_virtual_bits().keys()\n    new_qregs = set((virtual_qubit[0] for virtual_qubit in layout_virtual_qubits if (virtual_qubit not in dag.wires)))\n    for qreg in new_qregs:\n        dag.add_qreg(qreg)\n    return dag", "docstring": "Extends dag with virtual qubits that are in layout but not in the circuit yet.\n\nArgs:\ndag (DAGCircuit): DAG to extend.\n\nReturns:\nDAGCircuit: An extended DAG.\n\nRaises:\nTranspilerError: If there is not layout in the property set or not set at init time.", "source": "codesearchnet"}
{"code": "def FromString(val):\n    if isinstance(val, bytes):\n        val = val.decode('utf-8')\n    try:\n        return ContractParameterType[val]\n    except Exception as e:\n        pass\n    try:\n        if isinstance(val, (bytearray, bytes)):\n            int_val = int.from_bytes(val, 'little')\n        else:\n            int_val = int.from_bytes(binascii.unhexlify(val), 'little')\n    except (binascii.Error, TypeError) as e:\n        int_val = int(val)\n    return ContractParameterType(int_val)", "docstring": "Create a ContractParameterType object from a str\n\nArgs:\nval (str): the value to be converted to a ContractParameterType.\nval can be hex encoded (b'07'), int (7), string int (\"7\"), or string literal (\"String\")\n\nReturns:\nContractParameterType", "source": "codesearchnet"}
{"code": "def _create_typed_object_meta(get_fset):\n\n    def _get_fget(attr, private_attr, type_):\n        'Create a property getter method for an attribute.\\n\\n        Args:\\n            attr: The name of the attribute that will be retrieved.\\n            private_attr: The name of the attribute that will store any data\\n                related to the attribute.\\n            type_: The annotated type defining what values can be stored in the\\n                attribute.\\n\\n        Returns:\\n            A function that takes self and retrieves the private attribute from\\n            self.\\n        '\n\n        def _fget(self):\n            'Get attribute from self without revealing the private name.'\n            try:\n                return getattr(self, private_attr)\n            except AttributeError:\n                raise AttributeError(\"'{}' object has no attribute '{}'\".format(_get_type_name(type_), attr))\n        return _fget\n\n    class _AnnotatedObjectMeta(type):\n        'A metaclass that reads annotations from a class definition.'\n\n        def __new__(mcs, name, bases, attrs, **kwargs):\n            'Create class objs that replaces annotated attrs with properties.\\n\\n            Args:\\n                mcs: The class object being created.\\n                name: The name of the class to create.\\n                bases: The list of all base classes for the new class.\\n                attrs: The list of all attributes for the new class from the\\n                    definition.\\n\\n            Returns:\\n                A new class instance with the expected base classes and\\n                attributes, but with annotated, public, non-constant,\\n                non-method attributes replaced by property objects that\\n                validate against the annotated type.\\n            '\n            annotations = attrs.get('__annotations__', {})\n            use_comment_type_hints = ((not annotations) and (attrs.get('__module__') != __name__))\n            if use_comment_type_hints:\n                frame_source = _get_class_frame_source(name)\n                annotations = get_type_hints(*frame_source)\n            names = (list(attrs) + list(annotations))\n            typed_attrs = {}\n            for attr in names:\n                typed_attrs[attr] = attrs.get(attr)\n                if _is_propertyable(names, attrs, annotations, attr):\n                    private_attr = '__{}'.format(attr)\n                    if (attr in attrs):\n                        typed_attrs[private_attr] = attrs[attr]\n                    type_ = (Optional[annotations[attr]] if ((not use_comment_type_hints) and (attr in attrs) and (attrs[attr] is None)) else annotations[attr])\n                    typed_attrs[attr] = property(_get_fget(attr, private_attr, type_), get_fset(attr, private_attr, type_))\n            properties = [attr for attr in annotations if _is_propertyable(names, attrs, annotations, attr)]\n            typed_attrs['_tp__typed_properties'] = properties\n            typed_attrs['_tp__required_typed_properties'] = [attr for attr in properties if (((attr not in attrs) or ((attrs[attr] is None) and use_comment_type_hints)) and (NoneType not in getattr(annotations[attr], '__args__', ())))]\n            return super(_AnnotatedObjectMeta, mcs).__new__(mcs, name, bases, typed_attrs, **kwargs)\n    return _AnnotatedObjectMeta", "docstring": "Create a metaclass for typed objects.\n\nArgs:\nget_fset: A function that takes three parameters: the name of an\nattribute, the 
name of the private attribute that holds the\nproperty data, and a type. This function must an object method that\naccepts a value.\n\nReturns:\nA metaclass that reads annotations from a class definition and creates\nproperties for annotated, public, non-constant, non-method attributes\nthat will guarantee the type of the stored value matches the\nannotation.", "source": "codesearchnet"}
{"code": "def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False):\n    if not isinstance(tensor_type, TensorType):\n        tensor_type = TensorType(tensor_type)\n    if tensor_type == TensorType.TENSORFLOW:\n        if not is_tf_available():\n            raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')\n        import tensorflow as tf\n        as_tensor = tf.constant\n        is_tensor = tf.is_tensor\n    elif tensor_type == TensorType.PYTORCH:\n        if not is_torch_available():\n            raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')\n        import torch\n        as_tensor = torch.tensor\n        is_tensor = torch.is_tensor\n    elif tensor_type == TensorType.JAX:\n        if not is_flax_available():\n            raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')\n        import jax.numpy as jnp\n        as_tensor = jnp.array\n        is_tensor = _is_jax\n    else:\n        as_tensor = np.asarray\n        is_tensor = _is_numpy\n    try:\n        if prepend_batch_axis:\n            inputs = [inputs]\n        if not is_tensor(inputs):\n            inputs = as_tensor(inputs)\n    except:\n        raise ValueError(\"Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.\")\n    return inputs", "docstring": "Convert the inner content to tensors.\n\nArgs:\ntensor_type (`str` or [`~utils.TensorType`], *optional*):\nThe type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If\nunset, no modification is done.\nprepend_batch_axis (`int`, *optional*, defaults to `False`):\nWhether or not to add the batch dimension during the conversion.", "source": "github-repos"}
{"code": "def get_vocab(self, vocab_name, **kwargs):\n        \n        vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)\n\n        filepaths = list(set([os.path.join(self.cache_dir,\n                                           vocab_dict['filename']),\n                              os.path.join(self.vocab_dir,\n                                           vocab_dict['filename'])]))\n        for path in filepaths:\n            if os.path.exists(path):\n                with open(path, 'rb') as f_obj:\n                    vocab_dict.update({\"name\": vocab_name,\n                                       \"data\": f_obj.read(),\n                                       \"modified\": os.path.getmtime(path)})\n                return vocab_dict\n        download_locs = make_list(vocab_dict.get('download',[]))\n        for loc in download_locs:\n\n            loc_web = urllib.request.urlopen(loc)\n            \n            urllib.request.urlretrieve(loc, filepaths[0])\n            with open(filepaths[0], 'rb') as f_obj:\n                vocab_dict.update({\"name\": vocab_name,\n                                   \"data\": f_obj.read(),\n                                   \"modified\": os.path.getmtime(filepaths[0])})\n                return vocab_dict", "docstring": "Returns data stream of an rdf vocabulary\n\nargs:\nvocab_name: the name or uri of the vocab to return", "source": "juraj-google-style"}
{"code": "def meta_features_path(self, path):\n    return (os.path.join(path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id)) + '.npy')", "docstring": "Returns path for meta-features\n\nArgs:\npath (str): Absolute/local path of xcessiv folder", "source": "codesearchnet"}
{"code": "def normalize_genotypes(genotypes):\n    \n    genotypes = genotypes.genotypes\n    return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes)", "docstring": "Normalize the genotypes.\n\nArgs:\ngenotypes (Genotypes): The genotypes to normalize.\n\nReturns:\nnumpy.array: The normalized genotypes.", "source": "juraj-google-style"}
{"code": "def __init__(self, value, opaque_type, name='Opaque Object'):\n        \n        super(OpaqueObject, self).__init__()\n\n        self._object_type = enums.ObjectType.OPAQUE_DATA\n\n        self.value = value\n        self.opaque_type = opaque_type\n        self.names.append(name)\n\n        \n        \n        self._digest = None\n        self._revocation_reason = None\n\n        \n        \n        self._destroy_date = None\n        self._compromise_occurrence_date = None\n        self._compromise_date = None\n\n        self.validate()", "docstring": "Create a OpaqueObject.\n\nArgs:\nvalue(bytes): The bytes representing opaque data.\nopaque_type(OpaqueDataType): An enumeration defining the type of\nthe opaque value.\nname(string): The string name of the opaque object.", "source": "juraj-google-style"}
{"code": "def _RDFClass(cls, table):\n    rdf_cls_name = 'OsqueryTable{}'.format(hash(table.query))\n    try:\n        return cls._rdf_cls_cache[rdf_cls_name]\n    except KeyError:\n        pass\n    rdf_cls = compatibility.MakeType(rdf_cls_name, (rdf_structs.RDFProtoStruct,), {})\n    rdf_cls.AddDescriptor(rdf_structs.ProtoEmbedded(name='metadata', field_number=1, nested=ExportedMetadata))\n    rdf_cls.AddDescriptor(rdf_structs.ProtoString(name='__query__', field_number=2))\n    for (idx, column) in enumerate(table.header.columns):\n        if (column.name == 'metadata'):\n            name = '__metadata__'\n        else:\n            name = column.name\n        descriptor = rdf_structs.ProtoString(name=name, field_number=(idx + 3))\n        rdf_cls.AddDescriptor(descriptor)\n    cls._rdf_cls_cache[rdf_cls_name] = rdf_cls\n    return rdf_cls", "docstring": "Creates a dynamic RDF proto struct class for given osquery table.\n\nThe fields of the proto will correspond to the columns of the table.\n\nArgs:\ntable: An osquery table for which the class is about to be generated.\n\nReturns:\nA class object corresponding to the given table.", "source": "codesearchnet"}
{"code": "def getbalance(self, user_id=\"\", as_decimal=True):\n        \n        balance = unicode(self.rpc.call(\"getbalance\", user_id))\n        self.logger.debug(\"\\\"\" + user_id + \"\\\"\", self.coin, \"balance:\", balance)\n        if as_decimal:\n            return Decimal(balance)\n        else:\n            return balance", "docstring": "Calculate the total balance in all addresses belonging to this user.\n\nArgs:\nuser_id (str): this user's unique identifier\nas_decimal (bool): balance is returned as a Decimal if True (default)\nor a string if False\n\nReturns:\nstr or Decimal: this account's total coin balance", "source": "juraj-google-style"}
{"code": "def _inject(self, value, settings):\n        \n        assert isinstance(value, string_types), 'Expected str; got {0.__class__}'.format(value)\n\n        begin, end = '{{', '}}'\n\n        if begin not in value:\n            return value, False\n\n        new_value = value\n        begin_pos, end_pos = 0, None\n        len_begin, len_end = len(begin), len(end)\n        len_value = len(new_value)\n\n        while begin_pos < len_value:\n            \n            begin_pos = new_value.find(begin, begin_pos)\n\n            if begin_pos == -1:\n                break\n\n            \n            before = new_value[:begin_pos]\n\n            \n            begin_pos += len_begin\n            end_pos = new_value.find(end, begin_pos)\n            if end_pos == -1:\n                raise ValueError('Unmatched {begin}...{end} in {value}'.format(**locals()))\n\n            \n            \n            name = new_value[begin_pos:end_pos]\n            name = name.strip()\n\n            if not name:\n                raise ValueError('Empty name in {value}'.format(**locals()))\n\n            \n            after_pos = end_pos + len_end\n            try:\n                after = new_value[after_pos:]\n            except IndexError:\n                \n                after = ''\n\n            \n            \n            try:\n                injection_value = settings.get_dotted(name)\n            except KeyError:\n                raise KeyError('{name} not found in {settings}'.format(**locals()))\n\n            if not isinstance(injection_value, string_types):\n                injection_value = self.strategy.encode_value(injection_value)\n\n            \n            \n            new_value = ''.join((before, injection_value, after))\n\n            \n            begin_pos = len(before) + len(injection_value)\n            len_value = len(new_value)\n\n        return new_value, (new_value != value)", "docstring": "Inject ``settings`` into ``value``.\n\nGo through ``value`` looking for ``{{NAME}}`` groups and replace\neach group with the value of the named item from ``settings``.\n\nArgs:\nvalue (str): The value to inject settings into\nsettings: An object that provides the dotted access interface\n\nReturns:\n(str, bool): The new value and whether the new value is\ndifferent from the original value", "source": "juraj-google-style"}
{"code": "def intent(self, user: str = None, token: Optional[str] = None) -> \"IntentAPI\":\n        \n        if self.is_real_user:\n            raise ValueError(\"Can't get child intent of real user\")\n        if token:\n            return IntentAPI(user, self.real_user(user, token), self.bot_intent(), self.state_store,\n                             self.intent_log)\n        return IntentAPI(user, self.user(user), self.bot_intent(), self.state_store,\n                         self.intent_log)", "docstring": "Get the intent API for a specific user.\n\nArgs:\nuser: The Matrix ID of the user whose intent API to get.\n\nReturns:\nThe IntentAPI for the given user.", "source": "juraj-google-style"}
{"code": "def __get_default_value_from_element(self, element):\n    if (element.name == 'select'):\n        options = element.find_all('option')\n        is_multiple = element.has_attr('multiple')\n        selected_options = [option for option in options if option.has_attr('selected')]\n        if ((not selected_options) and options):\n            selected_options = [options[0]]\n        selected_values = []\n        if is_multiple:\n            for option in selected_options:\n                value = (option['value'] if option.has_attr('value') else option.string)\n                selected_values.append(value)\n            return selected_values\n        elif (len(selected_options) >= 1):\n            if selected_options[0].has_attr('value'):\n                return selected_options[0]['value']\n            else:\n                return selected_options[0].string\n        return ''\n    if (element.name == 'textarea'):\n        return (element.string if (element.string is not None) else '')\n    if ((element.name == 'input') and element.has_attr('type')):\n        if (element['type'] in ('checkbox', 'radio')):\n            if (not element.has_attr('checked')):\n                return False\n            if element.has_attr('value'):\n                return element['value']\n            else:\n                return 'on'\n    if element.has_attr('value'):\n        return element['value']\n    return ''", "docstring": "Get the default value of a form element\n\nArgs:\nelements (obj): The soup element.\n\nReturns:\nstr: The default value", "source": "codesearchnet"}
{"code": "class RMSprop(Optimizer):\n\n    def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0.0, **kwargs):\n        super(RMSprop, self).__init__(**kwargs)\n        with backend.name_scope(self.__class__.__name__):\n            self.lr = backend.variable(lr, name='lr')\n            self.rho = backend.variable(rho, name='rho')\n            self.decay = backend.variable(decay, name='decay')\n            self.iterations = backend.variable(0, dtype='int64', name='iterations')\n        if epsilon is None:\n            epsilon = backend.epsilon()\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    def _create_all_weights(self, params):\n        accumulators = [backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params]\n        self.weights = accumulators\n        return accumulators\n\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        accumulators = self._create_all_weights(params)\n        self.updates = [state_ops.assign_add(self.iterations, 1)]\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr = lr * (1.0 / (1.0 + self.decay * math_ops.cast(self.iterations, backend.dtype(self.decay))))\n        for p, g, a in zip(params, grads, accumulators):\n            new_a = self.rho * a + (1.0 - self.rho) * math_ops.square(g)\n            self.updates.append(state_ops.assign(a, new_a))\n            new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n            self.updates.append(state_ops.assign(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(backend.get_value(self.lr)), 'rho': float(backend.get_value(self.rho)), 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon}\n        base_config = super(RMSprop, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "RMSProp optimizer.\n\nIt is recommended to leave the parameters of this optimizer\nat their default values\n(except the learning rate, which can be freely tuned).\n\nArgs:\nlr: float >= 0. Learning rate.\nrho: float >= 0.\nepsilon: float >= 0. Fuzz factor.\nIf `None`, defaults to `backend.epsilon()`.\ndecay: float >= 0. Learning rate decay over each update.", "source": "github-repos"}
{"code": "def handle_message_registered(self, msg_data, host):\n    response = None\n    if (msg_data['method'] == 'EVENT'):\n        logger.debug(('<%s> <euuid:%s> Event message received' % (msg_data['cuuid'], msg_data['euuid'])))\n        response = self.event(msg_data['cuuid'], host, msg_data['euuid'], msg_data['event_data'], msg_data['timestamp'], msg_data['priority'])\n    elif (msg_data['method'] == 'OK EVENT'):\n        logger.debug(('<%s> <euuid:%s> Event confirmation message received' % (msg_data['cuuid'], msg_data['euuid'])))\n        try:\n            del self.event_uuids[msg_data['euuid']]\n        except KeyError:\n            logger.warning(('<%s> <euuid:%s> Euuid does not exist in event buffer. Key was removed before we could process it.' % (msg_data['cuuid'], msg_data['euuid'])))\n    elif (msg_data['method'] == 'OK NOTIFY'):\n        logger.debug(('<%s> <euuid:%s> Ok notify received' % (msg_data['cuuid'], msg_data['euuid'])))\n        try:\n            del self.event_uuids[msg_data['euuid']]\n        except KeyError:\n            logger.warning(('<%s> <euuid:%s> Euuid does not exist in event buffer. Key was removed before we could process it.' % (msg_data['cuuid'], msg_data['euuid'])))\n    return response", "docstring": "Processes messages that have been delivered by a registered client.\n\nArgs:\nmsg (string): The raw packet data delivered from the listener. This\ndata will be unserialized and then processed based on the packet's\nmethod.\nhost (tuple): The (address, host) tuple of the source message.\n\nReturns:\nA response that will be sent back to the client via the listener.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n        \n        try:\n            arguments = Adapter(Schema(ApplicationOptions.SCHEMA).validate(kwargs))\n            self.definition = arguments.definition\n            self.matrix_tags = [entry for entry in arguments.matrix_tags.split(',') if len(entry) > 0]\n            self.tags = [entry for entry in arguments.tags.split(',') if len(entry) > 0]\n            self.validate_only = arguments.validate_only\n            self.dry_run = arguments.dry_run\n            self.event_logging = arguments.event_logging\n            self.logging_config = arguments.logging_config\n            self.debug = arguments.debug\n            self.strict = arguments.strict\n            self.report = arguments.report\n            self.temporary_scripts_path = arguments.temporary_scripts_path\n        except SchemaError as exception:\n            logging.getLogger(__name__).error(exception)\n            raise RuntimeError(str(exception))", "docstring": "Initializing and validating fields.\n\nArgs:\nkwargs (dict): application command line options.", "source": "juraj-google-style"}
{"code": "def get(self, id=None, **kwargs):\n        \n        server_data = self.gitlab.http_get(self.path, **kwargs)\n        if server_data is None:\n            return None\n        return self._obj_cls(self, server_data)", "docstring": "Retrieve a single object.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nReturns:\nobject: The generated RESTObject\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def sort_values(\n        self,\n        by,\n        axis=0,\n        ascending=True,\n        inplace=False,\n        kind=\"quicksort\",\n        na_position=\"last\",\n    ):\n        \n        axis = self._get_axis_number(axis)\n        if not is_list_like(by):\n            by = [by]\n        \n        \n        if axis == 0:\n            broadcast_value_dict = {col: self[col] for col in by}\n            broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)\n            new_index = broadcast_values.sort_values(\n                by=by,\n                axis=axis,\n                ascending=ascending,\n                kind=kind,\n                na_position=na_position,\n            ).index\n            return self.reindex(index=new_index, copy=not inplace)\n        else:\n            broadcast_value_list = [\n                self[row :: len(self.index)]._to_pandas() for row in by\n            ]\n            index_builder = list(zip(broadcast_value_list, by))\n            broadcast_values = pandas.concat(\n                [row for row, idx in index_builder], copy=False\n            )\n            broadcast_values.columns = self.columns\n            new_columns = broadcast_values.sort_values(\n                by=by,\n                axis=axis,\n                ascending=ascending,\n                kind=kind,\n                na_position=na_position,\n            ).columns\n            return self.reindex(columns=new_columns, copy=not inplace)", "docstring": "Sorts by a column/row or list of columns/rows.\n\nArgs:\nby: A list of labels for the axis to sort over.\naxis: The axis to sort.\nascending: Sort in ascending or descending order.\ninplace: If true, do the operation inplace.\nkind: How to sort.\nna_position: Where to put np.nan values.\n\nReturns:\nA sorted DataFrame.", "source": "juraj-google-style"}
{"code": "def parse_split(cls, header: bytes, body: bytes) -> 'MessageContent':\n    header_lines = cls._find_lines(header)\n    body_lines = cls._find_lines(body)\n    header_view = memoryview(header)\n    body_view = memoryview(body)\n    return cls._parse_split([header_view, body_view], header, body, header_view, body_view, header_lines, body_lines)", "docstring": "Parse the header and body bytestrings into message content.\n\nArgs:\nheader: The header bytestring to parse.\nbody: The body bytestring to parse.", "source": "codesearchnet"}
{"code": "async def attach_file(self, file_path: str, description: str=None) -> Attachment:\n    with open(file_path, 'rb') as f:\n        return (await self._attach(f.read(), description))", "docstring": "add a file as an attachment\n\n|methcoro|\n\nWarning:\n|unstable|\n\nArgs:\nfile_path: path to the file you want to add\ndescription: *optional* description for your attachment\n\nReturns:\nAttachment:\n\nRaises:\nValueError: file_path must not be None\nAPIException", "source": "codesearchnet"}
{"code": "def attribute_labels(\n        self, main_type, sub_type, unique_id, attribute_id, owner=None, params=None\n    ):\n        \n        params = params or {}\n        if owner:\n            params['owner'] = owner\n\n        if not sub_type:\n            url = '/v2/{}/{}/attributes/{}/securityLabels'.format(\n                main_type, unique_id, attribute_id\n            )\n        else:\n            url = '/v2/{}/{}/{}/attributes/{}/securityLabels'.format(\n                main_type, sub_type, unique_id, attribute_id\n            )\n\n        for l in self._iterate(url, params, 'securityLabel'):\n            yield l", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nattribute_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def delete(filething):\n    \n\n    t = MP4(filething)\n    filething.fileobj.seek(0)\n    t.delete(filething)", "docstring": "delete(filething)\n\nArguments:\nfilething (filething)\nRaises:\nmutagen.MutagenError\n\nRemove tags from a file.", "source": "juraj-google-style"}
{"code": "def dependencies(self, user=None, napp=None):\n        \n        napps = self._get_napp_key('napp_dependencies', user, napp)\n        return [tuple(napp.split('/')) for napp in napps]", "docstring": "Get napp_dependencies from install NApp.\n\nArgs:\nuser(string)  A Username.\nnapp(string): A NApp name.\nReturns:\nnapps(list): List with tuples with Username and NApp name.\ne.g. [('kytos'/'of_core'), ('kytos/of_l2ls')]", "source": "juraj-google-style"}
{"code": "def update_locate_candidates(candidate, next_candidates, x_val, y_val, degree):\n    (centroid_x, centroid_y, width, candidate_nodes) = candidate\n    point = np.asfortranarray([x_val, y_val])\n    if (not _helpers.contains_nd(candidate_nodes, point)):\n        return\n    (nodes_a, nodes_b, nodes_c, nodes_d) = _surface_helpers.subdivide_nodes(candidate_nodes, degree)\n    half_width = (0.5 * width)\n    next_candidates.extend((((centroid_x - half_width), (centroid_y - half_width), half_width, nodes_a), (centroid_x, centroid_y, (- half_width), nodes_b), ((centroid_x + width), (centroid_y - half_width), half_width, nodes_c), ((centroid_x - half_width), (centroid_y + width), half_width, nodes_d)))", "docstring": "Update list of candidate surfaces during geometric search for a point.\n\n.. note::\n\nThis is used **only** as a helper for :func:`locate_point`.\n\nChecks if the point ``(x_val, y_val)`` is contained in the ``candidate``\nsurface. If not, this function does nothing. If the point is contaned,\nthe four subdivided surfaces from ``candidate`` are added to\n``next_candidates``.\n\nArgs:\ncandidate (Tuple[float, float, float, numpy.ndarray]): A 4-tuple\ndescribing a surface and its centroid / width. Contains\n\n* Three times centroid ``x``-value\n* Three times centroid ``y``-value\n* \"Width\" of parameter space for the surface\n* Control points for the surface\nnext_candidates (list): List of \"candidate\" sub-surfaces that may\ncontain the point being located.\nx_val (float): The ``x``-coordinate being located.\ny_val (float): The ``y``-coordinate being located.\ndegree (int): The degree of the surface.", "source": "codesearchnet"}
{"code": "def list_nics(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Network/', '/networkInterfaces?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List the network interfaces in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of NICs list with properties.", "source": "codesearchnet"}
{"code": "def run(self, *args, **kwargs):\n    accounts = list(AWSAccount.get_all(include_disabled=False).values())\n    s3_acl = get_template('cloudtrail_s3_bucket_policy.json')\n    s3_bucket_name = self.dbconfig.get('bucket_name', self.ns)\n    s3_bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2')\n    s3_bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns))\n    CloudTrail.create_s3_bucket(s3_bucket_name, s3_bucket_region, s3_bucket_account, s3_acl)\n    self.validate_sqs_policy(accounts)\n    for account in accounts:\n        ct = CloudTrail(account, s3_bucket_name, s3_bucket_region, self.log)\n        ct.run()", "docstring": "Entry point for the scheduler\n\nArgs:\n*args: Optional arguments\n**kwargs: Optional keyword arguments\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_config_parameter(config: ConfigParser, section: str, param: str, fn: Callable[([Any], Any)], default: Any) -> Any:\n    try:\n        value = fn(config.get(section, param))\n    except (TypeError, ValueError, NoOptionError):\n        log.warning('Configuration variable {} not found or improper in section [{}]; using default of {!r}', param, section, default)\n        if (default is None):\n            value = default\n        else:\n            value = fn(default)\n    return value", "docstring": "Fetch parameter from ``configparser`` ``.INI`` file.\n\nArgs:\nconfig: :class:`ConfigParser` object\nsection: section name within config file\nparam: name of parameter within section\nfn: function to apply to string parameter (e.g. ``int``)\ndefault: default value\n\nReturns:\nparameter value, or ``None`` if ``default is None``, or ``fn(default)``", "source": "codesearchnet"}
{"code": "def _StopMonitoringProcess(self, process):\n    \n    if process is None:\n      raise ValueError('Missing process.')\n\n    pid = process.pid\n\n    self._RaiseIfNotMonitored(pid)\n\n    del self._process_information_per_pid[pid]\n\n    rpc_client = self._rpc_clients_per_pid.get(pid, None)\n    if rpc_client:\n      rpc_client.Close()\n      del self._rpc_clients_per_pid[pid]\n\n    if pid in self._rpc_errors_per_pid:\n      del self._rpc_errors_per_pid[pid]\n\n    logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(\n        process.name, pid))", "docstring": "Stops monitoring a process.\n\nArgs:\nprocess (MultiProcessBaseProcess): process.\n\nRaises:\nKeyError: if the process is not monitored.\nValueError: if the process is missing.", "source": "juraj-google-style"}
{"code": "def _random_segmentation(num_items, num_segments):\n    mask_indices = np.arange(num_items - 1) < num_segments - 1\n    np.random.shuffle(mask_indices)\n    first_in_segment = np.pad(mask_indices, [[1, 0]])\n    segment_id = np.cumsum(first_in_segment)\n    _, segment_length = np.unique(segment_id, return_counts=True)\n    return segment_length", "docstring": "Partition a sequence of items randomly into non-empty segments.\nArgs:\nnum_items: an integer scalar > 0\nnum_segments: an integer scalar in [1, num_items]\nReturns:\na Tensor with shape [num_segments] containing positive integers that add\nup to num_items", "source": "github-repos"}
{"code": "def AddBackpropLoopCounter(self, count, outer_grad_state):\n    in_separate_functions = count.graph is not ops.get_default_graph()\n    if in_separate_functions:\n        count = array_ops.identity(count)\n    else:\n        one = constant_op.constant(1, name='b_count')\n    self.Enter()\n    self.AddName(count.name)\n    enter_count = _Enter(count, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='b_count')\n    self.loop_enters.append(enter_count)\n    merge_count = merge([enter_count, enter_count])[0]\n    self._pivot_for_pred = merge_count\n    if in_separate_functions:\n        one = constant_op.constant(1, name='b_count')\n    pred = math_ops.greater_equal(merge_count, one)\n    self._pivot = loop_cond(pred, name='b_count')\n    switch_count = switch(merge_count, self._pivot)\n    index = math_ops.subtract(switch_count[1], one)\n    self._pivot_for_body = index\n    next_count = _NextIteration(index)\n    merge_count.op._update_input(1, next_count)\n    final_zero = exit(switch_count[0], name='b_count')\n    self.loop_exits.append(final_zero)\n    if outer_grad_state is not None:\n        outer_grad_state.grad_sync._add_control_input(final_zero.op)\n    self.ExitResult([final_zero])\n    self.Exit()\n    return next_count", "docstring": "Add the backprop loop that controls the iterations.\n\nThis is added to the backprop loop. It is used to control the loop\ntermination of the backprop loop. Called in the outer context of\nthis grad context.\n\nThe pseudocode is:\n`n = count; while (n >= 1) { n--; }`\n\nNote that a control dependency is added to `final_zero` to ensure the\ncorrect execution order of stack pop ops.\n\nArgs:\ncount: The number of iterations for backprop.\nouter_grad_state: The outer grad state. None if not nested.\n\nReturns:\nThe loop index.", "source": "github-repos"}
{"code": "def __init__(self, sv, sess, step_counter=None):\n    super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)\n    self._sv = sv\n    self._sess = sess\n    self._last_time = 0.0\n    self._last_step = 0\n    step_counter = sv.global_step if step_counter is None else step_counter\n    self._step_counter = step_counter\n    self._summary_tag = '%s/sec' % self._step_counter.op.name", "docstring": "Create a `SVStepCounterThread`.\n\nArgs:\nsv: A `Supervisor`.\nsess: A `Session`.\nstep_counter: A `Tensor` holding the step counter. By defaults, it uses\nsv.global_step.", "source": "github-repos"}
{"code": "def embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n    if self._using_tpu:\n        if save_context.in_save_context():\n            return {table: self._variables[table.name]['parameters'].variables[0] for table in self._table_config}\n        raise RuntimeError('Unable to retrieve embedding tables when using a TPU strategy. If you need access, save your model, create this object under a CPU strategy and restore.')\n    self._maybe_build(None)\n    return {table: self._variables[table.name]['parameters'] for table in self._table_config}", "docstring": "Returns a dict of embedding tables, keyed by `TableConfig`.\n\nThis property only works when the `TPUEmbedding` object is created under a\nnon-TPU strategy. This is intended to be used to for CPU based lookup when\ncreating a serving checkpoint.\n\nReturns:\nA dict of embedding tables, keyed by `TableConfig`.\n\nRaises:\nRuntimeError: If object was created under a `TPUStrategy`.", "source": "github-repos"}
{"code": "def from_sub_model_configs(cls, semantic_config: BarkSemanticGenerationConfig, coarse_acoustics_config: BarkCoarseGenerationConfig, fine_acoustics_config: BarkFineGenerationConfig, **kwargs):\n    return cls(semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`BarkGenerationConfig`] (or a derived class) from bark sub-models generation configuration.\n\nReturns:\n[`BarkGenerationConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def check_directory(path, human_readable_name):\n    if (not os.path.exists(path)):\n        LOGGER.error('%s directory does not exist: %s', human_readable_name, path)\n        return False\n    if (not os.path.isdir(path)):\n        LOGGER.error('%s directory is not a directory: %s', human_readable_name, path)\n        return False\n    errors = True\n    if (not os.access(path, os.R_OK)):\n        LOGGER.error('%s directory is not readable: %s', human_readable_name, path)\n        errors = False\n    if (not os.access(path, os.W_OK)):\n        LOGGER.error('%s directory is not writable: %s', human_readable_name, path)\n        errors = False\n    return errors", "docstring": "Verify that the directory exists and is readable and writable.\n\nArgs:\npath (str): a directory which should exist and be writable\nhuman_readable_name (str): a human readable string for the directory\nwhich is used in logging statements\n\nReturns:\nbool: False if an error exists, True otherwise.", "source": "codesearchnet"}
{"code": "def _validate_paths(self, settings, name, value):\n    return [self._validate_path(settings, name, item) for item in value]", "docstring": "Apply ``SettingsPostProcessor._validate_path`` to each element in\nlist.\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (list): List of paths to patch.\n\nRaises:\nboussole.exceptions.SettingsInvalidError: Once a path does not\nexists.\n\nReturns:\nlist: Validated paths.", "source": "codesearchnet"}
{"code": "def tool(name):\n    \n    \n    global g_tools\n\n    def decorator(fn):  \n        \n        g_tools[name] = fn\n        return fn\n\n    return decorator", "docstring": "Decorator for defining lint tools.\n\nArgs:\nname (str):\nThe name of the tool. This name will be used to identify the tool\nin `pelconf.yaml`.", "source": "juraj-google-style"}
{"code": "def parse_genotypes(variant, individuals, individual_positions):\n    \n    genotypes = []\n    for ind in individuals:\n        pos = individual_positions[ind['individual_id']]\n        genotypes.append(parse_genotype(variant, ind, pos))\n    return genotypes", "docstring": "Parse the genotype calls for a variant\n\nArgs:\nvariant(cyvcf2.Variant)\nindividuals: List[dict]\nindividual_positions(dict)\nReturns:\ngenotypes(list(dict)): A list of genotypes", "source": "juraj-google-style"}
{"code": "def validate_request_success(\n        response_text, request_url, status_code, expected_status_code\n    ):\n        \n        try:\n            assert status_code == expected_status_code\n        except AssertionError:\n            msg = (\n                \"Request to {url} failed with status {status_code}:\\n\"\n                \"The reponse from the request was as follows:\\n\\n\"\n                \"{content}\"\n            ).format(\n                url=request_url, status_code=status_code, content=response_text\n            )\n            raise BadHttpRequestError(msg)", "docstring": "Validates that a request was successful.\n\nArgs:\nresponse_text (str): The response body of the request.\nrequest_url (str): The URL the request was made at.\nstatus_code (int): The status code of the response.\nexpected_status_code (int): The expected status code of the\nresponse.\n\nRaises:\n:class:`saltant.exceptions.BadHttpRequestError`: The HTTP\nrequest failed.", "source": "juraj-google-style"}
{"code": "def _MakeMethodDescriptor(self, method_proto, service_name, package, scope,\n                            index):\n    \n    full_name = '.'.join((service_name, method_proto.name))\n    input_type = self._GetTypeFromScope(\n        package, method_proto.input_type, scope)\n    output_type = self._GetTypeFromScope(\n        package, method_proto.output_type, scope)\n    return descriptor.MethodDescriptor(name=method_proto.name,\n                                       full_name=full_name,\n                                       index=index,\n                                       containing_service=None,\n                                       input_type=input_type,\n                                       output_type=output_type,\n                                       options=_OptionsOrNone(method_proto))", "docstring": "Creates a method descriptor from a MethodDescriptorProto.\n\nArgs:\nmethod_proto: The proto describing the method.\nservice_name: The name of the containing service.\npackage: Optional package name to look up for types.\nscope: Scope containing available types.\nindex: Index of the method in the service.\n\nReturns:\nAn initialized MethodDescriptor object.", "source": "juraj-google-style"}
{"code": "def infer(msg, mrar=False):\n    \n    df = common.df(msg)\n\n    if common.allzeros(msg):\n        return 'EMPTY'\n\n    \n    if df == 17:\n        tc = common.typecode(msg)\n\n        if 1 <= tc <= 4:\n            return 'BDS08'  \n        if 5 <= tc <= 8:\n            return 'BDS06'  \n        if 9 <= tc <= 18:\n            return 'BDS05'  \n        if tc == 19:\n            return 'BDS09'  \n        if 20 <= tc <= 22:\n            return 'BDS05'  \n        if tc == 28:\n            return 'BDS61'  \n        if tc == 29:\n            return 'BDS62'  \n        if tc == 31:\n            return 'BDS65'  \n\n    \n    IS10 = bds10.is10(msg)\n    IS17 = bds17.is17(msg)\n    IS20 = bds20.is20(msg)\n    IS30 = bds30.is30(msg)\n    IS40 = bds40.is40(msg)\n    IS50 = bds50.is50(msg)\n    IS60 = bds60.is60(msg)\n    IS44 = bds44.is44(msg)\n    IS45 = bds45.is45(msg)\n\n    if mrar:\n        allbds = np.array([\"BDS10\", \"BDS17\", \"BDS20\", \"BDS30\", \"BDS40\",\n                           \"BDS44\", \"BDS45\", \"BDS50\", \"BDS60\"])\n        mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60]\n    else:\n        allbds = np.array([\"BDS10\", \"BDS17\", \"BDS20\", \"BDS30\", \"BDS40\",\n                           \"BDS50\", \"BDS60\"])\n        mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60]\n\n    bds = ','.join(sorted(allbds[mask]))\n\n    if len(bds) == 0:\n        return None\n    else:\n        return bds", "docstring": "Estimate the most likely BDS code of an message.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\nmrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False.\n\nReturns:\nString or None: BDS version, or possible versions, or None if nothing matches.", "source": "juraj-google-style"}
{"code": "def transition(self, state, message=''):\n    with self.changes_squashed:\n        initial_state = self.state.value\n        if self.state_set.transition_allowed(initial_state=initial_state, target_state=state):\n            self.log.debug('%s: Transitioning from %s to %s', self.mri, initial_state, state)\n            if (state == ss.DISABLED):\n                alarm = Alarm.invalid('Disabled')\n            elif (state == ss.FAULT):\n                alarm = Alarm.major(message)\n            else:\n                alarm = Alarm()\n            self.update_health(self, HealthInfo(alarm))\n            self.state.set_value(state)\n            self.state.set_alarm(alarm)\n            for (child, writeable) in self._children_writeable[state].items():\n                if isinstance(child, AttributeModel):\n                    child.meta.set_writeable(writeable)\n                elif isinstance(child, MethodModel):\n                    child.set_writeable(writeable)\n        else:\n            raise TypeError(('Cannot transition from %s to %s' % (initial_state, state)))", "docstring": "Change to a new state if the transition is allowed\n\nArgs:\nstate (str): State to transition to\nmessage (str): Message if the transition is to a fault state", "source": "codesearchnet"}
{"code": "def proc_val(key, val):\n        \n\n        list_type_keys = list(VALID_FEFF_TAGS)\n        del list_type_keys[list_type_keys.index(\"ELNES\")]\n        del list_type_keys[list_type_keys.index(\"EXELFS\")]\n        boolean_type_keys = ()\n        float_type_keys = (\"S02\", \"EXAFS\", \"RPATH\")\n\n        def smart_int_or_float(numstr):\n            if numstr.find(\".\") != -1 or numstr.lower().find(\"e\") != -1:\n                return float(numstr)\n            else:\n                return int(numstr)\n\n        try:\n            if key.lower() == 'cif':\n                m = re.search(r\"\\w+.cif\", val)\n                return m.group(0)\n\n            if key in list_type_keys:\n                output = list()\n                toks = re.split(r\"\\s+\", val)\n\n                for tok in toks:\n                    m = re.match(r\"(\\d+)\\*([\\d\\.\\-\\+]+)\", tok)\n                    if m:\n                        output.extend([smart_int_or_float(m.group(2))] *\n                                      int(m.group(1)))\n                    else:\n                        output.append(smart_int_or_float(tok))\n                return output\n            if key in boolean_type_keys:\n                m = re.search(r\"^\\W+([TtFf])\", val)\n                if m:\n                    if m.group(1) == \"T\" or m.group(1) == \"t\":\n                        return True\n                    else:\n                        return False\n                raise ValueError(key + \" should be a boolean type!\")\n\n            if key in float_type_keys:\n                return float(val)\n\n        except ValueError:\n            return val.capitalize()\n\n        return val.capitalize()", "docstring": "Static helper method to convert Feff parameters to proper types, e.g.\nintegers, floats, lists, etc.\n\nArgs:\nkey: Feff parameter key\nval: Actual value of Feff parameter.", "source": "juraj-google-style"}
{"code": "def plot(self, fmt=None, fig=None, ax=None):\n    u = 4\n    v = 0.25\n    r = None\n    if ((fig is None) and (ax is None)):\n        fig = plt.figure(figsize=(u, 1))\n    else:\n        r = fig\n    if (ax is None):\n        ax = fig.add_axes([(0.1 * v), 0.1, (0.8 * v), 0.8])\n    else:\n        r = ax\n    rect1 = patches.Rectangle((0, 0), (u * v), (u * v), color=self.colour, lw=1, hatch=self.hatch, ec='k')\n    ax.add_patch(rect1)\n    ax.text((1.0 + ((0.1 * v) * u)), ((u * v) * 0.5), self.component.summary(fmt=fmt), fontsize=max(u, 15), verticalalignment='center', horizontalalignment='left')\n    ax.set_xlim([0, (u * v)])\n    ax.set_ylim([0, (u * v)])\n    ax.get_xaxis().set_visible(False)\n    ax.get_yaxis().set_visible(False)\n    ax.invert_yaxis()\n    return r", "docstring": "Make a simple plot of the Decor.\n\nArgs:\nfmt (str): A Python format string for the component summaries.\nfig (Pyplot figure): A figure, optional. Use either fig or ax, not\nboth.\nax (Pyplot axis): An axis, optional. Use either fig or ax, not\nboth.\n\nReturns:\nfig or ax or None. If you pass in an ax, you get it back. If you pass\nin a fig, you get it. If you pass nothing, the function creates a\nplot object as a side-effect.", "source": "codesearchnet"}
{"code": "def frosted_glass_blur(x, severity=1):\n    c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][(severity - 1)]\n    x = np.uint8((tfds.core.lazy_imports.skimage.filters.gaussian((np.array(x) / 255.0), sigma=c[0], multichannel=True) * 255))\n    for _ in range(c[2]):\n        for h in range((x.shape[0] - c[1]), c[1], (- 1)):\n            for w in range((x.shape[1] - c[1]), c[1], (- 1)):\n                (dx, dy) = np.random.randint((- c[1]), c[1], size=(2,))\n                (h_prime, w_prime) = ((h + dy), (w + dx))\n                (x[(h, w)], x[(h_prime, w_prime)]) = (x[(h_prime, w_prime)], x[(h, w)])\n    x_clip = np.clip(tfds.core.lazy_imports.skimage.filters.gaussian((x / 255.0), sigma=c[0], multichannel=True), 0, 1)\n    x_clip *= 255\n    return around_and_astype(x_clip)", "docstring": "Frosted glass blurring to images.\n\nApply frosted glass blurring to images by shuffling pixels locally.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.", "source": "codesearchnet"}
{"code": "def __init__(self, hash_queue, hash_analysis_queue, **kwargs):\n    \n    super(HTTPHashAnalyzer, self).__init__(\n        hash_queue, hash_analysis_queue, **kwargs)\n    self._checked_for_old_python_version = False", "docstring": "Initializes a HTTP hash analyzer.\n\nArgs:\nhash_queue (Queue.queue): a queue that contains hashes to be analyzed.\nhash_analysis_queue (Queue.queue): queue that the analyzer will append\nHashAnalysis objects to.", "source": "juraj-google-style"}
{"code": "def resolve(self, context, provider):\n        \n        try:\n            self._value.resolve(context, provider)\n        except FailedLookup as e:\n            raise FailedVariableLookup(self.name, e.lookup, e.error)", "docstring": "Recursively resolve any lookups with the Variable.\n\nArgs:\ncontext (:class:`stacker.context.Context`): Current context for\nbuilding the stack\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of\nthe base provider", "source": "juraj-google-style"}
{"code": "def pprnt(input, return_data=False):\n    \n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[32m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n    import json, re\n    result = json.dumps(input, sort_keys=True, indent=4)\n    result = re.sub(r'(\")(\\w*?_id)(\":)', r'\\1%s%s\\2%s\\3' % (BOLD, HEADER, ENDC), result)\n    result = re.sub(r'(\")(\\w*?_set)(\":)', r'\\1%s%s\\2%s\\3' % (BOLD, HEADER, ENDC), result)\n    result = re.sub(r'(\\n *?\")(\\w*?)(\":)', r'\\1%s%s\\2%s\\3' % (BOLD, OKGREEN, ENDC), result)\n    if not return_data:\n        print(result)\n    else:\n        return result", "docstring": "Prettier print for nested data\n\nArgs:\ninput: Input data\nreturn_data (bool): Default False. Print outs if False, returns if True.\nReturns:\nNone | Pretty formatted text representation of input data.", "source": "juraj-google-style"}
{"code": "def _srvmgr(cmd, return_json=False):\n    if isinstance(cmd, list):\n        cmd = ' '.join(cmd)\n    if return_json:\n        cmd = 'ConvertTo-Json -Compress -Depth 4 -InputObject @({0})'.format(cmd)\n    cmd = 'Import-Module WebAdministration; {0}'.format(cmd)\n    ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)\n    if (ret['retcode'] != 0):\n        msg = 'Unable to execute command: {0}\\nError: {1}'.format(cmd, ret['stderr'])\n        log.error(msg)\n    return ret", "docstring": "Execute a powershell command from the WebAdministration PS module.\n\nArgs:\ncmd (list): The command to execute in a list\nreturn_json (bool): True formats the return in JSON, False just returns\nthe output of the command.\n\nReturns:\nstr: The output from the command", "source": "codesearchnet"}
{"code": "def read_config(\n        config_filepath,\n        logger=logging.getLogger('ProsperCommon'),\n):\n    \n    config_parser = configparser.ConfigParser(\n        interpolation=ExtendedInterpolation(),\n        allow_no_value=True,\n        delimiters=('='),\n        inline_comment_prefixes=('\n    )\n    logger.debug('config_filepath=%s', config_filepath)\n\n    with open(config_filepath, 'r') as filehandle:\n        config_parser.read_file(filehandle)\n\n    return config_parser", "docstring": "fetch and parse config file\n\nArgs:\nconfig_filepath (str): path to config file.  abspath > relpath\nlogger (:obj:`logging.Logger`): logger to catch error msgs", "source": "juraj-google-style"}
{"code": "def parse_args(argv=None):\n    parent_parser = get_parent_parser()\n    desc = 'Data Version Control'\n    parser = DvcParser(prog='dvc', description=desc, parents=[parent_parser], formatter_class=argparse.RawTextHelpFormatter)\n    parser.add_argument('-V', '--version', action=VersionAction, nargs=0, help=\"Show program's version.\")\n    subparsers = parser.add_subparsers(title='Available Commands', metavar='COMMAND', dest='cmd', help='Use dvc COMMAND --help for command-specific help.')\n    fix_subparsers(subparsers)\n    for cmd in COMMANDS:\n        cmd.add_parser(subparsers, parent_parser)\n    args = parser.parse_args(argv)\n    return args", "docstring": "Parses CLI arguments.\n\nArgs:\nargv: optional list of arguments to parse. sys.argv is used by default.\n\nRaises:\ndvc.exceptions.DvcParserError: raised for argument parsing errors.", "source": "codesearchnet"}
{"code": "def report(vulnerabilities, fileobj, print_sanitised):\n    n_vulnerabilities = len(vulnerabilities)\n    unsanitised_vulnerabilities = [v for v in vulnerabilities if (not isinstance(v, SanitisedVulnerability))]\n    n_unsanitised = len(unsanitised_vulnerabilities)\n    n_sanitised = (n_vulnerabilities - n_unsanitised)\n    heading = '{} vulnerabilit{} found{}.\\n'.format(('No' if (n_unsanitised == 0) else n_unsanitised), ('y' if (n_unsanitised == 1) else 'ies'), (' (plus {} sanitised)'.format(n_sanitised) if n_sanitised else ''))\n    vulnerabilities_to_print = (vulnerabilities if print_sanitised else unsanitised_vulnerabilities)\n    with fileobj:\n        for (i, vulnerability) in enumerate(vulnerabilities_to_print, start=1):\n            fileobj.write(vulnerability_to_str(i, vulnerability))\n        if (n_unsanitised == 0):\n            fileobj.write(color(heading, GOOD))\n        else:\n            fileobj.write(color(heading, DANGER))", "docstring": "Prints issues in color-coded text format.\n\nArgs:\nvulnerabilities: list of vulnerabilities to report\nfileobj: The output file object, which may be sys.stdout", "source": "codesearchnet"}
{"code": "def __lt__(self, other):\n    \n    if not isinstance(other, DateTimeValues):\n      raise ValueError('Other not an instance of DateTimeValues')\n\n    normalized_timestamp = self._GetNormalizedTimestamp()\n    other_normalized_timestamp = other._GetNormalizedTimestamp()  \n\n    if normalized_timestamp is None:\n      return other_normalized_timestamp is not None\n\n    if other_normalized_timestamp is None:\n      return False\n\n    return normalized_timestamp < other_normalized_timestamp", "docstring": "Determines if the date time values are less than other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are less than other.\n\nRaises:\nValueError: if other is not an instance of DateTimeValues.", "source": "juraj-google-style"}
{"code": "def Next(self):\n    try:\n        (self.key, self.value) = next(self.current)\n    except StopIteration:\n        if (self.current != self.second):\n            self.current = self.second\n            return self.Next()\n        return False\n    return True", "docstring": "Advances the iterator forward 1 step.\n\nReturns:\nbool: True if another item exists in the iterator, False otherwise.", "source": "codesearchnet"}
{"code": "class CaptureLogger:\n\n    def __init__(self, logger):\n        self.logger = logger\n        self.io = StringIO()\n        self.sh = logging.StreamHandler(self.io)\n        self.out = ''\n\n    def __enter__(self):\n        self.logger.addHandler(self.sh)\n        return self\n\n    def __exit__(self, *exc):\n        self.logger.removeHandler(self.sh)\n        self.out = self.io.getvalue()\n\n    def __repr__(self):\n        return f'captured: {self.out}\\n'", "docstring": "Context manager to capture `logging` streams\n\nArgs:\nlogger: 'logging` logger object\n\nReturns:\nThe captured output is available via `self.out`\n\nExample:\n\n```python\n>>> from transformers import logging\n>>> from transformers.testing_utils import CaptureLogger\n\n>>> msg = \"Testing 1, 2, 3\"\n>>> logging.set_verbosity_info()\n>>> logger = logging.get_logger(\"transformers.models.bart.tokenization_bart\")\n>>> with CaptureLogger(logger) as cl:\n...     logger.info(msg)\n>>> assert cl.out, msg + \"\n\"\n```", "source": "github-repos"}
{"code": "def _construct_concrete_function(func, output_graph_def, converted_input_indices):\n    input_tensors = func.graph.internal_captures\n    converted_inputs = object_identity.ObjectIdentitySet([input_tensors[index] for index in converted_input_indices])\n    not_converted_inputs = [tensor for tensor in func.inputs if tensor not in converted_inputs]\n    not_converted_inputs_map = {tensor.name: tensor for tensor in not_converted_inputs}\n    new_input_names = [tensor.name for tensor in not_converted_inputs]\n    new_output_names = [tensor.name for tensor in func.outputs]\n    for f in output_graph_def.library.function:\n        if context.context().has_function(f.signature.name):\n            context.context().remove_function(f.signature.name)\n    new_func = wrap_function.function_from_graph_def(output_graph_def, new_input_names, new_output_names)\n    for input_tensor in new_func.inputs:\n        input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)\n    return new_func", "docstring": "Constructs a concrete function from the `output_graph_def`.\n\nArgs:\nfunc: ConcreteFunction\noutput_graph_def: GraphDef proto.\nconverted_input_indices: Set of integers of input indices that were\nconverted to constants.\n\nReturns:\nConcreteFunction.", "source": "github-repos"}
{"code": "def _split_generators(self, dl_manager):\n    \n    splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)\n    validation = splits[1]\n    return [validation]", "docstring": "Return the validation split of ImageNet2012.\n\nArgs:\ndl_manager: download manager object.\n\nReturns:\nvalidation split.", "source": "juraj-google-style"}
{"code": "def _get_input_target_path(self, local_file_path):\n    (path, filename) = os.path.split(local_file_path)\n    if ('*' in filename):\n        return (path + '/')\n    else:\n        return local_file_path", "docstring": "Returns a directory or file path to be the target for \"gsutil cp\".\n\nIf the filename contains a wildcard, then the target path must\nbe a directory in order to ensure consistency whether the source pattern\ncontains one or multiple files.\n\n\nArgs:\nlocal_file_path: A full path terminating in a file or a file wildcard.\n\nReturns:\nThe path to use as the \"gsutil cp\" target.", "source": "codesearchnet"}
{"code": "def update_data(func):\n    default = dict([(param.name, param.default) for param in inspect.signature(func).parameters.values() if (param.default != getattr(inspect, '_empty'))])\n\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        default.update(kwargs)\n        kwargs.update(default)\n        cur_mod = sys.modules[func.__module__]\n        logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')\n        root_path = cur_mod.DATA_PATH\n        date_type = kwargs.pop('date_type', 'date')\n        save_static = kwargs.pop('save_static', True)\n        save_dynamic = kwargs.pop('save_dynamic', True)\n        symbol = kwargs.get('symbol')\n        file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)\n        d_file = cache_file(has_date=True, **file_kw)\n        s_file = cache_file(has_date=False, **file_kw)\n        cached = kwargs.pop('cached', False)\n        if (cached and save_static and files.exists(s_file)):\n            logger.info(f'Reading data from {s_file} ...')\n            return pd.read_parquet(s_file)\n        data = func(*args, **kwargs)\n        if save_static:\n            files.create_folder(s_file, is_file=True)\n            save_data(data=data, file_fmt=s_file, append=False)\n            logger.info(f'Saved data file to {s_file} ...')\n        if save_dynamic:\n            drop_dups = kwargs.pop('drop_dups', None)\n            files.create_folder(d_file, is_file=True)\n            save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)\n            logger.info(f'Saved data file to {d_file} ...')\n        return data\n    return wrapper", "docstring": "Decorator to save data more easily. Use parquet as data format\n\nArgs:\nfunc: function to load data from data source\n\nReturns:\nwrapped function", "source": "codesearchnet"}
{"code": "def set_element_type(entity, dtype, shape=UNSPECIFIED):\n    del entity\n    del dtype\n    del shape", "docstring": "Indicates that the entity is expected hold items of specified type/shape.\n\nThe staged TensorFlow ops will reflect and assert this data type. Ignored\notherwise.\n\nArgs:\nentity: The entity to annotate.\ndtype: TensorFlow dtype value to assert for entity.\nshape: Optional shape to assert for entity.", "source": "github-repos"}
{"code": "def encrypt_encoded(self, encoding, r_value):\n    obfuscator = (r_value or 1)\n    ciphertext = self.raw_encrypt(encoding.encoding, r_value=obfuscator)\n    encrypted_number = EncryptedNumber(self, ciphertext, encoding.exponent)\n    if (r_value is None):\n        encrypted_number.obfuscate()\n    return encrypted_number", "docstring": "Paillier encrypt an encoded value.\n\nArgs:\nencoding: The EncodedNumber instance.\nr_value (int): obfuscator for the ciphertext; by default (i.e.\nif *r_value* is None), a random value is used.\n\nReturns:\nEncryptedNumber: An encryption of *value*.", "source": "codesearchnet"}
{"code": "def write_pdb(self, custom_name='', out_suffix='', out_dir=None, custom_selection=None, force_rerun=False):\n    if (not custom_selection):\n        custom_selection = ModelSelection([0])\n    if ((not out_dir) or (not custom_name)):\n        if (not out_suffix):\n            out_suffix = '_new'\n    outfile = ssbio.utils.outfile_maker(inname=self.structure_file, outname=custom_name, append_to_name=out_suffix, outdir=out_dir, outext='.pdb')\n    try:\n        if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n            self.save(outfile, custom_selection)\n    except TypeError as e:\n        log.error('{}: unable to save structure in PDB file format'.format(self.structure_file))\n        raise TypeError(e)\n    return outfile", "docstring": "Write a new PDB file for the Structure's FIRST MODEL.\n\nSet custom_selection to a PDB.Select class for custom SMCRA selections.\n\nArgs:\ncustom_name: Filename of the new file (without extension)\nout_suffix: Optional string to append to new PDB file\nout_dir: Optional directory to output the file\ncustom_selection: Optional custom selection class\nforce_rerun: If existing file should be overwritten\n\nReturns:\nout_file: filepath of new PDB file", "source": "codesearchnet"}
{"code": "def multihead_self_attention_reduced(x, memory_antecedent=None, bias=None, factor=None, multihead_params=None, nonlinearity='none', reduction_type='conv', add_mask=True):\n    if ((not factor) or (not multihead_params)):\n        raise ValueError('factor and multihead_params should be set')\n    if (memory_antecedent is not None):\n        raise NotImplementedError('multihead_self_attention_reduced only works with self-attention')\n    depth = x.get_shape().as_list()[(- 1)]\n    if (reduction_type == 'attention'):\n        memory_x = local_reduction_attention(x, factor, multihead_params)\n    elif (reduction_type == 'conv'):\n        memory_x = conv_elems_1d(x, factor)\n    else:\n        raise ValueError('Unknown reduction type {}'.format(reduction_type))\n    if (nonlinearity == 'silu'):\n        memory_x *= tf.nn.sigmoid(memory_x)\n    elif (nonlinearity != 'none'):\n        raise ValueError('Unknown non linearity {}'.format(nonlinearity))\n    memory_x = tf.concat([x[(:, :1, :)], memory_x], axis=1)\n\n    @expert_utils.add_name_scope()\n    def construct_bias_vectors(t, axis):\n        length = tf.to_float(common_layers.shape_list(t)[1])\n        length_coordinates = tf.range(length, dtype=tf.float32)\n        length_coordinates = tf.expand_dims(length_coordinates, axis=axis)\n        return length_coordinates\n    if add_mask:\n        bias = (tf.to_float(tf.greater((construct_bias_vectors(memory_x, 0) * factor), (construct_bias_vectors(x, 1) + 0.001))) * (- 1000000000.0))\n        bias = tf.expand_dims(bias, axis=0)\n        bias = tf.expand_dims(bias, axis=0)\n    else:\n        bias = None\n    return multihead_attention(query_antecedent=x, memory_antecedent=memory_x, bias=bias, output_depth=depth, **multihead_params)", "docstring": "Reduce the length dimension by compressing with conv.\n\nArgs:\nx (tf.Tensor): float32 of shape [batch, length, depth]\nmemory_antecedent (tf.Tensor): Unsupported for now\nbias (tf.Tensor): Ignored\nfactor (int): compression factor for the memory sequence\nmultihead_params (dict): parameters for multihead attention\nnonlinearity (str): Add some non-linearity after the memory block\nreduction_type (str): type of compression\nadd_mask (bool): If True, add the bias to prevent attention to the future\n\nReturns:\n(tf.Tensor): float32 of shape [batch, length, depth]\n\nRaises:\nValueError: If reduction_type or nonlinearity is invalid", "source": "codesearchnet"}
{"code": "def directional_emd(direction, d1, d2):\n    \n    if direction == Direction.CAUSE:\n        func = hamming_emd\n    elif direction == Direction.EFFECT:\n        func = effect_emd\n    else:\n        \n        validate.direction(direction)\n\n    return round(func(d1, d2), config.PRECISION)", "docstring": "Compute the EMD between two repertoires for a given direction.\n\nThe full EMD computation is used for cause repertoires. A fast analytic\nsolution is used for effect repertoires.\n\nArgs:\ndirection (Direction): |CAUSE| or |EFFECT|.\nd1 (np.ndarray): The first repertoire.\nd2 (np.ndarray): The second repertoire.\n\nReturns:\nfloat: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|.\n\nRaises:\nValueError: If ``direction`` is invalid.", "source": "juraj-google-style"}
{"code": "def to_dict(self):\n    return {'hostname': self.hostname, 'port': self.port, 'transport': self.transport, 'virtual_host': self.virtual_host}", "docstring": "Return a dictionary of the broker stats.\n\nReturns:\ndict: Dictionary of the stats.", "source": "codesearchnet"}
{"code": "def set_value(value_proto, value, exclude_from_indexes=None):\n    value_proto.Clear()\n    if isinstance(value, (list, tuple)):\n        for sub_value in value:\n            set_value(value_proto.array_value.values.add(), sub_value, exclude_from_indexes)\n        return\n    if isinstance(value, entity_pb2.Value):\n        value_proto.MergeFrom(value)\n    elif isinstance(value, unicode):\n        value_proto.string_value = value\n    elif isinstance(value, str):\n        value_proto.blob_value = value\n    elif isinstance(value, bool):\n        value_proto.boolean_value = value\n    elif isinstance(value, (int, long)):\n        value_proto.integer_value = value\n    elif isinstance(value, float):\n        value_proto.double_value = value\n    elif isinstance(value, datetime.datetime):\n        to_timestamp(value, value_proto.timestamp_value)\n    elif isinstance(value, entity_pb2.Key):\n        value_proto.key_value.CopyFrom(value)\n    elif isinstance(value, entity_pb2.Entity):\n        value_proto.entity_value.CopyFrom(value)\n    else:\n        raise TypeError(('value type: %r not supported' % (value,)))\n    if (exclude_from_indexes is not None):\n        value_proto.exclude_from_indexes = exclude_from_indexes", "docstring": "Set the corresponding datastore.Value _value field for the given arg.\n\nArgs:\nvalue_proto: datastore.Value proto message.\nvalue: python object or datastore.Value. (unicode value will set a\ndatastore string value, str value will set a blob string value).\nUndefined behavior if value is/contains value_proto.\nexclude_from_indexes: if the value should be exclude from indexes. None\nleaves indexing as is (defaults to False if value is not a Value\nmessage).\n\nRaises:\nTypeError: if the given value type is not supported.", "source": "codesearchnet"}
{"code": "def __init__(self, definition, data_visibility_policy):\n    \n    self.data_visibility_policy = data_visibility_policy\n\n    self.breakpoint = copy.deepcopy(definition)\n\n    self.breakpoint['stackFrames'] = []\n    self.breakpoint['evaluatedExpressions'] = []\n    self.breakpoint['variableTable'] = [{\n        'status': {\n            'isError': True,\n            'refersTo': 'VARIABLE_VALUE',\n            'description': {\n                'format': 'Buffer full. Use an expression to see more data'\n            }\n        }\n    }]\n\n    \n    self._var_table = self.breakpoint['variableTable']\n\n    \n    self._var_table_index = {}\n\n    \n    self._total_size = 0\n\n    \n    \n    self.max_frames = 20\n\n    \n    \n    self.max_expand_frames = 5\n\n    \n    \n    \n    \n    \n    self.max_size = 32768  \n\n    self.default_capture_limits = _CaptureLimits()\n\n    \n    \n    \n    \n    \n    \n    self.expression_capture_limits = _CaptureLimits(max_value_len=32768,\n                                                    max_list_items=32768)", "docstring": "Class constructor.\n\nArgs:\ndefinition: breakpoint definition that this class will augment with\ncaptured data.\ndata_visibility_policy: An object used to determine the visibiliy\nof a captured variable.  May be None if no policy is available.", "source": "juraj-google-style"}
{"code": "def _EvaluateExpressions(self, frame):\n    return [self._FormatExpression(frame, expression) for expression in (self._definition.get('expressions') or [])]", "docstring": "Evaluates watched expressions into a string form.\n\nIf expression evaluation fails, the error message is used as evaluated\nexpression string.\n\nArgs:\nframe: Python stack frame of breakpoint hit.\n\nReturns:\nArray of strings where each string corresponds to the breakpoint\nexpression with the same index.", "source": "codesearchnet"}
{"code": "def load_vocabulary(lang=\"en\", type=\"wiki\"):\n  \n  src_dir = \"{}_vocab\".format(type)\n  p = locate_resource(src_dir, lang)\n  return CountedVocabulary.from_vocabfile(p)", "docstring": "Return a CountedVocabulary object.\n\nArgs:\nlang (string): language code.\ntype (string): wiki,...", "source": "juraj-google-style"}
{"code": "def _BuildStations(self, stoplist):\n    \n    stations = []\n    dists = self._EuclidianDistances(stoplist)\n    stations = self._CalculateYLines(dists)\n    return stations", "docstring": "Dispatches the best algorithm for calculating station line position.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstoplist: [Stop, Stop, ...]\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n\nReturns:\n# One integer y-coordinate for each station normalized between\n# 0 and X, where X is the height of the graph in pixels\n[0, 33, 140, ... , X]", "source": "juraj-google-style"}
{"code": "def _finish_operation_action(self, action):\n        \n\n        success = action.data['success']\n        conn_key = action.data['id']\n\n        if self._get_connection_state(conn_key) != self.InProgress:\n            self._logger.error(\"Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s\", str(conn_key))\n            return\n\n        \n        data = self._get_connection(conn_key)\n        callback = data['callback']\n        conn_id = data['conn_id']\n        args = action.data['callback_args']\n\n        data['state'] = self.Idle\n        data['microstate'] = None\n\n        callback(conn_id, self.id, success, *args)", "docstring": "Finish an attempted operation.\n\nArgs:\naction (ConnectionAction): the action object describing the result\nof the operation that we are finishing", "source": "juraj-google-style"}
{"code": "def _maybe_read_file(filename):\n    try:\n        with open(filename) as infile:\n            return infile.read()\n    except IOError as e:\n        if (e.errno == errno.ENOENT):\n            return None", "docstring": "Read the given file, if it exists.\n\nArgs:\nfilename: A path to a file.\n\nReturns:\nA string containing the file contents, or `None` if the file does\nnot exist.", "source": "codesearchnet"}
{"code": "def create_unique_base26_symlink(path, source):\n    retries = 0\n    while True:\n        name = find_matching_symlink(path, source)\n        if name:\n            return os.path.join(path, name)\n        names = [x for x in os.listdir(path) if os.path.islink(os.path.join(path, x))]\n        if names:\n            prev = max(names)\n        else:\n            prev = None\n        linkname = get_next_base26(prev)\n        linkpath = os.path.join(path, linkname)\n        try:\n            os.symlink(source, linkpath)\n            return linkpath\n        except OSError as e:\n            if (e.errno != errno.EEXIST):\n                raise\n        if (retries > 10):\n            raise RuntimeError('Variant shortlink not created - there was too much contention.')\n        retries += 1", "docstring": "Create a base-26 symlink in `path` pointing to `source`.\n\nIf such a symlink already exists, it is returned. Note that there is a small\nchance that this function may create a new symlink when there is already one\npointed at `source`.\n\nAssumes `path` only contains base26 symlinks.\n\nReturns:\nstr: Path to created symlink.", "source": "codesearchnet"}
{"code": "def assemble(self, header_json, metadata_json, content_json):\n        \n        header = json_decode(header_json)\n        if 'msgtype' not in header:\n            log.error(\"Bad header with no msgtype was: %r\", header)\n            raise ProtocolError(\"No 'msgtype' in header\")\n        return self._messages[header['msgtype']].assemble(\n            header_json, metadata_json, content_json\n        )", "docstring": "Create a Message instance assembled from json fragments.\n\nArgs:\nheader_json (``JSON``) :\n\nmetadata_json (``JSON``) :\n\ncontent_json (``JSON``) :\n\nReturns:\nmessage", "source": "juraj-google-style"}
{"code": "def crypto_withdraw(self, amount, currency, crypto_address):\n    params = {'amount': amount, 'currency': currency, 'crypto_address': crypto_address}\n    return self._send_message('post', '/withdrawals/crypto', data=json.dumps(params))", "docstring": "Withdraw funds to a crypto address.\n\nArgs:\namount (Decimal): The amount to withdraw\ncurrency (str): The type of currency (eg. 'BTC')\ncrypto_address (str): Crypto address to withdraw to.\n\nReturns:\ndict: Withdraw details. Example::\n{\n\"id\":\"593533d2-ff31-46e0-b22e-ca754147a96a\",\n\"amount\":\"10.00\",\n\"currency\": \"BTC\",\n}", "source": "codesearchnet"}
{"code": "def create_output_excerpts(self, test_info):\n    return []", "docstring": "Creates excerpts of the service's output files.\n\n[Optional] This method only applies to services with output files.\n\nFor services that generates output files, calling this method would\ncreate excerpts of the output files. An excerpt should contain info\nbetween two calls of `create_output_excerpts` or from the start of the\nservice to the call to `create_output_excerpts`.\n\nUse `AndroidDevice#generate_filename` to get the proper filenames for\nexcerpts.\n\nThis is usually called at the end of: `setup_class`, `teardown_test`,\nor `teardown_class`.\n\nArgs:\ntest_info: RuntimeTestInfo, the test info associated with the scope\nof the excerpts.\n\nReturns:\nList of strings, the absolute paths to the excerpt files created.\nEmpty list if no excerpt files are created.", "source": "github-repos"}
{"code": "def get(self, feed_item, required=False, column_name=None):\n    result = None\n    keys = []\n    id_value = feed_item.get(self._id_field, None) if column_name == None else feed_item.get(column_name, None)\n    if not id_value and self._search_field and feed_item.get(self._search_field, None):\n        store_key = feed_item[self._search_field]\n        if self._parent_filter_name:\n            if feed_item.get(self._parent_filter_field_name, None):\n                store_key = str(feed_item.get(self._parent_filter_field_name, None)) + store_key\n        result = store.get(self._entity, store_key)\n        if not result:\n            result, key = self._get_by_name(feed_item)\n            keys.append(key)\n        if not result and required:\n            raise Exception('ERROR: Could not find %s with name %s' % (self._entity, feed_item[self._search_field]))\n    elif id_value:\n        if isinstance(id_value, str) and id_value.startswith('ext'):\n            keys.append(id_value)\n            id_value = store.translate(self._entity, id_value)\n            if id_value and (not column_name):\n                feed_item[self._id_field] = id_value\n            elif id_value and column_name:\n                feed_item[column_name] = id_value\n        if id_value:\n            keys.append(id_value)\n            result = store.get(self._entity, id_value)\n            if not result:\n                result = self._get(feed_item)\n            if not result and required:\n                raise Exception('ERROR: Could not find %s with id %s' % (self._entity, id_value))\n    store.set(self._entity, keys, result)\n    return result", "docstring": "Retrieves an item.\n\nItems could be retrieved from a in memory cache in case it has already been\nretrieved within the current execution. Also, this method is capable of\ntranslating 'ext' placeholder IDs with concrete CM ids.\n\nArgs:\nfeed_item: Feed item from the Bulkdozer feed representing the item to\nretrieve.\n\nReturns:\nThe CM object that represents the identified entity.", "source": "github-repos"}
{"code": "def extend_transformations(self, transformations,\n                               return_alternatives=False):\n        \n        for t in transformations:\n            self.append_transformation(t,\n                                       return_alternatives=return_alternatives)", "docstring": "Extends a sequence of transformations to the TransformedStructure.\n\nArgs:\ntransformations: Sequence of Transformations\nreturn_alternatives: Whether to return alternative\nTransformedStructures for one-to-many transformations.\nreturn_alternatives can be a number, which stipulates the\ntotal number of structures to return.", "source": "juraj-google-style"}
{"code": "def repeat(self, caller: Caller[RequestT, ResponseT], request: RequestT, timeout: float, metrics_collector: Optional[_MetricsCollector]=None) -> ResponseT:\n    return _execute_request(caller, request, timeout, metrics_collector)", "docstring": "repeat method is called from the RequestResponseIO when\na repeater is enabled.\n\nArgs:\ncaller: a `~apache_beam.io.requestresponse.Caller` object that\ncalls the API.\nrequest: input request to repeat.\ntimeout: time to wait for the request to complete.\nmetrics_collector: (Optional) a\n`~apache_beam.io.requestresponse._MetricsCollector` object to\ncollect the metrics for RequestResponseIO.", "source": "github-repos"}
{"code": "def create(self, task_type_id, task_queue_id, arguments=None, name=''):\n    if (arguments is None):\n        arguments = {}\n    request_url = (self._client.base_api_url + self.list_url)\n    data_to_post = {'name': name, 'arguments': json.dumps(arguments), 'task_type': task_type_id, 'task_queue': task_queue_id}\n    response = self._client.session.post(request_url, data=data_to_post)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Create a task instance.\n\nArgs:\ntask_type_id (int): The ID of the task type to base the task\ninstance on.\ntask_queue_id (int): The ID of the task queue to run the job\non.\narguments (dict, optional): The arguments to give the task\ntype.\nname (str, optional): A non-unique name to give the task\ninstance.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nA task instance model instance representing the task\ninstance just created.", "source": "codesearchnet"}
{"code": "async def register_user(self, password, **kwds):\n        \n        \n        user = await self._create_remote_user(password=password, **kwds)\n        \n        if not 'pk' in user:\n            \n            user['pk'] = user['id']\n\n        \n        match_query = self.model.user == user['id']\n\n        \n        if self.model.select().where(match_query).count() > 0:\n            \n            raise RuntimeError('The user is already registered.')\n\n        \n        password = self.model(user=user['id'], password=password)\n\n        \n        password.save()\n\n        \n        return {\n            'user': user,\n            'sessionToken': self._user_session_token(user)\n        }", "docstring": "This function is used to provide a sessionToken for later requests.\n\nArgs:\nuid (str): The", "source": "juraj-google-style"}
{"code": "def write(self, file_des, contents):\n    file_handle = self.filesystem.get_open_file(file_des)\n    if isinstance(file_handle, FakeDirWrapper):\n        self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)\n    if isinstance(file_handle, FakePipeWrapper):\n        return file_handle.write(contents)\n    file_handle.raw_io = True\n    file_handle._sync_io()\n    file_handle.update_flush_pos()\n    file_handle.write(contents)\n    file_handle.flush()\n    return len(contents)", "docstring": "Write string to file descriptor, returns number of bytes written.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\ncontents: String of bytes to write to file.\n\nReturns:\nNumber of bytes written.\n\nRaises:\nOSError: bad file descriptor.\nTypeError: if file descriptor is not an integer.", "source": "codesearchnet"}
{"code": "def validate(self, corpus):\n    passed = True\n    results = {}\n    for validator in self.validators:\n        sub_result = validator.validate(corpus)\n        results[validator.name()] = sub_result\n        if (not sub_result.passed):\n            passed = False\n    return CombinedValidationResult(passed, results)", "docstring": "Perform validation on the given corpus.\n\nArgs:\ncorpus (Corpus): The corpus to test/validate.", "source": "codesearchnet"}
{"code": "def bold(self, action):\n        \n        if action =='on':\n            action = 'E'\n        elif action == 'off':\n            action = 'F'\n        else:\n            raise RuntimeError('Invalid action for function bold. Options are on and off')\n        self.send(chr(27)+action)", "docstring": "Enable/cancel bold printing\n\nArgs:\naction: Enable or disable bold printing. Options are 'on' and 'off'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"}
{"code": "def get_mel_conditioner_outputs(self, input_features: torch.FloatTensor, composer: str, generation_config: GenerationConfig, attention_mask: Optional[torch.FloatTensor]=None):\n    composer_to_feature_token = generation_config.composer_to_feature_token\n    if composer not in composer_to_feature_token.keys():\n        raise ValueError(f'Please choose a composer from {list(composer_to_feature_token.keys())}. Composer received - {composer}')\n    composer_value = composer_to_feature_token[composer]\n    composer_value = torch.tensor(composer_value, device=self.device)\n    composer_value = composer_value.repeat(input_features.shape[0])\n    embedding_offset = min(composer_to_feature_token.values())\n    input_features = self.mel_conditioner(feature=input_features, index_value=composer_value, embedding_offset=embedding_offset)\n    if attention_mask is not None:\n        input_features[~attention_mask[:, 0].bool()] = 0.0\n        attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1)\n        return (input_features, attention_mask)\n    return (input_features, None)", "docstring": "This method is used to concatenate mel conditioner tokens at the front of the input_features in order to\ncontrol the type of MIDI token generated by the model.\n\nArgs:\ninput_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\ninput features extracted from the feature extractor.\ncomposer (`str`):\ncomposer token which determines the type of MIDI tokens to be generated.\ngeneration_config (`~generation.GenerationConfig`):\nThe generation is used to get the composer-feature_token pair.\nattention_mask (``, *optional*):\nFor batched generation `input_features` are padded to have the same shape across all examples.\n`attention_mask` helps to determine which areas were padded and which were not.\n- 1 for tokens that are **not padded**,\n- 0 for tokens that are **padded**.", "source": "github-repos"}
{"code": "def get_wallace_tensor(self, tau):\n    b = (0.5 * ((((np.einsum('ml,kn->klmn', tau, np.eye(3)) + np.einsum('km,ln->klmn', tau, np.eye(3))) + np.einsum('nl,km->klmn', tau, np.eye(3))) + np.einsum('kn,lm->klmn', tau, np.eye(3))) + ((- 2) * np.einsum('kl,mn->klmn', tau, np.eye(3)))))\n    strain = self.get_strain_from_stress(tau)\n    b += self.get_effective_ecs(strain)\n    return b", "docstring": "Gets the Wallace Tensor for determining yield strength\ncriteria.\n\nArgs:\ntau (3x3 array-like): stress at which to evaluate\nthe wallace tensor", "source": "codesearchnet"}
{"code": "def iflag_unique_items(list_):\n    \n    seen = set()\n    def unseen(item):\n        if item in seen:\n            return False\n        seen.add(item)\n        return True\n    flag_iter = (unseen(item) for item in list_)\n    return flag_iter", "docstring": "Returns a list of flags corresponding to the first time an item is seen\n\nArgs:\nlist_ (list): list of items\n\nReturns:\nflag_iter", "source": "juraj-google-style"}
{"code": "def _do_sampling(self, logits, num_samples):\n    with self.session(), self.test_scope():\n        random_seed.set_random_seed(1618)\n        op = random_ops.multinomial(logits, num_samples, output_dtype=dtypes.int32)\n        d = self.evaluate(op)\n    batch_size, num_classes = logits.shape\n    freqs_mat = []\n    for i in range(batch_size):\n        cnts = dict(collections.Counter(d[i, :]))\n        self.assertLess(max(cnts.keys()), num_classes)\n        self.assertGreaterEqual(min(cnts.keys()), 0)\n        freqs = [cnts[k] * 1.0 / num_samples if k in cnts else 0 for k in range(num_classes)]\n        freqs_mat.append(freqs)\n    return freqs_mat", "docstring": "Categorical samples from given input.\n\nArgs:\nlogits: Numpy ndarray of shape [batch_size, num_classes].\nnum_samples: Int; number of samples to draw.\n\nReturns:\nFrequencies from sampled classes; shape [batch_size, num_classes].", "source": "github-repos"}
{"code": "def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):\n    \n    \n    if file_entry.IsRoot() and file_entry.type_indicator not in (\n        self._TYPES_WITH_ROOT_METADATA):\n      return\n\n    \n    \n    \n    if data_stream and not data_stream.IsDefault():\n      return\n\n    display_name = mediator.GetDisplayName()\n    logger.debug(\n        '[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(\n            display_name))\n\n    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n\n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('extracting')\n\n    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('extracting')\n\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING", "docstring": "Extracts metadata from a file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry to extract metadata from.\ndata_stream (dfvfs.DataStream): data stream or None if the file entry\nhas no data stream.", "source": "juraj-google-style"}
{"code": "def _objective_and_vega(volatilities):\n    vols = volatilities * sqrt_t / normalization\n    d1 = (norm_forwards - norm_strikes) / vols\n    implied_prices = (norm_forwards - norm_strikes) * _cdf(d1) + vols * _pdf(d1)\n    if is_call_options is not None:\n        put_prices = implied_prices - norm_forwards + norm_strikes\n        implied_prices = tf.where(is_call_options, implied_prices, put_prices)\n    vega = _pdf(d1) * sqrt_t / discount_factors / normalization\n    return (implied_prices - normalized_prices, vega)", "docstring": "Calculate the Bachelier price and vega for a given volatility.\n\nThis method returns normalized results.\n\nArgs:\nvolatilities: A real `Tensor` of same shape and dtype as `forwards`. The\nvolatility to expiry.\n\nReturns:\nA tuple containing (value, gradient) of the black scholes price, both of\nwhich are `Tensor`s of the same shape and dtype as `volatilities`.", "source": "github-repos"}
{"code": "def load_config(self, settings=None):\n    self._load_defaults()\n    if settings:\n        self.update(settings)\n    else:\n        config_paths = _get_config_files()\n        for p in config_paths:\n            conf = _process_config_file([p])\n            self.update(conf)\n    self._loaded = True\n    self._validate()", "docstring": "Load the configuration either from the config file, or from the given settings.\n\nArgs:\nsettings (dict): If given, the settings are pulled from this dictionary. Otherwise, the\nconfig file is used.", "source": "codesearchnet"}
{"code": "def _transform_local_field_to_expression(expression, node, context):\n    column_name = expression.field_name\n    column = sql_context_helpers.get_column(column_name, node, context)\n    return column", "docstring": "Transform a LocalField compiler expression into its SQLAlchemy expression representation.\n\nArgs:\nexpression: expression, LocalField compiler expression.\nnode: SqlNode, the SqlNode the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression.", "source": "codesearchnet"}
{"code": "def _build(self, *args):\n    net = args\n    if (not self._layers):\n        if (len(args) == 1):\n            return args[0]\n        else:\n            return args\n    for layer in self._layers:\n        if isinstance(net, tuple):\n            net = layer(*net)\n        else:\n            net = layer(net)\n    return net", "docstring": "Connects the Sequential module into the graph.\n\nArgs:\n*args: A tuple of inputs, to be unpacked as the arguments to the first\nlayer.\n\nReturns:\nThe output value of the last layer.", "source": "codesearchnet"}
{"code": "def append(parent: ScheduleComponent, child: ScheduleComponent, name: str=None) -> Schedule:\n    common_channels = (set(parent.channels) & set(child.channels))\n    insertion_time = parent.ch_stop_time(*common_channels)\n    return insert(parent, insertion_time, child, name=name)", "docstring": "r\"\"\"Return a new schedule with by appending `child` to `parent` at\nthe last time of the `parent` schedule's channels\nover the intersection of the parent and child schedule's channels.\n\n$t = \\textrm{max}({x.stop\\_time |x \\in parent.channels \\cap child.channels})$\n\nArgs:\nparent: The schedule to be inserted into\nchild: The schedule to insert\nname: Name of the new schedule. Defaults to name of parent", "source": "codesearchnet"}
{"code": "def serve(name: str = \"\", port: int = 5000) -> None:\n    \n    logging.info(\" * Listening on port %s\", port)\n    httpd = HTTPServer((name, port), RequestHandler)\n    httpd.serve_forever()", "docstring": "A basic way to serve the methods.\n\nArgs:\nname: Server address.\nport: Server port.", "source": "juraj-google-style"}
{"code": "def __mod__(self, other: Union[_FormatArg, Iterable[_FormatArg]]) -> bytes:\n        \n        if isinstance(other, bytes):\n            return self.format([other])\n        elif hasattr(other, '__bytes__'):\n            supports_bytes = cast(SupportsBytes, other)\n            return self.format([bytes(supports_bytes)])\n        elif hasattr(other, '__iter__'):\n            items = cast(Iterable[_FormatArg], other)\n            return self.format(items)\n        return NotImplemented", "docstring": "String interpolation, shortcut for :meth:`.format`.\n\nArgs:\nother: The data interpolated into the format string.", "source": "juraj-google-style"}
{"code": "def robust_zscore(mat, ctrl_mat=None, min_mad=0.1):\n    if (ctrl_mat is not None):\n        medians = ctrl_mat.median(axis=1)\n        median_devs = abs(ctrl_mat.subtract(medians, axis=0))\n    else:\n        medians = mat.median(axis=1)\n        median_devs = abs(mat.subtract(medians, axis=0))\n    sub = mat.subtract(medians, axis='index')\n    mads = median_devs.median(axis=1)\n    mads = mads.clip(lower=min_mad)\n    zscore_df = sub.divide((mads * 1.4826), axis='index')\n    return zscore_df.round(rounding_precision)", "docstring": "Robustly z-score a pandas df along the rows.\n\nArgs:\nmat (pandas df): Matrix of data that z-scoring will be applied to\nctrl_mat (pandas df): Optional matrix from which to compute medians and MADs\n(e.g. vehicle control)\nmin_mad (float): Minimum MAD to threshold to; tiny MAD values will cause\nz-scores to blow up\n\nReturns:\nzscore_df (pandas_df): z-scored data", "source": "codesearchnet"}
{"code": "def random_get_float(\n    rnd: Optional[tcod.random.Random], mi: float, ma: float\n) -> float:\n    \n    return float(\n        lib.TCOD_random_get_double(rnd.random_c if rnd else ffi.NULL, mi, ma)\n    )", "docstring": "Return a random float in the range: ``mi`` <= n <= ``ma``.\n\nThe result is affected by calls to :any:`random_set_distribution`.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\nlow (float): The lower bound of the random range, inclusive.\nhigh (float): The upper bound of the random range, inclusive.\n\nReturns:\nfloat: A random double precision float\nin the range ``mi`` <= n <= ``ma``.", "source": "juraj-google-style"}
{"code": "def _encode(self, value, path_from_root):\n    if isinstance(value, dict):\n        json_value = {}\n        for (key, value) in six.iteritems(value):\n            json_value[key] = self._encode(value, (path_from_root + (key,)))\n        return json_value\n    else:\n        path = '.'.join(path_from_root)\n        if util.is_pandas_data_frame(value):\n            return util.encode_data_frame(path, value, self._run)\n        else:\n            (friendly_value, converted) = util.json_friendly(data_types.val_to_json(path, value))\n            (json_value, compressed) = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value))\n            if compressed:\n                self.write_h5(path_from_root, friendly_value)\n            return json_value\n    '\\n            if isinstance(value, dict):\\n                json_child[key], converted = util.json_friendly(\\n                    self._encode(value, path_from_root + [key]))\\n            else:\\n        '", "docstring": "Normalize, compress, and encode sub-objects for backend storage.\n\nvalue: Object to encode.\npath_from_root: `tuple` of key strings from the top-level summary to the\ncurrent `value`.\n\nReturns:\nA new tree of dict's with large objects replaced with dictionaries\nwith \"_type\" entries that say which type the original data was.", "source": "codesearchnet"}
{"code": "def importFile(self, path, mode, outp=None):\n    if (not os.path.isfile(path)):\n        raise s_exc.NoSuchFile('File does not exist')\n    fname = os.path.split(path)[1]\n    parts = fname.rsplit('.', 1)\n    ext = (parts[1] if (len(parts) is 2) else None)\n    if ((not ext) or (ext not in ('crt', 'key', 'p12'))):\n        mesg = 'importFile only supports .crt, .key, .p12 extensions'\n        raise s_exc.BadFileExt(mesg=mesg, ext=ext)\n    newpath = s_common.genpath(self.certdir, mode, fname)\n    if os.path.isfile(newpath):\n        raise s_exc.FileExists('File already exists')\n    shutil.copy(path, newpath)\n    if (outp is not None):\n        outp.printf(('copied %s to %s' % (path, newpath)))", "docstring": "Imports certs and keys into the Synapse cert directory\n\nArgs:\npath (str): The path of the file to be imported.\nmode (str): The certdir subdirectory to import the file into.\n\nExamples:\nImport CA certifciate 'mycoolca.crt' to the 'cas' directory.\n\ncertdir.importFile('mycoolca.crt', 'cas')\n\nNotes:\nimportFile does not perform any validation on the files it imports.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_float_type_def(double_precision, include_complex=True):\n    \n    if include_complex:\n        with open(os.path.abspath(resource_filename('mot', 'data/opencl/complex.h')), 'r') as f:\n            complex_number_support = f.read()\n    else:\n        complex_number_support = ''\n\n    scipy_constants = \n\n    if double_precision:\n        return  + scipy_constants + complex_number_support\n    else:\n        return  + scipy_constants + complex_number_support", "docstring": "Get the model floating point type definition.\n\nArgs:\ndouble_precision (boolean): if True we will use the double type for the mot_float_type type.\nElse, we will use the single precision float type for the mot_float_type type.\ninclude_complex (boolean): if we include support for complex numbers\n\nReturns:\nstr: defines the mot_float_type types, the epsilon and the MIN and MAX values.", "source": "juraj-google-style"}
{"code": "def from_db_value(cls, value, *_) -> Optional[LocalizedValue]:\n        \n\n        if not value:\n            if getattr(settings, 'LOCALIZED_FIELDS_EXPERIMENTAL', False):\n                return None\n            else:\n                return cls.attr_class()\n\n        \n        \n        \n        if isinstance(value, list):\n            result = []\n            for inner_val in value:\n                if isinstance(inner_val, dict):\n                    if inner_val is None:\n                        result.append(None)\n                    else:\n                        result.append(cls.attr_class(inner_val))\n                else:\n                    result.append(inner_val)\n\n            return result\n\n        \n        \n        \n        if not isinstance(value, dict):\n            return value\n\n        return cls.attr_class(value)", "docstring": "Turns the specified database value into its Python\nequivalent.\n\nArguments:\nvalue:\nThe value that is stored in the database and\nneeds to be converted to its Python equivalent.\n\nReturns:\nA :see:LocalizedValue instance containing the\ndata extracted from the database.", "source": "juraj-google-style"}
{"code": "def getObjective(self, name):\n    return lock_and_call((lambda : Objective(self._impl.getObjective(name))), self._lock)", "docstring": "Get the objective with the corresponding name.\n\nArgs:\nname: Name of the objective to be found.\n\nRaises:\nTypeError: if the specified objective does not exist.", "source": "codesearchnet"}
{"code": "def word_probability(self, word, total_words=None):\n        \n        if total_words is None:\n            total_words = self._word_frequency.total_words\n        return self._word_frequency.dictionary[word] / total_words", "docstring": "Calculate the probability of the `word` being the desired, correct\nword\n\nArgs:\nword (str): The word for which the word probability is \\\ncalculated\ntotal_words (int): The total number of words to use in the \\\ncalculation; use the default for using the whole word \\\nfrequency\nReturns:\nfloat: The probability that the word is the correct word", "source": "juraj-google-style"}
{"code": "def labels(self, leaves=True, internal=True):\n        \n        if not isinstance(leaves, bool):\n            raise TypeError(\"leaves must be a bool\")\n        if not isinstance(internal, bool):\n            raise TypeError(\"internal must be a bool\")\n        for node in self.traverse_preorder():\n            if node.label is not None and ((leaves and node.is_leaf()) or (internal and not node.is_leaf())):\n                yield node.label", "docstring": "Generator over the (non-``None``) ``Node`` labels of this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def get_thread(self, thread_id, update_if_cached=True, raise_404=False):\n    cached_thread = self._thread_cache.get(thread_id)\n    if cached_thread:\n        if update_if_cached:\n            cached_thread.update()\n        return cached_thread\n    res = self._requests_session.get(self._url.thread_api_url(thread_id=thread_id))\n    if raise_404:\n        res.raise_for_status()\n    elif (not res.ok):\n        return None\n    thread = Thread._from_request(self, res, thread_id)\n    self._thread_cache[thread_id] = thread\n    return thread", "docstring": "Get a thread from 4chan via 4chan API.\n\nArgs:\nthread_id (int): Thread ID\nupdate_if_cached (bool): Whether the thread should be updated if it's already in our cache\nraise_404 (bool): Raise an Exception if thread has 404'd\n\nReturns:\n:class:`basc_py4chan.Thread`: Thread object", "source": "codesearchnet"}
{"code": "def marshal_bson(\n    obj,\n    types=BSON_TYPES,\n    fields=None,\n):\n    \n    return marshal_dict(\n        obj,\n        types,\n        fields=fields,\n    )", "docstring": "Recursively marshal a Python object to a BSON-compatible dict\nthat can be passed to PyMongo, Motor, etc...\n\nArgs:\nobj:    object, It's members can be nested Python\nobjects which will be converted to dictionaries\ntypes:  tuple-of-types, The BSON primitive types, typically\nyou would not change this\nfields: None-list-of-str, Explicitly marshal only these fields\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def __init__(self, email_url: str, mailing_list_name: str, extractor_name: str) -> None:\n\n        \n        \n        Extractor.__init__(self,\n                           input_type=InputType.TEXT,\n                           category=\"build_in_extractor\",\n                           name=extractor_name)\n\n        self.email_url = email_url\n        self.mailing_list_name = mailing_list_name", "docstring": "Initialize the extractor, storing mailing list and message information\nArgs:\nemail_url: str\nmailing_list_name: str\nextractor_name: str\n\nReturns:", "source": "juraj-google-style"}
{"code": "def floor(x, name=None):\n    return gen_math_ops.floor(x, name)", "docstring": "Returns element-wise largest integer not greater than x.\n\nBoth input range is `(-inf, inf)` and the\noutput range consists of all integer values.\n\nFor example:\n\n>>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float(\"inf\")])\n>>> tf.floor(x).numpy()\narray([ 1., -2.,  5., -3.,  0., inf], dtype=float32)\n\nArgs:\nx:  A `Tensor`. Must be one of the following types: `bfloat16`, `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`. Has the same type as x.", "source": "github-repos"}
{"code": "def parse_record(raw_record, is_training, dtype):\n    (image_buffer, label) = _parse_example_proto(raw_record)\n    image = imagenet_preprocessing.preprocess_image(image_buffer=image_buffer, output_height=_DEFAULT_IMAGE_SIZE, output_width=_DEFAULT_IMAGE_SIZE, num_channels=_NUM_CHANNELS, is_training=is_training)\n    image = tf.cast(image, dtype)\n    return (image, label)", "docstring": "Parses a record containing a training example of an image.\n\nThe input record is parsed into a label and image, and the image is passed\nthrough preprocessing steps (cropping, flipping, and so on).\n\nArgs:\nraw_record: scalar Tensor tf.string containing a serialized\nExample protocol buffer.\nis_training: A boolean denoting whether the input is for training.\ndtype: data type to use for images/features.\n\nReturns:\nTuple with processed image tensor and one-hot-encoded label tensor.", "source": "codesearchnet"}
{"code": "def commit_offsets_async(self, offsets, callback=None):\n        \n        self._invoke_completed_offset_commit_callbacks()\n        if not self.coordinator_unknown():\n            future = self._do_commit_offsets_async(offsets, callback)\n        else:\n            \n            \n            \n            \n            \n            \n            \n            future = self.lookup_coordinator()\n            future.add_callback(lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)())\n            if callback:\n                future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e)))\n\n        \n        \n        \n        \n        self._client.poll(timeout_ms=0) \n\n        return future", "docstring": "Commit specific offsets asynchronously.\n\nArguments:\noffsets (dict {TopicPartition: OffsetAndMetadata}): what to commit\ncallback (callable, optional): called as callback(offsets, response)\nresponse will be either an Exception or a OffsetCommitResponse\nstruct. This callback can be used to trigger custom actions when\na commit request completes.\n\nReturns:\nkafka.future.Future", "source": "juraj-google-style"}
{"code": "def plot_main(pid, return_fig_ax=False):\n    \n\n    global WORKING_DIRECTORY, SNR_CUT\n\n    if isinstance(pid, PlotInput):\n        pid = pid.return_dict()\n\n    WORKING_DIRECTORY = '.'\n    if 'WORKING_DIRECTORY' not in pid['general'].keys():\n        pid['general']['WORKING_DIRECTORY'] = '.'\n\n    SNR_CUT = 5.0\n    if 'SNR_CUT' not in pid['general'].keys():\n        pid['general']['SNR_CUT'] = SNR_CUT\n\n    if \"switch_backend\" in pid['general'].keys():\n        plt.switch_backend(pid['general']['switch_backend'])\n\n    running_process = MakePlotProcess(\n        **{**pid, **pid['general'], **pid['plot_info'], **pid['figure']})\n\n    running_process.input_data()\n    running_process.setup_figure()\n    running_process.create_plots()\n\n    \n    if 'save_figure' in pid['figure'].keys():\n        if pid['figure']['save_figure'] is True:\n            running_process.fig.savefig(\n                pid['general']['WORKING_DIRECTORY'] + '/' + pid['figure']['output_path'],\n                **pid['figure']['savefig_kwargs'])\n\n    if 'show_figure' in pid['figure'].keys():\n        if pid['figure']['show_figure'] is True:\n            plt.show()\n\n    if return_fig_ax is True:\n        return running_process.fig, running_process.ax\n\n    return", "docstring": "Main function for creating these plots.\n\nReads in plot info dict from json file or dictionary in script.\n\nArgs:\nreturn_fig_ax (bool, optional): Return figure and axes objects.\n\nReturns:\n2-element tuple containing\n- **fig** (*obj*): Figure object for customization outside of those in this program.\n- **ax** (*obj*): Axes object for customization outside of those in this program.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_wrapping_data=None):\n        \n        super(Key, self).__init__()\n\n        self.cryptographic_algorithm = None\n        self.cryptographic_length = None\n        self.key_format_type = None\n        self.key_wrapping_data = key_wrapping_data\n\n        \n        \n        self._cryptographic_parameters = list()\n\n        \n        \n        self._usage_limits = None", "docstring": "Create a Key object.\n\nArgs:\nkey_wrapping_data(dict): A dictionary containing key wrapping data\nsettings, describing how the key value has been wrapped.\nOptional, defaults to None.", "source": "juraj-google-style"}
{"code": "def floordiv(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"floordiv\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Divides this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the divide against this.\naxis: The axis to divide over.\nlevel: The Multilevel index level to apply divide over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the Divide applied.", "source": "juraj-google-style"}
{"code": "def delete(self, version_name):\n    \n    name = ('%s/versions/%s' % (self._full_model_name, version_name))\n    response = self._api.projects().models().versions().delete(name=name).execute()\n    if 'name' not in response:\n      raise Exception('Invalid response from service. \"name\" is not found.')\n    _util.wait_for_long_running_operation(response['name'])", "docstring": "Delete a version of model.\n\nArgs:\nversion_name: the name of the version in short form, such as \"v1\".", "source": "juraj-google-style"}
{"code": "def access_token(self):\n    if ((self._access_token is None) or (self.expiration_time <= int(time.time()))):\n        resp = self.make_access_request()\n        self._access_token = resp.json()['access_token']\n    return self._access_token", "docstring": "Stores always valid OAuth2 access token.\n\nNote:\nAccessing this property may result in HTTP request.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _convert(x, factor1, factor2):\n    return ((x * factor2) / (((1 - x) * factor1) + (x * factor2)))", "docstring": "Converts mixing ratio x in comp1 - comp2 tie line to that in\nc1 - c2 tie line.\n\nArgs:\nx (float): Mixing ratio x in comp1 - comp2 tie line, a float\nbetween 0 and 1.\nfactor1 (float): Compositional ratio between composition c1 and\nprocessed composition comp1. E.g., factor for\nComposition('SiO2') and Composition('O') is 2.0.\nfactor2 (float): Compositional ratio between composition c2 and\nprocessed composition comp2.\n\nReturns:\nMixing ratio in c1 - c2 tie line, a float between 0 and 1.", "source": "codesearchnet"}
{"code": "def dict_to_schema(schema_dict, required, allow_custom_keys=True, modifier=None):\n    \n    if modifier:\n        modifier = Use(modifier)\n\n    def _to(value):\n        if isinstance(value, dict):\n            d = {}\n            for k, v in value.iteritems():\n                if isinstance(k, basestring):\n                    k = Required(k) if required else Optional(k)\n                d[k] = _to(v)\n            if allow_custom_keys:\n                d[Optional(basestring)] = modifier or object\n            schema = Schema(d)\n        elif modifier:\n            schema = And(value, modifier)\n        else:\n            schema = value\n        return schema\n\n    return _to(schema_dict)", "docstring": "Convert a dict of Schemas into a Schema.\n\nArgs:\nrequired (bool): Whether to make schema keys optional or required.\nallow_custom_keys (bool, optional): If True, creates a schema that\nallows custom items in dicts.\nmodifier (callable): Functor to apply to dict values - it is applied\nvia `Schema.Use`.\n\nReturns:\nA `Schema` object.", "source": "juraj-google-style"}
{"code": "def stop_loss(self, accountID, **kwargs):\n    return self.create(accountID, order=StopLossOrderRequest(**kwargs))", "docstring": "Shortcut to create a Stop Loss Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a StopLossOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def _await_flow(self, client, flow_id):\n    print('{0:s}: Waiting to finish'.format(flow_id))\n    while True:\n        try:\n            status = client.Flow(flow_id).Get().data\n        except grr_errors.UnknownError:\n            msg = 'Unable to stat flow {0:s} for host {1:s}'.format(flow_id, client.data.os_info.fqdn.lower())\n            self.state.add_error(msg)\n            raise DFTimewolfError('Unable to stat flow {0:s} for host {1:s}'.format(flow_id, client.data.os_info.fqdn.lower()))\n        if (status.state == flows_pb2.FlowContext.ERROR):\n            message = status.context.backtrace\n            if ('ArtifactNotRegisteredError' in status.context.backtrace):\n                message = status.context.backtrace.split('\\n')[(- 2)]\n            raise DFTimewolfError('{0:s}: FAILED! Message from GRR:\\n{1:s}'.format(flow_id, message))\n        if (status.state == flows_pb2.FlowContext.TERMINATED):\n            print('{0:s}: Complete'.format(flow_id))\n            break\n        time.sleep(self._CHECK_FLOW_INTERVAL_SEC)", "docstring": "Awaits flow completion.\n\nArgs:\nclient: GRR Client object in which to await the flow.\nflow_id: string containing ID of flow to await.\n\nRaises:\nDFTimewolfError: if flow error encountered.", "source": "codesearchnet"}
{"code": "def blit(\n        self,\n        dest: tcod.console.Console,\n        fill_fore: bool = True,\n        fill_back: bool = True,\n    ) -> None:\n        \n        if not dest:\n            dest = tcod.console.Console._from_cdata(ffi.NULL)\n        if dest.width != self.width or dest.height != self.height:\n            raise ValueError(\n                \"ConsoleBuffer.blit: \"\n                \"Destination console has an incorrect size.\"\n            )\n\n        if fill_back:\n            bg = dest.bg.ravel()\n            bg[0::3] = self.back_r\n            bg[1::3] = self.back_g\n            bg[2::3] = self.back_b\n\n        if fill_fore:\n            fg = dest.fg.ravel()\n            fg[0::3] = self.fore_r\n            fg[1::3] = self.fore_g\n            fg[2::3] = self.fore_b\n            dest.ch.ravel()[:] = self.char", "docstring": "Use libtcod's \"fill\" functions to write the buffer to a console.\n\nArgs:\ndest (Console): Console object to modify.\nfill_fore (bool):\nIf True, fill the foreground color and characters.\nfill_back (bool):\nIf True, fill the background color.", "source": "juraj-google-style"}
{"code": "def normalize_placeholders(arg, inject_quotes=False):\n    number_placeholders = re.findall('{{\\\\s*\\\\d+\\\\s*}}', arg)\n    for number_placeholder in number_placeholders:\n        number = re.search('\\\\d+', number_placeholder).group()\n        arg = arg.replace(number_placeholder, (('{{_' + number) + '}}'))\n    return (arg.replace('{{', '\"{{').replace('}}', '}}\"') if inject_quotes else arg)", "docstring": "Normalize placeholders' names so that the template can be ingested into Jinja template engine.\n- Jinja does not accept numbers as placeholder names, so add a \"_\"\nbefore the numbers to make them valid placeholder names.\n- Surround placeholders expressions with \"\" so we can preserve spaces inside the positional arguments.\n\nArgs:\narg: The string to process.\ninject_qoutes: True if we want to surround placeholders with a pair of quotes.\n\nReturns:\nA processed string where placeholders are surrounded by \"\" and\nnumbered placeholders are prepended with \"_\".", "source": "codesearchnet"}
{"code": "def _get_course_content(course_id, course_url, sailthru_client, site_code, config):\n    cache_key = '{}:{}'.format(site_code, course_url)\n    response = cache.get(cache_key)\n    if (not response):\n        try:\n            sailthru_response = sailthru_client.api_get('content', {'id': course_url})\n            if (not sailthru_response.is_ok()):\n                response = {}\n            else:\n                response = sailthru_response.json\n                cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))\n        except SailthruClientError:\n            response = {}\n        if (not response):\n            logger.error('Could not get course data from Sailthru on enroll/purchase event. Calling Ecommerce Course API to get course info for enrollment confirmation email')\n            response = _get_course_content_from_ecommerce(course_id, site_code=site_code)\n            if response:\n                cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))\n    return response", "docstring": "Get course information using the Sailthru content api or from cache.\n\nIf there is an error, just return with an empty response.\n\nArguments:\ncourse_id (str): course key of the course\ncourse_url (str): LMS url for course info page.\nsailthru_client (object): SailthruClient\nsite_code (str): site code\nconfig (dict): config options\n\nReturns:\ncourse information from Sailthru", "source": "codesearchnet"}
{"code": "def find_or_build(cls, **kwargs):\n    keys = (kwargs.pop('keys') if ('keys' in kwargs) else [])\n    return (cls.first(**subdict(kwargs, keys)) or cls.build(**kwargs))", "docstring": "Checks if an instance already exists in db with these kwargs else\nreturns a new, saved instance of the service's model class.\n\nArgs:\n**kwargs: instance parameters", "source": "codesearchnet"}
{"code": "def fit(self, X=None, y=None, **kwargs):\n    context = {'X': X, 'y': y}\n    context.update(kwargs)\n    last_block_name = list(self.blocks.keys())[(- 1)]\n    for (block_name, block) in self.blocks.items():\n        LOGGER.debug('Fitting block %s', block_name)\n        try:\n            fit_args = self._get_block_args(block_name, block.fit_args, context)\n            block.fit(**fit_args)\n        except Exception:\n            LOGGER.exception('Exception caught fitting MLBlock %s', block_name)\n            raise\n        if (block_name != last_block_name):\n            LOGGER.debug('Producing block %s', block_name)\n            try:\n                produce_args = self._get_block_args(block_name, block.produce_args, context)\n                outputs = block.produce(**produce_args)\n                output_dict = self._get_outputs(block_name, outputs, block.produce_output)\n                context.update(output_dict)\n            except Exception:\n                LOGGER.exception('Exception caught producing MLBlock %s', block_name)\n                raise", "docstring": "Fit the blocks of this pipeline.\n\nSequentially call the `fit` and the `produce` methods of each block,\ncapturing the outputs each `produce` method before calling the `fit`\nmethod of the next one.\n\nDuring the whole process a context dictionary is built, where both the\npassed arguments and the captured outputs of the `produce` methods\nare stored, and from which the arguments for the next `fit` and\n`produce` calls will be taken.\n\nArgs:\nX: Fit Data, which the pipeline will learn from.\ny: Fit Data labels, which the pipeline will use to learn how to\nbehave.\n**kwargs: Any additional keyword arguments will be directly added\nto the context dictionary and available for the blocks.", "source": "codesearchnet"}
{"code": "def combine_first_two_dimensions(x):\n    ret = tf.reshape(x, tf.concat([[(- 1)], common_layers.shape_list(x)[2:]], 0))\n    old_shape = x.get_shape().dims\n    (a, b) = old_shape[:2]\n    new_shape = ([((a * b) if (a and b) else None)] + old_shape[2:])\n    ret.set_shape(new_shape)\n    return ret", "docstring": "Reshape x so that the first two dimension become one.\n\nArgs:\nx: a Tensor with shape [a, b, ...]\n\nReturns:\na Tensor with shape [ab, ...]", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context, file_data):\n    \n    super(FakeFile, self).__init__(resolver_context)\n    self._current_offset = 0\n    self._file_data = file_data\n    self._size = 0", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_data (bytes): fake file data.", "source": "juraj-google-style"}
{"code": "def flush(self, hard=False):\n    if (not self.servers):\n        return\n    if hard:\n        self.client.flush_all()\n        self.reset_stats()\n    else:\n        from uuid import uuid4\n        tag = uuid4().hex\n        if self.debug:\n            tag = ('flushed' + tag)\n        self.current = tag", "docstring": "Drop existing entries from the cache.\n\nArgs:\nhard (bool): If True, all current entries are flushed from the\nserver(s), which affects all users. If False, only the local\nprocess is affected.", "source": "codesearchnet"}
{"code": "def sas_logical_interconnect_groups(self):\n    if (not self.__sas_logical_interconnect_groups):\n        self.__sas_logical_interconnect_groups = SasLogicalInterconnectGroups(self.__connection)\n    return self.__sas_logical_interconnect_groups", "docstring": "Gets the SasLogicalInterconnectGroups API client.\n\nReturns:\nSasLogicalInterconnectGroups:", "source": "codesearchnet"}
{"code": "def get_decomp_and_e_above_hull(self, entry, allow_negative=False):\n    if (entry in self.stable_entries):\n        return ({entry: 1}, 0)\n    comp = entry.composition\n    (facet, simplex) = self._get_facet_and_simplex(comp)\n    decomp_amts = simplex.bary_coords(self.pd_coords(comp))\n    decomp = {self.qhull_entries[f]: amt for (f, amt) in zip(facet, decomp_amts) if (abs(amt) > PhaseDiagram.numerical_tol)}\n    energies = [self.qhull_entries[i].energy_per_atom for i in facet]\n    ehull = (entry.energy_per_atom - np.dot(decomp_amts, energies))\n    if (allow_negative or (ehull >= (- PhaseDiagram.numerical_tol))):\n        return (decomp, ehull)\n    raise ValueError('No valid decomp found!')", "docstring": "Provides the decomposition and energy above convex hull for an entry.\nDue to caching, can be much faster if entries with the same composition\nare processed together.\n\nArgs:\nentry: A PDEntry like object\nallow_negative: Whether to allow negative e_above_hulls. Used to\ncalculate equilibrium reaction energies. Defaults to False.\n\nReturns:\n(decomp, energy above convex hull)  Stable entries should have\nenergy above hull of 0. The decomposition is provided as a dict of\n{Entry: amount}.", "source": "codesearchnet"}
{"code": "def attention_lm_prepare_decoder(targets, hparams):\n  \n  if hparams.prepend_mode == \"prepend_inputs_full_attention\":\n    decoder_self_attention_bias = (\n        common_attention.attention_bias_prepend_inputs_full_attention(\n            common_attention.embedding_to_padding(targets)))\n  else:\n    decoder_self_attention_bias = (\n        common_attention.attention_bias_lower_triangle(\n            common_layers.shape_list(targets)[1]))\n  decoder_input = common_layers.shift_right_3d(targets)\n  if hparams.pos == \"timing\":\n    decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n  return (decoder_input, decoder_self_attention_bias)", "docstring": "Prepare one shard of the model for the decoder.\n\nArgs:\ntargets: a Tensor.\nhparams: run hyperparameters\n\nReturns:\ndecoder_input: a Tensor, bottom of decoder stack\ndecoder_self_attention_bias: a Tensor, containing large negative values\nto implement masked attention and possibly biases for diagonal alignments", "source": "juraj-google-style"}
{"code": "def is_native_xmon_op(op: ops.Operation) -> bool:\n    \n    return (isinstance(op, ops.GateOperation) and\n            is_native_xmon_gate(op.gate))", "docstring": "Check if the gate corresponding to an operation is a native xmon gate.\n\nArgs:\nop: Input operation.\n\nReturns:\nTrue if the operation is native to the xmon, false otherwise.", "source": "juraj-google-style"}
{"code": "def get_saved_issue_data(self, issue, namespace='open'):\n        \n\n        if isinstance(issue, int):\n            issue_number = str(issue)\n        elif isinstance(issue, basestring):\n            issue_number = issue\n        else:\n            issue_number = issue.number\n\n        issue_data_key = self._issue_data_key(namespace)\n        issue_data = self.data.get(issue_data_key,\n            {})\n\n        _data = issue_data.get(str(issue_number), {})\n        issue_data[str(issue_number)] = _data\n        return _data", "docstring": "Returns issue data from local data.\n\nArgs:\nissue:\n`int`. Github issue number.\nnamespace:\n`str`. Namespace for storing this issue.", "source": "juraj-google-style"}
{"code": "def handle_command(command):\n  \n  try:\n    cmds = command.split(None, 1)\n    cmd = cmds[0]\n    if cmd == 'new':\n      add_task(get_arg(cmds))\n    elif cmd == 'done':\n      mark_done(int(get_arg(cmds)))\n    elif cmd == 'list':\n      for task in format_tasks(list_tasks()):\n        print task\n    elif cmd == 'delete':\n      delete_task(int(get_arg(cmds)))\n    else:\n      print_usage()\n  except Exception, e:  \n    print e\n    print_usage()", "docstring": "Accepts a string command and performs an action.\n\nArgs:\ncommand: the command to run as a string.", "source": "juraj-google-style"}
{"code": "def list(self, **kwargs):\n        \n        resp = self.client.api.volumes(**kwargs)\n        if not resp.get('Volumes'):\n            return []\n        return [self.prepare_model(obj) for obj in resp['Volumes']]", "docstring": "List volumes. Similar to the ``docker volume ls`` command.\n\nArgs:\nfilters (dict): Server-side list filtering options.\n\nReturns:\n(list of :py:class:`Volume`): The volumes.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def dp020(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `dp020`'.format(value))\n    self._dp020 = value", "docstring": "Corresponds to IDD Field `dp020`\nDew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `dp020`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _execute(self, command, data=None, unpack=True):\n        \n        if not data:\n            data = {}\n        data.setdefault('element_id', self.element_id)\n        return self._driver._execute(command, data, unpack)", "docstring": "Private method to execute command with data.\n\nArgs:\ncommand(Command): The defined command.\ndata(dict): The uri variable and body.\n\nReturns:\nThe unwrapped value field in the json response.", "source": "juraj-google-style"}
{"code": "def __init__(self, tensor_callable, dtype, device):\n    super().__init__(tensor_callable, None, None, dtype, device)", "docstring": "Initializes a `Callable` object.\n\nArgs:\ntensor_callable: A callable that takes no arguments and returns a Tensor.\ndtype: Dtype of the tensor returned by the callable.\ndevice: Device of the tensor returned by the callable.", "source": "github-repos"}
{"code": "def get_dropout(x, rate=0.0, init=True):\n  \n  if init or rate == 0:\n    return x\n  return tf.layers.dropout(x, rate=rate, training=True)", "docstring": "Dropout x with dropout_rate = rate.\n\nApply zero dropout during init or prediction time.\n\nArgs:\nx: 4-D Tensor, shape=(NHWC).\nrate: Dropout rate.\ninit: Initialization.\nReturns:\nx: activations after dropout.", "source": "juraj-google-style"}
{"code": "def _reset_non_empty(self, indices):\n    \n    observ = tf.py_func(\n        self._batch_env.reset, [indices], self.observ_dtype, name=\"reset\")\n    observ.set_shape(indices.get_shape().concatenate(self.observ_shape))\n    with tf.control_dependencies([\n        tf.scatter_update(self._observ, indices, observ)]):\n      return tf.identity(observ)", "docstring": "Reset the batch of environments.\n\nArgs:\nindices: The batch indices of the environments to reset; defaults to all.\n\nReturns:\nBatch tensor of the new observations.", "source": "juraj-google-style"}
{"code": "def ConcatWith(x, tensor, dim):\n    \n    if type(tensor) != list:\n        tensor = [tensor]\n    return tf.concat([x] + tensor, dim)", "docstring": "A wrapper around ``tf.concat`` to cooperate with :class:`LinearWrap`.\n\nArgs:\nx (tf.Tensor): input\ntensor (list[tf.Tensor]): a tensor or list of tensors to concatenate with x.\nx will be at the beginning\ndim (int): the dimension along which to concatenate\n\nReturns:\ntf.Tensor: ``tf.concat([x] + tensor, dim)``", "source": "juraj-google-style"}
{"code": "def _copy_stream_position(position):\n    \n    if isinstance(position, types.StreamPosition):\n        output = types.StreamPosition()\n        output.CopyFrom(position)\n        return output\n\n    return types.StreamPosition(**position)", "docstring": "Copy a StreamPosition.\n\nArgs:\nposition (Union[ \\\ndict, \\\n~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \\\n]):\nStreamPostion (or dictionary in StreamPosition format) to copy.\n\nReturns:\n~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:\nA copy of the input StreamPostion.", "source": "juraj-google-style"}
{"code": "def Get(self, key):\n    \n    if key not in self._hash:\n      raise KeyError(key)\n\n    node = self._hash[key]\n\n    self._age.Unlink(node)\n    self._age.AppendNode(node)\n\n    return node.data", "docstring": "Fetch the object from cache.\n\nObjects may be flushed from cache at any time. Callers must always\nhandle the possibility of KeyError raised here.\n\nArgs:\nkey: The key used to access the object.\n\nReturns:\nCached object.\n\nRaises:\nKeyError: If the object is not present in the cache.", "source": "juraj-google-style"}
{"code": "def add_model_tags(self, tags: Union[List[str], str]) -> None:\n    if isinstance(tags, str):\n        tags = [tags]\n    if self.model_tags is None:\n        self.model_tags = []\n    for tag in tags:\n        if tag not in self.model_tags:\n            self.model_tags.append(tag)", "docstring": "Add custom tags into the model that gets pushed to the Hugging Face Hub. Will\nnot overwrite existing tags in the model.\n\nArgs:\ntags (`Union[List[str], str]`):\nThe desired tags to inject in the model\n\nExamples:\n\n```python\nfrom transformers import AutoModel\n\nmodel = AutoModel.from_pretrained(\"google-bert/bert-base-cased\")\n\nmodel.add_model_tags([\"custom\", \"custom-bert\"])\n\n# Push the model to your namespace with the name \"my-custom-bert\".\nmodel.push_to_hub(\"my-custom-bert\")\n```", "source": "github-repos"}
{"code": "def verify_binary(flag_name, process_args=None):\n    if (process_args is None):\n        process_args = []\n    path = getattr(FLAGS, flag_name)\n    if (not path):\n        logging.error(('Flag %r not set' % flag_name))\n        sys.exit(1)\n    with open(os.devnull, 'w') as dev_null:\n        try:\n            subprocess.check_call(([path] + process_args), stdout=dev_null, stderr=subprocess.STDOUT)\n        except:\n            logging.exception('--%s binary at path %r does not work', flag_name, path)\n            sys.exit(1)", "docstring": "Exits the program if the binary from the given flag doesn't run.\n\nArgs:\nflag_name: Name of the flag that should be the path to the binary.\nprocess_args: Args to pass to the binary to do nothing but verify\nthat it's working correctly (something like \"--version\") is good.\nOptional. Defaults to no args.\n\nRaises:\nSystemExit with error if the process did not work.", "source": "codesearchnet"}
{"code": "def consult_robots_txt(self, request: HTTPRequest) -> bool:\n        \n        if not self._robots_txt_checker:\n            return True\n\n        result = yield from self._robots_txt_checker.can_fetch(request)\n        return result", "docstring": "Consult by fetching robots.txt as needed.\n\nArgs:\nrequest: The request to be made\nto get the file.\n\nReturns:\nTrue if can fetch\n\nCoroutine", "source": "juraj-google-style"}
{"code": "def cache_penalty_model(penalty_model, database=None):\n    \n\n    \n    if not _is_index_labelled(penalty_model.graph):\n        mapping, __ = _graph_canonicalization(penalty_model.graph)\n        penalty_model = penalty_model.relabel_variables(mapping, inplace=False)\n\n    \n    \n    if database is None:\n        conn = cache_connect()\n    else:\n        conn = cache_connect(database)\n\n    \n    with conn as cur:\n        insert_penalty_model(cur, penalty_model)\n\n    \n    conn.close()", "docstring": "Caching function for penaltymodel_cache.\n\nArgs:\npenalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to\nbe cached.\ndatabase (str, optional): The path to the desired sqlite database\nfile. If None, will use the default.", "source": "juraj-google-style"}
{"code": "def original_args(self):\n    return self._original_args", "docstring": "A `SessionRunArgs` object holding the original arguments of `run()`.\n\nIf user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then this\nfield is equal to SessionRunArgs(a, b).\n\nReturns:\nA `SessionRunArgs` object", "source": "github-repos"}
{"code": "def Webhook(self, request, global_params=None):\n    config = self.GetMethodConfig('Webhook')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "ReceiveWebhook is called when the API receives a GitHub webhook.\n\nArgs:\nrequest: (CloudbuildWebhookRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Empty) The response message.", "source": "github-repos"}
{"code": "def _command_template(self, switches, objectInput=None):\n        \n        command = [\"java\", \"-jar\", self.file_jar, \"-eUTF-8\"]\n        if self.memory_allocation:\n            command.append(\"-Xmx{}\".format(self.memory_allocation))\n        command.extend(switches)\n\n        if not objectInput:\n            objectInput = subprocess.PIPE\n\n        log.debug(\"Subprocess command: {}\".format(\", \".join(command)))\n\n        if six.PY2:\n            with open(os.devnull, \"w\") as devnull:\n                out = subprocess.Popen(\n                    command,\n                    stdin=objectInput,\n                    stdout=subprocess.PIPE,\n                    stderr=devnull)\n\n        elif six.PY3:\n            out = subprocess.Popen(\n                command,\n                stdin=objectInput,\n                stdout=subprocess.PIPE,\n                stderr=subprocess.DEVNULL)\n\n        stdoutdata, _ = out.communicate()\n        return stdoutdata.decode(\"utf-8\").strip()", "docstring": "Template for Tika app commands\n\nArgs:\nswitches (list): list of switches to Tika app Jar\nobjectInput (object): file object/standard input to analyze\n\nReturn:\nStandard output data (unicode Python 2, str Python 3)", "source": "juraj-google-style"}
{"code": "def get_categorical_features_to_sampling(examples, top_k):\n    observed_features = collections.defaultdict(list)\n    for example in examples:\n        for feature_name in get_categorical_feature_names(example):\n            original_feature = parse_original_feature_from_example(example, feature_name)\n            observed_features[feature_name].extend(original_feature.original_value)\n    result = {}\n    for (feature_name, feature_values) in sorted(iteritems(observed_features)):\n        samples = [word for (word, count) in collections.Counter(feature_values).most_common(top_k) if (count > 1)]\n        if samples:\n            result[feature_name] = {'samples': samples}\n    return result", "docstring": "Returns categorical features and a sampling of their most-common values.\n\nThe results of this slow function are used by the visualization repeatedly,\nso the results are cached.\n\nArgs:\nexamples: Examples to read to get feature samples.\ntop_k: Max number of samples to return per feature.\n\nReturns:\nA dict of feature_name -> {'samples': ['Married-civ-spouse',\n'Never-married', 'Divorced']}.\n\nThere is one key for each categorical feature.\n\nCurrently, the inner dict just has one key, but this structure leaves room\nfor further expansion, and mirrors the structure used by\n`get_numeric_features_to_observed_range`.", "source": "codesearchnet"}
{"code": "def _indent(lines, prefix='  '):\n    indented = []\n    for line in lines.split('\\n'):\n        indented.append((prefix + line))\n    return '\\n'.join(indented)", "docstring": "Indent some text.\n\nNote that this is present as ``textwrap.indent``, but not in Python 2.\n\nArgs:\nlines (str): The newline delimited string to be indented.\nprefix (Optional[str]): The prefix to indent each line with. Default\nto two spaces.\n\nReturns:\nstr: The newly indented content.", "source": "codesearchnet"}
{"code": "def on_created(self, event):\n    self._logger.debug('Detected create event on watched path: %s', event.src_path)\n    self._process_event(event)", "docstring": "Function called everytime a new file is created.\n\nArgs:\nevent: Event to process.", "source": "codesearchnet"}
{"code": "def generate_csr(self, csr_data, bay_number=None):\n        \n        uri = \"{}/https/certificaterequest\".format(self.data['uri'])\n\n        if bay_number:\n            uri += \"?bayNumber=%d\" % (bay_number)\n\n        headers = {'Content-Type': 'application/json'}\n\n        return self._helper.do_post(uri, csr_data, -1, headers)", "docstring": "Creates a Certificate Signing Request (CSR) for an enclosure.\n\nArgs:\ncsr_data: Dictionary with csr details.\nbay_number: OA from which the CSR should be generated.\n\nReturns:\nEnclosure.", "source": "juraj-google-style"}
{"code": "def valid_identifiers(self):\n    funcs = (list(utils.find_all(self.contexts[(- 1)])) + list(self.builtins))\n    return funcs", "docstring": "Get a list of all valid identifiers for the current context.\n\nReturns:\nlist(str): A list of all of the valid identifiers for this context", "source": "codesearchnet"}
{"code": "def set_display_name(self, display_name):\n    self.displayname = display_name\n    return self.api.set_display_name(self.user_id, display_name)", "docstring": "Set this users display name.\n\nArgs:\ndisplay_name (str): Display Name", "source": "codesearchnet"}
{"code": "def get_by_hostname(self, hostname):\n        \n        resources = self._client.get_all()\n\n        resources_filtered = [x for x in resources if x['hostname'] == hostname]\n\n        if resources_filtered:\n            return resources_filtered[0]\n        else:\n            return None", "docstring": "Retrieve a storage system by its hostname.\n\nWorks only in API500 onwards.\n\nArgs:\nhostname: Storage system hostname.\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def fillup_layer(names):  \n        \n        longest = max([len(name) for name in names])\n        inputs_wires = []\n        for name in names:\n            inputs_wires.append(InputWire(name.rjust(longest)))\n        return inputs_wires", "docstring": "Creates a layer with InputWire elements.\nArgs:\nnames (list): List of names for the wires.\n\nReturns:\nlist: The new layer", "source": "juraj-google-style"}
{"code": "def get_samples_live_last(self, sensor_id):\n    url = 'https:\n    headers = self.__gen_headers()\n    headers['Content-Type'] = 'application/json'\n    params = {'sensorId': sensor_id}\n    url = self.__append_url_params(url, params)\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get the last sample recorded by the sensor.\n\nArgs:\nsensor_id (string): hexadecimal id of the sensor to query, e.g.\n``0x0013A20040B65FAD``\n\nReturns:\nlist: dictionary objects containing sample data", "source": "codesearchnet"}
{"code": "def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus/', sku, '/versions?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List available versions for a given publisher's sku.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nlocation (str): Azure data center location. E.g. westus.\npublisher (str): VM image publisher. E.g. MicrosoftWindowsServer.\noffer (str): VM image offer. E.g. WindowsServer.\nsku (str): VM image sku. E.g. 2016-Datacenter.\n\nReturns:\nHTTP response with JSON list of versions.", "source": "codesearchnet"}
{"code": "def reset_subscription_since(self, account_id, datetime_str):\n        \n        data = {\n            'account_id': account_id,\n            'datetime': datetime_str,\n        }\n        return self._perform_post_request(self.reset_subscription_since_endpoint, data, self.token_header)", "docstring": "Handler for `--reset-subscription-since` command.\n\nArgs:\naccount_id(int): id of the account to reset.\ndatetime_str(str): string representing the datetime used in the\nnext poll to retrieve data since.\n\nReturns:\n(str) json encoded response.\n\nNOTES:\nWe don't care about validation here, we demand the responsibility to\nthe backend.", "source": "juraj-google-style"}
{"code": "def disconnect(self):\n    result = False\n    logger.debug('SK8.disconnect({})'.format(self.conn_handle))\n    if (self.conn_handle >= 0):\n        logger.debug('Calling dongle disconnect')\n        result = self.dongle._disconnect(self.conn_handle)\n        self.conn_handle = (- 1)\n        self.packets = 0\n    return result", "docstring": "Disconnect the dongle from this SK8.\n\nSimply closes the active BLE connection to the device represented by the current instance.\n\nReturns:\nbool. True if connection was closed, False if not (e.g. if already closed).", "source": "codesearchnet"}
{"code": "def filter_devices(ads, func):\n    results = []\n    for ad in ads:\n        if func(ad):\n            results.append(ad)\n    return results", "docstring": "Finds the AndroidDevice instances from a list that match certain\nconditions.\n\nArgs:\nads: A list of AndroidDevice instances.\nfunc: A function that takes an AndroidDevice object and returns True\nif the device satisfies the filter condition.\n\nReturns:\nA list of AndroidDevice instances that satisfy the filter condition.", "source": "github-repos"}
{"code": "def _QueryHash(self, digest):\n    if (not self._url):\n        self._url = '{0:s}:\n    request_data = {self.lookup_hash: digest}\n    try:\n        json_response = self.MakeRequestAndDecodeJSON(self._url, 'POST', data=request_data)\n    except errors.ConnectionError as exception:\n        json_response = None\n        logger.error('Unable to query Viper with error: {0!s}.'.format(exception))\n    return json_response", "docstring": "Queries the Viper Server for a specfic hash.\n\nArgs:\ndigest (str): hash to look up.\n\nReturns:\ndict[str, object]: JSON response or None on error.", "source": "codesearchnet"}
{"code": "def is_unused(input, model_file=None, model_proto=None, name=None):\n  \n\n  return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(\n      input, model_file=model_file, model_proto=model_proto, name=name,\n      piece_type=2)", "docstring": "Returns true if input id is unused piece.\n\nArgs:\ninput: An arbitrary tensor of int32.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of bool with the same shape as input.", "source": "juraj-google-style"}
{"code": "def _validate_isvalid_uncertainty(self, isvalid_uncertainty, field, value):\n    self._validate_isvalid_quantity(True, field, value)\n    if ((len(value) > 1) and (value[1]['uncertainty-type'] != 'relative')):\n        if (value[1].get('uncertainty') is not None):\n            self._validate_isvalid_quantity(True, field, [value[1]['uncertainty']])\n        if (value[1].get('upper-uncertainty') is not None):\n            self._validate_isvalid_quantity(True, field, [value[1]['upper-uncertainty']])\n        if (value[1].get('lower-uncertainty') is not None):\n            self._validate_isvalid_quantity(True, field, [value[1]['lower-uncertainty']])", "docstring": "Checks for valid given value and appropriate units with uncertainty.\n\nArgs:\nisvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked\nfield (`str`): property associated with the quantity in question.\nvalue (`list`): list with the string of the value of the quantity and a dictionary of\nthe uncertainty\n\nThe rule's arguments are validated against this schema:\n{'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'list'}}", "source": "codesearchnet"}
{"code": "def map(self, internalize: Callable[([TExternalQubit], TInternalQubit)], externalize: Callable[([TInternalQubit], TExternalQubit)]) -> 'QubitOrder':\n\n    def func(qubits):\n        unwrapped_qubits = [internalize(q) for q in qubits]\n        unwrapped_result = self.order_for(unwrapped_qubits)\n        return tuple((externalize(q) for q in unwrapped_result))\n    return QubitOrder(func)", "docstring": "Transforms the Basis so that it applies to wrapped qubits.\n\nArgs:\nexternalize: Converts an internal qubit understood by the underlying\nbasis into an external qubit understood by the caller.\ninternalize: Converts an external qubit understood by the caller\ninto an internal qubit understood by the underlying basis.\n\nReturns:\nA basis that transforms qubits understood by the caller into qubits\nunderstood by an underlying basis, uses that to order the qubits,\nthen wraps the ordered qubits back up for the caller.", "source": "codesearchnet"}
{"code": "def call(self, input_features: TFModelInputType | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, decoder_inputs_embeds: Optional[Tuple[Union[np.ndarray, tf.Tensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:\n    outputs = self.model(input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import tensorflow as tf\n>>> from transformers import TFWhisperModel, AutoFeatureExtractor\n>>> from datasets import load_dataset\n\n>>> model = TFWhisperModel.from_pretrained(\"openai/whisper-base\")\n>>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"openai/whisper-base\")\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> inputs = feature_extractor(ds[0][\"audio\"][\"array\"], return_tensors=\"tf\")\n>>> input_features = inputs.input_features\n>>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id\n>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state\n>>> list(last_hidden_state.shape)\n[1, 2, 512]\n```", "source": "github-repos"}
{"code": "def write_block(self, block_, body):\n    \n    self.write('for ; πF.State() >= 0; πF.PopCheckpoint() {')\n    with self.indent_block():\n      self.write('switch πF.State() {')\n      self.write('case 0:')\n      for checkpoint in block_.checkpoints:\n        self.write_tmpl('case $state: goto Label$state', state=checkpoint)\n      self.write('default: panic(\"unexpected function state\")')\n      self.write('}')\n      \n      with self.indent_block(-1):\n        self.write(body)\n    self.write('}')", "docstring": "Outputs the boilerplate necessary for code blocks like functions.\n\nArgs:\nblock_: The Block object representing the code block.\nbody: String containing Go code making up the body of the code block.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    \n\n    \n    filename = parser_mediator.GetFilename()\n    if not filename.startswith('$I'):\n      raise errors.UnableToParseFile('Filename must start with $I.')\n\n    file_header_map = self._GetDataTypeMap('recycle_bin_metadata_file_header')\n\n    try:\n      file_header, _ = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Unable to parse Windows Recycle.Bin metadata file header with '\n          'error: {0!s}').format(exception))\n\n    if file_header.format_version not in self._SUPPORTED_FORMAT_VERSIONS:\n      raise errors.UnableToParseFile(\n          'Unsupported format version: {0:d}.'.format(\n              file_header.format_version))\n\n    if file_header.deletion_time == 0:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n      date_time = dfdatetime_filetime.Filetime(\n          timestamp=file_header.deletion_time)\n\n    event_data = WinRecycleBinEventData()\n    try:\n      event_data.original_filename = self._ParseOriginalFilename(\n          file_object, file_header.format_version)\n    except (ValueError, errors.ParseError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse original filename with error: {0!s}.'.format(\n              exception))\n\n    event_data.file_size = file_header.original_file_size\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_DELETED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Windows Recycle.Bin metadata ($I) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def flush(self, force: bool=False) -> bool | Any:\n    return self._messages.flush(force=force)", "docstring": "Flushes the underlying log message queue.\n\nArgs:\n* force: If True, force queue to flush\n\nReturns:\n* True, if flushed with no errors\n* False, if not flushed\n* Error value from logger, if flushed with errors", "source": "github-repos"}
{"code": "def get_symbol(self, symbol):\n    self._ensure_symbols_loaded()\n    if (type(symbol) is int):\n        return self._symbols_by_index[symbol]\n    else:\n        return self._symbols_by_name[symbol]", "docstring": "Get a specific symbol by index or name.\n\nArgs:\nsymbol(int or str): The index or name of the symbol to return.\n\nReturns:\nELF.Symbol: The symbol.\n\nRaises:\nKeyError: The requested symbol does not exist.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    try:\n      file_header = self._ReadFileHeader(file_object)\n    except (ValueError, errors.ParseError):\n      raise errors.UnableToParseFile('Unable to parse file header.')\n\n    tables = self._ReadTablesArray(file_object, file_header.tables_array_offset)\n\n    table = tables.get(self._RECORD_TYPE_APPLICATION_PASSWORD, None)\n    if table:\n      for record in table.records:\n        self._ParseApplicationPasswordRecord(parser_mediator, record)\n\n    table = tables.get(self._RECORD_TYPE_INTERNET_PASSWORD, None)\n    if table:\n      for record in table.records:\n        self._ParseInternetPasswordRecord(parser_mediator, record)", "docstring": "Parses a MacOS keychain file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def get_request_fields(self):\n    if hasattr(self, '_request_fields'):\n        return self._request_fields\n    include_fields = self.get_request_feature(self.INCLUDE)\n    exclude_fields = self.get_request_feature(self.EXCLUDE)\n    request_fields = {}\n    for (fields, include) in ((include_fields, True), (exclude_fields, False)):\n        if (fields is None):\n            continue\n        for field in fields:\n            field_segments = field.split('.')\n            num_segments = len(field_segments)\n            current_fields = request_fields\n            for (i, segment) in enumerate(field_segments):\n                last = (i == (num_segments - 1))\n                if segment:\n                    if last:\n                        current_fields[segment] = include\n                    else:\n                        if (segment not in current_fields):\n                            current_fields[segment] = {}\n                        current_fields = current_fields[segment]\n                elif (not last):\n                    raise exceptions.ParseError(('\"%s\" is not a valid field.' % field))\n    self._request_fields = request_fields\n    return request_fields", "docstring": "Parses the INCLUDE and EXCLUDE features.\n\nExtracts the dynamic field features from the request parameters\ninto a field map that can be passed to a serializer.\n\nReturns:\nA nested dict mapping serializer keys to\nTrue (include) or False (exclude).", "source": "codesearchnet"}
{"code": "def get_resource_from_handle(self, resource_handle):\n        \n        repo_type = resource_handle.get(\"repository_type\")\n        location = resource_handle.get(\"location\")\n        if not (repo_type and location):\n            raise ValueError(\"PackageRepositoryManager requires \"\n                             \"resource_handle objects to have a \"\n                             \"repository_type and location defined\")\n        path = \"%s@%s\" % (repo_type, location)\n        repo = self.get_repository(path)\n        resource = repo.get_resource_from_handle(resource_handle)\n        return resource", "docstring": "Get a resource.\n\nArgs:\nresource_handle (`ResourceHandle`): Handle of the resource.\n\nReturns:\n`PackageRepositoryResource` instance.", "source": "juraj-google-style"}
{"code": "def branch_lengths(self, terminal=True, internal=True):\n        \n        if not isinstance(terminal, bool):\n            raise TypeError(\"terminal must be a bool\")\n        if not isinstance(internal, bool):\n            raise TypeError(\"internal must be a bool\")\n        for node in self.traverse_preorder():\n            if (internal and not node.is_leaf()) or (terminal and node.is_leaf()):\n                if node.edge_length is None:\n                    yield 0\n                else:\n                    yield node.edge_length", "docstring": "Generator over the lengths of the selected branches of this ``Tree``. Edges with length ``None`` will be output as 0-length\n\nArgs:\n``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def from_string(cls, string):\n    lines = string.split('\\n')\n    timestep = int(lines[1])\n    natoms = int(lines[3])\n    box_arr = np.loadtxt(StringIO('\\n'.join(lines[5:8])))\n    bounds = box_arr[(:, :2)]\n    tilt = None\n    if ('xy xz yz' in lines[4]):\n        tilt = box_arr[(:, 2)]\n        x = (0, tilt[0], tilt[1], (tilt[0] + tilt[1]))\n        y = (0, tilt[2])\n        bounds -= np.array([[min(x), max(x)], [min(y), max(y)], [0, 0]])\n    box = LammpsBox(bounds, tilt)\n    data_head = lines[8].replace('ITEM: ATOMS', '').split()\n    data = pd.read_csv(StringIO('\\n'.join(lines[9:])), names=data_head, delim_whitespace=True)\n    return cls(timestep, natoms, box, data)", "docstring": "Constructor from string parsing.\n\nArgs:\nstring (str): Input string.", "source": "codesearchnet"}
{"code": "def get_default_query_from_module(module):\n  \n  if isinstance(module, types.ModuleType):\n    return module.__dict__.get(_SQL_MODULE_LAST, None)\n  return None", "docstring": "Given a %%sql module return the default (last) query for the module.\n\nArgs:\nmodule: the %%sql module.\n\nReturns:\nThe default query associated with this module.", "source": "juraj-google-style"}
{"code": "def PrintExtractionStatusHeader(self, processing_status):\n    self._output_writer.Write('Source path\\t\\t: {0:s}\\n'.format(self._source_path))\n    self._output_writer.Write('Source type\\t\\t: {0:s}\\n'.format(self._source_type))\n    if self._artifact_filters:\n        artifacts_string = ', '.join(self._artifact_filters)\n        self._output_writer.Write('Artifact filters\\t: {0:s}\\n'.format(artifacts_string))\n    if self._filter_file:\n        self._output_writer.Write('Filter file\\t\\t: {0:s}\\n'.format(self._filter_file))\n    self._PrintProcessingTime(processing_status)\n    self._PrintTasksStatus(processing_status)\n    self._output_writer.Write('\\n')", "docstring": "Prints the extraction status header.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"}
{"code": "def UpdateUser(self, user, ssh_keys):\n    if (not bool(USER_REGEX.match(user))):\n        self.logger.warning('Invalid user account name %s.', user)\n        return False\n    if (not self._GetUser(user)):\n        if (not (self._AddUser(user) and self._UpdateUserGroups(user, self.groups))):\n            return False\n    if (not self._UpdateSudoer(user, sudoer=True)):\n        return False\n    pw_entry = self._GetUser(user)\n    if (pw_entry and (os.path.basename(pw_entry.pw_shell) == 'nologin')):\n        message = 'Not updating user %s. User set `nologin` as login shell.'\n        self.logger.debug(message, user)\n        return True\n    try:\n        self._UpdateAuthorizedKeys(user, ssh_keys)\n    except (IOError, OSError) as e:\n        message = 'Could not update the authorized keys file for user %s. %s.'\n        self.logger.warning(message, user, str(e))\n        return False\n    else:\n        return True", "docstring": "Update a Linux user with authorized SSH keys.\n\nArgs:\nuser: string, the name of the Linux user account.\nssh_keys: list, the SSH key strings associated with the user.\n\nReturns:\nbool, True if the user account updated successfully.", "source": "codesearchnet"}
{"code": "def apply(self, func, workers=1, job_size=10000):\n    \n    if workers == 1:\n      for lines in self.iter_chunks(job_size):\n        yield func(lines)\n    else:\n      with ProcessPoolExecutor(max_workers=workers) as executor:\n        for result in executor.map(func, self.iter_chunks(job_size)):\n          yield result", "docstring": "Apply `func` to lines of text in parallel or sequential.\n\nArgs:\nfunc : a function that takes a list of lines.", "source": "juraj-google-style"}
{"code": "def CleanUpTest(cls, func):\n    \n    def new_method(self, *args, **kwargs):\n      mox_obj = getattr(self, 'mox', None)\n      cleanup_mox = False\n      if mox_obj and isinstance(mox_obj, Mox):\n        cleanup_mox = True\n      try:\n        func(self, *args, **kwargs)\n      finally:\n        if cleanup_mox:\n          mox_obj.UnsetStubs()\n      if cleanup_mox:\n        mox_obj.VerifyAll()\n    new_method.__name__ = func.__name__\n    new_method.__doc__ = func.__doc__\n    new_method.__module__ = func.__module__\n    return new_method", "docstring": "Adds Mox cleanup code to any MoxTestBase method.\n\nAlways unsets stubs after a test. Will verify all mocks for tests that\notherwise pass.\n\nArgs:\ncls: MoxTestBase or subclass; the class whose test method we are altering.\nfunc: method; the method of the MoxTestBase test class we wish to alter.\n\nReturns:\nThe modified method.", "source": "juraj-google-style"}
{"code": "def goto_step(self, inst: InstanceNode) -> InstanceNode:\n        \n        return inst.look_up(**self.parse_keys(inst.schema_node))", "docstring": "Return member instance of `inst` addressed by the receiver.\n\nArgs:\ninst: Current instance.", "source": "juraj-google-style"}
{"code": "def get_appliance(self, appliance_id):\n    \n    url = \"https:\n\n    headers = self.__gen_headers()\n    headers[\"Content-Type\"] = \"application/json\"\n\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get the information for a specified appliance\n\nArgs:\nappliance_id (string): identifiying string of appliance\n\nReturns:\nlist: dictionary object containing information about the specified appliance", "source": "juraj-google-style"}
{"code": "def Write2000256List(self, arr):\n        \n        for item in arr:\n            ba = bytearray(binascii.unhexlify(item))\n            ba.reverse()\n            self.WriteBytes(ba)", "docstring": "Write an array of 64 byte items to the stream.\n\nArgs:\narr (list): a list of 2000 items of 64 bytes in size.", "source": "juraj-google-style"}
{"code": "def __init__(self, sdat):\n        \n        self.sdat = sdat\n        self._last = UNDETERMINED\n        self._data = {None: _step.EmptyStep()}", "docstring": "Initialization of instances:\n\nArgs:\nsdat (:class:`StagyyData`): the StagyyData instance owning the\n:class:`_Steps` instance.\nAttributes:\nsdat (:class:`StagyyData`): the StagyyData instance owning the\n:class:`_Steps` instance.", "source": "juraj-google-style"}
{"code": "def do_block(args):\n    \n    rest_client = RestClient(args.url, args.user)\n\n    if args.subcommand == 'list':\n        block_generator = rest_client.list_blocks()\n        blocks = []\n        left = args.count\n        for block in block_generator:\n            blocks.append(block)\n            left -= 1\n            if left <= 0:\n                break\n\n        keys = ('num', 'block_id', 'batches', 'txns', 'signer')\n        headers = tuple(k.upper() if k != 'batches' else 'BATS' for k in keys)\n\n        def parse_block_row(block):\n            batches = block.get('batches', [])\n            txns = [t for b in batches for t in b['transactions']]\n            return (\n                block['header'].get('block_num', 0),\n                block['header_signature'],\n                len(batches),\n                len(txns),\n                block['header']['signer_public_key'])\n\n        if args.format == 'default':\n            fmt.print_terminal_table(headers, blocks, parse_block_row)\n\n        elif args.format == 'csv':\n            fmt.print_csv(headers, blocks, parse_block_row)\n\n        elif args.format == 'json' or args.format == 'yaml':\n            data = [{k: d for k, d in zip(keys, parse_block_row(b))}\n                    for b in blocks]\n\n            if args.format == 'yaml':\n                fmt.print_yaml(data)\n            elif args.format == 'json':\n                fmt.print_json(data)\n            else:\n                raise AssertionError('Missing handler: {}'.format(args.format))\n\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))\n\n    if args.subcommand == 'show':\n        output = rest_client.get_block(args.block_id)\n\n        if args.key:\n            if args.key in output:\n                output = output[args.key]\n            elif args.key in output['header']:\n                output = output['header'][args.key]\n            else:\n                raise CliException(\n                    'key \"{}\" not found in block or header'.format(args.key))\n\n        if args.format == 'yaml':\n            fmt.print_yaml(output)\n        elif args.format == 'json':\n            fmt.print_json(output)\n        else:\n            raise AssertionError('Missing handler: {}'.format(args.format))", "docstring": "Runs the block list or block show command, printing output to the\nconsole\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "juraj-google-style"}
{"code": "def scan(initial_state, scan_func):\n\n    def _apply_fn(dataset):\n        return dataset.scan(initial_state=initial_state, scan_func=scan_func)\n    return _apply_fn", "docstring": "A transformation that scans a function across an input dataset.\n\nThis transformation is a stateful relative of `tf.data.Dataset.map`.\nIn addition to mapping `scan_func` across the elements of the input dataset,\n`scan()` accumulates one or more state tensors, whose initial values are\n`initial_state`.\n\nArgs:\ninitial_state: A nested structure of tensors, representing the initial state\nof the accumulator.\nscan_func: A function that maps `(old_state, input_element)` to\n`(new_state, output_element)`. It must take two arguments and return a\npair of nested structures of tensors. The `new_state` must match the\nstructure of `initial_state`.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def save(hdf5_filename, array):\n    hdf5_filename = os.path.expanduser(hdf5_filename)\n    try:\n        h = h5py.File(hdf5_filename, 'w')\n        h.create_dataset('CUTOUT', data=array)\n        h.close()\n    except Exception as e:\n        raise ValueError('Could not save HDF5 file {0}.'.format(hdf5_filename))\n    return hdf5_filename", "docstring": "Export a numpy array to a HDF5 file.\n\nArguments:\nhdf5_filename (str): A filename to which to save the HDF5 data\narray (numpy.ndarray): The numpy array to save to HDF5\n\nReturns:\nString. The expanded filename that now holds the HDF5 data", "source": "codesearchnet"}
{"code": "def recv_result_from_workers(self):\n    info = MPI.Status()\n    result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)\n    logger.debug('Received result from workers: {}'.format(result))\n    return result", "docstring": "Receives a results from the MPI worker pool and send it out via 0mq\n\nReturns:\n--------\nresult: task result from the workers", "source": "codesearchnet"}
{"code": "def _get_dequantized_hist_mids_after_quantize(self, quant_min: float, quant_max: float) -> np.ndarray:\n    maxbound = 2 ** self._num_bits - 1\n    minbound = 0\n    scale = (quant_max - quant_min) / maxbound\n    zero_point = -quant_min / scale\n    if abs(zero_point) > 9000000000.0:\n        zero_point = 9000000000.0\n    if abs(scale) < 1e-09:\n        scale = 1e-09\n    zero_point = round(zero_point)\n    quantized_hist_mids = np.clip(np.round(self._hist_mids / scale) + zero_point, minbound, maxbound)\n    dequantized_hist_mids = scale * (quantized_hist_mids - zero_point)\n    return dequantized_hist_mids", "docstring": "Quantizes and dequantizes hist_mids using quant_min and quant_max.\n\nQuantization converts the range of numbers from [quant_min, quant_max] to\n[0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and\nvalues greater than quant_max are converted to 2^num_bits - 1.\n\nThe histogram represents the distribution of the data, and our goal is to\nfind the quant_min and quant_max that best describe this distribution. To do\nthis, we quantize hist_mids using quant_min and quant_max and dequantize\nthem again. Then the difference between hist_mids and dequantized hist_mids\nequates to quantization error when using quant_min and quant_max.\n\n\nArgs:\nquant_min: The minimum real value that can be represented by a quantized\nvalue.\nquant_max: The maximum real value that can be represented by a quantized\nvalue.\n\nReturns:\ndequantized hist_mids after quantizing by quant_min and quant_max", "source": "github-repos"}
{"code": "def _protobuf_value_to_string(value):\n  \n  value_in_json = json_format.MessageToJson(value)\n  if value.HasField(\"string_value\"):\n    \n    return value_in_json[1:-1]\n  return value_in_json", "docstring": "Returns a string representation of given google.protobuf.Value message.\n\nArgs:\nvalue: google.protobuf.Value message. Assumed to be of type 'number',\n'string' or 'bool'.", "source": "juraj-google-style"}
{"code": "def fetch(self, webfonts):\n    sorted_keys = sorted(webfonts.keys())\n    for webfont_name in sorted_keys:\n        self.get(webfont_name, webfonts[webfont_name])", "docstring": "Store every defined webfonts.\n\nWebfont are stored with sort on their name.\n\nArgs:\nwebfonts (dict): Dictionnary of webfont settings from\n``settings.ICOMOON_WEBFONTS``.", "source": "codesearchnet"}
{"code": "def py_to_weld_type(self, obj):\n        \n        if isinstance(obj, np.ndarray):\n            dtype = str(obj.dtype)\n            if dtype == 'int16':\n                base = WeldInt16()\n            elif dtype == 'int32':\n                base = WeldInt()\n            elif dtype == 'int64':\n                base = WeldLong()\n            elif dtype == 'float32':\n                base = WeldFloat()\n            elif dtype == 'float64':\n                base = WeldDouble()\n            elif dtype == 'bool':\n                base = WeldBit()\n            else:\n                base = WeldVec(WeldChar())  \n            for i in xrange(obj.ndim):\n                base = WeldVec(base)\n        elif isinstance(obj, str):\n            base = WeldVec(WeldChar())\n        else:\n            raise Exception(\"Invalid object type: unable to infer NVL type\")\n        return base", "docstring": "Summary\n\nArgs:\nobj (TYPE): Description\n\nReturns:\nTYPE: Description\n\nRaises:\nException: Description", "source": "juraj-google-style"}
{"code": "def _filter_and_bucket_subtokens(subtoken_counts, min_count):\n  \n  \n  subtoken_buckets = []\n  for subtoken, count in six.iteritems(subtoken_counts):\n    if count < min_count:  \n      continue\n    while len(subtoken_buckets) <= len(subtoken):\n      subtoken_buckets.append(set())\n    subtoken_buckets[len(subtoken)].add(subtoken)\n  return subtoken_buckets", "docstring": "Return a bucketed list of subtokens that are filtered by count.\n\nArgs:\nsubtoken_counts: defaultdict mapping subtokens to their counts\nmin_count: int count used to filter subtokens\n\nReturns:\nList of subtoken sets, where subtokens in set i have the same length=i.", "source": "juraj-google-style"}
{"code": "def _bisect(self, begin, end, listener):\n    step = ((end.date - begin.date) / 2)\n    while (abs(step) >= self._eps_bisect):\n        date = (begin.date + step)\n        if (self.SPEAKER_MODE == 'global'):\n            orb = self.propagate(date)\n        else:\n            orb = begin.propagate(date)\n        if ((listener(begin) * listener(orb)) > 0):\n            begin = orb\n        else:\n            end = orb\n        step = ((end.date - begin.date) / 2)\n    else:\n        end.event = listener.info(end)\n        return end", "docstring": "This method search for the zero-crossing of the watched parameter\n\nArgs:\nbegin (Orbit):\nend (Orbit)\nlistener (Listener)\nReturn\nReturn", "source": "codesearchnet"}
{"code": "def _ParseStorageMediaOptions(self, options):\n    self._ParseStorageMediaImageOptions(options)\n    self._ParseVSSProcessingOptions(options)\n    self._ParseCredentialOptions(options)\n    self._ParseSourcePathOption(options)", "docstring": "Parses the storage media options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def __init__(self, dataset_vid=None, vid=None, score=None):\n        \n        assert vid is not None, 'vid can not be None.'\n        assert dataset_vid is not None, 'dataset_vid can not be None.'\n        assert score is not None, 'score can not be None.'\n        self.dataset_vid = dataset_vid\n        self.vid = vid\n        self.score = score", "docstring": "Initalizes partition search result fields.\n\nArgs:\ndataset_vid (str): vid of the partition's dataset.\nvid (str): partition vid.\nscore (int): score of the search result.", "source": "juraj-google-style"}
{"code": "def from_pure(cls, z):\n        \n        return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))", "docstring": "Creates a pure composition.\n\nArgs:\nz (int): atomic number", "source": "juraj-google-style"}
{"code": "def _client_receive(self):\n    try:\n        response = self._client.readline()\n        if self.verbose_logging:\n            self.log.debug('Snippet received: %s', response)\n        elif _MAX_RPC_RESP_LOGGING_LENGTH >= len(response):\n            self.log.debug('Snippet received: %s', response)\n        else:\n            self.log.debug('Snippet received: %s... %d chars are truncated', response[:_MAX_RPC_RESP_LOGGING_LENGTH], len(response) - _MAX_RPC_RESP_LOGGING_LENGTH)\n        return response\n    except socket.error as e:\n        raise Error(self._ad, 'Encountered socket error reading RPC response \"%s\"' % e)", "docstring": "Receives the server's response of an Rpc message.\n\nReturns:\nRaw byte string of the response.\n\nRaises:\nError: a socket error occurred during the read.", "source": "github-repos"}
{"code": "def _sort_records_map(records):\n    ctx = context.get()\n    l = len(records)\n    key_records = ([None] * l)\n    logging.debug('Parsing')\n    for i in range(l):\n        proto = kv_pb.KeyValue()\n        proto.ParseFromString(records[i])\n        key_records[i] = (proto.key(), records[i])\n    logging.debug('Sorting')\n    key_records.sort(cmp=_compare_keys)\n    logging.debug('Writing')\n    mapper_spec = ctx.mapreduce_spec.mapper\n    params = input_readers._get_params(mapper_spec)\n    bucket_name = params.get('bucket_name')\n    filename = ((((((ctx.mapreduce_spec.name + '/') + ctx.mapreduce_id) + '/output-') + ctx.shard_id) + '-') + str(int(time.time())))\n    full_filename = ('/%s/%s' % (bucket_name, filename))\n    filehandle = cloudstorage.open(full_filename, mode='w')\n    with output_writers.GCSRecordsPool(filehandle, ctx=ctx) as pool:\n        for key_record in key_records:\n            pool.append(key_record[1])\n    logging.debug('Finalizing')\n    filehandle.close()\n    entity = _OutputFile(key_name=full_filename, parent=_OutputFile.get_root_key(ctx.mapreduce_id))\n    entity.put()", "docstring": "Map function sorting records.\n\nConverts records to KeyValue protos, sorts them by key and writes them\ninto new GCS file. Creates _OutputFile entity to record resulting\nfile name.\n\nArgs:\nrecords: list of records which are serialized KeyValue protos.", "source": "codesearchnet"}
{"code": "def handle_erroneous_response(self, response: requests.Response) -> NoReturn:\n    logger.debug('handling erroneous response: %s', response)\n    try:\n        err = BugZooException.from_dict(response.json())\n    except Exception:\n        err = UnexpectedResponse(response)\n    raise err", "docstring": "Attempts to decode an erroneous response into an exception, and to\nsubsequently throw that exception.\n\nRaises:\nBugZooException: the exception described by the error response.\nUnexpectedResponse: if the response cannot be decoded to an\nexception.", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)\n\n    bde_volume = pybde.volume()\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      bde.BDEVolumeOpen(\n          bde_volume, path_spec, file_object, resolver.Resolver.key_chain)\n    except:\n      file_object.close()\n      raise\n\n    self._bde_volume = bde_volume\n    self._file_object = file_object", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb'\nread-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def split(self, path):\n    path = path.strip()\n    if not path.startswith(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX):\n        raise ValueError('Path %r must be Azure Blob Storage path.' % path)\n    prefix_len = len(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX)\n    last_sep = path[prefix_len:].rfind('/')\n    if last_sep >= 0:\n        last_sep += prefix_len\n    if last_sep > 0:\n        return (path[:last_sep], path[last_sep + 1:])\n    elif last_sep < 0:\n        return (path, '')\n    else:\n        raise ValueError('Invalid path: %s' % path)", "docstring": "Splits the given path into two parts.\n\nSplits the path into a pair (head, tail) such that tail contains the last\ncomponent of the path and head contains everything up to that.\nFor file-systems other than the local file-system, head should include the\nprefix.\n\nArgs:\npath: path as a string\n\nReturns:\na pair of path components as strings.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n    residual = hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.post_attention_layernorm(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\npast_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states", "source": "github-repos"}
{"code": "def select_with_condition(self, condition, key=None):\n        \n        condition = Condition.as_condition(condition)\n        new_confs = []\n\n        for conf in self:\n            \n            obj = conf if key is None else AttrDict(conf[key])\n            add_it = condition(obj=obj)\n            \n            if add_it: new_confs.append(conf)\n\n        self._confs = new_confs", "docstring": "Remove all the configurations that do not satisfy the given condition.\n\nArgs:\ncondition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax\nkey: Selects the sub-dictionary on which condition is applied, e.g. key=\"vars\"\nif we have to filter the configurations depending on the values in vars", "source": "juraj-google-style"}
{"code": "def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs):\n        \n        preprocessed_func = self.preprocess_func(func)\n        return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]", "docstring": "Applies a function to a list of remote partitions.\n\nNote: The main use for this is to preprocess the func.\n\nArgs:\nfunc: The func to apply\npartitions: The list of partitions\n\nReturns:\nA list of BaseFramePartition objects.", "source": "juraj-google-style"}
{"code": "def make_nested_list_of_images(images: Union[list[ImageInput], ImageInput]) -> ImageInput:\n    if isinstance(images, (list, tuple)) and all((isinstance(images_i, (list, tuple)) for images_i in images)) and all((is_valid_list_of_images(images_i) for images_i in images)):\n        return images\n    if isinstance(images, (list, tuple)) and is_valid_list_of_images(images):\n        if is_pil_image(images[0]) or images[0].ndim == 3:\n            return [images]\n        if images[0].ndim == 4:\n            return [list(image) for image in images]\n    if is_valid_image(images):\n        if is_pil_image(images) or images.ndim == 3:\n            return [[images]]\n        if images.ndim == 4:\n            return [list(images)]\n    raise ValueError('Invalid input type. Must be a single image, a list of images, or a list of batches of images.')", "docstring": "Ensure that the output is a nested list of images.\nArgs:\nimages (`Union[List[ImageInput], ImageInput]`):\nThe input image.\nReturns:\nlist: A list of list of images or a list of 4d array of images.", "source": "github-repos"}
{"code": "def json_to_pybel(data, infer_bonds=False):\n    \n    obmol = ob.OBMol()\n    obmol.BeginModify()\n    for atom in data['atoms']:\n        obatom = obmol.NewAtom()\n        obatom.SetAtomicNum(table.GetAtomicNum(str(atom['element'])))\n        obatom.SetVector(*atom['location'])\n        if 'label' in atom:\n            pd = ob.OBPairData()\n            pd.SetAttribute('_atom_site_label')\n            pd.SetValue(atom['label'])\n            obatom.CloneData(pd)\n\n    \n    if 'bonds' not in data or not data['bonds']:\n        if infer_bonds:\n            obmol.ConnectTheDots()\n            obmol.PerceiveBondOrders()\n    \n    else:\n        for bond in data['bonds']:\n            if 'atoms' not in bond:\n                continue\n            obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,\n                          bond['order'])\n\n    \n    if 'unitcell' in data:\n        uc = ob.OBUnitCell()\n        uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))\n        uc.SetSpaceGroup('P1')\n        obmol.CloneData(uc)\n    obmol.EndModify()\n\n    mol = pybel.Molecule(obmol)\n\n    \n    if 'charge' in data['atoms'][0]:\n        mol.OBMol.SetPartialChargesPerceived()\n        for atom, pyatom in zip(data['atoms'], mol.atoms):\n            pyatom.OBAtom.SetPartialCharge(atom['charge'])\n\n    return mol", "docstring": "Converts python data structure to pybel.Molecule.\n\nThis will infer bond data if not specified.\n\nArgs:\ndata: The loaded json data of a molecule, as a Python object\ninfer_bonds (Optional): If no bonds specified in input, infer them\nReturns:\nAn instance of `pybel.Molecule`", "source": "juraj-google-style"}
{"code": "def cast_if_floating_dtype_and_mismatch(targets, outputs):\n    if tensor_util.is_tf_type(targets):\n        return cast_single_tensor(targets, dtype=outputs[0].dtype)\n    new_targets = []\n    for target, out in zip(targets, outputs):\n        if isinstance(target, np.ndarray):\n            target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target)\n        if target.dtype != out.dtype:\n            new_targets.append(cast_single_tensor(target, dtype=out.dtype))\n        else:\n            new_targets.append(target)\n    return new_targets", "docstring": "Returns target data tensors using correct datatype.\n\nChecks that each target and output pair are the same datatype. If not, casts\nthe target to the output's datatype.\n\nArgs:\ntargets: tensor or list of targets.\noutputs: tensor or list of outputs.\n\nReturns:\nTargets in appropriate datatype.", "source": "github-repos"}
{"code": "def transform(self, X):\n    sklearn.base.check_is_fitted(self)\n    X = _validate_data(self, X, reset=False)\n    return self.model_.predict(X)", "docstring": "Transform the data.\n\nArgs:\nX: array-like, shape=(n_samples, n_features)\nThe input samples.\n\nReturns:\nX_transformed: array-like, shape=(n_samples, n_features)\nThe transformed data.", "source": "github-repos"}
{"code": "def key_for_entity_group(cls, key):\n    \n    return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())", "docstring": "Return the key for the entity group containing key.\n\nArgs:\nkey: a key for an entity group whose __entity_group__ key you want.\n\nReturns:\nThe __entity_group__ key for the entity group containing key.", "source": "juraj-google-style"}
{"code": "def apply_on_inputs(self, named_inputs: Dict[str, EventSetNode]) -> Dict[str, EventSetNode]:\n    g = deepcopy(self)\n    assert g.named_inputs is not None\n    assert g.named_outputs is not None\n    for name, new_node in named_inputs.items():\n        if name not in g.named_inputs:\n            raise ValueError(f\"Input node {name} is not in the graph's inputs. Inputs: {g.named_inputs}\")\n        old_node = g.named_inputs[name]\n        for operator in g.operators:\n            for name, inp in operator.inputs.items():\n                if inp is old_node:\n                    operator.inputs[name] = new_node\n    return g.named_outputs", "docstring": "Applies the operators in this graph to new inputs.\n\nNote that the objects in the modified graph are very inconsistent, but\nthat's okay since we won't use it anymore. When running it or save it\nthe graph will be re-inferred.\n\nArgs:\nnamed_inputs: The new inputs to the graph.\n\nReturns:\nThe graph's named outputs.", "source": "github-repos"}
{"code": "def _convert_to_seeder_format(dataset):\n    \n    data = {}\n    seed = {}\n\n    _add_if_set(data, \"name\", dataset.get(\"title\"))\n    _add_if_set(data, \"issn\", dataset.get(\"issn\"))\n    _add_if_set(data, \"annotation\", dataset.get(\"annotation\"))\n\n    rules = dataset.get(\"rules\", {})\n    if rules:\n        _add_if_set(data, \"frequency\", rules.get(\"frequency\"))\n\n        \n        _add_if_set(seed, \"budget\", rules.get(\"budget\"))\n        _add_if_set(seed, \"calendars\", rules.get(\"calendars\"))\n        _add_if_set(seed, \"global_reject\", rules.get(\"global_reject\"))\n        _add_if_set(seed, \"gentle_fetch\", rules.get(\"gentle_fetch\"))\n        _add_if_set(seed, \"javascript\", rules.get(\"javascript\"))\n        _add_if_set(seed, \"local_traps\", rules.get(\"local_traps\"))\n        _add_if_set(seed, \"youtube\", rules.get(\"youtube\"))\n\n        _add_if_set(seed, \"url\", dataset.get(\"url\"))\n\n    if seed:\n        data[\"seed\"] = seed\n\n    return data", "docstring": "WA KAT dataset has different structure from Seeder. This is convertor\nwhich converts WA-KAT -> Seeder data format.\n\nArgs:\ndataset (dict): WA-KAT dataset sent from frontend.\n\nReturns:\ndict: Dict with converted data.", "source": "juraj-google-style"}
{"code": "def delete(self, invoice_id, **kwargs):\n    url = '{}/{}'.format(self.base_url, invoice_id)\n    return self.delete_url(url, {}, **kwargs)", "docstring": "Delete an invoice\nYou can delete an invoice which is in the draft state.\n\nArgs:\ninvoice_id : Id for delete the invoice\nReturns:\nThe response is always be an empty array like this - []", "source": "codesearchnet"}
{"code": "def authenticate_identify(self, api_token, override=True):\n        \n        if (self.context.has_auth_params('Gem-Identify') and not override):\n            raise OverrideError('Gem-Identify')\n\n        if (not api_token or\n            not self.context.authorize('Gem-Identify', api_token=api_token)):\n            raise AuthUsageError(self.context, 'Gem-Identify')\n\n        return True", "docstring": "Set credentials for Identify authentication.\n\nArgs:\napi_token (str): Token issued to your Application through the Gem\nDeveloper Console.\noverride (boolean): Replace existing Application credentials.", "source": "juraj-google-style"}
{"code": "def format_checksum(checksum_pyxb):\n    return '{}/{}'.format(checksum_pyxb.algorithm.upper().replace('-', ''), checksum_pyxb.value().lower())", "docstring": "Create string representation of a PyXB Checksum object.\n\nArgs:\nPyXB Checksum object\n\nReturns:\nstr : Combined hexadecimal value and algorithm name.", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    size = get_size_dict(size, default_to_square=True)\n    if 'height' not in size or 'width' not in size:\n        raise ValueError('size dictionary must contain height and width keys')\n    return resize(image, (size['height'], size['width']), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image to a certain size.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nThe size to resize the image to. Must contain height and width keys.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nThe resampling filter to use when resizing the input.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def download_items(cache_fn, start=None):\n    with SqliteDict(cache_fn) as db:\n        last_id = (db.get('last_id', 0) if (not start) else start)\n        _download_items(db, last_id)\n        db.commit()", "docstring": "Open the `cache_fn` as database and download all not-yet downloaded items.\n\nArgs:\ncache_fn (str): Path to the sqlite database. If not exists, it will be\ncreated.\nstart (int, default None): If set, start from this sysno.", "source": "codesearchnet"}
{"code": "def _DropCommonSuffixes(filename):\n  \n  for suffix in itertools.chain(\n      ('%s.%s' % (test_suffix.lstrip('_'), ext)\n       for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),\n      ('%s.%s' % (suffix, ext)\n       for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):\n    if (filename.endswith(suffix) and len(filename) > len(suffix) and\n        filename[-len(suffix) - 1] in ('-', '_')):\n      return filename[:-len(suffix) - 1]\n  return os.path.splitext(filename)[0]", "docstring": "Drops common suffixes like _test.cc or -inl.h from filename.\n\nFor example:\n>>> _DropCommonSuffixes('foo/foo-inl.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/bar/foo.cc')\n'foo/bar/foo'\n>>> _DropCommonSuffixes('foo/foo_internal.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n'foo/foo_unusualinternal'\n\nArgs:\nfilename: The input filename.\n\nReturns:\nThe filename with the common suffix removed.", "source": "juraj-google-style"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):\n        decoder_module = module._get_decoder_module()\n        decoder_outputs = decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)\n        sequence_output = decoder_outputs[0]\n        if self.config.tie_word_embeddings:\n            sequence_output = sequence_output * self.config.d_model ** (-0.5)\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.shared.variables['params']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, sequence_output)\n        else:\n            lm_logits = module.lm_head(sequence_output)\n        return (lm_logits, decoder_outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration\n>>> import jax.numpy as jnp\n\n>>> tokenizer = 
AutoTokenizer.from_pretrained(\"google-t5/t5-small\")\n>>> model = FlaxT5ForConditionalGeneration.from_pretrained(\"google-t5/t5-small\")\n\n>>> text = \"summarize: My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def parse(self) -> Statement:\n        \n        self.opt_separator()\n        start = self.offset\n        res = self.statement()\n        if res.keyword not in [\"module\", \"submodule\"]:\n            self.offset = start\n            raise UnexpectedInput(self, \"'module' or 'submodule'\")\n        if self.name is not None and res.argument != self.name:\n            raise ModuleNameMismatch(res.argument, self.name)\n        if self.rev:\n            revst = res.find1(\"revision\")\n            if revst is None or revst.argument != self.rev:\n                raise ModuleRevisionMismatch(revst.argument, self.rev)\n        try:\n            self.opt_separator()\n        except EndOfInput:\n            return res\n        raise UnexpectedInput(self, \"end of input\")", "docstring": "Parse a complete YANG module or submodule.\n\nArgs:\nmtext: YANG module text.\n\nRaises:\nEndOfInput: If past the end of input.\nModuleNameMismatch: If parsed module name doesn't match `self.name`.\nModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`.\nUnexpectedInput: If top-level statement isn't ``(sub)module``.", "source": "juraj-google-style"}
{"code": "def _write(self, file_prefix, session=None, options=None):\n    start_time = time.time()\n    output = self._saver.save(file_prefix=file_prefix, session=session, options=options)\n    end_time = time.time()\n    metrics.AddCheckpointWriteDuration(api_label=_CHECKPOINT_V1, microseconds=_get_duration_microseconds(start_time, end_time))\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        metrics.AddTrainingTimeSaved(api_label=_CHECKPOINT_V1, microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n        if checkpoint_context.in_preemption_save_context():\n            _preemption_checkpoint_saved_time_usecs.get_cell().increase_by(_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n        _END_TIME_OF_LAST_WRITE = end_time\n    if tensor_util.is_tf_type(output):\n        if context.executing_eagerly():\n            output = compat.as_str(output.numpy())\n    else:\n        output = compat.as_str(output)\n    if options is not None and options.experimental_write_callbacks is not None:\n        _execute_callbacks(options.experimental_write_callbacks, output)\n    metrics.RecordCheckpointSize(api_label=_CHECKPOINT_V1, filesize=_get_checkpoint_size(output))\n    return output", "docstring": "Writes a training checkpoint.\n\nThe checkpoint includes variables created by this object and any\ntrackable objects it depends on at the time `Checkpoint.write()` is\ncalled.\n\n`write` does not number checkpoints, increment `save_counter`, or update the\nmetadata used by `tf.train.latest_checkpoint`. It is primarily intended for\nuse by higher level checkpoint management utilities. `save` provides a very\nbasic implementation of these features.\n\nArgs:\nfile_prefix: A prefix to use for the checkpoint filenames\n(/path/to/directory/and_a_prefix).\nsession: The session to evaluate variables in. Ignored when executing\neagerly. If not provided when graph building, the default session is\nused.\noptions: Optional `tf.train.CheckpointOptions` object.\n\nReturns:\nThe full path to the checkpoint (i.e. `file_prefix`).", "source": "github-repos"}
{"code": "def parse_uniprot_txt_file(infile):\n    uniprot_metadata_dict = {}\n    metadata = old_parse_uniprot_txt_file(infile)\n    metadata_keys = list(metadata.keys())\n    if metadata_keys:\n        metadata_key = metadata_keys[0]\n    else:\n        return uniprot_metadata_dict\n    uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence']))\n    uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed']\n    uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version']\n    uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version']\n    if ('gene' in metadata[metadata_key]):\n        uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene']\n    if ('description' in metadata[metadata_key]):\n        uniprot_metadata_dict['description'] = metadata[metadata_key]['description']\n    if ('refseq' in metadata[metadata_key]):\n        uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq']\n    if ('kegg' in metadata[metadata_key]):\n        uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg']\n    if ('ec' in metadata[metadata_key]):\n        uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec']\n    if ('pfam' in metadata[metadata_key]):\n        uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam']\n    if ('pdbs' in metadata[metadata_key]):\n        uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs']))\n    return uniprot_metadata_dict", "docstring": "Parse a raw UniProt metadata file and return a dictionary.\n\nArgs:\ninfile: Path to metadata file\n\nReturns:\ndict: Metadata dictionary", "source": "codesearchnet"}
{"code": "def create(self, secret_type, value=None):\n    if (secret_type is ObjectType.CERTIFICATE):\n        return self._create_certificate(value)\n    elif (secret_type is ObjectType.SYMMETRIC_KEY):\n        return self._create_symmetric_key(value)\n    elif (secret_type is ObjectType.PUBLIC_KEY):\n        return self._create_public_key(value)\n    elif (secret_type is ObjectType.PRIVATE_KEY):\n        return self._create_private_key(value)\n    elif (secret_type is ObjectType.SPLIT_KEY):\n        return self._create_split_key(value)\n    elif (secret_type is ObjectType.TEMPLATE):\n        return self._create_template(value)\n    elif (secret_type is ObjectType.SECRET_DATA):\n        return self._create_secret_data(value)\n    elif (secret_type is ObjectType.OPAQUE_DATA):\n        return self._create_opaque_data(value)\n    else:\n        raise TypeError('Unrecognized secret type: {0}'.format(secret_type))", "docstring": "Create a secret object of the specified type with the given value.\n\nArgs:\nsecret_type (ObjectType): An ObjectType enumeration specifying the\ntype of secret to create.\nvalue (dict): A dictionary containing secret data. Optional,\ndefaults to None.\n\nReturns:\nsecret: The newly constructed secret object.\n\nRaises:\nTypeError: If the provided secret type is unrecognized.\n\nExample:\n>>> factory.create(ObjectType.SYMMETRIC_KEY)\nSymmetricKey(...)", "source": "codesearchnet"}
{"code": "def write_hashes(self, arr):\n        \n        length = len(arr)\n        self.write_var_int(length)\n        for item in arr:\n            ba = bytearray(binascii.unhexlify(item))\n            ba.reverse()\n            self.write_bytes(ba)", "docstring": "Write an array of hashes to the stream.\n\nArgs:\narr (list): a list of 32 byte hashes.", "source": "juraj-google-style"}
{"code": "def ssim_value(self, target):\n        \n        \n        if not isinstance(target, SSIMImage) \\\n          or not np.array_equal(self.gaussian_kernel_1d,\n                                target.gaussian_kernel_1d):\n            target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)\n\n        img_mat_12 = self.img.img_gray * target.img_gray\n        img_mat_sigma_12 = convolve_gaussian_2d(\n            img_mat_12, self.gaussian_kernel_1d)\n        img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu\n        img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12\n\n        \n        num_ssim = ((2 * img_mat_mu_12 + self.c_1) *\n                    (2 * img_mat_sigma_12 + self.c_2))\n\n        \n        den_ssim = (\n            (self.img.img_gray_mu_squared + target.img_gray_mu_squared +\n             self.c_1) *\n            (self.img.img_gray_sigma_squared +\n             target.img_gray_sigma_squared + self.c_2))\n\n        ssim_map = num_ssim / den_ssim\n        index = np.average(ssim_map)\n        return index", "docstring": "Compute the SSIM value from the reference image to the target image.\n\nArgs:\ntarget (str or PIL.Image): Input image to compare the reference image\nto. This may be a PIL Image object or, to save time, an SSIMImage\nobject (e.g. the img member of another SSIM object).\n\nReturns:\nComputed SSIM float value.", "source": "juraj-google-style"}
{"code": "def replace_flat_tensors_for_gradients(xs, flat_grads):\n    xs_structure = [_get_tensors_for_gradient(x) for x in xs]\n    grads = nest.pack_sequence_as(xs_structure, flat_grads)\n    return [_replace_tensors_for_gradient(x, grad) for x, grad in zip(xs, grads)]", "docstring": "Replaces Tensors that should be differentiated in `xs` with `flat_grads`.\n\nArgs:\nxs: A list of `Tensor`s or `CompositeTensor`s.\nflat_grads: A list of `Tensor`.\n\nReturns:\nA list of `Tensor` or `CompositeTensor`.", "source": "github-repos"}
{"code": "def groupby(iterable, key=0, filter=None):\n    \n    if isinstance(key, (basestring, int)):\n        key = itemgetter(key)\n    elif isinstance(key, (tuple, list)):\n        key = itemgetter(*key)\n    for label, grp in igroupby(iterable, key):\n        yield label, list(grp)", "docstring": "wrapper to itertools.groupby that returns a list of each group, rather\nthan a generator and accepts integers or strings as the key and\nautomatically converts them to callables with itemgetter(key)\n\nArguments:\niterable: iterable\nkey: string, int or callable that tells how to group\n\nReturns:\nan iterable where each item is the key and a *list* of that\ngroup. (itertools.groupby returns a generator of that group).\n\ne.g. groupby(iterable, 0)", "source": "juraj-google-style"}
{"code": "def vq_loss(x, targets, codebook_size, beta=0.25, decay=0.999, epsilon=1e-05, soft_em=False, num_samples=10, temperature=None, do_update=True):\n    x_shape = common_layers.shape_list(x)\n    target_shape = common_layers.shape_list(targets)\n    hidden_size = x_shape[(- 1)]\n    (means, _, _) = get_vq_codebook(codebook_size, hidden_size)\n    x = tf.reshape(x, [(- 1), hidden_size])\n    targets = tf.reshape(targets, [(- 1)])\n    one_hot_targets = tf.one_hot(targets, codebook_size)\n    target_means = tf.matmul(one_hot_targets, means)\n    (discrete_x, code_loss, distances) = vq_body(x, codebook_size, beta=beta, decay=decay, epsilon=epsilon, soft_em=soft_em, num_samples=num_samples, temperature=temperature, do_update=do_update)\n    logits = (- distances)\n    targets_loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=targets)\n    targets_loss = tf.reduce_mean(targets_loss)\n    x_means = tf.matmul(discrete_x, means)\n    x_means = (x + tf.stop_gradient((x_means - x)))\n    discrete_x = tf.reshape(discrete_x, (x_shape[:(- 1)] + [codebook_size]))\n    target_means = tf.reshape(target_means, (target_shape + [hidden_size]))\n    return (discrete_x, x_means, target_means, code_loss, targets_loss)", "docstring": "Compute the loss of large vocab tensors using a VQAE codebook.\n\nArgs:\nx: Tensor of inputs to be quantized to nearest code\ntargets: Tensor of target indices to target codes\ncodebook_size: Size of quantization codebook\nbeta: scalar float for moving averages\ndecay: scalar float for moving averages\nepsilon: scalar float for moving averages\nsoft_em: boolean, whether to apply a soft sampling procedure\nnum_samples: if soft_em, number of samples to take\ntemperature: temperature if we want to sample nearest neighbors or None\ndo_update: whether to update the means; True by default, can be a Tensor\n\nReturns:\ndiscrete_x: one-hot Tensor indicating which codebook element is closest to x\nx_means: Tensor, on the forward pass: closest codebook element to x, on the\nbackwards pass: soft convex-combination of codebook elements by proximity\nto x\ntarget_means: the codebook elements corresponding to the targets\ncode_loss: loss driving x closer to its nearest codebook element\ntargets_loss: cross-entropy loss driving x closer to code corresponding to\ntarget", "source": "codesearchnet"}
{"code": "def from_url(url, format=None):\n    string = urllib2.urlopen(url).read()\n    if (PY3 is True):\n        string = string.decode('utf-8')\n    if format:\n        format = format.lower().replace(' ', '_')\n        func = parse.__getattr__(('from_%s' % format))\n    else:\n        func = parse.from_unknown_text\n    crs = func(string)\n    return crs", "docstring": "Returns the crs object from a string interpreted as a specified format, located at a given url site.\n\nArguments:\n\n- *url*: The url where the crs string is to be read from.\n- *format* (optional): Which format to parse the crs string as. One of \"ogc wkt\", \"esri wkt\", or \"proj4\".\nIf None, tries to autodetect the format for you (default).\n\nReturns:\n\n- CRS object.", "source": "codesearchnet"}
{"code": "def _read_content_or_path(content_or_path):\n    if ('\\n' in content_or_path.strip()):\n        return content_or_path\n    if (not os.path.exists(content_or_path)):\n        raise IOError((\"File '%s' doesn't exists!\" % content_or_path))\n    with open(content_or_path) as f:\n        return f.read()", "docstring": "If `content_or_path` contains ``\\\\n``, return it. Else assume, that it is\npath and read file at that path.\n\nArgs:\ncontent_or_path (str): Content or path to the file.\n\nReturns:\nstr: Content.\n\nRaises:\nIOError: whhen the file is not found.", "source": "codesearchnet"}
{"code": "def get_params(brightness, contrast, saturation, hue):\n    transforms = []\n    if (brightness is not None):\n        brightness_factor = random.uniform(brightness[0], brightness[1])\n        transforms.append(Lambda((lambda img: F.adjust_brightness(img, brightness_factor))))\n    if (contrast is not None):\n        contrast_factor = random.uniform(contrast[0], contrast[1])\n        transforms.append(Lambda((lambda img: F.adjust_contrast(img, contrast_factor))))\n    if (saturation is not None):\n        saturation_factor = random.uniform(saturation[0], saturation[1])\n        transforms.append(Lambda((lambda img: F.adjust_saturation(img, saturation_factor))))\n    if (hue is not None):\n        hue_factor = random.uniform(hue[0], hue[1])\n        transforms.append(Lambda((lambda img: F.adjust_hue(img, hue_factor))))\n    random.shuffle(transforms)\n    transform = Compose(transforms)\n    return transform", "docstring": "Get a randomized transform to be applied on image.\n\nArguments are same as that of __init__.\n\nReturns:\nTransform which randomly adjusts brightness, contrast and\nsaturation in a random order.", "source": "codesearchnet"}
{"code": "def destroy_walker(self, walker):\n    if walker.buffered:\n        self._queue_walkers.remove(walker)\n    else:\n        self._virtual_walkers.remove(walker)", "docstring": "Destroy a previously created stream walker.\n\nArgs:\nwalker (StreamWalker): The walker to remove from internal updating\nlists.", "source": "codesearchnet"}
{"code": "def need_rejoin(self):\n    if (not self._subscription.partitions_auto_assigned()):\n        return False\n    if self._auto_assign_all_partitions():\n        return False\n    if ((self._assignment_snapshot is not None) and (self._assignment_snapshot != self._metadata_snapshot)):\n        return True\n    if ((self._joined_subscription is not None) and (self._joined_subscription != self._subscription.subscription)):\n        return True\n    return super(ConsumerCoordinator, self).need_rejoin()", "docstring": "Check whether the group should be rejoined\n\nReturns:\nbool: True if consumer should rejoin group, False otherwise", "source": "codesearchnet"}
{"code": "def __init__(self, parameters, cost_fn_val):\n        \n\n        self.parameters = parameters\n        self.cost_fn_val = cost_fn_val\n        self.fitness_score = self.__calc_fitness_score(cost_fn_val)", "docstring": "Member object\n\nArgs:\nparameters (dictionary): dictionary of parameter names and values\ncost_fn_val (float): value returned by cost function using params", "source": "juraj-google-style"}
{"code": "def read_into(self, buffer, size=-1, *, offset=0, write_offset=0) -> None:\n        \n\n        return self.mglo.read_into(buffer, size, offset, write_offset)", "docstring": "Read the content into a buffer.\n\nArgs:\nbuffer (bytarray): The buffer that will receive the content.\nsize (int): The size. Value ``-1`` means all.\n\nKeyword Args:\noffset (int): The read offset.\nwrite_offset (int): The write offset.", "source": "juraj-google-style"}
{"code": "def populate_defaults(base_type, removed_method=False, removed_args=None):\n\n    def wrap(func):\n        if removed_method:\n            return func\n        base_argspec = getfullargspec(unwrap(getattr(base_type, func.__name__)))\n        if not base_argspec.defaults and (not base_argspec.kwonlydefaults):\n            return func\n        arg_to_default = {}\n        if base_argspec.defaults:\n            arg_to_default.update(zip(base_argspec.args[-len(base_argspec.defaults):], base_argspec.defaults))\n        if base_argspec.kwonlydefaults:\n            arg_to_default.update(base_argspec.kwonlydefaults)\n        unwrapped_func = unwrap(func)\n        func_argspec = getfullargspec(unwrapped_func)\n        num_non_defaults = len(func_argspec.args) - len(func_argspec.defaults or ())\n        defaults_to_populate = set(func_argspec.args[:num_non_defaults]).intersection(arg_to_default.keys())\n        if removed_args:\n            defaults_to_populate -= set(removed_args)\n        if 'copy' in arg_to_default and arg_to_default['copy'] is None:\n            arg_to_default['copy'] = True\n\n        @functools.wraps(func)\n        def wrapper(**kwargs):\n            for name in defaults_to_populate:\n                if name not in kwargs:\n                    kwargs[name] = arg_to_default[name]\n            return func(**kwargs)\n        return wrapper\n    return wrap", "docstring": "Populate default values for keyword arguments in decorated function.\n\nWhen applied to a function, this decorator creates a new function\nwith default values for all keyword arguments, based on the default values\nfor the identically-named method on `base_type`.\n\nFor internal use only. No backwards compatibility guarantees.\n\nArgs:\nbase_type: The pandas type of the method that this is trying to replicate.\nremoved_method: Whether this method has been removed in the running\nPandas version.\nremoved_args: If not empty, which arguments have been dropped in the\nrunning Pandas version.", "source": "github-repos"}
{"code": "def upsert_sweep(self, config):\n    mutation = gql('\\n        mutation UpsertSweep(\\n            $config: String,\\n            $description: String,\\n            $entityName: String!,\\n            $projectName: String!\\n        ) {\\n            upsertSweep(input: {\\n                config: $config,\\n                description: $description,\\n                entityName: $entityName,\\n                projectName: $projectName\\n            }) {\\n                sweep {\\n                    name\\n                }\\n            }\\n        }\\n        ')\n\n    def no_retry_400_or_404(e):\n        if (not isinstance(e, requests.HTTPError)):\n            return True\n        if ((e.response.status_code != 400) and (e.response.status_code != 404)):\n            return True\n        body = json.loads(e.response.content)\n        raise UsageError(body['errors'][0]['message'])\n    response = self.gql(mutation, variable_values={'config': yaml.dump(config), 'description': config.get('description'), 'entityName': self.settings('entity'), 'projectName': self.settings('project')}, check_retry_fn=no_retry_400_or_404)\n    return response['upsertSweep']['sweep']['name']", "docstring": "Upsert a sweep object.\n\nArgs:\nconfig (str): sweep config (will be converted to yaml)", "source": "codesearchnet"}
{"code": "def list_devices(device_type=None):\n    device_type = device_type.lower() if device_type else None\n    jax_devices = jax.devices(backend=device_type)\n    return [f'{device.platform}:{device.id}' for device in jax_devices]", "docstring": "Return all the available devices based on the device type.\n\nNote that this should return the global devices in a distributed setting.\n\nArgs:\ndevice_type: string of `\"cpu\"`, `\"gpu\"` or `\"tpu\"`. Defaults to `\"gpu\"`\nor `\"tpu\"` if available when device_type is not provided. Otherwise\nwill return the `\"cpu\"` devices.\n\nReturn:\nList of devices that are available for distribute computation.", "source": "github-repos"}
{"code": "def pad_nested_sequences(sequences, dtype='int32'):\n    max_sent_len = 0\n    max_word_len = 0\n    for sent in sequences:\n        max_sent_len = max(len(sent), max_sent_len)\n        for word in sent:\n            max_word_len = max(len(word), max_word_len)\n    x = np.zeros((len(sequences), max_sent_len, max_word_len)).astype(dtype)\n    for (i, sent) in enumerate(sequences):\n        for (j, word) in enumerate(sent):\n            x[(i, j, :len(word))] = word\n    return x", "docstring": "Pads nested sequences to the same length.\n\nThis function transforms a list of list sequences\ninto a 3D Numpy array of shape `(num_samples, max_sent_len, max_word_len)`.\n\nArgs:\nsequences: List of lists of lists.\ndtype: Type of the output sequences.\n\n# Returns\nx: Numpy array.", "source": "codesearchnet"}
{"code": "def eigenvalues(df):\n    \n    corr = np.corrcoef(df, rowvar=0)\n    eigvals = np.linalg.eigvals(corr)\n    return pd.Series(eigvals, df.columns, name='Eigenvalue')", "docstring": "Returns a pandas Series with eigenvalues of the correlation matrix.\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on", "source": "juraj-google-style"}
{"code": "def __validate(self, value, validate_element):\n        \n        if not self.repeated:\n            return validate_element(value)\n        else:\n            \n            if isinstance(value, (list, tuple)):\n                result = []\n                for element in value:\n                    if element is None:\n                        try:\n                            name = self.name\n                        except AttributeError:\n                            raise ValidationError(\n                                'Repeated values for %s '\n                                'may not be None' % self.__class__.__name__)\n                        else:\n                            raise ValidationError(\n                                'Repeated values for field %s '\n                                'may not be None' % name)\n                    result.append(validate_element(element))\n                return result\n            elif value is not None:\n                try:\n                    name = self.name\n                except AttributeError:\n                    raise ValidationError('%s is repeated. Found: %s' % (\n                        self.__class__.__name__, value))\n                else:\n                    raise ValidationError(\n                        'Field %s is repeated. Found: %s' % (name, value))\n        return value", "docstring": "Internal validation function.\n\nValidate an internal value using a function to validate\nindividual elements.\n\nArgs:\nvalue: Value to validate.\nvalidate_element: Function to use to validate individual elements.\n\nRaises:\nValidationError if value is not expected type.", "source": "juraj-google-style"}
{"code": "def next_layer(self, original_rp, broadcast_rp):\n    gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp)\n    return _LayerBroadcaster.from_gather_index(gather_index)", "docstring": "Create the next layer gather_index whether or not a broadcast happens.\n\n*---------self------->*\n|                     |\noriginal_rp           broadcast_rp\n|                     |\n\\|/                   \\|/\n*--next_broadcaster-->*\nArgs:\noriginal_rp: the original row partition.\nbroadcast_rp: the target row partition.\n\nReturns:\nthe gather_index for next_broadcaster.", "source": "github-repos"}
{"code": "def revoke(self, revocation_reason, uid=None, revocation_message=None, compromise_occurrence_date=None):\n    if (not isinstance(revocation_reason, enums.RevocationReasonCode)):\n        raise TypeError('revocation_reason must be a RevocationReasonCode enumeration')\n    if (uid is not None):\n        if (not isinstance(uid, six.string_types)):\n            raise TypeError('uid must be a string')\n    if (revocation_message is not None):\n        if (not isinstance(revocation_message, six.string_types)):\n            raise TypeError('revocation_message must be a string')\n    if (compromise_occurrence_date is not None):\n        if (not isinstance(compromise_occurrence_date, six.integer_types)):\n            raise TypeError('compromise_occurrence_date must be an integer')\n        compromise_occurrence_date = primitives.DateTime(compromise_occurrence_date, enums.Tags.COMPROMISE_OCCURRENCE_DATE)\n    result = self.proxy.revoke(revocation_reason, uid, revocation_message, compromise_occurrence_date)\n    status = result.result_status.value\n    if (status == enums.ResultStatus.SUCCESS):\n        return\n    else:\n        reason = result.result_reason.value\n        message = result.result_message.value\n        raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Revoke a managed object stored by a KMIP appliance.\n\nArgs:\nrevocation_reason (RevocationReasonCode): An enumeration indicating\nthe revocation reason.\nuid (string): The unique ID of the managed object to revoke.\nOptional, defaults to None.\nrevocation_message (string): A message regarding the revocation.\nOptional, defaults to None.\ncompromise_occurrence_date (int): An integer, the number of seconds\nsince the epoch, which will be converted to the Datetime when\nthe managed object was first believed to be compromised.\nOptional, defaults to None.\n\nReturns:\nNone\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input argument is invalid", "source": "codesearchnet"}
{"code": "def _consume_single_get(response_iterator):\n    all_responses = list(response_iterator)\n    if (len(all_responses) != 1):\n        raise ValueError('Unexpected response from `BatchGetDocumentsResponse`', all_responses, 'Expected only one result')\n    return all_responses[0]", "docstring": "Consume a gRPC stream that should contain a single response.\n\nThe stream will correspond to a ``BatchGetDocuments`` request made\nfor a single document.\n\nArgs:\nresponse_iterator (~google.cloud.exceptions.GrpcRendezvous): A\nstreaming iterator returned from a ``BatchGetDocuments``\nrequest.\n\nReturns:\n~google.cloud.proto.firestore.v1beta1.\\\nfirestore_pb2.BatchGetDocumentsResponse: The single \"get\"\nresponse in the batch.\n\nRaises:\nValueError: If anything other than exactly one response is returned.", "source": "codesearchnet"}
{"code": "def ndim(x):\n    if any_symbolic_tensors((x,)):\n        return Ndim().symbolic_call(x)\n    return backend.numpy.ndim(x)", "docstring": "Return the number of dimensions of a tensor.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe number of dimensions in `x`.", "source": "github-repos"}
{"code": "def slice(filename, number_tiles=None, col=None, row=None, save=True):\n    im = Image.open(filename)\n    (im_w, im_h) = im.size\n    columns = 0\n    rows = 0\n    if (not (number_tiles is None)):\n        validate_image(im, number_tiles)\n        (columns, rows) = calc_columns_rows(number_tiles)\n        extras = ((columns * rows) - number_tiles)\n    else:\n        validate_image_col_row(im, col, row)\n        columns = col\n        rows = row\n        extras = ((columns * rows) - number_tiles)\n    (tile_w, tile_h) = (int(floor((im_w / columns))), int(floor((im_h / rows))))\n    tiles = []\n    number = 1\n    for pos_y in range(0, (im_h - rows), tile_h):\n        for pos_x in range(0, (im_w - columns), tile_w):\n            area = (pos_x, pos_y, (pos_x + tile_w), (pos_y + tile_h))\n            image = im.crop(area)\n            position = ((int(floor((pos_x / tile_w))) + 1), (int(floor((pos_y / tile_h))) + 1))\n            coords = (pos_x, pos_y)\n            tile = Tile(image, number, position, coords)\n            tiles.append(tile)\n            number += 1\n    if save:\n        save_tiles(tiles, prefix=get_basename(filename), directory=os.path.dirname(filename))\n    return tuple(tiles)", "docstring": "Split an image into a specified number of tiles.\n\nArgs:\nfilename (str):  The filename of the image to split.\nnumber_tiles (int):  The number of tiles required.\n\nKwargs:\nsave (bool): Whether or not to save tiles to disk.\n\nReturns:\nTuple of :class:`Tile` instances.", "source": "codesearchnet"}
{"code": "def layout(mtf_graph, mesh_shape, mtf_outputs=()):\n    mesh_shape = mtf.convert_to_shape(mesh_shape)\n    estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape, mtf_outputs)\n    optimizer = layout_optimizer.LayoutOptimizer(estimator)\n    return mtf.convert_to_layout_rules(optimizer.solve())", "docstring": "Compute layout rules based on a computational graph and mesh shape.\n\nArgs:\nmtf_graph: a mtf.Graph.\nmesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.\nmtf_outputs: an optional iterable of mtf.Tensor, representing the outputs\nof the computation.\n\nReturns:\na mtf.LayoutRules", "source": "codesearchnet"}
{"code": "def index_path_for(window):\n    if output_path:\n        return '%s/INDEX-%s' % (output_path, window.max_timestamp())\n    else:\n        return None", "docstring": "Returns:\npath to the index file containing all shard names or None if no output_path\nis set", "source": "github-repos"}
{"code": "def load_config_file(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME, fallback_config_instance=None):\n    if (not fallback_config_instance):\n        fallback_config_instance = backend_config_to_configparser(get_default_backend_config(appdirs))\n    config = SafeConfigParser()\n    path = get_config_path(appdirs, file_name)\n    if (not config.read(path)):\n        config = write_config_file(fallback_config_instance, appdirs=appdirs, file_name=file_name)\n    return config", "docstring": "Retrieve config information from file at default location.\n\nIf no config file is found a new one will be created either with ``fallback_config_instance``\nas content or if none is provided with the result of ``get_default_backend_config``.\n\nArgs:\nappdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific\npath information.\nfile_name (text_type, optional): Name of the config file. Defaults to\n``DEFAULT_CONFIG_FILENAME``.\nfallback_config_instance (ConfigParser): Backend config that is to be used to populate the\nconfig file that is created if no pre-existing one can be found.\n\nReturns:\nSafeConfigParser: Config loaded from file, either from the the  pre-existing config\nfile or the one created with fallback values.", "source": "codesearchnet"}
{"code": "def remove_regex(urls, regex):\n    \n\n    if not regex:\n        return urls\n\n    \n    if not isinstance(urls, (list, set, tuple)):\n        urls = [urls]\n\n    try:\n        non_matching_urls = [url for url in urls if not re.search(regex, url)]\n    except TypeError:\n        return []\n\n    return non_matching_urls", "docstring": "Parse a list for non-matches to a regex.\n\nArgs:\nurls: iterable of urls\nregex: string regex to be parsed for\n\nReturns:\nlist of strings not matching regex", "source": "juraj-google-style"}
{"code": "def put_rpc(self, address, rpc_id, arg_payload, response):\n        \n\n        self._rpc_queue.put_nowait((address, rpc_id, arg_payload, response))", "docstring": "Place an RPC onto the RPC queue.\n\nThe rpc will be dispatched asynchronously by the background dispatch\ntask.  This method must be called from the event loop.  This method\ndoes not block.\n\nArgs:\naddress (int): The address of the tile with the RPC\nrpc_id (int): The id of the rpc you want to call\narg_payload (bytes): The RPC payload\nrespones (GenericResponse): The object to use to signal the result.", "source": "juraj-google-style"}
{"code": "def _maybe_resolve_alias(alias, name_to_class, name_to_constant):\n    if not isinstance(alias.type, pytd.NamedType):\n        return alias\n    if alias.type.name in _TYPING_SETS:\n        return None\n    if '.' not in alias.type.name:\n        return alias\n    parts = alias.type.name.split('.')\n    if parts[0] not in name_to_class and parts[0] not in name_to_constant:\n        return alias\n    prev_value = None\n    value = name_to_class.get(parts[0]) or name_to_constant[parts[0]]\n    for part in parts[1:]:\n        prev_value = value\n        if isinstance(value, pytd.Constant):\n            if not isinstance(value.type, pytd.NamedType) or value.type.name not in name_to_class:\n                return alias\n            value = name_to_class[value.type.name]\n        if not isinstance(value, pytd.Class):\n            return alias\n        if part in value:\n            value = value.Lookup(part)\n        else:\n            for base in value.bases:\n                if base.name not in name_to_class:\n                    return alias\n                if part in name_to_class[base.name]:\n                    value = name_to_class[base.name].Lookup(part)\n                    break\n            else:\n                return alias\n    if isinstance(value, pytd.Class):\n        return pytd.Constant(alias.name, pytdgen.pytd_type(pytd.NamedType(alias.type.name)))\n    elif isinstance(value, pytd.Function):\n        return pytd.AliasMethod(value.Replace(name=alias.name), from_constant=isinstance(prev_value, pytd.Constant))\n    else:\n        return value.Replace(name=alias.name)", "docstring": "Resolve the alias if possible.\n\nArgs:\nalias: A pytd.Alias\nname_to_class: A class map used for resolution.\nname_to_constant: A constant map used for resolution.\n\nReturns:\nNone, if the alias pointed to an un-aliasable type.\nThe resolved value, if the alias was resolved.\nThe alias, if it was not resolved.", "source": "github-repos"}
{"code": "def exists(self, workflow_id):\n        \n        try:\n            db = self._client[self.database]\n            col = db[WORKFLOW_DATA_COLLECTION_NAME]\n            return col.find_one({\"_id\": ObjectId(workflow_id)}) is not None\n\n        except ConnectionFailure:\n            raise DataStoreNotConnected()", "docstring": "Checks whether a document with the specified workflow id already exists.\n\nArgs:\nworkflow_id (str): The workflow id that should be checked.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.\n\nReturns:\nbool: ``True`` if a document with the specified workflow id exists.", "source": "juraj-google-style"}
{"code": "def print_gate(gate: Gate, ndigits: int = 2,\n               file: TextIO = None) -> None:\n    \n    N = gate.qubit_nb\n    gate_tensor = gate.vec.asarray()\n    lines = []\n    for index, amplitude in np.ndenumerate(gate_tensor):\n        ket = \"\".join([str(n) for n in index[0:N]])\n        bra = \"\".join([str(index[n]) for n in range(N, 2*N)])\n        if round(abs(amplitude)**2, ndigits) > 0.0:\n            lines.append('{} -> {} : {}'.format(bra, ket, amplitude))\n    lines.sort(key=lambda x: int(x[0:N]))\n    print('\\n'.join(lines), file=file)", "docstring": "Pretty print a gate tensor\n\nArgs:\ngate:\nndigits:\nfile: Stream to which to write. Defaults to stdout", "source": "juraj-google-style"}
{"code": "def has_member(self, device_object):\n    if (device_object.tag == 'computer'):\n        container_search = 'computers/computer'\n    elif (device_object.tag == 'mobile_device'):\n        container_search = 'mobile_devices/mobile_device'\n    else:\n        raise ValueError\n    return (len([device for device in self.findall(container_search) if (device.findtext('id') == device_object.id)]) is not 0)", "docstring": "Return bool whether group has a device as a member.\n\nArgs:\ndevice_object (Computer or MobileDevice). Membership is\ndetermined by ID, as names can be shared amongst devices.", "source": "codesearchnet"}
{"code": "def use_wrapped_call(layer, call_fn, default_training_value=None, return_method=False):\n    expects_training_arg = layer_uses_training_bool(layer)\n    if hasattr(call_fn, 'original_layer_call'):\n        original_call = call_fn.original_layer_call\n        call_fn = call_fn.__call__\n    else:\n        original_call = call_fn\n    fn, arg_spec = maybe_add_training_arg(original_call, call_fn, expects_training_arg, default_training_value)\n\n    def return_outputs_and_add_losses(*args, **kwargs):\n        \n        if return_method:\n            args = args[1:]\n        outputs, losses = fn(*args, **kwargs)\n        layer.add_loss(losses, inputs=True)\n        if context.executing_eagerly():\n            for i in layer._flatten_layers():\n                if i is not layer:\n                    i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER]\n        return outputs\n    decorated = tf_decorator.make_decorator(target=call_fn, decorator_func=return_outputs_and_add_losses, decorator_argspec=arg_spec)\n    if return_method:\n        return types.MethodType(decorated, layer)\n    else:\n        return decorated", "docstring": "Creates fn that adds the losses returned by call_fn & returns the outputs.\n\nArgs:\nlayer: A Keras layer object\ncall_fn: tf.function that takes layer inputs (and possibly a training arg),\nand returns a tuple of (outputs, list of losses).\ndefault_training_value: Default value of the training kwarg. If `None`, the\ndefault is `K.learning_phase()`.\nreturn_method: Whether to return a method bound to the layer.\n\nReturns:\nfunction that calls call_fn and returns the outputs. Losses returned by\ncall_fn are added to the layer losses.", "source": "github-repos"}
{"code": "def convert_to_date_tensor(date_inputs):\n    if isinstance(date_inputs, DateTensor):\n        return date_inputs\n    if hasattr(date_inputs, 'year'):\n        return from_datetimes(date_inputs)\n    if isinstance(date_inputs, np.ndarray):\n        date_inputs = date_inputs.astype('datetime64[D]')\n        return from_np_datetimes(date_inputs)\n    if tf.is_tensor(date_inputs):\n        return from_ordinals(date_inputs)\n    if isinstance(date_inputs, collections.abc.Sequence):\n        if not date_inputs:\n            return from_ordinals([])\n        test_element = date_inputs[0]\n        if hasattr(test_element, 'year'):\n            return from_datetimes(date_inputs)\n        if isinstance(test_element, collections.abc.Sequence):\n            return from_tuples(date_inputs)\n        if len(date_inputs) == 3:\n            return from_year_month_day(date_inputs[0], date_inputs[1], date_inputs[2])\n    try:\n        as_ordinals = tf.convert_to_tensor(date_inputs, dtype=tf.int32)\n        return from_ordinals(as_ordinals)\n    except ValueError as e:\n        raise ValueError('Failed to convert inputs to DateTensor. Unrecognized format. Error: ' + e)", "docstring": "Converts supplied data to a `DateTensor` if possible.\n\nArgs:\ndate_inputs: One of the supported types that can be converted to a\nDateTensor. The following input formats are supported. 1. Sequence of\n`datetime.datetime`, `datetime.date`, or any other structure with data\nattributes called 'year', 'month' and 'day'. 2. A numpy array of\n`datetime64` type. 3. Sequence of (year, month, day) Tuples. Months are\n1-based (with January as 1) and constants.Months enum may be used instead\nof ints. Days are also 1-based. 4. A tuple of three int32 `Tensor`s\ncontaining year, month and date as positive integers in that order. 5. A\nsingle int32 `Tensor` containing ordinals (i.e. number of days since 31\nDec 0 with 1 being 1 Jan 1.)\n\nReturns:\nA `DateTensor` object representing the supplied dates.\n\nRaises:\nValueError: If conversion fails for any reason.", "source": "github-repos"}
{"code": "def predict(self, x, add_intercept=False):\n        \n        \n        if x.min() < self.start:\n            raise Warning(\"x.min() < self.start\")\n        if x.max() > self.end:\n            raise Warning(\"x.max() > self.end\")\n\n        return get_X_spline(x=x,\n                            knots=self.knots,\n                            n_bases=self.n_bases,\n                            spline_order=self.spline_order,\n                            add_intercept=add_intercept)", "docstring": "For some x, predict the bn(x) for each base\n\nArguments:\nx: np.array; Vector of dimension 1\nadd_intercept: bool; should we add the intercept to the final array\n\nReturns:\nnp.array, of shape (len(x), n_bases + (add_intercept))", "source": "juraj-google-style"}
{"code": "def get_seqprop_within(self, chain_id, resnum, angstroms, only_protein=True, use_ca=False, custom_coord=None, return_resnums=False):\n    (polypep, resnums) = self.get_polypeptide_within(chain_id=chain_id, resnum=resnum, angstroms=angstroms, use_ca=use_ca, only_protein=only_protein, custom_coord=custom_coord, return_resnums=True)\n    chain_subseq = self.chains.get_by_id(chain_id).get_subsequence(resnums)\n    if return_resnums:\n        return (chain_subseq, resnums)\n    else:\n        return chain_subseq", "docstring": "Get a SeqProp object of the amino acids within X angstroms of the specified chain + residue number.\n\nArgs:\nresnum (int): Residue number of the structure\nchain_id (str): Chain ID of the residue number\nangstroms (float): Radius of the search sphere\nonly_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence\nuse_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used)\n\nReturns:\nSeqProp: Sequence that represents the amino acids in the vicinity of your residue number.", "source": "codesearchnet"}
{"code": "def resolve_for(self, node, exact=None):\n        \n\n        from capybara.driver.node import Node\n        from capybara.node.element import Element\n        from capybara.node.simple import Simple\n\n        @node.synchronize\n        def resolve():\n            if self.selector.format == \"css\":\n                children = node._find_css(self.css())\n            else:\n                children = node._find_xpath(self.xpath(exact))\n\n            def wrap(child):\n                if isinstance(child, Node):\n                    return Element(node.session, child, node, self)\n                else:\n                    return Simple(child)\n\n            children = [wrap(child) for child in children]\n\n            return Result(children, self)\n\n        return resolve()", "docstring": "Resolves this query relative to the given node.\n\nArgs:\nnode (node.Base): The node relative to which this query should be resolved.\nexact (bool, optional): Whether to exactly match text.\n\nReturns:\nlist[Element]: A list of elements matched by this query.", "source": "juraj-google-style"}
{"code": "def _string_from_ip_int(self, ip_int):\n        \n        octets = []\n        for _ in xrange(4):\n            octets.insert(0, str(ip_int & 0xFF))\n            ip_int >>= 8\n        return '.'.join(octets)", "docstring": "Turns a 32-bit integer into dotted decimal notation.\n\nArgs:\nip_int: An integer, the IP address.\n\nReturns:\nThe IP address as a string in dotted decimal notation.", "source": "juraj-google-style"}
{"code": "def get_event_q(self, event_name):\n    self.lock.acquire()\n    if ((not (event_name in self.event_dict)) or (self.event_dict[event_name] is None)):\n        self.event_dict[event_name] = queue.Queue()\n    self.lock.release()\n    event_queue = self.event_dict[event_name]\n    return event_queue", "docstring": "Obtain the queue storing events of the specified name.\n\nIf no event of this name has been polled, wait for one to.\n\nReturns:\nA queue storing all the events of the specified name.\nNone if timed out.\n\nRaises:\nqueue.Empty: Raised if the queue does not exist and timeout has\npassed.", "source": "codesearchnet"}
{"code": "def from_json(cls, data):\n    optional_keys = {'wind_direction': 0, 'rain': False, 'snow_on_ground': False}\n    assert ('wind_speed' in data), 'Required key \"wind_speed\" is missing!'\n    for (key, val) in optional_keys.items():\n        if (key not in data):\n            data[key] = val\n    return cls(data['wind_speed'], data['wind_direction'], data['rain'], data['snow_on_ground'])", "docstring": "Create a Wind Condition from a dictionary.\n\nArgs:\ndata = {\n\"wind_speed\": float,\n\"wind_direction\": float,\n\"rain\": bool,\n\"snow_on_ground\": bool}", "source": "codesearchnet"}
{"code": "def group_by(what, by):\n    return proso.dict.group_keys_by_values({x: by(x) for x in what})", "docstring": "Take a list and apply the given function on each its value, then group the\nvalues by the function results.\n\n.. testsetup::\n\nfrom proso.list import group_by\n\n.. doctest::\n\n>>> group_by([i for i in range(10)], by=lambda x: x % 2 == 0)\n{False: [1, 3, 5, 7, 9], True: [0, 2, 4, 6, 8]}\n\nArgs:\nwhat: a list which will be transformed\nby: a function which will be applied on values of the given list\n\nReturns:\ndict: values groupped by the function results", "source": "codesearchnet"}
{"code": "def simple_two_objective_reward(example):\n    num = int(example * 10) % 9 + 1\n    return [num, 10 - num]", "docstring": "Reward for the trivial search space.\n\nThe reward (i.e. fitness) is a 2-element list. The goal of the search,\ntherefore, is to find the pareto frontier in simple_two_objective_pareto\nfunction.\n\nArgs:\nexample: a materialized value.\n\nReturns:\nA 2-element list.", "source": "github-repos"}
{"code": "def __init__(\n        self, \n        batch_size=20,\n        seq_len=10, \n        min_pitch=24,\n        max_pitch=108\n\n    ):\n        \n        self.__batch_size = batch_size\n        self.__seq_len = seq_len\n        self.__dim = max_pitch - min_pitch", "docstring": "Init.\n\nArgs:\nbatch_size:         Batch size.\nseq_len:            The length of sequneces.\nThe length corresponds to the number of `time` splited by `time_fraction`.\n\nmin_pitch:          The minimum of note number.\nmax_pitch:          The maximum of note number.", "source": "juraj-google-style"}
{"code": "def isfile(self, path, follow_symlinks=True):\n    return self._is_of_type(path, S_IFREG, follow_symlinks)", "docstring": "Determine if path identifies a regular file.\n\nArgs:\npath: Path to filesystem object.\n\nReturns:\n`True` if path points to a regular file (following symlinks).\n\nRaises:\nTypeError: if path is None.", "source": "codesearchnet"}
{"code": "def do_decode(cls, obj, obj_type):\n    if (inspect.isclass(obj_type) and issubclass(obj_type, ConjureBeanType)):\n        return cls.decode_conjure_bean_type(obj, obj_type)\n    elif (inspect.isclass(obj_type) and issubclass(obj_type, ConjureUnionType)):\n        return cls.decode_conjure_union_type(obj, obj_type)\n    elif (inspect.isclass(obj_type) and issubclass(obj_type, ConjureEnumType)):\n        return cls.decode_conjure_enum_type(obj, obj_type)\n    elif isinstance(obj_type, DictType):\n        return cls.decode_dict(obj, obj_type.key_type, obj_type.value_type)\n    elif isinstance(obj_type, ListType):\n        return cls.decode_list(obj, obj_type.item_type)\n    elif isinstance(obj_type, OptionalType):\n        return cls.decode_optional(obj, obj_type.item_type)\n    return cls.decode_primitive(obj, obj_type)", "docstring": "Decodes json into the specified type\n\nArgs:\nobj: the json object to decode\nelement_type: a class object which is the type we're decoding into.", "source": "codesearchnet"}
{"code": "def prune_candidates(candidates):\n    \n    pruned = []\n    \n    \n    for first, second in candidates:\n        if first.__class__ is Linearization:\n            nodes1 = first.curve.nodes\n        else:\n            nodes1 = first.nodes\n        if second.__class__ is Linearization:\n            nodes2 = second.curve.nodes\n        else:\n            nodes2 = second.nodes\n        if convex_hull_collide(nodes1, nodes2):\n            pruned.append((first, second))\n    return pruned", "docstring": "Reduce number of candidate intersection pairs.\n\n.. note::\n\nThis is a helper for :func:`_all_intersections`.\n\nUses more strict bounding box intersection predicate by forming the\nactual convex hull of each candidate curve segment and then checking\nif those convex hulls collide.\n\nArgs:\ncandidates (List): An iterable of pairs of curves (or\nlinearized curves).\n\nReturns:\nList: A pruned list of curve pairs.", "source": "juraj-google-style"}
{"code": "def _tflearn_features(train_config, args):\n    feature_columns = []\n    target_name = train_config['target_column']\n    key_name = train_config['key_column']\n    for name in train_config['numerical_columns']:\n        if ((name != target_name) and (name != key_name)):\n            feature_columns.append(tf.contrib.layers.real_valued_column(name, dimension=1))\n    for name in train_config['categorical_columns']:\n        if ((name != target_name) and (name != key_name)):\n            transform_config = train_config['transforms'].get(name, {})\n            transform_name = transform_config.get('transform', None)\n            if is_dnn_model(args.model_type):\n                if (transform_name == 'embedding'):\n                    sparse = tf.contrib.layers.sparse_column_with_integerized_feature(name, bucket_size=train_config['vocab_stats'][name]['n_classes'])\n                    learn_feature = tf.contrib.layers.embedding_column(sparse, dimension=transform_config['embedding_dim'])\n                elif ((transform_name == 'one_hot') or (transform_name is None)):\n                    sparse = tf.contrib.layers.sparse_column_with_integerized_feature(name, bucket_size=train_config['vocab_stats'][name]['n_classes'])\n                    learn_feature = tf.contrib.layers.one_hot_column(sparse)\n                else:\n                    raise ValueError((\"Unknown transform name. Only 'embedding' and 'one_hot' transforms are supported. Got %s\" % transform_name))\n            elif is_linear_model(args.model_type):\n                if ((transform_name == 'one_hot') or (transform_name is None)):\n                    learn_feature = tf.contrib.layers.sparse_column_with_integerized_feature(name, bucket_size=train_config['vocab_stats'][name]['n_classes'])\n                elif (transform_name == 'embedding'):\n                    learn_feature = tf.contrib.layers.sparse_column_with_hash_bucket(name, hash_bucket_size=transform_config['embedding_dim'])\n                else:\n                    raise ValueError((\"Unknown transform name. Only 'embedding' and 'one_hot' transforms are supported. Got %s\" % transform_name))\n            feature_columns.append(learn_feature)\n    return feature_columns", "docstring": "Builds the tf.learn feature list.\n\nAll numerical features are just given real_valued_column because all the\npreprocessing transformations are done in preprocess_input. Categoriacl\nfeatures are processed here depending if the vocab map (from string to int)\nwas applied in preprocess_input.\n\nArgs:\ntrain_config: our train config object\nargs: command line args.\n\nReturns:\nList of TF lean feature columns.\n\nRaises:\nValueError: if wrong transforms are used for the model type.", "source": "codesearchnet"}
{"code": "def for_all_test_methods(decorator, *args, **kwargs):\n\n    def all_test_methods_impl(cls):\n        \n        for name in dir(cls):\n            value = getattr(cls, name)\n            if callable(value) and name.startswith('test') and (name != 'test_session'):\n                setattr(cls, name, decorator(*args, **kwargs)(value))\n        return cls\n    return all_test_methods_impl", "docstring": "Generate class-level decorator from given method-level decorator.\n\nIt is expected for the given decorator to take some arguments and return\na method that is then called on the test method to produce a decorated\nmethod.\n\nArgs:\ndecorator: The decorator to apply.\n*args: Positional arguments\n**kwargs: Keyword arguments\nReturns: Function that will decorate a given classes test methods with the\ndecorator.", "source": "github-repos"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    params = {'params': params or self.params}\n    if past_key_values:\n        params['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs):\n        projection_module = module._get_projection_module()\n        decoder_module = module._get_decoder_module()\n        if projection_module is not None:\n            encoder_hidden_states = projection_module(encoder_hidden_states)\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states=encoder_hidden_states, **kwargs)\n    outputs = self.module.apply(params, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import FlaxSpeechEncoderDecoderModel\n>>> import jax.numpy as jnp\n\n>>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized\n>>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(\n...     
\"facebook/wav2vec2-large-lv60\", \"facebook/bart-large\"\n... )\n\n>>> inputs = jnp.ones((2, 5000), dtype=jnp.float32)\n>>> encoder_outputs = model.encode(inputs)\n\n>>> decoder_start_token_id = model.config.decoder.bos_token_id\n>>> decoder_input_ids = jnp.ones((inputs.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def render_asset_html(self, path, tag_template):\n        \n        url = os.path.join(settings.STATIC_URL, path)\n\n        return tag_template.format(url=url)", "docstring": "Render HTML tag for a given path.\n\nArguments:\npath (string): Relative path from static directory.\ntag_template (string): Template string for HTML tag.\n\nReturns:\nstring: HTML tag with url from given path.", "source": "juraj-google-style"}
{"code": "def create_sprite_image(examples):\n    \n\n    def generate_image_from_thubnails(thumbnails, thumbnail_dims):\n      \n      num_thumbnails = tf.shape(thumbnails)[0].eval()\n      images_per_row = int(math.ceil(math.sqrt(num_thumbnails)))\n      thumb_height = thumbnail_dims[0]\n      thumb_width = thumbnail_dims[1]\n      master_height = images_per_row * thumb_height\n      master_width = images_per_row * thumb_width\n      num_channels = 3\n      master = np.zeros([master_height, master_width, num_channels])\n      for idx, image in enumerate(thumbnails.eval()):\n        left_idx = idx % images_per_row\n        top_idx = int(math.floor(idx / images_per_row))\n        left_start = left_idx * thumb_width\n        left_end = left_start + thumb_width\n        top_start = top_idx * thumb_height\n        top_end = top_start + thumb_height\n        master[top_start:top_end, left_start:left_end, :] = image\n      return tf.image.encode_png(master)\n\n    image_feature_name = 'image/encoded'\n    sprite_thumbnail_dim_px = 32\n    with tf.compat.v1.Session():\n      keys_to_features = {\n          image_feature_name:\n              tf.FixedLenFeature((), tf.string, default_value=''),\n      }\n      parsed = tf.parse_example(examples, keys_to_features)\n      images = tf.zeros([1, 1, 1, 1], tf.float32)\n      i = tf.constant(0)\n      thumbnail_dims = (sprite_thumbnail_dim_px,\n                        sprite_thumbnail_dim_px)\n      num_examples = tf.constant(len(examples))\n      encoded_images = parsed[image_feature_name]\n\n      \n      \n      def loop_body(i, encoded_images, images):\n        encoded_image = encoded_images[i]\n        image = tf.image.decode_jpeg(encoded_image, channels=3)\n        resized_image = tf.image.resize(image, thumbnail_dims)\n        expanded_image = tf.expand_dims(resized_image, 0)\n        images = tf.cond(\n            tf.equal(i, 0), lambda: expanded_image,\n            lambda: tf.concat([images, expanded_image], 0))\n        return i + 1, encoded_images, images\n\n      loop_out = tf.while_loop(\n          lambda i, encoded_images, images: tf.less(i, num_examples),\n          loop_body, [i, encoded_images, images],\n          shape_invariants=[\n              i.get_shape(),\n              encoded_images.get_shape(),\n              tf.TensorShape(None)\n          ])\n\n      \n      sprite = generate_image_from_thubnails(loop_out[2], thumbnail_dims)\n      return sprite.eval()", "docstring": "Returns an encoded sprite image for use in Facets Dive.\n\nArgs:\nexamples: A list of serialized example protos to get images for.\n\nReturns:\nAn encoded PNG.", "source": "juraj-google-style"}
{"code": "def delete(self, rid, raise_on_error=True):\n        \n        return self.ds.delete(rid, raise_on_error)", "docstring": "Write cache data to the data store.\n\nArgs:\nrid (str): The record identifier.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def wrap_lines(self, text, indent_level, indent_size=4):\n    indent = ((' ' * indent_size) * indent_level)\n    lines = text.split('\\n')\n    wrapped_lines = []\n    for line in lines:\n        if (line == ''):\n            wrapped_lines.append(line)\n        else:\n            wrapped_lines.append((indent + line))\n    return '\\n'.join(wrapped_lines)", "docstring": "Indent a multiline string\n\nArgs:\ntext (string): The string to indent\nindent_level (int): The number of indent_size spaces to prepend\nto each line\nindent_size (int): The number of spaces to prepend for each indent\nlevel\n\nReturns:\nstring: The indented block of text", "source": "codesearchnet"}
{"code": "def concatenate(tup, axis=0):\n    from distob import engine\n    if (len(tup) is 0):\n        raise ValueError('need at least one array to concatenate')\n    first = tup[0]\n    others = tup[1:]\n    if (hasattr(first, 'concatenate') and hasattr(type(first), '__array_interface__')):\n        return first.concatenate(others, axis)\n    arrays = []\n    for ar in tup:\n        if isinstance(ar, DistArray):\n            if (axis == ar._distaxis):\n                arrays.extend(ar._subarrays)\n            else:\n                arrays.append(gather(ar))\n        elif isinstance(ar, RemoteArray):\n            arrays.append(ar)\n        elif isinstance(ar, Remote):\n            arrays.append(_remote_to_array(ar))\n        elif hasattr(type(ar), '__array_interface__'):\n            arrays.append(ar)\n        else:\n            arrays.append(np.array(ar))\n    if all((isinstance(ar, np.ndarray) for ar in arrays)):\n        return np.concatenate(arrays, axis)\n    total_length = 0\n    commonshape = list(arrays[0].shape)\n    commonshape[axis] = None\n    for ar in arrays:\n        total_length += ar.shape[axis]\n        shp = list(ar.shape)\n        shp[axis] = None\n        if (shp != commonshape):\n            raise ValueError('incompatible shapes for concatenation')\n    blocksize = (((total_length - 1) \n    rarrays = []\n    for ar in arrays:\n        if isinstance(ar, DistArray):\n            rarrays.extend(ar._subarrays)\n        elif isinstance(ar, RemoteArray):\n            rarrays.append(ar)\n        else:\n            da = _scatter_ndarray(ar, axis, blocksize)\n            for ra in da._subarrays:\n                rarrays.append(ra)\n            del da\n    del arrays\n    eid = rarrays[0]._id.engine\n    if all(((ra._id.engine == eid) for ra in rarrays)):\n        if (eid == engine.eid):\n            return concatenate([gather(r) for r in rarrays], axis)\n        else:\n            return call(concatenate, rarrays, axis)\n    else:\n        return DistArray(rarrays, axis)", "docstring": "Join a sequence of arrays together.\nWill aim to join `ndarray`, `RemoteArray`, and `DistArray` without moving\ntheir data, if they happen to be on different engines.\n\nArgs:\ntup (sequence of array_like): Arrays to be concatenated. They must have\nthe same shape, except in the dimension corresponding to `axis`.\naxis (int, optional): The axis along which the arrays will be joined.\n\nReturns:\nres: `ndarray`, if inputs were all local\n`RemoteArray`, if inputs were all on the same remote engine\n`DistArray`, if inputs were already scattered on different engines", "source": "codesearchnet"}
{"code": "def vmstats():\n    spi = SYSTEM_PERFORMANCE_INFORMATION()\n    retlen = ctypes.c_ulong()\n    ctypes.windll.ntdll.NtQuerySystemInformation(2, ctypes.byref(spi), ctypes.sizeof(spi), ctypes.byref(retlen))\n    ret = {}\n    for field in spi._fields_:\n        ret.update({field[0]: getattr(spi, field[0])})\n    return ret", "docstring": "Return information about the virtual memory on the machine\n\nReturns:\ndict: A dictionary of virtual memory stats\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt * status.vmstats", "source": "codesearchnet"}
{"code": "def logical_interconnect_groups(self):\n    if (not self.__logical_interconnect_groups):\n        self.__logical_interconnect_groups = LogicalInterconnectGroups(self.__connection)\n    return self.__logical_interconnect_groups", "docstring": "Gets the LogicalInterconnectGroups API client.\n\nReturns:\nLogicalInterconnectGroups:", "source": "codesearchnet"}
{"code": "def info(self, show_defaults=False):\n    pprinter = PrettyPrinter(show_options=True, show_defaults=show_defaults)\n    print(pprinter.pprint(self._obj))", "docstring": "Prints a repr of the object including any applied options.\n\nArgs:\nshow_defaults: Whether to include default options", "source": "codesearchnet"}
{"code": "def __init__(self, elements=None):\n        \n        super(TermList, self).__init__()\n        self._contents = set()\n        try:\n            for t in elements or []:\n                super(TermList, self).append(t)\n                self._contents.add(t.id)\n        except AttributeError:\n            raise TypeError('TermList can only contain Terms.')", "docstring": "Create a new `TermList`.\n\nArguments:\nelements (collections.Iterable, optional): an Iterable\nthat yields `Term` objects.\n\nRaises:\nTypeError: when the given ``elements`` are not instances\nof `Term`.", "source": "juraj-google-style"}
{"code": "def set_card_simple(self, title, content):\n    self.response.card.type = 'Simple'\n    self.response.card.title = title\n    self.response.card.content = content", "docstring": "Set response card as simple type.\n\ntitle and content cannot exceed 8,000 characters.\n\nArgs:\ntitle: str. Title of Simple or Standard type card.\ncontent: str. Content of Simple type card.", "source": "codesearchnet"}
{"code": "def WriteFileHash(self, path, hash_value):\n    string = '{0:s}\\t{1:s}'.format(hash_value, path)\n    encoded_string = self._EncodeString(string)\n    print(encoded_string)", "docstring": "Writes the file path and hash to stdout.\n\nArgs:\npath (str): path of the file.\nhash_value (str): message digest hash calculated over the file data.", "source": "codesearchnet"}
{"code": "def start(self, use_atexit=True):\n        \n        assert not self._process\n\n        _logger.debug('Starting process %s', self._proc_args)\n\n        process_future = asyncio.create_subprocess_exec(\n            stdin=subprocess.PIPE,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n            *self._proc_args\n        )\n        self._process = yield from process_future\n\n        self._stderr_reader = asyncio.async(self._read_stderr())\n        self._stdout_reader = asyncio.async(self._read_stdout())\n\n        if use_atexit:\n            atexit.register(self.close)", "docstring": "Start the executable.\n\nArgs:\nuse_atexit (bool): If True, the process will automatically be\nterminated at exit.", "source": "juraj-google-style"}
{"code": "def _checkBeginIndicesAnnotations(self, out, a):\n    begin_line_num = 0\n    while not out.lines[begin_line_num].startswith('array'):\n        begin_line_num += 1\n    element_index = 0\n    for line_num in range(begin_line_num, len(out.lines)):\n        line = out.lines[line_num]\n        if '...' in line:\n            raise ValueError('Unexpected found ellipses in line representing array')\n        matches = re.finditer(self._ELEMENT_REGEX, line)\n        for line_item_index, _ in enumerate(matches):\n            subscripts = list(np.unravel_index(element_index, a.shape))\n            if line_item_index == 0:\n                self.assertEqual({tensor_format.BEGIN_INDICES_KEY: subscripts}, out.annotations[line_num])\n            element_index += 1\n    self.assertEqual(element_index, np.size(a))", "docstring": "Check the beginning-index annotations of an ndarray representation.\n\nArgs:\nout: An instance of RichTextLines representing a numpy.ndarray.\na: The numpy.ndarray being represented.\n\nRaises:\nValueError: if any ellipses (\"...\") are found in the lines representing\nthe array.", "source": "github-repos"}
{"code": "def add_datasets(self, datasets, datasets_to_check=None):\n    if (datasets_to_check is None):\n        datasets_to_check = self.get_datasets()\n    alldatasetsadded = True\n    for dataset in datasets:\n        if (not self.add_dataset(dataset, datasets_to_check=datasets_to_check)):\n            alldatasetsadded = False\n    return alldatasetsadded", "docstring": "Add multiple datasets\n\nArgs:\ndatasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries\ndatasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.\n\nReturns:\nbool: True if all datasets added or False if any already present", "source": "codesearchnet"}
{"code": "def __init__(self, maximum_iterations=None, parallel_iterations=10, back_prop=True, swap_memory=False, name='while_context', grad_state=None, context_def=None, import_scope=None):\n    if context_def:\n        self._init_from_proto(context_def, import_scope=import_scope)\n    else:\n        ControlFlowContext.__init__(self)\n        self._init_from_args(maximum_iterations, parallel_iterations, back_prop, swap_memory, name)\n    self._grad_state = grad_state", "docstring": "\"Creates a `WhileContext`.\n\nArgs:\nmaximum_iterations: Optional upper bound on number of loop iterations.\nparallel_iterations: The number of iterations allowed to run in parallel.\nback_prop: Whether backprop is enabled for this while loop.\nswap_memory: Whether GPU-CPU memory swap is enabled for this loop.\nname: Optional name prefix for the returned tensors.\ngrad_state: The gradient loop state.\ncontext_def: Optional `WhileContextDef` protocol buffer to initialize the\n`Whilecontext` python object from.\nimport_scope: Optional `string`. Name scope to add. Only used when\ninitialing from protocol buffer.", "source": "github-repos"}
{"code": "def GetPresetsByOperatingSystem(self, operating_system):\n    preset_definitions = []\n    for preset_definition in self._definitions.values():\n        for preset_operating_system in preset_definition.operating_systems:\n            if preset_operating_system.IsEquivalent(operating_system):\n                preset_definitions.append(preset_definition)\n    return preset_definitions", "docstring": "Retrieves preset definitions for a specific operating system.\n\nArgs:\noperating_system (OperatingSystemArtifact): an operating system artifact\nattribute container.\n\nReturns:\nlist[PresetDefinition]: preset definition that correspond with the\noperating system.", "source": "codesearchnet"}
{"code": "def zbar_function(fname, restype, *args):\n    prototype = CFUNCTYPE(restype, *args)\n    return prototype((fname, load_libzbar()))", "docstring": "Returns a foreign function exported by `zbar`.\n\nArgs:\nfname (:obj:`str`): Name of the exported function as string.\nrestype (:obj:): Return type - one of the `ctypes` primitive C data\ntypes.\n*args: Arguments - a sequence of `ctypes` primitive C data types.\n\nReturns:\ncddl.CFunctionType: A wrapper around the function.", "source": "codesearchnet"}
{"code": "def __init__(self, file_path_prefix, coder, file_name_suffix='', num_shards=0, shard_name_template=None, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO, *, max_records_per_shard=None, max_bytes_per_shard=None, skip_if_empty=False):\n    if not isinstance(file_path_prefix, (str, ValueProvider)):\n        raise TypeError('file_path_prefix must be a string or ValueProvider;got %r instead' % file_path_prefix)\n    if not isinstance(file_name_suffix, (str, ValueProvider)):\n        raise TypeError('file_name_suffix must be a string or ValueProvider;got %r instead' % file_name_suffix)\n    if not CompressionTypes.is_valid_compression_type(compression_type):\n        raise TypeError('compression_type must be CompressionType object but was %s' % type(compression_type))\n    if shard_name_template is None:\n        shard_name_template = DEFAULT_SHARD_NAME_TEMPLATE\n    elif shard_name_template == '':\n        num_shards = 1\n    if isinstance(file_path_prefix, str):\n        file_path_prefix = StaticValueProvider(str, file_path_prefix)\n    if isinstance(file_name_suffix, str):\n        file_name_suffix = StaticValueProvider(str, file_name_suffix)\n    self.file_path_prefix = file_path_prefix\n    self.file_name_suffix = file_name_suffix\n    self.num_shards = num_shards\n    self.coder = coder\n    self.shard_name_format = self._template_to_format(shard_name_template)\n    self.shard_name_glob_format = self._template_to_glob_format(shard_name_template)\n    self.compression_type = compression_type\n    self.mime_type = mime_type\n    self.max_records_per_shard = max_records_per_shard\n    self.max_bytes_per_shard = max_bytes_per_shard\n    self.skip_if_empty = skip_if_empty", "docstring": "Raises:\nTypeError: if file path parameters are not a :class:`str` or\n:class:`~apache_beam.options.value_provider.ValueProvider`, or if\n**compression_type** is not member of\n:class:`~apache_beam.io.filesystem.CompressionTypes`.\nValueError: if **shard_name_template** is not of expected\nformat.", "source": "github-repos"}
{"code": "def search(nasbench, search_model, algo, repeat_id, max_train_hours=5000000.0):\n    nasbench.reset_budget_counters()\n    times, best_valids, best_tests = ([0.0], [0.0], [0.0])\n    valid_models = 0\n    time_spent = 0\n    start_time = time.time()\n    last_report_time = start_time\n    for model, feedback in pg.sample(search_model, algo, name=str(repeat_id)):\n        spec = model()\n        if nasbench.is_valid(spec):\n            results = nasbench.query(spec)\n            valid_models += 1\n            feedback(results['validation_accuracy'])\n            if results['validation_accuracy'] > best_valids[-1]:\n                best_valids.append(results['validation_accuracy'])\n                best_tests.append(results['test_accuracy'])\n            else:\n                best_valids.append(best_valids[-1])\n                best_tests.append(best_tests[-1])\n            time_spent, _ = nasbench.get_budget_counters()\n            times.append(time_spent)\n            if time_spent > max_train_hours:\n                feedback.end_loop()\n                break\n        else:\n            feedback.skip()\n        if feedback.id % 100 == 0:\n            now = time.time()\n            print(f'Tried {feedback.id} models, valid {valid_models}, time_spent {time_spent}, elapse since last report: {now - last_report_time} seconds.')\n            last_report_time = now\n    print(f'Total time elapse: {time.time() - start_time} seconds.')\n    return (times, best_valids, best_tests)", "docstring": "Define the search procedure.\n\nArgs:\nnasbench: NASBench object.\nsearch_model: which is a `model` object annotated with `oneof`.\nalgo: algorithm for search.\nrepeat_id: identifier of current repeat.\nmax_train_hours: max time budget to train the models, which is the sum\nof training time queried from NAS-Bench.\n\nReturns:\nA tuple of (total time spent at step i for all steps,\nbest validation accuracy at step i for all steps,\nbest test accuracy at step i for all steps)", "source": "github-repos"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.urls = self.URLS\n\n    values_dict = {}\n    for registry_value in registry_key.GetValues():\n      value_name = registry_value.name or '(default)'\n\n      if value_name == 'BootExecute':\n        \n        \n        if registry_value.DataIsString():\n          value_string = registry_value.GetDataAsObject()\n\n        elif registry_value.DataIsMultiString():\n          value_string = ''.join(registry_value.GetDataAsObject())\n\n        elif registry_value.DataIsBinaryData():\n          value_string = registry_value.GetDataAsObject()\n\n        else:\n          value_string = ''\n          error_string = (\n              'Key: {0:s}, value: {1:s}: unsupported value data type: '\n              '{2:s}.').format(\n                  registry_key.path, value_name,\n                  registry_value.data_type_string)\n          parser_mediator.ProduceExtractionWarning(error_string)\n\n        \n        event_data.regvalue = {'BootExecute': value_string}\n\n        event = time_events.DateTimeValuesEvent(\n            registry_key.last_written_time,\n            definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      else:\n        values_dict[value_name] = registry_value.GetDataAsObject()\n\n    event_data.regvalue = values_dict\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def GetOutputClass(cls, name):\n    if (not isinstance(name, py2to3.STRING_TYPES)):\n        raise ValueError('Name attribute is not a string.')\n    name = name.lower()\n    if (name not in cls._output_classes):\n        raise KeyError('Name: [{0:s}] not registered as an output module.'.format(name))\n    return cls._output_classes[name]", "docstring": "Retrieves the output class for a specific name.\n\nArgs:\nname (str): name of the output module.\n\nReturns:\ntype: output module class.\n\nRaises:\nKeyError: if there is no output class found with the supplied name.\nValueError: if name is not a string.", "source": "codesearchnet"}
{"code": "def poly_to_power_basis(bezier_coeffs):\n    (num_coeffs,) = bezier_coeffs.shape\n    if (num_coeffs == 1):\n        return bezier_coeffs\n    elif (num_coeffs == 2):\n        (coeff0, coeff1) = bezier_coeffs\n        return np.asfortranarray([coeff0, (coeff1 - coeff0)])\n    elif (num_coeffs == 3):\n        (coeff0, coeff1, coeff2) = bezier_coeffs\n        return np.asfortranarray([coeff0, (2.0 * (coeff1 - coeff0)), ((coeff2 - (2.0 * coeff1)) + coeff0)])\n    elif (num_coeffs == 4):\n        (coeff0, coeff1, coeff2, coeff3) = bezier_coeffs\n        return np.asfortranarray([coeff0, (3.0 * (coeff1 - coeff0)), (3.0 * ((coeff2 - (2.0 * coeff1)) + coeff0)), (((coeff3 - (3.0 * coeff2)) + (3.0 * coeff1)) - coeff0)])\n    else:\n        raise _helpers.UnsupportedDegree((num_coeffs - 1), supported=(0, 1, 2, 3))", "docstring": "Convert a B |eacute| zier curve to polynomial in power basis.\n\n.. note::\n\nThis assumes, but does not verify, that the \"B |eacute| zier\ndegree\" matches the true degree of the curve. Callers can\nguarantee this by calling :func:`.full_reduce`.\n\nArgs:\nbezier_coeffs (numpy.ndarray): A 1D array of coefficients in\nthe Bernstein basis.\n\nReturns:\nnumpy.ndarray: 1D array of coefficients in monomial basis.\n\nRaises:\n.UnsupportedDegree: If the degree of the curve is not among\n0, 1, 2 or 3.", "source": "codesearchnet"}
{"code": "def SCM(root_dir, repo=None):\n    if (Git.is_repo(root_dir) or Git.is_submodule(root_dir)):\n        return Git(root_dir, repo=repo)\n    return NoSCM(root_dir, repo=repo)", "docstring": "Returns SCM instance that corresponds to a repo at the specified\npath.\n\nArgs:\nroot_dir (str): path to a root directory of the repo.\nrepo (dvc.repo.Repo): dvc repo instance that root_dir belongs to.\n\nReturns:\ndvc.scm.base.Base: SCM instance.", "source": "codesearchnet"}
{"code": "def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):\n    if not isinstance(model, Model):\n        raise ValueError('Expected `model` argument to be a `Model` instance, got ', model)\n    if isinstance(model, Sequential):\n        raise ValueError('Expected `model` argument to be a functional `Model` instance, got a `Sequential` instance instead:', model)\n    if not model._is_graph_network:\n        raise ValueError('Expected `model` argument to be a functional `Model` instance, but got a subclass model instead.')\n    new_input_layers = {}\n    if input_tensors is not None:\n        input_tensors = nest.flatten(input_tensors)\n        for i, input_tensor in enumerate(input_tensors):\n            original_input_layer = model._input_layers[i]\n            if not backend.is_keras_tensor(input_tensor):\n                name = original_input_layer.name\n                input_tensor = Input(tensor=input_tensor, name='input_wrapper_for_' + name)\n                newly_created_input_layer = input_tensor._keras_history.layer\n                new_input_layers[original_input_layer] = newly_created_input_layer\n            else:\n                new_input_layers[original_input_layer] = original_input_layer\n    if not callable(layer_fn):\n        raise ValueError('Expected `layer_fn` argument to be a callable.')\n    model_configs, created_layers = _clone_layers_and_model_config(model, new_input_layers, layer_fn)\n    input_tensors, output_tensors, created_layers = functional.reconstruct_from_config(model_configs, created_layers=created_layers)\n    metrics_names = model.metrics_names\n    model = Model(input_tensors, output_tensors, name=model.name)\n    ancillary_layers = [layer for layer in created_layers.values() if layer not in model.layers]\n    if ancillary_layers:\n        new_nodes = nest.flatten([layer.inbound_nodes[1:] if functional._should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values()])\n        _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes)\n    return model", "docstring": "Clone a functional `Model` instance.\n\nModel cloning is similar to calling a model on new inputs,\nexcept that it creates new layers (and thus new weights) instead\nof sharing the weights of the existing layers.\n\nInput layers are always cloned.\n\nArgs:\nmodel: Instance of `Model`.\ninput_tensors: optional list of input tensors\nto build the model upon. If not provided,\nplaceholders will be created.\nlayer_fn: callable to be applied on non-input layers in the model. By\ndefault it clones the layer. Another example is to preserve the layer\nto share the weights. This is required when we create a per-replica\ncopy of the model with distribution strategy; we want the weights to\nbe shared but still feed inputs separately so we create new input\nlayers.\n\nReturns:\nAn instance of `Model` reproducing the behavior\nof the original model, on top of new inputs tensors,\nusing newly instantiated weights.\n\nRaises:\nValueError: in case of invalid `model` argument value or `layer_fn`\nargument value.", "source": "github-repos"}
{"code": "def wait_idle(self, timeout=1.0):\n\n    async def _awaiter():\n        background_work = {x.join() for x in self._work_queues}\n        for event in self._events:\n            if (not event.is_set()):\n                background_work.add(event.wait())\n        (_done, pending) = (await asyncio.wait(background_work, timeout=timeout))\n        if (len(pending) > 0):\n            raise TimeoutExpiredError('Timeout waiting for event loop to become idle', pending=pending)\n    if self._on_emulation_thread():\n        return asyncio.wait_for(_awaiter(), timeout=timeout)\n    self.run_task_external(_awaiter())\n    return None", "docstring": "Wait until the rpc queue is empty.\n\nThis method may be called either from within the event loop or from\noutside of it.  If it is called outside of the event loop it will\nblock the calling thread until the rpc queue is temporarily empty.\n\nIf it is called from within the event loop it will return an awaitable\nobject that can be used to wait for the same condition.\n\nThe awaitable object will already have a timeout if the timeout\nparameter is passed.\n\nArgs:\ntimeout (float): The maximum number of seconds to wait.", "source": "codesearchnet"}
{"code": "def reactions_to_files(model, dest, writer, split_subsystem):\n\n    def safe_file_name(origin_name):\n        safe_name = re.sub('\\\\W+', '_', origin_name, flags=re.UNICODE)\n        safe_name = re.sub('_+', '_', safe_name.lower(), flags=re.UNICODE)\n        safe_name = safe_name.strip('_')\n        return safe_name\n    common_reactions = []\n    reaction_files = []\n    if (not split_subsystem):\n        common_reactions = sorted(model.reactions, key=(lambda r: r.id))\n        if (len(common_reactions) > 0):\n            reaction_file = 'reactions.yaml'\n            with open(os.path.join(dest, reaction_file), 'w') as f:\n                writer.write_reactions(f, common_reactions)\n            reaction_files.append(reaction_file)\n    else:\n        subsystems = {}\n        for reaction in sorted(model.reactions, key=(lambda r: r.id)):\n            if ('subsystem' in reaction.properties):\n                subsystem_file = safe_file_name(reaction.properties['subsystem'])\n                subsystems.setdefault(subsystem_file, []).append(reaction)\n            else:\n                common_reactions.append(reaction)\n        subsystem_folder = 'reactions'\n        sub_existance = False\n        for (subsystem_file, reactions) in iteritems(subsystems):\n            if (len(reactions) < _MAX_REACTION_COUNT):\n                for reaction in reactions:\n                    common_reactions.append(reaction)\n            elif (len(reactions) > 0):\n                mkdir_p(os.path.join(dest, subsystem_folder))\n                subsystem_file = os.path.join(subsystem_folder, '{}.yaml'.format(subsystem_file))\n                with open(os.path.join(dest, subsystem_file), 'w') as f:\n                    writer.write_reactions(f, reactions)\n                reaction_files.append(subsystem_file)\n                sub_existance = True\n        reaction_files.sort()\n        if sub_existance:\n            reaction_file = os.path.join(subsystem_folder, 'other_reactions.yaml')\n        else:\n            reaction_file = 'reactions.yaml'\n        if (len(common_reactions) > 0):\n            with open(os.path.join(dest, reaction_file), 'w') as f:\n                writer.write_reactions(f, common_reactions)\n            reaction_files.append(reaction_file)\n    return reaction_files", "docstring": "Turn the reaction subsystems into their own files.\n\nIf a subsystem has a number of reactions over the threshold, it gets its\nown YAML file. All other reactions, those that don't have a subsystem or\nare in a subsystem that falls below the threshold, get added to a common\nreaction file.\n\nArgs:\nmodel: :class:`psamm_import.model.MetabolicModel`.\ndest: output path for model files.\nwriter: :class:`psamm.datasource.native.ModelWriter`.\nsplit_subsystem: Divide reactions into multiple files by subsystem.", "source": "codesearchnet"}
{"code": "def expand_valid_values(valid_values):\n    if ('${GROUP_TYPES}' in valid_values):\n        valid_values.remove('${GROUP_TYPES}')\n        valid_values.extend(['Adversary', 'Campaign', 'Document', 'Email', 'Event', 'Incident', 'Intrusion Set', 'Signature', 'Task', 'Threat'])\n    elif ('${OWNERS}' in valid_values):\n        valid_values.remove('${OWNERS}')\n        valid_values.append('')\n    elif ('${USERS}' in valid_values):\n        valid_values.remove('${USERS}')\n        valid_values.append('')\n    return valid_values", "docstring": "Expand supported playbook variables to their full list.\n\nArgs:\nvalid_values (list): The list of valid values for Choice or MultiChoice inputs.\n\nReturns:\nList: An expanded list of valid values for Choice or MultiChoice inputs.", "source": "codesearchnet"}
{"code": "def AddContract(self, contract):\n    if (not (contract.PublicKeyHash.ToBytes() in self._keys.keys())):\n        raise Exception('Invalid operation - public key mismatch')\n    self._contracts[contract.ScriptHash.ToBytes()] = contract\n    if (contract.ScriptHash in self._watch_only):\n        self._watch_only.remove(contract.ScriptHash)", "docstring": "Add a contract to the wallet.\n\nArgs:\ncontract (Contract): a contract of type neo.SmartContract.Contract.\n\nRaises:\nException: Invalid operation - public key mismatch.", "source": "codesearchnet"}
{"code": "def global_norm(t_list, name=None):\n    if not isinstance(t_list, collections_abc.Sequence) or isinstance(t_list, str):\n        raise TypeError(f'`t_list` should be a sequence of tensors. Received {type(t_list)}.')\n    t_list = list(t_list)\n    with ops.name_scope(name, 'global_norm', t_list) as name:\n        values = [ops.convert_to_tensor(t.values if isinstance(t, indexed_slices.IndexedSlices) else t, name='t_%d' % i) if t is not None else t for i, t in enumerate(t_list)]\n        half_squared_norms = []\n        for v in values:\n            if v is not None:\n                with ops.colocate_with(v):\n                    half_squared_norms.append(gen_nn_ops.l2_loss(v))\n        half_squared_norm = math_ops.reduce_sum(array_ops_stack.stack(half_squared_norms))\n        norm = math_ops.sqrt(half_squared_norm * constant_op.constant(2.0, dtype=half_squared_norm.dtype), name='global_norm')\n    return norm", "docstring": "Computes the global norm of multiple tensors.\n\nGiven a tuple or list of tensors `t_list`, this operation returns the\nglobal norm of the elements in all tensors in `t_list`. The global norm is\ncomputed as:\n\n`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`\n\nAny entries in `t_list` that are of type None are ignored.\n\nArgs:\nt_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.\nname: A name for the operation (optional).\n\nReturns:\nA 0-D (scalar) `Tensor` of type `float`.\n\nRaises:\nTypeError: If `t_list` is not a sequence.", "source": "github-repos"}
{"code": "def stage_in(self, file, executor):\n    if (file.scheme == 'ftp'):\n        working_dir = self.dfk.executors[executor].working_dir\n        stage_in_app = self._ftp_stage_in_app(executor=executor)\n        app_fut = stage_in_app(working_dir, outputs=[file])\n        return app_fut._outputs[0]\n    elif ((file.scheme == 'http') or (file.scheme == 'https')):\n        working_dir = self.dfk.executors[executor].working_dir\n        stage_in_app = self._http_stage_in_app(executor=executor)\n        app_fut = stage_in_app(working_dir, outputs=[file])\n        return app_fut._outputs[0]\n    elif (file.scheme == 'globus'):\n        globus_ep = self._get_globus_endpoint(executor)\n        stage_in_app = self._globus_stage_in_app()\n        app_fut = stage_in_app(globus_ep, outputs=[file])\n        return app_fut._outputs[0]\n    else:\n        raise Exception('Staging in with unknown file scheme {} is not supported'.format(file.scheme))", "docstring": "Transport the file from the input source to the executor.\n\nThis function returns a DataFuture.\n\nArgs:\n- self\n- file (File) : file to stage in\n- executor (str) : an executor the file is going to be staged in to.\nIf the executor argument is not specified for a file\nwith 'globus' scheme, the file will be staged in to\nthe first executor with the \"globus\" key in a config.", "source": "codesearchnet"}
{"code": "def get_added_vocab(self) -> dict[str, int]:\n    return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}", "docstring": "Returns the added tokens in the vocabulary as a dictionary of token to index.\n\nReturns:\n`Dict[str, int]`: The added tokens.", "source": "github-repos"}
{"code": "def DEFINE_alias(name, original_name, flag_values=FLAGS, module_name=None):  \n  \n  if original_name not in flag_values:\n    raise UnrecognizedFlagError(original_name)\n  flag = flag_values[original_name]\n\n  class _Parser(ArgumentParser):\n    \n\n    def parse(self, argument):\n      flag.parse(argument)\n      return flag.value\n\n  class _FlagAlias(Flag):\n    \n\n    @property\n    def value(self):\n      return flag.value\n\n    @value.setter\n    def value(self, value):\n      flag.value = value\n\n  help_msg = 'Alias for --%s.' % flag.name\n  \n  DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default,\n                         help_msg, boolean=flag.boolean),\n              flag_values, module_name)", "docstring": "Defines an alias flag for an existing one.\n\nArgs:\nname: A string, name of the alias flag.\noriginal_name: A string, name of the original flag.\nflag_values: FlagValues object with which the flag will be registered.\nmodule_name: A string, the name of the module that defines this flag.\n\nRaises:\ngflags.FlagError:\nUnrecognizedFlagError: if the referenced flag doesn't exist.\nDuplicateFlagError: if the alias name has been used by some existing flag.", "source": "juraj-google-style"}
{"code": "def save(self, savefile):\n        \n        with open(str(savefile), 'wb') as f:\n            self.write_to_fp(f)\n            log.debug(\"Saved to %s\", savefile)", "docstring": "Do the TTS API request and write result to file.\n\nArgs:\nsavefile (string): The path and file name to save the ``mp3`` to.\n\nRaises:\n:class:`gTTSError`: When there's an error with the API request.", "source": "juraj-google-style"}
{"code": "def min_rank(series, ascending=True):\n    \n\n    ranks = series.rank(method='min', ascending=ascending)\n    return ranks", "docstring": "Equivalent to `series.rank(method='min', ascending=ascending)`.\n\nArgs:\nseries: column to rank.\n\nKwargs:\nascending (bool): whether to rank in ascending order (default is `True`).", "source": "juraj-google-style"}
{"code": "def compute_eos_token_mask(self, input_ids: torch.LongTensor, eos_token_id: int) -> torch.LongTensor:\n    self._check_input_ids_shape(input_ids)\n    noneos_masks = []\n    all_eos_equated = input_ids == eos_token_id\n    for eos_equated in all_eos_equated:\n        nonzero_idx = torch.nonzero(eos_equated)\n        noneos_mask = torch.ones_like(eos_equated)\n        if nonzero_idx.shape[0] != 0:\n            noneos_mask[nonzero_idx[0][0]:] = 0\n        noneos_masks.append(noneos_mask)\n    return torch.stack(noneos_masks, dim=0)", "docstring": "Computes repetitions mask.\n\n1 stands for ngrams that don't contain EOS tokens and vice versa.\n\nArgs:\ninput_ids (`torch.LongTensor`):\nInput token ids (batch_size, input_len).\neos_token_id (`int`):\nEOS token ID.\n\nReturns:\nEOS token mask (batch_size, input_len).", "source": "github-repos"}
{"code": "def ucast_ip_mask(ip_addr_and_mask, return_tuple=True):\n    \n    regex_ucast_ip_and_mask = __re.compile(\"^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$\")\n    if return_tuple:\n        while not regex_ucast_ip_and_mask.match(ip_addr_and_mask):\n            print(\"Not a good unicast IP and CIDR mask combo.\")\n            print(\"Please try again.\")\n            ip_addr_and_mask = input(\"Please enter a unicast IP address and mask in the follwing format x.x.x.x/x: \")\n        ip_cidr_split = ip_addr_and_mask.split(\"/\")\n        ip_addr = ip_cidr_split[0]\n        cidr = ip_cidr_split[1]\n        return ip_addr, cidr\n    elif not return_tuple:\n        if not regex_ucast_ip_and_mask.match(ip_addr_and_mask):\n            return False\n        else:\n            return True", "docstring": "Function to check if a address is unicast and that the CIDR mask is good\nArgs:\nip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24\nreturn_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False\n\nReturns: see return_tuple for return options", "source": "juraj-google-style"}
{"code": "def _get_annotations(self, text, language=''):\n    body = {'document': {'type': 'PLAIN_TEXT', 'content': text}, 'features': {'extract_syntax': True}, 'encodingType': 'UTF32'}\n    if language:\n        body['document']['language'] = language\n    request = self.service.documents().annotateText(body=body)\n    response = request.execute()\n    tokens = response.get('tokens', [])\n    language = response.get('language')\n    return {'tokens': tokens, 'language': language}", "docstring": "Returns the list of annotations retrieved from the given text.\n\nArgs:\ntext (str): Input text.\nlanguage (:obj:`str`, optional): Language code.\n\nReturns:\nResults in a dictionary. :code:`tokens` contains the list of annotations\nand :code:`language` contains the inferred language from the input.", "source": "codesearchnet"}
{"code": "def connect_direct(self, connection_string, no_rpc=False, force=False):\n    if ((not force) and self.connected):\n        raise HardwareError((\"Cannot connect when we are already connected to '%s'\" % self.connection_string))\n    self._loop.run_coroutine(self.adapter.connect(0, connection_string))\n    try:\n        if no_rpc:\n            self._logger.info('Not opening RPC interface on device %s', self.connection_string)\n        else:\n            self._loop.run_coroutine(self.adapter.open_interface(0, 'rpc'))\n    except HardwareError as exc:\n        self._logger.exception('Error opening RPC interface on device %s', connection_string)\n        self._loop.run_coroutine(self.adapter.disconnect(0))\n        raise exc\n    except Exception as exc:\n        self._logger.exception('Error opening RPC interface on device %s', connection_string)\n        self._loop.run_coroutine(self.adapter.disconnect(0))\n        raise HardwareError(('Could not open RPC interface on device due to an exception: %s' % str(exc))) from exc\n    self.connected = True\n    self.connection_string = connection_string\n    self.connection_interrupted = False", "docstring": "Directly connect to a device using its stream specific connection string.\n\nNormally, all connections to a device include opening the RPC\ninterface to send RPCs.  However, there are certain, very specific,\ncircumstances when you would not want to or be able to open the RPC\ninterface (such as when you are using the debug interface on a bare\nMCU that has not been programmed yet).  In those cases you can pass\nno_rpc=True to not attempt to open the RPC interface.  If you do not\nopen the RPC interface at connection time, there is no public\ninterface to open it later, so you must disconnect and reconnect to\nthe device in order to open the interface.\n\nArgs:\nconnection_string (str): The connection string that identifies the desired device.\nno_rpc (bool): Do not open the RPC interface on the device (default=False).\nforce (bool): Whether to force another connection even if we think we are currently\nconnected.  This is for internal use and not designed to be set externally.", "source": "codesearchnet"}
{"code": "def _parse_exe_version_string(version_str):\n    matcher = re.search('Python (\\\\d+\\\\.\\\\d+)\\\\.\\\\d+', version_str)\n    if matcher:\n        return utils.version_from_string(matcher.group(1))\n    else:\n        return None", "docstring": "Parse the version string of a Python executable.\n\nArguments:\nversion_str: Version string as emitted by running `PYTHON_EXE -V`\n\nReturns:\nVersion as (major, minor) tuple, or None if it could not be determined.", "source": "github-repos"}
{"code": "def load(self, profile_args):\n        \n        for key, value in profile_args.items():\n            self.add(key, value)", "docstring": "Load provided CLI Args.\n\nArgs:\nargs (dict): Dictionary of args in key/value format.", "source": "juraj-google-style"}
{"code": "def highway_core_with_recurrent_dropout(hidden_size, num_layers, keep_prob=0.5, **kwargs):\n    core = HighwayCore(hidden_size, num_layers, **kwargs)\n    return (RecurrentDropoutWrapper(core, keep_prob), core)", "docstring": "Highway core with recurrent dropout.\n\nArgs:\nhidden_size: (int) Hidden size dimensionality.\nnum_layers: (int) Number of highway layers.\nkeep_prob: the probability to keep an entry when applying dropout.\n**kwargs: Extra keyword arguments to pass to the highway core.\n\nReturns:\nA tuple (train_core, test_core) where train_core is a higway core with\nrecurrent dropout enabled to be used for training and test_core is the\nsame highway core without recurrent dropout.", "source": "codesearchnet"}
{"code": "def __init__(self, name, annotation):\n        \n        self._name = name\n        self._annotation = annotation", "docstring": "Initializer.\n\nArgs:\nname: the name of the bound arg\nannotation: an Annotation", "source": "juraj-google-style"}
{"code": "def _has_extras(ctx):\n    \n    if not ctx.index.entries:\n        return False\n\n    return ctx.data_offset > 8 and ctx.data_offset > (ctx.signatures.offset_end + 8)", "docstring": "Determine if a MAR file has an additional section block or not.\n\nIt does this by looking at where file data starts in the file. If this\nstarts immediately after the signature data, then no additional sections are present.\n\nArgs:\nctx (context): construct parsing context\n\nReturns:\nTrue if the MAR file has an additional section block\nFalse otherwise", "source": "juraj-google-style"}
{"code": "def extract(self, text: str) -> List[Extraction]:\n        \n\n        doc = self._parser(text)\n\n        extractions = list()\n        for sent in doc.sents:\n            this_extraction = Extraction(value=sent.text,\n                                         extractor_name=self.name,\n                                         start_token=sent[0],\n                                         end_token=sent[-1],\n                                         start_char=sent.text[0],\n                                         end_char=sent.text[-1])\n            extractions.append(this_extraction)\n\n        return extractions", "docstring": "Splits text by sentences.\n\nArgs:\ntext (str): Input text to be extracted.\n\nReturns:\nList[Extraction]: the list of extraction or the empty list if there are no matches.", "source": "juraj-google-style"}
{"code": "def __init__(self, issues = None):\n\t\t\n\t\tself._issues = []\n\t\tself._config = {}\n\t\tself._project = None\n\t\tself.issues = issues", "docstring": "Class constructor.\n\nArgs:\nissues (list): List of `Issue` instances", "source": "juraj-google-style"}
{"code": "def compute_delta(deps: List[str], imports: List[str], rule_dir: str, source_to_rules: SourceToRule, rule_name: str) -> Optional[DepsDelta]:\n    issues = []\n    adds = set()\n    subs = set()\n    expanded_deps = set([expand_dep(dep, rule_dir) for dep in deps])\n    used_deps = set()\n    for imp in imports:\n        imp_items = tuple(imp.split('.'))\n        if imp_items[0] in BUILT_IN_MODULES or imp in BUILT_IN_MODULES:\n            continue\n        possible_srcs = list_possible_source_of_import(imp_items)\n        matching_possible_src = None\n        for possible_src in possible_srcs:\n            if possible_src in source_to_rules:\n                matching_possible_src = possible_src\n                break\n        if matching_possible_src is None:\n            issues.append(f'Cannot infer dependency for \"{imp}\". Possible source files: {possible_srcs}.')\n            continue\n        possible_deps = source_to_rules[matching_possible_src]\n        if len(possible_deps) > 1:\n            issues.append(f'Multiple possible rules for \"{imp}\"')\n        if possible_deps[0] == expand_dep(':' + rule_name, rule_dir):\n            continue\n        if possible_deps[0] not in expanded_deps:\n            adds.add(possible_deps[0])\n        else:\n            used_deps.add(possible_deps[0])\n    for dep in expanded_deps:\n        if dep in used_deps:\n            continue\n        subs.add(dep)\n    if adds or subs or issues:\n        return DepsDelta(adds=list(adds), subs=list(subs), issues=issues)\n    else:\n        return None", "docstring": "Computes the operation on the deps to support all the imports.\n\nArgs:\ndeps: Dependencies of the rule.\nimports: Imports of the rule.\nrule_dir: Path of the rule relative to the repo root.\nsource_to_rules: Mapping from all available source files to rules.", "source": "github-repos"}
{"code": "def add(name, **kwargs):\n    \n    if not info(name):\n        comp_obj = _get_computer_object()\n        try:\n            new_group = comp_obj.Create('group', name)\n            new_group.SetInfo()\n            log.info('Successfully created group %s', name)\n        except pywintypes.com_error as exc:\n            msg = 'Failed to create group {0}. {1}'.format(\n                name, win32api.FormatMessage(exc.excepinfo[5]))\n            log.error(msg)\n            return False\n    else:\n        log.warning('The group %s already exists.', name)\n        return False\n    return True", "docstring": "Add the specified group\n\nArgs:\n\nname (str):\nThe name of the group to add\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.add foo", "source": "juraj-google-style"}
{"code": "def get_class_attributes(cls):\n    for (name, value) in cls.__dict__.items():\n        if GenericStruct._is_pyof_attribute(value):\n            (yield (name, value))", "docstring": "Return a generator for class attributes' names and value.\n\nThis method strict relies on the PEP 520 (Preserving Class Attribute\nDefinition Order), implemented on Python 3.6. So, if this behaviour\nchanges this whole lib can loose its functionality (since the\nattributes order are a strong requirement.) For the same reason, this\nlib will not work on python versions earlier than 3.6.\n\n.. code-block:: python3\n\nfor name, value in self.get_class_attributes():\nprint(\"attribute name: {}\".format(name))\nprint(\"attribute type: {}\".format(value))\n\nReturns:\ngenerator: tuples with attribute name and value.", "source": "codesearchnet"}
{"code": "def inference(self, observed_arr):\n        \n        if observed_arr.ndim < 4:\n            \n            observed_arr = np.expand_dims(observed_arr, axis=1)\n            self.__add_channel_flag = True\n        else:\n            self.__add_channel_flag = False\n\n        return super().inference(observed_arr)", "docstring": "Draws samples from the `true` distribution.\n\nArgs:\nobserved_arr:     `np.ndarray` of observed data points.\n\nReturns:\n`np.ndarray` of inferenced.", "source": "juraj-google-style"}
{"code": "def stop_dag(self, name=None):\n    return self._client.send(Request(action='stop_dag', payload={'name': (name if (name is not None) else self._dag_name)})).success", "docstring": "Send a stop signal to the specified dag or the dag that hosts this task.\n\nArgs:\nname str: The name of the dag that should be stopped. If no name is given the\ndag that hosts this task is stopped.\n\nUpon receiving the stop signal, the dag will not queue any new tasks and wait\nfor running tasks to terminate.\n\nReturns:\nbool: True if the signal was sent successfully.", "source": "codesearchnet"}
{"code": "def PrintResponse(batch_job_helper, response_xml):\n  \n  response = batch_job_helper.ParseResponse(response_xml)\n\n  if 'rval' in response['mutateResponse']:\n    for data in response['mutateResponse']['rval']:\n      if 'errorList' in data:\n        print 'Operation %s - FAILURE:' % data['index']\n        print '\\terrorType=%s' % data['errorList']['errors']['ApiError.Type']\n        print '\\ttrigger=%s' % data['errorList']['errors']['trigger']\n        print '\\terrorString=%s' % data['errorList']['errors']['errorString']\n        print '\\tfieldPath=%s' % data['errorList']['errors']['fieldPath']\n        print '\\treason=%s' % data['errorList']['errors']['reason']\n      if 'result' in data:\n        print 'Operation %s - SUCCESS.' % data['index']", "docstring": "Prints the BatchJobService response.\n\nArgs:\nbatch_job_helper: a BatchJobHelper instance.\nresponse_xml: a string containing a response from the BatchJobService.", "source": "juraj-google-style"}
{"code": "def compute_match(mapping, weight_dict):\n    \n    \n    if veryVerbose:\n        print(\"Computing match for mapping\", file=DEBUG_LOG)\n        print(mapping, file=DEBUG_LOG)\n    if tuple(mapping) in match_triple_dict:\n        if veryVerbose:\n            print(\"saved value\", match_triple_dict[tuple(mapping)], file=DEBUG_LOG)\n        return match_triple_dict[tuple(mapping)]\n    match_num = 0\n    \n    for i, m in enumerate(mapping):\n        if m == -1:\n            \n            continue\n        \n        current_node_pair = (i, m)\n        if current_node_pair not in weight_dict:\n            continue\n        if veryVerbose:\n            print(\"node_pair\", current_node_pair, file=DEBUG_LOG)\n        for key in weight_dict[current_node_pair]:\n            if key == -1:\n                \n                match_num += weight_dict[current_node_pair][key]\n                if veryVerbose:\n                    print(\"instance/attribute match\", weight_dict[current_node_pair][key], file=DEBUG_LOG)\n            \n            \n            \n            elif key[0] < i:\n                continue\n            elif mapping[key[0]] == key[1]:\n                match_num += weight_dict[current_node_pair][key]\n                if veryVerbose:\n                    print(\"relation match with\", key, weight_dict[current_node_pair][key], file=DEBUG_LOG)\n    if veryVerbose:\n        print(\"match computing complete, result:\", match_num, file=DEBUG_LOG)\n    \n    match_triple_dict[tuple(mapping)] = match_num\n    return match_num", "docstring": "Given a node mapping, compute match number based on weight_dict.\nArgs:\nmappings: a list of node index in AMR 2. The ith element (value j) means node i in AMR 1 maps to node j in AMR 2.\nReturns:\nmatching triple number\nComplexity: O(m*n) , m is the node number of AMR 1, n is the node number of AMR 2", "source": "juraj-google-style"}
{"code": "def get_attributes(self, uid=None, attribute_names=None):\n    if (uid is not None):\n        if (not isinstance(uid, six.string_types)):\n            raise TypeError('uid must be a string')\n    if (attribute_names is not None):\n        if (not isinstance(attribute_names, list)):\n            raise TypeError('attribute_names must be a list of strings')\n        else:\n            for attribute_name in attribute_names:\n                if (not isinstance(attribute_name, six.string_types)):\n                    raise TypeError('attribute_names must be a list of strings')\n    result = self.proxy.get_attributes(uid, attribute_names)\n    status = result.result_status.value\n    if (status == enums.ResultStatus.SUCCESS):\n        return (result.uuid, result.attributes)\n    else:\n        reason = result.result_reason.value\n        message = result.result_message.value\n        raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Get the attributes associated with a managed object.\n\nIf the uid is not specified, the appliance will use the ID placeholder\nby default.\n\nIf the attribute_names list is not specified, the appliance will\nreturn all viable attributes for the managed object.\n\nArgs:\nuid (string): The unique ID of the managed object with which the\nretrieved attributes should be associated. Optional, defaults\nto None.\nattribute_names (list): A list of string attribute names\nindicating which attributes should be retrieved. Optional,\ndefaults to None.", "source": "codesearchnet"}
{"code": "def _truncate(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):\n    if not truncation:\n        return processed_features\n    elif truncation and max_length is None:\n        raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')\n    required_input = processed_features[self.model_input_names[0]]\n    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n        max_length = (max_length \n    needs_to_be_truncated = len(required_input) > max_length\n    if needs_to_be_truncated:\n        processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]\n        if 'attention_mask' in processed_features:\n            processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]\n    return processed_features", "docstring": "Truncate inputs to predefined length or max length in the batch\n\nArgs:\nprocessed_features(`Union[Dict[str, np.ndarray], BatchFeature]`):\nDictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch\nof inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)\nmax_length (`int`, *optional*):\nmaximum length of the returned list and optionally padding length (see below)\npad_to_multiple_of (`int`, *optional*) :\nInteger if set will pad the sequence to a multiple of the provided value. This is especially useful to\nenable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs\nwhich benefit from having sequence lengths be a multiple of 128.\ntruncation (`bool`, *optional*):\nActivates truncation to cut input sequences longer than `max_length` to `max_length`.", "source": "github-repos"}
{"code": "def decode_base64(data):\n    \n    data = bytes(data, encoding=\"ascii\")\n    missing_padding = len(data) % 4\n    if missing_padding != 0:\n        data += b'=' * (4 - missing_padding)\n    return base64.b64decode(data)", "docstring": "Decodes a base64 string, with padding being optional\n\nArgs:\ndata: A base64 encoded string\n\nReturns:\nbytes: The decoded bytes", "source": "juraj-google-style"}
{"code": "def formatted(self, func):\n        \n        other = EscapedString.__new__(EscapedString)\n        other.strings = []\n\n        for is_literal, value in self.strings:\n            if not is_literal:\n                value = func(value)\n            other.strings.append((is_literal, value))\n        return other", "docstring": "Return the string with non-literal parts formatted.\n\nArgs:\nfunc (callable): Callable that translates a string into a\nformatted string.\n\nReturns:\n`EscapedString` object.", "source": "juraj-google-style"}
{"code": "def get_idx_types(rng_def, ranges):\n    \n    idx_types = rng_def.get('kds_esIndexType', []).copy()\n    if not idx_types:\n        nested = False\n        for rng in ranges:\n            if range_is_obj(rng, __MODULE__.rdfclass):\n                nested = True\n        if nested:\n            idx_types.append('es_Nested')\n    return idx_types", "docstring": "Returns the elasticsearch index types for the obj\n\nargs:\nrng_def: the range defintion dictionay\nranges: rdfproperty ranges", "source": "juraj-google-style"}
{"code": "def __generate_reference__(self, triple_map, **kwargs):\n        \n        raw_value = self.source.get(str(triple_map.reference))\n        if raw_value is None or len(raw_value) < 1:\n            return\n        if hasattr(triple_map, \"datatype\"):\n            if triple_map.datatype == NS_MGR.xsd.anyURI.rdflib:\n                output = rdflib.URIRef(raw_value)\n            else:\n                output = rdflib.Literal(\n                    raw_value,\n                    datatype=triple_map.datatype)\n        else:\n            output = rdflib.Literal(raw_value)\n        return output", "docstring": "Generates a RDF entity based on triple map\n\nArgs:\ntriple_map(SimpleNamespace): Triple Map", "source": "juraj-google-style"}
{"code": "def check_tx(self, raw_transaction):\n    self.abort_if_abci_chain_is_not_synced()\n    logger.debug('check_tx: %s', raw_transaction)\n    transaction = decode_transaction(raw_transaction)\n    if self.bigchaindb.is_valid_transaction(transaction):\n        logger.debug('check_tx: VALID')\n        return ResponseCheckTx(code=CodeTypeOk)\n    else:\n        logger.debug('check_tx: INVALID')\n        return ResponseCheckTx(code=CodeTypeError)", "docstring": "Validate the transaction before entry into\nthe mempool.\n\nArgs:\nraw_tx: a raw string (in bytes) transaction.", "source": "codesearchnet"}
{"code": "def spliceext(filepath, s):\n    \n    root, ext = os.path.splitext(safepath(filepath))\n    return root + s + ext", "docstring": "Add s into filepath before the extension\n\nArgs:\nfilepath (str, path): file path\ns (str): string to splice\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def create_unit(self, name, unit):\n    self._single_request('Units.Set', unitName=name, body={'desiredState': unit.desiredState, 'options': unit.options})\n    return self.get_unit(name)", "docstring": "Create a new Unit in the cluster\n\nCreate and modify Unit entities to communicate to fleet the desired state of the cluster.\nThis simply declares what should be happening; the backend system still has to react to\nthe changes in this desired state. The actual state of the system is communicated with\nUnitState entities.\n\n\nArgs:\nname (str): The name of the unit to create\nunit (Unit): The unit to submit to fleet\n\nReturns:\nUnit: The unit that was created\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "codesearchnet"}
{"code": "def remove(self, future):\n    if self._loop.get_debug():\n        logger.debug('Removing %s from the linked list.', future)\n    if (future.prev is None):\n        assert (future is self.head)\n        self.head = future.next\n        if (self.head is None):\n            self.tail = None\n            if (not self.cancelled()):\n                self.set_result(None)\n        else:\n            self.head.prev = None\n    elif (future.next is None):\n        assert (future is self.tail)\n        self.tail = future.prev\n        if (self.tail is None):\n            self.head = None\n            if (not self.cancelled()):\n                self.set_result(None)\n        else:\n            self.tail.prev = None", "docstring": "Remove an object from the linked list.\n\nArgs:\nfuture (PlasmaObjectFuture): A PlasmaObjectFuture instance.", "source": "codesearchnet"}
{"code": "def UnlockScanNode(self, path_spec):\n    if (not self.HasScanNode(path_spec)):\n        raise KeyError('Scan node does not exist.')\n    if (path_spec not in self._locked_scan_nodes):\n        raise KeyError('Scan node is not locked.')\n    del self._locked_scan_nodes[path_spec]\n    self._scan_nodes[path_spec].scanned = False", "docstring": "Marks a scan node as unlocked.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nRaises:\nKeyError: if the scan node does not exists or is not locked.", "source": "codesearchnet"}
{"code": "def user_avatar_url(username, size=64, default=\"retro\"):\n    \n    openid = \"http:\n    return libravatar_url(openid=openid, size=size, default=default)", "docstring": "Get the avatar URL of the provided Fedora username.\n\nThe URL is returned from the Libravatar service.\n\nArgs:\nusername (str): The username to get the avatar of.\nsize (int): Size of the avatar in pixels (it's a square).\ndefault (str): Default avatar to return if not found.\nReturns:\nstr: The URL to the avatar image.", "source": "juraj-google-style"}
{"code": "def __lt__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value < other.value", "docstring": "Returns True if `self` is known to be less than `other`.\n\nDimensions are compared as follows:\n\n```python\n(tf.compat.v1.Dimension(m)    < tf.compat.v1.Dimension(n))    == (m < n)\n(tf.compat.v1.Dimension(m)    < tf.compat.v1.Dimension(None)) == None\n(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n))    == None\n(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None\n```\n\nArgs:\nother: Another Dimension.\n\nReturns:\nThe value of `self.value < other.value` if both are known, otherwise\nNone.", "source": "github-repos"}
{"code": "class LabelSmoother:\n    epsilon: float = 0.1\n    ignore_index: int = -100\n\n    def __call__(self, model_output, labels, shift_labels=False):\n        logits = model_output['logits'] if isinstance(model_output, dict) else model_output[0]\n        if shift_labels:\n            logits = logits[..., :-1, :].contiguous()\n            labels = labels[..., 1:].contiguous()\n        log_probs = -nn.functional.log_softmax(logits, dim=-1)\n        if labels.dim() == log_probs.dim() - 1:\n            labels = labels.unsqueeze(-1)\n        padding_mask = labels.eq(self.ignore_index)\n        labels = torch.clamp(labels, min=0)\n        nll_loss = log_probs.gather(dim=-1, index=labels)\n        smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32)\n        nll_loss.masked_fill_(padding_mask, 0.0)\n        smoothed_loss.masked_fill_(padding_mask, 0.0)\n        num_active_elements = padding_mask.numel() - padding_mask.long().sum()\n        nll_loss = nll_loss.sum() / num_active_elements\n        smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])\n        return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss", "docstring": "Adds label-smoothing on a pre-computed output from a Transformers model.\n\nArgs:\nepsilon (`float`, *optional*, defaults to 0.1):\nThe label smoothing factor.\nignore_index (`int`, *optional*, defaults to -100):\nThe index in the labels to ignore when computing the loss.", "source": "github-repos"}
{"code": "def url(self, suffix=\"\"):\n        \n        return super(neuroRemote,\n                     self).url('{}/'.format(self._ext) + suffix)", "docstring": "Return a constructed URL, appending an optional suffix (uri path).\n\nArguments:\nsuffix (str : \"\"): The suffix to append to the end of the URL\n\nReturns:\nstr: The complete URL", "source": "juraj-google-style"}
{"code": "def shape(self):\n    return self._shape", "docstring": "The statically known shape of the RaggedTensor.\n\nExamples:\n\n>>> rt = tf.ragged.constant([[0], [1, 2]])\n>>> tf.type_spec_from_value(rt).shape\nTensorShape([2, None])\n\n>>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)\n>>> tf.type_spec_from_value(rt).shape\nTensorShape([2, None, 2])\n\nReturns:\nA `tf.TensorShape` containing the statically known shape of the\nRaggedTensor. Ragged dimensions have a size of `None`.", "source": "github-repos"}
{"code": "def disconnect_container_from_network(self, container, net_id,\n                                          force=False):\n        \n        data = {\"Container\": container}\n        if force:\n            if version_lt(self._version, '1.22'):\n                raise InvalidVersion(\n                    'Forced disconnect was introduced in API 1.22'\n                )\n            data['Force'] = force\n        url = self._url(\"/networks/{0}/disconnect\", net_id)\n        res = self._post_json(url, data=data)\n        self._raise_for_status(res)", "docstring": "Disconnect a container from a network.\n\nArgs:\ncontainer (str): container ID or name to be disconnected from the\nnetwork\nnet_id (str): network ID\nforce (bool): Force the container to disconnect from a network.\nDefault: ``False``", "source": "juraj-google-style"}
{"code": "def py_hash(key, num_buckets):\n    \n    b, j = -1, 0\n\n    if num_buckets < 1:\n        raise ValueError('num_buckets must be a positive number')\n\n    while j < num_buckets:\n        b = int(j)\n        key = ((key * long(2862933555777941757)) + 1) & 0xffffffffffffffff\n        j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))\n\n    return int(b)", "docstring": "Generate a number in the range [0, num_buckets).\n\nArgs:\nkey (int): The key to hash.\nnum_buckets (int): Number of buckets to use.\n\nReturns:\nThe bucket number `key` computes to.\n\nRaises:\nValueError: If `num_buckets` is not a positive number.", "source": "juraj-google-style"}
{"code": "def ParseLeakFilesTable(\n      self, parser_mediator, database=None, table=None, **unused_kwargs):\n    \n    if database is None:\n      raise ValueError('Missing database value.')\n\n    if table is None:\n      raise ValueError('Missing table value.')\n\n    for esedb_record in table.records:\n      if parser_mediator.abort:\n        break\n\n      record_values = self._GetRecordValues(\n          parser_mediator, table.name, esedb_record)\n\n      event_data = MsieWebCacheLeakFilesEventData()\n      event_data.cached_filename = record_values.get('Filename', None)\n      event_data.leak_identifier = record_values.get('LeakId', None)\n\n      timestamp = record_values.get('CreationTime', None)\n      if timestamp:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the LeakFiles table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.\n\nRaises:\nValueError: if the database or table value is missing.", "source": "juraj-google-style"}
{"code": "def print_headers(head, outfile=None, silent=False):\n    \n    for header_line in head.print_header():\n        \n        if outfile:\n            outfile.write(header_line+'\\n')\n        else:\n            if not silent:\n                print(header_line)\n    return", "docstring": "Print the vcf headers.\n\nIf a result file is provided headers will be printed here, otherwise\nthey are printed to stdout.\n\nArgs:\nhead (HeaderParser): A vcf header object\noutfile (FileHandle): A file handle\nsilent (Bool): If nothing should be printed.", "source": "juraj-google-style"}
{"code": "def entityLabel(rdfGraph, anEntity, language=DEFAULT_LANGUAGE, getall=True):\n    \n\n    if getall:\n        temp = []\n        for o in rdfGraph.objects(anEntity, RDFS.label):\n            temp += [o]\n        return temp\n    else:\n        for o in rdfGraph.objects(anEntity, RDFS.label):\n            if getattr(o, 'language') and getattr(o, 'language') == language:\n                return o\n        return \"\"", "docstring": "Returns the rdfs.label value of an entity (class or property), if existing.\nDefaults to DEFAULT_LANGUAGE. Returns the RDF.Literal resource\n\nArgs:\nlanguage: 'en', 'it' etc..\ngetall: returns a list of all labels rather than a string", "source": "juraj-google-style"}
{"code": "def update(self, node_spec):\n    return self.client.api.update_node(self.id, self.version, node_spec)", "docstring": "Update the node's configuration.\n\nArgs:\nnode_spec (dict): Configuration settings to update. Any values\nnot provided will be removed. Default: ``None``\n\nReturns:\n`True` if the request went through.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> node_spec = {'Availability': 'active',\n'Name': 'node-name',\n'Role': 'manager',\n'Labels': {'foo': 'bar'}\n}\n>>> node.update(node_spec)", "source": "codesearchnet"}
{"code": "def get_tz(tz) -> str:\n    \n    from xbbg.const import exch_info\n\n    if tz is None: return DEFAULT_TZ\n\n    to_tz = tz\n    if isinstance(tz, str):\n        if hasattr(TimeZone, tz):\n            to_tz = getattr(TimeZone, tz)\n        else:\n            exch = exch_info(ticker=tz)\n            if 'tz' in exch.index:\n                to_tz = exch.tz\n\n    return to_tz", "docstring": "Convert tz from ticker / shorthands to timezone\n\nArgs:\ntz: ticker or timezone shorthands\n\nReturns:\nstr: Python timzone\n\nExamples:\n>>> get_tz('NY')\n'America/New_York'\n>>> get_tz(TimeZone.NY)\n'America/New_York'\n>>> get_tz('BHP AU Equity')\n'Australia/Sydney'", "source": "juraj-google-style"}
{"code": "def normalize_list_like_lines(generation):\n    lines = generation.split('\\n')\n    output_lines = []\n    for line_no, line in enumerate(lines):\n        match = re.search('. ([-*]) ', line)\n        if not match or line[0] not in ('-', '*'):\n            output_lines.append(line)\n            continue\n        delim = match.group(1) + ' '\n        splits = line.split(delim)[1:]\n        replacement = ''\n        delim1 = line[0] + ' '\n        for i, item in enumerate(splits):\n            level = 0\n            potential_numeral, _, rest = item.strip().partition(' ')\n            if not rest:\n                continue\n            if re.match('^[\\\\dixv]+((?:\\\\.[\\\\dixv])?)+$', potential_numeral, flags=re.I | re.M):\n                level = potential_numeral.count('.')\n            replacement += ('\\n' if i > 0 else '') + '\\t' * level + (delim if i > 0 or line_no == 0 else delim1) + item.strip()\n        if line_no == len(lines) - 1:\n            replacement += '\\n'\n        output_lines.append(replacement)\n    return '\\n'.join(output_lines)", "docstring": "Normalize lines in the given text that resemble list items. The function looks for lines that start optionally with\n'-' or '*', possibly followed by Roman numerals or digits indicating nesting levels. The function reformats such\nlines to make them more structured.\n\nArgs:\ngeneration (str): The input text containing lines that need to be normalized.\n\nReturns:\nstr: The input text with the list-like lines normalized.\n\nNote:\nThe function uses regular expressions to identify and reformat the list-like lines. The patterns capture\noptional bullet points, nesting levels indicated by numerals, and the actual list item content. The\nnormalization adjusts the bullet point style and nesting levels based on the captured patterns.", "source": "github-repos"}
{"code": "def find_bind_module(name, verbose=False):\n    \n    bindnames = get_bind_modules(verbose=verbose)\n    bindfile = bindnames.get(name)\n\n    if bindfile:\n        return bindfile\n\n    if not verbose:\n        return None\n\n    \n    fuzzy_matches = get_close_pkgs(name, bindnames.keys())\n\n    if fuzzy_matches:\n        rows = [(x[0], bindnames[x[0]]) for x in fuzzy_matches]\n        print \"'%s' not found. Close matches:\" % name\n        print '\\n'.join(columnise(rows))\n    else:\n        print \"No matches.\"\n\n    return None", "docstring": "Find the bind module matching the given name.\n\nArgs:\nname (str): Name of package to find bind module for.\nverbose (bool): If True, print extra output.\n\nReturns:\nstr: Filepath to bind module .py file, or None if not found.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden: torch.Tensor):\n    if self.mode == 'mix_channel':\n        hidden = self.channel_feature_mixer(hidden)\n    hidden = self.patch_mixer(hidden)\n    hidden = self.feature_mixer(hidden)\n    return hidden", "docstring": "Args:\nhidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):\nInput tensor to the layer.\n\nReturns:\n`torch.Tensor`: Transformed tensor.", "source": "github-repos"}
{"code": "def connected_emulators(self, host=enums.JLinkHost.USB):\n    res = self._dll.JLINKARM_EMU_GetList(host, 0, 0)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    num_devices = res\n    info = (structs.JLinkConnectInfo * num_devices)()\n    num_found = self._dll.JLINKARM_EMU_GetList(host, info, num_devices)\n    if (num_found < 0):\n        raise errors.JLinkException(num_found)\n    return list(info)[:num_found]", "docstring": "Returns a list of all the connected emulators.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhost (int): host type to search (default: ``JLinkHost.USB``)\n\nReturns:\nList of ``JLinkConnectInfo`` specifying the connected emulators.\n\nRaises:\nJLinkException: if fails to enumerate devices.", "source": "codesearchnet"}
{"code": "def get_config_parameter_boolean(config: ConfigParser, section: str, param: str, default: bool) -> bool:\n    try:\n        value = config.getboolean(section, param)\n    except (TypeError, ValueError, NoOptionError):\n        log.warning('Configuration variable {} not found or improper in section [{}]; using default of {!r}', param, section, default)\n        value = default\n    return value", "docstring": "Get Boolean parameter from ``configparser`` ``.INI`` file.\n\nArgs:\nconfig: :class:`ConfigParser` object\nsection: section name within config file\nparam: name of parameter within section\ndefault: default value\nReturns:\nparameter value, or default", "source": "codesearchnet"}
{"code": "def create_transformation(self, rotation=None, translation=None):\n    mat = None\n    if (rotation is not None):\n        mat = Matrix44.from_eulers(Vector3(rotation))\n    if (translation is not None):\n        trans = matrix44.create_from_translation(Vector3(translation))\n        if (mat is None):\n            mat = trans\n        else:\n            mat = matrix44.multiply(mat, trans)\n    return mat", "docstring": "Creates a transformation matrix woth rotations and translation.\n\nArgs:\nrotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\ntranslation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n\nReturns:\nA 4x4 matrix as a :py:class:`numpy.array`", "source": "codesearchnet"}
{"code": "def get_name(cls):\n    global _registry_loaded\n    if (not _registry_loaded):\n        load_message_classes()\n    try:\n        return _class_to_schema_name[cls]\n    except KeyError:\n        raise TypeError('The class {} is not in the message registry, which indicates it is not in the current list of entry points for \"fedora_messaging\". Please check that the class has been added to your package\\'s entry points.'.format(repr(cls)))", "docstring": "Retrieve the schema name associated with a message class.\n\nReturns:\nstr: The schema name.\n\nRaises:\nTypeError: If the message class isn't registered. Check your entry point\nfor correctness.", "source": "codesearchnet"}
{"code": "def GetNotificationsForAllShards(self, queue):\n    \n    notifications_by_session_id = {}\n    for queue_shard in self.GetAllNotificationShards(queue):\n      self._GetUnsortedNotifications(\n          queue_shard, notifications_by_session_id=notifications_by_session_id)\n\n    return notifications_by_session_id.values()", "docstring": "Returns notifications for all shards of a queue at once.\n\nUsed by worker_test_lib.MockWorker to cover all shards with a single worker.\n\nArgs:\nqueue: usually rdfvalue.RDFURN(\"aff4:/W\")\n\nReturns:\nList of rdf_flows.GrrNotification objects", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, List[int]]]=None):\n    vision_feature_layers = vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers\n    image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)\n    if isinstance(vision_feature_layers, int):\n        image_features = image_outputs.hidden_states[vision_feature_layers][:, 1:]\n    else:\n        image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers]\n        image_features = torch.cat(image_features, dim=-1)\n    image_features = self.multi_modal_projector(image_features)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nThe tensors corresponding to the input images.\nvision_feature_layers (`Union[int, List[int]]`):\nThe vision feature layer, or the list of indexes of the layers to select\nthe vision feature.\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):\n    hostname = None\n    try:\n        address = dns.reversename.from_address(ip_address)\n        hostname = query_dns(address, 'PTR', cache=cache, nameservers=nameservers, timeout=timeout)[0]\n    except dns.exception.DNSException:\n        pass\n    return hostname", "docstring": "Resolves an IP address to a hostname using a reverse DNS query\n\nArgs:\nip_address (str): The IP address to resolve\ncache (ExpiringDict): Cache storage\nnameservers (list): A list of one or more nameservers to use\n(Cloudflare's public DNS resolvers by default)\ntimeout (float): Sets the DNS query timeout in seconds\n\nReturns:\nstr: The reverse DNS hostname (if any)", "source": "codesearchnet"}
{"code": "def _aggregation_op(cls, op: Callable[([tf.Tensor, Optional[Sequence[int]]], tf.Tensor)], x: 'TensorFluent', vars_list: List[str]) -> 'TensorFluent':\n    axis = cls._varslist2axis(x, vars_list)\n    t = op(x.tensor, axis)\n    scope = []\n    for var in x.scope.as_list():\n        if (var not in vars_list):\n            scope.append(var)\n    batch = x.batch\n    return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the aggregation `op` applied to fluent `x`.\n\nArgs:\nop: The aggregation operation.\nx: The input fluent.\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the aggregation operator's output.", "source": "codesearchnet"}
{"code": "def longest_existing_path(_path):\n    existing_path = _path\n    while True:\n        _path_new = os.path.dirname(existing_path)\n        if exists(_path_new):\n            existing_path = _path_new\n            break\n        if (_path_new == existing_path):\n            print('!!! [utool] This is a very illformated path indeed.')\n            existing_path = ''\n            break\n        existing_path = _path_new\n    return existing_path", "docstring": "r\"\"\"\nReturns the longest root of _path that exists\n\nArgs:\n_path (str):  path string\n\nReturns:\nstr: _path -  path string\n\nCommandLine:\npython -m utool.util_path --exec-longest_existing_path\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> import utool as ut\n>>> target = dirname(ut.__file__)\n>>> _path = join(target, 'nonexist/foobar')\n>>> existing_path = longest_existing_path(_path)\n>>> result = ('existing_path = %s' % (str(existing_path),))\n>>> print(result)\n>>> assert existing_path == target", "source": "codesearchnet"}
{"code": "def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0, min_vacuum_size=10.0, max_normal_search=None, center_slab=True, selective_dynamics=False, undercoord_threshold=0.09):\n    vnn_bulk = VoronoiNN(tol=0.05)\n    bulk_coords = [len(vnn_bulk.get_nn(structure, n)) for n in range(len(structure))]\n    struct = structure.copy(site_properties={'bulk_coordinations': bulk_coords})\n    slabs = generate_all_slabs(struct, max_index=max(miller_index), min_slab_size=min_slab_size, min_vacuum_size=min_vacuum_size, max_normal_search=max_normal_search, center_slab=center_slab)\n    slab_dict = {slab.miller_index: slab for slab in slabs}\n    if (miller_index not in slab_dict):\n        raise ValueError('Miller index not in slab dict')\n    this_slab = slab_dict[miller_index]\n    vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)\n    (surf_props, undercoords) = ([], [])\n    this_mi_vec = get_mi_vec(this_slab)\n    mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]\n    average_mi_mag = np.average(mi_mags)\n    for (n, site) in enumerate(this_slab):\n        bulk_coord = this_slab.site_properties['bulk_coordinations'][n]\n        slab_coord = len(vnn_surface.get_nn(this_slab, n))\n        mi_mag = np.dot(this_mi_vec, site.coords)\n        undercoord = ((bulk_coord - slab_coord) / bulk_coord)\n        undercoords += [undercoord]\n        if ((undercoord > undercoord_threshold) and (mi_mag > average_mi_mag)):\n            surf_props += ['surface']\n        else:\n            surf_props += ['subsurface']\n    new_site_properties = {'surface_properties': surf_props, 'undercoords': undercoords}\n    new_slab = this_slab.copy(site_properties=new_site_properties)\n    return cls(new_slab, selective_dynamics)", "docstring": "This method constructs the adsorbate site finder from a bulk\nstructure and a miller index, which allows the surface sites\nto be determined from the difference in bulk and slab coordination,\nas opposed to the height threshold.\n\nArgs:\nstructure (Structure): structure from which slab\ninput to the ASF is constructed\nmiller_index (3-tuple or list): miller index to be used\nmin_slab_size (float): min slab size for slab generation\nmin_vacuum_size (float): min vacuum size for slab generation\nmax_normal_search (int): max normal search for slab generation\ncenter_slab (bool): whether to center slab in slab generation\nselective dynamics (bool): whether to assign surface sites\nto selective dynamics\nundercoord_threshold (float): threshold of \"undercoordation\"\nto use for the assignment of surface sites.  Default is\n0.1, for which surface sites will be designated if they\nare 10% less coordinated than their bulk counterpart", "source": "codesearchnet"}
{"code": "def byte_swap_tensor_content(tensor, from_endiness, to_endiness):\n    if tensor.dtype in byte_swappable:\n        tshape = tensor.tensor_shape.dim\n        tensor_bytes = tensor.tensor_content\n        if tensor_bytes:\n            tensor_size = 1\n            for sz in tshape:\n                if sz.size != 0:\n                    tensor_size *= sz.size\n            chunksize = len(tensor_bytes) \n            to_swap = [tensor_bytes[i:i + chunksize] for i in range(0, len(tensor_bytes), chunksize)]\n            tensor.tensor_content = b''.join([int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness) for byteswap in to_swap])", "docstring": "Byte swaps.\n\nArgs:\ntensor: Target tensor to change endiness.\nfrom_endiness: The original endianness format. \"big\" or \"little\"\nto_endiness: The target endianness format. \"big\" or \"little\"", "source": "github-repos"}
{"code": "def slice(filename, number_tiles=None, col=None, row=None, save=True):\n    \n    im = Image.open(filename)\n    im_w, im_h = im.size\n\n    columns = 0\n    rows = 0\n    if not number_tiles is None:\n        validate_image(im, number_tiles)\n        columns, rows = calc_columns_rows(number_tiles)\n        extras = (columns * rows) - number_tiles\n    else:\n        validate_image_col_row(im, col, row)\n        columns = col\n        rows = row\n        extras = (columns * rows) - number_tiles\n\n\n    tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n    tiles = []\n    number = 1\n    for pos_y in range(0, im_h - rows, tile_h): \n        for pos_x in range(0, im_w - columns, tile_w): \n            area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n            image = im.crop(area)\n            position = (int(floor(pos_x / tile_w)) + 1,\n                        int(floor(pos_y / tile_h)) + 1)\n            coords = (pos_x, pos_y)\n            tile = Tile(image, number, position, coords)\n            tiles.append(tile)\n            number += 1\n    if save:\n        save_tiles(tiles,\n                   prefix=get_basename(filename),\n                   directory=os.path.dirname(filename))\n    return tuple(tiles)", "docstring": "Split an image into a specified number of tiles.\n\nArgs:\nfilename (str):  The filename of the image to split.\nnumber_tiles (int):  The number of tiles required.\n\nKwargs:\nsave (bool): Whether or not to save tiles to disk.\n\nReturns:\nTuple of :class:`Tile` instances.", "source": "juraj-google-style"}
{"code": "def inflate_plugins(self, plugins_definition, inflate_method):\n        \n        if isinstance(plugins_definition, list):\n            return self.inflate_plugin_list(plugins_definition, inflate_method)\n        elif isinstance(plugins_definition, dict):\n            return self.inflate_plugin_dict(plugins_definition, inflate_method)\n        else:\n            raise ValueError('%s type is not supported for a plugin list, '\n                             'use list or dict' % type(plugins_definition))", "docstring": "Inflate multiple plugins based on a list/dict definition.\n\nArgs:\nplugins_definition (list/dict): the plugins definitions.\ninflate_method (method): the method to indlate each plugin.\n\nReturns:\nlist: a list of plugin instances.\n\nRaises:\nValueError: when the definition type is not list or dict.", "source": "juraj-google-style"}
{"code": "def read(cls, data):\n        \n        if isinstance(data, pd.DataFrame):\n            output = OrderedDict({})\n            output['version'] = '2.0'\n            output['class'] = 'dimension'\n            [label] = [x for x in list(data.columns.values) if\n                       x not in ['id', 'index']]\n            output['label'] = label\n            output['category'] = OrderedDict({})\n            output['category']['index'] = data.id.tolist()\n            output['category']['label'] = OrderedDict(\n                zip(data.id.values, data[label].values))\n            return cls(output)\n        elif isinstance(data, OrderedDict):\n            return cls(data)\n        elif isinstance(data, basestring) and data.startswith((\"http:\n                                                               \"https:\n                                                               \"ftp:\n                                                               \"ftps:\n            return cls(request(data))\n        elif isinstance(data,basestring):\n            try:\n                json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n                return cls(json_dict)\n            except ValueError:\n                raise\n        else:\n            try:\n                json_dict = json.load(data, object_pairs_hook=OrderedDict)\n                return cls(json_dict)\n            except ValueError:\n                raise", "docstring": "Reads data from URL, Dataframe, JSON string, JSON file\nor OrderedDict.\nArgs:\ndata: can be a Pandas Dataframe, a JSON string, a JSON file,\nan OrderedDict or a URL pointing to a JSONstat file.\n\nReturns:\nAn object of class Dimension populated with data.", "source": "juraj-google-style"}
{"code": "def _get_new_finished_state(self, state, new_seq, new_log_probs):\n    i = state[_StateKeys.CUR_INDEX]\n    finished_seq = state[_StateKeys.FINISHED_SEQ]\n    finished_scores = state[_StateKeys.FINISHED_SCORES]\n    finished_flags = state[_StateKeys.FINISHED_FLAGS]\n    finished_seq = tf.concat([finished_seq, tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)], axis=2)\n    length_norm = _length_normalization(self.alpha, (i + 1))\n    new_scores = (new_log_probs / length_norm)\n    new_finished_flags = tf.equal(new_seq[(:, :, (- 1))], self.eos_id)\n    new_scores += ((1.0 - tf.to_float(new_finished_flags)) * (- INF))\n    finished_seq = tf.concat([finished_seq, new_seq], axis=1)\n    finished_scores = tf.concat([finished_scores, new_scores], axis=1)\n    finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)\n    (top_finished_seq, top_finished_scores, top_finished_flags) = _gather_topk_beams([finished_seq, finished_scores, finished_flags], finished_scores, self.batch_size, self.beam_size)\n    return {_StateKeys.FINISHED_SEQ: top_finished_seq, _StateKeys.FINISHED_SCORES: top_finished_scores, _StateKeys.FINISHED_FLAGS: top_finished_flags}", "docstring": "Combine new and old finished sequences, and gather the top k sequences.\n\nArgs:\nstate: A dictionary with the current loop state.\nnew_seq: New sequences generated by growing the current alive sequences\nint32 tensor with shape [batch_size, beam_size, i + 1]\nnew_log_probs: Log probabilities of new sequences\nfloat32 tensor with shape [batch_size, beam_size]\n\nReturns:\nDictionary with finished keys from _StateKeys:\n{Top beam_size finished sequences based on score,\nScores of finished sequences,\nFinished flags of finished sequences}", "source": "codesearchnet"}
{"code": "def load_and_use(path):\n    example_cond, example_a, example_b = _get_example_tensors()\n    restored = tf.saved_model.load(path)\n    return restored.use_multiplex(example_cond, example_a, example_b)", "docstring": "Load and used a model that was previously created by `save()`.\n\nArgs:\npath: Directory to load model from, typically the same directory that was\nused by save().\n\nReturns:\nA tensor that is the result of using the multiplex op that is\ntf.constant([1, 20, 3, 40, 5], dtype=tf.int64).", "source": "github-repos"}
{"code": "def rename_document(self, did, name):\n    payload = {'name': name}\n    return self._api.request('post', ('/api/documents/' + did), body=payload)", "docstring": "Renames the specified document.\n\nArgs:\n- did (str): Document ID\n- name (str): New document name\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"}
{"code": "def is_link(path):\n    \n    if sys.getwindowsversion().major < 6:\n        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')\n\n    try:\n        return salt.utils.path.islink(path)\n    except Exception as exc:\n        raise CommandExecutionError(exc)", "docstring": "Check if the path is a symlink\n\nThis is only supported on Windows Vista or later.\n\nInline with Unix behavior, this function will raise an error if the path\nis not a symlink, however, the error raised will be a SaltInvocationError,\nnot an OSError.\n\nArgs:\npath (str): The path to a file or directory\n\nReturns:\nbool: True if path is a symlink, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.is_link /path/to/link", "source": "juraj-google-style"}
{"code": "def _identify_eds_ing(first, second):\n        \n        A = set([first.L, first.R])\n        A.update(first.D)\n\n        B = set([second.L, second.R])\n        B.update(second.D)\n\n        depend_set = A & B\n        left, right = sorted(list(A ^ B))\n\n        return left, right, depend_set", "docstring": "Find nodes connecting adjacent edges.\n\nArgs:\nfirst(Edge): Edge object representing the first edge.\nsecond(Edge): Edge object representing the second edge.\n\nReturns:\ntuple[int, int, set[int]]: The first two values represent left and right node\nindicies of the new edge. The third value is the new dependence set.", "source": "juraj-google-style"}
{"code": "def wait_for_js(function):\n\n    @functools.wraps(function)\n    def wrapper(*args, **kwargs):\n        if (len(args) < 1):\n            return function(*args, **kwargs)\n        else:\n            self = args[0]\n            if hasattr(self, 'wait_for_js'):\n                self.wait_for_js()\n            return function(*args, **kwargs)\n    return wrapper", "docstring": "Method decorator that waits for JavaScript dependencies before executing `function`.\nIf the function is not a method, the decorator has no effect.\n\nArgs:\nfunction (callable): Method to decorate.\n\nReturns:\nDecorated method", "source": "codesearchnet"}
{"code": "def _calc_block_mean_variance(image, mask, blocksize):\n    I = image.copy()\n    I_f = (I.astype(np.float32) / 255.0)\n    result = np.zeros(((image.shape[0] / blocksize), (image.shape[1] / blocksize)), dtype=np.float32)\n    for i in xrange(0, (image.shape[0] - blocksize), blocksize):\n        for j in xrange(0, (image.shape[1] - blocksize), blocksize):\n            patch = I_f[(i:((i + blocksize) + 1), j:((j + blocksize) + 1))]\n            mask_patch = mask[(i:((i + blocksize) + 1), j:((j + blocksize) + 1))]\n            tmp1 = np.zeros((blocksize, blocksize))\n            tmp2 = np.zeros((blocksize, blocksize))\n            (mean, std_dev) = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)\n            value = 0\n            if (std_dev[0][0] > MEAN_VARIANCE_THRESHOLD):\n                value = mean[0][0]\n            result[((i / blocksize), (j / blocksize))] = value\n    small_image = cv2.resize(I, ((image.shape[1] / blocksize), (image.shape[0] / blocksize)))\n    (res, inpaintmask) = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)\n    inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5, cv2.INPAINT_TELEA)\n    res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))\n    return res", "docstring": "Adaptively determines image background.\n\nArgs:\nimage: image converted 1-channel image.\nmask: 1-channel mask, same size as image.\nblocksize: adaptive algorithm parameter.\n\nReturns:\nimage of same size as input with foreground inpainted with background.", "source": "codesearchnet"}
{"code": "def train(self, debug=True, force=False, single_thread=False, timeout=20):\n    if ((not self.must_train) and (not force)):\n        return\n    self.padaos.compile()\n    self.train_thread = Thread(target=self._train, kwargs=dict(debug=debug, single_thread=single_thread, timeout=timeout), daemon=True)\n    self.train_thread.start()\n    self.train_thread.join(timeout)\n    self.must_train = False\n    return (not self.train_thread.is_alive())", "docstring": "Trains all the loaded intents that need to be updated\nIf a cache file exists with the same hash as the intent file,\nthe intent will not be trained and just loaded from file\n\nArgs:\ndebug (bool): Whether to print a message to stdout each time a new intent is trained\nforce (bool): Whether to force training if already finished\nsingle_thread (bool): Whether to force running in a single thread\ntimeout (float): Seconds before cancelling training\nReturns:\nbool: True if training succeeded without timeout", "source": "codesearchnet"}
{"code": "def model_fn(features, labels, mode, params, config):\n  \n  del labels, config\n\n  if params[\"analytic_kl\"] and params[\"mixture_components\"] != 1:\n    raise NotImplementedError(\n        \"Using `analytic_kl` is only supported when `mixture_components = 1` \"\n        \"since there's no closed form otherwise.\")\n\n  encoder = make_encoder(params[\"activation\"],\n                         params[\"latent_size\"],\n                         params[\"base_depth\"])\n  decoder = make_decoder(params[\"activation\"],\n                         params[\"latent_size\"],\n                         IMAGE_SHAPE,\n                         params[\"base_depth\"])\n  latent_prior = make_mixture_prior(params[\"latent_size\"],\n                                    params[\"mixture_components\"])\n\n  image_tile_summary(\n      \"input\", tf.cast(features, dtype=tf.float32), rows=1, cols=16)\n\n  approx_posterior = encoder(features)\n  approx_posterior_sample = approx_posterior.sample(params[\"n_samples\"])\n  decoder_likelihood = decoder(approx_posterior_sample)\n  image_tile_summary(\n      \"recon/sample\",\n      tf.cast(decoder_likelihood.sample()[:3, :16], dtype=tf.float32),\n      rows=3,\n      cols=16)\n  image_tile_summary(\n      \"recon/mean\",\n      decoder_likelihood.mean()[:3, :16],\n      rows=3,\n      cols=16)\n\n  \n  distortion = -decoder_likelihood.log_prob(features)\n  avg_distortion = tf.reduce_mean(input_tensor=distortion)\n  tf.compat.v1.summary.scalar(\"distortion\", avg_distortion)\n\n  if params[\"analytic_kl\"]:\n    rate = tfd.kl_divergence(approx_posterior, latent_prior)\n  else:\n    rate = (approx_posterior.log_prob(approx_posterior_sample)\n            - latent_prior.log_prob(approx_posterior_sample))\n  avg_rate = tf.reduce_mean(input_tensor=rate)\n  tf.compat.v1.summary.scalar(\"rate\", avg_rate)\n\n  elbo_local = -(rate + distortion)\n\n  elbo = tf.reduce_mean(input_tensor=elbo_local)\n  loss = -elbo\n  tf.compat.v1.summary.scalar(\"elbo\", elbo)\n\n  importance_weighted_elbo = tf.reduce_mean(\n      input_tensor=tf.reduce_logsumexp(input_tensor=elbo_local, axis=0) -\n      tf.math.log(tf.cast(params[\"n_samples\"], dtype=tf.float32)))\n  tf.compat.v1.summary.scalar(\"elbo/importance_weighted\",\n                              importance_weighted_elbo)\n\n  \n  random_image = decoder(latent_prior.sample(16))\n  image_tile_summary(\n      \"random/sample\",\n      tf.cast(random_image.sample(), dtype=tf.float32),\n      rows=4,\n      cols=4)\n  image_tile_summary(\"random/mean\", random_image.mean(), rows=4, cols=4)\n\n  \n  global_step = tf.compat.v1.train.get_or_create_global_step()\n  learning_rate = tf.compat.v1.train.cosine_decay(\n      params[\"learning_rate\"], global_step, params[\"max_steps\"])\n  tf.compat.v1.summary.scalar(\"learning_rate\", learning_rate)\n  optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)\n  train_op = optimizer.minimize(loss, global_step=global_step)\n\n  return tf.estimator.EstimatorSpec(\n      mode=mode,\n      loss=loss,\n      train_op=train_op,\n      eval_metric_ops={\n          \"elbo\":\n              tf.compat.v1.metrics.mean(elbo),\n          \"elbo/importance_weighted\":\n              tf.compat.v1.metrics.mean(importance_weighted_elbo),\n          \"rate\":\n              tf.compat.v1.metrics.mean(avg_rate),\n          \"distortion\":\n              tf.compat.v1.metrics.mean(avg_distortion),\n      },\n  )", "docstring": "Builds the model function for use in an estimator.\n\nArguments:\nfeatures: The 
input features for the estimator.\nlabels: The labels, unused here.\nmode: Signifies whether it is train or test or predict.\nparams: Some hyperparameters as a dictionary.\nconfig: The RunConfig, unused here.\n\nReturns:\nEstimatorSpec: A tf.estimator.EstimatorSpec instance.", "source": "juraj-google-style"}
{"code": "def num_connected_components(self, unitary_only=False):\n        \n        \n        reg_offset = 0\n        reg_map = {}\n\n        if unitary_only:\n            regs = self.qregs\n        else:\n            regs = self.qregs+self.cregs\n\n        for reg in regs:\n            reg_map[reg.name] = reg_offset\n            reg_offset += reg.size\n        \n        sub_graphs = [[bit] for bit in range(reg_offset)]\n\n        num_sub_graphs = len(sub_graphs)\n\n        \n        \n        for instr, qargs, cargs in self.data:\n            if unitary_only:\n                args = qargs\n                num_qargs = len(args)\n            else:\n                args = qargs+cargs\n                num_qargs = len(args) + (1 if instr.control else 0)\n\n            if num_qargs >= 2 and instr.name not in ['barrier', 'snapshot']:\n                graphs_touched = []\n                num_touched = 0\n                \n                \n                if instr.control and not unitary_only:\n                    creg = instr.control[0]\n                    creg_int = reg_map[creg.name]\n                    for coff in range(creg.size):\n                        temp_int = creg_int+coff\n                        for k in range(num_sub_graphs):\n                            if temp_int in sub_graphs[k]:\n                                graphs_touched.append(k)\n                                num_touched += 1\n                                break\n\n                for item in args:\n                    reg_int = reg_map[item[0].name]+item[1]\n                    for k in range(num_sub_graphs):\n                        if reg_int in sub_graphs[k]:\n                            if k not in graphs_touched:\n                                graphs_touched.append(k)\n                                num_touched += 1\n                                break\n\n                \n                \n                \n                if num_touched > 1:\n                    connections = []\n                    for idx in graphs_touched:\n                        connections.extend(sub_graphs[idx])\n                    _sub_graphs = []\n                    for idx in range(num_sub_graphs):\n                        if idx not in graphs_touched:\n                            _sub_graphs.append(sub_graphs[idx])\n                    _sub_graphs.append(connections)\n                    sub_graphs = _sub_graphs\n                    num_sub_graphs -= (num_touched-1)\n            \n            if num_sub_graphs == 1:\n                break\n        return num_sub_graphs", "docstring": "How many non-entangled subcircuits can the circuit be factored to.\n\nArgs:\nunitary_only (bool): Compute only unitary part of graph.\n\nReturns:\nint: Number of connected components in circuit.", "source": "juraj-google-style"}
{"code": "def __init__(self, event_type: str):\n        \n        if not isinstance(event_type, str) or event_type == \"\":\n            raise TypeError(\"Invalid event type: {}\".format(event_type))\n\n        self._event_type: str = event_type\n        self._target: EventDispatcherBase = None", "docstring": "Constructor.\n\nArgs:\nevent_type (str): The type - string identifier - of the event.\nMust not be `None` or empty string.", "source": "juraj-google-style"}
{"code": "def init_c_overturn(step):\n    (rbot, rtop) = misc.get_rbounds(step)\n    xieut = step.sdat.par['tracersin']['fe_eut']\n    k_fe = step.sdat.par['tracersin']['k_fe']\n    xi0l = step.sdat.par['tracersin']['fe_cont']\n    xi0s = (k_fe * xi0l)\n    xired = (xi0l / xieut)\n    rsup = (((rtop ** 3) - ((xired ** (1 / (1 - k_fe))) * ((rtop ** 3) - (rbot ** 3)))) ** (1 / 3))\n\n    def initprof(rpos):\n        'Theoretical initial profile.'\n        if (rpos < rsup):\n            return (xi0s * ((((rtop ** 3) - (rbot ** 3)) / ((rtop ** 3) - (rpos ** 3))) ** (1 - k_fe)))\n        return xieut\n    rad = np.linspace(rbot, rtop, 500)\n    initprof = np.vectorize(initprof)\n    return (initprof(rad), rad)", "docstring": "Initial concentration.\n\nThis compute the resulting composition profile if fractional\ncrystallization of a SMO is assumed.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the composition and the radial position\nat which it is evaluated.", "source": "codesearchnet"}
{"code": "def get_first_content(el_list, alt=None, strip=True):\n    \n    if not el_list:\n        return alt\n\n    content = el_list[0].getContent()\n\n    if strip:\n        content = content.strip()\n\n    if not content:\n        return alt\n\n    return content", "docstring": "Return content of the first element in `el_list` or `alt`. Also return `alt`\nif the content string of first element is blank.\n\nArgs:\nel_list (list): List of HTMLElement objects.\nalt (default None): Value returner when list or content is blank.\nstrip (bool, default True): Call .strip() to content.\n\nReturns:\nstr or alt: String representation of the content of the first element \\\nor `alt` if not found.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states):\n    hidden_states = hidden_states.transpose(-1, 1)\n    hidden_states = self.conv1(hidden_states)\n    hidden_states = torch.relu(hidden_states)\n    hidden_states = self.dropout(hidden_states)\n    hidden_states = self.conv2(hidden_states)\n    hidden_states = hidden_states.transpose(-1, 1)\n    return hidden_states", "docstring": "Calculate forward propagation.\n\nArgs:\nhidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).\n\nReturns:\ntorch.Tensor: Batch of output tensors (batch_size, time, hidden_channels).", "source": "github-repos"}
{"code": "def parse_data_types_and_routes_from_doc_ref(api, doc, namespace_context, ignore_missing_entries=False):\n    assert (doc is not None)\n    data_types = set()\n    routes = defaultdict(set)\n    for match in doc_ref_re.finditer(doc):\n        try:\n            tag = match.group('tag')\n            val = match.group('val')\n            supplied_namespace = api.namespaces[namespace_context]\n            if (tag == 'field'):\n                if ('.' in val):\n                    (type_name, __) = val.split('.', 1)\n                    doc_type = supplied_namespace.data_type_by_name[type_name]\n                    data_types.add(doc_type)\n                else:\n                    pass\n            elif (tag == 'route'):\n                if ('.' in val):\n                    (namespace_name, val) = val.split('.', 1)\n                    namespace = api.namespaces[namespace_name]\n                else:\n                    namespace = supplied_namespace\n                try:\n                    (route_name, version) = parse_route_name_and_version(val)\n                except ValueError as ex:\n                    raise KeyError(str(ex))\n                route = namespace.routes_by_name[route_name].at_version[version]\n                routes[namespace.name].add(route)\n            elif (tag == 'type'):\n                if ('.' in val):\n                    (namespace_name, val) = val.split('.', 1)\n                    doc_type = api.namespaces[namespace_name].data_type_by_name[val]\n                    data_types.add(doc_type)\n                else:\n                    doc_type = supplied_namespace.data_type_by_name[val]\n                    data_types.add(doc_type)\n        except KeyError:\n            if (not ignore_missing_entries):\n                raise\n    return (data_types, routes)", "docstring": "Given a documentation string, parse it and return all references to other\ndata types and routes.\n\nArgs:\n- api: The API containing this doc ref.\n- doc: The documentation string to parse.\n- namespace_context: The namespace name relative to this documentation.\n- ignore_missing_entries: If set, this will skip references to nonexistent data types instead\nof raising an exception.\n\nReturns:\n- a tuple of referenced data types and routes", "source": "codesearchnet"}
{"code": "def get_or_create(self, defaults=None, **kwargs):\n    try:\n        return (self.get(**kwargs), False)\n    except ObjectDoesNotExist:\n        pass\n    data = (defaults or {})\n    data.update(kwargs)\n    return (self._model_class(**data).blocking_save(), True)", "docstring": "Looks up an object with the given kwargs, creating a new one if necessary.\n\nArgs:\ndefaults (dict): Used when we create a new object. Must map to fields\nof the model.\n\\*\\*kwargs: Used both for filtering and new object creation.\n\nReturns:\nA tuple of (object, created), where created is a boolean variable\nspecifies whether the object was newly created or not.\n\nExample:\nIn the following example, *code* and *name* fields are used to query the DB.\n\n.. code-block:: python\n\nobj, is_new = Permission.objects.get_or_create({'description': desc},\ncode=code, name=name)\n\n{description: desc} dict is just for new creations. If we can't find any\nrecords by filtering on *code* and *name*, then we create a new object by\nusing all of the inputs.", "source": "codesearchnet"}
{"code": "def __init__(self, retriever):\n    \n    self._page_token = None\n    self._first_page = True\n    self._retriever = retriever\n    self._count = 0", "docstring": "Initializes an instance of an Iterator.\n\nArgs:\nretriever: a function that can retrieve the next page of items.", "source": "juraj-google-style"}
{"code": "def impad_to_multiple(img, divisor, pad_val=0):\n    \n    pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor\n    pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor\n    return impad(img, (pad_h, pad_w), pad_val)", "docstring": "Pad an image to ensure each edge to be multiple to some number.\n\nArgs:\nimg (ndarray): Image to be padded.\ndivisor (int): Padded image edges will be multiple to divisor.\npad_val (number or sequence): Same as :func:`impad`.\n\nReturns:\nndarray: The padded image.", "source": "juraj-google-style"}
{"code": "def getShareInfo(item):\n    key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'\n    info = getattr(item, key, None)\n    if (info is not None):\n        return info\n    meths = {}\n    info = {'meths': meths}\n    for name in dir(item):\n        if name.startswith('_'):\n            continue\n        attr = getattr(item, name, None)\n        if (not callable(attr)):\n            continue\n        wrapped = getattr(attr, '__syn_wrapped__', None)\n        if (wrapped in unwraps):\n            real = inspect.unwrap(attr)\n            if inspect.isasyncgenfunction(real):\n                meths[name] = {'genr': True}\n                continue\n        if inspect.isasyncgenfunction(attr):\n            meths[name] = {'genr': True}\n    try:\n        setattr(item, key, info)\n    except Exception as e:\n        logger.exception(f'Failed to set magic on {item}')\n    try:\n        setattr(item.__class__, key, info)\n    except Exception as e:\n        logger.exception(f'Failed to set magic on {item.__class__}')\n    return info", "docstring": "Get a dictionary of special annotations for a Telepath Proxy.\n\nArgs:\nitem:  Item to inspect.\n\nNotes:\nThis will set the ``_syn_telemeth`` attribute on the item\nand the items class, so this data is only computed once.\n\nReturns:\ndict: A dictionary of methods requiring special handling by the proxy.", "source": "codesearchnet"}
{"code": "class XGBoostModelHandlerDatatable(XGBoostModelHandler[datatable.Frame, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):\n\n    def run_inference(self, batch: Sequence[datatable.Frame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n        \n        return self._inference_fn(batch, model, inference_args)\n\n    def get_num_bytes(self, batch: Sequence[datatable.Frame]) -> int:\n        \n        return sum((sys.getsizeof(element) for element in batch))", "docstring": "Implementation of the ModelHandler interface for XGBoost\nusing datatable dataframes as input.\n\nExample Usage::\n\npcoll | RunInference(\nXGBoostModelHandlerDatatable(\nmodel_class=\"XGBoost Model Class\",\nmodel_state=\"my_model_state.json\")))\n\nArgs:\nmodel_class: class of the XGBoost model that defines the model\nstructure.\nmodel_state: path to a json file that contains the model's\nconfiguration.\ninference_fn: the inference function to use during RunInference.\ndefault=default_xgboost_inference_fn", "source": "github-repos"}
{"code": "def update(self, friendly_name=None, description=None, query=None):\n    self._table._load_info()\n    if (query is not None):\n        if isinstance(query, _query.Query):\n            query = query.sql\n        self._table._info['view'] = {'query': query}\n    self._table.update(friendly_name=friendly_name, description=description)", "docstring": "Selectively updates View information.\n\nAny parameters that are None (the default) are not applied in the update.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\nquery: if not None, a new query string for the View.", "source": "codesearchnet"}
{"code": "def default_peek(python_type, exposes):\n    \n    with_args = False\n    make = python_type\n    try:\n        make()\n    except (SystemExit, KeyboardInterrupt):\n        raise\n    except:\n        make = lambda: python_type.__new__(python_type)\n        try:\n            make()\n        except (SystemExit, KeyboardInterrupt):\n            raise\n        except:\n            make = lambda args: python_type.__new__(python_type, *args)\n            with_args = True\n    def missing(attr):\n        return AttributeError(\"can't set attribute '{}' ({})\".format(attr, python_type))\n    if with_args:\n        def peek(store, container, _stack=None):\n            state = []\n            for attr in exposes: \n                \n                if attr in container:\n                    state.append(store.peek(attr, container, _stack=_stack))\n                else:\n                    state.append(None)\n            return make(state)\n    elif '__dict__' in exposes:\n        def peek(store, container, _stack=None):\n            obj = make()\n            for attr in container:\n                val = store.peek(attr, container, _stack=_stack)\n                try:\n                    setattr(obj, attr, val)\n                except AttributeError:\n                    raise missing(attr)\n            return obj\n    else:\n        def peek(store, container, _stack=None):\n            obj = make()\n            for attr in exposes: \n                \n                if attr in container:\n                    val = store.peek(attr, container, _stack=_stack)\n                else:\n                    val = None\n                try:\n                    setattr(obj, attr, val)\n                except AttributeError:\n                    raise missing(attr)\n            return obj\n    return peek", "docstring": "Autoserializer factory.\n\nWorks best in Python 3.\n\nArguments:\n\npython_type (type): type constructor.\n\nexposes (iterable): sequence of attributes.\n\nReturns:\n\ncallable: deserializer (`peek` routine).", "source": "juraj-google-style"}
{"code": "def _unify_call_signature(i, dist_fn):\n    if distribution_util.is_distribution_instance(dist_fn):\n        return ((lambda *_: dist_fn), None)\n    if (not callable(dist_fn)):\n        raise TypeError('{} must be either `tfd.Distribution`-like or `callable`.'.format(dist_fn))\n    args = _get_required_args(dist_fn)\n    if (not args):\n        return ((lambda *_: dist_fn()), ())\n\n    @functools.wraps(dist_fn)\n    def dist_fn_wrapped(*xs):\n        'Calls `dist_fn` with reversed and truncated args.'\n        if (i != len(xs)):\n            raise ValueError('Internal Error: Unexpected number of inputs provided to {}-th distribution maker (dist_fn: {}, expected: {}, saw: {}).'.format(i, dist_fn, i, len(xs)))\n        if (len(xs) < len(args)):\n            raise ValueError('Internal Error: Too few inputs provided to {}-th distribution maker (dist_fn: {}, expected: {}, saw: {}).'.format(i, dist_fn, len(args), len(xs)))\n        return dist_fn(*reversed(xs[(- len(args)):]))\n    return (dist_fn_wrapped, args)", "docstring": "Creates `dist_fn_wrapped` which calls `dist_fn` with all prev nodes.\n\nArgs:\ni: Python `int` corresponding to position in topologically sorted DAG.\ndist_fn: Python `callable` which takes a subset of previously constructed\ndistributions (in reverse order) and produces a new distribution instance.\n\nReturns:\ndist_fn_wrapped: Python `callable` which takes all previous distributions\n(in non reverse order) and produces a  new distribution instance.\nargs: `tuple` of `str` representing the arg names of `dist_fn` (and in non\nwrapped, \"natural\" order). `None` is returned only if the input is not a\n`callable`.", "source": "codesearchnet"}
{"code": "class Permute(Layer):\n\n    def __init__(self, dims, **kwargs):\n        super(Permute, self).__init__(**kwargs)\n        self.dims = tuple(dims)\n        if sorted(dims) != list(range(1, len(dims) + 1)):\n            raise ValueError('Invalid permutation `dims` for Permute Layer: %s. The set of indices in `dims` must be consecutive and start from 1.' % (dims,))\n        self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n    def compute_output_shape(self, input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape).as_list()\n        output_shape = copy.copy(input_shape)\n        for i, dim in enumerate(self.dims):\n            target_dim = input_shape[dim]\n            output_shape[i + 1] = target_dim\n        return tensor_shape.TensorShape(output_shape)\n\n    def call(self, inputs):\n        return array_ops.transpose(inputs, perm=(0,) + self.dims)\n\n    def get_config(self):\n        config = {'dims': self.dims}\n        base_config = super(Permute, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "Permutes the dimensions of the input according to a given pattern.\n\nUseful e.g. connecting RNNs and convnets.\n\nExample:\n\n```python\nmodel = Sequential()\nmodel.add(Permute((2, 1), input_shape=(10, 64)))\n# now: model.output_shape == (None, 64, 10)\n# note: `None` is the batch dimension\n```\n\nArgs:\ndims: Tuple of integers. Permutation pattern does not include the\nsamples dimension. Indexing starts at 1.\nFor instance, `(2, 1)` permutes the first and second dimensions\nof the input.\n\nInput shape:\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n\nOutput shape:\nSame as the input shape, but with the dimensions re-ordered according\nto the specified pattern.", "source": "github-repos"}
{"code": "def to_representation(self, value):\n        \n        if not value:\n            return None\n\n        image = get_thumbnail(value, self.geometry_string, **self.options)\n\n        try:\n            request = self.context.get('request', None)\n            return request.build_absolute_uri(image.url)\n        except:\n            try:\n                return super(HyperlinkedSorlImageField, self).to_representation(image)\n            except AttributeError:  \n                return super(HyperlinkedSorlImageField, self).to_native(image.url)", "docstring": "Perform the actual serialization.\n\nArgs:\nvalue: the image to transform\nReturns:\na url pointing at a scaled and cached image", "source": "juraj-google-style"}
{"code": "def highlight(text: str, color_code: int, bold: bool=False) -> str:\n    return '{}\\x1b[{}m{}\\x1b[0m'.format(('\\x1b[1m' if bold else ''), color_code, text)", "docstring": "Wraps the given string with terminal color codes.\n\nArgs:\ntext: The content to highlight.\ncolor_code: The color to highlight with, e.g. 'shelltools.RED'.\nbold: Whether to bold the content in addition to coloring.\n\nReturns:\nThe highlighted string.", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "List all GitHubEnterpriseConfigs for a given project.\n\nArgs:\nrequest: (CloudbuildProjectsGithubEnterpriseConfigsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListGithubEnterpriseConfigsResponse) The response message.", "source": "github-repos"}
{"code": "def _EnforceProcessMemoryLimit(self, memory_limit):\n    if resource:\n        if (memory_limit is None):\n            memory_limit = (((4 * 1024) * 1024) * 1024)\n        elif (memory_limit == 0):\n            memory_limit = resource.RLIM_INFINITY\n        resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))", "docstring": "Enforces a process memory limit.\n\nArgs:\nmemory_limit (int): maximum number of bytes the process is allowed\nto allocate, where 0 represents no limit and None a default of\n4 GiB.", "source": "codesearchnet"}
{"code": "def run(self, dag):\n        \n        self.layout = self.layout or self.property_set['layout']\n\n        if self.layout is None:\n            raise TranspilerError(\"EnlargeWithAncilla requires property_set[\\\"layout\\\"] or\"\n                                  \" \\\"layout\\\" parameter to run\")\n\n        layout_virtual_qubits = self.layout.get_virtual_bits().keys()\n        new_qregs = set(virtual_qubit[0] for virtual_qubit in layout_virtual_qubits\n                        if virtual_qubit not in dag.wires)\n\n        for qreg in new_qregs:\n            dag.add_qreg(qreg)\n\n        return dag", "docstring": "Extends dag with virtual qubits that are in layout but not in the circuit yet.\n\nArgs:\ndag (DAGCircuit): DAG to extend.\n\nReturns:\nDAGCircuit: An extended DAG.\n\nRaises:\nTranspilerError: If there is not layout in the property set or not set at init time.", "source": "juraj-google-style"}
{"code": "def disable_control_flow_v2(unused_msg: str) -> Callable[[_F], _F]:\n\n    def wrapper(func: _F) -> _F:\n        func._disable_control_flow_v2 = True\n        return func\n    return wrapper", "docstring": "Decorator for a function in a with_control_flow_v2 enabled test class.\n\nBlocks the function from being run with v2 control flow ops.\n\nArgs:\nunused_msg: Reason for disabling.\n\nReturns:\nThe wrapped function with _disable_control_flow_v2 attr set to True.", "source": "github-repos"}
{"code": "def xml(self):\n    self.pendingvalidation()\n    E = ElementMaker(namespace='http:\n    attribs = {}\n    attribs['{http:\n    attribs['version'] = FOLIAVERSION\n    attribs['generator'] = ('pynlpl.formats.folia-v' + LIBVERSION)\n    metadataattribs = {}\n    metadataattribs[(('{' + NSFOLIA) + '}type')] = self.metadatatype\n    if isinstance(self.metadata, ExternalMetaData):\n        metadataattribs[(('{' + NSFOLIA) + '}src')] = self.metadata.url\n    e = E.FoLiA(E.metadata(E.annotations(*self.xmldeclarations()), *self.xmlmetadata(), **metadataattribs), **attribs)\n    for text in self.data:\n        e.append(text.xml())\n    return e", "docstring": "Serialise the document to XML.\n\nReturns:\nlxml.etree.Element\n\nSee also:\n:meth:`Document.xmlstring`", "source": "codesearchnet"}
{"code": "def traverse_nodes(self, node_set, depth=0):\n    tab = '  '\n    result = list()\n    for n in node_set:\n        repr = (n if (self.nodes[n]['type'] == 'variable') else f\"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}\")\n        result.append(f'{(tab * depth)}{repr}')\n        result.extend(self.traverse_nodes(self.successors(n), depth=(depth + 1)))\n    return result", "docstring": "BFS traversal of nodes that returns name traversal as large string.\n\nArgs:\nnode_set: Set of input nodes to begin traversal.\ndepth: Current traversal depth for child node viewing.\n\nReturns:\ntype: String containing tabbed traversal view.", "source": "codesearchnet"}
{"code": "def CheckTaskToMerge(self, task):\n    \n    with self._lock:\n      is_abandoned = task.identifier in self._tasks_abandoned\n      is_processing = task.identifier in self._tasks_processing\n      is_queued = task.identifier in self._tasks_queued\n\n      if not is_queued and not is_processing and not is_abandoned:\n        raise KeyError('Status of task {0:s} is unknown.'.format(\n            task.identifier))\n\n      return is_queued or is_processing or is_abandoned and not task.has_retry", "docstring": "Checks if the task should be merged.\n\nArgs:\ntask (Task): task.\n\nReturns:\nbool: True if the task should be merged.\n\nRaises:\nKeyError: if the task was not queued, processing or abandoned.", "source": "juraj-google-style"}
{"code": "def put_many(self, type: Type[T], items: Iterable[T]) -> None:\n        \n        LOGGER.info(\"Getting SinkHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n        try:\n            handlers = self._put_types[type]\n        except KeyError:\n            try:\n                LOGGER.info(\"Building new SinkHandlers for \\\"{type}\\\"\".format(type=type.__name__))\n                handlers = self._put_handlers(type)\n            except NoConversionError:\n                handlers = None\n            self._get_types[type] = handlers\n\n        LOGGER.info(\"Creating new PipelineContext\")\n        context = self._new_context()\n\n        LOGGER.info(\"Sending items \\\"{items}\\\" to SourceHandlers\".format(items=items))\n        if handlers is not None:\n            items = list(items)\n            for handler in handlers:\n                handler.put_many(items, context)", "docstring": "Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.\n\nArgs:\nitems: An iterable (e.g. list) of objects to be inserted into the data pipeline.", "source": "juraj-google-style"}
{"code": "def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):\n    adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')\n    expanded_text_ad = {'xsi_type': 'ExpandedTextAd', 'headlinePart1': ('Luxury Cruise to {=%s.Name}' % feed_name), 'headlinePart2': ('Only {=%s.Price}' % feed_name), 'description': ('Offer ends in {=countdown(%s.Date)}!' % feed_name), 'finalUrls': ['http:\n    operations = [{'operator': 'ADD', 'operand': {'adGroupId': adgroup, 'ad': expanded_text_ad}} for adgroup in adgroup_ids]\n    response = adgroup_ad_service.mutate(operations)\n    if (response and ('value' in response)):\n        for ad in response['value']:\n            print(('Created an ad with ID \"%s\", type \"%s\", and status \"%s\".' % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status'])))\n    else:\n        raise errors.GoogleAdsError('No ads were added.')", "docstring": "Creates ExpandedTextAds that use ad customizations for specified AdGroups.\n\nArgs:\nclient: an AdWordsClient instance.\nadgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.\nfeed_name: the name of the feed used to apply customizations.\n\nRaises:\nGoogleAdsError: if no ExpandedTextAds were added.", "source": "codesearchnet"}
{"code": "def structure_np_to_list(data):\n    if isinstance(data, np.ndarray):\n        return data.tolist()\n    if isinstance(data, dict):\n        return {key: structure_np_to_list(value) for key, value in data.items()}\n    if isinstance(data, list):\n        return [structure_np_to_list(item) for item in data]\n    if isinstance(data, (int, float, str, bytes)):\n        return data\n    raise ValueError(f'Non supported type {type(data)}')", "docstring": "Apply a function to a recursive structure of dict and list.\n\nArgs:\ndata: The data to apply the function to.\n\nReturns:\nThe data with the function applied.", "source": "github-repos"}
{"code": "def _AddHeader(self, fp):\n    text = textwrap.wrap(textwrap.dedent(self.config_header), break_on_hyphens=False)\n    fp.write('\\n'.join([('\n    fp.write('\\n\\n')", "docstring": "Create a file header in the config.\n\nArgs:\nfp: int, a file pointer for writing the header.", "source": "codesearchnet"}
{"code": "def rot90(array, k=1, axes=(0, 1)):\n    array = convert_to_tensor(array)\n    if array.ndim < 2:\n        raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.ndim}')\n    if len(axes) != 2 or axes[0] == axes[1]:\n        raise ValueError(f'Invalid axes: {axes}. Axes must be a tuple of two different dimensions.')\n    axes = tuple((axis if axis >= 0 else array.ndim + axis for axis in axes))\n    if not builtins.all((0 <= axis < array.ndim for axis in axes)):\n        raise ValueError(f'Invalid axes {axes} for tensor with {array.ndim} dimensions')\n    rotated = torch.rot90(array, k=k, dims=axes)\n    if isinstance(array, np.ndarray):\n        rotated = rotated.cpu().numpy()\n    return rotated", "docstring": "Rotate an array by 90 degrees in the specified plane using PyTorch.\n\nArgs:\narray: Input tensor\nk: Number of 90-degree rotations (default=1)\naxes: Tuple of two axes that define the\nplane of rotation (defaults to `(0, 1)`).\n\nReturns:\nRotated tensor", "source": "github-repos"}
{"code": "def heightmap_clamp(hm: np.ndarray, mi: float, ma: float) -> None:\n    hm.clip(mi, ma)", "docstring": "Clamp all values on this heightmap between ``mi`` and ``ma``\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nmi (float): The lower bound to clamp to.\nma (float): The upper bound to clamp to.\n\n.. deprecated:: 2.0\nDo ``hm.clip(mi, ma)`` instead.", "source": "codesearchnet"}
{"code": "def dprintx(passeditem, special=False):\n    if DEBUGALL:\n        if special:\n            from pprint import pprint\n            pprint(passeditem)\n        else:\n            print(('%s%s%s' % (C_TI, passeditem, C_NORM)))", "docstring": "Print Text if DEBUGALL set, optionally with PrettyPrint.\n\nArgs:\npasseditem (str): item to print\nspecial (bool): determines if item prints with PrettyPrint\nor regular print.", "source": "codesearchnet"}
{"code": "def get_parameters(params=None, path='', grad_only=True):\n    global current_scope\n    if (params is None):\n        params = OrderedDict()\n    for (k, v) in iteritems(current_scope):\n        if isinstance(v, dict):\n            with parameter_scope(k):\n                params = get_parameters(params, ('/'.join([path, k]) if path else k), grad_only=grad_only)\n        else:\n            assert isinstance(v, nn.Variable)\n            if ((not grad_only) or v.need_grad):\n                params[('/'.join([path, k]) if path else k)] = v\n    return params", "docstring": "Get parameter Variables under the current parameter scope.\n\nArgs:\nparams (dict): Internal use. User doesn't set it manually.\npath (str): Internal use.  User doesn't set it manually.\ngrad_only (bool): Retrieve all parameters under the current scope if\nFalse, while only parameters with need_grad=True are retrieved\nif True.\n\nReturns:\ndict: {:obj:`str` : :obj:`~nnabla.Variable`}", "source": "codesearchnet"}
{"code": "def depth_april_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_april_average_ground_temperature`'.format(value))\n\n        self._depth_april_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_april_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_april_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, component=None, action=None, target=None, args=None, filename=None, lineno=None, error=None, capacity=None):\n    self.component = component\n    self._action = action\n    self._target = target\n    self.args = args\n    self._filename = filename\n    self._lineno = lineno\n    self._error = error\n    self._separator = False\n    self._capacity = capacity", "docstring": "Instantiates a FireTraceElement.\n\nArgs:\ncomponent: The result of this element of the trace.\naction: The type of action (e.g. instantiating a class) taking place.\ntarget: (string) The name of the component being acted upon.\nargs: The args consumed by the represented action.\nfilename: The file in which the action is defined, or None if N/A.\nlineno: The line number on which the action is defined, or None if N/A.\nerror: The error represented by the action, or None if N/A.\ncapacity: (bool) Whether the action could have accepted additional args.", "source": "github-repos"}
{"code": "def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None, dtype=tf.float32):\n    mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER)\n    filenames = get_filenames(is_training, data_dir)\n    dataset = tf.data.Dataset.from_tensor_slices(filenames)\n    if is_training:\n        dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES)\n    dataset = dataset.flat_map(tf.data.TFRecordDataset)\n    return resnet_run_loop.process_record_dataset(dataset=dataset, is_training=is_training, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, parse_record_fn=parse_record, num_epochs=num_epochs, num_gpus=num_gpus, examples_per_epoch=(_NUM_IMAGES['train'] if is_training else None), dtype=dtype)", "docstring": "Input function which provides batches for train or eval.\n\nArgs:\nis_training: A boolean denoting whether the input is for training.\ndata_dir: The directory containing the input data.\nbatch_size: The number of samples per batch.\nnum_epochs: The number of epochs to repeat the dataset.\nnum_gpus: The number of gpus used for training.\ndtype: Data type to use for images/features\n\nReturns:\nA dataset that can be used for iteration.", "source": "codesearchnet"}
{"code": "def is_chief(cluster_spec=None, task_type=None, task_id=None):\n    if has_worker_context():\n        return dc_context.get_current_worker_context().is_chief\n    _validate_cluster_spec(cluster_spec, task_type, task_id)\n    cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n    if task_type == 'chief' or task_type == 'evaluator':\n        return True\n    if 'chief' not in cluster_spec and task_type == 'worker' and (task_id == 0):\n        return True\n    return False", "docstring": "Returns whether the given task is chief in the cluster.\n\nSince there is at most one evaluator and the evaluator itself should be\nindependent of the training cluster, the evaluator job is also a chief job on\nits own.\n\nIf this is currently running under a `_WorkerContext` of distribute\ncoordinator, the arguments can be omitted as the result is already available.\n\nArgs:\ncluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the\ncluster configurations.\ntask_type: the task type in the cluster.\ntask_id: the task id in the cluster.\n\nReturns:\na boolean indicating whether the given task is chief.\n\nRaises:\nValueError: if `task_type` is not in the `cluster_spec` or `task_id` exceeds\nthe maximum id of the `task_type`.", "source": "github-repos"}
{"code": "def ToScriptHash(self, address):\n        \n        if len(address) == 34:\n            if address[0] == 'A':\n                data = b58decode(address)\n                if data[0] != self.AddressVersion:\n                    raise ValueError('Not correct Coin Version')\n\n                checksum = Crypto.Default().Hash256(data[:21])[:4]\n                if checksum != data[21:]:\n                    raise Exception('Address format error')\n                return UInt160(data=data[1:21])\n            else:\n                raise Exception('Address format error')\n        else:\n            raise ValueError('Not correct Address, wrong length.')", "docstring": "Retrieve the script_hash based from an address.\n\nArgs:\naddress (str): a base58 encoded address.\n\nRaises:\nValuesError: if an invalid address is supplied or the coin version is incorrect\nException: if the address string does not start with 'A' or the checksum fails\n\nReturns:\nUInt160: script hash.", "source": "juraj-google-style"}
{"code": "def set_metadata(self, entity_type, entity_id, metadata):\n    if (not is_valid_uuid(entity_id)):\n        raise StorageArgumentException('Invalid UUID for entity_id: {0}'.format(entity_id))\n    if (not isinstance(metadata, dict)):\n        raise StorageArgumentException('The metadata was not provided as a dictionary')\n    return self._authenticated_request.to_endpoint('{}/{}/metadata/'.format(entity_type, entity_id)).with_json_body(metadata).return_body().post()", "docstring": "Set metadata for an entity.\n\nArgs:\nentity_type (str): Type of the entity. Admitted values: ['project',\n'folder', 'file'].\nentity_id (str): The UUID of the entity to be modified.\nmetadata (dict): A dictionary of key/value pairs to be written as\nmetadata.\n\nWarning:\nIt will replace all existing metadata with the provided dictionary.\n\nReturns:\nA dictionary of the updated metadata::\n\n{\nu'bar': u'200',\nu'foo': u'100'\n}\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def get_file_list(self):\n    if os.path.isdir(self.root_path):\n        return [os.path.join(self.root_path, f) for f in os.listdir(self.root_path) if os.path.isfile(os.path.join(self.root_path, f))]\n    else:\n        return [self.root_path]", "docstring": "Retrieve the list of absolute paths to all the files in this data source.\n\nReturns:\nList[str] List of absolute paths.", "source": "codesearchnet"}
{"code": "def allzeros(msg):\n    \n    d = hex2bin(data(msg))\n\n    if bin2int(d) > 0:\n        return False\n    else:\n        return True", "docstring": "check if the data bits are all zeros\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def run_census(flags_obj, ctx):\n  \n  train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE)\n  test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE)\n\n  \n  def train_input_fn():\n    return census_dataset.input_fn(\n        train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size)\n\n  def eval_input_fn():\n    return census_dataset.input_fn(test_file, 1, False, flags_obj.batch_size)\n\n  tensors_to_log = {\n      'average_loss': '{loss_prefix}head/truediv',\n      'loss': '{loss_prefix}head/weighted_loss/Sum'\n  }\n\n  \n  model_helpers.apply_clean(flags.FLAGS)\n  model = build_estimator(\n      model_dir=flags_obj.model_dir, model_type=flags_obj.model_type,\n      model_column_fn=census_dataset.build_model_columns,\n      inter_op=flags_obj.inter_op_parallelism_threads,\n      intra_op=flags_obj.intra_op_parallelism_threads,\n      ctx=ctx)\n\n  loss_prefix = LOSS_PREFIX.get(flags_obj.model_type, '')\n  tensors_to_log = {k: v.format(loss_prefix=loss_prefix)\n                    for k, v in tensors_to_log.items()}\n  train_hooks = hooks_helper.get_train_hooks(\n      flags_obj.hooks, model_dir=flags_obj.model_dir,\n      batch_size=flags_obj.batch_size, tensors_to_log=tensors_to_log)\n\n  \n  \n  train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, hooks=train_hooks)\n  eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)\n  tf.estimator.train_and_evaluate(model, train_spec, eval_spec)", "docstring": "Construct all necessary functions and call run_loop.\n\nArgs:\nflags_obj: Object containing user specified flags.", "source": "juraj-google-style"}
{"code": "def check_file(self, fs, info):\n    if ((self.exclude is not None) and fs.match(self.exclude, info.name)):\n        return False\n    return fs.match(self.filter, info.name)", "docstring": "Check if a filename should be included.\n\nOverride to exclude files from the walk.\n\nArguments:\nfs (FS): A filesystem instance.\ninfo (Info): A resource info object.\n\nReturns:\nbool: `True` if the file should be included.", "source": "codesearchnet"}
{"code": "def expo(base=2, factor=1, max_value=None):\n    n = 0\n    while True:\n        a = (factor * (base ** n))\n        if ((max_value is None) or (a < max_value)):\n            (yield a)\n            n += 1\n        else:\n            (yield max_value)", "docstring": "Generator for exponential decay.\n\nArgs:\nbase: The mathematical base of the exponentiation operation\nfactor: Factor to multiply the exponentation by.\nmax_value: The maximum value to yield. Once the value in the\ntrue exponential sequence exceeds this, the value\nof max_value will forever after be yielded.", "source": "codesearchnet"}
{"code": "def convert_drive(self, shift, instruction):\n        \n        command_dict = {\n            'name': instruction.command.name,\n            't0': shift+instruction.start_time,\n            'ch': instruction.channels[0].name\n        }\n        return self._qobj_model(**command_dict)", "docstring": "Return converted `PulseInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (PulseInstruction): drive instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "juraj-google-style"}
{"code": "def ApplyParsersToResponses(parser_factory, responses, flow_obj):\n    knowledge_base = flow_obj.state.knowledge_base\n    parsed_responses = []\n    if parser_factory.HasSingleResponseParsers():\n        for response in responses:\n            for parser in parser_factory.SingleResponseParsers():\n                parsed_responses.extend(parser.ParseResponse(knowledge_base, response, flow_obj.args.path_type))\n    for parser in parser_factory.MultiResponseParsers():\n        parsed_responses.extend(parser.ParseResponses(knowledge_base, responses))\n    has_single_file_parsers = parser_factory.HasSingleFileParsers()\n    has_multi_file_parsers = parser_factory.HasMultiFileParsers()\n    if (has_single_file_parsers or has_multi_file_parsers):\n        precondition.AssertIterableType(responses, rdf_client_fs.StatEntry)\n        pathspecs = [response.pathspec for response in responses]\n        if data_store.RelationalDBEnabled():\n            filedescs = []\n            for pathspec in pathspecs:\n                client_path = db.ClientPath.FromPathSpec(flow_obj.client_id, pathspec)\n                filedescs.append(file_store.OpenFile(client_path))\n        else:\n            filedescs = MultiOpenAff4File(flow_obj, pathspecs)\n    if has_single_file_parsers:\n        for (response, filedesc) in zip(responses, filedescs):\n            for parser in parser_factory.SingleFileParsers():\n                parsed_responses.extend(parser.ParseFile(knowledge_base, response.pathspec, filedesc))\n    if has_multi_file_parsers:\n        for parser in parser_factory.MultiFileParsers():\n            parsed_responses.extend(parser.ParseFiles(knowledge_base, pathspecs, filedescs))\n    return (parsed_responses or responses)", "docstring": "Parse responses with applicable parsers.\n\nArgs:\nparser_factory: A parser factory for specific artifact.\nresponses: A list of responses from the client.\nflow_obj: An artifact collection flow.\n\nReturns:\nA list of (possibly parsed) responses.", "source": "codesearchnet"}
{"code": "def get_route_lines_route(self, **kwargs):\n    select_date = ('%02d/%02d/%d' % (kwargs.get('day', '01'), kwargs.get('month', '01'), kwargs.get('year', '1970')))\n    params = {'SelectDate': select_date, 'Lines': util.ints_to_string(kwargs.get('lines', []))}\n    result = self.make_request('geo', 'get_route_lines_route', **params)\n    if (not util.check_result(result)):\n        return (False, result.get('resultDescription', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'resultValues')\n    return (True, [emtype.RouteLinesItem(**a) for a in values])", "docstring": "Obtain itinerary for one or more lines in the given date.\n\nArgs:\nday (int): Day of the month in format DD.\nThe number is automatically padded if it only has one digit.\nmonth (int): Month number in format MM.\nThe number is automatically padded if it only has one digit.\nyear (int): Year number in format YYYY.\nlines (list[int] | int): Lines to query, may be empty to get\nall the lines.\n\nReturns:\nStatus boolean and parsed response (list[RouteLinesItem]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def generate(self):\n        \n\n        result = self._gen(self.optimized, self.splitstring)\n        if self.splitstring and result is not None:\n            result = result[1:]\n        return result", "docstring": "Generates a new random string from the start symbol\nArgs:\nNone\nReturns:\nstr: The generated string", "source": "juraj-google-style"}
{"code": "def match(self, url):\n    try:\n        urlSchemes = self._urlSchemes.itervalues()\n    except AttributeError:\n        urlSchemes = self._urlSchemes.values()\n    for urlScheme in urlSchemes:\n        if urlScheme.match(url):\n            return True\n    return False", "docstring": "Try to find if url matches against any of the schemes within this\nendpoint.\n\nArgs:\nurl: The url to match against each scheme\n\nReturns:\nTrue if a matching scheme was found for the url, False otherwise", "source": "codesearchnet"}
{"code": "def parse_vep_header(vcf_obj):\n    \n    vep_header = []\n    \n    if 'CSQ' in vcf_obj:\n        \n        csq_info = vcf_obj['CSQ']\n        format_info = parse_header_format(csq_info['Description'])\n        vep_header = [key.upper() for key in format_info.split('|')]\n    \n    return vep_header", "docstring": "Return a list with the VEP header\n\nThe vep header is collected from CSQ in the vcf file\nAll keys are capitalized\n\nArgs:\nvcf_obj(cyvcf2.VCF)\n\nReturns:\nvep_header(list)", "source": "juraj-google-style"}
{"code": "def plot_grid(step):\n    rad = get_rprof(step, 'r')[0]\n    drad = get_rprof(step, 'dr')[0]\n    (_, unit) = step.sdat.scale(1, 'm')\n    if unit:\n        unit = ' ({})'.format(unit)\n    (fig, (ax1, ax2)) = plt.subplots(2, sharex=True)\n    ax1.plot(rad, '-ko')\n    ax1.set_ylabel(('$r$' + unit))\n    ax2.plot(drad, '-ko')\n    ax2.set_ylabel(('$dr$' + unit))\n    ax2.set_xlim([(- 0.5), (len(rad) - 0.5)])\n    ax2.set_xlabel('Cell number')\n    misc.saveplot(fig, 'grid', step.istep)", "docstring": "Plot cell position and thickness.\n\nThe figure is call grid_N.pdf where N is replace by the step index.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.", "source": "codesearchnet"}
{"code": "def to_polars(evset: EventSet, tp_string_to_pl_string: bool=True, timestamp_to_datetime: bool=True, timestamps: bool=True) -> 'pl.DataFrame':\n    pl = import_pl()\n    timestamp_key = 'timestamp'\n    index_names = evset.schema.index_names()\n    feature_names = evset.schema.feature_names()\n    column_names = index_names + feature_names\n    if timestamps:\n        column_names += [timestamp_key]\n    data_dict = {column_name: [] for column_name in column_names}\n    for index, data in evset.data.items():\n        assert isinstance(index, tuple)\n        if timestamps:\n            timestamps_data = data.timestamps\n            if evset.schema.is_unix_timestamp and timestamp_to_datetime:\n                datetime_series = pl.from_epoch(pl.Series(timestamps_data), time_unit='s')\n                data_dict[timestamp_key].extend(datetime_series)\n            else:\n                data_dict[timestamp_key].extend(timestamps_data)\n        for feature_name, feature in zip(feature_names, data.features):\n            data_dict[feature_name].extend(feature)\n        num_timestamps = len(data.timestamps)\n        for index_name, index_item in zip(index_names, index):\n            data_dict[index_name].extend([index_item] * num_timestamps)\n    for col_name, col_data in data_dict.items():\n        data_dict[col_name] = pl.Series(col_data)\n    if tp_string_to_pl_string:\n        for feature in evset.schema.features:\n            if feature.dtype == DType.STRING:\n                data_dict[feature.name] = data_dict[feature.name].cast(pl.Utf8)\n        for index in evset.schema.indexes:\n            if index.dtype == DType.STRING:\n                data_dict[index.name] = data_dict[index.name].cast(pl.Utf8)\n    return pl.DataFrame(data_dict)", "docstring": "Converts an  [`EventSet`][temporian.EventSet] to a Polars DataFrame.\n\nUsage example:\n```python\n>>> from datetime import datetime\n\n>>> evset = tp.event_set(\n...     timestamps=[datetime(2015, 1, 1), datetime(2015, 1, 2)],\n...     features={\n...         \"feature_1\": [0.5, 0.6],\n...         \"my_index\": [\"red\", \"yellow\"],\n...    },\n...    indexes=[\"my_index\"],\n... )\n\n>>> df = tp.to_polars(evset)\n\n\n```\n\nArgs:\nevset: Input EventSet.\ntimestamp_to_datetime: If true, convert epoch timestamps to Polars Date objects.\ntimestamps: If true, include the timestamps as a column in the DataFrame.\ntp_string_to_pl_string: If true, cast Temporian strings to Polars Object.\n\nReturns:\nA Polars DataFrame created from the EventSet.", "source": "github-repos"}
{"code": "def adversary(self, name, **kwargs):\n        \n        group_obj = Adversary(name, **kwargs)\n        return self._group(group_obj)", "docstring": "Add Adversary data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Adversary.", "source": "juraj-google-style"}
{"code": "def get_data_location(self, catalog_id):\n    try:\n        record = self.get(catalog_id)\n    except:\n        return None\n    if (('Landsat8' in record['type']) and ('LandsatAcquisition' in record['type'])):\n        bucket = record['properties']['bucketName']\n        prefix = record['properties']['bucketPrefix']\n        return ((('s3:\n    if ('DigitalGlobeAcquisition' in record['type']):\n        o = Ordering()\n        res = o.location([catalog_id])\n        return res['acquisitions'][0]['location']\n    return None", "docstring": "Find and return the S3 data location given a catalog_id.\n\nArgs:\ncatalog_id: The catalog ID\n\nReturns:\nA string containing the s3 location of the data associated with a catalog ID.  Returns\nNone if the catalog ID is not found, or if there is no data yet associated with it.", "source": "codesearchnet"}
{"code": "def add_update_resources(self, resources, ignore_datasetid=False):\n        \n        \n        if not isinstance(resources, list):\n            raise HDXError('Resources should be a list!')\n        for resource in resources:\n            self.add_update_resource(resource, ignore_datasetid)", "docstring": "Add new or update existing resources with new metadata to the dataset\n\nArgs:\nresources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries\nignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def one_or_more(e, delimiter=None):\n    \n    if delimiter is None:\n        delimiter = lambda s, grm, pos: (s, Ignore, (pos, pos))\n    msg = 'Expected one or more of: {}'.format(repr(e))\n    def match_one_or_more(s, grm=None, pos=0):\n        start = pos\n        s, obj, span = e(s, grm, pos)\n        pos = span[1]\n        data = [] if obj is Ignore else [obj]\n        try:\n            while True:\n                s, obj, span = delimiter(s, grm, pos)\n                pos = span[1]\n                if obj is not Ignore:\n                    data.append(obj)\n                s, obj, span = e(s, grm, pos)\n                pos = span[1]\n                if obj is not Ignore:\n                    data.append(obj)\n        except PegreError:\n            pass\n        return PegreResult(s, data, (start, pos))\n    return match_one_or_more", "docstring": "Create a PEG function to match one or more expressions.\n\nArgs:\ne: the expression to match\ndelimiter: an optional expression to match between the\nprimary *e* matches.", "source": "juraj-google-style"}
{"code": "def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):\n    interpreter = Interpreter(model_content=tflite_model)\n    input_details = interpreter.get_input_details()\n    if input_shapes:\n        for idx, (shape_signature, final_shape) in enumerate(input_shapes):\n            self.assertTrue((input_details[idx]['shape_signature'] == shape_signature).all())\n            index = input_details[idx]['index']\n            interpreter.resize_tensor_input(index, final_shape, strict=True)\n    interpreter.allocate_tensors()\n    output_details = interpreter.get_output_details()\n    input_details = interpreter.get_input_details()\n    for input_tensor, tensor_data in zip(input_details, input_data):\n        interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())\n    interpreter.invoke()\n    return [interpreter.get_tensor(details['index']) for details in output_details]", "docstring": "Evaluates the model on the `input_data`.\n\nArgs:\ntflite_model: TensorFlow Lite model.\ninput_data: List of EagerTensor const ops containing the input data for\neach input tensor.\ninput_shapes: List of tuples representing the `shape_signature` and the\nnew shape of each input tensor that has unknown dimensions.\n\nReturns:\n[np.ndarray]", "source": "github-repos"}
{"code": "def add_imported_namespace(self, namespace, imported_alias=False, imported_data_type=False, imported_annotation=False, imported_annotation_type=False):\n    assert (self.name != namespace.name), 'Namespace cannot import itself.'\n    reason = self._imported_namespaces.setdefault(namespace, _ImportReason())\n    if imported_alias:\n        reason.alias = True\n    if imported_data_type:\n        reason.data_type = True\n    if imported_annotation:\n        reason.annotation = True\n    if imported_annotation_type:\n        reason.annotation_type = True", "docstring": "Keeps track of namespaces that this namespace imports.\n\nArgs:\nnamespace (Namespace): The imported namespace.\nimported_alias (bool): Set if this namespace references an alias\nin the imported namespace.\nimported_data_type (bool): Set if this namespace references a\ndata type in the imported namespace.\nimported_annotation (bool): Set if this namespace references a\nannotation in the imported namespace.\nimported_annotation_type (bool): Set if this namespace references an\nannotation in the imported namespace, possibly indirectly (by\nreferencing an annotation elsewhere that has this type).", "source": "codesearchnet"}
{"code": "def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):\n    if unique_matches:\n        pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)\n    else:\n        pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)\n    return pairings", "docstring": "Match forecast and observed tracks.\n\nArgs:\nmodel_tracks:\nobs_tracks:\nunique_matches:\nclosest_matches:\n\nReturns:", "source": "codesearchnet"}
{"code": "def run_shell_cmd(args):\n    proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    return proc.communicate()", "docstring": "Executes shell commands and returns output.\n\nArgs:\nargs: String of shell commands to run.\n\nReturns:\nTuple output (stdoutdata, stderrdata) from running the shell commands.", "source": "github-repos"}
{"code": "def merge(self, status: 'Status[Input, Output]') -> 'Status[Input, Output]':\n    if ((status is None) or (status.farthest is None)):\n        pass\n    elif (self.farthest is None):\n        self.farthest = status.farthest\n        self.expected = status.expected\n    elif (status.farthest.position < self.farthest.position):\n        pass\n    elif (status.farthest.position > self.farthest.position):\n        self.farthest = status.farthest\n        self.expected = status.expected\n    else:\n        self.expected = (status.expected + self.expected)\n    return self", "docstring": "Merge the failure message from another status into this one.\n\nWhichever status represents parsing that has gone the farthest is\nretained. If both statuses have gone the same distance, then the\nexpected values from both are retained.\n\nArgs:\nstatus: The status to merge into this one.\n\nReturns:\nThis ``Status`` which may have ``farthest`` and ``expected``\nupdated accordingly.", "source": "codesearchnet"}
{"code": "def _get_mpr_table(self, connection, partition):\n    virtual_table = partition.vid\n    table = '{}_v'.format(virtual_table)\n    logger.debug('Looking for materialized table of the partition.\\n    partition: {}'.format(partition.name))\n    table_exists = self._relation_exists(connection, table)\n    if table_exists:\n        logger.debug('Materialized table of the partition found.\\n    partition: {}, table: {}'.format(partition.name, table))\n        return table\n    logger.debug('Looking for a virtual table of the partition.\\n    partition: {}'.format(partition.name))\n    virtual_exists = self._relation_exists(connection, virtual_table)\n    if virtual_exists:\n        logger.debug('Virtual table of the partition found.\\n    partition: {}, table: {}'.format(partition.name, table))\n        return virtual_table\n    raise MissingTableError('sqlite database does not have table for mpr of {} partition.'.format(partition.vid))", "docstring": "Returns name of the sqlite table who stores mpr data.\n\nArgs:\nconnection (apsw.Connection): connection to sqlite database who stores mpr data.\npartition (orm.Partition):\n\nReturns:\nstr:\n\nRaises:\nMissingTableError: if partition table not found in the db.", "source": "codesearchnet"}
{"code": "def MakeType(name, base_classes, namespace):\n    precondition.AssertType(name, str)\n    if PY2:\n        name = name.encode('ascii')\n    return type(name, base_classes, namespace)", "docstring": "A compatibility wrapper for the `type` built-in function.\n\nIn Python 2 `type` (used as a type constructor) requires the name argument to\nbe a `bytes` object whereas in Python 3 it is required to be an `unicode`\nobject. Since class name is human readable text rather than arbitrary stream\nof bytes, the Python 3 behaviour is considered to be the sane one.\n\nOnce support for Python 2 is dropped all invocations of this call can be\nreplaced with the `type` built-in.\n\nArgs:\nname: A name of the type to create.\nbase_classes: A tuple of base classes that the returned type is supposed to\nderive from.\nnamespace: A dictionary of methods and fields that the returned type is\nsupposed to contain.\n\nReturns:\nA new type with specified parameters.", "source": "codesearchnet"}
{"code": "def SetParseFn(fn, *arguments):\n\n    def _Decorator(func):\n        parse_fns = GetParseFns(func)\n        if not arguments:\n            parse_fns['default'] = fn\n        else:\n            for argument in arguments:\n                parse_fns['named'][argument] = fn\n        _SetMetadata(func, FIRE_PARSE_FNS, parse_fns)\n        return func\n    return _Decorator", "docstring": "Sets the fn for Fire to use to parse args when calling the decorated fn.\n\nArgs:\nfn: The function to be used for parsing arguments.\n*arguments: The arguments for which to use the parse fn. If none are listed,\nthen this will set the default parse function.\nReturns:\nThe decorated function, which now has metadata telling Fire how to perform.", "source": "github-repos"}
{"code": "def __init__(self, org=None, library=None, branch=None, version_guid=None, **kwargs):\n        \n        if 'offering' in kwargs:\n            raise ValueError(\"'offering' is not a valid field for a LibraryLocator.\")\n\n        if 'course' in kwargs:\n            if library is not None:\n                raise ValueError(\"Cannot specify both 'library' and 'course'\")\n            warnings.warn(\n                \"For LibraryLocators, use 'library' instead of 'course'.\",\n                DeprecationWarning,\n                stacklevel=2\n            )\n            library = kwargs.pop('course')\n\n        run = kwargs.pop('run', self.RUN)\n        if run != self.RUN:\n            raise ValueError(\"Invalid run. Should be '{}' or None.\".format(self.RUN))\n\n        if version_guid:\n            version_guid = self.as_object_id(version_guid)\n\n        for name, value in [['org', org], ['library', library], ['branch', branch]]:\n            if not (value is None or self.ALLOWED_ID_RE.match(value)):\n                raise InvalidKeyError(self.__class__,\n                                      u\"Special characters not allowed in field {}: '{}'\".format(name, value))\n\n        if kwargs.get('deprecated', False):\n            raise InvalidKeyError(self.__class__, 'LibraryLocator cannot have deprecated=True')\n\n        super(LibraryLocator, self).__init__(\n            org=org,\n            library=library,\n            branch=branch,\n            version_guid=version_guid,\n            **kwargs\n        )\n\n        if self.version_guid is None and (self.org is None or self.library is None):  \n            raise InvalidKeyError(self.__class__, \"Either version_guid or org and library should be set\")", "docstring": "Construct a LibraryLocator\n\nArgs:\nversion_guid (string or ObjectId): optional unique id for the version\norg, library: the standard definition. Optional only if version_guid given.\nbranch (string): the optional branch such as 'draft', 'published', 'staged', 'beta'", "source": "juraj-google-style"}
{"code": "def flatten(vari):\n    if isinstance(vari, Poly):\n        shape = int(numpy.prod(vari.shape))\n        return reshape(vari, (shape,))\n    return numpy.array(vari).flatten()", "docstring": "Flatten a shapeable quantity.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nShapeable input quantity.\n\nReturns:\n(chaospy.poly.base.Poly, numpy.ndarray):\nSame type as ``vari`` with `len(Q.shape)==1`.\n\nExamples:\n>>> P = chaospy.reshape(chaospy.prange(4), (2,2))\n>>> print(P)\n[[1, q0], [q0^2, q0^3]]\n>>> print(chaospy.flatten(P))\n[1, q0, q0^2, q0^3]", "source": "codesearchnet"}
{"code": "def play_match(black_model, white_model, games, sgf_dir):\n    with utils.logged_timer('Loading weights'):\n        black_net = dual_net.DualNetwork(black_model)\n        white_net = dual_net.DualNetwork(white_model)\n    readouts = FLAGS.num_readouts\n    black = MCTSPlayer(black_net, two_player_mode=True)\n    white = MCTSPlayer(white_net, two_player_mode=True)\n    black_name = os.path.basename(black_net.save_file)\n    white_name = os.path.basename(white_net.save_file)\n    for i in range(games):\n        num_move = 0\n        for player in [black, white]:\n            player.initialize_game()\n            first_node = player.root.select_leaf()\n            (prob, val) = player.network.run(first_node.position)\n            first_node.incorporate_results(prob, val, first_node)\n        while True:\n            start = time.time()\n            active = (white if (num_move % 2) else black)\n            inactive = (black if (num_move % 2) else white)\n            current_readouts = active.root.N\n            while (active.root.N < (current_readouts + readouts)):\n                active.tree_search()\n            if (FLAGS.verbose >= 3):\n                print(active.root.position)\n            if active.should_resign():\n                active.set_result(((- 1) * active.root.position.to_play), was_resign=True)\n                inactive.set_result(active.root.position.to_play, was_resign=True)\n            if active.is_done():\n                fname = '{:d}-{:s}-vs-{:s}-{:d}.sgf'.format(int(time.time()), white_name, black_name, i)\n                active.set_result(active.root.position.result(), was_resign=False)\n                with gfile.GFile(os.path.join(sgf_dir, fname), 'w') as _file:\n                    sgfstr = sgf_wrapper.make_sgf(active.position.recent, active.result_string, black_name=black_name, white_name=white_name)\n                    _file.write(sgfstr)\n                print('Finished game', i, active.result_string)\n                break\n            move = active.pick_move()\n            active.play_move(move)\n            inactive.play_move(move)\n            dur = (time.time() - start)\n            num_move += 1\n            if ((FLAGS.verbose > 1) or ((FLAGS.verbose == 1) and ((num_move % 10) == 9))):\n                timeper = ((dur / readouts) * 100.0)\n                print(active.root.position)\n                print(('%d: %d readouts, %.3f s/100. (%.2f sec)' % (num_move, readouts, timeper, dur)))", "docstring": "Plays matches between two neural nets.\n\nArgs:\nblack_model: Path to the model for black player\nwhite_model: Path to the model for white player", "source": "codesearchnet"}
{"code": "def guess_leb_size(path):\n    f = open(path, 'rb')\n    f.seek(0, 2)\n    file_size = (f.tell() + 1)\n    f.seek(0)\n    block_size = None\n    for _ in range(0, file_size, FILE_CHUNK_SZ):\n        buf = f.read(FILE_CHUNK_SZ)\n        for m in re.finditer(UBIFS_NODE_MAGIC, buf):\n            start = m.start()\n            chdr = nodes.common_hdr(buf[start:(start + UBIFS_COMMON_HDR_SZ)])\n            if (chdr and (chdr.node_type == UBIFS_SB_NODE)):\n                sb_start = (start + UBIFS_COMMON_HDR_SZ)\n                sb_end = (sb_start + UBIFS_SB_NODE_SZ)\n                if (chdr.len != len(buf[sb_start:sb_end])):\n                    f.seek(sb_start)\n                    buf = f.read(UBIFS_SB_NODE_SZ)\n                else:\n                    buf = buf[sb_start:sb_end]\n                sbn = nodes.sb_node(buf)\n                block_size = sbn.leb_size\n                f.close()\n                return block_size\n    f.close()\n    return block_size", "docstring": "Get LEB size from superblock\n\nArguments:\nStr:path    -- Path to file.\n\nReturns:\nInt         -- LEB size.\n\nSearches file for superblock and retrieves leb size.", "source": "codesearchnet"}
{"code": "def on_deleted(self, event):\n    if (not self._event_error):\n        self.logger.info(u'Change detected from deletion of: %s', event.src_path)\n        self.compile_dependencies(event.src_path, include_self=False)", "docstring": "Called when a file or directory is deleted.\n\nTodo:\nMay be bugged with inspector and sass compiler since the does not\nexists anymore.\n\nArgs:\nevent: Watchdog event, ``watchdog.events.DirDeletedEvent`` or\n``watchdog.events.FileDeletedEvent``.", "source": "codesearchnet"}
{"code": "def _check_approval_wrapper(self, grr_object, grr_function, *args, **kwargs):\n    \n    approval_sent = False\n\n    while True:\n      try:\n        return grr_function(*args, **kwargs)\n      except grr_errors.AccessForbiddenError as exception:\n        print('No valid approval found: {0!s}'.format(exception))\n        \n        if approval_sent:\n          print('Approval not yet granted, waiting {0:d}s'.format(\n              self._CHECK_APPROVAL_INTERVAL_SEC))\n          time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC)\n          continue\n\n        \n        if not self.approvers:\n          message = ('GRR needs approval but no approvers specified '\n                     '(hint: use --approvers)')\n          self.state.add_error(message, critical=True)\n          return None\n\n        \n        grr_object.CreateApproval(\n            reason=self.reason, notified_users=self.approvers)\n        approval_sent = True\n        print('{0!s}: approval request sent to: {1!s} (reason: {2:s})'.format(\n            grr_object, self.approvers, self.reason))", "docstring": "Wraps a call to GRR functions checking for approval.\n\nArgs:\ngrr_object: the GRR object to create the eventual approval on.\ngrr_function: The GRR function requiring approval.\n*args: Positional arguments that are to be passed to `grr_function`.\n**kwargs: Keyword arguments that are to be passed to `grr_function`.\n\nReturns:\nThe return value of the execution of grr_function(*args, **kwargs).", "source": "juraj-google-style"}
{"code": "def load(self, languages=[]):\n    duckling_load = self.clojure.var('duckling.core', 'load!')\n    clojure_hashmap = self.clojure.var('clojure.core', 'hash-map')\n    clojure_list = self.clojure.var('clojure.core', 'list')\n    if languages:\n        iso_languages = [Language.convert_to_iso(lang) for lang in languages]\n        duckling_load.invoke(clojure_hashmap.invoke(self.clojure.read(':languages'), clojure_list.invoke(*iso_languages)))\n    else:\n        duckling_load.invoke()\n    self._is_loaded = True", "docstring": "Loads the Duckling corpus.\n\nLanguages can be specified, defaults to all.\n\nArgs:\nlanguages: Optional parameter to specify languages,\ne.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. [\"en\", \"fr\"])", "source": "codesearchnet"}
{"code": "def _delete_from_hdx(self, object_type, id_field_name):\n        \n        \n        if id_field_name not in self.data:\n            raise HDXError('No %s field (mandatory) in %s!' % (id_field_name, object_type))\n        self._save_to_hdx('delete', id_field_name)", "docstring": "Helper method to deletes a resource from HDX\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def decorate(self, record):\n    color = 'gray'\n    if (record.levelno == logging.WARNING):\n        color = 'yellow'\n    if (record.levelno == logging.INFO):\n        color = 'green'\n    if (record.levelno == logging.DEBUG):\n        color = 'gray'\n    if (record.levelno >= logging.ERROR):\n        color = 'red'\n    notify = False\n    if (record.levelno >= logging.ERROR):\n        nofiy = True\n    payload = {'color': color, 'notify': notify, 'message_format': 'text'}\n    return payload", "docstring": "Build up HipChat specific values for log record\n\nArgs:\nrecord (:obj:`logging.record`): log message object\n\nReturns:\ndict: params for POST request", "source": "codesearchnet"}
{"code": "def get_compatible_generator_action(self, filename):\n        \n        \n        for action in self.__generator_actions:\n            if action.act_on_file(filename):\n                return action\n\n        return None", "docstring": "Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found.\n\nArgs:\nfilename (str): The filename of the template to process.", "source": "juraj-google-style"}
{"code": "def set_column_sizes(self, values):\n        \n        self.style['grid-template-columns'] = ' '.join(map(lambda value: (str(value) if str(value).endswith('%') else str(value) + '%') , values))", "docstring": "Sets the size value for each column\n\nArgs:\nvalues (iterable of int or str): values are treated as percentage.", "source": "juraj-google-style"}
{"code": "def __init__(self, scopes, service_account_id=None, token_maker=None,\n               retry_params=None):\n    \n\n    if isinstance(scopes, basestring):\n      scopes = [scopes]\n    self.scopes = scopes\n    self.service_account_id = service_account_id\n    self.make_token_async = token_maker or _config.TOKEN_MAKER\n    if not retry_params:\n      retry_params = api_utils._get_default_retry_params()\n    self.retry_params = retry_params\n    self.user_agent = {'User-Agent': retry_params._user_agent}\n    self.expiration_headroom = random.randint(60, 240)", "docstring": "Constructor.\n\nArgs:\nscopes: A scope or a list of scopes.\nservice_account_id: Internal use only.\ntoken_maker: An asynchronous function of the form\n(scopes, service_account_id) -> (token, expires).\nretry_params: An instance of api_utils.RetryParams. If None, the\ndefault for current thread will be used.", "source": "juraj-google-style"}
{"code": "def get_key(key, data_structure):\n        \n        if key == '/':\n            return data_structure\n\n        path = key.split('/')\n        \n        path[0] or path.pop(0)\n        current_value = data_structure\n        while path:\n            current_key = path.pop(0)\n            try:\n                current_key = int(current_key)\n            except ValueError:\n                pass\n\n            try:\n                current_value = current_value[current_key]\n            except (KeyError, IndexError):\n                LOGGER.debug('failed to extract path {}'.format(key))\n                return None\n\n        return current_value", "docstring": "Helper method for extracting values from a nested data structure.\n\nArgs:\nkey (str): The path to the vales (a series of keys and indexes\nseparated by '/')\ndata_structure (dict or list): The data structure from which the\nvalue will be extracted.\n\nReturns:\nstr: The values associated with key", "source": "juraj-google-style"}
{"code": "def from_join(cls, join: Join) -> 'ConditionalJoin':\n        \n\n        return cls(\n            join.table_name,\n            join.parent_alias,\n            join.table_alias,\n            join.join_type,\n            join.join_field,\n            join.nullable\n        )", "docstring": "Creates a new :see:ConditionalJoin from the\nspecified :see:Join object.\n\nArguments:\njoin:\nThe :see:Join object to create the\n:see:ConditionalJoin object from.\n\nReturns:\nA :see:ConditionalJoin object created from\nthe :see:Join object.", "source": "juraj-google-style"}
{"code": "def set_all_pattern_variables(self, patternnumber, sp0, ti0, sp1, ti1, sp2, ti2, sp3, ti3, sp4, ti4, sp5, ti5, sp6, ti6, sp7, ti7, actual_step, additional_cycles, link_pattern):\n    _checkPatternNumber(patternnumber)\n    self.set_pattern_step_setpoint(patternnumber, 0, sp0)\n    self.set_pattern_step_setpoint(patternnumber, 1, sp1)\n    self.set_pattern_step_setpoint(patternnumber, 2, sp2)\n    self.set_pattern_step_setpoint(patternnumber, 3, sp3)\n    self.set_pattern_step_setpoint(patternnumber, 4, sp4)\n    self.set_pattern_step_setpoint(patternnumber, 5, sp5)\n    self.set_pattern_step_setpoint(patternnumber, 6, sp6)\n    self.set_pattern_step_setpoint(patternnumber, 7, sp7)\n    self.set_pattern_step_time(patternnumber, 0, ti0)\n    self.set_pattern_step_time(patternnumber, 1, ti1)\n    self.set_pattern_step_time(patternnumber, 2, ti2)\n    self.set_pattern_step_time(patternnumber, 3, ti3)\n    self.set_pattern_step_time(patternnumber, 4, ti4)\n    self.set_pattern_step_time(patternnumber, 5, ti5)\n    self.set_pattern_step_time(patternnumber, 6, ti6)\n    self.set_pattern_step_time(patternnumber, 7, ti7)\n    self.set_pattern_additional_cycles(patternnumber, additional_cycles)\n    self.set_pattern_link_topattern(patternnumber, link_pattern)\n    self.set_pattern_actual_step(patternnumber, actual_step)", "docstring": "Set all variables for a given pattern at one time.\n\nArgs:\n* patternnumber (integer): 0-7\n* sp[*n*] (float): setpoint value for step *n*\n* ti[*n*] (integer??): step time for step *n*, 0-900\n* actual_step (int): ?\n* additional_cycles(int): ?\n* link_pattern(int): ?", "source": "codesearchnet"}
{"code": "def inner_text(node):\n    from lxml import etree\n    parts = [node.text]\n    for child in node.getchildren():\n        parts.append(etree.tostring(child, encoding='utf-8', method='text'))\n        parts.append(child.tail)\n    return ''.join(map(decode_bytes, filter(None, parts)))", "docstring": "Returns the inner text of a given XML node, excluding tags.\n\nArgs:\nnode: (lxml.etree.Element): The node whose inner text is desired.\n\nReturns:\nstr: The inner text of the node.", "source": "codesearchnet"}
{"code": "def fts_contrast2(self, fs, ft_name, inv):\n    inv_fts = [self.fts(x) for x in inv if (set(fs) <= self.fts(x))]\n    for a in inv_fts:\n        for b in inv_fts:\n            if (a != b):\n                diff = (a ^ b)\n                if (len(diff) == 2):\n                    if all([(nm == ft_name) for (_, nm) in diff]):\n                        return True\n    return False", "docstring": "Return `True` if there is a segment in `inv` that contrasts in feature\n`ft_name`.\n\nArgs:\nfs (list): feature specifications used to filter `inv`.\nft_name (str): name of the feature where contrast must be present.\ninv (list): collection of segments represented as Unicode segments.\n\nReturns:\nbool: `True` if two segments in `inv` are identical in features except\nfor feature `ft_name`", "source": "codesearchnet"}
{"code": "def on_test_begin(self, logs=None):", "docstring": "Called at the beginning of evaluation or validation.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this\nmethod but that may change in the future.", "source": "github-repos"}
{"code": "def set_sig_figs(n=4):\n    u.default_format = (('.' + str(n)) + 'g')\n    pd.options.display.float_format = (('{:,.' + str(n)) + '}').format", "docstring": "Set the number of significant figures used to print Pint, Pandas, and\nNumPy quantities.\n\nArgs:\nn (int): Number of significant figures to display.", "source": "codesearchnet"}
{"code": "def derive_annotations(self, annotations):\n    cls = type(self)\n    return cls(self[0], self[1], self[2], self[3], annotations, self[5])", "docstring": "Derives a new event from this one setting the ``annotations`` attribute.\n\nArgs:\nannotations: (Sequence[Union[amazon.ion.symbols.SymbolToken, unicode]]):\nThe annotations associated with the derived event.\n\nReturns:\nIonEvent: The newly generated event.", "source": "codesearchnet"}
{"code": "def _ProduceContent(self, mods, showprivate=False, showinh=False):\n    result = ''\n    nestedresult = ''\n    for mod in mods:\n        try:\n            all = mod[1].__all__\n        except AttributeError:\n            raise RuntimeError(('Module (%s) MUST have `__all__` defined.' % mod[1].__name__))\n        if ((not showprivate) and (mod[0][0:1] == '_')):\n            continue\n        if (mod[0][0:2] == '__'):\n            continue\n        result += self._ProduceSingleContent(mod, showprivate, showinh)\n    return result", "docstring": "An internal helper to create pages for several modules that do not have nested modules.\nThis will automatically generate the needed RSF to document each module module\nand save the module to its own page appropriately.\n\nArgs:\nmods (module): The modules to document that do not contain nested modules\nshowprivate (bool): A flag for whether or not to display private members\n\nReturns:\nstr: The file names ready to be appended to a toctree", "source": "codesearchnet"}
{"code": "def _freeze_keras_model(self, output_dir):\n    try:\n        self._keras_model.save(output_dir, save_format='tf')\n    except Exception:\n        return None\n    tag_set = set([_tag_constants.SERVING])\n    signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n    graph_def, input_tensors, output_tensors, sess_graph = _freeze_saved_model(output_dir, None, None, None, tag_set, signature_key)\n    self.saved_model_dir = output_dir\n    self._saved_model_tags = tag_set\n    self._saved_model_exported_names = [signature_key]\n    self._parse_saved_model_args()\n    if self.saved_model_dir:\n        self._graph_def = graph_def\n        self._input_tensors = input_tensors\n        self._output_tensors = output_tensors\n        self._debug_info_func = _build_debug_info_func(sess_graph)", "docstring": "Save Keras model to Saved Model format.\n\nArgs:\noutput_dir: The output directory to save the SavedModel.", "source": "github-repos"}
{"code": "def build(self, client, nobuild=False, usecache=True, pull=False):\n    if (not nobuild):\n        self.update_source_images(client, usecache=usecache, pull=pull)\n    width = utils.get_console_width()\n    cprint(('\\n' + ('=' * width)), color='white', attrs=['bold'])\n    line = ('STARTING BUILD for \"%s\" (image definition \"%s\" from %s)\\n' % (self.targetname, self.imagename, self.steps[(- 1)].sourcefile))\n    cprint(_centered(line, width), color='blue', attrs=['bold'])\n    for (istep, step) in enumerate(self.steps):\n        print(colored('* Step', 'blue'), colored(('%d/%d' % ((istep + 1), len(self.steps))), 'blue', attrs=['bold']), colored('for image', color='blue'), colored(self.imagename, color='blue', attrs=['bold']))\n        if (not nobuild):\n            if step.bust_cache:\n                stackkey = self._get_stack_key(istep)\n                if (stackkey in _rebuilt):\n                    step.bust_cache = False\n            step.build(client, usecache=usecache)\n            print(colored('* Created intermediate image', 'green'), colored(step.buildname, 'green', attrs=['bold']), end='\\n\\n')\n            if step.bust_cache:\n                _rebuilt.add(stackkey)\n    finalimage = step.buildname\n    if (not nobuild):\n        self.finalizenames(client, finalimage)\n        line = ('FINISHED BUILDING \"%s\" (image definition \"%s\" from %s)' % (self.targetname, self.imagename, self.steps[(- 1)].sourcefile))\n        cprint(_centered(line, width), color='green', attrs=['bold'])\n        cprint(('=' * width), color='white', attrs=['bold'], end='\\n\\n')", "docstring": "Drives the build of the final image - get the list of steps and execute them.\n\nArgs:\nclient (docker.Client): docker client object that will build the image\nnobuild (bool): just create dockerfiles, don't actually build the image\nusecache (bool): use docker cache, or rebuild everything from scratch?\npull (bool): try to pull new versions of repository images?", "source": "codesearchnet"}
{"code": "def stop(self, wait=True):\n    assert (not self._stopped), 'Already stopped'\n    self._stopped = True\n    self._tornado.stop(wait)\n    self._http.stop()", "docstring": "Stop the Bokeh Server.\n\nThis stops and removes all Bokeh Server ``IOLoop`` callbacks, as well\nas stops the ``HTTPServer`` that this instance was configured with.\n\nArgs:\nfast (bool):\nWhether to wait for orderly cleanup (default: True)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _CompositeMapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    elements_data_size = None\n    elements_terminator = None\n    number_of_elements = None\n\n    if self._HasElementsDataSize():\n      elements_data_size = self._EvaluateElementsDataSize(context)\n\n      element_byte_size = self._element_data_type_definition.GetByteSize()\n      if element_byte_size is not None:\n        number_of_elements, _ = divmod(elements_data_size, element_byte_size)\n      else:\n        elements_terminator = (\n            self._element_data_type_definition.elements_terminator)\n\n    elif self._HasElementsTerminator():\n      elements_terminator = self._data_type_definition.elements_terminator\n\n    elif self._HasNumberOfElements():\n      number_of_elements = self._EvaluateNumberOfElements(context)\n\n    if elements_terminator is None and number_of_elements is None:\n      raise errors.MappingError(\n          'Unable to determine element terminator or number of elements')\n\n    context_state = getattr(context, 'state', {})\n\n    elements_data_offset = context_state.get('elements_data_offset', 0)\n    element_index = context_state.get('element_index', 0)\n    element_value = None\n    mapped_values = context_state.get('mapped_values', [])\n    size_hints = context_state.get('size_hints', {})\n    subcontext = context_state.get('context', None)\n\n    if not subcontext:\n      subcontext = DataTypeMapContext()\n\n    try:\n      while byte_stream[byte_offset:]:\n        if (number_of_elements is not None and\n            element_index == number_of_elements):\n          break\n\n        if (elements_data_size is not None and\n            elements_data_offset >= elements_data_size):\n          break\n\n        element_value = self._element_data_type_map.MapByteStream(\n            byte_stream, byte_offset=byte_offset, context=subcontext)\n\n        byte_offset += subcontext.byte_size\n        elements_data_offset += subcontext.byte_size\n        element_index += 1\n        mapped_values.append(element_value)\n\n        if (elements_terminator is not None and\n            element_value == elements_terminator):\n          break\n\n    except errors.ByteStreamTooSmallError as exception:\n      context_state['context'] = subcontext\n      context_state['elements_data_offset'] = elements_data_offset\n      context_state['element_index'] = element_index\n      context_state['mapped_values'] = mapped_values\n      raise errors.ByteStreamTooSmallError(exception)\n\n    except Exception as exception:\n      raise errors.MappingError(exception)\n\n    if number_of_elements is not None and element_index != number_of_elements:\n      context_state['context'] = subcontext\n      context_state['elements_data_offset'] = elements_data_offset\n      context_state['element_index'] = element_index\n      context_state['mapped_values'] = mapped_values\n\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: missing element: {2:d}').format(\n              self._data_type_definition.name, byte_offset, element_index - 1)\n      raise errors.ByteStreamTooSmallError(error_string)\n\n    if (elements_terminator is not None and\n        element_value != elements_terminator and (\n            elements_data_size is None or\n            elements_data_offset < elements_data_size)):\n      byte_stream_size = len(byte_stream)\n\n      size_hints[self._data_type_definition.name] = DataTypeMapSizeHint(\n          
byte_stream_size - byte_offset)\n\n      context_state['context'] = subcontext\n      context_state['elements_data_offset'] = elements_data_offset\n      context_state['element_index'] = element_index\n      context_state['mapped_values'] = mapped_values\n      context_state['size_hints'] = size_hints\n\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: unable to find elements terminator').format(\n              self._data_type_definition.name, byte_offset)\n      raise errors.ByteStreamTooSmallError(error_string)\n\n    if context:\n      context.byte_size = elements_data_offset\n      context.state = {}\n\n    return tuple(mapped_values)", "docstring": "Maps a sequence of composite data types on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\ntuple[object, ...]: mapped values.\n\nRaises:\nByteStreamTooSmallError: if the byte stream is too small.\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def with_rank_at_least(x, rank):\n    return type(x)(tf.TensorShape(x).with_rank_at_least(rank))", "docstring": "Returns a shape based on `x` with at least the given `rank`.\n\nFor more details, see `help(tf.TensorShape.with_rank_at_least)`.\n\nArgs:\nx: object representing a shape; convertible to `tf.TensorShape`.\nrank: An `int` representing the minimum rank of `x` or else an assertion is\nraised.\n\nReturns:\nshape: a shape having `type(x)` but guaranteed to have at least the given\nrank (or else an assertion was raised).\n\nRaises:\nValueError: If `x` does not represent a shape with at least the given\n`rank`.", "source": "codesearchnet"}
{"code": "def _GetISO8601String(self, structure):\n    \n    time_zone_offset = structure.time_zone_offset\n\n    try:\n      time_zone_offset_hours = int(time_zone_offset[1:3], 10)\n      time_zone_offset_minutes = int(time_zone_offset[3:5], 10)\n    except (IndexError, TypeError, ValueError) as exception:\n      raise ValueError(\n          'unable to parse time zone offset with error: {0!s}.'.format(\n              exception))\n\n    try:\n      iso8601 = (\n          '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:03d}'\n          '{7:s}{8:02d}:{9:02d}').format(\n              structure.year, structure.month, structure.day,\n              structure.hours, structure.minutes, structure.seconds,\n              structure.microseconds, time_zone_offset[0],\n              time_zone_offset_hours, time_zone_offset_minutes)\n    except ValueError as exception:\n      raise ValueError(\n          'unable to format date time string with error: {0!s}.'.format(\n              exception))\n\n    return iso8601", "docstring": "Retrieves an ISO 8601 date time string from the structure.\n\nThe date and time values in Google Drive Sync log files are formatted as:\n\"2018-01-24 18:25:08,454 -0800\".\n\nArgs:\nstructure (pyparsing.ParseResults): structure of tokens derived from a\nline of a text file.\n\nReturns:\nstr: ISO 8601 date time string.\n\nRaises:\nValueError: if the structure cannot be converted into a date time string.", "source": "juraj-google-style"}
{"code": "def __init__(self, mackup, files, dry_run, verbose):\n        \n        assert isinstance(mackup, Mackup)\n        assert isinstance(files, set)\n\n        self.mackup = mackup\n        self.files = list(files)\n        self.dry_run = dry_run\n        self.verbose = verbose", "docstring": "Create an ApplicationProfile instance.\n\nArgs:\nmackup (Mackup)\nfiles (list)", "source": "juraj-google-style"}
{"code": "def _add_impact_severity(self, variant_obj):\n        \n        if variant_obj.most_severe_consequence:\n            variant_obj.impact_severity = IMPACT_SEVERITIES.get(\n                variant_obj.most_severe_consequence\n            )", "docstring": "Add the impact severity for the most severe consequence\n\nArgs:\nvariant_obj (puzzle.models.Variant)", "source": "juraj-google-style"}
{"code": "def _do_revoke(self, http, token):\n        \n        logger.info('Revoking token')\n        query_params = {'token': token}\n        token_revoke_uri = _helpers.update_query_params(\n            self.revoke_uri, query_params)\n        resp, content = transport.request(http, token_revoke_uri)\n        if resp.status == http_client.METHOD_NOT_ALLOWED:\n            body = urllib.parse.urlencode(query_params)\n            resp, content = transport.request(http, token_revoke_uri,\n                                              method='POST', body=body)\n        if resp.status == http_client.OK:\n            self.invalid = True\n        else:\n            error_msg = 'Invalid response {0}.'.format(resp.status)\n            try:\n                d = json.loads(_helpers._from_bytes(content))\n                if 'error' in d:\n                    error_msg = d['error']\n            except (TypeError, ValueError):\n                pass\n            raise TokenRevokeError(error_msg)\n\n        if self.store:\n            self.store.delete()", "docstring": "Revokes this credential and deletes the stored copy (if it exists).\n\nArgs:\nhttp: an object to be used to make HTTP requests.\ntoken: A string used as the token to be revoked. Can be either an\naccess_token or refresh_token.\n\nRaises:\nTokenRevokeError: If the revoke request does not return with a\n200 OK.", "source": "juraj-google-style"}
{"code": "def __init__(self, address, ap):\n        \n        super(ReadRequest, self).__init__(address=address, ap=ap)", "docstring": "Initializes the base class.\n\nArgs:\nself (ReadRequest): the ``ReadRequest`` instance\naddress (int): the register index\nap (bool): ``True`` if this request is to an Access Port Access\nRegister, otherwise ``False`` for a Debug Port Access Register\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def CreateAdGroup(client, campaign_id):\n  \n  ad_group_service = client.GetService('AdGroupService', 'v201809')\n\n  ad_group = {\n      'name': 'Dynamic remarketing ad group',\n      'campaignId': campaign_id,\n      'status': 'ENABLED'\n  }\n\n  operations = [{\n      'operator': 'ADD',\n      'operand': ad_group\n  }]\n\n  return ad_group_service.mutate(operations)['value'][0]", "docstring": "Creates a dynamic remarketing campaign.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: an int campaign ID.\n\nReturns:\nThe ad group that was successfully created.", "source": "juraj-google-style"}
{"code": "def init_app(self, app, client_id=None):\n    if (not self.client_id):\n        if client_id:\n            self.client_id = client_id\n        else:\n            self.client_id = app.name", "docstring": "Initialize the Micropub extension if it was not given app\nin the constructor.\n\nArgs:\napp (flask.Flask): the flask application to extend.\nclient_id (string, optional): the IndieAuth client id, will be\ndisplayed when the user is asked to authorize this client. If not\nprovided, the app name will be used.", "source": "codesearchnet"}
{"code": "def Append(self, value=None, **kwarg):\n    \n    if self.rdf_type is not None:\n      if (isinstance(value, rdfvalue.RDFValue) and\n          value.__class__ != self.rdf_type):\n        raise ValueError(\"Can only accept %s\" % self.rdf_type)\n\n      try:\n        \n        value = self.rdf_type(value, **kwarg)  \n      except (TypeError, ValueError):\n        raise ValueError(\"Unable to initialize %s from type %s\" %\n                         (self.__class__.__name__, type(value)))\n\n    self.content.Append(DataBlob().SetValue(value))", "docstring": "Add another member to the array.\n\nArgs:\nvalue: The new data to append to the array.\n**kwarg:  Create a new element from these keywords.\n\nReturns:\nThe value which was added. This can be modified further by the caller and\nchanges will be propagated here.\n\nRaises:\nValueError: If the value to add is not allowed.", "source": "juraj-google-style"}
{"code": "def load(self, data_dir):\n        \n        \n        K.set_learning_phase(0)\n\n        \n        try:\n            latest_ckpt = max(glob.iglob(\n                os.path.join(data_dir, '*.h*5')), key=os.path.getctime)\n            latest_ckpt_name = os.path.basename(latest_ckpt)\n            latest_ckpt_time = str(\n                datetime.fromtimestamp(os.path.getmtime(latest_ckpt)))\n        except ValueError:\n            raise FileNotFoundError('No checkpoint (.hdf5 or .h5) files '\n                                    'available at {}'.format(data_dir))\n        try:\n            latest_json = max(glob.iglob(os.path.join(data_dir, '*.json')),\n                              key=os.path.getctime)\n            with open(latest_json, 'r') as f:\n                model_json = json.loads(f.read())\n                self._model = model_from_json(model_json)\n\n            self._model.load_weights(latest_ckpt)\n        except ValueError:\n            try:\n                self._model = load_model(latest_ckpt)\n            except ValueError:\n                raise FileNotFoundError('The (.hdf5 or .h5) files available at'\n                                        '{} don\\'t have the model'\n                                        ' architecture.'\n                                        .format(latest_ckpt))\n\n        self._sess = K.get_session()\n        self._tf_predict_var = self._model.outputs[0]\n        self._tf_input_var = self._model.inputs[0]\n        self._model_name = type(self).__name__\n        self._latest_ckpt_name = latest_ckpt_name\n        self._latest_ckpt_time = latest_ckpt_time", "docstring": "Load graph and weight data.\n\nArgs:\ndata_dir (:obj:`str`): location of Keras checkpoint (`.hdf5`) files\nand model (in `.json`) structure.  The default behavior\nis to take the latest of each, by OS timestamp.", "source": "juraj-google-style"}
{"code": "def close(self):\n    if not self.closed:\n        self._uploader.finish()\n    super().close()", "docstring": "Complete the upload and close this stream.\n\nThis method has no effect if the stream is already closed.\n\nRaises:\nAny error encountered by the uploader.", "source": "github-repos"}
{"code": "def _get_args(cls, args):\n        \n        \n        if isinstance(args, tuple):\n            raise TypeError(\n                \"{}[...] takes exactly one argument.\".format(cls.__name__)\n            )\n        return super(_StringMeta, cls)._get_args((_STR_TYPE, args))", "docstring": "Return the parameters necessary to check type boundaries.\n\nArgs:\nargs: A slice representing the minimum and maximum lengths allowed\nfor values of that string.\n\nReturns:\nA tuple with three parameters: a type, a slice, and the len\nfunction.", "source": "juraj-google-style"}
{"code": "def save_archive(archive):\n    _assert_obj_type(archive, obj_type=DBArchive)\n    _get_handler().store_object(archive)\n    return archive.to_comm(light_request=True)", "docstring": "Save `archive` into database and into proper indexes.\n\nAttr:\narchive (obj): Instance of the :class:`.DBArchive`.\n\nReturns:\nobj: :class:`.DBArchive` without data.\n\nRaises:\nInvalidType: When the `archive` is not instance of :class:`.DBArchive`.\nUnindexablePublication: When there is no index (property) which can be\nused to index `archive` in database.", "source": "codesearchnet"}
{"code": "def get_num_patches(self, image_height: int, image_width: int, patch_size: Optional[Dict[str, int]]=None) -> int:\n    patch_size = patch_size if patch_size is not None else self.patch_size\n    patch_height, patch_width = (self.patch_size['height'], self.patch_size['width'])\n    if image_height % patch_height != 0:\n        raise ValueError(f'image_height={image_height!r} must be divisible by {patch_height}')\n    if image_width % patch_width != 0:\n        raise ValueError(f'image_width={image_width!r} must be divisible by {patch_width}')\n    num_patches_per_dim_h = image_height \n    num_patches_per_dim_w = image_width \n    num_patches = num_patches_per_dim_h * num_patches_per_dim_w\n    return num_patches", "docstring": "Calculate number of patches required to encode an image.\n\nArgs:\nimage_height (`int`):\nHeight of the image.\nimage_width (`int`):\nWidth of the image.\npatch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):\nDictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the patches.", "source": "github-repos"}
{"code": "def _CreateStyleFromConfigParser(config):\n    section = 'yapf' if config.has_section('yapf') else 'style'\n    if config.has_option('style', 'based_on_style'):\n        based_on = config.get('style', 'based_on_style').lower()\n        base_style = _STYLE_NAME_TO_FACTORY[based_on]()\n    elif config.has_option('yapf', 'based_on_style'):\n        based_on = config.get('yapf', 'based_on_style').lower()\n        base_style = _STYLE_NAME_TO_FACTORY[based_on]()\n    else:\n        base_style = _GLOBAL_STYLE_FACTORY()\n    for option, value in config.items(section):\n        if option.lower() == 'based_on_style':\n            continue\n        option = option.upper()\n        if option not in _STYLE_OPTION_VALUE_CONVERTER:\n            raise StyleConfigError('Unknown style option \"{0}\"'.format(option))\n        try:\n            base_style[option] = _STYLE_OPTION_VALUE_CONVERTER[option](value)\n        except ValueError:\n            raise StyleConfigError(\"'{}' is not a valid setting for {}.\".format(value, option))\n    return base_style", "docstring": "Create a style dict from a configuration file.\n\nArguments:\nconfig: a ConfigParser object.\n\nReturns:\nA style dict.\n\nRaises:\nStyleConfigError: if an unknown style option was encountered.", "source": "github-repos"}
{"code": "def create_board(self, board_json):\n    return trolly.board.Board(trello_client=self, board_id=board_json['id'], name=board_json['name'], data=board_json)", "docstring": "Create Board object from a JSON object\n\nReturns:\nBoard: The board from the given `board_json`.", "source": "codesearchnet"}
{"code": "def _html_tree_view_content(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:\n    return view.content(self, name=name, parent=parent, root_path=root_path, **kwargs)", "docstring": "Returns the main content for the object.\n\nArgs:\nview: The view to render the object.\nname: The name of the object.\nparent: The parent of the object.\nroot_path: The key path of the object relative to the root.\n**kwargs: kwargs to pass to the view. See `_html_tree_view_config` for\nthe builtin arguments.\n\nReturns:\nThe rendered HTML as the main content of the object.", "source": "github-repos"}
{"code": "def ensure_valid_input(model, tokens, input_names):\n    print('Ensuring inputs are in correct order')\n    model_args_name = model.forward.__code__.co_varnames\n    model_args, ordered_input_names = ([], [])\n    for arg_name in model_args_name[1:]:\n        if arg_name in input_names:\n            ordered_input_names.append(arg_name)\n            model_args.append(tokens[arg_name])\n        else:\n            print(f'{arg_name} is not present in the generated input list.')\n            break\n    print(f'Generated inputs order: {ordered_input_names}')\n    return (ordered_input_names, tuple(model_args))", "docstring": "Ensure inputs are presented in the correct order, without any Non\n\nArgs:\nmodel: The model used to forward the input data\ntokens: BatchEncoding holding the input data\ninput_names: The name of the inputs\n\nReturns: Tuple", "source": "github-repos"}
{"code": "def _get_kernel_arguments(self):\n    declarations = []\n    for (name, data) in self._kernel_data.items():\n        declarations.extend(data.get_kernel_parameters(('_' + name)))\n    return declarations", "docstring": "Get the list of kernel arguments for loading the kernel data elements into the kernel.\n\nThis will use the sorted keys for looping through the kernel input items.\n\nReturns:\nlist of str: the list of parameter definitions", "source": "codesearchnet"}
{"code": "def Open(self, file_object):\n    \n    file_object.seek(0, os.SEEK_SET)\n    signature_data = file_object.read(6)\n\n    self.file_format = None\n    if len(signature_data) > 2:\n      if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:\n        self.file_format = 'bin-big-endian'\n      elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:\n        self.file_format = 'bin-little-endian'\n      elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:\n        self.file_format = 'odc'\n      elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:\n        self.file_format = 'newc'\n      elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:\n        self.file_format = 'crc'\n\n    if self.file_format is None:\n      raise IOError('Unsupported CPIO format.')\n\n    self._file_object = file_object\n    self._file_size = file_object.get_size()\n\n    self._ReadFileEntries(self._file_object)", "docstring": "Opens the CPIO archive file.\n\nArgs:\nfile_object (FileIO): a file-like object.\n\nRaises:\nIOError: if the file format signature is not supported.\nOSError: if the file format signature is not supported.", "source": "juraj-google-style"}
{"code": "def _GetUserTypeAndPassword(username, password=None, is_admin=False):\n    if is_admin:\n        user_type = api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN\n    else:\n        user_type = api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD\n    if (password is None):\n        password = getpass.getpass(prompt=(\"Please enter password for user '%s':\" % username))\n    return (user_type, password)", "docstring": "Returns the user-type and password for a user.\n\nArgs:\nusername: Username for the user.\npassword: Password for the user. If None, or not provided, we will prompt\nfor one via the terminal.\nis_admin: Indicates whether the user should have admin privileges.", "source": "codesearchnet"}
{"code": "def _get_cuda_compute_capabilities_or_die() -> list[str]:\n    try:\n        nvidia_smi = _find_executable_or_die('nvidia-smi')\n        nvidia_smi_proc = subprocess.run([nvidia_smi, '--query-gpu=compute_cap', '--format=csv,noheader'], capture_output=True, check=True, text=True)\n        capabilities = sorted(set(nvidia_smi_proc.stdout.strip().split('\\n')))\n        logging.info('Found CUDA compute capabilities: %s', capabilities)\n        return capabilities\n    except (RuntimeError, subprocess.CalledProcessError) as e:\n        logging.info('Could not find nvidia-smi, or nvidia-smi command failed. Please pass capabilities directly using --cuda_compute_capabilities.')\n        raise e", "docstring": "Finds compute capabilities via nvidia-smi or rasies exception.\n\nReturns:\nlist of unique, sorted strings representing compute capabilities:\nRaises:\nRuntimeError: if path to nvidia-smi couldn't be found.\nsubprocess.CalledProcessError: if nvidia-smi process failed.", "source": "github-repos"}
{"code": "def relative_probability_from_lookup_table(self, jump_lookup_table):\n    l1 = self.initial_site.label\n    l2 = self.final_site.label\n    c1 = self.initial_site.nn_occupation()\n    c2 = self.final_site.nn_occupation()\n    return jump_lookup_table.jump_probability[l1][l2][c1][c2]", "docstring": "Relative probability of accepting this jump from a lookup-table.\n\nArgs:\njump_lookup_table (LookupTable): the lookup table to be used for this jump.\n\nReturns:\n(Float): relative probability of accepting this jump.", "source": "codesearchnet"}
{"code": "def copy_file(source, destination, unique=False, sort=False, case_sensitive=True, create_path=False):\n    _File.copy(source, destination, unique, sort, case_sensitive, create_path)", "docstring": "Python utility to create file\n\nArgs:\nsource: absolute/relative path of source file\ndestination: absolute/relative path of destination file.\nUse same as source for replacing the content of existing file.\nunique: Copy only unique lines from file\nsort: Sort the content of file\ncase_sensitive: unique/sort operations to be performed case-sensitive string\ncreate_path: Recursively create the path to destination directory in case not found\n\nReturns: None", "source": "codesearchnet"}
{"code": "def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):\n  \n  _get_fashion_mnist(tmp_dir)\n  d = _FASHION_MNIST_LOCAL_FILE_PREFIX + (\n      _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME)\n  l = _FASHION_MNIST_LOCAL_FILE_PREFIX + (\n      _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME)\n  return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)", "docstring": "Image generator for FashionMNIST.\n\nArgs:\ntmp_dir: path to temporary storage directory.\ntraining: a Boolean; if true, we use the train set, otherwise the test set.\nhow_many: how many images and labels to generate.\nstart_from: from which image to start.\n\nReturns:\nAn instance of image_generator that produces MNIST images.", "source": "juraj-google-style"}
{"code": "def replace(self, **kw):\n    \n    if \"tzinfo\" in kw:\n      if kw[\"tzinfo\"] is None:\n        raise TypeError(\"Can not remove the timezone use asdatetime()\")\n      else:\n        tzinfo = kw[\"tzinfo\"]\n        del kw[\"tzinfo\"]\n    else:\n      tzinfo = None\n\n    is_dst = None\n    if \"is_dst\" in kw:\n      is_dst = kw[\"is_dst\"]\n      del kw[\"is_dst\"]\n    else:\n      \n      is_dst = self.is_dst\n\n    replaced = self.asdatetime().replace(**kw)\n\n    return type(self)(\n        replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)", "docstring": "Return datetime with new specified fields given as arguments.\n\nFor example, dt.replace(days=4) would return a new datetime_tz object with\nexactly the same as dt but with the days attribute equal to 4.\n\nAny attribute can be replaced, but tzinfo can not be set to None.\n\nArgs:\nAny datetime_tz attribute.\n\nReturns:\nA datetime_tz object with the attributes replaced.\n\nRaises:\nTypeError: If the given replacement is invalid.", "source": "juraj-google-style"}
{"code": "def _get_index(self, data: _instance_base.Instance | ConcreteValue) -> int | None:\n    if isinstance(data, ConcreteValue):\n        return self.ctx.convert.value_to_constant(data, (int, type(None)))\n    elif isinstance(data, _instance_base.Instance):\n        if data.cls != self.ctx.convert.int_type:\n            raise abstract_utils.ConversionError()\n        else:\n            return None\n    else:\n        raise abstract_utils.ConversionError()", "docstring": "Helper function for getslice_slot that extracts int or None from data.\n\nIf data is an Instance of int, None is returned.\n\nArgs:\ndata: The object to extract from. Usually a ConcreteValue or an Instance.\n\nReturns:\nThe value (an int or None) of the index.\n\nRaises:\nabstract_utils.ConversionError: If the data could not be converted.", "source": "github-repos"}
{"code": "def register(self, name):\n        \n        if name not in settings.CODEMIRROR_SETTINGS:\n            msg = (\"Given config name '{}' does not exists in \"\n                   \"'settings.CODEMIRROR_SETTINGS'.\")\n            raise UnknowConfigError(msg.format(name))\n\n        parameters = copy.deepcopy(self.default_internal_config)\n        parameters.update(copy.deepcopy(\n            settings.CODEMIRROR_SETTINGS[name]\n        ))\n\n        \n        if 'css_bundle_name' not in parameters:\n            css_template_name = settings.CODEMIRROR_BUNDLE_CSS_NAME\n            parameters['css_bundle_name'] = css_template_name.format(\n                settings_name=name\n            )\n        if 'js_bundle_name' not in parameters:\n            js_template_name = settings.CODEMIRROR_BUNDLE_JS_NAME\n            parameters['js_bundle_name'] = js_template_name.format(\n                settings_name=name\n            )\n\n        self.registry[name] = parameters\n\n        return parameters", "docstring": "Register configuration for an editor instance.\n\nArguments:\nname (string): Config name from available ones in\n``settings.CODEMIRROR_SETTINGS``.\n\nRaises:\nUnknowConfigError: If given config name does not exist in\n``settings.CODEMIRROR_SETTINGS``.\n\nReturns:\ndict: Registred config dict.", "source": "juraj-google-style"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(PinfoTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._compare_storage_file_path = None\n    self._output_filename = None\n    self._output_format = None\n    self._process_memory_limit = None\n    self._storage_file_path = None\n\n    self._verbose = False\n    self.compare_storage_information = False", "docstring": "Initializes the CLI tool object.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def secondary_training_status_message(job_description, prev_description):\n    if ((job_description is None) or (job_description.get('SecondaryStatusTransitions') is None) or (len(job_description.get('SecondaryStatusTransitions')) == 0)):\n        return ''\n    prev_description_secondary_transitions = (prev_description.get('SecondaryStatusTransitions') if (prev_description is not None) else None)\n    prev_transitions_num = (len(prev_description['SecondaryStatusTransitions']) if (prev_description_secondary_transitions is not None) else 0)\n    current_transitions = job_description['SecondaryStatusTransitions']\n    if (len(current_transitions) == prev_transitions_num):\n        transitions_to_print = current_transitions[(- 1):]\n    else:\n        transitions_to_print = current_transitions[(prev_transitions_num - len(current_transitions)):]\n    status_strs = []\n    for transition in transitions_to_print:\n        message = transition['StatusMessage']\n        time_str = datetime.utcfromtimestamp(time.mktime(job_description['LastModifiedTime'].timetuple())).strftime('%Y-%m-%d %H:%M:%S')\n        status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message))\n    return '\\n'.join(status_strs)", "docstring": "Returns a string contains last modified time and the secondary training job status message.\n\nArgs:\njob_description: Returned response from DescribeTrainingJob call\nprev_description: Previous job description from DescribeTrainingJob call\n\nReturns:\nstr: Job status string to be printed.", "source": "codesearchnet"}
{"code": "def initialize_references_json(references_json, references, setter=None):\n    for obj in references_json:\n        obj_id = obj['id']\n        obj_attrs = obj['attributes']\n        instance = references[obj_id]\n        HasProps.__init__(instance)\n        instance.update_from_json(obj_attrs, models=references, setter=setter)", "docstring": "Given a JSON representation of the models in a graph, and new model\nobjects, set the properties on the models from the JSON\n\nArgs:\nreferences_json (``JSON``)\nJSON specifying attributes and values to initialize new model\nobjects with.\n\nreferences (dict[str, Model])\nA dictionary mapping model IDs to newly created (but not yet\ninitialized) Bokeh models.\n\n**This is an \"out\" parameter**. The values it contains will be\nmodified in-place.\n\nsetter (ClientSession or ServerSession or None, optional) :\nThis is used to prevent \"boomerang\" updates to Bokeh apps.\n(default: None)\n\nIn the context of a Bokeh server application, incoming updates\nto properties will be annotated with the session that is\ndoing the updating. This value is propagated through any\nsubsequent change notifications that the update triggers.\nThe session can compare the event setter to itself, and\nsuppress any updates that originate from itself.", "source": "codesearchnet"}
{"code": "def reduce_per_replica(values, strategy, reduction='first'):\n\n    def _reduce(v):\n        \n        if reduction == 'concat' and _collective_all_reduce_multi_worker(strategy):\n            return _multi_worker_concat(v, strategy)\n        if not _is_per_replica_instance(v):\n            return v\n        elif reduction == 'first':\n            return strategy.unwrap(v)[0]\n        elif reduction == 'concat':\n            if _is_tpu_multi_host(strategy):\n                return _tpu_multi_host_concat(v, strategy)\n            else:\n                return concat(strategy.unwrap(v))\n        else:\n            raise ValueError('`reduction` must be \"first\" or \"concat\".')\n    return nest.map_structure(_reduce, values)", "docstring": "Reduce PerReplica objects.\n\nArgs:\nvalues: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are\nreturned as-is.\nstrategy: `tf.distribute.Strategy` object.\nreduction: One of 'first', 'concat'.\n\nReturns:\nStructure of `Tensor`s.", "source": "github-repos"}
{"code": "def replace_vars(config, env):\n    if isinstance(config, dict):\n        for (k, v) in list(config.items()):\n            if (isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple)):\n                replace_vars(v, env)\n            elif isinstance(v, basestring):\n                config[k] = expand_var(v, env)\n    elif isinstance(config, list):\n        for (i, v) in enumerate(config):\n            if (isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple)):\n                replace_vars(v, env)\n            elif isinstance(v, basestring):\n                config[i] = expand_var(v, env)\n    elif isinstance(config, tuple):\n        for v in config:\n            if (isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple)):\n                replace_vars(v, env)", "docstring": "Replace variable references in config using the supplied env dictionary.\n\nArgs:\nconfig: the config to parse. Can be a tuple, list or dict.\nenv: user supplied dictionary.\n\nRaises:\nException if any variable references are not found in env.", "source": "codesearchnet"}
{"code": "def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None):\n    tdos = (self.y if (len(self.ydim) == 1) else np.sum(self.y, axis=1))\n    if (not abs_tol):\n        tol = ((tol * tdos.sum()) / tdos.shape[0])\n    energies = self.x\n    below_fermi = [i for i in range(len(energies)) if ((energies[i] < self.efermi) and (tdos[i] > tol))]\n    above_fermi = [i for i in range(len(energies)) if ((energies[i] > self.efermi) and (tdos[i] > tol))]\n    vbm_start = max(below_fermi)\n    cbm_start = min(above_fermi)\n    if (vbm_start == cbm_start):\n        return (0.0, self.efermi, self.efermi)\n    else:\n        terminal_dens = tdos[vbm_start:(vbm_start + 2)][::(- 1)]\n        terminal_energies = energies[vbm_start:(vbm_start + 2)][::(- 1)]\n        start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)\n        terminal_dens = tdos[(cbm_start - 1):(cbm_start + 1)]\n        terminal_energies = energies[(cbm_start - 1):(cbm_start + 1)]\n        end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)\n        return ((end - start), end, start)", "docstring": "Expects a DOS object and finds the gap\n\nArgs:\ntol: tolerance in occupations for determining the gap\nabs_tol: Set to True for an absolute tolerance and False for a\nrelative one.\nspin: Possible values are None - finds the gap in the summed\ndensities, Up - finds the gap in the up spin channel,\nDown - finds the gap in the down spin channel.\n\nReturns:\n(gap, cbm, vbm):\nTuple of floats in eV corresponding to the gap, cbm and vbm.", "source": "codesearchnet"}
{"code": "def consume(self, callback, bindings=None, queues=None, exchanges=None):\n    self._bindings = (bindings or config.conf['bindings'])\n    self._queues = (queues or config.conf['queues'])\n    self._exchanges = (exchanges or config.conf['exchanges'])\n    if inspect.isclass(callback):\n        cb_obj = callback()\n        if (not callable(cb_obj)):\n            raise ValueError('Callback must be a class that implements __call__ or a function.')\n        self._consumer_callback = cb_obj\n    elif callable(callback):\n        self._consumer_callback = callback\n    else:\n        raise ValueError('Callback must be a class that implements __call__ or a function.')\n    self._running = True\n    self.connect()\n    self._connection.ioloop.start()", "docstring": "Consume messages from a message queue.\n\nSimply define a callable to be used as the callback when messages are\ndelivered and specify the queue bindings. This call blocks. The callback\nsignature should accept a single positional argument which is an\ninstance of a :class:`Message` (or a sub-class of it).\n\nArgs:\ncallback (callable): The callable to pass the message to when one\narrives.\nbindings (list of dict): A list of dictionaries describing bindings\nfor queues. Refer to the :ref:`conf-bindings` configuration\ndocumentation for the format.\nqueues (dict): A dictionary of queues to ensure exist. Refer to the\n:ref:`conf-queues` configuration documentation for the format.\nexchanges (dict): A dictionary of exchanges to ensure exist. Refer\nto the :ref:`conf-exchanges` configuration documentation for the\nformat.\n\nRaises:\nHaltConsumer: Raised when the consumer halts.\nValueError: If the callback isn't a callable object or a class with\n__call__ defined.", "source": "codesearchnet"}
{"code": "def put(self, entity):\n    \n    actual_entity = _normalize_entity(entity)\n    if actual_entity is None:\n      return self.ndb_put(entity)\n    self.puts.append(actual_entity)", "docstring": "Registers entity to put to datastore.\n\nArgs:\nentity: an entity or model instance to put.", "source": "juraj-google-style"}
{"code": "def Runs(self):\n    with self._accumulators_mutex:\n        items = list(six.iteritems(self._accumulators))\n    return {run_name: accumulator.Tags() for (run_name, accumulator) in items}", "docstring": "Return all the run names in the `EventMultiplexer`.\n\nReturns:\n```\n{runName: { scalarValues: [tagA, tagB, tagC],\ngraph: true, meta_graph: true}}\n```", "source": "codesearchnet"}
{"code": "def Decode(data, encoding=None):\n    encoding = encoding or GetConsoleAttr().GetEncoding()\n    return encoding_util.Decode(data, encoding=encoding)", "docstring": "Converts the given string, bytes, or object to a text string.\n\nArgs:\ndata: Any bytes, string, or object that has str() or unicode() methods.\nencoding: A suggesting encoding used to decode. If this encoding doesn't\nwork, other defaults are tried. Defaults to\nGetConsoleAttr().GetEncoding().\n\nReturns:\nA text string representation of the data.", "source": "github-repos"}
{"code": "def __init__(self, location, resource_pool):\n        \n        super(MemoryPackageRepository, self).__init__(location, resource_pool)\n        self.data = {}\n        self.register_resource(MemoryPackageFamilyResource)\n        self.register_resource(MemoryPackageResource)\n        self.register_resource(MemoryVariantResource)", "docstring": "Create an in-memory package repository.\n\nArgs:\nlocation (str): Path containing the package repository.", "source": "juraj-google-style"}
{"code": "def log_histogram(self, name, value, step=None):\n        \n        if isinstance(value, six.string_types):\n            raise TypeError('\"value\" should be a number, got {}'\n                            .format(type(value)))\n\n        self._check_step(step)\n        tf_name = self._ensure_tf_name(name)\n\n        summary = self._histogram_summary(tf_name, value, step=step)\n        self._log_summary(tf_name, summary, value, step=step)", "docstring": "Log a histogram for given name on given step.\n\nArgs:\nname (str): name of the variable (it will be converted to a valid\ntensorflow summary name).\nvalue (tuple or list): either list of numbers\nto be summarized as a histogram, or a tuple of bin_edges and\nbincounts that directly define a histogram.\nstep (int): non-negative integer used for visualization", "source": "juraj-google-style"}
{"code": "def log_combinations(n, counts, name='log_combinations'):\n    with ops.name_scope(name, values=[n, counts]):\n        n = ops.convert_to_tensor(n, name='n')\n        counts = ops.convert_to_tensor(counts, name='counts')\n        total_permutations = math_ops.lgamma(n + 1)\n        counts_factorial = math_ops.lgamma(counts + 1)\n        redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])\n        return total_permutations - redundant_permutations", "docstring": "Multinomial coefficient.\n\nGiven `n` and `counts`, where `counts` has last dimension `k`, we compute\nthe multinomial coefficient as:\n\n```n! / sum_i n_i!```\n\nwhere `i` runs over all `k` classes.\n\nArgs:\nn: Floating-point `Tensor` broadcastable with `counts`. This represents `n`\noutcomes.\ncounts: Floating-point `Tensor` broadcastable with `n`. This represents\ncounts in `k` classes, where `k` is the last dimension of the tensor.\nname: A name for this operation (optional).\n\nReturns:\n`Tensor` representing the multinomial coefficient between `n` and `counts`.", "source": "github-repos"}
{"code": "def check_tx(self, raw_transaction):\n        \n\n        self.abort_if_abci_chain_is_not_synced()\n\n        logger.debug('check_tx: %s', raw_transaction)\n        transaction = decode_transaction(raw_transaction)\n        if self.bigchaindb.is_valid_transaction(transaction):\n            logger.debug('check_tx: VALID')\n            return ResponseCheckTx(code=CodeTypeOk)\n        else:\n            logger.debug('check_tx: INVALID')\n            return ResponseCheckTx(code=CodeTypeError)", "docstring": "Validate the transaction before entry into\nthe mempool.\n\nArgs:\nraw_tx: a raw string (in bytes) transaction.", "source": "juraj-google-style"}
{"code": "def read_probes(self, key):\n        \n        assert key in list(self._PROBES.keys())\n\n        if key == 'output':\n            value = self._output\n\n        return value", "docstring": "requestes value from the instrument and returns it\nArgs:\nkey: name of requested value\n\nReturns: reads values from instrument", "source": "juraj-google-style"}
{"code": "def _group_similar(items: List[T],\n                   comparer: Callable[[T, T], bool]) -> List[List[T]]:\n    \n    groups = []  \n    used = set()  \n    for i in range(len(items)):\n        if i not in used:\n            group = [items[i]]\n            for j in range(i + 1, len(items)):\n                if j not in used and comparer(items[i], items[j]):\n                    used.add(j)\n                    group.append(items[j])\n            groups.append(group)\n    return groups", "docstring": "Combines similar items into groups.\n\nArgs:\nitems: The list of items to group.\ncomparer: Determines if two items are similar.\n\nReturns:\nA list of groups of items.", "source": "juraj-google-style"}
{"code": "def _MergeTaskStorage(self, storage_writer):\n    \n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('merge_check')\n\n    for task_identifier in storage_writer.GetProcessedTaskIdentifiers():\n      try:\n        task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)\n\n        self._task_manager.SampleTaskStatus(task, 'processed')\n\n        to_merge = self._task_manager.CheckTaskToMerge(task)\n        if not to_merge:\n          storage_writer.RemoveProcessedTaskStorage(task)\n\n          self._task_manager.RemoveTask(task)\n          self._task_manager.SampleTaskStatus(task, 'removed_processed')\n\n        else:\n          storage_writer.PrepareMergeTaskStorage(task)\n          self._task_manager.UpdateTaskAsPendingMerge(task)\n\n      except KeyError:\n        logger.error(\n            'Unable to retrieve task: {0:s} to prepare it to be merged.'.format(\n                task_identifier))\n        continue\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('merge_check')\n\n    task = None\n    if not self._storage_merge_reader_on_hold:\n      task = self._task_manager.GetTaskPendingMerge(self._merge_task)\n\n    \n    \n    if task or self._storage_merge_reader:\n      self._status = definitions.STATUS_INDICATOR_MERGING\n\n      if self._processing_profiler:\n        self._processing_profiler.StartTiming('merge')\n\n      if task:\n        if self._storage_merge_reader:\n          self._merge_task_on_hold = self._merge_task\n          self._storage_merge_reader_on_hold = self._storage_merge_reader\n\n          self._task_manager.SampleTaskStatus(\n              self._merge_task_on_hold, 'merge_on_hold')\n\n        self._merge_task = task\n        try:\n          self._storage_merge_reader = storage_writer.StartMergeTaskStorage(\n              task)\n\n          self._task_manager.SampleTaskStatus(task, 'merge_started')\n\n        except IOError as exception:\n          logger.error((\n              'Unable to merge results of task: {0:s} '\n              'with error: {1!s}').format(task.identifier, exception))\n          self._storage_merge_reader = None\n\n      if self._storage_merge_reader:\n        fully_merged = self._storage_merge_reader.MergeAttributeContainers(\n            maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)\n      else:\n        \n        \n        \n        fully_merged = True\n\n      if self._processing_profiler:\n        self._processing_profiler.StopTiming('merge')\n\n      if fully_merged:\n        try:\n          self._task_manager.CompleteTask(self._merge_task)\n\n        except KeyError as exception:\n          logger.error(\n              'Unable to complete task: {0:s} with error: {1!s}'.format(\n                  self._merge_task.identifier, exception))\n\n        if not self._storage_merge_reader_on_hold:\n          self._merge_task = None\n          self._storage_merge_reader = None\n        else:\n          self._merge_task = self._merge_task_on_hold\n          self._storage_merge_reader = self._storage_merge_reader_on_hold\n\n          self._merge_task_on_hold = None\n          self._storage_merge_reader_on_hold = None\n\n          self._task_manager.SampleTaskStatus(\n              self._merge_task, 'merge_resumed')\n\n      self._status = definitions.STATUS_INDICATOR_RUNNING\n      self._number_of_produced_events = storage_writer.number_of_events\n      self._number_of_produced_sources = storage_writer.number_of_event_sources\n      
self._number_of_produced_warnings = storage_writer.number_of_warnings", "docstring": "Merges a task storage with the session storage.\n\nThis function checks all task stores that are ready to merge and updates\nthe scheduled tasks. Note that to prevent this function holding up\nthe task scheduling loop only the first available task storage is merged.\n\nArgs:\nstorage_writer (StorageWriter): storage writer for a session storage used\nto merge task storage.", "source": "juraj-google-style"}
{"code": "def find_library_linux(cls):\n    dll = Library.JLINK_SDK_NAME\n    root = os.path.join('/', 'opt', 'SEGGER')\n    for (directory_name, subdirs, files) in os.walk(root):\n        fnames = []\n        x86_found = False\n        for f in files:\n            path = os.path.join(directory_name, f)\n            if (os.path.isfile(path) and f.startswith(dll)):\n                fnames.append(f)\n                if ('_x86' in path):\n                    x86_found = True\n        for fname in fnames:\n            fpath = os.path.join(directory_name, fname)\n            if util.is_os_64bit():\n                if ('_x86' not in fname):\n                    (yield fpath)\n            elif x86_found:\n                if ('_x86' in fname):\n                    (yield fpath)\n            else:\n                (yield fpath)", "docstring": "Loads the SEGGER DLL from the root directory.\n\nOn Linux, the SEGGER tools are installed under the ``/opt/SEGGER``\ndirectory with versioned directories having the suffix ``_VERSION``.\n\nArgs:\ncls (Library): the ``Library`` class\n\nReturns:\nThe paths to the J-Link library files in the order that they are\nfound.", "source": "codesearchnet"}
{"code": "def _merge_choice_field(self, json_value: Any, choice_field: descriptor.FieldDescriptor, field_name: str, parent: message.Message) -> None:\n    choice_field_name = _get_choice_field_name(choice_field, field_name)\n    choice_field_map = _get_field_map(choice_field.message_type)\n    choice_value_field = choice_field_map.get(choice_field_name)\n    if choice_value_field is None:\n        raise ValueError(f'Cannot find {choice_field_name!r} on {choice_field.full_name}')\n    choice_message = proto_utils.set_in_parent_or_add(parent, choice_field)\n    self._merge_field(json_value, choice_value_field, choice_message)", "docstring": "Creates a Message based on the choice_field Descriptor and json_value.\n\nThe resulting message is merged into parent.\n\nArgs:\njson_value: The JSON value to merge into a message of the type described\nby choice_field.\nchoice_field: The field descriptor of the FHIR choice type on parent.\nfield_name: The nested field name of the choice type, e.g.: _valueBoolean.\nparent: The parent Message to merge into.", "source": "github-repos"}
{"code": "def get_metrics_collector(self, prefix: str=''):\n    metrics_namespace = self._metrics_namespace if self._metrics_namespace else self._model_handler.get_metrics_namespace()\n    if self._model_handler.override_metrics(metrics_namespace):\n        return None\n    return _MetricsCollector(metrics_namespace, prefix=prefix)", "docstring": "Args:\nprefix: Unique identifier for metrics, used when models\nare updated using side input.", "source": "github-repos"}
{"code": "def error(channel, title, description):\n    \n\n    \n    gui = ui_embed.UI(\n        channel,\n        title,\n        description,\n        modulename=modulename\n    )\n\n    return gui", "docstring": "Creates an embed UI containing an error message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\ntitle (str): The title of the embed\ndescription (str): The description for the error\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def __call__(self, input_1: EventSet, input_2: EventSet) -> Dict[str, EventSet]:\n    assert isinstance(self.operator, BaseBinaryOperator)\n    output_schema = self.output_schema('output')\n    if len(input_1.schema.features) != len(input_2.schema.features):\n        raise ValueError('Both EventSets must have the same number of features.')\n    num_features = len(input_1.schema.features)\n    dst_evset = EventSet(data={}, schema=output_schema)\n    assert len(input_1.data) == len(input_2.data)\n    for index_key, index_data in input_1.data.items():\n        input_1_features = index_data.features\n        input_2_features = input_2.data[index_key].features\n        dst_features = []\n        for feature_idx in range(num_features):\n            input_1_feature = input_1_features[feature_idx]\n            input_2_feature = input_2_features[feature_idx]\n            assert input_1_feature.dtype.type == input_2_feature.dtype.type\n            result = self._do_operation(input_1_feature, input_2_feature, input_1.schema.features[feature_idx].dtype)\n            dst_features.append(result)\n        dst_evset.set_index_value(index_key, IndexData(features=dst_features, timestamps=index_data.timestamps, schema=output_schema), normalize=False)\n    return {'output': dst_evset}", "docstring": "Applies the corresponding arithmetic operation between two EventSets.\n\nArgs:\ninput_1: First EventSet.\ninput_2: Second EventSet.\n\nReturns:\nResult of the operation.\n\nRaises:\nValueError: If sampling of both EventSets is not equal.", "source": "github-repos"}
{"code": "def _is_valid(self, value):\n    if hasattr(self._type, 'istypeof'):\n        return self._type.istypeof(value)\n    else:\n        return isinstance(value, self._type)", "docstring": "Return True if the input value is valid for insertion into the\ninner list.\n\nArgs:\nvalue: An object about to be inserted.", "source": "codesearchnet"}
{"code": "def check_the_end_flag(self, state_key):\n        \n        \n        x, y = state_key\n        end_point_tuple = np.where(self.__map_arr == self.__end_point_label)\n        end_point_x_arr, end_point_y_arr = end_point_tuple\n        if x == end_point_x_arr[0] and y == end_point_y_arr[0]:\n            return True\n        else:\n            return False", "docstring": "Check the end flag.\n\nIf this return value is `True`, the learning is end.\n\nArgs:\nstate_key:    The key of state in `self.t`.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def get_diff(value1, value2, name1, name2):\n    lines1 = [(line + '\\n') for line in value1.splitlines()]\n    lines2 = [(line + '\\n') for line in value2.splitlines()]\n    diff_lines = difflib.context_diff(lines1, lines2, fromfile=name1, tofile=name2)\n    return ''.join(diff_lines)", "docstring": "Get a diff between two strings.\n\nArgs:\nvalue1 (str): First string to be compared.\nvalue2 (str): Second string to be compared.\nname1 (str): Name of the first string.\nname2 (str): Name of the second string.\n\nReturns:\nstr: The full diff.", "source": "codesearchnet"}
{"code": "def plot(self, ax=None, return_fig=False, **kwargs):\n    if (ax is None):\n        fig = plt.figure(figsize=(2, 10))\n        ax = fig.add_subplot(111)\n        return_ax = False\n    else:\n        return_ax = True\n    hypertime = np.linspace(self.start, self.stop, (((10 * self.size) - 1) + 1))\n    hyperamp = np.interp(hypertime, self.basis, self)\n    ax.plot(hyperamp, hypertime, 'k')\n    ax.fill_betweenx(hypertime, hyperamp, 0, (hyperamp > 0.0), facecolor='k', lw=0)\n    ax.invert_yaxis()\n    ax.set_title(self.name)\n    if return_ax:\n        return ax\n    elif return_fig:\n        return fig\n    else:\n        return None", "docstring": "Plot a synthetic.\n\nArgs:\nax (ax): A matplotlib axis.\nlegend (Legend): For now, only here to match API for other plot\nmethods.\nreturn_fig (bool): whether to return the matplotlib figure.\nDefault False.\n\nReturns:\nax. If you passed in an ax, otherwise None.", "source": "codesearchnet"}
{"code": "def open_repository(path, spor_dir='.spor'):\n    \n    root = _find_root_dir(path, spor_dir)\n    return Repository(root, spor_dir)", "docstring": "Open an existing repository.\n\nArgs:\npath: Path to any file or directory within the repository.\nspor_dir: The name of the directory containing spor data.\n\nReturns: A `Repository` instance.\n\nRaises:\nValueError: No repository is found.", "source": "juraj-google-style"}
{"code": "def _from_definition(fdef, grad_func=None):\n    func = None\n    argnames = [arg.name for arg in fdef.signature.input_arg]\n    input_types = tuple((dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg))\n    func_name = fdef.signature.name\n    python_grad_func = None\n    out_names = [arg.name for arg in fdef.signature.output_arg]\n    result = _DefinedFunction(func, argnames, input_types, func_name, grad_func, python_grad_func, out_names)\n    if is_oss:\n        serialized = fdef.SerializeToString()\n        c_func = c_api.TF_FunctionImportFunctionDef(serialized)\n    else:\n        c_func = c_api.TF_FunctionImportFunctionDefNoSerialization(fdef)\n    result._c_func = c_api_util.ScopedTFFunction(c_func, func_name)\n    result._extra_inputs = []\n    result._op_def = fdef.signature\n    return result", "docstring": "Creates a _DefinedFunction initialized from a FunctionDef proto.\n\nArgs:\nfdef: a FunctionDef\ngrad_func: a _DefinedFunction or None\n\nReturns:\nA _DefinedFunction representing fdef", "source": "github-repos"}
{"code": "def traverse(self, fn=None, specs=None, full_breadth=True):\n    if (fn is None):\n        fn = (lambda x: x)\n    if ((specs is not None) and (not isinstance(specs, (list, set, tuple)))):\n        specs = [specs]\n    accumulator = []\n    matches = (specs is None)\n    if (not matches):\n        for spec in specs:\n            matches = self.matches(spec)\n            if matches:\n                break\n    if matches:\n        accumulator.append(fn(self))\n    if self._deep_indexable:\n        for el in self:\n            if (el is None):\n                continue\n            accumulator += el.traverse(fn, specs, full_breadth)\n            if (not full_breadth):\n                break\n    return accumulator", "docstring": "Traverses object returning matching items\n\nTraverses the set of children of the object, collecting the\nall objects matching the defined specs. Each object can be\nprocessed with the supplied function.\n\nArgs:\nfn (function, optional): Function applied to matched objects\nspecs: List of specs to match\nSpecs must be types, functions or type[.group][.label]\nspecs to select objects to return, by default applies\nto all objects.\nfull_breadth: Whether to traverse all objects\nWhether to traverse the full set of objects on each\ncontainer or only the first.\n\nReturns:\nlist: List of objects that matched", "source": "codesearchnet"}
{"code": "def mtf_slice(x, begin, size, slice_dim_name, name=None):\n  \n  return SliceOperation(\n      x, begin, size, slice_dim_name, name=name).outputs[0]", "docstring": "Slice operation.\n\nCall externally as mtf.slice()\n\nArgs:\nx: a list of Tensors\nbegin: integer, where to begin slicing from along the axis\nsize: integer, size to slice from axis.\nslice_dim_name: string, dimension name of slicing axis.\nname: an optional string\nReturns:\na Tensor with shape extended by output_shape for the last axis.", "source": "juraj-google-style"}
{"code": "def wait_for_plug_update(self, plug_name, remote_state, timeout_s):\n    \n    plug = self._plugs_by_name.get(plug_name)\n\n    if plug is None:\n      raise InvalidPlugError('Cannot wait on unknown plug \"%s\".' % plug_name)\n\n    if not isinstance(plug, FrontendAwareBasePlug):\n      raise InvalidPlugError('Cannot wait on a plug %s that is not an subclass '\n                             'of FrontendAwareBasePlug.' % plug_name)\n\n    state, update_event = plug.asdict_with_event()\n    if state != remote_state:\n      return state\n\n    if update_event.wait(timeout_s):\n      return plug._asdict()", "docstring": "Wait for a change in the state of a frontend-aware plug.\n\nArgs:\nplug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.\nremote_state: The last observed state.\ntimeout_s: Number of seconds to wait for an update.\n\nReturns:\nAn updated state, or None if the timeout runs out.\n\nRaises:\nInvalidPlugError: The plug can't be waited on either because it's not in\nuse or it's not a frontend-aware plug.", "source": "juraj-google-style"}
{"code": "def _tf_data_packed_nest_with_indices(structure, flat, index):\n    packed = []\n    for s in _tf_data_yield_value(structure):\n        if _tf_data_is_nested(s):\n            new_index, child = _tf_data_packed_nest_with_indices(s, flat, index)\n            packed.append(sequence_like(s, child))\n            index = new_index\n        else:\n            packed.append(flat[index])\n            index += 1\n    return (index, packed)", "docstring": "Helper function for pack_nest_as.\n\nArgs:\nstructure: Substructure (tuple of elements and/or tuples) to mimic\nflat: Flattened values to output substructure for.\nindex: Index at which to start reading from flat.\n\nReturns:\nThe tuple (new_index, child), where:\n* new_index - the updated index into `flat` having processed `structure`.\n* packed - the subset of `flat` corresponding to `structure`,\nhaving started at `index`, and packed into the same nested\nformat.\n\nRaises:\nValueError: if `structure` contains more elements than `flat`\n(assuming indexing starts from `index`).", "source": "github-repos"}
{"code": "def add_constant(self, stream, value):\n    if (stream in self.constant_database):\n        raise ArgumentError('Attempted to set the same constant twice', stream=stream, old_value=self.constant_database[stream], new_value=value)\n    self.constant_database[stream] = value", "docstring": "Store a constant value for use in this sensor graph.\n\nConstant assignments occur after all sensor graph nodes have been\nallocated since they must be propogated to all appropriate virtual\nstream walkers.\n\nArgs:\nstream (DataStream): The constant stream to assign the value to\nvalue (int): The value to assign.", "source": "codesearchnet"}
{"code": "def GreaterThan(self, value):\n    self._awql = self._CreateSingleValueCondition(value, '>')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"greater than\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def _validate_sub(claims, subject=None):\n    if ('sub' not in claims):\n        return\n    if (not isinstance(claims['sub'], string_types)):\n        raise JWTClaimsError('Subject must be a string.')\n    if (subject is not None):\n        if (claims.get('sub') != subject):\n            raise JWTClaimsError('Invalid subject')", "docstring": "Validates that the 'sub' claim is valid.\n\nThe \"sub\" (subject) claim identifies the principal that is the\nsubject of the JWT.  The claims in a JWT are normally statements\nabout the subject.  The subject value MUST either be scoped to be\nlocally unique in the context of the issuer or be globally unique.\nThe processing of this claim is generally application specific.  The\n\"sub\" value is a case-sensitive string containing a StringOrURI\nvalue.  Use of this claim is OPTIONAL.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\nsubject (str): The subject of the token.", "source": "codesearchnet"}
{"code": "def distribution(self, start=None, end=None, normalized=True, mask=None):\n    (start, end, mask) = self._check_boundaries(start, end, mask=mask)\n    counter = histogram.Histogram()\n    for (start, end, _) in mask.iterperiods(value=True):\n        for (t0, t1, value) in self.iterperiods(start, end):\n            duration = utils.duration_to_number((t1 - t0), units='seconds')\n            try:\n                counter[value] += duration\n            except histogram.UnorderableElements as e:\n                counter = histogram.Histogram.from_dict(dict(counter), key=hash)\n                counter[value] += duration\n    if normalized:\n        return counter.normalized()\n    else:\n        return counter", "docstring": "Calculate the distribution of values over the given time range from\n`start` to `end`.\n\nArgs:\n\nstart (orderable, optional): The lower time bound of\nwhen to calculate the distribution. By default, the\nfirst time point will be used.\n\nend (orderable, optional): The upper time bound of\nwhen to calculate the distribution. By default, the\nlast time point will be used.\n\nnormalized (bool): If True, distribution will sum to\none. If False and the time values of the TimeSeries\nare datetimes, the units will be seconds.\n\nmask (:obj:`TimeSeries`, optional): A\ndomain on which to calculate the distribution.\n\nReturns:\n\n:obj:`Histogram` with the results.", "source": "codesearchnet"}
{"code": "def consume(self, key, amount=1, rate=None, capacity=None, **kwargs):\n    bucket = self.get_bucket(key, rate, capacity, **kwargs)\n    return bucket.consume(amount)", "docstring": "Consume an amount for a given key.\n\nNon-default rate/capacity can be given to override Throttler defaults.\n\nReturns:\nbool: whether the units could be consumed", "source": "codesearchnet"}
{"code": "def list_deployment_operations(access_token, subscription_id, rg_name, deployment_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rg_name, '/providers/Microsoft.Resources/deployments/', deployment_name, '/operations', '?api-version=', BASE_API])\n    return do_get(endpoint, access_token)", "docstring": "List all operations involved in a given deployment.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrg_name (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def NormalizePath(path):\n    path = os.path.normpath(path)\n    for sys_path in sys.path:\n        if (not sys_path):\n            continue\n        sys_path = os.path.join(sys_path, '')\n        if path.startswith(sys_path):\n            return path[len(sys_path):]\n    return path", "docstring": "Removes any Python system path prefix from the given path.\n\nPython keeps almost all paths absolute. This is not what we actually\nwant to return. This loops through system paths (directories in which\nPython will load modules). If \"path\" is relative to one of them, the\ndirectory prefix is removed.\n\nArgs:\npath: absolute path to normalize (relative paths will not be altered)\n\nReturns:\nRelative path if \"path\" is within one of the sys.path directories or\nthe input otherwise.", "source": "codesearchnet"}
{"code": "def load_panel(panel_path, adapter, date=None, display_name=None, version=None, panel_type=None, \n               panel_id=None, institute=None):\n    \n    panel_lines = get_file_handle(panel_path)\n\n    try:\n        \n        panel_info = get_panel_info(\n            panel_lines=panel_lines,\n            panel_id=panel_id,\n            institute=institute,\n            version=version,\n            date=date,\n            display_name=display_name\n            )\n    except Exception as err:\n        raise err\n\n    version = None\n    if panel_info.get('version'):\n        version = float(panel_info['version'])\n\n    panel_id = panel_info['panel_id']\n    display_name = panel_info['display_name'] or panel_id\n    institute = panel_info['institute']\n    date = panel_info['date']\n\n    if not institute:\n        raise SyntaxError(\"A Panel has to belong to a institute\")\n\n    \n    if not adapter.institute(institute):\n        raise SyntaxError(\"Institute {0} does not exist in database\".format(institute))\n\n    if not panel_id:\n        raise SyntaxError(\"A Panel has to have a panel id\")\n    \n    if version:\n        existing_panel = adapter.gene_panel(panel_id, version)\n    else:\n        \n        existing_panel = adapter.gene_panel(panel_id)\n        version = 1.0\n        LOG.info(\"Set version to %s\", version)\n\n    if existing_panel:\n        LOG.info(\"found existing panel\")\n        if version == existing_panel['version']:\n            LOG.warning(\"Panel with same version exists in database\")\n            LOG.info(\"Reload with updated version\")\n            raise SyntaxError()\n        display_name = display_name or existing_panel['display_name']\n        institute = institute or existing_panel['institute']\n    \n    parsed_panel = parse_gene_panel(\n        path=panel_path,\n        institute=institute,\n        panel_type=panel_type,\n        date=date,\n        version=version,\n        panel_id=panel_id,\n        display_name=display_name,\n    )\n    \n    try:\n        adapter.load_panel(parsed_panel=parsed_panel)\n    except Exception as err:\n        raise err", "docstring": "Load a manually curated gene panel into scout\n\nArgs:\npanel_path(str): path to gene panel file\nadapter(scout.adapter.MongoAdapter)\ndate(str): date of gene panel on format 2017-12-24\ndisplay_name(str)\nversion(float)\npanel_type(str)\npanel_id(str)\ninstitute(str)", "source": "juraj-google-style"}
{"code": "def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):\n    return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)", "docstring": "Instantiate a [`MaskFormerConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model\nconfiguration.\n\nArgs:\nbackbone_config ([`PretrainedConfig`]):\nThe backbone configuration.\ndecoder_config ([`PretrainedConfig`]):\nThe transformer decoder configuration to use.\n\nReturns:\n[`MaskFormerConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def _zero_out_grad(op, grad):\n    to_zero = op.inputs[0]\n    shape = array_ops.shape(to_zero)\n    index = array_ops.zeros_like(shape)\n    first_grad = array_ops.reshape(grad, [-1])[0]\n    to_zero_grad = sparse_ops.sparse_to_dense([index], shape, first_grad, 0)\n    return [to_zero_grad]", "docstring": "The gradients for `zero_out`.\n\nArgs:\nop: The `zero_out` `Operation` that we are differentiating, which we can use\nto find the inputs and outputs of the original op.\ngrad: Gradient with respect to the output of the `zero_out` op.\n\nReturns:\nGradients with respect to the input of `zero_out`.", "source": "github-repos"}
{"code": "def load(url_or_handle, cache=None, **kwargs):\n    ext = get_extension(url_or_handle)\n    try:\n        loader = loaders[ext.lower()]\n        message = \"Using inferred loader '%s' due to passed file extension '%s'.\"\n        log.debug(message, loader.__name__[6:], ext)\n        return load_using_loader(url_or_handle, loader, cache, **kwargs)\n    except KeyError:\n        log.warning(\"Unknown extension '%s', attempting to load as image.\", ext)\n        try:\n            with read_handle(url_or_handle, cache=cache) as handle:\n                result = _load_img(handle)\n        except Exception as e:\n            message = 'Could not load resource %s as image. Supported extensions: %s'\n            log.error(message, url_or_handle, list(loaders))\n            raise RuntimeError(message.format(url_or_handle, list(loaders)))\n        else:\n            log.info(\"Unknown extension '%s' successfully loaded as image.\", ext)\n            return result", "docstring": "Load a file.\n\nFile format is inferred from url. File retrieval strategy is inferred from\nURL. Returned object type is inferred from url extension.\n\nArgs:\nurl_or_handle: a (reachable) URL, or an already open file handle\n\nRaises:\nRuntimeError: If file extension or URL is not supported.", "source": "codesearchnet"}
{"code": "def get_item(env, name, default=None):\n  \n  \n  for key in name.split('.'):\n    if isinstance(env, dict) and key in env:\n      env = env[key]\n    elif isinstance(env, types.ModuleType) and key in env.__dict__:\n      env = env.__dict__[key]\n    else:\n      return default\n  return env", "docstring": "Get an item from a dictionary, handling nested lookups with dotted notation.\n\nArgs:\nenv: the environment (dictionary) to use to look up the name.\nname: the name to look up, in dotted notation.\ndefault: the value to return if the name if not found.\n\nReturns:\nThe result of looking up the name, if found; else the default.", "source": "juraj-google-style"}
{"code": "def ref_for_message_type(self, message_type):\n    name = self.__normalized_name(message_type)\n    if (name not in self.__schemas):\n        raise KeyError('Message has not been parsed: %s', name)\n    return name", "docstring": "Returns the JSON Schema id for the given message.\n\nArgs:\nmessage_type: protorpc.message.Message class to be parsed.\n\nReturns:\nstring, The JSON Schema id.\n\nRaises:\nKeyError: if the message hasn't been parsed via add_message().", "source": "codesearchnet"}
{"code": "def completely_parse_reader(parser: Parser[Input, Output], reader: Reader[Input]) -> Result[Output]:\n    \n    result = (parser << eof).consume(reader)\n\n    if isinstance(result, Continue):\n        return Success(result.value)\n    else:\n        used = set()\n        unique_expected = []\n        for expected_lambda in result.expected:\n            expected = expected_lambda()\n            if expected not in used:\n                used.add(expected)\n                unique_expected.append(expected)\n\n        return Failure(result.farthest.expected_error(' or '.join(unique_expected)))", "docstring": "Consume reader and return Success only on complete consumption.\n\nThis is a helper function for ``parse`` methods, which return ``Success``\nwhen the input is completely consumed and ``Failure`` with an appropriate\nmessage otherwise.\n\nArgs:\nparser: The parser doing the consuming\nreader: The input being consumed\n\nReturns:\nA parsing ``Result``", "source": "juraj-google-style"}
{"code": "def add_noise_curve(self, name, noise_type='ASD', is_wd_background=False):\n    if is_wd_background:\n        self.sensitivity_input.wd_noise = name\n        self.sensitivity_input.wd_noise_type_in = noise_type\n    else:\n        if ('sensitivity_curves' not in self.sensitivity_input.__dict__):\n            self.sensitivity_input.sensitivity_curves = []\n        if ('noise_type_in' not in self.sensitivity_input.__dict__):\n            self.sensitivity_input.noise_type_in = []\n        self.sensitivity_input.sensitivity_curves.append(name)\n        self.sensitivity_input.noise_type_in.append(noise_type)\n    return", "docstring": "Add a noise curve for generation.\n\nThis will add a noise curve for an SNR calculation by appending to the sensitivity_curves\nlist within the sensitivity_input dictionary.\n\nThe name of the noise curve prior to the file extension will appear as its\nlabel in the final output dataset. Therefore, it is recommended prior to\nrunning the generator that file names are renamed to simple names\nfor later reference.\n\nArgs:\nname (str): Name of noise curve including file extension inside input_folder.\nnoise_type (str, optional): Type of noise. Choices are `ASD`, `PSD`, or `char_strain`.\nDefault is ASD.\nis_wd_background (bool, optional): If True, this sensitivity is used as the white dwarf\nbackground noise. Default is False.", "source": "codesearchnet"}
{"code": "def get_default_connection_info(self, provider_name):\n    provider = self._provider_client.get_by_name(provider_name)\n    if provider:\n        return provider['defaultConnectionInfo']\n    else:\n        return {}", "docstring": "Gets default connection info for a specific provider.\n\nArgs:\nprovider_name: Name of the provider.\n\nReturns:\ndict: Default connection information.", "source": "codesearchnet"}
{"code": "def sonority_from_fts(self, seg):\n        \n\n        def match(m):\n            return self.fm.match(fts(m), seg)\n\n        minusHi = BoolTree(match('-hi'), 9, 8)\n        minusNas = BoolTree(match('-nas'), 6, 5)\n        plusVoi1 = BoolTree(match('+voi'), 4, 3)\n        plusVoi2 = BoolTree(match('+voi'), 2, 1)\n        plusCont = BoolTree(match('+cont'), plusVoi1, plusVoi2)\n        plusSon = BoolTree(match('+son'), minusNas, plusCont)\n        minusCons = BoolTree(match('-cons'), 7, plusSon)\n        plusSyl = BoolTree(match('+syl'), minusHi, minusCons)\n        return plusSyl.get_value()", "docstring": "Given a segment as features, returns the sonority on a scale of 1\nto 9.\n\nArgs:\nseg (list): collection of (value, feature) pairs representing\na segment (vowel or consonant)\n\nReturns:\nint: sonority of `seg` between 1 and 9", "source": "juraj-google-style"}
{"code": "def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,\n                     to_unit_cell=True):\n        \n\n        from numpy.linalg import norm\n        from numpy import cross, eye\n        from scipy.linalg import expm\n\n        if indices is None:\n            indices = range(len(self))\n\n        if axis is None:\n            axis = [0, 0, 1]\n\n        if anchor is None:\n            anchor = [0, 0, 0]\n\n        anchor = np.array(anchor)\n        axis = np.array(axis)\n\n        theta %= 2 * np.pi\n\n        rm = expm(cross(eye(3), axis / norm(axis)) * theta)\n        for i in indices:\n            site = self._sites[i]\n            coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()\n            new_site = PeriodicSite(\n                site.species, coords, self._lattice,\n                to_unit_cell=to_unit_cell, coords_are_cartesian=True,\n                properties=site.properties)\n            self._sites[i] = new_site", "docstring": "Rotate specific sites by some angle around vector at anchor.\n\nArgs:\nindices (list): List of site indices on which to perform the\ntranslation.\ntheta (float): Angle in radians\naxis (3x1 array): Rotation axis vector.\nanchor (3x1 array): Point of rotation.\nto_unit_cell (bool): Whether new sites are transformed to unit\ncell", "source": "juraj-google-style"}
{"code": "def resetAndRejoin(self, timeout):\n        \n        print '%s call resetAndRejoin' % self.port\n        print timeout\n        try:\n            if self.__sendCommand(WPANCTL_CMD + 'setprop Daemon:AutoAssociateAfterReset false')[0] != 'Fail':\n                time.sleep(0.5)\n                if self.__sendCommand(WPANCTL_CMD + 'reset')[0] != 'Fail':\n                    self.isPowerDown = True\n                else:\n                    return False\n            else:\n                return False\n            time.sleep(timeout)\n\n            if self.deviceRole == Thread_Device_Role.SED:\n                self.setPollingRate(self.sedPollingRate)\n\n            if self.__sendCommand(WPANCTL_CMD + 'attach')[0] != 'Fail':\n                time.sleep(3)\n            else:\n                return False\n\n            if self.__sendCommand(WPANCTL_CMD + 'setprop Daemon:AutoAssociateAfterReset true')[0] == 'Fail':\n                return False\n\n            if self.__stripValue(self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:State')[0]) != 'associated':\n                print '[FAIL] reset and rejoin'\n                return False\n            return True\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('resetAndRejoin() Error: ' + str(e))", "docstring": "reset and join back Thread Network with a given timeout delay\n\nArgs:\ntimeout: a timeout interval before rejoin Thread Network\n\nReturns:\nTrue: successful to reset and rejoin Thread Network\nFalse: fail to reset and rejoin the Thread Network", "source": "juraj-google-style"}
{"code": "def read_bit(self, registeraddress, functioncode=2):\n    _checkFunctioncode(functioncode, [1, 2])\n    return self._genericCommand(functioncode, registeraddress)", "docstring": "Read one bit from the slave.\n\nArgs:\n* registeraddress (int): The slave register address (use decimal numbers, not hex).\n* functioncode (int): Modbus function code. Can be 1 or 2.\n\nReturns:\nThe bit value 0 or 1 (int).\n\nRaises:\nValueError, TypeError, IOError", "source": "codesearchnet"}
{"code": "def get_metadata(self, handle):\n        \n        handle = os.path.expanduser(os.path.expandvars(handle))\n        with open(self._prefixed('%s.metadata' % handle)) as f:\n            return json.load(f)", "docstring": "Returns the associated metadata info for the given handle, the metadata\nfile must exist (``handle + '.metadata'``).\n\nArgs:\nhandle (str): Path to the template to get the metadata from\n\nReturns:\ndict: Metadata for the given handle", "source": "juraj-google-style"}
{"code": "def transform_to_mods_periodical(marc_xml, uuid, url):\n    marc_xml = _read_content_or_path(marc_xml)\n    transformed = xslt_transformation(marc_xml, _absolute_template_path('MARC21toPeriodicalTitle.xsl'))\n    return _apply_postprocessing(marc_xml=marc_xml, xml=transformed, func=mods_postprocessor.postprocess_periodical, uuid=uuid, url=url)", "docstring": "Convert `marc_xml` to periodical MODS data format.\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "codesearchnet"}
{"code": "def StatEntryFromPath(path, pathspec, ext_attrs=True):\n  \n  try:\n    stat = filesystem.Stat.FromPath(path)\n  except (IOError, OSError) as error:\n    logging.error(\"Failed to obtain stat for '%s': %s\", pathspec, error)\n    return rdf_client_fs.StatEntry(pathspec=pathspec)\n\n  return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)", "docstring": "Builds a stat entry object from a given path.\n\nArgs:\npath: A path (string value) to stat.\npathspec: A `PathSpec` corresponding to the `path`.\next_attrs: Whether to include extended file attributes in the result.\n\nReturns:\n`StatEntry` object.", "source": "juraj-google-style"}
{"code": "def victim(self, main_type, sub_type, unique_id, victim_id, params=None):\n        \n        params = params or {}\n\n        if not sub_type:\n            url = '/v2/{}/{}/victims/{}'.format(main_type, unique_id, victim_id)\n        else:\n            url = '/v2/{}/{}/{}/victims/{}'.format(main_type, sub_type, unique_id, victim_id)\n\n        return self.tcex.session.get(url, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nvictim_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def replace_urls(status):\n    \n    text = status.text\n\n    if not has_url(status):\n        return text\n\n    urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]\n    urls.sort(key=lambda x: x[0][0], reverse=True)\n\n    for (start, end), url in urls:\n        text = text[:start] + url + text[end:]\n\n    return text", "docstring": "Replace shorturls in a status with expanded urls.\n\nArgs:\nstatus (tweepy.status): A tweepy status object\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def _get_status_code(self, http_status):\n    \n    try:\n      return int(http_status.split(' ', 1)[0])\n    except TypeError:\n      _logger.warning('Unable to find status code in HTTP status %r.',\n                      http_status)\n    return 500", "docstring": "Get the HTTP status code from an HTTP status string.\n\nArgs:\nhttp_status: A string containing a HTTP status code and reason.\n\nReturns:\nAn integer with the status code number from http_status.", "source": "juraj-google-style"}
{"code": "def asin(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.asin, tf.float32)", "docstring": "Returns a TensorFluent for the arcsin function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the arcsin function.", "source": "juraj-google-style"}
{"code": "def ContainsAny(self, *values):\n    \n    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ANY')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains any\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    data = file_object.read(self._HEADER_READ_SIZE)\n    if not data.startswith(b'<?xml'):\n      raise errors.UnableToParseFile(\n          'Not an Android usage history file [not XML]')\n\n    _, _, data = data.partition(b'\\n')\n    if not data.startswith(b'<usage-history'):\n      raise errors.UnableToParseFile(\n          'Not an Android usage history file [wrong XML root key]')\n\n    \n    \n    file_object.seek(0, os.SEEK_SET)\n\n    xml = ElementTree.parse(file_object)\n    root_node = xml.getroot()\n\n    for application_node in root_node:\n      package_name = application_node.get('name', None)\n\n      for part_node in application_node.iter():\n        if part_node.tag != 'comp':\n          continue\n\n        last_resume_time = part_node.get('lrt', None)\n        if last_resume_time is None:\n          parser_mediator.ProduceExtractionWarning('missing last resume time.')\n          continue\n\n        try:\n          last_resume_time = int(last_resume_time, 10)\n        except ValueError:\n          parser_mediator.ProduceExtractionWarning(\n              'unsupported last resume time: {0:s}.'.format(last_resume_time))\n          continue\n\n        event_data = AndroidAppUsageEventData()\n        event_data.component = part_node.get('name', None)\n        event_data.package = package_name\n\n        date_time = dfdatetime_java_time.JavaTime(timestamp=last_resume_time)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_RESUME)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Android usage-history file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def predict(self, X, break_ties=\"random\", return_probs=False, **kwargs):\n        \n        Y_s = self.predict_proba(X, **kwargs)\n        self._check(Y_s, typ=list)\n        self._check(Y_s[0], typ=np.ndarray)\n\n        Y_p = []\n        for Y_ts in Y_s:\n            Y_tp = self._break_ties(Y_ts, break_ties)\n            Y_p.append(Y_tp.astype(np.int))\n\n        if return_probs:\n            return Y_p, Y_s\n        else:\n            return Y_p", "docstring": "Predicts int labels for an input X on all tasks\n\nArgs:\nX: The input for the predict_proba method\nbreak_ties: A tie-breaking policy\nreturn_probs: Return the predicted probabilities as well\n\nReturns:\nY_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t]\n[Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of\npredicted probabilities]", "source": "juraj-google-style"}
{"code": "def write_buffers(self, conn, locked=True):\n        \n        if conn is None:\n            raise ValueError(\"Cannot write_buffers to connection None\")\n        sent = 0\n        for header, payload in self._buffers:\n            yield conn.write_message(header, locked=locked)\n            yield conn.write_message(payload, binary=True, locked=locked)\n            sent += (len(header) + len(payload))\n        raise gen.Return(sent)", "docstring": "Write any buffer headers and payloads to the given connection.\n\nArgs:\nconn (object) :\nMay be any object with a ``write_message`` method. Typically,\na Tornado ``WSHandler`` or ``WebSocketClientConnection``\n\nlocked (bool) :\n\nReturns:\nint : number of bytes sent", "source": "juraj-google-style"}
{"code": "def __init__(self, filename, filename_info, filetype_info):\n        \n        super(VIIRSActiveFiresTextFileHandler, self).__init__(filename, filename_info, filetype_info)\n\n        if not os.path.isfile(filename):\n            return\n\n        self.file_content = dd.read_csv(filename, skiprows=15, header=None,\n                                        names=[\"latitude\", \"longitude\",\n                                               \"T13\", \"Along-scan\", \"Along-track\", \"detection_confidence\",\n                                               \"power\"])", "docstring": "Makes sure filepath is valid and then reads data into a Dask DataFrame\n\nArgs:\nfilename: Filename\nfilename_info: Filename information\nfiletype_info: Filetype information", "source": "juraj-google-style"}
{"code": "def weights(self):\n    return self.trainable_weights + self.non_trainable_weights", "docstring": "Returns the list of all layer variables/weights.\n\nReturns:\nA list of variables.", "source": "github-repos"}
{"code": "def add_phase(self, name, done, score, summary, steps, report_every=None, log_every=None, checkpoint_every=None, feed=None):\n    done = tf.convert_to_tensor(done, tf.bool)\n    score = tf.convert_to_tensor(score, tf.float32)\n    summary = tf.convert_to_tensor(summary, tf.string)\n    feed = (feed or {})\n    if ((done.shape.ndims is None) or (score.shape.ndims is None)):\n        raise ValueError(\"Rank of 'done' and 'score' tensors must be known.\")\n    writer = (self._logdir and tf.summary.FileWriter(os.path.join(self._logdir, name), tf.get_default_graph(), flush_secs=60))\n    op = self._define_step(done, score, summary)\n    batch = (1 if (score.shape.ndims == 0) else score.shape[0].value)\n    self._phases.append(_Phase(name, writer, op, batch, int(steps), feed, report_every, log_every, checkpoint_every))", "docstring": "Add a phase to the loop protocol.\n\nIf the model breaks long computation into multiple steps, the done tensor\nindicates whether the current score should be added to the mean counter.\nFor example, in reinforcement learning we only have a valid score at the\nend of the episode.\n\nScore and done tensors can either be scalars or vectors, to support\nsingle and batched computations.\n\nArgs:\nname: Name for the phase, used for the summary writer.\ndone: Tensor indicating whether current score can be used.\nscore: Tensor holding the current, possibly intermediate, score.\nsummary: Tensor holding summary string to write if not an empty string.\nsteps: Duration of the phase in steps.\nreport_every: Yield mean score every this number of steps.\nlog_every: Request summaries via `log` tensor every this number of steps.\ncheckpoint_every: Write checkpoint every this number of steps.\nfeed: Additional feed dictionary for the session run call.\n\nRaises:\nValueError: Unknown rank for done or score tensors.", "source": "codesearchnet"}
{"code": "def as_list(self, label=1, **kwargs):\n        \n        label_to_use = label if self.mode == \"classification\" else self.dummy_label\n        ans = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs)\n        ans = [(x[0], float(x[1])) for x in ans]\n        return ans", "docstring": "Returns the explanation as a list.\n\nArgs:\nlabel: desired label. If you ask for a label for which an\nexplanation wasn't computed, will throw an exception.\nWill be ignored for regression explanations.\nkwargs: keyword arguments, passed to domain_mapper\n\nReturns:\nlist of tuples (representation, weight), where representation is\ngiven by domain_mapper. Weight is a float.", "source": "juraj-google-style"}
{"code": "def napoleon_to_sphinx(docstring, **config_params):\n    \n    if \"napoleon_use_param\" not in config_params:\n        config_params[\"napoleon_use_param\"] = False\n\n    if \"napoleon_use_rtype\" not in config_params:\n        config_params[\"napoleon_use_rtype\"] = False\n\n    config = Config(**config_params)\n\n    return str(GoogleDocstring(docstring, config))", "docstring": "Convert napoleon docstring to plain sphinx string.\n\nArgs:\ndocstring (str): Docstring in napoleon format.\n**config_params (dict): Whatever napoleon doc configuration you want.\n\nReturns:\nstr: Sphinx string.", "source": "juraj-google-style"}
{"code": "def update(self, instance, validated_data):\n    is_primary = validated_data.pop('is_primary', False)\n    instance = super(EmailSerializer, self).update(instance, validated_data)\n    if is_primary:\n        instance.set_primary()\n    return instance", "docstring": "Update the instance the serializer is bound to.\n\nArgs:\ninstance:\nThe instance the serializer is bound to.\nvalidated_data:\nThe data to update the serializer with.\n\nReturns:\nThe updated instance.", "source": "codesearchnet"}
{"code": "def in_flight_request_count(self, node_id=None):\n    if (node_id is not None):\n        conn = self._conns.get(node_id)\n        if (conn is None):\n            return 0\n        return len(conn.in_flight_requests)\n    else:\n        return sum([len(conn.in_flight_requests) for conn in list(self._conns.values())])", "docstring": "Get the number of in-flight requests for a node or all nodes.\n\nArguments:\nnode_id (int, optional): a specific node to check. If unspecified,\nreturn the total for all nodes\n\nReturns:\nint: pending in-flight requests for the node, or all nodes if None", "source": "codesearchnet"}
{"code": "def make_bitransformer(input_vocab_size=gin.REQUIRED, output_vocab_size=gin.REQUIRED, layout=None, mesh_shape=None):\n    with gin.config_scope('encoder'):\n        encoder = Unitransformer(layer_stack=make_layer_stack(), input_vocab_size=input_vocab_size, output_vocab_size=None, autoregressive=False, name='encoder', layout=layout, mesh_shape=mesh_shape)\n    with gin.config_scope('decoder'):\n        decoder = Unitransformer(layer_stack=make_layer_stack(), input_vocab_size=output_vocab_size, output_vocab_size=output_vocab_size, autoregressive=True, name='decoder', layout=layout, mesh_shape=mesh_shape)\n    return Bitransformer(encoder, decoder)", "docstring": "Gin-configurable bitransformer constructor.\n\nIn your config file you need to set the encoder and decoder layers like this:\nencoder/make_layer_stack.layers = [\n@transformer_layers.SelfAttention,\n@transformer_layers.DenseReluDense,\n]\ndecoder/make_layer_stack.layers = [\n@transformer_layers.SelfAttention,\n@transformer_layers.EncDecAttention,\n@transformer_layers.DenseReluDense,\n]\n\nArgs:\ninput_vocab_size: a integer\noutput_vocab_size: an integer\nlayout: optional - an input to mtf.convert_to_layout_rules\nSome layers (e.g. MoE layers) cheat by looking at layout and mesh_shape\nmesh_shape: optional - an input to mtf.convert_to_shape\nSome layers (e.g. MoE layers) cheat by looking at layout and mesh_shape\nReturns:\na Bitransformer", "source": "codesearchnet"}
{"code": "def initialize(log_file, project_dir=None, debug=False):\n    print_splash()\n    log.setup_logging(log_file, print_log_location=False, debug=debug)\n    logger = log.get_logger('pipeline')\n    if (project_dir is not None):\n        make_dir(os.path.normpath(project_dir))\n        logger.info('PROJECT DIRECTORY: {}'.format(project_dir))\n        logger.info('')\n    logger.info('LOG LOCATION: {}'.format(log_file))\n    print('')\n    return logger", "docstring": "Initializes an AbTools pipeline.\n\nInitialization includes printing the AbTools splash, setting up logging,\ncreating the project directory, and logging both the project directory\nand the log location.\n\nArgs:\n\nlog_file (str): Path to the log file. Required.\n\nproject_dir (str): Path to the project directory. If not provided,\nthe project directory won't be created and the location won't be logged.\n\ndebug (bool): If ``True``, the logging level will be set to ``logging.DEBUG``.\nDefault is ``FALSE``, which logs at ``logging.INFO``.\n\nReturns:\n\nlogger", "source": "codesearchnet"}
{"code": "class EfficientNetBlock(nn.Module):\n\n    def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):\n        super().__init__()\n        self.expand_ratio = expand_ratio\n        self.expand = True if self.expand_ratio != 1 else False\n        expand_in_dim = in_dim * expand_ratio\n        if self.expand:\n            self.expansion = EfficientNetExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride)\n        self.depthwise_conv = EfficientNetDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding)\n        self.squeeze_excite = EfficientNetSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand)\n        self.projection = EfficientNetFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip)\n\n    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:\n        embeddings = hidden_states\n        if self.expand_ratio != 1:\n            hidden_states = self.expansion(hidden_states)\n        hidden_states = self.depthwise_conv(hidden_states)\n        hidden_states = self.squeeze_excite(hidden_states)\n        hidden_states = self.projection(embeddings, hidden_states)\n        return hidden_states", "docstring": "This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.\n\nArgs:\nconfig ([`EfficientNetConfig`]):\nModel configuration class.\nin_dim (`int`):\nNumber of input channels.\nout_dim (`int`):\nNumber of output channels.\nstride (`int`):\nStride size to be used in convolution layers.\nexpand_ratio (`int`):\nExpand ratio to set the output dimensions for the expansion and squeeze-excite layers.\nkernel_size (`int`):\nKernel size for the depthwise convolution layer.\ndrop_rate (`float`):\nDropout rate to be used in the final phase of each block.\nid_skip (`bool`):\nWhether to apply dropout and sum the final hidden states with the input embeddings during the final phase\nof each block. Set to `True` for the first block of each stage.\nadjust_padding (`bool`):\nWhether to apply padding to only right and bottom side of the input kernel before the depthwise convolution\noperation, set to `True` for inputs with odd input sizes.", "source": "github-repos"}
{"code": "def ProcessConfigOverrides(filename):\n  \n\n  abs_filename = os.path.abspath(filename)\n  cfg_filters = []\n  keep_looking = True\n  while keep_looking:\n    abs_path, base_name = os.path.split(abs_filename)\n    if not base_name:\n      break  \n\n    cfg_file = os.path.join(abs_path, \"CPPLINT.cfg\")\n    abs_filename = abs_path\n    if not os.path.isfile(cfg_file):\n      continue\n\n    try:\n      with open(cfg_file) as file_handle:\n        for line in file_handle:\n          line, _, _ = line.partition('\n          if not line.strip():\n            continue\n\n          name, _, val = line.partition('=')\n          name = name.strip()\n          val = val.strip()\n          if name == 'set noparent':\n            keep_looking = False\n          elif name == 'filter':\n            cfg_filters.append(val)\n          elif name == 'exclude_files':\n            \n            \n            \n            \n            \n            \n            if base_name:\n              pattern = re.compile(val)\n              if pattern.match(base_name):\n                _cpplint_state.PrintInfo('Ignoring \"%s\": file excluded by '\n                    '\"%s\". File path component \"%s\" matches pattern \"%s\"\\n' %\n                    (filename, cfg_file, base_name, val))\n                return False\n          elif name == 'linelength':\n            global _line_length\n            try:\n                _line_length = int(val)\n            except ValueError:\n                _cpplint_state.PrintError('Line length must be numeric.')\n          elif name == 'extensions':\n              global _valid_extensions\n              try:\n                  extensions = [ext.strip() for ext in val.split(',')]\n                  _valid_extensions = set(extensions)\n              except ValueError:\n                  sys.stderr.write('Extensions should be a comma-separated list of values;'\n                                   'for example: extensions=hpp,cpp\\n'\n                                   'This could not be parsed: \"%s\"' % (val,))\n          elif name == 'headers':\n              global _header_extensions\n              try:\n                  extensions = [ext.strip() for ext in val.split(',')]\n                  _header_extensions = set(extensions)\n              except ValueError:\n                  sys.stderr.write('Extensions should be a comma-separated list of values;'\n                                   'for example: extensions=hpp,cpp\\n'\n                                   'This could not be parsed: \"%s\"' % (val,))\n          elif name == 'root':\n            global _root\n            _root = val\n          else:\n            _cpplint_state.PrintError(\n                'Invalid configuration option (%s) in file %s\\n' %\n                (name, cfg_file))\n\n    except IOError:\n      _cpplint_state.PrintError(\n          \"Skipping config file '%s': Can't open for reading\\n\" % cfg_file)\n      keep_looking = False\n\n  \n  \n  for cfg_filter in reversed(cfg_filters):\n     _AddFilters(cfg_filter)\n\n  return True", "docstring": "Loads the configuration files and processes the config overrides.\n\nArgs:\nfilename: The name of the file being processed by the linter.\n\nReturns:\nFalse if the current |filename| should not be processed further.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, ...]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(batch, seq_len, embed_dim)`.\nattention_mask (`torch.FloatTensor`):\nAttention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*, defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def prepare_stack_for_update(self, stack, tags):\n    if self.is_stack_destroyed(stack):\n        return False\n    elif self.is_stack_completed(stack):\n        return True\n    stack_name = self.get_stack_name(stack)\n    stack_status = self.get_stack_status(stack)\n    if self.is_stack_in_progress(stack):\n        raise exceptions.StackUpdateBadStatus(stack_name, stack_status, 'Update already in-progress')\n    if (not self.is_stack_recreatable(stack)):\n        raise exceptions.StackUpdateBadStatus(stack_name, stack_status, 'Unsupported state for re-creation')\n    if (not self.recreate_failed):\n        raise exceptions.StackUpdateBadStatus(stack_name, stack_status, 'Stack re-creation is disabled. Run stacker again with the --recreate-failed option to force it to be deleted and created from scratch.')\n    stack_tags = self.get_stack_tags(stack)\n    if (not check_tags_contain(stack_tags, tags)):\n        raise exceptions.StackUpdateBadStatus(stack_name, stack_status, 'Tags differ from current configuration, possibly not created with stacker')\n    if self.interactive:\n        sys.stdout.write(('The \"%s\" stack is in a failed state (%s).\\nIt cannot be updated, but it can be deleted and re-created.\\nAll its current resources will IRREVERSIBLY DESTROYED.\\nProceed carefully!\\n\\n' % (stack_name, stack_status)))\n        sys.stdout.flush()\n        ask_for_approval(include_verbose=False)\n    logger.warn('Destroying stack \"%s\" for re-creation', stack_name)\n    self.destroy_stack(stack)\n    return False", "docstring": "Prepare a stack for updating\n\nIt may involve deleting the stack if is has failed it's initial\ncreation. The deletion is only allowed if:\n- The stack contains all the tags configured in the current context;\n- The stack is in one of the statuses considered safe to re-create\n- ``recreate_failed`` is enabled, due to either being explicitly\nenabled by the user, or because interactive mode is on.\n\nArgs:\nstack (dict): a stack object returned from get_stack\ntags (list): list of expected tags that must be present in the\nstack if it must be re-created\n\nReturns:\nbool: True if the stack can be updated, False if it must be\nre-created", "source": "codesearchnet"}
{"code": "def performSearch(emails=[], nThreads=16, secondsBeforeTimeout=5):\n    \n    \n    _startTime = time.time()\n\n    def hasRunOutOfTime(oldEpoch):\n        \n        now = time.time()\n        return now - oldEpoch >= secondsBeforeTimeout\n\n    results = []\n\n    args = []\n\n    \n    for e in emails:\n        if weCanCheckTheseDomains(e):\n            args.append((e))\n\n    \n    if len(args) == 0:\n        return results\n\n    \n    if nThreads <= 0 or nThreads > len(args):\n        nThreads = len(args)\n\n    \n    \n    \n    try:\n        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n        pool = Pool(nThreads)\n        signal.signal(signal.SIGINT, original_sigint_handler)\n    except ValueError:\n        \n        pool = Pool(nThreads)\n\n    poolResults = []\n    try:\n        def log_result(result):\n            \n            poolResults.append(result)\n\n        for m in emails:\n            parameters = ( m, )\n            res = pool.apply_async(pool_function, args=parameters, callback=log_result)\n            try:\n                res.get(3)\n            except TimeoutError as e:\n                general.warning(\"\\n[!] Process timeouted for '{}'.\\n\".format(parameters))\n        pool.close()\n    except KeyboardInterrupt:\n        print(general.warning(\"\\n[!] Process manually stopped by the user. Terminating workers.\\n\"))\n        pool.terminate()\n\n        pending = \"\"\n\n        print(general.warning(\"[!] The following email providers were not processed:\"))\n        for m in emails:\n            processed = False\n            for result in poolResults:\n                if str(m) in json.dumps(result[\"data\"]):\n                    processed = True\n                    break\n            if not processed:\n                print(\"\\t- \" + str(p))\n                pending += \" \" + str(m)\n\n        print(\"\\n\")\n        print(general.warning(\"If you want to relaunch the app with these platforms you can always run the command with: \"))\n        print(\"\\t mailfy.py ... -p \" + general.emphasis(pending))\n        print(\"\\n\")\n        print(general.warning(\"If you prefer to avoid these platforms you can manually evade them for whatever reason with: \"))\n        print(\"\\t mailfy.py ... -x \" + general.emphasis(pending))\n        print(\"\\n\")\n    pool.join()\n\n    \n    \n    for serArray in poolResults:\n        data = serArray[\"data\"]\n        \n        if data != None and data != {}:\n            results.append(data)\n\n    pool.close()\n\n    return results", "docstring": "Method to perform the mail verification process.\n\nArgs:\n-----\nemails: list of emails to be verified.\nplatforms: list of strings representing the wrappers to be used.\nnThreads: the number of threads to be used. Default: 16 threads.\nsecondsBeforeTimeout: number of seconds to wait before raising a\ntimeout. Default: 5 seconds.\n\nReturns:\n--------\nThe results collected.", "source": "juraj-google-style"}
{"code": "def shift(schedule: ScheduleComponent, time: int, name: str=None) -> Schedule:\n    if (name is None):\n        name = schedule.name\n    return union((time, schedule), name=name)", "docstring": "Return schedule shifted by `time`.\n\nArgs:\nschedule: The schedule to shift\ntime: The time to shift by\nname: Name of shifted schedule. Defaults to name of `schedule`", "source": "codesearchnet"}
{"code": "def build_eval_session(module_spec, class_count):\n  \n  \n  eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (\n      create_module_graph(module_spec))\n\n  eval_sess = tf.Session(graph=eval_graph)\n  with eval_graph.as_default():\n    \n    (_, _, bottleneck_input,\n     ground_truth_input, final_tensor) = add_final_retrain_ops(\n         class_count, FLAGS.final_tensor_name, bottleneck_tensor,\n         wants_quantization, is_training=False)\n\n    \n    \n    tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME)\n\n    evaluation_step, prediction = add_evaluation_step(final_tensor,\n                                                      ground_truth_input)\n\n  return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,\n          evaluation_step, prediction)", "docstring": "Builds an restored eval session without train operations for exporting.\n\nArgs:\nmodule_spec: The hub.ModuleSpec for the image module being used.\nclass_count: Number of classes\n\nReturns:\nEval session containing the restored eval graph.\nThe bottleneck input, ground truth, eval step, and prediction tensors.", "source": "juraj-google-style"}
{"code": "def update_variant(self, variant_obj):\n    LOG.debug('Updating variant %s', variant_obj.get('simple_id'))\n    new_variant = self.variant_collection.find_one_and_replace({'_id': variant_obj['_id']}, variant_obj, return_document=pymongo.ReturnDocument.AFTER)\n    return new_variant", "docstring": "Update one variant document in the database.\n\nThis means that the variant in the database will be replaced by variant_obj.\n\nArgs:\nvariant_obj(dict)\n\nReturns:\nnew_variant(dict)", "source": "codesearchnet"}
{"code": "def find_elb_dns_zone_id(name='', env='dev', region='us-east-1'):\n    LOG.info('Find %s ELB DNS Zone ID in %s [%s].', name, env, region)\n    client = boto3.Session(profile_name=env).client('elb', region_name=region)\n    elbs = client.describe_load_balancers(LoadBalancerNames=[name])\n    return elbs['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID']", "docstring": "Get an application's AWS elb dns zone id.\n\nArgs:\nname (str): ELB name\nenv (str): Environment/account of ELB\nregion (str): AWS Region\n\nReturns:\nstr: elb DNS zone ID", "source": "codesearchnet"}
{"code": "def add_capability(capability, source=None, limit_access=False, image=None, restart=False):\n    if (salt.utils.versions.version_cmp(__grains__['osversion'], '10') == (- 1)):\n        raise NotImplementedError('`install_capability` is not available on this version of Windows: {0}'.format(__grains__['osversion']))\n    cmd = ['DISM', '/Quiet', ('/Image:{0}'.format(image) if image else '/Online'), '/Add-Capability', '/CapabilityName:{0}'.format(capability)]\n    if source:\n        cmd.append('/Source:{0}'.format(source))\n    if limit_access:\n        cmd.append('/LimitAccess')\n    if (not restart):\n        cmd.append('/NoRestart')\n    return __salt__['cmd.run_all'](cmd)", "docstring": "Install a capability\n\nArgs:\ncapability (str): The capability to install\nsource (Optional[str]): The optional source of the capability. Default\nis set by group policy and can be Windows Update.\nlimit_access (Optional[bool]): Prevent DISM from contacting Windows\nUpdate for the source package\nimage (Optional[str]): The path to the root directory of an offline\nWindows image. If `None` is passed, the running operating system is\ntargeted. Default is None.\nrestart (Optional[bool]): Reboot the machine if required by the install\n\nRaises:\nNotImplementedError: For all versions of Windows that are not Windows 10\nand later. Server editions of Windows use ServerManager instead.\n\nReturns:\ndict: A dictionary containing the results of the command\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' dism.add_capability Tools.Graphics.DirectX~~~~0.0.1.0", "source": "codesearchnet"}
{"code": "def processor_coordinates_to_pnum(mesh_shape, coord):\n    ret = 0\n    multiplier = 1\n    for (c, d) in zip(coord[::(- 1)], mesh_shape.to_integer_list[::(- 1)]):\n        ret += (multiplier * c)\n        multiplier *= d\n    return ret", "docstring": "Inverse of pnum_to_processor_coordinates.\n\nArgs:\nmesh_shape: a Shape\ncoord: a list of integers with length len(mesh_shape)\n\nReturns:\nan integer less than len(mesh_shape)", "source": "codesearchnet"}
{"code": "def set_parent(self, parent):\n    if (not isinstance(parent, Node)):\n        raise TypeError('parent must be a Node')\n    self.parent = parent", "docstring": "Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object.\n\nArgs:\n``Node``: The new parent of this ``Node``", "source": "codesearchnet"}
{"code": "def typed_returnvalue(self, type_name, formatter=None):\n        \n        self.return_info = ReturnInfo(type_name, formatter, True, None)", "docstring": "Add type information to the return value of this function.\n\nArgs:\ntype_name (str): The name of the type of the return value.\nformatter (str): An optional name of a formatting function specified\nfor the type given in type_name.", "source": "juraj-google-style"}
{"code": "def update_box_field(self, box_key, field):\n\t\t\n\t\t\n\t\tself._raise_unimplemented_error()\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.fields_suffix\n\t\t\t\t\t\t])\n\t\treturn self._update_field(uri, field)", "docstring": "Upates box field as specified\nArgs:\nbox_key\t\tkey for pipeline where the fields lives\nfield \t\t\t\tStreakField object with fresh data\nreturns\t\t\t\t(status code, updated field dict)", "source": "juraj-google-style"}
{"code": "def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=0.001):\n\n    def in_sites(site):\n        for test_site in sites1:\n            if test_site.is_periodic_image(site, symm_prec, False):\n                return True\n        return False\n    for op in self:\n        newsites2 = [PeriodicSite(site.species, op.operate(site.frac_coords), site.lattice) for site in sites2]\n        for site in newsites2:\n            if (not in_sites(site)):\n                break\n        else:\n            return True\n    return False", "docstring": "Given two sets of PeriodicSites, test if they are actually\nsymmetrically equivalent under this space group.  Useful, for example,\nif you want to test if selecting atoms 1 and 2 out of a set of 4 atoms\nare symmetrically the same as selecting atoms 3 and 4, etc.\n\nOne use is in PartialRemoveSpecie transformation to return only\nsymmetrically distinct arrangements of atoms.\n\nArgs:\nsites1 ([Site]): 1st set of sites\nsites2 ([Site]): 2nd set of sites\nsymm_prec (float): Tolerance in atomic distance to test if atoms\nare symmetrically similar.\n\nReturns:\n(bool): Whether the two sets of sites are symmetrically\nequivalent.", "source": "codesearchnet"}
{"code": "def write_version_and_dims(version, dims, f):\n    \n    f.write((\"\n    f.write((dims[0] + \"\\t\" + dims[1] + \"\\t\" + dims[2] + \"\\t\" + dims[3] + \"\\n\"))", "docstring": "Write first two lines of gct file.\n\nArgs:\nversion (string): 1.3 by default\ndims (list of strings): length = 4\nf (file handle): handle of output file\nReturns:\nnothing", "source": "juraj-google-style"}
{"code": "def generate_poisson_data(centers, n_cells, cluster_probs=None):\n    \n    genes, clusters = centers.shape\n    output = np.zeros((genes, n_cells))\n    if cluster_probs is None:\n        cluster_probs = np.ones(clusters)/clusters\n    labels = []\n    for i in range(n_cells):\n        c = np.random.choice(range(clusters), p=cluster_probs)\n        labels.append(c)\n        output[:,i] = np.random.poisson(centers[:,c])\n    return output, np.array(labels)", "docstring": "Generates poisson-distributed data, given a set of means for each cluster.\n\nArgs:\ncenters (array): genes x clusters matrix\nn_cells (int): number of output cells\ncluster_probs (array): prior probability for each cluster.\nDefault: uniform.\n\nReturns:\noutput - array with shape genes x n_cells\nlabels - array of cluster labels", "source": "juraj-google-style"}
{"code": "def update_reserved_vlan_range(self, id_or_uri, vlan_pool, force=False):\n    uri = (self._client.build_uri(id_or_uri) + '/reserved-vlan-range')\n    return self._client.update(resource=vlan_pool, uri=uri, force=force, default_values=self.DEFAULT_VALUES)", "docstring": "Updates the reserved vlan ID range for the fabric.\n\nNote:\nThis method is only available on HPE Synergy.\n\nArgs:\nid_or_uri: ID or URI of fabric.\nvlan_pool (dict): vlan-pool data to update.\nforce:  If set to true, the operation completes despite any problems with network connectivity or errors\non the resource itself. The default is false.\n\nReturns:\ndict: The fabric", "source": "codesearchnet"}
{"code": "def new_contract_proxy(self, contract_interface, contract_address: Address):\n        \n        return ContractProxy(\n            self,\n            contract=self.new_contract(contract_interface, contract_address),\n        )", "docstring": "Return a proxy for interacting with a smart contract.\n\nArgs:\ncontract_interface: The contract interface as defined by the json.\naddress: The contract's address.", "source": "juraj-google-style"}
{"code": "def get_savable_components(self):\n    components = self.get_components()\n    components = [components[name] for name in sorted(components)]\n    return set(filter((lambda x: isinstance(x, util.SavableComponent)), components))", "docstring": "Returns the list of all of the components this model consists of that can be individually saved and restored.\nFor instance the network or distribution.\n\nReturns:\nList of util.SavableComponent", "source": "codesearchnet"}
{"code": "def run_example(example_coroutine, *extra_args):\n    args = _get_parser(extra_args).parse_args()\n    logging.basicConfig(level=(logging.DEBUG if args.debug else logging.WARNING))\n    cookies = hangups.auth.get_auth_stdin(args.token_path)\n    client = hangups.Client(cookies)\n    loop = asyncio.get_event_loop()\n    task = asyncio.ensure_future(_async_main(example_coroutine, client, args), loop=loop)\n    try:\n        loop.run_until_complete(task)\n    except KeyboardInterrupt:\n        task.cancel()\n        loop.run_until_complete(task)\n    finally:\n        loop.close()", "docstring": "Run a hangups example coroutine.\n\nArgs:\nexample_coroutine (coroutine): Coroutine to run with a connected\nhangups client and arguments namespace as arguments.\nextra_args (str): Any extra command line arguments required by the\nexample.", "source": "codesearchnet"}
{"code": "def _absolute_template_path(fn):\n    \n    return os.path.join(os.path.dirname(__file__), \"xslt\", fn)", "docstring": "Return absolute path for filename from local ``xslt/`` directory.\n\nArgs:\nfn (str): Filename. ``MARC21slim2MODS3-4-NDK.xsl`` for example.\n\nReturns:\nstr: Absolute path to `fn` in ``xslt`` dicretory..", "source": "juraj-google-style"}
{"code": "def get_cross_replica_context():\n    return _get_per_thread_mode().cross_replica_context", "docstring": "Returns the current tf.distribute.Strategy if in a cross-replica context.\n\nDEPRECATED: Please use `in_cross_replica_context()` and\n`get_strategy()` instead.\n\nReturns:\nReturns the current `tf.distribute.Strategy` object in a cross-replica\ncontext, or `None`.\n\nExactly one of `get_replica_context()` and `get_cross_replica_context()`\nwill return `None` in a particular block.", "source": "github-repos"}
{"code": "class PoolerStartLogits(nn.Module):\n\n    def __init__(self, config: PretrainedConfig):\n        super().__init__()\n        self.dense = nn.Linear(config.hidden_size, 1)\n        logger.warning_once('[DEPRECATION WARNING] `PoolerStartLogits` is deprecated and will be removed in v4.53. Please use model-specific class, e.g. `XLMPoolerStartLogits`.')\n\n    def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n        \n        x = self.dense(hidden_states).squeeze(-1)\n        if p_mask is not None:\n            if get_parameter_dtype(self) == torch.float16:\n                x = x * (1 - p_mask) - 65500 * p_mask\n            else:\n                x = x * (1 - p_mask) - 1e+30 * p_mask\n        return x", "docstring": "Compute SQuAD start logits from sequence hidden states.\n\nArgs:\nconfig ([`PretrainedConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model.", "source": "github-repos"}
{"code": "def wavelength_match(a, b):\n    if (type(a) == (type(b) or (isinstance(a, numbers.Number) and isinstance(b, numbers.Number)))):\n        return (a == b)\n    elif ((a is None) or (b is None)):\n        return False\n    elif (isinstance(a, (list, tuple)) and (len(a) == 3)):\n        return (a[0] <= b <= a[2])\n    elif (isinstance(b, (list, tuple)) and (len(b) == 3)):\n        return (b[0] <= a <= b[2])\n    else:\n        raise ValueError('Can only compare wavelengths of length 1 or 3')", "docstring": "Return if two wavelengths are equal.\n\nArgs:\na (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl\nb (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl", "source": "codesearchnet"}
{"code": "def lbest_idx(state, idx):\n    \n    swarm = state.swarm\n    n_s = state.params['n_s']\n    cmp = comparator(swarm[0].best_fitness)\n    indices = __lbest_indices__(len(swarm), n_s, idx)\n    best = None\n    for i in indices:\n        if best is None or cmp(swarm[i].best_fitness, swarm[best].best_fitness):\n            best = i\n    return best", "docstring": "lbest Neighbourhood topology function.\n\nNeighbourhood size is determined by state.params['n_s'].\n\nArgs:\nstate: cipy.algorithms.pso.State: The state of the PSO algorithm.\nidx: int: index of the particle in the swarm.\n\nReturns:\nint: The index of the lbest particle.", "source": "juraj-google-style"}
{"code": "def as_dict(self):\n    tags_dict = dict(self)\n    tags_dict['@module'] = self.__class__.__module__\n    tags_dict['@class'] = self.__class__.__name__\n    return tags_dict", "docstring": "Dict representation.\n\nReturns:\nDictionary of parameters from fefftags object", "source": "codesearchnet"}
{"code": "def mark_done(task_id):\n  \n  task = Task.get_by_id(task_id)\n  if task is None:\n    raise ValueError('Task with id %d does not exist' % task_id)\n  task.done = True\n  task.put()", "docstring": "Marks a task as done.\n\nArgs:\ntask_id: The integer id of the task to update.\n\nRaises:\nValueError: if the requested task doesn't exist.", "source": "juraj-google-style"}
{"code": "def _AvailableString(variables, verbose=False):\n    modules = []\n    other = []\n    for name, value in variables.items():\n        if not verbose and name.startswith('_'):\n            continue\n        if '-' in name or '/' in name:\n            continue\n        if inspect.ismodule(value):\n            modules.append(name)\n        else:\n            other.append(name)\n    lists = [('Modules', modules), ('Objects', other)]\n    list_strs = []\n    for name, varlist in lists:\n        if varlist:\n            items_str = ', '.join(sorted(varlist))\n            list_strs.append(f'{name}: {items_str}')\n    lists_str = '\\n'.join(list_strs)\n    return f'Fire is starting a Python REPL with the following objects:\\n{lists_str}\\n'", "docstring": "Returns a string describing what objects are available in the Python REPL.\n\nArgs:\nvariables: A dict of the object to be available in the REPL.\nverbose: Whether to include 'hidden' members, those keys starting with _.\nReturns:\nA string fit for printing at the start of the REPL, indicating what objects\nare available for the user to use.", "source": "github-repos"}
{"code": "def drop(self, items):\n    self._manager.leaser.remove(items)\n    self._manager.maybe_resume_consumer()", "docstring": "Remove the given messages from lease management.\n\nArgs:\nitems(Sequence[DropRequest]): The items to drop.", "source": "codesearchnet"}
{"code": "def pack_sequence_as(structure, flat_sequence):\n    return nest_util.pack_sequence_as(nest_util.Modality.DATA, structure, flat_sequence, expand_composites=False)", "docstring": "Returns a given flattened sequence packed into a nest.\n\nIf `structure` is a scalar, `flat_sequence` must be a single-element list;\nin this case the return value is `flat_sequence[0]`.\n\nArgs:\nstructure: tuple or list constructed of scalars and/or other tuples/lists,\nor a scalar.  Note: numpy arrays are considered scalars.\nflat_sequence: flat sequence to pack.\n\nReturns:\npacked: `flat_sequence` converted to have the same recursive structure as\n`structure`.\n\nRaises:\nValueError: If nest and structure have different element counts.", "source": "github-repos"}
{"code": "def AddEnumDescriptor(self, enum_desc):\n    if (not isinstance(enum_desc, descriptor.EnumDescriptor)):\n        raise TypeError('Expected instance of descriptor.EnumDescriptor.')\n    self._enum_descriptors[enum_desc.full_name] = enum_desc\n    self.AddFileDescriptor(enum_desc.file)", "docstring": "Adds an EnumDescriptor to the pool.\n\nThis method also registers the FileDescriptor associated with the message.\n\nArgs:\nenum_desc: An EnumDescriptor.", "source": "codesearchnet"}
{"code": "def stop_apppool(name):\n    \n    ps_cmd = ['Stop-WebAppPool', r\"'{0}'\".format(name)]\n\n    cmd_ret = _srvmgr(ps_cmd)\n\n    return cmd_ret['retcode'] == 0", "docstring": "Stop an IIS application pool.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the App Pool to stop.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.stop_apppool name='MyTestPool'", "source": "juraj-google-style"}
{"code": "def register_backend(name, backend, allow_overwrite=False):\n    if hasattr(Circuit, ('run_with_' + name)):\n        if allow_overwrite:\n            warnings.warn(f'Circuit has attribute `run_with_{name}`.')\n        else:\n            raise ValueError(f'Circuit has attribute `run_with_{name}`.')\n    if (not allow_overwrite):\n        if (name in BACKENDS):\n            raise ValueError(f\"Backend '{name}' is already registered as backend.\")\n    BACKENDS[name] = backend", "docstring": "Register new backend.\n\nArgs:\nname (str): The name of backend.\ngateclass (type): The type object of backend\nallow_overwrite (bool, optional): If True, allow to overwrite the existing backend.\nOtherwise, raise the ValueError.\n\nRaises:\nValueError: The name is duplicated with existing backend.\nWhen `allow_overwrite=True`, this error is not raised.", "source": "codesearchnet"}
{"code": "def _resolve_subkeys(key, separator='.'):\n    parts = key.split(separator, 1)\n    if (len(parts) > 1):\n        return parts\n    else:\n        return (parts[0], None)", "docstring": "Resolve a potentially nested key.\n\nIf the key contains the ``separator`` (e.g. ``.``) then the key will be\nsplit on the first instance of the subkey::\n\n>>> _resolve_subkeys('a.b.c')\n('a', 'b.c')\n>>> _resolve_subkeys('d|e|f', separator='|')\n('d', 'e|f')\n\nIf not, the subkey will be :data:`None`::\n\n>>> _resolve_subkeys('foo')\n('foo', None)\n\nArgs:\nkey (str): A string that may or may not contain the separator.\nseparator (str): The namespace separator. Defaults to `.`.\n\nReturns:\nTuple[str, str]: The key and subkey(s).", "source": "codesearchnet"}
{"code": "def mrc_to_marc(mrc):\n    \n    \n    lines = [\n        line\n        for line in mrc.splitlines()\n        if line.strip()\n    ]\n\n    def split_to_parts(lines):\n        for line in lines:\n            first_part, second_part = line.split(\" L \", 1)\n\n            yield line, first_part, second_part.lstrip()\n\n    control_lines = []\n    data_lines = []\n    for line, first_part, second_part in split_to_parts(lines):\n        if second_part.startswith(\"$\"):\n            data_lines.append(line)\n        else:\n            control_lines.append(line)\n\n    \n    record = MARCXMLRecord()\n    record.oai_marc = True\n    for line, descr, content in split_to_parts(control_lines):\n        record.controlfields[descr.strip()[:3]] = content\n\n    def get_subfield_dict(line):\n        fields = (\n            (field[0], field[1:])\n            for field in line.split(\"$$\")[1:]\n        )\n\n        fields_dict = defaultdict(list)\n        for key, val in fields:\n            fields_dict[key].append(val)\n\n        return fields_dict\n\n    \n    for line, descr, content_line in split_to_parts(data_lines):\n        name = descr[:3]\n        i1 = descr[3]\n        i2 = descr[4]\n\n        record.add_data_field(\n            name,\n            i1,\n            i2,\n            get_subfield_dict(content_line)\n        )\n\n    return record.to_XML()", "docstring": "Convert MRC data format to MARC XML.\n\nArgs:\nmrc (str): MRC as string.\n\nReturns:\nstr: XML with MARC.", "source": "juraj-google-style"}
{"code": "def __init__(self, thresholds=np.arange(0, 1.1, 0.1), obs_threshold=1.0, input_str=None):\n        \n        self.thresholds = thresholds\n        self.obs_threshold = obs_threshold\n        self.contingency_tables = pd.DataFrame(np.zeros((thresholds.size, 4), dtype=int),\n                                               columns=[\"TP\", \"FP\", \"FN\", \"TN\"])\n        if input_str is not None:\n            self.from_str(input_str)", "docstring": "Initializes the DistributedROC object. If input_str is not None, then the DistributedROC object is\ninitialized with the contents of input_str. Otherwise an empty contingency table is created.\n\nArgs:\nthresholds (numpy.array): Array of thresholds in increasing order.\nobs_threshold (float): Split threshold (>= is positive event) (< is negative event)\ninput_str (None or str): String containing information for DistributedROC", "source": "juraj-google-style"}
{"code": "def convert_clip(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting clip ...')\n    if (params['min'] == 0):\n        print('using ReLU({0})'.format(params['max']))\n        layer = keras.layers.ReLU(max_value=params['max'])\n    else:\n\n        def target_layer(x, vmin=params['min'], vmax=params['max']):\n            import tensorflow as tf\n            return tf.clip_by_value(x, vmin, vmax)\n        layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = layer(layers[inputs[0]])", "docstring": "Convert clip operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def delete_existing_policy(self, scaling_policy, server_group):\n    self.log.info('Deleting policy %s on %s', scaling_policy['policyName'], server_group)\n    delete_dict = {'application': self.app, 'description': 'Delete scaling policy', 'job': [{'policyName': scaling_policy['policyName'], 'serverGroupName': server_group, 'credentials': self.env, 'region': self.region, 'provider': 'aws', 'type': 'deleteScalingPolicy', 'user': 'foremast-autoscaling-policy'}]}\n    wait_for_task(json.dumps(delete_dict))", "docstring": "Given a scaling_policy and server_group, deletes the existing scaling_policy.\nScaling policies need to be deleted instead of upserted for consistency.\n\nArgs:\nscaling_policy (json): the scaling_policy json from Spinnaker that should be deleted\nserver_group (str): the affected server_group", "source": "codesearchnet"}
{"code": "def zenith_luminance(self, value=9999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `zenith_luminance`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `zenith_luminance`')\n\n        self._zenith_luminance = value", "docstring": "Corresponds to IDD Field `zenith_luminance`\nwill be missing if >= 9999\n\nArgs:\nvalue (float): value for IDD Field `zenith_luminance`\nUnit: Cd/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def noise_get(n: tcod.noise.Noise, f: Sequence[float], typ: int=NOISE_DEFAULT) -> float:\n    return float(lib.TCOD_noise_get_ex(n.noise_c, ffi.new('float[4]', f), typ))", "docstring": "Return the noise value sampled from the ``f`` coordinate.\n\n``f`` should be a tuple or list with a length matching\n:any:`Noise.dimensions`.\nIf ``f`` is shoerter than :any:`Noise.dimensions` the missing coordinates\nwill be filled with zeros.\n\nArgs:\nn (Noise): A Noise instance.\nf (Sequence[float]): The point to sample the noise from.\ntyp (int): The noise algorithm to use.\n\nReturns:\nfloat: The sampled noise value.", "source": "codesearchnet"}
{"code": "def propagate(self, token, channel):\n    if (self.get_propagate_status(token, channel) != u'0'):\n        return\n    url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))\n    req = self.remote_utils.get_url(url)\n    if (req.status_code is not 200):\n        raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))\n    return True", "docstring": "Kick off the propagate function on the remote server.\n\nArguments:\ntoken (str): The token to propagate\nchannel (str): The channel to propagate\n\nReturns:\nboolean: Success", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        super(AssetState, self).Deserialize(reader)\n        self.AssetId = reader.ReadUInt256()\n        self.AssetType = reader.ReadByte()\n        self.Name = reader.ReadVarString()\n\n        position = reader.stream.tell()\n\n        try:\n            self.Amount = reader.ReadFixed8()\n        except Exception as e:\n            reader.stream.seek(position)\n            self.Amount = reader.ReadFixed8()\n\n        self.Available = reader.ReadFixed8()\n        self.Precision = reader.ReadByte()\n\n        \n        reader.ReadByte()\n\n        self.Fee = reader.ReadFixed8()\n        self.FeeAddress = reader.ReadUInt160()\n        self.Owner = ECDSA.Deserialize_Secp256r1(reader)\n        self.Admin = reader.ReadUInt160()\n        self.Issuer = reader.ReadUInt160()\n        self.Expiration = reader.ReadUInt32()\n        self.IsFrozen = reader.ReadBool()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "List all GitHubEnterpriseConfigs for a given project.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsGithubEnterpriseConfigsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListGithubEnterpriseConfigsResponse) The response message.", "source": "github-repos"}
{"code": "def __init__(self, name: Union[str, Sequence[str]], _sql_data_type: StandardSqlDataType, _sql_alias: Optional[str]=None) -> None:\n    if isinstance(name, str):\n        self.dotted_path = (name,)\n    else:\n        self.dotted_path = name\n    self._sql_data_type = _sql_data_type\n    self._sql_alias = _sql_alias", "docstring": "Builds an identifier.\n\nArgs:\nname: Either a single name or a sequence of names representing a dotted\npath. A sequence like ('a', 'b') will result in SQL like 'SELECT a.b'.\n_sql_data_type: The type of the values behind the identifier.\n_sql_alias: The alias of the identifier. Defaults to the last element in\nthe dotted identifier path.", "source": "github-repos"}
{"code": "def strip_unused(input_graph_def, input_node_names, output_node_names, placeholder_type_enum):\n    for name in input_node_names:\n        if ':' in name:\n            raise ValueError(f\"Name '{name}' appears to refer to a Tensor, not an Operation.\")\n    not_found = {name for name in input_node_names}\n    inputs_replaced_graph_def = graph_pb2.GraphDef()\n    for node in input_graph_def.node:\n        if node.name in input_node_names:\n            not_found.remove(node.name)\n            placeholder_node = node_def_pb2.NodeDef()\n            placeholder_node.op = 'Placeholder'\n            placeholder_node.name = node.name\n            if isinstance(placeholder_type_enum, list):\n                input_node_index = input_node_names.index(node.name)\n                placeholder_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=placeholder_type_enum[input_node_index]))\n            else:\n                placeholder_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=placeholder_type_enum))\n            if '_output_shapes' in node.attr:\n                placeholder_node.attr['_output_shapes'].CopyFrom(node.attr['_output_shapes'])\n            if 'shape' in node.attr:\n                placeholder_node.attr['shape'].CopyFrom(node.attr['shape'])\n            inputs_replaced_graph_def.node.extend([placeholder_node])\n        else:\n            inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])\n    if not_found:\n        raise KeyError(f'The following input nodes were not found: {not_found}.')\n    output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def, output_node_names)\n    return output_graph_def", "docstring": "Removes unused nodes from a GraphDef.\n\nArgs:\ninput_graph_def: A graph with nodes we want to prune.\ninput_node_names: A list of the nodes we use as inputs.\noutput_node_names: A list of the output nodes.\nplaceholder_type_enum: The AttrValue enum for the placeholder data type, or\na list that specifies one value per input node name.\n\nReturns:\nA `GraphDef` with all unnecessary ops removed.\n\nRaises:\nValueError: If any element in `input_node_names` refers to a tensor instead\nof an operation.\nKeyError: If any element in `input_node_names` is not found in the graph.", "source": "github-repos"}
{"code": "def launchQueryForMode(self, query=None, mode=None):\n    qURL = self.createURL(word=query, mode=mode)\n    i3Browser = browser.Browser()\n    try:\n        if self.needsCredentials[mode]:\n            self._getAuthenticated(i3Browser, qURL)\n            data = i3Browser.recoverURL(qURL)\n        else:\n            data = i3Browser.recoverURL(qURL)\n        return data\n    except KeyError:\n        print(general.error(\"[*] '{}' is not a valid mode for this wrapper ({}).\".format(mode, self.__class__.__name__)))\n    return None", "docstring": "Method that launches an i3Browser to collect data.\n\nArgs:\n-----\nquery: The query to be performed\nmode: The mode to be used to build the query.\n\nReturn:\n-------\nA string containing the recovered data or None.", "source": "codesearchnet"}
{"code": "def branch_lengths(self, terminal=True, internal=True):\n    if (not isinstance(terminal, bool)):\n        raise TypeError('terminal must be a bool')\n    if (not isinstance(internal, bool)):\n        raise TypeError('internal must be a bool')\n    for node in self.traverse_preorder():\n        if ((internal and (not node.is_leaf())) or (terminal and node.is_leaf())):\n            if (node.edge_length is None):\n                (yield 0)\n            else:\n                (yield node.edge_length)", "docstring": "Generator over the lengths of the selected branches of this ``Tree``. Edges with length ``None`` will be output as 0-length\n\nArgs:\n``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``", "source": "codesearchnet"}
{"code": "def parse_raw_fact(raw_fact):\n\n    def at_split(string):\n        \"\\n        Return everything in front of the (leftmost) '@'-symbol, if it was used.\\n\\n        Args:\\n            string (str):  The string to be parsed.\\n\\n        Returns:\\n            tuple: (front, back) representing the substrings before and after the\\n                most left ``@`` symbol. If no such symbol was present at all,\\n                ``back=None``. Both substrings have been trimmed of any leading\\n                and trailing whitespace.\\n\\n        Note:\\n            If our string contains multiple ``@`` symbols, all but the most left\\n            one will be treated as part of the regular ``back`` string.\\n            This allows for usage of the symbol in descriptions, categories and tags.\\n\\n            Also note that *no tags are extracted* any tags included will be considered\\n            part of the ``category`` string. We are likely to remove this parsing function\\n            in ``0.14.0`` in favour of a regex based solution so we will not spend\\n            time on tags for now\\n        \"\n        result = string.split('@', 1)\n        length = len(result)\n        if (length == 1):\n            (front, back) = (result[0].strip(), None)\n        else:\n            (front, back) = result\n            (front, back) = (front.strip(), back.strip())\n        return (front, back)\n\n    def comma_split(string):\n        '\\n        Split string at the most left comma.\\n\\n        Args:\\n            string (str): String to be processed. At this stage this should\\n                look something like ``<Category> and <tags>, <Description>\\n\\n\\n        Returns\\n            tuple: (category_and_tags, description). Both substrings have their\\n                leading/trailing whitespace removed.\\n                ``category_and_tags`` may include >=0 tags indicated by a leading ``\n        result = string.split(',', 1)\n        length = len(result)\n        if (length == 1):\n            (category, description) = (result[0].strip(), None)\n        else:\n            (category, description) = tuple(result)\n            (category, description) = (category.strip(), description.strip())\n        return (category.strip(), description)\n    (time_info, rest) = time_helpers.extract_time_info(raw_fact)\n    (activity_name, back) = at_split(rest)\n    if back:\n        (category_name, description) = comma_split(back)\n    else:\n        (category_name, description) = (None, None)\n    return {'timeinfo': time_info, 'category': category_name, 'activity': activity_name, 'description': description}", "docstring": "Extract semantically meaningful sub-components from a ``raw fact`` text.\n\nArgs:\nraw_fact (text_type): ``raw fact`` text to be parsed.\n\nReturns:\ndict: dict with sub-components as values.", "source": "codesearchnet"}
{"code": "def unused(node):\n    cfg.forward(node, cfg.ReachingDefinitions())\n    unused_obj = Unused()\n    unused_obj.visit(node)\n    return unused_obj.unused", "docstring": "Find unused definitions that can be remove.\n\nThis runs reaching definitions analysis followed by a walk over the AST to\nfind all variable definitions that are not used later on.\n\nArgs:\nnode: The AST of e.g. a function body to find unused variable definitions.\n\nReturns:\nunused: After visiting all the nodes, this attribute contanis a set of\ndefinitions in the form of `(variable_name, node)` pairs which are\nunused in this AST.", "source": "codesearchnet"}
{"code": "def set_max_freq(self, max_freq=None):\n    if max_freq:\n        self['max_freq'] = max_freq\n    else:\n        for frequency in self['frequencies']:\n            if self['max_freq']:\n                if (frequency['value'] > self['max_freq']):\n                    self['max_freq'] = frequency['value']\n            else:\n                self['max_freq'] = frequency['value']\n    return", "docstring": "Set the max frequency for the variant\n\nIf max_freq use this, otherwise go through all frequencies and\nset the highest as self['max_freq']\n\nArgs:\nmax_freq (float): The max frequency", "source": "codesearchnet"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(ExtensionInformation, self).read(\n            istream,\n            kmip_version=kmip_version\n        )\n        tstream = BytearrayStream(istream.read(self.length))\n\n        self.extension_name.read(tstream, kmip_version=kmip_version)\n\n        if self.is_tag_next(Tags.EXTENSION_TAG, tstream):\n            self.extension_tag = ExtensionTag()\n            self.extension_tag.read(tstream, kmip_version=kmip_version)\n        if self.is_tag_next(Tags.EXTENSION_TYPE, tstream):\n            self.extension_type = ExtensionType()\n            self.extension_type.read(tstream, kmip_version=kmip_version)\n\n        self.is_oversized(tstream)\n        self.validate()", "docstring": "Read the data encoding the ExtensionInformation object and decode it\ninto its constituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def get_interpolated_value(self, x):\n        \n        if len(self.ydim) == 1:\n            return get_linear_interpolated_value(self.x, self.y, x)\n        else:\n            return [get_linear_interpolated_value(self.x, self.y[:, k], x)\n                    for k in range(self.ydim[1])]", "docstring": "Returns an interpolated y value for a particular x value.\n\nArgs:\nx: x value to return the y value for\n\nReturns:\nValue of y at x", "source": "juraj-google-style"}
{"code": "def FromJson(json):\n    type = ContractParameterType.FromString(json['type'])\n    value = json['value']\n    param = ContractParameter(type=type, value=None)\n    if ((type == ContractParameterType.Signature) or (type == ContractParameterType.ByteArray)):\n        param.Value = bytearray.fromhex(value)\n    elif (type == ContractParameterType.Boolean):\n        param.Value = bool(value)\n    elif (type == ContractParameterType.Integer):\n        param.Value = int(value)\n    elif (type == ContractParameterType.Hash160):\n        param.Value = UInt160.ParseString(value)\n    elif (type == ContractParameterType.Hash256):\n        param.Value = UInt256.ParseString(value)\n    elif (type == ContractParameterType.PublicKey):\n        param.Value = ECDSA.decode_secp256r1(value).G\n    elif (type == ContractParameterType.String):\n        param.Value = str(value)\n    elif (type == ContractParameterType.Array):\n        val = [ContractParameter.FromJson(item) for item in value]\n        param.Value = val\n    return param", "docstring": "Convert a json object to a ContractParameter object\n\nArgs:\nitem (dict): The item to convert to a ContractParameter object\n\nReturns:\nContractParameter", "source": "codesearchnet"}
{"code": "def set_lacp_fallback(self, name, mode=None):\n    if (mode not in ['disabled', 'static', 'individual']):\n        return False\n    disable = (True if (mode == 'disabled') else False)\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('port-channel lacp fallback', value=mode, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the Port-Channel lacp_fallback\n\nArgs:\nname(str): The Port-Channel interface name\n\nmode(str): The Port-Channel LACP fallback setting\nValid values are 'disabled', 'static', 'individual':\n\n* static  - Fallback to static LAG mode\n* individual - Fallback to individual ports\n* disabled - Disable LACP fallback\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def ParseMetadataFile(self, parser_mediator, file_entry, data_stream_name):\n    parent_path_spec = getattr(file_entry.path_spec, 'parent', None)\n    filename_upper = file_entry.name.upper()\n    if (self._mft_parser and parent_path_spec and (filename_upper in ('$MFT', '$MFTMIRR')) and (not data_stream_name)):\n        self._ParseDataStreamWithParser(parser_mediator, self._mft_parser, file_entry, '')\n    elif (self._usnjrnl_parser and parent_path_spec and (filename_upper == '$USNJRNL') and (data_stream_name == '$J')):\n        volume_file_object = path_spec_resolver.Resolver.OpenFileObject(parent_path_spec, resolver_context=parser_mediator.resolver_context)\n        try:\n            self._ParseFileEntryWithParser(parser_mediator, self._usnjrnl_parser, file_entry, file_object=volume_file_object)\n        finally:\n            volume_file_object.close()", "docstring": "Parses a metadata file.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): data stream name.", "source": "codesearchnet"}
{"code": "def _derive_namespaces(self):\n\n\t\t\n\n\t\t\n\t\tfor graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:\n\t\t\tfor s,p,o in graph:\n\t\t\t\ttry:\n\t\t\t\t\tns_prefix, ns_uri, predicate = graph.compute_qname(p) \n\t\t\t\t\tself.update_namespaces.add(ns_uri)\n\t\t\t\texcept:\n\t\t\t\t\tlogger.debug('could not parse Object URI: %s' % ns_uri)\n\t\t\t\ttry:\n\t\t\t\t\tns_prefix, ns_uri, predicate = graph.compute_qname(o) \n\t\t\t\t\tself.update_namespaces.add(ns_uri)\n\t\t\t\texcept:\n\t\t\t\t\tlogger.debug('could not parse Object URI: %s' % ns_uri)\n\t\tlogger.debug(self.update_namespaces)\n\n\t\t\n\t\t\n\t\tfor ns_uri in self.update_namespaces:\n\t\t\tfor k in self.prefixes.__dict__:\n\t\t\t\tif str(ns_uri) == str(self.prefixes.__dict__[k]):\n\t\t\t\t\tlogger.debug('adding prefix %s for uri %s to unique_prefixes' % (k,str(ns_uri)))\n\t\t\t\t\tself.update_prefixes[k] = self.prefixes.__dict__[k]", "docstring": "Small method to loop through three graphs in self.diffs, identify unique namespace URIs.\nThen, loop through provided dictionary of prefixes and pin one to another.\n\nArgs:\nNone: uses self.prefixes and self.diffs\n\nReturns:\nNone: sets self.update_namespaces and self.update_prefixes", "source": "juraj-google-style"}
{"code": "def dataframe(start_row=0, max_rows=None, use_cache=True):\n    output = QueryOutput()\n    output._output_type = 'dataframe'\n    output._dataframe_start_row = start_row\n    output._dataframe_max_rows = max_rows\n    output._use_cache = use_cache\n    return output", "docstring": "Construct a query output object where the result is a dataframe\n\nArgs:\nstart_row: the row of the table at which to start the export (default 0).\nmax_rows: an upper limit on the number of rows to export (default None).\nuse_cache: whether to use cached results or not (default True).", "source": "codesearchnet"}
{"code": "def call_rpc(*inputs, **kwargs):\n    rpc_executor = kwargs['rpc_executor']\n    output = []\n    try:\n        value = inputs[1].pop()\n        addr = (value.value >> 16)\n        rpc_id = (value.value & 65535)\n        reading_value = rpc_executor.rpc(addr, rpc_id)\n        output.append(IOTileReading(0, 0, reading_value))\n    except (HardwareError, StreamEmptyError):\n        pass\n    for input_x in inputs:\n        input_x.skip_all()\n    return output", "docstring": "Call an RPC based on the encoded value read from input b.\n\nThe response of the RPC must be a 4 byte value that is used as\nthe output of this call.  The encoded RPC must be a 32 bit value\nencoded as \"BBH\":\nB: ignored, should be 0\nB: the address of the tile that we should call\nH: The id of the RPC to call\n\nAll other readings are then skipped so that there are no\nreadings in any input queue when this function returns\n\nReturns:\nlist(IOTileReading)", "source": "codesearchnet"}
{"code": "def openResultsInBrowser(res):\n    print(emphasis('\\n\\tOpening URIs in the default web browser...'))\n    urisToBrowser(['https:\n    time.sleep(2)\n    uris = []\n    for r in res:\n        for att in r['attributes']:\n            if (att['type'] == 'i3visio.uri'):\n                uris.append(att['value'])\n    urisToBrowser(uris)", "docstring": "Method that collects the URI from a list of entities and opens them\n\nArgs:\n-----\nres: A list containing several i3visio entities.", "source": "codesearchnet"}
{"code": "def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None):\n        \n        if not isinstance(scf_task, ScfTask):\n            raise TypeError(\"task `%s` does not inherit from ScfTask\" % scf_task)\n\n        new = cls(manager=manager)\n\n        \n        multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)\n\n        ddk_tasks = []\n        for ddk_inp in multi_ddk:\n            ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: \"WFK\"})\n            ddk_tasks.append(ddk_task)\n\n        \n        \n        multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)\n\n        \n        \n        \n        dde_tasks = []\n        dde_deps = {ddk_task: \"DDK\" for ddk_task in ddk_tasks}\n        dde_deps.update({scf_task: \"WFK\"})\n        for dde_inp in multi_dde:\n            dde_task = new.register_dde_task(dde_inp, deps=dde_deps)\n            dde_tasks.append(dde_task)\n\n        \n        dte_deps = {scf_task: \"WFK DEN\"}\n        dte_deps.update({dde_task: \"1WF 1DEN\" for dde_task in dde_tasks})\n\n        multi_dte = scf_task.input.make_dte_inputs()\n        dte_tasks = []\n        for dte_inp in multi_dte:\n             dte_task = new.register_dte_task(dte_inp, deps=dte_deps)\n             dte_tasks.append(dte_task)\n\n        return new", "docstring": "Build a DteWork from a ground-state task.\n\nArgs:\nscf_task: ScfTask object.\nddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default.\nmanager: :class:`TaskManager` object.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 input_file=\"mol.qin\",\n                 output_file=\"mol.qout\",\n                 scf_max_cycles=200,\n                 geom_max_cycles=200):\n        \n        self.input_file = input_file\n        self.output_file = output_file\n        self.scf_max_cycles = scf_max_cycles\n        self.geom_max_cycles = geom_max_cycles\n        self.outdata = None\n        self.errors = []\n        self.opt_error_history = []", "docstring": "Initializes the error handler from a set of input and output files.\n\nArgs:\ninput_file (str): Name of the QChem input file.\noutput_file (str): Name of the QChem output file.\nscf_max_cycles (int): The max iterations to set to fix SCF failure.\ngeom_max_cycles (int): The max iterations to set to fix geometry\noptimization failure.", "source": "juraj-google-style"}
{"code": "def lat_id(self, line):\n    if (self.grid == 'WAC'):\n        lat = (((((1 + self.LINE_PROJECTION_OFFSET) - line) * self.MAP_SCALE) * 0.001) / self.A_AXIS_RADIUS)\n        return ((lat * 180) / np.pi)\n    else:\n        lat = (float(self.CENTER_LATITUDE) - (((line - float(self.LINE_PROJECTION_OFFSET)) - 1) / float(self.MAP_RESOLUTION)))\n        return lat", "docstring": "Return the corresponding latitude\n\nArgs:\nline (int): Line number\n\nReturns:\nCorreponding latitude in degree", "source": "codesearchnet"}
{"code": "def caleom(date):\n        \n        date = parsefun(date)\n        date += datetime.timedelta(days=32-date.day)\n        date -= datetime.timedelta(days=date.day)\n        return date", "docstring": "Adjust date to last day of the month, regardless of work days.\n\nArgs:\ndate (date, datetime or str): Date to be adjusted.\n\nReturns:\ndatetime: Adjusted date.", "source": "juraj-google-style"}
{"code": "def CheckDisjointCalendars(self):\n    a_service_periods = self.feed_merger.a_schedule.GetServicePeriodList()\n    b_service_periods = self.feed_merger.b_schedule.GetServicePeriodList()\n    for a_service_period in a_service_periods:\n        (a_start, a_end) = a_service_period.GetDateRange()\n        for b_service_period in b_service_periods:\n            (b_start, b_end) = b_service_period.GetDateRange()\n            overlap_start = max(a_start, b_start)\n            overlap_end = min(a_end, b_end)\n            if (overlap_end >= overlap_start):\n                return False\n    return True", "docstring": "Check whether any old service periods intersect with any new ones.\n\nThis is a rather coarse check based on\ntransitfeed.SevicePeriod.GetDateRange.\n\nReturns:\nTrue if the calendars are disjoint or False if not.", "source": "codesearchnet"}
{"code": "def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates, padding, use_gpu, dtype=dtypes.float32):\n    assert image_shape[3] == kernel_shape[2]\n    np.random.seed(1)\n    image = np.random.random_sample(image_shape).astype(np.float32)\n    kernel = np.random.random_sample(kernel_shape).astype(np.float32)\n    strides = [1] + strides + [1]\n    rates = [1] + rates + [1]\n    image_tensor = constant_op.constant(image, shape=image_shape, name='input', dtype=dtype)\n    kernel_tensor = constant_op.constant(kernel, shape=kernel_shape, name='filter', dtype=dtype)\n\n    def compute_dilation2d(image_tensor, kernel_tensor):\n        return nn_ops.dilation2d(image_tensor, kernel_tensor, strides=strides, rates=rates, padding=padding, name='dilation2d')\n    with test_util.device(use_gpu=use_gpu):\n        with self.cached_session():\n            err1 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_dilation2d(x, kernel_tensor), [image_tensor]))\n            err2 = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(lambda x: compute_dilation2d(image_tensor, x), [kernel_tensor]))\n            err = max(err1, err2)\n    print('Dilation gradient error = %f' % err)\n    if dtype == dtypes.bfloat16:\n        self.assertLess(err, 4.0)\n    else:\n        self.assertLess(err, 0.0001)", "docstring": "Verifies the gradients of the dilation function.\n\nArgs:\nimage_shape: Input shape, [batch, in_height, in_width, channels].\nkernel_shape: Filter shape, [filter_height, filter_width, channels].\nstrides: Output strides, specified as [stride_height, stride_width].\nrates: Atrous rates, specified as [rate_height, rate_width].\npadding: Padding type.\nuse_gpu: Whether we are running on GPU.", "source": "github-repos"}
{"code": "def _get_function_inputs(f, src_kwargs):\n  \n  if hasattr(f, \"_func\"):  \n    f = f._func  \n\n  try:  \n    argspec = inspect.getfullargspec(f)\n  except AttributeError:\n    argspec = inspect.getargspec(f)\n\n  fkwargs = {k: v for k, v in six.iteritems(src_kwargs) if k in argspec.args}\n  return fkwargs", "docstring": "Filters inputs to be compatible with function `f`'s signature.\n\nArgs:\nf: Function according to whose input signature we filter arguments.\nsrc_kwargs: Keyword arguments to filter according to `f`.\n\nReturns:\nkwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s\nsignature.", "source": "juraj-google-style"}
{"code": "def stop_loss_replace(self, accountID, orderID, **kwargs):\n    return self.replace(accountID, orderID, order=StopLossOrderRequest(**kwargs))", "docstring": "Shortcut to replace a pending Stop Loss Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Stop Loss Order to replace\nkwargs : The arguments to create a StopLossOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def t2t_train(model_name, dataset_name,\n              data_dir=None, output_dir=None, config_file=None, config=None):\n  \n  if model_name not in _MODEL_REGISTRY:\n    raise ValueError(\"Model %s not in registry. Available models:\\n * %s.\" %\n                     (model_name, \"\\n * \".join(_MODEL_REGISTRY.keys())))\n  model_class = _MODEL_REGISTRY[model_name]()\n  gin.bind_parameter(\"train_fn.model_class\", model_class)\n  gin.bind_parameter(\"train_fn.dataset\", dataset_name)\n  gin.parse_config_files_and_bindings(config_file, config)\n  \n  train_fn(data_dir, output_dir=output_dir)", "docstring": "Main function to train the given model on the given dataset.\n\nArgs:\nmodel_name: The name of the model to train.\ndataset_name: The name of the dataset to train on.\ndata_dir: Directory where the data is located.\noutput_dir: Directory where to put the logs and checkpoints.\nconfig_file: the gin configuration file to use.\nconfig: string (in gin format) to override gin parameters.", "source": "juraj-google-style"}
{"code": "def directed_bipartition(seq, nontrivial=False):\n    bipartitions = [(tuple((seq[i] for i in part0_idx)), tuple((seq[j] for j in part1_idx))) for (part0_idx, part1_idx) in directed_bipartition_indices(len(seq))]\n    if nontrivial:\n        return bipartitions[1:(- 1)]\n    return bipartitions", "docstring": "Return a list of directed bipartitions for a sequence.\n\nArgs:\nseq (Iterable): The sequence to partition.\n\nReturns:\nlist[tuple[tuple]]: A list of tuples containing each of the two\nparts.\n\nExample:\n>>> directed_bipartition((1, 2, 3))  # doctest: +NORMALIZE_WHITESPACE\n[((), (1, 2, 3)),\n((1,), (2, 3)),\n((2,), (1, 3)),\n((1, 2), (3,)),\n((3,), (1, 2)),\n((1, 3), (2,)),\n((2, 3), (1,)),\n((1, 2, 3), ())]", "source": "codesearchnet"}
{"code": "def summarize(self, test_arr, vectorizable_token, sentence_list, limit=5):\n        \n        if isinstance(vectorizable_token, VectorizableToken) is False:\n            raise TypeError()\n\n        _ = self.inference(test_arr)\n        _, loss_arr, _ = self.compute_retrospective_loss()\n\n        loss_list = loss_arr.tolist()\n\n        abstract_list = []\n        for i in range(limit):\n            key = loss_arr.argmin()\n            _ = loss_list.pop(key)\n            loss_arr = np.array(loss_list)\n\n            seq_arr = test_arr[key]\n            token_arr = vectorizable_token.tokenize(seq_arr.tolist())\n            s = \" \".join(token_arr.tolist())\n            _s = \"\".join(token_arr.tolist())\n\n            for sentence in sentence_list:\n                if s in sentence or _s in sentence:\n                    abstract_list.append(sentence)\n                    abstract_list = list(set(abstract_list))\n\n            if len(abstract_list) >= limit:\n                break\n\n        return abstract_list", "docstring": "Summarize input document.\n\nArgs:\ntest_arr:               `np.ndarray` of observed data points..\nvectorizable_token:     is-a `VectorizableToken`.\nsentence_list:          `list` of all sentences.\nlimit:                  The number of selected abstract sentence.\n\nReturns:\n`list` of `str` of abstract sentences.", "source": "juraj-google-style"}
{"code": "def get_reduced(self, column_reductions):\n        \n        for cr in column_reductions:\n            if cr not in self.column_reductions:\n                raise ValueError(\"Column reduction %r is not known to this Aggregator!\" % cr)\n        return self.reduced_df[column_reductions]", "docstring": "This function gets called by ColumnFunction._apply(). After a ColumnFunction\nhas been passed to Aggregator's constructor, the ColumnFunction can use this function\nto request the populated, aggregated columns that correspond to its ColumnReductions.\n\nArgs:\ncolumn_reduction (list[ColumnReduction])\n\nReturns:\npd.DataFrame: A dataframe, where the column names are ColumnReductions.", "source": "juraj-google-style"}
{"code": "def _SetYaraRules(self, yara_rules_string):\n    \n    if not yara_rules_string:\n      return\n\n    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(\n        'yara')\n    analyzer_object.SetRules(yara_rules_string)\n    self._analyzers.append(analyzer_object)", "docstring": "Sets the Yara rules.\n\nArgs:\nyara_rules_string (str): unparsed Yara rule definitions.", "source": "juraj-google-style"}
{"code": "def readyup_entity(self, label: str, type: str, uid: Union[(int, str)]=None, comment: str=None, definition: str=None, superclass: str=None, synonyms: list=None, existing_ids: List[dict]=None) -> dict:\n    entity = dict(label=label, type=type)\n    if uid:\n        entity['uid'] = uid\n    if definition:\n        entity['definition'] = definition\n    if comment:\n        entity['comment'] = comment\n    if superclass:\n        entity['superclass'] = {'ilx_id': self.fix_ilx(superclass)}\n    if synonyms:\n        entity['synonyms'] = [{'literal': syn} for syn in synonyms]\n    if existing_ids:\n        if (existing_ids[0].get('curie') and existing_ids[0].get('iri')):\n            pass\n        else:\n            exit('Need curie and iri for existing_ids in List[dict] form')\n        entity['existing_ids'] = existing_ids\n    return entity", "docstring": "Setups the entity to be InterLex ready\n\nArgs:\nlabel: name of entity\ntype: entities type\nCan be any of the following: term, cde, fde, pde, annotation, relationship\nuid: usually fine and auto completes to api user ID, but if you provide one with a\nclearance higher than 0 you can make your own custom. Good for mass imports by one\nperson to avoid label collides.\ndefinition: entities definition\ncomment: a foot note regarding either the interpretation of the data or the data itself\nsuperclass: entity is a sub-part of this entity\nExample: Organ is a superclass to Brain\nsynonyms: entity synonyms\nexisting_ids: existing curie/iris that link data | couldnt format this easier\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def clean(self, value, *_):\n    if ((not value) or (not isinstance(value, LocalizedValue))):\n        return None\n    is_all_null = True\n    for (lang_code, _) in settings.LANGUAGES:\n        if (value.get(lang_code) is not None):\n            is_all_null = False\n            break\n    if (is_all_null and self.null):\n        return None\n    return value", "docstring": "Cleans the specified value into something we\ncan store in the database.\n\nFor example, when all the language fields are\nleft empty, and the field is allowed to be null,\nwe will store None instead of empty keys.\n\nArguments:\nvalue:\nThe value to clean.\n\nReturns:\nThe cleaned value, ready for database storage.", "source": "codesearchnet"}
{"code": "def autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose):\n    node = annotate.resolve_calls(func)\n    node = desugar.explicit_loop_indexes(node)\n    fence.validate(node, inspect.getsource(func))\n    node = anf_.anf(node)\n    if (verbose >= 2):\n        print('ANF')\n        print(quoting.to_source(node))\n    if (mode == 'reverse'):\n        (node, required, stack) = reverse_ad.reverse_ad(node.body[0], wrt, preserve_result, check_dims)\n        if (verbose >= 2):\n            print('RAW')\n            print(quoting.to_source(node))\n        if (motion == 'split'):\n            node = reverse_ad.split(node, stack)\n        else:\n            node = reverse_ad.joint(node)\n        if (verbose >= 2):\n            print('MOTION')\n            print(quoting.to_source(node))\n    elif (mode == 'forward'):\n        (node, required) = forward_ad.forward_ad(node.body[0], wrt, preserve_result, check_dims)\n    return (node, required)", "docstring": "Perform AD on a single function and return the AST.\n\nArgs:\nSee `grad`.\n\nReturns:\nnode: The AST of a module containing the adjoint and primal function\ndefinitions.\nrequired: A list of non-built in functions that this function called, and\nof which the primals and adjoints need to be made available in order\nfor the returned function to run.", "source": "codesearchnet"}
{"code": "def identify_triggers(\n    cfg,\n    sources,\n    sinks,\n    lattice,\n    nosec_lines\n):\n    \n    assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode)\n    tainted_nodes = filter_cfg_nodes(cfg, TaintedNode)\n    tainted_trigger_nodes = [\n        TriggerNode(\n            Source('Framework function URL parameter'),\n            cfg_node=node\n        ) for node in tainted_nodes\n    ]\n    sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines)\n    sources_in_file.extend(tainted_trigger_nodes)\n\n    find_secondary_sources(assignment_nodes, sources_in_file, lattice)\n\n    sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines)\n\n    sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file)\n\n    return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict)", "docstring": "Identify sources, sinks and sanitisers in a CFG.\n\nArgs:\ncfg(CFG): CFG to find sources, sinks and sanitisers in.\nsources(tuple): list of sources, a source is a (source, sanitiser) tuple.\nsinks(tuple): list of sources, a sink is a (sink, sanitiser) tuple.\nnosec_lines(set): lines with # nosec whitelisting\n\nReturns:\nTriggers tuple with sink and source nodes and a sanitiser node dict.", "source": "juraj-google-style"}
{"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        tstream = BytearrayStream()\n\n        \n        if self.unique_identifier is not None:\n            self.unique_identifier.write(tstream, kmip_version=kmip_version)\n\n        \n        self.length = tstream.length()\n        super(ActivateRequestPayload, self).write(\n            ostream,\n            kmip_version=kmip_version\n        )\n        ostream.write(tstream.buffer)", "docstring": "Write the data encoding the ActivateRequestPayload object to a stream.\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def coerce(self, value):\n        \n        if isinstance(value, compat.basestring):\n\n            return value\n\n        return str(value)", "docstring": "Convert any value into a string value.\n\nArgs:\nvalue (any): The value to coerce.\n\nReturns:\nstr: The string representation of the value.", "source": "juraj-google-style"}
{"code": "def match_Signature_against_Signature(self, sig1, sig2, subst, skip_self=False):\n    subst.update({p.type_param: None for p in sig1.template + sig2.template})\n    params1 = sig1.params\n    params2 = sig2.params\n    if skip_self:\n        assert params1 and params1[0].name == 'self'\n        params1 = params1[1:]\n        if params2 and params2[0].name == 'self':\n            params2 = params2[1:]\n    equalities = []\n    if len(params1) > len(params2) and (not sig2.has_optional):\n        return booleq.FALSE\n    if sig1.starargs is not None and sig2.starargs is not None:\n        equalities.append(self.match_type_against_type(sig1.starargs.type, sig2.starargs.type, subst))\n    if sig1.starstarargs is not None and sig2.starstarargs is not None:\n        equalities.append(self.match_type_against_type(sig1.starstarargs.type, sig2.starstarargs.type, subst))\n    for p1, p2 in zip(params1, params2):\n        if p1.optional and (not p2.optional):\n            return booleq.FALSE\n    for i, p2 in enumerate(params2):\n        if i >= len(params1):\n            if not p2.optional:\n                return booleq.FALSE\n            else:\n                pass\n        else:\n            p1 = params1[i]\n            if p1.name != p2.name and (not (pytd_utils.ANON_PARAM.match(p1.name) or pytd_utils.ANON_PARAM.match(p2.name))):\n                return booleq.FALSE\n            equalities.append(self.match_type_against_type(p1.type, p2.type, subst))\n    equalities.append(self.match_type_against_type(sig1.return_type, sig2.return_type, subst))\n    return booleq.And(equalities)", "docstring": "Match a pytd.Signature against another pytd.Signature.\n\nArgs:\nsig1: The caller\nsig2: The callee\nsubst: Current type parameters.\nskip_self: If True, doesn't compare the first parameter, which is\nconsidered (and verified) to be \"self\".\n\nReturns:\nAn instance of booleq.BooleanTerm, i.e. a boolean formula.", "source": "github-repos"}
{"code": "def interceptable(func):\n  \n  @functools.wraps(func)\n  def func_wrapped(*args, **kwargs):\n    with get_next_interceptor() as interceptor:\n      return interceptor(func, *args, **kwargs)\n\n  return func_wrapped", "docstring": "Decorator that wraps `func` so that its execution is intercepted.\n\nThe wrapper passes `func` to the interceptor for the current thread.\n\nIf there is no next interceptor, we perform an \"immediate\" call to `func`.\nThat is, `func` terminates without forwarding its execution to another\ninterceptor.\n\nArgs:\nfunc: Function to wrap.\n\nReturns:\nThe decorated function.", "source": "juraj-google-style"}
{"code": "def element_or_none(self, using, value):\n        \n        try:\n            return self._execute(Command.FIND_ELEMENT, {\n                'using': using,\n                'value': value\n            })\n        except:\n            return None", "docstring": "Check if an element in the current context.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nReturn Element if the element does exists and return None otherwise.\n\nRaises:\nWebDriverException.", "source": "juraj-google-style"}
{"code": "def error_message(channel, err_title, err_message):\n    \n\n    \n    gui = ui_embed.UI(\n            channel,\n            err_title,\n            err_message,\n            modulename=modulename,\n            colour=modulecolor_error\n    )\n\n    return gui", "docstring": "Creates an embed UI for the topic update\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nerr_title: The title for the error\nerr_message: The message for the error\n\nReturns:\nembed: The created embed", "source": "juraj-google-style"}
{"code": "def rand_ascii_str(length):\n    letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]\n    return ''.join(letters)", "docstring": "Generates a random string of specified length, composed of ascii letters\nand digits.\n\nArgs:\nlength: The number of characters in the string.\n\nReturns:\nThe random string generated.", "source": "github-repos"}
{"code": "def avl_split_first(root):\n    if (root is None):\n        raise IndexError('Empty tree has no maximum element')\n    (root, left, right) = avl_release_kids(root)\n    if (left is None):\n        (new_root, first_node) = (right, root)\n    else:\n        (new_left, first_node) = avl_split_first(left)\n        new_root = avl_join(new_left, right, root)\n    return (new_root, first_node)", "docstring": "Removes the minimum element from the tree\n\nReturns:\ntuple: new_root, first_node\n\nO(log(n)) = O(height(root))", "source": "codesearchnet"}
{"code": "def multiply(self, other):\n        \n        if not isinstance(other, Number):\n            raise QiskitError(\"other is not a number\")\n        return SuperOp(other * self._data, self.input_dims(),\n                       self.output_dims())", "docstring": "Return the QuantumChannel self + other.\n\nArgs:\nother (complex): a complex number.\n\nReturns:\nSuperOp: the scalar multiplication other * self as a SuperOp object.\n\nRaises:\nQiskitError: if other is not a valid scalar.", "source": "juraj-google-style"}
{"code": "def __call__(self, data):\n        \n        if isinstance(data, dict):\n            \n            return json.dumps({k: _ndarray_to_list(v) for k, v in six.iteritems(data)})\n\n        \n        if hasattr(data, 'read'):\n            return _json_serialize_from_buffer(data)\n\n        return json.dumps(_ndarray_to_list(data))", "docstring": "Take data of various formats and serialize them into the expected request body.\nThis uses information about supported input formats for the deployed model.\n\nArgs:\ndata (object): Data to be serialized.\n\nReturns:\nobject: Serialized data used for the request.", "source": "juraj-google-style"}
{"code": "def _to_bfloat16_unbiased(x, noise):\n  \n  x_sign = tf.sign(x)\n  \n  x = x * x_sign + 1e-30\n  cand1 = tf.to_bfloat16(x)\n  cand1_f = tf.to_float(cand1)\n  \n  \n  \n  cand2 = tf.to_bfloat16(\n      tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))\n  ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)\n  return ret * tf.to_bfloat16(x_sign)", "docstring": "Convert a float32 to a bfloat16 using randomized roundoff.\n\nArgs:\nx: A float32 Tensor.\nnoise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)\nReturns:\nA float32 Tensor.", "source": "juraj-google-style"}
{"code": "def source_required(src_file):\n    if (not src_file.exists()):\n        return True\n    required = True\n    hash_file = src_file.with_suffix('.hash', depth=0)\n    LOG.debug('Hash file location: %s', hash_file)\n    if hash_file.exists():\n        new_hash = get_hash_of_dirs(src_file)\n        with open(hash_file, 'r') as h_file:\n            old_hash = h_file.readline()\n        required = (not (new_hash == old_hash))\n        if required:\n            from benchbuild.utils.cmd import rm\n            rm('-r', src_file)\n            rm(hash_file)\n    if required:\n        LOG.info('Source required for: %s', src_file)\n        LOG.debug('Reason: src-exists: %s hash-exists: %s', src_file.exists(), hash_file.exists())\n    return required", "docstring": "Check, if a download is required.\n\nArgs:\nsrc_file: The filename to check for.\nsrc_root: The path we find the file in.\n\nReturns:\nTrue, if we need to download something, False otherwise.", "source": "codesearchnet"}
{"code": "def in_to_out(self, in_path, out_path=None):\n    if is_same_file(in_path, out_path):\n        logger.debug('in path and out path are the same file. writing to temp file and then replacing in path with the temp file.')\n        out_path = None\n    logger.debug(f'opening source file: {in_path}')\n    with open(in_path) as infile:\n        obj = self.object_representer.load(infile)\n    if out_path:\n        logger.debug(f'opening destination file for writing: {out_path}')\n        ensure_dir(out_path)\n        with open(out_path, 'w') as outfile:\n            self.object_representer.dump(outfile, self.formatter(obj))\n        return\n    else:\n        logger.debug('opening temp file for writing...')\n        with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile:\n            self.object_representer.dump(outfile, self.formatter(obj))\n        logger.debug(f'moving temp file to: {in_path}')\n        move_temp_file(outfile.name, infile.name)", "docstring": "Load file into object, formats, writes object to out.\n\nIf in_path and out_path point to the same thing it will in-place edit\nand overwrite the in path. Even easier, if you do want to edit a file\nin place, don't specify out_path, or set it to None.\n\nArgs:\nin_path: str or path-like. Must refer to a single existing file.\nout_path: str or path-like. Must refer to a single destination file\nlocation. will create directory structure if it doesn't\nexist.\nIf out_path is not specified or None, will in-place edit\nand overwrite the in-files.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def _broadcast_grad(op, accumulated_grad):\n    grads = [t for t in accumulated_grad.op.inputs]\n    for t in grads:\n        _check_device(t)\n    with ops.device(op.device):\n        return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')", "docstring": "The gradients for input `Operation` of `broadcast`.\n\nArgs:\nop: The `broadcast send` `Operation` that we are differentiating.\naccumulated_grad: Accumulated gradients with respect to the output of the\n`broadcast` op.\n\nReturns:\nGradients with respect to the input of `broadcast`.", "source": "github-repos"}
{"code": "def get_query_info(sql, con, partition_column):\n    engine = create_engine(con)\n    if is_table(engine, sql):\n        table_metadata = get_table_metadata(engine, sql)\n        query = build_query_from_table(sql)\n        cols = get_table_columns(table_metadata)\n    else:\n        check_query(sql)\n        query = sql.replace(';', '')\n        cols = get_query_columns(engine, query)\n    cols_names = list(cols.keys())\n    return (cols_names, query)", "docstring": "Return a columns name list and the query string\n\nArgs:\nsql: SQL query or table name\ncon: database connection or url string\npartition_column: column used to share the data between the workers\n\nReturns:\nColumns name list and query string", "source": "codesearchnet"}
{"code": "def create_latin_hypercube_samples(order, dim=1):\n    randoms = numpy.random.random((order * dim)).reshape((dim, order))\n    for dim_ in range(dim):\n        perm = numpy.random.permutation(order)\n        randoms[dim_] = ((perm + randoms[dim_]) / order)\n    return randoms", "docstring": "Latin Hypercube sampling.\n\nArgs:\norder (int):\nThe order of the latin hyper-cube. Defines the number of samples.\ndim (int):\nThe number of dimensions in the latin hyper-cube.\n\nReturns (numpy.ndarray):\nLatin hyper-cube with ``shape == (dim, order)``.", "source": "codesearchnet"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    tsk_vs_part, partition_index = tsk_partition.GetTSKVsPartByPathSpec(\n        self._tsk_volume, path_spec)\n\n    location = getattr(path_spec, 'location', None)\n\n    \n    \n    if tsk_vs_part is None:\n      if location is None or location != self.LOCATION_ROOT:\n        return None\n\n      return tsk_partition_file_entry.TSKPartitionFileEntry(\n          self._resolver_context, self, path_spec, is_root=True,\n          is_virtual=True)\n\n    if location is None and partition_index is not None:\n      path_spec.location = '/p{0:d}'.format(partition_index)\n\n    return tsk_partition_file_entry.TSKPartitionFileEntry(\n        self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nTSKPartitionFileEntry: a file entry or None of not available.", "source": "juraj-google-style"}
{"code": "def __init__(self, step_stats: step_stats_pb2.StepStats, graph: Optional[Any]=None) -> None:\n    self._origin_step_stats = step_stats\n    self._step_stats = None\n    self._graph = graph\n    self._chrome_trace = _ChromeTraceFormatter()\n    self._next_pid = 0\n    self._device_pids = {}\n    self._tensor_pids = {}\n    self._tensors = {}\n    self._next_flow_id = 0\n    self._flow_starts = {}\n    self._alloc_times = {}\n    self._allocator_maximums = {}", "docstring": "Constructs a new Timeline.\n\nA 'Timeline' is used for visualizing the execution of a TensorFlow\ncomputation.  It shows the timings and concurrency of execution at\nthe granularity of TensorFlow Ops.\nThis class is not thread safe.\n\nArgs:\nstep_stats: The 'step_stats_pb2.StepStats' proto recording execution\ntimes.\ngraph: (Optional) The 'Graph' that was executed.", "source": "github-repos"}
{"code": "def tensor_layout(self, tensor_shape, mesh_shape):\n    ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape) for d in tensor_shape]\n    not_nones = [a for a in ret if (a is not None)]\n    if (len(not_nones) != len(set(not_nones))):\n        raise ValueError(('Two Tensor Dimensions may not map to the same Mesh Dimension: layout=%s tensor_shape=%s mesh_shape=%s ' % (self, tensor_shape, mesh_shape)))\n    return TensorLayout(ret)", "docstring": "Computes TensorLayout given a Tensor Shape and a Mesh Shape.\n\nArgs:\ntensor_shape: Shape.\nmesh_shape: Shape.\n\nReturns:\nTensorLayout.\n\nRaises:\nValueError: If two Tensor Dimensions map to the same Mesh Dimensions.", "source": "codesearchnet"}
{"code": "def load_image(buf, request_components=0):\n    x = ffi.new('int*')\n    y = ffi.new('int*')\n    n = ffi.new('int*')\n    cbuf = ffi.from_buffer(buf)\n    bitmap = lib.stbi_load_from_memory(ffi.cast('unsigned char*', cbuf), len(buf), x, y, n, request_components)\n    pybuffer = ffi.buffer(bitmap, ((x[0] * y[0]) * n[0]))\n    return (pybuffer, x[0], y[0], n[0])", "docstring": "Load a png or jpeg image into a bitmap buffer.\n\nArgs:\nbuf (Buffer): Buffer to load\nrequest_components (int): If you want to force number of components\n\nReturns:\n\nA tuple containing:\n\n- Bitmap buffer\n- width of bitmap\n- height of bitmap\n- number of components", "source": "codesearchnet"}
{"code": "def clone(self, deep: bool=False, memo: Optional[Any]=None, override: Optional[Dict[str, Any]]=None) -> 'Symbolic':\n    return self.sym_clone(deep, memo, override)", "docstring": "Clones current object symbolically.\n\nArgs:\ndeep: If True, perform deep copy (equivalent to copy.deepcopy). Otherwise\nshallow copy (equivalent to copy.copy).\nmemo: Memo object for deep clone.\noverride: An optional dict of key path to new values to override cloned\nvalue.\n\nReturns:\nA copy of self.", "source": "github-repos"}
{"code": "def if_then_else(cls,\n            condition: 'TensorFluent',\n            true_case: 'TensorFluent',\n            false_case: 'TensorFluent') -> 'TensorFluent':\n        \n        true = TensorFluent.constant(True, tf.bool)\n        false = TensorFluent.constant(False, tf.bool)\n        ite = (condition == true) * true_case + (condition == false) * false_case\n        if true_case.dtype == tf.bool and false_case.dtype == tf.bool:\n            ite = ite.cast(tf.bool)\n        return ite", "docstring": "Returns a TensorFluent for the control op if-then-else.\n\nArgs:\ncondition: Boolean fluent for the if condition.\ntrue_case: Fluent returned in the true clause.\nfalse_case: Fluent returned in the false clause.\n\nReturns:\nA TensorFluent wrapping the if-then-else control statement.\n\nRaises:\nValueError: If cases don't have same shape.", "source": "juraj-google-style"}
{"code": "def attention_lm_moe_large():\n    hparams = attention_lm_moe_base()\n    hparams.num_hidden_layers = 5\n    hparams.moe_layers = '3'\n    hparams.hidden_size = 1024\n    hparams.num_heads = 16\n    hparams.filter_size = 4096\n    hparams.moe_hidden_sizes = '4096'\n    hparams.moe_num_experts = 128\n    hparams.layer_prepostprocess_dropout = 0.2\n    return hparams", "docstring": "Large model for distributed training.\n\nOver 1B parameters, so requires multi-gpu training due to memory\nrequirements.\n\non lm1b_32k:\nAfter 45K steps on 8 GPUs (synchronous):\neval_log_ppl_per_token = 3.18\neval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9\n\nReturns:\nan hparams object.", "source": "codesearchnet"}
{"code": "def __init__(self, text: str, schema_data: SchemaData, mid: ModuleId):\n        \n        super().__init__(text)\n        self.mid = mid\n        self.schema_data = schema_data", "docstring": "Initialize the parser instance.\n\nArgs:\ntext: Feature expression text.\nschema_data: Data for the current schema.\nmid: Identifier of the context module.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.", "source": "juraj-google-style"}
{"code": "def bulk_actions(objects, index, action):\n    assert (index != '_all'), \"index arg must be a valid index name. '_all' is a reserved term.\"\n    logger.info(\"Creating bulk '%s' actions for '%s'\", action, index)\n    for obj in objects:\n        try:\n            logger.debug(\"Appending '%s' action for '%r'\", action, obj)\n            (yield obj.as_search_action(index=index, action=action))\n        except Exception:\n            logger.exception('Unable to create search action for %s', obj)", "docstring": "Yield bulk api 'actions' from a collection of objects.\n\nThe output from this method can be fed in to the bulk\napi helpers - each document returned by get_documents\nis decorated with the appropriate bulk api op_type.\n\nArgs:\nobjects: iterable (queryset, list, ...) of SearchDocumentMixin\nobjects. If the objects passed in is a generator, then this\nfunction will yield the results rather than returning them.\nindex: string, the name of the index to target - the index name\nis embedded into the return value and is used by the bulk api.\naction: string ['index' | 'update' | 'delete'] - this decides\nhow the final document is formatted.", "source": "codesearchnet"}
{"code": "def get_node(self, role: str, default=None) -> BioCNode:\n    return next((node for node in self.nodes if (node.role == role)), default)", "docstring": "Get the first node with role\n\nArgs:\nrole: role\ndefault: node returned instead of raising StopIteration\n\nReturns:\nthe first node with role", "source": "codesearchnet"}
{"code": "def parser(self, column: str, parser: str, error: str, value: Any) -> None:\n    log = self._build_parser_message(column, parser, error, value)\n    self.queue_log_message(log)", "docstring": "Adds parser error information to base log message and\nsends it to the logger for writing.\n\nArgs:\n* column: column where the rule is applied\n* parser: parser function that failed and raises this message\n* error: error that occurred\n* value: value that fails to parse\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def earliest_date(dates, full_date=False):\n    min_date = min((PartialDate.loads(date) for date in dates))\n    if ((not min_date.month) and full_date):\n        min_date.month = 1\n    if ((not min_date.day) and full_date):\n        min_date.day = 1\n    return min_date.dumps()", "docstring": "Return the earliest among the schema-compliant dates.\n\nThis is a convenience wrapper around :ref:`PartialDate`, which should be\nused instead if more features are needed.\n\nArgs:\ndates(list): List of dates from which oldest/earliest one will be returned\nfull_date(bool): Adds month and/or day as \"01\" if they are missing\nReturns:\nstr: Earliest date from provided list", "source": "codesearchnet"}
{"code": "def distances_from_root(self, leaves=True, internal=True, unlabeled=False):\n        \n        if not isinstance(leaves, bool):\n            raise TypeError(\"leaves must be a bool\")\n        if not isinstance(internal, bool):\n            raise TypeError(\"internal must be a bool\")\n        if not isinstance(unlabeled, bool):\n            raise TypeError(\"unlabeled must be a bool\")\n        if leaves or internal:\n            d = dict()\n            for node in self.traverse_preorder():\n                if node.is_root():\n                    d[node] = 0\n                else:\n                    d[node] = d[node.parent]\n                if node.edge_length is not None:\n                    d[node] += node.edge_length\n                if ((leaves and node.is_leaf()) or (internal and not node.is_leaf())) and (unlabeled or node.label is not None):\n                    yield (node,d[node])", "docstring": "Generator over the root-to-node distances of this ``Tree``; (node,distance) tuples\n\nArgs:\n``terminal`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``\n\n``unlabeled`` (``bool``): ``True`` to include unlabeled nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def merge_nodes(self, n1: str, n2: str, same_polarity: bool=True):\n    for p in self.predecessors(n1):\n        for st in self[p][n1]['InfluenceStatements']:\n            if (not same_polarity):\n                st.obj_delta['polarity'] = (- st.obj_delta['polarity'])\n            st.obj.db_refs['UN'][0] = (n2, st.obj.db_refs['UN'][0][1])\n        if (not self.has_edge(p, n2)):\n            self.add_edge(p, n2)\n            self[p][n2]['InfluenceStatements'] = self[p][n1]['InfluenceStatements']\n        else:\n            self[p][n2]['InfluenceStatements'] += self[p][n1]['InfluenceStatements']\n    for s in self.successors(n1):\n        for st in self.edges[(n1, s)]['InfluenceStatements']:\n            if (not same_polarity):\n                st.subj_delta['polarity'] = (- st.subj_delta['polarity'])\n            st.subj.db_refs['UN'][0] = (n2, st.subj.db_refs['UN'][0][1])\n        if (not self.has_edge(n2, s)):\n            self.add_edge(n2, s)\n            self[n2][s]['InfluenceStatements'] = self[n1][s]['InfluenceStatements']\n        else:\n            self[n2][s]['InfluenceStatements'] += self[n1][s]['InfluenceStatements']\n    self.remove_node(n1)", "docstring": "Merge node n1 into node n2, with the option to specify relative\npolarity.\n\nArgs:\nn1\nn2\nsame_polarity", "source": "codesearchnet"}
{"code": "async def get_matches(self, state: MatchState=MatchState.all_):\n    matches = (await self.connection('GET', 'tournaments/{}/matches'.format(self._tournament_id), state=state.value, participant_id=self._id))\n    ms = []\n    for m in matches:\n        ms.append((await self._tournament.get_match(m['match']['id'])))\n    return ms", "docstring": "Return the matches of the given state\n\n|methcoro|\n\nArgs:\nstate: see :class:`MatchState`\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def _find_sequence(self) -> List[GridQubit]:\n    tail = self._sequence_search(self._start, [])\n    tail.pop(0)\n    head = self._sequence_search(self._start, tail)\n    head.reverse()\n    return self._expand_sequence((head + tail))", "docstring": "Looks for a sequence starting at a given qubit.\n\nSearch is issued twice from the starting qubit, so that longest possible\nsequence is found. Starting qubit might not be the first qubit on the\nreturned sequence.\n\nReturns:\nThe longest sequence found by this method.", "source": "codesearchnet"}
{"code": "def conv_input_length(output_length, filter_size, padding, stride):\n    if output_length is None:\n        return None\n    assert padding in {'same', 'valid', 'full'}\n    if padding == 'same':\n        pad = filter_size \n    elif padding == 'valid':\n        pad = 0\n    elif padding == 'full':\n        pad = filter_size - 1\n    return (output_length - 1) * stride - 2 * pad + filter_size", "docstring": "Determines input length of a convolution given output length.\n\nArgs:\noutput_length: integer.\nfilter_size: integer.\npadding: one of \"same\", \"valid\", \"full\".\nstride: integer.\n\nReturns:\nThe input length (integer).", "source": "github-repos"}
{"code": "def MakeSuiteFromDict(d, name=''):\n    suite = Suite(name=name)\n    suite.SetDict(d)\n    suite.Normalize()\n    return suite", "docstring": "Makes a suite from a map from values to probabilities.\n\nArgs:\nd: dictionary that maps values to probabilities\nname: string name for this suite\n\nReturns:\nSuite object", "source": "codesearchnet"}
{"code": "def get_parser(segmenter, **options):\n    if (segmenter == 'nlapi'):\n        return NLAPIParser(**options)\n    elif (segmenter == 'mecab'):\n        return MecabParser()\n    elif (segmenter == 'tinysegmenter'):\n        return TinysegmenterParser()\n    else:\n        raise ValueError('Segmenter {} is not supported.'.format(segmenter))", "docstring": "Gets a parser.\n\nArgs:\nsegmenter (str): Segmenter to use.\noptions (:obj:`dict`, optional): Optional settings.\n\nReturns:\nParser (:obj:`budou.parser.Parser`)\n\nRaises:\nValueError: If unsupported segmenter is specified.", "source": "codesearchnet"}
{"code": "def ReleaseFileObject(self, file_object):\n    \n    identifier, cache_value = self._file_object_cache.GetCacheValueByObject(\n        file_object)\n\n    if not identifier:\n      raise RuntimeError('Object not cached.')\n\n    if not cache_value:\n      raise RuntimeError('Invalid cache value.')\n\n    self._file_object_cache.ReleaseObject(identifier)\n\n    result = cache_value.IsDereferenced()\n    if result:\n      self._file_object_cache.RemoveObject(identifier)\n\n    return result", "docstring": "Releases a cached file-like object.\n\nArgs:\nfile_object (FileIO): file-like object.\n\nReturns:\nbool: True if the file-like object can be closed.\n\nRaises:\nPathSpecError: if the path specification is incorrect.\nRuntimeError: if the file-like object is not cached or an inconsistency\nis detected in the cache.", "source": "juraj-google-style"}
{"code": "def load_strain(self, strain_id, strain_genome_file):\n        \n        \n        strain_gp = GEMPRO(gem_name=strain_id, genome_path=strain_genome_file, write_protein_fasta_files=False)\n        \n\n        self.strains.append(strain_gp)\n        return self.strains.get_by_id(strain_id)", "docstring": "Load a strain as a new GEM-PRO by its ID and associated genome file. Stored in the ``strains`` attribute.\n\nArgs:\nstrain_id (str): Strain ID\nstrain_genome_file (str): Path to strain genome file", "source": "juraj-google-style"}
{"code": "def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):\n    if (isinstance(rationale, basestring) and (max_length is not None) and (len(rationale) > max_length)):\n        return (rationale[0:max_length], True)\n    else:\n        return (rationale, False)", "docstring": "Truncates the rationale for analytics event emission if necessary\n\nArgs:\nrationale (string): the string value of the rationale\nmax_length (int): the max length for truncation\n\nReturns:\ntruncated_value (string): the possibly truncated version of the rationale\nwas_truncated (bool): returns true if the rationale is truncated", "source": "codesearchnet"}
{"code": "def _sync_content_metadata(self, serialized_data):\n        \n        url = self.enterprise_configuration.sapsf_base_url + self.global_sap_config.course_api_path\n        try:\n            status_code, response_body = self._call_post_with_session(url, serialized_data)\n        except requests.exceptions.RequestException as exc:\n            raise ClientError(\n                'SAPSuccessFactorsAPIClient request failed: {error} {message}'.format(\n                    error=exc.__class__.__name__,\n                    message=str(exc)\n                )\n            )\n\n        if status_code >= 400:\n            raise ClientError(\n                'SAPSuccessFactorsAPIClient request failed with status {status_code}: {message}'.format(\n                    status_code=status_code,\n                    message=response_body\n                )\n            )", "docstring": "Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint.\n\nArguments:\nserialized_data: Serialized JSON string representing a list of content metadata items.\n\nRaises:\nClientError: If SuccessFactors API call fails.", "source": "juraj-google-style"}
{"code": "def lower(self, lowering):\n    old_shape = self.inputs[0].shape\n    new_shape = self.outputs[0].shape\n    mesh_impl = lowering.mesh_impl(self)\n    slices = lowering.tensors[self.inputs[0]]\n    mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape)\n    mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape)\n    mesh_axes_allsplit = []\n    mesh_axes_allconcat = []\n    mesh_axes_alltoall = []\n    for (mesh_axis, (old_cumprod, new_cumprod)) in enumerate(zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)):\n        if (new_cumprod != old_cumprod):\n            if (old_cumprod is None):\n                mesh_axes_allsplit.append(mesh_axis)\n            elif (new_cumprod is None):\n                mesh_axes_allconcat.append(mesh_axis)\n            else:\n                mesh_axes_alltoall.append(mesh_axis)\n    laid_out_size = mesh_impl.laid_out_size(old_shape)\n    for mesh_axis in mesh_axes_allsplit:\n        tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_new[mesh_axis])\n        if (tensor_axis is None):\n            raise NotImplementedError(('Try first reshaping to insert a new tf dimension, then changing layout. input_shape=%s output_shape=%s' % (self.inputs[0].shape, self.outputs[0].shape)))\n        slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis)\n        laid_out_size \n    for mesh_axis in mesh_axes_alltoall:\n        split_tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_new[mesh_axis])\n        if (split_tensor_axis is None):\n            raise NotImplementedError(('Try first reshaping to insert a new tf dimension, then changing layout. input_shape=%s output_shape=%s' % (self.inputs[0].shape, self.outputs[0].shape)))\n        concat_tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_old[mesh_axis])\n        assert (concat_tensor_axis is not None)\n        slices = mesh_impl.alltoall(slices, mesh_axis, split_tensor_axis, concat_tensor_axis)\n        lowering.add_counter(('alltoall/%s/reshape_op' % mesh_axis), laid_out_size)\n    for mesh_axis in mesh_axes_allconcat:\n        tensor_axis = old_shape.cumprod_to_tensor_axis(mesh_axis_to_cumprod_old[mesh_axis])\n        assert (tensor_axis is not None)\n        slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis)\n        laid_out_size *= mesh_impl.shape[mesh_axis].size\n        lowering.add_counter(('allconcat/%s/reshape_op' % mesh_axis), laid_out_size)\n    old_slice_shape = mesh_impl.slice_shape(old_shape)\n    new_slice_shape = mesh_impl.slice_shape(new_shape)\n    if (new_slice_shape != old_slice_shape):\n\n        def reshape_fn(x):\n            return tf.reshape(x, new_slice_shape)\n        slices = mesh_impl.slicewise(reshape_fn, slices)\n    lowering.set_tensor_lowering(self.outputs[0], slices)", "docstring": "Lower the ReshapeOperation.\n\nReshaping can require collective communication between processors.\nWe haven't yet implemented all possible reshapes.  We try to handle the\ncommon cases here - otherwise we raise a NotImplementedError.\n\nArgs:\nlowering: a Lowering\nRaises:\nNotImplementedError: if we haven't covered this case", "source": "codesearchnet"}
{"code": "def handle_message(self, ch, method, properties, body):\n        \n        input = {}\n        headers = {}\n        try:\n            self.sessid = method.routing_key\n\n            input = json_decode(body)\n            data = input['data']\n\n            \n            \n            if 'path' in data:\n                if data['path'] in VIEW_METHODS:\n                    data['view'] = data['path']\n                else:\n                    data['wf'] = data['path']\n            session = Session(self.sessid)\n\n            headers = {'remote_ip': input['_zops_remote_ip'],\n                       'source': input['_zops_source']}\n\n            if 'wf' in data:\n                output = self._handle_workflow(session, data, headers)\n            elif 'job' in data:\n\n                self._handle_job(session, data, headers)\n                return\n            else:\n                output = self._handle_view(session, data, headers)\n\n        except HTTPError as e:\n            import sys\n            if hasattr(sys, '_called_from_test'):\n                raise\n            output = {\"cmd\": \"error\", \"error\": self._prepare_error_msg(e.message), \"code\": e.code}\n            log.exception(\"Http error occurred\")\n        except:\n            self.current = Current(session=session, input=data)\n            self.current.headers = headers\n            import sys\n            if hasattr(sys, '_called_from_test'):\n                raise\n            err = traceback.format_exc()\n            output = {\"cmd\": \"error\", \"error\": self._prepare_error_msg(err), \"code\": 500}\n            log.exception(\"Worker error occurred with messsage body:\\n%s\" % body)\n        if 'callbackID' in input:\n            output['callbackID'] = input['callbackID']\n        log.info(\"OUTPUT for %s: %s\" % (self.sessid, output))\n        output['reply_timestamp'] = time()\n        self.send_output(output)", "docstring": "this is a pika.basic_consumer callback\nhandles client inputs, runs appropriate workflows and views\n\nArgs:\nch: amqp channel\nmethod: amqp method\nproperties:\nbody: message body", "source": "juraj-google-style"}
{"code": "def icao(msg):\n    \n\n    DF = df(msg)\n\n    if DF in (11, 17, 18):\n        addr = msg[2:8]\n    elif DF in (0, 4, 5, 16, 20, 21):\n        c0 = bin2int(crc(msg, encode=True))\n        c1 = hex2int(msg[-6:])\n        addr = '%06X' % (c0 ^ c1)\n    else:\n        addr = None\n\n    return addr", "docstring": "Calculate the ICAO address from an Mode-S message\nwith DF4, DF5, DF20, DF21\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nString: ICAO address in 6 bytes hexadecimal string", "source": "juraj-google-style"}
{"code": "def draw_curve(self, grid_characters: BoxDrawCharacterSet, *, top: bool=False, left: bool=False, right: bool=False, bottom: bool=False, crossing_char: Optional[str]=None):\n    if (not any([top, left, right, bottom])):\n        return\n    sign_top = ((+ 1) if top else ((- 1) if self.top else 0))\n    sign_bottom = ((+ 1) if bottom else ((- 1) if self.bottom else 0))\n    sign_left = ((+ 1) if left else ((- 1) if self.left else 0))\n    sign_right = ((+ 1) if right else ((- 1) if self.right else 0))\n    if top:\n        self.top = grid_characters.top_bottom\n    if bottom:\n        self.bottom = grid_characters.top_bottom\n    if left:\n        self.left = grid_characters.left_right\n    if right:\n        self.right = grid_characters.left_right\n    if (not all([crossing_char, self.top, self.bottom, self.left, self.right])):\n        crossing_char = box_draw_character(self._prev_curve_grid_chars, grid_characters, top=sign_top, bottom=sign_bottom, left=sign_left, right=sign_right)\n    self.center = (crossing_char or '')\n    self._prev_curve_grid_chars = grid_characters", "docstring": "Draws lines in the box using the given character set.\n\nSupports merging the new lines with the lines from a previous call to\ndraw_curve, including when they have different character sets (assuming\nthere exist characters merging the two).\n\nArgs:\ngrid_characters: The character set to draw the curve with.\ntop: Draw topward leg?\nleft: Draw leftward leg?\nright: Draw rightward leg?\nbottom: Draw downward leg?\ncrossing_char: Overrides the all-legs-present character. Useful for\nascii diagrams, where the + doesn't always look the clearest.", "source": "codesearchnet"}
{"code": "def retry(exceptions, tries=5, delay=1, backoff=2, logger=None):\n    \n\n    def deco_retry(func):\n        @wraps(func)\n        async def f_retry(self, *args, **kwargs):\n            if not iscoroutine(func):\n                f = coroutine(func)\n            else:\n                f = func\n\n            mtries, mdelay = tries, delay\n            while mtries > 1:\n                try:\n                    return await f(self, *args, **kwargs)\n                except exceptions:\n                    if logger:\n                        logger.info('Retrying %s after %s seconds', f.__name__, mdelay)\n                    sleep(mdelay)\n                    mtries -= 1\n                    mdelay *= backoff\n            return await f(self, *args, **kwargs)\n\n        return f_retry\n\n    return deco_retry", "docstring": "Retry calling the decorated function using an exponential backoff.\n\nArgs:\nexceptions: The exception to check. may be a tuple of\nexceptions to check.\ntries: Number of times to try (not retry) before giving up.\ndelay: Initial delay between retries in seconds.\nbackoff: Backoff multiplier (e.g. value of 2 will double the delay\neach retry).\nlogger: Logger to use. If None, print.", "source": "juraj-google-style"}
{"code": "def shakespeare(chunk_size):\n    file_name = maybe_download('http:\n    with open(file_name) as f:\n        shakespeare_full = f.read()\n    length = ((len(shakespeare_full) \n    if (length < len(shakespeare_full)):\n        shakespeare_full = shakespeare_full[:length]\n    arr = np.array([convert_to_int(c) for c in shakespeare_full])[0:((len(shakespeare_full) / chunk_size) * chunk_size)]\n    return arr.reshape(((len(arr) / chunk_size), chunk_size))", "docstring": "Downloads Shakespeare, converts it into ASCII codes and chunks it.\n\nArgs:\nchunk_size: The dataset is broken down so that it is shaped into batches x\nchunk_size.\nReturns:\nA numpy array of ASCII codes shaped into batches x chunk_size.", "source": "codesearchnet"}
{"code": "def MatchBestComponentName(self, component):\n    fd = self.OpenAsContainer()\n    file_listing = set(fd.ListNames())\n    if (component not in file_listing):\n        lower_component = component.lower()\n        for x in file_listing:\n            if (lower_component == x.lower()):\n                component = x\n                break\n    if (fd.supported_pathtype != self.pathspec.pathtype):\n        new_pathspec = rdf_paths.PathSpec(path=component, pathtype=fd.supported_pathtype)\n    else:\n        new_pathspec = self.pathspec.last.Copy()\n        new_pathspec.path = component\n    return new_pathspec", "docstring": "Returns the name of the component which matches best our base listing.\n\nIn order to do the best case insensitive matching we list the files in the\nbase handler and return the base match for this component.\n\nArgs:\ncomponent: A component name which should be present in this directory.\n\nReturns:\nthe best component name.", "source": "codesearchnet"}
{"code": "def from_string(cls, key_pem, is_x509_cert):\n    key_pem = _helpers._to_bytes(key_pem)\n    if is_x509_cert:\n        der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')\n        (asn1_cert, remaining) = decoder.decode(der, asn1Spec=Certificate())\n        if (remaining != b''):\n            raise ValueError('Unused bytes', remaining)\n        cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']\n        key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])\n        pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')\n    else:\n        pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')\n    return cls(pubkey)", "docstring": "Construct an RsaVerifier instance from a string.\n\nArgs:\nkey_pem: string, public key in PEM format.\nis_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\nis expected to be an RSA key in PEM format.\n\nReturns:\nRsaVerifier instance.\n\nRaises:\nValueError: if the key_pem can't be parsed. In either case, error\nwill begin with 'No PEM start marker'. If\n``is_x509_cert`` is True, will fail to find the\n\"-----BEGIN CERTIFICATE-----\" error, otherwise fails\nto find \"-----BEGIN RSA PUBLIC KEY-----\".", "source": "codesearchnet"}
{"code": "def resolve_type(arg):\n    \n    \n    arg_type = type(arg)\n    if arg_type == list:\n        assert isinstance(arg, list)  \n        sample = arg[:min(4, len(arg))]\n        tentative_type = TentativeType()\n        for sample_item in sample:\n            tentative_type.add(resolve_type(sample_item))\n        return ListType(tentative_type)\n    elif arg_type == set:\n        assert isinstance(arg, set)  \n        sample = []\n        iterator = iter(arg)\n        for i in range(0, min(4, len(arg))):\n            sample.append(next(iterator))\n        tentative_type = TentativeType()\n        for sample_item in sample:\n            tentative_type.add(resolve_type(sample_item))\n        return SetType(tentative_type)\n    elif arg_type == FakeIterator:\n        assert isinstance(arg, FakeIterator)  \n        sample = []\n        iterator = iter(arg)\n        for i in range(0, min(4, len(arg))):\n            sample.append(next(iterator))\n        tentative_type = TentativeType()\n        for sample_item in sample:\n            tentative_type.add(resolve_type(sample_item))\n        return IteratorType(tentative_type)\n    elif arg_type == tuple:\n        assert isinstance(arg, tuple)  \n        sample = list(arg[:min(10, len(arg))])\n        return TupleType([resolve_type(sample_item) for sample_item in sample])\n    elif arg_type == dict:\n        assert isinstance(arg, dict)  \n        key_tt = TentativeType()\n        val_tt = TentativeType()\n        for i, (k, v) in enumerate(iteritems(arg)):\n            if i > 4:\n                break\n            key_tt.add(resolve_type(k))\n            val_tt.add(resolve_type(v))\n        return DictType(key_tt, val_tt)\n    else:\n        return type(arg)", "docstring": "Resolve object to one of our internal collection types or generic built-in type.\n\nArgs:\narg: object to resolve", "source": "juraj-google-style"}
{"code": "class MeanAbsolutePercentageError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_absolute_percentage_error', dtype=None):\n        super(MeanAbsolutePercentageError, self).__init__(mean_absolute_percentage_error, name, dtype=dtype)", "docstring": "Computes the mean absolute percentage error between `y_true` and `y_pred`.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.MeanAbsolutePercentageError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result().numpy()\n250000000.0\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result().numpy()\n500000000.0\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[tf.keras.metrics.MeanAbsolutePercentageError()])\n```", "source": "github-repos"}
{"code": "def read_geojson(filename):\n    json_file = open(filename)\n    data = json.load(json_file)\n    json_file.close()\n    times = data['properties']['times']\n    main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])\n    attribute_data = dict()\n    for feature in data['features']:\n        for main_name in main_data.keys():\n            main_data[main_name].append(np.array(feature['properties'][main_name]))\n        for (k, v) in feature['properties']['attributes'].items():\n            if (k not in attribute_data.keys()):\n                attribute_data[k] = [np.array(v)]\n            else:\n                attribute_data[k].append(np.array(v))\n    kwargs = {}\n    for kw in ['dx', 'step', 'u', 'v']:\n        if (kw in data['properties'].keys()):\n            kwargs[kw] = data['properties'][kw]\n    sto = STObject(main_data['timesteps'], main_data['masks'], main_data['x'], main_data['y'], main_data['i'], main_data['j'], times[0], times[(- 1)], **kwargs)\n    for (k, v) in attribute_data.items():\n        sto.attributes[k] = v\n    return sto", "docstring": "Reads a geojson file containing an STObject and initializes a new STObject from the information in the file.\n\nArgs:\nfilename: Name of the geojson file\n\nReturns:\nan STObject", "source": "codesearchnet"}
{"code": "def length_from_embedding(emb):\n    return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32)", "docstring": "Compute the length of each sequence in the batch.\n\nArgs:\nemb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].\nReturns:\na Tensor with shape [batch].", "source": "codesearchnet"}
{"code": "def extract_table(tabletag):\n    theadtag = tabletag.find_next('thead')\n    headertags = theadtag.find_all('th')\n    if (len(headertags) == 0):\n        headertags = theadtag.find_all('td')\n    headers = []\n    for tag in headertags:\n        headers.append(get_text(tag))\n    tbodytag = tabletag.find_next('tbody')\n    trtags = tbodytag.find_all('tr')\n    table = list()\n    for trtag in trtags:\n        row = dict()\n        tdtags = trtag.find_all('td')\n        for (i, tag) in enumerate(tdtags):\n            row[headers[i]] = get_text(tag)\n        table.append(row)\n    return table", "docstring": "Extract HTML table as list of dictionaries\n\nArgs:\ntabletag (Tag): BeautifulSoup tag\n\nReturns:\nstr: Text of tag stripped of leading and trailing whitespace and newlines and with &nbsp replaced with space", "source": "codesearchnet"}
{"code": "class Content:\n    text: Optional[str] = None", "docstring": "Container for embeddable content. Add new types as when as necessary.\n\nArgs:\ntext: Text content to be embedded", "source": "github-repos"}
{"code": "def get(self, name):\n        \n        if name.startswith('\n            return self.tags.get(name[1:])\n        return self.props.get(name)", "docstring": "Return a secondary property value from the Node.\n\nArgs:\nname (str): The name of a secondary property.\n\nReturns:\n(obj): The secondary property value or None.", "source": "juraj-google-style"}
{"code": "def matches(self, stream):\n    if (self.match_type != stream.stream_type):\n        return False\n    if (self.match_id is not None):\n        return (self.match_id == stream.stream_id)\n    if (self.match_spec == DataStreamSelector.MatchUserOnly):\n        return (not stream.system)\n    elif (self.match_spec == DataStreamSelector.MatchSystemOnly):\n        return stream.system\n    elif (self.match_spec == DataStreamSelector.MatchUserAndBreaks):\n        return ((not stream.system) or (stream.system and (stream.stream_id in DataStream.KnownBreakStreams)))\n    return True", "docstring": "Check if this selector matches the given stream\n\nArgs:\nstream (DataStream): The stream to check\n\nReturns:\nbool: True if this selector matches the stream", "source": "codesearchnet"}
{"code": "def _check_conditional_statement(statement, num_collections):\n    correct_var = list(ascii_lowercase)[:num_collections]\n    st_statement = BaseCollection._remove_operators(statement)\n    parsed_st = [s for s in st_statement if s.isalpha()]\n    for var in parsed_st:\n        if (var not in correct_var):\n            raise ValueError('Invalid conditional statement: {}\\n Statement should be a valid Python statement and the variables should be named as follows: {}'.format(statement, ', '.join(correct_var)))\n    return correct_var", "docstring": "Method to check conditional statements to be sure that they are valid.\n\nArgs:\nstatement: A conditional statement as a string (e.g. a>25 and a%5==0).\nThe variable should always be named as 'a' (without quotations).\nnum_collections: An integer representing the number of data collections\nthat the statement will be evaluating.\n\nReturn:\ncorrect_var: A list of the correct variable names that should be\nused within the statement (eg. ['a', 'b', 'c'])", "source": "codesearchnet"}
{"code": "def parse_row(schema, data):\n\n    def parse_value(data_type, value):\n        'Parses a value returned from a BigQuery response.\\n\\n      Args:\\n        data_type: the type of the value as specified by the schema.\\n        value: the raw value to return (before casting to data_type).\\n\\n      Returns:\\n        The value cast to the data_type.\\n      '\n        if (value is not None):\n            if (value == 'null'):\n                value = None\n            elif (data_type == 'INTEGER'):\n                value = int(value)\n            elif (data_type == 'FLOAT'):\n                value = float(value)\n            elif (data_type == 'TIMESTAMP'):\n                value = datetime.datetime.utcfromtimestamp(float(value))\n            elif (data_type == 'BOOLEAN'):\n                value = (value == 'true')\n            elif (type(value) != str):\n                value = str(value)\n        return value\n    row = {}\n    if (data is None):\n        return row\n    for (i, (field, schema_field)) in enumerate(zip(data['f'], schema)):\n        val = field['v']\n        name = schema_field['name']\n        data_type = schema_field['type']\n        repeated = (True if (('mode' in schema_field) and (schema_field['mode'] == 'REPEATED')) else False)\n        if (repeated and (val is None)):\n            row[name] = []\n        elif (data_type == 'RECORD'):\n            sub_schema = schema_field['fields']\n            if repeated:\n                row[name] = [Parser.parse_row(sub_schema, v['v']) for v in val]\n            else:\n                row[name] = Parser.parse_row(sub_schema, val)\n        elif repeated:\n            row[name] = [parse_value(data_type, v['v']) for v in val]\n        else:\n            row[name] = parse_value(data_type, val)\n    return row", "docstring": "Parses a row from query results into an equivalent object.\n\nArgs:\nschema: the array of fields defining the schema of the data.\ndata: the JSON row from a query result.\nReturns:\nThe parsed row object.", "source": "codesearchnet"}
{"code": "def update_course_runs(self, course_runs, enterprise_customer, enterprise_context):\n    updated_course_runs = []\n    for course_run in course_runs:\n        track_selection_url = utils.get_course_track_selection_url(course_run=course_run, query_parameters=dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer)))\n        enrollment_url = enterprise_customer.get_course_run_enrollment_url(course_run.get('key'))\n        course_run.update({'enrollment_url': enrollment_url, 'track_selection_url': track_selection_url})\n        marketing_url = course_run.get('marketing_url')\n        if marketing_url:\n            query_parameters = dict(enterprise_context, **utils.get_enterprise_utm_context(enterprise_customer))\n            course_run.update({'marketing_url': utils.update_query_parameters(marketing_url, query_parameters)})\n        updated_course_runs.append(course_run)\n    return updated_course_runs", "docstring": "Update Marketing urls in course metadata and return updated course.\n\nArguments:\ncourse_runs (list): List of course runs.\nenterprise_customer (EnterpriseCustomer): enterprise customer instance.\nenterprise_context (dict): The context to inject into URLs.\n\nReturns:\n(dict): Dictionary containing updated course metadata.", "source": "codesearchnet"}
{"code": "class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(MaxPooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Max pooling layer for 2D inputs (e.g. images).\n\nArgs:\npool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)\nspecifying the size of the pooling window.\nCan be a single integer to specify the same value for\nall spatial dimensions.\nstrides: An integer or tuple/list of 2 integers,\nspecifying the strides of the pooling operation.\nCan be a single integer to specify the same value for\nall spatial dimensions.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string. The ordering of the dimensions in the inputs.\n`channels_last` (default) and `channels_first` are supported.\n`channels_last` corresponds to inputs with shape\n`(batch, height, width, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, height, width)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def import_tsv(self, tsv_file):\n    r = fapi.upload_entities_tsv(self.namespace, self.name, self.tsv_file, self.api_url)\n    fapi._check_response_code(r, 201)", "docstring": "Upload entity data to workspace from tsv loadfile.\n\nArgs:\ntsv_file (file): Tab-delimited file of entity data", "source": "codesearchnet"}
{"code": "def sg_train_func(func):\n    r\n    @wraps(func)\n    def wrapper(**kwargs):\n        r\n        opt = tf.sg_opt(kwargs)\n\n        \n        opt += tf.sg_opt(lr=0.001,\n                         save_dir='asset/train',\n                         max_ep=1000, ep_size=100000,\n                         save_interval=600, log_interval=60,\n                         eval_metric=[],\n                         max_keep=5, keep_interval=1,\n                         tqdm=True)\n\n        \n        epoch, loss = -1, None\n\n        \n        saver = tf.train.Saver(max_to_keep=opt.max_keep,\n                               keep_checkpoint_every_n_hours=opt.keep_interval)\n\n        \n        for m in opt.eval_metric:\n            tf.sg_summary_metric(m)\n\n        \n        log_dir = opt.save_dir + '/run-%02d%02d-%02d%02d' % tuple(time.localtime(time.time()))[1:5]\n        summary_writer = tf.summary.FileWriter(log_dir)\n\n        \n        def console_log(sess_):\n            if epoch >= 0:\n                tf.sg_info('\\tEpoch[%03d:gs=%d] - loss = %s' %\n                           (epoch, sess_.run(tf.sg_global_step()),\n                            ('NA' if loss is None else '%8.6f' % loss)))\n\n        \n        sv = tf.train.Supervisor(logdir=opt.save_dir,\n                                 saver=saver,\n                                 save_model_secs=opt.save_interval,\n                                 summary_writer=summary_writer,\n                                 save_summaries_secs=opt.log_interval,\n                                 global_step=tf.sg_global_step(),\n                                 local_init_op=tf.sg_phase().assign(True))\n\n        \n        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n\n            \n            if not opt.tqdm:\n                sv.loop(opt.log_interval, console_log, args=(sess, ))\n\n            \n            _step = sess.run(tf.sg_global_step())\n            ep = _step \n\n            \n            if ep <= opt.max_ep:\n\n                \n                tf.sg_info('Training started from epoch[%03d]-step[%d].' 
% (ep, _step))\n\n                \n                for ep in range(ep, opt.max_ep + 1):\n\n                    \n                    start_step = sess.run(tf.sg_global_step()) % opt.ep_size\n                    epoch = ep\n\n                    \n                    if opt.tqdm:\n                        iterator = tqdm(range(start_step, opt.ep_size), total=opt.ep_size, initial=start_step,\n                                        desc='train', ncols=70, unit='b', leave=False)\n                    else:\n                        iterator = range(start_step, opt.ep_size)\n\n                    \n                    for _ in iterator:\n\n                        \n                        if sv.should_stop():\n                            break\n\n                        \n                        batch_loss = func(sess, opt)\n\n                        \n                        if batch_loss is not None and \\\n                                not np.isnan(batch_loss.all()) and not np.isinf(batch_loss.all()):\n                            if loss is None:\n                                loss = np.mean(batch_loss)\n                            else:\n                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1\n\n                    \n                    console_log(sess)\n\n                \n                saver.save(sess, opt.save_dir + '/model.ckpt', global_step=sess.run(tf.sg_global_step()))\n\n                \n                tf.sg_info('Training finished at epoch[%d]-step[%d].' % (ep, sess.run(tf.sg_global_step())))\n            else:\n                tf.sg_info('Training already finished at epoch[%d]-step[%d].' %\n                           (ep - 1, sess.run(tf.sg_global_step())))\n\n    return wrapper", "docstring": "r\"\"\" Decorates a function `func` as sg_train_func.\n\nArgs:\nfunc: A function to decorate", "source": "juraj-google-style"}
{"code": "def __init__(self, current):\n        import sys\n        \n\n        read_existing = set(sys.PYOKO_LOGS['read']) - set(sys.PYOKO_LOGS['new'])\n\n        current.output = {\n            'response': \"DB Access Stats: {}\".format(str(sys.PYOKO_STAT_COUNTER),\n                                                     str(read_existing)),\n            'http_headers': (('Content-Type', 'text/plain'),),\n        }\n\n        sys.PYOKO_LOGS = {\n            \"save\": 0,\n            \"update\": 0,\n            \"read\": 0,\n            \"count\": 0,\n            \"search\": 0,\n        }", "docstring": "GET method handler\nArgs:\nreq: Request object.\nresp: Response object.", "source": "juraj-google-style"}
{"code": "def _set_control_flow_context(self, ctx) -> None:\n    self._control_flow_context = ctx", "docstring": "Sets the current control flow context of this op.\n\nArgs:\nctx: a context object.", "source": "github-repos"}
{"code": "def _sort_course_modes(self, modes):\n\n    def slug_weight(mode):\n        '\\n            Assign a weight to the course mode dictionary based on the position of its slug in the sorting list.\\n            '\n        sorting_slugs = COURSE_MODE_SORT_ORDER\n        sorting_slugs_size = len(sorting_slugs)\n        if (mode['slug'] in sorting_slugs):\n            return (sorting_slugs_size - sorting_slugs.index(mode['slug']))\n        return 0\n    return sorted(modes, key=slug_weight, reverse=True)", "docstring": "Sort the course mode dictionaries by slug according to the COURSE_MODE_SORT_ORDER constant.\n\nArguments:\nmodes (list): A list of course mode dictionaries.\nReturns:\nlist: A list with the course modes dictionaries sorted by slug.", "source": "codesearchnet"}
{"code": "def _image_url(array, fmt='png', mode=\"data\", quality=90, domain=None):\n  \n  supported_modes = (\"data\")\n  if mode not in supported_modes:\n    message = \"Unsupported mode '%s', should be one of '%s'.\"\n    raise ValueError(message, mode, supported_modes)\n\n  image_data = serialize_array(array, fmt=fmt, quality=quality)\n  base64_byte_string = base64.b64encode(image_data).decode('ascii')\n  return \"data:image/\" + fmt.upper() + \";base64,\" + base64_byte_string", "docstring": "Create a data URL representing an image from a PIL.Image.\n\nArgs:\nimage: a numpy\nmode: presently only supports \"data\" for data URL\n\nReturns:\nURL representing image", "source": "juraj-google-style"}
{"code": "def MakeZip(self, input_dir, output_file):\n    \n\n    logging.info(\"Generating zip template file at %s\", output_file)\n    zf = zipfile.ZipFile(output_file, \"w\")\n    oldwd = os.getcwd()\n    os.chdir(input_dir)\n    for path in [\"debian\", \"rpmbuild\", \"fleetspeak\"]:\n      for root, _, files in os.walk(path):\n        for f in files:\n          zf.write(os.path.join(root, f))\n    zf.close()\n    os.chdir(oldwd)", "docstring": "Creates a ZIP archive of the files in the input directory.\n\nArgs:\ninput_dir: the name of the input directory.\noutput_file: the name of the output ZIP archive without extension.", "source": "juraj-google-style"}
{"code": "def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):\n        \n        \n        countriesdata = cls.countriesdata(use_live=use_live)\n        m49 = countriesdata['m49iso3'].get(iso3)\n        if m49 is not None:\n            return m49\n\n        if exception is not None:\n            raise exception\n        return None", "docstring": "Get M49 from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get M49 code\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[int]: M49 code", "source": "juraj-google-style"}
{"code": "def _parse_phone(self, val):\n    ret = {'type': None, 'value': None}\n    try:\n        ret['type'] = val[1]['type']\n    except (IndexError, KeyError, ValueError, TypeError):\n        pass\n    ret['value'] = val[3].strip()\n    try:\n        self.vars['phone'].append(ret)\n    except AttributeError:\n        self.vars['phone'] = []\n        self.vars['phone'].append(ret)", "docstring": "The function for parsing the vcard phone numbers.\n\nArgs:\nval (:obj:`list`): The value to parse.", "source": "codesearchnet"}
{"code": "def validate_checksum( filename, md5sum ):\n    \n    filename = match_filename( filename )\n    md5_hash = file_md5( filename=filename )\n    if md5_hash != md5sum:\n        raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))", "docstring": "Compares the md5 checksum of a file with an expected value.\nIf the calculated and expected checksum values are not equal,\nValueError is raised.\nIf the filename `foo` is not found, will try to read a gzipped file named\n`foo.gz`. In this case, the checksum is calculated for the unzipped file.\n\nArgs:\nfilename (str): Path for the file to be checksummed.\nmd5sum (str):  The expected hex checksum.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def rst_content(self,\n                    prefix: str = \"\",\n                    suffix: str = \"\",\n                    heading_underline_char: str = \"=\",\n                    method: AutodocMethod = None) -> str:\n        \n        spacer = \"    \"\n        \n        if method is None:\n            method = self.method\n        is_python = self.is_python\n        if method == AutodocMethod.BEST:\n            if is_python:\n                method = AutodocMethod.AUTOMODULE\n            else:\n                method = AutodocMethod.CONTENTS\n        elif method == AutodocMethod.AUTOMODULE:\n            if not is_python:\n                method = AutodocMethod.CONTENTS\n\n        \n        if method == AutodocMethod.AUTOMODULE:\n            if self.source_rst_title_style_python:\n                title = self.python_module_name\n            else:\n                title = self.source_filename_rel_project_root\n            instruction = \".. automodule:: {modulename}\\n    :members:\".format(\n                modulename=self.python_module_name\n            )\n        elif method == AutodocMethod.CONTENTS:\n            title = self.source_filename_rel_project_root\n            \n            \n            \n            \n\n            instruction = (\n                \".. literalinclude:: {filename}\\n\"\n                \"{spacer}:language: {language}\".format(\n                    filename=self.source_filename_rel_rst_file,\n                    spacer=spacer,\n                    language=self.pygments_language\n                )\n            )\n        else:\n            raise ValueError(\"Bad method!\")\n\n        \n        content = .format(\n            filename=self.rst_filename_rel_project_root,\n            AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,\n            prefix=prefix,\n            underlined_title=rst_underline(\n                title, underline_char=heading_underline_char),\n            instruction=instruction,\n            suffix=suffix,\n        ).strip() + \"\\n\"\n        return content", "docstring": "Returns the text contents of an RST file that will automatically\ndocument our source file.\n\nArgs:\nprefix: prefix, e.g. RST copyright comment\nsuffix: suffix, after the part we're creating\nheading_underline_char: RST character to use to underline the\nheading\nmethod: optional method to override ``self.method``; see\nconstructor\n\nReturns:\nthe RST contents", "source": "juraj-google-style"}
{"code": "def flatten(iterable):\n    \n    return itertools.chain.from_iterable(a if isinstance(a,Iterable) and not isinstance(a, str) else [a] for a in iterable)", "docstring": "This function allows a simple a way to iterate over a \"complex\" iterable, for example,\nif the input [12, [23], (4, 3), \"lkjasddf\"], this will return an Iterable that returns\n12, 23, 4, 3 and \"lkjasddf\".\n\nArgs:\niterable (Iterable) - A complex iterable that will be flattened\n\nReturns:\n(Iterable): An Iterable that flattens multiple interables", "source": "juraj-google-style"}
{"code": "def wigner_data(q_result, meas_qubits, labels, shots=None):\n    \n    num = len(meas_qubits)\n\n    dim = 2**num\n    p = [0.5 + 0.5 * np.sqrt(3), 0.5 - 0.5 * np.sqrt(3)]\n    parity = 1\n\n    for i in range(num):\n        parity = np.kron(parity, p)\n\n    w = [0] * len(labels)\n    wpt = 0\n    counts = [marginal_counts(q_result.get_counts(circ), meas_qubits)\n              for circ in labels]\n    for entry in counts:\n        x = [0] * dim\n\n        for i in range(dim):\n            if bin(i)[2:].zfill(num) in entry:\n                x[i] = float(entry[bin(i)[2:].zfill(num)])\n\n        if shots is None:\n            shots = np.sum(x)\n\n        for i in range(dim):\n            w[wpt] = w[wpt] + (x[i] / shots) * parity[i]\n        wpt += 1\n\n    return w", "docstring": "Get the value of the Wigner function from measurement results.\n\nArgs:\nq_result (Result): Results from execution of a state tomography\ncircuits on a backend.\nmeas_qubits (list[int]): a list of the qubit indexes measured.\nlabels (list[str]): a list of names of the circuits\nshots (int): number of shots\n\nReturns:\nlist: The values of the Wigner function at measured points in\nphase space", "source": "juraj-google-style"}
{"code": "def eval_from_json(json):\n        \n        changes = poloniex.get_gains_losses(poloniex.parse_changes(json))\n        return RSI.eval_algorithm(changes['gains'], changes['losses'])", "docstring": "Evaluates RSI from JSON (typically Poloniex API response)\n\nArgs:\njson: List of dates where each entry is a dict of raw market data.\n\nReturns:\nFloat between 0 and 100, momentum indicator\nof a market measuring the speed and change of price movements.", "source": "juraj-google-style"}
{"code": "def _ParseFileHeader(self, file_object):\n    \n    file_header_map = self._GetDataTypeMap(\n        'chrome_cache_data_block_file_header')\n\n    try:\n      file_header, _ = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.ParseError(\n          'Unable to parse data block file header with error: {0!s}'.format(\n              exception))\n\n    if file_header.signature != self._FILE_SIGNATURE:\n      raise errors.ParseError('Unsupported data block file signature')\n\n    format_version = '{0:d}.{1:d}'.format(\n        file_header.major_version, file_header.minor_version)\n    if format_version not in ('2.0', '2.1'):\n      raise errors.ParseError(\n          'Unsupported data block file format version: {0:s}'.format(\n              format_version))\n\n    if file_header.block_size not in (256, 1024, 4096):\n      raise errors.ParseError(\n          'Unsupported data block file block size: {0:d}'.format(\n              file_header.block_size))", "docstring": "Parses the file header.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\nParseError: if the file header cannot be read.", "source": "juraj-google-style"}
{"code": "def print_dict(d, show_missing=True):\n  \n  for k, v in sorted(d.items()):\n    if (not v) and show_missing:\n      \n      print('{} -'.format(k))\n    elif isinstance(v, list):\n      \n      print(k)\n      for item in v:\n        print('   {}'.format(item))\n    elif isinstance(v, dict):\n      \n      print(k)\n      for kk, vv in sorted(v.items()):\n        print('   {:<20} {}'.format(kk, vv))", "docstring": "Prints a shallow dict to console.\n\nArgs:\nd: Dict to print.\nshow_missing: Whether to show keys with empty values.", "source": "juraj-google-style"}
{"code": "def run_step(context):\n    logger.debug('started')\n    context.clear()\n    logger.info(f'Context wiped. New context size: {len(context)}')\n    logger.debug('done')", "docstring": "Wipe the entire context.\n\nArgs:\nContext is a dictionary or dictionary-like.\nDoes not require any specific keys in context.", "source": "codesearchnet"}
{"code": "def create(labels=None, **kw):\n    if (labels is not None):\n        kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue, labels)\n    return MetricValue(**kw)", "docstring": "Constructs a new metric value.\n\nThis acts as an alternate to MetricValue constructor which\nsimplifies specification of labels.  Rather than having to create\na MetricValue.Labels instance, all that's necessary to specify the\nrequired string.\n\nArgs:\nlabels (dict([string, [string]]):\n**kw: any other valid keyword args valid in the MetricValue constructor\n\nReturns\n:class:`MetricValue`: the created instance", "source": "codesearchnet"}
{"code": "def service_account_email(self):\n    if (self._service_account_email is None):\n        self._service_account_email = app_identity.get_service_account_name()\n    return self._service_account_email", "docstring": "Get the email for the current service account.\n\nReturns:\nstring, The email associated with the Google App Engine\nservice account.", "source": "codesearchnet"}
{"code": "def CheckForNewlineAtEOF(filename, lines, error):\n    if ((len(lines) < 3) or lines[(- 2)]):\n        error(filename, (len(lines) - 2), 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.')", "docstring": "Logs an error if there is no newline char at the end of the file.\n\nArgs:\nfilename: The name of the current file.\nlines: An array of strings, each representing a line of the file.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def create_file_writer(logdir, max_queue=None, flush_millis=None, filename_suffix=None, name=None):\n    if logdir is None:\n        return _NoopSummaryWriter()\n    logdir = str(logdir)\n    with ops.device('cpu:0'):\n        if max_queue is None:\n            max_queue = constant_op.constant(10)\n        if flush_millis is None:\n            flush_millis = constant_op.constant(2 * 60 * 1000)\n        if filename_suffix is None:\n            filename_suffix = constant_op.constant('.v2')\n        if name is None:\n            name = 'logdir:' + logdir\n        resource = gen_summary_ops.summary_writer(shared_name=name)\n        return _LegacyResourceSummaryWriter(resource=resource, init_op_fn=functools.partial(gen_summary_ops.create_summary_file_writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix))", "docstring": "Creates a summary file writer in the current context under the given name.\n\nArgs:\nlogdir: a string, or None. If a string, creates a summary file writer\nwhich writes to the directory named by the string. If None, returns\na mock object which acts like a summary writer but does nothing,\nuseful to use as a context manager.\nmax_queue: the largest number of summaries to keep in a queue; will\nflush once the queue gets bigger than this. Defaults to 10.\nflush_millis: the largest interval between flushes. Defaults to 120,000.\nfilename_suffix: optional suffix for the event file name. Defaults to `.v2`.\nname: Shared name for this SummaryWriter resource stored to default\nGraph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a\nsummary writer resource with this shared name already exists, the returned\nSummaryWriter wraps that resource and the other arguments have no effect.\n\nReturns:\nEither a summary writer or an empty object which can be used as a\nsummary writer.", "source": "github-repos"}
{"code": "def _starts_with(field, filter_value):\n    valid = False\n    if field.startswith(filter_value):\n        valid = True\n    return valid", "docstring": "Validate field starts with provided value.\n\nArgs:\nfilter_value (string): A string or list of values.\n\nReturns:\n(boolean): Results of validation", "source": "codesearchnet"}
{"code": "def get(self, path_info):\n    assert (path_info['scheme'] == 'local')\n    path = path_info['path']\n    if (not os.path.exists(path)):\n        return None\n    (actual_mtime, actual_size) = get_mtime_and_size(path)\n    actual_inode = get_inode(path)\n    existing_record = self.get_state_record_for_inode(actual_inode)\n    if (not existing_record):\n        return None\n    (mtime, size, checksum, _) = existing_record\n    if self._file_metadata_changed(actual_mtime, mtime, actual_size, size):\n        return None\n    self._update_state_record_timestamp_for_inode(actual_inode)\n    return checksum", "docstring": "Gets the checksum for the specified path info. Checksum will be\nretrieved from the state database if available.\n\nArgs:\npath_info (dict): path info to get the checksum for.\n\nReturns:\nstr or None: checksum for the specified path info or None if it\ndoesn't exist in the state database.", "source": "codesearchnet"}
{"code": "def get_size_with_aspect_ratio(image_size: Tuple[int, int], size: int, max_size: Optional[int]=None, mod_size: int=16) -> Tuple[int, int]:\n    height, width = image_size\n    raw_size = None\n    if max_size is not None:\n        min_original_size = float(min((height, width)))\n        max_original_size = float(max((height, width)))\n        if max_original_size / min_original_size * size > max_size:\n            raw_size = max_size * min_original_size / max_original_size\n            size = int(round(raw_size))\n    if width < height:\n        ow = size\n        if max_size is not None and raw_size is not None:\n            oh = int(raw_size * height / width)\n        else:\n            oh = int(size * height / width)\n    elif height <= width and height == size or (width <= height and width == size):\n        oh, ow = (height, width)\n    else:\n        oh = size\n        if max_size is not None and raw_size is not None:\n            ow = int(raw_size * width / height)\n        else:\n            ow = int(size * width / height)\n    if mod_size is not None:\n        ow_mod = np.mod(ow, mod_size)\n        oh_mod = np.mod(oh, mod_size)\n        ow = ow - ow_mod\n        oh = oh - oh_mod\n    return (oh, ow)", "docstring": "Computes the output image size given the input image size and the desired output size with multiple of divisible_size.\n\nArgs:\nimage_size (`Tuple[int, int]`):\nThe input image size.\nsize (`int`):\nThe desired output size.\nmax_size (`int`, *optional*):\nThe maximum allowed output size.\nmod_size (`int`, *optional*):\nThe size to make multiple of mod_size.", "source": "github-repos"}
{"code": "def CaptureVariablesList(self, items, depth, empty_message, limits):\n    v = []\n    for (name, value) in items:\n        if ((self._total_size >= self.max_size) or (len(v) >= limits.max_list_items)):\n            v.append({'status': {'refersTo': 'VARIABLE_VALUE', 'description': {'format': 'Only first $0 items were captured. Use in an expression to see all items.', 'parameters': [str(len(v))]}}})\n            break\n        v.append(self.CaptureNamedVariable(name, value, depth, limits))\n    if (not v):\n        return [{'status': {'refersTo': 'VARIABLE_NAME', 'description': {'format': empty_message}}}]\n    return v", "docstring": "Captures list of named items.\n\nArgs:\nitems: iterable of (name, value) tuples.\ndepth: nested depth of dictionaries and vectors for items.\nempty_message: info status message to set if items is empty.\nlimits: Per-object limits for capturing variable data.\n\nReturns:\nList of formatted variable objects.", "source": "codesearchnet"}
{"code": "def dataset_docs_str(datasets=None):\n    module_to_builder = make_module_to_builder_dict(datasets)\n    sections = sorted(list(module_to_builder.keys()))\n    section_tocs = []\n    section_docs = []\n    for section in sections:\n        builders = tf.nest.flatten(module_to_builder[section])\n        builders = sorted(builders, key=(lambda b: b.name))\n        builder_docs = [document_single_builder(builder) for builder in builders]\n        section_doc = SECTION_DATASETS.format(section_name=section, datasets='\\n'.join(builder_docs))\n        section_toc = create_section_toc(section, builders)\n        section_docs.append(section_doc)\n        section_tocs.append(section_toc)\n    full_doc = DOC.format(toc='\\n'.join(section_tocs), datasets='\\n'.join(section_docs))\n    return full_doc", "docstring": "Create dataset documentation string for given datasets.\n\nArgs:\ndatasets: list of datasets for which to create documentation.\nIf None, then all available datasets will be used.\n\nReturns:\nstring describing the datasets (in the MarkDown format).", "source": "codesearchnet"}
{"code": "def letter_score(letter):\n    score_map = {1: ['a', 'e', 'i', 'o', 'u', 'l', 'n', 'r', 's', 't'], 2: ['d', 'g'], 3: ['b', 'c', 'm', 'p'], 4: ['f', 'h', 'v', 'w', 'y'], 5: ['k'], 8: ['j', 'x'], 10: ['q', 'z']}\n    for (score, letters) in score_map.items():\n        if (letter.lower() in letters):\n            return score\n    else:\n        raise TypeError('Invalid letter: %s', letter)", "docstring": "Returns the Scrabble score of a letter.\n\nArgs:\nletter: a single character string\n\nRaises:\nTypeError if a non-Scrabble character is supplied", "source": "codesearchnet"}
{"code": "def send_batches(self, batch_list):\n    if isinstance(batch_list, BaseMessage):\n        batch_list = batch_list.SerializeToString()\n    return self._post('/batches', batch_list)", "docstring": "Sends a list of batches to the validator.\n\nArgs:\nbatch_list (:obj:`BatchList`): the list of batches\n\nReturns:\ndict: the json result data, as a dict", "source": "codesearchnet"}
{"code": "def scroll(self, x, y):\n        \n        assert isinstance(x, _INTTYPES), \"x must be an integer, got %s\" % repr(x)\n        assert isinstance(y, _INTTYPES), \"y must be an integer, got %s\" % repr(x)\n        def getSlide(x, length):\n            \n            if x > 0:\n                srcx = 0\n                length -= x\n            elif x < 0:\n                srcx = abs(x)\n                x = 0\n                length -= srcx\n            else:\n                srcx = 0\n            return x, length, srcx\n        def getCover(x, length):\n            \n            cover = (0, length) \n            uncover = None  \n            if x > 0: \n                cover = (x, length - x)\n                uncover = (0, x)\n            elif x < 0: \n                x = abs(x)\n                cover = (0, length - x)\n                uncover = (length - x, x)\n            return cover, uncover\n\n        width, height = self.get_size()\n        if abs(x) >= width or abs(y) >= height:\n            return self.clear() \n\n        \n        coverX, uncoverX = getCover(x, width)\n        coverY, uncoverY = getCover(y, height)\n        \n        \n        \n        \n        \n\n        \n        x, width, srcx = getSlide(x, width)\n        y, height, srcy = getSlide(y, height)\n        self.blit(self, x, y, width, height, srcx, srcy)\n        if uncoverX: \n            self.draw_rect(uncoverX[0], coverY[0], uncoverX[1], coverY[1],\n                           0x20, self._fg, self._bg)\n        if uncoverY: \n            self.draw_rect(coverX[0], uncoverY[0], coverX[1], uncoverY[1],\n                           0x20, self._fg, self._bg)\n        if uncoverX and uncoverY: \n            self.draw_rect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1],\n                           0x20, self._fg, self._bg)", "docstring": "Scroll the contents of the console in the direction of x,y.\n\nUncovered areas will be cleared to the default background color.\nDoes not move the virutal cursor.\n\nArgs:\nx (int): Distance to scroll along the x-axis.\ny (int): Distance to scroll along the y-axis.\n\nReturns:\nIterator[Tuple[int, int]]: An iterator over the (x, y) coordinates\nof any tile uncovered after scrolling.\n\n.. seealso:: :any:`set_colors`", "source": "juraj-google-style"}
{"code": "def get_course_id(self, course_uuid):\n    course_data = self.get('courseguide/course?uuid={uuid}'.format(uuid=(course_uuid or self.course_id)), params=None)\n    try:\n        return course_data['response']['docs'][0]['id']\n    except KeyError:\n        failure_message = 'KeyError in get_course_id - got {0}'.format(course_data)\n        log.exception(failure_message)\n        raise PyLmodUnexpectedData(failure_message)\n    except TypeError:\n        failure_message = 'TypeError in get_course_id - got {0}'.format(course_data)\n        log.exception(failure_message)\n        raise PyLmodUnexpectedData(failure_message)", "docstring": "Get course id based on uuid.\n\nArgs:\nuuid (str): course uuid, i.e. /project/mitxdemosite\n\nRaises:\nPyLmodUnexpectedData: No course data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\nint: numeric course id", "source": "codesearchnet"}
{"code": "def split_data(X, y, ratio=(0.8, 0.1, 0.1)):\n    \n    assert(sum(ratio) == 1 and len(ratio) == 3)\n    X_train, X_rest, y_train, y_rest = train_test_split(\n        X, y, train_size=ratio[0])\n    X_val, X_test, y_val, y_test = train_test_split(\n        X_rest, y_rest, train_size=ratio[1])\n    return X_train, X_val, X_test, y_train, y_val, y_test", "docstring": "Splits data into a training, validation, and test set.\n\nArgs:\nX: text data\ny: data labels\nratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)\n\nReturns:\nsplit data: X_train, X_val, X_test, y_train, y_val, y_test", "source": "juraj-google-style"}
{"code": "class GraniteMoeMoE(nn.Module):\n\n    def __init__(self, config: GraniteMoeConfig):\n        super(GraniteMoeMoE, self).__init__()\n        self.input_size = config.hidden_size\n        self.hidden_size = config.intermediate_size\n        self.activation = ACT2FN[config.hidden_act]\n        self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)\n        self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)\n        self.router = GraniteMoeTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)\n\n    def forward(self, layer_input):\n        \n        bsz, length, emb_size = layer_input.size()\n        layer_input = layer_input.reshape(-1, emb_size)\n        _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n        expert_inputs = layer_input[batch_index]\n        hidden_states = self.input_linear(expert_inputs, expert_size)\n        chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n        expert_outputs = self.output_linear(hidden_states, expert_size)\n        expert_outputs = expert_outputs * batch_gates[:, None]\n        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n        layer_output = zeros.index_add(0, batch_index, expert_outputs)\n        layer_output = layer_output.view(bsz, length, self.input_size)\n        return (layer_output, router_logits)", "docstring": "A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def _process_event(self, event):\n    if ((not event.is_directory) and (not event.src_path.endswith(BATCH_EXTENSION))):\n        self._logger.info('Detected file change: %s', event.src_path)\n        self._batch.process_file(event.src_path)", "docstring": "Process received events.\n\nProcess events received, applying normalization for those\nevents referencing a new or changed file and only if it's\nnot the result of a previous normalization.\n\nArgs:\nevent: Event to process.", "source": "codesearchnet"}
{"code": "def number_of_records_per_hour(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `number_of_records_per_hour`'.format(value))\n    self._number_of_records_per_hour = value", "docstring": "Corresponds to IDD Field `number_of_records_per_hour`\n\nArgs:\nvalue (int): value for IDD Field `number_of_records_per_hour`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _num_relevant(labels, k):\n    if k < 1:\n        raise ValueError(f'Invalid k={k}')\n    with ops.name_scope(None, 'num_relevant', (labels,)) as scope:\n        labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n        if isinstance(labels, sparse_tensor.SparseTensor):\n            return math_ops.minimum(sets.set_size(labels), k, name=scope)\n        num_labels = math_ops.reduce_sum(array_ops.where_v2(math_ops.greater_equal(labels, 0), array_ops.ones_like(labels), array_ops.zeros_like(labels)), axis=-1)\n        return math_ops.minimum(num_labels, k, name=scope)", "docstring": "Computes number of relevant values for each row in labels.\n\nFor labels with shape [D1, ... DN, num_labels], this is the minimum of\n`num_labels` and `k`.\n\nArgs:\nlabels: `int64` `Tensor` or `SparseTensor` with shape\n[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\ntarget classes for the associated prediction. Commonly, N=1 and `labels`\nhas shape [batch_size, num_labels].\nk: Integer, k for @k metric.\n\nReturns:\nInteger `Tensor` of shape [D1, ... DN], where each value is the number of\nrelevant values for that row.\n\nRaises:\nValueError: if inputs have invalid dtypes or values.", "source": "github-repos"}
{"code": "def _validate_query_parameters(self, query, action_spec):\n    processed_params = []\n    for (param_name, param_value) in query.items():\n        if (param_name in action_spec['parameters'].keys()):\n            processed_params.append(param_name)\n            if (action_spec['parameters'][param_name]['type'] == 'array'):\n                if (not isinstance(param_value, list)):\n                    return False\n                else:\n                    for i in param_value:\n                        if (not self.check_type(i, action_spec['parameters'][param_name]['items']['type'])):\n                            return False\n            elif (not self.check_type(param_value, action_spec['parameters'][param_name]['type'])):\n                return False\n    if (not all(((param in processed_params) for (param, spec) in action_spec['parameters'].items() if ((spec['in'] == 'query') and ('required' in spec) and spec['required'])))):\n        return False\n    return True", "docstring": "Check the query parameter for the action specification.\n\nArgs:\nquery: query parameter to check.\naction_spec: specification of the action.\n\nReturns:\nTrue if the query is valid.", "source": "codesearchnet"}
{"code": "def _get_left_right_blocks(x):\n  \n  (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,\n   x_memory_flange_w, depth) = common_layers.shape_list(x)\n  x_left_right_blocks = tf.slice(x,\n                                 [0, 1, 0, 0, 0, 0],\n                                 [-1, x_num_outer_h_blocks-2, -1, -1,\n                                  -1, -1])\n  num_blocks_h = (x_num_outer_h_blocks-2)\n  x_left_right_blocks = tf.reshape(x_left_right_blocks,\n                                   [-1,\n                                    num_blocks_h,\n                                    2, x_num_outer_w_blocks,\n                                    x_memory_flange_h,\n                                    x_memory_flange_w, depth])\n  x_left_right_blocks = tf.transpose(x_left_right_blocks,\n                                     [0, 1, 3, 2, 4, 5, 6])\n  x_left_right_blocks = tf.reshape(x_left_right_blocks,\n                                   [-1, num_blocks_h,\n                                    x_num_outer_w_blocks, 2*x_memory_flange_h,\n                                    x_memory_flange_w, depth])\n  \n  x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks)\n\n  return x_left_blocks, x_right_blocks", "docstring": "Helper function. Assumes that memory_flange is half of query sizes.\n\nThis function splits the tensor of width 'n' into two halves, where the\nfirst half gets the width indices 0, 2, 4.. and the second half gets the\nwidth indices 3, 5, ... We also fuse two blocks along the h dimension.\n\nArgs:\nx: a 6-d tensor.\n\nReturns:\nx_left_blocks, x_right_blocks: Two 6-d tensors", "source": "juraj-google-style"}
{"code": "def _from_record(data):\n    \n    if isinstance(data, dict):\n      return Schema._from_dict_record(data)\n    elif isinstance(data, list):\n      return Schema._from_list_record(data)\n    else:\n      raise Exception('Cannot create a schema from record %s' % str(data))", "docstring": "Infer a BigQuery table schema from a list of fields or a dictionary. The typeof the elements\nis used. For a list, the field names are simply 'Column1', 'Column2', etc.\n\nArgs:\ndata: The list of fields or dictionary.\nReturns:\nA list of dictionaries containing field 'name' and 'type' entries, suitable for use in a\nBigQuery Tables resource schema.", "source": "juraj-google-style"}
{"code": "def create_exponential(num_finite_buckets, growth_factor, scale):\n    if (num_finite_buckets <= 0):\n        raise ValueError(_BAD_NUM_FINITE_BUCKETS)\n    if (growth_factor <= 1.0):\n        raise ValueError((_BAD_FLOAT_ARG % (u'growth factor', 1.0)))\n    if (scale <= 0.0):\n        raise ValueError((_BAD_FLOAT_ARG % (u'scale', 0.0)))\n    return sc_messages.Distribution(bucketCounts=([0] * (num_finite_buckets + 2)), exponentialBuckets=sc_messages.ExponentialBuckets(numFiniteBuckets=num_finite_buckets, growthFactor=growth_factor, scale=scale))", "docstring": "Creates a new instance of distribution with exponential buckets\n\nArgs:\nnum_finite_buckets (int): initializes number of finite buckets\ngrowth_factor (float): initializes the growth factor\nscale (float): initializes the scale\n\nReturn:\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`\n\nRaises:\nValueError: if the args are invalid for creating an instance", "source": "codesearchnet"}
{"code": "def add_http_endpoint(self, url, request_handler):\n        \n        self.app.router.add_route('*', url, request_handler)", "docstring": "This method provides a programatic way of added invidual routes\nto the http server.\n\nArgs:\nurl (str): the url to be handled by the request_handler\nrequest_handler (nautilus.network.RequestHandler): The request handler", "source": "juraj-google-style"}
{"code": "def _get_radius(site):\n        \n        if hasattr(site.specie, 'oxi_state'):\n            el = site.specie.element\n            oxi = site.specie.oxi_state\n\n            if oxi == 0:\n                return CrystalNN._get_default_radius(site)\n\n            elif oxi in el.ionic_radii:\n                return el.ionic_radii[oxi]\n\n            \n            elif int(math.floor(oxi)) in el.ionic_radii and \\\n                    int(math.ceil(oxi)) in el.ionic_radii:\n                oxi_low = el.ionic_radii[int(math.floor(oxi))]\n                oxi_high = el.ionic_radii[int(math.ceil(oxi))]\n                x = oxi - int(math.floor(oxi))\n                return (1 - x) * oxi_low + x * oxi_high\n\n            elif oxi > 0 and el.average_cationic_radius > 0:\n                return el.average_cationic_radius\n\n            elif oxi < 0 and el.average_anionic_radius > 0:\n                return el.average_anionic_radius\n\n        else:\n            warnings.warn(\"CrystalNN: distance cutoffs set but no oxidation \"\n                          \"states specified on sites! For better results, set \"\n                          \"the site oxidation states in the structure.\")\n        return 0", "docstring": "An internal method to get the expected radius for a site with\noxidation state.\nArgs:\nsite: (Site)\n\nReturns:\nOxidation-state dependent radius: ionic, covalent, or atomic.\nReturns 0 if no oxidation state or appropriate radius is found.", "source": "juraj-google-style"}
{"code": "def bump(component='patch', exact=None):\n    \n    \n    old_ver = current()\n\n    if exact is None:\n        new_ver = _bump_version(old_ver, component)\n    else:\n        new_ver = exact\n\n    write(new_ver)\n    return old_ver, new_ver", "docstring": "Bump the given version component.\n\nArgs:\ncomponent (str):\nWhat part of the version should be bumped. Can be one of:\n\n- major\n- minor\n- patch\n\nexact (str):\nThe exact version that should be set instead of bumping the current\none.\n\nReturns:\ntuple(str, str): A tuple of old and bumped version.", "source": "juraj-google-style"}
{"code": "def _parse_hostname(self):\n    value = 'localhost'\n    match = re.search('^hostname ([^\\\\s]+)$', self.config, re.M)\n    if match:\n        value = match.group(1)\n    return dict(hostname=value)", "docstring": "Parses the global config and returns the hostname value\n\nReturns:\ndict: The configured value for hostname.  The returned dict\nobject is intended to be merged into the resource dict", "source": "codesearchnet"}
{"code": "def distorted_inputs(data_dir, batch_size):\n  \n  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n               for i in xrange(1, 6)]\n  for f in filenames:\n    if not tf.gfile.Exists(f):\n      raise ValueError('Failed to find file: ' + f)\n\n  \n  filename_queue = tf.train.string_input_producer(filenames)\n\n  \n  read_input = read_cifar10(filename_queue)\n  reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n  height = IMAGE_SIZE\n  width = IMAGE_SIZE\n\n  \n  \n\n  \n  distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n  \n  distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n  \n  \n  distorted_image = tf.image.random_brightness(distorted_image,\n                                               max_delta=63)\n  distorted_image = tf.image.random_contrast(distorted_image,\n                                             lower=0.2, upper=1.8)\n\n  \n  float_image = tf.image.per_image_standardization(distorted_image)\n\n  \n  float_image.set_shape([height, width, 3])\n  read_input.label.set_shape([1])\n\n  \n  min_fraction_of_examples_in_queue = 0.4\n  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n                           min_fraction_of_examples_in_queue)\n  print ('Filling queue with %d CIFAR images before starting to train. '\n         'This will take a few minutes.' % min_queue_examples)\n\n  \n  return _generate_image_and_label_batch(float_image, read_input.label,\n                                         min_queue_examples, batch_size,\n                                         shuffle=True)", "docstring": "Construct distorted input for CIFAR training using the Reader ops.\n\nArgs:\ndata_dir: Path to the CIFAR-10 data directory.\nbatch_size: Number of images per batch.\n\nReturns:\nimages: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\nlabels: Labels. 1D tensor of [batch_size] size.", "source": "juraj-google-style"}
{"code": "def segs(self, word):\n        \n        return [m.group('all') for m in self.seg_regex.finditer(word)]", "docstring": "Returns a list of segments from a word\n\nArgs:\nword (unicode): input word as Unicode IPA string\n\nReturns:\nlist: list of strings corresponding to segments found in `word`", "source": "juraj-google-style"}
{"code": "def run_iperf_client(self, server_host, extra_args=''):\n    out = self.adb.shell('iperf3 -c %s %s' % (server_host, extra_args))\n    clean_out = str(out, 'utf-8').strip().split('\\n')\n    if 'error' in clean_out[0].lower():\n        return (False, clean_out)\n    return (True, clean_out)", "docstring": "Start iperf client on the device.\n\nReturn status as true if iperf client start successfully.\nAnd data flow information as results.\n\nArgs:\nserver_host: Address of the iperf server.\nextra_args: A string representing extra arguments for iperf client,\ne.g. '-i 1 -t 30'.\n\nReturns:\nstatus: true if iperf client start successfully.\nresults: results have data flow information", "source": "github-repos"}
{"code": "def batch_workflow_status(self, batch_workflow_id):\n    self.logger.debug(('Get status of batch workflow: ' + batch_workflow_id))\n    url = ('%(base_url)s/batch_workflows/%(batch_id)s' % {'base_url': self.base_url, 'batch_id': batch_workflow_id})\n    r = self.gbdx_connection.get(url)\n    return r.json()", "docstring": "Checks GBDX batch workflow status.\n\nArgs:\nbatch workflow_id (str): Batch workflow id.\n\nReturns:\nBatch Workflow status (str).", "source": "codesearchnet"}
{"code": "def rtt_write(self, buffer_index, data):\n    buf_size = len(data)\n    buf = (ctypes.c_ubyte * buf_size)(*bytearray(data))\n    bytes_written = self._dll.JLINK_RTTERMINAL_Write(buffer_index, buf, buf_size)\n    if (bytes_written < 0):\n        raise errors.JLinkRTTException(bytes_written)\n    return bytes_written", "docstring": "Writes data to the RTT buffer.\n\nThis method will write at most len(data) bytes to the specified RTT\nbuffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\nbuffer_index (int): the index of the RTT buffer to write to\ndata (list): the list of bytes to write to the RTT buffer\n\nReturns:\nThe number of bytes successfully written to the RTT buffer.\n\nRaises:\nJLinkRTTException if the underlying JLINK_RTTERMINAL_Write call fails.", "source": "codesearchnet"}
{"code": "def clean_decodes(ids, vocab_size, eos_id=1):\n  \n  ret = []\n  for i in ids:\n    if i == eos_id:\n      break\n    if i >= vocab_size:\n      break\n    ret.append(int(i))\n  return ret", "docstring": "Stop at EOS or padding or OOV.\n\nArgs:\nids: a list of integers\nvocab_size: an integer\neos_id: EOS id\n\nReturns:\na list of integers", "source": "juraj-google-style"}
{"code": "def _is_valid_netmask(self, netmask):\n        \n        mask = netmask.split('.')\n        if len(mask) == 4:\n            try:\n                for x in mask:\n                    if int(x) not in self._valid_mask_octets:\n                        return False\n            except ValueError:\n                \n                return False\n            for idx, y in enumerate(mask):\n                if idx > 0 and y > mask[idx - 1]:\n                    return False\n            return True\n        try:\n            netmask = int(netmask)\n        except ValueError:\n            return False\n        return 0 <= netmask <= self._max_prefixlen", "docstring": "Verify that the netmask is valid.\n\nArgs:\nnetmask: A string, either a prefix or dotted decimal\nnetmask.\n\nReturns:\nA boolean, True if the prefix represents a valid IPv4\nnetmask.", "source": "juraj-google-style"}
{"code": "def _cardinality_test_combinations():\n\n    def _reduce_cases_to_combinations(result, case):\n        name, dataset_fn, sharding_policy, expected_result = case\n        return result + combinations.combine(dataset_fn=combinations.NamedObject(name, dataset_fn), sharding_policy=sharding_policy, expected_result=expected_result)\n\n    def _cases_to_combinations(cases):\n        return functools.reduce(_reduce_cases_to_combinations, cases, [])\n\n    def _infinite_dataset_with_hint_shard():\n        return dataset_ops.Dataset.range(10).shard(distribute.SHARD_HINT, distribute.SHARD_HINT).repeat()\n\n    def _empty_dataset_with_hint_shard():\n        return dataset_ops.Dataset.range(0).shard(distribute.SHARD_HINT, distribute.SHARD_HINT)\n    v2_only_cases = [('NoShardingInfinite', lambda: dataset_ops.Dataset.range(10).repeat(), data_service_ops.ShardingPolicy.OFF, dataset_ops.INFINITE), ('DynamicShardingInfinite', lambda: dataset_ops.Dataset.range(5).repeat(), data_service_ops.ShardingPolicy.DYNAMIC, dataset_ops.INFINITE), ('DataShardingInfinite', lambda: dataset_ops.Dataset.range(10).repeat(), data_service_ops.ShardingPolicy.DATA, dataset_ops.INFINITE), ('NoShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.OFF, 0), ('DynamicShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.DYNAMIC, 0), ('DataShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.DATA, 0), ('FileOrDataShardingZero', lambda: dataset_ops.Dataset.range(0), data_service_ops.ShardingPolicy.FILE_OR_DATA, 0), ('HintShardingZero', _empty_dataset_with_hint_shard, data_service_ops.ShardingPolicy.HINT, dataset_ops.UNKNOWN)]\n    v1_and_v2_cases = [('Finite', lambda: dataset_ops.Dataset.range(10), data_service_ops.ShardingPolicy.OFF, dataset_ops.UNKNOWN), ('FileOrDataShardingUnknown', lambda: dataset_ops.Dataset.range(10).repeat(), data_service_ops.ShardingPolicy.FILE_OR_DATA, dataset_ops.UNKNOWN), ('HintShardingUnknown', _infinite_dataset_with_hint_shard, data_service_ops.ShardingPolicy.HINT, dataset_ops.UNKNOWN)]\n    v2_only_combinations = combinations.times(combinations.combine(tf_api_version=2, mode=['eager', 'graph']), _cases_to_combinations(v2_only_cases))\n    v1_and_v2_combinations = combinations.times(combinations.combine(tf_api_version=[1, 2], mode=['eager', 'graph']), _cases_to_combinations(v1_and_v2_cases))\n    return v2_only_combinations + v1_and_v2_combinations", "docstring": "Generate test combinations for data service cardinality tests.\n\nWe test only V2 combinations for the infinite and 0 cases because the `map`\ntransformation for compression makes the cardinality unknown in TF1.\n\nReturns:\ntest combinations.", "source": "github-repos"}
{"code": "def segs_safe(self, word):\n        \n        segs = []\n        while word:\n            m = self.seg_regex.match(word)\n            if m:\n                segs.append(m.group(1))\n                word = word[len(m.group(1)):]\n            else:\n                segs.append(word[0])\n                word = word[1:]\n        return segs", "docstring": "Return a list of segments (as strings) from a word\n\nCharacters that are not valid segments are included in the list as\nindividual characters.\n\nArgs:\nword (unicode): word as an IPA string\n\nReturns:\nlist: list of Unicode IPA strings corresponding to segments in\n`word`", "source": "juraj-google-style"}
{"code": "def generate(organization, package, destination):\n    \n    gen = ResourceGenerator(organization, package)\n\n    tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)\n    try:\n        tmp.write(gen.conf())\n    finally:\n        tmp.close()\n\n    shutil.copy(tmp.name, os.path.join(destination, 'conf.py'))\n\n    tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)\n    try:\n        tmp.write(gen.makefile())\n    finally:\n        tmp.close()\n\n    shutil.copy(tmp.name, os.path.join(destination, 'Makefile'))", "docstring": "Generates the Sphinx configuration and Makefile.\n\nArgs:\norganization (str): the organization name.\npackage (str): the package to be documented.\ndestination (str): the destination directory.", "source": "juraj-google-style"}
{"code": "def _classify_segment(self, address, length):\n    end_address = ((address + length) - 1)\n    (_, start_seg) = self._find_address(address)\n    (_, end_seg) = self._find_address(end_address)\n    if ((start_seg is not None) or (end_seg is not None)):\n        raise ArgumentError('Overlapping segments are not yet supported', address=address, length=length)\n    return DisjointSegment()", "docstring": "Determine how a new data segment fits into our existing world\n\nParams:\naddress (int): The address we wish to classify\nlength (int): The length of the segment\n\nReturns:\nint: One of SparseMemoryMap.prepended", "source": "codesearchnet"}
{"code": "def abort(self, abort_message=''):\n    \n    \n    \n    if (self.async and self._root_pipeline_key == self._pipeline_key and\n        not self.try_cancel()):\n      \n      \n      return False\n    else:\n      return self._context.begin_abort(\n          self._root_pipeline_key, abort_message=abort_message)", "docstring": "Mark the entire pipeline up to the root as aborted.\n\nNote this should only be called from *outside* the context of a running\npipeline. Synchronous and generator pipelines should raise the 'Abort'\nexception to cause this behavior during execution.\n\nArgs:\nabort_message: Optional message explaining why the abort happened.\n\nReturns:\nTrue if the abort signal was sent successfully; False if the pipeline\ncould not be aborted for any reason.", "source": "juraj-google-style"}
{"code": "def __init__(self, dims):\n        \n        \n        if dims is None:\n            self._dims = None\n        elif isinstance(dims, compat.bytes_or_text_types):\n            raise TypeError(\n                \"A string has ambiguous TensorShape, please wrap in a \"\n                \"list or convert to an int: %s\" % dims\n            )\n        elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):\n            if dims.unknown_rank:\n                self._dims = None\n            else:\n                self._dims = [\n                    \n                    as_dimension(dim.size if dim.size != -1 else None)\n                    for dim in dims.dim\n                ]\n        elif isinstance(dims, TensorShape):\n            self._dims = dims.dims\n        else:\n            try:\n                dims_iter = iter(dims)\n            except TypeError:\n                \n                self._dims = [as_dimension(dims)]\n            else:\n                \n                self._dims = [as_dimension(d) for d in dims_iter]\n        self._ndims = None", "docstring": "Creates a new TensorShape with the given dimensions.\n\nArgs:\ndims: A list of Dimensions, or None if the shape is unspecified.\nDEPRECATED: A single integer is treated as a singleton list.\n\nRaises:\nTypeError: If dims cannot be converted to a list of dimensions.", "source": "juraj-google-style"}
{"code": "def Update(self, other, callback):\n    self.conditions.update(other.conditions)\n    self._Register(other.conditions, callback)", "docstring": "Adds existing triggers to this set, optionally rebuilding the registry.\n\nUsed to aggregate trigger methods from Probes to Methods to Checks.\n\nArgs:\nother: Another Triggers object.\ncallback: Registers all the updated triggers to the specified function.", "source": "codesearchnet"}
{"code": "def _crop(image, offset_height, offset_width, crop_height, crop_width):\n    original_shape = tf.shape(image)\n    rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])\n    with tf.control_dependencies([rank_assertion]):\n        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])\n    size_assertion = tf.Assert(tf.logical_and(tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ['Crop size greater than the image size.'])\n    offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))\n    with tf.control_dependencies([size_assertion]):\n        image = tf.slice(image, offsets, cropped_shape)\n    return tf.reshape(image, cropped_shape)", "docstring": "Crops the given image using the provided offsets and sizes.\n\nNote that the method doesn't assume we know the input image size but it does\nassume we know the input image rank.\n\nArgs:\nimage: `Tensor` image of shape [height, width, channels].\noffset_height: `Tensor` indicating the height offset.\noffset_width: `Tensor` indicating the width offset.\ncrop_height: the height of the cropped image.\ncrop_width: the width of the cropped image.\n\nReturns:\nthe cropped (and resized) image.\n\nRaises:\nInvalidArgumentError: if the rank is not 3 or if the image dimensions are\nless than the crop size.", "source": "codesearchnet"}
{"code": "def on_skip(self, record):", "docstring": "A function that is executed upon a test being skipped.\n\nImplementation is optional.\n\nArgs:\nrecord: records.TestResultRecord, a copy of the test record for\nthis test, containing all information of the test execution\nincluding exception objects.", "source": "github-repos"}
{"code": "def check(self, dsm, **kwargs):\n    logger.debug(('Entities = %s' % dsm.entities))\n    messages = []\n    code_clean = True\n    threshold = kwargs.pop('threshold', 1)\n    (rows, _) = dsm.size\n    for i in range(0, rows):\n        if (dsm.data[i][0] > threshold):\n            messages.append(('Number of issues (%d) in module %s > threshold (%d)' % (dsm.data[i][0], dsm.entities[i], threshold)))\n            code_clean = False\n    return (code_clean, '\\n'.join(messages))", "docstring": "Check code clean.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\n\nReturns:\nbool, str: True if code clean else False, messages", "source": "codesearchnet"}
{"code": "def _get_sorted_methods(self, methods):\n    \n    if not methods:\n      return methods\n\n    \n    def _sorted_methods_comparison(method_info1, method_info2):\n      \n\n      def _score_path(path):\n        \n        score = 0\n        parts = path.split('/')\n        for part in parts:\n          score <<= 1\n          if not part or part[0] != '{':\n            \n            score += 1\n        \n        \n        \n        score <<= 31 - len(parts)\n        return score\n\n      \n      path_score1 = _score_path(method_info1[1].get('path', ''))\n      path_score2 = _score_path(method_info2[1].get('path', ''))\n      if path_score1 != path_score2:\n        return path_score2 - path_score1\n\n      \n      path_result = cmp(method_info1[1].get('path', ''),\n                        method_info2[1].get('path', ''))\n      if path_result != 0:\n        return path_result\n\n      \n      method_result = cmp(method_info1[1].get('httpMethod', ''),\n                          method_info2[1].get('httpMethod', ''))\n      return method_result\n\n    return sorted(methods.items(), _sorted_methods_comparison)", "docstring": "Get a copy of 'methods' sorted the way they would be on the live server.\n\nArgs:\nmethods: JSON configuration of an API's methods.\n\nReturns:\nThe same configuration with the methods sorted based on what order\nthey'll be checked by the server.", "source": "juraj-google-style"}
{"code": "def clip_to_image_size(bounding_boxes, height=None, width=None, bounding_box_format='xyxy'):\n    box_utils = BoundingBox()\n    if backend_utils.in_tf_graph():\n        box_utils.backend.set_backend('tensorflow')\n    bounding_boxes = box_utils.clip_to_image_size(bounding_boxes, height=height, width=width, bounding_box_format=bounding_box_format)\n    box_utils.backend.reset()\n    return bounding_boxes", "docstring": "Clips bounding boxes to be within the image dimensions.\nArgs:\nbounding_boxes: A dictionary with 'boxes' shape `(N, 4)` or\n`(batch, N, 4)` and 'labels' shape `(N,)` or `(batch, N,)`.\nheight: Image height.\nwidth: Image width.\nbounding_box_format: The format of the input bounding boxes. Defaults to\n`\"xyxy\"`.\n\nReturns:\nClipped bounding boxes.\n\nExample:\n```python\nboxes = {\"boxes\": np.array([[-10, -20, 150, 160], [50, 40, 70, 80]]),\n\"labels\": np.array([0, 1])}\nclipped_boxes = keras.utils.bounding_boxes.clip_to_image_size(\nboxes, height=100, width=120,\n)\n# Output will have boxes clipped to the image boundaries, and labels\n# potentially adjusted if the clipped area becomes zero\n```", "source": "github-repos"}
{"code": "def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):\n        \n        \n        if type_ is not Any:\n            return \"{}.{}[{}, {}]\".format(\n                cls.__module__,\n                cls.__name__,\n                cls._get_fullname(type_),\n                keyfunc_name,\n            )\n        return \"{}.{}[{}]\".format(cls.__module__, cls.__name__, keyfunc_name)", "docstring": "Return a class representation using the slice parameters.\n\nArgs:\ntype_: The type the class was sliced with.\nbound: The boundaries specified for the values of type_.\nkeyfunc: The comparison function used to check the value\nboundaries.\nkeyfunc_name: The name of keyfunc.\n\nReturns:\nA string representing the class.", "source": "juraj-google-style"}
{"code": "def _add_jump_node(self, ast_node, guards):\n    node = self._add_new_node(ast_node)\n    self.leaves = set()\n    self.finally_sections[node] = guards\n    return node", "docstring": "Grows the graph by adding a jump node.\n\nJump nodes are added to the current leaf set, and the leaf set becomes\nempty. If the jump node is the last in a cond section, then it may be added\nback to the leaf set by a separate mechanism.\n\nArgs:\nast_node: ast.AST\nguards: Tuple[ast.AST, ...], the finally sections active for this node\n\nReturns:\nNode", "source": "github-repos"}
{"code": "def translate_index(index_name):\n    uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())\n    if (not uuid):\n        try:\n            index_info = globus_sdk.SearchClient().get_index(index_name).data\n            if (not isinstance(index_info, dict)):\n                raise ValueError('Multiple UUIDs possible')\n            uuid = index_info.get('id', index_name)\n        except Exception:\n            uuid = index_name\n    return uuid", "docstring": "Translate a known Globus Search index into the index UUID.\nThe UUID is the proper way to access indices, and will eventually be the only way.\nThis method will return names it cannot disambiguate.\n\nArguments:\nindex_name (str): The name of the index.\n\nReturns:\nstr: The UUID of the index. If the index is not known and is not unambiguous,\nthis will be the ``index_name`` unchanged instead.", "source": "codesearchnet"}
{"code": "def guass(self, mu: float, sigma: float) -> float:\n    return float(lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma))", "docstring": "Return a random number using Gaussian distribution.\n\nArgs:\nmu (float): The median returned value.\nsigma (float): The standard deviation.\n\nReturns:\nfloat: A random float.", "source": "codesearchnet"}
{"code": "def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3):\n    retries = 0\n    while (retries < retry_count):\n        try:\n            response = request(url=_METADATA_IP_ROOT, method='GET', headers=_METADATA_HEADERS, timeout=timeout)\n            metadata_flavor = response.headers.get(_METADATA_FLAVOR_HEADER)\n            return ((response.status == http_client.OK) and (metadata_flavor == _METADATA_FLAVOR_VALUE))\n        except exceptions.TransportError:\n            _LOGGER.info('Compute Engine Metadata server unavailable onattempt %s of %s', (retries + 1), retry_count)\n            retries += 1\n    return False", "docstring": "Checks to see if the metadata server is available.\n\nArgs:\nrequest (google.auth.transport.Request): A callable used to make\nHTTP requests.\ntimeout (int): How long to wait for the metadata server to respond.\nretry_count (int): How many times to attempt connecting to metadata\nserver using above timeout.\n\nReturns:\nbool: True if the metadata server is reachable, False otherwise.", "source": "codesearchnet"}
{"code": "def _generate_malformed_query(data):\n    if isinstance(data, six.text_type):\n        query_str = data.replace(':', ' ')\n    else:\n        query_str = ' '.join([word.strip(':') for word in data.children])\n    return {'simple_query_string': {'fields': ['_all'], 'query': query_str}}", "docstring": "Generates a query on the ``_all`` field with all the query content.\n\nArgs:\ndata (six.text_type or list): The query in the format of ``six.text_type`` (when used from parsing driver)\nor ``list`` when used from withing the ES visitor.", "source": "codesearchnet"}
{"code": "def _handle_captcha(captcha_data, message=''): \n        \n        from tempfile import NamedTemporaryFile\n        tmpf = NamedTemporaryFile(suffix='.png')\n        tmpf.write(captcha_data)\n        tmpf.flush()\n        captcha_text = input('Please take a look at the captcha image \"%s\" and provide the code:' % tmpf.name)\n        tmpf.close()\n        return captcha_text", "docstring": "Called when a captcha must be solved\nWrites the image to a temporary file and asks the user to enter the code.\n\nArgs:\ncaptcha_data: Bytestring of the PNG captcha image.\nmessage: Optional. A message from Steam service.\n\nReturns:\nA string containing the solved captcha code.", "source": "juraj-google-style"}
{"code": "def info(self, user_id):\n        \n        resp = self._rtm_client.get('v1/user.info?user_id={}'.format(user_id))\n        if resp.is_fail():\n            raise RTMServiceError('Failed to get user information', resp)\n\n        return resp.data['result']", "docstring": "Gets user information by user id\n\nArgs:\nuser_id(int): the id of user\n\nReturns:\nUser\n\nThrows:\nRTMServiceError when request failed", "source": "juraj-google-style"}
{"code": "def __init__(self, options, queue_item):\n        \n\n        self.options = options\n        self.queue_item = queue_item", "docstring": "Construct the HTMLSoupLinkScraper instance.\n\nArgs:\noptions (:class:`nyawc.Options`): The settins/options object.\nqueue_item (:class:`nyawc.QueueItem`): The queue item containing a response the scrape.", "source": "juraj-google-style"}
{"code": "def clean(self, value):\n\t\t\n\n\t\t\n\t\tif value is None and self._optional:\n\t\t\treturn None\n\n\t\t\n\t\tif not isinstance(value, dict):\n\t\t\traise ValueError('value')\n\n\t\t\n\t\treturn {str(self._key.clean(k)):self._node.clean(v) for k,v in iteritems(value)}", "docstring": "Clean\n\nMakes sure both the key and value are properly stored in their correct\nrepresentation\n\nArguments:\nvalue {mixed} -- The value to clean\n\nRaises:\nValueError\n\nReturns:\nmixed", "source": "juraj-google-style"}
{"code": "def calculate_weights(correlation_matrix, min_wt):\n    \n    \n    np.fill_diagonal(correlation_matrix.values, np.nan)\n\n    \n    correlation_matrix = correlation_matrix.clip(lower=0)\n\n    \n    raw_weights = correlation_matrix.mean(axis=1)\n\n    \n    raw_weights = raw_weights.clip(lower=min_wt)\n\n    \n    weights = raw_weights / sum(raw_weights)\n\n    return raw_weights.round(rounding_precision), weights.round(rounding_precision)", "docstring": "Calculate a weight for each profile based on its correlation to other\nreplicates. Negative correlations are clipped to 0, and weights are clipped\nto be min_wt at the least.\n\nArgs:\ncorrelation_matrix (pandas df): Correlations between all replicates\nmin_wt (float): Minimum raw weight when calculating weighted average\n\nReturns:\nraw weights (pandas series):  Mean correlation to other replicates\nweights (pandas series): raw_weights normalized such that they add to 1", "source": "juraj-google-style"}
{"code": "def allow_inbound_connection(self):\n    LOGGER.debug('Determining whether inbound connection should be allowed. num connections: %s max %s', len(self._connections), self._max_incoming_connections)\n    return (self._max_incoming_connections >= len(self._connections))", "docstring": "Determines if an additional incoming network connection\nshould be permitted.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def shannon_entropy(time_series):\n    \n\n    \n    if not isinstance(time_series, str):\n        time_series = list(time_series)\n\n    \n    data_set = list(set(time_series))\n    freq_list = []\n    for entry in data_set:\n        counter = 0.\n        for i in time_series:\n            if i == entry:\n                counter += 1\n        freq_list.append(float(counter) / len(time_series))\n\n    \n    ent = 0.0\n    for freq in freq_list:\n        ent += freq * np.log2(freq)\n    ent = -ent\n    return ent", "docstring": "Return the Shannon Entropy of the sample data.\n\nArgs:\ntime_series: Vector or string of the sample data\n\nReturns:\nThe Shannon Entropy as float value", "source": "juraj-google-style"}
{"code": "def bytes_to_readable_str(num_bytes, include_b=False):\n    if (num_bytes is None):\n        return str(num_bytes)\n    if (num_bytes < 1024):\n        result = ('%d' % num_bytes)\n    elif (num_bytes < 1048576):\n        result = ('%.2fk' % (num_bytes / float((1 << 10))))\n    elif (num_bytes < 1073741824):\n        result = ('%.2fM' % (num_bytes / float((1 << 20))))\n    else:\n        result = ('%.2fG' % (num_bytes / float((1 << 30))))\n    if include_b:\n        result += 'B'\n    return result", "docstring": "Generate a human-readable string representing number of bytes.\n\nThe units B, kB, MB and GB are used.\n\nArgs:\nnum_bytes: (`int` or None) Number of bytes.\ninclude_b: (`bool`) Include the letter B at the end of the unit.\n\nReturns:\n(`str`) A string representing the number of bytes in a human-readable way,\nincluding a unit at the end.", "source": "codesearchnet"}
{"code": "def write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name, prefixes):\n    log = logging.getLogger(PROGRAM_NAME)\n    comment = '\n    tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))\n    log.debug('going to write to %s', tm_file)\n    try:\n        with open(tm_file, 'w') as tmpf:\n            tmpf.write('\n            tmpf.write('{c}\\n'.format(c=comment))\n            tmpf.write('define {n} =\\n'.format(n=variable_name))\n            tmpf.write('{s}[\\n'.format(s=(4 * ' ')))\n            tmpf.write(',\\n'.join([((' ' * 8) + n) for n in prefixes]))\n            tmpf.write('\\n{s}];\\n'.format(s=(4 * ' ')))\n    except OSError as error:\n        log.critical('failed to write temporary file %s: %s. This is a FATAL error, this exiting main program', tm_file, error)\n        sys.exit(1)\n    else:\n        return tm_file", "docstring": "Write in a temporary file the list of IP-Prefixes.\n\nA failure to create and write the temporary file will exit main program.\n\nArguments:\ndummy_ip_prefix (str): The dummy IP prefix, which must be always\nconfig_file (str): The file name of bird configuration\nvariable_name (str): The name of the variable set in bird configuration\nprefixes (list): The list of IP-Prefixes to write\n\nReturns:\nThe filename of the temporary file", "source": "codesearchnet"}
{"code": "def _resolve_attribute_match(self, match):\n        \n        if match.group(1) == 'cluster':\n            return str(self.cluster_id)\n\n        return self.get(match.group(1), match.group(0))", "docstring": "Replaces a reference to an attribute with the value of the attribute.\n\nArgs:\nmatch (re.match object): A match object containing a match to a reference to an attribute.", "source": "juraj-google-style"}
{"code": "def render_secrets(config_path, secret_path):\n    with open(secret_path, 'r') as s_fh:\n        secret_ini = anyconfig.load(s_fh, ac_parser='ini')\n    with open(config_path, 'r') as c_fh:\n        raw_cfg = c_fh.read()\n    rendered_cfg = anytemplate.renders(raw_cfg, secret_ini, at_engine='jinja2')\n    p_config = ProsperConfig(config_path)\n    local_config = configparser.ConfigParser()\n    local_config.optionxform = str\n    local_config.read_string(rendered_cfg)\n    p_config.local_config = local_config\n    return p_config", "docstring": "combine a jinja template with a secret .ini file\n\nArgs:\nconfig_path (str): path to .cfg file with jinja templating\nsecret_path (str): path to .ini-like secrets file\n\nReturns:\nProsperConfig: rendered configuration object", "source": "codesearchnet"}
{"code": "def flatten(dictionary, separator='.', prefix=''):\n    \n    new_dict = {}\n    for key, value in dictionary.items():\n        new_key = prefix + separator + key if prefix else key\n        if isinstance(value, collections.MutableMapping):\n            new_dict.update(flatten(value, separator, new_key))\n\n        elif isinstance(value, list):\n            new_value = []\n            for item in value:\n                if isinstance(item, collections.MutableMapping):\n                    new_value.append(flatten(item, separator, new_key))\n                else:\n                    new_value.append(item)\n            new_dict[new_key] = new_value\n\n        else:\n            new_dict[new_key] = value\n\n    return new_dict", "docstring": "Flatten the dictionary keys are separated by separator\n\nArguments:\ndictionary {dict} -- The dictionary to be flattened.\n\nKeyword Arguments:\nseparator {str} -- The separator to use (default is '.'). It will\ncrush items with key conflicts.\nprefix {str} -- Used for recursive calls.\n\nReturns:\ndict -- The flattened dictionary.", "source": "juraj-google-style"}
{"code": "def get_mpkg_ids(mpkg):\n    \n    mpkg = _quote(mpkg)\n    package_infos = []\n    base_path = os.path.dirname(mpkg)\n\n    \n    cmd = 'find {0} -name *.pkg'.format(base_path)\n    out = __salt__['cmd.run'](cmd, python_shell=True)\n\n    pkg_files = out.split('\\n')\n    for p in pkg_files:\n        package_infos.extend(get_pkg_id(p))\n\n    return package_infos", "docstring": "Attempt to get the package IDs from a mounted .mpkg file\n\nArgs:\nmpkg (str): The location of the mounted mpkg file\n\nReturns:\nlist: List of package IDs\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' macpackage.get_mpkg_ids /dev/disk2", "source": "juraj-google-style"}
{"code": "def _join_lines(lines):\n    if not lines:\n        return None\n    started = False\n    group_texts = []\n    group_lines = []\n    for line in lines:\n        stripped_line = line.strip()\n        if stripped_line:\n            started = True\n            group_lines.append(stripped_line)\n        elif started:\n            group_text = ' '.join(group_lines)\n            group_texts.append(group_text)\n            group_lines = []\n    if group_lines:\n        group_text = ' '.join(group_lines)\n        group_texts.append(group_text)\n    return '\\n\\n'.join(group_texts)", "docstring": "Joins lines with the appropriate connective whitespace.\n\nThis puts a single space between consecutive lines, unless there's a blank\nline, in which case a full blank line is included.\n\nArgs:\nlines: A list of lines to join.\nReturns:\nA string, the lines joined together.", "source": "github-repos"}
{"code": "def _makedirs(self, path):\n        \n        try:\n            oldmask = os.umask(0)\n            os.makedirs(path, self._conf['dmode'])\n            os.umask(oldmask)\n        except OSError as e:\n            if(e.errno == errno.EACCES):\n                raise Exception('not sufficent permissions to write on fsdb folder: \"{0}\"'.format(path))\n            elif(e.errno == errno.EEXIST):\n                fstat = os.stat(path)\n                if not stat.S_ISDIR(fstat.st_mode):\n                    raise Exception('fsdb folder already exists but it is not a regular folder: \"{0}\"'.format(path))\n                elif not os.access(path, os.R_OK and os.W_OK):\n                    raise Exception('not sufficent permissions to write on fsdb folder: \"{0}\"'.format(path))\n            else:\n                raise e", "docstring": "Make folders recursively for the given path and\ncheck read and write permission on the path\nArgs:\npath -- path to the leaf folder", "source": "juraj-google-style"}
{"code": "def plot_job_history(jobs, interval='year'):\n\n    def get_date(job):\n        'Returns a datetime object from a IBMQJob instance.\\n\\n        Args:\\n            job (IBMQJob): A job.\\n\\n        Returns:\\n            dt: A datetime object.\\n        '\n        return datetime.datetime.strptime(job.creation_date(), '%Y-%m-%dT%H:%M:%S.%fZ')\n    current_time = datetime.datetime.now()\n    if (interval == 'year'):\n        bins = [(current_time - datetime.timedelta(days=((k * 365) / 12))) for k in range(12)]\n    elif (interval == 'month'):\n        bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]\n    elif (interval == 'week'):\n        bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]\n    binned_jobs = ([0] * len(bins))\n    if (interval == 'year'):\n        for job in jobs:\n            for (ind, dat) in enumerate(bins):\n                date = get_date(job)\n                if (date.month == dat.month):\n                    binned_jobs[ind] += 1\n                    break\n            else:\n                continue\n    else:\n        for job in jobs:\n            for (ind, dat) in enumerate(bins):\n                date = get_date(job)\n                if ((date.day == dat.day) and (date.month == dat.month)):\n                    binned_jobs[ind] += 1\n                    break\n            else:\n                continue\n    nz_bins = []\n    nz_idx = []\n    for (ind, val) in enumerate(binned_jobs):\n        if (val != 0):\n            nz_idx.append(ind)\n            nz_bins.append(val)\n    total_jobs = sum(binned_jobs)\n    colors = ['\n    if (interval == 'year'):\n        labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]\n    else:\n        labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]\n    (fig, ax) = plt.subplots(1, 1, figsize=(5, 5))\n    ax.pie(nz_bins[::(- 1)], labels=labels, colors=colors, textprops={'fontsize': 14}, rotatelabels=True, counterclock=False)\n    ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))\n    ax.text(0, 0, total_jobs, horizontalalignment='center', verticalalignment='center', fontsize=26)\n    fig.tight_layout()\n    return fig", "docstring": "Plots the job history of the user from the given list of jobs.\n\nArgs:\njobs (list): A list of jobs with type IBMQjob.\ninterval (str): Interval over which to examine.\n\nReturns:\nfig: A Matplotlib figure instance.", "source": "codesearchnet"}
{"code": "def edges(self, tail_head_iter):\n        \n        edge = self._edge_plain\n        quote = self._quote_edge\n        lines = (edge % (quote(t), quote(h)) for t, h in tail_head_iter)\n        self.body.extend(lines)", "docstring": "Create a bunch of edges.\n\nArgs:\ntail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.", "source": "juraj-google-style"}
{"code": "def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n    if not self._inbound_nodes:\n        raise RuntimeError('The layer has never been called and thus has no defined ' + attr_name + '.')\n    if not len(self._inbound_nodes) > node_index:\n        raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.')\n    values = getattr(self._inbound_nodes[node_index], attr)\n    if isinstance(values, list) and len(values) == 1:\n        return values[0]\n    else:\n        return values", "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node.\n\nThis is used to implement the methods:\n- get_input_shape_at\n- get_output_shape_at\n- get_input_at\netc...\n\nArgs:\nnode_index: Integer index of the node from which\nto retrieve the attribute.\nattr: Exact node attribute name.\nattr_name: Human-readable attribute name, for error messages.\n\nReturns:\nThe layer's attribute `attr` at the node of index `node_index`.\n\nRaises:\nRuntimeError: If the layer has no inbound nodes, or if called in Eager\nmode.\nValueError: If the index provided does not match any node.", "source": "github-repos"}
{"code": "def add_paths_argument(cls, group, argname, dest=None, help_=None):\n    prefixed = ('%s-%s' % (cls.argument_prefix, argname))\n    if (dest is None):\n        dest = prefixed.replace('-', '_')\n        final_dest = dest[(len(cls.argument_prefix) + 1):]\n    else:\n        final_dest = dest\n        dest = ('%s_%s' % (cls.argument_prefix, dest))\n    group.add_argument(('--%s' % prefixed), action='store', nargs='+', dest=dest, help=help_)\n    cls.paths_arguments[dest] = final_dest", "docstring": "Subclasses may call this to expose a paths argument.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nargname: str, the name of the argument, will be namespaced.\ndest: str, similar to the `dest` argument of\n`argparse.ArgumentParser.add_argument`, will be namespaced.\nhelp_: str, similar to the `help` argument of\n`argparse.ArgumentParser.add_argument`.", "source": "codesearchnet"}
{"code": "def prepare_words_list(wanted_words):\n    return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words", "docstring": "Prepends common tokens to the custom word list.\n\nArgs:\nwanted_words: List of strings containing the custom words.\n\nReturns:\nList with the standard silence and unknown tokens added.", "source": "github-repos"}
{"code": "def assignees(self, assignee=None, resource_id=None):\n    if (resource_id is not None):\n        self.resource_id(resource_id)\n    self._request_uri = '{}/assignees'.format(self._request_uri)\n    if (assignee is not None):\n        self._request_uri = '{}/{}'.format(self._request_uri, assignee)", "docstring": "Add an assignee to a Task\n\nGET: /v2/tasks/{uniqueId}/assignees\nGET: /v2/tasks/{uniqueId}/assignees/{assigneeId}\nPOST: /v2/tasks/{uniqueId}/assignees/{assigneeId}\nDELETE: /v2/tasks/{uniqueId}/assignees/{assigneeId}\n\nArgs:\nassignee (Optional [string]): The assignee name.\nresource_id (Optional [string]): The task ID.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, name, data_type_definition, aliases=None, data_type=None,\n      description=None, urls=None):\n    \n    super(StringDefinition, self).__init__(\n        name, data_type_definition, aliases=aliases, data_type=data_type,\n        description=description, urls=urls)\n    self.encoding = 'ascii'", "docstring": "Initializes a string data type definition.\n\nArgs:\nname (str): name.\ndata_type_definition (DataTypeDefinition): string element data type\ndefinition.\naliases (Optional[list[str]]): aliases.\ndata_type (Optional[str]): name of the string element data type.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def get_root_dir_with_all_resources():\n    script_dir = get_data_files_path()\n    directories = [script_dir]\n    data_files_dir = ''\n    while True:\n        candidate_dir = directories[-1]\n        current_directory = _os.path.basename(candidate_dir)\n        if '.runfiles' in current_directory:\n            if len(directories) > 1:\n                data_files_dir = directories[-2]\n            break\n        else:\n            new_candidate_dir = _os.path.dirname(candidate_dir)\n            if new_candidate_dir == candidate_dir:\n                break\n            else:\n                directories.append(new_candidate_dir)\n    return data_files_dir or script_dir", "docstring": "Get a root directory containing all the data attributes in the build rule.\n\nReturns:\nThe path to the specified file present in the data attribute of py_test\nor py_binary. Falls back to returning the same as get_data_files_path if it\nfails to detect a bazel runfiles directory.", "source": "github-repos"}
{"code": "def _send(self, line):\n    if (not line.endswith('\\r\\n')):\n        if line.endswith('\\n'):\n            logger.debug('Fixing bare LF before sending data to socket')\n            line = (line[0:(- 1)] + '\\r\\n')\n        else:\n            logger.debug('Fixing missing CRLF before sending data to socket')\n            line = (line + '\\r\\n')\n    logger.debug(('Client sent: ' + line.rstrip()))\n    self._socket.send(line)", "docstring": "Write a line of data to the server.\n\nArgs:\nline -- A single line of data to write to the socket.", "source": "codesearchnet"}
{"code": "def filter_embeddings(embeddings, vocab, dim):\n    if (not isinstance(embeddings, dict)):\n        return\n    _embeddings = np.zeros([len(vocab), dim])\n    for word in vocab:\n        if (word in embeddings):\n            word_idx = vocab[word]\n            _embeddings[word_idx] = embeddings[word]\n    return _embeddings", "docstring": "Loads word vectors in numpy array.\n\nArgs:\nembeddings (dict): a dictionary of numpy array.\nvocab (dict): word_index lookup table.\n\nReturns:\nnumpy array: an array of word embeddings.", "source": "codesearchnet"}
{"code": "def run_feature_selection(self, df_data, target, idx=0, **kwargs):\n        \n        list_features = list(df_data.columns.values)\n        list_features.remove(target)\n        df_target = pd.DataFrame(df_data[target], columns=[target])\n        df_features = df_data[list_features]\n\n        return self.predict_features(df_features, df_target, idx=idx, **kwargs)", "docstring": "Run feature selection for one node: wrapper around\n``self.predict_features``.\n\nArgs:\ndf_data (pandas.DataFrame): All the observational data\ntarget (str): Name of the target variable\nidx (int): (optional) For printing purposes\n\nReturns:\nlist: scores of each feature relatively to the target", "source": "juraj-google-style"}
{"code": "def push_doc(self, document):\n    msg = self._protocol.create('PUSH-DOC', document)\n    reply = self._send_message_wait_for_reply(msg)\n    if (reply is None):\n        raise RuntimeError('Connection to server was lost')\n    elif (reply.header['msgtype'] == 'ERROR'):\n        raise RuntimeError(('Failed to push document: ' + reply.content['text']))\n    else:\n        return reply", "docstring": "Push a document to the server, overwriting any existing server-side doc.\n\nArgs:\ndocument : (Document)\nA Document to push to the server\n\nReturns:\nThe server reply", "source": "codesearchnet"}
{"code": "def _postprocess_non_flat_outputs(outputs: Any, need_spmd_partitioning: bool) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:\n    flat_outputs = nest.flatten(outputs, expand_composites=True)\n    for i, o in enumerate(flat_outputs):\n        if o is None:\n            flat_outputs[i] = None\n            continue\n        if isinstance(o, ops.Operation):\n            raise ValueError(f'tpu.rewrite does not support Operation as return value in non-flat output structure. You can set returned Operations as control dependencies of returned Tensors so Operations are triggered when Tensors are evaluated. Operation found: \"{o.name}\"')\n        try:\n            o = ops.convert_to_tensor(o)\n        except Exception as e:\n            raise ValueError(f'TPU function return values must all either be Operations or convertible to Tensors. Got error: \"{e}\"')\n        if need_spmd_partitioning:\n            o = array_ops.identity(o)\n            o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True))\n            flat_outputs[i] = array_ops.identity(o)\n        else:\n            with ops.device(o.device if o.device else core(0)):\n                o = array_ops.identity(o)\n                o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True))\n                flat_outputs[i] = array_ops.identity(o)\n    return (flat_outputs, [], outputs)", "docstring": "Validates non-flat outputs, add backs device assignments and other attrs.\n\nArgs:\noutputs: Output from `computation` inside `tpu.rewrite`.\nneed_spmd_partitioning: Whether XLA SPMD partitioning is needed.\n\nReturns:\n- Tensors extracted from outputs.\n- An empty Operations list because Operations are not allowed in non-flat\noutputs.\n- A pack template for use with nest.pack_sequence_as to pack the tensors.", "source": "github-repos"}
{"code": "def pull_doc(self, document):\n    msg = self._protocol.create('PULL-DOC-REQ')\n    reply = self._send_message_wait_for_reply(msg)\n    if (reply is None):\n        raise RuntimeError('Connection to server was lost')\n    elif (reply.header['msgtype'] == 'ERROR'):\n        raise RuntimeError(('Failed to pull document: ' + reply.content['text']))\n    else:\n        reply.push_to_document(document)", "docstring": "Pull a document from the server, overwriting the passed-in document\n\nArgs:\ndocument : (Document)\nThe document to overwrite with server content.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def reorder(\n            miz_file_path: typing.Union[str, Path],\n            target_dir: typing.Union[str, Path],\n            skip_options_file: bool,\n    ):\n        \n\n        miz_file_path = elib.path.ensure_file(miz_file_path)\n        target_dir_path = elib.path.ensure_dir(target_dir, must_exist=False)\n\n        LOGGER.debug('re-ordering miz file: %s', miz_file_path)\n        LOGGER.debug('destination folder: %s', target_dir)\n        LOGGER.debug('%s option file', \"skipping\" if skip_options_file else \"including\")\n\n        if not target_dir_path.exists():\n            LOGGER.debug('creating directory %s', target_dir_path)\n            target_dir_path.mkdir(exist_ok=True)\n\n        with Miz(miz_file_path, overwrite=True) as miz_:\n\n            def mirror_dir(src: Path, dst: Path):\n                \n                LOGGER.debug('mirroring: %s -> %s', src, dst)\n\n                LOGGER.debug('comparing directories')\n                diff_ = dircmp(str(src), str(dst), ignore)\n\n                diff_list = diff_.left_only + diff_.diff_files\n                LOGGER.debug('differences: %s', diff_list)\n\n                for __diff in diff_list:\n                    source = Path(diff_.left, __diff)\n                    target = Path(diff_.right, __diff)\n                    LOGGER.debug('looking at: %s', __diff)\n                    if source.is_dir():\n                        LOGGER.debug('isdir: %s', __diff)\n                        if not target.exists():\n                            LOGGER.debug('creating: %s', __diff)\n                            target.mkdir()\n                        mirror_dir(source, target)\n                    else:\n                        LOGGER.debug('copying: %s', __diff)\n                        shutil.copy2(str(source), diff_.right)\n                for sub in diff_.subdirs.values():\n\n                    mirror_dir(Path(sub.left), Path(sub.right))\n\n            \n            miz_._encode()\n\n            if skip_options_file:\n                ignore = ['options']\n            else:\n                ignore = []\n\n            mirror_dir(Path(miz_.temp_dir), target_dir_path)", "docstring": "Re-orders a miz file into a folder (flattened)\n\nArgs:\nmiz_file_path: source miz file\ntarget_dir: folder to flatten the content into\nskip_options_file: do not re-order option file", "source": "juraj-google-style"}
{"code": "def remove_time_limit_wrapper(env):\n    if isinstance(env, gym.wrappers.TimeLimit):\n        env = env.env\n    env_ = env\n    while isinstance(env_, gym.Wrapper):\n        if isinstance(env_, gym.wrappers.TimeLimit):\n            raise ValueError('Can remove only top-level TimeLimit gym.Wrapper.')\n        env_ = env_.env\n    return env", "docstring": "Removes top level TimeLimit Wrapper.\n\nRemoves TimeLimit Wrapper from top level if exists, throws error if any other\nTimeLimit Wrapper is present in stack.\n\nArgs:\nenv: environment\n\nReturns:\nthe env with removed time limit wrapper.", "source": "codesearchnet"}
{"code": "def format_config(sensor_graph):\n    cmdfile = CommandFile('Config Variables', '1.0')\n    for slot in sorted(sensor_graph.config_database, key=(lambda x: x.encode())):\n        for (conf_var, conf_def) in sorted(sensor_graph.config_database[slot].items()):\n            (conf_type, conf_val) = conf_def\n            if (conf_type == 'binary'):\n                conf_val = ('hex:' + hexlify(conf_val))\n            cmdfile.add('set_variable', slot, conf_var, conf_type, conf_val)\n    return cmdfile.dump()", "docstring": "Extract the config variables from this sensor graph in ASCII format.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format\n\nReturns:\nstr: The ascii output lines concatenated as a single string", "source": "codesearchnet"}
{"code": "def add_keyed(self, value, key, date=None, return_value=False):\n        \n        return self.add(value, date, return_value, key)", "docstring": "Add keyed metrics data to collection.\n\nArgs:\nvalue (str): The value of the metric.\nkey (str): The key value for keyed metrics.\ndate (str, optional): The optional date of the metric.\nreturn_value (bool, default:False): Tell the API to return the updates metric value.\n\nReturn:\ndict: If return_value is True a dict with the current value for the time period\nis returned.", "source": "juraj-google-style"}
{"code": "def _GetAttributeNames(self, data_type_definition):\n    \n    if not data_type_definition:\n      raise errors.FormatError('Missing data type definition')\n\n    attribute_names = []\n    for member_definition in data_type_definition.members:\n      attribute_names.append(member_definition.name)\n\n    return attribute_names", "docstring": "Determines the attribute (or field) names of the members.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nlist[str]: attribute names.\n\nRaises:\nFormatError: if the attribute names cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def applyFeatures(self, new_features, conflict='error', missing='error'):\n    OPTIONS = ['error', 'ignore', 'me', 'other']\n    assert (missing in OPTIONS), 'Invalid value in `missing`.'\n    assert (conflict in OPTIONS), 'Invalid value in `missing`.'\n    self0 = self.clone()\n    if isinstance(new_features, Features):\n        new_features = new_features.features\n    for f in new_features:\n        self0.addFeature(f, conflict=conflict, missing=missing)\n    self.props = self0.props\n    return self", "docstring": "Apply the constrain of the features passed to this instance.\n\n.. warning::\nFeature instances are only considered, that is, SoftFeatures will be\nnot considered.\n\nArgs:\n\n- new_features(Features): features to apply\n- conflict(str): if a property hasn't compatible values/constrains, do:\n- ``\"error\"``: raise exception.\n- ``\"ignore\"``: nothing.\n- ``\"me\"``: preserve the original value.\n- ``\"other\"``: set like the passed feature.\n- missing(str): if a property is missing in some side, do:\n- ``\"error\"``: raise exception.\n- ``\"ignore\"``: nothing.\n- ``\"me\"``: preserve the original value.\n- ``\"other\"``: set like the passed feature.", "source": "codesearchnet"}
{"code": "def quad_genz_keister_18(order):\n    \n    order = sorted(GENZ_KEISTER_18.keys())[order]\n\n    abscissas, weights = GENZ_KEISTER_18[order]\n    abscissas = numpy.array(abscissas)\n    weights = numpy.array(weights)\n\n    weights /= numpy.sum(weights)\n    abscissas *= numpy.sqrt(2)\n\n    return abscissas, weights", "docstring": "Hermite Genz-Keister 18 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_18(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321  0.      1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "juraj-google-style"}
{"code": "def _parse_config(self, requires_cfg=True):\n    if (len(self.config_paths) > 0):\n        try:\n            self._find_config()\n        except BisonError:\n            if (not requires_cfg):\n                return\n            raise\n        try:\n            with open(self.config_file, 'r') as f:\n                parsed = self._fmt_to_parser[self.config_format](f)\n        except Exception as e:\n            raise BisonError('Failed to parse config file: {}'.format(self.config_file)) from e\n        self._full_config = None\n        self._config = parsed", "docstring": "Parse the configuration file, if one is configured, and add it to\nthe `Bison` state.\n\nArgs:\nrequires_cfg (bool): Specify whether or not parsing should fail\nif a config file is not found. (default: True)", "source": "codesearchnet"}
{"code": "def _is_default_hook(default_hook, hook):\n    if (not hasattr(default_hook, '__call__')):\n        raise TypeError('Default hooks for ndb.model.Model must be callable')\n    if (not hasattr(hook, '__call__')):\n        raise TypeError('Hooks must be callable')\n    return (default_hook.im_func is hook.im_func)", "docstring": "Checks whether a specific hook is in its default state.\n\nArgs:\ncls: A ndb.model.Model class.\ndefault_hook: Callable specified by ndb internally (do not override).\nhook: The hook defined by a model class using _post_*_hook.\n\nRaises:\nTypeError if either the default hook or the tested hook are not callable.", "source": "codesearchnet"}
{"code": "def CreateDefaultPartition(client, ad_group_id):\n    ad_group_criterion_service = client.GetService('AdGroupCriterionService', version='v201809')\n    operations = [{'operator': 'ADD', 'operand': {'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': ad_group_id, 'criterion': {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}, 'biddingStrategyConfiguration': {'bids': [{'xsi_type': 'CpcBid', 'bid': {'microAmount': 500000}}]}}}]\n    ad_group_criterion = ad_group_criterion_service.mutate(operations)['value'][0]\n    print(('Ad group criterion with ID \"%d\" in ad group with ID \"%d\" was added.' % (ad_group_criterion['criterion']['id'], ad_group_criterion['adGroupId'])))", "docstring": "Creates a default partition.\n\nArgs:\nclient: an AdWordsClient instance.\nad_group_id: an integer ID for an ad group.", "source": "codesearchnet"}
{"code": "def __init__(self, queue_property=None, length=None):\n        \n        super().__init__()\n        self.queue_property = queue_property\n        self.length = length", "docstring": "Create a QueuePropHeader with the optional parameters below.\n\nArgs:\nqueue_property (~pyof.v0x04.common.queue.QueueProperties):\nThe queue property.\nlength (int): Length of property, including this header.", "source": "juraj-google-style"}
{"code": "def darken(self, amount):\n        \n        hsl = self.to_hsl()\n        hsl.l = self.clamp(hsl.l - amount)\n        return self.from_hsl(hsl)", "docstring": "Darken (reduce the luminance) of this color.\n\nArgs:\namount (float) :\nAmount to reduce the luminance by (clamped above zero)\n\nReturns:\nColor", "source": "juraj-google-style"}
{"code": "def from_verb(cls, verb):\n        \n        pattern = r'^(?P<meta>[A-Z]+)(?P<version>\\d+)(?P<action>[A-Z]+)(?P<arg1>\\d+)?(\\/(?P<arg2>\\d+))?$'\n        try:\n            verb = verb.decode()\n        except AttributeError:\n            pass\n        match = re.match(pattern, verb)\n        if not match:\n            raise SpoolverbError('Invalid spoolverb: {}'.format(verb))\n\n        data = match.groupdict()\n        meta = data['meta']\n        version = data['version']\n        action = data['action']\n        if action == 'EDITIONS':\n            num_editions = data['arg1']\n            return cls(meta=meta, version=version, action=action, num_editions=int(num_editions))\n        elif action == 'LOAN':\n            \n            try:\n                edition_num = int(data['arg1'])\n            except TypeError:\n                edition_num = 0\n            loan_start = data['arg2'][:6]\n            loan_end = data['arg2'][6:]\n            return cls(meta=meta, version=version, action=action, edition_num=int(edition_num),\n                       loan_start=loan_start, loan_end=loan_end)\n        elif action in ['FUEL', 'PIECE', 'CONSIGNEDREGISTRATION']:\n            \n            return cls(meta=meta, version=version, action=action)\n        else:\n            edition_num = data['arg1']\n            return cls(meta=meta, version=version, action=action, edition_num=int(edition_num))", "docstring": "Constructs a :class:`Spoolverb` instance from the string\nrepresentation of the given verb.\n\nArgs:\nverb (str): representation of the verb e.g.:\n``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in\nbinary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``.\n\nReturns:\n:class:`Spoolverb` instance.", "source": "juraj-google-style"}
{"code": "def matches(self, desc):\n    return ((self.metric_name == desc.name) and (self.kind == desc.metricKind) and (self.value_type == desc.valueType))", "docstring": "Determines if a given metric descriptor matches this enum instance\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the\ninstance to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "codesearchnet"}
{"code": "def _step(time, output_ta_t, prev_output, *states):\n    current_input = tuple((ta.read(time) for ta in input_ta))\n    current_input = tf.nest.pack_sequence_as(inputs, current_input)\n    mask_t = masking_fn(time)\n    output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n    flat_output = tf.nest.flatten(output)\n    flat_mask_output = flat_zero_output if zero_output_for_mask else tf.nest.flatten(prev_output)\n    flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n    flat_state = tf.nest.flatten(states)\n    flat_new_state = tf.nest.flatten(new_states)\n    for state, new_state in zip(flat_state, flat_new_state):\n        if isinstance(new_state, tf.Tensor):\n            new_state.set_shape(state.shape)\n    flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n    new_states = tf.nest.pack_sequence_as(new_states, flat_final_state)\n    ta_index_to_write = time if return_all_outputs else 0\n    output_ta_t = tuple((ta.write(ta_index_to_write, out) for ta, out in zip(output_ta_t, flat_new_output)))\n    return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\ntime: Current timestep value.\noutput_ta_t: TensorArray.\nprev_output: tuple of outputs from time - 1.\n*states: List of states.\n\nReturns:\nTuple: `(time + 1, output_ta_t, output) + tuple(new_states)`", "source": "github-repos"}
{"code": "def validate(self, *args, **kwargs):\n    return super(ParameterValidator, self)._validate(*args, **kwargs)", "docstring": "Validate a parameter dict against a parameter schema from an ocrd-tool.json\n\nArgs:\nobj (dict):\nschema (dict):", "source": "codesearchnet"}
{"code": "def generate_sitemap(self, path='sitemap.xml', https=False):\n    sitemap = russell.sitemap.generate_sitemap(self, https=https)\n    self.write_file(path, sitemap)", "docstring": "Generate an XML sitemap.\n\nArgs:\npath (str): The name of the file to write to.\nhttps (bool): If True, links inside the sitemap with relative scheme\n(e.g. example.com/something) will be set to HTTPS. If False (the\ndefault), they will be set to plain HTTP.", "source": "codesearchnet"}
{"code": "def sample_point(input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs) -> torch.Tensor:\n    if point_coordinates.dim() == 3:\n        add_dim = True\n        point_coordinates = point_coordinates.unsqueeze(2)\n    point_features = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs)\n    if add_dim:\n        point_features = point_features.squeeze(3)\n    return point_features", "docstring": "A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors.\n\nArgs:\ninput_features (`torch.Tensor` of shape (batch_size, channels, height, width)):\nA tensor that contains features map on a height * width grid\npoint_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width,:\n2)):\nA tensor that contains [0, 1] * [0, 1] normalized point coordinates\nadd_dim (`bool`):\nboolean value to keep track of added dimension\n\nReturns:\npoint_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels,\nheight_grid, width_grid):\nA tensor that contains features for points in `point_coordinates`.", "source": "github-repos"}
{"code": "def match_main(self, text, pattern, loc):\n    \n    \n    if text == None or pattern == None:\n      raise ValueError(\"Null inputs. (match_main)\")\n\n    loc = max(0, min(loc, len(text)))\n    if text == pattern:\n      \n      return 0\n    elif not text:\n      \n      return -1\n    elif text[loc:loc + len(pattern)] == pattern:\n      \n      return loc\n    else:\n      \n      match = self.match_bitap(text, pattern, loc)\n      return match", "docstring": "Locate the best instance of 'pattern' in 'text' near 'loc'.\n\nArgs:\ntext: The text to search.\npattern: The pattern to search for.\nloc: The location to search around.\n\nReturns:\nBest match index or -1.", "source": "juraj-google-style"}
{"code": "def find_signature(self, signature_id=None, signer_email_address=None):\n    if self.signatures:\n        for signature in self.signatures:\n            if ((signature.signature_id == signature_id) or (signature.signer_email_address == signer_email_address)):\n                return signature", "docstring": "Return a signature for the given parameters\n\nArgs:\n\nsignature_id (str):             Id of the signature to retrieve.\nsigner_email_address (str):     Email address of the associated signer for the signature to retrieve.\n\nReturns:\nA Signature object or None", "source": "codesearchnet"}
{"code": "def version(self):\n    cmd = b'version\\r\\n'\n    results = self._misc_cmd([cmd], b'version', False)\n    (before, _, after) = results[0].partition(b' ')\n    if (before != b'VERSION'):\n        raise MemcacheUnknownError(('Received unexpected response: %s' % results[0]))\n    return after", "docstring": "The memcached \"version\" command.\n\nReturns:\nA string of the memcached version.", "source": "codesearchnet"}
{"code": "def validate_variable_type(var_name, var_type, value):\n    if isinstance(var_type, CFNType):\n        value = CFNParameter(name=var_name, value=value)\n    elif isinstance(var_type, TroposphereType):\n        try:\n            value = var_type.create(value)\n        except Exception as exc:\n            name = '{}.create'.format(var_type.resource_name)\n            raise ValidatorError(var_name, name, value, exc)\n    elif (not isinstance(value, var_type)):\n        raise ValueError(('Value for variable %s must be of type %s. Actual type: %s.' % (var_name, var_type, type(value))))\n    return value", "docstring": "Ensures the value is the correct variable type.\n\nArgs:\nvar_name (str): The name of the defined variable on a blueprint.\nvar_type (type): The type that the value should be.\nvalue (obj): The object representing the value provided for the\nvariable\n\nReturns:\nobject: Returns the appropriate value object. If the original value\nwas of CFNType, the returned value will be wrapped in CFNParameter.\n\nRaises:\nValueError: If the `value` isn't of `var_type` and can't be cast as\nthat type, this is raised.", "source": "codesearchnet"}
{"code": "def display_hierarchy_helper(root, parent_id_to_children, depth):\n  \n  print '%s%s (%s)' % ('%s+--' % ('|'.join(['  '] * depth)),\n                       root['name'], root['id'])\n\n  \n  for child in parent_id_to_children.get(root['id'], []):\n    display_hierarchy_helper(child, parent_id_to_children, depth + 1)", "docstring": "Recursive helper for displaying the hierarchy.\n\nArgs:\nroot: The current root ad unit.\nparent_id_to_children: The overall map of parent ids to children.\ndepth: The current depth.", "source": "juraj-google-style"}
{"code": "def generate_sbi_config(num_pbs: int=3, project: str='sip', programme_block: str='sip_demos', pb_config: Union[(dict, List[dict])]=None, workflow_config: Union[(dict, List[dict])]=None, register_workflows=False) -> dict:\n    if isinstance(workflow_config, dict):\n        workflow_config = [workflow_config]\n    if isinstance(pb_config, dict):\n        pb_config = [pb_config]\n    utc_now = datetime.datetime.utcnow()\n    pb_list = []\n    for i in range(num_pbs):\n        pb_id = ProcessingBlock.get_id(utc_now)\n        if (workflow_config is not None):\n            _workflow_config = workflow_config[i]\n        else:\n            _workflow_config = None\n        if (pb_config is not None):\n            _pb_config = pb_config[i]\n        else:\n            _pb_config = None\n        pb_dict = generate_pb_config(pb_id, _pb_config, _workflow_config)\n        pb_list.append(pb_dict)\n    sbi_config = dict(id=SchedulingBlockInstance.get_id(utc_now, project), version=__sbi_version__, scheduling_block=generate_sb(utc_now, project, programme_block), processing_blocks=pb_list)\n    if register_workflows:\n        add_workflow_definitions(sbi_config)\n    return sbi_config", "docstring": "Generate a SBI configuration dictionary.\n\nArgs:\nnum_pbs (int, optional): Number of Processing Blocks (default = 3)\nproject (str, optional): Project to associate the SBI with.\nprogramme_block (str, optional): SBI programme block\npb_config (dict, List[dict], optional): PB configuration\nworkflow_config (dict, List[dict], optional): Workflow configuration\nregister_workflows (bool, optional): If true also register workflows.\n\nReturns:\ndict, SBI configuration dictionary", "source": "codesearchnet"}
{"code": "def copy_to_mesh(tensor: Any, layout: layout_lib.Layout, source_layout: Optional[layout_lib.Layout]=None) -> tensor_lib.Tensor:\n    del source_layout\n    return relayout(tensor, layout)", "docstring": "Copies a tf.Tensor onto the DTensor device with the given layout.\n\nCopies a regular tf.Tensor onto the DTensor device. Use the mesh attached to\n`layout` as target mesh. This method currently only supports replicated\nlayouts, or one-to-one copies for sharded layouts.\n\nArgs:\ntensor: A regular tf.Tensor to be copied as a DTensor.\nlayout: Target layout (and mesh) for the result DTensor.\nsource_layout: Source layout of the tensor before copy. This argument\nis deprecated.\n\nReturns:\nA DTensor on the DTensor device with the given layout.", "source": "github-repos"}
{"code": "def _Conv2DBackpropInputGrad(op: ops.Operation, grad):\n    return [None, gen_nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'), data_format=op.get_attr('data_format').decode()), gen_nn_ops.conv2d(grad, op.inputs[1], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'), data_format=op.get_attr('data_format').decode())]", "docstring": "The derivatives for deconvolution.\n\nArgs:\nop: the Deconvolution op.\ngrad: the tensor representing the gradient w.r.t. the output\n\nReturns:\nthe gradients w.r.t. the input and the filter", "source": "github-repos"}
{"code": "def distance(p_a, p_b):\n    return sqrt((((p_a.lat - p_b.lat) ** 2) + ((p_a.lon - p_b.lon) ** 2)))", "docstring": "Euclidean distance, between two points\n\nArgs:\np_a (:obj:`Point`)\np_b (:obj:`Point`)\nReturns:\nfloat: distance, in degrees", "source": "codesearchnet"}
{"code": "def _gather_saveables_for_checkpoint(self):\n\n    def _saveable_factory(name=self._common_name):\n        return _MirroredSaveable(self, self._primary, name)\n    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}", "docstring": "Overrides Trackable method.\n\nThis allows both name-based and object-based save and restore of\nMirroredVariables.\n\nReturns:\nA dictionary mapping attribute names to `SaveableObject` factories.", "source": "github-repos"}
{"code": "def preprocess(self, xs):\n        \n        return [self.nesting_field.preprocess(x)\n                for x in super(NestedField, self).preprocess(xs)]", "docstring": "Preprocess a single example.\n\nFirstly, tokenization and the supplied preprocessing pipeline is applied. Since\nthis field is always sequential, the result is a list. Then, each element of\nthe list is preprocessed using ``self.nesting_field.preprocess`` and the resulting\nlist is returned.\n\nArguments:\nxs (list or str): The input to preprocess.\n\nReturns:\nlist: The preprocessed list.", "source": "juraj-google-style"}
{"code": "def begin_block(self, req_begin_block):\n        \n        self.abort_if_abci_chain_is_not_synced()\n\n        chain_shift = 0 if self.chain is None else self.chain['height']\n        logger.debug('BEGIN BLOCK, height:%s, num_txs:%s',\n                     req_begin_block.header.height + chain_shift,\n                     req_begin_block.header.num_txs)\n\n        self.block_txn_ids = []\n        self.block_transactions = []\n        return ResponseBeginBlock()", "docstring": "Initialize list of transaction.\nArgs:\nreq_begin_block: block object which contains block header\nand block hash.", "source": "juraj-google-style"}
{"code": "def transfer_project(self, to_project_id, **kwargs):\n    path = ('/groups/%s/projects/%s' % (self.id, to_project_id))\n    self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Transfer a project to this group.\n\nArgs:\nto_project_id (int): ID of the project to transfer\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTransferProjectError: If the project could not be transfered", "source": "codesearchnet"}
{"code": "class SwinPatchMerging(nn.Module):\n\n    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:\n        super().__init__()\n        self.input_resolution = input_resolution\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(4 * dim)\n\n    def maybe_pad(self, input_feature, height, width):\n        should_pad = height % 2 == 1 or width % 2 == 1\n        if should_pad:\n            pad_values = (0, 0, 0, width % 2, 0, height % 2)\n            input_feature = nn.functional.pad(input_feature, pad_values)\n        return input_feature\n\n    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:\n        height, width = input_dimensions\n        batch_size, dim, num_channels = input_feature.shape\n        input_feature = input_feature.view(batch_size, height, width, num_channels)\n        input_feature = self.maybe_pad(input_feature, height, width)\n        input_feature_0 = input_feature[:, 0::2, 0::2, :]\n        input_feature_1 = input_feature[:, 1::2, 0::2, :]\n        input_feature_2 = input_feature[:, 0::2, 1::2, :]\n        input_feature_3 = input_feature[:, 1::2, 1::2, :]\n        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)\n        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)\n        input_feature = self.norm(input_feature)\n        input_feature = self.reduction(input_feature)\n        return input_feature", "docstring": "Patch Merging Layer.\n\nArgs:\ninput_resolution (`Tuple[int]`):\nResolution of input feature.\ndim (`int`):\nNumber of input channels.\nnorm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def ProcessGlobalSuppresions(lines):\n  \n  for line in lines:\n    if _SEARCH_C_FILE.search(line):\n      for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:\n        _global_error_suppressions[category] = True\n    if _SEARCH_KERNEL_FILE.search(line):\n      for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:\n        _global_error_suppressions[category] = True", "docstring": "Updates the list of global error suppressions.\n\nParses any lint directives in the file that have global effect.\n\nArgs:\nlines: An array of strings, each representing a line of the file, with the\nlast element being empty if the file is terminated with a newline.", "source": "juraj-google-style"}
{"code": "def HasOutputClass(cls, name):\n    if (not isinstance(name, py2to3.STRING_TYPES)):\n        return False\n    return (name.lower() in cls._output_classes)", "docstring": "Determines if a specific output class is registered with the manager.\n\nArgs:\nname (str): name of the output module.\n\nReturns:\nbool: True if the output class is registered.", "source": "codesearchnet"}
{"code": "def collect(manifest=default_manifest, tmp_path=None, compress=False):\n    manifest = load_manifest(manifest)\n    client = manifest.get('client', {})\n    plugins = manifest.get('plugins', {})\n    run_strategy = client.get('run_strategy', {'name': 'parallel'})\n    apply_default_enabled(plugins.get('default_component_enabled', False))\n    load_packages(plugins.get('packages', []))\n    apply_blacklist(client.get('blacklist', {}))\n    apply_configs(plugins)\n    to_persist = get_to_persist(client.get('persist', set()))\n    hostname = call('hostname -f', env=SAFE_ENV).strip()\n    suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n    relative_path = ('insights-%s-%s' % (hostname, suffix))\n    tmp_path = (tmp_path or tempfile.gettempdir())\n    output_path = os.path.join(tmp_path, relative_path)\n    fs.ensure_path(output_path)\n    fs.touch(os.path.join(output_path, 'insights_archive.txt'))\n    broker = dr.Broker()\n    ctx = create_context(client.get('context', {}))\n    broker[ctx.__class__] = ctx\n    parallel = (run_strategy.get('name') == 'parallel')\n    pool_args = run_strategy.get('args', {})\n    with get_pool(parallel, pool_args) as pool:\n        h = Hydration(output_path, pool=pool)\n        broker.add_observer(h.make_persister(to_persist))\n        dr.run_all(broker=broker, pool=pool)\n    if compress:\n        return create_archive(output_path)\n    return output_path", "docstring": "This is the collection entry point. It accepts a manifest, a temporary\ndirectory in which to store output, and a boolean for optional compression.\n\nArgs:\nmanifest (str or dict): json document or dictionary containing the\ncollection manifest. See default_manifest for an example.\ntmp_path (str): The temporary directory that will be used to create a\nworking directory for storing component output as well as the final\ntar.gz if one is generated.\ncompress (boolean): True to create a tar.gz and remove the original\nworkspace containing output. False to leave the workspace without\ncreating a tar.gz\n\nReturns:\nThe full path to the created tar.gz or workspace.", "source": "codesearchnet"}
{"code": "def parents(self, as_resources=False):\n    parents = [o for (s, p, o) in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]\n    if as_resources:\n        logger.debug('retrieving parent as resource')\n        parents = [self.repo.get_resource(parent) for parent in parents]\n    return parents", "docstring": "method to return hierarchical parents of this resource\n\nArgs:\nas_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n(list): list of resources", "source": "codesearchnet"}
{"code": "def get_dir_size(path: str='.') -> int:\n    total = 0\n    for root, _, files in os.walk(path):\n        for filename in files:\n            total += os.path.getsize(os.path.join(root, filename))\n    return total", "docstring": "Get the total size of files and sub-directories under the path.\n\nArgs:\npath: Path of a directory or a file to calculate the total size.\n\nReturns:\nTotal size of the directory or a file.", "source": "github-repos"}
{"code": "def get_replacement_transform(self, ptransform):\n    raise NotImplementedError", "docstring": "Provides a runner specific override for a given PTransform.\n\nArgs:\nptransform: PTransform to be replaced.\n\nReturns:\nA PTransform that will be the replacement for the PTransform given as an\nargument.", "source": "github-repos"}
{"code": "def _handle_error(response):\n    code = response.status_code\n    if (200 <= code < 400):\n        return\n    if (code == 400):\n        sys.stderr.write((response.text + '\\n'))\n        raise BadRequest(response)\n    elif (code == 401):\n        sys.stderr.write((response.text + '\\n'))\n        raise UnauthorizedAccess(response)\n    elif (code == 403):\n        sys.stderr.write((response.text + '\\n'))\n        raise ForbiddenAccess(response)\n    elif (code == 404):\n        sys.stderr.write((response.text + '\\n'))\n        raise ResourceNotFound(response)\n    elif (code == 405):\n        sys.stderr.write((response.text + '\\n'))\n        raise MethodNotAllowed(response)\n    elif (code == 409):\n        sys.stderr.write((response.text + '\\n'))\n        raise ResourceConflict(response)\n    elif (code == 422):\n        sys.stderr.write((response.text + '\\n'))\n        raise ResourceInvalid(response)\n    elif (code in (449, 502, 503, 504)):\n        sys.stderr.write((response.text + '\\n'))\n        raise RetryWithDelay(response)\n    elif (401 <= code < 500):\n        sys.stderr.write((response.text + '\\n'))\n        raise ClientError(response)\n    elif (500 <= code < 600):\n        sys.stderr.write((response.text + '\\n'))\n        raise ServerError(response)\n    else:\n        raise ConnectionError(response)", "docstring": "Raise exceptions in response to any http errors\n\nArgs:\nresponse: A Response object\n\nRaises:\nBadRequest: if HTTP error code 400 returned.\nUnauthorizedAccess: if HTTP error code 401 returned.\nForbiddenAccess: if HTTP error code 403 returned.\nResourceNotFound: if HTTP error code 404 is returned.\nMethodNotAllowed: if HTTP error code 405 is returned.\nResourceConflict: if HTTP error code 409 is returned.\nResourceInvalid: if HTTP error code 422 is returned.\nClientError: if HTTP error code falls in 401 - 499.\nServerError: if HTTP error code falls in 500 - 599.\nConnectionError: if unknown HTTP error code returned.", "source": "codesearchnet"}
{"code": "def ReceiveMessages(self, client_id, messages):\n    \n    if data_store.RelationalDBEnabled():\n      return self.ReceiveMessagesRelationalFlows(client_id, messages)\n\n    now = time.time()\n    with queue_manager.QueueManager(token=self.token) as manager:\n      for session_id, msgs in iteritems(\n          collection.Group(messages, operator.attrgetter(\"session_id\"))):\n\n        \n        leftover_msgs = self.HandleWellKnownFlows(msgs)\n\n        unprocessed_msgs = []\n        for msg in leftover_msgs:\n          if (msg.auth_state == msg.AuthorizationState.AUTHENTICATED or\n              msg.session_id == self.unauth_allowed_session_id):\n            unprocessed_msgs.append(msg)\n\n        if len(unprocessed_msgs) < len(leftover_msgs):\n          logging.info(\"Dropped %d unauthenticated messages for %s\",\n                       len(leftover_msgs) - len(unprocessed_msgs), client_id)\n\n        if not unprocessed_msgs:\n          continue\n\n        for msg in unprocessed_msgs:\n          manager.QueueResponse(msg)\n\n        for msg in unprocessed_msgs:\n          \n          \n          if msg.request_id == 0:\n            manager.QueueNotification(session_id=msg.session_id)\n            \n            break\n          elif msg.type == rdf_flows.GrrMessage.Type.STATUS:\n            \n            \n            \n            \n            \n            if msg.HasTaskID():\n              manager.DeQueueClientRequest(msg)\n\n            manager.QueueNotification(\n                session_id=msg.session_id, last_status=msg.request_id)\n\n            stat = rdf_flows.GrrStatus(msg.payload)\n            if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:\n              \n              crash_details = rdf_client.ClientCrash(\n                  client_id=client_id,\n                  session_id=session_id,\n                  backtrace=stat.backtrace,\n                  crash_message=stat.error_message,\n                  nanny_status=stat.nanny_status,\n                  timestamp=rdfvalue.RDFDatetime.Now())\n              events.Events.PublishEvent(\n                  \"ClientCrash\", crash_details, token=self.token)\n\n    logging.debug(\"Received %s messages from %s in %s sec\", len(messages),\n                  client_id,\n                  time.time() - now)", "docstring": "Receives and processes the messages from the source.\n\nFor each message we update the request object, and place the\nresponse in that request's queue. If the request is complete, we\nsend a message to the worker.\n\nArgs:\nclient_id: The client which sent the messages.\nmessages: A list of GrrMessage RDFValues.", "source": "juraj-google-style"}
{"code": "def forEach(self) -> 'ColumnExpressionBuilder':\n    return ColumnExpressionBuilder(self._builder, self._column_name, self._children, True, True)", "docstring": "The forEach() function.\n\nUnnests the repeated values from a FHIR path. If the FHIR path does not\nreturn a collection, we treat that as a collection with a single value.\nOnce this function is called, the FHIR path is sealed to be immutable.\n\nReturns:\nA new ColumnExpressionBuilder with needs_unnest set to True.", "source": "github-repos"}
{"code": "def __init__(self, object_local_name: str, checkpoint_local_names: Sequence[str], to_shard_layout: Optional[Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]=None, to_unshard_layout: Optional[Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]]=None):\n    self._object_local_name = object_local_name\n    self._checkpoint_local_names = checkpoint_local_names\n    self._to_shard_layout = to_shard_layout\n    self._to_unshard_layout = to_unshard_layout\n    self._main_checkpoint_name = checkpoint_local_names[0]", "docstring": "Initializes  Reshard callback.\n\nArgs:\nobject_local_name:  The local name of the object being restored.\ncheckpoint_local_names: The local names of the checkpoint positions that\nneed to be read.\nto_shard_layout: (Optional) Target layouts as specified in the embedding\nbeing restored.\nto_unshard_layout: (Optional) Layouts as stored in checkpoint being\nrestored from.", "source": "github-repos"}
{"code": "def negative(x):\n    if any_symbolic_tensors((x,)):\n        return Negative().symbolic_call(x)\n    return backend.numpy.negative(x)", "docstring": "Numerical negative, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, `y = -x`.", "source": "github-repos"}
{"code": "def __calculate_boltzmann_factor(self, state_key, next_action_list):\n    sigmoid = self.__calculate_sigmoid()\n    q_df = self.q_df[(self.q_df.state_key == state_key)]\n    q_df = q_df[q_df.isin(next_action_list)]\n    q_df['boltzmann_factor'] = (q_df['q_value'] / sigmoid)\n    q_df['boltzmann_factor'] = q_df['boltzmann_factor'].apply(np.exp)\n    q_df['boltzmann_factor'] = (q_df['boltzmann_factor'] / q_df['boltzmann_factor'].sum())\n    return q_df", "docstring": "Calculate boltzmann factor.\n\nArgs:\nstate_key:              The key of state.\nnext_action_list:       The possible action in `self.t+1`.\nIf the length of this list is 0, all action should be possible.\n\nReturns:\n[(`The key of action`, `boltzmann probability`)]", "source": "codesearchnet"}
{"code": "def prepare_aot(aot: list[str], srcs_dir: str) -> None:\n    for file in aot:\n        if 'external/local_tsl/' in file:\n            copy_file(file, srcs_dir, 'external/local_tsl/')\n        elif 'external/local_xla/' in file:\n            copy_file(file, srcs_dir, 'external/local_xla/')\n        else:\n            copy_file(file, srcs_dir)\n    shutil.move(os.path.join(srcs_dir, 'tensorflow/tools/pip_package/xla_build/CMakeLists.txt'), os.path.join(srcs_dir, 'CMakeLists.txt'))", "docstring": "Rearrange xla_aot files in target the target directory.\n\nArgs:\naot: a list of paths to files that should be in xla_aot directory.\nsrcs_dir: target directory where files are copied to.", "source": "github-repos"}
{"code": "def _get_resource_list(self, rsrc_dict):\n    if ('collections' in rsrc_dict):\n        return rsrc_dict['collections']\n    if ('experiments' in rsrc_dict):\n        return rsrc_dict['experiments']\n    if ('channels' in rsrc_dict):\n        return rsrc_dict['channels']\n    if ('coords' in rsrc_dict):\n        return rsrc_dict['coords']\n    raise RuntimeError('Invalid list response received from Boss.  No known resource type returned.')", "docstring": "Extracts list of resources from the HTTP response.\n\nArgs:\nrsrc_dict (dict): HTTP response encoded in a dictionary.\n\nReturns:\n(list[string]): List of a type of resource (collections, experiments, etc).\n\nRaises:\n(RuntimeError): If rsrc_dict does not contain any known resources.", "source": "codesearchnet"}
{"code": "def pytd_cls_to_instance_var(self, cls, subst=None, node=None, source_sets=None, discard_concrete_values=False):\n    source_sets = source_sets or [[]]\n    node = node or self.ctx.root_node\n    kwargs = {'subst': subst, 'node': node, 'source_sets': source_sets, 'discard_concrete_values': discard_concrete_values}\n\n    def constant_to_instance_value(new_type):\n        return self.constant_to_value(abstract_utils.AsInstance(new_type), subst, node)\n    if isinstance(cls, pytd.AnythingType):\n        return self.unsolvable.to_variable(node)\n    elif isinstance(cls, pytd.GenericType) and cls.name == 'typing.ClassVar':\n        param, = cls.parameters\n        return self.pytd_cls_to_instance_var(param, **kwargs)\n    var = self.ctx.program.NewVariable()\n    for t in pytd_utils.UnpackUnion(cls):\n        if isinstance(t, pytd.TypeParameter):\n            if not subst or t.full_name not in subst:\n                raise self.TypeParameterError(t.full_name)\n            else:\n                for v in subst[t.full_name].bindings:\n                    for source_set in source_sets:\n                        if discard_concrete_values:\n                            value = self.get_maybe_abstract_instance(v.data)\n                        else:\n                            value = v.data\n                        var.AddBinding(value, source_set + [v], node)\n        elif isinstance(t, pytd.NothingType):\n            pass\n        else:\n            if isinstance(t, pytd.Annotated):\n                typ = constant_to_instance_value(t.base_type)\n                value = self._apply_metadata_annotations(typ, t.annotations)\n            else:\n                value = constant_to_instance_value(t)\n            for source_set in source_sets:\n                var.AddBinding(value, source_set, node)\n    return var", "docstring": "Convert a constant instance to a Variable.\n\nThis converts a constant to a cfg.Variable. Unlike constant_to_value, it\ncan handle things that need to be represented as a Variable with multiple\npossible values (i.e., a union type), like pytd.Function.\n\nArgs:\ncls: The pytd class to convert.\nsubst: The current type parameters.\nnode: The current CFG node. (For instances)\nsource_sets: An iterator over instances of SourceSet (or just tuples).\ndiscard_concrete_values: Whether concrete values should be discarded from\ntype parameters.\n\nReturns:\nA cfg.Variable.\nRaises:\nTypeParameterError: if conversion is attempted on a type parameter without\na substitution.\nValueError: if pytype is not of a known type.", "source": "github-repos"}
{"code": "def __init__(self, value_type, default: typing.Optional[numbers.Number]=MISSING_VALUE, min_value: typing.Optional[numbers.Number]=None, max_value: typing.Optional[numbers.Number]=None, is_noneable: bool=False, frozen: bool=False):\n    if min_value is not None and max_value is not None and (min_value > max_value):\n        raise ValueError(f'\"max_value\" must be equal or greater than \"min_value\". Encountered: min_value={min_value}, max_value={max_value}.')\n    self._min_value = min_value\n    self._max_value = max_value\n    super().__init__(value_type, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\nvalue_type: Type of number.\ndefault: Default value for this spec.\nmin_value: (Optional) minimum value of acceptable values.\nmax_value: (Optional) maximum value of acceptable values.\nis_noneable: If True, None is acceptable.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def should_update(stack):\n    if stack.locked:\n        if (not stack.force):\n            logger.debug('Stack %s locked and not in --force list. Refusing to update.', stack.name)\n            return False\n        else:\n            logger.debug('Stack %s locked, but is in --force list.', stack.name)\n    return True", "docstring": "Tests whether a stack should be submitted for updates to CF.\n\nArgs:\nstack (:class:`stacker.stack.Stack`): The stack object to check.\n\nReturns:\nbool: If the stack should be updated, return True.", "source": "codesearchnet"}
{"code": "def _summary(self, name, tensor):\n    if (tensor.shape.ndims == 0):\n        return tf.summary.scalar(name, tensor)\n    else:\n        return tf.summary.histogram(name, tensor)", "docstring": "Create a scalar or histogram summary matching the rank of the tensor.\n\nArgs:\nname: Name for the summary.\ntensor: Tensor to summarize.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def get_extra_locals(self):\n    raise NotImplementedError('subclasses must override this')", "docstring": "Returns extra static local variables to be made to transformed code.\n\nSubclasses must override this.\n\nReturns:\nextra_locals: A Dict[Text, Any] containing additional variables to make\navailable to the transformed code.", "source": "github-repos"}
{"code": "def load_glove(file):\n    model = {}\n    with open(file, encoding='utf8', errors='ignore') as f:\n        for line in f:\n            line = line.split(' ')\n            word = line[0]\n            vector = np.array([float(val) for val in line[1:]])\n            model[word] = vector\n    return model", "docstring": "Loads GloVe vectors in numpy array.\n\nArgs:\nfile (str): a path to a glove file.\n\nReturn:\ndict: a dict of numpy arrays.", "source": "codesearchnet"}
{"code": "def get_program_type_by_slug(self, slug):\n        \n        return self._load_data(\n            self.PROGRAM_TYPES_ENDPOINT,\n            resource_id=slug,\n            default=None,\n        )", "docstring": "Get a program type by its slug.\n\nArguments:\nslug (str): The slug to identify the program type.\n\nReturns:\ndict: A program type object.", "source": "juraj-google-style"}
{"code": "def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Multiply this variable by `tf.IndexedSlices`.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to multiply this variable by.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def old_collective_correlation(self):\n    if self.has_run:\n        return (self.atoms.collective_dr_squared() / float(self.number_of_jumps))\n    else:\n        return None", "docstring": "Returns the collective correlation factor, f_I\n\nArgs:\nNone\n\nReturns:\n(Float): The collective correlation factor, f_I.\n\nNotes:\nThis function assumes that the jump distance between sites has\nbeen normalised to a=1. If the jumps distance is not equal to 1\nthen the value returned by this function should be divided by a^2.\nEven better, use self.collective_correlation", "source": "codesearchnet"}
{"code": "def clean_for_serialization(self, data):\n        \n\n        if isinstance(data, dict):\n            for k in data.keys():\n                if (k.startswith('__')): \n                    del data[k]\n                elif isinstance(data[k], bson.objectid.ObjectId): \n                    del data[k]\n                elif isinstance(data[k], datetime.datetime):\n                    data[k] = data[k].isoformat()+'Z'\n                elif isinstance(data[k], dict):\n                    data[k] = self.clean_for_serialization(data[k])\n                elif isinstance(data[k], list):\n                    data[k] = [self.clean_for_serialization(item) for item in data[k]]\n        return data", "docstring": "Clean data in preparation for serialization.\n\nDeletes items having key either a BSON, datetime, dict or a list instance, or\nstarting with __.\n\nArgs:\ndata: Sample data to be serialized.\n\nReturns:\nCleaned data dictionary.", "source": "juraj-google-style"}
{"code": "def getsize(self, path=None, client_kwargs=None, header=None):\n    return self._getsize_from_header(self.head(path, client_kwargs, header))", "docstring": "Return the size, in bytes, of path.\n\nArgs:\npath (str): File path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\nint: Size in bytes.", "source": "codesearchnet"}
{"code": "def _get_control_flow_context(self):\n    return self._control_flow_context", "docstring": "Returns the current control flow context.\n\nReturns:\nA context object.", "source": "github-repos"}
{"code": "def run_repair_pdb(self, silent=False, force_rerun=False):\n        \n        \n        foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file)\n\n        \n        foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])\n\n        \n        ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent,\n                                   outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir)\n\n        \n\n        self.repaired_pdb_outfile = foldx_repair_outfile", "docstring": "Run FoldX RepairPDB on this PDB file.\n\nOriginal command::\n\nfoldx --command=RepairPDB --pdb=4bxi.pdb\n\nArgs:\nsilent (bool): If FoldX output should be silenced from printing to the shell.\nforce_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.", "source": "juraj-google-style"}
{"code": "def __init__(self, a_schedule, b_schedule, merged_schedule,\n               problem_reporter):\n    \n    self.a_schedule = a_schedule\n    self.b_schedule = b_schedule\n    self.merged_schedule = merged_schedule\n    self.a_merge_map = {}\n    self.b_merge_map = {}\n    self.a_zone_map = {}\n    self.b_zone_map = {}\n    self._mergers = []\n    self._idnum = max(self._FindLargestIdPostfixNumber(self.a_schedule),\n                      self._FindLargestIdPostfixNumber(self.b_schedule))\n\n    self.problem_reporter = problem_reporter", "docstring": "Initialise the merger.\n\nOnce this initialiser has been called, a_schedule and b_schedule should\nnot be modified.\n\nArgs:\na_schedule: The old schedule, an instance of transitfeed.Schedule.\nb_schedule: The new schedule, an instance of transitfeed.Schedule.\nproblem_reporter: The problem reporter, an instance of\ntransitfeed.ProblemReporter.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A MobileBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def point_consensus(self, consensus_type):\n        \n        if \"mean\" in consensus_type:\n            consensus_data = np.mean(self.data, axis=0)\n        elif \"std\" in consensus_type:\n            consensus_data = np.std(self.data, axis=0)\n        elif \"median\" in consensus_type:\n            consensus_data = np.median(self.data, axis=0)\n        elif \"max\" in consensus_type:\n            consensus_data = np.max(self.data, axis=0)\n        elif \"percentile\" in consensus_type:\n            percentile = int(consensus_type.split(\"_\")[1])\n            consensus_data = np.percentile(self.data, percentile, axis=0)\n        else:\n            consensus_data = np.zeros(self.data.shape[1:])\n        consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name,\n                                      self.run_date, self.variable, self.start_date, self.end_date, self.units)\n        return consensus", "docstring": "Calculate grid-point statistics across ensemble members.\n\nArgs:\nconsensus_type: mean, std, median, max, or percentile_nn\n\nReturns:\nEnsembleConsensus containing point statistic", "source": "juraj-google-style"}
{"code": "def propagate(self, token, channel):\n        \n        if self.get_propagate_status(token, channel) != u'0':\n            return\n        url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))\n        req = self.remote_utils.get_url(url)\n        if req.status_code is not 200:\n            raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))\n        return True", "docstring": "Kick off the propagate function on the remote server.\n\nArguments:\ntoken (str): The token to propagate\nchannel (str): The channel to propagate\n\nReturns:\nboolean: Success", "source": "juraj-google-style"}
{"code": "def listtransactions(self, user_id='', count=10, start_at=0):\n    txlist = self.rpc.call('listtransactions', user_id, count, start_at)\n    self.logger.debug(('Got transaction list for ' + str(user_id)))\n    return txlist", "docstring": "List all transactions associated with this account.\n\nArgs:\nuser_id (str): this user's unique identifier\ncount (int): number of transactions to return (default=10)\nstart_at (int): start the list at this transaction (default=0)\n\nReturns:\nlist [dict]: transactions associated with this user's account", "source": "codesearchnet"}
{"code": "def _CreateRouteOptions(self, **kwargs):\n    options = {'proto': self.proto_id, 'scope': 'host'}\n    options.update(kwargs)\n    return options", "docstring": "Create a dictionary of parameters to append to the ip route command.\n\nArgs:\n**kwargs: dict, the string parameters to update in the ip route command.\n\nReturns:\ndict, the string parameters to append to the ip route command.", "source": "codesearchnet"}
{"code": "def delete_with_casper_admin_save(self, pkg):\n    if (pkg.__class__.__name__ == 'Package'):\n        package_to_delete = pkg.id\n    elif isinstance(pkg, int):\n        package_to_delete = pkg\n    elif isinstance(pkg, str):\n        package_to_delete = self.connection['jss'].Package(pkg).id\n    else:\n        raise TypeError\n    data_dict = {'username': self.connection['jss'].user, 'password': self.connection['jss'].password, 'deletedPackageID': package_to_delete}\n    self.connection['jss'].session.post(url=self.connection['delete_url'], data=data_dict)", "docstring": "Delete a pkg from the distribution server.\n\nArgs:\npkg: Can be a jss.Package object, an int ID of a package, or\na filename.", "source": "codesearchnet"}
{"code": "def set_energy(self, spins, target_energy):\n    spin_energy = self.energy(spins)\n    self.assertions.add(Equals(spin_energy, limitReal(target_energy)))", "docstring": "Set the energy of Theta with spins fixed to target_energy.\n\nArgs:\nspins (dict): Spin values for a subset of the variables in Theta.\ntarget_energy (float): The desired energy for Theta with spins fixed.\n\nNotes:\nAdd equality constraint to assertions.", "source": "codesearchnet"}
{"code": "def __setitem__(self, key, value):\n        \n        if not self._is_valid(value):\n            value = self._fix_value(value)\n        self._inner.__setitem__(key, value)", "docstring": "Attempt to set the value at position `key` to the `value`.\n\nIf a value is not the correct type, an attempt will be made to\nconvert it to the correct type.\n\nArgs:\nkey: An index.\nvalue: A value to set.", "source": "juraj-google-style"}
{"code": "def __init__(self, binaryDirectory=None):\n        \n        if binaryDirectory is None:\n            self._impl = amplpython.Environment()\n        else:\n            self._impl = amplpython.Environment(binaryDirectory)", "docstring": "Constructor with ability to select the location of the AMPL binary.\nNote that if binaryDirectory is set, the automatic lookup for an AMPL\nexecutable will not be executed.\n\nArgs:\nbinaryDirectory: The directory in which look for the AMPL Binary.", "source": "juraj-google-style"}
{"code": "def cpfs(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], noise: Optional[Noise]=None) -> Tuple[(List[TensorFluent], List[TensorFluent])]:\n    scope = self.transition_scope(state, action)\n    batch_size = int(state[0].shape[0])\n    (interm_fluents, next_state_fluents) = self.compile_cpfs(scope, batch_size, noise)\n    interms = [fluent for (_, fluent) in interm_fluents]\n    next_state = [fluent for (_, fluent) in next_state_fluents]\n    return (interms, next_state)", "docstring": "Compiles the intermediate and next state fluent CPFs given\nthe current `state` and `action`.\n\nArgs:\nstate (Sequence[tf.Tensor]): A tuple of state tensors.\naction (Sequence[tf.Tensor]): A tuple of action tensors.\n\nReturns:\nTuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent\nrepresenting the intermediate and state CPFs.", "source": "codesearchnet"}
{"code": "def assimilate(self, path):\n    try:\n        d = self.get_task_doc(path)\n        if ((self.mapi_key is not None) and (d['state'] == 'successful')):\n            self.calculate_stability(d)\n        tid = self._insert_doc(d)\n        return tid\n    except Exception as ex:\n        import traceback\n        logger.error(traceback.format_exc())\n        return False", "docstring": "Parses vasp runs. Then insert the result into the db. and return the\ntask_id or doc of the insertion.\n\nReturns:\nIf in simulate_mode, the entire doc is returned for debugging\npurposes. Else, only the task_id of the inserted doc is returned.", "source": "codesearchnet"}
{"code": "def check_requirements_file(req_file, skip_packages):\n    \n    reqs = read_requirements(req_file)\n    if skip_packages is not None:\n        reqs = [req for req in reqs if req.name not in skip_packages]\n    outdated_reqs = filter(None, [check_req(req) for req in reqs])\n    return outdated_reqs", "docstring": "Return list of outdated requirements.\n\nArgs:\nreq_file (str): Filename of requirements file\nskip_packages (list): List of package names to ignore.", "source": "juraj-google-style"}
{"code": "def _circuit_as_layers(circuit: circuits.Circuit, grouping: _QubitGrouping) -> List[_TransformsThenCzs]:\n    frontier = {q: 0 for q in circuit.all_qubits()}\n    layers = []\n    while True:\n        any_group_matrices = False\n        group_matrices = []\n        for g in grouping.groups:\n            start_frontier = {q: frontier[q] for q in g}\n            end_frontier = circuit.reachable_frontier_from(start_frontier)\n            mergeable_ops = circuit.findall_operations_between(start_frontier, end_frontier)\n            for (q, v) in end_frontier.items():\n                frontier[q] = v\n            group_matrix = np.eye((1 << len(g))).reshape(((2, 2) * len(g)))\n            if mergeable_ops:\n                any_group_matrices = True\n            for (_, op) in mergeable_ops:\n                group_matrix = linalg.targeted_left_multiply(left_matrix=protocols.unitary(op).reshape(((2, 2) * len(op.qubits))), right_target=group_matrix, target_axes=[grouping.loc(q)[1] for q in op.qubits])\n            group_matrices.append(np.transpose(group_matrix.reshape((1 << len(g)), (1 << len(g)))))\n        end_frontier = circuit.reachable_frontier_from(frontier, is_blocker=(lambda op: grouping.all_in_same_group(*op.qubits)))\n        cz_ops = circuit.findall_operations_between(frontier, end_frontier)\n        frontier = end_frontier\n        cz_indices = []\n        for (_, cz) in cz_ops:\n            (a, b) = cz.qubits\n            assert (cz == ops.CZ(a, b))\n            cz_indices.append((grouping.ind(a), grouping.ind(b)))\n        if ((not any_group_matrices) and (not cz_indices)):\n            break\n        layer = _TransformsThenCzs(group_matrices=group_matrices, cz_indices=cz_indices)\n        layers.append(layer)\n    assert (frontier == {q: len(circuit) for q in circuit.all_qubits()})\n    return layers", "docstring": "Transforms a circuit into a series of GroupMatrix+CZ layers.\n\nArgs:\ncircuit: The circuit to transform.\ngrouping: How the circuit's qubits are combined into groups.\n\nReturns:\nA list of layers. Each layer has a matrix to apply to each group of\nqubits, and a list of CZs to apply to pairs of qubits crossing\nbetween groups.", "source": "codesearchnet"}
{"code": "def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]:\n    return self._run(self.qualifyContractsAsync(*contracts))", "docstring": "Fully qualify the given contracts in-place. This will fill in\nthe missing fields in the contract, especially the conId.\n\nReturns a list of contracts that have been successfully qualified.\n\nThis method is blocking.\n\nArgs:\ncontracts: Contracts to qualify.", "source": "codesearchnet"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(StorageDataTypeDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.byte_order = definitions.BYTE_ORDER_NATIVE", "docstring": "Initializes a storage data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def recipe_policebot(config, recipe_name):\n    drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:", "docstring": "A tool that helps enforce CM object name conventions by checking names against a\nset of client-defined patterns, and emailing violations to appropriate\nagency teams on a daily basis.\n\nArgs:\nrecipe_name (string) - Name of document to deploy to.", "source": "github-repos"}
{"code": "def find_nearest(a, value, index=False):\n    i = np.abs((a - value)).argmin()\n    if index:\n        return i\n    else:\n        return a[i]", "docstring": "Find the array value, or index of the array value, closest to some given\nvalue.\n\nArgs:\na (ndarray)\nvalue (float)\nindex (bool): whether to return the index instead of the array value.\n\nReturns:\nfloat. The array value (or index, as int) nearest the specified value.", "source": "codesearchnet"}
{"code": "def _check_jwt_claims(jwt_claims):\n    current_time = time.time()\n    expiration = jwt_claims[u'exp']\n    if (not isinstance(expiration, INT_TYPES)):\n        raise suppliers.UnauthenticatedException(u'Malformed claim: \"exp\" must be an integer')\n    if (current_time >= expiration):\n        raise suppliers.UnauthenticatedException(u'The auth token has already expired')\n    if (u'nbf' not in jwt_claims):\n        return\n    not_before_time = jwt_claims[u'nbf']\n    if (not isinstance(not_before_time, INT_TYPES)):\n        raise suppliers.UnauthenticatedException(u'Malformed claim: \"nbf\" must be an integer')\n    if (current_time < not_before_time):\n        raise suppliers.UnauthenticatedException(u'Current time is less than the \"nbf\" time')", "docstring": "Checks whether the JWT claims should be accepted.\n\nSpecifically, this method checks the \"exp\" claim and the \"nbf\" claim (if\npresent), and raises UnauthenticatedException if 1) the current time is\nbefore the time identified by the \"nbf\" claim, or 2) the current time is\nequal to or after the time identified by the \"exp\" claim.\n\nArgs:\njwt_claims: the JWT claims whose expiratio to be checked.\n\nRaises:\nUnauthenticatedException: When the \"exp\" claim is malformed or the JWT has\nalready expired.", "source": "codesearchnet"}
{"code": "def FindExtensionByName(self, full_name):\n    \n    full_name = _NormalizeFullyQualifiedName(full_name)\n    message_name, _, extension_name = full_name.rpartition('.')\n    try:\n      \n      scope = self.FindMessageTypeByName(message_name)\n    except KeyError:\n      \n      scope = self.FindFileContainingSymbol(full_name)\n    return scope.extensions_by_name[extension_name]", "docstring": "Loads the named extension descriptor from the pool.\n\nArgs:\nfull_name: The full name of the extension descriptor to load.\n\nReturns:\nA FieldDescriptor, describing the named extension.", "source": "juraj-google-style"}
{"code": "def fetch_lid(self, woeid):\n    rss = self._fetch_xml(LID_LOOKUP_URL.format(woeid, 'f'))\n    try:\n        link = rss.find('channel/link').text\n    except AttributeError:\n        return None\n    lid = re.search('[A-Za-z]{4}[0-9]{4}', link).group()\n    return lid", "docstring": "Fetch a location's corresponding LID.\n\nArgs:\nwoeid: (string) the location's WOEID.\n\nReturns:\na string containing the requested LID or None if the LID could\nnot be found.\n\nRaises:\nurllib.error.URLError: urllib.request could not open the URL\n(Python 3).\nurllib2.URLError: urllib2 could not open the URL (Python 2).\nxml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to\nparse the XML document.", "source": "codesearchnet"}
{"code": "def detect_gpt(self, filename, offset, fs_guid):\n        \n        self.logger.debug('Detecting GPT partition type')\n\n        if fs_guid not in self.__gpt_plugins:\n            return None\n        else:\n            plugins = self.__gpt_plugins.get(fs_guid)\n            for plugin in plugins:\n                if plugin.detect(filename, offset):\n                    return plugin.get_volume_object()\n\n        return None", "docstring": "Used by rawdisk.session.Session to match gpt partitions agains\nfilesystem plugins.\n\nArgs:\nfilename: device or file that it will read in order to detect the\nfilesystem\nfs_id: filesystem guid to match\n(ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7})\noffset: offset for the filesystem that is being matched\n\nReturns:\nVolume object supplied by matched plugin.\nIf there is no match, None is returned", "source": "juraj-google-style"}
{"code": "def print_version():\n    \n    v = get_version()\n\n    try:\n        s = _STR_WIN[v]\n    except KeyError:\n        s = \"Unknow OS\"\n\n    print(\"-----------------------------------------------------------\")\n    print(\"\n    print(\"Python Version                           : {}.{}.{}\".format(*sys.version_info[:3]))\n    print(\"Windows Version String                   : {}\".format(s))\n    print(\"Windows Major Version                    : {}\".format(v[0]))\n    print(\"Windows Minor Version                    : {}\".format(v[1]))\n    print(\"Windows Service Pack (or Build) Version  : {}\".format(v[2]))\n    print(\"Is Windows Server                        : {}\".format('Yes' if v[3]==1 else 'No'))\n    print(\"Is Windows 10 (or Windows Server 2016)   : {}\".format('Yes' if v >= WIN_10 else 'No'))\n    print(\"-----------------------------------------------------------\")", "docstring": "Print get_version() return value in a readable format.\n\nParams:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _cache_at_least(self, size):\n    try:\n        while (len(self._result_cache) < size):\n            self._result_cache.append(next(self._result_iter))\n        return True\n    except StopIteration:\n        return False", "docstring": "Attempts to fill the result cache with at least the given number of results.\n\nReturns:\nbool: Whether the cache contains at least the given size.", "source": "codesearchnet"}
{"code": "def ParseRow(self, parser_mediator, row_offset, row):\n    \n    timestamp = self._ParseTimestamp(parser_mediator, row)\n    if timestamp is None:\n      return\n\n    try:\n      action = int(row['action'], 10)\n    except (ValueError, TypeError):\n      action = None\n\n    try:\n      scan_type = int(row['scan_type'], 10)\n    except (ValueError, TypeError):\n      scan_type = None\n\n    event_data = TrendMicroAVEventData()\n    event_data.action = action\n    event_data.filename = row['filename']\n    event_data.offset = row_offset\n    event_data.path = row['path']\n    event_data.scan_type = scan_type\n    event_data.threat = row['threat']\n\n    event = time_events.DateTimeValuesEvent(\n        timestamp, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a line of the log file and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow_offset (int): line number of the row.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor):\n    gate_score = self.act(self.w_0(hidden_states))\n    hidden_states = self.w_1(hidden_states)\n    hidden_states = gate_score * hidden_states\n    return hidden_states", "docstring": "Transform an input tensor from one feature space to another via a nonlinear operation\n\nArgs:\nhidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)", "source": "github-repos"}
{"code": "def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):\n    del targets\n    with tf.variable_scope('image_channel_compress_modality'):\n        hidden_size = model_hparams.hidden_size\n        img_len = model_hparams.img_len\n        channels = 3\n        batch = common_layers.shape_list(body_output)[0]\n        x = tf.layers.conv2d(body_output, (hidden_size * channels), kernel_size=(1, 1), strides=(1, 1), padding='VALID', activation=tf.nn.relu, name='decompress_conv')\n        x = tf.reshape(x, [batch, img_len, (img_len * channels), hidden_size])\n        x = common_layers.layer_preprocess(x, model_hparams)\n        x = tf.layers.dense(x, vocab_size, use_bias=True, activation=None, name='output_conv')\n        x = tf.reshape(x, [batch, img_len, img_len, channels, vocab_size])\n        return x", "docstring": "Transforms body output to return logits.\n\nArgs:\nbody_output: Tensor of shape [batch, img_len, img_len, depth].\ntargets:\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nTensor of shape [batch, img_len, img_len, channels, vocab_size].", "source": "codesearchnet"}
{"code": "def read_config_string_options(obj: Any,\n                               parser: ConfigParser,\n                               section: str,\n                               options: Iterable[str],\n                               default: str = None) -> None:\n    \n    \n    \n    for o in options:\n        setattr(obj, o, get_config_string_option(parser, section, o,\n                                                 default=default))", "docstring": "Reads config options and writes them as attributes of ``obj``, with\nattribute names as per ``options``.\n\nArgs:\nobj: the object to modify\nparser: instance of :class:`ConfigParser`\nsection: section name within config file\noptions: option (variable) names within that section\ndefault: value to use for any missing options\n\nReturns:", "source": "juraj-google-style"}
{"code": "def replace(s, pattern, replacement):\n    \n    \n    \n    \n    \n    \n    \n    \n    def _replacement(matchobj):\n        return replacement\n    return re.sub(pattern, _replacement, s)", "docstring": "Replaces occurrences of a match string in a given\nstring and returns the new string. The match string\ncan be a regex expression.\n\nArgs:\ns (str):           the string to modify\npattern (str):     the search expression\nreplacement (str): the string to replace each match with", "source": "juraj-google-style"}
{"code": "def prune(self, limit=None, n=None, percentile=None, keep_ends=False):\n        \n        strip = self.copy()\n\n        if not (limit or n or percentile):\n            m = \"You must provide a limit or n or percentile for pruning.\"\n            raise StriplogError(m)\n        if limit:\n            prune = [i for i, iv in enumerate(strip) if iv.thickness < limit]\n        if n:\n            prune = strip.thinnest(n=n, index=True)\n        if percentile:\n            n = np.floor(len(strip)*percentile/100)\n            prune = strip.thinnest(n=n, index=True)\n\n        if keep_ends:\n            first, last = 0, len(strip) - 1\n            if first in prune:\n                prune.remove(first)\n            if last in prune:\n                prune.remove(last)\n\n        del strip[prune]\n\n        return strip", "docstring": "Remove intervals below a certain limit thickness. In place.\n\nArgs:\nlimit (float): Anything thinner than this will be pruned.\nn (int): The n thinnest beds will be pruned.\npercentile (float): The thinnest specified percentile will be\npruned.\nkeep_ends (bool): Whether to keep the first and last, regardless\nof whether they meet the pruning criteria.", "source": "juraj-google-style"}
{"code": "def to_frame(self, **kwargs):\n        r\n        df = export.write_dataframe(self._values, **kwargs)\n        df.name = self.title\n        return df", "docstring": "r\"\"\"Return a pandas DataFrame loaded from the worksheet data.\n\nArgs:\n\\**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``)\nReturns:\npandas.DataFrame: new ``DataFrame`` instance", "source": "juraj-google-style"}
{"code": "def _PendingCount(to_ops: list[ops.Operation], from_ops: list[ops.Operation], colocate_gradients_with_ops, func_graphs, xs_set):\n    reached_ops = set()\n    _MarkReachedOps(from_ops, reached_ops, func_graphs)\n    reachable_to_ops = set((op for op in to_ops if op in reached_ops))\n    between_ops = set()\n    between_op_list = []\n    queue = collections.deque()\n    queue.extend(to_ops)\n    while queue:\n        op = queue.popleft()\n        if op in reached_ops:\n            between_ops.add(op)\n            between_op_list.append(op)\n            reached_ops.remove(op)\n            for inp in _NonEagerInputs(op, xs_set):\n                queue.append(inp.op)\n    loop_state = control_flow_state.MaybeCreateControlFlowState(between_op_list, between_ops, colocate_gradients_with_ops)\n    pending_count = collections.defaultdict(int)\n    for op in between_op_list:\n        for x in _NonEagerInputs(op, xs_set):\n            if x.op in between_ops:\n                pending_count[x.op] += 1\n    return (reachable_to_ops, pending_count, loop_state)", "docstring": "Initialize the pending count for ops between two lists of Operations.\n\n'pending_count[op]' indicates the number of backprop inputs\nto this operation.\n\nArgs:\nto_ops: list of Operations.\nfrom_ops: list of Operations.\ncolocate_gradients_with_ops: Python bool.  See docstring of gradients().\nfunc_graphs: list of FuncGraphs. This method will traverse through\nthese functions if they capture from_ops or any reachable ops. This is\nuseful if to_ops occur in a function and from_ops are in an outer function\nor graph.\nxs_set: ObjectIdentitySet of Tensors.\n\nReturns:\nA tuple containing: (1) the subset of to_ops reachable from from_ops by a\npath of zero or more backpropagatable tensors, (2) a mapping from operation\nto the number of backprop inputs to that op, and (3) a ControlFlowState\nobject which is not None if the ops between from_ops and to_ops contain\ncontrol flow loops.", "source": "github-repos"}
{"code": "def PushBack(self, string='', **unused_kwargs):\n    \n    self.buffer = string + self.buffer\n    self.processed_buffer = self.processed_buffer[:-len(string)]", "docstring": "Push the match back on the stream.\n\nArgs:\nstring: optional data.", "source": "juraj-google-style"}
{"code": "def get_ip_prefixes_from_bird(filename):\n    \n    prefixes = []\n    with open(filename, 'r') as bird_conf:\n        lines = bird_conf.read()\n\n    for line in lines.splitlines():\n        line = line.strip(', ')\n        if valid_ip_prefix(line):\n            prefixes.append(line)\n\n    return prefixes", "docstring": "Build a list of IP prefixes found in Bird configuration.\n\nArguments:\nfilename (str): The absolute path of the Bird configuration file.\n\nNotes:\nIt can only parse a file with the following format\n\ndefine ACAST_PS_ADVERTISE =\n[\n10.189.200.155/32,\n10.189.200.255/32\n];\n\nReturns:\nA list of IP prefixes.", "source": "juraj-google-style"}
{"code": "def transpose(self, name=None):\n    if (name is None):\n        name = (self.module_name + '_transpose')\n    if (self._data_format == DATA_FORMAT_NWC):\n        stride = self._stride[1:(- 1)]\n    else:\n        stride = self._stride[2:]\n    return Conv1D(output_channels=(lambda : self.input_channels), kernel_shape=self.kernel_shape, stride=stride, padding=self.padding, use_bias=self._use_bias, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)", "docstring": "Returns matching `Conv1D` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n`Conv1D` module.", "source": "codesearchnet"}
{"code": "def _GetSignedBinaryMetadata(binary_type, relative_path):\n  \n  root_urn = _GetSignedBlobsRoots()[binary_type]\n  binary_urn = root_urn.Add(relative_path)\n  blob_iterator, timestamp = signed_binary_utils.FetchBlobsForSignedBinary(\n      binary_urn)\n  binary_size = 0\n  has_valid_signature = True\n  for blob in blob_iterator:\n    binary_size += len(blob.data)\n    if not has_valid_signature:\n      \n      \n      continue\n    try:\n      blob.Verify(config.CONFIG[\"Client.executable_signing_public_key\"])\n    except rdf_crypto.Error:\n      has_valid_signature = False\n\n  return ApiGrrBinary(\n      path=relative_path,\n      type=binary_type,\n      size=binary_size,\n      timestamp=timestamp,\n      has_valid_signature=has_valid_signature)", "docstring": "Fetches metadata for the given binary from the datastore.\n\nArgs:\nbinary_type: ApiGrrBinary.Type of the binary.\nrelative_path: Relative path of the binary, relative to the canonical URN\nroots for signed binaries (see _GetSignedBlobsRoots()).\n\nReturns:\nAn ApiGrrBinary RDFProtoStruct containing metadata for the binary.", "source": "juraj-google-style"}
{"code": "def _call(sig, *inputs, **kwargs):\n    if len(inputs) != len(sig.input_arg):\n        raise ValueError(f'Expected {len(sig.input_arg):d} arguments, got {len(inputs):d}.')\n    name = kwargs.pop('name', None)\n    g = ops.get_default_graph()\n    func_name = sig.name\n    if name is None:\n        name = func_name\n    attrs = _parse_kwargs_as_attrs(func_name, **kwargs)\n    output_types = [dtypes.DType(x.type) for x in sig.output_arg]\n    op = g._create_op_internal(func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)\n    if op.outputs:\n        if len(op.outputs) == 1:\n            ret = op.outputs[0]\n        else:\n            ret = tuple(op.outputs)\n    else:\n        ret = op\n    return (ret, op)", "docstring": "Adds a node calling a function.\n\nThis adds a `call` op to the default graph that calls the function\nof signature `sig`, passing the tensors in `inputs` as arguments.\nIt returns the outputs of the call, which are one or more tensors.\n\n`sig` is OpDefArg.a `_DefinedFunction` object.\n\nYou can pass an optional keyword parameter `name=string` to name the\nadded operation.\n\nYou can pass an optional keyword parameter `noinline=True|False` to\ninstruct the runtime not to inline the function body into the call\nsite.\n\nArgs:\nsig: OpDefArg. The signature of the function.\n*inputs: arguments to the function.\n**kwargs: Optional keyword arguments.  Can only contain 'name' or\n'noinline'.\n\nReturns:\nA 2-element tuple. First element: a Tensor if the function returns a single\nvalue; a list of Tensors if the function returns multiple value; the\nOperation if the function returns no values. Second element: the Operation.\n\nRaises:\nValueError: if the arguments are invalid.", "source": "github-repos"}
{"code": "def valid(self, name):\n    name = re.sub('[^0-9a-zA-Z_]', '', name)\n    if re.match('[0-9]', name):\n        name = ('_' + name)\n    return name", "docstring": "Ensure a variable name is valid.\n\nNote: Assumes variable names are ASCII, which isn't necessarily true in\nPython 3.\n\nArgs:\nname: A proposed variable name.\n\nReturns:\nA valid version of the name.", "source": "codesearchnet"}
{"code": "def quality_score(self, tests, alias=None):\n        \n        results = self.quality(tests, alias=alias).values()\n        if results:\n            return sum(results) / len(results)\n        return -1", "docstring": "Run a series of tests and return the normalized score.\n1.0:   Passed all tests.\n(0-1): Passed a fraction of tests.\n0.0:   Passed no tests.\n-1.0:  Took no tests.\n\nArgs:\ntests (list): a list of functions.\nalias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\nReturns:\nfloat. The fraction of tests passed, or -1 for 'took no tests'.", "source": "juraj-google-style"}
{"code": "def _get_target(self, target):\n    depth = (target.count('.') + 1)\n    parts = target.split('.', 1)\n    for m in self.modules:\n        if (parts[0] == m.name):\n            if (depth < 3):\n                return m\n    for p in self.packages:\n        if (parts[0] == p.name):\n            if (depth == 1):\n                return p\n            target = p._get_target(parts[1])\n            if target:\n                return target\n            if (depth < 3):\n                return p\n    return None", "docstring": "Get the Package or Module related to given target.\n\nArgs:\ntarget (str): target to find.\n\nReturns:\nPackage/Module: package containing target or corresponding module.", "source": "codesearchnet"}
{"code": "def pathcase(string):\n    string = snakecase(string)\n    if (not string):\n        return string\n    return re.sub('_', '/', string)", "docstring": "Convert string into path case.\nJoin punctuation with slash.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Path cased string.", "source": "codesearchnet"}
{"code": "def LogUpdate(self, data):\n    for hypo in self.Values():\n        like = self.LogLikelihood(data, hypo)\n        self.Incr(hypo, like)", "docstring": "Updates a suite of hypotheses based on new data.\n\nModifies the suite directly; if you want to keep the original, make\na copy.\n\nNote: unlike Update, LogUpdate does not normalize.\n\nArgs:\ndata: any representation of the data", "source": "codesearchnet"}
{"code": "def ideal_atom_mask(prot: Protein) -> np.ndarray:\n    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]", "docstring": "Computes an ideal atom mask.\n\n`Protein.atom_mask` typically is defined according to the atoms that are reported in the PDB. This function\ncomputes a mask according to heavy atoms that should be present in the given sequence of amino acids.\n\nArgs:\nprot: `Protein` whose fields are `numpy.ndarray` objects.\n\nReturns:\nAn ideal atom mask.", "source": "github-repos"}
{"code": "def rapidfire(self, check_status=True, max_nlaunch=-1, max_loops=1, sleep_time=5, **kwargs):\n        \n        self.check_pid_file()\n        self.set_spectator_mode(False)\n        if check_status: self.check_status()\n        from .launcher import PyLauncher\n        return PyLauncher(self, **kwargs).rapidfire(max_nlaunch=max_nlaunch, max_loops=max_loops, sleep_time=sleep_time)", "docstring": "Use :class:`PyLauncher` to submits tasks in rapidfire mode.\nkwargs contains the options passed to the launcher.\n\nArgs:\ncheck_status:\nmax_nlaunch: Maximum number of launches. default: no limit.\nmax_loops: Maximum number of loops\nsleep_time: seconds to sleep between rapidfire loop iterations\n\nReturn:\nNumber of tasks submitted.", "source": "juraj-google-style"}
{"code": "def unpack_guid(self, offset):\n        \n        o = self._offset + offset\n\n        try:\n            _bin = bytes(self._buf[o:o + 16])\n        except IndexError:\n            raise OverrunBufferException(o, len(self._buf))\n\n        \n        h = [six.indexbytes(_bin, i) for i in range(len(_bin))]\n        return .format(\n            h[3], h[2], h[1], h[0],\n            h[5], h[4],\n            h[7], h[6],\n            h[8], h[9],\n            h[10], h[11], h[12], h[13], h[14], h[15])", "docstring": "Returns a string containing a GUID starting at the relative offset.\nArguments:\n- `offset`: The relative offset from the start of the block.\nThrows:\n- `OverrunBufferException`", "source": "juraj-google-style"}
{"code": "def start(self, timeout=None):\n    assert (self.state == STOPPED), 'Process already started'\n    self.state = STARTING\n    should_publish = self._start_controllers(self._controllers.values(), timeout)\n    if should_publish:\n        self._publish_controllers(timeout)\n    self.state = STARTED", "docstring": "Start the process going\n\nArgs:\ntimeout (float): Maximum amount of time to wait for each spawned\nprocess. None means forever", "source": "codesearchnet"}
{"code": "def __init__(self, definition):\n    \n    self._definition = definition\n\n    \n    \n    self.max_value_len = 256\n\n    \n    self.max_depth = 2\n\n    \n    self.max_list_items = 10\n\n    \n    self.max_sublist_items = 5\n\n    \n    self.quota_recovery_ms = 500\n\n    \n    self._quota_recovery_start_time = None\n\n    \n    level = self._definition.get('logLevel')\n    if not level or level == 'INFO':\n      self._log_message = log_info_message\n    elif level == 'WARNING':\n      self._log_message = log_warning_message\n    elif level == 'ERROR':\n      self._log_message = log_error_message\n    else:\n      self._log_message = None", "docstring": "Class constructor.\n\nArgs:\ndefinition: breakpoint definition indicating log level, message, etc.", "source": "juraj-google-style"}
{"code": "def single_lf_summary(Y_p, Y=None):\n    L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape((- 1), 1))\n    return lf_summary(L, Y)", "docstring": "Calculates coverage, overlap, conflicts, and accuracy for a single LF\n\nArgs:\nY_p: a np.array or torch.Tensor of predicted labels\nY: a np.array or torch.Tensor of true labels (if known)", "source": "codesearchnet"}
{"code": "def __init__(self, filename):\n    \n    super(FileNameFileEntryFilter, self).__init__()\n    self._filename = filename.lower()", "docstring": "Initializes a file entry filter.\n\nArgs:\nfilename (str): name of the file.", "source": "juraj-google-style"}
{"code": "def do_ams_put(endpoint, path, body, access_token, rformat=\"json\", ds_min_version=\"3.0;NetFx\"):\n    \n    min_ds = dsversion_min\n    content_acceptformat = json_acceptformat\n    if rformat == \"json_only\":\n        min_ds = ds_min_version\n        content_acceptformat = json_only_acceptformat\n        headers = {\"Content-Type\": content_acceptformat,\n                   \"DataServiceVersion\": min_ds,\n                   \"MaxDataServiceVersion\": dsversion_max,\n                   \"Accept\": json_acceptformat,\n                   \"Accept-Charset\" : charset,\n                   \"Authorization\": \"Bearer \" + access_token,\n                   \"x-ms-version\" : xmsversion}\n    response = requests.put(endpoint, data=body, headers=headers, allow_redirects=False)\n    \n    \n    if response.status_code == 301:\n        redirected_url = ''.join([response.headers['location'], path])\n        response = requests.put(redirected_url, data=body, headers=headers)\n        return response", "docstring": "Do a AMS HTTP PUT request and return JSON.\nArgs:\nendpoint (str): Azure Media Services Initial Endpoint.\npath (str): Azure Media Services Endpoint Path.\nbody  (str): Azure Media Services Content Body.\naccess_token (str): A valid Azure authentication token.\nrformat (str): A required JSON Accept Format.\nds_min_version (str): A required DS MIN Version.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def convert_collections_to_typing(typ):\n    if hasattr(typ, '__iter__'):\n        if hasattr(typ, '__next__'):\n            typ = typing.Iterator[typ.__args__]\n        elif hasattr(typ, 'send') and hasattr(typ, 'throw'):\n            typ = typing.Generator[typ.__args__]\n        elif _match_is_exactly_iterable(typ):\n            typ = typing.Iterable[typ.__args__]\n    return typ", "docstring": "Converts a given collections.abc type to a typing object.\n\nArgs:\ntyp: an object inheriting from a collections.abc object\n\nReturns:\ntype: The corresponding typing object.", "source": "github-repos"}
{"code": "def load(source, triples=False, cls=PENMANCodec, **kwargs):\n    \n    decode = cls(**kwargs).iterdecode\n    if hasattr(source, 'read'):\n        return list(decode(source.read()))\n    else:\n        with open(source) as fh:\n            return list(decode(fh.read()))", "docstring": "Deserialize a list of PENMAN-encoded graphs from *source*.\n\nArgs:\nsource: a filename or file-like object to read from\ntriples: if True, read graphs as triples instead of as PENMAN\ncls: serialization codec class\nkwargs: keyword arguments passed to the constructor of *cls*\nReturns:\na list of Graph objects", "source": "juraj-google-style"}
{"code": "def _canonicalize_jit_arguments(inp):\n    return nest.map_structure(_canonicalize_jit_arg, inp)", "docstring": "Canonicalize arguments to be used for jit.\n\nArgs:\ninp: a nested structure of arguments to be canonicalized (i.e. to be\nconverted to Tensors). Only tf_np.ndarray and things accepted by\n`tf.convert_to_tensor` will be converted.\n\nReturns:\nThe canonicalized version.", "source": "github-repos"}
{"code": "def get_help(self, prefix='', include_special_flags=True):\n    flags_by_module = self.flags_by_module_dict()\n    if flags_by_module:\n        modules = sorted(flags_by_module)\n        main_module = sys.argv[0]\n        if (main_module in modules):\n            modules.remove(main_module)\n            modules = ([main_module] + modules)\n        return self._get_help_for_modules(modules, prefix, include_special_flags)\n    else:\n        output_lines = []\n        values = six.itervalues(self._flags())\n        if include_special_flags:\n            values = itertools.chain(values, six.itervalues(_helpers.SPECIAL_FLAGS._flags()))\n        self._render_flag_list(values, output_lines, prefix)\n        return '\\n'.join(output_lines)", "docstring": "Returns a help string for all known flags.\n\nArgs:\nprefix: str, per-line output prefix.\ninclude_special_flags: bool, whether to include description of\nSPECIAL_FLAGS, i.e. --flagfile and --undefok.\n\nReturns:\nstr, formatted help message.", "source": "codesearchnet"}
{"code": "def __init__(self, device):\n    super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('OneDeviceStrategy')", "docstring": "Creates a `OneDeviceStrategy`.\n\nArgs:\ndevice: Device string identifier for the device on which the variables\nshould be placed. See class docs for more details on how the device is\nused. Examples: \"/cpu:0\", \"/gpu:0\", \"/device:CPU:0\", \"/device:GPU:0\"", "source": "github-repos"}
{"code": "def slice_naive(self, key):\n        \n        cls = self.__class__\n        key = check_key(self, key)\n        return cls(self.loc[key])", "docstring": "Slice a data object based on its index, either by value (.loc) or\nposition (.iloc).\n\nArgs:\nkey: Single index value, slice, tuple, or list of indices/positionals\n\nReturns:\ndata: Slice of self", "source": "juraj-google-style"}
{"code": "def filepattern(self, data_dir, mode, shard=None):\n    \n    path = os.path.join(data_dir, self.dataset_filename())\n    shard_str = \"-%05d\" % shard if shard is not None else \"\"\n    if mode == DatasetSplit.TRAIN:\n      suffix = \"train\"\n    elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:\n      suffix = \"dev\"\n    else:\n      assert mode == DatasetSplit.TEST\n      suffix = \"test\"\n\n    return \"%s-%s%s*\" % (path, suffix, shard_str)", "docstring": "Get filepattern for data files for mode.\n\nMatches mode to a suffix.\n* DatasetSplit.TRAIN: train\n* DatasetSplit.EVAL: dev\n* DatasetSplit.TEST: test\n* tf.estimator.ModeKeys.PREDICT: dev\n\nArgs:\ndata_dir: str, data directory.\nmode: DatasetSplit\nshard: int, if provided, will only read data from the specified shard.\n\nReturns:\nfilepattern str", "source": "juraj-google-style"}
{"code": "def _info_to_string(info):\n    for key in _TENSORBOARD_INFO_FIELDS:\n        field_type = _TENSORBOARD_INFO_FIELDS[key]\n        if (not isinstance(getattr(info, key), field_type.runtime_type)):\n            raise ValueError(('expected %r of type %s, but found: %r' % (key, field_type.runtime_type, getattr(info, key))))\n    if (info.version != version.VERSION):\n        raise ValueError((\"expected 'version' to be %r, but found: %r\" % (version.VERSION, info.version)))\n    json_value = {k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k)) for k in _TENSORBOARD_INFO_FIELDS}\n    return json.dumps(json_value, sort_keys=True, indent=4)", "docstring": "Convert a `TensorBoardInfo` to string form to be stored on disk.\n\nThe format returned by this function is opaque and should only be\ninterpreted by `_info_from_string`.\n\nArgs:\ninfo: A valid `TensorBoardInfo` object.\n\nRaises:\nValueError: If any field on `info` is not of the correct type.\n\nReturns:\nA string representation of the provided `TensorBoardInfo`.", "source": "codesearchnet"}
{"code": "def __init__(self, image, segments):\n        \n        self.image = image\n        self.segments = segments\n        self.intercept = {}\n        self.local_exp = {}\n        self.local_pred = None", "docstring": "Init function.\n\nArgs:\nimage: 3d numpy array\nsegments: 2d numpy array, with the output from skimage.segmentation", "source": "juraj-google-style"}
{"code": "def account(transition, direction=Direction.BIDIRECTIONAL):\n    \n    if direction != Direction.BIDIRECTIONAL:\n        return directed_account(transition, direction)\n\n    return Account(directed_account(transition, Direction.CAUSE) +\n                   directed_account(transition, Direction.EFFECT))", "docstring": "Return the set of all causal links for a |Transition|.\n\nArgs:\ntransition (Transition): The transition of interest.\n\nKeyword Args:\ndirection (Direction): By default the account contains actual causes\nand actual effects.", "source": "juraj-google-style"}
{"code": "def clone(self, spec=None, **overrides):\n    settings = dict(self.get_param_values(), **overrides)\n    if (spec is None):\n        spec = (self.name, overrides.get('label', self.label))\n    if (('label' in overrides) and isinstance(spec, basestring)):\n        spec = (spec, overrides['label'])\n    elif (('label' in overrides) and isinstance(spec, tuple)):\n        if (overrides['label'] != spec[1]):\n            self.param.warning('Using label as supplied by keyword ({!r}), ignoring tuple value {!r}'.format(overrides['label'], spec[1]))\n        spec = (spec[0], overrides['label'])\n    return self.__class__(spec, **{k: v for (k, v) in settings.items() if (k not in ['name', 'label'])})", "docstring": "Clones the Dimension with new parameters\n\nDerive a new Dimension that inherits existing parameters\nexcept for the supplied, explicit overrides\n\nArgs:\nspec (tuple, optional): Dimension tuple specification\n**overrides: Dimension parameter overrides\n\nReturns:\nCloned Dimension object", "source": "codesearchnet"}
{"code": "async def send_with_attachments(subject, message, filepaths, config):\n    \n    email_ = MIMEMultipart()\n    email_.attach(MIMEText(message))\n    email_[\"Subject\"] = subject\n    email_[\"From\"] = get_attribute_from_config(config, EMAIL_SECTION_KEY, USER_KEY)\n    email_[\"To\"] = get_attribute_from_config(config, EMAIL_SECTION_KEY, RECEIVER_KEY)\n    _attach_files(filepaths, email_)\n    await _send_email(email_, config)", "docstring": "Send an email from the user (a gmail) to the receiver.\n\nArgs:\nsubject (str): Subject of the email.\nmessage (str): A message.\nfilepaths (list(str)): Filepaths to files to be attached.\nconfig (defaultdict): A defaultdict.", "source": "juraj-google-style"}
{"code": "async def _pb_request(self, endpoint, request_pb, response_pb):\n    logger.debug('Sending Protocol Buffer request %s:\\n%s', endpoint, request_pb)\n    res = (await self._base_request('https:\n    try:\n        response_pb.ParseFromString(base64.b64decode(res.body))\n    except binascii.Error as e:\n        raise exceptions.NetworkError('Failed to decode base64 response: {}'.format(e))\n    except google.protobuf.message.DecodeError as e:\n        raise exceptions.NetworkError('Failed to decode Protocol Buffer response: {}'.format(e))\n    logger.debug('Received Protocol Buffer response:\\n%s', response_pb)\n    status = response_pb.response_header.status\n    if (status != hangouts_pb2.RESPONSE_STATUS_OK):\n        description = response_pb.response_header.error_description\n        raise exceptions.NetworkError(\"Request failed with status {}: '{}'\".format(status, description))", "docstring": "Send a Protocol Buffer formatted chat API request.\n\nArgs:\nendpoint (str): The chat API endpoint to use.\nrequest_pb: The request body as a Protocol Buffer message.\nresponse_pb: The response body as a Protocol Buffer message.\n\nRaises:\nNetworkError: If the request fails.", "source": "codesearchnet"}
{"code": "def get_new_profile_template(self):\n    uri = '{}/new-profile-template'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Retrieves the profile template for a given server profile.\n\nReturns:\ndict: Server profile template.", "source": "codesearchnet"}
{"code": "def dump(self, content, filepath, indent=4):\n        \n        with open(filepath, 'w') as fp:\n            json.dump(content, fp, indent=indent)", "docstring": "Dump settings content to filepath.\n\nArgs:\ncontent (str): Settings content.\nfilepath (str): Settings file location.", "source": "juraj-google-style"}
{"code": "def get_range(self, start=None, stop=None):\n    return self.from_iterable(self.ranges(start, stop))", "docstring": "Return a RangeMap for the range start to stop.\n\nReturns:\nA RangeMap", "source": "codesearchnet"}
{"code": "def get_module(module_abs_import):\n    logger.debug('starting')\n    logger.debug(f'loading module {module_abs_import}')\n    try:\n        imported_module = importlib.import_module(module_abs_import)\n        logger.debug('done')\n        return imported_module\n    except ModuleNotFoundError as err:\n        msg = f\"The module doesn't exist. Looking for a file like this: {module_abs_import}\"\n        extended_msg = f\n        logger.error(msg)\n        raise PyModuleNotFoundError(extended_msg) from err", "docstring": "Use importlib to get the module dynamically.\n\nGet instance of the module specified by the module_abs_import.\nThis means that module_abs_import must be resolvable from this package.\n\nArgs:\nmodule_abs_import: string. Absolute name of module to import.\n\nRaises:\nPyModuleNotFoundError: if module not found.", "source": "codesearchnet"}
{"code": "def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):\n    version = 2\n    if (aggregate_indexes is None):\n        aggregate_indexes = []\n    if (forensic_indexes is None):\n        forensic_indexes = []\n    for aggregate_index_name in aggregate_indexes:\n        if (not Index(aggregate_index_name).exists()):\n            continue\n        aggregate_index = Index(aggregate_index_name)\n        doc = 'doc'\n        fo_field = 'published_policy.fo'\n        fo = 'fo'\n        fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field])\n        fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]['mappings']\n        if (doc not in fo_mapping):\n            continue\n        fo_mapping = fo_mapping[doc][fo_field]['mapping'][fo]\n        fo_type = fo_mapping['type']\n        if (fo_type == 'long'):\n            new_index_name = '{0}-v{1}'.format(aggregate_index_name, version)\n            body = {'properties': {'published_policy.fo': {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}}}\n            Index(new_index_name).create()\n            Index(new_index_name).put_mapping(doc_type=doc, body=body)\n            reindex(connections.get_connection(), aggregate_index_name, new_index_name)\n            Index(aggregate_index_name).delete()\n    for forensic_index in forensic_indexes:\n        pass", "docstring": "Updates index mappings\n\nArgs:\naggregate_indexes (list): A list of aggregate index names\nforensic_indexes (list): A list of forensic index names", "source": "codesearchnet"}
{"code": "def CheckGlobalStatic(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    if (((linenum + 1) < clean_lines.NumLines()) and (not Search('[;({]', line))):\n        line += clean_lines.elided[(linenum + 1)].strip()\n    match = Match('((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\\\b(.*)', line)\n    if (match and (not Search('\\\\bstring\\\\b(\\\\s+const)?\\\\s*\\\\*\\\\s*(const\\\\s+)?\\\\w', line)) and (not Search('\\\\boperator\\\\W', line)) and (not Match('\\\\s*(<.*>)?(::[a-zA-Z0-9_]+)*\\\\s*\\\\(([^\"]|$)', match.group(3)))):\n        error(filename, linenum, 'runtime/string', 4, ('For a static/global string constant, use a C style string instead: \"%schar %s[]\".' % (match.group(1), match.group(2))))\n    if Search('\\\\b([A-Za-z0-9_]*_)\\\\(\\\\1\\\\)', line):\n        error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')", "docstring": "Check for unsafe global or static objects.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def _grad_variance(self):\n    grad_var_ops = []\n    tensor_to_avg = []\n    for (t, g) in zip(self._vars, self._grad):\n        if isinstance(g, tf.IndexedSlices):\n            tensor_to_avg.append(tf.reshape(tf.unsorted_segment_sum(g.values, g.indices, g.dense_shape[0]), shape=t.get_shape()))\n        else:\n            tensor_to_avg.append(g)\n    avg_op = self._moving_averager.apply(tensor_to_avg)\n    grad_var_ops.append(avg_op)\n    with tf.control_dependencies([avg_op]):\n        self._grad_avg = [self._moving_averager.average(val) for val in tensor_to_avg]\n        self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]\n    self._grad_var = tf.maximum(tf.constant(1e-06, dtype=self._grad_norm_squared_avg.dtype), (self._grad_norm_squared_avg - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared])))\n    if self._sparsity_debias:\n        self._grad_var *= self._sparsity_avg\n    return grad_var_ops", "docstring": "Estimate of gradient Variance.\n\nReturns:\nC_t ops.", "source": "codesearchnet"}
{"code": "def ReadFileObject(self, definitions_registry, file_object):\n    \n    last_definition_object = None\n    error_location = None\n    error_message = None\n\n    try:\n      yaml_generator = yaml.safe_load_all(file_object)\n\n      for yaml_definition in yaml_generator:\n        definition_object = self._ReadDefinition(\n            definitions_registry, yaml_definition)\n        if not definition_object:\n          error_location = self._GetFormatErrorLocation(\n              yaml_definition, last_definition_object)\n          error_message = '{0:s} Missing definition object.'.format(\n              error_location)\n          raise errors.FormatError(error_message)\n\n        definitions_registry.RegisterDefinition(definition_object)\n        last_definition_object = definition_object\n\n    except errors.DefinitionReaderError as exception:\n      error_message = 'in: {0:s} {1:s}'.format(\n          exception.name or '<NAMELESS>', exception.message)\n      raise errors.FormatError(error_message)\n\n    except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:\n      error_location = self._GetFormatErrorLocation({}, last_definition_object)\n      error_message = '{0:s} {1!s}'.format(error_location, exception)\n      raise errors.FormatError(error_message)", "docstring": "Reads data type definitions from a file-like object into the registry.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\nfile_object (file): file-like object to read from.\n\nRaises:\nFormatError: if the definitions values are missing or if the format is\nincorrect.", "source": "juraj-google-style"}
{"code": "def _build_select_and_next_from_expressions(self, builders: Tuple[column_expression_builder.ColumnExpressionBuilder, ...], child_builders: MutableSequence[column_expression_builder.ColumnExpressionBuilder], columns_selected: MutableSequence[str]) -> Tuple[MutableSequence[str], MutableSequence[str]]:\n    select_expressions, next_from_expressions = ([], [])\n    for column_name in columns_selected:\n        select_expressions.append(f'(SELECT {column_name}) AS {column_name}')\n    for builder in builders:\n        child_builders.extend(builder.children)\n        column_alias = _get_column_alias(builder)\n        if builder.column_name:\n            columns_selected.append(column_alias)\n        needs_unnest = builder.needs_unnest or builder.children\n        select_expression = self._encode(builder=builder, select_scalars_as_array=needs_unnest)\n        if needs_unnest:\n            select_expression = f'{select_expression} AS {column_alias}_needs_unnest_'\n            next_from_expressions.append(self._build_next_from_expression(column_alias))\n        else:\n            select_expression = f'{select_expression} AS {column_alias}'\n        select_expressions.append(select_expression)\n    return (select_expressions, next_from_expressions)", "docstring": "Build select expressions and next from expressions from the builders.\n\nArgs:\nbuilders: the immutable current builders to compute select expressions.\nchild_builders: collects the current given builders' children for the next\nround.\ncolumns_selected: accumulatively collects columns which has already been\nhandled completely.\n\nReturns:\nThe select expressions and next from expressions computed form the given\nbuilders.", "source": "github-repos"}
{"code": "def node_attributes(self, node_name, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs have been loaded.')\n    device_name = self._infer_device_name(device_name, node_name)\n    return self._debug_graphs[device_name].node_attributes[node_name]", "docstring": "Get the attributes of a node.\n\nArgs:\nnode_name: Name of the node in question.\ndevice_name: (`str`) name of the device. If there is only one device or if\nnode_name exists on only one device, this argument is optional.\n\nReturns:\nAttributes of the node.\n\nRaises:\nLookupError: If no partition graphs have been loaded.", "source": "github-repos"}
{"code": "def cudnn_bi_gru(units, n_hidden, seq_lengths=None, n_layers=1, trainable_initial_states=False, name='cudnn_bi_gru', reuse=False):\n    with tf.variable_scope(name, reuse=reuse):\n        if (seq_lengths is None):\n            seq_lengths = (tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1])\n        with tf.variable_scope('Forward'):\n            (h_fw, h_last_fw) = cudnn_gru_wrapper(units, n_hidden, n_layers=n_layers, trainable_initial_states=trainable_initial_states, seq_lengths=seq_lengths, reuse=reuse)\n        with tf.variable_scope('Backward'):\n            reversed_units = tf.reverse_sequence(units, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)\n            (h_bw, h_last_bw) = cudnn_gru_wrapper(reversed_units, n_hidden, n_layers=n_layers, trainable_initial_states=trainable_initial_states, seq_lengths=seq_lengths, reuse=reuse)\n            h_bw = tf.reverse_sequence(h_bw, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)\n    return ((h_fw, h_bw), (h_last_fw, h_last_bw))", "docstring": "Fast CuDNN Bi-GRU implementation\n\nArgs:\nunits: tf.Tensor with dimensions [B x T x F], where\nB - batch size\nT - number of tokens\nF - features\nn_hidden: dimensionality of hidden state\nseq_lengths: number of tokens in each sample in the batch\nn_layers: number of layers\ntrainable_initial_states: whether to create a special trainable variable\nto initialize the hidden states of the network or use just zeros\nname: name of the variable scope to use\nreuse:whether to reuse already initialized variable\n\n\nReturns:\nh - all hidden states along T dimension,\ntf.Tensor with dimensionality [B x T x F]\nh_last - last hidden state, tf.Tensor with dimensionality [B x H * 2]\nwhere H - number of hidden units", "source": "codesearchnet"}
{"code": "def print_args(output=sys.stdout):\n    \n    def decorator(func):\n        \n        @wraps(func)\n        def _(*args, **kwargs):\n            \n            output.write(\n                \"Args: {0}, KwArgs: {1}\\n\".format(str(args), str(kwargs)))\n            return func(*args, **kwargs)\n        return _\n    return decorator", "docstring": "Decorate a function so that print arguments before calling it.\n\nArgs:\noutput: writable to print args. (Default: sys.stdout)", "source": "juraj-google-style"}
{"code": "def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None):\n    \n    self._status = definitions.STATUS_INDICATOR_RUNNING\n    self._number_of_consumed_events = 0\n    self._number_of_consumed_reports = 0\n    self._number_of_consumed_sources = 0\n    self._number_of_consumed_warnings = 0\n    self._number_of_produced_events = 0\n    self._number_of_produced_reports = 0\n    self._number_of_produced_sources = 0\n    self._number_of_produced_warnings = 0\n\n    number_of_filtered_events = 0\n\n    logger.debug('Processing events.')\n\n    filter_limit = getattr(event_filter, 'limit', None)\n\n    for event in storage_writer.GetSortedEvents():\n      event_data_identifier = event.GetEventDataIdentifier()\n      if event_data_identifier:\n        event_data = storage_writer.GetEventDataByIdentifier(\n            event_data_identifier)\n        if event_data:\n          for attribute_name, attribute_value in event_data.GetAttributes():\n            setattr(event, attribute_name, attribute_value)\n\n      event_identifier = event.GetIdentifier()\n      event.tag = self._event_tag_index.GetEventTagByIdentifier(\n          storage_writer, event_identifier)\n\n      if event_filter:\n        filter_match = event_filter.Match(event)\n      else:\n        filter_match = None\n\n      \n      if filter_match == False:\n        number_of_filtered_events += 1\n        continue\n\n      for event_queue in self._event_queues.values():\n        \n        event_queue.PushItem(event)\n\n      self._number_of_consumed_events += 1\n\n      if (event_filter and filter_limit and\n          filter_limit == self._number_of_consumed_events):\n        break\n\n    logger.debug('Finished pushing events to analysis plugins.')\n    \n    for event_queue in self._event_queues.values():\n      event_queue.PushItem(plaso_queue.QueueAbort(), block=False)\n\n    logger.debug('Processing analysis plugin results.')\n\n    \n    plugin_names = [plugin_name for plugin_name in analysis_plugins.keys()]\n    while plugin_names:\n      for plugin_name in list(plugin_names):\n        if self._abort:\n          break\n\n        \n        task = tasks.Task()\n        task.identifier = plugin_name\n\n        merge_ready = storage_writer.CheckTaskReadyForMerge(task)\n        if merge_ready:\n          storage_writer.PrepareMergeTaskStorage(task)\n          self._status = definitions.STATUS_INDICATOR_MERGING\n\n          event_queue = self._event_queues[plugin_name]\n          del self._event_queues[plugin_name]\n\n          event_queue.Close()\n\n          storage_merge_reader = storage_writer.StartMergeTaskStorage(task)\n\n          storage_merge_reader.MergeAttributeContainers(\n              callback=self._MergeEventTag)\n          \n          plugin_names.remove(plugin_name)\n\n          self._status = definitions.STATUS_INDICATOR_RUNNING\n\n          self._number_of_produced_event_tags = (\n              storage_writer.number_of_event_tags)\n          self._number_of_produced_reports = (\n              storage_writer.number_of_analysis_reports)\n\n    try:\n      storage_writer.StopTaskStorage(abort=self._abort)\n    except (IOError, OSError) as exception:\n      logger.error('Unable to stop task storage with error: {0!s}'.format(\n          exception))\n\n    if self._abort:\n      logger.debug('Processing aborted.')\n    else:\n      logger.debug('Processing completed.')\n\n    events_counter = collections.Counter()\n    events_counter['Events filtered'] = number_of_filtered_events\n    
events_counter['Events processed'] = self._number_of_consumed_events\n\n    return events_counter", "docstring": "Analyzes events in a plaso storage.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\nanalysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that\nshould be run and their names.\nevent_filter (Optional[FilterObject]): event filter.\n\nReturns:\ncollections.Counter: counter containing information about the events\nprocessed and filtered.\n\nRaises:\nRuntimeError: if a non-recoverable situation is encountered.", "source": "juraj-google-style"}
{"code": "def submodules(self):\n    submodules = []\n    submodules.extend(self.modules)\n    for p in self.packages:\n        submodules.extend(p.submodules)\n    return submodules", "docstring": "Property to return all sub-modules of the node, recursively.\n\nReturns:\nlist of Module: the sub-modules.", "source": "codesearchnet"}
{"code": "def MakePmfFromList(t, name=''):\n    \n    hist = MakeHistFromList(t)\n    d = hist.GetDict()\n    pmf = Pmf(d, name)\n    pmf.Normalize()\n    return pmf", "docstring": "Makes a PMF from an unsorted sequence of values.\n\nArgs:\nt: sequence of numbers\nname: string name for this PMF\n\nReturns:\nPmf object", "source": "juraj-google-style"}
{"code": "def send_command(self, command, arg=None):\n    \n    if arg is not None:\n      command = '%s:%s' % (command, arg)\n    self._write(six.StringIO(command), len(command))", "docstring": "Sends a command to the device.\n\nArgs:\ncommand: The command to send.\narg: Optional argument to the command.", "source": "juraj-google-style"}
{"code": "def delete_items_by_index(list_, index_list, copy=False):\n    if copy:\n        list_ = list_[:]\n    index_list_ = [((len(list_) + x) if (x < 0) else x) for x in index_list]\n    index_list_ = sorted(index_list_, reverse=True)\n    for index in index_list_:\n        del list_[index]\n    return list_", "docstring": "Remove items from ``list_`` at positions specified in ``index_list``\nThe original ``list_`` is preserved if ``copy`` is True\n\nArgs:\nlist_ (list):\nindex_list (list):\ncopy (bool): preserves original list if True\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> list_ = [8, 1, 8, 1, 6, 6, 3, 4, 4, 5, 6]\n>>> index_list = [2, -1]\n>>> result = delete_items_by_index(list_, index_list)\n>>> print(result)\n[8, 1, 1, 6, 6, 3, 4, 4, 5]", "source": "codesearchnet"}
{"code": "def __optimize_deconvolution_layer(self, learning_rate, epoch):\n        \n        params_list = []\n        grads_list = []\n\n        for i in range(len(self.__deconvolution_layer_list)):\n            if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0:\n                params_list.append(self.__deconvolution_layer_list[i].graph.weight_arr)\n                grads_list.append(self.__deconvolution_layer_list[i].delta_weight_arr)\n\n        for i in range(len(self.__deconvolution_layer_list)):\n            if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0:\n                params_list.append(self.__deconvolution_layer_list[i].graph.bias_arr)\n                grads_list.append(self.__deconvolution_layer_list[i].delta_bias_arr)\n\n        params_list = self.__opt_params.optimize(\n            params_list,\n            grads_list,\n            learning_rate\n        )\n\n        i = 0\n        for i in range(len(self.__deconvolution_layer_list)):\n            if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0:\n                self.__deconvolution_layer_list[i].graph.weight_arr = params_list.pop(0)\n                if ((epoch + 1) % self.__attenuate_epoch == 0):\n                    self.__deconvolution_layer_list[i].graph.weight_arr = self.__opt_params.constrain_weight(\n                        self.__deconvolution_layer_list[i].graph.weight_arr\n                    )\n\n        for i in range(len(self.__deconvolution_layer_list)):\n            if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0:\n                self.__deconvolution_layer_list[i].graph.bias_arr = params_list.pop(0)\n\n        for i in range(len(self.__deconvolution_layer_list)):\n            if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0:\n                if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0:\n                    self.__deconvolution_layer_list[i].reset_delta()", "docstring": "Back propagation for Deconvolution layer.\n\nArgs:\nlearning_rate:  Learning rate.\nepoch:          Now epoch.", "source": "juraj-google-style"}
{"code": "def convert_attribute_name_to_tag(value):\n    if (not isinstance(value, six.string_types)):\n        raise ValueError('The attribute name must be a string.')\n    for entry in attribute_name_tag_table:\n        if (value == entry[0]):\n            return entry[1]\n    raise ValueError(\"Unrecognized attribute name: '{}'\".format(value))", "docstring": "A utility function that converts an attribute name string into the\ncorresponding attribute tag.\n\nFor example: 'State' -> enums.Tags.STATE\n\nArgs:\nvalue (string): The string name of the attribute.\n\nReturns:\nenum: The Tags enumeration value that corresponds to the attribute\nname string.\n\nRaises:\nValueError: if the attribute name string is not a string or if it is\nan unrecognized attribute name", "source": "codesearchnet"}
{"code": "def aggregate_groups(self, ct_agg, nr_groups, skip_key, carray_factor, groupby_cols, agg_ops, dtype_dict, bool_arr=None):\n    for col in groupby_cols:\n        result_array = ctable_ext.groupby_value(self[col], carray_factor, nr_groups, skip_key)\n        if (bool_arr is not None):\n            result_array = np.delete(result_array, skip_key)\n        ct_agg.addcol(result_array, name=col)\n        del result_array\n    for (input_col_name, output_col_name, agg_op) in agg_ops:\n        input_col = self[input_col_name]\n        output_col_dtype = dtype_dict[output_col_name]\n        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)\n        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)\n        if (agg_op == 'sum'):\n            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer)\n        elif (agg_op == 'mean'):\n            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer)\n        elif (agg_op == 'std'):\n            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer)\n        elif (agg_op == 'count'):\n            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer)\n        elif (agg_op == 'count_distinct'):\n            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer)\n        elif (agg_op == 'sorted_count_distinct'):\n            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups, skip_key, input_buffer, output_buffer)\n        else:\n            raise KeyError(('Unknown aggregation operation ' + str(agg_op)))\n        if (bool_arr is not None):\n            output_buffer = np.delete(output_buffer, skip_key)\n        ct_agg.addcol(output_buffer, name=output_col_name)\n        del output_buffer\n    ct_agg.delcol('tmp_col_bquery__')", "docstring": "Perform aggregation and place the result in the given ctable.\n\nArgs:\nct_agg (ctable): the table to hold the aggregation\nnr_groups (int): the number of groups (number of rows in output table)\nskip_key (int): index of the output row to remove from results (used for filtering)\ncarray_factor: the carray for each row in the table a reference to the the unique group index\ngroupby_cols: the list of 'dimension' columns that are used to perform the groupby over\noutput_agg_ops (list): list of tuples of the form: (input_col, agg_op)\ninput_col (string): name of the column to act on\nagg_op (int): aggregation operation to perform\nbool_arr: a boolean array containing the filter", "source": "codesearchnet"}
{"code": "def Decompress(self, compressed_data):\n    \n    try:\n      uncompressed_data = self._zlib_decompressor.decompress(compressed_data)\n      remaining_compressed_data = getattr(\n          self._zlib_decompressor, 'unused_data', b'')\n\n    except zlib.error as exception:\n      raise errors.BackEndError((\n          'Unable to decompress zlib compressed stream with error: '\n          '{0!s}.').format(exception))\n\n    return uncompressed_data, remaining_compressed_data", "docstring": "Decompresses the compressed data.\n\nArgs:\ncompressed_data (bytes): compressed data.\n\nReturns:\ntuple(bytes, bytes): uncompressed data and remaining compressed data.\n\nRaises:\nBackEndError: if the zlib compressed stream cannot be decompressed.", "source": "juraj-google-style"}
{"code": "def emulate(self, context=None, start=None, end=None, arch_mode=None, hooks=None, max_instrs=None, print_asm=False):\n    if (arch_mode is not None):\n        self._load(arch_mode=arch_mode)\n    context = (context if context else {})\n    start_addr = (start if start else self.binary.ea_start)\n    end_addr = (end if end else self.binary.ea_end)\n    hooks = (hooks if hooks else {})\n    for (reg, val) in context.get('registers', {}).items():\n        self.ir_emulator.registers[reg] = val\n    for (addr, val) in context.get('memory', {}).items():\n        self.ir_emulator.memory.write(addr, 4, val)\n    self.emulator.emulate(start_addr, end_addr, hooks, max_instrs, print_asm)\n    context_out = {'registers': {}, 'memory': {}}\n    for (reg, val) in self.ir_emulator.registers.items():\n        context_out['registers'][reg] = val\n    return context_out", "docstring": "Emulate native code.\n\nArgs:\ncontext (dict): Processor context (register and/or memory).\nstart (int): Start address.\nend (int): End address.\narch_mode (int): Architecture mode.\nhooks (dict): Hooks by address.\nmax_instrs (int): Maximum number of instructions to execute.\nprint_asm (bool): Print asm.\n\nReturns:\ndict: Processor context.", "source": "codesearchnet"}
{"code": "def learn(self, grad_arr):\n        \n        encoder_delta_arr, _, encoder_grads_list = self.__encoder_decoder_controller.encoder.hidden_back_propagate(\n            grad_arr[:, -1]\n        )\n        encoder_grads_list.insert(0, None)\n        encoder_grads_list.insert(0, None)\n\n        self.__encoder_decoder_controller.encoder.optimize(\n            encoder_grads_list, \n            self.__learning_rate,\n            1\n        )\n\n        return encoder_delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\ngrad_arr:   `np.ndarray` of gradients.\n\nReturns:\n`np.ndarray` of delta or gradients.", "source": "juraj-google-style"}
{"code": "def _txn_is_in_valid_batch(self, txn_id):\n    batch = self._batches_by_txn_id[txn_id]\n    return all((self._txn_results[sig].is_valid for sig in set(self._txn_results).intersection((txn.header_signature for txn in batch.transactions))))", "docstring": "Returns whether the transaction is in a valid batch.\n\nArgs:\ntxn_id (str): The transaction header signature.\n\nReturns:\n(bool): True if the txn's batch is valid, False otherwise.", "source": "codesearchnet"}
{"code": "def _get_next_empty_bitmap(self):\n    for (i, byte) in enumerate(self._bitmap):\n        if (byte != 255):\n            for offset in range(8):\n                if (not (byte & (1 << offset))):\n                    return ((i * 8) + offset)", "docstring": "Returns the next empty entry.\n\nReturns:\nint: The value of the empty entry", "source": "codesearchnet"}
{"code": "def get_wf_from_path(self, path):\n        \n        with open(path) as fp:\n            content = fp.read()\n        return [(os.path.basename(os.path.splitext(path)[0]), content), ]", "docstring": "load xml from given path\nArgs:\npath: diagram path\n\nReturns:", "source": "juraj-google-style"}
{"code": "def chhome(name, home, **kwargs):\n    \n    if six.PY2:\n        name = _to_unicode(name)\n        home = _to_unicode(home)\n\n    kwargs = salt.utils.args.clean_kwargs(**kwargs)\n    persist = kwargs.pop('persist', False)\n    if kwargs:\n        salt.utils.args.invalid_kwargs(kwargs)\n    if persist:\n        log.info('Ignoring unsupported \\'persist\\' argument to user.chhome')\n\n    pre_info = info(name)\n\n    if not pre_info:\n        return False\n\n    if home == pre_info['home']:\n        return True\n\n    if not update(name=name, home=home):\n        return False\n\n    post_info = info(name)\n    if post_info['home'] != pre_info['home']:\n        return post_info['home'] == home\n\n    return False", "docstring": "Change the home directory of the user, pass True for persist to move files\nto the new home directory if the old home directory exist.\n\nArgs:\nname (str): The name of the user whose home directory you wish to change\n\nhome (str): The new location of the home directory\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.chhome foo \\\\\\\\fileserver\\\\home\\\\foo True", "source": "juraj-google-style"}
{"code": "def tf_step(\n        self,\n        time,\n        variables,\n        arguments,\n        fn_loss,\n        **kwargs\n    ):\n        \n        unperturbed_loss = fn_loss(**arguments)\n\n        \n        perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]\n        applied = self.apply_step(variables=variables, deltas=perturbations)\n\n        with tf.control_dependencies(control_inputs=(applied,)):\n            perturbed_loss = fn_loss(**arguments)\n            direction = tf.sign(x=(unperturbed_loss - perturbed_loss))\n            deltas_sum = [direction * perturbation for perturbation in perturbations]\n\n        if self.unroll_loop:\n            \n            previous_perturbations = perturbations\n            for sample in xrange(self.num_samples):\n\n                with tf.control_dependencies(control_inputs=deltas_sum):\n                    perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]\n                    perturbation_deltas = [\n                        pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations)\n                    ]\n                    applied = self.apply_step(variables=variables, deltas=perturbation_deltas)\n                    previous_perturbations = perturbations\n\n                with tf.control_dependencies(control_inputs=(applied,)):\n                    perturbed_loss = fn_loss(**arguments)\n                    direction = tf.sign(x=(unperturbed_loss - perturbed_loss))\n                    deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)]\n\n        else:\n            \n            def body(iteration, deltas_sum, previous_perturbations):\n\n                with tf.control_dependencies(control_inputs=deltas_sum):\n                    perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]\n                    perturbation_deltas = [\n                        pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations)\n                    ]\n                    applied = self.apply_step(variables=variables, deltas=perturbation_deltas)\n\n                with tf.control_dependencies(control_inputs=(applied,)):\n                    perturbed_loss = fn_loss(**arguments)\n                    direction = tf.sign(x=(unperturbed_loss - perturbed_loss))\n                    deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)]\n\n                return iteration + 1, deltas_sum, perturbations\n\n            def cond(iteration, deltas_sum, previous_perturbation):\n                return iteration < self.num_samples - 1\n\n            _, deltas_sum, perturbations = tf.while_loop(cond=cond, body=body, loop_vars=(0, deltas_sum, perturbations))\n\n        with tf.control_dependencies(control_inputs=deltas_sum):\n            deltas = [delta / self.num_samples for delta in deltas_sum]\n            perturbation_deltas = [delta - pert for delta, pert in zip(deltas, perturbations)]\n            applied = self.apply_step(variables=variables, deltas=perturbation_deltas)\n\n        with tf.control_dependencies(control_inputs=(applied,)):\n            \n            return [delta + 0.0 for delta in deltas]", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\ntime: Time tensor.\nvariables: List of 
variables to optimize.\narguments: Dict of arguments for callables, like fn_loss.\nfn_loss: A callable returning the loss of the current model.\n**kwargs: Additional arguments, not used.\n\nReturns:\nList of delta tensors corresponding to the updates for each optimized variable.", "source": "juraj-google-style"}
{"code": "def get_all(self, attrs: Iterable[FetchAttribute]) -> Sequence[Tuple[(FetchAttribute, MaybeBytes)]]:\n    ret: List[Tuple[(FetchAttribute, MaybeBytes)]] = []\n    for attr in attrs:\n        try:\n            ret.append((attr.for_response, self.get(attr)))\n        except NotFetchable:\n            pass\n    return ret", "docstring": "Return a list of tuples containing the attribute iself and the bytes\nrepresentation of that attribute from the message.\n\nArgs:\nattrs: The fetch attributes.", "source": "codesearchnet"}
{"code": "def top_rated(self, **kwargs):\n        \n        path = self._get_path('top_rated')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the list of top rated movies. By default, this list will only\ninclude movies that have 10 or more votes. This list refreshes every\nday.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def get_registry_data(self, name, auth_config=None):\n    return RegistryData(image_name=name, attrs=self.client.api.inspect_distribution(name, auth_config), client=self.client, collection=self)", "docstring": "Gets the registry data for an image.\n\nArgs:\nname (str): The name of the image.\nauth_config (dict): Override the credentials that are found in the\nconfig for this request.  ``auth_config`` should contain the\n``username`` and ``password`` keys to be valid.\n\nReturns:\n(:py:class:`RegistryData`): The data object.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def gru_feedfwd(a_t, h_prev, filters, name=None):\n    with tf.variable_scope(name, default_name='GRU', values=[a_t, h_prev]):\n        z_t = tf.sigmoid((tpu_conv1d(a_t, filters, 1, padding='SAME', name='W_z') + tpu_conv1d(h_prev, filters, 1, padding='SAME', name='U_z')))\n        r_t = tf.sigmoid((tpu_conv1d(a_t, filters, 1, padding='SAME', name='W_r') + tpu_conv1d(h_prev, filters, 1, padding='SAME', name='U_r')))\n        h_tilde = tf.tanh((tpu_conv1d(a_t, filters, 1, padding='SAME', name='W') + tpu_conv1d((r_t * h_prev), filters, 1, padding='SAME', name='U')))\n        h_t = (((1.0 - z_t) * h_prev) + (z_t * h_tilde))\n    return h_t", "docstring": "position-wise Feed-fwd GRU gates following the MPNN.\n\nArgs:\na_t: Tensor of shape [batch, length, depth] of current input\nh_prev: Tensor of shape [batch, length, depth] of prev input\nfilters: an integer specifying number of dimensions of the filters\nname: A string\nReturns:\nh_t: [batch, length, filters] hidden state", "source": "codesearchnet"}
{"code": "def node(self, force_new_node: bool=False) -> EventSetNode:\n    if self._internal_node is not None and (not force_new_node):\n        return self._internal_node\n    self._internal_node = create_node_with_new_reference(schema=self._schema, name=self._name)\n    return self._internal_node", "docstring": "Creates an [`EventSetNode`][temporian.EventSetNode] able to consume\nthis EventSet.\n\nIf called multiple times with `force_new_node=False` (default), the same\nnode is returned.\n\nUsage example:\n```python\n>>> my_evset = tp.event_set(\n...     timestamps=[1, 2, 3, 4],\n...     features={\n...         \"feature_1\": [0.5, 0.6, np.nan, 0.9],\n...         \"feature_2\": [\"red\", \"blue\", \"red\", \"blue\"],\n...     },\n... )\n>>> my_node = my_evset.node()\n\n```\n\nArgs:\nforce_new_node: If false (default), return the same node each time\n`node` is called. If true, a new node is created each time.\n\nReturns:\nAn EventSetNode able to consume this EventSet.", "source": "github-repos"}
{"code": "def acquire(self, blocking=True, timeout=-1):\n        \n        result = self.lock.acquire(blocking, timeout)\n        return result", "docstring": "Acquire the :attr:`lock`\n\nArgs:\nblocking (bool): See :meth:`threading.Lock.acquire`\ntimeout (float): See :meth:`threading.Lock.acquire`\n\nReturns:\nbool: :obj:`True` if the lock was acquired, otherwise :obj:`False`", "source": "juraj-google-style"}
{"code": "def module_import(module_path):\n    try:\n        module = __import__(module_path)\n        components = module_path.split('.')\n        for component in components[1:]:\n            module = getattr(module, component)\n        return module\n    except ImportError:\n        raise BadModulePathError(('Unable to find module \"%s\".' % (module_path,)))", "docstring": "Imports the module indicated in name\n\nArgs:\nmodule_path: string representing a module path such as\n'app.config' or 'app.extras.my_module'\nReturns:\nthe module matching name of the last component, ie: for\n'app.extras.my_module' it returns a\nreference to my_module\nRaises:\nBadModulePathError if the module is not found", "source": "codesearchnet"}
{"code": "def unpause(self, container):\n    url = self._url('/containers/{0}/unpause', container)\n    res = self._post(url)\n    self._raise_for_status(res)", "docstring": "Unpause all processes within a container.\n\nArgs:\ncontainer (str): The container to unpause", "source": "codesearchnet"}
{"code": "def _in_gae_environment():\n    if (SETTINGS.env_name is not None):\n        return (SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL'))\n    try:\n        import google.appengine\n    except ImportError:\n        pass\n    else:\n        server_software = os.environ.get(_SERVER_SOFTWARE, '')\n        if server_software.startswith('Google App Engine/'):\n            SETTINGS.env_name = 'GAE_PRODUCTION'\n            return True\n        elif server_software.startswith('Development/'):\n            SETTINGS.env_name = 'GAE_LOCAL'\n            return True\n    return False", "docstring": "Detects if the code is running in the App Engine environment.\n\nReturns:\nTrue if running in the GAE environment, False otherwise.", "source": "codesearchnet"}
{"code": "async def destroy_tournament(self, t: Tournament):\n    (await self.connection('DELETE', 'tournaments/{}'.format(t.id)))\n    if (t in self.tournaments):\n        self.tournaments.remove(t)", "docstring": "completely removes a tournament from Challonge\n\n|methcoro|\n\nNote:\n|from_api| Deletes a tournament along with all its associated records. There is no undo, so use with care!\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def index_last_dim_with_indices(x, indices):\n    assert (len(x.shape) == (len(indices.shape) + 1))\n    x_shape = shape_list(x)\n    vocab_size = x_shape[(- 1)]\n    flat_x = tf.reshape(x, [list_product(x_shape[:(- 1)]), vocab_size])\n    flat_indices = tf.reshape(indices, [list_product(x_shape[:(- 1)])])\n    idx = tf.stack([tf.range(tf.to_int64(shape_list(flat_indices)[0])), tf.to_int64(flat_indices)], axis=1)\n    flat_x_idx = tf.gather_nd(flat_x, idx)\n    x_idx = tf.reshape(flat_x_idx, x_shape[:(- 1)])\n    return x_idx", "docstring": "Use indices to index into the last axis of x.\n\nThis can be useful for recovering the actual probabilities of a sample from a\nprobability distribution.\n\nArgs:\nx: Tensor, n-d.\nindices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)\ndimensions of x. The values of indices will be used to index into the last\naxis of x.\n\nReturns:\nTensor, (n-1)-d.", "source": "codesearchnet"}
{"code": "def derive_field_name(self, field_name):\n        \n        cls = type(self)\n        \n        return cls(\n            self[0],\n            self[1],\n            self[2],\n            field_name,\n            self[4],\n            self[5]\n        )", "docstring": "Derives a new event from this one setting the ``field_name`` attribute.\n\nArgs:\nfield_name (Union[amazon.ion.symbols.SymbolToken, unicode]): The field name to set.\nReturns:\nIonEvent: The newly generated event.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: TFModelInputType=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[tf.Tensor]]]=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:\n    if labels is not None:\n        labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n        use_cache = False\n        if decoder_input_ids is None and decoder_inputs_embeds is None:\n            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n    lm_logits = self.bias_layer(lm_logits)\n    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n    if not return_dict:\n        output = (lm_logits,) + outputs[1:]\n        return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the masked language modeling loss. Indices should either be in `[0, ...,\nconfig.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:", "source": "github-repos"}
{"code": "def validate_gcs_path(path, require_object):\n  \n  bucket, key = datalab.storage._bucket.parse_name(path)\n  if bucket is None:\n    raise Exception('Invalid GCS path \"%s\"' % path)\n  if require_object and key is None:\n    raise Exception('It appears the GCS path \"%s\" is a bucket path but not an object path' % path)", "docstring": "Check whether a given path is a valid GCS path.\n\nArgs:\npath: the config to check.\nrequire_object: if True, the path has to be an object path but not bucket path.\n\nRaises:\nException if the path is invalid", "source": "juraj-google-style"}
{"code": "def ensure_app_config_dir(appname, *args):\n    \n    from ubelt import util_path\n    dpath = get_app_config_dir(appname, *args)\n    util_path.ensuredir(dpath)\n    return dpath", "docstring": "Calls `get_app_config_dir` but ensures the directory exists.\n\nArgs:\nappname (str): the name of the application\n*args: any other subdirectories may be specified\n\nSeeAlso:\nget_app_config_dir\n\nExample:\n>>> import ubelt as ub\n>>> dpath = ub.ensure_app_config_dir('ubelt')\n>>> assert exists(dpath)", "source": "juraj-google-style"}
{"code": "def read_graph_from_string(txt):\n    if (not txt.startswith('{')):\n        return read_dot(txt)\n\n    def conv(value):\n        if isinstance(value, basestring):\n            return (('\"' + value) + '\"')\n        else:\n            return value\n    doc = literal_eval(txt)\n    g = digraph()\n    for (attrs, values) in doc.get('nodes', []):\n        attrs = [(k, conv(v)) for (k, v) in attrs]\n        for value in values:\n            if isinstance(value, basestring):\n                node_name = value\n                attrs_ = attrs\n            else:\n                (node_name, label) = value\n                attrs_ = (attrs + [('label', conv(label))])\n            g.add_node(node_name, attrs=attrs_)\n    for (attrs, values) in doc.get('edges', []):\n        attrs_ = [(k, conv(v)) for (k, v) in attrs]\n        for value in values:\n            if (len(value) == 3):\n                edge = value[:2]\n                label = value[(- 1)]\n            else:\n                edge = value\n                label = ''\n            g.add_edge(edge, label=label, attrs=attrs_)\n    return g", "docstring": "Read a graph from a string, either in dot format, or our own\ncompressed format.\n\nReturns:\n`pygraph.digraph`: Graph object.", "source": "codesearchnet"}
{"code": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        writer.write(self.to_json_string())", "docstring": "Save this instance to a JSON file.\n\nArgs:\njson_file_path (`str` or `os.PathLike`):\nPath to the JSON file in which this processor instance's parameters will be saved.", "source": "github-repos"}
{"code": "def run(cls, **kwargs):\n        \n\n        \n        err_pointer, tmp_pointer, new_bytes = 0, 0, 0\n        print_logs_live = kwargs.pop(\"print_logs_live\", None) \n\n        cmd = cls.create(**kwargs)\n\n        sighandler = SignalHandler()\n\n        while not Command.is_done(cmd.status):\n            if sighandler.received_term_signal:\n                logging.warning(\"Received signal {}. Canceling Qubole Command ID: {}\".format(sighandler.last_signal, cmd.id))\n                cls.cancel(cmd)\n                exit()\n            time.sleep(Qubole.poll_interval)\n            cmd = cls.find(cmd.id)\n            if print_logs_live is True:\n                log, err_length, tmp_length = cmd.get_log_partial(err_pointer, tmp_pointer)\n\n                \n                \n                if err_length != \"0\":\n                    err_pointer += int(err_length)\n                    new_bytes = int(err_length) + int(tmp_length) - tmp_pointer\n                    tmp_pointer = int(tmp_length)\n                else:\n                    tmp_pointer += int(tmp_length)\n                    new_bytes = int(tmp_length)\n\n                if len(log) > 0 and new_bytes > 0:\n                    print(log[-new_bytes:], file=sys.stderr)\n\n        return cmd", "docstring": "Create a command object by issuing a POST request to the /command endpoint\nWaits until the command is complete. Repeatedly polls to check status\n\nArgs:\n`**kwargs`: keyword arguments specific to command type\n\nReturns:\nCommand object", "source": "juraj-google-style"}
{"code": "def get_all_if_deleted(self):\n    with self._lock:\n        results = {}\n        for (add, fut) in self._state.items():\n            if self._contains_and_deleted(add):\n                results[add] = fut.result()\n        return results", "docstring": "Return all the addresses deleted in the context.\nUseful in the squash method.\n\nReturns:\n(dict of str to bytes): The addresses and bytes that have\nbeen deleted in the context.", "source": "codesearchnet"}
{"code": "def normalize(self, inplace=False):\n    if inplace:\n        nrm = self.norm()\n        self.data /= nrm\n        return None\n    nrm = self.norm()\n    data_copy = np.array(self.data, copy=True)\n    data_copy /= nrm\n    return Quaternion(data_copy)", "docstring": "Normalizes a Quaternion to unit length\nso that it represents a valid rotation.\n\nArgs:\ninplace (bool): Do an inplace normalization.\n\nReturns:\nQuaternion: Normalized quaternion.", "source": "codesearchnet"}
{"code": "def get_contacts(self):\n    for (jid, item) in self.roster.items.items():\n        try:\n            self._contacts[jid.bare()].update(item.export_as_json())\n        except KeyError:\n            self._contacts[jid.bare()] = item.export_as_json()\n    return self._contacts", "docstring": "Returns list of contacts\n\nReturns:\ndict: the roster of contacts", "source": "codesearchnet"}
{"code": "def absl_to_standard(level):\n    if (not isinstance(level, int)):\n        raise TypeError('Expect an int level, found {}'.format(type(level)))\n    if (level < ABSL_FATAL):\n        level = ABSL_FATAL\n    if (level <= ABSL_DEBUG):\n        return ABSL_TO_STANDARD[level]\n    return ((STANDARD_DEBUG - level) + 1)", "docstring": "Converts an integer level from the absl value to the standard value.\n\nArgs:\nlevel: int, an absl.logging level.\n\nRaises:\nTypeError: Raised when level is not an integer.\n\nReturns:\nThe corresponding integer level for use in standard logging.", "source": "codesearchnet"}
{"code": "def create_asset_delivery_policy(access_token, ams_account, key_delivery_url):\n    path = '/AssetDeliveryPolicies'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = (('{ \\t\\t\"Name\":\"AssetDeliveryPolicy\", \\t\\t\"AssetDeliveryProtocol\":\"4\", \\t\\t\"AssetDeliveryPolicyType\":\"3\", \\t\\t\"AssetDeliveryConfiguration\":\"[{ \\t\\t\\t\\\\\"Key\\\\\":\\\\\"2\\\\\", \\t\\t\\t\\\\\"Value\\\\\":\\\\\"' + key_delivery_url) + '\\\\\"}]\" \\t}')\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Asset Delivery Policy.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nams_account (str): Media Service Account.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def authenticate(json_path=None):\n    msg = 'budou.authentication() is deprecated. Please use budou.get_parser() to obtain a parser instead.'\n    warnings.warn(msg, DeprecationWarning)\n    parser = get_parser('nlapi', credentials_path=json_path)\n    return parser", "docstring": "Gets a Natural Language API parser by authenticating the API.\n\n**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a\nparser instead.\n\nArgs:\njson_path (:obj:`str`, optional): The file path to the service account's\ncredentials.\n\nReturns:\nParser. (:obj:`budou.parser.NLAPIParser`)", "source": "codesearchnet"}
{"code": "def get_compiler_ir(self, device_name, platform_name, function_name, flat_args, captured_inputs, stage='hlo'):\n    return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name, stage, device_name, flat_args, captured_inputs, platform_name)", "docstring": "Get the compiler IR bytes.\n\nArgs:\ndevice_name: The name of the device with the form as\n\"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc.\nWhen this is used, actual device is needed for getting the compiler IR.\nplatform_name: The name of the platform, e.g. \"TPU\". When this is used,\nfirst we find a device whose name contains the platform, if it is found\nwe get the compiler IR by device. Otherwise the compiler IR is obtained\nas if using that device. The former logic of falling back to device is\nnecessary, as there are cases of TF variables that need to access\ndevices, but the upper layer may generally choose platform for getting\ncompiler IR in a device-agnostic way.\nfunction_name: The name of the function to get the compiler IR.\nflat_args: The flat argument inputs.\ncaptured_inputs: The inputs that are captured.\nstage: The exported stage for the given function.\n\nReturns:\nThe compiler IR bytes.", "source": "github-repos"}
{"code": "def copy_scoped_meta_graph(from_scope, to_scope, from_graph=None, to_graph=None):\n    from_graph = from_graph or ops.get_default_graph()\n    to_graph = to_graph or ops.get_default_graph()\n    if from_graph == to_graph and from_scope == to_scope:\n        raise ValueError(f\"'from_scope' and 'to_scope' need to be different when performing copy in the same graph. Received: 'from_graph': {from_graph}, 'to_graph': {to_graph}, 'from_scope': {from_scope}, 'to_scope': {to_scope}.\")\n    orig_meta_graph, var_list = export_scoped_meta_graph(export_scope=from_scope, graph=from_graph)\n    var_list = import_scoped_meta_graph(orig_meta_graph, graph=to_graph, import_scope=to_scope)\n    return var_list", "docstring": "Copies a sub-meta_graph from one scope to another.\n\nArgs:\nfrom_scope: `String` name scope containing the subgraph to be copied.\nto_scope: `String` name scope under which the copied subgraph will reside.\nfrom_graph: Optional `Graph` from which to copy the subgraph. If `None`, the\ndefault graph is use.\nto_graph: Optional `Graph` to which to copy the subgraph. If `None`, the\ndefault graph is used.\n\nReturns:\nA dictionary of `Variables` that has been copied into `to_scope`.\n\nRaises:\nValueError: If `from_scope` and `to_scope` are the same while\n`from_graph` and `to_graph` are also the same.", "source": "github-repos"}
{"code": "def df(self):\n        \n        import pandas as pd\n        return pd.concat([w.df(uwi=True) for w in self])", "docstring": "Makes a pandas DataFrame containing Curve data for all the wells\nin the Project. The DataFrame has a dual index of well UWI and\ncurve Depths. Requires `pandas`.\n\nArgs:\nNo arguments.\n\nReturns:\n`pandas.DataFrame`.", "source": "juraj-google-style"}
{"code": "def eval_algorithm(curr, prev):\n        \n        if curr['close'] > prev['close']:\n            v = curr['volume']\n        elif curr['close'] < prev['close']:\n            v = curr['volume'] * -1\n        else:\n            v = 0\n        return prev['obv'] + v", "docstring": "Evaluates OBV\n\nArgs:\ncurr: Dict of current volume and close\nprev: Dict of previous OBV and close\n\nReturns:\nFloat of OBV", "source": "juraj-google-style"}
{"code": "def create(self, name, nopassword=None, secret=None, encryption=None):\n    if (secret is not None):\n        return self.create_with_secret(name, secret, encryption)\n    elif (nopassword is True):\n        return self.create_with_nopassword(name)\n    else:\n        raise TypeError('either \"nopassword\" or \"secret\" must be specified to create a user')", "docstring": "Creates a new user on the local system.\n\nCreating users requires either a secret (password) or the nopassword\nkeyword to be specified.\n\nArgs:\nname (str): The name of the user to craete\n\nnopassword (bool): Configures the user to be able to authenticate\nwithout a password challenage\n\nsecret (str): The secret (password) to assign to this user\n\nencryption (str): Specifies how the secret is encoded.  Valid\nvalues are \"cleartext\", \"md5\", \"sha512\".  The default is\n\"cleartext\"\n\nReturns:\nTrue if the operation was successful otherwise False\n\nRaises:\nTypeError: if the required arguments are not satisfied", "source": "codesearchnet"}
{"code": "def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:\n    LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file)\n\n    def parse(mapping_file):\n        config = configparser.ConfigParser()\n        config.read_file(mapping_file)\n        return config\n    if (mapping_file is not None):\n        LOGGER.debug('Parsing command line mapping file')\n        return parse(mapping_file)\n    xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper')\n    if (xdg_config_dir is None):\n        raise RuntimeError('No mapping configured so far at any XDG config location. Please create {config_file}'.format(config_file=DEFAULT_CONFIG_FILE))\n    mapping_file = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)\n    LOGGER.debug('Parsing mapping file %s', mapping_file)\n    with open(mapping_file, 'r') as file_handle:\n        return parse(file_handle)", "docstring": "Parse the file containing the mappings from hosts to pass entries.\n\nArgs:\nmapping_file:\nName of the file to parse. If ``None``, the default file from the\nXDG location is used.", "source": "codesearchnet"}
{"code": "def identity(n, dtype=None):\n    return backend.numpy.identity(n, dtype=dtype)", "docstring": "Return the identity tensor.\n\nThe identity tensor is a square tensor with ones on the main diagonal and\nzeros elsewhere.\n\nArgs:\nn: Number of rows (and columns) in the `n x n` output tensor.\ndtype: Data type of the output tensor.\n\nReturns:\nThe identity tensor.", "source": "github-repos"}
{"code": "def get(self, file_path, ref, **kwargs):\n        \n        file_path = file_path.replace('/', '%2F')\n        return GetMixin.get(self, file_path, ref=ref, **kwargs)", "docstring": "Retrieve a single file.\n\nArgs:\nfile_path (str): Path of the file to retrieve\nref (str): Name of the branch, tag or commit\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the file could not be retrieved\n\nReturns:\nobject: The generated RESTObject", "source": "juraj-google-style"}
{"code": "def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None):\n        \n        \n        newhdxobjects = list()\n        for hdxobject in hdxobjects:\n            newhdxobjectdata = copy.deepcopy(hdxobject.data)\n            newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration)\n            if attribute_to_copy:\n                value = getattr(hdxobject, attribute_to_copy)\n                setattr(newhdxobject, attribute_to_copy, value)\n            newhdxobjects.append(newhdxobject)\n        return newhdxobjects", "docstring": "Helper function to make a deep copy of a supplied list of HDX objects\n\nArgs:\nhdxobjects (List[T <= HDXObject]): list of HDX objects to copy\nhdxobjectclass (type): Type of the HDX Objects to be copied\nattribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None.\n\nReturns:\nList[T <= HDXObject]: Deep copy of list of HDX objects", "source": "juraj-google-style"}
{"code": "def _get_ami_dict(json_url):\n    \n    LOG.info(\"Getting AMI from %s\", json_url)\n    response = requests.get(json_url)\n    assert response.ok, \"Error getting ami info from {}\".format(json_url)\n    ami_dict = response.json()\n    LOG.debug('AMI json contents: %s', ami_dict)\n    return ami_dict", "docstring": "Get ami from a web url.\n\nArgs:\nregion (str): AWS Region to find AMI ID.\n\nReturns:\ndict: Contents in dictionary format.", "source": "juraj-google-style"}
{"code": "def __init__(self, maxsize=0):\n    self._maxsize = maxsize\n    self._queue = collections.deque()\n    self._closed = False\n    self._mutex = threading.Lock()\n    self._not_empty = threading.Condition(self._mutex)\n    self._not_full = threading.Condition(self._mutex)", "docstring": "Create a queue object with a given maximum size.\n\nArgs:\nmaxsize: int size of queue. If <= 0, the queue size is infinite.", "source": "github-repos"}
{"code": "def _future_command_unlocked(self, cmd):\n        \n\n        future = self._loop.create_future()\n        asyncio_loop = self._loop.get_loop()\n\n        def _done_callback(result):\n            retval = result['return_value']\n\n            if not result['result']:\n                future.set_exception(HardwareError(\"Error executing synchronous command\",\n                                                   command=cmd, return_value=retval))\n            else:\n                future.set_result(retval)\n\n        callback = functools.partial(asyncio_loop.call_soon_threadsafe, _done_callback)\n        self._commands.put((cmd, callback, True, None))\n\n        return future", "docstring": "Run command as a coroutine and return a future.\n\nArgs:\nloop (BackgroundEventLoop): The loop that we should attach\nthe future too.\ncmd (list): The command and arguments that we wish to call.\n\nReturns:\nasyncio.Future: An awaitable future with the result of the operation.", "source": "juraj-google-style"}
{"code": "def _get_tables(self, base_dir):\n    table_dict = {}\n    for table in self.metadata['tables']:\n        if table['use']:\n            relative_path = os.path.join(base_dir, self.metadata['path'], table['path'])\n            data_table = pd.read_csv(relative_path)\n            pii_fields = self._get_pii_fields(table)\n            data_table = self._anonymize_table(data_table, pii_fields)\n            table_dict[table['name']] = (data_table, table)\n    return table_dict", "docstring": "Load the contents of meta_file and the corresponding data.\n\nIf fields containing Personally Identifiable Information are detected in the metadata\nthey are anonymized before asign them into `table_dict`.\n\nArgs:\nbase_dir(str): Root folder of the dataset files.\n\nReturns:\ndict: Mapping str -> tuple(pandas.DataFrame, dict)", "source": "codesearchnet"}
{"code": "def initialize_environments(self, batch_size=1):\n    \n    assert batch_size >= 1\n    self._batch_size = batch_size\n\n    self._envs = [gym.make(self.base_env_name) for _ in range(batch_size)]\n    if self._env_wrapper_fn is not None:\n      self._envs = list(map(self._env_wrapper_fn, self._envs))\n\n    \n    \n    \n    if self._observation_space:\n      assert str(self._observation_space) == str(\n          self._envs[0].observation_space)\n    else:\n      \n      \n      \n      \n      self._observation_space = self._envs[0].observation_space\n\n    \n    if self._action_space:\n      assert str(self._action_space) == str(self._envs[0].action_space)\n    else:\n      self._action_space = self._envs[0].action_space\n\n    self._verify_same_spaces()\n\n    \n    \n    if self.reward_range is None:\n      self._reward_range = self._envs[0].reward_range\n\n    \n    \n    \n    \n    self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size)", "docstring": "Initializes the environments and trajectories.\n\nSubclasses can override this if they don't want a default implementation\nwhich initializes `batch_size` environments, but must take care to\ninitialize self._trajectories (this is checked in __init__ anyways).\n\nArgs:\nbatch_size: (int) Number of `self.base_env_name` envs to initialize.", "source": "juraj-google-style"}
{"code": "def files_comments_add(self, *, comment: str, file: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"comment\": comment, \"file\": file})\n        return self.api_call(\"files.comments.add\", json=kwargs)", "docstring": "Add a comment to an existing file.\n\nArgs:\ncomment (str): The body of the comment.\ne.g. 'Everyone should take a moment to read this file.'\nfile (str): The file id. e.g. 'F1234467890'", "source": "juraj-google-style"}
{"code": "def rjust_text(text, width=80, indent=0, subsequent=None):\n    \n    text = re.sub(r\"\\s+\", \" \", text).strip()\n    if subsequent is None:\n        subsequent = indent\n    wrapper = TextWrapper(\n        width=width,\n        break_long_words=False,\n        replace_whitespace=True,\n        initial_indent=\" \" * (indent + subsequent),\n        subsequent_indent=\" \" * subsequent,\n    )\n    return wrapper.fill(text)[subsequent:]", "docstring": "Wrap text and adjust it to right border.\n\nSame as L{wrap_text} with the difference that the text is aligned against\nthe right text border.\n\nArgs:\ntext (str): Text to wrap and align.\nwidth (int): Maximum number of characters per line.\nindent (int): Indentation of the first line.\nsubsequent (int or None): Indentation of all other lines, if it is\n``None``, then the indentation will be same as for the first line.", "source": "juraj-google-style"}
{"code": "def _CheckWindowsRegistryKeyPath(self, filename, artifact_definition, key_path):\n    result = True\n    key_path_segments = key_path.lower().split('\\\\')\n    if (key_path_segments[0] == '%%current_control_set%%'):\n        result = False\n        logging.warning('Artifact definition: {0:s} in file: {1:s} contains Windows Registry key path that starts with %%CURRENT_CONTROL_SET%%. Replace %%CURRENT_CONTROL_SET%% with HKEY_LOCAL_MACHINE\\\\System\\\\CurrentControlSet'.format(artifact_definition.name, filename))\n    for (segment_index, key_path_segment) in enumerate(key_path_segments):\n        if (key_path_segment.startswith('%%') and key_path_segment.endswith('%%')):\n            if ((segment_index == 1) and (key_path_segment == '%%users.sid%%') and (key_path_segments[0] == 'hkey_users')):\n                continue\n            if key_path_segment.startswith('%%environ_'):\n                result = False\n                logging.warning('Artifact definition: {0:s} in file: {1:s} contains Windows Registry key path that contains an environment variable: \"{2:s}\". Usage of environment variables in key paths is not encouraged at this time.'.format(artifact_definition.name, filename, key_path_segment))\n            elif key_path_segment.startswith('%%users.'):\n                result = False\n                logging.warning('Artifact definition: {0:s} in file: {1:s} contains Windows Registry key path that contains a users variable: \"{2:s}\". Usage of users variables in key paths, except for \"HKEY_USERS\\\\%%users.sid%%\", is not encouraged at this time.'.format(artifact_definition.name, filename, key_path_segment))\n    return result", "docstring": "Checks if a path is a valid Windows Registry key path.\n\nArgs:\nfilename (str): name of the artifacts definition file.\nartifact_definition (ArtifactDefinition): artifact definition.\nkey_path (str): Windows Registry key path to validate.\n\nReturns:\nbool: True if the Windows Registry key path is valid.", "source": "codesearchnet"}
{"code": "def parse_uri(self, uri=None):\n    if (not uri):\n        return rdflib.term.URIRef(self.root)\n    elif (type(uri) == str):\n        if ((type(uri) == str) and (not uri.startswith('http'))):\n            return rdflib.term.URIRef(('%s%s' % (self.root, uri)))\n        else:\n            return rdflib.term.URIRef(uri)\n    elif (type(uri) == rdflib.term.URIRef):\n        return uri\n    else:\n        raise TypeError('invalid URI input')", "docstring": "parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef\n\nArgs:\nuri (rdflib.term.URIRef,str): input URI\n\nReturns:\nrdflib.term.URIRef", "source": "codesearchnet"}
{"code": "def deep_update(original, new_dict, new_keys_allowed, whitelist):\n    for (k, value) in new_dict.items():\n        if (k not in original):\n            if (not new_keys_allowed):\n                raise Exception('Unknown config parameter `{}` '.format(k))\n        if isinstance(original.get(k), dict):\n            if (k in whitelist):\n                deep_update(original[k], value, True, [])\n            else:\n                deep_update(original[k], value, new_keys_allowed, [])\n        else:\n            original[k] = value\n    return original", "docstring": "Updates original dict with values from new_dict recursively.\nIf new key is introduced in new_dict, then if new_keys_allowed is not\nTrue, an error will be thrown. Further, for sub-dicts, if the key is\nin the whitelist, then new subkeys can be introduced.\n\nArgs:\noriginal (dict): Dictionary with default values.\nnew_dict (dict): Dictionary with values to be updated\nnew_keys_allowed (bool): Whether new keys are allowed.\nwhitelist (list): List of keys that correspond to dict values\nwhere new subkeys can be introduced. This is only at\nthe top level.", "source": "codesearchnet"}
{"code": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all projects to which you have been granted any project role.\n\nArgs:\nrequest: (BigqueryProjectsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ProjectList) The response message.", "source": "github-repos"}
{"code": "def tf_step(self, time, variables, **kwargs):\n    fn_loss = kwargs['fn_loss']\n    if (variables is None):\n        variables = tf.trainable_variables\n    return tf.gradients(fn_loss, variables)", "docstring": "Creates the TensorFlow operations for performing an optimization step on the given variables, including\nactually changing the values of the variables.\n\nArgs:\ntime: Time tensor. Not used for this optimizer.\nvariables: List of variables to optimize.\n**kwargs:\nfn_loss : loss function tensor to differentiate.\n\nReturns:\nList of delta tensors corresponding to the updates for each optimized variable.", "source": "codesearchnet"}
{"code": "def compute_average_oxidation_state(site):\n    \n    try:\n        avg_oxi = sum([sp.oxi_state * occu\n                       for sp, occu in site.species.items()\n                       if sp is not None])\n        return avg_oxi\n    except AttributeError:\n        pass\n    try:\n        return site.charge\n    except AttributeError:\n        raise ValueError(\"Ewald summation can only be performed on structures \"\n                         \"that are either oxidation state decorated or have \"\n                         \"site charges.\")", "docstring": "Calculates the average oxidation state of a site\n\nArgs:\nsite: Site to compute average oxidation state\n\nReturns:\nAverage oxidation state of site.", "source": "juraj-google-style"}
{"code": "def max_zoom(self):\n    zoom_levels = [map_layer.max_zoom for map_layer in self.layers]\n    return max(zoom_levels)", "docstring": "Get the maximal zoom level of all layers.\n\nReturns:\nint: the maximum of all zoom levels of all layers\n\nRaises:\nValueError: if no layers exist", "source": "codesearchnet"}
{"code": "def get(self, name):\n    name = str(name)\n    if (name not in self._properties):\n        raise ArgumentError('Unknown property in DeviceModel', name=name)\n    return self._properties[name]", "docstring": "Get a device model property.\n\nArgs:\nname (str): The name of the property to get", "source": "codesearchnet"}
{"code": "def __init__(self, key, committed, attempted):\n    self.key = key\n    self.committed = committed\n    self.attempted = attempted", "docstring": "Initializes ``MetricResult``.\nArgs:\nkey: A ``MetricKey`` object.\ncommitted: Metric data that has been committed (e.g. logical updates)\nattempted: Metric data that has been attempted (e.g. physical updates)", "source": "github-repos"}
{"code": "def extract_certs(certs_txt: str) -> List[crypto.X509]:\n    \n    pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'\n    certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)\n    certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]\n    return certs", "docstring": "Extracts pycrypto X509 objects from SSL certificates chain string.\n\nArgs:\ncerts_txt: SSL certificates chain string.\n\nReturns:\nresult: List of pycrypto X509 objects.", "source": "juraj-google-style"}
{"code": "def _pad_batch(self, images: list['torch.Tensor'], return_tensors: Optional[Union[str, TensorType]]) -> tuple:\n    max_size = get_max_height_width(images)\n    grouped_images, grouped_images_index = group_images_by_shape(images)\n    processed_images = {}\n    processed_masks = {}\n    for shape, stacked_images in grouped_images.items():\n        if return_tensors == 'pt' and len(stacked_images) > 0:\n            device = stacked_images.device\n            mask_template = torch.zeros(max_size, dtype=torch.int64, device=device)\n        original_size = stacked_images.shape[-2:]\n        needs_padding = original_size[0] != max_size[0] or original_size[1] != max_size[1]\n        if needs_padding:\n            padding_bottom = max_size[0] - original_size[0]\n            padding_right = max_size[1] - original_size[1]\n            padding = [0, 0, padding_right, padding_bottom]\n            padded_images = F.pad(stacked_images, padding, fill=0)\n            pixel_mask = mask_template.clone()\n            pixel_mask[:original_size[0], :original_size[1]].fill_(1)\n            pixel_masks = pixel_mask.unsqueeze(0).repeat(stacked_images.shape[0], 1, 1)\n        else:\n            padded_images = stacked_images\n            pixel_masks = torch.ones((stacked_images.shape[0], max_size[0], max_size[1]), dtype=torch.int64, device=stacked_images.device)\n        processed_images[shape] = padded_images\n        processed_masks[shape] = pixel_masks\n    padded_images = reorder_images(processed_images, grouped_images_index)\n    pixel_masks = reorder_images(processed_masks, grouped_images_index)\n    if return_tensors == 'pt' and padded_images:\n        padded_images = torch.stack(padded_images)\n        pixel_masks = torch.stack(pixel_masks)\n    return (padded_images, pixel_masks)", "docstring": "Pad a batch of images to the same size based on the maximum dimensions.\n\nArgs:\nimages (`list[torch.Tensor]`): List of images to pad.\nreturn_tensors (`str` or `TensorType`, *optional*): The type of tensors to return.\n\nReturns:\n`tuple`: Tuple containing padded images and pixel masks.", "source": "github-repos"}
{"code": "def geotiff(self, **kwargs):\n    if ('proj' not in kwargs):\n        kwargs['proj'] = self.proj\n    return to_geotiff(self, **kwargs)", "docstring": "Creates a geotiff on the filesystem\n\nArgs:\npath (str): optional, path to write the geotiff file to, default is ./output.tif\nproj (str): optional, EPSG string of projection to reproject to\nspec (str): optional, if set to 'rgb', write out color-balanced 8-bit RGB tif\nbands (list): optional, list of bands to export. If spec='rgb' will default to RGB bands,\notherwise will export all bands\n\nReturns:\nstr: path the geotiff was written to", "source": "codesearchnet"}
{"code": "def _GetUsernameFromProfilePath(self, path):\n    \n    \n    while path and path[-1] == '\\\\':\n      path = path[:-1]\n\n    if path:\n      _, _, path = path.rpartition('\\\\')\n    return path", "docstring": "Retrieves the username from a Windows profile path.\n\nTrailing path path segment are ignored.\n\nArgs:\npath (str): a Windows path with '\\\\' as path segment separator.\n\nReturns:\nstr: basename which is the last path segment.", "source": "juraj-google-style"}
{"code": "def on_value_event(self, event):\n    raise NotImplementedError('on_value_event() is not implemented in the base servicer class')", "docstring": "Callback for Event proto received through the gRPC stream.\n\nThis Event proto carries a Tensor in its summary.value[0] field.\n\nArgs:\nevent: The Event proto from the stream to be processed.", "source": "github-repos"}
{"code": "def multi_rouge_n(sequences, scores_ids, n=2):\n    ngrams = [_get_word_ngrams(n, sequence) for sequence in sequences]\n    counts = [len(ngram) for ngram in ngrams]\n    scores = []\n    for (hyp_id, ref_id) in scores_ids:\n        evaluated_ngrams = ngrams[hyp_id]\n        evaluated_count = counts[hyp_id]\n        reference_ngrams = ngrams[ref_id]\n        reference_count = counts[ref_id]\n        overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)\n        overlapping_count = len(overlapping_ngrams)\n        scores += [f_r_p_rouge_n(evaluated_count, reference_count, overlapping_count)]\n    return scores", "docstring": "Efficient way to compute highly repetitive scoring\ni.e. sequences are involved multiple time\n\nArgs:\nsequences(list[str]): list of sequences (either hyp or ref)\nscores_ids(list[tuple(int)]): list of pairs (hyp_id, ref_id)\nie. scores[i] = rouge_n(scores_ids[i][0],\nscores_ids[i][1])\n\nReturns:\nscores: list of length `len(scores_ids)` containing rouge `n`\nscores as a dict with 'f', 'r', 'p'\nRaises:\nKeyError: if there's a value of i in scores_ids that is not in\n[0, len(sequences)[", "source": "codesearchnet"}
{"code": "def decode(self, encoded):\n        \n        encoded = super().decode(encoded)\n        return self.tokenizer.decode([self.itos[index] for index in encoded])", "docstring": "Decodes a tensor into a sequence.\n\nArgs:\nencoded (torch.Tensor): Encoded sequence.\n\nReturns:\nstr: Sequence decoded from ``encoded``.", "source": "juraj-google-style"}
{"code": "def phase_uniquizer(all_phases):\n  \n  measurement_name_maker = UniqueNameMaker(\n      itertools.chain.from_iterable(\n          phase.measurements.keys() for phase in all_phases\n          if phase.measurements))\n  attachment_names = list(itertools.chain.from_iterable(\n      phase.attachments.keys() for phase in all_phases))\n  attachment_names.extend(itertools.chain.from_iterable([\n      'multidim_' + name for name, meas in phase.measurements.items()\n      if meas.dimensions is not None\n  ] for phase in all_phases if phase.measurements))\n  attachment_name_maker = UniqueNameMaker(attachment_names)\n  for phase in all_phases:\n    \n    for name, _ in sorted(phase.measurements.items()):\n      old_name = name\n      name = measurement_name_maker.make_unique(name)\n\n      phase.measurements[old_name].name = name\n      phase.measurements[name] = phase.measurements.pop(old_name)\n    \n    for name, _ in sorted(phase.attachments.items()):\n      old_name = name\n      name = attachment_name_maker.make_unique(name)\n      phase.attachments[old_name].name = name\n      phase.attachments[name] = phase.attachments.pop(old_name)\n  return all_phases", "docstring": "Makes the names of phase measurement and attachments unique.\n\nThis function will make the names of measurements and attachments unique.\nIt modifies the input all_phases.\n\nArgs:\nall_phases: the phases to make unique\n\nReturns:\nthe phases now modified.", "source": "juraj-google-style"}
{"code": "def _shape_invariant_to_type_spec(self, shape):\n    raise NotImplementedError(f'{type(self).__name__}._shape_invariant_to_type_spec')", "docstring": "Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).\n\nArgs:\nshape: A `tf.TensorShape` object.  The shape invariant for this\n`CompositeTensor`, or `None` if a default shape invariant should be used\n(based on the value of this `CompositeTensor`).\n\nReturns:\nA nested structure whose values are `tf.TensorShape` objects, specifying\nthe shape invariants for the tensors that comprise this `CompositeTensor`.", "source": "github-repos"}
{"code": "def determine_action(self, issue):\n        \n        resource_type = self.resource_types[issue.resource.resource_type_id]\n        issue_alert_schedule = self.alert_schedule[resource_type] if \\\n            resource_type in self.alert_schedule \\\n            else self.alert_schedule['*']\n\n        action_item = {\n            'action': None,\n            'action_description': None,\n            'last_alert': issue.last_alert,\n            'issue': issue,\n            'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource),\n            'owners': [],\n            'stop_after': issue_alert_schedule['stop'],\n            'remove_after': issue_alert_schedule['remove'],\n            'notes': issue.notes,\n            'missing_tags': issue.missing_tags\n        }\n\n        time_elapsed = time.time() - issue.created\n        stop_schedule = pytimeparse.parse(issue_alert_schedule['stop'])\n        remove_schedule = pytimeparse.parse(issue_alert_schedule['remove'])\n\n        if self.collect_only:\n            action_item['action'] = AuditActions.IGNORE\n        elif remove_schedule and time_elapsed >= remove_schedule:\n            action_item['action'] = AuditActions.REMOVE\n            action_item['action_description'] = 'Resource removed'\n            action_item['last_alert'] = remove_schedule\n            if issue.update({'last_alert': remove_schedule}):\n                db.session.add(issue.issue)\n\n        elif stop_schedule and time_elapsed >= stop_schedule:\n            action_item['action'] = AuditActions.STOP\n            action_item['action_description'] = 'Resource stopped'\n            action_item['last_alert'] = stop_schedule\n            if issue.update({'last_alert': stop_schedule}):\n                db.session.add(issue.issue)\n\n        else:\n            alert_selection = self.determine_alert(\n                issue_alert_schedule['alert'],\n                issue.get_property('created').value,\n                issue.get_property('last_alert').value\n            )\n            if alert_selection:\n                action_item['action'] = AuditActions.ALERT\n                action_item['action_description'] = '{} alert'.format(alert_selection)\n                action_item['last_alert'] = alert_selection\n                if issue.update({'last_alert': alert_selection}):\n                    db.session.add(issue.issue)\n            else:\n                action_item['action'] = AuditActions.IGNORE\n\n        db.session.commit()\n        return action_item", "docstring": "Determine the action we should take for the issue\n\nArgs:\nissue: Issue to determine action for\n\nReturns:\n`dict`", "source": "juraj-google-style"}
{"code": "def navbar(self):\n    window = BaseWindow(self.selenium, self.selenium.current_window_handle)\n    with self.selenium.context(self.selenium.CONTEXT_CHROME):\n        el = self.selenium.find_element(*self._nav_bar_locator)\n        return NavBar(window, el)", "docstring": "Provide access to the Navigation Bar.\n\nReturns:\n:py:class:`NavBar`: FoxPuppet NavBar object.", "source": "codesearchnet"}
{"code": "def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):\n    from PIL import Image\n    grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)\n    im = Image.fromarray(pre_pillow_float_img_process(grid))\n    im.save(filename)", "docstring": "Save a given Tensor into an image file.\n\nArgs:\ntensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\nsaves the tensor as a grid of images by calling ``make_grid``.\n**kwargs: Other arguments are documented in ``make_grid``.", "source": "codesearchnet"}
{"code": "def from_stat_file(cls, statfile, timestep=1, is_leap_year=False):\n    stat = STAT(statfile)\n\n    def check_missing(opt_data, data_name):\n        if (opt_data == []):\n            raise ValueError('Stat file contains no optical data.')\n        for (i, x) in enumerate(opt_data):\n            if (x is None):\n                raise ValueError('Missing optical depth data for {} at month {}'.format(data_name, i))\n    check_missing(stat.monthly_tau_beam, 'monthly_tau_beam')\n    check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse')\n    return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam, stat.monthly_tau_diffuse, timestep, is_leap_year)", "docstring": "Create an ASHRAE Revised Clear Sky wea object from the monthly sky\noptical depths in a .stat file.\n\nArgs:\nstatfile: Full path to the .stat file.\ntimestep: An optional integer to set the number of time steps per\nhour. Default is 1 for one value per hour.\nis_leap_year: A boolean to indicate if values are representing a leap year.\nDefault is False.", "source": "codesearchnet"}
{"code": "def params(self):\n    payload = self.payload\n    d = {}\n    for (i, p) in enumerate(payload['currentConfiguration']):\n        type_name = p['typeName']\n        cp = payload['configurationParameters'][i]['message']\n        name = cp['parameterName']\n        if (type_name == 'BTMParameterQuantity'):\n            try:\n                v = q(p['message']['expression'])\n            except:\n                v = q(p['message']['value'], p['message']['units'])\n        elif (type_name == 'BTMParameterBoolean'):\n            v = p['message']['value']\n        elif (type_name == 'BTMParameterEnum'):\n            enum = p['message']['value']\n            enum_map = {d['message']['option']: i for (i, d) in enumerate(cp['options'])}\n            v = cp['options'][enum_map[enum]]['message']['optionName']\n        d[name] = v\n    return d", "docstring": "Get the params of response data from the API.\n\nReturns:\n- d (dict): Dictionary mapping of all configuration values", "source": "codesearchnet"}
{"code": "def conv_json(self, uri_format='sparql_uri', add_ids=False):\n\n    def convert_item(ivalue):\n        ' converts an idividual value to a json value\\n\\n            Args:\\n                ivalue: value of the item to convert\\n\\n            Returns:\\n                JSON serializable value\\n            '\n        nvalue = ivalue\n        if isinstance(ivalue, BaseRdfDataType):\n            if (ivalue.type == 'uri'):\n                if (ivalue.startswith('pyuri') and (uri_format == 'pyuri')):\n                    nvalue = getattr(ivalue, 'sparql')\n                else:\n                    nvalue = getattr(ivalue, uri_format)\n            else:\n                nvalue = ivalue.to_json\n        elif isinstance(ivalue, RdfClassBase):\n            if (ivalue.subject.type == 'uri'):\n                nvalue = ivalue.conv_json(uri_format, add_ids)\n            elif (ivalue.subject.type == 'bnode'):\n                nvalue = ivalue.conv_json(uri_format, add_ids)\n        elif isinstance(ivalue, list):\n            nvalue = []\n            for item in ivalue:\n                temp = convert_item(item)\n                nvalue.append(temp)\n        return nvalue\n    rtn_val = {key: convert_item(value) for (key, value) in self.items()}\n    if add_ids:\n        if (self.subject.type == 'uri'):\n            rtn_val['uri'] = self.subject.sparql_uri\n            rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest()\n    return rtn_val", "docstring": "converts the class to a json compatable python dictionary\n\nArgs:\nuri_format('sparql_uri','pyuri'): The format that uri values will\nbe returned\n\nReturns:\ndict: a json compatabile python dictionary", "source": "codesearchnet"}
{"code": "def declare(self, name, description=None, **kwargs):\n    if (not self._is_valid_key(name)):\n        raise self.InvalidKeyError('Invalid key name, must begin with a lowercase letter', name)\n    if (name in self._declarations):\n        raise self.KeyAlreadyDeclaredError('Configuration key already declared', name)\n    self._declarations[name] = self.Declaration(name, description=description, **kwargs)", "docstring": "Declare a configuration key with the given name.\n\nArgs:\nname: Configuration key to declare, must not have been already declared.\ndescription: If provided, use this as the description for this key.\n**kwargs: Other kwargs to pass to the Declaration, only default_value\nis currently supported.", "source": "codesearchnet"}
{"code": "def __contains__(self, k):\n        \n        chain = ChainMap(self.scopes, self.globals)\n        return chain.__contains__(k)", "docstring": "Check whether a variable has been assigned to.\n\nThis is **not** the same kind of element-of as described in the\nclass documentation.\n\nArgs:\nk (str): The name of the variable to check.\n\nReturns:\nbool: Whether or not the variable has been assigned to.", "source": "juraj-google-style"}
{"code": "def put(value):\n    \n    worker = global_worker\n    worker.check_connected()\n    with profiling.profile(\"ray.put\"):\n        if worker.mode == LOCAL_MODE:\n            \n            return value\n        object_id = ray._raylet.compute_put_id(\n            worker.current_task_id,\n            worker.task_context.put_index,\n        )\n        worker.put_object(object_id, value)\n        worker.task_context.put_index += 1\n        return object_id", "docstring": "Store an object in the object store.\n\nArgs:\nvalue: The Python object to be stored.\n\nReturns:\nThe object ID assigned to this value.", "source": "juraj-google-style"}
{"code": "def convert_one(self, op: ops.Operation) -> ops.OP_TREE:\n        \n\n        \n        if not isinstance(op, ops.GateOperation):\n            raise TypeError(\"{!r} is not a gate operation.\".format(op))\n\n        if is_native_ion_gate(op.gate):\n            return [op]\n        \n        if isinstance(op.gate, ops.HPowGate) and op.gate.exponent == 1:\n            return [ops.Rx(np.pi).on(op.qubits[0]),\n                    ops.Ry(-1 * np.pi/2).on(op.qubits[0])]\n        \n        if isinstance(op.gate, ops.CNotPowGate) and op.gate.exponent == 1:\n            return [ops.Ry(np.pi/2).on(op.qubits[0]),\n                    MS(np.pi/4).on(op.qubits[0], op.qubits[1]),\n                    ops.Rx(-1*np.pi/2).on(op.qubits[0]),\n                    ops.Rx(-1*np.pi/2).on(op.qubits[1]),\n                    ops.Ry(-1*np.pi/2).on(op.qubits[0])]\n        \n        mat = protocols.unitary(op, None) if len(\n            op.qubits) <= 2 else None\n        if mat is not None and len(op.qubits) == 1:\n            gates = optimizers.single_qubit_matrix_to_phased_x_z(mat)\n            return [g.on(op.qubits[0]) for g in gates]\n        elif mat is not None and len(op.qubits) == 2:\n            return two_qubit_matrix_to_ion_operations(\n                op.qubits[0], op.qubits[1], mat)\n        else:\n            if self.ignore_failures:\n                return [op]\n            else:\n                raise TypeError(\n                    \"Don't know how to work with {!r}. \"\n                    \"It isn't a native Ion Trap operation, \"\n                    \"a 1 or 2 qubit gate with a known unitary, \"\n                    \"or composite.\".format(op.gate))", "docstring": "Convert a single (one- or two-qubit) operation\n\ninto ion trap native gates\nArgs:\nop: gate operation to be converted\n\nReturns:\nthe desired operation implemented with ion trap gates", "source": "juraj-google-style"}
{"code": "def get_project_id():\n    if (os.name == 'nt'):\n        command = _CLOUD_SDK_WINDOWS_COMMAND\n    else:\n        command = _CLOUD_SDK_POSIX_COMMAND\n    try:\n        output = subprocess.check_output(((command,) + _CLOUD_SDK_CONFIG_COMMAND), stderr=subprocess.STDOUT)\n    except (subprocess.CalledProcessError, OSError, IOError):\n        return None\n    try:\n        configuration = json.loads(output.decode('utf-8'))\n    except ValueError:\n        return None\n    try:\n        return configuration['configuration']['properties']['core']['project']\n    except KeyError:\n        return None", "docstring": "Gets the project ID from the Cloud SDK.\n\nReturns:\nOptional[str]: The project ID.", "source": "codesearchnet"}
{"code": "def get_metadata_attribute(self, metaname):\n        \n        metadata_value = self.metadata.get(metaname, None)\n        if metadata_value is None:\n            raise NoMetadataException(\n                \"No metadata attribute named %s\" % metaname)\n        if not isinstance(metadata_value, list):\n            raise TypeError(\"Metadata is not a list and it should be.\")\n\n        if len(metadata_value) > 1:\n            return metadata_value\n        else:\n            return metadata_value[0]", "docstring": "Get the metadata attribute by the name.\n\nArgs:\nmetaname (:obj:`str`): Name of the attribute\n\nReturns:\n:obj:`list` or :obj:`str`: Value(s) of the requested metadata\nattribute\n\nRaises:\nNoMetadataException: Attribute error\nTypeError: Metadata should be a list", "source": "juraj-google-style"}
{"code": "def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):\n    types = [treefun(n) for n in neuron.neurites]\n    return CheckResult((types.count(NeuriteType.basal_dendrite) >= min_number))", "docstring": "Check if a neuron has basal dendrites\n\nArguments:\nneuron(Neuron): The neuron object to test\nmin_number: minimum number of basal dendrites required\ntreefun: Optional function to calculate the tree type of neuron's\nneurites\n\nReturns:\nCheckResult with result", "source": "codesearchnet"}
{"code": "def get_all_profiles(store='local'):\n    return {'Domain Profile': get_all_settings(profile='domain', store=store), 'Private Profile': get_all_settings(profile='private', store=store), 'Public Profile': get_all_settings(profile='public', store=store)}", "docstring": "Gets all properties for all profiles in the specified store\n\nArgs:\n\nstore (str):\nThe store to use. This is either the local firewall policy or the\npolicy defined by local group policy. Valid options are:\n\n- lgpo\n- local\n\nDefault is ``local``\n\nReturns:\ndict: A dictionary containing the specified settings for each profile", "source": "codesearchnet"}
{"code": "def ensure_model_downloaded(repo_id: Optional[str]=None, revision: Optional[str]=None, local_dir: Optional[str]=None) -> str:\n    if local_dir is not None:\n        if os.path.exists(local_dir):\n            print(f'Using provided local directory: {local_dir}')\n        else:\n            os.makedirs(local_dir, exist_ok=True)\n            print(f'Created local directory: {local_dir}')\n    if repo_id is None:\n        raise ValueError('Either repo_id or local_dir must be provided')\n    print(f'Ensuring {repo_id} (revision: {revision or 'latest'}) is downloaded...')\n    try:\n        download_dir = snapshot_download(repo_id, revision=revision, local_files_only=True, local_dir=local_dir)\n        print(f'Found model files locally at {download_dir}')\n        return download_dir\n    except Exception:\n        print(f'Downloading model files for {repo_id}...')\n        download_dir = snapshot_download(repo_id, revision=revision, local_files_only=False, local_dir=local_dir)\n        print(f'Downloaded model files to {download_dir}')\n        return download_dir", "docstring": "Ensures model files are downloaded locally, downloads them if not.\nReturns path to local files.\n\nArgs:\nrepo_id: The Hugging Face model repo ID (required if local_dir not provided)\nrevision: Optional git revision to use\nlocal_dir: Optional local directory path where model files should be stored/found", "source": "github-repos"}
{"code": "def variables(self):\n\n    def deref(weak_v):\n        v = weak_v()\n        if v is None:\n            raise AssertionError('Called a function referencing variables which have been deleted. This likely means that function-local variables were created and not referenced elsewhere in the program. This is generally a mistake; consider storing variables in an object attribute on first call.')\n        return v\n    return tuple((deref(v) for v in self._weak_variables))", "docstring": "A sequence of variables accessed by this FuncGraph.\n\nNote that functions keep only weak references to variables. Calling the\nfunction after a variable it accesses has been deleted is an error.\n\nReturns:\nSequence of variables for this func graph.", "source": "github-repos"}
{"code": "def match(self, message: Message) -> bool:\n        \n        if self.template:\n            return self.template.match(message)\n        return True", "docstring": "Matches a message with the behaviour's template\n\nArgs:\nmessage(spade.message.Message): the message to match with\n\nReturns:\nbool: wheter the messaged matches or not", "source": "juraj-google-style"}
{"code": "def _aggregate_additional_loss(self, loss):\n    if not backend.is_float_dtype(loss.dtype):\n        loss = ops.cast(loss, dtype=backend.floatx())\n    return ops.sum(loss)", "docstring": "Aggregates losses from `add_loss`, regularizers and sublayers.\n\nArgs:\nloss: A tensor representing the additional loss to aggregate.\n\nReturns:\nA tensor representing the summed loss, cast to the `floatx()` if\nnecessary.", "source": "github-repos"}
{"code": "def get_observation_coordinates(self, x, y, hdulist_index):\n    return self.hdulist[hdulist_index].converter.get_inverse_converter().convert((x, y))", "docstring": "Retrieves the location of a point using the coordinate system of\nthe original observation, i.e. the original image before any\ncutouts were done.\n\nReturns:\n(x, y) in the original image coordinate system.\n@param x: x-pixel location in the cutout frame of reference\n@param y: y-pixel location in the cutout frame of reference\n@param idx: index of hdu in hdulist that the given x/y corresponds to.", "source": "codesearchnet"}
{"code": "def ReadFromFile(self, artifacts_reader, filename):\n    \n    for artifact_definition in artifacts_reader.ReadFile(filename):\n      self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from a file.\n\nArgs:\nartifacts_reader (ArtifactsReader): an artifacts reader.\nfilename (str): name of the file to read from.", "source": "juraj-google-style"}
{"code": "class MeanMetricWrapper(Mean):\n\n    def __init__(self, fn, name=None, dtype=None, **kwargs):\n        super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)\n        self._fn = fn\n        self._fn_kwargs = kwargs\n\n    def update_state(self, y_true, y_pred, sample_weight=None):\n        \n        y_true = math_ops.cast(y_true, self._dtype)\n        y_pred = math_ops.cast(y_pred, self._dtype)\n        [y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight)\n        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n        ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())\n        matches = ag_fn(y_true, y_pred, **self._fn_kwargs)\n        return super(MeanMetricWrapper, self).update_state(matches, sample_weight=sample_weight)\n\n    def get_config(self):\n        config = {}\n        if type(self) is MeanMetricWrapper:\n            config['fn'] = self._fn\n        for k, v in self._fn_kwargs.items():\n            config[k] = backend.eval(v) if is_tensor_or_variable(v) else v\n        base_config = super(MeanMetricWrapper, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    @classmethod\n    def from_config(cls, config):\n        fn = config.pop('fn', None)\n        if cls is MeanMetricWrapper:\n            return cls(get(fn), **config)\n        return super(MeanMetricWrapper, cls).from_config(config)", "docstring": "Wraps a stateless metric function with the Mean metric.\n\nYou could use this class to quickly build a mean metric from a function. The\nfunction needs to have the signature `fn(y_true, y_pred)` and return a\nper-sample loss array. `MeanMetricWrapper.result()` will return\nthe average metric value across all samples seen so far.\n\nFor example:\n\n```python\ndef accuracy(y_true, y_pred):\nreturn tf.cast(tf.math.equal(y_true, y_pred), tf.float32)\n\naccuracy_metric = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)\n\nkeras_model.compile(..., metrics=accuracy_metric)\n```\n\nArgs:\nfn: The metric function to wrap, with signature `fn(y_true, y_pred,\n**kwargs)`.\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n**kwargs: Keyword arguments to pass on to `fn`.", "source": "github-repos"}
{"code": "def start_engine(self, **kwargs):\n    self.current = WFCurrent(**kwargs)\n    self.wf_state = {'in_external': False, 'finished': False}\n    if (not self.current.new_token):\n        self.wf_state = self.current.wf_cache.get(self.wf_state)\n        self.current.workflow_name = self.wf_state['name']\n        if ('subject' in self.wf_state):\n            self.current.input['id'] = self.wf_state['subject']\n            self.current.task_data['object_id'] = self.wf_state['subject']\n    self.check_for_authentication()\n    self.check_for_permission()\n    self.workflow = self.load_or_create_workflow()\n    if ('form' in self.current.input):\n        form = self.current.input['form']\n        if ('form_name' in form):\n            self.current.task_data[form['form_name']] = form\n    start_init_values = (self.workflow_spec.wf_properties.get('init', 'False') == 'True')\n    if start_init_values:\n        WFInit = get_object_from_path(settings.WF_INITIAL_VALUES)()\n        WFInit.assign_wf_initial_values(self.current)\n    log_msg = ('\\n\\n::::::::::: ENGINE STARTED :::::::::::\\n\\tWF: %s (Possible) TASK:%s\\n\\tCMD:%s\\n\\tSUBCMD:%s' % (self.workflow.name, self.workflow.get_tasks(Task.READY), self.current.input.get('cmd'), self.current.input.get('subcmd')))\n    log.debug(log_msg)\n    sys._zops_wf_state_log = log_msg\n    self.current.workflow = self.workflow", "docstring": "Initializes the workflow with given request, response objects and diagram name.\n\nArgs:\nsession:\ninput:\nworkflow_name (str): Name of workflow diagram without \".bpmn\" suffix.\nFile must be placed under one of configured :py:attr:`~zengine.settings.WORKFLOW_PACKAGES_PATHS`", "source": "codesearchnet"}
{"code": "def from_df(cls, data, entities=None, source='contrast'):\n        \n        variables = []\n        for col in data.columns:\n            _data = pd.DataFrame(data[col].values, columns=['amplitude'])\n            if entities is not None:\n                _data = pd.concat([_data, entities], axis=1, sort=True)\n            variables.append(SimpleVariable(name=col, data=_data, source=source))\n        return BIDSVariableCollection(variables)", "docstring": "Create a Collection from a pandas DataFrame.\n\nArgs:\ndf (DataFrame): The DataFrame to convert to a Collection. Each\ncolumn will be converted to a SimpleVariable.\nentities (DataFrame): An optional second DataFrame containing\nentity information.\nsource (str): The value to set as the source for all Variables.\n\nReturns:\nA BIDSVariableCollection.", "source": "juraj-google-style"}
{"code": "def _representative_structure_setter(self, structprop, keep_chain, clean=True, keep_chemicals=None, out_suffix='_clean', outdir=None, force_rerun=False):\n    if (not outdir):\n        outdir = self.structure_dir\n        if (not outdir):\n            raise ValueError('Output directory must be specified')\n    new_id = 'REP-{}'.format(structprop.id)\n    if self.structures.has_id(new_id):\n        if force_rerun:\n            existing = self.structures.get_by_id(new_id)\n            self.structures.remove(existing)\n    if clean:\n        final_pdb = structprop.clean_structure(outdir=outdir, out_suffix=out_suffix, keep_chemicals=keep_chemicals, keep_chains=keep_chain, force_rerun=force_rerun)\n        log.debug('{}: cleaned structure and saved new file at {}'.format(structprop.id, final_pdb))\n    else:\n        final_pdb = structprop.structure_path\n    self.representative_structure = StructProp(ident=new_id, chains=keep_chain, mapped_chains=keep_chain, structure_path=final_pdb, file_type='pdb')\n    self.representative_chain = keep_chain\n    self.representative_structure.update(structprop.get_dict_with_chain(chain=keep_chain), only_keys=self.__representative_structure_attributes, overwrite=True)\n    self.representative_structure.original_structure_id = structprop.id\n    self.representative_structure.parse_structure()\n    self.structures.append(self.representative_structure)", "docstring": "Set the representative structure by 1) cleaning it and 2) copying over attributes of the original structure.\n\nThe structure is copied because the chains stored may change, and cleaning it makes a new PDB file.\n\nArgs:\nstructprop (StructProp): StructProp object to set as representative\nkeep_chain (str): Chain ID to keep\nclean (bool): If the PDB file should be cleaned (see ssbio.structure.utils.cleanpdb)\nkeep_chemicals (str, list): Keep specified chemical names\nout_suffix (str): Suffix to append to clean PDB file\noutdir (str): Path to output directory\n\nReturns:\nStructProp: representative structure", "source": "codesearchnet"}
{"code": "def group(self, group_type, name, **kwargs):\n    group_obj = Group(group_type, name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Group data to Batch object.\n\nArgs:\ngroup_type (str): The ThreatConnect define Group type.\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Group.", "source": "codesearchnet"}
{"code": "def set_computer_name(name):\n    if six.PY2:\n        name = _to_unicode(name)\n    if windll.kernel32.SetComputerNameExW(win32con.ComputerNamePhysicalDnsHostname, name):\n        ret = {'Computer Name': {'Current': get_computer_name()}}\n        pending = get_pending_computer_name()\n        if (pending not in (None, False)):\n            ret['Computer Name']['Pending'] = pending\n        return ret\n    return False", "docstring": "Set the Windows computer name\n\nArgs:\n\nname (str):\nThe new name to give the computer. Requires a reboot to take effect.\n\nReturns:\ndict:\nReturns a dictionary containing the old and new names if successful.\n``False`` if not.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_computer_name 'DavesComputer'", "source": "codesearchnet"}
{"code": "def get_partstudio_tessellatededges(self, did, wid, eid):\n    return self._api.request('get', (((((('/api/partstudios/d/' + did) + '/w/') + wid) + '/e/') + eid) + '/tessellatededges'))", "docstring": "Gets the tessellation of the edges of all parts in a part studio.\n\nArgs:\n- did (str): Document ID\n- wid (str): Workspace ID\n- eid (str): Element ID\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"}
{"code": "def configure_callbacks(callbacks, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, count_mode='steps', mode=ModeKeys.TRAIN):\n    if isinstance(callbacks, CallbackList):\n        return callbacks\n    if not callbacks:\n        callbacks = []\n    if mode == ModeKeys.TRAIN:\n        model.history = History()\n        callbacks = [BaseLogger()] + (callbacks or []) + [model.history]\n        if verbose:\n            callbacks.append(ProgbarLogger(count_mode))\n    callback_list = CallbackList(callbacks)\n    callback_model = model._get_callback_model()\n    callback_list.set_model(callback_model)\n    set_callback_parameters(callback_list, model, do_validation=do_validation, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, samples=samples, verbose=verbose, mode=mode)\n    callback_list.model.stop_training = False\n    return callback_list", "docstring": "Configures callbacks for use in various training loops.\n\nArgs:\ncallbacks: List of Callbacks.\nmodel: Model being trained.\ndo_validation: Whether or not validation loop will be run.\nbatch_size: Number of samples per batch.\nepochs: Number of epoch to train.\nsteps_per_epoch: Number of batches to run per training epoch.\nsamples: Number of training samples.\nverbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.\ncount_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.\nmode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.\nWhich loop mode to configure callbacks for.\n\nReturns:\nInstance of CallbackList used to control all Callbacks.", "source": "github-repos"}
{"code": "def DownloadPqlResultToCsv(self, pql_query, file_handle, values=None):\n    pql_writer = csv.writer(file_handle, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n    self._PageThroughPqlSet(pql_query, pql_writer.writerow, values)", "docstring": "Downloads the results of a PQL query to CSV.\n\nArgs:\npql_query: str a statement filter to apply (the query should not include\nthe limit or the offset)\nfile_handle: file the file object to write to.\n[optional]\nvalues: A dict of python objects or a list of raw SOAP values to bind\nto the pql_query.", "source": "codesearchnet"}
{"code": "def add(self, timestamp, information):\n    try:\n        item = Schema(CollectorStage.schema_event_items()).validate({'timestamp': timestamp, 'information': information})\n        self.events.append(item)\n    except SchemaError as exception:\n        Logger.get_logger(__name__).error(exception)\n        raise RuntimeError(str(exception))", "docstring": "Add event information.\n\nArgs:\ntimestamp (int): event timestamp.\ninformation (dict): event information.\n\nRaises:\nRuntimeError: when validation of parameters has failed.", "source": "codesearchnet"}
{"code": "def _close_open_file(self, file_des):\n        \n        self.open_files[file_des] = None\n        heapq.heappush(self._free_fd_heap, file_des)", "docstring": "Remove file object with given descriptor from the list\nof open files.\n\nSets the entry in open_files to None.\n\nArgs:\nfile_des: Descriptor of file object to be removed from\nopen files list.", "source": "juraj-google-style"}
{"code": "def tee_log(tee_file: TextIO, loglevel: int) -> None:\n    \n    handler = get_monochrome_handler(stream=tee_file)\n    handler.setLevel(loglevel)\n    rootlogger = logging.getLogger()\n    rootlogger.addHandler(handler)\n    \n    with TeeContextManager(tee_file, capture_stdout=True):\n        with TeeContextManager(tee_file, capture_stderr=True):\n            try:\n                yield\n            except Exception:\n                \n                \n                exc_type, exc_value, exc_traceback = sys.exc_info()\n                lines = traceback.format_exception(exc_type, exc_value,\n                                                   exc_traceback)\n                log.critical(\"\\n\" + \"\".join(lines))\n                raise", "docstring": "Context manager to add a file output stream to our logging system.\n\nArgs:\ntee_file: file-like object to write to\nloglevel: log level (e.g. ``logging.DEBUG``) to use for this stream", "source": "juraj-google-style"}
{"code": "def transform_column_source_data(data, buffers=None, cols=None):\n    to_transform = (set(data) if (cols is None) else set(cols))\n    data_copy = {}\n    for key in to_transform:\n        if (pd and isinstance(data[key], (pd.Series, pd.Index))):\n            data_copy[key] = transform_series(data[key], buffers=buffers)\n        elif isinstance(data[key], np.ndarray):\n            data_copy[key] = transform_array(data[key], buffers=buffers)\n        else:\n            data_copy[key] = traverse_data(data[key], buffers=buffers)\n    return data_copy", "docstring": "Transform ``ColumnSourceData`` data to a serialized format\n\nArgs:\ndata (dict) : the mapping of names to data columns to transform\n\nbuffers (set, optional) :\nIf binary buffers are desired, the buffers parameter may be\nprovided, and any columns that may be sent as binary buffers\nwill be added to the set. If None, then only base64 encoding\nwill be used (default: None)\n\n**This is an \"out\" parameter**. The values it contains will be\nmodified in-place.\n\ncols (list[str], optional) :\nOptional list of subset of columns to transform. If None, all\ncolumns will be transformed (default: None)\n\nReturns:\nJSON compatible dict", "source": "codesearchnet"}
{"code": "def set_position_p(self, pvalue):\n        \n        pvalue_msb = int(pvalue) >> 8\n        pvalue_lsb = int(pvalue) & 0xff\n        data = []\n        data.append(0x0B)\n        data.append(self.servoid)\n        data.append(RAM_WRITE_REQ)\n        data.append(POSITION_KP_RAM)\n        data.append(BYTE2)\n        data.append( pvalue_lsb)\n        data.append( pvalue_msb)\n        send_data(data)", "docstring": "Set the P gain of the  position PID\n\nArgs:\n\npvalue (int): P value", "source": "juraj-google-style"}
{"code": "def inversion(origin=(0, 0, 0)):\n    mat = (- np.eye(4))\n    mat[(3, 3)] = 1\n    mat[(0:3, 3)] = (2 * np.array(origin))\n    return SymmOp(mat)", "docstring": "Inversion symmetry operation about axis.\n\nArgs:\norigin (3x1 array): Origin of the inversion operation. Defaults\nto [0, 0, 0].\n\nReturns:\nSymmOp representing an inversion operation about the origin.", "source": "codesearchnet"}
{"code": "def transform_to_mods_mono(marc_xml, uuid, url):\n    \n    marc_xml = _read_content_or_path(marc_xml)\n\n    transformed = xslt_transformation(\n        marc_xml,\n        _absolute_template_path(\"MARC21slim2MODS3-4-NDK.xsl\")\n    )\n\n    return _apply_postprocessing(\n        marc_xml=marc_xml,\n        xml=transformed,\n        func=mods_postprocessor.postprocess_monograph,\n        uuid=uuid,\n        url=url,\n    )", "docstring": "Convert `marc_xml` to MODS data format.\n\nArgs:\nmarc_xml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "juraj-google-style"}
{"code": "def get_residue_annotations(self, start_resnum, end_resnum=None):\n        \n        if not end_resnum:\n            end_resnum = start_resnum\n\n        \n        f = SeqFeature(FeatureLocation(start_resnum - 1, end_resnum))\n\n        \n        return f.extract(self).letter_annotations", "docstring": "Retrieve letter annotations for a residue or a range of residues\n\nArgs:\nstart_resnum (int): Residue number\nend_resnum (int): Optional residue number, specify if a range is desired\n\nReturns:\ndict: Letter annotations for this residue or residues", "source": "juraj-google-style"}
{"code": "def predict(parameters, X):\n    \n\n    \n    \n    A2, cache = forward_propagation(X, parameters)\n    predictions = np.array([1 if (i > 0.5) else 0 for i in A2[0]])\n\n    return predictions", "docstring": "Using the learned parameters, predicts a class for each example in X\n\nArguments:\nparameters -- python dictionary containing your parameters\nX -- input data of size (n_x, m)\n\nReturns\npredictions -- vector of predictions of our model (red: 0 / blue: 1)", "source": "juraj-google-style"}
{"code": "def check_destinations(destinations):\n    if isinstance(destinations, (resource_variable_ops.BaseResourceVariable, tensor_lib.Tensor)):\n        return bool(destinations.device)\n    return bool(destinations)", "docstring": "Checks whether `destinations` is not empty.\n\nArgs:\ndestinations: a `DistributedValues`, variable, or string object.\n\nReturns:\nBoolean which is True if `destinations` is not empty.", "source": "github-repos"}
{"code": "def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):\n    \n    if kwargs.get('post_data_raw'):\n        logging.debug('post_content: %s\\npost_data_raw: %s' % (url, kwargs['post_data_raw']))\n    else:\n        logging.debug('post_content: %s\\npost_data: %s' % (url, post_data))\n\n    req = request.Request(url, headers=headers)\n    if cookies:\n        cookies.add_cookie_header(req)\n        req.headers.update(req.unredirected_hdrs)\n    if kwargs.get('post_data_raw'):\n        post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')\n    else:\n        post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')\n    response = urlopen_with_retry(req, data=post_data_enc)\n    data = response.read()\n\n    \n    content_encoding = response.getheader('Content-Encoding')\n    if content_encoding == 'gzip':\n        data = ungzip(data)\n    elif content_encoding == 'deflate':\n        data = undeflate(data)\n\n    \n    if decoded:\n        charset = match1(\n            response.getheader('Content-Type'), r'charset=([\\w-]+)'\n        )\n        if charset is not None:\n            data = data.decode(charset)\n        else:\n            data = data.decode('utf-8')\n\n    return data", "docstring": "Post the content of a URL via sending a HTTP POST request.\n\nArgs:\nurl: A URL.\nheaders: Request headers used by the client.\ndecoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\nReturns:\nThe content as a string.", "source": "juraj-google-style"}
{"code": "def add_event(self, event_):\n    conv_event = self._wrap_event(event_)\n    if (conv_event.id_ not in self._events_dict):\n        self._events.append(conv_event)\n        self._events_dict[conv_event.id_] = conv_event\n    else:\n        logger.info('Conversation %s ignoring duplicate event %s', self.id_, conv_event.id_)\n        return None\n    return conv_event", "docstring": "Add an event to the conversation.\n\nThis method is used by :class:`.ConversationList` to maintain this\ninstance.\n\nArgs:\nevent_: ``Event`` message.\n\nReturns:\n:class:`.ConversationEvent` representing the event.", "source": "codesearchnet"}
{"code": "def _kl_uniform_uniform(a, b, name=None):\n  \n  with tf.name_scope(name or \"kl_uniform_uniform\"):\n    \n    \n    \n    \n    final_batch_shape = distribution_util.get_broadcast_shape(\n        a.low, b.low, a.high, b.high)\n    dtype = dtype_util.common_dtype(\n        [a.low, a.high, b.low, b.high], tf.float32)\n    return tf.where((b.low <= a.low) & (a.high <= b.high),\n                    tf.math.log(b.high - b.low) - tf.math.log(a.high - a.low),\n                    tf.broadcast_to(\n                        dtype_util.as_numpy_dtype(dtype)(np.inf),\n                        final_batch_shape))", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Uniform.\n\nNote that the KL divergence is infinite if the support of `a` is not a subset\nof the support of `b`.\n\nArgs:\na: instance of a Uniform distribution object.\nb: instance of a Uniform distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_uniform_uniform\".\n\nReturns:\nBatchwise KL(a || b)", "source": "juraj-google-style"}
{"code": "def security_label_pivot(self, security_label_resource):\n    resource = self.copy()\n    resource._request_uri = '{}/{}'.format(security_label_resource.request_uri, resource._request_uri)\n    return resource", "docstring": "Pivot point on security labels for this resource.\n\nThis method will return all *resources* (group, indicators, task,\nvictims, etc) for this resource that have the provided security\nlabel applied.\n\n**Example Endpoints URI's**\n\n+--------------+----------------------------------------------------------------------+\n| HTTP Method  | API Endpoint URI's                                                   |\n+==============+======================================================================+\n| GET          | /v2/securityLabels/{resourceId}/groups/{resourceType}                |\n+--------------+----------------------------------------------------------------------+\n| GET          | /v2/securityLabels/{resourceId}/groups/{resourceType}/{uniqueId}     |\n+--------------+----------------------------------------------------------------------+\n| GET          | /v2/securityLabels/{resourceId}/indicators/{resourceType}            |\n+--------------+----------------------------------------------------------------------+\n| GET          | /v2/securityLabels/{resourceId}/indicators/{resourceType}/{uniqueId} |\n+--------------+----------------------------------------------------------------------+\n\nArgs:\nresource_id (string): The resource pivot id (security label name).", "source": "codesearchnet"}
{"code": "def GetEnvironmentVariable(self, name):\n    name = name.upper()\n    return self._environment_variables.get(name, None)", "docstring": "Retrieves an environment variable.\n\nArgs:\nname (str): name of the environment variable.\n\nReturns:\nEnvironmentVariableArtifact: environment variable artifact or None\nif there was no value set for the given name.", "source": "codesearchnet"}
{"code": "def process_api_config_response(self, config_json):\n    \n    with self._config_lock:\n      self._add_discovery_config()\n      for config in config_json.get('items', []):\n        lookup_key = config.get('name', ''), config.get('version', '')\n        self._configs[lookup_key] = config\n\n      for config in self._configs.itervalues():\n        name = config.get('name', '')\n        api_version = config.get('api_version', '')\n        path_version = config.get('path_version', '')\n        sorted_methods = self._get_sorted_methods(config.get('methods', {}))\n\n\n        for method_name, method in sorted_methods:\n          self._save_rest_method(method_name, name, path_version, method)", "docstring": "Parses a JSON API config and registers methods for dispatch.\n\nSide effects:\nParses method name, etc. for all methods and updates the indexing\ndata structures with the information.\n\nArgs:\nconfig_json: A dict, the JSON body of the getApiConfigs response.", "source": "juraj-google-style"}
{"code": "def split_input(cls, mapper_spec):\n    \n    reader_spec = cls.get_params(mapper_spec, allow_old=False)\n    bucket = reader_spec[cls.BUCKET_NAME_PARAM]\n    filenames = reader_spec[cls.OBJECT_NAMES_PARAM]\n    delimiter = reader_spec.get(cls.DELIMITER_PARAM)\n    account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)\n    buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)\n    fail_on_missing_input = reader_spec.get(cls.FAIL_ON_MISSING_INPUT)\n\n    \n    all_filenames = []\n    for filename in filenames:\n      if filename.endswith(\"*\"):\n        all_filenames.extend(\n            [file_stat.filename for file_stat in cloudstorage.listbucket(\n                \"/\" + bucket + \"/\" + filename[:-1], delimiter=delimiter,\n                _account_id=account_id)])\n      else:\n        all_filenames.append(\"/%s/%s\" % (bucket, filename))\n\n    \n    readers = []\n    for shard in range(0, mapper_spec.shard_count):\n      shard_filenames = all_filenames[shard::mapper_spec.shard_count]\n      if shard_filenames:\n        reader = cls(\n            shard_filenames, buffer_size=buffer_size, _account_id=account_id,\n            delimiter=delimiter)\n        reader._fail_on_missing_input = fail_on_missing_input\n        readers.append(reader)\n    return readers", "docstring": "Returns a list of input readers.\n\nAn equal number of input files are assigned to each shard (+/- 1). If there\nare fewer files than shards, fewer than the requested number of shards will\nbe used. Input files are currently never split (although for some formats\ncould be and may be split in a future implementation).\n\nArgs:\nmapper_spec: an instance of model.MapperSpec.\n\nReturns:\nA list of InputReaders. None when no input data can be found.", "source": "juraj-google-style"}
{"code": "def any(x, axis=None, keepdims=False):\n    x = math_ops.cast(x, dtypes_module.bool)\n    return math_ops.reduce_any(x, axis, keepdims)", "docstring": "Bitwise reduction (logical OR).\n\nArgs:\nx: Tensor or variable.\naxis: axis along which to perform the reduction.\nkeepdims: whether the drop or broadcast the reduction axes.\n\nReturns:\nA uint8 tensor (0s and 1s).", "source": "github-repos"}
{"code": "def distance(self, other):\n        \n        return np.linalg.norm(other.coords - self.coords)", "docstring": "Get distance between two sites.\n\nArgs:\nother: Other site.\n\nReturns:\nDistance (float)", "source": "juraj-google-style"}
{"code": "def write_version_and_dims(version, dims, f):\n    f.write((('\n    f.write((((((((dims[0] + '\\t') + dims[1]) + '\\t') + dims[2]) + '\\t') + dims[3]) + '\\n'))", "docstring": "Write first two lines of gct file.\n\nArgs:\nversion (string): 1.3 by default\ndims (list of strings): length = 4\nf (file handle): handle of output file\nReturns:\nnothing", "source": "codesearchnet"}
{"code": "async def get_entry(config, url):\n    \n\n    previous = config.cache.get(\n        'entry', url,\n        schema_version=SCHEMA_VERSION) if config.cache else None\n\n    headers = previous.caching if previous else None\n\n    request = await utils.retry_get(config, url, headers=headers)\n    if not request or not request.success:\n        LOGGER.error(\"Could not get entry %s: %d\", url,\n                     request.status if request else -1)\n        return None, previous, False\n\n    \n    if request.cached:\n        return previous, previous, False\n\n    current = Entry(request)\n\n    \n    if config.cache:\n        config.cache.set('entry', url, current)\n\n    return current, previous, (not previous\n                               or previous.digest != current.digest\n                               or previous.status != current.status)", "docstring": "Given an entry URL, return the entry\n\nArguments:\n\nconfig -- the configuration\nurl -- the URL of the entry\n\nReturns: 3-tuple of (current, previous, updated)", "source": "juraj-google-style"}
{"code": "def _check_multiple_access_to_resources(self, captured_resources, exclusive_resource_access):\n    for sg in ops.get_collection(CRITICAL_SECTION_EXECUTIONS):\n        if self._is_self_handle(sg.handle):\n            continue\n        if not (exclusive_resource_access or sg.exclusive_resource_access):\n            continue\n        resource_intersection = captured_resources.intersection(sg.resources)\n        if resource_intersection:\n            raise ValueError(f\"This execution would access resources: {list(resource_intersection)}. Either this lock (CriticalSection: {self._handle}) or lock '{sg}' (CriticalSection: {sg.handle}) requested exclusive resource access of this resource. Did you mean to call execute with keyword argument exclusive_resource_access=False?\")", "docstring": "Raise if captured_resources are accessed by another CriticalSection.\n\nArgs:\ncaptured_resources: Set of tensors of type resource.\nexclusive_resource_access: Whether this execution requires exclusive\nresource access.\n\nRaises:\nValueError: If any tensors in `captured_resources` are also accessed\nby another `CriticalSection`, and at least one of them requires\nexclusive resource access.", "source": "github-repos"}
{"code": "def get_size(self, value=None):\n        \n        if value is None:\n            value = self._value\n\n        if hasattr(value, 'get_size'):\n            return value.get_size()\n\n        return len(self.pack(value))", "docstring": "Return the size in bytes.\n\nArgs:\nvalue (bytes): In structs, the user can assign other value instead\nof this class' instance. Here, in such cases, ``self`` is a\nclass attribute of the struct.\n\nReturns:\nint: The address size in bytes.", "source": "juraj-google-style"}
{"code": "def RunScripts(self, script_dict):\n    \n    metadata_types = ['%s-script-url', '%s-script']\n    metadata_keys = [key % self.script_type for key in metadata_types]\n    metadata_keys = [key for key in metadata_keys if script_dict.get(key)]\n    if not metadata_keys:\n      self.logger.info('No %s scripts found in metadata.', self.script_type)\n    for metadata_key in metadata_keys:\n      metadata_script = script_dict.get(metadata_key)\n      self._MakeExecutable(metadata_script)\n      self._RunScript(metadata_key, metadata_script)", "docstring": "Run the metadata scripts; execute a URL script first if one is provided.\n\nArgs:\nscript_dict: a dictionary mapping metadata keys to script files.", "source": "juraj-google-style"}
{"code": "def array_to_int_csv(array_data):\n    flattened_array = array_data.flatten()\n    array_as_strings = [item.astype(int).astype(str) for item in flattened_array]\n    return ','.join(array_as_strings)", "docstring": "Converts all elements in a numerical array to a comma-separated string.\n\nArgs:\narray_data: Numerical array to convert.\n\nReturns:\nString containing array values as integers, separated by commas.", "source": "github-repos"}
{"code": "def run(in_file_nose, out_dir_unitth):\n    suites = Converter.read_nose(in_file_nose)\n    Converter.write_unitth(suites, out_dir_unitth)", "docstring": "Convert nose-style test reports to UnitTH-style test reports by splitting modules into separate XML files\n\nArgs:\nin_file_nose (:obj:`str`): path to nose-style test report\nout_file_unitth (:obj:`str`): path to save UnitTH-style test reports", "source": "codesearchnet"}
{"code": "def __init__(self, *args, allow_partial: bool=False, sealed: Optional[bool]=None, root_path: Optional[utils.KeyPath]=None, explicit_init: bool=False, **kwargs):\n    if sealed is None:\n        sealed = not self.__class__.allow_symbolic_mutation\n    if not isinstance(allow_partial, bool):\n        raise TypeError(f\"Expect bool type for argument 'allow_partial' in symbolic.Object.__init__ but encountered {allow_partial}.\")\n    super().__init__(allow_partial=allow_partial, accessor_writable=self.__class__.allow_symbolic_assignment, sealed=sealed, root_path=root_path, init_super=not explicit_init)\n    _, unmatched_keys = self.__class__.__schema__.resolve(list(kwargs.keys()))\n    if unmatched_keys:\n        arg_phrase = utils.auto_plural(len(unmatched_keys), 'argument')\n        keys_str = utils.comma_delimited_str(unmatched_keys)\n        raise TypeError(f'{self.__class__.__name__}.__init__() got unexpected keyword {arg_phrase}: {keys_str}')\n    field_args = {}\n    init_arg_names = self.__class__.init_arg_list\n    if args:\n        if not self.__class__.__schema__.fields:\n            raise TypeError(f'{self.__class__.__name__}() takes no arguments.')\n        elif init_arg_names and init_arg_names[-1].startswith('*'):\n            vararg_name = init_arg_names[-1][1:]\n            vararg_field = self.__class__.__schema__.get_field(vararg_name)\n            assert vararg_field is not None\n            num_named_args = len(init_arg_names) - 1\n            field_args[vararg_name] = list(args[num_named_args:])\n            args = args[:num_named_args]\n        elif len(args) > len(init_arg_names):\n            arg_phrase = utils.auto_plural(len(init_arg_names), 'argument')\n            was_phrase = utils.auto_plural(len(args), 'was', 'were')\n            raise TypeError(f'{self.__class__.__name__}.__init__() takes {len(init_arg_names)} positional {arg_phrase} but {len(args)} {was_phrase} given.')\n        for i, arg_value in enumerate(args):\n            arg_name = init_arg_names[i]\n            field_args[arg_name] = arg_value\n    for k, v in kwargs.items():\n        if k in field_args:\n            values_str = utils.comma_delimited_str([field_args[k], v])\n            raise TypeError(f\"{self.__class__.__name__}.__init__() got multiple values for argument '{k}': {values_str}.\")\n        field_args[k] = v\n    if not base.accepts_partial(self):\n        missing_args = []\n        for field in self.__class__.__schema__.fields.values():\n            if not field.value.has_default and isinstance(field.key, pg_typing.ConstStrKey) and (field.key not in field_args):\n                missing_args.append(str(field.key))\n        if missing_args:\n            arg_phrase = utils.auto_plural(len(missing_args), 'argument')\n            keys_str = utils.comma_delimited_str(missing_args)\n            raise TypeError(f'{self.__class__.__name__}.__init__() missing {len(missing_args)} required {arg_phrase}: {keys_str}.')\n    self._set_raw_attr('_sym_attributes', pg_dict.Dict(field_args, value_spec=self.__class__.sym_fields, allow_partial=allow_partial, sealed=sealed, accessor_writable=True, root_path=root_path, as_object_attributes_container=True))\n    self._sym_attributes.sym_setparent(self)\n    self._on_init()\n    self.seal(sealed)", "docstring": "Create an Object instance.\n\nArgs:\n*args: positional arguments.\nallow_partial: If True, the object can be partial.\nsealed: If True, seal the object from future modification (unless under\na `pg.seal(False)` context manager). 
If False, treat the object as\nunsealed. If None, it's determined by `cls.allow_symbolic_mutation`.\nroot_path: The symbolic path for current object. By default it's None,\nwhich indicates that newly constructed object does not have a parent.\nexplicit_init: Should set to `True` when `__init__` is called via\n`pg.Object.__init__` instead of `super().__init__`.\n**kwargs: key/value arguments that align with the schema. All required\nkeys in the schema must be specified, and values should be acceptable\naccording to their value spec.\n\nRaises:\nKeyError: When required key(s) are missing.\nValueError: When value(s) are not acceptable by their value spec.", "source": "github-repos"}
{"code": "async def get_word(self, term: str) -> 'asyncurban.word.Word':\n        \n        resp = await self._get(term=term)\n        return Word(resp['list'][0])", "docstring": "Gets the first matching word available.\n\nArgs:\nterm: The word to be defined.\n\nReturns:\nThe closest matching :class:`Word` from UrbanDictionary.\n\nRaises:\nUrbanConnectionError: If the response status isn't ``200``.\nWordNotFoundError: If the response doesn't contain data (i.e. no word found).", "source": "juraj-google-style"}
{"code": "def configure(self, argv=('',), **kwargs):\n    parser = argparse_flags.ArgumentParser(prog='tensorboard', description='TensorBoard is a suite of web applications for inspecting and understanding your TensorFlow runs and graphs. https:\n    for loader in self.plugin_loaders:\n        loader.define_flags(parser)\n    arg0 = (argv[0] if argv else '')\n    flags = parser.parse_args(argv[1:])\n    self.cache_key = manager.cache_key(working_directory=os.getcwd(), arguments=argv[1:], configure_kwargs=kwargs)\n    if (absl_flags and arg0):\n        for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):\n            if hasattr(flags, flag.name):\n                raise ValueError(('Conflicting Abseil flag: %s' % flag.name))\n            setattr(flags, flag.name, flag.value)\n    for (k, v) in kwargs.items():\n        if (not hasattr(flags, k)):\n            raise ValueError(('Unknown TensorBoard flag: %s' % k))\n        setattr(flags, k, v)\n    for loader in self.plugin_loaders:\n        loader.fix_flags(flags)\n    self.flags = flags\n    return [arg0]", "docstring": "Configures TensorBoard behavior via flags.\n\nThis method will populate the \"flags\" property with an argparse.Namespace\nrepresenting flag values parsed from the provided argv list, overridden by\nexplicit flags from remaining keyword arguments.\n\nArgs:\nargv: Can be set to CLI args equivalent to sys.argv; the first arg is\ntaken to be the name of the path being executed.\nkwargs: Additional arguments will override what was parsed from\nargv. They must be passed as Python data structures, e.g.\n`foo=1` rather than `foo=\"1\"`.\n\nReturns:\nEither argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism\nfor absl.app.run() compatibility.\n\nRaises:\nValueError: If flag values are invalid.", "source": "codesearchnet"}
{"code": "def split_strings(string, separators):\n    \n    logger = logging.getLogger('extract_vcf.split_strings')\n    logger.debug(\"splitting string '{0}' with separators {1}\".format(\n        string, separators\n    ))\n    results = []\n    \n    def recursion(recursive_string, separators, i=1):\n        \n        if i == len(separators):\n            for value in recursive_string.split(separators[i-1]):\n                logger.debug(\"Adding {0} to results\".format(value))\n                results.append(value)\n        else:\n            for value in recursive_string.split(separators[i-1]):\n                recursion(value, separators, i+1)\n    if len(separators) > 0:\n        recursion(string, separators)\n    else:\n        results = [string]\n    \n    return results", "docstring": "Split a string with arbitrary number of separators.\nReturn a list with the splitted values\n\nArguments:\nstring (str): ex. \"a:1|2,b:2\"\nseparators (list): ex. [',',':','|']\n\nReturns:\nresults (list) : ex. ['a','1','2','b','2']", "source": "juraj-google-style"}
{"code": "async def starttls(self, context=None):\n    if (not self.use_aioopenssl):\n        raise BadImplementationError('This connection does not use aioopenssl')\n    import aioopenssl\n    import OpenSSL\n    (await self.ehlo_or_helo_if_needed())\n    if ('starttls' not in self.esmtp_extensions):\n        raise SMTPCommandNotSupportedError('STARTTLS')\n    (code, message) = (await self.do_cmd('STARTTLS', success=(220,)))\n    if (context is None):\n        context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)\n    (await self.transport.starttls(ssl_context=context))\n    self.last_ehlo_response = (None, None)\n    self.last_helo_response = (None, None)\n    self.supports_esmtp = False\n    self.esmtp_extensions = {}\n    self.auth_mechanisms = []\n    return (code, message)", "docstring": "Upgrades the connection to the SMTP server into TLS mode.\n\nIf there has been no previous EHLO or HELO command this session, this\nmethod tries ESMTP EHLO first.\n\nIf the server supports SSL/TLS, this will encrypt the rest of the SMTP\nsession.\n\nRaises:\nSMTPCommandNotSupportedError: If the server does not support STARTTLS.\nSMTPCommandFailedError: If the STARTTLS command fails\nBadImplementationError: If the connection does not use aioopenssl.\n\nArgs:\ncontext (:obj:`OpenSSL.SSL.Context`): SSL context\n\nReturns:\n(int, message): A (code, message) 2-tuple containing the server\nresponse.", "source": "codesearchnet"}
{"code": "def get_chain(self, name, table='filter'):\n    return [r for r in self.rules if ((r['table'] == table) and (r['chain'] == name))]", "docstring": "Get the list of rules for a particular chain. Chain order is kept intact.\n\nArgs:\nname (str): chain name, e.g. ``\ntable (str): table name, defaults to ``filter``\n\nReturns:\nlist: rules", "source": "codesearchnet"}
{"code": "def set_server_def_retries(self, retries):\n    self._set_server_def_retries = retries", "docstring": "Set the number of retries to use when calling SetServerDef.\n\nIn cases where many servers run in high-preemption environments, jobs could\nbe preempted during startup and initial connection via SetServerDef. Retries\nallow for more robust connection in these environments.\n\nArgs:\nretries: int specifying the number of connection retries before failing.\nRetries follow an exponential backoff waiting period with min value 1ms,\nmax value 10s, and exponent 1.3.", "source": "github-repos"}
{"code": "def set_as_default(self, step=None):\n    self.as_default(step).__enter__()", "docstring": "Enables this summary writer for the current thread.\n\nFor convenience, if `step` is not None, this function also sets a default\nvalue for the `step` parameter used in summary-writing functions elsewhere\nin the API so that it need not be explicitly passed in every such\ninvocation. The value can be a constant or a variable.\n\nNote: when setting `step` in a @tf.function, the step value will be\ncaptured at the time the function is traced, so changes to the step outside\nthe function will not be reflected inside the function unless using\na `tf.Variable` step.\n\nArgs:\nstep: An `int64`-castable default step value, or `None`. When not `None`,\nthe current step is modified to the given value. When `None`, the\ncurrent step is not modified.", "source": "github-repos"}
{"code": "def pad_tensor(tensor, length, padding_index=DEFAULT_PADDING_INDEX):\n    \n    n_padding = length - tensor.shape[0]\n    assert n_padding >= 0\n    if n_padding == 0:\n        return tensor\n    padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)\n    return torch.cat((tensor, padding), dim=0)", "docstring": "Pad a ``tensor`` to ``length`` with ``padding_index``.\n\nArgs:\ntensor (torch.Tensor [n, ...]): Tensor to pad.\nlength (int): Pad the ``tensor`` up to ``length``.\npadding_index (int, optional): Index to pad tensor with.\n\nReturns\n(torch.Tensor [length, ...]) Padded Tensor.", "source": "juraj-google-style"}
{"code": "def discrete_bottleneck(self, x):\n    x_reshaped = self.slice_hidden(x)\n    x_means_hot = []\n    x_means = 0\n    loss = 0\n    (x_means_hot, x_means, q_loss, e_loss) = self.embedding_lookup(x_reshaped, self.means)\n    if self.hparams.ema:\n        tf.logging.info('Using EMA with beta = {}'.format(self.hparams.beta))\n        updated_ema_count = moving_averages.assign_moving_average(self.ema_count, tf.reduce_sum(tf.reshape(x_means_hot, shape=[(- 1), self.hparams.num_blocks, self.hparams.block_v_size]), axis=0), self.hparams.decay, zero_debias=False)\n        dw = tf.matmul(tf.transpose(x_means_hot, perm=[1, 2, 0]), tf.transpose(x_reshaped, perm=[1, 0, 2]))\n        updated_ema_means = moving_averages.assign_moving_average(self.ema_means, dw, self.hparams.decay, zero_debias=False)\n        n = tf.reduce_sum(updated_ema_count, axis=(- 1), keep_dims=True)\n        updated_ema_count = (((updated_ema_count + self.hparams.epsilon) / (n + ((2 ** self.hparams.z_size) * self.hparams.epsilon))) * n)\n        updated_ema_means = (updated_ema_means / tf.expand_dims(updated_ema_count, axis=(- 1)))\n        with tf.control_dependencies([e_loss]):\n            update_means = tf.assign(self.means, updated_ema_means)\n            with tf.control_dependencies([update_means]):\n                loss += (self.hparams.beta * e_loss)\n    else:\n        loss += (q_loss + (self.hparams.beta * e_loss))\n    x_means_idx = tf.argmax(x_means_hot, axis=(- 1))\n    num_bits = int((self.hparams.z_size \n    x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)\n    x_discrete = self.bit_to_int(tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)\n    shape_x = common_layers.shape_list(x)\n    shape_discrete = shape_x[:(- 1)]\n    x_discrete = tf.reshape(x_discrete, shape_discrete)\n    x_means = tf.reshape(x_means, shape=shape_x)\n    h1 = (x + tf.stop_gradient((x_means - x)))\n    h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name='vch2')\n    res = tf.layers.dense(tf.nn.relu(h2), self.hparams.hidden_size, name='vcfin')\n    embed_fn = partial(self.embed)\n    return {'dense': res, 'discrete': x_discrete, 'loss': loss, 'embed': embed_fn}", "docstring": "Discretization bottleneck for latent variables.\n\nArgs:\nx: Input to the discretization bottleneck.\n\nReturns:\nEmbedding to pass to the decoder, discrete latent, loss, and the\nembedding\nfunction.\n\nRaises:\nValueError: If projection_tensors is None for reshape_method\nproject, or\nema_count or ema_means is None if we are using ema, or unknown\nargs.", "source": "codesearchnet"}
{"code": "def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False):\n    if 'keras_version' in f.attrs:\n        original_keras_version = f.attrs['keras_version']\n        if hasattr(original_keras_version, 'decode'):\n            original_keras_version = original_keras_version.decode('utf8')\n    else:\n        original_keras_version = '1'\n    if 'backend' in f.attrs:\n        original_backend = f.attrs['backend']\n        if hasattr(original_backend, 'decode'):\n            original_backend = original_backend.decode('utf8')\n    else:\n        original_backend = None\n    layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n    index = {}\n    for layer in model.layers:\n        if layer.name:\n            index.setdefault(layer.name, []).append(layer)\n    for k, name in enumerate(layer_names):\n        g = f[name]\n        weight_values = load_subset_weights_from_hdf5_group(g)\n        for layer in index.get(name, []):\n            symbolic_weights = _legacy_weights(layer)\n            if len(weight_values) != len(symbolic_weights):\n                if skip_mismatch:\n                    warnings.warn(f'Skipping loading of weights for layer \n                    continue\n                raise ValueError(f'Weight count mismatch for layer \n            _set_weights(layer, symbolic_weights, weight_values, skip_mismatch=skip_mismatch, name=f'layer \n    if 'top_level_model_weights' in f:\n        symbolic_weights = model._trainable_variables + model._non_trainable_variables\n        weight_values = load_subset_weights_from_hdf5_group(f['top_level_model_weights'])\n        if len(weight_values) != len(symbolic_weights):\n            if skip_mismatch:\n                warnings.warn(f'Skipping loading top-level weights for model due to mismatch in number of weights. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)', stacklevel=2)\n            else:\n                raise ValueError(f'Weight count mismatch for top-level weights of model. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)')\n        else:\n            _set_weights(model, symbolic_weights, weight_values, skip_mismatch=skip_mismatch, name='top-level model')", "docstring": "Implements name-based weight loading (instead of topological loading).\n\nLayers that have no matching name are skipped.\n\nArgs:\nf: A pointer to a HDF5 group.\nmodel: Model instance.\nskip_mismatch: Boolean, whether to skip loading of layers\nwhere there is a mismatch in the number of weights,\nor a mismatch in the shape of the weights.\n\nRaises:\nValueError: in case of mismatch between provided layers\nand weights file and skip_match=False.", "source": "github-repos"}
{"code": "def write(self, file_des, contents):\n        \n        file_handle = self.filesystem.get_open_file(file_des)\n        if isinstance(file_handle, FakeDirWrapper):\n            self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)\n\n        if isinstance(file_handle, FakePipeWrapper):\n            return file_handle.write(contents)\n\n        file_handle.raw_io = True\n        file_handle._sync_io()\n        file_handle.update_flush_pos()\n        file_handle.write(contents)\n        file_handle.flush()\n        return len(contents)", "docstring": "Write string to file descriptor, returns number of bytes written.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\ncontents: String of bytes to write to file.\n\nReturns:\nNumber of bytes written.\n\nRaises:\nOSError: bad file descriptor.\nTypeError: if file descriptor is not an integer.", "source": "juraj-google-style"}
{"code": "def check_oversized_pickle(pickled, name, obj_type, worker):\n    \n    length = len(pickled)\n    if length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:\n        return\n    warning_message = (\n        \"Warning: The {} {} has size {} when pickled. \"\n        \"It will be stored in Redis, which could cause memory issues. \"\n        \"This may mean that its definition uses a large array or other object.\"\n    ).format(obj_type, name, length)\n    push_error_to_driver(\n        worker,\n        ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,\n        warning_message,\n        driver_id=worker.task_driver_id)", "docstring": "Send a warning message if the pickled object is too large.\n\nArgs:\npickled: the pickled object.\nname: name of the pickled object.\nobj_type: type of the pickled object, can be 'function',\n'remote function', 'actor', or 'object'.\nworker: the worker used to send warning message.", "source": "juraj-google-style"}
{"code": "def __init__(self, img_input, p=6.):\n        \n        super(LPNorm, self).__init__()\n        if p < 1:\n            raise ValueError('p value should range between [1, inf)')\n        self.name = \"L-{} Norm Loss\".format(p)\n        self.p = p\n        self.img = img_input", "docstring": "Builds a L-p norm function. This regularizer encourages the intensity of pixels to stay bounded.\ni.e., prevents pixels from taking on very large values.\n\nArgs:\nimg_input: 4D image input tensor to the model of shape: `(samples, channels, rows, cols)`\nif data_format='channels_first' or `(samples, rows, cols, channels)` if data_format='channels_last'.\np: The pth norm to use. If p = float('inf'), infinity-norm will be used.", "source": "juraj-google-style"}
{"code": "def handle_error(self, code, message_values=None, raise_error=True):\n        \n        try:\n            if message_values is None:\n                message_values = []\n            message = self.error_codes.message(code).format(*message_values)\n            self.log.error('Error code: {}, {}'.format(code, message))\n        except AttributeError:\n            self.log.error('Incorrect error code provided ({}).'.format(code))\n            raise RuntimeError(1000, 'Generic Failure, see logs for more details.')\n        except IndexError:\n            self.log.error(\n                'Incorrect message values provided for error code {} ({}).'.format(\n                    code, message_values\n                )\n            )\n            raise RuntimeError(1000, 'Generic Failure, see logs for more details.')\n        if raise_error:\n            raise RuntimeError(code, message)", "docstring": "Raise RuntimeError\n\nArgs:\ncode (integer): The error code from API or SDK.\nmessage (string): The error message from API or SDK.", "source": "juraj-google-style"}
{"code": "def AddBudget(self, client_customer_id, micro_amount):\n    self.client.SetClientCustomerId(client_customer_id)\n    budget_service = self.client.GetService('BudgetService')\n    operations = [{'operator': 'ADD', 'operand': {'name': ('Budget \n    return budget_service.mutate(operations)['value'][0]['budgetId']", "docstring": "Create a new Budget with the given microAmount.\n\nArgs:\nclient_customer_id: str Client Customer Id used to create Budget.\nmicro_amount: str The budget represented in micros.\n\nReturns:\nstr BudgetId of the newly created Budget.", "source": "codesearchnet"}
{"code": "def AsParameterType(type: ContractParameterType, item: StackItem):\n        \n        if type == ContractParameterType.Integer:\n            return ContractParameter(type, value=item.GetBigInteger())\n        elif type == ContractParameterType.Boolean:\n            return ContractParameter(type, value=item.GetBoolean())\n        elif type == ContractParameterType.Array:\n            output = [ContractParameter.ToParameter(subitem) for subitem in item.GetArray()]\n            return ContractParameter(type, value=output)\n        elif type == ContractParameterType.String:\n            return ContractParameter(type, value=item.GetString())\n        elif type == ContractParameterType.InteropInterface:\n            return ContractParameter(type, value=item.GetInterface())\n        \n        else:\n            return ContractParameter(type, value=item.GetByteArray())", "docstring": "Convert a StackItem to a ContractParameter object of a specified ContractParameterType\nArgs:\ntype (neo.SmartContract.ContractParameterType): The ContractParameterType to convert to\nitem (neo.VM.InteropService.StackItem): The item to convert to a ContractParameter object\n\nReturns:", "source": "juraj-google-style"}
{"code": "def expand(self):\n    return self.element_wise((lambda o: (o.expand() if isinstance(o, QuantumExpression) else o)))", "docstring": "Expand each matrix element distributively.\n\nReturns:\nMatrix: Expanded matrix.", "source": "codesearchnet"}
{"code": "def quad_genz_keister_22(order):\n    order = sorted(GENZ_KEISTER_22.keys())[order]\n    (abscissas, weights) = GENZ_KEISTER_22[order]\n    abscissas = numpy.array(abscissas)\n    weights = numpy.array(weights)\n    weights /= numpy.sum(weights)\n    abscissas *= numpy.sqrt(2)\n    return (abscissas, weights)", "docstring": "Hermite Genz-Keister 22 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_22(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321  0.      1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "codesearchnet"}
{"code": "def rename_nodes(self, renaming_map):\n    if (not isinstance(renaming_map, dict)):\n        raise TypeError('renaming_map must be a dict')\n    for node in self.traverse_preorder():\n        if (node.label in renaming_map):\n            node.label = renaming_map[node.label]", "docstring": "Rename nodes in this ``Tree``\n\nArgs:\n``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)", "source": "codesearchnet"}
{"code": "def _run_check(self):\n    cmd = shlex.split(self.config['check_cmd'])\n    self.log.info('running %s', ' '.join(cmd))\n    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    start_time = time.time()\n    try:\n        (outs, errs) = proc.communicate(timeout=self.config['check_timeout'])\n    except subprocess.TimeoutExpired:\n        self.log.error('check timed out')\n        if (proc.poll() is None):\n            try:\n                proc.kill()\n            except PermissionError:\n                self.log.warning('failed to kill check due to adequate access rights, check could be running under another user(root) via sudo')\n        return False\n    else:\n        msg = 'check duration {t:.3f}ms'.format(t=((time.time() - start_time) * 1000))\n        self.log.info(msg)\n        if (proc.returncode != 0):\n            self.log.info('stderr from the check %s', errs)\n            self.log.info('stdout from the check %s', outs)\n        return (proc.returncode == 0)", "docstring": "Execute a check command.\n\nReturns:\nTrue if the exit code of the command is 0 otherwise False.", "source": "codesearchnet"}
{"code": "def __learn_labels(self, labels):\n        \n        if self.feature_length > 0:\n            result = list(self.labels.classes_)\n        else:\n            result = []\n\n        for label in labels:\n            result.append(label)\n        self.labels.fit(result)", "docstring": "Learns new labels, this method is intended for internal use\n\nArgs:\nlabels (:obj:`list` of :obj:`str`): Labels to learn", "source": "juraj-google-style"}
{"code": "def data_period_name_or_description(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `data_period_name_or_description`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `data_period_name_or_description`')\n\n        self._data_period_name_or_description = value", "docstring": "Corresponds to IDD Field `data_period_name_or_description`\n\nArgs:\nvalue (str): value for IDD Field `data_period_name_or_description`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _write_handle(self, conn, handle, ack, value, timeout=1.0):\n    conn_handle = conn\n    char_handle = handle\n\n    def write_handle_acked(event):\n        if ((event.command_class == 4) and (event.command == 1)):\n            (conn, _, char) = unpack('<BHH', event.payload)\n            return ((conn_handle == conn) and (char_handle == char))\n    data_len = len(value)\n    if (data_len > 20):\n        return (False, {'reason': 'Data too long to write'})\n    payload = struct.pack(('<BHB%ds' % data_len), conn_handle, char_handle, data_len, value)\n    try:\n        if ack:\n            response = self._send_command(4, 5, payload)\n        else:\n            response = self._send_command(4, 6, payload)\n    except InternalTimeoutError:\n        return (False, {'reason': 'Timeout waiting for response to command in _write_handle'})\n    (_, result) = unpack('<BH', response.payload)\n    if (result != 0):\n        return (False, {'reason': 'Error writing to handle', 'error_code': result})\n    if ack:\n        events = self._wait_process_events(timeout, (lambda x: False), write_handle_acked)\n        if (len(events) == 0):\n            return (False, {'reason': 'Timeout waiting for acknowledge on write'})\n        (_, result, _) = unpack('<BHH', events[0].payload)\n        if (result != 0):\n            return (False, {'reason': 'Error received during write to handle', 'error_code': result})\n    return (True, None)", "docstring": "Write to a BLE device characteristic by its handle\n\nArgs:\nconn (int): The connection handle for the device we should read from\nhandle (int): The characteristics handle we should read\nack (bool): Should this be an acknowledges write or unacknowledged\ntimeout (float): How long to wait before failing\nvalue (bytearray): The value that we should write", "source": "codesearchnet"}
{"code": "def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):\n    return image_transform(X, cv2.GaussianBlur, ksize=(ksize_width, ksize_height), sigmaX=sigma_x, sigmaY=sigma_y)", "docstring": "Apply Gaussian blur to the given data.\n\nArgs:\nX: data to blur\nkernel_size: Gaussian kernel size\nstddev: Gaussian kernel standard deviation (in both X and Y directions)", "source": "codesearchnet"}
{"code": "def add_real_file(self, source_path, read_only=True, target_path=None):\n    target_path = (target_path or source_path)\n    source_path = make_string_path(source_path)\n    target_path = self.make_string_path(target_path)\n    real_stat = os.stat(source_path)\n    fake_file = self.create_file_internally(target_path, read_from_real_fs=True)\n    fake_file.stat_result.set_from_stat_result(real_stat)\n    if read_only:\n        fake_file.st_mode &= 261924\n    fake_file.file_path = source_path\n    self.change_disk_usage(fake_file.size, fake_file.name, fake_file.st_dev)\n    return fake_file", "docstring": "Create `file_path`, including all the parent directories along the\nway, for an existing real file. The contents of the real file are read\nonly on demand.\n\nArgs:\nsource_path: Path to an existing file in the real file system\nread_only: If `True` (the default), writing to the fake file\nraises an exception.  Otherwise, writing to the file changes\nthe fake file only.\ntarget_path: If given, the path of the target direction,\notherwise it is equal to `source_path`.\n\nReturns:\nthe newly created FakeFile object.\n\nRaises:\nOSError: if the file does not exist in the real file system.\nIOError: if the file already exists in the fake file system.\n\n.. note:: On most systems, accessing the fake file's contents may\nupdate both the real and fake files' `atime` (access time).\nIn this particular case, `add_real_file()` violates the rule\nthat `pyfakefs` must not modify the real file system.", "source": "codesearchnet"}
{"code": "def WriteStackFrameWithId(self, stack_frame_with_id):\n    debug_event = debug_event_pb2.DebugEvent(stack_frame_with_id=stack_frame_with_id)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteStackFrameWithId(self._dump_root, debug_event)", "docstring": "Write a StackFrameWithId proto with the writer.\n\nArgs:\nstack_frame_with_id: A StackFrameWithId proto, describing the content a\nstack frame involved in the execution of the debugged TensorFlow\nprogram.", "source": "github-repos"}
{"code": "def get_img_shape(img):\n    if isinstance(img, np.ndarray):\n        shape = img.shape\n    else:\n        shape = K.int_shape(img)\n    if (K.image_data_format() == 'channels_last'):\n        shape = list(shape)\n        shape.insert(1, shape[(- 1)])\n        shape = tuple(shape[:(- 1)])\n    return shape", "docstring": "Returns image shape in a backend agnostic manner.\n\nArgs:\nimg: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or\n`(image_dims..., channels)` if data_format='channels_last'.\n\nReturns:\nTuple containing image shape information in `(samples, channels, image_dims...)` order.", "source": "codesearchnet"}
{"code": "def _check(self, check, radl):\n    if (check[0] == float):\n        if ((not isinstance(self.value, int)) and (not isinstance(self.value, float))):\n            raise RADLParseException(('Invalid type; expected %s' % check[0]), line=self.line)\n    elif (check[0] == str):\n        if ((not isinstance(self.value, str)) and (not isinstance(self.value, unicode))):\n            raise RADLParseException(('Invalid type; expected %s' % check[0]), line=self.line)\n    elif (not isinstance(self.value, check[0])):\n        raise RADLParseException(('Invalid type; expected %s' % check[0]), line=self.line)\n    if ((isinstance(self.value, str) or isinstance(self.value, unicode)) and (self.prop.find('version') == (- 1))):\n        if (self.operator != '='):\n            raise RADLParseException(\"Invalid operator; expected '='\", line=self.line)\n    elif (isinstance(self.value, int) or isinstance(self.value, float) or (self.prop.find('version') >= 0)):\n        if (self.operator not in ['=', '<=', '>=', '>', '<']):\n            raise RADLParseException((\"Invalid operator; expected '=', '<=', \" + \"'>=', '>' or '<'\"), line=self.line)\n    elif isinstance(self.value, Features):\n        if (self.operator != 'contains'):\n            raise RADLParseException(\"Invalid operator; expected 'contains'\", line=self.line)\n    if isinstance(check[1], list):\n        if (self.value.upper() not in check[1]):\n            raise RADLParseException(('Invalid value; expected one of %s' % check[1]), line=self.line)\n    elif callable(check[1]):\n        if (not check[1](self, radl)):\n            raise RADLParseException((\"Invalid value in property '%s'\" % self.prop), line=self.line)\n    if ((len(check) < 3) or (check[2] is None)):\n        if self.unit:\n            raise RADLParseException('Invalid unit; expected none', line=self.line)\n    elif ((len(check) > 2) and check[2]):\n        if (self.unit.upper() not in check[2]):\n            raise RADLParseException(('Invalid unit; expected one of %s' % check[2]), line=self.line)\n    return True", "docstring": "Check type, operator and unit in a feature.\n\nArgs:\n- check(tuple):\n- v[0]: expected type of the feature value.\n- v[1]: can be a list of possible values or a function to test the value or None.\n- v[2] (optional): can be a list of possible units; if None or not set the\nunit valid is none.\n- radl: second argument passed when calling v[1].", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    model_handler = VLLMCompletionsModelHandler(model_name=known_args.model)\n    input_examples = COMPLETION_EXAMPLES\n    if known_args.chat:\n        model_handler = VLLMChatModelHandler(model_name=known_args.model, chat_template_path=known_args.chat_template)\n        input_examples = CHAT_EXAMPLES\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    examples = pipeline | 'Create examples' >> beam.Create(input_examples)\n    predictions = examples | 'RunInference' >> RunInference(model_handler)\n    process_output = predictions | 'Process Predictions' >> beam.ParDo(PostProcessor())\n    _ = process_output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def get_accepted_features(features, proposed_feature):\n\n    def eq(feature):\n        'Features are equal if they have the same source\\n\\n        At least in this implementation...\\n        '\n        return (feature.source == proposed_feature.source)\n    result = lfilter(complement(eq), features)\n    if ((len(features) - len(result)) == 1):\n        return result\n    elif (len(result) == len(features)):\n        raise BalletError(\"Did not find match for proposed feature within 'contrib'\")\n    else:\n        raise BalletError('Unexpected condition (n_features={}, n_result={})'.format(len(features), len(result)))", "docstring": "Deselect candidate features from list of all features\n\nArgs:\nfeatures (List[Feature]): collection of all features in the ballet\nproject: both accepted features and candidate ones that have not\nbeen accepted\nproposed_feature (Feature): candidate feature that has not been\naccepted\n\nReturns:\nList[Feature]: list of features with the proposed feature not in it.\n\nRaises:\nballet.exc.BalletError: Could not deselect exactly the proposed\nfeature.", "source": "codesearchnet"}
{"code": "def due_date(self, due_date):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        due_date = self._utils.format_datetime(due_date, date_format='%Y-%m-%dT%H:%M:%SZ')\n        self._data['dueDate'] = due_date\n        request = {'dueDate': due_date}\n        return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Sets the task due_date\nArgs:\ndue_date: Converted to %Y-%m-%dT%H:%M:%SZ date format", "source": "juraj-google-style"}
{"code": "def delete_endpoint_config(self, endpoint_config_name):\n    LOGGER.info('Deleting endpoint configuration with name: {}'.format(endpoint_config_name))\n    self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)", "docstring": "Delete an Amazon SageMaker endpoint configuration.\n\nArgs:\nendpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.", "source": "codesearchnet"}
{"code": "def export_to_tf_tensor(self, x):\n    \n    mesh_impl = self.mesh_impl(x)\n    return mesh_impl.export_to_tf_tensor(\n        x, self.tensors[x].to_laid_out_tensor())", "docstring": "Turn a Tensor into a tf.Tensor.\n\nArgs:\nx: Tensor.\n\nReturns:\ntf.Tensor.", "source": "juraj-google-style"}
{"code": "def get_psd(self, omega):\n    w = np.asarray(omega)\n    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients\n    p = get_psd_value(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, w.flatten())\n    return p.reshape(w.shape)", "docstring": "Compute the PSD of the term for an array of angular frequencies\n\nArgs:\nomega (array[...]): An array of frequencies where the PSD should\nbe evaluated.\n\nReturns:\nThe value of the PSD for each ``omega``. This will have the same\nshape as ``omega``.", "source": "codesearchnet"}
{"code": "def check_errors(self, is_global=False):\n    errors = (self.global_errors if is_global else self.errors)\n    if errors:\n        print('dfTimewolf encountered one or more errors:')\n        for (error, critical) in errors:\n            print('{0:s}  {1:s}'.format(('CRITICAL: ' if critical else ''), error))\n            if critical:\n                print('Critical error found. Aborting.')\n                sys.exit((- 1))", "docstring": "Checks for errors and exits if any of them are critical.\n\nArgs:\nis_global: If True, check the global_errors attribute. If false, check the\nerror attribute.", "source": "codesearchnet"}
{"code": "def _ParseExtensionsString(self, extensions_string):\n    if (not extensions_string):\n        return\n    extensions_string = extensions_string.lower()\n    extensions = [extension.strip() for extension in extensions_string.split(',')]\n    file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions)\n    self._filter_collection.AddFilter(file_entry_filter)", "docstring": "Parses the extensions string.\n\nArgs:\nextensions_string (str): comma separated extensions to filter.", "source": "codesearchnet"}
{"code": "def reinit_nested_vars(variables, indices=None):\n    if isinstance(variables, (tuple, list)):\n        return tf.group(*[reinit_nested_vars(variable, indices) for variable in variables])\n    if (indices is None):\n        return variables.assign(tf.zeros_like(variables))\n    else:\n        zeros = tf.zeros(([tf.shape(indices)[0]] + variables.shape[1:].as_list()))\n        return tf.scatter_update(variables, indices, zeros)", "docstring": "Reset all variables in a nested tuple to zeros.\n\nArgs:\nvariables: Nested tuple or list of variables.\nindices: Batch indices to reset, defaults to all.\n\nReturns:\nOperation.", "source": "codesearchnet"}
{"code": "def get_uuid_string(low=None, high=None, **x):\n    \n    if low is None or high is None:\n        return None\n    x = ''.join([parse_part(low), parse_part(high)])\n    return '-'.join([x[:8], x[8:12], x[12:16], x[16:20], x[20:32]])", "docstring": "This method parses a UUID protobuf message type from its component\n'high' and 'low' longs into a standard formatted UUID string\n\nArgs:\nx (dict): containing keys, 'low' and 'high' corresponding to the UUID\nprotobuf message type\n\nReturns:\nstr: UUID formatted string", "source": "juraj-google-style"}
{"code": "def convert_positional_argument(self, index, arg_value):\n    if self._has_self:\n        if (index == 0):\n            return arg_value\n        index -= 1\n    arg_name = self.arg_names[index]\n    return self.convert_argument(arg_name, arg_value)", "docstring": "Convert and validate a positional argument.\n\nArgs:\nindex (int): The positional index of the argument\narg_value (object): The value to convert and validate\n\nReturns:\nobject: The converted value.", "source": "codesearchnet"}
{"code": "def search(cls, five9, filters):\n        \n        return cls._name_search(five9.configuration.getWebConnectors, filters)", "docstring": "Search for a record on the remote and return the results.\n\nArgs:\nfive9 (five9.Five9): The authenticated Five9 remote.\nfilters (dict): A dictionary of search parameters, keyed by the\nname of the field to search. This should conform to the\nschema defined in :func:`five9.Five9.create_criteria`.\n\nReturns:\nlist[BaseModel]: A list of records representing the result.", "source": "juraj-google-style"}
{"code": "def get_vm_extension(access_token, subscription_id, resource_group, vm_name, extension_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachines/', vm_name,\n                        '/extensions/', extension_name,\n                        '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get details about a VM extension.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\nextension_name (str): VM extension name.\n\nReturns:\nHTTP response. JSON body of VM extension properties.", "source": "juraj-google-style"}
{"code": "def load(self, steps_dir=None, step_file=None, step_list=None):\n    self._closed()\n    self.steps_library.load(steps_dir=steps_dir, step_file=step_file, step_list=step_list)", "docstring": "Load CWL steps into the WorkflowGenerator's steps library.\n\nAdds steps (command line tools and workflows) to the\n``WorkflowGenerator``'s steps library. These steps can be used to\ncreate workflows.\n\nArgs:\nsteps_dir (str): path to directory containing CWL files. All CWL in\nthe directory are loaded.\nstep_file (str): path to a file containing a CWL step that will be\nadded to the steps library.", "source": "codesearchnet"}
{"code": "def b_fit_score(self, x, y):\n    x = np.reshape(minmax_scale(x), ((- 1), 1))\n    y = np.reshape(minmax_scale(y), ((- 1), 1))\n    poly = PolynomialFeatures(degree=self.degree)\n    poly_x = poly.fit_transform(x)\n    poly_x[(:, 1)] = 0\n    poly_x[(:, 2)] = 0\n    regressor = LinearRegression()\n    regressor.fit(poly_x, y)\n    y_predict = regressor.predict(poly_x)\n    error = mean_squared_error(y_predict, y)\n    return error", "docstring": "Compute the RECI fit score\n\nArgs:\nx (numpy.ndarray): Variable 1\ny (numpy.ndarray): Variable 2\n\nReturns:\nfloat: RECI fit score", "source": "codesearchnet"}
{"code": "def _tokens_to_subtoken_ids(self, tokens):\n    \n    ret = []\n    for token in tokens:\n      ret.extend(self._token_to_subtoken_ids(token))\n    return ret", "docstring": "Converts a list of tokens to a list of subtoken ids.\n\nArgs:\ntokens: a list of strings.\nReturns:\na list of integers in the range [0, vocab_size)", "source": "juraj-google-style"}
{"code": "def isomorphic(q, g, check_varprops=True):\n    \n\n    qdg = _make_digraph(q, check_varprops)\n    gdg = _make_digraph(g, check_varprops)\n    def nem(qd, gd):  \n        return qd.get('sig') == gd.get('sig')\n    return nx.is_isomorphic(qdg, gdg, node_match=nem, edge_match=nem)", "docstring": "Return `True` if Xmrs objects *q* and *g* are isomorphic.\n\nIsomorphicity compares the predicates of an Xmrs, the variable\nproperties of their predications (if `check_varprops=True`),\nconstant arguments, and the argument structure between\npredications. Node IDs and Lnk values are ignored.\n\nArgs:\nq: the left Xmrs to compare\ng: the right Xmrs to compare\ncheck_varprops: if `True`, make sure variable properties are\nequal for mapped predications", "source": "juraj-google-style"}
{"code": "def _tensor_product(t1, t2):\n  \n  return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))", "docstring": "Computes the outer product of two possibly batched vectors.\n\nArgs:\nt1: A `tf.Tensor` of shape `[..., n]`.\nt2: A `tf.Tensor` of shape `[..., m]`.\n\nReturns:\nA tensor of shape `[..., n, m]` with matching batch dimensions, let's call\nit `r`, whose components are:\n\n```None\nr[..., i, j] = t1[..., i] * t2[..., j]\n```", "source": "juraj-google-style"}
{"code": "def exit_code(self) -> Any:\n    if (self._done() or self.is_killed()):\n        return self._exit_code\n    else:\n        raise BehaviourNotFinishedException", "docstring": "Returns the exit_code of the behaviour.\nIt only works when the behaviour is done or killed,\notherwise it raises an exception.\n\nReturns:\nobject: the exit code of the behaviour", "source": "codesearchnet"}
{"code": "def create(self, name, redirect_uri=None):\n    data = dict(name=name)\n    if redirect_uri:\n        data['redirect_uri'] = redirect_uri\n    auth_request_resource = self.resource.create(data)\n    return (auth_request_resource.attributes['metadata']['device_token'], auth_request_resource.attributes['mfa_uri'])", "docstring": "Create a new Device object.\n\nDevices tie Users and Applications together. For your Application to\naccess and act on behalf of a User, the User must authorize a Device\ncreated by your Application.\n\nThis function will return a `device_token` which you must store and use\nafter the Device is approved in\n`client.authenticate_device(api_token, device_token)`\n\nThe second value returned is an `mfa_uri` which is the location the User\nmust visit to approve the new device. After this function completes,\nyou should launch a new browser tab or webview with this value as the\nlocation. After the User approves the Device, they will be redirected to\nthe redirect_uri you specify in this call.\n\nArgs:\nname (str): Human-readable name for the device\n(e.g. \"Suzanne's iPhone\")\nredirect_uri (str, optional): A URI to which to redirect the User after\nthey approve the new Device.\n\nReturns: A tuple of (device_token, mfa_uri)", "source": "codesearchnet"}
{"code": "def is_instance(state, inst, not_instance_msg=None):\n    state.assert_is(['object_assignments'], 'is_instance', ['check_object'])\n    sol_name = state.solution_parts.get('name')\n    stu_name = state.student_parts.get('name')\n    if (not_instance_msg is None):\n        not_instance_msg = 'Is it a {{inst.__name__}}?'\n    if (not isInstanceInProcess(sol_name, inst, state.solution_process)):\n        raise InstructorError(('`is_instance()` noticed that `%s` is not a `%s` in the solution process.' % (sol_name, inst.__name__)))\n    _msg = state.build_message(not_instance_msg, {'inst': inst})\n    feedback = Feedback(_msg, state)\n    state.do_test(InstanceProcessTest(stu_name, inst, state.student_process, feedback))\n    return state", "docstring": "Check whether an object is an instance of a certain class.\n\n``is_instance()`` can currently only be used when chained from ``check_object()``, the function that is\nused to 'zoom in' on the object of interest.\n\nArgs:\ninst (class): The class that the object should have.\nnot_instance_msg (str): When specified, this overrides the automatically generated message in case\nthe object does not have the expected class.\nstate (State): The state that is passed in through the SCT chain (don't specify this).\n\n:Example:\n\nStudent code and solution code::\n\nimport numpy as np\narr = np.array([1, 2, 3, 4, 5])\n\nSCT::\n\n# Verify the class of arr\nimport numpy\nEx().check_object('arr').is_instance(numpy.ndarray)", "source": "codesearchnet"}
{"code": "def sync_job_info(self, job_name):\n        \n        job_path = os.path.join(self._logdir, job_name)\n\n        if job_name not in self._monitored_jobs:\n            self._create_job_info(job_path)\n            self._monitored_jobs.add(job_name)\n        else:\n            self._update_job_info(job_path)\n\n        expr_dirs = filter(lambda d: os.path.isdir(os.path.join(job_path, d)),\n                           os.listdir(job_path))\n\n        for expr_dir_name in expr_dirs:\n            self.sync_trial_info(job_path, expr_dir_name)\n\n        self._update_job_info(job_path)", "docstring": "Load information of the job with the given job name.\n\n1. Traverse each experiment sub-directory and sync information\nfor each trial.\n2. Create or update the job information, together with the job\nmeta file.\n\nArgs:\njob_name (str) name of the Tune experiment", "source": "juraj-google-style"}
{"code": "def get_pipeline_yaml(file):\n    \n    tag_representers = [PyString, SicString]\n\n    yaml_loader = get_yaml_parser_safe()\n\n    for representer in tag_representers:\n        yaml_loader.register_class(representer)\n\n    pipeline_definition = yaml_loader.load(file)\n    return pipeline_definition", "docstring": "Return pipeline yaml from open file object.\n\nUse specific custom representers to model the custom pypyr pipeline yaml\nformat, to load in special literal types like py and sic strings.\n\nIf looking to extend the pypyr pipeline syntax with special types, add\nthese to the tag_representers list.\n\nArgs:\nfile: open file-like object.\n\nReturns:\ndict-like representation of loaded yaml.", "source": "juraj-google-style"}
{"code": "def trim(self, len_):\n        \n        other = Version(None)\n        other.tokens = self.tokens[:len_]\n        other.seps = self.seps[:len_ - 1]\n        return other", "docstring": "Return a copy of the version, possibly with less tokens.\n\nArgs:\nlen_ (int): New version length. If >= current length, an\nunchanged copy of the version is returned.", "source": "juraj-google-style"}
{"code": "def parse(self, data):\n        \n        self.binding_var_count = 0\n        self.segment_count = 0\n\n        segments = self.parser.parse(data)\n        \n        path_wildcard = False\n        for segment in segments:\n            if segment.kind == _TERMINAL and segment.literal == '**':\n                if path_wildcard:\n                    raise ValidationException(\n                        'validation error: path template cannot contain more '\n                        'than one path wildcard')\n                path_wildcard = True\n        return segments", "docstring": "Returns a list of path template segments parsed from data.\n\nArgs:\ndata: A path template string.\nReturns:\nA list of _Segment.", "source": "juraj-google-style"}
{"code": "def crawl(self,\n              feeder_kwargs=None,\n              parser_kwargs=None,\n              downloader_kwargs=None):\n        \n        self.signal.reset()\n        self.logger.info('start crawling...')\n\n        feeder_kwargs = {} if feeder_kwargs is None else feeder_kwargs\n        parser_kwargs = {} if parser_kwargs is None else parser_kwargs\n        downloader_kwargs = {} if downloader_kwargs is None else downloader_kwargs\n\n        self.logger.info('starting %d feeder threads...',\n                         self.feeder.thread_num)\n        self.feeder.start(**feeder_kwargs)\n\n        self.logger.info('starting %d parser threads...',\n                         self.parser.thread_num)\n        self.parser.start(**parser_kwargs)\n\n        self.logger.info('starting %d downloader threads...',\n                         self.downloader.thread_num)\n        self.downloader.start(**downloader_kwargs)\n\n        while True:\n            if not self.feeder.is_alive():\n                self.signal.set(feeder_exited=True)\n            if not self.parser.is_alive():\n                self.signal.set(parser_exited=True)\n            if not self.downloader.is_alive():\n                break\n            time.sleep(1)\n\n        if not self.feeder.in_queue.empty():\n            self.feeder.clear_buffer()\n        if not self.parser.in_queue.empty():\n            self.parser.clear_buffer()\n        if not self.downloader.in_queue.empty():\n            self.downloader.clear_buffer(True)\n\n        self.logger.info('Crawling task done!')", "docstring": "Start crawling\n\nThis method will start feeder, parser and download and wait\nuntil all threads exit.\n\nArgs:\nfeeder_kwargs (dict, optional): Arguments to be passed to ``feeder.start()``\nparser_kwargs (dict, optional): Arguments to be passed to ``parser.start()``\ndownloader_kwargs (dict, optional): Arguments to be passed to\n``downloader.start()``", "source": "juraj-google-style"}
{"code": "def get(self, id_or_url, default=None):\n    if ('/' in id_or_url):\n        id = urls.SheetUrl.from_string(id_or_url).id\n    else:\n        id = id_or_url\n    try:\n        return self[id]\n    except KeyError:\n        return default", "docstring": "Fetch and return the spreadsheet with the given id or url.\n\nArgs:\nid_or_url (str): unique alphanumeric id or URL of the spreadsheet\nReturns:\nNew SpreadSheet instance or given default if none is found\nRaises:\nValueError: if an URL is given from which no id could be extracted", "source": "codesearchnet"}
{"code": "def getField(self, fld_name):\n        \n        result = \"\"\n        if fld_name in self.m_req:\n            result = self.m_req[fld_name][MeterData.StringValue]\n        else:\n            ekm_log(\"Requested nonexistent field: \" + fld_name)\n\n        return result", "docstring": "Return :class:`~ekmmeters.Field` content, scaled and formatted.\n\nArgs:\nfld_name (str): A :class:`~ekmmeters.Field` value which is on your meter.\n\nReturns:\nstr: String value (scaled if numeric) for the field.", "source": "juraj-google-style"}
{"code": "def GetParserAndPluginNames(cls, parser_filter_expression=None):\n    parser_and_plugin_names = []\n    for (parser_name, parser_class) in cls.GetParsers(parser_filter_expression=parser_filter_expression):\n        parser_and_plugin_names.append(parser_name)\n        if parser_class.SupportsPlugins():\n            for (plugin_name, _) in parser_class.GetPlugins():\n                parser_and_plugin_names.append('{0:s}/{1:s}'.format(parser_name, plugin_name))\n    return parser_and_plugin_names", "docstring": "Retrieves the parser and parser plugin names.\n\nArgs:\nparser_filter_expression (Optional[str]): parser filter expression,\nwhere None represents all parsers and plugins.\n\nReturns:\nlist[str]: parser and parser plugin names.", "source": "codesearchnet"}
{"code": "def __init__(self, entry_type, tag_name, tag_kind, **kwargs):\n        \n        super(TagProcessorWithEntryTypeAndFindByNamePlusKind,\n              self).__init__(**kwargs)\n\n        \n        self.entry_type = entry_type\n\n        \n        self.reference_tag_name = tag_name\n        self.reference_tag_kind = tag_kind", "docstring": "Initializer.\n\nArgs:\nentry_type: A string that should be returned by get_entry_type()\nfor all (matching) tags.\ntag_name: The unicode string name that matching tags should have.\ntag_kind: The unicode string \"kind\" attribute that matching tags\nshould have.", "source": "juraj-google-style"}
{"code": "def pull_df(self, md5):\n        \n        try:\n            _packed_df = self.workbench.get_dataframe(md5)\n            _df = pd.read_msgpack(lz4.loads(_packed_df))\n            return _df\n        except zerorpc.exceptions.RemoteError as e:\n            return repr_to_str_decorator.r_to_s(self._data_not_found)(e)", "docstring": "Wrapper for the Workbench get_dataframe method\nArgs:\nmd5: pull the dataframe identified by this md5\nReturns:\nThe uncompressed/unserialized dataframe", "source": "juraj-google-style"}
{"code": "def get_configs(__pkg: str, __name: str = 'config') -> List[str]:\n    \n    dirs = [user_config(__pkg), ]\n    dirs.extend(path.expanduser(path.sep.join([d, __pkg]))\n                for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))\n    configs = []\n    for dname in reversed(dirs):\n        test_path = path.join(dname, __name)\n        if path.exists(test_path):\n            configs.append(test_path)\n    return configs", "docstring": "Return all configs for given package.\n\nArgs:\n__pkg: Package name\n__name: Configuration file name", "source": "juraj-google-style"}
{"code": "def update_config_data(msg, cfg):\n    for attr in msg:\n        if ((attr in cfg.data[msg.profile]) and (attr is not 'auth')):\n            cfg.data[msg.profile][attr] = getattr(msg, attr)", "docstring": "Updates the profile's config entry with values set in each attr by the\nuser.  This will overwrite existing values.\n\nArgs:\n:msg: (Message class) an instance of a message class.\n:cfg: (jsonconfig.Config) config instance.", "source": "codesearchnet"}
{"code": "def increment(self, size: int):\n    assert (size >= 0), size\n    self.files += 1\n    self.size += size\n    self.bandwidth_meter.feed(size)", "docstring": "Increment the number of files downloaded.\n\nArgs:\nsize: The size of the file", "source": "codesearchnet"}
{"code": "def get_annotations_dict(members: dict[str, cfg.Variable]) -> '_instances.AnnotationsDict | None':\n    if '__annotations__' not in members:\n        return None\n    annots_var = members['__annotations__']\n    try:\n        annots = get_atomic_value(annots_var)\n    except ConversionError:\n        return None\n    return annots if isinstance(annots, _abstract.AnnotationsDict) else None", "docstring": "Get __annotations__ from a members map.\n\nReturns None rather than {} if the dict does not exist so that callers always\nhave a reference to the actual dictionary, and can mutate it if needed.\n\nArgs:\nmembers: A dict of member name to variable\n\nReturns:\nmembers['__annotations__'] unpacked as a python dict, or None", "source": "github-repos"}
{"code": "def _run_command(argv):\n    (command_name, argv) = _get_command_and_argv(argv)\n    _LOGGER.info('Running command \"%s %s\" with args: %s', settings.command, command_name, argv)\n    subcommand = _get_subcommand(command_name)\n    func = call.get_callable(subcommand)\n    doc = usage.format_usage(subcommand.__doc__)\n    args = _get_parsed_args(command_name, doc, argv)\n    return (call.call(func, args) or 0)", "docstring": "Run the command with the given CLI options and exit.\n\nCommand functions are expected to have a __doc__ string that is parseable\nby docopt.\n\nArgs:\nargv: The list of command line arguments supplied for a command. The\nfirst argument is expected to be the name of the command to be run.\nNote that this is different than the full arguments parsed by\ndocopt for the entire program.\n\nRaises:\nValueError: Raised if the user attempted to run an invalid command.", "source": "codesearchnet"}
{"code": "def validate_sns_topic_subscription(self, region):\n        \n        sns = self.session.client('sns', region_name=region)\n        arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)\n        try:\n            data = sns.list_subscriptions_by_topic(TopicArn=arn)\n        except ClientError as ex:\n            self.log.error('Failed to list subscriptions by topic in {} ({}): {}'.format(\n                self.account.account_name,\n                region,\n                ex\n            ))\n            return False\n\n        for sub in data['Subscriptions']:\n            if sub['Endpoint'] == self.sqs_queue:\n                if sub['SubscriptionArn'] == 'PendingConfirmation':\n                    self.log.warning('Subscription pending confirmation for {} in {}'.format(\n                        self.account.account_name,\n                        region\n                    ))\n                    return False\n                return True\n\n        return False", "docstring": "Validates SQS subscription to the SNS topic. Returns `True` if subscribed or `False` if not subscribed\nor topic is missing\n\nArgs:\nregion (str): Name of AWS Region\n\nReturns:\n`bool`", "source": "juraj-google-style"}
{"code": "def imresize(img, size, return_scale=False, interpolation='bilinear'):\n    \n    h, w = img.shape[:2]\n    resized_img = cv2.resize(\n        img, size, interpolation=interp_codes[interpolation])\n    if not return_scale:\n        return resized_img\n    else:\n        w_scale = size[0] / w\n        h_scale = size[1] / h\n        return resized_img, w_scale, h_scale", "docstring": "Resize image to a given size.\n\nArgs:\nimg (ndarray): The input image.\nsize (tuple): Target (w, h).\nreturn_scale (bool): Whether to return `w_scale` and `h_scale`.\ninterpolation (str): Interpolation method, accepted values are\n\"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\".\n\nReturns:\ntuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or\n`resized_img`.", "source": "juraj-google-style"}
{"code": "def recursive_print(name, val, spaces=0):\n    if name is None:\n        msg = None\n    else:\n        fmt = '.' * max(0, spaces - 2) + '\n        msg = fmt.format(name)\n    if isinstance(val, dict):\n        if msg is not None:\n            print(msg)\n        for k in val.keys():\n            recursive_print(k, val[k], spaces + 2)\n    elif isinstance(val, torch.Tensor):\n        print(msg, ':', val.size())\n    else:\n        print(msg, ':', val)", "docstring": "Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py`\n\nArgs:\nname (str): the name of the current tensor parameter\nval (Tuple(int)): the shape of the current tensor parameter\nspaces (int): the number of spaces to print before the output for a nested structure", "source": "github-repos"}
{"code": "def writeInput(self, session, directory, name):\n    self.project_directory = directory\n    with tmp_chdir(directory):\n        replaceParamFile = self.replaceParamFile\n        self.write(session=session, directory=directory, name=name)\n        self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)\n        self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile)", "docstring": "Write only input files for a GSSHA project from the database to file.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\ndirectory (str): Directory where the files will be written.\nname (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project\nnaming convention will be given this name with the appropriate extension (e.g.: 'example.prj',\n'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original\nfile names.", "source": "codesearchnet"}
{"code": "def center_crop(self, image: 'torch.Tensor', size: dict[str, int], **kwargs) -> 'torch.Tensor':\n    if size.height is None or size.width is None:\n        raise ValueError(f\"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}\")\n    return F.center_crop(image, (size['height'], size['width']))", "docstring": "Center crop an image to `(size[\"height\"], size[\"width\"])`. If the input size is smaller than `crop_size` along\nany edge, the image is padded with 0's and then center cropped.\n\nArgs:\nimage (`\"torch.Tensor\"`):\nImage to center crop.\nsize (`Dict[str, int]`):\nSize of the output image.\n\nReturns:\n`torch.Tensor`: The center cropped image.", "source": "github-repos"}
{"code": "def define_simulation_graph(batch_env, algo_cls, config):\n  \n  \n  step = tf.Variable(0, False, dtype=tf.int32, name='global_step')\n  is_training = tf.placeholder(tf.bool, name='is_training')\n  should_log = tf.placeholder(tf.bool, name='should_log')\n  do_report = tf.placeholder(tf.bool, name='do_report')\n  force_reset = tf.placeholder(tf.bool, name='force_reset')\n  algo = algo_cls(batch_env, step, is_training, should_log, config)\n  done, score, summary = tools.simulate(\n      batch_env, algo, should_log, force_reset)\n  message = 'Graph contains {} trainable variables.'\n  tf.logging.info(message.format(tools.count_weights()))\n  \n  return tools.AttrDict(locals())", "docstring": "Define the algorithm and environment interaction.\n\nArgs:\nbatch_env: In-graph environments object.\nalgo_cls: Constructor of a batch algorithm.\nconfig: Configuration object for the algorithm.\n\nReturns:\nObject providing graph elements via attributes.", "source": "juraj-google-style"}
{"code": "def prune_volumes(self, filters=None):\n    params = {}\n    if filters:\n        params['filters'] = utils.convert_filters(filters)\n    url = self._url('/volumes/prune')\n    return self._result(self._post(url, params=params), True)", "docstring": "Delete unused volumes\n\nArgs:\nfilters (dict): Filters to process on the prune list.\n\nReturns:\n(dict): A dict containing a list of deleted volume names and\nthe amount of disk space reclaimed in bytes.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _is_padded_shape_compatible_with(padded_shape, input_component_shape):\n    if padded_shape.dims is None or input_component_shape.dims is None:\n        return True\n    if len(padded_shape.dims) != len(input_component_shape.dims):\n        return False\n    for padded_dim, input_dim in zip(padded_shape.dims, input_component_shape.dims):\n        if padded_dim.value is not None and input_dim.value is not None and (padded_dim.value < input_dim.value):\n            return False\n    return True", "docstring": "Returns `True` if `input_component_shape` can be padded to `padded_shape`.\n\nArgs:\npadded_shape: A `tf.TensorShape`.\ninput_component_shape: A `tf.TensorShape`.\n\nReturns:\n`True` if `input_component_shape` can be padded to `padded_shape`, otherwise\n`False`.", "source": "github-repos"}
{"code": "def compute_average_oxidation_state(site):\n    try:\n        avg_oxi = sum([(sp.oxi_state * occu) for (sp, occu) in site.species.items() if (sp is not None)])\n        return avg_oxi\n    except AttributeError:\n        pass\n    try:\n        return site.charge\n    except AttributeError:\n        raise ValueError('Ewald summation can only be performed on structures that are either oxidation state decorated or have site charges.')", "docstring": "Calculates the average oxidation state of a site\n\nArgs:\nsite: Site to compute average oxidation state\n\nReturns:\nAverage oxidation state of site.", "source": "codesearchnet"}
{"code": "def kmer_count(seq_list, k):\n    all_kmers = generate_all_kmers(k)\n    kmer_count_list = []\n    for seq in seq_list:\n        kmer_count_list.append([seq.count(kmer) for kmer in all_kmers])\n    return pd.DataFrame(kmer_count_list, columns=all_kmers)", "docstring": "Generate k-mer counts from a set of sequences\n\nArgs:\nseq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})\nk (int): K in k-mer.\nReturns:\npandas.DataFrame: Count matrix for seach sequence in seq_list\n\nExample:\n>>> kmer_count([\"ACGTTAT\", \"GACGCGA\"], 2)\nAA  AC  AG  AT  CA  CC  CG  CT  GA  GC  GG  GT  TA  TC  TG  TT\n0   0   1   0   1   0   0   1   0   0   0   0   1   1   0   0   1\n1   0   1   0   0   0   0   2   0   2   1   0   0   0   0   0   0", "source": "codesearchnet"}
{"code": "def load_entity(self, name, file_name, reload_cache=False):\n    Entity.verify_name(name)\n    self.entities.load(Entity.wrap_name(name), file_name, reload_cache)\n    with open(file_name) as f:\n        self.padaos.add_entity(name, f.read().split('\\n'))\n    self.must_train = True", "docstring": "Loads an entity, optionally checking the cache first\n\nArgs:\nname (str): The associated name of the entity\nfile_name (str): The location of the entity file\nreload_cache (bool): Whether to refresh all of cache", "source": "codesearchnet"}
{"code": "def save_plot(self, filename, img_format=\"eps\", **kwargs):\n        \n        plt = self.get_plot(**kwargs)\n        plt.savefig(filename, format=img_format)", "docstring": "Save matplotlib plot to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.", "source": "juraj-google-style"}
{"code": "def _checksum(cls, line):\n    tr_table = str.maketrans({c: None for c in (ascii_uppercase + '+ .')})\n    no_letters = line[:68].translate(tr_table).replace('-', '1')\n    return (sum([int(l) for l in no_letters]) % 10)", "docstring": "Compute the checksum of a full line\n\nArgs:\nline (str): Line to compute the checksum from\nReturn:\nint: Checksum (modulo 10)", "source": "codesearchnet"}
{"code": "def _RunOsLoginControl(self, params):\n    try:\n        return subprocess.call(([constants.OSLOGIN_CONTROL_SCRIPT] + params))\n    except OSError as e:\n        if (e.errno == errno.ENOENT):\n            return None\n        else:\n            raise", "docstring": "Run the OS Login control script.\n\nArgs:\nparams: list, the params to pass to the script\n\nReturns:\nint, the return code from the call, or None if the script is not found.", "source": "codesearchnet"}
{"code": "def has_enough_gas_reserve(raiden, channels_to_open: int=0) -> Tuple[(bool, int)]:\n    secure_reserve_estimate = get_reserve_estimate(raiden, channels_to_open)\n    current_account_balance = raiden.chain.client.balance(raiden.chain.client.address)\n    return ((secure_reserve_estimate <= current_account_balance), secure_reserve_estimate)", "docstring": "Checks if the account has enough balance to handle the lifecycles of all\nopen channels as well as the to be created channels.\n\nNote: This is just an estimation.\n\nArgs:\nraiden: A raiden service instance\nchannels_to_open: The number of new channels that should be opened\n\nReturns:\nTuple of a boolean denoting if the account has enough balance for\nthe remaining lifecycle events and the estimate for the remaining\nlifecycle cost", "source": "codesearchnet"}
{"code": "def _supervised_signature_def(method_name, inputs, loss=None, predictions=None, metrics=None):\n    if inputs is None or not inputs:\n        raise ValueError('{} inputs cannot be None or empty.'.format(method_name))\n    signature_inputs = {key: utils.build_tensor_info(tensor) for key, tensor in inputs.items()}\n    signature_outputs = {}\n    for output_set in (loss, predictions, metrics):\n        if output_set is not None:\n            sig_out = {key: utils.build_tensor_info(tensor) for key, tensor in output_set.items()}\n            signature_outputs.update(sig_out)\n    signature_def = signature_def_utils.build_signature_def(signature_inputs, signature_outputs, method_name)\n    return signature_def", "docstring": "Creates a signature for training and eval data.\n\nThis function produces signatures that describe the inputs and outputs\nof a supervised process, such as training or evaluation, that\nresults in loss, metrics, and the like. Note that this function only requires\ninputs to be not None.\n\nArgs:\nmethod_name: Method name of the SignatureDef as a string.\ninputs: dict of string to `Tensor`.\nloss: dict of string to `Tensor` representing computed loss.\npredictions: dict of string to `Tensor` representing the output predictions.\nmetrics: dict of string to `Tensor` representing metric ops.\n\nReturns:\nA train- or eval-flavored signature_def.\n\nRaises:\nValueError: If inputs or outputs is `None`.", "source": "github-repos"}
{"code": "def comment_to_ast(self, comment, link_resolver):\n    assert (comment is not None)\n    text = comment.description\n    if (self.remove_xml_tags or (comment.filename in self.gdbus_codegen_sources)):\n        text = re.sub('<.*?>', '', text)\n    if self.escape_html:\n        text = cgi.escape(text)\n    (ast, diagnostics) = cmark.gtkdoc_to_ast(text, link_resolver)\n    for diag in diagnostics:\n        if (comment.filename and (comment.filename not in self.gdbus_codegen_sources)):\n            column = (diag.column + comment.col_offset)\n            if (diag.lineno == 0):\n                column += comment.initial_col_offset\n            lines = text.split('\\n')\n            line = lines[diag.lineno]\n            i = 0\n            while (line[i] == ' '):\n                i += 1\n            column += (i - 1)\n            if ((diag.lineno > 0) and any([(c != ' ') for c in lines[(diag.lineno - 1)]])):\n                column += 1\n            lineno = (- 1)\n            if (comment.lineno != (- 1)):\n                lineno = (((comment.lineno - 1) + comment.line_offset) + diag.lineno)\n            warn(diag.code, message=diag.message, filename=comment.filename, lineno=lineno, column=column)\n    return ast", "docstring": "Given a gtk-doc comment string, returns an opaque PyCapsule\ncontaining the document root.\n\nThis is an optimization allowing to parse the docstring only\nonce, and to render it multiple times with\n`ast_to_html`, links discovery and\nmost of the link resolution being lazily done in that second phase.\n\nIf you don't care about performance, you should simply\nuse `translate`.\n\nArgs:\ntext: unicode, the docstring to parse.\nlink_resolver: hotdoc.core.links.LinkResolver, an object\nwhich will be called to retrieve `hotdoc.core.links.Link`\nobjects.\n\nReturns:\ncapsule: A PyCapsule wrapping an opaque C pointer, which\ncan be passed to `ast_to_html`\nafterwards.\ndiagnostics: A list of diagnostics as output by the gtk-doc cmark\nextension", "source": "codesearchnet"}
{"code": "def get_course_details(self, course_id):\n    return self._load_data(self.COURSES_ENDPOINT, resource_id=course_id, many=False)", "docstring": "Return the details of a single course by id - not a course run id.\n\nArgs:\ncourse_id (str): The unique id for the course in question.\n\nReturns:\ndict: Details of the course in question.", "source": "codesearchnet"}
{"code": "def get_input_info_dict(self, signature=None):\n    return self._spec.get_input_info_dict(signature=signature, tags=self._tags)", "docstring": "Describes the inputs required by a signature.\n\nArgs:\nsignature: A string with the signature to get inputs information for.\nIf None, the default signature is used if defined.\n\nReturns:\nThe result of ModuleSpec.get_input_info_dict() for the given signature,\nand the graph variant selected by `tags` when this Module was initialized.\n\nRaises:\nKeyError: if there is no such signature.", "source": "codesearchnet"}
{"code": "def get_message(self, message_id):\n        \n\n        for message in self.messages:\n            if message.id == message_id:\n                return message\n\n        raise ArgumentError(\"Message ID not found\", message_id=message_id)", "docstring": "Get a message by its persistent id.\n\nArgs:\nmessage_id (int): The id of the message that we're looking for", "source": "juraj-google-style"}
{"code": "def deprecated_argument_lookup(new_name, new_value, old_name, old_value):\n    if old_value is not None:\n        if new_value is not None:\n            raise ValueError(f\"Cannot specify both '{old_name}' and '{new_name}'.\")\n        return old_value\n    return new_value", "docstring": "Looks up deprecated argument name and ensures both are not used.\n\nArgs:\nnew_name: new name of argument\nnew_value: value of new argument (or None if not used)\nold_name: old name of argument\nold_value: value of old argument (or None if not used)\n\nReturns:\nThe effective argument that should be used.\nRaises:\nValueError: if new_value and old_value are both non-null", "source": "github-repos"}
{"code": "def _validator(code_or_name, validator_type):\n    \n    if validator_type == \"error\":\n        from .errors import codes\n        from .errors import EXT\n    elif validator_type == \"warning\":\n        from .warnings import codes\n        from .warnings import EXT\n    else:\n        pass \n\n    def decorator(func):\n        def wrapper(*args, **kw):\n            extra = func(*args, **kw)\n            if extra is None: return []\n            if isinstance(code_or_name, string_types):\n                code = EXT\n                name = codes[code][0] + \":\" + code_or_name\n            else:\n                code = code_or_name\n                name = codes[code][0]\n            text = codes[code][1]\n            return [(code, name, text, extra)]\n        wrapper.validator_type = validator_type\n        return wrapper\n\n    return decorator", "docstring": "Internal shared implementation to handle both error and warning\nvalidation checks.\n\nArgs:\ncode code_or_name (int or str) : a defined error code or custom message\nvalidator_type (str) : either \"error\" or \"warning\"\n\nReturns:\nvalidation decorator", "source": "juraj-google-style"}
{"code": "def parse(self, argument):\n    if (not isinstance(argument, six.string_types)):\n        raise TypeError('flag value must be a string, found \"{}\"'.format(type(argument)))\n    return argument", "docstring": "Parses the string argument and returns the native value.\n\nBy default it returns its argument unmodified.\n\nArgs:\nargument: string argument passed in the commandline.\n\nRaises:\nValueError: Raised when it fails to parse the argument.\nTypeError: Raised when the argument has the wrong type.\n\nReturns:\nThe parsed value in native type.", "source": "codesearchnet"}
{"code": "def is_commutable(expr1, expr2, eps=1e-08):\n    return (sum(((x * x.conjugate()).real for x in commutator(expr1, expr2).coeffs())) < eps)", "docstring": "Test whether expr1 and expr2 are commutable.\n\nArgs:\nexpr1 (Expr, Term or Pauli operator): Pauli's expression.\nexpr2 (Expr, Term or Pauli operator): Pauli's expression.\neps (float, optional): Machine epsilon.\nIf |[expr1, expr2]| < eps, consider it is commutable.\n\nReturns:\nbool: if expr1 and expr2 are commutable, returns True, otherwise False.", "source": "codesearchnet"}
{"code": "def relocate(source, destination, move=False):\n    venv = api.VirtualEnvironment(source)\n    if (not move):\n        venv.relocate(destination)\n        return None\n    venv.move(destination)\n    return None", "docstring": "Adjust the virtual environment settings and optional move it.\n\nArgs:\nsource (str): Path to the existing virtual environment.\ndestination (str): Desired path of the virtual environment.\nmove (bool): Whether or not to actually move the files. Default False.", "source": "codesearchnet"}
{"code": "def callable_eq(x: Optional[Callable[..., Any]], y: Optional[Callable[..., Any]]) -> bool:\n    if x is y:\n        return True\n    if x is None or y is None:\n        return False\n    if inspect.isfunction(x) and inspect.isfunction(y):\n        return _code_eq(x.__code__, y.__code__)\n    elif inspect.ismethod(x) and inspect.ismethod(y):\n        return _code_eq(x.__code__, y.__code__) and x.__self__ is y.__self__\n    return x == y", "docstring": "Returns True if two (maybe) callables are equal.\n\nFor functions: `x` and `y` are considered equal when they are the same\ninstance or have the same code (e.g. lambda x: x).\n\nFor methods: `x` and `y` are considered equal when:\nstatic method: The same method from the same class hierarchy. E.g. subclass\ninherits a base class' static method.\nclass method: The same method from the same class. Inherited class method\nare considered different class method.\ninstance method: When `self` is not bound, the same method from the same\nclass hierarchy (like static method). When `self` is bound, the same\nmethod on the same object.\n\nArgs:\nx: An optional function or method object.\ny: An optinoal function or method object.\n\nReturns:\nReturns True if `x` and `y` are considered equal. Meaning that they are\neither the same instance or derived from the same code and have the same\neffect.", "source": "github-repos"}
{"code": "def fastq_verifier(entries, ambiguous=False):\n    if ambiguous:\n        regex = '^@.+{0}[ACGTURYKMSWBDHVNX]+{0}\\\\+.*{0}[!\"\n    else:\n        regex = '^@.+{0}[ACGTU]+{0}\\\\+.*{0}[!-~]+{0}$'.format(os.linesep)\n    delimiter = '{0}'.format(os.linesep)\n    for entry in entries:\n        if (len(entry.sequence) != len(entry.quality)):\n            msg = 'The number of bases in {0} does not match the number of quality scores'.format(entry.id)\n            raise FormatError(message=msg)\n        try:\n            entry_verifier([entry.write()], regex, delimiter)\n        except FormatError as error:\n            if (error.part == 0):\n                msg = 'Unknown Header Error with {0}'.format(entry.id)\n                raise FormatError(message=msg)\n            elif ((error.part == 1) and ambiguous):\n                msg = '{0} contains a base not in [ACGTURYKMSWBDHVNX]'.format(entry.id)\n                raise FormatError(message=msg)\n            elif ((error.part == 1) and (not ambiguous)):\n                msg = '{0} contains a base not in [ACGTU]'.format(entry.id)\n                raise FormatError(message=msg)\n            elif (error.part == 2):\n                msg = 'Unknown error with line 3 of {0}'.format(entry.id)\n                raise FormatError(message=msg)\n            elif (error.part == 3):\n                msg = '{0} contains a quality score not in [!-~]'.format(entry.id)\n                raise FormatError(message=msg)\n            else:\n                msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)\n                raise FormatError(message=msg)", "docstring": "Raises error if invalid FASTQ format detected\n\nArgs:\nentries (list): A list of FastqEntry instances\n\nambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases\n\nRaises:\nFormatError: Error when FASTQ format incorrect with descriptive message\n\nExample:\n>>> from bio_utils.iterators import fastq_iter\n>>> import os\n>>> entries = r'@entry1{0}AAGGATTCG{0}+{0}112234432{0}' \\\n...           r'@entry{0}AGGTCCCCCG{0}+{0}4229888884{0}' \\\n...           r'@entry3{0}GCCTAGC{0}9ddsa5n'.format(os.linesep)\n>>> fastq_entries = fastq_iter(iter(entries.split(os.linesep)))\n>>> fastq_verifier(fastq_entries)", "source": "codesearchnet"}
{"code": "def _should_pack(arg):\n    return isinstance(arg, list)", "docstring": "Determines whether the caller needs to pack the argument in a tuple.\n\nIf user-defined function returns a list of tensors, `nest.flatten()` and\n`ops.convert_to_tensor()` and would conspire to attempt to stack those tensors\ninto a single tensor because the tf.data version of `nest.flatten()` does\nnot recurse into lists. Since it is more likely that the list arose from\nreturning the result of an operation (such as `tf.numpy_function()`) that\nreturns a list of not-necessarily-stackable tensors, we treat the returned\nvalue as a `tuple` instead. A user wishing to pack the return value into a\nsingle tensor can use an explicit `tf.stack()` before returning.\n\nArgs:\narg: argument to check\n\nReturns:\nIndication of whether the caller needs to pack the argument in a tuple.", "source": "github-repos"}
{"code": "def ws050(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws050`'.format(value))\n    self._ws050 = value", "docstring": "Corresponds to IDD Field `ws050`\nWind speed corresponding 5.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `ws050`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def from_json_file(cls, filename):\n        \n        with open(filename, 'r') as fp:\n            return cls(json.load(fp))", "docstring": "Load a lexicon from a JSON file.\n\nArgs:\nfilename (str): The path to a JSON dump.", "source": "juraj-google-style"}
{"code": "def update_qos_aggregated_configuration(self, qos_configuration, timeout=(- 1)):\n    uri = '{}{}'.format(self.data['uri'], self.QOS_AGGREGATED_CONFIGURATION)\n    return self._helper.update(qos_configuration, uri=uri, timeout=timeout)", "docstring": "Updates the QoS aggregated configuration for the logical interconnect.\n\nArgs:\nqos_configuration:\nQOS configuration.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Logical Interconnect.", "source": "codesearchnet"}
{"code": "def stepBy(self, steps):\n        \n        self.setValue(self.value() + steps*self.singleStep())", "docstring": "steps value up/down by a single step. Single step is defined in singleStep().\n\nArgs:\nsteps (int): positiv int steps up, negativ steps down", "source": "juraj-google-style"}
{"code": "def _BuildFindSpecsFromArtifact(self, definition, environment_variables):\n    \n    find_specs = []\n    for source in definition.sources:\n      if source.type_indicator == artifact_types.TYPE_INDICATOR_FILE:\n        for path_entry in set(source.paths):\n          specifications = self._BuildFindSpecsFromFileSourcePath(\n              path_entry, source.separator, environment_variables,\n              self._knowledge_base.user_accounts)\n          find_specs.extend(specifications)\n          self.file_system_artifact_names.add(definition.name)\n\n      elif (source.type_indicator ==\n            artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):\n        for key_path in set(source.keys):\n          if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):\n            specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)\n            find_specs.extend(specifications)\n            self.registry_artifact_names.add(definition.name)\n\n      elif (source.type_indicator ==\n            artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):\n        \n        \n\n        \n        key_paths = {\n            key_value['key'] for key_value in source.key_value_pairs}\n        key_paths_string = ', '.join(key_paths)\n\n        logger.warning((\n            'Windows Registry values are not supported, extracting keys: '\n            '\"{0!s}\"').format(key_paths_string))\n\n        for key_path in key_paths:\n          if ArtifactDefinitionsFilterHelper.CheckKeyCompatibility(key_path):\n            specifications = self._BuildFindSpecsFromRegistrySourceKey(key_path)\n            find_specs.extend(specifications)\n            self.registry_artifact_names.add(definition.name)\n\n      elif (source.type_indicator ==\n            artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):\n        for name in source.names:\n          specifications = self._BuildFindSpecsFromGroupName(\n              name, environment_variables)\n          find_specs.extend(specifications)\n\n      else:\n        logger.warning(\n            'Unsupported artifact definition source type: \"{0:s}\"'.format(\n                source.type_indicator))\n\n    return find_specs", "docstring": "Builds find specifications from an artifact definition.\n\nArgs:\ndefinition (artifacts.ArtifactDefinition): artifact definition.\nenvironment_variables (list[EnvironmentVariableArtifact]):\nenvironment variables.\n\nReturns:\nlist[dfvfs.FindSpec|dfwinreg.FindSpec]: dfVFS or dfWinReg find\nspecifications.", "source": "juraj-google-style"}
{"code": "def _mouseDown(x, y, button):\n    \n    if button == 'left':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_LEFTDOWN, x, y)\n        except (PermissionError, OSError): \n            pass\n    elif button == 'middle':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_MIDDLEDOWN, x, y)\n        except (PermissionError, OSError): \n            pass\n    elif button == 'right':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_RIGHTDOWN, x, y)\n        except (PermissionError, OSError): \n            pass\n    else:\n        assert False, \"button argument not in ('left', 'middle', 'right')\"", "docstring": "Send the mouse down event to Windows by calling the mouse_event() win32\nfunction.\n\nArgs:\nx (int): The x position of the mouse event.\ny (int): The y position of the mouse event.\nbutton (str): The mouse button, either 'left', 'middle', or 'right'\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _path_formatter(self, suffix):\n        \n        if suffix.lower() == \"mirror\":\n            path_items = [self.bucket, self.s3path]\n        else:\n            path_items = [self.bucket, self.s3path, suffix]\n\n        path = '/'.join(path_items)\n        s3_format = \"s3:\n        formatted_path = path.replace('\n        full_path = s3_format.format(formatted_path)\n        return full_path", "docstring": "Format the s3 path properly.\n\nArgs:\nsuffix (str): suffix to add on to an s3 path\n\nReturns:\nstr: formatted path", "source": "juraj-google-style"}
{"code": "def date_to_epoch(year, month, day):\n    \n    return int(date_to_delorean(year, month, day).epoch)", "docstring": "Converts a date to epoch in UTC\n\nArgs:\nyear: int between 1 and 9999.\nmonth: int between 1 and 12.\nday: int between 1 and 31.\n\nReturns:\nInt epoch in UTC from date.", "source": "juraj-google-style"}
{"code": "def Parse(self, rdf_data):\n    if self._filter:\n        return list(self._filter.Parse(rdf_data, self.expression))\n    return rdf_data", "docstring": "Process rdf data through the filter.\n\nFilters sift data according to filter rules. Data that passes the filter\nrule is kept, other data is dropped.\n\nIf no filter method is provided, the data is returned as a list.\nOtherwise, a items that meet filter conditions are returned in a list.\n\nArgs:\nrdf_data: Host data that has already been processed by a Parser into RDF.\n\nReturns:\nA list containing data items that matched the filter rules.", "source": "codesearchnet"}
{"code": "def pymmh3_hash128_x64(key: Union[bytes, bytearray], seed: int) -> int:\n    \n\n    def fmix(k):\n        k ^= k >> 33\n        k = (k * 0xff51afd7ed558ccd) & 0xFFFFFFFFFFFFFFFF\n        k ^= k >> 33\n        k = (k * 0xc4ceb9fe1a85ec53) & 0xFFFFFFFFFFFFFFFF\n        k ^= k >> 33\n        return k\n\n    length = len(key)\n    nblocks = int(length / 16)\n\n    h1 = seed\n    h2 = seed\n\n    c1 = 0x87c37b91114253d5\n    c2 = 0x4cf5ad432745937f\n\n    \n    for block_start in range(0, nblocks * 8, 8):\n        \n        k1 = (\n            key[2 * block_start + 7] << 56 |\n            key[2 * block_start + 6] << 48 |\n            key[2 * block_start + 5] << 40 |\n            key[2 * block_start + 4] << 32 |\n            key[2 * block_start + 3] << 24 |\n            key[2 * block_start + 2] << 16 |\n            key[2 * block_start + 1] << 8 |\n            key[2 * block_start + 0]\n        )\n\n        k2 = (\n            key[2 * block_start + 15] << 56 |\n            key[2 * block_start + 14] << 48 |\n            key[2 * block_start + 13] << 40 |\n            key[2 * block_start + 12] << 32 |\n            key[2 * block_start + 11] << 24 |\n            key[2 * block_start + 10] << 16 |\n            key[2 * block_start + 9] << 8 |\n            key[2 * block_start + 8]\n        )\n\n        k1 = (c1 * k1) & 0xFFFFFFFFFFFFFFFF\n        k1 = (k1 << 31 | k1 >> 33) & 0xFFFFFFFFFFFFFFFF  \n        k1 = (c2 * k1) & 0xFFFFFFFFFFFFFFFF\n        h1 ^= k1\n\n        h1 = (h1 << 27 | h1 >> 37) & 0xFFFFFFFFFFFFFFFF  \n        h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF\n        h1 = (h1 * 5 + 0x52dce729) & 0xFFFFFFFFFFFFFFFF\n\n        k2 = (c2 * k2) & 0xFFFFFFFFFFFFFFFF\n        k2 = (k2 << 33 | k2 >> 31) & 0xFFFFFFFFFFFFFFFF  \n        k2 = (c1 * k2) & 0xFFFFFFFFFFFFFFFF\n        h2 ^= k2\n\n        h2 = (h2 << 31 | h2 >> 33) & 0xFFFFFFFFFFFFFFFF  \n        h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF\n        h2 = (h2 * 5 + 0x38495ab5) & 0xFFFFFFFFFFFFFFFF\n\n    \n    tail_index = nblocks * 16\n    k1 = 0\n    k2 = 0\n    tail_size = length & 15\n\n    if tail_size >= 15:\n        k2 ^= key[tail_index + 14] << 48\n    if tail_size >= 14:\n        k2 ^= key[tail_index + 13] << 40\n    if tail_size >= 13:\n        k2 ^= key[tail_index + 12] << 32\n    if tail_size >= 12:\n        k2 ^= key[tail_index + 11] << 24\n    if tail_size >= 11:\n        k2 ^= key[tail_index + 10] << 16\n    if tail_size >= 10:\n        k2 ^= key[tail_index + 9] << 8\n    if tail_size >= 9:\n        k2 ^= key[tail_index + 8]\n\n    if tail_size > 8:\n        k2 = (k2 * c2) & 0xFFFFFFFFFFFFFFFF\n        k2 = (k2 << 33 | k2 >> 31) & 0xFFFFFFFFFFFFFFFF  \n        k2 = (k2 * c1) & 0xFFFFFFFFFFFFFFFF\n        h2 ^= k2\n\n    if tail_size >= 8:\n        k1 ^= key[tail_index + 7] << 56\n    if tail_size >= 7:\n        k1 ^= key[tail_index + 6] << 48\n    if tail_size >= 6:\n        k1 ^= key[tail_index + 5] << 40\n    if tail_size >= 5:\n        k1 ^= key[tail_index + 4] << 32\n    if tail_size >= 4:\n        k1 ^= key[tail_index + 3] << 24\n    if tail_size >= 3:\n        k1 ^= key[tail_index + 2] << 16\n    if tail_size >= 2:\n        k1 ^= key[tail_index + 1] << 8\n    if tail_size >= 1:\n        k1 ^= key[tail_index + 0]\n\n    if tail_size > 0:\n        k1 = (k1 * c1) & 0xFFFFFFFFFFFFFFFF\n        k1 = (k1 << 31 | k1 >> 33) & 0xFFFFFFFFFFFFFFFF  \n        k1 = (k1 * c2) & 0xFFFFFFFFFFFFFFFF\n        h1 ^= k1\n\n    \n    h1 ^= length\n    h2 ^= length\n\n    h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF\n    h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF\n\n 
   h1 = fmix(h1)\n    h2 = fmix(h2)\n\n    h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF\n    h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF\n\n    return h2 << 64 | h1", "docstring": "Implements 128-bit murmur3 hash for x64, as per ``pymmh3``, with some\nbugfixes.\n\nArgs:\nkey: data to hash\nseed: seed\n\nReturns:\ninteger hash", "source": "juraj-google-style"}
{"code": "def GetFile(self, map_name, dst_file, current_file, location=None):\n    if map_name == config.MAP_PASSWORD:\n        return self.GetPasswdFile(dst_file, current_file)\n    elif map_name == config.MAP_GROUP:\n        return self.GetGroupFile(dst_file, current_file)\n    elif map_name == config.MAP_SHADOW:\n        return self.GetShadowFile(dst_file, current_file)\n    elif map_name == config.MAP_NETGROUP:\n        return self.GetNetgroupFile(dst_file, current_file)\n    elif map_name == config.MAP_AUTOMOUNT:\n        return self.GetAutomountFile(dst_file, current_file, location=location)\n    raise error.UnsupportedMap('Source can not fetch %s' % map_name)", "docstring": "Retrieve a file from this source.\n\nArgs:\nmap_name: A string representation of the map whose file you want\ndst_file: Temporary filename to write to.\ncurrent_file: Path to the current cache.\nlocation: optional field used by automounts to indicate a specific map\n\nReturns:\npath to new file\n\nRaises:\nUnsupportedMap: for unknown source maps", "source": "github-repos"}
{"code": "def execute_edit(args, root_dir=None):\n    EDITOR = os.environ.get('EDITOR', 'vim')\n    key = args['key']\n    status = command_factory('status')({}, root_dir=root_dir)\n    if ((not isinstance(status['data'], str)) and (key in status['data'])):\n        if (status['data'][key]['status'] in ['queued', 'stashed']):\n            command = status['data'][key]['command']\n        else:\n            print(\"Entry is not 'queued' or 'stashed'\")\n            sys.exit(1)\n    else:\n        print('No entry with this key')\n        sys.exit(1)\n    with tempfile.NamedTemporaryFile(suffix='.tmp') as tf:\n        tf.write(command.encode('utf-8'))\n        tf.flush()\n        call([EDITOR, tf.name])\n        tf.seek(0)\n        edited_command = tf.read().decode('utf-8')\n    print_command_factory('edit')({'key': key, 'command': edited_command}, root_dir=root_dir)", "docstring": "Edit a existing queue command in the daemon.\n\nArgs:\nargs['key'] int: The key of the queue entry to be edited\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "codesearchnet"}
{"code": "def add_outbound_connection(self, uri):\n    LOGGER.debug('Adding connection to %s', uri)\n    conn = OutboundConnection(connections=self._connections, endpoint=uri, dispatcher=self._dispatcher, zmq_identity=self._zmq_identity, secured=self._secured, server_public_key=self._server_public_key, server_private_key=self._server_private_key, future_callback_threadpool=self._future_callback_threadpool, heartbeat=True, connection_timeout=self._connection_timeout)\n    self.outbound_connections[uri] = conn\n    conn.start()\n    self._add_connection(conn, uri)\n    connect_message = ConnectionRequest(endpoint=self._public_endpoint)\n    conn.send(validator_pb2.Message.NETWORK_CONNECT, connect_message.SerializeToString(), callback=partial(self._connect_callback, connection=conn))\n    return conn", "docstring": "Adds an outbound connection to the network.\n\nArgs:\nuri (str): The zmq-style (e.g. tcp://hostname:port) uri\nto attempt to connect to.", "source": "codesearchnet"}
{"code": "def component_mget(self, zip_data, components):\n        \n        if not isinstance(components, list):\n            print(\"Components param must be a list\")\n            return\n\n        query_params = {\"components\": \",\".join(components)}\n\n        return self.fetch_identifier_component(\n            \"zip/component_mget\", zip_data, query_params)", "docstring": "Call the zip component_mget endpoint\n\nArgs:\n- zip_data - As described in the class docstring.\n- components - A list of strings for each component to include in the request.\nExample: [\"zip/details\", \"zip/volatility\"]", "source": "juraj-google-style"}
{"code": "def _run_async(self, urls):\n        \n        loop = asyncio.get_event_loop()\n        results = loop.run_until_complete(self._async_loop(urls))\n        return results", "docstring": "Asynchronous event loop execution\n\nArgs:\nurls (list): URLs to fetch\n\nReturns:\nresults (obj): All URL requests' responses", "source": "juraj-google-style"}
{"code": "def log_softmax(logits, axis=None, name=None, dim=None):\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)\n    if axis is None:\n        axis = -1\n    return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)", "docstring": "Computes log softmax activations.\n\nFor each batch `i` and class `j` we have\n\nlogsoftmax = logits - log(reduce_sum(exp(logits), axis))\n\nArgs:\nlogits: A non-empty `Tensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\naxis: The dimension softmax would be performed on. The default is -1 which\nindicates the last dimension.\nname: A name for the operation (optional).\ndim: Deprecated alias for `axis`.\n\nReturns:\nA `Tensor`. Has the same type as `logits`. Same shape as `logits`.\n\nRaises:\nInvalidArgumentError: if `logits` is empty or `axis` is beyond the last\ndimension of `logits`.", "source": "github-repos"}
{"code": "def write(self, message, cur_time=None):\n        \n        if cur_time is None:\n            cur_time = time.time()\n        lines = self._line_buffer.add_string(message)\n        for line in lines:\n            \n            timestamp = ''\n            if self._prepend_timestamp:\n                timestamp = datetime.datetime.utcfromtimestamp(\n                    cur_time).isoformat() + ' '\n            line = u'{}{}{}'.format(self._line_prepend, timestamp, line)\n            self._fsapi.push(self._filename, line)", "docstring": "Write some text to the pusher.\n\nArgs:\nmessage: a string to push for this file.\ncur_time: used for unit testing. override line timestamp.", "source": "juraj-google-style"}
{"code": "def unzip(self, overwrite: bool = False):\n        \n\n        if self.zip_content and not overwrite:\n            raise FileExistsError(str(self.temp_dir))\n\n        LOGGER.debug('unzipping miz to temp dir')\n\n        try:\n\n            with ZipFile(str(self.miz_path)) as zip_file:\n\n                LOGGER.debug('reading infolist')\n\n                self.zip_content = [f.filename for f in zip_file.infolist()]\n\n                self._extract_files_from_zip(zip_file)\n\n        except BadZipFile:\n            raise BadZipFile(str(self.miz_path))\n\n        except:  \n            LOGGER.exception('error while unzipping miz file: %s', self.miz_path)\n            raise\n\n        LOGGER.debug('checking miz content')\n\n        \n        for miz_item in ['mission', 'options', 'warehouses', 'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource']:\n            if not Path(self.temp_dir.joinpath(miz_item)).exists():\n                LOGGER.error('missing file in miz: %s', miz_item)\n                raise FileNotFoundError(miz_item)\n\n        self._check_extracted_content()\n\n        LOGGER.debug('all files have been found, miz successfully unzipped')", "docstring": "Flattens a MIZ file into the temp dir\n\nArgs:\noverwrite: allow overwriting exiting files", "source": "juraj-google-style"}
{"code": "def get_full_psd_matrix(self):\n    if (self.matrix_m is not None):\n        return (self.matrix_h, self.matrix_m)\n    h_columns = []\n    for i in range((self.nn_params.num_hidden_layers + 1)):\n        current_col_elems = []\n        for j in range(i):\n            current_col_elems.append(tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))\n        if (i == 0):\n            current_col_elems.append(utils.diag(self.lambda_lu[i]))\n        else:\n            current_col_elems.append(utils.diag((self.lambda_lu[i] + self.lambda_quad[i])))\n        if (i < self.nn_params.num_hidden_layers):\n            current_col_elems.append(tf.matmul(utils.diag(((- 1) * self.lambda_quad[(i + 1)])), self.nn_params.weights[i]))\n        for j in range((i + 2), (self.nn_params.num_hidden_layers + 1)):\n            current_col_elems.append(tf.zeros([self.nn_params.sizes[j], self.nn_params.sizes[i]]))\n        current_column = tf.concat(current_col_elems, 0)\n        h_columns.append(current_column)\n    self.matrix_h = tf.concat(h_columns, 1)\n    self.matrix_h = (self.matrix_h + tf.transpose(self.matrix_h))\n    self.matrix_m = tf.concat([tf.concat([tf.reshape(self.nu, (1, 1)), tf.transpose(self.vector_g)], axis=1), tf.concat([self.vector_g, self.matrix_h], axis=1)], axis=0)\n    return (self.matrix_h, self.matrix_m)", "docstring": "Function that returns the tf graph corresponding to the entire matrix M.\n\nReturns:\nmatrix_h: unrolled version of tf matrix corresponding to H\nmatrix_m: unrolled tf matrix corresponding to M", "source": "codesearchnet"}
{"code": "async def get(self, uid: int, cached_msg: CachedMessage=None, requirement: FetchRequirement=FetchRequirement.METADATA) -> Optional[MessageT]:\n    ...", "docstring": "Return the message with the given UID.\n\nArgs:\nuid: The message UID.\ncached_msg: The last known cached message.\nrequirement: The data required from each message.\n\nRaises:\nIndexError: The UID is not valid in the mailbox.", "source": "codesearchnet"}
{"code": "def monitor(service_addr, duration_ms, level=1):\n    return _pywrap_profiler_plugin.monitor(_strip_prefix(service_addr, _GRPC_PREFIX), duration_ms, level, True)", "docstring": "Sends grpc requests to profiler server to perform on-demand monitoring.\n\nThe monitoring result is a light weight performance summary of your model\nexecution. This method will block the caller thread until it receives the\nmonitoring result. This method currently supports Cloud TPU only.\n\nArgs:\nservice_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466.\nduration_ms: Duration of monitoring in ms.\nlevel: Choose a monitoring level between 1 and 2 to monitor your job. Level\n2 is more verbose than level 1 and shows more metrics.\n\nReturns:\nA string of monitoring output.\n\nExample usage:\n\n```python\n# Continuously send gRPC requests to the Cloud TPU to monitor the model\n# execution.\n\nfor query in range(0, 100):\nprint(\ntf.profiler.experimental.client.monitor('grpc://10.0.0.2:8466', 1000))\n```", "source": "github-repos"}
{"code": "def upload_benchmark_files(opts):\n    client = datastore.Client()\n    for fname in list_files_by_mtime(opts.datadir):\n        fpath = os.path.join(opts.datadir, fname)\n        try:\n            with open(fpath, 'r') as fd:\n                if trylock(fd):\n                    upload_benchmark_data(client, fd.read())\n                    shutil.move(fpath, os.path.join(opts.archivedir, fname))\n        except Exception as e:\n            print(\"Cannot process '%s', skipping. Error: %s\" % (fpath, e))", "docstring": "Find benchmark files, process them, and upload their data to the datastore.\n\nLocate benchmark files in the data directory, process them, and upload their\ndata to the datastore.  After processing each file, move it to the archive\ndirectory for safe-keeping.  Each file is locked for processing, which allows\nmultiple uploader instances to run concurrently if needed, each one handling\ndifferent benchmark files, skipping those already locked by another.\n\nArgs:\nopts: command line options object\n\nNote: To use locking, the file is first opened, then its descriptor is used to\nlock and read it.  The lock is released when the file is closed.  Do not open\nthat same file a 2nd time while the lock is already held, because when that\n2nd file descriptor is closed, the lock will be released prematurely.", "source": "github-repos"}
{"code": "def prepare_context(pipeline, context_in_string, context):\n    logger.debug('starting')\n    parsed_context = get_parsed_context(pipeline=pipeline, context_in_string=context_in_string)\n    context.update(parsed_context)\n    logger.debug('done')", "docstring": "Prepare context for pipeline run.\n\nArgs:\npipeline: dict. Dictionary representing the pipeline.\ncontext_in_string: string. Argument string used to initialize context.\ncontext: pypyr.context.Context. Merge any new context generated from\ncontext_in_string into this context instance.\n\nReturns:\nNone. The context instance to use for the pipeline run is contained\nin the context arg, it's not passed back as a function return.", "source": "codesearchnet"}
{"code": "def AddEvent(self, event):\n    \n    self._RaiseIfNotWritable()\n\n    \n    \n    event_data_identifier = event.GetEventDataIdentifier()\n    if event_data_identifier:\n      if not isinstance(event_data_identifier, identifiers.FakeIdentifier):\n        raise IOError('Unsupported event data identifier type: {0:s}'.format(\n            type(event_data_identifier)))\n\n    event = self._PrepareAttributeContainer(event)\n\n    self._events.append(event)\n    self.number_of_events += 1", "docstring": "Adds an event.\n\nArgs:\nevent (EventObject): event.\n\nRaises:\nIOError: when the storage writer is closed or\nif the event data identifier type is not supported.\nOSError: when the storage writer is closed or\nif the event data identifier type is not supported.", "source": "juraj-google-style"}
{"code": "def from_int(i):\n    point = ECPointAffine.from_int(bitcoin_curve, i)\n    return PublicKey.from_point(point)", "docstring": "Generates a public key object from an integer.\n\nNote:\nThis assumes that the upper 32 bytes of the integer\nare the x component of the public key point and the\nlower 32 bytes are the y component.\n\nArgs:\ni (Bignum): A 512-bit integer representing the public\nkey point on the secp256k1 curve.\n\nReturns:\nPublicKey: A PublicKey object.", "source": "codesearchnet"}
{"code": "def _get_device_dict_and_cores(devices):\n    device_map = collections.defaultdict(list)\n    num_cores = 0\n    for device in devices:\n        match = _TPU_DEVICE_REGEX.match(device.name)\n        if match:\n            host_id = match.group('host_id')\n            core_id = match.group('core_id')\n            device_map[host_id].append(core_id)\n            num_cores += 1\n    return DeviceDetails(device_map, num_cores)", "docstring": "Returns a dict of hosts to cores and total cores given devices names.\n\nReturns a namedtuple with two attributes:\ndevice_map: A map of host_ids to a list of core_ids.\ntotal_cores: The total number of cores within the TPU system.\n\nArgs:\ndevices: A list of devices returned by session.list_devices()", "source": "github-repos"}
{"code": "def get_markdown_files(self, dir_):\n    md_files = OrderedSet()\n    for (root, _, files) in os.walk(dir_):\n        for name in files:\n            split = os.path.splitext(name)\n            if (len(split) == 1):\n                continue\n            if (split[1] in ('.markdown', '.md', '.yaml')):\n                md_files.add(os.path.join(root, name))\n    return md_files", "docstring": "Get all the markdown files in a folder, recursively\n\nArgs:\ndir_: str, a toplevel folder to walk.", "source": "codesearchnet"}
{"code": "def annotate(self, framedata):\n    for artist in self.annotation_artists:\n        artist.remove()\n    self.annotation_artists = []\n    for annotation in self.annotations:\n        if (annotation[2] > framedata):\n            return\n        if (annotation[2] == framedata):\n            pos = annotation[0:2]\n            shape = self.annotations_default['shape']\n            color = self.annotations_default['color']\n            size = self.annotations_default['size']\n            line = self.annotations_default['line']\n            if (len(annotation) > 3):\n                shape = annotation[3].get('shape', shape)\n                color = annotation[3].get('color', color)\n                size = annotation[3].get('size', size)\n                line = annotation[3].get('line', line)\n            if ((shape == 'CIRC') and hasattr(size, '__len__')):\n                size = 30\n            if (not hasattr(color, '__len__')):\n                color = ((color,) * 3)\n            if (shape == 'RECT'):\n                patch = patches.Rectangle(((pos[0] - (size[0] \n            elif (shape == 'CIRC'):\n                patch = patches.CirclePolygon(pos, radius=size, fc='none', ec=color, lw=line)\n            self.annotation_artists.append(patch)\n            self.axes_processed.add_artist(self.annotation_artists[(- 1)])", "docstring": "Annotates the processed axis with given annotations for\nthe provided framedata.\n\nArgs:\nframedata: The current frame number.", "source": "codesearchnet"}
{"code": "def outgoing_edges(self, node):\n        \n        \n        edges = self.edges()\n        out_edges = []\n        for out_node, in_node in edges:\n            if node is out_node:\n                out_edges.append((out_node, in_node))\n        return tuple(out_edges)", "docstring": "Returns a ``tuple`` of outgoing edges for a **node object**.\n\nArguments:\n\n- node(``object``) **node object** present in the graph to be queried\nfor outgoing edges.", "source": "juraj-google-style"}
{"code": "def is_artifact_optional(chain, task_id, path):\n    upstream_artifacts = chain.task['payload'].get('upstreamArtifacts', [])\n    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)\n    return (path in optional_artifacts_per_task_id.get(task_id, []))", "docstring": "Tells whether an artifact is flagged as optional or not.\n\nArgs:\nchain (ChainOfTrust): the chain of trust object\ntask_id (str): the id of the aforementioned task\n\nReturns:\nbool: True if artifact is optional", "source": "codesearchnet"}
{"code": "def remove(self, force=False):\n    return self.client.api.remove_volume(self.id, force=force)", "docstring": "Remove this volume.\n\nArgs:\nforce (bool): Force removal of volumes that were already removed\nout of band by the volume driver plugin.\nRaises:\n:py:class:`docker.errors.APIError`\nIf volume failed to remove.", "source": "codesearchnet"}
{"code": "def cv_squared(x):\n  \n  epsilon = 1e-10\n  float_size = tf.to_float(tf.size(x)) + epsilon\n  mean = tf.reduce_sum(x) / float_size\n  variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size\n  return variance / (tf.square(mean) + epsilon)", "docstring": "The squared coefficient of variation of a sample.\n\nUseful as a loss to encourage a positive distribution to be more uniform.\nEpsilons added for numerical stability.\nReturns 0 for an empty Tensor.\n\nArgs:\nx: a `Tensor`.\n\nReturns:\na `Scalar`.", "source": "juraj-google-style"}
{"code": "def create_transformation(self, rotation=None, translation=None):\n        \n        mat = None\n        if rotation is not None:\n            mat = Matrix44.from_eulers(Vector3(rotation))\n\n        if translation is not None:\n            trans = matrix44.create_from_translation(Vector3(translation))\n            if mat is None:\n                mat = trans\n            else:\n                mat = matrix44.multiply(mat, trans)\n\n        return mat", "docstring": "Creates a transformation matrix woth rotations and translation.\n\nArgs:\nrotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\ntranslation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n\nReturns:\nA 4x4 matrix as a :py:class:`numpy.array`", "source": "juraj-google-style"}
{"code": "def zero(duration: int, name: str = None) -> SamplePulse:\n    \n    return _sampled_zero_pulse(duration, name=name)", "docstring": "Generates zero-sampled `SamplePulse`.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def _validate_bn_layer(self, layer):\n    \n    if (not isinstance(layer, tf.keras.layers.BatchNormalization) and\n        not isinstance(layer, tf.compat.v1.layers.BatchNormalization)):\n      raise ValueError(\n          \"batchnorm_layer must be an instance of BatchNormalization layer.\")\n    if layer.renorm:\n      raise ValueError(\"BatchNorm Bijector does not support renormalization.\")\n    if layer.virtual_batch_size:\n      raise ValueError(\n          \"BatchNorm Bijector does not support virtual batch sizes.\")", "docstring": "Check for valid BatchNormalization layer.\n\nArgs:\nlayer: Instance of `tf.layers.BatchNormalization`.\nRaises:\nValueError: If batchnorm_layer argument is not an instance of\n`tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or\nif `batchnorm_layer.virtual_batch_size` is specified.", "source": "juraj-google-style"}
{"code": "def quarter_ellipsis_functions(xx, yy):\n    \n    npxx = np.array(xx)\n    npyy = np.array(yy)\n    if np.any(npxx == npyy):\n        raise RuntimeError('Invalid points for quarter_ellipsis_functions')\n    if np.all(npxx < npyy) or np.all(npxx > npyy):\n        if npxx[0] < npyy[0]:\n            p1 = npxx\n            p2 = npyy\n        else:\n            p1 = npyy\n            p2 = npxx\n        c_lower = np.array([p1[0], p2[1]])\n        c_upper = np.array([p2[0], p1[1]])\n        b2 = (p2[1] - p1[1]) ** 2\n    else:\n        if npxx[0] < npyy[0]:\n            p1 = npxx\n            p2 = npyy\n        else:\n            p1 = npyy\n            p2 = npxx\n        c_lower = np.array([p2[0], p1[1]])\n        c_upper = np.array([p1[0], p2[1]])\n        b2 = (p1[1] - p2[1]) ** 2\n    b2overa2 = b2 / (p2[0] - p1[0]) ** 2\n\n    def lower(x):\n        return c_lower[1] - np.sqrt(b2 - b2overa2 * (x - c_lower[0]) ** 2)\n\n    def upper(x):\n        return c_upper[1] + np.sqrt(b2 - b2overa2 * (x - c_upper[0]) ** 2)\n\n    return {'lower': lower, 'upper': upper}", "docstring": "Method that creates two quarter-ellipse functions based on points xx and yy. The ellipsis is supposed to\nbe aligned with the axes. The two ellipsis pass through the two points xx and yy.\n\nArgs:\nxx:\nFirst point\nyy:\nSecond point\n\nReturns:\nA dictionary with the lower and upper quarter ellipsis functions.", "source": "juraj-google-style"}
{"code": "def get_registration_id_info(self, registration_id):\n        \n        response = self.registration_info_request(registration_id)\n        if response.status_code == 200:\n            return response.json()\n        return None", "docstring": "Returns details related to a registration id if it exists otherwise return None\n\nArgs:\nregistration_id: id to be checked\n\nReturns:\ndict: info about registration id\nNone: if id doesn't exist", "source": "juraj-google-style"}
{"code": "def deploy(target):\n    if (not os.getenv(CIRCLECI_ENV_VAR)):\n        raise EnvironmentError('Must be on CircleCI to run this script')\n    current_branch = os.getenv('CIRCLE_BRANCH')\n    if ((target == 'PROD') and (current_branch != 'master')):\n        raise EnvironmentError(f'Refusing to deploy to production from branch {current_branch!r}. Production deploys can only be made from master.')\n    if (target in ('PROD', 'TEST')):\n        pypi_username = os.getenv(f'{target}_PYPI_USERNAME')\n        pypi_password = os.getenv(f'{target}_PYPI_PASSWORD')\n    else:\n        raise ValueError(f\"Deploy target must be 'PROD' or 'TEST', got {target!r}.\")\n    if (not (pypi_username and pypi_password)):\n        raise EnvironmentError(f\"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' environment variables. These are required to push to PyPI.\")\n    os.environ['TWINE_USERNAME'] = pypi_username\n    os.environ['TWINE_PASSWORD'] = pypi_password\n    _shell('git config --global user.email \"dev@cloverhealth.com\"')\n    _shell('git config --global user.name \"Circle CI\"')\n    _shell('git config push.default current')\n    ret = _shell('make version', stdout=subprocess.PIPE)\n    version = ret.stdout.decode('utf-8').strip()\n    print(f'Deploying version {version!r}...')\n    _shell(f'git tag -f -a {version} -m \"Version {version}\"')\n    _shell(f'sed -i.bak \"s/^__version__ = .*/__version__ = {version!r}/\" */version.py')\n    _shell('python setup.py sdist bdist_wheel')\n    _shell('git add ChangeLog AUTHORS */version.py')\n    _shell('git commit --no-verify -m \"Merge autogenerated files [skip ci]\"')\n    _pypi_push('dist')\n    _shell('git push --follow-tags')\n    print(f'Deployment complete. Latest version is {version}.')", "docstring": "Deploys the package and documentation.\n\nProceeds in the following steps:\n\n1. Ensures proper environment variables are set and checks that we are on Circle CI\n2. Tags the repository with the new version\n3. Creates a standard distribution and a wheel\n4. Updates version.py to have the proper version\n5. Commits the ChangeLog, AUTHORS, and version.py file\n6. Pushes to PyPI\n7. Pushes the tags and newly committed files\n\nRaises:\n`EnvironmentError`:\n- Not running on CircleCI\n- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables\nare missing\n- Attempting to deploy to production from a branch that isn't master", "source": "codesearchnet"}
{"code": "def recalculate_concepts(self, concepts, lang=None):\n    if (len(concepts) == 0):\n        return\n    if (lang is None):\n        items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))\n    else:\n        items = Concept.objects.get_concept_item_mapping(lang=lang)\n    environment = get_environment()\n    mastery_threshold = get_mastery_trashold()\n    for (user, concepts) in concepts.items():\n        all_items = list(set(flatten([items[c] for c in concepts])))\n        answer_counts = environment.number_of_answers_more_items(all_items, user)\n        correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)\n        predictions = dict(list(zip(all_items, get_predictive_model().predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))\n        new_user_stats = []\n        stats_to_delete_condition = Q()\n        for concept in concepts:\n            answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(time_spent=Sum('response_time'), sessions=Count('session', True), time_first=Min('time'), time_last=Max('time'))\n            stats = {'answer_count': sum((answer_counts[i] for i in items[concept])), 'correct_answer_count': sum((correct_answer_counts[i] for i in items[concept])), 'item_count': len(items[concept]), 'practiced_items_count': sum([(answer_counts[i] > 0) for i in items[concept]]), 'mastered_items_count': sum([(predictions[i] >= mastery_threshold) for i in items[concept]]), 'prediction': (sum([predictions[i] for i in items[concept]]) / len(items[concept])), 'time_spent': (answer_aggregates['time_spent'] / 1000), 'session_count': answer_aggregates['sessions'], 'time_first': answer_aggregates['time_first'].timestamp(), 'time_last': answer_aggregates['time_last'].timestamp()}\n            stats_to_delete_condition |= Q(user=user, concept=concept)\n            for (stat_name, value) in stats.items():\n                new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))\n        self.filter(stats_to_delete_condition).delete()\n        self.bulk_create(new_user_stats)", "docstring": "Recalculated given concepts for given users\n\nArgs:\nconcepts (dict): user id (int -> set of concepts to recalculate)\nlang(Optional[str]): language used to get items in all concepts (cached).\nDefaults to None, in that case are get items only in used concepts", "source": "codesearchnet"}
{"code": "def filter(self, nodes):\n    filtered_dag = DAG()\n    for node in nodes:\n        filtered_dag.add_node_if_not_exists(node)\n        for edge in self.all_downstreams(node):\n            filtered_dag.add_node_if_not_exists(edge)\n    for (node, edges) in self.graph.items():\n        if (node in filtered_dag.graph):\n            filtered_dag.graph[node] = edges\n    return filtered_dag", "docstring": "Returns a new DAG with only the given nodes and their\ndependencies.\n\nArgs:\nnodes (list): The nodes you are interested in.\n\nReturns:\n:class:`stacker.dag.DAG`: The filtered graph.", "source": "codesearchnet"}
{"code": "def configure_sbi(self, sbi_config: dict, schema_path: str = None):\n        \n        if not self.active:\n            raise RuntimeError(\"Unable to add SBIs to inactive subarray!\")\n        sbi_config['subarray_id'] = self._id\n        sbi = SchedulingBlockInstance.from_config(sbi_config, schema_path)\n        self._add_sbi_id(sbi_config['id'])\n        return sbi", "docstring": "Add a new SBI to the database associated with this subarray.\n\nArgs:\nsbi_config (dict): SBI configuration.\nschema_path (str, optional): Path to the SBI config schema.", "source": "juraj-google-style"}
{"code": "def load(self, file_name):\n        \n\n        new_rundata = self.loader(file_name)\n        new_rundata = self.inspect(new_rundata)\n        return new_rundata", "docstring": "Load a raw data-file\n\nArgs:\nfile_name (path)\n\nReturns:\nloaded test", "source": "juraj-google-style"}
{"code": "def delete(filething):\n    \n\n    t = OggTheora(filething)\n    filething.fileobj.seek(0)\n    t.delete(filething)", "docstring": "delete(filething)\n\nArguments:\nfilething (filething)\nRaises:\nmutagen.MutagenError\n\nRemove tags from a file.", "source": "juraj-google-style"}
{"code": "def Validate(self, value):\n    \n    if value is None:\n      return None\n\n    if not isinstance(value, self.rdfclass):\n      \n      try:\n        r = self.rdfclass()\n        r.FromDict(value)\n        return r\n      except (AttributeError, TypeError, rdfvalue.InitializeError):\n        \n        \n        \n        raise TypeValueError(\"Value for arg %s should be an %s\" %\n                             (self.name, self.rdfclass.__name__))\n\n    return value", "docstring": "Validate the value.\n\nArgs:\nvalue: Value is expected to be a dict-like object that a given RDFStruct\ncan be initialized from.\n\nRaises:\nTypeValueError: If the value is not a valid dict-like object that a given\nRDFStruct can be initialized from.\n\nReturns:\nA valid instance of self.rdfclass or None.", "source": "juraj-google-style"}
{"code": "def get_value(data, key):\n    \n    ref = data\n    try:\n        for subkey in key.split('.'):\n            if isinstance(ref, dict):\n                ref = ref[subkey]\n            else:\n                print('CRITICAL: Cannot use subkey %s on non-dictionary element' % subkey)\n                return None\n        return ref\n\n    \n    except KeyError:\n        return None", "docstring": "Follow the dot notation to get the proper field, then perform the action\n\nArgs:\ndata: the data as a dictionary (required to be a dictionary)\nkey: the key (as dot notation) into the data that gives the field (IP.src)\n\nReturns:\nthe value of the field(subfield) if it exist, otherwise None", "source": "juraj-google-style"}
{"code": "def get(self, key, default=None):\n    return self._fetch_cmd(b'get', [key], False).get(key, default)", "docstring": "The memcached \"get\" command, but only for one key, as a convenience.\n\nArgs:\nkey: str, see class docs for details.\ndefault: value that will be returned if the key was not found.\n\nReturns:\nThe value for the key, or default if the key wasn't found.", "source": "codesearchnet"}
{"code": "def structure_2_lmpdata(structure, ff_elements=None, atom_style='charge'):\n    s = structure.get_sorted_structure()\n    (a, b, c) = s.lattice.abc\n    m = s.lattice.matrix\n    xhi = a\n    xy = np.dot(m[1], (m[0] / xhi))\n    yhi = np.sqrt(((b ** 2) - (xy ** 2)))\n    xz = np.dot(m[2], (m[0] / xhi))\n    yz = ((np.dot(m[1], m[2]) - (xy * xz)) / yhi)\n    zhi = np.sqrt((((c ** 2) - (xz ** 2)) - (yz ** 2)))\n    box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]\n    box_tilt = [xy, xz, yz]\n    box_tilt = (None if (not any(box_tilt)) else box_tilt)\n    box = LammpsBox(box_bounds, box_tilt)\n    new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])\n    s.lattice = new_latt\n    symbols = list(s.symbol_set)\n    if ff_elements:\n        symbols.extend(ff_elements)\n    elements = sorted((Element(el) for el in set(symbols)))\n    mass_info = [tuple(([i.symbol] * 2)) for i in elements]\n    ff = ForceField(mass_info)\n    topo = Topology(s)\n    return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)", "docstring": "Converts a structure to a LammpsData object with no force field\nparameters and topologies.\n\nArgs:\nstructure (Structure): Input structure.\nff_elements ([str]): List of strings of elements that must be\npresent due to force field settings but not necessarily in\nthe structure. Default to None.\natom_style (str): Choose between \"atomic\" (neutral) and\n\"charge\" (charged). Default to \"charge\".\n\nReturns:\nLammpsData", "source": "codesearchnet"}
{"code": "def get_metric(name, constructor, *args, **kwargs):\n    metric = _registered_metrics.get(name)\n    if metric is not None:\n        return metric\n    else:\n        return constructor(name, *args, **kwargs)", "docstring": "Return an existing metric or create a new one for the given name.\n\nArgs:\nname: The name of the metric.\nconstructor: A class to instantiate if a new metric is required.\n*args: Additional positional args to pass to the constructor.\n**kwargs: Keyword args for the constructor.\n\nReturns:\nThe current metric registered to name, or a new one created by\ninvoking constructor(name, *args, **kwargs).", "source": "github-repos"}
{"code": "def _translate_name(name):\n    underscored = inflection.underscore(name)\n    dasherized = inflection.dasherize(underscored)\n    words = dasherized.split('-')\n    last_word = words.pop()\n    words.append(inflection.pluralize(last_word))\n    return '-'.join(words)", "docstring": "Translate the class name to the API endpoint.\n\nFor example, Car would become cars, FastCar would become fast-cars.\n\nArgs:\nname (string): Camel case name (singular)\n\nReturns:\nstring: A pluraised, dasherized string.", "source": "codesearchnet"}
{"code": "def resize(self, container, height, width):\n        \n        params = {'h': height, 'w': width}\n        url = self._url(\"/containers/{0}/resize\", container)\n        res = self._post(url, params=params)\n        self._raise_for_status(res)", "docstring": "Resize the tty session.\n\nArgs:\ncontainer (str or dict): The container to resize\nheight (int): Height of tty session\nwidth (int): Width of tty session\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def GetValueLength(rd, pos):\n    rd = bytearray(rd)\n    key = rd[pos]\n    if (key == LONG_ITEM_ENCODING):\n        if ((pos + 1) < len(rd)):\n            return (3, rd[(pos + 1)])\n        else:\n            raise errors.HidError('Malformed report descriptor')\n    else:\n        code = (key & 3)\n        if (code <= 2):\n            return (1, code)\n        elif (code == 3):\n            return (1, 4)\n    raise errors.HidError('Cannot happen')", "docstring": "Get value length for a key in rd.\n\nFor a key at position pos in the Report Descriptor rd, return the length\nof the associated value.  This supports both short and long format\nvalues.\n\nArgs:\nrd: Report Descriptor\npos: The position of the key in rd.\n\nReturns:\n(key_size, data_len) where key_size is the number of bytes occupied by\nthe key and data_len is the length of the value associated by the key.", "source": "codesearchnet"}
{"code": "def on_message(self, message):\n        \n        \n        try:\n            self.log.debug(\"Got message %s\", message)\n            d = json_decode(message)\n            response = deserialize_object(d, Response)\n            if isinstance(response, (Return, Error)):\n                request = self._request_lookup.pop(response.id)\n                if isinstance(response, Error):\n                    \n                    response.message = ResponseError(response.message)\n            else:\n                request = self._request_lookup[response.id]\n            \n            cothread.Callback(request.callback, response)\n        except Exception:\n            \n            \n            self.log.exception(\"on_message(%r) failed\", message)", "docstring": "Pass response from server to process receive queue\n\nArgs:\nmessage(str): Received message", "source": "juraj-google-style"}
{"code": "def __init__(self,\n               initializer=None,\n               age=None,\n               base=\"aff4:/flows\",\n               queue=DEFAULT_FLOW_QUEUE,\n               flow_name=None):\n    \n    if initializer is None:\n      \n      if flow_name is None:\n        flow_name = random.UInt32()\n\n      if isinstance(flow_name, int):\n        initializer = RDFURN(base).Add(\"%s:%X\" % (queue.Basename(), flow_name))\n      else:\n        initializer = RDFURN(base).Add(\"%s:%s\" % (queue.Basename(), flow_name))\n    else:\n      if isinstance(initializer, RDFURN):\n        try:\n          self.ValidateID(initializer.Basename())\n        except ValueError as e:\n          raise InitializeError(\n              \"Invalid URN for SessionID: %s, %s\" % (initializer, e))\n\n    super(SessionID, self).__init__(initializer=initializer, age=age)", "docstring": "Constructor.\n\nArgs:\ninitializer: A string or another RDFURN.\nage: The age of this entry.\nbase: The base namespace this session id lives in.\nqueue: The queue to use.\nflow_name: The name of this flow or its random id.\n\nRaises:\nInitializeError: The given URN cannot be converted to a SessionID.", "source": "juraj-google-style"}
{"code": "def NewFromJSON(data):\n        \n        return Comment(\n            body=data.get('body', None),\n            posted_at=data.get('posted_at', None),\n            user=User.NewFromJSON(data.get('user', None))\n        )", "docstring": "Create a new Comment instance from a JSON dict.\n\nArgs:\ndata (dict): JSON dictionary representing a Comment.\n\nReturns:\nA Comment instance.", "source": "juraj-google-style"}
{"code": "def from_coffeescript(cls, func, v_func, args={}):\n    compiled = nodejs_compile(func, lang='coffeescript', file='???')\n    if ('error' in compiled):\n        raise CompilationError(compiled.error)\n    v_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')\n    if ('error' in v_compiled):\n        raise CompilationError(v_compiled.error)\n    return cls(func=compiled.code, v_func=v_compiled.code, args=args)", "docstring": "Create a ``CustomJSTransform`` instance from a pair of CoffeeScript\nsnippets. The function bodies are translated to JavaScript functions\nusing node and therefore require return statements.\n\nThe ``func`` snippet namespace will contain the variable ``x`` (the\nuntransformed value) at render time. The ``v_func`` snippet namespace\nwill contain the variable ``xs`` (the untransformed vector) at render\ntime.\n\nExample:\n\n.. code-block:: coffeescript\n\nfunc = \"return Math.cos(x)\"\nv_func = \"return [Math.cos(x) for x in xs]\"\n\ntransform = CustomJSTransform.from_coffeescript(func, v_func)\n\nArgs:\nfunc (str) : a coffeescript snippet to transform a single ``x`` value\n\nv_func (str) : a coffeescript snippet function to transform a vector ``xs``\n\nReturns:\nCustomJSTransform", "source": "codesearchnet"}
{"code": "def GenerateDateTripsDeparturesList(self, date_start, date_end):\n    \n\n    service_id_to_trips = defaultdict(lambda: 0)\n    service_id_to_departures = defaultdict(lambda: 0)\n    for trip in self.GetTripList():\n      headway_start_times = trip.GetFrequencyStartTimes()\n      if headway_start_times:\n        trip_runs = len(headway_start_times)\n      else:\n        trip_runs = 1\n\n      service_id_to_trips[trip.service_id] += trip_runs\n      service_id_to_departures[trip.service_id] += (\n          (trip.GetCountStopTimes() - 1) * trip_runs)\n\n    date_services = self.GetServicePeriodsActiveEachDate(date_start, date_end)\n    date_trips = []\n\n    for date, services in date_services:\n      day_trips = sum(service_id_to_trips[s.service_id] for s in services)\n      day_departures = sum(\n          service_id_to_departures[s.service_id] for s in services)\n      date_trips.append((date, day_trips, day_departures))\n    return date_trips", "docstring": "Return a list of (date object, number of trips, number of departures).\n\nThe list is generated for dates in the range [date_start, date_end).\n\nArgs:\ndate_start: The first date in the list, a date object\ndate_end: The first date after the list, a date object\n\nReturns:\na list of (date object, number of trips, number of departures) tuples", "source": "juraj-google-style"}
{"code": "def __init__(self, config=None):\n        \n        self.driver = get_database_instance(config)\n        self.logger = logging.getLogger('Plugin')\n        logging.basicConfig(level=logging.INFO)", "docstring": "Initialize a :class:`~.Plugin` instance and connect to MongoDB.\nArgs:\n*nodes (str): One or more URLs of MongoDB nodes to\nconnect to as the persistence layer", "source": "juraj-google-style"}
{"code": "def state_range_type(self) -> Sequence[str]:\n    fluents = self.domain.state_fluents\n    ordering = self.domain.state_fluent_ordering\n    return self._fluent_range_type(fluents, ordering)", "docstring": "The range type of each state fluent in canonical order.\n\nReturns:\nSequence[str]: A tuple of range types representing\nthe range of each fluent.", "source": "codesearchnet"}
{"code": "def _percentile(self, values, percent, key=lambda x: x):\n        \n        vals = sorted(values)\n        k = (len(vals) - 1) * (percent / 100)\n        f = math.floor(k)\n        c = math.ceil(k)\n        if f == c:\n            return key(vals[int(k)])\n        d0 = key(vals[int(f)]) * (c - k)\n        d1 = key(vals[int(c)]) * (k - f)\n        return d0 + d1", "docstring": "Find the percentile of a list of values.\n\nArgs:\nvalues: A list of values for which percentiles are desired\npercent: A float value from 0 to 100 representing the requested percentile.\nkey: optional key function to compute value from each element of N.\n\nReturn:\nThe percentile of the values", "source": "juraj-google-style"}
{"code": "def load_function_def_library(library, saved_object_graph=None, load_shared_name_suffix=None, wrapper_function=None):\n    library_function_names = set((fdef.signature.name for fdef in library.function))\n    functions = {}\n    renamed_functions = {}\n    if ops.executing_eagerly_outside_functions():\n        graph = ops.Graph()\n    else:\n        graph = ops.get_default_graph()\n    if load_shared_name_suffix is None:\n        load_shared_name_suffix = '_load_{}'.format(ops.uid())\n    library_gradient_names = {}\n    new_gradient_op_types = {}\n    gradients_to_register = {}\n    for gdef in library.registered_gradients:\n        if gdef.registered_op_type:\n            new_op_type = custom_gradient.generate_name()\n            old_op_type = compat.as_bytes(gdef.registered_op_type)\n            library_gradient_names[old_op_type] = gdef.gradient_func\n            new_gradient_op_types[old_op_type] = new_op_type\n            gradients_to_register[gdef.gradient_func] = new_op_type\n    function_deps = {}\n    for fdef in library.function:\n        function_deps[fdef.signature.name] = _list_function_deps(fdef, library_function_names, library_gradient_names)\n    loaded_gradients = {}\n    for fdef in _sort_function_defs(library, function_deps):\n        orig_name = _fix_fdef_in_place(fdef, functions, load_shared_name_suffix, new_gradient_op_types)\n        structured_input_signature = None\n        structured_outputs = None\n        if saved_object_graph is not None and orig_name in saved_object_graph.concrete_functions:\n            proto = saved_object_graph.concrete_functions[orig_name]\n            structured_input_signature = nested_structure_coder.decode_proto(proto.canonicalized_input_signature)\n            structured_outputs = nested_structure_coder.decode_proto(proto.output_signature)\n        with graph.as_default():\n            func_graph = function_def_lib.function_def_to_graph(fdef, structured_input_signature=structured_input_signature, structured_outputs=structured_outputs)\n        _restore_gradient_functions(func_graph, renamed_functions, loaded_gradients)\n        for dep in function_deps[orig_name]:\n            functions[dep].add_to_graph(func_graph)\n        if '_input_shapes' in fdef.attr:\n            del fdef.attr['_input_shapes']\n        function_type = function_type_lib.from_structured_signature(func_graph.structured_input_signature, func_graph.structured_outputs, func_graph.function_captures.capture_types)\n        func = function_lib.ConcreteFunction.from_func_graph(func_graph, function_type, attrs=fdef.attr)\n        if wrapper_function:\n            func = wrapper_function(func)\n        func.add_to_graph(graph)\n        functions[orig_name] = func\n        renamed_functions[func.name] = func\n        if any((op.type == 'TRTEngineOp' for op in func_graph.get_operations())):\n            func.add_to_graph(ops.get_default_graph())\n        if orig_name in gradients_to_register:\n            gradient_op_type = gradients_to_register[orig_name]\n            loaded_gradients[compat.as_bytes(gradient_op_type)] = func\n            ops.RegisterGradient(gradient_op_type)(_gen_gradient_func(func))\n    return functions", "docstring": "Load a set of functions as concrete functions without captured inputs.\n\nFunctions names are manipulated during load such that they do not overlap\nwith previously created ones.\n\nGradients are re-registered under new names. 
Ops that reference the gradients\nare updated to reflect the new registered names.\n\nArgs:\nlibrary: FunctionDefLibrary proto message.\nsaved_object_graph: SavedObjectGraph proto message. If not passed in,\nconcrete function structured signatures and outputs will not be set.\nload_shared_name_suffix: If specified, used to uniquify shared names.\nOtherwise, a unique name is generated.\nwrapper_function: An object that will be wrapped on newly created functions.\n\nReturns:\nMap of original function names in the library to instances of\n`ConcreteFunction` without captured inputs.\n\nRaises:\nValueError: if functions dependencies have a cycle.", "source": "github-repos"}
{"code": "def rename(self, source_file_names, destination_file_names):\n    if not len(source_file_names) == len(destination_file_names):\n        message = 'Unable to rename unequal number of sources and destinations.'\n        raise BeamIOError(message)\n    src_dest_pairs = list(zip(source_file_names, destination_file_names))\n    results = self._blobstorageIO().rename_files(src_dest_pairs)\n    exceptions = {(src, dest): error for src, dest, error in results if error is not None}\n    if exceptions:\n        raise BeamIOError('Rename operation failed.', exceptions)", "docstring": "Rename the files at the source list to the destination list.\nSource and destination lists should be of the same size.\n\nArgs:\nsource_file_names: List of file paths that need to be moved\ndestination_file_names: List of destination_file_names for the files\n\nRaises:\n``BeamIOError``: if any of the rename operations fail", "source": "github-repos"}
{"code": "def get_by_uri(self, uri):\n        \n        self._helper.validate_resource_uri(uri)\n        data = self._helper.do_get(uri)\n\n        if data:\n            new_resource = self.new(self._connection, data)\n        else:\n            new_resource = None\n\n        return new_resource", "docstring": "Retrieves a resource by its URI\n\nArgs:\nuri: URI of the resource\n\nReturns:\nResource object", "source": "juraj-google-style"}
{"code": "def get_provider_fn_decorations(provider_fn, default_arg_names):\n    \n    if hasattr(provider_fn, _IS_WRAPPER_ATTR):\n        provider_decorations = getattr(provider_fn, _PROVIDER_DECORATIONS_ATTR)\n        if provider_decorations:\n            expanded_provider_decorations = []\n            for provider_decoration in provider_decorations:\n                \n                \n                if provider_decoration.in_scope_id is None:\n                    provider_decoration.in_scope_id = scoping.DEFAULT_SCOPE\n                if provider_decoration.arg_name is not None:\n                    expanded_provider_decorations.append(provider_decoration)\n                else:\n                    expanded_provider_decorations.extend(\n                        [ProviderDecoration(default_arg_name,\n                                            provider_decoration.annotated_with,\n                                            provider_decoration.in_scope_id)\n                         for default_arg_name in default_arg_names])\n            return expanded_provider_decorations\n    return [ProviderDecoration(default_arg_name,\n                               annotated_with=None,\n                               in_scope_id=scoping.DEFAULT_SCOPE)\n            for default_arg_name in default_arg_names]", "docstring": "Retrieves the provider method-relevant info set by decorators.\n\nIf any info wasn't set by decorators, then defaults are returned.\n\nArgs:\nprovider_fn: a (possibly decorated) provider function\ndefault_arg_names: the (possibly empty) arg names to use if none were\nspecified via @provides()\nReturns:\na sequence of ProviderDecoration", "source": "juraj-google-style"}
{"code": "def shape(self):\n    raise NotImplementedError", "docstring": "The `TensorShape` of this variable.\n\nReturns:\nA `TensorShape`.", "source": "github-repos"}
{"code": "def expect_no_raises(message=None, extras=None):\n    try:\n        yield\n    except Exception as e:\n        e_record = records.ExceptionRecord(e)\n        if extras:\n            e_record.extras = extras\n        msg = message or 'Got an unexpected exception'\n        details = '%s: %s' % (msg, e_record.details)\n        logging.exception(details)\n        e_record.details = details\n        recorder.add_error(e_record)", "docstring": "Expects no exception is raised in a context.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nA default message is added to the exception `details`.\n\nArgs:\nmessage: string, custom message to add to exception's `details`.\nextras: An optional field for extra information to be included in test\nresult.", "source": "github-repos"}
{"code": "def owner_set(self):\n    owners = set()\n    if self.has_attr() or self.has_subscript():\n        owners.add(self.parent)\n        owners.update(self.parent.owner_set)\n    return owners", "docstring": "Returns all the symbols (simple or composite) that own this QN.\n\nIn other words, if this symbol was modified, the symbols in the owner set\nmay also be affected.\n\nExamples:\n'a.b[c.d]' has two owners, 'a' and 'a.b'", "source": "github-repos"}
{"code": "def _ImportPythonModule(module_name):\n    try:\n        module_object = list(map(__import__, [module_name]))[0]\n    except ImportError:\n        return None\n    if ('.' in module_name):\n        for submodule_name in module_name.split('.')[1:]:\n            module_object = getattr(module_object, submodule_name, None)\n    return module_object", "docstring": "Imports a Python module.\n\nArgs:\nmodule_name (str): name of the module.\n\nReturns:\nmodule: Python module or None if the module cannot be imported.", "source": "codesearchnet"}
{"code": "def get_layer(self, name=None, index=None):\n    if index is not None and name is not None:\n        raise ValueError('Provide only a layer name or a layer index.')\n    if index is not None:\n        if len(self.layers) <= index:\n            raise ValueError('Was asked to retrieve layer at index ' + str(index) + ' but model only has ' + str(len(self.layers)) + ' layers.')\n        else:\n            return self.layers[index]\n    if name is not None:\n        for layer in self.layers:\n            if layer.name == name:\n                return layer\n        raise ValueError('No such layer: ' + name + '.')\n    raise ValueError('Provide either a layer name or layer index.')", "docstring": "Retrieves a layer based on either its name (unique) or index.\n\nIf `name` and `index` are both provided, `index` will take precedence.\nIndices are based on order of horizontal graph traversal (bottom-up).\n\nArgs:\nname: String, name of layer.\nindex: Integer, index of layer.\n\nReturns:\nA layer instance.\n\nRaises:\nValueError: In case of invalid layer name or index.", "source": "github-repos"}
{"code": "async def _grab_connection(self, url):\n    (scheme, host, _, _, _, _) = urlparse(url)\n    host_loc = urlunparse((scheme, host, '', '', '', ''))\n    sock = self._checkout_connection(host_loc)\n    if (sock is None):\n        sock = (await self._make_connection(host_loc))\n    return sock", "docstring": "The connection pool handler. Returns a connection\nto the caller. If there are no connections ready, and\nas many connections checked out as there are available total,\nwe yield control to the event loop.\n\nIf there is a connection ready or space to create a new one, we\npop/create it, register it as checked out, and return it.\n\nArgs:\nurl (str): breaks the url down and uses the top level location\ninfo to see if we have any connections to the location already\nlying around.", "source": "codesearchnet"}
{"code": "def execute(self, asm_instr):\n        \n        \n        self.ir_emulator.registers[self.ip] = asm_instr.address + asm_instr.size\n\n        \n        if self.arch_info.instr_is_syscall(asm_instr):\n            raise Syscall()\n\n        \n        return self.__execute(asm_instr)", "docstring": "Execute an assembler instruction.\n\nArgs:\nasm_instr (X86Instruction): A instruction to execute.\n\nReturns:\nA int. The address of the next instruction to execute.", "source": "juraj-google-style"}
{"code": "def get_device_locations(mesh: layout_lib.Mesh, client_id: Optional[int]=None) -> List[Dict[str, int]]:\n    if mesh.device_type() != _TPU_DEVICE_TYPE:\n        raise ValueError('The mesh must be a TPU mesh')\n    if client_id is None or client_id == config.client_id():\n        return mesh.local_device_locations()\n    raise NotImplementedError(\"Looking up other clients' device locations is not supported\")", "docstring": "Returns the device locations of all TPU cores local to the given client.\n\nA device location is a dictionary from dimension names to indices on those\ndimensions. For example, for a 2x2 mesh ('x', 'y'), this function returns a\npermutation of this list:\n\n[{'x': 0, 'y': 0},\n{'x': 0, 'y': 1},\n{'x': 1, 'y': 0},\n{'x': 1, 'y': 1}].\n\nNote that device IDs and device locations are equivalent. The former is a\nlinearization of the latter along mesh dimensions.\n\nArgs:\nmesh: A TPU mesh.\nclient_id: Optional; A DTensor client ID. If empty, query this client.", "source": "github-repos"}
{"code": "def _get_typed_list_value(self, key, target_type, type_convert, is_optional=False, is_secret=False, is_local=False, default=None, options=None):\n    value = self._get_typed_value(key=key, target_type=list, type_convert=json.loads, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)\n    if (not value):\n        return default\n    raise_type = ('dict' if (target_type == Mapping) else target_type)\n    if (not isinstance(value, list)):\n        raise RheaError('Cannot convert value `{}` (key: `{}`) to `{}`'.format(value, key, raise_type))\n    result = []\n    for v in value:\n        if isinstance(v, six.string_types):\n            try:\n                result.append(type_convert(v))\n            except ValueError:\n                raise RheaError('Cannot convert value `{}` (found in list key: `{}`) to `{}`'.format(v, key, raise_type))\n        elif isinstance(v, target_type):\n            result.append(v)\n        else:\n            raise RheaError('Cannot convert value `{}` (found in list key: `{}`) to `{}`'.format(v, key, raise_type))\n    return result", "docstring": "Return the value corresponding to the key converted first to list\nthan each element to the given type.\n\nArgs:\nkey: the dict key.\ntarget_type: The type we expect the variable or key to be in.\ntype_convert: A lambda expression that converts the key to the desired type.\nis_optional: To raise an error if key was not found.\nis_secret: If the key is a secret.\nis_local: If the key is a local to this service.\ndefault: default value if is_optional is True.\noptions: list/tuple if provided, the value must be one of these values.", "source": "codesearchnet"}
{"code": "def nCr(n, r):\n    \n    f = math.factorial\n    return int(f(n) / f(r) / f(n-r))", "docstring": "Calculates nCr.\n\nArgs:\nn (int): total number of items.\nr (int): items to choose\n\nReturns:\nnCr.", "source": "juraj-google-style"}
{"code": "def prepare_adiabatic_limit(slh, k=None):\n    \n    if k is None:\n        k = symbols('k', positive=True)\n    Ld = slh.L.dag()\n    LdL = (Ld * slh.L)[0, 0]\n    K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()\n    N = slh.S.dag()\n    B, A, Y = K.series_expand(k, 0, 2)\n    G, F = Ld.series_expand(k, 0, 1)\n\n    return Y, A, B, F, G, N", "docstring": "Prepare the adiabatic elimination on an SLH object\n\nArgs:\nslh: The SLH object to take the limit for\nk: The scaling parameter $k \\rightarrow \\infty$. The default is a\npositive symbol 'k'\n\nReturns:\ntuple: The objects ``Y, A, B, F, G, N``\nnecessary to compute the limiting system.", "source": "juraj-google-style"}
{"code": "def assert_stmt(expression1, expression2):\n    if not callable(expression2):\n        raise ValueError('{} must be a callable'.format(expression2))\n    args, _, keywords, _ = tf_inspect.getargspec(expression2)\n    if args or keywords:\n        raise ValueError('{} may not have any arguments'.format(expression2))\n    if tensor_util.is_tf_type(expression1):\n        return _tf_assert_stmt(expression1, expression2)\n    else:\n        return _py_assert_stmt(expression1, expression2)", "docstring": "Functional form of an assert statement.\n\nThis follows the semantics of the Python assert statement, however the\nconcrete implementations may deviate from it. See the respective\nimplementation for details.\n\nIn general, the assert statement should not be used for control flow.\nFurthermore, it is encouraged that the assertion expressions should not have\nside effects.\n\nArgs:\nexpression1: Any\nexpression2: Callable[[], Any], returns the expression to include in the\nerror message when expression1 evaluates to False. When expression1 is\nTrue, the result of expression2 will not be evaluated, however,\nexpression2 itself may be evaluated in some implementations.\n\nReturns:\nAny, implementation-dependent.\n\nRaises:\nValueError: if any arguments are illegal.", "source": "github-repos"}
{"code": "def find_library_linux(cls):\n        \n        dll = Library.JLINK_SDK_NAME\n        root = os.path.join('/', 'opt', 'SEGGER')\n\n        for (directory_name, subdirs, files) in os.walk(root):\n            fnames = []\n            x86_found = False\n            for f in files:\n                path = os.path.join(directory_name, f)\n                if os.path.isfile(path) and f.startswith(dll):\n                    fnames.append(f)\n                    if '_x86' in path:\n                        x86_found = True\n\n            for fname in fnames:\n                fpath = os.path.join(directory_name, fname)\n                if util.is_os_64bit():\n                    if '_x86' not in fname:\n                        yield fpath\n                elif x86_found:\n                    if '_x86' in fname:\n                        yield fpath\n                else:\n                    yield fpath", "docstring": "Loads the SEGGER DLL from the root directory.\n\nOn Linux, the SEGGER tools are installed under the ``/opt/SEGGER``\ndirectory with versioned directories having the suffix ``_VERSION``.\n\nArgs:\ncls (Library): the ``Library`` class\n\nReturns:\nThe paths to the J-Link library files in the order that they are\nfound.", "source": "juraj-google-style"}
{"code": "def generate_nb_state_data(means, weights, R):\n    \n    cells = weights.shape[1]\n    \n    x_true = np.dot(means, weights)\n    \n    R_ = np.tile(R, (cells, 1)).T\n    P_true = x_true/(R_ + x_true)\n    sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)\n    return sample.astype(float)", "docstring": "Generates data according to the Negative Binomial Convex Mixture Model.\n\nArgs:\nmeans (array): Cell types- genes x clusters\nweights (array): Cell cluster assignments- clusters x cells\nR (array): dispersion parameter - 1 x genes\n\nReturns:\ndata matrix - genes x cells", "source": "juraj-google-style"}
{"code": "def validate_arg_values(ast, bo):\n    \n\n    if not bo.api_url:\n        log.info(\"No API endpoint defined\")\n        return bo\n\n    log.debug(f\"AST: {ast}\")\n\n    \n    if isinstance(ast, NSArg):\n        term_id = \"{}:{}\".format(ast.namespace, ast.value)\n        value_types = ast.value_types\n        log.debug(f\"Value types: {value_types}  AST value: {ast.value}\")\n        \n        if ast.namespace == \"DEFAULT\":  \n            for value_type in value_types:\n                default_namespace = [\n                    ns[\"name\"] for ns in bo.spec[\"namespaces\"][value_type][\"info\"]\n                ] + [\n                    ns[\"abbreviation\"]\n                    for ns in bo.spec[\"namespaces\"][value_type][\"info\"]\n                ]\n\n                if ast.value in default_namespace:\n                    log.debug(\"Default namespace valid term: {}\".format(term_id))\n                    break\n            else:  \n                log.debug(\"Default namespace invalid term: {}\".format(term_id))\n                bo.validation_messages.append(\n                    (\"WARNING\", f\"Default Term: {term_id} not found\")\n                )\n\n        \n        else:\n            request_url = bo.api_url + \"/terms/{}\".format(\n                url_path_param_quoting(term_id)\n            )\n            log.info(f\"Validate Arg Values url {request_url}\")\n            r = get_url(request_url)\n            if r and r.status_code == 200:\n                result = r.json()\n                \n\n                log.debug(\n                    f'AST.value_types  {ast.value_types}  Entity types {result.get(\"entity_types\", [])}'\n                )\n\n                \n                if (\n                    len(\n                        set(ast.value_types).intersection(\n                            result.get(\"entity_types\", [])\n                        )\n                    )\n                    == 0\n                ):\n                    log.debug(\n                        \"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}\".format(\n                            term_id, ast.value_types, result.get(\"entity_types\", [])\n                        )\n                    )\n                    bo.validation_messages.append(\n                        (\n                            \"WARNING\",\n                            \"Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}\".format(\n                                term_id, ast.value_types, result.get(\"entity_types\", [])\n                            ),\n                        )\n                    )\n\n                if term_id in result.get(\"obsolete_ids\", []):\n                    bo.validation_messages.append(\n                        (\n                            \"WARNING\",\n                            f'Obsolete term: {term_id}  Current term: {result[\"id\"]}',\n                        )\n                    )\n\n            elif r.status_code == 404:\n                bo.validation_messages.append(\n                    (\"WARNING\", f\"Term: {term_id} not found in namespace\")\n                )\n            else:\n                log.error(f\"Status {r.status_code} - Bad URL: {request_url}\")\n\n    \n    if isinstance(ast, StrArg):\n        log.debug(f\"  Check String Arg: {ast.value}  {ast.value_types}\")\n        for value_type in ast.value_types:\n            \n            if re.match(\"/\", 
value_type):\n                value_type = re.sub(\"^/\", \"\", value_type)\n                value_type = re.sub(\"/$\", \"\", value_type)\n                match = re.match(value_type, ast.value)\n                if match:\n                    break\n            if value_type in bo.spec[\"namespaces\"]:\n                default_namespace = [\n                    ns[\"name\"] for ns in bo.spec[\"namespaces\"][value_type][\"info\"]\n                ] + [\n                    ns[\"abbreviation\"]\n                    for ns in bo.spec[\"namespaces\"][value_type][\"info\"]\n                ]\n                if ast.value in default_namespace:\n                    break\n        else:  \n            bo.validation_messages.append(\n                (\n                    \"WARNING\",\n                    f\"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}\",\n                )\n            )\n\n    \n    if hasattr(ast, \"args\"):\n        for arg in ast.args:\n            validate_arg_values(arg, bo)\n\n    return bo", "docstring": "Recursively validate arg (NSArg and StrArg) values\n\nCheck that NSArgs are found in BELbio API and match appropriate entity_type.\nCheck that StrArgs match their value - either default namespace or regex string\n\nGenerate a WARNING if not.\n\nArgs:\nbo: bel object\n\nReturns:\nbel object", "source": "juraj-google-style"}
{"code": "def sspro8_summary(self):\n    summary = {}\n    records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro8)\n    for r in records:\n        seq_summary = {}\n        seq_summary['percent_H-sspro8'] = (r.seq.count('H') / float(len(r)))\n        seq_summary['percent_G-sspro8'] = (r.seq.count('G') / float(len(r)))\n        seq_summary['percent_I-sspro8'] = (r.seq.count('I') / float(len(r)))\n        seq_summary['percent_E-sspro8'] = (r.seq.count('E') / float(len(r)))\n        seq_summary['percent_B-sspro8'] = (r.seq.count('B') / float(len(r)))\n        seq_summary['percent_T-sspro8'] = (r.seq.count('T') / float(len(r)))\n        seq_summary['percent_S-sspro8'] = (r.seq.count('S') / float(len(r)))\n        seq_summary['percent_C-sspro8'] = (r.seq.count('C') / float(len(r)))\n        summary[r.id] = seq_summary\n    return summary", "docstring": "Parse the SSpro8 output file and return a summary of secondary structure composition.\n\nThe output file is just a FASTA formatted file, so you can get residue level\ninformation by parsing it like a normal sequence file.\n\nReturns:\ndict: Percentage of:\nH: alpha-helix\nG: 310-helix\nI: pi-helix (extremely rare)\nE: extended strand\nB: beta-bridge\nT: turn\nS: bend\nC: the rest", "source": "codesearchnet"}
{"code": "def _sort_scores_and_boxes(scores, boxes):\n    with ops.name_scope('sort_scores_and_boxes'):\n        sorted_scores_indices = sort_ops.argsort(scores, axis=1, direction='DESCENDING')\n        sorted_scores = array_ops.gather(scores, sorted_scores_indices, axis=1, batch_dims=1)\n        sorted_boxes = array_ops.gather(boxes, sorted_scores_indices, axis=1, batch_dims=1)\n    return (sorted_scores, sorted_boxes, sorted_scores_indices)", "docstring": "Sort boxes based their score from highest to lowest.\n\nArgs:\nscores: a tensor with a shape of [batch_size, num_boxes] representing\nthe scores of boxes.\nboxes: a tensor with a shape of [batch_size, num_boxes, 4] representing\nthe boxes.\nReturns:\nsorted_scores: a tensor with a shape of [batch_size, num_boxes]\nrepresenting the sorted scores.\nsorted_boxes: a tensor representing the sorted boxes.\nsorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]\nrepresenting the index of the scores in a sorted descending order.", "source": "github-repos"}
{"code": "def update(self, **kwargs):\n    kwargs = self._preprocess_params(kwargs)\n    kwargs = self.preprocess_kwargs_before_update(kwargs)\n    for (key, value) in kwargs.iteritems():\n        cls = type(self)\n        if ((not hasattr(cls, key)) or isinstance(getattr(cls, key), property)):\n            continue\n        if (key not in self._no_overwrite_):\n            setattr(self, key, value)\n        if isinstance(getattr(self, key), OrderingList):\n            getattr(self, key).reorder()\n        elif isinstance(getattr(cls, key), AssociationProxyInstance):\n            target_name = getattr(cls, key).target_collection\n            target_rel = getattr(self, target_name)\n            if isinstance(target_rel, OrderingList):\n                target_rel.reorder()\n    try:\n        self.session.commit()\n        return self\n    except Exception as e:\n        self.session.rollback()\n        raise e", "docstring": "Updates an instance.\n\nArgs:\n**kwargs  :  Arbitrary keyword arguments. Column names are\nkeywords and their new values are the values.\n\nExamples:\n\n>>> customer.update(email=\"newemail@x.com\", name=\"new\")", "source": "codesearchnet"}
{"code": "def get_voltage(self, cycle=None, dataset_number=None, full=True):\n        \n\n        dataset_number = self._validate_dataset_number(dataset_number)\n        if dataset_number is None:\n            self._report_empty_dataset()\n            return\n        cycle_index_header = self.headers_normal.cycle_index_txt\n        voltage_header = self.headers_normal.voltage_txt\n        \n\n        test = self.datasets[dataset_number].dfdata\n        if cycle:\n            self.logger.debug(\"getting voltage curve for cycle\")\n            c = test[(test[cycle_index_header] == cycle)]\n            if not self.is_empty(c):\n                v = c[voltage_header]\n                return v\n        else:\n            if not full:\n                self.logger.debug(\n                    \"getting list of voltage-curves for all cycles\"\n                )\n                v = []\n                no_cycles = np.amax(test[cycle_index_header])\n                for j in range(1, no_cycles + 1):\n                    txt = \"Cycle  %i:  \" % j\n                    self.logger.debug(txt)\n                    c = test[(test[cycle_index_header] == j)]\n                    v.append(c[voltage_header])\n            else:\n                self.logger.debug(\"getting frame of all voltage-curves\")\n                v = test[voltage_header]\n            return v", "docstring": "Returns voltage (in V).\n\nArgs:\ncycle: cycle number (all cycles if None)\ndataset_number: first dataset if None\nfull: valid only for cycle=None (i.e. all cycles), returns the full\npandas.Series if True, else a list of pandas.Series\n\nReturns:\npandas.Series (or list of pandas.Series if cycle=None og full=False)", "source": "juraj-google-style"}
{"code": "def library_line(self, file_name):\n    gulplib_set = (lambda : ('GULP_LIB' in os.environ.keys()))\n    readable = (lambda f: (os.path.isfile(f) and os.access(f, os.R_OK)))\n    gin = ''\n    (dirpath, fname) = os.path.split(file_name)\n    if (dirpath and readable(file_name)):\n        gin = ('library ' + file_name)\n    else:\n        fpath = os.path.join(os.getcwd(), file_name)\n        if readable(fpath):\n            gin = ('library ' + fpath)\n        elif gulplib_set():\n            fpath = os.path.join(os.environ['GULP_LIB'], file_name)\n            if readable(fpath):\n                gin = ('library ' + file_name)\n    if gin:\n        return (gin + '\\n')\n    else:\n        raise GulpError('GULP Library not found')", "docstring": "Specifies GULP library file to read species and potential parameters.\nIf using library don't specify species and potential\nin the input file and vice versa. Make sure the elements of\nstructure are in the library file.\n\nArgs:\nfile_name: Name of GULP library file\n\nReturns:\nGULP input string specifying library option", "source": "codesearchnet"}
{"code": "def worker(url_key, property_name, function, function_arguments):\n    error_msg = None\n    try:\n        data = function(*function_arguments)\n    except Exception as e:\n        data = []\n        error_msg = ('Error: ' + traceback.format_exc().strip())\n        error_msg += ('\\n' + str(e.message))\n    if error_msg:\n        logger.error(error_msg)\n        error_msg = None\n    func_name = str(function.__name__)\n    logger.info(('Attempting to save output from `%s`.' % func_name))\n    return _save_to_database(url=url_key, property_name=property_name, data=data)", "docstring": "This function usually runs as process on the background.\n\nIt runs ``function(*function_arguments)`` and then stores them in REST API\nstorage.\n\nWarning:\nThis function puts data into DB, isntead of returning them.\n\nArgs:\nurl_key (str): Key which will be used for database lookup.\nproperty_name (str): Name of the property used to store data.\nfunction (obj): Function used to load the data.\nfunction_arguments (list): List of parameters for function which will\nbe called to retreive data.\nerror_log_path (str): If set, log errors into this file, otherwise\nstderr.", "source": "codesearchnet"}
{"code": "def keywords_special_characters(keywords):\n    \n    invalid_chars = '!\\\"\n    if any(char in invalid_chars for char in keywords):\n        raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS)", "docstring": "Confirms that the keywords don't contain special characters\n\nArgs:\nkeywords (str)\n\nRaises:\ndjango.forms.ValidationError", "source": "juraj-google-style"}
{"code": "def _TerminateProcessByPid(self, pid):\n    self._RaiseIfNotRegistered(pid)\n    process = self._processes_per_pid[pid]\n    self._TerminateProcess(process)\n    self._StopMonitoringProcess(process)", "docstring": "Terminate a process that's monitored by the engine.\n\nArgs:\npid (int): process identifier (PID).\n\nRaises:\nKeyError: if the process is not registered with and monitored by the\nengine.", "source": "codesearchnet"}
{"code": "def group_pairs(blocks, layout_blocks_list):\n    \n\n    image_dict={}\n    for block_id in layout_blocks_list:\n        image_seq=blocks[block_id].ec_hdr.image_seq\n        if image_seq not in image_dict:\n            image_dict[image_seq]=[block_id]\n        else:\n            image_dict[image_seq].append(block_id)\n\n    log(group_pairs, 'Layout blocks found at PEBs: %s' % list(image_dict.values()))\n\n    return list(image_dict.values())", "docstring": "Sort a list of layout blocks into pairs\n\nArguments:\nList:blocks        -- List of block objects\nList:layout_blocks -- List of layout block indexes\n\nReturns:\nList -- Layout block pair indexes grouped in a list", "source": "juraj-google-style"}
{"code": "def scheduler(self, sleep_time=0.2):\n    while self.listening:\n        if self.scheduled_calls:\n            timestamp = time.time()\n            self.scheduled_calls[:] = [item for item in self.scheduled_calls if (not self.time_reached(timestamp, item))]\n        time.sleep(sleep_time)\n    logger.info('Shutting down the call scheduler...')", "docstring": "Starts the scheduler to check for scheduled calls and execute them\nat the correct time.\n\nArgs:\nsleep_time (float): The amount of time to wait in seconds between\neach loop iteration. This prevents the scheduler from consuming\n100% of the host's CPU. Defaults to 0.2 seconds.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_values(self, field_name: str) -> List[object]:\n    result = list()\n    if self.validate_field(field_name):\n        for value_key in self._kg.get(field_name):\n            result.append(value_key['value'])\n    return result", "docstring": "Get a list of all the values of a field.\n\nArgs:\nfield_name:\n\nReturns: the list of values (not the keys)", "source": "codesearchnet"}
{"code": "def split_field_path(path):\n    \n    if not path:\n        return []\n\n    elements = []\n    want_dot = False\n\n    for element in _tokenize_field_path(path):\n        if want_dot:\n            if element != \".\":\n                raise ValueError(\"Invalid path: {}\".format(path))\n            else:\n                want_dot = False\n        else:\n            if element == \".\":\n                raise ValueError(\"Invalid path: {}\".format(path))\n            elements.append(element)\n            want_dot = True\n\n    if not want_dot or not elements:\n        raise ValueError(\"Invalid path: {}\".format(path))\n\n    return elements", "docstring": "Split a field path into valid elements (without dots).\n\nArgs:\npath (str): field path to be lexed.\nReturns:\nList(str): tokens\nRaises:\nValueError: if the path does not match the elements-interspersed-\nwith-dots pattern.", "source": "juraj-google-style"}
{"code": "def predict_task(self, X, t=0, break_ties='random', **kwargs):\n    Y_tp = self.predict_task_proba(X, t=t, **kwargs)\n    Y_tph = self._break_ties(Y_tp, break_ties)\n    return Y_tph", "docstring": "Predicts int labels for an input X on task t\n\nArgs:\nX: The input for the predict_task_proba method\nt: The task index to predict\nReturns:\nAn n-dim tensor of int predictions for the specified task", "source": "codesearchnet"}
{"code": "def export_artifacts(self, processed_artifacts, sketch_id):\n    for (timeline_name, artifact_path) in processed_artifacts:\n        print('Uploading {0:s} to timeline {1:s}'.format(artifact_path, timeline_name))\n        new_timeline_id = self.upload_timeline(timeline_name, artifact_path)\n        self.add_timeline_to_sketch(sketch_id, new_timeline_id)\n    return sketch_id", "docstring": "Upload provided artifacts to specified, or new if non-existent, sketch.\n\nArgs:\nprocessed_artifacts:  List of (timeline_name, artifact_path) tuples\nsketch_id: ID of sketch to append the timeline to\n\nReturns:\nint: ID of sketch.", "source": "codesearchnet"}
{"code": "async def skip(self, query='1'):\n    if (not (self.state == 'ready')):\n        logger.debug(\"Trying to skip from wrong state '{}'\".format(self.state))\n        return\n    if (query == ''):\n        query = '1'\n    elif (query == 'all'):\n        query = str((len(self.queue) + 1))\n    try:\n        num = int(query)\n    except TypeError:\n        self.statuslog.error('Skip argument must be a number')\n    except ValueError:\n        self.statuslog.error('Skip argument must be a number')\n    else:\n        self.statuslog.info('Skipping')\n        for i in range((num - 1)):\n            if (len(self.queue) > 0):\n                self.prev_queue.append(self.queue.pop(0))\n        try:\n            self.streamer.stop()\n        except Exception as e:\n            logger.exception(e)", "docstring": "The skip command\n\nArgs:\nquery (str): The number of items to skip", "source": "codesearchnet"}
{"code": "def __init__(self, win_registry):\n    \n    if not win_registry:\n      raise ValueError('Missing Windows Registry value.')\n\n    super(WinRegistrySearcher, self).__init__()\n    self._win_registry = win_registry", "docstring": "Initializes a Windows Registry searcher.\n\nArgs:\nwin_registry (WinRegistry): Windows Registry.\n\nRaises:\nValueError: when Windows Registry is not set.", "source": "juraj-google-style"}
{"code": "def update_plot_limits(ax, white_space):\n    \n\n    if hasattr(ax, 'zz_dataLim'):\n        bounds = ax.xy_dataLim.bounds\n        ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)\n        ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)\n\n        bounds = ax.zz_dataLim.bounds\n        ax.set_zlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)\n    else:\n        bounds = ax.dataLim.bounds\n        assert not any(map(np.isinf, bounds)), 'Cannot set bounds if dataLim has infinite elements'\n        ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)\n        ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)", "docstring": "Sets the limit options of a matplotlib plot.\n\nArgs:\nax: matplotlib axes\nwhite_space(float): whitespace added to surround the tight limit of the data\n\nNote: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d", "source": "juraj-google-style"}
{"code": "def order_nodes(nodes: Sequence[_OrderableNode]) -> list[_OrderableNode]:\n    if not nodes:\n        return []\n    root = nodes[0]\n    predecessor_map = compute_predecessors(nodes)\n    dead = {node for node, predecessors in predecessor_map.items() if root not in predecessors}\n    queue = {root: predecessor_map[root]}\n    order = []\n    seen = set()\n    while queue:\n        _, _, node = min(((len(predecessors), node.id, node) for node, predecessors in queue.items()))\n        del queue[node]\n        if node in seen:\n            continue\n        order.append(node)\n        seen.add(node)\n        for _, predecessors in queue.items():\n            predecessors.discard(node)\n        for n in node.outgoing:\n            if n not in queue:\n                queue[n] = predecessor_map[n] - seen\n    assert len(set(order) | dead) == len(set(nodes))\n    return order", "docstring": "Build an ancestors first traversal of CFG nodes.\n\nThis guarantees that at least one predecessor of a block is scheduled before\nthe block itself, and it also tries to schedule as many of them before the\nblock as possible (so e.g. if two branches merge in a node, it prefers to\nprocess both the branches before that node).\n\nArgs:\nnodes: A list of nodes or blocks. They have two attributes: \"id\" (an int to\nenable deterministic sorting) and \"outgoing\" (a list of nodes).\n\nReturns:\nA list of nodes in the proper order.", "source": "github-repos"}
{"code": "def list_vms(access_token, subscription_id, resource_group):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachines',\n                        '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "List VMs in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body of a list of VM model views.", "source": "juraj-google-style"}
{"code": "def play(env, transpose=True, fps=30, nop_=0):\n    assert isinstance(env.observation_space, gym.spaces.box.Box)\n    obs_s = env.observation_space\n    is_bw = (len(obs_s.shape) == 2)\n    is_rgb = ((len(obs_s.shape) == 3) and (obs_s.shape[2] in [1, 3]))\n    assert (is_bw or is_rgb)\n    if hasattr(env, 'get_keys_to_action'):\n        keys_to_action = env.get_keys_to_action()\n    elif hasattr(env.unwrapped, 'get_keys_to_action'):\n        keys_to_action = env.unwrapped.get_keys_to_action()\n    else:\n        raise ValueError('env has no get_keys_to_action method')\n    relevant_keys = set(sum(map(list, keys_to_action.keys()), []))\n    video_size = (env.observation_space.shape[0], env.observation_space.shape[1])\n    if transpose:\n        video_size = tuple(reversed(video_size))\n    pressed_keys = []\n    running = True\n    env_done = True\n    flags = ((pygame.RESIZABLE | pygame.HWSURFACE) | pygame.DOUBLEBUF)\n    screen = pygame.display.set_mode(video_size, flags)\n    pygame.event.set_blocked(pygame.MOUSEMOTION)\n    if (env.spec is not None):\n        pygame.display.set_caption(env.spec.id)\n    else:\n        pygame.display.set_caption('nes-py')\n    clock = pygame.time.Clock()\n    while running:\n        if env_done:\n            env_done = False\n            obs = env.reset()\n        else:\n            action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)\n            (obs, rew, env_done, info) = env.step(action)\n        if (obs is not None):\n            if (len(obs.shape) == 2):\n                obs = obs[(:, :, None)]\n            if (obs.shape[2] == 1):\n                obs = obs.repeat(3, axis=2)\n            display_arr(screen, obs, video_size, transpose)\n        for event in pygame.event.get():\n            if (event.type == pygame.KEYDOWN):\n                if (event.key in relevant_keys):\n                    pressed_keys.append(event.key)\n                elif (event.key == 27):\n                    running = False\n                elif (event.key == ord('e')):\n                    env.unwrapped._backup()\n                elif (event.key == ord('r')):\n                    env.unwrapped._restore()\n            elif (event.type == pygame.KEYUP):\n                if (event.key in relevant_keys):\n                    pressed_keys.remove(event.key)\n            elif (event.type == pygame.QUIT):\n                running = False\n        pygame.display.flip()\n        clock.tick(fps)\n    pygame.quit()", "docstring": "Play the game using the keyboard as a human.\n\nArgs:\nenv (gym.Env): the environment to use for playing\ntranspose (bool): whether to transpose frame before viewing them\nfps (int): number of steps of the environment to execute every second\nnop_ (any): the object to use as a null op action for the environment\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "class PerceiverAudioPreprocessor(AbstractPreprocessor):\n\n    def __init__(self, config, prep_type: str='patches', samples_per_patch: int=96, position_encoding_type: str='fourier', concat_or_add_pos: str='concat', out_channels=64, project_pos_dim=-1, **position_encoding_kwargs):\n        super().__init__()\n        self.config = config\n        if prep_type not in ('patches',):\n            raise ValueError(f\"Prep_type {prep_type} is invalid, can only be 'patches'.\")\n        if concat_or_add_pos not in ['concat', 'add']:\n            raise ValueError(f\"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.\")\n        self.samples_per_patch = samples_per_patch\n        self.position_encoding_type = position_encoding_type\n        self.concat_or_add_pos = concat_or_add_pos\n        self.project_pos_dim = project_pos_dim\n        self.position_embeddings, self.positions_projection = build_position_encoding(position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs)\n\n    @property\n    def num_channels(self) -> int:\n        if self.project_pos_dim > 0:\n            pos_dim = self.project_pos_dim\n        else:\n            pos_dim = self.position_embeddings.output_size()\n        if self.concat_or_add_pos == 'add':\n            return pos_dim\n        return self.samples_per_patch + pos_dim\n\n    def _build_network_inputs(self, inputs):\n        \n        batch_size = inputs.shape[0]\n        index_dims = inputs.shape[1:-1]\n        if self.position_encoding_type == 'trainable':\n            pos_enc = self.position_embeddings(batch_size)\n        elif self.position_encoding_type == 'fourier':\n            pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)\n        pos_enc = self.positions_projection(pos_enc)\n        if self.concat_or_add_pos == 'concat':\n            inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)\n        elif self.concat_or_add_pos == 'add':\n            inputs_with_pos = inputs + pos_enc\n        return (inputs_with_pos, inputs)\n\n    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False):\n        inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])\n        inputs, inputs_without_pos = self._build_network_inputs(inputs)\n        modality_sizes = None\n        return (inputs, modality_sizes, inputs_without_pos)", "docstring": "Audio preprocessing for Perceiver Encoder.\n\nArgs:\nconfig ([*PerceiverConfig*]):\nModel configuration.\nprep_type (`str`, *optional*, defaults to `\"patches\"`):\nPreprocessor type to use. Only \"patches\" is supported.\nsamples_per_patch (`int`, *optional*, defaults to 96):\nNumber of samples per patch.\nposition_encoding_type (`str`, *optional*, defaults to `\"fourier\"`):\nType of position encoding to use. Can be \"trainable\" or \"fourier\".\nconcat_or_add_pos (`str`, *optional*, defaults to `\"concat\"`):\nHow to concatenate the position encoding to the input. Can be \"concat\" or \"add\".\nout_channels (`int`, *optional*, defaults to 64):\nNumber of channels in the output.\nproject_pos_dim (`int`, *optional*, defaults to -1):\nDimension of the position encoding to project to. If -1, no projection is applied.\n**position_encoding_kwargs (`Dict`, *optional*):\nKeyword arguments for the position encoding.", "source": "github-repos"}
{"code": "def get_node_sum(self, age=None):\n    if (age is None):\n        age = self.age\n    return (age if (self.comp == 1) else int(((pow(self.comp, (age + 1)) - 1) / (self.comp - 1))))", "docstring": "Get sum of all branches in the tree.\n\nReturns:\nint: The sum of all nodes grown until the age.", "source": "codesearchnet"}
{"code": "def expand(self, *args, **kwargs):\n    if args:\n        if (len(args) == 1):\n            data_dict = args[0]\n            trace = kwargs.get('trace')\n            style = kwargs.get('style')\n        else:\n            raise TypeError(('expand() only takes 1 positional argument (got %s)' % args))\n    else:\n        data_dict = kwargs\n        trace = None\n        style = None\n    tokens = []\n    group = _MakeGroupFromRootSection(self._program, self.undefined_str)\n    if style:\n        style.execute(data_dict, tokens.append, group=group, trace=trace)\n    else:\n        self.execute(data_dict, tokens.append, group=group, trace=trace)\n    return JoinTokens(tokens)", "docstring": "Expands the template with the given data dictionary, returning a string.\n\nThis is a small wrapper around execute(), and is the most convenient\ninterface.\n\nArgs:\ndata_dict: The JSON data dictionary.  Like the builtin dict() constructor,\nit can take a single dictionary as a positional argument, or arbitrary\nkeyword arguments.\ntrace: Trace object for debugging\nstyle: Template instance to be treated as a style for this template (the\n\"outside\")\n\nReturns:\nThe return value could be a str() or unicode() instance, depending on the\nthe type of the template string passed in, and what the types the strings\nin the dictionary are.", "source": "codesearchnet"}
{"code": "def _close_rpc_interface(self, connection_id, callback):\n    try:\n        context = self.connections.get_context(connection_id)\n    except ArgumentError:\n        callback(connection_id, self.id, False, 'Could not find connection information')\n        return\n    self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))\n    try:\n        service = context['services'][TileBusService]\n        header_characteristic = service[ReceiveHeaderChar]\n        payload_characteristic = service[ReceivePayloadChar]\n    except KeyError:\n        self.connections.finish_operation(connection_id, False, \"Can't find characteristics to open rpc interface\")\n        return\n    self.bable.set_notification(enabled=False, connection_handle=context['connection_handle'], characteristic=header_characteristic, on_notification_set=[self._on_interface_closed, context, payload_characteristic], timeout=1.0)", "docstring": "Disable RPC interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "codesearchnet"}
{"code": "def set_invite_only(self, invite_only):\n    join_rule = ('invite' if invite_only else 'public')\n    try:\n        self.client.api.set_join_rule(self.room_id, join_rule)\n        self.invite_only = invite_only\n        return True\n    except MatrixRequestError:\n        return False", "docstring": "Set how the room can be joined.\n\nArgs:\ninvite_only(bool): If True, users will have to be invited to join\nthe room. If False, anyone who knows the room link can join.\n\nReturns:\nTrue if successful, False if not", "source": "codesearchnet"}
{"code": "def add_data(self, data):\n    if (self.state == self.ErrorState):\n        return\n    self.raw_data += bytearray(data)\n    still_processing = True\n    while still_processing:\n        still_processing = self.process_data()", "docstring": "Add data to our stream, emitting reports as each new one is seen\n\nArgs:\ndata (bytearray): A chunk of new data to add", "source": "codesearchnet"}
{"code": "def occurrence(self, indicator=None):\n    self._request_entity = 'fileOccurrence'\n    self._request_uri = '{}/fileOccurrences'.format(self._request_uri)\n    if (indicator is not None):\n        self._request_uri = '{}/{}/fileOccurrences'.format(self._api_uri, indicator)", "docstring": "Update the URI to retrieve file occurrences for the provided indicator.\n\nArgs:\nindicator (string): The indicator to retrieve file occurrences.", "source": "codesearchnet"}
{"code": "def export(self, filepath, encoding=\"utf-8\", gzipped=True):\n        \n        data = json.dumps(self.word_frequency.dictionary, sort_keys=True)\n        write_file(filepath, encoding, gzipped, data)", "docstring": "Export the word frequency list for import in the future\n\nArgs:\nfilepath (str): The filepath to the exported dictionary\nencoding (str): The encoding of the resulting output\ngzipped (bool): Whether to gzip the dictionary or not", "source": "juraj-google-style"}
{"code": "def set_query_parameter(url, param_name, param_value):\n    (scheme, netloc, path, query_string, fragment) = urlsplit(url)\n    query_params = parse_qs(query_string)\n    query_params[param_name] = [param_value]\n    new_query_string = urlencode(query_params, doseq=True)\n    return urlunsplit((scheme, netloc, path, new_query_string, fragment))", "docstring": "Given a URL, set or replace a query parameter and return the modified URL.\n\nArgs:\nurl: a given  URL\nparam_name: the parameter name to add\nparam_value: the parameter value\nReturns:\nURL with the added parameter", "source": "codesearchnet"}
{"code": "def create(self, *args, **kwargs):\n        \n        data = self.get_data('floating_ips/',\n                             type=POST,\n                             params={'droplet_id': self.droplet_id})\n\n        if data:\n            self.ip = data['floating_ip']['ip']\n            self.region = data['floating_ip']['region']\n\n        return self", "docstring": "Creates a FloatingIP and assigns it to a Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\ndroplet_id: int - droplet id", "source": "juraj-google-style"}
{"code": "def thumbnail(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n    output_height, output_width = (size['height'], size['width'])\n    height = min(input_height, output_height)\n    width = min(input_width, output_width)\n    if height == input_height and width == input_width:\n        return image\n    if input_height > input_width:\n        width = int(input_width * height / input_height)\n    elif input_width > input_height:\n        height = int(input_height * width / input_width)\n    return resize(image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any\ncorresponding dimension of the specified size.\n\nArgs:\nimage (`np.ndarray`):\nThe image to be resized.\nsize (`Dict[str, int]`):\nThe size `{\"height\": h, \"width\": w}` to resize the image to.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nThe resampling filter to use.\ndata_format (`Optional[Union[str, ChannelDimension]]`, *optional*):\nThe data format of the output image. If unset, the same format as the input image is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def _read_transitions(self):\n        \n        states = []\n        i = 0\n        regex = re.compile('[ \\t\\n\\r:,]+')\n        found = 0  \n        state = 0  \n        substate = 0  \n        mapping = []  \n        cur_line = None\n        with open(self.outfile) as flex_file:\n            for cur_line in flex_file:\n                if cur_line[0:35] == \"static yyconst flex_int16_t yy_nxt[\" or cur_line[0:33] == \"static const flex_int16_t yy_nxt[\":\n                    found = 1\n                    \n                    continue\n                if found == 1:\n                    if state == 0 and cur_line[0:5] == \"    {\":\n                        state = 1\n                        continue\n                    if state == 1 and cur_line[0:7] == \"    } ;\":\n                        state = 0\n                        break\n\n                    if substate == 0 and cur_line[0:5] == \"    {\":\n                        mapping = []\n                        substate = 1\n                        continue\n                    if substate == 1:\n                        if cur_line[0:6] != \"    },\":\n                            cur_line = \"\".join(cur_line.split())\n                            if cur_line == '':\n                                continue\n                            if cur_line[cur_line.__len__() - 1] == ',':\n                                splitted_line = regex.split(\n                                    cur_line[:cur_line.__len__() - 1])\n                            else:\n                                splitted_line = regex.split(cur_line)\n                            mapping = mapping + splitted_line\n                            continue\n                        else:\n                            cleared = []\n                            for j in mapping:\n                                cleared.append(int(j))\n                            states.append(cleared)\n                            mapping = []\n                            substate = 0\n\n        return states", "docstring": "Read DFA transitions from flex compiled file\nArgs:\nNone\nReturns:\nlist: The list of states and the destination for a character", "source": "juraj-google-style"}
{"code": "def cctop_save_xml(jobid, outpath):\n    status = cctop_check_status(jobid=jobid)\n    if (status == 'Finished'):\n        result = 'http:\n        result_text = requests.post(result)\n        with open(outpath, 'w') as f:\n            f.write(result_text.text)\n        return outpath\n    else:\n        raise ConnectionRefusedError('CCTOP job incomplete, status is \"{}\"'.format(status))", "docstring": "Save the CCTOP results file in XML format.\n\nArgs:\njobid (str): Job ID obtained when job was submitted\noutpath (str): Path to output filename\n\nReturns:\nstr: Path to output filename", "source": "codesearchnet"}
{"code": "def enable_beacon(name, **kwargs):\n    \n\n    ret = {'comment': [],\n           'result': True}\n\n    if not name:\n        ret['comment'] = 'Beacon name is required.'\n        ret['result'] = False\n        return ret\n\n    if 'test' in kwargs and kwargs['test']:\n        ret['comment'] = 'Beacon {0} would be enabled.'.format(name)\n    else:\n        _beacons = list_(return_yaml=False, **kwargs)\n        if name not in _beacons:\n            ret['comment'] = 'Beacon {0} is not currently configured.' \\\n                             ''.format(name)\n            ret['result'] = False\n            return ret\n\n        try:\n            eventer = salt.utils.event.get_event('minion', opts=__opts__)\n            res = __salt__['event.fire']({'func': 'enable_beacon',\n                                          'name': name},\n                                         'manage_beacons')\n            if res:\n                event_ret = eventer.get_event(\n                    tag='/salt/minion/minion_beacon_enabled_complete',\n                    wait=kwargs.get('timeout', 30))\n                if event_ret and event_ret['complete']:\n                    beacons = event_ret['beacons']\n                    beacon_config_dict = _get_beacon_config_dict(beacons[name])\n\n                    if 'enabled' in beacon_config_dict and beacon_config_dict['enabled']:\n                        ret['result'] = True\n                        ret['comment'] = 'Enabled beacon {0} on minion.' \\\n                                         ''.format(name)\n                    else:\n                        ret['result'] = False\n                        ret['comment'] = 'Failed to enable beacon {0} on ' \\\n                                         'minion.'.format(name)\n                elif event_ret:\n                    ret['result'] = False\n                    ret['comment'] = event_ret['comment']\n                else:\n                    ret['result'] = False\n                    ret['comment'] = 'Did not receive the manage event ' \\\n                                     'before the timeout of {0}s' \\\n                                     ''.format(kwargs.get('timeout', 30))\n                return ret\n        except KeyError:\n            \n            \n            ret['result'] = False\n            ret['comment'] = 'Event module not available. Beacon enable job ' \\\n                             'failed.'\n    return ret", "docstring": "Enable a beacon on the minion.\n\nArgs:\nname (str): Name of the beacon to enable.\n\nReturns:\ndict: Boolean and status message on success or failure of enable.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' beacons.enable_beacon ps", "source": "juraj-google-style"}
{"code": "def _set_input(el, value):\n    if isinstance(value, dict):\n        el.value = value['val']\n    elif (type(value) in [list, tuple]):\n        el.value = ', '.join((item['val'] for item in value))\n    else:\n        el.value = value", "docstring": "Set content of given `el` to `value`.\n\nArgs:\nel (obj): El reference to input you wish to set.\nvalue (obj/list): Value to which the `el` will be set.", "source": "codesearchnet"}
{"code": "def city(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `city`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `city`')\n\n        self._city = value", "docstring": "Corresponds to IDD Field `city`\n\nArgs:\nvalue (str): value for IDD Field `city`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def add_grad(left, right):\n    assert ((left is not None) and (right is not None))\n    left_type = type(left)\n    right_type = type(right)\n    if (left_type is ZeroGradient):\n        return right\n    if (right_type is ZeroGradient):\n        return left\n    return grad_adders[(left_type, right_type)](left, right)", "docstring": "Recursively add the gradient of two objects.\n\nArgs:\nleft: The left value to add. Can be either an array, a number, list or\ndictionary.\nright: The right value. Must be of the same type (recursively) as the left.\n\nReturns:\nThe sum of the two gradients, which will of the same type.", "source": "codesearchnet"}
{"code": "def singleprint_from_saved_model(export_dir: str) -> str:\n    try:\n        return singleprint_from_fingerprint_proto(export_dir)\n    except ValueError:\n        pass\n    try:\n        write_fingerprint(export_dir)\n        return singleprint_from_fingerprint_proto(export_dir)\n    except ValueError:\n        pass\n    try:\n        return singleprint_from_saved_model_proto(export_dir)\n    except ValueError as e:\n        raise ValueError(e) from None", "docstring": "Returns the singleprint of the SavedModel in `export_dir`.\n\nFirst tries to construct the singleprint from `fingerprint.pb`, then from\n`saved_model.pb`. Attempts to write the `fingerprint.pb` if not found, but\ndoesn't return an error if it isn't writeable.\n\nArgs:\nexport_dir: The directory that contains the SavedModel.\n\nReturns:\nA string containing the singleprint of the SavedModel in `export_dir`.\n\nRaises:\nValueError: If a valid singleprint cannot be constructed from the\nSavedModel.", "source": "github-repos"}
{"code": "def try_checkpoint_metadata(self, trial):\n    if (trial._checkpoint.storage == Checkpoint.MEMORY):\n        logger.debug('Not saving data for trial w/ memory checkpoint.')\n        return\n    try:\n        logger.debug('Saving trial metadata.')\n        self._cached_trial_state[trial.trial_id] = trial.__getstate__()\n    except Exception:\n        logger.exception('Error checkpointing trial metadata.')", "docstring": "Checkpoints metadata.\n\nArgs:\ntrial (Trial): Trial to checkpoint.", "source": "codesearchnet"}
{"code": "def __init__(self,\n               text_encoder_config=None,\n               language_pair=(None, None),\n               **kwargs):\n    \n    encoder_name = (\n        text_encoder_config.name if text_encoder_config else \"plain_text\")\n    name = \"%s%s_%s\" % (language_pair[0], language_pair[1], encoder_name)\n\n    description = (\n        \"Translation dataset from %s to %s, uses encoder %s.\") % (\n            language_pair[0], language_pair[1], encoder_name)\n    super(FloresConfig, self).__init__(\n        name=name, description=description, **kwargs)\n    self.text_encoder_config = (\n        text_encoder_config or tfds.features.text.TextEncoderConfig())\n\n    \n    assert \"en\" in language_pair, (\n        \"Config language pair must contain `en`, got: %s\",\n        self.builder_config.language_pair)\n    source, target = language_pair\n    non_en = source if target == \"en\" else target\n    assert non_en in [\"ne\", \"si\"], (\n        \"Invalid non-en language in pair: %s\", non_en)\n\n    self.language_pair = language_pair", "docstring": "BuilderConfig for FLoRes.\n\nArgs:\ntext_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration\nfor the `tfds.features.text.TextEncoder` used for the features feature.\nlanguage_pair: pair of languages that will be used for translation. Should\ncontain 2-letter coded strings. First will be used at source and second\nas target in supervised mode. For example: (\"se\", \"en\").\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def python_value(self, value):\n    value = super(PendulumDateTimeField, self).python_value(value)\n    if isinstance(value, datetime.datetime):\n        value = pendulum.instance(value)\n    elif isinstance(value, datetime.date):\n        value = pendulum.instance(datetime.datetime.combine(value, datetime.datetime.min.time()))\n    elif isinstance(value, string_types):\n        value = pendulum.parse(value)\n    return value", "docstring": "Return the value in the database as an Pendulum object.\n\nReturns:\npendulum.Pendulum:\nAn instance of Pendulum with the field filled in.", "source": "codesearchnet"}
{"code": "def inspect_volume(self, name):\n    url = self._url('/volumes/{0}', name)\n    return self._result(self._get(url), True)", "docstring": "Retrieve volume info by name.\n\nArgs:\nname (str): volume name\n\nReturns:\n(dict): Volume information dictionary\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> cli.inspect_volume('foobar')\n{u'Driver': u'local',\nu'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',\nu'Name': u'foobar'}", "source": "codesearchnet"}
{"code": "def get_chief_queue_runner(self):\n    if self._gradients_applied is False:\n        raise ValueError('Should be called after apply_gradients().')\n    return self._chief_queue_runner", "docstring": "Returns the QueueRunner for the chief to execute.\n\nThis includes the operations to synchronize replicas: aggregate gradients,\napply to variables, increment global step, insert tokens to token queue.\n\nNote that this can only be called after calling apply_gradients() which\nactually generates this queuerunner.\n\nReturns:\nA `QueueRunner` for chief to execute.\n\nRaises:\nValueError: If this is called before apply_gradients().", "source": "github-repos"}
{"code": "def recipe_url(config, auth, status, read, dataset, table):\n    url(config, {'auth': auth, 'status': status, 'read': read, 'urls': {'bigquery': {'dataset': dataset, 'query': table, 'legacy': False}}, 'to': {'bigquery': {'dataset': dataset, 'table': table}}})", "docstring": "Pull URL list from a table, fetch them, and write the results to another table.\n\nArgs:\nauth (authentication) - Credentials used for rading and writing data.\nstatus (boolean) - Pull status of HTTP request.\nread (boolean) - Pull data from HTTP request.\ndataset (string) - Name of Google BigQuery dataset to write.\ntable (string) - Name of Google BigQuery table to write.", "source": "github-repos"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        images_kwargs = LlavaOnevisionProcessorKwargs._defaults.get('images_kwargs', {})\n        images_kwargs.update(kwargs)\n        size = images_kwargs.get('size', None) or self.image_processor.size\n        size = (size['shortest_edge'], size['shortest_edge']) if 'shortest_edge' in size else (min(size['height'], size['width']), min(size['height'], size['width']))\n        processed_height, processed_width = size\n        batch_num_image_tokens = []\n        num_image_patches = [1] * len(image_sizes)\n        for image_size in image_sizes:\n            orig_height, orig_width = image_size\n            num_image_tokens = self._get_number_of_features(orig_height, orig_width, processed_height, processed_width)\n            if self.vision_feature_select_strategy == 'default':\n                num_image_tokens -= 1\n            batch_num_image_tokens.append(num_image_tokens)\n        vision_data.update({'num_image_tokens': batch_num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\nArgs:\nimage_sizes (List[List[str]], *optional*):\nThe input sizes formatted as (height, width) per each image.\nvideo_sizes (List[List[str]], *optional*):\nThe input sizes formatted as (num_frames, height, width) per each video.\naudio_lengths (List[int], *optional*):\nThe input length formatted as per each audio.\nReturns:\nDict[str, List[int]]: A dictionary mapping each modality (\"image\", \"video\", \"audio\")\nto a list containing the number of placeholder tokens required. If the model doesn't accept\na certain modality or no input sizes are provided, the dict value is set to an empty list.", "source": "github-repos"}
{"code": "def align_up(offset, align):\n    \n    remain = offset % align\n    if remain == 0:\n        return offset\n    else:\n        return offset + (align - remain)", "docstring": "Align ``offset`` up to ``align`` boundary.\n\nArgs:\noffset (int): value to be aligned.\nalign (int): alignment boundary.\n\nReturns:\nint: aligned offset.\n\n>>> align_up(3, 2)\n4\n\n>>> align_up(3, 1)\n3", "source": "juraj-google-style"}
{"code": "def get_pe(self):\n    return PE(self.rest_client.make_request(self.pe), self.rest_client)", "docstring": "Get the Streams processing element this operator is executing in.\n\nReturns:\nPE: Processing element for this operator.\n\n.. versionadded:: 1.9", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, output_module):\n    \n    if not isinstance(output_module, sqlite_4n6time.SQLite4n6TimeOutputModule):\n      raise errors.BadConfigObject(\n          'Output module is not an instance of SQLite4n6TimeOutputModule')\n\n    shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(\n        options, output_module)\n\n    filename = getattr(options, 'write', None)\n    if not filename:\n      raise errors.BadConfigOption(\n          'Output filename was not provided use \"-w filename\" to specify.')\n\n    output_module.SetFilename(filename)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (OutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.\nBadConfigOption: when the output filename was not provided.", "source": "juraj-google-style"}
{"code": "def recommend(self, limit=10):\n        \n        expected_list = [(arm_id, beta_dist.expected_value()) for arm_id, beta_dist in self.__beta_dist_dict.items()]\n        expected_list = sorted(expected_list, key=lambda x: x[1], reverse=True)\n        return expected_list[:limit]", "docstring": "Listup arms and expected value.\n\nArgs:\nlimit:      Length of the list.\n\nReturns:\n[Tuple(`Arms master id`, `expected value`)]", "source": "juraj-google-style"}
{"code": "def get_parameter_dict(self, include_frozen=False):\n        \n        return OrderedDict(zip(\n            self.get_parameter_names(include_frozen=include_frozen),\n            self.get_parameter_vector(include_frozen=include_frozen),\n        ))", "docstring": "Get an ordered dictionary of the parameters\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "juraj-google-style"}
{"code": "def get_contact(self, jid):\n    try:\n        return self.get_contacts()[jid.bare()]\n    except KeyError:\n        raise ContactNotFound\n    except AttributeError:\n        raise AttributeError('jid must be an aioxmpp.JID object')", "docstring": "Returns a contact\n\nArgs:\njid (aioxmpp.JID): jid of the contact\n\nReturns:\ndict: the roster of contacts", "source": "codesearchnet"}
{"code": "def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size):\n    batch_size = sparse_indices.dense_shape[0]\n    sparse_indices = sparse_ops.sparse_slice(sparse_indices, [0, 0], [batch_size, padded_size])\n    indices, values = (sparse_indices.indices, sparse_indices.values)\n    padded_values = array_ops.scatter_nd(indices, math_ops.cast(values, dtypes.int32), shape=(batch_size, padded_size))\n    weights = array_ops.ones_like(values, dtype=dtypes.float32)\n    padded_mask = array_ops.scatter_nd(indices, weights, shape=(batch_size, padded_size))\n    return (padded_values, padded_mask)", "docstring": "Creates statically-sized Tensors containing indices and weights.\n\nFrom third_party/cloud_tpu/models/movielens/tpu_embedding.py\n\nAlso computes sparse_indices.values % embedding_table_size, for equivalent\nfunctionality to sparse_column_with_integerized_feature. The returned\npadded weight Tensor also doubles as a mask indicating which values in\nthe returned padded indices Tensor are indices versus padded zeros.\n\nArgs:\nsparse_indices: SparseTensor of embedding lookup indices.\npadded_size: Number of columns of the returned Tensors. Indices which fall\nout of bounds will be truncated to the padded size.\n\nReturns:\n(sparse_indices.values padded to the specified size,\na mask the same size as the returned padded values in which 0s\nindicate padded locations and 1s (or values from sparse_weights)\nindicate actual values)", "source": "github-repos"}
{"code": "def dumps(ms, single=False, properties=False, pretty_print=True, show_status=False, predicate_modifiers=False, **kwargs):\n    if ((not pretty_print) and kwargs.get('indent')):\n        pretty_print = True\n    if single:\n        ms = [ms]\n    return serialize(ms, properties=properties, pretty_print=pretty_print, show_status=show_status, predicate_modifiers=predicate_modifiers, **kwargs)", "docstring": "Serialize an Xmrs object to a Eds representation\n\nArgs:\nms: an iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to\nserialize (unless the *single* option is `True`)\nsingle (bool): if `True`, treat *ms* as a single\n:class:`~delphin.mrs.xmrs.Xmrs` object instead of as an\niterator\nproperties (bool): if `False`, suppress variable properties\npretty_print (bool): if `True`, add newlines and indentation\nshow_status (bool): if `True`, annotate disconnected graphs and\nnodes\nReturns:\nan :class:`Eds` string representation of a corpus of Xmrs", "source": "codesearchnet"}
{"code": "def _confirm_overwrite(filename):\n        \n\n        message = '{}Would you like to overwrite the contents of {} (y/[n])? '.format(\n            c.Fore.MAGENTA, filename\n        )\n        \n        response = raw_input(message)  \n        response = response.lower()\n\n        if response in ['y', 'yes']:\n            return True\n        return False", "docstring": "Confirm overwrite of template files.\n\nMake sure the user would like to continue downloading a file which will overwrite a file\nin the current directory.\n\nArgs:\nfilename (str): The name of the file to overwrite.\n\nReturns:\nbool: True if the user specifies a \"yes\" response.", "source": "juraj-google-style"}
{"code": "def parse_date_range(date, alt_end_date=None):\n    NOT_ENDED = '9999'\n    all_years = re.findall('\\\\d{4}', date)\n    if alt_end_date:\n        NOT_ENDED = alt_end_date\n    if (not all_years):\n        return ('****', NOT_ENDED)\n    elif (len(all_years) == 1):\n        return (all_years[0], NOT_ENDED)\n    return (all_years[0], all_years[1])", "docstring": "Parse input `date` string in free-text format for four-digit long groups.\n\nArgs:\ndate (str): Input containing years.\n\nReturns:\ntuple: ``(from, to)`` as four-digit strings.", "source": "codesearchnet"}
{"code": "def int_to_str_digit(n):\n    \n    \n    if n < 10:\n        return str(n)\n    \n    elif n < 36:\n        return chr(n + 55)\n    \n    else:\n        return chr(n + 61)", "docstring": "Converts a positive integer, to a single string character.\nWhere: 9 -> \"9\", 10 -> \"A\", 11 -> \"B\", 12 -> \"C\", ...etc\n\nArgs:\nn(int): A positve integer number.\n\nReturns:\nThe character representation of the input digit of value n (str).", "source": "juraj-google-style"}
{"code": "def validate_list(self, value):\n    if (len(value) > self.max_items):\n        raise ValidationError(u'list must not contain more than {max_items} items.'.format(max_items=self.max_items))\n    if (all((isinstance(item, six.string_types) for item in value)) is False):\n        raise ValidationError(u'list must only contain strings.')\n    return value", "docstring": "Validate data before saving to database.\n\nArguemtns:\nvalue(list): list to be validated\n\nReturns:\nlist if validation is successful\n\nRaises:\nValidationError", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context, encoding='utf-8'):\n    \n    super(TARFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._tar_file = None\n    self.encoding = encoding", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): resolver context.\nencoding (Optional[str]): file entry name encoding.", "source": "juraj-google-style"}
{"code": "def replace_list(items, match, replacement):\n    return [replace(item, match, replacement) for item in items]", "docstring": "Replaces occurrences of a match string in a given list of strings and returns\na list of new strings. The match string can be a regex expression.\n\nArgs:\nitems (list):       the list of strings to modify.\nmatch (str):        the search expression.\nreplacement (str):  the string to replace with.", "source": "codesearchnet"}
{"code": "def get_metric_fns(metric_names, labels, outputs):\n    metric_fns = {}\n    for metric_name in metric_names:\n        metric_fn_name = metric_name.split('/')[(- 1)]\n        if hasattr(metrics, metric_fn_name):\n            metric_fn = getattr(metrics, metric_fn_name)\n            metric_fns[metric_name] = metric_fn(labels, outputs)\n        else:\n            raise ValueError('Metric {} is not implemented'.format(metric_fn_name))\n    return metric_fns", "docstring": "Generate a dictionary of metric name to metric function.\n\nArgs:\nmetric_names: list of strings in the format \"prefix/metric_function_name\".\nmetric_function_name should refer to a function name in metrics.py. The\nprefix will be included in the key in the returned dict.\nlabels: a tensor where batch is the first dimension.\noutputs: a tensor of model predictions, same dimensionality as labels.\n\nReturns:\nmetric_fns: dict of metric functions keyed by their name.", "source": "codesearchnet"}
{"code": "def get_plot_frame(map_obj, key_map, cached=False):\n    if (map_obj.kdims and (len(map_obj.kdims) == 1) and (map_obj.kdims[0] == 'Frame')):\n        return map_obj.last\n    key = tuple((key_map[kd.name] for kd in map_obj.kdims if (kd.name in key_map)))\n    if ((key in map_obj.data) and cached):\n        return map_obj.data[key]\n    else:\n        try:\n            return map_obj[key]\n        except KeyError:\n            return None\n        except StopIteration as e:\n            raise e\n        except Exception:\n            print(traceback.format_exc())\n            return None", "docstring": "Returns the current frame in a mapping given a key mapping.\n\nArgs:\nobj: Nested Dimensioned object\nkey_map: Dictionary mapping between dimensions and key value\ncached: Whether to allow looking up key in cache\n\nReturns:\nThe item in the mapping corresponding to the supplied key.", "source": "codesearchnet"}
{"code": "def assemble(ops, target=None):\n    \n\n\n    target = get_py_internals(target)\n\n    opmap = target['opmap']\n    hasjrel = target['hasjrel']\n    hasjabs = target['hasjabs']\n    hasjump = set(hasjrel) | set(hasjabs)\n    have_argument = target['have_argument']\n    extended_arg = target['extended_arg']\n    wordcode = target['wordcode']\n\n    if not wordcode:\n        def encode_op(output, op_code, op_arg=None):\n            n = 1\n            if op_arg is None:\n                output.append(op_code)\n            else:\n                n += 2\n                ext_arg = op_arg >> 16\n                if ext_arg:\n                    n += 3\n                    output.extend([extended_arg, ext_arg & 255, ext_arg >> 8])\n                    op_arg &= 65535\n                output.extend([op_code, op_arg & 255, op_arg >> 8])\n            return n\n    else:\n        def encode_op(output, op_code, op_arg=None):\n            n = 2\n            if op_arg is None:\n                output.extend([op_code, 0])\n            else:\n                ext_arg = op_arg >> 8\n                if ext_arg:\n                    n += encode_op(extended_arg, ext_arg)\n                output.extend([op_code, op_arg & 255])\n            return n\n\n    \n    \n    \n    \n    \n\n    label_address = {}\n    while True:\n        retry = False\n        output = bytearray()\n        address = 0\n\n        for op in ops:\n            if isinstance(op, Label):\n                if label_address.get(op) != address:\n                    retry = True\n                    label_address[op] = address\n                continue\n\n            op_code = opmap[op.name]\n            op_arg = op.arg\n\n            if op_code >= have_argument and op_arg is None:\n                \n                raise ValueError('Opcode %s requires argument.' % op)\n            elif op_code < have_argument and op_arg is not None:\n                \n                raise ValueError('Opcode %s should not have an argument.' % op)\n            elif isinstance(op_arg, Label):\n                if op_code not in hasjump:\n                    \n                    raise ValueError('Did not expect label as argument for opcode %s.' % op)\n\n                if op_arg not in ops:\n                    \n                    raise ValueError('Label is not part of this op list.')\n\n                \n                op_arg = label_address.get(op_arg)\n                if op_arg is None:\n                    \n                    address += encode_op(output, op_code, 0)\n                    continue\n\n                if op_code in hasjrel:\n                    op_arg -= address\n            elif op_code in hasjump:\n                \n                raise ValueError('Expected label as argument for opcode %s.' 
% op)\n\n            \n            n = encode_op(output, op_code, op_arg)\n            address += n\n\n            if op_code in hasjrel:\n                if not wordcode:\n                    op_arg = output[-2] + (output[-1] << 8)\n                    if op_arg < n:\n                        ext_arg = output[-5] + (output[-4] << 8) - 1\n                        output[-5], output[-4] = ext_arg & 255, ext_arg >> 8\n                        op_arg += 65536\n                    op_arg -= n\n                    output[-2], output[-1] = op_arg & 255, op_arg >> 8\n                else:\n                    for i in itertools.count(1, 2):\n                        if n <= output[-i]:\n                            output[-i] -= n\n                            break\n                        output[-i] += 256 - n\n                        n = 1\n\n        if not retry:\n            return bytes(output)", "docstring": "Assemble a set of :class:`Op` and :class:`Label` instance back into\nbytecode.\n\nArguments:\nops(list): A list of opcodes and labels (as returned by\n:func:`disassemble`).\ntarget: The opcode specification of the targeted python\nversion. If this is ``None`` the specification of the currently\nrunning python version will be used.\n\nReturns:\nbytes: The assembled bytecode.", "source": "juraj-google-style"}
{"code": "def response_data_to_model_instance(self, response_data):\n        \n        \n        response_data[\"datetime_created\"] = dateutil.parser.parse(\n            response_data[\"datetime_created\"]\n        )\n\n        \n        return super(\n            BaseTaskTypeManager, self\n        ).response_data_to_model_instance(response_data)", "docstring": "Convert response data to a task type model.\n\nArgs:\nresponse_data (dict): The data from the request's response.\n\nReturns:\n:class:`saltant.models.base_task_type.BaseTaskType`:\nA model instance representing the task type from the\nreponse data.", "source": "juraj-google-style"}
{"code": "def _CreateUserIdentifier(identifier_type=None, value=None):\n  \n  if identifier_type in _HASHED_IDENTIFIER_TYPES:\n    \n    \n    value = hashlib.sha256(value.strip().lower()).hexdigest()\n\n  user_identifier = {\n      'userIdentifierType': identifier_type,\n      'value': value\n  }\n\n  return user_identifier", "docstring": "Creates a user identifier from the specified type and value.\n\nArgs:\nidentifier_type: a str specifying the type of user identifier.\nvalue: a str value of the identifier; to be hashed using SHA-256 if needed.\n\nReturns:\nA dict specifying a user identifier, with a value hashed using SHA-256 if\nneeded.", "source": "juraj-google-style"}
{"code": "def _BiasAddGradGrad(op: ops.Operation, received_grad):\n    try:\n        data_format = op.get_attr('data_format')\n    except ValueError:\n        data_format = None\n    shape = array_ops.shape(op.inputs[0])\n    bias_shape = array_ops.shape(received_grad)\n    if data_format == b'NCHW':\n        expanded_shape = array_ops.concat([array_ops.ones_like(shape[:1]), bias_shape, array_ops.ones_like(shape[2:])], 0)\n        tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0)\n    else:\n        expanded_shape = array_ops.concat([array_ops.ones_like(shape[:-1]), bias_shape], 0)\n        tile_mults = array_ops.concat([shape[:-1], [1]], 0)\n    expanded_grad = array_ops.reshape(received_grad, expanded_shape)\n    return array_ops.tile(expanded_grad, tile_mults)", "docstring": "Gradient for the BiasAddGrad op.\n\nArgs:\nop: BiasAddGrad op for which we are calculating gradients.\nreceived_grad: The gradients passed to the BiasAddGrad op.\n\nReturns:\nA single gradient Tensor for the input to BiasAddGrad (which\nis the gradient of the bias term in BiasAdd)", "source": "github-repos"}
{"code": "def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):\n    if kwargs.get('post_data_raw'):\n        logging.debug(('post_content: %s\\npost_data_raw: %s' % (url, kwargs['post_data_raw'])))\n    else:\n        logging.debug(('post_content: %s\\npost_data: %s' % (url, post_data)))\n    req = request.Request(url, headers=headers)\n    if cookies:\n        cookies.add_cookie_header(req)\n        req.headers.update(req.unredirected_hdrs)\n    if kwargs.get('post_data_raw'):\n        post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')\n    else:\n        post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')\n    response = urlopen_with_retry(req, data=post_data_enc)\n    data = response.read()\n    content_encoding = response.getheader('Content-Encoding')\n    if (content_encoding == 'gzip'):\n        data = ungzip(data)\n    elif (content_encoding == 'deflate'):\n        data = undeflate(data)\n    if decoded:\n        charset = match1(response.getheader('Content-Type'), 'charset=([\\\\w-]+)')\n        if (charset is not None):\n            data = data.decode(charset)\n        else:\n            data = data.decode('utf-8')\n    return data", "docstring": "Post the content of a URL via sending a HTTP POST request.\n\nArgs:\nurl: A URL.\nheaders: Request headers used by the client.\ndecoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\nReturns:\nThe content as a string.", "source": "codesearchnet"}
{"code": "def mahalanobis_distances(df, axis=0):\n    df = (df.transpose() if (axis == 1) else df)\n    means = df.mean()\n    try:\n        inv_cov = np.linalg.inv(df.cov())\n    except LinAlgError:\n        return pd.Series(([np.NAN] * len(df.index)), df.index, name='Mahalanobis')\n    dists = []\n    for (i, sample) in df.iterrows():\n        dists.append(mahalanobis(sample, means, inv_cov))\n    return pd.Series(dists, df.index, name='Mahalanobis')", "docstring": "Returns a pandas Series with Mahalanobis distances for each sample on the\naxis.\n\nNote: does not work well when # of observations < # of dimensions\nWill either return NaN in answer\nor (in the extreme case) fail with a Singular Matrix LinAlgError\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on\naxis: 0 to find outlier rows, 1 to find outlier columns", "source": "codesearchnet"}
{"code": "def get_course_current_grades(self, course_id):\n        \n        resp = self.requester.get(\n            urljoin(\n                self.base_url,\n                '/api/grades/v1/courses/{course_key}/'.format(course_key=course_id)\n            )\n        )\n        resp.raise_for_status()\n        resp_json = resp.json()\n        if 'results' in resp_json:\n            grade_entries = [CurrentGrade(entry) for entry in resp_json[\"results\"]]\n            while resp_json['next'] is not None:\n                resp = self.requester.get(resp_json['next'])\n                resp.raise_for_status()\n                resp_json = resp.json()\n                grade_entries.extend((CurrentGrade(entry) for entry in resp_json[\"results\"]))\n        else:\n            grade_entries = [CurrentGrade(entry) for entry in resp_json]\n\n        return CurrentGradesByCourse(grade_entries)", "docstring": "Returns a CurrentGradesByCourse object for all users in the specified course.\n\nArgs:\ncourse_id (str): an edX course ids.\n\nReturns:\nCurrentGradesByCourse: object representing the student current grades\n\nAuthorization:\nThe authenticated user must have staff permissions to see grades for all users\nin a course.", "source": "juraj-google-style"}
{"code": "def _form_output(span_doc: span, output_format: str, relations: Dict, patterns: List) -> str:\n        \n\n        format_value = []\n        output_inf = [a_pattern.in_output for a_pattern in patterns]\n        for i in range(len(output_inf)):\n            token_range = relations[i]\n            if token_range and output_inf[i]:\n                format_value.append(span_doc[token_range[0]:token_range[1]].text)\n\n        if not output_format:\n            return \" \".join(format_value)\n\n        result_str = re.sub(\"{}\", \" \".join(format_value), output_format)\n\n        positions = re.findall(\"{[0-9]+}\", result_str)\n\n        if not positions:\n            return result_str\n\n        position_indices = [int(x[1:-1]) for x in positions]\n        if max(position_indices) < len(format_value):\n            result_str = result_str.format(*format_value)\n        else:\n            try:\n                result_str = result_str.format(\"\", *format_value)\n            except:\n                positions = [x for x in positions if int(x[1:-1]) > len(format_value)-1 or int(x[1:-1]) < 0]\n                for pos in positions:\n                    result_str = result_str.replace(pos, \"\")\n                result_str = result_str.format(*format_value)\n\n        return result_str", "docstring": "Form an output value according to user input of output_format\nArgs:\nspan_doc: span\nformat: str\nrelations: Dict\npatterns: List\n\nReturns: str", "source": "juraj-google-style"}
{"code": "def deserialize(config, custom_objects=None):\n    return serialization_lib.deserialize_keras_object(config, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)", "docstring": "Deserializes a serialized metric class/function instance.\n\nArgs:\nconfig: Metric configuration.\ncustom_objects: Optional dictionary mapping names (strings)\nto custom objects (classes and functions) to be\nconsidered during deserialization.\n\nReturns:\nA Keras `Metric` instance or a metric function.", "source": "github-repos"}
{"code": "def extract_header_comment_key_value_tuples_from_file(file_descriptor):\n    \n    file_data = file_descriptor.read()\n    findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data, re.MULTILINE | re.DOTALL)\n\n    returned_list = []\n    for header_comment, _ignored, raw_comments, key, value in findall_result:\n        comments = re.findall(\"/\\* (.*?) \\*/\", raw_comments)\n        if len(comments) == 0:\n            comments = [u\"\"]\n        returned_list.append((header_comment, comments, key, value))\n\n    return returned_list", "docstring": "Extracts tuples representing comments and localization entries from strings file.\n\nArgs:\nfile_descriptor (file): The file to read the tuples from\n\nReturns:\nlist : List of tuples representing the headers and localization entries.", "source": "juraj-google-style"}
{"code": "def _ParseRecord(\n      self, parser_mediator, record_index, evtx_record, recovered=False):\n    \n    event_data = self._GetEventData(\n        parser_mediator, record_index, evtx_record, recovered=recovered)\n\n    try:\n      written_time = evtx_record.get_written_time_as_integer()\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read written time from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n      written_time = None\n\n    if not written_time:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n      date_time = dfdatetime_filetime.Filetime(timestamp=written_time)\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract data from a Windows XML EventLog (EVTX) record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord_index (int): event record index.\nevtx_record (pyevtx.record): event record.\nrecovered (Optional[bool]): True if the record was recovered.", "source": "juraj-google-style"}
{"code": "def set_filetype(self, filetype, bufnr=None):\n    if bufnr:\n        self._vim.command(((str(bufnr) + 'bufdo set filetype=') + filetype))\n    else:\n        self._vim.command(('set filetype=' + filetype))", "docstring": "Set filetype for a buffer.\n\nNote: it's a quirk of Vim's Python API that using the buffer.options\ndictionary to set filetype does not trigger ``FileType`` autocommands,\nhence this implementation executes as a command instead.\n\nArgs:\nfiletype (str): The filetype to set.\nbufnr (Optional[int]): A Vim buffer number, current if ``None``.", "source": "codesearchnet"}
{"code": "def _pre_action(self, action):\n        \n\n        \n        assert len(action) == self.dof, \"environment got invalid action dimension\"\n        low, high = self.action_spec\n        action = np.clip(action, low, high)\n\n        if self.has_gripper:\n            arm_action = action[: self.mujoco_robot.dof]\n            gripper_action_in = action[\n                self.mujoco_robot.dof : self.mujoco_robot.dof + self.gripper.dof\n            ]\n            gripper_action_actual = self.gripper.format_action(gripper_action_in)\n            action = np.concatenate([arm_action, gripper_action_actual])\n\n        \n        ctrl_range = self.sim.model.actuator_ctrlrange\n        bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])\n        weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])\n        applied_action = bias + weight * action\n        self.sim.data.ctrl[:] = applied_action\n\n        \n        self.sim.data.qfrc_applied[\n            self._ref_joint_vel_indexes\n        ] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]\n\n        if self.use_indicator_object:\n            self.sim.data.qfrc_applied[\n                self._ref_indicator_vel_low : self._ref_indicator_vel_high\n            ] = self.sim.data.qfrc_bias[\n                self._ref_indicator_vel_low : self._ref_indicator_vel_high\n            ]", "docstring": "Overrides the superclass method to actuate the robot with the\npassed joint velocities and gripper control.\n\nArgs:\naction (numpy array): The control to apply to the robot. The first\n@self.mujoco_robot.dof dimensions should be the desired\nnormalized joint velocities and if the robot has\na gripper, the next @self.gripper.dof dimensions should be\nactuation controls for the gripper.", "source": "juraj-google-style"}
{"code": "def delete_group(self, name):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.delete_group(name)", "docstring": "Delete given group.\n\nArgs:\nname (string): Name of group.\n\nReturns:\n(bool): True on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def latex_sanitize_command_name(_cmdname):\n    r\n    import utool as ut\n    command_name = _cmdname\n    try:\n        def subroman(match):\n            import roman\n            try:\n                groupdict = match.groupdict()\n                num = int(groupdict['num'])\n                if num == 0:\n                    return ''\n                return roman.toRoman(num)\n            except Exception as ex:\n                ut.printex(ex, keys=['groupdict'])\n                raise\n        command_name = re.sub(ut.named_field('num', r'\\d+'), subroman, command_name)\n    except ImportError as ex:\n        if ut.SUPER_STRICT:\n            ut.printex(ex)\n            raise\n    \n    command_name = re.sub(r'[\\d' + re.escape('\n    \n    \n    \n    \n    \n    str_list = re.split('[_ ]', command_name)\n    \n    command_name = ut.to_camel_case('_'.join(str_list), mixed=True)\n    return command_name", "docstring": "r\"\"\"\nArgs:\n_cmdname (?):\n\nReturns:\n?: command_name\n\nCommandLine:\npython -m utool.util_latex --exec-latex_sanitize_command_name\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_latex import *  # NOQA\n>>> _cmdname = '#foo bar.'\n>>> command_name = latex_sanitize_command_name(_cmdname)\n>>> result = ('command_name = %s' % (str(command_name),))\n>>> print(result)\nFooBar", "source": "juraj-google-style"}
{"code": "def get_ratio(self, max_denominator=5, index_none=None):\n        \n        structure = self.initial_structure\n        lat_type = self.lat_type\n        if lat_type == 't' or lat_type == 'h':\n            \n            a, c = (structure.lattice.a, structure.lattice.c)\n            if c > a:\n                frac = Fraction(c ** 2 / a ** 2).limit_denominator(max_denominator)\n                ratio = [frac.numerator, frac.denominator]\n            else:\n                frac = Fraction(a ** 2 / c ** 2).limit_denominator(max_denominator)\n                ratio = [frac.denominator, frac.numerator]\n        elif lat_type == 'r':\n            \n            cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)\n            frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)\n            ratio = [frac.numerator, frac.denominator]\n        elif lat_type == 'o':\n            \n            ratio = [None] * 3\n            lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)\n            index = [0, 1, 2]\n            if index_none is None:\n                min_index = np.argmin(lat)\n                index.pop(min_index)\n                frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)\n                frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)\n                com_lcm = lcm(frac1.denominator, frac2.denominator)\n                ratio[min_index] = com_lcm\n                ratio[index[0]] = frac1.numerator * int(round((com_lcm / frac1.denominator)))\n                ratio[index[1]] = frac2.numerator * int(round((com_lcm / frac2.denominator)))\n            else:\n                index.pop(index_none)\n                if (lat[index[0]] > lat[index[1]]):\n                    frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)\n                    ratio[index[0]] = frac.numerator\n                    ratio[index[1]] = frac.denominator\n                else:\n                    frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)\n                    ratio[index[1]] = frac.numerator\n                    ratio[index[0]] = frac.denominator\n        elif lat_type == 'c':\n            raise RuntimeError('Cubic system does not need axial ratio.')\n        else:\n            raise RuntimeError('Lattice type not implemented.')\n        return ratio", "docstring": "find the axial ratio needed for GB generator input.\nArgs:\nmax_denominator (int): the maximum denominator for\nthe computed ratio, default to be 5.\nindex_none (int): specify the irrational axis.\n0-a, 1-b, 2-c. Only may be needed for orthorombic system.\nReturns:\naxial ratio needed for GB generator (list of integers).", "source": "juraj-google-style"}
{"code": "def stop_tuning_job(self, name):\n        \n        try:\n            LOGGER.info('Stopping tuning job: {}'.format(name))\n            self.sagemaker_client.stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)\n        except ClientError as e:\n            error_code = e.response['Error']['Code']\n            \n            if error_code == 'ValidationException':\n                LOGGER.info('Tuning job: {} is already stopped or not running.'.format(name))\n            else:\n                LOGGER.error('Error occurred while attempting to stop tuning job: {}. Please try again.'.format(name))\n                raise", "docstring": "Stop the Amazon SageMaker hyperparameter tuning job with the specified name.\n\nArgs:\nname (str): Name of the Amazon SageMaker hyperparameter tuning job.\n\nRaises:\nClientError: If an error occurs while trying to stop the hyperparameter tuning job.", "source": "juraj-google-style"}
{"code": "def clean_df(df, header=None, **read_csv_kwargs):\n    df = read_csv(df, header=header, **read_csv_kwargs)\n    df = df.fillna(' ')\n    for col in df.columns:\n        df[col] = df[col].apply(unicode2ascii)\n    return df", "docstring": "Convert UTF8 characters in a CSV file or dataframe into ASCII\n\nArgs:\ndf (DataFrame or str): DataFrame or path or url to CSV", "source": "codesearchnet"}
{"code": "def sanitize_filename(filename):\n    dirname = os.path.dirname(filename)\n    basename = os.path.basename(filename)\n    basename = _sanitize_windows_filename(basename)\n    basename = _truncate_filename(basename, LINUX_MAX_FILENAME_LENGTH)\n    basename = basename.replace(' ', '_')\n    return os.path.join(dirname, basename)", "docstring": "Sanitizes a filename for various operating systems.\n\nArgs:\nfilename: string, the filename to sanitize.\n\nReturns:\nA string that is safe to use as a filename on various operating systems.", "source": "github-repos"}
{"code": "def AddConnectedPeer(self, peer):\n        \n        \n        self.RemoveFromQueue(peer.address)\n        self.AddKnownAddress(peer.address)\n\n        if len(self.Peers) > settings.CONNECTED_PEER_MAX:\n            peer.Disconnect(\"Max connected peers reached\", isDead=False)\n\n        if peer not in self.Peers:\n            self.Peers.append(peer)\n        else:\n            \n            \n            \n            self.RemoveKnownAddress(peer.address)\n            peer.Disconnect()", "docstring": "Add a new connect peer to the known peers list.\n\nArgs:\npeer (NeoNode): instance.", "source": "juraj-google-style"}
{"code": "def add_time(data):\n    payload = data['data']\n    updated = data['updated'].date()\n    if (updated == date.today()):\n        payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')\n    elif (updated >= (date.today() - timedelta(days=1))):\n        payload['last_updated'] = 'yesterday'\n    elif (updated >= (date.today() - timedelta(days=7))):\n        payload['last_updated'] = updated.strftime('on %A')\n    else:\n        payload['last_updated'] = updated.strftime('%Y-%m-%d')\n    return payload", "docstring": "And a friendly update time to the supplied data.\n\nArguments:\ndata (:py:class:`dict`): The response data and its update time.\n\nReturns:\n:py:class:`dict`: The data with a friendly update time.", "source": "codesearchnet"}
{"code": "def to_pandas(self):\n    dataframe = self.get().to_pandas()\n    assert ((type(dataframe) is pandas.DataFrame) or (type(dataframe) is pandas.Series))\n    return dataframe", "docstring": "Convert the object stored in this partition to a Pandas DataFrame.\n\nReturns:\nA Pandas DataFrame.", "source": "codesearchnet"}
{"code": "def remove_by_threshold(self, threshold=5):\n    keys = [x for x in self._dictionary.keys()]\n    for key in keys:\n        if (self._dictionary[key] <= threshold):\n            self._dictionary.pop(key)\n    self._update_dictionary()", "docstring": "Remove all words at, or below, the provided threshold\n\nArgs:\nthreshold (int): The threshold at which a word is to be \\\nremoved", "source": "codesearchnet"}
{"code": "def download(self, url, destination_path):\n    self._pbar_url.update_total(1)\n    future = self._executor.submit(self._sync_download, url, destination_path)\n    return promise.Promise.resolve(future)", "docstring": "Download url to given path.\n\nReturns Promise -> sha256 of downloaded file.\n\nArgs:\nurl: address of resource to download.\ndestination_path: `str`, path to directory where to download the resource.\n\nReturns:\nPromise obj -> (`str`, int): (downloaded object checksum, size in bytes).", "source": "codesearchnet"}
{"code": "def CreateSmartShoppingAd(client, ad_group_id):\n  \n  ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')\n  \n  adgroup_ad = {\n      'adGroupId': ad_group_id,\n      \n      'ad': {\n          'xsi_type': 'GoalOptimizedShoppingAd'\n      }\n  }\n\n  ad_operation = {\n      'operator': 'ADD',\n      'operand': adgroup_ad\n  }\n\n  \n  ad_result = ad_group_ad_service.mutate([ad_operation])\n\n  for adgroup_ad in ad_result['value']:\n    print 'Smart Shopping ad with ID \"%s\" was added.' % adgroup_ad['ad']['id']", "docstring": "Adds a new Smart Shopping ad.\n\nArgs:\nclient: an AdWordsClient instance.\nad_group_id: an integer ID for an ad group.", "source": "juraj-google-style"}
{"code": "def Validate(self, problems, validate_children=True):\n    \n    self.ValidateRouteId(problems)\n    self.ValidateServicePeriod(problems)\n    self.ValidateDirectionId(problems)\n    self.ValidateTripId(problems)\n    self.ValidateShapeIdsExistInShapeList(problems)\n    self.ValidateRouteIdExistsInRouteList(problems)\n    self.ValidateServiceIdExistsInServiceList(problems)\n    self.ValidateBikesAllowed(problems)\n    self.ValidateWheelchairAccessible(problems)\n    if self._schedule and validate_children:\n      self.ValidateChildren(problems)", "docstring": "Validate attributes of this object.\n\nCheck that this object has all required values set to a valid value without\nreference to the rest of the schedule. If the _schedule attribute is set\nthen check that references such as route_id and service_id are correct.\n\nArgs:\nproblems: A ProblemReporter object\nvalidate_children: if True and the _schedule attribute is set than call\nValidateChildren", "source": "juraj-google-style"}
{"code": "def poll(self, query_id=None, sequence_no=None, params=None, **kwargs):\n    path = '/logging-service/v1/queries/{}/{}'.format(query_id, sequence_no)\n    r = self._httpclient.request(method='GET', url=self.url, params=params, path=path, **kwargs)\n    return r", "docstring": "Poll for asynchronous query results.\n\nContinue to poll for results until this endpoint reports\nJOB_FINISHED or JOB_FAILED. The results of queries can be\nreturned in multiple pages, each of which may contain many log\nrecords. Use this endpoint to poll for query result batches, as\nwell as to track query result status.\n\nArgs:\nparams (dict): Payload/request dictionary.\nquery_id (str): Specifies the ID of the query job.\nsequence_no (int): Specifies the sequenceNo.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nRefer to ``logging_query.py`` example.", "source": "codesearchnet"}
{"code": "def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, 'List']:\n    proceed_with_standard_apply = True\n    if self._value_spec:\n        if value_spec and (not value_spec.is_compatible(self._value_spec)):\n            raise ValueError(utils.message_on_path(f'List (spec={self._value_spec!r}) cannot be assigned to an incompatible field (spec={value_spec!r}).', path))\n        if self._allow_partial == allow_partial:\n            proceed_with_standard_apply = False\n        else:\n            self._allow_partial = allow_partial\n    elif isinstance(value_spec, pg_typing.List):\n        self._value_spec = value_spec\n    return (proceed_with_standard_apply, self)", "docstring": "Implement pg.typing.CustomTyping interface.\n\nArgs:\npath: KeyPath of current object.\nvalue_spec: Origin value spec of the field.\nallow_partial: Whether allow partial object to be created.\nchild_transform: Function to transform child node values in dict_obj into\ntheir final values. Transform function is called on leaf nodes first,\nthen on their containers, recursively.\n\nReturns:\nA tuple (proceed_with_standard_apply, transformed value)", "source": "github-repos"}
{"code": "def Lookup(self, name):\n    if not self._name2item:\n        self._InitCache()\n    return self._name2item[name]", "docstring": "Convenience function: Look up a given name in the class namespace.\n\nTries to find a method or constant by this name in the class.\n\nArgs:\nname: Name to look up.\n\nReturns:\nA Constant or Function instance.\n\nRaises:\nKeyError: if this identifier doesn't exist in this class.", "source": "github-repos"}
{"code": "def register_validator(flag_name, checker, message='Flag validation failed', flag_values=_flagvalues.FLAGS):\n    v = SingleFlagValidator(flag_name, checker, message)\n    _add_validator(flag_values, v)", "docstring": "Adds a constraint, which will be enforced during program execution.\n\nThe constraint is validated when flags are initially parsed, and after each\nchange of the corresponding flag's value.\nArgs:\nflag_name: str, name of the flag to be checked.\nchecker: callable, a function to validate the flag.\ninput - A single positional argument: The value of the corresponding\nflag (string, boolean, etc.  This value will be passed to checker\nby the library).\noutput - bool, True if validator constraint is satisfied.\nIf constraint is not satisfied, it should either return False or\nraise flags.ValidationError(desired_error_message).\nmessage: str, error text to be shown to the user if checker returns False.\nIf checker raises flags.ValidationError, message from the raised\nerror will be shown.\nflag_values: flags.FlagValues, optional FlagValues instance to validate\nagainst.\nRaises:\nAttributeError: Raised when flag_name is not registered as a valid flag\nname.", "source": "codesearchnet"}
{"code": "def copy_raw_block(self):\n    ctable = []\n    (r, c) = (0, 0)\n    try:\n        for row_index in range(self.start[0], self.end[0]):\n            r = row_index\n            row = []\n            ctable.append(row)\n            for column_index in range(self.start[1], self.end[1]):\n                c = column_index\n                row.append(self.table[row_index][column_index])\n    except IndexError:\n        raise InvalidBlockError(('Missing table element at [%d, %d]' % (r, c)))\n    return ctable", "docstring": "Copies the block as it was originally specified by start and end into a new table.\n\nReturns:\nA copy of the block with no block transformations.", "source": "codesearchnet"}
{"code": "def decode_row(line, fields=None):\n    cols = line.rstrip('\\n').split(_field_delimiter)\n    cols = list(map(unescape, cols))\n    if (fields is not None):\n        if (len(cols) != len(fields)):\n            raise ItsdbError('Wrong number of fields: {} != {}'.format(len(cols), len(fields)))\n        for i in range(len(cols)):\n            col = cols[i]\n            if col:\n                field = fields[i]\n                col = _cast_to_datatype(col, field)\n            cols[i] = col\n    return cols", "docstring": "Decode a raw line from a profile into a list of column values.\n\nDecoding involves splitting the line by the field delimiter\n(`\"@\"` by default) and unescaping special characters. If *fields*\nis given, cast the values into the datatype given by their\nrespective Field object.\n\nArgs:\nline: a raw line from a [incr tsdb()] profile.\nfields: a list or Relation object of Fields for the row\nReturns:\nA list of column values.", "source": "codesearchnet"}
{"code": "def quantize_flow(flow, max_val=0.02, norm=True):\n    \n    h, w, _ = flow.shape\n    dx = flow[..., 0]\n    dy = flow[..., 1]\n    if norm:\n        dx = dx / w  \n        dy = dy / h\n    \n    flow_comps = [\n        quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]\n    ]\n    return tuple(flow_comps)", "docstring": "Quantize flow to [0, 255].\n\nAfter this step, the size of flow will be much smaller, and can be\ndumped as jpeg images.\n\nArgs:\nflow (ndarray): (h, w, 2) array of optical flow.\nmax_val (float): Maximum value of flow, values beyond\n[-max_val, max_val] will be truncated.\nnorm (bool): Whether to divide flow values by image width/height.\n\nReturns:\ntuple[ndarray]: Quantized dx and dy.", "source": "juraj-google-style"}
{"code": "def fpn_map_rois_to_levels(boxes):\n    sqrtarea = tf.sqrt(tf_area(boxes))\n    level = tf.cast(tf.floor((4 + (tf.log(((sqrtarea * (1.0 / 224)) + 1e-06)) * (1.0 / np.log(2))))), tf.int32)\n    level_ids = [tf.where((level <= 2)), tf.where(tf.equal(level, 3)), tf.where(tf.equal(level, 4)), tf.where((level >= 5))]\n    level_ids = [tf.reshape(x, [(- 1)], name='roi_level{}_id'.format((i + 2))) for (i, x) in enumerate(level_ids)]\n    num_in_levels = [tf.size(x, name='num_roi_level{}'.format((i + 2))) for (i, x) in enumerate(level_ids)]\n    add_moving_summary(*num_in_levels)\n    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]\n    return (level_ids, level_boxes)", "docstring": "Assign boxes to level 2~5.\n\nArgs:\nboxes (nx4):\n\nReturns:\n[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.\n[tf.Tensor]: 4 tensors, the gathered boxes in each level.\n\nBe careful that the returned tensor could be empty.", "source": "codesearchnet"}
{"code": "def nb_ll(data, P, R):\n    \n    \n    \n    genes, cells = data.shape\n    clusters = P.shape[1]\n    lls = np.zeros((cells, clusters))\n    for c in range(clusters):\n        P_c = P[:,c].reshape((genes, 1))\n        R_c = R[:,c].reshape((genes, 1))\n        \n        ll = gammaln(R_c + data) - gammaln(R_c) \n        ll += data*np.log(P_c) + xlog1py(R_c, -P_c)\n        \n        lls[:,c] = ll.sum(0)\n    return lls", "docstring": "Returns the negative binomial log-likelihood of the data.\n\nArgs:\ndata (array): genes x cells\nP (array): NB success probability param - genes x clusters\nR (array): NB stopping param - genes x clusters\n\nReturns:\ncells x clusters array of log-likelihoods", "source": "juraj-google-style"}
{"code": "def set_style(self, column, style):\n        \n\n        column_idx = None\n\n        while len(self.headers) > len(self.__style_list):\n            self.__style_list.append(None)\n\n        if isinstance(column, six.integer_types):\n            column_idx = column\n        elif isinstance(column, six.string_types):\n            try:\n                column_idx = self.headers.index(column)\n            except ValueError:\n                pass\n\n        if column_idx is not None:\n            self.__style_list[column_idx] = style\n            self.__clear_preprocess()\n            self._dp_extractor.format_flags_list = [\n                _ts_to_flag[self.__get_thousand_separator(col_idx)]\n                for col_idx in range(len(self.__style_list))\n            ]\n            return\n\n        raise ValueError(\"column must be an int or string: actual={}\".format(column))", "docstring": "Set |Style| for a specific column.\n\nArgs:\ncolumn (|int| or |str|):\nColumn specifier. column index or header name correlated with the column.\nstyle (|Style|):\nStyle value to be set to the column.\n\nRaises:\nValueError: If the column specifier is invalid.", "source": "juraj-google-style"}
{"code": "def run_without_tensor_float_32(description: str) -> Callable[[Callable[..., Any]], Callable[..., None]]:\n\n    def decorator(f: Callable[..., Any]) -> Callable[..., None]:\n\n        @functools.wraps(f)\n        def decorated(*args, **kwargs):\n            allowed = config.tensor_float_32_execution_enabled()\n            try:\n                config.enable_tensor_float_32_execution(False)\n                f(*args, **kwargs)\n            finally:\n                config.enable_tensor_float_32_execution(allowed)\n        return tf_decorator.make_decorator(f, decorated)\n    return decorator", "docstring": "Execute test with TensorFloat-32 disabled.\n\nWhile almost every real-world deep learning model runs fine with\nTensorFloat-32, many tests use assertAllClose or similar methods.\nTensorFloat-32 matmuls typically will cause such methods to fail with the\ndefault tolerances.\n\nArgs:\ndescription: A description used for documentation purposes, describing why\nthe test requires TensorFloat-32 to be disabled.\n\nReturns:\nDecorator which runs a test with TensorFloat-32 disabled.", "source": "github-repos"}
{"code": "def _find_longest_parent_path(path_set, path):\n    while (path not in path_set):\n        if (not path):\n            return None\n        path = os.path.dirname(path)\n    return path", "docstring": "Finds the longest \"parent-path\" of 'path' in 'path_set'.\n\nThis function takes and returns \"path-like\" strings which are strings\nmade of strings separated by os.sep. No file access is performed here, so\nthese strings need not correspond to actual files in some file-system..\nThis function returns the longest ancestor path\nFor example, for path_set=[\"/foo/bar\", \"/foo\", \"/bar/foo\"] and\npath=\"/foo/bar/sub_dir\", returns \"/foo/bar\".\n\nArgs:\npath_set: set of path-like strings -- e.g. a list of strings separated by\nos.sep. No actual disk-access is performed here, so these need not\ncorrespond to actual files.\npath: a path-like string.\n\nReturns:\nThe element in path_set which is the longest parent directory of 'path'.", "source": "codesearchnet"}
{"code": "def profile(self, num):\n        \n        baseuri = self._BASE_URI + \"company/{}\".format(num)\n        res = self.session.get(baseuri)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for company profile by company number.\n\nArgs:\nnum (str): Company number to search on.", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n        \n        required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition',\n                         'humidity_condition', 'wind_condition', 'sky_condition')\n        for key in required_keys:\n            assert key in data, 'Required key \"{}\" is missing!'.format(key)\n\n        return cls(data['name'], data['day_type'], Location.from_json(data['location']),\n                   DryBulbCondition.from_json(data['dry_bulb_condition']),\n                   HumidityCondition.from_json(data['humidity_condition']),\n                   WindCondition.from_json(data['wind_condition']),\n                   SkyCondition.from_json(data['sky_condition']))", "docstring": "Create a Design Day from a dictionary.\n\nArgs:\ndata = {\n\"name\": string,\n\"day_type\": string,\n\"location\": ladybug Location schema,\n\"dry_bulb_condition\": ladybug DryBulbCondition schema,\n\"humidity_condition\": ladybug HumidityCondition schema,\n\"wind_condition\": ladybug WindCondition schema,\n\"sky_condition\": ladybug SkyCondition schema}", "source": "juraj-google-style"}
{"code": "def request_stop(self, ex=None):\n    with self._lock:\n        ex = self._filter_exception(ex)\n        if self._joined:\n            if isinstance(ex, tuple):\n                _, ex_instance, _ = ex\n                raise ex_instance\n            elif ex is not None:\n                _, ex_instance, _ = sys.exc_info()\n                raise ex_instance\n        if not self._stop_event.is_set():\n            if ex and self._exc_info_to_raise is None:\n                if isinstance(ex, tuple):\n                    logging.info('Error reported to Coordinator: %s', compat.as_str_any(ex[1]), exc_info=ex)\n                    self._exc_info_to_raise = ex\n                else:\n                    logging.info('Error reported to Coordinator: %s, %s', type(ex), compat.as_str_any(ex))\n                    self._exc_info_to_raise = sys.exc_info()\n                if len(self._exc_info_to_raise) != 3 or not self._exc_info_to_raise[0] or (not self._exc_info_to_raise[1]):\n                    try:\n                        raise ValueError('ex must be a tuple or sys.exc_info must return the current exception: %s' % self._exc_info_to_raise)\n                    except ValueError:\n                        self._exc_info_to_raise = sys.exc_info()\n            self._stop_event.set()", "docstring": "Request that the threads stop.\n\nAfter this is called, calls to `should_stop()` will return `True`.\n\nNote: If an exception is being passed in, in must be in the context of\nhandling the exception (i.e. `try: ... except Exception as ex: ...`) and not\na newly created one.\n\nArgs:\nex: Optional `Exception`, or Python `exc_info` tuple as returned by\n`sys.exc_info()`.  If this is the first call to `request_stop()` the\ncorresponding exception is recorded and re-raised from `join()`.", "source": "github-repos"}
{"code": "def _on_channel_close(self, channel, reply_code_or_reason, reply_text=None):\n        \n        if isinstance(reply_code_or_reason, pika_errs.ChannelClosed):\n            reply_code = reply_code_or_reason.reply_code\n            reply_text = reply_code_or_reason.reply_text\n        elif isinstance(reply_code_or_reason, int):\n            reply_code = reply_code_or_reason\n        else:\n            reply_code = 0\n            reply_text = str(reply_code_or_reason)\n\n        _log.info(\"Channel %r closed (%d): %s\", channel, reply_code, reply_text)\n        self._channel = None", "docstring": "Callback invoked when the channel is closed.\n\nArgs:\nchannel (pika.channel.Channel): The channel that got closed.\nreply_code_or_reason (int|Exception): The reason why the channel\nwas closed. In older versions of pika, this is the AMQP code.\nreply_text (str): The human-readable reason for the channel's\nclosure (only in older versions of pika).", "source": "juraj-google-style"}
{"code": "def Current():\n    return Architecture._MACHINE_TO_ARCHITECTURE.get(platform.machine().lower())", "docstring": "Determines the current system architecture.\n\nReturns:\nArchitectureTuple, One of the Architecture constants or None if it cannot\nbe determined.", "source": "github-repos"}
{"code": "def loadnetcdf(filename, copy=True):\n    filename = str(Path(filename).expanduser())\n    if copy:\n        dataarray = xr.open_dataarray(filename).copy()\n    else:\n        dataarray = xr.open_dataarray(filename, chunks={})\n    if (dataarray.name is None):\n        dataarray.name = filename.rstrip('.nc')\n    for (key, val) in dataarray.coords.items():\n        if (val.dtype.kind == 'S'):\n            dataarray[key] = val.astype('U')\n        elif (val.dtype == np.int32):\n            dataarray[key] = val.astype('i8')\n    return dataarray", "docstring": "Load a dataarray from a NetCDF file.\n\nArgs:\nfilename (str): Filename (*.nc).\ncopy (bool): If True, dataarray is copied in memory. Default is True.\n\nReturns:\ndataarray (xarray.DataArray): Loaded dataarray.", "source": "codesearchnet"}
{"code": "def _find_docstring_line_for_no_body(self, start):\n    tracked = sorted(list(self._tokenized_triple_quotes.keys()))\n    for i in tracked:\n        if (min(start, i) == start):\n            return i\n    return None", "docstring": "Find the docstring associated with a definition with no body\nin the node.\n\nIn these cases, the provided start and end line number for that\nelement are the same, so we must get the docstring based on the\nsequential position of known docstrings.\n\nArgs:\nstart: the row where the class / function starts.\n\nReturns:\nint: the row number where the docstring is found.", "source": "codesearchnet"}
{"code": "def get(self, key=None):\n        \n\n        if key:\n            key = ub_to_str(key)\n            if settings.ENABLE_CACHING:\n                return self.get_from_cache(key) or self.set_to_cache(self._get_from_riak(key))\n\n            else:\n                return self._get_from_riak(key)\n\n        else:\n            self._exec_query()\n            if not self._solr_cache['docs']:\n                raise ObjectDoesNotExist(\"%s %s\" % (self.index_name, self.compiled_query))\n\n            if self.count() > 1:\n                raise MultipleObjectsReturned(\n                    \"%s objects returned for %s\" % (self.count(),\n                                                    self._model_class.__name__))\n\n            return self._get_from_riak(self._solr_cache['docs'][0]['_yz_rk'])", "docstring": "If key is not None, tries to get obj from cache first. If not\nfound, tries to get from riak and sets to cache.\n\nIf key is None, then execute solr query and checks result. Returns\nobj data and key tuple or raises exception ObjectDoesNotExist or\nMultipleObjectsReturned.\n\nArgs:\nkey(str): obj key\nReturn:\n(tuple): obj data dict, obj key", "source": "juraj-google-style"}
{"code": "def serialize_to_normalized_compact_json(py_obj):\n    \n    return json.dumps(\n        py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes\n    )", "docstring": "Serialize a native object to normalized, compact JSON.\n\nThe JSON string is normalized by sorting any dictionary keys. It will be on a single\nline without whitespace between elements.\n\nArgs:\npy_obj: object\nAny object that can be represented in JSON. Some types, such as datetimes are\nautomatically converted to strings.\n\nReturns:\nstr: normalized, compact JSON string.", "source": "juraj-google-style"}
{"code": "def id_pools_vsn_ranges(self):\n    if (not self.__id_pools_vsn_ranges):\n        self.__id_pools_vsn_ranges = IdPoolsRanges('vsn', self.__connection)\n    return self.__id_pools_vsn_ranges", "docstring": "Gets the IdPoolsRanges API Client for VSN Ranges.\n\nReturns:\nIdPoolsRanges:", "source": "codesearchnet"}
{"code": "def percent_point(self, U):\n        \n        self.check_fit()\n\n        return scipy.optimize.brentq(self._brentq_cdf(U), -1000.0, 1000.0)", "docstring": "Given a cdf value, returns a value in original space.\n\nArgs:\nU(numpy.array): cdf values in [0,1]\n\nReturns:\nnumpy.array: value in original space", "source": "juraj-google-style"}
{"code": "def attribute(self, attribute_id, action='GET', params=None):\n        \n        if params is None:\n            params = {}\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if action == 'GET':\n            return self.tc_requests.get_attribute(\n                self.api_type,\n                self.api_sub_type,\n                self.unique_id,\n                attribute_id,\n                owner=self.owner,\n                params=params,\n            )\n\n        if action == 'DELETE':\n            return self.tc_requests.delete_attribute(\n                self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner\n            )\n\n        self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action])\n        return None", "docstring": "Gets the attribute from a Group/Indicator or Victim\n\n\nArgs:\naction:\nparams:\nattribute_id:\n\nReturns: attribute json", "source": "juraj-google-style"}
{"code": "def makeDoubleLinked(dom, parent=None):\n    dom.parent = parent\n    for child in dom.childs:\n        child.parent = dom\n        makeDoubleLinked(child, dom)", "docstring": "Standard output from `dhtmlparser` is single-linked tree. This will make it\ndouble-linked.\n\nArgs:\ndom (obj): :class:`.HTMLElement` instance.\nparent (obj, default None): Don't use this, it is used in recursive\ncall.", "source": "codesearchnet"}
{"code": "def clone(self, to_namespace, to_name):\n        \n        r = fapi.clone_workspace(self.namespace, self.name,\n                                 to_namespace, to_name, self.api_url)\n        fapi._check_response_code(r, 201)\n        return Workspace(to_namespace, to_name, self.api_url)", "docstring": "Clone this workspace.\n\nArgs:\nto_namespace (str): Target workspace namespace\nto_name (str): Target workspace name", "source": "juraj-google-style"}
{"code": "def project_surface(surface, angle=DEFAULT_ANGLE):\n    z_coef = np.sin(np.radians(angle))\n    y_coef = np.cos(np.radians(angle))\n    (surface_height, surface_width) = surface.shape\n    slope = np.tile(np.linspace(0.0, 1.0, surface_height), [surface_width, 1]).T\n    return ((slope * y_coef) + (surface * z_coef))", "docstring": "Returns the height of the surface when projected at the given angle.\n\nArgs:\nsurface (surface): the surface to project\nangle (float): the angle at which to project the surface\n\nReturns:\nsurface: A projected surface.", "source": "codesearchnet"}
{"code": "def extract_paths(self, paths, ignore_nopath):\n        \n        try:\n            if self._has_tar_and_gzip():\n                self._extract_paths_tar_gz(paths, ignore_nopath)\n            else:\n                self._extract_paths_scp(paths, ignore_nopath)\n        except (ssh.LagoSSHTimeoutException, LagoVMNotRunningError):\n            raise ExtractPathError(\n                'Unable to extract paths from {0}: unreachable with SSH'.\n                format(self.vm.name())\n            )", "docstring": "Extract the given paths from the domain\nArgs:\npaths(list of str): paths to extract\nignore_nopath(boolean): if True will ignore none existing paths.\nReturns:\nNone\nRaises:\n:exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing\npath was found on the VM, and ``ignore_nopath`` is True.\n:exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.", "source": "juraj-google-style"}
{"code": "def check_or_generate_pyi(options) -> AnalysisResult:\n    loader = load_pytd.create_loader(options)\n    compiler_error = None\n    other_error_info = ''\n    src = ''\n    try:\n        src = read_source_file(options.input, options.open_function)\n        if options.check:\n            ctx = check_py(src=src, options=options, loader=loader).context\n            ast, result = (None, None)\n        else:\n            ret, result = generate_pyi(src=src, options=options, loader=loader)\n            ctx = ret.context\n            ast = ret.ast\n    except utils.UsageError:\n        raise\n    except pyc.CompileError as e:\n        compiler_error = (options.input, e.line, e.error)\n    except constant_folding.ConstantError as e:\n        compiler_error = (options.input, e.lineno, e.message)\n    except IndentationError as e:\n        compiler_error = (options.input, e.lineno, e.msg)\n    except libcst.ParserSyntaxError as e:\n        compiler_error = (options.input, e.raw_line, e.message)\n    except SyntaxError as e:\n        compiler_error = (options.input, e.lineno, e.msg)\n    except directors.SkipFileError:\n        other_error_info = '\n    except Exception as e:\n        if options.nofail:\n            log.warning('***Caught exception: %s', str(e), exc_info=True)\n            if not options.check:\n                other_error_info = '\n        else:\n            prefix = str(e.args[0]) if e.args else ''\n            e.args = (f'{prefix}\\nFile: {options.input}',) + e.args[1:]\n            raise\n    else:\n        return AnalysisResult(ctx, ast, result)\n    ctx = context.Context(options, loader, src=src)\n    if compiler_error:\n        ctx.errorlog.python_compiler_error(*compiler_error)\n    ast = pytd_builtins.GetDefaultAst(parser.PyiOptions.from_toplevel_options(options))\n    result = pytd_builtins.DEFAULT_SRC + other_error_info\n    return AnalysisResult(ctx, ast, result)", "docstring": "Returns results from running pytype.\n\nArgs:\noptions: config.Options object.\n\nReturns:\nAn AnalysisResult.", "source": "github-repos"}
{"code": "def reports_progress(reporter):\n\n    def decorator(func):\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            with progress_reporter(reporter):\n                return func(*args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "A decorator factory to mark functions which report progress.\n\nArgs:\nreporter: A zero-argument callable to report progress.\nThe callable provided should have the means to both\nretrieve and display current progress information.", "source": "codesearchnet"}
{"code": "def get_hash(self, handle):\n        \n        handle = os.path.expanduser(os.path.expandvars(handle))\n        with open(self._prefixed('%s.hash' % handle)) as f:\n            return f.read()", "docstring": "Returns the associated hash for the given handle, the hash file must\nexist (``handle + '.hash'``).\n\nArgs:\nhandle (str): Path to the template to get the hash from\n\nReturns:\nstr: Hash for the given handle", "source": "juraj-google-style"}
{"code": "def decode(self, spec, encoded_value):\n    raise NotImplementedError(f'{type(self).__name__}.decode')", "docstring": "Decodes `value` from a batchable tensor encoding.\n\nArgs:\nspec: The TypeSpec for the result value.  If encoded values with spec `s`\nwere batched, then `spec` should be `s.batch(batch_size)`; or if encoded\nvalues with spec `s` were unbatched, then `spec` should be\n`s.unbatch()`.\nencoded_value: A nest of values returned by `encode`; or a nest of values\nthat was formed by stacking, unstacking, or concatenating the\ncorresponding elements of values returned by `encode`.\n\nReturns:\nA value compatible with `type_spec`.", "source": "github-repos"}
{"code": "def download_patric_genomes(self, ids, force_rerun=False):\n    ids = ssbio.utils.force_list(ids)\n    counter = 0\n    log.info('Downloading sequences from PATRIC...')\n    for patric_id in tqdm(ids):\n        f = ssbio.databases.patric.download_coding_sequences(patric_id=patric_id, seqtype='protein', outdir=self.sequences_by_organism_dir, force_rerun=force_rerun)\n        if f:\n            self.load_strain(patric_id, f)\n            counter += 1\n            log.debug('{}: downloaded sequence'.format(patric_id))\n        else:\n            log.warning('{}: unable to download sequence'.format(patric_id))\n    log.info('Created {} new strain GEM-PROs, accessible at \"strains\" attribute'.format(counter))", "docstring": "Download genome files from PATRIC given a list of PATRIC genome IDs and load them as strains.\n\nArgs:\nids (str, list): PATRIC ID or list of PATRIC IDs\nforce_rerun (bool): If genome files should be downloaded again even if they exist", "source": "codesearchnet"}
{"code": "def to_string(cls, error_code):\n        \n        if error_code == cls.EMU_NO_CONNECTION:\n            return 'No connection to emulator.'\n        elif error_code == cls.EMU_COMM_ERROR:\n            return 'Emulator connection error.'\n        elif error_code == cls.DLL_NOT_OPEN:\n            return 'DLL has not been opened.  Did you call \\'.connect()\\'?'\n        elif error_code == cls.VCC_FAILURE:\n            return 'Target system has no power.'\n        elif error_code == cls.INVALID_HANDLE:\n            return 'Given file / memory handle is invalid.'\n        elif error_code == cls.NO_CPU_FOUND:\n            return 'Could not find supported CPU.'\n        elif error_code == cls.EMU_FEATURE_UNSUPPORTED:\n            return 'Emulator does not support the selected feature.'\n        elif error_code == cls.EMU_NO_MEMORY:\n            return 'Emulator out of memory.'\n        elif error_code == cls.TIF_STATUS_ERROR:\n            return 'Target interface error.'\n        elif error_code == cls.FLASH_PROG_COMPARE_FAILED:\n            return 'Programmed data differs from source data.'\n        elif error_code == cls.FLASH_PROG_PROGRAM_FAILED:\n            return 'Programming error occured.'\n        elif error_code == cls.FLASH_PROG_VERIFY_FAILED:\n            return 'Error while verifying programmed data.'\n        elif error_code == cls.OPEN_FILE_FAILED:\n            return 'Specified file could not be opened.'\n        elif error_code == cls.UNKNOWN_FILE_FORMAT:\n            return 'File format of selected file is not supported.'\n        elif error_code == cls.WRITE_TARGET_MEMORY_FAILED:\n            return 'Could not write target memory.'\n        elif error_code == cls.DEVICE_FEATURE_NOT_SUPPORTED:\n            return 'Feature not supported by connected device.'\n        elif error_code == cls.WRONG_USER_CONFIG:\n            return 'User configured DLL parameters incorrectly.'\n        elif error_code == cls.NO_TARGET_DEVICE_SELECTED:\n            return 'User did not specify core to connect to.'\n        elif error_code == cls.CPU_IN_LOW_POWER_MODE:\n            return 'Target CPU is in low power mode.'\n        elif error_code == cls.UNSPECIFIED_ERROR:\n            return 'Unspecified error.'\n        raise ValueError('Invalid error code: %d' % error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\ncls (JlinkGlobalErrors): the ``JLinkGlobalErrors`` class\nerror_code (int): error code to convert\n\nReturns:\nAn error string corresponding to the error code.\n\nRaises:\nValueError: if the error code is invalid.", "source": "juraj-google-style"}
{"code": "def _get_upload_cmd(self, mirror=False):\n    if mirror:\n        dest_uri = self.s3_mirror_uri\n    else:\n        dest_uri = self.s3_version_uri\n    cmd = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(self.artifact_path, dest_uri, self.env)\n    return cmd", "docstring": "Generate the S3 CLI upload command\n\nArgs:\nmirror (bool): If true, uses a flat directory structure instead of nesting under a version.\n\nReturns:\nstr: The full CLI command to run.", "source": "codesearchnet"}
{"code": "def info(self, server_id):\n        \n        result = self._storage[server_id].info()\n        result['id'] = server_id\n        return result", "docstring": "return dicionary object with info about server\nArgs:\nserver_id - server identity", "source": "juraj-google-style"}
{"code": "def unreferenced_vert(script):\n    \n    if script.ml_version == '1.3.4BETA':\n        filter_xml = '  <filter name=\"Remove Unreferenced Vertex\"/>\\n'\n    else:\n        filter_xml = '  <filter name=\"Remove Unreferenced Vertices\"/>\\n'\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Check for every vertex on the mesh: if it is NOT referenced by a face,\nremoves it.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def avg_branch_length(self, terminal=True, internal=True):\n    if (not isinstance(terminal, bool)):\n        raise TypeError('terminal must be a bool')\n    if (not isinstance(internal, bool)):\n        raise TypeError('internal must be a bool')\n    if ((not internal) and (not terminal)):\n        raise RuntimeError('Must select either internal or terminal branches (or both)')\n    tot = 0.0\n    num = 0\n    for node in self.traverse_preorder():\n        if (((node.edge_length is not None) and (internal and (not node.is_leaf()))) or (terminal and node.is_leaf())):\n            tot += node.edge_length\n            num += 1\n    return (tot / num)", "docstring": "Compute the average length of the selected branches of this ``Tree``. Edges with length ``None`` will be treated as 0-length\n\nArgs:\n``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``\n\nReturns:\nThe average length of the selected branches", "source": "codesearchnet"}
{"code": "def _ReadElementSequenceDataTypeDefinition(self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_definition_values):\n    unsupported_definition_values = set(definition_values.keys()).difference(supported_definition_values)\n    if unsupported_definition_values:\n        error_message = 'unsupported definition values: {0:s}'.format(', '.join(unsupported_definition_values))\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    element_data_type = definition_values.get('element_data_type', None)\n    if (not element_data_type):\n        error_message = 'missing element data type'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    elements_data_size = definition_values.get('elements_data_size', None)\n    elements_terminator = definition_values.get('elements_terminator', None)\n    number_of_elements = definition_values.get('number_of_elements', None)\n    size_values = (elements_data_size, elements_terminator, number_of_elements)\n    size_values = [value for value in size_values if (value is not None)]\n    if (not size_values):\n        error_message = 'missing element data size, elements terminator and number of elements'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    if (len(size_values) > 1):\n        error_message = 'element data size, elements terminator and number of elements not allowed to be set at the same time'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    element_data_type_definition = definitions_registry.GetDefinitionByName(element_data_type)\n    if (not element_data_type_definition):\n        error_message = 'undefined element data type: {0:s}.'.format(element_data_type)\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    element_byte_size = element_data_type_definition.GetByteSize()\n    element_type_indicator = element_data_type_definition.TYPE_INDICATOR\n    if ((not element_byte_size) and (element_type_indicator != definitions.TYPE_INDICATOR_STRING)):\n        error_message = 'unsupported variable size element data type: {0:s}'.format(element_data_type)\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    aliases = definition_values.get('aliases', None)\n    description = definition_values.get('description', None)\n    urls = definition_values.get('urls', None)\n    definition_object = data_type_definition_class(definition_name, element_data_type_definition, aliases=aliases, data_type=element_data_type, description=description, urls=urls)\n    if (elements_data_size is not None):\n        try:\n            definition_object.elements_data_size = int(elements_data_size)\n        except ValueError:\n            definition_object.elements_data_size_expression = elements_data_size\n    elif (elements_terminator is not None):\n        if isinstance(elements_terminator, py2to3.UNICODE_TYPE):\n            elements_terminator = elements_terminator.encode('ascii')\n        definition_object.elements_terminator = elements_terminator\n    elif (number_of_elements is not None):\n        try:\n            definition_object.number_of_elements = int(number_of_elements)\n        except ValueError:\n            definition_object.number_of_elements_expression = number_of_elements\n    return definition_object", "docstring": "Reads an element sequence data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type 
definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndata_type_definition_class (str): data type definition class.\ndefinition_name (str): name of the definition.\nsupported_definition_values (set[str]): names of the supported definition\nvalues.\n\nReturns:\nSequenceDefinition: sequence data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(ApplicationSpecificInformation, self).read(istream, kmip_version=kmip_version)\n    tstream = BytearrayStream(istream.read(self.length))\n    self.application_namespace.read(tstream, kmip_version=kmip_version)\n    self.application_data.read(tstream, kmip_version=kmip_version)\n    self.is_oversized(tstream)\n    self.validate()", "docstring": "Read the data encoding the ApplicationSpecificInformation object and\ndecode it into its constituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def effect_emd(d1, d2):\n    return sum((abs((marginal_zero(d1, i) - marginal_zero(d2, i))) for i in range(d1.ndim)))", "docstring": "Compute the EMD between two effect repertoires.\n\nBecause the nodes are independent, the EMD between effect repertoires is\nequal to the sum of the EMDs between the marginal distributions of each\nnode, and the EMD between marginal distribution for a node is the absolute\ndifference in the probabilities that the node is OFF.\n\nArgs:\nd1 (np.ndarray): The first repertoire.\nd2 (np.ndarray): The second repertoire.\n\nReturns:\nfloat: The EMD between ``d1`` and ``d2``.", "source": "codesearchnet"}
{"code": "def _datetime_from_json(value, field):\n    \n    if _not_null(value, field):\n        if \".\" in value:\n            \n            return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU)\n        else:\n            \n            return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)\n    else:\n        return None", "docstring": "Coerce 'value' to a datetime, if set or not nullable.\n\nArgs:\nvalue (str): The timestamp.\nfield (.SchemaField): The field corresponding to the value.\n\nReturns:\nOptional[datetime.datetime]: The parsed datetime object from\n``value`` if the ``field`` is not null (otherwise it is\n:data:`None`).", "source": "juraj-google-style"}
{"code": "def _create_and_save_vocab_table_lookup_qat_model_tf1(self, output_path: str, tags: Collection[str], signature_def_key: str) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:\n    with session.Session(graph=ops.Graph()) as sess:\n        input_vocabs_placeholder, lookup_tensor, output_tensor = self._create_vocab_table_lookup_qat_model_tf1(sess)\n        inputs = {'input_vocabs': input_vocabs_placeholder}\n        outputs = {'lookup': lookup_tensor, 'output': output_tensor}\n        self._save_tf1_model(sess, output_path, signature_def_key, tags, inputs=inputs, outputs=outputs, init_op=lookup_ops.tables_initializer(), assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))\n    return (inputs, outputs)", "docstring": "Creates and saves a simple QAT model that uses a vocab table.\n\nArgs:\noutput_path: Path to the directory to save the created model.\ntags: Set of strings that identifies the saved meta graph.\nsignature_def_key: Name of the SignatureDef. Used to identify the\nSignatureDef within the meta graph.\n\nReturns:\ninputs: A mapping of input_key -> input_tensor (placeholder). The input\nkey is \"input_vocabs\".\noutputs: A mapping of output_key -> output_tensor. The output keys are\n\"lookup\" and \"output\".", "source": "github-repos"}
{"code": "def StreamMedia(self, callback=None, finish_callback=None,\n                    additional_headers=None):\n        \n        return self.__StreamMedia(\n            callback=callback, finish_callback=finish_callback,\n            additional_headers=additional_headers, use_chunks=False)", "docstring": "Send this resumable upload in a single request.\n\nArgs:\ncallback: Progress callback function with inputs\n(http_wrapper.Response, transfer.Upload)\nfinish_callback: Final callback function with inputs\n(http_wrapper.Response, transfer.Upload)\nadditional_headers: Dict of headers to include with the upload\nhttp_wrapper.Request.\n\nReturns:\nhttp_wrapper.Response of final response.", "source": "juraj-google-style"}
{"code": "class PerceiverClassificationPostprocessor(nn.Module):\n\n    def __init__(self, config: PerceiverConfig, in_channels: int) -> None:\n        super().__init__()\n        self.classifier = nn.Linear(in_channels, config.num_labels)\n\n    def forward(self, inputs, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:\n        logits = self.classifier(inputs)\n        return logits[:, 0, :]", "docstring": "Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.\n\nArgs:\nconfig ([*PerceiverConfig*]):\nModel configuration.\nin_channels (`int`):\nNumber of channels in the input.", "source": "github-repos"}
{"code": "def register_magics(store_name='_ampl_cells', ampl_object=None):\n    from IPython.core.magic import Magics, magics_class, cell_magic, line_magic\n\n    @magics_class\n    class StoreAMPL(Magics):\n\n        def __init__(self, shell=None, **kwargs):\n            Magics.__init__(self, shell=shell, **kwargs)\n            self._store = []\n            shell.user_ns[store_name] = self._store\n\n        @cell_magic\n        def ampl(self, line, cell):\n            'Store the cell in the store'\n            self._store.append(cell)\n\n        @cell_magic\n        def ampl_eval(self, line, cell):\n            'Evaluate the cell'\n            ampl_object.eval(cell)\n\n        @line_magic\n        def get_ampl(self, line):\n            'Retrieve the store'\n            return self._store\n    get_ipython().register_magics(StoreAMPL)", "docstring": "Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.\n\nArgs:\nstore_name: Name of the store where ``%%ampl cells`` will be stored.\nampl_object: Object used to evaluate ``%%ampl_eval`` cells.", "source": "codesearchnet"}
{"code": "def _get_loss_object(self, loss):\n    if loss is None:\n        return None\n    loss = losses_mod.get(loss)\n    if not isinstance(loss, losses_mod.Loss):\n        loss_name = get_custom_object_name(loss)\n        if loss_name is None:\n            raise ValueError('Loss should be a callable, found: {}'.format(loss))\n        loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)\n    loss._allow_sum_over_batch_size = True\n    return loss", "docstring": "Returns a `Loss` object.\n\nConverts the user-supplied loss to a `Loss` object. Also allows\n`SUM_OVER_BATCH_SIZE` reduction to be used for this loss.\n\nArgs:\nloss: A string, function, or `Loss` object.\n\nReturns:\nA `Loss` object.", "source": "github-repos"}
{"code": "def interact_GxG(pheno, snps1, snps2=None, K=None, covs=None):\n    if (K is None):\n        K = SP.eye(N)\n    N = snps1.shape[0]\n    if (snps2 is None):\n        snps2 = snps1\n    return interact_GxE(snps=snps1, pheno=pheno, env=snps2, covs=covs, K=K)", "docstring": "Epistasis test between two sets of SNPs\n\nArgs:\npheno:  [N x 1] SP.array of 1 phenotype for N individuals\nsnps1:  [N x S1] SP.array of S1 SNPs for N individuals\nsnps2:  [N x S2] SP.array of S2 SNPs for N individuals\nK:      [N x N] SP.array of LMM-covariance/kinship koefficients (optional)\nIf not provided, then linear regression analysis is performed\ncovs:   [N x D] SP.array of D covariates for N individuals\n\nReturns:\npv:     [S2 x S1] SP.array of P values for epistasis tests beten all SNPs in\nsnps1 and snps2", "source": "codesearchnet"}
{"code": "def dtime(sdat, tstart=None, tend=None):\n    tseries = sdat.tseries_between(tstart, tend)\n    time = tseries['t'].values\n    return ((time[1:] - time[:(- 1)]), time[:(- 1)])", "docstring": "Time increment dt.\n\nCompute dt as a function of time.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): time at which the computation should start. Use the\nbeginning of the time series data if set to None.\ntend (float): time at which the computation should end. Use the\nend of the time series data if set to None.\nReturns:\ntuple of :class:`numpy.array`: dt and time arrays.", "source": "codesearchnet"}
{"code": "def order_verification(self, institute, case, user, link, variant):\n    LOG.info('Creating event for ordering validation for variant {0}'.format(variant['display_name']))\n    updated_variant = self.variant_collection.find_one_and_update({'_id': variant['_id']}, {'$set': {'sanger_ordered': True}}, return_document=pymongo.ReturnDocument.AFTER)\n    self.create_event(institute=institute, case=case, user=user, link=link, category='variant', verb='sanger', variant=variant, subject=variant['display_name'])\n    LOG.info('Creating event for ordering sanger for case {0}'.format(case['display_name']))\n    self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='sanger', variant=variant, subject=variant['display_name'])\n    return updated_variant", "docstring": "Create an event for a variant verification for a variant\nand an event for a variant verification for a case\n\nArguments:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (str): The url to be used in the event\nvariant (dict): A variant object\n\nReturns:\nupdated_variant(dict)", "source": "codesearchnet"}
{"code": "def merge(self, ref_name: str):\n    if self.is_dirty():\n        LOGGER.error('repository is dirty; cannot merge: %s', ref_name)\n        sys.exit((- 1))\n    LOGGER.info('merging ref: \"%s\" into branch: %s', ref_name, self.get_current_branch())\n    self.repo.git.merge(ref_name)", "docstring": "Merges two refs\n\nArgs:\nref_name: ref to merge in the current one", "source": "codesearchnet"}
{"code": "def push_file(self, source, dest_dir):\n        \n\n        local_dest = dest_dir + '/' + os.path.basename(source)\n\n        \n        if os.path.dirname(source) != dest_dir:\n            try:\n                shutil.copyfile(source, local_dest)\n                os.chmod(local_dest, 0o777)\n\n            except OSError as e:\n                raise FileCopyException(e, self.hostname)\n\n        return local_dest", "docstring": "If the source files dirpath is the same as dest_dir, a copy\nis not necessary, and nothing is done. Else a copy is made.\n\nArgs:\n- source (string) : Path to the source file\n- dest_dir (string) : Path to the directory to which the files is to be copied\n\nReturns:\n- destination_path (String) : Absolute path of the destination file\n\nRaises:\n- FileCopyException : If file copy failed.", "source": "juraj-google-style"}
{"code": "def get_concept(self, conceptId, lang='en'):\n    url = urljoin((self.concept_service + '/'), conceptId)\n    (res, status_code) = self.get(url, params={'lang': lang})\n    if (status_code != 200):\n        logger.debug('Fetch concept failed.')\n    return (self.decode(res), status_code)", "docstring": "Fetch the concept from the Knowledge base\n\nArgs:\nid (str): The concept id to be fetched, it can be Wikipedia\npage id or Wikiedata id.\n\nReturns:\ndict, int: A dict containing the concept information; an integer\nrepresenting the response code.", "source": "codesearchnet"}
{"code": "def get(self, key):\n    self._create_file_if_none_exists()\n    with open(self.filename, 'rb') as file_object:\n        cache_pickle = pickle.load(file_object)\n        val = cache_pickle.get(key, None)\n        return val", "docstring": "Gets a value by a key.\n\nArgs:\nkey (str): Key to retrieve the value.\n\nReturns: Retrieved value.", "source": "codesearchnet"}
{"code": "def draw_points(self, *points):\n        \n        point_array = ffi.new('SDL_Point[]', len(points))\n        for i, p in enumerate(points):\n            point_array[i] = p._ptr[0]\n        check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points)))", "docstring": "Draw multiple points on the current rendering target.\n\nArgs:\n*points (Point): The points to draw.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def _hash_sequence(self, sighash_type, anyone_can_pay):\n        \n        if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:\n            \n            \n            return b'\\x00' * 32\n        else:\n            \n            sequences = ByteData()\n            for tx_in in self.tx_ins:\n                sequences += tx_in.sequence\n            return utils.hash256(sequences.to_bytes())", "docstring": "BIP143 hashSequence implementation\n\nArgs:\nsighash_type    (int): SIGHASH_SINGLE or SIGHASH_ALL\nanyone_can_pay (bool): true if ANYONECANPAY should be set\nReturns:\n(bytes): the hashSequence, a 32 byte hash", "source": "juraj-google-style"}
{"code": "def _load_tmp_fact(filepath):\n    from hamster_lib import Fact\n    try:\n        with open(filepath, 'rb') as fobj:\n            fact = pickle.load(fobj)\n    except IOError:\n        fact = False\n    else:\n        if (not isinstance(fact, Fact)):\n            raise TypeError(_(\"Something went wrong. It seems our pickled file does not contain valid Fact instance. [Content: '{content}'; Type: {type}\".format(content=fact, type=type(fact))))\n    return fact", "docstring": "Load an 'ongoing fact' from a given location.\n\nArgs:\nfilepath: Full path to the tmpfile location.\n\nReturns:\nhamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``\nif no file was found.\n\nRaises:\nTypeError: If for some reason our stored instance is no instance of\n``hamster_lib.Fact``.", "source": "codesearchnet"}
{"code": "def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]:\n    \n    jsonschema_validate(request, schema)\n    return request", "docstring": "Wraps jsonschema.validate, returning the same object passed in.\n\nArgs:\nrequest: The deserialized-from-json request.\nschema: The jsonschema schema to validate against.\n\nRaises:\njsonschema.ValidationError", "source": "juraj-google-style"}
{"code": "def as_dataframe(self, max_rows=None):\n    max_rows = (len(self._timeseries_list) if (max_rows is None) else max_rows)\n    headers = [{'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()} for ts in self._timeseries_list[:max_rows]]\n    if (not headers):\n        return pandas.DataFrame()\n    dataframe = pandas.io.json.json_normalize(headers)\n    dataframe.columns = pandas.MultiIndex.from_tuples([((col, '') if (col == 'resource.type') else col.rsplit('.', 1)) for col in dataframe.columns])\n    resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(dataframe['resource.labels'].columns)\n    sorted_columns = [('resource.type', '')]\n    sorted_columns += [('resource.labels', key) for key in resource_keys]\n    sorted_columns += sorted((col for col in dataframe.columns if (col[0] == 'metric.labels')))\n    dataframe = dataframe[sorted_columns]\n    dataframe = dataframe.sort_values(sorted_columns)\n    dataframe = dataframe.reset_index(drop=True).fillna('')\n    return dataframe", "docstring": "Creates a pandas dataframe from the query metadata.\n\nArgs:\nmax_rows: The maximum number of timeseries metadata to return. If None,\nreturn all.\n\nReturns:\nA pandas dataframe containing the resource type, resource labels and\nmetric labels. Each row in this dataframe corresponds to the metadata\nfrom one time series.", "source": "codesearchnet"}
{"code": "def load_user_config(vcs):\n    \n    config_path = os.path.join(vcs.path, 'eci.yaml')\n    if not os.path.exists(config_path):\n        raise ConfigNotFoundError\n    with open(config_path, 'r') as f:\n        try:\n            config = yaml.safe_load(f)\n        except yaml.YAMLError:\n            raise ConfigFormatError\n    if not isinstance(config, dict):\n        raise ConfigFormatError\n    for k, v in _default_config.iteritems():\n        config.setdefault(k, v)\n    for k, v in _config_types.iteritems():\n        if not isinstance(config[k], v):\n            raise ConfigFormatError\n    return config", "docstring": "Load the user config\n\nArgs:\nvcs (easyci.vcs.base.Vcs) - the vcs object for the current project\n\nReturns:\ndict - the config\n\nRaises:\nConfigFormatError\nConfigNotFoundError", "source": "juraj-google-style"}
{"code": "def test_encode_with_non_root_fhir_path_constraint_succeeds(self, fhir_path_expression: str, expected_sql_expression: str, expected_fhir_path_sql_expression: str, expected_fields_referenced: List[str]):\n    self.maxDiff = None\n    constraint = self.build_constraint(fhir_path_expression=fhir_path_expression)\n    self.assert_constraint_is_equal_to_expression(base_id='Hospital', element_definition_id='Hospital.patients', constraint=constraint, expected_sql_expression=expected_sql_expression, expected_fhir_path_sql_expression=expected_fhir_path_sql_expression, expected_fields_referenced=expected_fields_referenced)", "docstring": "Tests that a \"transitive constraint\" is properly encoded.\n\nA \"transitive constraint\" is a constraint defined relative to a resource\nelsewhere in the FHIR resource graph than what we're querying against.\n\nArgs:\nfhir_path_expression: The FHIRPath expression to encode.\nexpected_sql_expression: The expected generated Standard SQL.\nexpected_fhir_path_sql_expression: The expected generated Standard SQL\nwithout any contextual subqueries.\nexpected_fields_referenced: The expected fields_referenced_by_expression\nattribute on the resulting constraint.", "source": "github-repos"}
{"code": "def variable_summaries(vars_, groups=None, scope='weights'):\n    groups = (groups or {'all': '.*'})\n    grouped = collections.defaultdict(list)\n    for var in vars_:\n        for (name, pattern) in groups.items():\n            if re.match(pattern, var.name):\n                name = re.sub(pattern, name, var.name)\n                grouped[name].append(var)\n    for name in groups:\n        if (name not in grouped):\n            tf.logging.warn(\"No variables matching '{}' group.\".format(name))\n    summaries = []\n    for (name, vars_) in grouped.items():\n        vars_ = [tf.reshape(var, [(- 1)]) for var in vars_]\n        vars_ = tf.concat(vars_, 0)\n        summaries.append(tf.summary.histogram(((scope + '/') + name), vars_))\n    return tf.summary.merge(summaries)", "docstring": "Create histogram summaries for the provided variables.\n\nSummaries can be grouped via regexes matching variables names.\n\nArgs:\nvars_: List of variables to summarize.\ngroups: Mapping of name to regex for grouping summaries.\nscope: Name scope for this operation.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def from_filenames(filenames, transformations=None, primitive=True,\n                       extend_collection=False):\n        \n\n        allcifs = []\n        for fname in filenames:\n            with open(fname, \"r\") as f:\n                allcifs.append(f.read())\n        return CifTransmuter(\"\\n\".join(allcifs), transformations,\n                             primitive=primitive,\n                             extend_collection=extend_collection)", "docstring": "Generates a TransformedStructureCollection from a cif, possibly\ncontaining multiple structures.\n\nArgs:\nfilenames: List of strings of the cif files\ntransformations: New transformations to be applied to all\nstructures\nprimitive: Same meaning as in __init__.\nextend_collection: Same meaning as in __init__.", "source": "juraj-google-style"}
{"code": "def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):\n    job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header, compress=compress)\n    if (job is not None):\n        job.wait()\n    return job", "docstring": "Exports the table to GCS; blocks until complete.\n\nArgs:\ndestination: the destination URI(s). Can be a single URI or a list.\nformat: the format to use for the exported data; one of 'csv', 'json', or 'avro'\n(default 'csv').\ncsv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','\ncsv_header: for CSV exports, whether to include an initial header line. Default true.\ncompress: whether to compress the data on export. Compression is not supported for\nAVRO format. Defaults to False.\nReturns:\nA Job object for the completed export Job if it was started successfully; else None.", "source": "codesearchnet"}
{"code": "def add_chain_ids(self, chains):\n    chains = ssbio.utils.force_list(chains)\n    for c in chains:\n        if self.chains.has_id(c):\n            log.debug('{}: chain already present'.format(c))\n        else:\n            chain_prop = ChainProp(ident=c, pdb_parent=self.id)\n            self.chains.append(chain_prop)\n            log.debug('{}: added to chains list'.format(c))", "docstring": "Add chains by ID into the chains attribute\n\nArgs:\nchains (str, list): Chain ID or list of IDs", "source": "codesearchnet"}
{"code": "async def _handle_set_typing_notification(self, set_typing_notification):\n        \n        conv_id = set_typing_notification.conversation_id.id\n        res = parsers.parse_typing_status_message(set_typing_notification)\n        await self.on_typing.fire(res)\n        try:\n            conv = await self._get_or_fetch_conversation(conv_id)\n        except exceptions.NetworkError:\n            logger.warning(\n                'Failed to fetch conversation for typing notification: %s',\n                conv_id\n            )\n        else:\n            await conv.on_typing.fire(res)", "docstring": "Receive SetTypingNotification and update the conversation.\n\nArgs:\nset_typing_notification: hangouts_pb2.SetTypingNotification\ninstance", "source": "juraj-google-style"}
{"code": "def _parse_example_raw(serialized, names, params, name):\n    if params.num_features == 0:\n        raise ValueError('Must provide at least one feature key.')\n    with ops.name_scope(name, 'ParseExample', [serialized, names]):\n        names = [] if names is None else names\n        serialized = ops.convert_to_tensor(serialized, name='serialized')\n        if params.ragged_keys and serialized.shape.ndims is None:\n            raise ValueError('serialized must have statically-known rank to parse ragged features.')\n        outputs = gen_parsing_ops.parse_example_v2(serialized=serialized, names=names, sparse_keys=params.sparse_keys, dense_keys=params.dense_keys, ragged_keys=params.ragged_keys, dense_defaults=params.dense_defaults_vec, num_sparse=len(params.sparse_keys), sparse_types=params.sparse_types, ragged_value_types=params.ragged_value_types, ragged_split_types=params.ragged_split_types, dense_shapes=params.dense_shapes_as_proto, name=name)\n        sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits = outputs\n        ragged_tensors = parsing_config._build_ragged_tensors(serialized.shape, ragged_values, ragged_row_splits)\n        sparse_tensors = [sparse_tensor.SparseTensor(ix, val, shape) for ix, val, shape in zip(sparse_indices, sparse_values, sparse_shapes)]\n        return dict(zip(params.sparse_keys + params.dense_keys + params.ragged_keys, sparse_tensors + dense_values + ragged_tensors))", "docstring": "Parses `Example` protos.\n\nArgs:\nserialized: A vector (1-D Tensor) of strings, a batch of binary\nserialized `Example` protos.\nnames: A vector (1-D Tensor) of strings (optional), the names of\nthe serialized protos.\nparams: A `ParseOpParams` containing the parameters for the parse op.\nname: A name for this operation (optional).\n\nReturns:\nA `dict` mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s.", "source": "github-repos"}
{"code": "def add_comment(self, comment):\n        \n        if not comment:\n            return\n\n        self.__comments[comment.name] = comment\n        self.comment_added_signal(self, comment)", "docstring": "Add a comment to the database.\n\nArgs:\ncomment (hotdoc.core.Comment): comment to add", "source": "juraj-google-style"}
{"code": "def _pack_images(images, rows, cols):\n  \n  shape = onp.shape(images)\n  width, height, depth = shape[-3:]\n  images = onp.reshape(images, (-1, width, height, depth))\n  batch = onp.shape(images)[0]\n  rows = onp.minimum(rows, batch)\n  cols = onp.minimum(batch \n  images = images[:rows * cols]\n  images = onp.reshape(images, (rows, cols, width, height, depth))\n  images = onp.transpose(images, [0, 2, 1, 3, 4])\n  images = onp.reshape(images, [rows * width, cols * height, depth])\n  return images", "docstring": "Helper utility to make a tiled field of images from numpy arrays.\n\nArgs:\nimages: Image tensor in shape [N, W, H, C].\nrows: Number of images per row in tiled image.\ncols: Number of images per column in tiled image.\n\nReturns:\nA tiled image of shape [W * rows, H * cols, C].\nTruncates incomplete rows.", "source": "juraj-google-style"}
{"code": "def export_to_dir(network, export_dir):\n    \n\n    package_path = ding0.__path__[0]\n\n    network.export_to_csv_folder(os.path.join(package_path,\n                                              'output',\n                                              'debug',\n                                              'grid',\n                                              export_dir))", "docstring": "Exports PyPSA network as CSV files to directory\n\nArgs:\nnetwork: pypsa.Network\nexport_dir: str\nSub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.", "source": "juraj-google-style"}
{"code": "def hashed(field_name, percent, fields=None, count=0):\n    \n    if field_name is None:\n      raise Exception('Hash field must be specified')\n\n    def _hashed_sampling(sql):\n      projection = Sampling._create_projection(fields)\n      sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \\\n            (projection, sql, field_name, percent)\n      if count != 0:\n        sql = '%s LIMIT %d' % (sql, count)\n      return sql\n    return _hashed_sampling", "docstring": "Provides a sampling strategy based on hashing and selecting a percentage of data.\n\nArgs:\nfield_name: the name of the field to hash.\npercent: the percentage of the resulting hashes to select.\nfields: an optional list of field names to retrieve.\ncount: optional maximum count of rows to pick.\nReturns:\nA sampling function that can be applied to get a hash-based sampling.", "source": "juraj-google-style"}
{"code": "def create_detector(self, detector):\n        \n        resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX),\n                          data=detector)\n        resp.raise_for_status()\n        return resp.json()", "docstring": "Creates a new detector.\n\nArgs:\ndetector (object): the detector model object. Will be serialized as\nJSON.\nReturns:\ndictionary of the response (created detector model).", "source": "juraj-google-style"}
{"code": "def dummy_inputs(self):\n    input_ids = tf.constant(DUMMY_INPUTS, dtype=tf.int32)\n    batch_size, seq_len = input_ids.shape\n    VISION_DUMMY_INPUTS = tf.random.uniform(shape=(batch_size, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size), dtype=tf.float32)\n    pixel_values = tf.constant(VISION_DUMMY_INPUTS)\n    dummy = {'pixel_values': pixel_values, 'input_ids': input_ids}\n    return dummy", "docstring": "Dummy inputs to build the network.\n\nReturns:\n`Dict[str, tf.Tensor]`: The dummy inputs.", "source": "github-repos"}
{"code": "def __init__(self, shape, layout_rules):\n    \n    self._shape = convert_to_shape(shape)\n    self._layout_rules = convert_to_layout_rules(layout_rules)", "docstring": "Creates a mesh implementation.\n\nArgs:\nshape: Shape.\nlayout_rules: LayoutRules.", "source": "juraj-google-style"}
{"code": "def MapByteStream(self, byte_stream, byte_offset=0, **kwargs):\n    \n    byte_stream = super(StringMap, self).MapByteStream(\n        byte_stream, byte_offset=byte_offset, **kwargs)\n\n    if self._HasElementsTerminator():\n      \n      \n      elements_terminator = self._data_type_definition.elements_terminator\n      elements_terminator_size = len(elements_terminator)\n\n      byte_offset = 0\n      byte_stream_size = len(byte_stream)\n\n      while byte_offset < byte_stream_size:\n        end_offset = byte_offset + elements_terminator_size\n        if byte_stream[byte_offset:end_offset] == elements_terminator:\n          break\n\n        byte_offset += elements_terminator_size\n\n      byte_stream = byte_stream[:byte_offset]\n\n    try:\n      return byte_stream.decode(self._data_type_definition.encoding)\n\n    except Exception as exception:\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: {2!s}').format(\n              self._data_type_definition.name, byte_offset, exception)\n      raise errors.MappingError(error_string)", "docstring": "Maps the data type on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\n\nReturns:\nstr: mapped values.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def _get_profile_data_generator(self):\n    node_to_traceback = defaultdict(list)\n    node_to_op_type = defaultdict(str)\n    for op in self._graph.get_operations():\n        node_to_traceback[op.name] = op.traceback\n        node_to_op_type[op.name] = op.type\n\n    def profile_data_generator(device_step_stats):\n        for node_stats in device_step_stats.node_stats:\n            if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n                continue\n            yield ProfileDatum(node_stats, node_to_op_type[node_stats.node_name], node_to_traceback[node_stats.node_name])\n    return profile_data_generator", "docstring": "Get function that generates `ProfileDatum` objects.\n\nReturns:\nA function that generates `ProfileDatum` objects.", "source": "github-repos"}
{"code": "def ones(shape, dtype=None):\n    return backend.numpy.ones(shape, dtype=dtype)", "docstring": "Return a new tensor of given shape and type, filled with ones.\n\nArgs:\nshape: Shape of the new tensor.\ndtype: Desired data type of the tensor.\n\nReturns:\nTensor of ones with the given shape and dtype.", "source": "github-repos"}
{"code": "def add_get_parameters(url, parameters, percent_encode=True):\n    \n    url_parts = list(parse.urlparse(url))\n    query = dict(parse.parse_qs(url_parts[4]))\n    query.update(parameters)\n\n    if percent_encode:\n        url_parts[4] = parse.urlencode(query)\n    else:\n        url_parts[4] = \"&\".join([key + \"=\" + value for key, value in query.items()])\n\n    return parse.urlunparse(url_parts)", "docstring": "Utility function to add GET parameters to an existing URL.\n\nArgs:\nparameters\nA dictionary of the parameters that should be added.\npercent_encode\nWhether the query parameters should be percent encoded.\n\nReturns:\nThe updated URL.", "source": "juraj-google-style"}
{"code": "def _FindKeys(self, key, names, matches):\n    for (name, subkey) in iter(key.items()):\n        if (name in names):\n            matches.append((name, subkey))\n        if isinstance(subkey, dict):\n            self._FindKeys(subkey, names, matches)", "docstring": "Searches the plist key hierarchy for keys with matching names.\n\nIf a match is found a tuple of the key name and value is added to\nthe matches list.\n\nArgs:\nkey (dict[str, object]): plist key.\nnames (list[str]): names of the keys to match.\nmatches (list[str]): keys with matching names.", "source": "codesearchnet"}
{"code": "def __init__(self, pattern, flags=0):\n    \n\n    self.regex = re.compile(pattern, flags=flags)", "docstring": "Initialize.\n\nArgs:\n# pattern is the regular expression to search for\npattern: str\n# flags passed to re.compile function as the second argument\nflags: int", "source": "juraj-google-style"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_buffer = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The GetAttributeList response payload is missing the unique identifier field.')\n    if self._attribute_names:\n        if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n            for attribute_name in self._attribute_names:\n                attribute_name.write(local_buffer, kmip_version=kmip_version)\n        else:\n            for attribute_name in self._attribute_names:\n                t = enums.convert_attribute_name_to_tag(attribute_name.value)\n                e = primitives.Enumeration(enums.Tags, value=t, tag=enums.Tags.ATTRIBUTE_REFERENCE)\n                e.write(local_buffer, kmip_version=kmip_version)\n    else:\n        raise exceptions.InvalidField('The GetAttributeList response payload is missing the attribute names field.')\n    self.length = local_buffer.length()\n    super(GetAttributeListResponsePayload, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the GetAttributeList response payload to a\nstream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidField: Raised if the unique identifier or attribute name\nare not defined.", "source": "codesearchnet"}
{"code": "def _construct_graph(self, vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs):\n    with self._lock:\n        self._graph = pydot.Dot()\n        if default_vertex_attrs:\n            self._graph.set_node_defaults(**default_vertex_attrs)\n        if default_edge_attrs:\n            self._graph.set_edge_defaults(**default_edge_attrs)\n        self._vertex_refs = {}\n        self._edge_refs = {}\n        for vertex, vertex_attrs in vertex_dict.items():\n            vertex_ref = pydot.Node(vertex, **vertex_attrs)\n            self._vertex_refs[vertex] = vertex_ref\n            self._graph.add_node(vertex_ref)\n        for edge, edge_attrs in edge_dict.items():\n            vertex_src = self._vertex_refs[edge[0]]\n            vertex_dst = self._vertex_refs[edge[1]]\n            edge_ref = pydot.Edge(vertex_src, vertex_dst, **edge_attrs)\n            self._edge_refs[edge] = edge_ref\n            self._graph.add_edge(edge_ref)", "docstring": "Constructs the pydot.Dot object for the pipeline graph.\n\nArgs:\nvertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes\nedge_dict: (Dict[(str, str), Dict[str, str]]) maps vertex name pairs to\nattributes\ndefault_vertex_attrs: (Dict[str, str]) a dict of attributes\ndefault_edge_attrs: (Dict[str, str]) a dict of attributes", "source": "github-repos"}
{"code": "def vectorize(self, token_list):\n        \n        sentence_list = [token_list]\n        test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list)\n        pred_arr = self.__controller.inference(test_observed_arr)\n        return self.__controller.get_feature_points()", "docstring": "Tokenize token list.\n\nArgs:\ntoken_list:   The list of tokens..\n\nReturns:\n[vector of token, vector of token, vector of token, ...]", "source": "juraj-google-style"}
{"code": "def tournament_selection(population, fitnesses, num_competitors=2, diversity_weight=0.0):\n    if (diversity_weight <= 0.0):\n        fitness_pop = zip(fitnesses, population)\n        return [max(random.sample(fitness_pop, num_competitors))[1] for _ in range(len(population))]\n    else:\n        indices = range(len(population))\n        selected_solutions = []\n        for _ in range(len(population)):\n            competitor_indices = random.sample(indices, num_competitors)\n            if (random.uniform(0.0, 1.0) < (1.0 / (1.0 + diversity_weight))):\n                selected_solutions.append(max(zip([fitnesses[i] for i in competitor_indices], [population[i] for i in competitor_indices]))[(- 1)])\n            else:\n                selected_solutions.append(max(zip([_diversity_metric(population[i], selected_solutions) for i in competitor_indices], [fitnesses[i] for i in competitor_indices], [population[i] for i in competitor_indices]))[(- 1)])\n        return selected_solutions", "docstring": "Create a list of parents with tournament selection.\n\nArgs:\npopulation: A list of solutions.\nfitnesses: A list of fitness values corresponding to solutions in population.\nnum_competitors: Number of solutions to compare every round.\nBest solution among competitors is selected.\ndiversity_weight: Weight of diversity metric.\nDetermines how frequently diversity is used to select tournament winners.\nNote that fitness is given a weight of 1.0.\ndiversity_weight == 1.0 gives equal weight to diversity and fitness.", "source": "codesearchnet"}
{"code": "def __init__(self, sess, watch_fn=None, thread_name_filter=None, pass_through_operrors=False):\n    BaseDebugWrapperSession.__init__(self, sess, thread_name_filter=thread_name_filter, pass_through_operrors=pass_through_operrors)\n    self._watch_fn = None\n    if watch_fn is not None:\n        if not callable(watch_fn):\n            raise TypeError('watch_fn is not callable')\n        self._watch_fn = watch_fn", "docstring": "Constructor of NonInteractiveDebugWrapperSession.\n\nArgs:\nsess: The TensorFlow `Session` object being wrapped.\nwatch_fn: (`Callable`) A Callable that maps the fetches and feeds of a\ndebugged `Session.run()` call to `WatchOptions.`\n* Args:\n* `fetches`: the fetches to the `Session.run()` call.\n* `feeds`: the feeds to the `Session.run()` call.\n\n* Returns:\n(`tf_debug.WatchOptions`) An object containing debug options including\nthe debug ops to use, the node names, op types and/or tensor data\ntypes to watch, etc. See the documentation of `tf_debug.WatchOptions`\nfor more details.\nthread_name_filter: Regular-expression white list for threads on which the\nwrapper session will be active. See doc of `BaseDebugWrapperSession` for\nmore details.\npass_through_operrors: If true, all captured OpErrors will be\npropagated.  By default this captures all OpErrors.\nRaises:\nTypeError: If a non-None `watch_fn` is specified and it is not callable.", "source": "github-repos"}
{"code": "def write_to_file(self, file_path):\n    with gfile.Open(file_path, 'w') as f:\n        for line in self._lines:\n            f.write(line + '\\n')", "docstring": "Write the object itself to file, in a plain format.\n\nThe font_attr_segs and annotations are ignored.\n\nArgs:\nfile_path: (str) path of the file to write to.", "source": "github-repos"}
{"code": "def call(command, collect_missing=False, silent=True):\n  r\n  return (_execCommand if silent else execCommand)(shlex.split(command), collect_missing)", "docstring": "r\"\"\"Calls a task, as if it were called from the command line.\n\nArgs:\ncommand (str): A route followed by params (as if it were entered in the shell).\ncollect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False.\n\nReturns:\nThe return value of the called command.", "source": "juraj-google-style"}
{"code": "def ParsePageVisitedRow(\n      self, parser_mediator, query, row, cache=None, database=None,\n      **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    from_visit = self._GetRowValue(query_hash, row, 'from_visit')\n    hidden = self._GetRowValue(query_hash, row, 'hidden')\n    rev_host = self._GetRowValue(query_hash, row, 'rev_host')\n    typed = self._GetRowValue(query_hash, row, 'typed')\n\n    \n    extras = []\n    if from_visit:\n      extras.append('visited from: {0:s}'.format(\n          self._GetUrl(from_visit, cache, database)))\n\n    if hidden == '1':\n      extras.append('(url hidden)')\n\n    if typed == '1':\n      extras.append('(directly typed)')\n    else:\n      extras.append('(URL not typed directly)')\n\n    event_data = FirefoxPlacesPageVisitedEventData()\n    event_data.host = self._ReverseHostname(rev_host)\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')\n    event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')\n\n    if extras:\n      event_data.extra = extras\n\n    timestamp = self._GetRowValue(query_hash, row, 'visit_date')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a page visited row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (Optional[SQLiteCache]): cache.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "juraj-google-style"}
{"code": "def load_parameters(self, path):\n        \n        nn.load_parameters(path)\n        for v in self.get_modules():\n            if not isinstance(v, tuple):\n                continue\n            prefix, module = v\n            for k, v in module.__dict__.items():\n                if not isinstance(v, nn.Variable):\n                    continue\n                pname = k\n                name = \"{}/{}\".format(prefix, pname)\n                \n                param0 = v\n                param1 = nn.parameter.pop_parameter(name)\n                if param0 is None:\n                    raise ValueError(\n                        \"Model does not have {} parameter.\".format(name))\n                param0.d = param1.d.copy()\n                nn.logger.info(\"`{}` loaded.)\".format(name))", "docstring": "Load parameters from a file with the specified format.\n\nArgs:\npath : path or file object", "source": "juraj-google-style"}
{"code": "def _get_hash(self, file_obj):\n    size = 0\n    hash_buider = self.hash_builder()\n    for piece in self._get_file_iterator(file_obj):\n        hash_buider.update(piece)\n        size += len(piece)\n    file_obj.seek(0)\n    return ('%s_%x' % (hash_buider.hexdigest(), size))", "docstring": "Compute hash for the `file_obj`.\n\nAttr:\nfile_obj (obj): File-like object with ``.write()`` and ``.seek()``.\n\nReturns:\nstr: Hexdigest of the hash.", "source": "codesearchnet"}
{"code": "class PerceiverClassificationDecoder(PerceiverAbstractDecoder):\n\n    def __init__(self, config, **decoder_kwargs):\n        super().__init__()\n        self.num_labels = config.num_labels\n        self.decoder = PerceiverBasicDecoder(config, output_num_channels=self.num_labels, output_index_dims=1, **decoder_kwargs)\n\n    @property\n    def num_query_channels(self) -> int:\n        return self.decoder.num_query_channels\n\n    def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):\n        return self.decoder.decoder_query(inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points)\n\n    def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> PerceiverDecoderOutput:\n        decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)\n        logits = decoder_outputs.logits[:, 0, :]\n        return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)", "docstring": "Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.\nWill turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of\nshape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).\n\nArgs:\nconfig ([`PerceiverConfig`]):\nModel configuration.", "source": "github-repos"}
{"code": "def mark_done(task_id):\n    task = Task.get_by_id(task_id)\n    if (task is None):\n        raise ValueError(('Task with id %d does not exist' % task_id))\n    task.done = True\n    task.put()", "docstring": "Marks a task as done.\n\nArgs:\ntask_id: The integer id of the task to update.\n\nRaises:\nValueError: if the requested task doesn't exist.", "source": "codesearchnet"}
{"code": "def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):\n    for (i, ext) in enumerate(exclude_exts):\n        if (not ext.strip().startswith('.')):\n            exclude_exts[i] = ('.' + ext.strip())\n    paths = []\n    for fname in os.listdir(dirname):\n        (root, ext) = os.path.splitext(fname)\n        path = os.path.join(dirname, fname)\n        if ((ext in exclude_exts) or (fname in exclude_fnames) or fname.startswith('.') or (not os.path.isfile(path))):\n            continue\n        paths.append(path)\n    pseudos = []\n    for path in paths:\n        try:\n            pseudo = self.parse(path)\n        except:\n            pseudo = None\n        if (pseudo is not None):\n            pseudos.append(pseudo)\n            self._parsed_paths.extend(path)\n        else:\n            self._wrong_paths.extend(path)\n    return pseudos", "docstring": "Analyze the files contained in directory dirname.\n\nArgs:\ndirname: directory path\nexclude_exts: list of file extensions that should be skipped.\nexclude_fnames: list of file names that should be skipped.\n\nReturns:\nList of pseudopotential objects.", "source": "codesearchnet"}
{"code": "def indicators(self, indicator_type=None, filters=None, params=None):\n        \n        indicator = self._tcex.ti.indicator(indicator_type)\n        for i in self.tc_requests.indicators_from_tag(\n            indicator, self.name, filters=filters, params=params\n        ):\n            yield i", "docstring": "Gets all indicators from a tag.\n\nArgs:\nparams:\nfilters:\nindicator_type:", "source": "juraj-google-style"}
{"code": "def use_test_undeclared_outputs_dir(self):\n    return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)", "docstring": "Decides the output directory of the report and trace files.\n\nArgs:\nNone.\n\nReturns:\nTrue if the output files should be written to the\ntest-undeclared-outputs-directory defined via an\nenv variable.", "source": "github-repos"}
{"code": "def VFSMultiOpen(pathspecs, progress_callback=None):\n    precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec)\n    vfs_open = functools.partial(VFSOpen, progress_callback=progress_callback)\n    return context.MultiContext(map(vfs_open, pathspecs))", "docstring": "Opens multiple files specified by given path-specs.\n\nSee documentation for `VFSOpen` for more information.\n\nArgs:\npathspecs: A list of pathspec instances of files to open.\nprogress_callback: A callback function to call to notify about progress\n\nReturns:\nA context manager yielding file-like objects.", "source": "codesearchnet"}
{"code": "def jax_gather(params, indices, batch_dims=2):\n\n    def _jax_gather(params, indices):\n        return params[indices]\n    for _ in range(batch_dims):\n        _jax_gather = jax.vmap(_jax_gather, in_axes=(0, 0))\n    return _jax_gather(params, indices)", "docstring": "Gather the indices from params correctly (equivalent to tf.gather but with modifications)\n\nArgs:\nparams: (bsz, n_heads, num_blocks, block_size, head_dim)\nindices: (<num_blocks, 1)", "source": "github-repos"}
{"code": "def save(self, path):\n    self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)", "docstring": "Save source video to file.\n\nArgs:\npath (str): Filename to save to.\n\nNotes: Saves entire source video to file, not just currently selected\nframes.", "source": "codesearchnet"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(BigInteger, self).read(istream, kmip_version=kmip_version)\n    if (self.length % 8):\n        raise exceptions.InvalidPrimitiveLength('invalid big integer length read; expected: multiple of 8, observed: {0}'.format(self.length))\n    sign = 1\n    binary = ''\n    for _ in range(self.length):\n        byte = struct.unpack('!B', istream.read(1))[0]\n        bits = '{0:b}'.format(byte)\n        pad = (len(bits) % 8)\n        if pad:\n            bits = (('0' * (8 - pad)) + bits)\n        binary += bits\n    if (binary[0] == '1'):\n        sign = (- 1)\n        binary = binary.replace('1', 'i')\n        binary = binary.replace('0', '1')\n        binary = binary.replace('i', '0')\n        pivot = binary.rfind('0')\n        binary = ((binary[0:pivot] + '1') + ('0' * len(binary[(pivot + 1):])))\n    self.value = (int(binary, 2) * sign)", "docstring": "Read the encoding of the BigInteger from the input stream.\n\nArgs:\nistream (stream): A buffer containing the encoded bytes of the\nvalue of a BigInteger. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nInvalidPrimitiveLength: if the big integer encoding read in has\nan invalid encoded length.", "source": "codesearchnet"}
{"code": "def parse_args(test: ArgList=None) -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument('encoded_train_data', help='File path for the encoded training data.')\n    parser.add_argument('-o', '--output', help=f'Output file path for the learned weights. (default: {DEFAULT_OUTPUT_NAME})', type=str, default=DEFAULT_OUTPUT_NAME)\n    parser.add_argument('--log', help=f'Output file path for the training log. (default: {DEFAULT_LOG_NAME})', type=str, default=DEFAULT_LOG_NAME)\n    parser.add_argument('--feature-thres', help=f'Threshold value of the minimum feature frequency. (default: {DEFAULT_FEATURE_THRES})', type=int, default=DEFAULT_FEATURE_THRES)\n    parser.add_argument('--iter', help=f'Number of iterations for training. (default: {DEFAULT_ITERATION})', type=int, default=DEFAULT_ITERATION)\n    parser.add_argument('--out-span', help=f'Iteration span to output metrics and weights. (default: {DEFAULT_OUT_SPAN})', type=int, default=DEFAULT_OUT_SPAN)\n    parser.add_argument('--val-data', help='File path for the encoded validation data.', type=str)\n    if test is None:\n        return parser.parse_args()\n    else:\n        return parser.parse_args(test)", "docstring": "Parses commandline arguments.\n\nArgs:\ntest (typing.Optional[typing.List[str]], optional): Commandline args for\ntesting. Defaults to None.\n\nReturns:\nargparse.Namespace: Parsed data of args.", "source": "github-repos"}
{"code": "def _VerifyValues(self, input_sizes=None, filter_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NHWC', data_format_dst='NHWC', expected=None, op_name='Conv2D'):\n    total_size_1 = np.prod(input_sizes)\n    total_size_2 = np.prod(filter_sizes)\n    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)\n    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)\n    strides = [1] + strides + [1]\n    if dilations is None:\n        dilations = [1, 1]\n    dilations = [1] + dilations + [1]\n    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src, data_format_dst)\n    x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n    input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst)\n    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst)\n    dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst)\n    with self.session() as sess:\n        t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n        t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)\n        with self.test_scope():\n            if op_name == 'Conv':\n                conv_format = 'CHANNELS_LAST' if data_format_dst == 'NHWC' else 'CHANNELS_FIRST'\n                out = gen_nn_ops.conv(t1, t2, strides=strides, padding=padding, data_format=conv_format, dilations=dilations)\n            elif op_name == 'Conv2D':\n                out = nn_ops.conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format_dst, dilations=dilations)\n            else:\n                raise ValueError('Invalid op name: %s' % op_name)\n        value = sess.run(out, {t1: x1, t2: x2})\n        self.assertAllClose(expected, value, 0.001)", "docstring": "Tests that tf.nn.conv2d produces the expected value.\n\nArgs:\ninput_sizes: Input tensor dimensions in [batch, input_rows, input_cols,\ninput_depth].\nfilter_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\nstrides: Strides.\ndilations: RHS dilations.\npadding: Padding type.\ndata_format_src: Data format input is in.\ndata_format_dst: Data format verification will run and input is converted\nto.\nexpected: Expected output.\nop_name: Name of operation to test (Conv/Conv2D)", "source": "github-repos"}
{"code": "def related_domains(self, domains):\n        \n        api_name = 'opendns-related_domains'\n        fmt_url_path = u'links/name/{0}.json'\n        return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Get list of domain names that have been seen requested around the\nsame time (up to 60 seconds before or after) to the given domain name.\n\nArgs:\ndomains: an enumerable of strings domain names\nReturns:\nAn enumerable of [domain name, scores]", "source": "juraj-google-style"}
{"code": "def _parse_line(self, instrumentation_block, line):\n    if instrumentation_block.state == _InstrumentationBlockStates.METHOD:\n        return self._parse_method_block_line(instrumentation_block, line)\n    elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:\n        return self._parse_result_block_line(instrumentation_block, line)\n    else:\n        return self._parse_unknown_block_line(instrumentation_block, line)", "docstring": "Parses an arbitrary line from the instrumentation output based upon\nthe current parser state.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, an instrumentation\nblock with any of the possible parser states.\nline: string, the raw instrumentation output line to parse\nappropriately.\n\nReturns:\nThe next instrumenation block to continue parsing with.", "source": "github-repos"}
{"code": "def add_get(self, path, controller, template, raw=False):\n        \n        if raw:\n            fn = controller\n        else:\n            fn = self._prepare_controller(controller, template)\n        self.app.router.add_get(path, fn)", "docstring": "Setup a route of type GET\n\nArgs:\npath (str): URL to listen to\ncontroller (coroutine): the coroutine to handle the request\ntemplate (str): the template to render the response or None if it is a JSON response\nraw (bool): indicates if post-processing (jinja, json, etc) is needed or not", "source": "juraj-google-style"}
{"code": "def empty_like(x, init=None):\n    x = ops.convert_to_tensor(x)\n    return gen_array_ops.empty(array_ops.shape(x), x.dtype, init=init)", "docstring": "Returns a non-initialized tensor with the same shape and dtype as x.\n\nArgs:\nx: A Tensor.\ninit: Initialize the returned tensor with the default value of\nx.dtype(), if True. Otherwise, do not initialize. Defaults to\nNone.\n\nReturns:\nA tensor y, whose dtype and shape are the same as those of x.\ny is guaranteed not to be an alias of x. Upon return, y may contain\narbitrary data.", "source": "github-repos"}
{"code": "def valid(self, value, level=[]):\n\t\t\n\n\t\t\n\t\tself.validation_failures = []\n\n\t\t\n\t\tif value is None and self._optional:\n\t\t\treturn True\n\n\t\t\n\t\tif not isinstance(value, dict):\n\t\t\tself.validation_failures.append(('.'.join(level), str(value)))\n\t\t\treturn False\n\n\t\t\n\t\tbRet = True\n\n\t\t\n\t\tfor k in self._nodes:\n\n\t\t\t\n\t\t\tlLevel = level[:]\n\t\t\tlLevel.append(k)\n\n\t\t\t\n\t\t\tif k not in value:\n\n\t\t\t\t\n\t\t\t\tif not self._nodes[k]._optional:\n\t\t\t\t\tself.validation_failures.append(('.'.join(lLevel), 'missing'))\n\t\t\t\t\tbRet = False\n\n\t\t\t\t\n\t\t\t\tcontinue\n\n\t\t\t\n\t\t\tif not self._nodes[k].valid(value[k], lLevel):\n\t\t\t\tself.validation_failures.extend(self._nodes[k].validation_failures)\n\t\t\t\tbRet = False\n\t\t\t\tcontinue\n\n\t\t\t\n\t\t\tif k in self._requires:\n\n\t\t\t\t\n\t\t\t\tfor f in self._requires[k]:\n\n\t\t\t\t\t\n\t\t\t\t\tif f not in value or value[f] in ('0000-00-00','',None):\n\t\t\t\t\t\tself.validation_failures.append(('.'.join(lLevel), 'requires \\'%s\\' to also be set' % str(f)))\n\t\t\t\t\t\tbRet = False\n\n\t\t\n\t\treturn bRet", "docstring": "Valid\n\nChecks if a value is valid based on the instance's values\n\nArguments:\nvalue {mixed} -- The value to validate\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def chmod_r(root: str, permission: int) -> None:\n    os.chmod(root, permission)\n    for (dirpath, dirnames, filenames) in os.walk(root):\n        for d in dirnames:\n            os.chmod(os.path.join(dirpath, d), permission)\n        for f in filenames:\n            os.chmod(os.path.join(dirpath, f), permission)", "docstring": "Recursive ``chmod``.\n\nArgs:\nroot: directory to walk down\npermission: e.g. ``e.g. stat.S_IWUSR``", "source": "codesearchnet"}
{"code": "def pre_run_cell(self, cellno, code):\n        \n        \n        \n        self.cellid = cellno\n        \n        \n        \n        import ast\n        if findloop(ast.parse(code)):\n            \n            \n            from acorn.logging.decoration import set_streamlining\n            set_streamlining(True)\n\n            \n            from time import time\n            self.pre = {\n                \"m\": \"loop\",\n                \"a\": None,\n                \"s\": time(),\n                \"r\": None,\n                \"c\": code,\n            }", "docstring": "Executes before the user-entered code in `ipython` is run. This\nintercepts loops and other problematic code that would produce lots of\ndatabase entries and streamlines it to produce only a single entry.\n\nArgs:\ncellno (int): the cell number that is about to be executed.\ncode (str): python source code that is about to be executed.", "source": "juraj-google-style"}
{"code": "def get_ini(self, incl_unset=False):\n        \n\n        configp = configparser.ConfigParser(allow_no_value=True)\n        configp.read_dict(self._config)\n        with StringIO() as config_ini:\n            if self._parser:\n                self._parser.set_defaults(\n                    **self.get_section(self.root_section)\n                )\n                argparse_ini = argparse_to_ini(\n                    parser=self._parser, incl_unset=incl_unset\n                )\n                return argparse_ini\n            else:\n                configp.write(config_ini)\n                return config_ini.getvalue()", "docstring": "Return the config dictionary in INI format\nArgs:\nincl_unset (bool): include variables with no defaults.\n\nReturns:\nstr: string of the config file in INI format", "source": "juraj-google-style"}
{"code": "def _display(port=None, height=None, print_message=False, display_handle=None):\n    if (height is None):\n        height = 800\n    if (port is None):\n        infos = manager.get_all()\n        if (not infos):\n            raise ValueError(\"Can't display TensorBoard: no known instances running.\")\n        else:\n            info = max(manager.get_all(), key=(lambda x: x.start_time))\n            port = info.port\n    else:\n        infos = [i for i in manager.get_all() if (i.port == port)]\n        info = (max(infos, key=(lambda x: x.start_time)) if infos else None)\n    if print_message:\n        if (info is not None):\n            message = 'Selecting TensorBoard with {data_source} (started {delta} ago; port {port}, pid {pid}).'.format(data_source=manager.data_source_from_info(info), delta=_time_delta_from_info(info), port=info.port, pid=info.pid)\n            print(message)\n        else:\n            pass\n    fn = {_CONTEXT_COLAB: _display_colab, _CONTEXT_IPYTHON: _display_ipython, _CONTEXT_NONE: _display_cli}[_get_context()]\n    return fn(port=port, height=height, display_handle=display_handle)", "docstring": "Internal version of `display`.\n\nArgs:\nport: As with `display`.\nheight: As with `display`.\nprint_message: True to print which TensorBoard instance was selected\nfor display (if applicable), or False otherwise.\ndisplay_handle: If not None, an IPython display handle into which to\nrender TensorBoard.", "source": "codesearchnet"}
{"code": "def set_flowcontrol(self, name, direction, value=None, default=False, disable=False):\n    if (value is not None):\n        if (value not in ['on', 'off']):\n            raise ValueError('invalid flowcontrol value')\n    if (direction not in ['send', 'receive']):\n        raise ValueError('invalid direction specified')\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder(('flowcontrol %s' % direction), value=value, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the interface flowcontrol value\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\n\ndirection (string): one of either 'send' or 'receive'\n\nvalue (boolean): True if the interface should enable flow control\npacket handling, otherwise False\n\ndefault (boolean): Specifies to default the interface flow control\nsend or receive value\n\ndisable (boolean): Specifies to disable the interface flow control\nsend or receive value\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def process(self, batch, device=None):\n        \n        padded = self.pad(batch)\n        tensor = self.numericalize(padded, device=device)\n        return tensor", "docstring": "Process a list of examples to create a torch.Tensor.\n\nPad, numericalize, and postprocess a batch and create a tensor.\n\nArgs:\nbatch (list(object)): A list of object from a batch of examples.\nReturns:\ntorch.autograd.Variable: Processed object given the input\nand custom postprocessing Pipeline.", "source": "juraj-google-style"}
{"code": "def coupling_efficiency(mode_solver, fibre_mfd, fibre_offset_x=0, fibre_offset_y=0, n_eff_fibre=1.441):\n    etas = []\n    gaus = _make_gaussian(mode_solver._structure.xc, mode_solver._structure.yc, fibre_mfd, fibre_offset_x, fibre_offset_y)\n    for (mode, n_eff) in zip(mode_solver.modes, mode_solver.n_effs):\n        o = abs(_overlap(mode, gaus))\n        t = abs(transmission(n_eff, n_eff_fibre))\n        eta = (o * t)\n        etas.append(eta)\n    return etas", "docstring": "Finds the coupling efficiency between a solved\nfundamental mode and a fibre of given MFD.\n\nArgs:\nmode_solver (_ModeSolver): Mode solver that\nhas found a fundamental mode.\nfibre_mfd (float): The mode-field diameter\n(MFD) of the fibre.\nfibre_offset_x (float): Offset the fibre\nfrom the centre position of the window\nin x. Default is 0 (no offset).\nfibre_offset_y (float): Offset the fibre\nfrom the centre position of the window\nin y. Default is 0 (no offset).\nn_eff_fibre (float): The effective index\nof the fibre mode.  Default is 1.441.\n\nReturns:\nfloat: The power coupling efficiency.", "source": "codesearchnet"}
{"code": "def needle_statistics(infile):\n    \n\n    alignments = list(AlignIO.parse(infile, \"emboss\"))\n    alignment_properties = defaultdict(dict)\n\n    with open(infile) as f:\n        line = f.readline()\n\n        for i in range(len(alignments)):\n            while line.rstrip() != \"\n                line = f.readline()\n                if not line:\n                    raise StopIteration\n\n            while line[0] == \"\n                \n                \n                parts = line[1:].split(\":\", 1)\n                key = parts[0].lower().strip()\n                if key == '1':\n                    a_id = parts[1].strip()\n                if key == '2':\n                    b_id = parts[1].strip()\n                if key == 'identity':\n                    ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()\n                    ident_num = int(ident_parse[0].split('/')[0])\n                    ident_percent = float(ident_parse[1])\n                    alignment_properties[a_id + '_' + b_id]['identity'] = ident_num\n                    alignment_properties[a_id + '_' + b_id]['percent_identity'] = ident_percent\n                if key == 'similarity':\n                    sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()\n                    sim_num = int(sim_parse[0].split('/')[0])\n                    sim_percent = float(sim_parse[1])\n                    alignment_properties[a_id + '_' + b_id]['similarity'] = sim_num\n                    alignment_properties[a_id + '_' + b_id]['percent_similarity'] = sim_percent\n                if key == 'gaps':\n                    gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()\n                    gap_num = int(gap_parse[0].split('/')[0])\n                    gap_percent = float(gap_parse[1])\n                    alignment_properties[a_id + '_' + b_id]['gaps'] = gap_num\n                    alignment_properties[a_id + '_' + b_id]['percent_gaps'] = gap_percent\n                if key == 'score':\n                    score = float(parts[1].strip())\n                    alignment_properties[a_id + '_' + b_id]['score'] = score\n\n                \n                line = f.readline()\n\n    return alignment_properties", "docstring": "Reads in a needle alignment file and spits out statistics of the alignment.\n\nArgs:\ninfile (str): Alignment file name\n\nReturns:\ndict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.", "source": "juraj-google-style"}
{"code": "def assign_nested_vars(variables, tensors, indices=None):\n    if isinstance(variables, (tuple, list)):\n        return tf.group(*[assign_nested_vars(variable, tensor) for (variable, tensor) in zip(variables, tensors)])\n    if (indices is None):\n        return variables.assign(tensors)\n    else:\n        return tf.scatter_update(variables, indices, tensors)", "docstring": "Assign tensors to matching nested tuple of variables.\n\nArgs:\nvariables: Nested tuple or list of variables to update.\ntensors: Nested tuple or list of tensors to assign.\nindices: Batch indices to assign to; default to all.\n\nReturns:\nOperation.", "source": "codesearchnet"}
{"code": "def abs_path_from_base(base_path, rel_path):\n    \n    \n    return os.path.abspath(\n        os.path.join(\n            os.path.dirname(sys._getframe(1).f_code.co_filename), base_path, rel_path\n        )\n    )", "docstring": "Join a base and a relative path and return an absolute path to the resulting\nlocation.\n\nArgs:\nbase_path: str\nRelative or absolute path to prepend to ``rel_path``.\n\nrel_path: str\nPath relative to the location of the module file from which this function is called.\n\nReturns:\nstr : Absolute path to the location specified by ``rel_path``.", "source": "juraj-google-style"}
{"code": "async def search_participant(self, name, force_update=False):\n        \n        if force_update or self.participants is None:\n            await self.get_participants()\n        if self.participants is not None:\n            for p in self.participants:\n                if p.name == name:\n                    return p\n        return None", "docstring": "search a participant by (display) name\n\n|methcoro|\n\nArgs:\nname: display name of the participant\nforce_update (dfault=False): True to force an update to the Challonge API\n\nReturns:\nParticipant: None if not found\n\nRaises:\nAPIException", "source": "juraj-google-style"}
{"code": "def get_credentials(self):\n    with self.AUTHENTICATION_LOCK:\n        log.info('Starting authentication for %s', self.target)\n        store = oauth2client.file.Storage(self.credentials_path)\n        credentials = store.get()\n        if ((not credentials) or credentials.invalid):\n            log.info('No valid login. Starting OAUTH flow.')\n            flow = oauth2client.client.flow_from_clientsecrets(self.client_secret_path, self.SCOPES)\n            flow.user_agent = self.APPLICATION_NAME\n            flags = oauth2client.tools.argparser.parse_args([])\n            credentials = oauth2client.tools.run_flow(flow, store, flags)\n            log.info('Storing credentials to %r', self.credentials_path)\n        return credentials", "docstring": "Gets valid user credentials from storage.\n\nIf nothing has been stored, or if the stored credentials are invalid,\nthe OAuth2 flow is completed to obtain the new credentials.\n\nReturns:\nCredentials, the obtained credential.", "source": "codesearchnet"}
{"code": "def _get_example_from_basic_type(type):\n    if (type == 'integer'):\n        return [42, 24]\n    elif (type == 'number'):\n        return [5.5, 5.5]\n    elif (type == 'string'):\n        return ['string', 'string2']\n    elif (type == 'datetime'):\n        return ['2015-08-28T09:02:57.481Z', '2015-08-28T09:02:57.481Z']\n    elif (type == 'boolean'):\n        return [False, True]\n    elif (type == 'null'):\n        return ['null', 'null']", "docstring": "Get example from the given type.\n\nArgs:\ntype: the type you want an example of.\n\nReturns:\nAn array with two example values of the given type.", "source": "codesearchnet"}
{"code": "def drop_if(df, fun):\n    \n\n    def _filter_f(col):\n        try:\n            return fun(df[col])\n        except:\n            return False\n\n    cols = list(filter(_filter_f, df.columns))\n    return df.drop(cols, axis=1)", "docstring": "Drops columns where fun(ction) is true\nArgs:\nfun: a function that will be applied to columns", "source": "juraj-google-style"}
{"code": "def as_functor(func: Callable, ignore_extra_args: bool=False) -> Functor:\n    return functor_class(func)(ignore_extra_args=ignore_extra_args)", "docstring": "Make a functor object from a regular python function.\n\nNOTE(daiyip): This method is designed to create on-the-go functor object,\nusually for lambdas. To create a reusable functor class, please use\n`functor_class` method.\n\nArgs:\nfunc: A regular python function.\nignore_extra_args: If True, extra argument which is not acceptable by `func`\nwill be ignored.\n\nReturns:\nFunctor object from input function.", "source": "github-repos"}
{"code": "def push_file(self, local_source, remote_dir):\n    remote_dest = ((remote_dir + '/') + os.path.basename(local_source))\n    try:\n        self.makedirs(remote_dir, exist_ok=True)\n    except IOError as e:\n        logger.exception('Pushing {0} to {1} failed'.format(local_source, remote_dir))\n        if (e.errno == 2):\n            raise BadScriptPath(e, self.hostname)\n        elif (e.errno == 13):\n            raise BadPermsScriptPath(e, self.hostname)\n        else:\n            logger.exception('File push failed due to SFTP client failure')\n            raise FileCopyException(e, self.hostname)\n    try:\n        self.sftp_client.put(local_source, remote_dest, confirm=True)\n        self.sftp_client.chmod(remote_dest, 511)\n    except Exception as e:\n        logger.exception('File push from local source {} to remote destination {} failed'.format(local_source, remote_dest))\n        raise FileCopyException(e, self.hostname)\n    return remote_dest", "docstring": "Transport a local file to a directory on a remote machine\n\nArgs:\n- local_source (string): Path\n- remote_dir (string): Remote path\n\nReturns:\n- str: Path to copied file on remote machine\n\nRaises:\n- BadScriptPath : if script path on the remote side is bad\n- BadPermsScriptPath : You do not have perms to make the channel script dir\n- FileCopyException : FileCopy failed.", "source": "codesearchnet"}
{"code": "def decorate_set_on_listener(prototype):\n    \n    \n    def add_annotation(method):\n        method._event_info = {}\n        method._event_info['name'] = method.__name__\n        method._event_info['prototype'] = prototype\n        return method\n\n    return add_annotation", "docstring": "Private decorator for use in the editor.\nAllows the Editor to create listener methods.\n\nArgs:\nparams (str): The list of parameters for the listener\nmethod (es. \"(self, new_value)\")", "source": "juraj-google-style"}
{"code": "def _ParseRecords(self, parser_mediator, evtx_file):\n    \n    \n    \n    \n    \n\n    for record_index in range(evtx_file.number_of_records):\n      if parser_mediator.abort:\n        break\n\n      try:\n        evtx_record = evtx_file.get_record(record_index)\n        self._ParseRecord(parser_mediator, record_index, evtx_record)\n\n      except IOError as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse event record: {0:d} with error: {1!s}'.format(\n                record_index, exception))\n\n    for record_index in range(evtx_file.number_of_recovered_records):\n      if parser_mediator.abort:\n        break\n\n      try:\n        evtx_record = evtx_file.get_recovered_record(record_index)\n        self._ParseRecord(\n            parser_mediator, record_index, evtx_record, recovered=True)\n\n      except IOError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse recovered event record: {0:d} with error: '\n            '{1!s}').format(record_index, exception))", "docstring": "Parses Windows XML EventLog (EVTX) records.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nevtx_file (pyevt.file): Windows XML EventLog (EVTX) file.", "source": "juraj-google-style"}
{"code": "def linear(self, x):\n    \n    with tf.name_scope(\"presoftmax_linear\"):\n      batch_size = tf.shape(x)[0]\n      length = tf.shape(x)[1]\n\n      x = tf.reshape(x, [-1, self.hidden_size])\n      logits = tf.matmul(x, self.shared_weights, transpose_b=True)\n\n      return tf.reshape(logits, [batch_size, length, self.vocab_size])", "docstring": "Computes logits by running x through a linear layer.\n\nArgs:\nx: A float32 tensor with shape [batch_size, length, hidden_size]\nReturns:\nfloat32 tensor with shape [batch_size, length, vocab_size].", "source": "juraj-google-style"}
{"code": "def get_environ(cls, prefix):\n    return ((key[(len(prefix) + 1):], value) for (key, value) in os.environ.items() if key.startswith(('%s_' % prefix)))", "docstring": "Retrieves environment variables from a namespace.\n\nArgs:\nprefix (str): The prefix, without a trailing underscore.\n\nReturns:\nlist: A list of environment variable keys and values.", "source": "codesearchnet"}
{"code": "def swipe(self, x1, y1, x2, y2, duration=0.5):\n        \n        scale = self.scale\n        x1, y1, x2, y2 = x1/scale, y1/scale, x2/scale, y2/scale\n        self.session.swipe(x1, y1, x2, y2, duration)", "docstring": "Simulate swipe operation\nArgs:\nx1, y1(int): from position\nx2, y2(int): to position\nduration(float): swipe duration, unit seconds", "source": "juraj-google-style"}
{"code": "def compare(expr, value, regex_expr=False):\n    if (expr == value):\n        return True\n    negate = False\n    if isinstance(expr, str):\n        negate = expr.startswith(NEGATE)\n        expr = (strip_negate(expr) if negate else expr)\n    try:\n        test(expr, value, regex_expr=regex_expr)\n    except Exception as err:\n        if negate:\n            return True\n        else:\n            raise err\n    return True", "docstring": "Compares an string or regular expression againast a given value.\n\nArguments:\nexpr (str|regex): string or regular expression value to compare.\nvalue (str): value to compare against to.\nregex_expr (bool, optional): enables string based regex matching.\n\nRaises:\nAssertionError: in case of assertion error.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def firmware_version(self):\n        \n        buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n        self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n        return ctypes.string_at(buf).decode()", "docstring": "Returns a firmware identification string of the connected J-Link.\n\nIt consists of the following:\n- Product Name (e.g. J-Link)\n- The string: compiled\n- Compile data and time.\n- Optional additional information.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nFirmware identification string.", "source": "juraj-google-style"}
{"code": "def poll(self, channel_id=None, json=None, **kwargs):\n    path = '/event-service/v1/channels/{}/poll'.format(channel_id)\n    r = self._httpclient.request(method='POST', url=self.url, json=json, path=path, **kwargs)\n    return r", "docstring": "Read one or more events from a channel.\n\nReads events (log records) from the identified channel. Events\nare read in chronological order.\n\nArgs:\nchannel_id (str): The channel ID.\njson (dict): Payload/request body.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nRefer to ``event_poll.py`` example.", "source": "codesearchnet"}
{"code": "def _GetTimeValue(self, name):\n    \n    timestamp = getattr(self._tsk_file.info.meta, name, None)\n\n    if self._file_system_type in self._TSK_HAS_NANO_FS_TYPES:\n      name_fragment = '{0:s}_nano'.format(name)\n      fraction_of_second = getattr(\n          self._tsk_file.info.meta, name_fragment, None)\n    else:\n      fraction_of_second = None\n\n    return TSKTime(timestamp=timestamp, fraction_of_second=fraction_of_second)", "docstring": "Retrieves a date and time value.\n\nArgs:\nname (str): name of the date and time value, for example \"atime\" or\n\"mtime\".\n\nReturns:\ndfdatetime.DateTimeValues: date and time value or None if not available.", "source": "juraj-google-style"}
{"code": "def get_commands_in_namespace(namespace=None, level=1):\n    \n    from ..command import Command  \n    commands = {}\n    if namespace is None:\n        frame = inspect.stack()[level][0]\n        namespace = frame.f_globals\n    elif inspect.ismodule(namespace):\n        namespace = vars(namespace)\n    for name in namespace:\n        obj = namespace[name]\n        if isinstance(obj, Command):\n            commands[name] = obj\n    return OrderedDict((name, commands[name]) for name in sorted(commands))", "docstring": "Get commands in namespace.\n\nArgs:\nnamespace (dict|module): Typically a module. If not passed, the\nglobals from the call site will be used.\nlevel (int): If not called from the global scope, set this\nappropriately to account for the call stack.\n\nReturns:\nOrderedDict: The commands found in the namespace, ordered by\nname.\n\nCan be used to create ``__all__`` lists::\n\n__all__ = list(get_commands_in_namespace())", "source": "juraj-google-style"}
{"code": "def get_current(self, cycle=None, dataset_number=None, full=True):\n        \n\n        dataset_number = self._validate_dataset_number(dataset_number)\n        if dataset_number is None:\n            self._report_empty_dataset()\n            return\n        cycle_index_header = self.headers_normal.cycle_index_txt\n        current_header = self.headers_normal.current_txt\n        \n\n        test = self.datasets[dataset_number].dfdata\n        if cycle:\n            self.logger.debug(f\"getting current for cycle {cycle}\")\n            c = test[(test[cycle_index_header] == cycle)]\n            if not self.is_empty(c):\n                v = c[current_header]\n                return v\n        else:\n            if not full:\n                self.logger.debug(\n                    \"getting a list of current-curves for all cycles\"\n                )\n                v = []\n                no_cycles = np.amax(test[cycle_index_header])\n                for j in range(1, no_cycles + 1):\n                    txt = \"Cycle  %i:  \" % j\n                    self.logger.debug(txt)\n                    c = test[(test[cycle_index_header] == j)]\n                    v.append(c[current_header])\n            else:\n                self.logger.debug(\"getting all current-curves \")\n                v = test[current_header]\n            return v", "docstring": "Returns current (in mA).\n\nArgs:\ncycle: cycle number (all cycles if None)\ndataset_number: first dataset if None\nfull: valid only for cycle=None (i.e. all cycles), returns the full\npandas.Series if True, else a list of pandas.Series\n\nReturns:\npandas.Series (or list of pandas.Series if cycle=None og full=False)", "source": "juraj-google-style"}
{"code": "def enumeration(*values, **kwargs):\n    if (not (values and all(((isinstance(value, string_types) and value) for value in values)))):\n        raise ValueError(('expected a non-empty sequence of strings, got %s' % values))\n    if (len(values) != len(set(values))):\n        raise ValueError(('enumeration items must be unique, got %s' % values))\n    attrs = {value: value for value in values}\n    attrs.update({'_values': list(values), '_default': values[0], '_case_sensitive': kwargs.get('case_sensitive', True), '_quote': kwargs.get('quote', False)})\n    return type(str('Enumeration'), (Enumeration,), attrs)()", "docstring": "Create an |Enumeration| object from a sequence of values.\n\nCall ``enumeration`` with a sequence of (unique) strings to create an\nEnumeration object:\n\n.. code-block:: python\n\n#: Specify the horizontal alignment for rendering text\nTextAlign = enumeration(\"left\", \"right\", \"center\")\n\nArgs:\nvalues (str) : string enumeration values, passed as positional arguments\n\nThe order of arguments is the order of the enumeration, and the\nfirst element will be considered the default value when used\nto create |Enum| properties.\n\nKeyword Args:\ncase_sensitive (bool, optional) :\nWhether validation should consider case or not (default: True)\n\nquote (bool, optional):\nWhther values should be quoted in the string representations\n(default: False)\n\nRaises:\nValueError if values empty, if any value is not a string or not unique\n\nReturns:\nEnumeration", "source": "codesearchnet"}
{"code": "def username(self, value):\n    self._username = value\n    self._connectionXML.set('username', value)", "docstring": "Set the connection's username property.\n\nArgs:\nvalue:  New username value. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def build_kalman_mean_step(get_transition_matrix_for_timestep, get_transition_noise_for_timestep, get_observation_matrix_for_timestep, get_observation_noise_for_timestep):\n\n    def mean_step(previous_means, t):\n        'Single step of prior mean recursion.'\n        (previous_latent_mean, _) = previous_means\n        latent_mean = _propagate_mean(previous_latent_mean, get_transition_matrix_for_timestep((t - 1)), get_transition_noise_for_timestep((t - 1)))\n        observation_mean = _propagate_mean(latent_mean, get_observation_matrix_for_timestep(t), get_observation_noise_for_timestep(t))\n        return (latent_mean, observation_mean)\n    return mean_step", "docstring": "Build a callable that performs one step of Kalman mean recursion.\n\nArgs:\nget_transition_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[latent_size, latent_size]`.\nget_transition_noise_for_timestep: callable taking a timestep as\nan integer `Tensor` argument, and returning a\n`MultivariateNormalLinearOperator` of event shape\n`[latent_size]`.\nget_observation_matrix_for_timestep: callable taking a timestep\nas an integer `Tensor` argument, and returning a `LinearOperator`\nof shape `[observation_size, observation_size]`.\nget_observation_noise_for_timestep: callable taking a timestep as\nan integer `Tensor` argument, and returning a\n`MultivariateNormalLinearOperator` of event shape\n`[observation_size]`.\n\nReturns:\nkalman_mean_step: a callable that computes latent state and\nobservation means at time `t`, given latent mean at time `t-1`.", "source": "codesearchnet"}
{"code": "def inspect_plugin(self, name):\n    url = self._url('/plugins/{0}/json', name)\n    return self._result(self._get(url), True)", "docstring": "Retrieve plugin metadata.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\n\nReturns:\nA dict containing plugin info", "source": "codesearchnet"}
{"code": "def searchsorted(sorted_sequence, values, side='left'):\n    if any_symbolic_tensors((sorted_sequence, values)):\n        return SearchSorted(side=side).symbolic_call(sorted_sequence, values)\n    sorted_sequence = backend.convert_to_tensor(sorted_sequence)\n    values = backend.convert_to_tensor(values)\n    return backend.numpy.searchsorted(sorted_sequence, values, side=side)", "docstring": "Perform a binary search, returning indices for insertion of `values`\ninto `sorted_sequence` that maintain the sorting order.\n\nArgs:\nsorted_sequence: 1-D input tensor, sorted along the innermost\ndimension.\nvalues: N-D tensor of query insertion values.\nside: 'left' or 'right', specifying the direction in which to insert\nfor the equality case (tie-breaker).\n\nReturns:\nTensor of insertion indices of same shape as `values`.", "source": "github-repos"}
{"code": "def Benchmark(tf_bench, builder_fn, use_xla_jit, device, separate_compiled_gradients=False):\n    with ops.Graph().as_default():\n        name = None\n        targets = []\n        with ops.device(device):\n            fetches = []\n            jit_scope = jit.experimental_jit_scope\n            with jit_scope(compile_ops=use_xla_jit, separate_compiled_gradients=separate_compiled_gradients):\n                name, fetches = builder_fn()\n            for fetch in fetches:\n                targets.append(array_ops.identity(fetch).op)\n        config = config_pb2.ConfigProto(allow_soft_placement=True)\n        with session.Session(config=config) as sess:\n            sess.run(variables.global_variables_initializer())\n            xla = 'xla_' if use_xla_jit else ''\n            tf_bench.run_op_benchmark(sess, targets, name='%s_%s%s' % (name, xla, device))", "docstring": "Build a graph and run benchmarks against it, with or without XLA.\n\nArgs:\ntf_bench: An instance of tf.test.Benchmark, used to run the benchmark.\nbuilder_fn: A function that builds a graph when invoked, and returns\n(name, fetches), where name is the name of the test, and fetches\nis a list of tensors to fetch as output.\nuse_xla_jit: If true compile with the XLA JIT, otherwise use regular TF.\ndevice: The tensorflow device to run on, e.g. \"cpu\", \"gpu\".\nseparate_compiled_gradients: If true put each gradient subgraph into a\nseparate compilation scope. This gives fine-grained control over which\nportions of the graph will be compiled as a single unit. Compiling\ngradients separately may yield better performance for some graphs.\nThe scope is named based on the scope of the forward computation as well\nas the name of the gradients. As a result, the gradients will be compiled\nin a scope that is separate from both the forward computation, and from\nother gradients.", "source": "github-repos"}
{"code": "def activate(self, uid=None):\n    if (uid is not None):\n        if (not isinstance(uid, six.string_types)):\n            raise TypeError('uid must be a string')\n    result = self.proxy.activate(uid)\n    status = result.result_status.value\n    if (status == enums.ResultStatus.SUCCESS):\n        return\n    else:\n        reason = result.result_reason.value\n        message = result.result_message.value\n        raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Activate a managed object stored by a KMIP appliance.\n\nArgs:\nuid (string): The unique ID of the managed object to activate.\nOptional, defaults to None.\n\nReturns:\nNone\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input argument is invalid", "source": "codesearchnet"}
{"code": "def _find_classes(self, dir):\n        \n        if sys.version_info >= (3, 5):\n            \n            classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n        else:\n            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n        classes.sort()\n        class_to_idx = {classes[i]: i for i in range(len(classes))}\n        return classes, class_to_idx", "docstring": "Finds the class folders in a dataset.\n\nArgs:\ndir (string): Root directory path.\n\nReturns:\ntuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\nEnsures:\nNo class is a subdirectory of another.", "source": "juraj-google-style"}
{"code": "def migrate(belstr: str) -> str:\n    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, '2.0.0')\n    return migrate_ast(bo.ast).to_string()", "docstring": "Migrate BEL 1 to 2.0.0\n\nArgs:\nbel: BEL 1\n\nReturns:\nbel: BEL 2", "source": "codesearchnet"}
{"code": "def register_domain(self, domain=0, tokenizer=None, trie=None):\n    self.domains[domain] = IntentDeterminationEngine(tokenizer=tokenizer, trie=trie)", "docstring": "Register a domain with the intent engine.\n\nArgs:\ntokenizer(tokenizer): The tokenizer you wish to use.\ntrie(Trie): the Trie() you wish to use.\ndomain(str): a string representing the domain you wish to add", "source": "codesearchnet"}
{"code": "def _get_func_name():\n    return tf_inspect.stack()[1][3]", "docstring": "Get the name of current function.\n\nReturns:\nString that is the name of current function.", "source": "github-repos"}
{"code": "def solve(self):\n    hierarchy = type_match.get_all_subclasses([self.ast, self.builtins])\n    factory_protocols = type_match.TypeMatch(hierarchy)\n    factory_partial = type_match.TypeMatch(hierarchy)\n    solver_protocols = factory_protocols.solver\n    solver_partial = factory_partial.solver\n    unknown_classes = set()\n    partial_classes = set()\n    complete_classes = set()\n    for cls in self.ast.classes:\n        if is_unknown(cls):\n            solver_protocols.register_variable(cls.name)\n            solver_partial.register_variable(cls.name)\n            unknown_classes.add(cls)\n        elif is_partial(cls):\n            partial_classes.add(cls)\n        else:\n            complete_classes.add(cls)\n    protocol_classes_and_aliases = set(self.protocols.classes)\n    for alias in self.protocols.aliases:\n        if not isinstance(alias.type, pytd.AnythingType) and alias.name != 'protocols.Protocol':\n            protocol_classes_and_aliases.add(alias.type.cls)\n    for protocol in protocol_classes_and_aliases:\n        for unknown in unknown_classes:\n            self.match_unknown_against_protocol(factory_protocols, solver_protocols, unknown, protocol)\n    for complete in complete_classes.union(self.builtins.classes):\n        for partial in partial_classes:\n            if escape.unpack_partial(partial.name) == complete.name:\n                self.match_partial_against_complete(factory_partial, solver_partial, partial, complete)\n    partial_functions = set()\n    complete_functions = set()\n    for f in self.ast.functions:\n        if is_partial(f):\n            partial_functions.add(f)\n        else:\n            complete_functions.add(f)\n    for partial in partial_functions:\n        for complete in complete_functions.union(self.builtins.functions):\n            if escape.unpack_partial(partial.name) == complete.name:\n                self.match_call_record(factory_partial, solver_partial, partial, complete)\n    log.info('=========== Equations to solve =============\\n%s', solver_protocols)\n    log.info('=========== Equations to solve (end) =======')\n    solved_protocols = solver_protocols.solve()\n    log.info('=========== Call trace equations to solve =============\\n%s', solver_partial)\n    log.info('=========== Call trace equations to solve (end) =======')\n    solved_partial = solver_partial.solve()\n    merged_solution = {}\n    for unknown in itertools.chain(solved_protocols, solved_partial):\n        if unknown in solved_protocols and unknown in solved_partial:\n            merged_solution[unknown] = solved_protocols[unknown].union(solved_partial[unknown])\n            merged_solution[unknown].discard('?')\n        elif unknown in solved_protocols:\n            merged_solution[unknown] = solved_protocols[unknown]\n        else:\n            merged_solution[unknown] = solved_partial[unknown]\n    return merged_solution", "docstring": "Solve the equations generated from the pytd.\n\nReturns:\nA dictionary (str->str), mapping unknown class names to known class names.\nRaises:\nAssertionError: If we detect an internal error.", "source": "github-repos"}
{"code": "def reply_code_tuple(code: int) -> Tuple[(int, int, int)]:\n    return ((code", "docstring": "Return the reply code as a tuple.\n\nArgs:\ncode: The reply code.\n\nReturns:\nEach item in the tuple is the digit.", "source": "codesearchnet"}
{"code": "def process(self):\n    client = self._get_client_by_hostname(self.host)\n    self._await_flow(client, self.flow_id)\n    collected_flow_data = self._download_files(client, self.flow_id)\n    if collected_flow_data:\n        print('{0:s}: Downloaded: {1:s}'.format(self.flow_id, collected_flow_data))\n        fqdn = client.data.os_info.fqdn.lower()\n        self.state.output.append((fqdn, collected_flow_data))", "docstring": "Collect the results.\n\nRaises:\nDFTimewolfError: if no files specified", "source": "codesearchnet"}
{"code": "def TerminateFlow(client_id, flow_id, reason=None, flow_state=rdf_flow_objects.Flow.FlowState.ERROR):\n    to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)]\n    while to_terminate:\n        next_to_terminate = []\n        for rdf_flow in to_terminate:\n            _TerminateFlow(rdf_flow, reason=reason, flow_state=flow_state)\n            next_to_terminate.extend(data_store.REL_DB.ReadChildFlowObjects(rdf_flow.client_id, rdf_flow.flow_id))\n        to_terminate = next_to_terminate", "docstring": "Terminates a flow and all of its children.\n\nArgs:\nclient_id: Client ID of a flow to terminate.\nflow_id: Flow ID of a flow to terminate.\nreason: String with a termination reason.\nflow_state: Flow state to be assigned to a flow after termination. Defaults\nto FlowState.ERROR.", "source": "codesearchnet"}
{"code": "def fasta_verifier(entries, ambiguous=False):\n    if ambiguous:\n        regex = '^>.+{0}[ACGTURYKMSWBDHVNX]+{0}$'.format(os.linesep)\n    else:\n        regex = '^>.+{0}[ACGTU]+{0}$'.format(os.linesep)\n    delimiter = '{0}'.format(os.linesep)\n    for entry in entries:\n        try:\n            entry_verifier([entry.write()], regex, delimiter)\n        except FormatError as error:\n            if (error.part == 0):\n                msg = 'Unknown Header Error with {0}'.format(entry.id)\n                raise FormatError(message=msg)\n            elif ((error.part == 1) and ambiguous):\n                msg = '{0} contains a base not in [ACGTURYKMSWBDHVNX]'.format(entry.id)\n                raise FormatError(message=msg)\n            elif ((error.part == 1) and (not ambiguous)):\n                msg = '{0} contains a base not in [ACGTU]'.format(entry.id)\n                raise FormatError(message=msg)\n            else:\n                msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)\n                raise FormatError(message=msg)", "docstring": "Raises error if invalid FASTA format detected\n\nArgs:\nentries (list): A list of FastaEntry instances\n\nambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases\n\nRaises:\nFormatError: Error when FASTA format incorrect with descriptive message\n\nExample:\n>>> from bio_utils.iterators import fasta_iter\n>>> import os\n>>> entries = r'>entry1{0}AAGGATTCG{0}' \\\n...           r'>entry{0}AGGTCCCCCG{0}' \\\n...           r'>entry3{0}GCCTAGC{0}'.format(os.linesep)\n>>> fasta_entries = fasta_iter(iter(entries.split(os.linesep)))\n>>> fasta_verifier(fasta_entries)", "source": "codesearchnet"}
{"code": "def __init__(self, column_names=None, title=None):\n    \n    super(BaseTableView, self).__init__()\n    self._columns = column_names or []\n    self._number_of_columns = len(self._columns)\n    self._rows = []\n    self._title = title", "docstring": "Initializes a table view.\n\nArgs:\ncolumn_names (Optional[list[str]]): column names.\ntitle (Optional[str]): title.", "source": "juraj-google-style"}
{"code": "def _get_tensor_details(self, tensor_index, subgraph_index):\n    tensor_index = int(tensor_index)\n    subgraph_index = int(subgraph_index)\n    tensor_name = self._interpreter.TensorName(tensor_index, subgraph_index)\n    tensor_size = self._interpreter.TensorSize(tensor_index, subgraph_index)\n    tensor_size_signature = self._interpreter.TensorSizeSignature(tensor_index, subgraph_index)\n    tensor_type = self._interpreter.TensorType(tensor_index, subgraph_index)\n    tensor_quantization = self._interpreter.TensorQuantization(tensor_index, subgraph_index)\n    tensor_quantization_params = self._interpreter.TensorQuantizationParameters(tensor_index, subgraph_index)\n    tensor_sparsity_params = self._interpreter.TensorSparsityParameters(tensor_index, subgraph_index)\n    if not tensor_type:\n        raise ValueError('Could not get tensor details')\n    details = {'name': tensor_name, 'index': tensor_index, 'shape': tensor_size, 'shape_signature': tensor_size_signature, 'dtype': tensor_type, 'quantization': tensor_quantization, 'quantization_parameters': {'scales': tensor_quantization_params[0], 'zero_points': tensor_quantization_params[1], 'quantized_dimension': tensor_quantization_params[2]}, 'sparsity_parameters': tensor_sparsity_params}\n    return details", "docstring": "Gets tensor details.\n\nArgs:\ntensor_index: Tensor index of tensor to query.\nsubgraph_index: Index of the subgraph.\n\nReturns:\nA dictionary containing the following fields of the tensor:\n'name': The tensor name.\n'index': The tensor index in the subgraph.\n'shape': The shape of the tensor.\n'quantization': Deprecated, use 'quantization_parameters'. This field\nonly works for per-tensor quantization, whereas\n'quantization_parameters' work in all cases.\n'quantization_parameters': The parameters used to quantize the tensor:\n'scales': List of scales (one if per-tensor quantization)\n'zero_points': List of zero_points (one if per-tensor quantization)\n'quantized_dimension': Specifies the dimension of per-axis\nquantization, in the case of multiple scales/zero_points.\n\nRaises:\nValueError: If tensor_index is invalid.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    regf_file = pyregf.file() \n    try:\n      regf_file.open_file_object(file_object)\n    except IOError:\n      \n      \n      return\n\n    root_key = regf_file.get_root_key()\n    if root_key is None:\n      regf_file.close()\n      return\n\n    root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY)\n    if root_file_key is None:\n      regf_file.close()\n      return\n\n    for volume_key in root_file_key.sub_keys:\n      for am_entry in volume_key.sub_keys:\n        self._ProcessAMCacheFileKey(am_entry, parser_mediator)\n\n    root_program_key = root_key.get_sub_key_by_path(\n        self._AMCACHE_ROOT_PROGRAM_KEY)\n    if root_program_key is None:\n      regf_file.close()\n      return\n\n    for am_entry in root_program_key.sub_keys:\n      self._ProcessAMCacheProgramKey(am_entry, parser_mediator)\n\n    regf_file.close()", "docstring": "Parses an Amcache.hve file for events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def calculate_entropy(self, entropy_string):\n        \n        total = 0\n        for char in entropy_string:\n            if char.isalpha():\n                prob = self.frequency[char.lower()]\n                total += - math.log(prob) / math.log(2)\n        logging.debug(\"Entropy score: {0}\".format(total))\n        return total", "docstring": "Calculates the entropy of a string based on known frequency of\nEnglish letters.\n\nArgs:\nentropy_string: A str representing the string to calculate.\n\nReturns:\nA negative float with the total entropy of the string (higher\nis better).", "source": "juraj-google-style"}
{"code": "def list_merge(list_a, list_b):\n    result = []\n    for item in list_a:\n        if (not (item in result)):\n            result.append(item)\n    for item in list_b:\n        if (not (item in result)):\n            result.append(item)\n    return result", "docstring": "Merge two lists without duplicating items\n\nArgs:\nlist_a: list\nlist_b: list\nReturns:\nNew list with deduplicated items from list_a and list_b", "source": "codesearchnet"}
{"code": "def get(self, key=None, indices=None, name=None):\n    if key is None:\n        return self._popitem(indices=indices, name=name)\n    else:\n        return self._pop(key, indices=indices, name=name)", "docstring": "If the key is provided, the associated (key, value) is returned from the staging area.\n\nIf the key is not in the staging area, this method will block until\nthe associated (key, value) is inserted.\nIf no key is provided and the staging area is ordered,\nthe (key, value) with the smallest key will be returned.\nOtherwise, a random (key, value) will be returned.\n\nIf the staging area is empty when this operation executes,\nit will block until there is an element to dequeue.\n\nArgs:\nkey: Key associated with the required data (Optional)\nindices: Partial list of tensors to retrieve (optional).\nA list of integer or string indices.\nString indices are only valid if the Staging Area\nhas names associated with it.\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"}
{"code": "def is_adb_available():\n    ret, out, err = utils.run_command('which adb', shell=True)\n    clean_out = out.decode('utf-8').strip()\n    if clean_out:\n        return True\n    return False", "docstring": "Checks if adb is available as a command line tool.\n\nReturns:\nTrue if adb binary is available in console, False otherwise.", "source": "github-repos"}
{"code": "def search(self, scope, search, **kwargs):\n        \n        data = {'scope': scope, 'search': search}\n        path = '/projects/%s/search' % self.get_id()\n        return self.manager.gitlab.http_list(path, query_data=data, **kwargs)", "docstring": "Search the project resources matching the provided string.'\n\nArgs:\nscope (str): Scope of the search\nsearch (str): Search string\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabSearchError: If the server failed to perform the request\n\nReturns:\nGitlabList: A list of dicts describing the resources found.", "source": "juraj-google-style"}
{"code": "def discretize(self, contact_id=0, accuracy=0.004, dt=0.001):\n        \n\n        if not self.event_points:\n            return []\n\n        events = []\n        action_dt = accuracy / self.speed\n        dt = dt or action_dt\n\n        ep0 = self.event_points[0]\n        for _ in range(int(ep0[0] / dt)):\n            events.append(['s', dt])\n        events.append(['d', ep0[1], contact_id])\n        for i, ep in enumerate(self.event_points[1:]):\n            prev_ts = self.event_points[i][0]\n            curr_ts = ep[0]\n            p0 = self.event_points[i][1]\n            p1 = ep[1]\n            if p0 == p1:\n                \n                for _ in range(int((curr_ts - prev_ts) / dt)):\n                    events.append(['s', dt])\n            else:\n                \n                dpoints = track_sampling([p0, p1], accuracy)\n                for p in dpoints:\n                    events.append(['m', p, contact_id])\n                    for _ in range(int(action_dt / dt)):\n                        events.append(['s', dt])\n\n        events.append(['u', contact_id])\n        return events", "docstring": "Sample this motion track into discretized motion events.\n\nArgs:\ncontact_id: contact point id\naccuracy: motion minimum difference in space\ndt: sample time difference", "source": "juraj-google-style"}
{"code": "def pop(self, key, default=None):\n        \n        return self._dictionary.pop(key.lower(), default)", "docstring": "Remove the key and return the associated value or default if not\nfound\n\nArgs:\nkey (str): The key to remove\ndefault (obj): The value to return if key is not present", "source": "juraj-google-style"}
{"code": "def copy_update(pb_message, **kwds):\n    \n    result = pb_message.__class__()\n    result.CopyFrom(pb_message)\n    for k, v in kwds.items():\n        setattr(result, k, v)\n    return result", "docstring": "Returns a copy of the PB object, with some fields updated.\n\nArgs:\npb_message:\n**kwds:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def call_plugins(self, step):\n    for plugin in self.plugins:\n        try:\n            getattr(plugin, step)()\n        except AttributeError:\n            self.logger.debug(\"{} doesn't exist on plugin {}\".format(step, plugin))\n        except TypeError:\n            self.logger.debug('{} on plugin {} is not callable'.format(step, plugin))", "docstring": "For each plugins, check if a \"step\" method exist on it, and call it\n\nArgs:\nstep (str): The method to search and call on each plugin", "source": "codesearchnet"}
{"code": "def is44(msg):\n    \n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    \n    if wrongstatus(d, 5, 6, 23):\n        return False\n\n    if wrongstatus(d, 35, 36, 46):\n        return False\n\n    if wrongstatus(d, 47, 48, 49):\n        return False\n\n    if wrongstatus(d, 50, 51, 56):\n        return False\n\n    \n    if bin2int(d[0:4]) > 4:\n        return False\n\n    vw = wind44(msg)\n    if vw is not None and vw[0] > 250:\n        return False\n\n    temp, temp2 = temp44(msg)\n    if min(temp, temp2) > 60 or max(temp, temp2) < -80:\n        return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 4,4.\n\nMeteorological routine air report\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, name, aliases=None, description=None, false_value=0, urls=None):\n    \n    super(BooleanDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.false_value = false_value\n    self.true_value = None", "docstring": "Initializes a boolean data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nfalse_value (Optional[int]): value that represents false.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def _free_array(self, handle: int):\n        \n        with self._lock:\n            if self._arrays[handle] is not None:\n                self._arrays[handle] = None\n                self._count -= 1", "docstring": "Frees the memory for the array with the given handle.\n\nArgs:\nhandle: The handle of the array whose memory should be freed. This\nhandle must come from the _create_array method.", "source": "juraj-google-style"}
{"code": "def _make_model(self, data, key=None):\n        \n        if data['deleted'] and not self.adapter.want_deleted:\n            raise ObjectDoesNotExist('Deleted object returned')\n        model = self._model_class(self._current_context,\n                                  _pass_perm_checks=self._pass_perm_checks)\n\n        model.setattr('key', ub_to_str(key) if key else ub_to_str(data.get('key')))\n        model = model.set_data(data, from_db=True)\n        model._initial_data = model.clean_value()\n        return model", "docstring": "Creates a model instance with the given data.\n\nArgs:\ndata: Model data returned from DB.\nkey: Object key\nReturns:\npyoko.Model object.", "source": "juraj-google-style"}
{"code": "def delete(self, resource, timeout=(- 1)):\n    self._client.delete(resource=resource, timeout=timeout)", "docstring": "Delete all the labels for a resource.\n\nArgs:\nresource (dict): Object to delete.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.", "source": "codesearchnet"}
{"code": "def write(self, source=None, **kwargs):\n    if (not source):\n        source = self.msg\n    return self._writer.write(source=source, **kwargs)", "docstring": "Wrappe    r to call the writer's write method if present.\n\nArgs:\nsource(pandasdmx.model.Message, iterable): stuff to be written.\nIf a :class:`pandasdmx.model.Message` is given, the writer\nitself must determine what to write unless specified in the\nkeyword arguments. If an iterable is given,\nthe writer should write each item. Keyword arguments may\nspecify what to do with the output depending on the writer's API. Defaults to self.msg.\n\nReturns:\ntype: anything the writer returns.", "source": "codesearchnet"}
{"code": "def _get_object_from_python_path(python_path):\n        \n        \n        python_path = python_path.split('.')\n        module_path = python_path[:-1]\n        object_class = python_path[-1]\n\n        if isinstance(module_path, list):\n            module_path = '.'.join(module_path)\n\n        \n        module = import_module(module_path)\n        schema = getattr(module, object_class)\n\n        if isclass(schema):\n            schema = schema()\n\n        return schema", "docstring": "Method that will fetch a Marshmallow schema from a path to it.\n\nArgs:\npython_path (str): The string path to the Marshmallow schema.\n\nReturns:\nmarshmallow.Schema: The schema matching the provided path.\n\nRaises:\nTypeError: This is raised if the specified object isn't\na Marshmallow schema.", "source": "juraj-google-style"}
{"code": "def load_json(json_filespec):\n  \n  json_fh = open(json_filespec)\n  config_dict = json.load(json_fh)\n  json_fh.close()\n  return config_dict", "docstring": "Loads JSON from a config file\nArgs:\njson_filespec: path/to/file.json\nReturns:\na dict made from the JSON read, if successful\nRaises:\nIOError if the file could not be opened\nValueError if the JSON could not be read successfully\nRuntimeError if something else went wrong", "source": "juraj-google-style"}
{"code": "def _SendItem(self, zmq_socket, item, block=True):\n    \n    try:\n      logger.debug('{0:s} sending item'.format(self.name))\n      if block:\n        zmq_socket.send_pyobj(item)\n      else:\n        zmq_socket.send_pyobj(item, zmq.DONTWAIT)\n      logger.debug('{0:s} sent item'.format(self.name))\n      return True\n\n    except zmq.error.Again:\n      logger.debug('{0:s} could not send an item'.format(self.name))\n\n    except zmq.error.ZMQError as exception:\n      if exception.errno == errno.EINTR:\n        logger.error(\n            'ZMQ syscall interrupted in {0:s}.'.format(\n                self.name))\n\n    return False", "docstring": "Attempts to send an item to a ZeroMQ socket.\n\nArgs:\nzmq_socket (zmq.Socket): used to the send the item.\nitem (object): sent on the queue. Will be pickled prior to sending.\nblock (Optional[bool]): whether the push should be performed in blocking\nor non-blocking mode.\n\nReturns:\nbool: whether the item was sent successfully.", "source": "juraj-google-style"}
{"code": "def set_shutter_level(self, level=0.0):\n        \n        data = {\"channelIndex\": 1, \"deviceId\": self.id, \"shutterLevel\": level}\n        return self._restCall(\"device/control/setShutterLevel\", body=json.dumps(data))", "docstring": "sets the shutter level\n\nArgs:\nlevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed\nReturns:\nthe result of the _restCall", "source": "juraj-google-style"}
{"code": "def pkcs12_key_as_pem(private_key_bytes, private_key_password):\n    \n    private_key_password = _helpers._to_bytes(private_key_password)\n    pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)\n    return crypto.dump_privatekey(crypto.FILETYPE_PEM,\n                                  pkcs12.get_privatekey())", "docstring": "Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.\n\nArgs:\nprivate_key_bytes: Bytes. PKCS#12 key in DER format.\nprivate_key_password: String. Password for PKCS#12 key.\n\nReturns:\nString. PEM contents of ``private_key_bytes``.", "source": "juraj-google-style"}
{"code": "def set_all_pattern_variables(self, patternnumber, \\\n        sp0, ti0, sp1, ti1, sp2, ti2, sp3, ti3, sp4, ti4, sp5, ti5, sp6, ti6, sp7, ti7, \\\n        actual_step, additional_cycles, link_pattern):\n        \n        _checkPatternNumber(patternnumber)\n        \n        self.set_pattern_step_setpoint(patternnumber, 0, sp0)\n        self.set_pattern_step_setpoint(patternnumber, 1, sp1)\n        self.set_pattern_step_setpoint(patternnumber, 2, sp2)\n        self.set_pattern_step_setpoint(patternnumber, 3, sp3)\n        self.set_pattern_step_setpoint(patternnumber, 4, sp4)\n        self.set_pattern_step_setpoint(patternnumber, 5, sp5)\n        self.set_pattern_step_setpoint(patternnumber, 6, sp6)\n        self.set_pattern_step_setpoint(patternnumber, 7, sp7)\n        self.set_pattern_step_time(    patternnumber, 0, ti0)\n        self.set_pattern_step_time(    patternnumber, 1, ti1)\n        self.set_pattern_step_time(    patternnumber, 2, ti2)\n        self.set_pattern_step_time(    patternnumber, 3, ti3)\n        self.set_pattern_step_time(    patternnumber, 4, ti4)\n        self.set_pattern_step_time(    patternnumber, 5, ti5)\n        self.set_pattern_step_time(    patternnumber, 6, ti6)\n        self.set_pattern_step_time(    patternnumber, 7, ti7)\n        self.set_pattern_additional_cycles(patternnumber, additional_cycles)\n        self.set_pattern_link_topattern(   patternnumber, link_pattern)\n        self.set_pattern_actual_step(      patternnumber, actual_step)", "docstring": "Set all variables for a given pattern at one time.\n\nArgs:\n* patternnumber (integer): 0-7\n* sp[*n*] (float): setpoint value for step *n*\n* ti[*n*] (integer??): step time for step *n*, 0-900\n* actual_step (int): ?\n* additional_cycles(int): ?\n* link_pattern(int): ?", "source": "juraj-google-style"}
{"code": "def get_orbit(name, date):\n    \n\n    \n\n    if name not in [x.name for x in Bsp().top.list]:\n        raise UnknownBodyError(name)\n\n    for a, b in Bsp().top.steps(name):\n        if b.name not in _propagator_cache:\n\n            \n            propagator = type(\n                \"%sBspPropagator\" % b.name,\n                (GenericBspPropagator,),\n                {'src': a, 'dst': b}\n            )\n\n            \n            \n            center = Pck()[b.full_name.title()]\n\n            \n            propagator.propagate(date).as_frame(b.name, center=center)\n            _propagator_cache[b.name] = propagator\n\n    if Bsp().top not in _propagator_cache:\n        _propagator_cache[Bsp().top.name] = EarthPropagator()\n\n    return _propagator_cache[name].propagate(date)", "docstring": "Retrieve the orbit of a solar system object\n\nArgs:\nname (str): The name of the body desired. For exact nomenclature, see\n:py:func:`available_planets`\ndate (Date): Date at which the state vector will be extracted\nReturn:\nOrbit: Orbit of the desired object, in the reference frame in which it is declared in\nthe .bsp file", "source": "juraj-google-style"}
{"code": "def es_indexers(cls, base_class=None, role='rdf_class', **kwargs):\n        \n\n        def _prop_filter(prop, value, **kwargs):\n            \n\n            try:\n                use_prop = len(set(value.owl_inverseOf) - parent_props) > 0\n            except AttributeError:\n                use_prop = True\n            if prop in nested_props and use_prop:\n                return True\n            return False\n        if not base_class:\n            base_class = cls\n        rtn_list = []\n        \n        if kwargs.get(\"depth\"): \n            kwargs['depth'] += 1\n            initial = False\n        else:\n            initial = True\n            kwargs['depth'] = 1\n            kwargs['class'] = cls.__name__\n            kwargs['class_obj'] = cls\n        if kwargs.get('class_obj'):\n            parent_props = set(cls.properties)\n        else:\n            parent_props = set()\n        if role == 'rdf_class':\n            for value in cls.properties.values():\n                \n                rtn_list += value.es_indexers(base_class, **kwargs)\n\n        elif role == 'es_Nested':\n            if cls == base_class:\n                nested_props = LABEL_FIELDS\n            else:\n                nested_props = cls.es_defs.get('kds_esNestedProps',\n                                               list(cls.properties.keys()))\n            used_props = [value\n                          for prop, value in cls.properties.items() \\\n                          if _prop_filter(prop, value, **kwargs)]\n            for value in cls.properties.values():\n                \n                rtn_list += value.es_indexers(base_class, **kwargs)\n\n        if cls.es_defs.get('kds_esIndex',[None])[0]:\n            rtn_list += [cls]\n        return list(set(rtn_list))", "docstring": "Returns the es mapping for the class\n\nargs:\n-----\nbase_class: The root class being indexed\nrole: the role states how the class should be mapped depending\nupon whether it is used as a subject of an object. options\nare es_Nested or rdf_class", "source": "juraj-google-style"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor):\n    image_outputs = self.vision_tower(pixel_values)\n    selected_image_feature = image_outputs.last_hidden_state\n    image_features = self.multi_modal_projector(selected_image_feature)\n    image_features = image_features / self.config.text_config.hidden_size ** 0.5\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nThe tensors corresponding to the input images.\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def compress_summary(summary, epsilon):\n    if summary.shape[1] * epsilon < 1:\n        return summary\n    percents = epsilon + np.arange(0.0, 1.0, epsilon)\n    cum_weights = summary[1].cumsum()\n    cum_weight_percents = cum_weights / cum_weights[-1]\n    new_bins = np.interp(percents, cum_weight_percents, summary[0])\n    cum_weights = np.interp(percents, cum_weight_percents, cum_weights)\n    new_weights = cum_weights - np.concatenate((np.array([0]), cum_weights[:-1]))\n    summary = np.stack((new_bins, new_weights))\n    return summary.astype('float32')", "docstring": "Compress a summary to within `epsilon` accuracy.\n\nThe compression step is needed to keep the summary sizes small after\nmerging, and also used to return the final target boundaries. It finds the\nnew bins based on interpolating cumulative weight percentages from the large\nsummary.  Taking the difference of the cumulative weights from the previous\nbin's cumulative weight will give the new weight for that bin.\n\nArgs:\nsummary: 2D `np.ndarray` summary to be compressed.\nepsilon: A `'float32'` that determines the approximate desired\nprecision.\n\nReturns:\nA 2D `np.ndarray` that is a compressed summary. First column is the\ninterpolated partition values, the second is the weights (counts).", "source": "github-repos"}
{"code": "def __move(self, current_pos):\n        \n        if self.__move_range is not None:\n            next_pos = np.random.randint(current_pos - self.__move_range, current_pos + self.__move_range)\n            if next_pos < 0:\n                next_pos = 0\n            elif next_pos >= self.var_arr.shape[0] - 1:\n                next_pos = self.var_arr.shape[0] - 1\n            return next_pos\n        else:\n            next_pos = np.random.randint(self.var_arr.shape[0] - 1)\n            return next_pos", "docstring": "Move in the feature map.\n\nArgs:\ncurrent_pos:    The now position.\n\nReturns:\nThe next position.", "source": "juraj-google-style"}
{"code": "def search(self, query, results=10, suggestion=False):\n        \n\n        self._check_query(query, \"Query must be specified\")\n\n        search_params = {\n            \"list\": \"search\",\n            \"srprop\": \"\",\n            \"srlimit\": results,\n            \"srsearch\": query,\n        }\n        if suggestion:\n            search_params[\"srinfo\"] = \"suggestion\"\n\n        raw_results = self.wiki_request(search_params)\n\n        self._check_error_response(raw_results, query)\n\n        search_results = [d[\"title\"] for d in raw_results[\"query\"][\"search\"]]\n\n        if suggestion:\n            sug = None\n            if raw_results[\"query\"].get(\"searchinfo\"):\n                sug = raw_results[\"query\"][\"searchinfo\"][\"suggestion\"]\n            return search_results, sug\n        return search_results", "docstring": "Search for similar titles\n\nArgs:\nquery (str): Page title\nresults (int): Number of pages to return\nsuggestion (bool): Use suggestion\nReturns:\ntuple or list: tuple (list results, suggestion) if \\\nsuggestion is **True**; list of results \\\notherwise", "source": "juraj-google-style"}
{"code": "def from_dict(cls, cls_dict, fallback_xsi_type=None):\n    if (not cls_dict):\n        return None\n    if isinstance(cls_dict, six.string_types):\n        if (not getattr(cls, '_convert_strings', False)):\n            return cls_dict\n    try:\n        typekey = cls.dictkey(cls_dict)\n    except TypeError:\n        typekey = fallback_xsi_type\n    klass = cls.entity_class(typekey)\n    return klass.from_dict(cls_dict)", "docstring": "Parse the dictionary and return an Entity instance.\n\nThis will attempt to extract type information from the input\ndictionary and pass it to entity_class to resolve the correct class\nfor the type.\n\nArgs:\ncls_dict: A dictionary representation of an Entity object.\nfallback_xsi_type: An xsi_type to use for string input, which\ndoesn't have properties\n\nReturns:\nAn Entity instance.", "source": "codesearchnet"}
{"code": "def find_elements_by_class(self, class_, update=False) -> Elements:\n    return self.find_elements(by=By.CLASS, value=class_, update=update)", "docstring": "Finds multiple elements by class.\n\nArgs:\nclass_: The class of the elements to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nA list with elements if any was found. An empty list if not.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelements = driver.find_elements_by_class('foo')", "source": "codesearchnet"}
{"code": "def to_element(self, include_namespaces=False):\n        \n        elt_attrib = {}\n        if include_namespaces:\n            elt_attrib.update({\n                'xmlns': \"urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/\",\n                'xmlns:dc': \"http:\n                'xmlns:upnp': \"urn:schemas-upnp-org:metadata-1-0/upnp/\",\n            })\n        elt_attrib.update({\n            'parentID': self.parent_id,\n            'restricted': 'true' if self.restricted else 'false',\n            'id': self.item_id\n        })\n        elt = XML.Element(self.tag, elt_attrib)\n\n        \n        XML.SubElement(elt, 'dc:title').text = self.title\n\n        \n        for resource in self.resources:\n            elt.append(resource.to_element())\n\n        \n        \n        for key, value in self._translation.items():\n            if hasattr(self, key):\n                \n                \n                \n                tag = \"%s:%s\" % value if value[0] else \"%s\" % value[1]\n                XML.SubElement(elt, tag).text = (\"%s\" % getattr(self, key))\n        \n        XML.SubElement(elt, 'upnp:class').text = self.item_class\n\n        \n        desc_attrib = {'id': 'cdudn', 'nameSpace':\n                       'urn:schemas-rinconnetworks-com:metadata-1-0/'}\n        desc_elt = XML.SubElement(elt, 'desc', desc_attrib)\n        desc_elt.text = self.desc\n\n        return elt", "docstring": "Return an ElementTree Element representing this instance.\n\nArgs:\ninclude_namespaces (bool, optional): If True, include xml\nnamespace attributes on the root element\n\nReturn:\n~xml.etree.ElementTree.Element: an Element.", "source": "juraj-google-style"}
{"code": "def run_tpm(tpm, time_scale):\n    sbs_tpm = convert.state_by_node2state_by_state(tpm)\n    if sparse(tpm):\n        tpm = sparse_time(sbs_tpm, time_scale)\n    else:\n        tpm = dense_time(sbs_tpm, time_scale)\n    return convert.state_by_state2state_by_node(tpm)", "docstring": "Iterate a TPM by the specified number of time steps.\n\nArgs:\ntpm (np.ndarray): A state-by-node tpm.\ntime_scale (int): The number of steps to run the tpm.\n\nReturns:\nnp.ndarray", "source": "codesearchnet"}
{"code": "def _add_arg(self, key, value, mask=False):\n        \n        if self.lang == 'python':\n            self._add_arg_python(key, value, mask)\n        elif self.lang == 'java':\n            self._add_arg_java(key, value, mask)", "docstring": "Add CLI Arg for the correct language.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).\nmask (boolean, default:False): Indicates whether no mask value.", "source": "juraj-google-style"}
{"code": "def _resize_image(image, height, width):\n  \n  return tf.image.resize_images(\n      image, [height, width], method=tf.image.ResizeMethod.BILINEAR,\n      align_corners=False)", "docstring": "Simple wrapper around tf.resize_images.\n\nThis is primarily to make sure we use the same `ResizeMethod` and other\ndetails each time.\n\nArgs:\nimage: A 3-D image `Tensor`.\nheight: The target height for the resized image.\nwidth: The target width for the resized image.\n\nReturns:\nresized_image: A 3-D tensor containing the resized image. The first two\ndimensions have the shape [height, width].", "source": "juraj-google-style"}
{"code": "def onchange(self, new_value):\n        \n        self.disable_refresh()\n        self.set_value(new_value)\n        self.enable_refresh()\n        return (new_value, )", "docstring": "Called when the user changes the TextInput content.\nWith single_line=True it fires in case of focus lost and Enter key pressed.\nWith single_line=False it fires at each key released.\n\nArgs:\nnew_value (str): the new string content of the TextInput.", "source": "juraj-google-style"}
{"code": "def clear_extra_selections(self, key):\n        \n        for decoration in self.extra_selections_dict.get(key, []):\n            self.decorations.remove(decoration)\n        self.extra_selections_dict[key] = []", "docstring": "Remove decorations added through set_extra_selections.\n\nArgs:\nkey (str) name of the extra selections group.", "source": "juraj-google-style"}
{"code": "def get(self, key):\n    path = self.object_path(key)\n    return self._read_object(path)", "docstring": "Return the object named by key or None if it does not exist.\n\nArgs:\nkey: Key naming the object to retrieve\n\nReturns:\nobject or None", "source": "codesearchnet"}
{"code": "def expand(self, url):\n        \n        url = self.clean_url(url)\n        expand_url = f'{self.api_url}v1/expand'\n        payload = {\n            'domain': getattr(self, 'domain', 'adf.ly'),\n            'advert_type': getattr(self, 'type', 'int'),\n            'group_id': getattr(self, 'group_id', None),\n            'key': self.api_key,\n            'user_id': self.user_id,\n            'url': url,\n        }\n        response = self._post(expand_url, data=payload)\n        if not response.ok:\n            raise BadAPIResponseException(response.content)\n\n        try:\n            data = response.json()\n        except json.decoder.JSONDecodeError:\n            raise BadAPIResponseException('API response could not be decoded')\n\n        if data.get('errors'):\n            errors = ','.join(i['msg'] for i in data['errors'])\n            raise ShorteningErrorException(errors)\n\n        if not data.get('data'):\n            raise BadAPIResponseException(response.content)\n\n        return data['data'][0]['url']", "docstring": "Expand implementation for Adf.ly\nArgs:\nurl: the URL you want to expand\n\nReturns:\nA string containing the expanded URL\n\nRaises:\nBadAPIResponseException: If the data is malformed or we got a bad\nstatus code on API response\nShorteningErrorException: If the API Returns an error as response", "source": "juraj-google-style"}
{"code": "def _log_unnorm_prob(self, x, name=None):\n    with tf.name_scope((name or 'log_unnorm_prob_lkj')):\n        x = tf.convert_to_tensor(value=x, name='x')\n        if self.input_output_cholesky:\n            logdet = (2.0 * tf.reduce_sum(input_tensor=tf.math.log(tf.linalg.diag_part(x)), axis=[(- 1)]))\n        else:\n            (_, logdet) = tf.linalg.slogdet(x)\n        answer = ((self.concentration - 1.0) * logdet)\n        return answer", "docstring": "Returns the unnormalized log density of an LKJ distribution.\n\nArgs:\nx: `float` or `double` `Tensor` of correlation matrices.  The shape of `x`\nmust be `B + [D, D]`, where `B` broadcasts with the shape of\n`concentration`.\nname: Python `str` name prefixed to Ops created by this function.\n\nReturns:\nlog_p: A Tensor of the unnormalized log density of each matrix element of\n`x`, with respect to an LKJ distribution with parameter the\ncorresponding element of `concentration`.", "source": "codesearchnet"}
{"code": "def truncate(string, maxchar):\n    \n    if maxchar < 4:\n        raise TruncateError(\"Maxchar must be > 3\")\n\n    if len(string) <= maxchar:\n        return string\n    else:\n        return string[:maxchar - 3] + \"...\"", "docstring": "Truncate a string to a maximum number of characters.\n\nIf the string is longer than maxchar, then remove excess\ncharacters and append an ellipses.\n\nArguments:\n\nstring (str): String to truncate.\nmaxchar (int): Maximum length of string in characters. Must be >= 4.\n\nReturns:\n\nstr: Of length <= maxchar.\n\nRaises:\n\nTruncateError: In case of an error.", "source": "juraj-google-style"}
{"code": "def _ConditionalFormatMessages(self, event_values):\n    string_pieces = []\n    for (map_index, attribute_name) in enumerate(self._format_string_pieces_map):\n        if ((not attribute_name) or (attribute_name in event_values)):\n            if attribute_name:\n                attribute = event_values.get(attribute_name, None)\n                if ((not isinstance(attribute, (bool, float))) and (not isinstance(attribute, py2to3.INTEGER_TYPES)) and (not attribute)):\n                    continue\n            string_pieces.append(self.FORMAT_STRING_PIECES[map_index])\n    format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)\n    string_pieces = []\n    for (map_index, attribute_name) in enumerate(self._format_string_short_pieces_map):\n        if ((not attribute_name) or event_values.get(attribute_name, None)):\n            string_pieces.append(self.FORMAT_STRING_SHORT_PIECES[map_index])\n    short_format_string = self.FORMAT_STRING_SEPARATOR.join(string_pieces)\n    return self._FormatMessages(format_string, short_format_string, event_values)", "docstring": "Determines the conditional formatted message strings.\n\nArgs:\nevent_values (dict[str, object]): event values.\n\nReturns:\ntuple(str, str): formatted message string and short message string.", "source": "codesearchnet"}
{"code": "def add_group_coordinator(self, group, response):\n    log.debug('Updating coordinator for %s: %s', group, response)\n    error_type = Errors.for_code(response.error_code)\n    if (error_type is not Errors.NoError):\n        log.error('GroupCoordinatorResponse error: %s', error_type)\n        self._groups[group] = (- 1)\n        return False\n    node_id = response.coordinator_id\n    coordinator = BrokerMetadata(response.coordinator_id, response.host, response.port, None)\n    if (node_id not in self._brokers):\n        self._brokers[node_id] = coordinator\n    else:\n        node = self._brokers[node_id]\n        if ((coordinator.host != node.host) or (coordinator.port != node.port)):\n            log.error('GroupCoordinator metadata conflicts with existing broker metadata. Coordinator: %s, Broker: %s', coordinator, node)\n            self._groups[group] = node_id\n            return False\n    log.info('Group coordinator for %s is %s', group, coordinator)\n    self._groups[group] = node_id\n    return True", "docstring": "Update with metadata for a group coordinator\n\nArguments:\ngroup (str): name of group from GroupCoordinatorRequest\nresponse (GroupCoordinatorResponse): broker response\n\nReturns:\nbool: True if metadata is updated, False on error", "source": "codesearchnet"}
{"code": "def request(session, url, rule_payload, **kwargs):\n    \n    if isinstance(rule_payload, dict):\n        rule_payload = json.dumps(rule_payload)\n    logger.debug(\"sending request\")\n    result = session.post(url, data=rule_payload, **kwargs)\n    return result", "docstring": "Executes a request with the given payload and arguments.\n\nArgs:\nsession (requests.Session): the valid session object\nurl (str): Valid API endpoint\nrule_payload (str or dict): rule package for the POST. If you pass a\ndictionary, it will be converted into JSON.", "source": "juraj-google-style"}
{"code": "def _ParseOrMerge(self, lines, message):\n    \n    tokenizer = Tokenizer(lines)\n    while not tokenizer.AtEnd():\n      self._MergeField(tokenizer, message)", "docstring": "Converts a text representation of a protocol message into a message.\n\nArgs:\nlines: Lines of a message's text representation.\nmessage: A protocol buffer message to merge into.\n\nRaises:\nParseError: On text parsing problems.", "source": "juraj-google-style"}
{"code": "def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[(Sequence[tf.Tensor], tf.Tensor, tf.Tensor)]:\n    default = self.compiler.compile_default_action(self.batch_size)\n    bound_constraints = self.compiler.compile_action_bound_constraints(state)\n    action = self._sample_action(bound_constraints, default)\n    (n, action, checking) = self._check_preconditions(state, action, bound_constraints, default)\n    return (action, n, checking)", "docstring": "Returns sampled action fluents and tensors related to the sampling.\n\nArgs:\nstate (Sequence[tf.Tensor]): A list of state fluents.\n\nReturns:\nTuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with\naction fluents, an integer tensor for the number of samples, and\na boolean tensor for checking all action preconditions.", "source": "codesearchnet"}
{"code": "def retry_loop(retries, delay_in_seconds, conditions, function):\n    if (not isinstance(retries, Integral)):\n        raise TypeError(retries)\n    if (delay_in_seconds < 0):\n        raise TypeError(delay_in_seconds)\n    attempts = 0\n    value = None\n    err = None\n    while (attempts <= retries):\n        try:\n            value = function()\n            for condition in conditions:\n                if condition.on_value(value):\n                    break\n            else:\n                return value\n        except Exception as exc:\n            err = exc\n            for condition in conditions:\n                if condition.on_exception(exc):\n                    break\n            else:\n                raise\n        attempts += 1\n        sleep(delay_in_seconds)\n    else:\n        if err:\n            raise err\n        else:\n            raise ValueError('Max retries ({}) reached and return the value is still {}.'.format(attempts, value))\n    return value", "docstring": "Actually performs the retry loop used by the retry decorator\nand handler functions. Failures for retrying are defined by\nthe RetryConditions passed in. If the maximum number of\nretries has been reached then it raises the most recent\nerror or a ValueError on the most recent result value.\n\nArgs:\nretries (Integral): Maximum number of times to retry.\ndelay_in_seconds (Integral): Number of seconds to wait\nbetween retries.\nconditions (list): A list of retry conditions the can\ntrigger a retry on a return value or exception.\nfunction (function): The function to wrap.\n\nReturns:\nvalue: The return value from function", "source": "codesearchnet"}
{"code": "def start_naive_bayes(automated_run, session, path):\n    \n    module = functions.import_string_code_as_module(automated_run.source)\n    random_state = 8 if not hasattr(module, 'random_state') else module.random_state\n    assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators\n\n    \n    base_estimator = automated_run.base_learner_origin.return_estimator()\n    base_estimator.set_params(**module.default_params)\n    default_params = functions.make_serializable(base_estimator.get_params())\n    non_searchable_params = dict((key, val) for key, val in iteritems(default_params)\n                                 if key not in module.pbounds)\n\n    \n    existing_base_learners = []\n    for base_learner in automated_run.base_learner_origin.base_learners:\n        if not base_learner.job_status == 'finished':\n            continue\n        in_search_space = True\n        for key, val in iteritems(non_searchable_params):\n            if base_learner.hyperparameters[key] != val:\n                in_search_space = False\n                break  \n        if in_search_space:\n            existing_base_learners.append(base_learner)\n\n    \n    target = []\n    initialization_dict = dict((key, list()) for key in module.pbounds.keys())\n    for base_learner in existing_base_learners:\n        \n        all_numerical = True\n        for key in module.pbounds.keys():\n            if not isinstance(base_learner.hyperparameters[key], numbers.Number):\n                all_numerical = False\n                break\n        if not all_numerical:\n            continue  \n\n        for key in module.pbounds.keys():\n            initialization_dict[key].append(base_learner.hyperparameters[key])\n        target.append(base_learner.individual_score[module.metric_to_optimize])\n    initialization_dict['target'] = target if not module.invert_metric \\\n        else list(map(lambda x: -x, target))\n    print('{} existing in initialization dictionary'.\n          format(len(initialization_dict['target'])))\n\n    \n    func_to_optimize = return_func_to_optimize(\n        path, session, automated_run.base_learner_origin, module.default_params,\n        module.metric_to_optimize, module.invert_metric, set(module.integers)\n    )\n\n    \n    bo = BayesianOptimization(func_to_optimize, module.pbounds)\n\n    bo.initialize(initialization_dict)\n\n    np.random.seed(random_state)\n\n    bo.maximize(**module.maximize_config)", "docstring": "Starts naive bayes automated run\n\nArgs:\nautomated_run (xcessiv.models.AutomatedRun): Automated run object\n\nsession: Valid SQLAlchemy session\n\npath (str, unicode): Path to project folder", "source": "juraj-google-style"}
{"code": "def setup_logging(args=None):\n    logging_level = logging.WARNING\n    if ((args is not None) and args.verbose):\n        logging_level = logging.INFO\n    config = {'level': logging_level, 'format': 'jtlocalize:%(message)s'}\n    if ((args is not None) and (args.log_path != '')):\n        config['filename'] = args.log_path\n    logging.basicConfig(**config)", "docstring": "Setup logging module.\n\nArgs:\nargs (optional): The arguments returned by the argparse module.", "source": "codesearchnet"}
{"code": "def gen_cartesian_product(*args):\n    if (not args):\n        return []\n    elif (len(args) == 1):\n        return args[0]\n    product_list = []\n    for product_item_tuple in itertools.product(*args):\n        product_item_dict = {}\n        for item in product_item_tuple:\n            product_item_dict.update(item)\n        product_list.append(product_item_dict)\n    return product_list", "docstring": "generate cartesian product for lists\n\nArgs:\nargs (list of list): lists to be generated with cartesian product\n\nReturns:\nlist: cartesian product in list\n\nExamples:\n\n>>> arg1 = [{\"a\": 1}, {\"a\": 2}]\n>>> arg2 = [{\"x\": 111, \"y\": 112}, {\"x\": 121, \"y\": 122}]\n>>> args = [arg1, arg2]\n>>> gen_cartesian_product(*args)\n>>> # same as below\n>>> gen_cartesian_product(arg1, arg2)\n[\n{'a': 1, 'x': 111, 'y': 112},\n{'a': 1, 'x': 121, 'y': 122},\n{'a': 2, 'x': 111, 'y': 112},\n{'a': 2, 'x': 121, 'y': 122}\n]", "source": "codesearchnet"}
{"code": "def get_generic_type(val: '_base.BaseValue') -> '_classes.ParameterizedClass | None':\n    is_class = isinstance(val, _abstract.Class)\n    if is_class:\n        cls = val\n    elif isinstance(val.cls, _abstract.Class):\n        cls = val.cls\n    else:\n        return None\n    for parent_cls in cls.mro:\n        if isinstance(parent_cls, _abstract.ParameterizedClass):\n            base_cls = parent_cls.base_cls\n        else:\n            base_cls = parent_cls\n        if isinstance(base_cls, _abstract.Class) and base_cls.template:\n            ctx = base_cls.ctx\n            params = {item.name: item for item in base_cls.template}\n            generic_cls = _abstract.ParameterizedClass(base_cls, params, ctx)\n            if is_class:\n                return _abstract.ParameterizedClass(ctx.convert.type_type, {T: generic_cls}, ctx)\n            else:\n                return generic_cls\n    return None", "docstring": "Gets the generic type of an abstract value.\n\nArgs:\nval: The abstract value.\n\nReturns:\nThe type of the value, with concrete type parameters replaced by TypeVars.\nFor example, the generic type of `[0]` is `List[T]`.", "source": "github-repos"}
{"code": "def match(obj, matchers=TYPES):\n    buf = get_bytes(obj)\n    for matcher in matchers:\n        if matcher.match(buf):\n            return matcher\n    return None", "docstring": "Matches the given input againts the available\nfile type matchers.\n\nArgs:\nobj: path to file, bytes or bytearray.\n\nReturns:\nType instance if type matches. Otherwise None.\n\nRaises:\nTypeError: if obj is not a supported type.", "source": "codesearchnet"}
{"code": "def set_parameters(self, parameters_dict):\n    DB.set_hash_value(self._key, 'parameters', parameters_dict)\n    self.publish('parameters_updated')", "docstring": "Set the subarray parameters.\n\nArgs:\nparameters_dict (dict): Dictionary of Subarray parameters", "source": "codesearchnet"}
{"code": "def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):\n    \n    print(make_subsection(name_prefix + enum_descriptor.name))\n    location = locations[path]\n    if location.HasField('leading_comments'):\n        print(textwrap.dedent(location.leading_comments))\n\n    row_tuples = []\n    for value_index, value in enumerate(enum_descriptor.value):\n        field_location = locations[path + (2, value_index)]\n        row_tuples.append((\n            make_code(value.name),\n            value.number,\n            textwrap.fill(get_comment_from_location(field_location), INFINITY),\n        ))\n    print_table(('Name', 'Number', 'Description'), row_tuples)", "docstring": "Generate doc for an enum.\n\nArgs:\nenum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum\nto generate docs for.\nlocations: Dictionary of location paths tuples to\ndescriptor_pb2.SourceCodeInfo.Location instances.\npath: Path tuple to the enum definition.\nname_prefix: Optional prefix for this enum's name.", "source": "juraj-google-style"}
{"code": "def get_ams_access_token(accountname, accountkey):\n    accountkey_encoded = urllib.parse.quote(accountkey, safe='')\n    body = (((('grant_type=client_credentials&client_id=' + accountname) + '&client_secret=') + accountkey_encoded) + ' &scope=urn%3aWindowsAzureMediaServices')\n    return do_ams_auth(ams_auth_endpoint, body)", "docstring": "Get Media Services Authentication Token.\n\nArgs:\naccountname (str): Azure Media Services account name.\naccountkey (str): Azure Media Services Key.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def _is_ndb(self):\n    if isinstance(self._model, type):\n        if ((_NDB_MODEL is not None) and issubclass(self._model, _NDB_MODEL)):\n            return True\n        elif issubclass(self._model, db.Model):\n            return False\n    raise TypeError('Model class not an NDB or DB model: {0}.'.format(self._model))", "docstring": "Determine whether the model of the instance is an NDB model.\n\nReturns:\nBoolean indicating whether or not the model is an NDB or DB model.", "source": "codesearchnet"}
{"code": "def get_raw_data(self, url, *args, **kwargs):\n    res = self._conn.get(url, headers=self._prepare_headers(**kwargs))\n    if (res.status_code == 200):\n        return res.content\n    else:\n        return None", "docstring": "Gets data from url as bytes\n\nReturns content under the provided url as bytes\nie. for binary data\n\nArgs:\n**url**: address of the wanted data\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nbytes", "source": "codesearchnet"}
{"code": "def _span_attrs_to_pb(span_attr, proto_type):\n    \n    attr_pb = getattr(trace_pb2.Span, proto_type)()\n    ParseDict(span_attr, attr_pb)\n    return attr_pb", "docstring": "Convert a span attribute dict to protobuf, including Links, Attributes,\nTimeEvents.\n\nArgs:\nspan_attr (dict): A dict that needs to be converted to protobuf.\nproto_type (str): The type of the Protobuf.\n\nReturns:\nAn instance of the specified protobuf.", "source": "juraj-google-style"}
{"code": "def select_serial_number_row(self, serial_number):\n        \n        sheet = self.table\n        col = self.db_sheet_cols.id\n        rows = sheet.loc[:, col] == serial_number\n        return sheet.loc[rows, :]", "docstring": "Select row for identification number serial_number\n\nArgs:\nserial_number: serial number\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def _decode_helper(self, pred_logits, format):\n    if format == DecodeType.CHARACTER:\n        decoder = self.char_decode\n        eos_token = 1\n        eos_str = '[s]'\n    elif format == DecodeType.BPE:\n        decoder = self.bpe_decode\n        eos_token = 2\n        eos_str = '\n    elif format == DecodeType.WORDPIECE:\n        decoder = self.wp_decode\n        eos_token = 102\n        eos_str = '[SEP]'\n    else:\n        raise ValueError(f'Format {format} is not supported.')\n    dec_strs, conf_scores = ([], [])\n    batch_size = pred_logits.size(0)\n    batch_max_length = pred_logits.size(1)\n    _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)\n    preds_index = preds_index.view(-1, batch_max_length)[:, 1:]\n    preds_str = decoder(preds_index)\n    preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)\n    preds_max_prob = preds_max_prob[:, 1:]\n    for index in range(batch_size):\n        pred_eos = preds_str[index].find(eos_str)\n        pred = preds_str[index][:pred_eos]\n        pred_index = preds_index[index].tolist()\n        pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1\n        pred_max_prob = preds_max_prob[index][:pred_eos_index + 1]\n        confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0\n        dec_strs.append(pred)\n        conf_scores.append(confidence_score)\n    return (dec_strs, conf_scores)", "docstring": "Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer.\n\nArgs:\npred_logits (`torch.Tensor`):\nList of model prediction logits.\nformat (`Union[DecoderType, str]`):\nType of model prediction. Must be one of ['char', 'bpe', 'wp'].\nReturns:\n`tuple`:\ndec_strs(`str`): The decode strings of model prediction. conf_scores(`List[float]`): The confidence\nscore of model prediction.", "source": "github-repos"}
{"code": "def _coords2idx(self, coords):\n    x = self._coords2vec(coords)\n    idx = self._kd.query(x, p=self._metric_p, distance_upper_bound=self._max_pix_scale)\n    return idx[1]", "docstring": "Converts from sky coordinates to pixel indices.\n\nArgs:\ncoords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates.\n\nReturns:\nPixel indices of the coordinates, with the same shape as the input\ncoordinates. Pixels which are outside the map are given an index\nequal to the number of pixels in the map.", "source": "codesearchnet"}
{"code": "def get_cookiecutter_config(template, default_config=None, version=None):\n    \n    default_config = default_config or {}\n    config_dict = cc_config.get_user_config()\n    repo_dir, _ = cc_repository.determine_repo_dir(\n        template=template,\n        abbreviations=config_dict['abbreviations'],\n        clone_to_dir=config_dict['cookiecutters_dir'],\n        checkout=version,\n        no_input=True)\n    context_file = os.path.join(repo_dir, 'cookiecutter.json')\n    context = cc_generate.generate_context(\n        context_file=context_file,\n        default_context={**config_dict['default_context'], **default_config})\n    return repo_dir, cc_prompt.prompt_for_config(context)", "docstring": "Obtains the configuration used for cookiecutter templating\n\nArgs:\ntemplate: Path to the template\ndefault_config (dict, optional): The default configuration\nversion (str, optional): The git SHA or branch to use when\nchecking out template. Defaults to latest version\n\nReturns:\ntuple: The cookiecutter repo directory and the config dict", "source": "juraj-google-style"}
{"code": "def _generate_queries_for_title_symbols(title_field, query_value):\n    values_tokenized_by_whitespace = query_value.split()\n    symbol_queries = []\n    for value in values_tokenized_by_whitespace:\n        if any(((character in value) for character in ElasticSearchVisitor.TITLE_SYMBOL_INDICATING_CHARACTER)):\n            symbol_queries.append(generate_match_query('.'.join([title_field, FieldVariations.search]), value, with_operator_and=False))\n    return wrap_queries_in_bool_clauses_if_more_than_one(symbol_queries, use_must_clause=True)", "docstring": "Generate queries for any symbols in the title against the whitespace tokenized field of titles.\n\nReturns:\n(dict): The query or queries for the whitespace tokenized field of titles. If none such tokens exist, then\nreturns an empty dict.\nNotes:\nSplits the value stream into tokens according to whitespace.\nHeuristically identifies the ones that contain symbol-indicating-characters (examples of those tokens are\n\"g-2\", \"SU(2)\").", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Union[int, List[int]], vision_feature_select_strategy: str, **kwargs):\n    if vision_feature_select_strategy not in ['default', 'full']:\n        raise ValueError(f'Unexpected select feature strategy: {self.vision_feature_select_strategy}')\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n    image_outputs = self.vision_model(pixel_values, output_hidden_states=False, **kwargs)\n    hidden_state = image_outputs.last_hidden_state\n    return hidden_state", "docstring": "Obtains image last hidden states from the vision tower and apply al projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)\nThe tensors corresponding to the input images.\nvision_feature_layer (`Union[int, List[int]]`):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nimage_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def GetFormattedField(self, event, field_name):\n    callback_name = self._FIELD_FORMAT_CALLBACKS.get(field_name, None)\n    callback_function = None\n    if callback_name:\n        callback_function = getattr(self, callback_name, None)\n    if callback_function:\n        output_value = callback_function(event)\n    else:\n        output_value = getattr(event, field_name, '-')\n    if (output_value is None):\n        output_value = '-'\n    elif (not isinstance(output_value, py2to3.STRING_TYPES)):\n        output_value = '{0!s}'.format(output_value)\n    return output_value", "docstring": "Formats the specified field.\n\nArgs:\nevent (EventObject): event.\nfield_name (str): name of the field.\n\nReturns:\nstr: value of the field.", "source": "codesearchnet"}
{"code": "def format_diff_xml(a_xml, b_xml):\n    \n    return '\\n'.join(\n        difflib.ndiff(\n            reformat_to_pretty_xml(a_xml).splitlines(),\n            reformat_to_pretty_xml(b_xml).splitlines(),\n        )\n    )", "docstring": "Create a diff between two XML documents.\n\nArgs:\na_xml: str\nb_xml: str\n\nReturns:\nstr : `Differ`-style delta", "source": "juraj-google-style"}
{"code": "def json_set_auths(recipe, auth):\n    if isinstance(recipe, dict):\n        if 'auth' in recipe:\n            recipe['auth'] = auth\n        for key, value in recipe.items():\n            json_set_auths(value, auth)\n    elif isinstance(recipe, list) or isinstance(recipe, tuple):\n        for index, value in enumerate(recipe):\n            json_set_auths(value, auth)\n    return recipe", "docstring": "Recusrsively finds auth in script JSON and sets them.\n\nArgs:\nrecipe: (dict) A dictionary representation fo the JSON script.\nauth: (string) Either 'service' or 'user'.\n\nReturns:\n(recipe) same structure but with all auth fields replaced.", "source": "github-repos"}
{"code": "def _table_filename(tbl_filename):\n    tbl_filename = str(tbl_filename)\n    txfn = _normalize_table_path(tbl_filename)\n    gzfn = (txfn + '.gz')\n    if os.path.exists(txfn):\n        if (os.path.exists(gzfn) and (os.stat(gzfn).st_mtime > os.stat(txfn).st_mtime)):\n            tbl_filename = gzfn\n        else:\n            tbl_filename = txfn\n    elif os.path.exists(gzfn):\n        tbl_filename = gzfn\n    else:\n        raise ItsdbError('Table does not exist at {}(.gz)'.format(tbl_filename))\n    return tbl_filename", "docstring": "Determine if the table path should end in .gz or not and return it.\n\nA .gz path is preferred only if it exists and is newer than any\nregular text file path.\n\nRaises:\n:class:`delphin.exceptions.ItsdbError`: when neither the .gz\nnor text file exist.", "source": "codesearchnet"}
{"code": "def stub_batch(cls, size, **kwargs):\n    return [cls.stub(**kwargs) for _ in range(size)]", "docstring": "Stub a batch of instances of the given class, with overriden attrs.\n\nArgs:\nsize (int): the number of instances to stub\n\nReturns:\nobject list: the stubbed instances", "source": "codesearchnet"}
{"code": "async def on_message(message):\n    \n\n    \n    server = message.server\n    author = message.author\n    channel = message.channel\n    content = message.content\n\n    data = datatools.get_data()\n\n    if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n        return\n\n    \n    if server is not None and author != channel.server.me:\n        \n        flipchecked = api_flipcheck.flipcheck(content)\n        if flipchecked:\n            await client.send_typing(channel)\n            await client.send_message(channel, flipchecked)", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"}
{"code": "def kill_pid(self, pid):\n    try:\n        p = psutil.Process(pid)\n        p.terminate()\n        self.info_log(('Killed [pid:%s][name:%s]' % (p.pid, p.name())))\n    except psutil.NoSuchProcess:\n        self.error_log(('No such process: [pid:%s]' % pid))", "docstring": "Kill process by pid\n\nArgs:\npid (int)", "source": "codesearchnet"}
{"code": "def __eq__(self, other):\n        \n        if type(self) is type(other) and \\\n                self._qubits == other._qubits:\n            return True\n        return False", "docstring": "Two device specs are the same if they have the same qubits.\n\nArgs:\nother (DeviceSpecification): other DeviceSpecification\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "async def addFeedNodes(self, name, items):\n        \n        func = self.core.getFeedFunc(name)\n        if func is None:\n            raise s_exc.NoSuchName(name=name)\n\n        logger.info(f'adding feed nodes ({name}): {len(items)}')\n\n        async for node in func(self, items):\n            yield node", "docstring": "Call a feed function and return what it returns (typically yields Node()s).\n\nArgs:\nname (str): The name of the feed record type.\nitems (list): A list of records of the given feed type.\n\nReturns:\n(object): The return value from the feed function. Typically Node() generator.", "source": "juraj-google-style"}
{"code": "def squeeze(name, x, factor=2, reverse=True):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        shape = common_layers.shape_list(x)\n        if (factor == 1):\n            return x\n        height = int(shape[1])\n        width = int(shape[2])\n        n_channels = int(shape[3])\n        if (not reverse):\n            assert (((height % factor) == 0) and ((width % factor) == 0))\n            x = tf.reshape(x, [(- 1), (height \n            x = tf.transpose(x, [0, 1, 3, 5, 2, 4])\n            x = tf.reshape(x, [(- 1), (height \n        else:\n            x = tf.reshape(x, ((- 1), height, width, int((n_channels / (factor ** 2))), factor, factor))\n            x = tf.transpose(x, [0, 1, 4, 2, 5, 3])\n            x = tf.reshape(x, ((- 1), int((height * factor)), int((width * factor)), int((n_channels / (factor ** 2)))))\n        return x", "docstring": "Block-wise spatial squeezing of x to increase the number of channels.\n\nArgs:\nname: Used for variable scoping.\nx: 4-D Tensor of shape (batch_size X H X W X C)\nfactor: Factor by which the spatial dimensions should be squeezed.\nreverse: Squueze or unsqueeze operation.\n\nReturns:\nx: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X\n(cXfactor^2). If reverse is True, then it is factor = (1 / factor)", "source": "codesearchnet"}
{"code": "def match_variables(self, pattern, return_type='name'):\n    pattern = re.compile(pattern)\n    vars_ = [v for v in self.variables.values() if pattern.search(v.name)]\n    return (vars_ if return_type.startswith('var') else [v.name for v in vars_])", "docstring": "Return columns whose names match the provided regex pattern.\n\nArgs:\npattern (str): A regex pattern to match all variable names against.\nreturn_type (str): What to return. Must be one of:\n'name': Returns a list of names of matching variables.\n'variable': Returns a list of Variable objects whose names\nmatch.", "source": "codesearchnet"}
{"code": "def __init__(self, cache_file_name, update_cache=True):\n        \n        self._cache_file_name = cache_file_name\n        self._cache = self._read_cache_from_file()\n        self._update_cache = update_cache", "docstring": "Opens the cache file and reads previous results.\n\nArgs:\ncache_file_name: string file name\nupdate_cache: Specifies whether ApiCache should write out the\ncache file when closing it", "source": "juraj-google-style"}
{"code": "def set_configuration_from_sharded_input_tensors(self, input_tensors):\n    if not self._frozen:\n        self._tuple_shapes = None\n    number_of_shards = len(input_tensors)\n    self.set_number_of_shards(number_of_shards)\n    for t in input_tensors:\n        if len(t) != self.number_of_tuple_elements:\n            raise ValueError(f'input_tensors is {str(input_tensors)} but must be a list of lists, where each inner list has length number_of_tuple_elements={self.number_of_tuple_elements}')\n    sharded_shapes = [[t[i].shape for t in input_tensors] for i in range(self.number_of_tuple_elements)]\n    unsharded_shapes = [policy.get_unsharded_shape(s) for policy, s in zip(self._sharding_policies, sharded_shapes)]\n    self.set_tuple_shapes(unsharded_shapes)\n    for i in range(1, self.number_of_shards):\n        for t1, t2 in zip(input_tensors[0], input_tensors[i]):\n            if t1.dtype != t2.dtype:\n                raise TypeError(f'types of the tuple elements of input_tensors {str(input_tensors)} are not consistent')\n    self.set_tuple_types([t.dtype for t in input_tensors[0]])", "docstring": "Sets the shapes and types of the queue tuple elements.\n\ninput_tensors is a list of lists of Tensors whose types and shapes are used\nto set the queue configuration. The length of the outer list is the number\nof shards required, and each inner list is the tuple of Tensors to use to\ndetermine the types and shapes of the corresponding shard. This method\ndepends on the shard dimension, and calling it freezes the shard policy.\n\nArgs:\ninput_tensors: list of lists of Tensors. The outer list length corresponds\nto the desired number of shards, and each inner list is the size\nand shape of the desired configuration of the corresponding shard.\n\nRaises:\nValueError: if any inner list is not a list of length\nself.number_of_tuple_elements; or the inner lists do not combine to\nform a consistent unsharded shape.\nTypeError: if the types of the Tensors in the inner lists do not match.", "source": "github-repos"}
{"code": "def of_definition(service_def):\n    vcap_services = streamsx.topology.context._vcap_from_service_definition(service_def)\n    service_name = streamsx.topology.context._name_from_service_definition(service_def)\n    return StreamingAnalyticsConnection(vcap_services, service_name)", "docstring": "Create a connection to a Streaming Analytics service.\n\nThe single service is defined by `service_def` which can be one of\n\n* The `service credentials` copied from the `Service credentials` page of the service console (not the Streams console). Credentials are provided in JSON format. They contain such as the API key and secret, as well as connection information for the service.\n* A JSON object (`dict`) of the form: ``{ \"type\": \"streaming-analytics\", \"name\": \"service name\", \"credentials\": {...} }`` with the `service credentials` as the value of the ``credentials`` key.\n\nArgs:\nservice_def(dict): Definition of the service to connect to.\n\nReturns:\nStreamingAnalyticsConnection: Connection to defined service.", "source": "codesearchnet"}
{"code": "def delete(self, name, version, _lock=True):\n    link_path = self._link_path(name)\n    if _lock:\n        file_lock = _exclusive_lock(self._lock_path('links', name))\n    else:\n        file_lock = _no_lock()\n    with file_lock:\n        logger.debug('Acquired or inherited lock for link %s.', name)\n        if (not _path_exists(link_path)):\n            raise FiletrackerFileNotFoundError\n        if (_file_version(link_path) > version):\n            logger.info('Tried to delete newer version of %s (%d < %d), ignoring.', name, version, _file_version(link_path))\n            return False\n        digest = self._digest_for_link(name)\n        with _exclusive_lock(self._lock_path('blobs', digest)):\n            logger.debug('Acquired lock for blob %s.', digest)\n            should_delete_blob = False\n            with self._db_transaction() as txn:\n                logger.debug('Started DB transaction (deleting link).')\n                digest_bytes = digest.encode()\n                link_count = self.db.get(digest_bytes, txn=txn)\n                if (link_count is None):\n                    raise RuntimeError('File exists but has no key in db')\n                link_count = int(link_count)\n                if (link_count == 1):\n                    logger.debug('Deleting last link to blob %s.', digest)\n                    self.db.delete(digest_bytes, txn=txn)\n                    self.db.delete('{}:logical_size'.format(digest).encode(), txn=txn)\n                    should_delete_blob = True\n                else:\n                    new_count = str((link_count - 1)).encode()\n                    self.db.put(digest_bytes, new_count, txn=txn)\n                logger.debug('Committing DB transaction (deleting link).')\n            logger.debug('Committed DB transaction (deleting link).')\n            os.unlink(link_path)\n            logger.debug('Deleted link %s.', name)\n            if should_delete_blob:\n                os.unlink(self._blob_path(digest))\n        logger.debug('Released lock for blob %s.', digest)\n    logger.debug('Released (or gave back) lock for link %s.', name)\n    return True", "docstring": "Removes a file from the storage.\n\nArgs:\nname: name of the file being deleted.\nMay contain slashes that are treated as path separators.\nversion: file \"version\" that is meant to be deleted\nIf the file that is stored has newer version than provided,\nit will not be deleted.\nlock: whether or not to acquire locks\nThis is for internal use only,\nnormal users should always leave it set to True.\nReturns whether or not the file has been deleted.", "source": "codesearchnet"}
{"code": "def _FormatOtherFileToken(self, token_data):\n    timestamp = (token_data.microseconds + (token_data.timestamp * definitions.MICROSECONDS_PER_SECOND))\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n    date_time_string = date_time.CopyToDateTimeString()\n    return {'string': token_data.name.rstrip('\\x00'), 'timestamp': date_time_string}", "docstring": "Formats an other file token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def GetLayerFromFeatureServiceByURL(self, url, layerName='', returnURLOnly=False):\n    fs = None\n    try:\n        fs = arcrest.agol.FeatureService(url=url, securityHandler=self._securityHandler)\n        return self.GetLayerFromFeatureService(fs=fs, layerName=layerName, returnURLOnly=returnURLOnly)\n    except:\n        (line, filename, synerror) = trace()\n        raise common.ArcRestHelperError({'function': 'GetLayerFromFeatureServiceByURL', 'line': line, 'filename': filename, 'synerror': synerror})\n    finally:\n        fs = None\n        del fs\n        gc.collect()", "docstring": "Obtains a layer from a feature service by URL reference.\n\nArgs:\nurl (str): The URL of the feature service.\nlayerName (str): The name of the layer. Defaults to ``\"\"``.\nreturnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``.\nReturns:\nWhen ``returnURLOnly`` is ``True``, the URL of the layer is returned.\n\nWhen ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.", "source": "codesearchnet"}
{"code": "def decode(tokens):\n    token_is_alnum = [(t[0] in _ALPHANUMERIC_CHAR_SET) for t in tokens]\n    ret = []\n    for (i, token) in enumerate(tokens):\n        if ((i > 0) and token_is_alnum[(i - 1)] and token_is_alnum[i]):\n            ret.append(u' ')\n        ret.append(token)\n    return ''.join(ret)", "docstring": "Decode a list of tokens to a unicode string.\n\nArgs:\ntokens: a list of Unicode strings\nReturns:\na unicode string", "source": "codesearchnet"}
{"code": "def kill(container, rm=True):\n    container = get_container(container)\n    if (not container):\n        raise Exception(('No such container: %s' % container))\n    unbind_all(container['ip'])\n    sudo(('docker kill %s' % container['name']))\n    if rm:\n        sudo(('docker rm %s' % container['name']))", "docstring": "Kill a container\n\nArgs:\n* container: Container name or ID\n* rm=True: Remove the container or not", "source": "codesearchnet"}
{"code": "def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):\n    \n    return image_transform(\n        X,\n        cv2.GaussianBlur,\n        ksize=(ksize_width, ksize_height),\n        sigmaX=sigma_x,\n        sigmaY=sigma_y\n    )", "docstring": "Apply Gaussian blur to the given data.\n\nArgs:\nX: data to blur\nkernel_size: Gaussian kernel size\nstddev: Gaussian kernel standard deviation (in both X and Y directions)", "source": "juraj-google-style"}
{"code": "def write_config_json(config_file, data):\n    \n    outfile = None\n    try:\n        with open(config_file, 'w') as outfile:\n            json.dump(data, outfile)\n    except:\n        line, filename, synerror = trace()\n        raise ArcRestHelperError({\n                    \"function\": \"init_config_json\",\n                    \"line\": line,\n                    \"filename\":  filename,\n                    \"synerror\": synerror,\n                                    }\n                                    )\n    finally:\n        outfile = None\n\n        del outfile\n\n        gc.collect()", "docstring": "Serializes an object to disk.\n\nArgs:\nconfig_file (str): The path on disk to save the file.\ndata (object): The object to serialize.", "source": "juraj-google-style"}
{"code": "def configure_vlan(self, vid, commands):\n        \n        commands = make_iterable(commands)\n        commands.insert(0, 'vlan %s' % vid)\n        return self.configure(commands)", "docstring": "Configures the specified Vlan using commands\n\nArgs:\nvid (str): The VLAN ID to configure\ncommands: The list of commands to configure\n\nReturns:\nTrue if the commands completed successfully", "source": "juraj-google-style"}
{"code": "def in_sorted(values, value):\n    index = bisect.bisect_left(values, value)\n    if (index >= len(values)):\n        return False\n    return (values[index] == value)", "docstring": "Checks if a value is in a sorted list.\n\nUses the :mod:`bisect` builtin to find the insertion point for\n``value``.\n\nArgs:\nvalues (List[int]): Integers sorted in ascending order.\nvalue (int): Value to check if contained in ``values``.\n\nReturns:\nbool: Indicating if the value is contained.", "source": "codesearchnet"}
{"code": "def __init__(self, ones_prefactor):\n    super().__init__()\n    self.ones_prefactor = ones_prefactor", "docstring": "Initializes an AllOnes layer.\n\nArgs:\nones_prefactor: the scalar to emit when all ones is detected.", "source": "github-repos"}
{"code": "def power(maf=0.5, beta=0.1, N=100, cutoff=5e-08):\n    '\\n\\tstd(snp)=sqrt(2.0*maf*(1-maf)) \\n\\tpower = \\\\int \\n\\n\\tbeta_ML = (snp^T*snp)^{-1}*snp^T*Y = cov(snp,Y)/var(snp)   \\n\\tE[beta_ML]\\t= (snp^T*snp)^{-1}*snp^T*E[Y]  \\n\\t\\t\\t\\t= (snp^T*snp)^{-1}*snp^T*snp * beta\\n\\t\\t\\t\\t= beta\\n\\tVar[beta_ML]= (snp^T*snp)^{-1}*(snp^T*snp)*(snp^T*snp)^{-1}\\n\\t\\t\\t\\t= (snp^T*snp)^{-1}\\n\\t\\t\\t\\t= 1/N * var(snp)\\n\\t\\t\\t\\t= 1/N * maf*(1-maf)\\n\\t'\n    assert ((maf >= 0.0) and (maf <= 0.5)), ('maf needs to be between 0.0 and 0.5, got %f' % maf)\n    if (beta < 0.0):\n        beta = (- beta)\n    std_beta = (1.0 / np.sqrt((N * ((2.0 * maf) * (1.0 - maf)))))\n    non_centrality = beta\n    beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)\n    n_grid = 100000\n    beta_in = np.arange((0.5 / (n_grid + 1.0)), ((n_grid - 0.5) / (n_grid + 1.0)), (1.0 / (n_grid + 1.0)))\n    beta_theoretical = ((st.norm.isf(beta_in) * std_beta) + non_centrality)\n    pvals = st.chi2.sf(((beta_theoretical / std_beta) * (beta_theoretical / std_beta)), 1.0)\n    power = (pvals < cutoff).mean()\n    return (power, pvals)", "docstring": "estimate power for a given allele frequency, effect size beta and sample size N\n\nAssumption:\n\nz-score = beta_ML distributed as p(0) = N(0,1.0(maf*(1-maf)*N))) under the null hypothesis\nthe actual beta_ML is distributed as p(alt) = N( beta , 1.0/(maf*(1-maf)N) )\n\n\n\nArguments:\nmaf:\tminor allele frequency of the SNP\nbeta:\teffect size of the SNP\nN:\t\tsample size (number of individuals)\nReturns:\npower:\tprobability to detect a SNP in that study with the given parameters", "source": "codesearchnet"}
{"code": "def export(self, path, session):\n    \n    if self._graph is not tf_v1.get_default_graph():\n      raise RuntimeError(\"default graph differs from the graph where the \"\n                         \"module was instantiated.\")\n    if self._graph is not session.graph:\n      raise RuntimeError(\"session graph differs from the graph where the \"\n                         \"module was instantiated.\")\n    self._impl.export(path, session)", "docstring": "Exports the module with the variables from the session in `path`.\n\nNote that it is the module definition in the ModuleSpec used to create this\nmodule that gets exported. The session is only used to provide the value\nof variables.\n\nArgs:\npath: path where to export the module to.\nsession: session where to export the variables from.\n\nRaises:\nRuntimeError: if there is an issue during the export.", "source": "juraj-google-style"}
{"code": "def var(series):\n    \n    if np.issubdtype(series.dtype, np.number):\n        return series.var()\n    else:\n        return np.nan", "docstring": "Returns the variance of values in a series.\n\nArgs:\nseries (pandas.Series): column to summarize.", "source": "juraj-google-style"}
{"code": "def filter_by(cls, **kwargs):\n        \n        limit = kwargs.pop('limit', None)\n        reverse = kwargs.pop('reverse', False)\n        q = cls.query.filter_by(**kwargs)\n        if reverse:\n            q = q.order_by(cls.id.desc())\n        if limit:\n            q = q.limit(limit)\n        return q", "docstring": "Same as SQLAlchemy's filter_by. Additionally this accepts\ntwo special keyword arguments `limit` and `reverse` for limiting\nthe results and reversing the order respectively.\n\nArgs:\n\n**kwargs: filter parameters\n\nExamples:\n\n>>> user = User.filter_by(email=\"new@x.com\")\n\n>>> shipments = Shipment.filter_by(country=\"India\", limit=3, reverse=True)", "source": "juraj-google-style"}
{"code": "def match_globs(path, patterns):\n    \n    \n    for pattern in (p for p in patterns if p):\n        if pattern.startswith('/'):\n            regex = fnmatch.translate(pattern[1:])\n\n            temp_path = path[1:] if path.startswith('/') else path\n\n            m = re.search(regex, temp_path)\n\n            if m and m.start() == 0:\n                return True\n\n        elif fnmatch.fnmatch(path, pattern):\n                return True\n\n    return False", "docstring": "Test whether the given *path* matches any patterns in *patterns*\n\nArgs:\npath (str):\nA file path to test for matches.\npatterns (list[str]):\nA list of glob string patterns to test against. If *path* matches\nany of those patters, it will return True.\n\nReturns:\nbool: **True** if the *path* matches any pattern in *patterns*.", "source": "juraj-google-style"}
{"code": "def get_gradient_components(self, value):\n    return value", "docstring": "Returns the components of `value` that should be included in gradients.\n\nFor a ResourceVariable, its gradient component is its handle tensor.\nFor now, we return the ResourceVariable because the gradient infrastructure\nhas special logic to handle ResourceVariables. We should remove the special\nlogic and return the handle tensor.\n\nArgs:\nvalue: A `ResourceVariable`.\n\nReturns:\n`value` itself.", "source": "github-repos"}
{"code": "def _extract_units(self, obj, value):\n        \n        if isinstance(value, dict):\n            if 'units' in value:\n                value = copy(value) \n            units = value.pop(\"units\", None)\n            if units:\n                self.units_prop.__set__(obj, units)\n        return value", "docstring": "Internal helper for dealing with units associated units properties\nwhen setting values on |UnitsSpec| properties.\n\nWhen ``value`` is a dict, this function may mutate the value of the\nassociated units property.\n\nArgs:\nobj (HasProps) : instance to update units spec property value for\nvalue (obj) : new value to set for the property\n\nReturns:\ncopy of ``value``, with 'units' key and value removed when\napplicable", "source": "juraj-google-style"}
{"code": "def run(self, path_or_tests, dot_env_path=None, mapping=None):\n        \n        if validator.is_testcase_path(path_or_tests):\n            return self.run_path(path_or_tests, dot_env_path, mapping)\n        elif validator.is_testcases(path_or_tests):\n            return self.run_tests(path_or_tests)\n        else:\n            raise exceptions.ParamsError(\"Invalid testcase path or testcases: {}\".format(path_or_tests))", "docstring": "main interface.\n\nArgs:\npath_or_tests:\nstr: testcase/testsuite file/foler path\ndict: valid testcase/testsuite data", "source": "juraj-google-style"}
{"code": "def CoinFromRef(coin_ref, tx_output, state=CoinState.Unconfirmed, transaction=None):\n        \n        coin = Coin(coin_reference=coin_ref, tx_output=tx_output, state=state)\n        coin._transaction = transaction\n        return coin", "docstring": "Get a Coin object using a CoinReference.\n\nArgs:\ncoin_ref (neo.Core.CoinReference): an object representing a single UTXO / transaction input.\ntx_output (neo.Core.Transaction.TransactionOutput): an object representing a transaction output.\nstate (neo.Core.State.CoinState):\n\nReturns:\nCoin: self.", "source": "juraj-google-style"}
{"code": "def get_reduced_structure(self, reduction_algo=\"niggli\"):\n        \n        if reduction_algo == \"niggli\":\n            reduced_latt = self._lattice.get_niggli_reduced_lattice()\n        elif reduction_algo == \"LLL\":\n            reduced_latt = self._lattice.get_lll_reduced_lattice()\n        else:\n            raise ValueError(\"Invalid reduction algo : {}\"\n                             .format(reduction_algo))\n\n        if reduced_latt != self.lattice:\n            return self.__class__(reduced_latt, self.species_and_occu,\n                                  self.cart_coords,\n                                  coords_are_cartesian=True, to_unit_cell=True,\n                                  site_properties=self.site_properties, charge=self._charge)\n        else:\n            return self.copy()", "docstring": "Get a reduced structure.\n\nArgs:\nreduction_algo (str): The lattice reduction algorithm to use.\nCurrently supported options are \"niggli\" or \"LLL\".", "source": "juraj-google-style"}
{"code": "def get_tensor_spec(batches):\n    from keras.src.utils.module_utils import tensorflow as tf\n\n    def get_single_tensor_spec(*tensors):\n        x = tensors[0]\n        rank = len(x.shape)\n        if rank < 1:\n            raise ValueError(f'When passing a dataset to a Keras model, the arrays must be at least rank 1. Received: {x} of rank {len(x.shape)}.')\n        for t in tensors:\n            if len(t.shape) != rank:\n                raise ValueError(f'When passing a dataset to a Keras model, the corresponding arrays in each batch must have the same rank. Received: {x} and {t}')\n        shape = []\n        for dims in zip(*[list(x.shape) for x in tensors]):\n            dims_set = set(dims)\n            shape.append(dims_set.pop() if len(dims_set) == 1 else None)\n        shape[0] = None\n        dtype = backend.standardize_dtype(x.dtype)\n        if isinstance(x, tf.RaggedTensor):\n            return tf.RaggedTensorSpec(shape=shape, dtype=dtype, ragged_rank=x.ragged_rank, row_splits_dtype=x.row_splits.dtype)\n        if isinstance(x, tf.SparseTensor) or is_scipy_sparse(x) or is_jax_sparse(x):\n            return tf.SparseTensorSpec(shape=shape, dtype=dtype)\n        else:\n            return tf.TensorSpec(shape=shape, dtype=dtype)\n    return tree.map_structure(get_single_tensor_spec, *batches)", "docstring": "Return the common tensor spec for a list of batches.\n\nArgs:\nbatches: list of structures of tensors. The structures must be\nidentical, but the shape at each leaf may be different.\nReturns: the common tensor spec for all the batches.", "source": "github-repos"}
{"code": "def install(package_name):\n    \n    holodeck_path = util.get_holodeck_path()\n    binary_website = \"https:\n\n    if package_name not in packages:\n        raise HolodeckException(\"Unknown package name \" + package_name)\n    package_url = packages[package_name]\n\n    print(\"Installing \" + package_name + \" at \" + holodeck_path)\n    install_path = os.path.join(holodeck_path, \"worlds\")\n    binary_url = binary_website + util.get_os_key() + \"_\" + package_url\n    _download_binary(binary_url, install_path)\n    if os.name == \"posix\":\n        _make_binary_excecutable(package_name, install_path)", "docstring": "Installs a holodeck package.\n\nArgs:\npackage_name (str): The name of the package to install", "source": "juraj-google-style"}
{"code": "def get_include():\n    import tensorflow as tf\n    return _os_path.join(_os_path.dirname(tf.__file__), 'include')", "docstring": "Get the directory containing the TensorFlow C++ header files.\n\nReturns:\nThe directory as string.", "source": "github-repos"}
{"code": "def update_firmware(self, firmware_information, force=False):\n    firmware_uri = '{}/firmware'.format(self.data['uri'])\n    result = self._helper.update(firmware_information, firmware_uri, force=force)\n    self.refresh()\n    return result", "docstring": "Installs firmware to the member interconnects of a SAS Logical Interconnect.\n\nArgs:\nfirmware_information: Options to install firmware to a SAS Logical Interconnect.\nforce: If sets to true, the operation completes despite any problems with the network connectivy\nor the erros on the resource itself.\nReturns:\ndict: SAS Logical Interconnect Firmware.", "source": "codesearchnet"}
{"code": "def constant_to_var(self, pyval, subst=None, node=None, source_sets=None, discard_concrete_values=False):\n    source_sets = source_sets or [[]]\n    node = node or self.ctx.root_node\n    kwargs = {'subst': subst, 'node': node, 'source_sets': source_sets, 'discard_concrete_values': discard_concrete_values}\n\n    def constant_to_value(new_pyval):\n        return self.constant_to_value(new_pyval, subst, node)\n    if isinstance(pyval, pytd.NothingType):\n        return self.ctx.program.NewVariable([], [], self.ctx.root_node)\n    elif isinstance(pyval, pytd.Alias):\n        return self.constant_to_var(pyval.type, **kwargs)\n    elif isinstance(pyval, abstract_utils.AsInstance):\n        cls = pyval.cls\n        if isinstance(pyval, abstract_utils.AsReturnValue) and isinstance(cls, pytd.NothingType):\n            return self.never.to_variable(node)\n        else:\n            return self.pytd_cls_to_instance_var(cls, **kwargs)\n    elif isinstance(pyval, pytd.Constant):\n        return self.pytd_cls_to_instance_var(pyval.type, **kwargs)\n    result = constant_to_value(pyval)\n    if result is not None:\n        return result.to_variable(node)\n    assert pyval.__class__ != cfg.Variable, pyval\n    if pyval.__class__ == tuple:\n        content = (self.constant_to_var(v, **kwargs) for v in pyval)\n        return self.build_tuple(self.ctx.root_node, content)\n    raise ValueError(f'Cannot convert {pyval.__class__} to an abstract value')", "docstring": "Convert a constant to a Variable.\n\nThis converts a constant to a cfg.Variable. Unlike constant_to_value, it\ncan handle things that need to be represented as a Variable with multiple\npossible values (i.e., a union type), like pytd.Function.\n\nArgs:\npyval: The Python constant to convert. Can be a PyTD definition or a\nbuiltin constant.\nsubst: The current type parameters.\nnode: The current CFG node. (For instances)\nsource_sets: An iterator over instances of SourceSet (or just tuples).\ndiscard_concrete_values: Whether concrete values should be discarded from\ntype parameters.\n\nReturns:\nA cfg.Variable.\nRaises:\nTypeParameterError: if conversion is attempted on a type parameter without\na substitution.\nValueError: if pytype is not of a known type.", "source": "github-repos"}
{"code": "def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3):\n    \n    \n    \n    \n    \n    \n    \n    retries = 0\n    while retries < retry_count:\n        try:\n            response = request(\n                url=_METADATA_IP_ROOT, method='GET', headers=_METADATA_HEADERS,\n                timeout=timeout)\n\n            metadata_flavor = response.headers.get(_METADATA_FLAVOR_HEADER)\n            return (response.status == http_client.OK and\n                    metadata_flavor == _METADATA_FLAVOR_VALUE)\n\n        except exceptions.TransportError:\n            _LOGGER.info('Compute Engine Metadata server unavailable on'\n                         'attempt %s of %s', retries+1, retry_count)\n            retries += 1\n\n    return False", "docstring": "Checks to see if the metadata server is available.\n\nArgs:\nrequest (google.auth.transport.Request): A callable used to make\nHTTP requests.\ntimeout (int): How long to wait for the metadata server to respond.\nretry_count (int): How many times to attempt connecting to metadata\nserver using above timeout.\n\nReturns:\nbool: True if the metadata server is reachable, False otherwise.", "source": "juraj-google-style"}
{"code": "class Upsample(nn.Module):\n\n    def __init__(self, scale, num_features):\n        super().__init__()\n        self.scale = scale\n        if scale & scale - 1 == 0:\n            for i in range(int(math.log(scale, 2))):\n                self.add_module(f'convolution_{i}', nn.Conv2d(num_features, 4 * num_features, 3, 1, 1))\n                self.add_module(f'pixelshuffle_{i}', nn.PixelShuffle(2))\n        elif scale == 3:\n            self.convolution = nn.Conv2d(num_features, 9 * num_features, 3, 1, 1)\n            self.pixelshuffle = nn.PixelShuffle(3)\n        else:\n            raise ValueError(f'Scale {scale} is not supported. Supported scales: 2^n and 3.')\n\n    def forward(self, hidden_state):\n        if self.scale & self.scale - 1 == 0:\n            for i in range(int(math.log(self.scale, 2))):\n                hidden_state = self.__getattr__(f'convolution_{i}')(hidden_state)\n                hidden_state = self.__getattr__(f'pixelshuffle_{i}')(hidden_state)\n        elif self.scale == 3:\n            hidden_state = self.convolution(hidden_state)\n            hidden_state = self.pixelshuffle(hidden_state)\n        return hidden_state", "docstring": "Upsample module.\n\nArgs:\nscale (`int`):\nScale factor. Supported scales: 2^n and 3.\nnum_features (`int`):\nChannel number of intermediate features.", "source": "github-repos"}
{"code": "def copy(source, destination, ignore=None, adapter=None, fatal=True, logger=LOG.debug):\n    \n    return _file_op(source, destination, _copy, adapter, fatal, logger, ignore=ignore)", "docstring": "Copy source -> destination\n\nArgs:\nsource (str | None): Source file or folder\ndestination (str | None): Destination file or folder\nignore (callable | list | str | None): Names to be ignored\nadapter (callable | None): Optional function to call on 'source' before copy\nfatal (bool | None): Abort execution on failure if True\nlogger (callable | None): Logger to use\n\nReturns:\n(int): 1 if effectively done, 0 if no-op, -1 on failure", "source": "juraj-google-style"}
{"code": "def restriction_coder(self):\n    return coders.registry.get_coder(object)", "docstring": "Returns a ``Coder`` for restrictions.\n\nReturned``Coder`` will be used for the restrictions produced by the current\n``RestrictionProvider``.\n\nReturns:\nan object of type ``Coder``.", "source": "github-repos"}
{"code": "def _save_tf1_model(self, sess: session.Session, saved_model_path: str, signature_key: str, tags: Collection[str], inputs: Mapping[str, core.Tensor], outputs: Mapping[str, core.Tensor], init_op: Optional[ops.Operation]=None, assets_collection: Optional[Sequence[core.Symbol]]=None) -> None:\n    v1_builder = builder.SavedModelBuilder(saved_model_path)\n    sig_def = signature_def_utils_impl.predict_signature_def(inputs=inputs, outputs=outputs)\n    v1_builder.add_meta_graph_and_variables(sess, tags, signature_def_map={signature_key: sig_def}, main_op=init_op, assets_collection=assets_collection)\n    v1_builder.save()", "docstring": "Saves a TF1 model.\n\nArgs:\nsess: Current tf.Session object.\nsaved_model_path: Directory to save the model.\nsignature_key: The key to the SignatureDef that inputs & outputs\ncorrespond to.\ntags: Set of tags associated with the model.\ninputs: Input name -> input tensor mapping.\noutputs: Output name -> output tensor mapping.\ninit_op: Op for initialization.\nassets_collection: Assets collection. This collection is a list of string\ntensors. Each tensor contains the asset file names.", "source": "github-repos"}
{"code": "def register_app(self, app):\n        \n        app.route(self.uri, methods=self.methods)(self.callable_obj)\n\n        return self", "docstring": "Register the route object to a `bottle.Bottle` app instance.\n\nArgs:\napp (instance):\n\nReturns:\nRoute instance (for chaining purposes)", "source": "juraj-google-style"}
{"code": "def values(self):\n    all_values = [v.decode('utf-8') for (k, v) in self.rdb.hgetall(self.session_hash).items()]\n    return all_values", "docstring": "Returns a list of all values in the dictionary.\n\nReturns:\nlist of str: [value1,value2,...,valueN]", "source": "codesearchnet"}
{"code": "def get_contract_state(self, contract_hash, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_CONTRACT_STATE, params=[contract_hash], id=id, endpoint=endpoint)", "docstring": "Get a contract state object by its hash\nArgs:\ncontract_hash: (str) the hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def warn_logging(logger):\n\n    def showwarning(message, category, filename, lineno, file=None, line=None):\n        logger.warning(message)\n    return showwarning", "docstring": "Create a `showwarning` function that uses the given logger.\n\nArguments:\nlogger (~logging.Logger): the logger to use.\n\nReturns:\nfunction: a function that can be used as the `warnings.showwarning`\ncallback.", "source": "codesearchnet"}
{"code": "def new_panel(store, institute_id, panel_name, display_name, csv_lines):\n    \n    institute_obj = store.institute(institute_id)\n    if institute_obj is None:\n        flash(\"{}: institute not found\".format(institute_id))\n        return None\n\n    panel_obj = store.gene_panel(panel_name)\n    if panel_obj:\n        flash(\"panel already exists: {} - {}\".format(panel_obj['panel_name'],\n                                                     panel_obj['display_name']))\n        return None\n\n    log.debug(\"parse genes from CSV input\")\n    try:\n        new_genes = parse_genes(csv_lines)\n    except SyntaxError as error:\n        flash(error.args[0], 'danger')\n        return None\n\n    log.debug(\"build new gene panel\")\n\n    panel_id = None\n    try:\n        panel_data = build_panel(dict(\n            panel_name=panel_name,\n            institute=institute_obj['_id'],\n            version=1.0,\n            date=dt.datetime.now(),\n            display_name=display_name,\n            genes=new_genes,\n        ), store)\n        panel_id= store.add_gene_panel(panel_data)\n\n    except Exception as err:\n        log.error('An error occurred while adding the gene panel {}'.format(err))\n\n    return panel_id", "docstring": "Create a new gene panel.\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_id(str)\npanel_name(str)\ndisplay_name(str)\ncsv_lines(iterable(str)): Stream with genes\n\nReturns:\npanel_id: the ID of the new panel document created or None", "source": "juraj-google-style"}
{"code": "def is_attribute_deprecated(self, attribute):\n    rule_set = self._attribute_rule_sets.get(attribute)\n    if rule_set.version_deprecated:\n        if (self._version >= rule_set.version_deprecated):\n            return True\n        else:\n            return False\n    else:\n        return False", "docstring": "Check if the attribute is deprecated by the current KMIP version.\n\nArgs:\nattribute (string): The name of the attribute\n(e.g., 'Unique Identifier'). Required.", "source": "codesearchnet"}
{"code": "def _get_command_and_argv(argv):\n    command_name = argv[0]\n    if (not command_name):\n        argv = argv[1:]\n    elif (command_name == settings.command):\n        argv.remove(command_name)\n    return (command_name, argv)", "docstring": "Extract the command name and arguments to pass to docopt.\n\nArgs:\nargv: The argument list being used to run the command.\n\nReturns:\nA tuple containing the name of the command and the arguments to pass\nto docopt.", "source": "codesearchnet"}
{"code": "def _batchNumpyGather(self, params, indices, axis, batch_dims):\n    if batch_dims == 0:\n        return np.take(params, indices, axis=axis)\n    self.assertEqual(params.shape[0], indices.shape[0])\n    if axis > 0:\n        axis -= 1\n    return np.stack([self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1) for i in range(params.shape[0])])", "docstring": "Performs a batch gather by making recursive calls to np.take().\n\nThis is used by testBatchDims() to construct the expected value.\n\nArgs:\nparams: A numpy array\nindices: A numpy array\naxis: An integer\nbatch_dims: An integer\nReturns:\nA numpy array", "source": "github-repos"}
{"code": "def _get_setting(self, key, default_value=None, value_type=str):\n    try:\n        state_entry = self._state_view.get(SettingsView.setting_address(key))\n    except KeyError:\n        return default_value\n    if (state_entry is not None):\n        setting = Setting()\n        setting.ParseFromString(state_entry)\n        for setting_entry in setting.entries:\n            if (setting_entry.key == key):\n                return value_type(setting_entry.value)\n    return default_value", "docstring": "Get the setting stored at the given key.\n\nArgs:\nkey (str): the setting key\ndefault_value (str, optional): The default value, if none is\nfound. Defaults to None.\nvalue_type (function, optional): The type of a setting value.\nDefaults to `str`.\n\nReturns:\nstr: The value of the setting if found, default_value\notherwise.", "source": "codesearchnet"}
{"code": "def merge(self, workdir, pot_files, out_dvdb, delete_source=True):\n        \n        \n        pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]\n        if not os.path.isabs(out_dvdb):\n            out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))\n\n        if self.verbose:\n            print(\"Will merge %d files into output DVDB %s\" % (len(pot_files), out_dvdb))\n            for i, f in enumerate(pot_files):\n                print(\" [%d] %s\" % (i, f))\n\n        \n        if len(pot_files) == 1:\n            with open(pot_files[0], \"r\") as inh, open(out_dvdb, \"w\") as out:\n                for line in inh:\n                    out.write(line)\n            return out_dvdb\n\n        self.stdin_fname, self.stdout_fname, self.stderr_fname = \\\n            map(os.path.join, 3 * [os.path.abspath(workdir)], [\"mrgdvdb.stdin\", \"mrgdvdb.stdout\", \"mrgdvdb.stderr\"])\n\n        inp = StringIO()\n        inp.write(out_dvdb + \"\\n\")             \n        inp.write(str(len(pot_files)) + \"\\n\")  \n\n        \n        for fname in pot_files:\n            inp.write(fname + \"\\n\")\n\n        self.stdin_data = [s for s in inp.getvalue()]\n\n        with open(self.stdin_fname, \"wt\") as fh:\n            fh.writelines(self.stdin_data)\n            \n            fh.flush()\n            os.fsync(fh.fileno())\n\n        retcode = self.execute(workdir)\n        if retcode == 0 and delete_source:\n            \n            for f in pot_files:\n                try:\n                    os.remove(f)\n                except IOError:\n                    pass\n\n        return out_dvdb", "docstring": "Merge POT files containing 1st order DFPT potential\nreturn the absolute path of the new database in workdir.\n\nArgs:\ndelete_source: True if POT1 files should be removed after (successful) merge.", "source": "juraj-google-style"}
{"code": "def translate_file(\n    estimator, subtokenizer, input_file, output_file=None,\n    print_all_translations=True):\n  \n  batch_size = _DECODE_BATCH_SIZE\n\n  \n  \n  sorted_inputs, sorted_keys = _get_sorted_inputs(input_file)\n  num_decode_batches = (len(sorted_inputs) - 1) \n\n  def input_generator():\n    \n    for i, line in enumerate(sorted_inputs):\n      if i % batch_size == 0:\n        batch_num = (i \n\n        print(\"Decoding batch %d out of %d.\" % (batch_num, num_decode_batches))\n      yield _encode_and_add_eos(line, subtokenizer)\n\n  def input_fn():\n    \n    ds = tf.data.Dataset.from_generator(\n        input_generator, tf.int64, tf.TensorShape([None]))\n    ds = ds.padded_batch(batch_size, [None])\n    return ds\n\n  translations = []\n  for i, prediction in enumerate(estimator.predict(input_fn)):\n    translation = _trim_and_decode(prediction[\"outputs\"], subtokenizer)\n    translations.append(translation)\n\n    if print_all_translations:\n      print(\"Translating:\")\n      print(\"\\tInput: %s\" % sorted_inputs[i])\n      print(\"\\tOutput: %s\\n\" % translation)\n      print(\"=\" * 100)\n\n  \n  if output_file is not None:\n    if tf.gfile.IsDirectory(output_file):\n      raise ValueError(\"File output is a directory, will not save outputs to \"\n                       \"file.\")\n    tf.logging.info(\"Writing to file %s\" % output_file)\n    with tf.gfile.Open(output_file, \"w\") as f:\n      for index in xrange(len(sorted_keys)):\n        f.write(\"%s\\n\" % translations[sorted_keys[index]])", "docstring": "Translate lines in file, and save to output file if specified.\n\nArgs:\nestimator: tf.Estimator used to generate the translations.\nsubtokenizer: Subtokenizer object for encoding and decoding source and\ntranslated lines.\ninput_file: file containing lines to translate\noutput_file: file that stores the generated translations.\nprint_all_translations: If true, all translations are printed to stdout.\n\nRaises:\nValueError: if output file is invalid.", "source": "juraj-google-style"}
{"code": "def giant_text_sqltype(dialect: Dialect) -> str:\n    \n    if dialect.name == SqlaDialectName.SQLSERVER:\n        return 'NVARCHAR(MAX)'\n    elif dialect.name == SqlaDialectName.MYSQL:\n        return 'LONGTEXT'\n    else:\n        raise ValueError(\"Unknown dialect: {}\".format(dialect.name))", "docstring": "Returns the SQL column type used to make very large text columns for a\ngiven dialect.\n\nArgs:\ndialect: a SQLAlchemy :class:`Dialect`\nReturns:\nthe SQL data type of \"giant text\", typically 'LONGTEXT' for MySQL\nand 'NVARCHAR(MAX)' for SQL Server.", "source": "juraj-google-style"}
{"code": "def walk(self, walker):\n        \n\n        def walk_func(step):\n            \n            \n            \n            for dep in self.graph.downstream(step.name):\n                if not dep.ok:\n                    step.set_status(FailedStatus(\"dependency has failed\"))\n                    return step.ok\n\n            return step.run()\n\n        return self.graph.walk(walker, walk_func)", "docstring": "Walks each step in the underlying graph, in topological order.\n\nArgs:\nwalker (func): a walker function to be passed to\n:class:`stacker.dag.DAG` to walk the graph.", "source": "juraj-google-style"}
{"code": "def pattern_from_collections_and_statement(data_collections, statement):\n    BaseCollection.are_collections_aligned(data_collections)\n    correct_var = BaseCollection._check_conditional_statement(statement, len(data_collections))\n    num_statement_clean = BaseCollection._replace_operators(statement)\n    pattern = []\n    for i in xrange(len(data_collections[0])):\n        num_statement = num_statement_clean\n        for (j, coll) in enumerate(data_collections):\n            var = correct_var[j]\n            num_statement = num_statement.replace(var, str(coll[i]))\n        num_statement = BaseCollection._restore_operators(num_statement)\n        pattern.append(eval(num_statement, {}))\n    return pattern", "docstring": "Generate a list of booleans from data collections and a conditional statement.\n\nArgs:\ndata_collections: A list of aligned Data Collections to be evaluated\nagainst the statement.\nstatement: A conditional statement as a string (e.g. a>25 and a%5==0).\nThe variable should always be named as 'a' (without quotations).\n\nReturn:\npattern: A list of True/False booleans with the length of the\nData Collections where True meets the conditional statement\nand False does not.", "source": "codesearchnet"}
{"code": "def run(self, instance):\n    last = instance\n    for item in self.stack:\n        if isinstance(item, str):\n            last = getattr(last, item)\n        else:\n            last = last(*item[0], **item[1])\n    self.stack = []\n    return last", "docstring": "Run the recorded chain of methods on `instance`.\n\nArgs:\ninstance: an object.", "source": "codesearchnet"}
{"code": "def save(self, representative_dataset: RepresentativeDatasetMapping) -> Mapping[str, _RepresentativeDatasetFile]:\n    dataset_file_map = {}\n    for signature_def_key, repr_ds in representative_dataset.items():\n        if signature_def_key not in self.path_map:\n            raise ValueError(f'SignatureDef key does not exist in the provided path_map: {signature_def_key}')\n        dataset_file_map[signature_def_key] = self._save_tf_record_dataset(repr_ds, signature_def_key)\n    return dataset_file_map", "docstring": "Saves the representative dataset.\n\nArgs:\nrepresentative_dataset: Signature def key -> representative dataset\nmapping. Each dataset is saved in a separate TFRecord file whose path\nmatches the signature def key of `path_map`.\n\nRaises:\nValueError: When the signature def key in `representative_dataset` is not\npresent in the `path_map`.\n\nReturns:\nA map from signature key to the RepresentativeDatasetFile instance\ncontains the path to the saved file.", "source": "github-repos"}
{"code": "def absorption_coefficient(dielectric):\n    energies_in_eV = np.array(dielectric[0])\n    real_dielectric = parse_dielectric_data(dielectric[1])\n    imag_dielectric = parse_dielectric_data(dielectric[2])\n    epsilon_1 = np.mean(real_dielectric, axis=1)\n    epsilon_2 = np.mean(imag_dielectric, axis=1)\n    return (((((2.0 * np.sqrt(2.0)) * pi) * eV_to_recip_cm) * energies_in_eV) * np.sqrt(((- epsilon_1) + np.sqrt(((epsilon_1 ** 2) + (epsilon_2 ** 2))))))", "docstring": "Calculate the optical absorption coefficient from an input set of\npymatgen vasprun dielectric constant data.\n\nArgs:\ndielectric (list): A list containing the dielectric response function\nin the pymatgen vasprun format.\n\n| element 0: list of energies\n| element 1: real dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.\n| element 2: imaginary dielectric tensors, in ``[xx, yy, zz, xy, xz, yz]`` format.\n\nReturns:\n(np.array): absorption coefficient using eV as frequency units (cm^-1).\n\nNotes:\nThe absorption coefficient is calculated as\n\n.. math:: \\\\alpha = \\\\frac{2\\sqrt{2} \\pi}{\\lambda} \\sqrt{-\\epsilon_1+\\sqrt{\\epsilon_1^2+\\epsilon_2^2}}", "source": "codesearchnet"}
{"code": "def delete_record(self, record):\n        \n        self.children.remove(record.resource)\n        record.delete()", "docstring": "Remove a DNSRecord\n\nArgs:\nrecord (:obj:`DNSRecord`): :obj:`DNSRecord` to remove\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def wait_for_redis_to_start(redis_ip_address, redis_port, password=None, num_retries=5):\n    redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port, password=password)\n    counter = 0\n    while (counter < num_retries):\n        try:\n            logger.info('Waiting for redis server at {}:{} to respond...'.format(redis_ip_address, redis_port))\n            redis_client.client_list()\n        except redis.ConnectionError:\n            time.sleep(1)\n            logger.info('Failed to connect to the redis server, retrying.')\n            counter += 1\n        else:\n            break\n    if (counter == num_retries):\n        raise Exception('Unable to connect to Redis. If the Redis instance is on a different machine, check that your firewall is configured properly.')", "docstring": "Wait for a Redis server to be available.\n\nThis is accomplished by creating a Redis client and sending a random\ncommand to the server until the command gets through.\n\nArgs:\nredis_ip_address (str): The IP address of the redis server.\nredis_port (int): The port of the redis server.\npassword (str): The password of the redis server.\nnum_retries (int): The number of times to try connecting with redis.\nThe client will sleep for one second between attempts.\n\nRaises:\nException: An exception is raised if we could not connect with Redis.", "source": "codesearchnet"}
{"code": "def _update_trial_info(self, expr_dir):\n    trial_id = expr_dir[(- 8):]\n    meta_file = os.path.join(expr_dir, EXPR_META_FILE)\n    meta = parse_json(meta_file)\n    result_file = os.path.join(expr_dir, EXPR_RESULT_FILE)\n    offset = self._result_offsets.get(trial_id, 0)\n    (results, new_offset) = parse_multiple_json(result_file, offset)\n    self._add_results(results, trial_id)\n    self._result_offsets[trial_id] = new_offset\n    if meta:\n        TrialRecord.objects.filter(trial_id=trial_id).update(trial_status=meta['status'], end_time=timestamp2date(meta.get('end_time', None)))\n    elif (len(results) > 0):\n        metrics = {'episode_reward': results[(- 1)].get('episode_reward_mean', None), 'accuracy': results[(- 1)].get('mean_accuracy', None), 'loss': results[(- 1)].get('loss', None)}\n        if results[(- 1)].get('done'):\n            TrialRecord.objects.filter(trial_id=trial_id).update(trial_status='TERMINATED', end_time=results[(- 1)].get('date', None), metrics=str(metrics))\n        else:\n            TrialRecord.objects.filter(trial_id=trial_id).update(metrics=str(metrics))", "docstring": "Update information for given trial.\n\nMeta file will be loaded if exists, and the trial information\nin db backend will be updated.\n\nArgs:\nexpr_dir(str)", "source": "codesearchnet"}
{"code": "def vector_projection(v1, v2):\n    return ((scalar_projection(v1, v2) * v2) / np.linalg.norm(v2))", "docstring": "compute the vector projection of v1 upon v2\n\nArgs:\nv1, v2: iterable\nindices 0, 1, 2 corresponding to cartesian coordinates\n\nReturns:\n3-vector of the projection of point p onto the direction of v", "source": "codesearchnet"}
{"code": "def imflip(img, direction='horizontal'):\n    \n    assert direction in ['horizontal', 'vertical']\n    if direction == 'horizontal':\n        return np.flip(img, axis=1)\n    else:\n        return np.flip(img, axis=0)", "docstring": "Flip an image horizontally or vertically.\n\nArgs:\nimg (ndarray): Image to be flipped.\ndirection (str): The flip direction, either \"horizontal\" or \"vertical\".\n\nReturns:\nndarray: The flipped image.", "source": "juraj-google-style"}
{"code": "def _maybe_download_corpora(tmp_dir):\n    mnli_filename = 'MNLI.zip'\n    mnli_finalpath = os.path.join(tmp_dir, 'MNLI')\n    if (not tf.gfile.Exists(mnli_finalpath)):\n        zip_filepath = generator_utils.maybe_download(tmp_dir, mnli_filename, _MNLI_URL)\n        zip_ref = zipfile.ZipFile(zip_filepath, 'r')\n        zip_ref.extractall(tmp_dir)\n        zip_ref.close()\n    return mnli_finalpath", "docstring": "Download corpora for multinli.\n\nArgs:\ntmp_dir: a string\nReturns:\na string", "source": "codesearchnet"}
{"code": "def universal_transformer_with_gru_as_transition_function(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):\n    (state, unused_inputs, unused_memory) = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')\n    assert (not hparams.add_step_timing_signal)\n    mh_attention_input = step_preprocess(state, step, hparams)\n    transition_function_input = attention_unit(mh_attention_input)\n    if hparams.add_ffn_unit_to_the_transition_function:\n        transition_function_input = ffn_unit(transition_function_input)\n    transition_function_input = common_layers.layer_preprocess(transition_function_input, hparams)\n    with tf.variable_scope('gru'):\n        transition_function_update_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='update', bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        tf.contrib.summary.scalar('gru_update_gate', tf.reduce_mean(transition_function_update_gate))\n        transition_function_reset_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='reset', bias_initializer=tf.constant_initializer(1.0), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        tf.contrib.summary.scalar('gru_reset_gate', tf.reduce_mean(transition_function_reset_gate))\n        reset_state = (transition_function_reset_gate * state)\n        transition_function_candidate = _ffn_layer_multi_inputs([transition_function_input, reset_state], hparams, name='candidate', bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False)\n        transition_function_output = (((1 - transition_function_update_gate) * transition_function_input) + (transition_function_update_gate * transition_function_candidate))\n    transition_function_output = common_layers.layer_preprocess(transition_function_output, hparams)\n    return (transition_function_output, unused_inputs, unused_memory)", "docstring": "Universal Transformer which uses a gru as transition function.\n\nIt's kind of like having a gru, filliped vertically next to the Universal\nTransformer that controls the flow of the information in depth,\nover different steps of the Universal Transformer.\n\nArgs:\nlayer_inputs:\n- state: state\n- inputs: not used here\n- memory: not used here\nstep: indicates number of steps taken so far\nhparams: model hyper-parameters.\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\nReturns:\nlayer_output:\nnew_state: new state\ninputs: not uesed\nmemory: not used", "source": "codesearchnet"}
{"code": "def _df_index_name(df):\n    if df.index.name:\n        return df.index.name\n    elif df.index.names:\n        try:\n            return '_'.join(df.index.names)\n        except TypeError:\n            return 'index'\n    else:\n        return 'index'", "docstring": "Return the Bokeh-appropriate column name for a ``DataFrame`` index\n\nIf there is no named index, then `\"index\" is returned.\n\nIf there is a single named index, then ``df.index.name`` is returned.\n\nIf there is a multi-index, and the index names are all strings, then\nthe names are joined with '_' and the result is returned, e.g. for a\nmulti-index ``['ind1', 'ind2']`` the result will be \"ind1_ind2\".\nOtherwise if any index name is not a string, the fallback name \"index\"\nis returned.\n\nArgs:\ndf (DataFrame) : the ``DataFrame`` to find an index name for\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):\n        \n        for d in directories:\n            if d.startswith(os.path.expanduser('~')) and not os.path.exists(d):\n                os.makedirs(d)\n            possible_path = os.path.join(d, rel_path)\n            if os.path.exists(possible_path):\n                loaded = cls.load_yaml_by_path(possible_path, log_debug=log_debug)\n                if loaded is not None:\n                    return (possible_path, cls.load_yaml_by_path(possible_path))\n\n        return None", "docstring": "Load a yaml file with path that is relative to one of given directories.\n\nArgs:\ndirectories: list of directories to search\nname: relative path of the yaml file to load\nlog_debug: log all messages as debug\nReturns:\ntuple (fullpath, loaded yaml structure) or None if not found", "source": "juraj-google-style"}
{"code": "def to_maildir(self, flags: Iterable[Union[bytes, Flag]]) -> str:\n        \n        codes = []\n        for flag in flags:\n            if isinstance(flag, bytes):\n                flag = Flag(flag)\n            from_sys = self._from_sys.get(flag)\n            if from_sys is not None:\n                codes.append(from_sys)\n            else:\n                from_kwd = self._from_kwd.get(flag)\n                if from_kwd is not None:\n                    codes.append(from_kwd)\n        return ''.join(codes)", "docstring": "Return the string of letter codes that are used to map to defined\nIMAP flags and keywords.\n\nArgs:\nflags: The flags and keywords to map.", "source": "juraj-google-style"}
{"code": "def _get_client_fqdn(self, client_info_contents):\n    \n    yamldict = yaml.safe_load(client_info_contents)\n    fqdn = yamldict['system_info']['fqdn']\n    client_id = yamldict['client_id'].split('/')[1]\n    return client_id, fqdn", "docstring": "Extracts a GRR client's FQDN from its client_info.yaml file.\n\nArgs:\nclient_info_contents: The contents of the client_info.yaml file.\n\nReturns:\nA (str, str) tuple representing client ID and client FQDN.", "source": "juraj-google-style"}
{"code": "def load_from_xml(self, path):\n    with open(os.path.expanduser(path), 'r') as ifile:\n        et = ElementTree.parse(ifile)\n    root = et.getroot()\n    all_objects = {}\n    for child in root:\n        obj_type = self.__getattribute__(child.tag)\n        objects = [obj_type(obj) for obj in child]\n        all_objects[child.tag] = JSSObjectList(self.factory, None, objects)\n    return all_objects", "docstring": "Load all objects from XML file and return as dict.\n\nThe dict returned will have keys named the same as the\nJSSObject classes contained, and the values will be\nJSSObjectLists of all full objects of that class (for example,\nthe equivalent of my_jss.Computer().retrieve_all()).\n\nThis method can potentially take a very long time!\n\nArgs:\npath: String file path to the file you wish to load from.\nPath will have ~ expanded prior to opening.", "source": "codesearchnet"}
{"code": "def PlistValueToPlainValue(plist):\n  \n\n  if isinstance(plist, dict):\n    ret_value = dict()\n    for key, value in iteritems(plist):\n      ret_value[key] = PlistValueToPlainValue(value)\n    return ret_value\n  elif isinstance(plist, list):\n    return [PlistValueToPlainValue(value) for value in plist]\n  elif isinstance(plist, datetime.datetime):\n    return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond\n  return plist", "docstring": "Takes the plist contents generated by binplist and returns a plain dict.\n\nbinplist uses rich types to express some of the plist types. We need to\nconvert them to types that RDFValueArray will be able to transport.\n\nArgs:\nplist: A plist to convert.\n\nReturns:\nA simple python type.", "source": "juraj-google-style"}
{"code": "def read(path, encoding='utf-8'):\n    try:\n        with io.open(path, encoding=encoding) as f:\n            return f.read()\n    except Exception as e:\n        logger.error('read: %s failed. Error: %s', path, e)\n        return ''", "docstring": "Read the content of the file.\n\nArgs:\npath (str): Path to the file\nencoding (str): File encoding. Default: utf-8\n\nReturns:\nstr: File content or empty string if there was an error", "source": "codesearchnet"}
{"code": "def _create_slots(self, table: 'TableConfig', variable_creator: Callable[[Text, init_ops_v2.Initializer], tf_variables.Variable], initializer_wrapper: Optional[Callable[[str, init_ops_v2.Initializer], init_ops_v2.Initializer]]=None) -> Dict[Text, tf_variables.Variable]:\n    names = self._slot_names()\n    initializers = self._slot_initializers()\n    if initializer_wrapper is not None:\n        initializers = [initializer_wrapper(name, initializer) for name, initializer in zip(names, initializers)]\n    if self.slot_variable_creation_fn is not None:\n        return self.slot_variable_creation_fn(table, names, initializers)\n    else:\n        slots = {}\n        for slot, initializer in zip(names, initializers):\n            slots[slot] = variable_creator(slot, initializer)\n        return slots", "docstring": "Creates slot variables for table.\n\nArgs:\ntable: The table variable to create slots for.\nvariable_creator: A function which creates variables. Takes parameters\n'name', 'initializer'.\ninitializer_wrapper: A function that wraps the initializer.\n\nReturns:\nA dict of variables, keyed by self._slot_names().", "source": "github-repos"}
{"code": "def _pack_with_custom_ops(dataset, keys, length):\n  \n  from tensor2tensor.data_generators.ops import pack_sequences_ops  \n  \n  if len(keys) == 1:\n    k1, = keys\n    k2 = k1\n  elif len(keys) == 2:\n    k1, k2 = keys\n  else:\n    raise ValueError(\"must have 1 or 2 keys\")\n  def map_fn_custom(x):\n    \n    (k1_packed, k1_segmengation, k1_position,\n     k2_packed, k2_segmentation, k2_position) = (\n         pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))\n    packed = {\n        k1: k1_packed,\n        k1 + \"_segmentation\": k1_segmengation,\n        k1 + \"_position\": k1_position,\n    }\n    if len(keys) == 2:\n      packed.update({\n          k2: k2_packed,\n          k2 + \"_segmentation\": k2_segmentation,\n          k2 + \"_position\": k2_position,\n      })\n    return packed\n  dataset = dataset.map(map_fn_custom,\n                        num_parallel_calls=tf.data.experimental.AUTOTUNE)\n  dataset = dataset.flat_map(tf.data.Dataset.from_tensor_slices)\n  return dataset", "docstring": "Helper-function for packing a dataset which has already been batched.\n\nSee pack_dataset()\n\nRelies on custom ops which require a custom compiled binary.\nFaster than _pack_with_tf_ops(), and denser packing.\n\nArgs:\ndataset: a dataset containing padded batches of examples.\nkeys: a list of strings (must have length 1 or 2)\nlength: an integer\n\nReturns:\na dataset.", "source": "juraj-google-style"}
{"code": "def get_unfrozen_copy(values):\n    if isinstance(values, (frozendict, dict)):\n        return {key: get_unfrozen_copy(value) for (key, value) in values.items()}\n    elif isinstance(values, (list, tuple)):\n        return [get_unfrozen_copy(value) for value in values]\n    return values", "docstring": "Recursively convert `value`'s tuple values into lists, and frozendicts into dicts.\n\nArgs:\nvalues (frozendict/tuple): the frozendict/tuple.\n\nReturns:\nvalues (dict/list): the unfrozen copy.", "source": "codesearchnet"}
{"code": "def has_kwargs(fn):\n    if isinstance(fn, functools.partial):\n        fn = fn.func\n    elif _is_callable_object(fn):\n        fn = fn.__call__\n    elif not callable(fn):\n        raise TypeError(f'Argument `fn` should be a callable. Received: fn={fn} (of type {type(fn)})')\n    return tf_inspect.getfullargspec(fn).varkw is not None", "docstring": "Returns whether the passed callable has **kwargs in its signature.\n\nArgs:\nfn: Function, or function-like object (e.g., result of `functools.partial`).\n\nReturns:\n`bool`: if `fn` has **kwargs in its signature.\n\nRaises:\n`TypeError`: If fn is not a Function, or function-like object.", "source": "github-repos"}
{"code": "def IsPathSuffix(mod_path, path):\n  \n  return (mod_path.endswith(path) and\n          (len(mod_path) == len(path) or\n           mod_path[:-len(path)].endswith(os.sep)))", "docstring": "Checks whether path is a full path suffix of mod_path.\n\nArgs:\nmod_path: Must be an absolute path to a source file. Must not have\nfile extension.\npath: A relative path. Must not have file extension.\n\nReturns:\nTrue if path is a full path suffix of mod_path. False otherwise.", "source": "juraj-google-style"}
{"code": "def raster_statistics(raster_file):\n    ds = gdal_Open(raster_file)\n    band = ds.GetRasterBand(1)\n    (minv, maxv, meanv, std) = band.ComputeStatistics(False)\n    return (minv, maxv, meanv, std)", "docstring": "Get basic statistics of raster data.\n\nArgs:\nraster_file: raster file path.\n\nReturns:\nmin, max, mean, std.", "source": "codesearchnet"}
{"code": "def _rewrite_insert(self, sql, params, return_id=False):\n    returning = (self.qn(self.query.model._meta.pk.attname) if return_id else '*')\n    if (self.query.conflict_action.value == 'UPDATE'):\n        return self._rewrite_insert_update(sql, params, returning)\n    elif (self.query.conflict_action.value == 'NOTHING'):\n        return self._rewrite_insert_nothing(sql, params, returning)\n    raise SuspiciousOperation(('%s is not a valid conflict action, specify ConflictAction.UPDATE or ConflictAction.NOTHING.' % str(self.query.conflict_action)))", "docstring": "Rewrites a formed SQL INSERT query to include\nthe ON CONFLICT clause.\n\nArguments:\nsql:\nThe SQL INSERT query to rewrite.\n\nparams:\nThe parameters passed to the query.\n\nreturning:\nWhat to put in the `RETURNING` clause\nof the resulting query.\n\nReturns:\nA tuple of the rewritten SQL query and new params.", "source": "codesearchnet"}
{"code": "def l2_regression_sq_loss(y, target, name=None):\n    with tf.name_scope(name, 'l2_regression_sq', [y, target]) as scope:\n        y = tf.convert_to_tensor(y, name='y')\n        target = tf.convert_to_tensor(target, name='target')\n        return reduce_batch_sum(tf.square((y - target)), name=scope)", "docstring": "Calculates the sum of squared errors between y and target.\n\nArgs:\ny: the calculated values.\ntarget: the desired values.\nname: the name for this op, defaults to l2_regression\nReturns:\nA tensorflow op.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    scca_file = pyscca.file()\n    try:\n        scca_file.open_file_object(file_object)\n    except IOError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to open file with error: {0!s}'.format(exception))\n        return\n    format_version = scca_file.format_version\n    executable_filename = scca_file.executable_filename\n    prefetch_hash = scca_file.prefetch_hash\n    run_count = scca_file.run_count\n    number_of_volumes = scca_file.number_of_volumes\n    volume_serial_numbers = []\n    volume_device_paths = []\n    path = ''\n    for volume_information in iter(scca_file.volumes):\n        volume_serial_number = volume_information.serial_number\n        volume_device_path = volume_information.device_path\n        volume_serial_numbers.append(volume_serial_number)\n        volume_device_paths.append(volume_device_path)\n        timestamp = volume_information.get_creation_time_as_integer()\n        if timestamp:\n            event_data = windows_events.WindowsVolumeEventData()\n            event_data.device_path = volume_device_path\n            event_data.origin = parser_mediator.GetFilename()\n            event_data.serial_number = volume_serial_number\n            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        for filename in iter(scca_file.filenames):\n            if (not filename):\n                continue\n            if (filename.startswith(volume_device_path) and filename.endswith(executable_filename)):\n                (_, _, path) = filename.partition(volume_device_path)\n    mapped_files = []\n    for (entry_index, file_metrics) in enumerate(scca_file.file_metrics_entries):\n        mapped_file_string = file_metrics.filename\n        if (not mapped_file_string):\n            parser_mediator.ProduceExtractionWarning('missing filename for file metrics entry: {0:d}'.format(entry_index))\n            continue\n        file_reference = file_metrics.file_reference\n        if file_reference:\n            mapped_file_string = '{0:s} [MFT entry: {1:d}, sequence: {2:d}]'.format(mapped_file_string, (file_reference & 281474976710655), (file_reference >> 48))\n        mapped_files.append(mapped_file_string)\n    event_data = WinPrefetchExecutionEventData()\n    event_data.executable = executable_filename\n    event_data.mapped_files = mapped_files\n    event_data.number_of_volumes = number_of_volumes\n    event_data.path = path\n    event_data.prefetch_hash = prefetch_hash\n    event_data.run_count = run_count\n    event_data.version = format_version\n    event_data.volume_device_paths = volume_device_paths\n    event_data.volume_serial_numbers = volume_serial_numbers\n    timestamp = scca_file.get_last_run_time_as_integer(0)\n    if (not timestamp):\n        parser_mediator.ProduceExtractionWarning('missing last run time')\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n    else:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    if (format_version >= 26):\n        for last_run_time_index in range(1, 8):\n            timestamp = scca_file.get_last_run_time_as_integer(last_run_time_index)\n  
          if (not timestamp):\n                continue\n            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n            date_time_description = 'Previous {0:s}'.format(definitions.TIME_DESCRIPTION_LAST_RUN)\n            event = time_events.DateTimeValuesEvent(date_time, date_time_description)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n    scca_file.close()", "docstring": "Parses a Windows Prefetch file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "codesearchnet"}
{"code": "def dr( self, cell_lengths ):\n        \n        half_cell_lengths = cell_lengths / 2.0\n        this_dr = self.final_site.r - self.initial_site.r\n        for i in range( 3 ):\n            if this_dr[ i ] > half_cell_lengths[ i ]:\n                this_dr[ i ] -= cell_lengths[ i ]\n            if this_dr[ i ] < -half_cell_lengths[ i ]:\n                this_dr[ i ] += cell_lengths[ i ]\n        return this_dr", "docstring": "Particle displacement vector for this jump\n\nArgs:\ncell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal simulation cell.\n\nReturns\n(np.array(x,y,z)): dr", "source": "juraj-google-style"}
{"code": "def _num_elements(self) -> NoReturn:\n    raise NotImplementedError()", "docstring": "Number of elements of this Tensor.\n\nUnlike regular Tensors, the number of elements is always known for\nEagerTensors.\n\nThis is more performant than tensor.shape.num_elements\n\nReturns:\nLong - num elements in the tensor", "source": "github-repos"}
{"code": "def guass(self, mu: float, sigma: float) -> float:\n        \n        return float(\n            lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma)\n        )", "docstring": "Return a random number using Gaussian distribution.\n\nArgs:\nmu (float): The median returned value.\nsigma (float): The standard deviation.\n\nReturns:\nfloat: A random float.", "source": "juraj-google-style"}
{"code": "def flatlist_dropdup(list_of_lists):\n    \n    return list(set([str(item) for sublist in list_of_lists for item in sublist]))", "docstring": "Make a single list out of a list of lists, and drop all duplicates.\n\nArgs:\nlist_of_lists: List of lists.\n\nReturns:\nlist: List of single objects.", "source": "juraj-google-style"}
{"code": "def __init__(self, iterator_resource, initializer, output_types, output_shapes, output_classes):\n    self._iterator_resource = iterator_resource\n    self._initializer = initializer\n    if output_types is None or output_shapes is None or output_classes is None:\n        raise ValueError(f'All of `output_types`, `output_shapes`, and `output_classes` must be specified to create an iterator. Got `output_types` = {output_types!r}, `output_shapes` = {output_shapes!r}, `output_classes` = {output_classes!r}.')\n    self._element_spec = structure.convert_legacy_structure(output_types, output_shapes, output_classes)\n    self._flat_tensor_shapes = structure.get_flat_tensor_shapes(self._element_spec)\n    self._flat_tensor_types = structure.get_flat_tensor_types(self._element_spec)\n    self._string_handle = gen_dataset_ops.iterator_to_string_handle(self._iterator_resource)\n    self._get_next_call_count = 0\n    ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)", "docstring": "Creates a new iterator from the given iterator resource.\n\nNote: Most users will not call this initializer directly, and will\ninstead use `Dataset.make_initializable_iterator()` or\n`Dataset.make_one_shot_iterator()`.\n\nArgs:\niterator_resource: A `tf.resource` scalar `tf.Tensor` representing the\niterator.\ninitializer: A `tf.Operation` that should be run to initialize this\niterator.\noutput_types: A (nested) structure of `tf.DType` objects corresponding to\neach component of an element of this iterator.\noutput_shapes: A (nested) structure of `tf.TensorShape` objects\ncorresponding to each component of an element of this iterator.\noutput_classes: A (nested) structure of Python `type` objects\ncorresponding to each component of an element of this iterator.\n\nRaises:\nTypeError: If `output_types`, `output_shapes`, or `output_classes` is not\nspecified.", "source": "github-repos"}
{"code": "class Monitor(object):\n\n    def __init__(self, namespace: str, name_prefix: str) -> None:\n        self.namespace = namespace\n        self.name_prefix = name_prefix\n        self.doFn = MonitorDoFn(namespace, name_prefix)", "docstring": "A monitor of elements with support for later retrieving their metrics\n\nmonitor objects contains a doFn to record metrics\n\nArgs:\nnamespace: the namespace all metrics within this Monitor uses\nname_prefix: a prefix for this Monitor's metrics' names, intended to\nbe unique in per-monitor basis in pipeline", "source": "github-repos"}
{"code": "def remove_hairs_decorator(fn=None, hairs=HAIRS):\n    \n    def decorator_wrapper(fn):\n        @wraps(fn)\n        def decorator(*args, **kwargs):\n            out = fn(*args, **kwargs)\n\n            return remove_hairs(out, hairs)\n\n        return decorator\n\n    if fn:\n        return decorator_wrapper(fn)\n\n    return decorator_wrapper", "docstring": "Parametrized decorator wrapping the :func:`remove_hairs` function.\n\nArgs:\nhairs (str, default HAIRS): List of characters which should be removed.\nSee :attr:`HAIRS` for details.", "source": "juraj-google-style"}
{"code": "def get_tqdm_kwargs(**kwargs):\n    \n    default = dict(\n        smoothing=0.5,\n        dynamic_ncols=True,\n        ascii=True,\n        bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]'\n    )\n\n    try:\n        \n        interval = float(os.environ['TENSORPACK_PROGRESS_REFRESH'])\n    except KeyError:\n        interval = _pick_tqdm_interval(kwargs.get('file', sys.stderr))\n\n    default['mininterval'] = interval\n    default.update(kwargs)\n    return default", "docstring": "Return default arguments to be used with tqdm.\n\nArgs:\nkwargs: extra arguments to be used.\nReturns:\ndict:", "source": "juraj-google-style"}
{"code": "def add_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n    control_deps = []\n    if roll_convention == constants.BusinessDayConvention.NONE:\n        message = 'Some dates in date_tensor are not business days. Please specify the roll_convention argument.'\n        is_bus_day = self.is_business_day(date_tensor)\n        control_deps.append(tf.debugging.assert_equal(is_bus_day, True, message=message))\n    else:\n        date_tensor = self.roll_to_business_day(date_tensor, roll_convention)\n    with tf.control_dependencies(control_deps):\n        cumul_bus_days_table = self._compute_cumul_bus_days_table()\n        cumul_bus_days = self._gather(cumul_bus_days_table, date_tensor.ordinal() - self._ordinal_offset + 1)\n        target_cumul_bus_days = cumul_bus_days + num_days\n        bus_day_ordinals_table = self._compute_bus_day_ordinals_table()\n        ordinals = self._gather(bus_day_ordinals_table, target_cumul_bus_days)\n        with tf.control_dependencies(self._assert_ordinals_in_bounds(ordinals)):\n            return dt.from_ordinals(ordinals, validate=False)", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `add_period_and_roll` with\nPeriodType.DAY. For example, adding 5 business days to Monday gives the next\nMonday (unless there are holidays on this week or next Monday). Adding 5\ndays and rolling means landing on Saturday and then rolling either to next\nMonday or to Friday of the same week, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the addition. If `roll_convention` is\n`NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\ndate_tensor: DateTensor of dates to advance from.\nnum_days: Tensor of int32 type broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def Readdir(self, path, fh=None):\n    if self.DataRefreshRequired(path):\n        self._RunAndWaitForVFSFileUpdate(path)\n    return super(GRRFuse, self).Readdir(path, fh=None)", "docstring": "Updates the directory listing from the client.\n\nArgs:\npath: The path to the directory to update. Client is inferred from this.\nfh: A file handler. Not used.\n\nReturns:\nA list of filenames.", "source": "codesearchnet"}
{"code": "def parse_config_input_output(args=sys.argv):\n    \n    parser = argparse.ArgumentParser(\n        description='Process the input files using the given config')\n    parser.add_argument(\n        'config_file',\n        help='Configuration file.',\n        metavar='FILE', type=extant_file)\n    parser.add_argument(\n        'input_dir',\n        help='Directory containing the input files.',\n        metavar='DIR', type=extant_dir)\n    parser.add_argument(\n        'output_dir',\n        help='Directory where the output files should be saved.',\n        metavar='DIR', type=extant_dir)\n    return parser.parse_args(args[1:])", "docstring": "Parse the args using the config_file, input_dir, output_dir pattern\n\nArgs:\nargs: sys.argv\n\nReturns:\nThe populated namespace object from parser.parse_args().\n\nRaises:\nTBD", "source": "juraj-google-style"}
{"code": "def underlying_variable_ref(t):\n    while (t.op.type in ['Identity', 'ReadVariableOp', 'Enter']):\n        t = t.op.inputs[0]\n    op_type = t.op.type\n    if (('Variable' in op_type) or ('VarHandle' in op_type)):\n        return t\n    else:\n        return None", "docstring": "Find the underlying variable ref.\n\nTraverses through Identity, ReadVariableOp, and Enter ops.\nStops when op type has Variable or VarHandle in name.\n\nArgs:\nt: a Tensor\n\nReturns:\na Tensor that is a variable ref, or None on error.", "source": "codesearchnet"}
{"code": "def save_results(vcs, signature, result_path, patterns):\n    \n    results_directory = _get_results_directory(vcs, signature)\n    if not os.path.exists(results_directory):\n        os.makedirs(results_directory)\n    with open(os.path.join(results_directory, 'patterns'), 'w') as f:\n        f.write('\\n'.join(patterns))\n    if not os.path.exists(os.path.join(results_directory, 'results')):\n        os.mkdir(os.path.join(results_directory, 'results'))\n    includes = ['--include={}'.format(x)\n                for x in patterns]\n    cmd = ['rsync', '-r'] + includes + ['--exclude=*',\n                                        os.path.join(result_path, ''),\n                                        os.path.join(results_directory, 'results', '')]\n    subprocess.check_call(cmd)", "docstring": "Save results matching `patterns` at `result_path`.\n\nArgs:\nvcs (easyci.vcs.base.Vcs) - the VCS object for the actual project\n(not the disposable copy)\nsignature (str) - the project state signature\nresult_path (str) - the path containing the result, usually\na disposable copy of the project\npatterns (str) - `rsync`-compatible patterns matching test results\nto save.", "source": "juraj-google-style"}
{"code": "def findLabel(self, query, create=False):\n    if isinstance(query, six.string_types):\n        query = query.lower()\n    for label in self._labels.values():\n        if ((isinstance(query, six.string_types) and (query == label.name.lower())) or (isinstance(query, Pattern) and query.search(label.name))):\n            return label\n    return (self.createLabel(query) if (create and isinstance(query, six.string_types)) else None)", "docstring": "Find a label with the given name.\n\nArgs:\nname (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name.\ncreate (bool): Whether to create the label if it doesn't exist (only if name is a str).\n\nReturns:\nUnion[gkeepapi.node.Label, None]: The label.", "source": "codesearchnet"}
{"code": "def GetIndentLevel(line):\n    indent = Match('^( *)\\\\S', line)\n    if indent:\n        return len(indent.group(1))\n    else:\n        return 0", "docstring": "Return the number of leading spaces in line.\n\nArgs:\nline: A string to check.\n\nReturns:\nAn integer count of leading spaces, possibly zero.", "source": "codesearchnet"}
{"code": "def GetServiceAccount(self, request, global_params=None):\n    config = self.GetMethodConfig('GetServiceAccount')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns the email address of the service account for your project used for interactions with Google Cloud KMS.\n\nArgs:\nrequest: (BigqueryProjectsGetServiceAccountRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(GetServiceAccountResponse) The response message.", "source": "github-repos"}
{"code": "def GetKeyByPath(self, key_path):\n    \n    root_key_path, _, key_path = key_path.partition(\n        definitions.KEY_PATH_SEPARATOR)\n\n    \n    root_key_path = root_key_path.upper()\n    root_key_path = self._ROOT_KEY_ALIASES.get(root_key_path, root_key_path)\n\n    if root_key_path not in self._ROOT_KEYS:\n      raise RuntimeError('Unsupported root key: {0:s}'.format(root_key_path))\n\n    key_path = definitions.KEY_PATH_SEPARATOR.join([root_key_path, key_path])\n    key_path_upper = key_path.upper()\n\n    for virtual_key_path, virtual_key_callback in self._VIRTUAL_KEYS:\n      virtual_key_path_upper = virtual_key_path.upper()\n      if key_path_upper.startswith(virtual_key_path_upper):\n        key_path_suffix = key_path[len(virtual_key_path):]\n\n        callback_function = getattr(self, virtual_key_callback)\n        virtual_key = callback_function(key_path_suffix)\n        if not virtual_key:\n          raise RuntimeError('Unable to resolve virtual key: {0:s}.'.format(\n              virtual_key_path))\n\n        return virtual_key\n\n    key_path_prefix_upper, registry_file = self._GetFileByPath(key_path_upper)\n    if not registry_file:\n      return None\n\n    if not key_path_upper.startswith(key_path_prefix_upper):\n      raise RuntimeError('Key path prefix mismatch.')\n\n    key_path_suffix = key_path[len(key_path_prefix_upper):]\n    key_path = key_path_suffix or definitions.KEY_PATH_SEPARATOR\n    return registry_file.GetKeyByPath(key_path)", "docstring": "Retrieves the key for a specific path.\n\nArgs:\nkey_path (str): Windows Registry key path.\n\nReturns:\nWinRegistryKey: Windows Registry key or None if not available.\n\nRaises:\nRuntimeError: if the root key is not supported.", "source": "juraj-google-style"}
{"code": "def of(cls, msg_header: MessageHeader) -> 'MessageDecoder':\n    cte_hdr = msg_header.parsed.content_transfer_encoding\n    return cls.of_cte(cte_hdr)", "docstring": "Return a decoder from the message header object.\n\nSee Also:\n:meth:`.of_cte`\n\nArgs:\nmsg_header: The message header object.", "source": "codesearchnet"}
{"code": "def _run_benchmarks(regex):\n    registry = list(GLOBAL_BENCHMARK_REGISTRY)\n    selected_benchmarks = []\n    for benchmark in registry:\n        benchmark_name = '%s.%s' % (benchmark.__module__, benchmark.__name__)\n        attrs = dir(benchmark)\n        benchmark_instance = None\n        for attr in attrs:\n            if not attr.startswith('benchmark'):\n                continue\n            candidate_benchmark_fn = getattr(benchmark, attr)\n            if not callable(candidate_benchmark_fn):\n                continue\n            full_benchmark_name = '%s.%s' % (benchmark_name, attr)\n            if regex == 'all' or re.search(regex, full_benchmark_name):\n                selected_benchmarks.append(full_benchmark_name)\n                benchmark_instance = benchmark_instance or benchmark()\n                instance_benchmark_fn = getattr(benchmark_instance, attr)\n                instance_benchmark_fn()\n    if not selected_benchmarks:\n        raise ValueError(\"No benchmarks matched the pattern: '{}'\".format(regex))", "docstring": "Run benchmarks that match regex `regex`.\n\nThis function goes through the global benchmark registry, and matches\nbenchmark class and method names of the form\n`module.name.BenchmarkClass.benchmarkMethod` to the given regex.\nIf a method matches, it is run.\n\nArgs:\nregex: The string regular expression to match Benchmark classes against.\n\nRaises:\nValueError: If no benchmarks were selected by the input regex.", "source": "github-repos"}
{"code": "def info(self, **kwargs):\n    path = self._get_series_id_season_number_path('info')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the primary information about a TV season by its season number.\n\nArgs:\nlanguage: (optional) ISO 639 code.\nappend_to_response: (optional) Comma separated, any TV series\nmethod.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def ws45(msg):\n    d = hex2bin(data(msg))\n    if (d[3] == '0'):\n        return None\n    ws = bin2int(d[4:6])\n    return ws", "docstring": "Wind shear.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: Wind shear level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "codesearchnet"}
{"code": "def get_custom_object_name(obj):\n    if hasattr(obj, 'name'):\n        return obj.name\n    elif hasattr(obj, '__name__'):\n        return obj.__name__\n    elif hasattr(obj, '__class__'):\n        return generic_utils.to_snake_case(obj.__class__.__name__)\n    else:\n        return None", "docstring": "Returns the name to use for a custom loss or metric callable.\n\nArgs:\nobj: Custom loss of metric callable\n\nReturns:\nName to use, or `None` if the object was not recognized.", "source": "github-repos"}
{"code": "def _add_loss_summaries(total_loss):\n  \n  \n  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n  losses = tf.get_collection('losses')\n  loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n  \n  \n  for l in losses + [total_loss]:\n    \n    \n    tf.summary.scalar(l.op.name + ' (raw)', l)\n    tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n  return loss_averages_op", "docstring": "Add summaries for losses in CIFAR-10 model.\n\nGenerates moving average for all losses and associated summaries for\nvisualizing the performance of the network.\n\nArgs:\ntotal_loss: Total loss from loss().\nReturns:\nloss_averages_op: op for generating moving averages of losses.", "source": "juraj-google-style"}
{"code": "def _CreateStopsFolder(self, schedule, doc):\n    \n    if not schedule.GetStopList():\n      return None\n    stop_folder = self._CreateFolder(doc, 'Stops')\n    stop_folder_selection = self._StopFolderSelectionMethod(stop_folder)\n    stop_style_selection = self._StopStyleSelectionMethod(doc)\n    stops = list(schedule.GetStopList())\n    stops.sort(key=lambda x: x.stop_name)\n    for stop in stops:\n      (folder, pathway_folder) = stop_folder_selection(stop)\n      (style_id, pathway_style_id) = stop_style_selection(stop)\n      self._CreateStopPlacemark(folder, stop, style_id)\n      if (self.show_stop_hierarchy and\n          stop.location_type != transitfeed.Stop.LOCATION_TYPE_STATION and\n          stop.parent_station and stop.parent_station in schedule.stops):\n        placemark = self._CreatePlacemark(\n            pathway_folder, stop.stop_name, pathway_style_id)\n        parent_station = schedule.stops[stop.parent_station]\n        coordinates = [(stop.stop_lon, stop.stop_lat),\n                       (parent_station.stop_lon, parent_station.stop_lat)]\n        self._CreateLineString(placemark, coordinates)\n    return stop_folder", "docstring": "Create a KML Folder containing placemarks for each stop in the schedule.\n\nIf there are no stops in the schedule then no folder is created.\n\nArgs:\nschedule: The transitfeed.Schedule instance.\ndoc: The KML Document ElementTree.Element instance.\n\nReturns:\nThe Folder ElementTree.Element instance or None if there are no stops.", "source": "juraj-google-style"}
{"code": "def sia(transition, direction=Direction.BIDIRECTIONAL):\n    validate.direction(direction, allow_bi=True)\n    log.info('Calculating big-alpha for %s...', transition)\n    if (not transition):\n        log.info('Transition %s is empty; returning null SIA immediately.', transition)\n        return _null_ac_sia(transition, direction)\n    if (not connectivity.is_weak(transition.network.cm, transition.node_indices)):\n        log.info('%s is not strongly/weakly connected; returning null SIA immediately.', transition)\n        return _null_ac_sia(transition, direction)\n    log.debug('Finding unpartitioned account...')\n    unpartitioned_account = account(transition, direction)\n    log.debug('Found unpartitioned account.')\n    if (not unpartitioned_account):\n        log.info('Empty unpartitioned account; returning null AC SIA immediately.')\n        return _null_ac_sia(transition, direction)\n    cuts = _get_cuts(transition, direction)\n    engine = ComputeACSystemIrreducibility(cuts, transition, direction, unpartitioned_account)\n    result = engine.run_sequential()\n    log.info('Finished calculating big-ac-phi data for %s.', transition)\n    log.debug('RESULT: \\n%s', result)\n    return result", "docstring": "Return the minimal information partition of a transition in a specific\ndirection.\n\nArgs:\ntransition (Transition): The candidate system.\n\nReturns:\nAcSystemIrreducibilityAnalysis: A nested structure containing all the\ndata from the intermediate calculations. The top level contains the\nbasic irreducibility information for the given subsystem.", "source": "codesearchnet"}
{"code": "def set_maintainer(self, maintainer):\n        \n        \n        if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):\n            if 'id' not in maintainer:\n                maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)\n            maintainer = maintainer['id']\n        elif not isinstance(maintainer, str):\n            raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)\n        if is_valid_uuid(maintainer) is False:\n            raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)\n        self.data['maintainer'] = maintainer", "docstring": "Set the dataset's maintainer.\n\nArgs:\nmaintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):\n    properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid, validate=False)\n    return cls(dict_input=properties)", "docstring": "Construct a ChemKED instance directly from a ReSpecTh file.\n\nArguments:\nfilename_xml (`str`): Filename of the ReSpecTh-formatted XML file to be imported\nfile_author (`str`, optional): File author to be added to the list generated from the\nXML file\nfile_author_orcid (`str`, optional): ORCID for the file author being added to the list\nof file authors\n\nReturns:\n`ChemKED`: Instance of the `ChemKED` class containing the data in ``filename_xml``.\n\nExamples:\n>>> ck = ChemKED.from_respecth('respecth_file.xml')\n>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber')\n>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber',\nfile_author_orcid='0000-0000-0000-0000')", "source": "codesearchnet"}
{"code": "def update_firmware(self, firmware_information, force=False):\n        \n        firmware_uri = \"{}/firmware\".format(self.data[\"uri\"])\n        result = self._helper.update(firmware_information, firmware_uri, force=force)\n        self.refresh()\n\n        return result", "docstring": "Installs firmware to the member interconnects of a SAS Logical Interconnect.\n\nArgs:\nfirmware_information: Options to install firmware to a SAS Logical Interconnect.\nforce: If sets to true, the operation completes despite any problems with the network connectivy\nor the erros on the resource itself.\nReturns:\ndict: SAS Logical Interconnect Firmware.", "source": "juraj-google-style"}
{"code": "def GetArtifactPathDependencies(rdf_artifact):\n  \n  deps = set()\n  for source in rdf_artifact.sources:\n    for arg, value in iteritems(source.attributes):\n      paths = []\n      if arg in [\"path\", \"query\"]:\n        paths.append(value)\n      if arg == \"key_value_pairs\":\n        \n        paths.extend([x[\"key\"] for x in value])\n      if arg in [\"keys\", \"paths\", \"path_list\", \"content_regex_list\"]:\n        paths.extend(value)\n      for path in paths:\n        for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):\n          deps.add(match.group()[2:-2])  \n  deps.update(GetArtifactParserDependencies(rdf_artifact))\n  return deps", "docstring": "Return a set of knowledgebase path dependencies.\n\nArgs:\nrdf_artifact: RDF artifact object.\n\nReturns:\nA set of strings for the required kb objects e.g.\n[\"users.appdata\", \"systemroot\"]", "source": "juraj-google-style"}
{"code": "def generate_plaintext_random(plain_vocab, distribution, train_samples,\n                              length):\n  \n  if distribution is not None:\n    assert len(distribution) == len(plain_vocab)\n\n  train_indices = np.random.choice(\n      range(len(plain_vocab)), (train_samples, length), p=distribution)\n\n  return train_indices", "docstring": "Generates samples of text from the provided vocabulary.\n\nArgs:\nplain_vocab: vocabulary.\ndistribution: distribution.\ntrain_samples: samples for training.\nlength: length.\n\nReturns:\ntrain_indices (np.array of Integers): random integers for training.\nshape = [num_samples, length]\ntest_indices (np.array of Integers): random integers for testing.\nshape = [num_samples, length]\nplain_vocab   (list of Integers): unique vocabularies.", "source": "juraj-google-style"}
{"code": "def filter_error(self, error):\n    if error.filename != self._filename or error.line is None:\n        return True\n    if error.name == 'bad-return-type' and error.opcode_name in ('RETURN_VALUE', 'RETURN_CONST') and (error.line not in self.return_lines):\n        _, end = self._function_ranges.find_outermost(error.line)\n        if end:\n            error.set_line(end)\n    line = error.line or sys.maxsize\n    return line not in self._ignore and line not in self._disables[_ALL_ERRORS] and (line not in self._disables[error.name])", "docstring": "Return whether the error should be logged.\n\nThis method is suitable for use as an error filter.\n\nArgs:\nerror: An error._Error object.\n\nReturns:\nTrue iff the error should be included in the log.", "source": "github-repos"}
{"code": "def add_space(self, line):\n    if (not isinstance(self.last_item, Space)):\n        space = Space(self._structure)\n        self._structure.append(space)\n    self.last_item.add_line(line)\n    return self", "docstring": "Add a Space object to the section\n\nUsed during initial parsing mainly\n\nArgs:\nline (str): one line that defines the space, maybe whitespaces", "source": "codesearchnet"}
{"code": "def __init__(self, message):\n        \n        super(ItemNotFound, self).__init__(\n            reason=enums.ResultReason.ITEM_NOT_FOUND,\n            message=message\n        )", "docstring": "Create an ItemNotFound exception.\n\nArgs:\nmessage (string): A string containing information about the error.", "source": "juraj-google-style"}
{"code": "def IsDeletedOrDefault(clean_lines, linenum):\n    open_paren = clean_lines.elided[linenum].find('(')\n    if (open_paren < 0):\n        return False\n    (close_line, _, close_paren) = CloseExpression(clean_lines, linenum, open_paren)\n    if (close_paren < 0):\n        return False\n    return Match('\\\\s*=\\\\s*(?:delete|default)\\\\b', close_line[close_paren:])", "docstring": "Check if current constructor or operator is deleted or default.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if this is a deleted or default constructor.", "source": "codesearchnet"}
{"code": "def get_ggt(self, n, u):\n    gk = self[0].einsum_sequence([n, u, n, u])\n    result = ((- ((((2 * gk) * np.outer(u, u)) + self[0].einsum_sequence([n, n])) + self[1].einsum_sequence([n, u, n, u]))) / (2 * gk))\n    return result", "docstring": "Gets the Generalized Gruneisen tensor for a given\nthird-order elastic tensor expansion.\n\nArgs:\nn (3x1 array-like): normal mode direction\nu (3x1 array-like): polarization direction", "source": "codesearchnet"}
{"code": "def build_bird_configuration(config):\n    \n    bird_configuration = {}\n\n    if config.getboolean('daemon', 'ipv4'):\n        if os.path.islink(config.get('daemon', 'bird_conf')):\n            config_file = os.path.realpath(config.get('daemon', 'bird_conf'))\n            print(\"'bird_conf' is set to a symbolic link ({s} -> {d}, but we \"\n                  \"will use the canonical path of that link\"\n                  .format(s=config.get('daemon', 'bird_conf'), d=config_file))\n        else:\n            config_file = config.get('daemon', 'bird_conf')\n\n        dummy_ip_prefix = config.get('daemon', 'dummy_ip_prefix')\n        if not valid_ip_prefix(dummy_ip_prefix):\n            raise ValueError(\"invalid dummy IPv4 prefix: {i}\"\n                             .format(i=dummy_ip_prefix))\n\n        bird_configuration[4] = {\n            'config_file': config_file,\n            'variable_name': config.get('daemon', 'bird_variable'),\n            'dummy_ip_prefix': dummy_ip_prefix,\n            'reconfigure_cmd': config.get('daemon', 'bird_reconfigure_cmd'),\n            'keep_changes': config.getboolean('daemon', 'bird_keep_changes'),\n            'changes_counter': config.getint('daemon', 'bird_changes_counter')\n        }\n    if config.getboolean('daemon', 'ipv6'):\n        if os.path.islink(config.get('daemon', 'bird6_conf')):\n            config_file = os.path.realpath(config.get('daemon', 'bird6_conf'))\n            print(\"'bird6_conf' is set to a symbolic link ({s} -> {d}, but we \"\n                  \"will use the canonical path of that link\"\n                  .format(s=config.get('daemon', 'bird6_conf'), d=config_file))\n        else:\n            config_file = config.get('daemon', 'bird6_conf')\n\n        dummy_ip_prefix = config.get('daemon', 'dummy_ip6_prefix')\n        if not valid_ip_prefix(dummy_ip_prefix):\n            raise ValueError(\"invalid dummy IPv6 prefix: {i}\"\n                             .format(i=dummy_ip_prefix))\n        bird_configuration[6] = {\n            'config_file': config_file,\n            'variable_name': config.get('daemon', 'bird6_variable'),\n            'dummy_ip_prefix': dummy_ip_prefix,\n            'reconfigure_cmd': config.get('daemon', 'bird6_reconfigure_cmd'),\n            'keep_changes': config.getboolean('daemon', 'bird6_keep_changes'),\n            'changes_counter': config.getint('daemon', 'bird6_changes_counter')\n        }\n\n    return bird_configuration", "docstring": "Build bird configuration structure.\n\nFirst it performs a sanity check against bird settings and then builds a\ndictionary structure with bird configuration per IP version.\n\nArguments:\nconfig (obj): A configparser object which holds our configuration.\n\nReturns:\nA dictionary\n\nRaises:\nValueError if sanity check fails.", "source": "juraj-google-style"}
{"code": "def set_boolean(self, option, value):\n        \n        if not isinstance(value, bool):\n            raise TypeError(\"%s must be a boolean\" % option)\n\n        self.options[option] = str(value).lower()", "docstring": "Set a boolean option.\n\nArgs:\noption (str): name of option.\nvalue (bool): value of the option.\n\nRaises:\nTypeError: Value must be a boolean.", "source": "juraj-google-style"}
{"code": "def __init__(self, timestamp=None):\n    \n    super(DelphiDateTime, self).__init__()\n    self._precision = definitions.PRECISION_1_MILLISECOND\n    self._timestamp = timestamp", "docstring": "Initializes a Delphi TDateTime timestamp.\n\nArgs:\ntimestamp (Optional[float]): Delphi TDateTime timestamp.", "source": "juraj-google-style"}
{"code": "def _get_metrics_from_layers(layers):\n    metrics = []\n    layers = layer_utils.filter_empty_layer_containers(layers)\n    for layer in layers:\n        if isinstance(layer, Model):\n            metrics.extend(layer._metrics)\n            metrics.extend(_get_metrics_from_layers(layer.layers))\n        else:\n            metrics.extend(layer.metrics)\n    return metrics", "docstring": "Returns list of metrics from the given layers.\n\nThis will not include the `compile` metrics of a model layer.\n\nArgs:\nlayers: List of layers.\n\nReturns:\nList of metrics.", "source": "github-repos"}
{"code": "def image(self, tag, image, step=None):\n    \n    image = onp.array(image)\n    if step is None:\n      step = self._step\n    else:\n      self._step = step\n    if len(onp.shape(image)) == 2:\n      image = image[:, :, onp.newaxis]\n    if onp.shape(image)[-1] == 1:\n      image = onp.repeat(image, 3, axis=-1)\n    image_strio = io.BytesIO()\n    plt.imsave(image_strio, image, format='png')\n    image_summary = Summary.Image(\n        encoded_image_string=image_strio.getvalue(),\n        colorspace=3,\n        height=image.shape[0],\n        width=image.shape[1])\n    summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])\n    self.add_summary(summary, step)", "docstring": "Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].\n\nArgs:\ntag: str: label for this data\nimage: ndarray: [H,W], [H,W,1], [H,W,3] save image in greyscale or colors/\nstep: int: training step", "source": "juraj-google-style"}
{"code": "def launchctl(sub_cmd, *args, **kwargs):\n    return_stdout = kwargs.pop('return_stdout', False)\n    cmd = ['launchctl', sub_cmd]\n    cmd.extend(args)\n    kwargs['python_shell'] = False\n    kwargs = salt.utils.args.clean_kwargs(**kwargs)\n    ret = __salt__['cmd.run_all'](cmd, **kwargs)\n    error = _check_launchctl_stderr(ret)\n    if (ret['retcode'] or error):\n        out = 'Failed to {0} service:\\n'.format(sub_cmd)\n        out += 'stdout: {0}\\n'.format(ret['stdout'])\n        out += 'stderr: {0}\\n'.format(ret['stderr'])\n        out += 'retcode: {0}'.format(ret['retcode'])\n        raise CommandExecutionError(out)\n    else:\n        return (ret['stdout'] if return_stdout else True)", "docstring": "Run a launchctl command and raise an error if it fails\n\nArgs: additional args are passed to launchctl\nsub_cmd (str): Sub command supplied to launchctl\n\nKwargs: passed to ``cmd.run_all``\nreturn_stdout (bool): A keyword argument. If true return the stdout of\nthe launchctl command\n\nReturns:\nbool: ``True`` if successful\nstr: The stdout of the launchctl command if requested\n\nRaises:\nCommandExecutionError: If command fails\n\nCLI Example:\n\n.. code-block:: bash\n\nimport salt.utils.mac_service\nsalt.utils.mac_service.launchctl('debug', 'org.cups.cupsd')", "source": "codesearchnet"}
{"code": "def match_docstring_with_signature(obj: Any) -> Optional[Tuple[str, str]]:\n    if len(getattr(obj, '__doc__', '')) == 0:\n        return\n    try:\n        source, _ = inspect.getsourcelines(obj)\n    except OSError:\n        source = []\n    idx = 0\n    while idx < len(source) and '\"\"\"' not in source[idx]:\n        idx += 1\n    ignore_order = False\n    if idx < len(source):\n        line_before_docstring = source[idx - 1]\n        if re.search('^\\\\s*\n            return\n        elif re.search('^\\\\s*\n            ignore_order = True\n    signature = inspect.signature(obj).parameters\n    obj_doc_lines = obj.__doc__.split('\\n')\n    idx = 0\n    while idx < len(obj_doc_lines) and _re_args.search(obj_doc_lines[idx]) is None:\n        idx += 1\n    if idx == len(obj_doc_lines):\n        return\n    if 'kwargs' in signature and signature['kwargs'].annotation != inspect._empty:\n        return\n    indent = find_indent(obj_doc_lines[idx])\n    arguments = {}\n    current_arg = None\n    idx += 1\n    start_idx = idx\n    while idx < len(obj_doc_lines) and (len(obj_doc_lines[idx].strip()) == 0 or find_indent(obj_doc_lines[idx]) > indent):\n        if find_indent(obj_doc_lines[idx]) == indent + 4:\n            re_search_arg = _re_parse_arg.search(obj_doc_lines[idx])\n            if re_search_arg is not None:\n                _, name, description = re_search_arg.groups()\n                current_arg = name\n                if name in signature:\n                    default = signature[name].default\n                    if signature[name].kind is inspect._ParameterKind.VAR_KEYWORD:\n                        default = None\n                    new_description = replace_default_in_arg_description(description, default)\n                else:\n                    new_description = description\n                init_doc = _re_parse_arg.sub(f'\\\\1\\\\2 ({new_description}):', obj_doc_lines[idx])\n                arguments[current_arg] = [init_doc]\n        elif current_arg is not None:\n            arguments[current_arg].append(obj_doc_lines[idx])\n        idx += 1\n    idx -= 1\n    if current_arg:\n        while len(obj_doc_lines[idx].strip()) == 0:\n            arguments[current_arg] = arguments[current_arg][:-1]\n            idx -= 1\n    idx += 1\n    old_doc_arg = '\\n'.join(obj_doc_lines[start_idx:idx])\n    old_arguments = list(arguments.keys())\n    arguments = {name: '\\n'.join(doc) for name, doc in arguments.items()}\n    for name in set(signature.keys()) - set(arguments.keys()):\n        arg = signature[name]\n        if name.startswith('_') or arg.kind in [inspect._ParameterKind.VAR_KEYWORD, inspect._ParameterKind.VAR_POSITIONAL]:\n            arguments[name] = ''\n        else:\n            arg_desc = get_default_description(arg)\n            arguments[name] = ' ' * (indent + 4) + f'{name} ({arg_desc}): <fill_docstring>'\n    if ignore_order:\n        new_param_docs = [arguments[name] for name in old_arguments if name in signature]\n        missing = set(signature.keys()) - set(old_arguments)\n        new_param_docs.extend([arguments[name] for name in missing if len(arguments[name]) > 0])\n    else:\n        new_param_docs = [arguments[name] for name in signature.keys() if len(arguments[name]) > 0]\n    new_doc_arg = '\\n'.join(new_param_docs)\n    return (old_doc_arg, new_doc_arg)", "docstring": "Matches the docstring of an object with its signature.\n\nArgs:\nobj (`Any`): The object to process.\n\nReturns:\n`Optional[Tuple[str, str]]`: Returns `None` if there is no 
docstring or no parameters documented in the\ndocstring, otherwise returns a tuple of two strings: the current documentation of the arguments in the\ndocstring and the one matched with the signature.", "source": "github-repos"}
{"code": "def is_outlier(df, item_id, segment_id, price):\n    if ((segment_id, item_id) not in df.index):\n        return False\n    mean = df.loc[(segment_id, item_id)]['mean']\n    std = df.loc[(segment_id, item_id)]['std']\n    return gaussian_outlier.is_outlier(x=price, mean=mean, standard_deviation=std)", "docstring": "Verify if a item is an outlier compared to the\nother occurrences of the same item, based on his price.\n\nArgs:\nitem_id: idPlanilhaItens\nsegment_id: idSegmento\nprice: VlUnitarioAprovado", "source": "codesearchnet"}
{"code": "def add_transcript(self, transcript):\n        \n        logger.debug(\"Adding transcript {0} to variant {1}\".format(\n            transcript, self['variant_id']))\n        self['transcripts'].append(transcript)", "docstring": "Add the information transcript\n\nThis adds a transcript dict to variant['transcripts']\n\nArgs:\ntranscript (dict): A transcript dictionary", "source": "juraj-google-style"}
{"code": "def get_airports(self, country):\n        \n        url = AIRPORT_BASE.format(country.replace(\" \", \"-\"))\n        return self._fr24.get_airports_data(url)", "docstring": "Returns a list of all the airports\nFor a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc\n\nArgs:\ncountry (str): The country for which the airports will be fetched\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\nf.get_airports('India')", "source": "juraj-google-style"}
{"code": "def download(self, resource_id):\n        \n        self.resource_id(str(resource_id))\n        self._request_uri = '{}/download'.format(self._request_uri)", "docstring": "Update the request URI to download the document for this resource.\n\nArgs:\nresource_id (integer): The group id.", "source": "juraj-google-style"}
{"code": "def GetClientURNsForHostnames(hostnames, token=None):\n    if data_store.RelationalDBEnabled():\n        index = ClientIndex()\n    else:\n        index = CreateClientIndex(token=token)\n    keywords = set()\n    for hostname in hostnames:\n        if hostname.startswith('host:'):\n            keywords.add(hostname)\n        else:\n            keywords.add(('host:%s' % hostname))\n    results = index.ReadClientPostingLists(keywords)\n    result = {}\n    for (keyword, hits) in iteritems(results):\n        result[keyword[len('host:'):]] = hits\n    return result", "docstring": "Gets all client_ids for a given list of hostnames or FQDNS.\n\nArgs:\nhostnames: A list of hostnames / FQDNs.\ntoken: An ACL token.\n\nReturns:\nA dict with a list of all known GRR client_ids for each hostname.", "source": "codesearchnet"}
{"code": "def _process_config_item(item, dirname):\n    item = copy.deepcopy(item)\n    html = item.get('html', None)\n    if (not html):\n        raise UserWarning((\"Can't find HTML source for item:\\n%s\" % str(item)))\n    link = (html if (':\n    del item['html']\n    for (key, val) in item.items():\n        if ('notfoundmsg' in val):\n            val['notfoundmsg'] = val['notfoundmsg'].replace('$name', key)\n    return {'html': _get_source(link), 'link': link, 'vars': item}", "docstring": "Process one item from the configuration file, which contains multiple items\nsaved as dictionary.\n\nThis function reads additional data from the config and do some\nreplacements - for example, if you specify url, it will download data\nfrom this url and so on.\n\nArgs:\nitem (dict): Item, which will be processed.\n\nNote:\nReturned data format::\n{\n\"link\": \"link to html page/file\",\n\"html\": \"html code from file/url\",\n\"vars\": {\n\"varname\": {\n\"data\": \"matching data..\",\n...\n}\n}\n}\n\nReturns:\ndict: Dictionary in format showed above.", "source": "codesearchnet"}
{"code": "def ParseDom(self, dom, feed):\n    shape_num = 0\n    for node in dom.getElementsByTagName('Placemark'):\n        p = self.ParsePlacemark(node)\n        if p.IsPoint():\n            (lon, lat) = p.coordinates[0]\n            m = self.stopNameRe.search(p.name)\n            feed.AddStop(lat, lon, m.group(1))\n        elif p.IsLine():\n            self.ConvertPlacemarkToShape(p, feed)", "docstring": "Parses the given kml dom tree and updates the Google transit feed object.\n\nArgs:\ndom - kml dom tree\nfeed - an instance of Schedule class to be updated", "source": "codesearchnet"}
{"code": "def makeDoubleLinked(dom, parent=None):\n    \n    dom.parent = parent\n\n    for child in dom.childs:\n        child.parent = dom\n        makeDoubleLinked(child, dom)", "docstring": "Standard output from `dhtmlparser` is single-linked tree. This will make it\ndouble-linked.\n\nArgs:\ndom (obj): :class:`.HTMLElement` instance.\nparent (obj, default None): Don't use this, it is used in recursive\ncall.", "source": "juraj-google-style"}
{"code": "def decision_points(self) -> List[DecisionPoint]:\n    return self._decision_points", "docstring": "Returns all decision points in their declaration order.\n\nReturns:\nAll decision points in current space. For multi-choices, the sub-choice\nobjects will be returned. Users can call `spec.parent_choice` to access\nthe parent multi-choice node.", "source": "github-repos"}
{"code": "def modify_binding(site, binding, hostheader=None, ipaddress=None, port=None, sslflags=None):\n    if ((sslflags is not None) and (sslflags not in _VALID_SSL_FLAGS)):\n        message = \"Invalid sslflags '{0}' specified. Valid sslflags range: {1}..{2}\".format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[(- 1)])\n        raise SaltInvocationError(message)\n    current_sites = list_sites()\n    if (site not in current_sites):\n        log.debug(\"Site '%s' not defined.\", site)\n        return False\n    current_bindings = list_bindings(site)\n    if (binding not in current_bindings):\n        log.debug(\"Binding '%s' not defined.\", binding)\n        return False\n    (i, p, h) = binding.split(':')\n    new_binding = ':'.join([(ipaddress if (ipaddress is not None) else i), (six.text_type(port) if (port is not None) else six.text_type(p)), (hostheader if (hostheader is not None) else h)])\n    if (new_binding != binding):\n        ps_cmd = ['Set-WebBinding', '-Name', \"'{0}'\".format(site), '-BindingInformation', \"'{0}'\".format(binding), '-PropertyName', 'BindingInformation', '-Value', \"'{0}'\".format(new_binding)]\n        cmd_ret = _srvmgr(ps_cmd)\n        if (cmd_ret['retcode'] != 0):\n            msg = 'Unable to modify binding: {0}\\nError: {1}'.format(binding, cmd_ret['stderr'])\n            raise CommandExecutionError(msg)\n    if ((sslflags is not None) and (sslflags != current_sites[site]['bindings'][binding]['sslflags'])):\n        ps_cmd = ['Set-WebBinding', '-Name', \"'{0}'\".format(site), '-BindingInformation', \"'{0}'\".format(new_binding), '-PropertyName', 'sslflags', '-Value', \"'{0}'\".format(sslflags)]\n        cmd_ret = _srvmgr(ps_cmd)\n        if (cmd_ret['retcode'] != 0):\n            msg = 'Unable to modify binding SSL Flags: {0}\\nError: {1}'.format(sslflags, cmd_ret['stderr'])\n            raise CommandExecutionError(msg)\n    log.debug('Binding modified successfully: %s', binding)\n    return True", "docstring": "Modify an IIS Web Binding. Use ``site`` and ``binding`` to target the\nbinding.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nsite (str): The IIS site name.\nbinding (str): The binding to edit. This is a combination of the\nIP address, port, and hostheader. It is in the following format:\nipaddress:port:hostheader. For example, ``*:80:`` or\n``*:80:salt.com``\nhostheader (str): The host header of the binding. Usually the hostname.\nipaddress (str): The IP address of the binding.\nport (int): The TCP port of the binding.\nsslflags (str): The flags representing certificate type and storage of\nthe binding.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\nThe following will seat the host header of binding ``*:80:`` for ``site0``\nto ``example.com``\n\n.. code-block:: bash\n\nsalt '*' win_iis.modify_binding site='site0' binding='*:80:' hostheader='example.com'", "source": "codesearchnet"}
{"code": "def get_pattern_additional_cycles(self, patternnumber):\n    _checkPatternNumber(patternnumber)\n    address = _calculateRegisterAddress('cycles', patternnumber)\n    return self.read_register(address)", "docstring": "Get the number of additional cycles for a given pattern.\n\nArgs:\npatternnumber (integer): 0-7\n\nReturns:\nThe number of additional cycles (int).", "source": "codesearchnet"}
{"code": "def list_depth(list_, func=max, _depth=0):\n    \n    depth_list = [list_depth(item, func=func, _depth=_depth + 1)\n                  for item in  list_ if util_type.is_listlike(item)]\n    if len(depth_list) > 0:\n        return func(depth_list)\n    else:\n        return _depth", "docstring": "Returns the deepest level of nesting within a list of lists\n\nArgs:\nlist_  : a nested listlike object\nfunc   : depth aggregation strategy (defaults to max)\n_depth : internal var\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> list_ = [[[[[1]]], [3]], [[1], [3]], [[1], [3]]]\n>>> result = (list_depth(list_, _depth=0))\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def encode(self, s):\n    if s.endswith('.mp3'):\n        out_filepath = (s[:(- 4)] + '.wav')\n        call(['sox', '--guard', s, '-r', '16k', '-b', '16', '-c', '1', out_filepath])\n        s = out_filepath\n    elif (not s.endswith('.wav')):\n        out_filepath = (s + '.wav')\n        if (not os.path.exists(out_filepath)):\n            call(['sox', '-r', '16k', '-b', '16', '-c', '1', s, out_filepath])\n        s = out_filepath\n    (rate, data) = wavfile.read(s)\n    assert (rate == self._sample_rate)\n    assert (len(data.shape) == 1)\n    if (data.dtype not in [np.float32, np.float64]):\n        data = (data.astype(np.float32) / np.iinfo(data.dtype).max)\n    return data.tolist()", "docstring": "Transform a string with a filename into a list of float32.\n\nArgs:\ns: path to the file with a waveform.\n\nReturns:\nsamples: list of int16s", "source": "codesearchnet"}
{"code": "def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarray) -> np.ndarray:\n    filter_diff = np.diff(filter_freqs)\n    slopes = np.expand_dims(filter_freqs, 0) - np.expand_dims(fft_freqs, 1)\n    down_slopes = -slopes[:, :-2] / filter_diff[:-1]\n    up_slopes = slopes[:, 2:] / filter_diff[1:]\n    return np.maximum(np.zeros(1), np.minimum(down_slopes, up_slopes))", "docstring": "Creates a triangular filter bank.\n\nAdapted from *torchaudio* and *librosa*.\n\nArgs:\nfft_freqs (`np.ndarray` of shape `(num_frequency_bins,)`):\nDiscrete frequencies of the FFT bins in Hz.\nfilter_freqs (`np.ndarray` of shape `(num_mel_filters,)`):\nCenter frequencies of the triangular filters to create, in Hz.\n\nReturns:\n`np.ndarray` of shape `(num_frequency_bins, num_mel_filters)`", "source": "github-repos"}
{"code": "def delete_case(self, case):\n        \n        mongo_case = self.case(case)\n\n        if not mongo_case:\n            raise CaseError(\"Tried to delete case {0} but could not find case\".format(\n                case.get('case_id')\n            ))\n        LOG.info(\"Removing case {0} from database\".format(\n            mongo_case.get('case_id')\n        ))\n        self.db.case.delete_one({'_id': mongo_case['_id']})\n\n        return", "docstring": "Delete case from the database\n\nDelete a case from the database\n\nArgs:\ncase (dict): A case dictionary", "source": "juraj-google-style"}
{"code": "def serialize_example(transformed_json_data, info_dict):\n    import six\n    import tensorflow as tf\n\n    def _make_int64_list(x):\n        return tf.train.Feature(int64_list=tf.train.Int64List(value=x))\n\n    def _make_bytes_list(x):\n        return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))\n\n    def _make_float_list(x):\n        return tf.train.Feature(float_list=tf.train.FloatList(value=x))\n    if (sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict))):\n        raise ValueError(('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)), list(six.iterkeys(info_dict)))))\n    ex_dict = {}\n    for (name, info) in six.iteritems(info_dict):\n        if (info['dtype'] == tf.int64):\n            ex_dict[name] = _make_int64_list(transformed_json_data[name])\n        elif (info['dtype'] == tf.float32):\n            ex_dict[name] = _make_float_list(transformed_json_data[name])\n        elif (info['dtype'] == tf.string):\n            ex_dict[name] = _make_bytes_list(transformed_json_data[name])\n        else:\n            raise ValueError(('Unsupported data type %s' % info['dtype']))\n    ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))\n    return ex.SerializeToString()", "docstring": "Makes a serialized tf.example.\n\nArgs:\ntransformed_json_data: dict of transformed data.\ninfo_dict: output of feature_transforms.get_transfrormed_feature_info()\n\nReturns:\nThe serialized tf.example version of transformed_json_data.", "source": "codesearchnet"}
{"code": "def get_gan_loss(self, true_frames, gen_frames, name):\n    with tf.variable_scope(('%s_discriminator' % name), reuse=tf.AUTO_REUSE):\n        (gan_d_loss, _, fake_logits_stop) = self.d_step(true_frames, gen_frames)\n    with tf.variable_scope(('%s_discriminator' % name), reuse=True):\n        (gan_g_loss_pos_d, gan_g_loss_neg_d) = self.g_step(gen_frames, fake_logits_stop)\n    gan_g_loss = (gan_g_loss_pos_d + gan_g_loss_neg_d)\n    tf.summary.scalar(('gan_loss_%s' % name), (gan_g_loss_pos_d + gan_d_loss))\n    if (self.hparams.gan_optimization == 'joint'):\n        gan_loss = (gan_g_loss + gan_d_loss)\n    else:\n        curr_step = self.get_iteration_num()\n        gan_loss = tf.cond(tf.logical_not(((curr_step % 2) == 0)), (lambda : gan_g_loss), (lambda : gan_d_loss))\n    return gan_loss", "docstring": "Get the discriminator + generator loss at every step.\n\nThis performs an 1:1 update of the discriminator and generator at every\nstep.\n\nArgs:\ntrue_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\nAssumed to be ground truth.\ngen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\nAssumed to be fake.\nname: discriminator scope.\nReturns:\nloss: 0-D Tensor, with d_loss + g_loss", "source": "codesearchnet"}
{"code": "def RemoveScanNode(self, path_spec):\n    scan_node = self._scan_nodes.get(path_spec, None)\n    if (not scan_node):\n        return None\n    if scan_node.sub_nodes:\n        raise RuntimeError('Scan node has sub nodes.')\n    parent_scan_node = scan_node.parent_node\n    if parent_scan_node:\n        parent_scan_node.sub_nodes.remove(scan_node)\n    if (path_spec == self._root_path_spec):\n        self._root_path_spec = None\n    del self._scan_nodes[path_spec]\n    if path_spec.IsFileSystem():\n        del self._file_system_scan_nodes[path_spec]\n    return parent_scan_node", "docstring": "Removes a scan node of a certain path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nSourceScanNode: parent scan node or None if not available.\n\nRaises:\nRuntimeError: if the scan node has sub nodes.", "source": "codesearchnet"}
{"code": "def getFingerprintsForTexts(self, strings, sparsity=1.0):\n        \n        body = [{\"text\": s} for s in strings]\n        return self._text.getRepresentationsForBulkText(self._retina, json.dumps(body), sparsity)", "docstring": "Bulk get Fingerprint for text.\nArgs:\nstrings, list(str): A list of texts to be evaluated (required)\nsparsity, float: Sparsify the resulting expression to this percentage (optional)\nReturns:\nlist of Fingerprint\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def _EvaluateExpressions(self, frame):\n    \n    return [self._FormatExpression(frame, expression) for expression in\n            self._definition.get('expressions') or []]", "docstring": "Evaluates watched expressions into a string form.\n\nIf expression evaluation fails, the error message is used as evaluated\nexpression string.\n\nArgs:\nframe: Python stack frame of breakpoint hit.\n\nReturns:\nArray of strings where each string corresponds to the breakpoint\nexpression with the same index.", "source": "juraj-google-style"}
{"code": "def scalar_pb(tag, data, description=None):\n  \n  arr = np.array(data)\n  if arr.shape != ():\n    raise ValueError('Expected scalar shape for tensor, got shape: %s.'\n                     % arr.shape)\n  if arr.dtype.kind not in ('b', 'i', 'u', 'f'):  \n    raise ValueError('Cast %s to float is not supported' % arr.dtype.name)\n  tensor_proto = tensor_util.make_tensor_proto(arr.astype(np.float32))\n  summary_metadata = metadata.create_summary_metadata(\n      display_name=None, description=description)\n  summary = summary_pb2.Summary()\n  summary.value.add(tag=tag,\n                    metadata=summary_metadata,\n                    tensor=tensor_proto)\n  return summary", "docstring": "Create a scalar summary_pb2.Summary protobuf.\n\nArguments:\ntag: String tag for the summary.\ndata: A 0-dimensional `np.array` or a compatible python number type.\ndescription: Optional long-form description for this summary, as a\n`str`. Markdown is supported. Defaults to empty.\n\nRaises:\nValueError: If the type or shape of the data is unsupported.\n\nReturns:\nA `summary_pb2.Summary` protobuf object.", "source": "juraj-google-style"}
{"code": "def arcsinh(x):\n    if any_symbolic_tensors((x,)):\n        return Arcsinh().symbolic_call(x)\n    return backend.numpy.arcsinh(x)", "docstring": "Inverse hyperbolic sine, element-wise.\n\nArguments:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.\n\nExample:\n>>> x = keras.ops.convert_to_tensor([1, -1, 0])\n>>> keras.ops.arcsinh(x)\narray([0.88137364, -0.88137364, 0.0], dtype=float32)", "source": "github-repos"}
{"code": "def decrypt_block(self, cipherText):\n    if (not self.initialized):\n        raise TypeError('CamCrypt object has not been initialized')\n    if (len(cipherText) != BLOCK_SIZE):\n        raise ValueError(('cipherText must be %d bytes long (received %d bytes)' % (BLOCK_SIZE, len(cipherText))))\n    plain = ctypes.create_string_buffer(BLOCK_SIZE)\n    self.decblock(self.bitlen, cipherText, self.keytable, plain)\n    return plain.raw", "docstring": "Decrypt a 16-byte block of data.\n\nNOTE: This function was formerly called `decrypt`, but was changed when\nsupport for decrypting arbitrary-length strings was added.\n\nArgs:\ncipherText (str): 16-byte data.\n\nReturns:\n16-byte str.\n\nRaises:\nTypeError if CamCrypt object has not been initialized.\nValueError if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes.", "source": "codesearchnet"}
{"code": "def commit(self, sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir):\n    (config_path, config_filename) = os.path.split(abs_config)\n    if (not os.path.exists(config_path)):\n        os.makedirs(config_path)\n    if (not os.path.exists(abs_sourcedir)):\n        os.makedirs(abs_sourcedir)\n    if (not os.path.exists(abs_targetdir)):\n        os.makedirs(abs_targetdir)\n    self.backend_engine.dump({'SOURCES_PATH': sourcedir, 'TARGET_PATH': targetdir, 'LIBRARY_PATHS': [], 'OUTPUT_STYLES': 'nested', 'SOURCE_COMMENTS': False, 'EXCLUDES': []}, abs_config, indent=4)", "docstring": "Commit project structure and configuration file\n\nArgs:\nsourcedir (string): Source directory path.\ntargetdir (string): Compiled files target directory path.\nabs_config (string): Configuration file absolute path.\nabs_sourcedir (string): ``sourcedir`` expanded as absolute path.\nabs_targetdir (string): ``targetdir`` expanded as absolute path.", "source": "codesearchnet"}
{"code": "def layout(mtf_graph, mesh_shape, mtf_outputs=()):\n  \n  mesh_shape = mtf.convert_to_shape(mesh_shape)\n  estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,\n                                               mtf_outputs)\n  optimizer = layout_optimizer.LayoutOptimizer(estimator)\n  return mtf.convert_to_layout_rules(optimizer.solve())", "docstring": "Compute layout rules based on a computational graph and mesh shape.\n\nArgs:\nmtf_graph: a mtf.Graph.\nmesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.\nmtf_outputs: an optional iterable of mtf.Tensor, representing the outputs\nof the computation.\n\nReturns:\na mtf.LayoutRules", "source": "juraj-google-style"}
{"code": "def _execute_with_retries(conn, function, **kwargs):\n    r = {}\n    max_attempts = 18\n    max_retry_delay = 10\n    for attempt in range(max_attempts):\n        log.info('attempt: %s function: %s', attempt, function)\n        try:\n            fn = getattr(conn, function)\n            r['result'] = fn(**kwargs)\n            return r\n        except botocore.exceptions.ClientError as e:\n            error_code = e.response['Error']['Code']\n            if (('LimitExceededException' in error_code) or ('ResourceInUseException' in error_code)):\n                log.debug('Retrying due to AWS exception', exc_info=True)\n                time.sleep(_jittered_backoff(attempt, max_retry_delay))\n            else:\n                r['error'] = e.response['Error']\n                log.error(r['error'])\n                r['result'] = None\n                return r\n    r['error'] = 'Tried to execute function {0} {1} times, but was unable'.format(function, max_attempts)\n    log.error(r['error'])\n    return r", "docstring": "Retry if we're rate limited by AWS or blocked by another call.\nGive up and return error message if resource not found or argument is invalid.\n\nconn\nThe connection established by the calling method via _get_conn()\n\nfunction\nThe function to call on conn. i.e. create_stream\n\n**kwargs\nAny kwargs required by the above function, with their keywords\ni.e. StreamName=stream_name\n\nReturns:\nThe result dict with the HTTP response and JSON data if applicable\nas 'result', or an error as 'error'\n\nCLI example::\n\nsalt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs", "source": "codesearchnet"}
{"code": "def isValidUnit(self, w):\n        \n        bad = set(['point', 'a'])\n        if w in bad:\n            return False\n\n        try:\n            pq.Quantity(0.0, w)\n            return True\n        except:\n            return w == '/'", "docstring": "Checks if a string represents a valid quantities unit.\n\nArgs:\nw (str): A string to be tested against the set of valid\nquantities units.\n\nReturns:\nTrue if the string can be used as a unit in the quantities\nmodule.", "source": "juraj-google-style"}
{"code": "def _constrain_L2_grad(op, grad):\n    inp = op.inputs[0]\n    inp_norm = tf.norm(inp)\n    unit_inp = (inp / inp_norm)\n    grad_projection = dot(unit_inp, grad)\n    parallel_grad = (unit_inp * grad_projection)\n    is_in_ball = tf.less_equal(inp_norm, 1)\n    is_pointed_inward = tf.less(grad_projection, 0)\n    allow_grad = tf.logical_or(is_in_ball, is_pointed_inward)\n    clip_grad = tf.logical_not(allow_grad)\n    clipped_grad = tf.cond(clip_grad, (lambda : (grad - parallel_grad)), (lambda : grad))\n    return clipped_grad", "docstring": "Gradient for constrained optimization on an L2 unit ball.\n\nThis function projects the gradient onto the ball if you are on the boundary\n(or outside!), but leaves it untouched if you are inside the ball.\n\nArgs:\nop: the tensorflow op we're computing the gradient for.\ngrad: gradient we need to backprop\n\nReturns:\n(projected if necessary) gradient.", "source": "codesearchnet"}
{"code": "def retrieve_instance_links(self):\n    instance_links = {}\n    self.log.debug('LINKS IS %s', LINKS)\n    for (key, value) in LINKS.items():\n        if (value not in self.pipeline_config['instance_links'].values()):\n            instance_links[key] = value\n    return instance_links", "docstring": "Appends on existing instance links\n\nReturns:\ninstance_links: A dictionary containing all the instance links in LINKS and not in pipeline_config", "source": "codesearchnet"}
{"code": "def authenticate(self, request):\n    request = request._request\n    user = getattr(request, 'user', None)\n    if ((not user) or user.is_anonymous):\n        return None\n    self.enforce_csrf(request)\n    return (user, None)", "docstring": "Authenticate the user, requiring a logged-in account and CSRF.\n\nThis is exactly the same as the `SessionAuthentication` implementation,\nwith the `user.is_active` check removed.\n\nArgs:\nrequest (HttpRequest)\n\nReturns:\nTuple of `(user, token)`\n\nRaises:\nPermissionDenied: The CSRF token check failed.", "source": "codesearchnet"}
{"code": "def decode(self, audio_codes: torch.Tensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]:\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    chunk_length = self.config.chunk_length\n    if chunk_length is None:\n        if len(audio_codes) != 1:\n            raise ValueError(f'Expected one frame, got {len(audio_codes)}')\n        audio_values = self._decode_frame(audio_codes[0], audio_scales[0])\n    else:\n        decoded_frames = []\n        for frame, scale in zip(audio_codes, audio_scales):\n            frames = self._decode_frame(frame, scale)\n            decoded_frames.append(frames)\n        audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1)\n    if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:\n        audio_values = audio_values[..., :padding_mask.shape[-1]]\n    if not return_dict:\n        return (audio_values,)\n    return EncodecDecoderOutput(audio_values)", "docstring": "Decodes the given frames into an output audio waveform.\n\nNote that the output might be a bit bigger than the input. In that case, any extra steps at the end can be\ntrimmed.\n\nArgs:\naudio_codes (`torch.LongTensor`  of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):\nDiscret code embeddings computed using `model.encode`.\naudio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):\nScaling factor for each `audio_codes` input.\npadding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):\nPadding mask used to pad the `input_values`.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def to_env_vars(self):\n    env = {'hosts': self.hosts, 'network_interface_name': self.network_interface_name, 'hps': self.hyperparameters, 'user_entry_point': self.user_entry_point, 'framework_params': self.additional_framework_parameters, 'resource_config': self.resource_config, 'input_data_config': self.input_data_config, 'output_data_dir': self.output_data_dir, 'channels': sorted(self.channel_input_dirs.keys()), 'current_host': self.current_host, 'module_name': self.module_name, 'log_level': self.log_level, 'framework_module': self.framework_module, 'input_dir': self.input_dir, 'input_config_dir': self.input_config_dir, 'output_dir': self.output_dir, 'num_cpus': self.num_cpus, 'num_gpus': self.num_gpus, 'model_dir': self.model_dir, 'module_dir': self.module_dir, 'training_env': dict(self), 'user_args': self.to_cmd_args(), 'output_intermediate_dir': self.output_intermediate_dir}\n    for (name, path) in self.channel_input_dirs.items():\n        env[('channel_%s' % name)] = path\n    for (key, value) in self.hyperparameters.items():\n        env[('hp_%s' % key)] = value\n    return _mapping.to_env_vars(env)", "docstring": "Environment variable representation of the training environment\n\nReturns:\ndict: an instance of dictionary", "source": "codesearchnet"}
{"code": "def GetMap(self, map_name, since=None, location=None):\n    if map_name == config.MAP_PASSWORD:\n        return self.GetPasswdMap(since)\n    elif map_name == config.MAP_SSHKEY:\n        return self.GetSshkeyMap(since)\n    elif map_name == config.MAP_GROUP:\n        return self.GetGroupMap(since)\n    elif map_name == config.MAP_SHADOW:\n        return self.GetShadowMap(since)\n    elif map_name == config.MAP_NETGROUP:\n        return self.GetNetgroupMap(since)\n    elif map_name == config.MAP_AUTOMOUNT:\n        return self.GetAutomountMap(since, location=location)\n    raise error.UnsupportedMap('Source can not fetch %s' % map_name)", "docstring": "Get a specific map from this source.\n\nArgs:\nmap_name: A string representation of the map you want\nsince: optional timestamp for incremental query\nlocation: optional field used by automounts to indicate a specific map\n\nReturns:\nA Map child class for the map requested.\n\nRaises:\nUnsupportedMap: for unknown source maps", "source": "github-repos"}
{"code": "def recipe_manual(config, auth_read):\n    hello(config, {'auth': auth_read, 'hour': [], 'say': 'Hello Manual', 'sleep': 0})", "docstring": "Used by tests.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.", "source": "github-repos"}
{"code": "def can_acomp(cat_id):\n    \n    url = 'https:\n    auth = Auth()\n    r = _req_with_retries(auth.gbdx_connection, url)\n    try: \n        data = r.json()\n        return data['acompVersion'] is not None\n    except:\n        return False", "docstring": "Checks to see if a CatalogID can be atmos. compensated or not.\n\nArgs:\ncatalogID (str): The catalog ID from the platform catalog.\nReturns:\navailable (bool): Whether or not the image can be acomp'd", "source": "juraj-google-style"}
{"code": "def appliance_node_information(self):\n    if (not self.__appliance_node_information):\n        self.__appliance_node_information = ApplianceNodeInformation(self.__connection)\n    return self.__appliance_node_information", "docstring": "Gets the ApplianceNodeInformation API client.\n\nReturns:\nApplianceNodeInformation:", "source": "codesearchnet"}
{"code": "def _make_headers(self, method, path, query={}, headers={}):\n        \n\n        date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n        nonce = self._make_nonce()\n        ctype = headers.get('Content-Type') if headers.get('Content-Type') else 'application/json'\n\n        auth = self._make_auth(method, date, nonce, path, query=query, ctype=ctype)\n\n        req_headers = {\n            'Content-Type': 'application/json',\n            'Date': date,\n            'On-Nonce': nonce,\n            'Authorization': auth,\n            'User-Agent': 'Onshape Python Sample App',\n            'Accept': 'application/json'\n        }\n\n        \n        for h in headers:\n            req_headers[h] = headers[h]\n\n        return req_headers", "docstring": "Creates a headers object to sign the request\n\nArgs:\n- method (str): HTTP method\n- path (str): Request path, e.g. /api/documents. No query string\n- query (dict, default={}): Query string in key-value format\n- headers (dict, default={}): Other headers to pass in\n\nReturns:\n- dict: Dictionary containing all headers", "source": "juraj-google-style"}
{"code": "def _CanProcessKeyWithPlugin(self, registry_key, plugin):\n    for registry_key_filter in plugin.FILTERS:\n        if getattr(registry_key_filter, 'key_paths', []):\n            continue\n        if registry_key_filter.Match(registry_key):\n            return True\n    return False", "docstring": "Determines if a plugin can process a Windows Registry key or its values.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\nplugin (WindowsRegistryPlugin): Windows Registry plugin.\n\nReturns:\nbool: True if the Registry key can be processed with the plugin.", "source": "codesearchnet"}
{"code": "def _acquire_given_subnet(self, uuid_path, subnet):\n        \n        lease = self.create_lease_object_from_subnet(subnet)\n        self._take_lease(lease, uuid_path)\n\n        return lease.to_ip_network()", "docstring": "Try to create a lease for subnet\n\nArgs:\nuuid_path (str): Path to the uuid file of a :class:`lago.Prefix`\nsubnet (str): dotted ipv4 subnet\n(for example ```192.168.200.0```)\n\nReturns:\nnetaddr.IPNetwork: Which represents the selected subnet\n\nRaises:\nLagoSubnetLeaseException: If the requested subnet is not in the\nrange of this store or its already been taken", "source": "juraj-google-style"}
{"code": "def __mod__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(self._value % other.value)", "docstring": "Returns `self` modulo `other`.\n\nDimension modulo are computed as follows:\n\n```python\ntf.compat.v1.Dimension(m)    % tf.compat.v1.Dimension(n)     ==\ntf.compat.v1.Dimension(m % n)\ntf.compat.v1.Dimension(m)    % tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n)     # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None)  # equiv. to\ntf.compat.v1.Dimension(None)\n```\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is `self` modulo `other`.", "source": "github-repos"}
{"code": "def AnalyzeEvents(self):\n    session = engine.BaseEngine.CreateSession(command_line_arguments=self._command_line_arguments, preferred_encoding=self.preferred_encoding)\n    storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(self._storage_file_path)\n    if (not storage_reader):\n        logger.error('Format of storage file: {0:s} not supported'.format(self._storage_file_path))\n        return\n    self._number_of_analysis_reports = storage_reader.GetNumberOfAnalysisReports()\n    storage_reader.Close()\n    configuration = self._CreateProcessingConfiguration(self._knowledge_base)\n    counter = collections.Counter()\n    if (self._output_format != 'null'):\n        self._status_view.SetMode(self._status_view_mode)\n        self._status_view.SetStorageFileInformation(self._storage_file_path)\n        status_update_callback = self._status_view.GetAnalysisStatusUpdateCallback()\n        storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(self._storage_file_path)\n        analysis_engine = psort.PsortMultiProcessEngine(use_zeromq=self._use_zeromq)\n        analysis_engine.ExportEvents(self._knowledge_base, storage_reader, self._output_module, configuration, deduplicate_events=self._deduplicate_events, status_update_callback=status_update_callback, time_slice=self._time_slice, use_time_slicer=self._use_time_slicer)\n    for (item, value) in iter(session.analysis_reports_counter.items()):\n        counter[item] = value\n    if self._quiet_mode:\n        return\n    self._output_writer.Write('Processing completed.\\n')\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Counter')\n    for (element, count) in counter.most_common():\n        if (not element):\n            element = 'N/A'\n        table_view.AddRow([element, count])\n    table_view.Write(self._output_writer)\n    storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(self._storage_file_path)\n    self._PrintAnalysisReportsDetails(storage_reader, self._number_of_analysis_reports)\n    self._output_writer.Write('Storage file is {0:s}\\n'.format(self._storage_file_path))", "docstring": "Analyzes events from a plaso storage file and generate a report.\n\nRaises:\nBadConfigOption: when a configuration parameter fails validation.\nRuntimeError: if a non-recoverable situation is encountered.", "source": "codesearchnet"}
{"code": "def most_uncertain_by_mask(self, mask, y):\n    idxs = np.where(mask)[0]\n    return idxs[np.argsort(np.abs((self.probs[(idxs, y)] - (1 / self.num_classes))))[:4]]", "docstring": "Extracts the first 4 most uncertain indexes from the ordered list of probabilities\n\nArguments:\nmask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else\ny (int): the selected class\n\nReturns:\nidxs (ndarray): An array of indexes of length 4", "source": "codesearchnet"}
{"code": "def create_state(self, state_manager):\n    pass", "docstring": "Uses the `state_manager` to create state for the FeatureColumn.\n\nArgs:\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables and variables.", "source": "github-repos"}
{"code": "def build_and_pickle_dump(self, abivalidate=False):\n    self.build()\n    if (not abivalidate):\n        return self.pickle_dump()\n    (isok, errors) = self.abivalidate_inputs()\n    if isok:\n        return self.pickle_dump()\n    errlines = []\n    for (i, e) in enumerate(errors):\n        errlines.append(('[%d] %s' % (i, e)))\n    raise ValueError('\\n'.join(errlines))", "docstring": "Build dirs and file of the `Flow` and save the object in pickle format.\nReturns 0 if success\n\nArgs:\nabivalidate: If True, all the input files are validate by calling\nthe abinit parser. If the validation fails, ValueError is raise.", "source": "codesearchnet"}
{"code": "def to_matrix(xx, yy, zz, xy, yz, xz):\n    matrix = np.array([[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]])\n    return matrix", "docstring": "Convert a list of matrix components to a symmetric 3x3 matrix.\nInputs should be in the order xx, yy, zz, xy, yz, xz.\n\nArgs:\nxx (float): xx component of the matrix.\nyy (float): yy component of the matrix.\nzz (float): zz component of the matrix.\nxy (float): xy component of the matrix.\nyz (float): yz component of the matrix.\nxz (float): xz component of the matrix.\n\nReturns:\n(np.array): The matrix, as a 3x3 numpy array.", "source": "codesearchnet"}
{"code": "def dynamics(start, end=None):\n\n    def _(sequence):\n        if (start in _dynamic_markers_to_velocity):\n            start_velocity = _dynamic_markers_to_velocity[start]\n            start_marker = start\n        else:\n            raise ValueError(('Unknown start dynamic: %s, must be in %s' % (start, _dynamic_markers_to_velocity.keys())))\n        if (end is None):\n            end_velocity = start_velocity\n            end_marker = start_marker\n        elif (end in _dynamic_markers_to_velocity):\n            end_velocity = _dynamic_markers_to_velocity[end]\n            end_marker = end\n        else:\n            raise ValueError(('Unknown end dynamic: %s, must be in %s' % (start, _dynamic_markers_to_velocity.keys())))\n        retval = sequence.__class__([Point(point) for point in sequence._elements])\n        velocity_interval = (((float(end_velocity) - float(start_velocity)) / (len(retval) - 1)) if (len(retval) > 1) else 0)\n        velocities = [int((start_velocity + (velocity_interval * pos))) for pos in range(len(retval))]\n        if (start_velocity > end_velocity):\n            retval[0]['dynamic'] = 'diminuendo'\n            retval[(- 1)]['dynamic'] = end_marker\n        elif (start_velocity < end_velocity):\n            retval[0]['dynamic'] = 'crescendo'\n            retval[(- 1)]['dynamic'] = end_marker\n        else:\n            retval[0]['dynamic'] = start_marker\n        for (point, velocity) in zip(retval, velocities):\n            point['velocity'] = velocity\n        return retval\n    return _", "docstring": "Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics.\n\nYou can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', ''ffff]\n\nArgs:\nstart: beginning dynamic marker, if no end is specified all notes will get this marker\nend: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker\n\nExample usage:\n\ns1 | dynamics('p')  # play a sequence in piano\ns2 | dynamics('p', 'ff')  # crescendo from p to ff\ns3 | dynamics('ff', 'p')  # diminuendo from ff to p", "source": "codesearchnet"}
{"code": "def permutation_matrix(permutation):\n    assert check_permutation(permutation)\n    n = len(permutation)\n    op_matrix = np_zeros((n, n), dtype=int)\n    for (i, j) in enumerate(permutation):\n        op_matrix[(j, i)] = 1\n    return Matrix(op_matrix)", "docstring": "r\"\"\"Return orthogonal permutation matrix for permutation tuple\n\nReturn an orthogonal permutation matrix :math:`M_\\sigma`\nfor a permutation :math:`\\sigma` defined by the image tuple\n:math:`(\\sigma(1), \\sigma(2),\\dots \\sigma(n))`,\nsuch that\n\n.. math::\n\nM_\\sigma \\vec{e}_i = \\vec{e}_{\\sigma(i)}\n\nwhere :math:`\\vec{e}_k` is the k-th standard basis vector.\nThis definition ensures a composition law:\n\n.. math::\n\nM_{\\sigma \\cdot \\tau} = M_\\sigma M_\\tau.\n\nThe column form of :math:`M_\\sigma` is thus given by\n\n.. math::\n\nM = (\n\\vec{e}_{\\sigma(1)},\n\\vec{e}_{\\sigma(2)},\n\\dots \\vec{e}_{\\sigma(n)}).\n\nArgs:\npermutation (tuple): A permutation image tuple (zero-based indices!)", "source": "codesearchnet"}
{"code": "def _get_value_from_match(self, key, match):\n    value = match.groups(1)[0]\n    clean_value = str(value).lstrip().rstrip()\n    if (clean_value == 'true'):\n        self._log.info('Got value of \"%s\" as boolean true.', key)\n        return True\n    if (clean_value == 'false'):\n        self._log.info('Got value of \"%s\" as boolean false.', key)\n        return False\n    try:\n        float_value = float(clean_value)\n        self._log.info('Got value of \"%s\" as float \"%f\".', key, float_value)\n        return float_value\n    except ValueError:\n        self._log.info('Got value of \"%s\" as string \"%s\".', key, clean_value)\n        return clean_value", "docstring": "Gets the value of the property in the given MatchObject.\n\nArgs:\nkey (str):           Key of the property looked-up.\nmatch (MatchObject): The matched property.\n\nReturn:\nThe discovered value, as a string or boolean.", "source": "codesearchnet"}
{"code": "def get_key(key, data_structure):\n    if (key == '/'):\n        return data_structure\n    path = key.split('/')\n    (path[0] or path.pop(0))\n    current_value = data_structure\n    while path:\n        current_key = path.pop(0)\n        try:\n            current_key = int(current_key)\n        except ValueError:\n            pass\n        try:\n            current_value = current_value[current_key]\n        except (KeyError, IndexError):\n            LOGGER.debug('failed to extract path {}'.format(key))\n            return None\n    return current_value", "docstring": "Helper method for extracting values from a nested data structure.\n\nArgs:\nkey (str): The path to the vales (a series of keys and indexes\nseparated by '/')\ndata_structure (dict or list): The data structure from which the\nvalue will be extracted.\n\nReturns:\nstr: The values associated with key", "source": "codesearchnet"}
{"code": "def FromId(architecture_id, error_on_unknown=True):\n    if not architecture_id:\n        return None\n    for arch in Architecture._ALL:\n        if arch.id == architecture_id:\n            return arch\n    if error_on_unknown:\n        raise InvalidEnumValue(architecture_id, 'Architecture', [value.id for value in Architecture._ALL])\n    return None", "docstring": "Gets the enum corresponding to the given architecture id.\n\nArgs:\narchitecture_id: str, The architecture id to parse\nerror_on_unknown: bool, True to raise an exception if the id is unknown,\nFalse to just return None.\n\nRaises:\nInvalidEnumValue: If the given value cannot be parsed.\n\nReturns:\nArchitectureTuple, One of the Architecture constants or None if the input\nis None.", "source": "github-repos"}
{"code": "def register_with_password(self, username, password):\n        \n        response = self.api.register(\n                auth_body={\"type\": \"m.login.dummy\"},\n                kind='user',\n                username=username,\n                password=password,\n        )\n        return self._post_registration(response)", "docstring": "Register for a new account on this HS.\n\nArgs:\nusername (str): Account username\npassword (str): Account password\n\nReturns:\nstr: Access Token\n\nRaises:\nMatrixRequestError", "source": "juraj-google-style"}
{"code": "def partial_derivative_mu(mu, sigma, low, high, data):\n    pd_mu = (np.sum((data - mu)) / (sigma ** 2))\n    pd_mu -= (len(data) * ((norm.pdf(low, mu, sigma) - norm.pdf(high, mu, sigma)) / (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))))\n    return (- pd_mu)", "docstring": "The partial derivative with respect to the mean.\n\nArgs:\nmu (float): the mean of the truncated normal\nsigma (float): the std of the truncated normal\nlow (float): the lower truncation bound\nhigh (float): the upper truncation bound\ndata (ndarray): the one dimension list of data points for which we want to calculate the likelihood\n\nReturns:\nfloat: the partial derivative evaluated at the given point", "source": "codesearchnet"}
{"code": "def increment(self, size: int):\n        \n        assert size >= 0, size\n\n        self.files += 1\n        self.size += size\n        self.bandwidth_meter.feed(size)", "docstring": "Increment the number of files downloaded.\n\nArgs:\nsize: The size of the file", "source": "juraj-google-style"}
{"code": "def experimental_run_functions_eagerly(run_eagerly):\n    return run_functions_eagerly(run_eagerly)", "docstring": "Enables / disables eager execution of `tf.function`s.\n\nCalling `tf.config.experimental_run_functions_eagerly(True)` will make all\ninvocations of `tf.function` run eagerly instead of running as a traced graph\nfunction.\n\nSee `tf.config.run_functions_eagerly` for an example.\n\nNote: This flag has no effect on functions passed into tf.data transformations\nas arguments. tf.data functions are never executed eagerly and are always\nexecuted as a compiled Tensorflow Graph.\n\nArgs:\nrun_eagerly: Boolean. Whether to run functions eagerly.\n\nReturns:\nNone", "source": "github-repos"}
{"code": "def find_divisors(n):\n    if (not isinstance(n, int)):\n        raise TypeError('Expecting a strictly positive integer')\n    if (n <= 0):\n        raise ValueError('Expecting a strictly positive integer')\n    for i in range(1, (int((n ** 0.5)) + 1)):\n        if ((n % i) == 0):\n            divisors = {i, (n \n            for divisor in divisors:\n                (yield divisor)", "docstring": "Find all the positive divisors of the given integer n.\n\nArgs:\nn (int): strictly positive integer\n\nReturns:\nA generator of all the positive divisors of n\n\nRaises:\nTypeError: if n is not an integer\nValueError: if n is negative", "source": "codesearchnet"}
{"code": "def _extend_op(values, leaf_op, empty_st_op=None):\n    if not isinstance(values, Sequence):\n        raise ValueError('Expected a list')\n    if not values:\n        raise ValueError('List cannot be empty')\n    if empty_st_op is None:\n        empty_st_op = empty_st_op_like_zeros(leaf_op)\n    value = values[0]\n    if isinstance(value, StructuredTensor):\n        empty_result = empty_st_op(values)\n        if not value.field_names():\n            return empty_result\n        new_fields = {}\n        for k in value.field_names():\n            new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op, empty_st_op)\n        return StructuredTensor.from_fields(new_fields, shape=empty_result.shape)\n    else:\n        return leaf_op(values)", "docstring": "Extend an op from RaggedTensor and Tensor to StructuredTensor.\n\nVisits all children of the structured tensor, and children of children,\napplying leaf_op whenever it reaches a leaf, and empty_st_op whenever\nit reaches an internal node without children.\n\nArgs:\nvalues: a list of structured tensors, ragged tensors, or tensors. All must\nhave the same type. If they are structured tensors, they must have the\nsame paths.\nleaf_op: an op for handling non-structured tensor.\nempty_st_op: op to create a structured tensor without fields.\n\nReturns:\nthe result of the extended op (a StructuredTensor, RaggedTensor, or Tensor)\n\nRaises:\nValueError:\nIf values is not a Sequence or is empty.", "source": "github-repos"}
{"code": "def read_classification_results(storage_client, file_path):\n    if storage_client:\n        success = False\n        retry_count = 0\n        while (retry_count < 4):\n            try:\n                blob = storage_client.get_blob(file_path)\n                if (not blob):\n                    return {}\n                if (blob.size > MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE):\n                    logging.warning('Skipping classification result because its too big: %d bytes for %s', blob.size, file_path)\n                    return None\n                buf = BytesIO()\n                blob.download_to_file(buf)\n                buf.seek(0)\n                success = True\n                break\n            except Exception:\n                retry_count += 1\n                time.sleep(5)\n        if (not success):\n            return None\n    else:\n        try:\n            with open(file_path, 'rb') as f:\n                buf = BytesIO(f.read())\n        except IOError:\n            return None\n    result = {}\n    if PY3:\n        buf = StringIO(buf.read().decode('UTF-8'))\n    for row in csv.reader(buf):\n        try:\n            image_filename = row[0]\n            if (image_filename.endswith('.png') or image_filename.endswith('.jpg')):\n                image_filename = image_filename[:image_filename.rfind('.')]\n            label = int(row[1])\n        except (IndexError, ValueError):\n            continue\n        result[image_filename] = label\n    return result", "docstring": "Reads classification results from the file in Cloud Storage.\n\nThis method reads file with classification results produced by running\ndefense on singe batch of adversarial images.\n\nArgs:\nstorage_client: instance of CompetitionStorageClient or None for local file\nfile_path: path of the file with results\n\nReturns:\ndictionary where keys are image names or IDs and values are classification\nlabels", "source": "codesearchnet"}
{"code": "def load_mutation_rates(path=None):\n    \n    \n    if path is None:\n        path = resource_filename(__name__, \"data/rates.txt\")\n    \n    rates = []\n    with open(path) as handle:\n        for line in handle:\n            if line.startswith(\"from\"): \n                continue\n            \n            line = [ x.encode('utf8') for x in line.strip().split() ]\n            rates.append(line)\n    \n    return rates", "docstring": "load sequence context-based mutation rates\n\nArgs:\npath: path to table of sequence context-based mutation rates. If None,\nthis defaults to per-trinucleotide rates provided by Kaitlin Samocha\n(Broad Institute).\n\nReturns:\nlist of [initial, changed, rate] lists e.g. [['AGA', 'ATA', '5e-8']]", "source": "juraj-google-style"}
{"code": "def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n    if not server_def:\n        raise ValueError('server_def is None.')\n    self._server_def = server_def\n    if self._context_handle:\n        server_def_str = server_def.SerializeToString()\n        pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs, server_def_str)\n        self._initialize_logical_devices()\n    self._clear_caches()\n    _device_parsing_cache.clear()", "docstring": "Allow setting a server_def on the context.\n\nWhen a server def is replaced, it effectively clears a bunch of caches\nwithin the context. If you attempt to use a tensor object that was pointing\nto a tensor on the remote device, it will raise an error.\n\nArgs:\nserver_def: A tensorflow::ServerDef proto. Enables execution on remote\ndevices.\nkeep_alive_secs: Num. seconds after which the remote end will hang up. As\nlong as the client is still alive, the server state for the context will\nbe kept alive. If the client is killed (or there is some failure), the\nserver will clean up its context keep_alive_secs after the final RPC it\nreceives.\n\nRaises:\nValueError: if server_def is None.", "source": "github-repos"}
{"code": "def find(self, *index):\n    assert (self.wrapFunction is not None)\n    if ((len(index) == 1) and isinstance(index[0], (tuple, list))):\n        index = index[0]\n    it = self._impl.find(Tuple(index)._impl)\n    if (it == self._impl.end()):\n        return None\n    else:\n        return self.wrapFunction(it)", "docstring": "Searches the current entity for an instance with the specified index.\n\nReturns:\nThe wanted instance if found, otherwise it returns `None`.", "source": "codesearchnet"}
{"code": "def recursive_import(root):\n    for _, name, _ in pkgutil.walk_packages(root.__path__, prefix=root.__name__ + '.'):\n        try:\n            importlib.import_module(name)\n        except (AttributeError, ImportError):\n            pass", "docstring": "Recursively imports all the sub-modules under a root package.\n\nArgs:\nroot: A python package.", "source": "github-repos"}
{"code": "def FromFile(cls, in_path):\n        \n\n        with open(in_path, \"rb\") as infile:\n            in_data = json.load(infile)\n\n        if not ('trace', 'selectors') in in_data:\n            raise ArgumentError(\"Invalid trace file format\", keys=in_data.keys(), expected=('trace', 'selectors'))\n\n        selectors = [DataStreamSelector.FromString(x) for x in in_data['selectors']]\n        readings = [IOTileReading(x['time'], DataStream.FromString(x['stream']).encode(), x['value'], reading_id=x['reading_id']) for x in in_data['trace']]\n\n        return SimulationTrace(readings, selectors=selectors)", "docstring": "Load a previously saved ascii representation of this simulation trace.\n\nArgs:\nin_path (str): The path of the input file that we should load.\n\nReturns:\nSimulationTrace: The loaded trace object.", "source": "juraj-google-style"}
{"code": "def get_replicas(self, service_id: str) -> str:\n    replicas = []\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve replication level of the service')\n    service_tasks = self._client.services.get(service_id).tasks()\n    for task in service_tasks:\n        if (task['Status']['State'] == 'running'):\n            replicas.append(task)\n    return len(replicas)", "docstring": "Get the replication level of a service.\n\nArgs:\nservice_id (str): docker swarm service id\n\nReturns:\nstr, replication level of the service", "source": "codesearchnet"}
{"code": "def traverse_postorder(self, leaves=True, internal=True):\n    s1 = deque()\n    s2 = deque()\n    s1.append(self)\n    while (len(s1) != 0):\n        n = s1.pop()\n        s2.append(n)\n        s1.extend(n.children)\n    while (len(s2) != 0):\n        n = s2.pop()\n        if ((leaves and n.is_leaf()) or (internal and (not n.is_leaf()))):\n            (yield n)", "docstring": "Perform a postorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"}
{"code": "def AddOption(self, descriptor, constant=False):\n    if self.initialized:\n        raise AlreadyInitializedError(('Config was already initialized when defining %s' % descriptor.name))\n    descriptor.section = descriptor.name.split('.')[0]\n    if (descriptor.name in self.type_infos):\n        logging.warning('Config Option %s multiply defined!', descriptor.name)\n    self.type_infos.Append(descriptor)\n    if constant:\n        self.constants.add(descriptor.name)\n    self.defaults[descriptor.name] = descriptor.GetDefault()\n    self.FlushCache()", "docstring": "Registers an option with the configuration system.\n\nArgs:\ndescriptor: A TypeInfoObject instance describing the option.\nconstant: If this is set, the option is treated as a constant - it can be\nread at any time (before parsing the configuration) and it's an error to\ntry to override it in a config file.\n\nRaises:\nRuntimeError: The descriptor's name must contain a . to denote the section\nname, otherwise we raise.\nAlreadyInitializedError: If the config has already been read it's too late\nto define new options.", "source": "codesearchnet"}
{"code": "def get_all_aminames(i_info):\n    for i in i_info:\n        try:\n            i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name\n        except AttributeError:\n            i_info[i]['aminame'] = 'Unknown'\n    return i_info", "docstring": "Get Image_Name for each instance in i_info.\n\nArgs:\ni_info (dict): information on instances and details.\nReturns:\ni_info (dict): i_info is returned with the aminame\nadded for each instance.", "source": "codesearchnet"}
{"code": "def _convert_to_tensors_or_sparse_tensors(a, b):\n    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')\n    if a.dtype.base_dtype not in _VALID_DTYPES:\n        raise TypeError(f\"'a' has invalid dtype `{a.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.\")\n    b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name='b')\n    if b.dtype.base_dtype != a.dtype.base_dtype:\n        raise TypeError(\"Types don't match, %s vs %s.\" % (a.dtype, b.dtype))\n    if isinstance(a, sparse_tensor.SparseTensor) and (not isinstance(b, sparse_tensor.SparseTensor)):\n        return (b, a, True)\n    return (a, b, False)", "docstring": "Convert to tensor types, and flip order if necessary.\n\nArgs:\na: `Tensor` or `SparseTensor` of the same type as `b`.\nb: `Tensor` or `SparseTensor` of the same type as `a`.\n\nReturns:\nTuple of `(a, b, flipped)`, where `a` and `b` have been converted to\n`Tensor` or `SparseTensor`, and `flipped` indicates whether the order has\nbeen flipped to make it dense,sparse instead of sparse,dense (since the set\nops do not support the latter).", "source": "github-repos"}
{"code": "def verify_oauth2_token(id_token, request, audience=None):\n    \n    return verify_token(\n        id_token, request, audience=audience,\n        certs_url=_GOOGLE_OAUTH2_CERTS_URL)", "docstring": "Verifies an ID Token issued by Google's OAuth 2.0 authorization server.\n\nArgs:\nid_token (Union[str, bytes]): The encoded token.\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\naudience (str): The audience that this token is intended for. This is\ntypically your application's OAuth 2.0 client ID. If None then the\naudience is not verified.\n\nReturns:\nMapping[str, Any]: The decoded token.", "source": "juraj-google-style"}
{"code": "def sign(check_request):\n    \n    if not isinstance(check_request, sc_messages.CheckRequest):\n        raise ValueError(u'Invalid request')\n    op = check_request.operation\n    if op is None or op.operationName is None or op.consumerId is None:\n        logging.error(u'Bad %s: not initialized => not signed', check_request)\n        raise ValueError(u'check request must be initialized with an operation')\n    md5 = hashlib.md5()\n    md5.update(op.operationName.encode('utf-8'))\n    md5.update(b'\\x00')\n    md5.update(op.consumerId.encode('utf-8'))\n    if op.labels:\n        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n    for value_set in op.metricValueSets:\n        md5.update(b'\\x00')\n        md5.update(value_set.metricName.encode('utf-8'))\n        for mv in value_set.metricValues:\n            metric_value.update_hash(md5, mv)\n\n    md5.update(b'\\x00')\n    if op.quotaProperties:\n        \n        \n        \n        md5.update(repr(op.quotaProperties).encode('utf-8'))\n\n    md5.update(b'\\x00')\n    return md5.digest()", "docstring": "Obtains a signature for an operation in a `CheckRequest`\n\nArgs:\nop (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an\noperation used in a `CheckRequest`\n\nReturns:\nstring: a secure hash generated from the operation", "source": "juraj-google-style"}
{"code": "def _ListDir(dirpath, pathtype):\n  \n  pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype)\n  childpaths = []\n  try:\n    file_obj = vfs.VFSOpen(pathspec)\n    for path in file_obj.ListNames():\n      \n      \n      \n      \n      if pathtype != rdf_paths.PathSpec.PathType.REGISTRY or path:\n        childpaths.append(path)\n  except IOError:\n    pass\n\n  return childpaths", "docstring": "Returns children of a given directory.\n\nThis function is intended to be used by the `PathComponent` subclasses to get\ninitial list of potential children that then need to be filtered according to\nthe rules of a specific component.\n\nArgs:\ndirpath: A path to the directory.\npathtype: The pathtype to use.\n\nRaises:\nValueError: in case of unsupported path types.", "source": "juraj-google-style"}
{"code": "def execute_add(args, root_dir=None):\n    \n\n    \n    \n    command = ' '.join(args['command'])\n\n    \n    instruction = {\n        'command': command,\n        'path': os.getcwd()\n    }\n    print_command_factory('add')(instruction, root_dir)", "docstring": "Add a new command to the daemon queue.\n\nArgs:\nargs['command'] (list(str)): The actual programm call. Something like ['ls', '-a'] or ['ls -al']\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "juraj-google-style"}
{"code": "def _get_stringlist_from_commastring(self, field):\n        \n        \n        strings = self.data.get(field)\n        if strings:\n            return strings.split(',')\n        else:\n            return list()", "docstring": "Return list of strings from comma separated list\n\nArgs:\nfield (str): Field containing comma separated list\n\nReturns:\nList[str]: List of strings", "source": "juraj-google-style"}
{"code": "def _get_class_frame_source(class_name):\n    \n    \n    for frame_info in inspect.stack():\n        try:\n            with open(frame_info[1]) as fp:\n                src = \"\".join(fp.readlines()[frame_info[2] - 1 :])\n        except IOError:\n            continue\n        if re.search(r\"\\bclass\\b\\s+\\b{}\\b\".format(class_name), src):\n            reader = six.StringIO(src).readline\n            tokens = tokenize.generate_tokens(reader)\n            source_tokens = []\n            indent_level = 0\n            base_indent_level = 0\n            has_base_level = False\n            for token, value, _, _, _ in tokens:  \n                source_tokens.append((token, value))\n                if token == tokenize.INDENT:\n                    indent_level += 1\n                elif token == tokenize.DEDENT:\n                    indent_level -= 1\n                    if has_base_level and indent_level <= base_indent_level:\n                        return (\n                            tokenize.untokenize(source_tokens),\n                            frame_info[0].f_globals,\n                            frame_info[0].f_locals,\n                        )\n                elif not has_base_level:\n                    has_base_level = True\n                    base_indent_level = indent_level\n    raise TypeError(\n        'Unable to retrieve source for class \"{}\"'.format(class_name)\n    )", "docstring": "Return the source code for a class by checking the frame stack.\n\nThis is necessary because it is not possible to get the source of a class\nbeing created by a metaclass directly.\n\nArgs:\nclass_name: The class to look for on the stack.\n\nReturns:\nThe source code for the requested class if the class was found and the\nsource was accessible.", "source": "juraj-google-style"}
{"code": "def _lookup_dependency(self, name, cached_dependencies=None):\n    if cached_dependencies:\n        return cached_dependencies.get(name)\n    return self._self_unconditional_dependency_names.get(name)", "docstring": "Look up a dependency by name.\n\nMay be overridden to include conditional dependencies.\n\nArgs:\nname: The local name of the dependency.\ncached_dependencies: Optional dict containing all computed dependencies\nreturned by `self._trackable_children()`.\n\nReturns:\nA `Trackable` object, or `None` if no dependency by this name was\nfound.", "source": "github-repos"}
{"code": "def _add_consequences(self, variant_obj, raw_variant_line):\n        \n        consequences = []\n        for consequence in SO_TERMS:\n            if consequence in raw_variant_line:\n                consequences.append(consequence)\n        \n        variant_obj.consequences = consequences", "docstring": "Add the consequences found for a variant\n\nArgs:\nvariant_obj (puzzle.models.Variant)\nraw_variant_line (str): A raw vcf variant line", "source": "juraj-google-style"}
{"code": "async def _on_report_notification(self, event):\n    conn_string = event.get('connection_string')\n    report = self._report_parser.deserialize_report(event.get('serialized_report'))\n    self.notify_event(conn_string, 'report', report)", "docstring": "Callback function called when a report event is received.\n\nArgs:\nevent (dict): The report_event", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.ComputeThreatListDiff = channel.unary_unary(\n            \"/google.cloud.webrisk.v1beta1.WebRiskServiceV1Beta1/ComputeThreatListDiff\",\n            request_serializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.ComputeThreatListDiffRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.ComputeThreatListDiffResponse.FromString,\n        )\n        self.SearchUris = channel.unary_unary(\n            \"/google.cloud.webrisk.v1beta1.WebRiskServiceV1Beta1/SearchUris\",\n            request_serializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchUrisRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchUrisResponse.FromString,\n        )\n        self.SearchHashes = channel.unary_unary(\n            \"/google.cloud.webrisk.v1beta1.WebRiskServiceV1Beta1/SearchHashes\",\n            request_serializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchHashesRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_webrisk__v1beta1_dot_proto_dot_webrisk__pb2.SearchHashesResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _load_third_party_packages(self):\n    modules = collections.defaultdict(set)\n    stubs = set()\n    for third_party_file in self._store.list_files('stubs'):\n        parts = third_party_file.split(path_utils.sep)\n        filename = parts[-1]\n        if filename == 'METADATA.toml' or parts[1] == '@tests':\n            continue\n        if filename.endswith('.pyi'):\n            stubs.add(parts[0])\n        name, _ = path_utils.splitext(parts[1])\n        modules[parts[0]].add(name)\n    packages = collections.defaultdict(set)\n    for package, names in modules.items():\n        for name in names:\n            if package in stubs:\n                packages[name].add(package)\n    return packages", "docstring": "Loads package and Python version information for typeshed/stubs/.\n\nstubs/ contains type information for third-party packages. Each top-level\ndirectory corresponds to one PyPI package and contains one or more modules,\nplus a metadata file (METADATA.toml). The top-level directory may contain a\n@tests subdirectory for typeshed testing.\n\nReturns:\nA mapping from module name to a set of package names.", "source": "github-repos"}
{"code": "def predict_array(self, arr):\n    precompute = self.precompute\n    self.precompute = False\n    pred = super().predict_array(arr)\n    self.precompute = precompute\n    return pred", "docstring": "This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called\nwith precompute set to true\n\nArgs:\narr: a numpy array to be used as input to the model for prediction purposes\nReturns:\na numpy array containing the predictions from the model", "source": "codesearchnet"}
{"code": "def get_ordered_names(self, features):\n        \n\n        idxs = np.where(\n            np.in1d(self.data.columns.values, np.array(features)))[0]\n        return list(self.data.columns[idxs].values)", "docstring": "Given a list of features, returns features in order that they\nappear in database.\n\nArgs:\nfeatures (list): A list or 1D numpy array of named features to\nreturn.\n\nReturns:\nA list of features in order they appear in database.", "source": "juraj-google-style"}
{"code": "def get_covalent_bonds(self, tol=0.2):\n        \n        bonds = []\n        for site1, site2 in itertools.combinations(self._sites, 2):\n            if CovalentBond.is_bonded(site1, site2, tol):\n                bonds.append(CovalentBond(site1, site2))\n        return bonds", "docstring": "Determines the covalent bonds in a molecule.\n\nArgs:\ntol (float): The tol to determine bonds in a structure. See\nCovalentBond.is_bonded.\n\nReturns:\nList of bonds", "source": "juraj-google-style"}
{"code": "def initialize_means(data, clusters, k):\n    init_w = np.zeros((data.shape[0], k))\n    if sparse.issparse(data):\n        for i in range(k):\n            if (data[(:, (clusters == i))].shape[1] == 0):\n                point = np.random.randint(0, data.shape[1])\n                init_w[(:, i)] = data[(:, point)].toarray().flatten()\n            else:\n                init_w[(:, i)] = (np.array(data[(:, (clusters == i))].mean(1)).flatten() + eps)\n    else:\n        for i in range(k):\n            if (data[(:, (clusters == i))].shape[1] == 0):\n                point = np.random.randint(0, data.shape[1])\n                init_w[(:, i)] = data[(:, point)].flatten()\n            else:\n                init_w[(:, i)] = (data[(:, (clusters == i))].mean(1) + eps)\n    return init_w", "docstring": "Initializes the M matrix given the data and a set of cluster labels.\nCluster centers are set to the mean of each cluster.\n\nArgs:\ndata (array): genes x cells\nclusters (array): 1d array of ints (0...k-1)\nk (int): number of clusters", "source": "codesearchnet"}
{"code": "def write_unitth(suites, out_dir):\n    if (not os.path.isdir(out_dir)):\n        os.mkdir(out_dir)\n    for (classname, cases) in suites.items():\n        doc_xml = minidom.Document()\n        suite_xml = doc_xml.createElement('testsuite')\n        suite_xml.setAttribute('name', classname)\n        suite_xml.setAttribute('tests', str(len(cases)))\n        suite_xml.setAttribute('errors', str(sum((('error' in case) for case in cases))))\n        suite_xml.setAttribute('failures', str(sum((('failure' in case) for case in cases))))\n        suite_xml.setAttribute('skipped', str(sum((('skipped' in case) for case in cases))))\n        suite_xml.setAttribute('time', '{:.3f}'.format(sum((case['time'] for case in cases))))\n        doc_xml.appendChild(suite_xml)\n        for case in cases:\n            case_xml = doc_xml.createElement('testcase')\n            case_xml.setAttribute('classname', classname)\n            case_xml.setAttribute('name', case['name'])\n            case_xml.setAttribute('time', '{:.3f}'.format(case['time']))\n            suite_xml.appendChild(case_xml)\n            if ('skipped' in case):\n                skipped_xml = doc_xml.createElement('skipped')\n                skipped_xml.setAttribute('type', case['skipped']['type'])\n                skipped_xml.setAttribute('message', case['skipped']['message'])\n                case_xml.appendChild(skipped_xml)\n                skipped_text_xml = doc_xml.createCDATASection(case['skipped']['text'])\n                skipped_xml.appendChild(skipped_text_xml)\n            if ('failure' in case):\n                failure_xml = doc_xml.createElement('failure')\n                failure_xml.setAttribute('type', case['failure']['type'])\n                failure_xml.setAttribute('message', case['failure']['message'])\n                case_xml.appendChild(failure_xml)\n                failure_text_xml = doc_xml.createCDATASection(case['failure']['text'])\n                failure_xml.appendChild(failure_text_xml)\n            if ('error' in case):\n                error_xml = doc_xml.createElement('error')\n                error_xml.setAttribute('type', case['error']['type'])\n                error_xml.setAttribute('message', case['error']['message'])\n                case_xml.appendChild(error_xml)\n                error_text_xml = doc_xml.createCDATASection(case['error']['text'])\n                error_xml.appendChild(error_text_xml)\n        with open(os.path.join(out_dir, '{}.xml'.format(classname)), 'w') as output:\n            doc_xml.writexml(output, encoding='utf-8', addindent='', newl='')\n        doc_xml.unlink()", "docstring": "Write UnitTH-style test reports\n\nArgs:\nsuites (:obj:`dict`): dictionary of test suites\nout_dir (:obj:`str`): path to save UnitTH-style test reports", "source": "codesearchnet"}
{"code": "def cache_memlimit(self, memlimit):\n        \n\n        self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False)\n        return True", "docstring": "The memcached \"cache_memlimit\" command.\n\nArgs:\nmemlimit: int, the number of megabytes to set as the new cache memory\nlimit.\n\nReturns:\nIf no exception is raised, always returns True.", "source": "juraj-google-style"}
{"code": "def find_overlaps(self, index=False):\n        \n        return self.__find_incongruities(op=operator.gt, index=index)", "docstring": "Find overlaps in a striplog.\n\nArgs:\nindex (bool): If True, returns indices of intervals with\ngaps after them.\n\nReturns:\nStriplog: A striplog of all the overlaps as intervals.", "source": "juraj-google-style"}
{"code": "def same_intersection(intersection1, intersection2, wiggle=(0.5 ** 40)):\n    if (intersection1.index_first != intersection2.index_first):\n        return False\n    if (intersection1.index_second != intersection2.index_second):\n        return False\n    return np.allclose([intersection1.s, intersection1.t], [intersection2.s, intersection2.t], atol=0.0, rtol=wiggle)", "docstring": "Check if two intersections are close to machine precision.\n\n.. note::\n\nThis is a helper used only by :func:`verify_duplicates`, which in turn\nis only used by :func:`generic_intersect`.\n\nArgs:\nintersection1 (.Intersection): The first intersection.\nintersection2 (.Intersection): The second intersection.\nwiggle (Optional[float]): The amount of relative error allowed\nin parameter values.\n\nReturns:\nbool: Indicates if the two intersections are the same to\nmachine precision.", "source": "codesearchnet"}
{"code": "def publish(msg='checkpoint: publish package'):\n    test = check()\n    if test.succeeded:\n        sdist = local('python setup.py sdist')\n        if sdist.succeeded:\n            build = local('python setup.py build && python setup.py bdist_egg')\n            if build.succeeded:\n                upload = local('twine upload dist/*')\n                if upload.succeeded:\n                    tag()", "docstring": "Deploy the app to PYPI.\n\nArgs:\nmsg (str, optional): Description", "source": "codesearchnet"}
{"code": "def get_object(cls, api_token, id):\n    load_balancer = cls(token=api_token, id=id)\n    load_balancer.load()\n    return load_balancer", "docstring": "Class method that will return a LoadBalancer object by its ID.\n\nArgs:\napi_token (str): DigitalOcean API token\nid (str): Load Balancer ID", "source": "codesearchnet"}
{"code": "def country(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `country`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `country`')\n\n        self._country = value", "docstring": "Corresponds to IDD Field `country`\n\nArgs:\nvalue (str): value for IDD Field `country`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def pretty_printer(cls, primitive_handler_: primitive_handler.PrimitiveHandler, indent_size: int) -> 'JsonPrinter':\n    return cls(primitive_handler_, _PrettyJsonTextGenerator(indent_size), _FhirJsonFormat.PURE)", "docstring": "Returns a printer for FHIR JSON with spaces and newlines.\n\nArgs:\nprimitive_handler_: Responsible for returning PrimitiveWrappers.\nindent_size: The size of space indentation for lexical scoping.", "source": "github-repos"}
{"code": "def _set_mtu_to_nics(self, conf):\n        \n        for dom_name, dom_spec in conf.get('domains', {}).items():\n            for idx, nic in enumerate(dom_spec.get('nics', [])):\n                net = self._get_net(conf, dom_name, nic)\n                mtu = net.get('mtu', 1500)\n                if mtu != 1500:\n                    nic['mtu'] = mtu", "docstring": "For all the nics of all the domains in the conf that have MTU set,\nsave the MTU on the NIC definition.\n\nArgs:\nconf (dict): Configuration spec to extract the domains from\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def fullStats(a, b):\n    stats = [['bias', 'Bias', bias(a, b)], ['stderr', 'Standard Deviation Error', stderr(a, b)], ['mae', 'Mean Absolute Error', mae(a, b)], ['rmse', 'Root Mean Square Error', rmse(a, b)], ['nmse', 'Normalized Mean Square Error', nmse(a, b)], ['mfbe', 'Mean Fractionalized bias Error', mfbe(a, b)], ['fa2', 'Factor of Two', fa(a, b, 2)], ['foex', 'Factor of Exceedance', foex(a, b)], ['correlation', 'Correlation R', correlation(a, b)], ['determination', 'Coefficient of Determination r2', determination(a, b)], ['gmb', 'Geometric Mean Bias', gmb(a, b)], ['gmv', 'Geometric Mean Variance', gmv(a, b)], ['fmt', 'Figure of Merit in Time', fmt(a, b)]]\n    rec = np.rec.fromrecords(stats, names=('stat', 'description', 'result'))\n    df = pd.DataFrame.from_records(rec, index='stat')\n    return df", "docstring": "Performs several stats on a against b, typically a is the predictions\narray, and b the observations array\n\nReturns:\nA dataFrame of stat name, stat description, result", "source": "codesearchnet"}
{"code": "def __init__(\n      self, resolver_context, file_system, path_spec, is_root=False,\n      is_virtual=False):\n    \n    bde_volume = file_system.GetBDEVolume()\n    if bde_volume is None:\n      raise errors.BackEndError('Missing BDE volume.')\n\n    super(BDEFileEntry, self).__init__(\n        resolver_context, file_system, path_spec, is_root=is_root,\n        is_virtual=is_virtual)\n    self._bde_volume = bde_volume\n    self.entry_type = definitions.FILE_ENTRY_TYPE_FILE", "docstring": "Initializes the file entry object.\n\nArgs:\nresolver_context (Context): resolver context.\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\nis_root (Optional[bool]): True if the file entry is the root file entry\nof the corresponding file system.\nis_virtual (Optional[bool]): True if the file entry is a virtual file\n\nRaises:\nBackEndError: when the BDE volume is missing.", "source": "juraj-google-style"}
{"code": "def register_for_auto_class(cls, auto_class='AutoModel'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom models as the ones in the\nlibrary are already mapped with an auto class.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"AutoModel\"`):\nThe auto class to register this new model with.", "source": "github-repos"}
{"code": "def asset(self, asset_id, asset_type, action='GET'):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if asset_type == 'PHONE':\n            return self.tc_requests.adversary_phone_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        if asset_type == 'HANDLER':\n            return self.tc_requests.adversary_handle_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        if asset_type == 'URL':\n            return self.tc_requests.adversary_url_asset(\n                self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action\n            )\n        self._tcex.handle_error(\n            925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]\n        )\n        return None", "docstring": "Gets the asset with the provided id\nArgs:\nasset_id: The id of the asset to be retrieved\nasset_type: (str) Either PHONE, HANDLER, or URL\naction:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def cancel(\n        self,\n        accountID,\n        orderSpecifier,\n        **kwargs\n    ):\n        \n\n        request = Request(\n            'PUT',\n            '/v3/accounts/{accountID}/orders/{orderSpecifier}/cancel'\n        )\n\n        request.set_path_param(\n            'accountID',\n            accountID\n        )\n\n        request.set_path_param(\n            'orderSpecifier',\n            orderSpecifier\n        )\n\n        response = self.ctx.request(request)\n\n\n        if response.content_type is None:\n            return response\n\n        if not response.content_type.startswith(\"application/json\"):\n            return response\n\n        jbody = json.loads(response.raw_body)\n\n        parsed_body = {}\n\n        \n        \n        \n        if str(response.status) == \"200\":\n            if jbody.get('orderCancelTransaction') is not None:\n                parsed_body['orderCancelTransaction'] = \\\n                    self.ctx.transaction.OrderCancelTransaction.from_dict(\n                        jbody['orderCancelTransaction'],\n                        self.ctx\n                    )\n\n            if jbody.get('relatedTransactionIDs') is not None:\n                parsed_body['relatedTransactionIDs'] = \\\n                    jbody.get('relatedTransactionIDs')\n\n            if jbody.get('lastTransactionID') is not None:\n                parsed_body['lastTransactionID'] = \\\n                    jbody.get('lastTransactionID')\n\n        elif str(response.status) == \"401\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"404\":\n            if jbody.get('orderCancelRejectTransaction') is not None:\n                parsed_body['orderCancelRejectTransaction'] = \\\n                    self.ctx.transaction.OrderCancelRejectTransaction.from_dict(\n                        jbody['orderCancelRejectTransaction'],\n                        self.ctx\n                    )\n\n            if jbody.get('relatedTransactionIDs') is not None:\n                parsed_body['relatedTransactionIDs'] = \\\n                    jbody.get('relatedTransactionIDs')\n\n            if jbody.get('lastTransactionID') is not None:\n                parsed_body['lastTransactionID'] = \\\n                    jbody.get('lastTransactionID')\n\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        elif str(response.status) == \"405\":\n            if jbody.get('errorCode') is not None:\n                parsed_body['errorCode'] = \\\n                    jbody.get('errorCode')\n\n            if jbody.get('errorMessage') is not None:\n                parsed_body['errorMessage'] = \\\n                    jbody.get('errorMessage')\n\n        \n        \n        \n        else:\n            parsed_body = jbody\n\n        response.body = parsed_body\n\n        return response", "docstring": "Cancel a pending Order in an Account\n\nArgs:\naccountID:\nAccount Identifier\norderSpecifier:\nThe Order Specifier\n\nReturns:\nv20.response.Response containing the 
results from submitting the\nrequest", "source": "juraj-google-style"}
{"code": "def read(alias_name, allow_none=False):\n    \n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    return core.read('{0}_PORT'.format(alias_name), default=None, allow_none=allow_none)", "docstring": "Get the raw docker link value.\n\nGet the raw environment variable for the docker link\n\nArgs:\nalias_name: The environment variable name\ndefault: The default value if the link isn't available\nallow_none: If the return value can be `None` (i.e. optional)", "source": "juraj-google-style"}
{"code": "def cancelPnLSingle(self, account: str, modelCode: str, conId: int):\n    key = (account, modelCode, conId)\n    reqId = self.wrapper.pnlSingleKey2ReqId.pop(key, None)\n    if reqId:\n        self.client.cancelPnLSingle(reqId)\n        self.wrapper.pnlSingles.pop(reqId, None)\n    else:\n        self._logger.error(f'cancelPnLSingle: No subscription for account {account}, modelCode {modelCode}, conId {conId}')", "docstring": "Cancel PnLSingle subscription for the given account, modelCode\nand conId.\n\nArgs:\naccount: Cancel for this account name.\nmodelCode: Cancel for this account model.\nconId: Cancel for this contract ID.", "source": "codesearchnet"}
{"code": "def default_datastore_policy(key):\n    flag = None\n    if (key is not None):\n        modelclass = model.Model._kind_map.get(key.kind())\n        if (modelclass is not None):\n            policy = getattr(modelclass, '_use_datastore', None)\n            if (policy is not None):\n                if isinstance(policy, bool):\n                    flag = policy\n                else:\n                    flag = policy(key)\n    return flag", "docstring": "Default datastore policy.\n\nThis defers to _use_datastore on the Model class.\n\nArgs:\nkey: Key instance.\n\nReturns:\nA bool or None.", "source": "codesearchnet"}
{"code": "def _read_mode_tcpao(self, size, kind):\n    key_ = self._read_unpack(1)\n    rkey = self._read_unpack(1)\n    mac_ = self._read_fileng((size - 2))\n    data = dict(kind=kind, length=size, keyid=key_, rnextkeyid=rkey, mac=mac_)\n    return data", "docstring": "Read Authentication option.\n\nPositional arguments:\n* size - int, length of option\n* kind - int, 29 (TCP Authentication Option)\n\nReturns:\n* dict -- extracted Authentication (AO) option\n\nStructure of TCP AOopt [RFC 5925]:\n+------------+------------+------------+------------+\n|  Kind=29   |   Length   |   KeyID    | RNextKeyID |\n+------------+------------+------------+------------+\n|                     MAC           ...\n+-----------------------------------...\n\n...-----------------+\n...  MAC (con't)    |\n...-----------------+\n\nOctets      Bits        Name                    Description\n0           0     tcp.ao.kind             Kind (29)\n1           8     tcp.ao.length           Length\n2          16     tcp.ao.keyid            KeyID\n3          24     tcp.ao.rnextkeyid       RNextKeyID\n4          32     tcp.ao.mac              Message Authentication Code", "source": "codesearchnet"}
{"code": "def get_mpkg_ids(mpkg):\n    mpkg = _quote(mpkg)\n    package_infos = []\n    base_path = os.path.dirname(mpkg)\n    cmd = 'find {0} -name *.pkg'.format(base_path)\n    out = __salt__['cmd.run'](cmd, python_shell=True)\n    pkg_files = out.split('\\n')\n    for p in pkg_files:\n        package_infos.extend(get_pkg_id(p))\n    return package_infos", "docstring": "Attempt to get the package IDs from a mounted .mpkg file\n\nArgs:\nmpkg (str): The location of the mounted mpkg file\n\nReturns:\nlist: List of package IDs\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' macpackage.get_mpkg_ids /dev/disk2", "source": "codesearchnet"}
{"code": "def sensor_id(self):\n    if hasattr(self, '_sensor_id'):\n        return self._sensor_id\n    relationships = self._json_data.get('relationships')\n    sensor_id = relationships.get('sensor').get('data').get('id')\n    self._sensor_id = sensor_id\n    return sensor_id", "docstring": "The id of the sensor of this data point.\n\nReturns:\n\nThe id of the sensor that generated this datapoint. Will\nthrow an AttributeError if no sensor id was found in the\nunderlyign data.", "source": "codesearchnet"}
{"code": "def _add_future(cls, future):\n    if cls._local._activated:\n        cls._local._in_order_futures.add(future)", "docstring": "Adds a future to the list of in-order futures thus far.\n\nArgs:\nfuture: The future to add to the list.", "source": "codesearchnet"}
{"code": "def __init__(self, action, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n    _check_type(action, str)\n    self.action = action\n    _check_type(debug_urls, list)\n    self.debug_urls = debug_urls\n    self.debug_ops = debug_ops\n    self.node_name_regex_allowlist = node_name_regex_allowlist\n    self.op_type_regex_allowlist = op_type_regex_allowlist\n    self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n    self.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures", "docstring": "Constructor of `OnRunStartResponse`.\n\nArgs:\naction: (`OnRunStartAction`) the action actually taken by the wrapped\nsession for the run() call.\ndebug_urls: (`list` of `str`) debug_urls used in watching the tensors\nduring the run() call.\ndebug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the\ndebugger.\nnode_name_regex_allowlist: Regular-expression allowlist for node\nname.\nop_type_regex_allowlist: Regular-expression allowlist for op type.\ntensor_dtype_regex_allowlist: Regular-expression allowlist for tensor\ndtype.\ntolerate_debug_op_creation_failures: Whether debug op creation failures\nare to be tolerated.", "source": "github-repos"}
{"code": "def setup(self, target_directory=None):  \n    \n    self._target_directory = target_directory\n    if not target_directory:\n      self._target_directory = tempfile.mkdtemp()\n    elif not os.path.exists(target_directory):\n      try:\n        os.makedirs(target_directory)\n      except OSError as exception:\n        message = 'An unknown error occurred: {0!s}'.format(exception)\n        self.state.add_error(message, critical=True)", "docstring": "Sets up the _target_directory attribute.\n\nArgs:\ntarget_directory: Directory in which collected files will be dumped.", "source": "juraj-google-style"}
{"code": "def default(self, obj):\n        \n        if isinstance(obj, decimal.Decimal):\n            obj = format(obj, 'f')\n            str_digit = text_type(obj)\n\n            return (str_digit.rstrip('0').rstrip('.')\n                    if '.' in str_digit\n                    else str_digit)\n\n        elif isinstance(obj, phonenumbers.PhoneNumber):\n            return phonenumbers.format_number(\n                obj,\n                phonenumbers.PhoneNumberFormat.E164\n            )\n\n        elif isinstance(obj, pendulum.Pendulum):\n            return text_type(obj)\n\n        elif isinstance(obj, arrow.Arrow):\n            return text_type(obj)\n\n        elif isinstance(obj, (datetime.datetime, datetime.date)):\n            return obj.isoformat()\n\n        try:\n            return list(iter(obj))\n        except TypeError:\n            pass\n\n        return super(FleakerJSONEncoder, self).default(obj)", "docstring": "Encode individual objects into their JSON representation.\n\nThis method is used by :class:`flask.json.JSONEncoder` to encode\nindividual items in the JSON object.\n\nArgs:\nobj (object): Any Python object we wish to convert to JSON.\n\nReturns:\nstr: The stringified, valid JSON representation of our provided\nobject.", "source": "juraj-google-style"}
{"code": "def report(self, branch, commit, infourl=None):\n    issue_number = self._get_report_issue_number()\n    if issue_number:\n        self._report_as_comment(issue_number, branch, commit, infourl)\n    else:\n        self._report_as_issue(branch, commit, infourl)", "docstring": "Report on GitHub that the specified branch is failing to build at\nthe specified commit. The method will open an issue indicating that\nthe branch is failing. If there is an issue already open, it will add a\ncomment avoiding to report twice about the same failure.\n\nArgs:\nbranch (str): branch name to report about.\ncommit (str): commit hash at which the build fails.\ninfourl (str): URL with extra info about the failure such as the\nbuild logs.", "source": "codesearchnet"}
{"code": "def minimum_image(self, r1, r2):\n    delta_r = (r2 - r1)\n    delta_r = np.array([((x - math.copysign(1.0, x)) if (abs(x) > 0.5) else x) for x in delta_r])\n    return delta_r", "docstring": "Find the minimum image vector from point r1 to point r2.\n\nArgs:\nr1 (np.array): fractional coordinates of point r1.\nr2 (np.array): fractional coordinates of point r2.\n\nReturns:\n(np.array): the fractional coordinate vector from r1 to the nearest image of r2.", "source": "codesearchnet"}
{"code": "def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,\n                             probe_rad=0.1):\n    \n    with ScratchDir('.'):\n        name = \"temp_zeo\"\n        zeo_inp_filename = name + \".cssr\"\n        ZeoCssr(structure).write_file(zeo_inp_filename)\n\n        rad_file = None\n        if rad_dict:\n            rad_file = name + \".rad\"\n            with open(rad_file, 'w') as fp:\n                for el in rad_dict.keys():\n                    fp.write(\"{0}     {1}\".format(el, rad_dict[el]))\n\n        atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)\n        vol_str = volume(atmnet, 0.3, probe_rad, 10000)\n        sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)\n        vol = None\n        sa = None\n        for line in vol_str.split(\"\\n\"):\n            if \"Number_of_pockets\" in line:\n                fields = line.split()\n                if float(fields[1]) > 1:\n                    vol = -1.0\n                    break\n                if float(fields[1]) == 0:\n                    vol = -1.0\n                    break\n                vol = float(fields[3])\n        for line in sa_str.split(\"\\n\"):\n            if \"Number_of_pockets\" in line:\n                fields = line.split()\n                if float(fields[1]) > 1:\n                    \n                    sa = -1.0\n                    break\n                if float(fields[1]) == 0:\n                    sa = -1.0\n                    break\n                sa = float(fields[3])\n\n    if not vol or not sa:\n        raise ValueError(\"Error in zeo++ output stream\")\n    return vol, sa", "docstring": "Computes the volume and surface area of isolated void using Zeo++.\nUseful to compute the volume and surface area of vacant site.\n\nArgs:\nstructure: pymatgen Structure containing vacancy\nrad_dict(optional): Dictionary with short name of elements and their\nradii.\nchan_rad(optional): Minimum channel Radius.\nprobe_rad(optional): Probe radius for Monte Carlo sampling.\n\nReturns:\nvolume: floating number representing the volume of void", "source": "juraj-google-style"}
{"code": "def qualified_name(self):\n    o = VersionedObject.construct(self.name, self.version)\n    return str(o)", "docstring": "Get the qualified name of the package.\n\nReturns:\nstr: Name of the package with version, eg \"maya-2016.1\".", "source": "codesearchnet"}
{"code": "def douglas_adi_step(theta=0.5):\n\n    def _step_fn(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, num_steps_performed, dtype=None, name=None):\n        \n        del num_steps_performed\n        name = name or 'douglas_adi_step'\n        return multidim_parabolic_equation_step(time, next_time, coord_grid, value_grid, boundary_conditions, douglas_adi_scheme(theta), second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, dtype=dtype, name=name)\n    return _step_fn", "docstring": "Creates a stepper function with Crank-Nicolson time marching scheme.\n\nDouglas ADI scheme is the simplest time marching scheme for solving parabolic\nPDEs with multiple spatial dimensions. The time step consists of several\nsubsteps: the first one is fully explicit, and the following `N` steps are\nimplicit with respect to contributions of one of the `N` axes (hence \"ADI\" -\nalternating direction implicit). See `douglas_adi_scheme` below for more\ndetails.\n\nArgs:\ntheta: positive Number. `theta = 0` corresponds to fully explicit scheme.\nThe larger `theta` the stronger are the corrections by the implicit\nsubsteps. The recommended value is `theta = 0.5`, because the scheme is\nsecond order accurate in that case, unless mixed second derivative terms are\npresent in the PDE.\nReturns:\nCallable to be used in finite-difference PDE solvers (see fd_solvers.py).", "source": "github-repos"}
{"code": "def parse(self, argument):\n    if isinstance(argument, self.enum_class):\n        return argument\n    if (argument not in self.enum_class.__members__):\n        raise ValueError(('value should be one of <%s>' % '|'.join(self.enum_class.__members__.keys())))\n    else:\n        return self.enum_class[argument]", "docstring": "Determines validity of argument and returns the correct element of enum.\n\nArgs:\nargument: str or Enum class member, the supplied flag value.\n\nReturns:\nThe first matching Enum class member in Enum class.\n\nRaises:\nValueError: Raised when argument didn't match anything in enum.", "source": "codesearchnet"}
{"code": "def set_time(self, value: float):\n    if (value < 0):\n        value = 0\n    self.offset += (self.get_time() - value)", "docstring": "Set the current time. This can be used to jump in the timeline.\n\nArgs:\nvalue (float): The new time", "source": "codesearchnet"}
{"code": "def __register_methods(self, parsed_config):\n    \n    methods = parsed_config.get('methods')\n    if not methods:\n      return\n\n    for method_name, method in methods.iteritems():\n      self.__api_methods[method_name] = method.get('rosyMethod')", "docstring": "Register all methods from the given api config file.\n\nMethods are stored in a map from method_name to rosyMethod,\nthe name of the ProtoRPC method to be called on the backend.\nIf no rosyMethod was specified the value will be None.\n\nArgs:\nparsed_config: The JSON object with the API configuration being added.", "source": "juraj-google-style"}
{"code": "def run(self):\n    if (not self.block):\n        self.output = []\n        self.error = []\n        self.thread = threading.Thread(target=self.run_non_blocking)\n        self.thread.start()\n    else:\n        self.__create_process()\n        self.process.wait()\n        if (self._stdout is not None):\n            self.output = self.process.stdout.read().decode('utf-8')\n        if (self._stderr is not None):\n            self.error = self.process.stderr.read().decode('utf-8')\n        self.return_code = self.process.returncode\n    return self", "docstring": "Run the shell command\n\nReturns:\nShellCommand: return this ShellCommand instance for chaining", "source": "codesearchnet"}
{"code": "def train(total_loss, global_step):\n  \n  \n  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n\n  \n  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n                                  global_step,\n                                  decay_steps,\n                                  LEARNING_RATE_DECAY_FACTOR,\n                                  staircase=True)\n  tf.summary.scalar('learning_rate', lr)\n\n  \n  loss_averages_op = _add_loss_summaries(total_loss)\n\n  \n  with tf.control_dependencies([loss_averages_op]):\n    opt = tf.train.GradientDescentOptimizer(lr)\n    grads = opt.compute_gradients(total_loss)\n\n  \n  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n  \n  for var in tf.trainable_variables():\n    tf.summary.histogram(var.op.name, var)\n\n  \n  for grad, var in grads:\n    if grad is not None:\n      tf.summary.histogram(var.op.name + '/gradients', grad)\n\n  \n  variable_averages = tf.train.ExponentialMovingAverage(\n      MOVING_AVERAGE_DECAY, global_step)\n  variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n    train_op = tf.no_op(name='train')\n\n  return train_op", "docstring": "Train CIFAR-10 model.\n\nCreate an optimizer and apply to all trainable variables. Add moving\naverage for all trainable variables.\n\nArgs:\ntotal_loss: Total loss from loss().\nglobal_step: Integer Variable counting the number of training steps\nprocessed.\nReturns:\ntrain_op: op for training.", "source": "juraj-google-style"}
{"code": "def continuous_partition_data(data, bins='auto', n_bins=10):\n    if (bins == 'uniform'):\n        bins = np.linspace(start=np.min(data), stop=np.max(data), num=(n_bins + 1))\n    elif (bins == 'ntile'):\n        bins = np.percentile(data, np.linspace(start=0, stop=100, num=(n_bins + 1)))\n    elif (bins != 'auto'):\n        raise ValueError('Invalid parameter for bins argument')\n    (hist, bin_edges) = np.histogram(data, bins, density=False)\n    return {'bins': bin_edges, 'weights': (hist / len(data))}", "docstring": "Convenience method for building a partition object on continuous data\n\nArgs:\ndata (list-like): The data from which to construct the estimate.\nbins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)\nn_bins (int): Ignored if bins is auto.\n\nReturns:\nA new partition_object::\n\n{\n\"bins\": (list) The endpoints of the partial partition of reals,\n\"weights\": (list) The densities of the bins implied by the partition.\n}", "source": "codesearchnet"}
{"code": "def mangle_scope_tree(root, toplevel):\n\n    def mangle(scope):\n        if ((scope.get_enclosing_scope() is None) and (not toplevel)):\n            return\n        for name in scope.symbols:\n            mangled_name = scope.get_next_mangled_name()\n            scope.mangled[name] = mangled_name\n            scope.rev_mangled[mangled_name] = name\n\n    def visit(node):\n        mangle(node)\n        for child in node.children:\n            visit(child)\n    visit(root)", "docstring": "Walk over a scope tree and mangle symbol names.\n\nArgs:\ntoplevel: Defines if global scope should be mangled or not.", "source": "codesearchnet"}
{"code": "def get_or_create_node(self, level, entities, *args, **kwargs):\n    result = self.get_nodes(level, entities)\n    if result:\n        if (len(result) > 1):\n            raise ValueError(\"More than one matching Node found! If you're expecting more than one Node, use get_nodes() instead of get_or_create_node().\")\n        return result[0]\n    if (level == 'run'):\n        node = RunNode(entities, *args, **kwargs)\n    else:\n        node = Node(level, entities)\n    entities = dict(entities, node_index=len(self.nodes), level=level)\n    self.nodes.append(node)\n    node_row = pd.Series(entities)\n    self.index = self.index.append(node_row, ignore_index=True)\n    return node", "docstring": "Retrieves a child Node based on the specified criteria, creating a\nnew Node if necessary.\n\nArgs:\nentities (dict): Dictionary of entities specifying which Node to\nreturn.\nargs, kwargs: Optional positional or named arguments to pass onto\nclass-specific initializers. These arguments are only used if\na Node that matches the passed entities doesn't already exist,\nand a new one must be created.\n\nReturns:\nA Node instance.", "source": "codesearchnet"}
{"code": "def word_error_rate(raw_predictions, labels, lookup=None, weights_fn=common_layers.weights_nonzero):\n\n    def from_tokens(raw, lookup_):\n        gathered = tf.gather(lookup_, tf.cast(raw, tf.int32))\n        joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b'<EOS>.*', b'')\n        cleaned = tf.regex_replace(joined, b'_', b' ')\n        tokens = tf.string_split(cleaned, ' ')\n        return tokens\n\n    def from_characters(raw, lookup_):\n        'Convert ascii+2 encoded codes to string-tokens.'\n        corrected = tf.bitcast(tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8)\n        gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[(:, :, 0)]\n        joined = tf.reduce_join(gathered, axis=1)\n        cleaned = tf.regex_replace(joined, b'\\x00', b'')\n        tokens = tf.string_split(cleaned, ' ')\n        return tokens\n    if (lookup is None):\n        lookup = tf.constant([chr(i) for i in range(256)])\n        convert_fn = from_characters\n    else:\n        convert_fn = from_tokens\n    if (weights_fn is not common_layers.weights_nonzero):\n        raise ValueError('Only weights_nonzero can be used for this metric.')\n    with tf.variable_scope('word_error_rate', values=[raw_predictions, labels]):\n        raw_predictions = tf.squeeze(tf.argmax(raw_predictions, axis=(- 1)), axis=(2, 3))\n        labels = tf.squeeze(labels, axis=(2, 3))\n        reference = convert_fn(labels, lookup)\n        predictions = convert_fn(raw_predictions, lookup)\n        distance = tf.reduce_sum(tf.edit_distance(predictions, reference, normalize=False))\n        reference_length = tf.cast(tf.size(reference.values, out_type=tf.int32), dtype=tf.float32)\n        return ((distance / reference_length), reference_length)", "docstring": "Calculate word error rate.\n\nArgs:\nraw_predictions: The raw predictions.\nlabels: The actual labels.\nlookup: A tf.constant mapping indices to output tokens.\nweights_fn: Weighting function.\n\nReturns:\nThe word error rate.", "source": "codesearchnet"}
{"code": "def LoadGDAL(filename, no_data=None):\n    \n  if not GDAL_AVAILABLE:\n    raise Exception(\"richdem.LoadGDAL() requires GDAL.\")\n\n  allowed_types = {gdal.GDT_Byte,gdal.GDT_Int16,gdal.GDT_Int32,gdal.GDT_UInt16,gdal.GDT_UInt32,gdal.GDT_Float32,gdal.GDT_Float64}\n\n  \n  src_ds  = gdal.Open(filename)\n  srcband = src_ds.GetRasterBand(1)\n\n  if no_data is None:\n    no_data = srcband.GetNoDataValue()\n    if no_data is None:\n      raise Exception(\"The source data did not have a NoData value. Please use the no_data argument to specify one. If should not be equal to any of the actual data values. If you are using all possible data values, then the situation is pretty hopeless - sorry.\")\n\n  srcdata = rdarray(srcband.ReadAsArray(), no_data=no_data)\n\n  \n  \n\n  if not srcband.DataType in allowed_types:\n    raise Exception(\"This datatype is not supported. Please file a bug report on RichDEM.\")\n\n  srcdata.projection   = src_ds.GetProjectionRef()\n  srcdata.geotransform = src_ds.GetGeoTransform()\n\n  srcdata.metadata = dict()\n  for k,v in src_ds.GetMetadata().items():\n    srcdata.metadata[k] = v\n\n  _AddAnalysis(srcdata, \"LoadGDAL(filename={0}, no_data={1})\".format(filename, no_data))\n\n  return srcdata", "docstring": "Read a GDAL file.\n\nOpens any file GDAL can read, selects the first raster band, and loads it\nand its metadata into a RichDEM array of the appropriate data type.\n\nIf you need to do something more complicated, look at the source of this\nfunction.\n\nArgs:\nfilename (str):    Name of the raster file to open\nno_data  (float):  Optionally, set the no_data value to this.\n\nReturns:\nA RichDEM array", "source": "juraj-google-style"}
{"code": "def find_triggers(nodes, trigger_words, nosec_lines):\n    trigger_nodes = list()\n    for node in nodes:\n        if (node.line_number not in nosec_lines):\n            trigger_nodes.extend(iter(label_contains(node, trigger_words)))\n    return trigger_nodes", "docstring": "Find triggers from the trigger_word_list in the nodes.\n\nArgs:\nnodes(list[Node]): the nodes to find triggers in.\ntrigger_word_list(list[Union[Sink, Source]]): list of trigger words to look for.\nnosec_lines(set): lines with # nosec whitelisting\n\nReturns:\nList of found TriggerNodes", "source": "codesearchnet"}
{"code": "def serialize(struct, format, target=None, encoding='utf-8'):\n    if (hasattr(target, 'encoding') and target.encoding):\n        raise AnyMarkupError('Input file must be opened in binary mode')\n    fname = None\n    if hasattr(target, 'name'):\n        fname = target.name\n    fmt = _get_format(format, fname)\n    try:\n        serialized = _do_serialize(struct, fmt, encoding)\n        if (target is None):\n            return serialized\n        else:\n            return target.write(serialized)\n    except Exception as e:\n        raise AnyMarkupError(e, traceback.format_exc())", "docstring": "Serialize given structure and return it as encoded string or write it to file-like object.\n\nArgs:\nstruct: structure (dict or list) with unicode members to serialize; note that list\ncan only be serialized to json\nformat: specify markup format to serialize structure as\ntarget: binary-opened file-like object to serialize to; if None (default),\nthe result will be returned instead of writing to `target`\nencoding: encoding to use when serializing, defaults to utf-8\nReturns:\nbytestring with serialized structure if `target` is None; return value of\n`target.write` otherwise\nRaises:\nAnyMarkupError if a problem occurs while serializing", "source": "codesearchnet"}
{"code": "def set_refresh(self, timeout, callback, *callback_args):\n        \n        GObject.timeout_add(timeout, callback, *callback_args)", "docstring": "It is just stub for simplify setting timeout.\nArgs:\ntimeout (int): timeout in milliseconds, after which callback will be called\ncallback (callable): usually, just a function that will be called each time after timeout\n*callback_args (any type): arguments that will be passed to callback function", "source": "juraj-google-style"}
{"code": "def read_configs_(self):\n    if (not self.config_files_):\n        return ({}, [], [])\n    content = {section: {} for section in self}\n    empty_files = []\n    faulty_files = []\n    for cfile in self.config_files_:\n        conf_dict = self.read_config_(cfile)\n        if (conf_dict is None):\n            faulty_files.append(cfile)\n            continue\n        elif (not conf_dict):\n            empty_files.append(cfile)\n            continue\n        for (section, secdict) in conf_dict.items():\n            content[section].update(secdict)\n    return (content, empty_files, faulty_files)", "docstring": "Read config files and set config values accordingly.\n\nReturns:\n(dict, list, list): respectively content of files, list of\nmissing/empty files and list of files for which a parsing error\narised.", "source": "codesearchnet"}
{"code": "def train_model(preprocessed_dataset_path: str, trained_model_path: str, base_artifact_path: str):\n    timestamp = time.time()\n    model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16', pretrained=True)\n    target_path = f'{base_artifact_path}/training/trained_model_{timestamp}.pt'\n    target_path_gcsfuse = target_path.replace('gs:\n    Path(target_path_gcsfuse).parent.mkdir(parents=True, exist_ok=True)\n    torch.save(model.state_dict(), target_path_gcsfuse)\n    Path(trained_model_path).parent.mkdir(parents=True, exist_ok=True)\n    with open(trained_model_path, 'w') as f:\n        f.write(target_path)", "docstring": "Placeholder method to load a model from the torch hub and save it.\n\nArgs:\npreprocessed_dataset_path (str): Path to the preprocessed dataset\ntrained_model_path (str): Output path for the trained model\nbase_artifact_path (str): path to the base directory of where artifacts can be stored for\nthis component", "source": "github-repos"}
{"code": "def _hertz_to_mel(self, frequencies_hertz):\n    return _MEL_HIGH_FREQUENCY_Q * self.backend.numpy.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)", "docstring": "Converts frequencies in `frequencies_hertz` in Hertz to the\nmel scale.\n\nArgs:\nfrequencies_hertz: A tensor of frequencies in Hertz.\nname: An optional name for the operation.\n\nReturns:\nA tensor of the same shape and type of `frequencies_hertz`\ncontaining frequencies in the mel scale.", "source": "github-repos"}
{"code": "def Notify(self, message_type, subject, msg, source):\n    pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)\n    if (pending is None):\n        pending = self.Schema.PENDING_NOTIFICATIONS()\n    if (message_type.split(':', 2)[0] not in rdf_flows.Notification.notification_types):\n        raise TypeError(('Invalid notification type %s' % message_type))\n    pending.Append(type=message_type, subject=subject, message=msg, source=source, timestamp=int((time.time() * 1000000.0)))\n    while (len(pending) > 50):\n        pending.Pop(0)\n    self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)", "docstring": "Send an AFF4-based notification to the user in the UI.\n\nArgs:\nmessage_type: One of aff4_grr.Notification.notification_types e.g.\n\"ViewObject\", \"HostInformation\", \"GrantAccess\" or\nthe same with an added \":[new-style notification type] suffix, e.g.\n\"ViewObject:TYPE_CLIENT_INTERROGATED\".\nsubject: The subject to use, normally a URN.\nmsg: The message to display.\nsource: The class doing the notification.\n\nRaises:\nTypeError: On invalid message_type.", "source": "codesearchnet"}
{"code": "def menu(self, prompt, choices):\n    menu = ([prompt] + ['{0}. {1}'.format(*choice) for choice in enumerate(choices, start=1)])\n    command = 'inputlist({})'.format(repr(menu))\n    choice = int(self._vim.eval(command))\n    if (not (0 < choice < len(menu))):\n        return\n    return choices[(choice - 1)]", "docstring": "Presents a selection menu and returns the user's choice.\n\nArgs:\nprompt (str): Text to ask the user what to select.\nchoices (Sequence[str]): Values for the user to select from.\n\nReturns:\nThe value selected by the user, or ``None``.\n\nTodo:\nNice opportunity to provide a hook for Unite.vim, etc. here.", "source": "codesearchnet"}
{"code": "def plot(self, freq=None, figsize=(15, 5), title=None, logy=False, **kwargs):\n    if (title is None):\n        title = self._get_default_plot_title(freq, 'Equity Progression')\n    ser = self._get_series(freq).rebase()\n    return ser.plot(figsize=figsize, logy=logy, title=title, **kwargs)", "docstring": "Helper function for plotting the series.\n\nArgs:\n* freq (str): Data frequency used for display purposes.\nRefer to pandas docs for valid freq strings.\n* figsize ((x,y)): figure size\n* title (str): Title if default not appropriate\n* logy (bool): log-scale for y axis\n* kwargs: passed to pandas' plot method", "source": "codesearchnet"}
{"code": "def check_graphs(*args):\n    graph = None\n    for i, sgv in enumerate(args):\n        if graph is None and sgv.graph is not None:\n            graph = sgv.graph\n        elif sgv.graph is not None and sgv.graph is not graph:\n            raise ValueError(f'args[{i}] does not belong to the same graph as other arguments.')", "docstring": "Check that all the element in args belong to the same graph.\n\nArgs:\n*args: a list of object with a obj.graph property.\nRaises:\nValueError: if all the elements do not belong to the same graph.", "source": "github-repos"}
{"code": "def update_serial(self, new_serial):\n    new_serial = str(new_serial)\n    if self.has_active_service:\n        raise DeviceError(self, 'Cannot change device serial number when there is service running.')\n    if self._debug_tag == self.serial:\n        self._debug_tag = new_serial\n    self._serial = new_serial\n    self.adb.serial = new_serial\n    self.fastboot.serial = new_serial", "docstring": "Updates the serial number of a device.\n\nThe \"serial number\" used with adb's `-s` arg is not necessarily the\nactual serial number. For remote devices, it could be a combination of\nhost names and port numbers.\n\nThis is used for when such identifier of remote devices changes during\na test. For example, when a remote device reboots, it may come back\nwith a different serial number.\n\nThis is NOT meant for switching the object to represent another device.\n\nWe intentionally did not make it a regular setter of the serial\nproperty so people don't accidentally call this without understanding\nthe consequences.\n\nArgs:\nnew_serial: string, the new serial number for the same device.\n\nRaises:\nDeviceError: tries to update serial when any service is running.", "source": "github-repos"}
{"code": "def word_score(word, input_letters, questions=0):\n    score = 0\n    bingo = 0\n    filled_by_blanks = []\n    rack = list(input_letters)\n    for letter in word:\n        if (letter in rack):\n            bingo += 1\n            score += letter_score(letter)\n            rack.remove(letter)\n        else:\n            filled_by_blanks.append(letter_score(letter))\n    for blank_score in sorted(filled_by_blanks, reverse=True):\n        if (questions > 0):\n            score += blank_score\n            questions -= 1\n    if (bingo > 6):\n        score += 50\n    return score", "docstring": "Checks the Scrabble score of a single word.\n\nArgs:\nword: a string to check the Scrabble score of\ninput_letters: the letters in our rack\nquestions: integer of the tiles already on the board to build on\n\nReturns:\nan integer Scrabble score amount for the word", "source": "codesearchnet"}
{"code": "def __init__(self, context, name, task_id=None):\n        \n        self.name = name\n        self.context = context\n        self.task_id = task_id or get_task_id(context.claim_task)\n        self.task = context.task\n        self.task_type = guess_task_type(name, self.task)\n        self.worker_impl = guess_worker_impl(self)  \n        self.decision_task_id = get_decision_task_id(self.task)\n        self.parent_task_id = get_parent_task_id(self.task)\n        self.links = []", "docstring": "Initialize ChainOfTrust.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context\nname (str): the name of the task (e.g., signing)\ntask_id (str, optional): the task_id of the task.  If None, use\n``get_task_id(context.claim_task)``.  Defaults to None.", "source": "juraj-google-style"}
{"code": "def uptime(ut, facter):\n    ut = ut\n    if (ut and ut.loadavg):\n        return Uptime(ut.currtime, ut.updays, ut.uphhmm, ut.users, ut.loadavg, ut.uptime)\n    ft = facter\n    if (ft and hasattr(ft, 'uptime_seconds')):\n        import datetime\n        secs = int(ft.uptime_seconds)\n        up_dd = (secs \n        up_hh = ((secs % (3600 * 24)) \n        up_mm = ((secs % 3600) \n        updays = (str(up_dd) if (up_dd > 0) else '')\n        uphhmm = ('%02d:%02d' % (up_hh, up_mm))\n        up_time = datetime.timedelta(seconds=secs)\n        return Uptime(None, updays, uphhmm, None, None, up_time)\n    raise Exception('Unable to get uptime information.')", "docstring": "Check uptime and facts to get the uptime information.\n\nPrefer uptime to facts.\n\nReturns:\ninsights.combiners.uptime.Uptime: A named tuple with `currtime`,\n`updays`, `uphhmm`, `users`, `loadavg` and `uptime` components.\n\nRaises:\nException: If no data is available from both of the parsers.", "source": "codesearchnet"}
{"code": "def write(self, data):\n    \n    start_time = time.time()\n    self._get_write_buffer().write(data)\n    ctx = context.get()\n    operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)\n    operation.counters.Increment(\n        COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)", "docstring": "Write data to the GoogleCloudStorage file.\n\nArgs:\ndata: string containing the data to be written.", "source": "juraj-google-style"}
{"code": "def resize_to(self, width, height):\n        \n\n        self.driver.resize_window_to(self.handle, width, height)", "docstring": "Resizes the window to the given dimensions.\n\nIf this method was called for a window that is not current, then after calling this method\nthe current window should remain the same as it was before calling this method.\n\nArgs:\nwidth (int): The new window width in pixels.\nheight (int): The new window height in pixels.", "source": "juraj-google-style"}
{"code": "def __call__(self, utterances_batch: list, utterances_ids: Optional[list] = None) -> list:\n        \n        responses_batch = self._call(utterances_batch, utterances_ids)\n\n        batch_size = len(utterances_batch)\n        ids = utterances_ids or list(range(batch_size))\n\n        for utt_batch_idx, utt_id in enumerate(ids):\n            self.history[utt_id].append(str(utterances_batch[utt_batch_idx]))\n            self.dialog_logger.log_in(utterances_batch[utt_batch_idx], utt_id)\n\n            self.history[utt_id].append(str(responses_batch[utt_batch_idx]))\n            self.dialog_logger.log_out(responses_batch[utt_batch_idx], utt_id)\n\n        return responses_batch", "docstring": "Wraps _call method and updates utterances history.\n\nArgs:\nutterances_batch: Batch of incoming utterances.\nutterances_ids: Batch of dialog IDs corresponding to incoming utterances.\n\nReturns:\nresponses: A batch of responses corresponding to the\nutterance batch received by agent.", "source": "juraj-google-style"}
{"code": "def _download_files(self, client, flow_id):\n    output_file_path = os.path.join(self.output_path, '.'.join((flow_id, 'zip')))\n    if os.path.exists(output_file_path):\n        print('{0:s} already exists: Skipping'.format(output_file_path))\n        return None\n    flow = client.Flow(flow_id)\n    file_archive = flow.GetFilesArchive()\n    file_archive.WriteToFile(output_file_path)\n    fqdn = client.data.os_info.fqdn.lower()\n    client_output_file = os.path.join(self.output_path, fqdn)\n    if (not os.path.isdir(client_output_file)):\n        os.makedirs(client_output_file)\n    with zipfile.ZipFile(output_file_path) as archive:\n        archive.extractall(path=client_output_file)\n    os.remove(output_file_path)\n    return client_output_file", "docstring": "Download files from the specified flow.\n\nArgs:\nclient: GRR Client object to which to download flow data from.\nflow_id: GRR flow ID.\n\nReturns:\nstr: path of downloaded files.", "source": "codesearchnet"}
{"code": "def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:\n        \n        kwargs.update({\"channel\": channel, \"text\": text})\n        return self.api_call(\"chat.meMessage\", json=kwargs)", "docstring": "Share a me message into a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\ntext (str): The message you'd like to share. e.g. 'Hello world'", "source": "juraj-google-style"}
{"code": "def get_all(self, attrs: Iterable[FetchAttribute]) \\\n            -> Sequence[Tuple[FetchAttribute, MaybeBytes]]:\n        \n        ret: List[Tuple[FetchAttribute, MaybeBytes]] = []\n        for attr in attrs:\n            try:\n                ret.append((attr.for_response, self.get(attr)))\n            except NotFetchable:\n                pass\n        return ret", "docstring": "Return a list of tuples containing the attribute iself and the bytes\nrepresentation of that attribute from the message.\n\nArgs:\nattrs: The fetch attributes.", "source": "juraj-google-style"}
{"code": "def add_file(self, filename, file_content):\n        \n        self._group_data['fileName'] = filename\n        self._file_content = file_content", "docstring": "Add a file for Document and Report types.\n\nExample::\n\ndocument = tcex.batch.group('Document', 'My Document')\ndocument.add_file('my_file.txt', 'my contents')\n\nArgs:\nfilename (str): The name of the file.\nfile_content (bytes|method|str): The contents of the file or callback to get contents.", "source": "juraj-google-style"}
{"code": "def validate(export_formats):\n    for i in range(len(export_formats)):\n        export_formats[i] = export_formats[i].strip().lower()\n        if (export_formats[i] not in [ExportFormat.CHECKPOINT, ExportFormat.MODEL]):\n            raise TuneError(('Unsupported export format: ' + export_formats[i]))", "docstring": "Validates export_formats.\n\nRaises:\nValueError if the format is unknown.", "source": "codesearchnet"}
{"code": "def __getitem__(self, key):\n        \n        path = self.keypath(key)\n        if fs.exists(path):\n            return path\n        else:\n            raise KeyError(key)", "docstring": "Get path to file in cache.\n\nArguments:\nkey: Key.\n\nReturns:\nstr: Path to cache value.\n\nRaises:\nKeyErorr: If key not in cache.", "source": "juraj-google-style"}
{"code": "def ProcessListDirectory(self, responses):\n    \n    if not responses.success:\n      raise flow.FlowError(\"Unable to list directory.\")\n\n    with data_store.DB.GetMutationPool() as pool:\n      for response in responses:\n        stat_entry = rdf_client_fs.StatEntry(response)\n        filesystem.CreateAFF4Object(\n            stat_entry, self.client_urn, pool, token=self.token)\n        self.SendReply(stat_entry)", "docstring": "Processes the results of the ListDirectory client action.\n\nArgs:\nresponses: a flow Responses object.", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    page_header_map = self._GetDataTypeMap('dls_page_header')\n\n    try:\n      page_header, file_offset = self._ReadStructureFromFileObject(\n          file_object, 0, page_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse page header with error: {0!s}'.format(\n              exception))\n\n    if page_header.signature not in self._DLS_SIGNATURES:\n      raise errors.UnableToParseFile('Invalid file signature')\n\n    current_page_end = page_header.page_size\n\n    file_entry = parser_mediator.GetFileEntry()\n    date_time = self._GetParentModificationTime(file_entry)\n    \n    \n    if date_time:\n      timestamp_description = definitions.TIME_DESCRIPTION_RECORDED\n    else:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n\n    file_size = file_object.get_size()\n    while file_offset < file_size:\n      if file_offset >= current_page_end:\n        try:\n          page_header, header_size = self._ParseDLSPageHeader(\n              file_object, file_offset)\n        except errors.ParseError as exception:\n          parser_mediator.ProduceExtractionWarning(\n              'Unable to parse page header with error: {0!s}'.format(\n                  exception))\n          break\n\n        current_page_end += page_header.page_size\n        file_offset += header_size\n        continue\n\n      if page_header.signature == self._DLS_V1_SIGNATURE:\n        record_map = self._GetDataTypeMap('dls_record_v1')\n      else:\n        record_map = self._GetDataTypeMap('dls_record_v2')\n\n      try:\n        record, record_length = self._ReadStructureFromFileObject(\n            file_object, file_offset, record_map)\n        file_offset += record_length\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'Unable to parse page record with error: {0!s}'.format(\n                exception))\n        break\n\n      event_data = self._BuildEventData(record)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an fseventsd file.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the header cannot be parsed.", "source": "juraj-google-style"}
{"code": "def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n    input_shape = inputs_embeds.size()[:-1]\n    sequence_length = input_shape[1]\n    position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)\n    return position_ids.unsqueeze(0).expand(input_shape)", "docstring": "Args:\nWe are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.:\ninputs_embeds: torch.Tensor\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def min(x, axis=None, keepdims=False, initial=None):\n    if any_symbolic_tensors((x,)):\n        return Min(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x)\n    return backend.numpy.min(x, axis=axis, keepdims=keepdims, initial=initial)", "docstring": "Return the minimum of a tensor or minimum along an axis.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which to operate. By default, flattened input\nis used.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one. Defaults to `False`.\ninitial: The maximum value of an output element. Defaults to `None`.\n\nReturns:\nMinimum of `x`.", "source": "github-repos"}
{"code": "def __init__(self, input_size: int, num_experts: int, top_k: int):\n    super().__init__()\n    self.num_experts = num_experts\n    self.input_size = input_size\n    self.top_k = top_k\n    self.layer = nn.Linear(input_size, num_experts, bias=False)", "docstring": "Initialize the top-k gating mechanism.\nArgs:\ninput_size (`int`):\nSize of the input.\nnum_experts (`int`):\nNumber of experts.\ntop_k (`int`):\nNumber of top experts to select.", "source": "github-repos"}
{"code": "def ToJsonString(self):\n    nanos = (self.nanos % _NANOS_PER_SECOND)\n    total_sec = (self.seconds + ((self.nanos - nanos) \n    seconds = (total_sec % _SECONDS_PER_DAY)\n    days = ((total_sec - seconds) \n    dt = (datetime(1970, 1, 1) + timedelta(days, seconds))\n    result = dt.isoformat()\n    if ((nanos % 1000000000.0) == 0):\n        return (result + 'Z')\n    if ((nanos % 1000000.0) == 0):\n        return (result + ('.%03dZ' % (nanos / 1000000.0)))\n    if ((nanos % 1000.0) == 0):\n        return (result + ('.%06dZ' % (nanos / 1000.0)))\n    return (result + ('.%09dZ' % nanos))", "docstring": "Converts Timestamp to RFC 3339 date string format.\n\nReturns:\nA string converted from timestamp. The string is always Z-normalized\nand uses 3, 6 or 9 fractional digits as required to represent the\nexact time. Example of the return format: '1972-01-01T10:00:20.021Z'", "source": "codesearchnet"}
{"code": "def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers):\n  \n\n  channels = common_layers.shape_list(x)[-1]\n  signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers)\n\n  return x + signal", "docstring": "Add sinusoids of different frequencies as layer (vertical) timing signal.\n\nArgs:\nx: a Tensor with shape [batch, length, channels]\nlayer: layer num\nnum_layers: total number of layers\n\nReturns:\na Tensor the same shape as x.", "source": "juraj-google-style"}
{"code": "def discover(package, cls_match_func):\n    matched_classes = set()\n    for (_, module_name, _) in pkgutil.walk_packages(package.__path__, prefix=(package.__name__ + '.')):\n        module = __import__(module_name, fromlist=[str('__trash')], level=0)\n        for (_, imported_class) in inspect.getmembers(module, inspect.isclass):\n            if (imported_class.__module__ != module.__name__):\n                continue\n            if cls_match_func(imported_class):\n                matched_classes.add(imported_class)\n    return matched_classes", "docstring": "Returns a set of classes in the directory matched by cls_match_func\n\nArgs:\npath - A Python package\ncls_match_func - Function taking a class and returning true if the\nclass is to be included in the output.", "source": "codesearchnet"}
{"code": "def plot_chmap(cube, kidid, ax=None, **kwargs):\n    \n    if ax is None:\n        ax = plt.gca()\n\n    index = np.where(cube.kidid == kidid)[0]\n    if len(index) == 0:\n        raise KeyError('Such a kidid does not exist.')\n    index = int(index)\n\n    im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)\n    ax.set_xlabel('x')\n    ax.set_ylabel('y')\n    ax.set_title('intensity map ch \n    return im", "docstring": "Plot an intensity map.\n\nArgs:\ncube (xarray.DataArray): Cube which the spectrum information is included.\nkidid (int): Kidid.\nax (matplotlib.axes): Axis the figure is plotted on.\nkwargs (optional): Plot options passed to ax.imshow().", "source": "juraj-google-style"}
{"code": "def get_roaster_state(self):\n    value = self._current_state.value\n    if (value == b'\\x02\\x01'):\n        return 'idle'\n    elif (value == b'\\x04\\x04'):\n        return 'cooling'\n    elif (value == b'\\x08\\x01'):\n        return 'sleeping'\n    elif ((value == b'\\x00\\x00') or (value == b'')):\n        return 'connecting'\n    elif (value == b'\\x04\\x02'):\n        return 'roasting'\n    else:\n        return 'unknown'", "docstring": "Returns a string based upon the current state of the roaster. Will\nraise an exception if the state is unknown.\n\nReturns:\n'idle' if idle,\n'sleeping' if sleeping,\n'cooling' if cooling,\n'roasting' if roasting,\n'connecting' if in hardware connection phase,\n'unknown' otherwise", "source": "codesearchnet"}
{"code": "def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None,\n                                 phenotype_ids=None, build='37'):\n        \n        dynamic_gene_list = []\n        res = []\n        if hgnc_ids:\n            LOG.info(\"Fetching genes by hgnc id\")\n            res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})\n        elif hgnc_symbols:\n            LOG.info(\"Fetching genes by hgnc symbols\")\n            res = []\n            for symbol in hgnc_symbols:\n                for gene_obj in self.gene_by_alias(symbol=symbol, build=build):\n                    res.append(gene_obj)\n\n        for gene_obj in res:\n            dynamic_gene_list.append(\n                {\n                    'hgnc_symbol': gene_obj['hgnc_symbol'],\n                    'hgnc_id': gene_obj['hgnc_id'],\n                    'description': gene_obj['description'],\n                }\n            )\n\n        LOG.info(\"Update dynamic gene panel for: %s\", case['display_name'])\n        updated_case = self.case_collection.find_one_and_update(\n            {'_id': case['_id']},\n            {'$set': {'dynamic_gene_list': dynamic_gene_list,\n                      'dynamic_panel_phenotypes': phenotype_ids or []}},\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n        LOG.debug(\"Case updated\")\n        return updated_case", "docstring": "Update the dynamic gene list for a case\n\nAdds a list of dictionaries to case['dynamic_gene_list'] that looks like\n\n{\nhgnc_symbol: str,\nhgnc_id: int,\ndescription: str\n}\n\nArguments:\ncase (dict): The case that should be updated\nhgnc_symbols (iterable): A list of hgnc_symbols\nhgnc_ids (iterable): A list of hgnc_ids\n\nReturns:\nupdated_case(dict)", "source": "juraj-google-style"}
{"code": "def get_batch(self):\n    params = [self._batch_size, self._num_objects, self._num_features]\n    (inputs, labels) = tf.py_func(self._get_batch_data, params, [tf.float32, tf.float32])\n    inputs = tf.reshape(inputs, [self._batch_size, self._num_objects, (self._num_features + (self._num_objects * 3))])\n    labels = tf.reshape(labels, [(- 1)])\n    return (inputs, labels)", "docstring": "Returns set of nth-farthest input tensors and labels.\n\nReturns:\n1. tf.Tensor (`batch_size`, `num_objects`,\n(`num_features` + 3 * `num_objects`)).\n2. tf.Tensor (`batch_size`). Output object reference label.", "source": "codesearchnet"}
{"code": "def to_json(self, with_volumes=True):\n    data = super().to_json()\n    if with_volumes:\n        data['volumes'] = [{'volumeId': vol.id, 'volumeType': vol.volume_type, 'size': vol.size} for vol in self.volumes]\n    return data", "docstring": "Augment the base `to_json` function, adding information about volumes\n\nReturns:\n`dict`", "source": "codesearchnet"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Retrieve a `BitbucketServerConfig`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBitbucketServerConfigsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BitbucketServerConfig) The response message.", "source": "github-repos"}
{"code": "def format_level_2_memory(memory, header=None):\n    \n    memory_list = []\n    for shot_memory in memory:\n        memory_list.append(format_counts_memory(shot_memory, header))\n    return memory_list", "docstring": "Format an experiment result memory object for measurement level 2.\n\nArgs:\nmemory (list): Memory from experiment with `meas_level==2` and `memory==True`.\nheader (dict): the experiment header dictionary containing\nuseful information for postprocessing.\n\nReturns:\nlist[str]: List of bitstrings", "source": "juraj-google-style"}
{"code": "def relative_humidity(self, value=999):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `relative_humidity`'.format(value))\n        if (value < 0):\n            raise ValueError('value need to be greater or equal 0 for field `relative_humidity`')\n        if (value > 110):\n            raise ValueError('value need to be smaller 110 for field `relative_humidity`')\n    self._relative_humidity = value", "docstring": "Corresponds to IDD Field `relative_humidity`\n\nArgs:\nvalue (int): value for IDD Field `relative_humidity`\nvalue >= 0\nvalue <= 110\nMissing value: 999\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_dc_keywords(index_page):\n    keyword_lists = (keyword_list.split() for keyword_list in parse_meta(index_page, 'dc.keywords', 'DC'))\n    return [SourceString(keyword, source='DC') for keyword in sum(keyword_lists, [])]", "docstring": "Return list of `keywords` parsed from Dublin core.\n\nArgs:\nindex_page (str): Content of the page as UTF-8 string\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "codesearchnet"}
{"code": "def convert(self, point):\n        \n        x, y = point\n        (x1, y1) = x - self.x_offset, y - self.y_offset\n        logger.debug(\"converted {} {} ==> {} {}\".format(x, y, x1, y1))\n        return x1, y1", "docstring": "Convert a point from one coordinate system to another.\n\nArgs:\npoint: tuple(int x, int y)\nThe point in the original coordinate system.\n\nReturns:\nconverted_point: tuple(int x, int y)\nThe point in the new coordinate system.\n\nExample: convert coordinate from original image into a pixel location\nwithin a cutout image.\n\n@rtype: list(float,float)", "source": "juraj-google-style"}
{"code": "def _event_size(event_shape, name=None):\n  \n  with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):\n    event_shape = tf.convert_to_tensor(\n        value=event_shape, dtype=tf.int32, name='event_shape')\n\n    event_shape_const = tf.get_static_value(event_shape)\n    if event_shape_const is not None:\n      return np.prod(event_shape_const)\n    else:\n      return tf.reduce_prod(input_tensor=event_shape)", "docstring": "Computes the number of elements in a tensor with shape `event_shape`.\n\nArgs:\nevent_shape: A tensor shape.\nname: The name to use for the tensor op to compute the number of elements\n(if such an op needs to be created).\n\nReturns:\nevent_size: The number of elements in `tensor_shape`.  Returns a numpy int\nwhen the number of elements can be computed immediately.  Otherwise, returns\na scalar tensor.", "source": "juraj-google-style"}
{"code": "def from_row_lengths(cls, row_lengths, validate=True, dtype=None, dtype_hint=None):\n    if not isinstance(validate, bool):\n        raise TypeError('validate must have type bool')\n    with ops.name_scope(None, 'RowPartitionFromRowLengths', [row_lengths]):\n        row_lengths = cls._convert_row_partition(row_lengths, 'row_lengths', dtype_hint=dtype_hint, dtype=dtype)\n        row_lengths.shape.assert_has_rank(1)\n        if validate:\n            msg = 'Arguments to from_row_lengths do not form a valid RowPartition'\n            checks = [check_ops.assert_rank(row_lengths, 1, message=msg), check_ops.assert_non_negative(row_lengths, message=msg)]\n            row_lengths = control_flow_ops.with_dependencies(checks, row_lengths)\n        row_limits = math_ops.cumsum(row_lengths)\n        row_splits = array_ops.concat([[0], row_limits], axis=0)\n        return cls(row_splits=row_splits, row_lengths=row_lengths, internal=_row_partition_factory_key)", "docstring": "Creates a `RowPartition` with rows partitioned by `row_lengths`.\n\nThis `RowPartition` divides a sequence `values` into rows by indicating\nthe length of each row:\n\n```python\npartitioned_rows = [[values.pop(0) for _ in range(length)]\nfor length in row_lengths]\n```\n\nArgs:\nrow_lengths: A 1-D integer tensor with shape `[nrows]`.  Must be\nnonnegative.\nvalidate: If true, then use assertions to check that the arguments form a\nvalid `RowPartition`.\n\ndtype: Optional dtype for the RowPartition. If missing, the type\nis inferred from the type of `row_lengths`, dtype_hint, or tf.int64.\ndtype_hint: Optional dtype for the RowPartition, used when dtype\nis None. In some cases, a caller may not have a dtype in mind when\nconverting to a tensor, so dtype_hint can be used as a soft preference.\nIf the conversion to `dtype_hint` is not possible, this argument has no\neffect.\n\nReturns:\nA `RowPartition`.", "source": "github-repos"}
{"code": "def freeze_graph(session, outputs):\n    return convert_to_constants.convert_variables_to_constants(session, session.graph.as_graph_def(), [x.op.name for x in outputs])", "docstring": "Freeze the current graph.\n\nArgs:\nsession: Tensorflow sessions containing the graph\noutputs: List of output tensors\n\nReturns:\nThe frozen graph_def.", "source": "github-repos"}
{"code": "def resolves_for(self, session):\n    if self.url:\n        self.actual_path = session.current_url\n    else:\n        result = urlparse(session.current_url)\n        if self.only_path:\n            self.actual_path = result.path\n        else:\n            request_uri = result.path\n            if result.query:\n                request_uri += '?{0}'.format(result.query)\n            self.actual_path = request_uri\n    if isregex(self.expected_path):\n        return self.expected_path.search(self.actual_path)\n    else:\n        return (normalize_url(self.actual_path) == normalize_url(self.expected_path))", "docstring": "Returns whether this query resolves for the given session.\n\nArgs:\nsession (Session): The session for which this query should be executed.\n\nReturns:\nbool: Whether this query resolves.", "source": "codesearchnet"}
{"code": "def replace(self, **kwargs):\n    clone = copy(self)\n    clone.transforms = list(clone.transforms)\n    for (key, value) in kwargs.items():\n        if (not hasattr(clone, key)):\n            raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(key))\n        setattr(clone, key, value)\n    return clone", "docstring": "Return a copy of this `Query`, but with attributes specified\nas keyword arguments replaced by the keyword values.\n\nKeyword Args:\nAttributes/values to replace in the copy.\n\nReturns:\nA copy of the query that has its attributes updated with the specified values.\n\nRaises:\nTypeError: The `Query` does not have the specified attribute.", "source": "codesearchnet"}
{"code": "def GetFileObject(self, data_stream_name=''):\n    \n    if data_stream_name:\n      return None\n\n    return resolver.Resolver.OpenFileObject(\n        self.path_spec, resolver_context=self._resolver_context)", "docstring": "Retrieves the file-like object.\n\nArgs:\ndata_stream_name (Optional[str]): name of the data stream, where an empty\nstring represents the default data stream.\n\nReturns:\nFileIO: a file-like object or None if not available.", "source": "juraj-google-style"}
{"code": "def get_user_info(self):\n    resp = self.requester.get(urljoin(self.base_url, '/api/mobile/v0.5/my_user_info'))\n    resp.raise_for_status()\n    return Info(resp.json())", "docstring": "Returns a UserInfo object for the logged in user.\n\nReturns:\nUserInfo: object representing the student current grades", "source": "codesearchnet"}
{"code": "def add_config(self, slot, config_id, config_type, value):\n    if (slot not in self.config_database):\n        self.config_database[slot] = {}\n    self.config_database[slot][config_id] = (config_type, value)", "docstring": "Add a config variable assignment to this sensor graph.\n\nArgs:\nslot (SlotIdentifier): The slot identifier that this config\nvariable is assigned to.\nconfig_id (int): The 16-bit id of this config_id\nconfig_type (str): The type of the config variable, currently\nsupported are fixed width integer types, strings and binary\nblobs.\nvalue (str|int|bytes): The value to assign to the config variable.", "source": "codesearchnet"}
{"code": "class _EmbeddingHandler(ModelHandler):\n\n    def __init__(self, embeddings_manager: EmbeddingsManager):\n        self.embedding_config = embeddings_manager\n        self._underlying = self.embedding_config.get_model_handler()\n        self.columns = self.embedding_config.get_columns_to_apply()\n\n    def load_model(self):\n        model = self._underlying.load_model()\n        return model\n\n    def _validate_column_data(self, batch):\n        pass\n\n    def run_inference(self, batch: Sequence[dict[str, list[str]]], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> list[dict[str, Union[list[float], list[str]]]]:\n        \n        embedding_input = self.embedding_config.type_adapter.input_fn(batch)\n        self._validate_column_data(batch=embedding_input)\n        prediction = self._underlying.run_inference(embedding_input, model, inference_args)\n        if isinstance(prediction, np.ndarray):\n            prediction_seq = prediction.tolist()\n        elif isinstance(prediction, Iterable) and (not isinstance(prediction, (str, bytes))):\n            prediction_seq = list(prediction)\n        else:\n            prediction_seq = [prediction]\n        return self.embedding_config.type_adapter.output_fn(batch, prediction_seq)\n\n    def get_metrics_namespace(self) -> str:\n        return self._underlying.get_metrics_namespace() or 'BeamML_EmbeddingHandler'\n\n    def batch_elements_kwargs(self) -> Mapping[str, Any]:\n        batch_sizes_map = {}\n        if self.embedding_config.max_batch_size:\n            batch_sizes_map['max_batch_size'] = self.embedding_config.max_batch_size\n        if self.embedding_config.min_batch_size:\n            batch_sizes_map['min_batch_size'] = self.embedding_config.min_batch_size\n        return self._underlying.batch_elements_kwargs() or batch_sizes_map\n\n    def __repr__(self):\n        return self._underlying.__repr__()\n\n    def validate_inference_args(self, _):\n        pass", "docstring": "A ModelHandler intended to be work on list[dict[str, Any]] inputs.\n\nThe inputs to the model handler are expected to be a list of dicts.\n\nFor example, if the original mode is used with RunInference to take a\nPCollection[E] to a PCollection[P], this ModelHandler would take a\nPCollection[dict[str, E]] to a PCollection[dict[str, P]].\n\n_EmbeddingHandler will accept an EmbeddingsManager instance, which\ncontains the details of the model to be loaded and the inference_fn to be\nused. The purpose of _EmbeddingHandler is to generate embeddings for\ngeneral inputs using the EmbeddingsManager instance.\n\nThis is an internal class and offers no backwards compatibility guarantees.\n\nArgs:\nembeddings_manager: An EmbeddingsManager instance.", "source": "github-repos"}
{"code": "def get_global_namespace(decls):\n    \n    found = [\n        decl for decl in scopedef.make_flatten(decls) if decl.name == '::' and\n        isinstance(decl, namespace_t)]\n    if len(found) == 1:\n        return found[0]\n    raise RuntimeError(\"Unable to find global namespace.\")", "docstring": "Get the global namespace (::) from a declaration tree.\n\nArgs:\ndecls (list[declaration_t]): a list of declarations\n\nReturns:\nnamespace_t: the global namespace_t object (::)", "source": "juraj-google-style"}
{"code": "def parse_options(cls, options):\n        \n        d = {}\n        for filename_check, dictionary in cls.filename_checks.items():\n            \n            filename_data = getattr(options, filename_check)\n            if len(filename_data) != 0:\n                parsed_params = {}\n                for single_line in filename_data:\n                    a = [s.strip() for s in single_line.split('=')]\n                    \n                    if a[0] in ['filter_regex', 'filename_regex']:\n                        parsed_params[a[0]] = a[1]\n                d[filename_check] = parsed_params\n        cls.filename_checks.update(d)\n        \n        cls.filename_checks = {x: y for x, y in cls.filename_checks.items() if len(y) > 0}", "docstring": "Required by flake8\nparse the options, called after add_options\n\nArgs:\noptions (dict): options to be parsed", "source": "juraj-google-style"}
{"code": "def to_value_list(original_strings, corenlp_values=None):\n    assert isinstance(original_strings, (list, tuple, set))\n    if (corenlp_values is not None):\n        assert isinstance(corenlp_values, (list, tuple, set))\n        assert (len(original_strings) == len(corenlp_values))\n        return list(set((to_value(x, y) for (x, y) in zip(original_strings, corenlp_values))))\n    else:\n        return list(set((to_value(x) for x in original_strings)))", "docstring": "Convert a list of strings to a list of Values\n\nArgs:\noriginal_strings (list[basestring])\ncorenlp_values (list[basestring or None])\nReturns:\nlist[Value]", "source": "codesearchnet"}
{"code": "def list_files_by_mtime(dirpath):\n    files = [f for f in os.listdir(dirpath) if is_real_file(dirpath, f)]\n    return sorted(files, key=lambda f: get_mtime(dirpath, f))", "docstring": "Return a list of files in the directory, sorted in increasing \"mtime\".\n\nReturn a list of files in the given directory, sorted from older to newer file\naccording to their modification times.  Only return actual files, skipping\ndirectories, symbolic links, pipes, etc.\n\nArgs:\ndirpath: directory pathname\n\nReturns:\nA list of file names relative to the given directory path.", "source": "github-repos"}
{"code": "def __init__(self, value: Any, compute_derived: bool=False, where: Optional[Callable[[base.HyperPrimitive], bool]]=None):\n    super().__init__()\n    self._value = value\n    self._root_path = utils.KeyPath()\n    self._compute_derived = compute_derived\n    self._where = where\n    self._parse_generators()", "docstring": "Constructor.\n\nArgs:\nvalue: Value (maybe) annotated with generators to use as template.\ncompute_derived: Whether to compute derived value at this level.\nWe only want to compute derived value at root level since reference path\nmay go out of scope of a non-root ObjectTemplate.\nwhere: Function to filter hyper primitives. If None, all hyper primitives\nfrom `value` will be included in the encoding/decoding process.\nOtherwise only the hyper primitives on which 'where' returns True will\nbe included. `where` can be useful to partition a search space into\nseparate optimization processes.\nPlease see 'ObjectTemplate' docstr for details.", "source": "github-repos"}
{"code": "def autorotate(image, orientation=None):\n    \n    orientation_value = orientation if orientation else \\\n        image._getexif().get(EXIF_KEYS.get('Orientation'))\n    if orientation_value is None:\n        raise ImDirectException(\"No orientation available in Exif \"\n                                \"tag or given explicitly.\")\n\n    if orientation_value in (1, 2):\n        i = image\n    elif orientation_value in (3, 4):\n        i = image.transpose(Image.ROTATE_180)\n    elif orientation_value in (5, 6):\n        i = image.transpose(Image.ROTATE_270)\n    elif orientation_value in (7, 8):\n        i = image.transpose(Image.ROTATE_90)\n    else:\n        i = image\n\n    if orientation_value in (2, 4, 5, 7):\n        i = i.transpose(Image.FLIP_LEFT_RIGHT)\n\n    return i", "docstring": "Rotate and return an image according to its Exif information.\n\nROTATION_NEEDED = {\n1: 0,\n2: 0 (Mirrored),\n3: 180,\n4: 180 (Mirrored),\n5: -90 (Mirrored),\n6: -90,\n7: 90 (Mirrored),\n8: 90,\n}\n\nArgs:\nimage (PIL.Image.Image): PIL image to rotate\norientation (): Optional orientation value in [1, 8]\n\nReturns:\nA :py:class:`~PIL.Image.Image` image.", "source": "juraj-google-style"}
{"code": "def _tensor_product(self, other, reverse=False):\n        \n        if not isinstance(other, Chi):\n            other = Chi(other)\n        if reverse:\n            input_dims = self.input_dims() + other.input_dims()\n            output_dims = self.output_dims() + other.output_dims()\n            data = np.kron(other.data, self._data)\n        else:\n            input_dims = other.input_dims() + self.input_dims()\n            output_dims = other.output_dims() + self.output_dims()\n            data = np.kron(self._data, other.data)\n        return Chi(data, input_dims, output_dims)", "docstring": "Return the tensor product channel.\n\nArgs:\nother (QuantumChannel): a quantum channel.\nreverse (bool): If False return self ⊗ other, if True return\nif True return (other ⊗ self) [Default: False\nReturns:\nChi: the tensor product channel as a Chi object.\n\nRaises:\nQiskitError: if other is not a QuantumChannel subclass.", "source": "juraj-google-style"}
{"code": "def _ConvertInputMapValues(name, input_map):\n    if not all((isinstance(v, tensor.Tensor) for v in input_map.values())):\n        if name == '':\n            raise ValueError('tf.import_graph_def() requires a non-empty `name` if `input_map` contains non-Tensor values. Try calling tf.convert_to_tensor() on `input_map` values before calling tf.import_graph_def().')\n        with ops.name_scope('_inputs'):\n            input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}\n    return input_map", "docstring": "Ensures all input map values are tensors.\n\nThis should be called from inside the import name scope.\n\nArgs:\nname: the `name` argument passed to import_graph_def\ninput_map: the `input_map` argument passed to import_graph_def.\n\nReturns:\nAn possibly-updated version of `input_map`.\n\nRaises:\nValueError: if input map values cannot be converted due to empty name scope.", "source": "github-repos"}
{"code": "def acquire_multi(self, n=1):\n    browsers = []\n    with self._lock:\n        if (len(self._in_use) >= self.size):\n            raise NoBrowsersAvailable\n        while ((len(self._in_use) < self.size) and (len(browsers) < n)):\n            browser = self._fresh_browser()\n            browsers.append(browser)\n            self._in_use.add(browser)\n    return browsers", "docstring": "Returns a list of up to `n` browsers.\n\nRaises:\nNoBrowsersAvailable if none available", "source": "codesearchnet"}
{"code": "def execute_work_items(work_items, config):\n    return celery.group((worker_task.s(work_item, config) for work_item in work_items))", "docstring": "Execute a suite of tests for a given set of work items.\n\nArgs:\nwork_items: An iterable of `work_db.WorkItem`s.\nconfig: The configuration to use for the test execution.\n\nReturns: An iterable of WorkItems.", "source": "codesearchnet"}
{"code": "def is_user_profile_valid(user_profile):\n  \n\n  if not user_profile:\n    return False\n\n  if not type(user_profile) is dict:\n    return False\n\n  if UserProfile.USER_ID_KEY not in user_profile:\n    return False\n\n  if UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile:\n    return False\n\n  experiment_bucket_map = user_profile.get(UserProfile.EXPERIMENT_BUCKET_MAP_KEY)\n  if not type(experiment_bucket_map) is dict:\n    return False\n\n  for decision in experiment_bucket_map.values():\n    if type(decision) is not dict or UserProfile.VARIATION_ID_KEY not in decision:\n      return False\n\n  return True", "docstring": "Determine if provided user profile is valid or not.\n\nArgs:\nuser_profile: User's profile which needs to be validated.\n\nReturns:\nBoolean depending upon whether profile is valid or not.", "source": "juraj-google-style"}
{"code": "def __init__(self, max_size=-1, client_timeout=-1, autoclose=False,\n                 **client_kwargs):\n        \n        self.max_size = max_size\n        self.client_timeout = client_timeout\n        self.client_kwargs = client_kwargs\n        self.__ioloop = client_kwargs.get('ioloop',\n                                          tornado.ioloop.IOLoop.instance())\n        self.autoclose = autoclose\n        self.__pool = deque()\n        if self.max_size != -1:\n            self.__sem = tornado.locks.Semaphore(self.max_size)\n        else:\n            self.__sem = None\n        self.__autoclose_periodic = None\n        if self.autoclose and self.client_timeout > 0:\n            every = int(self.client_timeout) * 100\n            if int(tornado.version[0]) >= 5:\n                cb = tornado.ioloop.PeriodicCallback(self._autoclose,\n                                                     every)\n            else:\n                cb = tornado.ioloop.PeriodicCallback(self._autoclose,\n                                                     every, self.__ioloop)\n            self.__autoclose_periodic = cb\n            self.__autoclose_periodic.start()", "docstring": "Constructor.\n\nArgs:\nmax_size (int): max size of the pool (-1 means \"no limit\").\nclient_timeout (int): timeout in seconds of a connection released\nto the pool (-1 means \"no timeout\").\nautoclose (boolean): automatically disconnect released connections\nwith lifetime > client_timeout (test made every\nclient_timeout/10 seconds).\nclient_kwargs (dict): Client constructor arguments.", "source": "juraj-google-style"}
{"code": "def ccy_pair(local, base='USD') -> CurrencyPair:\n    ccy_param = param.load_info(cat='ccy')\n    if (f'{local}{base}' in ccy_param):\n        info = ccy_param[f'{local}{base}']\n    elif (f'{base}{local}' in ccy_param):\n        info = ccy_param[f'{base}{local}']\n        info['factor'] = (1.0 / info.get('factor', 1.0))\n        info['power'] = (- info.get('power', 1))\n    elif (base.lower() == local.lower()):\n        info = dict(ticker='')\n        info['factor'] = 1.0\n        if (base[(- 1)].lower() == base[(- 1)]):\n            info['factor'] /= 100.0\n        if (local[(- 1)].lower() == local[(- 1)]):\n            info['factor'] *= 100.0\n    else:\n        logger = logs.get_logger(ccy_pair)\n        logger.error(f'incorrect currency - local {local} / base {base}')\n        return CurrencyPair(ticker='', factor=1.0, power=1)\n    if ('factor' not in info):\n        info['factor'] = 1.0\n    if ('power' not in info):\n        info['power'] = 1\n    return CurrencyPair(**info)", "docstring": "Currency pair info\n\nArgs:\nlocal: local currency\nbase: base currency\n\nReturns:\nCurrencyPair\n\nExamples:\n>>> ccy_pair(local='HKD', base='USD')\nCurrencyPair(ticker='HKD Curncy', factor=1.0, power=1)\n>>> ccy_pair(local='GBp')\nCurrencyPair(ticker='GBP Curncy', factor=100, power=-1)\n>>> ccy_pair(local='USD', base='GBp')\nCurrencyPair(ticker='GBP Curncy', factor=0.01, power=1)\n>>> ccy_pair(local='XYZ', base='USD')\nCurrencyPair(ticker='', factor=1.0, power=1)\n>>> ccy_pair(local='GBP', base='GBp')\nCurrencyPair(ticker='', factor=0.01, power=1)\n>>> ccy_pair(local='GBp', base='GBP')\nCurrencyPair(ticker='', factor=100.0, power=1)", "source": "codesearchnet"}
{"code": "def recipe_dcm(config, auth_read, account, body, delete):\n    dcm(config, {'auth': auth_read, 'report': {'account': account, 'body': body}, 'delete': delete})", "docstring": "Create a CM report from a JSON definition.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\naccount (string) - NA\nbody (json) - NA\ndelete (boolean) - NA", "source": "github-repos"}
{"code": "def __init__(self, loss_tensor, fail_on_nan_loss=True):\n    self._loss_tensor = loss_tensor\n    self._fail_on_nan_loss = fail_on_nan_loss", "docstring": "Initializes a `NanTensorHook`.\n\nArgs:\nloss_tensor: `Tensor`, the loss tensor.\nfail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.", "source": "github-repos"}
{"code": "def _global_report_benchmark(name, iters=None, cpu_time=None, wall_time=None, throughput=None, extras=None, metrics=None):\n    logging.info('Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,throughput: %g, extras: %s, metrics: %s', name, iters if iters is not None else -1, wall_time if wall_time is not None else -1, cpu_time if cpu_time is not None else -1, throughput if throughput is not None else -1, str(extras) if extras else 'None', str(metrics) if metrics else 'None')\n    entries = test_log_pb2.BenchmarkEntries()\n    entry = entries.entry.add()\n    entry.name = name\n    if iters is not None:\n        entry.iters = iters\n    if cpu_time is not None:\n        entry.cpu_time = cpu_time\n    if wall_time is not None:\n        entry.wall_time = wall_time\n    if throughput is not None:\n        entry.throughput = throughput\n    if extras is not None:\n        if not isinstance(extras, dict):\n            raise TypeError('extras must be a dict')\n        for k, v in extras.items():\n            if isinstance(v, numbers.Number):\n                entry.extras[k].double_value = v\n            else:\n                entry.extras[k].string_value = str(v)\n    if metrics is not None:\n        if not isinstance(metrics, list):\n            raise TypeError('metrics must be a list')\n        for metric in metrics:\n            if 'name' not in metric:\n                raise TypeError(\"metric must has a 'name' field\")\n            if 'value' not in metric:\n                raise TypeError(\"metric must has a 'value' field\")\n            metric_entry = entry.metrics.add()\n            metric_entry.name = metric['name']\n            metric_entry.value = metric['value']\n            if 'min_value' in metric:\n                metric_entry.min_value.value = metric['min_value']\n            if 'max_value' in metric:\n                metric_entry.max_value.value = metric['max_value']\n    test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)\n    if test_env is None:\n        print(str(entries))\n        return\n    serialized_entry = entries.SerializeToString()\n    mangled_name = name.replace('/', '__')\n    output_path = '%s%s' % (test_env, mangled_name)\n    if gfile.Exists(output_path):\n        raise IOError('File already exists: %s' % output_path)\n    with gfile.GFile(output_path, 'wb') as out:\n        out.write(serialized_entry)", "docstring": "Method for recording a benchmark directly.\n\nArgs:\nname: The BenchmarkEntry name.\niters: (optional) How many iterations were run\ncpu_time: (optional) Total cpu time in seconds\nwall_time: (optional) Total wall time in seconds\nthroughput: (optional) Throughput (in MB/s)\nextras: (optional) Dict mapping string keys to additional benchmark info.\nmetrics: (optional) A list of dict representing metrics generated by the\nbenchmark. Each dict should contain keys 'name' and'value'. A dict\ncan optionally contain keys 'min_value' and 'max_value'.\n\nRaises:\nTypeError: if extras is not a dict.\nIOError: if the benchmark output file already exists.", "source": "github-repos"}
{"code": "def output(self, filename):\n        \n\n        if not filename.endswith('.dot'):\n            filename += '.dot'\n        if filename == \".dot\":\n            filename = \"all_contracts.dot\"\n\n        with open(filename, 'w', encoding='utf8') as f:\n            self.info(f'Call Graph: {filename}')\n            f.write('\\n'.join(['strict digraph {'] + [self._process_functions(self.slither.functions)] +  ['}']))\n\n\n        for derived_contract in self.slither.contracts_derived:\n            with open(f'{derived_contract.name}.dot', 'w', encoding='utf8') as f:\n                self.info(f'Call Graph: {derived_contract.name}.dot')\n                f.write('\\n'.join(['strict digraph {'] + [self._process_functions(derived_contract.functions)] +  ['}']))", "docstring": "Output the graph in filename\nArgs:\nfilename(string)", "source": "juraj-google-style"}
{"code": "def _inquire(self, **kwargs):\n    if (rname_rfc6680 is None):\n        raise NotImplementedError('Your GSSAPI implementation does not support RFC 6680 (the GSSAPI naming extensions)')\n    if (not kwargs):\n        default_val = True\n    else:\n        default_val = False\n    attrs = kwargs.get('attrs', default_val)\n    mech_name = kwargs.get('mech_name', default_val)\n    return rname_rfc6680.inquire_name(self, mech_name=mech_name, attrs=attrs)", "docstring": "Inspect this name for information.\n\nThis method inspects the name for information.\n\nIf no keyword arguments are passed, all available information\nis returned.  Otherwise, only the keyword arguments that\nare passed and set to `True` are returned.\n\nArgs:\nmech_name (bool): get whether this is a mechanism name,\nand, if so, the associated mechanism\nattrs (bool): get the attributes names for this name\n\nReturns:\nInquireNameResult: the results of the inquiry, with unused\nfields set to None\n\nRaises:\nGSSError", "source": "codesearchnet"}
{"code": "def get_memory_region(x, query_block_shape, memory_flange, q_indices):\n  \n  \n  \n  x_query_padded = pad_to_multiple_2d(x, query_block_shape)\n  x_center = gather_blocks_2d(x_query_padded, q_indices)\n  \n  paddings = [[0, 0], [0, 0], [memory_flange[0], 0],\n              [memory_flange[1], memory_flange[1]], [0, 0]]\n  x_memory_padded = tf.pad(x_query_padded, paddings)\n  left_x = None\n  top_x = None\n  \n  \n  \n  \n  if memory_flange[1] > 0:\n    left_x_region = x_memory_padded[:, :, memory_flange[\n        0]:, :-(query_block_shape[1] + memory_flange[1]), :]\n    left_memory_shape = (query_block_shape[0], memory_flange[1])\n    left_indices = gather_indices_2d(left_x_region, left_memory_shape,\n                                     query_block_shape)\n    left_x = gather_blocks_2d(left_x_region, left_indices)\n  \n  if memory_flange[0] > 0:\n    top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :]\n\n    top_memory_shape = (memory_flange[0],\n                        query_block_shape[1] + 2 * memory_flange[1])\n\n    top_indices = gather_indices_2d(top_x_region, top_memory_shape,\n                                    query_block_shape)\n\n    top_x = gather_blocks_2d(top_x_region, top_indices)\n  x_flange = None\n  if top_x is not None and left_x is not None:\n    x_flange = tf.concat([top_x, left_x], axis=3)\n  else:\n    x_flange = top_x if top_x is not None else left_x\n  return x_flange, x_center", "docstring": "Get the memory regions that surround a 2d query.\n\nThe memory regions will be the left and top right.\n\nArgs:\nx: A tensor with shape [batch, heads, height, width, depth]\nquery_block_shape: a 2-d tuple of integers\nmemory_flange: a 2-d tuple of integers\nq_indices: a tensor of indices for each of the center blocks.\n[num_blocks, block_length]\nReturns:\nx_flange: A tensor of shape [batch, heads, #blocks, block_length, depth]", "source": "juraj-google-style"}
{"code": "def make_group_index(self, groupby_cols, bool_arr):\n        \n        factor_list, values_list = self.factorize_groupby_cols(groupby_cols)\n\n        \n        if len(factor_list) == 0:\n            \n            \n            tmp_rootdir = self.create_tmp_rootdir()\n            carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')\n            carray_values = ['Total']\n        elif len(factor_list) == 1:\n            \n            \n            carray_factor = factor_list[0]\n            carray_values = values_list[0]\n        else:\n            \n            \n            if self.group_cache_valid(col_list=groupby_cols):\n                \n                col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))\n                col_factor_rootdir = col_rootdir + '.factor'\n                carray_factor = bcolz.carray(rootdir=col_factor_rootdir)\n                col_values_rootdir = col_rootdir + '.values'\n                carray_values = bcolz.carray(rootdir=col_values_rootdir)\n            else:\n                \n                carray_factor, carray_values = \\\n                    self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)\n\n        nr_groups = len(carray_values)\n        skip_key = None\n\n        if bool_arr is not None:\n            \n            tmp_rootdir = self.create_tmp_rootdir()\n            carray_factor = bcolz.eval(\n                '(factor + 1) * bool - 1',\n                user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')\n            \n            tmp_rootdir = self.create_tmp_rootdir()\n            labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')\n            carray_factor, values = ctable_ext.factorize(carray_factor, labels)\n            \n            \n            filter_check = \\\n                [key for key, value in values.items() if value == -1]\n            if filter_check:\n                skip_key = filter_check[0]\n            \n            nr_groups = len(values)\n\n        \n        \n        \n\n        if skip_key is None:\n            \n            skip_key = nr_groups\n\n        return carray_factor, nr_groups, skip_key", "docstring": "Create unique groups for groupby loop\n\nArgs:\nfactor_list:\nvalues_list:\ngroupby_cols:\nbool_arr:\n\nReturns:\ncarray: (carray_factor)\nint: (nr_groups) the number of resulting groups\nint: (skip_key)", "source": "juraj-google-style"}
{"code": "def learn(self, grad_arr):\n        \n        deconvolution_layer_list = self.__deconvolution_layer_list[::-1]\n        for i in range(len(deconvolution_layer_list)):\n            try:\n                grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr)\n            except:\n                self.__logger.debug(\"Error raised in Convolution layer \" + str(i + 1))\n                raise\n\n        self.__optimize_deconvolution_layer(self.__learning_rate, 1)\n\n        layerable_cnn_list = self.__convolutional_auto_encoder.layerable_cnn_list[::-1]\n        for i in range(len(layerable_cnn_list)):\n            try:\n                grad_arr = layerable_cnn_list[i].back_propagate(grad_arr)\n            except:\n                self.__logger.debug(\n                    \"Delta computation raised an error in CNN layer \" + str(len(layerable_cnn_list) - i)\n                )\n                raise\n\n        self.__convolutional_auto_encoder.optimize(self.__learning_rate, 1)\n\n        return grad_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\ngrad_arr:   `np.ndarray` of gradients.\n\nReturns:\n`np.ndarray` of delta or gradients.", "source": "juraj-google-style"}
{"code": "def decode_datetime(encoded_datetime):\n    \n    \n    \n    \n    time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)\n    if time_zone_match:\n        time_string = encoded_datetime[:time_zone_match.start(1)].upper()\n    else:\n        time_string = encoded_datetime.upper()\n\n    if '.' in time_string:\n        format_string = '%Y-%m-%dT%H:%M:%S.%f'\n    else:\n        format_string = '%Y-%m-%dT%H:%M:%S'\n\n    decoded_datetime = datetime.datetime.strptime(time_string, format_string)\n\n    if not time_zone_match:\n        return decoded_datetime\n\n    \n    \n    \n    if time_zone_match.group('z'):\n        offset_minutes = 0\n    else:\n        sign = time_zone_match.group('sign')\n        hours, minutes = [int(value) for value in\n                          time_zone_match.group('hours', 'minutes')]\n        offset_minutes = hours * 60 + minutes\n        if sign == '-':\n            offset_minutes *= -1\n\n    return datetime.datetime(decoded_datetime.year,\n                             decoded_datetime.month,\n                             decoded_datetime.day,\n                             decoded_datetime.hour,\n                             decoded_datetime.minute,\n                             decoded_datetime.second,\n                             decoded_datetime.microsecond,\n                             TimeZoneOffset(offset_minutes))", "docstring": "Decode a DateTimeField parameter from a string to a python datetime.\n\nArgs:\nencoded_datetime: A string in RFC 3339 format.\n\nReturns:\nA datetime object with the date and time specified in encoded_datetime.\n\nRaises:\nValueError: If the string is not in a recognized format.", "source": "juraj-google-style"}
{"code": "def _ReadSelectedVolumes(self, volume_system, prefix='v'):\n    \n    volume_identifiers_string = self._input_reader.Read()\n    volume_identifiers_string = volume_identifiers_string.strip()\n\n    if not volume_identifiers_string:\n      return []\n\n    selected_volumes = self._ParseVolumeIdentifiersString(\n        volume_identifiers_string, prefix=prefix)\n\n    if selected_volumes == ['all']:\n      return [\n          '{0:s}{1:d}'.format(prefix, volume_index)\n          for volume_index in range(1, volume_system.number_of_volumes + 1)]\n\n    return selected_volumes", "docstring": "Reads the selected volumes provided by the user.\n\nArgs:\nvolume_system (APFSVolumeSystem): volume system.\nprefix (Optional[str]): volume identifier prefix.\n\nReturns:\nlist[str]: selected volume identifiers including prefix.\n\nRaises:\nKeyboardInterrupt: if the user requested to abort.\nValueError: if the volume identifiers string could not be parsed.", "source": "juraj-google-style"}
{"code": "def read_xyz(cls, buf, start_index=0, get_bonds=True, nrows=None, engine=None):\n    frame = pd.read_table(buf, skiprows=2, comment='\n    remove_digits = partial(re.sub, '[0-9]+', '')\n    frame['atom'] = frame['atom'].apply(remove_digits)\n    molecule = cls(frame)\n    molecule.index = range(start_index, (start_index + len(molecule)))\n    if get_bonds:\n        molecule.get_bonds(use_lookup=False, set_lookup=True)\n    return molecule", "docstring": "Read a file of coordinate information.\n\nReads xyz-files.\n\nArgs:\ninputfile (str):\nstart_index (int):\nget_bonds (bool):\nnrows (int): Number of rows of file to read.\nNote that the first two rows are implicitly excluded.\nengine (str): Wrapper for argument of :func:`pandas.read_csv`.\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def create_version(self, version_label):\n    version_response = self.repo.api.http_request('POST', ('%s/fcr:versions' % self.uri), data=None, headers={'Slug': version_label})\n    if (version_response.status_code == 201):\n        logger.debug(('version created: %s' % version_response.headers['Location']))\n        self._affix_version(version_response.headers['Location'], version_label)", "docstring": "method to create a new version of the resource as it currently stands\n\n- Note: this will create a version based on the current live instance of the resource,\nnot the local version, which might require self.update() to update.\n\nArgs:\nversion_label (str): label to be used for version\n\nReturns:\n(ResourceVersion): instance of ResourceVersion, also appended to self.versions", "source": "codesearchnet"}
{"code": "def _create_extractors(col_params):\n  \n  result = []\n  for col_param in col_params:\n    result.append(_create_extractor(col_param))\n  return result", "docstring": "Creates extractors to extract properties corresponding to 'col_params'.\n\nArgs:\ncol_params: List of ListSessionGroupsRequest.ColParam protobufs.\nReturns:\nA list of extractor functions. The ith element in the\nreturned list extracts the column corresponding to the ith element of\n_request.col_params", "source": "juraj-google-style"}
{"code": "def with_target_audience(self, target_audience):\n        \n        return self.__class__(\n            self._signer,\n            service_account_email=self._service_account_email,\n            token_uri=self._token_uri,\n            target_audience=target_audience,\n            additional_claims=self._additional_claims.copy())", "docstring": "Create a copy of these credentials with the specified target\naudience.\n\nArgs:\ntarget_audience (str): The intended audience for these credentials,\nused when requesting the ID Token.\n\nReturns:\ngoogle.auth.service_account.IDTokenCredentials: A new credentials\ninstance.", "source": "juraj-google-style"}
{"code": "def members(name, members_list, **kwargs):\n    \n    members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(\",\")]\n    if not isinstance(members_list, list):\n        log.debug('member_list is not a list')\n        return False\n\n    try:\n        obj_group = _get_group_object(name)\n    except pywintypes.com_error as exc:\n        \n        msg = 'Failed to access group {0}. {1}'.format(\n            name, win32api.FormatMessage(exc.excepinfo[5]))\n        log.error(msg)\n        return False\n\n    existing_members = [_get_username(x) for x in obj_group.members()]\n    existing_members.sort()\n    members_list.sort()\n\n    if existing_members == members_list:\n        log.info('%s membership is correct', name)\n        return True\n\n    \n    success = True\n    for member in members_list:\n        if member not in existing_members:\n            try:\n                obj_group.Add('WinNT:\n                log.info('User added: %s', member)\n            except pywintypes.com_error as exc:\n                msg = 'Failed to add {0} to {1}. {2}'.format(\n                    member, name, win32api.FormatMessage(exc.excepinfo[5]))\n                log.error(msg)\n                success = False\n\n    \n    for member in existing_members:\n        if member not in members_list:\n            try:\n                obj_group.Remove('WinNT:\n                log.info('User removed: %s', member)\n            except pywintypes.com_error as exc:\n                msg = 'Failed to remove {0} from {1}. {2}'.format(\n                    member, name, win32api.FormatMessage(exc.excepinfo[5]))\n                log.error(msg)\n                success = False\n\n    return success", "docstring": "Ensure a group contains only the members in the list\n\nArgs:\n\nname (str):\nThe name of the group to modify\n\nmembers_list (str):\nA single user or a comma separated list of users. The group will\ncontain only the users specified in this list.\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.members foo 'user1,user2,user3'", "source": "juraj-google-style"}
{"code": "def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):\n    if (not current_app.testing):\n        from flask_sendmail import Message\n        message = Message(subject, recipients=[recipient], html=html_message, body=text_message)\n        self.mail.send(message)", "docstring": "Send email message via Flask-Sendmail.\n\nArgs:\nrecipient: Email address or tuple of (Name, Email-address).\nsubject: Subject line.\nhtml_message: The message body in HTML.\ntext_message: The message body in plain text.", "source": "codesearchnet"}
{"code": "def parsetime(text):\n    \n    mins, maxs = text.split('-', 1)\n    minv = s_time.parse(mins)\n    maxv = s_time.parse(maxs, base=minv)\n    return minv, maxv", "docstring": "Parse an interval time string and return a (min,max) tuple.\n\nArgs:\ntext (str): A time interval string\n\nReturns:\n((int,int)):    A epoch millis epoch time string", "source": "juraj-google-style"}
{"code": "def reply(self, reply_comment):\n        \n        payload = '{ \"Comment\": \"' + reply_comment + '\"}'\n        endpoint = 'https:\n\n        self._make_api_call('post', endpoint, data=payload)", "docstring": "Reply to the Message.\n\nNotes:\nHTML can be inserted in the string and will be interpreted properly by Outlook.\n\nArgs:\nreply_comment: String message to send with email.", "source": "juraj-google-style"}
{"code": "def bottleneck_block(cnn, depth, depth_bottleneck, stride, pre_activation):\n    \n    if pre_activation:\n        bottleneck_block_v2(cnn, depth, depth_bottleneck, stride)\n    else:\n        bottleneck_block_v1(cnn, depth, depth_bottleneck, stride)", "docstring": "Bottleneck block with identity short-cut.\n\nArgs:\ncnn: the network to append bottleneck blocks.\ndepth: the number of output filters for this bottleneck block.\ndepth_bottleneck: the number of bottleneck filters for this block.\nstride: Stride used in the first layer of the bottleneck block.\npre_activation: use pre_activation structure used in v2 or not.", "source": "juraj-google-style"}
{"code": "def solveAsync(self, callback):\n        \n        def async_call():\n            self._lock.acquire()\n            try:\n                self._impl.solve()\n            except Exception:\n                self._lock.release()\n                raise\n            else:\n                self._lock.release()\n                callback.run()\n        Thread(target=async_call).start()", "docstring": "Solve the current model asynchronously.\n\nArgs:\ncallback: Callback to be executed when the solver is done.", "source": "juraj-google-style"}
{"code": "def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):\n    if (is_training and (keep_prob > 0)):\n        with tf.name_scope(scope, 'Dropout', [inputs]):\n            return tf.nn.dropout(inputs, keep_prob)\n    else:\n        return inputs", "docstring": "Returns a dropout layer applied to the input.\n\nArgs:\ninputs: the tensor to pass to the Dropout layer.\nkeep_prob: the probability of keeping each input unit.\nis_training: whether or not the model is in training mode. If so, dropout is\napplied and values scaled. Otherwise, inputs is returned.\nscope: Optional scope for name_scope.\n\nReturns:\na tensor representing the output of the operation.", "source": "codesearchnet"}
{"code": "def _parse_dataset(file_path, tmp_dir, train):\n  \n  input_path = file_path\n  file_name = 'train' if train else 'dev'\n  gen_output_path = os.path.join(tmp_dir, file_name + '.txt')\n  example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE)\n\n  print('input path: ' + input_path)\n  print('gen_output_path: ' + gen_output_path)\n  print('example_output_path: ' + example_output_path)\n\n  input_file = tf.gfile.Open(input_path, mode='r')\n  examples = []\n  for counter, line in enumerate(input_file):\n    if counter == 0:  \n      continue\n    \n    line_split = line.split('\\t')\n\n    parse1 = line_split[_PARSE1_INDEX]\n    parse2 = line_split[_PARSE2_INDEX]\n    consensus_label = line_split[_LABEL_INDEX]\n\n    tokens1 = _get_tokens_and_tags(parse1)\n    tokens2 = _get_tokens_and_tags(parse2)\n\n    tokens1_str = ' '.join(tokens1)\n    tokens2_str = ' '.join(tokens2)\n\n    if consensus_label != '-':\n      examples.append([tokens1_str, tokens2_str, consensus_label])\n\n  input_file.close()\n\n  \n  with tf.gfile.GFile(gen_output_path, 'w') as f:\n    for tokens1_str, tokens2_str, consensus_label in examples:\n      f.write('%s\\t%s\\t%s\\n' % (tokens1_str, tokens2_str, consensus_label))\n\n  if train:\n    \n    with tf.gfile.GFile(example_output_path, 'w') as f:\n      for tokens1_str, tokens2_str, consensus_label in examples:\n        f.write('%s %s\\n' % (tokens1_str, tokens2_str))", "docstring": "Convert the dataset in to a simpler format.\n\nThis function creates two files. One for being processed to produce a vocab\nand another to generate the data.\n\nArgs:\nfile_path: string, path to the file to parse.\ntmp_dir: string, path to the directory to output the files.\ntrain: bool, indicating if we are parsing the training set.", "source": "juraj-google-style"}
{"code": "def load_profile_variants(adapter, variant_file):\n\n    \n\n    vcf_info = check_vcf(variant_file)\n    nr_variants = vcf_info['nr_variants']\n    variant_type = vcf_info['variant_type']\n\n    if variant_type != 'snv':\n        LOG.critical('Variants used for profiling must be SNVs only')\n        raise VcfError\n\n    vcf = get_vcf(variant_file)\n\n    profile_variants = [build_profile_variant(variant) for variant in vcf]\n    adapter.add_profile_variants(profile_variants)", "docstring": "Loads variants used for profiling\n\nArgs:\nadapter (loqusdb.plugins.Adapter): initialized plugin\nvariant_file(str): Path to variant file", "source": "juraj-google-style"}
{"code": "def by_issn(issn):\n    old_url = aleph.ALEPH_URL\n    aleph.ALEPH_URL = NTK_ALEPH_URL\n    records = aleph.getISSNsXML(issn, base='STK02')\n    aleph.ALEPH_URL = old_url\n    for record in records:\n        marc = MARCXMLRecord(record)\n        additional_info = {'222': marc.get('222', None), 'PER': marc.get('PER', None), '776': marc.get('776', None), '008': marc.get('008', None), 'alt_end_date': ''}\n        additional_info = {key: val for (key, val) in additional_info.iteritems() if val}\n        alt_end_date = None\n        alt_creation_date = None\n        if additional_info['008']:\n            alt_creation_date = additional_info['008'][7:11]\n            alt_end_date = additional_info['008'][11:15]\n            if (alt_end_date in ['9999', '****']):\n                alt_creation_date += '-'\n                alt_end_date = None\n            additional_info['alt_end_date'] = alt_end_date\n        author = Author.parse_author(marc)\n        model = Model(url=_first_or_none(marc.get('856u')), conspect=_first_or_none(marc.get('072a')), annotation_tags=_first_or_none(marc.get('520a')), periodicity=_first_or_none(marc.get('310a')), title_tags=_first_or_none(marc.get('222a')), subtitle_tags=_first_or_none(marc.get('245b')), place_tags=remove_hairs((_first_or_none(marc.get('260a')) or '')), author_tags=(author._asdict() if author else None), publisher_tags=remove_hairs(((_first_or_none(marc.get('260b')) or _first_or_none(marc.get('264b')) or ''),), ', '), creation_dates=_first_or_none(marc.get('260c', [alt_creation_date])), lang_tags=_first_or_none(marc.get('040b')), keyword_tags=marc.get('650a07'), source_info=_first_or_none(marc.get('500a')), original_xml=record, additional_info=additional_info)\n        (yield _add_source(model))", "docstring": "Query aleph for records with given `issn`. The lookup is directed to the\nNTK's Aleph.\n\nArgs:\nissn (str): ISSN of the periodical.\n\nReturns:\nobj: :class:`Model` instances for each record.", "source": "codesearchnet"}
{"code": "def _init_profile_batch(self, profile_batch):\n    profile_batch_error_message = f'profile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: {profile_batch}'\n    if isinstance(profile_batch, str):\n        profile_batch = str(profile_batch).split(',')\n        profile_batch = tree.map_structure(int, profile_batch)\n    if isinstance(profile_batch, int):\n        self._start_batch = profile_batch\n        self._stop_batch = profile_batch\n    elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:\n        self._start_batch, self._stop_batch = profile_batch\n    else:\n        raise ValueError(profile_batch_error_message)\n    if self._start_batch < 0 or self._stop_batch < self._start_batch:\n        raise ValueError(profile_batch_error_message)\n    self._profiler_started = False\n    self._batch_trace_context = None\n    if self._start_batch > 0:\n        self._start_profiler(logdir='')\n        self._stop_profiler(save=False)\n    self._is_tracing = False\n    self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)", "docstring": "Validate profile_batch value and set the range of batches to profile.\n\nSets values of _start_batch and _stop_batch attributes,\nspecifying the start and stop batch to profile.\nSetting `profile_batch=0` disables profiling.\n\nArgs:\nprofile_batch: The range of batches to profile. Should be a\nnon-negative integer or a comma separated string of pair of positive\nintegers. A pair of positive integers signify a range of batches to\nprofile.\n\nRaises:\nValueError: If profile_batch is not an integer or a comma separated\npair of positive integers.", "source": "github-repos"}
{"code": "def get_volume():\n    if (system.get_name() == 'windows'):\n        pass\n    elif (system.get_name() == 'mac'):\n        volume = system.get_cmd_out(['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol'])\n        return (int(volume) * 10)\n    else:\n        volume = system.get_cmd_out(\"amixer get Master |grep % |awk '{print $5}'|sed -e 's/\\\\[\n        return int(volume.replace('%', ''))", "docstring": "Get the volume.\n\nGet the current volume.\n\nReturns:\nint: The current volume (percentage, between 0 and 100).", "source": "codesearchnet"}
{"code": "def codemirror_field_css_bundle(field):\n    manifesto = CodemirrorAssetTagRender()\n    manifesto.register_from_fields(field)\n    try:\n        bundle_name = manifesto.css_bundle_names()[0]\n    except IndexError:\n        msg = \"Given field with configuration name '{}' does not have a Javascript bundle name\"\n        raise CodeMirrorFieldBundleError(msg.format(field.config_name))\n    return bundle_name", "docstring": "Filter to get CodeMirror CSS bundle name needed for a single field.\n\nExample:\n::\n\n{% load djangocodemirror_tags %}\n{{ form.myfield|codemirror_field_css_bundle }}\n\nArguments:\nfield (djangocodemirror.fields.CodeMirrorField): A form field.\n\nRaises:\nCodeMirrorFieldBundleError: Raised if Codemirror configuration from\nfield does not have a bundle name.\n\nReturns:\nstring: Bundle name to load with webassets.", "source": "codesearchnet"}
{"code": "def constant_value(pred):\n    if isinstance(pred, int):\n        if pred == 1:\n            pred = True\n        elif pred == 0:\n            pred = False\n    if isinstance(pred, variables.Variable):\n        return None\n    return smart_module.smart_constant_value(pred)", "docstring": "Return the bool value for `pred`, or None if `pred` had a dynamic value.\n\nArgs:\npred: A scalar, either a Python bool or a TensorFlow boolean variable\nor tensor, or the Python integer 1 or 0.\n\nReturns:\nTrue or False if `pred` has a constant boolean value, None otherwise.\n\nRaises:\nTypeError: If `pred` is not a Variable, Tensor or bool, or Python\ninteger 1 or 0.", "source": "github-repos"}
{"code": "def _path_components(self, path):\n    if ((not path) or (path == self._path_separator(path))):\n        return []\n    (drive, path) = self.splitdrive(path)\n    path_components = path.split(self._path_separator(path))\n    assert (drive or path_components)\n    if (not path_components[0]):\n        if ((len(path_components) > 1) and (not path_components[1])):\n            path_components = []\n        else:\n            path_components = path_components[1:]\n    if drive:\n        path_components.insert(0, drive)\n    return path_components", "docstring": "Breaks the path into a list of component names.\n\nDoes not include the root directory as a component, as all paths\nare considered relative to the root directory for the FakeFilesystem.\nCallers should basically follow this pattern:\n\n.. code:: python\n\nfile_path = self.absnormpath(file_path)\npath_components = self._path_components(file_path)\ncurrent_dir = self.root\nfor component in path_components:\nif component not in current_dir.contents:\nraise IOError\n_do_stuff_with_component(current_dir, component)\ncurrent_dir = current_dir.get_entry(component)\n\nArgs:\npath:  Path to tokenize.\n\nReturns:\nThe list of names split from path.", "source": "codesearchnet"}
{"code": "def thread_safe_client(client, lock=None):\n    if (lock is None):\n        lock = threading.Lock()\n    return _ThreadSafeProxy(client, lock)", "docstring": "Create a thread-safe proxy which locks every method call\nfor the given client.\n\nArgs:\nclient: the client object to be guarded.\nlock: the lock object that will be used to lock client's methods.\nIf None, a new lock will be used.\n\nReturns:\nA thread-safe proxy for the given client.", "source": "codesearchnet"}
{"code": "def deserialize_ndarray_npy(d):\n    \n    with io.BytesIO() as f:\n        f.write(json.loads(d['npy']).encode('latin-1'))\n        f.seek(0)\n        return np.load(f)", "docstring": "Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's\n:obj:`save` function.\n\nArgs:\nd (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object, created\nusing :obj:`numpy.save`.\n\nReturns:\nAn :obj:`ndarray` object.", "source": "juraj-google-style"}
{"code": "def open_stream(self, destination, timeout_ms=None):\n    timeout = timeouts.PolledTimeout.from_millis(timeout_ms)\n    stream_transport = self._make_stream_transport()\n    self.transport.write_message(adb_message.AdbMessage(command='OPEN', arg0=stream_transport.local_id, arg1=0, data=(destination + '\\x00')), timeout)\n    if (not stream_transport.ensure_opened(timeout)):\n        return None\n    return AdbStream(destination, stream_transport)", "docstring": "Opens a new stream to a destination service on the device.\n\nNot the same as the posix 'open' or any other Open methods, this\ncorresponds to the OPEN message described in the ADB protocol\ndocumentation mentioned above.  It creates a stream (uniquely identified\nby remote/local ids) that connects to a particular service endpoint.\n\nArgs:\ndestination: The service:command string, see ADB documentation.\ntimeout_ms: Timeout in milliseconds for the Open to succeed (or as a\nPolledTimeout object).\n\nRaises:\nAdbProtocolError: Wrong local_id sent to us, or we didn't get a ready\nresponse.\n\nReturns:\nAn AdbStream object that can be used to read/write data to the specified\nservice endpoint, or None if the requested service couldn't be opened.", "source": "codesearchnet"}
{"code": "def get_subject_without_validation(jwt_bu64):\n    try:\n        jwt_dict = get_jwt_dict(jwt_bu64)\n    except JwtException as e:\n        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)\n    try:\n        return jwt_dict['sub']\n    except LookupError:\n        log_jwt_dict_info(logging.error, 'Missing \"sub\" key', jwt_dict)", "docstring": "Extract subject from the JWT without validating the JWT.\n\n- The extracted subject cannot be trusted for authn or authz.\n\nArgs:\njwt_bu64: bytes\nJWT, encoded using a a URL safe flavor of Base64.\n\nReturns:\nstr: The subject contained in the JWT.", "source": "codesearchnet"}
{"code": "def _get_validation_labels(val_path):\n    \n    labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)\n    with tf.io.gfile.GFile(labels_path) as labels_f:\n      labels = labels_f.read().strip().split('\\n')\n    with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:\n      tar = tarfile.open(mode='r:', fileobj=tar_f_obj)\n      images = sorted(tar.getnames())\n    return dict(zip(images, labels))", "docstring": "Returns labels for validation.\n\nArgs:\nval_path: path to TAR file containing validation images. It is used to\nretrieve the name of pictures and associate them to labels.\n\nReturns:\ndict, mapping from image name (str) to label (str).", "source": "juraj-google-style"}
{"code": "def diff_charsToLines(self, diffs, lineArray):\n    for i in range(len(diffs)):\n        text = []\n        for char in diffs[i][1]:\n            text.append(lineArray[ord(char)])\n        diffs[i] = (diffs[i][0], ''.join(text))", "docstring": "Rehydrate the text in a diff from a string of line hashes to real lines\nof text.\n\nArgs:\ndiffs: Array of diff tuples.\nlineArray: Array of unique strings.", "source": "codesearchnet"}
{"code": "def forward(self, x):\n        \n        head_outputs = [None] * self.t\n\n        \n        if isinstance(self.input_layer, list):  \n            input_outputs = [mod(x) for mod, x in zip(self.input_layer, x)]\n            x = torch.stack(input_outputs, dim=1)\n\n            \n            for t in self.task_map[0]:\n                head = self.heads[t]\n                head_outputs[t] = head(input_outputs[t])\n        else:  \n            x = self.input_layer(x)\n\n            \n            for t in self.task_map[0]:\n                head = self.heads[t]\n                head_outputs[t] = head(x)\n\n        \n        for i, layer in enumerate(self.middle_layers, start=1):\n            x = layer(x)\n\n            \n            for t in self.task_map[i]:\n                head = self.heads[t]\n                \n                if self.config[\"pass_predictions\"] and bool(self.task_graph.parents[t]):\n                    task_input = [x]\n                    for p in self.task_graph.parents[t]:\n                        task_input.append(head_outputs[p])\n                    task_input = torch.stack(task_input, dim=1)\n                else:\n                    task_input = x\n                head_outputs[t] = head(task_input)\n        return head_outputs", "docstring": "Returns a list of outputs for tasks 0,...t-1\n\nArgs:\nx: a [batch_size, ...] batch from X", "source": "juraj-google-style"}
{"code": "def send_message(\n        self, request: str, response_expected: bool, **kwargs: Any\n    ) -> Response:\n        \n        payload = str(request) + self.delimiter\n        self.socket.send(payload.encode(self.encoding))\n\n        response = bytes()\n        decoded = None\n\n        \n        \n        while True:\n            response += self.socket.recv(1024)\n\n            decoded = response.decode(self.encoding)\n            if len(decoded) < self.delimiter_length:\n                continue\n\n            \n            elif decoded[-self.delimiter_length :] == self.delimiter:\n                break\n\n        assert decoded is not None\n        return Response(decoded[: -self.delimiter_length])", "docstring": "Transport the message to the server and return the response.\n\nArgs:\nrequest: The JSON-RPC request string.\nresponse_expected: Whether the request expects a response.\n\nReturns:\nA Response object.", "source": "juraj-google-style"}
{"code": "def userhome(username=None):\n    \n    if username is None:\n        \n        if 'HOME' in os.environ:\n            userhome_dpath = os.environ['HOME']\n        else:  \n            if sys.platform.startswith('win32'):\n                \n                if 'USERPROFILE' in os.environ:\n                    userhome_dpath = os.environ['USERPROFILE']\n                elif 'HOMEPATH' in os.environ:\n                    drive = os.environ.get('HOMEDRIVE', '')\n                    userhome_dpath = join(drive, os.environ['HOMEPATH'])\n                else:\n                    raise OSError(\"Cannot determine the user's home directory\")\n            else:\n                \n                import pwd\n                userhome_dpath = pwd.getpwuid(os.getuid()).pw_dir\n    else:\n        \n        if sys.platform.startswith('win32'):  \n            \n            c_users = dirname(userhome())\n            userhome_dpath = join(c_users, username)\n            if not exists(userhome_dpath):\n                raise KeyError('Unknown user: {}'.format(username))\n        else:\n            import pwd\n            try:\n                pwent = pwd.getpwnam(username)\n            except KeyError:  \n                raise KeyError('Unknown user: {}'.format(username))\n            userhome_dpath = pwent.pw_dir\n    return userhome_dpath", "docstring": "Returns the user's home directory.\nIf `username` is None, this is the directory for the current user.\n\nArgs:\nusername (str): name of a user on the system\n\nReturns:\nPathLike: userhome_dpath: path to the home directory\n\nExample:\n>>> import getpass\n>>> username = getpass.getuser()\n>>> assert userhome() == expanduser('~')\n>>> assert userhome(username) == expanduser('~')", "source": "juraj-google-style"}
{"code": "def quantization_mode(self):\n    return self._quantization_mode", "docstring": "The quantization mode of this policy.\n\nReturns:\nThe quantization mode of this policy, as a string. If this policy is\nnot quantized, it will return `None`.", "source": "github-repos"}
{"code": "def index_update(x, idx, y):\n    return _index_update_helper(tf_np.ndarray._with_index_update, x, idx, y)", "docstring": "Pure equivalent of `x[idx] = y`.\n\nReturns the value of x that would result from the NumPy-style indexed\nassignment `x[idx] = y`. Because it's a pure function, `x` itself won't be\nchanged.\n\nArgs:\nx: an array with the values to be updated.\nidx: a Numpy-style index, consisting of `None`, integers, slice objects,\nellipses, ndarrays with integer dtypes, or a tuple of the above.\ny: the array of updates. `y` must be broadcastable to the shape of the array\nthat would be returned by `x[idx]`.\n\nReturns:\nThe updated version of `x`.", "source": "github-repos"}
{"code": "def _cmd_quote(cmd):\n    r\n    \n    pattern = re.compile('^(\\\\\"|\\').*|.*(\\\\\"|\\')$')\n    while pattern.match(cmd) is not None:\n        cmd = cmd.strip('\"').strip('\\'')\n    \n    \n    cmd = '\"{0}\"'.format(cmd)\n    return cmd", "docstring": "r'''\nHelper function to properly format the path to the binary for the service\nMust be wrapped in double quotes to account for paths that have spaces. For\nexample:\n\n``\"C:\\Program Files\\Path\\to\\bin.exe\"``\n\nArgs:\ncmd (str): Full path to the binary\n\nReturns:\nstr: Properly quoted path to the binary", "source": "juraj-google-style"}
{"code": "def parse_pair_args(labels, argclass):\n    label_data = set()\n    for arg in labels:\n        (name, value) = split_pair(arg, '=', nullable_idx=1)\n        label_data.add(argclass(name, value))\n    return label_data", "docstring": "Parse flags of key=value pairs and return a list of argclass.\n\nFor pair variables, we need to:\n* split the input into name=value pairs (value optional)\n* Create the EnvParam object\n\nArgs:\nlabels: list of 'key' or 'key=value' strings.\nargclass: Container class for args, must instantiate with argclass(k, v).\n\nReturns:\nlist of argclass objects.", "source": "codesearchnet"}
{"code": "def GetCommand(self, include_separators=True):\n    args = []\n    if self.name:\n        args.append(self.name)\n    for element in self.elements:\n        if element.HasError():\n            continue\n        if element.args:\n            args.extend(element.args)\n        if element.HasSeparator() and include_separators:\n            args.append(self.separator)\n    if self.NeedsSeparator() and include_separators:\n        args.append(self.separator)\n    return ' '.join((self._Quote(arg) for arg in args))", "docstring": "Returns the command representing the trace up to this point.\n\nArgs:\ninclude_separators: Whether or not to include separators in the command.\n\nReturns:\nA string representing a Fire CLI command that would produce this trace.", "source": "github-repos"}
{"code": "def update_endpoint(self, endpoint_name, endpoint_config_name):\n        \n        if not _deployment_entity_exists(lambda: self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)):\n            raise ValueError('Endpoint with name \"{}\" does not exist; please use an existing endpoint name'\n                             .format(endpoint_name))\n\n        self.sagemaker_client.update_endpoint(EndpointName=endpoint_name,\n                                              EndpointConfigName=endpoint_config_name)\n        return endpoint_name", "docstring": "Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request\n\nRaise an error if endpoint with endpoint_name does not exist.\n\nArgs:\nendpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.\nendpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.\n\nReturns:\nstr: Name of the Amazon SageMaker ``Endpoint`` being updated.", "source": "juraj-google-style"}
{"code": "def _extract_relative_dates(self, text: str) -> List[Extraction]:\n        \n        if not text or not self._etk:\n            return list()\n        base = self._settings[RELATIVE_BASE] if self._settings[RELATIVE_BASE] else datetime.datetime.now()\n        if not self._settings[RETURN_AS_TIMEZONE_AWARE]:\n            base = base.replace(tzinfo=None)\n        elif not base.tzinfo:\n            base = base.astimezone(self._default_tz)\n        res = SpacyRuleExtractor(self._etk.default_nlp, spacy_rules, 'relative_date_extractor').extract(text)\n        ans = list()\n        for relative_date in res:\n            if relative_date.rule_id == 'direction_number_unit':\n                direction, measure, unit = relative_date.value.split()\n                measure = num_to_digit[measure.lower()]\n            elif relative_date.rule_id == 'number_unit_direction':\n                measure, unit, direction = relative_date.value.split()\n                measure = num_to_digit[measure.lower()]\n            elif relative_date.rule_id == 'direction_digit_unit':\n                direction, measure, unit = relative_date.value.split()\n            elif relative_date.rule_id == 'digit_unit_direction':\n                measure, unit, direction = relative_date.value.split()\n            elif relative_date.rule_id == 'direction_unit':\n                direction, unit = relative_date.value.split()\n                measure = '1'\n            elif relative_date.rule_id == 'the_day':\n                unit = 'days'\n                key_ = relative_date.value.split()[-1].lower()\n                if key_ == 'today':\n                    direction = 'ago'\n                    measure = '0'\n                else:\n                    direction = 'ago' if key_ == 'yesterday' else 'later'\n                    measure = '1' if len(relative_date.value.split()) == 1 else '2'\n            else:\n                continue\n            unit = unit if unit[-1] == 's' else unit+'s'\n            direction = directions[direction.lower()] if direction.lower() in directions else '+'\n            delta_args = {unit: int(direction+measure)}\n            relative_delta = relativedelta(**delta_args)\n            date = self._post_process_date(base+relative_delta)\n            if date:\n                extraction_date = self._wrap_extraction(date,\n                                                       relative_date.value,\n                                                       relative_date.provenance['start_char'],\n                                                       relative_date.provenance['end_char'])\n                if extraction_date:\n                    ans.append(extraction_date)\n        return ans", "docstring": "Extract relative dates using spaCy rules\n\nArgs:\ntext: str - the text to extract the relative date strings from\n\nReturns: List of Extraction(s)", "source": "juraj-google-style"}
{"code": "def flatten_dict_items(dictionary):\n    return _pywrap_nest.FlattenDictItems(dictionary)", "docstring": "Returns a dictionary with flattened keys and values.\n\nThis function flattens the keys and values of a dictionary, which can be\narbitrarily nested structures, and returns the flattened version of such\nstructures:\n\n```python\nexample_dictionary = {(4, 5, (6, 8)): (\"a\", \"b\", (\"c\", \"d\"))}\nresult = {4: \"a\", 5: \"b\", 6: \"c\", 8: \"d\"}\nflatten_dict_items(example_dictionary) == result\n```\n\nThe input dictionary must satisfy two properties:\n\n1. Its keys and values should have the same exact nested structure.\n2. The set of all flattened keys of the dictionary must not contain repeated\nkeys.\n\nArgs:\ndictionary: the dictionary to zip\n\nReturns:\nThe zipped dictionary.\n\nRaises:\nTypeError: If the input is not a dictionary.\nValueError: If any key and value do not have the same structure layout, or\nif keys are not unique.", "source": "github-repos"}
{"code": "def convert_tanh(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting tanh ...')\n\n    if names == 'short':\n        tf_name = 'TANH' + random_string(4)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    tanh = keras.layers.Activation('tanh', name=tf_name)\n    layers[scope_name] = tanh(layers[inputs[0]])", "docstring": "Convert tanh layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def get_package(self, name) -> 'EffectPackage':\n        \n        name, cls_name = parse_package_string(name)\n\n        try:\n            return self.package_map[name]\n        except KeyError:\n            raise EffectError(\"No package '{}' registered\".format(name))", "docstring": "Get a package by python path. Can also contain path to an effect.\n\nArgs:\nname (str): Path to effect package or effect\n\nReturns:\nThe requested EffectPackage\n\nRaises:\nEffectError when no package is found", "source": "juraj-google-style"}
{"code": "def _events_from_file(filepath):\n    records = list(tf.compat.v1.python_io.tf_record_iterator(filepath))\n    result = []\n    for r in records:\n        event = tf.compat.v1.Event()\n        event.ParseFromString(r)\n        result.append(event)\n    return result", "docstring": "Returns all events in a single event file.\n\nArgs:\nfilepath: Path to the event file.\n\nReturns:\nA list of all tf.compat.v1.Event protos in the event file.", "source": "github-repos"}
{"code": "def _parse_peer_link(self, config):\n        \n        match = re.search(r'peer-link (\\S+)', config)\n        value = match.group(1) if match else None\n        return dict(peer_link=value)", "docstring": "Scans the config block and parses the peer-link value\n\nArgs:\nconfig (str): The config block to scan\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "juraj-google-style"}
{"code": "def get_path(self, url):\n        \n        cache_path = self._url_to_path(url)\n        if os.path.exists(cache_path):\n            return cache_path\n\n        return None", "docstring": "Returns the path of a cached resource.\n\nArgs:\nurl: The url of the resource\n\nReturns:\nThe path to the cached resource or None if not in the cache", "source": "juraj-google-style"}
{"code": "def evaluate(conditions, leaf_evaluator):\n    if isinstance(conditions, list):\n        if (conditions[0] in list(EVALUATORS_BY_OPERATOR_TYPE.keys())):\n            return EVALUATORS_BY_OPERATOR_TYPE[conditions[0]](conditions[1:], leaf_evaluator)\n        else:\n            return EVALUATORS_BY_OPERATOR_TYPE[ConditionOperatorTypes.OR](conditions, leaf_evaluator)\n    leaf_condition = conditions\n    return leaf_evaluator(leaf_condition)", "docstring": "Top level method to evaluate conditions.\n\nArgs:\nconditions: Nested array of and/or conditions, or a single leaf condition value of any type.\nExample: ['and', '0', ['or', '1', '2']]\nleaf_evaluator: Function which will be called to evaluate leaf condition values.\n\nReturns:\nBoolean: Result of evaluating the conditions using the operator rules and the leaf evaluator.\nNone: if conditions couldn't be evaluated.", "source": "codesearchnet"}
{"code": "def forward(self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool=False, _z_reference_list: Optional[Sequence[torch.Tensor]]=None) -> torch.Tensor:\n    z = [z]\n    q = self.linear_q(s)\n    kv = self.linear_kv(s)\n    q = q.view(q.shape[:-1] + (self.num_heads, -1))\n    kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))\n    k, v = torch.split(kv, self.hidden_dim, dim=-1)\n    q_pts = self.linear_q_points(s)\n    q_pts = torch.split(q_pts, q_pts.shape[-1] \n    q_pts = torch.stack(q_pts, dim=-1)\n    q_pts = r[..., None].apply(q_pts)\n    q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))\n    kv_pts = self.linear_kv_points(s)\n    kv_pts = torch.split(kv_pts, kv_pts.shape[-1] \n    kv_pts = torch.stack(kv_pts, dim=-1)\n    kv_pts = r[..., None].apply(kv_pts)\n    kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))\n    k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)\n    b = self.linear_b(z[0])\n    if _offload_inference:\n        assert sys.getrefcount(z[0]) == 2\n        z[0] = z[0].cpu()\n    if is_fp16_enabled():\n        with torch.cuda.amp.autocast(enabled=False):\n            a = torch.matmul(permute_final_dims(q.float(), (1, 0, 2)), permute_final_dims(k.float(), (1, 2, 0)))\n    else:\n        a = torch.matmul(permute_final_dims(q, (1, 0, 2)), permute_final_dims(k, (1, 2, 0)))\n    a *= math.sqrt(1.0 / (3 * self.hidden_dim))\n    a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))\n    pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)\n    pt_att = pt_att ** 2\n    pt_att = sum(torch.unbind(pt_att, dim=-1))\n    head_weights = self.softplus(self.head_weights).view(*(1,) * len(pt_att.shape[:-2]) + (-1, 1))\n    head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))\n    pt_att = pt_att * head_weights\n    pt_att = torch.sum(pt_att, dim=-1) * -0.5\n    square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)\n    square_mask = self.config.inf * (square_mask - 1)\n    pt_att = permute_final_dims(pt_att, (2, 0, 1))\n    a = a + pt_att\n    a = a + square_mask.unsqueeze(-3)\n    a = self.softmax(a)\n    o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)\n    o = flatten_final_dims(o, 2)\n    o_pt = torch.sum(a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :], dim=-2)\n    o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))\n    o_pt = r[..., None, None].invert_apply(o_pt)\n    o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.config.epsilon), 2)\n    o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)\n    if _offload_inference:\n        z[0] = z[0].to(o_pt.device)\n    o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))\n    o_pair = flatten_final_dims(o_pair, 2)\n    s = self.linear_out(torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype))\n    return s", "docstring": "Args:\ns:\n[*, N_res, C_s] single representation\nz:\n[*, N_res, N_res, C_z] pair representation\nr:\n[*, N_res] transformation object\nmask:\n[*, N_res] mask\nReturns:\n[*, N_res, C_s] single representation update", "source": "github-repos"}
{"code": "def Open(self, file_object):\n    \n    self._file_object = file_object\n    self._regf_file.open_file_object(self._file_object)\n    return True", "docstring": "Opens the Windows Registry file using a file-like object.\n\nArgs:\nfile_object (file): file-like object.\n\nReturns:\nbool: True if successful or False if not.", "source": "juraj-google-style"}
{"code": "def _retrieve_offsets(self, timestamps, timeout_ms=float('inf')):\n    if (not timestamps):\n        return {}\n    start_time = time.time()\n    remaining_ms = timeout_ms\n    while (remaining_ms > 0):\n        future = self._send_offset_requests(timestamps)\n        self._client.poll(future=future, timeout_ms=remaining_ms)\n        if future.succeeded():\n            return future.value\n        if (not future.retriable()):\n            raise future.exception\n        elapsed_ms = ((time.time() - start_time) * 1000)\n        remaining_ms = (timeout_ms - elapsed_ms)\n        if (remaining_ms < 0):\n            break\n        if future.exception.invalid_metadata:\n            refresh_future = self._client.cluster.request_update()\n            self._client.poll(future=refresh_future, timeout_ms=remaining_ms)\n        else:\n            time.sleep((self.config['retry_backoff_ms'] / 1000.0))\n        elapsed_ms = ((time.time() - start_time) * 1000)\n        remaining_ms = (timeout_ms - elapsed_ms)\n    raise Errors.KafkaTimeoutError(('Failed to get offsets by timestamps in %s ms' % (timeout_ms,)))", "docstring": "Fetch offset for each partition passed in ``timestamps`` map.\n\nBlocks until offsets are obtained, a non-retriable exception is raised\nor ``timeout_ms`` passed.\n\nArguments:\ntimestamps: {TopicPartition: int} dict with timestamps to fetch\noffsets by. -1 for the latest available, -2 for the earliest\navailable. Otherwise timestamp is treated as epoch miliseconds.\n\nReturns:\n{TopicPartition: (int, int)}: Mapping of partition to\nretrieved offset and timestamp. If offset does not exist for\nthe provided timestamp, that partition will be missing from\nthis mapping.", "source": "codesearchnet"}
{"code": "def internal_convert_n_to_tensor_or_composite(values, dtype=None, name=None, as_ref=False) -> list[Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor, type(None)]]:\n    if not isinstance(values, collections_abc.Sequence):\n        raise TypeError('values must be a sequence.')\n    ret = []\n    for i, value in enumerate(values):\n        if value is None:\n            ret.append(value)\n        else:\n            n = None if name is None else '%s_%d' % (name, i)\n            ret.append(internal_convert_to_tensor_or_composite(value, dtype=dtype, name=n, as_ref=as_ref))\n    return ret", "docstring": "Converts `values` to a list of `Tensor` or `CompositeTensor` objects.\n\nAny `CompositeTensor` objects in `values` are returned unmodified.\n\nArgs:\nvalues: A list of `None`, `CompositeTensor`, or objects that can be consumed\nby `convert_to_tensor()`.\ndtype: (Optional.) The required `DType` of the returned `Tensor`s or\n`CompositeTensor`s.\nname: (Optional.) A name prefix to used when a new `Tensor` is created, in\nwhich case element `i` will be given the name `name + '_' + i`.\nas_ref: True if the caller wants the results as ref tensors.\n\nReturns:\nA list of `Tensor`, `CompositeTensor`, and/or `None` objects.\n\nRaises:\nTypeError: If no conversion function is registered for an element in\n`values`.\nRuntimeError: If a registered conversion function returns an invalid\nvalue.", "source": "github-repos"}
{"code": "def get_new_requests(self):\n    content_type = self.__queue_item.response.headers.get('content-type')\n    scrapers = self.__get_all_scrapers()\n    new_requests = []\n    for scraper in scrapers:\n        instance = scraper(self.__options, self.__queue_item)\n        if self.__content_type_matches(content_type, instance.content_types):\n            new_requests.extend(instance.get_requests())\n    return new_requests", "docstring": "Retrieve all the new request that were found in this request.\n\nReturns:\nlist(:class:`nyawc.http.Request`): A list of request objects.", "source": "codesearchnet"}
{"code": "def get_examples(self, compact=False):\n    examples = copy.deepcopy(self._examples)\n    if (not compact):\n        return examples\n\n    def make_compact(d):\n        if (not isinstance(d, dict)):\n            return\n        for key in d:\n            if isinstance(d[key], dict):\n                inner_d = d[key]\n                if ((len(inner_d) == 1) and ('.tag' in inner_d)):\n                    d[key] = inner_d['.tag']\n                else:\n                    make_compact(inner_d)\n            if isinstance(d[key], list):\n                for item in d[key]:\n                    make_compact(item)\n    for example in examples.values():\n        if (isinstance(example.value, dict) and (len(example.value) == 1) and ('.tag' in example.value)):\n            example.value = example.value['.tag']\n        else:\n            make_compact(example.value)\n    return examples", "docstring": "Returns an OrderedDict mapping labels to Example objects.\n\nArgs:\ncompact (bool): If True, union members of void type are converted\nto their compact representation: no \".tag\" key or containing\ndict, just the tag as a string.", "source": "codesearchnet"}
{"code": "def get_chain(self, name, table=\"filter\"):\n        \n        return [r for r in self.rules if r[\"table\"] == table and r[\"chain\"] == name]", "docstring": "Get the list of rules for a particular chain. Chain order is kept intact.\n\nArgs:\nname (str): chain name, e.g. ``\ntable (str): table name, defaults to ``filter``\n\nReturns:\nlist: rules", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListNotificationChannelDescriptors = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelDescriptorsResponse.FromString,\n        )\n        self.GetNotificationChannelDescriptor = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelDescriptorRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannelDescriptor.FromString,\n        )\n        self.ListNotificationChannels = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/ListNotificationChannels\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.ListNotificationChannelsResponse.FromString,\n        )\n        self.GetNotificationChannel = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/GetNotificationChannel\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,\n        )\n        self.CreateNotificationChannel = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.CreateNotificationChannelRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,\n        )\n        self.UpdateNotificationChannel = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.UpdateNotificationChannelRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,\n        )\n        self.DeleteNotificationChannel = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.DeleteNotificationChannelRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.SendNotificationChannelVerificationCode = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode\",\n            
request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.SendNotificationChannelVerificationCodeRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.GetNotificationChannelVerificationCode = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.GetNotificationChannelVerificationCodeResponse.FromString,\n        )\n        self.VerifyNotificationChannel = channel.unary_unary(\n            \"/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__service__pb2.VerifyNotificationChannelRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_notification__pb2.NotificationChannel.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def write_to_file(self, filename='material_index.dat', plot=True):\n        \n        path = os.path.dirname(sys.modules[__name__].__file__) + '/'\n\n        dir_plot = 'material_index/'\n        if not os.path.exists(dir_plot):\n            os.makedirs(dir_plot)\n\n        for axis, name in zip(self.axes, self.axes_str):\n            root, ext = os.path.splitext(filename)\n            fn = dir_plot + root + '_'+ name + ext\n            with open(fn, 'w') as fs:\n                for n_row in np.abs(axis.n[::-1]):\n                    n_str = ','.join([str(v) for v in n_row])\n                    fs.write(n_str+'\\n')\n\n            if plot:\n                filename_image_prefix, _ = os.path.splitext(fn)\n                filename_image = filename_image_prefix + '.png'\n                args = {\n                    'title': 'Refractive Index Profile: %s' % name,\n                    'x_pts': self.xx.x_pts,\n                    'y_pts': self.xx.y_pts,\n                    'x_min': self.xx.x_min,\n                    'x_max': self.xx.x_max,\n                    'y_min': self.xx.y_min,\n                    'y_max': self.xx.y_max,\n                    'filename_data': fn,\n                    'filename_image': filename_image\n                }\n                if MPL:\n                    heatmap = np.loadtxt(args['filename_data'], delimiter=',')\n                    plt.clf()\n                    plt.title(args['title'])\n                    plt.xlabel('$x$')\n                    plt.ylabel('$y$')\n                    plt.imshow(np.flipud(heatmap),\n                               extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),\n                               aspect=\"auto\")\n                    plt.colorbar()\n                    plt.savefig(filename_image)\n                else:\n                    gp.gnuplot(path+'structure.gpi', args, silent=False)", "docstring": "Write the refractive index profile to file.\n\nArgs:\nfilename (str): The nominal filename the refractive\nindex data should be saved to.\nplot (bool): `True` if plots should be generates,\notherwise `False`.  Default is `True`.", "source": "juraj-google-style"}
{"code": "def _database_string(self):\n    if (self._database_string_internal is None):\n        db_str = firestore_client.FirestoreClient.database_root_path(self.project, self._database)\n        self._database_string_internal = db_str\n    return self._database_string_internal", "docstring": "The database string corresponding to this client's project.\n\nThis value is lazy-loaded and cached.\n\nWill be of the form\n\n``projects/{project_id}/databases/{database_id}``\n\nbut ``database_id == '(default)'`` for the time being.\n\nReturns:\nstr: The fully-qualified database string for the current\nproject. (The default database is also in this string.)", "source": "codesearchnet"}
{"code": "def smear(self, sigma):\n        \n        diff = [self.x[i + 1] - self.x[i] for i in range(len(self.x) - 1)]\n        avg_x_per_step = np.sum(diff) / len(diff)\n        if len(self.ydim) == 1:\n            self.y = gaussian_filter1d(self.y, sigma / avg_x_per_step)\n        else:\n            self.y = np.array([\n                gaussian_filter1d(self.y[:, k], sigma / avg_x_per_step)\n                for k in range(self.ydim[1])]).T", "docstring": "Apply Gaussian smearing to spectrum y value.\n\nArgs:\nsigma: Std dev for Gaussian smear function", "source": "juraj-google-style"}
{"code": "def parse(cls, buf: memoryview, params: Params) \\\n            -> Tuple[Parseable, memoryview]:\n        \n        for data_type in params.expected:\n            try:\n                return data_type.parse(buf, params)\n            except NotParseable:\n                pass\n        raise UnexpectedType(buf)", "docstring": "Parses the given buffer by attempting to parse the list of\n:attr:`~Params.expected` types until one of them succeeds,\nthen returns the parsed object.\n\nArgs:\nbuf: The bytes containing the data to be parsed.\nparams: The parameters used by some parseable types.", "source": "juraj-google-style"}
{"code": "def _get_segments(self, start, request_size):\n    \n    if not request_size:\n      return []\n\n    end = start + request_size\n    futures = []\n\n    while request_size > self._max_request_size:\n      futures.append(self._get_segment(start, self._max_request_size))\n      request_size -= self._max_request_size\n      start += self._max_request_size\n    if start < end:\n      futures.append(self._get_segment(start, end - start))\n    return [fut.get_result() for fut in futures]", "docstring": "Get segments of the file from Google Storage as a list.\n\nA large request is broken into segments to avoid hitting urlfetch\nresponse size limit. Each segment is returned from a separate urlfetch.\n\nArgs:\nstart: start offset to request. Inclusive. Have to be within the\nrange of the file.\nrequest_size: number of bytes to request.\n\nReturns:\nA list of file segments in order", "source": "juraj-google-style"}
{"code": "def lock(vcs, lock_object, wait=True):\n    if wait:\n        timeout = (- 1)\n    else:\n        timeout = 0\n    lock_path = _get_lock_path(vcs, lock_object)\n    lock = filelock.FileLock(lock_path)\n    with lock.acquire(timeout=timeout):\n        (yield)", "docstring": "A context manager that grabs the lock and releases it when done.\n\nThis blocks until the lock can be acquired.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nlock_object (Lock)\nwait (boolean) - whether to wait for the lock or error out\n\nRaises:\nTimeout", "source": "codesearchnet"}
{"code": "def get_shape(value: Union[types.FloatTensor, types.IntTensor]) -> types.IntTensor:\n    result = value.shape\n    return tf.shape(value) if None in result.as_list() else result", "docstring": "Returns the `shape` of a given `Tensor`.\n\nArgs:\nvalue: Scalar `Tensor of integers or real values.\n\nReturns:\n`Tensor` of integers with rank 1.", "source": "github-repos"}
{"code": "def is_registered(self, prefix):\n    return self._resolve_prefix(prefix) is not None", "docstring": "Test if a command prefix or its alias has a registered handler.\n\nArgs:\nprefix: A prefix or its alias, as a str.\n\nReturns:\nTrue iff a handler is registered for prefix.", "source": "github-repos"}
{"code": "def parse(self, text, key=None):\n    try:\n        data = json.loads(text)\n    except ValueError as e:\n        raise ValueError(('%s: Value: [%s]' % (e, text)))\n    if (data and key):\n        if (key not in data):\n            raise ValueError(('Invalid response (key %s not found): %s' % (key, data)))\n        data = data[key]\n    return data", "docstring": "Parses a response.\n\nArgs:\ntext (str): Text to parse\n\nKwargs:\nkey (str): Key to look for, if any\n\nReturns:\nParsed value\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def plot_heldout_prediction(input_vals, probs, fname, n=10, title=''):\n    fig = figure.Figure(figsize=(9, (3 * n)))\n    canvas = backend_agg.FigureCanvasAgg(fig)\n    for i in range(n):\n        ax = fig.add_subplot(n, 3, ((3 * i) + 1))\n        ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE[:(- 1)]), interpolation='None')\n        ax = fig.add_subplot(n, 3, ((3 * i) + 2))\n        for prob_sample in probs:\n            sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax)\n            ax.set_ylim([0, 1])\n        ax.set_title('posterior samples')\n        ax = fig.add_subplot(n, 3, ((3 * i) + 3))\n        sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax)\n        ax.set_ylim([0, 1])\n        ax.set_title('predictive probs')\n    fig.suptitle(title)\n    fig.tight_layout()\n    canvas.print_figure(fname, format='png')\n    print('saved {}'.format(fname))", "docstring": "Save a PNG plot visualizing posterior uncertainty on heldout data.\n\nArgs:\ninput_vals: A `float`-like Numpy `array` of shape\n`[num_heldout] + IMAGE_SHAPE`, containing heldout input images.\nprobs: A `float`-like Numpy array of shape `[num_monte_carlo,\nnum_heldout, num_classes]` containing Monte Carlo samples of\nclass probabilities for each heldout sample.\nfname: Python `str` filename to save the plot to.\nn: Python `int` number of datapoints to visualize.\ntitle: Python `str` title for the plot.", "source": "codesearchnet"}
{"code": "def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model\nconfiguration.\n\nReturns:\n[`BlipConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def printMe(self, selfKey, selfValue):\n        \n        text = '<key>{keyName}</key>\\n'.format(keyName=selfKey)\n\n        if len(selfValue) == 0:\n            return ''\n        else:\n            valueText = ''\n            for element in selfValue:\n                if singleOrPair(element) == 'Single':\n                    valueText += element.printMe(element.tag, element.value)\n                elif singleOrPair(element) == 'Pair':\n                    valueText += element.printMe(element.key, element.value)\n                \n\n        text += valueText\n        return text", "docstring": "Parse the key and its value and return the parsed str.\n\nArgs:\nselfKey (str): The key. Normally just ``self.key``\nselfValue (list): a list of value elements (single, subclasses, str, int). Normally just ``self.value``\n\nReturns:\nstr: A parsed text", "source": "juraj-google-style"}
{"code": "def SetCampaignTargetingCriteria(client, campaign):\n  \n  campaign_criterion_service = client.GetService('CampaignCriterionService')\n\n  \n  \n  criteria = [\n      {\n          'xsi_type': 'Location',\n          'id': 21137  \n      },\n      {\n          'xsi_type': 'Location',\n          'id': 2484  \n      },\n      {\n          'xsi_type': 'Language',\n          'id': 1000  \n      },\n      {\n          'xsi_type': 'Language',\n          'id': 1003  \n      }\n  ]\n\n  operations = [{\n      'operator': 'ADD',\n      'operand': {\n          'campaignId': campaign['id'],\n          'criterion': criterion\n      }\n  } for criterion in criteria]\n\n  response = campaign_criterion_service.mutate(operations)\n\n  if response and 'value' in response:\n    \n    for criterion in response['value']:\n      print ('Campaign criteria of type \"%s\" and id \"%s\" was added.'\n             % (criterion['criterion']['type'],\n                criterion['criterion']['id']))", "docstring": "Sets targeting criteria for the given campaign.\n\nArgs:\nclient: An AdWordsClient instance.\ncampaign: A suds object representing the campaign we wish to attach\ntargeting criteria.", "source": "juraj-google-style"}
{"code": "def upgrade_name(self, user_):\n        \n        if user_.name_type > self.name_type:\n            self.full_name = user_.full_name\n            self.first_name = user_.first_name\n            self.name_type = user_.name_type\n            logger.debug('Added %s name to User \"%s\": %s',\n                         self.name_type.name.lower(), self.full_name, self)", "docstring": "Upgrade name type of this user.\n\nGoogle Voice participants often first appear with no name at all, and\nthen get upgraded unpredictably to numbers (\"+12125551212\") or names.\n\nArgs:\nuser_ (~hangups.user.User): User to upgrade with.", "source": "juraj-google-style"}
{"code": "def has_node_with_value(self, value):\n        \n        for node in self.node_list:\n            if node.value == value:\n                return True\n        else:\n            return False", "docstring": "Whether any node in ``self.node_list`` has the value ``value``.\n\nArgs:\nvalue (Any): The value to find in ``self.node_list``\n\nReturns: bool\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> graph.has_node_with_value('One')\nTrue\n>>> graph.has_node_with_value('Foo')\nFalse", "source": "juraj-google-style"}
{"code": "def get_distribution_dict(metric_type, submit_timestamp, dist, metric_id):\n    return DistributionMetric(dist, submit_timestamp, metric_id, metric_type).as_dict()", "docstring": "Function creates :class:`DistributionMetric`\n\nArgs:\nmetric_type(str): type of value from distribution metric which will\nbe saved (ex. max, min, mean, sum)\nsubmit_timestamp: timestamp when metric is saved\ndist(object): distribution object from pipeline result\nmetric_id(uuid): id of the current test run\n\nReturns:\ndictionary prepared for saving according to schema", "source": "github-repos"}
{"code": "def profile_view(request, user_id=None):\n    \n    if request.user.is_eighthoffice and \"full\" not in request.GET and user_id is not None:\n        return redirect(\"eighth_profile\", user_id=user_id)\n\n    if user_id is not None:\n        try:\n            profile_user = User.objects.get(id=user_id)\n\n            if profile_user is None:\n                raise Http404\n        except User.DoesNotExist:\n            raise Http404\n    else:\n        profile_user = request.user\n\n    num_blocks = 6\n\n    eighth_schedule = []\n    start_block = EighthBlock.objects.get_first_upcoming_block()\n\n    blocks = []\n    if start_block:\n        blocks = [start_block] + list(start_block.next_blocks(num_blocks - 1))\n\n    for block in blocks:\n        sch = {\"block\": block}\n        try:\n            sch[\"signup\"] = EighthSignup.objects.get(scheduled_activity__block=block, user=profile_user)\n        except EighthSignup.DoesNotExist:\n            sch[\"signup\"] = None\n        except MultipleObjectsReturned:\n            client.captureException()\n            sch[\"signup\"] = None\n        eighth_schedule.append(sch)\n\n    if profile_user.is_eighth_sponsor:\n        sponsor = EighthSponsor.objects.get(user=profile_user)\n        start_date = get_start_date(request)\n        eighth_sponsor_schedule = (EighthScheduledActivity.objects.for_sponsor(sponsor).filter(block__date__gte=start_date).order_by(\n            \"block__date\", \"block__block_letter\"))\n        eighth_sponsor_schedule = eighth_sponsor_schedule[:10]\n    else:\n        eighth_sponsor_schedule = None\n\n    admin_or_teacher = (request.user.is_eighth_admin or request.user.is_teacher)\n    can_view_eighth = (profile_user.can_view_eighth or request.user == profile_user)\n    eighth_restricted_msg = (not can_view_eighth and admin_or_teacher)\n\n    if not can_view_eighth and not request.user.is_eighth_admin and not request.user.is_teacher:\n        eighth_schedule = []\n\n    has_been_nominated = profile_user.username in [\n        u.nominee.username for u in request.user.nomination_votes.filter(position__position_name=settings.NOMINATION_POSITION)\n    ]\n    context = {\n        \"profile_user\": profile_user,\n        \"eighth_schedule\": eighth_schedule,\n        \"can_view_eighth\": can_view_eighth,\n        \"eighth_restricted_msg\": eighth_restricted_msg,\n        \"eighth_sponsor_schedule\": eighth_sponsor_schedule,\n        \"nominations_active\": settings.NOMINATIONS_ACTIVE,\n        \"nomination_position\": settings.NOMINATION_POSITION,\n        \"has_been_nominated\": has_been_nominated\n    }\n    return render(request, \"users/profile.html\", context)", "docstring": "Displays a view of a user's profile.\n\nArgs:\nuser_id\nThe ID of the user whose profile is being viewed. If not\nspecified, show the user's own profile.", "source": "juraj-google-style"}
{"code": "def _apply_conv(self, inputs, w):\n    \n    w_dw, w_pw = w\n    outputs = tf.nn.separable_conv2d(inputs,\n                                     w_dw,\n                                     w_pw,\n                                     rate=self._rate,\n                                     strides=self.stride,\n                                     padding=self._conv_op_padding,\n                                     data_format=self._data_format)\n    return outputs", "docstring": "Apply a `separable_conv2d` operation on `inputs` using `w`.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\nw: A tuple of weight matrices of the same type as `inputs`, the first\nbeing the depthwise weight matrix, and the second being the pointwise\nweight matrix.\n\nReturns:\noutputs: The result of the convolution operation on `inputs`.", "source": "juraj-google-style"}
{"code": "def is_storage(url, storage=None):\n    \n    if storage:\n        return True\n    split_url = url.split('://', 1)\n    if len(split_url) == 2 and split_url[0].lower() != 'file':\n        return True\n    return False", "docstring": "Check if file is a local file or a storage file.\n\nFile is considered local if:\n- URL is a local path.\n- URL starts by \"file://\"\n- no \"storage\" is provided.\n\nArgs:\nurl (str): file path or URL\nstorage (str): Storage name.\n\nReturns:\nbool: True if file is a storage file, False if local.", "source": "juraj-google-style"}
{"code": "def association(self, group_xid):\n    association = {'groupXid': group_xid}\n    self._indicator_data.setdefault('associatedGroups', []).append(association)", "docstring": "Add association using xid value.\n\nArgs:\ngroup_xid (str): The external id of the Group to associate.", "source": "codesearchnet"}
{"code": "def hint_for_accuracy(self, accuracy=\"normal\"):\n        \n        if not self.has_dojo_report:\n            return Hint(ecut=0., pawecutdg=0.)\n\n        \n        if \"hints\" in self.dojo_report:\n            return Hint.from_dict(self.dojo_report[\"hints\"][accuracy])\n        elif \"ppgen_hints\" in self.dojo_report:\n            return Hint.from_dict(self.dojo_report[\"ppgen_hints\"][accuracy])\n        return Hint(ecut=0., pawecutdg=0.)", "docstring": "Returns a :class:`Hint` object with the suggested value of ecut [Ha] and\npawecutdg [Ha] for the given accuracy.\necut and pawecutdg are set to zero if no hint is available.\n\nArgs:\naccuracy: [\"low\", \"normal\", \"high\"]", "source": "juraj-google-style"}
{"code": "def incident(self, name, **kwargs):\n        \n        group_obj = Incident(name, **kwargs)\n        return self._group(group_obj)", "docstring": "Add Incident data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nevent_date (str, kwargs): The event datetime expression for this Group.\nstatus (str, kwargs): The status for this Group.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Incident.", "source": "juraj-google-style"}
{"code": "def set_logging_verbosity(level):\n    valid_levels = {'FATAL': logging.FATAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG}\n    verbosity = valid_levels.get(level)\n    if verbosity is None:\n        raise ValueError(f'Please pass a valid level for logging verbosity. Expected one of: {set(valid_levels.keys())}. Received: {level}')\n    logging.set_verbosity(verbosity)", "docstring": "Sets the verbosity level for logging.\n\nSupported log levels are as follows:\n\n- `\"FATAL\"` (least verbose)\n- `\"ERROR\"`\n- `\"WARNING\"`\n- `\"INFO\"`\n- `\"DEBUG\"` (most verbose)\n\nArgs:\nlevel: A string corresponding to the level of verbosity for logging.", "source": "github-repos"}
{"code": "def _check_keyword_only_parameters(method_signature, base_signature, is_subtype):\n    base_kwonly_params = set(base_signature.kwonly_params)\n    method_kwonly_params = set(method_signature.kwonly_params)\n    method_defaults = set(method_signature.defaults)\n    if not base_signature.kwargs_name:\n        for method_param_name in method_kwonly_params.difference(base_kwonly_params).difference(method_defaults):\n            return SignatureError(SignatureErrorType.DEFAULT_PARAMETER_MISMATCH, f\"Parameter '{method_param_name}' must have a default value.\")\n    for base_param_name in base_kwonly_params.difference(method_kwonly_params):\n        try:\n            method_param_index = method_signature.param_names.index(base_param_name)\n        except ValueError:\n            if not method_signature.kwargs_name:\n                return SignatureError(SignatureErrorType.KWONLY_PARAMETER_NAME_MISMATCH, f\"Parameter '{base_param_name}' not found in overriding method.\")\n        else:\n            if method_param_index < method_signature.posonly_count:\n                return SignatureError(SignatureErrorType.KWONLY_PARAMETER_NAME_MISMATCH, f\"Keyword-only parameter '{base_param_name}' of the overridden method has the same name as a positional-only parameter of the overriding method.\")\n    for base_param_name in base_signature.kwonly_params:\n        try:\n            base_param_type = base_signature.annotations[base_param_name]\n        except KeyError:\n            continue\n        if base_param_name in method_kwonly_params or base_param_name in method_signature.param_names:\n            method_param_name = base_param_name\n        elif method_signature.kwargs_name:\n            method_param_name = method_signature.kwargs_name\n        else:\n            continue\n        try:\n            method_param_type = method_signature.annotations[method_param_name]\n        except KeyError:\n            continue\n        if method_param_name == method_signature.kwargs_name:\n            if isinstance(method_param_type, abstract.ParameterizedClass):\n                method_param_type = method_param_type.get_formal_type_parameter(abstract_utils.V)\n            else:\n                continue\n        if not is_subtype(base_param_type, method_param_type):\n            return SignatureError(SignatureErrorType.KWONLY_PARAMETER_TYPE_MISMATCH, f\"Type mismatch for parameter '{base_param_name}'.\")\n    return None", "docstring": "Checks that the keyword-only parameters of the overriding method match.\n\nArgs:\nmethod_signature: signature of the overriding method.\nbase_signature: signature of the overridden method.\nis_subtype: a binary function to compare types.\n\nReturns:\nSignatureError if a mismatch is detected. Otherwise returns None.", "source": "github-repos"}
{"code": "def read_tree_nexml(nexml):\n    \n    if not isinstance(nexml, str):\n        raise TypeError(\"nexml must be a str\")\n    if nexml.lower().endswith('.gz'): \n        f = gopen(expanduser(nexml))\n    elif isfile(expanduser(nexml)): \n        f = open(expanduser(nexml))\n    else:\n        f = nexml.splitlines()\n    trees = dict(); id_to_node = dict(); tree_id = None\n    for line in f:\n        if isinstance(line,bytes):\n            l = line.decode().strip()\n        else:\n            l = line.strip()\n        l_lower = l.lower()\n        \n        if l_lower.startswith('<tree '):\n            if tree_id is not None:\n                raise ValueError(INVALID_NEXML)\n            parts = l.split()\n            for part in parts:\n                if '=' in part:\n                    k,v = part.split('='); k = k.strip()\n                    if k.lower() == 'id':\n                        tree_id = v.split('\"')[1]; break\n            if tree_id is None:\n                raise ValueError(INVALID_NEXML)\n            trees[tree_id] = Tree(); trees[tree_id].root = None\n        \n        elif l_lower.replace(' ','').startswith('</tree>'):\n            if tree_id is None:\n                raise ValueError(INVALID_NEXML)\n            id_to_node = dict(); tree_id = None\n        \n        elif l_lower.startswith('<node '):\n            if tree_id is None:\n                raise ValueError(INVALID_NEXML)\n            node_id = None; node_label = None; is_root = False\n            k = ''; v = ''; in_key = True; in_quote = False\n            for i in range(6, len(l)):\n                if l[i] == '\"' or l[i] == \"'\":\n                    in_quote = not in_quote\n                if not in_quote and in_key and l[i] == '=':\n                    in_key = False\n                elif not in_quote and not in_key and (l[i] == '\"' or l[i] == \"'\"):\n                    k = k.strip()\n                    if k.lower() == 'id':\n                        node_id = v\n                    elif k.lower() == 'label':\n                        node_label = v\n                    elif k.lower() == 'root' and v.strip().lower() == 'true':\n                        is_root = True\n                    in_key = True; k = ''; v = ''\n                elif in_key and not (l[i] == '\"' or l[i] == \"'\"):\n                    k += l[i]\n                elif not in_key and not (l[i] == '\"' or l[i] == \"'\"):\n                    v += l[i]\n            if node_id is None or node_id in id_to_node:\n                raise ValueError(INVALID_NEXML)\n            id_to_node[node_id] = Node(label=node_label)\n            if is_root:\n                if trees[tree_id].root is not None:\n                    raise ValueError(INVALID_NEXML)\n                trees[tree_id].root = id_to_node[node_id]\n        \n        elif l_lower.startswith('<edge '):\n            if tree_id is None:\n                raise ValueError(INVALID_NEXML)\n            source = None; target = None; length = None\n            parts = l.split()\n            for part in parts:\n                if '=' in part:\n                    k,v = part.split('='); k = k.strip(); k_lower = k.lower()\n                    if k_lower == 'source':\n                        source = v.split('\"')[1]\n                    elif k_lower == 'target':\n                        target = v.split('\"')[1]\n                    elif k_lower == 'length':\n                        length = float(v.split('\"')[1])\n            if source is None or target is None or length is None:\n                raise ValueError(INVALID_NEXML)\n            if source not in id_to_node:\n                raise ValueError(INVALID_NEXML)\n            if target not in id_to_node:\n                raise ValueError(INVALID_NEXML)\n            id_to_node[source].add_child(id_to_node[target])\n            id_to_node[target].edge_length = length\n        elif l_lower.startswith('<rootedge '):\n            if tree_id is None:\n                raise ValueError(INVALID_NEXML)\n            root_node = None; length = None\n            parts = l.split()\n            for part in parts:\n                if '=' in part:\n                    k,v = part.split('='); k = k.strip(); k_lower = k.lower()\n                    if k_lower == 'target':\n                        root_node = id_to_node[v.split('\"')[1]]\n                    elif k_lower == 'length':\n                        length = float(v.split('\"')[1])\n            if trees[tree_id].root is None:\n                raise ValueError(INVALID_NEXML)\n            if root_node is not None and trees[tree_id].root != root_node:\n                raise ValueError(INVALID_NEXML)\n            trees[tree_id].root.edge_length = length\n    if hasattr(f,'close'):\n        f.close()\n    return trees", "docstring": "Read a tree from a NeXML string or file\n\nArgs:\n``nexml`` (``str``): Either a NeXML string or the path to a NeXML file (plain-text or gzipped)\n\nReturns:\n``dict`` of ``Tree``: A dictionary of the trees represented by ``nexml``, where keys are tree names (``str``) and values are ``Tree`` objects", "source": "juraj-google-style"}
{"code": "def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):\n    if ((layer_index < 0) or (layer_index > self.num_hidden_layers)):\n        raise ValueError('Invalid layer index')\n    layer_type = self.layer_types[layer_index]\n    weight = self.weights[layer_index]\n    if is_abs:\n        weight = tf.abs(weight)\n    if is_transpose:\n        vector = tf.reshape(vector, self.output_shapes[layer_index])\n    else:\n        vector = tf.reshape(vector, self.input_shapes[layer_index])\n    if (layer_type in {'ff', 'ff_relu'}):\n        if is_transpose:\n            weight = tf.transpose(weight)\n        return_vector = tf.matmul(weight, vector)\n    elif (layer_type in {'conv', 'conv_relu'}):\n        if is_transpose:\n            return_vector = tf.nn.conv2d_transpose(vector, weight, output_shape=self.input_shapes[layer_index], strides=[1, self.cnn_params[layer_index]['stride'], self.cnn_params[layer_index]['stride'], 1], padding=self.cnn_params[layer_index]['padding'])\n        else:\n            return_vector = tf.nn.conv2d(vector, weight, strides=[1, self.cnn_params[layer_index]['stride'], self.cnn_params[layer_index]['stride'], 1], padding=self.cnn_params[layer_index]['padding'])\n    else:\n        raise NotImplementedError('Unsupported layer type: {0}'.format(layer_type))\n    if is_transpose:\n        return tf.reshape(return_vector, (self.sizes[layer_index], 1))\n    return tf.reshape(return_vector, (self.sizes[(layer_index + 1)], 1))", "docstring": "Performs forward pass through the layer weights at layer_index.\n\nArgs:\nvector: vector that has to be passed through in forward pass\nlayer_index: index of the layer\nis_transpose: whether the weights of the layer have to be transposed\nis_abs: whether to take the absolute value of the weights\n\nReturns:\ntensor that corresponds to the forward pass through the layer\nRaises:\nValueError: if the layer_index is negative or more than num hidden layers", "source": "codesearchnet"}
{"code": "def in_batches(iterable, batch_size):\n    items = list(iterable)\n    size = len(items)\n    for i in range(0, size, batch_size):\n        (yield items[i:min((i + batch_size), size)])", "docstring": "Split the given iterable into batches.\n\nArgs:\niterable (Iterable[Any]):\nThe iterable you want to split into batches.\nbatch_size (int):\nThe size of each bach. The last batch will be probably smaller (if\nthe number of elements cannot be equally divided.\n\nReturns:\nGenerator[list[Any]]: Will yield all items in batches of **batch_size**\nsize.\n\nExample:\n\n>>> from peltak.core import util\n>>>\n>>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3)\n>>> batches = list(batches)     # so we can query for lenght\n>>> len(batches)\n3\n>>> batches\n[[1, 2, 3], [4, 5, 6], [7]]", "source": "codesearchnet"}
{"code": "def _stdout_list_split(retcode, stdout='', splitstring='\\n'):\n    \n    if retcode == 0:\n        ret = stdout.split(splitstring)\n        return ret\n    else:\n        return False", "docstring": "Evaluates Open vSwitch command's retcode value.\n\nArgs:\nretcode: Value of retcode field from response, should be 0, 1 or 2.\nstdout: Value of stdout field from response.\nsplitstring: String used to split the stdout, defaults to new line.\n\nReturns:\nList or False.", "source": "juraj-google-style"}
{"code": "def ApprovalCreateRaw(aff4_path, reason='', expire_in=(((60 * 60) * 24) * 7), token=None, approval_type='ClientApproval'):\n    if (approval_type in ['ClientApproval', security.ClientApproval]):\n        urn = rdf_client.ClientURN(aff4_path)\n    else:\n        urn = rdfvalue.RDFURN(aff4_path)\n    if (not token):\n        expiry = (time.time() + expire_in)\n        token = access_control.ACLToken(reason=reason, expiry=expiry)\n    if (not token.reason):\n        raise RuntimeError('Cannot create approval with empty reason')\n    if (not token.username):\n        token.username = getpass.getuser()\n    approval_urn = security.ApprovalRequestor.ApprovalUrnBuilder(urn.Path(), token.username, token.reason)\n    super_token = access_control.ACLToken(username='raw-approval-superuser')\n    super_token.supervisor = True\n    if isinstance(approval_type, string_types):\n        approval_type_cls = aff4.AFF4Object.classes[approval_type]\n    else:\n        approval_type_cls = approval_type\n    approval_request = aff4.FACTORY.Create(approval_urn, approval_type_cls, mode='rw', token=super_token)\n    approval_request.AddAttribute(approval_request.Schema.APPROVER(('%s1-raw' % token.username)))\n    approval_request.AddAttribute(approval_request.Schema.APPROVER(('%s-raw2' % token.username)))\n    approval_request.Close()", "docstring": "Creates an approval with raw access.\n\nThis method requires raw datastore access to manipulate approvals directly.\nThis currently doesn't work for hunt or cron approvals, because they check\nthat each approver has the admin label.  Since the fake users don't exist the\ncheck fails.\n\nArgs:\naff4_path: The aff4_path or client id the approval should be created for.\nreason: The reason to put in the token.\nexpire_in: Expiry in seconds to use in the token.\ntoken: The token that will be used. If this is specified reason and expiry\nare ignored.\napproval_type: The type of the approval to create.\n\nReturns:\nThe token.\n\nRaises:\nRuntimeError: On bad token.", "source": "codesearchnet"}
{"code": "def awscli_defaults(os_type=None):\n    \n\n    try:\n        if os_type is None:\n            os_type = platform.system()\n\n        if os_type == 'Linux':\n            HOME = os.environ['HOME']\n            awscli_credentials = HOME + '/.aws/credentials'\n            awscli_config = HOME + '/.aws/config'\n        elif os_type == 'Windows':\n            username = os.getenv('username')\n            awscli_credentials = 'C:\\\\Users\\\\' + username + '\\\\.aws\\\\credentials'\n            awscli_config = 'C:\\\\Users\\\\' + username + '\\\\.aws\\\\config'\n        elif os_type == 'Java':\n            logger.warning('Unsupported OS. No information')\n            HOME = os.environ['HOME']\n            awscli_credentials = HOME + '/.aws/credentials'\n            awscli_config = HOME + '/.aws/config'\n        alt_credentials = os.getenv('AWS_SHARED_CREDENTIALS_FILE')\n    except OSError as e:\n        logger.exception(\n            '%s: problem determining local os environment %s' %\n            (inspect.stack()[0][3], str(e))\n            )\n        raise e\n    return {\n                'awscli_defaults': {\n                    'awscli_credentials': awscli_credentials,\n                    'awscli_config': awscli_config,\n                    'alt_credentials': alt_credentials\n                }\n            }", "docstring": "Summary:\nDetermine the default locations of the local awscli credentials and config files\nArgs:\n:os_type (str): operating system name; detected via platform.system() when None\nReturns:\nTYPE: dict object containing key, value pairs describing\nthe awscli credentials, config, and alternate credentials file paths", "source": "juraj-google-style"}
{"code": "def get_name_servers(self, id_or_uri):\n        \n\n        uri = self._client.build_uri(id_or_uri) + \"/nameServers\"\n        return self._client.get(uri)", "docstring": "Gets the name servers for an interconnect.\n\nArgs:\nid_or_uri:  Can be either the interconnect id or the interconnect uri.\n\nReturns:\ndict: the name servers for an interconnect.", "source": "juraj-google-style"}
{"code": "def _client_send(self, msg):\n    try:\n        self._client.write((msg.encode('utf8') + b'\\n'))\n        self._client.flush()\n        self.log.debug('Snippet sent %s.', msg)\n    except socket.error as e:\n        raise Error(self._ad, ('Encountered socket error \"%s\" sending RPC message \"%s\"' % (e, msg)))", "docstring": "Sends an Rpc message through the connection.\n\nArgs:\nmsg: string, the message to send.\n\nRaises:\nError: a socket error occurred during the send.", "source": "codesearchnet"}
{"code": "def BSearch(a, x, lo=0, hi=None):\n    \n    if len(a) == 0: return -1\n    hi = hi if hi is not None else len(a)\n    pos = bisect_left(a, x, lo, hi)\n    return pos if pos != hi and a[pos] == x else -1", "docstring": "Returns index of x in a, or -1 if x not in a.\n\nArguments:\na -- ordered numeric sequence\nx -- element to search within a\nlo -- lowest index to consider in search*\nhi -- highest index to consider in search*\n\n*bisect.bisect_left capability that we don't need to lose.", "source": "juraj-google-style"}
{"code": "def _create_datadict(cls, internal_name):\n        \n        if internal_name == \"LOCATION\":\n            return Location()\n        if internal_name == \"DESIGN CONDITIONS\":\n            return DesignConditions()\n        if internal_name == \"TYPICAL/EXTREME PERIODS\":\n            return TypicalOrExtremePeriods()\n        if internal_name == \"GROUND TEMPERATURES\":\n            return GroundTemperatures()\n        if internal_name == \"HOLIDAYS/DAYLIGHT SAVINGS\":\n            return HolidaysOrDaylightSavings()\n        if internal_name == \"COMMENTS 1\":\n            return Comments1()\n        if internal_name == \"COMMENTS 2\":\n            return Comments2()\n        if internal_name == \"DATA PERIODS\":\n            return DataPeriods()\n        raise ValueError(\n            \"No DataDictionary known for {}\".format(internal_name))", "docstring": "Creates an object depending on `internal_name`\n\nArgs:\ninternal_name (str): IDD name\n\nRaises:\nValueError: if `internal_name` cannot be matched to a data dictionary object", "source": "juraj-google-style"}
{"code": "def file(self, owner=None, **kwargs):\n        \n        return File(self.tcex, owner=owner, **kwargs)", "docstring": "Create the File TI object.\n\nArgs:\nowner: The ThreatConnect owner name.\n**kwargs: Additional keyword arguments passed to the File object.\n\nReturn:\nobj: An instance of File.", "source": "juraj-google-style"}
{"code": "def __init__(self, direction, edge_name, depth, within_optional_scope=False):\n        \n        super(Recurse, self).__init__(\n            direction, edge_name, depth, within_optional_scope=within_optional_scope)\n        self.direction = direction\n        self.edge_name = edge_name\n        self.depth = depth\n        \n        self.within_optional_scope = within_optional_scope\n        self.validate()", "docstring": "Create a new Recurse block which traverses the given edge up to \"depth\" times.\n\nArgs:\ndirection: string, 'in' or 'out'.\nedge_name: string obeying variable name rules (see validate_safe_string).\ndepth: int, always greater than or equal to 1.\n\nReturns:\nnew Recurse object", "source": "juraj-google-style"}
{"code": "def ray_get_and_free(object_ids):\n    global _last_free_time\n    global _to_free\n    result = ray.get(object_ids)\n    if (type(object_ids) is not list):\n        object_ids = [object_ids]\n    _to_free.extend(object_ids)\n    now = time.time()\n    if ((len(_to_free) > MAX_FREE_QUEUE_SIZE) or ((now - _last_free_time) > FREE_DELAY_S)):\n        ray.internal.free(_to_free)\n        _to_free = []\n        _last_free_time = now\n    return result", "docstring": "Call ray.get and then queue the object ids for deletion.\n\nThis function should be used whenever possible in RLlib, to optimize\nmemory usage. The only exception is when an object_id is shared among\nmultiple readers.\n\nArgs:\nobject_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.\n\nReturns:\nThe result of ray.get(object_ids).", "source": "codesearchnet"}
{"code": "def chmod(self, path, mode, follow_symlinks=True):\n        \n        try:\n            file_object = self.resolve(path, follow_symlinks, allow_fd=True)\n        except IOError as io_error:\n            if io_error.errno == errno.ENOENT:\n                self.raise_os_error(errno.ENOENT, path)\n            raise\n        if self.is_windows_fs:\n            if mode & PERM_WRITE:\n                file_object.st_mode = file_object.st_mode | 0o222\n            else:\n                file_object.st_mode = file_object.st_mode & 0o777555\n        else:\n            file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |\n                                   (mode & PERM_ALL))\n        file_object.st_ctime = time.time()", "docstring": "Change the permissions of a file as encoded in integer mode.\n\nArgs:\npath: (str) Path to the file.\nmode: (int) Permissions.\nfollow_symlinks: If `False` and `path` points to a symlink,\nthe link itself is affected instead of the linked object.", "source": "juraj-google-style"}
{"code": "def __ge__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value >= other.value", "docstring": "Returns True if `self` is known to be greater than or equal to `other`.\n\nDimensions are compared as follows:\n\n```python\n(tf.compat.v1.Dimension(m)    >= tf.compat.v1.Dimension(n))    == (m >= n)\n(tf.compat.v1.Dimension(m)    >= tf.compat.v1.Dimension(None)) == None\n(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n))    == None\n(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None\n```\n\nArgs:\nother: Another Dimension.\n\nReturns:\nThe value of `self.value >= other.value` if both are known, otherwise\nNone.", "source": "github-repos"}
{"code": "def serialize_to_xml(root, block):\n    root.tag = 'ubcpi'\n    if (block.rationale_size is not None):\n        if block.rationale_size.get('min'):\n            root.set('rationale_size_min', unicode(block.rationale_size.get('min')))\n        if block.rationale_size.get('max'):\n            root.set('rationale_size_max', unicode(block.rationale_size['max']))\n    if block.algo:\n        if block.algo.get('name'):\n            root.set('algorithm', block.algo.get('name'))\n        if block.algo.get('num_responses'):\n            root.set('num_responses', unicode(block.algo.get('num_responses')))\n    display_name = etree.SubElement(root, 'display_name')\n    display_name.text = block.display_name\n    question = etree.SubElement(root, 'question')\n    question_text = etree.SubElement(question, 'text')\n    question_text.text = block.question_text['text']\n    serialize_image(block.question_text, question)\n    options = etree.SubElement(root, 'options')\n    serialize_options(options, block)\n    seeds = etree.SubElement(root, 'seeds')\n    serialize_seeds(seeds, block)", "docstring": "Serialize the Peer Instruction XBlock's content to XML.\n\nArgs:\nblock (PeerInstructionXBlock): The peer instruction block to serialize.\nroot (etree.Element): The XML root node to update.\n\nReturns:\netree.Element", "source": "codesearchnet"}
{"code": "def __init__(self, name, description, optional=False):\n        \n        self.name = name\n        self.description = description\n        self.optional = optional", "docstring": "Parameter descriptor\n\nArgs:\nname: 1 word parameter identifier.\ndescription: short description of the purpose of the parameter. What does it configure/do.\noptional: flag indicating whether the parameter is optional. Defaults to mandatory (false).", "source": "juraj-google-style"}
{"code": "def _ParseIndexTable(\n      self, parser_mediator, file_system, file_entry, index_table):\n    \n    \n    path_segments = file_system.SplitPath(file_entry.path_spec.location)\n\n    data_block_files = {}\n    for cache_address in index_table:\n      if cache_address.filename not in data_block_files:\n        \n        \n        path_segments.pop()\n        path_segments.append(cache_address.filename)\n\n        \n        \n        kwargs = {}\n        if file_entry.path_spec.parent:\n          kwargs['parent'] = file_entry.path_spec.parent\n        kwargs['location'] = file_system.JoinPath(path_segments)\n\n        data_block_file_path_spec = path_spec_factory.Factory.NewPathSpec(\n            file_entry.path_spec.TYPE_INDICATOR, **kwargs)\n\n        try:\n          data_block_file_entry = path_spec_resolver.Resolver.OpenFileEntry(\n              data_block_file_path_spec)\n        except RuntimeError as exception:\n          message = (\n              'Unable to open data block file: {0:s} with error: '\n              '{1!s}'.format(kwargs['location'], exception))\n          parser_mediator.ProduceExtractionWarning(message)\n          data_block_file_entry = None\n\n        if not data_block_file_entry:\n          message = 'Missing data block file: {0:s}'.format(\n              cache_address.filename)\n          parser_mediator.ProduceExtractionWarning(message)\n          data_block_file_object = None\n\n        else:\n          data_block_file_object = data_block_file_entry.GetFileObject()\n\n          try:\n            self._data_block_file_parser.ParseFileObject(\n                parser_mediator, data_block_file_object)\n          except (IOError, errors.ParseError) as exception:\n            message = (\n                'Unable to parse data block file: {0:s} with error: '\n                '{1!s}').format(cache_address.filename, exception)\n            parser_mediator.ProduceExtractionWarning(message)\n            data_block_file_object.close()\n            data_block_file_object = None\n\n        data_block_files[cache_address.filename] = data_block_file_object\n\n    try:\n      self._ParseCacheEntries(\n          parser_mediator, index_table, data_block_files)\n    finally:\n      for data_block_file_object in iter(data_block_files.values()):\n        if data_block_file_object:\n          data_block_file_object.close()", "docstring": "Parses a Chrome Cache index table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_system (dfvfs.FileSystem): file system.\nfile_entry (dfvfs.FileEntry): file entry.\nindex_table (list[CacheAddress]): the cache addresses which are stored in\nthe index file.", "source": "juraj-google-style"}
{"code": "def is_generator(obj):\n    if isinstance(obj, types.GeneratorType):\n        return True\n    CO_GENERATOR = 32\n    return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and (obj.func_code.co_flags & CO_GENERATOR)))", "docstring": "Return true if the object is generator or generator function.\n\nGenerator function objects provides same attributes as functions.\nSee isfunction.__doc__ for attributes listing.\n\nAdapted from Python 2.6.\n\nArgs:\nobj: an object to test.\n\nReturns:\ntrue if the object is generator function.", "source": "codesearchnet"}
{"code": "def cases(store, case_query, limit=100):\n    case_groups = {status: [] for status in CASE_STATUSES}\n    for case_obj in case_query.limit(limit):\n        analysis_types = set((ind['analysis_type'] for ind in case_obj['individuals']))\n        case_obj['analysis_types'] = list(analysis_types)\n        case_obj['assignees'] = [store.user(user_email) for user_email in case_obj.get('assignees', [])]\n        case_groups[case_obj['status']].append(case_obj)\n        case_obj['is_rerun'] = (len(case_obj.get('analyses', [])) > 0)\n        case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id'])\n        case_obj['display_track'] = TRACKS[case_obj.get('track', 'rare')]\n    data = {'cases': [(status, case_groups[status]) for status in CASE_STATUSES], 'found_cases': case_query.count(), 'limit': limit}\n    return data", "docstring": "Preprocess case objects.\n\nAdd the necessary information to display the 'cases' view\n\nArgs:\nstore(adapter.MongoAdapter)\ncase_query(pymongo.Cursor)\nlimit(int): Maximum number of cases to display\n\nReturns:\ndata(dict): includes the cases, how many there are and the limit.", "source": "codesearchnet"}
{"code": "def _nested_from_proto(nested_proto, process_leafs):\n  \n  if not isinstance(nested_proto, module_pb2.NestedData):\n    raise base_errors.ModuleInfoError(\"Expected module_pb2.NestedData.\")\n\n  if nested_proto.HasField(\"value\"):\n    value = nested_proto.value\n    if not value:\n      value = _UnserializableObject()\n    else:\n      value = process_leafs(value)\n    return value\n  elif nested_proto.HasField(\"list\"):\n    return [_nested_from_proto(child, process_leafs)\n            for child in nested_proto.list.list]\n  elif nested_proto.HasField(\"tuple\"):\n    return tuple(_nested_from_proto(child, process_leafs)\n                 for child in nested_proto.tuple.list)\n  elif nested_proto.HasField(\"dict\"):\n    return {name: _nested_from_proto(child, process_leafs)\n            for name, child in six.iteritems(nested_proto.dict.map)}\n  elif nested_proto.HasField(\"named_tuple\"):\n    tmp_dict = {name: _nested_from_proto(child, process_leafs)\n                for name, child in six.iteritems(nested_proto.named_tuple.map)}\n    \n    NamedTuple = collections.namedtuple(  \n        nested_proto.named_tuple.name, tmp_dict.keys())\n    return NamedTuple(**tmp_dict)\n  elif nested_proto.HasField(\"special_type\"):\n    if nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES:\n      return _UnserializableObject()\n    type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name]\n    return type_info.from_proto(nested_proto.special_type.object, process_leafs)\n  else:\n    raise base_errors.ModuleInfoError(\n        \"Cannot deserialize a `ModuleInfo` protobuf with no fields.\")", "docstring": "Deserializes `nested_proto`.\n\nArgs:\nnested_proto: An instance of `module_pb2.NestedData`.\nprocess_leafs: A function to be applied to the leaf values of the nested\nstructure.\n\nReturns:\nAn instance of `string`, `tuple`, `dict` or `namedtuple`.\n\nRaises:\nbase_errors.ModuleInfoError: If the protobuf is of the wrong type or\nif some of its fields are missing.", "source": "juraj-google-style"}
{"code": "def ensure_s3_bucket(s3_client, bucket_name, bucket_region):\n    try:\n        s3_client.head_bucket(Bucket=bucket_name)\n    except botocore.exceptions.ClientError as e:\n        if (e.response['Error']['Message'] == 'Not Found'):\n            logger.debug('Creating bucket %s.', bucket_name)\n            create_args = {'Bucket': bucket_name}\n            location_constraint = s3_bucket_location_constraint(bucket_region)\n            if location_constraint:\n                create_args['CreateBucketConfiguration'] = {'LocationConstraint': location_constraint}\n            s3_client.create_bucket(**create_args)\n        elif (e.response['Error']['Message'] == 'Forbidden'):\n            logger.exception(('Access denied for bucket %s.  Did ' + 'you remember to use a globally unique name?'), bucket_name)\n            raise\n        else:\n            logger.exception('Error creating bucket %s. Error %s', bucket_name, e.response)\n            raise", "docstring": "Ensure an s3 bucket exists, if it does not then create it.\n\nArgs:\ns3_client (:class:`botocore.client.Client`): An s3 client used to\nverify and create the bucket.\nbucket_name (str): The bucket being checked/created.\nbucket_region (str, optional): The region to create the bucket in. If\nnot provided, will be determined by s3_client's region.", "source": "codesearchnet"}
{"code": "def WrapCFTypeInPython(self, obj):\n    \n    obj_type = self.dll.CFGetTypeID(obj)\n    if obj_type == self.dll.CFBooleanGetTypeID():\n      return CFBoolean(obj)\n    elif obj_type == self.dll.CFNumberGetTypeID():\n      return CFNumber(obj)\n    elif obj_type == self.dll.CFStringGetTypeID():\n      return CFString(obj)\n    elif obj_type == self.dll.CFDictionaryGetTypeID():\n      return CFDictionary(obj)\n    elif obj_type == self.dll.CFArrayGetTypeID():\n      return CFArray(obj)\n    else:\n      raise TypeError('Unknown type for object: {0}'.format(obj))", "docstring": "Package a CoreFoundation object in a Python wrapper.\n\nArgs:\nobj: The CoreFoundation object.\nReturns:\nOne of CFBoolean, CFNumber, CFString, CFDictionary, CFArray.\nRaises:\nTypeError: If the type is not supported.", "source": "juraj-google-style"}
{"code": "def _system_parameters(**kwargs):\n    return {key: value for (key, value) in kwargs.items() if ((value is not None) or (value == {}))}", "docstring": "Returns system keyword arguments removing Nones.\n\nArgs:\nkwargs: system keyword arguments.\n\nReturns:\ndict: system keyword arguments.", "source": "codesearchnet"}
{"code": "def make_calls(self, num_calls=1):\n        \n        self._cull()\n        while self._outstanding_calls + num_calls > self._max_calls_per_second:\n            time.sleep(0)  \n            self._cull()\n\n        self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls))\n        self._outstanding_calls += num_calls", "docstring": "Adds appropriate sleep to avoid making too many calls.\n\nArgs:\nnum_calls: int the number of calls which will be made", "source": "juraj-google-style"}
{"code": "def restore(self, file_prefix: str, options: Optional[checkpoint_options.CheckpointOptions]=None) -> Dict[str, ops.Operation]:\n    if options is not None and options.experimental_io_device is not None:\n        raise ValueError('Specified experimental_io_device in DTensor checkpoint is not supported.')\n    del options\n    restore_specs = []\n    tensor_structure = []\n    for saveable in self._saveable_objects:\n        saveable_tensor_structure = []\n        tensor_structure.append(saveable_tensor_structure)\n        for spec in saveable.specs:\n            saveable_tensor_structure.append(spec.name)\n            if isinstance(spec, d_variable.DSaveSpec):\n                restore_specs.append((spec.name, spec.slice_spec, spec.dtype, spec.layout, spec.global_shape))\n            elif isinstance(spec, saveable_object.SaveSpec):\n                restore_specs.append((spec.name, spec.slice_spec, spec.dtype, layout.Layout.replicated(self._mesh.host_mesh(), spec.tensor.shape.rank).to_string(), spec.tensor.shape.as_list()))\n    tensor_names, tensor_slices, tensor_dtypes, layouts, global_shapes = zip(*restore_specs)\n    with ops.device(api.device_name()):\n        restored_tensors = gen_dtensor_ops.d_tensor_restore_v2(prefix=file_prefix, tensor_names=tensor_names, shape_and_slices=tensor_slices, input_shapes=global_shapes, input_layouts=layouts, dtypes=tensor_dtypes)\n    structured_restored_tensors = nest.pack_sequence_as(tensor_structure, restored_tensors)\n    restore_ops = {}\n    for saveable, restored_tensors in zip(self._saveable_objects, structured_restored_tensors):\n        restore_ops[saveable.name] = saveable.restore(restored_tensors, restored_shapes=None)\n    return restore_ops", "docstring": "Restore the saveable objects from a checkpoint with `file_prefix`.\n\nArgs:\nfile_prefix: A string or scalar string Tensor containing the prefix for\nfiles to read from.\noptions: Optional `CheckpointOptions` object. This is unused in DTensor.\n\nReturns:\nA dictionary mapping from SaveableObject names to restore operations.", "source": "github-repos"}
{"code": "def write(self, message, cur_time=None):\n    if (cur_time is None):\n        cur_time = time.time()\n    lines = self._line_buffer.add_string(message)\n    for line in lines:\n        timestamp = ''\n        if self._prepend_timestamp:\n            timestamp = (datetime.datetime.utcfromtimestamp(cur_time).isoformat() + ' ')\n        line = u'{}{}{}'.format(self._line_prepend, timestamp, line)\n        self._fsapi.push(self._filename, line)", "docstring": "Write some text to the pusher.\n\nArgs:\nmessage: a string to push for this file.\ncur_time: used for unit testing. override line timestamp.", "source": "codesearchnet"}
{"code": "def get_vertex(self, key):\n    if (key in self.vertex_map):\n        return self.vertex_map[key]\n    vertex = self.new_vertex()\n    self.vertex_map[key] = vertex\n    return vertex", "docstring": "Returns or Creates a Vertex mapped by key.\n\nArgs:\nkey: A string reference for a vertex.  May refer to a new Vertex in which\ncase it will be created.\n\nReturns:\nThe Vertex mapped to by key.", "source": "codesearchnet"}
{"code": "def learning_rate_with_decay(batch_size, batch_denom, num_images, boundary_epochs, decay_rates, base_lr=0.1, enable_lars=False):\n    initial_learning_rate = ((base_lr * batch_size) / batch_denom)\n    batches_per_epoch = (num_images / batch_size)\n    boundaries = [int((batches_per_epoch * epoch)) for epoch in boundary_epochs]\n    vals = [(initial_learning_rate * decay) for decay in decay_rates]\n\n    def learning_rate_fn(global_step):\n        lr = tf.train.piecewise_constant(global_step, boundaries, vals)\n        warmup_steps = int((batches_per_epoch * 5))\n        warmup_lr = ((initial_learning_rate * tf.cast(global_step, tf.float32)) / tf.cast(warmup_steps, tf.float32))\n        return tf.cond((global_step < warmup_steps), (lambda : warmup_lr), (lambda : lr))\n\n    def poly_rate_fn(global_step):\n        'Handles linear scaling rule, gradual warmup, and LR decay.\\n\\n    The learning rate starts at 0, then it increases linearly per step.  After\\n    flags.poly_warmup_epochs, we reach the base learning rate (scaled to account\\n    for batch size). The learning rate is then decayed using a polynomial rate\\n    decay schedule with power 2.0.\\n\\n    Args:\\n    global_step: the current global_step\\n\\n    Returns:\\n    returns the current learning rate\\n    '\n        if (batch_size < 8192):\n            plr = 5.0\n            w_epochs = 5\n        elif (batch_size < 16384):\n            plr = 10.0\n            w_epochs = 5\n        elif (batch_size < 32768):\n            plr = 25.0\n            w_epochs = 5\n        else:\n            plr = 32.0\n            w_epochs = 14\n        w_steps = int((w_epochs * batches_per_epoch))\n        wrate = ((plr * tf.cast(global_step, tf.float32)) / tf.cast(w_steps, tf.float32))\n        num_epochs = 90\n        train_steps = (batches_per_epoch * num_epochs)\n        min_step = tf.constant(1, dtype=tf.int64)\n        decay_steps = tf.maximum(min_step, tf.subtract(global_step, w_steps))\n        poly_rate = tf.train.polynomial_decay(plr, decay_steps, ((train_steps - w_steps) + 1), power=2.0)\n        return tf.where((global_step <= w_steps), wrate, poly_rate)\n    if enable_lars:\n        return poly_rate_fn\n    return learning_rate_fn", "docstring": "Get a learning rate that decays step-wise as training progresses.\n\nArgs:\nbatch_size: the number of examples processed in each training batch.\nbatch_denom: this value will be used to scale the base learning rate.\n`0.1 * batch size` is divided by this number, such that when\nbatch_denom == batch_size, the initial learning rate will be 0.1.\nnum_images: total number of images that will be used for training.\nboundary_epochs: list of ints representing the epochs at which we\ndecay the learning rate.\ndecay_rates: list of floats representing the decay rates to be used\nfor scaling the learning rate. It should have one more element\nthan `boundary_epochs`, and all elements should have the same type.\nbase_lr: Initial learning rate scaled based on batch_denom.\n\nReturns:\nReturns a function that takes a single argument - the number of batches\ntrained so far (global_step)- and returns the learning rate to be used\nfor training the next batch.", "source": "codesearchnet"}
{"code": "def get_constant_state(self):\n    ret = self.constant_states[self.next_constant_state]\n    self.next_constant_state += 1\n    return ret", "docstring": "Read state that was written in \"first_part\" mode.\n\nReturns:\na structure", "source": "codesearchnet"}
{"code": "def download_url(url, root, filename=None, md5=None):\n    from six.moves import urllib\n    root = os.path.expanduser(root)\n    if (not filename):\n        filename = os.path.basename(url)\n    fpath = os.path.join(root, filename)\n    makedir_exist_ok(root)\n    if (os.path.isfile(fpath) and check_integrity(fpath, md5)):\n        print(('Using downloaded and verified file: ' + fpath))\n    else:\n        try:\n            print(((('Downloading ' + url) + ' to ') + fpath))\n            urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())\n        except OSError:\n            if (url[:5] == 'https'):\n                url = url.replace('https:', 'http:')\n                print(((('Failed download. Trying https -> http instead. Downloading ' + url) + ' to ') + fpath))\n                urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())", "docstring": "Download a file from a url and place it in root.\n\nArgs:\nurl (str): URL to download file from\nroot (str): Directory to place downloaded file in\nfilename (str, optional): Name to save the file under. If None, use the basename of the URL\nmd5 (str, optional): MD5 checksum of the download. If None, do not check", "source": "codesearchnet"}
{"code": "def ifft2(x):\n    if any_symbolic_tensors(x):\n        return IFFT2().symbolic_call(x)\n    return backend.math.ifft2(x)", "docstring": "Computes the 2D Inverse Fast Fourier Transform along the last two axes of\ninput.\n\nArgs:\nx: Tuple of the real and imaginary parts of the input tensor. Both\ntensors in the tuple should be of floating type.\n\nReturns:\nA tuple containing two tensors - the real and imaginary parts of the\noutput.\n\nExample:\n\n>>> x = (\n...     keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]),\n...     keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]),\n... )\n>>> ifft2(x)\n(array([[ 6.,  0.],\n[ 0., -2.]], dtype=float32), array([[ 2.,  0.],\n[ 0., -2.]], dtype=float32))", "source": "github-repos"}
{"code": "def get_variation(self, experiment_key, user_id, attributes=None):\n    \n\n    if not self.is_valid:\n      self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_variation'))\n      return None\n\n    if not validator.is_non_empty_string(experiment_key):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n      return None\n\n    if not isinstance(user_id, string_types):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n      return None\n\n    experiment = self.config.get_experiment_from_key(experiment_key)\n    variation_key = None\n\n    if not experiment:\n      self.logger.info('Experiment key \"%s\" is invalid. Not activating user \"%s\".' % (\n        experiment_key,\n        user_id\n      ))\n      return None\n\n    if not self._validate_user_inputs(attributes):\n      return None\n\n    variation = self.decision_service.get_variation(experiment, user_id, attributes)\n    if variation:\n      variation_key = variation.key\n\n    if self.config.is_feature_experiment(experiment.id):\n      decision_notification_type = enums.DecisionNotificationTypes.FEATURE_TEST\n    else:\n      decision_notification_type = enums.DecisionNotificationTypes.AB_TEST\n\n    self.notification_center.send_notifications(\n      enums.NotificationTypes.DECISION,\n      decision_notification_type,\n      user_id,\n      attributes or {},\n      {\n         'experiment_key': experiment_key,\n         'variation_key': variation_key\n      }\n    )\n\n    return variation_key", "docstring": "Gets variation where user will be bucketed.\n\nArgs:\nexperiment_key: Experiment for which user variation needs to be determined.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nVariation key representing the variation the user will be bucketed in.\nNone if user is not in experiment or if experiment is not Running.", "source": "juraj-google-style"}
{"code": "def create_error(msg, cause=None):\n    status_code = config.exc_to_code(cause)\n    status_name = config.NAME_STATUS_CODES.get(status_code)\n    if (status_name == 'INVALID_ARGUMENT'):\n        return InvalidArgumentError(msg, cause=cause)\n    else:\n        return GaxError(msg, cause=cause)", "docstring": "Creates a ``GaxError`` or subclass.\n\nAttributes:\nmsg (string): describes the error that occurred.\ncause (Exception, optional): the exception raised by a lower\nlayer of the RPC stack (for example, gRPC) that caused this\nexception, or None if this exception originated in GAX.\n\nReturns:\n.GaxError: The exception that wraps ``cause``.", "source": "codesearchnet"}
{"code": "def markdown_table(data, headers):\n    \n\n    maxx = [max([len(x) for x in column]) for column in zip(*data)]\n    maxx = [max(ll) for ll in zip(maxx, [len(x) for x in headers])]\n    mask = \" | \".join([\"%-{0:d}s\".format(n) for n in maxx])\n\n    ret = [mask % headers]\n\n    ret.append(\" | \".join([\"-\"*n for n in maxx]))\n    for line in data:\n        ret.append(mask % line)\n    return ret", "docstring": "Creates MarkDown table. Returns list of strings\n\nArguments:\ndata -- [(cell00, cell01, ...), (cell10, cell11, ...), ...]\nheaders -- sequence of strings: (header0, header1, ...)", "source": "juraj-google-style"}
{"code": "def escalatees(self, escalatee=None, resource_id=None):\n        \n        if resource_id is not None:\n            self.resource_id(resource_id)\n        self._request_uri = '{}/escalatees'.format(self._request_uri)\n        if escalatee is not None:\n            self._request_uri = '{}/{}'.format(self._request_uri, escalatee)", "docstring": "Add an escalatee to a Task\n\nGET: /v2/tasks/{uniqueId}/escalatees\nGET: /v2/tasks/{uniqueId}/escalatees/{escalateeId}\nPOST: /v2/tasks/{uniqueId}/escalatees/{escalateeId}\nDELETE: /v2/tasks/{uniqueId}/escalatees/{escalateeId}\n\nArgs:\nescalatee (Optional [string]): The escalatee name.\nresource_id (Optional [string]): The task ID.", "source": "juraj-google-style"}
{"code": "def officers(self, num, **kwargs):\n    baseuri = (self._BASE_URI + 'company/{}/officers'.format(num))\n    res = self.session.get(baseuri, params=kwargs)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for a company's registered officers by company number.\n\nArgs:\nnum (str): Company number to search on.\nkwargs (dict): additional keywords passed into\nrequests.session.get *params* keyword.", "source": "codesearchnet"}
{"code": "def __init__(self, config):\n    super().__init__()\n    in_channels = config.bottleneck_features\n    self.transformer_encoder = nn.ModuleList([ZoeDepthTransformerEncoderLayer(config) for _ in range(config.num_patch_transformer_layers)])\n    self.embedding_convPxP = nn.Conv2d(in_channels, config.patch_transformer_hidden_size, kernel_size=1, stride=1, padding=0)", "docstring": "ViT-like transformer block\n\nArgs:\nconfig (`ZoeDepthConfig`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def datalab(line, cell=None):\n  \n  parser = google.datalab.utils.commands.CommandParser(\n      prog='%datalab',\n      description=)\n\n  config_parser = parser.subcommand(\n      'config', help='List or set API-specific configurations.')\n  config_sub_commands = config_parser.add_subparsers(dest='command')\n\n  \n  config_list_parser = config_sub_commands.add_parser(\n      'list', help='List configurations')\n  config_list_parser.set_defaults(func=_config_list_fn)\n\n  \n  config_set_parser = config_sub_commands.add_parser(\n      'set', help='Set configurations')\n  config_set_parser.add_argument(\n      '-n', '--name',\n      help='The name of the configuration value', required=True)\n  config_set_parser.add_argument(\n      '-v', '--value', help='The value to set', required=True)\n  config_set_parser.set_defaults(func=_config_set_fn)\n\n  project_parser = parser.subcommand(\n      'project', help='Get or set the default project ID')\n  project_sub_commands = project_parser.add_subparsers(dest='command')\n\n  \n  project_get_parser = project_sub_commands.add_parser(\n      'get', help='Get the default project ID')\n  project_get_parser.set_defaults(func=_project_get_fn)\n\n  \n  project_set_parser = project_sub_commands.add_parser(\n      'set', help='Set the default project ID')\n  project_set_parser.add_argument(\n      '-p', '--project', help='The default project ID', required=True)\n  project_set_parser.set_defaults(func=_project_set_fn)\n\n  return google.datalab.utils.commands.handle_magic_line(line, cell, parser)", "docstring": "Implements the datalab cell magic for ipython notebooks.\n\nArgs:\nline: the contents of the datalab line.\nReturns:\nThe results of executing the cell.", "source": "juraj-google-style"}
{"code": "def Failed(self):\n    interval = self._current_interval_sec\n    self._current_interval_sec = min(self.max_interval_sec, (self._current_interval_sec * self.multiplier))\n    return interval", "docstring": "Indicates that a request has failed.\n\nReturns:\nTime interval to wait before retrying (in seconds).", "source": "codesearchnet"}
{"code": "def _get_raw_feature_as_tensor(self, key):\n    raw_feature = self._features[key]\n    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(raw_feature)\n\n    def expand_dims(input_tensor):\n        if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n            return sparse_ops.sparse_reshape(input_tensor, [array_ops.shape(input_tensor)[0], 1])\n        else:\n            return array_ops.expand_dims(input_tensor, -1)\n    rank = feature_tensor.get_shape().ndims\n    if rank is not None:\n        if rank == 0:\n            raise ValueError('Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))\n        return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n    with ops.control_dependencies([check_ops.assert_positive(array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))]):\n        return cond.cond(math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor)", "docstring": "Gets the raw_feature (keyed by `key`) as `tensor`.\n\nThe raw feature is converted to (sparse) tensor and maybe expand dim.\n\nFor both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if\nthe rank is 1. This supports dynamic rank also. For rank 0 raw feature, will\nerror out as it is not supported.\n\nArgs:\nkey: A `str` key to access the raw feature.\n\nReturns:\nA `Tensor` or `SparseTensor`.\n\nRaises:\nValueError: if the raw feature has rank 0.", "source": "github-repos"}
{"code": "def cut_sphere(self, radius=15.0, origin=None, outside_sliced=True, preserve_bonds=False):\n    if (origin is None):\n        origin = np.zeros(3)\n    elif pd.api.types.is_list_like(origin):\n        origin = np.array(origin, dtype='f8')\n    else:\n        origin = self.loc[(origin, ['x', 'y', 'z'])]\n    molecule = self.get_distance_to(origin)\n    if outside_sliced:\n        molecule = molecule[(molecule['distance'] < radius)]\n    else:\n        molecule = molecule[(molecule['distance'] > radius)]\n    if preserve_bonds:\n        molecule = self._preserve_bonds(molecule)\n    return molecule", "docstring": "Cut a sphere specified by origin and radius.\n\nArgs:\nradius (float):\norigin (list): Please note that you can also pass an\ninteger. In this case it is interpreted as the\nindex of the atom which is taken as origin.\noutside_sliced (bool): Atoms outside/inside the sphere\nare cut out.\npreserve_bonds (bool): Do not cut covalent bonds.\n\nReturns:\nCartesian:", "source": "codesearchnet"}
{"code": "def GetQueryValuesFromDict(cls, d, version=sorted(_SERVICE_MAP.keys())[(- 1)]):\n    return [{'key': key, 'value': cls.GetValueRepresentation(value, version)} for (key, value) in d.iteritems()]", "docstring": "Converts a dict of python types into a list of PQL types.\n\nArgs:\nd: A dictionary of variable names to python types.\nversion: A string identifying the Ad Manager version the values object\nis compatible with. This defaults to what is currently the latest\nversion. This will be updated in future releases to point to what is\nthen the latest version.\n\nReturns:\nA list of variables formatted for PQL statements which are compatible with\na particular API version.", "source": "codesearchnet"}
{"code": "def _example_from_allof(self, prop_spec):\n    example_dict = {}\n    for definition in prop_spec['allOf']:\n        update = self.get_example_from_prop_spec(definition, True)\n        example_dict.update(update)\n    return example_dict", "docstring": "Get the examples from an allOf section.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example dict", "source": "codesearchnet"}
{"code": "def _read_range(self, start, end=0):\n        \n        stream = _BytesIO()\n        try:\n            with _handle_azure_exception():\n                self._get_to_stream(\n                    stream=stream, start_range=start,\n                    end_range=(end - 1) if end else None, **self._client_kwargs)\n\n        \n        except _AzureHttpError as exception:\n            if exception.status_code == 416:\n                \n                return bytes()\n            raise\n\n        return stream.getvalue()", "docstring": "Read a range of bytes in stream.\n\nArgs:\nstart (int): Start stream position.\nend (int): End stream position.\n0 To not specify end.\n\nReturns:\nbytes: number of bytes read", "source": "juraj-google-style"}
{"code": "def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):\n    \n    plt = pretty_plot(**kwargs)\n    pp = np.polyfit(x, y, deg)\n    xp = np.linspace(min(x), max(x), 200)\n    plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o')\n    if xlabel:\n        plt.xlabel(xlabel)\n    if ylabel:\n        plt.ylabel(ylabel)\n    return plt", "docstring": "Convenience method to plot data with trend lines based on polynomial fit.\n\nArgs:\nx: Sequence of x data.\ny: Sequence of y data.\ndeg (int): Degree of polynomial. Defaults to 1.\nxlabel (str): Label for x-axis.\nylabel (str): Label for y-axis.\n\\\\*\\\\*kwargs: Keyword args passed to pretty_plot.\n\nReturns:\nmatplotlib.pyplot object.", "source": "juraj-google-style"}
{"code": "def _logger(self):\n    log_level = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL}\n    level = log_level.get(self.args.logging_level.lower())\n    tx_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s '\n    tx_format += '(%(funcName)s:%(lineno)d)'\n    formatter = logging.Formatter(tx_format)\n    log = logging.getLogger('tcrun')\n    if (not os.access('log', os.W_OK)):\n        os.makedirs('log')\n    logfile = os.path.join('log', 'run.log')\n    fh = logging.FileHandler(logfile)\n    fh.set_name('fh')\n    fh.setLevel(logging.DEBUG)\n    fh.setFormatter(formatter)\n    log.addHandler(fh)\n    log.setLevel(level)\n    log.info('Logging Level: {}'.format(logging.getLevelName(level)))\n    return log", "docstring": "Create logger instance.\n\nReturns:\nlogger: An instance of logging", "source": "codesearchnet"}
{"code": "def _ReadTable(self, tables, file_object, table_offset):\n    \n    table_header = self._ReadTableHeader(file_object, table_offset)\n\n    for record_offset in table_header.record_offsets:\n      if record_offset == 0:\n        continue\n\n      record_offset += table_offset\n\n      if table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO:\n        self._ReadRecordSchemaInformation(tables, file_object, record_offset)\n      elif table_header.record_type == (\n          self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES):\n        self._ReadRecordSchemaIndexes(tables, file_object, record_offset)\n      elif table_header.record_type == (\n          self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES):\n        self._ReadRecordSchemaAttributes(tables, file_object, record_offset)\n      else:\n        self._ReadRecord(\n            tables, file_object, record_offset, table_header.record_type)", "docstring": "Reads the table.\n\nArgs:\ntables (dict[int, KeychainDatabaseTable]): tables per identifier.\nfile_object (file): file-like object.\ntable_offset (int): offset of the table relative to the start of\nthe file.\n\nRaises:\nParseError: if the table cannot be read.", "source": "juraj-google-style"}
{"code": "def mme_matches(case_obj, institute_obj, mme_base_url, mme_token):\n    data = {'institute': institute_obj, 'case': case_obj, 'server_errors': []}\n    matches = {}\n    if (not case_obj.get('mme_submission')):\n        return None\n    for patient in case_obj['mme_submission']['patients']:\n        patient_id = patient['id']\n        matches[patient_id] = None\n        url = ''.join([mme_base_url, '/matches/', patient_id])\n        server_resp = matchmaker_request(url=url, token=mme_token, method='GET')\n        if ('status_code' in server_resp):\n            pat_matches = []\n            if server_resp.get('matches'):\n                pat_matches = parse_matches(patient_id, server_resp['matches'])\n            matches[patient_id] = pat_matches\n        else:\n            LOG.warning('Server returned error message: {}'.format(server_resp['message']))\n            data['server_errors'].append(server_resp['message'])\n    data['matches'] = matches\n    return data", "docstring": "Show Matchmaker submission data for a sample and eventual matches.\n\nArgs:\ncase_obj(dict): a scout case object\ninstitute_obj(dict): an institute object\nmme_base_url(str) base url of the MME server\nmme_token(str) auth token of the MME server\n\nReturns:\ndata(dict): data to display in the html template", "source": "codesearchnet"}
{"code": "def kms_decrypt(kms_client, secret):\n  \n  try:\n    decrypted_secret = kms_client.decrypt(CiphertextBlob=base64.b64decode(secret))['Plaintext']\n  except TypeError:\n    fail(\"Malformed base64 string data\")\n  except ClientError as error:\n    if error.response[\"Error\"][\"Code\"] == \"InvalidCiphertextException\":\n      fail(\"The decrypt request was rejected because the specified ciphertext \\\n      has been corrupted or is otherwise invalid.\", error)\n    elif error.response[\"Error\"][\"Code\"] == \"NotFoundException\":\n      fail(\"The decrypt request was rejected because the specified entity or resource could not be found.\", error)\n    else:\n      fail(\"boto3 exception occurred while performing kms decrypt operation.\", error)\n  return decrypted_secret", "docstring": "Decrypt kms-encrypted string\nArgs:\nkms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients.\nsecret (string): base64 encoded value to be decrypted\nReturns:\na populated EFPWContext object\nRaises:\nSystemExit(1): If there is an error with the boto3 decryption call (ex. malformed secret)", "source": "juraj-google-style"}
{"code": "def get_loggable_url(url):\n    loggable_url = (url or '')\n    for secret_string in ('bewit=', 'AWSAccessKeyId=', 'access_token='):\n        parts = loggable_url.split(secret_string)\n        loggable_url = parts[0]\n    if (loggable_url != url):\n        loggable_url = '{}<snip>'.format(loggable_url)\n    return loggable_url", "docstring": "Strip out secrets from taskcluster urls.\n\nArgs:\nurl (str): the url to strip\n\nReturns:\nstr: the loggable url", "source": "codesearchnet"}
{"code": "def sparse(self, rows: np.ndarray = None, cols: np.ndarray = None, layer: str = None) -> scipy.sparse.coo_matrix:\n\t\t\n\t\tif layer is None:\n\t\t\treturn self.layers[\"\"].sparse(rows=rows, cols=cols)\n\t\telse:\n\t\t\treturn self.layers[layer].sparse(rows=rows, cols=cols)", "docstring": "Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM\n\nArgs:\nrows:\t\tRows to include, or None to include all\ncols:\t\tColumns to include, or None to include all\nlayer:\t\tLayer to return, or None to return the default layer\n\nReturns:\nSparse matrix (:class:`scipy.sparse.coo_matrix`)", "source": "juraj-google-style"}
{"code": "def __init__(self, initial_learning_rate, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None):\n    super(CosineDecayRestarts, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.first_decay_steps = first_decay_steps\n    self._t_mul = t_mul\n    self._m_mul = m_mul\n    self.alpha = alpha\n    self.name = name", "docstring": "Applies cosine decay with restarts to the learning rate.\n\nArgs:\ninitial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\nnumber. The initial learning rate.\nfirst_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python\nnumber. Number of steps to decay over.\nt_mul: A scalar `float32` or `float64` `Tensor` or a Python number.\nUsed to derive the number of iterations in the i-th period\nm_mul: A scalar `float32` or `float64` `Tensor` or a Python number.\nUsed to derive the initial learning rate of the i-th period:\nalpha: A scalar `float32` or `float64` Tensor or a Python number.\nMinimum learning rate value as a fraction of the initial_learning_rate.\nname: String. Optional name of the operation.  Defaults to 'SGDRDecay'.", "source": "github-repos"}
{"code": "def current_missing(**kwargs) -> int:\n    data_path = os.environ.get(BBG_ROOT, '').replace('\\\\', '/')\n    if (not data_path):\n        return 0\n    return len(files.all_files(f'{data_path}/Logs/{missing_info(**kwargs)}'))", "docstring": "Check number of trials for missing values\n\nReturns:\nint: number of trials already tried", "source": "codesearchnet"}
{"code": "def get_max_num_classes(self):\n    num = 0\n    for task in self.task_list:\n        if hasattr(task, 'num_classes'):\n            if (num < task.num_classes):\n                num = task.num_classes\n    return num", "docstring": "Compute the maximum number of classes any subtask has.\n\nThis is useful for modifying the size of the softmax to include the output\nlabels for the classification tasks. Currently, labels from different tasks\nare overloaded.\n\nReturns:\nnum: Highest number of output classes in any text classification sub-task\nwithin this MultiProblem.", "source": "codesearchnet"}
{"code": "def create(provider, count=1, name=None, **kwargs):\n    count = int(count)\n    provider = provider_by_name(provider)\n    options = provider.create_server_defaults\n    options.update(kwargs)\n    names = ([name] * count)\n    provider.validate_create_options(**options)\n    return provider.create_servers(count, names, **options)", "docstring": "r'''\nCreate one or more cloud servers\n\nArgs:\n* provider (str): Cloud provider, e.g. ec2, digitalocean\n* count (int) =1: Number of instances\n* name (str) =None: Name of server(s)\n* \\**kwargs: Provider-specific flags", "source": "codesearchnet"}
{"code": "def make_padding_config(padding_config: PaddingConfig | Sequence[tuple[int, int, int]]) -> PaddingConfig:\n    if not isinstance(padding_config, PaddingConfig):\n        triples = padding_config\n        padding_config = PaddingConfig()\n        for lo, hi, interior in triples:\n            dimension = PaddingConfigDimension()\n            dimension.edge_padding_low = lo\n            dimension.edge_padding_high = hi\n            dimension.interior_padding = interior\n            padding_config.dimensions.append(dimension)\n    return padding_config", "docstring": "Create PaddingConfig proto from list of triples of integers.\n\nArgs:\npadding_config: either a PaddingConfig or a list of integer triples\n(edge_padding_low, edge_padding_high, interior_padding) representing the\nconfiguration of the padding operation.\n\nReturns:\nA `PaddingConfig` object.", "source": "github-repos"}
{"code": "def ProgChunks(list_, chunksize, nInput=None, **kwargs):\n    if (nInput is None):\n        nInput = len(list_)\n    n_chunks = get_num_chunks(nInput, chunksize)\n    kwargs['length'] = n_chunks\n    if ('freq' not in kwargs):\n        kwargs['freq'] = 1\n    chunk_iter = util_iter.ichunks(list_, chunksize)\n    progiter_ = ProgressIter(chunk_iter, **kwargs)\n    return progiter_", "docstring": "Yeilds an iterator in chunks and computes progress\nProgress version of ut.ichunks\n\nArgs:\nlist_ (list):\nchunksize (?):\nnInput (None): (default = None)\n\nKwargs:\nlength, freq\n\nReturns:\nProgressIter: progiter_\n\nCommandLine:\npython -m utool.util_progress ProgChunks --show\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_progress import *  # NOQA\n>>> import utool as ut\n>>> list_ = range(100)\n>>> chunksize = 10\n>>> nInput = None\n>>> progiter_ = ProgChunks(list_, chunksize, nInput)\n>>> iter_ = iter(progiter_)\n>>> chunk = six.next(iter_)\n>>> assert len(chunk) == 10\n>>> rest = ut.flatten(list(progiter_))\n>>> assert len(rest) == 90", "source": "codesearchnet"}
{"code": "def whois_domains_history(self, domains):\n        \n        api_name = 'opendns-whois-domain-history'\n        fmt_url_path = u'whois/{0}/history'\n        return self._multi_get(api_name, fmt_url_path, domains)", "docstring": "Calls WHOIS domain history end point\n\nArgs:\ndomains: An enumerable of domains\nReturns:\nA dict of {domain: domain_history_result}", "source": "juraj-google-style"}
{"code": "def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT], preprocess_fn: Callable[[PreProcessT], ExampleT]):\n    self._base = base\n    self._env_vars = getattr(base, '_env_vars', {})\n    self._preprocess_fn = preprocess_fn", "docstring": "A ModelHandler that has a preprocessing function associated with it.\n\nArgs:\nbase: An implementation of the underlying model handler.\npreprocess_fn: the preprocessing function to use.", "source": "github-repos"}
{"code": "def create(cls, **kwargs):\n        \n\n        conn = Qubole.agent()\n        if kwargs.get('command_type') is None:\n            kwargs['command_type'] = cls.__name__\n        if kwargs.get('tags') is not None:\n            kwargs['tags'] = kwargs['tags'].split(',')\n\n        return cls(conn.post(cls.rest_entity_path, data=kwargs))", "docstring": "Create a command object by issuing a POST request to the /command endpoint\nNote - this does not wait for the command to complete\n\nArgs:\n`**kwargs`: keyword arguments specific to command type\n\nReturns:\nCommand object", "source": "juraj-google-style"}
{"code": "def show_corrections(self, status=None, nids=None):\n        \n        nrows, ncols = get_terminal_size()\n        count = 0\n        for task in self.iflat_tasks(status=status, nids=nids):\n            if task.num_corrections == 0: continue\n            count += 1\n            print(make_banner(str(task), width=ncols, mark=\"=\"))\n            for corr in task.corrections:\n                pprint(corr)\n\n        if not count: print(\"No correction found.\")\n        return count", "docstring": "Show the corrections applied to the flow at run-time.\n\nArgs:\nstatus: if not None, only the tasks with this status are select.\nnids: optional list of node identifiers used to filter the tasks.\n\nReturn: The number of corrections found.", "source": "juraj-google-style"}
{"code": "def _dispatch(self, event, listener, *args, **kwargs):\n    if (asyncio.iscoroutinefunction(listener) or (isinstance(listener, functools.partial) and asyncio.iscoroutinefunction(listener.func))):\n        return self._dispatch_coroutine(event, listener, *args, **kwargs)\n    return self._dispatch_function(event, listener, *args, **kwargs)", "docstring": "Dispatch an event to a listener.\n\nArgs:\nevent (str): The name of the event that triggered this call.\nlistener (def or async def): The listener to trigger.\n*args: Any number of positional arguments.\n**kwargs: Any number of keyword arguments.\n\nThis method inspects the listener. If it is a def it dispatches the\nlistener to a method that will execute that def. If it is an async def\nit dispatches it to a method that will schedule the resulting coro with\nthe event loop.", "source": "codesearchnet"}
{"code": "def segmentation_to_mask(polys, height, width):\n    polys = [p.flatten().tolist() for p in polys]\n    assert (len(polys) > 0), 'Polygons are empty!'\n    import pycocotools.mask as cocomask\n    rles = cocomask.frPyObjects(polys, height, width)\n    rle = cocomask.merge(rles)\n    return cocomask.decode(rle)", "docstring": "Convert polygons to binary masks.\n\nArgs:\npolys: a list of nx2 float array. Each array contains many (x, y) coordinates.\n\nReturns:\na binary matrix of (height, width)", "source": "codesearchnet"}
{"code": "async def getTypeNorm(self, name, valu):\n    tobj = self.model.type(name)\n    if (tobj is None):\n        raise s_exc.NoSuchType(mesg=f'The type {name} does not exist.', name=name)\n    (norm, info) = tobj.norm(valu)\n    return (norm, info)", "docstring": "Get the normalized type value based on the Cortex data model.\n\nArgs:\nname (str): The type to normalize.\nvalu: The value to normalize.\n\nReturns:\n(tuple): A two item tuple, containing the normed value and the info dictionary.\n\nRaises:\ns_exc.NoSuchType: If the type does not exist.\ns_exc.BadTypeValu: If the value fails to normalize.", "source": "codesearchnet"}
{"code": "def closest_point_to(self, point, thr=20.0):\n        \n        i = 0\n        point_arr = point.gen2arr()\n\n        def closest_in_line(pointA, pointB):\n            temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)\n            return Point(temp[1], temp[0], None)\n\n        for (p_a, p_b) in pairwise(self.points):\n            candidate = closest_in_line(p_a, p_b)\n            if candidate.distance(point) <= thr:\n                if p_a.distance(point) <= thr:\n                    return i, p_a\n                elif p_b.distance(point) <= thr:\n                    return i + 1, p_b\n                else:\n                    return i, candidate\n            i = i + 1\n        return -1, None", "docstring": "Finds the closest point in the segment to a given point\n\nArgs:\npoint (:obj:`Point`)\nthr (float, optional): Distance threshold, in meters, to be considered\nthe same point. Defaults to 20.0\nReturns:\n(int, Point): Index of the point. -1 if doesn't exist. A point is given if it's along the segment", "source": "juraj-google-style"}
{"code": "def __call__(self, *args, **kwargs) -> Any:\n    args, kwargs = self._parse_call_time_overrides(*args, **kwargs)\n    signature = self.__signature__\n    if self.is_subclassed_functor:\n        for arg_spec, arg_value in zip(signature.args, args):\n            kwargs[arg_spec.name] = arg_value\n        with self._apply_call_time_overrides_to_members(**kwargs):\n            return_value = self._call()\n    else:\n        return_value = self._call(*args, **kwargs)\n    if signature.return_value and flags.is_type_check_enabled() and (pg_typing.MISSING_VALUE != return_value):\n        return_value = signature.return_value.apply(return_value, root_path=self.sym_path + 'returns')\n    if flags.is_tracking_origin() and isinstance(return_value, base.Symbolic):\n        return_value.sym_setorigin(self, 'return')\n    return return_value", "docstring": "Call with late bound arguments.\n\nArgs:\n*args: list arguments.\n**kwargs: keyword arguments.\n\nReturns:\nAny.\n\nRaises:\nTypeError: got multiple values for arguments or extra argument name.", "source": "github-repos"}
{"code": "def add_catalog_from_URL(self, votable_URL, votable_options={}):\n        \n        self.votable_URL= votable_URL\n        self.votable_options= votable_options\n        self.votable_from_URL_flag= not self.votable_from_URL_flag", "docstring": "load a VOTable table from an url and load its data into the widget\nArgs:\nvotable_URL: string url\nvotable_options: dictionary object", "source": "juraj-google-style"}
{"code": "def to_timestamp(self, data):\n    result = pd.Series(index=data.index)\n    _slice = (~ data[self.col_name].isnull())\n    result[_slice] = data[_slice][self.col_name].astype('int64')\n    return result", "docstring": "Transform a datetime series into linux epoch.\n\nArgs:\ndata(pandas.DataFrame): DataFrame containins a column named as `self.col_name`.\n\nReturns:\npandas.Series", "source": "codesearchnet"}
{"code": "def validate_metadata(train_config):\n  \n\n  \n  if len(train_config['csv_header']) != len(train_config['csv_defaults']):\n    raise ValueError('Unequal number of columns in input features file and '\n                     'schema file.')\n\n  \n  \n  \n  sorted_columns = sorted(train_config['csv_header'] +\n                          [train_config['target_column']])\n\n  sorted_columns2 = sorted(train_config['categorical_columns'] +\n                           train_config['numerical_columns'] +\n                           [train_config['key_column']] +\n                           [train_config['target_column']])\n  if sorted_columns2 != sorted_columns:\n    raise ValueError('Each csv header must be a numerical/categorical type, a '\n                     ' key, or a target.')", "docstring": "Perform some checks that the trainig config is correct.\n\nArgs:\ntrain_config: train config as produced by merge_metadata()\n\nRaises:\nValueError: if columns look wrong.", "source": "juraj-google-style"}
{"code": "def code_cell(sourcecode):\n    r\n    import utool as ut\n    sourcecode = ut.remove_codeblock_syntax_sentinals(sourcecode)\n    cell_header = ut.codeblock(\n        )\n    cell_footer = ut.codeblock(\n        )\n    if sourcecode is None:\n        source_line_repr = ' []\\n'\n    else:\n        lines = sourcecode.split('\\n')\n        line_list = [line + '\\n' if count < len(lines) else line\n                     for count, line in enumerate(lines, start=1)]\n        \n        repr_line_list = [repr_single_for_md(line) for line in line_list]\n        source_line_repr = ut.indent(',\\n'.join(repr_line_list), ' ' * 2)\n        source_line_repr = ' [\\n' + source_line_repr + '\\n ]\\n'\n    return (cell_header + source_line_repr + cell_footer)", "docstring": "r\"\"\"\nArgs:\nsourcecode (str):\n\nReturns:\nstr: json formatted ipython notebook code cell\n\nCommandLine:\npython -m ibeis.templates.generate_notebook --exec-code_cell\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from ibeis.templates.generate_notebook import *  # NOQA\n>>> sourcecode = notebook_cells.timestamp_distribution[1]\n>>> sourcecode = notebook_cells.initialize[1]\n>>> result = code_cell(sourcecode)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def lazy_property(fn):\n    \n    attr_name = '_lazy_' + fn.__name__\n\n    @property\n    @wraps(fn)\n    def _lazy_property(self):\n        if not hasattr(self, attr_name):\n            setattr(self, attr_name, fn(self))\n        return getattr(self, attr_name)\n    return _lazy_property", "docstring": "Decorator that makes a property lazy-evaluated whilst preserving\ndocstrings.\n\nArgs:\nfn (function): the property in question\n\nReturns:\nevaluated version of the property.", "source": "juraj-google-style"}
{"code": "def rotate_texture(texture, rotation, x_offset=0.5, y_offset=0.5):\n    \n    x, y = texture\n    x = x.copy() - x_offset\n    y = y.copy() - y_offset\n    angle = np.radians(rotation)\n    x_rot = x * np.cos(angle) + y * np.sin(angle)\n    y_rot = x * -np.sin(angle) + y * np.cos(angle)\n    return x_rot + x_offset, y_rot + y_offset", "docstring": "Rotates the given texture by a given angle.\n\nArgs:\ntexture (texture): the texture to rotate\nrotation (float): the angle of rotation in degrees\nx_offset (float): the x component of the center of rotation (optional)\ny_offset (float): the y component of the center of rotation (optional)\n\nReturns:\ntexture: A texture.", "source": "juraj-google-style"}
{"code": "def load(cls, path):\n        \n        with open(path, 'r') as in_file:\n            metadata = json.load(in_file)\n\n        return cls.from_dict(metadata)", "docstring": "Create a new MLPipeline from a JSON specification.\n\nThe JSON file format is the same as the one created by the `to_dict` method.\n\nArgs:\npath (str): Path of the JSON file to load.\n\nReturns:\nMLPipeline:\nA new MLPipeline instance with the specification found\nin the JSON file.", "source": "juraj-google-style"}
{"code": "def parse_ped(ped_stream, family_type='ped'):\n    \n    pedigree = FamilyParser(ped_stream, family_type=family_type)\n\n    if len(pedigree.families) != 1:\n        raise PedigreeError(\"Only one case per ped file is allowed\")\n\n    family_id = list(pedigree.families.keys())[0]\n    family = pedigree.families[family_id]\n\n    samples = [{\n        'sample_id': ind_id,\n        'father': individual.father,\n        'mother': individual.mother,\n        \n        'sex': SEX_MAP[individual.sex],\n        'phenotype': PHENOTYPE_MAP[int(individual.phenotype)],\n    } for ind_id, individual in family.individuals.items()]\n\n    return family_id, samples", "docstring": "Parse out minimal family information from a PED file.\n\nArgs:\nped_stream(iterable(str))\nfamily_type(str): Format of the pedigree information\n\nReturns:\nfamily_id(str), samples(list[dict])", "source": "juraj-google-style"}
{"code": "def trace_max_buffer_capacity(self):\n    cmd = enums.JLinkTraceCommand.GET_MAX_CAPACITY\n    data = ctypes.c_uint32(0)\n    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n    if (res == 1):\n        raise errors.JLinkException('Failed to get max trace buffer size.')\n    return data.value", "docstring": "Retrieves the maximum size the trace buffer can be configured with.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe maximum configurable capacity for the trace buffer.", "source": "codesearchnet"}
{"code": "def SetProtocol(self, protocol):\n    \n    protocol = protocol.lower().strip()\n    if protocol not in ['http', 'https']:\n      raise ValueError('Invalid protocol specified for Viper lookup')\n    self._analyzer.SetProtocol(protocol)", "docstring": "Sets the protocol that will be used to query Viper.\n\nArgs:\nprotocol (str): protocol to use to query Viper. Either 'http' or 'https'.\n\nRaises:\nValueError: If an invalid protocol is selected.", "source": "juraj-google-style"}
{"code": "def subscribe(self, clock_name: str=None, clock_slots: Iterable[str]=None, subscriptions: Dict[(str, Any)]={}):\n    for area in subscriptions:\n        init_full(self, area, subscriptions[area])\n        subscriptions[area] = {'slots': subscriptions[area]}\n    if (clock_name is not None):\n        self.clock_name = clock_name\n        self.clock_slots = clock_slots\n        subscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1}\n    self.setup(puller=True, subscriptions=subscriptions)", "docstring": "Subscribes this Area to the given Areas and optionally given Slots. Must be called before the Area is run.\n\nArgs:\nclock_name: The name of the Area that is used as synchronizing Clock.\nclock_slots: The slots of the Clock relevant to this Area.\nsubscriptions: A dictionary containing the relevant Areas names as keys and optionally the Slots as values.", "source": "codesearchnet"}
{"code": "def __init__(self, executable: _PATH = 'default') -> None:\n        \n\n        _default_path = os.path.join(\n            os.path.dirname(__file__), 'executable', 'adb.exe')\n\n        if executable == 'default':\n            self.path = _default_path\n        elif executable.endswith('adb.exe'):\n            if not os.path.isfile(executable):\n                raise FileNotFoundError(f'{self.path!r} does not exist.')\n            self.path = executable\n        elif executable in ['adb', 'adb.exe']:\n            PATH = os.environ['PATH']\n            if not ('adb' in PATH or 'android' in PATH or 'platform-tools' in PATH):\n                raise EnvironmentError('PATH does not exist.')\n            self.path = executable\n        else:\n            self.path = _default_path", "docstring": "Creates a new instance of the Commands.\n\nArgs:\nexecutable_path: Path to the AndroidDriver. On the Windows platform, the best choice is default.", "source": "juraj-google-style"}
{"code": "def get_measurements(region, core_info, data, extra_offset=0):\n    measurements = []\n    clean_core_info = [x for x in core_info if x]\n    cores = len(clean_core_info)\n    for k in data:\n        if (k not in ['1', 'Region Info', 'Event', 'Metric', 'CPU clock']):\n            slot = data[k]\n            for i in range(cores):\n                core = core_info[i]\n                idx = (extra_offset + i)\n                if (core and slot[idx]):\n                    measurements.append((region, k, core, slot[idx]))\n    return measurements", "docstring": "Get the complete measurement info from likwid's region info.\n\nArgs:\nregion: The region we took a measurement in.\ncore_info: The core information.\ndata: The raw data.\nextra_offset (int): default = 0\n\nReturns (list((region, metric, core, value))):\nA list of measurement tuples, a tuple contains the information about\nthe region, the metric, the core and the actual value.", "source": "codesearchnet"}
{"code": "def AddStationDecoration(self, index, color='\n    tmpstr = str()\n    num_stations = len(self._stations)\n    ind = int(index)\n    if self._stations:\n        if (0 < ind < num_stations):\n            y = self._stations[ind]\n            tmpstr = ('<polyline class=\"Dec\" stroke=\"%s\" points=\"%s,%s,%s,%s\" />' % (color, 20, ((20 + y) + 0.5), (self._gwidth + 20), ((20 + y) + 0.5)))\n    self._decorators.append(tmpstr)", "docstring": "Flushes existing decorations and highlights the given station-line.\n\nArgs:\n# Integer, index of stop to be highlighted.\nindex: 4\n# An optional string with a html color code\ncolor: \"#fff\"", "source": "codesearchnet"}
{"code": "def _parse_mtu(self, config):\n        \n        match = re.search(r'mtu (\\d+)', config)\n        return dict(mtu=int(match.group(1)))", "docstring": "Parses the config block and returns the configured IP MTU value\n\nThe provided configuration block is scanned and the configured value\nfor the IP MTU is returned as a dict object.  The IP MTU value is\nexpected to always be present in the provided config block\n\nArgs:\nconfig (str): The interface configuration block to parse\n\nReturn:\ndict: A dict object intended to be merged into the resource dict", "source": "juraj-google-style"}
{"code": "def add(self, rule: 'functions.ReplacementRule') -> None:\n    self.matcher.add(rule.pattern, rule.replacement)", "docstring": "Add a new rule to the replacer.\n\nArgs:\nrule:\nThe rule to add.", "source": "codesearchnet"}
{"code": "def parse_config_files_and_bindings(config_files, bindings, finalize_config=True, skip_unknown=False):\n    if (config_files is None):\n        config_files = []\n    if (bindings is None):\n        bindings = ''\n    for config_file in config_files:\n        parse_config_file(config_file, skip_unknown)\n    parse_config(bindings, skip_unknown)\n    if finalize_config:\n        finalize()", "docstring": "Parse a list of config files followed by extra Gin bindings.\n\nThis function is equivalent to:\n\nfor config_file in config_files:\ngin.parse_config_file(config_file, skip_configurables)\ngin.parse_config(bindings, skip_configurables)\nif finalize_config:\ngin.finalize()\n\nArgs:\nconfig_files: A list of paths to the Gin config files.\nbindings: A list of individual parameter binding strings.\nfinalize_config: Whether to finalize the config after parsing and binding\n(defaults to True).\nskip_unknown: A boolean indicating whether unknown configurables and imports\nshould be skipped instead of causing errors (alternatively a list of\nconfigurable names to skip if unknown). See `parse_config` for additional\ndetails.", "source": "codesearchnet"}
{"code": "def oauth2_callback(request):\n    \n    if 'error' in request.GET:\n        reason = request.GET.get(\n            'error_description', request.GET.get('error', ''))\n        reason = html.escape(reason)\n        return http.HttpResponseBadRequest(\n            'Authorization failed {0}'.format(reason))\n\n    try:\n        encoded_state = request.GET['state']\n        code = request.GET['code']\n    except KeyError:\n        return http.HttpResponseBadRequest(\n            'Request missing state or authorization code')\n\n    try:\n        server_csrf = request.session[_CSRF_KEY]\n    except KeyError:\n        return http.HttpResponseBadRequest(\n            'No existing session for this flow.')\n\n    try:\n        state = json.loads(encoded_state)\n        client_csrf = state['csrf_token']\n        return_url = state['return_url']\n    except (ValueError, KeyError):\n        return http.HttpResponseBadRequest('Invalid state parameter.')\n\n    if client_csrf != server_csrf:\n        return http.HttpResponseBadRequest('Invalid CSRF token.')\n\n    flow = _get_flow_for_token(client_csrf, request)\n\n    if not flow:\n        return http.HttpResponseBadRequest('Missing Oauth2 flow.')\n\n    try:\n        credentials = flow.step2_exchange(code)\n    except client.FlowExchangeError as exchange_error:\n        return http.HttpResponseBadRequest(\n            'An error has occurred: {0}'.format(exchange_error))\n\n    get_storage(request).put(credentials)\n\n    signals.oauth2_authorized.send(sender=signals.oauth2_authorized,\n                                   request=request, credentials=credentials)\n\n    return shortcuts.redirect(return_url)", "docstring": "View that handles the user's return from OAuth2 provider.\n\nThis view verifies the CSRF state and OAuth authorization code, and on\nsuccess stores the credentials obtained in the storage provider,\nand redirects to the return_url specified in the authorize view and\nstored in the session.\n\nArgs:\nrequest: Django request.\n\nReturns:\nA redirect response back to the return_url.", "source": "juraj-google-style"}
{"code": "def _validate_first_message(cls, msg):\n        \n        data = cls._unpack_message(msg)\n        logger.debug(data)\n        if data != cls.RTM_HANDSHAKE:\n            raise SlackApiError('Unexpected response: {!r}'.format(data))\n        logger.info('Joined real-time messaging.')", "docstring": "Check the first message matches the expected handshake.\n\nNote:\nThe handshake is provided as :py:attr:`RTM_HANDSHAKE`.\n\nArguments:\nmsg (:py:class:`aiohttp.Message`): The message to validate.\n\nRaises:\n:py:class:`SlackApiError`: If the data doesn't match the\nexpected handshake.", "source": "juraj-google-style"}
{"code": "def iter_replace_strings(replacements):\n\n    def function_iter_replace_strings(iterable_strings):\n        'Yield a formatted string from iterable_strings using a generator.\\n\\n            Args:\\n                iterable_strings: Iterable containing strings. E.g a file-like\\n                                  object.\\n\\n            Returns:\\n                Yields formatted line.\\n\\n            '\n        for string in iterable_strings:\n            (yield reduce((lambda s, kv: s.replace(*kv)), replacements.items(), string))\n    return function_iter_replace_strings", "docstring": "Create a function that uses replacement pairs to process a string.\n\nThe returned function takes an iterator and yields on each processed\nline.\n\nArgs:\nreplacements: Dict containing 'find_string': 'replace_string' pairs\n\nReturns:\nfunction with signature: iterator of strings = function(iterable)", "source": "codesearchnet"}
{"code": "def diff_commonOverlap(self, text1, text2):\n    text1_length = len(text1)\n    text2_length = len(text2)\n    if ((text1_length == 0) or (text2_length == 0)):\n        return 0\n    if (text1_length > text2_length):\n        text1 = text1[(- text2_length):]\n    elif (text1_length < text2_length):\n        text2 = text2[:text1_length]\n    text_length = min(text1_length, text2_length)\n    if (text1 == text2):\n        return text_length\n    best = 0\n    length = 1\n    while True:\n        pattern = text1[(- length):]\n        found = text2.find(pattern)\n        if (found == (- 1)):\n            return best\n        length += found\n        if ((found == 0) or (text1[(- length):] == text2[:length])):\n            best = length\n            length += 1", "docstring": "Determine if the suffix of one string is the prefix of another.\n\nArgs:\ntext1 First string.\ntext2 Second string.\n\nReturns:\nThe number of characters common to the end of the first\nstring and the start of the second string.", "source": "codesearchnet"}
{"code": "def binary_accuracy(y_true, y_pred, threshold=0.5):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    threshold = math_ops.cast(threshold, y_pred.dtype)\n    y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)\n    return backend.mean(math_ops.equal(y_true, y_pred), axis=-1)", "docstring": "Calculates how often predictions match binary labels.\n\nStandalone usage:\n>>> y_true = [[1], [1], [0], [0]]\n>>> y_pred = [[1], [1], [0], [0]]\n>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)\n>>> assert m.shape == (4,)\n>>> m.numpy()\narray([1., 1., 1., 1.], dtype=float32)\n\nArgs:\ny_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\ny_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\nthreshold: (Optional) Float representing the threshold for deciding whether\nprediction values are 1 or 0.\n\nReturns:\nBinary accuracy values. shape = `[batch_size, d0, .. dN-1]`", "source": "github-repos"}
{"code": "def send(self, message_type, data, callback=None, one_way=False):\n        \n        message = validator_pb2.Message(\n            correlation_id=_generate_id(),\n            content=data,\n            message_type=message_type)\n\n        fut = future.Future(message.correlation_id, message.content,\n                            callback, timeout=self._connection_timeout)\n        if not one_way:\n            self._futures.put(fut)\n\n        self._send_receive_thread.send_message(message)\n        return fut", "docstring": "Sends a message of message_type\n\nArgs:\nmessage_type (validator_pb2.Message): enum value\ndata (bytes): serialized protobuf\ncallback (function): a callback function to call when a\nresponse to this message is received\n\nReturns:\nfuture.Future", "source": "juraj-google-style"}
{"code": "def _plot_depth_track(self, ax, md, kind='MD'):\n        \n        if kind == 'MD':\n            ax.set_yscale('bounded', vmin=md.min(), vmax=md.max())\n            \n        elif kind == 'TVD':\n            tvd = self.location.md2tvd(md)\n            ax.set_yscale('piecewise', x=tvd, y=md)\n            \n        else:\n            raise Exception(\"Kind must be MD or TVD\")\n\n        for sp in ax.spines.values():\n            sp.set_color('gray')\n\n        if ax.is_first_col():\n            pad = -10\n            ax.spines['left'].set_color('none')\n            ax.yaxis.set_ticks_position('right')\n            for label in ax.get_yticklabels():\n                label.set_horizontalalignment('right')\n        elif ax.is_last_col():\n            pad = -10\n            ax.spines['right'].set_color('none')\n            ax.yaxis.set_ticks_position('left')\n            for label in ax.get_yticklabels():\n                label.set_horizontalalignment('left')\n        else:\n            pad = -30\n            for label in ax.get_yticklabels():\n                label.set_horizontalalignment('center')\n\n        ax.tick_params(axis='y', colors='gray', labelsize=12, pad=pad)\n        ax.set_xticks([])\n\n        ax.set(xticks=[])\n        ax.depth_track = True\n\n        return ax", "docstring": "Private function. Depth track plotting.\n\nArgs:\nax (ax): A matplotlib axis.\nmd (ndarray): The measured depths of the track.\nkind (str): The kind of track to plot.\n\nReturns:\nax.", "source": "juraj-google-style"}
{"code": "def parse_record(cls, vcf_line, sample_names):\n    vcf_fields = vcf_line.rstrip('\\r\\n').split('\\t')\n    (chrom, pos, rid, ref, alt, qual, rfilter, info) = vcf_fields[0:8]\n    sample_fields = []\n    sample_tag_values = {}\n    if (len(vcf_fields) > 9):\n        rformat = vcf_fields[8]\n        sample_fields = vcf_fields[9:]\n        sample_tag_values = VcfRecord._sample_tag_values(sample_names, rformat, sample_fields)\n    return VcfRecord(chrom, pos, ref, alt, rid, qual, rfilter, info, sample_tag_values)", "docstring": "Alternative constructor that parses VcfRecord from VCF string.\n\nAspire to parse/represent the data such that it could be reliably\nround-tripped. (This nicety means INFO fields and FORMAT tags should be\ntreated as ordered to avoid shuffling.)\n\nArgs:\nvcf_line: the VCF variant record as a string; tab separated fields,\ntrailing newlines are ignored. Must have at least 8 fixed fields\n(through INFO)\nsample_names: a list of sample name strings; these should match\nthe VCF header column\nReturns:\nA mutable VcfRecord.", "source": "codesearchnet"}
{"code": "def publishMap(self, maps_info, fsInfo=None, itInfo=None):\n        \n        if self.securityhandler is None:\n            print (\"Security handler required\")\n            return\n        itemInfo = None\n        itemId = None\n        map_results = None\n        replaceInfo = None\n        replaceItem = None\n        map_info = None\n        admin = None\n        try:\n            admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n            map_results = []\n            for map_info in maps_info:\n                itemInfo = {}\n\n                if 'ReplaceInfo' in map_info:\n                    replaceInfo = map_info['ReplaceInfo']\n                else:\n                    replaceInfo = None\n\n\n                if replaceInfo != None:\n\n                    for replaceItem in replaceInfo:\n                        if replaceItem['ReplaceType'] == 'Layer':\n\n                            if fsInfo is not None:\n\n                                for fs in fsInfo:\n                                    if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:\n                                        replaceItem['ReplaceString'] = fs['FSInfo']['url']\n                                        replaceItem['ItemID'] = fs['FSInfo']['itemId']\n                                        replaceItem['ItemFolder'] = fs['FSInfo']['folderId']\n                                        if 'convertCase' in fs['FSInfo']:\n                                            replaceItem['convertCase'] = fs['FSInfo']['convertCase']\n                                    elif 'ItemID' in replaceItem:\n                                        if 'ItemFolder' in replaceItem == False:\n\n                                            itemId = replaceItem['ItemID']\n                                            itemInfo = admin.content.getItem(itemId=itemId)\n                                            if itemInfo.owner:\n                                                if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:\n                                                    replaceItem['ItemFolder'] = itemInfo.ownerFolder\n                                                else:\n                                                    replaceItem['ItemFolder'] = None\n                        elif replaceItem['ReplaceType'] == 'Global':\n\n                            if itInfo is not None:\n\n                                for itm in itInfo:\n                                    if itm is not None:\n\n                                        if replaceItem['ReplaceString'] == itm['ReplaceTag']:\n                                            if 'ItemInfo' in itm:\n                                                if 'url' in itm['ItemInfo']:\n                                                    replaceItem['ReplaceString'] = itm['ItemInfo']['url']\n\n\n                if 'ReplaceTag' in map_info:\n\n                    itemInfo = {\"ReplaceTag\":map_info['ReplaceTag'] }\n                else:\n                    itemInfo = {\"ReplaceTag\":\"{WebMap}\" }\n\n                itemInfo['MapInfo']  = self._publishMap(config=map_info,\n                                                   replaceInfo=replaceInfo)\n                map_results.append(itemInfo)\n                print (\"%s webmap created\" % itemInfo['MapInfo']['Name'])\n            return map_results\n\n        except common.ArcRestHelperError as e:\n            raise e\n        except Exception as e:\n\n        
    line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                 \"function\": \"publishMap\",\n                 \"line\": line,\n                 \"filename\":  filename,\n                 \"synerror\": synerror,\n            })\n        finally:\n            itemInfo = None\n            itemId = None\n            replaceInfo = None\n            replaceItem = None\n            map_info = None\n            admin = None\n\n            del itemInfo\n            del itemId\n            del replaceInfo\n            del replaceItem\n            del map_info\n            del admin\n\n            gc.collect()", "docstring": "Publishes a list of maps.\n\nArgs:\nmaps_info (list): A list of JSON configuration maps to publish.\n\nReturns:\nlist: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.", "source": "juraj-google-style"}
{"code": "def __init__(self, pipeline: 'Pipeline', tag: Optional[str]=None, element_type: Optional[Union[type, 'typehints.TypeConstraint']]=None, windowing: Optional['Windowing']=None, is_bounded=True):\n    self.pipeline = pipeline\n    self.tag = tag\n    self.element_type = element_type\n    self.producer: Optional[AppliedPTransform] = None\n    self.is_bounded = is_bounded\n    if windowing:\n        self._windowing = windowing\n    self.requires_deterministic_key_coder = None", "docstring": "Initializes a PValue with all arguments hidden behind keyword arguments.\n\nArgs:\npipeline: Pipeline object for this PValue.\ntag: Tag of this PValue.\nelement_type: The type of this PValue.", "source": "github-repos"}
{"code": "def serialize_array(array, domain=(0, 1), fmt='png', quality=70):\n    normalized = _normalize_array(array, domain=domain)\n    return _serialize_normalized_array(normalized, fmt=fmt, quality=quality)", "docstring": "Given an arbitrary rank-3 NumPy array,\nreturns the byte representation of the encoded image.\n\nArgs:\narray: NumPy array of dtype uint8 and range 0 to 255\ndomain: expected range of values in array, see `_normalize_array()`\nfmt: string describing desired file format, defaults to 'png'\nquality: specifies compression quality from 0 to 100 for lossy formats\n\nReturns:\nimage data as BytesIO buffer", "source": "codesearchnet"}
{"code": "def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):\n    grid_h = np.arange(grid_size, dtype=np.float32)\n    grid_w = np.arange(grid_size, dtype=np.float32)\n    grid = np.meshgrid(grid_w, grid_h)\n    grid = np.stack(grid, axis=0)\n    grid = grid.reshape([2, 1, grid_size, grid_size])\n    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n    if add_cls_token:\n        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)\n    return pos_embed", "docstring": "Create 2D sin/cos positional embeddings.\n\nArgs:\nembed_dim (`int`):\nEmbedding dimension.\ngrid_size (`int`):\nThe grid height and width.\nadd_cls_token (`bool`, *optional*, defaults to `False`):\nWhether or not to add a classification (CLS) token.\n\nReturns:\n(`torch.FloatTensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the\nposition embeddings (with or without classification token)", "source": "github-repos"}
{"code": "def update(self, resource, timeout=(- 1)):\n    return self._client.update(resource, timeout=timeout, default_values=self.DEFAULT_VALUES, uri=self.URI)", "docstring": "Updates a User.\n\nArgs:\nresource (dict): Object to update.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "codesearchnet"}
{"code": "def _applyInter(finter0, finter1, conflict=\"ignore\"):\n        \n\n        OPTIONS = [\"error\", \"ignore\", \"me\", \"other\"]\n        assert conflict in OPTIONS, \"Invalid value in `conflict`.\"\n\n        \n        min_int = -2**63\n        \n        inter0 = tuple([f.getValue() if f else min_int for f in finter0])\n        inter1 = tuple([f.getValue() if f else min_int for f in finter1])\n        le00 = inter0[0] <= inter1[0]                           \n        le01 = inter1[1] == min_int or inter0[0] <= inter1[1]   \n        le11 = inter1[1] == min_int or (inter0[1] != min_int and inter0[1] <= inter1[1])  \n        ge00 = not le00 or inter0[0] == inter1[0]               \n        ge10 = inter0[1] == min_int or inter0[1] >= inter1[0]   \n\n        \n        \n        \n        \n        \n        \n\n        \n        if le00 and ge10 and le11:                       \n            return finter1[0], finter0[1]\n        elif le00 and ge10 and not le11:                 \n            return finter1\n        elif ge00 and le01 and le11:                     \n            return finter0\n        elif ge00 and le01 and not le11:                 \n            return finter0[0], finter1[1]\n        elif conflict == \"me\":\n            return finter0\n        elif conflict == \"other\":\n            return finter1\n        elif conflict == \"error\":\n            raise Exception(\"Disjoint intervals!\")\n        return None", "docstring": "Return the restriction of first interval by the second.\n\nArgs:\n\n- inter0, inter1 (tuple of Feature): intervals\n\nReturn(tuple of Feature): the resulting interval\n- conflict(str): if a property hasn't compatible values/constrains, do:\n- ``\"error\"``: raise exception.\n- ``\"ignore\"``: return None.\n- ``\"me\"``: return finter0.\n- ``\"other\"``: return finter1.", "source": "juraj-google-style"}
{"code": "def GetExcludePatternsForDir(dirname):\n    ignore_patterns = []\n    yapfignore_file = os.path.join(dirname, '.yapfignore')\n    if os.path.exists(yapfignore_file):\n        ignore_patterns += _GetExcludePatternsFromYapfIgnore(yapfignore_file)\n    pyproject_toml_file = os.path.join(dirname, 'pyproject.toml')\n    if os.path.exists(pyproject_toml_file):\n        ignore_patterns += _GetExcludePatternsFromPyprojectToml(pyproject_toml_file)\n    return ignore_patterns", "docstring": "Return patterns of files to exclude from ignorefile in a given directory.\n\nLooks for .yapfignore in the directory dirname.\n\nArguments:\ndirname: (unicode) The name of the directory.\n\nReturns:\nA List of file patterns to exclude if ignore file is found, otherwise empty\nList.", "source": "github-repos"}
{"code": "def metadata_path(self, m_path):\n        \n        if not m_path:\n            self.metadata_dir = None\n            self.metadata_file = None\n\n        else:\n            if not op.exists(m_path):\n                raise OSError('{}: file does not exist!'.format(m_path))\n\n            if not op.dirname(m_path):\n                self.metadata_dir = '.'\n            else:\n                self.metadata_dir = op.dirname(m_path)\n            self.metadata_file = op.basename(m_path)\n\n            \n            \n            tmp_sr = SeqIO.read(self.metadata_path, 'uniprot-xml')\n            parsed = parse_uniprot_xml_metadata(tmp_sr)\n            self.update(parsed, overwrite=True)", "docstring": "Provide pointers to the paths of the metadata file\n\nArgs:\nm_path: Path to metadata file", "source": "juraj-google-style"}
{"code": "def serialize(layer):\n    return serialization_lib.serialize_keras_object(layer)", "docstring": "Returns the layer configuration as a Python dict.\n\nArgs:\nlayer: A `keras.layers.Layer` instance to serialize.\n\nReturns:\nPython dict which contains the configuration of the layer.", "source": "github-repos"}
{"code": "def __init__(self, path):\n        \n        self.path = os.path.join(path, app.config['XCESSIV_NOTEBOOK_NAME'])", "docstring": "Initialize context manager\n\nArgs:\npath (str, unicode): Path to project folder", "source": "juraj-google-style"}
{"code": "def __init__(self, substream_name: str='realtime', video_mode: VideoMode=VideoMode.CAMERA):\n    self._video_mode = video_mode\n    self._substream_name = substream_name", "docstring": "Initializes the processor.\n\nArgs:\nsubstream_name: The name of the substream to use for the generated images.\nvideo_mode: The video mode to use for the video. Can be CAMERA or SCREEN.", "source": "github-repos"}
{"code": "def image(cam):\n    \n    \n    yield marv.set_header(title=cam.topic)\n    msg = yield marv.pull(cam)\n    if msg is None:\n        return\n\n    \n    pytype = get_message_type(cam)\n    rosmsg = pytype()\n    rosmsg.deserialize(msg.data)\n\n    \n    name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])\n    imgfile = yield marv.make_file(name)\n    img = imgmsg_to_cv2(rosmsg, \"rgb8\")\n    cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))\n    yield marv.push(imgfile)", "docstring": "Extract first image of input stream to jpg file.\n\nArgs:\ncam: Input stream of raw rosbag messages.\n\nReturns:\nFile instance for first image of input stream.", "source": "juraj-google-style"}
{"code": "def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):\n  \n  try:\n    fd = aff4.FACTORY.Open(aff4_urn, token=token)\n    filepath = os.path.join(target_dir, fd.urn.Path()[1:])\n\n    \n    if isinstance(fd, standard.VFSDirectory):\n      try:\n        os.makedirs(filepath)\n      except OSError:\n        pass\n\n      return None\n    \n    elif isinstance(fd, aff4.AFF4Stream):\n      if not os.path.isfile(filepath):\n        try:\n          \n          os.makedirs(os.path.dirname(filepath))\n        except OSError:\n          pass\n        DownloadFile(fd, filepath)\n      elif (os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE) or\n            overwrite):\n        \n        DownloadFile(fd, filepath)\n      else:\n        logging.info(\"File %s exists, skipping\", filepath)\n\n      return filepath\n    else:\n      raise ValueError(\"Opened urn is neither a downloaded file nor a \"\n                       \"directory: %s\" % aff4_urn)\n\n  except IOError as e:\n    logging.exception(\"Failed to read %s due to %s\", aff4_urn, e)\n    raise", "docstring": "Copy an AFF4 object that supports a read interface to local filesystem.\n\nArgs:\naff4_urn: URN of thing to copy.\ntarget_dir: Directory to copy the file to.\ntoken: Auth token.\noverwrite: If True overwrite the file if it exists.\n\nReturns:\nIf aff4_urn points to a file, returns path to the downloaded file.\nOtherwise returns None.\n\nBy default file will only be overwritten if file size differs.", "source": "juraj-google-style"}
{"code": "def unpack_x_y_sample_weight(data):\n    if isinstance(data, list):\n        data = tuple(data)\n    if not isinstance(data, tuple):\n        return (data, None, None)\n    elif len(data) == 1:\n        return (data[0], None, None)\n    elif len(data) == 2:\n        return (data[0], data[1], None)\n    elif len(data) == 3:\n        return (data[0], data[1], data[2])\n    error_msg = f'Data is expected to be in format `x`, `(x,)`, `(x, y)`, or `(x, y, sample_weight)`, found: {data}'\n    raise ValueError(error_msg)", "docstring": "Unpacks user-provided data tuple.\n\nThis is a convenience utility to be used when overriding\n`Model.train_step`, `Model.test_step`, or `Model.predict_step`.\nThis utility makes it easy to support data of the form `(x,)`,\n`(x, y)`, or `(x, y, sample_weight)`.\n\nExample:\n\n>>> features_batch = ops.ones((10, 5))\n>>> labels_batch = ops.zeros((10, 5))\n>>> data = (features_batch, labels_batch)\n>>> # `y` and `sample_weight` will default to `None` if not provided.\n>>> x, y, sample_weight = unpack_x_y_sample_weight(data)\n>>> sample_weight is None\nTrue\n\nArgs:\ndata: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.\n\nReturns:\nThe unpacked tuple, with `None`s for `y` and `sample_weight` if they are\nnot provided.", "source": "github-repos"}
{"code": "def _write_reqs(amend: bool = False, stage: bool = False):\n    \n    LOGGER.info('writing requirements')\n\n    base_cmd = 'pipenv lock -r'\n    _write_reqs_file(f'{base_cmd}', 'requirements.txt')\n    _write_reqs_file(f'{base_cmd} -d', 'requirements-dev.txt')\n    files_to_add = ['Pipfile', 'requirements.txt', 'requirements-dev.txt']\n\n    if amend:\n        CTX.repo.amend_commit(append_to_msg='update requirements [auto]', files_to_add=files_to_add)\n    elif stage:\n        CTX.repo.stage_subset(*files_to_add)", "docstring": "Writes the requirement files\n\nArgs:\namend: amend last commit with changes\nstage: stage changes", "source": "juraj-google-style"}
{"code": "def generate_filename(self, file_type, time_identifier=None, extension_name=None):\n    time_str = time_identifier\n    if time_identifier is None:\n        time_str = mobly_logger.get_log_file_timestamp()\n    elif isinstance(time_identifier, runtime_test_info.RuntimeTestInfo):\n        time_str = time_identifier.signature\n    filename_tokens = [file_type]\n    if self.debug_tag != self.serial:\n        filename_tokens.append(self.debug_tag)\n    filename_tokens.extend([self.serial, self.model, time_str])\n    filename_str = ','.join(filename_tokens)\n    if extension_name is not None:\n        filename_str = '%s.%s' % (filename_str, extension_name)\n    filename_str = mobly_logger.sanitize_filename(filename_str)\n    self.log.debug('Generated filename: %s', filename_str)\n    return filename_str", "docstring": "Generates a name for an output file related to this device.\n\nThe name follows the pattern:\n\n{file type},{debug_tag},{serial},{model},{time identifier}.{ext}\n\n\"debug_tag\" is only added if it's different from the serial. \"ext\" is\nadded if specified by user.\n\nArgs:\nfile_type: string, type of this file, like \"logcat\" etc.\ntime_identifier: string or RuntimeTestInfo. If a `RuntimeTestInfo`\nis passed in, the `signature` of the test case will be used. If\na string is passed in, the string itself will be used.\nOtherwise the current timestamp will be used.\nextension_name: string, the extension name of the file.\n\nReturns:\nString, the filename generated.", "source": "github-repos"}
{"code": "def of_type_function(function: _evaluation.OfTypeFunction, operand_result: Optional[_sql_data_types.IdentifierSelect], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    if operand_result is None:\n        raise ValueError('ofType() cannot be called without an operand.')\n    if len(params_result) != 1:\n        raise ValueError('ofType must have a data type parameter.')\n    sql_alias = 'ofType_'\n    attribute = function.base_type_str\n    return_type = _sql_data_types.get_standard_sql_data_type(function.return_type)\n    return dataclasses.replace(operand_result, select_part=operand_result.select_part.dot(attribute, return_type, sql_alias=sql_alias))", "docstring": "Generates Spark SQL representing the FHIRPath ofType() function.\n\nReturns the resource of the given type, typically used in choice types.\n\nArgs:\nfunction: The FHIRPath AST `MatchesFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.\n\nRaises:\nValueError: When the function is called without an operand, or the length of\nparams_result is not one.", "source": "github-repos"}
{"code": "def quote(src_string, return_expr=False):\n  \n  node = parse_string(src_string)\n  body = node.body\n  if len(body) == 1:\n    if isinstance(body[0], gast.Expr) and not return_expr:\n      out = body[0].value\n    else:\n      out = body[0]\n  else:\n    out = node\n  return out", "docstring": "Go from source code to AST nodes.\n\nThis function returns a tree without enclosing `Module` or `Expr` nodes.\n\nArgs:\nsrc_string: The source code to parse.\nreturn_expr: Whether or not to return a containing expression. This can be\nset to `True` if the result is to be part of a series of statements.\n\nReturns:\nAn AST of the given source code.", "source": "juraj-google-style"}
{"code": "def _get_commands(dist):\n    py_files = (f for f in setuptools.findall() if (os.path.splitext(f)[1].lower() == '.py'))\n    pkg_files = (f for f in py_files if (_get_package_name(f) in dist.packages))\n    commands = {}\n    for file_name in pkg_files:\n        with open(file_name) as py_file:\n            module = typing.cast(ast.Module, ast.parse(py_file.read()))\n        module_name = _get_module_name(file_name)\n        _append_commands(commands, module_name, _get_module_commands(module))\n        _append_commands(commands, module_name, _get_class_commands(module))\n        _append_commands(commands, module_name, _get_function_commands(module))\n    return commands", "docstring": "Find all commands belonging to the given distribution.\n\nArgs:\ndist: The Distribution to search for docopt-compatible docstrings that\ncan be used to generate command entry points.\n\nReturns:\nA dictionary containing a mapping of primary commands to sets of\nsubcommands.", "source": "codesearchnet"}
{"code": "def unlock_kinetis_abort_clear():\n    flags = registers.AbortRegisterFlags()\n    flags.STKCMPCLR = 1\n    flags.STKERRCLR = 1\n    flags.WDERRCLR = 1\n    flags.ORUNERRCLR = 1\n    return flags.value", "docstring": "Returns the abort register clear code.\n\nReturns:\nThe abort register clear code.", "source": "codesearchnet"}
{"code": "def __init__(self, recommender, repeat=True, maxlen=None, debug=False):\n        \n        self.rec = recommender\n        self.feature_rec = issubclass(recommender.__class__, FeatureRecommenderMixin)\n\n        self.repeat = repeat\n\n        \n        \n        self.item_buffer = deque(maxlen=maxlen)\n\n        self.debug = debug", "docstring": "Set/initialize parameters.\n\nArgs:\nrecommender (Recommender): Instance of a recommender which has been initialized.\nrepeat (boolean): Choose whether the same item can be repeatedly interacted by the same user.\nmaxlen (int): Size of an item buffer which stores most recently observed items.", "source": "juraj-google-style"}
{"code": "def event_vars(self):\n    if (self._event_vars is None):\n        self._event_vars = list(self.iter_event_vars())\n    return self._event_vars", "docstring": "The service's eventable variables.\n\nReturns:\nlist(tuple): A list of (variable name, data type) tuples.", "source": "codesearchnet"}
{"code": "def get(self):\n    config = self.config\n    if (not config):\n        return None\n    response = dict()\n    response.update(self._parse_source_interface(config))\n    response.update(self._parse_servers(config))\n    return response", "docstring": "Returns the current NTP configuration\n\nThe Ntp resource returns the following:\n\n* source_interface (str): The interface port that specifies\nNTP server\n* servers (list): A list of the NTP servers that have been\nassigned to the node. Each entry in the\nlist is a key/value pair of the name of\nthe server as the key and None or 'prefer'\nas the value if the server is preferred.\n\nReturns:\nA Python dictionary object of key/value pairs that represents\nthe current NTP configuration of the node::\n\n{\n\"source_interface\": 'Loopback0',\n'servers': [\n{ '1.1.1.1': None },\n{ '1.1.1.2': 'prefer' },\n{ '1.1.1.3': 'prefer' },\n{ '1.1.1.4': None },\n]\n}", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n    residual = hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.post_attention_layernorm(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\npast_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states", "source": "github-repos"}
{"code": "def np_auc(self, predictions, labels, weights):\n    if weights is None:\n        weights = np.ones(np.size(predictions))\n    is_positive = labels > 0\n    num_positives = np.sum(weights[is_positive])\n    num_negatives = np.sum(weights[~is_positive])\n    inds = np.argsort(-predictions)\n    sorted_labels = labels[inds]\n    sorted_weights = weights[inds]\n    is_positive = sorted_labels > 0\n    tp = np.cumsum(sorted_weights * is_positive) / num_positives\n    return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives", "docstring": "Computes the AUC explicitly using Numpy.\n\nArgs:\npredictions: an ndarray with shape [N].\nlabels: an ndarray with shape [N].\nweights: an ndarray with shape [N].\n\nReturns:\nthe area under the ROC curve.", "source": "github-repos"}
{"code": "def __init__(self, parser):\n        \n        self.parser = parser\n        args = getattr(self, 'args', ())\n        for arg in args:\n            flags = arg[0]\n            if not isinstance(flags, tuple):\n                flags = (flags,)\n            self.parser.add_argument(*flags, **arg[1])", "docstring": "Initialize the subcommand with its parser\n\nArgs:\nparser (Parser) : an Argparse ``Parser`` instance to configure\nwith the args for this subcommand.\n\nThis method will automatically add all the arguments described in\n``self.args``. Subclasses can perform any additional customizations\non ``self.parser``.", "source": "juraj-google-style"}
{"code": "def project_group_token(self, group_tokens):\n    projected_group_tokens = self.mlp_inter(group_tokens)\n    projected_group_tokens = self.norm_post_tokens(projected_group_tokens)\n    return projected_group_tokens", "docstring": "Args:\ngroup_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]\n\nReturns:\nprojected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]", "source": "github-repos"}
{"code": "def __init__(self, parent):\n        \n\n        super(ModuleUIFrame, self).__init__(parent, padding=8)\n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(1, weight=1)\n\n        chat = ChatFrame(self)\n        chat.grid(column=0, row=0, sticky=\"W E N S\")", "docstring": "The console tab for bethebot\n\nArgs:\nparent: tk or ttk element", "source": "juraj-google-style"}
{"code": "def set_float(self, option, value):\n        \n        if not isinstance(value, float):\n            raise TypeError(\"Value must be a float\")\n        self.options[option] = value", "docstring": "Set a float option.\n\nArgs:\noption (str): name of option.\nvalue (float): value of the option.\n\nRaises:\nTypeError: Value must be a float.", "source": "juraj-google-style"}
{"code": "def _BreakpointEvent(self, event, frame):\n    error_status = None\n    if (event != native.BREAKPOINT_EVENT_HIT):\n        error_status = _BREAKPOINT_EVENT_STATUS[event]\n    elif (self.definition.get('action') == 'LOG'):\n        error_status = self._collector.Log(frame)\n        if (not error_status):\n            return\n    if (not self._SetCompleted()):\n        return\n    self.Clear()\n    if error_status:\n        self._CompleteBreakpoint({'status': error_status})\n        return\n    collector = capture_collector.CaptureCollector(self.definition, self.data_visibility_policy)\n    try:\n        collector.Collect(frame)\n    except BaseException as e:\n        native.LogInfo(('Internal error during data capture: %s' % repr(e)))\n        error_status = {'isError': True, 'description': {'format': ('Internal error while capturing data: %s' % repr(e))}}\n        self._CompleteBreakpoint({'status': error_status})\n        return\n    except:\n        native.LogInfo('Unknown exception raised')\n        error_status = {'isError': True, 'description': {'format': 'Unknown internal error'}}\n        self._CompleteBreakpoint({'status': error_status})\n        return\n    self._CompleteBreakpoint(collector.breakpoint, is_incremental=False)", "docstring": "Callback invoked by cdbg_native when breakpoint hits.\n\nArgs:\nevent: breakpoint event (see kIntegerConstants in native_module.cc).\nframe: Python stack frame of breakpoint hit or None for other events.", "source": "codesearchnet"}
{"code": "def operation_spec(input_element_spec: Optional[pg.typing.ValueSpec]=None, output_element_spec: Optional[pg.typing.ValueSpec]=None) -> pg.typing.ValueSpec:\n    if input_element_spec is None:\n        input_element_spec = pg.typing.Object(pg.DNA)\n    if output_element_spec is None:\n        output_element_spec = pg.typing.Object(pg.DNA)\n    return pg.typing.Callable([pg.typing.List(input_element_spec)], returns=pg.typing.List(output_element_spec))", "docstring": "Returns the value spec (PyGlove typing) for an evolutionary operation.\n\nWe use `pg.typing.Callable` instead of `pg.typing.Object(Operation)`\nto make it more flexible to plugin lambdas.\n\nArgs:\ninput_element_spec: The value spec for input element.\noutput_element_spec: The value spec for output element.\n\nReturns:\nA value spec for Callable[[List[DNA]], List[DNA]].", "source": "github-repos"}
{"code": "def merge(self, options):\n    merged = copy.deepcopy(self)\n    if options is None:\n        return merged\n    if options.bytes_per_pack != 0:\n        merged.bytes_per_pack = options.bytes_per_pack\n    if options.timeout_seconds is not None:\n        merged.timeout_seconds = options.timeout_seconds\n    if options.implementation != CommunicationImplementation.AUTO:\n        merged.implementation = options.implementation\n    return merged", "docstring": "Merges with another options and returns a new one.\n\nValues specified in the `options` takes precedence if they're not the\ndefault.\n\nArgs:\noptions: a `tf.distribute.experimental.CollectiveCommunication`.\n\nReturns:\nA new `tf.distribute.experimental.CollectiveCommunication`.", "source": "github-repos"}
{"code": "class SecondaryBufferedQuantileTracker(WindowedTracker, QuantileTracker):\n\n    def __init__(self, master: QuantileTracker, q):\n        assert isinstance(master, BufferedQuantileTracker), 'Cannot create secondary tracker from non-BufferedQuantileTracker'\n        self._master = master\n        super().__init__(self._master._window_mode)\n        QuantileTracker.__init__(self, q)\n        self._sorted_items = self._master._sorted_items\n\n    def push(self, x):\n        \n        pass\n\n    def get(self):\n        \n        return self._master._get_helper(self._master._sorted_items, self._q)", "docstring": "A secondary quantile tracker that shares its data with a master tracker.\n\nThis tracker acts as a read-only view of the master tracker's data, providing\nquantile calculations without maintaining its own independent buffer. It\nrelies on the master's sorted items for quantile estimations.\n\nArgs:\nmaster: The BufferedQuantileTracker instance to share data with.\nq: A list of quantiles to track.", "source": "github-repos"}
{"code": "def create_initial(self, address_values):\n        \n\n        with self._lock:\n            for add, val in address_values:\n                self._state[add] = _ContextFuture(address=add, result=val)", "docstring": "Create futures from inputs with the current value for that address\nat the start of that context.\n\nArgs:\naddress_values (list of tuple): The tuple is string, bytes of the\naddress and value.", "source": "juraj-google-style"}
{"code": "def supported_tifs(self):\n    buf = ctypes.c_uint32()\n    self._dll.JLINKARM_TIF_GetAvailable(ctypes.byref(buf))\n    return buf.value", "docstring": "Returns a bitmask of the supported target interfaces.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nBitfield specifying which target interfaces are supported.", "source": "codesearchnet"}
{"code": "def topic_update(channel, topic_channel):\n    \n\n    if topic_channel is not None:\n        try:\n            channel_message = \"Topic channel is now `{}`.\".format(topic_channel.name)\n        except Exception as e:\n            logger.exception(e)\n            channel_message = \"Topic channel has been updated.\"\n    else:\n        channel_message = \"Topic channel has been cleared.\"\n\n    \n    gui = ui_embed.UI(\n            channel,\n            \"Topic channel updated\",\n            channel_message,\n            modulename=modulename,\n            colour=modulecolor_info\n    )\n\n    return gui", "docstring": "Creates an embed UI for the topic update\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\ntopic_channel: The new topic channel\n\nReturns:\nembed: The created embed", "source": "juraj-google-style"}
{"code": "def output_types(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)", "docstring": "Returns the type of each component of an element of this iterator.\n\nReturns:\nA nested structure of `tf.DType` objects corresponding to each component\nof an element of this dataset.", "source": "github-repos"}
{"code": "def inputs(self, name):\n        \n        self._closed()\n\n        step = self._get_step(name, make_copy=False)\n        return step.list_inputs()", "docstring": "List input names and types of a step in the steps library.\n\nArgs:\nname (str): name of a step in the steps library.", "source": "juraj-google-style"}
{"code": "def active_futures(ticker: str, dt) -> str:\n    \n    t_info = ticker.split()\n    prefix, asset = ' '.join(t_info[:-1]), t_info[-1]\n    info = const.market_info(f'{prefix[:-1]}1 {asset}')\n\n    f1, f2 = f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}'\n    fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq'])\n    fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq'])\n\n    fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True)\n\n    if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month: return fut_1\n\n    d1 = bdib(ticker=f1, dt=dt)\n    d2 = bdib(ticker=f2, dt=dt)\n\n    return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2", "docstring": "Active futures contract\n\nArgs:\nticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.\ndt: date\n\nReturns:\nstr: ticker name", "source": "juraj-google-style"}
{"code": "def load_state(self, in_path):\n        \n\n        with open(in_path, \"r\") as infile:\n            state = json.load(infile)\n\n        self.restore_state(state)", "docstring": "Load the current state of this emulated object from a file.\n\nThe file should have been produced by a previous call to save_state.\n\nArgs:\nin_path (str): The path to the saved state dump that you wish\nto load.", "source": "juraj-google-style"}
{"code": "async def remember(self, request, user_id):\n    ticket = self._new_ticket(request, user_id)\n    (await self.remember_ticket(request, ticket))", "docstring": "Called to store the userid for a request.\n\nThis function creates a ticket from the request and user_id, and calls\nthe abstract function remember_ticket() to store the ticket.\n\nArgs:\nrequest: aiohttp Request object.\nuser_id: String representing the user_id to remember", "source": "codesearchnet"}
{"code": "def pop(self, identifier, default=None):\n        \n        if identifier in self.children:\n            item = self[identifier]\n            self.__delitem__(identifier)\n            return item\n        else:\n            return default", "docstring": "Pop a node of the AttrTree using its path string.\n\nArgs:\nidentifier: Path string of the node to return\ndefault: Value to return if no node is found\n\nReturns:\nThe node that was removed from the AttrTree", "source": "juraj-google-style"}
{"code": "def _bytestringToLong(bytestring, signed=False, numberOfRegisters=2):\n    \n    _checkString(bytestring, 'byte string', minlength=4, maxlength=4)\n    _checkBool(signed, description='signed parameter')\n    _checkInt(numberOfRegisters, minvalue=2, maxvalue=2, description='number of registers')\n\n    formatcode = '>'  \n    if signed:\n        formatcode += 'l'  \n    else:\n        formatcode += 'L'  \n\n    return _unpack(formatcode, bytestring)", "docstring": "Convert a bytestring to a long integer.\n\nLong integers (32 bits = 4 bytes) are stored in two consecutive 16-bit registers in the slave.\n\nArgs:\n* bytestring (str): A string of length 4.\n* signed (bol): Whether large positive values should be interpreted as negative values.\n* numberOfRegisters (int): Should be 2. For error checking only.\n\nReturns:\nThe numerical value (int).\n\nRaises:\nValueError, TypeError", "source": "juraj-google-style"}
{"code": "def byte_swap_tflite_buffer(tflite_model, from_endiness, to_endiness):\n    if tflite_model is None:\n        return None\n    model = convert_bytearray_to_object(tflite_model)\n    byte_swap_tflite_model_obj(model, from_endiness, to_endiness)\n    return convert_object_to_bytearray(model)", "docstring": "Generates a new model byte array after byte swapping its buffers field.\n\nArgs:\ntflite_model: TFLite flatbuffer in a byte array.\nfrom_endiness: The original endianness format of the buffers in\ntflite_model.\nto_endiness: The destined endianness format of the buffers in tflite_model.\n\nReturns:\nTFLite flatbuffer in a byte array, after being byte swapped to to_endiness\nformat.", "source": "github-repos"}
{"code": "def atoms(lines):\n    conv_charge_table = {0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: (- 1), 6: (- 2), 7: (- 3)}\n    results = {}\n    for (i, line) in enumerate(lines):\n        symbol = line[31:34].rstrip()\n        try:\n            atom = Atom(symbol)\n        except KeyError:\n            raise ValueError(symbol)\n        xpos = float(line[0:10])\n        ypos = float(line[10:20])\n        zpos = float(line[20:30])\n        atom.coords = (xpos, ypos, zpos)\n        atom.mass_diff = int(line[34:37])\n        old_sdf_charge = int(line[37:40])\n        atom.charge = conv_charge_table[old_sdf_charge]\n        if (old_sdf_charge == 4):\n            atom.radical = 1\n        results[(i + 1)] = {'atom': atom}\n    return results", "docstring": "Parse atom block into atom objects\n\nReturns:\ndict: networkx nodes", "source": "codesearchnet"}
{"code": "def front(self, n):\n    new_dtypes = (self._dtype_cache if (self._dtype_cache is None) else self._dtype_cache[:n])\n    if self._is_transposed:\n        result = self.__constructor__(self.data.transpose().take(0, n).transpose(), self.index, self.columns[:n], new_dtypes)\n        result._is_transposed = True\n    else:\n        result = self.__constructor__(self.data.take(1, n), self.index, self.columns[:n], new_dtypes)\n    return result", "docstring": "Returns the first n columns.\n\nArgs:\nn: Integer containing the number of columns to return.\n\nReturns:\nDataManager containing the first n columns of the original DataManager.", "source": "codesearchnet"}
{"code": "def set_state(self, vid, value=None, default=False, disable=False):\n        \n        cmds = self.command_builder('state', value=value, default=default,\n                                    disable=disable)\n        return self.configure_vlan(vid, cmds)", "docstring": "Configures the VLAN state\n\nEosVersion:\n4.13.7M\n\nArgs:\nvid (str): The VLAN ID to configure\nvalue (str): The value to set the vlan state to\ndefault (bool): Configures the vlan state to its default value\ndisable (bool): Negates the vlan state\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "juraj-google-style"}
{"code": "def _ReadTimestamp(self, filename):\n    if not os.path.exists(filename):\n        return None\n    try:\n        timestamp_file = open(filename, 'r')\n        timestamp_string = timestamp_file.read().strip()\n    except IOError as e:\n        self.log.warning('error opening timestamp file: %s', e)\n        timestamp_string = None\n    else:\n        timestamp_file.close()\n    self.log.debug('read timestamp %s from file %r', timestamp_string, filename)\n    if timestamp_string is not None:\n        try:\n            timestamp = int(calendar.timegm(time.strptime(timestamp_string + ' UTC', '%Y-%m-%dT%H:%M:%SZ %Z')))\n        except ValueError as e:\n            self.log.error('cannot parse timestamp file %r: %s', filename, e)\n            timestamp = None\n    else:\n        timestamp = None\n    now = self._GetCurrentTime()\n    if timestamp and timestamp > now:\n        self.log.warning('timestamp %r from %r is in the future, now is %r', timestamp_string, filename, now)\n        if timestamp - now >= 60 * 60:\n            self.log.info('Resetting timestamp to now.')\n            timestamp = now\n    return timestamp", "docstring": "Return a timestamp from a file.\n\nThe timestamp file format is a single line, containing a string in the\nISO-8601 format YYYY-MM-DDThh:mm:ssZ (i.e. UTC time).  We do not support\nall ISO-8601 formats for reasons of convenience in the code.\n\nTimestamps internal to nss_cache deliberately do not carry milliseconds.\n\nArgs:\nfilename:  A String naming the file to read from.\n\nReturns:\nAn int with the number of seconds since epoch, or None if the timestamp\nfile doesn't exist or has errors.", "source": "github-repos"}
{"code": "def __getitem__(self, index):\n    del index\n    raise NotImplementedError", "docstring": "Gets batch at position `index`.\n\nArgs:\nindex: position of the batch in the PyDataset.\n\nReturns:\nA batch", "source": "github-repos"}
{"code": "def Py3GetFullArgSpec(fn):\n    try:\n        sig = inspect._signature_from_callable(fn, skip_bound_arg=True, follow_wrapper_chains=True, sigcls=inspect.Signature)\n    except Exception:\n        raise TypeError('Unsupported callable.')\n    args = []\n    varargs = None\n    varkw = None\n    kwonlyargs = []\n    defaults = ()\n    annotations = {}\n    defaults = ()\n    kwdefaults = {}\n    if sig.return_annotation is not sig.empty:\n        annotations['return'] = sig.return_annotation\n    for param in sig.parameters.values():\n        kind = param.kind\n        name = param.name\n        if kind is inspect._POSITIONAL_ONLY:\n            args.append(name)\n        elif kind is inspect._POSITIONAL_OR_KEYWORD:\n            args.append(name)\n            if param.default is not param.empty:\n                defaults += (param.default,)\n        elif kind is inspect._VAR_POSITIONAL:\n            varargs = name\n        elif kind is inspect._KEYWORD_ONLY:\n            kwonlyargs.append(name)\n            if param.default is not param.empty:\n                kwdefaults[name] = param.default\n        elif kind is inspect._VAR_KEYWORD:\n            varkw = name\n        if param.annotation is not param.empty:\n            annotations[name] = param.annotation\n    if not kwdefaults:\n        kwdefaults = None\n    if not defaults:\n        defaults = None\n    return inspect.FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwdefaults, annotations)", "docstring": "A alternative to the builtin getfullargspec.\n\nThe builtin inspect.getfullargspec uses:\n`skip_bound_args=False, follow_wrapped_chains=False`\nin order to be backwards compatible.\n\nThis function instead skips bound args (self) and follows wrapped chains.\n\nArgs:\nfn: The function or class of interest.\nReturns:\nAn inspect.FullArgSpec namedtuple with the full arg spec of the function.", "source": "github-repos"}
{"code": "def recipe_to_python(name, description, instructions, tasks, parameters={}, project=None, client_credentials=None, user_credentials=None, service_credentials=None):\n    tasks = json_expand_queries(tasks)\n    code = DISCLAIMER\n    code += 'import argparse\\n'\n    code += 'import textwrap\\n\\n'\n    code += 'from starthinker.util.configuration import Configuration\\n'\n    imported = set()\n    for task in tasks:\n        script, task = next(iter(task.items()))\n        if script not in imported:\n            code += 'from starthinker.task.%s.run import %s\\n' % (script, script)\n            imported.add(script)\n    code += '\\n'\n    code += '\\n'\n    fields = json_get_fields(tasks)\n    if fields:\n        code += 'def recipe_%s(config, %s):\\n' % (name, ', '.join([f['name'] for f in fields]))\n    else:\n        code += 'def recipe_%s(config):\\n' % name\n    if description or fields:\n        code += '  \\n\\n'\n    for task in tasks:\n        script, task = next(iter(task.items()))\n        code += '  %s(config, %s)\\n\\n' % (script, dict_to_python(task, indent=1))\n    code += '\\n'\n    code += '\\n'\n    code += 'if __name__ == \"__main__\":\\n'\n    code += parameters_to_argparse(description, instructions, fields)\n    code += '\\n'\n    code += '  args = parser.parse_args()\\n'\n    code += '\\n'\n    code += '  config = Configuration(\\n    project=args.project,\\n    user=args.user,\\n    service=args.service,\\n    client=args.client,\\n    key=args.key,\\n    verbose=args.verbose\\n  )'\n    code += '\\n\\n'\n    if fields:\n        code += '  recipe_%s(config, %s)\\n' % (name, ', '.join(['args.%s' % f['name'] for f in fields]))\n    else:\n        code += '  recipe_%s(config)\\n' % name\n    return code", "docstring": "Converts a JSON recipe into a python stand alone example.\n\nSets up multiple steps to execute recipe:\n1. Install starthinker from repository\n2. Get Cloud Project ID.\n3. Get Client Credentials ( optional if User Credentials exist ).\n4. Enter Recipe parameters if fields present.\n5. Execute recipe tasks.\n\nArgs:\n* name: (string) The name of the notebook.\n* description: (string) A description fo the recipe.\n* instructions: (string) Recipe manual instructions, for example connecting datastudios.\n* tasks: (list) The task JSON to execute.\n* parameters: (dict) Values for field parameters in tasks, optional.\n* project: (string) The GCP project id.\n* client_credentials: (string) The GCP Desktop Client Credentials in JSON string.\n* user_credentials: (string) Not used, placeholder.\n* service_credentials: (string) Not used, placeholder.\n\nReturns:\n* (string) Rendered example source code to be written to a py file.", "source": "github-repos"}
{"code": "def _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config):\n    if config.use_gumbel_for_cells:\n        gumbel_dist = torch.distributions.RelaxedBernoulli(temperature=config.temperature, logits=dist_per_cell.logits * config.temperature)\n        scaled_probability_per_cell = gumbel_dist.sample()\n    else:\n        scaled_probability_per_cell = dist_per_cell.probs\n    scaled_probability_per_cell = scaled_probability_per_cell / numeric_values_scale * input_mask_float\n    count_result = torch.sum(scaled_probability_per_cell, dim=1)\n    numeric_values_masked = torch.where(torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values)\n    sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)\n    avg_approximation = config.average_approximation_function\n    if avg_approximation == AverageApproximationFunction.RATIO:\n        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)\n    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:\n        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1\n        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1)\n    elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:\n        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1\n        pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)\n        var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var\n        multiplier = (var / torch.square(ex) + 1) / ex\n        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1)\n    else:\n        raise ValueError(f'Invalid average_approximation_function: {config.average_approximation_function}')\n    if config.use_gumbel_for_aggregation:\n        gumbel_dist = torch.distributions.RelaxedOneHotCategorical(config.aggregation_temperature, logits=logits_aggregation[:, 1:])\n        aggregation_op_only_probs = gumbel_dist.sample()\n    else:\n        aggregation_op_only_probs = nn.functional.softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1)\n    all_results = torch.cat([torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1)], dim=1)\n    expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1)\n    return expected_result", "docstring": "Calculates the expected result given cell and aggregation probabilities.\n\nArgs:\ndist_per_cell (`torch.distributions.Bernoulli`):\nCell selection distribution for each cell.\nnumeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):\nNumeric values of every token. 
Nan for tokens which are not numeric values.\nnumeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):\nScale of the numeric values of every token.\ninput_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):\nMask for the table, without question tokens and table headers.\nlogits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):\nLogits per aggregation operation.\nconfig ([`TapasConfig`]):\nModel configuration class with all the hyperparameters of the model\n\nReturns:\nexpected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.", "source": "github-repos"}
{"code": "def add_mutex_switch(parser, dest, arguments=set(), default=None, single_arg=False, required=False):\n    if (default is not None):\n        assert (default in arguments)\n    if isinstance(arguments, set):\n        arguments = {k: None for k in arguments}\n    if (not single_arg):\n        mg = parser.add_mutually_exclusive_group(required=required)\n        for (name, help_text) in arguments.items():\n            kwargs = {'action': 'store_const', 'dest': dest, 'const': name, 'help': help_text}\n            if (default == name):\n                kwargs['default'] = name\n            mg.add_argument('--{}'.format(name), **kwargs)\n        return mg\n    else:\n        kwargs = {'dest': dest, 'type': str, 'default': default, 'help': '\\n'.join(('{}: {}'.format(k, v) for (k, v) in arguments.items())), 'choices': list(arguments.keys())}\n        return parser.add_argument('--{}'.format(dest), **kwargs)", "docstring": "Adds mutually exclusive switch arguments.\n\nArgs:\narguments: a dictionary that maps switch name to helper text. Use\nsets to skip help texts.", "source": "codesearchnet"}
{"code": "def convert_op_hints_to_stubs(session=None, graph_def=None, write_callback=lambda graph_def, comments: None):\n    if session is not None and graph_def is not None:\n        raise ValueError('Provide only one of session and graph_def.')\n    if session is not None:\n        return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback)\n    elif graph_def is not None:\n        return _convert_op_hints_to_stubs_helper(graph_def, write_callback)\n    else:\n        raise ValueError('Must specify session or graph_def as input.')", "docstring": "Converts a graphdef with LiteOp hints into stub operations.\n\nThis is used to prepare for toco conversion of complex intrinsic usages.\nNote: only one of session or graph_def should be used, not both.\n\nArgs:\nsession: A TensorFlow session that contains the graph to convert.\ngraph_def: A graph def that we should convert.\nwrite_callback: A function pointer that can be used to write intermediate\nsteps of graph transformation (optional).\n\nReturns:\nA new graphdef with all ops contained in OpHints being replaced by\na single op call with the right parameters.\nRaises:\nValueError: If both session and graph_def are provided.", "source": "github-repos"}
{"code": "def anti_commutator(A, B=None):\n    \n    if B:\n        return A * B + B * A\n    return SPre(A) + SPost(A)", "docstring": "If ``B != None``, return the anti-commutator :math:`\\{A,B\\}`, otherwise\nreturn the super-operator :math:`\\{A,\\cdot\\}`.  The super-operator\n:math:`\\{A,\\cdot\\}` maps any other operator ``B`` to the anti-commutator\n:math:`\\{A, B\\} = A B + B A`.\n\nArgs:\nA: The first operator to form all anti-commutators of.\nB: The second operator to form the anti-commutator of, or None.\n\nReturns:\nSuperOperator: The linear superoperator :math:`[A,\\cdot]`", "source": "juraj-google-style"}
{"code": "def update_pipeline_stage(self, stage):\n\t\t\n\t\t\n\t\tpayload = None\n\t\tif  type(stage) is not StreakStage:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tpayload = stage.to_dict(rw = True)\n\t\n\t\t\n\t\t\n\t\t\n\t\ttry:\n\t\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\t\tstage.attributes['pipelineKey'],\n\t\t\t\t\t\t\tself.stages_suffix,\n\t\t\t\t\t\t\tstage.attributes['key']\n\t\t\t\t\t\t\t])\n\t\texcept KeyError:\n\t\t\treturn requests.codes.bad_request, None\n\t\n\t\tcode, data = self._req('post', uri , json.dumps(payload))\n\t\t\n\t\treturn code, data", "docstring": "Updates a box with the provided attributes.\nArgs:\npipeline_key\treqiured identifier for the pipeline\nstage\t\t\tStreakStage object\nkwargs\t\t\t{name}\nreturn\t\t\t(status code, stage dict)", "source": "juraj-google-style"}
{"code": "def create_new(mapreduce_id=None, gettime=datetime.datetime.now):\n    if (not mapreduce_id):\n        mapreduce_id = MapreduceState.new_mapreduce_id()\n    state = MapreduceState(key_name=mapreduce_id, last_poll_time=gettime())\n    state.set_processed_counts([], [])\n    return state", "docstring": "Create a new MapreduceState.\n\nArgs:\nmapreduce_id: Mapreduce id as string.\ngettime: Used for testing.", "source": "codesearchnet"}
{"code": "def remove_son(self, son):\n        \n        self._sons = [x for x in self._sons if x.node_id != son.node_id]", "docstring": "Remove the son node. Do nothing if the node is not a son\n\nArgs:\nfathers: list of fathers to add", "source": "juraj-google-style"}
{"code": "def __init__(self, name, common_channel_mask=True, **kwargs):\n        \n        self.common_channel_mask = common_channel_mask\n        super(GenericCompositor, self).__init__(name, **kwargs)", "docstring": "Collect custom configuration values.\n\nArgs:\ncommon_channel_mask (bool): If True, mask all the channels with\na mask that combines all the invalid areas of the given data.", "source": "juraj-google-style"}
{"code": "def __init__(self, incoming_client=False):\n        \n        from neo.Network.NodeLeader import NodeLeader\n\n        self.leader = NodeLeader.Instance()\n        self.nodeid = self.leader.NodeId\n        self.remote_nodeid = random.randint(1294967200, 4294967200)\n        self.endpoint = ''\n        self.address = None\n        self.buffer_in = bytearray()\n        self.myblockrequests = set()\n        self.bytes_in = 0\n        self.bytes_out = 0\n\n        self.sync_mode = MODE_CATCHUP\n\n        self.host = None\n        self.port = None\n\n        self.incoming_client = incoming_client\n        self.handshake_complete = False\n        self.expect_verack_next = False\n        self.start_outstanding_data_request = {HEARTBEAT_BLOCKS: 0, HEARTBEAT_HEADERS: 0}\n\n        self.block_loop = None\n        self.block_loop_deferred = None\n\n        self.peer_loop = None\n        self.peer_loop_deferred = None\n\n        self.header_loop = None\n        self.header_loop_deferred = None\n\n        self.disconnect_deferred = None\n        self.disconnecting = False\n\n        logger.debug(f\"{self.prefix} new node created, not yet connected\")", "docstring": "Create an instance.\nThe NeoNode class is the equivalent of the C# RemoteNode.cs class. It represents a single Node connected to the client.\n\nArgs:\nincoming_client (bool): True if node is an incoming client and the handshake should be initiated.", "source": "juraj-google-style"}
{"code": "def ensure_s3_bucket(s3_client, bucket_name, bucket_region):\n    \n    try:\n        s3_client.head_bucket(Bucket=bucket_name)\n    except botocore.exceptions.ClientError as e:\n        if e.response['Error']['Message'] == \"Not Found\":\n            logger.debug(\"Creating bucket %s.\", bucket_name)\n            create_args = {\"Bucket\": bucket_name}\n            location_constraint = s3_bucket_location_constraint(\n                bucket_region\n            )\n            if location_constraint:\n                create_args[\"CreateBucketConfiguration\"] = {\n                    \"LocationConstraint\": location_constraint\n                }\n            s3_client.create_bucket(**create_args)\n        elif e.response['Error']['Message'] == \"Forbidden\":\n            logger.exception(\"Access denied for bucket %s.  Did \" +\n                             \"you remember to use a globally unique name?\",\n                             bucket_name)\n            raise\n        else:\n            logger.exception(\"Error creating bucket %s. Error %s\",\n                             bucket_name, e.response)\n            raise", "docstring": "Ensure an s3 bucket exists, if it does not then create it.\n\nArgs:\ns3_client (:class:`botocore.client.Client`): An s3 client used to\nverify and create the bucket.\nbucket_name (str): The bucket being checked/created.\nbucket_region (str, optional): The region to create the bucket in. If\nnot provided, will be determined by s3_client's region.", "source": "juraj-google-style"}
{"code": "def read_config(contents):\n    file_obj = io.StringIO(contents)\n    config = six.moves.configparser.ConfigParser()\n    config.readfp(file_obj)\n    return config", "docstring": "Reads pylintrc config into native ConfigParser object.\n\nArgs:\ncontents (str): The contents of the file containing the INI config.\n\nReturns:\nConfigParser.ConfigParser: The parsed configuration.", "source": "codesearchnet"}
{"code": "def _StopExtractionProcesses(self, abort=False):\n    \n    logger.debug('Stopping extraction processes.')\n    self._StopMonitoringProcesses()\n\n    \n    \n    \n\n    if abort:\n      \n      self._AbortTerminate()\n\n    logger.debug('Emptying task queue.')\n    self._task_queue.Empty()\n\n    \n    \n    for _ in self._processes_per_pid:\n      try:\n        self._task_queue.PushItem(plaso_queue.QueueAbort(), block=False)\n      except errors.QueueFull:\n        logger.warning('Task queue full, unable to push abort message.')\n\n    \n    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n    self._task_queue.Close(abort=abort)\n\n    if not abort:\n      \n      self._AbortTerminate()\n      self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n      self._task_queue.Close(abort=True)\n\n    \n    self._AbortKill()", "docstring": "Stops the extraction processes.\n\nArgs:\nabort (bool): True to indicated the stop is issued on abort.", "source": "juraj-google-style"}
{"code": "def execute_add(args, root_dir=None):\n    command = ' '.join(args['command'])\n    instruction = {'command': command, 'path': os.getcwd()}\n    print_command_factory('add')(instruction, root_dir)", "docstring": "Add a new command to the daemon queue.\n\nArgs:\nargs['command'] (list(str)): The actual programm call. Something like ['ls', '-a'] or ['ls -al']\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "codesearchnet"}
{"code": "def get_gains_losses(changes):\n    res = {'gains': [], 'losses': []}\n    for change in changes:\n        if (change > 0):\n            res['gains'].append(change)\n        else:\n            res['losses'].append((change * (- 1)))\n    logger.debug('Gains: {0}'.format(res['gains']))\n    logger.debug('Losses: {0}'.format(res['losses']))\n    return res", "docstring": "Categorizes changes into gains and losses\n\nArgs:\nchanges: List of floats of price changes between entries in JSON.\n\nReturns:\nDict of changes with keys 'gains' and 'losses'.\nAll values are positive.", "source": "codesearchnet"}
{"code": "def _compute_useful_frames(tb, num):\n    defining_frame_index = _find_index_of_defining_frame(tb)\n    innermost_excluded = min(defining_frame_index + 2 + 1, len(tb))\n    outermost_included = max(innermost_excluded - num, 0)\n    return tb[outermost_included:innermost_excluded]", "docstring": "Return a list of frames, which form a 'useful' stack.\n\nStarting from the defining frame to the outermost one, this method computes\nthe contiguous portion of the 'useful' stack trace and returns the selected\nframes.\n\nArgs:\ntb: A list of traceback frames (as from Operation.traceback).\nnum: total number of frames to return.\n\nReturns:\nA list of frames.", "source": "github-repos"}
{"code": "def get_params(img, output_size):\n        \n        w, h = img.size\n        th, tw = output_size\n        if w == tw and h == th:\n            return 0, 0, h, w\n\n        i = random.randint(0, h - th)\n        j = random.randint(0, w - tw)\n        return i, j, th, tw", "docstring": "Get parameters for ``crop`` for a random crop.\n\nArgs:\nimg (PIL Image): Image to be cropped.\noutput_size (tuple): Expected output size of the crop.\n\nReturns:\ntuple: params (i, j, h, w) to be passed to ``crop`` for random crop.", "source": "juraj-google-style"}
{"code": "def _perp_eigendecompose(matrix: np.ndarray, rtol: float=1e-05, atol: float=1e-08) -> Tuple[(np.array, List[np.ndarray])]:\n    (vals, cols) = np.linalg.eig(matrix)\n    vecs = [cols[(:, i)] for i in range(len(cols))]\n    for i in range(len(vecs)):\n        vecs[i] = np.reshape(vecs[i], (len(vecs[i]), vecs[i].ndim))\n    n = len(vecs)\n    groups = _group_similar(list(range(n)), (lambda k1, k2: np.allclose(vals[k1], vals[k2], rtol=rtol)))\n    for g in groups:\n        (q, _) = np.linalg.qr(np.hstack([vecs[i] for i in g]))\n        for i in range(len(g)):\n            vecs[g[i]] = q[(:, i)]\n    return (vals, vecs)", "docstring": "An eigendecomposition that ensures eigenvectors are perpendicular.\n\nnumpy.linalg.eig doesn't guarantee that eigenvectors from the same\neigenspace will be perpendicular. This method uses Gram-Schmidt to recover\na perpendicular set. It further checks that all eigenvectors are\nperpendicular and raises an ArithmeticError otherwise.\n\nArgs:\nmatrix: The matrix to decompose.\nrtol: Relative threshold for determining whether eigenvalues are from\nthe same eigenspace and whether eigenvectors are perpendicular.\natol: Absolute threshold for determining whether eigenvalues are from\nthe same eigenspace and whether eigenvectors are perpendicular.\n\nReturns:\nThe eigenvalues and column eigenvectors. The i'th eigenvalue is\nassociated with the i'th column eigenvector.\n\nRaises:\nArithmeticError: Failed to find perpendicular eigenvectors.", "source": "codesearchnet"}
{"code": "def _find_image_bounding_boxes(filenames, image_to_bboxes):\n    num_image_bbox = 0\n    bboxes = []\n    for f in filenames:\n        basename = os.path.basename(f)\n        if (basename in image_to_bboxes):\n            bboxes.append(image_to_bboxes[basename])\n            num_image_bbox += 1\n        else:\n            bboxes.append([])\n    print(('Found %d images with bboxes out of %d images' % (num_image_bbox, len(filenames))))\n    return bboxes", "docstring": "Find the bounding boxes for a given image file.\n\nArgs:\nfilenames: list of strings; each string is a path to an image file.\nimage_to_bboxes: dictionary mapping image file names to a list of\nbounding boxes. This list contains 0+ bounding boxes.\nReturns:\nList of bounding boxes for each image. Note that each entry in this\nlist might contain from 0+ entries corresponding to the number of bounding\nbox annotations for the image.", "source": "codesearchnet"}
{"code": "def generate_rpcs(self, address):\n        \n\n        rpc_list = []\n\n        for offset in range(2, len(self.data), 16):\n            rpc = (address, rpcs.SET_CONFIG_VARIABLE, self.var_id, offset - 2, self.data[offset:offset + 16])\n            rpc_list.append(rpc)\n\n        return rpc_list", "docstring": "Generate the RPCs needed to stream this config variable to a tile.\n\nArgs:\naddress (int): The address of the tile that we should stream to.\n\nReturns:\nlist of tuples: A list of argument tuples for each RPC.\n\nThese tuples can be passed to EmulatedDevice.rpc to actually make\nthe RPCs.", "source": "juraj-google-style"}
{"code": "def remove(self, keys, name=None):\n    return self.erase(keys, name)", "docstring": "Removes `keys` and its associated values from the table.\n\nIf a key is not present in the table, it is silently ignored.\n\nArgs:\nkeys: Keys to remove. Can be a tensor of any shape. Must match the table's\nkey type.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `keys` do not match the table data types.", "source": "github-repos"}
{"code": "def autocov(x):\n    \n\n    acorr = autocorr(x)\n    varx = np.var(x, ddof=1) * (len(x) - 1) / len(x)\n    acov = acorr * varx\n    return acov", "docstring": "Compute autocovariance estimates for every lag for the input array.\n\nArgs:\nx (array-like): An array containing MCMC samples.\n\nReturns:\nnp.ndarray: An array of the same size as the input array.", "source": "juraj-google-style"}
{"code": "def gt(left: Any, right: Any) -> bool:\n    return lt(right, left)", "docstring": "Returns True if a value is symbolically greater than the other value.\n\nRefer to :func:`pyglove.lt` for the definition of symbolic comparison.\n\nArgs:\nleft: The left-hand value to compare.\nright: The right-hand value to compare.\n\nReturns:\nTrue if the left value is symbolically greater than the right value.", "source": "github-repos"}
{"code": "def needs_to_run(G, target, in_mem_shas, from_store, settings):\n    \n    force = settings[\"force\"]\n    sprint = settings[\"sprint\"]\n\n    if(force):\n        sprint(\"Target rebuild is being forced so {} needs to run\".format(target),\n               level=\"verbose\")\n        return True\n    node_dict = get_the_node_dict(G, target)\n    if 'output' in node_dict:\n        for output in acts.get_all_outputs(node_dict):\n            if not os.path.isfile(output):\n                outstr = \"Output file '{}' is missing so it needs to run\"\n                sprint(outstr.format(output), level=\"verbose\")\n                return True\n    if 'dependencies' not in node_dict:\n        \n        sprint(\"Target {} has no dependencies and needs to run\".format(target),\n               level=\"verbose\")\n        return True\n    for dep in node_dict['dependencies']:\n        \n        \n        \n        \n        if ('files' in in_mem_shas and dep not in in_mem_shas['files'] or\n            'files' not in in_mem_shas):\n            outstr = \"Dep '{}' doesn't exist in memory so it needs to run\"\n            sprint(outstr.format(dep), level=\"verbose\")\n            return True\n        now_sha = in_mem_shas['files'][dep]['sha']\n        if ('files' in from_store and dep not in from_store['files'] or\n            'files' not in from_store):\n            outst = \"Dep '{}' doesn't exist in shastore so it needs to run\"\n            sprint(outst.format(dep), level=\"verbose\")\n            return True\n        old_sha = from_store['files'][dep]['sha']\n        if now_sha != old_sha:\n            outstr = \"There's a mismatch for dep {} so it needs to run\"\n            sprint(outstr.format(dep), level=\"verbose\")\n            return True\n    sprint(\"Target '{}' doesn't need to run\".format(target), level=\"verbose\")\n    return False", "docstring": "Determines if a target needs to run. This can happen in two ways:\n(a) If a dependency of the target has changed\n(b) If an output of the target is missing\n\nArgs:\nThe graph we are going to build\nThe name of the target\nThe dictionary of the current shas held in memory\nThe dictionary of the shas from the shastore\nThe settings dictionary\n\nReturns:\nTrue if the target needs to be run\nFalse if not", "source": "juraj-google-style"}
{"code": "async def find_person(self, query):\n    url = self.url_builder('search/person', dict(), url_params=OrderedDict([('query', query), ('include_adult', False)]))\n    data = (await self.get_data(url))\n    if (data is None):\n        return\n    return [Person.from_json(item, self.config['data'].get('images')) for item in data.get('results', [])]", "docstring": "Retrieve person data by search query.\n\nArguments:\nquery (:py:class:`str`): Query to search for.\n\nReturns:\n:py:class:`list`: Possible matches.", "source": "codesearchnet"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    data = pipeline | read_csv(known_args.input)\n    features = ['longitude', 'latitude', 'median_income']\n    housing_features = to_pcollection(data[features])\n    model = housing_features | beam.Map(lambda record: list(record)) | 'Train clustering model' >> OnlineClustering(OnlineKMeans, n_clusters=6, batch_size=256, cluster_args={}, checkpoints_path=known_args.checkpoints_path)\n    _ = housing_features | beam.Map(lambda sample: np.array(sample)) | 'RunInference' >> AssignClusterLabelsInMemoryModel(model=pvalue.AsSingleton(model), model_id='kmeans', n_clusters=6, batch_size=512) | beam.Map(print)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def fswap(p, q):\n    \n\n    yield cirq.ISWAP(q, p), cirq.Z(p) ** 1.5\n    yield cirq.Z(q) ** 1.5", "docstring": "Decompose the Fermionic SWAP gate into two single-qubit gates and\none iSWAP gate.\n\nArgs:\np: the id of the first qubit\nq: the id of the second qubit", "source": "juraj-google-style"}
{"code": "def memoise(cls, func):\n\n    @functools.wraps(func)\n    def f(*a):\n        for arg in a:\n            if isinstance(arg, User):\n                user = arg\n                break\n        else:\n            raise ValueError('One position argument must be a User')\n        func_key = (func, tuple(a))\n        cache = cls.get_cache(user)\n        if (func_key not in cache):\n            cache[func_key] = func(*a)\n        return cache[func_key]\n    return f", "docstring": "Decorator that stores the result of the stored function in the\nuser's results cache until the batch completes. Keyword arguments are\nnot yet supported.\n\nArguments:\nfunc (callable(*a)): The function whose results we want\nto store. The positional arguments, ``a``, are used as cache\nkeys.\n\nReturns:\ncallable(*a): The memosing version of ``func``.", "source": "codesearchnet"}
{"code": "def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict, docker: DockerSwarmClient):\n    _abort_flag = False\n    if _abort_flag:\n        for workflow_stage in pb.workflow_stages:\n            for (service_id, _) in workflow_stage_dict[workflow_stage.id]['services'].items():\n                docker.delete_service(service_id)\n                LOG.info('Deleted Service Id %s', service_id)\n        return True\n    return False", "docstring": "Abort the workflow.\n\nTODO(BMo): This function currently does nothing as the abort flag\nis hardcoded to False!\n\nThis function is used by `execute_processing_block`.\n\nArgs:\npb (ProcessingBlock): Configuration database Processing block object.\nworkflow_stage_dict (dict): Workflow stage metadata dictionary.\ndocker (DockerClient): Docker Swarm Client object.\n\nReturns:\nbool, True if the stage is aborted, otherwise False.", "source": "codesearchnet"}
{"code": "def from_row_partitions(cls, row_partitions, dtype=None):\n    if not row_partitions:\n        raise ValueError('row_partitions cannot be empty')\n    inner_shape = [row_partitions[-1].nvals()]\n    return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)", "docstring": "Create a shape from row_partitions.\n\nArgs:\nrow_partitions: a nonempty list of RowPartition objects.\ndtype: the dtype to use, or None to use the row_partitions dtype.\n\nReturns:\na DynamicRaggedShape with inner_rank==1.", "source": "github-repos"}
{"code": "def __init__(self, state_view):\n        \n        self._state_view = state_view\n\n        \n        \n        \n        \n        self.get_setting = lru_cache(maxsize=128)(self._get_setting)", "docstring": "Creates a SettingsView, given a StateView for merkle tree access.\n\nArgs:\nstate_view (:obj:`StateView`): a state view", "source": "juraj-google-style"}
{"code": "def _to_node(self, operand: BuilderOperand) -> _evaluation.ExpressionNode:\n    if isinstance(operand, Builder):\n        return operand.node\n    else:\n        as_message = None if operand is None else self._primitive_to_message(operand)\n        primitive_type = _fhir_path_data_types.primitive_type_from_type_code(type(operand).__name__)\n        return _evaluation.LiteralNode(self.node.context, as_message, self._primitive_to_fhir_path(operand), primitive_type)", "docstring": "Returns a node from a Builder or Comparable.\n\nArgs:\noperand: An input to the operator that is either a comparable or Builder.\n\nReturns:\nAn ExpressionNode.", "source": "github-repos"}
{"code": "def line(xo: int, yo: int, xd: int, yd: int, py_callback: Callable[([int, int], bool)]) -> bool:\n    for (x, y) in line_iter(xo, yo, xd, yd):\n        if (not py_callback(x, y)):\n            break\n    else:\n        return True\n    return False", "docstring": "Iterate over a line using a callback function.\n\nYour callback function will take x and y parameters and return True to\ncontinue iteration or False to stop iteration and return.\n\nThis function includes both the start and end points.\n\nArgs:\nxo (int): X starting point.\nyo (int): Y starting point.\nxd (int): X destination point.\nyd (int): Y destination point.\npy_callback (Callable[[int, int], bool]):\nA callback which takes x and y parameters and returns bool.\n\nReturns:\nbool: False if the callback cancels the line interation by\nreturning False or None, otherwise True.\n\n.. deprecated:: 2.0\nUse `line_iter` instead.", "source": "codesearchnet"}
{"code": "def start(self, workers=1, max_queue_size=10):\n    if self.use_multiprocessing:\n        self.executor_fn = self._get_executor_init(workers)\n    else:\n        self.executor_fn = lambda _: get_pool_class(False)(workers)\n    self.workers = workers\n    self.queue = queue.Queue(max_queue_size)\n    self.stop_signal = threading.Event()\n    self.run_thread = threading.Thread(target=self._run)\n    self.run_thread.daemon = True\n    self.run_thread.start()", "docstring": "Starts the handler's workers.\n\nArgs:\nworkers: Number of workers.\nmax_queue_size: queue size\n(when full, workers could block on `put()`)", "source": "github-repos"}
{"code": "def __init__(self, args=None, cmd=None):\n    \n    if args is None or cmd is None:\n      raise errors.FormatError('Missing args or cmd value.')\n\n    super(CommandSourceType, self).__init__()\n    self.args = args\n    self.cmd = cmd", "docstring": "Initializes a source type.\n\nArgs:\nargs (list[str]): arguments to the command to run.\ncmd (str): command to run.\n\nRaises:\nFormatError: when args or cmd is not set.", "source": "juraj-google-style"}
{"code": "def ReadVarString(self, max=sys.maxsize):\n        \n        length = self.ReadVarInt(max)\n        return self.unpack(str(length) + 's', length)", "docstring": "Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.\n\nArgs:\nmax (int): (Optional) maximum number of bytes to read.\n\nReturns:\nbytes:", "source": "juraj-google-style"}
{"code": "def AddCommandLineArguments(cls, argument_group, category=None, names=None):\n    for (helper_name, helper_class) in sorted(cls._helper_classes.items()):\n        if ((category and (helper_class.CATEGORY != category)) or (names and (helper_name not in names))):\n            continue\n        helper_class.AddArguments(argument_group)", "docstring": "Adds command line arguments to a configuration object.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.\ncategory (Optional[str]): category of helpers to apply to\nthe group, such as storage, output, where None will apply the\narguments to all helpers. The category can be used to add arguments\nto a specific group of registered helpers.\nnames (Optional[list[str]]): names of argument helpers to apply,\nwhere None will apply the arguments to all helpers.", "source": "codesearchnet"}
{"code": "def weight_memory_size(weights):\n    unique_weights = {id(w): w for w in weights}.values()\n    total_memory_size = 0\n    for w in unique_weights:\n        total_memory_size += _compute_memory_size(w.shape, w.dtype)\n    return total_memory_size / 8", "docstring": "Compute the memory footprint for weights based on their dtypes.\n\nArgs:\nweights: An iterable contains the weights to compute weight size.\n\nReturns:\nThe total memory size (in Bytes) of the weights.", "source": "github-repos"}
{"code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['TypeSpec']:\n    if any((type(self) is not type(other) for other in others)):\n        return None\n    has_supertype = True\n\n    def make_supertype_attribute(attribute_self, *attribute_others):\n        nonlocal has_supertype\n        if not has_supertype:\n            return\n        if isinstance(attribute_self, trace.TraceType):\n            attribute_supertype = attribute_self.most_specific_common_supertype(attribute_others)\n            if attribute_supertype is None:\n                has_supertype = False\n                return\n            return attribute_supertype\n        else:\n            if not all((attribute_self == attribute_other for attribute_other in attribute_others)):\n                has_supertype = False\n                return\n            return attribute_self\n    try:\n        serialized_supertype = nest.map_structure(make_supertype_attribute, self._serialize(), *(o._serialize() for o in others))\n    except (ValueError, TypeError):\n        return None\n    return self._deserialize(serialized_supertype) if has_supertype else None", "docstring": "Returns the most specific supertype TypeSpec  of `self` and `others`.\n\nImplements the tf.types.experimental.func.TraceType interface.\n\nIf not overridden by a subclass, the default behavior is to assume the\nTypeSpec is covariant upon attributes that implement TraceType and\ninvariant upon rest of the attributes as well as the structure and type\nof the TypeSpec.\n\nArgs:\nothers: A sequence of TraceTypes.", "source": "github-repos"}
{"code": "def run(self, args):\n    kwargs = {}\n    kwargs['path'] = args.file[0]\n    kwargs['addr'] = args.addr\n    kwargs['on_progress'] = pylink.util.flash_progress_callback\n    jlink = self.create_jlink(args)\n    _ = jlink.flash_file(**kwargs)\n    print('Flashed device successfully.')", "docstring": "Flashes the device connected to the J-Link.\n\nArgs:\nself (FlashCommand): the ``FlashCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def _UpdateCampaignDSASetting(client, campaign_id, feed_id):\n  \n  \n  campaign_service = client.GetService('CampaignService', version='v201809')\n\n  selector = {\n      'fields': ['Id', 'Settings'],\n      'predicates': [{\n          'field': 'Id',\n          'operator': 'EQUALS',\n          'values': [campaign_id]\n      }]\n  }\n\n  response = campaign_service.get(selector)\n\n  if response['totalNumEntries']:\n    campaign = response['entries'][0]\n  else:\n    raise ValueError('No campaign with ID \"%d\" exists.' % campaign_id)\n\n  if not campaign['settings']:\n    raise ValueError('This is not a DSA campaign.')\n\n  dsa_setting = None\n\n  campaign_settings = campaign['settings']\n\n  for setting in campaign_settings:\n    if setting['Setting.Type'] == 'DynamicSearchAdsSetting':\n      dsa_setting = setting\n      break\n\n  if dsa_setting is None:\n    raise ValueError('This is not a DSA campaign.')\n\n  dsa_setting['pageFeed'] = {\n      'feedIds': [feed_id]\n  }\n\n  \n  \n  dsa_setting['useSuppliedUrlsOnly'] = True\n\n  operation = {\n      'operand': {\n          'id': campaign_id,\n          'settings': campaign_settings\n      },\n      'operator': 'SET'\n  }\n\n  campaign_service.mutate([operation])\n  print 'DSA page feed for campaign ID \"%d\" was updated with feed ID \"%d\".' % (\n      campaign_id, feed_id)", "docstring": "Updates the campaign DSA setting to DSA pagefeeds.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: a str Campaign ID.\nfeed_id: a str page Feed ID.\n\nRaises:\nValueError: If the given campaign is found not to be a dynamic search ad\ncampaign.", "source": "juraj-google-style"}
{"code": "def describe_enum_value(enum_value):\n    \n    enum_value_descriptor = EnumValueDescriptor()\n    enum_value_descriptor.name = six.text_type(enum_value.name)\n    enum_value_descriptor.number = enum_value.number\n    return enum_value_descriptor", "docstring": "Build descriptor for Enum instance.\n\nArgs:\nenum_value: Enum value to provide descriptor for.\n\nReturns:\nInitialized EnumValueDescriptor instance describing the Enum instance.", "source": "juraj-google-style"}
{"code": "def configure_from_environment(self, whitelist_keys=False, whitelist=None):\n    self._configure_from_mapping(os.environ, whitelist_keys=whitelist_keys, whitelist=whitelist)\n    return self", "docstring": "Configure from the entire set of available environment variables.\n\nThis is really a shorthand for grabbing ``os.environ`` and passing to\n:meth:`_configure_from_mapping`.\n\nAs always, only uppercase keys are loaded.\n\nKeyword Args:\nwhitelist_keys (bool):\nShould we whitelist the keys by only pulling those that are\nalready present in the config? Useful for avoiding adding\nthings like ``LESSPIPE`` to your app config. If no whitelist is\nprovided, we use the current config keys as our whitelist.\nwhitelist (list[str]):\nAn explicit list of keys that should be allowed. If provided\nand ``whitelist_keys`` is true, we will use that as our\nwhitelist instead of pre-existing app config keys.\n\nReturns:\nfleaker.base.BaseApplication:\nReturns itself.", "source": "codesearchnet"}
{"code": "def clean_file(self):\n    data = self.cleaned_data['file']\n    available_parsers = self.get_parsers()\n    for parser in available_parsers:\n        try:\n            return parser.parse_file(data)\n        except parsers.ParserError:\n            pass\n    raise forms.ValidationError(('No parser could read the file. Tried with parsers %s.' % (', ' % (force_text(p) for p in available_parsers))))", "docstring": "Analyse the uploaded file, and return the parsed lines.\n\nReturns:\ntuple of tuples of cells content (as text).", "source": "codesearchnet"}
{"code": "def create_monitoring_info(urn, type_urn, payload, labels=None) -> metrics_pb2.MonitoringInfo:\n    try:\n        return metrics_pb2.MonitoringInfo(urn=urn, type=type_urn, labels=labels or {}, payload=payload)\n    except TypeError as e:\n        raise RuntimeError(f'Failed to create MonitoringInfo for urn {urn} type {type} labels ' + '{labels} and payload {payload}') from e", "docstring": "Return the gauge monitoring info for the URN, type, metric and labels.\n\nArgs:\nurn: The URN of the monitoring info/metric.\ntype_urn: The URN of the type of the monitoring info/metric.\ni.e. beam:metrics:sum_int_64, beam:metrics:latest_int_64.\npayload: The payload field to use in the monitoring info.\nlabels: The label dictionary to use in the MonitoringInfo.", "source": "github-repos"}
{"code": "def _parse_date(dataset_date, date_format):\n    if (date_format is None):\n        try:\n            return parser.parse(dataset_date)\n        except (ValueError, OverflowError) as e:\n            raisefrom(HDXError, 'Invalid dataset date!', e)\n    else:\n        try:\n            return datetime.strptime(dataset_date, date_format)\n        except ValueError as e:\n            raisefrom(HDXError, 'Invalid dataset date!', e)", "docstring": "Parse dataset date from string using specified format. If no format is supplied, the function will guess.\nFor unambiguous formats, this should be fine.\n\nArgs:\ndataset_date (str): Dataset date string\ndate_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.\n\nReturns:\ndatetime.datetime", "source": "codesearchnet"}
{"code": "def parse(self, string, strict=True):\n        \n        if isinstance(string, bytes):\n            errors = 'strict' if strict else 'replace'\n            string = string.decode(self.encoding, errors=errors)\n\n        if not self.raw:\n            self.raw = string\n        else:\n            self.raw += string\n\n        lines = unfold_lines(string).splitlines()\n        for line in lines:\n            if line:\n                if ':' not in line:\n                    if strict:\n                        raise ValueError('Field missing colon.')\n                    else:\n                        continue\n\n                name, value = line.split(':', 1)\n                name = name.strip()\n                value = value.strip()\n                self.add(name, value)", "docstring": "Parse the string or bytes.\n\nArgs:\nstrict (bool): If True, errors will not be ignored\n\nRaises:\n:class:`ValueError` if the record is malformed.", "source": "juraj-google-style"}
{"code": "def send(self, msg):\n    slipDriver = sliplib.Driver()\n    slipData = slipDriver.send(msg)\n    res = self._serialPort.write(slipData)\n    return res", "docstring": "Encodes data to slip protocol and then sends over serial port\n\nUses the SlipLib module to convert the message data into SLIP format.\nThe message is then sent over the serial port opened with the instance\nof the Faraday class used when invoking send().\n\nArgs:\nmsg (bytes): Bytes format message to send over serial port.\n\nReturns:\nint: Number of bytes transmitted over the serial port.", "source": "codesearchnet"}
{"code": "def dump_migration_session_state(raw):\n    \n    class BlockStyle(str): pass\n    class SessionDumper(yaml.SafeDumper): pass\n    def str_block_formatter(dumper, data):\n        return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n    SessionDumper.add_representer(BlockStyle, str_block_formatter)\n\n    raw = deepcopy(raw)\n    for step in raw:\n        step['output'] = BlockStyle(step['output'])\n        step['traceback'] = BlockStyle(step['traceback'])\n    return yaml.dump(raw, Dumper=SessionDumper)", "docstring": "Serialize a migration session state to yaml using nicer formatting\n\nArgs:\nraw: object to serialize\nReturns: string (of yaml)\n\nSpecifically, this forces the \"output\" member of state step dicts (e.g.\nstate[0]['output']) to use block formatting. For example, rather than this:\n\n- migration: [app, migration_name]\noutput: \"line 1\\nline2\\nline3\"\n\nYou get this:\n\n- migration: [app, migration_name]\noutput: |\nline 1\nline 2\nline 3", "source": "juraj-google-style"}
{"code": "def split_list_by_n(l, n):\n    n = max(1, n)\n    return list((l[i:(i + n)] for i in range(0, len(l), n)))", "docstring": "Split a list into lists of size n.\n\nArgs:\nl: List of stuff.\nn: Size of new lists.\n\nReturns:\nlist: List of lists each of size n derived from l.", "source": "codesearchnet"}
{"code": "def logs_urlpatterns(admin_view=(lambda x: x)):\n    return [url('^$', admin_view(LogsMenu.as_view()), name='logs'), url('^status_codes$', admin_view(LogsStatusCodes.as_view()), name='logs_status_codes'), url('^status_codes_by_date$', admin_view(LogsStatusCodesByDate.as_view()), name='logs_status_codes_by_date'), url('^most_visited_pages$', admin_view(LogsMostVisitedPages.as_view()), name='logs_most_visited_pages')]", "docstring": "Return the URL patterns for the logs views.\n\nArgs:\nadmin_view (callable): admin_view method from an AdminSite instance.\n\nReturns:\nlist: the URL patterns for the logs views.", "source": "codesearchnet"}
{"code": "def patch(self, id, name=None, description=None, whitelisted_container_task_types=None, whitelisted_executable_task_types=None):\n    request_url = (self._client.base_api_url + self.detail_url.format(id=id))\n    data_to_patch = {}\n    if (name is not None):\n        data_to_patch['name'] = name\n    if (description is not None):\n        data_to_patch['description'] = description\n    if (whitelisted_container_task_types is not None):\n        data_to_patch['whitelisted_container_task_types'] = whitelisted_container_task_types\n    if (whitelisted_executable_task_types is not None):\n        data_to_patch['whitelisted_executable_task_types'] = whitelisted_executable_task_types\n    response = self._client.session.patch(request_url, data=data_to_patch)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Partially updates a task whitelist on the saltant server.\n\nArgs:\nid (int): The ID of the task whitelist.\nname (str, optional): The name of the task whitelist.\ndescription (str, optional): A description of the task whitelist.\nwhitelisted_container_task_types (list, optional): A list of\nwhitelisted container task type IDs.\nwhitelisted_executable_task_types (list, optional): A list\nof whitelisted executable task type IDs.\n\nReturns:\n:class:`saltant.models.task_whitelist.TaskWhitelist`:\nA task whitelist model instance representing the task\nwhitelist just updated.", "source": "codesearchnet"}
{"code": "def _parse_metadata(self, message):\n        \n\n        metadata = Metadata(source=self.actor_urn).__dict__\n        if 'author' in message['d']:\n            metadata['source_user'] = message['d']['author']['username']\n        else:\n            metadata['source_user'] = None\n        if 'channel_id' in message['d']:\n            metadata['source_channel'] = message['d']['channel_id']\n        else:\n            metadata['source_channel'] = None\n        metadata['user_id'] = metadata['source_user']\n        metadata['display_name'] = metadata['source_user']\n\n        metadata['source_connector'] = 'discord'\n\n        return metadata", "docstring": "Sets metadata in Legobot message\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"\n\nReturns:\nLegobot.Metadata", "source": "juraj-google-style"}
{"code": "def __init__(self, log_path, ref_path, run_path, output_path):\n        \n        process_worker.ProcessWorkflow.__init__(\n            self, log_path, timeout_seconds=FLAGS.pdiff_timeout)\n        self.ref_path = ref_path\n        self.run_path = run_path\n        self.output_path = output_path", "docstring": "Initializer.\n\nArgs:\nlog_path: Where to write the verbose logging output.\nref_path: Path to reference screenshot to diff.\nrun_path: Path to the most recent run screenshot to diff.\noutput_path: Where the diff image should be written, if any.", "source": "juraj-google-style"}
{"code": "def has_value(self, name=None):\n    raise NotImplementedError('Optional.has_value()')", "docstring": "Returns a tensor that evaluates to `True` if this optional has a value.\n\n>>> optional = tf.experimental.Optional.from_value(42)\n>>> print(optional.has_value())\ntf.Tensor(True, shape=(), dtype=bool)\n\nArgs:\nname: (Optional.) A name for the created operation.\n\nReturns:\nA scalar `tf.Tensor` of type `tf.bool`.", "source": "github-repos"}
{"code": "def BuildParams(self, graph_fn, dtype, input_shapes, output_shapes):\n    input_mask = [[False] + [True] * (len(shape) - 1) for shape in input_shapes]\n    output_mask = [[False] + [True] * (len(shape) - 1) if shape else [] for shape in output_shapes]\n    return self.BuildParamsWithMask(graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, [], [])", "docstring": "Build test parameters.\n\nThe input_shapes and output_shapes arguments are known (static) shapes that\ncan be used to generate test data. To define the model, we also specify\ncorresponding input/output TensorSpecs. These are defined using the shape\narguments. For each input tensor we define:\n\ninput_spec = [None] + input_shape[1:]\n\nand similarly for output shapes. This means that we leave the first (batch)\ndimension unknown, the rest is just copied from the shapes arg.\n\nArgs:\ngraph_fn: The function to build the graph.\ndtype: The element type.\ninput_shapes: The input shapes.\noutput_shapes: The output shapes.\n\nReturns:\nThe test parameters.", "source": "github-repos"}
{"code": "def nr_cases(self, institute_id=None):\n    query = {}\n    if institute_id:\n        query['collaborators'] = institute_id\n    LOG.debug('Fetch all cases with query {0}'.format(query))\n    nr_cases = self.case_collection.find(query).count()\n    return nr_cases", "docstring": "Return the number of cases\n\nThis function will change when we migrate to 3.7.1\n\nArgs:\ncollaborator(str): Institute id\n\nReturns:\nnr_cases(int)", "source": "codesearchnet"}
{"code": "def cn_occupation_energy(self, delta_occupation=None):\n    nn_occupations = self.site_specific_nn_occupation()\n    if delta_occupation:\n        for site in delta_occupation:\n            assert (site in nn_occupations)\n            nn_occupations[site] += delta_occupation[site]\n    return sum([self.cn_occupation_energies[s][n] for (s, n) in nn_occupations.items()])", "docstring": "The coordination-number dependent energy for this site.\n\nArgs:\ndelta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }.\nIf this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None\n\nReturns:\n(Float): The coordination-number dependent energy for this site.", "source": "codesearchnet"}
{"code": "def str2dict(str_in):\n    dict_out = safe_eval(str_in)\n    if (not isinstance(dict_out, dict)):\n        dict_out = None\n    return dict_out", "docstring": "Extracts a dict from a string.\n\nArgs:\nstr_in (string) that contains python dict\nReturns:\n(dict) or None if no valid dict was found\nRaises:\n-", "source": "codesearchnet"}
{"code": "def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path='speaker_embeddings_path.json', **kwargs):\n    if speaker_embeddings_dict_path is not None:\n        speaker_embeddings_path = cached_file(pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', None), local_files_only=kwargs.pop('local_files_only', False), token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False)\n        if speaker_embeddings_path is None:\n            logger.warning(f'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists\\n                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\\n                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.')\n            speaker_embeddings = None\n        else:\n            with open(speaker_embeddings_path) as speaker_embeddings_json:\n                speaker_embeddings = json.load(speaker_embeddings_json)\n    else:\n        speaker_embeddings = None\n    tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)\n    return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)", "docstring": "Instantiate a Bark processor associated with a pretrained model.\n\nArgs:\npretrained_model_name_or_path (`str` or `os.PathLike`):\nThis can be either:\n\n- a string, the *model id* of a pretrained [`BarkProcessor`] hosted inside a model repo on\nhuggingface.co.\n- a path to a *directory* containing a processor saved using the [`~BarkProcessor.save_pretrained`]\nmethod, e.g., `./my_model_directory/`.\nspeaker_embeddings_dict_path (`str`, *optional*, defaults to `\"speaker_embeddings_path.json\"`):\nThe name of the `.json` file containing the speaker_embeddings dictionary located in\n`pretrained_model_name_or_path`. If `None`, no speaker_embeddings is loaded.\n**kwargs\nAdditional keyword arguments passed along to both\n[`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`].", "source": "github-repos"}
{"code": "class TFSharedEmbeddings(keras.layers.Layer):\n\n    def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float]=None, **kwargs):\n        super().__init__(**kwargs)\n        self.vocab_size = vocab_size\n        self.hidden_size = hidden_size\n        self.initializer_range = hidden_size ** (-0.5) if initializer_range is None else initializer_range\n        warnings.warn('`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `keras.layers.Embedding` instead.', DeprecationWarning)\n\n    def build(self, input_shape):\n        \n        self.weight = self.add_weight('weight', shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range))\n        super().build(input_shape)\n\n    def get_config(self):\n        config = {'vocab_size': self.vocab_size, 'hidden_size': self.hidden_size, 'initializer_range': self.initializer_range}\n        base_config = super().get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    def call(self, inputs: tf.Tensor, mode: str='embedding') -> tf.Tensor:\n        \n        if mode == 'embedding':\n            return self._embedding(inputs)\n        elif mode == 'linear':\n            return self._linear(inputs)\n        else:\n            raise ValueError(f'mode {mode} is not valid.')\n\n    def _embedding(self, input_ids):\n        \n        return tf.gather(self.weight, input_ids)\n\n    def _linear(self, inputs):\n        \n        first_dims = shape_list(inputs)[:-1]\n        x = tf.reshape(inputs, [-1, self.hidden_size])\n        logits = tf.matmul(x, self.weight, transpose_b=True)\n        return tf.reshape(logits, first_dims + [self.vocab_size])", "docstring": "Construct shared token embeddings.\n\nThe weights of the embedding layer is usually shared with the weights of the linear decoder when doing language\nmodeling.\n\nArgs:\nvocab_size (`int`):\nThe size of the vocabulary, e.g., the number of unique tokens.\nhidden_size (`int`):\nThe size of the embedding vectors.\ninitializer_range (`float`, *optional*):\nThe standard deviation to use when initializing the weights. If no value is provided, it will default to\n\\\\(1/\\sqrt{hidden\\_size}\\\\).\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional keyword arguments passed along to the `__init__` of `keras.layers.Layer`.", "source": "github-repos"}
{"code": "def total_seconds(td):\n  \n  secs = td.seconds + td.days * 24 * 3600\n  if td.microseconds:\n    secs += 1\n  return secs", "docstring": "convert a timedelta to seconds.\n\nThis is patterned after timedelta.total_seconds, which is only\navailable in python 27.\n\nArgs:\ntd: a timedelta object.\n\nReturns:\ntotal seconds within a timedelta. Rounded up to seconds.", "source": "juraj-google-style"}
{"code": "def _check_call_func(self, node):\n        \n        func = utils.safe_infer(node.func)\n        types = (\"str\", \"unicode\")\n        methods = (\"format\",)\n        if is_method_call(func, types, methods) and not is_complex_format_str(\n            func.bound\n        ):\n            self.add_message(\"logging-format-interpolation\", node=node)", "docstring": "Checks that function call is not format_string.format().\n\nArgs:\nnode (astroid.node_classes.Call):\nCall AST node to be checked.", "source": "juraj-google-style"}
{"code": "def log_variable_sizes(var_list=None, tag=None, verbose=False):\n  \n  if var_list is None:\n    var_list = tf.trainable_variables()\n  if tag is None:\n    tag = \"Trainable Variables\"\n\n  if not var_list:\n    return\n\n  name_to_var = {v.name: v for v in var_list}\n  total_size = 0\n  for v_name in sorted(list(name_to_var)):\n    v = name_to_var[v_name]\n    v_size = int(np.prod(np.array(v.shape.as_list())))\n    if verbose:\n      tf.logging.info(\"Weight    %s\\tshape    %s\\tsize    %d\",\n                      v.name[:-2].ljust(80),\n                      str(v.shape).ljust(20), v_size)\n    total_size += v_size\n  tf.logging.info(\"%s Total size: %d\", tag, total_size)", "docstring": "Log the sizes and shapes of variables, and the total size.\n\nArgs:\nvar_list: a list of variables; defaults to trainable_variables\ntag: a string; defaults to \"Trainable Variables\"\nverbose: bool, if True, log every weight; otherwise, log total size only.", "source": "juraj-google-style"}
{"code": "def local_file(self, filename):\n    LOG.info('Retrieving \"%s\" from \"%s\".', filename, self.runway_dir)\n    file_contents = ''\n    file_path = os.path.join(self.runway_dir, filename)\n    try:\n        with open(file_path, 'rt') as lookup_file:\n            file_contents = lookup_file.read()\n    except FileNotFoundError:\n        LOG.warning('File missing \"%s\".', file_path)\n        raise\n    LOG.debug('Local file contents:\\n%s', file_contents)\n    return file_contents", "docstring": "Read the local file in _self.runway_dir_.\n\nArgs:\nfilename (str): Name of file to retrieve relative to root of\n_runway_dir_.\n\nReturns:\nstr: Contents of local file.\n\nRaises:\nFileNotFoundError: Requested file missing.", "source": "codesearchnet"}
{"code": "def Process(self, parser_mediator, date_time, syslog_tokens, **kwargs):\n    body = syslog_tokens.get('body', None)\n    if (not body):\n        raise AttributeError('Missing required attribute: body')\n    for (key, grammar) in iter(self.MESSAGE_GRAMMARS):\n        try:\n            tokens = grammar.parseString(body)\n            syslog_tokens.update(tokens.asDict())\n            self.ParseMessage(parser_mediator, key, date_time, syslog_tokens)\n            return\n        except pyparsing.ParseException:\n            pass\n    raise errors.WrongPlugin('Unable to create event from: {0:s}'.format(body))", "docstring": "Processes the data structure produced by the parser.\n\nArgs:\nparser_mediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\ndate_time (dfdatetime.DateTimeValues): date and time values.\nsyslog_tokens (dict[str, str]): names of the fields extracted by the\nsyslog parser and the matching grammar, and values are the values of\nthose fields.\n\nRaises:\nAttributeError: If the syslog_tokens do not include a 'body' attribute.\nWrongPlugin: If the plugin is unable to parse the syslog tokens.", "source": "codesearchnet"}
{"code": "def combine(self, x):\n    depth = tf.shape(x)[(- 1)]\n    x *= tf.expand_dims(self._nonpadding, (- 1))\n    ret = tf.unsorted_segment_sum(x, self._flat_indices, num_segments=(self._batch * self._length))\n    ret = tf.reshape(ret, [self._batch, self._length, depth])\n    return ret", "docstring": "Return the output from the experts.\n\nWhen one example goes to multiple experts, the outputs are summed.\n\nArgs:\nx: a Tensor with shape [batch, num_experts, expert_capacity, depth]\n\nReturns:\na `Tensor` with shape `[batch, length, depth]", "source": "codesearchnet"}
{"code": "def scalar_mul(scalar, x, name=None):\n    base_dtype = dtypes.as_dtype(x.dtype).base_dtype\n    scalar = ops.convert_to_tensor(scalar, dtype=base_dtype, name='scalar')\n    shape = scalar.get_shape()\n    if shape.ndims == 0:\n        if isinstance(x, indexed_slices.IndexedSlices):\n            return indexed_slices.IndexedSlices(gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)\n        else:\n            return gen_math_ops.mul(scalar, x, name)\n    else:\n        raise ValueError(f'The input scalar must be a 0-D value. Received shape {shape}.')", "docstring": "Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\nThis is a special case of `tf.math.multiply`, where the first value must be a\n`scalar`. Unlike the general form of `tf.math.multiply`, this is operation is\nguaranteed to be efficient for `tf.IndexedSlices`.\n\n>>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])\n>>> with tf.GradientTape() as g:\n...   g.watch(x)\n...   y = tf.gather(x, [1, 2])  # IndexedSlices\n...   z = tf.math.scalar_mul(10.0, y)\n\nArgs:\nscalar: A 0-D scalar `Tensor`. Must have known shape.\nx: A `Tensor` or `IndexedSlices` to be scaled.\nname: A name for the operation (optional).\n\nReturns:\n`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\nRaises:\nValueError: if scalar is not a 0-D `scalar`.", "source": "github-repos"}
{"code": "def compute_dtype(self):\n    return self._dtype_policy.compute_dtype", "docstring": "The dtype of the layer's computations.\n\nThis is equivalent to `Layer.dtype_policy.compute_dtype`. Unless\nmixed precision is used, this is the same as `Layer.dtype`, the dtype of\nthe weights.\n\nLayers automatically cast their inputs to the compute dtype, which causes\ncomputations and the output to be in the compute dtype as well. This is done\nby the base Layer class in `Layer.__call__`, so you do not have to insert\nthese casts if implementing your own layer.\n\nLayers often perform certain internal computations in higher precision when\n`compute_dtype` is float16 or bfloat16 for numeric stability. The output\nwill still typically be float16 or bfloat16 in such cases.\n\nReturns:\nThe layer's compute dtype.", "source": "github-repos"}
{"code": "def update_video(self, video_id, title='', description='', keywords='', access_control=AccessControl.Unlisted):\n    if (not self.authenticated):\n        raise ApiError(_('Authentication is required'))\n    entry = self.fetch_video(video_id)\n    extension = self._access_control(access_control)\n    if extension:\n        entry.extension_elements = extension\n    if title:\n        entry.media.title.text = title\n    if description:\n        entry.media.description.text = description\n    success = Api.yt_service.UpdateVideoEntry(entry)\n    return success", "docstring": "Updates the video\n\nAuthentication is required\n\nParams:\nentry: video entry fetch via 'fetch_video()'\ntitle: string\ndescription: string\nkeywords: string\n\nReturns:\na video entry on success\nNone otherwise", "source": "codesearchnet"}
{"code": "def functions(start=None, end=None):\n    \n    start, end = fix_addresses(start, end)\n\n    for func_t in idautils.Functions(start, end):\n        yield Function(func_t)", "docstring": "Get all functions in range.\n\nArgs:\nstart: Start address of the range. Defaults to IDB start.\nend: End address of the range. Defaults to IDB end.\n\nReturns:\nThis is a generator that iterates over all the functions in the IDB.", "source": "juraj-google-style"}
{"code": "def calculate_oobatake_dS(seq, temp):\n    \n\n    seq = ssbio.protein.sequence.utils.cast_to_str(seq)\n\n    dS = 0\n    temp += 273.15\n    T0 = 298.15\n    dCp_sum = _sum_of_dCp(seq)\n    for aa in seq:\n        S0 = oobatake_dictionary[aa]['dS']\n        dS += S0\n    return dS + dCp_sum * math.log(temp / T0)", "docstring": "Get dS using Oobatake method in units cal/mol.\n\nArgs:\nseq (str, Seq, SeqRecord): Amino acid sequence\ntemp (float): Temperature in degrees C\n\nReturns:\nfloat: dS in units cal/mol", "source": "juraj-google-style"}
{"code": "def list_autoscale_settings(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/', '/autoscaleSettings?api-version=', INSIGHTS_API])\n    return do_get(endpoint, access_token)", "docstring": "List the autoscale settings in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of autoscale settings.", "source": "codesearchnet"}
{"code": "def disconnect_sync(self, conn_id):\n    done = threading.Event()\n    result = {}\n\n    def disconnect_done(conn_id, adapter_id, status, reason):\n        result['success'] = status\n        result['failure_reason'] = reason\n        done.set()\n    self.disconnect_async(conn_id, disconnect_done)\n    done.wait()\n    return result", "docstring": "Synchronously disconnect from a connected device\n\nArgs:\nconn_id (int): A unique identifier that will refer to this connection\n\nReturns:\ndict: A dictionary with two elements\n'success': a bool with the result of the connection attempt\n'failure_reason': a string with the reason for the failure if we failed", "source": "codesearchnet"}
{"code": "class Mamba2Output(ModelOutput):\n    last_hidden_state: Optional[torch.FloatTensor] = None\n    cache_params: Optional[Mamba2Cache] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Class for the MAMBA2 model outputs.\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nSequence of hidden-states at the output of the last layer of the model.\ncache_params (`Mamba2Cache`):\nThe state of the model at the last time step. Can be used in a forward method with the next `input_ids` to\navoid providing the old `input_ids`.\n\nIncludes both the State space model state matrices after the selective scan, and the Convolutional states\nhidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\nTuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\none for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\nHidden-states of the model at the output of each layer plus the optional initial embedding outputs.", "source": "github-repos"}
{"code": "def beta_to_uni(text, strict=False):\n    param_key = (strict,)\n    try:\n        t = _BETA_CONVERSION_TRIES[param_key]\n    except KeyError:\n        t = _create_conversion_trie(*param_key)\n        _BETA_CONVERSION_TRIES[param_key] = t\n    transform = []\n    idx = 0\n    possible_word_boundary = False\n    while (idx < len(text)):\n        if (possible_word_boundary and _penultimate_sigma_word_final(transform)):\n            transform[(- 2)] = _FINAL_LC_SIGMA\n        step = t.longest_prefix(text[idx:(idx + _MAX_BETA_TOKEN_LEN)])\n        if step:\n            possible_word_boundary = (text[idx] in _BETA_PUNCTUATION)\n            (key, value) = step\n            transform.append(value)\n            idx += len(key)\n        else:\n            possible_word_boundary = True\n            transform.append(text[idx])\n            idx += 1\n    if (possible_word_boundary and _penultimate_sigma_word_final(transform)):\n        transform[(- 2)] = _FINAL_LC_SIGMA\n    elif ((len(transform) > 0) and (transform[(- 1)] == _MEDIAL_LC_SIGMA)):\n        transform[(- 1)] = _FINAL_LC_SIGMA\n    converted = ''.join(transform)\n    return converted", "docstring": "Converts the given text from betacode to unicode.\n\nArgs:\ntext: The beta code text to convert. All of this text must be betacode.\nstrict: Flag to allow for flexible diacritic order on input.\n\nReturns:\nThe converted text.", "source": "codesearchnet"}
{"code": "def handle_subscribe(self, request, path):\n    ret = []\n    if path:\n        name = path[0]\n        if (name not in self.children):\n            self.children[name] = NotifierNode(getattr(self.data, name, None), self)\n        ret += self.children[name].handle_subscribe(request, path[1:])\n    else:\n        serialized = serialize_object(self.data)\n        if request.delta:\n            self.delta_requests.append(request)\n            ret.append(request.delta_response([[[], serialized]]))\n        else:\n            self.update_requests.append(request)\n            ret.append(request.update_response(serialized))\n    return ret", "docstring": "Add to the list of request to notify, and notify the initial value of\nthe data held\n\nArgs:\nrequest (Subscribe): The subscribe request\npath (list): The relative path from ourself\n\nReturns:\nlist: [(callback, Response)] that need to be called", "source": "codesearchnet"}
{"code": "def evaluate_ising(linear, quad, state):\n    \n\n    \n    if _numpy and isinstance(state, np.ndarray):\n        return evaluate_ising(linear, quad, state.tolist())\n\n    \n    energy = 0.0\n    for index, value in uniform_iterator(linear):\n        energy += state[index] * value\n    for (index_a, index_b), value in six.iteritems(quad):\n        energy += value * state[index_a] * state[index_b]\n    return energy", "docstring": "Calculate the energy of a state given the Hamiltonian.\n\nArgs:\nlinear: Linear Hamiltonian terms.\nquad: Quadratic Hamiltonian terms.\nstate: Vector of spins describing the system state.\n\nReturns:\nEnergy of the state evaluated by the given energy function.", "source": "juraj-google-style"}
{"code": "def __init__(self, nonce_id=None, nonce_value=None):\n        \n        super(Nonce, self).__init__(tag=enums.Tags.NONCE)\n\n        self._nonce_id = None\n        self._nonce_value = None\n\n        self.nonce_id = nonce_id\n        self.nonce_value = nonce_value", "docstring": "Construct a Nonce struct.\n\nArgs:\nnonce_id (bytes): A binary string representing the ID of the nonce\nvalue. Optional, defaults to None. Required for encoding and\ndecoding.\nnonce_value (bytes): A binary string representing a random value.\nOptional, defaults to None. Required for encoding and decoding.", "source": "juraj-google-style"}
{"code": "def _empty_dict_pylist_from_row_partitions(row_partitions, nrows):\n    if not row_partitions:\n        return [{} for _ in range(nrows)]\n    else:\n        values = _empty_dict_pylist_from_row_partitions(row_partitions[1:], row_partitions[0].row_splits()[-1])\n        splits = row_partitions[0].row_splits()\n        return [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]", "docstring": "Returns a python list of empty dicts from the given row partitions.\n\nArgs:\nrow_partitions: The row-partitions describing the ragged shape of the\nresult.\nnrows: The number of rows in the outermost row-partition.  (Or if\n`len(row_partitions)==0`, then the number of empty dicts to return.)\n\nReturns:\nA nested python list whose leaves (if any) are empty python dicts.", "source": "github-repos"}
{"code": "def _cursor_pb(cursor_pair):\n    \n    if cursor_pair is not None:\n        data, before = cursor_pair\n        value_pbs = [_helpers.encode_value(value) for value in data]\n        return query_pb2.Cursor(values=value_pbs, before=before)", "docstring": "Convert a cursor pair to a protobuf.\n\nIf ``cursor_pair`` is :data:`None`, just returns :data:`None`.\n\nArgs:\ncursor_pair (Optional[Tuple[list, bool]]): Two-tuple of\n\n* a list of field values.\n* a ``before`` flag\n\nReturns:\nOptional[google.cloud.firestore_v1beta1.types.Cursor]: A\nprotobuf cursor corresponding to the values.", "source": "juraj-google-style"}
{"code": "def writeCmdMsg(self, msg):\n    ekm_log(((('(writeCmdMsg | ' + self.getContext()) + ') ') + msg))\n    self.m_command_msg = msg", "docstring": "Internal method to set the command result string.\n\nArgs:\nmsg (str): Message built during command.", "source": "codesearchnet"}
{"code": "def _jacobian_both(nodes, degree, dimension):\n    r\n    _, num_nodes = nodes.shape\n    result = np.empty((2 * dimension, num_nodes - degree - 1), order=\"F\")\n    result[:dimension, :] = jacobian_s(nodes, degree, dimension)\n    result[dimension:, :] = jacobian_t(nodes, degree, dimension)\n    return result", "docstring": "r\"\"\"Compute :math:`s` and :math:`t` partial of :math:`B`.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): Array of nodes in a surface.\ndegree (int): The degree of the surface.\ndimension (int): The dimension the surface lives in.\n\nReturns:\nnumpy.ndarray: Nodes of the Jacobian surfaces in\nB |eacute| zier form.", "source": "juraj-google-style"}
{"code": "def __get_state_by_id(cls, job_id):\n    state = model.MapreduceState.get_by_job_id(job_id)\n    if (state is None):\n        raise ValueError(('Job state for job %s is missing.' % job_id))\n    return state", "docstring": "Get job state by id.\n\nArgs:\njob_id: job id.\n\nReturns:\nmodel.MapreduceState for the job.\n\nRaises:\nValueError: if the job state is missing.", "source": "codesearchnet"}
{"code": "def build_synchronize_decorator():\n    lock = threading.Lock()\n\n    def lock_decorator(fn):\n\n        @functools.wraps(fn)\n        def lock_decorated(*args, **kwargs):\n            with lock:\n                return fn(*args, **kwargs)\n        return lock_decorated\n    return lock_decorator", "docstring": "Returns a decorator which prevents concurrent calls to functions.\n\nUsage:\nsynchronized = build_synchronize_decorator()\n\n@synchronized\ndef read_value():\n...\n\n@synchronized\ndef write_value(x):\n...\n\nReturns:\nmake_threadsafe (fct): The decorator which lock all functions to which it\nis applied under a same lock", "source": "codesearchnet"}
{"code": "def get_list(self, obj_class, data, subset):\n    url = obj_class.get_url(data)\n    if (obj_class.can_list and obj_class.can_get):\n        if ((subset and (len(subset) == 1) and (subset[0].upper() == 'BASIC')) and (obj_class is jssobjects.Computer)):\n            url += '/subset/basic'\n        result = self.jss.get(url)\n        if obj_class.container:\n            result = result.find(obj_class.container)\n        return self._build_jss_object_list(result, obj_class)\n    elif obj_class.can_get:\n        xmldata = self.jss.get(url)\n        return obj_class(self.jss, xmldata)\n    else:\n        raise JSSMethodNotAllowedError(obj_class.__class__.__name__)", "docstring": "Get a list of objects as JSSObjectList.\n\nArgs:\nobj_class: The JSSObject subclass type to search for.\ndata: None\nsubset: Some objects support a subset for listing; namely\nComputer, with subset=\"basic\".\n\nReturns:\nJSSObjectList", "source": "codesearchnet"}
{"code": "def jump( self ):\n        \n        potential_jumps = self.potential_jumps()\n        if not potential_jumps:\n            raise BlockedLatticeError('No moves are possible in this lattice')\n        all_transitions = transitions.Transitions( self.potential_jumps() )\n        random_jump = all_transitions.random()\n        delta_t = all_transitions.time_to_jump()\n        self.time += delta_t\n        self.update_site_occupation_times( delta_t )\n        self.update( random_jump )\n        return( all_transitions.time_to_jump() )", "docstring": "Select a jump at random from all potential jumps, then update the lattice state.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def take_bug_report(self, test_name=None, begin_time=None, timeout=300, destination=None):\n    prefix = DEFAULT_BUG_REPORT_NAME\n    if test_name:\n        prefix = '%s,%s' % (DEFAULT_BUG_REPORT_NAME, test_name)\n    if begin_time is None:\n        begin_time = mobly_logger.get_log_file_timestamp()\n    new_br = True\n    try:\n        stdout = self.adb.shell('bugreportz -v').decode('utf-8')\n        if 'not found' in stdout:\n            new_br = False\n    except adb.AdbError:\n        new_br = False\n    if destination is None:\n        destination = os.path.join(self.log_path, 'BugReports')\n    br_path = utils.abs_path(destination)\n    utils.create_dir(br_path)\n    filename = self.generate_filename(prefix, str(begin_time), 'txt')\n    if new_br:\n        filename = filename.replace('.txt', '.zip')\n    full_out_path = os.path.join(br_path, filename)\n    self.wait_for_boot_completion()\n    self.log.debug('Start taking bugreport.')\n    if new_br:\n        out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')\n        if not out.startswith('OK'):\n            raise DeviceError(self, 'Failed to take bugreport: %s' % out)\n        br_out_path = out.split(':')[1].strip()\n        self.adb.pull([br_out_path, full_out_path])\n        self.adb.shell(['rm', br_out_path])\n    else:\n        self.adb.bugreport(' > \"%s\"' % full_out_path, shell=True, timeout=timeout)\n    self.log.debug('Bugreport taken at %s.', full_out_path)\n    return full_out_path", "docstring": "Takes a bug report on the device and stores it in a file.\n\nArgs:\ntest_name: Name of the test method that triggered this bug report.\nbegin_time: Timestamp of when the test started. If not set, then\nthis will default to the current time.\ntimeout: float, the number of seconds to wait for bugreport to\ncomplete, default is 5min.\ndestination: string, path to the directory where the bugreport\nshould be saved.\n\nReturns:\nA string that is the absolute path to the bug report on the host.", "source": "github-repos"}
{"code": "def space(self, newlines=1):\n        \n        space = Space()\n        for line in range(newlines):\n            space.add_line('\\n')\n        self._container.structure.insert(self._idx, space)\n        self._idx += 1\n        return self", "docstring": "Creates a vertical space of newlines\n\nArgs:\nnewlines (int): number of empty lines\n\nReturns:\nself for chaining", "source": "juraj-google-style"}
{"code": "def relative_batch_tokens_ids_to_midi(self, tokens: np.ndarray, beatstep: np.ndarray, beat_offset_idx: int=0, bars_per_batch: int=2, cutoff_time_idx: int=12):\n    beat_offset_idx = 0 if beat_offset_idx is None else beat_offset_idx\n    notes = self.relative_batch_tokens_ids_to_notes(tokens=tokens, beat_offset_idx=beat_offset_idx, bars_per_batch=bars_per_batch, cutoff_time_idx=cutoff_time_idx)\n    midi = self.notes_to_midi(notes, beatstep, offset_sec=beatstep[beat_offset_idx])\n    return midi", "docstring": "Converts tokens to Midi. This method calls `relative_batch_tokens_ids_to_notes` method to convert batch tokens\nto notes then uses `notes_to_midi` method to convert them to Midi.\n\nArgs:\ntokens (`numpy.ndarray`):\nDenotes tokens which alongside beatstep will be converted to Midi.\nbeatstep (`np.ndarray`):\nWe get beatstep from feature extractor which is also used to get Midi.\nbeat_offset_idx (`int`, *optional*, defaults to 0):\nDenotes beat offset index for each note in generated Midi.\nbars_per_batch (`int`, *optional*, defaults to 2):\nA parameter to control the Midi output generation.\ncutoff_time_idx (`int`, *optional*, defaults to 12):\nDenotes the cutoff time index for each note in generated Midi.", "source": "github-repos"}
{"code": "def compress_mean(x, dim, compression_factor):\n  \n  dims = x.shape.dims\n  pos = dims.index(dim)\n  compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)\n  compression_factor_dim = mtf.Dimension(\n      \"compression_factor\", compression_factor)\n  new_shape = (\n      dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])\n  x = mtf.reshape(x, new_shape)\n  x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)\n  return x", "docstring": "Compress by taking group means.\n\nArgs:\nx: a Tensor\ndim: a dimension in x.shape\ncompression_factor: an integer\n\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def deploy(app_id, version, promote, quiet):\n    gae_app = GaeApp.for_branch(git.current_branch().name)\n    if ((gae_app is None) and (None in (app_id, version))):\n        msg = \"Can't find an AppEngine app setup for branch <35>{}<32> and --project and --version were not given.\"\n        log.err(msg, git.current_branch().name)\n        sys.exit(1)\n    if (version is not None):\n        gae_app.version = version\n    if (app_id is not None):\n        gae_app.app_id = app_id\n    gae_app.deploy(promote, quiet)", "docstring": "Deploy the app to AppEngine.\n\nArgs:\napp_id (str):\nAppEngine App ID. Overrides config value app_id if given.\nversion (str):\nAppEngine project version. Overrides config values if given.\npromote (bool):\nIf set to **True** promote the current remote app version to the one\nthat's being deployed.\nquiet (bool):\nIf set to **True** this will pass the ``--quiet`` flag to gcloud\ncommand.", "source": "codesearchnet"}
{"code": "def _broadcast_dynamic_shape_extended_helper(a: DynamicRaggedShape, b: DynamicRaggedShape) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:\n    assert a.rank <= b.rank\n    assert 2 <= b.rank\n    assert 1 <= a.rank\n    a_rps = a._as_row_partitions()\n    b_rps = b._as_row_partitions()\n    if len(a_rps) < len(b_rps):\n        a_nrows = a[0]\n        a_nrows_static = tensor_util.constant_value(a_nrows)\n        if a_nrows_static is not None:\n            a_nrows = a_nrows_static\n        neg_one_a_rp = RowPartition.from_uniform_row_length(uniform_row_length=a_nrows, nrows=1, nvals=a_nrows)\n        neg_one_b_rp = b_rps[-(len(a_rps) + 1)]\n        neg_one_ac, neg_one_bc = _broadcast_dynamic_shape_first_layer(constant_op.constant(1, dtype=b_rps[0].dtype), neg_one_b_rp.nrows())\n        c_zero, ac_zero, bc_zero = _broadcast_dynamic_shape_next_layer(neg_one_ac, neg_one_bc, neg_one_a_rp, neg_one_b_rp)\n        b_rps_tail = b_rps[-len(a_rps):] if len(a_rps) >= 1 else []\n        c_suffix, ac_layers, bc_layers = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, b_rps_tail)\n        return _broadcast_dynamic_shape_extended_complete(a=a, b=b, b_rps=b_rps, c_suffix=[c_zero] + c_suffix, ac=[ac_zero] + ac_layers, bc_suffix=[neg_one_bc, bc_zero] + bc_layers)\n    else:\n        assert len(a_rps) == len(b_rps)\n        ac_zero, bc_zero = _broadcast_dynamic_shape_first_layer(a_rps[0].nrows(), b_rps[0].nrows())\n        c_rps, a_layers, b_layers = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, b_rps)\n        return _broadcast_dynamic_shape_extended_complete(a=a, b=b, b_rps=b_rps, c_suffix=c_rps, ac=[ac_zero] + a_layers, bc_suffix=[bc_zero] + b_layers)", "docstring": "Helper for broadcast_dynamic_shape_extended.\n\nHere, we force:\na.rank <= b.rank\n2 <= b.rank\n1 <= a.rank\nArgs:\na: a DynamicRaggedShape\nb: a DynamicRaggedShape\n\nReturns:\nA triple of a shape and two broadcasters.", "source": "github-repos"}
{"code": "def read_chunks(self, chunk_size, start, step, count) -> bytes:\n        \n\n        return self.mglo.read_chunks(chunk_size, start, step, count)", "docstring": "Read the content.\n\nRead and concatenate the chunks of size chunk_size\nusing offsets calculated from start, step and stop.\n\nArgs:\nchunk_size (int): The chunk size.\nstart (int): First offset.\nstep (int): Offset increment.\ncount (int): The number of offsets.\n\nReturns:\nbytes", "source": "juraj-google-style"}
{"code": "def __init__(self, namespace: Optional[str], name: Optional[str], urn: Optional[str]=None, labels: Optional[Dict[str, str]]=None) -> None:\n    if not urn:\n        if not namespace:\n            raise ValueError('Metric namespace must be non-empty')\n        if not name:\n            raise ValueError('Metric name must be non-empty')\n    self.namespace = namespace\n    self.name = name\n    self.urn = urn\n    self.labels = labels if labels else {}", "docstring": "Initializes ``MetricName``.\n\nNote: namespace and name should be set for user metrics,\nurn and labels should be set for an arbitrary metric to package into a\nMonitoringInfo.\n\nArgs:\nnamespace: A string with the namespace of a metric.\nname: A string with the name of a metric.\nurn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.\nlabels: Labels to populate on a MonitoringInfo", "source": "github-repos"}
{"code": "def update_current_state(self, value: str,\n                             force: bool = False) -> datetime:\n        \n        value = value.lower()\n        if not force:\n            current_state = self.current_state\n            \n            \n            if current_state == 'unknown':\n                allowed_transitions = self._allowed_states\n            else:\n                allowed_transitions = self._allowed_transitions[current_state]\n                allowed_transitions.append(current_state)\n\n            LOG.debug('Updating current state of %s to %s', self._id, value)\n\n            if value not in allowed_transitions:\n                raise ValueError(\"Invalid current state update: '{}'. '{}' \"\n                                 \"can be transitioned to states: {}\"\n                                 .format(value, current_state,\n                                         allowed_transitions))\n\n        return self._update_state('current', value)", "docstring": "Update the current state.\n\nArgs:\nvalue (str): New value for sdp state\nforce (bool): If true, ignore allowed transitions\n\nReturns:\ndatetime, update timestamp\n\nRaises:\nValueError: If the specified current state is not allowed.", "source": "juraj-google-style"}
{"code": "def compute_mask_offsets(shard_id2num_examples):\n  \n  total_num_examples = sum(shard_id2num_examples)\n\n  mask_offsets = []\n  total_num_examples = 0\n  for num_examples_in_shard in shard_id2num_examples:\n    \n    \n    mask_offsets.append(total_num_examples % 100)\n    total_num_examples += num_examples_in_shard\n\n  return mask_offsets", "docstring": "Return the list of offsets associated with each shards.\n\nArgs:\nshard_id2num_examples: `list[int]`, mapping shard_id=>num_examples\n\nReturns:\nmask_offsets: `list[int]`, offset to skip for each of the shard", "source": "juraj-google-style"}
{"code": "def files_info(self, *, id: str, **kwargs) -> SlackResponse:\n    kwargs.update({'id': id})\n    return self.api_call('files.info', http_verb='GET', params=kwargs)", "docstring": "Gets information about a team file.\n\nArgs:\nid (str): The file id. e.g. 'F1234467890'", "source": "codesearchnet"}
{"code": "def get(self, name):\n    return self.prepare_model(self.client.api.inspect_plugin(name))", "docstring": "Gets a plugin.\n\nArgs:\nname (str): The name of the plugin.\n\nReturns:\n(:py:class:`Plugin`): The plugin.\n\nRaises:\n:py:class:`docker.errors.NotFound` If the plugin does not\nexist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def load(self, label_lookup_path, uid_lookup_path):\n    \n    if not tf.gfile.Exists(uid_lookup_path):\n      tf.logging.fatal('File does not exist %s', uid_lookup_path)\n    if not tf.gfile.Exists(label_lookup_path):\n      tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n    \n    proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n    uid_to_human = {}\n    p = re.compile(r'[n\\d]*[ \\S,]*')\n    for line in proto_as_ascii_lines:\n      parsed_items = p.findall(line)\n      uid = parsed_items[0]\n      human_string = parsed_items[2]\n      uid_to_human[uid] = human_string\n\n    \n    node_id_to_uid = {}\n    proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n    for line in proto_as_ascii:\n      if line.startswith('  target_class:'):\n        target_class = int(line.split(': ')[1])\n      if line.startswith('  target_class_string:'):\n        target_class_string = line.split(': ')[1]\n        node_id_to_uid[target_class] = target_class_string[1:-2]\n\n    \n    node_id_to_name = {}\n    for key, val in node_id_to_uid.items():\n      if val not in uid_to_human:\n        tf.logging.fatal('Failed to locate: %s', val)\n      name = uid_to_human[val]\n      node_id_to_name[key] = name\n\n    return node_id_to_name", "docstring": "Loads a human readable English name for each softmax node.\n\nArgs:\nlabel_lookup_path: string UID to integer node ID.\nuid_lookup_path: string UID to human-readable string.\n\nReturns:\ndict from integer node ID to human-readable string.", "source": "juraj-google-style"}
{"code": "def _ProcessUnknownEnums(message, encoded_message):\n    if (not encoded_message):\n        return message\n    decoded_message = json.loads(six.ensure_str(encoded_message))\n    for field in message.all_fields():\n        if (isinstance(field, messages.EnumField) and (field.name in decoded_message) and (message.get_assigned_value(field.name) is None)):\n            message.set_unrecognized_field(field.name, decoded_message[field.name], messages.Variant.ENUM)\n    return message", "docstring": "Add unknown enum values from encoded_message as unknown fields.\n\nProtoRPC diverges from the usual protocol buffer behavior here and\ndoesn't allow unknown fields. Throwing on unknown fields makes it\nimpossible to let servers add new enum values and stay compatible\nwith older clients, which isn't reasonable for us. We simply store\nunrecognized enum values as unknown fields, and all is well.\n\nArgs:\nmessage: Proto message we've decoded thus far.\nencoded_message: JSON string we're decoding.\n\nReturns:\nmessage, with any unknown enums stored as unrecognized fields.", "source": "codesearchnet"}
{"code": "def get_modules():\n    ret = list()\n    valid_extensions = ('.psd1', '.psm1', '.cdxml', '.xaml', '.dll')\n    root_paths = []\n    home_dir = os.environ.get('HOME', os.environ.get('HOMEPATH'))\n    system_dir = '{0}\\\\System32'.format(os.environ.get('WINDIR', 'C:\\\\Windows'))\n    program_files = os.environ.get('ProgramFiles', 'C:\\\\Program Files')\n    default_paths = ['{0}/.local/share/powershell/Modules'.format(home_dir), '/usr/local/share/powershell/Modules', '{0}\\\\WindowsPowerShell\\\\v1.0\\\\Modules\\\\'.format(system_dir), '{0}\\\\WindowsPowerShell\\\\Modules'.format(program_files)]\n    default_paths = ';'.join(default_paths)\n    ps_module_path = os.environ.get('PSModulePath', default_paths)\n    ps_module_path = ps_module_path.split(';')\n    for item in ps_module_path:\n        if os.path.exists(item):\n            root_paths.append(item)\n    if (not root_paths):\n        log.error('Default paths not found')\n        return ret\n    for root_path in root_paths:\n        if (not os.path.isdir(root_path)):\n            continue\n        for (root_dir, sub_dirs, file_names) in salt.utils.path.os_walk(root_path):\n            for file_name in file_names:\n                (base_name, file_extension) = os.path.splitext(file_name)\n                if (file_extension.lower() in valid_extensions):\n                    dir_name = os.path.basename(os.path.normpath(root_dir))\n                    if ((dir_name not in ret) and (base_name.lower() == dir_name.lower())):\n                        del sub_dirs[:]\n                        ret.append(dir_name)\n    return ret", "docstring": "Get a list of the PowerShell modules which are potentially available to be\nimported. The intent is to mimic the functionality of ``Get-Module\n-ListAvailable | Select-Object -Expand Name``, without the delay of loading\nPowerShell to do so.\n\nReturns:\nlist: A list of modules available to Powershell\n\nExample:\n\n.. code-block:: python\n\nimport salt.utils.powershell\nmodules = salt.utils.powershell.get_modules()", "source": "codesearchnet"}
{"code": "def _ReadRecordHeader(self, file_object, record_header_offset):\n    data_type_map = self._GetDataTypeMap('keychain_record_header')\n    (record_header, _) = self._ReadStructureFromFileObject(file_object, record_header_offset, data_type_map)\n    return record_header", "docstring": "Reads the record header.\n\nArgs:\nfile_object (file): file-like object.\nrecord_header_offset (int): offset of the record header relative to\nthe start of the file.\n\nReturns:\nkeychain_record_header: record header.\n\nRaises:\nParseError: if the record header cannot be read.", "source": "codesearchnet"}
{"code": "def _GetModuleCodeObjects(module):\n    visit_recorder = _VisitRecorder()\n    current = [module]\n    code_objects = set()\n    while current:\n        current = _FindCodeObjectsReferents(module, current, visit_recorder)\n        code_objects |= current\n        current = [code_object.co_consts for code_object in current]\n    return code_objects", "docstring": "Gets all code objects defined in the specified module.\n\nThere are two BFS traversals involved. One in this function and the other in\n_FindCodeObjectsReferents. Only the BFS in _FindCodeObjectsReferents has\na depth limit. This function does not. The motivation is that this function\nexplores code object of the module and they can have any arbitrary nesting\nlevel. _FindCodeObjectsReferents, on the other hand, traverses through class\ndefinitions and random references. It's much more expensive and will likely\ngo into unrelated objects.\n\nThere is also a limit on how many total objects are going to be traversed in\nall. This limit makes sure that if something goes wrong, the lookup doesn't\nhang.\n\nArgs:\nmodule: module to explore.\n\nReturns:\nSet of code objects defined in module.", "source": "codesearchnet"}
{"code": "def restore_collection(backup):\n    for (k, v) in six.iteritems(backup):\n        del tf.get_collection_ref(k)[:]\n        tf.get_collection_ref(k).extend(v)", "docstring": "Restore from a collection backup.\n\nArgs:\nbackup (dict):", "source": "codesearchnet"}
{"code": "def gather(params, indices, validate_indices=None, name=None, axis=None, batch_dims=0):\n    if name is None:\n        name = 'gather'\n    with ops.name_scope(name):\n        if axis is None:\n            axis = batch_dims\n        axis = array_ops.get_positive_axis(axis, params.shape.rank, ndims_name='params.shape.rank')\n        indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(indices, name='indices')\n\n        def leaf_op(p):\n            return array_ops.gather(p, indices, validate_indices=validate_indices, axis=axis, batch_dims=batch_dims, name=None)\n        return _extend_op_single(params, leaf_op)", "docstring": "tf.gather for structured tensors.\n\nDoes not support (yet) checks on illegal axis values, et cetera.\n\nIndices must be a ragged or dense tensor.\nArgs:\nparams: a structured tensor to be gathered\nindices: a ragged tensor or tensor to gather by.\nvalidate_indices: whether to validate the indices\nname: the name of the op(s).\naxis: the axis in params to gather on.\nbatch_dims: the number of batch dimensions.\n\nReturns:\nthe params reorganized according to indices.", "source": "github-repos"}
{"code": "def save_screenshot(driver, name):\n    if hasattr(driver, 'save_screenshot'):\n        screenshot_dir = os.environ.get('SCREENSHOT_DIR')\n        if (not screenshot_dir):\n            LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')\n            return\n        elif (not os.path.exists(screenshot_dir)):\n            os.makedirs(screenshot_dir)\n        image_name = os.path.join(screenshot_dir, (name + '.png'))\n        driver.save_screenshot(image_name)\n    else:\n        msg = u\"Browser does not support screenshots. Could not save screenshot '{name}'\".format(name=name)\n        LOGGER.warning(msg)", "docstring": "Save a screenshot of the browser.\n\nThe location of the screenshot can be configured\nby the environment variable `SCREENSHOT_DIR`. If not set,\na warning is logged and no screenshot is saved.\n\nArgs:\ndriver (selenium.webdriver): The Selenium-controlled browser.\nname (str): A name for the screenshot, which will be used in the output file name.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def load(self, *modules):\n    for module in modules:\n        if isinstance(module, six.string_types):\n            try:\n                module = get_object(module)\n            except Exception as e:\n                self.errors[module] = e\n                continue\n        self.modules[module.__package__] = module\n        for (loader, module_name, is_pkg) in pkgutil.walk_packages(module.__path__):\n            full_name = '{}.{}'.format(_package(module), module_name)\n            try:\n                self.modules[full_name] = get_object(full_name)\n                if is_pkg:\n                    self.load(self.modules[full_name])\n            except Exception as e:\n                self.errors[full_name] = e", "docstring": "Load one or more modules.\n\nArgs:\nmodules: Either a string full path to a module or an actual module\nobject.", "source": "codesearchnet"}
{"code": "def __init__(self, context_name = 'default'):\n\t\t\n\t\tif context_name in self.contexts:\n\t\t\traise Error(\"A context named '%s' already exists\" % (context_name,))\n\n\t\tself.name = context_name\n\t\tself.handlers = {}\n\n\t\tself.contexts[self.name] = self", "docstring": "Create a new Bubbler context\n\nParams:\ncontext_name (string):\nName of this context\n\nRaises:\nbubbler.Error:\nIf this context name already exists", "source": "juraj-google-style"}
{"code": "def changes(self, **kwargs):\n    path = ('%s/%s/changes' % (self.manager.path, self.get_id()))\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge request changes.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: List of changes", "source": "codesearchnet"}
{"code": "def parse(self, body):\n    if isinstance(body, six.string_types):\n        body = json.loads(body)\n    version = body['version']\n    self.version = version\n    session = body['session']\n    self.session.new = session['new']\n    self.session.session_id = session['sessionId']\n    application_id = session['application']['applicationId']\n    self.session.application.application_id = application_id\n    if (('attributes' in session) and session['attributes']):\n        self.session.attributes = session.get('attributes', {})\n    else:\n        self.session.attributes = {}\n    self.session.user.user_id = session['user']['userId']\n    self.session.user.access_token = session['user'].get('accessToken', 0)\n    request = body['request']\n    if (request['type'] == 'LaunchRequest'):\n        self.request = LaunchRequest()\n    elif (request['type'] == 'IntentRequest'):\n        self.request = IntentRequest()\n        self.request.intent = Intent()\n        intent = request['intent']\n        self.request.intent.name = intent['name']\n        if (('slots' in intent) and intent['slots']):\n            for (name, slot) in six.iteritems(intent['slots']):\n                self.request.intent.slots[name] = Slot()\n                self.request.intent.slots[name].name = slot['name']\n                self.request.intent.slots[name].value = slot.get('value')\n    elif (request['type'] == 'SessionEndedRequest'):\n        self.request = SessionEndedRequest()\n        self.request.reason = request['reason']\n    self.request.type = request['type']\n    self.request.request_id = request['requestId']\n    self.request.timestamp = request['timestamp']\n    return self", "docstring": "Parse JSON request, storing content in object attributes.\n\nArgs:\nbody: str. HTTP request body.\n\nReturns:\nself", "source": "codesearchnet"}
{"code": "def get_student_certificate(self, username, course_id):\n        \n        \n        resp = self.requester.get(\n            urljoin(\n                self.base_url,\n                '/api/certificates/v0/certificates/{username}/courses/{course_key}/'.format(\n                    username=username,\n                    course_key=course_id\n                )\n            )\n        )\n\n        resp.raise_for_status()\n\n        return Certificate(resp.json())", "docstring": "Returns an Certificate object with the user certificates\n\nArgs:\nusername (str): an edx user's username\ncourse_id (str): an edX course id.\n\nReturns:\nCertificate: object representing the student certificate for a course", "source": "juraj-google-style"}
{"code": "def read(self, n):\n    \n    if self._EOF:\n      return \"\"\n\n    while self._seg_index <= self._last_seg_index:\n      result = self._read_from_seg(n)\n      if result != \"\":\n        return result\n      else:\n        self._next_seg()\n\n    self._EOF = True\n    return \"\"", "docstring": "Read data from file segs.\n\nArgs:\nn: max bytes to read. Must be positive.\n\nReturns:\nsome bytes. May be smaller than n bytes. \"\" when no more data is left.", "source": "juraj-google-style"}
{"code": "def console_get_width(con: tcod.console.Console) -> int:\n    \n    return int(lib.TCOD_console_get_width(_console(con)))", "docstring": "Return the width of a console.\n\nArgs:\ncon (Console): Any Console instance.\n\nReturns:\nint: The width of a Console.\n\n.. deprecated:: 2.0\nUse `Console.width` instead.", "source": "juraj-google-style"}
{"code": "def lookup_prefix(self, prefix, timestamp=timestamp_now):\n    prefix = prefix.strip().upper()\n    if ((self._lookuptype == 'clublogxml') or (self._lookuptype == 'countryfile')):\n        return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)\n    elif (self._lookuptype == 'redis'):\n        (data_dict, index) = self._get_dicts_from_redis('_prefix_', '_prefix_index_', self._redis_prefix, prefix)\n        return self._check_data_for_date(prefix, timestamp, data_dict, index)\n    raise KeyError", "docstring": "Returns lookup data of a Prefix\n\nArgs:\nprefix (string): Prefix of a Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\ndict: Dictionary containing the country specific data of the Prefix\n\nRaises:\nKeyError: No matching Prefix found\nAPIKeyMissingError: API Key for Clublog missing or incorrect\n\nExample:\nThe following code shows how to obtain the information for the prefix \"DH\" from the countryfile.com\ndatabase (default database).\n\n>>> from pyhamtools import LookupLib\n>>> myLookupLib = LookupLib()\n>>> print myLookupLib.lookup_prefix(\"DH\")\n{\n'adif': 230,\n'country': u'Fed. Rep. of Germany',\n'longitude': 10.0,\n'cqz': 14,\n'ituz': 28,\n'latitude': 51.0,\n'continent': u'EU'\n}\n\nNote:\nThis method is available for\n\n- clublogxml\n- countryfile\n- redis", "source": "codesearchnet"}
{"code": "def _freeze_concrete_function(self):\n    if len(self._funcs) == 0:\n        raise ValueError('No ConcreteFunction is specified.')\n    if len(self._funcs) > 1:\n        raise ValueError('This converter can only convert a single ConcreteFunction. Converting multiple functions is under development.')\n    frozen_func, graph_def = _convert_to_constants.convert_variables_to_constants_v2_as_graph(self._funcs[0], lower_control_flow=False)\n    input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource]\n    output_tensors = frozen_func.outputs\n    return (graph_def, input_tensors, output_tensors, frozen_func)", "docstring": "Convert the given ConcreteFunction to frozen graph.\n\nReturns:\ngraph_def: The frozen GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.\nfrozen_func: The frozen ConcreteFunction.\n\nRaises:\nValueError: none or multiple ConcreteFunctions provided.", "source": "github-repos"}
{"code": "def get_changelog(repo_path, from_commit=None):\n    \n    repo = dulwich.repo.Repo(repo_path)\n    tags = get_tags(repo)\n    refs = get_refs(repo)\n    changelog = []\n    maj_version = 0\n    feat_version = 0\n    fix_version = 0\n    start_including = False\n\n    cur_line = ''\n    if from_commit is None:\n        start_including = True\n\n    for commit_sha, children in reversed(\n        get_children_per_first_parent(repo_path).items()\n    ):\n        commit = repo.get_object(commit_sha)\n        maj_version, feat_version, fix_version = get_version(\n            commit=commit,\n            tags=tags,\n            maj_version=maj_version,\n            feat_version=feat_version,\n            fix_version=fix_version,\n        )\n        version = '%s.%s.%s' % (maj_version, feat_version, fix_version)\n\n        if (\n            start_including or commit_sha.startswith(from_commit)\n            or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))\n        ):\n            cur_line = pretty_commit(\n                commit,\n                version,\n            )\n            for child in children:\n                cur_line += pretty_commit(repo.get_object(child), version=None)\n            start_including = True\n            changelog.append(cur_line)\n\n    return '\\n'.join(reversed(changelog))", "docstring": "Given a repo path and an option commit/tag/refspec to start from, will\nget the rpm compatible changelog\n\nArgs:\nrepo_path (str): path to the git repo\nfrom_commit (str): refspec (partial commit hash, tag, branch, full\nrefspec, partial refspec) to start the changelog from\n\nReturns:\nstr: Rpm compatible changelog", "source": "juraj-google-style"}
{"code": "def get_by_ip_hostname(self, ip_hostname):\n    resources = self._client.get_all()\n    resources_filtered = [x for x in resources if (x['credentials']['ip_hostname'] == ip_hostname)]\n    if resources_filtered:\n        return resources_filtered[0]\n    else:\n        return None", "docstring": "Retrieve a storage system by its IP.\n\nWorks only with API version <= 300.\n\nArgs:\nip_hostname: Storage system IP or hostname.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def map_defun(fn, elems, output_dtypes, output_shapes, max_intra_op_parallelism=1):\n    if not isinstance(elems, list):\n        raise ValueError(f'`elems` must be a list of tensors, but was {elems}.')\n    if not isinstance(output_dtypes, list):\n        raise ValueError(f'`output_dtypes` must be a list of `tf.DType` objects, but was {output_dtypes}.')\n    if not isinstance(output_shapes, list):\n        raise ValueError(f'`output_shapes` must be a list of `tf.TensorShape` objects, but was {output_shapes}.')\n    concrete_fn = fn.get_concrete_function()\n    elems = [ops.convert_to_tensor(e) for e in elems]\n    output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes]\n    return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs, output_dtypes, output_shapes, concrete_fn, max_intra_op_parallelism)", "docstring": "Map a function on the list of tensors unpacked from `elems` on dimension 0.\n\nArgs:\nfn: A function (`function.defun`) that takes a list of tensors and returns\nanother list of tensors. The output list has the same types as\noutput_dtypes. The elements of the output list have the same dimension 0\nas `elems`, and the remaining dimensions correspond to those of\n`fn_output_shapes`.\nelems: A list of tensors.\noutput_dtypes: A list of dtypes corresponding to the output types of the\nfunction.\noutput_shapes: A list of `TensorShape`s corresponding to the output shapes\nfrom each invocation of the function on slices of inputs.\nmax_intra_op_parallelism: An integer. If positive, sets the max parallelism\nlimit of each function call to this.\n\nRaises:\nValueError: if any of the inputs are malformed.\n\nReturns:\nA list of `Tensor` objects with the same types as `output_dtypes`.", "source": "github-repos"}
{"code": "def element_wise_op(array, other, op, ty):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n\n    other_var = weld_obj.update(other)\n    if isinstance(other, WeldObject):\n        other_var = other.obj_id\n        weld_obj.dependencies[other_var] = other\n\n    # NOTE: the original Weld template string was lost during extraction; the\n    # template below is an assumed reconstruction of an element-wise map over\n    # the two zipped vectors.\n    weld_template = \"map(zip(%(array)s, %(other)s), |a: {%(ty)s, %(ty)s}| a.$0 %(op)s a.$1)\"\n\n    weld_obj.weld_code = weld_template % {\"array\": array_var,\n                                          \"other\": other_var,\n                                          \"ty\": ty, \"op\": op}\n    return weld_obj", "docstring": "Operation of series and other, element-wise (binary operator add)\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nother (WeldObject / Numpy.ndarray): Second Input array\nop (str): Op string used to compute element-wise operation (+ / *)\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def get_variation_from_id(self, experiment_key, variation_id):\n    \n\n    variation_map = self.variation_id_map.get(experiment_key)\n\n    if variation_map:\n      variation = variation_map.get(variation_id)\n      if variation:\n        return variation\n      else:\n        self.logger.error('Variation ID \"%s\" is not in datafile.' % variation_id)\n        self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))\n        return None\n\n    self.logger.error('Experiment key \"%s\" is not in datafile.' % experiment_key)\n    self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n    return None", "docstring": "Get variation given experiment and variation ID.\n\nArgs:\nexperiment: Key representing parent experiment of variation.\nvariation_id: ID representing the variation.\n\nReturns\nObject representing the variation.", "source": "juraj-google-style"}
{"code": "def __init__(self, max_str_len: int=100):\n    self.training_bar = None\n    self.prediction_bar = None\n    self.max_str_len = max_str_len", "docstring": "Initialize the callback with optional max_str_len parameter to control string truncation length.\n\nArgs:\nmax_str_len (`int`):\nMaximum length of strings to display in logs.\nLonger strings will be truncated with a message.", "source": "github-repos"}
{"code": "def pop_chunk(self, chunk_max_size):\n        \n        if self._total_length < chunk_max_size:\n            \n            res = self._tobytes()\n            self.clear()\n            return res\n        first_iteration = True\n        while True:\n            try:\n                data = self._deque.popleft()\n                data_length = len(data)\n                self._total_length -= data_length\n                if first_iteration:\n                    \n                    if data_length == chunk_max_size:\n                        \n                        return data\n                    elif data_length > chunk_max_size:\n                        \n                        \n                        view = self._get_pointer_or_memoryview(data,\n                                                               data_length)\n                        self.appendleft(view[chunk_max_size:])\n                        return view[:chunk_max_size]\n                    else:\n                        \n                        \n                        chunk_write_buffer = WriteBuffer()\n                else:\n                    \n                    if chunk_write_buffer._total_length + data_length \\\n                       > chunk_max_size:\n                        view = self._get_pointer_or_memoryview(data,\n                                                               data_length)\n                        limit = chunk_max_size - \\\n                            chunk_write_buffer._total_length - data_length\n                        self.appendleft(view[limit:])\n                        data = view[:limit]\n                chunk_write_buffer.append(data)\n                if chunk_write_buffer._total_length >= chunk_max_size:\n                    break\n            except IndexError:\n                \n                self._has_view = False\n                break\n            first_iteration = False\n        return chunk_write_buffer._tobytes()", "docstring": "Pops a chunk of the given max size.\n\nOptimized to avoid too much string copies.\n\nArgs:\nchunk_max_size (int): max size of the returned chunk.\n\nReturns:\nstring (bytes) with a size <= chunk_max_size.", "source": "juraj-google-style"}
{"code": "def listNodes(self, vendorSpecific=None):\n        \n        response = self.listNodesResponse(vendorSpecific)\n        return self._read_dataone_type_response(response, 'NodeList')", "docstring": "See Also: listNodesResponse()\n\nArgs:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def any_to_datetime(self, time_input, tz=None):\n    dt_value = self.unix_time_to_datetime(time_input, tz)\n    if (dt_value is None):\n        dt_value = self.date_to_datetime(time_input, tz)\n    if (dt_value is None):\n        dt_value = self.human_date_to_datetime(time_input, tz)\n    if (dt_value is None):\n        raise RuntimeError('Could not format input ({}) to datetime string.'.format(time_input))\n    return dt_value", "docstring": "Return datetime object from multiple formats.\n\nFormats:\n\n#. Human Input (e.g 30 days ago, last friday)\n#. ISO 8601 (e.g. 2017-11-08T16:52:42Z)\n#. Loose Date format (e.g. 2017 12 25)\n#. Unix Time/Posix Time/Epoch Time (e.g. 1510686617 or 1510686617.298753)\n\nArgs:\ntime_input (string): The time input string (see formats above).\ntz (string): The time zone for the returned data.\n\nReturns:\n(datetime.datetime): Python datetime.datetime object.", "source": "codesearchnet"}
{"code": "def register_for_auto_class(cls, auto_class='TFAutoModel'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom models as the ones in the\nlibrary are already mapped with an auto class.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"TFAutoModel\"`):\nThe auto class to register this new model with.", "source": "github-repos"}
{"code": "def remove_redistribution(self, protocol):\n    protocols = ['bgp', 'rip', 'static', 'connected']\n    if (protocol not in protocols):\n        raise ValueError('redistributed protocol must be bgp, connected, rip or static')\n    cmd = 'no redistribute {}'.format(protocol)\n    return self.configure_ospf(cmd)", "docstring": "Removes a protocol redistribution from OSPF\n\nArgs:\nprotocol (str): protocol redistribution to remove\n\nReturns:\nbool: True if the command completes successfully\n\nRaises:\nValueError: This will be raised if the protocol passed is not one\nof the following: [rip, bgp, static, connected]", "source": "codesearchnet"}
{"code": "def _get_degree(num_nodes):\n        \n        \n        \n        \n        d_float = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)\n        d_int = int(np.round(d_float))\n        if (d_int + 1) * (d_int + 2) == 2 * num_nodes:\n            return d_int\n\n        else:\n            raise ValueError(num_nodes, \"not a triangular number\")", "docstring": "Get the degree of the current surface.\n\nArgs:\nnum_nodes (int): The number of control points for a\nB |eacute| zier surface.\n\nReturns:\nint: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2`\nequals ``num_nodes``.\n\nRaises:\nValueError: If ``num_nodes`` isn't a triangular number.", "source": "juraj-google-style"}
{"code": "class MimiDecoderOutput(ModelOutput):\n    audio_values: Optional[torch.FloatTensor] = None\n    decoder_past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None", "docstring": "Args:\naudio_values (`torch.FloatTensor`  of shape `(batch_size, segment_length)`, *optional*):\nDecoded audio values, obtained using the decoder part of Mimi.\ndecoder_past_key_values (`Cache`, *optional*):\nPre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.\nThis typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\nThe model will output the same cache format that is fed as input.\n\nIf `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't\nhave their past key value states given to this model).", "source": "github-repos"}
{"code": "def kill_mprocess(process):\n    \n    if process and proc_alive(process):\n        process.terminate()\n        process.communicate()\n    return not proc_alive(process)", "docstring": "kill process\nArgs:\nprocess - Popen object for process", "source": "juraj-google-style"}
{"code": "def getargspec(obj):\n    if isinstance(obj, functools.partial):\n        return _get_argspec_for_partial(obj)\n    decorators, target = tf_decorator.unwrap(obj)\n    spec = next((d.decorator_argspec for d in decorators if d.decorator_argspec is not None), None)\n    if spec:\n        return spec\n    try:\n        return _getargspec(target)\n    except TypeError:\n        pass\n    if isinstance(target, type):\n        try:\n            return _getargspec(target.__init__)\n        except TypeError:\n            pass\n        try:\n            return _getargspec(target.__new__)\n        except TypeError:\n            pass\n    return _getargspec(type(target).__call__)", "docstring": "TFDecorator-aware replacement for `inspect.getargspec`.\n\nNote: `getfullargspec` is recommended as the python 2/3 compatible\nreplacement for this function.\n\nArgs:\nobj: A function, partial function, or callable object, possibly decorated.\n\nReturns:\nThe `ArgSpec` that describes the signature of the outermost decorator that\nchanges the callable's signature, or the `ArgSpec` that describes\nthe object if not decorated.\n\nRaises:\nValueError: When callable's signature can not be expressed with\nArgSpec.\nTypeError: For objects of unsupported types.", "source": "github-repos"}
{"code": "def wait_until_final(self, poll_interval=1, timeout=60):\n        \n        start_time = time.time()\n        elapsed = 0\n        while (self.status != \"complete\" and\n                (timeout <= 0 or elapsed < timeout)):\n            time.sleep(poll_interval)\n            self.refresh()\n            elapsed = time.time() - start_time", "docstring": "It will poll the URL to grab the latest status resource in a given\ntimeout and time interval.\n\nArgs:\npoll_interval (int): how often to poll the status service.\ntimeout (int): how long to poll the URL until giving up. Use <= 0\nto wait forever", "source": "juraj-google-style"}
{"code": "def save_graph_def(file_name, frozen_graph_def):\n    tf.io.write_graph(frozen_graph_def, os.path.dirname(file_name), os.path.basename(file_name), as_text=False)\n    tf.compat.v1.logging.info('Saved frozen graph to %s', file_name)", "docstring": "Writes a graph def file out to disk.\n\nArgs:\nfile_name: Where to save the file.\nfrozen_graph_def: GraphDef proto object to save.", "source": "github-repos"}
{"code": "def ws_db004(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws_db004`'.format(value))\n    self._ws_db004 = value", "docstring": "Corresponds to IDD Field `ws_db004`\nMean wind speed coincident with 0.4% dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `ws_db004`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def pretty_dump(fn):\n\n    @wraps(fn)\n    def pretty_dump_wrapper(*args, **kwargs):\n        response.content_type = 'application/json; charset=utf-8'\n        return json.dumps(fn(*args, **kwargs), indent=4, separators=(',', ': '))\n    return pretty_dump_wrapper", "docstring": "Decorator used to output prettified JSON.\n\n``response.content_type`` is set to ``application/json; charset=utf-8``.\n\nArgs:\nfn (fn pointer): Function returning any basic python data structure.\n\nReturns:\nstr: Data converted to prettified JSON.", "source": "codesearchnet"}
{"code": "def __init__(self, key_type=None, value_type=None, min_length=None, max_length=None, empty=True):\n        \n        super(DictTypeChecker, self).__init__(base_type=dict)\n        self.key_type = key_type\n        self.value_type = value_type\n        self.min_length = min_length\n        self.max_length = max_length\n        self.empty = empty", "docstring": "Initialization method.\n\nArgs:\nkey_type (type): the type of the dict keys.\nvalue_type (type): the type of the dict values.\nmin_length (int): minimum length of the dict (included).\nmax_length (int): maximum length of the dict (included).\nempty (bool): whether empty dict is allowed.", "source": "juraj-google-style"}
{"code": "def _get_argspec_for_partial(obj):\n    n_prune_args = len(obj.args)\n    partial_keywords = obj.keywords or {}\n    args, varargs, keywords, defaults = getargspec(obj.func)\n    args = args[n_prune_args:]\n    no_default = object()\n    all_defaults = [no_default] * len(args)\n    if defaults:\n        all_defaults[-len(defaults):] = defaults\n    for kw, default in iter(partial_keywords.items()):\n        if kw in args:\n            idx = args.index(kw)\n            all_defaults[idx] = default\n        elif not keywords:\n            raise ValueError(f'{obj} does not have a **kwargs parameter, but contains an unknown partial keyword {kw}.')\n    first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)\n    if first_default is None:\n        return ArgSpec(args, varargs, keywords, None)\n    invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]\n    if invalid_default_values:\n        raise ValueError(f'{obj} has some keyword-only arguments, which are not supported: {invalid_default_values}.')\n    return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))", "docstring": "Implements `getargspec` for `functools.partial` objects.\n\nArgs:\nobj: The `functools.partial` object\nReturns:\nAn `inspect.ArgSpec`\nRaises:\nValueError: When callable's signature can not be expressed with\nArgSpec.", "source": "github-repos"}
{"code": "def set_custom_getter_compose(custom_getter):\n    tf.get_variable_scope().set_custom_getter(_compose_custom_getters(tf.get_variable_scope().custom_getter, custom_getter))", "docstring": "Set a custom getter in the current variable scope.\n\nDo not overwrite the existing custom getter - rather compose with it.\n\nArgs:\ncustom_getter: a custom getter.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context, encoding='utf-8'):\n    \n    super(ZipFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._zip_file = None\n    self.encoding = encoding", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): a resolver context.\nencoding (Optional[str]): encoding of the file entry name.", "source": "juraj-google-style"}
{"code": "def _publish_to_subscribers(event: Event):\n    subscribers = get_subscribers(event.object_type)\n    for sub in subscribers:\n        DB.prepend_to_list(_keys.published(event.object_type, sub), event.id, pipeline=True)\n        event_dict = deepcopy(event.config)\n        event_dict.pop('id')\n        DB.set_hash_value(_keys.data(event.object_type, sub), event.id, str(event_dict), pipeline=True)\n    DB.publish(event.object_type, event.id, pipeline=True)", "docstring": "Publish and event to all subscribers.\n\n- Adds the event id to the published event list for all subscribers.\n- Adds the event data to the published event data for all subscribers.\n- Publishes the event id notification to all subscribers.\n\nArgs:\nevent (Event): Event object to publish.", "source": "codesearchnet"}
{"code": "def resolve_import(name, is_from, is_star):\n    if (name.startswith('.') or is_builtin(name)):\n        return None\n    ret = _resolve_import(name)\n    if ((ret is None) and is_from and (not is_star)):\n        (package, _) = name.rsplit('.', 1)\n        ret = _resolve_import(package)\n    return ret", "docstring": "Use python to resolve an import.\n\nArgs:\nname: The fully qualified module name.\nis_from: Whether this is a from-import.\nis_star: Whether this is a star (from x import *) import.\n\nReturns:\nThe path to the module source file or None.", "source": "codesearchnet"}
{"code": "def send_message(msg: 'EFBMsg') -> Optional['EFBMsg']:\n    \n    global middlewares, master, slaves\n\n    if msg is None:\n        return\n\n    \n    for i in middlewares:\n        m = i.process_message(msg)\n        if m is None:\n            return None\n        \n        assert m is not None\n        msg = m\n\n    msg.verify()\n\n    if msg.deliver_to.channel_id == master.channel_id:\n        return master.send_message(msg)\n    elif msg.deliver_to.channel_id in slaves:\n        return slaves[msg.deliver_to.channel_id].send_message(msg)\n    else:\n        raise EFBChannelNotFound(msg)", "docstring": "Deliver a message to the destination channel.\n\nArgs:\nmsg (EFBMsg): The message\n\nReturns:\nThe message sent by the destination channel,\nincludes the updated message ID from there.\nReturns ``None`` if the message is not sent.", "source": "juraj-google-style"}
{"code": "def enable_sns_notification(self, region, trailName):\n    ct = self.session.client('cloudtrail', region_name=region)\n    ct.update_trail(Name=trailName, SnsTopicName=self.topic_name)\n    auditlog(event='cloudtrail.enable_sns_notification', actor=self.ns, data={'account': self.account.account_name, 'region': region})\n    self.log.info('Enabled SNS notifications for trail {} in {}/{}'.format(trailName, self.account.account_name, region))", "docstring": "Enable SNS notifications for a Trail\n\nArgs:\nregion (`str`): Name of the AWS region\ntrailName (`str`): Name of the CloudTrail Trail\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def grepPDF(self, path):\n    with open(path, 'rb') as pdf_file_obj:\n        match = set()\n        text = ''\n        pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)\n        pages = pdf_reader.numPages\n        for page in range(pages):\n            page_obj = pdf_reader.getPage(page)\n            text += ('\\n' + page_obj.extractText())\n        match.update(set((x.lower() for x in re.findall(self._keywords, text, re.IGNORECASE))))\n    return match", "docstring": "Parse PDF files text content for keywords.\n\nArgs:\npath: PDF file path.\n\nReturns:\nmatch: set of unique occurrences of every match.", "source": "codesearchnet"}
{"code": "def convert_builtin_to_typing(typ):\n    if getattr(typ, '__origin__', None) in _BUILTINS_TO_TYPING:\n        args = map(convert_builtin_to_typing, typ.__args__)\n        typ = _BUILTINS_TO_TYPING[typ.__origin__].copy_with(tuple(args))\n    return typ", "docstring": "Convert recursively a given builtin to a typing object.\n\nArgs:\ntyp (`builtins`): builtin object that exist in _BUILTINS_TO_TYPING.\n\nReturns:\ntype: The given builtins converted to a type.", "source": "github-repos"}
{"code": "def __init__(self, validator_map):\n        \n        self.validators = dict(validator_map)\n        v_sorted = sorted(self.validators.items(), key=lambda t: t[0])\n        self.validator_descriptions = ['{}:<{}>'.format(k, v) for k, v in v_sorted]\n        self.name = 'dict({})'.format(', '.join(self.validator_descriptions))\n        self.description = '\\nDict options: \\n  '\n        self.description += '\\n  '.join(self.validator_descriptions)\n        self.kv_regex = re.compile(r'[=:]+')", "docstring": "Create a dictonary type from a dictionary of other types\nArgs:\nvalidator_map -- a mapping from names to types\nExamples:\n>>> Dict({'a': int, 'b': int})('a:1,b:2')\n{'a': 1, 'b': 2}\n\n>>> Dict({'a': str, 'b': int})('a:asdf b=1234')\n{'a': 'asdf', 'b': 1234}\n\n>>> Dict({'a': Int() | Keyword('', None), 'b': Int()})('a,b=1')\n{'a': None, 'b': 1}", "source": "juraj-google-style"}
{"code": "def __call__(self, class_logits, box_regression):\n        \n\n        class_logits = cat(class_logits, dim=0)\n        box_regression = cat(box_regression, dim=0)\n        device = class_logits.device\n\n        if not hasattr(self, \"_proposals\"):\n            raise RuntimeError(\"subsample needs to be called before\")\n\n        proposals = self._proposals\n\n        labels = cat([proposal.get_field(\"labels\") for proposal in proposals], dim=0)\n        regression_targets = cat(\n            [proposal.get_field(\"regression_targets\") for proposal in proposals], dim=0\n        )\n\n        classification_loss = F.cross_entropy(class_logits, labels)\n\n        \n        \n        \n        sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)\n        labels_pos = labels[sampled_pos_inds_subset]\n        if self.cls_agnostic_bbox_reg:\n            map_inds = torch.tensor([4, 5, 6, 7], device=device)\n        else:\n            map_inds = 4 * labels_pos[:, None] + torch.tensor(\n                [0, 1, 2, 3], device=device)\n\n        box_loss = smooth_l1_loss(\n            box_regression[sampled_pos_inds_subset[:, None], map_inds],\n            regression_targets[sampled_pos_inds_subset],\n            size_average=False,\n            beta=1,\n        )\n        box_loss = box_loss / labels.numel()\n\n        return classification_loss, box_loss", "docstring": "Computes the loss for Faster R-CNN.\nThis requires that the subsample method has been called beforehand.\n\nArguments:\nclass_logits (list[Tensor])\nbox_regression (list[Tensor])\n\nReturns:\nclassification_loss (Tensor)\nbox_loss (Tensor)", "source": "juraj-google-style"}
{"code": "async def request(context, url, timeout=60, method='get', good=(200,), retry=tuple(range(500, 512)), return_type='text', **kwargs):\n    session = context.session\n    loggable_url = get_loggable_url(url)\n    async with async_timeout.timeout(timeout):\n        log.debug('{} {}'.format(method.upper(), loggable_url))\n        async with session.request(method, url, **kwargs) as resp:\n            log.debug('Status {}'.format(resp.status))\n            message = 'Bad status {}'.format(resp.status)\n            if (resp.status in retry):\n                raise ScriptWorkerRetryException(message)\n            if (resp.status not in good):\n                raise ScriptWorkerException(message)\n            if (return_type == 'text'):\n                return (await resp.text())\n            elif (return_type == 'json'):\n                return (await resp.json())\n            else:\n                return resp", "docstring": "Async aiohttp request wrapper.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nurl (str): the url to request\ntimeout (int, optional): timeout after this many seconds. Default is 60.\nmethod (str, optional): The request method to use.  Default is 'get'.\ngood (list, optional): the set of good status codes.  Default is (200, )\nretry (list, optional): the set of status codes that result in a retry.\nDefault is tuple(range(500, 512)).\nreturn_type (str, optional): The type of value to return.  Takes\n'json' or 'text'; other values will return the response object.\nDefault is text.\n**kwargs: the kwargs to send to the aiohttp request function.\n\nReturns:\nobject: the response text() if return_type is 'text'; the response\njson() if return_type is 'json'; the aiohttp request response\nobject otherwise.\n\nRaises:\nScriptWorkerRetryException: if the status code is in the retry list.\nScriptWorkerException: if the status code is not in the retry list or\ngood list.", "source": "codesearchnet"}
{"code": "def CanSplit(self, must_split):\n    current = self.next_token\n    previous = current.previous_token\n    if current.is_pseudo:\n        return False\n    if not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and (subtypes.DICTIONARY_KEY not in current.subtypes) and (not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')):\n        return False\n    if not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and (not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')):\n        return False\n    if previous and previous.value == '(' and (current.value == ')'):\n        token = previous.previous_token\n        while token:\n            prev = token.previous_token\n            if not prev or prev.name not in {'NAME', 'DOT'}:\n                break\n            token = token.previous_token\n        if token and subtypes.DICTIONARY_VALUE in token.subtypes:\n            if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'):\n                return False\n    if previous and previous.value == '.' and (current.value == '.'):\n        return False\n    return current.can_break_before", "docstring": "Determine if we can split before the next token.\n\nArguments:\nmust_split: (bool) A newline was required before this token.\n\nReturns:\nTrue if the line can be split before the next token.", "source": "github-repos"}
{"code": "def assignment_propagation(node):\n  \n  n_reads = read_counts(node)\n\n  to_remove = []\n  for succ in gast.walk(node):\n    \n    \n    if (isinstance(succ, gast.Assign) and isinstance(succ.value, gast.Name) and\n        len(succ.targets) == 1 and isinstance(succ.targets[0], gast.Name)):\n      rhs_name = succ.value.id\n      \n      rhs_defs = [def_[1] for def_ in anno.getanno(succ, 'definitions_in')\n                  if def_[0] == rhs_name]\n      \n      \n      \n      if (len(rhs_defs) == 1 and isinstance(rhs_defs[0], gast.Assign) and\n          n_reads[rhs_defs[0]] == 1 and\n          isinstance(rhs_defs[0].value, gast.Name) and\n          isinstance(rhs_defs[0].targets[0], gast.Name)):\n        \n        to_remove.append(rhs_defs[0])\n        \n        succ.value = rhs_defs[0].value\n\n  \n  transformers.Remove(to_remove).visit(node)\n  anno.clearanno(node)\n  return node", "docstring": "Perform assignment propagation.\n\nAssignment propagation is not a compiler optimization as much as a\nreadability optimization. If a variable name is used only once, it gets\nrenamed when possible e.g. `y = x; z = y` will become `z = x`.\n\nArgs:\nnode: The AST to optimize.\n\nReturns:\nThe optimized AST.", "source": "juraj-google-style"}
{"code": "def foreach_worker(self, fn):\n    results = ray.get([w.foreach_worker.remote(fn) for w in self.workers])\n    return results", "docstring": "Apply the given function to each remote worker.\n\nReturns:\nList of results from applying the function.", "source": "codesearchnet"}
{"code": "def complies_with_scope(queue_item, new_request, scope):\n        \n\n        if not URLHelper.is_parsable(queue_item.request.url):\n            return False\n\n        if not URLHelper.is_parsable(new_request.url):\n            return False\n\n        if scope.request_methods:\n            if not queue_item.request.method in scope.request_methods:\n                return False\n\n        if scope.protocol_must_match:\n            if URLHelper.get_protocol(queue_item.request.url) != URLHelper.get_protocol(new_request.url):\n                return False\n\n        if scope.subdomain_must_match:\n            current_subdomain = URLHelper.get_subdomain(queue_item.request.url)\n            new_subdomain = URLHelper.get_subdomain(new_request.url)\n\n            www_matches = False\n\n            if current_subdomain == \"www\" and new_subdomain == \"\":\n                www_matches = True\n\n            if new_subdomain == \"www\" and current_subdomain == \"\":\n                www_matches = True\n\n            if not www_matches and current_subdomain != new_subdomain:\n                return False\n\n        if scope.hostname_must_match:\n            if URLHelper.get_hostname(queue_item.request.url) != URLHelper.get_hostname(new_request.url):\n                return False\n\n        if scope.tld_must_match:\n            if URLHelper.get_tld(queue_item.request.url) != URLHelper.get_tld(new_request.url):\n                return False\n\n        return True", "docstring": "Check if the new request complies with the crawling scope.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.\nnew_request (:class:`nyawc.http.Request`): The request to check.\nscope (:class:`nyawc.Options.OptionsScope`): The scope to check.\n\nReturns:\nbool: True if it complies, False otherwise.", "source": "juraj-google-style"}
{"code": "def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):\n    payload = {'language': self.client.language.value, 'command': PaymentCommand.GET_TOKENS.value, 'merchant': {'apiLogin': self.client.api_login, 'apiKey': self.client.api_key}, 'creditCardTokenInformation': {'payerId': payer_id, 'creditCardTokenId': credit_card_token_id, 'startDate': start_date.strftime('%Y-%m-%dT%H:%M:%S'), 'endDate': end_date.strftime('%Y-%m-%dT%H:%M:%S')}, 'test': self.client.is_test}\n    return self.client._post(self.url, json=payload)", "docstring": "With this functionality you can query previously the Credit Cards Token.\n\nArgs:\npayer_id:\ncredit_card_token_id:\nstart_date:\nend_date:\n\nReturns:", "source": "codesearchnet"}
{"code": "def initialize(self, input_shape, rng):\n    try:\n        if (not self._first_init):\n            return ()\n        self._first_init = False\n        self._params = self.new_parameters(input_shape, rng)\n        return self._params\n    except Exception:\n        (name, trace) = (self.__class__.__name__, _short_traceback())\n        raise LayerError(name, 'initialize', self._caller, input_shape, trace)", "docstring": "Initialize the layer given an input shape and rng.\n\nReturns new_parameters(input_shape, rng) on the first call and () on any\nsubsequent call, as the layer is already initialized. This is used for\nnetworks that share parameters, so the layer only produces them once.\n\nNote that all arguments and return values can be tuples or dictionaries\nor arbitraty nested structures composed of tuples and dictionaries.\n\nArgs:\ninput_shape: a tuple representing the shape of the input.\nrng: random number generator.\n\nReturns:\nNewly created parameters on the first call and () on all subsequent calls.", "source": "codesearchnet"}
{"code": "def open_file_with_default_program(file_path, background=False, return_cmd=False):\n    desktop_env = system.get_name()\n    if (desktop_env == 'windows'):\n        open_file_cmd = ('explorer.exe ' + (\"'%s'\" % file_path))\n    elif (desktop_env == 'mac'):\n        open_file_cmd = ('open ' + (\"'%s'\" % file_path))\n    else:\n        file_mime_type = system.get_cmd_out(['xdg-mime', 'query', 'filetype', file_path])\n        desktop_file = system.get_cmd_out(['xdg-mime', 'query', 'default', file_mime_type])\n        open_file_cmd = desktopfile.execute(desktopfile.locate(desktop_file)[0], files=[file_path], return_cmd=True)\n    if return_cmd:\n        return open_file_cmd\n    else:\n        def_program_proc = sp.Popen(open_file_cmd, shell=True)\n        if (not background):\n            def_program_proc.wait()", "docstring": "Opens a file with the default program for that type.\n\nOpen the file with the user's preferred application.\n\nArgs:\nfile_path  (str) : Path to the file to be opened.\nbackground (bool): Run the program in the background, instead of waiting for completion. Defaults to ``False``.\nreturn_cmd (bool): Returns the command to run the program (str) instead of running it. Defaults to ``False``.\n\nReturns:\nstr: Only if ``return_cmd``, the command to run the program is returned instead of running it. Else returns nothing.", "source": "codesearchnet"}
{"code": "def _parse_result_block_line(self, instrumentation_block, line):\n    instrumentation_block.add_value(line)\n    return instrumentation_block", "docstring": "Parses the instrumentation result block's line.\n\nArgs:\ninstrumentation_block: _InstrumentationBlock, the instrumentation\nresult block for the instrumentation run.\nline: string, the raw instrumentation output to add to the\ninstrumenation result block's _InstrumentationResultBlocki\nobject.\n\nReturns:\nThe instrumentation result block for the instrumentation run.", "source": "github-repos"}
{"code": "def find(self, binding_id, instance):\n        \n        binding = AtlasServiceBinding.Binding(binding_id, instance)\n        self.backend.storage.populate(binding)\n        return binding", "docstring": "find an instance\n\nCreate a new instance and populate it with data stored if it exists.\n\nArgs:\nbinding_id (string): UUID of the binding\ninstance (AtlasServiceInstance.Instance): instance\n\nReturns:\nAtlasServiceBinding: A binding", "source": "juraj-google-style"}
{"code": "def valid_ip_prefix(ip_prefix):\n    try:\n        ip_prefix = ipaddress.ip_network(ip_prefix)\n    except ValueError:\n        return False\n    else:\n        if ((ip_prefix.version == 4) and (ip_prefix.max_prefixlen != 32)):\n            return False\n        if ((ip_prefix.version == 6) and (ip_prefix.max_prefixlen != 128)):\n            return False\n        return True", "docstring": "Perform a sanity check on ip_prefix.\n\nArguments:\nip_prefix (str): The IP-Prefix to validate\n\nReturns:\nTrue if ip_prefix is a valid IPv4 address with prefix length 32 or a\nvalid IPv6 address with prefix length 128, otherwise False", "source": "codesearchnet"}
{"code": "def download_files_maybe_extract(urls, directory, check_files=[]):\n    \n    check_files = [os.path.join(directory, f) for f in check_files]\n    if _check_download(*check_files):\n        return\n\n    for url in urls:\n        download_file_maybe_extract(url=url, directory=directory)\n\n    if not _check_download(*check_files):\n        raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')", "docstring": "Download the files at ``urls`` to ``directory``. Extract to ``directory`` if tar or zip.\n\nArgs:\nurls (str): Url of files.\ndirectory (str): Directory to download to.\ncheck_files (list of str): Check if these files exist, ensuring the download succeeded.\nIf these files exist before the download, the download is skipped.\n\nRaises:\nValueError: Error if one of the ``check_files`` are not found following the download.", "source": "juraj-google-style"}
{"code": "def help_members(obj, use_other=False):\n    r\n    import utool as ut\n    attrnames = dir(obj)\n    attr_list = [getattr(obj, attrname) for attrname in attrnames]\n    attr_types = ut.lmap(ut.type_str, map(type, attr_list))\n    unique_types, groupxs = ut.group_indices(attr_types)\n    type_to_items = ut.dzip(unique_types, ut.apply_grouping(attr_list, groupxs))\n    type_to_itemname = ut.dzip(unique_types, ut.apply_grouping(attrnames, groupxs))\n    \n    \n    memtypes = ['instancemethod']  \n    func_mems = ut.dict_subset(type_to_items, memtypes, [])\n\n    func_list = ut.flatten(func_mems.values())\n    defsig_list = []\n    num_unbound_args_list = []\n    num_args_list = []\n    for func in func_list:\n        \n        argspec = ut.get_func_argspec(func)\n        args = argspec.args\n        unbound_args = get_unbound_args(argspec)\n        defsig = ut.func_defsig(func)\n        defsig_list.append(defsig)\n        num_unbound_args_list.append(len(unbound_args))\n        num_args_list.append(len(args))\n\n    group = ut.hierarchical_group_items(defsig_list, [num_unbound_args_list, num_args_list])\n    print(repr(obj))\n    print(ut.repr3(group, strvals=True))\n\n    if use_other:\n        other_mems = ut.delete_keys(type_to_items.copy(), memtypes)\n        other_mems_attrnames = ut.dict_subset(type_to_itemname, other_mems.keys())\n        named_other_attrs = ut.dict_union_combine(other_mems_attrnames, other_mems, lambda x, y: list(zip(x, y)))\n        print(ut.repr4(named_other_attrs, nl=2, strvals=True))", "docstring": "r\"\"\"\nInspects members of a class\n\nArgs:\nobj (class or module):\n\nCommandLine:\npython -m utool.util_inspect help_members\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_inspect import *  # NOQA\n>>> import utool as ut\n>>> obj = ut.DynStruct\n>>> result = help_members(obj)\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def nltk_stemmer(stemmer, token, i=None, tokens=None):\n    \n\n    def wrapped_stem(token, metadata=None):\n        return stemmer.stem(token)\n\n    return token.update(wrapped_stem)", "docstring": "Wrapper around a NLTK SnowballStemmer, which includes stop words for\neach language.\n\nArgs:\nstemmer (SnowballStemmer): Stemmer instance that performs the stemming.\ntoken (lunr.Token): The token to stem.\ni (int): The index of the token in a set.\ntokens (list): A list of tokens representing the set.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_path_prefix):\n    \n    super(WindowsRegistryKeyPathPrefixFilter, self).__init__()\n    self._key_path_prefix = key_path_prefix", "docstring": "Initializes a Windows Registry key filter.\n\nArgs:\nkey_path_prefix (str): the key path prefix.", "source": "juraj-google-style"}
{"code": "def filter_spec(spec, paths, wildcard='*', separator='/'):\n    \n\n    def remove_key(path, spec):\n        if len(path) == 0:\n            return\n        elif len(path) == 1:\n            key = path.pop()\n            if not isinstance(spec, collections.Mapping):\n                raise LagoUserException(\n                    'You have tried to remove the following key - \"{key}\".\\n'\n                    'Keys can not be removed from type {spec_type}\\n'\n                    'Please verify that path - \"{{path}}\" is valid'.format(\n                        key=key, spec_type=type(spec)\n                    )\n                )\n            if key == wildcard:\n                spec.clear()\n            else:\n                spec.pop(key, None)\n        else:\n            current = path[0]\n            if current == wildcard:\n                if isinstance(spec, list):\n                    iterator = iter(spec)\n                elif isinstance(spec, collections.Mapping):\n                    iterator = spec.itervalues()\n                else:\n                    raise LagoUserException(\n                        'Glob char {char} should refer only to dict or list, '\n                        'not to {spec_type}\\n'\n                        'Please fix path - \"{{path}}\"'.format(\n                            char=wildcard, spec_type=type(spec)\n                        )\n                    )\n\n                for i in iterator:\n                    remove_key(path[1:], i)\n            else:\n                try:\n                    remove_key(path[1:], spec[current])\n                except KeyError:\n                    raise LagoUserException(\n                        'Malformed path \"{{path}}\", key \"{key}\" '\n                        'does not exist'.format(key=current)\n                    )\n                except TypeError:\n                    raise LagoUserException(\n                        'Malformed path \"{{path}}\", can not get '\n                        'by key from type {spec_type}'.format(\n                            spec_type=type(spec)\n                        )\n                    )\n\n    for path in paths:\n        try:\n            remove_key(path.split(separator), spec)\n        except LagoUserException as e:\n            e.message = e.message.format(path=path)\n            raise", "docstring": "Remove keys from a spec file.\nFor example, with the following path: domains/*/disks/*/metadata\nall the metadata dicts from all domains disks will be removed.\n\nArgs:\nspec (dict): spec to remove keys from\npaths (list): list of paths to the keys that should be removed\nwildcard (str): wildcard character\nseparator (str): path separator\n\nReturns:\nNone\n\nRaises:\nutils.LagoUserException: If a malformed path was detected", "source": "juraj-google-style"}
{"code": "def parse_datetime(value):\n    \n    if not value:\n        return None\n    elif isinstance(value, datetime.datetime):\n        return value\n    return dateutil.parser.parse(value)", "docstring": "Attempts to parse `value` into an instance of ``datetime.datetime``. If\n`value` is ``None``, this function will return ``None``.\n\nArgs:\nvalue: A timestamp. This can be a string or datetime.datetime value.", "source": "juraj-google-style"}
{"code": "def check_initial_web_request(self, item_session: ItemSession, request: HTTPRequest) -> Tuple[(bool, str)]:\n    (verdict, reason, test_info) = self.consult_filters(item_session.request.url_info, item_session.url_record)\n    if (verdict and self._robots_txt_checker):\n        can_fetch = (yield from self.consult_robots_txt(request))\n        if (not can_fetch):\n            verdict = False\n            reason = 'robotstxt'\n    (verdict, reason) = self.consult_hook(item_session, verdict, reason, test_info)\n    return (verdict, reason)", "docstring": "Check robots.txt, URL filters, and scripting hook.\n\nReturns:\ntuple: (bool, str)\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def _rows_event_to_dict(e, stream):\n    \n    pk_cols = e.primary_key if isinstance(e.primary_key, (list, tuple)) \\\n        else (e.primary_key, )\n\n    if isinstance(e, row_event.UpdateRowsEvent):\n        sig = signals.rows_updated\n        action = 'update'\n        row_converter = _convert_update_row\n    elif isinstance(e, row_event.WriteRowsEvent):\n        sig = signals.rows_inserted\n        action = 'insert'\n        row_converter = _convert_write_row\n    elif isinstance(e, row_event.DeleteRowsEvent):\n        sig = signals.rows_deleted\n        action = 'delete'\n        row_converter = _convert_write_row\n    else:\n        assert False, 'Invalid binlog event'\n\n    meta = {\n        'time': e.timestamp,\n        'log_pos': stream.log_pos,\n        'log_file': stream.log_file,\n        'schema': e.schema,\n        'table': e.table,\n        'action': action,\n    }\n    rows = list(map(row_converter, e.rows))\n    for row in rows:\n        row['keys'] = {k: row['values'][k] for k in pk_cols}\n    return rows, meta", "docstring": "Convert RowsEvent to a dict\n\nArgs:\ne (pymysqlreplication.row_event.RowsEvent): the event\nstream (pymysqlreplication.BinLogStreamReader):\nthe stream that yields event\n\nReturns:\ndict: event's data as a dict", "source": "juraj-google-style"}
{"code": "def get_variable_layout(self, variable):\n    raise NotImplementedError()", "docstring": "Retrieve the `TensorLayout` for the variable.\n\nArgs:\nvariable: A `Variable` instance.\n\nreturn:\nThe `TensorLayout` for the variable, which can be used by\n`backend.distribute_value()` to redistribute a variable.", "source": "github-repos"}
{"code": "def __init__(self, components):\n    global _next_device_number, _next_device_number_lock\n    self.components = tuple((device_util.canonicalize(d) for d in components))\n    if not self.components:\n        raise ValueError('ParallelDevice requires at least one component.')\n    ctx = context.context()\n    with _next_device_number_lock:\n        self._name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)\n        _next_device_number += 1\n    device, device_info = _pywrap_parallel_device.GetParallelDeviceCapsules(self._name, self.components)\n    context.register_custom_device(device, self._name, device_info)\n    self._device_ids = None\n    self._device_scope = None\n    _all_parallel_devices[self._name] = self", "docstring": "Creates a device which executes operations in parallel on `components`.\n\nArgs:\ncomponents: A list of device names. Each operation executed on the\nreturned device executes on these component devices.\n\nReturns:\nA string with the name of the newly created device.", "source": "github-repos"}
{"code": "def put_image(self, name, val):\n    assert isinstance(val, np.ndarray)\n    arr = image_to_nhwc(val)\n    self._dispatch((lambda m: m.process_image(name, arr)))\n    s = create_image_summary(name, arr)\n    self._dispatch((lambda m: m.process_summary(s)))", "docstring": "Put an image.\n\nArgs:\nname (str):\nval (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images\nin range [0,255]. If channel is 3, assumed to be RGB.", "source": "codesearchnet"}
{"code": "def copy_all_a(input_a, *other_inputs, **kwargs):\n    output = []\n    while (input_a.count() > 0):\n        output.append(input_a.pop())\n    for input_x in other_inputs:\n        input_x.skip_all()\n    return output", "docstring": "Copy all readings in input a into the output.\n\nAll other inputs are skipped so that after this function runs there are no\nreadings left in any of the input walkers when the function finishes, even\nif it generated no output readings.\n\nReturns:\nlist(IOTileReading)", "source": "codesearchnet"}
{"code": "def generate_zip_data(M, L, n_cells, cluster_probs=None):\n    \n    genes, clusters = M.shape\n    output = np.zeros((genes, n_cells))\n    if cluster_probs is None:\n        cluster_probs = np.ones(clusters)/clusters\n    zip_p = np.random.random((genes, n_cells))\n    labels = []\n    for i in range(n_cells):\n        c = np.random.choice(range(clusters), p=cluster_probs)\n        labels.append(c)\n        output[:,i] = np.where(zip_p[:,i] < L[:,c], 0, np.random.poisson(M[:,c]))\n    return output, np.array(labels)", "docstring": "Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.\n\nArgs:\nM (array): genes x clusters matrix\nL (array): genes x clusters matrix - zero-inflation parameters\nn_cells (int): number of output cells\ncluster_probs (array): prior probability for each cluster.\nDefault: uniform.\n\nReturns:\noutput - array with shape genes x n_cells\nlabels - array of cluster labels", "source": "juraj-google-style"}
{"code": "def _create_and_save_state(cls, mapreduce_spec, _app):\n    \n    state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)\n    state.mapreduce_spec = mapreduce_spec\n    state.active = True\n    state.active_shards = 0\n    if _app:\n      state.app_id = _app\n    config = util.create_datastore_write_config(mapreduce_spec)\n    state.put(config=config)\n    return state", "docstring": "Save mapreduce state to datastore.\n\nSave state to datastore so that UI can see it immediately.\n\nArgs:\nmapreduce_spec: model.MapreduceSpec,\n_app: app id if specified. None otherwise.\n\nReturns:\nThe saved Mapreduce state.", "source": "juraj-google-style"}
{"code": "def concat(self, name=None):\n    return self._implementation.concat(name=name)", "docstring": "Return the values in the TensorArray as a concatenated `Tensor`.\n\nAll of the values must have been written, their ranks must match, and\nand their shapes must all match for all dimensions except the first.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nAll the tensors in the TensorArray concatenated into one tensor.", "source": "github-repos"}
{"code": "def clone(self, name=None):\n    \n    if name is None:\n      name = self.module_name + \"_clone\"\n    return Linear(output_size=self.output_size,\n                  use_bias=self._use_bias,\n                  initializers=self._initializers,\n                  partitioners=self._partitioners,\n                  regularizers=self._regularizers,\n                  name=name)", "docstring": "Returns a cloned `Linear` module.\n\nArgs:\nname: Optional string assigning name of cloned module. The default name\nis constructed by appending \"_clone\" to `self.module_name`.\n\nReturns:\nCloned `Linear` module.", "source": "juraj-google-style"}
{"code": "def get_nmr_quadrupole_moment(self, isotope=None):\n        \n\n        quad_mom = self._el.nmr_quadrupole_moment\n\n        if len(quad_mom) == 0:\n            return 0.0\n\n        if isotope is None:\n            isotopes = list(quad_mom.keys())\n            isotopes.sort(key=lambda x: int(x.split(\"-\")[1]), reverse=False)\n            return quad_mom.get(isotopes[0], 0.0)\n        else:\n            if isotope not in quad_mom:\n                raise ValueError(\"No quadrupole moment for isotope {}\".format(\n                    isotope))\n            return quad_mom.get(isotope, 0.0)", "docstring": "Gets the nuclear electric quadrupole moment in units of\ne*millibarns\n\nArgs:\nisotope (str): the isotope to get the quadrupole moment for\ndefault is None, which gets the lowest mass isotope", "source": "juraj-google-style"}
{"code": "def create_grad(node, namer, tangent=False):\n    if (not isinstance(node, (gast.Subscript, gast.Name, gast.Str))):\n        raise TypeError\n    if anno.hasanno(node, 'temp_var'):\n        return create_grad(anno.getanno(node, 'temp_var'), namer, tangent)\n\n    def _name_grad(node):\n        if (not isinstance(node, gast.Name)):\n            raise TypeError\n        varname = node.id\n        name = namer.grad(varname, tangent)\n        grad_node = gast.Name(id=name, ctx=None, annotation=None)\n        anno.setanno(grad_node, 'adjoint_var', node)\n        return grad_node\n    if isinstance(node, gast.Subscript):\n        grad_node = create_grad(node.value, namer, tangent=tangent)\n        grad_node.ctx = gast.Load()\n        return gast.Subscript(value=grad_node, slice=node.slice, ctx=None)\n    elif isinstance(node, gast.Str):\n        grad_node = create_grad(gast.Name(id=node.s, ctx=None, annotation=None), namer, tangent=tangent)\n        return gast.Str(grad_node.id)\n    else:\n        return _name_grad(node)", "docstring": "Given a variable, create a variable for the gradient.\n\nArgs:\nnode: A node to create a gradient for, can be a normal variable (`x`) or a\nsubscript (`x[i]`).\nnamer: The namer object which will determine the name to use for the\ngradient.\ntangent: Whether a tangent (instead of adjoint) is created.\n\nReturns:\nnode: A node representing the gradient with the correct name e.g. the\ngradient of `x[i]` is `dx[i]`.\n\nNote that this returns an invalid node, with the `ctx` attribute\nmissing. It is assumed that this attribute is filled in later.\n\nNode has an `adjoint_var` annotation referring to the node it is an\nadjoint of.", "source": "codesearchnet"}
{"code": "def _create_mirrored_tpu_variables(**kwargs):\n    initial_value = None\n    value_list = []\n    for i, d in enumerate(devices):\n        with ops.device(d):\n            if i == 0:\n                initial_value = kwargs['initial_value']\n                with maybe_init_scope():\n                    initial_value = initial_value() if callable(initial_value) else initial_value\n            if i > 0:\n                var0name = value_list[0].name.split(':')[0]\n                kwargs['name'] = '%s/replica_%d/' % (var0name, i)\n            kwargs['initial_value'] = initial_value\n            with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n                v = next_creator(**kwargs)\n            assert not isinstance(v, tpu_values.TPUMirroredVariable)\n            value_list.append(v)\n    return value_list", "docstring": "Returns a list of `tf.Variable`s.\n\nThe list contains `number_replicas` `tf.Variable`s and can be used to\ninitialize a `TPUMirroredVariable`.\n\nArgs:\n**kwargs: the keyword arguments for creating a variable", "source": "github-repos"}
{"code": "def org(self, notification_type, priority='Low'):\n    self._notification_type = notification_type\n    self._recipients = None\n    self._priority = priority\n    self._is_organization = True", "docstring": "Set vars for the passed in data. Used for org notification.\n\n.. code-block:: javascript\n\n{\n\"notificationType\": notification_type,\n\"priority\": priority\n\"isOrganization\": true\n}\n\nArgs:\nnotification_type (str): The notification type.\npriority (str): The priority: Low, Medium, High.", "source": "codesearchnet"}
{"code": "def _build_kernel(self, kernel_source, compile_flags=()):\n        \n        return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))", "docstring": "Convenience function for building the kernel for this worker.\n\nArgs:\nkernel_source (str): the kernel source to use for building the kernel\n\nReturns:\ncl.Program: a compiled CL kernel", "source": "juraj-google-style"}
{"code": "def flush(cls, *args):\n        \n        return _remove_keys([], [(cls._make_key(args) if args else cls.PREFIX) + '*'])", "docstring": "Removes all keys of this namespace\nWithout args, clears all keys starting with cls.PREFIX\nif called with args, clears keys starting with given cls.PREFIX + args\n\nArgs:\n*args: Arbitrary number of arguments.\n\nReturns:\nList of removed keys.", "source": "juraj-google-style"}
{"code": "def num_nodes(self, leaves=True, internal=True):\n    if (not isinstance(leaves, bool)):\n        raise TypeError('leaves must be a bool')\n    if (not isinstance(internal, bool)):\n        raise TypeError('internal must be a bool')\n    num = 0\n    for node in self.traverse_preorder():\n        if ((leaves and node.is_leaf()) or (internal and (not node.is_leaf()))):\n            num += 1\n    return num", "docstring": "Compute the total number of selected nodes in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``\n\nReturns:\n``int``: The total number of selected nodes in this ``Tree``", "source": "codesearchnet"}
{"code": "def datporod(gnomoutfile):\n    results = subprocess.check_output(['datporod', gnomoutfile]).decode('utf-8').strip().split()\n    return (float(results[0]), float(results[1]), float(results[2]))", "docstring": "Run datporod and return the estimated Porod volume.\n\nReturns:\nRadius of gyration found in the input file\nI0 found in the input file\nVporod: the estimated Porod volume", "source": "codesearchnet"}
{"code": "def tf_output(c_op, index):\n    ret = c_api.TF_Output()\n    ret.oper = c_op\n    ret.index = index\n    return ret", "docstring": "Returns a wrapped TF_Output with specified operation and index.\n\nArgs:\nc_op: wrapped TF_Operation\nindex: integer\n\nReturns:\nWrapped TF_Output", "source": "github-repos"}
{"code": "def _dict_func(self, func, axis, *args, **kwargs):\n        \n        if \"axis\" not in kwargs:\n            kwargs[\"axis\"] = axis\n\n        if axis == 0:\n            index = self.columns\n        else:\n            index = self.index\n        func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}\n\n        def dict_apply_builder(df, func_dict={}):\n            \n            \n            return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))\n\n        result_data = self.data.apply_func_to_select_indices_along_full_axis(\n            axis, dict_apply_builder, func, keep_remaining=False\n        )\n        full_result = self._post_process_apply(result_data, axis)\n        return full_result", "docstring": "Apply function to certain indices across given axis.\n\nArgs:\nfunc: The function to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "juraj-google-style"}
{"code": "def _reset(self, indices):\n    self.assert_common_preconditions()\n    return np.stack([self._envs[index].reset() for index in indices])", "docstring": "Resets environments at indices shouldn't pre-process or record.\n\nSubclasses should override this to do the actual reset if something other\nthan the default implementation is desired.\n\nArgs:\nindices: list of indices of underlying envs to call reset on.\n\nReturns:\nnp.ndarray of stacked observations from the reset-ed envs.", "source": "codesearchnet"}
{"code": "def CaseGroups(unicode_dir=_UNICODE_DIR):\n  \n\n  \n  togroup = {}\n\n  def DoLine(codes, fields):\n    \n    (_, foldtype, lower, _) = fields\n    if foldtype not in (\"C\", \"S\"):\n      return\n    lower = _UInt(lower)\n    togroup.setdefault(lower, [lower]).extend(codes)\n\n  ReadUnicodeTable(unicode_dir+\"/CaseFolding.txt\", 4, DoLine)\n\n  groups = togroup.values()\n  for g in groups:\n    g.sort()\n  groups.sort()\n  return togroup, groups", "docstring": "Returns list of Unicode code groups equivalent under case folding.\n\nEach group is a sorted list of code points,\nand the list of groups is sorted by first code point\nin the group.\n\nArgs:\nunicode_dir: Unicode data directory\n\nReturns:\nlist of Unicode code groups", "source": "juraj-google-style"}
{"code": "def _get_response(self, endpoint, request_dict):\n    http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter was input incorrectly, or the API is currently down. Please try again.'\n    json_error = 'Could not retrieve JSON values. Try again with a shorter date range.'\n    try:\n        qsp = urllib.parse.urlencode(request_dict, doseq=True)\n        resp = urllib.request.urlopen((((self.base_url + endpoint) + '?') + qsp)).read()\n    except (AttributeError or NameError):\n        try:\n            qsp = urllib.urlencode(request_dict, doseq=True)\n            resp = urllib2.urlopen((((self.base_url + endpoint) + '?') + qsp)).read()\n        except urllib2.URLError:\n            raise MesoPyError(http_error)\n    except urllib.error.URLError:\n        raise MesoPyError(http_error)\n    try:\n        json_data = json.loads(resp.decode('utf-8'))\n    except ValueError:\n        raise MesoPyError(json_error)\n    return self._checkresponse(json_data)", "docstring": "Returns a dictionary of data requested by each function.\n\nArguments:\n----------\nendpoint: string, mandatory\nSet in all other methods, this is the API endpoint specific to each function.\nrequest_dict: string, mandatory\nA dictionary of parameters that are formatted into the API call.\n\nReturns:\n--------\nresponse: A dictionary that has been dumped from JSON.\n\nRaises:\n-------\nMesoPyError: Overrides the exceptions given in the requests library to give more custom error messages.\nConnection_error occurs if no internet connection exists. Timeout_error occurs if the request takes too\nlong and redirect_error is shown if the url is formatted incorrectly.", "source": "codesearchnet"}
{"code": "def from_proto(cls, struct_def_proto: message.Message, backbone_element_path: Optional[str]=None, element_type: Optional[str]=None, parent_definitions: Optional[ChildDefinitions]=None) -> 'StructureDataType':\n    struct_def = cast(Any, struct_def_proto)\n    raw_url = struct_def.url.value\n    base_type = struct_def.type.value\n    element_type = element_type if element_type else base_type\n    qualified_path = f'{element_type}.{backbone_element_path}' if backbone_element_path else element_type\n    child_defs = ChildDefinitions()\n    slices: dict[str, _SliceBuilder] = collections.defaultdict(lambda: _SliceBuilder(None, None, []))\n    root_element_definition = None\n    for elem in struct_def.snapshot.element:\n        if elem.base.path.value == 'Extension.url':\n            continue\n        path = _get_analytic_path(elem.path.value, elem.id.value)\n        if path == qualified_path:\n            if elem.slice_name.value:\n                slice_def = slices[f':{elem.slice_name.value}']\n                slice_def.slice_def = elem\n                slice_def.relative_path = ''\n            else:\n                root_element_definition = elem\n            continue\n        if re.search(f'^{qualified_path}\\\\.\\\\w+', path):\n            relative_path = path[len(qualified_path) + 1:]\n            closest_slice_ancestor = re.search(f'^{qualified_path}[\\\\.]?(.*(?<!.extension):[\\\\w-]+)(?:$|\\\\.)', elem.id.value)\n            if closest_slice_ancestor is None:\n                child_defs.add_definition(relative_path, elem)\n            else:\n                slice_def = slices[closest_slice_ancestor[1]]\n                if elem.slice_name.value:\n                    slice_def.slice_def = elem\n                    slice_def.relative_path = relative_path\n                else:\n                    slice_def.slice_rules.append((relative_path, elem))\n    if parent_definitions is not None:\n        child_defs.update(parent_definitions)\n    if not root_element_definition:\n        raise ValueError(f'StructureDataType {raw_url} searching on {qualified_path}  missing root element definition. {struct_def}')\n    return cls(structure_definition=struct_def, backbone_element_path=backbone_element_path, base_type=base_type, element_type=element_type, _child_defs=child_defs, _slices=tuple((slice_def.to_slice() for slice_def in slices.values())), _raw_url=raw_url, root_element_definition=root_element_definition, cardinality=Cardinality.SCALAR)", "docstring": "Creates a StructureDataType from a proto.\n\nArgs:\nstruct_def_proto: Proto containing information about the structure\ndefinition.\nbackbone_element_path: Optional path to the structure def.\nelement_type: Potential alternative type name for the type.\nparent_definitions: Element definitions defined by parent structure\ndefinitions which should override definitions in `struct_def_proto`. If\nstructure definitions supply element definitions at nested paths, e.g.\nFoo.bar.baz.quux, those element definitions need to be passed via the\n`parent_definitions` argument to ensure element definitions will be\nchosen from the parent rather than `struct_def_proto`. e.g. if\n`struct_def_proto` defines 'Baz.quux,' the parent's 'Foo.bar.baz.quux'\ndefinition must be provided here in order to be chosen over the\n`struct_def_proto` definition.\n\nReturns:\nA StructureDataType.", "source": "github-repos"}
{"code": "class Idefics2Encoder(nn.Module):\n\n    def __init__(self, config: Idefics2Config):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Idefics2EncoderLayer`].\n\nArgs:\nconfig: Idefics2Config", "source": "github-repos"}
{"code": "def merge_tags(left, right, factory=Tags):\n    \n\n    if isinstance(left, Mapping):\n        tags = dict(left)\n    elif hasattr(left, 'tags'):\n        tags = _tags_to_dict(left.tags)\n    else:\n        tags = _tags_to_dict(left)\n\n    if isinstance(right, Mapping):\n        tags.update(right)\n    elif hasattr(left, 'tags'):\n        tags.update(_tags_to_dict(right.tags))\n    else:\n        tags.update(_tags_to_dict(right))\n\n    return factory(**tags)", "docstring": "Merge two sets of tags into a new troposphere object\n\nArgs:\nleft (Union[dict, troposphere.Tags]): dictionary or Tags object to be\nmerged with lower priority\nright (Union[dict, troposphere.Tags]): dictionary or Tags object to be\nmerged with higher priority\nfactory (type): Type of object to create. Defaults to the troposphere\nTags class.", "source": "juraj-google-style"}
{"code": "def _get_paddings_constant(paddings):\n    if isinstance(paddings, tensor_lib.Tensor):\n        return tensor_util.constant_value(paddings, partial=True)\n    elif isinstance(paddings, (list, tuple)):\n        return [_get_paddings_constant(x) for x in paddings]\n    else:\n        return paddings", "docstring": "Helper to get the constant values of the paddings arg to pad().\n\nUsed under V1 graph mode to facilitate computation of the shape of the output\ntensor of `pad()`.\n\nArgs:\npaddings: The same paddings arg as passed to pad(). Can be a Tensor, or\na nested list or tuple of Tensor and/or numbers.\n\nReturns:\nA nested list or numbers or `None`, in which `None` indicates unknown\npadding size.", "source": "github-repos"}
{"code": "def remove_collisions(self, min_dist=0.5):\n    s_f_coords = self.structure.frac_coords\n    f_coords = self.extrema_coords\n    if (len(f_coords) == 0):\n        if (self.extrema_type is None):\n            logger.warning('Please run ChargeDensityAnalyzer.get_local_extrema first!')\n            return\n        new_f_coords = []\n        self._update_extrema(new_f_coords, self.extrema_type)\n        return new_f_coords\n    dist_matrix = self.structure.lattice.get_all_distances(f_coords, s_f_coords)\n    all_dist = np.min(dist_matrix, axis=1)\n    new_f_coords = []\n    for (i, f) in enumerate(f_coords):\n        if (all_dist[i] > min_dist):\n            new_f_coords.append(f)\n    self._update_extrema(new_f_coords, self.extrema_type)\n    return new_f_coords", "docstring": "Remove predicted sites that are too close to existing atoms in the\nstructure.\n\nArgs:\nmin_dist (float): The minimum distance (in Angstrom) that\na predicted site needs to be from existing atoms. A min_dist\nwith value <= 0 returns all sites without distance checking.", "source": "codesearchnet"}
{"code": "def generate_code(meta, prefix=None, node=False, min=False):\n    \n    if isinstance(meta, dict):\n        url_prefix, auth_header, resources = parse_meta(meta)\n    else:\n        url_prefix, auth_header, resources = meta\n    if prefix is not None:\n        url_prefix = prefix\n    core = render_core(url_prefix, auth_header, resources)\n    if min:\n        filename = 'res.web.min.js'\n    else:\n        filename = 'res.web.js'\n    if node:\n        filename = 'res.node.js'\n    base = read_file(filename)\n    return base.replace('\"", "docstring": "Generate res.js\n\nArgs:\nmeta: tuple(url_prefix, auth_header, resources) or metadata of API\nReturns:\nres.js source code", "source": "juraj-google-style"}
{"code": "def create_schema(self, model, waiting_models):\n        \n        bucket_name = model._get_bucket_name()\n        index_name = \"%s_%s\" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)\n        ins = model(fake_context)\n        fields = self.get_schema_fields(ins._collect_index_fields())\n        new_schema = self.compile_schema(fields)\n        schema = get_schema_from_solr(index_name)\n        if not (schema == new_schema):\n            try:\n                client.create_search_schema(index_name, new_schema)\n                print(\"+ %s (%s) search schema is created.\" % (model.__name__, index_name))\n            except:\n                print(\"+ %s (%s) search schema checking operation is taken to queue.\" % (\n                    model.__name__, index_name))\n                waiting_models.append(model)", "docstring": "Creates search schemas.\n\nArgs:\nmodel: model to execute\nwaiting_models: if riak can't return response immediately, model is taken to queue.\nAfter first execution session, method is executed with waiting models and controlled.\nAnd be ensured that all given models are executed properly.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _as_document(self, partition):\n    schema = ' '.join((u'{} {} {} {} {}'.format(c.id, c.vid, c.name, c.altname, c.description) for c in partition.table.columns))\n    values = ''\n    for stat in partition.stats:\n        if stat.uvalues:\n            values += (' '.join((e[:200] for e in stat.uvalues)) + '\\n')\n\n    def resum(g):\n        try:\n            return str(GVid.parse(g).summarize())\n        except KeyError:\n            return g\n        except ValueError:\n            logger.debug(\"Failed to parse gvid '{}' from partition '{}' grain coverage\".format(g, partition.identity.vname))\n            return g\n    keywords = ((((' '.join(partition.space_coverage) + ' ') + ' '.join([resum(g) for g in partition.grain_coverage if resum(g)])) + ' ') + ' '.join((str(x) for x in partition.time_coverage)))\n    doc_field = u('{} {} {} {} {} {}').format(values, schema, ' '.join([u('{}').format(partition.identity.vid), u('{}').format(partition.identity.id_), u('{}').format(partition.identity.name), u('{}').format(partition.identity.vname)]), partition.display.title, partition.display.description, partition.display.sub_description, partition.display.time_description, partition.display.geo_description)\n    document = dict(vid=u('{}').format(partition.identity.vid), dataset_vid=u('{}').format(partition.identity.as_dataset().vid), title=u('{}').format(partition.table.description), keywords=u('{}').format(keywords), doc=doc_field)\n    return document", "docstring": "Converts given partition to the document indexed by FTS backend.\n\nArgs:\npartition (orm.Partition): partition to convert.\n\nReturns:\ndict with structure matches to BasePartitionIndex._schema.", "source": "codesearchnet"}
{"code": "def _get_bases(type_):\n        \n        \n        try:\n\n            class _(type_):  \n                \n\n            BaseClass = type_\n        except TypeError:\n            BaseClass = object\n\n        class MetaClass(_ValidationMeta, BaseClass.__class__):  \n            \n\n        return BaseClass, MetaClass", "docstring": "Get the base and meta classes to use in creating a subclass.\n\nArgs:\ntype_: The type to subclass.\n\nReturns:\nA tuple containing two values: a base class, and a metaclass.", "source": "juraj-google-style"}
{"code": "def write_signatures(self, signatures):\n        \n        self.fileobj.seek(self.signature_offset)\n        sig_entries = [dict(algorithm_id=id_,\n                            size=len(sig),\n                            signature=sig)\n                       for (id_, sig) in signatures]\n\n        sigs = sigs_header.build(dict(\n            filesize=self.filesize,\n            count=len(signatures),\n            sigs=sig_entries,\n        ))\n        self.fileobj.write(sigs)\n        signatures_len = len(sigs)\n        self.additional_offset = self.signature_offset + signatures_len\n        \n        if not self.additional_offset == self.fileobj.tell():  \n            raise IOError('ended up at unexpected offset')", "docstring": "Write signature data to the MAR file.\n\nArgs:\nsignatures (list): list of signature tuples of the form\n(algorithm_id, signature_data)", "source": "juraj-google-style"}
{"code": "def __init__(self, name, context=None):\n    \n    super(Job, self).__init__(name)\n    if context is None:\n      context = datalab.Context.default()\n    self._context = context\n    self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)\n    if not name.startswith('projects/'):\n      name = 'projects/' + self._context.project_id + '/jobs/' + name\n    self._name = name\n    self._refresh_state()", "docstring": "Initializes an instance of a CloudML Job.\n\nArgs:\nname: the name of the job. It can be an operation full name\n(\"projects/[project_id]/jobs/[operation_name]\") or just [operation_name].\ncontext: an optional Context object providing project_id and credentials.", "source": "juraj-google-style"}
{"code": "def parse(self, template):\n        \n        self._compile_delimiters()\n\n        start_index = 0\n        content_end_index, parsed_section, section_key = None, None, None\n        parsed_template = ParsedTemplate()\n\n        states = []\n\n        while True:\n            match = self._template_re.search(template, start_index)\n\n            if match is None:\n                break\n\n            match_index = match.start()\n            end_index = match.end()\n\n            matches = match.groupdict()\n\n            \n            if matches['change'] is not None:\n                matches.update(tag='=', tag_key=matches['delims'])\n            elif matches['raw'] is not None:\n                matches.update(tag='&', tag_key=matches['raw_name'])\n\n            tag_type = matches['tag']\n            tag_key = matches['tag_key']\n            leading_whitespace = matches['whitespace']\n\n            \n            \n            did_tag_begin_line = match_index == 0 or template[match_index - 1] in END_OF_LINE_CHARACTERS\n            did_tag_end_line = end_index == len(template) or template[end_index] in END_OF_LINE_CHARACTERS\n            is_tag_interpolating = tag_type in ['', '&']\n\n            if did_tag_begin_line and did_tag_end_line and not is_tag_interpolating:\n                if end_index < len(template):\n                    end_index += template[end_index] == '\\r' and 1 or 0\n                if end_index < len(template):\n                    end_index += template[end_index] == '\\n' and 1 or 0\n            elif leading_whitespace:\n                match_index += len(leading_whitespace)\n                leading_whitespace = ''\n\n            \n            if start_index != match_index:\n                parsed_template.add(template[start_index:match_index])\n\n            start_index = end_index\n\n            if tag_type in ('\n                \n                state = (tag_type, end_index, section_key, parsed_template)\n                states.append(state)\n\n                \n                section_key, parsed_template = tag_key, ParsedTemplate()\n                continue\n\n            if tag_type == '/':\n                if tag_key != section_key:\n                    raise ParsingError(\"Section end tag mismatch: %s != %s\" % (tag_key, section_key))\n\n                \n                parsed_section = parsed_template\n\n                (tag_type, section_start_index, section_key, parsed_template) = states.pop()\n                node = self._make_section_node(template, tag_type, tag_key, parsed_section,\n                                               section_start_index, match_index)\n\n            else:\n                node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace)\n\n            parsed_template.add(node)\n\n        \n        if start_index != len(template):\n            parsed_template.add(template[start_index:])\n\n        return parsed_template", "docstring": "Parse a template string starting at some index.\n\nThis method uses the current tag delimiter.\n\nArguments:\n\ntemplate: a unicode string that is the template to parse.\n\nindex: the index at which to start parsing.\n\nReturns:\n\na ParsedTemplate instance.", "source": "juraj-google-style"}
{"code": "def write_file_to_zip_with_neutral_metadata(zfile, filename, content):\n    \n    info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0))\n    info.compress_type = zipfile.ZIP_DEFLATED\n    info.comment = \"\".encode()\n    info.create_system = 0\n    zfile.writestr(info, content)", "docstring": "Write the string `content` to `filename` in the open ZipFile `zfile`.\nArgs:\nzfile (ZipFile): open ZipFile to write the content into\nfilename (str): the file path within the zip file to write into\ncontent (str): the content to write into the zip\nReturns: None", "source": "juraj-google-style"}
{"code": "def leave_swarm(self, force=False):\n        \n        url = self._url('/swarm/leave')\n        response = self._post(url, params={'force': force})\n        \n        if force and response.status_code == http_client.NOT_ACCEPTABLE:\n            return True\n        \n        \n        if force and response.status_code == http_client.SERVICE_UNAVAILABLE:\n            return True\n        self._raise_for_status(response)\n        return True", "docstring": "Leave a swarm.\n\nArgs:\nforce (bool): Leave the swarm even if this node is a manager.\nDefault: ``False``\n\nReturns:\n``True`` if the request went through.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _retrieve_info(self, request):\n        \n        info = _metadata.get_service_account_info(\n            request,\n            service_account=self._service_account_email)\n\n        self._service_account_email = info['email']\n        self._scopes = info['scopes']", "docstring": "Retrieve information about the service account.\n\nUpdates the scopes and retrieves the full service account email.\n\nArgs:\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.", "source": "juraj-google-style"}
{"code": "def locked_put(self, credentials):\n        \n        entity, _ = self.model_class.objects.get_or_create(\n            **{self.key_name: self.key_value})\n\n        setattr(entity, self.property_name, credentials)\n        entity.save()", "docstring": "Write a Credentials to the Django datastore.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "juraj-google-style"}
{"code": "def __init__(self, *, separator_stride_cls: Type[message.Message], code_cls: Type[message.Message], default_timezone: str) -> None:\n    self.separator_stride_cls = separator_stride_cls\n    self.code_cls = code_cls\n    self.default_timezone = default_timezone", "docstring": "Creates a new instance of primitive_wrappers.Context.\n\nArgs:\nseparator_stride_cls: The Base64BinarySeparatorStride type to use when\nparsing/printing Base64Binary FHIR primitives.\ncode_cls: The Code type to use when parsing/printing profiled-Code\nprimitives.\ndefault_timezone: The default timezone to use for date/time-like primitive\nparsing/printing.", "source": "github-repos"}
{"code": "def profile_settings_args(self, ij, required):\n        \n        if self.args.permutation_id is not None:\n            if 'sqlite3' not in sys.modules:\n                print('The sqlite3 module needs to be build-in to Python for this feature.')\n                sys.exit(1)\n            profile_args = self.profile_settings_args_layout_json(required)\n        else:\n            profile_args = self.profile_settings_args_install_json(ij, required)\n        return profile_args", "docstring": "Return args based on install.json or layout.json params.\n\nArgs:\nij (dict): The install.json contents.\nrequired (bool): If True only required args will be returned.\n\nReturns:\ndict: Dictionary of required or optional App args.", "source": "juraj-google-style"}
{"code": "def validate_user_name(self, user_name, timeout=-1):\n        \n        uri = self.URI + '/validateLoginName/' + user_name\n        return self._client.create_with_zero_body(uri=uri, timeout=timeout)", "docstring": "Verifies if a userName is already in use.\n\nArgs:\nuser_name:\nThe userName to be verified.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns: True if user name is in use, False if it is not.", "source": "juraj-google-style"}
{"code": "class _BigTableWriteFn(beam.DoFn):\n\n    def __init__(self, project_id, instance_id, table_id, flush_count, max_row_bytes):\n        \n        super().__init__()\n        self.beam_options = {'project_id': project_id, 'instance_id': instance_id, 'table_id': table_id, 'flush_count': flush_count, 'max_row_bytes': max_row_bytes}\n        self.table = None\n        self.batcher = None\n        self.service_call_metric = None\n        self.written = Metrics.counter(self.__class__, 'Written Row')\n\n    def __getstate__(self):\n        return self.beam_options\n\n    def __setstate__(self, options):\n        self.beam_options = options\n        self.table = None\n        self.batcher = None\n        self.service_call_metric = None\n        self.written = Metrics.counter(self.__class__, 'Written Row')\n\n    def write_mutate_metrics(self, status_list):\n        for status in status_list:\n            code = status.code if status else None\n            grpc_status_string = ServiceCallMetric.bigtable_error_code_to_grpc_status_string(code)\n            self.service_call_metric.call(grpc_status_string)\n\n    def start_service_call_metrics(self, project_id, instance_id, table_id):\n        resource = resource_identifiers.BigtableTable(project_id, instance_id, table_id)\n        labels = {monitoring_infos.SERVICE_LABEL: 'BigTable', monitoring_infos.METHOD_LABEL: 'google.bigtable.v2.MutateRows', monitoring_infos.RESOURCE_LABEL: resource, monitoring_infos.BIGTABLE_PROJECT_ID_LABEL: self.beam_options['project_id'], monitoring_infos.INSTANCE_ID_LABEL: self.beam_options['instance_id'], monitoring_infos.TABLE_ID_LABEL: self.beam_options['table_id']}\n        return ServiceCallMetric(request_count_urn=monitoring_infos.API_REQUEST_COUNT_URN, base_labels=labels)\n\n    def start_bundle(self):\n        if self.table is None:\n            client = Client(project=self.beam_options['project_id'])\n            instance = client.instance(self.beam_options['instance_id'])\n            self.table = instance.table(self.beam_options['table_id'])\n        self.service_call_metric = self.start_service_call_metrics(self.beam_options['project_id'], self.beam_options['instance_id'], self.beam_options['table_id'])\n        self.batcher = MutationsBatcher(self.table, batch_completed_callback=self.write_mutate_metrics, flush_count=self.beam_options['flush_count'], max_row_bytes=self.beam_options['max_row_bytes'])\n\n    def process(self, row):\n        self.written.inc()\n        self.batcher.mutate(row)\n\n    def finish_bundle(self):\n        if self.batcher:\n            self.batcher.close()\n            self.batcher = None\n            Lineage.sinks().add('bigtable', self.beam_options['project_id'], self.beam_options['instance_id'], self.beam_options['table_id'])\n\n    def display_data(self):\n        return {'projectId': DisplayDataItem(self.beam_options['project_id'], label='Bigtable Project Id'), 'instanceId': DisplayDataItem(self.beam_options['instance_id'], label='Bigtable Instance Id'), 'tableId': DisplayDataItem(self.beam_options['table_id'], label='Bigtable Table Id')}", "docstring": "Creates the connector can call and add_row to the batcher using each\nrow in beam pipe line\nArgs:\nproject_id(str): GCP Project ID\ninstance_id(str): GCP Instance ID\ntable_id(str): GCP Table ID\nflush_count(int): Max number of rows to flush\nmax_row_bytes(int) Max number of row mutations size to flush", "source": "github-repos"}
{"code": "def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model\nconfiguration.\n\nReturns:\n[`CLIPConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def filter_benchmarks(benchmarks, bench_funcs, base_ver):\n    \n    for bm in list(benchmarks):\n        func = bench_funcs[bm]\n        if getattr(func, '_python2_only', False) and (3, 0) <= base_ver:\n            benchmarks.discard(bm)\n            logging.info(\"Skipping Python2-only benchmark %s; \"\n                         \"not compatible with Python %s\" % (bm, base_ver))\n            continue\n    return benchmarks", "docstring": "Filters out benchmarks not supported by both Pythons.\n\nArgs:\nbenchmarks: a set() of benchmark names\nbench_funcs: dict mapping benchmark names to functions\npython: the interpereter commands (as lists)\n\nReturns:\nThe filtered set of benchmark names", "source": "juraj-google-style"}
{"code": "def resize_to(self, width, height):\n    self.driver.resize_window_to(self.handle, width, height)", "docstring": "Resizes the window to the given dimensions.\n\nIf this method was called for a window that is not current, then after calling this method\nthe current window should remain the same as it was before calling this method.\n\nArgs:\nwidth (int): The new window width in pixels.\nheight (int): The new window height in pixels.", "source": "codesearchnet"}
{"code": "def VerifyMessageSignature(self, unused_response_comms, packed_message_list, cipher, cipher_verified, api_version, remote_public_key):\n    _ = api_version\n    result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED\n    if (cipher_verified or cipher.VerifyCipherSignature(remote_public_key)):\n        stats_collector_instance.Get().IncrementCounter('grr_authenticated_messages')\n        result = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED\n    if (packed_message_list.timestamp != self.timestamp):\n        result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED\n    if (not cipher.cipher_metadata):\n        cipher.cipher_metadata = rdf_flows.CipherMetadata(source=packed_message_list.source)\n    return result", "docstring": "Verify the message list signature.\n\nThis is the way the messages are verified in the client.\n\nIn the client we also check that the nonce returned by the server is correct\n(the timestamp doubles as a nonce). If the nonce fails we deem the response\nunauthenticated since it might have resulted from a replay attack.\n\nArgs:\npacked_message_list: The PackedMessageList rdfvalue from the server.\ncipher: The cipher belonging to the remote end.\ncipher_verified: If True, the cipher's signature is not verified again.\napi_version: The api version we should use.\nremote_public_key: The public key of the source.\n\nReturns:\nAn rdf_flows.GrrMessage.AuthorizationState.\n\nRaises:\nDecryptionError: if the message is corrupt.", "source": "codesearchnet"}
{"code": "def run(func, options, args=(), kwargs={}, host='localhost', port=8000):  \n    \n    run_stats = run_profilers((func, args, kwargs), options)\n\n    result = None\n    for prof in run_stats:\n        if not result:\n            result = run_stats[prof]['result']\n        del run_stats[prof]['result']  \n\n    post_data = gzip.compress(\n        json.dumps(run_stats).encode('utf-8'))\n    urllib.request.urlopen('http:\n    return result", "docstring": "Runs profilers on a function.\n\nArgs:\nfunc: A Python function.\noptions: A string with profilers configuration (i.e. 'cmh').\nargs: func non-keyword arguments.\nkwargs: func keyword arguments.\nhost: Host name to send collected data.\nport: Port number to send collected data.\n\nReturns:\nA result of func execution.", "source": "juraj-google-style"}
{"code": "def calculate_entropy(self, entropy_string):\n    total = 0\n    for char in entropy_string:\n        if char.isalpha():\n            prob = self.frequency[char.lower()]\n            total += ((- math.log(prob)) / math.log(2))\n    logging.debug('Entropy score: {0}'.format(total))\n    return total", "docstring": "Calculates the entropy of a string based on known frequency of\nEnglish letters.\n\nArgs:\nentropy_string: A str representing the string to calculate.\n\nReturns:\nA negative float with the total entropy of the string (higher\nis better).", "source": "codesearchnet"}
{"code": "def _get_group_object(name):\n    with salt.utils.winapi.Com():\n        nt = win32com.client.Dispatch('AdsNameSpaces')\n    return nt.GetObject('', (('WinNT:", "docstring": "A helper function to get a specified group object\n\nArgs:\n\nname (str): The name of the object\n\nReturns:\nobject: The specified group object", "source": "codesearchnet"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    date_time_values = self._CopyDateTimeFromString(time_string)\n\n    self._CopyFromDateTimeValues(date_time_values)", "docstring": "Copies time elements from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.", "source": "juraj-google-style"}
{"code": "def chrome_decrypt(encrypted_value: bytes, key: bytes, init_vector: bytes) \\\n        -> str:\n    \n    \n    \n    encrypted_value = encrypted_value[3:]\n\n    cipher = AES.new(key, AES.MODE_CBC, IV=init_vector)\n    decrypted = cipher.decrypt(encrypted_value)\n\n    return clean(decrypted)", "docstring": "Decrypt Chrome/Chromium's encrypted cookies.\n\nArgs:\nencrypted_value: Encrypted cookie from Chrome/Chromium's cookie file\nkey: Key to decrypt encrypted_value\ninit_vector: Initialization vector for decrypting encrypted_value\nReturns:\nDecrypted value of encrypted_value", "source": "juraj-google-style"}
{"code": "def Trim(self, flags):\n        \n        logger.info(\"Trimming!\")\n        flags = bytearray(flags)\n        length = 1 << self.Depth - 1\n        while len(flags) < length:\n            flags.append(0)\n\n        MerkleTree._TrimNode(self.Root, 0, self.Depth, flags)", "docstring": "Trim the nodes from the tree keeping only the root hash.\n\nArgs:\nflags: \"0000\" for trimming, any other value for keeping the nodes.", "source": "juraj-google-style"}
{"code": "def _format_output(content, typ):\n    if ('csv' in str(typ)):\n        return _format_csv(content, delimiter=',')\n    if ('tsv' in str(typ)):\n        return _format_csv(content, delimiter='\\t')\n    return content", "docstring": "Tabularize the content according to its type.\n\nArgs:\ncontent (str): The content of a metric.\ntyp (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).\n\nReturns:\nstr: Content in a raw or tabular format.", "source": "codesearchnet"}
{"code": "def _Initialize(self, http, url):\n        \n        self.EnsureUninitialized()\n        if self.http is None:\n            self.__http = http or http_wrapper.GetHttp()\n        self.__url = url", "docstring": "Initialize this download by setting self.http and self.url.\n\nWe want the user to be able to override self.http by having set\nthe value in the constructor; in that case, we ignore the provided\nhttp.\n\nArgs:\nhttp: An httplib2.Http instance or None.\nurl: The url for this transfer.\n\nReturns:\nNone. Initializes self.", "source": "juraj-google-style"}
{"code": "def download_image(self, device_label, image_id, file_name):\n    response = None\n    try:\n        response = requests.get(urls.download_image(self._giid, device_label, image_id), headers={'Cookie': 'vid={}'.format(self._vid)}, stream=True)\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)\n    with open(file_name, 'wb') as image_file:\n        for chunk in response.iter_content(chunk_size=1024):\n            if chunk:\n                image_file.write(chunk)", "docstring": "Download image taken by a smartcam\n\nArgs:\ndevice_label (str): device label of camera\nimage_id (str): image id from image series\nfile_name (str): path to file", "source": "codesearchnet"}
{"code": "def register_lookup_handler(lookup_type, handler_or_path):\n    handler = handler_or_path\n    if isinstance(handler_or_path, basestring):\n        handler = load_object_from_string(handler_or_path)\n    LOOKUP_HANDLERS[lookup_type] = handler\n    if (type(handler) != type):\n        logger = logging.getLogger(__name__)\n        logger.warning(('Registering lookup `%s`: Please upgrade to use the new style of Lookups.' % lookup_type))\n        warnings.warn(('Lookup `%s`: Please upgrade to use the new style of Lookups.' % lookup_type), DeprecationWarning, stacklevel=2)", "docstring": "Register a lookup handler.\n\nArgs:\nlookup_type (str): Name to register the handler under\nhandler_or_path (OneOf[func, str]): a function or a path to a handler", "source": "codesearchnet"}
{"code": "def _DetermineOperatingSystem(self, searcher):\n    find_specs = [file_system_searcher.FindSpec(location='/etc', case_sensitive=False), file_system_searcher.FindSpec(location='/System/Library', case_sensitive=False), file_system_searcher.FindSpec(location='/Windows/System32', case_sensitive=False), file_system_searcher.FindSpec(location='/WINNT/System32', case_sensitive=False), file_system_searcher.FindSpec(location='/WINNT35/System32', case_sensitive=False), file_system_searcher.FindSpec(location='/WTSRV/System32', case_sensitive=False)]\n    locations = []\n    for path_spec in searcher.Find(find_specs=find_specs):\n        relative_path = searcher.GetRelativePath(path_spec)\n        if relative_path:\n            locations.append(relative_path.lower())\n    windows_locations = set(['/windows/system32', '\\\\windows\\\\system32', '/winnt/system32', '\\\\winnt\\\\system32', '/winnt35/system32', '\\\\winnt35\\\\system32', '\\\\wtsrv\\\\system32', '/wtsrv/system32'])\n    operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN\n    if windows_locations.intersection(set(locations)):\n        operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT\n    elif ('/system/library' in locations):\n        operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS\n    elif ('/etc' in locations):\n        operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX\n    return operating_system", "docstring": "Tries to determine the underlying operating system.\n\nArgs:\nsearcher (dfvfs.FileSystemSearcher): file system searcher.\n\nReturns:\nstr: operating system for example \"Windows\". This should be one of\nthe values in definitions.OPERATING_SYSTEM_FAMILIES.", "source": "codesearchnet"}
{"code": "def inference(self, observed_arr):\n        \n        for i in range(len(self.__deconvolution_layer_list)):\n            try:\n                observed_arr = self.__deconvolution_layer_list[i].forward_propagate(observed_arr)\n            except:\n                self.__logger.debug(\"Error raised in Deconvolution layer \" + str(i + 1))\n                raise\n\n        return observed_arr", "docstring": "Draws samples from the `fake` distribution.\n\nArgs:\nobserved_arr:     `np.ndarray` of observed data points.\n\nReturns:\n`np.ndarray` of inferenced.", "source": "juraj-google-style"}
{"code": "def load_steps(working_dir=None, steps_dir=None, step_file=None, step_list=None):\n    if (steps_dir is not None):\n        step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))\n    elif (step_file is not None):\n        step_files = [step_file]\n    elif (step_list is not None):\n        step_files = []\n        for path in step_list:\n            if os.path.isdir(path):\n                step_files += glob.glob(os.path.join(path, '*.cwl'))\n            else:\n                step_files.append(path)\n    else:\n        step_files = []\n    if (working_dir is not None):\n        step_files = sort_loading_order(step_files)\n    steps = {}\n    for f in step_files:\n        if (working_dir is not None):\n            if ((not (working_dir == os.path.dirname(f))) and (not is_url(f))):\n                copied_file = os.path.join(working_dir, os.path.basename(f))\n                shutil.copy2(f, copied_file)\n                f = copied_file\n        try:\n            s = Step(f)\n            steps[s.name] = s\n        except (NotImplementedError, ValidationException, PackedWorkflowException) as e:\n            logger.warning(e)\n    return steps", "docstring": "Return a dictionary containing Steps read from file.\n\nArgs:\nsteps_dir (str, optional): path to directory containing CWL files.\nstep_file (str, optional): path or http(s) url to a single CWL file.\nstep_list (list, optional): a list of directories, urls or local file\npaths to CWL files or directories containing CWL files.\n\nReturn:\ndict containing (name, Step) entries.", "source": "codesearchnet"}
{"code": "def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None):\n    try:\n        audio = EasyMP3(filename)\n        audio.tags = None\n        audio['artist'] = artist\n        audio['title'] = title\n        if year:\n            audio['date'] = str(year)\n        if album:\n            audio['album'] = album\n        if track_number:\n            audio['tracknumber'] = track_number\n        if genre:\n            audio['genre'] = genre\n        if url:\n            audio['website'] = url\n        audio.save()\n        if artwork_url:\n            artwork_url = artwork_url.replace('https', 'http')\n            mime = 'image/jpeg'\n            if ('.jpg' in artwork_url):\n                mime = 'image/jpeg'\n            if ('.png' in artwork_url):\n                mime = 'image/png'\n            if ('-large' in artwork_url):\n                new_artwork_url = artwork_url.replace('-large', '-t500x500')\n                try:\n                    image_data = requests.get(new_artwork_url).content\n                except Exception as e:\n                    image_data = requests.get(artwork_url).content\n            else:\n                image_data = requests.get(artwork_url).content\n            audio = MP3(filename, ID3=OldID3)\n            audio.tags.add(APIC(encoding=3, mime=mime, type=3, desc='Cover', data=image_data))\n            audio.save()\n        if url:\n            audio = MP3(filename, ID3=OldID3)\n            audio.tags.add(WXXX(encoding=3, url=url))\n            audio.save()\n        return True\n    except Exception as e:\n        puts((colored.red('Problem tagging file: ') + colored.white('Is this file a WAV?')))\n        return False", "docstring": "Attempt to put ID3 tags on a file.\n\nArgs:\nartist (str):\ntitle (str):\nyear (int):\ngenre (str):\nartwork_url (str):\nalbum (str):\ntrack_number (str):\nfilename (str):\nurl (str):", "source": "codesearchnet"}
{"code": "def remove_acl(path):\n    \n    \n    if (platform.system() == constants.PLATFORM_DARWIN and\n            os.path.isfile('/bin/chmod')):\n        subprocess.call(['/bin/chmod', '-R', '-N', path])\n    elif ((platform.system() == constants.PLATFORM_LINUX) and\n            os.path.isfile('/bin/setfacl')):\n        subprocess.call(['/bin/setfacl', '-R', '-b', path])", "docstring": "Remove the ACL of the file or folder located on the given path.\n\nAlso remove the ACL of any file and folder below the given one,\nrecursively.\n\nArgs:\npath (str): Path to the file or folder to remove the ACL for,\nrecursively.", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        if other.__class__ is not self.__class__:\n            return NotImplemented\n        return (\n            self._tp__get_typed_properties()\n            == other._tp__get_typed_properties()\n        )", "docstring": "Test if two objects of the same base class are equal.\n\nIf the objects are not of the same class, Python will default to\ncomparison-by-ID.\n\nArgs:\nother: The object to compare for equality.\n\nReturns:\nTrue if the objects are equal; else False.", "source": "juraj-google-style"}
{"code": "def delete(self, name, **kwargs):\n        \n        self.gitlab.http_delete(self.path, query_data={'name': name}, **kwargs)", "docstring": "Delete a Label on the server.\n\nArgs:\nname: The name of the label\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def AddEventTags(self, event_tags):\n    \n    self._RaiseIfNotWritable()\n\n    for event_tag in event_tags:\n      self.AddEventTag(event_tag)", "docstring": "Adds event tags.\n\nArgs:\nevent_tags (list[EventTag]): event tags.\n\nRaises:\nIOError: when the storage file is closed or read-only or\nif the event tags cannot be serialized.\nOSError: when the storage file is closed or read-only or\nif the event tags cannot be serialized.", "source": "juraj-google-style"}
{"code": "def _LastEntryTimestamp(dct, upper_bound_timestamp):\n    if (upper_bound_timestamp is None):\n        upper_bound = (lambda _: True)\n    else:\n        upper_bound = (lambda key: (key <= upper_bound_timestamp))\n    try:\n        return max(filter(upper_bound, iterkeys(dct)))\n    except ValueError:\n        return None", "docstring": "Searches for greatest timestamp lower than the specified one.\n\nArgs:\ndct: A dictionary from timestamps to some items.\nupper_bound_timestamp: An upper bound for timestamp to be returned.\n\nReturns:\nGreatest timestamp that is lower than the specified one. If no such value\nexists, `None` is returned.", "source": "codesearchnet"}
{"code": "def df_first_row_to_dict(df):\n    if (df is not None):\n        return [dict(r) for (i, r) in df.head(1).iterrows()][0]", "docstring": "First DataFrame row to list of dict\n\nArgs:\ndf (pandas.DataFrame): A DataFrame with at least one row\n\nReturns:\nA list of dict that looks like:\n\n[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]\n\nfrom a DataFrame that looks like:\n\nC1  C2  C3\n1   x   y   z\n\nElse if `df` is `None`, returns `None`", "source": "codesearchnet"}
{"code": "def GetCredential(self, path_spec, identifier):\n    \n    credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})\n    return credentials.get(identifier, None)", "docstring": "Retrieves a specific credential from the key chain.\n\nArgs:\npath_spec (PathSpec): path specification.\nidentifier (str): credential identifier.\n\nReturns:\nobject: credential or None if the credential for the path specification\nis not set.", "source": "juraj-google-style"}
{"code": "def resize(self, height, width):\n        \n        return self.client.api.resize(self.id, height, width)", "docstring": "Resize the tty session.\n\nArgs:\nheight (int): Height of tty session\nwidth (int): Width of tty session\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def pretokenized_tfrecord_dataset(filenames, text2self, eos_included, repeat, batch_size, sequence_length):\n    dataset = tf.data.TFRecordDataset(filenames, buffer_size=((64 * 1024) * 1024))\n    if repeat:\n        dataset = dataset.repeat()\n    keys = (['targets'] if text2self else ['inputs', 'targets'])\n\n    def decode_example(serialized_example):\n        'Return a dict of Tensors from a serialized tensorflow.Example.'\n        data_fields = {}\n        data_items_to_decoders = {}\n        for k in keys:\n            data_fields[k] = tf.VarLenFeature(tf.int64)\n            data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k)\n        decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(data_fields, data_items_to_decoders)\n        decode_items = list(sorted(data_items_to_decoders))\n        decoded = decoder.decode(serialized_example, items=decode_items)\n        if (not eos_included):\n            decoded = [tf.concat([v, [1]], 0) for v in decoded]\n        return dict(zip(decode_items, decoded))\n    dataset = dataset.map(decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n    return pack_and_batch(dataset, batch_size, sequence_length)", "docstring": "Reads tensor2tensor-style data files.\n\nThe dataset is defined by sets of TFRecord files of TFExample protos.\nThere should be a \"targets\" feature (a 1d tensor of integers)\nIf not text2self, there should also be an \"inputs\" feature.\nOther features get ignored.\n\neos_included specifies whether the inputs and targets were written with an\nEOS token, as in tensor2tensor\n\nArgs:\nfilenames: a list of strings\ntext2self: a boolean\neos_included: a boolean\nrepeat: a boolean\nbatch_size: an integer\nsequence_length: an integer\nReturns:\nA tf.data.Dataset of batches", "source": "codesearchnet"}
{"code": "def merge(self, options):\n    if (not options):\n        return _CallSettings(timeout=self.timeout, retry=self.retry, page_descriptor=self.page_descriptor, page_token=self.page_token, bundler=self.bundler, bundle_descriptor=self.bundle_descriptor, kwargs=self.kwargs)\n    else:\n        if (options.timeout == OPTION_INHERIT):\n            timeout = self.timeout\n        else:\n            timeout = options.timeout\n        if (options.retry == OPTION_INHERIT):\n            retry = self.retry\n        else:\n            retry = options.retry\n        if (options.page_token == OPTION_INHERIT):\n            page_token = self.page_token\n        else:\n            page_token = options.page_token\n        if options.is_bundling:\n            bundler = self.bundler\n        else:\n            bundler = None\n        if (options.kwargs == OPTION_INHERIT):\n            kwargs = self.kwargs\n        else:\n            kwargs = self.kwargs.copy()\n            kwargs.update(options.kwargs)\n        return _CallSettings(timeout=timeout, retry=retry, page_descriptor=self.page_descriptor, page_token=page_token, bundler=bundler, bundle_descriptor=self.bundle_descriptor, kwargs=kwargs)", "docstring": "Returns new _CallSettings merged from this and a CallOptions object.\n\nNote that passing if the CallOptions instance specifies a page_token,\nthe merged _CallSettings will have ``flatten_pages`` disabled. This\npermits toggling per-resource/per-page page streaming.\n\nArgs:\noptions (CallOptions): an instance whose values override\nthose in this object. If None, ``merge`` returns a copy of this\nobject\n\nReturns:\nCallSettings: The merged settings and options.", "source": "codesearchnet"}
{"code": "def ip_geoloc(ip, hit_api=True):\n    from ..logs.models import IPInfoCheck\n    try:\n        obj = IPInfoCheck.objects.get(ip_address=ip).ip_info\n    except IPInfoCheck.DoesNotExist:\n        if hit_api:\n            try:\n                obj = IPInfoCheck.check_ip(ip)\n            except RateExceededError:\n                return None\n        else:\n            return None\n    return (obj.latitude, obj.longitude)", "docstring": "Get IP geolocation.\n\nArgs:\nip (str): IP address to use if no data provided.\nhit_api (bool): whether to hit api if info not found.\n\nReturns:\nstr: latitude and longitude, comma-separated.", "source": "codesearchnet"}
{"code": "def _archive_elements(self):\n        \n        try:\n            stackfile_key, propertyfile_key = self._craft_s3_keys()\n\n            template_file = self._config.get('environment', {}).get('template', None)\n            bucket = self._config.get('environment', {}).get('bucket', None)\n            if not os.path.isfile(template_file):\n                logging.info(\"{} is not actually a file\".format(template_file))\n                return False\n\n            logging.info('Copying parameters to s3:\n            temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])\n            with open(temp_file_name, 'w') as dump_file:\n                json.dump(self._parameters, dump_file, indent=4)\n\n            self._s3.upload_file(temp_file_name, bucket, propertyfile_key)\n\n            logging.info('Copying {} to s3:\n            self._s3.upload_file(template_file, bucket, stackfile_key)\n\n            self._templateUrl = 'https:\n            logging.info(\"template_url: \" + self._templateUrl)\n            return True\n        except Exception as x:\n            logging.error('Exception caught in copy_stuff_to_S3(): {}'.format(x))\n            traceback.print_exc(file=sys.stdout)\n            return False", "docstring": "Cloud Formation likes to take the template from S3 so here we put the\ntemplate into S3. We also store the parameters file that was used in\nthis run. Note: you can pass anything as the version string but you\nshould at least consider a version control tag or git commit hash as\nthe version.\n\nArgs:\nNone\n\nReturns:\nTrue if the stuff lands in S3 or False if the file doesn't\nreally exist or the upload goes sideways.", "source": "juraj-google-style"}
{"code": "def jsonify(data, pretty=False, **kwargs):\n    isod = isinstance(data, OrderedDict)\n    params = {'for_json': True, 'default': _complex_encode}\n    if pretty:\n        params['indent'] = 2\n        params['sort_keys'] = (False if isod else True)\n    params.update(kwargs)\n    try:\n        return json.dumps(data, ensure_ascii=False, **params)\n    except UnicodeDecodeError:\n        return json.dumps(data, **params)", "docstring": "Serialize Python objects to JSON with optional 'pretty' formatting\n\nRaises:\nTypeError: from :mod:`json` lib\nValueError: from :mod:`json` lib\nJSONDecodeError: from :mod:`json` lib", "source": "codesearchnet"}
{"code": "def bearing(self, format='numeric'):\n        \n        bearings = []\n        for segment in self:\n            if len(segment) < 2:\n                bearings.append([])\n            else:\n                bearings.append(segment.bearing(format))\n        return bearings", "docstring": "Calculate bearing between locations in segments.\n\nArgs:\nformat (str): Format of the bearing string to return\n\nReturns:\nlist of list of float: Groups of bearings between points in\nsegments", "source": "juraj-google-style"}
{"code": "def get_all_anchors(stride=None, sizes=None):\n    \n    if stride is None:\n        stride = cfg.RPN.ANCHOR_STRIDE\n    if sizes is None:\n        sizes = cfg.RPN.ANCHOR_SIZES\n    \n    \n    \n    cell_anchors = generate_anchors(\n        stride,\n        scales=np.array(sizes, dtype=np.float) / stride,\n        ratios=np.array(cfg.RPN.ANCHOR_RATIOS, dtype=np.float))\n    \n    \n\n    max_size = cfg.PREPROC.MAX_SIZE\n    field_size = int(np.ceil(max_size / stride))\n    shifts = np.arange(0, field_size) * stride\n    shift_x, shift_y = np.meshgrid(shifts, shifts)\n    shift_x = shift_x.flatten()\n    shift_y = shift_y.flatten()\n    shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()\n    \n    K = shifts.shape[0]\n\n    A = cell_anchors.shape[0]\n    field_of_anchors = (\n        cell_anchors.reshape((1, A, 4)) +\n        shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n    field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))\n    \n    \n    \n    field_of_anchors = field_of_anchors.astype('float32')\n    field_of_anchors[:, :, :, [2, 3]] += 1\n    return field_of_anchors", "docstring": "Get all anchors in the largest possible image, shifted, floatbox\nArgs:\nstride (int): the stride of anchors.\nsizes (tuple[int]): the sizes (sqrt area) of anchors\n\nReturns:\nanchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox\nThe layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE.", "source": "juraj-google-style"}
{"code": "def __init__(self, module_name, text):\n    \n    super(Report, self).__init__()\n    self.module_name = module_name\n    self.text = text", "docstring": "Initializes the analysis report.\nArgs:\nmodule_name (str): name of the analysis plugin that generated\nthe report.\ntext (str): report text.", "source": "juraj-google-style"}
{"code": "def parse_args(bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors) -> Tuple[(Parsed, Errors)]:\n    commas = char_locs['commas']\n    for span in parsed:\n        if ((parsed[span]['type'] != 'Function') or ('parens_span' not in parsed[span])):\n            continue\n        (sp, ep) = parsed[span]['parens_span']\n        if (ep == (- 1)):\n            args_end = (len(bels) - 1)\n        else:\n            args_end = (ep - 1)\n        args = []\n        arg_start = (sp + 1)\n        each_arg_end_list = sorted(([(end - 1) for end in commas.get(sp, [])] + [args_end]))\n        for arg_end in each_arg_end_list:\n            while ((arg_start < args_end) and (bels[arg_start] == ' ')):\n                arg_start += 1\n            trimmed_arg_end = arg_end\n            while ((trimmed_arg_end > arg_start) and (bels[trimmed_arg_end] == ' ')):\n                trimmed_arg_end -= 1\n            if (trimmed_arg_end < arg_start):\n                trimmed_arg_end = arg_start\n            arg = ''.join(bels[arg_start:(trimmed_arg_end + 1)])\n            args.append({'arg': arg, 'span': (arg_start, trimmed_arg_end)})\n            arg_start = (arg_end + 2)\n        parsed[span]['args'] = args\n    return (parsed, errors)", "docstring": "Parse arguments from functions\n\nArgs:\nbels: BEL string as list of chars\nchar_locs: char locations for parens, commas and quotes\nparsed: function locations\nerrors: error messages\n\nReturns:\n(functions, errors): function and arg locations plus error messages", "source": "codesearchnet"}
{"code": "def _process_active_view_and_verification(self, placement, feed_item):\n    if FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION in feed_item:\n        if feed_item.get(FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION, None) == 'ON':\n            placement['vpaidAdapterChoice'] = 'HTML5'\n            placement['videoActiveViewOptOut'] = False\n        elif feed_item.get(FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION, None) == 'OFF':\n            placement['vpaidAdapterChoice'] = 'DEFAULT'\n            placement['videoActiveViewOptOut'] = True\n        elif feed_item[FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION] == 'LET_DCM_DECIDE' or feed_item[FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION] == '':\n            placement['vpaidAdapterChoice'] = 'DEFAULT'\n            placement['videoActiveViewOptOut'] = False\n        else:\n            raise Exception('%s is not a valid value for the placement Active View and Verification field' % feed_item.get(FieldMap.PLACEMENT_ACTIVE_VIEW_AND_VERIFICATION, None))", "docstring": "Updates / creates active view and verification settings.\n\nThis method updates the CM item by setting or creating active view and\nverification settings based on the Bulkdozer feed configurations.\n\nArgs:\nplacement: The CM placement object to be updated.\nfeed_item: The Bulkdozer feed item with the configurations.\n\nRaises:\nException: In case the values for active view and verification enumeration\nis invalid.", "source": "github-repos"}
{"code": "def noise_get(\n    n: tcod.noise.Noise, f: Sequence[float], typ: int = NOISE_DEFAULT\n) -> float:\n    \n    return float(lib.TCOD_noise_get_ex(n.noise_c, ffi.new(\"float[4]\", f), typ))", "docstring": "Return the noise value sampled from the ``f`` coordinate.\n\n``f`` should be a tuple or list with a length matching\n:any:`Noise.dimensions`.\nIf ``f`` is shoerter than :any:`Noise.dimensions` the missing coordinates\nwill be filled with zeros.\n\nArgs:\nn (Noise): A Noise instance.\nf (Sequence[float]): The point to sample the noise from.\ntyp (int): The noise algorithm to use.\n\nReturns:\nfloat: The sampled noise value.", "source": "juraj-google-style"}
{"code": "def __init__(self, query_functions=None):\n        \n        super(QueryRequestPayload, self).__init__(enums.Tags.REQUEST_PAYLOAD)\n\n        self._query_functions = None\n\n        self.query_functions = query_functions", "docstring": "Construct a QueryRequestPayload object.\n\nArgs:\nquery_functions (list): A list of QueryFunction enumerations.", "source": "juraj-google-style"}
{"code": "def as_operation(self):\n    result = encoding.CopyProtoMessage(self._op)\n    names = sorted(self._metric_values_by_name_then_sign.keys())\n    for name in names:\n        mvs = self._metric_values_by_name_then_sign[name]\n        result.metricValueSets.append(sc_messages.MetricValueSet(metricName=name, metricValues=mvs.values()))\n    return result", "docstring": "Obtains a single `Operation` representing this instances contents.\n\nReturns:\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`", "source": "codesearchnet"}
{"code": "def get_vmss_vm(access_token, subscription_id, resource_group, vmss_name, instance_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,\n                        '/virtualMachines/', str(instance_id),\n                        '?api-version=', COMP_API])\n    return do_get(endpoint, access_token)", "docstring": "Get individual VMSS VM details.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvmss_name (str): Name of the virtual machine scale set.\ninstance_id (int): VM ID of the scale set VM.\n\nReturns:\nHTTP response. JSON body of VMSS VM model view.", "source": "juraj-google-style"}
{"code": "def previous_weekday(date):\n    \n    weekday = date.weekday()\n    if weekday == 0:\n        n_days = 3\n    elif weekday == 6:\n        n_days = 2\n    else:\n        n_days = 1\n    return date - datetime.timedelta(days=n_days)", "docstring": "Returns the last weekday before date\n\nArgs:\ndate (datetime or datetime.date)\nReturns:\n(datetime or datetime.date)\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def build_eval_session(module_spec, class_count):\n    (eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization) = create_module_graph(module_spec)\n    eval_sess = tf.Session(graph=eval_graph)\n    with eval_graph.as_default():\n        (_, _, bottleneck_input, ground_truth_input, final_tensor) = add_final_retrain_ops(class_count, FLAGS.final_tensor_name, bottleneck_tensor, wants_quantization, is_training=False)\n        tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME)\n        (evaluation_step, prediction) = add_evaluation_step(final_tensor, ground_truth_input)\n    return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, evaluation_step, prediction)", "docstring": "Builds an restored eval session without train operations for exporting.\n\nArgs:\nmodule_spec: The hub.ModuleSpec for the image module being used.\nclass_count: Number of classes\n\nReturns:\nEval session containing the restored eval graph.\nThe bottleneck input, ground truth, eval step, and prediction tensors.", "source": "codesearchnet"}
{"code": "def add_cookie(self, cookie_dict):\n    if (not isinstance(cookie_dict, dict)):\n        raise TypeError('Type of the cookie must be a dict.')\n    if ((not cookie_dict.get('name', None)) or (not cookie_dict.get('value', None))):\n        raise KeyError(\"Missing required keys, 'name' and 'value' must be provided.\")\n    self._execute(Command.ADD_COOKIE, {'cookie': cookie_dict})", "docstring": "Set a cookie.\n\nSupport:\nWeb(WebView)\n\nArgs:\ncookie_dict: A dictionary contain keys: \"name\", \"value\",\n[\"path\"], [\"domain\"], [\"secure\"], [\"httpOnly\"], [\"expiry\"].\n\nReturns:\nWebElement Object.", "source": "codesearchnet"}
{"code": "def count(self, value):\n        \n        if value == self._defaults['count'] and 'count' in self._values:\n            del self._values['count']\n        else:\n            self._values['count'] = value", "docstring": "The count property.\n\nArgs:\nvalue (int). the property value.", "source": "juraj-google-style"}
{"code": "def bytes_to_long(bytesdata: bytes) -> int:\n    assert (len(bytesdata) == 8)\n    return sum(((b << (k * 8)) for (k, b) in enumerate(bytesdata)))", "docstring": "Converts an 8-byte sequence to a long integer.\n\nArgs:\nbytesdata: 8 consecutive bytes, as a ``bytes`` object, in\nlittle-endian format (least significant byte [LSB] first)\n\nReturns:\ninteger", "source": "codesearchnet"}
{"code": "def PushEventSource(self, event_source):\n    if (event_source.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY):\n        weight = 1\n    else:\n        weight = 100\n    heap_values = (weight, time.time(), event_source)\n    heapq.heappush(self._heap, heap_values)", "docstring": "Pushes an event source onto the heap.\n\nArgs:\nevent_source (EventSource): event source.", "source": "codesearchnet"}
{"code": "def backend():\n    return 'tensorflow'", "docstring": "Publicly accessible method for determining the current backend.\n\nOnly exists for API compatibility with multi-backend Keras.\n\nReturns:\nThe string \"tensorflow\".", "source": "github-repos"}
{"code": "def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):\n\n    def cat_input_and_doc(doc_title, doc_text, input_string, prefix):\n        if doc_title.startswith('\"'):\n            doc_title = doc_title[1:]\n        if doc_title.endswith('\"'):\n            doc_title = doc_title[:-1]\n        if prefix is None:\n            prefix = ''\n        out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace('  ', ' ')\n        return out\n    rag_input_strings = [cat_input_and_doc(docs[i]['title'][j], docs[i]['text'][j], input_strings[i], prefix) for i in range(len(docs)) for j in range(n_docs)]\n    contextualized_inputs = self.generator_tokenizer.batch_encode_plus(rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding='max_length', truncation=True)\n    return (contextualized_inputs['input_ids'], contextualized_inputs['attention_mask'])", "docstring": "Postprocessing retrieved `docs` and combining them with `input_strings`.\n\nArgs:\ndocs  (`dict`):\nRetrieved documents.\ninput_strings (`str`):\nInput strings decoded by `preprocess_query`.\nprefix (`str`):\nPrefix added at the beginning of each input, typically used with T5-based models.\n\nReturn:\n`tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible\n`attention_mask`.", "source": "github-repos"}
{"code": "def list(self, path):\n    self.__validate_storage_path(path)\n    entity = self.api_client.get_entity_by_query(path=path)\n    if (entity['entity_type'] not in self.__BROWSABLE_TYPES):\n        raise StorageArgumentException('The entity type \"{0}\" cannot belisted'.format(entity['entity_type']))\n    entity_uuid = entity['uuid']\n    file_names = []\n    more_pages = True\n    page_number = 1\n    while more_pages:\n        response = self.api_client.list_folder_content(entity_uuid, page=page_number, ordering='name')\n        more_pages = (response['next'] is not None)\n        page_number += 1\n        for child in response['results']:\n            pattern = ('/{name}' if (child['entity_type'] == 'folder') else '{name}')\n            file_names.append(pattern.format(name=child['name']))\n    return file_names", "docstring": "List the entities found directly under the given path.\n\nArgs:\npath (str): The path of the entity to be listed. Must start with a '/'.\n\nReturns:\nThe list of entity names directly under the given path:\n\nu'/12345/folder_1'\n\nRaises:\nStorageArgumentException: Invalid arguments\nStorageForbiddenException: Server response code 403\nStorageNotFoundException: Server response code 404\nStorageException: other 400-600 error codes", "source": "codesearchnet"}
{"code": "def set_scf_initial_guess(self, guess='SAD'):\n    availabel_guesses = {'core', 'sad', 'gwh', 'read', 'fragmo'}\n    if (guess.lower() not in availabel_guesses):\n        raise ValueError((('The guess method ' + guess) + ' is not supported yet'))\n    self.params['rem']['scf_guess'] = guess.lower()", "docstring": "Set initial guess method to be used for SCF\n\nArgs:\nguess: The initial guess method. (str)", "source": "codesearchnet"}
{"code": "def sed(regexpr, repl, force=False, recursive=False, dpath_list=None, fpath_list=None, verbose=None, include_patterns=None, exclude_patterns=[]):\n    if (include_patterns is None):\n        include_patterns = ['*.py', '*.pyx', '*.pxi', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.html', '*.tex']\n    if (dpath_list is None):\n        dpath_list = [os.getcwd()]\n    if (verbose is None):\n        verbose = ut.NOT_QUIET\n    if (fpath_list is None):\n        greater_exclude_dirs = get_standard_exclude_dnames()\n        exclude_dirs = []\n        fpath_generator = matching_fpaths(dpath_list, include_patterns, exclude_dirs, greater_exclude_dirs=greater_exclude_dirs, recursive=recursive, exclude_patterns=exclude_patterns)\n    else:\n        fpath_generator = fpath_list\n    if verbose:\n        print(('sed-ing %r' % (dpath_list,)))\n        print((' * regular expression : %r' % (regexpr,)))\n        print((' * replacement        : %r' % (repl,)))\n        print((' * include_patterns   : %r' % (include_patterns,)))\n        print((' * recursive: %r' % (recursive,)))\n        print((' * force: %r' % (force,)))\n        from utool import util_str\n        print((' * fpath_list: %s' % (util_str.repr3(fpath_list),)))\n    regexpr = extend_regex(regexpr)\n    num_changed = 0\n    num_files_checked = 0\n    fpaths_changed = []\n    for fpath in fpath_generator:\n        num_files_checked += 1\n        changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose)\n        if (changed_lines is not None):\n            fpaths_changed.append(fpath)\n            num_changed += len(changed_lines)\n    import utool as ut\n    print(('num_files_checked = %r' % (num_files_checked,)))\n    print(('fpaths_changed = %s' % (ut.repr3(sorted(fpaths_changed)),)))\n    print(('total lines changed = %r' % (num_changed,)))", "docstring": "Python implementation of sed. NOT FINISHED\n\nsearches and replaces text in files\n\nArgs:\nregexpr (str): regx patterns to find\nrepl (str): text to replace\nforce (bool):\nrecursive (bool):\ndpath_list (list): directories to search (defaults to cwd)", "source": "codesearchnet"}
{"code": "def plot_main(pid, return_fig_ax=False):\n    global WORKING_DIRECTORY, SNR_CUT\n    if isinstance(pid, PlotInput):\n        pid = pid.return_dict()\n    WORKING_DIRECTORY = '.'\n    if ('WORKING_DIRECTORY' not in pid['general'].keys()):\n        pid['general']['WORKING_DIRECTORY'] = '.'\n    SNR_CUT = 5.0\n    if ('SNR_CUT' not in pid['general'].keys()):\n        pid['general']['SNR_CUT'] = SNR_CUT\n    if ('switch_backend' in pid['general'].keys()):\n        plt.switch_backend(pid['general']['switch_backend'])\n    running_process = MakePlotProcess(**{**pid, **pid['general'], **pid['plot_info'], **pid['figure']})\n    running_process.input_data()\n    running_process.setup_figure()\n    running_process.create_plots()\n    if ('save_figure' in pid['figure'].keys()):\n        if (pid['figure']['save_figure'] is True):\n            running_process.fig.savefig(((pid['general']['WORKING_DIRECTORY'] + '/') + pid['figure']['output_path']), **pid['figure']['savefig_kwargs'])\n    if ('show_figure' in pid['figure'].keys()):\n        if (pid['figure']['show_figure'] is True):\n            plt.show()\n    if (return_fig_ax is True):\n        return (running_process.fig, running_process.ax)\n    return", "docstring": "Main function for creating these plots.\n\nReads in plot info dict from json file or dictionary in script.\n\nArgs:\nreturn_fig_ax (bool, optional): Return figure and axes objects.\n\nReturns:\n2-element tuple containing\n- **fig** (*obj*): Figure object for customization outside of those in this program.\n- **ax** (*obj*): Axes object for customization outside of those in this program.", "source": "codesearchnet"}
{"code": "def add_node(self, node_id, name, labels):\n        \n        node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})\n        try:\n            node.add_labels(*labels)\n        except NotImplementedError:\n            pass", "docstring": "Add the node with name and labels.\n\nArgs:\nnode_id: Id for the node.\nname: Name for the node.\nlabels: Label for the node.\n\nRaises:\nNotImplementedError: When adding labels is not supported.", "source": "juraj-google-style"}
{"code": "def filter_by_pattern(self, pattern):\n        \n        _filt_values, _filt_datetimes = self._filter_by_pattern(pattern)\n        collection = HourlyDiscontinuousCollection(\n            self.header.duplicate(), _filt_values, _filt_datetimes)\n        collection._validated_a_period = True\n        return collection", "docstring": "Filter the Data Collection based on a list of booleans.\n\nArgs:\npattern: A list of True/False values.  Typically, this is a list\nwith a length matching the length of the Data Collections values\nbut it can also be a pattern to be repeated over the Data Collection.\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def ensure_exe(exe_name: str, *paths: str):  \n    \n    if not elib_run.find_executable(exe_name, *paths):\n        LOGGER.error('could not find \"%s.exe\" on this system', exe_name)\n        sys.exit(-1)", "docstring": "Makes sure that an executable can be found on the system path.\nWill exit the program if the executable cannot be found\n\nArgs:\nexe_name: name of the executable\npaths: optional path(s) to be searched; if not specified, search the whole system", "source": "juraj-google-style"}
{"code": "def intify(x):\n    \n    if isinstance(x, int):\n        return x\n\n    try:\n        return int(x, 0)\n    except (TypeError, ValueError):\n        return None", "docstring": "Ensure ( or coerce ) a value into being an integer or None.\n\nArgs:\nx (obj):    An object to intify\n\nReturns:\n(int):  The int value ( or None )", "source": "juraj-google-style"}
{"code": "def _split_op(self, identifier, hs_label=None, dagger=False, args=None):\n    if self._isinstance(identifier, 'SymbolicLabelBase'):\n        identifier = QnetAsciiDefaultPrinter()._print_SCALAR_TYPES(identifier.expr)\n    (name, total_subscript) = self._split_identifier(identifier)\n    total_superscript = ''\n    if (hs_label not in [None, '']):\n        if (self._settings['show_hs_label'] == 'subscript'):\n            if (len(total_subscript) == 0):\n                total_subscript = (('(' + hs_label) + ')')\n            else:\n                total_subscript += ((',(' + hs_label) + ')')\n        else:\n            total_superscript += (('(' + hs_label) + ')')\n    if dagger:\n        total_superscript += self._dagger_sym\n    args_str = ''\n    if ((args is not None) and (len(args) > 0)):\n        args_str = ((self._parenth_left + ','.join([self.doprint(arg) for arg in args])) + self._parenth_right)\n    return (name, total_subscript, total_superscript, args_str)", "docstring": "Return `name`, total `subscript`, total `superscript` and\n`arguments` str. All of the returned strings are fully rendered.\n\nArgs:\nidentifier (str or SymbolicLabelBase): A (non-rendered/ascii)\nidentifier that may include a subscript. The output `name` will\nbe the `identifier` without any subscript\nhs_label (str): The rendered label for the Hilbert space of the\noperator, or None. Returned unchanged.\ndagger (bool): Flag to indicate whether the operator is daggered.\nIf True, :attr:`dagger_sym` will be included in the\n`superscript` (or  `subscript`, depending on the settings)\nargs (list or None): List of arguments (expressions). Each element\nwill be rendered with :meth:`doprint`. The total list of args\nwill then be joined with commas, enclosed\nwith :attr:`_parenth_left` and :attr:`parenth_right`, and\nreturnd as the `arguments` string", "source": "codesearchnet"}
{"code": "def create_attribute_model(self, initial_value=None):\n        \n        \n        attr = self.attribute_class(meta=self, value=initial_value)\n        return attr", "docstring": "Make an AttributeModel instance of the correct type for this Meta\n\nArgs:\ninitial_value: The initial value the Attribute should take\n\nReturns:\nAttributeModel: The created attribute model instance", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    result = cls + token_ids_0 + sep\n    if token_ids_1 is not None:\n        result += token_ids_1 + sep\n    return result", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A CANINE sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def _md5sum(file_path):\n    md5 = hashlib.md5()\n    with open(file_path, 'rb') as md5_file:\n        while True:\n            data = md5_file.read(((1024 * 1024) * 4))\n            if (not data):\n                break\n            md5.update(data)\n    return md5.digest()", "docstring": "Helper function that builds and md5sum from a file in chunks.\n\nArgs:\nfile_path: The path to the file you want an md5sum for.\n\nReturns:\nA string containing an md5sum.", "source": "codesearchnet"}
{"code": "def CalculateWaitForRetry(retry_attempt, max_wait=60):\n    \n\n    wait_time = 2 ** retry_attempt\n    max_jitter = wait_time / 4.0\n    wait_time += random.uniform(-max_jitter, max_jitter)\n    return max(1, min(wait_time, max_wait))", "docstring": "Calculates amount of time to wait before a retry attempt.\n\nWait time grows exponentially with the number of attempts. A\nrandom amount of jitter is added to spread out retry attempts from\ndifferent clients.\n\nArgs:\nretry_attempt: Retry attempt counter.\nmax_wait: Upper bound for wait time [seconds].\n\nReturns:\nNumber of seconds to wait before retrying request.", "source": "juraj-google-style"}
{"code": "def matrix_rank(a, tol=None, validate_args=False, name=None):\n    with ops.name_scope(name or 'matrix_rank'):\n        a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')\n        assertions = _maybe_validate_matrix(a, validate_args)\n        if assertions:\n            with ops.control_dependencies(assertions):\n                a = array_ops.identity(a)\n        s = svd(a, compute_uv=False)\n        if tol is None:\n            if a.shape[-2:].is_fully_defined():\n                m = np.max(a.shape[-2:].as_list())\n            else:\n                m = math_ops.reduce_max(array_ops.shape(a)[-2:])\n            eps = np.finfo(a.dtype.as_numpy_dtype).eps\n            tol = eps * math_ops.cast(m, a.dtype) * math_ops.reduce_max(s, axis=-1, keepdims=True)\n        return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)", "docstring": "Compute the matrix rank of one or more matrices.\n\nArgs:\na: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\npseudo-inverted.\ntol: Threshold below which the singular value is counted as 'zero'.\nDefault value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).\nvalidate_args: When `True`, additional assertions might be embedded in the\ngraph.\nDefault value: `False` (i.e., no graph assertions are added).\nname: Python `str` prefixed to ops created by this function.\nDefault value: 'matrix_rank'.\n\nReturns:\nmatrix_rank: (Batch of) `int32` scalars representing the number of non-zero\nsingular values.", "source": "github-repos"}
{"code": "def open_image(fn):\n    flags = ((cv2.IMREAD_UNCHANGED + cv2.IMREAD_ANYDEPTH) + cv2.IMREAD_ANYCOLOR)\n    if ((not os.path.exists(fn)) and (not str(fn).startswith('http'))):\n        raise OSError('No such file or directory: {}'.format(fn))\n    elif (os.path.isdir(fn) and (not str(fn).startswith('http'))):\n        raise OSError('Is a directory: {}'.format(fn))\n    elif isdicom(fn):\n        slice = pydicom.read_file(fn)\n        if slice.PhotometricInterpretation.startswith('MONOCHROME'):\n            im = np.stack(([slice.pixel_array] * 3), (- 1))\n            return (im / ((1 << slice.BitsStored) - 1))\n        else:\n            raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation))\n    else:\n        try:\n            if str(fn).startswith('http'):\n                req = urllib.urlopen(str(fn))\n                image = np.asarray(bytearray(req.read()), dtype='uint8')\n                im = (cv2.imdecode(image, flags).astype(np.float32) / 255)\n            else:\n                im = (cv2.imread(str(fn), flags).astype(np.float32) / 255)\n            if (im is None):\n                raise OSError(f'File not recognized by opencv: {fn}')\n            return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n        except Exception as e:\n            raise OSError('Error handling image at: {}'.format(fn)) from e", "docstring": "Opens an image using OpenCV given the file path.\n\nArguments:\nfn: the file path of the image\n\nReturns:\nThe image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0", "source": "codesearchnet"}
{"code": "def get_token(wallet: 'Wallet', token_str: str) -> 'NEP5Token.NEP5Token':\n    if token_str.startswith('0x'):\n        token_str = token_str[2:]\n    token = None\n    for t in wallet.GetTokens().values():\n        if (token_str in [t.symbol, t.ScriptHash.ToString()]):\n            token = t\n            break\n    if (not isinstance(token, NEP5Token.NEP5Token)):\n        raise ValueError('The given token argument does not represent a known NEP5 token')\n    return token", "docstring": "Try to get a NEP-5 token based on the symbol or script_hash\n\nArgs:\nwallet: wallet instance\ntoken_str: symbol or script_hash (accepts script hash with or without 0x prefix)\nRaises:\nValueError: if token is not found\n\nReturns:\nNEP5Token instance if found.", "source": "codesearchnet"}
{"code": "def __getitem__(self, slice_: Tuple[Union[slice, np.ndarray, int], Union[slice, np.ndarray, int]]) -> loompy.LoomView:\n\t\t\n\t\tif type(slice_) is not tuple or len(slice_) is not 2:\n\t\t\traise ValueError(\"Views require slices along two dimensions\")\n\n\t\trows = slice_[0]\n\t\tcols = slice_[1]\n\n\t\tra = self.ds.ra[rows]\n\t\trow_graphs = self.ds.row_graphs[rows]\n\t\tca = self.ds.ca[cols]\n\t\tcol_graphs = self.ds.col_graphs[cols]\n\t\tlayers = self.ds.layer[rows, cols]\n\n\t\treturn loompy.LoomView(layers, ra, ca, row_graphs, col_graphs, filename=self.ds.filename, file_attrs=self.ds.attrs)", "docstring": "Create a new view by slicing through the loom file or view\n\nArgs:\nslice_ (2-tuple of slice, int or np.ndarray): \tHow to slice the file or view\n\nReturns:\nA LoomView object, an in-memory representation of the sliced file", "source": "juraj-google-style"}
{"code": "def loads(conditions_string):\n  \n  decoder = ConditionDecoder(_audience_condition_deserializer)\n\n  \n  \n  json_decoder = json.JSONDecoder(object_hook=decoder.object_hook)\n\n  \n  condition_structure = json_decoder.decode(conditions_string)\n  condition_list = decoder.condition_list\n\n  return (condition_structure, condition_list)", "docstring": "Deserializes the conditions property into its corresponding\ncomponents: the condition_structure and the condition_list.\n\nArgs:\nconditions_string: String defining valid and/or conditions.\n\nReturns:\nA tuple of (condition_structure, condition_list).\ncondition_structure: nested list of operators and placeholders for operands.\ncondition_list: list of conditions whose index correspond to the values of the placeholders.", "source": "juraj-google-style"}
{"code": "def resolve_context(self, verbosity=0, max_fails=(- 1), timestamp=None, callback=None, buf=None, package_load_callback=None):\n    package_filter = PackageFilterList.from_pod(self.package_filter)\n    context = ResolvedContext(self.request, package_paths=self.packages_path, package_filter=package_filter, verbosity=verbosity, max_fails=max_fails, timestamp=timestamp, buf=buf, callback=callback, package_load_callback=package_load_callback, caching=self.caching)\n    if context.success:\n        if (self._context and self._context.load_path):\n            context.set_load_path(self._context.load_path)\n        self._set_context(context)\n        self._modified = True\n    return context", "docstring": "Update the current context by performing a re-resolve.\n\nThe newly resolved context is only applied if it is a successful solve.\n\nReturns:\n`ResolvedContext` object, which may be a successful or failed solve.", "source": "codesearchnet"}
{"code": "def update(self, span: typing.Tuple[int, int], line_type: LineType) -> None:\n        \n        first_block_line, last_block_line = span\n        for i in range(first_block_line, last_block_line + 1):\n            try:\n                self.__setitem__(i, line_type)\n            except ValueError as error:\n                raise ValidationError(i + self.fn_offset, 1, 'AAA99 {}'.format(error))", "docstring": "Updates line types for a block's span.\n\nArgs:\nspan: First and last relative line number of a Block.\nline_type: The type of line to update to.\n\nRaises:\nValidationError: A special error on collision. This prevents Flake8\nfrom crashing because it is converted to a Flake8 error tuple,\nbut it indicates to the user that something went wrong with\nprocessing the function.", "source": "juraj-google-style"}
{"code": "def save_config(config, logdir=None):\n    if logdir:\n        with config.unlocked:\n            config.logdir = logdir\n        message = 'Start a new run and write summaries and checkpoints to {}.'\n        tf.logging.info(message.format(config.logdir))\n        tf.gfile.MakeDirs(config.logdir)\n        config_path = os.path.join(config.logdir, 'config.yaml')\n        with tf.gfile.FastGFile(config_path, 'w') as file_:\n            yaml.dump(config, file_, default_flow_style=False)\n    else:\n        message = 'Start a new run without storing summaries and checkpoints since no logging directory was specified.'\n        tf.logging.info(message)\n    return config", "docstring": "Save a new configuration by name.\n\nIf a logging directory is specified, is will be created and the configuration\nwill be stored there. Otherwise, a log message will be printed.\n\nArgs:\nconfig: Configuration object.\nlogdir: Location for writing summaries and checkpoints if specified.\n\nReturns:\nConfiguration object.", "source": "codesearchnet"}
{"code": "def create_binary(self, key, value):\n    data = None\n    if ((key is not None) and (value is not None)):\n        try:\n            data = self.db.create(key.strip(), json.dumps(base64.b64encode(bytes(value)).decode('utf-8')))\n        except TypeError:\n            data = self.db.create(key.strip(), json.dumps(base64.b64encode(bytes(value, 'utf-8')).decode('utf-8')))\n    else:\n        self.tcex.log.warning(u'The key or value field was None.')\n    return data", "docstring": "Create method of CRUD operation for binary data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"}
{"code": "def get_class_locals(cls_name: str, allow_methods: bool, ordering, ctx):\n    out = collections.OrderedDict()\n    if cls_name not in ctx.vm.local_ops:\n        return out\n    for op in ctx.vm.local_ops[cls_name]:\n        local = ctx.vm.annotated_locals[cls_name][op.name]\n        if not is_relevant_class_local(local, op.name, allow_methods):\n            continue\n        if ordering is Ordering.FIRST_ANNOTATE:\n            if not op.is_annotate() or op.name in out:\n                continue\n        else:\n            assert ordering is Ordering.LAST_ASSIGN\n            if not op.is_assign():\n                continue\n            elif op.name in out:\n                out.move_to_end(op.name)\n        out[op.name] = local\n    return out", "docstring": "Gets a dictionary of the class's local variables.\n\nArgs:\ncls_name: The name of an abstract.InterpreterClass.\nallow_methods: A bool, whether to allow methods as variables.\nordering: A classgen.Ordering describing the order in which the variables\nshould appear.\nctx: The abstract context.\n\nReturns:\nA collections.OrderedDict of the locals.", "source": "github-repos"}
{"code": "def _ImageDimensions(images, dynamic_shape=False):\n  \n  \n  \n  \n  if dynamic_shape:\n    return array_ops.unpack(array_ops.shape(images))\n  else:\n    return images.get_shape().as_list()", "docstring": "Returns the dimensions of an image tensor.\nArgs:\nimages: 4-D Tensor of shape [batch, height, width, channels]\ndynamic_shape: Whether the input image has undertermined shape. If set to\n`True`, shape information will be retrieved at run time. Default to\n`False`.\n\nReturns:\nlist of integers [batch, height, width, channels]", "source": "juraj-google-style"}
{"code": "def __init__(self, definitions: fhir_package.FhirPackageManager, handler: primitive_handler.PrimitiveHandler, error_reporter: fhir_errors.ErrorReporter, options: Optional[SqlGenerationOptions]=None) -> None:\n    self._options = options or SqlGenerationOptions()\n    self._context = context.MockFhirPathContext(definitions.iter_structure_definitions())\n    self._primitive_handler = handler\n    self._bq_interpreter = _bigquery_interpreter.BigQuerySqlInterpreter(value_set_codes_table=self._options.value_set_codes_table, value_set_codes_definitions=self._options.value_set_codes_definitions or definitions)\n    self._error_reporter = error_reporter\n    self._options.skip_keys.update(_SKIP_KEYS)\n    self._ctx: List[expressions.Builder] = []\n    self._in_progress: Set[_PathStep] = set()\n    self._type_code_to_regex_map: Dict[str, _RegexInfo] = {}\n    self._regex_columns_generated = set()\n    self._requirement_column_names: Set[str] = set()\n    self._visited_element_definitions: Set[Tuple[str, str]] = set()\n    self._visited_slices: Set[Tuple[str, str]] = set()", "docstring": "Creates a new instance of `FhirProfileStandardSqlEncoder`.\n\nArgs:\ndefinitions: The FHIR resource \"graph\" for traversal and encoding of\nconstraints.\nhandler: Computes primitives with respect to the specification.\nerror_reporter: A `fhir_errors.ErrorReporter` delegate for error-handling.\noptions: Defines a list of optional settings that can be used to customize\nthe behaviour of FhirProfileStandardSqlEncoder.", "source": "github-repos"}
{"code": "def brake_on(self):\n        \n        data = []\n        data.append(0x0A)\n        data.append(self.servoid)\n        data.append(RAM_WRITE_REQ)\n        data.append(TORQUE_CONTROL_RAM)\n        data.append(0x01)\n        data.append(0x40)\n        send_data(data)", "docstring": "Set the Brakes of Herkulex\n\nIn braked mode, position control and velocity control\nwill not work, enable torque before that\n\nArgs:\nnone", "source": "juraj-google-style"}
{"code": "def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer):\n    if not isinstance(model, Sequential):\n        raise ValueError('Expected `model` argument to be a `Sequential` model instance, but got:', model)\n    if not callable(layer_fn):\n        raise ValueError('Expected `layer_fn` argument to be a callable.')\n    layers = []\n    layer_map = {}\n    for layer in model._flatten_layers(include_self=False, recursive=False):\n        if isinstance(layer, InputLayer) and input_tensors is not None:\n            continue\n        cloned_layer = _clone_layer(layer) if isinstance(layer, InputLayer) else layer_fn(layer)\n        layers.append(cloned_layer)\n        layer_map[layer] = cloned_layer\n    layers, ancillary_layers = _remove_ancillary_layers(model, layer_map, layers)\n    if input_tensors is None:\n        cloned_model = Sequential(layers=layers, name=model.name)\n    elif len(generic_utils.to_list(input_tensors)) != 1:\n        raise ValueError('To clone a `Sequential` model, we expect  at most one tensor as part of `input_tensors`.')\n    else:\n        if isinstance(input_tensors, tuple):\n            input_tensors = list(input_tensors)\n        x = generic_utils.to_list(input_tensors)[0]\n        if backend.is_keras_tensor(x):\n            origin_layer = x._keras_history.layer\n            if isinstance(origin_layer, InputLayer):\n                cloned_model = Sequential(layers=[origin_layer] + layers, name=model.name)\n            else:\n                raise ValueError('Cannot clone a `Sequential` model on top of a tensor that comes from a Keras layer other than an `InputLayer`. Use the functional API instead.')\n        else:\n            input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name))\n            input_layer = input_tensor._keras_history.layer\n            cloned_model = Sequential(layers=[input_layer] + layers, name=model.name)\n    if not ancillary_layers:\n        return cloned_model\n    tensor_map = {}\n    for depth, cloned_nodes in cloned_model._nodes_by_depth.items():\n        nodes = model._nodes_by_depth[depth]\n        for cloned_node, node in zip(cloned_nodes, nodes):\n            if isinstance(cloned_node.output_tensors, list):\n                for j, output_tensor in enumerate(cloned_node.output_tensors):\n                    tensor_map[node.output_tensors[j]] = output_tensor\n            else:\n                tensor_map[node.output_tensors] = cloned_node.output_tensors\n    new_nodes = _make_new_nodes({depth: nodes for depth, nodes in model._nodes_by_depth.items() if depth < 0}, layer_fn, layer_map, tensor_map)\n    _insert_ancillary_layers(cloned_model, ancillary_layers, model.metrics_names, new_nodes)\n    return cloned_model", "docstring": "Clone a `Sequential` model instance.\n\nModel cloning is similar to calling a model on new inputs,\nexcept that it creates new layers (and thus new weights) instead\nof sharing the weights of the existing layers.\n\nArgs:\nmodel: Instance of `Sequential`.\ninput_tensors: optional list of input tensors\nto build the model upon. If not provided,\nplaceholders will be created.\nlayer_fn: callable to be applied on non-input layers in the model. By\ndefault it clones the layer. Another example is to preserve the layer\nto share the weights. 
This is required when we create a per-replica\ncopy of the model with distribution strategy; we want the weights to\nbe shared but still feed inputs separately so we create new input\nlayers.\n\nReturns:\nAn instance of `Sequential` reproducing the behavior\nof the original model, on top of new inputs tensors,\nusing newly instantiated weights.\n\nRaises:\nValueError: in case of invalid `model` argument value or `layer_fn`\nargument value.", "source": "github-repos"}
{"code": "def __init__(self, unkeyed: ModelHandler[ExampleT, PredictionT, ModelT]):\n    if len(unkeyed.get_preprocess_fns()) or len(unkeyed.get_postprocess_fns()):\n        raise Exception('Cannot make make an unkeyed model handler with pre or postprocessing functions defined into a keyed model handler. All pre/postprocessing functions must be defined on the outer modelhandler.')\n    self._unkeyed = unkeyed\n    self._env_vars = getattr(unkeyed, '_env_vars', {})", "docstring": "A ModelHandler that takes examples that might have keys and returns\npredictions that might have keys.\n\nFor example, if the original model is used with RunInference to take a\nPCollection[E] to a PCollection[P], this ModelHandler would take either\nPCollection[E] to a PCollection[P] or PCollection[tuple[K, E]] to a\nPCollection[tuple[K, P]], depending on the whether the elements are\ntuples. This pattern makes it possible to associate the outputs with the\ninputs based on the key.\n\nNote that you cannot use this ModelHandler if E is a tuple type.\nIn addition, either all examples should be keyed, or none of them.\n\nArgs:\nunkeyed: An implementation of ModelHandler that does not require keys.", "source": "github-repos"}
{"code": "def split_string(str_src, spliters=None, elim_empty=False):\n        \n        \n        if is_string(spliters):\n            spliters = [spliters]\n        if spliters is None or not spliters:\n            spliters = [' ', '\\t']\n        dest_strs = list()\n        src_strs = [str_src]\n        while True:\n            old_dest_strs = src_strs[:]\n            for s in spliters:\n                for src_s in src_strs:\n                    temp_strs = src_s.split(s)\n                    for temp_s in temp_strs:\n                        temp_s = temp_s.strip()\n                        if temp_s == '' and elim_empty:\n                            continue\n                        if is_string(temp_s):\n                            temp_s = str(temp_s)\n                        dest_strs.append(temp_s)\n                src_strs = dest_strs[:]\n                dest_strs = list()\n            if old_dest_strs == src_strs:\n                dest_strs = src_strs[:]\n                break\n        return dest_strs", "docstring": "Split string by split character space(' ') and indent('\\t') as default\n\nExamples:\n>>> StringClass.split_string('exec -ini test.ini', ' ')\n['exec', '-ini', 'test.ini']\n\nArgs:\nstr_src: source string\nspliters: e.g. [' ', '\\t'], [], ' ', None\nelim_empty: Eliminate empty (i.e., '') or not.\n\nReturns:\nsplit sub-strings as list", "source": "juraj-google-style"}
{"code": "def to_dict(self, remove_nones=False):\n        \n        content = {}\n        \n        \n        for key in self._translation:\n            if hasattr(self, key):\n                content[key] = getattr(self, key)\n        \n        \n        content['parent_id'] = self.parent_id\n        content['item_id'] = self.item_id\n        content['restricted'] = self.restricted\n        content['title'] = self.title\n        if self.resources != []:\n            content['resources'] = [resource.to_dict(remove_nones=remove_nones)\n                                    for resource in self.resources]\n        content['desc'] = self.desc\n        return content", "docstring": "Return the dict representation of the instance.\n\nArgs:\nremove_nones (bool, optional): Optionally remove dictionary\nelements when their value is `None`.\n\nReturns:\ndict: a dict representation of the `DidlObject`.", "source": "juraj-google-style"}
{"code": "def __init__(self, optimizer, scope='global-optimizer', summary_labels=()):\n        \n        super(GlobalOptimizer, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new global optimizer instance.\n\nArgs:\noptimizer: The optimizer which is modified by this meta optimizer.", "source": "juraj-google-style"}
{"code": "def on_core_metadata_event(self, event):\n    raise NotImplementedError('on_core_metadata_event() is not implemented in the base servicer class')", "docstring": "Callback for core metadata.\n\nArgs:\nevent: The Event proto that carries a JSON string in its\n`log_message.message` field.\n\nReturns:\n`None` or an `EventReply` proto to be sent back to the client. If `None`,\nan `EventReply` proto construct with the default no-arg constructor will\nbe sent back to the client.", "source": "github-repos"}
{"code": "def check(self, uid=None, usage_limits_count=None, cryptographic_usage_mask=None, lease_time=None):\n    if (uid is not None):\n        if (not isinstance(uid, six.string_types)):\n            raise TypeError('The unique identifier must be a string.')\n    if (usage_limits_count is not None):\n        if (not isinstance(usage_limits_count, six.integer_types)):\n            raise TypeError('The usage limits count must be an integer.')\n    if (cryptographic_usage_mask is not None):\n        if ((not isinstance(cryptographic_usage_mask, list)) or (not all((isinstance(x, enums.CryptographicUsageMask) for x in cryptographic_usage_mask)))):\n            raise TypeError('The cryptographic usage mask must be a list of CryptographicUsageMask enumerations.')\n    if (lease_time is not None):\n        if (not isinstance(lease_time, six.integer_types)):\n            raise TypeError('The lease time must be an integer.')\n    result = self.proxy.check(uid, usage_limits_count, cryptographic_usage_mask, lease_time)\n    status = result.get('result_status')\n    if (status == enums.ResultStatus.SUCCESS):\n        return result.get('unique_identifier')\n    else:\n        raise exceptions.KmipOperationFailure(status, result.get('result_reason'), result.get('result_message'))", "docstring": "Check the constraints for a managed object.\n\nArgs:\nuid (string): The unique ID of the managed object to check.\nOptional, defaults to None.\nusage_limits_count (int): The number of items that can be secured\nwith the specified managed object. Optional, defaults to None.\ncryptographic_usage_mask (list): A list of CryptographicUsageMask\nenumerations specifying the operations possible with the\nspecified managed object. Optional, defaults to None.\nlease_time (int): The number of seconds that can be leased for the\nspecified managed object. Optional, defaults to None.", "source": "codesearchnet"}
{"code": "def group(self, group_type=None, owner=None, **kwargs):\n        \n\n        group = None\n        if not group_type:\n            return Group(self.tcex, None, None, owner=owner, **kwargs)\n\n        name = kwargs.pop('name', None)\n        group_type = group_type.upper()\n        if group_type == 'ADVERSARY':\n            group = Adversary(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'CAMPAIGN':\n            group = Campaign(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'DOCUMENT':\n            group = Document(self.tcex, name, kwargs.pop('file_name', None), owner=owner, **kwargs)\n        if group_type == 'EVENT':\n            group = Event(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'EMAIL':\n            group = Email(\n                self.tcex,\n                name,\n                kwargs.pop('to', None),\n                kwargs.pop('from_addr', None),\n                kwargs.pop('subject', None),\n                kwargs.pop('body', None),\n                kwargs.pop('header', None),\n                owner=owner,\n                **kwargs\n            )\n        if group_type == 'INCIDENT':\n            group = Incident(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'INTRUSION SET':\n            group = IntrusionSet(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'REPORT':\n            group = Report(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'SIGNATURE':\n            group = Signature(\n                self.tcex,\n                name,\n                kwargs.pop('file_name', None),\n                kwargs.pop('file_type', None),\n                kwargs.pop('file_text', None),\n                owner=owner,\n                **kwargs\n            )\n        if group_type == 'THREAT':\n            group = Threat(self.tcex, name, owner=owner, **kwargs)\n        if group_type == 'TASK':\n            group = Task(\n                self.tcex,\n                name,\n                kwargs.pop('status', 'Not Started'),\n                kwargs.pop('due_date', None),\n                kwargs.pop('reminder_date', None),\n                kwargs.pop('escalation_date', None),\n                owner=owner,\n                **kwargs\n            )\n        return group", "docstring": "Create the Group TI object.\n\nArgs:\nowner:\ngroup_type:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"}
{"code": "async def send_event(self, con, name, payload):\n    message = dict(type='event', name=name, payload=payload)\n    encoded = pack(message)\n    (await con.send(encoded))", "docstring": "Send an event to a client connection.\n\nThis method will push an event message to the client with the given\nname and payload.  You need to have access to the the ``connection``\nobject for the client, which is only available once the client has\nconnected and passed to self.prepare_conn(connection).\n\nArgs:\ncon (websockets.Connection): The connection to use to send\nthe event.\nname (str): The name of the event to send.\npayload (object): The msgpack-serializable object so send\nas the event's payload.", "source": "codesearchnet"}
{"code": "def create_blocking_connection(host):\n    \n    return pika.BlockingConnection(\n        amqpdaemon.getConParams(\n            settings.get_amqp_settings()[host.lower()][\"vhost\"]\n        )\n    )", "docstring": "Return properly created blocking connection.\n\nArgs:\nhost (str): Host as it is defined in :func:`.get_amqp_settings`.\n\nUses :func:`edeposit.amqp.amqpdaemon.getConParams`.", "source": "juraj-google-style"}
{"code": "def get_schema_path(schema, resolved=False):\n\n    def _strip_first_path_elem(path):\n        \"Pass doctests.\\n\\n        Strip the first element of the given path, returning an empty string if\\n        there are no more elements. For example, 'something/other' will end up\\n        as 'other', but  passing then 'other' will return ''\\n        \"\n        stripped_path = path.split(os.path.sep, 1)[1:]\n        return ''.join(stripped_path)\n\n    def _schema_to_normalized_path(schema):\n        \"Pass doctests.\\n\\n        Extracts the path from the url, makes sure to get rid of any '..' in\\n        the path and adds the json extension if not there.\\n        \"\n        path = os.path.normpath((os.path.sep + urlsplit(schema).path))\n        if path.startswith(os.path.sep):\n            path = path[1:]\n        if (not path.endswith('.json')):\n            path += '.json'\n        return path\n    path = _schema_to_normalized_path(schema)\n    while path:\n        if resolved:\n            schema_path = os.path.abspath(os.path.join(_resolved_schema_root_path, path))\n        else:\n            schema_path = os.path.abspath(os.path.join(_schema_root_path, path))\n        if os.path.exists(schema_path):\n            return os.path.abspath(schema_path)\n        path = _strip_first_path_elem(path)\n    raise SchemaNotFound(schema=schema)", "docstring": "Retrieve the installed path for the given schema.\n\nArgs:\nschema(str): relative or absolute url of the schema to validate, for\nexample, 'records/authors.json' or 'jobs.json', or just the name of the\nschema, like 'jobs'.\nresolved(bool): if True, the returned path points to a fully resolved\nschema, that is to the schema with all `$ref` replaced by their\ntargets.\n\nReturns:\nstr: path to the given schema name.\n\nRaises:\nSchemaNotFound: if no schema could be found.", "source": "codesearchnet"}
{"code": "def _add_new_ide_controller_helper(ide_controller_label, controller_key, bus_number):\n    if (controller_key is None):\n        controller_key = randint((- 200), 250)\n    ide_spec = vim.vm.device.VirtualDeviceSpec()\n    ide_spec.device = vim.vm.device.VirtualIDEController()\n    ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n    ide_spec.device.key = controller_key\n    ide_spec.device.busNumber = bus_number\n    ide_spec.device.deviceInfo = vim.Description()\n    ide_spec.device.deviceInfo.label = ide_controller_label\n    ide_spec.device.deviceInfo.summary = ide_controller_label\n    return ide_spec", "docstring": "Helper function for adding new IDE controllers\n\n.. versionadded:: 2016.3.0\n\nArgs:\nide_controller_label: label of the IDE controller\ncontroller_key: if not None, the controller key to use; otherwise it is randomly generated\nbus_number: bus number\n\nReturns: created device spec for an IDE controller", "source": "codesearchnet"}
{"code": "def noisy_operation(self, operation: 'cirq.Operation') -> 'cirq.OP_TREE':\n        \n        if not hasattr(self.noisy_moments, '_not_overridden'):\n            return self.noisy_moments([ops.Moment([operation])],\n                                      operation.qubits)\n\n        if not hasattr(self.noisy_moment, '_not_overridden'):\n            return self.noisy_moment(ops.Moment([operation]), operation.qubits)\n\n        assert False, 'Should be unreachable.'", "docstring": "Adds noise to an individual operation.\n\nArgs:\noperation: The operation to make noisy.\n\nReturns:\nAn OP_TREE corresponding to the noisy operations implementing the\nnoisy version of the given operation.", "source": "juraj-google-style"}
{"code": "def decode_jpeg(image_buffer, scope=None):\n  \n  with tf.name_scope(values=[image_buffer], name=scope,\n                     default_name='decode_jpeg'):\n    \n    \n    \n    \n    image = tf.image.decode_jpeg(image_buffer, channels=3)\n\n    \n    \n    \n    image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n    return image", "docstring": "Decode a JPEG string into one 3-D float image Tensor.\n\nArgs:\nimage_buffer: scalar string Tensor.\nscope: Optional scope for name_scope.\nReturns:\n3-D float Tensor with values ranging from [0, 1).", "source": "juraj-google-style"}
{"code": "def __init__(self, model, ncats, alpha_lambda=1.0, beta_lambda=2.0,\n        freeparams=['alpha_lambda', 'beta_lambda']):\n        \n\n        \n        \n        \n        new_max_beta = DiscreteGamma(self.PARAMLIMITS[\"alpha_lambda\"][1],\n                self.PARAMLIMITS[\"beta_lambda\"][0], ncats)[-1]\n        new_limits = model.PARAMLIMITS\n        new_limits[\"beta\"] = (new_limits[\"beta\"][0], new_max_beta)\n        model.PARAMLIMITS = new_limits\n\n        super(GammaDistributedBetaModel, self).__init__(model, \"beta\",\n                ncats, alpha_lambda=1.0, beta_lambda=2.0,\n                freeparams=['alpha_lambda', 'beta_lambda'])\n\n        assert all([scipy.allclose(new_max_beta, m.PARAMLIMITS[\"beta\"][1])\n                for m in self._models]), (\"{0}\\n{1}\".format(\n                new_max_beta, '\\n'.join([m.PARAMLIMITS[\"beta\"][1]\n                for m in self._models])))", "docstring": "Initialize an `GammaDistributedModel` object.\n\nThe `lambda_param` is set to \"beta\".\n\nArgs:\n`model` `ncats`,`alpha_lambda`, `beta_lambda`, `freeparams`\nMeaning described in main class doc string for\n`GammaDistributedModel`.", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n    num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])\n    if (num_dag_qubits > self.coupling_map.size()):\n        raise TranspilerError('Number of qubits greater than device.')\n    best_sub = self._best_subset(num_dag_qubits)\n    layout = Layout()\n    map_iter = 0\n    for qreg in dag.qregs.values():\n        for i in range(qreg.size):\n            layout[(qreg, i)] = int(best_sub[map_iter])\n            map_iter += 1\n    self.property_set['layout'] = layout", "docstring": "Pick a convenient layout depending on the best matching\nqubit connectivity, and set the property `layout`.\n\nArgs:\ndag (DAGCircuit): DAG to find layout for.\n\nRaises:\nTranspilerError: if dag wider than self.coupling_map", "source": "codesearchnet"}
{"code": "def add_observer(self, callback):\n        \n        if callback in self._observers:\n            raise ValueError('{} is already an observer of {}'\n                             .format(callback, self))\n        self._observers.append(callback)", "docstring": "Add an observer to this event.\n\nArgs:\ncallback: A function or coroutine callback to call when the event\nis fired.\n\nRaises:\nValueError: If the callback has already been added.", "source": "juraj-google-style"}
{"code": "def serialize(self):\n    return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(self.resource_handle)", "docstring": "Serializes the ensemble into proto and returns the serialized proto.\n\nReturns:\nstamp_token: int64 scalar Tensor to denote the stamp of the resource.\nserialized_proto: string scalar Tensor of the serialized proto.", "source": "github-repos"}
{"code": "def _expand_to_beam_size(tensor, beam_size):\n  \n  tensor = tf.expand_dims(tensor, axis=1)\n  tile_dims = [1] * tensor.shape.ndims\n  tile_dims[1] = beam_size\n\n  return tf.tile(tensor, tile_dims)", "docstring": "Tiles a given tensor by beam_size.\n\nArgs:\ntensor: tensor to tile [batch_size, ...]\nbeam_size: How much to tile the tensor by.\n\nReturns:\nTiled tensor [batch_size, beam_size, ...]", "source": "juraj-google-style"}
{"code": "def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout):\n    \n    status = job.status()\n    msg = status.value\n    prev_msg = msg\n    msg_len = len(msg)\n\n    if not quiet:\n        print('\\r%s: %s' % ('Job Status', msg), end='', file=output)\n    while status.name not in ['DONE', 'CANCELLED', 'ERROR']:\n        time.sleep(interval)\n        status = job.status()\n        msg = status.value\n\n        if status.name == 'QUEUED':\n            msg += ' (%s)' % job.queue_position()\n            if not _interval_set:\n                interval = max(job.queue_position(), 2)\n        else:\n            if not _interval_set:\n                interval = 2\n\n        \n        if len(msg) < msg_len:\n            msg += ' ' * (msg_len - len(msg))\n        elif len(msg) > msg_len:\n            msg_len = len(msg)\n\n        if msg != prev_msg and not quiet:\n            print('\\r%s: %s' % ('Job Status', msg), end='', file=output)\n            prev_msg = msg\n    if not quiet:\n        print('', file=output)", "docstring": "A text-based job status checker\n\nArgs:\njob (BaseJob): The job to check.\ninterval (int): The interval at which to check.\n_interval_set (bool): Was interval time set by user?\nquiet (bool): If True, do not print status messages.\noutput (file): The file like object to write status messages to.\nBy default this is sys.stdout.", "source": "juraj-google-style"}
{"code": "def format_error_message(exception_message, task_exception=False):\n    lines = exception_message.split('\\n')\n    if task_exception:\n        lines = (lines[0:1] + lines[3:])\n        pass\n    return '\\n'.join(lines)", "docstring": "Improve the formatting of an exception thrown by a remote function.\n\nThis method takes a traceback from an exception and makes it nicer by\nremoving a few uninformative lines and adding some space to indent the\nremaining lines nicely.\n\nArgs:\nexception_message (str): A message generated by traceback.format_exc().\n\nReturns:\nA string of the formatted exception message.", "source": "codesearchnet"}
{"code": "def double_linked_dom(str_or_dom):\n    \n    dom = str_or_dom\n    if not isinstance(str_or_dom, dhtmlparser.HTMLElement):\n        dom = dhtmlparser.parseString(str_or_dom)\n\n    dhtmlparser.makeDoubleLinked(dom)\n\n    return dom", "docstring": "Create double linked DOM from input.\n\nIn case of string, parse it, make it double-linked. In case of DOM, just\nmake it double-linked.\n\nArgs:\nstr_or_dom (str/HTMLelement): String or HTMLelement instance.\n\nReturns:\nobj: HTMLelement with parsed, double-linked content from `str_or_dom`.", "source": "juraj-google-style"}
{"code": "def get_logger(name):\n    logger = logging.getLogger(name)\n    logger.addHandler(logging.NullHandler())\n    return logger", "docstring": "Gets a logger\n\nArguments:\nname - the name you wish to log as\n\nReturns:\nA logger!", "source": "codesearchnet"}
{"code": "def get_restore_path(self, status=None):\n        \n        status = self.get_status() if status is None else status\n        return config.get_restore_path(status.name.lower())", "docstring": "get_restore_path: get path to restoration file\nArgs:\nstatus (str): step to get restore file (optional)\nReturns: string path to restoration file", "source": "juraj-google-style"}
{"code": "def get_gated_grpc_tensors(self, matching_debug_op=None):\n    with self._grpc_gated_lock:\n        matching_debug_op = (matching_debug_op or 'DebugIdentity')\n        if (matching_debug_op not in self._grpc_gated_tensors):\n            node_name_to_op_type = dict(((node.name, node.op) for node in self._graph_def.node))\n            gated = []\n            for node in self._graph_def.node:\n                if (node.op == matching_debug_op):\n                    for attr_key in node.attr:\n                        if ((attr_key == 'gated_grpc') and node.attr[attr_key].b):\n                            (node_name, output_slot, _, debug_op) = debug_graphs.parse_debug_node_name(node.name)\n                            gated.append((node_name, node_name_to_op_type[node_name], output_slot, debug_op))\n                            break\n            self._grpc_gated_tensors[matching_debug_op] = gated\n        return self._grpc_gated_tensors[matching_debug_op]", "docstring": "Extract all nodes with gated-gRPC debug ops attached.\n\nUses cached values if available.\nThis method is thread-safe.\n\nArgs:\ngraph_def: A tf.GraphDef proto.\nmatching_debug_op: Return tensors and nodes with only matching the\nspecified debug op name (optional). If `None`, will extract only\n`DebugIdentity` debug ops.\n\nReturns:\nA list of (node_name, op_type, output_slot, debug_op) tuples.", "source": "codesearchnet"}
{"code": "def _create_output_from_match(self, match_result):\n    if isinstance(match_result, dict):\n        return LinterOutput(self.name, **match_result)\n    return LinterOutput(self.name, *match_result)", "docstring": "Create Result instance from pattern match results.\n\nArgs:\nmatch: Pattern match.", "source": "codesearchnet"}
{"code": "def write_var_int(self, value, little_endian=True):\n        \n        if not isinstance(value, int):\n            raise SDKException(ErrorCode.param_err('%s not int type.' % value))\n\n        if value < 0:\n            raise SDKException(ErrorCode.param_err('%d too small.' % value))\n\n        elif value < 0xfd:\n            return self.write_byte(value)\n\n        elif value <= 0xffff:\n            self.write_byte(0xfd)\n            return self.write_uint16(value, little_endian)\n\n        elif value <= 0xFFFFFFFF:\n            self.write_byte(0xfe)\n            return self.write_uint32(value, little_endian)\n\n        else:\n            self.write_byte(0xff)\n            return self.write_uint64(value, little_endian)", "docstring": "Write an integer value in a space saving way to the stream.\n\nArgs:\nvalue (int):\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nRaises:\nSDKException: if `value` is not of type int.\nSDKException: if `value` is < 0.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def _GetDictFromStringsTable(self, parser_mediator, table):\n    if (not table):\n        return {}\n    record_values = {}\n    for record in table.records:\n        if parser_mediator.abort:\n            break\n        if (record.get_number_of_values() != 2):\n            continue\n        identification = self._GetRecordValue(record, 0)\n        filename = self._GetRecordValue(record, 1)\n        if (not identification):\n            continue\n        record_values[identification] = filename\n    return record_values", "docstring": "Build a dictionary of the value in the strings table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntable (pyesedb.table): strings table.\n\nReturns:\ndict[str,object]: values per column name.", "source": "codesearchnet"}
{"code": "def __init__(self, schema, max_files_per_bundle=_DEFAULT_MAX_WRITERS_PER_BUNDLE, max_file_size=_DEFAULT_MAX_FILE_SIZE, file_format=None):\n    self.schema = schema\n    self.max_files_per_bundle = max_files_per_bundle\n    self.max_file_size = max_file_size\n    self.file_format = file_format or bigquery_tools.FileFormat.JSON", "docstring": "Initialize a :class:`WriteRecordsToFile`.\n\nArgs:\nmax_files_per_bundle (int): The maximum number of files that can be kept\nopen during execution of this step in a worker. This is to avoid over-\nwhelming the worker memory.\nmax_file_size (int): The maximum size in bytes for a file to be used in\nan export job.", "source": "github-repos"}
{"code": "def set_agent(self, agent):\n        \n        self.agent = agent\n        self.queue = asyncio.Queue(loop=self.agent.loop)\n        self.presence = agent.presence\n        self.web = agent.web", "docstring": "Links behaviour with its owner agent\n\nArgs:\nagent (spade.agent.Agent): the agent who owns the behaviour", "source": "juraj-google-style"}
{"code": "def __init__(self, initial_learning_rate, decay_steps, initial_variance=1.0, variance_decay=0.55, num_periods=0.5, alpha=0.0, beta=0.001, name=None):\n    super(NoisyLinearCosineDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.initial_variance = initial_variance\n    self.variance_decay = variance_decay\n    self.num_periods = num_periods\n    self.alpha = alpha\n    self.beta = beta\n    self.name = name", "docstring": "Applies noisy linear cosine decay to the learning rate.\n\nArgs:\ninitial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\nnumber. The initial learning rate.\ndecay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\nNumber of steps to decay over.\ninitial_variance: initial variance for the noise. See computation above.\nvariance_decay: decay for the noise's variance. See computation above.\nnum_periods: Number of periods in the cosine part of the decay.\nSee computation above.\nalpha: See computation above.\nbeta: See computation above.\nname: String.  Optional name of the operation.  Defaults to\n'NoisyLinearCosineDecay'.", "source": "github-repos"}
{"code": "def _generate_unique_name(self, symbol_name: str) -> str:\n    normalized_symbol_name = self._NON_SYMBOL_NAME_CHARS_REGEX.sub('_', symbol_name)\n    normalized_name_conflict_count = self._num_symbols_with_normalized_name.get(normalized_symbol_name, 0)\n    self._num_symbols_with_normalized_name[normalized_symbol_name] = normalized_name_conflict_count + 1\n    optional_disambiguation_suffix = '' if normalized_name_conflict_count == 0 else f'_{normalized_name_conflict_count}'\n    return f'{normalized_symbol_name}{optional_disambiguation_suffix}'", "docstring": "Translates a symbol name to a unique FileCheck capture name.\n\nReplaces all characters other than letters, numbers, and underscores with\nunderscores. If the resulting name has already been used, appends a counter\nto disambiguate it. For example, this could result in the following sequence\nof replacements:\n1.) \"foo.bar.baz\" -> \"foo_bar_baz\"\n2.) \"foo.bar_baz\" -> \"foo_bar_baz_1\"\n3.) \"foo_bar.baz\" -> \"foo_bar_baz_2\"\n4.) \"foo_bar_baz\" -> \"foo_bar_baz_3\"\n\nArgs:\nsymbol_name: The original symbol name.\n\nReturns:\nThe generated FileCheck capture name.", "source": "github-repos"}
{"code": "def load_and_print_resfile(filename, info_dict=None):\n    if (info_dict is None):\n        info_dict = dict()\n        info_dict['mass'] = 1.23\n        info_dict['nom_cap'] = 3600\n        info_dict['tot_mass'] = 2.33\n    d = CellpyData()\n    print('filename:', filename)\n    print('info_dict in:', end=' ')\n    print(info_dict)\n    d.from_raw(filename)\n    d.set_mass(info_dict['mass'])\n    d.make_step_table()\n    d.make_summary()\n    for test in d.datasets:\n        print('newtest')\n        print(test)\n    return info_dict", "docstring": "Load a raw data file and print information.\n\nArgs:\nfilename (str): name of the resfile.\ninfo_dict (dict):\n\nReturns:\ninfo (str): string describing something.", "source": "codesearchnet"}
{"code": "def compress(element):\n    element_spec = structure.type_spec_from_value(element)\n    tensor_list = structure.to_tensor_list(element_spec, element)\n    return ged_ops.compress_element(tensor_list)", "docstring": "Compress a dataset element.\n\nArgs:\nelement: A nested structure of types supported by Tensorflow.\n\nReturns:\nA variant tensor representing the compressed element. This variant can be\npassed to `uncompress` to get back the original element.", "source": "github-repos"}
{"code": "def _RemoveAllFlagAppearances(self, name):\n    flag_dict = self.FlagDict()\n    if (name not in flag_dict):\n        raise exceptions.UnrecognizedFlagError(name)\n    flag = flag_dict[name]\n    names_to_remove = {name}\n    names_to_remove.add(flag.name)\n    if flag.short_name:\n        names_to_remove.add(flag.short_name)\n    for n in names_to_remove:\n        self.__delattr__(n)", "docstring": "Removes flag with name for all appearances.\n\nA flag can be registered with its long name and an optional short name.\nThis method removes both of them. This is different than __delattr__.\n\nArgs:\nname: Either flag's long name or short name.\n\nRaises:\nUnrecognizedFlagError: When flag name is not found.", "source": "codesearchnet"}
{"code": "def test_enrichment(pcoll, enrichment_handler: str, handler_config: Dict[str, Any], timeout: Optional[float]=30):\n    if enrichment_handler == 'BigTable':\n        row_key = handler_config['row_key']\n        bt_data = INPUT_TABLES['BigTable', handler_config['instance_id'], handler_config['table_id']]\n        products = {str(data[row_key]): data for data in bt_data}\n\n        def _fn(row):\n            left = row._asdict()\n            right = products[str(left[row_key])]\n            left['product'] = left.get('product', None) or right\n            return beam.Row(**left)\n    elif enrichment_handler == 'BigQuery':\n        row_key = handler_config['fields']\n        dataset, table = handler_config['table_name'].split('.')[-2:]\n        bq_data = INPUT_TABLES['BigQuery', str(dataset), str(table)]\n        bq_data = {tuple((str(data[key]) for key in row_key)): data for data in bq_data}\n\n        def _fn(row):\n            left = row._asdict()\n            right = bq_data[tuple((str(left[k]) for k in row_key))]\n            row = {key: left.get(key, None) or right[key] for key in {*left.keys(), *right.keys()}}\n            return beam.Row(**row)\n    else:\n        raise ValueError(f'{enrichment_handler} is not a valid enrichment_handler.')\n    return pcoll | beam.Map(_fn)", "docstring": "Mocks the Enrichment transform for testing purposes.\n\nThis PTransform simulates the behavior of the Enrichment transform by\nlooking up data from predefined in-memory tables based on the provided\n`enrichment_handler` and `handler_config`.\n\nNote: The Github action that invokes these tests does not have gcp\ndependencies installed which is a prerequisite to\napache_beam.transforms.enrichment.Enrichment as a top-level import.\n\nArgs:\npcoll: The input PCollection.\nenrichment_handler: A string indicating the type of enrichment handler\nto simulate (e.g., 'BigTable', 'BigQuery').\nhandler_config: A dictionary containing configuration details for the\nsimulated handler (e.g., table names, row keys, fields).\ntimeout: An optional timeout value (ignored in this mock).\n\nReturns:\nA PCollection containing the enriched data.", "source": "github-repos"}
{"code": "def capture_image(self, device_label):\n        \n        response = None\n        try:\n            response = requests.post(\n                urls.imagecapture(self._giid, device_label),\n                headers={\n                    'Content-Type': 'application/json',\n                    'Cookie': 'vid={}'.format(self._vid)})\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)", "docstring": "Capture smartcam image\n\nArgs:\ndevice_label (str): device label of camera", "source": "juraj-google-style"}
{"code": "def stream_sync(self, report, timeout=120.0):\n    done = AwaitableResponse()\n    self.stream(report, callback=done.set_result)\n    return done.wait(timeout)", "docstring": "Send a report and wait for it to finish.\n\nThis awaitable coroutine wraps VirtualIOTileDevice.stream() and turns\nthe callback into an awaitable object.  The appropriate usage of this\nmethod is by calling it inside the event loop as:\n\nawait device.stream_sync(data)\n\nArgs:\nreport (IOTileReport): The report that should be streamed.\ntimeout (float): The maximum number of seconds to wait before\ntiming out.\n\nReturns:\nawaitable: An awaitable object with the result.\n\nThe result will be True if the data was sent successfully\nor False if the data could not be sent in its entirety.\n\nWhen False is returned, there is no guarantee about how much of\nthe data was sent, if any, just that it was not known to be\nsuccessfully sent.", "source": "codesearchnet"}
{"code": "def nr_cases(self, institute_id=None):\n        \n        query = {}\n\n        if institute_id:\n            query['collaborators'] = institute_id\n\n        LOG.debug(\"Fetch all cases with query {0}\".format(query))\n        nr_cases = self.case_collection.find(query).count()\n\n        return nr_cases", "docstring": "Return the number of cases\n\nThis function will change when we migrate to 3.7.1\n\nArgs:\ncollaborator(str): Institute id\n\nReturns:\nnr_cases(int)", "source": "juraj-google-style"}
{"code": "def _Inputs(op: ops.Operation, xs_set):\n    if _IsFunction(op.graph):\n        inputs = []\n        for t in op.inputs:\n            if t not in xs_set:\n                t = _MaybeCaptured(t)\n            inputs.append(t)\n        return inputs\n    else:\n        return op.inputs", "docstring": "Returns the inputs of op, crossing closure boundaries where necessary.\n\nArgs:\nop: Operation\nxs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.\n\nReturns:\nA list of tensors. The tensors may be from multiple Graph/FuncGraphs if op\nis in a FuncGraph and has captured inputs.", "source": "github-repos"}
{"code": "def _index_to_ansi_values(self, index):\n        \n        if self.__class__.__name__[0] == 'F':   \n            if index < 8:\n                index += ANSI_FG_LO_BASE\n            else:\n                index += (ANSI_FG_HI_BASE - 8)  \n        else:                                   \n            if index < 8:\n                index += ANSI_BG_LO_BASE\n            else:\n                index += (ANSI_BG_HI_BASE - 8)  \n        return [str(index)]", "docstring": "Converts an palette index to the corresponding ANSI color.\n\nArguments:\nindex   - an int (from 0-15)\nReturns:\nindex as str in a list for compatibility with values.", "source": "juraj-google-style"}
{"code": "def notify_batches_finished(self, statuses):\n        \n        with self._wait_condition:\n            self._statuses = statuses\n            self._wait_condition.notify()", "docstring": "Called by the BatchTracker the _BatchWaiter is observing. Should not\nbe called by handlers.\n\nArgs:\nstatuses (dict of int): A dict with keys of batch ids, and values\nof status enums", "source": "juraj-google-style"}
{"code": "def add_sample(a_float, dist):\n    (dist_type, _) = _detect_bucket_option(dist)\n    if (dist_type == u'exponentialBuckets'):\n        _update_general_statistics(a_float, dist)\n        _update_exponential_bucket_count(a_float, dist)\n    elif (dist_type == u'linearBuckets'):\n        _update_general_statistics(a_float, dist)\n        _update_linear_bucket_count(a_float, dist)\n    elif (dist_type == u'explicitBuckets'):\n        _update_general_statistics(a_float, dist)\n        _update_explicit_bucket_count(a_float, dist)\n    else:\n        _logger.error(u'Could not determine bucket option type for %s', dist)\n        raise ValueError(u'Unknown bucket option type')", "docstring": "Adds `a_float` to `dist`, updating its existing buckets.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated\n\nRaises:\nValueError: if `dist` does not have known bucket options defined\nValueError: if there are not enough bucket count fields in `dist`", "source": "codesearchnet"}
{"code": "def metadata(self):\n    if (self._info is None):\n        try:\n            self._info = self._api.buckets_get(self._name)\n        except Exception as e:\n            raise e\n    return (BucketMetadata(self._info) if self._info else None)", "docstring": "Retrieves metadata about the bucket.\n\nReturns:\nA BucketMetadata instance with information about this bucket.\nRaises:\nException if there was an error requesting the bucket's metadata.", "source": "codesearchnet"}
{"code": "def load_extra_data(cls, data):\n    \n    try:\n      cls._extra_config.update(json.loads(data))\n    except ValueError as exception:\n      sys.stderr.write('Could convert to JSON. {0:s}'.format(exception))\n      exit(-1)", "docstring": "Loads extra JSON configuration parameters from a data buffer.\n\nThe data buffer must represent a JSON object.\n\nArgs:\ndata: str, the buffer to load the JSON data from.", "source": "juraj-google-style"}
{"code": "def potcar_eatom_list_from_outcar(filename='OUTCAR'):\n    with open(filename) as f:\n        outcar = f.read()\n    eatom_re = re.compile('energy of atom\\\\s+\\\\d+\\\\s+EATOM=\\\\s*([-\\\\d\\\\.]+)')\n    eatom = [float(e) for e in eatom_re.findall(outcar)]\n    return eatom", "docstring": "Returns a list of EATOM values for the pseudopotentials used.\n\nArgs:\nfilename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\nReturns:\n(List(Float)): A list of EATOM values, in the order they appear in the OUTCAR.", "source": "codesearchnet"}
{"code": "def build_graph(device, n, m, k, transpose_a, transpose_b, dtype):\n    with ops.device('%s' % device):\n        if not transpose_a:\n            x = variable_v1.VariableV1(random_ops.random_uniform([n, m], dtype=dtype), use_resource=False)\n        else:\n            x = variable_v1.VariableV1(random_ops.random_uniform([m, n], dtype=dtype), use_resource=False)\n        if not transpose_b:\n            y = variable_v1.VariableV1(random_ops.random_uniform([m, k], dtype=dtype), use_resource=False)\n        else:\n            y = variable_v1.VariableV1(random_ops.random_uniform([k, m], dtype=dtype), use_resource=False)\n        z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b)\n        return control_flow_ops.group(z)", "docstring": "Build a graph containing a sequence of matmul operations.\n\nArgs:\ndevice: String, the device to run on.\nn: tensor A's first dimension size.\nm: tensor A's second dimension size.\nk: tensor B's second dimension size.\ntranspose_a: boolean value to show if tensor A is transposed.\ntranspose_b: boolean value to show if tensor B is transposed.\ndtype: numpy data type of the input tensor.\n\nReturns:\nA matmul operation to run()", "source": "github-repos"}
{"code": "def __init__(self, val, unit, unit_type=None):\n        \n        if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:\n            raise UnitError(\n                \"{} is not a supported unit for {}\".format(unit, unit_type))\n        self._unit = Unit(unit)\n        self._unit_type = unit_type", "docstring": "Initializes a float with unit.\n\nArgs:\nval (float): Value\nunit (Unit): A unit. E.g., \"C\".\nunit_type (str): A type of unit. E.g., \"charge\"", "source": "juraj-google-style"}
{"code": "def cancel(self):\n    try:\n        del self._protocol._consumers[self.queue]\n    except (KeyError, AttributeError):\n        pass\n    try:\n        del self._protocol.factory._consumers[self.queue]\n    except (KeyError, AttributeError):\n        pass\n    self._running = False\n    (yield self._read_loop)\n    try:\n        (yield self._channel.basic_cancel(consumer_tag=self._tag))\n    except pika.exceptions.AMQPChannelError:\n        pass\n    try:\n        (yield self._channel.close())\n    except pika.exceptions.AMQPChannelError:\n        pass\n    if (not self.result.called):\n        self.result.callback(self)", "docstring": "Cancel the consumer and clean up resources associated with it.\nConsumers that are canceled are allowed to finish processing any\nmessages before halting.\n\nReturns:\ndefer.Deferred: A deferred that fires when the consumer has finished\nprocessing any message it was in the middle of and has been successfully\ncanceled.", "source": "codesearchnet"}
{"code": "def _new_open_bin(self, width=None, height=None, rid=None):\n    factories_to_delete = set()\n    new_bin = None\n    for (key, binfac) in self._empty_bins.items():\n        if (not binfac.fits_inside(width, height)):\n            continue\n        new_bin = binfac.new_bin()\n        if (new_bin is None):\n            continue\n        self._open_bins.append(new_bin)\n        if binfac.is_empty():\n            factories_to_delete.add(key)\n        break\n    for f in factories_to_delete:\n        del self._empty_bins[f]\n    return new_bin", "docstring": "Extract the next empty bin and append it to open bins\n\nReturns:\nPackingAlgorithm: Initialized empty packing bin.\nNone: No bin big enough for the rectangle was found", "source": "codesearchnet"}
{"code": "def __init__(self, cls):\n        \n        super(EnumType, self).__init__()\n        self._cls = cls", "docstring": "Create a new EnumType. This new EnumType requires a class object in the\nconstructor. The class is used to construct new instances of the Enum\nwhen the integer value is retrieved from the database.\n\nArgs:\ncls(class): An Enum class used to create new instances from integer\nvalues.", "source": "juraj-google-style"}
{"code": "def with_params(self, params):\n    copy = params.copy()\n    copy.update(self._params)\n    return self.__copy_and_set('params', copy)", "docstring": "Adds parameters to the request params\n\nArgs:\nparams (dict): The parameters to add to the request params\n\nReturns:\nThe request builder instance in order to chain calls", "source": "codesearchnet"}
{"code": "def convert_dict_to_params(src_dict):\n    return '&'.join(['{}={}'.format(key, value) for (key, value) in src_dict.items()])", "docstring": "convert dict to params string\n\nArgs:\nsrc_dict (dict): source mapping data structure\n\nReturns:\nstr: string params data\n\nExamples:\n>>> src_dict = {\n\"a\": 1,\n\"b\": 2\n}\n>>> convert_dict_to_params(src_dict)\n>>> \"a=1&b=2\"", "source": "codesearchnet"}
{"code": "def __split_off_extra_attributes(self, mapping: CommentedMap, known_attrs: List[str]) -> CommentedMap:\n    attr_names = list(mapping.keys())\n    main_attrs = mapping.copy()\n    extra_attrs = OrderedDict(mapping.items())\n    for name in attr_names:\n        if ((name not in known_attrs) or (name == 'yatiml_extra')):\n            del main_attrs[name]\n        else:\n            del extra_attrs[name]\n    main_attrs['yatiml_extra'] = extra_attrs\n    return main_attrs", "docstring": "Separates the extra attributes in mapping into yatiml_extra.\n\nThis returns a mapping containing all key-value pairs from \\\nmapping whose key is in known_attrs, and an additional key \\\nyatiml_extra which maps to a dict containing the remaining \\\nkey-value pairs.\n\nArgs:\nmapping: The mapping to split\nknown_attrs: Attributes that should be kept in the main \\\nmap, and not moved to yatiml_extra.\n\nReturns:\nA map with attributes reorganised as described above.", "source": "codesearchnet"}
{"code": "def fetch(self, card_id, data={}, **kwargs):\n    return super(Card, self).fetch(card_id, data, **kwargs)", "docstring": "Fetch Card for given Id\n\nArgs:\ncard_id : Id for which card object has to be retrieved\n\nReturns:\nCard dict for given card Id", "source": "codesearchnet"}
{"code": "def _ClassifyInclude(fileinfo, include, is_system):\n    is_cpp_h = (include in _CPP_HEADERS)\n    if (is_system and (os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++'])):\n        is_system = False\n    if is_system:\n        if is_cpp_h:\n            return _CPP_SYS_HEADER\n        else:\n            return _C_SYS_HEADER\n    (target_dir, target_base) = os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))\n    (include_dir, include_base) = os.path.split(_DropCommonSuffixes(include))\n    target_dir_pub = os.path.normpath((target_dir + '/../public'))\n    target_dir_pub = target_dir_pub.replace('\\\\', '/')\n    if ((target_base == include_base) and ((include_dir == target_dir) or (include_dir == target_dir_pub))):\n        return _LIKELY_MY_HEADER\n    target_first_component = _RE_FIRST_COMPONENT.match(target_base)\n    include_first_component = _RE_FIRST_COMPONENT.match(include_base)\n    if (target_first_component and include_first_component and (target_first_component.group(0) == include_first_component.group(0))):\n        return _POSSIBLE_MY_HEADER\n    return _OTHER_HEADER", "docstring": "Figures out what kind of header 'include' is.\n\nArgs:\nfileinfo: The current file cpplint is running over. A FileInfo instance.\ninclude: The path to a #included file.\nis_system: True if the #include used <> rather than \"\".\n\nReturns:\nOne of the _XXX_HEADER constants.\n\nFor example:\n>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)\n_C_SYS_HEADER\n>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)\n_CPP_SYS_HEADER\n>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)\n_LIKELY_MY_HEADER\n>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),\n...                  'bar/foo_other_ext.h', False)\n_POSSIBLE_MY_HEADER\n>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)\n_OTHER_HEADER", "source": "codesearchnet"}
{"code": "def generate_output_entities(self, json_data=None, hr=True, show_name=False, colorize=True):\n    output = ''\n    short = (HR_RDAP['entities']['_short'] if hr else 'entities')\n    name = (HR_RDAP['entities']['_name'] if (hr and show_name) else None)\n    output += generate_output(line='0', short=short, name=name, is_parent=(False if ((json_data is None) or (json_data['entities'] is None)) else True), value=('None' if ((json_data is None) or (json_data['entities'] is None)) else None), colorize=colorize)\n    if (json_data is not None):\n        for ent in json_data['entities']:\n            output += generate_output(line='1', value=ent, colorize=colorize)\n    return output", "docstring": "The function for generating CLI output RDAP entity results.\n\nArgs:\njson_data (:obj:`dict`): The data to process. Defaults to None.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def response_data_to_model_instance(self, response_data):\n        \n        \n        response_data[\"datetime_created\"] = dateutil.parser.parse(\n            response_data[\"datetime_created\"]\n        )\n\n        if response_data[\"datetime_finished\"]:\n            response_data[\"datetime_finished\"] = dateutil.parser.parse(\n                response_data[\"datetime_finished\"]\n            )\n\n        \n        return super(\n            BaseTaskInstanceManager, self\n        ).response_data_to_model_instance(response_data)", "docstring": "Convert response data to a task instance model.\n\nArgs:\nresponse_data (dict): The data from the request's response.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nA task instance model instance representing the task\ninstance from the reponse data.", "source": "juraj-google-style"}
{"code": "def send_recv(self, message, timeout=10.0):\n        \n        response_queue = self.send(message)\n        response = self.recv(response_queue, timeout)\n        return response", "docstring": "Send a message to a PandABox and wait for the response\n\nArgs:\nmessage (str): The message to send\ntimeout (float): How long to wait before raising queue.Empty\n\nReturns:\nstr: The response", "source": "juraj-google-style"}
{"code": "def _check_positional_parameter_annotations(method_signature, base_signature, is_subtype):\n    for param_index in range(max(len(base_signature.param_names), len(method_signature.param_names))):\n        if param_index == 0:\n            continue\n        if param_index < len(base_signature.param_names):\n            base_param_name = base_signature.param_names[param_index]\n        elif base_signature.varargs_name:\n            base_param_name = base_signature.varargs_name\n        else:\n            break\n        try:\n            base_param_type = base_signature.annotations[base_param_name]\n        except KeyError:\n            continue\n        if base_param_name == base_signature.varargs_name:\n            base_param_type = _get_varargs_annotation_type(base_param_type)\n            if base_param_type is None:\n                continue\n        if param_index < method_signature.posonly_count:\n            method_param_name = method_signature.param_names[param_index]\n        elif param_index < len(method_signature.param_names):\n            if base_param_name == '_' or method_signature.param_names[param_index] == '_':\n                method_param_name = method_signature.param_names[param_index]\n            else:\n                method_param_name = base_param_name\n        elif method_signature.varargs_name:\n            method_param_name = method_signature.varargs_name\n        else:\n            break\n        try:\n            method_param_type = method_signature.annotations[method_param_name]\n        except KeyError:\n            continue\n        if method_param_name == method_signature.varargs_name:\n            method_param_type = _get_varargs_annotation_type(method_param_type)\n            if method_param_type is None:\n                continue\n        if not is_subtype(base_param_type, method_param_type):\n            return SignatureError(SignatureErrorType.POSITIONAL_PARAMETER_TYPE_MISMATCH, f\"Type mismatch for parameter '{method_param_name}'.\")\n    return None", "docstring": "Checks type annotations for positional parameters of the overriding method.\n\nArgs:\nmethod_signature: signature of the overriding method.\nbase_signature: signature of the overridden method.\nis_subtype: a binary function to compare types.\n\nReturns:\nSignatureError if a mismatch is detected. Otherwise returns None.", "source": "github-repos"}
{"code": "def plot_ticks(ax, tick_fontsize=12, xticks=None, xticks_args=None, yticks=None, yticks_args=None, zticks=None, zticks_args=None):\n    if (xticks is not None):\n        ax.set_xticks(xticks)\n        xticks_args = dict_if_none(xticks_args)\n        ax.xaxis.set_tick_params(labelsize=tick_fontsize, **xticks_args)\n    if (yticks is not None):\n        ax.set_yticks(yticks)\n        yticks_args = dict_if_none(yticks_args)\n        ax.yaxis.set_tick_params(labelsize=tick_fontsize, **yticks_args)\n    if (zticks is not None):\n        ax.set_zticks(zticks)\n        zticks_args = dict_if_none(zticks_args)\n        ax.zaxis.set_tick_params(labelsize=tick_fontsize, **zticks_args)", "docstring": "Function that defines the labels options of a matplotlib plot.\n\nArgs:\nax: matplotlib axes\ntick_fontsize (int): Defines the size of the ticks' font\nxticks([list of ticks]): Defines the values of x ticks in the figure\nxticks_arg(dict):  Passsed into matplotlib as xticks arguments\nyticks([list of ticks]): Defines the values of y ticks in the figure\nyticks_arg(dict):  Passsed into matplotlib as yticks arguments\nzticks([list of ticks]): Defines the values of z ticks in the figure\nzticks_arg(dict):  Passsed into matplotlib as zticks arguments", "source": "codesearchnet"}
{"code": "class CSVLogger(Callback):\n\n    def __init__(self, filename, separator=',', append=False):\n        self.sep = separator\n        self.filename = path_to_string(filename)\n        self.append = append\n        self.writer = None\n        self.keys = None\n        self.append_header = True\n        super(CSVLogger, self).__init__()\n\n    def on_train_begin(self, logs=None):\n        if self.append:\n            if file_io.file_exists_v2(self.filename):\n                with gfile.GFile(self.filename, 'r') as f:\n                    self.append_header = not bool(len(f.readline()))\n            mode = 'a'\n        else:\n            mode = 'w'\n        self.csv_file = gfile.GFile(self.filename, mode)\n\n    def on_epoch_end(self, epoch, logs=None):\n        logs = logs or {}\n\n        def handle_value(k):\n            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0\n            if isinstance(k, str):\n                return k\n            elif isinstance(k, collections.abc.Iterable) and (not is_zero_dim_ndarray):\n                return '\"[%s]\"' % ', '.join(map(str, k))\n            else:\n                return k\n        if self.keys is None:\n            self.keys = sorted(logs.keys())\n        if self.model.stop_training:\n            logs = dict(((k, logs[k]) if k in logs else (k, 'NA') for k in self.keys))\n        if not self.writer:\n\n            class CustomDialect(csv.excel):\n                delimiter = self.sep\n            fieldnames = ['epoch'] + self.keys\n            self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames, dialect=CustomDialect)\n            if self.append_header:\n                self.writer.writeheader()\n        row_dict = collections.OrderedDict({'epoch': epoch})\n        row_dict.update(((key, handle_value(logs[key])) for key in self.keys))\n        self.writer.writerow(row_dict)\n        self.csv_file.flush()\n\n    def on_train_end(self, logs=None):\n        self.csv_file.close()\n        self.writer = None", "docstring": "Callback that streams epoch results to a CSV file.\n\nSupports all values that can be represented as a string,\nincluding 1D iterables such as `np.ndarray`.\n\nExample:\n\n```python\ncsv_logger = CSVLogger('training.log')\nmodel.fit(X_train, Y_train, callbacks=[csv_logger])\n```\n\nArgs:\nfilename: Filename of the CSV file, e.g. `'run/log.csv'`.\nseparator: String used to separate elements in the CSV file.\nappend: Boolean. True: append if file exists (useful for continuing\ntraining). False: overwrite existing file.", "source": "github-repos"}
{"code": "def batch_insert(self, insertions: Iterable[Tuple[(int, ops.OP_TREE)]]) -> None:\n    copy = self.copy()\n    shift = 0\n    insertions = sorted(insertions, key=(lambda e: e[0]))\n    groups = _group_until_different(insertions, key=(lambda e: e[0]), value=(lambda e: e[1]))\n    for (i, group) in groups:\n        insert_index = (i + shift)\n        next_index = copy.insert(insert_index, reversed(group), InsertStrategy.EARLIEST)\n        if (next_index > insert_index):\n            shift += (next_index - insert_index)\n    self._moments = copy._moments", "docstring": "Applies a batched insert operation to the circuit.\n\nTransparently handles the fact that earlier insertions may shift\nthe index that later insertions should occur at. For example, if you\ninsert an operation at index 2 and at index 4, but the insert at index 2\ncauses a new moment to be created, then the insert at \"4\" will actually\noccur at index 5 to account for the shift from the new moment.\n\nAll insertions are done with the strategy 'EARLIEST'.\n\nWhen multiple inserts occur at the same index, the gates from the later\ninserts end up before the gates from the earlier inserts (exactly as if\nyou'd called list.insert several times with the same index: the later\ninserts shift the earliest inserts forward).\n\nArgs:\ninsertions: A sequence of (insert_index, operations) pairs\nindicating operations to add into the circuit at specific\nplaces.", "source": "codesearchnet"}
{"code": "def stack(self, value):\n        \n        if value == self._defaults['stack'] and 'stack' in self._values:\n            del self._values['stack']\n        else:\n            self._values['stack'] = value", "docstring": "The stack property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def add_snmp_community(self, **kwargs):\n    community = kwargs.pop('community')\n    callback = kwargs.pop('callback', self._callback)\n    config = ET.Element('config')\n    snmp_server = ET.SubElement(config, 'snmp-server', xmlns='urn:brocade.com:mgmt:brocade-snmp')\n    community_el = ET.SubElement(snmp_server, 'community')\n    community_name = ET.SubElement(community_el, 'community')\n    community_name.text = community\n    return callback(config)", "docstring": "Add SNMP Community to NOS device.\n\nArgs:\ncommunity (str): Community string to be added to device.\ncallback (function): A function executed upon completion of the\nmethod.  The only parameter passed to `callback` will be the\n``ElementTree`` `config`.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nKeyError: if `community` is not defined.", "source": "codesearchnet"}
{"code": "def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_mul(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Multiply this variable by `tf.IndexedSlices`.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to multiply this variable by.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def _FormatNotes(self, event):\n    inode = event.inode\n    if (inode is None):\n        inode = '-'\n    notes = getattr(event, 'notes', '')\n    if (not notes):\n        display_name = getattr(event, 'display_name', '')\n        notes = 'File: {0:s} inode: {1!s}'.format(display_name, inode)\n    return self._SanitizeField(notes)", "docstring": "Formats the notes.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted notes field.", "source": "codesearchnet"}
{"code": "def __set__(self, instance, value):\n        \n        if value is None and self.default:\n            self._cache[instance] = self.default\n\n        else:\n            try:\n                cleaned_value = self.field_value(value)\n\n            except NodeTypeError as node_error:\n                raise SchemaNodeError('{}.{}: {}'.format(\n                    instance.__class__.__name__, self.alias, node_error.args[0])\n                )\n\n            try:\n                self.is_valid(cleaned_value)\n\n            except SchemaNodeValidatorError as error:\n                raise SchemaNodeError(\n                    '{}.{} Error for value `{}` : {}'.format(\n                        instance.__class__.__name__,\n                        self.alias,\n                        value,\n                        error.args[0]\n                    )\n                )\n\n            self._cache[instance] = cleaned_value", "docstring": "Python descriptor protocol `__set__` magic method.\n\nArgs:\ninstance (object): The instance with descriptor attribute.\nvalue (object): The value for instance attribute.", "source": "juraj-google-style"}
{"code": "def parse(self, data):\n    data = '\\n'.join(self.strip(data.split('\\n')))\n    tag_re = re.compile('^:\\\\n?(?P<full_tag>(?P<tag>[0-9]{2}|NS)(?P<sub_tag>[A-Z])?):', re.MULTILINE)\n    matches = list(tag_re.finditer(data))\n    valid_matches = list(self.sanatize_tag_id_matches(matches))\n    for (i, match) in enumerate(valid_matches):\n        tag_id = self.normalize_tag_id(match.group('tag'))\n        tag = (self.tags.get(match.group('full_tag')) or self.tags[tag_id])\n        if valid_matches[(i + 1):]:\n            tag_data = data[match.end():valid_matches[(i + 1)].start()].strip()\n        else:\n            tag_data = data[match.end():].strip()\n        tag_dict = tag.parse(self, tag_data)\n        for processor in self.processors.get(('pre_%s' % tag.slug), []):\n            tag_dict = processor(self, tag, tag_dict)\n        result = tag(self, tag_dict)\n        for processor in self.processors.get(('post_%s' % tag.slug), []):\n            result = processor(self, tag, tag_dict, result)\n        if isinstance(tag, mt940.tags.Statement):\n            if (not self.transactions):\n                transaction = Transaction(self)\n                self.transactions.append(transaction)\n            if transaction.data.get('id'):\n                transaction = Transaction(self, result)\n                self.transactions.append(transaction)\n            else:\n                transaction.data.update(result)\n        elif (issubclass(tag.scope, Transaction) and self.transactions):\n            for (k, v) in _compat.iteritems(result):\n                if ((k in transaction.data) and hasattr(v, 'strip')):\n                    transaction.data[k] += ('\\n%s' % v.strip())\n                else:\n                    transaction.data[k] = v\n        elif issubclass(tag.scope, Transactions):\n            self.data.update(result)\n    return self.transactions", "docstring": "Parses mt940 data, expects a string with data\n\nArgs:\ndata (str): The MT940 data\n\nReturns: :py:class:`list` of :py:class:`Transaction`", "source": "codesearchnet"}
{"code": "def register(model_type, config, exist_ok=False) -> None:\n    if issubclass(config, PretrainedConfig) and config.model_type != model_type:\n        raise ValueError(f'The config you are passing has a `model_type` attribute that is not consistent with the model type you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they match!')\n    CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)", "docstring": "Register a new configuration for this class.\n\nArgs:\nmodel_type (`str`): The model type like \"bert\" or \"gpt\".\nconfig ([`PretrainedConfig`]): The config to register.", "source": "github-repos"}
{"code": "def task(self, task_uuid):\n        \n        request = clearly_pb2.FindTaskRequest(task_uuid=task_uuid)\n        task = self._stub.find_task(request)\n        if task.uuid:\n            ClearlyClient._display_task(task, True, True, True)\n        else:\n            print(EMPTY)", "docstring": "Finds one specific task.\n\nArgs:\ntask_uuid (str): the task id", "source": "juraj-google-style"}
{"code": "def __init__(self, context):\n    \n    del context  \n    self._debugger_data_server = None\n    self._server_thread = None\n    self._grpc_port = None", "docstring": "Constructs a debugger plugin for TensorBoard.\n\nThis plugin adds handlers for retrieving debugger-related data. The plugin\nalso starts a debugger data server once the log directory is passed to the\nplugin via the call to get_plugin_apps.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def listen(self, log, noprint=True):\n    try:\n        result = self.decode_event(log.topics, log.data)\n    except ValueError:\n        return\n    if (not noprint):\n        print(result)\n    return result", "docstring": "Return a dictionary representation of the Log instance.\n\nNote:\nThis function won't work with anonymous events.\n\nArgs:\nlog (processblock.Log): The Log instance that needs to be parsed.\nnoprint (bool): Flag to turn off priting of the decoded log instance.", "source": "codesearchnet"}
{"code": "def write_file(path, content, mode='w'):\n    \n    \n    from peltak.core import context\n    from peltak.core import log\n\n    if context.get('pretend', False):\n        log.info(\"Would overwrite <34>{path}<32> with:\\n<90>{content}\",\n                 path=path,\n                 content=content)\n    else:\n        with open(path, mode) as fp:\n            fp.write(content)", "docstring": "--pretend aware file writing.\n\nYou can always write files manually but you should always handle the\n--pretend case.\n\nArgs:\npath (str):\ncontent (str):\nmode (str):", "source": "juraj-google-style"}
{"code": "def run_eagerly(self):\n    if self._run_eagerly is True and (not context.executing_eagerly()):\n        raise ValueError('You can only set `run_eagerly=True` if eager execution is enabled.')\n    if not self.dynamic:\n        if self._run_eagerly is None:\n            return def_function.functions_run_eagerly()\n        else:\n            return self._run_eagerly\n    else:\n        if not context.executing_eagerly():\n            raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You must enable eager execution with `tf.enable_eager_execution()`.')\n        if self._run_eagerly is False:\n            raise ValueError('Your model contains layers that can only be successfully run in eager execution (layers constructed with `dynamic=True`). You cannot set `run_eagerly=False`.')\n        return context.executing_eagerly()", "docstring": "Settable attribute indicating whether the model should run eagerly.\n\nRunning eagerly means that your model will be run step by step,\nlike Python code. Your model might run slower, but it should become easier\nfor you to debug it by stepping into individual layer calls.\n\nBy default, we will attempt to compile your model to a static graph to\ndeliver the best execution performance.\n\nReturns:\nBoolean, whether the model should run eagerly.", "source": "github-repos"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates true positive and false positive statistics.\n\nArgs:\ny_true: The ground truth values, with the same dimensions as `y_pred`.\nWill be cast to `bool`.\ny_pred: The predicted values. Each element must be in the range `[0, 1]`.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def GetAccounts(self):\n    selector = {'fields': ['CustomerId', 'CanManageClients']}\n    accounts = self.client.GetService('ManagedCustomerService').get(selector)\n    return accounts['entries']", "docstring": "Return the client accounts associated with the user's manager account.\n\nReturns:\nlist List of ManagedCustomer data objects.", "source": "codesearchnet"}
{"code": "def _get_bounding_box(self, box: 'torch.Tensor') -> Dict[str, int]:\n    if self.framework != 'pt':\n        raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')\n    xmin, ymin, xmax, ymax = box.int().tolist()\n    bbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}\n    return bbox", "docstring": "Turns list [xmin, xmax, ymin, ymax] into dict { \"xmin\": xmin, ... }\n\nArgs:\nbox (`torch.Tensor`): Tensor containing the coordinates in corners format.\n\nReturns:\nbbox (`Dict[str, int]`): Dict containing the coordinates in corners format.", "source": "github-repos"}
{"code": "def select_qadapter(self, pconfs):\n        \n        \n        policy, max_ncpus = self.policy, self.max_cores\n        pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)\n\n        if policy.precedence == \"qadapter\":\n\n            \n            for qadpos, qad in enumerate(self.qads):\n                possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]\n\n                if qad.allocation == \"nodes\":\n                \n                    \n                    for pconf in possible_pconfs:\n                        if pconf.num_cores % qad.hw.cores_per_node == 0:\n                            return self._use_qadpos_pconf(qadpos, pconf)\n\n                \n                if possible_pconfs:\n                    return self._use_qadpos_pconf(qadpos, possible_pconfs[0])\n\n        elif policy.precedence == \"autoparal_conf\":\n            \n            for pconf in pconfs:\n                for qadpos, qad in enumerate(self.qads):\n\n                    if qad.allocation == \"nodes\" and not pconf.num_cores % qad.hw.cores_per_node == 0:\n                        continue \n\n                    if qad.can_run_pconf(pconf):\n                        return self._use_qadpos_pconf(qadpos, pconf)\n\n        else:\n            raise ValueError(\"Wrong value of policy.precedence = %s\" % policy.precedence)\n\n        \n        raise RuntimeError(\"Cannot find qadapter for this run!\")", "docstring": "Given a list of parallel configurations, pconfs, this method select an `optimal` configuration\naccording to some criterion as well as the :class:`QueueAdapter` to use.\n\nArgs:\npconfs: :class:`ParalHints` object with the list of parallel configurations\n\nReturns:\n:class:`ParallelConf` object with the `optimal` configuration.", "source": "juraj-google-style"}
{"code": "def code_verifier(n_bytes=64):\n    \n    verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n    \n    \n    if len(verifier) < 43:\n        raise ValueError(\"Verifier too short. n_bytes must be > 30.\")\n    elif len(verifier) > 128:\n        raise ValueError(\"Verifier too long. n_bytes must be < 97.\")\n    else:\n        return verifier", "docstring": "Generates a 'code_verifier' as described in section 4.1 of RFC 7636.\n\nThis is a 'high-entropy cryptographic random string' that will be\nimpractical for an attacker to guess.\n\nArgs:\nn_bytes: integer between 31 and 96, inclusive. default: 64\nnumber of bytes of entropy to include in verifier.\n\nReturns:\nBytestring, representing urlsafe base64-encoded random data.", "source": "juraj-google-style"}
{"code": "def _lower_non_existent_context_field_filters(match_traversals, visitor_fn):\n    new_match_traversals = []\n    for match_traversal in match_traversals:\n        new_match_traversal = []\n        for step in match_traversal:\n            if (step.where_block is not None):\n                new_filter = step.where_block.visit_and_update_expressions(visitor_fn)\n                if (new_filter.predicate == TrueLiteral):\n                    new_filter = None\n                new_step = step._replace(where_block=new_filter)\n            else:\n                new_step = step\n            new_match_traversal.append(new_step)\n        new_match_traversals.append(new_match_traversal)\n    return new_match_traversals", "docstring": "Return new match traversals, lowering filters involving non-existent ContextFields.\n\nExpressions involving non-existent ContextFields are evaluated to TrueLiteral.\nBinaryCompositions, where one of the operands is lowered to a TrueLiteral,\nare lowered appropriately based on the present operator (u'||' and u'&&' are affected).\nTernaryConditionals, where the predicate is lowered to a TrueLiteral,\nare replaced by their if_true predicate.\nThe `visitor_fn` implements these behaviors (see `_update_context_field_expression`).\n\nArgs:\nmatch_traversals: list of match traversal enitities to be lowered\nvisitor_fn: visit_and_update function for lowering expressions in given match traversal\n\nReturns:\nnew list of match_traversals, with all filter expressions lowered", "source": "codesearchnet"}
{"code": "def generate_output_list(self, source, key, val, line='2', hr=True, show_name=False, colorize=True):\n    output = generate_output(line=line, short=(HR_RDAP[source][key]['_short'] if hr else key), name=(HR_RDAP[source][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)\n    if (val is not None):\n        for item in val:\n            output += generate_output(line=str((int(line) + 1)), value=item, colorize=colorize)\n    return output", "docstring": "The function for generating CLI output RDAP list results.\n\nArgs:\nsource (:obj:`str`): The parent key 'network' or 'objects'\n(required).\nkey (:obj:`str`): The event key 'events' or 'events_actor'\n(required).\nval (:obj:`dict`): The event dictionary (required).\nline (:obj:`str`): The line number (0-4). Determines indentation.\nDefaults to '0'.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def async_decorator(func):\n    \n\n    @functools.wraps(func)\n    def async_wrapper(*args, **kwargs):\n        \n        if 'callback' not in kwargs or not kwargs['callback']:\n            return func(*args, **kwargs)\n\n        callback = kwargs.pop('callback')\n\n        if not callable(callback):\n            raise TypeError('Expected \\'callback\\' is not callable.')\n\n        def thread_func(*args, **kwargs):\n            \n            exception, res = None, None\n            try:\n                res = func(*args, **kwargs)\n            except Exception as e:\n                exception = e\n            return callback(exception, res)\n\n        thread = threads.ThreadReturn(target=thread_func,\n                                      args=args,\n                                      kwargs=kwargs)\n        thread.daemon = True\n        thread.start()\n        return thread\n\n    return async_wrapper", "docstring": "Asynchronous function decorator.  Interprets the function as being\nasynchronous, so returns a function that will handle calling the\nFunction asynchronously.\n\nArgs:\nfunc (function): function to be called asynchronously\n\nReturns:\nThe wrapped function.\n\nRaises:\nAttributeError: if ``func`` is not callable", "source": "juraj-google-style"}
{"code": "def mean(x, axis=None, keepdims=False):\n    \n    from .function_bases import mean as mean_base\n    if axis is None:\n        axis = range(x.ndim)\n    elif not hasattr(axis, '__iter__'):\n        axis = [axis]\n    return mean_base(x, axis, keepdims)", "docstring": "Reduction along axes with mean operation.\n\nArgs:\nx (Variable): An input variable.\naxis (None, int or tuple of ints): Axis or axes along which mean is\ncalculated. Passing the default value `None` will reduce all dimensions.\nkeepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.\n\nReturns:\n~nnabla.Variable: N-D array.", "source": "juraj-google-style"}
{"code": "def find_word_groups(self, text, category, proximity=2):\n    f = re.IGNORECASE\n    words = getattr(self, category)\n    regex = re.compile((('(\\\\b' + '\\\\b|\\\\b'.join(words)) + '\\\\b)'), flags=f)\n    candidates = regex.finditer(text)\n    (starts, ends) = ([], [])\n    groups = []\n    for item in candidates:\n        starts.append(item.span()[0])\n        ends.append(item.span()[1])\n        groups.append(item.group().lower())\n    new_starts = []\n    new_groups = []\n    skip = False\n    for (i, g) in enumerate(groups):\n        if skip:\n            skip = False\n            continue\n        if ((i < (len(groups) - 1)) and ((starts[(i + 1)] - ends[i]) <= proximity)):\n            if (g[(- 1)] == '-'):\n                sep = ''\n            else:\n                sep = ' '\n            new_groups.append(((g + sep) + groups[(i + 1)]))\n            new_starts.append(starts[i])\n            skip = True\n        else:\n            if (g not in new_groups):\n                new_groups.append(g)\n                new_starts.append(starts[i])\n            skip = False\n    return new_groups", "docstring": "Given a string and a category, finds and combines words into\ngroups based on their proximity.\n\nArgs:\ntext (str): Some text.\ntokens (list): A list of regex strings.\n\nReturns:\nlist. The combined strings it found.\n\nExample:\nCOLOURS = [r\"red(?:dish)?\", r\"grey(?:ish)?\", r\"green(?:ish)?\"]\ns = 'GREYISH-GREEN limestone with RED or GREY sandstone.'\nfind_word_groups(s, COLOURS) --> ['greyish green', 'red', 'grey']", "source": "codesearchnet"}
{"code": "def open(self, filepath):\n    with io.open(filepath, 'r', encoding='utf-8') as fp:\n        content = fp.read()\n    return content", "docstring": "Open settings backend to return its content\n\nArgs:\nfilepath (str): Settings object, depends from backend\n\nReturns:\nstring: File content.", "source": "codesearchnet"}
{"code": "def with_params(self, params):\n        \n        copy = params.copy()\n        copy.update(self._params)\n        return self.__copy_and_set('params', copy)", "docstring": "Adds parameters to the request params\n\nArgs:\nparams (dict): The parameters to add to the request params\n\nReturns:\nThe request builder instance in order to chain calls", "source": "juraj-google-style"}
{"code": "def docstring(documentation, prepend=False, join=''):\n\n    def decorator(func):\n        current = (func.__doc__ if func.__doc__ else '').strip()\n        doc = documentation.strip()\n        new = '\\n'.join(([doc, join, current] if prepend else [current, join, doc]))\n        lines = len(new.strip().splitlines())\n        if (lines == 1):\n            func.__doc__ = new.strip()\n        else:\n            func.__doc__ = (new.strip() + '\\n')\n        return func\n    return decorator", "docstring": "r\"\"\"Prepend or append a string to the current documentation of the function.\n\nThis decorator should be robust even if ``func.__doc__`` is None\n(for example, if -OO was passed to the interpreter).\n\nUsage::\n\n@docstring('Appended this line')\ndef func():\n\"This docstring will have a line below.\"\npass\n\n>>> print(func.__doc__)\nThis docstring will have a line below.\n\nAppended this line\n\nArgs:\ndocumentation (str): Documentation string that should be added,\nappended or prepended to the current documentation string.\nprepend (bool): Prepend the documentation string to the current\ndocumentation if ``True`` else append. default=``False``\njoin (str): String used to separate docstrings. default='\\n'", "source": "codesearchnet"}
{"code": "def GetExecutionDetails(self, request, global_params=None):\n    config = self.GetMethodConfig('GetExecutionDetails')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Request detailed information about the execution status of the job. EXPERIMENTAL. This API is subject to change or removal without notice.\n\nArgs:\nrequest: (DataflowProjectsLocationsJobsGetExecutionDetailsRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(JobExecutionDetails) The response message.", "source": "github-repos"}
{"code": "def main_loop(self, steps_per_epoch, starting_epoch, max_epoch):\n        \n        with self.sess.as_default():\n            self.loop.config(steps_per_epoch, starting_epoch, max_epoch)\n            self.loop.update_global_step()\n            try:\n                self._callbacks.before_train()\n                \n                \n                self.loop.update_global_step()\n                for self.loop._epoch_num in range(\n                        self.loop.starting_epoch, self.loop.max_epoch + 1):\n                    logger.info(\"Start Epoch {} ...\".format(self.loop.epoch_num))\n                    self._callbacks.before_epoch()\n                    start_time = time.time()\n                    for self.loop._local_step in range(self.loop.steps_per_epoch):\n                        if self.hooked_sess.should_stop():\n                            return\n                        self.run_step()  \n                        self._callbacks.trigger_step()\n                    self._callbacks.after_epoch()\n                    logger.info(\"Epoch {} (global_step {}) finished, time:{}.\".format(\n                        self.loop.epoch_num, self.loop.global_step, humanize_time_delta(time.time() - start_time)))\n\n                    \n                    self._callbacks.trigger_epoch()\n                logger.info(\"Training has finished!\")\n            except (StopTraining, tf.errors.OutOfRangeError) as e:\n                logger.info(\"Training was stopped by exception {}.\".format(str(e)))\n            except KeyboardInterrupt:\n                logger.info(\"Detected Ctrl-C and exiting main loop.\")\n                raise\n            finally:\n                self._callbacks.after_train()\n                self.hooked_sess.close()", "docstring": "Run the main training loop.\n\nArgs:\nsteps_per_epoch, starting_epoch, max_epoch (int):", "source": "juraj-google-style"}
{"code": "def loads(cls, name):\n    if (not isinstance(name, six.string_types)):\n        raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(classname=cls.__name__, string_types=repr(six.string_types)))\n    if ((not name) or name.isspace()):\n        raise ValueError('name must not be empty')\n    return cls(name)", "docstring": "Load a parsed name from a string.\n\nRaises:\nTypeError: when name isn't a type of `six.string_types`.\nValueError: when name is empty or None.", "source": "codesearchnet"}
{"code": "def get_random_email(ltd='com'):\n    email = [RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), '@', RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), '.', ltd]\n    return ''.join(email)", "docstring": "Get a random email address with the given ltd.\n\nArgs:\nltd (str): The ltd to use (e.g. com).\n\nReturns:\nstr: The random email.", "source": "codesearchnet"}
{"code": "def read_proto(file_name: str, proto_cls: Type[_T]) -> _T:\n    raw_text = ''\n    proto = proto_cls()\n    with open(file_name, 'r', encoding='utf-8') as f:\n        raw_text = f.read()\n    return text_format.Parse(raw_text, proto)", "docstring": "Reads a protobuf in prototxt format from file_name.\n\nData is parsed into an instance of `proto_cls`.\n\nArgs:\nfile_name: The file to read from.\nproto_cls: The type of protobuf message to parse as.\n\nReturns:\nThe protobuf message in the file.", "source": "github-repos"}
{"code": "def dict2str(self, d: Dict, joiner: str) -> str:\n        \n        result = str()\n        for key in d:\n            result = result + str(key) + \" : \"\n            if isinstance(d[key], list):\n                result = result + self.list2str(d[key], joiner) + joiner\n            elif isinstance(d[key], dict):\n                result = result + self.dict2str(d[key], joiner) + joiner\n            elif d[key]:\n                result = result + str(d[key]) + joiner\n        return result", "docstring": "Convert dict to str as input for tokenizer\n\nArgs:\nd (dict): dict for converting\njoiner (str): join the elements using this string to separate them.\n\nReturns: the value of the dict as a string", "source": "juraj-google-style"}
{"code": "def pauli_group(number_of_qubits, case='weight'):\n    if (number_of_qubits < 5):\n        temp_set = []\n        if (case == 'weight'):\n            tmp = pauli_group(number_of_qubits, case='tensor')\n            return sorted(tmp, key=(lambda x: (- np.count_nonzero((np.array(x.to_label(), 'c') == b'I')))))\n        elif (case == 'tensor'):\n            for k in range((4 ** number_of_qubits)):\n                z = np.zeros(number_of_qubits, dtype=np.bool)\n                x = np.zeros(number_of_qubits, dtype=np.bool)\n                for j in range(number_of_qubits):\n                    element = ((k \n                    if (element == 1):\n                        x[j] = True\n                    elif (element == 2):\n                        z[j] = True\n                        x[j] = True\n                    elif (element == 3):\n                        z[j] = True\n                temp_set.append(Pauli(z, x))\n            return temp_set\n        else:\n            raise QiskitError(\"Only support 'weight' or 'tensor' cases but you have {}.\".format(case))\n    raise QiskitError('Only support number of qubits is less than 5')", "docstring": "Return the Pauli group with 4^n elements.\n\nThe phases have been removed.\ncase 'weight' is ordered by Pauli weights and\ncase 'tensor' is ordered by I,X,Y,Z counting lowest qubit fastest.\n\nArgs:\nnumber_of_qubits (int): number of qubits\ncase (str): determines ordering of group elements ('weight' or 'tensor')\n\nReturns:\nlist: list of Pauli objects\n\nRaises:\nQiskitError: case is not 'weight' or 'tensor'\nQiskitError: number_of_qubits is larger than 4", "source": "codesearchnet"}
{"code": "def unite(df, colname, *args, **kwargs):\n    to_unite = list([a for a in flatten(args)])\n    sep = kwargs.get('sep', '_')\n    remove = kwargs.get('remove', True)\n    na_action = kwargs.get('na_action', 'maintain')\n    if (na_action == 'maintain'):\n        df[colname] = df[to_unite].apply((lambda x: (np.nan if any(x.isnull()) else sep.join(x.map(str)))), axis=1)\n    elif (na_action == 'ignore'):\n        df[colname] = df[to_unite].apply((lambda x: sep.join(x[(~ x.isnull())].map(str))), axis=1)\n    elif (na_action == 'as_string'):\n        df[colname] = df[to_unite].astype(str).apply((lambda x: sep.join(x)), axis=1)\n    if remove:\n        df.drop(to_unite, axis=1, inplace=True)\n    return df", "docstring": "Does the inverse of `separate`, joining columns together by a specified\nseparator.\n\nAny columns that are not strings will be converted to strings.\n\nArgs:\ndf (pandas.DataFrame): DataFrame passed in through the pipe.\ncolname (str): the name of the new joined column.\n*args: list of columns to be joined, which can be strings, symbolic, or\ninteger positions.\n\nKwargs:\nsep (str): the string separator to join the columns with.\nremove (bool): Boolean indicating whether or not to remove the\noriginal columns.\nna_action (str): can be one of `'maintain'` (the default),\n'`ignore'`, or `'as_string'`. The default will make the new column\nrow a `NaN` value if any of the original column cells at that\nrow contained `NaN`. '`ignore'` will treat any `NaN` value as an\nempty string during joining. `'as_string'` will convert any `NaN`\nvalue to the string `'nan'` prior to joining.", "source": "codesearchnet"}
{"code": "def html2text(__html: str, *, width: int=80, ascii_replacements: bool=False) -> str:\n    html2.BODY_WIDTH = width\n    html2.UNICODE_SNOB = ascii_replacements\n    return html2.html2text(__html).strip()", "docstring": "HTML to plain text renderer.\n\nSee also: :pypi:`html2text`\n\nArgs:\n__html: Text to process\nwidth: Paragraph width\nascii_replacements: Use pseudo-ASCII replacements for Unicode\nReturns:\nRendered text", "source": "codesearchnet"}
{"code": "def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):\n    if num_chunks_before == 0 and num_chunks_after == 0:\n        return vectors\n    slices = []\n    for i in range(-num_chunks_before, num_chunks_after + 1):\n        if i == 0:\n            slices.append(vectors)\n        else:\n            slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))\n    return torch.cat(slices, dim=3)", "docstring": "Used to implement attention between consecutive chunks.\n\nArgs:\nvectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]\nnum_chunks_before: chunks before current chunk to include in attention\nnum_chunks_after: chunks after current chunk to include in attention\n\nReturns:\ntensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).", "source": "github-repos"}
{"code": "def key_to_kind(cls, key):\n    \n    if key.kind() == Kind.KIND_NAME:\n      return key.id()\n    else:\n      return key.parent().id()", "docstring": "Return the kind specified by a given __property__ key.\n\nArgs:\nkey: key whose kind name is requested.\n\nReturns:\nThe kind specified by key.", "source": "juraj-google-style"}
{"code": "def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):\n    \n    button_entry_comment = extract_element_internationalized_comment(button)\n    if button_entry_comment is None:\n        return\n\n    for state in button.getElementsByTagName('state'):\n        state_name = state.attributes['key'].value\n        state_entry_comment = button_entry_comment + \" - \" + state_name + \" state of button\"\n        if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):\n            try:\n                button_entry_key = state.attributes['title'].value\n            except KeyError:\n                try:\n                    button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue\n                except Exception:\n                    continue\n\n            results.append((button_entry_key, state_entry_comment))\n\n    warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)", "docstring": "Adds strings pairs from a button xib element.\n\nArgs:\nxib_file (str): Path to the xib file.\nresults (list): The list to add the results to.\nbutton(element): The button element from the xib, to extract the string pairs from.\nspecial_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)", "source": "juraj-google-style"}
{"code": "def minimum_required(version):\n        \n        def _minimum_required(func):\n            \n            @functools.wraps(func)\n            def wrapper(self, *args, **kwargs):\n                \n                if list(self.version) < list(version):\n                    raise errors.JLinkException('Version %s required.' % version)\n                return func(self, *args, **kwargs)\n            return wrapper\n        return _minimum_required", "docstring": "Decorator to specify the minimum SDK version required.\n\nArgs:\nversion (str): valid version string\n\nReturns:\nA decorator function.", "source": "juraj-google-style"}
{"code": "def parse_email(data, strip_attachment_payloads=False):\n    \n\n    if type(data) == bytes:\n        if is_outlook_msg(data):\n            data = convert_outlook_msg(data)\n        data = data.decode(\"utf-8\", errors=\"replace\")\n    parsed_email = mailparser.parse_from_string(data)\n    headers = json.loads(parsed_email.headers_json).copy()\n    parsed_email = json.loads(parsed_email.mail_json).copy()\n    parsed_email[\"headers\"] = headers\n\n    if \"received\" in parsed_email:\n        for received in parsed_email[\"received\"]:\n            if \"date_utc\" in received:\n                if received[\"date_utc\"] is None:\n                    del received[\"date_utc\"]\n                else:\n                    received[\"date_utc\"] = received[\"date_utc\"].replace(\"T\",\n                                                                        \" \")\n\n    if \"from\" not in parsed_email:\n        if \"From\" in parsed_email[\"headers\"]:\n            parsed_email[\"from\"] = parsed_email[\"Headers\"][\"From\"]\n        else:\n            parsed_email[\"from\"] = None\n\n    if parsed_email[\"from\"] is not None:\n        parsed_email[\"from\"] = parse_email_address(parsed_email[\"from\"][0])\n\n    if \"date\" in parsed_email:\n        parsed_email[\"date\"] = parsed_email[\"date\"].replace(\"T\", \" \")\n    else:\n        parsed_email[\"date\"] = None\n    if \"reply_to\" in parsed_email:\n        parsed_email[\"reply_to\"] = list(map(lambda x: parse_email_address(x),\n                                            parsed_email[\"reply_to\"]))\n    else:\n        parsed_email[\"reply_to\"] = []\n\n    if \"to\" in parsed_email:\n        parsed_email[\"to\"] = list(map(lambda x: parse_email_address(x),\n                                      parsed_email[\"to\"]))\n    else:\n        parsed_email[\"to\"] = []\n\n    if \"cc\" in parsed_email:\n        parsed_email[\"cc\"] = list(map(lambda x: parse_email_address(x),\n                                      parsed_email[\"cc\"]))\n    else:\n        parsed_email[\"cc\"] = []\n\n    if \"bcc\" in parsed_email:\n        parsed_email[\"bcc\"] = list(map(lambda x: parse_email_address(x),\n                                       parsed_email[\"bcc\"]))\n    else:\n        parsed_email[\"bcc\"] = []\n\n    if \"delivered_to\" in parsed_email:\n        parsed_email[\"delivered_to\"] = list(\n            map(lambda x: parse_email_address(x),\n                parsed_email[\"delivered_to\"])\n        )\n\n    if \"attachments\" not in parsed_email:\n        parsed_email[\"attachments\"] = []\n    else:\n        for attachment in parsed_email[\"attachments\"]:\n            if \"payload\" in attachment:\n                payload = attachment[\"payload\"]\n                try:\n                    if \"content_transfer_encoding\" in attachment:\n                        if attachment[\"content_transfer_encoding\"] == \"base64\":\n                            payload = decode_base64(payload)\n                        else:\n                            payload = str.encode(payload)\n                    attachment[\"sha256\"] = hashlib.sha256(payload).hexdigest()\n                except Exception as e:\n                    logger.debug(\"Unable to decode attachment: {0}\".format(\n                        e.__str__()\n                    ))\n        if strip_attachment_payloads:\n            for attachment in parsed_email[\"attachments\"]:\n                if \"payload\" in attachment:\n                    del attachment[\"payload\"]\n\n    if 
\"subject\" not in parsed_email:\n        parsed_email[\"subject\"] = None\n\n    parsed_email[\"filename_safe_subject\"] = get_filename_safe_string(\n        parsed_email[\"subject\"])\n\n    if \"body\" not in parsed_email:\n        parsed_email[\"body\"] = None\n\n    return parsed_email", "docstring": "A simplified email parser\n\nArgs:\ndata: The RFC 822 message string, or MSG binary\nstrip_attachment_payloads (bool): Remove attachment payloads\n\nReturns (dict): Parsed email data", "source": "juraj-google-style"}
{"code": "def _CheckIsSocket(self, file_entry):\n    \n    if definitions.FILE_ENTRY_TYPE_SOCKET not in self._file_entry_types:\n      return False\n    return file_entry.IsSocket()", "docstring": "Checks the is_socket find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "juraj-google-style"}
{"code": "def normalize_datetime_to_utc(dt):\n    return datetime.datetime(*dt.utctimetuple()[:6], microsecond=dt.microsecond, tzinfo=datetime.timezone.utc)", "docstring": "Adjust datetime to UTC.\n\nApply the timezone offset to the datetime and set the timezone to UTC.\n\nThis is a no-op if the datetime is already in UTC.\n\nArgs:\ndt : datetime\n- tz-aware: Used in the formatted string.\n- tz-naive: Assumed to be in UTC.\n\nReturns:\ndatetime\nThe returned datetime is always timezone aware and in UTC.\n\nNotes:\nThis forces a new object to be returned, which fixes an issue with\nserialization to XML in PyXB. PyXB uses a mixin together with\ndatetime to handle the XML xs:dateTime. That type keeps track of\ntimezone information included in the original XML doc, which conflicts if we\nreturn it here as part of a datetime mixin.\n\nSee Also:\n``cast_naive_datetime_to_tz()``", "source": "codesearchnet"}
{"code": "def _generate_placements(self, width, height):\n        \n        skyline = self._skyline\n\n        points = collections.deque()\n\n        left_index = right_index = 0 \n        support_height = skyline[0].top\n        support_index = 0 \n    \n        placements = self._placement_points_generator(skyline, width)\n        for p in placements:\n\n            \n            if p+width > skyline[right_index].right:\n                for right_index in range(right_index+1, len(skyline)):\n                    if skyline[right_index].top >= support_height:\n                        support_index = right_index\n                        support_height = skyline[right_index].top\n                    if p+width <= skyline[right_index].right:\n                        break\n                \n            \n            if p >= skyline[left_index].right:\n                left_index +=1\n           \n            \n            if support_index < left_index:\n                support_index = left_index\n                support_height = skyline[left_index].top\n                for i in range(left_index, right_index+1):\n                    if skyline[i].top >= support_height:\n                        support_index = i\n                        support_height = skyline[i].top\n\n            \n            if support_height+height <= self.height:\n                points.append((Rectangle(p, support_height, width, height),\\\n                    left_index, right_index))\n\n        return points", "docstring": "Generate a list with\n\nArguments:\nskyline (list): SkylineHSegment list\nwidth (number):\n\nReturns:\ntuple (Rectangle, fitness):\nRectangle: Rectangle in valid position\nleft_skyline: Index for the skyline under the rectangle left edge.\nright_skyline: Index for the skyline under the rectangle right edte.", "source": "juraj-google-style"}
{"code": "def dataframe(self, force_refresh=False):\n    if force_refresh:\n        self.clear_cache()\n    if (self._dataframe is None):\n        self._dataframe = self._fetch_dataframe()\n    return self._dataframe", "docstring": "A pandas dataframe with lots of interesting results about this object.\nCreated by calling SageMaker List and Describe APIs and converting them into\na convenient tabular summary.\n\nArgs:\nforce_refresh (bool): Set to True to fetch the latest data from SageMaker API.", "source": "codesearchnet"}
{"code": "def parse_string_descriptor(string_desc):\n    \n\n    if not isinstance(string_desc, str):\n        string_desc = str(string_desc)\n\n    if not string_desc.endswith(';'):\n        string_desc += ';'\n\n    parsed = get_streamer_parser().parseString(string_desc)[0]\n\n    realtime = 'realtime' in parsed\n    broadcast = 'broadcast' in parsed\n    encrypted = 'security' in parsed and parsed['security'] == 'encrypted'\n    signed = 'security' in parsed and parsed['security'] == 'signed'\n    auto = 'manual' not in parsed\n\n    with_other = None\n    if 'with_other' in parsed:\n        with_other = parsed['with_other']\n        auto = False\n\n    dest = SlotIdentifier.FromString('controller')\n    if 'explicit_tile' in parsed:\n        dest = parsed['explicit_tile']\n\n    selector = parsed['selector']\n\n    \n    if realtime and (encrypted or signed):\n        raise SensorGraphSemanticError(\"Realtime streamers cannot be either signed or encrypted\")\n\n    if broadcast and (encrypted or signed):\n        raise SensorGraphSemanticError(\"Broadcast streamers cannot be either signed or encrypted\")\n\n    report_type = 'broadcast' if broadcast else 'telegram'\n    dest = dest\n    selector = selector\n\n    if realtime or broadcast:\n        report_format = u'individual'\n    elif signed:\n        report_format = u'signedlist_userkey'\n    elif encrypted:\n        raise SensorGraphSemanticError(\"Encrypted streamers are not yet supported\")\n    else:\n        report_format = u'hashedlist'\n\n    return DataStreamer(selector, dest, report_format, auto, report_type=report_type, with_other=with_other)", "docstring": "Parse a string descriptor of a streamer into a DataStreamer object.\n\nArgs:\nstring_desc (str): The string descriptor that we wish to parse.\n\nReturns:\nDataStreamer: A DataStreamer object representing the streamer.", "source": "juraj-google-style"}
{"code": "def get_staged_signatures(vcs):\n    staged_path = _get_staged_history_path(vcs)\n    known_signatures = []\n    if os.path.exists(staged_path):\n        with open(staged_path, 'r') as f:\n            known_signatures = f.read().split()\n    return known_signatures", "docstring": "Get the list of staged signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nlist(basestring) - list of signatures", "source": "codesearchnet"}
{"code": "def __init__(self, configuration_file='dependencies.ini'):\n    \n    super(DependencyHelper, self).__init__()\n    self._test_dependencies = {}\n    self.dependencies = {}\n\n    dependency_reader = DependencyDefinitionReader()\n\n    with open(configuration_file, 'r') as file_object:\n      for dependency in dependency_reader.Read(file_object):\n        self.dependencies[dependency.name] = dependency\n\n    dependency = DependencyDefinition('mock')\n    dependency.minimum_version = '0.7.1'\n    dependency.version_property = '__version__'\n    self._test_dependencies['mock'] = dependency", "docstring": "Initializes a dependency helper.\n\nArgs:\nconfiguration_file (Optional[str]): path to the dependencies\nconfiguration file.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    encoding_method = getattr(path_spec, 'encoding_method', None)\n    if not encoding_method:\n      raise errors.PathSpecError(\n          'Unsupported path specification without encoding method.')\n\n    self._encoding_method = encoding_method", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def sparse_read(self, indices, name=None):\n    raise AttributeError", "docstring": "Gather slices from params axis axis according to indices.\n\nThis function supports a subset of tf.gather, see tf.gather for details on\nusage.\n\nArgs:\nindices: The index `Tensor`.  Must be one of the following types: `int32`,\n`int64`. Must be in range `[0, params.shape[axis])`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor`. Has the same type as `params`.", "source": "github-repos"}
{"code": "def pyc_load(fp):\n    \n\n    magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)\n    magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)\n\n    internals = MAGIC_MAP.get(magic_1)\n    if internals is None:\n        raise ValueError('Invalid or unknown magic (%d).' % magic_1)\n\n    if magic_2 != 2573:\n        raise ValueError('Invalid secondary magic (%d).' % magic_2)\n\n    timestamp = datetime.datetime.fromtimestamp(U32(fp.read(4), target=MARSHAL_TARGET))\n\n    if internals['version'] >= 33:\n        file_size = U32(fp.read(4))\n    else:\n        file_size = None\n\n    code_object = marshal_load(fp, internals)\n\n    return PycFile(magic_1, internals, timestamp, file_size, code_object)", "docstring": "Load a .pyc file from a file-like object.\n\nArguments:\nfp(file): The file-like object to read.\n\nReturns:\nPycFile: The parsed representation of the .pyc file.", "source": "juraj-google-style"}
{"code": "def apply_mutation(module_path, operator, occurrence):\n    \n    module_ast = get_ast(module_path, python_version=operator.python_version)\n    original_code = module_ast.get_code()\n    visitor = MutationVisitor(occurrence, operator)\n    mutated_ast = visitor.walk(module_ast)\n\n    mutated_code = None\n    if visitor.mutation_applied:\n        mutated_code = mutated_ast.get_code()\n        with module_path.open(mode='wt', encoding='utf-8') as handle:\n            handle.write(mutated_code)\n            handle.flush()\n\n    return original_code, mutated_code", "docstring": "Apply a specific mutation to a file on disk.\n\nArgs:\nmodule_path: The path to the module to mutate.\noperator: The `operator` instance to use.\noccurrence: The occurrence of the operator to apply.\n\nReturns: A `(unmutated-code, mutated-code)` tuple to the with-block. If there was\nno mutation performed, the `mutated-code` is `None`.", "source": "juraj-google-style"}
{"code": "def _expand_ellipsis(key_list, num_remaining_dims):\n    if num_remaining_dims is None:\n        raise ValueError('Ellipsis not supported for unknown shape RaggedTensors')\n    num_indices = sum((1 for idx in key_list if idx is not array_ops.newaxis))\n    if num_indices > num_remaining_dims + 1:\n        raise IndexError('Too many indices for RaggedTensor')\n    elif num_indices == num_remaining_dims + 1:\n        return key_list[1:]\n    else:\n        return [slice(None, None, None)] + key_list", "docstring": "Expands the ellipsis at the start of `key_list`.\n\nAssumes that the first element of `key_list` is Ellipsis.  This will either\nremove the Ellipsis (if it corresponds to zero indices) or prepend a new\n`slice(None, None, None)` (if it corresponds to more than zero indices).\n\nArgs:\nkey_list: The arguments to `__getitem__()`.\nnum_remaining_dims: The number of dimensions remaining.\n\nReturns:\nA copy of `key_list` with he ellipsis expanded.\nRaises:\nValueError: If ragged_rank.shape.ndims is None\nIndexError: If there are too many elements in `key_list`.", "source": "github-repos"}
{"code": "def duplicate(script, layer_num=None):\n    filter_xml = '  <filter name=\"Duplicate Current layer\"/>\\n'\n    if isinstance(script, mlx.FilterScript):\n        if ((layer_num is None) or (layer_num == script.current_layer())):\n            util.write_filter(script, filter_xml)\n            script.add_layer('{}_copy'.format(script.layer_stack[script.current_layer()]), True)\n        else:\n            change(script, layer_num)\n            util.write_filter(script, filter_xml)\n            script.add_layer('{}_copy'.format(script.layer_stack[layer_num]), True)\n    else:\n        util.write_filter(script, filter_xml)\n    return None", "docstring": "Duplicate a layer.\n\nNew layer label is '*_copy'.\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\nlayer_num (int): layer number to duplicate. Default is the\ncurrent layer. Not supported on the file base API.\n\nLayer stack:\nCreates a new layer\nChanges current layer to the new layer\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def combine(a1, a2):\n    \n    if not isinstance(a1, list):\n        a1 = [a1]\n    if not isinstance(a2, list):\n        a2 = [a2]\n    return a1 + a2", "docstring": "Combine to argument into a single flat list\n\nIt is used when you are not sure whether arguments are lists but want to combine them into one flat list\n\nArgs:\na1: list or other thing\na2: list or other thing\n\nReturns:\nlist: a flat list contain a1 and a2", "source": "juraj-google-style"}
{"code": "def get_spectre_plot(self, sigma=0.05, step=0.01):\n    from pymatgen.util.plotting import pretty_plot\n    from matplotlib.mlab import normpdf\n    plt = pretty_plot(12, 8)\n    transitions = self.read_excitation_energies()\n    minval = (min([val[0] for val in transitions]) - (5.0 * sigma))\n    maxval = (max([val[0] for val in transitions]) + (5.0 * sigma))\n    npts = (int(((maxval - minval) / step)) + 1)\n    eneval = np.linspace(minval, maxval, npts)\n    lambdaval = [(((cst.h * cst.c) / (val * cst.e)) * 1000000000.0) for val in eneval]\n    spectre = np.zeros(npts)\n    for trans in transitions:\n        spectre += (trans[2] * normpdf(eneval, trans[0], sigma))\n    spectre /= spectre.max()\n    plt.plot(lambdaval, spectre, 'r-', label='spectre')\n    data = {'energies': eneval, 'lambda': lambdaval, 'xas': spectre}\n    plt.vlines([val[1] for val in transitions], 0.0, [val[2] for val in transitions], color='blue', label='transitions', linewidth=2)\n    plt.xlabel('$\\\\lambda$ (nm)')\n    plt.ylabel('Arbitrary unit')\n    plt.legend()\n    return (data, plt)", "docstring": "Get a matplotlib plot of the UV-visible xas. Transition are plotted\nas vertical lines and as a sum of normal functions with sigma with. The\nbroadening is applied in energy and the xas is plotted as a function\nof the wavelength.\n\nArgs:\nsigma: Full width at half maximum in eV for normal functions.\nstep: bin interval in eV\n\nReturns:\nA dict: {\"energies\": values, \"lambda\": values, \"xas\": values}\nwhere values are lists of abscissa (energies, lamba) and\nthe sum of gaussian functions (xas).\nA matplotlib plot.", "source": "codesearchnet"}
{"code": "def _illegal_character(c, ctx, message=''):\n    \n    container_type = ctx.container.ion_type is None and 'top-level' or ctx.container.ion_type.name\n    value_type = ctx.ion_type is None and 'unknown' or ctx.ion_type.name\n    if c is None:\n        header = 'Illegal token'\n    else:\n        c = 'EOF' if BufferQueue.is_eof(c) else _chr(c)\n        header = 'Illegal character %s' % (c,)\n    raise IonException('%s at position %d in %s value contained in %s. %s Pending value: %s'\n                       % (header, ctx.queue.position, value_type, container_type, message, ctx.value))", "docstring": "Raises an IonException upon encountering the given illegal character in the given context.\n\nArgs:\nc (int|None): Ordinal of the illegal character.\nctx (_HandlerContext):  Context in which the illegal character was encountered.\nmessage (Optional[str]): Additional information, as necessary.", "source": "juraj-google-style"}
{"code": "def AddSubkey(self, registry_key):\n    \n    name = registry_key.name.upper()\n    if name in self._subkeys:\n      raise KeyError(\n          'Subkey: {0:s} already exists.'.format(registry_key.name))\n\n    self._subkeys[name] = registry_key\n\n    key_path = key_paths.JoinKeyPath([self._key_path, registry_key.name])\n    registry_key._key_path = key_path", "docstring": "Adds a subkey.\n\nArgs:\nregistry_key (WinRegistryKey): Windows Registry subkey.\n\nRaises:\nKeyError: if the subkey already exists.", "source": "juraj-google-style"}
{"code": "def read_tensor_tracer_event_file(event_file):\n    step_occurrence_count = collections.defaultdict(int)\n    step_occurrence_list = []\n    for trace_event in summary_iterator.summary_iterator(event_file):\n        if not trace_event.HasField('summary'):\n            continue\n        if len(trace_event.summary.value) != 1:\n            raise ValueError('Single step contains %d summary values, expected 1.' % len(trace_event.summary.value))\n        step = trace_event.step\n        step_occurrence_count[step] += 1\n        occurrence_idx = step_occurrence_count[step] - 1\n        occurrence_size = len(step_occurrence_list)\n        if occurrence_idx == occurrence_size:\n            new_occurrence = collections.defaultdict(dict)\n            step_occurrence_list.append(new_occurrence)\n        elif occurrence_idx > occurrence_size:\n            raise ValueError('Unexpected: occurrence_idx (%d) > occurrence_size (%d)' % (occurrence_idx, occurrence_size))\n        tensor_value = trace_event.summary.value[0]\n        tensor_name = tensor_value.tag\n        real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]\n        tensor_content = np.frombuffer(tensor_value.tensor.tensor_content, dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()).reshape(real_shape)\n        step_occurrence_list[occurrence_idx][step][tensor_name] = tensor_content\n    return step_occurrence_list", "docstring": "Reads the event file written by tensor tracer.\n\nThis can be used to read the full tensors written into binary event files by\nby TensorTracer with trace_mode=full_tensor_summary.\n\nExample usage:\nresult_dict_list = tensor_tracer.read_tensor_tracer_event_file(\nevent_file_path)\nfor result_dict in result_dict_list:\nfor step, tensor_dict in result_dict.items():\nfor tensor_name, full_tensor_content in tensor_dict.items():\nlogging.info(tensor_name, full_tensor_content)\n\nArgs:\nevent_file: Path to the event file that contains only tensor tracer events.\nReturns:\nA list of event dictionaries, each of which with the form:\n{step_number: {tensor_name: tensor_content}}. This is a list instead of\na single event dictionary because it is possible that an event file may\nhave multiple event traces, each of them covering the same step ranges.\nRaises:\nValueError: If an unexpected trace is found.", "source": "github-repos"}
{"code": "def Downsampled(cls, stats, interval=None):\n    interval = (interval or cls.DEFAULT_SAMPLING_INTERVAL)\n    result = cls(stats)\n    result.cpu_samples = cls._Downsample(kind=CpuSample, samples=stats.cpu_samples, interval=interval)\n    result.io_samples = cls._Downsample(kind=IOSample, samples=stats.io_samples, interval=interval)\n    return result", "docstring": "Constructs a copy of given stats but downsampled to given interval.\n\nArgs:\nstats: A `ClientStats` instance.\ninterval: A downsampling interval.\n\nReturns:\nA downsampled `ClientStats` instance.", "source": "codesearchnet"}
{"code": "def eig(x):\n    if any_symbolic_tensors((x,)):\n        return Eig().symbolic_call(x)\n    return _eig(x)", "docstring": "Computes the eigenvalues and eigenvectors of a square matrix.\n\nArgs:\nx: Input tensor of shape `(..., M, M)`.\n\nReturns:\nA tuple of two tensors: a tensor of shape `(..., M)` containing\neigenvalues and a tensor of shape `(..., M, M)` containing eigenvectors.", "source": "github-repos"}
{"code": "def update_handler(Model, name=None, **kwds):\n\n    async def action_handler(service, action_type, payload, props, notify=True, **kwds):\n        if (action_type == get_crud_action('update', (name or Model))):\n            try:\n                message_props = {}\n                if ('correlation_id' in props):\n                    message_props['correlation_id'] = props['correlation_id']\n                pk_field = Model.primary_key()\n                if (not (pk_field.name in payload)):\n                    raise ValueError('Must specify the pk of the model when updating')\n                model = Model.select().where((pk_field == payload[pk_field.name])).get()\n                payload.pop(pk_field.name, None)\n                for (key, value) in payload.items():\n                    setattr(model, key, value)\n                model.save()\n                if notify:\n                    (await service.event_broker.send(payload=ModelSerializer().serialize(model), action_type=change_action_status(action_type, success_status()), **message_props))\n            except Exception as err:\n                if notify:\n                    (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))\n                else:\n                    raise err\n    return action_handler", "docstring": "This factory returns an action handler that updates a new instance of\nthe specified model when a update action is recieved, assuming the\naction follows nautilus convetions.\n\nArgs:\nModel (nautilus.BaseModel): The model to update when the action\nreceived.\n\nReturns:\nfunction(type, payload): The action handler for this model", "source": "codesearchnet"}
{"code": "def add(self, *l):\n    for a in flatten(l):\n        self._add([self.Inner(a)], self.l)", "docstring": "add inner to outer\n\nArgs:\n*l: element that is passed into Inner init", "source": "codesearchnet"}
{"code": "def __init__(self, name, min_val, max_val):\n        \n\n        self.name = name\n        self.min_val = min_val\n        self.max_val = max_val\n        if type(min_val) != type(max_val):\n            raise ValueError('Supplied min_val is not the same type as\\\n                             supplied max_val: {}, {}'.format(\n                                 type(min_val),\n                                 type(max_val))\n                             )\n        self.dtype = type(min_val + max_val)\n        if self.dtype not in SUPPORTED_DTYPES:\n            raise ValueError('Unsupported data type: use {}'\n                             .format(SUPPORTED_DTYPES))", "docstring": "Parameter object\n\nArgs:\nname (str): name of the parameter\nmin_val (int or float): minimum allowed value for the parameter\nmax_val (int or float): maximum allowed value for the parameter", "source": "juraj-google-style"}
{"code": "def template(self):\n    instance = self.template_instance()\n    offset = (self._chunk.offset() + instance.template_offset())\n    node = TemplateNode(self._buf, offset, self._chunk, instance)\n    return node", "docstring": "parse the template referenced by this root node.\nnote, this template structure is not guaranteed to be located within the root node's boundaries.\n\nReturns:\nTemplateNode: the template.", "source": "codesearchnet"}
{"code": "def get_glibc_version():\n    key = 'glibc_ver'\n    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n    if err and FLAGS.debug:\n        print('Error in detecting GCC version:\\n %s' % str(err))\n    return out.strip(b'\\n')", "docstring": "Retrieves version of GLIBC detected.\n\nReturns:\nString that is the version of GLIBC.\ne.g. '2.24'", "source": "github-repos"}
{"code": "def report_uninitialized_resources(resource_list=None, name='report_uninitialized_resources'):\n    if resource_list is None:\n        resource_list = shared_resources() + local_resources()\n    with ops.name_scope(name):\n        local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu:0')\n        with ops.device(local_device):\n            if not resource_list:\n                return array_ops.constant([], dtype=dtypes.string)\n            variables_mask = math_ops.logical_not(array_ops_stack.stack([r.is_initialized for r in resource_list]))\n            variable_names_tensor = array_ops.constant([s.handle.name for s in resource_list])\n            return array_ops.boolean_mask(variable_names_tensor, variables_mask)", "docstring": "Returns the names of all uninitialized resources in resource_list.\n\nIf the returned tensor is empty then all resources have been initialized.\n\nArgs:\nresource_list: resources to check. If None, will use shared_resources() +\nlocal_resources().\nname: name for the resource-checking op.\n\nReturns:\nTensor containing names of the handles of all resources which have not\nyet been initialized.", "source": "github-repos"}
{"code": "def shift(x, offset, dim, wrap, name=None):\n    return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]", "docstring": "Shift operation.\n\nShift x right by +offset in dimension dim.\n\nArgs:\nx: a Tensor\noffset: an integer. If negative, shift left instead of right.\ndim: a Dimension of x\nwrap: a boolean - whether to wrap (True) or pad with zeros (False).\nname: an optional string\n\nReturns:\na Tensor with the same shape and dtype as x", "source": "codesearchnet"}
{"code": "def get_processes(sort_by_name=True):\n    \n    if sort_by_name:\n        return sorted(\n            _list_processes(),\n            key=cmp_to_key(\n                lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\n            ),\n        )\n    else:\n        return sorted(\n            _list_processes(),\n            key=cmp_to_key(\n                lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\n            ),\n        )", "docstring": "Retrieve a list of processes sorted by name.\n\nArgs:\nsort_by_name (bool): Sort the list by name or by process ID's.\n\nReturns:\nlist of (int, str) or list of (int, str, str): List of process id,\nprocess name and optional cmdline tuples.", "source": "juraj-google-style"}
{"code": "def validate(cls, mapper_spec):\n    \n    if mapper_spec.output_writer_class() != cls:\n      raise errors.BadWriterParamsError(\"Output writer class mismatch\")\n    params = output_writers._get_params(mapper_spec)\n    \n    if cls.BUCKET_NAME_PARAM not in params:\n      raise errors.BadWriterParamsError(\n          \"%s is required for the _HashingGCSOutputWriter\" %\n          cls.BUCKET_NAME_PARAM)", "docstring": "Validates mapper specification.\n\nArgs:\nmapper_spec: an instance of model.MapperSpec to validate.\nRaises:\nBadWriterParamsError: when Output writer class mismatch.", "source": "juraj-google-style"}
{"code": "def separate_words(text, acronyms=None):\n    (words, _case, _sep) = case_parse.parse_case(text, acronyms, preserve_case=True)\n    return ' '.join(words)", "docstring": "Return text in \"seperate words\" style.\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> separate_words(\"HELLO_WORLD\")\n'HELLO WORLD'\n>>> separate_words(\"helloHTMLWorld\", True, [\"HTML\"])\n'hello HTML World'", "source": "codesearchnet"}
{"code": "def __init__(self, instrument, probe_name, name = None, info = None, buffer_length = 100):\n        \n\n\n        assert isinstance(instrument, Instrument)\n        assert isinstance(probe_name, str)\n        assert probe_name in instrument._PROBES\n\n\n        if name is None:\n            name = probe_name\n        assert isinstance(name, str)\n\n        if info is None:\n            info = ''\n        assert isinstance(info, str)\n\n        self.name = name\n        self.info = info\n        self.instrument = instrument\n        self.probe_name = probe_name\n\n        self.buffer = deque(maxlen = buffer_length)", "docstring": "creates a probe...\nArgs:\nname (optinal):  name of probe, if not provided take name of function\nsettings (optinal): a Parameter object that contains all the information needed in the script", "source": "juraj-google-style"}
{"code": "async def reset(self):\n    params = {'include_participants': (1 if AUTO_GET_PARTICIPANTS else 0), 'include_matches': (1 if AUTO_GET_MATCHES else 0)}\n    res = (await self.connection('POST', 'tournaments/{}/reset'.format(self._id), **params))\n    self._refresh_from_json(res)", "docstring": "reset the tournament on Challonge\n\n|methcoro|\n\nNote:\n|from_api| Reset a tournament, clearing all of its scores and attachments. You can then add/remove/edit participants before starting the tournament again.\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def ndim(x):\n    return x.shape.rank", "docstring": "Returns the number of axes in a tensor, as an integer.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nInteger (scalar), number of axes.\n\nExamples:\n\n\n>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))\n>>> val = np.array([[1, 2], [3, 4]])\n>>> kvar = tf.keras.backend.variable(value=val)\n>>> tf.keras.backend.ndim(input)\n3\n>>> tf.keras.backend.ndim(kvar)\n2", "source": "github-repos"}
{"code": "def _checkBool(inputvalue, description='inputvalue'):\n    \n    _checkString(description, minlength=1, description='description string')\n    if not isinstance(inputvalue, bool):\n        raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))", "docstring": "Check that the given inputvalue is a boolean.\n\nArgs:\n* inputvalue (boolean): The value to be checked.\n* description (string): Used in error messages for the checked inputvalue.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def query_gal(self, l, b, d=None, **kwargs):\n    if (not isinstance(l, units.Quantity)):\n        l = (l * units.deg)\n    if (not isinstance(b, units.Quantity)):\n        b = (b * units.deg)\n    if (d is None):\n        coords = coordinates.SkyCoord(l, b, frame='galactic')\n    else:\n        if (not isinstance(d, units.Quantity)):\n            d = (d * units.kpc)\n        coords = coordinates.SkyCoord(l, b, distance=d, frame='galactic')\n    return self.query(coords, **kwargs)", "docstring": "Query using Galactic coordinates.\n\nArgs:\nl (:obj:`float`, scalar or array-like): Galactic longitude, in degrees,\nor as an :obj:`astropy.unit.Quantity`.\nb (:obj:`float`, scalar or array-like): Galactic latitude, in degrees,\nor as an :obj:`astropy.unit.Quantity`.\nd (Optional[:obj:`float`, scalar or array-like]): Distance from the Solar\nSystem, in kpc, or as an :obj:`astropy.unit.Quantity`. Defaults to\n``None``, meaning no distance is specified.\n**kwargs: Any additional keyword arguments accepted by derived\nclasses.\n\nReturns:\nThe results of the query, which must be implemented by derived\nclasses.", "source": "codesearchnet"}
{"code": "def list_filters(self):\n\n    def _row_gen(attributes):\n        for attr in attributes.values():\n            (yield (attr.name, attr.type, attr.description))\n    return pd.DataFrame.from_records(_row_gen(self.filters), columns=['name', 'type', 'description'])", "docstring": "Lists available filters in a readable DataFrame format.\n\nReturns:\npd.DataFrame: Frame listing available filters.", "source": "codesearchnet"}
{"code": "def get_groups(self, **kwargs):\n        \n        \n        params = {\n            'cultureInfo': util.language_code(kwargs.get('lang'))\n        }\n\n        \n        result = self.make_request('geo', 'get_groups', **params)\n\n        if not util.check_result(result):\n            return False, result.get('resultDescription', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'resultValues')\n        return True, [emtype.GeoGroupItem(**a) for a in values]", "docstring": "Obtain line types and details.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[GeoGroupItem]), or message\nstring in case of error.", "source": "juraj-google-style"}
{"code": "def apply(self, inputs, *args, **kwargs):\n    warnings.warn('`layer.apply` is deprecated and will be removed in a future version. Please use `layer.__call__` method instead.')\n    return self.__call__(inputs, *args, **kwargs)", "docstring": "Deprecated, do NOT use!\n\nThis is an alias of `self.__call__`.\n\nArgs:\ninputs: Input tensor(s).\n*args: additional positional arguments to be passed to `self.call`.\n**kwargs: additional keyword arguments to be passed to `self.call`.\n\nReturns:\nOutput tensor(s).", "source": "github-repos"}
{"code": "def datasets_get(self, dataset_name):\n    \n    url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)\n    return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a dataset.\n\nArgs:\ndataset_name: the name of the dataset\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def add(self, rule: 'functions.ReplacementRule') -> None:\n        \n        self.matcher.add(rule.pattern, rule.replacement)", "docstring": "Add a new rule to the replacer.\n\nArgs:\nrule:\nThe rule to add.", "source": "juraj-google-style"}
{"code": "def dr(self, atom1, atom2):\n    return self.cell.dr(atom1.r, atom2.r)", "docstring": "Calculate the distance between two atoms.\n\nArgs:\natom1 (vasppy.Atom): Atom 1.\natom2 (vasppy.Atom): Atom 2.\n\nReturns:\n(float): The distance between Atom 1 and Atom 2.", "source": "codesearchnet"}
{"code": "def _ws_on_open(self, ws: websocket.WebSocketApp):\n    payload = {'op': WebSocketEvent.IDENTIFY.value, 'd': {'token': self.token, 'properties': {'$os': sys.platform, '$browser': 'Pycord', '$device': 'Pycord', '$referrer': '', '$referring_domain': ''}, 'compress': True, 'large_threshold': 250}}\n    self.logger.debug('Sending identify payload')\n    ws.send(json.dumps(payload))\n    self.connected = True", "docstring": "Callback for sending the initial authentication data\n\nThis \"payload\" contains the required data to authenticate this websocket\nclient as a suitable bot connection to the Discord websocket.\n\nArgs:\nws: websocket connection", "source": "codesearchnet"}
{"code": "def get_item(dictionary, tuple_key, default_value):\n    \n    u, v = tuple_key\n\n    \n    tuple1 = dictionary.get((u, v), None)\n    tuple2 = dictionary.get((v, u), None)\n\n    \n    return tuple1 or tuple2 or default_value", "docstring": "Grab values from a dictionary using an unordered tuple as a key.\n\nDictionary should not contain None, 0, or False as dictionary values.\n\nArgs:\ndictionary: Dictionary that uses two-element tuple as keys\ntuple_key: Unordered tuple of two elements\ndefault_value: Value that is returned when the tuple_key is not found in the dictionary", "source": "juraj-google-style"}
{"code": "def get_lattice_type(number):\n        \n        f = lambda i, j: i <= number <= j\n        cs = {'triclinic': (1, 2), 'monoclinic': (3, 15),\n              'orthorhombic': (16, 74), 'tetragonal': (75, 142),\n              'trigonal': (143, 167), 'hexagonal': (168, 194),\n              'cubic': (195, 230)}\n\n        crystal_system = None\n        for k, v in cs.items():\n            if f(*v):\n                crystal_system = k\n                break\n\n        if number in [146, 148, 155, 160, 161, 166, 167]:\n            return \"rhombohedral\"\n        elif crystal_system == \"trigonal\":\n            return \"hexagonal\"\n        else:\n            return crystal_system", "docstring": "Return the lattice crystal system.\n\nHexagonal cells are differentiated into rhombohedral and hexagonal\nlattices.\n\nArgs:\nnumber (int): The international space group number.\n\nReturns:\nstr: The lattice crystal system.", "source": "juraj-google-style"}
{"code": "def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):\n    \n    \n\n    pdb_id = pdb_id.lower()\n    file_type = file_type.lower()\n    file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']\n    if file_type not in file_types:\n        raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')\n\n    if file_type == 'mmtf':\n        file_type = 'mmtf.gz'\n\n    if file_type.endswith('.gz'):\n        gzipped = True\n    else:\n        gzipped = False\n\n    if file_type == 'mmcif':\n        file_type = 'cif'\n\n    if only_header:\n        folder = 'header'\n        outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))\n    else:\n        folder = 'download'\n        outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        if file_type == 'mmtf.gz' or file_type == 'mmtf':\n            mmtf_api = '1.0'\n            download_link = 'http:\n        else:\n            download_link = 'http:\n\n        urlretrieve(download_link, outfile)\n\n        if gzipped:\n            outfile = ssbio.utils.gunzip_file(infile=outfile,\n                                              outfile=outfile.strip('.gz'),\n                                              outdir=outdir,\n                                              delete_original=False,\n                                              force_rerun_flag=force_rerun)\n\n        log.debug('{}: saved structure file'.format(outfile))\n    else:\n        if file_type == 'mmtf.gz':\n            outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))\n        log.debug('{}: structure file already saved'.format(outfile))\n\n    return outfile", "docstring": "Download a structure from the RCSB PDB by ID. Specify the file type desired.\n\nArgs:\npdb_id: PDB ID\nfile_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz\noutdir: Optional output directory\nonly_header: If only the header file should be downloaded\nforce_rerun: If the file should be downloaded again even if it exists\n\nReturns:\nstr: Path to outfile", "source": "juraj-google-style"}
{"code": "def expand(self, tags, clique_scoring_func=None):\n        \n        lattice = Lattice()\n        overlapping_spans = []\n\n        def end_token_index():\n            return max([t.get('end_token') for t in overlapping_spans])\n\n        for i in xrange(len(tags)):\n            tag = tags[i]\n\n            if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):\n                overlapping_spans.append(tag)\n            elif len(overlapping_spans) > 1:\n                cliques = list(self._sub_expand(overlapping_spans))\n                if clique_scoring_func:\n                    cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))\n                lattice.append(cliques)\n                overlapping_spans = [tag]\n            else:\n                lattice.append(overlapping_spans)\n                overlapping_spans = [tag]\n        if len(overlapping_spans) > 1:\n            cliques = list(self._sub_expand(overlapping_spans))\n            if clique_scoring_func:\n                    cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))\n            lattice.append(cliques)\n        else:\n            lattice.append(overlapping_spans)\n\n        return lattice.traverse()", "docstring": "This is the main function to expand tags into cliques\n\nArgs:\ntags (list): a list of tags to find the cliques.\nclique_scoring_func (func): a function that returns a float\nvalue for the clique\n\nReturns:\nlist : a list of cliques", "source": "juraj-google-style"}
{"code": "def auth_required(func):\n\n    @wraps(func)\n    async def wrapper(*args):\n        if ((await get_auth(args[(- 1)])) is None):\n            raise web.HTTPForbidden()\n        return (await func(*args))\n    return wrapper", "docstring": "Utility decorator that checks if a user has been authenticated for this\nrequest.\n\nAllows views to be decorated like:\n\n@auth_required\ndef view_func(request):\npass\n\nproviding a simple means to ensure that whoever is calling the function has\nthe correct authentication details.\n\nArgs:\nfunc: Function object being decorated and raises HTTPForbidden if not\n\nReturns:\nA function object that will raise web.HTTPForbidden() if the passed\nrequest does not have the correct permissions to access the view.", "source": "codesearchnet"}
{"code": "def update_connection_public_key(self, connection_id, public_key):\n        \n        if connection_id in self._connections:\n            connection_info = self._connections[connection_id]\n            self._connections[connection_id] = \\\n                ConnectionInfo(connection_info.connection_type,\n                               connection_info.connection,\n                               connection_info.uri,\n                               connection_info.status,\n                               public_key)\n        else:\n            LOGGER.debug(\"Could not update the public key %s for \"\n                         \"connection_id %s. The connection does not \"\n                         \"exist.\",\n                         public_key,\n                         connection_id)", "docstring": "Adds the public_key to the connection definition.\n\nArgs:\nconnection_id (str): The identifier for the connection.\npublic_key (str): The public key used to enforce permissions on\nconnections.", "source": "juraj-google-style"}
{"code": "def find(self, username):\n        \n        filter = ['(uid={})'.format(username)]\n        results = self.client.search(filter)\n\n        if len(results) < 1:\n            raise ldap_tools.exceptions.NoUserFound(\n                'User ({}) not found'.format(username))\n            return  \n        elif len(results) > 1:\n            raise ldap_tools.exceptions.TooManyResults(\n                'Multiple users found. Please narrow your search.')\n            return  \n        else:\n            return results", "docstring": "Find user with given username.\n\nArgs:\nusername Username of the user to search for\n\nRaises:\nldap_tools.exceptions.NoUserFound: No users returned by LDAP\nldap_tools.exceptions.TooManyResults:\nMultiple users returned by LDAP", "source": "juraj-google-style"}
{"code": "def read_eof(self, echo=None):\n    d = b''\n    while True:\n        try:\n            d += self.read(1, echo)\n        except EOFError:\n            return d", "docstring": "Read until the channel is closed.\n\nArgs:\necho(bool): Whether to write the read data to stdout.\n\nReturns:\nbytes: The read data.", "source": "codesearchnet"}
{"code": "def get_pipeline_definition(pipeline_name, working_dir):\n    logger.debug('starting')\n    pipeline_path = get_pipeline_path(pipeline_name=pipeline_name, working_directory=working_dir)\n    logger.debug(f'Trying to open pipeline at path {pipeline_path}')\n    try:\n        with open(pipeline_path) as yaml_file:\n            pipeline_definition = pypyr.yaml.get_pipeline_yaml(yaml_file)\n            logger.debug(f'found {len(pipeline_definition)} stages in pipeline.')\n    except FileNotFoundError:\n        logger.error(f\"The pipeline doesn't exist. Looking for a file here: {pipeline_name}.yaml in the /pipelines sub directory.\")\n        raise\n    logger.debug('pipeline definition loaded')\n    logger.debug('done')\n    return pipeline_definition", "docstring": "Open and parse the pipeline definition yaml.\n\nParses pipeline yaml and returns dictionary representing the pipeline.\n\npipeline_name.yaml should be in the working_dir/pipelines/ directory.\n\nArgs:\npipeline_name: string. Name of pipeline. This will be the file-name of\nthe pipeline - i.e {pipeline_name}.yaml\nworking_dir: path. Start looking in\n./working_dir/pipelines/pipeline_name.yaml\n\nReturns:\ndict describing the pipeline, parsed from the pipeline yaml.\n\nRaises:\nFileNotFoundError: pipeline_name.yaml not found in the various pipeline\ndirs.", "source": "codesearchnet"}
{"code": "def call_projection_function(self, hist: Hist) -> Hist:\n    for axis in self.projection_axes:\n        logger.debug(f'Apply projection axes hist range: {axis.name}')\n        axis.apply_range_set(hist)\n    projected_hist = None\n    if (hasattr(hist, 'ProjectionND') and hasattr(hist, 'Projection')):\n        projected_hist = self._project_THn(hist=hist)\n    elif (hasattr(hist, 'ProjectionZ') and hasattr(hist, 'Project3D')):\n        projected_hist = self._project_TH3(hist=hist)\n    elif (hasattr(hist, 'ProjectionX') and hasattr(hist, 'ProjectionY')):\n        projected_hist = self._project_TH2(hist=hist)\n    else:\n        raise TypeError(type(hist), f'Could not recognize hist {hist} of type {type(hist)}')\n    self.cleanup_cuts(hist, cut_axes=self.projection_axes)\n    return projected_hist", "docstring": "Calls the actual projection function for the hist.\n\nArgs:\nhist: Histogram from which the projections should be performed.\nReturns:\nThe projected histogram.", "source": "codesearchnet"}
{"code": "def __init__(self, session, object_factory):\n        \n        check_type(session, RestSession, may_be_none=False)\n\n        super(RoomsAPI, self).__init__()\n\n        self._session = session\n        self._object_factory = object_factory", "docstring": "Initialize a new RoomsAPI object with the provided RestSession.\n\nArgs:\nsession(RestSession): The RESTful session object to be used for\nAPI calls to the Webex Teams service.\n\nRaises:\nTypeError: If the parameter types are incorrect.", "source": "juraj-google-style"}
{"code": "def FromString(cls, indata):\n        \n\n        lines = [x.strip() for x in indata.split(\"\\n\") if not x.startswith('\n\n        if len(lines) < 3:\n            raise DataError(\"Invalid CommandFile string that did not contain 3 header lines\", lines=lines)\n\n        fmt_line, version_line, ascii_line = lines[:3]\n\n        if not version_line.startswith(\"Format: \"):\n            raise DataError(\"Invalid format version that did not start with 'Format: '\", line=version_line)\n\n        version = version_line[8:]\n\n        if ascii_line != \"Type: ASCII\":\n            raise DataError(\"Unknown file type line (expected Type: ASCII)\", line=ascii_line)\n\n        cmds = [cls.decode(x) for x in lines[3:]]\n        return CommandFile(fmt_line, version, cmds)", "docstring": "Load a CommandFile from a string.\n\nThe string should be produced from a previous call to\nencode.\n\nArgs:\nindata (str): The encoded input data.\n\nReturns:\nCommandFile: The decoded CommandFile object.", "source": "juraj-google-style"}
{"code": "def fastcc_is_consistent(model, epsilon, solver):\n    for reaction in fastcc(model, epsilon, solver):\n        return False\n    return True", "docstring": "Quickly check whether model is consistent\n\nReturn true if the model is consistent. If it is only necessary to know\nwhether a model is consistent, this function is fast as it will return\nthe result as soon as it finds a single inconsistent reaction.\n\nArgs:\nmodel: :class:`MetabolicModel` to solve.\nepsilon: Flux threshold value.\nsolver: LP solver instance to use.", "source": "codesearchnet"}
{"code": "def results_tc(self, key, value):\n        \n        if os.access(self.default_args.tc_out_path, os.W_OK):\n            results_file = '{}/results.tc'.format(self.default_args.tc_out_path)\n        else:\n            results_file = 'results.tc'\n\n        new = True\n        open(results_file, 'a').close()  \n        with open(results_file, 'r+') as fh:\n            results = ''\n            for line in fh.read().strip().split('\\n'):\n                if not line:\n                    continue\n                try:\n                    k, v = line.split(' = ')\n                except ValueError:\n                    \n                    k, v = line.split(' =')\n                if k == key:\n                    v = value\n                    new = False\n                if v is not None:\n                    results += '{} = {}\\n'.format(k, v)\n            if new and value is not None:  \n                results += '{} = {}\\n'.format(key, value)\n            fh.seek(0)\n            fh.write(results)\n            fh.truncate()", "docstring": "Write data to results_tc file in TcEX specified directory.\n\nThe TcEx platform support persistent values between executions of the App.  This\nmethod will store the values for TC to read and put into the Database.\n\nArgs:\nkey (string): The data key to be stored.\nvalue (string): The data value to be stored.", "source": "juraj-google-style"}
{"code": "def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):\n    countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)\n    if (countryinfo is not None):\n        return countryinfo.get('\n    return None", "docstring": "Get country name from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get country name\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: Country name", "source": "codesearchnet"}
{"code": "def victim_phone_assets(self, main_type, sub_type, unique_id, params=None):\n        \n        params = params or {}\n\n        if not sub_type:\n            url = '/v2/{}/{}/victimAssets/phoneNumbers'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}/victimAssets/phoneNumbers'.format(main_type, sub_type, unique_id)\n\n        for vpa in self._iterate(url, params, 'victimPhone'):\n            yield vpa", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def merkleroot(hashes):\n    if (not hashes):\n        return sha3_256(b'').hexdigest()\n    if (len(hashes) == 1):\n        return hexlify(hashes[0]).decode()\n    if ((len(hashes) % 2) == 1):\n        hashes.append(hashes[(- 1)])\n    parent_hashes = [sha3_256((hashes[i] + hashes[(i + 1)])).digest() for i in range(0, (len(hashes) - 1), 2)]\n    return merkleroot(parent_hashes)", "docstring": "Computes the merkle root for a given list.\n\nArgs:\nhashes (:obj:`list` of :obj:`bytes`): The leaves of the tree.\n\nReturns:\nstr: Merkle root in hexadecimal form.", "source": "codesearchnet"}
{"code": "def make_multi_lagger(lags, groupby_kwargs=None):\n    laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]\n    feature_union = FeatureUnion([(repr(lagger), lagger) for lagger in laggers])\n    return feature_union", "docstring": "Return a union of transformers that apply different lags\n\nArgs:\nlags (Collection[int]): collection of lags to apply\ngroupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby", "source": "codesearchnet"}
{"code": "def stack_inputs(self, stack_indices=None, tile_variants=False):\n    if stack_indices is None:\n        stack_indices = range(len(self._inputs))\n    length = self.pfor.loop_len_vector\n    for i in stack_indices:\n        inp = self._inputs[i]\n        is_variant = inp.t.dtype == dtypes.variant\n        if not inp.is_stacked:\n            self._inputs[i] = _stack(inp.t, length)\n            if tile_variants and is_variant:\n                self._inputs[i] = wrap(_tile_variant_with_length(self._inputs[i].t, length), True)\n        elif not tile_variants and is_variant:\n            self._inputs[i] = wrap(_untile_variant(self._inputs[i].t), True)", "docstring": "Stacks unstacked inputs at `stack_indices`.\n\nArgs:\nstack_indices: indices of inputs at which stacking is done. If None,\nstacking is done at all indices.\ntile_variants: If True, affected indices which have a variant dtype will\nbe tiled after this operation to match the expected shape of a\nvectorized tensor. Variants generally need to be un-tiled when they are\ninputs to operations and tiled when returned.", "source": "github-repos"}
{"code": "def _event_to_pb(event):\n        \n        if isinstance(event, (TaskData, Task)):\n            key, klass = 'task', clearly_pb2.TaskMessage\n        elif isinstance(event, (WorkerData, Worker)):\n            key, klass = 'worker', clearly_pb2.WorkerMessage\n        else:\n            raise ValueError('unknown event')\n        keys = klass.DESCRIPTOR.fields_by_name.keys()\n        \n        data = {k: v for k, v in\n                getattr(event, '_asdict',  \n                        lambda: {f: getattr(event, f) for f in event._fields})  \n                ().items() if k in keys}\n        return key, klass(**data)", "docstring": "Supports converting internal TaskData and WorkerData, as well as\ncelery Task and Worker to proto buffers messages.\n\nArgs:\nevent (Union[TaskData|Task|WorkerData|Worker]):\n\nReturns:\nProtoBuf object", "source": "juraj-google-style"}
{"code": "def _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed):\n    if (initial_population is not None):\n        return [tf.convert_to_tensor(value=part) for part in initial_population]\n    seed_stream = distributions.SeedStream(seed, salt='get_starting_population')\n    population = []\n    for part in initial_position:\n        part = tf.convert_to_tensor(value=part)\n        part_event_shape = tf.shape(input=part)\n        population_part_shape = tf.concat([[(population_size - 1)], part_event_shape], axis=0)\n        population_part = tf.random.normal(population_part_shape, stddev=population_stddev, dtype=part.dtype.base_dtype, seed=seed_stream())\n        population_part += part\n        population_part = tf.concat([[part], population_part], axis=0)\n        population.append(population_part)\n    return population", "docstring": "Constructs the initial population.\n\nIf an initial population is not already provided, this function constructs\na population by adding random normal noise to the initial position.\n\nArgs:\ninitial_population: None or a list of `Tensor`s. The initial population.\ninitial_position: None or a list of `Tensor`s. The initial position.\nIf initial_population is None, this argument must not be None.\npopulation_size: Scalar integer `Tensor`. The number of members in the\npopulation. If the initial population is not None, this parameter is\nignored.\npopulation_stddev: A positive scalar real `Tensor` of the same dtype\nas `initial_position` or `initial_population` (whichever is not None).\nThis parameter is ignored if `initial_population`\nis specified. Used to generate the population from the\n`initial_position` by adding random normal noise with zero mean and\nthe specified standard deviation.\nseed: Seed for random number generation.\n\nReturns:\nA list of `Tensor`s. The initial population.", "source": "codesearchnet"}
{"code": "def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):\n    \n    import matplotlib.pyplot as plt\n\n    if plot_kwargs is None:\n        plot_kwargs = {}\n\n    data_column = 3 if elapsed else 1\n    data = np.genfromtxt(filename, dtype='i8,f4',\n                         usecols=(0, data_column), names=['k', 'v'])\n    index = data['k']\n    values = data['v']\n    if unit == 's':\n        pass\n    elif unit == 'm':\n        values /= 60\n    elif unit == 'h':\n        values /= 3600\n    elif unit == 'd':\n        values /= 3600 * 24\n    else:\n        raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')\n    plt.plot(index, values, **plot_kwargs)", "docstring": "Plot series data from MonitorTimeElapsed output text file.\n\nArgs:\nfilename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.\nelapsed (bool): If ``True``, it plots the total elapsed time.\nunit (str):\nTime unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.\nplot_kwags (dict, optional):\nKeyward arguments passed to :function:`matplotlib.pyplot.plot`.\n\nNote:\nmatplotlib package is required.", "source": "juraj-google-style"}
{"code": "def to_parquet(evset: EventSet, path: str, **kwargs):\n    df = to_pandas(evset)\n    df.to_parquet(path, **kwargs)", "docstring": "Saves an [`EventSet`][temporian.EventSet] to a CSV file.\n\nExample:\n```python\n>>> output_path = str(tmp_dir / \"output_data.parquet\")\n>>> evset = tp.event_set(timestamps=[1,], features={\"f1\": [0.1]})\n>>> tp.to_parquet(evset, output_path)\n\n```\n\nArgs:\nevset: EventSet to save.\npath: Path to the file.", "source": "github-repos"}
{"code": "def _on_scan_request(self, sequence, topic, message):\n    if messages.ProbeCommand.matches(message):\n        self._logger.debug('Received probe message on topic %s, message=%s', topic, message)\n        self._loop.add_callback(self._publish_scan_response, message['client'])\n    else:\n        self._logger.warn('Invalid message received on topic %s, message=%s', topic, message)", "docstring": "Process a request for scanning information\n\nArgs:\nsequence (int:) The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage_type (string): The type of the packet received\nmessage (dict): The message itself", "source": "codesearchnet"}
{"code": "def sg_reuse(tensor, **opt):\n    r\n    opt = tf.sg_opt(opt)\n    assert hasattr(tensor, '_sugar'), 'cannot reuse this node.'\n    assert opt.input is not None, 'input is mandatory.'\n\n    \n    nodes, prev = [tensor], tensor._sugar.prev\n    while prev is not None:\n        nodes = [prev] + nodes\n        prev = prev._sugar.prev if hasattr(prev, '_sugar') else None\n\n    \n    out = opt.input\n    for node in nodes[1:]:  \n        if node._sugar.is_layer:\n            fn = tf.sg_layer_func(node._sugar.func)\n            if node._sugar.arg.scope_name:\n                with tf.variable_scope(node._sugar.arg.scope_name):\n                    out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))\n            else:\n                out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))\n        else:\n            out = node._sugar.func(out, node._sugar.arg)\n\n    return out", "docstring": "r\"\"\" Reconstruct computational graph of `tensor` so all the parameters\ncan be reused and replace its input tensor with `opt.input`.\n\nArgs:\ntensor: A `Tensor` (automatically given by chaining).\n**opt:\ninput: A `Tensor` that will replace the original input tensor.\n\nReturns:\nReconstructed tensor nodes.", "source": "juraj-google-style"}
{"code": "def convert_to_ndarray(test_obj, a):\n    if tf.is_tensor(a):\n        a = test_obj.evaluate(a)\n    if not isinstance(a, np.ndarray):\n        return np.array(a)\n    return a", "docstring": "Converts the input `a` into an ndarray.\n\nArgs:\ntest_obj: An object which has the `evaluate` method. Used to evaluate `a` if\n`a` is a Tensor.\na: Object to be converted to an ndarray.\n\nReturns:\nAn ndarray containing the values of `a`.", "source": "github-repos"}
{"code": "def show_bokehjs(bokehjs_action, develop=False):\n    print()\n    if develop:\n        print('Installed Bokeh for DEVELOPMENT:')\n    else:\n        print('Installed Bokeh:')\n    if (bokehjs_action in ['built', 'installed']):\n        print(('  - using %s built BokehJS from bokehjs/build\\n' % (bright(yellow('NEWLY')) if (bokehjs_action == 'built') else bright(yellow('PREVIOUSLY')))))\n    else:\n        print((\"  - using %s BokehJS, located in 'bokeh.server.static'\\n\" % bright(yellow('PACKAGED'))))\n    print()", "docstring": "Print a useful report after setuptools output describing where and how\nBokehJS is installed.\n\nArgs:\nbokehjs_action (str) : one of 'built', 'installed', or 'packaged'\nhow (or if) BokehJS was installed into the python source tree\n\ndevelop (bool, optional) :\nwhether the command was for \"develop\" mode (default: False)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def parse_unique_urlencoded(content):\n    \n    urlencoded_params = urllib.parse.parse_qs(content)\n    params = {}\n    for key, value in six.iteritems(urlencoded_params):\n        if len(value) != 1:\n            msg = ('URL-encoded content contains a repeated value:'\n                   '%s -> %s' % (key, ', '.join(value)))\n            raise ValueError(msg)\n        params[key] = value[0]\n    return params", "docstring": "Parses unique key-value parameters from urlencoded content.\n\nArgs:\ncontent: string, URL-encoded key-value pairs.\n\nReturns:\ndict, The key-value pairs from ``content``.\n\nRaises:\nValueError: if one of the keys is repeated.", "source": "juraj-google-style"}
{"code": "def get_decomposition_energy(self, entry, pH, V):\n    if (self._multielement and (not isinstance(entry, MultiEntry))):\n        possible_entries = self._generate_multielement_entries(self._filtered_entries, forced_include=[entry])\n        if (entry.phase_type == 'solid'):\n            possible_entries = [e for e in possible_entries if (e.phase_type.count('Solid') == 1)]\n        possible_energies = [e.normalized_energy_at_conditions(pH, V) for e in possible_entries]\n    else:\n        possible_energies = [entry.normalized_energy_at_conditions(pH, V)]\n    min_energy = np.min(possible_energies, axis=0)\n    hull = self.get_hull_energy(pH, V)\n    return (min_energy - hull)", "docstring": "Finds decomposition to most stable entry\n\nArgs:\nentry (PourbaixEntry): PourbaixEntry corresponding to\ncompound to find the decomposition for\npH (float): pH at which to find the decomposition\nV (float): voltage at which to find the decomposition\n\nReturns:\nreaction corresponding to the decomposition", "source": "codesearchnet"}
{"code": "def _ReadIntegerDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    definition_object = self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.IntegerDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_INTEGER, is_member=is_member, supported_size_values=(1, 2, 4, 8))\n    attributes = definition_values.get('attributes', None)\n    if attributes:\n        format_attribute = attributes.get('format', definitions.FORMAT_SIGNED)\n        if (format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES):\n            error_message = 'unsupported format attribute: {0!s}'.format(format_attribute)\n            raise errors.DefinitionReaderError(definition_name, error_message)\n        definition_object.format = format_attribute\n    return definition_object", "docstring": "Reads an integer data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nIntegerDataTypeDefinition: integer data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def load(self, *modules):\n        \n        for module in modules:\n            if isinstance(module, six.string_types):\n                try:\n                    module = get_object(module)\n                except Exception as e:\n                    self.errors[module] = e\n                    continue\n            self.modules[module.__package__] = module\n            for (loader, module_name, is_pkg) in pkgutil.walk_packages(\n                module.__path__\n            ):\n                full_name = \"{}.{}\".format(_package(module), module_name)\n                try:\n                    self.modules[full_name] = get_object(full_name)\n                    if is_pkg:\n                        self.load(self.modules[full_name])\n                except Exception as e:\n                    self.errors[full_name] = e", "docstring": "Load one or more modules.\n\nArgs:\nmodules: Either a string full path to a module or an actual module\nobject.", "source": "juraj-google-style"}
{"code": "def case(store, institute_obj, case_obj):\n    \n    \n    case_obj['individual_ids'] = []\n    for individual in case_obj['individuals']:\n        try:\n            sex = int(individual.get('sex', 0))\n        except ValueError as err:\n            sex = 0\n        individual['sex_human'] = SEX_MAP[sex]\n\n        pheno_map = PHENOTYPE_MAP\n        if case_obj.get('track', 'rare') == 'cancer':\n            pheno_map = CANCER_PHENOTYPE_MAP\n\n        individual['phenotype_human'] = pheno_map.get(individual['phenotype'])\n        case_obj['individual_ids'].append(individual['individual_id'])\n\n    case_obj['assignees'] = [store.user(user_email) for user_email in\n                             case_obj.get('assignees', [])]\n\n    \n    suspects = [store.variant(variant_id) or variant_id for variant_id in\n                case_obj.get('suspects', [])]\n    causatives = [store.variant(variant_id) or variant_id for variant_id in\n                  case_obj.get('causatives', [])]\n\n    \n    distinct_genes = set()\n    case_obj['panel_names'] = []\n    for panel_info in case_obj.get('panels', []):\n        if not panel_info.get('is_default'):\n            continue\n        panel_obj = store.gene_panel(panel_info['panel_name'], version=panel_info.get('version'))\n        distinct_genes.update([gene['hgnc_id'] for gene in panel_obj.get('genes', [])])\n        full_name = \"{} ({})\".format(panel_obj['display_name'], panel_obj['version'])\n        case_obj['panel_names'].append(full_name)\n    case_obj['default_genes'] = list(distinct_genes)\n    for hpo_term in itertools.chain(case_obj.get('phenotype_groups', []),\n                                    case_obj.get('phenotype_terms', [])):\n        hpo_term['hpo_link'] = (\"http:\n                                .format(hpo_term['phenotype_id']))\n\n    \n    o_collaborators = []\n    for collab_id in case_obj['collaborators']:\n        if collab_id != case_obj['owner'] and store.institute(collab_id):\n            o_collaborators.append(store.institute(collab_id))\n\n    case_obj['o_collaborators'] = [(collab_obj['_id'], collab_obj['display_name']) for\n                                   collab_obj in o_collaborators]\n\n    irrelevant_ids = ('cust000', institute_obj['_id'])\n    collab_ids = [(collab['_id'], collab['display_name']) for collab in store.institutes() if\n                  (collab['_id'] not in irrelevant_ids) and\n                  (collab['_id'] not in case_obj['collaborators'])]\n\n    events = list(store.events(institute_obj, case=case_obj))\n    for event in events:\n        event['verb'] = VERBS_MAP[event['verb']]\n\n    case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id'])\n\n    \n    pheno_groups = institute_obj.get('phenotype_groups') or PHENOTYPE_GROUPS\n\n    data = {\n        'status_class': STATUS_MAP.get(case_obj['status']),\n        'other_causatives': store.check_causatives(case_obj=case_obj),\n        'comments': store.events(institute_obj, case=case_obj, comments=True),\n        'hpo_groups': pheno_groups,\n        'events': events,\n        'suspects': suspects,\n        'causatives': causatives,\n        'collaborators': collab_ids,\n        'cohort_tags': COHORT_TAGS,\n        'mme_nodes': current_app.mme_nodes, \n    }\n\n    return data", "docstring": "Preprocess a single case.\n\nPrepare the case to be displayed in the case view.\n\nArgs:\nstore(adapter.MongoAdapter)\ninstitute_obj(models.Institute)\ncase_obj(models.Case)\n\nReturns:\ndata(dict): includes the cases, how many there 
are and the limit.", "source": "juraj-google-style"}
{"code": "def convert_squeeze(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting squeeze ...')\n\n    if len(params['axes']) > 1:\n        raise AssertionError('Cannot convert squeeze by multiple dimensions')\n\n    def target_layer(x, axis=int(params['axes'][0])):\n        import tensorflow as tf\n        return tf.squeeze(x, axis=axis)\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert squeeze operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def Render(self):\n    xs = [self.xs[0]]\n    ps = [0.0]\n    for (i, p) in enumerate(self.ps):\n        xs.append(self.xs[i])\n        ps.append(p)\n        try:\n            xs.append(self.xs[(i + 1)])\n            ps.append(p)\n        except IndexError:\n            pass\n    return (xs, ps)", "docstring": "Generates a sequence of points suitable for plotting.\n\nAn empirical CDF is a step function; linear interpolation\ncan be misleading.\n\nReturns:\ntuple of (xs, ps)", "source": "codesearchnet"}
{"code": "def gradient_tensors(self):\n    return self._gradient_tensors", "docstring": "Get the gradient tensors that this object is aware of.\n\nReturns:\nA dict mapping x-tensor names to gradient tensor objects. x-tensor refers\nto the tensors on the denominator of the differentation.", "source": "github-repos"}
{"code": "def RemoveEventAttribute(self, attribute_name):\n    \n    if attribute_name not in self._extra_event_attributes:\n      raise KeyError('Event attribute: {0:s} not set'.format(attribute_name))\n\n    del self._extra_event_attributes[attribute_name]", "docstring": "Removes an attribute from being set on all events produced.\n\nArgs:\nattribute_name (str): name of the attribute to remove.\n\nRaises:\nKeyError: if the event attribute is not set.", "source": "juraj-google-style"}
{"code": "def handle_encodnig(html):\n    encoding = _get_encoding(dhtmlparser.parseString(html.split('</head>')[0]))\n    if (encoding == 'utf-8'):\n        return html\n    return html.decode(encoding).encode('utf-8')", "docstring": "Look for encoding in given `html`. Try to convert `html` to utf-8.\n\nArgs:\nhtml (str): HTML code as string.\n\nReturns:\nstr: HTML code encoded in UTF.", "source": "codesearchnet"}
{"code": "def list_merge(list_a, list_b):\n    \n    \n    \n    result = []\n    for item in list_a:\n        if not item in result:\n            result.append(item)\n    for item in list_b:\n        if not item in result:\n            result.append(item)\n    return result", "docstring": "Merge two lists without duplicating items\n\nArgs:\nlist_a: list\nlist_b: list\nReturns:\nNew list with deduplicated items from list_a and list_b", "source": "juraj-google-style"}
{"code": "def pop(self, rebuild=True):\n    layer = self._layers.pop()\n    self.built = False\n    self._functional = None\n    if rebuild:\n        self._maybe_rebuild()\n    return layer", "docstring": "Removes the last layer in the model.\n\nArgs:\nrebuild: `bool`. Whether to rebuild the model after removing\nthe layer. Defaults to `True`.\n\nReturns:\nlayer: layer instance.", "source": "github-repos"}
{"code": "def add_input(self, input_):\n    if (not isinstance(input_, Input)):\n        raise TypeError('`input_` must be a Input instance')\n    self.inputs.append(input_)", "docstring": "Adds an input to a Transaction's list of inputs.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`): An Input to be added to the Transaction.", "source": "codesearchnet"}
{"code": "def wc(filename, contents, parsed=None, is_jekyll=False):\n    \n    if is_jekyll:\n        fmt = 'jekyll'\n    else:\n        fmt = 'md/txt'\n    body = parsed.strip() if parsed else contents.strip()\n\n    \n    words = re.sub(r'\\s+', ' ', body, re.MULTILINE)\n    for punctuation in INTERSTITIAL_PUNCTUATION:\n        words = re.sub(punctuation, ' ', words)\n    punct = re.compile('[^\\w\\s]', re.U)\n    words = punct.sub('', words)\n\n    \n    real_characters = re.sub(r'\\s', '', words)\n\n    \n    paragraphs = [1 if len(x) == 0 else 0 for x in\n                  contents.strip().splitlines()]\n    for index, paragraph in enumerate(paragraphs):\n        if paragraph == 1 and paragraphs[index + 1] == 1:\n            paragraphs[index] = 0\n\n    return {\n        'counts': {\n            'file': filename,\n            'type': fmt,\n            'paragraphs': sum(paragraphs) + 1,\n            'words': len(re.split('\\s+', words)),\n            'characters_real': len(real_characters),\n            'characters_total': len(words),\n        }\n    }", "docstring": "Count the words, characters, and paragraphs in a string.\n\nArgs:\ncontents: the original string to count\nfilename (optional): the filename as provided to the CLI\nparsed (optional): a parsed string, expected to be plaintext only\nis_jekyll: whether the original contents were from a Jekyll file\n\nReturns:\nAn object containing the various counts", "source": "juraj-google-style"}
{"code": "def iter_packages(name, range_=None, paths=None):\n    entries = _get_families(name, paths)\n    seen = set()\n    for (repo, family_resource) in entries:\n        for package_resource in repo.iter_packages(family_resource):\n            key = (package_resource.name, package_resource.version)\n            if (key in seen):\n                continue\n            seen.add(key)\n            if range_:\n                if isinstance(range_, basestring):\n                    range_ = VersionRange(range_)\n                if (package_resource.version not in range_):\n                    continue\n            (yield Package(package_resource))", "docstring": "Iterate over `Package` instances, in no particular order.\n\nPackages of the same name and version earlier in the search path take\nprecedence - equivalent packages later in the paths are ignored. Packages\nare not returned in any specific order.\n\nArgs:\nname (str): Name of the package, eg 'maya'.\nrange_ (VersionRange or str): If provided, limits the versions returned\nto those in `range_`.\npaths (list of str, optional): paths to search for packages, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` iterator.", "source": "codesearchnet"}
{"code": "def resource(self, resource_type):\n    try:\n        resource = getattr(self.resources, self.safe_rt(resource_type))(self)\n    except AttributeError:\n        self._resources(True)\n        resource = getattr(self.resources, self.safe_rt(resource_type))(self)\n    return resource", "docstring": "Get instance of Resource Class with dynamic type.\n\nArgs:\nresource_type: The resource type name (e.g Adversary, User Agent, etc).\n\nReturns:\n(object): Instance of Resource Object child class.", "source": "codesearchnet"}
{"code": "def process(self, element, *args, **kwargs):\n    (text, uid), prediction = element\n    embedding = prediction.inference\n    l2_norm = np.linalg.norm(embedding)\n    yield {'text': text, 'id': uid, 'embedding': embedding / l2_norm}", "docstring": "For each element in the input PCollection, normalize the embedding vector, and\nyield a new element with the normalized embedding added\n\nArgs:\nelement: The element to be processed.", "source": "github-repos"}
{"code": "def easeOutBack(n, s=1.70158):\n    _checkRange(n)\n    n = (n - 1)\n    return (((n * n) * (((s + 1) * n) + s)) + 1)", "docstring": "A tween function that overshoots the destination a little and then backs into the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def swapdim(P, dim1=1, dim2=0):\n    if (not isinstance(P, Poly)):\n        return numpy.swapaxes(P, dim1, dim2)\n    dim = P.dim\n    shape = P.shape\n    dtype = P.dtype\n    if (dim1 == dim2):\n        return P\n    m = max(dim1, dim2)\n    if (P.dim <= m):\n        P = chaospy.poly.dimension.setdim(P, (m + 1))\n        dim = (m + 1)\n    A = {}\n    for key in P.keys:\n        val = P.A[key]\n        key = list(key)\n        (key[dim1], key[dim2]) = (key[dim2], key[dim1])\n        A[tuple(key)] = val\n    return Poly(A, dim, shape, dtype)", "docstring": "Swap the dim between two variables.\n\nArgs:\nP (Poly):\nInput polynomial.\ndim1 (int):\nFirst dim\ndim2 (int):\nSecond dim.\n\nReturns:\n(Poly):\nPolynomial with swapped dimensions.\n\nExamples:\n>>> x,y = variable(2)\n>>> P = x**4-y\n>>> print(P)\nq0^4-q1\n>>> print(swapdim(P))\nq1^4-q0", "source": "codesearchnet"}
{"code": "def enable_collective_ops(self, server_def):\n    if not server_def:\n        raise ValueError('server_def is None.')\n    self._collective_ops_server_def = server_def\n    if self._context_handle is not None:\n        logging.warning('Enabling collective ops after program startup may cause error when accessing previously created tensors.')\n        with self._initialize_lock:\n            assert self._initialized\n            server_def_str = self._collective_ops_server_def.SerializeToString()\n            pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str)\n            self._initialize_logical_devices()\n            self._clear_caches()", "docstring": "Enable distributed collective ops with an appropriate server_def.\n\nArgs:\nserver_def: A tensorflow::ServerDef proto. Enables execution on remote\ndevices.\n\nRaises:\nValueError: if server_def is None.\nRuntimeError: if this method is not called at program startup.", "source": "github-repos"}
{"code": "def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):\n    start_idx = cur_len + 1 - ngram_size\n    ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())\n    return banned_ngrams.get(ngram_idx, [])", "docstring": "Determines the banned tokens for the current hypothesis based on previously generated n-grams.\n\nArgs:\nbanned_ngrams (`dict`):\nA dictionary containing previously generated n-grams for each hypothesis.\nprev_input_ids (`torch.Tensor`):\nGenerated token ids for the current hypothesis.\nngram_size (`int`):\nThe number sequential tokens taken as a group which may only occur once before being banned.\ncur_len (`int`):\nThe current length of the token sequences for which the n-grams are being checked.\n\nReturns:\nList of tokens that are banned.", "source": "github-repos"}
{"code": "def _assert_validators(self, validators):\n    \n    for validator in sorted(\n        validators, key=lambda validator: validator.insertion_index):\n      try:\n        validator.verify(self)\n      except _exceptions.ValidationError as e:\n        message = validator.print_flags_with_values(self)\n        raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))", "docstring": "Asserts if all validators in the list are satisfied.\n\nIt asserts validators in the order they were created.\n\nArgs:\nvalidators: Iterable(validators.Validator), validators to be\nverified.\nRaises:\nAttributeError: Raised if validators work with a non-existing flag.\nIllegalFlagValueError: Raised if validation fails for at least one\nvalidator.", "source": "juraj-google-style"}
{"code": "def create_parser(default_name: str) -> argparse.ArgumentParser:\n    \n    argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n    argparser.add_argument('-H', '--host',\n                           help='Host to which the app binds. [%(default)s]',\n                           default='0.0.0.0')\n    argparser.add_argument('-p', '--port',\n                           help='Port to which the app binds. [%(default)s]',\n                           default=5000,\n                           type=int)\n    argparser.add_argument('-o', '--output',\n                           help='Logging output. [%(default)s]')\n    argparser.add_argument('-n', '--name',\n                           help='Service name. This will be used as prefix for all endpoints. [%(default)s]',\n                           default=default_name)\n    argparser.add_argument('--debug',\n                           help='Run the app in debug mode. [%(default)s]',\n                           action='store_true')\n    argparser.add_argument('--eventbus-host',\n                           help='Hostname at which the eventbus can be reached [%(default)s]',\n                           default='eventbus')\n    argparser.add_argument('--eventbus-port',\n                           help='Port at which the eventbus can be reached [%(default)s]',\n                           default=5672,\n                           type=int)\n    return argparser", "docstring": "Creates the default brewblox_service ArgumentParser.\nService-agnostic arguments are added.\n\nThe parser allows calling code to add additional arguments before using it in create_app()\n\nArgs:\ndefault_name (str):\ndefault value for the --name commandline argument.\n\nReturns:\nargparse.ArgumentParser: a Python ArgumentParser with defaults set.", "source": "juraj-google-style"}
{"code": "def set_pair(self, term1, term2, value, **kwargs):\n    key = self.key(term1, term2)\n    self.keys.update([term1, term2])\n    self.pairs[key] = value", "docstring": "Set the value for a pair of terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\nvalue (mixed)", "source": "codesearchnet"}
{"code": "def __init__(self, coords):\n        \n        self._coords = np.array(coords)\n        self.simplex_dim, self.space_dim = self._coords.shape\n        self.origin = self._coords[-1]\n        if self.simplex_dim == self.space_dim + 1:\n            \n            self.T = self._coords[:-1] - self.origin\n            self.T_inv = np.linalg.inv(self.T)", "docstring": "Initializes a Simplex from vertex coordinates.\n\nArgs:\ncoords ([[float]]): Coords of the vertices of the simplex. E.g.,\n[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].", "source": "juraj-google-style"}
{"code": "def summary(self, fmt=None, initial=True, default=''):\n    if (default and (not self.__dict__)):\n        return default\n    if (fmt == ''):\n        return default\n    keys = [k for (k, v) in self.__dict__.items() if (v is not '')]\n    f = (fmt or (('{' + '}, {'.join(keys)) + '}'))\n    try:\n        summary = CustomFormatter().format(f, **self.__dict__)\n    except KeyError as e:\n        raise ComponentError(('Error building summary, ' + str(e)))\n    if (summary and initial and (not fmt)):\n        summary = (summary[0].upper() + summary[1:])\n    return summary", "docstring": "Given a format string, return a summary description of a component.\n\nArgs:\ncomponent (dict): A component dictionary.\nfmt (str): Describes the format with a string. If no format is\ngiven, you will just get a list of attributes. If you give the\nempty string (''), you'll get `default` back. By default this\ngives you the empty string, effectively suppressing the\nsummary.\ninitial (bool): Whether to capitialize the first letter. Default is\nTrue.\ndefault (str): What to give if there's no component defined.\n\nReturns:\nstr: A summary string.\n\nExample:\n\nr = Component({'colour': 'Red',\n'grainsize': 'VF-F',\n'lithology': 'Sandstone'})\n\nr.summary()  -->  'Red, vf-f, sandstone'", "source": "codesearchnet"}
{"code": "def script_dir_plus_file(filename, pyobject, follow_symlinks=True):\n    \n    return join(script_dir(pyobject, follow_symlinks), filename)", "docstring": "Get current script's directory and then append a filename\n\nArgs:\nfilename (str): Filename to append to directory path\npyobject (Any): Any Python object in the script\nfollow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.\n\nReturns:\nstr: Current script's directory and with filename appended", "source": "juraj-google-style"}
{"code": "def join(self, *data: Iterable[MaybeBytes]) -> bytes:\n    return self.how.join([bytes(item) for item in chain(*data)])", "docstring": "Iterable join on a delimiter.\n\nArgs:\ndata: Iterable of items to join.\n\nExamples:\n::\n\nBytesFormat(b' ').join([b'one', b'two', b'three'])", "source": "codesearchnet"}
{"code": "def export(self, top=True):\n        \n        out = []\n        if top:\n            out.append(self._internal_name)\n        out.append(self._to_str(self.year))\n        out.append(self._to_str(self.month))\n        out.append(self._to_str(self.day))\n        out.append(self._to_str(self.hour))\n        out.append(self._to_str(self.minute))\n        out.append(self._to_str(self.data_source_and_uncertainty_flags))\n        out.append(self._to_str(self.dry_bulb_temperature))\n        out.append(self._to_str(self.dew_point_temperature))\n        out.append(self._to_str(self.relative_humidity))\n        out.append(self._to_str(self.atmospheric_station_pressure))\n        out.append(self._to_str(self.extraterrestrial_horizontal_radiation))\n        out.append(self._to_str(self.extraterrestrial_direct_normal_radiation))\n        out.append(self._to_str(self.horizontal_infrared_radiation_intensity))\n        out.append(self._to_str(self.global_horizontal_radiation))\n        out.append(self._to_str(self.direct_normal_radiation))\n        out.append(self._to_str(self.diffuse_horizontal_radiation))\n        out.append(self._to_str(self.global_horizontal_illuminance))\n        out.append(self._to_str(self.direct_normal_illuminance))\n        out.append(self._to_str(self.diffuse_horizontal_illuminance))\n        out.append(self._to_str(self.zenith_luminance))\n        out.append(self._to_str(self.wind_direction))\n        out.append(self._to_str(self.wind_speed))\n        out.append(self._to_str(self.total_sky_cover))\n        out.append(self._to_str(self.opaque_sky_cover))\n        out.append(self._to_str(self.visibility))\n        out.append(self._to_str(self.ceiling_height))\n        out.append(self._to_str(self.present_weather_observation))\n        out.append(self._to_str(self.present_weather_codes))\n        out.append(self._to_str(self.precipitable_water))\n        out.append(self._to_str(self.aerosol_optical_depth))\n        out.append(self._to_str(self.snow_depth))\n        out.append(self._to_str(self.days_since_last_snowfall))\n        out.append(self._to_str(self.albedo))\n        out.append(self._to_str(self.liquid_precipitation_depth))\n        out.append(self._to_str(self.liquid_precipitation_quantity))\n        return \",\".join(out)", "docstring": "Exports object to its string representation.\n\nArgs:\ntop (bool):  if True appends `internal_name` before values.\nAll non list objects should be exported with value top=True,\nall list objects, that are embedded in as fields inlist objects\nshould be exported with `top`=False\n\nReturns:\nstr: The objects string representation", "source": "juraj-google-style"}
{"code": "def ParseDestList(self, parser_mediator, olecf_item):\n    header_map = self._GetDataTypeMap('dest_list_header')\n    try:\n        (header, entry_offset) = self._ReadStructureFromFileObject(olecf_item, 0, header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse DestList header with error: {0!s}'.format(exception))\n    if (header.format_version == 1):\n        entry_map = self._GetDataTypeMap('dest_list_entry_v1')\n    elif (header.format_version in (3, 4)):\n        entry_map = self._GetDataTypeMap('dest_list_entry_v3')\n    else:\n        parser_mediator.ProduceExtractionWarning('unsupported format version: {0:d}.'.format(header.format_version))\n        return\n    while (entry_offset < olecf_item.size):\n        try:\n            (entry, entry_data_size) = self._ReadStructureFromFileObject(olecf_item, entry_offset, entry_map)\n        except (ValueError, errors.ParseError) as exception:\n            raise errors.UnableToParseFile('Unable to parse DestList entry with error: {0!s}'.format(exception))\n        display_name = 'DestList entry at offset: 0x{0:08x}'.format(entry_offset)\n        try:\n            droid_volume_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.droid_volume_identifier, display_name)\n        except (TypeError, ValueError) as exception:\n            droid_volume_identifier = ''\n            parser_mediator.ProduceExtractionWarning('unable to read droid volume identifier with error: {0!s}'.format(exception))\n        try:\n            droid_file_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.droid_file_identifier, display_name)\n        except (TypeError, ValueError) as exception:\n            droid_file_identifier = ''\n            parser_mediator.ProduceExtractionWarning('unable to read droid file identifier with error: {0!s}'.format(exception))\n        try:\n            birth_droid_volume_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.birth_droid_volume_identifier, display_name)\n        except (TypeError, ValueError) as exception:\n            birth_droid_volume_identifier = ''\n            parser_mediator.ProduceExtractionWarning('unable to read birth droid volume identifier with error: {0:s}'.format(exception))\n        try:\n            birth_droid_file_identifier = self._ParseDistributedTrackingIdentifier(parser_mediator, entry.birth_droid_file_identifier, display_name)\n        except (TypeError, ValueError) as exception:\n            birth_droid_file_identifier = ''\n            parser_mediator.ProduceExtractionWarning('unable to read birth droid file identifier with error: {0:s}'.format(exception))\n        if (entry.last_modification_time == 0):\n            date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n        else:\n            date_time = dfdatetime_filetime.Filetime(timestamp=entry.last_modification_time)\n        event_data = AutomaticDestinationsDestListEntryEventData()\n        event_data.birth_droid_file_identifier = birth_droid_file_identifier\n        event_data.birth_droid_volume_identifier = birth_droid_volume_identifier\n        event_data.droid_file_identifier = droid_file_identifier\n        event_data.droid_volume_identifier = droid_volume_identifier\n        event_data.entry_number = entry.entry_number\n        event_data.hostname = entry.hostname.rstrip('\\x00')\n        event_data.offset = entry_offset\n        event_data.path = 
entry.path.rstrip('\\x00')\n        event_data.pin_status = entry.pin_status\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n        entry_offset += entry_data_size", "docstring": "Parses the DestList OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nolecf_item (pyolecf.item): OLECF item.\n\nRaises:\nUnableToParseFile: if the DestList cannot be parsed.", "source": "codesearchnet"}
{"code": "def preprocess(self, xs):\n    return [self.nesting_field.preprocess(x) for x in super(NestedField, self).preprocess(xs)]", "docstring": "Preprocess a single example.\n\nFirstly, tokenization and the supplied preprocessing pipeline is applied. Since\nthis field is always sequential, the result is a list. Then, each element of\nthe list is preprocessed using ``self.nesting_field.preprocess`` and the resulting\nlist is returned.\n\nArguments:\nxs (list or str): The input to preprocess.\n\nReturns:\nlist: The preprocessed list.", "source": "codesearchnet"}
{"code": "def write(self, name, **data):\n        \n\n        data[\"name\"] = name\n        if not (\"timestamp\" in data):\n            data[\"timestamp\"] = datetime.utcnow()\n\n        try:\n            self.client.index(\n                index=self.get_index(),\n                doc_type=self.doc_type,\n                id=None,\n                body=data\n            )\n        except TransportError as exc:\n            logger.warning('writing metric %r failure %r', data, exc)", "docstring": "Write the metric to elasticsearch\n\nArgs:\nname (str): The name of the metric to write\ndata (dict): Additional data to store with the metric", "source": "juraj-google-style"}
{"code": "def call(self, hidden_states: tf.Tensor, prev_group_token: tf.Tensor | None=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:\n    if self.with_group_token:\n        group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1))\n        if self.group_projector is not None:\n            for layer in self.group_projector:\n                prev_group_token = layer(prev_group_token)\n            group_token = group_token + prev_group_token\n    else:\n        group_token = None\n    x = hidden_states\n    cat_x = self.concat_x(x, group_token)\n    for layer in self.layers:\n        layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None, output_attentions=None)\n        cat_x = layer_out[0]\n    x, group_token = self.split_x(cat_x)\n    attention = None\n    if self.downsample is not None:\n        x, attention = self.downsample(x, group_token)\n    outputs = (x, group_token)\n    if output_attentions:\n        outputs = outputs + (attention,)\n    return outputs", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the grouping tensors of Grouping block.", "source": "github-repos"}
{"code": "def __init__(self, num_packs=1):\n    if num_packs <= 0:\n        raise ValueError('num_packs must be greater than zero.')\n    self.num_packs = num_packs", "docstring": "Initialize the _ConcatAndSplitPacker object.\n\nArgs:\nnum_packs: specifies the number of split packs that will be\nformed.\n\nRaises:\nValueError: if num_packs is not greater than 0.", "source": "github-repos"}
{"code": "def requires_swimlane_version(min_version=None, max_version=None):\n    if ((min_version is None) and (max_version is None)):\n        raise ValueError('Must provide either min_version, max_version, or both')\n    if (min_version and max_version and (compare_versions(min_version, max_version) < 0)):\n        raise ValueError('min_version must be <= max_version ({}, {})'.format(min_version, max_version))\n\n    def decorator(func):\n\n        @functools.wraps(func)\n        def wrapper(self, *args, **kwargs):\n            swimlane = self._swimlane\n            if (min_version and (compare_versions(min_version, swimlane.build_version, True) < 0)):\n                raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)\n            if (max_version and (compare_versions(swimlane.build_version, max_version, True) < 0)):\n                raise InvalidSwimlaneBuildVersion(swimlane, min_version, max_version)\n            return func(self, *args, **kwargs)\n        return wrapper\n    return decorator", "docstring": "Decorator for SwimlaneResolver methods verifying Swimlane server build version is within a given inclusive range\n\nRaises:\nInvalidVersion: Raised before decorated method call if Swimlane server version is out of provided range\nValueError: If neither min_version or max_version were provided, or if those values conflict (2.15 < 2.14)", "source": "codesearchnet"}
{"code": "def remove_metric(self, metric_name):\n        \n        with self._lock:\n            metric = self._metrics.pop(metric_name, None)\n            if metric:\n                for reporter in self._reporters:\n                    reporter.metric_removal(metric)\n            return metric", "docstring": "Remove a metric if it exists and return it. Return None otherwise.\nIf a metric is removed, `metric_removal` will be invoked\nfor each reporter.\n\nArguments:\nmetric_name (MetricName): The name of the metric\n\nReturns:\nKafkaMetric: the removed `KafkaMetric` or None if no such\nmetric exists", "source": "juraj-google-style"}
{"code": "def set_examples(self, examples):\n    \n    self.store('examples', examples)\n    if len(examples) > 0:\n      self.store('are_sequence_examples',\n                 isinstance(examples[0], tf.train.SequenceExample))\n    return self", "docstring": "Sets the examples to be displayed in WIT.\n\nArgs:\nexamples: List of example protos.\n\nReturns:\nself, in order to enabled method chaining.", "source": "juraj-google-style"}
{"code": "def get_mem_usage(**kwargs):\n    try:\n        con_mem_data_list = con._client.get_memory(session=kwargs['con']._session, memory_level=kwargs['mem_type'])\n        usedram = 0\n        freeram = 0\n        for con_mem_data in con_mem_data_list:\n            page_size = con_mem_data.page_size\n            node_memory_data_list = con_mem_data.node_memory_data\n            for node_memory_data in node_memory_data_list:\n                ram = (node_memory_data.num_pages * page_size)\n                is_free = node_memory_data.is_free\n                if is_free:\n                    freeram += ram\n                else:\n                    usedram += ram\n        totalallocated = (usedram + freeram)\n        if (totalallocated > 0):\n            totalallocated = round(((totalallocated / 1024) / 1024), 1)\n            usedram = round(((usedram / 1024) / 1024), 1)\n            freeram = round(((freeram / 1024) / 1024), 1)\n        ramusage = {}\n        ramusage['usedram'] = usedram\n        ramusage['freeram'] = freeram\n        ramusage['totalallocated'] = totalallocated\n        ramusage['errormessage'] = ''\n    except Exception as e:\n        errormessage = ('Get memory failed with error: ' + str(e))\n        logging.error(errormessage)\n        ramusage['errormessage'] = errormessage\n    return ramusage", "docstring": "Calculates memory statistics from mapd_server _client.get_memory call\n\nKwargs:\ncon(class 'pymapd.connection.Connection'): Mapd connection\nmem_type(str): [gpu, cpu] Type of memory to gather metrics for\n\nReturns:\nramusage(dict):::\nusedram(float): Amount of memory (in MB) used\nfreeram(float): Amount of memory (in MB) free\ntotalallocated(float): Total amount of memory (in MB) allocated\nerrormessage(str): Error if returned by get_memory call\nrawdata(list): Raw data returned from get_memory call", "source": "codesearchnet"}
{"code": "def select_inputs(self, address, nfees, ntokens, min_confirmations=6):\n        \n        unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']\n        unspents = [u for u in unspents if u not in self._spents.queue]\n        if len(unspents) == 0:\n            raise Exception(\"No spendable outputs found\")\n\n        fees = [u for u in unspents if u['amount'] == self.fee][:nfees]\n        tokens = [u for u in unspents if u['amount'] == self.token][:ntokens]\n        if len(fees) != nfees or len(tokens) != ntokens:\n            raise SpoolFundsError(\"Not enough outputs to spend. Refill your wallet\")\n        if self._spents.qsize() > self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens):\n            [self._spents.get() for i in range(self._spents.qsize() + nfees + ntokens - self.SPENTS_QUEUE_MAXSIZE)]\n        [self._spents.put(fee) for fee in fees]\n        [self._spents.put(token) for token in tokens]\n        return fees + tokens", "docstring": "Selects the inputs for the spool transaction.\n\nArgs:\naddress (str): bitcoin address to select inputs for\nnfees (int): number of fees\nntokens (int): number of tokens\nmin_confirmations (Optional[int]): minimum number of required\nconfirmations; defaults to 6", "source": "juraj-google-style"}
{"code": "def _compile_graphql_generic(language, lowering_func, query_emitter_func, schema, graphql_string, type_equivalence_hints, compiler_metadata):\n    ir_and_metadata = graphql_to_ir(schema, graphql_string, type_equivalence_hints=type_equivalence_hints)\n    lowered_ir_blocks = lowering_func(ir_and_metadata.ir_blocks, ir_and_metadata.query_metadata_table, type_equivalence_hints=type_equivalence_hints)\n    query = query_emitter_func(lowered_ir_blocks, compiler_metadata)\n    return CompilationResult(query=query, language=language, output_metadata=ir_and_metadata.output_metadata, input_metadata=ir_and_metadata.input_metadata)", "docstring": "Compile the GraphQL input, lowering and emitting the query using the given functions.\n\nArgs:\nlanguage: string indicating the target language to compile to.\nlowering_func: Function to lower the compiler IR into a compatible form for the target\nlanguage backend.\nquery_emitter_func: Function that emits a query in the target language from the lowered IR.\nschema: GraphQL schema object describing the schema of the graph to be queried.\ngraphql_string: the GraphQL query to compile to the target language, as a string.\ntype_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.\ncompiler_metadata: optional target specific metadata for usage by the query_emitter_func.\n\nReturns:\na CompilationResult object", "source": "codesearchnet"}
{"code": "def object_metadata(save_path):\n    reader = py_checkpoint_reader.NewCheckpointReader(save_path)\n    try:\n        object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n    except errors_impl.NotFoundError:\n        raise ValueError(f'The specified checkpoint \"{save_path}\" does not appear to be object-based (saved with TF2) since it is missing the key \"{base.OBJECT_GRAPH_PROTO_KEY}\". Likely it was created with the TF1 name-based saver and does not contain an object dependency graph.')\n    object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n    object_graph_proto.ParseFromString(object_graph_string)\n    return object_graph_proto", "docstring": "Retrieves information about the objects in a checkpoint.\n\nExample usage:\n\n```python\nobject_graph = tf.contrib.checkpoint.object_metadata(\ntf.train.latest_checkpoint(checkpoint_directory))\nckpt_variable_names = set()\nfor node in object_graph.nodes:\nfor attribute in node.attributes:\nckpt_variable_names.add(attribute.full_name)\n```\n\nArgs:\nsave_path: The path to the checkpoint, as returned by `save` or\n`tf.train.latest_checkpoint`.\n\nReturns:\nA parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.\nRaises:\nValueError: If an object graph was not found in the checkpoint.", "source": "github-repos"}
{"code": "def get_individuals(variant_source, case_lines=None, case_type='ped', variant_mode='vcf'):\n        \n        individuals = []\n        ind_dict ={}\n\n        if variant_mode == 'vcf':\n            head = get_header(variant_source)\n            \n\n            for index, ind in enumerate(head.individuals):\n                ind_dict[ind] = index\n\n            if case_lines:\n                \n                family_parser = FamilyParser(case_lines, family_type=case_type)\n                families = family_parser.families\n                logger.debug(\"Found families {0}\".format(\n                            ','.join(list(families.keys()))))\n                if len(families) != 1:\n                    logger.error(\"Only one family can be used with vcf adapter\")\n                    raise IOError\n\n                case_id = list(families.keys())[0]\n                logger.debug(\"Family used in analysis: {0}\".format(case_id))\n\n                for ind_id in family_parser.individuals:\n                    ind = family_parser.individuals[ind_id]\n                    logger.info(\"Found individual {0}\".format(ind.individual_id))\n                    try:\n                        individual = Individual(\n                            ind_id=ind_id,\n                            case_id=case_id,\n                            mother=ind.mother,\n                            father=ind.father,\n                            sex=str(ind.sex),\n                            phenotype=str(ind.phenotype),\n                            variant_source=variant_source,\n                            ind_index=ind_dict[ind_id],\n                            )\n                        individuals.append(individual)\n                    except KeyError as err:\n                        \n                        \n                        raise PedigreeError(\n                            family_id=case_id,\n                            individual_id=ind_id,\n                            message=\"Individual {0} exists in ped file but not in vcf\".format(ind_id)\n                            )\n\n            else:\n                case_id = os.path.basename(variant_source)\n\n                for ind in ind_dict:\n                    individual = Individual(\n                        ind_id=ind,\n                        case_id=case_id,\n                        variant_source=variant_source,\n                        ind_index=ind_dict[ind]\n                        )\n                    individuals.append(individual)\n\n                    logger.debug(\"Found individual {0} in {1}\".format(\n                                 ind, variant_source))\n        elif variant_mode == 'gemini':\n            gq = GeminiQuery(variant_source)\n            \n            ind_dict = gq.sample_to_idx\n            query = \"SELECT * from samples\"\n            gq.run(query)\n            for individual in gq:\n                logger.debug(\"Found individual {0} with family id {1}\".format(\n                    individual['name'], individual['family_id']))\n                individuals.append(\n                    Individual(\n                        ind_id=individual['name'],\n                        case_id=individual['family_id'],\n                        mother=individual['maternal_id'],\n                        father=individual['paternal_id'],\n                        sex=individual['sex'],\n                        phenotype=individual['phenotype'],\n                        ind_index=ind_dict.get(individual['name']),\n       
                 variant_source=variant_source,\n                        bam_path=None)\n                        )\n\n        return individuals", "docstring": "Get the individuals from a vcf file, gemini database, and/or a ped file.\n\nArgs:\nvariant_source (str): Path to a variant source\ncase_lines(Iterable): Ped like lines\ncase_type(str): Format of ped lines\n\nReturns:\nindividuals (generator): generator with Individuals", "source": "juraj-google-style"}
{"code": "def _indexed_case_helper(branch_fns, default, branch_index, name, lower_using_switch_merge=None):\n    branch_fns = _indexed_case_verify_and_canonicalize_args(branch_fns, default, branch_index)\n    with ops.name_scope(name, 'case', [branch_index]):\n        if context.executing_eagerly() and (not hasattr(branch_index, 'graph')):\n            branch_index = array_ops.where(math_ops.less(branch_index, 0) | math_ops.greater_equal(branch_index, len(branch_fns)), len(branch_fns) - 1, branch_index)\n            return branch_fns[int(branch_index)]()\n        return cond_v2.indexed_case(branch_index, branch_fns, lower_using_switch_merge=lower_using_switch_merge)", "docstring": "Implementation of case that emits the n-way indexed Case op.\n\nArgs:\nbranch_fns: Dict or list of pairs of a boolean scalar tensor, and a callable\nwhich returns a list of tensors.\ndefault: Optional callable that returns a list of tensors.\nbranch_index: Optional int `Tensor`, which selects for the corresponding\npred_fn_pair.\nname: A name for this operation (optional).\nlower_using_switch_merge: Lower this op using switch merge ops (optional).\n\nReturns:\nThe tensors returned by the pair whose key matched branch_index, or\nthose returned by `default` if none does.\n\nRaises:\nTypeError: If `branch_fns` is not a list/dictionary.\nTypeError: If `branch_fns` is a list but does not contain 2-tuples or\ncallables.\nTypeError: If `fns[i]` is not callable for any i, or `default` is not\ncallable.", "source": "github-repos"}
{"code": "def print_stack_events(self):\n        \n        first_token = '7be7981bd6287dd8112305e8f3822a6f'\n        keep_going = True\n        next_token = first_token\n        current_request_token = None\n        rows = []\n        try:\n            while keep_going and next_token:\n                if next_token == first_token:\n                    response = self._cf_client.describe_stack_events(\n                        StackName=self._stack_name\n                    )\n                else:\n                    response = self._cf_client.describe_stack_events(\n                        StackName=self._stack_name,\n                        NextToken=next_token\n                    )\n\n                next_token = response.get('NextToken', None)\n                for event in response['StackEvents']:\n                    row = []\n                    event_time = event.get('Timestamp')\n                    request_token = event.get('ClientRequestToken', 'unknown')\n                    if current_request_token is None:\n                        current_request_token = request_token\n                    elif current_request_token != request_token:\n                        keep_going = False\n                        break\n\n                    row.append(event_time.strftime('%x %X'))\n                    row.append(event.get('LogicalResourceId'))\n                    row.append(event.get('ResourceStatus'))\n                    row.append(event.get('ResourceStatusReason', ''))\n                    rows.append(row)\n\n            if len(rows) > 0:\n                print('\\nEvents for the current upsert:')\n                print(tabulate(rows, headers=['Time', 'Logical ID', 'Status', 'Message']))\n                return True\n            else:\n                print('\\nNo stack events found\\n')\n        except Exception as wtf:\n            print(wtf)\n\n        return False", "docstring": "List events from the given stack\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _with_dependencies(self, dependencies):\n    new_row_splits = control_flow_ops.with_dependencies(dependencies, self._row_splits)\n    return RowPartition(row_splits=new_row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key)", "docstring": "Returns a new RowPartition equal to self with control dependencies.\n\nSpecifically, self._row_splits is gated by the given control dependencies.\nUsed to add sanity checks to the constructors.\n\nArgs:\ndependencies: a list of tensors to use as dependencies.\n\nReturns:\nA new RowPartition object.", "source": "github-repos"}
{"code": "def get_atom_map(structure):\n    \n    syms = [site.specie.symbol for site in structure]\n    unique_pot_atoms = []\n    [unique_pot_atoms.append(i) for i in syms if not unique_pot_atoms.count(i)]\n    atom_map = {}\n    for i, atom in enumerate(unique_pot_atoms):\n        atom_map[atom] = i + 1\n    return atom_map", "docstring": "Returns a dict that maps each atomic symbol to a unique integer starting\nfrom 1.\n\nArgs:\nstructure (Structure)\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def emit_counters(self, category, name, pid, timestamp, counters):\n    event = self._create_event('C', category, name, pid, 0, timestamp)\n    event['args'] = counters.copy()\n    self._events.append(event)", "docstring": "Emits a counter record for the dictionary 'counters'.\n\nArgs:\ncategory: The event category as a string.\nname:  The event name as a string.\npid:  Identifier of the process generating this event as an integer.\ntimestamp:  The timestamp of this event as a long integer.\ncounters: Dictionary of counter values.", "source": "github-repos"}
{"code": "def create_tracker(self, restriction):\n    raise NotImplementedError", "docstring": "Produces a new ``RestrictionTracker`` for the given restriction.\n\nThis API is required to be implemented.\n\nArgs:\nrestriction: an object that defines a restriction as identified by a\nSplittable ``DoFn`` that utilizes the current ``RestrictionProvider``.\nFor example, a tuple that gives a range of positions for a Splittable\n``DoFn`` that reads files based on byte positions.\nReturns: an object of type ``RestrictionTracker``.", "source": "github-repos"}
{"code": "async def claim_work(context):\n    \n    log.debug(\"Calling claimWork...\")\n    payload = {\n        'workerGroup': context.config['worker_group'],\n        'workerId': context.config['worker_id'],\n        \n        \n        'tasks': 1,\n    }\n    try:\n        return await context.queue.claimWork(\n            context.config['provisioner_id'],\n            context.config['worker_type'],\n            payload\n        )\n    except (taskcluster.exceptions.TaskclusterFailure, aiohttp.ClientError) as exc:\n        log.warning(\"{} {}\".format(exc.__class__, exc))", "docstring": "Find and claim the next pending task in the queue, if any.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict: a dict containing a list of the task definitions of the tasks claimed.", "source": "juraj-google-style"}
{"code": "def siblings(self, as_resources=False):\n    siblings = set()\n    for parent in self.parents(as_resources=True):\n        for sibling in parent.children(as_resources=as_resources):\n            siblings.add(sibling)\n    if as_resources:\n        siblings.remove(self)\n    if (not as_resources):\n        siblings.remove(self.uri)\n    return list(siblings)", "docstring": "method to return hierarchical siblings of this resource.\n\nArgs:\nas_resources (bool): if True, opens each as appropriate resource type instead of return URI only\n\nReturns:\n(list): list of resources", "source": "codesearchnet"}
{"code": "def make_access_request(self):\n    del self.issued_at\n    assertion = b'.'.join((self.header(), self.claims(), self.signature()))\n    post_data = {'grant_type': GRANT_TYPE, 'assertion': assertion}\n    resp = requests.post(AUDIENCE, post_data)\n    if (resp.status_code != 200):\n        raise AuthenticationError(resp)\n    return resp", "docstring": "Makes an OAuth2 access token request with crafted JWT and signature.\n\nThe core of this module. Based on arguments it creates proper JWT\nfor you and signs it with supplied private key.\nRegardless of present valid token, it always clears\n``issued_at`` property, which in turn results in requesting\nfresh OAuth2 access token.\n\nReturns:\nrequests.Response\n\nRaises:\ngoogle_oauth.exceptions.AuthenticationError:\nIf there was any non-200 HTTP-code from Google.\nrequests.RequestException:\nSomething went wrong when doing HTTP request.", "source": "codesearchnet"}
{"code": "def get_overall_services_health(self) -> str:\n    services_health_status = self.get_services_health()\n    health_status = all(((status == 'Healthy') for status in services_health_status.values()))\n    if health_status:\n        overall_status = 'Healthy'\n    else:\n        overall_status = 'Unhealthy'\n    return overall_status", "docstring": "Get the overall health of all the services.\n\nReturns:\nstr, overall health status", "source": "codesearchnet"}
{"code": "def _parse_device(s: str) -> Tuple[(List[GridQubit], Dict[(str, Set[GridQubit])])]:\n    lines = s.strip().split('\\n')\n    qubits = []\n    measurement_lines = {}\n    for (row, line) in enumerate(lines):\n        for (col, c) in enumerate(line.strip()):\n            if (c != '-'):\n                qubit = GridQubit(row, col)\n                qubits.append(qubit)\n                measurement_line = measurement_lines.setdefault(c, set())\n                measurement_line.add(qubit)\n    return (qubits, measurement_lines)", "docstring": "Parse ASCIIart device layout into info about qubits and connectivity.\n\nArgs:\ns: String representing the qubit layout. Each line represents a row,\nand each character in the row is a qubit, or a blank site if the\ncharacter is a hyphen '-'. Different letters for the qubit specify\nwhich measurement line that qubit is connected to, e.g. all 'A'\nqubits share a measurement line. Leading and trailing spaces on\neach line are ignored.\n\nReturns:\nA list of qubits and a dict mapping measurement line name to the qubits\non that measurement line.", "source": "codesearchnet"}
{"code": "def _PrepareAttributeContainer(self, attribute_container):\n    \n    attribute_values_hash = hash(attribute_container.GetAttributeValuesString())\n    identifier = identifiers.FakeIdentifier(attribute_values_hash)\n    attribute_container.SetIdentifier(identifier)\n\n    \n    return copy.deepcopy(attribute_container)", "docstring": "Prepares an attribute container for storage.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.\n\nReturns:\nAttributeContainer: copy of the attribute container to store in\nthe fake storage.", "source": "juraj-google-style"}
{"code": "def _get_connection_state(self, conn_or_int_id):\n    key = conn_or_int_id\n    if isinstance(key, str):\n        table = self._int_connections\n    elif isinstance(key, int):\n        table = self._connections\n    else:\n        raise ArgumentError('You must supply either an int connection id or a string internal id to _get_connection_state', id=key)\n    if (key not in table):\n        return self.Disconnected\n    data = table[key]\n    return data['state']", "docstring": "Get a connection's state by either conn_id or internal_id\n\nThis routine must only be called from the internal worker thread.\n\nArgs:\nconn_or_int_id (int, string): The external integer connection id or\nand internal string connection id", "source": "codesearchnet"}
{"code": "def _encode_dict_as_row(record, column_name_map):\n    for k in list(record.keys()):\n        v = record[k]\n        if (isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime)):\n            v = record[k] = record[k].isoformat()\n        if (k not in column_name_map):\n            column_name_map[k] = ''.join((c for c in k if (c in Table._VALID_COLUMN_NAME_CHARACTERS)))\n        new_k = column_name_map[k]\n        if (k != new_k):\n            record[new_k] = v\n            del record[k]\n    return record", "docstring": "Encode a dictionary representing a table row in a form suitable for streaming to BQ.\n\nThis includes encoding timestamps as ISO-compatible strings and removing invalid\ncharacters from column names.\n\nArgs:\nrecord: a Python dictionary representing the table row.\ncolumn_name_map: a dictionary mapping dictionary keys to column names. This is initially\nempty and built up by this method when it first encounters each column, then used as a\ncache subsequently.\nReturns:\nThe sanitized dictionary.", "source": "codesearchnet"}
{"code": "def decode_header_part(header):\n    \n    if not header:\n        return six.text_type()\n\n    output = six.text_type()\n\n    try:\n        for d, c in decode_header(header):\n            c = c if c else 'utf-8'\n            output += ported_string(d, c, 'ignore')\n\n    \n    except (HeaderParseError, UnicodeError):\n        log.error(\"Failed decoding header part: {}\".format(header))\n        output += header\n\n    return output", "docstring": "Given an raw header returns an decoded header\n\nArgs:\nheader (string): header to decode\n\nReturns:\nstr (Python 3) or unicode (Python 2)", "source": "juraj-google-style"}
{"code": "def ceil(cls, x: 'TensorFluent') -> 'TensorFluent':\n    return cls._unary_op(x, tf.ceil, tf.float32)", "docstring": "Returns a TensorFluent for the ceil function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the ceil function.", "source": "codesearchnet"}
{"code": "def open(in_file, in_fmt=None):\n    fmt = in_file.split('.')[(- 1)]\n    if in_fmt:\n        fmt = in_fmt\n    fmt = fmt.lower()\n    if (fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']):\n        return Image.open(in_file)\n    else:\n        raise NotImplementedError('Cannot open file of type {fmt}'.format(fmt))", "docstring": "Reads in a file from disk.\n\nArguments:\nin_file: The name of the file to read in\nin_fmt: The format of in_file, if you want to be explicit\n\nReturns:\nnumpy.ndarray", "source": "codesearchnet"}
{"code": "def _parse_publisher(details):\n    publisher = _get_td_or_none(details, 'ctl00_ContentPlaceHolder1_tblRowNakladatel')\n    if (not publisher):\n        return None\n    publisher = dhtmlparser.removeTags(publisher).strip()\n    if (not publisher):\n        return None\n    return publisher", "docstring": "Parse publisher of the book.\n\nArgs:\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr/None: Publisher's name as string or None if not found.", "source": "codesearchnet"}
{"code": "def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:\n    if (not isinstance(classes, list)):\n        classes = [classes]\n    for class_ in classes:\n        tag = '!{}'.format(class_.__name__)\n        if issubclass(class_, enum.Enum):\n            loader_cls.add_constructor(tag, EnumConstructor(class_))\n        elif (issubclass(class_, str) or issubclass(class_, UserString)):\n            loader_cls.add_constructor(tag, UserStringConstructor(class_))\n        else:\n            loader_cls.add_constructor(tag, Constructor(class_))\n        if (not hasattr(loader_cls, '_registered_classes')):\n            loader_cls._registered_classes = dict()\n        loader_cls._registered_classes[tag] = class_", "docstring": "Registers one or more classes with a YAtiML loader.\n\nOnce a class has been registered, it can be recognized and \\\nconstructed when reading a YAML text.\n\nArgs:\nloader_cls: The loader to register the classes with.\nclasses: The class(es) to register, a plain Python class or a \\\nlist of them.", "source": "codesearchnet"}
{"code": "def update_dns_zone_record(env, zone_id, **kwargs):\n    \n    client = boto3.Session(profile_name=env).client('route53')\n    response = {}\n\n    hosted_zone_info = client.get_hosted_zone(Id=zone_id)\n    zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')\n    dns_name = kwargs.get('dns_name')\n\n    if dns_name and dns_name.endswith(zone_name):\n        dns_name_aws = kwargs.get('dns_name_aws')\n        \n        dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs)\n        LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,\n                 zone_name)\n        try:\n            response = client.change_resource_record_sets(\n                HostedZoneId=zone_id,\n                ChangeBatch=json.loads(dns_json), )\n            LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)\n        except botocore.exceptions.ClientError as error:\n            LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,\n                     zone_name)\n            LOG.debug(error)\n    else:\n        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)\n\n    LOG.debug('Route53 JSON Response: \\n%s', pformat(response))", "docstring": "Create a Route53 CNAME record in _env_ zone.\n\nArgs:\nenv (str): Deployment environment.\nzone_id (str): Route53 zone id.\n\nKeyword Args:\ndns_name (str): FQDN of application's dns entry to add/update.\ndns_name_aws (str): FQDN of AWS resource\ndns_ttl (int): DNS time-to-live (ttl)", "source": "juraj-google-style"}
{"code": "def to_xml(self, xmllint=False):\n        \n        root = self._tree.getroot()\n        ret = ET.tostring(ET.ElementTree(root), pretty_print=True)\n        if xmllint:\n            ret = xmllint_format(ret)\n        return ret", "docstring": "Serialize all properties as pretty-printed XML\n\nArgs:\nxmllint (boolean): Format with ``xmllint`` in addition to pretty-printing", "source": "juraj-google-style"}
{"code": "def match_filenames_once(pattern, name=None):\n    with ops.name_scope(name, 'matching_filenames', [pattern]) as name:\n        return variable_v1.VariableV1(name=name, initial_value=io_ops.matching_files(pattern), trainable=False, validate_shape=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])", "docstring": "Save the list of files matching pattern, so it is only computed once.\n\nNOTE: The order of the files returned is deterministic.\n\nArgs:\npattern: A file pattern (glob), or 1D tensor of file patterns.\nname: A name for the operations (optional).\n\nReturns:\nA variable that is initialized to the list of files matching the pattern(s).", "source": "github-repos"}
{"code": "def sheets_tab_delete(config, auth, sheet_url_or_name, sheet_tab):\n    if config.verbose:\n        print('SHEETS DELETE', sheet_url_or_name, sheet_tab)\n    spreadsheet = sheets_get(config, auth, sheet_url_or_name)\n    if spreadsheet:\n        if len(spreadsheet['sheets']) == 1 and spreadsheet['sheets'][0]['properties']['title'] == sheet_tab:\n            file_delete(config, auth, spreadsheet['properties']['title'], parent=None)\n        else:\n            sheet_id, tab_id = sheets_tab_id(config, auth, sheet_url_or_name, sheet_tab)\n            if tab_id is not None:\n                sheets_batch_update(config, auth, sheet_url_or_name, {'requests': [{'deleteSheet': {'sheetId': tab_id}}]})", "docstring": "Delete a tab in a sheet.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nurl_or_name - one of: URL, document title, or id\nsheet_tab - name of tab to get id for\n\nNo Return", "source": "github-repos"}
{"code": "def __setattr__(self, name: str, val: np.ndarray) -> None:\n\t\t\n\t\tif name.startswith(\"!\"):\n\t\t\tsuper(AttributeManager, self).__setattr__(name[1:], val)\n\t\telif \"/\" in name:\n\t\t\traise KeyError(\"Attribute name cannot contain slash (/)\")\n\t\telse:\n\t\t\tif self.ds is not None:\n\t\t\t\tvalues = loompy.normalize_attr_values(val)\n\t\t\t\ta = [\"/row_attrs/\", \"/col_attrs/\"][self.axis]\n\t\t\t\tif self.ds.shape[self.axis] != 0 and values.shape[0] != self.ds.shape[self.axis]:\n\t\t\t\t\traise ValueError(f\"Attribute '{name}' must have exactly {self.ds.shape[self.axis]} values but {len(values)} were given\")\n\t\t\t\tif self.ds._file[a].__contains__(name):\n\t\t\t\t\tdel self.ds._file[a + name]\n\t\t\t\tself.ds._file[a + name] = values  \n\t\t\t\tself.ds._file[a + name].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file[a].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\tself.__dict__[\"storage\"][name] = loompy.materialize_attr_values(self.ds._file[a][name][:])\n\t\t\telse:\n\t\t\t\tself.__dict__[\"storage\"][name] = val", "docstring": "Set the value of a named attribute\n\nArgs:\nname (str) \t\t\tName of the attribute\nval (np.ndarray)\tValue of the attribute\n\nRemarks:\nLength must match the corresponding matrix dimension\nThe values are automatically HMTL escaped and converted to ASCII for storage", "source": "juraj-google-style"}
{"code": "def register_hook(self, hook, priority='NORMAL'):\n        \n        assert isinstance(hook, Hook)\n        if hasattr(hook, 'priority'):\n            raise ValueError('\"priority\" is a reserved attribute for hooks')\n        priority = get_priority(priority)\n        hook.priority = priority\n        \n        inserted = False\n        for i in range(len(self._hooks) - 1, -1, -1):\n            if priority >= self._hooks[i].priority:\n                self._hooks.insert(i + 1, hook)\n                inserted = True\n                break\n        if not inserted:\n            self._hooks.insert(0, hook)", "docstring": "Register a hook into the hook list.\n\nArgs:\nhook (:obj:`Hook`): The hook to be registered.\npriority (int or str or :obj:`Priority`): Hook priority.\nLower value means higher priority.", "source": "juraj-google-style"}
{"code": "def egress(self, envelope, http_headers, operation, binding_options):\n    custom_headers = self._header_handler.GetHTTPHeaders()\n    http_headers.update(custom_headers)\n    return (envelope, http_headers)", "docstring": "Overriding the egress function to set our headers.\n\nArgs:\nenvelope: An Element with the SOAP request data.\nhttp_headers: A dict of the current http headers.\noperation: The SoapOperation instance.\nbinding_options: An options dict for the SOAP binding.\n\nReturns:\nA tuple of the envelope and headers.", "source": "codesearchnet"}
{"code": "def check_dihedral(self, construction_table):\n    c_table = construction_table\n    angles = self.get_angle_degrees(c_table.iloc[(3:, :)].values)\n    problem_index = np.nonzero(((175 < angles) | (angles < 5)))[0]\n    rename = dict(enumerate(c_table.index[3:]))\n    problem_index = [rename[i] for i in problem_index]\n    return problem_index", "docstring": "Checks, if the dihedral defining atom is colinear.\n\nChecks for each index starting from the third row of the\n``construction_table``, if the reference atoms are colinear.\n\nArgs:\nconstruction_table (pd.DataFrame):\n\nReturns:\nlist: A list of problematic indices.", "source": "codesearchnet"}
{"code": "def prepare_request(\n    url: Union[str, methods],\n    data: Optional[MutableMapping],\n    headers: Optional[MutableMapping],\n    global_headers: MutableMapping,\n    token: str,\n    as_json: Optional[bool] = None,\n) -> Tuple[str, Union[str, MutableMapping], MutableMapping]:\n    \n\n    if isinstance(url, methods):\n        as_json = as_json or url.value[3]\n        real_url = url.value[0]\n    else:\n        real_url = url\n        as_json = False\n\n    if not headers:\n        headers = {**global_headers}\n    else:\n        headers = {**global_headers, **headers}\n\n    payload: Optional[Union[str, MutableMapping]] = None\n    if real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json):\n        payload, headers = _prepare_json_request(data, token, headers)\n    elif real_url.startswith(ROOT_URL) and not as_json:\n        payload = _prepare_form_encoded_request(data, token)\n    else:\n        real_url = ROOT_URL + real_url\n        payload = _prepare_form_encoded_request(data, token)\n\n    return real_url, payload, headers", "docstring": "Prepare outgoing request\n\nCreate url, headers, add token to the body and if needed json encode it\n\nArgs:\nurl: :class:`slack.methods` item or string of url\ndata: Outgoing data\nheaders: Custom headers\nglobal_headers: Global headers\ntoken: Slack API token\nas_json: Post JSON to the slack API\nReturns:\n:py:class:`tuple` (url, body, headers)", "source": "juraj-google-style"}
{"code": "def __init__(self, bits: List[int], initializer: tf.keras.initializers.Initializer=tf.keras.initializers.RandomUniform(), name: Union[None, str]=None):\n    pre_process = [energy_utils.SpinsFromBitstrings()]\n    post_process = [energy_utils.VariableDot(initializer=initializer)]\n    super().__init__(bits, pre_process + post_process, name)\n    self._post_process = post_process", "docstring": "Initializes a BernoulliEnergy.\n\nArgs:\nbits: Unique labels for the bits on which this distribution is supported.\ninitializer: A `tf.keras.initializers.Initializer` which specifies how to\ninitialize the values of the parameters.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def replace_punctuation(self, text, excluded=None, replacement=''):\n        \n        if excluded is None:\n            excluded = set()\n        elif not isinstance(excluded, set):\n            excluded = set(excluded)\n        punct = ''.join(self.__punctuation.difference(excluded))\n\n        return self.replace_characters(\n            text, characters=punct, replacement=replacement)", "docstring": "Replace punctuation symbols in text.\n\nRemoves punctuation from input text or replaces them with a\nstring if specified. Characters replaced will be those\nin string.punctuation.\n\nArgs:\ntext: The text to be processed.\nexcluded: Set of characters to exclude.\nreplacement: New text that will replace punctuation.\n\nReturns:\nThe text without punctuation.", "source": "juraj-google-style"}
{"code": "def update_file(filename, result, content, indent):\n    \n    \n    parts = re.split('---+', content, 2)\n\n    \n    frontmatter = yaml.safe_load(parts[1])\n\n    \n    frontmatter['counts'] = result['counts']\n\n    \n    \n    parts[1] = '\\n{}'.format(\n        yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))\n    result = '---'.join(parts)\n\n    \n    with open(filename, 'wb') as f:\n        f.write(result.encode('utf-8'))\n    print('{} updated.'.format(filename))", "docstring": "Updates a Jekyll file to contain the counts form an object\n\nThis just converts the results to YAML and adds to the Jekyll frontmatter.\n\nArgs:\nfilename: the Jekyll file to update\nresult: the results object from `wc`\ncontent: the contents of the original file\nindent: the indentation level for dumping YAML", "source": "juraj-google-style"}
{"code": "def scrape_hive_url(mc_url, num_tracks=sys.maxsize, folders=False, custom_path=''):\n    try:\n        data = get_hive_data(mc_url)\n    except Exception as e:\n        puts_safe((colored.red('Problem downloading ') + mc_url))\n        print(e)\n    filenames = []\n    return filenames", "docstring": "Scrape a Hive.co download page.\n\nReturns:\nlist: filenames to open", "source": "codesearchnet"}
{"code": "def plot(self, figure_list):\n        \n        \n        if not self.data == {} and self.data['image_data'] is  None:\n            axes = figure_list[0].axes[0]\n            if len(axes.images)>0:\n                self.data['image_data'] = np.array(axes.images[0].get_array())\n                self.data['extent'] = np.array(axes.images[0].get_extent())\n                self.plot_settings['cmap'] = axes.images[0].get_cmap().name\n                self.plot_settings['xlabel'] = axes.get_xlabel()\n                self.plot_settings['ylabel'] = axes.get_ylabel()\n                self.plot_settings['title'] = axes.get_title()\n                self.plot_settings['interpol'] = axes.images[0].get_interpolation()\n\n        Script.plot(self, figure_list)", "docstring": "Plots a dot on top of each selected NV, with a corresponding number denoting the order in which the NVs are\nlisted.\nPrecondition: must have an existing image in figure_list[0] to plot over\nArgs:\nfigure_list:", "source": "juraj-google-style"}
{"code": "def run_task_external(self, coroutine):\n    self.verify_calling_thread(False, 'run_task_external must not be called from the emulation thread')\n    future = asyncio.run_coroutine_threadsafe(coroutine, self._loop)\n    return future.result()", "docstring": "Inject a task into the emulation loop and wait for it to finish.\n\nThe coroutine parameter is run as a Task inside the EmulationLoop\nuntil it completes and the return value (or any raised Exception) is\npased back into the caller's thread.\n\nArgs:\ncoroutine (coroutine): The task to inject into the event loop.\n\nReturns:\nobject: Whatever the coroutine returned.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):\n    \n    if not key:\n      raise ValueError('Missing key.')\n\n    cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode, None)\n    if cipher_mode is None:\n      raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode))\n\n    if cipher_mode != Blowfish.MODE_ECB and not initialization_vector:\n      \n      \n      raise ValueError('Missing initialization vector.')\n\n    super(BlowfishDecrypter, self).__init__()\n    if cipher_mode == Blowfish.MODE_ECB:\n      self._blowfish_cipher = Blowfish.new(key, mode=cipher_mode)\n    else:\n      self._blowfish_cipher = Blowfish.new(\n          key, IV=initialization_vector, mode=cipher_mode)", "docstring": "Initializes a decrypter.\n\nArgs:\ncipher_mode (Optional[str]): cipher mode.\ninitialization_vector (Optional[bytes]): initialization vector.\nkey (Optional[bytes]): key.\nkwargs (dict): keyword arguments depending on the decrypter.\n\nRaises:\nValueError: when key is not set, block cipher mode is not supported,\nor initialization_vector is required and not set.", "source": "juraj-google-style"}
{"code": "def _get_instance(self, iname, namespace, property_list, local_only, include_class_origin, include_qualifiers):\n    instance_repo = self._get_instance_repo(namespace)\n    rtn_tup = self._find_instance(iname, instance_repo)\n    inst = rtn_tup[1]\n    if (inst is None):\n        raise CIMError(CIM_ERR_NOT_FOUND, _format('Instance not found in repository namespace {0!A}. Path={1!A}', namespace, iname))\n    rtn_inst = deepcopy(inst)\n    if local_only:\n        for p in rtn_inst:\n            class_origin = rtn_inst.properties[p].class_origin\n            if (class_origin and (class_origin != inst.classname)):\n                del rtn_inst[p]\n    if ((not self._repo_lite) and local_only):\n        try:\n            cl = self._get_class(iname.classname, namespace, local_only=local_only)\n        except CIMError as ce:\n            if (ce.status_code == CIM_ERR_NOT_FOUND):\n                raise CIMError(CIM_ERR_INVALID_CLASS, _format('Class {0!A} not found for instance {1!A} in namespace {2!A}.', iname.classname, iname, namespace))\n        class_pl = cl.properties.keys()\n        for p in list(rtn_inst):\n            if (p not in class_pl):\n                del rtn_inst[p]\n    self._filter_properties(rtn_inst, property_list)\n    if (not include_qualifiers):\n        self._remove_qualifiers(rtn_inst)\n    if (not include_class_origin):\n        self._remove_classorigin(rtn_inst)\n    return rtn_inst", "docstring": "Local method implements getinstance. This is generally used by\nother instance methods that need to get an instance from the\nrepository.\n\nIt attempts to get the instance, copies it, and filters it\nfor input parameters like localonly, includequalifiers, and\npropertylist.\n\nReturns:\n\nCIMInstance copy from the repository with property_list filtered,\nand qualifers removed if include_qualifiers=False and\nclass origin removed if include_class_origin False", "source": "codesearchnet"}
{"code": "def get_enabled():\n    raw_services = _get_services()\n    services = set()\n    for service in raw_services:\n        if (info(service['ServiceName'])['StartType'] in ['Auto']):\n            services.add(service['ServiceName'])\n    return sorted(services)", "docstring": "Return a list of enabled services. Enabled is defined as a service that is\nmarked to Auto Start.\n\nReturns:\nlist: A list of enabled services\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' service.get_enabled", "source": "codesearchnet"}
{"code": "def __init__(self, max_iterations, damping, unroll_loop=False):\n        \n        assert damping >= 0.0\n        self.damping = damping\n\n        super(ConjugateGradient, self).__init__(max_iterations=max_iterations, unroll_loop=unroll_loop)", "docstring": "Creates a new conjugate gradient solver instance.\n\nArgs:\nmax_iterations: Maximum number of iterations before termination.\ndamping: Damping factor.\nunroll_loop: Unrolls the TensorFlow while loop if true.", "source": "juraj-google-style"}
{"code": "def content_ratings(self, **kwargs):\n        \n        path = self._get_id_path('content_ratings')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the content ratings for a TV Series.\n\nArgs:\nlanguage: (optional) ISO 639 code.\nappend_to_response: (optional) Comma separated, any collection\nmethod.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes, commitment_loss, decay):\n    updated_ema_count = moving_averages.assign_moving_average(vector_quantizer.ema_count, tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]), decay, zero_debias=False)\n    updated_ema_means = moving_averages.assign_moving_average(vector_quantizer.ema_means, tf.reduce_sum(input_tensor=(tf.expand_dims(codes, 2) * tf.expand_dims(one_hot_assignments, 3)), axis=[0, 1]), decay, zero_debias=False)\n    perturbed_ema_count = (updated_ema_count + 1e-05)\n    with tf.control_dependencies([commitment_loss]):\n        update_means = tf.compat.v1.assign(vector_quantizer.codebook, (updated_ema_means / perturbed_ema_count[(..., tf.newaxis)]))\n        with tf.control_dependencies([update_means]):\n            return tf.identity(commitment_loss)", "docstring": "Add control dependencies to the commmitment loss to update the codebook.\n\nArgs:\nvector_quantizer: An instance of the VectorQuantizer class.\none_hot_assignments: The one-hot vectors corresponding to the matched\ncodebook entry for each code in the batch.\ncodes: A `float`-like `Tensor` containing the latent vectors to be compared\nto the codebook.\ncommitment_loss: The commitment loss from comparing the encoder outputs to\ntheir neighboring codebook entries.\ndecay: Decay factor for exponential moving average.\n\nReturns:\ncommitment_loss: Commitment loss with control dependencies.", "source": "codesearchnet"}
{"code": "def push(self, x):\n    self._median_tracker.push(x)\n    median = self._median_tracker.get()\n    self._diff_median_tracker.push(abs(x - median))", "docstring": "Adds a new value to the tracker and updates the MAD.\n\nArgs:\nx: The value to be added to the tracked stream.", "source": "github-repos"}
{"code": "class FlaxGreedySearchOutput(ModelOutput):\n    sequences: Optional[jnp.ndarray] = None", "docstring": "Flax Base class for outputs of decoder-only generation models using greedy search.\n\n\nArgs:\nsequences (`jnp.ndarray` of shape `(batch_size, max_length)`):\nThe generated sequences.", "source": "github-repos"}
{"code": "def get_service_for_handle(self, handle):\n        \n        for s in self.services.values():\n            if s.start_handle <= handle and s.end_handle >= handle:\n                return s\n\n        return None", "docstring": "Given a characteristic handle, return the :class:`Service` object that\nthe handle belongs to.\n\nArgs:\nhandle (int): the characteristic handle\n\nReturns:\nNone if no service matches the given handle, otherwise a :class:`Service` object.", "source": "juraj-google-style"}
{"code": "def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None, num_readers=1):\n    with tf.name_scope('batch_processing'):\n        data_files = dataset.data_files()\n        if (data_files is None):\n            raise ValueError('No data files found for this dataset')\n        if train:\n            filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16)\n        else:\n            filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1)\n        if (num_preprocess_threads is None):\n            num_preprocess_threads = FLAGS.num_preprocess_threads\n        if (num_preprocess_threads % 4):\n            raise ValueError('Please make num_preprocess_threads a multiple of 4 (%d % 4 != 0).', num_preprocess_threads)\n        if (num_readers is None):\n            num_readers = FLAGS.num_readers\n        if (num_readers < 1):\n            raise ValueError('Please make num_readers at least 1')\n        examples_per_shard = 1024\n        min_queue_examples = (examples_per_shard * FLAGS.input_queue_memory_factor)\n        if train:\n            examples_queue = tf.RandomShuffleQueue(capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples, dtypes=[tf.string])\n        else:\n            examples_queue = tf.FIFOQueue(capacity=(examples_per_shard + (3 * batch_size)), dtypes=[tf.string])\n        if (num_readers > 1):\n            enqueue_ops = []\n            for _ in range(num_readers):\n                reader = dataset.reader()\n                (_, value) = reader.read(filename_queue)\n                enqueue_ops.append(examples_queue.enqueue([value]))\n            tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))\n            example_serialized = examples_queue.dequeue()\n        else:\n            reader = dataset.reader()\n            (_, example_serialized) = reader.read(filename_queue)\n        images_and_labels = []\n        for thread_id in range(num_preprocess_threads):\n            (image_buffer, label_index, bbox, _) = parse_example_proto(example_serialized)\n            image = image_preprocessing(image_buffer, bbox, train, thread_id)\n            images_and_labels.append([image, label_index])\n        (images, label_index_batch) = tf.train.batch_join(images_and_labels, batch_size=batch_size, capacity=((2 * num_preprocess_threads) * batch_size))\n        height = FLAGS.image_size\n        width = FLAGS.image_size\n        depth = 3\n        images = tf.cast(images, tf.float32)\n        images = tf.reshape(images, shape=[batch_size, height, width, depth])\n        tf.summary.image('images', images)\n        return (images, tf.reshape(label_index_batch, [batch_size]))", "docstring": "Contruct batches of training or evaluation examples from the image dataset.\n\nArgs:\ndataset: instance of Dataset class specifying the dataset.\nSee dataset.py for details.\nbatch_size: integer\ntrain: boolean\nnum_preprocess_threads: integer, total number of preprocessing threads\nnum_readers: integer, number of parallel readers\n\nReturns:\nimages: 4-D float Tensor of a batch of images\nlabels: 1-D integer Tensor of [batch_size].\n\nRaises:\nValueError: if data is not found", "source": "codesearchnet"}
{"code": "def nlargest(self, n=None):\n    if (n is None):\n        return sorted(self.counts(), key=itemgetter(1), reverse=True)\n    else:\n        return heapq.nlargest(n, self.counts(), key=itemgetter(1))", "docstring": "List the n most common elements and their counts.\n\nList is from the most\ncommon to the least.  If n is None, the list all element counts.\n\nRun time should be O(m log m) where m is len(self)\nArgs:\nn (int): The number of elements to return", "source": "codesearchnet"}
{"code": "def Create(self, name):\n    \n    precondition.AssertType(name, Text)\n\n    try:\n      constructor = self._constructors[name]\n    except KeyError:\n      message = \"No constructor for name '%s' has been registered\"\n      message %= name\n      raise ValueError(message)\n\n    instance = constructor()\n    if not isinstance(instance, self._cls):\n      message = (\"Constructor %r for name '%s' returned instance of type %r \"\n                 \"(expected %r)\")\n      message %= (constructor, name, type(instance), self._cls)\n      raise TypeError(message)\n\n    return instance", "docstring": "Creates a new instance.\n\nArgs:\nname: A name identifying the constructor to use for instantiation.\n\nReturns:\nAn instance of the type that the factory supports.", "source": "juraj-google-style"}
{"code": "def _GetUsers(self, key_path_suffix):\n    \n    user_key_name, _, key_path_suffix = key_path_suffix.partition(\n        definitions.KEY_PATH_SEPARATOR)\n\n    \n    \n    if user_key_name == '.DEFAULT':\n      search_key_name = 'S-1-5-18'\n    else:\n      search_key_name = user_key_name\n\n    user_profile_list_key = self.GetKeyByPath(self._USER_PROFILE_LIST_KEY_PATH)\n    if not user_profile_list_key:\n      return None\n\n    for user_profile_key in user_profile_list_key.GetSubkeys():\n      if search_key_name == user_profile_key.name:\n        profile_path_value = user_profile_key.GetValueByName('ProfileImagePath')\n        if not profile_path_value:\n          break\n\n        profile_path = profile_path_value.GetDataAsObject()\n        if not profile_path:\n          break\n\n        key_name_upper = user_profile_key.name.upper()\n        if key_name_upper.endswith('_CLASSES'):\n          profile_path = '\\\\'.join([\n              profile_path, 'AppData', 'Local', 'Microsoft', 'Windows',\n              'UsrClass.dat'])\n        else:\n          profile_path = '\\\\'.join([profile_path, 'NTUSER.DAT'])\n\n        profile_path_upper = profile_path.upper()\n        registry_file = self._GetCachedUserFileByPath(profile_path_upper)\n        if not registry_file:\n          break\n\n        key_path_prefix = definitions.KEY_PATH_SEPARATOR.join([\n            'HKEY_USERS', user_key_name])\n        key_path = ''.join([key_path_prefix, key_path_suffix])\n\n        registry_file.SetKeyPathPrefix(key_path_prefix)\n        return registry_file.GetKeyByPath(key_path)\n\n    return None", "docstring": "Virtual key callback to determine the users sub keys.\n\nArgs:\nkey_path_suffix (str): users Windows Registry key path suffix with\nleading path separator.\n\nReturns:\nWinRegistryKey: the users Windows Registry key or None if not available.", "source": "juraj-google-style"}
{"code": "def __init__(self, parent):\n        \n        logger.debug(\"Initialising log panel\")\n\n        super(Log, self).__init__(parent, padding=8, text=\"Python console log\")\n\n        \n        log = tk.Text(self, wrap=\"none\")\n        log.grid(column=0, row=0, sticky=\"W E N S\")\n\n        \n        log.tag_config('critical', foreground=\"red\", underline=True)\n        log.tag_config('error', foreground=\"red\")\n        log.tag_config('warning', foreground=\"orange\")\n        log.tag_config('info')\n        log.tag_config('debug', foreground=\"\n\n        \n        scrollbar = ttk.Scrollbar(self, orient=\"vertical\", command=log.yview)\n        scrollbar.grid(column=1, row=0, sticky=\"N S\")\n        log['yscrollcommand'] = scrollbar.set\n\n        \n        scrollbar = ttk.Scrollbar(self, orient=\"horizontal\", command=log.xview)\n        scrollbar.grid(column=0, row=1, sticky=\"W E\")\n        log['xscrollcommand'] = scrollbar.set\n\n        \n        class LogHandler(logging.Handler):\n            def __init__(self, text_widget):\n                logging.Handler.__init__(self)\n\n                self.text_widget = text_widget\n                self.text_widget.config(state=tk.DISABLED)\n\n            def flush(self):\n                try:\n                    self.text_widget.see(\"end\")\n                except:\n                    pass\n\n            def emit(self, record):\n                msg = self.format(record)\n                msg = msg[:9] + msg[29:]\n\n                tags = ()\n                if msg.startswith(\"CRITICAL\"):\n                    tags = 'critical'\n                if msg.startswith(\"ERROR\"):\n                    tags = 'error'\n                if msg.startswith(\"WARNING\"):\n                    tags = 'warning'\n                if msg.startswith(\"INFO\"):\n                    tags = 'info'\n                if msg.startswith(\"DEBUG\"):\n                    tags = 'debug'\n\n                self.text_widget.config(state=tk.NORMAL)\n                self.text_widget.insert(\"end\", msg + \"\\n\", tags)\n                self.text_widget.config(state=tk.DISABLED)\n                self.flush()\n\n        discord_logger = logging.getLogger(\"modis.discord_modis\")\n        formatter = logging.Formatter(\n            \"{levelname:8} {name} - {message}\", style=\"{\")\n        discord_handler = LogHandler(log)\n        discord_handler.setFormatter(formatter)\n        discord_logger.addHandler(discord_handler)\n\n        \n        self.columnconfigure(0, weight=1)\n        self.rowconfigure(0, weight=1)", "docstring": "Create a new text box for the console log.\n\nArgs:\nparent: A tk or ttk object", "source": "juraj-google-style"}
{"code": "def __init__(self, start, width, num_buckets):\n    self._start = start\n    self._width = width\n    self._num_buckets = num_buckets", "docstring": "Create a histogram with linear buckets.\n\nArgs:\nstart: Lower bound of a starting bucket.\nwidth: Bucket width. Smaller width implies a better resolution for\npercentile estimation.\nnum_buckets: The number of buckets. Upper bound of an ending bucket is\ndefined by start + width * numBuckets.", "source": "github-repos"}
{"code": "def step(self, observations, raw_rewards, processed_rewards, dones, actions):\n    assert isinstance(observations, np.ndarray)\n    assert isinstance(raw_rewards, np.ndarray)\n    assert isinstance(processed_rewards, np.ndarray)\n    assert isinstance(dones, np.ndarray)\n    assert isinstance(actions, np.ndarray)\n    assert (self.batch_size == observations.shape[0])\n    assert (self.batch_size == raw_rewards.shape[0])\n    assert (self.batch_size == processed_rewards.shape[0])\n    assert (self.batch_size == dones.shape[0])\n    assert (self.batch_size == actions.shape[0])\n    for index in range(self.batch_size):\n        trajectory = self._trajectories[index]\n        assert trajectory.is_active\n        trajectory.change_last_time_step(action=actions[index])\n        trajectory.add_time_step(observation=observations[index], done=dones[index], raw_reward=raw_rewards[index], processed_reward=processed_rewards[index])\n        if dones[index]:\n            self._complete_trajectory(trajectory, index)\n            assert (not self._trajectories[index].is_active)", "docstring": "Record the information obtained from taking a step in all envs.\n\nRecords (observation, rewards, done) in a new time-step and actions in the\ncurrent time-step.\n\nIf any trajectory gets done, we move that trajectory to\ncompleted_trajectories.\n\nArgs:\nobservations: ndarray of first dimension self.batch_size, which has the\nobservations after we've stepped, i.e. s_{t+1} where t is the current\nstate.\nraw_rewards: ndarray of first dimension self.batch_size containing raw\nrewards i.e. r_{t+1}.\nprocessed_rewards: ndarray of first dimension self.batch_size containing\nprocessed rewards. i.e. r_{t+1}\ndones: ndarray of first dimension self.batch_size, containing true at an\nindex if that env is done, i.e. d_{t+1}\nactions: ndarray of first dimension self.batch_size, containing actions\napplied at the current time-step, which leads to the observations\nrewards and done at the next time-step, i.e. a_t", "source": "codesearchnet"}
{"code": "def next_trials(self):\n    trials = list(self._trial_generator)\n    if self._shuffle:\n        random.shuffle(trials)\n    self._finished = True\n    return trials", "docstring": "Provides Trial objects to be queued into the TrialRunner.\n\nReturns:\ntrials (list): Returns a list of trials.", "source": "codesearchnet"}
{"code": "def reverse(path):\n    \n    if is_rooted(path) or '..' in path:\n        from b2.manager import get_manager\n        get_manager().errors()(\n            'reverse(path): path is either rooted or contains \"..\" in the path')\n    if path == '.':\n        return path\n    path = os.path.normpath(path)\n    \n    \n    return os.sep.join('..' for t in path.split(os.sep))", "docstring": "Returns path2 such that `os.path.join(path, path2) == '.'`.\n`path` may not contain '..' or be rooted.\n\nArgs:\npath (str): the path to reverse\n\nReturns:\nthe string of the reversed path\n\nExample:\n\n>>> p1 = 'path/to/somewhere'\n>>> p2 = reverse('path/to/somewhere')\n>>> p2\n'../../..'\n>>> os.path.normpath(os.path.join(p1, p2))\n'.'", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.layer_norm1(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.layer_norm2(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def trigger_methods(instance, args):\n    for name in sorted(args):\n        value = args[name]\n        target = instance\n        if (name.startswith('response_') or name.startswith('reply_')):\n            name = name.replace('response_', '').replace('reply_', '')\n            if hasattr(instance, '_response'):\n                target = instance._response\n        member = getattr(target, name, None)\n        isattr = (name in dir(target))\n        iscallable = (ismethod(member) and (not isfunction(member)))\n        if ((not iscallable) and (not isattr)):\n            raise PookInvalidArgument('Unsupported argument: {}'.format(name))\n        if iscallable:\n            member(value)\n        else:\n            setattr(target, name, value)", "docstring": "Triggers specific class methods using a simple reflection\nmechanism based on the given input dictionary params.\n\nArguments:\ninstance (object): target instance to dynamically trigger methods.\nargs (iterable): input arguments to trigger objects to\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def put(self, id, name, description, private, runs_executable_tasks, runs_docker_container_tasks, runs_singularity_container_tasks, active, whitelists):\n    request_url = (self._client.base_api_url + self.detail_url.format(id=id))\n    data_to_put = {'name': name, 'description': description, 'private': private, 'runs_executable_tasks': runs_executable_tasks, 'runs_docker_container_tasks': runs_docker_container_tasks, 'runs_singularity_container_tasks': runs_singularity_container_tasks, 'active': active, 'whitelists': whitelists}\n    response = self._client.session.put(request_url, data=data_to_put)\n    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK)\n    return self.response_data_to_model_instance(response.json())", "docstring": "Updates a task queue on the saltant server.\n\nArgs:\nid (int): The ID of the task queue.\nname (str): The name of the task queue.\ndescription (str): The description of the task queue.\nprivate (bool): A Booleon signalling whether the queue can\nonly be used by its associated user.\nruns_executable_tasks (bool): A Boolean specifying whether\nthe queue runs executable tasks.\nruns_docker_container_tasks (bool): A Boolean specifying\nwhether the queue runs container tasks that run in\nDocker containers.\nruns_singularity_container_tasks (bool): A Boolean\nspecifying whether the queue runs container tasks that\nrun in Singularity containers.\nactive (bool): A Booleon signalling whether the queue is\nactive.\nwhitelists (list): A list of task whitelist IDs.\n\nReturns:\n:class:`saltant.models.task_queue.TaskQueue`:\nA task queue model instance representing the task queue\njust updated.", "source": "codesearchnet"}
{"code": "def compile_protofile(proto_file_path):\n    out_file = tempfile.mkstemp()[1]\n    try:\n        subprocess.check_output(['protoc', '--include_source_info', '--descriptor_set_out', out_file, proto_file_path])\n    except subprocess.CalledProcessError as e:\n        sys.exit('protoc returned status {}'.format(e.returncode))\n    return out_file", "docstring": "Compile proto file to descriptor set.\n\nArgs:\nproto_file_path: Path to proto file to compile.\n\nReturns:\nPath to file containing compiled descriptor set.\n\nRaises:\nSystemExit if the compilation fails.", "source": "codesearchnet"}
{"code": "async def _async_loop(self, urls):\n    results = []\n    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:\n        for url in urls:\n            result = asyncio.ensure_future(self._get_async(url, session))\n            results.append(result)\n        responses = (await asyncio.gather(*results))\n    return responses", "docstring": "Asynchronous internal method used to request multiple URLs\n\nArgs:\nurls (list): URLs to fetch\n\nReturns:\nresponses (obj): All URL requests' response coroutines", "source": "codesearchnet"}
{"code": "def coord_list_mapping_pbc(subset, superset, atol=1e-8):\n    \n    atol = np.array([1., 1. ,1.]) * atol\n    return cuc.coord_list_mapping_pbc(subset, superset, atol)", "docstring": "Gives the index mapping from a subset to a superset.\nSuperset cannot contain duplicate matching rows\n\nArgs:\nsubset, superset: List of frac_coords\n\nReturns:\nlist of indices such that superset[indices] = subset", "source": "juraj-google-style"}
{"code": "def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None):\n    with backend.get_graph().as_default():\n        if sample_weights is not None:\n            self._update_sample_weight_modes(sample_weights)\n        self._prepare_sample_weights(sample_weights)\n        masks = self._prepare_output_masks()\n        self._handle_metrics(self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), sample_weights=self.sample_weights, masks=masks, return_weighted_metrics=True)\n        self.total_loss = self._prepare_total_loss(masks)", "docstring": "Compiles the model loss and weighted metric sub-graphs.\n\nThis may be used to set graph tensors as sample weights (instead of creating\nplaceholders).\n\nArgs:\nsample_weights: List of tensors to use as the sample weights. Must be the\nsame length as the number of outputs. If left as `None`, placeholders\nare used instead.", "source": "github-repos"}
{"code": "def generate_cot_body(context):\n    \n    try:\n        cot = {\n            'artifacts': get_cot_artifacts(context),\n            'chainOfTrustVersion': 1,\n            'runId': context.claim_task['runId'],\n            'task': context.task,\n            'taskId': context.claim_task['status']['taskId'],\n            'workerGroup': context.claim_task['workerGroup'],\n            'workerId': context.config['worker_id'],\n            'workerType': context.config['worker_type'],\n            'environment': get_cot_environment(context),\n        }\n    except (KeyError, ) as exc:\n        raise ScriptWorkerException(\"Can't generate chain of trust! {}\".format(str(exc)))\n\n    return cot", "docstring": "Generate the chain of trust dictionary.\n\nThis is the unsigned and unformatted chain of trust artifact contents.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict: the unsignd and unformatted chain of trust artifact contents.\n\nRaises:\nScriptWorkerException: on error.", "source": "juraj-google-style"}
{"code": "def _generate_mark_code(rule_name):\n    \n    code = ''.join([i for i in str(rule_name) if i.isdigit()])\n    code = code.zfill(2)\n    return code", "docstring": "Generates a two digit string based on a provided string\n\nArgs:\nrule_name (str): A configured rule name 'pytest_mark3'.\n\nReturns:\nstr: A two digit code based on the provided string '03'", "source": "juraj-google-style"}
{"code": "def _set_axis_limits(self, which, lims, d, scale, reverse=False):\n    setattr(self.limits, (which + 'lims'), lims)\n    setattr(self.limits, ('d' + which), d)\n    setattr(self.limits, (which + 'scale'), scale)\n    if reverse:\n        setattr(self.limits, (('reverse_' + which) + '_axis'), True)\n    return", "docstring": "Private method for setting axis limits.\n\nSets the axis limits on each axis for an individual plot.\n\nArgs:\nwhich (str): The indicator of which part of the plots\nto adjust. This currently handles `x` and `y`.\nlims (len-2 list of floats): The limits for the axis.\nd (float): Amount to increment by between the limits.\nscale (str): Scale of the axis. Either `log` or `lin`.\nreverse (bool, optional): If True, reverse the axis tick marks. Default is False.", "source": "codesearchnet"}
{"code": "def extraction_data_statistics(path):\n    \n    with functions.DBContextManager(path) as session:\n        extraction = session.query(models.Extraction).first()\n        X, y = extraction.return_main_dataset()\n        functions.verify_dataset(X, y)\n\n        if extraction.test_dataset['method'] == 'split_from_main':\n            X, X_test, y, y_test = train_test_split(\n                X,\n                y,\n                test_size=extraction.test_dataset['split_ratio'],\n                random_state=extraction.test_dataset['split_seed'],\n                stratify=y\n            )\n        elif extraction.test_dataset['method'] == 'source':\n            if 'source' not in extraction.test_dataset or not extraction.test_dataset['source']:\n                raise exceptions.UserError('Source is empty')\n\n            extraction_code = extraction.test_dataset[\"source\"]\n            extraction_function = functions.\\\n                import_object_from_string_code(extraction_code, \"extract_test_dataset\")\n            X_test, y_test = extraction_function()\n        else:\n            X_test, y_test = None, None\n\n        \n        extraction_code = extraction.meta_feature_generation['source']\n        return_splits_iterable = functions.import_object_from_string_code(\n            extraction_code,\n            'return_splits_iterable'\n        )\n        number_of_splits = 0\n        test_indices = []\n        try:\n            for train_idx, test_idx in return_splits_iterable(X, y):\n                number_of_splits += 1\n                test_indices.append(test_idx)\n        except Exception as e:\n            raise exceptions.UserError('User code exception', exception_message=str(e))\n\n        \n        test_indices = np.concatenate(test_indices)\n        X, y = X[test_indices], y[test_indices]\n\n        \n        extraction_code = extraction.stacked_ensemble_cv['source']\n        return_splits_iterable = functions.import_object_from_string_code(\n            extraction_code,\n            'return_splits_iterable'\n        )\n        number_of_splits_stacked_cv = 0\n        try:\n            for train_idx, test_idx in return_splits_iterable(X, y):\n                number_of_splits_stacked_cv += 1\n        except Exception as e:\n            raise exceptions.UserError('User code exception', exception_message=str(e))\n\n        data_stats = dict()\n        data_stats['train_data_stats'] = functions.verify_dataset(X, y)\n        if X_test is not None:\n            data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)\n        else:\n            data_stats['test_data_stats'] = None\n        data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits}\n        data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv}\n\n        extraction.data_statistics = data_stats\n\n        session.add(extraction)\n        session.commit()", "docstring": "Generates data statistics for the given data extraction setup stored\nin Xcessiv notebook.\n\nThis is in rqtasks.py but not as a job yet. Temporarily call this directly\nwhile I'm figuring out Javascript lel.\n\nArgs:\npath (str, unicode): Path to xcessiv notebook", "source": "juraj-google-style"}
{"code": "def reindex(self, axis, labels, **kwargs):\n\n    def reindex_builer(df, axis, old_labels, new_labels, **kwargs):\n        if axis:\n            while (len(df.columns) < len(old_labels)):\n                df[len(df.columns)] = np.nan\n            df.columns = old_labels\n            new_df = df.reindex(columns=new_labels, **kwargs)\n            new_df.columns = pandas.RangeIndex(len(new_df.columns))\n            return new_df\n        else:\n            while (len(df.index) < len(old_labels)):\n                df.loc[len(df.index)] = np.nan\n            df.index = old_labels\n            new_df = df.reindex(index=new_labels, **kwargs)\n            new_df.reset_index(inplace=True, drop=True)\n            return new_df\n    old_labels = (self.columns if axis else self.index)\n    new_index = (self.index if axis else labels)\n    new_columns = (labels if axis else self.columns)\n    func = self._prepare_method((lambda df: reindex_builer(df, axis, old_labels, labels, **kwargs)))\n    new_data = self._map_across_full_axis(axis, func)\n    return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Fits a new index for this Manger.\n\nArgs:\naxis: The axis index object to target the reindex on.\nlabels: New labels to conform 'axis' on to.\n\nReturns:\nA new QueryCompiler with updated data and new index.", "source": "codesearchnet"}
{"code": "def __init__(self, *, verbose: bool = False) -> None:\n        \n        self.verbose = verbose\n        self._start = None\n        self.elapsed = None", "docstring": "Configure the timing Timing context manager.\n\nArgs:\nverbose: Print elapsed time", "source": "juraj-google-style"}
{"code": "def _merge_assets_key_collection(saved_model_proto, path):\n  \n  for meta_graph in saved_model_proto.meta_graphs:\n    node_asset_map = {}\n    if tf_v1.saved_model.constants.ASSETS_KEY in meta_graph.collection_def:\n      assets_any_proto = meta_graph.collection_def[\n          tf_v1.saved_model.constants.ASSETS_KEY].any_list.value\n      for asset_any_proto in assets_any_proto:\n        asset_proto = meta_graph_pb2.AssetFileDef()\n        asset_any_proto.Unpack(asset_proto)\n        asset_filename = _get_asset_filename(path, asset_proto.filename)\n        node_asset_map[_get_node_name_from_tensor(\n            asset_proto.tensor_info.name)] = asset_filename\n      del meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY]\n\n    for node in meta_graph.graph_def.node:\n      asset_filepath = node_asset_map.get(node.name)\n      if asset_filepath:\n        _check_asset_node_def(node)\n        node.attr[\"value\"].tensor.string_val[0] = asset_filepath", "docstring": "Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto.\n\nRemoves the ASSETS_KEY collection from the GraphDefs in the SavedModel and\nmodifies nodes with the assets filenames to point to the assets in `path`.\nAfter this transformation, the SavedModel GraphDefs can be used without\nfeeding asset tensors.\n\nArgs:\nsaved_model_proto: SavedModel proto to be modified.\npath: path where the SavedModel is being loaded from.", "source": "juraj-google-style"}
{"code": "def load_parameters(distribution, method_name, parameters=None, cache=None, cache_key=(lambda x: x)):\n    from .. import baseclass\n    if (cache is None):\n        cache = {}\n    if (parameters is None):\n        parameters = {}\n    parameters_ = distribution.prm.copy()\n    parameters_.update(**parameters)\n    parameters = parameters_\n    if contains_call_signature(getattr(distribution, method_name), 'cache'):\n        parameters['cache'] = cache\n    else:\n        for (key, value) in parameters.items():\n            if isinstance(value, baseclass.Dist):\n                value = cache_key(value)\n                if (value in cache):\n                    parameters[key] = cache[value]\n                else:\n                    raise baseclass.StochasticallyDependentError('evaluating under-defined distribution {}.'.format(distribution))\n    return parameters", "docstring": "Load parameter values by filling them in from cache.\n\nArgs:\ndistribution (Dist):\nThe distribution to load parameters from.\nmethod_name (str):\nName of the method for where the parameters should be used.\nTypically ``\"_pdf\"``, ``_cdf`` or the like.\nparameters (:py:data:typing.Any):\nDefault parameters to use if there are no cache to retrieve. Use\nthe distributions internal parameters, if not provided.\ncache (:py:data:typing.Any):\nA dictionary containing previous evaluations from the stack. If\na parameters contains a distribution that contains in the cache, it\nwill be replaced with the cache value. If omitted, a new one will\nbe created.\ncache_key (:py:data:typing.Any)\nRedefine the keys of the cache to suite other purposes.\n\nReturns:\nSame as ``parameters``, if provided. The ``distribution`` parameter if\nnot. In either case, parameters may be updated with cache values (if\nprovided) or by ``cache`` if the call signature of ``method_name`` (on\n``distribution``) contains an ``cache`` argument.", "source": "codesearchnet"}
{"code": "def transform_tensor(self, tensor):\n    dim = tensor.shape\n    rank = len(dim)\n    assert all([(i == 3) for i in dim])\n    lc = string.ascii_lowercase\n    indices = (lc[:rank], lc[rank:(2 * rank)])\n    einsum_string = ','.join([(a + i) for (a, i) in zip(*indices)])\n    einsum_string += ',{}->{}'.format(*indices[::(- 1)])\n    einsum_args = (([self.rotation_matrix] * rank) + [tensor])\n    return np.einsum(einsum_string, *einsum_args)", "docstring": "Applies rotation portion to a tensor. Note that tensor has to be in\nfull form, not the Voigt form.\n\nArgs:\ntensor (numpy array): a rank n tensor\n\nReturns:\nTransformed tensor.", "source": "codesearchnet"}
{"code": "def __init__(self, data_manager, axis, func):\n        \n        self.data_manager = data_manager\n        self.axis = axis\n        self.index_iter = (\n            iter(self.data_manager.columns)\n            if axis\n            else iter(range(len(self.data_manager.index)))\n        )\n        self.func = func", "docstring": "PartitionIterator class to define a generator on partitioned data\n\nArgs:\ndata_manager (DataManager): Data manager for the dataframe\naxis (int): axis to iterate over\nfunc (callable): The function to get inner iterables from\neach partition", "source": "juraj-google-style"}
{"code": "def setup_privnet(self, host=None):\n    self.setup(FILENAME_SETTINGS_PRIVNET)\n    if isinstance(host, str):\n        if (':' in host):\n            raise Exception('No protocol prefix or port allowed in host, use just the IP or domain.')\n        print('Using custom privatenet host:', host)\n        self.SEED_LIST = [('%s:20333' % host)]\n        self.RPC_LIST = [('http:\n        print('- P2P:', ', '.join(self.SEED_LIST))\n        print('- RPC:', ', '.join(self.RPC_LIST))\n    self.check_privatenet()", "docstring": "Load settings from the privnet JSON config file\n\nArgs:\nhost (string, optional): if supplied, uses this IP or domain as neo nodes. The host must\nuse these standard ports: P2P 20333, RPC 30333.", "source": "codesearchnet"}
{"code": "def all_arguments(cls, function, arguments):\n    \n    if isinstance(arguments, dict):\n      arguments = Arguments(**arguments)\n    elif not isinstance(arguments, Arguments):\n      arguments = Arguments(*arguments)\n    return cls(function, arguments)", "docstring": "Helper function for creating `FunctionCall`s with `Arguments`.\n\nArgs:\nfunction: The value to store for the action function.\narguments: The values to store for the arguments of the action. Can either\nbe an `Arguments` object, a `dict`, or an iterable. If a `dict` or an\niterable is provided, the values will be unpacked into an `Arguments`\nobject.\n\nReturns:\nA new `FunctionCall` instance.", "source": "juraj-google-style"}
{"code": "def ne(self, other, axis=\"columns\", level=None):\n        \n        return self._binary_op(\"ne\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is not equal to other.\n\nArgs:\nother: A DataFrame or Series or scalar to compare to.\naxis: The axis to perform the ne over.\nlevel: The Multilevel index level to apply ne over.\n\nReturns:\nA new DataFrame filled with Booleans.", "source": "juraj-google-style"}
{"code": "def RegisterMountPoint(cls, mount_point, path_spec):\n    if (mount_point in cls._mount_points):\n        raise KeyError('Mount point: {0:s} already set.'.format(mount_point))\n    cls._mount_points[mount_point] = path_spec", "docstring": "Registers a path specification mount point.\n\nArgs:\nmount_point (str): mount point identifier.\npath_spec (PathSpec): path specification of the mount point.\n\nRaises:\nKeyError: if the corresponding mount point is already set.", "source": "codesearchnet"}
{"code": "def backup(filenames, prefix='error'):\n    num = max(([0] + [int(f.split('.')[1]) for f in glob('{}.*.tar.gz'.format(prefix))]))\n    filename = '{}.{}.tar.gz'.format(prefix, (num + 1))\n    logging.info('Backing up run to {}.'.format(filename))\n    with tarfile.open(filename, 'w:gz') as tar:\n        for fname in filenames:\n            for f in glob(fname):\n                tar.add(f)", "docstring": "Backup files to a tar.gz file. Used, for example, in backing up the\nfiles of an errored run before performing corrections.\n\nArgs:\nfilenames ([str]): List of files to backup. Supports wildcards, e.g.,\n*.*.\nprefix (str): prefix to the files. Defaults to error, which means a\nseries of error.1.tar.gz, error.2.tar.gz, ... will be generated.", "source": "codesearchnet"}
{"code": "def Sample(self, operation, description, data_size, compressed_data_size):\n    sample_time = time.time()\n    sample = '{0:f}\\t{1:s}\\t{2:s}\\t{3:d}\\t{4:d}\\n'.format(sample_time, operation, description, data_size, compressed_data_size)\n    self._WritesString(sample)", "docstring": "Takes a sample of data read or written for profiling.\n\nArgs:\noperation (str): operation, either 'read' or 'write'.\ndescription (str): description of the data read.\ndata_size (int): size of the data read in bytes.\ncompressed_data_size (int): size of the compressed data read in bytes.", "source": "codesearchnet"}
{"code": "def unkown_field(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `unkown_field`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `unkown_field`')\n\n        self._unkown_field = value", "docstring": "Corresponds to IDD Field `unkown_field` Empty field in data.\n\nArgs:\nvalue (str): value for IDD Field `unkown_field`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def install_requirements(self, path, index=None):\n        \n        cmd = 'install -r {0}'.format(path)\n        if index:\n\n            cmd = 'install --index-url {0} -r {1}'.format(index, path)\n\n        self.pip(cmd)", "docstring": "Install packages from a requirements.txt file.\n\nArgs:\npath (str): The path to the requirements file.\nindex (str): The URL for a pypi index to use.", "source": "juraj-google-style"}
{"code": "def floodlight_monitor(config, task: dict) -> None:\n    if config.verbose:\n        print('FLOODLIGHT MONITOR')\n    if 'template' in task['sheet']:\n        sheets_tab_copy(config, task['auth'], task['sheet']['template']['sheet'], task['sheet']['template']['tab'], task['sheet']['sheet'], task['sheet']['tab'])\n    triggers = sheets_read(config, task['auth'], task['sheet']['sheet'], task['sheet']['tab'], task['sheet']['range'])\n    if config.verbose and len(triggers) == 0:\n        print('FLOODLIGHT MONITOR: No floodlight ids specified in sheet.')\n    alerts = {}\n    day = None\n    for trigger in triggers:\n        trigger.append(floodlight_report(config, task, trigger[TRIGGER_ID]))\n    for trigger in triggers:\n        rows = floodlight_rows(config, task, trigger[TRIGGER_REPORT])\n        last_day, rows = floodlight_analysis(config, task, rows)\n        if last_day:\n            day = last_day if day is None else max(day, last_day)\n            alerts.setdefault(trigger[TRIGGER_EMAIL], [])\n            alerts[trigger[TRIGGER_EMAIL]].extend(rows)\n    if alerts:\n        floodlight_email(config, task, day, alerts)", "docstring": "The task handler.  See module description.\n\nArgs:\nEveruthing is passed using task.\n\nReturns:\nNothing.", "source": "github-repos"}
{"code": "def _ReceiveItemOnActivity(self, zmq_socket):\n    events = zmq_socket.poll(self._ZMQ_SOCKET_RECEIVE_TIMEOUT_MILLISECONDS)\n    if events:\n        try:\n            received_object = self._zmq_socket.recv_pyobj()\n            return received_object\n        except zmq.error.Again:\n            logger.error('{0:s}. Failed to receive item in time.'.format(self.name))\n            raise\n        except zmq.error.ZMQError as exception:\n            if (exception.errno == errno.EINTR):\n                logger.error('ZMQ syscall interrupted in {0:s}. Queue aborting.'.format(self.name))\n            raise\n    raise errors.QueueEmpty", "docstring": "Attempts to receive an item from a ZeroMQ socket.\n\nArgs:\nzmq_socket (zmq.Socket): used to the receive the item.\n\nReturns:\nobject: item from the socket.\n\nRaises:\nQueueEmpty: if no item could be received within the timeout.\nzmq.error.ZMQError: if an error occurs in ZeroMQ", "source": "codesearchnet"}
{"code": "def load_image(buf, request_components=0):\n    \n    x = ffi.new('int*')\n    y = ffi.new('int*')\n    n = ffi.new('int*')\n\n    cbuf = ffi.from_buffer(buf)\n\n    bitmap = lib.stbi_load_from_memory(\n        ffi.cast('unsigned char*', cbuf), len(buf), x, y, n,\n        request_components\n    )\n\n    pybuffer = ffi.buffer(bitmap, x[0]*y[0]*n[0])\n\n    return pybuffer, x[0], y[0], n[0]", "docstring": "Load a png or jpeg image into a bitmap buffer.\n\nArgs:\nbuf (Buffer): Buffer to load\nrequest_components (int): If you want to force number of components\n\nReturns:\n\nA tuple containing:\n\n- Bitmap buffer\n- width of bitmap\n- height of bitmap\n- number of components", "source": "juraj-google-style"}
{"code": "def get_default_mesh() -> Optional[layout_lib.Mesh]:\n    if _dtensor_singleton is None:\n        return None\n    else:\n        return _dtensor_singleton._current_default_mesh", "docstring": "Return the default mesh under the current dtensor device context.\n\nIn the case that dtensor device system is not initialized, this function\nwill return None.\n\nReturns:\nThe current default mesh for the dtensor device context.", "source": "github-repos"}
{"code": "def poll(self, timeout=None):\n    if (not isinstance(timeout, (int, float, type(None)))):\n        raise TypeError('Invalid timeout type, should be integer, float, or None.')\n    p = select.epoll()\n    p.register(self._fd, ((select.EPOLLIN | select.EPOLLET) | select.EPOLLPRI))\n    for _ in range(2):\n        events = p.poll(timeout)\n    if events:\n        try:\n            os.lseek(self._fd, 0, os.SEEK_SET)\n        except OSError as e:\n            raise GPIOError(e.errno, ('Rewinding GPIO: ' + e.strerror))\n        return True\n    return False", "docstring": "Poll a GPIO for the edge event configured with the .edge property.\n\n`timeout` can be a positive number for a timeout in seconds, 0 for a\nnon-blocking poll, or negative or None for a blocking poll. Defaults to\nblocking poll.\n\nArgs:\ntimeout (int, float, None): timeout duration in seconds.\n\nReturns:\nbool: ``True`` if an edge event occurred, ``False`` on timeout.\n\nRaises:\nGPIOError: if an I/O or OS error occurs.\nTypeError: if `timeout` type is not None or int.", "source": "codesearchnet"}
{"code": "def default_float_type():\n    if not is_prefer_float32() and is_allow_float64():\n        return float64\n    else:\n        return float32", "docstring": "Gets the default float type.\n\nReturns:\nIf `is_prefer_float32()` is false and `is_allow_float64()` is true, returns\nfloat64; otherwise returns float32.", "source": "github-repos"}
{"code": "def apply(self, value: Any, allow_partial: bool=False, transform_fn: Optional[Callable[[utils.KeyPath, 'Field', Any], Any]]=None, root_path: Optional[utils.KeyPath]=None) -> Any:\n    value = self._value.apply(value, allow_partial=allow_partial, child_transform=transform_fn, root_path=root_path)\n    if transform_fn:\n        value = transform_fn(root_path, self, value)\n    return value", "docstring": "Apply current field to a value, which validate and complete the value.\n\nArgs:\nvalue: Value to validate against this spec.\nallow_partial: Whether partial value is allowed. This is for dict or\nnested dict values.\ntransform_fn: Function to transform applied value into final value.\nroot_path: Key path for root.\n\nReturns:\nfinal value.\nWhen allow_partial is set to False (default), only fully qualified value\nis acceptable. When allow_partial is set to True, missing fields will\nbe placeheld using MISSING_VALUE.\n\nRaises:\nKeyError: if additional key is found in value, or required key is missing\nand allow_partial is set to False.\nTypeError: if type of value is not the same as spec required.\nValueError: if value is not acceptable, or value is MISSING_VALUE while\nallow_partial is set to False.", "source": "github-repos"}
{"code": "def bin(self, bins, labels=None):\n        \n        return dim(self, bin, bins, labels=labels)", "docstring": "Bins continuous values.\n\nBins continuous using the provided bins and assigns labels\neither computed from each bins center point or from the\nsupplied labels.\n\nArgs:\nbins: List or array containing the bin boundaries\nlabels: List of labels to assign to each bin\nIf the bins are length N the labels should be length N-1", "source": "juraj-google-style"}
{"code": "def format(self, record):\n        \n        level = record.levelno\n\n        if level >= logging.CRITICAL:\n            color = self.CRITICAL\n        elif level >= logging.ERROR:\n            color = self.ERROR\n        elif level >= logging.WARNING:\n            color = self.WARNING\n        elif level >= logging.INFO:\n            color = self.INFO\n        elif level >= logging.DEBUG:\n            color = self.DEBUG\n        else:\n            color = self.DEFAULT\n\n        message = super().format(record)\n        if record.args:\n            try:\n                message = message % record.args\n            except TypeError:\n                \n                \n                pass\n\n        return color + message + self.DEFAULT", "docstring": "Adds colors to a log record and formats it with the default\n\nArgs:\nrecord (logging.LogRecord): log record to format\n\nReturns:\nstr: The colored and formatted record string", "source": "juraj-google-style"}
{"code": "def apply_filter(self, expr, value):\n        \n\n        if self.skip(value):\n            return expr\n\n        if not self._valid_value(value):\n            msg = \"Invalid value {value} passed to filter {name} - \".format(\n                value=repr(value),\n                name=self.name)\n\n            if self.default is not None:\n                warn(msg + \"defaulting to {}\".format(self.default))\n                value = self.default\n            else:\n                warn(msg + \"skipping\")\n                return expr\n\n        return self.func(expr, value)", "docstring": "Returns the given expression filtered by the given value.\n\nArgs:\nexpr (xpath.expression.AbstractExpression): The expression to filter.\nvalue (object): The desired value with which the expression should be filtered.\n\nReturns:\nxpath.expression.AbstractExpression: The filtered expression.", "source": "juraj-google-style"}
{"code": "def get_visual_content(self, id_or_uri):\n        \n        uri = self._client.build_uri(id_or_uri) + \"/visualContent\"\n        return self._client.get(uri)", "docstring": "Gets a list of visual content objects describing each rack within the data center. The response aggregates data\ncenter and rack data with a specified metric (peak24HourTemp) to provide simplified access to display data for\nthe data center.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\n\nReturn:\nlist: List of visual content objects.", "source": "juraj-google-style"}
{"code": "def convert_to_localized_md(model_list: str, localized_model_list: str, format_str: str) -> Tuple[bool, str]:\n\n    def _rep(match):\n        title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups()\n        return format_str.format(title=title, model_link=model_link, paper_affiliations=paper_affiliations, paper_title_link=paper_title_link, paper_authors=paper_authors, supplements=' ' + supplements.strip() if len(supplements) != 0 else '')\n    _re_capture_meta = re.compile('\\\\*\\\\*\\\\[([^\\\\]]*)\\\\]\\\\(([^\\\\)]*)\\\\)\\\\*\\\\* \\\\(from ([^)]*)\\\\)[^\\\\[]*([^\\\\)]*\\\\)).*?by (.*?[A-Za-z\\\\*]{2,}?)\\\\. (.*)$')\n    _re_capture_title_link = re.compile('\\\\*\\\\*\\\\[([^\\\\]]*)\\\\]\\\\(([^\\\\)]*)\\\\)\\\\*\\\\*')\n    _re_capture_paper_link = re.compile(' \\\\[([^\\\\]]*)\\\\]\\\\(([^\\\\)]*)\\\\)')\n    if len(localized_model_list) == 0:\n        localized_model_index = {}\n    else:\n        try:\n            localized_model_index = {re.search('\\\\*\\\\*\\\\[([^\\\\]]*)', line).groups()[0]: line for line in localized_model_list.strip().split('\\n')}\n        except AttributeError:\n            raise AttributeError('A model name in localized READMEs cannot be recognized.')\n    model_keys = [re.search('\\\\*\\\\*\\\\[([^\\\\]]*)', line).groups()[0] for line in model_list.strip().split('\\n')]\n    readmes_match = not any((k not in model_keys for k in localized_model_index))\n    localized_model_index = {k: v for k, v in localized_model_index.items() if k in model_keys}\n    for model in model_list.strip().split('\\n'):\n        title, model_link = _re_capture_title_link.search(model).groups()\n        if title not in localized_model_index:\n            readmes_match = False\n            localized_model_index[title] = _re_capture_meta.sub(_rep, model + ' ')\n        elif _re_fill_pattern.search(localized_model_index[title]) is not None:\n            update = _re_capture_meta.sub(_rep, model + ' ')\n            if update != localized_model_index[title]:\n                readmes_match = False\n                localized_model_index[title] = update\n        else:\n            converted_model = _re_capture_title_link.sub(f'**[{title}]({model_link})**', localized_model_index[title], count=1)\n            paper_title_link = _re_capture_paper_link.search(model)\n            if paper_title_link is not None:\n                paper_title, paper_link = paper_title_link.groups()\n                converted_model = _re_capture_paper_link.sub(f' [{paper_title}]({paper_link})', converted_model, count=1)\n            if converted_model != localized_model_index[title]:\n                readmes_match = False\n                localized_model_index[title] = converted_model\n    sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower())\n    return (readmes_match, '\\n'.join((x[1] for x in sorted_index)) + '\\n')", "docstring": "Compare the model list from the main README to the one in a localized README.\n\nArgs:\nmodel_list (`str`): The model list in the main README.\nlocalized_model_list (`str`): The model list in one of the localized README.\nformat_str (`str`):\nThe template for a model entry in the localized README (look at the `format_model_list` in the entries of\n`LOCALIZED_READMES` for examples).\n\nReturns:\n`Tuple[bool, str]`: A tuple where the first value indicates if the READMEs match or not, and the second value\nis the correct localized README.", "source": "github-repos"}
{"code": "def load_tensor_from_event(event):\n    tensor_proto = event.summary.value[0].tensor\n    shape = tensor_util.TensorShapeProtoToList(tensor_proto.tensor_shape)\n    num_elements = 1\n    for shape_dim in shape:\n        num_elements *= shape_dim\n    if tensor_proto.tensor_content or tensor_proto.string_val or (not num_elements):\n        if tensor_proto.dtype == types_pb2.DT_RESOURCE:\n            tensor_value = InconvertibleTensorProto(tensor_proto)\n        else:\n            try:\n                tensor_value = tensor_util.MakeNdarray(tensor_proto)\n            except KeyError:\n                tensor_value = InconvertibleTensorProto(tensor_proto)\n    else:\n        tensor_value = InconvertibleTensorProto(tensor_proto, False)\n    return tensor_value", "docstring": "Load a tensor from an Event proto.\n\nArgs:\nevent: The Event proto, assumed to hold a tensor value in its\nsummary.value[0] field.\n\nReturns:\nThe tensor value loaded from the event file, as a `numpy.ndarray`, if\nrepresentation of the tensor value by a `numpy.ndarray` is possible.\nFor uninitialized Tensors, returns `None`. For Tensors of data types that\ncannot be represented as `numpy.ndarray` (e.g., `tf.resource`), return\nthe `TensorProto` protobuf object without converting it to a\n`numpy.ndarray`.", "source": "github-repos"}
{"code": "def isostr_to_datetime(dt_str):\n    if (len(dt_str) <= 20):\n        return datetime.datetime.strptime(dt_str, '%Y-%m-%dT%H:%M:%SZ')\n    else:\n        dt_str = dt_str.split('.')\n        return isostr_to_datetime(('%sZ' % dt_str[0]))", "docstring": "Converts iso formated text string into a datetime object\n\nArgs:\ndt_str (str): ISO formated text string\nReturns:\n:obj:`datetime.datetime`", "source": "codesearchnet"}
{"code": "def _start_workflow_stages(pb: ProcessingBlock, pb_id: str, workflow_stage_dict: dict, workflow_stage: WorkflowStage, docker: DockerSwarmClient):\n    stage_data = workflow_stage_dict[workflow_stage.id]\n    stage_data['start'] = False\n    if (stage_data['status'] == 'none'):\n        if (not workflow_stage.dependencies):\n            stage_data['start'] = True\n        else:\n            dependency_status = []\n            for dependency in workflow_stage.dependencies:\n                dependency_status.append((workflow_stage_dict[dependency['value']]['status'] == 'complete'))\n            stage_data['start'] = all(dependency_status)\n    if stage_data['start']:\n        LOG.info('-- Starting workflow stage: %s --', workflow_stage.id)\n        LOG.info('Configuring EE templates.')\n        args_template = jinja2.Template(workflow_stage.args_template)\n        stage_params = pb.workflow_parameters[workflow_stage.id]\n        template_params = {**workflow_stage.config, **stage_params}\n        args = args_template.render(stage=template_params)\n        LOG.info('Resolving workflow script arguments.')\n        args = json.dumps(json.loads(args))\n        compose_template = jinja2.Template(workflow_stage.compose_template)\n        compose_str = compose_template.render(stage=dict(args=args))\n        compose_dict = yaml.load(compose_str)\n        service_names = compose_dict['services'].keys()\n        new_service_names = ['{}_{}_{}'.format(pb_id, pb.workflow_id, name) for name in service_names]\n        for (new, old) in zip(new_service_names, service_names):\n            compose_dict['services'][new] = compose_dict['services'].pop(old)\n        compose_str = yaml.dump(compose_dict)\n        service_ids = docker.create_services(compose_str)\n        LOG.info('Staring workflow containers:')\n        for service_id in service_ids:\n            service_name = docker.get_service_name(service_id)\n            LOG.info('  %s, %s ', service_name, service_id)\n            stage_data['services'][service_id] = {}\n            LOG.info('Created Services: %s', service_ids)\n            stage_data['services'][service_id] = dict(name=docker.get_service_name(service_id), status='running', complete=False)\n        stage_data['status'] = 'running'", "docstring": "Start a workflow stage by starting a number of docker services.\n\nThis function first assesses if the specified workflow stage can be\nstarted based on its dependencies. If this is found to be the case,\nthe workflow stage is stared by first resolving and template arguments\nin the workflow stage configuration, and then using the Docker Swarm Client\nAPI to start workflow stage services. As part of this, the\nworkflow_stage_dict data structure is updated accordingly.\n\nTODO(BMo) This function will need refactoring at some point as part\nof an update to the way workflow state metadata is stored in the\nconfiguration database. Currently the stage_data dictionary\nis a bit of a hack for a badly specified Configuration Database\nbacked WorkflowStage object.\n\nThis function is used by `execute_processing_block`.\n\nArgs:\npb (ProcessingBlock): Configuration database Processing Block data\nobject\npb_id (str): Processing Block identifier\nworkflow_stage_dict (dict): Workflow stage metadata structure\nworkflow_stage (WorkflowStage): Workflow state configuration database\ndata object.\ndocker (DockerClient): Docker Swarm Client object.", "source": "codesearchnet"}
{"code": "def ContainsIgnoreCase(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, 'CONTAINS_IGNORE_CASE')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"contains ignore case\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def _GetDecryptedStreamSize(self):\n    self._file_object.seek(0, os.SEEK_SET)\n    self._decrypter = self._GetDecrypter()\n    self._decrypted_data = b''\n    encrypted_data_offset = 0\n    encrypted_data_size = self._file_object.get_size()\n    decrypted_stream_size = 0\n    while (encrypted_data_offset < encrypted_data_size):\n        read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)\n        if (read_count == 0):\n            break\n        encrypted_data_offset += read_count\n        decrypted_stream_size += self._decrypted_data_size\n    return decrypted_stream_size", "docstring": "Retrieves the decrypted stream size.\n\nReturns:\nint: decrypted stream size.", "source": "codesearchnet"}
{"code": "def url(self, pattern, method=None, type_cast=None):\n    if (not type_cast):\n        type_cast = {}\n\n    def decorator(function):\n        self.add(pattern, function, method, type_cast)\n        return function\n    return decorator", "docstring": "Decorator for registering a path pattern.\n\nArgs:\npattern (str): Regex pattern to match a certain path\nmethod (str, optional): Usually used to define one of GET, POST,\nPUT, DELETE. You may use whatever fits your situation though.\nDefaults to None.\ntype_cast (dict, optional): Mapping between the param name and\none of `int`, `float` or `bool`. The value reflected by the\nprovided param name will than be casted to the given type.\nDefaults to None.", "source": "codesearchnet"}
{"code": "def get_dag(nodes, downstream_fn) -> Tuple[(Dict, Dict)]:\n    dag = {}\n    node_by_ids = {}\n    for node in nodes:\n        downstream_ops = downstream_fn(node)\n        dag[node.id] = set(downstream_ops)\n        node_by_ids[node.id] = node\n    return (dag, node_by_ids)", "docstring": "Return a dag representation of the nodes passed.\n\nThis is equally used for pipelines and pipeline runs.\n\nParams:\nnodes: an instance of `Operation` | `OperationRun` the nodes to represent en dag.\ndownstream_fn: a function that returns the downstream nodes of the a node.\n\nReturns:\ntuple: (dag, dict(node_id: node))", "source": "codesearchnet"}
{"code": "class PromptDepthAnythingFeatureFusionLayer(nn.Module):\n\n    def __init__(self, config: PromptDepthAnythingConfig):\n        super().__init__()\n        self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)\n        self.residual_layer1 = PromptDepthAnythingPreActResidualLayer(config)\n        self.residual_layer2 = PromptDepthAnythingPreActResidualLayer(config)\n        self.prompt_depth_layer = PromptDepthAnythingLayer(config)\n\n    def forward(self, hidden_state, residual=None, size=None, prompt_depth=None):\n        if residual is not None:\n            if hidden_state.shape != residual.shape:\n                residual = nn.functional.interpolate(residual, size=hidden_state.shape[2:], mode='bilinear', align_corners=False)\n            hidden_state = hidden_state + self.residual_layer1(residual)\n        hidden_state = self.residual_layer2(hidden_state)\n        if prompt_depth is not None:\n            prompt_depth = nn.functional.interpolate(prompt_depth, size=hidden_state.shape[2:], mode='bilinear', align_corners=False)\n            res = self.prompt_depth_layer(prompt_depth)\n            hidden_state = hidden_state + res\n        modifier = {'scale_factor': 2} if size is None else {'size': size}\n        hidden_state = nn.functional.interpolate(hidden_state, **modifier, mode='bilinear', align_corners=True)\n        hidden_state = self.projection(hidden_state)\n        return hidden_state", "docstring": "Feature fusion layer, merges feature maps from different stages.\n\nArgs:\nconfig (`[PromptDepthAnythingConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def register_filesystem_plugin(plugin_location):\n    if os.path.exists(plugin_location):\n        py_tf.TF_RegisterFilesystemPlugin(plugin_location)\n    else:\n        raise OSError(errno.ENOENT, 'The file to load file system plugin from does not exist.', plugin_location)", "docstring": "Loads a TensorFlow FileSystem plugin.\n\nArgs:\nplugin_location: Path to the plugin. Relative or absolute filesystem plugin\npath to a dynamic library file.\n\nReturns:\nNone\n\nRaises:\nOSError: When the file to be loaded is not found.\nRuntimeError: when unable to load the library.", "source": "github-repos"}
{"code": "def sticky_attribute_assignment(trackable, name, value):\n    if isinstance(value, NoDependency):\n        add_dependency = False\n    else:\n        add_dependency = True\n    value = wrap_or_unwrap(value)\n    if not add_dependency:\n        return value\n    if isinstance(value, base.Trackable):\n        trackable._track_trackable(value, name=name, overwrite=True)\n    return value", "docstring": "Adds dependencies, generally called from __setattr__.\n\nThis behavior is shared between Trackable and Model.\n\nRespects NoDependency indicators, but otherwise makes trackable objects\nout of common data structures and tracks objects by their attribute names.\n\nArgs:\ntrackable: The object to add dependencies to (generally the one having\nan attribute assigned).\nname: The attribute name being assigned.\nvalue: The value being assigned. Not necessarily a trackable object.\n\nReturns:\nThe value which should be stored in the attribute (unwrapped from a\nNoDependency object if necessary).", "source": "github-repos"}
{"code": "def get_role(self, name):\n    address = _create_role_address(name)\n    role_list_bytes = None\n    try:\n        role_list_bytes = self._state_view.get(address=address)\n    except KeyError:\n        return None\n    if (role_list_bytes is not None):\n        role_list = _create_from_bytes(role_list_bytes, identity_pb2.RoleList)\n        for role in role_list.roles:\n            if (role.name == name):\n                return role\n    return None", "docstring": "Get a single Role by name.\n\nArgs:\nname (str): The name of the Role.\n\nReturns:\n(:obj:`Role`): The Role that matches the name or None.", "source": "codesearchnet"}
{"code": "async def get_auth(request):\n    \n\n    auth_val = request.get(AUTH_KEY)\n    if auth_val:\n        return auth_val\n\n    auth_policy = request.get(POLICY_KEY)\n    if auth_policy is None:\n        raise RuntimeError('auth_middleware not installed')\n\n    request[AUTH_KEY] = await auth_policy.get(request)\n    return request[AUTH_KEY]", "docstring": "Returns the user_id associated with a particular request.\n\nArgs:\nrequest: aiohttp Request object.\n\nReturns:\nThe user_id associated with the request, or None if no user is\nassociated with the request.\n\nRaises:\nRuntimeError: Middleware is not installed", "source": "juraj-google-style"}
{"code": "def build_to_target_size_from_token_counts(cls, target_size, token_counts, min_val, max_val, num_iterations=4):\n    if (min_val > max_val):\n        raise ValueError('Lower bound for the minimum token count is greater than the upper bound.')\n\n    def bisect(min_val, max_val):\n        'Bisection to find the right size.'\n        present_count = ((max_val + min_val) \n        logger.info(('Trying min_count %d' % present_count))\n        subtokenizer = cls()\n        subtokenizer.build_from_token_counts(token_counts, present_count, num_iterations)\n        logger.info('min_count %d attained a %d vocab_size', present_count, subtokenizer.vocab_size)\n        if ((subtokenizer.vocab_size == target_size) or (min_val >= max_val)):\n            return subtokenizer\n        if (subtokenizer.vocab_size > target_size):\n            other_subtokenizer = bisect((present_count + 1), max_val)\n        else:\n            other_subtokenizer = bisect(min_val, (present_count - 1))\n        if (other_subtokenizer is None):\n            return subtokenizer\n        if (abs((other_subtokenizer.vocab_size - target_size)) < abs((subtokenizer.vocab_size - target_size))):\n            return other_subtokenizer\n        return subtokenizer\n    return bisect(min_val, max_val)", "docstring": "Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`.\n\nUses simple recursive binary search to find a minimum token count that most\nclosely matches the `target_size`.\n\nArgs:\ntarget_size: Desired vocab_size to approximate.\ntoken_counts: A dictionary of token counts, mapping string to int.\nmin_val: An integer; lower bound for the minimum token count.\nmax_val: An integer; upper bound for the minimum token count.\nnum_iterations: An integer; how many iterations of refinement.\n\nReturns:\nA SubwordTextTokenizer instance.\n\nRaises:\nValueError: If `min_val` is greater than `max_val`.", "source": "codesearchnet"}
{"code": "def AttachUserList(client, ad_group_id, user_list_id):\n    ad_group_criterion_service = client.GetService('AdGroupCriterionService', 'v201809')\n    user_list = {'xsi_type': 'CriterionUserList', 'userListId': user_list_id}\n    ad_group_criterion = {'xsi_type': 'BiddableAdGroupCriterion', 'criterion': user_list, 'adGroupId': ad_group_id}\n    operations = [{'operator': 'ADD', 'operand': ad_group_criterion}]\n    return ad_group_criterion_service.mutate(operations)['value'][0]", "docstring": "Links the provided ad group and user list.\n\nArgs:\nclient: an AdWordsClient instance.\nad_group_id: an int ad group ID.\nuser_list_id: an int user list ID.\n\nReturns:\nThe ad group criterion that was successfully created.", "source": "codesearchnet"}
{"code": "def _parse_control_fields(self, fields, tag_id=\"tag\"):\n        \n        for field in fields:\n            params = field.params\n\n            \n            if tag_id not in params:\n                continue\n\n            self.controlfields[params[tag_id]] = field.getContent().strip()", "docstring": "Parse control fields.\n\nArgs:\nfields (list): list of HTMLElements\ntag_id (str):  parameter name, which holds the information, about\nfield name this is normally \"tag\", but in case of\noai_marc \"id\".", "source": "juraj-google-style"}
{"code": "def _render(self):\n    message = Message()\n    message.add(Heading(tr('Problem'), **ORANGE_LEVEL_4_STYLE))\n    message.add(Paragraph(tr('The following problem(s) were encountered whilst running the analysis.')))\n    items = BulletedList()\n    for p in reversed(self.problems):\n        items.add(p)\n    message.add(items)\n    message.add(Heading(tr('Suggestion'), **GREEN_LEVEL_4_STYLE))\n    message.add(Paragraph(tr('You can try the following to resolve the issue:')))\n    if (len(self.suggestions) < 1):\n        suggestions = self.standard_suggestions()\n        message.add(suggestions)\n    else:\n        items = BulletedList()\n        for s in reversed(self.suggestions):\n            if (s is not None):\n                items.add(s)\n        message.add(items)\n    if (len(self.details) > 0):\n        items = BulletedList()\n        message.add(Heading(tr('Details'), **ORANGE_LEVEL_5_STYLE))\n        message.add(Paragraph(tr('These additional details were reported when the problem occurred.')))\n        for d in self.details:\n            if (d is not None):\n                items.add(d)\n        message.add(items)\n    message.add(Heading(tr('Diagnostics'), **TRACEBACK_STYLE))\n    message.add(self.tracebacks)\n    return message", "docstring": "Create a Message version of this ErrorMessage\n\nArgs:\nnone\n\nReturns:\nthe Message instance of this ErrorMessage\n\nRaises:\nErrors are propagated", "source": "codesearchnet"}
{"code": "def _build_request(self, verb, verb_arguments):\n    method = getattr(self._component, verb)\n    method_args = {str(k): v for (k, v) in verb_arguments.items()}\n    return method(**method_args)", "docstring": "Builds HttpRequest object.\n\nArgs:\nverb (str): Request verb (ex. insert, update, delete).\nverb_arguments (dict): Arguments to be passed with the request.\n\nReturns:\nhttplib2.HttpRequest: HttpRequest to be sent to the API.", "source": "codesearchnet"}
{"code": "def _set_variable_or_list_initializer(variable_or_list, ckpt_file, tensor_name):\n    if isinstance(variable_or_list, (list, tuple)):\n        slice_name = None\n        for v in variable_or_list:\n            slice_info = v._save_slice_info\n            if slice_name is None:\n                slice_name = slice_info.full_name\n            elif slice_name != slice_info.full_name:\n                raise ValueError('Slices must all be from the same tensor: %s != %s' % (slice_name, slice_info.full_name))\n            _set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)\n    else:\n        _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, '')", "docstring": "Overrides initialization op of given variable or list of variables.\n\nCalls `_set_checkpoint_initializer` for each variable in the given list of\nvariables.\n\nArgs:\nvariable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.\nckpt_file: string, full path of the checkpoint.\ntensor_name: Name of the tensor to load from the checkpoint.\n\nRaises:\nValueError: if all objects in `variable_or_list` are not partitions of the\nsame large variable.", "source": "github-repos"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n    logits = outputs.logits\n    if target_sizes is not None:\n        if len(logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        if is_torch_tensor(target_sizes):\n            target_sizes = target_sizes.numpy()\n        semantic_segmentation = []\n        for idx in range(len(logits)):\n            resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = logits.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`SegformerForSemanticSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple]` of length `batch_size`, *optional*):\nList of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\npredictions will not be resized.\n\nReturns:\nsemantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\nsegmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\nspecified). Each entry of each `torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def describe_all(self, full=False):\n        \n        for table in self.tabs:\n            yield self.tabs[table]().describe(full)", "docstring": "Prints description information about all tables registered\nArgs:\nfull (bool): Also prints description of post processors.", "source": "juraj-google-style"}
{"code": "def get_table_schema_from_string(schema):\n    table_schema = bigquery.TableSchema()\n    schema_list = [s.strip() for s in schema.split(',')]\n    for field_and_type in schema_list:\n        field_name, field_type = field_and_type.split(':')\n        field_schema = bigquery.TableFieldSchema()\n        field_schema.name = field_name\n        field_schema.type = field_type\n        field_schema.mode = 'NULLABLE'\n        table_schema.fields.append(field_schema)\n    return table_schema", "docstring": "Transform the string table schema into a\n:class:`~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema` instance.\n\nArgs:\nschema (str): The string schema to be used if the BigQuery table to write\nhas to be created.\n\nReturns:\n~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema:\nThe schema to be used if the BigQuery table to write has to be created\nbut in the :class:`~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema` format.", "source": "github-repos"}
{"code": "def rightClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):\n    _failSafeCheck()\n    click(x, y, 1, 0.0, 'right', _pause=False)\n    _autoPause(pause, _pause)", "docstring": "Performs a right mouse button click.\n\nThis is a wrapper function for click('right', x, y).\n\nThe x and y parameters detail where the mouse event happens. If None, the\ncurrent mouse position is used. If a float value, it is rounded down. If\noutside the boundaries of the screen, the event happens at edge of the\nscreen.\n\nArgs:\nx (int, float, None, tuple, optional): The x position on the screen where the\nclick happens. None by default. If tuple, this is used for x and y.\nIf x is a str, it's considered a filename of an image to find on\nthe screen with locateOnScreen() and click the center of.\ny (int, float, None, optional): The y position on the screen where the\nclick happens. None by default.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _test_connection(url):\n        \n        import pika\n        try:\n            with closing(pika.BlockingConnection(pika.URLParameters(url))) as conn:\n                conn.channel()\n        except pika.exceptions.ConnectionClosed as e:\n            raise ValidationError(e)", "docstring": "Attempt to connect to amqp\n\nArgs:\nurl: string in the form \"amqp://[user]:[password]@[host]\"", "source": "juraj-google-style"}
{"code": "def destroy_team(self):\n    request = self._get_request()\n    request.post(url=self.TEAM_DESTROY_URL, get_json=False)", "docstring": "Delete your Team\n\nDeletes your Team. Can only be invoked when you have a team with only one member left (yourself).\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def reviews(self, packageName, filterByDevice=False, sort=2, nb_results=None, offset=None):\n    path = (REVIEWS_URL + '?doc={}&sort={}'.format(requests.utils.quote(packageName), sort))\n    if (nb_results is not None):\n        path += '&n={}'.format(nb_results)\n    if (offset is not None):\n        path += '&o={}'.format(offset)\n    if filterByDevice:\n        path += '&dfil=1'\n    data = self.executeRequestApi2(path)\n    output = []\n    for review in data.payload.reviewResponse.getResponse.review:\n        output.append(utils.parseProtobufObj(review))\n    return output", "docstring": "Browse reviews for an application\n\nArgs:\npackageName (str): app unique ID.\nfilterByDevice (bool): filter results for current device\nsort (int): sorting criteria (values are unknown)\nnb_results (int): max number of reviews to return\noffset (int): return reviews starting from an offset value\n\nReturns:\ndict object containing all the protobuf data returned from\nthe api", "source": "codesearchnet"}
{"code": "def _GetISO8601String(self, structure):\n    \n    time_offset = structure.time_offset\n    month = timelib.MONTH_DICT.get(structure.month.lower(), 0)\n\n    try:\n      time_offset_hours = int(time_offset[1:3], 10)\n      time_offset_minutes = int(time_offset[3:5], 10)\n    except (IndexError, TypeError, ValueError) as exception:\n      raise ValueError(\n          'unable to parse time zone offset with error: {0!s}.'.format(\n              exception))\n\n    try:\n      date_time_string = (\n          '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.000000'\n          '{6:s}{7:02d}:{8:02d}').format(\n              structure.year, month, structure.day, structure.hours,\n              structure.minutes, structure.seconds, time_offset[0],\n              time_offset_hours, time_offset_minutes)\n    except ValueError as exception:\n      raise ValueError(\n          'unable to format date time string with error: {0!s}.'.format(\n              exception))\n\n    return date_time_string", "docstring": "Normalize date time parsed format to an ISO 8601 date time string.\nThe date and time values in Apache access log files are formatted as:\n\"[18/Sep/2011:19:18:28 -0400]\".\n\nArgs:\nstructure (pyparsing.ParseResults): structure of tokens derived from a\nline of a text file.\n\nReturns:\nstr: ISO 8601 date time string.\n\nRaises:\nValueError: if the structure cannot be converted into a date time string.", "source": "juraj-google-style"}
{"code": "def multiply(self, other):\n        \n        if not isinstance(other, Number):\n            raise QiskitError(\"other is not a number\")\n        \n        \n        \n        if isinstance(other, complex) or other < 1:\n            \n            return Stinespring(Choi(self).multiply(other))\n        \n        \n        num = np.sqrt(other)\n        stine_l, stine_r = self._data\n        stine_l = num * self._data[0]\n        stine_r = None\n        if self._data[1] is not None:\n            stine_r = num * self._data[1]\n        return Stinespring((stine_l, stine_r), self.input_dims(),\n                           self.output_dims())", "docstring": "Return the QuantumChannel self + other.\n\nArgs:\nother (complex): a complex number.\n\nReturns:\nStinespring: the scalar multiplication other * self as a\nStinespring object.\n\nRaises:\nQiskitError: if other is not a valid scalar.", "source": "juraj-google-style"}
{"code": "def get_num_samples(repr_ds: RepresentativeDataset) -> Optional[int]:\n    if isinstance(repr_ds, Sized):\n        try:\n            return len(repr_ds)\n        except Exception as ex:\n            logging.info('Cannot determine the size of the dataset (%s).', ex)\n            return None\n    else:\n        return None", "docstring": "Returns the number of samples if known.\n\nArgs:\nrepr_ds: Representative dataset.\n\nReturns:\nReturns the total number of samples in `repr_ds` if it can be determined\nwithout iterating the entier dataset. Returns None iff otherwise. When it\nreturns None it does not mean the representative dataset is infinite or it\nis malformed; it simply means the size cannot be determined without\niterating the whole dataset.", "source": "github-repos"}
{"code": "def get_name_or_instance_id(self, with_id=False):\n        \n        name = self.get_tag('Name', case_sensitive=False)\n        if name and len(name.value.strip()) > 0:\n            return '{0} ({1})'.format(name.value, self.id) if with_id else name.value\n\n        return self.id", "docstring": "Returns the name of an instance if existant, else return the instance id\n\nArgs:\nwith_id (bool): Include the instance ID even if the name is found (default: False)\n\nReturns:\nName and/or instance ID of the instance object", "source": "juraj-google-style"}
{"code": "def get_diff(value1, value2, name1, name2):\n    \n    lines1 = [line + \"\\n\" for line in value1.splitlines()]\n    lines2 = [line + \"\\n\" for line in value2.splitlines()]\n    diff_lines = difflib.context_diff(\n        lines1, lines2, fromfile=name1, tofile=name2\n    )\n    return \"\".join(diff_lines)", "docstring": "Get a diff between two strings.\n\nArgs:\nvalue1 (str): First string to be compared.\nvalue2 (str): Second string to be compared.\nname1 (str): Name of the first string.\nname2 (str): Name of the second string.\n\nReturns:\nstr: The full diff.", "source": "juraj-google-style"}
{"code": "def addResource(self, pid):\n    self._check_initialized()\n    try:\n        self.getObjectByPid(pid)\n        return\n    except IndexError:\n        pass\n    oid = self._pid_to_id(pid)\n    obj = rdflib.URIRef(oid)\n    ag = self.getAggregation()\n    self.add((ag, ORE.aggregates, obj))\n    self.add((obj, ORE.isAggregatedBy, ag))\n    self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid)))", "docstring": "Add a resource to the Resource Map.\n\nArgs:\npid : str", "source": "codesearchnet"}
{"code": "def _MakeSavedModelV2(self, run_params):\n    saved_model_dir = trt_test.TfTrtIntegrationTestBase._MakeSavedModelV2(self, run_params)\n    saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)\n    new_saved_model = saved_model_pb2.SavedModel()\n    new_saved_model.CopyFrom(saved_model_proto)\n    new_meta_graph_def = new_saved_model.meta_graphs[0]\n    prefix_len = len('__inference_')\n    for func_def in new_meta_graph_def.graph_def.library.function:\n        logging.info('_MakeSavedModelV2, func_def name: %s', func_def.signature.name)\n        func_name_without_prefix = func_def.signature.name[prefix_len:]\n        if func_name_without_prefix.startswith('_conv_and_pool_0'):\n            func_def.attr['_noinline'].CopyFrom(attr_value_pb2.AttrValue(b=True))\n            self._copy_test_attributes_to_func_def(func_def)\n    old_saved_model_file = os.path.join(saved_model_dir, constants.SAVED_MODEL_FILENAME_PB)\n    if os.path.exists(old_saved_model_file):\n        os.remove(old_saved_model_file)\n    path = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n    file_io.write_string_to_file(path, new_saved_model.SerializeToString(deterministic=True))\n    return saved_model_dir", "docstring": "Write the saved model as an input for testing.\n\nIn addition to creating a SavedModel like its parent method, this method\nreplaces this SavedModel by adding TF-TRT conversion parameters as function\nattributes to each function in the SavedModel.\n\nArgs:\nrun_params: The current test run parameters.\n\nReturns:\nThe directory of the saved model.", "source": "github-repos"}
{"code": "def _instance_transform(fqdn, o, *args, **kwargs):\n    return _package_transform(o, fqdn, *args, start=0, **kwargs)", "docstring": "Applies an instance method with name `fqdn` to `o`.\n\nArgs:\nfqdn (str): fully-qualified domain name of the object.\no: object to apply instance method to.", "source": "codesearchnet"}
{"code": "def DeregisterCredentials(cls, credentials):\n    if (credentials.type_indicator not in cls._credentials):\n        raise KeyError('Credential object not set for type indicator: {0:s}.'.format(credentials.type_indicator))\n    del cls._credentials[credentials.type_indicator]", "docstring": "Deregisters a path specification credentials.\n\nArgs:\ncredentials (Credentials): credentials.\n\nRaises:\nKeyError: if credential object is not set for the corresponding\ntype indicator.", "source": "codesearchnet"}
{"code": "def alltoall_pointtwise(xs, devices, split_axis, concat_axis):\n  \n  n = len(xs)\n  if n == 1:\n    return xs\n  \n  parts = mtf.transpose_list_of_lists(\n      mtf.parallel(devices, tf.split, xs, [n] * n, axis=[split_axis] * n))\n  return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)", "docstring": "MPI alltoall operation.\n\nImplementation of alltoall using pointwise communication.\n\nArgs:\nxs: a list of n tf.Tensors\ndevices: a list of n strings\nsplit_axis: an integer\nconcat_axis: an integer\n\nReturns:\na list of n Tensors", "source": "juraj-google-style"}
{"code": "def import_to_tensorboard(model_dir, log_dir, tag_set):\n    with session.Session(graph=ops.Graph()) as sess:\n        input_graph_def = saved_model_utils.get_meta_graph_def(model_dir, tag_set).graph_def\n        importer.import_graph_def(input_graph_def)\n        pb_visual_writer = summary.FileWriter(log_dir)\n        pb_visual_writer.add_graph(sess.graph)\n        print('Model Imported. Visualize by running: tensorboard --logdir={}'.format(log_dir))", "docstring": "View an SavedModel as a graph in Tensorboard.\n\nArgs:\nmodel_dir: The directory containing the SavedModel to import.\nlog_dir: The location for the Tensorboard log to begin visualization from.\ntag_set: Group of tag(s) of the MetaGraphDef to load, in string format,\nseparated by ','. For tag-set contains multiple tags, all tags must be\npassed in.\nUsage: Call this function with your SavedModel location and desired log\ndirectory. Launch Tensorboard by pointing it to the log directory. View your\nimported SavedModel as a graph.", "source": "github-repos"}
{"code": "def infer_location(self, location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit):\n    self.segments = [segment.infer_location(location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit) for segment in self.segments]\n    return self", "docstring": "In-place location inferring of segments\n\nReturns:\nThis track", "source": "codesearchnet"}
{"code": "def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):\n        \n        \n        if self.is_requestable():\n            self._check_required_fields('dataset-requestable', ignore_fields)\n        else:\n            self._check_required_fields('dataset', ignore_fields)\n            if len(self.resources) == 0 and not allow_no_resources:\n                raise HDXError('There are no resources! Please add at least one resource!')\n            for resource in self.resources:\n                ignore_fields = ['package_id']\n                resource.check_required_fields(ignore_fields=ignore_fields)", "docstring": "Check that metadata for dataset and its resources is complete. The parameter ignore_fields\nshould be set if required to any fields that should be ignored for the particular operation.\n\nArgs:\nignore_fields (List[str]): Fields to ignore. Default is [].\nallow_no_resources (bool): Whether to allow no resources. Defaults to False.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def read_kw_file():\n    self_path = os.path.dirname(__file__)\n    kw_list_path = join(self_path, '../templates/keyword_list.json.bz2')\n    with bz2.BZ2File(kw_list_path) as f:\n        kw_list = f.read()\n    return json.loads(kw_list)", "docstring": "Read content of the file containing keyword informations in JSON. File is\npacked using BZIP.\n\nReturns:\nlist: List of dictionaries containing keywords.", "source": "codesearchnet"}
{"code": "def next_sample(uid):\n    return next(_SHARED_SEQUENCES[uid])", "docstring": "Gets the next value from the generator `uid`.\n\nTo allow multiple generators to be used at the same time, we use `uid` to\nget a specific one. A single generator would cause the validation to\noverwrite the training generator.\n\nArgs:\nuid: int, generator identifier\n\nReturns:\nThe next value of generator `uid`.", "source": "github-repos"}
{"code": "def plot(self, ax=None, legend=None, return_fig=False, **kwargs):\n        \n        if ax is None:\n            fig = plt.figure(figsize=(2, 10))\n            ax = fig.add_subplot(111)\n            return_ax = False\n        else:\n            return_ax = True\n\n        d = None\n        if legend is not None:\n            try:\n                d = legend.get_decor(self)\n            except:\n                pass\n\n        if d is not None:\n            kwargs['color'] = d.colour\n            kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)\n            kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')\n\n            \n            axkwargs = {}\n\n            xlim = getattr(d, 'xlim', None)\n            if xlim is not None:\n                axkwargs['xlim'] = list(map(float, xlim.split(',')))\n\n            xticks = getattr(d, 'xticks', None)\n            if xticks is not None:\n                axkwargs['xticks'] = list(map(float, xticks.split(',')))\n\n            xscale = getattr(d, 'xscale', None)\n            if xscale is not None:\n                axkwargs['xscale'] = xscale\n\n            ax.set(**axkwargs)\n\n        ax.plot(self, self.basis, **kwargs)\n        ax.set_title(self.mnemonic)  \n        ax.set_xlabel(self.units)\n\n        if False:  \n            ax.xaxis.tick_top()\n\n        if True:  \n            labels = ax.get_xticklabels()\n            for label in labels:\n                label.set_rotation(90)\n\n        ax.set_ylim([self.stop, self.start])\n        ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')\n\n        if return_ax:\n            return ax\n        elif return_fig:\n            return fig\n        else:\n            return None", "docstring": "Plot a curve.\n\nArgs:\nax (ax): A matplotlib axis.\nlegend (striplog.legend): A legend. Optional.\nreturn_fig (bool): whether to return the matplotlib figure.\nDefault False.\nkwargs: Arguments for ``ax.set()``\n\nReturns:\nax. If you passed in an ax, otherwise None.", "source": "juraj-google-style"}
{"code": "def parse_history_node(h_node):\n        \n        if isinstance(h_node, dict):\n            return HistoryNode.from_dict(h_node)\n\n        else:\n            if len(h_node) != 3:\n                raise ValueError(\"Invalid History node, \"\n                                 \"should be dict or (name, version, \"\n                                 \"description) tuple: {}\".format(h_node))\n            return HistoryNode(h_node[0], h_node[1], h_node[2])", "docstring": "Parses a History Node object from either a dict or a tuple.\n\nArgs:\nh_node: A dict with name/url/description fields or a 3-element\ntuple.\n\nReturns:\nHistory node.", "source": "juraj-google-style"}
{"code": "def lookup(self, name):\n    name = compat.as_str(name)\n    if name in self._registry:\n        return self._registry[name][_TYPE_TAG]\n    else:\n        raise LookupError('%s registry has no entry for: %s' % (self._name, name))", "docstring": "Looks up \"name\".\n\nArgs:\nname: a string specifying the registry key for the candidate.\nReturns:\nRegistered object if found\nRaises:\nLookupError: if \"name\" has not been registered.", "source": "github-repos"}
{"code": "def _options_form_default(self):\n    if (not self.profile_list):\n        return ''\n    if callable(self.profile_list):\n        return self._render_options_form_dynamically\n    else:\n        return self._render_options_form(self.profile_list)", "docstring": "Build the form template according to the `profile_list` setting.\n\nReturns:\n'' when no `profile_list` has been defined\nThe rendered template (using jinja2) when `profile_list` is defined.", "source": "codesearchnet"}
{"code": "def describe(self, percentiles=None, include=None, exclude=None):\n        \n        if include is not None and (isinstance(include, np.dtype) or include != \"all\"):\n            if not is_list_like(include):\n                include = [include]\n            include = [\n                np.dtype(i)\n                if not (isinstance(i, type) and i.__module__ == \"numpy\")\n                else i\n                for i in include\n            ]\n            if not any(\n                (isinstance(inc, np.dtype) and inc == d)\n                or (\n                    not isinstance(inc, np.dtype)\n                    and inc.__subclasscheck__(getattr(np, d.__str__()))\n                )\n                for d in self._get_dtypes()\n                for inc in include\n            ):\n                \n                raise ValueError(\"No objects to concatenate\")\n        if exclude is not None:\n            if not is_list_like(exclude):\n                exclude = [exclude]\n            exclude = [np.dtype(e) for e in exclude]\n            if all(\n                (isinstance(exc, np.dtype) and exc == d)\n                or (\n                    not isinstance(exc, np.dtype)\n                    and exc.__subclasscheck__(getattr(np, d.__str__()))\n                )\n                for d in self._get_dtypes()\n                for exc in exclude\n            ):\n                \n                raise ValueError(\"No objects to concatenate\")\n        if percentiles is not None:\n            pandas.DataFrame()._check_percentile(percentiles)\n        return self.__constructor__(\n            query_compiler=self._query_compiler.describe(\n                percentiles=percentiles, include=include, exclude=exclude\n            )\n        )", "docstring": "Generates descriptive statistics that summarize the central tendency,\ndispersion and shape of a dataset's distribution, excluding NaN values.\n\nArgs:\npercentiles (list-like of numbers, optional):\nThe percentiles to include in the output.\ninclude: White-list of data types to include in results\nexclude: Black-list of data types to exclude in results\n\nReturns: Series/DataFrame of summary statistics", "source": "juraj-google-style"}
{"code": "def default_bucket(self):\n    if self._default_bucket:\n        return self._default_bucket\n    account = self.boto_session.client('sts').get_caller_identity()['Account']\n    region = self.boto_session.region_name\n    default_bucket = 'sagemaker-{}-{}'.format(region, account)\n    s3 = self.boto_session.resource('s3')\n    try:\n        if (region == 'us-east-1'):\n            s3.create_bucket(Bucket=default_bucket)\n        else:\n            s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={'LocationConstraint': region})\n        LOGGER.info('Created S3 bucket: {}'.format(default_bucket))\n    except ClientError as e:\n        error_code = e.response['Error']['Code']\n        message = e.response['Error']['Message']\n        if (error_code == 'BucketAlreadyOwnedByYou'):\n            pass\n        elif ((error_code == 'OperationAborted') and ('conflicting conditional operation' in message)):\n            pass\n        elif (error_code == 'TooManyBuckets'):\n            s3.meta.client.head_bucket(Bucket=default_bucket)\n        else:\n            raise\n    self._default_bucket = default_bucket\n    return self._default_bucket", "docstring": "Return the name of the default bucket to use in relevant Amazon SageMaker interactions.\n\nReturns:\nstr: The name of the default bucket, which is of the form: ``sagemaker-{region}-{AWS account ID}``.", "source": "codesearchnet"}
{"code": "def CheckDirectory(self, path, extension='yaml'):\n    \n    result = True\n\n    if extension:\n      glob_spec = os.path.join(path, '*.{0:s}'.format(extension))\n    else:\n      glob_spec = os.path.join(path, '*')\n\n    for definition_file in sorted(glob.glob(glob_spec)):\n      if not self.CheckFile(definition_file):\n        result = False\n\n    return result", "docstring": "Validates definition files in a directory.\n\nArgs:\npath (str): path of the definition file.\nextension (Optional[str]): extension of the filenames to read.\n\nReturns:\nbool: True if the directory contains valid definitions.", "source": "juraj-google-style"}
{"code": "def get_rows(self, reportId: int=None, timeout: int=60 * 3) -> typing.Iterator[dict]:\n    if reportId is None:\n        reportId = self.reportId\n    while timeout > 0:\n        report = API_SearchAds(self.config, self.auth).reports().get(reportId=reportId).execute()\n        if report['isReportReady']:\n            for fragment in range(len(report['files'])):\n                rows = csv_to_rows(API_SearchAds(self.config, self.auth).reports().getFile(reportId=reportId, reportFragment=fragment).execute())\n                if fragment > 0:\n                    next(rows)\n                yield from rows\n            break\n        else:\n            if self.config.verbose:\n                print('.', end='')\n            sleep(60)\n            timeout -= 1", "docstring": "Return each row of data from a report as a generator.\n\nWait up to 3 hours with 1 minute poll intervals for report to finish.\nHandle fragmented downloads.\n\nArgs:\nreportId - optional,  if not given uses prior value from request(...) call.\ntimeout - optional, number of minutes to wait for report to complete.\n\nReturns:\nGenerator with lists of column values.", "source": "github-repos"}
{"code": "def user(self, user: str, token: Optional[str] = None) -> \"IntentAPI\":\n        \n        if not self.bot:\n            return self.client.intent(user, token)\n        else:\n            self.log.warning(\"Called IntentAPI\n            return self.bot.client.intent(user, token)", "docstring": "Get the intent API for a specific user. This is just a proxy to :func:`~HTTPAPI.intent`.\n\nYou should only call this method for the bot user. Calling it with child intent APIs will\nresult in a warning log.\n\nArgs:\nuser: The Matrix ID of the user whose intent API to get.\ntoken: The access token to use for the Matrix ID.\n\nReturns:\nThe IntentAPI for the given user.", "source": "juraj-google-style"}
{"code": "def EqualTo(self, value):\n    \n    self._awql = self._CreateSingleValueCondition(value, '=')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "class MusicgenUnconditionalInput(ModelOutput):\n    encoder_outputs: Tuple[torch.FloatTensor] = None\n    attention_mask: Optional[torch.LongTensor] = None\n    guidance_scale: Optional[float] = None", "docstring": "Args:\nencoder_outputs  (`Tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`):\nSequence of hidden-states at the output of the last layer of the text encoder model.\nattention_mask (`torch.LongTensor`)  of shape `(batch_size, sequence_length)`, *optional*):\nEncoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0,\n1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.\nguidance_scale (`float`, *optional*):\nGuidance scale for classifier free guidance, setting the balance between the conditional logits (predicted\nfrom the prompts) and the unconditional logits (predicted without prompts).", "source": "github-repos"}
{"code": "def container(container_name) -> ContextManager[str]:\n    return get_default_graph().container(container_name)", "docstring": "Wrapper for `Graph.container()` using the default graph.\n\nArgs:\ncontainer_name: The container string to use in the context.\n\nReturns:\nA context manager that specifies the default container to use for newly\ncreated stateful ops.", "source": "github-repos"}
{"code": "def run(self, job_name, handler_spec, input_reader_spec, output_writer_spec=None, params=None, shards=None, base_path=None):\n    if (shards is None):\n        shards = parameters.config.SHARD_COUNT\n    if (base_path is None):\n        base_path = parameters.config.BASE_PATH\n    mapreduce_id = control.start_map(job_name, handler_spec, input_reader_spec, (params or {}), mapreduce_parameters={'done_callback': self.get_callback_url(), 'done_callback_method': 'GET', 'pipeline_id': self.pipeline_id, 'base_path': base_path}, shard_count=shards, output_writer_spec=output_writer_spec, queue_name=self.queue_name)\n    self.fill(self.outputs.job_id, mapreduce_id)\n    self.set_status(console_url=('%s/detail?mapreduce_id=%s' % (base_path, mapreduce_id)))", "docstring": "Start a mapreduce job.\n\nArgs:\njob_name: mapreduce name. Only for display purpose.\nhandler_spec: fully qualified name to your map function/class.\ninput_reader_spec: fully qualified name to input reader class.\noutput_writer_spec: fully qualified name to output writer class.\nparams: a dictionary of parameters for input reader and output writer\ninitialization.\nshards: number of shards. This provides a guide to mapreduce. The real\nnumber of shards is determined by how input are splited.", "source": "codesearchnet"}
{"code": "def _GetUrl(self, url, cache, database):\n    \n    if not url:\n      return ''\n\n    url_cache_results = cache.GetResults('url')\n    if not url_cache_results:\n      result_set = database.Query(self._URL_CACHE_QUERY)\n\n      cache.CacheQueryResults(result_set, 'url', 'id', ('url', 'title'))\n      url_cache_results = cache.GetResults('url')\n\n    reference_url, reference_title = url_cache_results.get(url, ['', ''])\n\n    if not reference_url:\n      return ''\n\n    return '{0:s} ({1:s})'.format(reference_url, reference_title)", "docstring": "Retrieves an URL from a reference to an entry in the from_visit table.\n\nArgs:\nurl (str): URL.\ncache (SQLiteCache): cache.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nstr: URL or an empty string if no URL was found.", "source": "juraj-google-style"}
{"code": "def set_boolean(self, option, value):\n    if (not isinstance(value, bool)):\n        raise TypeError(('%s must be a boolean' % option))\n    self.options[option] = str(value).lower()", "docstring": "Set a boolean option.\n\nArgs:\noption (str): name of option.\nvalue (bool): value of the option.\n\nRaises:\nTypeError: Value must be a boolean.", "source": "codesearchnet"}
{"code": "def _get_modules(package, attr_name, constants_attr_name):\n    modules = set()\n    for module in list(sys.modules.values()):\n        if not module or not hasattr(module, '__name__') or package not in module.__name__:\n            continue\n        for module_contents_name in dir(module):\n            attr = getattr(module, module_contents_name)\n            _, attr = tf_decorator.unwrap(attr)\n            if module_contents_name == constants_attr_name:\n                for exports, _ in attr:\n                    modules.update([_get_module_from_symbol(export) for export in exports])\n                continue\n            if hasattr(attr, '__dict__') and attr_name in attr.__dict__:\n                modules.update([_get_module_from_symbol(export) for export in getattr(attr, attr_name)])\n    return modules", "docstring": "Get list of TF API modules.\n\nArgs:\npackage: We only look at modules that contain package in the name.\nattr_name: Attribute set on TF symbols that contains API names.\nconstants_attr_name: Attribute set on TF modules that contains\nAPI constant names.\n\nReturns:\nSet of TensorFlow API modules.", "source": "github-repos"}
{"code": "def add_exit_node(self, ast_node, section_id, guards):\n    node = self._add_jump_node(ast_node, guards)\n    self.exits[section_id].add(node)\n    return node", "docstring": "Grows the graph by adding an exit node.\n\nThis node becomes an exit for the current section.\n\nArgs:\nast_node: ast.AST\nsection_id: Hashable, the node for which ast_node should be considered to\nbe an exit node\nguards: Tuple[ast.AST, ...], the finally sections that guard ast_node\n\nReturns:\nNode", "source": "github-repos"}
{"code": "def StreamFile(self, filedesc, offset=0, amount=None):\n    \n    reader = FileReader(filedesc, offset=offset)\n    return self.Stream(reader, amount=amount)", "docstring": "Streams chunks of a given file starting at given offset.\n\nArgs:\nfiledesc: A `file` object to stream.\noffset: An integer offset at which the file stream should start on.\namount: An upper bound on number of bytes to read.\n\nReturns:\nGenerator over `Chunk` instances.", "source": "juraj-google-style"}
{"code": "def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):\n\n    def any2bytes(s):\n        'Convert string or number to memory in bytes.'\n        if is_string(s):\n            return int(Memory.from_string(s).to('b'))\n        else:\n            return int(s)\n    if (max_filesize is not None):\n        max_filesize = any2bytes(max_filesize)\n    if exclude_exts:\n        exts = []\n        for e in list_strings(exclude_exts):\n            exts.append(e)\n            if e.endswith('.nc'):\n                exts.append(e.replace('.nc', ''))\n            else:\n                exts.append((e + '.nc'))\n        exclude_exts = exts\n\n    def filter(tarinfo):\n        '\\n            Function that takes a TarInfo object argument and returns the changed TarInfo object.\\n            If it instead returns None the TarInfo object will be excluded from the archive.\\n            '\n        if (tarinfo.issym() or tarinfo.islnk()):\n            if verbose:\n                print(('Excluding link: %s' % tarinfo.name))\n            return None\n        if ((max_filesize is not None) and (tarinfo.size > max_filesize)):\n            if verbose:\n                print(('Excluding %s due to max_filesize' % tarinfo.name))\n            return None\n        if (exclude_exts and any((tarinfo.name.endswith(ext) for ext in exclude_exts))):\n            if verbose:\n                print(('Excluding %s due to extension' % tarinfo.name))\n            return None\n        if (exclude_dirs and any(((dir_name in exclude_dirs) for dir_name in tarinfo.name.split(os.path.sep)))):\n            if verbose:\n                print(('Excluding %s due to exclude_dirs' % tarinfo.name))\n            return None\n        return tarinfo\n    back = os.getcwd()\n    os.chdir(os.path.join(self.workdir, '..'))\n    import tarfile\n    name = ((os.path.basename(self.workdir) + '.tar.gz') if (name is None) else name)\n    with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:\n        tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, exclude=None, filter=filter)\n        if ((self.pyfile is not None) and os.path.exists(self.pyfile)):\n            tar.add(self.pyfile)\n    os.chdir(back)\n    return name", "docstring": "Create a tarball file.\n\nArgs:\nname: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + \"tar.gz\"` if name is None.\nmax_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize\nCan be specified in bytes e.g. `max_files=1024` or with a string with unit e.g. `max_filesize=\"1 Mb\"`.\nNo check is done if max_filesize is None.\nexclude_exts: List of file extensions to be excluded from the tar file.\nexclude_dirs: List of directory basenames to be excluded.\nverbose (int): Verbosity level.\nkwargs: keyword arguments passed to the :class:`TarFile` constructor.\n\nReturns:\nThe name of the tarfile.", "source": "codesearchnet"}
{"code": "def filler(self):\n    if (not self.filled):\n        raise SlotNotFilledError(('Slot with name \"%s\", key \"%s\" not yet filled.' % (self.name, self.key)))\n    return self._filler_pipeline_key.name()", "docstring": "Returns the pipeline ID that filled this slot's value.\n\nReturns:\nA string that is the pipeline ID.\n\nRaises:\nSlotNotFilledError if the value hasn't been filled yet.", "source": "codesearchnet"}
{"code": "def should_include_file_in_search(file_name, extensions, exclude_dirs):\n    \n    return (exclude_dirs is None or not any(file_name.startswith(d) for d in exclude_dirs)) and \\\n        any(file_name.endswith(e) for e in extensions)", "docstring": "Whether or not a filename matches a search criteria according to arguments.\n\nArgs:\nfile_name (str): A file path to check.\nextensions (list): A list of file extensions file should match.\nexclude_dirs (list): A list of directories to exclude from search.\n\nReturns:\nA boolean of whether or not file matches search criteria.", "source": "juraj-google-style"}
{"code": "def cluster_resources(self):\n    resources = defaultdict(int)\n    clients = self.client_table()\n    for client in clients:\n        if client['IsInsertion']:\n            for (key, value) in client['Resources'].items():\n                resources[key] += value\n    return dict(resources)", "docstring": "Get the current total cluster resources.\n\nNote that this information can grow stale as nodes are added to or\nremoved from the cluster.\n\nReturns:\nA dictionary mapping resource name to the total quantity of that\nresource in the cluster.", "source": "codesearchnet"}
{"code": "def Copy(From, To):\n    \n    from benchbuild.utils.cmd import cp\n    cp(\"-ar\", \"--reflink=auto\", From, To)", "docstring": "Small copy wrapper.\n\nArgs:\nFrom (str): Path to the SOURCE.\nTo (str): Path to the TARGET.", "source": "juraj-google-style"}
{"code": "def standardize_sample_or_class_weights(x_weight, output_names, weight_type):\n    if x_weight is None or (isinstance(x_weight, (list, tuple)) and len(x_weight) == 0):\n        return [None for _ in output_names]\n    if len(output_names) == 1:\n        if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:\n            return x_weight\n        if isinstance(x_weight, dict) and output_names[0] in x_weight:\n            return [x_weight[output_names[0]]]\n        else:\n            return [x_weight]\n    if isinstance(x_weight, (list, tuple)):\n        if len(x_weight) != len(output_names):\n            raise ValueError('Provided `' + weight_type + '` was a list of ' + str(len(x_weight)) + ' elements, but the model has ' + str(len(output_names)) + ' outputs. You should provide one `' + weight_type + '`array per model output.')\n        return x_weight\n    if isinstance(x_weight, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)\n        x_weights = []\n        for name in output_names:\n            x_weights.append(x_weight.get(name))\n        return x_weights\n    else:\n        raise TypeError('The model has multiple outputs, so `' + weight_type + '` should be either a list or a dict. Provided `' + weight_type + '` type not understood: ' + str(x_weight))", "docstring": "Maps `sample_weight` or `class_weight` to model outputs.\n\nArgs:\nx_weight: User-provided `sample_weight` or `class_weight` argument.\noutput_names: List of output names (strings) in the model.\nweight_type: A string used purely for exception printing.\n\nReturns:\nA list of `sample_weight` or `class_weight` where there are exactly\none element per model output.\n\nRaises:\nValueError: In case of invalid user-provided argument.", "source": "github-repos"}
{"code": "def parse_response(response, encoding='utf-8'):\n    return requests_toolbelt.multipart.decoder.MultipartDecoder.from_response(response, encoding).parts", "docstring": "Parse a multipart Requests.Response into a tuple of BodyPart objects.\n\nArgs:\nresponse: Requests.Response\n\nencoding:\nThe parser will assume that any text in the HTML body is encoded with this\nencoding when decoding it for use in the ``text`` attribute.\n\nReturns:\ntuple of BodyPart\nMembers: headers (CaseInsensitiveDict), content (bytes), text (Unicode),\nencoding (str).", "source": "codesearchnet"}
{"code": "def iplot_histogram(data, figsize=None, number_to_keep=None, sort='asc', legend=None):\n    html_template = Template('\\n    <p>\\n        <div id=\"histogram_$divNumber\"></div>\\n    </p>\\n    ')\n    javascript_template = Template('\\n    <script>\\n        requirejs.config({\\n            paths: {\\n                qVisualization: \"https:\n    div_number = str(time.time())\n    div_number = re.sub('[.]', '', div_number)\n    if (figsize is None):\n        figsize = (7, 5)\n    options = {'number_to_keep': (0 if (number_to_keep is None) else number_to_keep), 'sort': sort, 'show_legend': 0, 'width': int(figsize[0]), 'height': int(figsize[1])}\n    if legend:\n        options['show_legend'] = 1\n    data_to_plot = []\n    if isinstance(data, dict):\n        data = [data]\n    if (legend and (len(legend) != len(data))):\n        raise VisualizationError((\"Length of legendL (%s) doesn't match number of input executions: %s\" % (len(legend), len(data))))\n    for (item, execution) in enumerate(data):\n        exec_data = process_data(execution, options['number_to_keep'])\n        out_dict = {'data': exec_data}\n        if legend:\n            out_dict['name'] = legend[item]\n        data_to_plot.append(out_dict)\n    html = html_template.substitute({'divNumber': div_number})\n    javascript = javascript_template.substitute({'divNumber': div_number, 'executions': data_to_plot, 'options': options})\n    display(HTML((html + javascript)))", "docstring": "Create a histogram representation.\n\nGraphical representation of the input array using a vertical bars\nstyle graph.\n\nArgs:\ndata (list or dict):  This is either a list of dicts or a single\ndict containing the values to represent (ex. {'001' : 130})\nfigsize (tuple): Figure size in pixels.\nnumber_to_keep (int): The number of terms to plot and\nrest is made into a single bar called other values\nsort (string): Could be 'asc' or 'desc'\nlegend (list): A list of strings to use for labels of the data.\nThe number of entries must match the length of data.\nRaises:\nVisualizationError: When legend is provided and the length doesn't\nmatch the input data.", "source": "codesearchnet"}
{"code": "def tag(self, main_type, sub_type, unique_id, tag, action='GET', owner=None, params=None):\n        \n        params = params or {}\n\n        if owner:\n            params['owner'] = owner\n\n        action = action.upper()\n        if sub_type:\n            url = '/v2/{}/{}/{}/tags/{}'.format(main_type, sub_type, unique_id, quote(tag))\n        else:\n            url = '/v2/{}/{}/tags/{}'.format(main_type, unique_id, quote(tag))\n        response = None\n        if action == 'ADD':\n            response = self.tcex.session.post(url, params=params)\n        elif action == 'DELETE':\n            response = self.tcex.session.delete(url, params=params)\n        elif action == 'GET':\n            response = self.tcex.session.get(url, params=params)\n        else:\n            self.tcex.log.error('_tags error')\n        return response", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\ntag:\naction:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def get_prefix(self, name):\n        \n        if name == 'current':\n            name = self.current\n\n        try:\n            return self.prefixes[name]\n        except KeyError:\n            raise KeyError(\n                'Unable to find prefix \"%s\" in workdir %s' % (name, self.path)\n            )", "docstring": "Retrieve a prefix, resolving the current one if needed\n\nArgs:\nname(str): name of the prefix to retrieve, or current to get the\ncurrent one\n\nReturns:\nself.prefix_class: instance of the prefix with the given name", "source": "juraj-google-style"}
{"code": "def _load_schema_for_record(data, schema=None):\n    \n    if schema is None:\n        if '$schema' not in data:\n            raise SchemaKeyNotFound(data=data)\n        schema = data['$schema']\n\n    if isinstance(schema, six.string_types):\n        schema = load_schema(schema_name=schema)\n    return schema", "docstring": "Load the schema from a given record.\n\nArgs:\ndata (dict): record data.\nschema (Union[dict, str]): schema to validate against.\n\nReturns:\ndict: the loaded schema.\n\nRaises:\nSchemaNotFound: if the given schema was not found.\nSchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was\nfound in ``data``.\njsonschema.SchemaError: if the schema is invalid.", "source": "juraj-google-style"}
{"code": "def model_from_config(config, custom_objects=None):\n    if isinstance(config, list):\n        raise TypeError('`model_from_config` expects a dictionary, not a list. Maybe you meant to use `Sequential.from_config(config)`?')\n    from tensorflow.python.keras.layers import deserialize\n    return deserialize(config, custom_objects=custom_objects)", "docstring": "Instantiates a Keras model from its config.\n\nUsage:\n```\n# for a Functional API model\ntf.keras.Model().from_config(model.get_config())\n\n# for a Sequential model\ntf.keras.Sequential().from_config(model.get_config())\n```\n\nArgs:\nconfig: Configuration dictionary.\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\n\nReturns:\nA Keras model instance (uncompiled).\n\nRaises:\nTypeError: if `config` is not a dictionary.", "source": "github-repos"}
{"code": "def _get_showcase_dataset_dict(self, dataset):\n    if (isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict)):\n        if ('id' not in dataset):\n            dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])\n        dataset = dataset['id']\n    elif (not isinstance(dataset, str)):\n        raise hdx.data.hdxobject.HDXError(('Type %s cannot be added as a dataset!' % type(dataset).__name__))\n    if (is_valid_uuid(dataset) is False):\n        raise hdx.data.hdxobject.HDXError(('%s is not a valid dataset id!' % dataset))\n    return {'showcase_id': self.data['id'], 'package_id': dataset}", "docstring": "Get showcase dataset dict\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary\n\nReturns:\nDict: showcase dataset dict", "source": "codesearchnet"}
{"code": "def __init__(self, class_to_mock):\n    \n\n    \n    \n    MockAnything.__dict__['__init__'](self)\n\n    \n    self._known_methods = set()\n    self._known_vars = set()\n    self._class_to_mock = class_to_mock\n    for method in dir(class_to_mock):\n      if callable(getattr(class_to_mock, method)):\n        self._known_methods.add(method)\n      else:\n        self._known_vars.add(method)", "docstring": "Initialize a mock object.\n\nThis determines the methods and properties of the class and stores them.\n\nArgs:\n# class_to_mock: class to be mocked\nclass_to_mock: class", "source": "juraj-google-style"}
{"code": "def forge_relationship(self, left_id, left_type, right_id, right_type, rel_type='Related To', rel_date=None, rel_confidence='high', rel_reason=''):\n    if (not rel_date):\n        rel_date = datetime.datetime.now()\n    type_trans = self._type_translation(left_type)\n    submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id)\n    params = {'api_key': self.api_key, 'username': self.username}\n    data = {'action': 'forge_relationship', 'right_type': right_type, 'right_id': right_id, 'rel_type': rel_type, 'rel_date': rel_date, 'rel_confidence': rel_confidence, 'rel_reason': rel_reason}\n    r = requests.patch(submit_url, params=params, data=data, proxies=self.proxies, verify=self.verify)\n    if (r.status_code == 200):\n        log.debug('Relationship built successfully: {0} <-> {1}'.format(left_id, right_id))\n        return True\n    else:\n        log.error('Error with status code {0} and message {1} between these indicators: {2} <-> {3}'.format(r.status_code, r.text, left_id, right_id))\n        return False", "docstring": "Forges a relationship between two TLOs.\n\nArgs:\nleft_id: The CRITs ID of the first indicator\nleft_type: The CRITs TLO type of the first indicator\nright_id: The CRITs ID of the second indicator\nright_type: The CRITs TLO type of the second indicator\nrel_type: The relationships type (\"Related To\", etc)\nrel_date: datetime.datetime object for the date of the\nrelationship. If left blank, it will be datetime.datetime.now()\nrel_confidence: The relationship confidence (high, medium, low)\nrel_reason: Reason for the relationship.\nReturns:\nTrue if the relationship was created. False otherwise.", "source": "codesearchnet"}
{"code": "def _OpenFileObject(self, path_spec):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    vhdi_file = pyvhdi.file()\n    vhdi_file.open_file_object(file_object)\n\n    if vhdi_file.parent_identifier:\n      file_system = resolver.Resolver.OpenFileSystem(\n          path_spec.parent, resolver_context=self._resolver_context)\n\n      try:\n        self._OpenParentFile(file_system, path_spec.parent, vhdi_file)\n      finally:\n        file_system.Close()\n\n    self._sub_file_objects.append(file_object)\n\n    self._parent_vhdi_files.reverse()\n    self._sub_file_objects.reverse()\n\n    return vhdi_file", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyvhdi.file: a file-like object.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "def extract_cluster(self, target_sites, **kwargs):\n    cluster = list(target_sites)\n    others = [site for site in self if (site not in cluster)]\n    size = 0\n    while (len(cluster) > size):\n        size = len(cluster)\n        new_others = []\n        for site in others:\n            for site2 in cluster:\n                if CovalentBond.is_bonded(site, site2, **kwargs):\n                    cluster.append(site)\n                    break\n            else:\n                new_others.append(site)\n        others = new_others\n    return cluster", "docstring": "Extracts a cluster of atoms based on bond lengths\n\nArgs:\ntarget_sites ([Site]): List of initial sites to nucleate cluster.\n\\\\*\\\\*kwargs: kwargs passed through to CovalentBond.is_bonded.\n\nReturns:\n[Site/PeriodicSite] Cluster of atoms.", "source": "codesearchnet"}
{"code": "def match_pattern(self, pat, word):\n    segs = self.word_fts(word)\n    if (len(pat) != len(segs)):\n        return None\n    elif all([(set(p) <= s) for (p, s) in zip(pat, segs)]):\n        return segs", "docstring": "Implements fixed-width pattern matching.\n\nMatches just in case pattern is the same length (in segments) as the\nword and each of the segments in the pattern is a featural subset of the\ncorresponding segment in the word. Matches return the corresponding list\nof feature sets; failed matches return None.\n\nArgs:\npat (list): pattern consisting of a sequence of sets of (value,\nfeature) tuples\nword (unicode): a Unicode IPA string consisting of zero or more\nsegments\n\nReturns:\nlist: corresponding list of feature sets or, if there is no match,\nNone", "source": "codesearchnet"}
{"code": "def search(self, scope, search, **kwargs):\n        \n        data = {'scope': scope, 'search': search}\n        return self.http_list('/search', query_data=data, **kwargs)", "docstring": "Search GitLab resources matching the provided string.'\n\nArgs:\nscope (str): Scope of the search\nsearch (str): Search string\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabSearchError: If the server failed to perform the request\n\nReturns:\nGitlabList: A list of dicts describing the resources found.", "source": "juraj-google-style"}
{"code": "def dumps(ms, single=False, pretty_print=False, **kwargs):\n    \n    if single:\n        ms = [ms]\n    return serialize(ms, pretty_print=pretty_print, **kwargs)", "docstring": "Serialize an Xmrs object to the Prolog representation\n\nArgs:\nms: an iterator of Xmrs objects to serialize (unless the\n*single* option is `True`)\nsingle: if `True`, treat *ms* as a single Xmrs object instead\nof as an iterator\npretty_print: if `True`, add newlines and indentation\nReturns:\nthe Prolog string representation of a corpus of Xmrs", "source": "juraj-google-style"}
{"code": "def unbind(self, devices_to_unbind):\n    if (self.entity_api_key == ''):\n        return {'status': 'failure', 'response': 'No API key found in request'}\n    url = (self.base_url + 'api/0.1.0/subscribe/unbind')\n    headers = {'apikey': self.entity_api_key}\n    data = {'exchange': 'amq.topic', 'keys': devices_to_unbind, 'queue': self.entity_id}\n    with self.no_ssl_verification():\n        r = requests.delete(url, json=data, headers=headers)\n        print(r)\n    response = dict()\n    if ('No API key' in str(r.content.decode('utf-8'))):\n        response['status'] = 'failure'\n        r = json.loads(r.content.decode('utf-8'))['message']\n    elif ('unbind' in str(r.content.decode('utf-8'))):\n        response['status'] = 'success'\n        r = r.content.decode('utf-8')\n    else:\n        response['status'] = 'failure'\n        r = r.content.decode('utf-8')\n    response['response'] = str(r)\n    return response", "docstring": "This function allows an entity to unbound devices that are already bound.\n\nArgs:\ndevices_to_unbind (list): an array of devices that are to be unbound ( stop listening)\nExample unbind([\"test10\",\"testDemo105\"])", "source": "codesearchnet"}
{"code": "def main_op():\n    init = variables.global_variables_initializer()\n    init_local = variables.local_variables_initializer()\n    init_tables = lookup_ops.tables_initializer()\n    return control_flow_ops.group(init, init_local, init_tables)", "docstring": "Returns a main op to init variables and tables.\n\nReturns the main op including the group of ops that initializes all\nvariables, initializes local variables and initialize all tables.\n\nReturns:\nThe set of ops to be run as part of the main op upon the load operation.", "source": "github-repos"}
{"code": "def _get_label_encoder_and_max(self, x):\n        \n\n        \n        label_count = x.fillna(NAN_INT).value_counts()\n        n_uniq = label_count.shape[0]\n\n        label_count = label_count[label_count >= self.min_obs]\n        n_uniq_new = label_count.shape[0]\n\n        \n        \n        \n        offset = 0 if n_uniq == n_uniq_new else 1\n\n        label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)\n        max_label = label_encoder.max()\n        label_encoder = label_encoder.to_dict()\n\n        return label_encoder, max_label", "docstring": "Return a mapping from values and its maximum of a column to integer labels.\n\nArgs:\nx (pandas.Series): a categorical column to encode.\n\nReturns:\nlabel_encoder (dict): mapping from values of features to integers\nmax_label (int): maximum label", "source": "juraj-google-style"}
{"code": "def get_actual_replica(self, service_id: str) -> str:\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve replication level of the service')\n    service_details = self.get_service_details(service_id)\n    actual_replica = service_details['Spec']['Mode']['Replicated']['Replicas']\n    return actual_replica", "docstring": "Get the actual replica level of a service.\n\nArgs:\nservice_id (str): docker swarm service id\n\nReturns:\nstr, replicated level of the service", "source": "codesearchnet"}
{"code": "def paginate_resources(cls, request, resources, on_fail_status):\n    if (not resources):\n        return (resources, client_list_control_pb2.ClientPagingResponse())\n    paging = request.paging\n    limit = (min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE)\n    try:\n        if paging.start:\n            start_index = cls.index_by_id(paging.start, resources)\n        else:\n            start_index = 0\n        if ((start_index < 0) or (start_index >= len(resources))):\n            raise AssertionError\n    except AssertionError:\n        raise _ResponseFailed(on_fail_status)\n    paged_resources = resources[start_index:(start_index + limit)]\n    if ((start_index + limit) < len(resources)):\n        paging_response = client_list_control_pb2.ClientPagingResponse(next=cls.id_by_index((start_index + limit), resources), start=cls.id_by_index(start_index, resources), limit=limit)\n    else:\n        paging_response = client_list_control_pb2.ClientPagingResponse(start=cls.id_by_index(start_index, resources), limit=limit)\n    return (paged_resources, paging_response)", "docstring": "Truncates a list of resources based on ClientPagingControls\n\nArgs:\nrequest (object): The parsed protobuf request object\nresources (list of objects): The resources to be paginated\n\nReturns:\nlist: The paginated list of resources\nobject: The ClientPagingResponse to be sent back to the client", "source": "codesearchnet"}
{"code": "def add(self, other):\n        \n        \n        if not isinstance(other, SuperOp):\n            other = SuperOp(other)\n        if self.dim != other.dim:\n            raise QiskitError(\"other QuantumChannel dimensions are not equal\")\n        return SuperOp(self._data + other.data, self.input_dims(),\n                       self.output_dims())", "docstring": "Return the QuantumChannel self + other.\n\nArgs:\nother (QuantumChannel): a quantum channel.\n\nReturns:\nSuperOp: the linear addition self + other as a SuperOp object.\n\nRaises:\nQiskitError: if other cannot be converted to a channel or\nhas incompatible dimensions.", "source": "juraj-google-style"}
{"code": "def graph_structure(self, x, standalone=True):\n        \n        if standalone:\n            x = tf.concat(tf.split(x, 2, axis=0), axis=1)\n\n        with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),\n                      padding='valid', strides=2, kernel_size=3,\n                      data_format='channels_first'), \\\n            argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,\n                     data_format='channels_first', strides=2, kernel_size=4):\n            x = tf.layers.conv2d(pad(x, 3), 64, kernel_size=7, name='conv1')\n            conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2')\n            x = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3')\n            conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)\n            x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')\n            conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)\n            x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')\n            conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)\n            x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')\n            conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)\n\n            flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)\n            flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5', use_bias=False)\n            x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')\n            flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)\n            flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4', use_bias=False)\n            x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')\n            flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)\n            flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3', use_bias=False)\n            x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')\n            flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)\n            flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2', use_bias=False)\n            x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')\n            flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)\n\n            return tf.identity(flow2, name='flow2')", "docstring": "Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0.\n\nArgs:\nx: 2CHW if standalone==True, else NCHW where C=12 is a concatenation\nof 5 tensors of [3, 3, 3, 2, 1] channels.\nstandalone: If True, this model is used to predict flow from two inputs.\nIf False, this model is used as part of the FlowNet2.", "source": "juraj-google-style"}
{"code": "def get_densities(self, spin=None):\n    if (self.densities is None):\n        result = None\n    elif (spin is None):\n        if (Spin.down in self.densities):\n            result = (self.densities[Spin.up] + self.densities[Spin.down])\n        else:\n            result = self.densities[Spin.up]\n    else:\n        result = self.densities[spin]\n    return result", "docstring": "Returns the density of states for a particular spin.\n\nArgs:\nspin: Spin\n\nReturns:\nReturns the density of states for a particular spin. If Spin is\nNone, the sum of all spins is returned.", "source": "codesearchnet"}
{"code": "def unpack(self, buff=None, offset=0):\n        \n        length = UBInt16()\n        length.unpack(buff, offset)\n\n        length.unpack(buff, offset=offset+MeterStats.meter_id.get_size())\n        super().unpack(buff[:offset+length.value], offset=offset)", "docstring": "Unpack *buff* into this object.\n\nThis method will convert a binary data into a readable value according\nto the attribute format.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    format_version = match.get('WebHistoryFileVersion', None)\n    if format_version != 1:\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported Safari history version: {0!s}'.format(format_version))\n      return\n\n    if 'WebHistoryDates' not in match:\n      return\n\n    for history_entry in match.get('WebHistoryDates', {}):\n      last_visited_date = history_entry.get('lastVisitedDate', None)\n      if last_visited_date is None:\n        parser_mediator.ProduceExtractionWarning('missing last visited date')\n        continue\n\n      try:\n        \n        timestamp = float(last_visited_date)\n      except (TypeError, ValueError):\n        parser_mediator.ProduceExtractionWarning(\n            'unable to convert last visited date {0:s}'.format(\n                last_visited_date))\n        continue\n\n      display_title = history_entry.get('displayTitle', None)\n\n      event_data = SafariHistoryEventData()\n      if display_title != event_data.title:\n        event_data.display_title = display_title\n      event_data.title = history_entry.get('title', None)\n      event_data.url = history_entry.get('', None)\n      event_data.visit_count = history_entry.get('visitCount', None)\n      event_data.was_http_non_get = history_entry.get(\n          'lastVisitWasHTTPNonGet', None)\n\n      \n      \n      timestamp = int(timestamp)\n      date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts Safari history items.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def add_comments(self, comments):\n    for comment in comments:\n        if ((comment not in self.comments) and (len(comment) > 0)):\n            self.comments.append(comment)\n        if (len(self.comments[0]) == 0):\n            self.comments.pop(0)", "docstring": "Add comments to the localization entry\n\nArgs:\ncomments (list of str): The comments to be added to the localization entry.", "source": "codesearchnet"}
{"code": "def list(self, *args, **kwargs):\n        \n        return [\n            self.prepare_model(n)\n            for n in self.client.api.nodes(*args, **kwargs)\n        ]", "docstring": "List swarm nodes.\n\nArgs:\nfilters (dict): Filters to process on the nodes list. Valid\nfilters: ``id``, ``name``, ``membership`` and ``role``.\nDefault: ``None``\n\nReturns:\nA list of :py:class:`Node` objects.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> client.nodes.list(filters={'role': 'manager'})", "source": "juraj-google-style"}
{"code": "def eigvals(self, name='eigvals'):\n    if not self.is_self_adjoint:\n        raise NotImplementedError('Only self-adjoint matrices are supported.')\n    with self._name_scope(name):\n        return self._eigvals()", "docstring": "Returns the eigenvalues of this linear operator.\n\nIf the operator is marked as self-adjoint (via `is_self_adjoint`)\nthis computation can be more efficient.\n\nNote: This currently only supports self-adjoint operators.\n\nArgs:\nname:  A name for this `Op`.\n\nReturns:\nShape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.", "source": "github-repos"}
{"code": "def edit_distance_1(self, word):\n    word = word.lower()\n    if (self._check_if_should_check(word) is False):\n        return {word}\n    letters = self._word_frequency.letters\n    splits = [(word[:i], word[i:]) for i in range((len(word) + 1))]\n    deletes = [(L + R[1:]) for (L, R) in splits if R]\n    transposes = [(((L + R[1]) + R[0]) + R[2:]) for (L, R) in splits if (len(R) > 1)]\n    replaces = [((L + c) + R[1:]) for (L, R) in splits if R for c in letters]\n    inserts = [((L + c) + R) for (L, R) in splits for c in letters]\n    return set((((deletes + transposes) + replaces) + inserts))", "docstring": "Compute all strings that are one edit away from `word` using only\nthe letters in the corpus\n\nArgs:\nword (str): The word for which to calculate the edit distance\nReturns:\nset: The set of strings that are edit distance one from the \\\nprovided word", "source": "codesearchnet"}
{"code": "def delete(self, dry=False, meta=None, index_fields=None):\n    from datetime import datetime\n    if (not dry):\n        self.pre_delete()\n    (results, errors) = self._delete_relations(dry)\n    if (not (dry or errors)):\n        self.deleted = True\n        self.deleted_at = datetime.now()\n        self.save(internal=True, meta=meta, index_fields=index_fields)\n        self.post_delete()\n        if settings.ENABLE_CACHING:\n            cache.delete(self.key)\n    return (results, errors)", "docstring": "Sets the objects \"deleted\" field to True and,\ncurrent time to \"deleted_at\" fields then saves it to DB.\n\n\nArgs:\ndry (bool): False. Do not execute the actual deletion.\nJust list what will be deleted as a result of relations.\nmeta (dict): JSON serializable meta data for logging of save operation.\n{'lorem': 'ipsum', 'dolar': 5}\nindex_fields (list): Tuple list for secondary indexing keys in riak (with 'bin' or 'int').\nbin is used for string fields, int is used for integer fields.\n[('lorem','bin'),('dolar','int')]\nReturns:\nTuple. (results [], errors [])", "source": "codesearchnet"}
{"code": "def _del_conversation(self, conversation_key: str) -> None:\n    if (conversation_key in self.conversations.keys()):\n        del self.conversations[conversation_key]\n        log.info(f'Deleted conversation, key: {conversation_key}')", "docstring": "Deletes Conversation instance.\n\nArgs:\nconversation_key: Conversation key.", "source": "codesearchnet"}
{"code": "def _get_parameter_conversion_entry(parameter_config):\n  \n  entry = _PARAM_CONVERSION_MAP.get(parameter_config.get('type'))\n\n  \n  \n  \n  if entry is None and 'enum' in parameter_config:\n    entry = _PARAM_CONVERSION_MAP['enum']\n\n  return entry", "docstring": "Get information needed to convert the given parameter to its API type.\n\nArgs:\nparameter_config: The dictionary containing information specific to the\nparameter in question. This is retrieved from request.parameters in the\nmethod config.\n\nReturns:\nThe entry from _PARAM_CONVERSION_MAP with functions/information needed to\nvalidate and convert the given parameter from a string to the type expected\nby the API.", "source": "juraj-google-style"}
{"code": "def most_specific_common_supertype(self, others):\n    try:\n        for other in others:\n            self.sanity_check_type(other)\n            nest.assert_same_structure(self._element_spec, other._element_spec)\n    except (TypeError, ValueError):\n        return None\n    self_elements = nest.flatten(self._element_spec)\n    others_elements = [nest.flatten(other._element_spec) for other in others]\n    common_elements = [None] * len(self_elements)\n    for i, self_element in enumerate(self_elements):\n        common_elements[i] = self_element.most_specific_common_supertype([other_elements[i] for other_elements in others_elements])\n        if common_elements[i] is None:\n            return None\n    common_element_spec = nest.pack_sequence_as(self._element_spec, common_elements)\n    return type(self)(self._input_workers, common_element_spec, self._strategy, self._options, cardinality=self._cardinality, enable_get_next_as_optional=self._enable_get_next_as_optional)", "docstring": "Returns the most specific supertype of `self` and `others`.\n\nArgs:\nothers: A Sequence of `TypeSpec`.\n\nReturns `None` if a supertype does not exist.", "source": "github-repos"}
{"code": "def load_from_file(self, filename=None, *, strict=True):\n    self.set_to_default()\n    if filename:\n        self._update_from_file(filename)\n    elif (LIGHTFLOW_CONFIG_ENV not in os.environ):\n        if os.path.isfile(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)):\n            self._update_from_file(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME))\n        elif os.path.isfile(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))):\n            self._update_from_file(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME)))\n        elif strict:\n            raise ConfigLoadError('Could not find the configuration file.')\n    else:\n        self._update_from_file(expand_env_var(os.environ[LIGHTFLOW_CONFIG_ENV]))\n    self._update_python_paths()", "docstring": "Load the configuration from a file.\n\nThe location of the configuration file can either be specified directly in the\nparameter filename or is searched for in the following order:\n\n1. In the environment variable given by LIGHTFLOW_CONFIG_ENV\n2. In the current execution directory\n3. In the user's home directory\n\nArgs:\nfilename (str): The location and name of the configuration file.\nstrict (bool): If true raises a ConfigLoadError when the configuration\ncannot be found.\n\nRaises:\nConfigLoadError: If the configuration cannot be found.", "source": "codesearchnet"}
{"code": "def execute(self, action):\n        \n        if self.env.game_over():\n            return self.env.getScreenRGB(), True, 0\n\n        action_space = self.env.getActionSet()\n        reward = self.env.act(action_space[action])\n        new_state = self.env.getScreenRGB()\n        done = self.env.game_over()\n        return new_state, done, reward", "docstring": "Executes action, observes next state and reward.\n\nArgs:\nactions: Action to execute.\n\nReturns:\n(Dict of) next state(s), boolean indicating terminal, and reward signal.", "source": "juraj-google-style"}
{"code": "def _load_data(self, data, from_db=False):\n        \n        self._data = data[:]\n        self.setattrs(\n            values=[],\n            node_stack=[],\n            node_dict={},\n        )\n        self._from_db = from_db", "docstring": "Stores the data at self._data, actual object creation done at _generate_instances()\n\nArgs:\ndata (list): List of dicts.\nfrom_db (bool): Default False. Is this data coming from DB or not.", "source": "juraj-google-style"}
{"code": "def concurrent_exec(func, param_list, max_workers=30, raise_on_exception=False):\n    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n        future_to_params = {executor.submit(func, *p): p for p in param_list}\n        return_vals = []\n        exceptions = []\n        for future in concurrent.futures.as_completed(future_to_params):\n            params = future_to_params[future]\n            try:\n                return_vals.append(future.result())\n            except Exception as exc:\n                logging.exception('%s generated an exception: %s', params, traceback.format_exc())\n                return_vals.append(exc)\n                exceptions.append(exc)\n        if raise_on_exception and exceptions:\n            error_messages = []\n            for exception in exceptions:\n                error_messages.append(''.join(traceback.format_exception(exception.__class__, exception, exception.__traceback__)))\n            raise RuntimeError('\\n\\n'.join(error_messages))\n        return return_vals", "docstring": "Executes a function with different parameters pseudo-concurrently.\n\nThis is basically a map function. Each element (should be an iterable) in\nthe param_list is unpacked and passed into the function. Due to Python's\nGIL, there's no true concurrency. This is suited for IO-bound tasks.\n\nArgs:\nfunc: The function that performs a task.\nparam_list: A list of iterables, each being a set of params to be\npassed into the function.\nmax_workers: int, the number of workers to use for parallelizing the\ntasks. By default, this is 30 workers.\nraise_on_exception: bool, raises all of the task failures if any of the\ntasks failed if `True`. By default, this is `False`.\n\nReturns:\nA list of return values from each function execution. If an execution\ncaused an exception, the exception object will be the corresponding\nresult.\n\nRaises:\nRuntimeError: If executing any of the tasks failed and\n`raise_on_exception` is True.", "source": "github-repos"}
{"code": "def set_error_message(self, error_message):\n    self._empty = False\n    self.error_message = error_message", "docstring": "Sets an error message on an instrumentation block.\n\nThis method is used exclusively to indicate that a test method failed\nto complete, which is usually cause by a crash of some sort such that\nthe test method is marked as error instead of ignored.\n\nArgs:\nerror_message: string, an error message to be added to the\nTestResultRecord to explain that something wrong happened.", "source": "github-repos"}
{"code": "def parse_cgmlst_alleles(cgmlst_fasta):\n    \n    out = defaultdict(list)\n    for header, seq in parse_fasta(cgmlst_fasta):\n        if not '|' in header:\n            raise Exception('Unexpected format for cgMLST fasta file header. No \"|\" (pipe) delimiter present! Header=\"{}\"'.format(header))\n        marker_name, allele_name = header.split('|')\n        out[marker_name].append(seq)\n    return out", "docstring": "Parse cgMLST alleles from fasta file\ncgMLST FASTA file must have a header format of \">{marker name}|{allele name}\"\n\nArgs:\ncgmlst_fasta (str): cgMLST fasta file path\n\nReturns:\ndict of list: Marker name to list of allele sequences", "source": "juraj-google-style"}
{"code": "def on(self, evnt, func, base=None):\n    funcs = self._syn_funcs[evnt]\n    if (func in funcs):\n        return\n    funcs.append(func)\n    if (base is not None):\n\n        def fini():\n            self.off(evnt, func)\n        base.onfini(fini)", "docstring": "Add an base function callback for a specific event with optional filtering.  If the function returns a\ncoroutine, it will be awaited.\n\nArgs:\nevnt (str):         An event name\nfunc (function):    A callback function to receive event tufo\n\nExamples:\n\nAdd a callback function and fire it:\n\nasync def baz(event):\nx = event[1].get('x')\ny = event[1].get('y')\nreturn x + y\n\nd.on('foo', baz)\n\n# this fire triggers baz...\nawait d.fire('foo', x=10, y=20)\n\nReturns:\nNone:", "source": "codesearchnet"}
{"code": "def bbox_rot90(bbox, factor, rows, cols):\n    \n    if factor < 0 or factor > 3:\n        raise ValueError('Parameter n must be in range [0;3]')\n    x_min, y_min, x_max, y_max = bbox\n    if factor == 1:\n        bbox = [y_min, 1 - x_max, y_max, 1 - x_min]\n    if factor == 2:\n        bbox = [1 - x_max, 1 - y_max, 1 - x_min, 1 - y_min]\n    if factor == 3:\n        bbox = [1 - y_max, x_min, 1 - y_min, x_max]\n    return bbox", "docstring": "Rotates a bounding box by 90 degrees CCW (see np.rot90)\n\nArgs:\nbbox (tuple): A tuple (x_min, y_min, x_max, y_max).\nfactor (int): Number of CCW rotations. Must be in range [0;3] See np.rot90.\nrows (int): Image rows.\ncols (int): Image cols.", "source": "juraj-google-style"}
{"code": "def fetch(url: str, **kwargs) -> Selector:\n    \n    kwargs.setdefault('headers', DEFAULT_HEADERS)\n    try:\n        res = requests.get(url, **kwargs)\n        res.raise_for_status()\n    except requests.RequestException as e:\n        print(e)\n    else:\n        html = res.text\n        tree = Selector(text=html)\n        return tree", "docstring": "Send HTTP request and parse it as a DOM tree.\n\nArgs:\nurl (str): The url of the site.\n\nReturns:\nSelector: allows you to select parts of HTML text using CSS or XPath expressions.", "source": "juraj-google-style"}
{"code": "def CreateStorageWriterForFile(cls, session, path):\n    \n    if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path):\n      return sqlite_writer.SQLiteStorageFileWriter(session, path)\n\n    return None", "docstring": "Creates a storage writer based on the file.\n\nArgs:\nsession (Session): session the storage changes are part of.\npath (str): path to the storage file.\n\nReturns:\nStorageWriter: a storage writer or None if the storage file cannot be\nopened or the storage format is not supported.", "source": "juraj-google-style"}
{"code": "def save(self, filename, image_format=\"eps\", width=8, height=6):\n        \n        self.get_plot(width, height).savefig(filename, format=image_format)", "docstring": "Save the plot to an image file.\n\nArgs:\nfilename: Filename to save to.\nimage_format: Format to save to. Defaults to eps.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    if token_ids_1 is not None:\n        output += token_ids_1 + [self.sep_token_id]\n    return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A ELECTRA sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def setValue(self, value):\n        \n        if value >= self.minimum() and value <= self.maximum():\n            self._lineEdit.setText(str(value))\n        elif value < self.minimum():\n            self._lineEdit.setText(str(self.minimum()))\n        elif value > self.maximum():\n            self._lineEdit.setText(str(self.maximum()))\n        return True", "docstring": "setter function to _lineEdit.text.  Sets minimum/maximum as new value if value is out of bounds.\n\nArgs:\nvalue (int/long): new value to set.\n\nReturns\nTrue if all went fine.", "source": "juraj-google-style"}
{"code": "def read_vocab(args, column_name):\n  \n  vocab_path = os.path.join(args.analysis,\n                            feature_transforms.VOCAB_ANALYSIS_FILE % column_name)\n\n  if not file_io.file_exists(vocab_path):\n    return []\n\n  vocab, _ = feature_transforms.read_vocab_file(vocab_path)\n  return vocab", "docstring": "Reads a vocab file if it exists.\n\nArgs:\nargs: command line flags\ncolumn_name: name of column to that has a vocab file.\n\nReturns:\nList of vocab words or [] if the vocab file is not found.", "source": "juraj-google-style"}
{"code": "def get_connectable_volume_templates(self, start=0, count=(- 1), filter='', query='', sort=''):\n    uri = (self.URI + '/connectable-volume-templates')\n    get_uri = self._client.build_query_uri(start=start, count=count, filter=filter, query=query, sort=sort, uri=uri)\n    return self._client.get(get_uri)", "docstring": "Gets the storage volume templates that are available on the specified networks based on the storage system\nport's expected network connectivity. If there are no storage volume templates that meet the specified\nconnectivity criteria, an empty collection will be returned.\n\nReturns:\nlist: Storage volume templates.", "source": "codesearchnet"}
{"code": "def get_changes_since(self, timestamp: str) -> Dict[str, List]:\n\t\t\n\t\trg = []\n\t\tcg = []\n\t\tra = []\n\t\tca = []\n\t\tlayers = []\n\n\t\tif self.last_modified() > timestamp:\n\t\t\tif self.row_graphs.last_modified() > timestamp:\n\t\t\t\tfor name in self.row_graphs.keys():\n\t\t\t\t\tif self.row_graphs.last_modified(name) > timestamp:\n\t\t\t\t\t\trg.append(name)\n\t\t\tif self.col_graphs.last_modified() > timestamp:\n\t\t\t\tfor name in self.col_graphs.keys():\n\t\t\t\t\tif self.col_graphs.last_modified(name) > timestamp:\n\t\t\t\t\t\tcg.append(name)\n\t\t\tif self.ra.last_modified() > timestamp:\n\t\t\t\tfor name in self.ra.keys():\n\t\t\t\t\tif self.ra.last_modified(name) > timestamp:\n\t\t\t\t\t\tra.append(name)\n\t\t\tif self.ca.last_modified() > timestamp:\n\t\t\t\tfor name in self.ca.keys():\n\t\t\t\t\tif self.ca.last_modified(name) > timestamp:\n\t\t\t\t\t\tca.append(name)\n\t\t\tif self.layers.last_modified() > timestamp:\n\t\t\t\tfor name in self.layers.keys():\n\t\t\t\t\tif self.layers.last_modified(name) > timestamp:\n\t\t\t\t\t\tlayers.append(name)\n\t\treturn {\"row_graphs\": rg, \"col_graphs\": cg, \"row_attrs\": ra, \"col_attrs\": ca, \"layers\": layers}", "docstring": "Get a summary of the parts of the file that changed since the given time\n\nArgs:\ntimestamp:\tISO8601 timestamp\n\nReturn:\ndict:\tDictionary like ``{\"row_graphs\": rg, \"col_graphs\": cg, \"row_attrs\": ra, \"col_attrs\": ca, \"layers\": layers}`` listing the names of objects that were modified since the given time", "source": "juraj-google-style"}
{"code": "def MakeSuiteFromHist(hist, name=None):\n    \n    if name is None:\n        name = hist.name\n\n    \n    d = dict(hist.GetDict())\n    return MakeSuiteFromDict(d, name)", "docstring": "Makes a normalized suite from a Hist object.\n\nArgs:\nhist: Hist object\nname: string name\n\nReturns:\nSuite object", "source": "juraj-google-style"}
{"code": "def installed(name, source):\n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n    if (not name):\n        raise SaltInvocationError('Must specify a KB \"name\"')\n    if (not source):\n        raise SaltInvocationError('Must specify a \"source\" file to install')\n    if __salt__['wusa.is_installed'](name):\n        ret['result'] = True\n        ret['comment'] = '{0} already installed'.format(name)\n        return ret\n    if (__opts__['test'] is True):\n        ret['result'] = None\n        ret['comment'] = '{0} would be installed'.format(name)\n        ret['result'] = None\n        return ret\n    cached_source_path = __salt__['cp.cache_file'](path=source, saltenv=__env__)\n    if (not cached_source_path):\n        msg = 'Unable to cache {0} from saltenv \"{1}\"'.format(salt.utils.url.redact_http_basic_auth(source), __env__)\n        ret['comment'] = msg\n        return ret\n    __salt__['wusa.install'](cached_source_path)\n    if __salt__['wusa.is_installed'](name):\n        ret['comment'] = '{0} was installed'.format(name)\n        ret['changes'] = {'old': False, 'new': True}\n        ret['result'] = True\n    else:\n        ret['comment'] = '{0} failed to install'.format(name)\n    return ret", "docstring": "Ensure an update is installed on the minion\n\nArgs:\n\nname(str):\nName of the Windows KB (\"KB123456\")\n\nsource (str):\nSource of .msu file corresponding to the KB\n\nExample:\n\n.. code-block:: yaml\n\nKB123456:\nwusa.installed:\n- source: salt://kb123456.msu", "source": "codesearchnet"}
{"code": "def _split_ir_into_match_steps(pruned_ir_blocks):\n    \n    output = []\n    current_tuple = None\n    for block in pruned_ir_blocks:\n        if isinstance(block, OutputSource):\n            \n            \n            continue\n        elif isinstance(block, root_block_types):\n            if current_tuple is not None:\n                output.append(current_tuple)\n            current_tuple = (block,)\n        elif isinstance(block, (CoerceType, Filter, MarkLocation)):\n            current_tuple += (block,)\n        else:\n            raise AssertionError(u'Unexpected block type when converting to MATCH query: '\n                                 u'{} {}'.format(block, pruned_ir_blocks))\n\n    if current_tuple is None:\n        raise AssertionError(u'current_tuple was unexpectedly None: {}'.format(pruned_ir_blocks))\n    output.append(current_tuple)\n\n    return [_per_location_tuple_to_step(x) for x in output]", "docstring": "Split a list of IR blocks into per-location MATCH steps.\n\nArgs:\npruned_ir_blocks: list of IR basic block objects that have gone through a lowering step.\n\nReturns:\nlist of MatchStep namedtuples, each of which contains all basic blocks that correspond\nto a single MATCH step.", "source": "juraj-google-style"}
{"code": "def _token_to_subtoken_ids(self, token):\n    \n    cache_location = hash(token) % self._cache_size\n    cache_key, cache_value = self._cache[cache_location]\n    if cache_key == token:\n      return cache_value\n    ret = self._escaped_token_to_subtoken_ids(\n        _escape_token(token, self._alphabet))\n    self._cache[cache_location] = (token, ret)\n    return ret", "docstring": "Converts token to a list of subtoken ids.\n\nArgs:\ntoken: a string.\nReturns:\na list of integers in the range [0, vocab_size)", "source": "juraj-google-style"}
{"code": "def add_defaults_to_kwargs(defaults, **kwargs):\n    defaults = dict(defaults)\n    defaults.update(kwargs)\n    return defaults", "docstring": "Updates `kwargs` with dict of `defaults`\n\nArgs:\ndefaults: A dictionary of keys and values\n**kwargs: The kwargs to update.\n\nReturns:\nThe updated kwargs.", "source": "codesearchnet"}
{"code": "def _create_uninitialized_mirrored_tpu_variables(**kwargs):\n    if kwargs.get('initial_value', None) is None:\n        return _create_mirrored_tpu_variables(**kwargs)\n    value_list = []\n    initial_value = None\n    for i, d in enumerate(devices):\n        with ops.device(d):\n            if i == 0:\n                initial_value = kwargs.get('initial_value', None)\n                with maybe_init_scope():\n                    if initial_value is not None:\n                        if callable(initial_value):\n                            initial_value = initial_value()\n                        initial_value = ops.convert_to_tensor(initial_value, dtype=kwargs.get('dtype', None))\n            if i > 0:\n                var0name = value_list[0].name.split(':')[0]\n                kwargs['name'] = '%s/replica_%d/' % (var0name, i)\n            kwargs['initial_value'] = initial_value\n            if kwargs.get('dtype', None) is None:\n                kwargs['dtype'] = kwargs['initial_value'].dtype\n            if kwargs.get('shape', None) is None:\n                kwargs['shape'] = kwargs['initial_value'].shape\n            with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n                v = uninitialized_variable_creator(**kwargs)\n            assert not isinstance(v, tpu_values.TPUMirroredVariable)\n            value_list.append(v)\n    return value_list", "docstring": "Returns a list of `tf.Variable`s.\n\nThe list contains `number_replicas` `tf.Variable`s and can be used to\ninitialize a `TPUMirroredVariable`.\n\nArgs:\n**kwargs: the keyword arguments for creating a variable", "source": "github-repos"}
{"code": "def _run_dnb_normalization(self, dnb_data, sza_data):\n        \n        \n        dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))\n        sza_data = xr.DataArray(sza_data, dims=('y', 'x'))\n\n        good_mask = ~(dnb_data.isnull() | sza_data.isnull())\n        output_dataset = dnb_data.where(good_mask)\n        \n        output_dataset = output_dataset.values.copy()\n        dnb_data = dnb_data.values\n        sza_data = sza_data.values\n\n        day_mask, mixed_mask, night_mask = make_day_night_masks(\n            sza_data,\n            good_mask.values,\n            self.high_angle_cutoff,\n            self.low_angle_cutoff,\n            stepsDegrees=self.mixed_degree_step)\n\n        did_equalize = False\n        if day_mask.any():\n            LOG.debug(\"Histogram equalizing DNB day data...\")\n            histogram_equalization(dnb_data, day_mask, out=output_dataset)\n            did_equalize = True\n        if mixed_mask:\n            for mask in mixed_mask:\n                if mask.any():\n                    LOG.debug(\"Histogram equalizing DNB mixed data...\")\n                    histogram_equalization(dnb_data, mask, out=output_dataset)\n                    did_equalize = True\n        if night_mask.any():\n            LOG.debug(\"Histogram equalizing DNB night data...\")\n            histogram_equalization(dnb_data, night_mask, out=output_dataset)\n            did_equalize = True\n\n        if not did_equalize:\n            raise RuntimeError(\"No valid data found to histogram equalize\")\n\n        return output_dataset", "docstring": "Scale the DNB data using a histogram equalization method.\n\nArgs:\ndnb_data (ndarray): Day/Night Band data array\nsza_data (ndarray): Solar Zenith Angle data array", "source": "juraj-google-style"}
{"code": "async def build_task_dependencies(chain, task, name, my_task_id):\n    \n    log.info(\"build_task_dependencies {} {}\".format(name, my_task_id))\n    if name.count(':') > chain.context.config['max_chain_length']:\n        raise CoTError(\"Too deep recursion!\\n{}\".format(name))\n    sorted_dependencies = find_sorted_task_dependencies(task, name, my_task_id)\n\n    for task_name, task_id in sorted_dependencies:\n        if task_id not in chain.dependent_task_ids():\n            link = LinkOfTrust(chain.context, task_name, task_id)\n            json_path = link.get_artifact_full_path('task.json')\n            try:\n                task_defn = await chain.context.queue.task(task_id)\n                link.task = task_defn\n                chain.links.append(link)\n                \n                makedirs(os.path.dirname(json_path))\n                with open(json_path, 'w') as fh:\n                    fh.write(format_json(task_defn))\n                await build_task_dependencies(chain, task_defn, task_name, task_id)\n            except TaskclusterFailure as exc:\n                raise CoTError(str(exc))", "docstring": "Recursively build the task dependencies of a task.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\ntask (dict): the task definition to operate on.\nname (str): the name of the task to operate on.\nmy_task_id (str): the taskId of the task to operate on.\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def guess_peb_size(path):\n    \n    file_offset = 0\n    offsets = []\n    f = open(path, 'rb')\n    f.seek(0,2)\n    file_size = f.tell()+1\n    f.seek(0)\n\n    for _ in range(0, file_size, FILE_CHUNK_SZ):\n        buf = f.read(FILE_CHUNK_SZ)\n        for m in re.finditer(UBI_EC_HDR_MAGIC, buf):\n            start = m.start()\n\n            if not file_offset:\n                file_offset = start\n                idx = start\n            else:\n                idx = start+file_offset\n\n            offsets.append(idx)\n\n        file_offset += FILE_CHUNK_SZ\n    f.close()\n\n    occurances = {}\n    for i in range(0, len(offsets)):\n        try:\n            diff = offsets[i] - offsets[i-1]\n        except:\n            diff = offsets[i]\n\n        if diff not in occurances:\n            occurances[diff] = 0\n\n        occurances[diff] += 1\n\n    most_frequent = 0\n    block_size = None\n\n    for offset in occurances:\n        if occurances[offset] > most_frequent:\n            most_frequent = occurances[offset]\n            block_size = offset\n\n    return block_size", "docstring": "Determine the most likely block size\n\nArguments:\nStr:path    -- Path to file.\n\nReturns:\nInt         -- PEB size.\n\nSearches file for Magic Number, picks most\ncommon length between them.", "source": "juraj-google-style"}
{"code": "def union_update(self, *others):\n    _elements = self._elements\n    _total = self._total\n    for other in map(self._as_mapping, others):\n        for (element, multiplicity) in other.items():\n            old_multiplicity = _elements.get(element, 0)\n            if (multiplicity > old_multiplicity):\n                _elements[element] = multiplicity\n                _total += (multiplicity - old_multiplicity)\n    self._total = _total", "docstring": "r\"\"\"Update the multiset, adding elements from all others using the maximum multiplicity.\n\n>>> ms = Multiset('aab')\n>>> ms.union_update('bc')\n>>> sorted(ms)\n['a', 'a', 'b', 'c']\n\nYou can also use the ``|=`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aab')\n>>> ms |= Multiset('bccd')\n>>> sorted(ms)\n['a', 'a', 'b', 'c', 'c', 'd']\n\nFor a variant of the operation which does not modify the multiset, but returns a new\nmultiset instead see :meth:`union`.\n\nArgs:\nothers: The other sets to union this multiset with. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].", "source": "codesearchnet"}
{"code": "def tflite_convert(fn, input_templates):\n    fn = def_function.function(fn)\n    concrete_func = fn.get_concrete_function(*input_templates)\n    converter = lite.TFLiteConverterV2([concrete_func])\n    return converter.convert()", "docstring": "Converts the provided fn to tf.lite model.\n\nArgs:\nfn: A callable that expects a list of inputs like input_templates that\nreturns a tensor or structure of tensors.\ninput_templates: A list of Tensors, ndarrays or TensorSpecs describing the\ninputs that fn expects. The actual values of the Tensors or ndarrays are\nunused.\n\nReturns:\nThe serialized tf.lite model.", "source": "github-repos"}
{"code": "def __eq__(self, other):\n        \n        if self._begin == other._begin and self._end == other._end:\n            return True\n        return False", "docstring": "Two intervals are the same if they have the same begin and end.\n\nArgs:\nother (Interval): other Interval\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "def GetAPFSVolumeByPathSpec(self, path_spec):\n    volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)\n    if (volume_index is None):\n        return None\n    return self._fsapfs_container.get_volume(volume_index)", "docstring": "Retrieves an APFS volume for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyfsapfs.volume: an APFS volume or None if not available.", "source": "codesearchnet"}
{"code": "def _GetDisplayPath(self, path_spec, full_path, data_stream_name):\n    \n    display_path = ''\n\n    if path_spec.HasParent():\n      parent_path_spec = path_spec.parent\n      if parent_path_spec and parent_path_spec.type_indicator == (\n          dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):\n        display_path = ''.join([display_path, parent_path_spec.location])\n\n    display_path = ''.join([display_path, full_path])\n    if data_stream_name:\n      display_path = ':'.join([display_path, data_stream_name])\n\n    return display_path", "docstring": "Retrieves a path to display.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification of the file entry.\nfull_path (str): full path of the file entry.\ndata_stream_name (str): name of the data stream.\n\nReturns:\nstr: path to display.", "source": "juraj-google-style"}
{"code": "def get_scan_plot(self, coords=None):\n    from pymatgen.util.plotting import pretty_plot\n    plt = pretty_plot(12, 8)\n    d = self.read_scan()\n    if (coords and (coords in d['coords'])):\n        x = d['coords'][coords]\n        plt.xlabel(coords)\n    else:\n        x = range(len(d['energies']))\n        plt.xlabel('points')\n    plt.ylabel('Energy (eV)')\n    e_min = min(d['energies'])\n    y = [((e - e_min) * Ha_to_eV) for e in d['energies']]\n    plt.plot(x, y, 'ro--')\n    return plt", "docstring": "Get a matplotlib plot of the potential energy surface.\n\nArgs:\ncoords: internal coordinate name to use as abcissa.", "source": "codesearchnet"}
{"code": "def generate_cot_body(context):\n    try:\n        cot = {'artifacts': get_cot_artifacts(context), 'chainOfTrustVersion': 1, 'runId': context.claim_task['runId'], 'task': context.task, 'taskId': context.claim_task['status']['taskId'], 'workerGroup': context.claim_task['workerGroup'], 'workerId': context.config['worker_id'], 'workerType': context.config['worker_type'], 'environment': get_cot_environment(context)}\n    except (KeyError,) as exc:\n        raise ScriptWorkerException(\"Can't generate chain of trust! {}\".format(str(exc)))\n    return cot", "docstring": "Generate the chain of trust dictionary.\n\nThis is the unsigned and unformatted chain of trust artifact contents.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict: the unsignd and unformatted chain of trust artifact contents.\n\nRaises:\nScriptWorkerException: on error.", "source": "codesearchnet"}
{"code": "def _read_from_seg(self, n):\n    \n    result = self._seg.read(size=n)\n    if result == \"\":\n      return result\n    offset = self._seg.tell()\n    if offset > self._seg_valid_length:\n      extra = offset - self._seg_valid_length\n      result = result[:-1*extra]\n    self._offset += len(result)\n    return result", "docstring": "Read from current seg.\n\nArgs:\nn: max number of bytes to read.\n\nReturns:\nvalid bytes from the current seg. \"\" if no more is left.", "source": "juraj-google-style"}
{"code": "def DataIsInteger(self):\n    return (self.data_type in (definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN, definitions.REG_QWORD))", "docstring": "Determines, based on the data type, if the data is an integer.\n\nThe data types considered strings are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),\nREG_DWORD_BIG_ENDIAN and REG_QWORD.\n\nReturns:\nbool: True if the data is an integer, False otherwise.", "source": "codesearchnet"}
{"code": "def browse(self, path=None):\n    params = None\n    if path:\n        assert isinstance(path, string_types)\n        params = {'current': path}\n    return self.get('browse', params=params)", "docstring": "Returns a list of directories matching the path given.\n\nArgs:\npath (str): glob pattern.\n\nReturns:\nList[str]", "source": "codesearchnet"}
{"code": "def reset_logical_devices(device_type, count):\n    reset_context()\n    devices = tf_config.list_physical_devices(device_type)\n    if device_type.upper() not in ('CPU', 'GPU'):\n        raise ValueError('resetting logical device for non-supported device type : %s' % device_type)\n    if count < len(devices):\n        devices = devices[:count]\n        tf_config.set_visible_devices(devices, device_type=device_type.upper())\n    for i, device in enumerate(devices):\n        n = (i + 1) * count \n        assert n > 0\n        configs = []\n        for ordinal in range(n):\n            if device_type.upper() == 'GPU':\n                dev_config = context.LogicalDeviceConfiguration(memory_limit=_DEFAULT_GPU_MEMORY_LIMIT, experimental_device_ordinal=ordinal)\n            else:\n                dev_config = context.LogicalDeviceConfiguration()\n            configs.append(dev_config)\n        tf_config.set_logical_device_configuration(device, configs)", "docstring": "Resets logical devices for CPU/GPU.\n\nLogical devices can only be instantiated once on a particular context. For\nnow, context re-use is triggering some function duplication errors, so we\nreset the context on each call.\n\nArgs:\ndevice_type: The device_type to reset.\ncount: numbers of virtual device to reset to.", "source": "github-repos"}
{"code": "def refill_main_wallet(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):\n    (path, from_address) = from_address\n    unsigned_tx = self._t.simple_transaction(from_address, (([(to_address, self.fee)] * nfees) + ([(to_address, self.token)] * ntokens)), min_confirmations=min_confirmations)\n    signed_tx = self._t.sign_transaction(unsigned_tx, password)\n    txid = self._t.push(signed_tx)\n    return txid", "docstring": "Refill the Federation wallet with tokens and fees. This keeps the federation wallet clean.\nDealing with exact values simplifies the transactions. No need to calculate change. Easier to keep track of the\nunspents and prevent double spends that would result in transactions being rejected by the bitcoin network.\n\nArgs:\n\nfrom_address (Tuple[str]): Refill wallet address. Refills the federation wallet with tokens and fees\nto_address (str): Federation wallet address\nnfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions\nntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain\npassword (str): Password for the Refill wallet. Used to sign the transaction\nmin_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6\nsync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at\nleast on confirmation on the blockchain. Defaults to False\n\nReturns:\nstr: transaction id", "source": "codesearchnet"}
{"code": "def snyder_opt(self, structure):\n        \n        nsites = structure.num_sites\n        volume = structure.volume\n        num_density = 1e30 * nsites / volume\n        return 1.66914e-23 * \\\n            (self.long_v(structure) + 2.*self.trans_v(structure))/3. \\\n            / num_density ** (-2./3.) * (1 - nsites ** (-1./3.))", "docstring": "Calculates Snyder's optical sound velocity (in SI units)\n\nArgs:\nstructure: pymatgen structure object\n\nReturns: Snyder's optical sound velocity (in SI units)", "source": "juraj-google-style"}
{"code": "def ToDebugString(self, indentation_level=1):\n    indentation = ('  ' * indentation_level)\n    text_parts = ['{0:s}path segment index: {1:d}\\n'.format(indentation, self.path_segment_index)]\n    for (path_segment, scan_object) in self._path_segments.items():\n        text_parts.append('{0:s}path segment: {1:s}\\n'.format(indentation, path_segment))\n        if isinstance(scan_object, PathFilterScanTreeNode):\n            text_parts.append('{0:s}scan tree node:\\n'.format(indentation))\n            text_parts.append(scan_object.ToDebugString((indentation_level + 1)))\n        elif isinstance(scan_object, py2to3.STRING_TYPES):\n            text_parts.append('{0:s}path: {1:s}\\n'.format(indentation, scan_object))\n    text_parts.append('{0:s}default value:\\n'.format(indentation))\n    if isinstance(self.default_value, PathFilterScanTreeNode):\n        text_parts.append('{0:s}scan tree node:\\n'.format(indentation))\n        text_parts.append(self.default_value.ToDebugString((indentation_level + 1)))\n    elif isinstance(self.default_value, py2to3.STRING_TYPES):\n        text_parts.append('{0:s}pattern: {1:s}\\n'.format(indentation, self.default_value))\n    text_parts.append('\\n')\n    return ''.join(text_parts)", "docstring": "Converts the path filter scan tree node into a debug string.\n\nArgs:\nindentation_level: an integer containing the text indentation level.\n\nReturns:\nA string containing a debug representation of the path filter scan\ntree node.", "source": "codesearchnet"}
{"code": "def execute(self, triple_map, output, **kwargs):\n        \n        subjects = []\n        logical_src_iterator = str(triple_map.logicalSource.iterator)\n        json_object = kwargs.get('obj', self.source)\n        \n        if logical_src_iterator == \".\":\n            results = [None,]\n        else:\n            json_path_exp = jsonpath_ng.parse(logical_src_iterator)\n            results = [r.value for r in json_path_exp.find(json_object)][0]\n        for row in results:\n            subject = self.generate_term(term_map=triple_map.subjectMap,\n                                         **kwargs)\n            for pred_obj_map in triple_map.predicateObjectMap:\n                predicate = pred_obj_map.predicate\n                if pred_obj_map.template is not None:\n                    output.add((\n                        subject,\n                        predicate,\n                        self.generate_term(term_map=pred_obj_map, **kwargs)))\n\n                if pred_obj_map.parentTriplesMap is not None:\n                    self.__handle_parents__(\n                        output,\n                        parent_map=pred_obj_map.parentTriplesMap,\n                        subject=subject,\n                        predicate=predicate,\n                        obj=row,\n                        **kwargs)\n                if pred_obj_map.reference is not None:\n                    ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))\n                    found_objects = [r.value for r in ref_exp.find(row)]\n                    for obj in found_objects:\n                        if rdflib.term._is_valid_uri(obj):\n                            rdf_obj = rdflib.URIRef(str(obj))\n                        else:\n                            rdf_obj = rdflib.Literal(str(obj))\n                        output.add((subject, predicate, rdf_obj))\n                if pred_obj_map.constant is not None:\n                    output.add((subject,\n                                     predicate,\n                                     pred_obj_map.constant))\n            subjects.append(subject)\n        return subjects", "docstring": "Method executes mapping between JSON source and\noutput RDF\n\nArgs:\n\n-----\ntriple_map: SimpleNamespace", "source": "juraj-google-style"}
{"code": "def needs_summary(self, value: Any, *, name: Optional[str]=None, parent: Any=None, title: Union[str, Html, None]=None, enable_summary: Optional[bool]=None, enable_summary_for_str: bool=True, max_summary_len_for_str: int=80) -> bool:\n    del parent\n    if isinstance(enable_summary, bool):\n        return enable_summary\n    assert enable_summary is None\n    if not enable_summary_for_str and isinstance(value, str):\n        return False\n    if name is None and title is None and (isinstance(value, (int, float, bool, type(None))) or (isinstance(value, str) and len(value) <= max_summary_len_for_str)):\n        return False\n    return True", "docstring": "Returns True if the object needs a summary.\n\nArgs:\nvalue: The value to render.\nname: The referred field name of the value.\nparent: The parent of the value.\ntitle: The title of the summary.\nenable_summary: Whether to enable the summary. If None, summary will\nbe enabled for complex types or when string exceeds\n`max_summary_len_for_str`.\nenable_summary_for_str: Whether to enable the summary for strings.\nmax_summary_len_for_str: The maximum length of the string to display.\n\nReturns:\nTrue if the object needs a summary.", "source": "github-repos"}
{"code": "def _CreateTaskStorageWriter(self, path, task):\n    return SQLiteStorageFileWriter(self._session, path, storage_type=definitions.STORAGE_TYPE_TASK, task=task)", "docstring": "Creates a task storage writer.\n\nArgs:\npath (str): path to the storage file.\ntask (Task): task.\n\nReturns:\nSQLiteStorageFileWriter: storage writer.", "source": "codesearchnet"}
{"code": "def add_vlan_int(self, vlan_id):\n        \n        config = ET.Element('config')\n        vlinterface = ET.SubElement(config, 'interface-vlan',\n                                    xmlns=(\"urn:brocade.com:mgmt:\"\n                                           \"brocade-interface\"))\n        interface = ET.SubElement(vlinterface, 'interface')\n        vlan = ET.SubElement(interface, 'vlan')\n        name = ET.SubElement(vlan, 'name')\n        name.text = vlan_id\n        try:\n            self._callback(config)\n            return True\n        \n        except Exception as error:\n            logging.error(error)\n            return False", "docstring": "Add VLAN Interface. VLAN interfaces are required for VLANs even when\nnot wanting to use the interface for any L3 features.\n\nArgs:\nvlan_id: ID for the VLAN interface being created. Value of 2-4096.\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def prefixsearch(self, prefix, results=10):\n    self._check_query(prefix, 'Prefix must be specified')\n    query_params = {'list': 'prefixsearch', 'pssearch': prefix, 'pslimit': ('max' if (results > 500) else results), 'psnamespace': 0, 'psoffset': 0}\n    raw_results = self.wiki_request(query_params)\n    self._check_error_response(raw_results, prefix)\n    return [rec['title'] for rec in raw_results['query']['prefixsearch']]", "docstring": "Perform a prefix search using the provided prefix string\n\nArgs:\nprefix (str): Prefix string to use for search\nresults (int): Number of pages with the prefix to return\nReturns:\nlist: List of page titles\nNote:\n**Per the documentation:** \"The purpose of this module is \\\nsimilar to action=opensearch: to take user input and provide \\\nthe best-matching titles. Depending on the search engine \\\nbackend, this might include typo correction, redirect \\\navoidance, or other heuristics.\"", "source": "codesearchnet"}
{"code": "def create_sequence_pretty_tensor(sequence_input, shape=None, save_state=True):\n    inputs = prettytensor.wrap_sequence(sequence_input.inputs, tensor_shape=shape)\n    targets = prettytensor.wrap_sequence(sequence_input.targets)\n    if save_state:\n        bookkeeper.set_recurrent_state_saver(sequence_input)\n    return (inputs, targets)", "docstring": "Creates a PrettyTensor object for the given sequence.\n\nThe first dimension is treated as a time-dimension * batch and a default is\nset for `unroll` and `state_saver`.\n\nTODO(eiderman): Remove shape.\n\nArgs:\nsequence_input: A SequenceInput or StateSavingSequenceInput\nshape: The shape of each item in the sequence (including batch).\nsave_state: If true, use the sequence_input's state and save_state methods.\nReturns:\n2 Layers: inputs, targets", "source": "codesearchnet"}
{"code": "def pauseProducing(self):\n    if (not self._running):\n        return\n    self._running = False\n    for consumer in self._consumers.values():\n        (yield consumer.channel.basic_cancel(consumer_tag=consumer.tag))\n    _legacy_twisted_log.msg('Paused retrieval of messages for the server queue')", "docstring": "Pause the reception of messages by canceling all existing consumers.\nThis does not disconnect from the server.\n\nMessage reception can be resumed with :meth:`resumeProducing`.\n\nReturns:\nDeferred: fired when the production is paused.", "source": "codesearchnet"}
{"code": "def extract_class(jar, name):\n    with jar.open(name) as entry:\n        return LinkableClass(javatools.unpack_class(entry))", "docstring": "Extracts a LinkableClass from a jar.\n\nArgs:\njar: An open ZipFile instance.\nname: A string containing the binary name of a class.\n\nRaises:\nKeyError: The class does not exist in the jar.", "source": "codesearchnet"}
{"code": "def get_logging_dir(appname='default'):\n    from utool._internal import meta_util_cache\n    from utool._internal import meta_util_cplat\n    from utool import util_cache\n    if ((appname is None) or (appname == 'default')):\n        appname = util_cache.get_default_appname()\n    resource_dpath = meta_util_cplat.get_resource_dir()\n    default = join(resource_dpath, appname, 'logs')\n    log_dir = meta_util_cache.global_cache_read(logdir_cacheid, appname=appname, default=default)\n    log_dir_realpath = realpath(log_dir)\n    return log_dir_realpath", "docstring": "The default log dir is in the system resource directory\nBut the utool global cache allows for the user to override\nwhere the logs for a specific app should be stored.\n\nReturns:\nlog_dir_realpath (str): real path to logging directory", "source": "codesearchnet"}
{"code": "def alexa(self) -> dict:\n    response = {'response': {'shouldEndSession': False, 'outputSpeech': {'type': 'PlainText', 'text': self.content}, 'card': {'type': 'Simple', 'content': self.content}}}\n    return response", "docstring": "Returns Amazon Alexa compatible state of the PlainText instance.\n\nCreating Amazon Alexa response blank with populated \"outputSpeech\" and\n\"card sections.\n\nReturns:\nresponse: Amazon Alexa representation of PlainText state.", "source": "codesearchnet"}
{"code": "def interpret_obj(self, obj, v_level_indexes, h_level_indexes, v_level_visibility, h_level_visibility, v_level_sort_keys, h_level_sort_keys, v_level_titles, h_level_titles):\n    if (not isinstance(obj, NonStringIterable)):\n        raise self.error('Cannot make a table from object {!r}'.format(obj))\n    rectangular_rows = tabulate(obj, v_level_indexes=v_level_indexes, h_level_indexes=h_level_indexes, v_level_visibility=v_level_visibility, h_level_visibility=h_level_visibility, v_level_sort_keys=v_level_sort_keys, h_level_sort_keys=h_level_sort_keys, v_level_titles=v_level_titles, h_level_titles=h_level_titles)\n    assert is_rectangular(rectangular_rows)\n    (num_rows, num_cols) = size(rectangular_rows)\n    return (rectangular_rows, num_cols)", "docstring": "Interpret the given Python object as a table.\n\nArgs:\nobj: A sequence (later a mapping, too)\n\nReturns:\nA list of lists represents rows of cells.\n\nRaises:\nTypeError: If the type couldn't be interpreted as a table.", "source": "codesearchnet"}
{"code": "def create_member(self, member_json):\n    return trolly.member.Member(trello_client=self, member_id=member_json['id'], name=member_json['fullName'], data=member_json)", "docstring": "Create a Member object from JSON object\n\nReturns:\nMember: The member from the given `member_json`.", "source": "codesearchnet"}
{"code": "def topological_sort(self):\n    graph = self.graph\n    in_degree = {}\n    for u in graph:\n        in_degree[u] = 0\n    for u in graph:\n        for v in graph[u]:\n            in_degree[v] += 1\n    queue = deque()\n    for u in in_degree:\n        if (in_degree[u] == 0):\n            queue.appendleft(u)\n    sorted_graph = []\n    while queue:\n        u = queue.pop()\n        sorted_graph.append(u)\n        for v in sorted(graph[u]):\n            in_degree[v] -= 1\n            if (in_degree[v] == 0):\n                queue.appendleft(v)\n    if (len(sorted_graph) == len(graph)):\n        return sorted_graph\n    else:\n        raise ValueError('graph is not acyclic')", "docstring": "Returns a topological ordering of the DAG.\n\nReturns:\nlist: A list of topologically sorted nodes in the graph.\n\nRaises:\nValueError: Raised if the graph is not acyclic.", "source": "codesearchnet"}
{"code": "def institute(self, institute_id):\n        \n        LOG.debug(\"Fetch institute {}\".format(institute_id))\n        institute_obj = self.institute_collection.find_one({\n            '_id': institute_id\n        })\n        if institute_obj is None:\n            LOG.debug(\"Could not find institute {0}\".format(institute_id))\n\n        return institute_obj", "docstring": "Featch a single institute from the backend\n\nArgs:\ninstitute_id(str)\n\nReturns:\nInstitute object", "source": "juraj-google-style"}
{"code": "def _MaybePurgeOrphanedData(self, event):\n    \n    if not self.purge_orphaned_data:\n      return\n    \n    if self.file_version and self.file_version >= 2:\n      \n      \n      self._CheckForRestartAndMaybePurge(event)\n    else:\n      \n      \n      self._CheckForOutOfOrderStepAndMaybePurge(event)", "docstring": "Maybe purge orphaned data due to a TensorFlow crash.\n\nWhen TensorFlow crashes at step T+O and restarts at step T, any events\nwritten after step T are now \"orphaned\" and will be at best misleading if\nthey are included in TensorBoard.\n\nThis logic attempts to determine if there is orphaned data, and purge it\nif it is found.\n\nArgs:\nevent: The event to use as a reference, to determine if a purge is needed.", "source": "juraj-google-style"}
{"code": "def name_from_class(cls, measurement_class):\n    if (not getattr(cls, '_measurements_initialized', False)):\n        cls._measurement_map = dict(((m.name, m) for m in all_measurements()))\n        cls._measurements_initialized = True\n    try:\n        name = getattr(measurement_class, 'name')\n    except AttributeError:\n        raise UnrecognizedMeasurementError((\"No 'name' attribute in %s\" % measurement_class))\n    else:\n        cls._measurement_map[name] = measurement_class\n        return name", "docstring": "For a given measurement class, return its generic name.\n\nThe given class is expected to have a ``name`` attribute, otherwise this\nfunction will raise an execption. The point of using this method instead\nof just trying to grab that attribute in the application is to cache\nmeasurement name to class mappings for future use.\n\nReturns:\nthe generic OpenXC name for a measurement class.\n\nRaise:\nUnrecognizedMeasurementError: if the class does not have a valid\ngeneric name", "source": "codesearchnet"}
{"code": "def add_snmp_host(self, **kwargs):\n    host_info = kwargs.pop('host_info')\n    community = kwargs.pop('community')\n    callback = kwargs.pop('callback', self._callback)\n    config = ET.Element('config')\n    snmp_server = ET.SubElement(config, 'snmp-server', xmlns='urn:brocade.com:mgmt:brocade-snmp')\n    host = ET.SubElement(snmp_server, 'host')\n    ip_addr = ET.SubElement(host, 'ip')\n    ip_addr.text = host_info[0]\n    com = ET.SubElement(host, 'community')\n    com.text = community\n    udp_port = ET.SubElement(host, 'udp-port')\n    udp_port.text = host_info[1]\n    return callback(config)", "docstring": "Add SNMP host to NOS device.\n\nArgs:\nhost_info (tuple(str, str)): Tuple of host IP and port.\ncommunity (str): Community string to be added to device.\ncallback (function): A function executed upon completion of the\nmethod.  The only parameter passed to `callback` will be the\n``ElementTree`` `config`.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nKeyError: if `host_info` or `community` is not defined.", "source": "codesearchnet"}
{"code": "def _create_in_hdx(self, object_type, id_field_name, name_field_name,\n                       file_to_upload=None):\n        \n        \n        self.check_required_fields()\n        if id_field_name in self.data and self._load_from_hdx(object_type, self.data[id_field_name]):\n            logger.warning('%s exists. Updating %s' % (object_type, self.data[id_field_name]))\n            self._merge_hdx_update(object_type, id_field_name, file_to_upload)\n        else:\n            self._save_to_hdx('create', name_field_name, file_to_upload)", "docstring": "Helper method to check if resource exists in HDX and if so, update it, otherwise create it\n\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\nname_field_name (str): Name of field containing HDX object name\nfile_to_upload (Optional[str]): File to upload to HDX (if url not supplied)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "async def get(self, public_key):\n    if settings.SIGNATURE_VERIFICATION:\n        super().verify()\n    compiler = re.compile('\\\\((.*?)\\\\)')\n    match = compiler.search(self.request.headers.get('User-Agent'))\n    try:\n        source = match.group(1)\n    except:\n        source = None\n    (await self.account.logsource(public_key=public_key, source=source))\n    logging.debug('\\n\\n [+] -- Get account data.')\n    response = (await self.account.getaccountdata(public_key=public_key))\n    logging.debug('\\n')\n    logging.debug(response)\n    logging.debug('\\n')\n    if ('error' in response.keys()):\n        self.set_status(response['error'])\n        self.write(response)\n        raise tornado.web.Finish\n    wallets = (await self.account.balance.get_wallets(uid=response['id']))\n    if isinstance(wallets, dict):\n        if ('error' in wallets.keys()):\n            self.set_status(wallets['error'])\n            self.write(wallets)\n            raise tornado.web.Finish\n    response.update({'wallets': json.dumps([i for i in wallets['wallets'] if (i.get('coinid') not in ['BTC', 'LTC', 'ETH'])])})\n    self.write(response)", "docstring": "Receive account data\n\nAccepts:\nQuery string:\n- \"public_key\" - str\nQuery string params:\n- message ( signed dictionary ):\n- \"timestamp\" - str\n\nReturns:\n- \"device_id\" - str\n- \"phone\" - str\n- \"public_key\" - str\n- \"count\" - int  ( wallets amount )\n- \"level\" - int (2 by default)\n- \"news_count\" - int (0 by default)\n- \"email\" - str\n- \"wallets\" - list\n\nVerified: True", "source": "codesearchnet"}
{"code": "def forward(self, hidden_features):\n    hidden_features = self.dropout_layer(hidden_features)\n    forecast = self.base_pt_block(hidden_features)\n    return forecast", "docstring": "Args:\nhidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode\nor `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden\nfeatures.\n\nReturns:\n`torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`.", "source": "github-repos"}
{"code": "def check_regularizers(regularizers, keys):\n    if (regularizers is None):\n        return {}\n    _assert_is_dictlike(regularizers, valid_keys=keys)\n    keys = set(keys)\n    if (not (set(regularizers) <= keys)):\n        extra_keys = (set(regularizers) - keys)\n        raise KeyError('Invalid regularizer keys {}, regularizers can only be provided for {}'.format(', '.join((\"'{}'\".format(key) for key in extra_keys)), ', '.join((\"'{}'\".format(key) for key in keys))))\n    _check_nested_callables(regularizers, 'Regularizer')\n    return dict(regularizers)", "docstring": "Checks the given regularizers.\n\nThis checks that `regularizers` is a dictionary that only contains keys in\n`keys`, and furthermore the entries in `regularizers` are functions or\nfurther dictionaries (the latter used, for example, in passing regularizers\nto modules inside modules) that must satisfy the same constraints.\n\nArgs:\nregularizers: Dictionary of regularizers (allowing nested dictionaries) or\nNone.\nkeys: Iterable of valid keys for `regularizers`.\n\nReturns:\nCopy of checked dictionary of regularizers. If `regularizers=None`, an empty\ndictionary will be returned.\n\nRaises:\nKeyError: If an regularizers is provided for a key not in `keys`.\nTypeError: If a provided regularizer is not a callable function, or\n`regularizers` is not a Mapping.", "source": "codesearchnet"}
{"code": "def ssa(scatterer, h_pol=True):\n    ext_xs = ext_xsect(scatterer, h_pol=h_pol)\n    return ((sca_xsect(scatterer, h_pol=h_pol) / ext_xs) if (ext_xs > 0.0) else 0.0)", "docstring": "Single-scattering albedo for the current setup, with polarization.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe single-scattering albedo.", "source": "codesearchnet"}
{"code": "def _decorator(func):\n    func.__doc__ = '\\n    Assert the condition `x {sym} y` holds element-wise.\\n\\n    This Op checks that `x[i] {sym} y[i]` holds for every pair of (possibly\\n    broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\\n    trivially satisfied.\\n\\n    If `x` {sym} `y` does not hold, `message`, as well as the first `summarize`\\n    entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.\\n\\n    When using inside `tf.function`, this API takes effects during execution.\\n    It\\'s recommended to use this API with `tf.control_dependencies` to\\n    ensure the correct execution order.\\n\\n    In the following example, without `tf.control_dependencies`, errors may\\n    not be raised at all.\\n    Check `tf.control_dependencies` for more details.\\n\\n    >>> def check_size(x):\\n    ...   with tf.control_dependencies([\\n    ...       tf.debugging.{opname}(tf.size(x), {test_var},\\n    ...                       message=\\'Bad tensor size\\')]):\\n    ...     return x\\n\\n    >>> check_size(tf.ones([2, 3], tf.float32))\\n    Traceback (most recent call last):\\n       ...\\n    InvalidArgumentError: ...\\n\\n    Args:\\n      x:  Numeric `Tensor`.\\n      y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.\\n      message: A string to prefix to the default message. (optional)\\n      summarize: Print this many entries of each tensor. (optional)\\n      name: A name for this operation (optional).  Defaults to \"{opname}\".\\n\\n    Returns:\\n      Op that raises `InvalidArgumentError` if `x {sym} y` is False. This can\\n        be used with `tf.control_dependencies` inside of `tf.function`s to\\n        block followup computation until the check has executed.\\n      @compatibility(eager)\\n      returns None\\n      @end_compatibility\\n\\n    Raises:\\n      InvalidArgumentError: if the check can be performed immediately and\\n        `x == y` is False. The check can be performed immediately during eager\\n        execution or if `x` and `y` are statically known.\\n    '.format(sym=sym, opname=opname, test_var=test_var)\n    return func", "docstring": "Decorator that adds docstring to the function for symbol `sym`.\n\nArgs:\nfunc: Function for a TensorFlow op\n\nReturns:\nA version of `func` with documentation attached.", "source": "github-repos"}
{"code": "def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0):\n  \n\n  sys_formatters = {\n      \n      \n      \n      \"systemroot\": \"c:\\\\Windows\"\n  }\n\n  \n  if path_args:\n    sys_formatters.update(path_args)\n\n  if users:\n    results = []\n    for user in users:\n      \n      user = GetUserInfo(knowledge_base, user)\n      if user:\n        formatters = dict((x.name, y) for x, y in user.ListSetFields())\n        formatters.update(sys_formatters)\n        try:\n          results.append(path.format(**formatters))\n        except KeyError:\n          pass  \n    return results\n  else:\n    try:\n      path = path.format(**sys_formatters)\n    except KeyError:\n      logging.warning(\"Failed path interpolation on %s\", path)\n      return \"\"\n    if \"{\" in path and depth < 10:\n      path = InterpolatePath(\n          path,\n          knowledge_base=knowledge_base,\n          users=users,\n          path_args=path_args,\n          depth=depth + 1)\n    return path", "docstring": "Take a string as a path on a client and interpolate with client data.\n\nArgs:\npath: A single string/unicode to be interpolated.\nknowledge_base: An rdf_client.KnowledgeBase object.\nusers: A list of string usernames, or None.\npath_args: A dict of additional args to use in interpolation. These take\nprecedence over any system provided variables.\ndepth: A counter for recursion depth.\n\nReturns:\nA single string if users is None, otherwise a list of strings.", "source": "juraj-google-style"}
{"code": "def rr_history(self, ips):\n        \n        api_name = 'opendns-rr_history'\n        fmt_url_path = u'dnsdb/ip/a/{0}.json'\n        return self._multi_get(api_name, fmt_url_path, ips)", "docstring": "Get the domains related to input ips.\n\nArgs:\nips: an enumerable of strings as ips\nReturns:\nAn enumerable of resource records and features", "source": "juraj-google-style"}
{"code": "def run_tag_processor(self, tag_proc_name):\n        \n        tag_processor = self.tag_procs[tag_proc_name]\n\n        for tag in tag_processor.find(self.soup):\n            self.process_tag(tag_proc_name, tag)", "docstring": "Run a tag processor.\n\nArgs:\ntag_proc_name: A string key that maps to the TagProcessor to run.", "source": "juraj-google-style"}
{"code": "def run_bottleneck_on_image(sess, image_data, image_data_tensor,\n                            decoded_image_tensor, resized_input_tensor,\n                            bottleneck_tensor):\n  \n  \n  resized_input_values = sess.run(decoded_image_tensor,\n                                  {image_data_tensor: image_data})\n  \n  bottleneck_values = sess.run(bottleneck_tensor,\n                               {resized_input_tensor: resized_input_values})\n  bottleneck_values = np.squeeze(bottleneck_values)\n  return bottleneck_values", "docstring": "Runs inference on an image to extract the 'bottleneck' summary layer.\n\nArgs:\nsess: Current active TensorFlow Session.\nimage_data: String of raw JPEG data.\nimage_data_tensor: Input data layer in the graph.\ndecoded_image_tensor: Output of initial image resizing and preprocessing.\nresized_input_tensor: The input node of the recognition graph.\nbottleneck_tensor: Layer before the final softmax.\n\nReturns:\nNumpy array of bottleneck values.", "source": "juraj-google-style"}
{"code": "def __init__(self, num_shards):\n    self._num_shards = num_shards", "docstring": "Creates a new `FixedShardsPartitioner`.\n\nArgs:\nnum_shards: `int`, number of shards to partition.", "source": "github-repos"}
{"code": "def _parse_package(cls, package_string):\n    (pkg, arch) = rsplit(package_string, cls._arch_sep(package_string))\n    if (arch not in KNOWN_ARCHITECTURES):\n        (pkg, arch) = (package_string, None)\n    (pkg, release) = rsplit(pkg, '-')\n    (name, version) = rsplit(pkg, '-')\n    (epoch, version) = (version.split(':', 1) if (':' in version) else ['0', version])\n    if (name.startswith('oracleasm') and name.endswith('.el5')):\n        (name, version2) = name.split('-', 1)\n        version = ((version2 + '-') + version)\n    return {'name': name, 'version': version, 'release': release, 'arch': arch, 'epoch': epoch}", "docstring": "Helper method for parsing package string.\n\nArgs:\npackage_string (str): dash separated package string such as 'bash-4.2.39-3.el7'\n\nReturns:\ndict: dictionary containing 'name', 'version', 'release' and 'arch' keys", "source": "codesearchnet"}
{"code": "def _check_put_dtypes(self, vals, indices=None):\n    if isinstance(vals, dict):\n        if not self._names:\n            raise ValueError('Staging areas must have names to enqueue a dictionary')\n        if not set(vals.keys()).issubset(self._names):\n            raise ValueError(f'Keys in dictionary to put do not match names of staging area. Dictionary: {sorted(vals.keys())}Queue: {sorted(self._names)}')\n        vals, indices, _ = zip(*[(vals[k], i, k) for i, k in enumerate(self._names) if k in vals])\n    else:\n        if self._names:\n            raise ValueError('You must enqueue a dictionary in a staging area with names')\n        if indices is None:\n            raise ValueError('Indices must be supplied when inserting a list of tensors')\n        if len(indices) != len(vals):\n            raise ValueError(f\"Number of indices {len(indices)} doesn't match number of values {len(vals)}\")\n        if not isinstance(vals, (list, tuple)):\n            vals = [vals]\n            indices = [0]\n    if not len(vals) <= len(self._dtypes):\n        raise ValueError(f'Unexpected number of inputs {len(vals)} vs {len(self._dtypes)}')\n    tensors = []\n    for val, i in zip(vals, indices):\n        dtype, shape = (self._dtypes[i], self._shapes[i])\n        if val.dtype != dtype:\n            raise ValueError(f'Datatypes do not match. Received val.dtype {str(val.dtype)} and dtype {str(dtype)}')\n        val.get_shape().assert_is_compatible_with(shape)\n        tensors.append(ops.convert_to_tensor(val, dtype=dtype, name='component_%d' % i))\n    return (tensors, indices)", "docstring": "Validate and convert `vals` to a list of `Tensor`s.\n\nThe `vals` argument can be a Tensor, a list or tuple of tensors, or a\ndictionary with tensor values.\n\nIf `vals` is a list, then the appropriate indices associated with the\nvalues must be provided.\n\nIf it is a dictionary, the staging area must have been constructed with a\n`names` attribute and the dictionary keys must match the staging area names.\n`indices` will be inferred from the dictionary keys.\nIf the staging area was constructed with a `names` attribute, `vals` must\nbe a dictionary.\n\nChecks that the dtype and shape of each value matches that\nof the staging area.\n\nArgs:\nvals: A tensor, a list or tuple of tensors, or a dictionary.\n\nReturns:\nA (tensors, indices) tuple where `tensors` is a list of `Tensor` objects\nand `indices` is a list of indices associated with the tensors.\n\nRaises:\nValueError: If `vals` or `indices` is invalid.", "source": "github-repos"}
{"code": "def add_root(self, model, setter=None):\n    if (model in self._roots):\n        return\n    self._push_all_models_freeze()\n    try:\n        self._roots.append(model)\n    finally:\n        self._pop_all_models_freeze()\n    self._trigger_on_change(RootAddedEvent(self, model, setter))", "docstring": "Add a model as a root of this Document.\n\nAny changes to this model (including to other models referred to\nby it) will trigger ``on_change`` callbacks registered on this\ndocument.\n\nArgs:\nmodel (Model) :\nThe model to add as a root of this document.\n\nsetter (ClientSession or ServerSession or None, optional) :\nThis is used to prevent \"boomerang\" updates to Bokeh apps.\n(default: None)\n\nIn the context of a Bokeh server application, incoming updates\nto properties will be annotated with the session that is\ndoing the updating. This value is propagated through any\nsubsequent change notifications that the update triggers.\nThe session can compare the event setter to itself, and\nsuppress any updates that originate from itself.", "source": "codesearchnet"}
{"code": "def minimize_best_n(Members):\n    \n\n    return(list(reversed(sorted(\n        Members, key=lambda Member: Member.fitness_score\n    ))))", "docstring": "Orders population members from lowest fitness to highest fitness\n\nArgs:\nMembers (list): list of PyGenetics Member objects\n\nReturns:\nlsit: ordered lsit of Members, from highest fitness to lowest fitness", "source": "juraj-google-style"}
{"code": "def initialize():\n    dst_path = get_user_config_path()\n    copied = False\n    if (not os.path.exists(dst_path)):\n        src_path = os.path.join(os.path.dirname(__file__), 'defaultconfig.py')\n        shutil.copyfile(src_path, dst_path)\n        copied = True\n    return (copied, dst_path)", "docstring": "Initialize a default config file if it doesn't exist yet.\n\nReturns:\ntuple: A tuple of (copied, dst_path). `copied` is a bool indicating if\nthis function created the default config file. `dst_path` is the\npath of the user config file.", "source": "codesearchnet"}
{"code": "def read_hdf(cls, path_or_buf, **kwargs):\n        \n        if cls.read_hdf_remote_task is None:\n            return super(RayIO, cls).read_hdf(path_or_buf, **kwargs)\n\n        format = cls._validate_hdf_format(path_or_buf=path_or_buf)\n\n        if format is None:\n            ErrorMessage.default_to_pandas(\n                \"File format seems to be `fixed`. For better distribution consider saving the file in `table` format. \"\n                \"df.to_hdf(format=`table`).\"\n            )\n            return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs))\n\n        columns = kwargs.get(\"columns\", None)\n        if not columns:\n            empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0)\n            columns = empty_pd_df.columns\n\n        num_partitions = cls.frame_mgr_cls._compute_num_partitions()\n        num_splits = min(len(columns), num_partitions)\n        \n        column_splits = (\n            len(columns) \n            if len(columns) % num_partitions == 0\n            else len(columns) \n        )\n        col_partitions = [\n            columns[i : i + column_splits]\n            for i in range(0, len(columns), column_splits)\n        ]\n        blk_partitions = np.array(\n            [\n                cls.read_hdf_remote_task._remote(\n                    args=(path_or_buf, cols, num_splits, kwargs),\n                    num_return_vals=num_splits + 1,\n                )\n                for cols in col_partitions\n            ]\n        ).T\n        remote_partitions = np.array(\n            [\n                [cls.frame_partition_cls(obj) for obj in row]\n                for row in blk_partitions[:-1]\n            ]\n        )\n        index_len = ray.get(blk_partitions[-1][0])\n        index = pandas.RangeIndex(index_len)\n        new_query_compiler = cls.query_compiler_cls(\n            cls.frame_mgr_cls(remote_partitions), index, columns\n        )\n        return new_query_compiler", "docstring": "Load a h5 file from the file path or buffer, returning a DataFrame.\n\nArgs:\npath_or_buf: string, buffer or path object\nPath to the file to open, or an open :class:`pandas.HDFStore` object.\nkwargs: Pass into pandas.read_hdf function.\n\nReturns:\nDataFrame constructed from the h5 file.", "source": "juraj-google-style"}
{"code": "def single_lf_summary(Y_p, Y=None):\n    \n    L = sparse.csr_matrix(arraylike_to_numpy(Y_p).reshape(-1, 1))\n    return lf_summary(L, Y)", "docstring": "Calculates coverage, overlap, conflicts, and accuracy for a single LF\n\nArgs:\nY_p: a np.array or torch.Tensor of predicted labels\nY: a np.array or torch.Tensor of true labels (if known)", "source": "juraj-google-style"}
{"code": "def make_one_shot_iterator(dataset: DatasetV1) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:\n    try:\n        return dataset._make_one_shot_iterator()\n    except AttributeError:\n        return DatasetV1Adapter(dataset)._make_one_shot_iterator()", "docstring": "Creates an iterator for elements of `dataset`.\n\nNote: The returned iterator will be initialized automatically.\nA \"one-shot\" iterator does not support re-initialization.\n\nArgs:\ndataset: A `tf.data.Dataset`.\n\nReturns:\nA `tf.data.Iterator` for elements of `dataset`.\n\n@compatibility(TF2)\nThis is a legacy API for consuming dataset elements and should only be used\nduring transition from TF 1 to TF 2. Note that using this API should be\na transient state of your code base as there are in general no guarantees\nabout the interoperability of TF 1 and TF 2 code.\n\nIn TF 2 datasets are Python iterables which means you can consume their\nelements using `for elem in dataset: ...` or by explicitly creating iterator\nvia `iterator = iter(dataset)` and fetching its elements via\n`values = next(iterator)`.\n@end_compatibility", "source": "github-repos"}
{"code": "def variant(self, case_id, variant_id):\n        \n        case_obj = self.case(case_id=case_id)\n        vcf_file_path = case_obj.variant_source\n        self.head = get_header(vcf_file_path)\n\n        self.vep_header = self.head.vep_columns\n        self.snpeff_header = self.head.snpeff_columns\n\n        handle = VCF(vcf_file_path)\n\n        for index, variant in enumerate(handle):\n            index += 1\n            line_id = get_variant_id(variant_line=str(variant)).lstrip('chrCHR')\n            if line_id == variant_id:\n                return self._format_variants(\n                    variant=variant,\n                    index=index,\n                    case_obj=case_obj,\n                    add_all_info=True\n                    )\n\n        return None", "docstring": "Return a specific variant.\n\nArgs:\ncase_id (str): Path to vcf file\nvariant_id (str): A variant id\n\nReturns:\nvariant (Variant): The variant object for the given id", "source": "juraj-google-style"}
{"code": "def _import_object(self, path, look_for_cls_method):\n        \n        last_nth = 2 if look_for_cls_method else 1\n        path = path.split('.')\n        module_path = '.'.join(path[:-last_nth])\n        class_name = path[-last_nth]\n        module = importlib.import_module(module_path)\n        if look_for_cls_method and path[-last_nth:][0] == path[-last_nth]:\n            class_method = path[-last_nth:][1]\n        else:\n            class_method = None\n        return getattr(module, class_name), class_name, class_method", "docstring": "Imports the module that contains the referenced method.\n\nArgs:\npath: python path of class/function\nlook_for_cls_method (bool): If True, treat the last part of path as class method.\n\nReturns:\nTuple. (class object, class name, method to be called)", "source": "juraj-google-style"}
{"code": "def hour(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `hour`'.format(value))\n        if (value < 1):\n            raise ValueError('value need to be greater or equal 1 for field `hour`')\n        if (value > 24):\n            raise ValueError('value need to be smaller 24 for field `hour`')\n    self._hour = value", "docstring": "Corresponds to IDD Field `hour`\n\nArgs:\nvalue (int): value for IDD Field `hour`\nvalue >= 1\nvalue <= 24\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def create(self, msgtype, *args, **kwargs):\n    if (msgtype not in self._messages):\n        raise ProtocolError(('Unknown message type %r for protocol version %s' % (msgtype, self._version)))\n    return self._messages[msgtype].create(*args, **kwargs)", "docstring": "Create a new Message instance for the given type.\n\nArgs:\nmsgtype (str) :", "source": "codesearchnet"}
{"code": "def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq, overlapping):\n    input_shape = input_tensor.shape\n    output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1, input_shape[3])\n    output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)\n    for batch in range(input_shape[0]):\n        for channel in range(input_shape[3]):\n            two_dim_slice = input_tensor[batch, :, :, channel]\n            tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)\n            output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(tmp, col_seq, overlapping)\n    return output_tensor", "docstring": "Get expected fractional average pooling result.\n\nrow_seq and col_seq together defines the fractional pooling region.\n\nArgs:\ninput_tensor: Original input tensor, assuming it is a 4-D tensor, with\ndimension as [batch, height/row, width/column, channels/depth].\nrow_seq: Cumulative pooling sequence along row.\ncol_seq: Cumulative pooling sequence along column.\noverlapping: Use overlapping when doing pooling.\n\nReturns:\nA 4-D tensor that is the result of average pooling on input_tensor based\non pooling region defined by row_seq and col_seq, conditioned on whether\nor not overlapping is used.", "source": "github-repos"}
{"code": "def has_deprecation_decorator(symbol):\n    decorators, symbol = tf_decorator.unwrap(symbol)\n    if contains_deprecation_decorator(decorators):\n        return True\n    if tf_inspect.isfunction(symbol):\n        return False\n    if not tf_inspect.isclass(symbol):\n        return False\n    if not hasattr(symbol, '__init__'):\n        return False\n    init_decorators, _ = tf_decorator.unwrap(symbol.__init__)\n    return contains_deprecation_decorator(init_decorators)", "docstring": "Checks if given object has a deprecation decorator.\n\nWe check if deprecation decorator is in decorators as well as\nwhether symbol is a class whose __init__ method has a deprecation\ndecorator.\nArgs:\nsymbol: Python object.\n\nReturns:\nTrue if symbol has deprecation decorator.", "source": "github-repos"}
{"code": "def __call__(self, hidden_states, cls_index=None, deterministic: bool=True):\n    output = hidden_states[:, 0]\n    output = self.first_dropout(output, deterministic=deterministic)\n    output = self.summary(output)\n    output = self.activation(output)\n    output = self.last_dropout(output, deterministic=deterministic)\n    return output", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\nhidden_states (`jnp.ndarray` of shape `[batch_size, seq_len, hidden_size]`):\nThe hidden states of the last layer.\ncls_index (`jnp.ndarray` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):\nUsed if `summary_type == \"cls_index\"` and takes the last token of the sequence as classification token.\n\nReturns:\n`jnp.ndarray`: The summary of the sequence hidden states.", "source": "github-repos"}
{"code": "def freeze_graph(sess, input_tensors, output_tensors):\n    graph_def = _convert_to_constants.disable_lower_using_switch_merge(sess.graph_def)\n    config = get_grappler_config(['function'])\n    graph_def = run_graph_optimizations(graph_def, input_tensors, output_tensors, config, graph=sess.graph)\n    hinted_outputs_nodes = find_all_hinted_output_nodes(sess)\n    if hinted_outputs_nodes:\n        return _convert_op_hints_if_present(sess, graph_def, output_tensors, hinted_outputs_nodes)\n    if not is_frozen_graph(sess):\n        output_node_names = [tensor.name.split(':')[0] for tensor in output_tensors]\n        return _convert_to_constants.convert_variables_to_constants(sess, graph_def, output_node_names)\n    else:\n        return sess.graph_def", "docstring": "Returns a frozen GraphDef.\n\nRuns a Grappler pass and freezes a graph with Variables in it. Otherwise the\nexisting GraphDef is returned. The Grappler pass is only run on models that\nare frozen in order to inline the functions in the graph.\nIf OpHints is present, it will try to convert the OpHint graph.\n\nArgs:\nsess: TensorFlow Session.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors (only .name is used from this).\n\nReturns:\nFrozen GraphDef.", "source": "github-repos"}
{"code": "def serial_wire_viewer(jlink_serial, device):\n    \n    buf = StringIO.StringIO()\n    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)\n    jlink.open(serial_no=jlink_serial)\n\n    \n    \n    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)\n    jlink.connect(device, verbose=True)\n    jlink.coresight_configure()\n    jlink.set_reset_strategy(pylink.enums.JLinkResetStrategyCortexM3.RESETPIN)\n\n    \n    jlink.reset()\n    jlink.halt()\n\n    cpu_speed = jlink.cpu_speed()\n    swo_speed = jlink.swo_supported_speeds(cpu_speed, 10)[0]\n\n    \n    jlink.swo_start(swo_speed)\n    jlink.swo_flush()\n\n    \n    sys.stdout.write('Serial Wire Viewer\\n')\n    sys.stdout.write('Press Ctrl-C to Exit\\n')\n    sys.stdout.write('Reading data from port 0:\\n\\n')\n\n    \n    jlink.reset(ms=10, halt=False)\n\n    \n    \n    try:\n        while True:\n            \n            num_bytes = jlink.swo_num_bytes()\n\n            if num_bytes == 0:\n                \n                time.sleep(1)\n                continue\n\n            data = jlink.swo_read_stimulus(0, num_bytes)\n            sys.stdout.write(''.join(map(chr, data)))\n            sys.stdout.flush()\n    except KeyboardInterrupt:\n        pass\n\n    sys.stdout.write('\\n')\n\n    \n    jlink.swo_stop()\n\n    return 0", "docstring": "Implements a Serial Wire Viewer (SWV).\n\nA Serial Wire Viewer (SWV) allows us implement real-time logging of output\nfrom a connected device over Serial Wire Output (SWO).\n\nArgs:\njlink_serial (str): the J-Link serial number\ndevice (str): the target CPU\n\nReturns:\nAlways returns ``0``.\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def _WriteRow(self, output_writer, values, in_bold=False):\n    row_strings = []\n    for (value_index, value_string) in enumerate(values):\n        padding_size = (self._column_sizes[value_index] - len(value_string))\n        padding_string = (' ' * padding_size)\n        row_strings.extend([value_string, padding_string])\n    row_strings.pop()\n    row_strings = ''.join(row_strings)\n    if (in_bold and (not win32console)):\n        row_strings = '\\x1b[1m{0:s}\\x1b[0m'.format(row_strings)\n    output_writer.Write('{0:s}\\n'.format(row_strings))", "docstring": "Writes a row of values aligned with the width to the output writer.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.\nvalues (list[object]): values.\nin_bold (Optional[bool]): True if the row should be written in bold.", "source": "codesearchnet"}
{"code": "def add_component(self, component, temporary=False):\n    tile = IOTile(component)\n    value = os.path.normpath(os.path.abspath(component))\n    if (temporary is True):\n        self._component_overlays[tile.name] = value\n    else:\n        self.kvstore.set(tile.name, value)", "docstring": "Register a component with ComponentRegistry.\n\nComponent must be a buildable object with a module_settings.json file\nthat describes its name and the domain that it is part of.  By\ndefault, this component is saved in the permanent registry associated\nwith this environment and will remain registered for future CoreTools\ninvocations.\n\nIf you only want this component to be temporarily registered during\nthis program's session, you can pass temporary=True and the component\nwill be stored in RAM only, not persisted to the underlying key-value\nstore.\n\nArgs:\ncomponent (str): The path to a component that should be registered.\ntemporary (bool): Optional flag to only temporarily register the\ncomponent for the duration of this program invocation.", "source": "codesearchnet"}
{"code": "def write(self, name, **data):\n        \n\n        data[\"name\"] = name\n        if not (\"timestamp\" in data):\n            data[\"timestamp\"] = datetime.utcnow()\n\n        try:\n            self.producer.send(topic=self.topic, value=data)\n            self.producer.flush()\n        except (KafkaTimeoutError, NoBrokersAvailable) as exc:\n            logger.warning('writing metric %r failure %r', data, exc)", "docstring": "Write the metric to kafka\n\nArgs:\nname (str): The name of the metric to write\ndata (dict): Additional data to store with the metric", "source": "juraj-google-style"}
{"code": "def _ParseVSSProcessingOptions(self, options):\n    \n    vss_only = False\n    vss_stores = None\n\n    self._process_vss = not getattr(options, 'no_vss', False)\n    if self._process_vss:\n      vss_only = getattr(options, 'vss_only', False)\n      vss_stores = getattr(options, 'vss_stores', None)\n\n    if vss_stores:\n      try:\n        self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')\n      except ValueError:\n        raise errors.BadConfigOption('Unsupported VSS stores')\n\n    self._vss_only = vss_only\n    self._vss_stores = vss_stores", "docstring": "Parses the VSS processing options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def softplus_and_shift(x, shift=1e-5, name=None):\n  \n  with tf.compat.v1.name_scope(name, 'softplus_and_shift', [x, shift]):\n    x = tf.convert_to_tensor(value=x, name='x')\n    y = tf.nn.softplus(x)\n    if shift is not None:\n      y += shift\n    return y", "docstring": "Converts (batch of) scalars to (batch of) positive valued scalars.\n\nArgs:\nx: (Batch of) `float`-like `Tensor` representing scalars which will be\ntransformed into positive elements.\nshift: `Tensor` added to `softplus` transformation of elements.\nDefault value: `1e-5`.\nname: A `name_scope` name for operations created by this function.\nDefault value: `None` (i.e., \"positive_tril_with_shift\").\n\nReturns:\nscale: (Batch of) scalars`with `x.dtype` and `x.shape`.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, fields, mapping):\n    iterable = ([None] * len(fields))\n    for (key, value) in mapping.items():\n        try:\n            index = fields.index(key)\n        except KeyError:\n            raise ItsdbError(('Invalid field name(s): ' + key))\n        iterable[index] = value\n    return cls(fields, iterable)", "docstring": "Create a Record from a dictionary of field mappings.\n\nThe *fields* object is used to determine the column indices\nof fields in the mapping.\n\nArgs:\nfields: the Relation schema for the table of this record\nmapping: a dictionary or other mapping from field names to\ncolumn values\nReturns:\na :class:`Record` object", "source": "codesearchnet"}
{"code": "def get_tool_filepath(self, tool_alias):\n    tools_dict = self.get_tools()\n    if (tool_alias in tools_dict):\n        if (self.tools_path is None):\n            return None\n        else:\n            return os.path.join(self.tools_path, tool_alias)\n    else:\n        return None", "docstring": "Given a visible tool alias, return the full path to the executable.\n\nArgs:\ntool_alias (str): Tool alias to search for.\n\nReturns:\n(str): Filepath of executable, or None if the tool is not in the\nsuite. May also return None because this suite has not been saved\nto disk, so a filepath hasn't yet been established.", "source": "codesearchnet"}
{"code": "def loss_masks(self, masks_queries_logits: torch.Tensor, mask_labels: List[torch.Tensor], indices: Tuple[np.array], num_masks: int) -> Dict[str, torch.Tensor]:\n    src_idx = self._get_predictions_permutation_indices(indices)\n    tgt_idx = self._get_targets_permutation_indices(indices)\n    pred_masks = masks_queries_logits[src_idx]\n    target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)\n    target_masks = target_masks[tgt_idx]\n    pred_masks = pred_masks[:, None]\n    target_masks = target_masks[:, None]\n    with torch.no_grad():\n        point_coordinates = self.sample_points_using_uncertainty(pred_masks, lambda logits: self.calculate_uncertainty(logits), self.num_points, self.oversample_ratio, self.importance_sample_ratio)\n        point_labels = sample_point(target_masks, point_coordinates, align_corners=False).squeeze(1)\n    point_logits = sample_point(pred_masks, point_coordinates, align_corners=False).squeeze(1)\n    losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}\n    del pred_masks\n    del target_masks\n    return losses", "docstring": "Compute the losses related to the masks using sigmoid_cross_entropy_loss and dice loss.\n\nArgs:\nmasks_queries_logits (`torch.Tensor`):\nA tensor of shape `(batch_size, num_queries, height, width)`.\nmask_labels (`torch.Tensor`):\nList of mask labels of shape `(labels, height, width)`.\nindices (`Tuple[np.array])`:\nThe indices computed by the Hungarian matcher.\nnum_masks (`int)`:\nThe number of masks, used for normalization.\n\nReturns:\nlosses (`Dict[str, Tensor]`): A dict of `torch.Tensor` containing two keys:\n- **loss_mask** -- The loss computed using sigmoid cross entropy loss on the predicted and ground truth.\nmasks.\n- **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth,\nmasks.", "source": "github-repos"}
{"code": "def write(self, record):\n    super(TFRecordWriter, self).write(record)", "docstring": "Write a string record to the file.\n\nArgs:\nrecord: str", "source": "github-repos"}
{"code": "def __init__(self, output_mediator):\n    \n    super(XLSXOutputModule, self).__init__(output_mediator)\n    self._column_widths = {}\n    self._current_row = 0\n    self._dynamic_fields_helper = dynamic.DynamicFieldsHelper(output_mediator)\n    self._fields = self._DEFAULT_FIELDS\n    self._filename = None\n    self._sheet = None\n    self._timestamp_format = self._DEFAULT_TIMESTAMP_FORMAT\n    self._workbook = None", "docstring": "Initializes an Excel Spreadsheet (XLSX) output module.\n\nArgs:\noutput_mediator (OutputMediator): output mediator.", "source": "juraj-google-style"}
{"code": "def post_state(self, name, state):\n        \n\n        self.post_command(OPERATIONS.CMD_UPDATE_STATE,\n                          {'name': name, 'new_status': state})", "docstring": "Asynchronously try to update the state for a service.\n\nIf the update fails, nothing is reported because we don't wait for a\nresponse from the server.  This function will return immmediately and\nnot block.\n\nArgs:\nname (string): The name of the service\nstate (int): The new state of the service", "source": "juraj-google-style"}
{"code": "def upsert_project(self, project, id=None, description=None, entity=None):\n        \n        mutation = gql()\n        response = self.gql(mutation, variable_values={\n            'name': self.format_project(project), 'entity': entity or self.settings('entity'),\n            'description': description, 'repo': self.git.remote_url, 'id': id})\n        return response['upsertModel']['model']", "docstring": "Create a new project\n\nArgs:\nproject (str): The project to create\ndescription (str, optional): A description of this project\nentity (str, optional): The entity to scope this project to.", "source": "juraj-google-style"}
{"code": "def createRoles(self, configFiles, dateTimeFormat=None):\n        \n        if dateTimeFormat is None:\n            dateTimeFormat = '%Y-%m-%d %H:%M'\n\n        scriptStartTime = datetime.datetime.now()\n        try:\n\n            print (\"********************Create Roles********************\")\n\n            print (\"Script started at %s\" % scriptStartTime.strftime(dateTimeFormat))\n\n            if self.securityhandler.valid == False:\n                print (\"Login required\")\n            else:\n                orgTools = orgtools.orgtools(securityinfo=self)\n\n                if orgTools is None:\n                    print (\"Error creating orgtools\")\n                else:\n\n                    for configFile in configFiles:\n\n                        config = common.init_config_json(config_file=configFile)\n                        if config is not None:\n\n                            startTime = datetime.datetime.now()\n                            print (\"Processing config %s, starting at: %s\" % (configFile,startTime.strftime(dateTimeFormat)))\n\n                            roleInfos = config['Roles']\n                            for roleInfo in roleInfos:\n                                createRoleResults = orgTools.createRole(roleInfo['Name'],roleInfo['Description'],roleInfo['Privileges'])\n\n                        else:\n                            print (\"Config %s not found\" % configFile)\n\n\n        except(TypeError,ValueError,AttributeError) as e:\n            print (e)\n        except (common.ArcRestHelperError) as e:\n            print (\"error in function: %s\" % e[0]['function'])\n            print (\"error on line: %s\" % e[0]['line'])\n            print (\"error in file name: %s\" % e[0]['filename'])\n            print (\"with error message: %s\" % e[0]['synerror'])\n            if 'arcpyError' in e[0]:\n                print (\"with arcpy message: %s\" % e[0]['arcpyError'])\n\n        except Exception as e:\n            if (reportToolsInstalled):\n                if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)):\n                    print (\"error in function: %s\" % e[0]['function'])\n                    print (\"error on line: %s\" % e[0]['line'])\n                    print (\"error in file name: %s\" % e[0]['filename'])\n                    print (\"with error message: %s\" % e[0]['synerror'])\n                    if 'arcpyError' in e[0]:\n                        print (\"with arcpy message: %s\" % e[0]['arcpyError'])\n                else:\n                    line, filename, synerror = trace()\n                    print (\"error on line: %s\" % line)\n                    print (\"error in file name: %s\" % filename)\n                    print (\"with error message: %s\" % synerror)\n            else:\n                line, filename, synerror = trace()\n                print (\"error on line: %s\" % line)\n                print (\"error in file name: %s\" % filename)\n                print (\"with error message: %s\" % synerror)\n        finally:\n            print (\"Script complete, time to complete: %s\" % str(datetime.datetime.now() - scriptStartTime))\n            print (\"\n            print (\"\")\n\n            \n                \n            groupInfo = None\n            groupFile = None\n            iconPath = None\n            startTime = None\n            thumbnail = None\n            result = None\n            config = None\n            sciptPath = None\n            orgTools = None\n            del groupInfo\n  
          del groupFile\n            del iconPath\n            del startTime\n            del thumbnail\n            del result\n            del config\n            del sciptPath\n            del orgTools\n\n            gc.collect()", "docstring": "Parses a JSON configuration file to create roles.\n\nArgs:\nconfigFiles (list): A list of JSON files on disk containing\nconfiguration data for creating roles.\ndateTimeFormat (str): A valid date formatting directive, as understood\nby :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e.,\n``'%Y-%m-%d %H:%M'``.", "source": "juraj-google-style"}
{"code": "def _get_timestamp_ms(when):\n  \n  if when is None:\n    return None\n  ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)\n  ms_since_epoch += when.microsecond / 1000.0\n  return int(ms_since_epoch)", "docstring": "Converts a datetime.datetime to integer milliseconds since the epoch.\n\nRequires special handling to preserve microseconds.\n\nArgs:\nwhen: A datetime.datetime instance.\n\nReturns:\nInteger time since the epoch in milliseconds. If the supplied 'when' is\nNone, the return value will be None.", "source": "juraj-google-style"}
{"code": "def register_for_auto_class(cls, auto_class='AutoImageProcessor'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom image processors as the ones\nin the library are already mapped with `AutoImageProcessor `.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"AutoImageProcessor \"`):\nThe auto class to register this new image processor with.", "source": "github-repos"}
{"code": "def GenerateBand(self, band, meta_only=False, cast=False):\n    if (not meta_only):\n        fname = band.get('file_name')\n        data = self.ReadTif(('%s/%s' % (os.path.dirname(self.filename), fname)))\n\n    def FixBitmap(d):\n        p = d.get('bitmap_description')\n        if p:\n            lis = p.get('bit')\n            bm = dict()\n            for i in lis:\n                key = i['num']\n                value = i['text']\n                bm[key] = value\n            del d['bitmap_description']\n            d['bitmap_description'] = bm\n        return d\n    band = SetProperties(Band, FixBitmap(self.CleanDict(band)))\n    if (not meta_only):\n        if cast:\n            data = data.astype(np.float32)\n            data[(data == band.fill_value)] = (- 9999)\n            if (band.valid_range is not None):\n                data[(data < band.valid_range.min)] = (- 9999)\n                data[(data > band.valid_range.max)] = (- 9999)\n            data[(data == (- 9999))] = np.nan\n        else:\n            data = np.ma.masked_where((data == band.fill_value), data)\n            if (band.valid_range is not None):\n                data = np.ma.masked_where((data < band.valid_range.min), data)\n                data = np.ma.masked_where((data > band.valid_range.max), data)\n        if self.yflip:\n            data = np.flip(data, 0)\n        band.data = data\n    if (not meta_only):\n        band.validate()\n    return band", "docstring": "Genreate a Band object given band metadata\n\nArgs:\nband (dict): dictionary containing metadata for a given band\n\nReturn:\nBand : the loaded Band onject", "source": "codesearchnet"}
{"code": "def dump(self):\n    walkers = {}\n    walkers.update({str(walker.selector): walker.dump() for walker in self._queue_walkers})\n    walkers.update({str(walker.selector): walker.dump() for walker in self._virtual_walkers})\n    return {u'engine': self._engine.dump(), u'rollover_storage': self._rollover_storage, u'rollover_streaming': self._rollover_streaming, u'last_values': {str(stream): reading.asdict() for (stream, reading) in self._last_values.items()}, u'walkers': walkers}", "docstring": "Dump the state of this SensorLog.\n\nThe purpose of this method is to be able to restore the same state\nlater.  However there are links in the SensorLog for stream walkers.\n\nSo the dump process saves the state of each stream walker and upon\nrestore, it looks through the current set of stream walkers and\nrestores each one that existed when dump() was called to its state.\n\nReturns:\ndict: The serialized state of this SensorLog.", "source": "codesearchnet"}
{"code": "def has_thread(prefix, running_threads):\n    for thread in running_threads:\n        if thread.startswith(prefix):\n            return True\n    return False", "docstring": "Returns whether any 'running_threads' is prefixed with 'prefix'.\n\nArgs:\nprefix: The prefix of the expected thread name.\nrunning_threads: A collection of the running thread names.", "source": "github-repos"}
{"code": "def check_accessible(value_provider_list):\n    assert isinstance(value_provider_list, list)\n\n    def _check_accessible(fnc):\n\n        @wraps(fnc)\n        def _f(self, *args, **kwargs):\n            for obj in [getattr(self, vp) for vp in value_provider_list]:\n                if not obj.is_accessible():\n                    raise error.RuntimeValueProviderError('%s not accessible' % obj)\n            return fnc(self, *args, **kwargs)\n        return _f\n    return _check_accessible", "docstring": "A decorator that checks accessibility of a list of ValueProvider objects.\n\nArgs:\nvalue_provider_list: list of ValueProvider objects\nRaises:\n``RuntimeValueProviderError``: if any of the provided objects are not\naccessible.", "source": "github-repos"}
{"code": "def remove(path, force=False):\n    \n    \n    \n    \n\n    path = os.path.expanduser(path)\n\n    if not os.path.isabs(path):\n        raise SaltInvocationError('File path must be absolute: {0}'.format(path))\n\n    \n    if not os.path.exists(path) and not is_link(path):\n        raise CommandExecutionError('Path not found: {0}'.format(path))\n\n    \n    if force:\n        \n        file_attributes = win32api.GetFileAttributes(path)\n        win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)\n\n    try:\n        if os.path.isfile(path):\n            \n            os.remove(path)\n        elif is_link(path):\n            \n            os.rmdir(path)\n        else:\n            for name in os.listdir(path):\n                item = '{0}\\\\{1}'.format(path, name)\n                \n                remove(item, force)\n\n            \n            os.rmdir(path)\n    except (OSError, IOError) as exc:\n        if force:\n            \n            win32api.SetFileAttributes(path, file_attributes)\n        raise CommandExecutionError(\n            'Could not remove \\'{0}\\': {1}'.format(path, exc)\n        )\n\n    return True", "docstring": "Remove the named file or directory\n\nArgs:\npath (str): The path to the file or directory to remove.\nforce (bool): Remove even if marked Read-Only. Default is False\n\nReturns:\nbool: True if successful, False if unsuccessful\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.remove C:\\\\Temp", "source": "juraj-google-style"}
{"code": "def fetch(self, url):\n        \n        opener = self._urllib.build_opener()\n        opener.addheaders = self._requestHeaders.items()\n        response = opener.open(url)\n        headers = response.info()\n        raw = response.read()\n        raw = raw.decode('utf8')\n\n        if not 'Content-Type' in headers:\n            raise OEmbedError('Missing mime-type in response')\n\n        if headers['Content-Type'].find('application/xml') != -1 or \\\n           headers['Content-Type'].find('text/xml') != -1:\n            response = OEmbedResponse.newFromXML(raw)\n        elif headers['Content-Type'].find('application/json') != -1 or \\\n             headers['Content-Type'].find('text/javascript') != -1 or \\\n             headers['Content-Type'].find('text/json') != -1:\n            response = OEmbedResponse.newFromJSON(raw)\n        else:\n            raise OEmbedError('Invalid mime-type in response - %s' % headers['Content-Type'])\n\n        return response", "docstring": "Fetch url and create a response object according to the mime-type.\n\nArgs:\nurl: The url to fetch data from\n\nReturns:\nOEmbedResponse object according to data fetched", "source": "juraj-google-style"}
{"code": "class XCLIPEncoder(nn.Module):\n\n    def __init__(self, config: XCLIPConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([XCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`XCLIPEncoderLayer`].\n\nArgs:\nconfig: XCLIPConfig", "source": "github-repos"}
{"code": "def blackman(x):\n    if any_symbolic_tensors((x,)):\n        return Blackman().symbolic_call(x)\n    return backend.numpy.blackman(x)", "docstring": "Blackman window function.\nThe Blackman window is a taper formed by using a weighted cosine.\n\nArgs:\nx: Scalar or 1D Tensor. Window length.\n\nReturns:\nA 1D tensor containing the Blackman window values.\n\nExample:\n>>> x = keras.ops.convert_to_tensor(5)\n>>> keras.ops.blackman(x)\narray([-1.3877788e-17,  3.4000000e-01,  1.0000000e+00,  3.4000000e-01,\n-1.3877788e-17], dtype=float32)", "source": "github-repos"}
{"code": "def get_settings(self):\n    uri = '{}/settings'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Gets the interconnect settings for a logical interconnect group.\n\nReturns:\ndict: Interconnect Settings.", "source": "codesearchnet"}
{"code": "def recursive_copy(source, destination):\n    if os.path.isdir(source):\n        copy_tree(source, destination)", "docstring": "A wrapper around distutils.dir_util.copy_tree but won't throw any exception when the source\ndirectory does not exist.\n\nArgs:\nsource (str): source path\ndestination (str): destination path", "source": "codesearchnet"}
{"code": "def __call__(self, name, value):\n        \n        super(FloatTypeChecker, self).__call__(name, value)\n        if isinstance(self.minimum, float):\n            if value < self.minimum:\n                raise ValueError(\"%s must be greater or equal %s\" % (name, self.minimum))\n        if isinstance(self.maximum, float):\n            if value > self.maximum:\n                raise ValueError(\"%s must be less or equal %s\" % (name, self.maximum))", "docstring": "Call method.\n\nArgs:\nname (str): the value's name.\nvalue (float): the value to check.\n\nRaises:\nValueError: if value is not type float.\nValueError: if value is less than minimum.\nValueError: if value is more than maximum.", "source": "juraj-google-style"}
{"code": "def pyrdf(value, class_type=None, datatype=None, **kwargs):\n    \n    if isinstance(value, BaseRdfDataType):\n        return value\n    if isinstance(value, dict):\n        value = value.copy()\n        class_type = value.pop('type')\n        try:\n            datatype = value.pop('datatype')\n        except KeyError:\n            datatype = __TYPE_MATCH__[class_type]\n        kwargs = value\n        value = kwargs.pop('value')\n    if not class_type:\n        class_type = 'literal'\n    if not datatype:\n        datatype = type(value)\n    try:\n        \n        return __DT_LOOKUP__[class_type][datatype](value, **kwargs)\n    except KeyError:\n        rtn_val = BaseRdfDataType(value)\n        rtn_val.datatype = Uri(datatype)\n        return rtn_val", "docstring": "Coverts an input to one of the rdfdatatypes classes\n\nArgs:\nvalue: any rdfdatatype, json dict or vlaue\nclass_type: \"literal\", \"uri\" or \"blanknode\"\ndatatype: \"xsd:string\", \"xsd:int\" , etc\nkwargs:\nlang: language tag", "source": "juraj-google-style"}
{"code": "def filter(self, **filters):\n    for (flt, val) in self._flt.items():\n        self._flt[flt] = filters.pop(flt, val)\n    if filters:\n        raise error.UnknownFiltersError(filters.keys())\n    return self", "docstring": "Update filters with provided arguments.\n\nNote that filters are only resolved when the view is iterated, and\nhence they do not compose. Each call to filter merely updates the\nrelevant filters. For example, with this code::\n\nview = sdat.steps[500:].filter(rprof=True, fields=['T'])\nview.filter(fields=[])\n\nthe produced ``view``, when iterated, will generate the steps after the\n500-th that have radial profiles. The ``fields`` filter set in the\nfirst line is emptied in the second line.\n\nArgs:\nsnap (bool): the step must be a snapshot to pass.\nrprof (bool): the step must have rprof data to pass.\nfields (list): list of fields that must be present to pass.\nfunc (function): arbitrary function taking a\n:class:`~stagpy._step.Step` as argument and returning a True\nvalue if the step should pass the filter.\n\nReturns:\nself.", "source": "codesearchnet"}
{"code": "def get_twitter_id(self, cache=True):\n        \n        if not (cache and ('twitter' in self.cache)):\n            response = self.get_attribute('twitter')\n            self.cache['twitter'] = response['artist'].get('twitter')\n        return self.cache['twitter']", "docstring": "Get the twitter id for this artist if it exists\n\nArgs:\n\nKwargs:\n\nReturns:\nA twitter ID string\n\nExample:\n\n>>> a = artist.Artist('big boi')\n>>> a.get_twitter_id()\nu'BigBoi'\n>>>", "source": "juraj-google-style"}
{"code": "class DonutSwinPatchMerging(nn.Module):\n\n    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:\n        super().__init__()\n        self.input_resolution = input_resolution\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(4 * dim)\n\n    def maybe_pad(self, input_feature, height, width):\n        should_pad = height % 2 == 1 or width % 2 == 1\n        if should_pad:\n            pad_values = (0, 0, 0, width % 2, 0, height % 2)\n            input_feature = nn.functional.pad(input_feature, pad_values)\n        return input_feature\n\n    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:\n        height, width = input_dimensions\n        batch_size, dim, num_channels = input_feature.shape\n        input_feature = input_feature.view(batch_size, height, width, num_channels)\n        input_feature = self.maybe_pad(input_feature, height, width)\n        input_feature_0 = input_feature[:, 0::2, 0::2, :]\n        input_feature_1 = input_feature[:, 1::2, 0::2, :]\n        input_feature_2 = input_feature[:, 0::2, 1::2, :]\n        input_feature_3 = input_feature[:, 1::2, 1::2, :]\n        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)\n        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)\n        input_feature = self.norm(input_feature)\n        input_feature = self.reduction(input_feature)\n        return input_feature", "docstring": "Patch Merging Layer.\n\nArgs:\ninput_resolution (`Tuple[int]`):\nResolution of input feature.\ndim (`int`):\nNumber of input channels.\nnorm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def create_batch(cls, size, **kwargs):\n        \n        return [cls.create(**kwargs) for _ in range(size)]", "docstring": "Create a batch of instances of the given class, with overriden attrs.\n\nArgs:\nsize (int): the number of instances to create\n\nReturns:\nobject list: the created instances", "source": "juraj-google-style"}
{"code": "def unstage_signature(vcs, signature):\n    evidence_path = _get_staged_history_path(vcs)\n    staged = get_staged_signatures(vcs)\n    if (signature not in staged):\n        raise NotStagedError\n    staged.remove(signature)\n    string = '\\n'.join(staged)\n    with open(evidence_path, 'w') as f:\n        f.write(string)", "docstring": "Remove `signature` from the list of staged signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (basestring)\n\nRaises:\nNotStagedError", "source": "codesearchnet"}
{"code": "def _try_put(self, item):\n    try:\n        self._event_queue.put(item)\n    except QueueClosedError:\n        self._internal_close()\n        if self._worker.failure_exc_info:\n            _, exception, _ = self._worker.failure_exc_info\n            raise exception from None", "docstring": "Attempts to enqueue an item to the event queue.\n\nIf the queue is closed, this will close the EventFileWriter and reraise the\nexception that caused the queue closure, if one exists.\n\nArgs:\nitem: the item to enqueue", "source": "github-repos"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):\n    if (kmip_version < enums.KMIPVersion.KMIP_1_3):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the RNGParameters object.'.format(kmip_version.value))\n    super(RNGParameters, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.RNG_ALGORITHM, local_buffer):\n        rng_algorithm = primitives.Enumeration(enums.RNGAlgorithm, tag=enums.Tags.RNG_ALGORITHM)\n        rng_algorithm.read(local_buffer, kmip_version=kmip_version)\n        self._rng_algorithm = rng_algorithm\n    else:\n        raise exceptions.InvalidKmipEncoding('The RNGParameters encoding is missing the RNG algorithm.')\n    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_ALGORITHM, local_buffer):\n        cryptographic_algorithm = primitives.Enumeration(enums.CryptographicAlgorithm, tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM)\n        cryptographic_algorithm.read(local_buffer, kmip_version=kmip_version)\n        self._cryptographic_algorithm = cryptographic_algorithm\n    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_LENGTH, local_buffer):\n        cryptographic_length = primitives.Integer(tag=enums.Tags.CRYPTOGRAPHIC_LENGTH)\n        cryptographic_length.read(local_buffer, kmip_version=kmip_version)\n        self._cryptographic_length = cryptographic_length\n    if self.is_tag_next(enums.Tags.HASHING_ALGORITHM, local_buffer):\n        hashing_algorithm = primitives.Enumeration(enums.HashingAlgorithm, tag=enums.Tags.HASHING_ALGORITHM)\n        hashing_algorithm.read(local_buffer, kmip_version=kmip_version)\n        self._hashing_algorithm = hashing_algorithm\n    if self.is_tag_next(enums.Tags.DRBG_ALGORITHM, local_buffer):\n        drbg_algorithm = primitives.Enumeration(enums.DRBGAlgorithm, tag=enums.Tags.DRBG_ALGORITHM)\n        drbg_algorithm.read(local_buffer, kmip_version=kmip_version)\n        self._drbg_algorithm = drbg_algorithm\n    if self.is_tag_next(enums.Tags.RECOMMENDED_CURVE, local_buffer):\n        recommended_curve = primitives.Enumeration(enums.RecommendedCurve, tag=enums.Tags.RECOMMENDED_CURVE)\n        recommended_curve.read(local_buffer, kmip_version=kmip_version)\n        self._recommended_curve = recommended_curve\n    if self.is_tag_next(enums.Tags.FIPS186_VARIATION, local_buffer):\n        fips186_variation = primitives.Enumeration(enums.FIPS186Variation, tag=enums.Tags.FIPS186_VARIATION)\n        fips186_variation.read(local_buffer, kmip_version=kmip_version)\n        self._fips186_variation = fips186_variation\n    if self.is_tag_next(enums.Tags.PREDICTION_RESISTANCE, local_buffer):\n        prediction_resistance = primitives.Boolean(tag=enums.Tags.PREDICTION_RESISTANCE)\n        prediction_resistance.read(local_buffer, kmip_version=kmip_version)\n        self._prediction_resistance = prediction_resistance\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the RNGParameters structure and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. 
Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nInvalidKmipEncoding: Raised if the RNG algorithm is missing from\nthe encoding.\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the RNGParameters structure.", "source": "codesearchnet"}
{"code": "def GetPreviousNonBlankLine(clean_lines, linenum):\n  \n\n  prevlinenum = linenum - 1\n  while prevlinenum >= 0:\n    prevline = clean_lines.elided[prevlinenum]\n    if not IsBlankLine(prevline):     \n      return (prevline, prevlinenum)\n    prevlinenum -= 1\n  return ('', -1)", "docstring": "Return the most recent non-blank line and its line number.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file contents.\nlinenum: The number of the line to check.\n\nReturns:\nA tuple with two elements.  The first element is the contents of the last\nnon-blank line before the current line, or the empty string if this is the\nfirst non-blank line.  The second is the line number of that line, or -1\nif this is the first non-blank line.", "source": "juraj-google-style"}
{"code": "def bind(self, isnap, istep):\n        \n        self._isteps[isnap] = istep\n        self.sdat.steps[istep].isnap = isnap", "docstring": "Register the isnap / istep correspondence.\n\nUsers of :class:`StagyyData` should not use this method.\n\nArgs:\nisnap (int): snapshot index.\nistep (int): time step index.", "source": "juraj-google-style"}
{"code": "def __init__(self, credentials=None):\n    \n    if credentials is None:\n      credentials = _utils.get_credentials()\n    self._api = _api.Api(credentials)", "docstring": "Initialize the Projects object.\n\nArgs:\ncredentials: the credentials for the account.", "source": "juraj-google-style"}
{"code": "def all_days(boo):\n  \n  earliest = datetime.strptime(('2015-11-12').replace('-', ' '), '%Y %m %d')\n  latest = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')\n  num_days = (latest - earliest).days + 1\n  all_days = [latest - timedelta(days=x) for x in range(num_days)]\n  all_days.reverse()\n\n  output = []\n\n  if boo:\n    \n    for d in all_days:\n      output.append(int(str(d).replace('-', '')[:8]))\n  else:\n    \n    for d in all_days:\n      output.append(str(d)[:10])\n  return output", "docstring": "Return a list of all dates from 11/12/2015 to the present.\n\nArgs:\nboo: if true, list contains Numbers (20151230); if false, list contains Strings (\"2015-12-30\")\nReturns:\nlist of either Numbers or Strings", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    file_header_map = self._GetDataTypeMap('binarycookies_file_header')\n    try:\n        (file_header, file_header_data_size) = self._ReadStructureFromFileObject(file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to read file header with error: {0!s}.'.format(exception))\n    if (file_header.signature != self._SIGNATURE):\n        raise errors.UnableToParseFile('Unsupported file signature.')\n    file_offset = file_header_data_size\n    page_sizes_data_size = (file_header.number_of_pages * 4)\n    page_sizes_data = file_object.read(page_sizes_data_size)\n    context = dtfabric_data_maps.DataTypeMapContext(values={'binarycookies_file_header': file_header})\n    page_sizes_map = self._GetDataTypeMap('binarycookies_page_sizes')\n    try:\n        page_sizes_array = self._ReadStructureFromByteStream(page_sizes_data, file_offset, page_sizes_map, context=context)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to map page sizes data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n    file_offset += page_sizes_data_size\n    for (page_number, page_size) in enumerate(page_sizes_array):\n        if parser_mediator.abort:\n            break\n        page_data = file_object.read(page_size)\n        if (len(page_data) != page_size):\n            parser_mediator.ProduceExtractionWarning('unable to read page: {0:d}'.format(page_number))\n            break\n        self._ParsePage(parser_mediator, file_offset, page_data)\n        file_offset += page_size", "docstring": "Parses a Safari binary cookie file-like object.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): file-like object to be parsed.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed, this will signal\nthe event extractor to apply other parsers.", "source": "codesearchnet"}
{"code": "def wait_for_file(self, fn: str, max_wait_sec: int = 3600 * 24 * 365,\n                    check_interval: float = 0.02) -> bool:\n    \n    print(\"Waiting for file\", fn)\n    start_time = time.time()\n    while True:\n      if time.time() - start_time > max_wait_sec:\n        util.log(f\"Timeout exceeded ({max_wait_sec} sec) for {fn}\")\n        return False\n      if not self.exists(fn):\n        time.sleep(check_interval)\n        continue\n      else:\n        break\n    return True", "docstring": "Waits for file maximum of max_wait_sec. Returns True if file was detected within specified max_wait_sec\nArgs:\nfn: filename on task machine\nmax_wait_sec: how long to wait in seconds\ncheck_interval: how often to check in seconds\nReturns:\nFalse if waiting was was cut short by max_wait_sec limit, True otherwise", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not\nmake use of token type ids, therefore a list of zeros is returned\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def _set_operation(a, b, set_operation, validate_indices=True):\n    if isinstance(a, sparse_tensor.SparseTensor):\n        if isinstance(b, sparse_tensor.SparseTensor):\n            indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(a.indices, a.values, a.dense_shape, b.indices, b.values, b.dense_shape, set_operation, validate_indices)\n        else:\n            raise ValueError('Sparse,Dense is not supported, but Dense,Sparse is. Please flip the order of your inputs.')\n    elif isinstance(b, sparse_tensor.SparseTensor):\n        indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)\n    else:\n        indices, values, shape = gen_set_ops.dense_to_dense_set_operation(a, b, set_operation, validate_indices)\n    return sparse_tensor.SparseTensor(indices, values, shape)", "docstring": "Compute set operation of elements in last dimension of `a` and `b`.\n\nAll but the last dimension of `a` and `b` must match.\n\nArgs:\na: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices\nmust be sorted in row-major order.\nb: `Tensor` or `SparseTensor` of the same type as `a`. Must be\n`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted\nin row-major order.\nset_operation: String indicating set operation. See\nSetOperationOp::SetOperationFromContext for valid values.\nvalidate_indices: Whether to validate the order and range of sparse indices\nin `a` and `b`.\n\nReturns:\nA `SparseTensor` with the same rank as `a` and `b`, and all but the last\ndimension the same. Elements along the last dimension contain the results\nof the set operation.\n\nRaises:\nTypeError: If inputs are invalid types.\nValueError: If `a` is sparse and `b` is dense.", "source": "github-repos"}
{"code": "def from_tensor_4x4(t: torch.Tensor) -> Rigid:\n    if t.shape[-2:] != (4, 4):\n        raise ValueError('Incorrectly shaped input tensor')\n    rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n    trans = t[..., :3, 3]\n    return Rigid(rots, trans)", "docstring": "Constructs a transformation from a homogeneous transformation tensor.\n\nArgs:\nt: [*, 4, 4] homogeneous transformation tensor\nReturns:\nT object with shape [*]", "source": "github-repos"}
{"code": "def GetValueRepresentation(cls, value, version=sorted(_SERVICE_MAP.keys())[(- 1)]):\n    if (isinstance(value, str) or isinstance(value, unicode)):\n        return {'value': value, 'xsi_type': 'TextValue'}\n    elif isinstance(value, bool):\n        return {'value': value, 'xsi_type': 'BooleanValue'}\n    elif isinstance(value, numbers.Number):\n        return {'value': value, 'xsi_type': 'NumberValue'}\n    elif isinstance(value, datetime.datetime):\n        if (value.tzinfo is None):\n            raise googleads.errors.GoogleAdsValueError(('Datetime %s is not timezone aware.' % value))\n        return {'xsi_type': 'DateTimeValue', 'value': {'date': {'year': value.year, 'month': value.month, 'day': value.day}, 'hour': value.hour, 'minute': value.minute, 'second': value.second, ('timeZoneId' if (version >= 'v201811') else 'timeZoneID'): value.tzinfo.zone}}\n    elif isinstance(value, datetime.date):\n        return {'xsi_type': 'DateValue', 'value': {'year': value.year, 'month': value.month, 'day': value.day}}\n    elif isinstance(value, list):\n        if (value and (not all((isinstance(x, type(value[0])) for x in value)))):\n            raise googleads.errors.GoogleAdsValueError('Cannot pass more than one type in a set.')\n        return {'xsi_type': 'SetValue', 'values': [cls.GetValueRepresentation(v, version) for v in value]}\n    else:\n        raise googleads.errors.GoogleAdsValueError((\"Can't represent unknown type: %s.\" % type(value)))", "docstring": "Converts a single python value to its PQL representation.\n\nArgs:\nvalue: A python value.\nversion: A string identifying the Ad Manager version the value object\nis compatible with. This defaults to what is currently the latest\nversion. This will be updated in future releases to point to what is\nthen the latest version.\n\nReturns:\nThe value formatted for PQL statements which are compatible with a\nparticular API version.", "source": "codesearchnet"}
{"code": "def bessel_k1(x, name=None):\n    with ops.name_scope(name, 'bessel_k1', [x]):\n        return gen_special_math_ops.bessel_k1(x)", "docstring": "Computes the Bessel k1 function of `x` element-wise.\n\nModified Bessel function of order 1.\n\nIt is preferable to use the numerically stabler function `k1e(x)` instead.\n\n>>> tf.math.special.bessel_k1([0.5, 1., 2., 4.]).numpy()\narray([1.65644112, 0.60190723, 0.13986588, 0.0124835 ], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.k1\n@end_compatibility", "source": "github-repos"}
{"code": "def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None):\n    cmse = np.zeros((1, scale))\n    for i in range(scale):\n        for j in range(i):\n            tmp = util_granulate_time_series(time_series[j:], (i + 1))\n            cmse[i] += (sample_entropy(tmp, sample_length, tolerance) / (i + 1))\n    return cmse", "docstring": "Calculate the Composite Multiscale Entropy of the given time series.\n\nArgs:\ntime_series: Time series for analysis\nsample_length: Number of sequential points of the time series\nscale: Scale factor\ntolerance: Tolerance (default = 0.1...0.2 * std(time_series))\n\nReturns:\nVector containing Composite Multiscale Entropy\n\nReference:\n[1] Wu, Shuen-De, et al. \"Time series analysis using\ncomposite multiscale entropy.\" Entropy 15.3 (2013): 1069-1084.", "source": "codesearchnet"}
{"code": "def get_userid_from_botid(self, botid):\n    botinfo = self.slack_client.api_call('bots.info', bot=botid)\n    if (botinfo['ok'] is True):\n        return botinfo['bot'].get('user_id')\n    else:\n        return botid", "docstring": "Perform a lookup of bots.info to resolve a botid to a userid\n\nArgs:\nbotid (string): Slack botid to lookup.\nReturns:\nstring: userid value", "source": "codesearchnet"}
{"code": "def unshare(self, group_id, **kwargs):\n    path = ('/projects/%s/share/%s' % (self.get_id(), group_id))\n    self.manager.gitlab.http_delete(path, **kwargs)", "docstring": "Delete a shared project link within a group.\n\nArgs:\ngroup_id (int): ID of the group.\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def lines_from_stream(f, as_interned=False):\n    \n    if as_interned:\n        return [sys.intern(line) for line in f.read().splitlines()]\n    return f.read().splitlines()", "docstring": "Create a list of file lines from a given file stream.\n\nArgs:\nf (io.TextIOWrapper): File stream\nas_interned (bool): List of \"interned\" strings (default False)\n\nReturns:\nstrings (list): File line list", "source": "juraj-google-style"}
{"code": "def as_tmpfile(self, tmpdir=None):\n    import tempfile, shutil\n    tmpdir = (tempfile.mkdtemp() if (tmpdir is None) else tmpdir)\n    new_path = os.path.join(tmpdir, self.basename)\n    shutil.copy(self.filepath, new_path)\n    (root, ext) = os.path.splitext(self.filepath)\n    djrepo = (root + '.djrepo')\n    if os.path.exists(djrepo):\n        shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))\n    new = self.__class__.from_file(new_path)\n    if self.has_dojo_report:\n        new.dojo_report = self.dojo_report.deepcopy()\n    return new", "docstring": "Copy the pseudopotential to a temporary a file and returns a new pseudopotential object.\nUseful for unit tests in which we have to change the content of the file.\n\nArgs:\ntmpdir: If None, a new temporary directory is created and files are copied here\nelse tmpdir is used.", "source": "codesearchnet"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    self._check_open()\n    self._buffer.reset()\n    self._buffer_future = None\n    if (whence == os.SEEK_SET):\n        self._offset = offset\n    elif (whence == os.SEEK_CUR):\n        self._offset += offset\n    elif (whence == os.SEEK_END):\n        self._offset = (self._file_size + offset)\n    else:\n        raise ValueError(('Whence mode %s is invalid.' % str(whence)))\n    self._offset = min(self._offset, self._file_size)\n    self._offset = max(self._offset, 0)\n    if self._remaining():\n        self._request_next_buffer()", "docstring": "Set the file's current offset.\n\nNote if the new offset is out of bound, it is adjusted to either 0 or EOF.\n\nArgs:\noffset: seek offset as number.\nwhence: seek mode. Supported modes are os.SEEK_SET (absolute seek),\nos.SEEK_CUR (seek relative to the current position), and os.SEEK_END\n(seek relative to the end, offset should be negative).\n\nRaises:\nIOError: When this buffer is closed.\nValueError: When whence is invalid.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):  \n        \n        super().unpack(buff, self._pyof_class, offset)", "docstring": "Unpack the elements of the list.\n\nThis unpack method considers that all elements have the same size.\nTo use this class with a pyof_class that accepts elements with\ndifferent sizes, you must reimplement the unpack method.\n\nArgs:\nbuff (bytes): The binary data to be unpacked.\noffset (int): If we need to shift the beginning of the data.", "source": "juraj-google-style"}
{"code": "def _wrap_usage_section(source, width):\n    if (not any(((len(line) > width) for line in source.splitlines()))):\n        return source\n    section_header = source[:(source.index(':') + 1)].strip()\n    lines = [section_header]\n    for (commands, args) in parse_commands(source):\n        command = '  {} '.format(' '.join(commands))\n        max_len = (width - len(command))\n        sep = ('\\n' + (' ' * len(command)))\n        wrapped_args = sep.join(textwrap.wrap(' '.join(args), max_len))\n        full_command = (command + wrapped_args)\n        lines += full_command.splitlines()\n    return '\\n'.join(lines)", "docstring": "Wrap the given usage section string to the current terminal size.\n\nNote:\nCommands arguments are wrapped to the column that the arguments began\non the first line of the command.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "codesearchnet"}
{"code": "def render_secrets(\n        config_path,\n        secret_path,\n):\n    \n    with open(secret_path, 'r') as s_fh:\n        secret_ini = anyconfig.load(s_fh, ac_parser='ini')\n\n    with open(config_path, 'r') as c_fh:\n        raw_cfg = c_fh.read()\n\n    rendered_cfg = anytemplate.renders(raw_cfg, secret_ini, at_engine='jinja2')\n\n    p_config = ProsperConfig(config_path)\n    local_config = configparser.ConfigParser()\n    local_config.optionxform = str\n    local_config.read_string(rendered_cfg)\n\n    p_config.local_config = local_config\n\n    return p_config", "docstring": "combine a jinja template with a secret .ini file\n\nArgs:\nconfig_path (str): path to .cfg file with jinja templating\nsecret_path (str): path to .ini-like secrets file\n\nReturns:\nProsperConfig: rendered configuration object", "source": "juraj-google-style"}
{"code": "def region(self, bounds):\n        \n        if not isinstance(bounds, Bounds):\n            raise TypeError(\"region param bounds must be isinstance of Bounds\")\n        _d = copy.copy(self)\n        _d._bounds = bounds\n        return _d", "docstring": "Set region of the screen area\nArgs:\nbounds: Bounds object\n\nReturns:\nA new AndroidDevice object\n\nRaises:\nTypeError", "source": "juraj-google-style"}
{"code": "def IsLink(self):\n    if (self._stat_object is None):\n        self._stat_object = self._GetStat()\n    if (self._stat_object is not None):\n        self.entry_type = self._stat_object.type\n    return (self.entry_type == definitions.FILE_ENTRY_TYPE_LINK)", "docstring": "Determines if the file entry is a link.\n\nReturns:\nbool: True if the file entry is a link.", "source": "codesearchnet"}
{"code": "def CmdAuthenticate(self, challenge_param, app_param, key_handle, check_only=False):\n    self.logger.debug('CmdAuthenticate')\n    if ((len(challenge_param) != 32) or (len(app_param) != 32)):\n        raise errors.InvalidRequestError()\n    control = (7 if check_only else 3)\n    body = bytearray((((challenge_param + app_param) + bytearray([len(key_handle)])) + key_handle))\n    response = self.InternalSendApdu(apdu.CommandApdu(0, apdu.CMD_AUTH, control, 0, body))\n    response.CheckSuccessOrRaise()\n    return response.body", "docstring": "Attempt to obtain an authentication signature.\n\nAsk the security key to sign a challenge for a particular key handle\nin order to authenticate the user.\n\nArgs:\nchallenge_param: SHA-256 hash of client_data object as a bytes\nobject.\napp_param: SHA-256 hash of the app id as a bytes object.\nkey_handle: The key handle to use to issue the signature as a bytes\nobject.\ncheck_only: If true, only check if key_handle is valid.\n\nReturns:\nA binary structure containing the key handle, attestation, and a\nsignature over that by the attestation key.  The precise format\nis dictated by the FIDO U2F specs.\n\nRaises:\nTUPRequiredError: If check_only is False, a Test of User Precense\nis required to proceed.  If check_only is True, this means\nthe key_handle is valid.\nInvalidKeyHandleError: The key_handle is not valid for this device.\nApduError: Something else went wrong on the device.", "source": "codesearchnet"}
{"code": "def set_db_row(db, start, size, _bytearray):\n    client.db_write(db, start, size, _bytearray)", "docstring": "Here we replace a piece of data in a db block with new data\n\nArgs:\ndb (int): The db to use\nstart(int): The start within the db\nsize(int): The size of the data in bytes\n_butearray (enumerable): The data to put in the db", "source": "codesearchnet"}
{"code": "def strip_html_tags(text, allowed_tags=None):\n    if (text is None):\n        return\n    if (allowed_tags is None):\n        allowed_tags = ALLOWED_TAGS\n    return bleach.clean(text, tags=allowed_tags, attributes=['id', 'class', 'style', 'href', 'title'], strip=True)", "docstring": "Strip all tags from a string except those tags provided in `allowed_tags` parameter.\n\nArgs:\ntext (str): string to strip html tags from\nallowed_tags (list): allowed list of html tags\n\nReturns: a string without html tags", "source": "codesearchnet"}
{"code": "def file_config(filename=None):\n    logger.debug('On entry into file_config(), filename = {}'.format(filename))\n    if (filename is None):\n        filename = CONFIG_DEFAULT_PATH\n    logger.debug('file_config() will try to open `{}`'.format(filename))\n    with open(filename) as f:\n        try:\n            config = json.load(f)\n        except ValueError as err:\n            raise exceptions.ConfigurationError('Failed to parse the JSON configuration from `{}`, {}'.format(filename, err))\n        logger.info('Configuration loaded from `{}`'.format(filename))\n    return config", "docstring": "Returns the config values found in a configuration file.\n\nArgs:\nfilename (str): the JSON file with the configuration values.\nIf ``None``, CONFIG_DEFAULT_PATH will be used.\n\nReturns:\ndict: The config values in the specified config file (or the\nfile at CONFIG_DEFAULT_PATH, if filename == None)", "source": "codesearchnet"}
{"code": "def create(self, friendly_name=None, description=None):\n    if (not self.exists()):\n        try:\n            response = self._api.datasets_insert(self._name_parts, friendly_name=friendly_name, description=description)\n        except Exception as e:\n            raise e\n        if ('selfLink' not in response):\n            raise Exception(('Could not create dataset %s' % self._full_name))\n    return self", "docstring": "Creates the Dataset with the specified friendly name and description.\n\nArgs:\nfriendly_name: (optional) the friendly name for the dataset if it is being created.\ndescription: (optional) a description for the dataset if it is being created.\nReturns:\nThe Dataset.\nRaises:\nException if the Dataset could not be created.", "source": "codesearchnet"}
{"code": "def _checkFunctioncode(functioncode, listOfAllowedValues=[]):\n    \n    FUNCTIONCODE_MIN = 1\n    FUNCTIONCODE_MAX = 127\n\n    _checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')\n\n    if listOfAllowedValues is None:\n        return\n\n    if not isinstance(listOfAllowedValues, list):\n        raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))\n\n    for value in listOfAllowedValues:\n        _checkInt(value, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')\n\n    if functioncode not in listOfAllowedValues:\n        raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))", "docstring": "Check that the given functioncode is in the listOfAllowedValues.\n\nAlso verifies that 1 <= function code <= 127.\n\nArgs:\n* functioncode (int): The function code\n* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "async def request(self, method, url, params=None, headers=None, data=None, json=None, token_refresh_attempts=2, **kwargs):\n    if all([data, json]):\n        msg = '\"data\" and \"json\" request parameters can not be used at the same time'\n        logging.warn(msg)\n        raise exceptions.GCPHTTPError(msg)\n    req_headers = (headers or {})\n    req_headers.update(_utils.DEFAULT_REQUEST_HEADERS)\n    req_kwargs = {'params': params, 'headers': req_headers}\n    if data:\n        req_kwargs['data'] = data\n    if json:\n        req_kwargs['json'] = json\n    if token_refresh_attempts:\n        if (not (await self.valid_token_set())):\n            (await self._auth_client.refresh_token())\n            token_refresh_attempts -= 1\n    req_headers.update({'Authorization': f'Bearer {self._auth_client.token}'})\n    request_id = kwargs.get('request_id', uuid.uuid4())\n    logging.debug(_utils.REQ_LOG_FMT.format(request_id=request_id, method=method.upper(), url=url, kwargs=req_kwargs))\n    try:\n        async with self._session.request(method, url, **req_kwargs) as resp:\n            log_kw = {'request_id': request_id, 'method': method.upper(), 'url': resp.url, 'status': resp.status, 'reason': resp.reason}\n            logging.debug(_utils.RESP_LOG_FMT.format(**log_kw))\n            if (resp.status in REFRESH_STATUS_CODES):\n                logging.warning(f'[{request_id}] HTTP Status Code {resp.status} returned requesting {resp.url}: {resp.reason}')\n                if token_refresh_attempts:\n                    logging.info(f'[{request_id}] Attempting request to {resp.url} again.')\n                    return (await self.request(method, url, token_refresh_attempts=token_refresh_attempts, request_id=request_id, **req_kwargs))\n                logging.warning(f'[{request_id}] Max attempts refreshing auth token exhausted while requesting {resp.url}')\n            resp.raise_for_status()\n            return (await resp.text())\n    except aiohttp.ClientResponseError as e:\n        msg = f'[{request_id}] HTTP error response from {resp.url}: {e}'\n        logging.error(msg, exc_info=e)\n        raise exceptions.GCPHTTPResponseError(msg, resp.status)\n    except exceptions.GCPHTTPResponseError as e:\n        raise e\n    except Exception as e:\n        msg = f'[{request_id}] Request call failed: {e}'\n        logging.error(msg, exc_info=e)\n        raise exceptions.GCPHTTPError(msg)", "docstring": "Make an asynchronous HTTP request.\n\nArgs:\nmethod (str): HTTP method to use for the request.\nurl (str): URL to be requested.\nparams (dict): (optional) Query parameters for the request.\nDefaults to ``None``.\nheaders (dict): (optional) HTTP headers to send with the\nrequest. Headers pass through to the request will\ninclude :attr:`DEFAULT_REQUEST_HEADERS`.\ndata (obj): (optional) A dictionary, bytes, or file-like\nobject to send in the body of the request.\njson (obj): (optional) Any json compatible python\nobject.\nNOTE: json and body parameters cannot be used at the same time.\ntoken_refresh_attempts (int): (optional) Number of attempts a token\nrefresh should be performed.\nReturns:\n(str) HTTP response body.\nRaises:\n:exc:`.GCPHTTPError`: if any exception occurred,\nspecifically a :exc:`.GCPHTTPResponseError`, if the\nexception is associated with a response status code.", "source": "codesearchnet"}
{"code": "def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):\n    \n    units = units if units != None else {}\n    return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0,\n                             flags={}, units=units, parens_as_neg=parens_as_neg)", "docstring": "Performs a first step conversion of the cell to check\nit's type or try to convert if a valid conversion exists.\nThis version of conversion doesn't flag changes nor store\ncell units.\n\nArgs:\nunits: The dictionary holder for cell units.\nparens_as_neg: Converts numerics surrounded by parens to\nnegative values", "source": "juraj-google-style"}
{"code": "def _BuildFindSpecsFromFileSourcePath(self, source_path, path_separator, environment_variables, user_accounts):\n    find_specs = []\n    for path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(source_path, path_separator):\n        logger.debug('building find spec from path glob: {0:s}'.format(path_glob))\n        for path in path_helper.PathHelper.ExpandUsersVariablePath(path_glob, path_separator, user_accounts):\n            logger.debug('building find spec from path: {0:s}'.format(path))\n            if ('%' in path):\n                path = path_helper.PathHelper.ExpandWindowsPath(path, environment_variables)\n                logger.debug('building find spec from expanded path: {0:s}'.format(path))\n            if (not path.startswith(path_separator)):\n                logger.warning('The path filter must be defined as an absolute path: \"{0:s}\"'.format(path))\n                continue\n            path_segments = path.split(path_separator)\n            path_segments.pop(0)\n            if (not path_segments[(- 1)]):\n                logger.warning('Empty last path segment in path filter: \"{0:s}\"'.format(path))\n                path_segments.pop((- 1))\n            try:\n                find_spec = file_system_searcher.FindSpec(location_glob=path_segments, case_sensitive=False)\n            except ValueError as exception:\n                logger.error('Unable to build find specification for path: \"{0:s}\" with error: {1!s}'.format(path, exception))\n                continue\n            find_specs.append(find_spec)\n    return find_specs", "docstring": "Builds find specifications from a file source type.\n\nArgs:\nsource_path (str): file system path defined by the source.\npath_separator (str): file system path segment separator.\nenvironment_variables (list[str]): environment variable attributes used to\ndynamically populate environment variables in key.\nuser_accounts (list[str]): identified user accounts stored in the\nknowledge base.\n\nReturns:\nlist[dfvfs.FindSpec]: find specifications for the file source type.", "source": "codesearchnet"}
{"code": "def create_sonos_playlist_from_queue(self, title):\n    response = self.avTransport.SaveQueue([('InstanceID', 0), ('Title', title), ('ObjectID', '')])\n    item_id = response['AssignedObjectID']\n    obj_id = item_id.split(':', 2)[1]\n    uri = 'file:\n    res = [DidlResource(uri=uri, protocol_info='x-rincon-playlist:*:*:*')]\n    return DidlPlaylistContainer(resources=res, title=title, parent_id='SQ:', item_id=item_id)", "docstring": "Create a new Sonos playlist from the current queue.\n\nArgs:\ntitle: Name of the playlist\n\n:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`", "source": "codesearchnet"}
{"code": "def add_metric(self, labels, buckets, sum_value, timestamp=None):\n        \n        for b in buckets:\n            bucket, value = b[:2]\n            exemplar = None\n            if len(b) == 3:\n                exemplar = b[2]\n            self.samples.append(Sample(\n                self.name + '_bucket',\n                dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),\n                value,\n                timestamp,\n                exemplar,\n            ))\n        \n        self.samples.extend([\n            Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),\n            Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp),\n        ])", "docstring": "Add a metric to the metric family.\n\nArgs:\nlabels: A list of label values\nbuckets: A list of lists.\nEach inner list can be a pair of bucket name and value,\nor a triple of bucket name, value, and exemplar.\nThe buckets must be sorted, and +Inf present.\nsum_value: The sum value of the metric.", "source": "juraj-google-style"}
{"code": "def _SimpleEncoder(wire_type, encode_value, compute_value_size):\n  \n\n  def SpecificEncoder(field_number, is_repeated, is_packed):\n    if is_packed:\n      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)\n      local_EncodeVarint = _EncodeVarint\n      def EncodePackedField(write, value):\n        write(tag_bytes)\n        size = 0\n        for element in value:\n          size += compute_value_size(element)\n        local_EncodeVarint(write, size)\n        for element in value:\n          encode_value(write, element)\n      return EncodePackedField\n    elif is_repeated:\n      tag_bytes = TagBytes(field_number, wire_type)\n      def EncodeRepeatedField(write, value):\n        for element in value:\n          write(tag_bytes)\n          encode_value(write, element)\n      return EncodeRepeatedField\n    else:\n      tag_bytes = TagBytes(field_number, wire_type)\n      def EncodeField(write, value):\n        write(tag_bytes)\n        return encode_value(write, value)\n      return EncodeField\n\n  return SpecificEncoder", "docstring": "Return a constructor for an encoder for fields of a particular type.\n\nArgs:\nwire_type:  The field's wire type, for encoding tags.\nencode_value:  A function which encodes an individual value, e.g.\n_EncodeVarint().\ncompute_value_size:  A function which computes the size of an individual\nvalue, e.g. _VarintSize().", "source": "juraj-google-style"}
{"code": "def get_vcf_header(source):\n    \n    head = HeaderParser()\n    \n    for line in source:\n        line = line.rstrip()\n        if line.startswith('\n            if line.startswith('\n                logger.debug(\"Found metadata line {0}\".format(line))\n                head.parse_meta_data(line)\n            else:\n                logger.debug(\"Found header line {0}\".format(line))\n                head.parse_header_line(line)\n        else:\n            break\n    \n    return head", "docstring": "Get the header lines of a vcf file\n\nArgs:\nsource(iterable): A vcf file\n\nReturns:\nhead (HeaderParser): A headerparser object", "source": "juraj-google-style"}
{"code": "async def get_random_popular_person(self, limit=500):\n    index = random.randrange(limit)\n    data = (await self._get_popular_people_page())\n    if (data is None):\n        return\n    if (index >= len(data['results'])):\n        (page, index) = self._calculate_page_index(index, data)\n        data = (await self._get_popular_people_page(page))\n    if (data is None):\n        return\n    json_data = data['results'][index]\n    details = (await self._get_person_json(json_data['id']))\n    details.update(**json_data)\n    return Person.from_json(details, self.config['data'].get('images'))", "docstring": "Randomly select a popular person.\n\nNotes:\nRequires at least two API calls. May require three API calls\nif the randomly-selected index isn't within the first page of\nrequired data.\n\nArguments:\nlimit (:py:class:`int`, optional): How many of the most\npopular people to make random choice from (defaults to top\n``500``).\n\nReturns:\n:py:class:`~.Person`: A randomly-selected popular person.", "source": "codesearchnet"}
{"code": "def __init__(self, field_instance, sequence):\n        \n        if not field_instance.repeated:\n            raise FieldDefinitionError(\n                'FieldList may only accept repeated fields')\n        self.__field = field_instance\n        self.__field.validate(sequence)\n        list.__init__(self, sequence)", "docstring": "Constructor.\n\nArgs:\nfield_instance: Instance of field that validates the list.\nsequence: List or tuple to construct list from.", "source": "juraj-google-style"}
{"code": "def parse_brome_config_from_browser_config(browser_config):\n    \n\n    config = {}\n\n    brome_keys = [key for key in browser_config if key.find(':') != -1]\n\n    for brome_key in brome_keys:\n        section, option = brome_key.split(':')\n        value = browser_config[brome_key]\n\n        if section not in config:\n            config[section] = {}\n\n        config[section][option] = value\n\n    return config", "docstring": "Parse the browser config and look for brome specific config\n\nArgs:\nbrowser_config (dict)", "source": "juraj-google-style"}
{"code": "def forward(ctx, forward_fn, *args, **kwargs):\n    ctx.forward_fn = forward_fn\n    ctx.save_for_backward(*args)\n    try:\n        output, ctx.grad_fn = forward_fn(*args, **kwargs)\n    except:\n        output = forward_fn(*args, **kwargs)\n        ctx.grad_fn = lambda *args, **kwargs: torch.full((), float('nan'))\n    return output", "docstring": "Forward pass computation specification.\n\nArgs:\nctx: Context object.\nforward_fn: Function to compute forward pass.\n*args: Arguments for the forward pass.\n**kwargs: Keyword arguments for the forward pass.", "source": "github-repos"}
{"code": "def Lease(self, request, global_params=None):\n    config = self.GetMethodConfig('Lease')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Leases a dataflow WorkItem to run.\n\nArgs:\nrequest: (DataflowProjectsJobsWorkItemsLeaseRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(LeaseWorkItemResponse) The response message.", "source": "github-repos"}
{"code": "def get_md5sum(fname, chunk_size=1024):\n\n    def iter_chunks(f):\n        while True:\n            chunk = f.read(chunk_size)\n            if (not chunk):\n                break\n            (yield chunk)\n    sig = hashlib.md5()\n    with open(fname, 'rb') as f:\n        for chunk in iter_chunks(f):\n            sig.update(chunk)\n    return sig.hexdigest()", "docstring": "Returns the MD5 checksum of a file.\n\nArgs:\nfname (str): Filename\nchunk_size (Optional[int]): Size (in Bytes) of the chunks that should be\nread in at once. Increasing chunk size reduces the number of reads\nrequired, but increases the memory usage. Defaults to 1024.\n\nReturns:\nThe MD5 checksum of the file, which is a string.", "source": "codesearchnet"}
{"code": "def nack(self, channel_id=None, **kwargs):\n    path = '/event-service/v1/channels/{}/nack'.format(channel_id)\n    r = self._httpclient.request(method='POST', url=self.url, path=path, **kwargs)\n    return r", "docstring": "Send a negative read-acknowledgement to the service.\n\nCauses the channel's read point to move to its previous position\nprior to the last poll.\n\nArgs:\nchannel_id (str): The channel ID.\n**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\nReturns:\nrequests.Response: Requests Response() object.\n\nExamples:\nRefer to ``event_nack.py`` example.", "source": "codesearchnet"}
{"code": "def deserialize(name, custom_objects=None):\n    return serialization_lib.deserialize_keras_object(name, module_objects=ALL_OBJECTS_DICT, custom_objects=custom_objects)", "docstring": "Deserializes a serialized loss class/function instance.\n\nArgs:\nname: Loss configuration.\ncustom_objects: Optional dictionary mapping names (strings) to custom\nobjects (classes and functions) to be considered during\ndeserialization.\n\nReturns:\nA Keras `Loss` instance or a loss function.", "source": "github-repos"}
{"code": "def get_albums_for_artist(self, artist, full_album_art_uri=False):\n        \n        subcategories = [artist]\n        result = self.get_album_artists(\n            full_album_art_uri=full_album_art_uri,\n            subcategories=subcategories,\n            complete_result=True)\n\n        reduced = [item for item in result if item.__class__ == DidlMusicAlbum]\n        \n        \n        result[:] = reduced\n        result._metadata.update({\n            'item_list': reduced,\n            'search_type': 'albums_for_artist',\n            'number_returned': len(reduced),\n            'total_matches': len(reduced)\n        })\n        return result", "docstring": "Get an artist's albums.\n\nArgs:\nartist (str): an artist's name.\nfull_album_art_uri: whether the album art URI should be\nabsolute (i.e. including the IP address). Default `False`.\n\nReturns:\nA `SearchResult` instance.", "source": "juraj-google-style"}
{"code": "def regroup_if_changed(group, op_list, name=None):\n    has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)\n    if ((group is None) or (len(group.control_inputs) != len(op_list)) or (has_deltas and op_list.has_changed())):\n        if has_deltas:\n            op_list.mark()\n        if op_list:\n            return tf.group(*op_list, name=name)\n        else:\n            return tf.no_op(name=name)\n    else:\n        return group", "docstring": "Creates a new group for op_list if it has changed.\n\nArgs:\ngroup: The current group. It is returned if op_list is unchanged.\nop_list: The list of operations to check.\nname: The name to use if a new group is created.\nReturns:\nEither group or a new group (or if op_list is empty then no_op).", "source": "codesearchnet"}
{"code": "def accept_alert(self, text=None, wait=None):\n        \n\n        wait = wait or capybara.default_max_wait_time\n        with self.driver.accept_modal(\"alert\", text=text, wait=wait):\n            yield", "docstring": "Execute the wrapped code, accepting an alert.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "juraj-google-style"}
{"code": "def build_authorization_endpoint(self, request, disable_sso=None):\n        \n        self.load_config()\n        redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)\n        if not redirect_to:\n            redirect_to = django_settings.LOGIN_REDIRECT_URL\n        redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()\n        query = QueryDict(mutable=True)\n        query.update({\n            \"response_type\": \"code\",\n            \"client_id\": settings.CLIENT_ID,\n            \"resource\": settings.RELYING_PARTY_ID,\n            \"redirect_uri\": self.redirect_uri(request),\n            \"state\": redirect_to,\n        })\n        if self._mode == \"openid_connect\":\n            query[\"scope\"] = \"openid\"\n            if (disable_sso is None and settings.DISABLE_SSO) or disable_sso is True:\n                query[\"prompt\"] = \"login\"\n\n        return \"{0}?{1}\".format(self.authorization_endpoint, query.urlencode())", "docstring": "This function returns the ADFS authorization URL.\n\nArgs:\nrequest(django.http.request.HttpRequest): A django Request object\ndisable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt.\n\nReturns:\nstr: The redirect URI", "source": "juraj-google-style"}
{"code": "def remove_server(self, name):\n    cmd = self.command_builder('no ntp server', value=name)\n    return self.configure(cmd)", "docstring": "Remove an NTP server entry from the node config\n\nArgs:\nname (string): The IP address or FQDN of the NTP server.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def add_time_dimension(padded_inputs, seq_lens):\n    padded_batch_size = tf.shape(padded_inputs)[0]\n    max_seq_len = (padded_batch_size \n    new_batch_size = (padded_batch_size \n    new_shape = ([new_batch_size, max_seq_len] + padded_inputs.get_shape().as_list()[1:])\n    return tf.reshape(padded_inputs, new_shape)", "docstring": "Adds a time dimension to padded inputs.\n\nArguments:\npadded_inputs (Tensor): a padded batch of sequences. That is,\nfor seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where\nA, B, C are sequence elements and * denotes padding.\nseq_lens (Tensor): the sequence lengths within the input batch,\nsuitable for passing to tf.nn.dynamic_rnn().\n\nReturns:\nReshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].", "source": "codesearchnet"}
{"code": "def dict_of_lists_add(dictionary, key, value):\n    list_objs = dictionary.get(key, list())\n    list_objs.append(value)\n    dictionary[key] = list_objs", "docstring": "Add value to a list in a dictionary by key\n\nArgs:\ndictionary (DictUpperBound): Dictionary to which to add values\nkey (Any): Key within dictionary\nvalue (Any): Value to add to list in dictionary\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def get_ut_layer(x,\n                 hparams,\n                 ffn_unit,\n                 attention_unit,\n                 pad_remover=None):\n  \n\n  if hparams.recurrence_type == \"basic\":\n    ut_initializer = (x, x, x)  \n    ut_function = functools.partial(\n        universal_transformer_basic,\n        hparams=hparams,\n        ffn_unit=ffn_unit,\n        attention_unit=attention_unit)\n\n  elif hparams.recurrence_type == \"highway\":\n    ut_initializer = (x, x, x)  \n    ut_function = functools.partial(\n        universal_transformer_highway,\n        hparams=hparams,\n        ffn_unit=ffn_unit,\n        attention_unit=attention_unit,\n        pad_remover=pad_remover)\n\n  elif hparams.recurrence_type == \"skip\":\n    ut_initializer = (x, x, x)  \n    ut_function = functools.partial(\n        universal_transformer_skip,\n        hparams=hparams,\n        ffn_unit=ffn_unit,\n        attention_unit=attention_unit,\n        pad_remover=pad_remover)\n\n  elif hparams.recurrence_type == \"dwa\":\n    \n    memory_size = hparams.num_rec_steps + 1\n\n    \n    memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x))\n\n    \n    memory = fill_memory_slot(memory_empty, x, 0)\n\n    ut_initializer = (x, x, memory)  \n    ut_function = functools.partial(\n        universal_transformer_depthwise_attention,\n        hparams=hparams,\n        ffn_unit=ffn_unit,\n        attention_unit=attention_unit)\n\n  elif hparams.recurrence_type == \"gru\":\n    ut_initializer = (x, x, x)  \n    ut_function = functools.partial(\n        universal_transformer_with_gru_as_transition_function,\n        hparams=hparams,\n        ffn_unit=ffn_unit,\n        attention_unit=attention_unit,\n        pad_remover=pad_remover)\n\n  elif hparams.recurrence_type == \"lstm\":\n    memory = tf.zeros(common_layers.shape_list(x))\n    ut_initializer = (x, x, memory)  \n    ut_function = functools.partial(\n        universal_transformer_with_lstm_as_transition_function,\n        hparams=hparams,\n        ffn_unit=ffn_unit,\n        attention_unit=attention_unit,\n        pad_remover=pad_remover)\n\n  else:\n    raise ValueError(\"Unknown recurrence type: %s\" % hparams.recurrence_type)\n\n  return ut_function, ut_initializer", "docstring": "Provides the function that is used in universal transforemr steps.\n\nArgs:\nx: input\nhparams: model hyper-parameters\nffn_unit: feed-forward unit\nattention_unit: multi-head attention unit\npad_remover: to mask out padding in convolutional layers (efficiency).\n\nReturns:\nut_function and the ut_initializer\n\nRaises:\nValueError: Unknown recurrence type", "source": "juraj-google-style"}
{"code": "class TFConvNextStage(keras.layers.Layer):\n\n    def __init__(self, config: ConvNextConfig, in_channels: int, out_channels: int, kernel_size: int=2, stride: int=2, depth: int=2, drop_path_rates: Optional[List[float]]=None, **kwargs):\n        super().__init__(**kwargs)\n        if in_channels != out_channels or stride > 1:\n            self.downsampling_layer = [keras.layers.LayerNormalization(epsilon=1e-06, name='downsampling_layer.0'), keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='downsampling_layer.1')]\n        else:\n            self.downsampling_layer = [tf.identity]\n        drop_path_rates = drop_path_rates or [0.0] * depth\n        self.layers = [TFConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j], name=f'layers.{j}') for j in range(depth)]\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.stride = stride\n\n    def call(self, hidden_states):\n        for layer in self.downsampling_layer:\n            hidden_states = layer(hidden_states)\n        for layer in self.layers:\n            hidden_states = layer(hidden_states)\n        return hidden_states\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'layers', None) is not None:\n            for layer in self.layers:\n                with tf.name_scope(layer.name):\n                    layer.build(None)\n        if self.in_channels != self.out_channels or self.stride > 1:\n            with tf.name_scope(self.downsampling_layer[0].name):\n                self.downsampling_layer[0].build([None, None, None, self.in_channels])\n            with tf.name_scope(self.downsampling_layer[1].name):\n                self.downsampling_layer[1].build([None, None, None, self.in_channels])", "docstring": "ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.\n\nArgs:\nconfig (`ConvNextV2Config`):\nModel configuration class.\nin_channels (`int`):\nNumber of input channels.\nout_channels (`int`):\nNumber of output channels.\ndepth (`int`):\nNumber of residual blocks.\ndrop_path_rates(`List[float]`):\nStochastic depth rates for each layer.", "source": "github-repos"}
{"code": "def _send_data(self, data, start_offset, file_len):\n    \n    headers = {}\n    end_offset = start_offset + len(data) - 1\n\n    if data:\n      headers['content-range'] = ('bytes %d-%d/%s' %\n                                  (start_offset, end_offset, file_len))\n    else:\n      headers['content-range'] = ('bytes */%s' % file_len)\n\n    status, response_headers, content = self._api.put_object(\n        self._path_with_token, payload=data, headers=headers)\n    if file_len == '*':\n      expected = 308\n    else:\n      expected = 200\n    errors.check_status(status, [expected], self._path, headers,\n                        response_headers, content,\n                        {'upload_path': self._path_with_token})", "docstring": "Send the block to the storage service.\n\nThis is a utility method that does not modify self.\n\nArgs:\ndata: data to send in str.\nstart_offset: start offset of the data in relation to the file.\nfile_len: an int if this is the last data to append to the file.\nOtherwise '*'.", "source": "juraj-google-style"}
{"code": "def _stream_data(self, chunk=None):\n        \n\n        \n        self._stream_sm_running = True\n\n        if chunk is None:\n            chunk = self._next_streaming_chunk(20)\n\n        if chunk is None or len(chunk) == 0:\n            self._stream_sm_running = False\n            return\n\n        try:\n            self._send_notification(StreamingChar.value_handle, chunk)\n            self._defer(self._stream_data)\n        except bable_interface.BaBLEException as err:\n            if err.packet.status == 'Rejected':  \n                time.sleep(0.05)\n                self._defer(self._stream_data, [chunk])\n            else:\n                self._audit('ErrorStreamingReport')  \n                self._logger.exception(\"Error while streaming data\")", "docstring": "Stream reports to the ble client in 20 byte chunks\n\nArgs:\nchunk (bytearray): A chunk that should be sent instead of requesting a\nnew chunk from the pending reports.", "source": "juraj-google-style"}
{"code": "def GetVolumeIdentifiers(self, volume_system):\n    volume_identifiers = []\n    for volume in volume_system.volumes:\n        volume_identifier = getattr(volume, 'identifier', None)\n        if volume_identifier:\n            volume_identifiers.append(volume_identifier)\n    return sorted(volume_identifiers)", "docstring": "Retrieves the volume identifiers.\n\nArgs:\nvolume_system (VolumeSystem): volume system.\n\nReturns:\nlist[str]: sorted volume identifiers.", "source": "codesearchnet"}
{"code": "def BuildAdGroupCriterionOperations(adgroup_id):\n  \n  criterion_operations = [\n      {\n          \n          \n          'xsi_type': 'AdGroupCriterionOperation',\n          'operand': {\n              'xsi_type': 'BiddableAdGroupCriterion',\n              'adGroupId': adgroup_id,\n              'criterion': {\n                  'xsi_type': 'Keyword',\n                  \n                  'text': 'mars%s%s' % (uuid.uuid4(),\n                                        '!!!' if i % 10 == 0 else ''),\n                  'matchType': 'BROAD'\n              }\n          },\n          'operator': 'ADD'\n      }\n      for i in range(KEYWORD_COUNT)]\n\n  return criterion_operations", "docstring": "Builds the operations adding a Keyword Criterion to each AdGroup.\n\nArgs:\nadgroup_id: an integer identifying an AdGroup to associate the keywords\nwith.\n\nReturns:\na list containing the operations that will create a new Keyword Criterion\nassociated with each provided AdGroup.", "source": "juraj-google-style"}
{"code": "def delete(self, filething=None, delete_v1=True, delete_v2=True):\n        \n\n        delete(filething, delete_v1, delete_v2)\n        self.clear()", "docstring": "delete(filething=None, delete_v1=True, delete_v2=True)\n\nRemove tags from a file.\n\nArgs:\nfilething (filething): A filename or `None` to use the one used\nwhen loading.\ndelete_v1 (bool): delete any ID3v1 tag\ndelete_v2 (bool): delete any ID3v2 tag\n\nIf no filename is given, the one most recently loaded is used.", "source": "juraj-google-style"}
{"code": "def get_config(self):\n    return {}", "docstring": "Returns a Python dict of the object config.\n\nA constraint config is a Python dictionary (JSON-serializable) that can\nbe used to reinstantiate the same object.\n\nReturns:\nPython dict containing the configuration of the constraint object.", "source": "github-repos"}
{"code": "def _sample_field(self, sample):\n    tag_values = self.sample_tag_values[sample].values()\n    if tag_values:\n        return ':'.join(tag_values)\n    else:\n        return '.'", "docstring": "Returns string representation of sample-format values.\n\nRaises:\nKeyError: if requested sample is not defined.", "source": "codesearchnet"}
{"code": "def processPhoneList(platformNames=[], numbers=[], excludePlatformNames=[]):\n    platforms = platform_selection.getPlatformsByName(platformNames, mode='phonefy', excludePlatformNames=excludePlatformNames)\n    results = []\n    for num in numbers:\n        for pla in platforms:\n            entities = pla.getInfo(query=num, process=True, mode='phonefy')\n            if (entities != {}):\n                results += json.loads(entities)\n    return results", "docstring": "Method to perform searchs on a series of numbers.\n\nArgs:\n-----\nplatformNames: List of names of the platforms.\nnumbers: List of numbers to be queried.\nexcludePlatformNames: A list of platforms not to be searched.\n\nReturn:\n-------\nA list of verified emails.", "source": "codesearchnet"}
{"code": "def AddMemberDefinition(self, member_definition):\n    \n    self.members.append(member_definition)\n    member_definition.family_definition = self", "docstring": "Adds a member definition.\n\nArgs:\nmember_definition (DataTypeDefinition): member data type definition.", "source": "juraj-google-style"}
{"code": "def save_to_well_known_file(credentials, well_known_file=None):\n    if (well_known_file is None):\n        well_known_file = _get_well_known_file()\n    config_dir = os.path.dirname(well_known_file)\n    if (not os.path.isdir(config_dir)):\n        raise OSError('Config directory does not exist: {0}'.format(config_dir))\n    credentials_data = credentials.serialization_data\n    _save_private_file(well_known_file, credentials_data)", "docstring": "Save the provided GoogleCredentials to the well known file.\n\nArgs:\ncredentials: the credentials to be saved to the well known file;\nit should be an instance of GoogleCredentials\nwell_known_file: the name of the file where the credentials are to be\nsaved; this parameter is supposed to be used for\ntesting only", "source": "codesearchnet"}
{"code": "def __getIp6Address(self, addressType):\n        \n        addrType = ['link local', 'global', 'rloc', 'mesh EID']\n        addrs = []\n        globalAddr = []\n        linkLocal64Addr = ''\n        rlocAddr = ''\n        meshEIDAddr = ''\n\n        addrs = self.__sendCommand('ipaddr')\n        for ip6Addr in addrs:\n            if ip6Addr == 'Done':\n                break\n\n            ip6AddrPrefix = ip6Addr.split(':')[0]\n            if ip6AddrPrefix == 'fe80':\n                \n                if ip6Addr.split(':')[4] != '0':\n                    linkLocal64Addr = ip6Addr\n            elif ip6Addr.startswith(self.meshLocalPrefix):\n                \n                if ip6Addr.split(':')[4] == '0':\n                    \n                    rlocAddr = ip6Addr\n                else:\n                    \n                    meshEIDAddr = ip6Addr\n            else:\n                \n                if ip6Addr != None:\n                    globalAddr.append(ip6Addr)\n                else:\n                    pass\n\n        if addressType == addrType[0]:\n            return linkLocal64Addr\n        elif addressType == addrType[1]:\n            return globalAddr\n        elif addressType == addrType[2]:\n            return rlocAddr\n        elif addressType == addrType[3]:\n            return meshEIDAddr\n        else:\n            pass", "docstring": "get specific type of IPv6 address configured on thread device\n\nArgs:\naddressType: the specific type of IPv6 address\n\nlink local: link local unicast IPv6 address that's within one-hop scope\nglobal: global unicast IPv6 address\nrloc: mesh local unicast IPv6 address for routing in thread network\nmesh EID: mesh Endpoint Identifier\n\nReturns:\nIPv6 address string", "source": "juraj-google-style"}
{"code": "def get_config_value(self, section_name, option, default_option='default'):\n    if (self.config is None):\n        self.config = configparser.ConfigParser()\n        self.config.read(self.ini_file_name)\n    if option:\n        try:\n            return self.config.get(section_name, option)\n        except configparser.NoOptionError:\n            log.debug(\"Didn't find a configuration option for '%s' section and '%s' option\", section_name, option)\n    return self.config.get(section_name, default_option)", "docstring": "Read a value from the configuration, with a default.\n\nArgs:\nsection_name (str): name of the section in the configuration from which\nthe option should be found.\noption (str): name of the configuration option.\ndefault_option (str): name of the default configuration option whose\nvalue should be returned if the requested option is not found.\n\nReturns:\nstr: the value from the ini file.", "source": "codesearchnet"}
{"code": "def __init__(self, ctx, config):\n    super(AnfTransformer, self).__init__(ctx)\n    if config is None:\n        if gast_util.GAST2:\n            literal_node_types = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant, gast.Name)\n        elif gast_util.GAST3:\n            literal_node_types = (gast.Constant, gast.Name)\n        else:\n            assert False\n        self._overrides = [(ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE), (ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]\n    else:\n        self._overrides = config\n    self._gensym = DummyGensym()\n    self._pending_statements = []", "docstring": "Creates an ANF transformer.\n\nArgs:\nctx: transformer.Context\nconfig: Configuration", "source": "github-repos"}
{"code": "def _somethingFound(self, data, mode=\"phonefy\"):\n        \n        if data:\n            try:\n                for text in self.notFoundText[mode]:\n                    if text in data:\n                        return False\n                return True\n            except AttributeError as e:\n                \n                verifier = self.modes.get(mode)\n                if verifier:\n                    if verifier.get(\"not_found_text\", \"\") in data:\n                        return False\n                    else:\n                        return True\n        return False", "docstring": "Verifying if something was found.\n\nArgs:\n-----\ndata: Data where the self.notFoundText will be searched.\nmode: Mode to be executed.\n\nReturn:\n-------\nTrue if exists.", "source": "juraj-google-style"}
{"code": "def perfcounters(infile):\n    measurements = []\n    with open(infile, 'r') as in_file:\n        read_struct(in_file)\n        for region_struct in read_structs(in_file):\n            region = region_struct['1'][1]\n            core_info = region_struct['Region Info']\n            measurements += get_measurements(region, core_info, region_struct)\n            for table_struct in read_tables(in_file):\n                core_info = None\n                if ('Event' in table_struct):\n                    offset = 1\n                    core_info = table_struct['Event'][offset:]\n                    measurements += get_measurements(region, core_info, table_struct, offset)\n                elif ('Metric' in table_struct):\n                    core_info = table_struct['Metric']\n                    measurements += get_measurements(region, core_info, table_struct)\n    return measurements", "docstring": "Get a complete list of all measurements.\n\nArgs:\ninfile: The filestream containing all likwid output.\n\nReturns:\nA list of all measurements extracted from likwid's file stream.", "source": "codesearchnet"}
{"code": "def server(self, value):\n    self._server = value\n    self._connectionXML.set('server', value)", "docstring": "Set the connection's server property.\n\nArgs:\nvalue:  New server. String.\n\nReturns:\nNothing.", "source": "codesearchnet"}
{"code": "def add_note(path, filename=\"note.txt\"):\n    \n    path = os.path.expanduser(path)\n    assert os.path.isdir(path), \"{} is not a valid directory.\".format(path)\n\n    filepath = os.path.join(path, filename)\n    exists = os.path.isfile(filepath)\n\n    try:\n        subprocess.call([EDITOR, filepath])\n    except Exception as exc:\n        logger.error(\"Editing note failed!\")\n        raise exc\n    if exists:\n        print(\"Note updated at:\", filepath)\n    else:\n        print(\"Note created at:\", filepath)", "docstring": "Opens a txt file at the given path where user can add and save notes.\n\nArgs:\npath (str): Directory where note will be saved.\nfilename (str): Name of note. Defaults to \"note.txt\"", "source": "juraj-google-style"}
{"code": "def __init__(self, key='', *value):\n        \n        if key == '':\n            self.key = self.__class__.__name__\n        else:\n            self.key = key\n        if len(value) != 0:\n            self.value = list(flatten(value))", "docstring": "init\n\nArgs:\nkey (str): the key\n*value: the value to be stored", "source": "juraj-google-style"}
{"code": "def _parse_string_to_list_of_pairs(s, seconds_to_int=False):\n    ret = []\n    for p in [s.split(':') for s in re.sub('[,.;]', ' ', s).split()]:\n        if (len(p) != 2):\n            raise ValueError(('bad input to _parse_string_to_list_of_pairs %s' % s))\n        if seconds_to_int:\n            ret.append((p[0], int(p[1])))\n        else:\n            ret.append(tuple(p))\n    return ret", "docstring": "r\"\"\"Parses a string into a list of pairs.\n\nIn the input string, each pair is separated by a colon, and the delimiters\nbetween pairs are any of \" ,.;\".\n\ne.g. \"rows:32,cols:32\"\n\nArgs:\ns: str to parse.\nseconds_to_int: Boolean. If True, then the second elements are returned\nas integers;  otherwise they are strings.\n\nReturns:\nList of tuple pairs.\n\nRaises:\nValueError: Badly formatted string.", "source": "codesearchnet"}
{"code": "def tags(pode, leaf=False):\n    fulltags = [tag for tag in pode[1]['tags']]\n    if (not leaf):\n        return fulltags\n    retn = []\n    for (size, tag) in sorted([(len(t), t) for t in fulltags], reverse=True):\n        look = (tag + '.')\n        if any([r.startswith(look) for r in retn]):\n            continue\n        retn.append(tag)\n    return retn", "docstring": "Get all the tags for a given node.\n\nArgs:\npode (tuple): A packed node.\nleaf (bool): If True, only return the full tags.\n\nReturns:\nlist: A list of tag strings.", "source": "codesearchnet"}
{"code": "def _usage(shorthelp):\n    \n    doc = _sys.modules['__main__'].__doc__\n    if not doc:\n        doc = '\\nUSAGE: %s [flags]\\n' % _sys.argv[0]\n        doc = flags.text_wrap(doc, indent='       ', firstline_indent='')\n    else:\n        \n        num_specifiers = doc.count('%') - 2 * doc.count('%%')\n        try:\n            doc %= (_sys.argv[0],) * num_specifiers\n        except (OverflowError, TypeError, ValueError):\n            \n            pass\n    if shorthelp:\n        flag_str = flags.FLAGS.main_module_help()\n    else:\n        flag_str = str(flags.FLAGS)\n    try:\n        _sys.stdout.write(doc)\n        if flag_str:\n            _sys.stdout.write('\\nflags:\\n')\n            _sys.stdout.write(flag_str)\n        _sys.stdout.write('\\n')\n    except IOError as e:\n        \n        \n        if e.errno != _errno.EPIPE:\n            raise", "docstring": "Writes __main__'s docstring to stdout with some help text.\n\nArgs:\nshorthelp: bool, if True, prints only flags from the main module,\nrather than all flags.", "source": "juraj-google-style"}
{"code": "def decrypt(self, ciphertext):\n    plaintext = self._rx_tinh.dec(ciphertext)\n    if (plaintext is None):\n        logger.error('Message decryption failure')\n        raise s_exc.CryptoErr(mesg='Message decryption failure')\n    seqn = next(self._rx_sn)\n    (sn, mesg) = s_msgpack.un(plaintext)\n    if (sn != seqn):\n        logger.error('Message out of sequence: got %d expected %d', sn, seqn)\n        raise s_exc.CryptoErr(mesg='Message out of sequence', expected=seqn, got=sn)\n    return mesg", "docstring": "Decrypt a message, validating its sequence number is as we expect.\n\nArgs:\nciphertext (bytes): The message to decrypt and verify.\n\nReturns:\nmesg: A mesg.\n\nRaises:\ns_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected.", "source": "codesearchnet"}
{"code": "def prepare_or_wait_for_session(self, master='', config=None, wait_for_checkpoint=False, max_wait_secs=7200, start_standard_services=True):\n    self._coord.clear_stop()\n    if self._summary_writer:\n        self._summary_writer.reopen()\n    if self._is_chief:\n        sess = self._session_manager.prepare_session(master, init_op=self.init_op, saver=self.saver, checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config, init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)\n        self._write_graph()\n        if start_standard_services:\n            logging.info('Starting standard services.')\n            self.start_standard_services(sess)\n    else:\n        sess = self._session_manager.wait_for_session(master, config=config, max_wait_secs=max_wait_secs)\n    if start_standard_services:\n        logging.info('Starting queue runners.')\n        self.start_queue_runners(sess)\n    return sess", "docstring": "Make sure the model is ready to be used.\n\nCreate a session on 'master', recovering or initializing the model as\nneeded, or wait for a session to be ready.  If running as the chief\nand `start_standard_service` is set to True, also call the session\nmanager to start the standard services.\n\nArgs:\nmaster: name of the TensorFlow master to use.  See the\n`tf.compat.v1.Session` constructor for how this is interpreted.\nconfig: Optional ConfigProto proto used to configure the session, which is\npassed as-is to create the session.\nwait_for_checkpoint: Whether we should wait for the availability of a\ncheckpoint before creating Session. Defaults to False.\nmax_wait_secs: Maximum time to wait for the session to become available.\nstart_standard_services: Whether to start the standard services and the\nqueue runners.\n\nReturns:\nA Session object that can be used to drive the model.", "source": "github-repos"}
{"code": "def vlog_is_on(level):\n  \n\n  if level > converter.ABSL_DEBUG:\n    \n    \n    \n    standard_level = converter.STANDARD_DEBUG - (level - 1)\n  else:\n    if level < converter.ABSL_FATAL:\n      level = converter.ABSL_FATAL\n    standard_level = converter.absl_to_standard(level)\n  return _absl_logger.isEnabledFor(standard_level)", "docstring": "Checks if vlog is enabled for the given level in caller's source file.\n\nArgs:\nlevel: int, the C++ verbose logging level at which to log the message,\ne.g. 1, 2, 3, 4... While absl level constants are also supported,\ncallers should prefer level_debug|level_info|... calls for\nchecking those.\n\nReturns:\nTrue if logging is turned on for that level.", "source": "juraj-google-style"}
{"code": "def create_profile(profile_name):\n    try:\n        profile = Profile(profile_name=profile_name)\n        profile.full_clean()\n        profile.save()\n    except ValidationError as err:\n        raise ValCannotCreateError(err.message_dict)", "docstring": "Used to create Profile objects in the database\n\nA profile needs to exists before an EncodedVideo object can be created.\n\nArgs:\nprofile_name (str): ID of the profile\n\nRaises:\nValCannotCreateError: Raised if the profile name is invalid or exists", "source": "codesearchnet"}
{"code": "def handle_duplications(file_path):\n    logging.info('Handling duplications for \"%s\"', file_path)\n    f = open_strings_file(file_path, 'r+')\n    header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)\n    file_elements = []\n    section_file_elements = []\n    keys_to_objects = {}\n    duplicates_found = []\n    for (header_comment, comments, key, value) in header_comment_key_value_tuples:\n        if (len(header_comment) > 0):\n            for elem in sorted(section_file_elements, key=(lambda x: x.comments[0])):\n                file_elements.append(elem)\n            section_file_elements = []\n            file_elements.append(Comment(header_comment))\n        if (key in keys_to_objects):\n            keys_to_objects[key].add_comments(comments)\n            duplicates_found.append(key)\n        else:\n            loc_obj = LocalizationEntry(comments, key, value)\n            keys_to_objects[key] = loc_obj\n            section_file_elements.append(loc_obj)\n    for elem in sorted(section_file_elements, key=(lambda x: x.comments[0])):\n        file_elements.append(elem)\n    f.seek(0)\n    for element in file_elements:\n        f.write(unicode(element))\n        f.write(u'\\n')\n    f.truncate()\n    f.close()\n    logging.info(('Omitted %d duplicates (%s)' % (len(duplicates_found), ','.join(duplicates_found))))\n    logging.info('Finished handling duplications for \"%s\"', file_path)", "docstring": "Omits the duplications in the strings files.\nKeys that appear more than once, will be joined to one appearance and the omit will be documented.\n\nArgs:\nfile_path (str): The path to the strings file.", "source": "codesearchnet"}
{"code": "def _aggregation_op(cls,\n            op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],\n            x: 'TensorFluent',\n            vars_list: List[str]) -> 'TensorFluent':\n        \n        axis = cls._varslist2axis(x, vars_list)\n        t = op(x.tensor, axis)\n\n        scope = []\n        for var in x.scope.as_list():\n            if var not in vars_list:\n                scope.append(var)\n\n        batch = x.batch\n\n        return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the aggregation `op` applied to fluent `x`.\n\nArgs:\nop: The aggregation operation.\nx: The input fluent.\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the aggregation operator's output.", "source": "juraj-google-style"}
{"code": "def parallel(devices, fn, *args, **kwargs):\n    if (not isinstance(devices, list)):\n        raise ValueError('devices must be a list')\n    for x in (list(args) + list(six.itervalues(kwargs))):\n        if ((not isinstance(x, list)) or (len(x) != len(devices))):\n            raise ValueError(('Argument not a list with same length as devices arg=%s devices=%s' % (x, devices)))\n    ret = []\n    for (i, device) in enumerate(devices):\n        with tf.device(device):\n            with tf.variable_scope(('parallel_%d' % i)):\n                my_args = [x[i] for x in args]\n                my_kwargs = {k: v[i] for (k, v) in six.iteritems(kwargs)}\n                ret.append(fn(*my_args, **my_kwargs))\n    return ret", "docstring": "Call a function once on each device.\n\nArgs:\ndevices: a list of n devices\nfn: a function\n*args: arguments, each of which is a list of length n\n**kwargs: keyword-args, each of which is a list of length n\nReturns:\na list of length n\nRaises:\nValueError: if the arguments are not all lists of length n", "source": "codesearchnet"}
{"code": "def create_public_ip(access_token, subscription_id, resource_group, public_ip_name, dns_label,\n                     location):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name,\n                        '?api-version=', NETWORK_API])\n    ip_body = {'location': location}\n    properties = {'publicIPAllocationMethod': 'Dynamic'}\n    properties['dnsSettings'] = {'domainNameLabel': dns_label}\n    ip_body['properties'] = properties\n    body = json.dumps(ip_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a public ip address.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\npublic_ip_name (str): Name of the new public ip address resource.\ndns_label (str): DNS label to apply to the IP address.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. Public IP address JSON body.", "source": "juraj-google-style"}
{"code": "def uniquelines(q):\n    \n    setoflines = set()\n    for facets in q:\n        for line in itertools.combinations(facets, 2):\n            setoflines.add(tuple(sorted(line)))\n    return setoflines", "docstring": "Given all the facets, convert it into a set of unique lines.  Specifically\nused for converting convex hull facets into line pairs of coordinates.\n\nArgs:\nq: A 2-dim sequence, where each row represents a facet. E.g.,\n[[1,2,3],[3,6,7],...]\n\nReturns:\nsetoflines:\nA set of tuple of lines.  E.g., ((1,2), (1,3), (2,3), ....)", "source": "juraj-google-style"}
{"code": "def fit_gaussian(samples, ddof=0):\n    \n    if len(samples.shape) == 1:\n        return np.mean(samples), np.std(samples, ddof=ddof)\n    return np.mean(samples, axis=1), np.std(samples, axis=1, ddof=ddof)", "docstring": "Calculates the mean and the standard deviation of the given samples.\n\nArgs:\nsamples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all\nvalues. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.\nddof (int): the difference degrees of freedom in the std calculation. See numpy.", "source": "juraj-google-style"}
{"code": "def log_deferred(op, log_id, every_n=1, first_n=None):\n    prefix = ':::MLPv0.5.0 [{}]'.format(log_id)\n    if ((not (first_n is not None)) and (first_n == 1)):\n        return tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)\n    counter = tf.Variable((tf.zeros(shape=(), dtype=tf.int32) - 1), aggregation=tf.VariableAggregation.MEAN)\n    increment = tf.assign_add(counter, 1, use_locking=True)\n    return tf.cond(tf.equal(tf.mod(increment, every_n), 0), (lambda : tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=first_n)), (lambda : op))", "docstring": "Helper method inserting compliance logging ops.\n\nNote: This helper is not guaranteed to be efficient, as it will insert ops\nand control dependencies. If this proves to be a bottleneck, submitters\nmay wish to consider other methods such as extracting values from an\n.events file.\n\nArgs:\nop: A tf op to be printed.\nlog_id: a uuid provided by the logger in mlperf_log.py\nevery_n: If repeat is True, with what frequency should the input op be '\nlogged. If repeat is False, this argument is ignored.\nfirst_n: Only log this many values. This arg does not interact with every_n.\nThe first_n refers to the first n that would have been logged.", "source": "codesearchnet"}
{"code": "def g_square_bin(dm, x, y, s):\n    \n\n    def _calculate_tlog(x, y, s, dof, dm):\n        nijk = np.zeros((2, 2, dof))\n        s_size = len(s)\n        z = []\n        for z_index in range(s_size):\n            z.append(s.pop())\n            pass\n        for row_index in range(0, dm.shape[0]):\n            i = dm[row_index, x]\n            j = dm[row_index, y]\n            k = []\n            k_index = 0\n            for z_index in range(s_size):\n                k_index += dm[row_index, z[z_index]] * int(pow(2, z_index))\n                pass\n            nijk[i, j, k_index] += 1\n            pass\n        nik = np.ndarray((2, dof))\n        njk = np.ndarray((2, dof))\n        for k_index in range(dof):\n            nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)\n            njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)\n            pass\n        nk = njk.sum(axis = 0)\n        tlog = np.zeros((2, 2 , dof))\n        tlog.fill(np.nan)\n        for k in range(dof):\n            tx = np.array([nik[:,k]]).T\n            ty = np.array([njk[:,k]])\n            tdijk = tx.dot(ty)\n            tlog[:,:,k] = nijk[:,:,k] * nk[k] / tdijk\n            pass\n        return (nijk, tlog)\n\n    _logger.debug('Edge %d -- %d with subset: %s' % (x, y, s))\n    row_size = dm.shape[0]\n    s_size = len(s)\n    dof = int(pow(2, s_size))\n    row_size_required = 10 * dof\n    if row_size < row_size_required:\n        _logger.warning('Not enough samples. %s is too small. Need %s.'\n                        % (str(row_size), str(row_size_required)))\n        return 1\n    nijk = None\n    if s_size < 6:\n        if s_size == 0:\n            nijk = np.zeros((2, 2))\n            for row_index in range(0, dm.shape[0]):\n                i = dm[row_index, x]\n                j = dm[row_index, y]\n                nijk[i, j] += 1\n                pass\n            tx = np.array([nijk.sum(axis = 1)]).T\n            ty = np.array([nijk.sum(axis = 0)])\n            tdij = tx.dot(ty)\n            tlog = nijk * row_size / tdij\n            pass\n        if s_size > 0:\n            nijk, tlog = _calculate_tlog(x, y, s, dof, dm)\n            pass\n        pass\n    else:\n        \n        nijk = np.zeros((2, 2, 1))\n        i = dm[0, x]\n        j = dm[0, y]\n        k = []\n        for z in s:\n            k.append(dm[:,z])\n            pass\n        k = np.array(k).T\n        parents_count = 1\n        parents_val = np.array([k[0,:]])\n        nijk[i, j, parents_count - 1] = 1\n        for it_sample in range(1, row_size):\n            is_new = True\n            i = dm[it_sample, x]\n            j = dm[it_sample, y]\n            tcomp = parents_val[:parents_count,:] == k[it_sample,:]\n            for it_parents in range(parents_count):\n                if np.all(tcomp[it_parents,:]):\n                    nijk[i, j, it_parents] += 1\n                    is_new = False\n                    break\n                pass\n            if is_new is True:\n                parents_count += 1\n                parents_val = np.r_[parents_val, [k[it_sample,:]]]\n                nnijk = np.zeros((2,2,parents_count))\n                for p in range(parents_count - 1):\n                    nnijk[:,:,p] = nijk[:,:,p]\n                nnijk[i, j, parents_count - 1] = 1\n                nijk = nnijk\n                pass\n            pass\n        nik = np.ndarray((2, parents_count))\n        njk = np.ndarray((2, parents_count))\n        for k_index in range(parents_count):\n            nik[:, k_index] = nijk[:, :, 
k_index].sum(axis = 1)\n            njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)\n            pass\n        nk = njk.sum(axis = 0)\n        tlog = np.zeros((2, 2 , parents_count))\n        tlog.fill(np.nan)\n        for k in range(parents_count):\n            tX = np.array([nik[:,k]]).T\n            tY = np.array([njk[:,k]])\n            tdijk = tX.dot(tY)\n            tlog[:,:,k] = nijk[:,:,k] * nk[k] / tdijk\n            pass\n        pass\n    log_tlog = np.log(tlog)\n    G2 = np.nansum(2 * nijk * log_tlog)\n    \n    \n    \n    \n    _logger.debug('G2 = %f' % G2)\n    p_val = chi2.sf(G2, dof)\n    _logger.info('p_val = %s' % str(p_val))\n    return p_val", "docstring": "G square test for a binary data.\n\nArgs:\ndm: the data matrix to be used (as a numpy.ndarray).\nx: the first node (as an integer).\ny: the second node (as an integer).\ns: the set of neibouring nodes of x and y (as a set()).\n\nReturns:\np_val: the p-value of conditional independence.", "source": "juraj-google-style"}
{"code": "def check(self, url: str) -> Optional[dict]:\n    data = self.data.get(url)\n    if data:\n        data = self._check_expiration(url, data)\n    return (data.data if data else None)", "docstring": "Check if data for a url has expired.\n\nData is not fetched again if it has expired.\n\nArgs:\nurl: url to check expiration on\n\nReturns:\nvalue of the data, possibly None", "source": "codesearchnet"}
{"code": "def _get_arguments_for_execution(self, function_name, serialized_args):\n    arguments = []\n    for (i, arg) in enumerate(serialized_args):\n        if isinstance(arg, ObjectID):\n            argument = self.get_object([arg])[0]\n            if isinstance(argument, RayError):\n                raise argument\n        else:\n            argument = arg\n        arguments.append(argument)\n    return arguments", "docstring": "Retrieve the arguments for the remote function.\n\nThis retrieves the values for the arguments to the remote function that\nwere passed in as object IDs. Arguments that were passed by value are\nnot changed. This is called by the worker that is executing the remote\nfunction.\n\nArgs:\nfunction_name (str): The name of the remote function whose\narguments are being retrieved.\nserialized_args (List): The arguments to the function. These are\neither strings representing serialized objects passed by value\nor they are ray.ObjectIDs.\n\nReturns:\nThe retrieved arguments in addition to the arguments that were\npassed by value.\n\nRaises:\nRayError: This exception is raised if a task that\ncreated one of the arguments failed.", "source": "codesearchnet"}
{"code": "def _call_and_serialize(cls, method, data, refresh=False):\n    method(data)\n    if refresh:\n        return cls.read(method.__self__, data[cls.__uid_field__])\n    else:\n        return cls.deserialize(cls._get_non_empty_dict(data))", "docstring": "Call the remote method with data, and optionally refresh.\n\nArgs:\nmethod (callable): The method on the Authenticated Five9 object\nthat should be called.\ndata (dict): A data dictionary that will be passed as the first\nand only position argument to ``method``.\nrefresh (bool, optional): Set to ``True`` to get the record data\nfrom Five9 before returning the record.\n\nReturns:\nBaseModel: The newly created record. If ``refresh`` is ``True``,\nthis will be fetched from Five9. Otherwise, it's the data\nrecord that was sent to the server.", "source": "codesearchnet"}
{"code": "def isnan(x):\n    if any_symbolic_tensors((x,)):\n        return Isnan().symbolic_call(x)\n    return backend.numpy.isnan(x)", "docstring": "Test element-wise for NaN and return result as a boolean tensor.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput boolean tensor.", "source": "github-repos"}
{"code": "def merge(self, other):\n        \n        if other.seed != self.seed:\n            raise ValueError(\"Cannot merge MinHash with\\\n                    different seeds\")\n        if len(self) != len(other):\n            raise ValueError(\"Cannot merge MinHash with\\\n                    different numbers of permutation functions\")\n        self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)", "docstring": "Merge the other MinHash with this one, making this one the union\nof both.\n\nArgs:\nother (datasketch.MinHash): The other MinHash.", "source": "juraj-google-style"}
{"code": "def get_duration(self, matrix_name):\n        \n        duration = 0.0\n        if matrix_name in self.data:\n            duration = sum([stage.duration() for stage in self.data[matrix_name]])\n        return duration", "docstring": "Get duration for a concrete matrix.\n\nArgs:\nmatrix_name (str): name of the Matrix.\n\nReturns:\nfloat: duration of concrete matrix in seconds.", "source": "juraj-google-style"}
{"code": "def preprocess_input(x, data_format=None):\n    return x", "docstring": "A placeholder method for backward compatibility.\n\nThe preprocessing logic has been included in the mobilenet_v3 model\nimplementation. Users are no longer required to call this method to\nnormalize the input data. This method does nothing and only kept as a\nplaceholder to align the API surface between old and new version of model.\n\nArgs:\nx: A floating point `numpy.array` or a tensor.\ndata_format: Optional data format of the image tensor/array.\n`None` means the global setting\n`keras.config.image_data_format()` is used\n(unless you changed it, it uses `\"channels_last\"`).\nDefaults to `None`.\n\nReturns:\nUnchanged `numpy.array` or tensor.", "source": "github-repos"}
{"code": "def Convert(self, values, start_index=0, end_index=None):\n    if (not values):\n        return\n    try:\n        total_batch_count = (len(values) \n    except TypeError:\n        total_batch_count = (- 1)\n    pool = ThreadPool.Factory(self.threadpool_prefix, self.threadpool_size)\n    val_iterator = itertools.islice(values, start_index, end_index)\n    pool.Start()\n    try:\n        for (batch_index, batch) in enumerate(collection.Batch(val_iterator, self.batch_size)):\n            logging.debug('Processing batch %d out of %d', batch_index, total_batch_count)\n            pool.AddTask(target=self.ConvertBatch, args=(batch,), name=('batch_%d' % batch_index), inline=False)\n    finally:\n        pool.Stop(join_timeout=3600)", "docstring": "Converts given collection to exported values.\n\nThis method uses a threadpool to do the conversion in parallel. It\nblocks for up to one hour until everything is converted.\n\nArgs:\nvalues: Iterable object with values to convert.\nstart_index: Start from this index in the collection.\nend_index: Finish processing on the (index - 1) element of the collection.\nIf None, work till the end of the collection.\n\nReturns:\nNothing. ConvertedBatch() should handle the results.", "source": "codesearchnet"}
{"code": "def setlogging(mlogger, defval=None):\n    log_level = os.getenv('SYN_LOG_LEVEL', defval)\n    if log_level:\n        log_level = log_level.upper()\n        if (log_level not in s_const.LOG_LEVEL_CHOICES):\n            raise ValueError('Invalid log level provided: {}'.format(log_level))\n        logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)\n        mlogger.info('log level set to %s', log_level)", "docstring": "Configure synapse logging.\n\nArgs:\nmlogger (logging.Logger): Reference to a logging.Logger()\ndefval (str): Default log level\n\nNotes:\nThis calls logging.basicConfig and should only be called once per process.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      vslvm_handle = pyvslvm.handle()\n      vslvm_handle.open_file_object(file_object)\n      \n      vslvm_handle.open_physical_volume_files_as_file_objects([\n          file_object])\n      vslvm_volume_group = vslvm_handle.get_volume_group()\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._vslvm_handle = vslvm_handle\n    self._vslvm_volume_group = vslvm_volume_group", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def max_validator(max_value):\n    \n    def validator(value):\n        if value > max_value:\n            raise ValidationError(\"{} is not <= {}\".format(value, max_value))\n\n    return validator", "docstring": "Return validator function that ensures upper bound of a number.\n\nResult validation function will validate the internal value of resource\ninstance field with the ``value >= min_value`` check.\n\nArgs:\nmax_value: maximum value for new validator", "source": "juraj-google-style"}
{"code": "def generate(self, model_len=None, model_width=None):\n        \n\n        if model_len is None:\n            model_len = Constant.MODEL_LEN\n        if model_width is None:\n            model_width = Constant.MODEL_WIDTH\n        pooling_len = int(model_len / 4)\n        graph = Graph(self.input_shape, False)\n        temp_input_channel = self.input_shape[-1]\n        output_node_id = 0\n        stride = 1\n        for i in range(model_len):\n            output_node_id = graph.add_layer(StubReLU(), output_node_id)\n            output_node_id = graph.add_layer(\n                self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id\n            )\n            output_node_id = graph.add_layer(\n                self.conv(temp_input_channel, model_width, kernel_size=3, stride=stride),\n                output_node_id,\n            )\n            temp_input_channel = model_width\n            if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):\n                output_node_id = graph.add_layer(self.pooling(), output_node_id)\n\n        output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id)\n        output_node_id = graph.add_layer(\n            self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id\n        )\n        output_node_id = graph.add_layer(\n            StubDense(graph.node_list[output_node_id].shape[0], model_width),\n            output_node_id,\n        )\n        output_node_id = graph.add_layer(StubReLU(), output_node_id)\n        graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)\n        return graph", "docstring": "Generates a CNN.\nArgs:\nmodel_len: An integer. Number of convolutional layers.\nmodel_width: An integer. Number of filters for the convolutional layers.\nReturns:\nAn instance of the class Graph. Represents the neural architecture graph of the generated model.", "source": "juraj-google-style"}
{"code": "def as_dict(self):\n    ret = {}\n    for job in self.jobs:\n        task_indices = self.task_indices(job)\n        if len(task_indices) == 0:\n            ret[job] = {}\n            continue\n        if max(task_indices) + 1 == len(task_indices):\n            ret[job] = self.job_tasks(job)\n        else:\n            ret[job] = {i: self.task_address(job, i) for i in task_indices}\n    return ret", "docstring": "Returns a dictionary from job names to their tasks.\n\nFor each job, if the task index space is dense, the corresponding\nvalue will be a list of network addresses; otherwise it will be a\ndictionary mapping (sparse) task indices to the corresponding\naddresses.\n\nReturns:\nA dictionary mapping job names to lists or dictionaries\ndescribing the tasks in those jobs.", "source": "github-repos"}
{"code": "def ParseLastVisitedRow(\n      self, parser_mediator, query, row, cache=None, database=None,\n      **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    hidden = self._GetRowValue(query_hash, row, 'hidden')\n    transition = self._GetRowValue(query_hash, row, 'transition')\n\n    visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')\n    from_visit = self._GetRowValue(query_hash, row, 'from_visit')\n\n    event_data = ChromeHistoryPageVisitedEventData()\n    event_data.from_visit = self._GetUrl(from_visit, cache, database)\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.page_transition_type = (\n        transition & self._PAGE_TRANSITION_CORE_MASK)\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.url_hidden = hidden == '1'\n    event_data.visit_source = self._GetVisitSource(\n        visit_identifier, cache, database)\n\n    timestamp = self._GetRowValue(query_hash, row, 'visit_time')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a last visited row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (SQLiteCache): cache which contains cached results from querying\nthe visits and urls tables.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "juraj-google-style"}
{"code": "def setScales(self, scales=None, term_num=None):\n    if (scales == None):\n        for term_i in range(self.n_terms):\n            n_scales = self.vd.getTerm(term_i).getNumberScales()\n            self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales)))\n    elif (term_num == None):\n        assert (scales.shape[0] == self.vd.getNumberScales()), 'incompatible shape'\n        index = 0\n        for term_i in range(self.n_terms):\n            index1 = (index + self.vd.getTerm(term_i).getNumberScales())\n            self.vd.getTerm(term_i).setScales(scales[index:index1])\n            index = index1\n    else:\n        assert (scales.shape[0] == self.vd.getTerm(term_num).getNumberScales()), 'incompatible shape'\n        self.vd.getTerm(term_num).setScales(scales)", "docstring": "get random initialization of variances based on the empirical trait variance\n\nArgs:\nscales:     if scales==None: set them randomly,\nelse: set scales to term_num (if term_num==None: set to all terms)\nterm_num:   set scales to term_num", "source": "codesearchnet"}
{"code": "def is_ref(x):\n    return isinstance(x, variables_module.Variable) or (isinstance(x, module.Module) and hasattr(x, 'dtype') and hasattr(x, 'shape'))", "docstring": "Evaluates if the object has reference semantics.\n\nAn object is deemed \"reference\" if it is a `tf.Variable` instance or is\nderived from a `tf.Module` with `dtype` and `shape` properties.\n\nArgs:\nx: Any object.\n\nReturns:\nis_ref: Python `bool` indicating input is has nonreference semantics, i.e.,\nis a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties.", "source": "github-repos"}
{"code": "def forward(self, x):\n    head_outputs = ([None] * self.t)\n    if isinstance(self.input_layer, list):\n        input_outputs = [mod(x) for (mod, x) in zip(self.input_layer, x)]\n        x = torch.stack(input_outputs, dim=1)\n        for t in self.task_map[0]:\n            head = self.heads[t]\n            head_outputs[t] = head(input_outputs[t])\n    else:\n        x = self.input_layer(x)\n        for t in self.task_map[0]:\n            head = self.heads[t]\n            head_outputs[t] = head(x)\n    for (i, layer) in enumerate(self.middle_layers, start=1):\n        x = layer(x)\n        for t in self.task_map[i]:\n            head = self.heads[t]\n            if (self.config['pass_predictions'] and bool(self.task_graph.parents[t])):\n                task_input = [x]\n                for p in self.task_graph.parents[t]:\n                    task_input.append(head_outputs[p])\n                task_input = torch.stack(task_input, dim=1)\n            else:\n                task_input = x\n            head_outputs[t] = head(task_input)\n    return head_outputs", "docstring": "Returns a list of outputs for tasks 0,...t-1\n\nArgs:\nx: a [batch_size, ...] batch from X", "source": "codesearchnet"}
{"code": "def _load_from_cache_if_available(self, key):\n    if (key in self._cache):\n        entity = self._cache[key]\n        if ((entity is None) or (entity._key == key)):\n            raise tasklets.Return(entity)", "docstring": "Returns a cached Model instance given the entity key if available.\n\nArgs:\nkey: Key instance.\n\nReturns:\nA Model instance if the key exists in the cache.", "source": "codesearchnet"}
{"code": "def __init__(self, params_arr, cost_functionable):\n        \n        self.__params_arr = params_arr\n        if isinstance(cost_functionable, CostFunctionable):\n            self.__cost_functionable = cost_functionable\n        else:\n            raise TypeError", "docstring": "Init.\n\nArgs:\nparams_arr:           The parameters.\ncost_functionable:    is-a `CostFunctionable`.", "source": "juraj-google-style"}
{"code": "def _Ifup(self, interfaces, logger):\n    ifup = ['/usr/sbin/wicked', 'ifup', '--timeout', '1']\n    try:\n        subprocess.check_call((ifup + interfaces))\n    except subprocess.CalledProcessError:\n        logger.warning('Could not activate interfaces %s.', interfaces)", "docstring": "Activate network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"}
{"code": "def ReleaseRecords(cls, ids, token):\n    with data_store.DB.GetMutationPool() as mutation_pool:\n        mutation_pool.QueueReleaseRecords(ids)", "docstring": "Release records identified by subjects.\n\nReleases any claim on the records identified by ids.\n\nArgs:\nids: A list of ids provided by ClaimRecords.\ntoken: The database access token to write with.\n\nRaises:\nLockError: If the queue is not locked.", "source": "codesearchnet"}
{"code": "def put(self, entity):\n    \n    self._cur_batch.put(entity)\n    self._num_mutations += 1\n    if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:\n      self.commit()\n      self.begin()", "docstring": "Adds mutation of the entity to the mutation buffer.\n\nIf mutation buffer reaches its capacity then this method commit all pending\nmutations from the buffer and emties it.\n\nArgs:\nentity: entity which should be put into the datastore", "source": "juraj-google-style"}
{"code": "def _batch_prepare_for_model(self, batch_ids_pairs: List[Tuple[List[int], None]], batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]], batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n    batch_outputs = {}\n    for input_ids, entity_ids, entity_token_span_pairs in zip(batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs):\n        first_ids, second_ids = input_ids\n        first_entity_ids, second_entity_ids = entity_ids\n        first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs\n        outputs = self.prepare_for_model(first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs\nbatch_entity_ids_pairs: list of entity ids or entity ids pairs\nbatch_entity_token_spans_pairs: list of entity spans or entity spans pairs\nmax_entity_length: The maximum length of the entity sequence.", "source": "github-repos"}
{"code": "def make_slot_check(wanted):\n    \n    if isinstance(wanted, types.FunctionType):\n        return wanted  \n\n    if isinstance(wanted, int):\n        item, meta = wanted, None\n    elif isinstance(wanted, Slot):\n        item, meta = wanted.item_id, wanted.damage  \n    elif isinstance(wanted, (Item, Block)):\n        item, meta = wanted.id, wanted.metadata\n    elif isinstance(wanted, str):\n        item_or_block = get_item_or_block(wanted, init=True)\n        item, meta = item_or_block.id, item_or_block.metadata\n    else:  \n        try:\n            item, meta = wanted\n        except TypeError:\n            raise ValueError('Illegal args for make_slot_check(): %s' % wanted)\n\n    return lambda slot: item == slot.item_id and meta in (None, slot.damage)", "docstring": "Creates and returns a function that takes a slot\nand checks if it matches the wanted item.\n\nArgs:\nwanted: function(Slot) or Slot or itemID or (itemID, metadata)", "source": "juraj-google-style"}
{"code": "def addSources(self, *sources):\n    self._sources.extend(sources)\n    ((debug.logger & debug.flagCompiler) and debug.logger(('current MIB source(s): %s' % ', '.join([str(x) for x in self._sources]))))\n    return self", "docstring": "Add more ASN.1 MIB source repositories.\n\nMibCompiler.compile will invoke each of configured source objects\nin order of their addition asking each to fetch MIB module specified\nby name.\n\nArgs:\nsources: reader object(s)\n\nReturns:\nreference to itself (can be used for call chaining)", "source": "codesearchnet"}
{"code": "def days_in_leap_and_nonleap_years_between(start_date, end_date):\n    days_between = end_date.ordinal() - start_date.ordinal()\n    days_in_leap_years = days_in_leap_years_between(start_date, end_date)\n    return (days_in_leap_years, days_between - days_in_leap_years)", "docstring": "Calculates number of days that fall on leap and non-leap years.\n\nCalculates a tuple '(days_in_leap_years, days_in_nonleap_years)'.\n'start_date' is included and 'end_date' is excluded from the period.\n\nFor example, for dates `2019-12-24` and `2024-2-10` the result is\n(406, 1103):\n406 = 366 days in 2020 + 31 in Jan 2024 + 9 in Feb 2024,\n1103 = 8 in 2019 + 365 in 2021 + 365 in 2022 + 365 in 2023.\n\nIf `end_date` is earlier than `start_date`, the result will be negative or\nzero.\n\nArgs:\nstart_date: DateTensor.\nend_date: DateTensor compatible with `start_date`.\n\nReturns:\nTuple of two Tensors of type 'int32'.", "source": "github-repos"}
{"code": "def plot_val_with_title(self, idxs, y):\n    if (len(idxs) > 0):\n        imgs = np.stack([self.ds[x][0] for x in idxs])\n        title_probs = [self.probs[(x, y)] for x in idxs]\n        return plots(self.ds.denorm(imgs), rows=1, titles=title_probs)\n    else:\n        return False", "docstring": "Displays the images and their probabilities of belonging to a certain class\n\nArguments:\nidxs (numpy.ndarray): indexes of the image samples from the dataset\ny (int): the selected class\n\nReturns:\nPlots the images in n rows [rows = n]", "source": "codesearchnet"}
{"code": "def GetSubFileEntryByName(self, name, case_sensitive=True):\n    \n    name_lower = name.lower()\n    matching_sub_file_entry = None\n\n    for sub_file_entry in self.sub_file_entries:\n      if sub_file_entry.name == name:\n        return sub_file_entry\n\n      if not case_sensitive and sub_file_entry.name.lower() == name_lower:\n        if not matching_sub_file_entry:\n          matching_sub_file_entry = sub_file_entry\n\n    return matching_sub_file_entry", "docstring": "Retrieves a sub file entry by name.\n\nArgs:\nname (str): name of the file entry.\ncase_sensitive (Optional[bool]): True if the name is case sensitive.\n\nReturns:\nFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def grid(self, dimensions=None, **kwargs):\n        \n        dimensions = self._valid_dimensions(dimensions)\n        if len(dimensions) == self.ndims:\n            with item_check(False):\n                return GridSpace(self, **kwargs).reindex(dimensions)\n        return self.groupby(dimensions, container_type=GridSpace, **kwargs)", "docstring": "Group by supplied dimension(s) and lay out groups in grid\n\nGroups data by supplied dimension(s) laying the groups along\nthe dimension(s) out in a GridSpace.\n\nArgs:\ndimensions: Dimension/str or list\nDimension or list of dimensions to group by\n\nReturns:\nGridSpace with supplied dimensions", "source": "juraj-google-style"}
{"code": "def from_api_repr(cls, resource):\n        \n        version = resource.get(\"version\")\n        etag = resource.get(\"etag\")\n        policy = cls(etag, version)\n        for binding in resource.get(\"bindings\", ()):\n            role = binding[\"role\"]\n            members = sorted(binding[\"members\"])\n            policy[role] = members\n        return policy", "docstring": "Factory: create a policy from a JSON resource.\n\nArgs:\nresource (dict): policy resource returned by ``getIamPolicy`` API.\n\nReturns:\n:class:`Policy`: the parsed policy", "source": "juraj-google-style"}
{"code": "def validate_source_dir(script, directory):\n    if directory:\n        if (not os.path.isfile(os.path.join(directory, script))):\n            raise ValueError('No file named \"{}\" was found in directory \"{}\".'.format(script, directory))\n    return True", "docstring": "Validate that the source directory exists and it contains the user script\n\nArgs:\nscript (str):  Script filename.\ndirectory (str): Directory containing the source file.\n\nRaises:\nValueError: If ``directory`` does not exist, is not a directory, or does not contain ``script``.", "source": "codesearchnet"}
{"code": "def layer_normalization(x, gamma=None, beta=None, axis=-1, epsilon=None, **kwargs):\n    rms_scaling = kwargs.pop('rms_scaling', False)\n    if rms_scaling:\n        warnings.warn('You passed `rms_scaling=True`, which is deprecated. This argument incorrectly scales the input by the variance, not the root mean square. To correctly use RMS Normalization, please use `keras.ops.rms_normalization` / `keras.ops.nn.rms_normalization` instead.')\n    if any_symbolic_tensors((x,)):\n        return LayerNorm(gamma=gamma, beta=beta, axis=axis, epsilon=epsilon, rms_scaling=rms_scaling).symbolic_call(x)\n    return _layer_normalization(x, gamma=gamma, beta=beta, axis=axis, epsilon=epsilon, rms_scaling=rms_scaling)", "docstring": "Layer normalization layer (Ba et al., 2016).\n\nNormalize the activations of the previous layer for each given example in a\nbatch independently, rather than across a batch like Batch Normalization.\ni.e. applies a transformation that maintains the mean activation within each\nexample close to 0 and the activation standard deviation close to 1.\nArgs:\nx: Input tensor.\naxis: The axis or axes along which to perform normalization.\nDefault to -1.\ngamma: Optional scaling factor for the normalization.\nbeta: Optional add offset for the normalized tensor.\nepsilon: A lower bound value for the norm.\nDefaults to `backend.epsilon()`.\n\nReturns:\nThe normalized array.\n>>> x = ops.arange(5,dtype = \"float32\")\n>>> x_norm = ops.layer_normalization(x)\n>>> print(x_norm)\narray([-1.4142135 , -0.70710677,  0.,  0.7071067 ,  1.4142135 ])", "source": "github-repos"}
{"code": "def add_periodic_callback(self, callback, period_milliseconds):\n    from ..server.callbacks import PeriodicCallback\n    cb = PeriodicCallback(self, None, period_milliseconds)\n    return self._add_session_callback(cb, callback, one_shot=False, originator=self.add_periodic_callback)", "docstring": "Add a callback to be invoked on a session periodically.\n\nArgs:\ncallback (callable) :\nA callback function to execute periodically\n\nperiod_milliseconds (int) :\nNumber of milliseconds between each callback execution.\n\nReturns:\nPeriodicCallback : can be used with ``remove_periodic_callback``\n\n.. note::\nPeriodic callbacks only work within the context of a Bokeh server\nsession. This function will no effect when Bokeh outputs to\nstandalone HTML or Jupyter notebook cells.", "source": "codesearchnet"}
{"code": "def add_genstrings_comments_to_file(localization_file, genstrings_err):\n    errors_to_log = [line for line in genstrings_err.splitlines() if ('used with multiple comments' not in line)]\n    if (len(errors_to_log) > 0):\n        logging.warning('genstrings warnings:\\n%s', '\\n'.join(errors_to_log))\n    loc_file = open_strings_file(localization_file, 'a')\n    regex_matches = re.findall('Warning: Key \"(.*?)\" used with multiple comments (\"[^\"]*\" (& \"[^\"]*\")+)', genstrings_err)\n    logging.info('Adding multiple comments from genstrings output')\n    for regex_match in regex_matches:\n        if (len(regex_match) == 3):\n            key = regex_match[0]\n            comments = [comment.strip()[1:(- 1)] for comment in regex_match[1].split('&')]\n            logging.info('Found key with %d comments: %s', len(comments), key)\n            loc_key = LocalizationEntry(comments, key, key)\n            loc_file.write(unicode(loc_key))\n            loc_file.write(u'\\n')\n    loc_file.close()", "docstring": "Adds the comments produced by the genstrings script for duplicate keys.\n\nArgs:\nlocalization_file (str): The path to the strings file.", "source": "codesearchnet"}
{"code": "def gene_panel(self, panel_id, version=None):\n        \n        query = {'panel_name': panel_id}\n        if version:\n            LOG.info(\"Fetch gene panel {0}, version {1} from database\".format(\n                panel_id, version\n            ))\n            query['version'] = version\n            return self.panel_collection.find_one(query)\n        else:\n            LOG.info(\"Fetching gene panels %s from database\", panel_id)\n            res = self.panel_collection.find(query).sort('version', -1)\n            if res.count() > 0:\n                return res[0]\n            else:\n                LOG.info(\"No gene panel found\")\n                return None", "docstring": "Fetch a gene panel.\n\nIf no panel is sent return all panels\n\nArgs:\npanel_id (str): unique id for the panel\nversion (str): version of the panel. If 'None' latest version will be returned\n\nReturns:\ngene_panel: gene panel object", "source": "juraj-google-style"}
{"code": "def arctanh(x):\n    if any_symbolic_tensors((x,)):\n        return Arctanh().symbolic_call(x)\n    return backend.numpy.arctanh(x)", "docstring": "Inverse hyperbolic tangent, element-wise.\n\nArguments:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"}
{"code": "def get_ignition_type(root):\n    properties = {}\n    elem = root.find('ignitionType')\n    if (elem is None):\n        raise MissingElementError('ignitionType')\n    elem = elem.attrib\n    if ('target' in elem):\n        ign_target = elem['target'].rstrip(';').upper()\n    else:\n        raise MissingAttributeError('target', 'ignitionType')\n    if ('type' in elem):\n        ign_type = elem['type']\n        if (ign_type == 'baseline max intercept from d/dt'):\n            ign_type = 'd/dt max extrapolated'\n    else:\n        raise MissingAttributeError('type', 'ignitionType')\n    if (len(ign_target.split(';')) > 1):\n        raise NotImplementedError('Multiple ignition targets not supported.')\n    if (ign_target == 'OHEX'):\n        ign_target = 'OH*'\n    elif (ign_target == 'CHEX'):\n        ign_target = 'CH*'\n    elif (ign_target == 'P'):\n        ign_target = 'pressure'\n    elif (ign_target == 'T'):\n        ign_target = 'temperature'\n    if (ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']):\n        raise KeywordError((ign_target + ' not valid ignition target'))\n    if (ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']):\n        raise KeywordError((ign_type + ' not valid ignition type'))\n    properties['type'] = ign_type\n    properties['target'] = ign_target\n    return properties", "docstring": "Gets ignition type and target.\n\nArgs:\nroot (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file\n\nReturns:\nproperties (`dict`): Dictionary with ignition type/target information", "source": "codesearchnet"}
{"code": "def random_sample(list_, nSample, strict=False, rng=None, seed=None):\n    rng = ensure_rng((seed if (rng is None) else rng))\n    if isinstance(list_, list):\n        list2_ = list_[:]\n    else:\n        list2_ = np.copy(list_)\n    if ((len(list2_) == 0) and (not strict)):\n        return list2_\n    rng.shuffle(list2_)\n    if ((nSample is None) and (strict is False)):\n        return list2_\n    if (not strict):\n        nSample = min(max(0, nSample), len(list2_))\n    sample_list = list2_[:nSample]\n    return sample_list", "docstring": "Grabs data randomly\n\nArgs:\nlist_ (list):\nnSample (?):\nstrict (bool): (default = False)\nrng (module):  random number generator(default = numpy.random)\nseed (None): (default = None)\n\nReturns:\nlist: sample_list\n\nCommandLine:\npython -m utool.util_numpy --exec-random_sample\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_numpy import *  # NOQA\n>>> list_ = np.arange(10)\n>>> nSample = 4\n>>> strict = False\n>>> rng = np.random.RandomState(0)\n>>> seed = None\n>>> sample_list = random_sample(list_, nSample, strict, rng, seed)\n>>> result = ('sample_list = %s' % (str(sample_list),))\n>>> print(result)", "source": "codesearchnet"}
{"code": "def _get_saver_or_default():\n    collection_key = ops.GraphKeys.SAVERS\n    savers = ops.get_collection(collection_key)\n    if savers:\n        if len(savers) > 1:\n            raise RuntimeError('More than one item in collection {}. Please indicate which one to use by passing it to the constructor.'.format(collection_key))\n        return savers[0]\n    saver = Saver(sharded=True, allow_empty=True)\n    if saver is not None:\n        ops.add_to_collection(collection_key, saver)\n    return saver", "docstring": "Returns the saver from SAVERS collection, or creates a default one.\n\nThis method is used by other members of the training module, such as\n`Scaffold`, or `CheckpointSaverHook`.\n\nReturns:\n`Saver`.\n\nRaises:\nRuntimeError: If the SAVERS collection already has more than one items.", "source": "github-repos"}
{"code": "def _LineContainsI18n(line):\n    if style.Get('I18N_COMMENT'):\n        for tok in line.tokens:\n            if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value):\n                return True\n    if style.Get('I18N_FUNCTION_CALL'):\n        length = len(line.tokens)\n        for index in range(length - 1):\n            if line.tokens[index + 1].value == '(' and line.tokens[index].value in style.Get('I18N_FUNCTION_CALL'):\n                return True\n    return False", "docstring": "Return true if there are i18n comments or function calls in the line.\n\nI18n comments and pseudo-function calls are closely related. They cannot\nbe moved apart without breaking i18n.\n\nArguments:\nline: (logical_line.LogicalLine) The line currently being formatted.\n\nReturns:\nTrue if the line contains i18n comments or function calls. False otherwise.", "source": "github-repos"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    values_dict = {}\n\n    service_type_value = registry_key.GetValueByName('Type')\n    service_start_value = registry_key.GetValueByName('Start')\n\n    \n    if service_type_value and service_start_value:\n      service_dll = self.GetServiceDll(registry_key)\n      if service_dll:\n        values_dict['ServiceDll'] = service_dll\n\n      \n      for value in registry_key.GetValues():\n        if not value.name:\n          continue\n        if value.name not in values_dict:\n          if value.DataIsString() or value.DataIsInteger():\n            values_dict[value.name] = value.GetDataAsObject()\n          elif value.DataIsMultiString():\n            values_dict[value.name] = ', '.join(value.GetDataAsObject())\n\n      \n      \n      event_data = windows_events.WindowsRegistryServiceEventData()\n      event_data.key_path = registry_key.path\n      event_data.offset = registry_key.offset\n      event_data.regvalue = values_dict\n      event_data.urls = self.URLS\n\n      event = time_events.DateTimeValuesEvent(\n          registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def _parameterize_string(raw):\n    parts = []\n    s_index = 0\n    for match in _PARAMETER_PATTERN.finditer(raw):\n        parts.append(raw[s_index:match.start()])\n        parts.append({u'Ref': match.group(1)})\n        s_index = match.end()\n    if (not parts):\n        return GenericHelperFn(raw)\n    parts.append(raw[s_index:])\n    return GenericHelperFn({u'Fn::Join': [u'', parts]})", "docstring": "Substitute placeholders in a string using CloudFormation references\n\nArgs:\nraw (`str`): String to be processed. Byte strings are not\nsupported; decode them before passing them to this function.\n\nReturns:\n`str` | :class:`troposphere.GenericHelperFn`: An expression with\nplaceholders from the input replaced, suitable to be passed to\nTroposphere to be included in CloudFormation template. This will\nbe the input string without modification if no substitutions are\nfound, and a composition of CloudFormation calls otherwise.", "source": "codesearchnet"}
{"code": "def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = HangoutsMessageData()\n    event_data.sender = self._GetRowValue(query_hash, row, 'full_name')\n    event_data.body = self._GetRowValue(query_hash, row, 'text')\n    event_data.offset = self._GetRowValue(query_hash, row, '_id')\n    event_data.query = query\n    event_data.message_status = self._GetRowValue(query_hash, row, 'status')\n    event_data.message_type = self._GetRowValue(query_hash, row, 'type')\n\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=timestamp)\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an Messages row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def create(self, *args, **kwargs):\n    rules_dict = [rule.__dict__ for rule in self.forwarding_rules]\n    params = {'name': self.name, 'region': self.region, 'forwarding_rules': rules_dict, 'redirect_http_to_https': self.redirect_http_to_https}\n    if (self.droplet_ids and self.tag):\n        raise ValueError('droplet_ids and tag are mutually exclusive args')\n    elif self.tag:\n        params['tag'] = self.tag\n    else:\n        params['droplet_ids'] = self.droplet_ids\n    if self.algorithm:\n        params['algorithm'] = self.algorithm\n    if self.health_check:\n        params['health_check'] = self.health_check.__dict__\n    if self.sticky_sessions:\n        params['sticky_sessions'] = self.sticky_sessions.__dict__\n    data = self.get_data('load_balancers/', type=POST, params=params)\n    if data:\n        self.id = data['load_balancer']['id']\n        self.ip = data['load_balancer']['ip']\n        self.algorithm = data['load_balancer']['algorithm']\n        self.health_check = HealthCheck(**data['load_balancer']['health_check'])\n        self.sticky_sessions = StickySesions(**data['load_balancer']['sticky_sessions'])\n        self.droplet_ids = data['load_balancer']['droplet_ids']\n        self.status = data['load_balancer']['status']\n        self.created_at = data['load_balancer']['created_at']\n    return self", "docstring": "Creates a new LoadBalancer.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\nname (str): The Load Balancer's name\nregion (str): The slug identifier for a DigitalOcean region\nalgorithm (str, optional): The load balancing algorithm to be\nused. Currently, it must be either \"round_robin\" or\n\"least_connections\"\nforwarding_rules (obj:`list`): A list of `ForwrdingRules` objects\nhealth_check (obj, optional): A `HealthCheck` object\nsticky_sessions (obj, optional): A `StickySessions` object\nredirect_http_to_https (bool, optional): A boolean indicating\nwhether HTTP requests to the Load Balancer should be\nredirected to HTTPS\ndroplet_ids (obj:`list` of `int`): A list of IDs representing\nDroplets to be added to the Load Balancer (mutually\nexclusive with 'tag')\ntag (str): A string representing a DigitalOcean Droplet tag\n(mutually exclusive with 'droplet_ids')", "source": "codesearchnet"}
{"code": "def partial_declaration_path(decl):\n    if (not decl):\n        return []\n    if (not decl.cache.partial_declaration_path):\n        result = [decl.partial_name]\n        parent = decl.parent\n        while parent:\n            if parent.cache.partial_declaration_path:\n                result.reverse()\n                decl.cache.partial_declaration_path = (parent.cache.partial_declaration_path + result)\n                return decl.cache.partial_declaration_path\n            else:\n                result.append(parent.partial_name)\n                parent = parent.parent\n        result.reverse()\n        decl.cache.partial_declaration_path = result\n        return result\n    return decl.cache.partial_declaration_path", "docstring": "Returns a list of parent declarations names without template arguments that\nhave default value.\n\nArgs:\ndecl (declaration_t): declaration for which the partial declaration\npath should be calculated.\n\nReturns:\nlist[(str | basestring)]: list of names, where first item is the top\nparent name and last item the inputted\ndeclaration name.", "source": "codesearchnet"}
{"code": "class AriaTextDecoderLayer(LlamaDecoderLayer):\n\n    def __init__(self, config: AriaTextConfig, layer_idx: int):\n        super().__init__(self)\n        self.mlp = AriaTextMoELayer(config)", "docstring": "Aria Text Decoder Layer.\n\nThis class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.\n\nArgs:\nconfig (`AriaTextConfig`):\nConfiguration object for the text component of the model.\nlayer_idx (`int`):\nIndex of the layer.", "source": "github-repos"}
{"code": "def getCmdOpts(self, text):\n        \n        off = 0\n\n        _, off = s_syntax.nom(text, off, s_syntax.whites)\n\n        name, off = s_syntax.meh(text, off, s_syntax.whites)\n\n        _, off = s_syntax.nom(text, off, s_syntax.whites)\n\n        opts = {}\n\n        args = collections.deque([synt for synt in self._cmd_syntax if not synt[0].startswith('-')])\n\n        switches = {synt[0]: synt for synt in self._cmd_syntax if synt[0].startswith('-')}\n\n        \n        for synt in self._cmd_syntax:\n            snam = synt[0].strip('-')\n\n            defval = synt[1].get('defval')\n            if defval is not None:\n                opts[snam] = defval\n\n            if synt[1].get('type') in ('list', 'kwlist'):\n                opts[snam] = []\n\n        def atswitch(t, o):\n            \n            \n            if not text.startswith('-', o):\n                return None, o\n\n            name, x = s_syntax.meh(t, o, s_syntax.whites)\n            swit = switches.get(name)\n            if swit is None:\n                return None, o\n\n            return swit, x\n\n        while off < len(text):\n\n            _, off = s_syntax.nom(text, off, s_syntax.whites)\n\n            swit, off = atswitch(text, off)\n            if swit is not None:\n\n                styp = swit[1].get('type', 'flag')\n                snam = swit[0].strip('-')\n\n                if styp == 'valu':\n                    valu, off = s_syntax.parse_cmd_string(text, off)\n                    opts[snam] = valu\n\n                elif styp == 'list':\n                    valu, off = s_syntax.parse_cmd_string(text, off)\n                    if not isinstance(valu, list):\n                        valu = valu.split(',')\n                    opts[snam].extend(valu)\n\n                elif styp == 'enum':\n                    vals = swit[1].get('enum:vals')\n                    valu, off = s_syntax.parse_cmd_string(text, off)\n                    if valu not in vals:\n                        raise s_exc.BadSyntax(mesg='%s (%s)' % (swit[0], '|'.join(vals)),\n                                                   text=text)\n\n                    opts[snam] = valu\n\n                else:\n                    opts[snam] = True\n\n                continue\n\n            if not args:\n                raise s_exc.BadSyntax(mesg='trailing text: [%s]' % (text[off:],),\n                                           text=text)\n\n            synt = args.popleft()\n            styp = synt[1].get('type', 'valu')\n\n            \n            if styp == 'glob':\n                opts[synt[0]] = text[off:]\n                break\n\n            \n            if styp == 'list':\n                valu = []\n\n                while off < len(text):\n                    item, off = s_syntax.parse_cmd_string(text, off)\n                    valu.append(item)\n\n                opts[synt[0]] = valu\n                break\n\n            if styp == 'kwlist':\n                kwlist, off = s_syntax.parse_cmd_kwlist(text, off)\n                opts[snam] = kwlist\n                break\n\n            valu, off = s_syntax.parse_cmd_string(text, off)\n            opts[synt[0]] = valu\n\n        return opts", "docstring": "Use the _cmd_syntax def to split/parse/normalize the cmd line.\n\nArgs:\ntext (str): Command to process.\n\nNotes:\nThis is implemented independent of argparse (et al) due to the\nneed for syntax aware argument splitting. 
Also, allows different\nsplit per command type\n\nReturns:\ndict: An opts dictionary.", "source": "juraj-google-style"}
{"code": "def _dqdv_combinded_frame(cell, **kwargs):\n    \n\n    cycles = cell.get_cap(\n        method=\"forth-and-forth\",\n        categorical_column=True,\n        label_cycle_number=True,\n    )\n    ica_df = dqdv_cycles(cycles, **kwargs)\n    assert isinstance(ica_df, pd.DataFrame)\n    return ica_df", "docstring": "Returns full cycle dqdv data for all cycles as one pd.DataFrame.\n\nArgs:\ncell: CellpyData-object\n\nReturns:\npandas.DataFrame with the following columns:\ncycle: cycle number\nvoltage: voltage\ndq: the incremental capacity", "source": "juraj-google-style"}
{"code": "def create_korobov_samples(order, dim, base=17797):\n    \n    values = numpy.empty(dim)\n    values[0] = 1\n    for idx in range(1, dim):\n        values[idx] = base*values[idx-1] % (order+1)\n\n    grid = numpy.mgrid[:dim, :order+1]\n    out = values[grid[0]] * (grid[1]+1) / (order+1.) % 1.\n    return out[:, :order]", "docstring": "Create Korobov lattice samples.\n\nArgs:\norder (int):\nThe order of the Korobov latice. Defines the number of\nsamples.\ndim (int):\nThe number of dimensions in the output.\nbase (int):\nThe number based used to calculate the distribution of values.\n\nReturns (numpy.ndarray):\nKorobov lattice with ``shape == (dim, order)``", "source": "juraj-google-style"}
{"code": "def __init__(self, filehandles):\n    \n    self._filehandles = filehandles\n    self._pools = [None] * len(filehandles)", "docstring": "Constructor.\n\nArgs:\nfilehandles: list of file handles that this writer outputs to.", "source": "juraj-google-style"}
{"code": "def get_details(app='groupproject', env='dev', region='us-east-1'):\n    url = '{host}/applications/{app}'.format(host=API_URL, app=app)\n    request = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    if (not request.ok):\n        raise SpinnakerAppNotFound('\"{0}\" not found.'.format(app))\n    app_details = request.json()\n    LOG.debug('App details: %s', app_details)\n    group = app_details['attributes'].get('repoProjectKey')\n    project = app_details['attributes'].get('repoSlug')\n    generated = gogoutils.Generator(group, project, env=env, region=region, formats=APP_FORMATS)\n    LOG.debug('Application details: %s', generated)\n    return generated", "docstring": "Extract details for Application.\n\nArgs:\napp (str): Application Name\nenv (str): Environment/account to get details from\n\nReturns:\ncollections.namedtuple with _group_, _policy_, _profile_, _role_,\n_user_.", "source": "codesearchnet"}
{"code": "def _HashRow(cls, row):\n    values = []\n    for value in row:\n        try:\n            value = '{0!s}'.format(value)\n        except UnicodeDecodeError:\n            value = repr(value)\n        values.append(value)\n    return hash(' '.join(values))", "docstring": "Hashes the given row.\n\nArgs:\nrow (sqlite3.Row): row.\n\nReturns:\nint: hash value of the given row.", "source": "codesearchnet"}
{"code": "def convert_to_jax_compatible(cls, x):\n    return x", "docstring": "Convert a tensor to something that the JAX backend can consume.\n\nThis can be a `JAX` array, `JAXSparse` or a NumPy array.\nOnly called after slicing using `__getitem__`.\nUsed to convert sparse tensors and densify ragged tensors.\n\nArgs:\nx: the tensor to convert.\nReturns: the converted tensor.", "source": "github-repos"}
{"code": "def get_containers(self, container_class):\n    \n    with self._store_lock:\n      return self.store.get(container_class.CONTAINER_TYPE, [])", "docstring": "Thread-safe method to retrieve data from the state's store.\n\nArgs:\ncontainer_class: AttributeContainer class used to filter data.\n\nReturns:\nA list of AttributeContainer objects of matching CONTAINER_TYPE.", "source": "juraj-google-style"}
{"code": "def _get_sorted_inputs(filename):\n    with tf.gfile.Open(filename) as f:\n        records = f.read().split('\\n')\n        inputs = [record.strip() for record in records]\n        if (not inputs[(- 1)]):\n            inputs.pop()\n    input_lens = [(i, len(line.split())) for (i, line) in enumerate(inputs)]\n    sorted_input_lens = sorted(input_lens, key=(lambda x: x[1]), reverse=True)\n    sorted_inputs = []\n    sorted_keys = {}\n    for (i, (index, _)) in enumerate(sorted_input_lens):\n        sorted_inputs.append(inputs[index])\n        sorted_keys[index] = i\n    return (sorted_inputs, sorted_keys)", "docstring": "Read and sort lines from the file sorted by decreasing length.\n\nArgs:\nfilename: String name of file to read inputs from.\nReturns:\nSorted list of inputs, and dictionary mapping original index->sorted index\nof each element.", "source": "codesearchnet"}
{"code": "def _compute_posterior(self, likelihoods_watermarked: torch.Tensor, likelihoods_unwatermarked: torch.Tensor, mask: torch.Tensor, prior: float) -> torch.Tensor:\n    mask = torch.unsqueeze(mask, dim=-1)\n    prior = torch.clamp(prior, min=1e-05, max=1 - 1e-05)\n    log_likelihoods_watermarked = torch.log(torch.clamp(likelihoods_watermarked, min=1e-30, max=float('inf')))\n    log_likelihoods_unwatermarked = torch.log(torch.clamp(likelihoods_unwatermarked, min=1e-30, max=float('inf')))\n    log_odds = log_likelihoods_watermarked - log_likelihoods_unwatermarked\n    relative_surprisal_likelihood = torch.einsum('i...->i', log_odds * mask)\n    relative_surprisal_prior = torch.log(prior) - torch.log(1 - prior)\n    relative_surprisal = relative_surprisal_prior + relative_surprisal_likelihood\n    return torch.sigmoid(relative_surprisal)", "docstring": "Compute posterior P(w|g) given likelihoods, mask and prior.\n\nArgs:\nlikelihoods_watermarked (`torch.Tensor` of shape `(batch, length, depth)`):\nLikelihoods P(g_values|watermarked) of g-values under watermarked model.\nlikelihoods_unwatermarked (`torch.Tensor` of shape `(batch, length, depth)`):\nLikelihoods P(g_values|unwatermarked) of g-values under unwatermarked model.\nmask (`torch.Tensor` of shape `(batch, length)`):\nA binary array indicating which g-values should be used. g-values with mask value 0 are discarded.\nprior (`float`):\nthe prior probability P(w) that the text is watermarked.\n\nReturns:\nPosterior probability P(watermarked|g_values), shape [batch].", "source": "github-repos"}
{"code": "def switch_to_frame(self, frame_reference=None):\n    if ((frame_reference is not None) and (type(frame_reference) not in [int, WebElement])):\n        raise TypeError('Type of frame_reference must be None or int or WebElement')\n    self._execute(Command.SWITCH_TO_FRAME, {'id': frame_reference})", "docstring": "Switches focus to the specified frame, by index, name, or webelement.\n\nSupport:\nWeb(WebView)\n\nArgs:\nframe_reference(None|int|WebElement):\nThe identifier of the frame to switch to.\nNone means to set to the default context.\nAn integer representing the index.\nA webelement means that is an (i)frame to switch to.\nOtherwise throw an error.\n\nReturns:\nWebDriver Object.", "source": "codesearchnet"}
{"code": "def args(self, args):\n        \n        self._args = args\n        self._logger.log('debug', 'Args set to {}'.format(args))", "docstring": "Set additional arguments to be passed to the fitness function\n\nArgs:\nargs (dict): additional arguments", "source": "juraj-google-style"}
{"code": "def __init__(self, nw_ttl=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_SET_NW_TTL, length=8)\n        self.nw_ttl = nw_ttl", "docstring": "Create an ActionSetNWTTL with the optional parameters below.\n\nArgs:\nnw_ttl (int): the TTL address to set in the IP header.", "source": "juraj-google-style"}
{"code": "def class_label_top(body_output, targets, model_hparams, vocab_size):\n  \n  del targets  \n  with tf.variable_scope(\"class_label_modality_%d_%d\" % (\n      vocab_size, model_hparams.hidden_size)):\n    x = body_output\n    x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n    res = tf.layers.dense(x, vocab_size)\n    return tf.expand_dims(res, 3)", "docstring": "Transform inputs from model space to target space.\n\nAverage over inner dims and a linear layer to logits.\n\nArgs:\nbody_output: A Tensor with shape [batch, ?, ?, body_output_size].\ntargets:\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\na Tensors, each with shape [batch_size, 1, 1, 1, vocab_size]", "source": "juraj-google-style"}
{"code": "def usb(self, state):\n    state_lookup = {'off': 0, 'on': 1, 'auto': 2}\n    state = state.lower()\n    if (state in state_lookup):\n        current_state = self.mon.GetUsbPassthrough()\n        while (current_state != state_lookup[state]):\n            self.mon.SetUsbPassthrough(state_lookup[state])\n            time.sleep(1)\n            current_state = self.mon.GetUsbPassthrough()\n        return True\n    return False", "docstring": "Sets the monsoon's USB passthrough mode. This is specific to the\nUSB port in front of the monsoon box which connects to the powered\ndevice, NOT the USB that is used to talk to the monsoon itself.\n\n\"Off\" means USB always off.\n\"On\" means USB always on.\n\"Auto\" means USB is automatically turned off when sampling is going on,\nand turned back on when sampling finishes.\n\nArgs:\nstats: The state to set the USB passthrough to.\n\nReturns:\nTrue if the state is legal and set. False otherwise.", "source": "codesearchnet"}
{"code": "def create_option(name, ty, docstring, default_factory=lambda: None):\n\n    def get_fn(option):\n        if name not in option._options:\n            option._options[name] = default_factory()\n        return option._options.get(name)\n\n    def set_fn(option, value):\n        if not isinstance(value, ty):\n            raise TypeError('Property \"{}\" must be of type {}, got: {} (type: {})'.format(name, ty, value, type(value)))\n        option._options[name] = value\n    return property(get_fn, set_fn, None, docstring)", "docstring": "Creates a type-checked property.\n\nArgs:\nname: The name to use.\nty: The type to use. The type of the property will be validated when it\nis set.\ndocstring: The docstring to use.\ndefault_factory: A callable that takes no arguments and returns a default\nvalue to use if not set.\n\nReturns:\nA type-checked property.", "source": "github-repos"}
{"code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Adds `tf.IndexedSlices` to this variable.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to be added to this variable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents):\n    with _TRACE_COUNT_CONSISTENCY_LOCK:\n        _TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1\n    special_case = _SPECIAL_CASES.get(op_name, None)\n    if special_case is not None:\n        return special_case(attr_tuple, inputs, outputs, tangents)\n    if not outputs:\n        return []\n    with forwardprop_util.push_forwardprop_state():\n        trainable_inputs = []\n        trainable_indices = []\n        nontrivial_tangents = []\n        for input_index, tensor in enumerate(inputs):\n            if backprop_util.IsTrainable(tensor):\n                trainable_inputs.append(tensor)\n                trainable_indices.append(input_index)\n                nontrivial_tangents.append(tangents[input_index])\n        with backprop.GradientTape() as transpose_tape:\n            with backprop.GradientTape() as backfunc_tape:\n                backfunc_tape.watch(trainable_inputs)\n                execute.record_gradient(op_name, inputs, attr_tuple, outputs)\n            forwardprop_aids = []\n            trainable_outputs = []\n            nontrivial_output_indices = []\n            for output_index, output in enumerate(outputs):\n                if backprop_util.IsTrainable(output):\n                    forwardprop_aids.append(array_ops.ones_like(output, name='unused_forwardprop_aid'))\n                    trainable_outputs.append(output)\n                    nontrivial_output_indices.append(output_index)\n            transpose_tape.watch(forwardprop_aids)\n            grads = backfunc_tape.gradient(trainable_outputs, trainable_inputs, forwardprop_aids, unconnected_gradients=UnconnectedGradients.ZERO)\n        nontrivial_output_tangents = transpose_tape.gradient(grads, forwardprop_aids, output_gradients=nontrivial_tangents)\n        output_tangents = [None] * len(outputs)\n        for index, tangent in zip(nontrivial_output_indices, nontrivial_output_tangents):\n            output_tangents[index] = tangent\n        return output_tangents", "docstring": "Computes a Jacobian-vector product for an op.\n\nNote that this function would be wasteful if executed eagerly. It runs the\nbackward gradient function and throws away the result just to record its\noperations on a GradientTape. These unused ops are pruned away when this\nfunction is traced.\n\nArgs:\nop_name: A string, the type of operation being executed.\nattr_tuple: Attributes of the operation.\ninputs: A flat list of input Tensors to the operation.\noutputs: A flat list of output Tensors from the operation.\ntangents: A flat list of Tensors, same shape as `inputs`.\n\nReturns:\nA flat list of tangents corresponding to `outputs`.", "source": "github-repos"}
{"code": "def MergeOrAddUser(self, kb_user):\n    user = self.GetUser(sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)\n    new_attrs = []\n    merge_conflicts = []\n    if (not user):\n        new_attrs = self._CreateNewUser(kb_user)\n    else:\n        for (key, val) in iteritems(kb_user.AsDict()):\n            if (user.Get(key) and (user.Get(key) != val)):\n                merge_conflicts.append((key, user.Get(key), val))\n            user.Set(key, val)\n            new_attrs.append(('users.%s' % key))\n    return (new_attrs, merge_conflicts)", "docstring": "Merge a user into existing users or add new if it doesn't exist.\n\nArgs:\nkb_user: A User rdfvalue.\n\nReturns:\nA list of strings with the set attribute names, e.g. [\"users.sid\"]", "source": "codesearchnet"}
{"code": "def ToType(item, allow_constants=False, allow_functions=False, allow_singletons=False):\n    if isinstance(item, Type):\n        return item\n    elif isinstance(item, Module):\n        return item\n    elif isinstance(item, (ParamSpecArgs, ParamSpecKwargs)):\n        return item\n    elif isinstance(item, Class):\n        return ClassType(item.name, item)\n    elif isinstance(item, Function) and allow_functions:\n        return item\n    elif isinstance(item, Constant):\n        if allow_singletons and item.name in SINGLETON_TYPES:\n            return item.type\n        elif item.type.name == 'builtins.type':\n            if isinstance(item.type, GenericType):\n                return item.type.parameters[0]\n            else:\n                return AnythingType()\n        elif isinstance(item.type, AnythingType) or item.name == 'typing_extensions.TypedDict':\n            return AnythingType()\n        elif allow_constants:\n            return item\n    elif isinstance(item, Alias):\n        return item.type\n    raise NotImplementedError(f\"Can't convert {type(item)}: {item}\")", "docstring": "Convert a pytd AST item into a type.\n\nTakes an AST item representing the definition of a type and returns an item\nrepresenting a reference to the type. For example, if the item is a\npytd.Class, this method will return a pytd.ClassType whose cls attribute\npoints to the class.\n\nArgs:\nitem: A pytd.Node item.\nallow_constants: When True, constants that cannot be converted to types will\nbe passed through unchanged.\nallow_functions: When True, functions that cannot be converted to types will\nbe passed through unchanged.\nallow_singletons: When True, singletons that act as their types in\nannotations will return that type.\n\nReturns:\nA pytd.Type object representing the type of an instance of `item`.", "source": "github-repos"}
{"code": "def find_bucket(self, bucketing_id, parent_id, traffic_allocations):\n    \n\n    bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)\n    bucketing_number = self._generate_bucket_value(bucketing_key)\n    self.config.logger.debug('Assigned bucket %s to user with bucketing ID \"%s\".' % (\n      bucketing_number,\n      bucketing_id\n    ))\n\n    for traffic_allocation in traffic_allocations:\n      current_end_of_range = traffic_allocation.get('endOfRange')\n      if bucketing_number < current_end_of_range:\n        return traffic_allocation.get('entityId')\n\n    return None", "docstring": "Determine entity based on bucket value and traffic allocations.\n\nArgs:\nbucketing_id: ID to be used for bucketing the user.\nparent_id: ID representing group or experiment.\ntraffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.\n\nReturns:\nEntity ID which may represent experiment or variation.", "source": "juraj-google-style"}
{"code": "async def get_next_match(self):\n    if (self._final_rank is not None):\n        return None\n    matches = (await self.get_matches(MatchState.open_))\n    if (len(matches) == 0):\n        matches = (await self.get_matches(MatchState.pending))\n    if (len(matches) > 0):\n        return matches[0]\n    return None", "docstring": "Return the first open match found, or if none, the first pending match found\n\n|methcoro|\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def pack(self, value=None):\n    if isinstance(value, type(self)):\n        return value.pack()\n    if (value is None):\n        value = self.value\n    elif ('value' in dir(value)):\n        value = value.value\n    try:\n        return struct.pack(self._fmt, value)\n    except struct.error:\n        expected_type = type(self).__name__\n        actual_type = type(value).__name__\n        msg_args = (expected_type, value, actual_type)\n        msg = 'Expected {}, found value \"{}\" of type {}'.format(*msg_args)\n        raise PackException(msg)", "docstring": "r\"\"\"Pack the value as a binary representation.\n\nConsidering an example with UBInt8 class, that inherits from\nGenericType:\n\n>>> from pyof.foundation.basic_types import UBInt8\n>>> objectA = UBInt8(1)\n>>> objectB = 5\n>>> objectA.pack()\nb'\\x01'\n>>> objectA.pack(objectB)\nb'\\x05'\n\nArgs:\nvalue: If the value is None, then we will pack the value of the\ncurrent instance. Otherwise, if value is an instance of the\nsame type as the current instance, then we call the pack of the\nvalue object. Otherwise, we will use the current instance pack\nmethod on the passed value.\n\nReturns:\nbytes: The binary representation.\n\nRaises:\n:exc:`~.exceptions.BadValueException`: If the value does not\nfit the binary format.", "source": "codesearchnet"}
{"code": "def __init__(\n        self,\n        pooling_type='max',\n        window=2,\n        stride=2,\n        padding='SAME',\n        named_tensors=None,\n        scope='pool2d',\n        summary_labels=()\n    ):\n        \n        self.pooling_type = pooling_type\n        if isinstance(window, int):\n            self.window = (1, window, window, 1)\n        elif len(window) == 2:\n            self.window = (1, window[0], window[1], 1)\n        else:\n            raise TensorForceError('Invalid window {} for pool2d layer, must be of size 2'.format(window))\n        if isinstance(stride, int):\n            self.stride = (1, stride, stride, 1)\n        elif len(window) == 2:\n            self.stride = (1, stride[0], stride[1], 1)\n        else:\n            raise TensorForceError('Invalid stride {} for pool2d layer, must be of size 2'.format(stride))\n        self.padding = padding\n        super(Pool2d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "2-dimensional pooling layer.\n\nArgs:\npooling_type: Either 'max' or 'average'.\nwindow: Pooling window size, either an integer or pair of integers.\nstride: Pooling stride, either an integer or pair of integers.\npadding: Pooling padding, one of 'VALID' or 'SAME'.", "source": "juraj-google-style"}
{"code": "def _prepare_4d_causal_attention_mask(attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, tuple, list], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None):\n    attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window)\n    key_value_length = input_shape[-1] + past_key_values_length\n    if attention_mask is not None and len(attention_mask.shape) == 2:\n        attention_mask = attn_mask_converter.to_4d(attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype)\n    elif attention_mask is not None and len(attention_mask.shape) == 4:\n        expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)\n        if tuple(attention_mask.shape) != expected_shape:\n            raise ValueError(f'Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}.')\n        else:\n            inverted_mask = 1.0 - attention_mask\n            attention_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min)\n    else:\n        attention_mask = attn_mask_converter.to_causal_4d(input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device)\n    return attention_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`\n\nArgs:\nattention_mask (`torch.Tensor` or `None`):\nA 2D attention mask of shape `(batch_size, key_value_length)`\ninput_shape (`tuple(int)` or `list(int)` or `torch.Size`):\nThe input shape should be a tuple that defines `(batch_size, query_length)`.\ninputs_embeds (`torch.Tensor`):\nThe embedded inputs as a torch Tensor.\npast_key_values_length (`int`):\nThe length of the key value cache.\nsliding_window (`int`, *optional*):\nIf the model uses windowed attention, a sliding window should be passed.", "source": "github-repos"}
{"code": "def usufyToPngExport(d, fPath):\n    \n    newGraph = _generateGraphData(d)\n\n    import matplotlib.pyplot as plt\n    \n    nx.draw(newGraph)\n    plt.savefig(fPath)", "docstring": "Workaround to export to a png file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "juraj-google-style"}
{"code": "def nonzero(x):\n    if any_symbolic_tensors((x,)):\n        return Nonzero().symbolic_call(x)\n    return backend.numpy.nonzero(x)", "docstring": "Return the indices of the elements that are non-zero.\n\nArgs:\nx: Input tensor.\n\nReturns:\nIndices of elements that are non-zero.", "source": "github-repos"}
{"code": "def __build_helper_map(cls):\n    ret = {}\n    for name in dir(cls):\n        obj = getattr(cls, name)\n        if ishelper(obj):\n            for cmd in obj.__help_targets__:\n                if (cmd in ret.keys()):\n                    raise PyShellError(\"The command '{}' already has helper method '{}', cannot register a second method '{}'.\".format(cmd, ret[cmd], obj.__name__))\n                ret[cmd] = obj.__name__\n    return ret", "docstring": "Build a mapping from command names to helper names.\n\nOne command name maps to at most one helper method.\nMultiple command names can map to the same helper method.\n\nOnly used by __init__() to initialize self._cmd_map. MUST NOT be used\nelsewhere.\n\nRaises:\nPyShellError: A command maps to multiple helper methods.", "source": "codesearchnet"}
{"code": "def _extract_filename(self, flagfile_str):\n    \n    if flagfile_str.startswith('--flagfile='):\n      return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())\n    elif flagfile_str.startswith('-flagfile='):\n      return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())\n    else:\n      raise _exceptions.Error(\n          'Hit illegal --flagfile type: %s' % flagfile_str)", "docstring": "Returns filename from a flagfile_str of form -[-]flagfile=filename.\n\nThe cases of --flagfile foo and -flagfile foo shouldn't be hitting\nthis function, as they are dealt with in the level above this\nfunction.\n\nArgs:\nflagfile_str: str, the flagfile string.\n\nReturns:\nstr, the filename from a flagfile_str of form -[-]flagfile=filename.\n\nRaises:\nError: Raised when illegal --flagfile is provided.", "source": "juraj-google-style"}
{"code": "def find_user(cls, session, mailbox, user):\n        \n        return cls(\n            '/mailboxes/%d/users/%s/conversations.json' % (\n                mailbox.id, user.id,\n            ),\n            session=session,\n        )", "docstring": "Return conversations for a specific user in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox to search.\nuser (helpscout.models.User): User to search for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "juraj-google-style"}
{"code": "def create_from_options(cls, pipeline_options):\n    from apache_beam.options.pipeline_options import PipelineOptions\n    if not isinstance(pipeline_options, PipelineOptions):\n        raise ValueError('Element of class {}.{} does not subclass PipelineOptions'.format(pipeline_options.__module__, pipeline_options.__class__.__name__))\n    items = {k: v if DisplayDataItem._get_value_type(v) is not None else str(v) for k, v in pipeline_options.display_data().items()}\n    return cls(pipeline_options._get_display_data_namespace(), items)", "docstring": "Creates :class:`~apache_beam.transforms.display.DisplayData` from a\n:class:`~apache_beam.options.pipeline_options.PipelineOptions` instance.\n\nWhen creating :class:`~apache_beam.transforms.display.DisplayData`, this\nmethod will convert the value of any item of a non-supported type to its\nstring representation.\nThe normal :meth:`.create_from()` method rejects those items.\n\nReturns:\n~apache_beam.transforms.display.DisplayData:\nA :class:`~apache_beam.transforms.display.DisplayData` instance with\npopulated items.\n\nRaises:\nValueError: If the **has_display_data** argument is\nnot an instance of :class:`HasDisplayData`.", "source": "github-repos"}
{"code": "def _list_profile_filter(profile_datum, node_name_regex, file_path_regex, op_type_regex, op_time_interval, exec_time_interval, min_lineno=-1, max_lineno=-1):\n    if node_name_regex and (not node_name_regex.match(profile_datum.node_exec_stats.node_name)):\n        return False\n    if file_path_regex:\n        if not profile_datum.file_path or not file_path_regex.match(profile_datum.file_path):\n            return False\n    if min_lineno > 0 and profile_datum.line_number and (profile_datum.line_number < min_lineno):\n        return False\n    if max_lineno > 0 and profile_datum.line_number and (profile_datum.line_number >= max_lineno):\n        return False\n    if profile_datum.op_type is not None and op_type_regex and (not op_type_regex.match(profile_datum.op_type)):\n        return False\n    if op_time_interval is not None and (not op_time_interval.contains(profile_datum.op_time)):\n        return False\n    if exec_time_interval and (not exec_time_interval.contains(profile_datum.node_exec_stats.all_end_rel_micros)):\n        return False\n    return True", "docstring": "Filter function for list_profile command.\n\nArgs:\nprofile_datum: A `ProfileDatum` object.\nnode_name_regex: Regular expression pattern object to filter by name.\nfile_path_regex: Regular expression pattern object to filter by file path.\nop_type_regex: Regular expression pattern object to filter by op type.\nop_time_interval: `Interval` for filtering op time.\nexec_time_interval: `Interval` for filtering exec time.\nmin_lineno: Lower bound for 1-based line number, inclusive.\nIf <= 0, has no effect.\nmax_lineno: Upper bound for 1-based line number, exclusive.\nIf <= 0, has no effect.\n# TODO(cais): Maybe filter by function name.\n\nReturns:\nTrue iff profile_datum should be included.", "source": "github-repos"}
{"code": "def _tf_extension_type_with_packed(self, value):\n    copy = _create_object_from_type_and_dict(type(self), self.__dict__)\n    copy.__dict__['_tf_extension_type_is_packed'] = value\n    return copy", "docstring": "Returns a copy of this `TypeSpec` with `packed=value`.\n\nArgs:\nvalue: A boolean value.\n\nReturns:\nA copy of `self` with `_tf_extension_type_is_packed=value`.", "source": "github-repos"}
{"code": "def stats(self, *args):\n        \n        result = self._fetch_cmd(b'stats', args, False)\n\n        for key, value in six.iteritems(result):\n            converter = STAT_TYPES.get(key, int)\n            try:\n                result[key] = converter(value)\n            except Exception:\n                pass\n\n        return result", "docstring": "The memcached \"stats\" command.\n\nThe returned keys depend on what the \"stats\" command returns.\nA best effort is made to convert values to appropriate Python\ntypes, defaulting to strings when a conversion cannot be made.\n\nArgs:\n*arg: extra string arguments to the \"stats\" command. See the\nmemcached protocol documentation for more information.\n\nReturns:\nA dict of the returned stats.", "source": "juraj-google-style"}
{"code": "def flat_values_spec(self):\n    return self._flat_values_spec", "docstring": "The `TypeSpec` of the flat_values of RaggedTensor.\n\nReturns:\n- The TypeSpec of flat_values.\n- None when the flat_values is a Tensor.", "source": "github-repos"}
{"code": "def init(args):\n    \n    \n    dir_path = Path().absolute()\n\n    if not args.project_name or args.project_name.find(\"/\") >= 0:\n        print(\n            \"{}You should specify a valid project name{}\".format(\n                utils.colors.FAIL, utils.colors.ENDC\n            )\n        )\n        return\n\n    project_path = dir_path / args.project_name\n\n    \n    if not project_path.exists():\n        project_path.mkdir()\n    else:\n        print(\n            \"{}This project already exists{}\".format(\n                utils.colors.FAIL, utils.colors.ENDC\n            )\n        )\n        return\n\n    \n    home_doc_path = project_path / \"docs\"\n    home_doc_path.mkdir()\n    help_doc_path = home_doc_path / \"help\"\n    help_doc_path.mkdir()\n\n    file_path = Path(__file__).resolve().parent / \"include\"\n\n    \n    copyfile(file_path / \"index.md\", home_doc_path / \"index.md\")\n    copyfile(file_path / \"How_To_Use_Mkinx.md\", help_doc_path / \"How_To_Use_Mkinx.md\")\n    copyfile(\n        file_path / \"Writing_Sphinx_Documentation.md\",\n        help_doc_path / \"Writing_Sphinx_Documentation.md\",\n    )\n\n    with open(file_path / \"mkdocs.yml\", \"r\") as f:\n        lines = f.readlines()\n\n    input_text = \"What is your Documentation's name\"\n    input_text += \" (it can be changed later in mkdocs.yml)?\\n\"\n    input_text += \"[Default: {} - Home Documentation]\\n\"\n\n    site_name = input(input_text.format(args.project_name.capitalize()))\n    if not site_name:\n        site_name = \"{} - Home Documentation\".format(args.project_name.capitalize())\n\n    lines[0] = \"site_name: {}\\n\".format(site_name)\n\n    with open(project_path / \"mkdocs.yml\", \"w\") as f:\n        f.writelines(lines)\n\n    example_project_path = project_path / \"example_project\" / \"example_project\"\n\n    windows = \"y\" if sys.platform in {\"win32\", \"cygwin\"} else \"n\"\n\n    copytree(file_path / \"example_project\", example_project_path)\n    move(str(example_project_path / \"source\"), str(project_path / \"example_project\"))\n    move(\n        str(project_path / \"example_project\" / \"example_project\" / \"Makefile\"),\n        str(project_path / \"example_project\"),\n    )\n    if windows == \"y\":\n        move(\n            str(project_path / \"example_project\" / \"example_project\" / \"make.bat\"),\n            str(project_path / \"example_project\"),\n        )\n    else:\n        os.remove(\n            str(project_path / \"example_project\" / \"example_project\" / \"make.bat\")\n        )\n\n    static = project_path / \"example_project\" / \"source\"\n    static /= \"_static\"\n    if not static.exists():\n        static.mkdir()\n\n    _ = subprocess.check_output(\n        \"cd {} && mkinx build -F -A > /dev/null\".format(args.project_name), shell=True\n    )\n\n    print(\n        \"\\n\\n\",\n        utils.colors.OKBLUE,\n        \"{}/{} created as a showcase of how mkinx works\".format(\n            args.project_name, \"example_project\"\n        ),\n        utils.colors.ENDC,\n    )\n\n    print(\n        \"\\n\",\n        utils.colors.OKGREEN,\n        \"Success!\",\n        utils.colors.ENDC,\n        \"You can now start your Docs in ./{}\\n\".format(args.project_name),\n        utils.colors.HEADER,\n        \"$ cd ./{}\".format(args.project_name),\n        utils.colors.ENDC,\n    )\n    print(\n        \"  Start the server from within your Docs to see them \\n  (default\",\n        \"port is 8443 but you can change it with the -s 
flag):\",\n    )\n    print(\n        utils.colors.HEADER,\n        \" {} $ mkinx serve\\n\".format(args.project_name),\n        utils.colors.ENDC,\n    )", "docstring": "Initialize a Home Documentation's folder\n\nArgs:\nargs (ArgumentParser): Flags from the CLI", "source": "juraj-google-style"}
{"code": "def _init_local_init_op(self, local_init_op=USE_DEFAULT):\n    if local_init_op is Supervisor.USE_DEFAULT:\n        local_init_op = self._get_first_op_from_collection(ops.GraphKeys.LOCAL_INIT_OP)\n        if local_init_op is None:\n            op_list = [variables.local_variables_initializer(), lookup_ops.tables_initializer()]\n            if op_list:\n                local_init_op = control_flow_ops.group(*op_list)\n                ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)\n    self._local_init_op = local_init_op", "docstring": "Initializes local_init_op.\n\nArgs:\nlocal_init_op: `Operation` run for every new supervisor instance. If set\nto USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP\ncollection. If the collection is empty, create an op that initializes\nall local variables and all tables.", "source": "github-repos"}
{"code": "def num_samples(self, sr=None):\n    native_sr = self.sampling_rate\n    num_samples = units.seconds_to_sample(self.duration, native_sr)\n    if (sr is not None):\n        ratio = (float(sr) / native_sr)\n        num_samples = int(np.ceil((num_samples * ratio)))\n    return num_samples", "docstring": "Return the number of samples.\n\nArgs:\nsr (int): Calculate the number of samples with the given\nsampling-rate. If None use the native sampling-rate.\n\nReturns:\nint: Number of samples", "source": "codesearchnet"}
{"code": "def execute(self, triple_map, output, **kwargs):\n        \n        subjects = []\n        found_elements = self.source.xpath(\n            str(triple_map.logicalSource.iterator),\n            namespaces=self.xml_ns)\n        for element in found_elements:\n            subject = self.generate_term(term_map=triple_map.subjectMap,\n                                         element=element,\n                                         **kwargs)\n            start = len(output)\n            for row in triple_map.predicateObjectMap:\n                predicate = row.predicate\n                if row.template is not None:\n                    obj_ = self.generate_term(term_map=row, **kwargs)\n                    output.add((subject, predicate, obj_))\n                if row.parentTriplesMap is not None:\n                    self.__handle_parents__(\n                        output,\n                        parent_map=row.parentTriplesMap,\n                        subject=subject,\n                        predicate=predicate,\n                        **kwargs)\n                new_subjects = self.__reference_handler__(\n                    output,\n                    predicate_obj_map=row,\n                    element=element,\n                    subject=subject)\n                subjects.extend(new_subjects)\n                if row.constant is not None:\n                    output.add((subject, predicate, row.constant))\n            if start < len(output):\n                if triple_map.subjectMap.class_ is not None:\n                    output.add((subject,\n                                NS_MGR.rdf.type.rdflib,\n                                triple_map.subjectMap.class_))\n                subjects.append(subject)\n        return subjects", "docstring": "Method executes mapping between source\n\nArgs:\n\n-----\ntriple_map: SimpleNamespace, Triple Map", "source": "juraj-google-style"}
{"code": "def MakeSimpleProtoClass(fields, full_name=None, pool=None):\n    factory = message_factory.MessageFactory(pool=pool)\n    if (full_name is not None):\n        try:\n            proto_cls = _GetMessageFromFactory(factory, full_name)\n            return proto_cls\n        except KeyError:\n            pass\n    field_items = fields.items()\n    if (not isinstance(fields, OrderedDict)):\n        field_items = sorted(field_items)\n    fields_hash = hashlib.sha1()\n    for (f_name, f_type) in field_items:\n        fields_hash.update(f_name.encode('utf-8'))\n        fields_hash.update(str(f_type).encode('utf-8'))\n    proto_file_name = (fields_hash.hexdigest() + '.proto')\n    if (full_name is None):\n        full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' + fields_hash.hexdigest())\n        try:\n            proto_cls = _GetMessageFromFactory(factory, full_name)\n            return proto_cls\n        except KeyError:\n            pass\n    factory.pool.Add(_MakeFileDescriptorProto(proto_file_name, full_name, field_items))\n    return _GetMessageFromFactory(factory, full_name)", "docstring": "Create a Protobuf class whose fields are basic types.\n\nNote: this doesn't validate field names!\n\nArgs:\nfields: dict of {name: field_type} mappings for each field in the proto. If\nthis is an OrderedDict the order will be maintained, otherwise the\nfields will be sorted by name.\nfull_name: optional str, the fully-qualified name of the proto type.\npool: optional DescriptorPool instance.\nReturns:\na class, the new protobuf class with a FileDescriptor.", "source": "codesearchnet"}
{"code": "def _generate_subtokens(token_counts, alphabet, min_count, num_iterations=4, reserved_tokens=None):\n    if (reserved_tokens is None):\n        reserved_tokens = RESERVED_TOKENS\n    subtoken_list = (reserved_tokens + list(alphabet))\n    max_subtoken_length = 1\n    for i in xrange(num_iterations):\n        tf.logging.info(('\\tGenerating subtokens: iteration %d' % i))\n        subtoken_dict = _list_to_index_dict(subtoken_list)\n        subtoken_counts = _count_and_gen_subtokens(token_counts, alphabet, subtoken_dict, max_subtoken_length)\n        (subtoken_list, max_subtoken_length) = _gen_new_subtoken_list(subtoken_counts, min_count, alphabet, reserved_tokens)\n        tf.logging.info(('\\tVocab size: %d' % len(subtoken_list)))\n    return subtoken_list", "docstring": "Create a list of subtokens in decreasing order of frequency.\n\nArgs:\ntoken_counts: dict mapping str tokens -> int count\nalphabet: set of characters\nmin_count: int minimum number of times a subtoken must appear before it is\nadded to the vocabulary.\nnum_iterations: int number of iterations to generate new tokens.\nreserved_tokens: list of tokens that will be added to the beginning to the\nreturned subtoken list.\n\nReturns:\nSorted list of subtokens (most frequent first)", "source": "codesearchnet"}
{"code": "def pad_to_square(self, images: 'torch.Tensor', background_color: Union[int, Tuple[int, int, int]]=0) -> 'torch.Tensor':\n    height, width = get_image_size(images, ChannelDimension.FIRST)\n    if height == width:\n        return images\n    num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]\n    if isinstance(background_color, int):\n        background_color = [background_color] + [0] * (num_channels - 1)\n    elif len(background_color) != num_channels:\n        raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels')\n    max_dim = max(height, width)\n    paste_x_left = (max_dim - width) \n    paste_y_left = (max_dim - height) \n    paste_x_right = max_dim - width - paste_x_left\n    paste_y_right = max_dim - height - paste_y_left\n    padded_images = F.pad(images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color)\n    return padded_images", "docstring": "Pads an image to a square based on the longest edge.\n\nArgs:\nimages (`np.ndarray`):\nThe images to pad.\nbackground_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0):\nThe color to use for the padding. Can be an integer for single channel or a\ntuple of integers representing for multi-channel images. If passed as integer\nin mutli-channel mode, it will default to `0` in subsequent channels.\nReturns:\n`torch.Tensor`: The padded images.", "source": "github-repos"}
{"code": "def _sd_of_runs(stats, mean, key='runs'):\n    \n\n    num_runs = len(stats[key])\n    first = stats[key][0]\n\n    standard_deviation = {}\n    for stat_key in first:\n        \n        if isinstance(first[stat_key], numbers.Number):\n            standard_deviation[stat_key] = math.sqrt(\n                sum((run[stat_key] - mean[stat_key])**2\n                    for run in stats[key]) / float(num_runs))\n\n    return standard_deviation", "docstring": "Obtain the standard deviation of stats.\n\nArgs:\nstats: dict; A set of stats, structured as above.\nmean: dict; Mean for each key in stats.\nkey: str; Optional key to determine where list of runs is found in stats", "source": "juraj-google-style"}
{"code": "def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting transpose ...')\n    if (params['perm'][0] != 0):\n        if (inputs[0] in layers):\n            print('!!! Cannot permute batch dimension. Result may be wrong !!!')\n            layers[scope_name] = layers[inputs[0]]\n        else:\n            print('Skip weight matrix transpose, result may be wrong.')\n    else:\n        if names:\n            tf_name = ('PERM' + random_string(4))\n        else:\n            tf_name = (w_name + str(random.random()))\n        permute = keras.layers.Permute(params['perm'][1:], name=tf_name)\n        layers[scope_name] = permute(layers[inputs[0]])", "docstring": "Convert transpose layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def set_energy(self, spins, target_energy):\n        \n        spin_energy = self.energy(spins)\n        self.assertions.add(Equals(spin_energy, limitReal(target_energy)))", "docstring": "Set the energy of Theta with spins fixed to target_energy.\n\nArgs:\nspins (dict): Spin values for a subset of the variables in Theta.\ntarget_energy (float): The desired energy for Theta with spins fixed.\n\nNotes:\nAdd equality constraint to assertions.", "source": "juraj-google-style"}
{"code": "def create_branch(self, branch_name: str):\n        \n        LOGGER.info('creating branch: %s', branch_name)\n        self._validate_branch_name(branch_name)\n        if branch_name in self.list_branches():\n            LOGGER.error('branch already exists')\n            sys.exit(-1)\n        new_branch = self.repo.create_head(branch_name)\n        new_branch.commit = self.repo.head.commit", "docstring": "Creates a new branch\n\nArgs:\nbranch_name: name of the branch", "source": "juraj-google-style"}
{"code": "def _ParseAttribute(self, file_object):\n    file_offset = file_object.tell()\n    attribute_map = self._GetDataTypeMap('cups_ipp_attribute')\n    try:\n        (attribute, _) = self._ReadStructureFromFileObject(file_object, file_offset, attribute_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse attribute with error: {0!s}'.format(exception))\n    value = None\n    if (attribute.tag_value in self._INTEGER_TAG_VALUES):\n        value = self._ParseIntegerValue(attribute.value_data, file_offset)\n    elif (attribute.tag_value == self._TAG_VALUE_BOOLEAN):\n        value = self._ParseBooleanValue(attribute.value_data)\n    elif (attribute.tag_value == self._TAG_VALUE_DATE_TIME):\n        value = self._ParseDateTimeValue(attribute.value_data, file_offset)\n    elif (attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES):\n        value = attribute.value_data.decode(self._last_charset_attribute)\n    elif (attribute.tag_value in self._ASCII_STRING_VALUES):\n        value = attribute.value_data.decode('ascii')\n        if (attribute.tag_value == self._TAG_VALUE_CHARSET):\n            self._last_charset_attribute = value\n    else:\n        value = attribute.value_data\n    return (attribute.name, value)", "docstring": "Parses a CUPS IPP attribute from a file-like object.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\ntuple[str, object]: attribute name and value.\n\nRaises:\nParseError: if the attribute cannot be parsed.", "source": "codesearchnet"}
{"code": "def _process_exception(e, body, tb):\n    \n    \n    msg = e.message if hasattr(e, \"message\") else str(e)\n    exception_type = str(e.__class__)\n    exception_name = str(e.__class__.__name__)\n\n    properties = pika.BasicProperties(\n        content_type=\"application/text\",\n        delivery_mode=2,\n        headers={\n            \"exception\": msg,\n            \"exception_type\": exception_type,\n            \"exception_name\": exception_name,\n            \"traceback\": tb,\n            \"UUID\": str(uuid.uuid4())\n        }\n    )\n\n    send_message(\"harvester\", body, properties=properties)", "docstring": "Process informations about exception and send them thru AMQP.\n\nArgs:\ne (obj): Exception instance.\nbody (str): Text which will be sent over AMQP.\ntb (obj): Traceback object with informations, which will be put to the\nheaders.", "source": "juraj-google-style"}
{"code": "def to_insert(table, d):\n    columns = []\n    args = []\n    for (key, val) in d.items():\n        columns.append('\"{}\"'.format(key))\n        args.append(val)\n    stmt = 'insert into {table} ({columns}) values ({params})'.format(table=table, columns=', '.join(columns), params=', '.join((['?'] * len(columns))))\n    return (stmt, args)", "docstring": "Generate an insert statement using the given table and dictionary.\n\nArgs:\ntable (str): table name\nd (dict): dictionary with column names as keys and values as values.\nReturns:\ntuple of statement and arguments\n\n>>> to_insert('doc.foobar', {'name': 'Marvin'})\n('insert into doc.foobar (\"name\") values (?)', ['Marvin'])", "source": "codesearchnet"}
{"code": "def get_id(date: datetime.datetime) -> str:\n        \n        date = date.strftime('%Y%m%d')\n        return 'PB-{}-{}-{:03d}'.format(date, 'sip', randint(0, 100))", "docstring": "Generate a Processing Block (PB) Instance ID.\n\nArgs:\ndate (datetime.datetime): UTC date of the PB\n\nReturns:\nstr, Processing Block ID", "source": "juraj-google-style"}
{"code": "def _try_parse_datetime(time_str, fmts):\n    \n    result = None\n    for fmt in fmts:\n        try:\n            result = datetime.strptime(time_str, fmt)\n            break\n        except ValueError:\n            pass\n    return result", "docstring": "A helper function that attempts to parse the input time_str as a date.\n\nArgs:\n\ntime_str (str): A string representing the time\n\nfmts (list): A list of date format strings\n\nReturns:\ndatetime: Returns a datetime object if parsed properly, otherwise None", "source": "juraj-google-style"}
{"code": "def raster_erosion(rasterfile):\n    if is_string(rasterfile):\n        origin_raster = RasterUtilClass.read_raster(str(rasterfile))\n    elif isinstance(rasterfile, Raster):\n        origin_raster = rasterfile.data\n    elif isinstance(rasterfile, numpy.ndarray):\n        origin_raster = rasterfile\n    else:\n        return 'Your rasterfile has a wrong type. Type must be string or numpy.array or class Raster in pygeoc.'\n    max_value_raster = origin_raster.max()\n    erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))\n    add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)\n    temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))\n    add_col = numpy.full(((origin_raster.shape[0] + 2), 1), max_value_raster)\n    expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))\n    for i in range(origin_raster.shape[0]):\n        for j in range(origin_raster.shape[1]):\n            min_pixel_value = max_value_raster\n            for k in range(3):\n                for l in range(3):\n                    if (expand_origin_raster[((i + k), (j + l))] <= min_pixel_value):\n                        min_pixel_value = expand_origin_raster[((i + k), (j + l))]\n                erosion_raster[(i, j)] = min_pixel_value\n    return erosion_raster", "docstring": "Erode the raster image.\n\nFind the min pixel's value in 8-neighborhood. Then change the compute\npixel's value into the min pixel's value.\n\nArgs:\nrasterfile: input original raster image, type can be filename(string,\nlike \"test1.tif\"), rasterfile(class Raster) or numpy.ndarray.\n\nReturns:\nerosion_raster: raster image after erosion, type is numpy.ndarray.", "source": "codesearchnet"}
{"code": "def _parse_access_vlan(self, config):\n        \n        value = re.search(r'switchport access vlan (\\d+)', config)\n        return dict(access_vlan=value.group(1))", "docstring": "Scans the specified config and parse the access-vlan value\nArgs:\nconfig (str): The interface configuration block to scan\n\nReturns:\ndict: A Python dict object with the value of switchport access\nvalue.  The dict returned is intended to be merged into the\nresource dict", "source": "juraj-google-style"}
{"code": "def assert_consumed(self):\n    pretty_printer = ObjectGraphProtoPrettyPrinter(self._checkpoint.object_graph_proto)\n    self.assert_existing_objects_matched()\n    ignore_node_ids = []\n    if self._options.experimental_skip_slot_variables:\n        for node in self._checkpoint.object_graph_proto.nodes:\n            for sv in node.slot_variables:\n                ignore_node_ids.append(sv.slot_variable_node_id)\n    for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n        if not node.attributes:\n            continue\n        if node_id in ignore_node_ids:\n            continue\n        trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n        if trackable is None:\n            raise AssertionError(f'Unresolved object in checkpoint {pretty_printer.node_names[node_id]}: {node}')\n    if not self._options.experimental_skip_slot_variables and self._checkpoint.slot_restorations:\n        raise AssertionError(f'Unresolved slot restorations: {self._checkpoint.slot_restorations}')\n    if self._checkpoint.unused_attributes:\n        unused_attribute_messages = []\n        for node_id, attribute in self._checkpoint.unused_attributes.items():\n            obj = self._checkpoint.object_by_proto_id[node_id]\n            unused_attribute_messages.append(f'{pretty_printer.node_names[node_id]} ({obj}): {attribute}')\n        joined_attribute_messages = '\\n'.join(unused_attribute_messages)\n        raise AssertionError(f'Unused attributes in these objects (the attributes exist in the checkpoint but were not restored):\\n{joined_attribute_messages}')\n    return self", "docstring": "Asserts that all objects in the checkpoint have been created/matched.\n\nReturns:\n`self` for chaining.\nRaises:\nAssertionError: If there are any Python objects in the dependency graph\nwhich have not been restored from this checkpoint or a later `restore`,\nor if there are any checkpointed values which have not been matched to\nPython objects.", "source": "github-repos"}
{"code": "def get_contrib_features(project_root):\n    \n    \n    project = Project(project_root)\n    contrib = project._resolve('.features.contrib')\n    return _get_contrib_features(contrib)", "docstring": "Get contributed features for a project at project_root\n\nFor a project ``foo``, walks modules within the ``foo.features.contrib``\nsubpackage. A single object that is an instance of ``ballet.Feature`` is\nimported if present in each module. The resulting ``Feature`` objects are\ncollected.\n\nArgs:\nproject_root (str, path-like): Path to project root\n\nReturns:\nList[ballet.Feature]: list of Feature objects", "source": "juraj-google-style"}
{"code": "def global_step(sess, global_step_tensor):\n    if context.executing_eagerly():\n        return int(global_step_tensor.numpy())\n    return int(sess.run(global_step_tensor))", "docstring": "Small helper to get the global step.\n\n```python\n# Create a variable to hold the global_step.\nglobal_step_tensor = tf.Variable(10, trainable=False, name='global_step')\n# Create a session.\nsess = tf.compat.v1.Session()\n# Initialize the variable\nsess.run(global_step_tensor.initializer)\n# Get the variable value.\nprint('global_step: %s' % tf.compat.v1.train.global_step(sess,\nglobal_step_tensor))\n\nglobal_step: 10\n```\n\nArgs:\nsess: A TensorFlow `Session` object.\nglobal_step_tensor:  `Tensor` or the `name` of the operation that contains\nthe global step.\n\nReturns:\nThe global step value.", "source": "github-repos"}
{"code": "def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):\n    r\n    \n    prefix = '' if prefix is None else prefix + '/'\n    \n    name = prefix + _pretty_name(tensor) if name is None else prefix + name\n    \n    if not tf.get_variable_scope().reuse:\n        tf.summary.audio(name + '-au', tensor, sample_rate)", "docstring": "r\"\"\"Register `tensor` to summary report as audio\n\nArgs:\ntensor: A `Tensor` to log as audio\nsample_rate : An int. Sample rate to report. Default is 16000.\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def update(self, session, arrays=None, frame=None):\n    new_config = self._get_config()\n    if self._enough_time_has_passed(self.previous_config['FPS']):\n        self.visualizer.update(new_config)\n        self.last_update_time = time.time()\n        final_image = self._update_frame(session, arrays, frame, new_config)\n        self._update_recording(final_image, new_config)", "docstring": "Creates a frame and writes it to disk.\n\nArgs:\narrays: a list of np arrays. Use the \"custom\" option in the client.\nframe: a 2D np array. This way the plugin can be used for video of any\nkind, not just the visualization that comes with the plugin.\n\nframe can also be a function, which only is evaluated when the\n\"frame\" option is selected by the client.", "source": "codesearchnet"}
{"code": "def _ReadTablesArray(self, file_object, tables_array_offset):\n    \n    \n    \n\n    data_type_map = self._GetDataTypeMap('keychain_tables_array')\n\n    tables_array, _ = self._ReadStructureFromFileObject(\n        file_object, tables_array_offset, data_type_map)\n\n    tables = collections.OrderedDict()\n    for table_offset in tables_array.table_offsets:\n      self._ReadTable(tables, file_object, tables_array_offset + table_offset)\n\n    return tables", "docstring": "Reads the tables array.\n\nArgs:\nfile_object (file): file-like object.\ntables_array_offset (int): offset of the tables array relative to\nthe start of the file.\n\nReturns:\ndict[int, KeychainDatabaseTable]: tables per identifier.\n\nRaises:\nParseError: if the tables array cannot be read.", "source": "juraj-google-style"}
{"code": "def sin(x):\n    if any_symbolic_tensors((x,)):\n        return Sin().symbolic_call(x)\n    return backend.numpy.sin(x)", "docstring": "Trigonometric sine, element-wise.\n\nArguments:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"}
{"code": "def _setup_test_logger(log_path, prefix=None):\n    \n    log = logging.getLogger()\n    kill_test_logger(log)\n    log.propagate = False\n    log.setLevel(logging.DEBUG)\n    \n    terminal_format = log_line_format\n    if prefix:\n        terminal_format = '[%s] %s' % (prefix, log_line_format)\n    c_formatter = logging.Formatter(terminal_format, log_line_time_format)\n    ch = logging.StreamHandler(sys.stdout)\n    ch.setFormatter(c_formatter)\n    ch.setLevel(logging.INFO)\n    \n    f_formatter = logging.Formatter(log_line_format, log_line_time_format)\n    \n    fh_info = logging.FileHandler(\n        os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))\n    fh_info.setFormatter(f_formatter)\n    fh_info.setLevel(logging.INFO)\n    fh_debug = logging.FileHandler(\n        os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))\n    fh_debug.setFormatter(f_formatter)\n    fh_debug.setLevel(logging.DEBUG)\n    log.addHandler(ch)\n    log.addHandler(fh_info)\n    log.addHandler(fh_debug)\n    log.log_path = log_path\n    logging.log_path = log_path", "docstring": "Customizes the root logger for a test run.\n\nThe logger object has a stream handler and a file handler. The stream\nhandler logs INFO level to the terminal, the file handler logs DEBUG\nlevel to files.\n\nArgs:\nlog_path: Location of the log file.\nprefix: A prefix for each log line in terminal.\nfilename: Name of the log file. The default is the time the logger\nis requested.", "source": "juraj-google-style"}
{"code": "def standardize(self, x):\n    if self.preprocessing_function:\n        x = self.preprocessing_function(x)\n    if self.rescale:\n        x *= self.rescale\n    if self.samplewise_center:\n        x -= np.mean(x, keepdims=True)\n    if self.samplewise_std_normalization:\n        x /= np.std(x, keepdims=True) + 1e-06\n    if self.featurewise_center:\n        if self.mean is not None:\n            x -= self.mean\n        else:\n            warnings.warn(\"This ImageDataGenerator specifies `featurewise_center`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.\")\n    if self.featurewise_std_normalization:\n        if self.std is not None:\n            x /= self.std + 1e-06\n        else:\n            warnings.warn(\"This ImageDataGenerator specifies `featurewise_std_normalization`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.\")\n    if self.zca_whitening:\n        if self.zca_whitening_matrix is not None:\n            flat_x = x.reshape(-1, np.prod(x.shape[-3:]))\n            white_x = flat_x @ self.zca_whitening_matrix\n            x = np.reshape(white_x, x.shape)\n        else:\n            warnings.warn(\"This ImageDataGenerator specifies `zca_whitening`, but it hasn't been fit on any training data. Fit it first by calling `.fit(numpy_data)`.\")\n    return x", "docstring": "Applies the normalization configuration in-place to a batch of\ninputs.\n\n`x` is changed in-place since the function is mainly used internally\nto standardize images and feed them to your network. If a copy of `x`\nwould be created instead it would have a significant performance cost.\nIf you want to apply this method without changing the input in-place\nyou can call the method creating a copy before:\n\nstandardize(np.copy(x))\n\nArgs:\nx: Batch of inputs to be normalized.\n\nReturns:\nThe inputs, normalized.", "source": "github-repos"}
{"code": "def set_active(self, username, active_state):\n        \n\n        if active_state not in (True, False):\n            raise ValueError(\"active_state must be True or False\")\n\n        user = self.get_user(username)\n        if user is None:\n            return None\n\n        if user['active'] is active_state:\n            \n            return True\n\n        user['active'] = active_state\n        response = self._put(self.rest_url + \"/user\",\n                             params={\"username\": username},\n                             data=json.dumps(user))\n\n        if response.status_code == 204:\n            return True\n\n        return None", "docstring": "Set the active state of a user\n\nArgs:\nusername: The account username\nactive_state: True or False\n\nReturns:\nTrue: If successful\nNone: If no user or failure occurred", "source": "juraj-google-style"}
{"code": "async def get_all(self, url, params=None):\n        \n        if not params:\n            params = {}\n        items = []\n        next_page_token = None\n\n        while True:\n            if next_page_token:\n                params['pageToken'] = next_page_token\n            response = await self.get_json(url, params=params)\n\n            items.append(response)\n            next_page_token = response.get('nextPageToken')\n            if not next_page_token:\n                break\n        return items", "docstring": "Aggregate data from all pages of an API query.\n\nArgs:\nurl (str): Google API endpoint URL.\nparams (dict): (optional) URL query parameters.\n\nReturns:\nlist: Parsed JSON query response results.", "source": "juraj-google-style"}
{"code": "def typical_or_extreme_period_name(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `typical_or_extreme_period_name`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `typical_or_extreme_period_name`')\n    self._typical_or_extreme_period_name = value", "docstring": "Corresponds to IDD Field `typical_or_extreme_period_name`\n\nArgs:\nvalue (str): value for IDD Field `typical_or_extreme_period_name`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "async def get_tournaments(self, subdomain: str=None, force_update: bool=False) -> list:\n    if (self.tournaments is None):\n        force_update = True\n        self._subdomains_searched.append(('' if (subdomain is None) else subdomain))\n    elif ((subdomain is None) and ('' not in self._subdomains_searched)):\n        force_update = True\n        self._subdomains_searched.append('')\n    elif ((subdomain is not None) and (subdomain not in self._subdomains_searched)):\n        force_update = True\n        self._subdomains_searched.append(subdomain)\n    if force_update:\n        params = {'include_participants': (1 if AUTO_GET_PARTICIPANTS else 0), 'include_matches': (1 if AUTO_GET_MATCHES else 0)}\n        if (subdomain is not None):\n            params['subdomain'] = subdomain\n        res = (await self.connection('GET', 'tournaments', **params))\n        if (len(res) == 0):\n            self.tournaments = []\n        else:\n            for t_data in res:\n                self._refresh_tournament_from_json(t_data)\n    return self.tournaments", "docstring": "gets all user's tournaments\n\n|methcoro|\n\nArgs:\nsubdomain: *optional* subdomain needs to be given explicitely to get tournaments in a subdomain\nforce_update: *optional* set to True to force the data update from Challonge\n\nReturns:\nlist[Tournament]: list of all the user tournaments\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def sample_node_list(self, low, high, generator):\n    statements = []\n    for _ in range(np.random.randint(low, high)):\n        statements.append(generator())\n    return statements", "docstring": "Generate a list of statements of random length.\n\nArgs:\nlow: Fewest number of statements to generate.\nhigh: Highest number of statements to generate.\ngenerator: Function to call to generate nodes.\n\nReturns:\nA list of statements.", "source": "github-repos"}
{"code": "def add(self, origin):\n        \n\n        digest = self._calc_digest(origin)\n\n        if self.exists(digest):\n            self.logger.debug('Added File: [{0}] ( Already exists. Skipping transfer)'.format(digest))\n            return digest\n\n        absPath = self.get_file_path(digest)\n        absFolderPath = os.path.dirname(absPath)\n\n        \n        self._makedirs(absFolderPath)\n        self._copy_content(origin, absPath)\n\n        self.logger.debug('Added file: \"{0}\" [{1}]'.format(digest, absPath))\n\n        return digest", "docstring": "Add new element to fsdb.\n\nArgs:\norigin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...)\nReturns:\nString rapresenting the digest of the file", "source": "juraj-google-style"}
{"code": "def convert_variable_to_constant(self, incoming_edge, tensor_data):\n    index = incoming_edge.destination.index\n    for edge in self.outgoing_edges:\n        if edge.source.index == index:\n            edge.destination.convertible.convert_variable_to_constant(edge, tensor_data)\n    function = self.converted_self().function\n    function.signature.input_arg[index].type = tensor_data.dtype\n    if '_input_shapes' in function.attr:\n        function.attr['_input_shapes'].list.shape[index].unknown_rank = True\n        del function.attr['_input_shapes'].list.shape[index].dim[:]\n    arg_attrs = function.arg_attr[index].attr\n    if '_output_shapes' in arg_attrs:\n        arg_attrs['_output_shapes'].list.shape[0].unknown_rank = True\n        del arg_attrs['_output_shapes'].list.shape[0].dim[:]", "docstring": "Converts one function argument into a constant.\n\nArgs:\nincoming_edge: The edge into the argument to be converted.\ntensor_data: The constant value.", "source": "github-repos"}
{"code": "def enum(cls):\n    assert (cls.__bases__ == (object,))\n    d = dict(cls.__dict__)\n    new_type = type(cls.__name__, (int,), d)\n    new_type.__module__ = cls.__module__\n    map_ = {}\n    for (key, value) in iteritems(d):\n        if ((key.upper() == key) and isinstance(value, integer_types)):\n            value_instance = new_type(value)\n            setattr(new_type, key, value_instance)\n            map_[value] = key\n\n    def str_(self):\n        if (self in map_):\n            return ('%s.%s' % (type(self).__name__, map_[self]))\n        return ('%d' % int(self))\n\n    def repr_(self):\n        if (self in map_):\n            return ('<%s.%s: %d>' % (type(self).__name__, map_[self], int(self)))\n        return ('%d' % int(self))\n    setattr(new_type, '__repr__', repr_)\n    setattr(new_type, '__str__', str_)\n    return new_type", "docstring": "A decorator for creating an int enum class.\n\nMakes the values a subclass of the type and implements repr/str.\nThe new class will be a subclass of int.\n\nArgs:\ncls (type): The class to convert to an enum\n\nReturns:\ntype: A new class\n\n::\n\n@enum\nclass Foo(object):\nFOO = 1\nBAR = 2", "source": "codesearchnet"}
{"code": "def trigger(self, attr, old, new, hint=None, setter=None):\n        \n        def invoke():\n            callbacks = self._callbacks.get(attr)\n            if callbacks:\n                for callback in callbacks:\n                    callback(attr, old, new)\n        if hasattr(self, '_document') and self._document is not None:\n            self._document._notify_change(self, attr, old, new, hint, setter, invoke)\n        else:\n            invoke()", "docstring": "Trigger callbacks for ``attr`` on this object.\n\nArgs:\nattr (str) :\nold (object) :\nnew (object) :\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def AddClient(self, client):\n    \n    keywords = self.AnalyzeClient(client)\n    keywords.add(self._NormalizeKeyword(client.client_id))\n\n    data_store.REL_DB.AddClientKeywords(client.client_id, keywords)", "docstring": "Adds a client to the index.\n\nArgs:\nclient: A Client object record.", "source": "juraj-google-style"}
{"code": "def createDirStruct(paths, verbose=True):\n    \n    for k, path in paths.items():\n        p = None\n        try:\n            pathlist = path if type(path) is list else [ path ]\n            for p in pathlist:\n                os.makedirs(p)\n                if verbose:\n                    log.info('Creating directory: ' + p)\n        except OSError, e:\n            \n            if e.errno == errno.EEXIST and os.path.isdir(p):\n                pass\n            else:\n                raise\n\n    return True", "docstring": "Loops ait.config._datapaths from AIT_CONFIG and creates a directory.\n\nReplaces year and doy with the respective year and day-of-year.\nIf neither are given as arguments, current UTC day and year are used.\n\nArgs:\npaths:\n[optional] list of directory paths you would like to create.\ndoy and year will be replaced by the datetime day and year, respectively.\n\ndatetime:\nUTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ", "source": "juraj-google-style"}
{"code": "def get_max_bond_distance(self, el1_sym, el2_sym):\n        \n        return sqrt(\n            (self.el_radius[el1_sym] + self.el_radius[el2_sym] + self.tol) ** 2)", "docstring": "Use Jmol algorithm to determine bond length from atomic parameters\nArgs:\nel1_sym: (str) symbol of atom 1\nel2_sym: (str) symbol of atom 2\n\nReturns: (float) max bond length", "source": "juraj-google-style"}
{"code": "def get_cells_iterator(bq_read_client: BigQueryReadClient, table_metadata: TableMetadata, column: str) -> Generator[Any, None, None]:\n    if '.' not in column and '[' not in column:\n        rows = get_readrows_iterator(bq_read_client, table_metadata, [column], data_format=DataFormat.AVRO)\n        for row in rows:\n            yield row.get(column)\n    else:\n        nested_columns = parse_column_path(column)\n        parent_column = nested_columns[0][0]\n        rows = get_readrows_iterator(bq_read_client, table_metadata, [parent_column], data_format=DataFormat.AVRO)\n        for current_value in rows:\n            for column_name, key in nested_columns:\n                if isinstance(current_value, dict):\n                    current_value = current_value.get(column_name)\n                elif isinstance(current_value, list) and key:\n                    current_value = next((item for item in current_value if item.get(column_name) == key), None)\n                if isinstance(current_value, dict) and 'value' in current_value:\n                    extracted_value = next((value for key, value in current_value['value'].items() if value is not None), None)\n                    current_value = extracted_value if extracted_value is not None else current_value\n                if current_value is None:\n                    break\n            yield current_value", "docstring": "Retrieves an iterator of cell values for a specified column, optimized\nfor both simple and nested column\n\naccess, including handling special value structures with dynamic value types\nfor nested columns.\n\nArgs:\nbq_read_client (BigQueryReadClient): The BigQuery Storage API Read client.\ntable_metadata (TableMetadata): The table's metadata.\ncolumn (str): The column name, supporting nested fields and array indices\nfor complex cases.\n\nReturns:\nGenerator[Any, None, None]: An iterator over cell values.", "source": "github-repos"}
{"code": "def _prefix_from_prefix_int(self, prefixlen):\n        \n        if not isinstance(prefixlen, (int, long)):\n            raise NetmaskValueError('%r is not an integer' % prefixlen)\n        prefixlen = int(prefixlen)\n        if not (0 <= prefixlen <= self._max_prefixlen):\n            raise NetmaskValueError('%d is not a valid prefix length' %\n                                    prefixlen)\n        return prefixlen", "docstring": "Validate and return a prefix length integer.\n\nArgs:\nprefixlen: An integer containing the prefix length.\n\nReturns:\nThe input, possibly converted from long to int.\n\nRaises:\nNetmaskValueError: If the input is not an integer, or out of range.", "source": "juraj-google-style"}
{"code": "def easeInOutCubic(n):\n    \n    _checkRange(n)\n    n = 2 * n\n    if n < 1:\n        return 0.5 * n**3\n    else:\n        n = n - 2\n        return 0.5 * (n**3 + 2)", "docstring": "A cubic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def validate_primitive_without_value(fhir_primitive: message.Message) -> None:\n    name = fhir_primitive.DESCRIPTOR.full_name\n    if len(extensions.get_fhir_extensions(fhir_primitive)) < 2:\n        raise fhir_errors.InvalidFhirError(f'{name!r} must have either extensions or a value present.')\n    for field, _ in fhir_primitive.ListFields():\n        if field.name not in extensions.NON_VALUE_FIELDS:\n            raise fhir_errors.InvalidFhirError(f'{name!r} contains PrimitiveHasNoValue but {field.name!r} is set.')", "docstring": "Validates a Message which has the PrimitiveWithoutValue extension.\n\nGiven that there is a PrimitiveWithoutValue extension present, there must be\nat least one other extension. Otherwise, there is truly no value set other\nthan id and/or extension (non-value fields).\n\nArgs:\nfhir_primitive: The FHIR primitive Message to validate.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that there is less than one\nextension present, or there are values set other than id and/or extension.", "source": "github-repos"}
{"code": "def decorate_event_js(js_code):\n\n    def add_annotation(method):\n        setattr(method, '__is_event', True)\n        setattr(method, '_js_code', js_code)\n        return method\n    return add_annotation", "docstring": "setup a method as an event, adding also javascript code to generate\n\nArgs:\njs_code (str): javascript code to generate the event client-side.\njs_code is added to the widget html as\nwidget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}", "source": "codesearchnet"}
{"code": "def get_reconciler(config, metrics, rrset_channel, changes_channel, **kw):\n    builder = reconciler.GDNSReconcilerBuilder(config, metrics, rrset_channel, changes_channel, **kw)\n    return builder.build_reconciler()", "docstring": "Get a GDNSReconciler client.\n\nA factory function that validates configuration, creates an auth\nand :class:`GDNSClient` instance, and returns a GDNSReconciler\nprovider.\n\nArgs:\nconfig (dict): Google Cloud Pub/Sub-related configuration.\nmetrics (obj): :interface:`IMetricRelay` implementation.\nrrset_channel (asyncio.Queue): Queue from which to consume\nrecord set messages to validate.\nchanges_channel (asyncio.Queue): Queue to publish message to\nmake corrections to Cloud DNS.\nkw (dict): Additional keyword arguments to pass to the\nReconciler.\nReturns:\nA :class:`GDNSReconciler` instance.", "source": "codesearchnet"}
{"code": "def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):\n    N_input = (N_input or 1)\n    N_output = (N_output or 1)\n    N_hidden = (N_hidden or tuple())\n    if isinstance(N_hidden, (int, float, basestring)):\n        N_hidden = (int(N_hidden),)\n    hidden_layer_type = (hidden_layer_type or tuple())\n    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))\n    if (verbosity > 0):\n        print(N_hidden, ' layers of type ', hidden_layer_type)\n    assert (len(N_hidden) == len(hidden_layer_type))\n    nn = pb.structure.FeedForwardNetwork()\n    nn.addInputModule(pb.structure.BiasUnit(name='bias'))\n    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))\n    for (i, (Nhid, hidlaytype)) in enumerate(zip(N_hidden, hidden_layer_type)):\n        Nhid = int(Nhid)\n        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))\n    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))\n    nn.addConnection(pb.structure.FullConnection(nn['bias'], (nn['hidden'] if N_hidden else nn['output'])))\n    nn.addConnection(pb.structure.FullConnection(nn['input'], (nn['hidden'] if N_hidden else nn['output'])))\n    for (i, (Nhid, hidlaytype)) in enumerate(zip(N_hidden[:(- 1)], hidden_layer_type[:(- 1)])):\n        Nhid = int(Nhid)\n        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['hidden-{}'.format((i + 1))]))\n    i = (len(N_hidden) - 1)\n    nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')], nn['output']))\n    nn.sortModules()\n    if FAST:\n        try:\n            nn.convertToFastNetwork()\n        except:\n            if (verbosity > 0):\n                print('Unable to convert slow PyBrain NN to a fast ARAC network...')\n    if (verbosity > 0):\n        print(nn.connections)\n    return nn", "docstring": "Build a neural net with the indicated input, hidden, and outout dimensions\n\nArguments:\nparams (dict or PyBrainParams namedtuple):\ndefault: {'N_hidden': 6}\n(this is the only parameter that affects the NN build)\n\nReturns:\nFeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers", "source": "codesearchnet"}
{"code": "def stop(self):\n    self._logger.info('Cleaning up remaining connection threads.')\n    for thread in threading.enumerate():\n        if (thread is not threading.current_thread()):\n            try:\n                thread.join(10.0)\n            except Exception as e:\n                self._logger.info('Error occurred while attempting to cleanup thread: {0}'.format(thread.name))\n                self._logger.exception(e)\n            else:\n                if thread.is_alive():\n                    self._logger.warning('Cleanup failed for thread: {0}. Thread is still alive'.format(thread.name))\n                else:\n                    self._logger.info('Cleanup succeeded for thread: {0}'.format(thread.name))\n    self._logger.info('Shutting down server socket handler.')\n    try:\n        self._socket.shutdown(socket.SHUT_RDWR)\n        self._socket.close()\n    except Exception as e:\n        self._logger.exception(e)\n        raise exceptions.NetworkingError('Server failed to shutdown socket handler.')\n    if hasattr(self, 'policy_monitor'):\n        try:\n            self.policy_monitor.stop()\n            self.policy_monitor.join()\n        except Exception as e:\n            self._logger.exception(e)\n            raise exceptions.ShutdownError('Server failed to clean up the policy monitor.')", "docstring": "Stop the server.\n\nHalt server client connections and clean up any existing connection\nthreads.\n\nRaises:\nNetworkingError: Raised if a failure occurs while sutting down\nor closing the TLS server socket.", "source": "codesearchnet"}
{"code": "def do_batch_status(args):\n    \n    rest_client = RestClient(args.url, args.user)\n    batch_ids = args.batch_ids.split(',')\n\n    if args.wait and args.wait > 0:\n        statuses = rest_client.get_statuses(batch_ids, args.wait)\n    else:\n        statuses = rest_client.get_statuses(batch_ids)\n\n    if args.format == 'yaml':\n        fmt.print_yaml(statuses)\n    elif args.format == 'json':\n        fmt.print_json(statuses)\n    else:\n        raise AssertionError('Missing handler: {}'.format(args.format))", "docstring": "Runs the batch-status command, printing output to the console\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "juraj-google-style"}
{"code": "def git_checkout(branch_name, create=False):\n    log.info('Checking out <33>{}'.format(branch_name))\n    shell.run('git checkout {} {}'.format(('-b' if create else ''), branch_name))", "docstring": "Checkout or create a given branch\n\nArgs:\nbranch_name (str):\nThe name of the branch to checkout or create.\ncreate (bool):\nIf set to **True** it will create the branch instead of checking it\nout.", "source": "codesearchnet"}
{"code": "def __init__(self, sv, sess):\n    super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)\n    self._sv = sv\n    self._sess = sess", "docstring": "Create a SVSummaryThread.\n\nArgs:\nsv: A `Supervisor`.\nsess: A `Session`.", "source": "github-repos"}
{"code": "def __call__(self, batch: List[List[str]], mean: bool = None) -> List[Union[list, np.ndarray]]:\n        \n        batch = [self._encode(sample, mean) for sample in batch]\n        if self.pad_zero:\n            batch = zero_pad(batch)\n        return batch", "docstring": "Embed sentences from batch\n\nArgs:\nbatch: list of tokenized text samples\nmean: whether to return mean embedding of tokens per sample\n\nReturns:\nembedded batch", "source": "juraj-google-style"}
{"code": "def _GetFlagValues(self, flags):\n    event_types = []\n    for (event_flag, description) in self._FLAG_VALUES.items():\n        if (event_flag & flags):\n            event_types.append(description)\n    return ', '.join(event_types)", "docstring": "Determines which events are indicated by a set of fsevents flags.\n\nArgs:\nflags (int): fsevents record flags.\n\nReturns:\nstr: a comma separated string containing descriptions of the flag values\nstored in an fsevents record.", "source": "codesearchnet"}
{"code": "def format_rpc(data):\n    \n\n    address, rpc_id, args, resp, _status = data\n\n    name = rpc_name(rpc_id)\n\n    if isinstance(args, (bytes, bytearray)):\n        arg_str = hexlify(args)\n    else:\n        arg_str = repr(args)\n\n    if isinstance(resp, (bytes, bytearray)):\n        resp_str = hexlify(resp)\n    else:\n        resp_str = repr(resp)\n\n    \n    return \"%s called on address %d, payload=%s, response=%s\" % (name, address, arg_str, resp_str)", "docstring": "Format an RPC call and response.\n\nArgs:\ndata (tuple): A tuple containing the address, rpc_id, argument and\nresponse payloads and any error code.\n\nReturns:\nstr: The formated RPC string.", "source": "juraj-google-style"}
{"code": "def from_url(cls, path):\n        \n        if os.path.isfile(path):\n            with open(path) as fd:\n                data = fd.read()\n        else:\n            try:\n                response = urllib.urlopen(path)\n                if response.code >= 300:\n                    raise RuntimeError('Unable to load repo from %s' % path)\n\n                data = response.read()\n                response.close()\n            except IOError:\n                raise RuntimeError(\n                    'Unable to load repo from %s (IO error)' % path\n                )\n\n        return cls(json.loads(data), path)", "docstring": "Instantiate a :class:`TemplateRepository` instance from the data in a\nfile or url\n\nArgs:\npath (str): Path or url to the json file to load\n\nReturns:\nTemplateRepository: A new instance", "source": "juraj-google-style"}
{"code": "def find_code_in_transformers(object_name: str, base_path: Optional[str]=None, return_indices: bool=False) -> Union[str, Tuple[List[str], int, int]]:\n    parts = object_name.split('.')\n    i = 0\n    if base_path is None:\n        base_path = TRANSFORMERS_PATH\n    if base_path == MODEL_TEST_PATH:\n        base_path = 'tests'\n    module = parts[i]\n    while i < len(parts) and (not os.path.isfile(os.path.join(base_path, f'{module}.py'))):\n        i += 1\n        if i < len(parts):\n            module = os.path.join(module, parts[i])\n    if i >= len(parts):\n        raise ValueError(f'`object_name` should begin with the name of a module of transformers but got {object_name}.')\n    with open(os.path.join(base_path, f'{module}.py'), 'r', encoding='utf-8', newline='\\n') as f:\n        lines = f.readlines()\n    indent = ''\n    line_index = 0\n    for name in parts[i + 1:]:\n        while line_index < len(lines) and re.search(f'^{indent}(class|def)\\\\s+{name}(\\\\(|\\\\:)', lines[line_index]) is None:\n            line_index += 1\n        indent += '    '\n        line_index += 1\n    if line_index >= len(lines):\n        raise ValueError(f' {object_name} does not match any function or class in {module}.')\n    start_index = line_index - 1\n    end_index = find_block_end(lines, start_index, len(indent))\n    code = ''.join(lines[start_index:end_index])\n    return (code, (lines, start_index, end_index)) if return_indices else code", "docstring": "Find and return the source code of an object.\n\nArgs:\nobject_name (`str`):\nThe name of the object we want the source code of.\nbase_path (`str`, *optional*):\nThe path to the base folder where files are checked. If not set, it will be set to `TRANSFORMERS_PATH`.\nreturn_indices(`bool`, *optional*, defaults to `False`):\nIf `False`, will only return the code (as a string), otherwise it will also return the whole lines of the\nfile where the object specified by `object_name` is defined, together the start/end indices of the block in\nthe file that defines the object.\n\nReturns:\n`Union[str, Tuple[List[str], int, int]]`: If `return_indices=False`, only the source code of the object will be\nreturned. Otherwise, it also returns the whole lines of the file where the object specified by `object_name` is\ndefined, together the start/end indices of the block in the file that defines the object.", "source": "github-repos"}
{"code": "def _contains_composite_function_call(self, graphdef: graph_pb2.GraphDef) -> bool:\n    return any(map(self._is_composite_function, graphdef.library.function))", "docstring": "Determines if the graph def has composite function call.\n\nArgs:\ngraphdef: A GraphDef object.\n\nReturns:\nTrue if and only if the graph def contains a composite function call.", "source": "github-repos"}
{"code": "def _single_shard_restore(file_prefix: tensor_lib.Tensor, shardable_tensors: Sequence[sharding_util.ShardableTensor], options: 'checkpoint_options.CheckpointOptions | None'=None) -> sharding_util.Shard:\n    options = options or checkpoint_options.CheckpointOptions()\n    tensor_names = []\n    tensor_dtypes = []\n    slice_specs = []\n    for shardable_tensor in shardable_tensors:\n        if shardable_tensor._tensor_save_spec:\n            name = shardable_tensor._tensor_save_spec.name\n            spec = shardable_tensor._tensor_save_spec.slice_spec\n        else:\n            name, spec = (shardable_tensor.checkpoint_key, shardable_tensor.slice_spec)\n        tensor_names.append(name)\n        slice_specs.append(spec)\n        tensor_dtypes.append(shardable_tensor.dtype)\n    restore_device = options.experimental_io_device or 'cpu:0'\n    with ops.device(restore_device):\n        restored_tensors = io_ops.restore_v2(file_prefix, tensor_names, slice_specs, tensor_dtypes)\n    restored_tensor_dict = {}\n    for shardable_tensor in shardable_tensors:\n        restored_tensor = restored_tensors.pop(0)\n        restored_tensor_dict.setdefault(shardable_tensor.checkpoint_key, {})[shardable_tensor.slice_spec] = restored_tensor\n    return restored_tensor_dict", "docstring": "Restore the saveable objects from a checkpoint with `file_prefix`.\n\nArgs:\nfile_prefix: A string or scalar string Tensor containing the prefix for\nfiles to read from.\nshardable_tensors: A list of ShardableTensors to restore.\noptions: Optional `CheckpointOptions` object.\n\nReturns:\nA restored tensor dict (maps checkpoint_key -> slice_spec -> tensor).", "source": "github-repos"}
{"code": "def ensure_list_size(list_, size_):\n    lendiff = (size_ - len(list_))\n    if (lendiff > 0):\n        extension = [None for _ in range(lendiff)]\n        list_.extend(extension)", "docstring": "Allocates more space if needbe.\n\nEnsures len(``list_``) == ``size_``.\n\nArgs:\nlist_ (list): ``list`` to extend\nsize_ (int): amount to exent by", "source": "codesearchnet"}
{"code": "def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n    rot_mats = self.get_rot_mats()\n    inv_rot_mats = invert_rot_mat(rot_mats)\n    return rot_vec_mul(inv_rot_mats, pts)", "docstring": "The inverse of the apply() method.\n\nArgs:\npts:\nA [*, 3] set of points\nReturns:\n[*, 3] inverse-rotated points", "source": "github-repos"}
{"code": "def shape(self):\n    return self._ragged_shape._to_tensor_shape()", "docstring": "The static shape of this StructuredTensor.\n\nThe returned `TensorShape` is guaranteed to have a known rank, but the\nindividual dimension sizes may be unknown.\n\nReturns:\n`tf.TensorShape`", "source": "github-repos"}
{"code": "def get(self, catID, includeRelationships=False):\n    url = ('%(base_url)s/record/%(catID)s' % {'base_url': self.base_url, 'catID': catID})\n    r = self.gbdx_connection.get(url)\n    r.raise_for_status()\n    return r.json()", "docstring": "Retrieves the strip footprint WKT string given a cat ID.\n\nArgs:\ncatID (str): The source catalog ID from the platform catalog.\nincludeRelationships (bool): whether to include graph links to related objects.  Default False.\n\nReturns:\nrecord (dict): A dict object identical to the json representation of the catalog record", "source": "codesearchnet"}
{"code": "def _combine_handle_data(handle, initial_value):\n    assert handle.dtype == dtypes.resource\n    variable_handle_data = get_eager_safe_handle_data(handle)\n    if initial_value.dtype != dtypes.variant:\n        return variable_handle_data\n    extra_handle_data = get_eager_safe_handle_data(initial_value)\n    if extra_handle_data is not None and extra_handle_data.is_set:\n        if variable_handle_data is None or not variable_handle_data.is_set or len(variable_handle_data.shape_and_type) != 1:\n            raise RuntimeError(f\"Expected VarHandleOp to return a length==1 shape_and_type, but saw: '{variable_handle_data}'\")\n        variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)\n    return variable_handle_data", "docstring": "Concats HandleData from tensors `handle` and `initial_value`.\n\nArgs:\nhandle: A `Tensor` of dtype `resource`.\ninitial_value: A `Tensor`.\n\nReturns:\nA `CppShapeInferenceResult.HandleData`.  If `initial_value` has dtype\n`variant`, the `HandleData` contains the concatenation of the shape_and_type\nfrom both `handle` and `initial_value`.\n\nRaises:\nRuntimeError: If handle, which was returned by VarHandleOp, either has\nno handle data, or its len(handle_data.shape_and_type) != 1.", "source": "github-repos"}
{"code": "def _GetAnalysisPlugins(self, analysis_plugins_string):\n    \n    if not analysis_plugins_string:\n      return []\n\n    analysis_plugins_list = [\n        name.strip() for name in analysis_plugins_string.split(',')]\n\n    analysis_plugins = self._analysis_manager.GetPluginObjects(\n        analysis_plugins_list)\n    return analysis_plugins.values()", "docstring": "Retrieves analysis plugins.\n\nArgs:\nanalysis_plugins_string (str): comma separated names of analysis plugins\nto enable.\n\nReturns:\nlist[AnalysisPlugin]: analysis plugins.", "source": "juraj-google-style"}
{"code": "def _get_job_metadata(provider, user_id, job_name, script, task_ids,\n                      user_project, unique_job_id):\n  \n  create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())\n  user_id = user_id or dsub_util.get_os_user()\n  job_metadata = provider.prepare_job_metadata(script.name, job_name, user_id,\n                                               create_time)\n  if unique_job_id:\n    job_metadata['job-id'] = uuid.uuid4().hex\n\n  job_metadata['create-time'] = create_time\n  job_metadata['script'] = script\n  job_metadata['user-project'] = user_project\n  if task_ids:\n    job_metadata['task-ids'] = dsub_util.compact_interval_string(list(task_ids))\n\n  return job_metadata", "docstring": "Allow provider to extract job-specific metadata from command-line args.\n\nArgs:\nprovider: job service provider\nuser_id: user submitting the job\njob_name: name for the job\nscript: the script to run\ntask_ids: a set of the task-ids for all tasks in the job\nuser_project: name of the project to be billed for the request\nunique_job_id: generate a unique job id\n\nReturns:\nA dictionary of job-specific metadata (such as job id, name, etc.)", "source": "juraj-google-style"}
{"code": "def histogram(namespace: Union[Type, str], name: str, bucket_type: 'BucketType', logger: Optional['MetricLogger']=None) -> 'Metrics.DelegatingHistogram':\n    namespace = UserMetrics.get_namespace(namespace)\n    return Metrics.DelegatingHistogram(MetricName(namespace, name), bucket_type, logger)", "docstring": "Obtains or creates a Histogram metric.\n\nArgs:\nnamespace: A class or string that gives the namespace to a metric\nname: A string that gives a unique name to a metric\nbucket_type: A type of bucket used in a histogram. A subclass of\napache_beam.utils.histogram.BucketType\nlogger: MetricLogger for logging locally aggregated metric\n\nReturns:\nA Histogram object.", "source": "github-repos"}
{"code": "def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):\n    if ssbio.utils.is_ipynb():\n        import nglview as nv\n    else:\n        raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')\n    if (not self.structure_file):\n        raise ValueError('Structure file not loaded')\n    only_chains = ssbio.utils.force_list(only_chains)\n    to_show_chains = '( '\n    for c in only_chains:\n        to_show_chains += ':{} or'.format(c)\n    to_show_chains = to_show_chains.strip(' or ')\n    to_show_chains += ' )'\n    if ((self.file_type == 'mmtf') or (self.file_type == 'mmtf.gz')):\n        view = nv.NGLWidget()\n        view.add_component(self.structure_path)\n    else:\n        view = nv.show_structure_file(self.structure_path, gui=gui)\n    if recolor:\n        view.clear_representations()\n        if only_chains:\n            view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)\n        else:\n            view.add_cartoon(selection='protein', color='silver', opacity=opacity)\n    elif only_chains:\n        view.clear_representations()\n        view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)\n    return view", "docstring": "Use NGLviewer to display a structure in a Jupyter notebook\n\nArgs:\nonly_chains (str, list): Chain ID or IDs to display\nopacity (float): Opacity of the structure\nrecolor (bool): If structure should be cleaned and recolored to silver\ngui (bool): If the NGLview GUI should show up\n\nReturns:\nNGLviewer object", "source": "codesearchnet"}
{"code": "def _from_signer_and_info(cls, signer, info, **kwargs):\n        \n        return cls(\n            signer,\n            service_account_email=info['client_email'],\n            token_uri=info['token_uri'],\n            project_id=info.get('project_id'), **kwargs)", "docstring": "Creates a Credentials instance from a signer and service account\ninfo.\n\nArgs:\nsigner (google.auth.crypt.Signer): The signer used to sign JWTs.\ninfo (Mapping[str, str]): The service account info.\nkwargs: Additional arguments to pass to the constructor.\n\nReturns:\ngoogle.auth.jwt.Credentials: The constructed credentials.\n\nRaises:\nValueError: If the info is not in the expected format.", "source": "juraj-google-style"}
{"code": "def get_atlas_per_gene_mutation_df(self, gene_id):\n    g = self.reference_gempro.genes.get_by_id(gene_id)\n    (single, fingerprint) = g.protein.sequence_mutation_summary(alignment_type='seqalign')\n    structure_type_suffix = 'NA'\n    appender = []\n    for (k, strains) in single.items():\n        to_append = {}\n        orig_res = k[0]\n        resnum = int(k[1])\n        mutated_res = k[2]\n        num_strains_mutated = len(strains)\n        strain_ids = [str(x.split((g.id + '_'))[1]) for x in strains]\n        to_append['ref_residue'] = orig_res\n        to_append['ref_resnum'] = resnum\n        to_append['strain_residue'] = mutated_res\n        to_append['num_strains_mutated'] = num_strains_mutated\n        to_append['strains_mutated'] = ';'.join(strain_ids)\n        to_append['at_disulfide_bridge'] = False\n        origres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(orig_res)\n        mutres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(mutated_res)\n        to_append['ref_residue_prop'] = origres_props\n        to_append['strain_residue_prop'] = mutres_props\n        (grantham_s, grantham_txt) = ssbio.protein.sequence.properties.residues.grantham_score(orig_res, mutated_res)\n        to_append['grantham_score'] = grantham_s\n        to_append['grantham_annotation'] = grantham_txt\n        to_append.update(g.protein.get_residue_annotations(seq_resnum=resnum, use_representatives=True))\n        if g.protein.representative_structure:\n            if g.protein.representative_structure.is_experimental:\n                to_append['structure_type'] = 'EXP'\n            else:\n                to_append['structure_type'] = 'HOM'\n            repchain = g.protein.representative_chain\n            repchain_annotations = g.protein.representative_structure.chains.get_by_id(repchain).seq_record.annotations\n            if ('SSBOND-biopython' in repchain_annotations):\n                structure_resnum = g.protein.map_seqprop_resnums_to_structprop_resnums(resnums=resnum, use_representatives=True)\n                if (resnum in structure_resnum):\n                    ssbonds = repchain_annotations['SSBOND-biopython']\n                    ssbonds_res = []\n                    for x in ssbonds:\n                        ssbonds_res.append(x[0])\n                        ssbonds_res.append(x[1])\n                    if (structure_resnum in ssbonds_res):\n                        to_append['at_disulfide_bridge'] = True\n        appender.append(to_append)\n    if (not appender):\n        return pd.DataFrame()\n    cols = ['ref_residue', 'ref_resnum', 'strain_residue', 'num_strains_mutated', 'strains_mutated', 'ref_residue_prop', 'strain_residue_prop', 'grantham_score', 'grantham_annotation', 'at_disulfide_bridge', 'seq_SS-sspro', 'seq_SS-sspro8', 'seq_RSA-accpro', 'seq_RSA-accpro20', 'seq_TM-tmhmm', 'struct_SS-dssp', 'struct_RSA-dssp', 'struct_ASA-dssp', 'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms', 'struct_PHI-dssp', 'struct_PSI-dssp', 'struct_resnum', 'struct_residuestrains_mutated']\n    df_gene_summary = pd.DataFrame.from_records(appender, columns=cols)\n    df_gene_summary.dropna(axis=1, how='all', inplace=True)\n    df_gene_summary.sort_values(by='ref_resnum', inplace=True)\n    df_gene_summary = df_gene_summary.set_index('ref_resnum')\n    return df_gene_summary", "docstring": "Create a single data frame which summarizes a gene and its mutations.\n\nArgs:\ngene_id (str): Gene ID in the base 
model\n\nReturns:\nDataFrame: Pandas DataFrame of the results", "source": "codesearchnet"}
{"code": "def compose_tree_url(tree, issn_url=False):\n    url = compose_tree_path(tree, issn_url)\n    if (WEB_PORT == 80):\n        return ('%s:\n    return ('%s:", "docstring": "Compose full url for given `tree`, with protocol, server's address and\nport.\n\nArgs:\ntree (obj): :class:`.Tree` instance.\nissn_url (bool, default False): Compose URL using ISSN.\n\nReturns:\nstr: URL of the tree", "source": "codesearchnet"}
{"code": "def get_channel(self, chan_name, coll_name, exp_name):\n        \n        chan = ChannelResource(chan_name, coll_name, exp_name)\n        return self.get_project(chan)", "docstring": "Helper that gets a fully initialized ChannelResource for an *existing* channel.\n\nArgs:\nchan_name (str): Name of channel.\ncoll_name (str): Name of channel's collection.\nexp_name (str): Name of channel's experiment.\n\nReturns:\n(intern.resource.boss.ChannelResource)", "source": "juraj-google-style"}
{"code": "def FindFileByName(self, file_name):\n    \n\n    try:\n      return self._file_descriptors[file_name]\n    except KeyError:\n      pass\n\n    try:\n      file_proto = self._internal_db.FindFileByName(file_name)\n    except KeyError as error:\n      if self._descriptor_db:\n        file_proto = self._descriptor_db.FindFileByName(file_name)\n      else:\n        raise error\n    if not file_proto:\n      raise KeyError('Cannot find a file named %s' % file_name)\n    return self._ConvertFileProtoToFileDescriptor(file_proto)", "docstring": "Gets a FileDescriptor by file name.\n\nArgs:\nfile_name: The path to the file to get a descriptor for.\n\nReturns:\nA FileDescriptor for the named file.\n\nRaises:\nKeyError: if the file cannot be found in the pool.", "source": "juraj-google-style"}
{"code": "def _create_trial_info(self, expr_dir):\n        \n        meta = self._build_trial_meta(expr_dir)\n\n        self.logger.debug(\"Create trial for %s\" % meta)\n\n        trial_record = TrialRecord.from_json(meta)\n        trial_record.save()", "docstring": "Create information for given trial.\n\nMeta file will be loaded if exists, and the trial information\nwill be saved in db backend.\n\nArgs:\nexpr_dir (str): Directory path of the experiment.", "source": "juraj-google-style"}
{"code": "def bbox_line_intersect(nodes, line_start, line_end):\n    (left, right, bottom, top) = _helpers.bbox(nodes)\n    if (_helpers.in_interval(line_start[0], left, right) and _helpers.in_interval(line_start[1], bottom, top)):\n        return BoxIntersectionType.INTERSECTION\n    if (_helpers.in_interval(line_end[0], left, right) and _helpers.in_interval(line_end[1], bottom, top)):\n        return BoxIntersectionType.INTERSECTION\n    (s_bottom, t_bottom, success) = segment_intersection(np.asfortranarray([left, bottom]), np.asfortranarray([right, bottom]), line_start, line_end)\n    if (success and _helpers.in_interval(s_bottom, 0.0, 1.0) and _helpers.in_interval(t_bottom, 0.0, 1.0)):\n        return BoxIntersectionType.INTERSECTION\n    (s_right, t_right, success) = segment_intersection(np.asfortranarray([right, bottom]), np.asfortranarray([right, top]), line_start, line_end)\n    if (success and _helpers.in_interval(s_right, 0.0, 1.0) and _helpers.in_interval(t_right, 0.0, 1.0)):\n        return BoxIntersectionType.INTERSECTION\n    (s_top, t_top, success) = segment_intersection(np.asfortranarray([right, top]), np.asfortranarray([left, top]), line_start, line_end)\n    if (success and _helpers.in_interval(s_top, 0.0, 1.0) and _helpers.in_interval(t_top, 0.0, 1.0)):\n        return BoxIntersectionType.INTERSECTION\n    return BoxIntersectionType.DISJOINT", "docstring": "r\"\"\"Determine intersection of a bounding box and a line.\n\nWe do this by first checking if either the start or end node of the\nsegment are contained in the bounding box. If they aren't, then\nchecks if the line segment intersects any of the four sides of the\nbounding box.\n\n.. note::\n\nThis function is \"half-finished\". It makes no distinction between\n\"tangent\" intersections of the box and segment and other types\nof intersection. However, the distinction is worthwhile, so this\nfunction should be \"upgraded\" at some point.\n\nArgs:\nnodes (numpy.ndarray): Points (``2 x N``) that determine a\nbounding box.\nline_start (numpy.ndarray): Beginning of a line segment (1D\n``2``-array).\nline_end (numpy.ndarray): End of a line segment (1D ``2``-array).\n\nReturns:\nint: Enum from ``BoxIntersectionType`` indicating the type of\nbounding box intersection.", "source": "codesearchnet"}
{"code": "def allocate(self, amount, child=None, update=True):\n    if (child is not None):\n        if (child not in self.children):\n            c = SecurityBase(child)\n            c.setup(self._universe)\n            c.update(self.now)\n            self._add_child(c)\n        self.children[child].allocate(amount)\n    else:\n        if (self.parent == self):\n            self.parent.adjust((- amount), update=False, flow=True)\n        else:\n            self.parent.adjust((- amount), update=False, flow=False)\n        self.adjust(amount, update=False, flow=True)\n        if (self.children is not None):\n            [c.allocate((amount * c._weight), update=False) for c in self._childrenv]\n        if update:\n            self.root.stale = True", "docstring": "Allocate capital to Strategy. By default, capital is allocated\nrecursively down the children, proportionally to the children's\nweights.  If a child is specified, capital will be allocated\nto that specific child.\n\nAllocation also have a side-effect. They will deduct the same amount\nfrom the parent's \"account\" to offset the allocation. If there is\nremaining capital after allocation, it will remain in Strategy.\n\nArgs:\n* amount (float): Amount to allocate.\n* child (str): If specified, allocation will be directed to child\nonly. Specified by name.\n* update (bool): Force update.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    if self._cryptographic_parameters:\n        self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version)\n    if self._data:\n        self._data.write(local_stream, kmip_version=kmip_version)\n    if self._digested_data:\n        self._digested_data.write(local_stream, kmip_version=kmip_version)\n    if self._signature_data:\n        self._signature_data.write(local_stream, kmip_version=kmip_version)\n    if self._correlation_value:\n        self._correlation_value.write(local_stream, kmip_version=kmip_version)\n    if self._init_indicator:\n        self._init_indicator.write(local_stream, kmip_version=kmip_version)\n    if self._final_indicator:\n        self._final_indicator.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(SignatureVerifyRequestPayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the SignatureVerify request payload to a\nstream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def util_granulate_time_series(time_series, scale):\n    n = len(time_series)\n    b = int(np.fix((n / scale)))\n    temp = np.reshape(time_series[0:(b * scale)], (b, scale))\n    cts = np.mean(temp, axis=1)\n    return cts", "docstring": "Extract coarse-grained time series\n\nArgs:\ntime_series: Time series\nscale: Scale factor\n\nReturns:\nVector of coarse-grained time series with given scale factor", "source": "codesearchnet"}
{"code": "class RunEnsembleDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, ensemble_detector: EnsembleAnomalyDetector):\n        self._ensemble_detector = ensemble_detector\n\n    def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        model_uuid = f'{self._ensemble_detector._model_id}:{uuid.uuid4().hex[:6]}'\n        assert self._ensemble_detector._sub_detectors is not None\n        if not self._ensemble_detector._sub_detectors:\n            raise ValueError(f'No detectors found at {model_uuid}')\n        results = []\n        for idx, detector in enumerate(self._ensemble_detector._sub_detectors):\n            if isinstance(detector, EnsembleAnomalyDetector):\n                results.append(input | f'Run Ensemble Detector at index {idx} ({model_uuid})' >> RunEnsembleDetector(detector))\n            elif isinstance(detector, OfflineDetector):\n                results.append(input | f'Run Offline Detector at index {idx} ({model_uuid})' >> RunOfflineDetector(detector))\n            else:\n                results.append(input | f'Run One Detector at index {idx} ({model_uuid})' >> RunOneDetector(detector))\n        if self._ensemble_detector._aggregation_strategy is None:\n            aggregation_type = 'Simple'\n        else:\n            aggregation_type = 'Custom'\n        ret = results | beam.Flatten() | f'Run {aggregation_type} Aggregation Strategy ({model_uuid})' >> RunAggregationStrategy(self._ensemble_detector._aggregation_strategy, self._ensemble_detector._model_id)\n        if self._ensemble_detector._threshold_criterion:\n            ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._ensemble_detector._threshold_criterion)\n        return ret", "docstring": "Runs an ensemble of anomaly detectors on a PCollection of data.\n\nThis PTransform applies an `EnsembleAnomalyDetector` to the input data,\nrunning each sub-detector and aggregating the results.\n\nArgs:\nensemble_detector: The `EnsembleAnomalyDetector` to run.", "source": "github-repos"}
{"code": "def _page_to_title(page):\n  \n  \n  start_tag = u\"<title>\"\n  end_tag = u\"</title>\"\n  start_pos = page.find(start_tag)\n  end_pos = page.find(end_tag)\n  assert start_pos != -1\n  assert end_pos != -1\n  start_pos += len(start_tag)\n  return page[start_pos:end_pos]", "docstring": "Extract the title from a page.\n\nArgs:\npage: a unicode string\nReturns:\na unicode string", "source": "juraj-google-style"}
{"code": "def get_course_certificate(self, course_id, username):\n    return self.client.certificates(username).courses(course_id).get()", "docstring": "Retrieve the certificate for the given username for the given course_id.\n\nArgs:\n* ``course_id`` (str): The string value of the course's unique identifier\n* ``username`` (str): The username ID identifying the user for which to retrieve the certificate\n\nRaises:\n\nHttpNotFoundError if no certificate found for the given user+course.\n\nReturns:\n\na dict containing:\n\n* ``username``: A string representation of an user's username passed in the request.\n* ``course_id``: A string representation of a Course ID.\n* ``certificate_type``: A string representation of the certificate type.\n* ``created_date`: Datetime the certificate was created (tz-aware).\n* ``status``: A string representation of the certificate status.\n* ``is_passing``: True if the certificate has a passing status, False if not.\n* ``download_url``: A string representation of the certificate url.\n* ``grade``: A string representation of a float for the user's course grade.", "source": "codesearchnet"}
{"code": "def delete(self, *names: str, pipeline=False):\n        \n        if pipeline:\n            self._pipeline.delete(*names)\n        else:\n            self._db.delete(*names)", "docstring": "Delete one or more keys specified by names.\n\nArgs:\nnames (str): Names of keys to delete\npipeline (bool): True, start a transaction block. Default false.", "source": "juraj-google-style"}
{"code": "def write_to_fp(self, fp):\n        \n        \n        \n        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n        text_parts = self._tokenize(self.text)\n        log.debug(\"text_parts: %i\", len(text_parts))\n        assert text_parts, 'No text to send to TTS API'\n\n        for idx, part in enumerate(text_parts):\n            try:\n                \n                part_tk = self.token.calculate_token(part)\n            except requests.exceptions.RequestException as e:  \n                log.debug(str(e), exc_info=True)\n                raise gTTSError(\n                    \"Connection error during token calculation: %s\" %\n                    str(e))\n\n            payload = {'ie': 'UTF-8',\n                       'q': part,\n                       'tl': self.lang,\n                       'ttsspeed': self.speed,\n                       'total': len(text_parts),\n                       'idx': idx,\n                       'client': 'tw-ob',\n                       'textlen': _len(part),\n                       'tk': part_tk}\n\n            log.debug(\"payload-%i: %s\", idx, payload)\n\n            try:\n                \n                r = requests.get(self.GOOGLE_TTS_URL,\n                                 params=payload,\n                                 headers=self.GOOGLE_TTS_HEADERS,\n                                 proxies=urllib.request.getproxies(),\n                                 verify=False)\n\n                log.debug(\"headers-%i: %s\", idx, r.request.headers)\n                log.debug(\"url-%i: %s\", idx, r.request.url)\n                log.debug(\"status-%i: %s\", idx, r.status_code)\n\n                r.raise_for_status()\n            except requests.exceptions.HTTPError:\n                \n                raise gTTSError(tts=self, response=r)\n            except requests.exceptions.RequestException as e:  \n                \n                raise gTTSError(str(e))\n\n            try:\n                \n                for chunk in r.iter_content(chunk_size=1024):\n                    fp.write(chunk)\n                log.debug(\"part-%i written to %s\", idx, fp)\n            except (AttributeError, TypeError) as e:\n                raise TypeError(\n                    \"'fp' is not a file-like object or it does not take bytes: %s\" %\n                    str(e))", "docstring": "Do the TTS API request and write bytes to a file-like object.\n\nArgs:\nfp (file object): Any file-like object to write the ``mp3`` to.\n\nRaises:\n:class:`gTTSError`: When there's an error with the API request.\nTypeError: When ``fp`` is not a file-like object that takes bytes.", "source": "juraj-google-style"}
{"code": "def parse_default_property_value(property_name, property_type_id, default_value_string):\n    if ((property_type_id == PROPERTY_TYPE_EMBEDDED_SET_ID) and (default_value_string == '{}')):\n        return set()\n    elif ((property_type_id == PROPERTY_TYPE_EMBEDDED_LIST_ID) and (default_value_string == '[]')):\n        return list()\n    elif ((property_type_id == PROPERTY_TYPE_STRING_ID) and isinstance(default_value_string, six.string_types)):\n        return default_value_string\n    elif (property_type_id == PROPERTY_TYPE_BOOLEAN_ID):\n        return _parse_bool_default_value(property_name, default_value_string)\n    elif (property_type_id == PROPERTY_TYPE_DATETIME_ID):\n        return _parse_datetime_default_value(property_name, default_value_string)\n    elif (property_type_id == PROPERTY_TYPE_DATE_ID):\n        return _parse_date_default_value(property_name, default_value_string)\n    else:\n        raise AssertionError(u'Unsupported default value for property \"{}\" with type id {}: {}'.format(property_name, property_type_id, default_value_string))", "docstring": "Parse the default value string into its proper form given the property type ID.\n\nArgs:\nproperty_name: string, the name of the property whose default value is being parsed.\nUsed primarily to construct meaningful error messages, should the default\nvalue prove invalid.\nproperty_type_id: int, one of the property type ID constants defined in this file that\nOrientDB uses to designate the native type of a given property.\ndefault_value_string: string, the textual representation of the default value for\nfor the property, as returned by OrientDB's schema introspection code.\n\nReturns:\nan object of type matching the property that can be used as the property's default value.\nFor example, if the property is of string type, the return type will be a string, and if\nthe property is of list type, the return type will be a list.\n\nRaises:\nAssertionError, if the default value is not supported or does not match the\nproperty's declared type (e.g. if a default of \"[]\" is set on an integer property).", "source": "codesearchnet"}
{"code": "def image_preprocessing(image_buffer, bbox, train, thread_id=0):\n    if (bbox is None):\n        raise ValueError('Please supply a bounding box.')\n    image = decode_jpeg(image_buffer)\n    height = FLAGS.image_size\n    width = FLAGS.image_size\n    if train:\n        image = distort_image(image, height, width, bbox, thread_id)\n    else:\n        image = eval_image(image, height, width)\n    image = tf.subtract(image, 0.5)\n    image = tf.multiply(image, 2.0)\n    return image", "docstring": "Decode and preprocess one image for evaluation or training.\n\nArgs:\nimage_buffer: JPEG encoded string Tensor\nbbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\nwhere each coordinate is [0, 1) and the coordinates are arranged as\n[ymin, xmin, ymax, xmax].\ntrain: boolean\nthread_id: integer indicating preprocessing thread\n\nReturns:\n3-D float Tensor containing an appropriately scaled image\n\nRaises:\nValueError: if user does not provide bounding box", "source": "codesearchnet"}
{"code": "def camel_to_title(name):\n    split = re.findall('[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)', name)\n    ret = ' '.join(split)\n    ret = (ret[0].upper() + ret[1:])\n    return ret", "docstring": "Takes a camelCaseFieldName and returns an Title Case Field Name\n\nArgs:\nname (str): E.g. camelCaseFieldName\n\nReturns:\nstr: Title Case converted name. E.g. Camel Case Field Name", "source": "codesearchnet"}
{"code": "def sorted(field_name, ascending=True, fields=None, count=5):\n    \n    if field_name is None:\n      raise Exception('Sort field must be specified')\n    direction = '' if ascending else ' DESC'\n    projection = Sampling._create_projection(fields)\n    return lambda sql: 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % (projection, sql, field_name,\n                                                                       direction, count)", "docstring": "Provides a sampling strategy that picks from an ordered set of rows.\n\nArgs:\nfield_name: the name of the field to sort the rows by.\nascending: whether to sort in ascending direction or not.\nfields: an optional list of field names to retrieve.\ncount: optional number of rows to limit the sampled results to.\nReturns:\nA sampling function that can be applied to get the initial few rows.", "source": "juraj-google-style"}
{"code": "def process_cgmlst_results(df):\n    assert isinstance(df, pd.DataFrame)\n    markers = []\n    alleles = []\n    for x in df['qseqid']:\n        (marker, allele) = x.split('|')\n        markers.append(marker)\n        alleles.append(int(allele))\n    df.loc[(:, 'marker')] = markers\n    df.loc[(:, 'allele')] = alleles\n    df.loc[(:, 'is_match')] = (((df['coverage'] >= 1.0) & (df['pident'] >= 90.0)) & (~ df['is_trunc']))\n    df.loc[(:, 'allele_name')] = df.apply((lambda x: allele_name(x.sseq.replace('-', ''))), axis=1)\n    df.loc[(:, 'is_perfect')] = ((df['coverage'] == 1.0) & (df['pident'] == 100.0))\n    df_perf = df[df['is_perfect']]\n    perf_markers = df_perf['marker'].unique()\n    df.loc[(:, 'has_perfect_match')] = df['marker'].isin(perf_markers)\n    (start_idxs, end_idxs, needs_revcomps, trunc, is_extended) = extend_subj_match_vec(df)\n    df.loc[(:, 'start_idx')] = start_idxs\n    df.loc[(:, 'end_idx')] = end_idxs\n    df.loc[(:, 'needs_revcomp')] = needs_revcomps\n    df.loc[(:, 'trunc')] = trunc\n    df.loc[(:, 'is_extended')] = is_extended\n    df.loc[(:, 'sseq_msa_gaps')] = np.zeros(df.shape[0], dtype=np.int64)\n    df.loc[(:, 'sseq_msa_p_gaps')] = np.zeros(df.shape[0], dtype=np.float64)\n    df.loc[(:, 'too_many_gaps')] = trunc\n    return df", "docstring": "Append informative fields to cgMLST330 BLAST results DataFrame\n\nThe `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.\nThe `qseqid` parsed allele numbers and marker names are appended as new fields.\n\n`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.\n`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.\nThe top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present\nat that marker locus.\n\nArgs:\ndf (pandas.DataFrame): DataFrame of cgMLST330 BLAST results\n\nReturns:\npandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`)", "source": "codesearchnet"}
{"code": "def metar_to_speech(metar: str) -> str:\n    LOGGER.info('getting speech text from METAR: %s', metar)\n    (metar_data, metar_units) = emiz.avwx.metar.parse_in(metar)\n    speech = emiz.avwx.speech.metar(metar_data, metar_units)\n    speech = str(speech).replace('Altimeter', 'Q N H')\n    LOGGER.debug('resulting speech: %s', speech)\n    return speech", "docstring": "Creates a speakable text from a METAR\n\nArgs:\nmetar: METAR string to use\n\nReturns: speakable METAR for TTS", "source": "codesearchnet"}
{"code": "def title_of_design_condition(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `title_of_design_condition`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `title_of_design_condition`')\n\n        self._title_of_design_condition = value", "docstring": "Corresponds to IDD Field `title_of_design_condition`\n\nArgs:\nvalue (str): value for IDD Field `title_of_design_condition`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, context=None):\n    \n    if context is None:\n      context = google.datalab.Context.default()\n    self._context = context\n    self._api = _api.Api(context)\n    self._project_id = context.project_id if context else self._api.project_id", "docstring": "Initializes an instance of a BucketList.\n\nArgs:\ncontext: an optional Context object providing project_id and credentials. If a specific\nproject id or credentials are unspecified, the default ones configured at the global\nlevel are used.", "source": "juraj-google-style"}
{"code": "def _MergeOptional(self, a, b):\n    \n    if a and b:\n      if a != b:\n        raise MergeError(\"values must be identical if both specified \"\n                         \"('%s' vs '%s')\" % (transitfeed.EncodeUnicode(a),\n                                             transitfeed.EncodeUnicode(b)))\n    return a or b", "docstring": "Tries to merge two values which may be None.\n\nIf both values are not None, they are required to be the same and the\nmerge is trivial. If one of the values is None and the other is not None,\nthe merge results in the one which is not None. If both are None, the merge\nresults in None.\n\nArgs:\na: The first value.\nb: The second value.\n\nReturns:\nThe merged value.\n\nRaises:\nMergeError: If both values are not None and are not the same.", "source": "juraj-google-style"}
{"code": "def list_events(self):\n    event_dir_dict = collections.defaultdict(set)\n    for event_file in self._glob_events_files(self._paths, recursive=True):\n        dir = os.path.dirname(event_file)\n        try:\n            for record in tf_record.tf_record_iterator(event_file):\n                event = event_pb2.Event.FromString(record)\n                if ((event.summary is None) or (event.summary.value is None)):\n                    continue\n                for value in event.summary.value:\n                    if ((value.simple_value is None) or (value.tag is None)):\n                        continue\n                    event_dir_dict[value.tag].add(dir)\n        except tf.errors.DataLossError:\n            continue\n    return dict(event_dir_dict)", "docstring": "List all scalar events in the directory.\n\nReturns:\nA dictionary. Key is the name of a event. Value is a set of dirs that contain that event.", "source": "codesearchnet"}
{"code": "def get_options_from_str(obj_str, **kwargs):\n    \n    if isinstance(obj_str, list):\n        return obj_str\n    try:\n        obj = get_obj_frm_str(obj_str, **kwargs)\n        if obj:\n            return list(obj)\n    except AttributeError:\n        pass\n    return []", "docstring": "Returns a list of options from a python object string\n\nargs:\nobj_str: python list of options or a python object path\nExample: \"rdfframework.connections.ConnManager[{param1}]\"\n\nkwargs:\n* kwargs used to format the 'obj_str'", "source": "juraj-google-style"}
{"code": "def _set_subject(self, subject):\n\n    def test_uri(value):\n        ' test to see if the value is a uri or bnode\\n\\n            Returns: Uri or Bnode '\n        if (not isinstance(value, (Uri, BlankNode))):\n            try:\n                if value.startswith('_:'):\n                    return BlankNode(value)\n                else:\n                    return Uri(value)\n            except:\n                return BlankNode()\n        else:\n            return value\n    if isinstance(subject, dict):\n        self.subject = test_uri(subject['s'])\n        if isinstance(subject['o'], list):\n            for item in subject['o']:\n                self.add_property(subject['p'], item)\n        else:\n            self.add_property(subject['p'], subject['o'])\n    else:\n        self.subject = test_uri(subject)", "docstring": "sets the subject value for the class instance\n\nArgs:\nsubject(dict, Uri, str): the subject for the class instance", "source": "codesearchnet"}
{"code": "def is_subtype_of(self, other):\n    try:\n        self.sanity_check_type(other)\n        nest.assert_same_structure(self._element_spec, other._element_spec)\n    except (TypeError, ValueError):\n        return False\n    self_elements = nest.flatten(self._element_spec)\n    other_elements = nest.flatten(other._element_spec)\n    return all((self_element.is_subtype_of(other_element) for self_element, other_element in zip(self_elements, other_elements)))", "docstring": "Returns True if `self` is subtype of `other`.\n\nArgs:\nother: A `TypeSpec`.", "source": "github-repos"}
{"code": "class InstructBlipEncoder(nn.Module):\n\n    def __init__(self, config: InstructBlipConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for idx, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`InstructBlipEncoderLayer`].\n\nArgs:\nconfig (`InstructBlipConfig`):\nThe corresponding vision configuration for the `InstructBlipEncoder`.", "source": "github-repos"}
{"code": "def CheckKeyCompatibility(cls, key_path):\n    key_path_upper = key_path.upper()\n    for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:\n        if key_path_upper.startswith(key_path_prefix):\n            return True\n    logger.warning('Key path: \"{0:s}\" is currently not supported'.format(key_path))\n    return False", "docstring": "Checks if a Windows Registry key path is supported by dfWinReg.\n\nArgs:\nkey_path (str): path of the Windows Registry key.\n\nReturns:\nbool: True if key is compatible or False if not.", "source": "codesearchnet"}
{"code": "def addFixedEffect(self, F=None, A=None, Ftest=None):\n        \n        if A is None:\n            A = sp.eye(self.P)\n        if F is None:\n            F = sp.ones((self.N,1))\n            if self.Ntest is not None:\n                Ftest = sp.ones((self.Ntest,1))\n\n        assert A.shape[1]==self.P, 'VarianceDecomposition:: A has incompatible shape'\n        assert F.shape[0]==self.N, 'VarianceDecimposition:: F has incompatible shape'\n\n        if Ftest is not None:\n            assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'\n            assert Ftest.shape[0]==self.Ntest, 'VarianceDecimposition:: Ftest has incompatible shape'\n            assert Ftest.shape[1]==F.shape[1], 'VarianceDecimposition:: Ftest has incompatible shape'\n\n        \n        self.sample_designs.append(F)\n        self.sample_test_designs.append(Ftest)\n        self.trait_designs.append(A)\n \n        self._desync()", "docstring": "add fixed effect term to the model\n\nArgs:\nF:     sample design matrix for the fixed effect [N,K]\nA:     trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P]\nFtest: sample design matrix for test samples [Ntest,K]", "source": "juraj-google-style"}
{"code": "def __init__(self, _args):\n        \n\n        super(TcExProfile, self).__init__(_args)\n\n        \n        self._input_permutations = []\n        self._output_permutations = []\n        self.data_dir = os.path.join(self.args.outdir, 'data')\n        self.profile_dir = os.path.join(self.args.outdir, 'profiles')\n        self.profiles = {}", "docstring": "Initialize Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def get(self, uid: int) -> FrozenSet[Flag]:\n    recent = (_recent_set if (uid in self._recent) else frozenset())\n    flags = self._flags.get(uid)\n    return (recent if (flags is None) else (flags | recent))", "docstring": "Return the session flags for the mailbox session.\n\nArgs:\nuid: The message UID value.", "source": "codesearchnet"}
{"code": "def filter(self, limit=None, to=None, category=None):\n        \n        if category and not to:\n            msg_slice = itertools.islice((x for x in self.store if x[2] == category), limit)\n        elif to and not category:\n            to = JID.fromstr(to)\n            msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)\n        elif to and category:\n            to = JID.fromstr(to)\n            msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1]) and x[2] == category), limit)\n        else:\n            msg_slice = self.all(limit=limit)\n            return msg_slice\n\n        return list(msg_slice)[::-1]", "docstring": "Returns the events that match the filters\n\nArgs:\nlimit (int, optional): the max length of the events to return (Default value = None)\nto (str, optional): only events that have been sent or received by 'to' (Default value = None)\ncategory (str, optional): only events belonging to the category (Default value = None)\n\nReturns:\nlist: a list of filtered events", "source": "juraj-google-style"}
{"code": "def _save_tensor_value_to_cache_op(self, cache_idx, updates, graph):\n    updates = self._merge_tensor_signatures(updates)\n    updates = array_ops.reshape(updates, [1, self._num_signature_dimensions()])\n    indices = constant_op.constant([cache_idx])\n    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)\n    return state_ops.scatter_update(cache, indices, updates).op", "docstring": "Returns an op that will save the given updates to an entry in the cache.\n\nArgs:\ncache_idx: The cache index of the tensor within the cache.\nupdates: A dictionary of the signature updates.\ngraph: A TensorFlow graph.\nReturns:\nCache update operation.", "source": "github-repos"}
{"code": "def forward_loss(self, pixel_values, pred, mask, interpolate_pos_encoding: bool=False):\n    target = self.patchify(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)\n    if self.config.norm_pix_loss:\n        mean = tf.reduce_mean(target, axis=-1, keepdims=True)\n        var = tf.math.reduce_variance(target, axis=-1, keepdims=True)\n        target = (target - mean) / (var + 1e-06) ** 0.5\n    loss = (pred - target) ** 2\n    loss = tf.reduce_mean(loss, axis=-1)\n    loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)\n    loss = tf.reshape(loss, (1,))\n    return loss", "docstring": "Args:\npixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)`):\nPixel values.\npred (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\nPredicted pixel values.\nmask (`tf.Tensor` of shape `(batch_size, sequence_length)`):\nTensor indicating which patches are masked (1) and which are not (0).\ninterpolate_pos_encoding (`bool`, *optional*, default `False`):\ninterpolation flag passed during the forward pass.\n\nReturns:\n`tf.Tensor`: Pixel reconstruction loss.", "source": "github-repos"}
{"code": "def add_vlan_int(self, vlan_id):\n    config = ET.Element('config')\n    vlinterface = ET.SubElement(config, 'interface-vlan', xmlns='urn:brocade.com:mgmt:brocade-interface')\n    interface = ET.SubElement(vlinterface, 'interface')\n    vlan = ET.SubElement(interface, 'vlan')\n    name = ET.SubElement(vlan, 'name')\n    name.text = vlan_id\n    try:\n        self._callback(config)\n        return True\n    except Exception as error:\n        logging.error(error)\n        return False", "docstring": "Add VLAN Interface. VLAN interfaces are required for VLANs even when\nnot wanting to use the interface for any L3 features.\n\nArgs:\nvlan_id: ID for the VLAN interface being created. Value of 2-4096.\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def get_num_bytes(self, batch: Sequence[scipy.sparse.csr_matrix]) -> int:\n    return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch.", "source": "github-repos"}
{"code": "def change_wavelength(self, wavelength):\n        \n        for axis in self.axes:\n            if issubclass(type(axis), Slabs):\n                axis.change_wavelength(wavelength)\n        self.xx, self.xy, self.yx, self.yy, self.zz = self.axes\n        self._wl = wavelength", "docstring": "Changes the wavelength of the structure.\n\nThis will affect the mode solver and potentially\nthe refractive indices used (provided functions\nwere provided as refractive indices).\n\nArgs:\nwavelength (float): The new wavelength.", "source": "juraj-google-style"}
{"code": "def setup_components_and_tf_funcs(self, custom_getter=None):\n        \n\n        if custom_getter is None:\n            def custom_getter(getter, name, registered=False, **kwargs):\n                \n                if registered:\n                    self.registered_variables.add(name)\n                elif name in self.registered_variables:\n                    registered = True\n                \n                variable = getter(name=name, **kwargs)\n                if registered:\n                    pass\n                elif name in self.all_variables:\n                    assert variable is self.all_variables[name]\n                    if kwargs.get('trainable', True):\n                        assert variable is self.variables[name]\n                        if 'variables' in self.summary_labels:\n                            tf.contrib.summary.histogram(name=name, tensor=variable)\n                else:\n                    self.all_variables[name] = variable\n                    if kwargs.get('trainable', True):\n                        self.variables[name] = variable\n                        if 'variables' in self.summary_labels:\n                            tf.contrib.summary.histogram(name=name, tensor=variable)\n                return variable\n\n        self.fn_initialize = tf.make_template(\n            name_='initialize',\n            func_=self.tf_initialize,\n            custom_getter_=custom_getter\n        )\n        self.fn_preprocess = tf.make_template(\n            name_='preprocess',\n            func_=self.tf_preprocess,\n            custom_getter_=custom_getter\n        )\n        self.fn_actions_and_internals = tf.make_template(\n            name_='actions-and-internals',\n            func_=self.tf_actions_and_internals,\n            custom_getter_=custom_getter\n        )\n        self.fn_observe_timestep = tf.make_template(\n            name_='observe-timestep',\n            func_=self.tf_observe_timestep,\n            custom_getter_=custom_getter\n        )\n        self.fn_action_exploration = tf.make_template(\n            name_='action-exploration',\n            func_=self.tf_action_exploration,\n            custom_getter_=custom_getter\n        )\n\n        return custom_getter", "docstring": "Allows child models to create model's component objects, such as optimizer(s), memory(s), etc..\nCreates all tensorflow functions via tf.make_template calls on all the class' \"tf_\"-methods.\n\nArgs:\ncustom_getter: The `custom_getter_` object to use for `tf.make_template` when creating TensorFlow functions.\nIf None, use a default custom_getter_.\n\nReturns: The custom_getter passed in (or a default one if custom_getter was None).", "source": "juraj-google-style"}
{"code": "def onTagDel(self, name, func):\n        \n        if '*' in name:\n            self.ontagdelglobs.add(name, func)\n        else:\n            self.ontagdels[name].append(func)", "docstring": "Register a callback for tag deletion.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "juraj-google-style"}
{"code": "def get_course_and_course_run(self, course_run_id):\n        \n        \n        course_id = parse_course_key(course_run_id)\n        \n        course = self.get_course_details(course_id)\n\n        course_run = None\n        if course:\n            \n            course_run = None\n            course_runs = [course_run for course_run in course['course_runs'] if course_run['key'] == course_run_id]\n            if course_runs:\n                course_run = course_runs[0]\n\n        return course, course_run", "docstring": "Return the course and course run metadata for the given course run ID.\n\nArguments:\ncourse_run_id (str): The course run ID.\n\nReturns:\ntuple: The course metadata and the course run metadata.", "source": "juraj-google-style"}
{"code": "def _find_penultimate_layer(model, layer_idx, penultimate_layer_idx):\n    \n    if penultimate_layer_idx is None:\n        for idx, layer in utils.reverse_enumerate(model.layers[:layer_idx - 1]):\n            if isinstance(layer, Wrapper):\n                layer = layer.layer\n            if isinstance(layer, (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):\n                penultimate_layer_idx = idx\n                break\n\n    if penultimate_layer_idx is None:\n        raise ValueError('Unable to determine penultimate `Conv` or `Pooling` '\n                         'layer for layer_idx: {}'.format(layer_idx))\n\n    \n    if layer_idx < 0:\n        layer_idx = len(model.layers) + layer_idx\n    if penultimate_layer_idx > layer_idx:\n        raise ValueError('`penultimate_layer_idx` needs to be before `layer_idx`')\n\n    return model.layers[penultimate_layer_idx]", "docstring": "Searches for the nearest penultimate `Conv` or `Pooling` layer.\n\nArgs:\nmodel: The `keras.models.Model` instance.\nlayer_idx: The layer index within `model.layers`.\npenultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate\n`Conv` or `Pooling` layer is used.\n\nReturns:\nThe penultimate layer.", "source": "juraj-google-style"}
{"code": "def from_text_vision_configs(cls, text_config: Siglip2TextConfig, vision_config: Siglip2VisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`Siglip2Config`] (or a derived class) from siglip2 text model configuration and siglip2 vision\nmodel configuration.\n\nReturns:\n[`Siglip2Config`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def grab_data(self, f_start=None, f_stop=None, t_start=None, t_stop=None, if_id=0):\n    self.freqs = self.populate_freqs()\n    self.timestamps = self.populate_timestamps()\n    if (f_start is None):\n        f_start = self.freqs[0]\n    if (f_stop is None):\n        f_stop = self.freqs[(- 1)]\n    i0 = np.argmin(np.abs((self.freqs - f_start)))\n    i1 = np.argmin(np.abs((self.freqs - f_stop)))\n    if (i0 < i1):\n        plot_f = self.freqs[i0:(i1 + 1)]\n        plot_data = np.squeeze(self.data[(t_start:t_stop, ..., i0:(i1 + 1))])\n    else:\n        plot_f = self.freqs[i1:(i0 + 1)]\n        plot_data = np.squeeze(self.data[(t_start:t_stop, ..., i1:(i0 + 1))])\n    return (plot_f, plot_data)", "docstring": "Extract a portion of data by frequency range.\n\nArgs:\nf_start (float): start frequency in MHz\nf_stop (float): stop frequency in MHz\nif_id (int): IF input identification (req. when multiple IFs in file)\n\nReturns:\n(freqs, data) (np.arrays): frequency axis in MHz and data subset", "source": "codesearchnet"}
{"code": "def _ConvertAttributeValueToDict(cls, attribute_value):\n    if isinstance(attribute_value, py2to3.BYTES_TYPE):\n        encoded_value = binascii.b2a_qp(attribute_value)\n        encoded_value = codecs.decode(encoded_value, 'ascii')\n        attribute_value = {'__type__': 'bytes', 'stream': '{0:s}'.format(encoded_value)}\n    elif isinstance(attribute_value, (list, tuple)):\n        json_list = []\n        for list_element in attribute_value:\n            json_dict = cls._ConvertAttributeValueToDict(list_element)\n            json_list.append(json_dict)\n        if isinstance(attribute_value, list):\n            attribute_value = json_list\n        else:\n            attribute_value = {'__type__': 'tuple', 'values': json_list}\n    elif isinstance(attribute_value, collections.Counter):\n        attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)\n    elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):\n        attribute_value = cls._ConvertPathSpecToDict(attribute_value)\n    elif isinstance(attribute_value, containers_interface.AttributeContainer):\n        attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)\n    return attribute_value", "docstring": "Converts an attribute value into a JSON dictionary.\n\nArgs:\nattribute_value (object): an attribute value.\n\nReturns:\ndict|list: The JSON serialized object which can be a dictionary or a list.", "source": "codesearchnet"}
{"code": "def AddClass(self, class_name, gtfs_class):\n    if (class_name in self._class_mapping):\n        raise problems.DuplicateMapping(class_name)\n    self._class_mapping[class_name] = gtfs_class", "docstring": "Adds an entry to the list of known classes.\n\nArgs:\nclass_name: A string with name through which gtfs_class is to be made\naccessible.\ngtfs_class: The class to be added.\nRaises:\nDuplicateMapping if class_name is already present in the class mapping.", "source": "codesearchnet"}
{"code": "def argument_parser(args):\n    parser = argparse.ArgumentParser(prog='nagaram', description='Finds Scabble anagrams.', formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)\n    parser.add_argument('-h', '--help', dest='help', action='store_true', default=False)\n    parser.add_argument('--sowpods', dest='sowpods', action='store_true', default=False)\n    parser.add_argument('--length', '-l', dest='length', action='store_true', default=False)\n    parser.add_argument('--starts-with', '-s', dest='starts_with', metavar='chars', default='', nargs=1, type=str)\n    parser.add_argument('--ends-with', '-e', dest='ends_with', metavar='chars', default='', nargs=1, type=str)\n    parser.add_argument('--version', '-v', action='version', version='Nagaram {0} (Released: {1})'.format(nagaram.__version__, nagaram.__release_date__))\n    parser.add_argument(dest='wordlist', metavar='letters to find anagrams with (? for anything, _ for blanks)', nargs=argparse.REMAINDER)\n    settings = parser.parse_args(args)\n    if settings.help:\n        raise SystemExit(nagaram.__doc__.strip())\n    if (not settings.wordlist):\n        raise SystemExit(parser.print_usage())\n    if settings.starts_with:\n        settings.starts_with = settings.starts_with[0]\n    if settings.ends_with:\n        settings.ends_with = settings.ends_with[0]\n    return (settings.wordlist, settings.sowpods, settings.length, settings.starts_with, settings.ends_with)", "docstring": "Argparse logic, command line options.\n\nArgs:\nargs: sys.argv[1:], everything passed to the program after its name\n\nReturns:\nA tuple of:\na list of words/letters to search\na boolean to declare if we want to use the sowpods words file\na boolean to declare if we want to output anagrams by length\na string of starting characters to find anagrams based on\na string of ending characters to find anagrams based on\n\nRaises:\nSystemExit if the user passes invalid arguments, --version or --help", "source": "codesearchnet"}
{"code": "def get_weights(model_hparams, vocab_size, hidden_dim=None):\n  \n  if hidden_dim is None:\n    hidden_dim = model_hparams.hidden_size\n  num_shards = model_hparams.symbol_modality_num_shards\n  shards = []\n  for i in range(num_shards):\n    shard_size = (vocab_size \n        1 if i < vocab_size % num_shards else 0)\n    var_name = \"weights_%d\" % i\n    shards.append(\n        tf.get_variable(\n            var_name, [shard_size, hidden_dim],\n            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))\n  if num_shards == 1:\n    ret = shards[0]\n  else:\n    ret = tf.concat(shards, 0)\n  \n  if not tf.executing_eagerly():\n    ret = common_layers.convert_gradient_to_tensor(ret)\n  return ret", "docstring": "Create or get concatenated embedding or softmax variable.\n\nArgs:\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\nhidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size\n\nReturns:\na list of num_shards Tensors.", "source": "juraj-google-style"}
{"code": "def _check_job_status(self, job, desc, status_key_name):\n    status = desc[status_key_name]\n    status = _STATUS_CODE_TABLE.get(status, status)\n    if ((status != 'Completed') and (status != 'Stopped')):\n        reason = desc.get('FailureReason', '(No reason provided)')\n        job_type = status_key_name.replace('JobStatus', ' job')\n        raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason))", "docstring": "Check to see if the job completed successfully and, if not, construct and\nraise a ValueError.\n\nArgs:\njob (str): The name of the job to check.\ndesc (dict[str, str]): The result of ``describe_training_job()``.\nstatus_key_name (str): Status key name to check for.\n\nRaises:\nValueError: If the training job fails.", "source": "codesearchnet"}
{"code": "def detect_extracellular_compartment(model):\n    extracellular_key = Counter()\n    for reaction in model.reactions:\n        equation = reaction.equation\n        if (equation is None):\n            continue\n        if (len(equation.compounds) == 1):\n            (compound, _) = equation.compounds[0]\n            compartment = compound.compartment\n            extracellular_key[compartment] += 1\n    if (len(extracellular_key) == 0):\n        return None\n    else:\n        (best_key, _) = extracellular_key.most_common(1)[0]\n    logger.info('{} is extracellular compartment'.format(best_key))\n    return best_key", "docstring": "Detect the identifier for equations with extracellular compartments.\n\nArgs:\nmodel: :class:`NativeModel`.", "source": "codesearchnet"}
{"code": "def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    match = Match('^(.*)\\\\[\\\\s*(?:=|&[^\\\\w])', line)\n    if match:\n        (line, _, pos) = CloseExpression(clean_lines, linenum, len(match.group(1)))\n        if ((pos >= 0) and Match('^\\\\s*[{(]', line[pos:])):\n            error(filename, linenum, 'build/c++11', 4, 'Default lambda captures are an unapproved C++ feature.')", "docstring": "Check that default lambda captures are not used.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def GetNames(cls):\n    names = []\n    for plugin_class in cls._plugins.values():\n        name = getattr(plugin_class, 'ARTIFACT_DEFINITION_NAME', None)\n        if name:\n            names.append(name)\n    return names", "docstring": "Retrieves the names of the registered artifact definitions.\n\nReturns:\nlist[str]: registered artifact definitions names.", "source": "codesearchnet"}
{"code": "def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):\n    source_file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n    if (not source_file_object):\n        return\n    try:\n        with open(destination_file, 'wb') as destination_file_object:\n            source_file_object.seek(0, os.SEEK_SET)\n            data = source_file_object.read(self._COPY_BUFFER_SIZE)\n            while data:\n                destination_file_object.write(data)\n                data = source_file_object.read(self._COPY_BUFFER_SIZE)\n    finally:\n        source_file_object.close()", "docstring": "Writes the contents of the source file entry to a destination file.\n\nNote that this function will overwrite an existing file.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry whose content is to be written.\ndata_stream_name (str): name of the data stream whose content is to be\nwritten.\ndestination_file (str): path of the destination file.", "source": "codesearchnet"}
{"code": "def pop_callback(self, callback):\n    return self.callback_handler.pop_callback(callback)", "docstring": "Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.\n\nIf the callback is not found, returns `None` (and no error is raised).\n\nArgs:\ncallback (`type` or [`~transformers.TrainerCallback]`):\nA [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the\nfirst case, will pop the first member of that class found in the list of callbacks.\n\nReturns:\n[`~transformers.TrainerCallback`]: The callback removed, if found.", "source": "github-repos"}
{"code": "def log2(value: types.FloatTensor) -> types.FloatTensor:\n    return tf.math.log(value) / tf.math.log(tf.constant(2, dtype=value.dtype))", "docstring": "Returns the point-wise base-2 logarithm a given `Tensor`.\n\n```python\nimport tensorflow as tf\nimport tf_quant_finance as tff\n\n# Example: Computing the base-2 logarithm of a given vector.\n\ntff.math.qmc.utils.log2(tf.constant([1, 2, 4, 8, 16], dtype=tf.float32))\n# ==> tf.Tensor([0., 1., 2., 3., 4.], shape=(5,), dtype=float32)\n```\n\nArgs:\nvalue: Positive scalar `Tensor` of real values.\n\nReturns:\n`Tensor` with the same `shape` and `dtype` as `value` equal to `ln(value) /\nln(2)`.", "source": "github-repos"}
{"code": "def _build_job_meta(cls, job_dir):\n        \n        meta_file = os.path.join(job_dir, JOB_META_FILE)\n        meta = parse_json(meta_file)\n\n        if not meta:\n            job_name = job_dir.split(\"/\")[-1]\n            user = os.environ.get(\"USER\", None)\n            meta = {\n                \"job_id\": job_name,\n                \"job_name\": job_name,\n                \"user\": user,\n                \"type\": \"ray\",\n                \"start_time\": os.path.getctime(job_dir),\n                \"end_time\": None,\n                \"best_trial_id\": None,\n            }\n\n        if meta.get(\"start_time\", None):\n            meta[\"start_time\"] = timestamp2date(meta[\"start_time\"])\n\n        return meta", "docstring": "Build meta file for job.\n\nArgs:\njob_dir (str): Directory path of the job.\n\nReturn:\nA dict of job meta info.", "source": "juraj-google-style"}
{"code": "def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):\n    agg_grads = []\n    num_devices = len(avail_devices)\n    group_size = num_devices \n    for i, single_grads in enumerate(zip(*replica_grads)):\n        group_0_main_device = i % num_devices\n        group_1_main_device = (group_0_main_device + group_size) % num_devices\n        if group_0_main_device < group_size:\n            group_0_begin = 0\n            group_1_begin = group_size\n        else:\n            group_0_begin = group_size\n            group_1_begin = 0\n        group_0_device_grads = single_grads[group_0_begin:group_0_begin + group_size]\n        with ops.device(avail_devices[group_0_main_device]):\n            group_0_agg_grads, _ = aggregate_single_gradient_using_copy(group_0_device_grads, False, False)\n        group_1_device_grads = single_grads[group_1_begin:group_1_begin + group_size]\n        with ops.device(avail_devices[group_1_main_device]):\n            group_1_agg_grads, _ = aggregate_single_gradient_using_copy(group_1_device_grads, False, False)\n        with ops.device(avail_devices[group_0_main_device]):\n            (agg_total_grads, _), _ = aggregate_single_gradient_using_copy([group_0_agg_grads, group_1_agg_grads], False, False)\n        with ops.device(avail_devices[group_0_main_device]):\n            group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)\n        with ops.device(avail_devices[group_1_main_device]):\n            group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)\n        agg_grads_bcast = []\n        for j in range(len(single_grads)):\n            with ops.device(avail_devices[j]):\n                if (group_0_main_device < group_size) == (j < group_size):\n                    src_device_grad = group_0_agg_grads_bcast\n                else:\n                    src_device_grad = group_1_agg_grads_bcast\n                agg_grads_bcast.append(array_ops.identity(src_device_grad))\n        agg_grads.append([(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])\n    agg_grads = list(zip(*agg_grads))\n    return agg_grads", "docstring": "Aggregate gradients using hierarchical copies.\n\nArgs:\navail_devices: available GPU devices.\nreplica_grads: List of lists of (gradient, variable) tuples. The outer list\nis over replicas. The inner list is over individual gradients.\n\nReturns:\nThe list of (aggregated_gradient, variable), where the gradient has been\nsummed across all replicas and the variable is chosen from the first\nreplica.", "source": "github-repos"}
{"code": "def attachment_to_multidim_measurement(attachment, name=None):\n    data = json.loads(attachment.data)\n    name = (name or data.get('name'))\n    attachment_dims = data.get('dimensions', [])\n    attachment_values = data.get('value')\n    attachment_outcome_str = data.get('outcome')\n    if (attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME):\n        try:\n            attachment_outcome_str = test_runs_pb2.Status.Name(int(attachment_outcome_str))\n        except ValueError:\n            attachment_outcome_str = None\n    outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(attachment_outcome_str)\n    _lazy_load_units_by_code()\n    dims = []\n    for d in attachment_dims:\n        unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)\n        description = d.get('name', '')\n        dims.append(measurements.Dimension(description=description, unit=unit))\n    if (attachment_values and (len(dims) == len(attachment_values[0]))):\n        units_ = dims[(- 1)].unit\n        dimensions = dims[:(- 1)]\n    else:\n        units_ = None\n        dimensions = dims\n    measured_value = measurements.DimensionedMeasuredValue(name=name, num_dimensions=len(dimensions))\n    for row in attachment_values:\n        coordinates = tuple(row[:(- 1)])\n        val = row[(- 1)]\n        measured_value[coordinates] = val\n    measurement = measurements.Measurement(name=name, units=units_, dimensions=tuple(dimensions), measured_value=measured_value, outcome=outcome)\n    return measurement", "docstring": "Convert an OpenHTF test record attachment to a multi-dim measurement.\n\nThis is a best effort attempt to reverse, as some data is lost in converting\nfrom a multidim to an attachment.\n\nArgs:\nattachment: an `openhtf.test_record.Attachment` from a multi-dim.\nname: an optional name for the measurement.  If not provided will use the\nname included in the attachment.\n\nReturns:\nAn multi-dim `openhtf.Measurement`.", "source": "codesearchnet"}
{"code": "def save_spectre_plot(self, filename=\"spectre.pdf\", img_format=\"pdf\",\n                          sigma=0.05, step=0.01):\n        \n        d, plt = self.get_spectre_plot(sigma, step)\n        plt.savefig(filename, format=img_format)", "docstring": "Save matplotlib plot of the spectre to a file.\n\nArgs:\nfilename: Filename to write to.\nimg_format: Image format to use. Defaults to EPS.\nsigma: Full width at half maximum in eV for normal functions.\nstep: bin interval in eV", "source": "juraj-google-style"}
{"code": "def _remove_squeezable_dimensions(predictions, labels, weights):\n    predictions = ops.convert_to_tensor(predictions)\n    if labels is not None:\n        labels, predictions = confusion_matrix.remove_squeezable_dimensions(labels, predictions)\n        predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n    if weights is None:\n        return (predictions, labels, None)\n    weights = ops.convert_to_tensor(weights)\n    weights_shape = weights.get_shape()\n    weights_rank = weights_shape.ndims\n    if weights_rank == 0:\n        return (predictions, labels, weights)\n    predictions_shape = predictions.get_shape()\n    predictions_rank = predictions_shape.ndims\n    if predictions_rank is not None and weights_rank is not None:\n        if weights_rank - predictions_rank == 1:\n            weights = array_ops.squeeze(weights, [-1])\n        elif predictions_rank - weights_rank == 1:\n            weights = array_ops.expand_dims(weights, [-1])\n    else:\n        weights_rank_tensor = array_ops.rank(weights)\n        rank_diff = weights_rank_tensor - array_ops.rank(predictions)\n\n        def _maybe_expand_weights():\n            return cond.cond(math_ops.equal(rank_diff, -1), lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)\n        if weights_rank is not None and (not weights_shape.dims[-1].is_compatible_with(1)):\n            maybe_squeeze_weights = lambda: weights\n        else:\n            maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])\n\n        def _maybe_adjust_weights():\n            return cond.cond(math_ops.equal(rank_diff, 1), maybe_squeeze_weights, _maybe_expand_weights)\n        weights = cond.cond(math_ops.equal(weights_rank_tensor, 0), lambda: weights, _maybe_adjust_weights)\n    return (predictions, labels, weights)", "docstring": "Squeeze or expand last dim if needed.\n\nSqueezes last dim of `predictions` or `labels` if their rank differs by 1\n(using confusion_matrix.remove_squeezable_dimensions).\nSqueezes or expands last dim of `weights` if its rank differs by 1 from the\nnew rank of `predictions`.\n\nIf `weights` is scalar, it is kept scalar.\n\nThis will use static shape if available. Otherwise, it will add graph\noperations, which could result in a performance hit.\n\nArgs:\npredictions: Predicted values, a `Tensor` of arbitrary dimensions.\nlabels: Optional label `Tensor` whose dimensions match `predictions`.\nweights: Optional weight scalar or `Tensor` whose dimensions match\n`predictions`.\n\nReturns:\nTuple of `predictions`, `labels` and `weights`. Each of them possibly has\nthe last dimension squeezed, `weights` could be extended by one dimension.", "source": "github-repos"}
{"code": "def _make_request(self, method, path, data=None, **kwargs):\n        \n        _logger.debug(\"Method for request is %s\" % method)\n        url = self._construct_full_url(path)\n        _logger.debug(\"URL for request is %s\" % url)\n        self._auth_info.populate_request_data(kwargs)\n        _logger.debug(\"The arguments are %s\" % kwargs)\n\n        \n        if self._auth_info._headers:\n            kwargs.setdefault('headers', {}).update(self._auth_info._headers)\n\n        res = requests.request(method, url, data=data, **kwargs)\n\n        if res.ok:\n            _logger.debug(\"Request was successful.\")\n            return res.content.decode('utf-8')\n\n        if hasattr(res, 'content'):\n            _logger.debug(\"Response was %s:%s\", res.status_code, res.content)\n            raise self._exception_for(res.status_code)(\n                res.content, http_code=res.status_code\n            )\n        else:\n            msg = \"No response from URL: %s\" % res.request.url\n            _logger.error(msg)\n            raise NoResponseError(msg)", "docstring": "Make a request.\n\nUse the `requests` module to actually perform the request.\n\nArgs:\n`method`: The method to use.\n`path`: The path to the resource.\n`data`: Any data to send (for POST and PUT requests).\n`kwargs`: Other parameters for `requests`.\nReturns:\nThe content of the response.\nRaises:\nAn exception depending on the HTTP status code of the response.", "source": "juraj-google-style"}
{"code": "class AverageScore(ScoreAggregation):\n\n    def __init__(self, **kwargs):\n        super().__init__(agg_func=statistics.mean, **kwargs)", "docstring": "Aggregates anomaly scores by calculating their average.\n\nThis `AggregationFn` computes the average of the anomaly scores from a\ncollection of `AnomalyPrediction` objects.\n\nArgs:\n**kwargs: Additional keyword arguments to pass to the base\n`ScoreAggregation` class.", "source": "github-repos"}
{"code": "def decode(byte_str, allow_none=False):\n    if ((byte_str is None) and allow_none):\n        return ''\n    if (not isinstance(byte_str, bytes)):\n        raise ValueError('The argument {} must be a bytes object.'.format(byte_str))\n    if (sys.version_info >= (3, 0)):\n        return byte_str.decode('ascii')\n    else:\n        return byte_str", "docstring": "Make this unicode in Python 3, otherwise leave it as bytes.\n\nArgs:\nbyte_str: The byte string to decode.\nallow_none: If true, then we will allow byte_str to be None in which\ncase we will return an empty string. TODO(rkn): Remove this flag.\nThis is only here to simplify upgrading to flatbuffers 1.10.0.\n\nReturns:\nA byte string in Python 2 and a unicode string in Python 3.", "source": "codesearchnet"}
{"code": "class StableDropout(nn.Module):\n\n    def __init__(self, drop_prob):\n        super().__init__()\n        self.drop_prob = drop_prob\n        self.count = 0\n        self.context_stack = None\n\n    def forward(self, x):\n        \n        if self.training and self.drop_prob > 0:\n            return XDropout.apply(x, self.get_context())\n        return x\n\n    def clear_context(self):\n        self.count = 0\n        self.context_stack = None\n\n    def init_context(self, reuse_mask=True, scale=1):\n        if self.context_stack is None:\n            self.context_stack = []\n        self.count = 0\n        for c in self.context_stack:\n            c.reuse_mask = reuse_mask\n            c.scale = scale\n\n    def get_context(self):\n        if self.context_stack is not None:\n            if self.count >= len(self.context_stack):\n                self.context_stack.append(DropoutContext())\n            ctx = self.context_stack[self.count]\n            ctx.dropout = self.drop_prob\n            self.count += 1\n            return ctx\n        else:\n            return self.drop_prob", "docstring": "Optimized dropout module for stabilizing the training\n\nArgs:\ndrop_prob (float): the dropout probabilities", "source": "github-repos"}
{"code": "def persist_perf(run, session, svg_path):\n    from benchbuild.utils import schema as s\n    with open(svg_path, 'r') as svg_file:\n        svg_data = svg_file.read()\n        session.add(s.Metadata(name='perf.flamegraph', value=svg_data, run_id=run.id))", "docstring": "Persist the flamegraph in the database.\n\nThe flamegraph exists as a SVG image on disk until we persist it in the\ndatabase.\n\nArgs:\nrun: The run we attach these perf measurements to.\nsession: The db transaction we belong to.\nsvg_path: The path to the SVG file we want to store.", "source": "codesearchnet"}
{"code": "def parse(self, content):\n        \n        \n        \n        declarations = self.REGEX_IMPORT_RULE.findall(\n            self.remove_comments(content)\n        )\n        return self.flatten_rules(declarations)", "docstring": "Parse a stylesheet document with a regex (``REGEX_IMPORT_RULE``)\nto extract all import rules and return them.\n\nArgs:\ncontent (str): A SCSS source.\n\nReturns:\nlist: Finded paths in import rules.", "source": "juraj-google-style"}
{"code": "def round(x, decimals=0):\n    if any_symbolic_tensors((x,)):\n        return Round(decimals).symbolic_call(x)\n    return backend.numpy.round(x, decimals)", "docstring": "Evenly round to the given number of decimals.\n\nArgs:\nx: Input tensor.\ndecimals: Number of decimal places to round to. Defaults to `0`.\n\nReturns:\nOutput tensor.", "source": "github-repos"}
{"code": "def _encode_queries(self, program: cfg.Program) -> list[dict[str, Any]]:\n    metrics = program.calculate_metrics()\n    solvers = metrics.solver_metrics\n    enc_queries = []\n    query_id = -1\n    for solver_idx, solver in enumerate(solvers):\n        for query in solver.query_metrics:\n            query_id += 1\n            steps = []\n            for step in query.steps:\n                steps.append({'_type': 'QueryStep', 'node': step.node, 'depth': step.depth, 'bindings': step.bindings})\n            enc_queries.append({'_type': 'Query', 'solver_idx': solver_idx, 'start_node': query.start_node, 'end_node': query.end_node, 'initial_binding_count': query.initial_binding_count, 'shortcircuited': query.shortcircuited, 'from_cache': query.from_cache, 'steps': steps})\n    return enc_queries", "docstring": "Encodes information about solver queries from a Program's metrics.\n\nThe queries are numbered in the order they were recorded.\n\nArgs:\nprogram: a cfg.Program.\n\nReturns:\nA list of dictionaries that correspond to SerializedQuery.", "source": "github-repos"}
{"code": "async def _handle_watermark_notification(self, watermark_notification):\n        \n        conv_id = watermark_notification.conversation_id.id\n        res = parsers.parse_watermark_notification(watermark_notification)\n        await self.on_watermark_notification.fire(res)\n        try:\n            conv = await self._get_or_fetch_conversation(conv_id)\n        except exceptions.NetworkError:\n            logger.warning(\n                'Failed to fetch conversation for watermark notification: %s',\n                conv_id\n            )\n        else:\n            await conv.on_watermark_notification.fire(res)", "docstring": "Receive WatermarkNotification and update the conversation.\n\nArgs:\nwatermark_notification: hangouts_pb2.WatermarkNotification instance", "source": "juraj-google-style"}
{"code": "def _create_m_objective(w, X):\n    (clusters, cells) = w.shape\n    genes = X.shape[0]\n    w_sum = w.sum(1)\n\n    def objective(m):\n        m = m.reshape((X.shape[0], w.shape[0]))\n        d = (m.dot(w) + eps)\n        temp = (X / d)\n        w2 = w.dot(temp.T)\n        deriv = (w_sum - w2.T)\n        return ((np.sum((d - (X * np.log(d)))) / genes), (deriv.flatten() / genes))\n    return objective", "docstring": "Creates an objective function and its derivative for M, given W and X\n\nArgs:\nw (array): clusters x cells\nX (array): genes x cells", "source": "codesearchnet"}
{"code": "def update(self, data=None, timeout=-1, force=True):\n        \n        uri = self.data['uri']\n\n        resource = deepcopy(self.data)\n        resource.update(data)\n\n        self.data = self._helper.update(resource, uri, force, timeout)\n\n        return self", "docstring": "Updates server profile template.\n\nArgs:\ndata: Data to update the resource.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nforce: Force the update operation.\n\nReturns:\nA dict with the updated resource data.", "source": "juraj-google-style"}
{"code": "def parse_report_file(input_, nameservers=None, dns_timeout=2.0, strip_attachment_payloads=False, parallel=False):\n    if (type(input_) == str):\n        file_object = open(input_, 'rb')\n    elif (type(input_) == bytes):\n        file_object = BytesIO(input_)\n    else:\n        file_object = input_\n    content = file_object.read()\n    try:\n        report = parse_aggregate_report_file(content, nameservers=nameservers, dns_timeout=dns_timeout, parallel=parallel)\n        results = OrderedDict([('report_type', 'aggregate'), ('report', report)])\n    except InvalidAggregateReport:\n        try:\n            sa = strip_attachment_payloads\n            results = parse_report_email(content, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=sa, parallel=parallel)\n        except InvalidDMARCReport:\n            raise InvalidDMARCReport('Not a valid aggregate or forensic report')\n    return results", "docstring": "Parses a DMARC aggregate or forensic file at the given path, a\nfile-like object. or bytes\n\nArgs:\ninput_: A path to a file, a file like object, or bytes\nnameservers (list): A list of one or more nameservers to use\n(Cloudflare's public DNS resolvers by default)\ndns_timeout (float): Sets the DNS timeout in seconds\nstrip_attachment_payloads (bool): Remove attachment payloads from\nforensic report results\nparallel (bool): Parallel processing\n\nReturns:\nOrderedDict: The parsed DMARC report", "source": "codesearchnet"}
{"code": "def read(self):\n    try:\n        buf = os.read(self._fd, 8)\n    except OSError as e:\n        raise LEDError(e.errno, ('Reading LED brightness: ' + e.strerror))\n    try:\n        os.lseek(self._fd, 0, os.SEEK_SET)\n    except OSError as e:\n        raise LEDError(e.errno, ('Rewinding LED brightness: ' + e.strerror))\n    return int(buf)", "docstring": "Read the brightness of the LED.\n\nReturns:\nint: Current brightness.\n\nRaises:\nLEDError: if an I/O or OS error occurs.", "source": "codesearchnet"}
{"code": "def call(self, x, y, bias, cache=None):\n    q = self.q_dense_layer(x)\n    k = self.k_dense_layer(y)\n    v = self.v_dense_layer(y)\n    if (cache is not None):\n        k = tf.concat([cache['k'], k], axis=1)\n        v = tf.concat([cache['v'], v], axis=1)\n        cache['k'] = k\n        cache['v'] = v\n    q = self.split_heads(q)\n    k = self.split_heads(k)\n    v = self.split_heads(v)\n    depth = (self.hidden_size \n    q *= (depth ** (- 0.5))\n    logits = tf.matmul(q, k, transpose_b=True)\n    logits += bias\n    weights = tf.nn.softmax(logits, name='attention_weights')\n    if self.train:\n        mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT, value=self.attention_dropout)\n        weights = tf.nn.dropout(weights, (1.0 - self.attention_dropout))\n    attention_output = tf.matmul(weights, v)\n    attention_output = self.combine_heads(attention_output)\n    attention_output = self.output_dense_layer(attention_output)\n    return attention_output", "docstring": "Apply attention mechanism to x and y.\n\nArgs:\nx: a tensor with shape [batch_size, length_x, hidden_size]\ny: a tensor with shape [batch_size, length_y, hidden_size]\nbias: attention bias that will be added to the result of the dot product.\ncache: (Used during prediction) dictionary with tensors containing results\nof previous attentions. The dictionary must have the items:\n{\"k\": tensor with shape [batch_size, i, key_channels],\n\"v\": tensor with shape [batch_size, i, value_channels]}\nwhere i is the current decoded length.\n\nReturns:\nAttention layer output with shape [batch_size, length_x, hidden_size]", "source": "codesearchnet"}
{"code": "def parse_value(self, text: str) -> Optional[bool]:\n    if (text == 'true'):\n        return True\n    if (text == 'false'):\n        return False", "docstring": "Parse boolean value.\n\nArgs:\ntext: String representation of the value.", "source": "codesearchnet"}
{"code": "def mach53(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[23] == '0':\n        return None\n\n    mach = bin2int(d[24:33]) * 0.008\n    return round(mach, 3)", "docstring": "MACH number, DBS 5,3 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message\n\nReturns:\nfloat: MACH number", "source": "juraj-google-style"}
{"code": "def synchronize_clock(self, offset):\n        \n\n        self.time_offset = offset - self.uptime\n        self.is_utc = True\n\n        if self.has_rtc:\n            self.stored_offset = self.time_offset", "docstring": "Persistently synchronize the clock to UTC time.\n\nArgs:\noffset (int): The number of seconds since 1/1/2000 00:00Z", "source": "juraj-google-style"}
{"code": "def add_reader(self, fd: IFileLike, callback: typing.Callable[([IFileLike], typing.Any)]) -> None:\n    raise NotImplementedError()", "docstring": "Add a file descriptor to the processor and wait for READ.\n\nArgs:\nfd (IFileLike): Any obect that exposes a 'fileno' method that\nreturns a valid file descriptor integer.\ncallback (typing.Callable[[IFileLike], typing.Any]): A function\nthat consumes the IFileLike object whenever the READ event is\nfired.", "source": "codesearchnet"}
{"code": "def handle_enterprise_logistration(backend, user, **kwargs):\n    \n    request = backend.strategy.request\n    enterprise_customer = get_enterprise_customer_for_running_pipeline(\n        request,\n        {\n            'backend': backend.name,\n            'kwargs': kwargs\n        }\n    )\n    if enterprise_customer is None:\n        \n        return\n\n    \n    enterprise_customer_user, _ = EnterpriseCustomerUser.objects.update_or_create(\n        enterprise_customer=enterprise_customer,\n        user_id=user.id\n    )\n    enterprise_customer_user.update_session(request)", "docstring": "Perform the linking of user in the process of logging to the Enterprise Customer.\n\nArgs:\nbackend: The class handling the SSO interaction (SAML, OAuth, etc)\nuser: The user object in the process of being logged in with\n**kwargs: Any remaining pipeline variables", "source": "juraj-google-style"}
{"code": "def _GetResponseClass(self, method_descriptor):\n    \n    if method_descriptor.containing_service != self.descriptor:\n      raise RuntimeError(\n          'GetResponseClass() given method descriptor for wrong service type.')\n    return method_descriptor.output_type._concrete_class", "docstring": "Returns the class of the response protocol message.\n\nArgs:\nmethod_descriptor: Descriptor of the method for which to return the\nresponse protocol message class.\n\nReturns:\nA class that represents the output protocol message of the specified\nmethod.", "source": "juraj-google-style"}
{"code": "def __init__(self, finders=None):\n        \n        if finders is None:\n            self.finders = [LocalPackageFinder(), InstalledPackageFinder()]\n        else:\n            self.finders = [f() for f in finders]", "docstring": "Initialization method.\n\nArgs:\nfinders (list of classes):\nlist of package finder classes (not instances) in a specific\norder. Default: [LocalPackageFinder, InstalledPackageFinder].", "source": "juraj-google-style"}
{"code": "def set_button_map(self, button_map):\n    assert (self.finger_count > 0), 'This device does not support tapping'\n    return self._libinput.libinput_device_config_tap_set_button_map(self._handle, button_map)", "docstring": "Set the finger number to button number mapping for tap-to-click.\n\nThe default mapping on most devices is to have a 1, 2 and 3 finger tap\nto map to the left, right and middle button, respectively. A device may\npermit changing the button mapping but disallow specific maps. In this\ncase :attr:`~libinput.constant.ConfigStatus.UNSUPPORTED` is returned,\nthe caller is expected to handle this case correctly.\n\nChanging the button mapping may not take effect immediately, the device\nmay wait until it is in a neutral state before applying any changes.\n\nThe mapping may be changed when tap-to-click is disabled. The new\nmapping takes effect when tap-to-click is enabled in the future.\n\nIf :attr:`finger_count` is 0, this method raises :exc:`AssertionError`.\n\nArgs:\nbutton_map (~libinput.constant.TapButtonMap): The new\nfinger-to-button number mapping.\nReturns:\n~libinput.constant.ConfigStatus: A config status code.\nRaises:\nAssertionError", "source": "codesearchnet"}
{"code": "def get_public_datasets_and_tokens(self):\n        \n        datasets = {}\n        tokens = self.get_public_tokens()\n        for t in tokens:\n            dataset = self.get_token_dataset(t)\n            if dataset in datasets:\n                datasets[dataset].append(t)\n            else:\n                datasets[dataset] = [t]\n        return datasets", "docstring": "NOTE: VERY SLOW!\nGet a dictionary relating key:dataset to value:[tokens] that rely\non that dataset.\n\nArguments:\nNone\n\nReturns:\ndict: relating key:dataset to value:[tokens]", "source": "juraj-google-style"}
{"code": "def _escaped_token_to_subtoken_ids(self, escaped_token):\n    \n    return [\n        self._subtoken_string_to_id[subtoken]\n        for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)\n    ]", "docstring": "Converts an escaped token string to a list of subtoken IDs.\n\nArgs:\nescaped_token: An escaped token as a unicode string.\nReturns:\nA list of subtoken IDs as integers.", "source": "juraj-google-style"}
{"code": "def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):\n        \n        with open(client_secrets_file, 'r') as json_file:\n            client_config = json.load(json_file)\n\n        return cls.from_client_config(client_config, scopes=scopes, **kwargs)", "docstring": "Creates a :class:`Flow` instance from a Google client secrets file.\n\nArgs:\nclient_secrets_file (str): The path to the client secrets .json\nfile.\nscopes (Sequence[str]): The list of scopes to request during the\nflow.\nkwargs: Any additional parameters passed to\n:class:`requests_oauthlib.OAuth2Session`\n\nReturns:\nFlow: The constructed Flow instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, function_name):\n        \n        params = {\n            'FUNCTION_NAME': function_name\n        }\n\n        super().__init__(\n            'int', 'lib_nmsimplex', [],\n            resource_filename('mot', 'data/opencl/lib_nmsimplex.cl'),\n            var_replace_dict=params)", "docstring": "The NMSimplex algorithm as a reusable library component.\n\nArgs:\nfunction_name (str): the name of the evaluation function to call, defaults to 'evaluate'.\nThis should point to a function with signature:\n\n``double evaluate(local mot_float_type* x, void* data_void);``", "source": "juraj-google-style"}
{"code": "def do_decode(cls, obj, obj_type):\n        \n        \n        if inspect.isclass(obj_type) and issubclass(  \n                obj_type, ConjureBeanType\n        ):\n            return cls.decode_conjure_bean_type(obj, obj_type)  \n\n        elif inspect.isclass(obj_type) and issubclass(  \n                obj_type, ConjureUnionType\n        ):\n            return cls.decode_conjure_union_type(obj, obj_type)\n\n        elif inspect.isclass(obj_type) and issubclass(  \n                obj_type, ConjureEnumType\n        ):\n            return cls.decode_conjure_enum_type(obj, obj_type)\n\n        elif isinstance(obj_type, DictType):\n            return cls.decode_dict(obj, obj_type.key_type, obj_type.value_type)\n\n        elif isinstance(obj_type, ListType):\n            return cls.decode_list(obj, obj_type.item_type)\n\n        elif isinstance(obj_type, OptionalType):\n            return cls.decode_optional(obj, obj_type.item_type)\n\n        return cls.decode_primitive(obj, obj_type)", "docstring": "Decodes json into the specified type\n\nArgs:\nobj: the json object to decode\nelement_type: a class object which is the type we're decoding into.", "source": "juraj-google-style"}
{"code": "def docx_process_simple_text(text: str, width: int) -> str:\n    \n    if width:\n        return '\\n'.join(textwrap.wrap(text, width=width))\n    else:\n        return text", "docstring": "Word-wraps text.\n\nArgs:\ntext: text to process\nwidth: width to word-wrap to (or 0 to skip word wrapping)\n\nReturns:\nwrapped text", "source": "juraj-google-style"}
{"code": "def ParseOptions(cls, options, analysis_plugin):\n    \n    if not isinstance(analysis_plugin, tagging.TaggingAnalysisPlugin):\n      raise errors.BadConfigObject(\n          'Analysis plugin is not an instance of TaggingAnalysisPlugin')\n\n    tagging_file = cls._ParseStringOption(options, 'tagging_file')\n    if not tagging_file:\n      raise errors.BadConfigOption(\n          'Tagging analysis plugin requires a tagging file.')\n\n    tagging_file_path = tagging_file\n    if not os.path.isfile(tagging_file_path):\n      \n      data_location = getattr(options, 'data_location', None)\n      if data_location:\n        tagging_file_path = os.path.join(data_location, tagging_file)\n\n    if not os.path.isfile(tagging_file_path):\n      raise errors.BadConfigOption(\n          'No such tagging file: {0:s}.'.format(tagging_file))\n\n    try:\n      analysis_plugin.SetAndLoadTagFile(tagging_file_path)\n\n    except UnicodeDecodeError:\n      raise errors.BadConfigOption(\n          'Invalid tagging file: {0:s} encoding must be UTF-8.'.format(\n              tagging_file))\n\n    except errors.TaggingFileError as exception:\n      raise errors.BadConfigOption(\n          'Unable to read tagging file: {0:s} with error: {1!s}'.format(\n              tagging_file, exception))", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nanalysis_plugin (AnalysisPlugin): analysis plugin to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"}
{"code": "def has_arg(fn, arg_name):\n    if (sys.version_info < (3,)):\n        if (isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType)):\n            arg_spec = inspect.getargspec(fn)\n        else:\n            try:\n                arg_spec = inspect.getargspec(fn.__call__)\n            except AttributeError:\n                return False\n        return (arg_name in arg_spec.args)\n    elif (sys.version_info < (3, 6)):\n        arg_spec = inspect.getfullargspec(fn)\n        return ((arg_name in arg_spec.args) or (arg_name in arg_spec.kwonlyargs))\n    else:\n        try:\n            signature = inspect.signature(fn)\n        except ValueError:\n            signature = inspect.signature(fn.__call__)\n        parameter = signature.parameters.get(arg_name)\n        if (parameter is None):\n            return False\n        return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY))", "docstring": "Checks if a callable accepts a given keyword argument.\n\nArgs:\nfn: callable to inspect\narg_name: string, keyword argument name to check\n\nReturns:\nbool, whether `fn` accepts a `arg_name` keyword argument.", "source": "codesearchnet"}
{"code": "def _read_file(filename):\n    graph_def = graph_pb2.GraphDef()\n    if not file_io.file_exists(filename):\n        raise IOError(f'File {filename} does not exist.')\n    with file_io.FileIO(filename, 'rb') as f:\n        file_content = f.read()\n    try:\n        graph_def.ParseFromString(file_content)\n        return graph_def\n    except Exception:\n        pass\n    try:\n        text_format.Merge(file_content, graph_def)\n    except text_format.ParseError as e:\n        raise IOError(f'Cannot parse file {filename}: {str(e)}.')\n    return graph_def", "docstring": "Reads a file containing `GraphDef` and returns the protocol buffer.\n\nArgs:\nfilename: `graph_def` filename including the path.\n\nReturns:\nA `GraphDef` protocol buffer.\n\nRaises:\nIOError: If the file doesn't exist, or cannot be successfully parsed.", "source": "github-repos"}
{"code": "def is_locator(self, path, relative=False):\n    if (not relative):\n        path = self.relpath(path)\n    return (path and ('/' not in path.rstrip('/')))", "docstring": "Returns True if path refer to a locator.\n\nDepending the storage, locator may be a bucket or container name,\na hostname, ...\n\nargs:\npath (str): path or URL.\nrelative (bool): Path is relative to current root.\n\nReturns:\nbool: True if locator.", "source": "codesearchnet"}
{"code": "def build(cls, local_scheduler=True, **task_params):\n    luigi.build([cls(**task_params)], local_scheduler=local_scheduler)", "docstring": "Instantiate the task and build it with luigi\n\nArgs:\nlocal_scheduler (bool): use a local scheduler (True, default) or a remote scheduler\ntask_params: parameters to pass to task for instantiation", "source": "codesearchnet"}
{"code": "def FullUpdateFromMap(self, cache, new_map, force_write=False):\n    error_count = 0\n    if len(new_map) == 0 and (not force_write):\n        raise error.EmptyMap('Source map empty during full update, aborting. Use --force-write to override.')\n    error_count = cache.WriteMap(map_data=new_map, force_write=force_write)\n    if error_count == 0:\n        self.WriteModifyTimestamp(new_map.GetModifyTimestamp())\n        self.WriteUpdateTimestamp()\n    return error_count", "docstring": "Write a new map into the provided cache (overwrites).\n\nArgs:\ncache: A nss_cache.caches.Cache object.\nnew_map: A nss_cache.maps.Map object.\nforce_write: A boolean indicating empty maps are okay to write, defaults\nto False which means do not write them.\n\nReturns:\n0 if succesful, non-zero indicating number of failures otherwise.\n\nRaises:\nEmptyMap: Update is an empty map, not raised if force_write=True.", "source": "github-repos"}
{"code": "def key_for_entity_group(cls, key):\n    return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())", "docstring": "Return the key for the entity group containing key.\n\nArgs:\nkey: a key for an entity group whose __entity_group__ key you want.\n\nReturns:\nThe __entity_group__ key for the entity group containing key.", "source": "codesearchnet"}
{"code": "def build_machine(network=None, machine_type=None, preemptible=None, service_account=None, boot_disk_size_gb=None, disks=None, accelerators=None, labels=None, cpu_platform=None, nvidia_driver_version=None):\n    return {'network': network, 'machineType': machine_type, 'preemptible': preemptible, 'serviceAccount': service_account, 'bootDiskSizeGb': boot_disk_size_gb, 'disks': disks, 'accelerators': accelerators, 'labels': labels, 'cpuPlatform': cpu_platform, 'nvidiaDriverVersion': nvidia_driver_version}", "docstring": "Build a VirtualMachine object for a Pipeline request.\n\nArgs:\nnetwork (dict): Network details for the pipeline to run in.\nmachine_type (str): GCE Machine Type string for the pipeline.\npreemptible (bool): Use a preemptible VM for the job.\nservice_account (dict): Service account configuration for the VM.\nboot_disk_size_gb (int): Boot disk size in GB.\ndisks (list[dict]): List of disks to mount.\naccelerators (list[dict]): List of accelerators to attach to the VM.\nlabels (dict[string, string]): Labels for the VM.\ncpu_platform (str): The CPU platform to request.\nnvidia_driver_version (str): The NVIDIA driver version to use when attaching\nan NVIDIA GPU accelerator.\n\nReturns:\nAn object representing a VirtualMachine.", "source": "codesearchnet"}
{"code": "def _create_dir_path(self, file_hash, path=None, hash_list=None):\n    if (hash_list is None):\n        hash_list = list(file_hash)\n    if (not hash_list):\n        raise IOError('Directory structure is too full!')\n    if (not path):\n        path = os.path.join(self.path, hash_list.pop(0))\n    if (not os.path.exists(path)):\n        os.mkdir(path)\n        return self._create_dir_path(file_hash=file_hash, path=path, hash_list=hash_list)\n    files = os.listdir(path)\n    if (file_hash in files):\n        return path\n    if (len(files) < self.dir_limit):\n        return path\n    return self._create_dir_path(file_hash=file_hash, path=os.path.join(path, hash_list.pop(0)), hash_list=hash_list)", "docstring": "Create proper filesystem paths for given `file_hash`.\n\nArgs:\nfile_hash (str): Hash of the file for which the path should be\ncreated.\npath (str, default None): Recursion argument, don't set this.\nhash_list (list, default None): Recursion argument, don't set this.\n\nReturns:\nstr: Created path.", "source": "codesearchnet"}
{"code": "def prune_neighbors(self):\n\n    def _neighbor_check(neighbors, valid):\n        if (not (neighbors == neighbors)):\n            return np.nan\n        valid_keys = (set(valid) & set(neighbors.keys()))\n        d = dict([(k, v) for (k, v) in neighbors.items() if (k in valid_keys)])\n        return d\n    fixed = self.copy()\n    valid = self.get_valid_cell_indecies()\n    valid = pd.DataFrame(self).merge(valid, on=self.frame_columns).set_index((self.frame_columns + ['cell_index']))\n    valid = valid.apply((lambda x: _neighbor_check(x['neighbors'], x['valid'])), 1).reset_index().rename(columns={0: 'new_neighbors'})\n    fixed = fixed.merge(valid, on=(self.frame_columns + ['cell_index'])).drop(columns='neighbors').rename(columns={'new_neighbors': 'neighbors'})\n    fixed.microns_per_pixel = self.microns_per_pixel\n    fixed.db = self.db\n    return fixed", "docstring": "If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the the dataset.  This prunes those no-longer existant connections.\n\nReturns:\nCellDataFrame: A CellDataFrame with only valid cell-cell contacts", "source": "codesearchnet"}
{"code": "def __init__(self, couplinglist=None):\n        \n\n        \n        self.graph = nx.DiGraph()\n        \n        self._dist_matrix = None\n        \n        self._qubit_list = None\n\n        if couplinglist is not None:\n            for source, target in couplinglist:\n                self.add_edge(source, target)", "docstring": "Create coupling graph. By default, the generated coupling has no nodes.\n\nArgs:\ncouplinglist (list or None): An initial coupling graph, specified as\nan adjacency list containing couplings, e.g. [[0,1], [0,2], [1,2]].", "source": "juraj-google-style"}
{"code": "def add_note(self, note):\n    if (type(note) == str):\n        return self.update_note({'content': note})\n    elif ((type(note) == dict) and ('content' in note)):\n        return self.update_note(note)\n    else:\n        return ('No string or valid note.', (- 1))", "docstring": "Wrapper method to add a note\n\nThe method can be passed the note as a dict with the `content`\nproperty set, which is then directly send to the web service for\ncreation. Alternatively, only the body as string can also be passed. In\nthis case the parameter is used as `content` for the new note.\n\nArguments:\n- note (dict or string): the note to add\n\nReturns:\nA tuple `(note, status)`\n\n- note (dict): the newly created note\n- status (int): 0 on success and -1 otherwise", "source": "codesearchnet"}
{"code": "def _GetUrl(self, url_id, cache, database):\n    \n    url_cache_results = cache.GetResults('url')\n    if not url_cache_results:\n      result_set = database.Query(self.URL_CACHE_QUERY)\n\n      cache.CacheQueryResults(\n          result_set, 'url', 'id', ('url', 'rev_host'))\n      url_cache_results = cache.GetResults('url')\n\n    url, reverse_host = url_cache_results.get(url_id, ['', ''])\n\n    if not url:\n      return ''\n\n    hostname = self._ReverseHostname(reverse_host)\n    return '{0:s} ({1:s})'.format(url, hostname)", "docstring": "Retrieves an URL from a reference to an entry in the from_visit table.\n\nArgs:\nurl_id (str): identifier of the visited URL.\ncache (SQLiteCache): cache.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nstr: URL and hostname.", "source": "juraj-google-style"}
{"code": "def map(self, map_fn, desc=None):\n    if (desc is None):\n        desc = getattr(map_fn, '__name__', '')\n    desc = u'map({})'.format(desc)\n    return self.transform((lambda xs: (map_fn(x) for x in xs)), desc=desc)", "docstring": "Return a copy of this query, with the values mapped through `map_fn`.\n\nArgs:\nmap_fn (callable): A callable that takes a single argument and returns a new value.\n\nKeyword Args:\ndesc (str): A description of the mapping transform, for use in log message.\nDefaults to the name of the map function.\n\nReturns:\nQuery", "source": "codesearchnet"}
{"code": "def add_alias(self, alias, name, op=None):\n    alias = self.aliases.find_by_name(alias)\n    name = self.aliases.find_by_name(name)\n    if alias == name:\n        return\n    elif alias in self and name in self:\n        self._merge(alias, name, op)\n    elif alias not in self and name not in self:\n        self.aliases.merge(alias, name)\n    elif alias in self:\n        root = self.aliases.merge(alias, name)\n        self._copy_item(alias, root)\n    elif name in self:\n        root = self.aliases.merge(alias, name)\n        self._copy_item(name, root)", "docstring": "Alias 'alias' to 'name'.\n\nAfter aliasing, we will think `alias` and `name`, they represent the same\nname. We will merge the values if `op` is provided.\n\nArgs:\nalias: A string.\nname: A string.\nop: The function used to merge the values.", "source": "github-repos"}
{"code": "def pixel_image(shape, sd=None, init_val=None):\n    if ((sd is not None) and (init_val is not None)):\n        warnings.warn('`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value.')\n    sd = (sd or 0.01)\n    init_val = (init_val or np.random.normal(size=shape, scale=sd).astype(np.float32))\n    return tf.Variable(init_val)", "docstring": "A naive, pixel-based image parameterization.\nDefaults to a random initialization, but can take a supplied init_val argument\ninstead.\n\nArgs:\nshape: shape of resulting image, [batch, width, height, channels].\nsd: standard deviation of param initialization noise.\ninit_val: an initial value to use instead of a random initialization. Needs\nto have the same shape as the supplied shape argument.\n\nReturns:\ntensor with shape from first argument.", "source": "codesearchnet"}
{"code": "def fetch(self, order_id, data={}, **kwargs):\n    return super(Order, self).fetch(order_id, data, **kwargs)", "docstring": "Fetch Order for given Id\n\nArgs:\norder_id : Id for which order object has to be retrieved\n\nReturns:\nOrder dict for given order Id", "source": "codesearchnet"}
{"code": "def print_info(self, capture):\n    self.frame_offset += 1\n    (ret, frame) = capture.read()\n    if ret:\n        print('Capture Information')\n        print('\\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))\n        print('\\tColor channels:   {}'.format((frame.shape[2] if (len(frame.shape) > 2) else 1)))\n        print('\\tColor range:      {}-{}'.format(np.min(frame), np.max(frame)))\n        print('\\tdtype:            {}'.format(frame.dtype))\n    else:\n        print('No source found.')", "docstring": "Prints information about the unprocessed image.\n\nReads one frame from the source to determine image colors, dimensions\nand data types.\n\nArgs:\ncapture: the source to read from.", "source": "codesearchnet"}
{"code": "def sym_descendants(self, where: Optional[Callable[[Any], bool]]=None, option: DescendantQueryOption=DescendantQueryOption.ALL, include_self: bool=False) -> List[Any]:\n    descendants = []\n    where = where or (lambda x: True)\n\n    def visit(k, v, p):\n        del k, p\n        if not where(v):\n            return TraverseAction.ENTER\n        if not include_self and self is v:\n            return TraverseAction.ENTER\n        if option == DescendantQueryOption.IMMEDIATE:\n            descendants.append(v)\n            return TraverseAction.CONTINUE\n        leaf_descendants = []\n        if isinstance(v, Symbolic):\n            leaf_descendants = v.sym_descendants(where, option)\n        if option is DescendantQueryOption.ALL or not leaf_descendants:\n            descendants.append(v)\n        descendants.extend(leaf_descendants)\n        return TraverseAction.CONTINUE\n    traverse(self, visit)\n    return descendants", "docstring": "Returns all descendants of specific classes.\n\nArgs:\nwhere: Optional callable object as the filter of descendants to return.\noption: Descendant query options, indicating whether all matched,\nimmediate matched or only the matched leaf nodes will be returned.\ninclude_self: If True, `self` will be included in the query, otherwise\nonly strict descendants are included.\n\nReturns:\nA list of objects that match the descendant_cls.", "source": "github-repos"}
{"code": "def to_dict(self) -> dict[str, Any]:\n    output = copy.deepcopy(self.__dict__)\n    output['image_processor_type'] = self.__class__.__name__\n    return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance.", "source": "github-repos"}
{"code": "def decode_jwt_payload(self, access_token=None):\n    c = self.get_credentials()\n    jwt = (access_token or c.access_token)\n    try:\n        (_, payload, _) = jwt.split('.')\n        rem = (len(payload) % 4)\n        if (rem > 0):\n            payload += ('=' * (4 - rem))\n        try:\n            decoded_jwt = b64decode(payload).decode('utf-8')\n        except TypeError as e:\n            raise PanCloudError(('Failed to base64 decode JWT: %s' % e))\n        else:\n            try:\n                x = loads(decoded_jwt)\n            except ValueError as e:\n                raise PanCloudError(('Invalid JSON: %s' % e))\n    except (AttributeError, ValueError) as e:\n        raise PanCloudError(('Invalid JWT: %s' % e))\n    return x", "docstring": "Extract payload field from JWT.\n\nArgs:\naccess_token (str): Access token to decode. Defaults to ``None``.\n\nReturns:\ndict: JSON object that contains the claims conveyed by the JWT.", "source": "codesearchnet"}
{"code": "def assemble_common_meta(common_meta_dfs, fields_to_remove, sources, remove_all_metadata_fields, error_report_file):\n    \n    all_meta_df, all_meta_df_with_dups = build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields)\n\n    if not all_meta_df.index.is_unique:\n        all_report_df = build_mismatched_common_meta_report([x.shape for x in common_meta_dfs],\n            sources, all_meta_df, all_meta_df_with_dups)\n\n        unique_duplicate_ids = all_report_df.index.unique()\n\n        if error_report_file is not None:\n            all_report_df.to_csv(error_report_file, sep=\"\\t\")\n\n        msg = .format(unique_duplicate_ids, all_report_df)\n        raise MismatchCommonMetadataConcatException(msg)\n\n    \n    all_meta_df_sorted = all_meta_df.sort_index(axis=0)\n\n    return all_meta_df_sorted", "docstring": "Assemble the common metadata dfs together. Both indices are sorted.\nFields that are not in all the dfs are dropped.\n\nArgs:\ncommon_meta_dfs (list of pandas dfs)\nfields_to_remove (list of strings): fields to be removed from the\ncommon metadata because they don't agree across files\n\nReturns:\nall_meta_df_sorted (pandas df)", "source": "juraj-google-style"}
{"code": "def __init__(self, origin):\n    \n    super(ShellItemsParser, self).__init__()\n    self._origin = origin\n    self._path_segments = []", "docstring": "Initializes the parser.\n\nArgs:\norigin (str): origin of the event.", "source": "juraj-google-style"}
{"code": "def search_group_by_id(self, groupID) -> Group:\n        \n        for g in self.groups:\n            if g.id == groupID:\n                return g\n        return None", "docstring": "searches a group by given id\n\nArgs:\ngroupID(str): groupID the group to search for\n\nReturns\nthe group object or None if it couldn't find a group", "source": "juraj-google-style"}
{"code": "def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):\n    if (max_procs is None):\n        max_procs = cpu_count()\n    kw_arr = saturate_kwargs(keys=keys, **kwargs)\n    if (len(kw_arr) == 0):\n        return\n    if isinstance(affinity, int):\n        win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)\n    task_queue = queue.Queue()\n    while (len(kw_arr) > 0):\n        for _ in range(max_procs):\n            if (len(kw_arr) == 0):\n                break\n            kw = kw_arr.pop(0)\n            p = Process(target=func, kwargs=kw)\n            p.start()\n            sys.stdout.flush()\n            task_queue.put(p)\n            if show_proc:\n                signature = ', '.join([f'{k}={v}' for (k, v) in kw.items()])\n                print(f'[{func.__name__}] ({signature})')\n        while (not task_queue.empty()):\n            p = task_queue.get()\n            p.join()", "docstring": "Provide interface for multiprocessing\n\nArgs:\nfunc: callable functions\nkeys: keys in kwargs that want to use process\nmax_procs: max number of processes\nshow_proc: whether to show process\naffinity: CPU affinity\n**kwargs: kwargs for func", "source": "codesearchnet"}
{"code": "def _get_feature_variable_for_type(self, feature_key, variable_key, variable_type, user_id, attributes):\n    if (not validator.is_non_empty_string(feature_key)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))\n        return None\n    if (not validator.is_non_empty_string(variable_key)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('variable_key'))\n        return None\n    if (not isinstance(user_id, string_types)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n        return None\n    if (not self._validate_user_inputs(attributes)):\n        return None\n    feature_flag = self.config.get_feature_from_key(feature_key)\n    if (not feature_flag):\n        return None\n    variable = self.config.get_variable_for_feature(feature_key, variable_key)\n    if (not variable):\n        return None\n    if (variable.type != variable_type):\n        self.logger.warning(('Requested variable type \"%s\", but variable is of type \"%s\". Use correct API to retrieve value. Returning None.' % (variable_type, variable.type)))\n        return None\n    feature_enabled = False\n    source_info = {}\n    variable_value = variable.defaultValue\n    decision = self.decision_service.get_variation_for_feature(feature_flag, user_id, attributes)\n    if decision.variation:\n        feature_enabled = decision.variation.featureEnabled\n        if feature_enabled:\n            variable_value = self.config.get_variable_value_for_variation(variable, decision.variation)\n            self.logger.info(('Got variable value \"%s\" for variable \"%s\" of feature flag \"%s\".' % (variable_value, variable_key, feature_key)))\n        else:\n            self.logger.info(('Feature \"%s\" for variation \"%s\" is not enabled. Returning the default variable value \"%s\".' % (feature_key, decision.variation.key, variable_value)))\n    else:\n        self.logger.info(('User \"%s\" is not in any variation or rollout rule. Returning default value for variable \"%s\" of feature flag \"%s\".' % (user_id, variable_key, feature_key)))\n    if (decision.source == enums.DecisionSources.FEATURE_TEST):\n        source_info = {'experiment_key': decision.experiment.key, 'variation_key': decision.variation.key}\n    try:\n        actual_value = self.config.get_typecast_value(variable_value, variable_type)\n    except:\n        self.logger.error('Unable to cast value. Returning None.')\n        actual_value = None\n    self.notification_center.send_notifications(enums.NotificationTypes.DECISION, enums.DecisionNotificationTypes.FEATURE_VARIABLE, user_id, (attributes or {}), {'feature_key': feature_key, 'feature_enabled': feature_enabled, 'source': decision.source, 'variable_key': variable_key, 'variable_value': actual_value, 'variable_type': variable_type, 'source_info': source_info})\n    return actual_value", "docstring": "Helper method to determine value for a certain variable attached to a feature flag based on type of variable.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nvariable_type: Type of variable which could be one of boolean/double/integer/string.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nValue of the variable. None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "codesearchnet"}
{"code": "def OpenFile(self, windows_path):\n    \n    path_spec = self._path_resolver.ResolvePath(windows_path)\n    if path_spec is None:\n      return None\n\n    return self._file_system.GetFileObjectByPathSpec(path_spec)", "docstring": "Opens the file specificed by the Windows path.\n\nArgs:\nwindows_path (str): Windows path to the file.\n\nReturns:\nFileIO: file-like object or None if the file does not exist.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `[src_lang_code] X [eos]`\n- `labels`: (for decoder) `[tgt_lang_code] X [eos]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def take_node_screenshot(self, element, screenshot_path):\n    from PIL import Image\n    'Take a screenshot of a node\\n\\n        Args:\\n            element (object): the proxy_element\\n            screenshot_path (str): the path where the screenshot will be saved\\n        '\n    temp_path = os.path.join(tempdir, screenshot_path)\n    el_x = int(element.location['x'])\n    el_y = int(element.location['y'])\n    el_height = int(element.size['height'])\n    el_width = int(element.size['width'])\n    if ((el_height == 0) or (el_width == 0)):\n        self.debug_log('take_node_screenshot cannot be taken because element width or height equal zero')\n        return False\n    bounding_box = (el_x, el_y, (el_x + el_width), (el_y + el_height))\n    self._driver.save_screenshot(temp_path)\n    base_image = Image.open(temp_path)\n    cropped_image = base_image.crop(bounding_box)\n    base_image = base_image.resize(cropped_image.size)\n    base_image.paste(cropped_image, (0, 0))\n    base_image.save(screenshot_path)\n    '\\n        except Exception as e:\\n            tb = traceback.format_exc()\\n            print unicode(tb)\\n            embed()\\n        '", "docstring": "Take a screenshot of a node\n\nArgs:\nelement (object): the proxy_element\nscreenshot_path (str): the path where the screenshot will be saved", "source": "codesearchnet"}
{"code": "def do_command_line(infile: typing.IO[str]) -> int:\n    \n    lines = infile.readlines()\n    tree = ast.parse(''.join(lines))\n    checker = Checker(tree, lines, infile.name)\n    checker.load()\n    errors = []  \n    for func in checker.all_funcs(skip_noqa=True):\n        try:\n            errors = list(func.check_all())\n        except ValidationError as error:\n            errors = [error.to_aaa()]\n        print(func.__str__(errors), end='')\n    return len(errors)", "docstring": "Currently a small stub to create an instance of Checker for the passed\n``infile`` and run its test functions through linting.\n\nArgs:\ninfile\n\nReturns:\nint: Number of flake8 errors raised.", "source": "juraj-google-style"}
{"code": "def generate(cls, strategy, **kwargs):\n        \n        assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)\n        action = getattr(cls, strategy)\n        return action(**kwargs)", "docstring": "Generate a new instance.\n\nThe instance will be created with the given strategy (one of\nBUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).\n\nArgs:\nstrategy (str): the strategy to use for generating the instance.\n\nReturns:\nobject: the generated instance", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    file_header_map = self._GetDataTypeMap('systemd_journal_file_header')\n    try:\n        (file_header, _) = self._ReadStructureFromFileObject(file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse file header with error: {0!s}'.format(exception))\n    if (file_header.signature != self._FILE_SIGNATURE):\n        raise errors.UnableToParseFile('Invalid file signature.')\n    if (file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES):\n        raise errors.UnableToParseFile('Unsupported file header size: {0:d}.'.format(file_header.header_size))\n    data_hash_table_end_offset = (file_header.data_hash_table_offset + file_header.data_hash_table_size)\n    field_hash_table_end_offset = (file_header.field_hash_table_offset + file_header.field_hash_table_size)\n    self._maximum_journal_file_offset = max(data_hash_table_end_offset, field_hash_table_end_offset)\n    entry_object_offsets = self._ParseEntryObjectOffsets(file_object, file_header.entry_array_offset)\n    for entry_object_offset in entry_object_offsets:\n        if (entry_object_offset == 0):\n            continue\n        try:\n            fields = self._ParseJournalEntry(file_object, entry_object_offset)\n        except errors.ParseError as exception:\n            parser_mediator.ProduceExtractionWarning('Unable to parse journal entry at offset: 0x{0:08x} with error: {1!s}'.format(entry_object_offset, exception))\n            return\n        event_data = SystemdJournalEventData()\n        event_data.body = fields.get('MESSAGE', None)\n        event_data.hostname = fields.get('_HOSTNAME', None)\n        event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)\n        if (event_data.reporter and (event_data.reporter != 'kernel')):\n            event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=fields['real_time'])\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Systemd journal file-like object.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the header cannot be parsed.", "source": "codesearchnet"}
{"code": "def load_dataset(file_path: str, model: NormalizedModel) -> Dataset:\n    xs = []\n    ys = array.array('B')\n    with open(file_path) as f:\n        for row in f:\n            cols = row.strip().split('\\t')\n            if len(cols) < 2:\n                continue\n            ys.append(cols[0] == '1')\n            xs.append(tuple((k in set(cols[1:]) for k in model.features)))\n    X = jnp.array(xs) * 2 - 1\n    Y = jnp.array(ys)\n    return Dataset(X, Y)", "docstring": "Loads a dataset from the given file path.\n\nArgs:\nfile_path: A file path for the encoded data file.\nmodel: A normalized model.\n\nReturns:\nA dataset of inputs (X) and outputs (Y).", "source": "github-repos"}
{"code": "def _bytestringToValuelist(bytestring, numberOfRegisters):\n    \n    _checkInt(numberOfRegisters, minvalue=1, description='number of registers')\n    numberOfBytes = _NUMBER_OF_BYTES_PER_REGISTER * numberOfRegisters\n    _checkString(bytestring, 'byte string', minlength=numberOfBytes, maxlength=numberOfBytes)\n\n    values = []\n    for i in range(numberOfRegisters):\n        offset = _NUMBER_OF_BYTES_PER_REGISTER * i\n        substring = bytestring[offset : offset + _NUMBER_OF_BYTES_PER_REGISTER]\n        values.append(_twoByteStringToNum(substring))\n\n    return values", "docstring": "Convert a bytestring to a list of numerical values.\n\nThe bytestring is interpreted as 'unsigned INT16'.\n\nArgs:\n* bytestring (str): The string from the slave. Length = 2*numberOfRegisters\n* numberOfRegisters (int): The number of registers. For error checking.\n\nReturns:\nA list of integers.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def _get_music_services_data_xml(soco=None):\n        \n        device = soco or discovery.any_soco()\n        log.debug(\"Fetching music services data from %s\", device)\n        available_services = device.musicServices.ListAvailableServices()\n        descriptor_list_xml = available_services[\n            'AvailableServiceDescriptorList']\n        log.debug(\"Services descriptor list: %s\", descriptor_list_xml)\n        return descriptor_list_xml", "docstring": "Fetch the music services data xml from a Sonos device.\n\nArgs:\nsoco (SoCo): a SoCo instance to query. If none is specified, a\nrandom device will be used. Defaults to `None`.\n\nReturns:\nstr: a string containing the music services data xml", "source": "juraj-google-style"}
{"code": "def _bfd_tx(self, **kwargs):\n        \n        int_type = kwargs['int_type']\n        method_name = 'interface_%s_bfd_interval_min_tx' % int_type\n        bfd_tx = getattr(self._interface, method_name)\n        config = bfd_tx(**kwargs)\n        if kwargs['delete']:\n            tag = 'min-tx'\n            config.find('.\n        return config", "docstring": "Return the BFD minimum transmit interval XML.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\nmin_tx (str): BFD transmit interval in milliseconds (300, 500, etc)\ndelete (bool): Remove the configuration if ``True``.\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def download_file_from_google_drive(file_id, root, filename=None, md5=None):\n    import requests\n    url = 'https:\n    root = os.path.expanduser(root)\n    if (not filename):\n        filename = file_id\n    fpath = os.path.join(root, filename)\n    makedir_exist_ok(root)\n    if (os.path.isfile(fpath) and check_integrity(fpath, md5)):\n        print(('Using downloaded and verified file: ' + fpath))\n    else:\n        session = requests.Session()\n        response = session.get(url, params={'id': file_id}, stream=True)\n        token = _get_confirm_token(response)\n        if token:\n            params = {'id': file_id, 'confirm': token}\n            response = session.get(url, params=params, stream=True)\n        _save_response_content(response, fpath)", "docstring": "Download a Google Drive file from  and place it in root.\n\nArgs:\nfile_id (str): id of file to be downloaded\nroot (str): Directory to place downloaded file in\nfilename (str, optional): Name to save the file under. If None, use the id of the file.\nmd5 (str, optional): MD5 checksum of the download. If None, do not check", "source": "codesearchnet"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    \n    date_time_values = self._CopyDateTimeFromString(time_string)\n\n    year = date_time_values.get('year', 0)\n    month = date_time_values.get('month', 0)\n    day_of_month = date_time_values.get('day_of_month', 0)\n    hours = date_time_values.get('hours', 0)\n    minutes = date_time_values.get('minutes', 0)\n    seconds = date_time_values.get('seconds', 0)\n\n    microseconds = date_time_values.get('microseconds', 0)\n    milliseconds, _ = divmod(\n        microseconds, definitions.MICROSECONDS_PER_MILLISECOND)\n\n    if year < 1601 or year > 30827:\n      raise ValueError('Unsupported year value: {0:d}.'.format(year))\n\n    self._normalized_timestamp = None\n    self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n        year, month, day_of_month, hours, minutes, seconds)\n\n    self.year = year\n    self.month = month\n    self.day_of_month = day_of_month\n    \n    self.day_of_week = None\n    self.hours = hours\n    self.minutes = minutes\n    self.seconds = seconds\n    self.milliseconds = milliseconds\n\n    self.is_local_time = False", "docstring": "Copies a SYSTEMTIME structure from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.\n\nRaises:\nValueError: if the date string is invalid or not supported.", "source": "juraj-google-style"}
{"code": "def read_label_file(path):\n    labels = []\n    for record in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=3):\n        value = ''\n        if (len(record) > 2):\n            value = str(record[2])\n        labels.append([float(_clean_time(record[0])), float(_clean_time(record[1])), value])\n    return labels", "docstring": "Read the labels from an audacity label file.\n\nArgs:\npath (str): Path to the label file.\n\nReturns:\nlist: List of labels (start [sec], end [sec], label)\n\nExample::\n\n>>> read_label_file('/path/to/label/file.txt')\n[\n[0.0, 0.2, 'sie'],\n[0.2, 2.2, 'hallo']\n]", "source": "codesearchnet"}
{"code": "def default_matrix(self):\n    matrix = (c_float * 6)()\n    rc = self._libinput.libinput_device_config_calibration_get_default_matrix(self._handle, matrix)\n    return (rc, tuple(matrix))", "docstring": "The default calibration matrix for this device.\n\nOn most devices, this is the identity matrix. If the udev property\n``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,\nthat property's value becomes the default matrix, see\n`Static device configuration via udev`_.\n\nReturns:\n(bool, (float, float, float, float, float, float)): :obj:`False` if\nno calibration is set and\nthe returned matrix is the identity matrix, :obj:`True`\notherwise. :obj:`tuple` representing the first two rows of\na 3x3 matrix as described\nin :meth:`config_calibration_set_matrix`.", "source": "codesearchnet"}
{"code": "def get_factors(n):\n    \n\n    def factor(n, i, combi, res):\n        \n\n        while i * i <= n:\n            if n % i == 0:\n                res += combi + [i, int(n/i)],\n                factor(n/i, i, combi+[i], res)\n            i += 1\n        return res\n    return factor(n, 2, [], [])", "docstring": "[summary]\n\nArguments:\nn {[int]} -- [to analysed number]\n\nReturns:\n[list of lists] -- [all factors of the number n]", "source": "juraj-google-style"}
{"code": "def songs(self, *, uploaded=True, purchased=True):\n    if ((not uploaded) and (not purchased)):\n        raise ValueError(\"'uploaded' and 'purchased' cannot both be False.\")\n    if (purchased and uploaded):\n        song_list = []\n        for chunk in self.songs_iter(export_type=1):\n            song_list.extend(chunk)\n    elif purchased:\n        song_list = []\n        for chunk in self.songs_iter(export_type=2):\n            song_list.extend(chunk)\n    elif uploaded:\n        purchased_songs = []\n        for chunk in self.songs_iter(export_type=2):\n            purchased_songs.extend(chunk)\n        song_list = [song for chunk in self.songs_iter(export_type=1) for song in chunk if (song not in purchased_songs)]\n    return song_list", "docstring": "Get a listing of Music Library songs.\n\nReturns:\nlist: Song dicts.", "source": "codesearchnet"}
{"code": "def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):\n    n_atoms = len(struct.species)\n    fc = np.array(struct.frac_coords)\n    fc_copy = np.repeat(fc[(:, :, np.newaxis)], 27, axis=2)\n    neighbors = np.array(list(itertools.product([0, 1, (- 1)], [0, 1, (- 1)], [0, 1, (- 1)]))).T\n    neighbors = np.repeat(neighbors[(np.newaxis, :, :)], 1, axis=0)\n    fc_diff = (fc_copy - neighbors)\n    species = list(map(str, struct.species))\n    for (i, item) in enumerate(species):\n        if (not (item in ldict.keys())):\n            species[i] = str(Specie.from_string(item).element)\n    latmat = struct.lattice.matrix\n    connected_matrix = np.zeros((n_atoms, n_atoms))\n    for i in range(n_atoms):\n        for j in range((i + 1), n_atoms):\n            max_bond_length = ((ldict[species[i]] + ldict[species[j]]) + tolerance)\n            frac_diff = (fc_diff[j] - fc_copy[i])\n            distance_ij = np.dot(latmat.T, frac_diff)\n            if (sum((np.linalg.norm(distance_ij, axis=0) < max_bond_length)) > 0):\n                connected_matrix[(i, j)] = 1\n                connected_matrix[(j, i)] = 1\n    return connected_matrix", "docstring": "Finds bonded atoms and returns a adjacency matrix of bonded atoms.\n\nAuthor: \"Gowoon Cheon\"\nEmail: \"gcheon@stanford.edu\"\n\nArgs:\nstruct (Structure): Input structure\ntolerance: length in angstroms used in finding bonded atoms. Two atoms\nare considered bonded if (radius of atom 1) + (radius of atom 2) +\n(tolerance) < (distance between atoms 1 and 2). Default\nvalue = 0.45, the value used by JMol and Cheon et al.\nldict: dictionary of bond lengths used in finding bonded atoms. Values\nfrom JMol are used as default\n\nReturns:\n(np.ndarray): A numpy array of shape (number of atoms, number of atoms);\nIf any image of atom j is bonded to atom i with periodic boundary\nconditions, the matrix element [atom i, atom j] is 1.", "source": "codesearchnet"}
{"code": "def validate_json_against_schema(json_dict, schema, err_msg=None):\n    try:\n        if isinstance(schema, str):\n            schema_name = schema\n            schema = _SCHEMAS[schema_name]\n            validator = _get_validator(schema_name)\n            validator.validate(json_dict)\n        else:\n            jsonschema.validate(json_dict, schema)\n    except jsonschema.ValidationError as err:\n        if (err_msg is None):\n            err_msg = 'JSON failed validation. Set Qiskit log level to DEBUG for further information.'\n        newerr = SchemaValidationError(err_msg)\n        newerr.__cause__ = _SummaryValidationError(err)\n        logger.debug('%s', _format_causes(err))\n        raise newerr", "docstring": "Validates JSON dict against a schema.\n\nArgs:\njson_dict (dict): JSON to be validated.\nschema (dict or str): JSON schema dictionary or the name of one of the\nstandards schemas in Qiskit to validate against it. The list of\nstandard schemas is: ``backend_configuration``,\n``backend_properties``, ``backend_status``,\n``default_pulse_configuration``, ``job_status``, ``qobj``,\n``result``.\nerr_msg (str): Optional error message.\n\nRaises:\nSchemaValidationError: Raised if validation fails.", "source": "codesearchnet"}
{"code": "def traverse_preorder(self, leaves=True, internal=True):\n    s = deque()\n    s.append(self)\n    while (len(s) != 0):\n        n = s.pop()\n        if ((leaves and n.is_leaf()) or (internal and (not n.is_leaf()))):\n            (yield n)\n        s.extend(n.children)", "docstring": "Perform a preorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"}
{"code": "def _default_tolerance(dtype):\n    if dtype == np.float16:\n        return 0.005\n    elif dtype in (np.float32, np.complex64):\n        return 0.001\n    elif dtype in (np.float64, np.complex128):\n        return 1e-05\n    else:\n        return None", "docstring": "Returns a sensible default tolerance for comparing results of a given type.\n\nArgs:\ndtype: A datatype.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n    \n    self.Watch = channel.stream_stream(\n        '/etcdserverpb.Watch/Watch',\n        request_serializer=rpc__pb2.WatchRequest.SerializeToString,\n        response_deserializer=rpc__pb2.WatchResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):\n  \n  \n  \n  \n  \n  line = clean_lines.elided[linenum]\n  match = Match(r'^(.*\\S)&&', line)\n  if not match:\n    match = Match(r'(.*)&&\\S', line)\n  if (not match) or '(&&)' in line or Search(r'\\boperator\\s*$', match.group(1)):\n    return\n\n  \n  \n  \n  typenames = GetTemplateArgs(clean_lines, linenum)\n  and_pos = len(match.group(1))\n  if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):\n    if not IsRValueAllowed(clean_lines, linenum, typenames):\n      error(filename, linenum, 'build/c++11', 3,\n            'RValue references are an unapproved C++ feature.')\n  else:\n    error(filename, linenum, 'whitespace/operators', 3,\n          'Missing spaces around &&')", "docstring": "Check for rvalue references.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nnesting_state: A NestingState instance which maintains information about\nthe current stack of nested blocks being parsed.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def add(self, index, var):\n    import nnabla as nn\n    from nnabla.utils.image_utils import imsave\n    if ((index != 0) and (((index + 1) % self.interval) != 0)):\n        return\n    if isinstance(var, nn.Variable):\n        data = var.d.copy()\n    elif isinstance(var, nn.NdArray):\n        data = var.data.copy()\n    else:\n        assert isinstance(var, np.ndarray)\n        data = var.copy()\n    assert (data.ndim > 2)\n    channels = data.shape[(- 3)]\n    data = data.reshape((- 1), *data.shape[(- 3):])\n    data = data[:min(data.shape[0], self.num_images)]\n    data = self.normalize_method(data)\n    if (channels > 3):\n        data = data[(:, :3)]\n    elif (channels == 2):\n        data = np.concatenate([data, np.ones(((data.shape[0], 1) + data.shape[(- 2):]))], axis=1)\n    path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')\n    for j in range(min(self.num_images, data.shape[0])):\n        img = data[j].transpose(1, 2, 0)\n        if (img.shape[(- 1)] == 1):\n            img = img[(..., 0)]\n        path = path_tmpl.format(index, '{:03d}'.format(j))\n        imsave(path, img)\n    if self.verbose:\n        logger.info('iter={} {{{}}} are written to {}.'.format(index, self.name, path_tmpl.format(index, '*')))", "docstring": "Add a minibatch of images to the monitor.\n\nArgs:\nindex (int): Index.\nvar (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):\nA minibatch of images with ``(N, ..., C, H, W)`` format.\nIf C == 2, blue channel is appended with ones. If C > 3,\nthe array will be sliced to remove C > 3 sub-array.", "source": "codesearchnet"}
{"code": "def query_snl(self, criteria):\n    try:\n        payload = {'criteria': json.dumps(criteria)}\n        response = self.session.post('{}/snl/query'.format(self.preamble), data=payload)\n        if (response.status_code in [200, 400]):\n            resp = json.loads(response.text)\n            if resp['valid_response']:\n                if resp.get('warning'):\n                    warnings.warn(resp['warning'])\n                return resp['response']\n            else:\n                raise MPRestError(resp['error'])\n        raise MPRestError('REST error with status code {} and error {}'.format(response.status_code, response.text))\n    except Exception as ex:\n        raise MPRestError(str(ex))", "docstring": "Query for submitted SNLs.\n\n.. note::\n\nAs of now, this MP REST feature is open only to a select group of\nusers. Opening up submissions to all users is being planned for\nthe future.\n\nArgs:\ncriteria (dict): Query criteria.\n\nReturns:\nA dict, with a list of submitted SNLs in the \"response\" key.\n\nRaises:\nMPRestError", "source": "codesearchnet"}
{"code": "def register_hooked(self, hooks, func, args_gen=None):\n    if (self.hooked is None):\n        self.hooked = {}\n    if (args_gen is None):\n        args_gen = getattr(func, 'call_types', {}).keys\n    if (not isinstance(hooks, Sequence)):\n        hooks = [hooks]\n    for hook_cls in hooks:\n        self.hooked[hook_cls] = (func, args_gen)", "docstring": "Register func to be run when any of the hooks are run by parent\n\nArgs:\nhooks: A Hook class or list of Hook classes of interest\nfunc: The callable that should be run on that Hook\nargs_gen: Optionally specify the argument names that should be\npassed to func. If not given then use func.call_types.keys", "source": "codesearchnet"}
{"code": "def SetName(obj, name):\n  \n  \n  \n  precondition.AssertType(name, str)\n\n  if PY2:\n    obj.__name__ = name.encode(\"ascii\")\n  else:\n    obj.__name__ = name", "docstring": "A compatibility wrapper for setting object's name.\n\nSee documentation for `GetName` for more information.\n\nArgs:\nobj: A type or function object to set the name for.\nname: A name to set.", "source": "juraj-google-style"}
{"code": "def segment_pofile(filename, segments):\n    reading_msg = 'Reading {num} entries from {file}'\n    writing_msg = 'Writing {num} entries to {file}'\n    source_po = polib.pofile(filename)\n    LOG.info(reading_msg.format(file=filename, num=len(source_po)))\n    remaining_po = copy.deepcopy(source_po)\n    remaining_po[:] = []\n    segment_po_files = {filename: remaining_po}\n    segment_patterns = []\n    for (segmentfile, patterns) in segments.items():\n        segment_po_files[segmentfile] = copy.deepcopy(remaining_po)\n        segment_patterns.extend(((pat, segmentfile) for pat in patterns))\n    for msg in source_po:\n        msg_segments = set()\n        for (occ_file, _) in msg.occurrences:\n            for (pat, segment_file) in segment_patterns:\n                if fnmatch.fnmatch(occ_file, pat):\n                    msg_segments.add(segment_file)\n                    break\n            else:\n                msg_segments.add(filename)\n        assert msg_segments\n        if (len(msg_segments) == 1):\n            segment_file = msg_segments.pop()\n            segment_po_files[segment_file].append(msg)\n        else:\n            remaining_po.append(msg)\n    files_written = set()\n    for (segment_file, pofile) in segment_po_files.items():\n        out_file = (filename.dirname() / segment_file)\n        if (not pofile):\n            LOG.error('No messages to write to %s, did you run segment twice?', out_file)\n        else:\n            LOG.info(writing_msg.format(file=out_file, num=len(pofile)))\n            pofile.save(out_file)\n            files_written.add(out_file)\n    return files_written", "docstring": "Segment a .po file using patterns in `segments`.\n\nThe .po file at `filename` is read, and the occurrence locations of its\nmessages are examined.  `segments` is a dictionary: the keys are segment\n.po filenames, the values are lists of patterns::\n\n{\n'django-studio.po': [\n'cms/*',\n'some-other-studio-place/*',\n],\n'django-weird.po': [\n'*/weird_*.*',\n],\n}\n\nIf all a message's occurrences match the patterns for a segment, then that\nmessage is written to the new segmented .po file.\n\nAny message that matches no segments, or more than one, is written back to\nthe original file.\n\nArguments:\nfilename (path.path): a path object referring to the original .po file.\nsegments (dict): specification of the segments to create.\n\nReturns:\na set of path objects, all the segment files written.", "source": "codesearchnet"}
{"code": "def load(png_filename):\n    \n    \n    png_filename = os.path.expanduser(png_filename)\n\n    try:\n        img = Image.open(png_filename)\n    except Exception as e:\n        raise ValueError(\"Could not load file {0} for conversion.\"\n                         .format(png_filename))\n        raise\n\n    return numpy.array(img)", "docstring": "Import a png file into a numpy array.\n\nArguments:\npng_filename (str): A string filename of a png datafile\n\nReturns:\nA numpy array with data from the png file", "source": "juraj-google-style"}
{"code": "def _ParseJournalEntry(self, file_object, file_offset):\n    \n    entry_object = self._ParseEntryObject(file_object, file_offset)\n\n    \n    entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item')\n\n    file_offset += 64\n    data_end_offset = file_offset + entry_object.data_size - 64\n\n    fields = {'real_time': entry_object.real_time}\n\n    while file_offset < data_end_offset:\n      try:\n        entry_item, entry_item_data_size = self._ReadStructureFromFileObject(\n            file_object, file_offset, entry_item_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError((\n            'Unable to parse entry item at offset: 0x{0:08x} with error: '\n            '{1!s}').format(file_offset, exception))\n\n      file_offset += entry_item_data_size\n\n      if entry_item.object_offset < self._maximum_journal_file_offset:\n        raise errors.ParseError(\n            'object offset should be after hash tables ({0:d} < {1:d})'.format(\n                entry_item.object_offset, self._maximum_journal_file_offset))\n\n      event_data = self._ParseDataObject(file_object, entry_item.object_offset)\n      event_string = event_data.decode('utf-8')\n      key, value = event_string.split('=', 1)\n      fields[key] = value\n\n    return fields", "docstring": "Parses a journal entry.\n\nThis method will generate an event per ENTRY object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the entry object relative to the start\nof the file-like object.\n\nReturns:\ndict[str, objects]: entry items per key.\n\nRaises:\nParseError: when an object offset is out of bounds.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      fsnfts_volume = pyfsntfs.volume()\n      fsnfts_volume.open_file_object(file_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._fsntfs_volume = fsnfts_volume", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb' which\nrepresents read-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def format_cert_name(env='', account='', region='', certificate=None):\n    \n    cert_name = None\n\n    if certificate:\n        if certificate.startswith('arn'):\n            LOG.info(\"Full ARN provided...skipping lookup.\")\n            cert_name = certificate\n        else:\n            generated_cert_name = generate_custom_cert_name(env, region, account, certificate)\n            if generated_cert_name:\n                LOG.info(\"Found generated certificate %s from template\", generated_cert_name)\n                cert_name = generated_cert_name\n            else:\n                LOG.info(\"Using default certificate name logic\")\n                cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(\n                    account=account, name=certificate))\n    LOG.debug('Certificate name: %s', cert_name)\n\n    return cert_name", "docstring": "Format the SSL certificate name into ARN for ELB.\n\nArgs:\nenv (str): Account environment name\naccount (str): Account number for ARN\nregion (str): AWS Region.\ncertificate (str): Name of SSL certificate\n\nReturns:\nstr: Fully qualified ARN for SSL certificate\nNone: Certificate is not desired", "source": "juraj-google-style"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._fsntfs_data_stream:\n      self._fsntfs_data_stream.seek(offset, whence)\n    else:\n      self._fsntfs_file_entry.seek(offset, whence)", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def GetProperties(cls, path_spec):\n    \n    properties = {}\n\n    for property_name in cls.PROPERTY_NAMES:\n      \n      if hasattr(path_spec, property_name):\n        properties[property_name] = getattr(path_spec, property_name)\n\n    return properties", "docstring": "Retrieves a dictionary containing the path specification properties.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\ndict[str, str]: path specification properties.\n\nRaises:\ndict: path specification properties.", "source": "juraj-google-style"}
{"code": "def UsesArtifact(self, artifacts):\n    if isinstance(artifacts, string_types):\n        return (artifacts in self.artifacts)\n    else:\n        return any((True for artifact in artifacts if (artifact in self.artifacts)))", "docstring": "Determines if the check uses the specified artifact.\n\nArgs:\nartifacts: Either a single artifact name, or a list of artifact names\n\nReturns:\nTrue if the check uses a specific artifact.", "source": "codesearchnet"}
{"code": "def create(self, project_id=None):\n    \n    if not self.exists():\n      if project_id is None:\n        project_id = self._api.project_id\n      try:\n        self._info = self._api.buckets_insert(self._name, project_id=project_id)\n      except Exception as e:\n        raise e\n    return self", "docstring": "Creates the bucket.\n\nArgs:\nproject_id: the project in which to create the bucket.\nReturns:\nThe bucket.\nRaises:\nException if there was an error creating the bucket.", "source": "juraj-google-style"}
{"code": "def set_attribute(self, key, value):\n    if ((not isinstance(key, str)) or (not isinstance(value, str))):\n        raise ValueError(\"The arguments 'key' and 'value' must both be strings. Instead they are {} and {}.\".format(key, value))\n    self.extra_data[key] = value", "docstring": "Add a key-value pair to the extra_data dict.\n\nThis can be used to add attributes that are not available when\nray.profile was called.\n\nArgs:\nkey: The attribute name.\nvalue: The attribute value.", "source": "codesearchnet"}
{"code": "def sum(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Sum(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.sum(x, axis=axis, keepdims=keepdims)", "docstring": "Sum of a tensor over the given axes.\n\nArgs:\nx: Input tensor.\naxis: Axis or axes along which the sum is computed. The default is to\ncompute the sum of the flattened tensor.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one.\n\nReturns:\nOutput tensor containing the sum.", "source": "github-repos"}
{"code": "def make_tex_table(inputlist, outputfile, close=False, fmt=None,\n                   **kwargs):\n    \n    output_str = \"\"\n    if fmt is None:\n        fmt = {}\n    for row in inputlist:\n        for key, val in enumerate(row):\n            if val is None:\n                output_str += r'\\text{{{}}}'.format(\n                    str(kwargs.get(\"nonestring\", \"None\"))\n                )\n            else:\n                \n                if np.isscalar(val):\n                    temp_str_fmt = \"$\\\\num{{\" + fmt.get(\n                        key, \"{:g}\") + \"}}$\"\n                else:\n                    temp_str_fmt = fmt.get(key, \"{}\")\n                temp_str = temp_str_fmt.format(val).replace(\"+\", \"\")\n            output_str += temp_str + \"&\"\n        output_str = output_str[:-1]\n        output_str += \"\\\\\\\\\\n\"\n    outputfile.write(output_str)\n    if close:\n        outputfile.close()", "docstring": "Parse table from inputlist\n\nArgs:\ninputlist: list\nList to parse\noutputfile: file\n.tex file to write\nfmt: dictionary\nkey: integer\ncolumn index starting with 0\nvalues: string\nformat string. eg \"{:g}\"\n**kwargs:\nnonestring: string\nstring when objecttype is None\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def generate_workflow_description(self):\n    if (not self.tasks):\n        raise WorkflowError('Workflow contains no tasks, and cannot be executed.')\n    self.definition = self.workflow_skeleton()\n    if self.batch_values:\n        self.definition['batch_values'] = self.batch_values\n    all_input_port_values = [t.inputs.__getattribute__(input_port_name).value for t in self.tasks for input_port_name in t.inputs._portnames]\n    for task in self.tasks:\n        output_multiplex_ports_to_exclude = []\n        multiplex_output_port_names = [portname for portname in task.outputs._portnames if task.outputs.__getattribute__(portname).is_multiplex]\n        for p in multiplex_output_port_names:\n            output_port_reference = ((('source:' + task.name) + ':') + p)\n            if (output_port_reference not in all_input_port_values):\n                output_multiplex_ports_to_exclude.append(p)\n        task_def = task.generate_task_workflow_json(output_multiplex_ports_to_exclude=output_multiplex_ports_to_exclude)\n        self.definition['tasks'].append(task_def)\n    if self.callback:\n        self.definition['callback'] = self.callback\n    return self.definition", "docstring": "Generate workflow json for launching the workflow against the gbdx api\n\nArgs:\nNone\n\nReturns:\njson string", "source": "codesearchnet"}
{"code": "def id(self, value):\n        \n        if value == self._defaults['ai.device.id'] and 'ai.device.id' in self._values:\n            del self._values['ai.device.id']\n        else:\n            self._values['ai.device.id'] = value", "docstring": "The id property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def argsort(*args, **kwargs):\n    \n    if len(args) == 1 and isinstance(args[0], dict):\n        dict_ = args[0]\n        index_list = list(dict_.keys())\n        value_list = list(dict_.values())\n        return sortedby2(index_list, value_list)\n    else:\n        index_list = list(range(len(args[0])))\n        return sortedby2(index_list, *args, **kwargs)", "docstring": "like np.argsort but for lists\n\nArgs:\n*args: multiple lists to sort by\n**kwargs:\nreverse (bool): sort order is descending if True else acscending\n\nCommandLine:\npython -m utool.util_list argsort\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> result = ut.argsort({'a': 3, 'b': 2, 'c': 100})\n>>> print(result)", "source": "juraj-google-style"}
{"code": "def profile_update_args_v2(self, profile):\n    ij = self.load_install_json(profile.get('install_json', 'install.json'))\n    if ((profile.get('args', {}).get('app') is None) and (profile.get('args', {}).get('default') is None)):\n        _args = profile.pop('args')\n        profile['args'] = {}\n        profile['args']['app'] = {}\n        profile['args']['default'] = {}\n        for arg in self.profile_settings_args_install_json(ij, None):\n            try:\n                profile['args']['app'][arg] = _args.pop(arg)\n            except KeyError:\n                if self.args.verbose:\n                    print('{}{}Input \"{}\" not found in profile \"{}\".'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg, profile.get('profile_name')))\n        profile['args']['default'] = _args\n        print('{}{}Updating args section to v2 schema for profile {}.'.format(c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name')))", "docstring": "Update v1 profile args to v2 schema for args.\n\n.. code-block:: javascript\n\n\"args\": {\n\"app\": {\n\"input_strings\": \"capitalize\",\n\"tc_action\": \"Capitalize\"\n}\n},\n\"default\": {\n\"api_access_id\": \"$env.API_ACCESS_ID\",\n\"api_default_org\": \"$env.API_DEFAULT_ORG\",\n},\n\nArgs:\nprofile (dict): The dictionary containting the profile settings.", "source": "codesearchnet"}
{"code": "def _validate_instantiation_options(self, datafile, skip_json_validation):\n    \n\n    if not skip_json_validation and not validator.is_datafile_valid(datafile):\n      raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('datafile'))\n\n    if not validator.is_event_dispatcher_valid(self.event_dispatcher):\n      raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('event_dispatcher'))\n\n    if not validator.is_logger_valid(self.logger):\n      raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('logger'))\n\n    if not validator.is_error_handler_valid(self.error_handler):\n      raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('error_handler'))", "docstring": "Helper method to validate all instantiation parameters.\n\nArgs:\ndatafile: JSON string representing the project.\nskip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.\n\nRaises:\nException if provided instantiation options are valid.", "source": "juraj-google-style"}
{"code": "def get_task_ops(task_type=TaskType.ALG_CTRL):\n    \n    try:\n      return LearnToExecuteState.TASK_TYPE_OPS[task_type]\n    except KeyError:\n      raise KeyError(\"Bad task_type '%s', check config.\" % task_type)", "docstring": "Returns an operations list based on the specified task index.\n\nArgs:\ntask_type: indicates the task type used.\n\nReturns:\nList of the eligible ops.", "source": "juraj-google-style"}
{"code": "def _get_object_from_python_path(python_path):\n    python_path = python_path.split('.')\n    module_path = python_path[:(- 1)]\n    object_class = python_path[(- 1)]\n    if isinstance(module_path, list):\n        module_path = '.'.join(module_path)\n    module = import_module(module_path)\n    schema = getattr(module, object_class)\n    if isclass(schema):\n        schema = schema()\n    return schema", "docstring": "Method that will fetch a Marshmallow schema from a path to it.\n\nArgs:\npython_path (str): The string path to the Marshmallow schema.\n\nReturns:\nmarshmallow.Schema: The schema matching the provided path.\n\nRaises:\nTypeError: This is raised if the specified object isn't\na Marshmallow schema.", "source": "codesearchnet"}
{"code": "def __init__(self, location):\n        \n        super(ContextFieldExistence, self).__init__(location)\n        self.location = location\n        self.validate()", "docstring": "Construct a new ContextFieldExistence object for a vertex field from the global context.\n\nArgs:\nlocation: Location, specifying where the field was declared. Must point to a vertex.\n\nReturns:\nnew ContextFieldExistence expression which evaluates to True iff the vertex exists", "source": "juraj-google-style"}
{"code": "def to_dict(self) -> Dict[str, Any]:\n    output = copy.deepcopy(self.__dict__)\n    return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\nDict[str, Any]: Dictionary of all the attributes that make up this configuration instance.", "source": "github-repos"}
{"code": "def __init__(self, server_port, stream_handler_class):\n    self._server_port = server_port\n    self._stream_handler_class = stream_handler_class\n    self._server_lock = threading.Lock()\n    self._server_started = False\n    self._stop_requested = False\n    self._debug_ops_state_change_queue = queue.Queue()\n    self._gated_grpc_debug_watches = set()\n    self._breakpoints = set()", "docstring": "Constructor.\n\nArgs:\nserver_port: (int) Port number to bind to.\nstream_handler_class: A class of the base class\n`EventListenerBaseStreamHandler` that will be used to constructor\nstream handler objects during `SendEvents` calls.", "source": "github-repos"}
{"code": "def put_many(self, items: Iterable[T], context: PipelineContext = None) -> None:\n        \n        LOGGER.info(\"Creating transform generator for items \\\"{items}\\\" for sink \\\"{sink}\\\"\".format(items=items, sink=self._sink))\n        transform_generator = (self._transform(data=item, context=context) for item in items)\n        LOGGER.info(\"Putting transform generator for items \\\"{items}\\\" into sink \\\"{sink}\\\"\".format(items=items, sink=self._sink))\n        self._sink.put_many(self._store_type, transform_generator, context)", "docstring": "Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.\n\nArgs:\nitems: An iterable (e.g. list) of objects to be inserted into the data sink.\ncontext: The context of the insertions (mutable).", "source": "juraj-google-style"}
{"code": "def epoch_to_log_line_timestamp(epoch_time, time_zone=None):\n    s, ms = divmod(epoch_time, 1000)\n    d = datetime.datetime.fromtimestamp(s, tz=time_zone)\n    return d.strftime('%m-%d %H:%M:%S.') + str(ms)", "docstring": "Converts an epoch timestamp in ms to log line timestamp format, which\nis readible for humans.\n\nArgs:\nepoch_time: integer, an epoch timestamp in ms.\ntime_zone: instance of tzinfo, time zone information.\nUsing pytz rather than python 3.2 time_zone implementation for\npython 2 compatibility reasons.\n\nReturns:\nA string that is the corresponding timestamp in log line timestamp\nformat.", "source": "github-repos"}
{"code": "def upload(s3_conn, filepath, s3_path):\n    (bucket_name, prefix) = split_s3_path(s3_path)\n    bucket = s3_conn.get_bucket(bucket_name)\n    filename = os.path.basename(filepath)\n    key = boto.s3.key.Key(bucket=bucket, name='{}/{}'.format(prefix, filename))\n    logging.info('uploading from %s to %s', filepath, key)\n    key.set_contents_from_filename(filepath)", "docstring": "Uploads the given file to s3\n\nArgs:\ns3_conn: (boto.s3.connection) an s3 connection\nfilepath (str) the local filename\ns3_path (str) the destination path on s3", "source": "codesearchnet"}
{"code": "def fileSave(self, filePath=None, updatePath=False):\n    if (not filePath):\n        filePath = self.filePath\n    if (not os.path.isfile(filePath)):\n        print((\"Data file '%s' does not exist, will create new file.\" % filePath))\n        if (not os.path.exists(os.path.split(filePath)[0])):\n            os.makedirs(os.path.split(filePath)[0])\n    dataJsonString = json.dumps(self.data, indent=4, sort_keys=True)\n    print((\"Writing to file '%s' ... \" % filePath), end='', flush=True)\n    with open(filePath, 'w') as fileout:\n        fileout.write(dataJsonString)\n    print('Wrote file!')\n    if updatePath:\n        self.filePath = filePath", "docstring": "Write the internal JSON data dictionary to a JSON data file.\n\nIf no file path is provided, the stored data file path will be used.\n\nArgs:\nfilePath (Optional[str]): A relative or absolute path to a\n'.json' file. Defaults to None.\nupdatePath (Optional[bool]): Specifies whether or not to update\nthe stored data file path. Defaults to False.", "source": "codesearchnet"}
{"code": "def get_substring_idxs(substr, string):\n    return [match.start() for match in re.finditer(substr, string)]", "docstring": "Return a list of indexes of substr. If substr not found, list is\nempty.\n\nArguments:\nsubstr (str): Substring to match.\nstring (str): String to match in.\n\nReturns:\nlist of int: Start indices of substr.", "source": "codesearchnet"}
{"code": "def remove_padding(sequence):\n  \n  length = sequence.pop('length')\n  sequence = tools.nested.map(lambda tensor: tensor[:length], sequence)\n  return sequence", "docstring": "Selects the used frames of a sequence, up to its length.\n\nThis function does not expect a batch of sequences, but a single sequence.\nThe sequence must be a dict with `length` key, which will removed from the\nresult.\n\nArgs:\nsequence: Nested dict of tensors with time dimension.\n\nReturns:\nNested dict of tensors with padding elements and `length` key removed.", "source": "juraj-google-style"}
{"code": "def pprnt(input, return_data=False):\n    HEADER = '\\x1b[95m'\n    OKBLUE = '\\x1b[94m'\n    OKGREEN = '\\x1b[32m'\n    WARNING = '\\x1b[93m'\n    FAIL = '\\x1b[91m'\n    ENDC = '\\x1b[0m'\n    BOLD = '\\x1b[1m'\n    UNDERLINE = '\\x1b[4m'\n    import json, re\n    result = json.dumps(input, sort_keys=True, indent=4)\n    result = re.sub('(\")(\\\\w*?_id)(\":)', ('\\\\1%s%s\\\\2%s\\\\3' % (BOLD, HEADER, ENDC)), result)\n    result = re.sub('(\")(\\\\w*?_set)(\":)', ('\\\\1%s%s\\\\2%s\\\\3' % (BOLD, HEADER, ENDC)), result)\n    result = re.sub('(\\\\n *?\")(\\\\w*?)(\":)', ('\\\\1%s%s\\\\2%s\\\\3' % (BOLD, OKGREEN, ENDC)), result)\n    if (not return_data):\n        print(result)\n    else:\n        return result", "docstring": "Prettier print for nested data\n\nArgs:\ninput: Input data\nreturn_data (bool): Default False. Print outs if False, returns if True.\nReturns:\nNone | Pretty formatted text representation of input data.", "source": "codesearchnet"}
{"code": "def _get_args_and_defaults(args, defaults):\n    \n    defaults = defaults or []\n    args_and_defaults = [(argument, default) for (argument, default)\n                         in zip_longest(args[::-1], defaults[::-1],\n                                        fillvalue=NoDefault)]\n    return args_and_defaults[::-1]", "docstring": "Return a list of 2-tuples - the argument name and its default value or\na special value that indicates there is no default value.\n\nArgs:\nargs: list of argument name\ndefaults: tuple of default values", "source": "juraj-google-style"}
{"code": "def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n    if not os.path.isdir(save_directory):\n        logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n        return\n    artists_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])\n    with open(artists_file, 'w', encoding='utf-8') as f:\n        f.write(json.dumps(self.artists_encoder, ensure_ascii=False))\n    genres_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])\n    with open(genres_file, 'w', encoding='utf-8') as f:\n        f.write(json.dumps(self.genres_encoder, ensure_ascii=False))\n    lyrics_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])\n    with open(lyrics_file, 'w', encoding='utf-8') as f:\n        f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))\n    return (artists_file, genres_file, lyrics_file)", "docstring": "Saves the tokenizer's vocabulary dictionary to the provided save_directory.\n\nArgs:\nsave_directory (`str`):\nA path to the directory where to saved. It will be created if it doesn't exist.\n\nfilename_prefix (`Optional[str]`, *optional*):\nA prefix to add to the names of the files saved by the tokenizer.", "source": "github-repos"}
{"code": "def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):\n    tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)\n    if tf_type is None:\n        raise ValueError('Unsupported enum {}. The valid map of enum to tf types is : {}'.format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))\n    return tf_type", "docstring": "Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).\n\nArgs:\ntflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)\n\nRaises:\nValueError: If an invalid tflite enum type is provided.\n\nReturns:\ntf type (eg: tf.float32)", "source": "github-repos"}
{"code": "def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):\n        \n\n        \n        \n        oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol])\n        oxid_new = math.floor(oxid_old + 1)\n        \n        if oxid_new > oxid_el.max_oxidation_state:\n            return numa\n\n        \n        spec_old = Specie(oxid_el.symbol, oxid_old)\n        spec_new = Specie(oxid_el.symbol, oxid_new)\n        specamt = spec_amts_oxi[spec_old]\n        spec_amts_oxi = {sp: amt for sp, amt in spec_amts_oxi.items() if sp != spec_old}\n        spec_amts_oxi[spec_new] = specamt\n        spec_amts_oxi = Composition(spec_amts_oxi)\n\n        \n        oxi_noA = sum([spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if\n                       spec.symbol not in self.cation.symbol])\n        a = max(0, -oxi_noA / self.cation_charge)\n        numa = numa.union({a})\n\n        \n        if a == 0:\n            return numa\n        else:\n            for oxid_el in oxid_els:\n                numa = numa.union(\n                    self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa))\n            return numa", "docstring": "This is a helper method for get_removals_int_oxid!\n\nArgs:\nspec_amts_oxi - a dict of species to their amounts in the structure\noxid_el - the element to oxidize\noxid_els - the full list of elements that might be oxidized\nnuma - a running set of numbers of A cation at integer oxidation steps\nReturns:\na set of numbers A; steps for for oxidizing oxid_el first, then the other oxid_els in this list", "source": "juraj-google-style"}
{"code": "def __init__(self, instruments, scripts = None, name=None, settings=None, log_function=None, data_path = None):\n        \n\n        Script.__init__(self, name, settings=settings, scripts=scripts, instruments=instruments, log_function=log_function, data_path = data_path)\n        self.data = {'plant_output': deque(maxlen=self.settings['buffer_length']),\n                     'control_output': deque(maxlen=self.settings['buffer_length'])}", "docstring": "Example of a script that emits a QT signal for the gui\nArgs:\nname (optional): name of script, if empty same as class name\nsettings (optional): settings for this script, if empty same as default settings", "source": "juraj-google-style"}
{"code": "def do_labels_update(self, info, labels):\n    if self.update_label_func:\n        self.update_label_func(self.label_name, info, labels)", "docstring": "Updates a dictionary of labels using the assigned update_op_func\n\nArgs:\ninfo (:class:`endpoints_management.control.report_request.Info`): the\ninfo instance to update\nlabels (dict[string[string]]): the labels dictionary\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "codesearchnet"}
{"code": "def convert_tanh(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting tanh ...')\n    if (names == 'short'):\n        tf_name = ('TANH' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    tanh = keras.layers.Activation('tanh', name=tf_name)\n    layers[scope_name] = tanh(layers[inputs[0]])", "docstring": "Convert tanh layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def query(self, watch_key, time_indices=None, slicing=None, mapping=None):\n    if (watch_key not in self._tensor_data):\n        raise KeyError(('watch_key not found: %s' % watch_key))\n    if (time_indices is None):\n        time_indices = '-1'\n    time_slicing = tensor_helper.parse_time_indices(time_indices)\n    all_time_indices = list(range(self._tensor_data[watch_key].num_total()))\n    sliced_time_indices = all_time_indices[time_slicing]\n    if (not isinstance(sliced_time_indices, list)):\n        sliced_time_indices = [sliced_time_indices]\n    recombine_and_map = False\n    step_mapping = mapping\n    if ((len(sliced_time_indices) > 1) and (mapping not in (None,))):\n        recombine_and_map = True\n        step_mapping = None\n    output = []\n    for index in sliced_time_indices:\n        value = self._tensor_data[watch_key].query(index)[0]\n        if ((value is not None) and (not isinstance(value, debug_data.InconvertibleTensorProto))):\n            output.append(tensor_helper.array_view(value, slicing=slicing, mapping=step_mapping)[2])\n        else:\n            output.append(None)\n    if recombine_and_map:\n        if (mapping == 'image/png'):\n            output = tensor_helper.array_to_base64_png(output)\n        elif (mapping and (mapping != 'none')):\n            logger.warn('Unsupported mapping mode after recomining time steps: %s', mapping)\n    return output", "docstring": "Query tensor store for a given watch_key.\n\nArgs:\nwatch_key: The watch key to query.\ntime_indices: A numpy-style slicing string for time indices. E.g.,\n`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.\nslicing: A numpy-style slicing string for individual time steps.\nmapping: An mapping string or a list of them. Supported mappings:\n`{None, 'image/png', 'health-pill'}`.\n\nReturns:\nThe potentially sliced values as a nested list of values or its mapped\nformat. A `list` of nested `list` of values.\n\nRaises:\nValueError: If the shape of the sliced array is incompatible with mapping\nmode. Or if the mapping type is invalid.", "source": "codesearchnet"}
{"code": "def _capture_by_ref(self, graph: Any, lam: Callable[[], Any], key: Hashable=None) -> Any:\n    if key is not None and key in self._by_ref_internal:\n        return self._by_ref_internal[key]\n    if key is None:\n        key = len(self._by_ref_internal)\n        while key in self._by_ref_internal:\n            key += 1\n    value_nested = lam()\n    capture_trace_type = trace_type.from_value(value_nested)\n    ctx = trace_type.InternalPlaceholderContext(graph)\n    internal = capture_trace_type.placeholder_value(ctx)\n\n    def lam_fn():\n        value = lam()\n        return capture_trace_type.to_tensors(value)\n    self._by_ref_external[key] = lam_fn\n    self._by_ref_internal[key] = internal\n    self._by_ref_tracetype[key] = capture_trace_type\n    return self._by_ref_internal[key]", "docstring": "Used during tracing process to create/retrive by-ref captures.\n\nArgs:\ngraph: The FuncGraph that captures this tensor.\nlam: A callable that takes no arguments and returns tensor captures.\nkey: A hashable identifier.\n\nReturns:\nTensor from this FuncGraph.", "source": "github-repos"}
{"code": "def materialize(self, ref, table_name=None, index_columns=None, logger=None):\n        \n        from ambry.library import Library\n        assert isinstance(self._library, Library)\n\n        logger.debug('Materializing warehouse partition.\\n    partition: {}'.format(ref))\n        partition = self._library.partition(ref)\n\n        connection = self._backend._get_connection()\n\n        return self._backend.install(connection, partition, table_name=table_name,\n                                     index_columns=index_columns, materialize=True, logger=logger)", "docstring": "Creates materialized table for given partition reference.\n\nArgs:\nref (str): id, vid, name or vname of the partition.\n\nReturns:\nstr: name of the partition table in the database.", "source": "juraj-google-style"}
{"code": "def get_proj(prj_code):\n    \n    if prj_code in CUSTOM_PRJ:\n        proj = pyproj.Proj(CUSTOM_PRJ[prj_code])\n    else:\n        proj = pyproj.Proj(init=prj_code)\n    return proj", "docstring": "Helper method for handling projection codes that are unknown to pyproj\n\nArgs:\nprj_code (str): an epsg proj code\n\nReturns:\nprojection: a pyproj projection", "source": "juraj-google-style"}
{"code": "def requires_submit(func):\n    \n    @functools.wraps(func)\n    def _wrapper(self, *args, **kwargs):\n        if self._future is None:\n            raise JobError(\"Job not submitted yet!. You have to .submit() first!\")\n        return func(self, *args, **kwargs)\n    return _wrapper", "docstring": "Decorator to ensure that a submit has been performed before\ncalling the method.\n\nArgs:\nfunc (callable): test function to be decorated.\n\nReturns:\ncallable: the decorated function.", "source": "juraj-google-style"}
{"code": "def get(self, addresses):\n        \n\n        with self._lock:\n            results = []\n            for add in addresses:\n                self.validate_read(add)\n                results.append(self._get(add))\n            return results", "docstring": "Returns the value in this context, or None, for each address in\naddresses. Useful for gets on the context manager.\n\nArgs:\naddresses (list of str): The addresses to return values for, if\nwithin this context.\n\nReturns:\nresults (list of bytes): The values in state for these addresses.", "source": "juraj-google-style"}
{"code": "def get_template_name(env, pipeline_type):\n    pipeline_base = 'pipeline/pipeline'\n    template_name_format = '{pipeline_base}'\n    if env.startswith('prod'):\n        template_name_format = (template_name_format + '_{env}')\n    else:\n        template_name_format = (template_name_format + '_stages')\n    if (pipeline_type != 'ec2'):\n        template_name_format = (template_name_format + '_{pipeline_type}')\n    template_name_format = (template_name_format + '.json.j2')\n    template_name = template_name_format.format(pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)\n    return template_name", "docstring": "Generates the correct template name based on pipeline type\n\nArgs:\nenv (str): environment to generate templates for\npipeline_type (str): Type of pipeline like ec2 or lambda\n\nReturns:\nstr: Name of template", "source": "codesearchnet"}
{"code": "def inception_v3_arg_scope(weight_decay=0.00004,\n                           stddev=0.1,\n                           batch_norm_var_collection='moving_vars'):\n  \n  batch_norm_params = {\n      \n      'decay': 0.9997,\n      \n      'epsilon': 0.001,\n      \n      'updates_collections': tf.GraphKeys.UPDATE_OPS,\n      \n      'variables_collections': {\n          'beta': None,\n          'gamma': None,\n          'moving_mean': [batch_norm_var_collection],\n          'moving_variance': [batch_norm_var_collection],\n      }\n  }\n\n  \n  with slim.arg_scope([slim.conv2d, slim.fully_connected],\n                      weights_regularizer=slim.l2_regularizer(weight_decay)):\n    with slim.arg_scope([slim.conv2d],\n                        weights_initializer=tf.truncated_normal_initializer(stddev=stddev),\n                        activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,\n                        normalizer_params=batch_norm_params) as sc:\n      return sc", "docstring": "Defines the default InceptionV3 arg scope.\n\nArgs:\nweight_decay: The weight decay to use for regularizing the model.\nstddev: The standard deviation of the trunctated normal weight initializer.\nbatch_norm_var_collection: The name of the collection for the batch norm\nvariables.\n\nReturns:\nAn `arg_scope` to use for the inception v3 model.", "source": "juraj-google-style"}
{"code": "def remove_user(username):\n    users = passwd_reader.load_users()\n    assert (username in users), (\"Username '%s' not found!\" % username)\n    del users[username]\n    passwd_reader.save_users(users)\n    home_dir = (settings.DATA_PATH + username)\n    if os.path.exists(home_dir):\n        shutil.rmtree(home_dir)\n    reload_configuration()", "docstring": "Remove user, his home directory and so on..\n\nArgs:\nusername (str): User's name.", "source": "codesearchnet"}
{"code": "def upload_files(self, file_list):\n        \n        counter = 0\n        files_to_upload = list(set(file_list) - set(self.uploaded_files)) \n        try:\n            for f in files_to_upload:\n                with open(config.get_storage_path(f), 'rb') as file_obj:\n                    response = config.SESSION.post(config.file_upload_url(), files={'file': file_obj})\n                    if response.status_code == 200:\n                        response.raise_for_status()\n                        self.uploaded_files.append(f)\n                        counter += 1\n                        config.LOGGER.info(\"\\tUploaded {0} ({count}/{total}) \".format(f, count=counter, total=len(files_to_upload)))\n                    else:\n                        self.failed_uploads[f] = response._content.decode('utf-8')\n        finally:\n            config.PROGRESS_MANAGER.set_uploading(self.uploaded_files)", "docstring": "upload_files: uploads files to server\nArgs:\nfile_list (str): list of files to upload\nReturns: None", "source": "juraj-google-style"}
{"code": "def shape_type_conversion(fn):\n\n    def wrapper(instance, input_shape):\n        if input_shape is not None:\n            input_shape = convert_shapes(input_shape, to_tuples=True)\n        output_shape = fn(instance, input_shape)\n        if output_shape is not None:\n            output_shape = convert_shapes(output_shape, to_tuples=False)\n        return output_shape\n    return wrapper", "docstring": "Decorator that handles tuple/TensorShape conversion.\n\nUsed in `compute_output_shape` and `build`.\n\nArgs:\nfn: function to wrap.\n\nReturns:\nWrapped function.", "source": "github-repos"}
{"code": "def __init__(self, device=''):\n    self._resource_handle_value = None\n    self._resource_device = device\n    self._self_destruction_context = context.eager_mode if context.executing_eagerly() else ops.get_default_graph().as_default", "docstring": "Initialize the `CapturableResource`.\n\nArgs:\ndevice: A string indicating a required placement for this resource,\ne.g. \"CPU\" if this resource must be created on a CPU device. A blank\ndevice allows the user to place resource creation, so generally this\nshould be blank unless the resource only makes sense on one device.", "source": "github-repos"}
{"code": "class PerceiverTextPreprocessor(AbstractPreprocessor):\n\n    def __init__(self, config: PerceiverConfig) -> None:\n        super().__init__()\n        self.config = config\n        self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)\n        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)\n\n    @property\n    def num_channels(self) -> int:\n        return self.config.d_model\n\n    def forward(self, inputs: torch.LongTensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False):\n        embeddings_without_pos = self.embeddings(inputs)\n        seq_length = inputs.shape[1]\n        position_ids = torch.arange(0, seq_length, device=inputs.device)\n        embeddings = embeddings_without_pos + self.position_embeddings(position_ids)\n        return (embeddings, None, embeddings_without_pos)", "docstring": "Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.\n\nThe dimensionality of the embeddings is determined by the `d_model` attribute of the configuration.\n\nArgs:\nconfig ([`PerceiverConfig`]):\nModel configuration.", "source": "github-repos"}
{"code": "def map_fn(*columns):\n    features = collections.OrderedDict(zip(column_names, columns))\n    if label_name is not None:\n        label = features.pop(label_name)\n        return (features, label)\n    return features", "docstring": "Organizes columns into a features dictionary.\n\nArgs:\n*columns: list of `Tensor`s corresponding to one csv record.\nReturns:\nAn OrderedDict of feature names to values for that particular record. If\nlabel_name is provided, extracts the label feature to be returned as the\nsecond element of the tuple.", "source": "github-repos"}
{"code": "def LoadSecondaryConfig(self, filename=None, parser=None):\n    if filename:\n        self.files.append(filename)\n        parser_cls = self.GetParserFromFilename(filename)\n        parser = parser_cls(filename=filename)\n        logging.debug('Loading configuration from %s', filename)\n        self.secondary_config_parsers.append(parser)\n    elif (parser is None):\n        raise ValueError('Must provide either a filename or a parser.')\n    clone = self.MakeNewConfig()\n    clone.MergeData(parser.RawData())\n    clone.initialized = True\n    for file_to_load in clone['Config.includes']:\n        if (not os.path.isabs(file_to_load)):\n            if (not filename):\n                raise ConfigFileNotFound(('While loading %s: Unable to include a relative path (%s) from a config without a filename' % (filename, file_to_load)))\n            file_to_load = os.path.join(os.path.dirname(filename), file_to_load)\n        clone_parser = clone.LoadSecondaryConfig(file_to_load)\n        if (not clone_parser.parsed):\n            raise ConfigFileNotFound(('Unable to load include file %s' % file_to_load))\n    self.MergeData(clone.raw_data)\n    self.files.extend(clone.files)\n    return parser", "docstring": "Loads an additional configuration file.\n\nThe configuration system has the concept of a single Primary configuration\nfile, and multiple secondary files. The primary configuration file is the\nmain file that is used by the program. Any writebacks will only be made to\nthe primary configuration file. Secondary files contain additional\nconfiguration data which will be merged into the configuration system.\n\nThis method adds an additional configuration file.\n\nArgs:\nfilename: The configuration file that will be loaded. For example\nfile:///etc/grr.conf or reg://HKEY_LOCAL_MACHINE/Software/GRR.\nparser: An optional parser can be given. In this case, the parser's data\nwill be loaded directly.\n\nReturns:\nThe parser used to parse this configuration source.\n\nRaises:\nValueError: if both filename and parser arguments are None.\nConfigFileNotFound: If a specified included file was not found.", "source": "codesearchnet"}
{"code": "def set_callback_parameters(callback_list, model, do_validation=False, batch_size=None, epochs=None, steps_per_epoch=None, samples=None, verbose=1, mode=ModeKeys.TRAIN):\n    metric_names = model.metrics_names\n    for cbk in callback_list:\n        if isinstance(cbk, (BaseLogger, ProgbarLogger)):\n            cbk.stateful_metrics = metric_names[1:]\n    callback_metrics = []\n    if mode != ModeKeys.PREDICT:\n        callback_metrics = copy.copy(metric_names)\n        if do_validation:\n            callback_metrics += ['val_' + n for n in metric_names]\n    callback_params = {'batch_size': batch_size, 'epochs': epochs, 'steps': steps_per_epoch, 'samples': samples, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics}\n    callback_list.set_params(callback_params)", "docstring": "Sets callback parameters.\n\nArgs:\ncallback_list: CallbackList instance.\nmodel: Model being trained.\ndo_validation: Whether or not validation loop will be run.\nbatch_size: Number of samples per batch.\nepochs: Number of epoch to train.\nsteps_per_epoch: Number of batches to run per training epoch.\nsamples: Number of training samples.\nverbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.\nmode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.\nWhich loop mode to configure callbacks for.", "source": "github-repos"}
{"code": "def GetQueryResults(self, request, global_params=None):\n    config = self.GetMethodConfig('GetQueryResults')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Retrieves the results of a query job.\n\nArgs:\nrequest: (BigqueryJobsGetQueryResultsRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(GetQueryResultsResponse) The response message.", "source": "github-repos"}
{"code": "def matches(self, desc):\n        \n        return (self.metric_name == desc.name and\n                self.kind == desc.metricKind and\n                self.value_type == desc.valueType)", "docstring": "Determines if a given metric descriptor matches this enum instance\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the\ninstance to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "juraj-google-style"}
{"code": "def get_records(self, name):\n        \n        if name in self._cache:\n            return self._cache[name].values()\n        else:\n            return []", "docstring": "Return all the records for the given name in the cache.\n\nArgs:\nname (string): The name which the required models are stored under.\n\nReturns:\nlist: A list of :class:`cinder_data.model.CinderModel` models.", "source": "juraj-google-style"}
{"code": "def serialize(self, user=None):\n        \n        return {\n            'content': self.body,\n            'type': self.typ,\n            'updated_at': self.updated_at,\n            'timestamp': self.updated_at,\n            'is_update': not hasattr(self, 'unsaved'),\n            'attachments': [attachment.serialize() for attachment in self.attachment_set],\n            'title': self.msg_title,\n            'url': self.url,\n            'sender_name': self.sender.full_name,\n            'sender_key': self.sender.key,\n            'channel_key': self.channel.key,\n            'cmd': 'message',\n            'avatar_url': self.sender.avatar,\n            'key': self.key,\n        }", "docstring": "Serializes message for given user.\n\nNote:\nShould be called before first save(). Otherwise \"is_update\" will get wrong value.\n\nArgs:\nuser: User object\n\nReturns:\nDict. JSON serialization ready dictionary object", "source": "juraj-google-style"}
{"code": "def lookup_package(self, definition_name):\n    while True:\n        descriptor = self.lookup_descriptor(definition_name)\n        if isinstance(descriptor, FileDescriptor):\n            return descriptor.package\n        else:\n            index = definition_name.rfind('.')\n            if (index < 0):\n                return None\n            definition_name = definition_name[:index]", "docstring": "Determines the package name for any definition.\n\nDetermine the package that any definition name belongs to. May\ncheck parent for package name and will resolve missing\ndescriptors if provided descriptor loader.\n\nArgs:\ndefinition_name: Definition name to find package for.", "source": "codesearchnet"}
{"code": "def run_amylpred2(self, seq, outdir, run_amylmuts=False):\n    outdir_amylpred = op.join(outdir, 'AMYLPRED2_results')\n    if (not op.exists(outdir_amylpred)):\n        os.mkdir(outdir_amylpred)\n    url = 'http:\n    cj = CookieJar()\n    opener = build_opener(HTTPCookieProcessor(cj))\n    formdata = {'email': self.email, 'password': self.password}\n    data_encoded = urlencode(formdata)\n    data_encoded = data_encoded.encode('ASCII')\n    response = opener.open(url, data_encoded)\n    methods = ['AGGRESCAN', 'NETCSSP', 'PAFIG', 'APD', 'AMYLPATTERN', 'SECSTR', 'BSC', 'WALTZ', 'CONFENERGY', 'TANGO']\n    if run_amylmuts:\n        methods.append('AMYLMUTS')\n    output = {}\n    timeCounts = 0\n    for met in methods:\n        existing_results = glob.glob(op.join(outdir_amylpred, '*_{}.txt'.format(met)))\n        if existing_results:\n            results_file = existing_results[0]\n        else:\n            values = {'seq_data': seq, 'method': met}\n            data = urlencode(values)\n            data = data.encode('ASCII')\n            url_input = 'http:\n            response = opener.open(url_input, data)\n            result = str(response.read())\n            ind = str.find(result, 'Job ID')\n            result2 = result[ind:(ind + 50)]\n            ind1 = str.find(result2, ':')\n            ind2 = str.find(result2, '<BR>')\n            job_id = result2[(ind1 + 2):ind2]\n            url_result = (('http:\n            print(url_result)\n            print(('Waiting for %s results' % met), end='.')\n            while True:\n                result = urlopen(url_result).read()\n                if (not result):\n                    time.sleep(1)\n                    timeCounts += 1\n                    print('.', end='')\n                else:\n                    response = requests.get(url_result)\n                    break\n            results_file = op.join(outdir_amylpred, '{}_{}.txt'.format(url_result.split('/')[(- 1)].strip('.txt'), met))\n            with open(results_file, 'wb') as handle:\n                for data in response.iter_content():\n                    handle.write(data)\n            print('')\n        (method, hits) = self.parse_method_results(results_file, met)\n        output[met] = hits\n    if (timeCounts != 0):\n        print(('Time spent: %d seconds' % timeCounts))\n    return output", "docstring": "Run all methods on the AMYLPRED2 web server for an amino acid sequence and gather results.\n\nResult files are cached in ``/path/to/outdir/AMYLPRED2_results``.\n\nArgs:\nseq (str): Amino acid sequence as a string\noutdir (str): Directory to where output files should be saved\nrun_amylmuts (bool): If AMYLMUTS method should be run, default False\n\nReturns:\ndict: Result for each method run", "source": "codesearchnet"}
{"code": "def extract(self, destination):\n        \n\n        if os.path.exists(destination):\n            raise OSError(20, 'Destination exists', destination)\n\n        self.__extract_directory(\n            '.',\n            self.files['files'],\n            destination\n        )", "docstring": "Extracts the contents of the archive to the specifed directory.\n\nArgs:\ndestination (str):\nPath to an empty directory to extract the files to.", "source": "juraj-google-style"}
{"code": "def remove(self, uids: Iterable[int]) -> None:\n        \n        for uid in uids:\n            self._recent.discard(uid)\n            self._flags.pop(uid, None)", "docstring": "Remove any session flags for the given message.\n\nArgs:\nuids: The message UID values.", "source": "juraj-google-style"}
{"code": "def write_script(script, tempdir):\n    name = ('script' + self.suffix)\n    path = os.path.join(tempdir, name)\n    with open(path, 'w') as f:\n        f.write('\\n'.join(script))\n    return path", "docstring": "Write script to a temporary directory\n\nArguments:\nscript (list): Commands which to put into a file\n\nReturns:\nAbsolute path to script", "source": "codesearchnet"}
{"code": "def deserialize_pem(cert_pem):\n    \n    if isinstance(cert_pem, str):\n        cert_pem = cert_pem.encode(\"utf-8\")\n    return cryptography.x509.load_pem_x509_certificate(\n        data=cert_pem, backend=cryptography.hazmat.backends.default_backend()\n    )", "docstring": "Deserialize PEM (Base64) encoded X.509 v3 certificate.\n\nArgs:\ncert_pem: str or bytes\nPEM (Base64) encoded X.509 v3 certificate\n\nReturns:\ncert_obj: cryptography.Certificate", "source": "juraj-google-style"}
{"code": "def show(self, obj=None, browser=None, new='tab'):\n    if (obj and (obj not in self.document.roots)):\n        self.document.add_root(obj)\n    show_session(session=self, browser=browser, new=new)", "docstring": "Open a browser displaying this session.\n\nArgs:\nobj (LayoutDOM object, optional) : a Layout (Row/Column),\nPlot or Widget object to display. The object will be added\nto the session's document.\n\nbrowser (str, optional) : browser to show with (default: None)\nFor systems that support it, the **browser** argument allows\nspecifying which browser to display in, e.g. \"safari\", \"firefox\",\n\"opera\", \"windows-default\" (see the ``webbrowser`` module\ndocumentation in the standard lib for more details).\n\nnew (str, optional) : new file output mode (default: \"tab\")\nFor file-based output, opens or raises the browser window\nshowing the current output file.  If **new** is 'tab', then\nopens a new tab. If **new** is 'window', then opens a new window.", "source": "codesearchnet"}
{"code": "def _DropCommonSuffixes(filename):\n  \n  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',\n                 'inl.h', 'impl.h', 'internal.h'):\n    if (filename.endswith(suffix) and len(filename) > len(suffix) and\n        filename[-len(suffix) - 1] in ('-', '_')):\n      return filename[:-len(suffix) - 1]\n  return os.path.splitext(filename)[0]", "docstring": "Drops common suffixes like _test.cc or -inl.h from filename.\n\nFor example:\n>>> _DropCommonSuffixes('foo/foo-inl.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/bar/foo.cc')\n'foo/bar/foo'\n>>> _DropCommonSuffixes('foo/foo_internal.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n'foo/foo_unusualinternal'\n\nArgs:\nfilename: The input filename.\n\nReturns:\nThe filename with the common suffix removed.", "source": "juraj-google-style"}
{"code": "def unregisterObserver(self, observer):\n        \n        if observer in self.m_observers:\n            self.m_observers.remove(observer)\n        pass", "docstring": "Remove an observer from the meter update() chain.\n\nArgs:\nobserver (MeterObserver): Subclassed MeterObserver.", "source": "juraj-google-style"}
{"code": "def _CheckType(value, check_type, name, allow_none=True):\n  \n  if value is None and allow_none:\n    return\n  if not isinstance(value, check_type):\n    raise TypeError('%s type doesn\\'t match %s.' % (name, check_type))", "docstring": "Check that the type of an object is acceptable.\n\nArgs:\nvalue: The object whose type is to be checked.\ncheck_type: The type that the object must be an instance of.\nname: Name of the object, to be placed in any error messages.\nallow_none: True if value can be None, false if not.\n\nRaises:\nTypeError: If value is not an acceptable type.", "source": "juraj-google-style"}
{"code": "def _ExtractOAuth2Client(product_yaml_key, product_data, proxy_config):\n  \n  oauth2_kwargs = {\n      'proxy_config': proxy_config\n  }\n\n  if all(config in product_data for config in _OAUTH2_INSTALLED_APP_KEYS):\n    oauth2_args = [\n        product_data['client_id'], product_data['client_secret'],\n        product_data['refresh_token']\n    ]\n    oauth2_client = googleads.oauth2.GoogleRefreshTokenClient\n    for key in _OAUTH2_INSTALLED_APP_KEYS:\n      del product_data[key]\n  elif all(config in product_data for config in _OAUTH2_SERVICE_ACCT_KEYS):\n    oauth2_args = [\n        product_data['path_to_private_key_file'],\n        googleads.oauth2.GetAPIScope(product_yaml_key),\n    ]\n    oauth2_kwargs.update({\n        'sub': product_data.get('delegated_account')\n    })\n    oauth2_client = googleads.oauth2.GoogleServiceAccountClient\n    for key in _OAUTH2_SERVICE_ACCT_KEYS:\n      del product_data[key]\n    for optional_key in _OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL:\n      if optional_key in product_data:\n        del product_data[optional_key]\n  else:\n    raise googleads.errors.GoogleAdsValueError(\n        'Your yaml file is incorrectly configured for OAuth2. You need to '\n        'specify credentials for either the installed application flow (%s) '\n        'or service account flow (%s).' %\n        (_OAUTH2_INSTALLED_APP_KEYS, _OAUTH2_SERVICE_ACCT_KEYS))\n\n  return oauth2_client(*oauth2_args, **oauth2_kwargs)", "docstring": "Generates an GoogleOAuth2Client subclass using the given product_data.\n\nArgs:\nproduct_yaml_key: a string key identifying the product being configured.\nproduct_data: a dict containing the configurations for a given product.\nproxy_config: a ProxyConfig instance.\n\nReturns:\nAn instantiated GoogleOAuth2Client subclass.\n\nRaises:\nA GoogleAdsValueError if the OAuth2 configuration for the given product is\nmisconfigured.", "source": "juraj-google-style"}
{"code": "def x_www_form_urlencoded(post_data):\n    if isinstance(post_data, dict):\n        return '&'.join([u'{}={}'.format(key, value) for (key, value) in post_data.items()])\n    else:\n        return post_data", "docstring": "convert origin dict to x-www-form-urlencoded\n\nArgs:\npost_data (dict):\n{\"a\": 1, \"b\":2}\n\nReturns:\nstr:\na=1&b=2", "source": "codesearchnet"}
{"code": "def parse_napp(napp_id):\n    \n    \n    \n    \n    \n    \n    \n    \n    regex = r'([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'\n    compiled_regex = re.compile(regex)\n\n    matched = compiled_regex.fullmatch(napp_id)\n\n    if not matched:\n        msg = '\"{}\" NApp has not the form username/napp_name[:version].'\n        raise KytosException(msg.format(napp_id))\n\n    return matched.groups()", "docstring": "Convert a napp_id in tuple with username, napp name and version.\n\nArgs:\nnapp_id: String with the form 'username/napp[:version]' (version is\noptional). If no version is found, it will be None.\n\nReturns:\ntuple: A tuple with (username, napp, version)\n\nRaises:\nKytosException: If a NApp has not the form _username/name_.", "source": "juraj-google-style"}
{"code": "def merge_scores(self, df_addition, reference_markers='all', addition_markers='all', on=['project_name', 'sample_name', 'frame_name', 'cell_index']):\n    if isinstance(reference_markers, str):\n        reference_markers = self.scored_names\n    elif (reference_markers is None):\n        reference_markers = []\n    if isinstance(addition_markers, str):\n        addition_markers = df_addition.scored_names\n    elif (addition_markers is None):\n        addition_markers = []\n    df_addition = df_addition.copy()\n    df_addition['_key'] = 1\n    df = self.merge(df_addition[(['scored_calls', '_key'] + on)].rename(columns={'scored_calls': '_addition'}), on=on, how='left')\n    df['_sub1'] = df['scored_calls'].apply((lambda x: dict(((k, x[k]) for k in reference_markers))))\n    df['_sub2'] = df['_addition'].apply((lambda x: (dict({}) if (x != x) else dict(((k, x[k]) for k in addition_markers)))))\n    df['scored_calls'] = df.apply((lambda x: {**x['_sub1'], **x['_sub2']}), 1)\n    df = df.drop(columns=['_sub1', '_sub2', '_addition'])\n    df = (df.drop(columns='_key').copy(), df[df['_key'].isna()].drop(columns='_key').copy())\n    if self.microns_per_pixel:\n        df[0].microns_per_pixel = self.microns_per_pixel\n    if self.microns_per_pixel:\n        df[1].microns_per_pixel = self.microns_per_pixel\n    return df", "docstring": "Combine CellDataFrames that differ by score composition\n\nArgs:\ndf_addition (CellDataFrame): The CellDataFrame to merge scores in from\nreference_markers (list): which scored call names to keep in the this object (default: all)\naddition_markers (list): which scored call names to merge in (default: all)\non (list): the features to merge cells on\n\nReturns:\nCellDataFrame,CellDataFrame: returns a passing CellDataFrame where merge criteria were met and a fail CellDataFrame where merge criteria were not met.", "source": "codesearchnet"}
{"code": "def get_member(self, id='me', name=None):\n    return self.create_member(dict(id=id, fullName=name))", "docstring": "Get a member or your current member if `id` wasn't given.\n\nReturns:\nMember: The member with the given `id`, defaults to the\nlogged in member.", "source": "codesearchnet"}
{"code": "def process_streamer(self, streamer, callback=None):\n        \n\n        index = streamer.index\n\n        if index in self._in_progress_streamers:\n            raise InternalError(\"You cannot add a streamer again until it has finished streaming.\")\n\n        queue_item = QueuedStreamer(streamer, callback)\n        self._in_progress_streamers.add(index)\n\n        self._logger.debug(\"Streamer %d: queued to send %d readings\", index, queue_item.initial_count)\n        self._queue.put_nowait(queue_item)", "docstring": "Start streaming a streamer.\n\nArgs:\nstreamer (DataStreamer): The streamer itself.\ncallback (callable): An optional callable that will be called as:\ncallable(index, success, highest_id_received_from_other_side)", "source": "juraj-google-style"}
{"code": "def add_command(self, command):\n    if self._commands and command == self._commands[-1]:\n        return\n    if not isinstance(command, str):\n        raise TypeError('Attempt to enter non-str entry to command history')\n    self._commands.append(command)\n    if len(self._commands) > self._limit:\n        self._commands = self._commands[-self._limit:]\n    self._add_command_to_history_file(command)", "docstring": "Add a command to the command history.\n\nArgs:\ncommand: The history command, as a str.\n\nRaises:\nTypeError: if command is not a str.", "source": "github-repos"}
{"code": "def StartFlowAndWorker(client_id, flow_name, **kwargs):\n    queue = rdfvalue.RDFURN(('DEBUG-%s-' % getpass.getuser()))\n    if ('token' in kwargs):\n        token = kwargs.pop('token')\n    else:\n        token = access_control.ACLToken(username='GRRConsole')\n    session_id = flow.StartAFF4Flow(client_id=client_id, flow_name=flow_name, queue=queue, token=token, **kwargs)\n    worker_thrd = worker_lib.GRRWorker(queues=[queue], token=token, threadpool_size=1)\n    while True:\n        try:\n            worker_thrd.RunOnce()\n        except KeyboardInterrupt:\n            print('exiting')\n            worker_thrd.thread_pool.Join()\n            break\n        time.sleep(2)\n        with aff4.FACTORY.Open(session_id, token=token) as flow_obj:\n            if (not flow_obj.GetRunner().IsRunning()):\n                break\n    worker_thrd.thread_pool.Join()\n    return session_id", "docstring": "Launches the flow and worker and waits for it to finish.\n\nArgs:\nclient_id: The client common name we issue the request.\nflow_name: The name of the flow to launch.\n**kwargs: passthrough to flow.\n\nReturns:\nA flow session id.\n\nNote: you need raw access to run this flow as it requires running a worker.", "source": "codesearchnet"}
{"code": "def reset_from_seed(self, seed):\n    state = create_rng_state(seed, self.algorithm)\n    self._state_var.assign(state)", "docstring": "Resets the generator by a new seed.\n\nSee `from_seed` for the meaning of \"seed\".\n\nArgs:\nseed: the new seed.", "source": "github-repos"}
{"code": "def lint_command(name, program, arguments, filter_regex, filename, lines):\n    output = utils.get_output_from_cache(name, filename)\n    if (output is None):\n        call_arguments = (([program] + arguments) + [filename])\n        try:\n            output = subprocess.check_output(call_arguments, stderr=subprocess.STDOUT)\n        except subprocess.CalledProcessError as error:\n            output = error.output\n        except OSError:\n            return {filename: {'error': [(('Could not execute \"%s\".%sMake sure all ' + 'required programs are installed') % (' '.join(call_arguments), os.linesep))]}}\n        output = output.decode('utf-8')\n        utils.save_output_in_cache(name, filename, output)\n    output_lines = output.split(os.linesep)\n    if (lines is None):\n        lines_regex = '\\\\d+'\n    else:\n        lines_regex = '|'.join(map(str, lines))\n    lines_regex = ('(%s)' % lines_regex)\n    groups = ('line', 'column', 'message', 'severity', 'message_id')\n    filtered_lines = utils.filter_lines(output_lines, filter_regex.format(lines=lines_regex, filename=re.escape(filename)), groups=groups)\n    result = []\n    for data in filtered_lines:\n        comment = dict((p for p in zip(groups, data) if (p[1] is not None)))\n        if ('line' in comment):\n            comment['line'] = int(comment['line'])\n        if ('column' in comment):\n            comment['column'] = int(comment['column'])\n        if ('severity' in comment):\n            comment['severity'] = comment['severity'].title()\n        result.append(comment)\n    return {filename: {'comments': result}}", "docstring": "Executes a lint program and filter the output.\n\nExecutes the lint tool 'program' with arguments 'arguments' over the file\n'filename' returning only those lines matching the regular expression\n'filter_regex'.\n\nArgs:\nname: string: the name of the linter.\nprogram: string: lint program.\narguments: list[string]: extra arguments for the program.\nfilter_regex: string: regular expression to filter lines.\nfilename: string: filename to lint.\nlines: list[int]|None: list of lines that we want to capture. If None,\nthen all lines will be captured.\n\nReturns: dict: a dict with the extracted info from the message.", "source": "codesearchnet"}
{"code": "def __init__(self, proto_id=None, debug=False):\n    \n    facility = logging.handlers.SysLogHandler.LOG_DAEMON\n    self.logger = logger.Logger(\n        name='google-ip-forwarding', debug=debug, facility=facility)\n    self.ip_forwarding_utils = ip_forwarding_utils.IpForwardingUtils(\n        logger=self.logger, proto_id=proto_id)", "docstring": "Constructor.\n\nArgs:\nproto_id: string, the routing protocol identifier for Google IP changes.\ndebug: bool, True if debug output should write to the console.", "source": "juraj-google-style"}
{"code": "def __init__(self, prefs, kappa=2.0, omega=0.5, beta=1.0, mu=1.0,\n            phi=scipy.ones(N_NT) / N_NT,\n            freeparams=['kappa', 'omega', 'beta', 'mu', 'eta']):\n        \n        self._nsites = len(prefs)\n        assert self.nsites > 0, \"No preferences specified\"\n\n        assert all(map(lambda x: x in self.ALLOWEDPARAMS, freeparams)),\\\n                \"Invalid entry in freeparams\\nGot: {0}\\nAllowed: {1}\".format(\n                ', '.join(freeparams), ', '.join(self.ALLOWEDPARAMS))\n        self._freeparams = list(freeparams) \n\n        \n        self.pi = scipy.ndarray((self.nsites, N_AA), dtype='float')\n        assert (isinstance(prefs, list) and\n                all([isinstance(x, dict) for x in prefs])),\\\n                \"prefs is not a list of dicts\"\n        for r in range(self.nsites):\n            assert set(prefs[r].keys()) == set(AA_TO_INDEX.keys()),\\\n                    \"prefs not keyed by amino acids for site {0}\".format(r)\n            assert abs(1 - sum(prefs[r].values())) <= ALMOST_ZERO,\\\n                    \"prefs don't sum to one for site {0}\".format(r)\n            for (a, aa) in INDEX_TO_AA.items():\n                _checkParam('pi', prefs[r][aa], self.PARAMLIMITS, self.PARAMTYPES)\n                self.pi[r][a] = prefs[r][aa]\n            self.pi[r] /= self.pi[r].sum() \n\n        \n        self.pi_codon = scipy.full((self.nsites, N_CODON), -1, dtype='float')\n        self.ln_pi_codon = scipy.full((self.nsites, N_CODON), -1, dtype='float')\n        self.piAx_piAy = scipy.full((self.nsites, N_CODON, N_CODON), -1,\n                dtype='float')\n\n        \n        _checkParam('phi', phi, self.PARAMLIMITS, self.PARAMTYPES)\n        assert abs(1 - phi.sum()) <= ALMOST_ZERO, \"phi doesn't sum to 1\"\n        self.phi = phi.copy()\n        self.phi /= self.phi.sum()\n        self._eta_from_phi()\n\n        \n        self._mu = mu \n        self.kappa = kappa\n        self.omega = omega\n        self.beta = beta\n        for (name, value) in [('kappa', self.kappa), ('omega', self.omega),\n                ('beta', self.beta), ('eta', self.eta), ('mu', self.mu)]:\n            _checkParam(name, value, self.PARAMLIMITS, self.PARAMTYPES)\n\n        \n        self.piAx_piAy_beta = scipy.zeros((self.nsites, N_CODON, N_CODON),\n                dtype='float')\n        self.ln_piAx_piAy_beta = scipy.zeros((self.nsites, N_CODON, N_CODON),\n                dtype='float')\n        self.Prxy = scipy.zeros((self.nsites, N_CODON, N_CODON), dtype='float')\n        self.prx = scipy.zeros((self.nsites, N_CODON), dtype='float')\n        self.Qxy = scipy.zeros((N_CODON, N_CODON), dtype='float')\n        self.Frxy = scipy.ones((self.nsites, N_CODON, N_CODON), dtype='float')\n        self.Frxy_no_omega = scipy.ones((self.nsites, N_CODON, N_CODON),\n                dtype='float')\n        self.D = scipy.zeros((self.nsites, N_CODON), dtype='float')\n        self.A = scipy.zeros((self.nsites, N_CODON, N_CODON), dtype='float')\n        self.Ainv = scipy.zeros((self.nsites, N_CODON, N_CODON), dtype='float')\n        self.dPrxy = {}\n        self.B = {}\n        self.dprx = {}\n        for param in self.freeparams:\n            if param == 'mu':\n                self.dprx['mu'] = 0.0\n            elif self.PARAMTYPES[param] == float:\n                self.dPrxy[param] = scipy.zeros((self.nsites, N_CODON, N_CODON),\n                        dtype='float')\n                self.B[param] = scipy.zeros((self.nsites, N_CODON, N_CODON),\n                        
dtype='float')\n                self.dprx[param] = scipy.zeros((self.nsites, N_CODON), dtype='float')\n            else:\n                assert self.PARAMTYPES[param][0] == scipy.ndarray\n                paramshape = self.PARAMTYPES[param][1]\n                assert len(paramshape) == 1, \"Can't handle multi-dimensional ndarray\"\n                paramlen = paramshape[0]\n                self.dPrxy[param] = scipy.zeros((paramlen, self.nsites, N_CODON,\n                        N_CODON), dtype='float')\n                self.B[param] = scipy.zeros((paramlen, self.nsites, N_CODON,\n                        N_CODON), dtype='float')\n                self.dprx[param] = scipy.zeros((paramlen, self.nsites, N_CODON),\n                        dtype='float')\n\n        \n        self._diag_indices = scipy.diag_indices(N_CODON)\n\n        self.updateParams({}, update_all=True)", "docstring": "Initialize an `ExpCM` object.\n\nArgs:\n`prefs` (list)\nList of dicts giving amino-acid preferences for\neach site. Each dict keyed by amino acid letter\ncodes, value is pref > 0 and < 1. Must sum to 1\nat each site.\n`kappa`, `omega`, `beta`, `mu`, `phi`\nModel params described in main class doc string.\n`freeparams` (list of strings)\nSpecifies free parameters.", "source": "juraj-google-style"}
{"code": "def AsyncSleep(delay, name=None):\n    return examples_async_sleep(delay=delay, name=name)", "docstring": "Pause for `delay` seconds (which need not be an integer).\n\nThis is an asynchronous (non-blocking) version of a sleep op. It includes\nany time spent being blocked by another thread in `delay`. If it is blocked\nfor a fraction of the time specified by `delay`, it only calls `sleep`\n(actually `usleep`) only for the remainder. If it is blocked for the full\ntime specified by `delay` or more, it returns without explicitly calling\n`sleep`.\n\nArgs:\ndelay: tf.Tensor which is a scalar of type float.\nname: An optional name for the op.\n\nReturns:\nThe `delay` value.", "source": "github-repos"}
{"code": "def CheckParenthesisSpacing(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    match = Search(' (if\\\\(|for\\\\(|while\\\\(|switch\\\\()', line)\n    if match:\n        error(filename, linenum, 'whitespace/parens', 5, ('Missing space before ( in %s' % match.group(1)))\n    match = Search('\\\\b(if|for|while|switch)\\\\s*\\\\(([ ]*)(.).*[^ ]+([ ]*)\\\\)\\\\s*{\\\\s*$', line)\n    if match:\n        if (len(match.group(2)) != len(match.group(4))):\n            if (not (((match.group(3) == ';') and (len(match.group(2)) == (1 + len(match.group(4))))) or ((not match.group(2)) and Search('\\\\bfor\\\\s*\\\\(.*; \\\\)', line)))):\n                error(filename, linenum, 'whitespace/parens', 5, ('Mismatching spaces inside () in %s' % match.group(1)))\n        if (len(match.group(2)) not in [0, 1]):\n            error(filename, linenum, 'whitespace/parens', 5, ('Should have zero or one spaces inside ( and ) in %s' % match.group(1)))", "docstring": "Checks for horizontal spacing around parentheses.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def parse_meta(meta):\n    resources = {}\n    for name in meta:\n        if name.startswith('$'):\n            continue\n        resources[name] = resource = {}\n        for action in meta[name]:\n            if action.startswith('$'):\n                continue\n            (url, httpmethod) = res_to_url(name, action)\n            resource[action] = {'url': url, 'method': httpmethod}\n    url_prefix = meta.get('$url_prefix', '').rstrip('/')\n    return (url_prefix, meta['$auth']['header'].lower(), resources)", "docstring": "Parse metadata of API\n\nArgs:\nmeta: metadata of API\nReturns:\ntuple(url_prefix, auth_header, resources)", "source": "codesearchnet"}
{"code": "def configure_logging(verbosity):\n    root = logging.getLogger()\n    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s', '%y-%m-%d %H:%M:%S')\n    handler = logging.StreamHandler()\n    handler.setFormatter(formatter)\n    loglevels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]\n    if (verbosity >= len(loglevels)):\n        verbosity = (len(loglevels) - 1)\n    level = loglevels[verbosity]\n    root.setLevel(level)\n    root.addHandler(handler)", "docstring": "Set up the global logging level.\n\nArgs:\nverbosity (int): The logging verbosity", "source": "codesearchnet"}
{"code": "def _get_client_address(self, req):\n    try:\n        forwarded_for = req.get_header('X-Forwarded-For', True)\n        return forwarded_for.split(',')[0].strip()\n    except (KeyError, HTTPMissingHeader):\n        return (req.env.get('REMOTE_ADDR') if self.remote_address_fallback else None)", "docstring": "Get address from ``X-Forwarded-For`` header or use remote address.\n\nRemote address is used if the ``X-Forwarded-For`` header is not\navailable. Note that this may not be safe to depend on both without\nproper authorization backend.\n\nArgs:\nreq (falcon.Request): falcon.Request object.\n\nReturns:\nstr: client address.", "source": "codesearchnet"}
{"code": "def clr(M, **kwargs):\n    R = np.zeros(M.shape)\n    Id = [[0, 0] for i in range(M.shape[0])]\n    for i in range(M.shape[0]):\n        mu_i = np.mean(M[(i, :)])\n        sigma_i = np.std(M[(i, :)])\n        Id[i] = [mu_i, sigma_i]\n    for i in range(M.shape[0]):\n        for j in range((i + 1), M.shape[0]):\n            z_i = np.max([0, ((M[(i, j)] - Id[i][0]) / Id[i][0])])\n            z_j = np.max([0, ((M[(i, j)] - Id[j][0]) / Id[j][0])])\n            R[(i, j)] = np.sqrt(((z_i ** 2) + (z_j ** 2)))\n            R[(j, i)] = R[(i, j)]\n    return R", "docstring": "Implementation of the Context Likelihood or Relatedness Network algorithm.\n\nArgs:\nmat (numpy.ndarray): matrix, if it is a square matrix, the program assumes\nit is a relevance matrix where mat(i,j) represents the similarity content\nbetween nodes i and j. Elements of matrix should be\nnon-negative.\n\nReturns:\nmat_nd (numpy.ndarray): Output deconvolved matrix (direct dependency matrix). Its components\nrepresent direct edge weights of observed interactions.\n\n.. note::\nRef:Jeremiah J. Faith, Boris Hayete, Joshua T. Thaden, Ilaria Mogno, Jamey\nWierzbowski, Guillaume Cottarel, Simon Kasif, James J. Collins, and Timothy\nS. Gardner. Large-scale mapping and validation of escherichia coli\ntranscriptional regulation from a compendium of expression profiles.\nPLoS Biology, 2007", "source": "codesearchnet"}
{"code": "def devserver(port, admin_port, clear):\n    \n    \n    admin_port = admin_port or (port + 1)\n\n    args = [\n        '--port={}'.format(port),\n        '--admin_port={}'.format(admin_port)\n    ]\n\n    if clear:\n        args += ['--clear_datastore=yes']\n\n    with conf.within_proj_dir():\n        shell.run('dev_appserver.py . {args}'.format(args=' '.join(args)))", "docstring": "Run devserver.\n\nArgs:\nport (int):\nPort on which the app will be served.\nadmin_port (int):\nPort on which the admin interface is served.\nclear (bool):\nIf set to **True**, clear the datastore on startup.", "source": "juraj-google-style"}
{"code": "def cancel_job(self, job_id=None, job_name=None):\n        \n        return self._delegator.cancel_job(job_id=job_id, job_name = job_name)", "docstring": "Cancel a running job.\n\nArgs:\njob_id (str, optional): Identifier of job to be canceled.\njob_name (str, optional): Name of job to be canceled.\n\nReturns:\ndict: JSON response for the job cancel operation.", "source": "juraj-google-style"}
{"code": "def get_email_message(self, message_uid, message_type=\"text/plain\"):\n        \n        self._mail.select(\"inbox\")\n        result = self._mail.uid('fetch', message_uid, \"(RFC822)\")\n        msg = email.message_from_string(result[1][0][1])\n\n        try:\n            \n            for part in msg.walk():\n                if part.get_content_type() == message_type:\n                    return part.get_payload(decode=True)\n        except:\n            \n            return msg.get_payload(decode=True)", "docstring": "Fetch contents of email.\n\nArgs:\nmessage_uid (int): IMAP Message UID number.\n\nKwargs:\nmessage_type: Can be 'text' or 'html'", "source": "juraj-google-style"}
{"code": "def split(self, path: str) -> Tuple[str, str]:\n    raise NotImplementedError", "docstring": "Splits the given path into two parts.\n\nSplits the path into a pair (head, tail) such that tail contains the last\ncomponent of the path and head contains everything up to that.\n\nFor file-systems other than the local file-system, head should include the\nprefix.\n\nArgs:\npath: path as a string\nReturns:\na pair of path components as strings.", "source": "github-repos"}
{"code": "def __init__(self, message):\n        \n        super(IndexOutOfBounds, self).__init__(\n            reason=enums.ResultReason.INDEX_OUT_OF_BOUNDS,\n            message=message\n        )", "docstring": "Create an IndexOutOfBounds exception.\n\nArgs:\nmessage (string): A string containing information about the error.", "source": "juraj-google-style"}
{"code": "def _handle_request(self, request: dict) -> dict:\n        \n        request_body: bytes = request['request_body']\n        signature_chain_url: str = request['signature_chain_url']\n        signature: str = request['signature']\n        alexa_request: dict = request['alexa_request']\n\n        if not self._verify_request(signature_chain_url, signature, request_body):\n            return {'error': 'failed certificate/signature check'}\n\n        timestamp_str = alexa_request['request']['timestamp']\n        timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ')\n        now = datetime.utcnow()\n\n        delta = now - timestamp_datetime if now >= timestamp_datetime else timestamp_datetime - now\n\n        if abs(delta.seconds) > REQUEST_TIMESTAMP_TOLERANCE_SECS:\n            log.error(f'Failed timestamp check for request: {request_body.decode(\"utf-8\", \"replace\")}')\n            return {'error': 'failed request timestamp check'}\n\n        conversation_key = alexa_request['session']['user']['userId']\n\n        if conversation_key not in self.conversations.keys():\n            if self.config['multi_instance']:\n                conv_agent = self._init_agent()\n                log.info('New conversation instance level agent initiated')\n            else:\n                conv_agent = self.agent\n\n            self.conversations[conversation_key] = \\\n                Conversation(config=self.config,\n                             agent=conv_agent,\n                             conversation_key=conversation_key,\n                             self_destruct_callback=lambda: self._del_conversation(conversation_key))\n\n            log.info(f'Created new conversation, key: {conversation_key}')\n\n        conversation = self.conversations[conversation_key]\n        response = conversation.handle_request(alexa_request)\n\n        return response", "docstring": "Processes Alexa requests from skill server and returns responses to Alexa.\n\nArgs:\nrequest: Dict with Alexa request payload and metadata.\nReturns:\nresult: Alexa formatted or error response.", "source": "juraj-google-style"}
{"code": "def _find_suite_classes_in_module(module):\n    test_suites = []\n    for _, module_member in module.__dict__.items():\n        if inspect.isclass(module_member):\n            if issubclass(module_member, base_suite.BaseSuite):\n                test_suites.append(module_member)\n    return test_suites", "docstring": "Finds all test suite classes in the given module.\n\nWalk through module members and find all classes that is a subclass of\nBaseSuite.\n\nArgs:\nmodule: types.ModuleType, the module object to find test suite classes.\n\nReturns:\nA list of test suite classes.", "source": "github-repos"}
{"code": "def __init__(\n      self, maximum_number_of_file_objects=128,\n      maximum_number_of_file_systems=16):\n    \n    super(Context, self).__init__()\n    self._file_object_cache = cache.ObjectsCache(\n        maximum_number_of_file_objects)\n    self._file_system_cache = cache.ObjectsCache(\n        maximum_number_of_file_systems)", "docstring": "Initializes the resolver context object.\n\nArgs:\nmaximum_number_of_file_objects (Optional[int]): maximum number\nof file-like objects cached in the context.\nmaximum_number_of_file_systems (Optional[int]): maximum number\nof file system objects cached in the context.", "source": "juraj-google-style"}
{"code": "def render_text(text, preformatted=False):\n  \n  return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))", "docstring": "Return text formatted as a HTML\n\nArgs:\ntext: the text to render\npreformatted: whether the text should be rendered as preformatted", "source": "juraj-google-style"}
{"code": "def plot_loss_history(history, figsize=(15, 8)):\n    plt.figure(figsize=figsize)\n    plt.plot(history.history['loss'])\n    plt.plot(history.history['val_loss'])\n    plt.xlabel('\n    plt.ylabel('Loss')\n    plt.legend(['Training', 'Validation'])\n    plt.title('Loss over time')\n    plt.show()", "docstring": "Plots the learning history for a Keras model,\nassuming the validation data was provided to the 'fit' function.\n\nArgs:\nhistory: The return value from the 'fit' function.\nfigsize: The size of the plot.", "source": "codesearchnet"}
{"code": "def validate_primitive_json_representation(desc: descriptor.Descriptor, json_str: str) -> None:\n    pattern = _pattern_for_primitive(desc)\n    if pattern is not None and pattern.fullmatch(json_str) is None:\n        raise fhir_errors.InvalidFhirError(f'Unable to find pattern: {pattern!r}.')", "docstring": "Ensures that json_str matches the associated regex pattern, if one exists.\n\nArgs:\ndesc: The Descriptor of the FHIR primitive to validate.\njson_str: The JSON string to validate.\n\nRaises:\nfhir_errors.InvalidFhirError: Raised in the event that pattern is unable to\nbe matched on json_str.", "source": "github-repos"}
{"code": "def _scalar_field_to_json(field, row_value):\n    converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)\n    if (converter is None):\n        return row_value\n    return converter(row_value)", "docstring": "Maps a field and value to a JSON-safe value.\n\nArgs:\nfield ( \\\n:class:`~google.cloud.bigquery.schema.SchemaField`, \\\n):\nThe SchemaField to use for type conversion and field name.\nrow_value (any):\nValue to be converted, based on the field's type.\n\nReturns:\nany:\nA JSON-serializable object.", "source": "codesearchnet"}
{"code": "def dtype(self):\n    return self._dtype", "docstring": "The `tf.dtypes.DType` specified by this type for the RaggedTensor.\n\nExamples:\n\n>>> rt = tf.ragged.constant([[\"a\"], [\"b\", \"c\"]], dtype=tf.string)\n>>> tf.type_spec_from_value(rt).dtype\ntf.string\n\nReturns:\nA `tf.dtypes.DType` of the values in the RaggedTensor.", "source": "github-repos"}
{"code": "def _compile_aggregation_expression(self,\n                                        expr: Expression,\n                                        scope: Dict[str, TensorFluent],\n                                        batch_size: Optional[int] = None,\n                                        noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:\n        \n        etype = expr.etype\n        args = expr.args\n\n        typed_var_list = args[:-1]\n        vars_list = [var for _, (var, _) in typed_var_list]\n        expr = args[-1]\n\n        x = self._compile_expression(expr, scope)\n\n        etype2aggr = {\n            'sum':     x.sum,\n            'prod':    x.prod,\n            'avg':     x.avg,\n            'maximum': x.maximum,\n            'minimum': x.minimum,\n            'exists':  x.exists,\n            'forall':  x.forall\n        }\n\n        if etype[1] not in etype2aggr:\n            raise ValueError('Invalid aggregation expression {}.'.format(expr))\n\n        aggr = etype2aggr[etype[1]]\n        fluent = aggr(vars_list=vars_list)\n\n        return fluent", "docstring": "Compile an aggregation expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "juraj-google-style"}
{"code": "def quote(src_string, return_expr=False):\n    node = parse_string(src_string)\n    body = node.body\n    if (len(body) == 1):\n        if (isinstance(body[0], gast.Expr) and (not return_expr)):\n            out = body[0].value\n        else:\n            out = body[0]\n    else:\n        out = node\n    return out", "docstring": "Go from source code to AST nodes.\n\nThis function returns a tree without enclosing `Module` or `Expr` nodes.\n\nArgs:\nsrc_string: The source code to parse.\nreturn_expr: Whether or not to return a containing expression. This can be\nset to `True` if the result is to be part of a series of statements.\n\nReturns:\nAn AST of the given source code.", "source": "codesearchnet"}
{"code": "def get_loss_func(self, C=1.0, k=1):\n        \n        def lf(x):\n            mu, ln_var = self.encode(x)\n            batchsize = len(mu.data)\n            \n            rec_loss = 0\n            for l in six.moves.range(k):\n                z = F.gaussian(mu, ln_var)\n                rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \\\n                    / (k * batchsize)\n            self.rec_loss = rec_loss\n            self.loss = self.rec_loss + \\\n                C * gaussian_kl_divergence(mu, ln_var) / batchsize\n            return self.loss\n        return lf", "docstring": "Get loss function of VAE.\n\nThe loss value is equal to ELBO (Evidence Lower Bound)\nmultiplied by -1.\n\nArgs:\nC (int): Usually this is 1.0. Can be changed to control the\nsecond term of ELBO bound, which works as regularization.\nk (int): Number of Monte Carlo samples used in encoded vector.", "source": "juraj-google-style"}
{"code": "def predict(self, df_data, threshold=0.05, **kwargs):\n        \n        nb_jobs = kwargs.get(\"nb_jobs\", SETTINGS.NB_JOBS)\n        list_nodes = list(df_data.columns.values)\n        if nb_jobs != 1:\n            result_feature_selection = Parallel(n_jobs=nb_jobs)(delayed(self.run_feature_selection)\n                                                                (df_data, node, idx, **kwargs)\n                                                                for idx, node in enumerate(list_nodes))\n        else:\n            result_feature_selection = [self.run_feature_selection(df_data, node, idx, **kwargs) for idx, node in enumerate(list_nodes)]\n        for idx, i in enumerate(result_feature_selection):\n            try:\n                i.insert(idx, 0)\n            except AttributeError:  \n                result_feature_selection[idx] = np.insert(i, idx, 0)\n        matrix_results = np.array(result_feature_selection)\n        matrix_results *= matrix_results.transpose()\n        np.fill_diagonal(matrix_results, 0)\n        matrix_results /= 2\n\n        graph = nx.Graph()\n\n        for (i, j), x in np.ndenumerate(matrix_results):\n            if matrix_results[i, j] > threshold:\n                graph.add_edge(list_nodes[i], list_nodes[j],\n                               weight=matrix_results[i, j])\n        for node in list_nodes:\n            if node not in graph.nodes():\n                graph.add_node(node)\n        return graph", "docstring": "Predict the skeleton of the graph from raw data.\n\nReturns iteratively the feature selection algorithm on each node.\n\nArgs:\ndf_data (pandas.DataFrame): data to construct a graph from\nthreshold (float): cutoff value for feature selection scores\nkwargs (dict): additional arguments for algorithms\n\nReturns:\nnetworkx.Graph: predicted skeleton of the graph.", "source": "juraj-google-style"}
{"code": "def __init__(\n        self,\n        greedy_q_learning,\n        init_state_key\n    ):\n        \n        if isinstance(boltzmann_q_learning, BoltzmannQLearning):\n            self.__boltzmann_q_learning = boltzmann_q_learning\n        else:\n            raise TypeError()\n\n        self.__init_state_key = init_state_key", "docstring": "Init.\n\nArgs:\nboltzmann_q_learning:  is-a `BoltzmannQLearning`.\ninit_state_key:        First state key.", "source": "juraj-google-style"}
{"code": "def synthesize(self, duration):\n        \n        sr = self.samplerate.samples_per_second\n        seconds = duration / Seconds(1)\n        samples = np.random.uniform(low=-1., high=1., size=int(sr * seconds))\n        return AudioSamples(samples, self.samplerate)", "docstring": "Synthesize white noise\n\nArgs:\nduration (numpy.timedelta64): The duration of the synthesized sound", "source": "juraj-google-style"}
{"code": "def get_vis_data_from_string(self, sess, input_string):\n    encoded_inputs = self.encode(input_string)\n    out = sess.run(self.samples, {self.inputs: encoded_inputs})\n    att_mats = sess.run(self.att_mats, {self.inputs: encoded_inputs, self.targets: np.reshape(out, [1, (- 1), 1, 1])})\n    output_string = self.decode(out)\n    input_list = self.decode_list(encoded_inputs)\n    output_list = self.decode_list(out)\n    return (output_string, input_list, output_list, att_mats)", "docstring": "Constructs the data needed for visualizing attentions.\n\nArgs:\nsess: A tf.Session object.\ninput_string: The input sentence to be translated and visualized.\n\nReturns:\nTuple of (\noutput_string: The translated sentence.\ninput_list: Tokenized input sentence.\noutput_list: Tokenized translation.\natt_mats: Tuple of attention matrices; (\nenc_atts: Encoder self attention weights.\nA list of `num_layers` numpy arrays of size\n(batch_size, num_heads, inp_len, inp_len)\ndec_atts: Decoder self attention weights.\nA list of `num_layers` numpy arrays of size\n(batch_size, num_heads, out_len, out_len)\nencdec_atts: Encoder-Decoder attention weights.\nA list of `num_layers` numpy arrays of size\n(batch_size, num_heads, out_len, inp_len)\n)", "source": "codesearchnet"}
{"code": "def get_url_param(self, index, default=None):\n    params = self.get_url_params()\n    return (params[index] if (index < len(params)) else default)", "docstring": "Return url parameter with given index.\n\nArgs:\n- index: starts from zero, and come after controller and\naction names in url.", "source": "codesearchnet"}
{"code": "def _update_size(self, size, future):\n        \n        with self._size_lock:\n            \n            if size > self._size and future.done:\n                \n                self._size = size", "docstring": "Keep track of the file size during writing.\n\nIf specified size value is greater than the current size, update the\ncurrent size using specified value.\n\nUsed as callback in default \"_flush\" implementation for files supporting\nrandom write access.\n\nArgs:\nsize (int): Size value.\nfuture (concurrent.futures._base.Future): future.", "source": "juraj-google-style"}
{"code": "def from_raw(self, robj: RawObject) -> RootNode:\n        \n        cooked = self.schema.from_raw(robj)\n        return RootNode(cooked, self.schema, cooked.timestamp)", "docstring": "Create an instance node from a raw data tree.\n\nArgs:\nrobj: Dictionary representing a raw data tree.\n\nReturns:\nRoot instance node.", "source": "juraj-google-style"}
{"code": "def SetDefaultValue(self, scan_object):\n    \n    if (not isinstance(scan_object, PathFilterScanTreeNode) and\n        not isinstance(scan_object, py2to3.STRING_TYPES)):\n      raise TypeError('Unsupported scan object type.')\n\n    if self.default_value:\n      raise ValueError('Default value already set.')\n\n    self.default_value = scan_object", "docstring": "Sets the default (non-match) value.\n\nArgs:\nscan_object: a scan object, either a scan tree sub node (instance of\nPathFilterScanTreeNode) or a string containing a path.\n\nRaises:\nTypeError: if the scan object is of an unsupported type.\nValueError: if the default value is already set.", "source": "juraj-google-style"}
{"code": "def _parse_pem_data(pem_data):\n    sep = '-----BEGIN CERTIFICATE-----'\n    cert_chain = [six.b((sep + s)) for s in pem_data.split(sep)[1:]]\n    certs = []\n    load_cert = x509.load_pem_x509_certificate\n    for cert in cert_chain:\n        try:\n            certs.append(load_cert(cert, default_backend()))\n        except ValueError:\n            warnings.warn('Certificate is invalid.')\n            return False\n    return certs", "docstring": "Parse PEM-encoded X.509 certificate chain.\n\nArgs:\npem_data: str. PEM file retrieved from SignatureCertChainUrl.\n\nReturns:\nlist or bool: If url is valid, returns the certificate chain as a list\nof cryptography.hazmat.backends.openssl.x509._Certificate\ncertificates where certs[0] is the first certificate in the file; if\nurl is invalid, returns False.", "source": "codesearchnet"}
{"code": "def parse_json(json_file):\n    \n    if not os.path.exists(json_file):\n        return None\n\n    try:\n        with open(json_file, \"r\") as f:\n            info_str = f.readlines()\n            info_str = \"\".join(info_str)\n            json_info = json.loads(info_str)\n            return unicode2str(json_info)\n    except BaseException as e:\n        logging.error(e.message)\n        return None", "docstring": "Parse a whole json record from the given file.\n\nReturn None if the json file does not exists or exception occurs.\n\nArgs:\njson_file (str): File path to be parsed.\n\nReturns:\nA dict of json info.", "source": "juraj-google-style"}
{"code": "class DepthProPreActResidualLayer(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.use_batch_norm = config.use_batch_norm_in_fusion_residual\n        use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm\n        self.activation1 = nn.ReLU()\n        self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n        self.activation2 = nn.ReLU()\n        self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n        if self.use_batch_norm:\n            self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size)\n            self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size)\n\n    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n        residual = hidden_state\n        hidden_state = self.activation1(hidden_state)\n        hidden_state = self.convolution1(hidden_state)\n        if self.use_batch_norm:\n            hidden_state = self.batch_norm1(hidden_state)\n        hidden_state = self.activation2(hidden_state)\n        hidden_state = self.convolution2(hidden_state)\n        if self.use_batch_norm:\n            hidden_state = self.batch_norm2(hidden_state)\n        return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\nconfig (`[DepthProConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"}
{"code": "def lint(self, content, **kwargs):\n        \n        post_data = {'content': content}\n        data = self.http_post('/ci/lint', post_data=post_data, **kwargs)\n        return (data['status'] == 'valid', data['errors'])", "docstring": "Validate a gitlab CI configuration.\n\nArgs:\ncontent (txt): The .gitlab-ci.yml content\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabVerifyError: If the validation could not be done\n\nReturns:\ntuple: (True, []) if the file is valid, (False, errors(list))\notherwise", "source": "juraj-google-style"}
{"code": "def merge_default_with_oplog(graph, op_log=None, run_meta=None, add_trace=True, add_trainable_var=True):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    tmp_op_log = tfprof_log_pb2.OpLogProto()\n    if not graph:\n        return tmp_op_log\n    logged_ops, string_to_id = _get_logged_ops(graph, run_meta, add_trace=add_trace, add_trainable_var=add_trainable_var)\n    if not op_log:\n        tmp_op_log.log_entries.extend(logged_ops.values())\n    else:\n        all_ops = {}\n        for entry in op_log.log_entries:\n            all_ops[entry.name] = entry\n        for op_name, entry in logged_ops.items():\n            if op_name in all_ops:\n                all_ops[op_name].types.extend(entry.types)\n                if entry.float_ops > 0 and all_ops[op_name].float_ops == 0:\n                    all_ops[op_name].float_ops = entry.float_ops\n                if entry.code_def.traces and (not all_ops[op_name].code_def.traces):\n                    all_ops[op_name].code_def.MergeFrom(entry.code_def)\n            else:\n                all_ops[op_name] = entry\n        tmp_op_log.log_entries.extend(all_ops.values())\n    for s, i in string_to_id.items():\n        tmp_op_log.id_to_string[i] = s\n    return tmp_op_log", "docstring": "Merge the tfprof default extra info with caller's op_log.\n\nArgs:\ngraph: tf.Graph. If None and eager execution is not enabled, use\ndefault graph.\nop_log: OpLogProto proto.\nrun_meta: RunMetadata proto used to complete shape information.\nadd_trace: Whether to add op trace information.\nadd_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op\ntype '_trainable_variables'.\nReturns:\ntmp_op_log: Merged OpLogProto proto.", "source": "github-repos"}
{"code": "def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key):\n    return meta_graph_def.signature_def[signature_def_key].outputs", "docstring": "Gets TensorInfos for all outputs of the SignatureDef.\n\nReturns a dictionary that maps each output key to its TensorInfo for the given\nsignature_def_key in the meta_graph_def.\n\nArgs:\nmeta_graph_def: MetaGraphDef protocol buffer with the SignatureDefmap to\nlook up signature_def_key.\nsignature_def_key: A SignatureDef key string.\n\nReturns:\nA dictionary that maps output tensor keys to TensorInfos.", "source": "github-repos"}
{"code": "def set_setting(self, setting, value):\n    if (setting not in (self._expected_settings + self._optional_settings)):\n        raise exceptions.ConfigurationError(\"Setting '{0}' is not supported.\".format(setting))\n    if (setting == 'hostname'):\n        self._set_hostname(value)\n    elif (setting == 'port'):\n        self._set_port(value)\n    elif (setting == 'certificate_path'):\n        self._set_certificate_path(value)\n    elif (setting == 'key_path'):\n        self._set_key_path(value)\n    elif (setting == 'ca_path'):\n        self._set_ca_path(value)\n    elif (setting == 'auth_suite'):\n        self._set_auth_suite(value)\n    elif (setting == 'policy_path'):\n        self._set_policy_path(value)\n    elif (setting == 'enable_tls_client_auth'):\n        self._set_enable_tls_client_auth(value)\n    elif (setting == 'tls_cipher_suites'):\n        self._set_tls_cipher_suites(value)\n    elif (setting == 'logging_level'):\n        self._set_logging_level(value)\n    else:\n        self._set_database_path(value)", "docstring": "Set a specific setting value.\n\nThis will overwrite the current setting value for the specified\nsetting.\n\nArgs:\nsetting (string): The name of the setting to set (e.g.,\n'certificate_path', 'hostname'). Required.\nvalue (misc): The value of the setting to set. Type varies based\non setting. Required.\nRaises:\nConfigurationError: Raised if the setting is not supported or if\nthe setting value is invalid.", "source": "codesearchnet"}
{"code": "def upload(self, filename, filedata=None, filepath=None, **kwargs):\n    if ((filepath is None) and (filedata is None)):\n        raise GitlabUploadError('No file contents or path specified')\n    if ((filedata is not None) and (filepath is not None)):\n        raise GitlabUploadError('File contents and file path specified')\n    if (filepath is not None):\n        with open(filepath, 'rb') as f:\n            filedata = f.read()\n    url = ('/projects/%(id)s/uploads' % {'id': self.id})\n    file_info = {'file': (filename, filedata)}\n    data = self.manager.gitlab.http_post(url, files=file_info)\n    return {'alt': data['alt'], 'url': data['url'], 'markdown': data['markdown']}", "docstring": "Upload the specified file into the project.\n\n.. note::\n\nEither ``filedata`` or ``filepath`` *MUST* be specified.\n\nArgs:\nfilename (str): The name of the file being uploaded\nfiledata (bytes): The raw data of the file being uploaded\nfilepath (str): The path to a local file to upload (optional)\n\nRaises:\nGitlabConnectionError: If the server cannot be reached\nGitlabUploadError: If the file upload fails\nGitlabUploadError: If ``filedata`` and ``filepath`` are not\nspecified\nGitlabUploadError: If both ``filedata`` and ``filepath`` are\nspecified\n\nReturns:\ndict: A ``dict`` with the keys:\n* ``alt`` - The alternate text for the upload\n* ``url`` - The direct url to the uploaded file\n* ``markdown`` - Markdown for the uploaded file", "source": "codesearchnet"}
{"code": "def emboss_pepstats_parser(infile):\n    with open(infile) as f:\n        lines = f.read().split('\\n')\n    info_dict = {}\n    for l in lines[38:47]:\n        info = l.split('\\t')\n        cleaninfo = list(filter((lambda x: (x != '')), info))\n        prop = cleaninfo[0]\n        num = cleaninfo[2]\n        percent = (float(cleaninfo[(- 1)]) / float(100))\n        info_dict[(('mol_percent_' + prop.lower()) + '-pepstats')] = percent\n    return info_dict", "docstring": "Get dictionary of pepstats results.\n\nArgs:\ninfile: Path to pepstats outfile\n\nReturns:\ndict: Parsed information from pepstats\n\nTODO:\nOnly currently parsing the bottom of the file for percentages of properties.", "source": "codesearchnet"}
{"code": "def has_ontime_pane(self):\n    pass", "docstring": "Whether this trigger creates an empty pane even if there are no elements.\n\nReturns:\nTrue if this trigger guarantees that there will always be an ON_TIME pane\neven if there are no elements in that pane.", "source": "github-repos"}
{"code": "def determine_intent(self, utterance, num_results=1, include_tags=False, context_manager=None):\n        \n        parser = Parser(self.tokenizer, self.tagger)\n        parser.on('tagged_entities',\n                  (lambda result:\n                   self.emit(\"tagged_entities\", result)))\n\n        context = []\n        if context_manager:\n            context = context_manager.get_context()\n\n        for result in parser.parse(utterance, N=num_results, context=context):\n            self.emit(\"parse_result\", result)\n            \n            remaining_context = self.__get_unused_context(result, context)\n            best_intent, tags = self.__best_intent(result, remaining_context)\n            if best_intent and best_intent.get('confidence', 0.0) > 0:\n                if include_tags:\n                    best_intent['__tags__'] = tags\n                yield best_intent", "docstring": "Given an utterance, provide a valid intent.\n\nArgs:\nutterance(str): an ascii or unicode string representing natural language speech\ninclude_tags(list): includes the parsed tags (including position and confidence)\nas part of result\ncontext_manager(list): a context manager to provide context to the utterance\nnum_results(int): a maximum number of results to be returned.\n\nReturns: A generator that yields dictionaries.", "source": "juraj-google-style"}
{"code": "def create_struct(name):\n    sid = idc.GetStrucIdByName(name)\n    if (sid != idaapi.BADADDR):\n        raise exceptions.SarkStructAlreadyExists('A struct names {!r} already exists.'.format(name))\n    sid = idc.AddStrucEx((- 1), name, 0)\n    if (sid == idaapi.BADADDR):\n        raise exceptions.SarkStructCreationFailed('Struct creation failed.')\n    return sid", "docstring": "Create a structure.\n\nArgs:\nname: The structure's name\n\nReturns:\nThe sturct ID\n\nRaises:\nexceptions.SarkStructAlreadyExists: A struct with the same name already exists\nexceptions.SarkCreationFailed:  Struct creation failed", "source": "codesearchnet"}
{"code": "def update_configuration(self, timeout=(- 1)):\n    uri = '{}/configuration'.format(self.data['uri'])\n    return self._helper.update(None, uri=uri, timeout=timeout)", "docstring": "Asynchronously applies or re-applies the logical interconnect configuration to all managed interconnects.\n\nArgs:\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Logical Interconnect.", "source": "codesearchnet"}
{"code": "def block(inputs, activation='swish', drop_rate=0.0, name='', filters_in=32, filters_out=16, kernel_size=3, strides=1, expand_ratio=1, se_ratio=0.0, id_skip=True):\n    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n    filters = filters_in * expand_ratio\n    if expand_ratio != 1:\n        x = layers.Conv2D(filters, 1, padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'expand_conv')(inputs)\n        x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)\n        x = layers.Activation(activation, name=name + 'expand_activation')(x)\n    else:\n        x = inputs\n    if strides == 2:\n        x = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(x, kernel_size), name=name + 'dwconv_pad')(x)\n        conv_pad = 'valid'\n    else:\n        conv_pad = 'same'\n    x = layers.DepthwiseConv2D(kernel_size, strides=strides, padding=conv_pad, use_bias=False, depthwise_initializer=CONV_KERNEL_INITIALIZER, name=name + 'dwconv')(x)\n    x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)\n    x = layers.Activation(activation, name=name + 'activation')(x)\n    if 0 < se_ratio <= 1:\n        filters_se = max(1, int(filters_in * se_ratio))\n        se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)\n        if bn_axis == 1:\n            se_shape = (filters, 1, 1)\n        else:\n            se_shape = (1, 1, filters)\n        se = layers.Reshape(se_shape, name=name + 'se_reshape')(se)\n        se = layers.Conv2D(filters_se, 1, padding='same', activation=activation, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_reduce')(se)\n        se = layers.Conv2D(filters, 1, padding='same', activation='sigmoid', kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'se_expand')(se)\n        x = layers.multiply([x, se], name=name + 'se_excite')\n    x = layers.Conv2D(filters_out, 1, padding='same', use_bias=False, kernel_initializer=CONV_KERNEL_INITIALIZER, name=name + 'project_conv')(x)\n    x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)\n    if id_skip and strides == 1 and (filters_in == filters_out):\n        if drop_rate > 0:\n            x = layers.Dropout(drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x)\n        x = layers.add([x, inputs], name=name + 'add')\n    return x", "docstring": "An inverted residual block.\n\nArgs:\ninputs: input tensor.\nactivation: activation function.\ndrop_rate: float between 0 and 1, fraction of the input units to drop.\nname: string, block label.\nfilters_in: integer, the number of input filters.\nfilters_out: integer, the number of output filters.\nkernel_size: integer, the dimension of the convolution window.\nstrides: integer, the stride of the convolution.\nexpand_ratio: integer, scaling coefficient for the input filters.\nse_ratio: float between 0 and 1, fraction to squeeze the input filters.\nid_skip: boolean.\n\nReturns:\noutput tensor for the block.", "source": "github-repos"}
{"code": "def validate_full_name(self, full_name, timeout=-1):\n        \n        uri = self.URI + '/validateUserName/' + full_name\n        return self._client.create_with_zero_body(uri=uri, timeout=timeout)", "docstring": "Verifies if a fullName is already in use.\n\nArgs:\nfull_name:\nThe fullName to be verified.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns: True if full name is in use, False if it is not.", "source": "juraj-google-style"}
{"code": "def get_tensors_by_names(names):\n    ret = []\n    G = tfv1.get_default_graph()\n    for n in names:\n        (opn, varn) = get_op_tensor_name(n)\n        ret.append(G.get_tensor_by_name(varn))\n    return ret", "docstring": "Get a list of tensors in the default graph by a list of names.\n\nArgs:\nnames (list):", "source": "codesearchnet"}
{"code": "def unravel_staff(staff_data):\n        \n        staff_list = []\n        for role, staff_members in staff_data['data'].items():\n            for member in staff_members:\n                member['role'] = role\n                staff_list.append(member)\n        return staff_list", "docstring": "Unravels staff role dictionary into flat list of staff\nmembers with ``role`` set as an attribute.\n\nArgs:\nstaff_data(dict): Data return from py:method::get_staff\n\nReturns:\nlist: Flat list of staff members with ``role`` set to\nrole type (i.e. course_admin, instructor, TA, etc)", "source": "juraj-google-style"}
{"code": "def pad_nested_sequences(sequences, dtype='int32'):\n    \n    max_sent_len = 0\n    max_word_len = 0\n    for sent in sequences:\n        max_sent_len = max(len(sent), max_sent_len)\n        for word in sent:\n            max_word_len = max(len(word), max_word_len)\n\n    x = np.zeros((len(sequences), max_sent_len, max_word_len)).astype(dtype)\n    for i, sent in enumerate(sequences):\n        for j, word in enumerate(sent):\n            x[i, j, :len(word)] = word\n\n    return x", "docstring": "Pads nested sequences to the same length.\n\nThis function transforms a list of list sequences\ninto a 3D Numpy array of shape `(num_samples, max_sent_len, max_word_len)`.\n\nArgs:\nsequences: List of lists of lists.\ndtype: Type of the output sequences.\n\n# Returns\nx: Numpy array.", "source": "juraj-google-style"}
{"code": "def str2tuple(str_in):\n    tuple_out = safe_eval(str_in)\n    if (not isinstance(tuple_out, tuple)):\n        tuple_out = None\n    return tuple_out", "docstring": "Extracts a tuple from a string.\n\nArgs:\nstr_in (string) that contains python tuple\nReturns:\n(dict) or None if no valid tuple was found\nRaises:\n-", "source": "codesearchnet"}
{"code": "class Swinv2PatchMerging(nn.Module):\n\n    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:\n        super().__init__()\n        self.input_resolution = input_resolution\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(2 * dim)\n\n    def maybe_pad(self, input_feature, height, width):\n        should_pad = height % 2 == 1 or width % 2 == 1\n        if should_pad:\n            pad_values = (0, 0, 0, width % 2, 0, height % 2)\n            input_feature = nn.functional.pad(input_feature, pad_values)\n        return input_feature\n\n    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:\n        height, width = input_dimensions\n        batch_size, dim, num_channels = input_feature.shape\n        input_feature = input_feature.view(batch_size, height, width, num_channels)\n        input_feature = self.maybe_pad(input_feature, height, width)\n        input_feature_0 = input_feature[:, 0::2, 0::2, :]\n        input_feature_1 = input_feature[:, 1::2, 0::2, :]\n        input_feature_2 = input_feature[:, 0::2, 1::2, :]\n        input_feature_3 = input_feature[:, 1::2, 1::2, :]\n        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)\n        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)\n        input_feature = self.reduction(input_feature)\n        input_feature = self.norm(input_feature)\n        return input_feature", "docstring": "Patch Merging Layer.\n\nArgs:\ninput_resolution (`Tuple[int]`):\nResolution of input feature.\ndim (`int`):\nNumber of input channels.\nnorm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def setData(self, data, setName=None):\n        \n        if not isinstance(data, DataFrame):\n            if pd is not None and isinstance(data, pd.DataFrame):\n                data = DataFrame.fromPandas(data)\n        if setName is None:\n            lock_and_call(\n                lambda: self._impl.setData(data._impl),\n                self._lock\n            )\n        else:\n            lock_and_call(\n                lambda: self._impl.setData(data._impl, setName),\n                self._lock\n            )", "docstring": "Assign the data in the dataframe to the AMPL entities with the names\ncorresponding to the column names.\n\nArgs:\ndata: The dataframe containing the data to be assigned.\n\nsetName: The name of the set to which the indices values of the\nDataFrame are to be assigned.\n\nRaises:\nAMPLException: if the data assignment procedure was not successful.", "source": "juraj-google-style"}
{"code": "def glue_convert_examples_to_features(examples: Union[List[InputExample], 'tf.data.Dataset'], tokenizer: PreTrainedTokenizer, max_length: Optional[int]=None, task=None, label_list=None, output_mode=None):\n    warnings.warn(DEPRECATION_WARNING.format('function'), FutureWarning)\n    if is_tf_available() and isinstance(examples, tf.data.Dataset):\n        if task is None:\n            raise ValueError('When calling glue_convert_examples_to_features from TF, the task parameter is required.')\n        return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)\n    return _glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode)", "docstring": "Loads a data file into a list of `InputFeatures`\n\nArgs:\nexamples: List of `InputExamples` or `tf.data.Dataset` containing the examples.\ntokenizer: Instance of a tokenizer that will tokenize the examples\nmax_length: Maximum example length. Defaults to the tokenizer's max_len\ntask: GLUE task\nlabel_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method\noutput_mode: String indicating the output mode. Either `regression` or `classification`\n\nReturns:\nIf the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific\nfeatures. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which\ncan be fed to the model.", "source": "github-repos"}
{"code": "def import_from_file_path(path):\n    if not os.path.exists(path):\n        raise OSError('Given file path does not exist.')\n    module_name = os.path.basename(path)\n    spec = util.spec_from_file_location(module_name, path)\n    if spec is None:\n        raise OSError('Unable to load module from specified path.')\n    module = util.module_from_spec(spec)\n    spec.loader.exec_module(module)\n    return (module, module_name)", "docstring": "Performs a module import given the filename.\n\nArgs:\npath (str): the path to the file to be imported.\n\nRaises:\nIOError: if the given file does not exist or importlib fails to load it.\n\nReturns:\nTuple[ModuleType, str]: returns the imported module and the module name,\nusually extracted from the path itself.", "source": "github-repos"}
{"code": "def _get_non_string_match(self, key):\n    expression = '(?:\\\\s*)'.join(['^', 'define', '\\\\(', \"'{}'\".format(key), ',', '(.*)', '\\\\)', ';'])\n    pattern = re.compile(expression, re.MULTILINE)\n    return pattern.search(self._content)", "docstring": "Gets a MatchObject for the given key, assuming a non-string value.\n\nArgs:\nkey (str): Key of the property to look-up.\n\nReturn:\nMatchObject: The discovered match.", "source": "codesearchnet"}
{"code": "def stat(filename):\n    return stat_v2(filename)", "docstring": "Returns file statistics for a given path.\n\nArgs:\nfilename: string, path to a file\n\nReturns:\nFileStatistics struct that contains information about the path\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def add_notification_listener(self, notification_type, notification_callback):\n    \n\n    if notification_type not in self.notifications:\n      self.notifications[notification_type] = [(self.notification_id, notification_callback)]\n    else:\n      if reduce(lambda a, b: a + 1,\n                filter(lambda tup: tup[1] == notification_callback, self.notifications[notification_type]),\n                0) > 0:\n          return -1\n      self.notifications[notification_type].append((self.notification_id, notification_callback))\n\n    ret_val = self.notification_id\n\n    self.notification_id += 1\n\n    return ret_val", "docstring": "Add a notification callback to the notification center.\n\nArgs:\nnotification_type: A string representing the notification type from .helpers.enums.NotificationTypes\nnotification_callback: closure of function to call when event is triggered.\n\nReturns:\nInteger notification id used to remove the notification or -1 if the notification has already been added.", "source": "juraj-google-style"}
{"code": "def destroy_s3(app='', env='dev', **_):\n    \n    session = boto3.Session(profile_name=env)\n    client = session.resource('s3')\n\n    generated = get_details(app=app, env=env)\n    archaius = generated.archaius()\n\n    bucket = client.Bucket(archaius['bucket'])\n\n    for item in bucket.objects.filter(Prefix=archaius['path']):\n        item.Object().delete()\n        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)\n\n    return True", "docstring": "Destroy S3 Resources for _app_ in _env_.\n\nArgs:\napp (str): Application name\nenv (str): Deployment environment/account name\n\nReturns:\nboolean: True if destroyed sucessfully", "source": "juraj-google-style"}
{"code": "def new_from_json(cls, json_data):\n        \n        json_data_as_unicode = _helpers._from_bytes(json_data)\n        data = json.loads(json_data_as_unicode)\n        \n        \n        module_name = data['_module']\n        try:\n            module_obj = __import__(module_name)\n        except ImportError:\n            \n            \n            module_name = module_name.replace('.googleapiclient', '')\n            module_obj = __import__(module_name)\n\n        module_obj = __import__(module_name,\n                                fromlist=module_name.split('.')[:-1])\n        kls = getattr(module_obj, data['_class'])\n        return kls.from_json(json_data_as_unicode)", "docstring": "Utility class method to instantiate a Credentials subclass from JSON.\n\nExpects the JSON string to have been produced by to_json().\n\nArgs:\njson_data: string or bytes, JSON from to_json().\n\nReturns:\nAn instance of the subclass of Credentials that was serialized with\nto_json().", "source": "juraj-google-style"}
{"code": "def add_sched_block_instance(self, config_dict):\n        \n        \n        schema = self._get_schema()\n        LOG.debug('Adding SBI with config: %s', config_dict)\n\n        \n        validate(config_dict, schema)\n\n        \n        updated_block = self._add_status(config_dict)\n\n        \n        \n        scheduling_block_data, processing_block_data = \\\n            self._split_sched_block_instance(updated_block)\n\n        \n        name = \"scheduling_block:\" + updated_block[\"id\"]\n        self._db.set_specified_values(name, scheduling_block_data)\n\n        \n        \n        self._db.push_event(self.scheduling_event_name,\n                            updated_block[\"status\"],\n                            updated_block[\"id\"])\n\n        \n        for value in processing_block_data:\n            name = (\"scheduling_block:\" + updated_block[\"id\"] +\n                    \":processing_block:\" + value['id'])\n            self._db.set_specified_values(name, value)\n\n            \n            \n            self._db.push_event(self.processing_event_name,\n                                value[\"status\"],\n                                value[\"id\"])", "docstring": "Add Scheduling Block to the database.\n\nArgs:\nconfig_dict (dict): SBI configuration", "source": "juraj-google-style"}
{"code": "def double_width(self, action):\n        \n        if action == 'on':\n            action = '1'\n        elif action == 'off':\n            action = '0'\n        else:\n            raise RuntimeError('Invalid action for function doubleWidth. Options are on and off')\n        self.send(chr(27)+'W'+action)", "docstring": "Enable/cancel doublewidth printing\n\nArgs:\naction: Enable or disable doublewidth printing. Options are 'on' and 'off'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"}
{"code": "def installed(name, source):\n    \n    ret = {'name': name,\n           'changes': {},\n           'result': False,\n           'comment': ''}\n\n    \n    if not name:\n        raise SaltInvocationError('Must specify a KB \"name\"')\n    if not source:\n        raise SaltInvocationError('Must specify a \"source\" file to install')\n\n    \n    if __salt__['wusa.is_installed'](name):\n        ret['result'] = True\n        ret['comment'] = '{0} already installed'.format(name)\n        return ret\n\n    \n    if __opts__['test'] is True:\n        ret['result'] = None\n        ret['comment'] = '{0} would be installed'.format(name)\n        ret['result'] = None\n        return ret\n\n    \n    cached_source_path = __salt__['cp.cache_file'](path=source, saltenv=__env__)\n    if not cached_source_path:\n        msg = 'Unable to cache {0} from saltenv \"{1}\"'.format(\n            salt.utils.url.redact_http_basic_auth(source), __env__)\n        ret['comment'] = msg\n        return ret\n\n    \n    __salt__['wusa.install'](cached_source_path)\n\n    \n    if __salt__['wusa.is_installed'](name):\n        ret['comment'] = '{0} was installed'.format(name)\n        ret['changes'] = {'old': False, 'new': True}\n        ret['result'] = True\n    else:\n        ret['comment'] = '{0} failed to install'.format(name)\n\n    return ret", "docstring": "Ensure an update is installed on the minion\n\nArgs:\n\nname(str):\nName of the Windows KB (\"KB123456\")\n\nsource (str):\nSource of .msu file corresponding to the KB\n\nExample:\n\n.. code-block:: yaml\n\nKB123456:\nwusa.installed:\n- source: salt://kb123456.msu", "source": "juraj-google-style"}
{"code": "def __init__(self, _lenient=False, **kwds):\n    \n    self._verify_keys(kwds, _lenient)\n    self._set_values(kwds, _lenient)", "docstring": "Init.\n\nArgs:\n_lenient: When true, no option is required.\n**kwds: keyword arguments for options and their values.", "source": "juraj-google-style"}
{"code": "async def forget_ticket(self, request):\n    session = (await get_session(request))\n    session.pop(self.cookie_name, '')", "docstring": "Called to forget the ticket data a request\n\nArgs:\nrequest: aiohttp Request object.", "source": "codesearchnet"}
{"code": "def infer_steps_for_dataset(model, dataset, steps, epochs=1, steps_name='steps'):\n    assert isinstance(dataset, data_types.DatasetV2)\n    if model._in_multi_worker_mode() and dataset.options().experimental_distribute.auto_shard_policy != options_lib.AutoShardPolicy.OFF:\n        return None\n    size = backend.get_value(cardinality.cardinality(dataset))\n    if size == cardinality.INFINITE and steps is None:\n        raise ValueError('When passing an infinitely repeating dataset, you must specify the `%s` argument.' % (steps_name,))\n    if size >= 0:\n        if steps is not None and steps * epochs > size:\n            if epochs > 1:\n                raise ValueError('The dataset you passed contains %s batches, but you passed `epochs=%s` and `%s=%s`, which is a total of %s steps. We cannot draw that many steps from this dataset. We suggest to set `%s=%s`.' % (size, epochs, steps_name, steps, steps * epochs, steps_name, size \n            else:\n                raise ValueError('The dataset you passed contains %s batches, but you passed `%s=%s`. We cannot draw that many steps from this dataset. We suggest to set `%s=%s`.' % (size, steps_name, steps, steps_name, size))\n    if steps is None:\n        if size >= 0:\n            return size\n        return None\n    return steps", "docstring": "Infers steps_per_epoch needed to loop through a dataset.\n\nArgs:\nmodel: Keras model instance.\ndataset: Input data of type tf.data.Dataset.\nsteps: Number of steps to draw from the dataset (may be None if unknown).\nepochs: Number of times to iterate over the dataset.\nsteps_name: The string name of the steps argument, either `steps`,\n`validation_steps`, or `steps_per_epoch`. Only used for error message\nformatting.\n\nReturns:\nInteger or `None`. Inferred number of steps to loop through the dataset.\n`None` is returned if 1) the size of the dataset is unknown and `steps` was\nnot specified, or 2) this is multi-worker training and auto sharding is\nenabled.\n\nRaises:\nValueError: In case of invalid argument values.", "source": "github-repos"}
{"code": "def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):\n    import matplotlib.pyplot as plt\n    if (plot_kwargs is None):\n        plot_kwargs = {}\n    data_column = (3 if elapsed else 1)\n    data = np.genfromtxt(filename, dtype='i8,f4', usecols=(0, data_column), names=['k', 'v'])\n    index = data['k']\n    values = data['v']\n    if (unit == 's'):\n        pass\n    elif (unit == 'm'):\n        values /= 60\n    elif (unit == 'h'):\n        values /= 3600\n    elif (unit == 'd'):\n        values /= (3600 * 24)\n    else:\n        raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')\n    plt.plot(index, values, **plot_kwargs)", "docstring": "Plot series data from MonitorTimeElapsed output text file.\n\nArgs:\nfilename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.\nelapsed (bool): If ``True``, it plots the total elapsed time.\nunit (str):\nTime unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.\nplot_kwags (dict, optional):\nKeyward arguments passed to :function:`matplotlib.pyplot.plot`.\n\nNote:\nmatplotlib package is required.", "source": "codesearchnet"}
{"code": "def flatten(schedule: ScheduleComponent, name: str = None) -> Schedule:\n    \n    if name is None:\n        name = schedule.name\n\n    return Schedule(*schedule.instructions, name=name)", "docstring": "Create a flattened schedule.\n\nArgs:\nschedule: Schedules to flatten\nname: Name of the new schedule. Defaults to first element of `schedules`", "source": "juraj-google-style"}
{"code": "def SetTimelineOwner(self, username):\n    \n    self._timeline_owner = username\n    logger.info('Owner of the timeline: {0!s}'.format(self._timeline_owner))", "docstring": "Sets the username of the user that should own the timeline.\n\nArgs:\nusername (str): username.", "source": "juraj-google-style"}
{"code": "def to_las3(self, use_descriptions=False, dlm=',', source='Striplog'):\n    data = self.to_csv(use_descriptions=use_descriptions, dlm=dlm, header=False)\n    return templates.section.format(name='Lithology', short='LITH', source=source, data=data)", "docstring": "Returns an LAS 3.0 section string.\n\nArgs:\nuse_descriptions (bool): Whether to use descriptions instead\nof summaries, if available.\ndlm (str): The delimiter.\nsource (str): The sourse of the data.\n\nReturns:\nstr: A string forming Lithology section of an LAS3 file.", "source": "codesearchnet"}
{"code": "def _initialize_physical_devices(self, reinitialize=False):\n    with self._device_lock:\n        if not reinitialize and self._physical_devices is not None:\n            return\n        devs = pywrap_tfe.TF_ListPhysicalDevices()\n        self._physical_devices = [PhysicalDevice(name=d.decode(), device_type=d.decode().split(':')[1]) for d in devs]\n        self._physical_device_to_index = {p: i for i, p in enumerate(self._physical_devices)}\n        pluggable_devs = pywrap_tfe.TF_ListPluggablePhysicalDevices()\n        self._pluggable_devices = [PhysicalDevice(name=d.decode(), device_type=d.decode().split(':')[1]) for d in pluggable_devs]\n        self._visible_device_list = list(self._physical_devices)\n        self._memory_growth_map = {d: None for d in self._physical_devices if d.device_type == 'GPU' or d in self._pluggable_devices}\n    self._import_config()", "docstring": "Gets local devices visible to the system.\n\nArgs:\nreinitialize: If True, reinitializes self._physical_devices  so that\ndynamic registered devices will also be visible to the python front-end.", "source": "github-repos"}
{"code": "def related(self, *, exclude_self=False):\n        \n        manager = type(self)._default_manager\n        queryset = manager.related_to(self)\n        if exclude_self:\n            queryset = queryset.exclude(id=self.id)\n        return queryset", "docstring": "Get a QuerySet for all trigger log objects for the same connected model.\n\nArgs:\nexclude_self (bool): Whether to exclude this log object from the result list", "source": "juraj-google-style"}
{"code": "def __init__(self, value):\n    if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n        raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n    self._value = value", "docstring": "Constructor for `RegressionOutput`.\n\nArgs:\nvalue: a float `Tensor` giving the predicted values.  Required.\n\nRaises:\nValueError: if the value is not a `Tensor` with dtype tf.float32.", "source": "github-repos"}
{"code": "def _validate_alias_command(alias_command):\n    if (not alias_command):\n        raise CLIError(EMPTY_ALIAS_ERROR)\n    split_command = shlex.split(alias_command)\n    boundary_index = len(split_command)\n    for (i, subcommand) in enumerate(split_command):\n        if ((not re.match('^[a-z]', subcommand.lower())) or (i > COLLISION_CHECK_LEVEL_DEPTH)):\n            boundary_index = i\n            break\n    command_to_validate = ' '.join(split_command[:boundary_index]).lower()\n    for command in azext_alias.cached_reserved_commands:\n        if re.match('([a-z\\\\-]*\\\\s)*{}($|\\\\s)'.format(command_to_validate), command):\n            return\n    _validate_positional_arguments(shlex.split(alias_command))", "docstring": "Check if the alias command is valid.\n\nArgs:\nalias_command: The command to validate.", "source": "codesearchnet"}
{"code": "def genfile(*paths):\n    \n    path = genpath(*paths)\n    gendir(os.path.dirname(path))\n    if not os.path.isfile(path):\n        return io.open(path, 'w+b')\n    return io.open(path, 'r+b')", "docstring": "Create or open ( for read/write ) a file path join.\n\nArgs:\n*paths: A list of paths to join together to make the file.\n\nNotes:\nIf the file already exists, the fd returned is opened in ``r+b`` mode.\nOtherwise, the fd is opened in ``w+b`` mode.\n\nReturns:\nio.BufferedRandom: A file-object which can be read/written too.", "source": "juraj-google-style"}
{"code": "def _build_udf(name, code, return_type, params, language, imports):\n    params = ','.join([('%s %s' % named_param) for named_param in params])\n    imports = ','.join([('library=\"%s\"' % i) for i in imports])\n    if (language.lower() == 'sql'):\n        udf = (((('CREATE TEMPORARY FUNCTION {name} ({params})\\n' + 'RETURNS {return_type}\\n') + 'AS (\\n') + '{code}\\n') + ');')\n    else:\n        udf = (((((((('CREATE TEMPORARY FUNCTION {name} ({params})\\n' + 'RETURNS {return_type}\\n') + 'LANGUAGE {language}\\n') + 'AS \\n') + 'OPTIONS (\\n') + '{imports}\\n') + ');')\n    return udf.format(name=name, params=params, return_type=return_type, language=language, code=code, imports=imports)", "docstring": "Creates the UDF part of a BigQuery query using its pieces\n\nArgs:\nname: the name of the javascript function\ncode: function body implementing the logic.\nreturn_type: BigQuery data type of the function return. See supported data types in\nthe BigQuery docs\nparams: dictionary of parameter names and types\nlanguage: see list of supported languages in the BigQuery docs\nimports: a list of GCS paths containing further support code.", "source": "codesearchnet"}
{"code": "def __call__(self, shape, dtype=None):\n    \n    dtype = tf.as_dtype(dtype or tf.keras.backend.floatx())\n    if isinstance(shape, tf.TensorShape):\n      shape_dtype = tf.int32\n      shape_ = np.int32(shape)\n    else:\n      if not tf.is_tensor(shape):\n        shape = tf.convert_to_tensor(\n            value=shape, dtype_hint=tf.int32, name='shape')\n      shape_dtype = shape.dtype.base_dtype\n      shape_ = tf.get_static_value(shape, partial=True)\n\n    sizes_ = tf.get_static_value(self.sizes)\n    if sizes_ is not None:\n      sizes_ = np.array(sizes_, shape_dtype.as_numpy_dtype)\n\n    assertions = []\n    message = 'Rightmost dimension of shape must equal `sum(sizes)`.'\n    n = shape[-1] if shape_ is None or shape_[-1] is None else shape_[-1]\n    if sizes_ is not None and not tf.is_tensor(n):\n      if sum(sizes_) != n:\n        raise ValueError(message)\n    elif self.validate_args:\n      assertions.append(tf.compat.v1.assert_equal(\n          shape[-1], tf.reduce_sum(input_tensor=self.sizes), message=message))\n\n    s = (shape[:-1]\n         if shape_ is None or any(s is None for s in shape_[:-1])\n         else shape_[:-1])\n    if sizes_ is not None and isinstance(s, (np.ndarray, np.generic)):\n      return tf.concat([\n          tf.keras.initializers.get(init)(np.concatenate([\n              s, np.array([e], shape_dtype.as_numpy_dtype)], axis=-1), dtype)\n          for init, e in zip(self.initializers, sizes_.tolist())\n      ], axis=-1)\n\n    sizes = tf.split(self.sizes, len(self.initializers))\n    return tf.concat([\n        tf.keras.initializers.get(init)(tf.concat([s, e], axis=-1), dtype)\n        for init, e in zip(self.initializers, sizes)\n    ], axis=-1)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. If not provided will return tensor\nof `tf.float32`.", "source": "juraj-google-style"}
{"code": "def read_knmi_dataset(directory):\n    \n    filemask = '%s*.txt' % directory\n    filelist = glob.glob(filemask)\n\n    columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']\n    ts = pd.DataFrame(columns=columns_hourly)\n\n    first_call = True\n    for file_i in filelist:\n        print(file_i)\n        current = read_single_knmi_file(file_i)\n        if(first_call):\n            ts = current\n            first_call = False\n        else:\n            ts = pd.concat([ts, current])\n    return ts", "docstring": "Reads files from a directory and merges the time series\n\nPlease note: For each station, a separate directory must be provided!\ndata availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens\n\nArgs:\ndirectory: directory including the files\n\nReturns:\npandas data frame including time series", "source": "juraj-google-style"}
{"code": "def get_auth_header(self, user_payload):\n        \n        auth_token = self.get_auth_token(user_payload)\n        return '{auth_header_prefix} {auth_token}'.format(\n            auth_header_prefix=self.auth_header_prefix, auth_token=auth_token\n        )", "docstring": "Returns the value for authorization header\nArgs:\nuser_payload(dict, required): A `dict` containing required information\nto create authentication token", "source": "juraj-google-style"}
{"code": "def stream(self, accountID, **kwargs):\n    request = Request('GET', '/v3/accounts/{accountID}/transactions/stream')\n    request.set_path_param('accountID', accountID)\n    request.set_stream(True)\n\n    class Parser():\n\n        def __init__(self, ctx):\n            self.ctx = ctx\n\n        def __call__(self, line):\n            j = json.loads(line.decode('utf-8'))\n            type = j.get('type')\n            if (type is None):\n                return ('unknown', j)\n            elif (type == 'HEARTBEAT'):\n                return ('transaction.TransactionHeartbeat', self.ctx.transaction.TransactionHeartbeat.from_dict(j, self.ctx))\n            transaction = self.ctx.transaction.Transaction.from_dict(j, self.ctx)\n            return ('transaction.Transaction', transaction)\n    request.set_line_parser(Parser(self.ctx))\n    response = self.ctx.request(request)\n    return response", "docstring": "Get a stream of Transactions for an Account starting from when the\nrequest is made.\n\nArgs:\naccountID:\nAccount Identifier\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def arcsin(x):\n    if any_symbolic_tensors((x,)):\n        return Arcsin().symbolic_call(x)\n    return backend.numpy.arcsin(x)", "docstring": "Inverse sine, element-wise.\n\nArgs:\nx: Input tensor.\n\nReturns:\nTensor of the inverse sine of each element in `x`, in radians and in\nthe closed interval `[-pi/2, pi/2]`.\n\nExample:\n>>> x = keras.ops.convert_to_tensor([1, -1, 0])\n>>> keras.ops.arcsin(x)\narray([ 1.5707964, -1.5707964,  0.], dtype=float32)", "source": "github-repos"}
{"code": "def _validate_input(flattened_layouts: Sequence[layout_lib.Layout], flattened_elem_spec: Sequence[tensor_spec.TensorSpec], dataset_already_batched: bool):\n    if not flattened_elem_spec:\n        raise ValueError('Expected input element spec of at least one element, was empty.')\n    first_elem_shape = flattened_elem_spec[0].shape\n    for layout, elem_spec in zip(flattened_layouts, flattened_elem_spec):\n        if elem_spec.shape.rank is None:\n            raise ValueError('Dataset element shape must have a valid rank, got spec %s.' % elem_spec)\n        expected_rank = elem_spec.shape.rank\n        if not dataset_already_batched:\n            expected_rank += 1\n        if layout.rank != expected_rank:\n            raise ValueError('Expected layout with rank %d for element spec %s, got layout %s. Check that the dataset is not batched before passing to DTensorDataset.' % (expected_rank, elem_spec, layout.sharding_specs))\n        if dataset_already_batched:\n            batch_dim_size = first_elem_shape.as_list()[0]\n            if batch_dim_size is None:\n                raise ValueError('Size of batch dimension of element spec %s is None. Ensure drop_remainder=True when batching the dataset.' % elem_spec)\n            if elem_spec.shape.as_list()[0] != batch_dim_size:\n                raise ValueError('Size of batch dimension of element spec %s does not match expected size %d.' % (elem_spec, batch_dim_size))", "docstring": "Checks that the dataset's layouts and element specs are compatible.\n\nArgs:\nflattened_layouts: the flattened list of layouts used to distribute the\ndataset.\nflattened_elem_spec: the flattened list of element specs used in the\ndataset's components.\ndataset_already_batched: whether the dataset to be validated is already\nbatched.\n\nRaises:\nValueError: if the dataset's inputs are incompatible.", "source": "github-repos"}
{"code": "def _add_cadd_score(self, variant_obj, info_dict):\n        \n        cadd_score = info_dict.get('CADD')\n        if cadd_score:\n            logger.debug(\"Updating cadd_score to: {0}\".format(\n                cadd_score))\n            variant_obj.cadd_score = float(cadd_score)", "docstring": "Add the cadd score to the variant\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ninfo_dict (dict): A info dictionary", "source": "juraj-google-style"}
{"code": "class Buffer(Generic[T]):\n    queue: List[T]\n    max_size: int\n    flusher: Union[FlushFunction, NoReturn]\n\n    def __init__(self, initlist: List[T], max_size: int, flusher: FlushFunction) -> None:\n        self.queue = initlist\n        self.max_size = max_size\n        self.flusher = flusher\n\n    def flush(self, force: bool=False) -> bool | Any:\n        \n        if force or len(self.queue) > self.max_size:\n            result = self.flusher(self.queue)\n            self.queue.clear()\n            return result or True\n        else:\n            return False\n\n    def push(self, item: T) -> bool | Any:\n        \n        self.queue.append(item)\n        return self.flush()", "docstring": "Representation of a Buffer (FIFO queue) with the ability to\nconsume the current queue into a flush function when max_size is reached.\n\nIt can queue any list of items, e.g. logs, rows, and API calls.\n\nArgs:\n* initlist: Initial list of items\n* max_size: Maximum queue size\n* flusher: Function to be called with list of items", "source": "github-repos"}
{"code": "def download(self, file: Optional[IO[bytes]]=None, duration_timeout: Optional[float]=None):\n    (yield from self._current_session.download(file, duration_timeout=duration_timeout))", "docstring": "Download content.\n\nArgs:\nfile: An optional file object for the document contents.\nduration_timeout: Maximum time in seconds of which the\nentire file must be read.\n\nReturns:\nResponse: An instance of :class:`.http.request.Response`.\n\nSee :meth:`WebClient.session` for proper usage of this function.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def on_http_error(error):\n\n    def wrap(f):\n\n        @functools.wraps(f)\n        def wrapped_f(*args, **kwargs):\n            try:\n                return f(*args, **kwargs)\n            except GitlabHttpError as e:\n                raise error(e.error_message, e.response_code, e.response_body)\n        return wrapped_f\n    return wrap", "docstring": "Manage GitlabHttpError exceptions.\n\nThis decorator function can be used to catch GitlabHttpError exceptions\nraise specialized exceptions instead.\n\nArgs:\nerror(Exception): The exception type to raise -- must inherit from\nGitlabError", "source": "codesearchnet"}
{"code": "def is_periodically_contiguous( self ):\n        \n        edges = self.sites_at_edges()\n        is_contiguous = [ False, False, False ]\n        along_x = any( [ s2 in s1.p_neighbours for s1 in edges[0] for s2 in edges[1] ] )\n        along_y = any( [ s2 in s1.p_neighbours for s1 in edges[2] for s2 in edges[3] ] )\n        along_z = any( [ s2 in s1.p_neighbours for s1 in edges[4] for s2 in edges[5] ] )\n        return ( along_x, along_y, along_z )", "docstring": "logical check whether a cluster connects with itself across the\nsimulation periodic boundary conditions.\n\nArgs:\nnone\n\nReturns\n( Bool, Bool, Bool ): Contiguity along the x, y, and z coordinate axes", "source": "juraj-google-style"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a `WorkerPool`.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsWorkerPoolsCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def get_urls(self):\n    urls = super(DashboardSite, self).get_urls()\n    custom_urls = [url('^$', self.admin_view(HomeView.as_view()), name='index'), url('^logs/', include(logs_urlpatterns(self.admin_view)))]\n    custom_urls += get_realtime_urls(self.admin_view)\n    del urls[0]\n    return (custom_urls + urls)", "docstring": "Get urls method.\n\nReturns:\nlist: the list of url objects.", "source": "codesearchnet"}
{"code": "def filesystem_set_configuration(scheme, key, value, name=None):\n    return _gen_filesystem_ops.file_system_set_configuration(scheme, key=key, value=value, name=name)", "docstring": "Set configuration of the file system.\n\nArgs:\nscheme: File system scheme.\nkey: The name of the configuration option.\nvalue: The value of the configuration option.\nname: A name for the operation (optional).\n\nReturns:\nNone.", "source": "github-repos"}
{"code": "def degrees_to_compass(value):\n    if value is None:\n        return None\n    if value >= 348.75 and value <= 360 or (value >= 0 and value <= 11.25):\n        return 'N'\n    else:\n        for direction in WIND_DIRECTION_MAP.keys():\n            if value >= WIND_DIRECTION_MAP[direction]['f'] and value <= WIND_DIRECTION_MAP[direction]['t']:\n                return direction\n    return None", "docstring": "Turns direction from degrees value to compass direction\n\nArgs:\nvalue: floating point representing the degrees from 0 to 360\nReturns: String representing the compass direction.", "source": "github-repos"}
{"code": "def write_dftbp(filename, atoms):\n    \n    scale_pos = dftbpToBohr\n\n    lines = \"\"\n\n    \n    natoms = atoms.get_number_of_atoms()\n    lines += str(natoms)\n    lines += ' S \\n'\n\n    \n    expaned_symbols = atoms.get_chemical_symbols()\n    symbols = get_reduced_symbols(expaned_symbols)\n    lines += ' '.join(symbols) + '\\n'\n\n    atom_numbers = []\n    for ss in expaned_symbols:\n        atom_numbers.append(symbols.index(ss) + 1)\n\n    positions = atoms.get_positions()/scale_pos\n\n    for ii in range(natoms):\n        pos = positions[ii]\n        pos_str = \"{:3d} {:3d} {:20.15f} {:20.15f} {:20.15f}\\n\".format(\n            ii + 1, atom_numbers[ii], pos[0], pos[1], pos[2])\n        lines += pos_str\n\n    \n    lines +='0.0 0.0 0.0\\n'\n\n    cell = atoms.get_cell()/scale_pos\n\n    for ii in range(3):\n        cell_str = \"{:20.15f} {:20.15f} {:20.15f}\\n\".format(\n            cell[ii][0], cell[ii][1], cell[ii][2])\n        lines += cell_str\n\n    outfile = open(filename, 'w')\n    outfile.write(lines)", "docstring": "Writes DFTB+ readable, gen-formatted structure files\n\nArgs:\nfilename: name of the gen-file to be written\natoms: object containing information about structure", "source": "juraj-google-style"}
{"code": "def read_video_pyav(container, indices):\n    frames = []\n    container.seek(0)\n    start_index = indices[0]\n    end_index = indices[-1]\n    for i, frame in enumerate(container.decode(video=0)):\n        if i > end_index:\n            break\n        if i >= start_index and i in indices:\n            frames.append(frame)\n    return np.stack([x.to_ndarray(format='rgb24') for x in frames])", "docstring": "Decode the video with PyAV decoder.\n\nArgs:\ncontainer (`av.container.input.InputContainer`): PyAV container.\nindices (`List[int]`): List of frame indices to decode.\n\nReturns:\nresult (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).", "source": "github-repos"}
{"code": "def search_point(self, lat, lng, filters=None, startDate=None, endDate=None, types=None, type=None):\n    searchAreaWkt = ('POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))' % (lng, lat, lng, lat, lng, lat, lng, lat, lng, lat))\n    return self.search(searchAreaWkt=searchAreaWkt, filters=filters, startDate=startDate, endDate=endDate, types=types)", "docstring": "Perform a catalog search over a specific point, specified by lat,lng\n\nArgs:\nlat: latitude\nlng: longitude\nfilters: Array of filters.  Optional.  Example:\n[\n\"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')\",\n\"cloudCover < 10\",\n\"offNadirAngle < 10\"\n]\nstartDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\nendDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\ntypes: Array of types to search for.  Optional.  Example (and default):  [\"Acquisition\"]\n\nReturns:\ncatalog search resultset", "source": "codesearchnet"}
{"code": "def RegisterSourceType(cls, source_type_class):\n    \n    if source_type_class.TYPE_INDICATOR in cls._source_type_classes:\n      raise KeyError(\n          'Source type already set for type: {0:s}.'.format(\n              source_type_class.TYPE_INDICATOR))\n\n    cls._source_type_classes[source_type_class.TYPE_INDICATOR] = (\n        source_type_class)", "docstring": "Registers a source type.\n\nSource types are identified based on their type indicator.\n\nArgs:\nsource_type_class (type): source type.\n\nRaises:\nKeyError: if source types is already set for the corresponding\ntype indicator.", "source": "juraj-google-style"}
{"code": "def _get_name(self):\n    if self._known_keys[_InstrumentationKnownStatusKeys.TEST]:\n        return self._known_keys[_InstrumentationKnownStatusKeys.TEST]\n    else:\n        return self.DEFAULT_INSTRUMENTATION_METHOD_NAME", "docstring": "Gets the method name of the test method for the instrumentation\nmethod block.\n\nReturns:\nA string containing the name of the instrumentation test method's\ntest or a default name if no name was parsed.", "source": "github-repos"}
{"code": "def write_env_vars(env_vars=None):  \n    \n    env_vars = env_vars or {}\n    env_vars['PYTHONPATH'] = ':'.join(sys.path)\n\n    for name, value in env_vars.items():\n        os.environ[name] = value", "docstring": "Write the dictionary env_vars in the system, as environment variables.\n\nArgs:\nenv_vars ():\n\nReturns:", "source": "juraj-google-style"}
{"code": "def locate_resource(name, lang, filter=None):\n  \n  task_dir = resource_dir.get(name, name)\n  package_id = u\"{}.{}\".format(task_dir, lang)\n  p = path.join(polyglot_path, task_dir, lang)\n  if not path.isdir(p):\n    if downloader.status(package_id) != downloader.INSTALLED:\n      raise ValueError(\"This resource is available in the index \"\n                       \"but not downloaded, yet. Try to run\\n\\n\"\n                       \"polyglot download {}\".format(package_id))\n  return path.join(p, os.listdir(p)[0])", "docstring": "Return filename that contains specific language resource name.\n\nArgs:\nname (string): Name of the resource.\nlang (string): language code to be loaded.", "source": "juraj-google-style"}
{"code": "def __rmul__(self, left: torch.Tensor) -> Rotation:\n    return self.__mul__(left)", "docstring": "Reverse pointwise multiplication of the rotation with a tensor.\n\nArgs:\nleft:\nThe left multiplicand\nReturns:\nThe product", "source": "github-repos"}
{"code": "def json_merge_fields(recipe, parameters):\n    if isinstance(recipe, dict):\n        for key, value in list(recipe.items()):\n            if isinstance(value, dict) and 'field' in value:\n                if value['field']['name'] in parameters:\n                    recipe[key] = json_merge_field(value, parameters[value['field']['name']])\n            else:\n                json_merge_fields(value, parameters)\n    elif isinstance(recipe, list) or isinstance(recipe, tuple):\n        for index, value in enumerate(recipe):\n            if isinstance(value, dict) and 'field' in value:\n                if value['field']['name'] in parameters:\n                    recipe[index] = json_merge_field(value, parameters[value['field']['name']])\n            else:\n                json_merge_fields(value, parameters)\n    return recipe", "docstring": "Recusrsively merges fields from an include.\n\nField has format: { \"field\":{ \"name\":\"???\", \"kind\":\"???\", \"default\":???, \"description\":\"???\" }}\n\nArgs:\nrecipe: (dict) A dictionary representation fo the JSON script.\nparameters: (dict) A key value pair, where the value could be another field.\n\nReturns:\nfields: (list or dictionary) A list or dictionary representing each field recipe found in the JSON.", "source": "github-repos"}
{"code": "def dump_begin(self, selector_id):\n    if (self.dump_walker is not None):\n        self.storage.destroy_walker(self.dump_walker)\n    selector = DataStreamSelector.FromEncoded(selector_id)\n    self.dump_walker = self.storage.create_walker(selector, skip_all=False)\n    return (Error.NO_ERROR, Error.NO_ERROR, self.dump_walker.count())", "docstring": "Start dumping a stream.\n\nArgs:\nselector_id (int): The buffered stream we want to dump.\n\nReturns:\n(int, int, int): Error code, second error code, number of available readings", "source": "codesearchnet"}
{"code": "def _ragged_tensor_mse(y_true, y_pred):\n    return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)", "docstring": "Implements support for handling RaggedTensors.\n\nArgs:\ny_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`.\ny_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`.\n\nReturns:\nMean squared error values. shape = `[batch_size, d0, .. dN-1]`.\nWhen the number of dimensions of the batch feature vector [d0, .. dN] is\ngreater than one the return value is a RaggedTensor. Otherwise a Dense\ntensor with dimensions [batch_size] is returned.", "source": "github-repos"}
{"code": "def set_pattern_step_setpoint(self, patternnumber, stepnumber, setpointvalue):\n        \n        _checkPatternNumber(patternnumber)\n        _checkStepNumber(stepnumber)\n        _checkSetpointValue(setpointvalue, self.setpoint_max)\n        \n        address = _calculateRegisterAddress('setpoint', patternnumber, stepnumber)\n        self.write_register(address, setpointvalue, 1)", "docstring": "Set the setpoint value for a step.\n\nArgs:\n* patternnumber (integer): 0-7\n* stepnumber (integer): 0-7\n* setpointvalue (float): Setpoint value", "source": "juraj-google-style"}
{"code": "def json_using_iso8601(__obj: Dict) -> Dict:\n    for (key, value) in __obj.items():\n        with suppress(TypeError, ValueError):\n            __obj[key] = parse_datetime(value)\n        with suppress(TypeError, ValueError):\n            __obj[key] = parse_delta(value)\n    return __obj", "docstring": "Parse ISO-8601 values from JSON databases.\n\nSee :class:`json.JSONDecoder`\n\nArgs:\n__obj: Object to decode", "source": "codesearchnet"}
{"code": "def splat(f: Callable[..., A]) -> Callable[[Iterable], A]:\n    \n\n    def splatted(args):\n        return f(*args)\n\n    return splatted", "docstring": "Convert a function taking multiple arguments into a function taking a single iterable argument.\n\nArgs:\nf: Any function\n\nReturns:\nA function that accepts a single iterable argument. Each element of this iterable argument is passed as an\nargument to ``f``.\n\nExample:\n$ def f(a, b, c):\n$     return a + b + c\n$\n$ f(1, 2, 3)  # 6\n$ g = splat(f)\n$ g([1, 2, 3])  # 6", "source": "juraj-google-style"}
{"code": "def __init__(self, *futures):\n    \n    for f in futures:\n      if not isinstance(f, PipelineFuture):\n        raise TypeError('May only pass PipelineFuture instances to After(). %r',\n                        type(f))\n    self._futures = set(futures)", "docstring": "Initializer.\n\nArgs:\n*futures: PipelineFutures that all subsequent pipelines should follow.\nMay be empty, in which case this statement does nothing.", "source": "juraj-google-style"}
{"code": "def _wait_on_metadata(self, topic, max_wait):\n    self._sender.add_topic(topic)\n    begin = time.time()\n    elapsed = 0.0\n    metadata_event = None\n    while True:\n        partitions = self._metadata.partitions_for_topic(topic)\n        if (partitions is not None):\n            return partitions\n        if (not metadata_event):\n            metadata_event = threading.Event()\n        log.debug('Requesting metadata update for topic %s', topic)\n        metadata_event.clear()\n        future = self._metadata.request_update()\n        future.add_both((lambda e, *args: e.set()), metadata_event)\n        self._sender.wakeup()\n        metadata_event.wait((max_wait - elapsed))\n        elapsed = (time.time() - begin)\n        if (not metadata_event.is_set()):\n            raise Errors.KafkaTimeoutError(('Failed to update metadata after %.1f secs.' % (max_wait,)))\n        elif (topic in self._metadata.unauthorized_topics):\n            raise Errors.TopicAuthorizationFailedError(topic)\n        else:\n            log.debug('_wait_on_metadata woke after %s secs.', elapsed)", "docstring": "Wait for cluster metadata including partitions for the given topic to\nbe available.\n\nArguments:\ntopic (str): topic we want metadata for\nmax_wait (float): maximum time in secs for waiting on the metadata\n\nReturns:\nset: partition ids for the topic\n\nRaises:\nKafkaTimeoutError: if partitions for topic were not obtained before\nspecified max_wait timeout", "source": "codesearchnet"}
{"code": "def set_density_matrix(self, density_matrix_repr: Union[(int, np.ndarray)]):\n    density_matrix = density_matrix_utils.to_valid_density_matrix(density_matrix_repr, len(self._qubit_map), self._dtype)\n    density_matrix = np.reshape(density_matrix, self.simulator_state().density_matrix.shape)\n    np.copyto(dst=self.simulator_state().density_matrix, src=density_matrix)", "docstring": "Set the density matrix to a new density matrix.\n\nArgs:\ndensity_matrix_repr: If this is an int, the density matrix is set to\nthe computational basis state corresponding to this state. Otherwise\nif this is a np.ndarray it is the full state, either a pure state\nor the full density matrix.  If it is the pure state it must be the\ncorrect size, be normalized (an L2 norm of 1), and be safely\ncastable to an appropriate dtype for the simulator.  If it is a\nmixed state it must be correctly sized and positive semidefinite\nwith trace one.", "source": "codesearchnet"}
{"code": "def bridge_delete(br, if_exists=True):\n    param_if_exists = _param_if_exists(if_exists)\n    cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    return _retcode_to_bool(retcode)", "docstring": "Deletes bridge and all of  its  ports.\n\nArgs:\nbr: A string - bridge name\nif_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_delete br0", "source": "codesearchnet"}
{"code": "def _expand_url(short_link, subreddit=None):\n        \n        \n        message_scheme = 'https:\n        comment_scheme = 'https:\n        post_scheme = 'https:\n\n        if short_link == '':\n            return None\n        else:\n            parts = short_link.split(',')\n\n            if parts[0] == 'm':\n                return message_scheme.format(parts[1])\n            if parts[0] == 'l' and subreddit:\n                if len(parts) > 2:\n                    return comment_scheme.format(subreddit, parts[1], parts[2])\n                else:\n                    return post_scheme.format(subreddit, parts[1])\n            elif not subreddit:\n                raise ValueError('Subreddit name must be provided')\n            else:\n                return None", "docstring": "Convert a usernote's URL short-hand into a full reddit URL.\n\nArguments:\nsubreddit: the subreddit the URL is for (PRAW Subreddit object or str)\nshort_link: the compressed link from a usernote (str)\n\nReturns a String of the full URL.", "source": "juraj-google-style"}
{"code": "def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:\n    if not isinstance(encoder_attention_mask, tf.Tensor):\n        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)\n    if encoder_attention_mask.shape.rank == 3:\n        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n    if encoder_attention_mask.shape.rank == 2:\n        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n    encoder_extended_attention_mask = (tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask) * encoder_extended_attention_mask.dtype.min\n    return encoder_extended_attention_mask", "docstring": "Invert an attention mask (e.g., switches 0. and 1.).\n\nArgs:\nencoder_attention_mask (`torch.Tensor`): An attention mask.\n\nReturns:\n`tf.Tensor`: The inverted attention mask.", "source": "github-repos"}
{"code": "def TryConsume(self, token):\n    \n    if self.token == token:\n      self.NextToken()\n      return True\n    return False", "docstring": "Tries to consume a given piece of text.\n\nArgs:\ntoken: Text to consume.\n\nReturns:\nTrue iff the text was consumed.", "source": "juraj-google-style"}
{"code": "def search(cls, five9, filters):\n        \n        return cls._name_search(five9.configuration.getDispositions, filters)", "docstring": "Search for a record on the remote and return the results.\n\nArgs:\nfive9 (five9.Five9): The authenticated Five9 remote.\nfilters (dict): A dictionary of search parameters, keyed by the\nname of the field to search. This should conform to the\nschema defined in :func:`five9.Five9.create_criteria`.\n\nReturns:\nlist[BaseModel]: A list of records representing the result.", "source": "juraj-google-style"}
{"code": "def clinvar_submissions(self, user_id, institute_id):\n    LOG.info(\"Retrieving all clinvar submissions for user '%s', institute '%s'\", user_id, institute_id)\n    query = dict(user_id=user_id, institute_id=institute_id)\n    results = list(self.clinvar_submission_collection.find(query))\n    submissions = []\n    for result in results:\n        submission = {}\n        submission['_id'] = result.get('_id')\n        submission['status'] = result.get('status')\n        submission['user_id'] = result.get('user_id')\n        submission['institute_id'] = result.get('institute_id')\n        submission['created_at'] = result.get('created_at')\n        submission['updated_at'] = result.get('updated_at')\n        if ('clinvar_subm_id' in result):\n            submission['clinvar_subm_id'] = result['clinvar_subm_id']\n        if result.get('variant_data'):\n            submission['variant_data'] = self.clinvar_collection.find({'_id': {'$in': result['variant_data']}})\n        if result.get('case_data'):\n            submission['case_data'] = self.clinvar_collection.find({'_id': {'$in': result['case_data']}})\n        submissions.append(submission)\n    return submissions", "docstring": "Collect all open and closed clinvar submission created by a user for an institute\n\nArgs:\nuser_id(str): a user ID\ninstitute_id(str): an institute ID\n\nReturns:\nsubmissions(list): a list of clinvar submission objects", "source": "codesearchnet"}
{"code": "def _deserialize(self, entity, p, unused_depth=1):\n    if (p.meaning() == entity_pb.Property.EMPTY_LIST):\n        self._store_value(entity, [])\n        return\n    val = self._db_get_value(p.value(), p)\n    if (val is not None):\n        val = _BaseValue(val)\n    if self._repeated:\n        if self._has_value(entity):\n            value = self._retrieve_value(entity)\n            assert isinstance(value, list), repr(value)\n            value.append(val)\n        else:\n            value = [val]\n    else:\n        value = val\n    self._store_value(entity, value)", "docstring": "Internal helper to deserialize this property from a protocol buffer.\n\nSubclasses may override this method.\n\nArgs:\nentity: The entity, a Model (subclass) instance.\np: A Property Message object (a protocol buffer).\ndepth: Optional nesting depth, default 1 (unused here, but used\nby some subclasses that override this method).", "source": "codesearchnet"}
{"code": "def fail_run_group(group, session):\n    from datetime import datetime\n    group.end = datetime.now()\n    group.status = 'failed'\n    session.commit()", "docstring": "End the run_group unsuccessfully.\n\nArgs:\ngroup: The run_group we want to complete.\nsession: The database transaction we will finish.", "source": "codesearchnet"}
{"code": "def add_key_path(key_proto, *path_elements):\n    for i in range(0, len(path_elements), 2):\n        pair = path_elements[i:(i + 2)]\n        elem = key_proto.path.add()\n        elem.kind = pair[0]\n        if (len(pair) == 1):\n            return\n        id_or_name = pair[1]\n        if isinstance(id_or_name, (int, long)):\n            elem.id = id_or_name\n        elif isinstance(id_or_name, basestring):\n            elem.name = id_or_name\n        else:\n            raise TypeError(('Expected an integer id or string name as argument %d; received %r (a %s).' % ((i + 2), id_or_name, type(id_or_name))))\n    return key_proto", "docstring": "Add path elements to the given datastore.Key proto message.\n\nArgs:\nkey_proto: datastore.Key proto message.\n*path_elements: list of ancestors to add to the key.\n(kind1, id1/name1, ..., kindN, idN/nameN), the last 2 elements\nrepresent the entity key, if no terminating id/name: they key\nwill be an incomplete key.\n\nRaises:\nTypeError: the given id or name has the wrong type.\n\nReturns:\nthe same datastore.Key.\n\nUsage:\n>>> add_key_path(key_proto, 'Kind', 'name')  # no parent, with name\ndatastore.Key(...)\n>>> add_key_path(key_proto, 'Kind2', 1)  # no parent, with id\ndatastore.Key(...)\n>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1)  # parent, complete\ndatastore.Key(...)\n>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2')  # parent, incomplete\ndatastore.Key(...)", "source": "codesearchnet"}
{"code": "def _add_ttl_ns(self, line):\n    lg = logging.getLogger(('%s.%s' % (self.ln, inspect.stack()[0][3])))\n    lg.setLevel(self.log_level)\n    lg.debug('line:\\n%s', line)\n    line = str(line).strip()\n    if ((line is None) or (line == 'none') or (line == '') or (not line.lower().startswith('@prefix'))):\n        return\n    line = line.replace('@prefix', '', 1).strip()\n    if line.endswith('.'):\n        line = line[:(- 1)]\n    prefix = line[:line.find(':')].strip()\n    uri = self.clean_iri(line[(line.find(':') + 1):].strip())\n    lg.debug('\\nprefix: %s  uri: %s', prefix, uri)\n    self.bind(prefix, uri, override=False, calc=False)", "docstring": "takes one prefix line from the turtle file and binds the namespace\nto the class\n\nArgs:\nline: the turtle prefix line string", "source": "codesearchnet"}
{"code": "def GetLoadedModuleBySuffix(path):\n    root = os.path.splitext(path)[0]\n    for module in sys.modules.values():\n        mod_root = os.path.splitext((getattr(module, '__file__', None) or ''))[0]\n        if (not mod_root):\n            continue\n        if (not os.path.isabs(mod_root)):\n            mod_root = os.path.join(os.getcwd(), mod_root)\n        if IsPathSuffix(mod_root, root):\n            return module\n    return None", "docstring": "Searches sys.modules to find a module with the given file path.\n\nArgs:\npath: Path to the source file. It can be relative or absolute, as suffix\nmatch can handle both. If absolute, it must have already been\nsanitized.\n\nAlgorithm:\nThe given path must be a full suffix of a loaded module to be a valid match.\nFile extensions are ignored when performing suffix match.\n\nExample:\npath: 'a/b/c.py'\nmodules: {'a': 'a.py', 'a.b': 'a/b.py', 'a.b.c': 'a/b/c.pyc']\nreturns: module('a.b.c')\n\nReturns:\nThe module that corresponds to path, or None if such module was not\nfound.", "source": "codesearchnet"}
{"code": "def core(num: int) -> Text:\n    return 'device:TPU_REPLICATED_CORE:{}'.format(num)", "docstring": "Returns the device name for a core in a replicated TPU computation.\n\nArgs:\nnum: the virtual core number within each replica to which operators should\nbe assigned.\nReturns:\nA device name, suitable for passing to `tf.device()`.", "source": "github-repos"}
{"code": "def with_content_spec(self, column_name: str='content', python_type: Type=str, convert_fn: Optional[Callable[[str], Any]]=None, sql_typecast: Optional[str]=None) -> 'ColumnSpecsBuilder':\n\n    def value_fn(chunk: Chunk) -> Any:\n        if chunk.content.text is None:\n            raise ValueError(f'Expected chunk to contain content. {chunk}')\n        value = chunk.content.text\n        return convert_fn(value) if convert_fn else value\n    self._specs.append(ColumnSpec(column_name=column_name, python_type=python_type, value_fn=value_fn, sql_typecast=sql_typecast))\n    return self", "docstring": "Add content :class:`.ColumnSpec` with optional type and conversion.\n\nArgs:\ncolumn_name: Name for the content column (defaults to \"content\")\npython_type: Python type for the column (defaults to str)\nconvert_fn: Optional function to convert the content text\nIf None, uses content text as-is\nsql_typecast: Optional SQL type cast\n\nReturns:\nSelf for method chaining\n\nExample:\n>>> builder.with_content_spec(\n...     column_name=\"content_length\",\n...     python_type=int,\n...     convert_fn=len  # Store content length instead of content\n... )", "source": "github-repos"}
{"code": "def download_archive_artifact_bundle(self, id_or_uri, file_path):\n    uri = ((self.BACKUP_ARCHIVE_PATH + '/') + extract_id_from_uri(id_or_uri))\n    return self._client.download(uri, file_path)", "docstring": "Downloads an archive for the Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\nfile_path(str): Destination file path.\n\nReturns:\nbool: Successfully downloaded.", "source": "codesearchnet"}
{"code": "def update(self, **kwargs):\n        \n\n        \n        \n        kwargs = {k: (np.array(v) if isinstance(v, (int, float)) else v)\n                  for k, v in kwargs.items()}\n        self.args.update(kwargs)", "docstring": "Update the model arguments with additional arguments.\n\nArgs:\nkwargs (dict): Optional keyword arguments to add to prior args.", "source": "juraj-google-style"}
{"code": "def ping(dest_addr: str, timeout: int=4, unit: str='s', src_addr: str=None, ttl: int=64, seq: int=0, size: int=56) -> (float or None):\n    with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock:\n        sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)\n        if src_addr:\n            sock.bind((src_addr, 0))\n        icmp_id = (threading.current_thread().ident % 65535)\n        try:\n            send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size)\n            delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout)\n        except errors.PingError as e:\n            _debug(e)\n            if EXCEPTIONS:\n                raise e\n            return None\n        if (delay is None):\n            return None\n        if (unit == 'ms'):\n            delay *= 1000\n    return delay", "docstring": "Send one ping to destination address with the given timeout.\n\nArgs:\ndest_addr: The destination address, can be an IP address or a domain name. Ex. \"192.168.1.1\"/\"example.com\"\ntimeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4)\nunit: The unit of returned value. \"s\" for seconds, \"ms\" for milliseconds. (default \"s\")\nsrc_addr: The IP address to ping from. This is for multi-interface clients. Ex. \"192.168.1.20\". (default None)\nttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64)\nseq: ICMP packet sequence, usually increases from 0 in the same process. (default 0)\nsize: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56)\n\nReturns:\nThe delay in seconds/milliseconds or None on timeout.\n\nRaises:\nPingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True.", "source": "codesearchnet"}
{"code": "def __init__(self, schema, force_deterministic=False):\n    self.schema = schema\n    self._type_hint = named_tuple_from_schema(self.schema)\n    self.components = [_nonnull_coder_from_type(field.type) for field in self.schema.fields]\n    if force_deterministic:\n        self.components = [c.as_deterministic_coder(force_deterministic) for c in self.components]\n    self.forced_deterministic = bool(force_deterministic)", "docstring": "Initializes a :class:`RowCoder`.\n\nArgs:\nschema (apache_beam.portability.api.schema_pb2.Schema): The protobuf\nrepresentation of the schema of the data that the RowCoder will be used\nto encode/decode.", "source": "github-repos"}
{"code": "def __build_config_block(self, config_block_node):\n        \n        node_lists = []\n\n        for line_node in config_block_node:\n            if isinstance(line_node, pegnode.ConfigLine):\n                node_lists.append(self.__build_config(line_node))\n            elif isinstance(line_node, pegnode.OptionLine):\n                node_lists.append(self.__build_option(line_node))\n            elif isinstance(line_node, pegnode.ServerLine):\n                node_lists.append(\n                    self.__build_server(line_node))\n            elif isinstance(line_node, pegnode.BindLine):\n                node_lists.append(\n                    self.__build_bind(line_node))\n            elif isinstance(line_node, pegnode.AclLine):\n                node_lists.append(\n                    self.__build_acl(line_node))\n            elif isinstance(line_node, pegnode.BackendLine):\n                node_lists.append(\n                    self.__build_usebackend(line_node))\n            elif isinstance(line_node, pegnode.UserLine):\n                node_lists.append(\n                    self.__build_user(line_node))\n            elif isinstance(line_node, pegnode.GroupLine):\n                node_lists.append(\n                    self.__build_group(line_node))\n            else:\n                \n                pass\n        return node_lists", "docstring": "parse `config_block` in each section\n\nArgs:\nconfig_block_node (TreeNode): Description\n\nReturns:\n[line_node1, line_node2, ...]", "source": "juraj-google-style"}
{"code": "def not_found(cls, errors=None):\n        \n        if cls.expose_status:  \n            cls.response.content_type = 'application/json'\n            cls.response._status_line = '404 Not Found'\n\n        return cls(404, None, errors).to_json", "docstring": "Shortcut API for HTTP 404 `Not found` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "juraj-google-style"}
{"code": "def __init__(self, description=None, **options):\n        \n        self.__doc__ = description\n        self._options = {}\n        for name, option in compat.iteritems(options):\n\n            self.register(name, option)\n\n        super(Namespace, self).__init__()", "docstring": "Initalize the Namespace with options\n\nArgs:\ndescription (str, optional): A human readable description of what\nthe Namespace contains.\n**options: Each keyword should be an Option object which will be\nadded to the Namespace.\n\nRaises:\nTypeError: If an entry is not an Option object.", "source": "juraj-google-style"}
{"code": "def __init__(self, steps_col, slc):\n        \n        self._col = steps_col\n        self._idx = slc.indices(len(self._col))\n        self._flt = {\n            'snap': False,\n            'rprof': False,\n            'fields': [],\n            'func': lambda _: True,\n        }\n        self._dflt_func = self._flt['func']", "docstring": "Initialization of instances:\n\nArgs:\nsteps_col (:class:`_Steps` or :class:`_Snaps`): steps collection,\ni.e. :attr:`StagyyData.steps` or :attr:`StagyyData.snaps`\nattributes.\nslc (slice): slice of desired isteps or isnap.", "source": "juraj-google-style"}
{"code": "def email_has_role(self, email, role_name, uuid=None):\n        \n        mbr_data = self.get_membership(uuid=uuid)\n        docs = []\n        try:\n            docs = mbr_data['response']['docs']\n        except KeyError:\n            failure_message = ('KeyError in membership data - '\n                               'got {0}'.format(mbr_data))\n            log.exception(failure_message)\n            raise PyLmodUnexpectedData(failure_message)\n        if len(docs) == 0:\n            return False\n        has_role = any(\n            (x.get('email') == email and x.get('roleType') == role_name)\n            for x in docs\n        )\n        if has_role:\n            return True\n        return False", "docstring": "Determine if an email is associated with a role.\n\nArgs:\nemail (str): user email\nrole_name (str): user role\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: Unexpected data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\nbool: True or False if email has role_name", "source": "juraj-google-style"}
{"code": "def random_expr(depth, vlist, ops):\n  \n  if not depth:\n    return str(vlist[random.randrange(len(vlist))])\n\n  max_depth_side = random.randrange(2)\n  other_side_depth = random.randrange(depth)\n\n  left = random_expr(depth - 1\n                     if max_depth_side else other_side_depth, vlist, ops)\n  right = random_expr(depth - 1\n                      if not max_depth_side else other_side_depth, vlist, ops)\n\n  op = ops[random.randrange(len(ops))]\n  return ExprNode(left, right, op)", "docstring": "Generate a random expression tree.\n\nArgs:\ndepth: At least one leaf will be this many levels down from the top.\nvlist: A list of chars. These chars are randomly selected as leaf values.\nops: A list of ExprOp instances.\n\nReturns:\nAn ExprNode instance which is the root of the generated expression tree.", "source": "juraj-google-style"}
{"code": "def from_index_amount(cls, matrixpos, amt):\n        \n        f = np.identity(3)\n        f[matrixpos] += amt\n        return cls(f)", "docstring": "Factory method for constructing a Deformation object\nfrom a matrix position and amount\n\nArgs:\nmatrixpos (tuple): tuple corresponding the matrix position to\nhave a perturbation added\namt (float): amount to add to the identity matrix at position\nmatrixpos", "source": "juraj-google-style"}
{"code": "def add_collection_def(meta_graph_def, key, graph=None, export_scope=None, exclude_nodes=None, override_contents=None):\n    if graph and (not isinstance(graph, ops.Graph)):\n        raise TypeError(f'graph must be of type Graph. Received type: {type(graph)}.')\n    if not isinstance(key, str) and (not isinstance(key, bytes)):\n        logging.warning('Only collections with string type keys will be serialized. This key has %s', type(key))\n        return\n    graph = graph or ops.get_default_graph()\n    if override_contents:\n        collection_list = override_contents\n    else:\n        collection_list = graph.get_collection(key)\n    collection_list = [x for x in collection_list if _should_include_node(x, export_scope, exclude_nodes)]\n    if not collection_list:\n        return\n    try:\n        col_def = meta_graph_def.collection_def[key]\n        to_proto = ops.get_to_proto_function(key)\n        proto_type = ops.get_collection_proto_type(key)\n        if to_proto:\n            kind = 'bytes_list'\n            for x in collection_list:\n                proto = to_proto(x, export_scope=export_scope)\n                if proto:\n                    assert isinstance(proto, proto_type)\n                    getattr(col_def, kind).value.append(proto.SerializeToString())\n        else:\n            kind = _get_kind_name(collection_list[0])\n            if kind == 'node_list':\n                for x in collection_list:\n                    if not export_scope or x.name.startswith(export_scope):\n                        getattr(col_def, kind).value.append(ops.strip_name_scope(x.name, export_scope))\n            elif kind == 'bytes_list':\n                getattr(col_def, kind).value.extend([compat.as_bytes(x) for x in collection_list])\n            else:\n                getattr(col_def, kind).value.extend([x for x in collection_list])\n    except Exception as e:\n        logging.warning(\"Issue encountered when serializing %s.\\nType is unsupported, or the types of the items don't match field type in CollectionDef. Note this is a warning and probably safe to ignore.\\n%s\", key, str(e))\n        if key in meta_graph_def.collection_def:\n            del meta_graph_def.collection_def[key]\n        return", "docstring": "Adds a collection to MetaGraphDef protocol buffer.\n\nArgs:\nmeta_graph_def: MetaGraphDef protocol buffer.\nkey: One of the GraphKeys or user-defined string.\ngraph: The `Graph` from which to get collections.\nexport_scope: Optional `string`. Name scope to remove.\nexclude_nodes: An iterable of nodes or `string` node names to omit from the\ncollection, or None.\noverride_contents: An iterable of values to place in the collection,\nignoring the current values (if set).", "source": "github-repos"}
{"code": "def CopyToDateTimeString(self):\n    if ((self._timestamp is None) or (self._timestamp < 0) or (self._timestamp > self._UINT64_MAX)):\n        return None\n    (timestamp, remainder) = divmod(self._timestamp, self._100NS_PER_SECOND)\n    (number_of_days, hours, minutes, seconds) = self._GetTimeValues(timestamp)\n    (year, month, day_of_month) = self._GetDateValuesWithEpoch(number_of_days, self._EPOCH)\n    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format(year, month, day_of_month, hours, minutes, seconds, remainder)", "docstring": "Copies the FILETIME timestamp to a date and time string.\n\nReturns:\nstr: date and time value formatted as: \"YYYY-MM-DD hh:mm:ss.#######\" or\nNone if the timestamp is missing or invalid.", "source": "codesearchnet"}
{"code": "def get_variable_value_for_variation(self, variable, variation):\n    if ((not variable) or (not variation)):\n        return None\n    if (variation.id not in self.variation_variable_usage_map):\n        self.logger.error(('Variation with ID \"%s\" is not in the datafile.' % variation.id))\n        return None\n    variable_usages = self.variation_variable_usage_map[variation.id]\n    variable_usage = None\n    if variable_usages:\n        variable_usage = variable_usages.get(variable.id)\n    if variable_usage:\n        variable_value = variable_usage.value\n        self.logger.info(('Value for variable \"%s\" for variation \"%s\" is \"%s\".' % (variable.key, variation.key, variable_value)))\n    else:\n        variable_value = variable.defaultValue\n        self.logger.info(('Variable \"%s\" is not used in variation \"%s\". Assigning default value \"%s\".' % (variable.key, variation.key, variable_value)))\n    return variable_value", "docstring": "Get the variable value for the given variation.\n\nArgs:\nvariable: The Variable for which we are getting the value.\nvariation: The Variation for which we are getting the variable value.\n\nReturns:\nThe variable value or None if any of the inputs are invalid.", "source": "codesearchnet"}
{"code": "def _dbParamsMom01(self):\n    db_grad = [[]] * 10\n    db_out = [[]] * 10\n    db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]\n    db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]\n    db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]\n    db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]\n    db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]\n    db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]\n    db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]\n    db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]\n    db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]\n    db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]\n    db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]\n    db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]\n    db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]\n    db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]\n    db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]\n    db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]\n    db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]\n    db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]\n    db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]\n    db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]\n    return (db_grad, db_out)", "docstring": "Return dist-belief momentum values.\n\nReturn values been generated from the dist-belief momentum unittest,\nrunning with a learning rate of 0.1 and a momentum of 0.1.\n\nThese values record how a parameter vector of size 10, initialized with 0.0,\ngets updated with 10 consecutive momentum steps.  It uses random gradients.\n\nReturns:\ndb_grad: The gradients to apply\ndb_out: The parameters after the momentum update.", "source": "github-repos"}
{"code": "def _EmbedIPython(variables, argv=None):\n    import IPython\n    argv = argv or []\n    IPython.start_ipython(argv=argv, user_ns=variables)", "docstring": "Drops into an IPython REPL with variables available for use.\n\nArgs:\nvariables: A dict of variables to make available. Keys are variable names.\nValues are variable values.\nargv: The argv to use for starting ipython. Defaults to an empty list.", "source": "github-repos"}
{"code": "def get_tpu_system_metadata(self):\n    cluster_spec = self.cluster_spec()\n    cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None\n    tpu_system_metadata = tpu_system_metadata_lib._query_tpu_system_metadata(self.master(), cluster_def=cluster_def, query_topology=False)\n    return tpu_system_metadata", "docstring": "Returns the metadata of the TPU system.\n\nUsers can call this method to get some facts of the TPU system, like\ntotal number of cores, number of TPU workers and the devices. E.g.\n```python\n\nresolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\ntpu_system_metadata = resolver.get_tpu_system_metadata()\nnum_hosts = tpu_system_metadata.num_hosts\n```\n\nReturns:\nA `tf.tpu.experimental.TPUSystemMetadata` object.", "source": "github-repos"}
{"code": "def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio, pseudo_random, overlapping):\n    with self.cached_session() as sess:\n        p, r, c = nn_ops.fractional_avg_pool_v2(input_tensor, pooling_ratio, pseudo_random, overlapping, seed=self._SEED)\n        actual, row_seq, col_seq = self.evaluate([p, r, c])\n        expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq, col_seq, overlapping)\n        self.assertShapeEqual(expected, p)\n        self.assertAllClose(expected, actual)", "docstring": "Validate FractionalAvgPool's result against expected.\n\nExpected result is computed given input_tensor, and pooling region defined\nby row_seq and col_seq.\n\nArgs:\ninput_tensor: A tensor or numpy ndarray.\npooling_ratio: A list or tuple of length 4, first and last element be 1.\npseudo_random: Use pseudo random method to generate pooling sequence.\noverlapping: Use overlapping when pooling.\n\nReturns:\nNone", "source": "github-repos"}
{"code": "class MaxNorm(Constraint):\n\n    def __init__(self, max_value=2, axis=0):\n        self.max_value = max_value\n        self.axis = axis\n\n    @doc_controls.do_not_generate_docs\n    def __call__(self, w):\n        norms = backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))\n        desired = backend.clip(norms, 0, self.max_value)\n        return w * (desired / (backend.epsilon() + norms))\n\n    @doc_controls.do_not_generate_docs\n    def get_config(self):\n        return {'max_value': self.max_value, 'axis': self.axis}", "docstring": "MaxNorm weight constraint.\n\nConstrains the weights incident to each hidden unit\nto have a norm less than or equal to a desired value.\n\nAlso available via the shortcut function `tf.keras.constraints.max_norm`.\n\nArgs:\nmax_value: the maximum norm value for the incoming weights.\naxis: integer, axis along which to calculate weight norms.\nFor instance, in a `Dense` layer the weight matrix\nhas shape `(input_dim, output_dim)`,\nset `axis` to `0` to constrain each weight vector\nof length `(input_dim,)`.\nIn a `Conv2D` layer with `data_format=\"channels_last\"`,\nthe weight tensor has shape\n`(rows, cols, input_depth, output_depth)`,\nset `axis` to `[0, 1, 2]`\nto constrain the weights of each filter tensor of size\n`(rows, cols, input_depth)`.", "source": "github-repos"}
{"code": "def __init__(self, *args: str, api_name: str=TENSORFLOW_API_NAME, v1: Optional[Sequence[str]]=None, allow_multiple_exports: bool=True):\n    self._names = args\n    self._names_v1 = v1 if v1 is not None else args\n    self._api_name = api_name\n    self._validate_symbol_names()", "docstring": "Export under the names *args (first one is considered canonical).\n\nArgs:\n*args: API names in dot delimited format.\napi_name: API you want to generate Currently, only `tensorflow`.\nv1: Names for the TensorFlow V1 API. If not set, we will use V2 API names\nboth for TensorFlow V1 and V2 APIs.\nallow_multiple_exports: Deprecated.", "source": "github-repos"}
{"code": "def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):\n    \n    assert len(gt_boxlists) == len(\n        pred_boxlists\n    ), \"Length of gt and pred lists need to be same.\"\n    prec, rec = calc_detection_voc_prec_rec(\n        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh\n    )\n    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)\n    return {\"ap\": ap, \"map\": np.nanmean(ap)}", "docstring": "Evaluate on voc dataset.\nArgs:\npred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.\ngt_boxlists(list[BoxList]): ground truth boxlist, has labels field.\niou_thresh: iou thresh\nuse_07_metric: boolean\nReturns:\ndict represents the results", "source": "juraj-google-style"}
{"code": "def SetDecryptedStreamSize(self, decrypted_stream_size):\n    \n    if self._is_open:\n      raise IOError('Already open.')\n\n    if decrypted_stream_size < 0:\n      raise ValueError((\n          'Invalid decrypted stream size: {0:d} value out of '\n          'bounds.').format(decrypted_stream_size))\n\n    self._decrypted_stream_size = decrypted_stream_size", "docstring": "Sets the decrypted stream size.\n\nThis function is used to set the decrypted stream size if it can be\ndetermined separately.\n\nArgs:\ndecrypted_stream_size (int): size of the decrypted stream in bytes.\n\nRaises:\nIOError: if the file-like object is already open.\nOSError: if the file-like object is already open.\nValueError: if the decrypted stream size is invalid.", "source": "juraj-google-style"}
{"code": "def rule(self, column: str, rule: str, error: str, value: Any, rule_params: dict={}) -> None:\n    log = self._build_rule_message(column, rule, error, value, rule_params)\n    self.queue_log_message(log)", "docstring": "Adds rule error information to base log message and\nsends it to the logger for writing.\n\nArgs:\n* column: column where the rule is applied\n* rule: rule that is violated and raises this message\n* error: error that occurred\n* value: value that violates the rule\n* rule_params: optional, parameters set for the rule\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def delay_embedding(data, emb_dim, lag=1):\n  \n  data = np.asarray(data)\n  min_len = (emb_dim - 1) * lag + 1\n  if len(data) < min_len:\n    msg = \"cannot embed data of length {} with embedding dimension {} \" \\\n        + \"and lag {}, minimum required length is {}\"\n    raise ValueError(msg.format(len(data), emb_dim, lag, min_len))\n  m = len(data) - min_len + 1\n  indices = np.repeat([np.arange(emb_dim) * lag], m, axis=0)\n  indices += np.arange(m).reshape((m, 1))\n  return data[indices]", "docstring": "Perform a time-delay embedding of a time series\n\nArgs:\ndata (array-like):\nthe data that should be embedded\nemb_dim (int):\nthe embedding dimension\nKwargs:\nlag (int):\nthe lag between elements in the embedded vectors\n\nReturns:\nemb_dim x m array:\nmatrix of embedded vectors of the form\n[data[i], data[i+lag], data[i+2*lag], ... data[i+(emb_dim-1)*lag]]\nfor i in 0 to m-1 (m = len(data)-(emb_dim-1)*lag)", "source": "juraj-google-style"}
{"code": "def setup(self, puller: bool=None, subscriptions: Dict[str, Any]={}):\n\t\t\n\t\tif puller:\n\t\t\tpuller = self._zmq.socket(zmq.PULL)\n\t\t\tip, port, host = self.rslv('rcv')\n\t\t\tpuller.bind('tcp:\n\t\t\tself.poll(puller)\n\t\tif subscriptions:\n\t\t\tfor publisher in subscriptions:  \n\t\t\t\tself.add(publisher, subscriptions[publisher].get('slots'), subscriptions[publisher].get('buffer-length'))\n\t\t\tlogger.info('Listening to %s', {\n\t\t\t\tk: (1 if subscriptions[k].get('slots') is None else len(subscriptions[k].get('slots')))\n\t\t\t\tfor k in subscriptions\n\t\t\t})", "docstring": "Sets up this Node with the specified Interfaces before it is run.\n\nArgs:\npuller: Indication if a Puller Interface should be created.\nsubscriptions: Collection of the Subscriber Interfaces to be created and their Slots.", "source": "juraj-google-style"}
{"code": "def wrap_with_monitor(env, video_dir):\n    env = ExtendToEvenDimentions(env)\n    env = RenderObservations(env)\n    env = gym.wrappers.Monitor(env, video_dir, force=True, video_callable=(lambda idx: True), write_upon_reset=True)\n    return env", "docstring": "Wrap environment with gym.Monitor.\n\nVideo recording provided by Monitor requires\n1) both height and width of observation to be even numbers.\n2) rendering of environment\n\nArgs:\nenv: environment.\nvideo_dir: video directory.\n\nReturns:\nwrapped environment.", "source": "codesearchnet"}
{"code": "def execute_code(self, code, filename=None, isolate=False):\n        \n        def _apply():\n            self.compile_code(code=code,\n                              filename=filename,\n                              exec_namespace=self.globals)\n\n        \n        \n        \n        \n        \n        \n        \n        if isolate:\n            saved_globals = dict(self.globals)\n\n            try:\n                _apply()\n            finally:\n                self.globals.clear()\n                self.globals.update(saved_globals)\n        else:\n            _apply()", "docstring": "Execute code within the execution context.\n\nArgs:\ncode (str or SourceCode): Rex code to execute.\nfilename (str): Filename to report if there are syntax errors.\nisolate (bool): If True, do not affect `self.globals` by executing\nthis code.", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(DeviceCredential, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.DEVICE_SERIAL_NUMBER, local_stream):\n        self._device_serial_number = primitives.TextString(tag=enums.Tags.DEVICE_SERIAL_NUMBER)\n        self._device_serial_number.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.PASSWORD, local_stream):\n        self._password = primitives.TextString(tag=enums.Tags.PASSWORD)\n        self._password.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.DEVICE_IDENTIFIER, local_stream):\n        self._device_identifier = primitives.TextString(tag=enums.Tags.DEVICE_IDENTIFIER)\n        self._device_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.NETWORK_IDENTIFIER, local_stream):\n        self._network_identifier = primitives.TextString(tag=enums.Tags.NETWORK_IDENTIFIER)\n        self._network_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.MACHINE_IDENTIFIER, local_stream):\n        self._machine_identifier = primitives.TextString(tag=enums.Tags.MACHINE_IDENTIFIER)\n        self._machine_identifier.read(local_stream, kmip_version=kmip_version)\n    if self.is_tag_next(enums.Tags.MEDIA_IDENTIFIER, local_stream):\n        self._media_identifier = primitives.TextString(tag=enums.Tags.MEDIA_IDENTIFIER)\n        self._media_identifier.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the DeviceCredential struct and decode it into\nits constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def Open(self, filename):\n    \n    if not super(WinevtResourcesSqlite3DatabaseReader, self).Open(filename):\n      return False\n\n    version = self.GetMetadataAttribute('version')\n    if not version or version != '20150315':\n      raise RuntimeError('Unsupported version: {0:s}'.format(version))\n\n    string_format = self.GetMetadataAttribute('string_format')\n    if not string_format:\n      string_format = 'wrc'\n\n    if string_format not in ('pep3101', 'wrc'):\n      raise RuntimeError('Unsupported string format: {0:s}'.format(\n          string_format))\n\n    self._string_format = string_format\n    return True", "docstring": "Opens the database reader object.\n\nArgs:\nfilename (str): filename of the database.\n\nReturns:\nbool: True if successful.\n\nRaises:\nRuntimeError: if the version or string format of the database\nis not supported.", "source": "juraj-google-style"}
{"code": "def register_menu_item(self, items):\n        \n        for itm in items:\n            if itm.group in self.menu_items:\n                \n                if itm not in self.menu_items[itm.group]['items']:\n                    self.menu_items[itm.group]['items'].append(itm)\n            else:\n                logger.warning('Tried registering menu item to unknown group {}'.format(itm.group))", "docstring": "Registers a views menu items into the metadata for the application. Skip if the item is already present\n\nArgs:\nitems (`list` of `MenuItem`): A list of `MenuItem`s\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def diff_bisect(self, text1, text2, deadline):\n    \n\n    \n    text1_length = len(text1)\n    text2_length = len(text2)\n    max_d = (text1_length + text2_length + 1) \n    v_offset = max_d\n    v_length = 2 * max_d\n    v1 = [-1] * v_length\n    v1[v_offset + 1] = 0\n    v2 = v1[:]\n    delta = text1_length - text2_length\n    \n    \n    front = (delta % 2 != 0)\n    \n    \n    k1start = 0\n    k1end = 0\n    k2start = 0\n    k2end = 0\n    for d in range(max_d):\n      \n      if time.time() > deadline:\n        break\n\n      \n      for k1 in range(-d + k1start, d + 1 - k1end, 2):\n        k1_offset = v_offset + k1\n        if k1 == -d or (k1 != d and\n            v1[k1_offset - 1] < v1[k1_offset + 1]):\n          x1 = v1[k1_offset + 1]\n        else:\n          x1 = v1[k1_offset - 1] + 1\n        y1 = x1 - k1\n        while (x1 < text1_length and y1 < text2_length and\n               text1[x1] == text2[y1]):\n          x1 += 1\n          y1 += 1\n        v1[k1_offset] = x1\n        if x1 > text1_length:\n          \n          k1end += 2\n        elif y1 > text2_length:\n          \n          k1start += 2\n        elif front:\n          k2_offset = v_offset + delta - k1\n          if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:\n            \n            x2 = text1_length - v2[k2_offset]\n            if x1 >= x2:\n              \n              return self.diff_bisectSplit(text1, text2, x1, y1, deadline)\n\n      \n      for k2 in range(-d + k2start, d + 1 - k2end, 2):\n        k2_offset = v_offset + k2\n        if k2 == -d or (k2 != d and\n            v2[k2_offset - 1] < v2[k2_offset + 1]):\n          x2 = v2[k2_offset + 1]\n        else:\n          x2 = v2[k2_offset - 1] + 1\n        y2 = x2 - k2\n        while (x2 < text1_length and y2 < text2_length and\n               text1[-x2 - 1] == text2[-y2 - 1]):\n          x2 += 1\n          y2 += 1\n        v2[k2_offset] = x2\n        if x2 > text1_length:\n          \n          k2end += 2\n        elif y2 > text2_length:\n          \n          k2start += 2\n        elif not front:\n          k1_offset = v_offset + delta - k2\n          if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:\n            x1 = v1[k1_offset]\n            y1 = v_offset + x1 - k1_offset\n            \n            x2 = text1_length - x2\n            if x1 >= x2:\n              \n              return self.diff_bisectSplit(text1, text2, x1, y1, deadline)\n\n    \n    \n    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]", "docstring": "Find the 'middle snake' of a diff, split the problem in two\nand return the recursively constructed diff.\nSee Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\ndeadline: Time at which to bail if not yet complete.\n\nReturns:\nArray of diff tuples.", "source": "juraj-google-style"}
{"code": "def get_frame(self, frame_id):\n        \n        if frame_id < 0 or frame_id >= self._frame_cnt:\n            raise IndexError(\n                '\"frame_id\" must be between 0 and {}'.format(self._frame_cnt -\n                                                             1))\n        if frame_id == self._position:\n            return self.read()\n        if self._cache:\n            img = self._cache.get(frame_id)\n            if img is not None:\n                self._position = frame_id + 1\n                return img\n        self._set_real_position(frame_id)\n        ret, img = self._vcap.read()\n        if ret:\n            if self._cache:\n                self._cache.put(self._position, img)\n            self._position += 1\n        return img", "docstring": "Get frame by index.\n\nArgs:\nframe_id (int): Index of the expected frame, 0-based.\n\nReturns:\nndarray or None: Return the frame if successful, otherwise None.", "source": "juraj-google-style"}
{"code": "def Create(self, request, global_params=None):\n    config = self.GetMethodConfig('Create')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).\n\nArgs:\nrequest: (CloudbuildProjectsLocationsBuildsCreateRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def __init__(self, pipeline, required_transforms=None, referenced_pcollections=None, cached_pcollections=None):\n    self._required_transforms = required_transforms or set()\n    self._referenced_pcollections = referenced_pcollections or set()\n    self._cached_pcollections = cached_pcollections or set()\n    super().__init__(pipeline=pipeline, default_vertex_attrs={'color': 'gray', 'fontcolor': 'gray'}, default_edge_attrs={'color': 'gray'})\n    transform_updates, pcollection_updates = self._generate_graph_update_dicts()\n    self._update_graph(transform_updates, pcollection_updates)", "docstring": "Constructor of PipelineGraph.\n\nArgs:\npipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.\nrequired_transforms: (list/set of str) ID of top level PTransforms that\nlead to visible results.\nreferenced_pcollections: (list/set of str) ID of PCollections that are\nreferenced by top level PTransforms executed (i.e.\nrequired_transforms)\ncached_pcollections: (set of str) a set of PCollection IDs of those whose\ncached results are used in the execution.", "source": "github-repos"}
{"code": "def get_score(self, error=None):\n    if (error is not None):\n        self.error = error\n    if (self.error >= 0):\n        return (1 / (self.error + 1))\n    else:\n        return (1 + abs(self.error))", "docstring": "Calculate bee's fitness score given a value returned by the fitness\nfunction\n\nArgs:\nerror (float): value returned by the fitness function\n\nReturns:\nfloat: derived fitness score", "source": "codesearchnet"}
{"code": "def get_candidate(self, dest_spec: ValueSpec) -> typing.Optional[ValueSpec]:\n    for c in self._candidates:\n        if dest_spec.__class__ == c.__class__ and dest_spec.is_compatible(c):\n            return c\n    for c in self._candidates:\n        if isinstance(c, Union):\n            child = c.get_candidate(dest_spec)\n            if child is not None:\n                return child\n        elif dest_spec.is_compatible(c):\n            return c\n    return None", "docstring": "Get candidate by a destination value spec.\n\nArgs:\ndest_spec: destination value spec which is a superset of the value spec\nto return. E.g. Any (dest_spec) is superset of Int (child spec).\n\nReturns:\nThe first value spec under Union with which the destination value spec\nis compatible.", "source": "github-repos"}
{"code": "def learn_mealy_machine(self):\n        \n        logging.info('Initializing learning procedure.')\n        self._init_table()\n\n        logging.info('Generating a closed and consistent observation table.')\n        while True:\n\n            closed = False\n            \n            while not closed:\n\n                logging.debug('Checking if table is closed.')\n                closed, string = self.observation_table.is_closed()\n                if not closed:\n                    logging.debug('Closing table.')\n                    self._ot_make_closed(string)\n                else:\n                    logging.debug('Table closed.')\n\n            \n            mma = self.get_mealy_conjecture()\n\n            logging.info('Generated conjecture machine with %d states.',\n                         len(list(mma.states)))\n\n            \n            logging.debug('Running equivalence query.')\n            found, counter_example = self._equivalence_query(mma)\n\n            \n            if found:\n                logging.info('No counterexample found. Hypothesis is correct!')\n                break\n\n            \n            \n            logging.info(\n                'Processing counterexample %input_string with length %d.',\n                counter_example,\n                len(counter_example))\n            self._process_counter_example(mma, counter_example)\n\n        logging.info('Learning complete.')\n        return mma", "docstring": "Implements the high level loop of the algorithm for learning a\nMealy machine.\nArgs:\nNone\nReturns:\nMealyMachine: The learned mealy machine", "source": "juraj-google-style"}
{"code": "def _ParsePlistKeyValue(self, knowledge_base, name, value):\n    \n    if not knowledge_base.GetHostname():\n      if name in self._PLIST_KEYS:\n        hostname_artifact = artifacts.HostnameArtifact(name=value)\n        knowledge_base.SetHostname(hostname_artifact)", "docstring": "Parses a plist key value.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nname (str): name of the plist key.\nvalue (str): value of the plist key.", "source": "juraj-google-style"}
{"code": "def apply_theme(self, property_values):\n        \n        old_dict = self.themed_values()\n\n        \n        if old_dict is property_values:\n            return\n\n        removed = set()\n        \n        \n        if old_dict is not None:\n            removed.update(set(old_dict.keys()))\n        added = set(property_values.keys())\n        old_values = dict()\n        for k in added.union(removed):\n            old_values[k] = getattr(self, k)\n\n        if len(property_values) > 0:\n            setattr(self, '__themed_values__', property_values)\n        elif hasattr(self, '__themed_values__'):\n            delattr(self, '__themed_values__')\n\n        \n        \n        for k, v in old_values.items():\n            if k in self._unstable_themed_values:\n                del self._unstable_themed_values[k]\n\n        \n        for k, v in old_values.items():\n            descriptor = self.lookup(k)\n            descriptor.trigger_if_changed(self, v)", "docstring": "Apply a set of theme values which will be used rather than\ndefaults, but will not override application-set values.\n\nThe passed-in dictionary may be kept around as-is and shared with\nother instances to save memory (so neither the caller nor the\n|HasProps| instance should modify it).\n\nArgs:\nproperty_values (dict) : theme values to use in place of defaults\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def annotate(self, records, **kwargs):\n        \n        \n        self.annotator_params.update(**kwargs)\n        chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)\n\n        chunk = []\n        for i, record in enumerate(records):\n            chunk.append(record)\n            if (i + 1) % chunk_size == 0:\n                for r in self._execute(chunk):\n                    yield r\n                chunk = []\n\n        if chunk:\n            for r in self._execute(chunk):\n                yield r\n            chunk = []", "docstring": "Annotate a set of records with stored fields.\n\nArgs:\nrecords: A list or iterator (can be a Query object)\nchunk_size: The number of records to annotate at once (max 500).\n\nReturns:\nA generator that yields one annotated record at a time.", "source": "juraj-google-style"}
{"code": "def return_secondary_learner(self):\n    estimator = self.base_learner_origin.return_estimator()\n    estimator = estimator.set_params(**self.secondary_learner_hyperparameters)\n    return estimator", "docstring": "Returns secondary learner using its origin and the given hyperparameters\n\nReturns:\nest (estimator): Estimator object", "source": "codesearchnet"}
{"code": "def set_iprouting(self, value=None, default=False, disable=False):\n        \n        if value is False:\n            disable = True\n        cmd = self.command_builder('ip routing', value=value, default=default,\n                                   disable=disable)\n        return self.configure(cmd)", "docstring": "Configures the state of global ip routing\n\nEosVersion:\n4.13.7M\n\nArgs:\nvalue(bool): True if ip routing should be enabled or False if\nip routing should be disabled\ndefault (bool): Controls the use of the default keyword\ndisable (bool): Controls the use of the no keyword\n\nReturns:\nbool: True if the commands completed successfully otherwise False", "source": "juraj-google-style"}
{"code": "def find_response_component(self, api_id=None, signature_id=None):\n    if ((not api_id) and (not signature_id)):\n        raise ValueError('At least one of api_id and signature_id is required')\n    components = list()\n    if self.response_data:\n        for component in self.response_data:\n            if (((api_id and component['api_id']) == api_id) or (signature_id and (component['signature_id'] == signature_id))):\n                components.append(component)\n    return components", "docstring": "Find one or many repsonse components.\n\nArgs:\n\napi_id (str):           Api id associated with the component(s) to be retrieved.\n\nsignature_id (str):     Signature id associated with the component(s) to be retrieved.\n\nReturns:\nA list of dictionaries containing component data", "source": "codesearchnet"}
{"code": "def is_monotonic(neurite, tol):\n    \n\n    for node in neurite.iter_sections():\n        \n        sec = node.points\n        for point_id in range(len(sec) - 1):\n            if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol:\n                return False\n        \n        if(node.parent is not None and\n           sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol):\n            return False\n\n    return True", "docstring": "Check if neurite tree is monotonic\n\nIf each child has smaller or equal diameters from its parent\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): tolerance\n\nReturns:\nTrue if neurite monotonic", "source": "juraj-google-style"}
{"code": "def from_csv(cls, filename: str):\n        \n        with open(filename, \"r\", encoding=\"utf-8\") as f:\n            reader = csv.reader(f, delimiter=unicode2str(\",\"),\n                                quotechar=unicode2str(\"\\\"\"),\n                                quoting=csv.QUOTE_MINIMAL)\n            entries = list()\n            header_read = False\n            elements = None\n            for row in reader:\n                if not header_read:\n                    elements = row[1:(len(row) - 1)]\n                    header_read = True\n                else:\n                    name = row[0]\n                    energy = float(row[-1])\n                    comp = dict()\n                    for ind in range(1, len(row) - 1):\n                        if float(row[ind]) > 0:\n                            comp[Element(elements[ind - 1])] = float(row[ind])\n                    entries.append(PDEntry(Composition(comp), energy, name))\n        return cls(entries)", "docstring": "Imports PDEntries from a csv.\n\nArgs:\nfilename: Filename to import from.\n\nReturns:\nList of Elements, List of PDEntries", "source": "juraj-google-style"}
{"code": "def groupby(iterable: Iterable[_Tin], *, key: Callable[[_Tin], _K], value: Callable[[_Tin], _Tout]=_identity) -> dict[_K, list[_Tout]]:\n    groups = collections.defaultdict(list)\n    for v in iterable:\n        groups[key(v)].append(value(v))\n    return dict(groups)", "docstring": "Similar to `itertools.groupby` but return result as a `dict()`.\n\nExample:\n\n```python\nout = epy.groupby(\n['555', '4', '11', '11', '333'],\nkey=len,\nvalue=int,\n)\n# Order is consistent with above\nassert out == {\n3: [555, 333],\n1: [4],\n2: [11, 11],\n}\n```\n\nOther difference with `itertools.groupby`:\n\n* Iterable do not need to be sorted. Order of the original iterator is\npreserved in the group.\n* Transformation can be applied to the value too\n\nArgs:\niterable: The iterable to group\nkey: Mapping applied to group the values (should return a hashable)\nvalue: Mapping applied to the values\n\nReturns:\nThe dict", "source": "github-repos"}
{"code": "def add_curves_from_las(self, fname, remap=None, funcs=None):\n        \n        try:  \n            self.add_curves_from_lasio(lasio.read(fname),\n                                       remap=remap,\n                                       funcs=funcs\n                                       )\n        except:  \n            for f in fname:\n                self.add_curves_from_lasio(lasio.read(f),\n                                           remap=remap,\n                                           funcs=funcs\n                                           )\n\n        return None", "docstring": "Given a LAS file, add curves from it to the current well instance.\nEssentially just wraps ``add_curves_from_lasio()``.\n\nArgs:\nfname (str): The path of the LAS file to read curves from.\nremap (dict): Optional. A dict of 'old': 'new' LAS field names.\nfuncs (dict): Optional. A dict of 'las field': function() for\nimplementing a transform before loading. Can be a lambda.\n\nReturns:\nNone. Works in place.", "source": "juraj-google-style"}
{"code": "def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):\n    \n    url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)\n    args = {}\n    if start_index:\n      args['startIndex'] = start_index\n    if max_results:\n      args['maxResults'] = max_results\n    if page_token is not None:\n      args['pageToken'] = page_token\n    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Retrieves the contents of a table.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nstart_index: the index of the row at which to start retrieval.\nmax_results: an optional maximum number of rows to retrieve.\npage_token: an optional token to continue the retrieval.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def put(self, destination):\n        \n        target = get_target_path(destination, self.localpath)\n        shutil.copytree(self.localpath, target)", "docstring": "Copy the referenced directory to this path\n\nThe semantics of this command are similar to unix ``cp``: if ``destination``  already\nexists, the copied directory will be put at ``[destination] // [basename(localpath)]``. If\nit does not already exist, the directory will be renamed to this path (the parent directory\nmust exist).\n\nArgs:\ndestination (str): path to put this directory", "source": "juraj-google-style"}
{"code": "def get_int(self, name, default=None):\n    if (name not in self):\n        if (default is not None):\n            return default\n        raise EnvironmentError.not_found(self._prefix, name)\n    return int(self[name])", "docstring": "Retrieves an environment variable as an integer.\n\nArgs:\nname (str): The case-insensitive, unprefixed variable name.\ndefault: If provided, a default value will be returned\ninstead of throwing ``EnvironmentError``.\n\nReturns:\nint: The environment variable's value as an integer.\n\nRaises:\nEnvironmentError: If the environment variable does not\nexist, and ``default`` was not provided.\nValueError: If the environment variable value is not an\ninteger with base 10.", "source": "codesearchnet"}
{"code": "def event_stream(app, *, filter_by_prefix=None):\n    \n    q = Queue()\n\n    def handle_event(event):\n        if filter_by_prefix is None or\\\n                (filter_by_prefix is not None and\n                 event['type'].startswith(filter_by_prefix)):\n            q.put(event)\n\n    def receive_events():\n        with app.connection() as connection:\n            recv = app.events.Receiver(connection, handlers={\n                '*': handle_event\n            })\n\n            recv.capture(limit=None, timeout=None, wakeup=True)\n\n    t = threading.Thread(target=receive_events)\n    t.start()\n\n    while True:\n        yield q.get(block=True)", "docstring": "Generator function that returns celery events.\n\nThis function turns the callback based celery event handling into a generator.\n\nArgs:\napp: Reference to a celery application object.\nfilter_by_prefix (str): If not None, only allow events that have a type that\nstarts with this prefix to yield an generator event.\n\nReturns:\ngenerator: A generator that returns celery events.", "source": "juraj-google-style"}
{"code": "def batch_size(self):\n    raise NotImplementedError", "docstring": "Return the batch size of the dataset created.\n\nFor certain type of the data input, the batch size is known, and even\nrequired, like numpy array. Where as for dataset, the batch is unknown\nunless we take a peek.\n\nReturns:\nint, the batch size of the dataset, or None if it is unknown.", "source": "github-repos"}
{"code": "def display_hierarchy(root_ad_unit, all_ad_units):\n  \n  \n  parent_id_to_children = collections.defaultdict(list)\n  for ad_unit in all_ad_units:\n    if 'parentId' in ad_unit:\n      parent_id_to_children[ad_unit['parentId']].append(ad_unit)\n  parent_id_to_children = dict(parent_id_to_children)\n\n  display_hierarchy_helper(root_ad_unit, parent_id_to_children, 0)", "docstring": "Display the ad units as a tree.\n\nArgs:\nroot_ad_unit: The root ad unit to begin from.\nall_ad_units: A list containing all ad units.", "source": "juraj-google-style"}
{"code": "def CreateMock(self, class_to_mock):\n    \n\n    new_mock = MockObject(class_to_mock)\n    self._mock_objects.append(new_mock)\n    return new_mock", "docstring": "Create a new mock object.\n\nArgs:\n# class_to_mock: the class to be mocked\nclass_to_mock: class\n\nReturns:\nMockObject that can be used as the class_to_mock would be.", "source": "juraj-google-style"}
{"code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self.name.startswith(export_scope):\n        context_def = control_flow_pb2.CondContextDef()\n        context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n        context_def.pred_name = ops.strip_name_scope(self._pred.name, export_scope)\n        context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)\n        context_def.branch = self._branch\n        context_def.values_def.MergeFrom(super(CondContext, self)._to_values_def(export_scope))\n        for nested in self._nested_contexts:\n            nested_def = context_def.nested_contexts.add()\n            nested.to_control_flow_context_def(nested_def)\n        return context_def\n    else:\n        return None", "docstring": "Converts a `CondContext` to a `CondContextDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `CondContextDef` protocol buffer.", "source": "github-repos"}
{"code": "def disassemble(code, origin=None):\n    \n\n    if inspect.isfunction(code):\n        code = six.get_function_code(code).co_code\n\n    origin = get_py_internals(origin)\n\n    opname = origin['opname']\n    hasjrel = origin['hasjrel']\n    hasjabs = origin['hasjabs']\n    hasjump = set(hasjrel) | set(hasjabs)\n    wordcode = origin['wordcode']\n    if not wordcode:\n        ext_arg_shift = 16\n    else:\n        ext_arg_shift = 8\n\n    ext_arg_name = opname[origin['extended_arg']]\n    ext_arg = 0\n\n    addr_labels = {}\n    addr_ops = []\n\n    code_iter = enumerate(six.iterbytes(code))\n    for op_addr, op_code in code_iter:\n        if op_code >= origin['have_argument']:\n            rel_addr, arg = next(code_iter)\n            if not wordcode:\n                rel_addr, b = next(code_iter)\n                arg += b << 8\n\n            arg += ext_arg\n\n            if op_code in hasjrel:\n                arg += rel_addr\n\n            if op_code in hasjump:\n                arg = addr_labels.setdefault(arg, Label())\n        else:\n            if wordcode:\n                next(code_iter)\n            arg = None\n        ext_arg = 0\n\n        op_name = opname[op_code]\n\n        if op_name == ext_arg_name:\n            ext_arg = arg << ext_arg_shift\n            op = None\n        else:\n            op = Op(op_name, arg)\n\n        addr_ops.append((op_addr, op))\n\n    ops = []\n    for op_addr, op in addr_ops:\n        label = addr_labels.get(op_addr)\n        if label is not None:\n            ops.append(label)\n\n        if op is not None:\n            ops.append(op)\n\n    return ops", "docstring": "Disassemble python bytecode into a series of :class:`Op` and\n:class:`Label` instances.\n\nArguments:\ncode(bytes): The bytecode (a code object's ``co_code`` property). You\ncan also provide a function.\norigin(dict): The opcode specification of the python version that\ngenerated ``code``. If you provide ``None``, the specs for the\ncurrently running python version will be used.\n\nReturns:\nlist: A list of opcodes and labels.", "source": "juraj-google-style"}
{"code": "def slithir_cfg_to_dot(self, filename):\n        \n        from slither.core.cfg.node import NodeType\n        with open(filename, 'w', encoding='utf8') as f:\n            f.write('digraph{\\n')\n            for node in self.nodes:\n                label = 'Node Type: {} {}\\n'.format(NodeType.str(node.type), node.node_id)\n                if node.expression:\n                    label += '\\nEXPRESSION:\\n{}\\n'.format(node.expression)\n                if node.irs:\n                    label += '\\nIRs:\\n' + '\\n'.join([str(ir) for ir in node.irs])\n                f.write('{}[label=\"{}\"];\\n'.format(node.node_id, label))\n                for son in node.sons:\n                    f.write('{}->{};\\n'.format(node.node_id, son.node_id))\n\n            f.write(\"}\\n\")", "docstring": "Export the function to a dot file\nArgs:\nfilename (str)", "source": "juraj-google-style"}
{"code": "def evaluate_layout(self, layout):\n    layout_dict = {}\n    if layout:\n        for pair in layout.split(';'):\n            (mtf_dimension_name, mesh_dimension_name) = pair.split(':', 1)\n            if (mtf_dimension_name in self._layout_validator.splittable_mtf_dimension_names):\n                layout_dict[mtf_dimension_name] = mesh_dimension_name\n            else:\n                logging.warning('Skipping unsplittable dimension %s.', mtf_dimension_name)\n    tensor_memory = {}\n    for tensor_name in self._graph.get_all_tensor_names():\n        if self._graph.is_tensor_on_canonical_device(tensor_name):\n            tensor_memory[tensor_name] = self._graph.get_tensor_size(tensor_name, layout_dict, self._layout_validator.mesh_dimension_name_to_size)\n        else:\n            tensor_memory[tensor_name] = 0.0\n    peak_memory_usage = 0.0\n    for tensor_names in self._get_memory_contents():\n        memory_usage = 0.0\n        for tensor_name in tensor_names:\n            memory_usage += tensor_memory[tensor_name]\n        peak_memory_usage = max(peak_memory_usage, memory_usage)\n    return peak_memory_usage", "docstring": "The current objective value for the given layout.\n\nTODO(joshuawang): The current function does not check that the given\nlayout is valid.\n\nArgs:\nlayout: a string, representing a layout to evaluate (e.g.\n\"d_ff:m1;heads:m2\").\n\nReturns:\nA float, the objective value.", "source": "codesearchnet"}
{"code": "def ProcessMessage(self, message):\n    cert = rdf_crypto.Certificate(message.payload)\n    queue = self.well_known_session_id.Queue()\n    client_id = message.source\n    try:\n        enrolment_cache.Get(client_id)\n        return\n    except KeyError:\n        enrolment_cache.Put(client_id, 1)\n    if data_store.AFF4Enabled():\n        client = aff4.FACTORY.Create(client_id, aff4_grr.VFSGRRClient, mode='rw', token=self.token)\n        client_cert = client.Get(client.Schema.CERT)\n    if data_store.RelationalDBEnabled():\n        try:\n            md = data_store.REL_DB.ReadClientMetadata(client_id.Basename())\n            client_cert = md.certificate\n        except db.UnknownClientError:\n            client_cert = None\n    if data_store.RelationalDBEnabled():\n        data_store.REL_DB.WriteClientMetadata(client_id.Basename(), fleetspeak_enabled=False)\n    if (not client_cert):\n        flow.StartAFF4Flow(client_id=client_id, flow_name=CAEnroler.__name__, csr=cert, queue=queue, token=self.token)", "docstring": "Begins an enrollment flow for this client.\n\nArgs:\nmessage: The Certificate sent by the client. Note that this message is\nnot authenticated.", "source": "codesearchnet"}
{"code": "def __init__(self, source: Any, tag: str, stacktrace: Optional[bool]=None, stacklimit: Optional[int]=None, stacktop: int=-1):\n    if not isinstance(tag, str):\n        raise ValueError(f'`tag` must be a string. Encountered: {tag!r}.')\n    self._source = source\n    self._tag = tag\n    self._stack = None\n    self._stacktrace = None\n    if stacktrace is None:\n        stacktrace = flags.is_tracking_origin()\n    if stacklimit is None:\n        stacklimit = flags.get_origin_stacktrace_limit()\n    if stacktrace:\n        self._stack = traceback.extract_stack(limit=stacklimit - stacktop)\n        if stacktop < 0:\n            self._stack = self._stack[:stacktop]", "docstring": "Constructor.\n\nArgs:\nsource: Source value for the origin.\ntag: A descriptive tag of the origin. Built-in tags are:\n'__init__', 'clone', 'deepclone', 'return'. Users can manually\ncall `sym_setorigin` with custom tag value.\nstacktrace: If True, enable stack trace for the origin. If None, enable\nstack trace if `pg.tracek_origin()` is called. Otherwise stack trace is\ndisabled.\nstacklimit: An optional integer to limit the stack depth. If None, it's\ndetermined by the value passed to `pg.set_origin_stacktrace_limit`,\nwhich is 10 by default.\nstacktop: A negative integer to indicate the stack top among the stack\nframes that we want to present to user, by default it's 2-level up from\nthe stack within current `sym_setorigin` call.", "source": "github-repos"}
{"code": "def register_domain(self, domain=0, tokenizer=None, trie=None):\n        \n        self.domains[domain] = IntentDeterminationEngine(\n            tokenizer=tokenizer, trie=trie)", "docstring": "Register a domain with the intent engine.\n\nArgs:\ntokenizer(tokenizer): The tokenizer you wish to use.\ntrie(Trie): the Trie() you wish to use.\ndomain(str): a string representing the domain you wish to add", "source": "juraj-google-style"}
{"code": "def checksum(self, url):\n    _, path = self._parse_url(url)\n    file_checksum = self._hdfs_client.checksum(path)\n    return '%s-%d-%s' % (file_checksum[_FILE_CHECKSUM_ALGORITHM], file_checksum[_FILE_CHECKSUM_LENGTH], file_checksum[_FILE_CHECKSUM_BYTES])", "docstring": "Fetches a checksum description for a URL.\n\nReturns:\nString describing the checksum.\n\nRaises:\n``BeamIOError``: if url doesn't exist.", "source": "github-repos"}
{"code": "def merge_checkpoint(input_graph,\n                     checkpoint,\n                     output_node_names,\n                     output_graph,\n                     sess):\n    \n    restore_op_name = \"save/restore_all\"\n    filename_tensor_name = \"save/Const:0\"\n\n    input_graph_def = graph_pb2.GraphDef()\n    with gfile.FastGFile(input_graph, \"r\") as f:\n        text_format.Merge(f.read().decode(\"utf-8\"), input_graph_def)\n\n    for node in input_graph_def.node:\n        node.device = \"\"\n\n    importer.import_graph_def(input_graph_def, name=\"\")\n\n    sess.run([restore_op_name], {filename_tensor_name: checkpoint})\n    output_graph_def = graph_util.convert_variables_to_constants(\n        sess,\n        input_graph_def,\n        output_node_names,\n        variable_names_blacklist=\"\"\n    )\n\n    with gfile.GFile(output_graph, \"wb\") as f:\n        f.write(output_graph_def.SerializeToString())", "docstring": "Get the variable values from the checkpoint file, and merge them to the GraphDef file\nArgs:\ninput_graph: the GraphDef file, doesn't contain variable values\ncheckpoint: the checkpoint file\noutput_node_names: A list of string, the output names\noutput_graph: String of the location and the name of the\noutput graph", "source": "juraj-google-style"}
{"code": "def DirnamePath(self, path):\n    \n    if path.endswith(self.PATH_SEPARATOR):\n      path = path[:-1]\n    if not path:\n      return None\n\n    dirname, _, _ = path.rpartition(self.PATH_SEPARATOR)\n    return dirname", "docstring": "Determines the directory name of the path.\n\nThe file system root is represented by an empty string.\n\nArgs:\npath (str): path.\n\nReturns:\nstr: directory name of the path or None.", "source": "juraj-google-style"}
{"code": "def dict_hist(item_list, weight_list=None, ordered=False, labels=None):\n    if (labels is None):\n        hist_ = defaultdict(int)\n    else:\n        hist_ = {k: 0 for k in labels}\n    if (weight_list is None):\n        for item in item_list:\n            hist_[item] += 1\n    else:\n        for (item, weight) in zip(item_list, weight_list):\n            hist_[item] += weight\n    if ordered:\n        getval = op.itemgetter(1)\n        key_order = [key for (key, value) in sorted(hist_.items(), key=getval)]\n        hist_ = order_dict_by(hist_, key_order)\n    return hist_", "docstring": "r\"\"\"\nBuilds a histogram of items in item_list\n\nArgs:\nitem_list (list): list with hashable items (usually containing duplicates)\n\nReturns:\ndict : dictionary where the keys are items in item_list, and the values\nare the number of times the item appears in item_list.\n\nCommandLine:\npython -m utool.util_dict --test-dict_hist\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_dict import *  # NOQA\n>>> import utool as ut\n>>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]\n>>> hist_ = dict_hist(item_list)\n>>> result = ut.repr2(hist_)\n>>> print(result)\n{1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}", "source": "codesearchnet"}
{"code": "def validate(self, value, model_instance):\n    if (not isinstance(value, base.StateWrapper)):\n        raise exceptions.ValidationError((self.error_messages['wrong_type'] % value))\n    elif (not (value.workflow == self.workflow)):\n        raise exceptions.ValidationError((self.error_messages['wrong_workflow'] % value.workflow))\n    elif (value.state not in self.workflow.states):\n        raise exceptions.ValidationError((self.error_messages['invalid_state'] % value.state))", "docstring": "Validate that a given value is a valid option for a given model instance.\n\nArgs:\nvalue (xworkflows.base.StateWrapper): The base.StateWrapper returned by to_python.\nmodel_instance: A WorkflowEnabled instance", "source": "codesearchnet"}
{"code": "def defocus_blur(x, severity=1):\n    c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][(severity - 1)]\n    x = (np.array(x) / 255.0)\n    kernel = disk(radius=c[0], alias_blur=c[1])\n    channels = []\n    for d in range(3):\n        channels.append(tfds.core.lazy_imports.cv2.filter2D(x[(:, :, d)], (- 1), kernel))\n    channels = np.array(channels).transpose((1, 2, 0))\n    x_clip = (np.clip(channels, 0, 1) * 255)\n    return around_and_astype(x_clip)", "docstring": "Defocus blurring to images.\n\nApply defocus blurring to images using Gaussian kernel.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Applied defocus blur.", "source": "codesearchnet"}
{"code": "def callEventWaitAndGetRpc(self, callback_id, event_name, timeout_sec):", "docstring": "Calls snippet lib's RPC to wait for a callback event.\n\nOverride this method to use this class with various snippet lib\nimplementations.\n\nThis function waits and gets a CallbackEvent with the specified identifier\nfrom the server. It will raise a timeout error if the expected event does\nnot occur within the time limit.\n\nArgs:\ncallback_id: str, the callback identifier.\nevent_name: str, the callback name.\ntimeout_sec: float, the number of seconds to wait for the event. It is\nalready checked that this argument is no longer than the max timeout\nof a single RPC.\n\nReturns:\nThe event dictionary.\n\nRaises:\nerrors.CallbackHandlerTimeoutError: Raised if the expected event does not\noccur within the time limit.", "source": "github-repos"}
{"code": "def document(self, document_id, **kwargs):\n        \n        baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI,\n                                                 document_id)\n        res = self.session.get(baseuri, params=kwargs)\n        self.handle_http_error(res)\n        return res", "docstring": "Requests for a document by the document id.\nNormally the response.content can be saved as a pdf file\n\nArgs:\ndocument_id (str): The id of the document retrieved.\nkwargs (dict): additional keywords passed into\nrequests.session.get *params* keyword.", "source": "juraj-google-style"}
{"code": "def save(self, filename=None, directory=None):\n        \n        if filename is not None:\n            self.filename = filename\n        if directory is not None:\n            self.directory = directory\n\n        filepath = self.filepath\n        tools.mkdirs(filepath)\n\n        data = text_type(self.source)\n\n        with io.open(filepath, 'w', encoding=self.encoding) as fd:\n            fd.write(data)\n            if not data.endswith(u'\\n'):\n                fd.write(u'\\n')\n\n        return filepath", "docstring": "Save the DOT source to file. Ensure the file ends with a newline.\n\nArgs:\nfilename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)\ndirectory: (Sub)directory for source saving and rendering.\nReturns:\nThe (possibly relative) path of the saved source file.", "source": "juraj-google-style"}
{"code": "def _assertOpOutputMatchesExpected(self, op, axis, output_type, op_input, expected):\n    with self.session() as session:\n        with self.test_scope():\n            pinp = array_ops.placeholder(dtypes.as_dtype(op_input.dtype), op_input.shape, name='a')\n            output = op(pinp, axis=axis, output_type=output_type)\n        result = session.run(output, {pinp: op_input})\n        self.assertAllEqual(result, expected)", "docstring": "Verifies that 'op' produces 'expected' when fed input 'op_input' .\n\nArgs:\nop: argmin or argmax operator to test.\naxis: integer axis to reduce across.\noutput_type: numpy datatype of the output to produce.\nop_input: numpy input array to use as input to 'op'.\nexpected: numpy array representing the expected output of 'op'.", "source": "github-repos"}
{"code": "def converted_function_names(self):\n    if self._converted_function_names is None:\n        parsed_names = []\n        for name in self.functions:\n            elements = name.rsplit('_', 1)\n            if len(elements) == 2 and elements[1].isnumeric():\n                parsed_names.append((int(elements[1]), elements[0], name))\n            else:\n                parsed_names.append((-1, name, name))\n        self._converted_function_names = {name: '{}_frozen_{}'.format(base_name, ops.uid()) for _, base_name, name in sorted(parsed_names)}\n    return self._converted_function_names", "docstring": "Map from original to new function names.\n\nIn order to avoid conflicts (two functions with the same name, one converted\nand one not), we need to change the name of every converted function to\nsomething that is hopefully unique.\n\nReturns:\nMap from original to new suggested function names.", "source": "github-repos"}
{"code": "def _map_graph_network(inputs, outputs):\n    nodes_in_decreasing_depth, layer_indices = _build_map(outputs)\n    network_nodes = {_make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth}\n    nodes_depths = {}\n    layers_depths = {}\n    for node in reversed(nodes_in_decreasing_depth):\n        depth = nodes_depths.setdefault(node, 0)\n        previous_depth = layers_depths.get(node.layer, 0)\n        depth = max(depth, previous_depth)\n        layers_depths[node.layer] = depth\n        nodes_depths[node] = depth\n        for node_dep in node.parent_nodes:\n            previous_depth = nodes_depths.get(node_dep, 0)\n            nodes_depths[node_dep] = max(depth + 1, previous_depth)\n    for input_t in inputs:\n        input_layer = input_t._keras_history[0]\n        if input_layer not in layers_depths:\n            layers_depths[input_layer] = 0\n            layer_indices[input_layer] = -1\n            nodes_depths[input_layer._inbound_nodes[0]] = 0\n            network_nodes.add(_make_node_key(input_layer.name, 0))\n    nodes_by_depth = collections.defaultdict(list)\n    for node, depth in nodes_depths.items():\n        nodes_by_depth[depth].append(node)\n    layers_by_depth = collections.defaultdict(list)\n    for layer, depth in layers_depths.items():\n        layers_by_depth[depth].append(layer)\n    depth_keys = list(layers_by_depth.keys())\n    depth_keys.sort(reverse=True)\n    layers = []\n    for depth in depth_keys:\n        layers_for_depth = layers_by_depth[depth]\n        layers_for_depth.sort(key=lambda x: layer_indices[x])\n        layers.extend(layers_for_depth)\n    depth_keys = list(nodes_by_depth.keys())\n    depth_keys.sort(reverse=True)\n    computable_tensors = set()\n    for x in inputs:\n        computable_tensors.add(id(x))\n    layers_with_complete_input = []\n    for depth in depth_keys:\n        for node in nodes_by_depth[depth]:\n            layer = node.layer\n            if layer and (not node.is_input):\n                for x in nest.flatten(node.keras_inputs):\n                    if id(x) not in computable_tensors:\n                        raise ValueError('Graph disconnected: cannot obtain value for tensor ' + str(x) + ' at layer \"' + layer.name + '\". The following previous layers were accessed without issue: ' + str(layers_with_complete_input))\n                for x in nest.flatten(node.outputs):\n                    computable_tensors.add(id(x))\n                layers_with_complete_input.append(layer.name)\n    all_names = [layer.name for layer in layers]\n    for name in all_names:\n        if all_names.count(name) != 1:\n            raise ValueError('The name \"' + name + '\" is used ' + str(all_names.count(name)) + ' times in the model. All layer names should be unique.')\n    return (network_nodes, nodes_by_depth, layers, layers_by_depth)", "docstring": "Validates a network's topology and gather its layers and nodes.\n\nArgs:\ninputs: List of input tensors.\noutputs: List of outputs tensors.\n\nReturns:\nA tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.\n- nodes: list of Node instances.\n- nodes_by_depth: dict mapping ints (depth) to lists of node instances.\n- layers: list of Layer instances.\n- layers_by_depth: dict mapping ints (depth) to lists of layer instances.\n\nRaises:\nValueError: In case the network is not valid (e.g. disconnected graph).", "source": "github-repos"}
{"code": "def get_trans(self) -> torch.Tensor:\n    return self._trans", "docstring": "Getter for the translation.\n\nReturns:\nThe stored translation", "source": "github-repos"}
{"code": "def get_clusters(self, variant_id):\n        \n        query = {'variant_id':variant_id}\n        identities = self.db.identity.find(query)\n        return identities", "docstring": "Search what clusters a variant belongs to\n\nArgs:\nvariant_id(str): From ID column in vcf\n\nReturns:\nclusters()", "source": "juraj-google-style"}
{"code": "def resolve_widget(self, field):\n        \n        \n        \n        if hasattr(field, 'field'):\n            widget = field.field.widget\n        \n        else:\n            widget = field.widget\n\n        return widget", "docstring": "Given a Field or BoundField, return widget instance.\n\nTodo:\nRaise an exception if given field object does not have a\nwidget.\n\nArguments:\nfield (Field or BoundField): A field instance.\n\nReturns:\ndjango.forms.widgets.Widget: Retrieved widget from given field.", "source": "juraj-google-style"}
{"code": "def attach_socket(self, container, params=None, ws=False):\n    if (params is None):\n        params = {'stdout': 1, 'stderr': 1, 'stream': 1}\n    if (('detachKeys' not in params) and ('detachKeys' in self._general_configs)):\n        params['detachKeys'] = self._general_configs['detachKeys']\n    if ws:\n        return self._attach_websocket(container, params)\n    headers = {'Connection': 'Upgrade', 'Upgrade': 'tcp'}\n    u = self._url('/containers/{0}/attach', container)\n    return self._get_raw_response_socket(self.post(u, None, params=self._attach_params(params), stream=True, headers=headers))", "docstring": "Like ``attach``, but returns the underlying socket-like object for the\nHTTP request.\n\nArgs:\ncontainer (str): The container to attach to.\nparams (dict): Dictionary of request parameters (e.g. ``stdout``,\n``stderr``, ``stream``).\nFor ``detachKeys``, ~/.docker/config.json is used by default.\nws (bool): Use websockets instead of raw HTTP.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def vqt(input_qhbm: qhbm.QHBM, target_hamiltonian: Union[tf.Tensor, hamiltonian.Hamiltonian], beta: tf.Tensor):\n\n    def f_vqt(bitstrings):\n        h_expectations = tf.squeeze(input_qhbm.q_inference.expectation(bitstrings, target_hamiltonian), 1)\n        beta_h_expectations = beta * h_expectations\n        energies = tf.stop_gradient(input_qhbm.e_inference.energy(bitstrings))\n        return beta_h_expectations - energies\n    average_expectation = input_qhbm.e_inference.expectation(f_vqt)\n    current_partition = tf.stop_gradient(input_qhbm.e_inference.log_partition())\n    return average_expectation - current_partition", "docstring": "Computes the VQT loss of a given QHBM and Hamiltonian.\n\nThis function is differentiable within a `tf.GradientTape` scope.\n\nArgs:\ninput_qhbm: Inference methods for the model.\ntarget_hamiltonian: The Hamiltonian whose thermal state is to be learned. If\nit is a `tf.Tensor`, it is of type `tf.string` with shape [1], result of\ncalling `tfq.convert_to_tensor` on a list of `cirq.PauliSum`, `[op]`.\nOtherwise, a Hamiltonian.\nbeta: A scalar `tf.Tensor` which is the inverse temperature at which the\nloss is calculated.\n\nReturns:\nThe VQT loss.", "source": "github-repos"}
{"code": "def quad_genz_keister_22 ( order ):\n    \n    order = sorted(GENZ_KEISTER_22.keys())[order]\n\n    abscissas, weights = GENZ_KEISTER_22[order]\n    abscissas = numpy.array(abscissas)\n    weights = numpy.array(weights)\n\n    weights /= numpy.sum(weights)\n    abscissas *= numpy.sqrt(2)\n\n    return abscissas, weights", "docstring": "Hermite Genz-Keister 22 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_22(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321  0.      1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "juraj-google-style"}
{"code": "def swo_supported_speeds(self, cpu_speed, num_speeds=3):\n    buf_size = num_speeds\n    buf = (ctypes.c_uint32 * buf_size)()\n    res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return list(buf)[:res]", "docstring": "Retrives a list of SWO speeds supported by both the target and the\nconnected J-Link.\n\nThe supported speeds are returned in order from highest to lowest.\n\nArgs:\nself (JLink): the ``JLink`` instance\ncpu_speed (int): the target's CPU speed in Hz\nnum_speeds (int): the number of compatible speeds to return\n\nReturns:\nA list of compatible SWO speeds in Hz in order from highest to lowest.", "source": "codesearchnet"}
{"code": "def _open_usb_handle(serial_number=None, **kwargs):\n    init_dependent_flags()\n    remote_usb = conf.remote_usb\n    if remote_usb:\n        if (remote_usb.strip() == 'ethersync'):\n            device = conf.ethersync\n            try:\n                mac_addr = device['mac_addr']\n                port = device['plug_port']\n            except (KeyError, TypeError):\n                raise ValueError('Ethersync needs mac_addr and plug_port to be set')\n            else:\n                ethersync = cambrionix.EtherSync(mac_addr)\n                serial_number = ethersync.get_usb_serial(port)\n    return local_usb.LibUsbHandle.open(serial_number=serial_number, **kwargs)", "docstring": "Open a UsbHandle subclass, based on configuration.\n\nIf configuration 'remote_usb' is set, use it to connect to remote usb,\notherwise attempt to connect locally.'remote_usb' is set to usb type,\nEtherSync or other.\n\nExample of Cambrionix unit in config:\nremote_usb: ethersync\nethersync:\nmac_addr: 78:a5:04:ca:91:66\nplug_port: 5\n\nArgs:\nserial_number: Optional serial number to connect to.\n**kwargs: Arguments to pass to respective handle's Open() method.\n\nReturns:\nInstance of UsbHandle.", "source": "codesearchnet"}
{"code": "def transformer_encoder_attention_unit(x, hparams, encoder_self_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True):\n    with tf.variable_scope('self_attention'):\n        y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, (hparams.attention_key_channels or hparams.hidden_size), (hparams.attention_value_channels or hparams.hidden_size), hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k)\n        x = common_layers.layer_postprocess(x, y, hparams)\n    return x", "docstring": "Applies multihead attention function which is parametrised for encoding.\n\nArgs:\nx: input\nhparams: model hyper-parameters\nencoder_self_attention_bias: a bias tensor for use in encoder self-attention\nattention_dropout_broadcast_dims: Fpr noise broadcasting in the dropout\nlayers to save memory during training\nsave_weights_to: an optional dictionary to capture attention weights for\nvisualization; the weights tensor will be appended there under a string\nkey created from the variable scope (including name).\nmake_image_summary: Whether to make an attention image summary.\n\nReturns:\nthe output tensor", "source": "codesearchnet"}
{"code": "def delete_contexts(self, context_id_list):\n    for c_id in context_id_list:\n        if (c_id in self._contexts):\n            del self._contexts[c_id]", "docstring": "Delete contexts from the ContextManager.\n\nArgs:\ncontext_id_list (list): a list of context ids\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def client(self, service_name, version, component, **kw):\n    service = _create_service_api(self._credentials, service_name, version, kw.get('developer_key'), kw.get('cache_discovery', False), (self._http or _build_http()))\n    return ServiceClient(gcp_service=service, component=component, credentials=self._credentials, rate_limiter=self._rate_limiter, use_cached_http=self._use_cached_http, http=self._http)", "docstring": "Safely initialize a repository class to a property.\n\nArgs:\nrepository_class (class): The class to initialize.\nversion (str): The gcp service version for the repository.\n\nReturns:\nobject: An instance of repository_class.", "source": "codesearchnet"}
{"code": "def create_korobov_samples(order, dim, base=17797):\n    values = numpy.empty(dim)\n    values[0] = 1\n    for idx in range(1, dim):\n        values[idx] = ((base * values[(idx - 1)]) % (order + 1))\n    grid = numpy.mgrid[(:dim, :(order + 1))]\n    out = (((values[grid[0]] * (grid[1] + 1)) / (order + 1.0)) % 1.0)\n    return out[(:, :order)]", "docstring": "Create Korobov lattice samples.\n\nArgs:\norder (int):\nThe order of the Korobov latice. Defines the number of\nsamples.\ndim (int):\nThe number of dimensions in the output.\nbase (int):\nThe number based used to calculate the distribution of values.\n\nReturns (numpy.ndarray):\nKorobov lattice with ``shape == (dim, order)``", "source": "codesearchnet"}
{"code": "def action(elem, doc):  \n    \n    if isinstance(elem, pf.CodeBlock):\n        doc.listings_counter += 1\n        elems = [elem] if 'hide' not in elem.classes else []\n\n        if 'file' in elem.attributes:\n            elem.text = read_file(elem.attributes['file'])\n            filename = trimpath(elem.attributes)\n            prefix = pf.Emph(pf.Str('File:'))\n\n        if 'exec' in elem.classes:\n            if 'interactive' in elem.classes or elem.text[:4] == '>>> ':\n                elem.text = execute_interactive_code(elem, doc)\n            else:\n                result = execute_code_block(elem, doc)\n\n                if 'hideimports' in elem.classes:\n                    elem.text = remove_import_statements(elem.text)\n\n                if 'plt' in elem.attributes or 'plt' in elem.classes:\n                    doc.plot_found = True\n                    result = maybe_center_plot(result)\n                    block = pf.RawBlock(result, format='latex')\n                else:\n                    block = pf.CodeBlock(result, classes=['changelog'])\n\n                elems += [pf.Para(pf.Emph(pf.Str('Output:'))), block]\n\n        if 'lines' in elem.attributes:\n            elem.text = filter_lines(elem.text, elem.attributes['lines'])\n\n        label = elem.attributes.get('label', f'cl:{doc.listings_counter}')\n\n        if 'caption' in elem.attributes.keys():\n            doc.caption_found = True\n            cap = pf.convert_text(elem.attributes['caption'], output_format='latex')  \n            if 'shortcaption' in elem.attributes.keys():\n                shortcap = pf.convert_text(elem.attributes['shortcaption'], output_format='latex')  \n            else:\n                shortcap = cap\n            if 'file' in elem.attributes.keys():\n                cap += pf.convert_text(f'&nbsp;(`{filename}`)', output_format='latex')  \n\n            elems = make_codelisting(elems, cap, label, shortcaption=shortcap,\n                                     above='capbelow' not in elem.classes)\n        elif 'caption' in elem.classes:\n            doc.caption_found = True\n            cap = ''\n            if 'file' in elem.attributes.keys():\n                cap = pf.convert_text(f'`{filename}`', output_format='latex')\n            elems = make_codelisting(elems, cap, label,\n                                     above='capbelow' not in elem.classes)\n        else:\n            if 'file' in elem.attributes.keys():\n                elems.insert(0, pf.Para(prefix, pf.Space,\n                                        pf.Code(filename)))\n\n        return elems", "docstring": "Processes pf.CodeBlocks.\n\nFor details and a specification of how each command should behave,\ncheck the example files (especially the md and pdf)!\n\nArgs:\nelem: The element to process.\ndoc:  The document.\n\nReturns:\nA changed element or None.", "source": "juraj-google-style"}
{"code": "def list_tensors(self, args, screen_info=None):\n    _ = screen_info\n    parsed = self._arg_parsers['list_tensors'].parse_args(args)\n    output = []\n    filter_strs = []\n    if parsed.op_type_filter:\n        op_type_regex = re.compile(parsed.op_type_filter)\n        filter_strs.append('Op type regex filter: \"%s\"' % parsed.op_type_filter)\n    else:\n        op_type_regex = None\n    if parsed.node_name_filter:\n        node_name_regex = re.compile(parsed.node_name_filter)\n        filter_strs.append('Node name regex filter: \"%s\"' % parsed.node_name_filter)\n    else:\n        node_name_regex = None\n    output = debugger_cli_common.RichTextLines(filter_strs)\n    output.append('')\n    if parsed.tensor_filter:\n        try:\n            filter_callable = self.get_tensor_filter(parsed.tensor_filter)\n        except ValueError:\n            output = cli_shared.error('There is no tensor filter named \"%s\".' % parsed.tensor_filter)\n            _add_main_menu(output, node_name=None, enable_list_tensors=False)\n            return output\n        data_to_show = self._debug_dump.find(filter_callable, exclude_node_names=parsed.filter_exclude_node_names)\n    else:\n        if parsed.filter_exclude_node_names:\n            raise ValueError('The flag --filter_exclude_node_names is valid only when the flag -f or --tensor_filter is used.')\n        data_to_show = self._debug_dump.dumped_tensor_data\n    max_timestamp_width, max_dump_size_width, max_op_type_width = self._measure_tensor_list_column_widths(data_to_show)\n    data_to_show = self._sort_dump_data_by(data_to_show, parsed.sort_by, parsed.reverse)\n    output.extend(self._tensor_list_column_heads(parsed, max_timestamp_width, max_dump_size_width, max_op_type_width))\n    dump_count = 0\n    for dump in data_to_show:\n        if node_name_regex and (not node_name_regex.match(dump.node_name)):\n            continue\n        if op_type_regex:\n            op_type = self._debug_dump.node_op_type(dump.node_name)\n            if not op_type_regex.match(op_type):\n                continue\n        rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0\n        dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n        dumped_tensor_name = '%s:%d' % (dump.node_name, dump.output_slot)\n        op_type = self._debug_dump.node_op_type(dump.node_name)\n        line = '[%.3f]' % rel_time\n        line += ' ' * (max_timestamp_width - len(line))\n        line += dump_size_str\n        line += ' ' * (max_timestamp_width + max_dump_size_width - len(line))\n        line += op_type\n        line += ' ' * (max_timestamp_width + max_dump_size_width + max_op_type_width - len(line))\n        line += dumped_tensor_name\n        output.append(line, font_attr_segs=[(len(line) - len(dumped_tensor_name), len(line), debugger_cli_common.MenuItem('', 'pt %s' % dumped_tensor_name))])\n        dump_count += 1\n    if parsed.tensor_filter:\n        output.prepend(['%d dumped tensor(s) passing filter \"%s\":' % (dump_count, parsed.tensor_filter)])\n    else:\n        output.prepend(['%d dumped tensor(s):' % dump_count])\n    _add_main_menu(output, node_name=None, enable_list_tensors=False)\n    return output", "docstring": "Command handler for list_tensors.\n\nList tensors dumped during debugged Session.run() call.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines 
object.\n\nRaises:\nValueError: If `--filter_exclude_node_names` is used without `-f` or\n`--tensor_filter` being used.", "source": "github-repos"}
{"code": "def _get_language_modeling_inputs(filename,\n                                  delimiter=\"\\n\",\n                                  repeat=1,\n                                  append_space_to_final_punctionation=True):\n  \n  with tf.gfile.Open(filename) as f:\n    text = f.read()\n  inputs = text.split(delimiter)\n  if not inputs[-1]:\n    inputs.pop()\n  inputs *= repeat\n  if append_space_to_final_punctionation:\n    inputs = [\n        s + \" \" if s and s[-1] in string.punctuation else s for s in inputs]\n  return inputs", "docstring": "Read a file of partial texts to continue.\n\nThe purpose of append_space_to_final_punctionation is that SubwordTokenizer\ngroups punctuation and the ensuing space in the same token.  Adding a space\ncauses the token to be completed.\n\nArgs:\nfilename: a string\ndelimiter: a string\nrepeat: an integer - we repeat the entire file that many times.\nappend_space_to_final_punctionation: a boolean\n\nReturns:\na list of strings", "source": "juraj-google-style"}
{"code": "def CollectFromKnowledgeBase(cls, knowledge_base):\n    \n    for preprocess_plugin in cls._knowledge_base_plugins.values():\n      logger.debug('Running knowledge base preprocessor plugin: {0:s}'.format(\n          preprocess_plugin.__class__.__name__))\n      try:\n        preprocess_plugin.Collect(knowledge_base)\n      except errors.PreProcessFail as exception:\n        logger.warning(\n            'Unable to collect knowledge base value with error: {0!s}'.format(\n                exception))", "docstring": "Collects values from knowledge base values.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.", "source": "juraj-google-style"}
{"code": "def scale(reader, writer, column, start, stop, multiple):\n    for (i, row) in enumerate(reader):\n        if ((i >= start) and (i <= stop)):\n            row[column] = (type(multiple)(row[column]) * multiple)\n        writer.appendRecord(row)", "docstring": "Multiplies a value over a range of rows.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream  object to write output data to.\ncolumn: The column of data to modify.\nstart: The first row in the range to modify.\nend: The last row in the range to modify.\nmultiple: The value to scale/multiply by.", "source": "codesearchnet"}
{"code": "def combine_samples(self, md5_list, filename, type_tag):\n    total_bytes = ''\n    for md5 in md5_list:\n        total_bytes += self.get_sample(md5)['sample']['raw_bytes']\n        self.remove_sample(md5)\n    return self.store_sample(total_bytes, filename, type_tag)", "docstring": "Combine samples together. This may have various use cases the most significant\ninvolving a bunch of sample 'chunks' got uploaded and now we combine them together\n\nArgs:\nmd5_list: The list of md5s to combine, order matters!\nfilename: name of the file (used purely as meta data not for lookup)\ntype_tag: ('exe','pcap','pdf','json','swf', or ...)\nReturns:\nthe computed md5 of the combined samples", "source": "codesearchnet"}
{"code": "def get_stdout(self, workflow_id, task_id):\n    url = ('%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % {'wf_url': self.workflows_url, 'wf_id': workflow_id, 'task_id': task_id})\n    r = self.gbdx_connection.get(url)\n    r.raise_for_status()\n    return r.text", "docstring": "Get stdout for a particular task.\n\nArgs:\nworkflow_id (str): Workflow id.\ntask_id (str): Task id.\n\nReturns:\nStdout of the task (string).", "source": "codesearchnet"}
{"code": "def convert_maxpool3(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting pooling ...')\n    if (names == 'short'):\n        tf_name = ('P' + random_string(7))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    if ('kernel_shape' in params):\n        (height, width, depth) = params['kernel_shape']\n    else:\n        (height, width, depth) = params['kernel_size']\n    if ('strides' in params):\n        (stride_height, stride_width, stride_depth) = params['strides']\n    else:\n        (stride_height, stride_width, stride_depth) = params['stride']\n    if ('pads' in params):\n        (padding_h, padding_w, padding_d, _, _) = params['pads']\n    else:\n        (padding_h, padding_w, padding_d) = params['padding']\n    input_name = inputs[0]\n    if ((padding_h > 0) and (padding_w > 0) and (padding_d > 0)):\n        padding_name = (tf_name + '_pad')\n        padding_layer = keras.layers.ZeroPadding3D(padding=(padding_h, padding_w, padding_d), name=padding_name)\n        layers[padding_name] = padding_layer(layers[inputs[0]])\n        input_name = padding_name\n    pooling = keras.layers.MaxPooling3D(pool_size=(height, width, depth), strides=(stride_height, stride_width, stride_depth), padding='valid', name=tf_name)\n    layers[scope_name] = pooling(layers[input_name])", "docstring": "Convert 3d Max pooling.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def from_file(cls, weafile, timestep=1, is_leap_year=False):\n    assert os.path.isfile(weafile), 'Failed to find {}'.format(weafile)\n    location = Location()\n    with open(weafile, readmode) as weaf:\n        first_line = weaf.readline()\n        assert first_line.startswith('place'), 'Failed to find place in header. {} is not a valid wea file.'.format(weafile)\n        location.city = ' '.join(first_line.split()[1:])\n        location.latitude = float(weaf.readline().split()[(- 1)])\n        location.longitude = (- float(weaf.readline().split()[(- 1)]))\n        location.time_zone = ((- int(weaf.readline().split()[(- 1)])) / 15)\n        location.elevation = float(weaf.readline().split()[(- 1)])\n        weaf.readline()\n        direct_normal_irradiance = []\n        diffuse_horizontal_irradiance = []\n        for line in weaf:\n            (dirn, difh) = [int(v) for v in line.split()[(- 2):]]\n            direct_normal_irradiance.append(dirn)\n            diffuse_horizontal_irradiance.append(difh)\n    return cls.from_values(location, direct_normal_irradiance, diffuse_horizontal_irradiance, timestep, is_leap_year)", "docstring": "Create wea object from a wea file.\n\nArgs:\nweafile:Full path to wea file.\ntimestep: An optional integer to set the number of time steps per hour.\nDefault is 1 for one value per hour. If the wea file has a time step\nsmaller than an hour adjust this input accordingly.\nis_leap_year: A boolean to indicate if values are representing a leap year.\nDefault is False.", "source": "codesearchnet"}
{"code": "def operate_multi(self, points):\n    points = np.array(points)\n    affine_points = np.concatenate([points, np.ones((points.shape[:(- 1)] + (1,)))], axis=(- 1))\n    return np.inner(affine_points, self.affine_matrix)[(..., :(- 1))]", "docstring": "Apply the operation on a list of points.\n\nArgs:\npoints: List of Cartesian coordinates\n\nReturns:\nNumpy array of coordinates after operation", "source": "codesearchnet"}
{"code": "def get_variant_by_name(self, name):\n        \n        \n        \n        try:\n            geno, i = self.bed.get_geno_marker(name, return_index=True)\n\n        except ValueError:\n            if name in self.bed.get_duplicated_markers():\n                \n                \n                return [\n                    self.get_variant_by_name(dup_name).pop()\n                    for dup_name in self.bed.get_duplicated_markers()[name]\n                ]\n\n            else:\n                \n                \n                logging.variant_name_not_found(name)\n                return []\n\n        else:\n            info = self.bim.iloc[i, :]\n            return [Genotypes(\n                Variant(info.name, CHROM_INT_TO_STR[info.chrom], info.pos,\n                        [info.a1, info.a2]),\n                self._normalize_missing(geno),\n                reference=info.a2,\n                coded=info.a1,\n                multiallelic=info.multiallelic,\n            )]", "docstring": "Get the genotype of a marker using it's name.\n\nArgs:\nname (str): The name of the marker.\n\nReturns:\nlist: A list of Genotypes (only one for PyPlink, see note below).\n\nNote\n====\nFrom PyPlink version 1.3.2 and onwards, each name is unique in the\ndataset. Hence, we can use the 'get_geno_marker' function and be\nsure only one variant is returned.", "source": "juraj-google-style"}
{"code": "def wait_running(self, timeout=None):\n    flag = self._running.wait(timeout)\n    if (flag is False):\n        raise TimeoutExpiredError('Timeout waiting for thread to start running')", "docstring": "Wait for the thread to pass control to its routine.\n\nArgs:\ntimeout (float): The maximum amount of time to wait", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 status=enums.ResultStatus.OPERATION_FAILED,\n                 reason=enums.ResultReason.GENERAL_FAILURE,\n                 message='A general failure occurred.'):\n        \n        super(KmipError, self).__init__(message)\n        self.status = status\n        self.reason = reason", "docstring": "Create a KmipError exception.\n\nArgs:\nstatus (ResultStatus): An enumeration detailing the result outcome.\nreason (ResultReason): An enumeration giving the status rationale.\nmessage (string): A string containing more information about the\nerror.", "source": "juraj-google-style"}
{"code": "def validate(data):\n    text = data.get('text')\n    if ((not isinstance(text, _string_types)) or (len(text) == 0)):\n        raise ValueError('text field is required and should not be empty')\n    if (('markdown' in data) and (not (type(data['markdown']) is bool))):\n        raise ValueError('markdown field should be bool')\n    if ('attachments' in data):\n        if (not isinstance(data['attachments'], (list, tuple))):\n            raise ValueError('attachments field should be list or tuple')\n        for attachment in data['attachments']:\n            if (('text' not in attachment) and ('title' not in attachment)):\n                raise ValueError('text or title is required in attachment')\n    return True", "docstring": "Validates incoming data\n\nArgs:\ndata(dict): the incoming data\n\nReturns:\nTrue if the data is valid\n\nRaises:\nValueError: the data is not valid", "source": "codesearchnet"}
{"code": "def refreshSkypeToken(self):\n    (self.tokens['skype'], self.tokenExpiry['skype']) = SkypeRefreshAuthProvider(self).auth(self.tokens['skype'])\n    self.getRegToken()", "docstring": "Take the existing Skype token and refresh it, to extend the expiry time without other credentials.\n\nRaises:\n.SkypeAuthException: if the login request is rejected\n.SkypeApiException: if the login form can't be processed", "source": "codesearchnet"}
{"code": "def generate(self, text):\n        \n        if not text:\n            raise Exception(\"No text to speak\")\n\n        if len(text) >= self.MAX_CHARS:\n            raise Exception(\"Number of characters must be less than 2000\")\n\n        params = self.__params.copy()\n        params[\"text\"] = text\n        self._data = requests.get(self.TTS_URL, params=params,\n                                  stream=False).iter_content()", "docstring": "Try to get the generated file.\n\nArgs:\ntext: The text that you want to generate.", "source": "juraj-google-style"}
{"code": "def check_filepath(self, path, filename):\n    settings_path = os.path.join(path, filename)\n    if ((not os.path.exists(settings_path)) or (not os.path.isfile(settings_path))):\n        msg = 'Unable to find settings file: {}'\n        raise SettingsBackendError(msg.format(settings_path))\n    return settings_path", "docstring": "Check and return the final filepath to settings\n\nArgs:\npath (str): Directory path where to search for settings file.\nfilename (str): Filename to use to search for settings file.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If determined filepath\ndoes not exists or is a directory.\n\nReturns:\nstring: Settings file path, joining given path and filename.", "source": "codesearchnet"}
{"code": "def is_subset(self, other):\n    if isinstance(other, _basebag):\n        for (elem, count) in self.counts():\n            if (not (count <= other.count(elem))):\n                return False\n    else:\n        for elem in self:\n            if ((self.count(elem) > 1) or (elem not in other)):\n                return False\n    return True", "docstring": "Check that every element in self has a count <= in other.\n\nArgs:\nother (Set)", "source": "codesearchnet"}
{"code": "def grid_reload_from_ids(oargrid_jobids):\n    gk = get_api_client()\n    jobs = []\n    for (site, job_id) in oargrid_jobids:\n        jobs.append(gk.sites[site].jobs[job_id])\n    return jobs", "docstring": "Reload all running or pending jobs of Grid'5000 from their ids\n\nArgs:\noargrid_jobids (list): list of ``(site, oar_jobid)`` identifying the\njobs on each site\n\nReturns:\nThe list of python-grid5000 jobs retrieved", "source": "codesearchnet"}
{"code": "def EncryptPrivateKey(self, decrypted):\n        \n        aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)\n        return aes.encrypt(decrypted)", "docstring": "Encrypt the provided plaintext with the initialized private key.\n\nArgs:\ndecrypted (byte string): the plaintext to be encrypted.\n\nReturns:\nbytes: the ciphertext.", "source": "juraj-google-style"}
{"code": "def vocabulary_size(self):\n    if tf.executing_eagerly():\n        return int(self.lookup_table.size().numpy()) + self._token_start_index()\n    else:\n        return self.lookup_table.size() + self._token_start_index()", "docstring": "Gets the current size of the layer's vocabulary.\n\nReturns:\nThe integer size of the vocabulary, including optional mask and oov\nindices.", "source": "github-repos"}
{"code": "def build_current_graph():\n    graph = SQLStateGraph()\n    for (app_name, config) in apps.app_configs.items():\n        try:\n            module = import_module('.'.join((config.module.__name__, SQL_CONFIG_MODULE)))\n            sql_items = module.sql_items\n        except (ImportError, AttributeError):\n            continue\n        for sql_item in sql_items:\n            graph.add_node((app_name, sql_item.name), sql_item)\n            for dep in sql_item.dependencies:\n                graph.add_lazy_dependency((app_name, sql_item.name), dep)\n    graph.build_graph()\n    return graph", "docstring": "Read current state of SQL items from the current project state.\n\nReturns:\n(SQLStateGraph) Current project state graph.", "source": "codesearchnet"}
{"code": "def rename(self, source_file_names, destination_file_names):\n    err_msg = 'source_file_names and destination_file_names should be equal in length'\n    assert len(source_file_names) == len(destination_file_names), err_msg\n    gcs_batches = []\n    gcs_current_batch = []\n    for src, dest in zip(source_file_names, destination_file_names):\n        gcs_current_batch.append((src, dest))\n        if len(gcs_current_batch) == self.CHUNK_SIZE:\n            gcs_batches.append(gcs_current_batch)\n            gcs_current_batch = []\n    if gcs_current_batch:\n        gcs_batches.append(gcs_current_batch)\n    exceptions = {}\n    for batch in gcs_batches:\n        copy_statuses = self._gcsIO().copy_batch(batch)\n        copy_succeeded = {}\n        delete_targets = []\n        for src, dest, exception in copy_statuses:\n            if exception:\n                exceptions[src, dest] = exception\n            else:\n                copy_succeeded[src] = dest\n                delete_targets.append(src)\n        delete_statuses = self._gcsIO().delete_batch(delete_targets)\n        for src, exception in delete_statuses:\n            if exception:\n                dest = copy_succeeded[src]\n                exceptions[src, dest] = exception\n    if exceptions:\n        raise BeamIOError('Rename operation failed', exceptions)", "docstring": "Rename the files at the source list to the destination list.\nSource and destination lists should be of the same size.\n\nArgs:\nsource_file_names: List of file paths that need to be moved\ndestination_file_names: List of destination_file_names for the files\n\nRaises:\n``BeamIOError``: if any of the rename operations fail", "source": "github-repos"}
{"code": "def _get_imports_for_module(module: str, output_package: str, symbols_by_module: Mapping[str, set[_Entrypoint]], generated_imports_by_module: Mapping[str, set[str]], file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool, subpackage_rewrite: Optional[str]) -> str:\n    content = ''\n    symbol_imports = list(symbols_by_module[module])\n    symbol_imports = sorted(symbol_imports, key=lambda s: f'{s.exported_symbol.file_name}:{s.name}')\n    generated_imports = sorted(generated_imports_by_module[module])\n    for imp in generated_imports:\n        if subpackage_rewrite:\n            imp = imp.replace(output_package, subpackage_rewrite)\n        last_dot = imp.rfind('.')\n        if use_lazy_loading:\n            content += f\"  '{imp[last_dot + 1:]}': ('', '{imp}'),\\n\"\n        else:\n            content += f'from {imp[:last_dot]} import {imp[last_dot + 1:]}\\n'\n    for s in symbol_imports:\n        content += f'{s.get_import(file_prefixes_to_strip, module_prefix, use_lazy_loading=use_lazy_loading)}\\n'\n    return content", "docstring": "Returns the imports for a module.\n\nArgs:\nmodule: The module to get imports for.\noutput_package: The package to use for the imports.\nsymbols_by_module: The symbols that should be exposed by each module.\ngenerated_imports_by_module: The sub-modules that should be exposed by each\nmodule.\nfile_prefixes_to_strip: The prefixes to strip from the file names of the\nimports.\nmodule_prefix: A prefix to add to the non-generated imports.\nuse_lazy_loading: Whether to use lazy loading or not.\nsubpackage_rewrite: The subpackage to use for the imports.", "source": "github-repos"}
{"code": "def parse_gptl(file_path, var_list):\n    \n    timing_result = dict()\n    if os.path.isfile(file_path):\n        with open(file_path, 'r') as f:\n            for var in var_list:\n                for line in f:\n                    if var in line:\n                        timing_result[var] = float(line.split()[4])/int(line.split()[2])\n                        break\n    return timing_result", "docstring": "Read a GPTL timing file and extract some data.\n\nArgs:\nfile_path: the path to the GPTL timing file\nvar_list: a list of strings to look for in the file\n\nReturns:\nA dict containing key-value pairs of the livvkit\nand the times associated with them", "source": "juraj-google-style"}
{"code": "def register(self, user_dict):\n        \n        endpoint = os.path.join(self._config.get('napps', 'api'), 'users', '')\n        res = self.make_request(endpoint, method='POST', json=user_dict)\n\n        return res.content.decode('utf-8')", "docstring": "Send an user_dict to NApps server using POST request.\n\nArgs:\nuser_dict(dict): Dictionary with user attributes.\n\nReturns:\nresult(string): Return the response of Napps server.", "source": "juraj-google-style"}
{"code": "def add_to_submission(self, submission_id, submission_objects):\n    LOG.info(\"Adding new variants and case data to clinvar submission '%s'\", submission_id)\n    for var_obj in submission_objects[0]:\n        try:\n            result = self.clinvar_collection.insert_one(var_obj)\n            self.clinvar_submission_collection.update_one({'_id': submission_id}, {'$push': {'variant_data': str(result.inserted_id)}}, upsert=True)\n        except pymongo.errors.DuplicateKeyError:\n            LOG.error('Attepted to insert a clinvar variant which is already in DB!')\n    if submission_objects[1]:\n        for case_obj in submission_objects[1]:\n            try:\n                result = self.clinvar_collection.insert_one(case_obj)\n                self.clinvar_submission_collection.update_one({'_id': submission_id}, {'$push': {'case_data': str(result.inserted_id)}}, upsert=True)\n            except pymongo.errors.DuplicateKeyError:\n                LOG.error('One or more casedata object is already present in clinvar collection!')\n    updated_submission = self.clinvar_submission_collection.find_one_and_update({'_id': submission_id}, {'$set': {'updated_at': datetime.now()}}, return_document=pymongo.ReturnDocument.AFTER)\n    return updated_submission", "docstring": "Adds submission_objects to clinvar collection and update the coresponding submission object with their id\n\nArgs:\nsubmission_id(str) : id of the submission to be updated\nsubmission_objects(tuple): a tuple of 2 elements coresponding to a list of variants and a list of case data objects to add to submission\n\nReturns:\nupdated_submission(obj): an open clinvar submission object, updated", "source": "codesearchnet"}
{"code": "def __init__(self, input_energy):\n    self._energy = input_energy\n    self._num_bits = input_energy.num_bits\n    self._parameters = dict(input_energy=input_energy)\n    self._index_proposal_probs = tf.Variable([0.0] * self._num_bits, trainable=False)\n    self._index_proposal_dist = tfp.distributions.Categorical(probs=self._index_proposal_probs)\n    self._eye_bool = tf.eye(self._num_bits, dtype=tf.bool)", "docstring": "Initializes a GibbsWithGradientsKernel.\n\nArgs:\ninput_energy: The parameterized energy function which helps define the\nacceptance probabilities of the Markov chain.", "source": "github-repos"}
{"code": "def get_nets_radb(self, response, is_http=False):\n    nets = []\n    if is_http:\n        regex = 'route(?:6)?:[^\\\\S\\\\n]+(?P<val>.+?)<br>'\n    else:\n        regex = '^route(?:6)?:[^\\\\S\\\\n]+(?P<val>.+|.+)$'\n    for match in re.finditer(regex, response, re.MULTILINE):\n        try:\n            net = copy.deepcopy(BASE_NET)\n            net['cidr'] = match.group(1).strip()\n            net['start'] = match.start()\n            net['end'] = match.end()\n            nets.append(net)\n        except ValueError:\n            pass\n    return nets", "docstring": "The function for parsing network blocks from ASN origin data.\n\nArgs:\nresponse (:obj:`str`): The response from the RADB whois/http\nserver.\nis_http (:obj:`bool`): If the query is RADB HTTP instead of whois,\nset to True. Defaults to False.\n\nReturns:\nlist: A list of network block dictionaries\n\n::\n\n[{\n'cidr' (str) - The assigned CIDR\n'start' (int) - The index for the start of the parsed\nnetwork block\n'end' (int) - The index for the end of the parsed network\nblock\n}]", "source": "codesearchnet"}
{"code": "def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):\n    search_keys = [k for (k, v) in kwargs.items() if (isinstance(v, list) and (len(v) > 1))]\n    functions = util.make_list(fn)\n    search = list(product(functions, util.dict_product(kwargs)))\n    results = []\n    for (fn, kw) in search:\n        if (not pairwise):\n            r = self.index.to_series().apply((lambda step: fn(step, **kw)))\n        else:\n            r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw)\n        name = ([] if (len(functions) == 1) else [fn.__name__])\n        name += util.dict_subset(kw, search_keys).values()\n        if isinstance(r, pd.DataFrame):\n            columns = pd.MultiIndex.from_tuples([tuple((name + util.make_list(c))) for c in r.columns])\n            r.columns = columns\n        else:\n            r.name = tuple(name)\n        results.append(r)\n    if (len(results) > 1):\n        result = pd.concat(results, axis=1)\n        column_names = ([] if (len(functions) == 1) else [None])\n        column_names += search_keys\n        column_names += ([None] * (len(result.columns.names) - len(column_names)))\n        result.columns.names = column_names\n        return StepFrame(result)\n    else:\n        result = results[0]\n        if isinstance(result, pd.DataFrame):\n            return StepFrame(result)\n        else:\n            result.name = functions[0].__name__\n            return StepSeries(result)", "docstring": "Apply function to each step object in the index\n\nArgs:\nfn: function to apply. If a list then each function is applied\npairwise: whether to apply the function to pairs of steps\nsymmetric, diagonal, block: passed to apply_pairwise when pairwise=True\nkwargs: a keyword arguments to pass to each function. Arguments\nwith list value are grid searched using util.dict_product.\n\nReturns: a StepFrame or StepSeries", "source": "codesearchnet"}
{"code": "def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform, final_table_name_with_ptransform):\n    final_table_name = final_table_name_no_ptransform\n    with beam.Pipeline(options=PipelineOptions()) as pipeline:\n        kvs = pipeline | 'CreateKVs' >> beam.Create(KVs)\n        kvs | 'WriteToSimpleKV' >> beam.io.Write(SimpleKVSink(simplekv, 'http:\n    final_table_name = final_table_name_with_ptransform\n    with beam.Pipeline(options=PipelineOptions()) as pipeline:\n        kvs = pipeline | 'CreateKVs' >> beam.core.Create(KVs)\n        kvs | 'WriteToSimpleKV' >> WriteToKVSink(simplekv, 'http:", "docstring": "Demonstrates creating a new custom sink and using it in a pipeline.\n\nUses the new sink in an example pipeline.\n\nAdditionally demonstrates how a sink should be implemented using a\n``PTransform``. This is the recommended way to develop sinks that are to be\ndistributed to a large number of end users.\n\nThis method runs two pipelines.\n\n(1) A pipeline that uses ``SimpleKVSink`` directly using the ``df.Write``\ntransform.\n(2) A pipeline that uses a custom ``PTransform`` that wraps\n``SimpleKVSink``.\n\nArgs:\nsimplekv: an object that mocks the key-value storage.\n\nKVs: the set of key-value pairs to be written in the example pipeline.\n\nfinal_table_name_no_ptransform: the prefix of final set of tables to be\ncreated by the example pipeline that uses\n``SimpleKVSink`` directly.\n\nfinal_table_name_with_ptransform: the prefix of final set of tables to be\ncreated by the example pipeline that uses\na ``PTransform`` that wraps\n``SimpleKVSink``.", "source": "github-repos"}
{"code": "def hgnc_genes(self, hgnc_symbol, build='37', search=False):\n        \n        LOG.debug(\"Fetching genes with symbol %s\" % hgnc_symbol)\n        if search:\n            \n            full_query = self.hgnc_collection.find({\n                '$or': [\n                    {'aliases': hgnc_symbol},\n                    {'hgnc_id': int(hgnc_symbol) if hgnc_symbol.isdigit() else None},\n                ],\n                'build': build\n            })\n            if full_query.count() != 0:\n                return full_query\n\n            return self.hgnc_collection.find({\n                'aliases': {'$regex': hgnc_symbol, '$options': 'i'},\n                'build': build\n            })\n\n        return self.hgnc_collection.find({'build': build, 'aliases': hgnc_symbol})", "docstring": "Fetch all hgnc genes that match a hgnc symbol\n\nCheck both hgnc_symbol and aliases\n\nArgs:\nhgnc_symbol(str)\nbuild(str): The build in which to search\nsearch(bool): if partial searching should be used\n\nReturns:\nresult()", "source": "juraj-google-style"}
{"code": "def filter_pyfqn(cls, value, relative_to=0):\n        \n\n        def collect_packages(element, packages):\n            parent = element.eContainer()\n            if parent:\n                collect_packages(parent, packages)\n            packages.append(element.name)\n\n        packages = []\n        collect_packages(value, packages)\n\n        if relative_to < 0 or relative_to > len(packages):\n            raise ValueError('relative_to not in range of number of packages')\n\n        fqn = '.'.join(packages[relative_to:])\n\n        if relative_to:\n            fqn = '.' + fqn\n\n        return cls.module_path_map.get(fqn, fqn)", "docstring": "Returns Python form of fully qualified name.\n\nArgs:\nrelative_to: If greater 0, the returned path is relative to the first n directories.", "source": "juraj-google-style"}
{"code": "def _AddOption(self, name):\n    \n\n    \n    if name in [option.name for option in self.options]:\n      raise TextFSMTemplateError('Duplicate option \"%s\"' % name)\n\n    \n    try:\n      option = self._options_cls.GetOption(name)(self)\n    except AttributeError:\n      raise TextFSMTemplateError('Unknown option \"%s\"' % name)\n\n    self.options.append(option)", "docstring": "Add an option to this Value.\n\nArgs:\nname: (str), the name of the Option to add.\n\nRaises:\nTextFSMTemplateError: If option is already present or\nthe option does not exist.", "source": "juraj-google-style"}
{"code": "def end_block(self, request_end_block):\n        \n\n        self.abort_if_abci_chain_is_not_synced()\n\n        chain_shift = 0 if self.chain is None else self.chain['height']\n\n        height = request_end_block.height + chain_shift\n        self.new_height = height\n\n        \n        \n        logger.debug(f'Updating pre-commit state: {self.new_height}')\n        pre_commit_state = dict(height=self.new_height,\n                                transactions=self.block_txn_ids)\n        self.bigchaindb.store_pre_commit_state(pre_commit_state)\n\n        block_txn_hash = calculate_hash(self.block_txn_ids)\n        block = self.bigchaindb.get_latest_block()\n\n        if self.block_txn_ids:\n            self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])\n        else:\n            self.block_txn_hash = block['app_hash']\n\n        validator_update = Election.process_block(self.bigchaindb,\n                                                  self.new_height,\n                                                  self.block_transactions)\n\n        return ResponseEndBlock(validator_updates=validator_update)", "docstring": "Calculate block hash using transaction ids and previous block\nhash to be stored in the next block.\n\nArgs:\nheight (int): new height of the chain.", "source": "juraj-google-style"}
{"code": "def list_groups(name):\n    \n    if six.PY2:\n        name = _to_unicode(name)\n\n    ugrp = set()\n    try:\n        user = info(name)['groups']\n    except KeyError:\n        return False\n    for group in user:\n        ugrp.add(group.strip(' *'))\n\n    return sorted(list(ugrp))", "docstring": "Return a list of groups the named user belongs to\n\nArgs:\nname (str): The user name for which to list groups\n\nReturns:\nlist: A list of groups to which the user belongs\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.list_groups foo", "source": "juraj-google-style"}
{"code": "def convert(self, vroot, entry_variables):\n        \n        self.graph_info = GraphInfo(vroot)\n        self.entry_variables = entry_variables\n\n        with nn.parameter_scope(self.name):\n            \n            for t, func in enumerate(self.graph_info.funcs):\n                if func.name in self.inner_prod_functions:\n                    inner_prod_func = func\n                    o = self._fixed_point_weight_conversion(inner_prod_func)\n                    continue\n                \n                o = self._identity_conversion(func)\n\n        self.end_variable = o\n\n        if self.call_forward:\n            o.forward(clear_buffer=True)\n        return self.end_variable", "docstring": "All functions are replaced with the same `new` function.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "juraj-google-style"}
{"code": "def get_index_from_alias(alias_name, index_client=None):\n    index_client = (index_client or indices_client())\n    if (not index_client.exists_alias(name=alias_name)):\n        return None\n    return list(index_client.get_alias(name=alias_name).keys())[0]", "docstring": "Retrieve the base index name from an alias\n\nArgs:\nalias_name (str) Name of the alias\nindex_client (Elasticsearch.IndicesClient) an Elasticsearch index\nclient. Optional, will create one if not given\n\nReturns: (str) Name of index", "source": "codesearchnet"}
{"code": "def format(self, info_dict, delimiter='/'):\n\n    def dfs(father, path, acc):\n        if isinstance(father, list):\n            for child in father:\n                dfs(child, path, acc)\n        elif isinstance(father, collections.Mapping):\n            for child in (sorted(father.items(), key=itemgetter(0)),):\n                dfs(child, path, acc)\n        elif isinstance(father, tuple):\n            path = copy.copy(path)\n            path.append(father[0])\n            dfs(father[1], path, acc)\n        else:\n            path[(- 1)] = '{}: {}'.format(path[(- 1)], str(father))\n            acc.append(delimiter.join(path))\n    result = []\n    dfs((info_dict.get('Prefix') or info_dict), [], result)\n    return '\\n'.join(result)", "docstring": "This formatter will take a data structure that\nrepresent a tree and will print all the paths\nfrom the root to the leaves\n\nin our case it will print each value and the keys\nthat needed to get to it, for example:\n\nvm0:\nnet: lago\nmemory: 1024\n\nwill be output as:\n\nvm0/net/lago\nvm0/memory/1024\n\nArgs:\ninfo_dict (dict): information to reformat\ndelimiter (str): a delimiter for the path components\nReturns:\nstr: String representing the formatted info", "source": "codesearchnet"}
{"code": "def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]:\n    \n    url = _BASE_TAF_URL.format(station=station_icao)\n    with requests.get(url) as resp:\n        if not resp.ok:\n            return f'unable to obtain TAF for station {station_icao}\\n' \\\n                   f'Got to \"http:\n                   f'for a list of valid stations', None\n        return None, resp.content.decode().split('\\n')[1]", "docstring": "Retrieves a TAF string from an online database\n\nArgs:\nstation_icao: ICAO of the station\n\nReturns:\ntuple of error, metar_str", "source": "juraj-google-style"}
{"code": "def renderJsonReadsSince(self, timestamp, meter):\n        \n        result = \"\"\n        try:\n            connection = sqlite3.connect(self.m_connection_string)\n            connection.row_factory = self.dict_factory\n            select_cursor = connection.cursor()\n            select_cursor.execute(\"select * from Meter_Reads where \" + Field.Time_Stamp +\n                                  \" > \" + str(timestamp) + \" and \" + Field.Meter_Address +\n                                  \"= '\" + meter + \"';\")\n            reads = select_cursor.fetchall()\n            result = json.dumps(reads, indent=4)\n\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n        return result", "docstring": "Simple since Time_Stamp query returned as JSON records.\n\nArgs:\ntimestamp (int): Epoch time in seconds.\nmeter (str): 12 character meter address to query\n\nReturns:\nstr: JSON rendered read records.", "source": "juraj-google-style"}
{"code": "def _ConvertBool(value, require_str):\n    if require_str:\n        if (value == 'true'):\n            return True\n        elif (value == 'false'):\n            return False\n        else:\n            raise ParseError('Expected \"true\" or \"false\", not {0}.'.format(value))\n    if (not isinstance(value, bool)):\n        raise ParseError('Expected true or false without quotes.')\n    return value", "docstring": "Convert a boolean value.\n\nArgs:\nvalue: A scalar value to convert.\nrequire_str: If True, value must be a str.\n\nReturns:\nThe bool parsed.\n\nRaises:\nParseError: If a boolean value couldn't be consumed.", "source": "codesearchnet"}
{"code": "def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):\n    \n    params = [('q', query_string.encode(\"utf-8\")), ('format', 'json')]\n\n    if no_redirect:\n        params.append(('no_redirect', 1))\n    if no_html:\n        params.append(('no_html', 1))\n    if skip_disambig:\n        params.append(('skip_disambig', 1))\n\n    return '/?' + urlencode(params)", "docstring": "Assembler of parameters for building request query.\n\nArgs:\nquery_string: Query to be passed to DuckDuckGo API.\nno_redirect: Skip HTTP redirects (for !bang commands). Default - False.\nno_html: Remove HTML from text, e.g. bold and italics. Default - False.\nskip_disambig: Skip disambiguation (D) Type. Default - False.\n\nReturns:\nA “percent-encoded” string which is used as a part of the query.", "source": "juraj-google-style"}
{"code": "def read_header(self, file_handle, nextdata_offset=0):\n        \n        header = {'FCS format': file_handle.read(6)}\n\n        file_handle.read(4)  \n\n        for field in ('text start', 'text end', 'data start', 'data end', 'analysis start',\n                      'analysis end'):\n            s = file_handle.read(8)\n            try:\n                field_value = int(s)\n            except ValueError:\n                field_value = 0\n            header[field] = field_value + nextdata_offset\n\n        \n        for k in ('text start', 'text end'):\n            if header[k] == 0:\n                raise ValueError(u'The FCS file \"{}\" seems corrupted. (Parser cannot locate '\n                                 u'information about the \"{}\" segment.)'.format(self.path, k))\n            elif header[k] > self._file_size:\n                raise ValueError(u'The FCS file \"{}\" is corrupted. \"{}\" segment '\n                                 u'is larger than file size'.format(self.path, k))\n            else:\n                \n                pass\n\n        self._data_start = header['data start']\n        self._data_end = header['data start']\n\n        if header['analysis end'] - header['analysis start'] != 0:\n            warnings.warn(u'There appears to be some information in the ANALYSIS segment of file '\n                          u'{0}. However, it might not be read correctly.'.format(self.path))\n\n        self.annotation['__header__'] = header", "docstring": "Read the header of the FCS file.\n\nThe header specifies where the annotation, data and analysis are located inside the binary\nfile.\n\nArgs:\nfile_handle: buffer containing FCS file.\nnextdata_offset: byte offset of a set header from file start specified by $NEXTDATA", "source": "juraj-google-style"}
{"code": "def __init__(self, default: typing.Any=MISSING_VALUE, annotation: typing.Any=MISSING_VALUE, transform: typing.Optional[typing.Callable[[typing.Any], typing.Any]]=None, frozen: bool=False):\n    super().__init__(object, default, transform, is_noneable=True, frozen=frozen)\n    self._annotation = annotation", "docstring": "Constructor.\n\nArgs:\ndefault: (Optional) default value of this spec.\nannotation: (Optional) external provided type annotation.\ntransform: (Optional) user-defined function to be called on the input\nof `apply`. It could be used as a type converter or a custom\nvalidator which may raise errors.\nfrozen: If True, values other than the default value is not accceptable.", "source": "github-repos"}
{"code": "def apply_sql(query: str, output_name: Optional[str], found: Dict[str, beam.PCollection], run: bool=True) -> Tuple[str, Union[PValue, SqlNode], SqlChain]:\n    output_name = _generate_output_name(output_name, query, found)\n    query, sql_source, chain = _build_query_components(query, found, output_name, run)\n    if run:\n        try:\n            output = sql_source | SqlTransform(query)\n            output_name, output = create_var_in_main(output_name, output)\n            _LOGGER.info('The output PCollection variable is %s with element_type %s', output_name, pformat_namedtuple(output.element_type))\n            return (output_name, output, chain)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except:\n            on_error('Error when applying the Beam SQL: %s', traceback.format_exc())\n            raise\n    else:\n        return (output_name, chain.current, chain)", "docstring": "Applies a SqlTransform with the given sql and queried PCollections.\n\nArgs:\nquery: The SQL query executed in the magic.\noutput_name: (optional) The output variable name in __main__ module.\nfound: The PCollections with variable names found to be used in the query.\nrun: Whether to prepare the SQL pipeline for a local run or not.\n\nReturns:\nA tuple of values. First str value is the output variable name in\n__main__ module, auto-generated if not provided. Second value: if run,\nit's a PValue; otherwise, a SqlNode tracks the SQL without applying it or\nexecuting it. Third value: SqlChain is a chain of SqlNodes that have been\napplied.", "source": "github-repos"}
{"code": "def add_peer_parser(subparsers, parent_parser):\n    \n    parser = subparsers.add_parser(\n        'peer',\n        help='Displays information about validator peers',\n        description=\"Provides a subcommand to list a validator's peers\")\n\n    grand_parsers = parser.add_subparsers(title='subcommands',\n                                          dest='subcommand')\n    grand_parsers.required = True\n    add_peer_list_parser(grand_parsers, parent_parser)", "docstring": "Adds argument parser for the peer command\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "juraj-google-style"}
{"code": "def list_load_balancers(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Network/', '/loadBalancers?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List the load balancers in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of load balancer list with properties.", "source": "codesearchnet"}
{"code": "def from_snl(cls, snl):\n        \n        hist = []\n        for h in snl.history:\n            d = h.description\n            d['_snl'] = {'url': h.url, 'name': h.name}\n            hist.append(d)\n        return cls(snl.structure, history=hist)", "docstring": "Create TransformedStructure from SNL.\n\nArgs:\nsnl (StructureNL): Starting snl\n\nReturns:\nTransformedStructure", "source": "juraj-google-style"}
{"code": "def sampler(dataframe, modulo, column='client_id', sample_id=42):\n    return dataframe.withColumn('sampler', udf((lambda key: ((crc32((key or '')) & 4294967295) % modulo)))(column)).where(('sampler = %s' % sample_id)).drop('sampler')", "docstring": "Collect a sample of clients given an input column\n\nFilter dataframe based on the modulus of the CRC32 of a given string\ncolumn matching a given sample_id. if dataframe has already been filtered\nby sample_id, then modulo should be a multiple of 100, column should be\n\"client_id\", and the given sample_id should match the value previously\nused, optionally plus multiples of 100.\n\nArgs:\ndataframe: A Dataframe to be sampled\nmodulo (int): selects a 1/modulo sampling of dataframe\ncolumn (str): name of a string column to sample on\nsample_id (int): modulus result to select for sampling\n\nReturns:\nA DataFrame sampled on the given inputs.", "source": "codesearchnet"}
{"code": "def Append(self, value, timestamp):\n    timestamp = self._NormalizeTime(timestamp)\n    if (self.data and (timestamp < self.data[(- 1)][1])):\n        raise RuntimeError('Next timestamp must be larger.')\n    self.data.append([value, timestamp])", "docstring": "Adds value at timestamp.\n\nValues must be added in order of increasing timestamp.\n\nArgs:\nvalue: An observed value.\ntimestamp: The timestamp at which value was observed.\n\nRaises:\nRuntimeError: If timestamp is smaller than the previous timstamp.", "source": "codesearchnet"}
{"code": "def make_job(name: str = '',\n             run_name: str = '',\n             num_tasks: int = 0,\n             install_script: str = '',\n             **kwargs\n             ) -> backend.Job:\n  \n  return _backend.make_job(name=name, run_name=run_name, num_tasks=num_tasks,\n                           install_script=install_script, **kwargs)", "docstring": "Create a job using current backend. Blocks until all tasks are up and initialized.\n\nArgs:\nname: name of the job\nrun_name: name of the run (auto-assigned if empty)\nnum_tasks: number of tasks\ninstall_script: bash-runnable script\n**kwargs:\n\nReturns:\nbackend.Job", "source": "juraj-google-style"}
{"code": "def filter_by_conditional_statement(self, statement):\n        \n        _filt_values, _filt_datetimes = self._filter_by_statement(statement)\n        collection = HourlyDiscontinuousCollection(\n            self.header.duplicate(), _filt_values, _filt_datetimes)\n        collection._validated_a_period = True\n        return collection", "docstring": "Filter the Data Collection based on a conditional statement.\n\nArgs:\nstatement: A conditional statement as a string (e.g. a > 25 and a%5 == 0).\nThe variable should always be named as 'a' (without quotations).\n\nReturn:\nA new Data Collection containing only the filtered data", "source": "juraj-google-style"}
{"code": "def FindModuleDefiningFlag(self, flagname, default=None):\n    \n    registered_flag = self.FlagDict().get(flagname)\n    if registered_flag is None:\n      return default\n    for module, flags in six.iteritems(self.FlagsByModuleDict()):\n      for flag in flags:\n        \n        \n        \n        if (flag.name == registered_flag.name and\n            flag.short_name == registered_flag.short_name):\n          return module\n    return default", "docstring": "Return the name of the module defining this flag, or default.\n\nArgs:\nflagname: Name of the flag to lookup.\ndefault: Value to return if flagname is not defined. Defaults\nto None.\n\nReturns:\nThe name of the module which registered the flag with this name.\nIf no such module exists (i.e. no flag with this name exists),\nwe return default.", "source": "juraj-google-style"}
{"code": "def requestB(self):\n    work_context = self.getContext()\n    self.setContext('request[v4B]')\n    self.m_serial_port.write((('2f3f'.decode('hex') + self.m_meter_address) + '3031210d0a'.decode('hex')))\n    self.m_raw_read_b = self.m_serial_port.getResponse(self.getContext())\n    unpacked_read_b = self.unpackStruct(self.m_raw_read_b, self.m_blk_b)\n    self.convertData(unpacked_read_b, self.m_blk_b, self.m_kwh_precision)\n    self.m_b_crc = self.crcMeterRead(self.m_raw_read_b, self.m_blk_b)\n    self.setContext(work_context)\n    return self.m_b_crc", "docstring": "Issue a B read on V4 meter.\n\nReturns:\nbool: True if CRC match at end of call.", "source": "codesearchnet"}
{"code": "def similar_filter_r(self, sentence_list):\n        \n        result_list = []\n        recursive_list = []\n\n        try:\n            self.nlp_base.tokenize(sentence_list[0])\n            subject_token = self.nlp_base.token\n            result_list.append(sentence_list[0])\n            if len(sentence_list) > 1:\n                for i in range(len(sentence_list)):\n                    if i > 0:\n                        self.nlp_base.tokenize(sentence_list[i])\n                        object_token = self.nlp_base.token\n                        similarity = self.calculate(subject_token, object_token)\n                        if similarity <= self.similarity_limit:\n                            recursive_list.append(sentence_list[i])\n\n            if len(recursive_list) > 0:\n                result_list.extend(self.similar_filter_r(recursive_list))\n        except IndexError:\n            result_list = sentence_list\n\n        return result_list", "docstring": "Filter mutually similar sentences.\n\nArgs:\nsentence_list:    The list of sentences.\n\nReturns:\nThe list of filtered sentences.", "source": "juraj-google-style"}
{"code": "def redirect_stdout(new_stdout):\n    (old_stdout, sys.stdout) = (sys.stdout, new_stdout)\n    try:\n        (yield None)\n    finally:\n        sys.stdout = old_stdout", "docstring": "Redirect the stdout\n\nArgs:\nnew_stdout (io.StringIO): New stdout to use instead", "source": "codesearchnet"}
{"code": "def _ParseStorageMediaImageOptions(self, options):\n    self._partitions = getattr(options, 'partitions', None)\n    if self._partitions:\n        try:\n            self._ParseVolumeIdentifiersString(self._partitions, prefix='p')\n        except ValueError:\n            raise errors.BadConfigOption('Unsupported partitions')\n    self._volumes = getattr(options, 'volumes', None)\n    if self._volumes:\n        try:\n            self._ParseVolumeIdentifiersString(self._volumes, prefix='apfs')\n        except ValueError:\n            raise errors.BadConfigOption('Unsupported volumes')", "docstring": "Parses the storage media image options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def __init__(self, paths=None, separator='/'):\n    \n    if not paths:\n      raise errors.FormatError('Missing directory value.')\n\n    super(DirectorySourceType, self).__init__()\n    self.paths = paths\n    self.separator = separator", "docstring": "Initializes a source type.\n\nArgs:\npaths (Optional[str]): paths relative to the root of the file system.\nseparator (Optional[str]): path segment separator.\n\nRaises:\nFormatError: when paths is not set.", "source": "juraj-google-style"}
{"code": "def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, store_args=None):\n    try:\n        wf = Workflow.from_name(name, queue=queue, clear_data_store=clear_data_store, arguments=store_args)\n    except DirectedAcyclicGraphInvalid as e:\n        raise WorkflowDefinitionError(workflow_name=name, graph_name=e.graph_name)\n    celery_app = create_app(config)\n    result = celery_app.send_task(JobExecPath.Workflow, args=(wf,), queue=queue, routing_key=queue)\n    return result.id", "docstring": "Start a single workflow by sending it to the workflow queue.\n\nArgs:\nname (str): The name of the workflow that should be started. Refers to the\nname of the workflow file without the .py extension.\nconfig (Config): Reference to the configuration object from which the\nsettings for the workflow are retrieved.\nqueue (str): Name of the queue the workflow should be scheduled to.\nclear_data_store (bool): Remove any documents created during the workflow\nrun in the data store after the run.\nstore_args (dict): Dictionary of additional arguments that are ingested into the\ndata store prior to the execution of the workflow.\nReturns:\nstr: The ID of the workflow job.\nRaises:\nWorkflowArgumentError: If the workflow requires arguments to be set in store_args\nthat were not supplied to the workflow.\nWorkflowImportError: If the import of the workflow fails.", "source": "codesearchnet"}
{"code": "def format_trigger(self, stream):\n        \n\n        src = u'value'\n        if self.use_count:\n            src = u'count'\n\n        return u\"{}({}) {} {}\".format(src, stream, self.comp_string, self.reference)", "docstring": "Create a user understandable string like count(stream) >= X.\n\nArgs:\nstream (DataStream): The stream to use to format ourselves.\n\nReturns:\nstr: The formatted string", "source": "juraj-google-style"}
{"code": "def write_xml(xml_str, output_loc=None, filename=None):\n    \n    if not xml_str:\n        raise Dump2PolarionException(\"No data to write.\")\n    filename_fin = _get_filename(output_loc=output_loc, filename=filename)\n\n    with io.open(filename_fin, \"w\", encoding=\"utf-8\") as xml_file:\n        xml_file.write(get_unicode_str(xml_str))\n    logger.info(\"Data written to '%s'\", filename_fin)", "docstring": "Outputs the XML content (string) into a file.\n\nIf `output_loc` is supplied and it's a file (not directory), the output\nwill be saved there and the `filename` is ignored.\n\nArgs:\nxml_str: string with XML document\noutput_loc: file or directory for saving the file\nfilename: file name that will be used if `output_loc` is directory\nIf it is needed and is not supplied, it will be generated", "source": "juraj-google-style"}
{"code": "def from_tuple(cls, queries):\n    domain = cls()\n    join_with = cls.AND\n    for query in queries:\n        if (query in [cls.OR, cls.AND]):\n            join_with = query\n        else:\n            domain.add_query(query, join_with)\n    return domain", "docstring": "Create a ``Domain`` given a set of complex query tuples.\n\nArgs:\nqueries (iter): An iterator of complex queries. Each iteration\nshould contain either:\n\n* A data-set compatible with :func:`~domain.Domain.add_query`\n* A string to switch the join type\n\nExample::\n\n[('subject', 'Test1'),\n'OR',\n('subject', 'Test2')',\n('subject', 'Test3')',\n]\n# The above is equivalent to:\n#    subject:'Test1' OR subject:'Test2' OR subject:'Test3'\n\n[('modified_at', datetime(2017, 01, 01)),\n('status', 'active'),\n]\n# The above is equivalent to:\n#    modified_at:[2017-01-01T00:00:00Z TO *]\n#    AND status:\"active\"\n\nReturns:\nDomain: A domain representing the input queries.", "source": "codesearchnet"}
{"code": "def is40(msg):\n    \n\n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    \n\n    if wrongstatus(d, 1, 2, 13):\n        return False\n\n    if wrongstatus(d, 14, 15, 26):\n        return False\n\n    if wrongstatus(d, 27, 28, 39):\n        return False\n\n    if wrongstatus(d, 48, 49, 51):\n        return False\n\n    if wrongstatus(d, 54, 55, 56):\n        return False\n\n    \n\n    if bin2int(d[39:47]) != 0:\n        return False\n\n    if bin2int(d[51:53]) != 0:\n        return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 4,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def _GroupByArguments(self, signatures):\n    groups = {}\n    for sig in signatures:\n        stripped_signature = sig.Replace(return_type=None, exceptions=None)\n        ret = groups.get(stripped_signature)\n        if not ret:\n            ret = _ReturnsAndExceptions()\n            groups[stripped_signature] = ret\n        ret.Update(sig)\n    return groups", "docstring": "Groups signatures by arguments.\n\nArguments:\nsignatures: A list of function signatures (Signature instances).\n\nReturns:\nA dictionary mapping signatures (without return and exceptions) to\na tuple of return values and exceptions.", "source": "github-repos"}
{"code": "def get_length(self, y):\n    lens = [self.find_pad_index(row) for row in y]\n    return lens", "docstring": "Get true length of y.\n\nArgs:\ny (list): padded list.\n\nReturns:\nlens: true length of y.\n\nExamples:\n>>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]\n>>> self.get_length(y)\n[1, 2, 3]", "source": "codesearchnet"}
{"code": "def get_cpu_vendor(cls, family, arch='x86'):\n    props = cls.get_cpu_props(family, arch)\n    vendor = 'generic'\n    try:\n        vendor = props.xpath('vendor/@name')[0]\n    except IndexError:\n        pass\n    return vendor", "docstring": "Get CPU vendor, if vendor is not available will return 'generic'\n\nArgs:\nfamily(str): CPU family\narch(str): CPU arch\n\nReturns:\nstr: CPU vendor if found otherwise 'generic'", "source": "codesearchnet"}
{"code": "def link_cloud(self, username=None, password=None, device_id=None):\n    reg = ComponentRegistry()\n    domain = self.get('cloud:server')\n    if (username is None):\n        prompt_str = 'Please enter your IOTile.cloud email: '\n        username = input(prompt_str)\n    if (password is None):\n        prompt_str = 'Please enter your IOTile.cloud password: '\n        password = getpass.getpass(prompt_str)\n    cloud = Api(domain=domain)\n    ok_resp = cloud.login(email=username, password=password)\n    if (not ok_resp):\n        raise ArgumentError(('Could not login to iotile.cloud as user %s' % username))\n    reg.set_config('arch:cloud_user', cloud.username)\n    reg.set_config('arch:cloud_token', cloud.token)\n    reg.set_config('arch:cloud_token_type', cloud.token_type)\n    if (device_id is not None):\n        cloud = IOTileCloud()\n        cloud.impersonate_device(device_id)", "docstring": "Create and store a token for interacting with the IOTile Cloud API.\n\nYou will need to call link_cloud once for each virtualenv that\nyou create and want to use with any api calls that touch iotile cloud.\n\nNote that this method is called on a ConfigManager instance\n\nIf you do not pass your username or password it will be prompted from\nyou securely on stdin.\n\nIf you are logging in for a user, the token will expire periodically and you\nwill have to relogin.\n\nIf you pass a device_id, you can obtain a limited token for that device\nthat will never expire, assuming you have access to that device.\n\nArgs:\nusername (string): Your iotile.cloud username.  This is prompted\nfrom stdin if not provided.\npassword (string): Your iotile.cloud password.  This is prompted\nfrom stdin if not provided.\ndevice_id (int): Optional device id to obtain permanent credentials\nfor a device.", "source": "codesearchnet"}
{"code": "def get_path(self, key, rel_to_cwd=False, rel_to_conf=False):\n        \n        if key in self.__cli:\n            path = self.__cli[key]\n            from_conf = False\n        else:\n            path = self.__config.get(key)\n            from_conf = True\n\n        if not isinstance(path, str):\n            return None\n\n        res = self.__abspath(path, from_conf)\n\n        if rel_to_cwd:\n            return os.path.relpath(res, self.__invoke_dir)\n        if rel_to_conf:\n            return os.path.relpath(res, self.__conf_dir)\n\n        return self.__abspath(path, from_conf)", "docstring": "Retrieve a path from the config, resolving it against\nthe invokation directory or the configuration file directory,\ndepending on whether it was passed through the command-line\nor the configuration file.\n\nArgs:\nkey: str, the key to lookup the path with\n\nReturns:\nstr: The path, or `None`", "source": "juraj-google-style"}
{"code": "def _checkBool(inputvalue, description='inputvalue'):\n    _checkString(description, minlength=1, description='description string')\n    if (not isinstance(inputvalue, bool)):\n        raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))", "docstring": "Check that the given inputvalue is a boolean.\n\nArgs:\n* inputvalue (boolean): The value to be checked.\n* description (string): Used in error messages for the checked inputvalue.\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"}
{"code": "def request(http, uri, method='GET', body=None, headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):\n    http_callable = getattr(http, 'request', http)\n    return http_callable(uri, method=method, body=body, headers=headers, redirections=redirections, connection_type=connection_type)", "docstring": "Make an HTTP request with an HTTP object and arguments.\n\nArgs:\nhttp: httplib2.Http, an http object to be used to make requests.\nuri: string, The URI to be requested.\nmethod: string, The HTTP method to use for the request. Defaults\nto 'GET'.\nbody: string, The payload / body in HTTP request. By default\nthere is no payload.\nheaders: dict, Key-value pairs of request headers. By default\nthere are no headers.\nredirections: int, The number of allowed 203 redirects for\nthe request. Defaults to 5.\nconnection_type: httplib.HTTPConnection, a subclass to be used for\nestablishing connection. If not set, the type\nwill be determined from the ``uri``.\n\nReturns:\ntuple, a pair of a httplib2.Response with the status code and other\nheaders and the bytes of the content returned.", "source": "codesearchnet"}
{"code": "def fill_memory_slot(memory, value, index):\n  \n  mask = tf.to_float(\n      tf.one_hot(index,\n                 tf.shape(memory)[0])[:, None, None, None])\n  fill_memory = (1 - mask) * memory + mask * value[None, ...]\n  return fill_memory", "docstring": "Fills the memory slot at a particular index with the given value.\n\nArgs:\nmemory: a 4-d tensor [memory_size, batch, length, channel] containing\nthe state of all steps\nvalue: a 3-d tensor [batch, length, channel] as the sate\nindex: integer in [0, memory_size)\n\nReturns:\nfilled memory", "source": "juraj-google-style"}
{"code": "def upload_blob(self, did, wid, filepath='./blob.json'):\n        \n\n        chars = string.ascii_letters + string.digits\n        boundary_key = ''.join(random.choice(chars) for i in range(8))\n\n        mimetype = mimetypes.guess_type(filepath)[0]\n        encoded_filename = os.path.basename(filepath)\n        file_content_length = str(os.path.getsize(filepath))\n        blob = open(filepath)\n\n        req_headers = {\n            'Content-Type': 'multipart/form-data; boundary=\"%s\"' % boundary_key\n        }\n\n        \n        payload = '--' + boundary_key + '\\r\\nContent-Disposition: form-data; name=\"encodedFilename\"\\r\\n\\r\\n' + encoded_filename + '\\r\\n'\n        payload += '--' + boundary_key + '\\r\\nContent-Disposition: form-data; name=\"fileContentLength\"\\r\\n\\r\\n' + file_content_length + '\\r\\n'\n        payload += '--' + boundary_key + '\\r\\nContent-Disposition: form-data; name=\"file\"; filename=\"' + encoded_filename + '\"\\r\\n'\n        payload += 'Content-Type: ' + mimetype + '\\r\\n\\r\\n'\n        payload += blob.read()\n        payload += '\\r\\n--' + boundary_key + '--'\n\n        return self._api.request('post', '/api/blobelements/d/' + did + '/w/' + wid, headers=req_headers, body=payload)", "docstring": "Uploads a file to a new blob element in the specified doc.\n\nArgs:\n- did (str): Document ID\n- wid (str): Workspace ID\n- filepath (str, default='./blob.json'): Blob element location\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def show_abierrors(self, nids=None, stream=sys.stdout):\n        \n        lines = []\n        app = lines.append\n\n        for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):\n            header = \"=== \" + task.qout_file.path + \"===\"\n            app(header)\n            report = task.get_event_report()\n\n            if report is not None:\n                app(\"num_errors: %s, num_warnings: %s, num_comments: %s\" % (\n                    report.num_errors, report.num_warnings, report.num_comments))\n                app(\"*** ERRORS ***\")\n                app(\"\\n\".join(str(e) for e in report.errors))\n                app(\"*** BUGS ***\")\n                app(\"\\n\".join(str(b) for b in report.bugs))\n\n            else:\n                app(\"get_envent_report returned None!\")\n\n            app(\"=\" * len(header) + 2*\"\\n\")\n\n        return stream.writelines(lines)", "docstring": "Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.\n\nArgs:\nnids: optional list of node identifiers used to filter the tasks.\nstream: File-like object. Default: sys.stdout", "source": "juraj-google-style"}
{"code": "def add_showcase(self, showcase, showcases_to_check=None):\n        \n        \n        dataset_showcase = self._get_dataset_showcase_dict(showcase)\n        if showcases_to_check is None:\n            showcases_to_check = self.get_showcases()\n        for showcase in showcases_to_check:\n            if dataset_showcase['showcase_id'] == showcase['id']:\n                return False\n        showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)\n        showcase._write_to_hdx('associate', dataset_showcase, 'package_id')\n        return True", "docstring": "Add dataset to showcase\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary\nshowcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.\n\nReturns:\nbool: True if the showcase was added, False if already present", "source": "juraj-google-style"}
{"code": "def _create_filters(col_params, extractors):\n    result = []\n    for (col_param, extractor) in zip(col_params, extractors):\n        a_filter = _create_filter(col_param, extractor)\n        if a_filter:\n            result.append(a_filter)\n    return result", "docstring": "Creates filters for the given col_params.\n\nArgs:\ncol_params: List of ListSessionGroupsRequest.ColParam protobufs.\nextractors: list of extractor functions of the same length as col_params.\nEach element should extract the column described by the corresponding\nelement of col_params.\nReturns:\nA list of filter functions. Each corresponding to a single\ncol_params.filter oneof field of _request", "source": "codesearchnet"}
{"code": "def query(self, query):\n    \n    path = self.path(query.key)\n\n    if os.path.exists(path):\n      filenames = os.listdir(path)\n      filenames = list(set(filenames) - set(self.ignore_list))\n      filenames = map(lambda f: os.path.join(path, f), filenames)\n      iterable = self._read_object_gen(filenames)\n    else:\n      iterable = list()\n\n    return query(iterable)", "docstring": "Returns an iterable of objects matching criteria expressed in `query`\nFSDatastore.query queries all the `.obj` files within the directory\nspecified by the query.key.\n\nArgs:\nquery: Query object describing the objects to return.\n\nRaturns:\nCursor with all objects matching criteria", "source": "juraj-google-style"}
{"code": "def receive(host, timeout):\n    \n    parameters = settings.get_amqp_settings()[host]\n\n    queues = parameters[\"queues\"]\n    queues = dict(map(lambda (x, y): (y, x), queues.items()))  \n    queue = queues[parameters[\"out_key\"]]\n\n    channel = _get_channel(host, timeout)\n    for method_frame, properties, body in channel.consume(queue):\n        print json.dumps({\n            \"method_frame\": str(method_frame),\n            \"properties\": str(properties),\n            \"body\": body\n        })\n        print \"-\" * 79\n        print\n\n        channel.basic_ack(method_frame.delivery_tag)", "docstring": "Print all messages in queue.\n\nArgs:\nhost (str): Specified --host.\ntimeout (int): How log should script wait for message.", "source": "juraj-google-style"}
{"code": "def follow(self, chars):\n        \n        chars = chars.lower()\n\n        node = self.node\n        for char in chars:\n            node = cgaddag.gdg_follow_edge(self.gdg, node, char.encode(\"ascii\"))\n            if not node:\n                raise KeyError(char)\n\n        return Node(self.gdg, node)", "docstring": "Traverse the GADDAG to the node at the end of the given characters.\n\nArgs:\nchars: An string of characters to traverse in the GADDAG.\n\nReturns:\nThe Node which is found by traversing the tree.", "source": "juraj-google-style"}
{"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = math_ops.cast(y_true, self._dtype)\n    y_pred = math_ops.cast(y_pred, self._dtype)\n    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n    error_sq = math_ops.squared_difference(y_pred, y_true)\n    return super(RootMeanSquaredError, self).update_state(error_sq, sample_weight=sample_weight)", "docstring": "Accumulates root mean squared error statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Defaults to 1. Can be a\n`Tensor` whose rank is either 0, or the same rank as `y_true`, and must\nbe broadcastable to `y_true`.\n\nReturns:\nUpdate op.", "source": "github-repos"}
{"code": "def add_user(\n            self, user,\n            first_name=None, last_name=None,\n            email=None, password=None\n        ):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.add_user(\n            user, first_name, last_name, email, password)", "docstring": "Add a new user.\n\nArgs:\nuser (string): User name.\nfirst_name (optional[string]): User's first name.  Defaults to None.\nlast_name (optional[string]): User's last name.  Defaults to None.\nemail: (optional[string]): User's email address.  Defaults to None.\npassword: (optional[string]): User's password.  Defaults to None.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def get_compound_bodies(node):\n    \n    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):\n        return [node.body]\n    elif isinstance(node, (ast.If, ast.While, ast.For)):\n        return [node.body, node.orelse]\n    elif PY2 and isinstance(node, ast.TryFinally):\n        return [node.body, node.finalbody]\n    elif PY2 and isinstance(node, ast.TryExcept):\n        return [node.body, node.orelse] + [h.body for h in node.handlers]\n    elif PY3 and isinstance(node, ast.Try):\n        return ([node.body, node.orelse, node.finalbody]\n                + [h.body for h in node.handlers])\n    end\n    return []", "docstring": "Returns a list of bodies of a compound statement node.\n\nArgs:\nnode: AST node.\n\nReturns:\nA list of bodies of the node. If the given node does not represent\na compound statement, an empty list is returned.", "source": "juraj-google-style"}
{"code": "def disk(radius, alias_blur=0.1, dtype=np.float32):\n  \n  if radius <= 8:\n    length = np.arange(-8, 8 + 1)\n    ksize = (3, 3)\n  else:\n    length = np.arange(-radius, radius + 1)\n    ksize = (5, 5)\n  x_axis, y_axis = np.meshgrid(length, length)\n  aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)\n  aliased_disk /= np.sum(aliased_disk)\n  \n  return tfds.core.lazy_imports.cv2.GaussianBlur(\n      aliased_disk, ksize=ksize, sigmaX=alias_blur)", "docstring": "Generating a Gaussian blurring kernel with disk shape.\n\nGenerating a Gaussian blurring kernel with disk shape using cv2 API.\n\nArgs:\nradius: integer, radius of blurring kernel.\nalias_blur: float, standard deviation of Gaussian blurring.\ndtype: data type of kernel\n\nReturns:\ncv2 object of the Gaussian blurring kernel.", "source": "juraj-google-style"}
{"code": "def AllBalancesZeroOrLess(self):\n    for (key, fixed8) in self.Balances.items():\n        if (fixed8.value > 0):\n            return False\n    return True", "docstring": "Flag indicating if all balances are 0 or less.\n\nReturns:\nbool: True if all balances are <= 0. False, otherwise.", "source": "codesearchnet"}
{"code": "def merge_vert(script, threshold=0.0):\n    filter_xml = ''.join(['  <filter name=\"Merge Close Vertices\">\\n', '    <Param name=\"Threshold\" ', 'value=\"{}\" '.format(threshold), 'description=\"Merging distance\" ', 'min=\"0\" ', 'max=\"1\" ', 'type=\"RichAbsPerc\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Merge together all the vertices that are nearer than the specified\nthreshold. Like a unify duplicate vertices but with some tolerance.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nthreshold (float): Merging distance. All the vertices that are closer\nthan this threshold are merged together. Use very small values,\ndefault is zero.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def add_forwarding_rules(self, forwarding_rules):\n        \n        rules_dict = [rule.__dict__ for rule in forwarding_rules]\n\n        return self.get_data(\n            \"load_balancers/%s/forwarding_rules/\" % self.id,\n            type=POST,\n            params={\"forwarding_rules\": rules_dict}\n        )", "docstring": "Adds new forwarding rules to a LoadBalancer.\n\nArgs:\nforwarding_rules (obj:`list`): A list of `ForwrdingRules` objects", "source": "juraj-google-style"}
{"code": "def send_message_for_lane_change(sender, **kwargs):\n    \n    current = kwargs['current']\n    owners = kwargs['possible_owners']\n    if 'lane_change_invite' in current.task_data:\n        msg_context = current.task_data.pop('lane_change_invite')\n    else:\n        msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG\n\n    wfi = WFCache(current).get_instance()\n\n    \n    TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete()\n\n    today = datetime.today()\n    for recipient in owners:\n        inv = TaskInvitation(\n            instance=wfi,\n            role=recipient,\n            wf_name=wfi.wf.name,\n            progress=30,\n            start_date=today,\n            finish_date=today + timedelta(15)\n        )\n        inv.title = current.task_data.get('INVITATION_TITLE') or wfi.wf.title\n        inv.save()\n\n        \n        try:\n\n            recipient.send_notification(title=msg_context['title'],\n                                        message=\"%s %s\" % (wfi.wf.title, msg_context['body']),\n                                        typ=1,  \n                                        url='',\n                                        sender=sender\n                                        )\n        except: \n            pass", "docstring": "Sends a message to possible owners of the current workflows\nnext lane.\n\nArgs:\n**kwargs: ``current`` and ``possible_owners`` are required.\nsender (User): User object", "source": "juraj-google-style"}
{"code": "def get_backend_engine(self, name, **kwargs):\n    if (name not in self._engines):\n        msg = 'Given settings backend is unknowed: {}'\n        raise SettingsBackendError(msg.format(name))\n    return self._engines[name](**kwargs)", "docstring": "Get backend engine from given name.\n\nArgs:\n(string): Path to validate.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If given backend name\ndoes not match any available engine.\n\nReturns:\nobject: Instance of selected backend engine.", "source": "codesearchnet"}
{"code": "def reset(self):\n    self.number_of_hops = 0\n    self.dr = np.array([0.0, 0.0, 0.0])\n    self.summed_dr2 = 0.0\n    self.sites_visited = [self._site.number]", "docstring": "Reinitialise the stored displacements, number of hops, and list of sites visited for this `Atom`.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def set_notifier_path(self, notifier, path):\n        \n        \n        \n        \n        assert self.notifier is Model.notifier or notifier is Model.notifier, \\\n            \"Already have a notifier %s path %s\" % (self.notifier, self.path)\n        self.notifier = notifier\n        self.path = path\n        \n        for name, ct in self.call_types.items():\n            if ct.is_mapping:\n                child = getattr(self, name)\n                if child and issubclass(ct.typ[1], Model):\n                    for k, v in child.items():\n                        v.set_notifier_path(notifier, self.path + [name, k])\n            elif issubclass(ct.typ, Model):\n                assert not ct.is_array, \\\n                    \"Can't deal with Arrays of Models %s\" % ct\n                child = getattr(self, name)\n                child.set_notifier_path(notifier, self.path + [name])", "docstring": "Sets the notifier, and the path from the path from block root\n\nArgs:\nnotifier (Notifier): The Notifier to tell when endpoint data changes\npath (list): The absolute path to get to this object", "source": "juraj-google-style"}
{"code": "def _log_to_stderr(self, record):\n    old_stream = self.stream\n    self.stream = sys.stderr\n    try:\n        super(PythonHandler, self).emit(record)\n    finally:\n        self.stream = old_stream", "docstring": "Emits the record to stderr.\n\nThis temporarily sets the handler stream to stderr, calls\nStreamHandler.emit, then reverts the stream back.\n\nArgs:\nrecord: logging.LogRecord, the record to log.", "source": "codesearchnet"}
{"code": "def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup):\n    logger = a99.get_python_logger()\n    (xmin, xmax, ymin_, ymax, xspan, yspan) = calc_max_min(ss)\n    ymin = (ymin_ if (setup.ymin is None) else setup.ymin)\n    num_pages = len(ss)\n    a99.format_BLB()\n    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)\n    for (i, s) in enumerate(ss):\n        title = s.title\n        fig = plt.figure()\n        plt.plot(s.x, s.y, c=_FAV_COLOR)\n        if (setup.flag_xlabel and setup.fmt_xlabel):\n            _set_plot(plt.xlabel, setup.fmt_xlabel, s)\n        if (setup.flag_ylabel and setup.fmt_ylabel):\n            _set_plot(plt.ylabel, setup.fmt_ylabel, s)\n        _set_plot(plt.title, setup.fmt_title, s)\n        plt.xlim([(xmin - (xspan * _T)), (xmax + (xspan * _T))])\n        plt.ylim([(ymin - (yspan * _T)), (ymax + (yspan * _T))])\n        plt.tight_layout()\n        plt.subplots_adjust(top=0.94)\n        logger.info(\"Printing page {0:d}/{1:d} ('{2!s}')\".format((i + 1), num_pages, title))\n        pdf.savefig(fig)\n        plt.close()\n    pdf.close()\n    logger.info('File {0!s} successfully created.'.format(pdf_filename))", "docstring": "Plots spectra into a PDF file, one spectrum per page.\n\nSplits into several pieces of width\n\nArgs:\nss: list of Spectrum objects\npdf_filename: name of output file", "source": "codesearchnet"}
{"code": "def combine_results(results):\n    total, inside = (sum((r[0] for r in results)), sum((r[1] for r in results)))\n    return (total, inside, 4 * float(inside) / total)", "docstring": "Combiner function to sum up trials and compute the estimate.\n\nArgs:\nresults: An iterable of 3-tuples (total trials, inside trials, ignored).\n\nReturns:\nA 3-tuple containing the sum of total trials, sum of inside trials, and\nthe probability computed from the two numbers.", "source": "github-repos"}
{"code": "def get(cls):\n    results = {}\n    hierarchy = cls.__hierarchy\n    hierarchy.reverse()\n    for storeMethod in hierarchy:\n        cls.merger.merge(results, storeMethod.get())\n    return results", "docstring": "Get values gathered from the previously set hierarchy.\n\nRespects the order in which sources are set, the first source set\nhas the highest priority, overrides values with the same key that\nexist in sources with lower priority.\n\nReturns:\ndict: The dictionary containing values gathered from all set sources.", "source": "codesearchnet"}
{"code": "def rep1(parser: Union[(Parser, Sequence[Input])]) -> RepeatedOnceParser:\n    if isinstance(parser, str):\n        parser = lit(parser)\n    return RepeatedOnceParser(parser)", "docstring": "Match a parser one or more times repeatedly.\n\nThis matches ``parser`` multiple times in a row. If it matches as least\nonce, it returns a list of values from each time ``parser`` matched. If it\ndoes not match ``parser`` at all, it fails.\n\nArgs:\nparser: Parser or literal", "source": "codesearchnet"}
{"code": "def coord_list_mapping_pbc(subset, superset, atol=1e-08):\n    atol = (np.array([1.0, 1.0, 1.0]) * atol)\n    return cuc.coord_list_mapping_pbc(subset, superset, atol)", "docstring": "Gives the index mapping from a subset to a superset.\nSuperset cannot contain duplicate matching rows\n\nArgs:\nsubset, superset: List of frac_coords\n\nReturns:\nlist of indices such that superset[indices] = subset", "source": "codesearchnet"}
{"code": "def _send_group_coordinator_request(self):\n    node_id = self._client.least_loaded_node()\n    if (node_id is None):\n        return Future().failure(Errors.NoBrokersAvailable())\n    elif (not self._client.ready(node_id, metadata_priority=False)):\n        e = Errors.NodeNotReadyError(node_id)\n        return Future().failure(e)\n    log.debug('Sending group coordinator request for group %s to broker %s', self.group_id, node_id)\n    request = GroupCoordinatorRequest[0](self.group_id)\n    future = Future()\n    _f = self._client.send(node_id, request)\n    _f.add_callback(self._handle_group_coordinator_response, future)\n    _f.add_errback(self._failed_request, node_id, request, future)\n    return future", "docstring": "Discover the current coordinator for the group.\n\nReturns:\nFuture: resolves to the node id of the coordinator", "source": "codesearchnet"}
{"code": "def ParseOptions(cls, options, configuration_object):\n    \n    if not isinstance(configuration_object, tools.CLITool):\n      raise errors.BadConfigObject(\n          'Configuration object is not an instance of CLITool')\n\n    status_view_mode = cls._ParseStringOption(\n        options, 'status_view_mode',\n        default_value=status_view.StatusView.MODE_WINDOW)\n\n    setattr(configuration_object, '_status_view_mode', status_view_mode)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def mirror(self):\n    reverse_circ = self.copy(name=(self.name + '_mirror'))\n    reverse_circ.data = []\n    for (inst, qargs, cargs) in reversed(self.data):\n        reverse_circ.data.append((inst.mirror(), qargs, cargs))\n    return reverse_circ", "docstring": "Mirror the circuit by reversing the instructions.\n\nThis is done by recursively mirroring all instructions.\nIt does not invert any gate.\n\nReturns:\nQuantumCircuit: the mirrored circuit", "source": "codesearchnet"}
{"code": "def _from_tensor_shape(cls, shape: Any, num_row_partitions: int, dtype: dtypes.DType) -> 'DynamicRaggedShape.Spec':\n    if dtype != dtypes.int32 and dtype != dtypes.int64:\n        raise ValueError('dtype must be tf.int32 or tf.int64')\n    shape = tensor_shape.as_shape(shape)\n    if shape.rank is None:\n        row_partitions = [RowPartitionSpec(dtype=dtype) for _ in range(num_row_partitions)]\n        return DynamicRaggedShape.Spec(row_partitions=row_partitions, static_inner_shape=tensor_shape.TensorShape(None), dtype=dtype)\n    if shape.rank <= 1:\n        if num_row_partitions:\n            raise ValueError('num_row_partitions should be zero ' + 'if shape is a scalar or vector.')\n        return DynamicRaggedShape.Spec(row_partitions=[], static_inner_shape=shape, dtype=dtype)\n    if shape.rank <= num_row_partitions:\n        raise ValueError('num_row_partitions must be less than rank')\n    num_elements_so_far = tensor_shape.dimension_value(shape[0])\n    rp_specs = []\n    for i in range(num_row_partitions):\n        current_dim = tensor_shape.dimension_value(shape[i + 1])\n        if current_dim is None or num_elements_so_far is None:\n            nvals = None\n        else:\n            nvals = num_elements_so_far * current_dim\n        rp_specs.append(RowPartitionSpec(nrows=num_elements_so_far, nvals=nvals, uniform_row_length=current_dim, dtype=dtype))\n        num_elements_so_far = nvals\n    static_inner_shape = tensor_shape.TensorShape([num_elements_so_far]) + shape[num_row_partitions + 1:]\n    return DynamicRaggedShape.Spec(row_partitions=rp_specs, static_inner_shape=static_inner_shape, dtype=dtype)", "docstring": "Creates a `DynamicRaggedShape.Spec` corresponding to a `tf.TensorShape`.\n\nIt is assumed that this is a `tf.TensorShape` coming from a\n`tf.TensorSpec`, not from `RaggedTensor.shape`.\n\nIn addition to the shape, we need to know the number of row partitions,\nand the dtype used in the shape (tf.int32 or tf.int64).\n\nWithin the dimensions that are partitioned, all dimensions are assumed\nto be uniform.\n\nArgs:\nshape: a TensorShape.\nnum_row_partitions: the ragged rank of the RaggedShape.\ndtype: the dtype of the shape (not the tensor); tf.int64 or tf.int32.\n\nReturns:\na DynamicRaggedShape.Spec representing a TensorShape.", "source": "github-repos"}
{"code": "def get_cost_per_mol(self, comp):\n        \n\n        comp = comp if isinstance(comp, Composition) else Composition(comp)\n        decomp = self.get_lowest_decomposition(comp)\n        return sum(k.energy_per_atom * v * comp.num_atoms for k, v in\n                   decomp.items())", "docstring": "Get best estimate of minimum cost/mol based on known data\n\nArgs:\ncomp:\nComposition as a pymatgen.core.structure.Composition\nReturns:\nfloat of cost/mol", "source": "juraj-google-style"}
{"code": "def error(message):\n    \n    fail = '\\033[91m'\n    end = '\\033[0m'\n    sys.exit(fail + \"Error: {}\".format(message) + end)", "docstring": "Throw an error with the given message and immediately quit.\n\nArgs:\nmessage(str): The message to display.", "source": "juraj-google-style"}
{"code": "def parse_ranges(range_string):\n    range_string = range_string.strip()\n    if not range_string:\n        return []\n    if 'inf' in range_string:\n        range_string = re.sub('inf', repr(sys.float_info.max), range_string)\n    ranges = ast.literal_eval(range_string)\n    if isinstance(ranges, list) and (not isinstance(ranges[0], list)):\n        ranges = [ranges]\n    for item in ranges:\n        if len(item) != 2:\n            raise ValueError('Incorrect number of elements in range')\n        elif not isinstance(item[0], (int, float)):\n            raise ValueError('Incorrect type in the 1st element of range: %s' % type(item[0]))\n        elif not isinstance(item[1], (int, float)):\n            raise ValueError('Incorrect type in the 2nd element of range: %s' % type(item[0]))\n    return ranges", "docstring": "Parse a string representing numerical range(s).\n\nArgs:\nrange_string: (str) A string representing a numerical range or a list of\nthem. For example:\n\"[-1.0,1.0]\", \"[-inf, 0]\", \"[[-inf, -1.0], [1.0, inf]]\"\n\nReturns:\n(list of list of float) A list of numerical ranges parsed from the input\nstring.\n\nRaises:\nValueError: If the input doesn't represent a range or a list of ranges.", "source": "github-repos"}
{"code": "def _get_class(self):\n    class_parts = [self._prefix, self._known_keys[_InstrumentationKnownStatusKeys.CLASS]]\n    return '.'.join(filter(None, class_parts))", "docstring": "Gets the class name of the test method for the instrumentation\nmethod block.\n\nReturns:\nA string containing the class name of the instrumentation test\nmethod's test or empty string if no name was parsed. If a prefix\nwas specified, then the prefix will be prepended to the class\nname.", "source": "github-repos"}
{"code": "def abort_class(reason, extras=None):\n    raise signals.TestAbortClass(reason, extras)", "docstring": "Abort all subsequent tests within the same test class in one iteration.\n\nIf one test class is requested multiple times in a test run, this can\nonly abort one of the requested executions, NOT all.\n\nArgs:\nreason: The reason to abort.\nextras: An optional field for extra information to be included in\ntest result.\n\nRaises:\nsignals.TestAbortClass: Abort all subsequent tests in a test class.", "source": "github-repos"}
{"code": "def aes_decrypt(base64_encryption_key, base64_data):\n    data = from_base64(base64_data)\n    (aes_key_bytes, hmac_key_bytes) = _extract_keys(base64_encryption_key)\n    (data, hmac_signature) = (data[:(- HMAC_SIG_SIZE)], data[(- HMAC_SIG_SIZE):])\n    if (hmac.new(hmac_key_bytes, data, hashlib.sha256).digest() != hmac_signature):\n        raise AuthenticationError('HMAC authentication failed')\n    (iv_bytes, data) = (data[:AES_BLOCK_SIZE], data[AES_BLOCK_SIZE:])\n    cipher = AES.new(aes_key_bytes, AES.MODE_CBC, iv_bytes)\n    data = cipher.decrypt(data)\n    return _unpad(data)", "docstring": "Verify HMAC-SHA256 signature and decrypt data with AES-CBC\n\nArguments:\nencryption_key (str): a base64-encoded string containing an AES encryption key and HMAC\nsigning key as generated by generate_encryption_key()\ndata (str): a byte string containing the data decrypted with an HMAC signing key\nappended to the end\n\nReturns:\nstr: a byte string containing the data that was originally encrypted\n\nRaises:\nAuthenticationError: when the HMAC-SHA256 signature authentication fails", "source": "codesearchnet"}
{"code": "def human_timestamp_to_datetime(human_timestamp, to_utc=False):\n    \n\n    settings = {}\n\n    if to_utc:\n        settings = {\"TO_TIMEZONE\": \"UTC\"}\n\n    return dateparser.parse(human_timestamp, settings=settings)", "docstring": "Converts a human-readable timestamp into a Python ``DateTime`` object\n\nArgs:\nhuman_timestamp (str): A timestamp string\nto_utc (bool): Convert the timestamp to UTC\n\nReturns:\nDateTime: The converted timestamp", "source": "juraj-google-style"}
{"code": "def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n    del shape\n    if dtype.is_floating:\n        initializer = init_ops.glorot_uniform_initializer()\n        initializing_from_value = False\n    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool or (dtype == dtypes.string):\n        initializer = init_ops.zeros_initializer()\n        initializing_from_value = False\n    else:\n        raise ValueError('An initializer for variable %s of %s is required' % (name, dtype.base_dtype))\n    return (initializer, initializing_from_value)", "docstring": "Provide a default initializer and a corresponding value.\n\nArgs:\nname: see get_variable.\nshape: see get_variable.\ndtype: see get_variable.\n\nReturns:\ninitializer and initializing_from_value. See get_variable above.\n\nRaises:\nValueError: When giving unsupported dtype.", "source": "github-repos"}
{"code": "def Create(conf):\n    global _source_implementations\n    if not _source_implementations:\n        raise RuntimeError('no source implementations exist')\n    source_name = conf['name']\n    if source_name not in list(_source_implementations.keys()):\n        raise RuntimeError('source not implemented: %r' % (source_name,))\n    return _source_implementations[source_name](conf)", "docstring": "Source creation factory method.\n\nArgs:\nconf: a dictionary of configuration key/value pairs, including one\nrequired attribute 'name'.\n\nReturns:\nA Source instance.\n\nRaises:\nRuntimeError: no sources are registered with RegisterImplementation", "source": "github-repos"}
{"code": "def swap_tensor_content_in_graph_function(graph_def, from_endiness, to_endiness):\n    if isinstance(graph_def, meta_graph_pb2.MetaGraphDef):\n        functions = graph_def.graph_def.library.function\n    elif isinstance(graph_def, graph_pb2.GraphDef):\n        functions = graph_def.library.function\n    else:\n        return\n    for function in functions:\n        node_def = function.node_def\n        for node in node_def:\n            if node.op == 'Const':\n                tensor = node.attr['value'].tensor\n                byte_swap_tensor_content(tensor, from_endiness, to_endiness)", "docstring": "Fix endiness of tensor contents.\n\nArgs:\ngraph_def: Target graph_def to change endiness.\nfrom_endiness: The original endianness format. \"big\" or \"little\"\nto_endiness: The target endianness format. \"big\" or \"little\"", "source": "github-repos"}
{"code": "def get_help_data(filepath):\n    \n\n    try:\n        with open(filepath, 'r') as file:\n            return _json.load(file, object_pairs_hook=OrderedDict)\n    except Exception as e:\n        logger.error(\"Could not load file {}\".format(filepath))\n        logger.exception(e)\n        return {}", "docstring": "Get the json data from a help file\n\nArgs:\nfilepath (str): The file path for the help file\n\nReturns:\ndata: The json data from a help file", "source": "juraj-google-style"}
{"code": "def HasTable(self, table_name):\n    \n    if not self._connection:\n      raise IOError('Not opened.')\n\n    if not table_name:\n      return False\n\n    if self._table_names is None:\n      self._table_names = []\n\n      self._cursor.execute(self._HAS_TABLE_QUERY)\n      for row in self._cursor.fetchall():\n        if not row[0]:\n          continue\n\n        row_table_name = row[0]\n        if isinstance(row_table_name, bytes):\n          row_table_name = row_table_name.decode('utf-8')\n\n        self._table_names.append(row_table_name.lower())\n\n    table_name = table_name.lower()\n    return table_name in self._table_names", "docstring": "Determines if a specific table exists.\n\nArgs:\ntable_name (str): name of the table.\n\nReturns:\nbool: True if the column exists.\n\nRaises:\nIOError: if the database file is not opened.\nOSError: if the database file is not opened.", "source": "juraj-google-style"}
{"code": "def add_severity(self, name, value):\n    logger.debug('Adding severity {0} with value {1} to variant {2}'.format(name, value, self['variant_id']))\n    self['severities'].append({name: value})", "docstring": "Add a severity to the variant\n\nArgs:\nname (str): The name of the severity\nvalue : The value of the severity", "source": "codesearchnet"}
{"code": "def checksum(self, path):\n    if not self.exists(path):\n        raise BeamIOError('Path does not exist: %s' % path)\n    return str(os.path.getsize(path))", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nArgs:\npath: string path of a file.\n\nReturns: string containing file size.\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def hex_is_dark(hexx, percent=50):\n    (r, g, b) = hex_to_rgb(hexx)\n    luma = ((((0.2126 * r) + (0.7152 * g)) + (0.0722 * b)) / 2.55)\n    return (luma < percent)", "docstring": "Function to decide if a hex colour is dark.\n\nArgs:\nhexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\nbool: The colour's brightness is less than the given percent.", "source": "codesearchnet"}
{"code": "def _FlushCache(cls, format_categories):\n    if (definitions.FORMAT_CATEGORY_ARCHIVE in format_categories):\n        cls._archive_remainder_list = None\n        cls._archive_scanner = None\n        cls._archive_store = None\n    if (definitions.FORMAT_CATEGORY_COMPRESSED_STREAM in format_categories):\n        cls._compressed_stream_remainder_list = None\n        cls._compressed_stream_scanner = None\n        cls._compressed_stream_store = None\n    if (definitions.FORMAT_CATEGORY_FILE_SYSTEM in format_categories):\n        cls._file_system_remainder_list = None\n        cls._file_system_scanner = None\n        cls._file_system_store = None\n    if (definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE in format_categories):\n        cls._storage_media_image_remainder_list = None\n        cls._storage_media_image_scanner = None\n        cls._storage_media_image_store = None\n    if (definitions.FORMAT_CATEGORY_VOLUME_SYSTEM in format_categories):\n        cls._volume_system_remainder_list = None\n        cls._volume_system_scanner = None\n        cls._volume_system_store = None", "docstring": "Flushes the cached objects for the specified format categories.\n\nArgs:\nformat_categories (set[str]): format categories.", "source": "codesearchnet"}
{"code": "def __init__(self, default_value, initializer):\n    super(InitializableLookupTableBase, self).__init__(initializer.key_dtype, initializer.value_dtype)\n    self._default_value = ops.convert_to_tensor(default_value, dtype=self._value_dtype)\n    self._default_value.get_shape().merge_with(tensor_shape.TensorShape([]))\n    if isinstance(initializer, trackable_base.Trackable):\n        self._initializer = self._track_trackable(initializer, '_initializer')\n    with ops.init_scope():\n        self._resource_handle = self._create_resource()\n    if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None:\n        with ops.init_scope():\n            self._init_op = self._initialize()\n    else:\n        self._init_op = self._initialize()", "docstring": "Construct a table object from a table reference.\n\nIf requires a table initializer object (subclass of `TableInitializerBase`).\nIt provides the table key and value types, as well as the op to initialize\nthe table. The caller is responsible to execute the initialization op.\n\nArgs:\ndefault_value: The value to use if a key is missing in the table.\ninitializer: The table initializer to use.", "source": "github-repos"}
{"code": "def ensure_scheme(url, default_scheme='http'):\n    parsed = urlsplit(url, scheme=default_scheme)\n    if (not parsed.netloc):\n        parsed = SplitResult(scheme=parsed.scheme, netloc=parsed.path, path='', query=parsed.query, fragment=parsed.fragment)\n    return urlunsplit(parsed)", "docstring": "Adds a scheme to a url if not present.\n\nArgs:\nurl (string): a url, assumed to start with netloc\ndefault_scheme (string): a scheme to be added\n\nReturns:\nstring: URL with a scheme", "source": "codesearchnet"}
{"code": "def status(self, **kwargs):\n    path = ('/geo_nodes/%s/status' % self.get_id())\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get the status of the geo node.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\ndict: The status of the geo node", "source": "codesearchnet"}
{"code": "def NewFromContent(cls, content, urn, chunk_size=1024, token=None, private_key=None, public_key=None):\n    aff4.FACTORY.Delete(urn, token=token)\n    with data_store.DB.GetMutationPool() as pool:\n        with aff4.FACTORY.Create(urn, cls, mode='w', mutation_pool=pool, token=token) as fd:\n            for start_of_chunk in range(0, len(content), chunk_size):\n                chunk = content[start_of_chunk:(start_of_chunk + chunk_size)]\n                blob_rdf = rdf_crypto.SignedBlob()\n                blob_rdf.Sign(chunk, private_key, public_key)\n                fd.Add(blob_rdf, mutation_pool=pool)\n    return urn", "docstring": "Alternate constructor for GRRSignedBlob.\n\nCreates a GRRSignedBlob from a content string by chunking it and signing\neach chunk.\n\nArgs:\ncontent: The data to stored in the GRRSignedBlob.\nurn: The AFF4 URN to create.\n\nchunk_size: Data will be chunked into this size (each chunk is\nindividually signed.\ntoken: The ACL Token.\nprivate_key: An rdf_crypto.RSAPrivateKey() instance.\npublic_key: An rdf_crypto.RSAPublicKey() instance.\n\nReturns:\nthe URN of the new object written.", "source": "codesearchnet"}
{"code": "def _find_and_replace(text, start_string, end_string, replace_fn):\n  \n  ret = u\"\"\n  current_pos = 0\n  while True:\n    start_pos = text.find(start_string, current_pos)\n    if start_pos == -1:\n      ret += text[current_pos:]\n      break\n    ret += text[current_pos:start_pos]\n    end_pos = text.find(end_string, start_pos + len(start_string))\n    if end_pos == -1:\n      break\n    ret += replace_fn(text[start_pos + len(start_string):end_pos])\n    current_pos = end_pos + len(end_string)\n  return ret", "docstring": "Remove everything found between instances of start_string and end_string.\n\nReplace each such instance with replace_fn(removed_text)\n\ne.g. _find_and_replace(u\"the [[fat]] cat [[sat]]\", u\"[[\", u\"]]\", lambda x: x)\n= u\"the fat cat sat\"\n\nArgs:\ntext: a unicode string\nstart_string: a unicode string\nend_string: a unicode string\nreplace_fn: a unary function from unicode string to unicode string\n\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def run_step(self, representer):\n        \n        assert representer, (\"ObjectRepresenter instance required to run \"\n                             \"ObjectRewriterStep.\")\n        rewriter = ObjectRewriter(self.context.get_formatted_iterable,\n                                  representer)\n        super().run_step(rewriter)", "docstring": "Do the object in-out rewrite.\n\nArgs:\nrepresenter: A pypyr.filesystem.ObjectRepresenter instance.", "source": "juraj-google-style"}
{"code": "def Next(self):\n    stacktop = self.stack[(- 1)]\n    if (stacktop.index == (- 1)):\n        stacktop = _Frame(None, index=0)\n        self.stack.append(stacktop)\n    context_array = self.stack[(- 2)].context\n    if (stacktop.index == len(context_array)):\n        self.stack.pop()\n        raise StopIteration\n    stacktop.context = context_array[stacktop.index]\n    stacktop.index += 1\n    return True", "docstring": "Advance to the next item in a repeated section.\n\nRaises:\nStopIteration if there are no more elements", "source": "codesearchnet"}
{"code": "class _ConvBlock(tf.keras.Model):\n\n    def __init__(self, kernel_size, filters, stage, block, data_format, strides=(2, 2)):\n        super(_ConvBlock, self).__init__(name='')\n        filters1, filters2, filters3 = filters\n        conv_name_base = 'res' + str(stage) + block + '_branch'\n        bn_name_base = 'bn' + str(stage) + block + '_branch'\n        bn_axis = 1 if data_format == 'channels_first' else 3\n        self.conv2a = layers.Conv2D(filters1, (1, 1), strides=strides, name=conv_name_base + '2a', data_format=data_format)\n        self.bn2a = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')\n        self.conv2b = layers.Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b', data_format=data_format)\n        self.bn2b = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')\n        self.conv2c = layers.Conv2D(filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)\n        self.bn2c = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')\n        self.conv_shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, name=conv_name_base + '1', data_format=data_format)\n        self.bn_shortcut = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '1')\n\n    def call(self, input_tensor, training=False):\n        x = self.conv2a(input_tensor)\n        x = self.bn2a(x, training=training)\n        x = tf.nn.relu(x)\n        x = self.conv2b(x)\n        x = self.bn2b(x, training=training)\n        x = tf.nn.relu(x)\n        x = self.conv2c(x)\n        x = self.bn2c(x, training=training)\n        shortcut = self.conv_shortcut(input_tensor)\n        shortcut = self.bn_shortcut(shortcut, training=training)\n        x += shortcut\n        return tf.nn.relu(x)", "docstring": "_ConvBlock is the block that has a conv layer at shortcut.\n\nArgs:\nkernel_size: the kernel size of middle conv layer at main path\nfilters: list of integers, the filters of 3 conv layer at main path\nstage: integer, current stage label, used for generating layer names\nblock: 'a','b'..., current block label, used for generating layer names\ndata_format: data_format for the input ('channels_first' or\n'channels_last').\nstrides: strides for the convolution. Note that from stage 3, the first\nconv layer at main path is with strides=(2,2), and the shortcut should\nhave strides=(2,2) as well.", "source": "github-repos"}
{"code": "def cumulative_distribution(self, X):\n    self.check_fit()\n\n    def func(*args):\n        return self.probability_density(list(args))\n    lower_bound = self.get_lower_bound()\n    ranges = [[lower_bound, val] for val in X]\n    return integrate.nquad(func, ranges)[0]", "docstring": "Computes the cumulative distribution function for the copula\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`\n\nReturns:\nnp.array: cumulative probability", "source": "codesearchnet"}
{"code": "def add_activation_summary(x, types=None, name=None, collections=None):\n    \n    ndim = x.get_shape().ndims\n    if ndim < 2:\n        logger.warn(\"Cannot summarize scalar activation {}\".format(x.name))\n        return\n    if types is None:\n        types = ['sparsity', 'rms', 'histogram']\n    with cached_name_scope('activation-summary'):\n        add_tensor_summary(x, types, name=name, collections=collections)", "docstring": "Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.\nThis function is a no-op if not calling from main training tower.\n\nArgs:\nx (tf.Tensor): the tensor to summary.\ntypes (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.\nname (str): if is None, use x.name.\ncollections (list[str]): collections of the summary ops.", "source": "juraj-google-style"}
{"code": "def get_size_with_aspect_ratio(image_size: Tuple[int, int], size: int, max_size: Optional[int]=None, mod_size: int=16) -> Tuple[int, int]:\n    height, width = image_size\n    raw_size = None\n    if max_size is not None:\n        min_original_size = float(min((height, width)))\n        max_original_size = float(max((height, width)))\n        if max_original_size / min_original_size * size > max_size:\n            raw_size = max_size * min_original_size / max_original_size\n            size = int(round(raw_size))\n    if width < height:\n        ow = size\n        if max_size is not None and raw_size is not None:\n            oh = int(raw_size * height / width)\n        else:\n            oh = int(size * height / width)\n    elif height <= width and height == size or (width <= height and width == size):\n        oh, ow = (height, width)\n    else:\n        oh = size\n        if max_size is not None and raw_size is not None:\n            ow = int(raw_size * width / height)\n        else:\n            ow = int(size * width / height)\n    if mod_size is not None:\n        ow_mod = torch.remainder(torch.tensor(ow), mod_size).item()\n        oh_mod = torch.remainder(torch.tensor(oh), mod_size).item()\n        ow = ow - ow_mod\n        oh = oh - oh_mod\n    return (oh, ow)", "docstring": "Computes the output image size given the input image size and the desired output size with multiple of divisible_size.\n\nArgs:\nimage_size (`Tuple[int, int]`):\nThe input image size.\nsize (`int`):\nThe desired output size.\nmax_size (`int`, *optional*):\nThe maximum allowed output size.\nmod_size (`int`, *optional*):\nThe size to make multiple of mod_size.", "source": "github-repos"}
{"code": "def install_package(self, name, index=None, force=False, update=False):\n    cmd = 'install'\n    if force:\n        cmd = '{0} {1}'.format(cmd, '--force-reinstall')\n    if update:\n        cmd = '{0} {1}'.format(cmd, '--update')\n    if index:\n        cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))\n    self.pip('{0} {1}'.format(cmd, name))", "docstring": "Install a given package.\n\nArgs:\nname (str): The package name to install. This can be any valid\npip package specification.\nindex (str): The URL for a pypi index to use.\nforce (bool): For the reinstall of packages during updates.\nupdate (bool): Update the package if it is out of date.", "source": "codesearchnet"}
{"code": "def resource_path(package: Union[str, types.ModuleType]) -> abstract_path.Path:\n    try:\n        path = importlib_resources.files(package)\n    except AttributeError:\n        is_adhoc = True\n    else:\n        if isinstance(path, importlib_resources._adapters.CompatibilityFiles.SpecPath):\n            is_adhoc = True\n        else:\n            is_adhoc = False\n    if is_adhoc:\n        if isinstance(package, types.ModuleType):\n            package = getattr(package.__spec__, 'name', package.__name__)\n        path = pathlib.Path(sys.modules[package].__file__)\n        if path.name == '__init__.py':\n            path = path.parent\n    if isinstance(path, pathlib.Path):\n        return abstract_path.Path(path)\n    elif isinstance(path, zipfile.Path):\n        path = ResourcePath(path.root, path.at)\n        return typing.cast(abstract_path.Path, path)\n    elif isinstance(path, importlib_resources.abc.Traversable):\n        return typing.cast(abstract_path.Path, path)\n    else:\n        raise TypeError(f'Unknown resource path: {type(path)}: {path}')", "docstring": "Returns read-only root directory path of the module.\n\nUsed to access module resource files.\n\nUsage:\n\n```python\npath = epath.resource_path('tensorflow_datasets') / 'README.md'\ncontent = path.read_text()\n```\n\nThis is compatible with everything, including zipapp (`.par`).\n\nResource files should be in the `data=` of the `py_library(` (when using\nbazel).\n\nTo write to your project (e.g. automatically update your code), read-only\nresource paths can be converted to read-write paths with\n`epath.to_write_path(path)`.\n\nArgs:\npackage: Module or module name.\n\nReturns:\nThe read-only path to the root module directory", "source": "github-repos"}
{"code": "def valid_as_v2_0(voevent):\n    _return_to_standard_xml(voevent)\n    valid_bool = voevent_v2_0_schema.validate(voevent)\n    _remove_root_tag_prefix(voevent)\n    return valid_bool", "docstring": "Tests if a voevent conforms to the schema.\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\nReturns:\nbool: Whether VOEvent is valid", "source": "codesearchnet"}
{"code": "def options(self):\n\n\t\t\n\n\t\t\n\t\tresponse = self.repo.api.http_request('OPTIONS', self.uri)\n\t\treturn response.headers", "docstring": "Small method to return headers of an OPTIONS request to self.uri\n\nArgs:\nNone\n\nReturn:\n(dict) response headers from OPTIONS request", "source": "juraj-google-style"}
{"code": "def set_default_by_alias(self, alias):\n        \n        if alias not in self._aliases:\n            raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n\n        self._default_index = self._aliases[alias]", "docstring": "Set the default dataset by its alias.\n\nAfter changing the default dataset, all calls without explicitly specifying the\ndataset by index or alias will be redirected to this dataset.\n\nArgs:\nalias (str): The alias of the dataset that should be made the default.\n\nRaises:\nDataInvalidAlias: If the alias does not represent a valid dataset.", "source": "juraj-google-style"}
{"code": "def cast_to_type(obj, out_type):\n    \n    in_type = type(obj)\n    if out_type is in_type:\n        \n        return obj\n    else:\n        return out_type(obj)", "docstring": "Cast obj to out_type if it's not out_type already.\n\nIf the obj happens to be out_type already, it just returns obj as is.\n\nArgs:\nobj: input object\nout_type: type.\n\nReturns:\nobj cast to out_type. Usual python conversion / casting rules apply.", "source": "juraj-google-style"}
{"code": "def from_api_repr(cls, api_repr):\n        \n        api_repr = api_repr.strip()\n        if not api_repr:\n            raise ValueError(\"Field path API representation cannot be empty.\")\n        return cls(*parse_field_path(api_repr))", "docstring": "Factory: create a FieldPath from the string formatted per the API.\n\nArgs:\napi_repr (str): a string path, with non-identifier elements quoted\nIt cannot exceed 1500 characters, and cannot be empty.\nReturns:\n(:class:`FieldPath`) An instance parsed from ``api_repr``.\nRaises:\nValueError if the parsing fails", "source": "juraj-google-style"}
{"code": "def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs):\n    \n\n    setup_sdk_logging(logfile, loglevel)\n    defaults = lago_config.get_section('init')\n    if workdir is None:\n        workdir = os.path.abspath('.lago')\n    defaults['workdir'] = workdir\n    defaults['virt_config'] = config\n    defaults.update(kwargs)\n    workdir, prefix = cmd.do_init(**defaults)\n    return SDK(workdir, prefix)", "docstring": "Initialize the Lago environment\n\nArgs:\nconfig(str): Path to LagoInitFile\nworkdir(str): Path to initalize the workdir, defaults to \"$PWD/.lago\"\n**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`\nlogfile(str): A path to setup a log file.\nloglevel(int): :mod:`logging` log level.\n\nReturns:\n:class:`~lago.sdk.SDK`: Initialized Lago enviornment\n\nRaises:\n:exc:`~lago.utils.LagoException`: If initialization failed", "source": "juraj-google-style"}
{"code": "def read_structs(fstream):\n    struct = read_struct(fstream)\n    while (struct is not None):\n        (yield struct)\n        struct = read_struct(fstream)", "docstring": "Read all structs from likwid's file stream.\n\nArgs:\nfstream: Likwid's output file stream.\n\nReturns:\nA generator that can be used to iterate over all structs in the\nfstream.", "source": "codesearchnet"}
{"code": "def unsafe_peek(init):\n\n    def peek(store, container, _stack=None):\n        return init(*[store.peek(attr, container, _stack=_stack) for attr in container])\n    return peek", "docstring": "Deserialize all the attributes available in the container and pass them in the same order\nas they come in the container.\n\nThis is a factory function; returns the actual `peek` routine.\n\nArguments:\n\ninit: type constructor.\n\nReturns:\n\ncallable: deserializer (`peek` routine).", "source": "codesearchnet"}
{"code": "def onTagAdd(self, name, func):\n    if ('*' in name):\n        self.ontagaddglobs.add(name, func)\n    else:\n        self.ontagadds[name].append(func)", "docstring": "Register a callback for tag addition.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "codesearchnet"}
{"code": "def save_images(images, filenames, output_dir):\n  \n  for i, filename in enumerate(filenames):\n    \n    \n    with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:\n      img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)\n      Image.fromarray(img).save(f, format='PNG')", "docstring": "Saves images to the output directory.\n\nArgs:\nimages: array with minibatch of images\nfilenames: list of filenames without path\nIf number of file names in this list less than number of images in\nthe minibatch then only first len(filenames) images will be saved.\noutput_dir: directory where to save images", "source": "juraj-google-style"}
{"code": "def __init__(self, shape, dtype=dtypes.float32, name=None):\n    \n    self._shape = tensor_shape.TensorShape(shape)\n    try:\n      self._shape_tuple = tuple(self.shape.as_list())\n    except ValueError:\n      self._shape_tuple = None\n    self._dtype = dtypes.as_dtype(dtype)\n    self._name = name", "docstring": "Creates a TensorSpec.\n\nArgs:\nshape: Value convertible to `tf.TensorShape`. The shape of the tensor.\ndtype: Value convertible to `tf.DType`. The type of the tensor values.\nname: Optional name for the Tensor.\n\nRaises:\nTypeError: If shape is not convertible to a `tf.TensorShape`, or dtype is\nnot convertible to a `tf.DType`.", "source": "juraj-google-style"}
{"code": "def get_edgestore_handle(\n    client: arango.client.ArangoClient,\n    username=None,\n    password=None,\n    edgestore_db_name: str = edgestore_db_name,\n    edgestore_edges_name: str = edgestore_edges_name,\n    edgestore_nodes_name: str = edgestore_nodes_name,\n    edgestore_pipeline_name: str = edgestore_pipeline_name,\n    edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,\n    edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,\n) -> arango.database.StandardDatabase:\n    \n\n    (username, password) = get_user_creds(username, password)\n\n    sys_db = client.db(\"_system\", username=username, password=password)\n\n    \n    try:\n        if username and password:\n            edgestore_db = sys_db.create_database(\n                name=edgestore_db_name,\n                users=[{\"username\": username, \"password\": password, \"active\": True}],\n            )\n        else:\n            edgestore_db = sys_db.create_database(name=edgestore_db_name)\n    except arango.exceptions.DatabaseCreateError:\n        if username and password:\n            edgestore_db = client.db(\n                edgestore_db_name, username=username, password=password\n            )\n        else:\n            edgestore_db = client.db(edgestore_db_name)\n\n    \n    \n    \n    try:\n        nodes = edgestore_db.create_collection(\n            edgestore_nodes_name, index_bucket_count=64\n        )\n        nodes.add_hash_index(fields=[\"name\"], unique=False)\n        nodes.add_hash_index(\n            fields=[\"components\"], unique=False\n        )  \n    except Exception:\n        pass\n\n    \n    try:\n        edges = edgestore_db.create_collection(\n            edgestore_edges_name, edge=True, index_bucket_count=64\n        )\n        edges.add_hash_index(fields=[\"relation\"], unique=False)\n        edges.add_hash_index(fields=[\"edge_types\"], unique=False)\n        edges.add_hash_index(fields=[\"nanopub_id\"], unique=False)\n        edges.add_hash_index(fields=[\"metadata.project\"], unique=False)\n        edges.add_hash_index(fields=[\"annotations[*].id\"], unique=False)\n    except Exception:\n        pass\n\n    \n    try:\n        edgestore_db.create_collection(edgestore_pipeline_name)\n    except Exception:\n        pass\n\n    try:\n        edgestore_db.create_collection(edgestore_pipeline_errors_name)\n    except Exception:\n        pass\n\n    try:\n        edgestore_db.create_collection(edgestore_pipeline_stats_name)\n    except arango.exceptions.CollectionCreateError as e:\n        pass\n\n    return edgestore_db", "docstring": "Get Edgestore arangodb database handle\n\nArgs:\nclient (arango.client.ArangoClient): Description\nusername (None, optional): Description\npassword (None, optional): Description\nedgestore_db_name (str, optional): Description\nedgestore_edges_name (str, optional): Description\nedgestore_nodes_name (str, optional): Description\n\nReturns:\narango.database.StandardDatabase: Description", "source": "juraj-google-style"}
{"code": "def _check_response(response, expected):\n        \n\n        response_code = response.status_code\n        if expected == response_code:\n            return\n\n        if response_code < 400:\n            raise ex.UnexpectedResponseCodeException(response.text)\n\n        elif response_code == 401:\n            raise ex.UnauthorizedException(response.text)\n\n        elif response_code == 400:\n            raise ex.BadRequestException(response.text)\n\n        elif response_code == 403:\n            raise ex.ForbiddenException(response.text)\n\n        elif response_code == 404:\n            raise ex.NotFoundException(response.text)\n\n        elif response_code == 429:\n            raise ex.RateLimitedException(response.text)\n\n        else:\n            raise ex.InternalServerErrorException(response.text)", "docstring": "Checks if the expected response code matches the actual response code.\nIf they're not equal, raises the appropriate exception\nArgs:\nresponse: (int) Actual status code\nexpected: (int) Expected status code", "source": "juraj-google-style"}
{"code": "def store_container(self, container):\n    with self._store_lock:\n        self.store.setdefault(container.CONTAINER_TYPE, []).append(container)", "docstring": "Thread-safe method to store data in the state's store.\n\nArgs:\ncontainer (containers.interface.AttributeContainer): The data to store.", "source": "codesearchnet"}
{"code": "def rtt_get_num_up_buffers(self):\n        \n        cmd = enums.JLinkRTTCommand.GETNUMBUF\n        dir = ctypes.c_int(enums.JLinkRTTDirection.UP)\n        return self.rtt_control(cmd, dir)", "docstring": "After starting RTT, get the current number of up buffers.\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nThe number of configured up buffers on the target.\n\nRaises:\nJLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.", "source": "juraj-google-style"}
{"code": "def get_text(revision, strip=True):\n    start_pos = revision.find('<text')\n    assert (start_pos != (- 1))\n    end_tag_pos = revision.find('>', start_pos)\n    assert (end_tag_pos != (- 1))\n    end_tag_pos += len('>')\n    end_pos = revision.find('</text>')\n    if (end_pos == (- 1)):\n        ret = ''\n    else:\n        ret = revision[end_tag_pos:end_pos]\n    if strip:\n        ret = strip_text(ret)\n    ret = text_encoder.to_unicode_utf8(ret)\n    return ret", "docstring": "Extract the text from a revision.\n\nArgs:\nrevision: a string\nstrip: a boolean\n\nReturns:\na string", "source": "codesearchnet"}
{"code": "def identity(x, name=None):\n    return array_ops.identity(x, name=name)", "docstring": "Returns a tensor with the same content as the input tensor.\n\nArgs:\nx: The input tensor.\nname: String, name for the variable to create.\n\nReturns:\nA tensor of the same shape, type and content.", "source": "github-repos"}
{"code": "def list_dir(root, prefix=False):\n    \n    root = os.path.expanduser(root)\n    directories = list(\n        filter(\n            lambda p: os.path.isdir(os.path.join(root, p)),\n            os.listdir(root)\n        )\n    )\n\n    if prefix is True:\n        directories = [os.path.join(root, d) for d in directories]\n\n    return directories", "docstring": "List all directories at a given root\n\nArgs:\nroot (str): Path to directory whose folders need to be listed\nprefix (bool, optional): If true, prepends the path to each result, otherwise\nonly returns the name of the directories found", "source": "juraj-google-style"}
{"code": "def memory_write(self, addr, data, zone=None, nbits=None):\n    buf_size = len(data)\n    buf = None\n    access = 0\n    if (nbits is None):\n        packed_data = map((lambda d: reversed(binpacker.pack(d))), data)\n        packed_data = list(itertools.chain(*packed_data))\n        buf_size = len(packed_data)\n        buf = (ctypes.c_uint8 * buf_size)(*packed_data)\n        access = 0\n    elif (nbits == 8):\n        buf = (ctypes.c_uint8 * buf_size)(*data)\n        access = 1\n    elif (nbits == 16):\n        buf = (ctypes.c_uint16 * buf_size)(*data)\n        access = 2\n        buf_size = (buf_size * access)\n    elif (nbits == 32):\n        buf = (ctypes.c_uint32 * buf_size)(*data)\n        access = 4\n        buf_size = (buf_size * access)\n    else:\n        raise ValueError(('Given bit size is invalid: %s' % nbits))\n    args = [addr, buf_size, buf, access]\n    method = self._dll.JLINKARM_WriteMemEx\n    if (zone is not None):\n        method = self._dll.JLINKARM_WriteMemZonedEx\n        args.append(zone.encode())\n    units_written = method(*args)\n    if (units_written < 0):\n        raise errors.JLinkWriteException(units_written)\n    return units_written", "docstring": "Writes memory to a target system or specific memory zone.\n\nThe optional ``zone`` specifies a memory zone to access to write to,\ne.g. ``IDATA``, ``DDATA``, or ``CODE``.\n\nThe given number of bits, if provided, must be either ``8``, ``16``, or\n``32``.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of data units to write\nzone (str): optional memory zone name to access\nnbits (int): number of bits to use for each unit\n\nReturns:\nNumber of units written.\n\nRaises:\nJLinkException: on write hardware failure.\nValueError: if ``nbits`` is not ``None``, and not in ``8``, ``16`` or\n``32``.", "source": "codesearchnet"}
{"code": "def from_index_amount(cls, idx, amount):\n    if (np.array(idx).ndim == 0):\n        v = np.zeros(6)\n        v[idx] = amount\n        return cls.from_voigt(v)\n    elif (np.array(idx).ndim == 1):\n        v = np.zeros((3, 3))\n        for i in itertools.permutations(idx):\n            v[i] = amount\n        return cls(v)\n    else:\n        raise ValueError('Index must either be 2-tuple or integer corresponding to full-tensor or voigt index')", "docstring": "Like Deformation.from_index_amount, except generates\na strain from the zero 3x3 tensor or voigt vector with\nthe amount specified in the index location.  Ensures\nsymmetric strain.\n\nArgs:\nidx (tuple or integer): index to be perturbed, can be voigt or\nfull-tensor notation\namount (float): amount to perturb selected index", "source": "codesearchnet"}
{"code": "def noise_new(\n    dim: int,\n    h: float = NOISE_DEFAULT_HURST,\n    l: float = NOISE_DEFAULT_LACUNARITY,  \n    random: Optional[tcod.random.Random] = None,\n) -> tcod.noise.Noise:\n    \n    return tcod.noise.Noise(dim, hurst=h, lacunarity=l, seed=random)", "docstring": "Return a new Noise instance.\n\nArgs:\ndim (int): Number of dimensions.  From 1 to 4.\nh (float): The hurst exponent.  Should be in the 0.0-1.0 range.\nl (float): The noise lacunarity.\nrandom (Optional[Random]): A Random instance, or None.\n\nReturns:\nNoise: The new Noise instance.", "source": "juraj-google-style"}
{"code": "def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n    temperatures = np.linspace(tmin, tmax, ntemp)\n    if self.structure:\n        ylabel = '$C_v$ (J/K/mol)'\n    else:\n        ylabel = '$C_v$ (J/K/mol-c)'\n    fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)\n    return fig", "docstring": "Plots the constant volume specific heat C_v in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "codesearchnet"}
{"code": "def _FormatServiceText(self, service):\n    string_segments = [service.name, '\\tImage Path    = {0:s}'.format(service.image_path), '\\tService Type  = {0:s}'.format(service.HumanReadableType()), '\\tStart Type    = {0:s}'.format(service.HumanReadableStartType()), '\\tService Dll   = {0:s}'.format(service.service_dll), '\\tObject Name   = {0:s}'.format(service.object_name), '\\tSources:']\n    for source in service.sources:\n        string_segments.append('\\t\\t{0:s}:{1:s}'.format(source[0], source[1]))\n    return '\\n'.join(string_segments)", "docstring": "Produces a human readable multi-line string representing the service.\n\nArgs:\nservice (WindowsService):  service to format.\n\nReturns:\nstr: human readable representation of a Windows Service.", "source": "codesearchnet"}
{"code": "def json_to_pybel(data, infer_bonds=False):\n    obmol = ob.OBMol()\n    obmol.BeginModify()\n    for atom in data['atoms']:\n        obatom = obmol.NewAtom()\n        obatom.SetAtomicNum(table.GetAtomicNum(str(atom['element'])))\n        obatom.SetVector(*atom['location'])\n        if ('label' in atom):\n            pd = ob.OBPairData()\n            pd.SetAttribute('_atom_site_label')\n            pd.SetValue(atom['label'])\n            obatom.CloneData(pd)\n    if (('bonds' not in data) or (not data['bonds'])):\n        if infer_bonds:\n            obmol.ConnectTheDots()\n            obmol.PerceiveBondOrders()\n    else:\n        for bond in data['bonds']:\n            if ('atoms' not in bond):\n                continue\n            obmol.AddBond((bond['atoms'][0] + 1), (bond['atoms'][1] + 1), bond['order'])\n    if ('unitcell' in data):\n        uc = ob.OBUnitCell()\n        uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))\n        uc.SetSpaceGroup('P1')\n        obmol.CloneData(uc)\n    obmol.EndModify()\n    mol = pybel.Molecule(obmol)\n    if ('charge' in data['atoms'][0]):\n        mol.OBMol.SetPartialChargesPerceived()\n        for (atom, pyatom) in zip(data['atoms'], mol.atoms):\n            pyatom.OBAtom.SetPartialCharge(atom['charge'])\n    return mol", "docstring": "Converts python data structure to pybel.Molecule.\n\nThis will infer bond data if not specified.\n\nArgs:\ndata: The loaded json data of a molecule, as a Python object\ninfer_bonds (Optional): If no bonds specified in input, infer them\nReturns:\nAn instance of `pybel.Molecule`", "source": "codesearchnet"}
{"code": "async def start(self, name='websocket_client'):\n    self._con = (await websockets.connect(self.url))\n    self._connection_task = self._loop.add_task(self._manage_connection(), name=name)", "docstring": "Connect to the websocket server.\n\nThis method will spawn a background task in the designated event loop\nthat will run until stop() is called.  You can control the name of the\nbackground task for debugging purposes using the name parameter.  The\nname is not used in anyway except for debug logging statements.\n\nArgs:\nname (str): Optional name for the background task.", "source": "codesearchnet"}
{"code": "def add_time_dimension(padded_inputs, seq_lens):\n    \n\n    \n    \n    \n    padded_batch_size = tf.shape(padded_inputs)[0]\n    max_seq_len = padded_batch_size \n\n    \n    new_batch_size = padded_batch_size \n    new_shape = ([new_batch_size, max_seq_len] +\n                 padded_inputs.get_shape().as_list()[1:])\n    return tf.reshape(padded_inputs, new_shape)", "docstring": "Adds a time dimension to padded inputs.\n\nArguments:\npadded_inputs (Tensor): a padded batch of sequences. That is,\nfor seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where\nA, B, C are sequence elements and * denotes padding.\nseq_lens (Tensor): the sequence lengths within the input batch,\nsuitable for passing to tf.nn.dynamic_rnn().\n\nReturns:\nReshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].", "source": "juraj-google-style"}
{"code": "def insert_query_m(data, table, conn, columns=None, db_type='mysql'):\n    \n    \n    \n    if len(data) > 10000:\n        _chunk_query(data, 10000, columns, conn, table, db_type)\n    else:\n        \n        if db_type == 'sqlite':\n            type_sign = '?'\n        else:\n            type_sign = '%s'\n        \n        type_com = type_sign + \", \"\n        type = type_com * (len(data[0]) - 1)\n        type = type + type_sign\n\n        \n        if columns:\n            stmt = \"INSERT INTO \" + table + \"( \" + columns + \") VALUES (\" + type + \")\"\n        else:\n            stmt = \"INSERT INTO \" + table + \" VALUES (\" + type + \")\"\n\n        \n        cursor = conn.cursor()\n        cursor.executemany(stmt, data)\n        conn.commit()", "docstring": "Insert python list of tuples into SQL table\n\nArgs:\ndata (list): List of tuples\ntable (str): Name of database table\nconn (connection object): database connection object\ncolumns (str): String of column names to use if not assigned then all columns are presumed to be used [Optional]\ndb_type (str): If \"sqlite\" or \"mysql\"", "source": "juraj-google-style"}
{"code": "def get_additional_charge_by_identifier(self, recurring_billing_id):\n    fmt = 'recurringBillItems/{}'.format(recurring_billing_id)\n    return self.client._get((self.url + fmt), headers=self.get_headers())", "docstring": "Query extra charge information of an invoice from its identifier.\n\nArgs:\nrecurring_billing_id: Identifier of the additional charge.\n\nReturns:", "source": "codesearchnet"}
{"code": "def plot_bloch_multivector(rho, title='', figsize=None):\n    \n    if not HAS_MATPLOTLIB:\n        raise ImportError('Must have Matplotlib installed.')\n    rho = _validate_input_state(rho)\n    num = int(np.log2(len(rho)))\n    width, height = plt.figaspect(1/num)\n    fig = plt.figure(figsize=(width, height))\n    for i in range(num):\n        ax = fig.add_subplot(1, num, i + 1, projection='3d')\n        pauli_singles = [\n            Pauli.pauli_single(num, i, 'X'),\n            Pauli.pauli_single(num, i, 'Y'),\n            Pauli.pauli_single(num, i, 'Z')\n        ]\n        bloch_state = list(\n            map(lambda x: np.real(np.trace(np.dot(x.to_matrix(), rho))),\n                pauli_singles))\n        plot_bloch_vector(bloch_state, \"qubit \" + str(i), ax=ax,\n                          figsize=figsize)\n    fig.suptitle(title, fontsize=16)\n    plt.close(fig)\n    return fig", "docstring": "Plot the Bloch sphere.\n\nPlot a sphere, axes, the Bloch vector, and its projections onto each axis.\n\nArgs:\nrho (ndarray): Numpy array for state vector or density matrix.\ntitle (str): a string that represents the plot title\nfigsize (tuple): Has no effect, here for compatibility only.\n\nReturns:\nFigure: A matplotlib figure instance if `ax = None`.\n\nRaises:\nImportError: Requires matplotlib.", "source": "juraj-google-style"}
{"code": "def __init__(self, class_number, train_examples, test_examples, **kwargs):\n    \n    super(EMNISTConfig, self).__init__(**kwargs)\n    self.class_number = class_number\n    self.train_examples = train_examples\n    self.test_examples = test_examples", "docstring": "BuilderConfig for EMNIST class number.\n\nArgs:\nclass_number: There are six different splits provided in this dataset. And\nhave different class numbers.\ntrain_examples: number of train examples\ntest_examples: number of test examples\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"}
{"code": "def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)\n    image = image - 1\n    return image", "docstring": "Normalizes an images' pixel values to between [-1, 1].\n\nArgs:\nimage (`np.ndarray`):\nImage to normalize.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def GetHelp(self, prefix='', include_special_flags=True):\n    helplist = []\n    flags_by_module = self.FlagsByModuleDict()\n    if flags_by_module:\n        modules = sorted(flags_by_module)\n        main_module = sys.argv[0]\n        if (main_module in modules):\n            modules.remove(main_module)\n            modules = ([main_module] + modules)\n        for module in modules:\n            self.__RenderOurModuleFlags(module, helplist)\n        if include_special_flags:\n            self.__RenderModuleFlags('gflags', _helpers.SPECIAL_FLAGS.FlagDict().values(), helplist)\n    else:\n        values = self.FlagDict().values()\n        if include_special_flags:\n            values.append(_helpers.SPECIAL_FLAGS.FlagDict().values())\n        self.__RenderFlagList(values, helplist, prefix)\n    return '\\n'.join(helplist)", "docstring": "Generates a help string for all known flags.\n\nArgs:\nprefix: str, per-line output prefix.\ninclude_special_flags: bool, whether to include description of\n_SPECIAL_FLAGS, i.e. --flagfile and --undefok.\n\nReturns:\nstr, formatted help message.", "source": "codesearchnet"}
{"code": "def download_software_file(filename=None, synch=False):\n    \n    if not filename:\n        raise CommandExecutionError(\"Filename option must not be none.\")\n\n    if not isinstance(synch, bool):\n        raise CommandExecutionError(\"Synch option must be boolean..\")\n\n    if synch is True:\n        query = {'type': 'op',\n                 'cmd': '<request><system><software><download>'\n                        '<file>{0}</file></download></software></system></request>'.format(filename)}\n    else:\n        query = {'type': 'op',\n                 'cmd': '<request><system><software><download><sync-to-peer>yes</sync-to-peer>'\n                        '<file>{0}</file></download></software></system></request>'.format(filename)}\n\n    return _get_job_results(query)", "docstring": "Download software packages by filename.\n\nArgs:\nfilename(str): The filename of the PANOS file to download.\n\nsynch (bool): If true then the file will synch to the peer unit.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' panos.download_software_file PanOS_5000-8.0.0\nsalt '*' panos.download_software_file PanOS_5000-8.0.0 True", "source": "juraj-google-style"}
{"code": "def from_dict(cls, config_dict, **kwargs):\n    config = cls(**config_dict)\n    to_remove = []\n    for key, value in kwargs.items():\n        if hasattr(config, key):\n            setattr(config, key, value)\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    return config", "docstring": "Constructs a BaseWatermarkingConfig instance from a dictionary of parameters.\n\nArgs:\nconfig_dict (Dict[str, Any]): Dictionary containing configuration parameters.\n**kwargs: Additional keyword arguments to override dictionary values.\n\nReturns:\nBaseWatermarkingConfig: Instance of BaseWatermarkingConfig constructed from the dictionary.", "source": "github-repos"}
{"code": "def determine_opening_indent(indent_texts):\n    \n    num_lines = len(indent_texts)\n\n    if num_lines < 1:\n        return 0\n\n    assert num_lines >= 1\n\n    first_line_indent  = indent_texts[0][0]\n\n    if num_lines == 1:\n        return first_line_indent\n\n    assert num_lines >= 2\n\n    second_line_indent = indent_texts[1][0]\n    second_line_text   = indent_texts[1][1]\n\n    if len(second_line_text) == 0:\n        return first_line_indent\n\n    return second_line_indent", "docstring": "Determine the opening indent level for a docstring.\n\nThe opening indent level is the indent level is the first non-zero indent\nlevel of a non-empty line in the docstring.\n\nArgs:\nindent_texts: The lines of the docstring as an iterable over 2-tuples\neach containing an integer indent level as the first element and\nthe text as the second element.\n\nReturns:\nThe opening indent level as an integer.", "source": "juraj-google-style"}
{"code": "def get_nearest_site(self, coords, site, r=None):\n        \n        index = self.index(site)\n        if r is None:\n            r = np.linalg.norm(np.sum(self.lattice.matrix, axis=0))\n        ns = self.get_sites_in_sphere(coords, r, include_index=True)\n        \n        ns = [n for n in ns if n[2] == index]\n        \n        ns.sort(key=lambda x: x[1])\n        \n        return ns[0][0:2]", "docstring": "Given coords and a site, find closet site to coords.\nArgs:\ncoords (3x1 array): cartesian coords of center of sphere\nsite: site to find closest to coords\nr: radius of sphere. Defaults to diagonal of unit cell\n\nReturns:\nClosest site and distance.", "source": "juraj-google-style"}
{"code": "def draw(vertexes, edges):\n    Xs = []\n    Ys = []\n    sug = _build_sugiyama_layout(vertexes, edges)\n    for vertex in sug.g.sV:\n        Xs.append((vertex.view.xy[0] - (vertex.view.w / 2.0)))\n        Xs.append((vertex.view.xy[0] + (vertex.view.w / 2.0)))\n        Ys.append(vertex.view.xy[1])\n        Ys.append((vertex.view.xy[1] + vertex.view.h))\n    for edge in sug.g.sE:\n        for (x, y) in edge.view._pts:\n            Xs.append(x)\n            Ys.append(y)\n    minx = min(Xs)\n    miny = min(Ys)\n    maxx = max(Xs)\n    maxy = max(Ys)\n    canvas_cols = (int(math.ceil((math.ceil(maxx) - math.floor(minx)))) + 1)\n    canvas_lines = int(round((maxy - miny)))\n    canvas = AsciiCanvas(canvas_cols, canvas_lines)\n    for edge in sug.g.sE:\n        assert (len(edge.view._pts) > 1)\n        for index in range(1, len(edge.view._pts)):\n            start = edge.view._pts[(index - 1)]\n            end = edge.view._pts[index]\n            start_x = int(round((start[0] - minx)))\n            start_y = int(round((start[1] - miny)))\n            end_x = int(round((end[0] - minx)))\n            end_y = int(round((end[1] - miny)))\n            assert (start_x >= 0)\n            assert (start_y >= 0)\n            assert (end_x >= 0)\n            assert (end_y >= 0)\n            canvas.line(start_x, start_y, end_x, end_y, '*')\n    for vertex in sug.g.sV:\n        x = (vertex.view.xy[0] - (vertex.view.w / 2.0))\n        y = vertex.view.xy[1]\n        canvas.box(int(round((x - minx))), int(round((y - miny))), vertex.view.w, vertex.view.h)\n        canvas.text((int(round((x - minx))) + 1), (int(round((y - miny))) + 1), vertex.data)\n    canvas.draw()", "docstring": "Build a DAG and draw it in ASCII.\n\nArgs:\nvertexes (list): list of graph vertexes.\nedges (list): list of graph edges.", "source": "codesearchnet"}
{"code": "def adaptive_gaussian_prior_builder(getter, name, *args, **kwargs):\n    kwargs['shape'] = ()\n    loc_var = getter((name + '_prior_loc'), *args, **kwargs)\n    kwargs['initializer'] = scale_variable_initializer(0.01)\n    scale_var = getter((name + '_prior_scale'), *args, **kwargs)\n    prior = tfp.distributions.Normal(loc=loc_var, scale=tf.nn.softplus(scale_var), name='{}_prior_dist'.format(name))\n    return prior", "docstring": "A pre-canned builder for adaptive scalar gaussian prior distributions.\n\nGiven a true `getter` function and arguments forwarded from `tf.get_variable`,\nreturn a distribution object for a scalar-valued adaptive gaussian prior\nwhich will be broadcast over a variable of the requisite shape. This prior's\nparameters (e.g `loc` and `scale` for a gaussian) will consist of a single\nlearned scalar for the entire `tf.Variable` for which it serves as the prior,\nregardless of that `tf.Variable`'s shape.\n\nArgs:\ngetter: The `getter` passed to a `custom_getter`. Please see the\ndocumentation for `tf.get_variable`.\nname: The `name` argument passed to `tf.get_variable`.\n*args: See positional arguments passed to `tf.get_variable`.\n**kwargs: See keyword arguments passed to `tf.get_variable`.\n\nReturns:\nAn instance of `tfp.distributions.Normal` representing the prior\ndistribution over the variable in question.", "source": "codesearchnet"}
{"code": "def merge_bindings(program: cfg.Program, node: cfg.CFGNode, bindings: Sequence[cfg.Binding]) -> cfg.Variable:\n    v = program.NewVariable()\n    for b in bindings:\n        v.PasteBinding(b, node)\n    return v", "docstring": "Create a combined Variable for a list of bindings.\n\nArgs:\nprogram: A cfg.Program instance.\nnode: The current CFG node.\nbindings: A list of cfg.Bindings.\n\nReturns:\nA cfg.Variable.", "source": "github-repos"}
{"code": "def _ParseIndex(self, preread, precompile):\n        \n        self.index = texttable.TextTable()\n        self.index.CsvToTable(self._index_handle)\n\n        if preread:\n            for row in self.index:\n                for col in row.header:\n                    row[col] = preread(col, row[col])\n\n        self.compiled = copy.deepcopy(self.index)\n\n        for row in self.compiled:\n            for col in row.header:\n                if precompile:\n                    row[col] = precompile(col, row[col])\n                if row[col]:\n                    row[col] = copyable_regex_object.CopyableRegexObject(row[col])", "docstring": "Reads index file and stores entries in TextTable.\nFor optimisation reasons, a second table is created with compiled entries.\nArgs:\npreread: func, Pre-processing, applied to each field as it is read.\nprecompile: func, Pre-compilation, applied to each field before compiling.\nRaises:\nIndexTableError: If the column headers has illegal column labels.", "source": "juraj-google-style"}
{"code": "def create_test_method(pipeline_spec_file: str, custom_preprocessors: List[Callable[..., Union[Dict, List]]]):\n\n    @mock.patch('apache_beam.Pipeline', TestPipeline)\n    def test_yaml_example(self):\n        with open(pipeline_spec_file, encoding='utf-8') as f:\n            lines = f.readlines()\n        expected_key = '\n        if expected_key in lines:\n            expected = lines[lines.index('\n        else:\n            raise ValueError(f\"Missing '\n        for i, line in enumerate(expected):\n            expected[i] = line.replace('\n        pipeline_spec = yaml.load(''.join(lines), Loader=yaml_transform.SafeLineLoader)\n        with TestEnvironment() as env:\n            for fn in custom_preprocessors:\n                pipeline_spec = fn(pipeline_spec, expected, env)\n            with beam.Pipeline(options=PipelineOptions(pickle_library='cloudpickle', **yaml_transform.SafeLineLoader.strip_metadata(pipeline_spec.get('options', {})))) as p:\n                actual = [yaml_transform.expand_pipeline(p, pipeline_spec, [yaml_provider.InlineProvider(TEST_PROVIDERS, INPUT_TRANSFORM_TEST_PROVIDERS)])]\n                if not actual[0]:\n                    actual = list(p.transforms_stack[0].parts[-1].outputs.values())\n                    for transform in p.transforms_stack[0].parts[:-1]:\n                        if transform.transform.label == 'log_for_testing':\n                            actual += list(transform.outputs.values())\n                check_output(expected)(actual)\n    if 'deps' in pipeline_spec_file:\n        test_yaml_example = pytest.mark.no_xdist(test_yaml_example)\n        test_yaml_example = unittest.skipIf(sys.platform == 'win32', 'Github virtualenv permissions issues.')(test_yaml_example)\n        test_yaml_example = unittest.skipIf('-cloud' in os.environ.get('TOX_ENV_NAME', ''), 'Github actions environment issue.')(test_yaml_example)\n    if 'java_deps' in pipeline_spec_file:\n        test_yaml_example = pytest.mark.xlang_sql_expansion_service(test_yaml_example)\n        test_yaml_example = unittest.skipIf(not os.path.exists(subprocess_server.JavaJarServer.path_to_dev_beam_jar('sdks:java:extensions:sql:expansion-service:shadowJar')), 'Requires expansion service jars.')(test_yaml_example)\n    return test_yaml_example", "docstring": "Generates a test method for a given YAML pipeline specification file.\n\nThis function reads the YAML file, extracts the expected output (if present),\nand creates a test function that uses `TestPipeline` to run the pipeline\ndefined in the YAML file. It also applies any custom preprocessors registered\nfor this test.\n\nArgs:\npipeline_spec_file: The path to the YAML file containing the pipeline\nspecification.\ncustom_preprocessors: A list of preprocessor functions to apply before\nrunning the test.\n\nReturns:\nA test method (Callable) that can be added to a unittest.TestCase class.", "source": "github-repos"}
{"code": "def get_likelihood(self, uni_matrix):\n    if (self.parents is None):\n        left_u = uni_matrix[(:, self.L)]\n        right_u = uni_matrix[(:, self.R)]\n    else:\n        left_ing = list((self.D - self.parents[0].D))[0]\n        right_ing = list((self.D - self.parents[1].D))[0]\n        left_u = uni_matrix[(self.L, left_ing)]\n        right_u = uni_matrix[(self.R, right_ing)]\n    copula = Bivariate(self.name)\n    copula.theta = self.theta\n    X_left_right = np.array([[left_u, right_u]])\n    X_right_left = np.array([[right_u, left_u]])\n    value = np.sum(copula.probability_density(X_left_right))\n    left_given_right = copula.partial_derivative(X_left_right)\n    right_given_left = copula.partial_derivative(X_right_left)\n    return (value, left_given_right, right_given_left)", "docstring": "Compute likelihood given a U matrix.\n\nArgs:\nuni_matrix(numpy.array): Matrix to compute the likelihood.\n\nReturn:\ntuple(np.ndarray, np.ndarray, np.array): likelihood and conditional values.", "source": "codesearchnet"}
{"code": "def ec2_pipeline_setup(generated=None, project='', settings=None, env='', pipeline_type='', region='', region_subnets=None):\n    data = copy.deepcopy(settings)\n    user_data = generate_encoded_user_data(env=env, region=region, generated=generated, group_name=project, pipeline_type=pipeline_type)\n    instance_security_groups = sorted(DEFAULT_EC2_SECURITYGROUPS[env])\n    instance_security_groups.append(generated.security_group_app)\n    instance_security_groups.extend(settings['security_group']['instance_extras'])\n    instance_security_groups = remove_duplicate_sg(instance_security_groups)\n    LOG.info('Instance security groups to attach: %s', instance_security_groups)\n    if settings['asg']['scaling_policy']:\n        scalingpolicy = True\n        LOG.info('Found scaling policy')\n    else:\n        scalingpolicy = False\n        LOG.info('No scaling policy found')\n    if settings['app']['eureka_enabled']:\n        elb = []\n    else:\n        elb = [generated.elb_app]\n    LOG.info('Attaching the following ELB: %s', elb)\n    health_checks = check_provider_healthcheck(settings)\n    if ((env == 'dev') or settings['app']['eureka_enabled']):\n        data['asg'].update({'hc_type': 'EC2'})\n        LOG.info('Switching health check type to: EC2')\n    hc_grace_period = data['asg'].get('hc_grace_period')\n    app_grace_period = data['asg'].get('app_grace_period')\n    grace_period = (hc_grace_period + app_grace_period)\n    ssh_keypair = data['asg'].get('ssh_keypair', None)\n    if (not ssh_keypair):\n        ssh_keypair = '{0}_{1}_default'.format(env, region)\n    LOG.info('SSH keypair (%s) used', ssh_keypair)\n    if settings['app']['canary']:\n        canary_user_data = generate_encoded_user_data(env=env, region=region, generated=generated, group_name=project, canary=True)\n        data['app'].update({'canary_encoded_user_data': canary_user_data})\n    data['asg'].update({'hc_type': data['asg'].get('hc_type').upper(), 'hc_grace_period': grace_period, 'ssh_keypair': ssh_keypair, 'provider_healthcheck': json.dumps(health_checks.providers), 'enable_public_ips': json.dumps(settings['asg']['enable_public_ips']), 'has_provider_healthcheck': health_checks.has_healthcheck, 'asg_whitelist': ASG_WHITELIST})\n    data['app'].update({'az_dict': json.dumps(region_subnets), 'encoded_user_data': user_data, 'instance_security_groups': json.dumps(instance_security_groups), 'elb': json.dumps(elb), 'scalingpolicy': scalingpolicy})\n    return data", "docstring": "Handles ec2 pipeline data setup\n\nArgs:\ngenerated (gogoutils.Generator): Generated naming formats.\nproject (str): Group name of application\nsettings (dict): Environment settings from configurations.\nenv (str): Deploy environment name, e.g. dev, stage, prod.\npipeline_type (str): Type of Foremast Pipeline to configure.\nregion (str): AWS Region to deploy to.\nregion_subnets (dict): Subnets for a Region, e.g.\n{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.\n\nReturns:\ndict: Updated settings to pass to templates for EC2 info", "source": "codesearchnet"}
{"code": "def get(self, block_id):\n        \n\n        pool = current_app.config['bigchain_pool']\n\n        with pool() as bigchain:\n            block = bigchain.get_block(block_id=block_id)\n\n        if not block:\n            return make_error(404)\n\n        return block", "docstring": "API endpoint to get details about a block.\n\nArgs:\nblock_id (str): the id of the block.\n\nReturn:\nA JSON string containing the data about the block.", "source": "juraj-google-style"}
{"code": "def delete(self, url, params=None, **kwargs):\n        \n        return self.call_api(\n            \"DELETE\",\n            url,\n            params=params,\n            **kwargs\n        )", "docstring": "Call the API with a DELETE request.\n\nArgs:\nurl (str): Resource location relative to the base URL.\nparams (dict or None): Query-string parameters.\n\nReturns:\nResultParser or ErrorParser.", "source": "juraj-google-style"}
{"code": "def submit_evaluation(self, variant_obj, user_obj, institute_obj, case_obj, link, criteria):\n    variant_specific = variant_obj['_id']\n    variant_id = variant_obj['variant_id']\n    user_id = user_obj['_id']\n    user_name = user_obj.get('name', user_obj['_id'])\n    institute_id = institute_obj['_id']\n    case_id = case_obj['_id']\n    evaluation_terms = [evluation_info['term'] for evluation_info in criteria]\n    classification = get_acmg(evaluation_terms)\n    evaluation_obj = build_evaluation(variant_specific=variant_specific, variant_id=variant_id, user_id=user_id, user_name=user_name, institute_id=institute_id, case_id=case_id, classification=classification, criteria=criteria)\n    self._load_evaluation(evaluation_obj)\n    self.update_acmg(institute_obj, case_obj, user_obj, link, variant_obj, classification)\n    return classification", "docstring": "Submit an evaluation to the database\n\nGet all the relevant information, build a evaluation_obj\n\nArgs:\nvariant_obj(dict)\nuser_obj(dict)\ninstitute_obj(dict)\ncase_obj(dict)\nlink(str): variant url\ncriteria(list(dict)):\n\n[\n{\n'term': str,\n'comment': str,\n'links': list(str)\n},\n.\n.\n]", "source": "codesearchnet"}
{"code": "def match_from_mro(self, left, other_type, allow_compat_builtins=True):\n    for base in left.mro:\n        if isinstance(base, abstract.ParameterizedClass):\n            base_cls = base.base_cls\n        else:\n            base_cls = base\n        if isinstance(base_cls, abstract.Class):\n            if self._match_base_class_flat(base_cls, other_type, allow_compat_builtins):\n                return base\n        elif isinstance(base_cls, abstract.AMBIGUOUS):\n            return base_cls\n        elif isinstance(base_cls, abstract.Empty):\n            continue\n        else:\n            log.warning('Invalid base class %r', base_cls)\n            continue", "docstring": "Checks a type's MRO for a match for a formal type.\n\nArgs:\nleft: The type.\nother_type: The formal type.\nallow_compat_builtins: Whether to allow compatible builtins to match -\ne.g., int against float.\n\nReturns:\nThe match, if any, None otherwise.", "source": "github-repos"}
{"code": "def get_result(self, timeout=None) -> Optional[GenerationOutput]:\n    if self._generation_thread is None and self.output_queue.empty():\n        return None\n    try:\n        result = self.output_queue.get(block=True, timeout=timeout)\n        logger.debug(f'Retrieved result for request {result.request_id}')\n        return result\n    except queue.Empty:\n        return None", "docstring": "Retrieve one result from the output queue.\n\nArgs:\ntimeout: Maximum time to wait for a result\n\nReturns:\nOptional[Dict]: The result data or None if timeout", "source": "github-repos"}
{"code": "def configure_tests(tests, test_run_id):\n    print('UPDATE CONFIG')\n    os.makedirs(HARNESS_DIRECTORY, exist_ok=True)\n    for filename, script in tests:\n        script_fields = json_get_fields(script)\n        script_name = filename.split('.')[0]\n        harness_fields = {}\n        harness_path = HARNESS_DIRECTORY + script_name + '.json'\n        if os.path.exists(harness_path):\n            with open(harness_path, 'r') as f:\n                harness_fields = json.load(f)\n        new_fields = {}\n        for field in script_fields:\n            if field['name'] == 'test_run_id':\n                new_fields['test_run_id'] = test_run_id\n            else:\n                new_fields[field['name']] = harness_fields.get(field['name'], field.get('default'))\n                new_fields['%s_description' % field['name']] = '(%s) %s' % (field.get('kind', 'string'), field.get('description', 'No description.'))\n                if field['name'] not in harness_fields:\n                    print('NEW FIELD ADDED', script_name, field['name'])\n        if new_fields:\n            with open(harness_path, 'w') as f:\n                json.dump(new_fields, f, indent=2)\n        elif os.path.exists(harness_path):\n            os.remove(harness_path)\n    print('')\n    print('------')\n    print('------------')\n    print('------------------------')\n    print('Some tests require custom values. Update the necessary fields for the tests you wish to run.')\n    print('EDIT: ' + HARNESS_DIRECTORY)\n    print('------------------------')\n    print('Some tests require external assets.  Join the following group to gain access.')\n    print('VISIT: https:\n    print('------------------------')\n    print('------------')\n    print('------')\n    print('')\n    sleep(3)", "docstring": "Initialize the starthinker_assets/tests.json variable harness.\n\nRead all existing tests from tests/*.json and create a harness file in\nstarthinker_assets/tests/*.json so developer can configure tests.\n\nArgs:\ntest: List of (filename, json) pairs containing all the tests.\n\nReturns:\nNone", "source": "github-repos"}
{"code": "def __init__(\n            self,\n            base_url,\n            username=None,\n            api_key=None,\n            status_endpoint=None,\n            timeout=60\n    ):\n        \n        self.base_url = base_url\n        self.username = username\n        self.api_key = api_key\n        self.status_endpoint = urljoin(self.base_url, status_endpoint)\n        self.timeout = timeout", "docstring": "Initialise client.\n\nArgs:\nbase_url (str): The base URL to the service being used.\nusername (str): The username to authenticate with.\napi_key (str): The API key to authenticate with.\ntimeout (int): Maximum time before timing out.", "source": "juraj-google-style"}
{"code": "def _add_session_callback(self, callback_obj, callback, one_shot, originator):\n    if one_shot:\n\n        @wraps(callback)\n        def remove_then_invoke(*args, **kwargs):\n            if (callback_obj in self._session_callbacks):\n                self._remove_session_callback(callback_obj, originator)\n            return callback(*args, **kwargs)\n        actual_callback = remove_then_invoke\n    else:\n        actual_callback = callback\n    callback_obj._callback = self._wrap_with_self_as_curdoc(actual_callback)\n    self._session_callbacks.add(callback_obj)\n    self._callback_objs_by_callable[originator][callback].add(callback_obj)\n    self._trigger_on_change(SessionCallbackAdded(self, callback_obj))\n    return callback_obj", "docstring": "Internal implementation for adding session callbacks.\n\nArgs:\ncallback_obj (SessionCallback) :\nA session callback object that wraps a callable and is\npassed to ``trigger_on_change``.\n\ncallback (callable) :\nA callable to execute when session events happen.\n\none_shot (bool) :\nWhether the callback should immediately auto-remove itself\nafter one execution.\n\nReturns:\nSessionCallback : passed in as ``callback_obj``.\n\nRaises:\nValueError, if the callback has been previously added", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states):\n    hidden_states = self.wi(hidden_states)\n    hidden_states = self.act(hidden_states)\n    hidden_states = self.dropout(hidden_states)\n    hidden_states = self.wo(hidden_states)\n    return hidden_states", "docstring": "Args:\nhidden_states (`torch.Tensor`) :\n[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\nReturns:\ntorch.Tensor[num_groups, tokens_per_group, hidden_dim]", "source": "github-repos"}
{"code": "def _oai_to_xml(marc_oai):  \n    \n    record = MARCXMLRecord(marc_oai)\n    record.oai_marc = False\n\n    return record.to_XML()", "docstring": "Convert OAI to MARC XML.\n\nArgs:\nmarc_oai (str): String with either OAI or MARC XML.\n\nReturns:\nstr: String with MARC XML.", "source": "juraj-google-style"}
{"code": "def sg_producer_func(func):\n\n    @wraps(func)\n    def wrapper(**kwargs):\n        'Manages arguments of `tf.sg_opt`.\\n\\n        Args:\\n          **kwargs:\\n            source: A source queue list to enqueue\\n            dtypes: Input data types of each tensor\\n            out_dtypes: Output data types of each tensor ( If None, same as dtypes )\\n            capacity: Queue capacity. Default is 32.\\n            num_threads: Number of threads. Default is 1.\\n        '\n        opt = (tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1))\n        assert (opt.source is not None), 'source is mandatory.'\n        if ((type(opt.source) is not list) and (type(opt.source) is not tuple)):\n            opt.source = [opt.source]\n        if ((type(opt.dtypes) is not list) and (type(opt.dtypes) is not tuple)):\n            opt.dtypes = [opt.dtypes]\n        if (opt.out_dtypes is None):\n            opt.out_dtypes = opt.dtypes\n        if ((type(opt.out_dtypes) is not list) and (type(opt.out_dtypes) is not tuple)):\n            opt.out_dtypes = [opt.out_dtypes]\n        assert (len(opt.source) == len(opt.dtypes)), 'Source and dtypes should have same length.'\n\n        def enqueue_func(sess, op):\n            data = func(sess.run(opt.source))\n            feed_dict = {}\n            for (ph, col) in zip(placeholders, data):\n                feed_dict[ph] = col\n            sess.run(op, feed_dict=feed_dict)\n        placeholders = []\n        for dtype in opt.dtypes:\n            placeholders.append(tf.placeholder(dtype=dtype))\n        queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes)\n        enqueue_op = queue.enqueue(placeholders)\n        runner = _FuncQueueRunner(enqueue_func, queue, ([enqueue_op] * opt.num_threads))\n        tf.train.add_queue_runner(runner)\n        return queue.dequeue()\n    return wrapper", "docstring": "r\"\"\"Decorates a function `func` as sg_producer_func.\n\nArgs:\nfunc: A function to decorate.", "source": "codesearchnet"}
{"code": "def __init__(self, excluded_sites=None, **kwargs):\n        \n        super().__init__(**kwargs)\n        self.excluded_site = excluded_sites\n        if excluded_sites is None:\n            self.excluded_site = []", "docstring": "Constructor.\n\nArgs:\nexcluded_sites(list): sites to forget about when reloading the\njobs. The primary use case was to exclude unreachable sites and\nallow the program to go on.", "source": "juraj-google-style"}
{"code": "def read_as_base64(fn):\n    \n    with open(fn) as unpacked_file:\n        with tempfile.TemporaryFile() as b64_file:\n            base64.encode(unpacked_file, b64_file)\n            b64_file.flush()\n\n            b64_file.seek(0)\n            return b64_file.read()", "docstring": "Convert given `fn` to base64 and return it. This method does the process\nin not-so-much memory consuming way.\n\nArgs:\nfn (str): Path to the file which should be converted.\n\nReturns:\nstr: File encoded as base64.", "source": "juraj-google-style"}
{"code": "def parse_pv(header):\n    \n    order_fit = parse_order_fit(header)\n\n    def parse_with_base(i):\n        key_base = \"PV%d_\" % i\n\n        pvi_x = [header[key_base + \"0\"]]\n\n        def parse_range(lower, upper):\n            for j in range(lower, upper + 1):\n                pvi_x.append(header[key_base + str(j)])\n\n        if order_fit >= 1:\n            parse_range(1, 3)\n\n        if order_fit >= 2:\n            parse_range(4, 6)\n\n        if order_fit >= 3:\n            parse_range(7, 10)\n\n        return pvi_x\n\n    return [parse_with_base(1), parse_with_base(2)]", "docstring": "Parses the PV array from an astropy FITS header.\n\nArgs:\nheader: astropy.io.fits.header.Header\nThe header containing the PV values.\n\nReturns:\ncd: 2d array (list(list(float))\n[[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]]\nNote that N depends on the order of the fit.  For example, an\norder 3 fit goes up to PV?_10.", "source": "juraj-google-style"}
{"code": "def strip_prefix_from_items(prefix, items):\n  \n  items_no_prefix = []\n  for item in items:\n    if item.startswith(prefix):\n      items_no_prefix.append(item[len(prefix):])\n    else:\n      items_no_prefix.append(item)\n  return items_no_prefix", "docstring": "Strips out the prefix from each of the items if it is present.\n\nArgs:\nprefix: the string for that you wish to strip from the beginning of each\nof the items.\nitems: a list of strings that may or may not contain the prefix you want\nto strip out.\n\nReturns:\nitems_no_prefix: a copy of the list of items (same order) without the\nprefix (if present).", "source": "juraj-google-style"}
{"code": "def get_niggli_reduced_lattice(self, tol: float = 1e-5) -> \"Lattice\":\n        \n        \n        matrix = self.lll_matrix\n        a = matrix[0]\n        b = matrix[1]\n        c = matrix[2]\n        e = tol * self.volume ** (1 / 3)\n\n        \n        G = [\n            [dot(a, a), dot(a, b), dot(a, c)],\n            [dot(a, b), dot(b, b), dot(b, c)],\n            [dot(a, c), dot(b, c), dot(c, c)],\n        ]\n        G = np.array(G)\n\n        \n        for count in range(100):\n            \n            \n            (A, B, C, E, N, Y) = (\n                G[0, 0],\n                G[1, 1],\n                G[2, 2],\n                2 * G[1, 2],\n                2 * G[0, 2],\n                2 * G[0, 1],\n            )\n\n            if A > B + e or (abs(A - B) < e and abs(E) > abs(N) + e):\n                \n                M = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]\n                G = dot(transpose(M), dot(G, M))\n            if (B > C + e) or (abs(B - C) < e and abs(N) > abs(Y) + e):\n                \n                M = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]\n                G = dot(transpose(M), dot(G, M))\n                continue\n\n            l = 0 if abs(E) < e else E / abs(E)\n            m = 0 if abs(N) < e else N / abs(N)\n            n = 0 if abs(Y) < e else Y / abs(Y)\n            if l * m * n == 1:\n                \n                i = -1 if l == -1 else 1\n                j = -1 if m == -1 else 1\n                k = -1 if n == -1 else 1\n                M = [[i, 0, 0], [0, j, 0], [0, 0, k]]\n                G = dot(transpose(M), dot(G, M))\n            elif l * m * n == 0 or l * m * n == -1:\n                \n                i = -1 if l == 1 else 1\n                j = -1 if m == 1 else 1\n                k = -1 if n == 1 else 1\n\n                if i * j * k == -1:\n                    if n == 0:\n                        k = -1\n                    elif m == 0:\n                        j = -1\n                    elif l == 0:\n                        i = -1\n                M = [[i, 0, 0], [0, j, 0], [0, 0, k]]\n                G = dot(transpose(M), dot(G, M))\n\n            (A, B, C, E, N, Y) = (\n                G[0, 0],\n                G[1, 1],\n                G[2, 2],\n                2 * G[1, 2],\n                2 * G[0, 2],\n                2 * G[0, 1],\n            )\n\n            \n            if (\n                abs(E) > B + e\n                or (abs(E - B) < e and 2 * N < Y - e)\n                or (abs(E + B) < e and Y < -e)\n            ):\n                M = [[1, 0, 0], [0, 1, -E / abs(E)], [0, 0, 1]]\n                G = dot(transpose(M), dot(G, M))\n                continue\n\n            \n            if (\n                abs(N) > A + e\n                or (abs(A - N) < e and 2 * E < Y - e)\n                or (abs(A + N) < e and Y < -e)\n            ):\n                M = [[1, 0, -N / abs(N)], [0, 1, 0], [0, 0, 1]]\n                G = dot(transpose(M), dot(G, M))\n                continue\n\n            \n            if (\n                abs(Y) > A + e\n                or (abs(A - Y) < e and 2 * E < N - e)\n                or (abs(A + Y) < e and N < -e)\n            ):\n                M = [[1, -Y / abs(Y), 0], [0, 1, 0], [0, 0, 1]]\n                G = dot(transpose(M), dot(G, M))\n                continue\n\n            \n            if E + N + Y + A + B < -e or (abs(E + N + Y + A + B) < e < Y + (A + N) * 2):\n                M = [[1, 0, 1], [0, 1, 1], [0, 0, 1]]\n                G = dot(transpose(M), dot(G, M))\n         
       continue\n\n            break\n\n        A = G[0, 0]\n        B = G[1, 1]\n        C = G[2, 2]\n        E = 2 * G[1, 2]\n        N = 2 * G[0, 2]\n        Y = 2 * G[0, 1]\n        a = math.sqrt(A)\n        b = math.sqrt(B)\n        c = math.sqrt(C)\n        alpha = math.acos(E / 2 / b / c) / math.pi * 180\n        beta = math.acos(N / 2 / a / c) / math.pi * 180\n        gamma = math.acos(Y / 2 / a / b) / math.pi * 180\n\n        latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)\n\n        mapped = self.find_mapping(latt, e, skip_rotation_matrix=True)\n        if mapped is not None:\n            if np.linalg.det(mapped[0].matrix) > 0:\n                return mapped[0]\n            else:\n                return Lattice(-mapped[0].matrix)\n\n        raise ValueError(\"can't find niggli\")", "docstring": "Get the Niggli reduced lattice using the numerically stable algo\nproposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams,\nActa Crystallographica Section A Foundations of Crystallography, 2003,\n60(1), 1-6. doi:10.1107/S010876730302186X\n\nArgs:\ntol (float): The numerical tolerance. The default of 1e-5 should\nresult in stable behavior for most cases.\n\nReturns:\nNiggli-reduced lattice.", "source": "juraj-google-style"}
{"code": "def GetMountPoint(self, path=None):\n    path = os.path.abspath(client_utils.CanonicalPathToLocalPath((path or self.path)))\n    while (not os.path.ismount(path)):\n        path = os.path.dirname(path)\n    return path", "docstring": "Walk back from the path to find the mount point.\n\nArgs:\npath: a Unicode string containing the path or None. If path is None the\nvalue in self.path is used.\n\nReturns:\npath string of the mount point", "source": "codesearchnet"}
{"code": "def _args_to_val(func, args):\n    from .google_imports import gql\n    vals = []\n    for arg in args:\n        if isinstance(arg, (int, long, basestring)):\n            val = Parameter(arg)\n        elif isinstance(arg, gql.Literal):\n            val = arg.Get()\n        else:\n            raise TypeError(('Unexpected arg (%r)' % arg))\n        vals.append(val)\n    if (func == 'nop'):\n        if (len(vals) != 1):\n            raise TypeError('\"nop\" requires exactly one value')\n        return vals[0]\n    pfunc = ParameterizedFunction(func, vals)\n    if pfunc.is_parameterized():\n        return pfunc\n    else:\n        return pfunc.resolve({}, {})", "docstring": "Helper for GQL parsing to extract values from GQL expressions.\n\nThis can extract the value from a GQL literal, return a Parameter\nfor a GQL bound parameter (:1 or :foo), and interprets casts like\nKEY(...) and plain lists of values like (1, 2, 3).\n\nArgs:\nfunc: A string indicating what kind of thing this is.\nargs: One or more GQL values, each integer, string, or GQL literal.", "source": "codesearchnet"}
{"code": "def _minigui_report_search_status(self, leaves):\n    root = self._player.get_root()\n    msg = {'id': hex(id(root)), 'n': int(root.N), 'q': float(root.Q)}\n    msg['childQ'] = [int(round((q * 1000))) for q in root.child_Q]\n    msg['childN'] = [int(n) for n in root.child_N]\n    ranked_children = root.rank_children()\n    variations = {}\n    for i in ranked_children[:15]:\n        if ((root.child_N[i] == 0) or (i not in root.children)):\n            break\n        c = coords.to_gtp(coords.from_flat(i))\n        child = root.children[i]\n        nodes = child.most_visited_path_nodes()\n        moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes]\n        variations[c] = {'n': int(root.child_N[i]), 'q': float(root.child_Q[i]), 'moves': ([c] + moves)}\n    if leaves:\n        path = []\n        leaf = leaves[0]\n        while (leaf != root):\n            path.append(leaf.fmove)\n            leaf = leaf.parent\n        if path:\n            path.reverse()\n            variations['live'] = {'n': int(root.child_N[path[0]]), 'q': float(root.child_Q[path[0]]), 'moves': [coords.to_gtp(coords.from_flat(m)) for m in path]}\n    if variations:\n        msg['variations'] = variations\n    dbg(('mg-update:%s' % json.dumps(msg, sort_keys=True)))", "docstring": "Prints the current MCTS search status to stderr.\n\nReports the current search path, root node's child_Q, root node's\nchild_N, the most visited path in a format that can be parsed by\none of the STDERR_HANDLERS in minigui.ts.\n\nArgs:\nleaves: list of leaf MCTSNodes returned by tree_search().", "source": "codesearchnet"}
{"code": "def _remove_double_brackets(text):\n  \n\n  def replacement_fn(s):\n    if \":\" in s:\n      \n      return \"\"\n    \n    bar_pos = s.find(\"|\")\n    if bar_pos == -1:\n      return s\n    return s[bar_pos + 1:]\n\n  return _find_and_replace(text, \"[[\", \"]]\", replacement_fn)", "docstring": "Remove double brackets, but leave the viewable text.\n\nArgs:\ntext: a string\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def checksum(self, path):\n    raise NotImplementedError", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nThis operation returns checksum metadata as stored in the underlying\nFileSystem. It should not need to read file data to obtain this value.\nChecksum type and format are FileSystem dependent and are not compatible\nbetween FileSystems.\nFileSystem implementations may return file size if a checksum isn't\navailable.\n\nArgs:\npath: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def short_repr(obj, max_len=40):\n    obj_repr = repr(obj)\n    if (len(obj_repr) <= max_len):\n        return obj_repr\n    return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr))", "docstring": "Returns a short, term-friendly string representation of the object.\n\nArgs:\nobj: An object for which to return a string representation.\nmax_len: Maximum length of the returned string. Longer reprs will be turned\ninto a brief descriptive string giving the type and length of obj.", "source": "codesearchnet"}
{"code": "def _validate_cidr(self, rule):\n    try:\n        network = ipaddress.IPv4Network(rule['app'])\n    except (ipaddress.NetmaskValueError, ValueError) as error:\n        raise SpinnakerSecurityGroupCreationFailed(error)\n    self.log.debug('Validating CIDR: %s', network.exploded)\n    return True", "docstring": "Validate the cidr block in a rule.\n\nReturns:\nTrue: Upon successful completion.\n\nRaises:\nSpinnakerSecurityGroupCreationFailed: CIDR definition is invalid or\nthe network range is too wide.", "source": "codesearchnet"}
{"code": "def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):\n    input_tensors, shape = _flatten_tensors(input_tensors)\n    dst_devices = [t.device for t in input_tensors]\n    reduced_shards = _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op)\n    output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices)\n    if len(shape) != 1:\n        output_tensors = _reshape_tensors(output_tensors, shape)\n    return output_tensors", "docstring": "Construct a subgraph for shuffle all-reduce.\n\nShuffle reduce is essentially the algorithm implemented when using\nparameter servers.  Suppose tensor length is n, there are d devices\nand g gather shards.  Each device sends a n/g length sub-tensor to\neach gather shard.  The gather shards perform a reduction across d\nfragments, then broadcast the result back to each device.  The\ndevices then join the g fully reduced fragments they receive from\nthe shards.  The gather shards could perform d-1 pairwise\nreductions, or one d-way reduction.  The first is better where\nreduction Op time is low compared to transmission time, the second\nbetter in the other case.\n\nArgs:\ninput_tensors: list of `tf.Tensor` values to be reduced.\ngather_devices: list of names of devices on which reduction shards\nshould be placed.\nred_op: an n-array elementwise reduction Op\nun_op: optional elementwise unary Op to be applied to fully-reduced values.\n\nReturns:\nlist of `tf.Tensor` which are the fully reduced tensors.", "source": "github-repos"}
{"code": "def redraw(self, reset_camera=False):\n        \n        self.ren.RemoveAllViewProps()\n        self.picker = None\n        self.add_picker_fixed()\n        self.helptxt_mapper = vtk.vtkTextMapper()\n        tprops = self.helptxt_mapper.GetTextProperty()\n        tprops.SetFontSize(14)\n        tprops.SetFontFamilyToTimes()\n        tprops.SetColor(0, 0, 0)\n\n        if self.structure is not None:\n            self.set_structure(self.structure, reset_camera)\n\n        self.ren_win.Render()", "docstring": "Redraw the render window.\n\nArgs:\nreset_camera: Set to True to reset the camera to a\npre-determined default for each structure.  Defaults to False.", "source": "juraj-google-style"}
{"code": "def from_pb(cls, policy_pb):\n        \n        policy = cls(policy_pb.etag, policy_pb.version)\n\n        for binding in policy_pb.bindings:\n            policy[binding.role] = sorted(binding.members)\n\n        return policy", "docstring": "Factory: create a policy from a protobuf message.\n\nArgs:\npolicy_pb (google.iam.policy_pb2.Policy): message returned by\n``get_iam_policy`` gRPC API.\n\nReturns:\n:class:`Policy`: the parsed policy", "source": "juraj-google-style"}
{"code": "def per_device_batch_size(batch_size, num_gpus):\n    if (num_gpus <= 1):\n        return batch_size\n    remainder = (batch_size % num_gpus)\n    if remainder:\n        err = 'When running with multiple GPUs, batch size must be a multiple of the number of available GPUs. Found {} GPUs with a batch size of {}; try --batch_size={} instead.'.format(num_gpus, batch_size, (batch_size - remainder))\n        raise ValueError(err)\n    return int((batch_size / num_gpus))", "docstring": "For multi-gpu, batch-size must be a multiple of the number of GPUs.\n\nNote that this should eventually be handled by DistributionStrategies\ndirectly. Multi-GPU support is currently experimental, however,\nso doing the work here until that feature is in place.\n\nArgs:\nbatch_size: Global batch size to be divided among devices. This should be\nequal to num_gpus times the single-GPU batch_size for multi-gpu training.\nnum_gpus: How many GPUs are used with DistributionStrategies.\n\nReturns:\nBatch size per device.\n\nRaises:\nValueError: if batch_size is not divisible by number of devices", "source": "codesearchnet"}
{"code": "def parse_addr(addr, *, proto=None, host=None):\n    port = None\n    if isinstance(addr, Address):\n        return addr\n    elif isinstance(addr, str):\n        if addr.startswith('http:\n            (proto, addr) = ('http', addr[7:])\n        if addr.startswith('udp:\n            (proto, addr) = ('udp', addr[6:])\n        elif addr.startswith('tcp:\n            (proto, addr) = ('tcp', addr[6:])\n        elif addr.startswith('unix:\n            (proto, addr) = ('unix', addr[7:])\n        (a, _, b) = addr.partition(':')\n        host = (a or host)\n        port = (b or port)\n    elif isinstance(addr, (tuple, list)):\n        (a, b) = addr\n        host = (a or host)\n        port = (b or port)\n    elif isinstance(addr, int):\n        port = addr\n    else:\n        raise ValueError('bad value')\n    if (port is not None):\n        port = int(port)\n    return Address(proto, host, port)", "docstring": "Parses an address\n\nReturns:\nAddress: the parsed address", "source": "codesearchnet"}
{"code": "def register_array_types_from_sources(self, source_files):\n    for fname in source_files:\n        if is_vhdl(fname):\n            self._register_array_types(self.extract_objects(fname))", "docstring": "Add array type definitions from a file list to internal registry\n\nArgs:\nsource_files (list of str): Files to parse for array definitions", "source": "codesearchnet"}
{"code": "def heightmap_dig_bezier(hm: np.ndarray, px: Tuple[(int, int, int, int)], py: Tuple[(int, int, int, int)], startRadius: float, startDepth: float, endRadius: float, endDepth: float) -> None:\n    lib.TCOD_heightmap_dig_bezier(_heightmap_cdata(hm), px, py, startRadius, startDepth, endRadius, endDepth)", "docstring": "Carve a path along a cubic Bezier curve.\n\nBoth radius and depth can vary linearly along the path.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\npx (Sequence[int]): The 4 `x` coordinates of the Bezier curve.\npy (Sequence[int]): The 4 `y` coordinates of the Bezier curve.\nstartRadius (float): The starting radius size.\nstartDepth (float): The starting depth.\nendRadius (float): The ending radius size.\nendDepth (float): The ending depth.", "source": "codesearchnet"}
{"code": "def _do_refresh_request(self, http):\n        \n        body = self._generate_refresh_request_body()\n        headers = self._generate_refresh_request_headers()\n\n        logger.info('Refreshing access_token')\n        resp, content = transport.request(\n            http, self.token_uri, method='POST',\n            body=body, headers=headers)\n        content = _helpers._from_bytes(content)\n        if resp.status == http_client.OK:\n            d = json.loads(content)\n            self.token_response = d\n            self.access_token = d['access_token']\n            self.refresh_token = d.get('refresh_token', self.refresh_token)\n            if 'expires_in' in d:\n                delta = datetime.timedelta(seconds=int(d['expires_in']))\n                self.token_expiry = delta + _UTCNOW()\n            else:\n                self.token_expiry = None\n            if 'id_token' in d:\n                self.id_token = _extract_id_token(d['id_token'])\n                self.id_token_jwt = d['id_token']\n            else:\n                self.id_token = None\n                self.id_token_jwt = None\n            \n            \n            self.invalid = False\n            if self.store:\n                self.store.locked_put(self)\n        else:\n            \n            \n            logger.info('Failed to retrieve access token: %s', content)\n            error_msg = 'Invalid response {0}.'.format(resp.status)\n            try:\n                d = json.loads(content)\n                if 'error' in d:\n                    error_msg = d['error']\n                    if 'error_description' in d:\n                        error_msg += ': ' + d['error_description']\n                    self.invalid = True\n                    if self.store is not None:\n                        self.store.locked_put(self)\n            except (TypeError, ValueError):\n                pass\n            raise HttpAccessTokenRefreshError(error_msg, status=resp.status)", "docstring": "Refresh the access_token using the refresh_token.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\n\nRaises:\nHttpAccessTokenRefreshError: When the refresh fails.", "source": "juraj-google-style"}
{"code": "def App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n    from parsl.app.python import PythonApp\n    from parsl.app.bash import BashApp\n    logger.warning(\"The 'App' decorator will be deprecated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.\")\n    if (apptype == 'python'):\n        app_class = PythonApp\n    elif (apptype == 'bash'):\n        app_class = BashApp\n    else:\n        raise InvalidAppTypeError(\"Invalid apptype requested {}; must be 'python' or 'bash'\".format(apptype))\n\n    def wrapper(f):\n        return app_class(f, data_flow_kernel=data_flow_kernel, walltime=walltime, cache=cache, executors=executors)\n    return wrapper", "docstring": "The App decorator function.\n\nArgs:\n- apptype (string) : Apptype can be bash|python\n\nKwargs:\n- data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\nmanaging this app. This can be omitted only\nafter calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n- walltime (int) : Walltime for app in seconds,\ndefault=60\n- executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.\n- cache (Bool) : Enable caching of the app call\ndefault=False\n\nReturns:\nA PythonApp or BashApp object, which when called runs the apps through the executor.", "source": "codesearchnet"}
{"code": "def _GetParser(self):\n    parser = optparse.OptionParser()\n    parser.disable_interspersed_args()\n    parser.add_option('-m', '--map', action='append', type='string', dest='maps', help='map to operate on, can be supplied multiple times')\n    return parser", "docstring": "Initialize the argument parser for this command object.\n\nA default parser is initialized which supports common flags.  It\nis expected that Command subclasses extend this and add specific\nflags as needed.\n\nReturns:\nan optparse.OptionParser instance", "source": "github-repos"}
{"code": "def extend(self, key, values, *, section=DataStoreDocumentSection.Data):\n    key_notation = '.'.join([section, key])\n    if (not isinstance(values, list)):\n        return False\n    result = self._collection.update_one({'_id': ObjectId(self._workflow_id)}, {'$push': {key_notation: {'$each': self._encode_value(values)}}, '$currentDate': {'lastModified': True}})\n    return (result.modified_count == 1)", "docstring": "Extends a list in the data store with the elements of values.\n\nArgs:\nkey (str): The key pointing to the value that should be stored/updated.\nIt supports MongoDB's dot notation for nested fields.\nvalues (list): A list of the values that should be used to extend the list\nin the document.\nsection (DataStoreDocumentSection): The section from which the data should\nbe retrieved.\n\nReturns:\nbool: ``True`` if the list in the database could be extended,\notherwise ``False``.", "source": "codesearchnet"}
{"code": "def remove_droplets(self, droplet_ids):\n    return self.get_data(('load_balancers/%s/droplets/' % self.id), type=DELETE, params={'droplet_ids': droplet_ids})", "docstring": "Unassign a LoadBalancer.\n\nArgs:\ndroplet_ids (obj:`list` of `int`): A list of Droplet IDs", "source": "codesearchnet"}
{"code": "def get_and_check_tasks_for(context, task, msg_prefix=''):\n    \n    tasks_for = task['extra']['tasks_for']\n    if tasks_for not in context.config['valid_tasks_for']:\n        raise ValueError(\n            '{}Unknown tasks_for: {}'.format(msg_prefix, tasks_for)\n        )\n    return tasks_for", "docstring": "Given a parent task, return the reason the parent task was spawned.\n\n``.taskcluster.yml`` uses this to know whether to spawn an action,\ncron, or decision task definition.  ``tasks_for`` must be a valid one defined in the context.\n\nArgs:\ntask (dict): the task definition.\nmsg_prefix (str): the string prefix to use for an exception.\n\nRaises:\n(KeyError, ValueError): on failure to find a valid ``tasks_for``.\n\nReturns:\nstr: the ``tasks_for``", "source": "juraj-google-style"}
{"code": "def _clean_out_of_range_indices(labels, num_classes):\n\n    def _labels_is_sparse():\n        \n        return isinstance(labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))\n\n    def _clean_out_of_range(values):\n        \n        return array_ops.where_v2(math_ops.greater_equal(values, num_classes), -1 * array_ops.ones_like(values), values)\n\n    def _clean_labels_out_of_range():\n        \n        if _labels_is_sparse():\n            return type(labels)(indices=labels.indices, values=_clean_out_of_range(labels.values), dense_shape=labels.dense_shape)\n        else:\n            return _clean_out_of_range(labels)\n    max_labels = math_ops.reduce_max(labels.values if _labels_is_sparse() else labels)\n    return cond.cond(math_ops.greater_equal(max_labels, num_classes), _clean_labels_out_of_range, lambda: labels)", "docstring": "Replaces large out-of-range labels by small out-of-range labels.\n\nReplaces any value in `labels` that is greater or equal to `num_classes` by\n-1. Do this conditionally for efficiency in case there are no such values.\n\nArgs:\nlabels: `int64` `Tensor` or `SparseTensor`.\nnum_classes: `int64` scalar `Tensor`.\nReturns:\nAn `int64` `Tensor` or `SparseTensor` as `labels` with indices greater\nor equal to num_classes replaced by -1.", "source": "github-repos"}
{"code": "def furnish(app: web.Application):\n    app_name = app['config']['name']\n    prefix = ('/' + app_name.lstrip('/'))\n    app.router.add_routes(routes)\n    cors_middleware.enable_cors(app)\n    known_resources = set()\n    for route in list(app.router.routes()):\n        if (route.resource in known_resources):\n            continue\n        known_resources.add(route.resource)\n        route.resource.add_prefix(prefix)\n    aiohttp_swagger.setup_swagger(app, swagger_url=(prefix + '/api/doc'), description='', title=f'Brewblox Service \"{app_name}\"', api_version='0.0', contact='development@brewpi.com')\n    LOGGER.info(('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN')))\n    for route in app.router.routes():\n        LOGGER.info(f'Endpoint [{route.method}] {route.resource}')\n    for (name, impl) in app.get(features.FEATURES_KEY, {}).items():\n        LOGGER.info(f'Feature [{name}] {impl}')", "docstring": "Configures Application routes, readying it for running.\n\nThis function modifies routes and resources that were added by calling code,\nand must be called immediately prior to `run(app)`.\n\nArgs:\napp (web.Application):\nThe Aiohttp Application as created by `create_app()`", "source": "codesearchnet"}
{"code": "def __update_time__(self, filename, **kwargs):\n        \n        conn = self.__get_conn__(**kwargs)\n        load_time = XsdDatetime(datetime.datetime.utcnow())\n        conn.update_query(.format(file=filename,\n                             ctime=load_time.sparql,\n                             graph=\"kdr:load_times\"),\n                **kwargs)\n        self.loaded_times[filename] = load_time", "docstring": "updated the mod time for a file saved to the definition_store\n\nArgs:\nfilename: the name of the file", "source": "juraj-google-style"}
{"code": "def detect_encoding(data, encoding=None, fallback='latin1', is_html=False):\n    if encoding:\n        encoding = normalize_codec_name(encoding)\n    bs4_detector = EncodingDetector(data, override_encodings=((encoding,) if encoding else ()), is_html=is_html)\n    candidates = itertools.chain(bs4_detector.encodings, (fallback,))\n    for candidate in candidates:\n        if (not candidate):\n            continue\n        candidate = normalize_codec_name(candidate)\n        if (not candidate):\n            continue\n        if ((candidate == 'ascii') and (fallback != 'ascii')):\n            continue\n        if try_decoding(data, candidate):\n            return candidate\n    raise ValueError('Unable to detect encoding.')", "docstring": "Detect the character encoding of the data.\n\nReturns:\nstr: The name of the codec\n\nRaises:\nValueError: The codec could not be detected. This error can only\noccur if fallback is not a \"lossless\" codec.", "source": "codesearchnet"}
{"code": "def patch_f90_compiler(f90_compiler):\n    from numpy.distutils.fcompiler import gnu\n    if (not isinstance(f90_compiler, gnu.Gnu95FCompiler)):\n        return False\n    f90_compiler.compiler_f77[:] = _update_flags(f90_compiler.compiler_f77, remove_flags=('-Werror',))\n    f90_compiler.compiler_f90[:] = _update_flags(f90_compiler.compiler_f90)", "docstring": "Patch up ``f90_compiler``.\n\nFor now, only updates the flags for ``gfortran``. In this case, it add\nany of ``GFORTRAN_SHARED_FLAGS`` that are missing. In debug mode, it also\nadds any flags in ``GFORTRAN_DEBUG_FLAGS`` and makes sure none of the flags\nin ``GFORTRAN_OPTIMIZE_FLAGS`` are present. In standard mode (\"OPTIMIZE\"),\nmakes sure flags in ``GFORTRAN_OPTIMIZE_FLAGS`` are present and flags in\n``GFORTRAN_DEBUG_FLAGS`` are not.\n\nArgs:\nf90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler\ninstance.", "source": "codesearchnet"}
{"code": "def AddMonths(start_date, months):\n    current_date = start_date\n    i = 0\n    while (i < months):\n        month_days = calendar.monthrange(current_date.year, current_date.month)[1]\n        current_date += timedelta(days=month_days)\n        i += 1\n    return current_date", "docstring": "A simple convenience utility for adding months to a given start date.\n\nThis increments the months by adding the number of days in the current month\nto the current month, for each month.\n\nArgs:\nstart_date: date The date months are being added to.\nmonths: int The number of months to add.\n\nReturns:\nA date equal to the start date incremented by the given number of months.", "source": "codesearchnet"}
{"code": "class Concatenate(Merge):\n\n    def __init__(self, axis=-1, **kwargs):\n        super().__init__(**kwargs)\n        self.axis = axis\n        self.supports_masking = True\n        self._reshape_required = False\n\n    def build(self, input_shape):\n        if len(input_shape) < 1 or not isinstance(input_shape[0], (tuple, list)):\n            raise ValueError(f'A `Concatenate` layer should be called on a list of at least 1 input. Received: input_shape={input_shape}')\n        if all((shape is None for shape in input_shape)):\n            return\n        reduced_inputs_shapes = [list(shape) for shape in input_shape]\n        reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes)\n        shape_set = set()\n        for i in range(len(reduced_inputs_shapes_copy)):\n            concat_axis = self.axis % len(reduced_inputs_shapes_copy[i])\n            for axis, axis_value in enumerate(reduced_inputs_shapes_copy, start=1):\n                if axis != concat_axis and axis_value == 1:\n                    del reduced_inputs_shapes[i][axis]\n            if len(reduced_inputs_shapes[i]) > self.axis:\n                del reduced_inputs_shapes[i][self.axis]\n            shape_set.add(tuple(reduced_inputs_shapes[i]))\n        if len(shape_set) != 1:\n            err_msg = f'A `Concatenate` layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape={input_shape}'\n            ranks = set((len(shape) for shape in shape_set))\n            if len(ranks) != 1:\n                raise ValueError(err_msg)\n            rank, = ranks\n            for axis in range(rank):\n                unique_dims = set((shape[axis] for shape in shape_set if shape[axis] is not None))\n                if len(unique_dims) > 1:\n                    raise ValueError(err_msg)\n\n    def _merge_function(self, inputs):\n        return ops.concatenate(inputs, axis=self.axis)\n\n    def compute_output_shape(self, input_shape):\n        if not isinstance(input_shape, (tuple, list)) or not isinstance(input_shape[0], (tuple, list)):\n            raise ValueError(f'A `Concatenate` layer should be called on a list of inputs. Received: input_shape={input_shape}')\n        input_shapes = input_shape\n        output_shape = list(input_shapes[0])\n        for shape in input_shapes[1:]:\n            if output_shape[self.axis] is None or shape[self.axis] is None:\n                output_shape[self.axis] = None\n                break\n            output_shape[self.axis] += shape[self.axis]\n        return tuple(output_shape)\n\n    def compute_mask(self, inputs, mask=None):\n        if mask is None:\n            return None\n        if not isinstance(mask, (tuple, list)):\n            raise ValueError(f'`mask` should be a list. Received mask={mask}')\n        if not isinstance(inputs, (tuple, list)):\n            raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}')\n        if len(mask) != len(inputs):\n            raise ValueError(f'The lists `inputs` and `mask` should have the same length. 
Received: inputs={inputs} of length {len(inputs)}, and mask={mask} of length {len(mask)}')\n        if all((m is None for m in mask)):\n            return None\n        masks = []\n        for input_i, mask_i in zip(inputs, mask):\n            if mask_i is None:\n                masks.append(ops.ones_like(input_i, dtype='bool'))\n            elif mask_i.ndim < input_i.ndim:\n                masks.append(ops.broadcast_to(ops.expand_dims(mask_i, axis=-1), ops.shape(input_i)))\n            else:\n                masks.append(mask_i)\n        concatenated = ops.concatenate(masks, axis=self.axis)\n        return ops.any(concatenated, axis=-1, keepdims=False)\n\n    def get_config(self):\n        config = {'axis': self.axis}\n        base_config = super().get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "Concatenates a list of inputs.\n\nIt takes as input a list of tensors, all of the same shape except\nfor the concatenation axis, and returns a single tensor that is the\nconcatenation of all inputs.\n\nExamples:\n\n>>> x = np.arange(20).reshape(2, 2, 5)\n>>> y = np.arange(20, 30).reshape(2, 1, 5)\n>>> keras.layers.Concatenate(axis=1)([x, y])\n\nUsage in a Keras model:\n\n>>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n>>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n>>> y = keras.layers.Concatenate()([x1, x2])\n\nArgs:\naxis: Axis along which to concatenate.\n**kwargs: Standard layer keyword arguments.\n\nReturns:\nA tensor, the concatenation of the inputs alongside axis `axis`.", "source": "github-repos"}
{"code": "def convert_acquire(self, shift, instruction):\n        \n        meas_level = self._run_config.get('meas_level', 2)\n\n        command_dict = {\n            'name': 'acquire',\n            't0': shift+instruction.start_time,\n            'duration': instruction.duration,\n            'qubits': [q.index for q in instruction.acquires],\n            'memory_slot': [m.index for m in instruction.mem_slots]\n        }\n        if meas_level == 2:\n            \n            if instruction.command.discriminator:\n                command_dict.update({\n                    'discriminators': [\n                        QobjMeasurementOption(\n                            name=instruction.command.discriminator.name,\n                            params=instruction.command.discriminator.params)\n                    ]\n                })\n            \n            command_dict.update({\n                'register_slot': [regs.index for regs in instruction.reg_slots]\n            })\n        if meas_level >= 1:\n            \n            if instruction.command.kernel:\n                command_dict.update({\n                    'kernels': [\n                        QobjMeasurementOption(\n                            name=instruction.command.kernel.name,\n                            params=instruction.command.kernel.params)\n                    ]\n                })\n        return self._qobj_model(**command_dict)", "docstring": "Return converted `AcquireInstruction`.\n\nArgs:\nshift(int): Offset time.\ninstruction (AcquireInstruction): acquire instruction.\nReturns:\ndict: Dictionary of required parameters.", "source": "juraj-google-style"}
{"code": "def getattr_sdk(attr, name):\n    \n    if inspect.isroutine(attr):\n        if hasattr(attr, '_sdkmeta'):\n            return attr\n    raise AttributeError(name)", "docstring": "Filter SDK attributes\n\nArgs:\nattr(attribute): Attribute as returned by :func:`getattr`.\nname(str): Attribute name.\n\nReturns:\n`attr` if passed.", "source": "juraj-google-style"}
{"code": "def sackin(self, normalize='leaves'):\n        \n        num_nodes_from_root = dict(); sackin = 0; num_leaves = 0\n        for node in self.traverse_preorder():\n            num_nodes_from_root[node] = 1\n            if not node.is_root():\n                num_nodes_from_root[node] += num_nodes_from_root[node.parent]\n            if node.is_leaf():\n                num_nodes_from_root[node] -= 1; sackin += num_nodes_from_root[node]; num_leaves += 1\n        if normalize is None or normalize is False:\n            return sackin\n        elif not isinstance(normalize,str):\n            raise TypeError(\"normalize must be None or a string\")\n        normalize = normalize.lower()\n        if normalize == 'leaves':\n            return float(sackin)/num_leaves\n        elif normalize == 'yule':\n            x = sum(1./i for i in range(2, num_leaves+1))\n            return (sackin - (2*num_leaves*x)) / num_leaves\n        elif normalize == 'pda':\n            return sackin/(num_leaves**1.5)\n        else:\n            raise RuntimeError(\"normalize must be None, 'leaves', 'yule', or 'pda'\")", "docstring": "Compute the Sackin balance index of this ``Tree``\n\nArgs:\n``normalize`` (``str``): How to normalize the Sackin index (if at all)\n\n* ``None`` to not normalize\n\n* ``\"leaves\"`` to normalize by the number of leaves\n\n* ``\"yule\"`` to normalize to the Yule model\n\n* ``\"pda\"`` to normalize to the Proportional to Distinguishable Arrangements model\n\nReturns:\n``float``: Sackin index (either normalized or not)", "source": "juraj-google-style"}
{"code": "def create_elb_dns(self, regionspecific=False):\n    if regionspecific:\n        dns_elb = self.generated.dns()['elb_region']\n    else:\n        dns_elb = self.generated.dns()['elb']\n    dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)\n    zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)\n    self.log.info('Updating Application URL: %s', dns_elb)\n    dns_kwargs = {'dns_name': dns_elb, 'dns_name_aws': dns_elb_aws, 'dns_ttl': self.dns_ttl}\n    for zone_id in zone_ids:\n        self.log.debug('zone_id: %s', zone_id)\n        update_dns_zone_record(self.env, zone_id, **dns_kwargs)\n    return dns_elb", "docstring": "Create dns entries in route53.\n\nArgs:\nregionspecific (bool): The DNS entry should have region on it\nReturns:\nstr: Auto-generated DNS name for the Elastic Load Balancer.", "source": "codesearchnet"}
{"code": "def insert(self, lines=None):\n    for (i, (key, line)) in enumerate(lines.items()):\n        n = (key + i)\n        first_half = self._lines[:n]\n        last_half = self._lines[n:]\n        self._lines = ((first_half + [line]) + last_half)", "docstring": "Insert lines into the editor.\n\nNote:\nTo insert before the first line, use :func:`~exa.core.editor.Editor.preappend`\n(or key 0); to insert after the last line use :func:`~exa.core.editor.Editor.append`.\n\nArgs:\nlines (dict): Dictionary of lines of form (lineno, string) pairs", "source": "codesearchnet"}
{"code": "def get_overlay_gateway(self):\n        \n        urn = \"urn:brocade.com:mgmt:brocade-tunnels\"\n        config = ET.Element(\"config\")\n        ET.SubElement(config, \"overlay-gateway\", xmlns=urn)\n        output = self._callback(config, handler='get_config')\n        result = {}\n        element = ET.fromstring(str(output))\n        for overlayGw in element.iter('{%s}overlay-gateway' % urn):\n            result['name'] = overlayGw.find('{%s}name' % urn).text\n            isactivate = overlayGw.find('{%s}activate' % urn)\n            if isactivate is None:\n                result['activate'] = False\n            else:\n                result['activate'] = True\n\n            gwtype = overlayGw.find('{%s}gw-type' % urn)\n            if gwtype is None:\n                result['gwtype'] = None\n            else:\n                result['gwtype'] = gwtype.text\n\n            attach = overlayGw.find('{%s}attach' % urn)\n            if attach is not None:\n                rbridgeId = attach.find('{%s}rbridge-id' % urn)\n                if rbridgeId is None:\n                    result['attached-rbridgeId'] = None\n                else:\n                    result['attached-rbridgeId'] = rbridgeId.find('{%s}rb-add' % urn).text\n                result['attached-vlan'] = None\n                vlans = []\n                for vlan in attach.iter('{%s}vlan'%urn):\n                    vlans.append(vlan.find('{%s}vid' % urn).text)\n                result['attached-vlan'] = vlans\n\n        return result", "docstring": "Get overlay-gateway name on the switch\nArgs:\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nDictionary containing details of VXLAN Overlay Gateway.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def position_at_fraction(self, fraction):\n    raise NotImplementedError", "docstring": "Returns the position at the given fraction.\n\nGiven a fraction within the range [0.0, 1.0) this method will return the\nposition at the given fraction compared to the position range\n[self.start_position, self.stop_position).\n\n** Thread safety **\n\nMethods of the class ``RangeTracker`` including this method may get invoked\nby different threads, hence must be made thread-safe, e.g. by using a single\nlock object.\n\nArgs:\nfraction: a float value within the range [0.0, 1.0).\nReturns:\na position within the range [self.start_position, self.stop_position).", "source": "github-repos"}
{"code": "def to_routing_header(params):\n    \n    if sys.version_info[0] < 3:\n        \n        return urlencode(params).replace(\"%2F\", \"/\")\n    return urlencode(\n        params,\n        \n        safe=\"/\",\n    )", "docstring": "Returns a routing header string for the given request parameters.\n\nArgs:\nparams (Mapping[str, Any]): A dictionary containing the request\nparameters used for routing.\n\nReturns:\nstr: The routing header string.", "source": "juraj-google-style"}
{"code": "def __init__( self, jumps ):\n        \n        self.jumps = jumps\n        self.p = np.array( [ jump.relative_probability for jump in self.jumps ] )", "docstring": "Initialise a Transitions object.\n\nArgs:\njumps (List(Jump)): List of jumps to be contained in this Transitions object.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def delete_container_instance_group(access_token, subscription_id, resource_group, container_group_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a container group from a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ncontainer_group_name (str): Name of container instance group.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def db_wb010(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_wb010`'.format(value))\n    self._db_wb010 = value", "docstring": "Corresponds to IDD Field `db_wb010`\nmean coincident dry-bulb temperature to\nWet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_wb010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "class FineGrainedFP8Config(QuantizationConfigMixin):\n\n    def __init__(self, activation_scheme: str='dynamic', weight_block_size: Tuple[int, int]=(128, 128), modules_to_not_convert: Optional[List]=None, **kwargs):\n        self.quant_method = QuantizationMethod.FP8\n        self.modules_to_not_convert = modules_to_not_convert\n        self.activation_scheme = activation_scheme\n        self.weight_block_size = weight_block_size\n        self.post_init()\n\n    def post_init(self):\n        \n        self.activation_scheme = self.activation_scheme.lower()\n        if self.activation_scheme not in ['dynamic']:\n            raise ValueError(f'Activation scheme {self.activation_scheme} not supported')\n        if len(self.weight_block_size) != 2:\n            raise ValueError('weight_block_size must be a tuple of two integers')\n        if self.weight_block_size[0] <= 0 or self.weight_block_size[1] <= 0:\n            raise ValueError('weight_block_size must be a tuple of two positive integers')", "docstring": "FineGrainedFP8Config is a configuration class for fine-grained FP8 quantization used mainly for deepseek models.\n\nArgs:\nactivation_scheme (`str`, *optional*, defaults to `\"dynamic\"`):\nThe scheme used for activation, the defaults and only support scheme for now is \"dynamic\".\nweight_block_size (`typing.Tuple[int, int]`, *optional*, defaults to `(128, 128)`):\nThe size of the weight blocks for quantization, default is (128, 128).\nmodules_to_not_convert (`list`, *optional*):\nA list of module names that should not be converted during quantization.", "source": "github-repos"}
{"code": "def old_collective_correlation( self ):\n        \n        if self.has_run:\n            return self.atoms.collective_dr_squared() / float( self.number_of_jumps )\n        else:\n            return None", "docstring": "Returns the collective correlation factor, f_I\n\nArgs:\nNone\n\nReturns:\n(Float): The collective correlation factor, f_I.\n\nNotes:\nThis function assumes that the jump distance between sites has\nbeen normalised to a=1. If the jumps distance is not equal to 1\nthen the value returned by this function should be divided by a^2.\nEven better, use self.collective_correlation", "source": "juraj-google-style"}
{"code": "def validate_all_values_for_key_in_obj(obj, key, validation_fun):\n    \n    for vkey, value in obj.items():\n        if vkey == key:\n            validation_fun(value)\n        elif isinstance(value, dict):\n            validate_all_values_for_key_in_obj(value, key, validation_fun)\n        elif isinstance(value, list):\n            validate_all_values_for_key_in_list(value, key, validation_fun)", "docstring": "Validate value for all (nested) occurrence  of `key` in `obj`\nusing `validation_fun`.\n\nArgs:\nobj (dict): dictionary object.\nkey (str): key whose value is to be validated.\nvalidation_fun (function): function used to validate the value\nof `key`.\n\nRaises:\nValidationError: `validation_fun` will raise this error on failure", "source": "juraj-google-style"}
{"code": "def validate_element(self, value):\n        \n        if not isinstance(value, self.type):\n\n            \n            if isinstance(value, six.integer_types) and self.type == float:\n                return float(value)\n\n            if value is None:\n                if self.required:\n                    raise ValidationError('Required field is missing')\n            else:\n                try:\n                    name = self.name\n                except AttributeError:\n                    raise ValidationError('Expected type %s for %s, '\n                                          'found %s (type %s)' %\n                                          (self.type, self.__class__.__name__,\n                                           value, type(value)))\n                else:\n                    raise ValidationError(\n                        'Expected type %s for field %s, found %s (type %s)' %\n                        (self.type, name, value, type(value)))\n        return value", "docstring": "Validate single element of field.\n\nThis is different from validate in that it is used on individual\nvalues of repeated fields.\n\nArgs:\nvalue: Value to validate.\n\nReturns:\nThe value casted in the expected type.\n\nRaises:\nValidationError if value is not expected type.", "source": "juraj-google-style"}
{"code": "def _validate_device_existence(serials):\n    valid_ad_identifiers = list_adb_devices() + list_adb_devices_by_usb_id() + list_fastboot_devices()\n    for serial in serials:\n        if serial not in valid_ad_identifiers:\n            raise Error(f'Android device serial \"{serial}\" is specified in config but is not reachable.')", "docstring": "Validate that all the devices specified by the configs can be reached.\n\nArgs:\nserials: list of strings, the serials of all the devices that are expected\nto exist.", "source": "github-repos"}
{"code": "def merge_call(self, merge_fn, args=(), kwargs=None):\n    require_replica_context(self)\n    if kwargs is None:\n        kwargs = {}\n    merge_fn = autograph.tf_convert(merge_fn, autograph_ctx.control_status_ctx(), convert_by_default=False)\n    return self._merge_call(merge_fn, args, kwargs)", "docstring": "Merge args across replicas and run `merge_fn` in a cross-replica context.\n\nThis allows communication and coordination when there are multiple calls\nto the step_fn triggered by a call to `strategy.run(step_fn, ...)`.\n\nSee `tf.distribute.Strategy.run` for an explanation.\n\nIf not inside a distributed scope, this is equivalent to:\n\n```\nstrategy = tf.distribute.get_strategy()\nwith cross-replica-context(strategy):\nreturn merge_fn(strategy, *args, **kwargs)\n```\n\nArgs:\nmerge_fn: Function that joins arguments from threads that are given as\nPerReplica. It accepts `tf.distribute.Strategy` object as\nthe first argument.\nargs: List or tuple with positional per-thread arguments for `merge_fn`.\nkwargs: Dict with keyword per-thread arguments for `merge_fn`.\n\nReturns:\nThe return value of `merge_fn`, except for `PerReplica` values which are\nunpacked.", "source": "github-repos"}
{"code": "def apply_product_config(config):\n    \n    cot_product = config['cot_product']\n\n    for key in config:\n        if isinstance(config[key], Mapping) and 'by-cot-product' in config[key]:\n            try:\n                config[key] = config[key]['by-cot-product'][cot_product]\n            except KeyError:\n                raise ConfigError(\"Product {} not specified for key {}\".format(cot_product, key))\n\n    return config", "docstring": "Apply config values that are keyed by `cot_product`.\n\nThis modifies the passed in configuration.\n\nArgs:\nconfig dict: the config to apply cot_product keying too\n\nReturns: dict", "source": "juraj-google-style"}
{"code": "def get_counters(self):\n    with self._lock:\n        return self.counters.values()", "docstring": "Returns the current set of counters.\n\nReturns:\nAn iterable that contains the current set of counters. To make sure that\nmultiple threads can iterate over the set of counters, we return a new\niterable here. Note that the actual set of counters may get modified after\nthis method returns hence the returned iterable may be stale.", "source": "github-repos"}
{"code": "def energy(self, spins, break_aux_symmetry=True):\n    subtheta = self.theta.copy()\n    subtheta.fix_variables(spins)\n    av = next(self._auxvar_counter)\n    auxvars = {v: Symbol('aux{}_{}'.format(av, v), BOOL) for v in subtheta.linear}\n    if (break_aux_symmetry and (av == 0)):\n        self.assertions.update(set(auxvars.values()))\n    trees = self._trees\n    if (not trees):\n        assert ((not subtheta.linear) and (not subtheta.quadratic))\n        return subtheta.offset\n    energy = Plus(self.message(trees, {}, subtheta, auxvars), subtheta.offset)\n    return energy", "docstring": "A formula for the exact energy of Theta with spins fixed.\n\nArgs:\nspins (dict): Spin values for a subset of the variables in Theta.\nbreak_aux_symmetry (bool, optional): Default True. If True, break\nthe aux variable symmetry by setting all aux variable to 1\nfor one of the feasible configurations. If the energy ranges\nare not symmetric then this can make finding models impossible.\n\nReturns:\nFormula for the exact energy of Theta with spins fixed.", "source": "codesearchnet"}
{"code": "def __init__(self, tcex, domain, data_type, mapping=None):\n        \n        self.tcex = tcex\n        self.domain = domain\n        self.data_type = data_type\n        self.mapping = mapping or {'dynamic': False}\n\n        \n\n        \n        if self.tcex.default_args.tc_token is None:\n            raise RuntimeError(\n                'The DataModel TcEx Module requires a Token to interact with the '\n                'ThreatConnect platform.'\n            )\n\n        self._create_index()  \n        self._update_mappings()", "docstring": "Initialize class properties.\n\nArgs:\ntcex ([type]): [description]\ndomain (str): A value of “system”, “organization”, or “local”.\ndata_type (str): A free form type name for the data.\nmapping (dict, optional): Defaults to None. Elasticsearch mappings data.\n\nRaises:\nRuntimeError: [description]", "source": "juraj-google-style"}
{"code": "def has_no_current_path(self, path, **kwargs):\n        \n\n        try:\n            return self.assert_no_current_path(path, **kwargs)\n        except ExpectationNotMet:\n            return False", "docstring": "Checks if the page doesn't have the given path.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nbool: Whether it doesn't match.", "source": "juraj-google-style"}
{"code": "def add_init_container(self,\n                           name,\n                           image,\n                           security_context,\n                           init_environment,\n                           volume_mounts\n                           ):\n        \n        self.init_containers.append(\n            {\n                'name': name,\n                'image': image,\n                'securityContext': security_context,\n                'env': init_environment,\n                'volumeMounts': volume_mounts\n            }\n        )", "docstring": "Adds an init container to the launched pod. useful for pre-\n\nArgs:\nname (str):\nimage (str):\nsecurity_context (dict):\ninit_environment (dict):\nvolume_mounts (dict):\n\nReturns:", "source": "juraj-google-style"}
{"code": "def remove_listener(self, event, listener):\n    with contextlib.suppress(ValueError):\n        self._listeners[event].remove(listener)\n        return True\n    with contextlib.suppress(ValueError):\n        self._once[event].remove(listener)\n        return True\n    return False", "docstring": "Remove a listener from the emitter.\n\nArgs:\nevent (str): The event name on which the listener is bound.\nlistener: A reference to the same object given to add_listener.\n\nReturns:\nbool: True if a listener was removed else False.\n\nThis method only removes one listener at a time. If a listener is\nattached multiple times then this method must be called repeatedly.\nAdditionally, this method removes listeners first from the those\nregistered with 'on' or 'add_listener'. If none are found it continue\nto remove afterwards from those added with 'once'.", "source": "codesearchnet"}
{"code": "def project_and_occlude_texture(texture, surface, angle=DEFAULT_ANGLE):\n    \n    projected_surface = project_surface(surface, angle)\n    projected_surface = _remove_hidden_parts(projected_surface)\n    texture_y = map_texture_to_surface(texture, projected_surface)\n    texture_x, _ = texture\n    return texture_x, texture_y", "docstring": "Projects a texture onto a surface with occluded areas removed.\n\nArgs:\ntexture (texture): the texture to map to the projected surface\nsurface (surface): the surface to project\nangle (float): the angle to project at, in degrees (0 = overhead, 90 = side view)\n\nReturns:\nlayer: A layer.", "source": "juraj-google-style"}
{"code": "def poke_native(getstate):\n\n    def poke(service, objname, obj, container, visited=None, _stack=None):\n        service.pokeNative(objname, getstate(obj), container)\n    return poke", "docstring": "Serializer factory for types which state can be natively serialized.\n\nArguments:\n\ngetstate (callable): takes an object and returns the object's state\nto be passed to `pokeNative`.\n\nReturns:\n\ncallable: serializer (`poke` routine).", "source": "codesearchnet"}
{"code": "def event(self, name, owner=None, **kwargs):\n    return Event(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Event TI object.\n\nArgs:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def to_gpx(self):\n    gpx_segments = []\n    for segment in self.segments:\n        gpx_points = []\n        for point in segment.points:\n            time = ''\n            if point.time:\n                iso_time = point.time.isoformat().split('.')[0]\n                time = ('<time>%s</time>' % iso_time)\n            gpx_points.append((u'<trkpt lat=\"%f\" lon=\"%f\">%s</trkpt>' % (point.lat, point.lon, time)))\n        points = u'\\n\\t\\t\\t'.join(gpx_points)\n        gpx_segments.append((u'\\t\\t<trkseg>\\n\\t\\t\\t%s\\n\\t\\t</trkseg>' % points))\n    segments = u'\\t\\n'.join(gpx_segments)\n    content = [u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>', u'<gpx xmlns:xsi=\"http:\n    return u'\\n'.join(content)", "docstring": "Converts track to a GPX format\n\nUses GPXPY library as an intermediate format\n\nReturns:\nA string with the GPX/XML track", "source": "codesearchnet"}
{"code": "def load_pickled_model(filename, dirname=None):\n    if (dirname is None):\n        pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet')\n        pkg_dirname = os.path.dirname(pkg_filename)\n        dirname = os.path.join(pkg_dirname, 'pickled_models', model_path)\n    filepath = os.path.join(dirname, filename)\n    return joblib.load(filepath)", "docstring": "Load a pickled ``Extractor`` model from disk.\n\nArgs:\nfilename (str): Name of pickled model file under ``dirname``.\ndirname (str): Name of directory on disk containing the pickled model.\nIf None, dragnet's default pickled model directory is used:\n/path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION]\n\nReturns:\n:class:`dragnet.extractor.Extractor`", "source": "codesearchnet"}
{"code": "def get_percentage(a, b, i=False, r=False):\n    if ((i is False) and (r is True)):\n        percentage = round((100.0 * (float(a) / b)), 2)\n    elif (((i is True) and (r is True)) or ((i is True) and (r is False))):\n        percentage = int(round((100 * (float(a) / b))))\n        if (r is False):\n            warnings.warn('If integer is set to True and Round is set to False, you will still get a rounded number if you pass floating point numbers as arguments.')\n    else:\n        percentage = (100.0 * (float(a) / b))\n    return percentage", "docstring": "Finds the percentage of one number over another.\n\nArgs:\na: The number that is a percent, int or float.\n\nb: The base number that a is a percent of, int or float.\n\ni: Optional boolean integer. True if the user wants the result returned as\na whole number. Assumes False.\n\nr: Optional boolean round. True if the user wants the result rounded.\nRounds to the second decimal point on floating point numbers. Assumes False.\n\nReturns:\nThe argument a as a percentage of b. Throws a warning if integer is set to True\nand round is set to False.", "source": "codesearchnet"}
{"code": "def _indicator(self, indicator_data):\n    if isinstance(indicator_data, dict):\n        xid = indicator_data.get('xid')\n    else:\n        xid = indicator_data.xid\n    if (self.indicators.get(xid) is not None):\n        indicator_data = self.indicators.get(xid)\n    elif (self.indicators_shelf.get(xid) is not None):\n        indicator_data = self.indicators_shelf.get(xid)\n    else:\n        self.indicators[xid] = indicator_data\n    return indicator_data", "docstring": "Return previously stored indicator or new indicator.\n\nArgs:\nindicator_data (dict|obj): An Indicator dict or instance of Indicator object.\n\nReturns:\ndict|obj: The new Indicator dict/object or the previously stored dict/object.", "source": "codesearchnet"}
{"code": "def crowding_distance_sort(frontier: List[pg.DNA]) -> List[pg.DNA]:\n    if len(frontier) <= 1:\n        return frontier\n    individual_num = len(frontier)\n    objective_num = len(base.get_fitness(frontier[0]))\n    distances = [0.0] * individual_num\n    dist = [list(range(individual_num)) for i in range(objective_num)]\n    for i in range(objective_num):\n        dist[i] = sorted(dist[i], key=lambda idx: base.get_fitness(frontier[idx])[i])\n        max_value = base.get_fitness(frontier[dist[i][individual_num - 1]])[i]\n        min_value = base.get_fitness(frontier[dist[i][0]])[i]\n        for j in range(individual_num):\n            if j == 0 or j == individual_num - 1:\n                distances[dist[i][j]] = objective_num\n            elif max_value > min_value:\n                distances[dist[i][j]] += (base.get_fitness(frontier[dist[i][j + 1]])[i] - base.get_fitness(frontier[dist[i][j - 1]])[i]) / (max_value - min_value)\n    idx_arr = list(range(individual_num))\n    idx_arr = sorted(idx_arr, key=lambda idx: distances[idx], reverse=True)\n    return [frontier[idx_arr[i]] for i in range(individual_num)]", "docstring": "Algorithm crowding-distance-assignment implementation.\n\nCheck section III B in the original paper.\n\nArgs:\nfrontier: A list of Individual that need to be sorted.\n\nReturns:\nsorted list of the original list.", "source": "github-repos"}
{"code": "def set_icon_file(self, filename, rel=\"icon\"):\n        \n        mimetype, encoding = mimetypes.guess_type(filename)\n        self.add_child(\"favicon\", '<link rel=\"%s\" href=\"%s\" type=\"%s\" />'%(rel, filename, mimetype))", "docstring": "Allows to define an icon for the App\n\nArgs:\nfilename (str): the resource file name (ie. \"/res:myicon.png\")\nrel (str): leave it unchanged (standard \"icon\")", "source": "juraj-google-style"}
{"code": "def add_cmd_handler(self, handler_obj):\n    for field in dir(handler_obj):\n        if field.startswith('cmd_'):\n            cmd = field[4:]\n            fn = getattr(handler_obj, field)\n            if (cmd in self.cmds):\n                print('Replacing {} with {}'.format(_handler_name(self.cmds[cmd]), _handler_name(fn)), file=sys.stderr)\n            self.cmds[cmd] = fn", "docstring": "Registers a new command handler object.\n\nAll methods on `handler_obj` whose name starts with \"cmd_\" are\nregistered as a GTP command. For example, the method cmd_genmove will\nbe invoked when the engine receives a genmove command.\n\nArgs:\nhandler_obj: the handler object to register.", "source": "codesearchnet"}
{"code": "def __getitem__(self, key):\n        \n        \n        if isinstance(key, str):\n            if key not in self.columns:\n                raise AttributeError('Key not in columns.')\n            return [row[key] if key in row else None for row in self.rows]\n        \n        elif isinstance(key, (int, slice)):\n            return self.rows[key]\n        else:\n            raise TypeError('Invalid argument type.')", "docstring": "Get a column or row from the dataset.\n\nArgs:\nkey (str or int): String referencing a column or integer referencing a row\nReturns:\n:class:`list` or :class:`dict`: List of column values or a dict representing a row", "source": "juraj-google-style"}
{"code": "def loader(self, file_name, bad_steps=None, **kwargs):\n        \n        new_tests = []\n        if not os.path.isfile(file_name):\n            self.logger.info(\"Missing file_\\n   %s\" % file_name)\n            return None\n\n        filesize = os.path.getsize(file_name)\n        hfilesize = humanize_bytes(filesize)\n        txt = \"Filesize: %i (%s)\" % (filesize, hfilesize)\n        self.logger.debug(txt)\n\n        \n        temp_dir = tempfile.gettempdir()\n        temp_filename = os.path.join(temp_dir, os.path.basename(file_name))\n        shutil.copy2(file_name, temp_dir)\n\n        self.logger.debug(\"tmp file: %s\" % temp_filename)\n        self.logger.debug(\"HERE WE LOAD THE DATA\")\n\n        data = DataSet()\n        fid = FileID(file_name)\n\n        \n        test_no = 1\n        data.test_no = test_no\n        data.loaded_from = file_name\n\n        \n        data.channel_index = None\n        data.channel_number = None\n        data.creator = None\n        data.item_ID = None\n        data.schedule_file_name = None\n        data.start_datetime = None\n        data.test_ID = None\n        data.test_name = None\n        data.raw_data_files.append(fid)\n\n        \n        self.logger.debug(\"reading raw-data\")\n        self.mpr_data = None\n        self.mpr_log = None\n        self.mpr_settings = None\n\n        self._load_mpr_data(temp_filename, bad_steps)\n        length_of_test = self.mpr_data.shape[0]\n        self.logger.debug(f\"length of test: {length_of_test}\")\n\n        self.logger.debug(\"renaming columns\")\n        self._rename_headers()\n        \n        summary_df = self._create_summary_data()\n\n        if summary_df.empty:\n            txt = \"\\nCould not find any summary (stats-file)!\"\n            txt += \" (summary_df.empty = True)\"\n            txt += \"\\n -> issue make_summary(use_cellpy_stat_file=False)\"\n            warnings.warn(txt)\n\n        data.dfsummary = summary_df\n        data.dfdata = self.mpr_data\n\n        data.raw_data_files_length.append(length_of_test)\n        new_tests.append(data)\n\n        self._clean_up(temp_filename)\n        return new_tests", "docstring": "Loads data from biologics .mpr files.\n\nArgs:\nfile_name (str): path to .res file.\nbad_steps (list of tuples): (c, s) tuples of steps s\n(in cycle c) to skip loading.\n\nReturns:\nnew_tests (list of data objects)", "source": "juraj-google-style"}
{"code": "def apply_activation(\n    books,\n    x,\n    activation,\n    activation_args=(),\n    activation_kwargs=None):\n  \n  if activation is None:\n    return x\n  if activation_kwargs is None:\n    activation_kwargs = {}\n  y = activation(x, *activation_args, **activation_kwargs)\n  if activation in (tf.nn.relu, functions.leaky_relu, functions.softplus):\n    books.add_scalar_summary(\n        tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),\n        '%s/zeros' % y.op.name)\n  elif activation is tf.nn.relu6:\n    books.add_scalar_summary(\n        tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),\n        '%s/zeros' % y.op.name)\n    books.add_scalar_summary(\n        tf.reduce_mean(tf.cast(tf.greater(x, 6.0), tf.float32)),\n        '%s/sixes' % y.op.name)\n  elif activation in (functions.l2_normalize, tf.nn.l2_normalize,\n                      functions.l1_normalize):\n    books.add_scalar_summary(\n        tf.reduce_mean(tf.sqrt(tf.reduce_sum(\n            tf.square(x), 1))), '%s/length' % y.op.name)\n  return y", "docstring": "Returns activation(x, *activation_args, **activation_kwargs).\n\nThis applies the given activation and adds useful summaries specific to the\nactivation.\n\nArgs:\nbooks: The bookkeeper.\nx: The tensor to apply activation to.\nactivation: An activation function.\nactivation_args: Optional additional arguments for the activation.\nactivation_kwargs: Optional keyword args for activation.\nReturns:\nA tensor with activation applied to x.", "source": "juraj-google-style"}
{"code": "def get_bool(self):\n    return self.fdp.ConsumeBool()", "docstring": "Consume a bool.\n\nReturns:\nConsumed a bool based on input bytes and constraints.", "source": "github-repos"}
{"code": "def set_metadata(self, token, data):\n        \n        req = requests.post(self.meta_url(\"metadata/ocp/set/\" + token),\n                            json=data, verify=False)\n\n        if req.status_code != 200:\n            raise RemoteDataUploadError(\n                \"Could not upload metadata: \" + req.json()['message']\n            )\n        return req.json()", "docstring": "Insert new metadata into the OCP metadata database.\n\nArguments:\ntoken (str): Token of the datum to set\ndata (str): A dictionary to insert as metadata. Include `secret`.\n\nReturns:\njson: Info of the inserted ID (convenience) or an error message.\n\nThrows:\nRemoteDataUploadError: If the token is already populated, or if\nthere is an issue with your specified `secret` key.", "source": "juraj-google-style"}
{"code": "def get_multi_dataset(datasets, pmf=None):\n    pmf = (tf.fill([len(datasets)], (1.0 / len(datasets))) if (pmf is None) else pmf)\n    samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets]\n    sample = (lambda _: categorical_case(pmf, samplers))\n    return tf.data.Dataset.from_tensors([]).repeat().map(sample)", "docstring": "Returns a Dataset that samples records from one or more Datasets.\n\nArgs:\ndatasets: A list of one or more Dataset objects to sample from.\npmf: A tensor of shape [len(datasets)], the probabilities to sample each\ndataset with. This tensor is often constructed with the global_step. If\nthis is None, we sample from the datasets uniformly at random.\n\nReturns:\nA Dataset object containing records from multiple datasets. Note that\nbecause this dataset iterates through other datasets it is stateful, thus\nyou will need to call make_initializable_iterator instead of\nmake_one_shot_iterator.", "source": "codesearchnet"}
{"code": "def GetAutomountMap(self, since=None, location=None):\n    if location is None:\n        self.log.error('A location is required to retrieve an automount map!')\n        raise error.EmptyMap\n    autofs_filter = '(objectclass=automount)'\n    return AutomountUpdateGetter(self.conf).GetUpdates(source=self, search_base=location, search_filter=autofs_filter, search_scope='one', since=since)", "docstring": "Return an automount map from this source.\n\nNote that autmount maps are stored in multiple locations, thus we expect\na caller to provide a location.  We also follow the automount spec and\nset our search scope to be 'one'.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\nlocation: Currently a string containing our search base, later we\nmay support hostname and additional parameters.\n\nReturns:\ninstance of AutomountMap", "source": "github-repos"}
{"code": "def uniform_distribution(number_of_nodes):\n    \n    \n    number_of_states = 2 ** number_of_nodes\n    \n    \n    return (np.ones(number_of_states) /\n            number_of_states).reshape([2] * number_of_nodes)", "docstring": "Return the uniform distribution for a set of binary nodes, indexed by state\n(so there is one dimension per node, the size of which is the number of\npossible states for that node).\n\nArgs:\nnodes (np.ndarray): A set of indices of binary nodes.\n\nReturns:\nnp.ndarray: The uniform distribution over the set of nodes.", "source": "juraj-google-style"}
{"code": "def _ensure_list(tensor_or_list):\n    if isinstance(tensor_or_list, (list, tuple)):\n        return (list(tensor_or_list), True)\n    return ([tensor_or_list], False)", "docstring": "Converts the input arg to a list if it is not a list already.\n\nArgs:\ntensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to\nconvert to a list of `Tensor`s.\n\nReturns:\nA tuple of two elements. The first is a Python list of `Tensor`s containing\nthe original arguments. The second is a boolean indicating whether\nthe original argument was a list or tuple already.", "source": "codesearchnet"}
{"code": "def temp_dir(folder=None, delete=True):\n    \n    \n    tempdir = get_temp_dir()\n    if folder:\n        tempdir = join(tempdir, folder)\n    if not exists(tempdir):\n        makedirs(tempdir)\n    try:\n        yield tempdir\n    finally:\n        if delete:\n            rmtree(tempdir)", "docstring": "Get a temporary directory optionally with folder appended (and created if it doesn't exist)\n\nArgs:\nfolder (Optional[str]): Folder to create in temporary folder. Defaults to None.\ndelete (bool): Whether to delete folder on exiting with statement\n\nReturns:\nstr: A temporary directory", "source": "juraj-google-style"}
{"code": "def _apply_shadow_vars(avg_grads):\n        \n        ps_var_grads = []\n        for grad, var in avg_grads:\n            assert var.name.startswith('tower'), var.name\n            my_name = '/'.join(var.name.split('/')[1:])\n            my_name = get_op_tensor_name(my_name)[0]\n            new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype,\n                                    initializer=var.initial_value,\n                                    trainable=True)\n            \n            ps_var_grads.append((grad, new_v))\n        return ps_var_grads", "docstring": "Create shadow variables on PS, and replace variables in avg_grads\nby these shadow variables.\n\nArgs:\navg_grads: list of (grad, var) tuples", "source": "juraj-google-style"}
{"code": "def from_xmrs(cls, xmrs, **kwargs):\n        \n        x = cls()\n        x.__dict__.update(xmrs.__dict__)\n        return x", "docstring": "Facilitate conversion among subclasses.\n\nArgs:\nxmrs (:class:`Xmrs`): instance to convert from; possibly\nan instance of a subclass, such as :class:`Mrs` or\n:class:`Dmrs`\n**kwargs: additional keyword arguments that may be used\nby a subclass's redefinition of :meth:`from_xmrs`.", "source": "juraj-google-style"}
{"code": "def add_item(name, command, system_wide=False):\n    desktop_env = system.get_name()\n    if os.path.isfile(command):\n        command_is_file = True\n        if (not (desktop_env == 'windows')):\n            sp.Popen([('chmod +x %s' % command)], shell=True)\n    if (desktop_env == 'windows'):\n        import winreg\n        if system_wide:\n            startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n        else:\n            startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n        if (not command_is_file):\n            with open(os.path.join(startup_dir, (name + '.bat')), 'w') as f:\n                f.write(command)\n        else:\n            shutil.copy(command, startup_dir)\n    elif (desktop_env == 'mac'):\n        sp.Popen((['launchctl submit -l %s -- %s'] % (name, command)), shell=True)\n    elif (desktop_env == 'unknown'):\n        if system_wide:\n            login_file = '/etc/profile'\n        else:\n            login_file = os.path.expanduser('~/.profile')\n        with open(login_file, 'a') as f:\n            f.write(command)\n    else:\n        try:\n            desktop_file_name = (name + '.desktop')\n            startup_file = os.path.join(get_config_dir('autostart', system_wide=system_wide)[0], desktop_file_name)\n            desktop_str = desktopfile.construct(name=name, exec_=command, additional_opts={'X-GNOME-Autostart-enabled': 'true'})\n            with open(startup_file, 'w') as f:\n                f.write(desktop_str)\n        except:\n            pass", "docstring": "Adds a program to startup.\n\nAdds a program to user startup.\n\nArgs:\nname        (str) : The name of the startup entry.\ncommand     (str) : The command to run.\nsystem_wide (bool): Add to system-wide startup.\n\nNote:\n``system_wide`` requires superuser/admin privileges.", "source": "codesearchnet"}
{"code": "def reduce(self, initial_state, reduce_func, name=None):\n    with ops.name_scope('initial_state'):\n        initial_state = structure.normalize_element(initial_state)\n    state_structure = structure.type_spec_from_value(initial_state)\n    need_to_rerun = True\n    while need_to_rerun:\n        wrapped_func = structured_function.StructuredFunctionWrapper(reduce_func, 'reduce()', input_structure=(state_structure, self.element_spec), add_to_graph=False)\n        output_classes = wrapped_func.output_classes\n        state_classes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), state_structure)\n        for new_state_class, state_class in zip(nest.flatten(output_classes), nest.flatten(state_classes)):\n            if not issubclass(new_state_class, state_class):\n                raise TypeError(f'The element classes for the new state must match the initial state. Expected {state_classes} but got {wrapped_func.output_classes}.')\n        output_types = wrapped_func.output_types\n        state_types = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), state_structure)\n        for new_state_type, state_type in zip(nest.flatten(output_types), nest.flatten(state_types)):\n            if new_state_type != state_type:\n                raise TypeError(f'The element types for the new state must match the initial state. Expected {state_types} but got {wrapped_func.output_types}.')\n        output_shapes = wrapped_func.output_shapes\n        state_shapes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), state_structure)\n        flat_state_shapes = nest.flatten(state_shapes)\n        flat_new_state_shapes = nest.flatten(output_shapes)\n        weakened_state_shapes = [original.most_specific_compatible_shape(new) for original, new in zip(flat_state_shapes, flat_new_state_shapes)]\n        need_to_rerun = False\n        for original_shape, weakened_shape in zip(flat_state_shapes, weakened_state_shapes):\n            if original_shape.ndims is not None and (weakened_shape.ndims is None or original_shape.as_list() != weakened_shape.as_list()):\n                need_to_rerun = True\n                break\n        if need_to_rerun:\n            state_structure = structure.convert_legacy_structure(state_types, nest.pack_sequence_as(state_shapes, weakened_state_shapes), state_classes)\n    reduce_func = wrapped_func.function\n    reduce_func.add_to_graph(ops.get_default_graph())\n    dataset = self._apply_debug_options()\n    metadata = dataset_metadata_pb2.Metadata()\n    if name:\n        metadata.name = _validate_and_encode(name)\n    return structure.from_compatible_tensor_list(state_structure, gen_dataset_ops.reduce_dataset(dataset._variant_tensor, structure.to_tensor_list(state_structure, initial_state), reduce_func.captured_inputs, f=reduce_func, output_shapes=structure.get_flat_tensor_shapes(state_structure), output_types=structure.get_flat_tensor_types(state_structure), metadata=metadata.SerializeToString()))", "docstring": "Reduces the input dataset to a single element.\n\nThe transformation calls `reduce_func` successively on every element of\nthe input dataset until the dataset is exhausted, aggregating information in\nits internal state. The `initial_state` argument is used for the initial\nstate and the final state is returned as the result.\n\n>>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x +\n...   
1).numpy().item()\n5\n>>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x +\n...   y).numpy().item()\n10\n\nArgs:\ninitial_state: An element representing the initial state of the\ntransformation.\nreduce_func: A function that maps `(old_state, input_element)` to\n`new_state`. It must take two arguments and return a new element The\nstructure of `new_state` must match the structure of `initial_state`.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA dataset element corresponding to the final state of the transformation.", "source": "github-repos"}
{"code": "def update_query_params(uri, params):\n    parts = urllib.parse.urlparse(uri)\n    query_params = parse_unique_urlencoded(parts.query)\n    query_params.update(params)\n    new_query = urllib.parse.urlencode(query_params)\n    new_parts = parts._replace(query=new_query)\n    return urllib.parse.urlunparse(new_parts)", "docstring": "Updates a URI with new query parameters.\n\nIf a given key from ``params`` is repeated in the ``uri``, then\nthe URI will be considered invalid and an error will occur.\n\nIf the URI is valid, then each value from ``params`` will\nreplace the corresponding value in the query parameters (if\nit exists).\n\nArgs:\nuri: string, A valid URI, with potential existing query parameters.\nparams: dict, A dictionary of query parameters.\n\nReturns:\nThe same URI but with the new query parameters added.", "source": "codesearchnet"}
{"code": "def return_dict(self):\n    output_dict = {}\n    output_dict['general'] = self._iterate_through_class(self.general.__dict__)\n    output_dict['figure'] = self._iterate_through_class(self.figure.__dict__)\n    if (self.total_plots > 1):\n        trans_dict = {str(i): self._iterate_through_class(axis.__dict__) for (i, axis) in enumerate(self.ax)}\n        output_dict['plot_info'] = trans_dict\n    else:\n        output_dict['plot_info'] = {'0': self._iterate_through_class(self.ax.__dict__)}\n    if self.print_input:\n        print(output_dict)\n    return output_dict", "docstring": "Output dictionary for ``make_plot.py`` input.\n\nIterates through the entire MainContainer class turning its contents\ninto dictionary form. This dictionary becomes the input for ``make_plot.py``.\n\nIf `print_input` attribute is True, the entire dictionary will be printed\nprior to returning the dicitonary.\n\nReturns:\n- **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``.", "source": "codesearchnet"}
{"code": "def GetKeyByScriptHash(self, script_hash):\n        \n        contract = self.GetContract(script_hash)\n        if contract:\n            return self.GetKey(contract.PublicKeyHash)\n        return None", "docstring": "Get the KeyPair belonging to the script hash.\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20) representing the public key.\n\nReturns:\nKeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None", "source": "juraj-google-style"}
{"code": "def get_user_roles(self, user):\n        \n        return self.service.get_user_roles(\n            user, self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Get roles associated with the given user.\n\nArgs:\nuser (string): User name.\n\nReturns:\n(list): List of roles that user has.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def report(self, name, **kwargs):\n    group_obj = Report(name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Report data to Batch object.\n\nArgs:\nname (str): The name for this Group.\nfile_name (str): The name for the attached file for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nfile_content (str;method, kwargs): The file contents or callback method to retrieve\nfile content.\npublish_date (str, kwargs): The publish datetime expression for this Group.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Report.", "source": "codesearchnet"}
{"code": "def dbmin20years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmin20years`'.format(value))\n\n        self._dbmin20years = value", "docstring": "Corresponds to IDD Field `dbmin20years`\n20-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin20years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __call__(self, request: beam.Row, *args, **kwargs):\n    embedded_query = request['text']\n    query = {'size': self.size, 'query': {'knn': {self.vector_field: {'vector': embedded_query, 'k': self.k}}}}\n    results = self.client.search(body=query, index=self.index_name)\n    logger.info('Enrichment_results', results)\n    return (beam.Row(text=embedded_query), beam.Row(docs=results))", "docstring": "Reads a row from the opensearch Vector DB and returns\na `Tuple` of request and response.\n\nArgs:\nrequest: the input `beam.Row` to enrich.", "source": "github-repos"}
{"code": "def _build_key_wrapping_specification(self, value):\n    if (value is None):\n        return None\n    if (not isinstance(value, dict)):\n        raise TypeError('Key wrapping specification must be a dictionary.')\n    encryption_key_info = self._build_encryption_key_information(value.get('encryption_key_information'))\n    mac_signature_key_info = self._build_mac_signature_key_information(value.get('mac_signature_key_information'))\n    key_wrapping_specification = cobjects.KeyWrappingSpecification(wrapping_method=value.get('wrapping_method'), encryption_key_information=encryption_key_info, mac_signature_key_information=mac_signature_key_info, attribute_names=value.get('attribute_names'), encoding_option=value.get('encoding_option'))\n    return key_wrapping_specification", "docstring": "Build a KeyWrappingSpecification struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nKeyWrappingSpecification struct.\n\nReturns:\nKeyWrappingSpecification: a KeyWrappingSpecification struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "codesearchnet"}
{"code": "def memory_write8(self, addr, data, zone=None):\n    return self.memory_write(addr, data, zone, 8)", "docstring": "Writes bytes to memory of a target system.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to write to\ndata (list): list of bytes to write\nzone (str): optional memory zone to access\n\nReturns:\nNumber of bytes written to target.\n\nRaises:\nJLinkException: on memory access error.", "source": "codesearchnet"}
{"code": "def _transform_col(self, x, i):\n        \n        return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0)", "docstring": "Encode one categorical column into labels.\n\nArgs:\nx (pandas.Series): a categorical column to encode\ni (int): column index\n\nReturns:\nx (pandas.Series): a column with labels.", "source": "juraj-google-style"}
{"code": "def remat(f):\n\n    def wrapped(*args, **kwargs):\n        return torch.utils.checkpoint.checkpoint(f, *args, use_reentrant=False)\n    return wrapped", "docstring": "Implementation of rematerialization.\n\nArgs:\nf: The function or operation to rematerialize.\nReturns:\nA function wrapping f that defines a custom gradient, which\nrecomputes f on the backwards pass of a gradient call.", "source": "github-repos"}
{"code": "def creationlog(base, package, stackdepth=_def_stackdepth):\n       \n    @staticmethod\n    def wrapnew(cls, *argl, **argd):\n        global _atdepth_new, _cstack_new, streamlining\n        origstream = None\n        if not (decorating or streamlining):\n            entry, _atdepth_new = _pre_create(cls, _atdepth_new,\n                                              stackdepth, *argl, **argd)\n            _cstack_new.append(cls)\n\n            \n            fqdn = cls.__fqdn__\n            if fqdn in _streamlines and _streamlines[fqdn]:\n                \n                \n                msg.std(\"Streamlining {}.\".format(fqdn), 2)\n                origstream = streamlining\n                streamlining = True\n            \n        try:\n            if six.PY2:\n                result = base.__old__(cls, *argl, **argd)\n            else: \n                \n                \n                \n                \n                if base.__old__ is object.__new__:\n                    result = base.__old__(cls)\n                else:\n                    result = base.__old__(cls, *argl, **argd)\n                    \n        except TypeError: \n            \n            \n            \n            import sys\n            xcls, xerr = sys.exc_info()[0:2]\n            referral = xerr.args[0].split()[-1]\n            if \".__new__()\" in referral:\n                t = eval(referral.split('.')[0])\n                result = t.__new__(cls, *argl, **argd)\n            else:\n                raise\n                result = None\n\n        if result is not None and hasattr(cls, \"__init__\"):\n            try:\n                cls.__init__(result, *argl, **argd)\n            except: \n                print(cls, argl, argd)\n                raise\n        else: \n            msg.err(\"Object initialize failed for {}.\".format(base.__name__))\n\n        \n        \n        if origstream is not None:\n            \n            \n            streamlining = origstream\n            \n        if not (decorating or streamlining):\n            _cstack_new.pop()\n            if len(_cstack_new) == 0:\n                _atdepth_new = False\n            _post_create(_atdepth_new, entry, result)\n                        \n        return result\n    return wrapnew", "docstring": "Decorator for wrapping the creation of class instances that are being logged\nby acorn.\n\nArgs:\nbase: base class used to call __new__ for the construction.\npackage (str): name of (global) package the class belongs to.\nstackdepth (int): if the calling stack is less than this depth, than\ninclude the entry in the log; otherwise ignore it.", "source": "juraj-google-style"}
{"code": "def input_shape(self):\n    return nest.map_structure(backend.int_shape, self.input)", "docstring": "Retrieves the input shape(s) of a layer.\n\nOnly applicable if the layer has exactly one input,\ni.e. if it is connected to one incoming layer, or if all inputs\nhave the same shape.\n\nReturns:\nInput shape, as an integer shape tuple\n(or list of shape tuples, one tuple per input tensor).\n\nRaises:\nAttributeError: if the layer has no defined input_shape.\nRuntimeError: if called in Eager mode.", "source": "github-repos"}
{"code": "def ExpandWindowsPath(cls, path, environment_variables):\n    \n    if environment_variables is None:\n      environment_variables = []\n\n    lookup_table = {}\n    if environment_variables:\n      for environment_variable in environment_variables:\n        attribute_name = environment_variable.name.upper()\n        attribute_value = environment_variable.value\n        if not isinstance(attribute_value, py2to3.STRING_TYPES):\n          continue\n\n        lookup_table[attribute_name] = attribute_value\n\n    path_segments = path.split('\\\\')\n    \n    for index, path_segment in enumerate(list(path_segments)):\n      if (len(path_segment) <= 2 or not path_segment.startswith('%') or\n          not path_segment.endswith('%')):\n        continue\n\n      path_segment_upper_case = path_segment.upper()\n      if path_segment_upper_case.startswith('%%ENVIRON_'):\n        lookup_key = path_segment_upper_case[10:-2]\n      else:\n        lookup_key = path_segment_upper_case[1:-1]\n      path_segment = lookup_table.get(lookup_key, path_segment)\n      path_segment = path_segment.split('\\\\')\n\n      expanded_path_segments = list(path_segments[:index])\n      expanded_path_segments.extend(path_segment)\n      expanded_path_segments.extend(path_segments[index + 1:])\n\n      path_segments = expanded_path_segments\n\n    if cls._IsWindowsDrivePathSegment(path_segments[0]):\n      path_segments[0] = ''\n\n    return '\\\\'.join(path_segments)", "docstring": "Expands a Windows path containing environment variables.\n\nArgs:\npath (str): Windows path with environment variables.\nenvironment_variables (list[EnvironmentVariableArtifact]): environment\nvariables.\n\nReturns:\nstr: expanded Windows path.", "source": "juraj-google-style"}
{"code": "def formula_double_format(afloat, ignore_ones=True, tol=1e-08):\n    if (ignore_ones and (afloat == 1)):\n        return ''\n    elif (abs((afloat - int(afloat))) < tol):\n        return str(int(afloat))\n    else:\n        return str(round(afloat, 8))", "docstring": "This function is used to make pretty formulas by formatting the amounts.\nInstead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4.\n\nArgs:\nafloat (float): a float\nignore_ones (bool): if true, floats of 1 are ignored.\ntol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2\n\nReturns:\nA string representation of the float for formulas.", "source": "codesearchnet"}
{"code": "def singleprint_from_fingerprint_proto(export_dir: str) -> str:\n    try:\n        return fingerprinting_pywrap.SingleprintFromFP(export_dir)\n    except FingerprintException as e:\n        raise ValueError(e) from None", "docstring": "Returns the singleprint of `fingerprint.pb` in `export_dir`.\n\nArgs:\nexport_dir: The directory that contains `fingerprint.pb`.\n\nReturns:\nA string containing the singleprint of `fingerprint.pb` in `export_dir`.\n\nRaises:\nValueError: If a valid singleprint cannot be constructed from\n`fingerprint.pb`.", "source": "github-repos"}
{"code": "def get_template_files(self, template_id, filename):\n    url = (self.TEMPLATE_GET_FILES_URL + template_id)\n    request = self._get_request()\n    return request.get_file(url, filename)", "docstring": "Download a PDF copy of a template's original files\n\nArgs:\n\ntemplate_id (str):  The id of the template to retrieve.\n\nfilename (str):     Filename to save the PDF file to. This should be a full path.\n\nReturns:\nReturns a PDF file", "source": "codesearchnet"}
{"code": "def ref_for_message_type(self, message_type):\n    \n    name = self.__normalized_name(message_type)\n    if name not in self.__schemas:\n      raise KeyError('Message has not been parsed: %s', name)\n    return name", "docstring": "Returns the JSON Schema id for the given message.\n\nArgs:\nmessage_type: protorpc.message.Message class to be parsed.\n\nReturns:\nstring, The JSON Schema id.\n\nRaises:\nKeyError: if the message hasn't been parsed via add_message().", "source": "juraj-google-style"}
{"code": "def VerifyRow(self, parser_mediator, row):\n    \n    \n    \n    \n\n    if row['md5'] != '0' and not self._MD5_RE.match(row['md5']):\n      return False\n\n    \n    for column_name in (\n        'uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime'):\n      column_value = row.get(column_name, None)\n      if not column_value:\n        continue\n\n      try:\n        int(column_value, 10)\n      except (TypeError, ValueError):\n        return False\n\n    return True", "docstring": "Verifies if a line of the file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between\nparsers and other components, such as storage and dfvfs.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def run(self, command, block=True, cwd=None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE):\n        \n        if cwd is None:\n            cwd = self.cwd\n\n        return ShellCommand(command=command, logger=self.logger, block=block, cwd=cwd, stdin=stdin, stdout=stdout, stderr=stderr).run()", "docstring": "Create an instance of :class:`~ShellCommand` and run it\n\nArgs:\ncommand (str): :class:`~ShellCommand`\nblock (bool): See :class:`~ShellCommand`\ncwd (str): Override the runner cwd. Useb by the :class:`~ShellCommand` instance", "source": "juraj-google-style"}
{"code": "def lt(self, other, axis=\"columns\", level=None):\n        \n        return self._binary_op(\"lt\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is less than other.\n\nArgs:\nother: A DataFrame or Series or scalar to compare to.\naxis: The axis to perform the lt over.\nlevel: The Multilevel index level to apply lt over.\n\nReturns:\nA new DataFrame filled with Booleans.", "source": "juraj-google-style"}
{"code": "def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):\n    \n    r_type = None\n\n    if cysparse_type in ['COMPLEX64_t']:\n        r_type = 'FLOAT32_t'\n    elif cysparse_type in ['COMPLEX128_t']:\n        r_type = 'FLOAT64_t'\n    elif cysparse_type in ['COMPLEX256_t']:\n        r_type = 'FLOAT128_t'\n    else:\n        raise TypeError(\"Not a recognized complex type\")\n\n    return r_type", "docstring": "Returns the **real** type for the real or imaginary part of a **real** complex type.\n\nFor instance:\n\nCOMPLEX128_t -> FLOAT64_t\n\nArgs:\ncysparse:", "source": "juraj-google-style"}
{"code": "def barycentric_coords(coords, simplex):\n    \n    coords = np.atleast_2d(coords)\n\n    t = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]\n    all_but_one = np.transpose(\n        np.linalg.solve(t, np.transpose(coords - simplex[-1])))\n    last_coord = 1 - np.sum(all_but_one, axis=-1)[:, None]\n    return np.append(all_but_one, last_coord, axis=-1)", "docstring": "Converts a list of coordinates to barycentric coordinates, given a\nsimplex with d+1 points. Only works for d >= 2.\n\nArgs:\ncoords: list of n coords to transform, shape should be (n,d)\nsimplex: list of coordinates that form the simplex, shape should be\n(d+1, d)\n\nReturns:\na LIST of barycentric coordinates (even if the original input was 1d)", "source": "juraj-google-style"}
{"code": "def last_modified(self) -> str:\n    if ('last_modified' in self.attrs):\n        return self.attrs['last_modified']\n    elif (self.mode == 'r+'):\n        self.attrs['last_modified'] = timestamp()\n        return self.attrs['last_modified']\n    return timestamp()", "docstring": "Return an ISO8601 timestamp indicating when the file was last modified\n\nReturns:\nAn ISO8601 timestamp indicating when the file was last modified\n\nRemarks:\nIf the file has no timestamp, and mode is 'r+', a new timestamp is created and returned.\nOtherwise, the current time in UTC is returned", "source": "codesearchnet"}
{"code": "def with_resource_hints(self, **kwargs):\n    self.get_resource_hints().update(resources.parse_resource_hints(kwargs))\n    return self", "docstring": "Adds resource hints to the :class:`PTransform`.\n\nResource hints allow users to express constraints on the environment where\nthe transform should be executed.  Interpretation of the resource hints is\ndefined by Beam Runners. Runners may ignore the unsupported hints.\n\nArgs:\n**kwargs: key-value pairs describing hints and their values.\n\nRaises:\nValueError: if provided hints are unknown to the SDK. See\n:mod:`apache_beam.transforms.resources` for a list of known hints.\n\nReturns:\nPTransform: A reference to the instance of this particular\n:class:`PTransform` object.", "source": "github-repos"}
{"code": "def _worker(self, constructor, conn):\n    \n    try:\n      env = constructor()\n      while True:\n        try:\n          \n          if not conn.poll(0.1):\n            continue\n          message, payload = conn.recv()\n        except (EOFError, KeyboardInterrupt):\n          break\n        if message == self._ACCESS:\n          name = payload\n          result = getattr(env, name)\n          conn.send((self._RESULT, result))\n          continue\n        if message == self._CALL:\n          name, args, kwargs = payload\n          result = getattr(env, name)(*args, **kwargs)\n          conn.send((self._RESULT, result))\n          continue\n        if message == self._CLOSE:\n          assert payload is None\n          break\n        raise KeyError('Received message of unknown type {}'.format(message))\n    except Exception:  \n      stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))\n      tf.logging.error('Error in environment process: {}'.format(stacktrace))\n      conn.send((self._EXCEPTION, stacktrace))\n    conn.close()", "docstring": "The process waits for actions and sends back environment results.\n\nArgs:\nconstructor: Constructor for the OpenAI Gym environment.\nconn: Connection for communication to the main process.\n\nRaises:\nKeyError: When receiving a message of unknown type.", "source": "juraj-google-style"}
{"code": "def is_all_initialized(self):\n    return (frozenset(self._class_map.keys()) == frozenset(self._instance_map.keys()))", "docstring": "Return whether all the instances have been initialized.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def verbose_ping(dest_addr: str, count: int=4, *args, **kwargs):\n    timeout = kwargs.get('timeout')\n    src = kwargs.get('src')\n    unit = kwargs.setdefault('unit', 'ms')\n    for i in range(count):\n        output_text = \"ping '{}'\".format(dest_addr)\n        output_text += (\" from '{}'\".format(src) if src else '')\n        output_text += ' ... '\n        print(output_text, end='')\n        delay = ping(dest_addr, *args, seq=i, **kwargs)\n        if (delay is None):\n            print(('Timeout > {}s'.format(timeout) if timeout else 'Timeout'))\n        else:\n            print('{value}{unit}'.format(value=int(delay), unit=unit))", "docstring": "Send pings to destination address with the given timeout and display the result.\n\nArgs:\ndest_addr: The destination address. Ex. \"192.168.1.1\"/\"example.com\"\ncount: How many pings should be sent. Default is 4, same as Windows CMD. (default 4)\n*args and **kwargs: And all the other arguments available in ping() except `seq`.\n\nReturns:\nFormatted ping results printed.", "source": "codesearchnet"}
{"code": "def set_scope(self, include=None, exclude=None):\n    if include:\n        self.scope = u'document.querySelector(\"{}\")'.format(u', '.join(include))\n    else:\n        self.scope = 'null'\n    if (exclude is not None):\n        raise NotImplementedError('The argument `exclude` has not been implemented in AxsAuditConfig.set_scope method.')", "docstring": "Sets `scope`, the \"start point\" for the audit.\n\nArgs:\n\ninclude: A list of css selectors specifying the elements that\ncontain the portion of the page that should be audited.\nDefaults to auditing the entire document.\nexclude: This arg is not implemented in this ruleset.\n\nExamples:\n\nTo check only the `div` with id `foo`::\n\npage.a11y_audit.config.set_scope([\"div#foo\"])\n\nTo reset the scope to check the whole document::\n\npage.a11y_audit.config.set_scope()", "source": "codesearchnet"}
{"code": "def update_resharding_callback(self, callback: checkpoint_adapter.ReshardCallback):\n    if not issubclass(checkpoint_adapter.ReshardCallback, type(self.callback)):\n        raise TypeError('Cannot override resharding callback, already set to non trivial.')\n    self.callback = callback", "docstring": "Add a resharding callback to the checkpoint.\n\nThis will be applied to the checkpoint value before being supplied to the\nrestore ops.\n\nArgs:\ncallback: Reshard callback for resharding this checkpoint position. Maybe\nNone.", "source": "github-repos"}
{"code": "def ReadFromFile(self, path):\n    \n    self._definitions = {}\n\n    with open(path, 'r') as file_object:\n      for preset_definition in self._ReadPresetsFromFileObject(file_object):\n        self._definitions[preset_definition.name] = preset_definition", "docstring": "Reads parser and parser plugin presets from a file.\n\nArgs:\npath (str): path of file that contains the the parser and parser plugin\npresets configuration.\n\nRaises:\nMalformedPresetError: if one or more plugin preset definitions are\nmalformed.", "source": "juraj-google-style"}
{"code": "def PushTask(self, task):\n    \n    storage_file_size = getattr(task, 'storage_file_size', None)\n    if not storage_file_size:\n      raise ValueError('Task storage file size not set.')\n\n    if task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY:\n      weight = 1\n    else:\n      weight = storage_file_size\n\n    task.merge_priority = weight\n\n    heap_values = (weight, task)\n    heapq.heappush(self._heap, heap_values)\n    self._task_identifiers.add(task.identifier)", "docstring": "Pushes a task onto the heap.\n\nArgs:\ntask (Task): task.\n\nRaises:\nValueError: if the size of the storage file is not set in the task.", "source": "juraj-google-style"}
{"code": "def ParseMessage(self, parser_mediator, key, date_time, tokens):\n    if (key not in ('failed_connection', 'login', 'opened_connection')):\n        raise ValueError('Unknown grammar key: {0:s}'.format(key))\n    if (key == 'login'):\n        event_data = SSHLoginEventData()\n    elif (key == 'failed_connection'):\n        event_data = SSHFailedConnectionEventData()\n    elif (key == 'opened_connection'):\n        event_data = SSHOpenedConnectionEventData()\n    event_data.address = tokens.get('address', None)\n    event_data.authentication_method = tokens.get('authentication_method', None)\n    event_data.body = tokens.get('body', None)\n    event_data.fingerprint = tokens.get('fingerprint', None)\n    event_data.hostname = tokens.get('hostname', None)\n    event_data.offset = 0\n    event_data.pid = tokens.get('pid', None)\n    event_data.protocol = tokens.get('protocol', None)\n    event_data.port = tokens.get('port', None)\n    event_data.reporter = tokens.get('reporter', None)\n    event_data.severity = tokens.get('severity', None)\n    event_data.username = tokens.get('username', None)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Produces an event from a syslog body that matched one of the grammars.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the matching grammar.\ndate_time (dfdatetime.DateTimeValues): date and time values.\ntokens (dict[str, str]): tokens derived from a syslog message based on\nthe defined grammar.\n\nRaises:\nValueError: If an unknown key is provided.", "source": "codesearchnet"}
{"code": "def make_method(ctx, node, name, params=None, posonly_count=0, kwonly_params=None, return_type=None, self_param=None, varargs=None, kwargs=None, kind=pytd.MethodKind.METHOD):\n\n    def _process_annotation(param):\n        \n        param_type = param.typ\n        if not param_type:\n            return\n        elif isinstance(param_type, cfg.Variable):\n            types = param_type.data\n            if len(types) == 1:\n                annotations[param.name] = types[0].cls\n            else:\n                t = abstract.Union([t.cls for t in types], ctx)\n                annotations[param.name] = t\n        else:\n            annotations[param.name] = param_type\n    params = params or []\n    kwonly_params = kwonly_params or []\n    if kind in (pytd.MethodKind.METHOD, pytd.MethodKind.PROPERTY):\n        self_param = [self_param or Param('self', None, None)]\n    elif kind == pytd.MethodKind.CLASSMETHOD:\n        self_param = [Param('cls', None, None)]\n    else:\n        assert kind == pytd.MethodKind.STATICMETHOD\n        self_param = []\n    annotations = {}\n    params = self_param + params\n    return_param = Param('return', return_type, None) if return_type else None\n    special_params = [x for x in (return_param, varargs, kwargs) if x]\n    for param in special_params + params + kwonly_params:\n        _process_annotation(param)\n    names = lambda xs: tuple((x.name for x in xs))\n    param_names = names(params)\n    kwonly_names = names(kwonly_params)\n    defaults = {x.name: x.default for x in params + kwonly_params if x.default}\n    varargs_name = varargs.name if varargs else None\n    kwargs_name = kwargs.name if kwargs else None\n    ret = abstract.SimpleFunction.build(name=name, param_names=param_names, posonly_count=posonly_count, varargs_name=varargs_name, kwonly_params=kwonly_names, kwargs_name=kwargs_name, defaults=defaults, annotations=annotations, ctx=ctx)\n    ret.signature.check_defaults(ctx)\n    retvar = ret.to_variable(node)\n    if kind in (pytd.MethodKind.METHOD, pytd.MethodKind.PROPERTY):\n        return retvar\n    if kind == pytd.MethodKind.CLASSMETHOD:\n        decorator = ctx.vm.load_special_builtin('classmethod')\n    else:\n        assert kind == pytd.MethodKind.STATICMETHOD\n        decorator = ctx.vm.load_special_builtin('staticmethod')\n    args = function.Args(posargs=(retvar,))\n    return decorator.call(node, func=None, args=args)[1]", "docstring": "Make a method from params.\n\nArgs:\nctx: The context\nnode: Node to create the method variable at\nname: The method name\nparams: Positional params [type: [Param]]\nposonly_count: Number of positional-only parameters\nkwonly_params: Keyword only params [type: [Param]]\nreturn_type: Return type [type: PARAM_TYPES]\nself_param: Self param [type: Param, defaults to self: Any]\nvarargs: Varargs param [type: Param, allows *args to be named and typed]\nkwargs: Kwargs param [type: Param, allows **kwargs to be named and typed]\nkind: The method kind\n\nReturns:\nA new method wrapped in a variable.", "source": "github-repos"}
{"code": "def on_snapshot(self, callback):\n    return Watch.for_query(self, callback, document.DocumentSnapshot, document.DocumentReference)", "docstring": "Monitor the documents in this collection that match this query.\n\nThis starts a watch on this query using a background thread. The\nprovided callback is run on the snapshot of the documents.\n\nArgs:\ncallback(~.firestore.query.QuerySnapshot): a callback to run when\na change occurs.\n\nExample:\nfrom google.cloud import firestore_v1beta1\n\ndb = firestore_v1beta1.Client()\nquery_ref = db.collection(u'users').where(\"user\", \"==\", u'Ada')\n\ndef on_snapshot(docs, changes, read_time):\nfor doc in docs:\nprint(u'{} => {}'.format(doc.id, doc.to_dict()))\n\n# Watch this query\nquery_watch = query_ref.on_snapshot(on_snapshot)\n\n# Terminate this watch\nquery_watch.unsubscribe()", "source": "codesearchnet"}
{"code": "def _parse_ISBN_EAN(details):\n    isbn_ean = _get_td_or_none(details, 'ctl00_ContentPlaceHolder1_tblRowIsbnEan')\n    if (not isbn_ean):\n        return (None, None)\n    ean = None\n    isbn = None\n    if ('/' in isbn_ean):\n        (isbn, ean) = isbn_ean.split('/')\n        isbn = isbn.strip()\n        ean = ean.strip()\n    else:\n        isbn = isbn_ean.strip()\n    if (not isbn):\n        isbn = None\n    return (isbn, ean)", "docstring": "Parse ISBN and EAN.\n\nArgs:\ndetails (obj): HTMLElement containing slice of the page with details.\n\nReturns:\n(ISBN, EAN): Tuple with two string or two None.", "source": "codesearchnet"}
{"code": "def _ParseFileEntryWithParser(self, parser_mediator, parser, file_entry, file_object=None):\n    if (not isinstance(parser, (parsers_interface.FileEntryParser, parsers_interface.FileObjectParser))):\n        raise TypeError('Unsupported parser object type.')\n    parser_mediator.ClearParserChain()\n    reference_count = parser_mediator.resolver_context.GetFileObjectReferenceCount(file_entry.path_spec)\n    parser_mediator.SampleStartTiming(parser.NAME)\n    try:\n        if isinstance(parser, parsers_interface.FileEntryParser):\n            parser.Parse(parser_mediator)\n        elif isinstance(parser, parsers_interface.FileObjectParser):\n            parser.Parse(parser_mediator, file_object)\n        result = self._PARSE_RESULT_SUCCESS\n    except (IOError, dfvfs_errors.BackEndError) as exception:\n        display_name = parser_mediator.GetDisplayName(file_entry)\n        logger.warning('{0:s} unable to parse file: {1:s} with error: {2!s}'.format(parser.NAME, display_name, exception))\n        result = self._PARSE_RESULT_FAILURE\n    except errors.UnableToParseFile as exception:\n        display_name = parser_mediator.GetDisplayName(file_entry)\n        logger.debug('{0:s} unable to parse file: {1:s} with error: {2!s}'.format(parser.NAME, display_name, exception))\n        result = self._PARSE_RESULT_UNSUPPORTED\n    finally:\n        parser_mediator.SampleStopTiming(parser.NAME)\n        parser_mediator.SampleMemoryUsage(parser.NAME)\n        new_reference_count = parser_mediator.resolver_context.GetFileObjectReferenceCount(file_entry.path_spec)\n        if (reference_count != new_reference_count):\n            display_name = parser_mediator.GetDisplayName(file_entry)\n            logger.warning('[{0:s}] did not explicitly close file-object for file: {1:s}.'.format(parser.NAME, display_name))\n    return result", "docstring": "Parses a file entry with a specific parser.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nparser (BaseParser): parser.\nfile_entry (dfvfs.FileEntry): file entry.\nfile_object (Optional[file]): file-like object to parse.\nIf not set the parser will use the parser mediator to open\nthe file entry's default data stream as a file-like object.\n\nReturns:\nint: parse result which is _PARSE_RESULT_FAILURE if the file entry\ncould not be parsed, _PARSE_RESULT_SUCCESS if the file entry\nsuccessfully was parsed or _PARSE_RESULT_UNSUPPORTED when\nUnableToParseFile was raised.\n\nRaises:\nTypeError: if parser object is not a supported parser type.", "source": "codesearchnet"}
{"code": "def auth_proxy(self, method):\n\n    def _proxy(*args, **kwargs):\n        'The actual proxy, which instantiates and authenticates the API.\\n\\n            Args:\\n                *args (mixed): Args to send to class instantiation.\\n                **kwargs (mixed): Kwargs to send to class instantiation.\\n\\n            Returns:\\n                mixed: The result of the authenticated callable.\\n            '\n        return method(self.session, *args, **kwargs)\n    return _proxy", "docstring": "Authentication proxy for API requests.\n\nThis is required because the API objects are naive of ``HelpScout``,\nso they would otherwise be unauthenticated.\n\nArgs:\nmethod (callable): A method call that should be authenticated. It\nshould accept a ``requests.Session`` as its first parameter,\nwhich should be used for the actual API call.\n\nReturns:\nmixed: The results of the authenticated callable.", "source": "codesearchnet"}
{"code": "def mds(means, weights, d):\n    \n    X = dim_reduce(means, weights, d)\n    if X.shape[0]==2:\n        return X.dot(weights)\n    else:\n        return X.T.dot(weights)", "docstring": "Dimensionality reduction using MDS.\n\nArgs:\nmeans (array): genes x clusters\nweights (array): clusters x cells\nd (int): desired dimensionality\n\nReturns:\nW_reduced (array): array of shape (d, cells)", "source": "juraj-google-style"}
{"code": "def release(self, subnets):\n        \n\n        if isinstance(subnets, str) or isinstance(subnets, IPNetwork):\n            subnets = [subnets]\n        subnets_iter = (\n            str(subnet) if isinstance(subnet, IPNetwork) else subnet\n            for subnet in subnets\n        )\n        try:\n            with self._create_lock():\n                for subnet in subnets_iter:\n                    self._release(self.create_lease_object_from_subnet(subnet))\n        except (utils.TimerException, IOError):\n            raise LagoSubnetLeaseLockException(self.path)", "docstring": "Free the lease of the given subnets\n\nArgs:\nsubnets (list of str or netaddr.IPAddress): dotted ipv4 subnet in\nCIDR notation (for example ```192.168.200.0/24```) or IPAddress\nobject.\n\nRaises:\nLagoSubnetLeaseException: If subnet is a str and can't be parsed\nLagoSubnetLeaseLockException:\nIf the lock to self.path can't be acquired.", "source": "juraj-google-style"}
{"code": "def check_attribute_being_used(config_class, attributes, default_value, source_strings):\n    attribute_used = False\n    for attribute in attributes:\n        for modeling_source in source_strings:\n            if f'config.{attribute}' in modeling_source or f'getattr(config, \"{attribute}\"' in modeling_source or f'getattr(self.config, \"{attribute}\"' in modeling_source or ('TextConfig' in config_class.__name__ and f'config.get_text_config().{attribute}' in modeling_source):\n                attribute_used = True\n            elif re.search(f'getattr[ \\\\t\\\\v\\\\n\\\\r\\\\f]*\\\\([ \\\\t\\\\v\\\\n\\\\r\\\\f]*(self\\\\.)?config,[ \\\\t\\\\v\\\\n\\\\r\\\\f]*\"{attribute}\"', modeling_source) is not None:\n                attribute_used = True\n            if attribute_used:\n                break\n        if attribute_used:\n            break\n    attributes_to_allow = ['initializer_range', 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_token_id', 'video_token_id', 'image_seq_length', 'video_seq_length', 'image_size', 'text_config', 'use_cache', 'out_features', 'out_indices', 'sampling_rate', 'use_pretrained_backbone', 'backbone', 'backbone_config', 'use_timm_backbone', 'backbone_kwargs', 'rope_theta', 'partial_rotary_factor', 'pretraining_tp', 'boi_token_id', 'eoi_token_id']\n    attributes_used_in_generation = ['encoder_no_repeat_ngram_size']\n    case_allowed = True\n    if not attribute_used:\n        case_allowed = False\n        for attribute in attributes:\n            if attribute in ['is_encoder_decoder'] and default_value is True:\n                case_allowed = True\n            elif attribute in ['tie_word_embeddings'] and default_value is False:\n                case_allowed = True\n            elif attribute in attributes_to_allow + attributes_used_in_generation:\n                case_allowed = True\n            elif attribute.endswith('_token_id'):\n                case_allowed = True\n            if not case_allowed:\n                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])\n                case_allowed = allowed_cases is True or attribute in allowed_cases\n    return attribute_used or case_allowed", "docstring": "Check if any name in `attributes` is used in one of the strings in `source_strings`\n\nArgs:\nconfig_class (`type`):\nThe configuration class for which the arguments in its `__init__` will be checked.\nattributes (`List[str]`):\nThe name of an argument (or attribute) and its variant names if any.\ndefault_value (`Any`):\nA default value for the attribute in `attributes` assigned in the `__init__` of `config_class`.\nsource_strings (`List[str]`):\nThe python source code strings in the same modeling directory where `config_class` is defined. The file\ncontaining the definition of `config_class` should be excluded.", "source": "github-repos"}
{"code": "def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):\n        \n        \n        countriesdata = cls.countriesdata(use_live=use_live)\n        iso2 = countriesdata['iso2iso3'].get(iso3.upper())\n        if iso2 is not None:\n            return iso2\n\n        if exception is not None:\n            raise exception\n        return None", "docstring": "Get ISO2 from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get ISO2 code\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: ISO2 code", "source": "juraj-google-style"}
{"code": "def preprocess(train_data_path: str, feature_thres: int, val_data_path: typing.Optional[str]=None) -> typing.Tuple[Dataset, typing.List[str], typing.Optional[Dataset]]:\n    features = extract_features(train_data_path, feature_thres)\n    feature_index = dict(((feature, i) for i, feature in enumerate(features)))\n    train_dataset = load_dataset(train_data_path, feature_index)\n    val_dataset = load_dataset(val_data_path, feature_index) if val_data_path else None\n    return (train_dataset, features, val_dataset)", "docstring": "Loads entries and translates them into JAX arrays. The boolean matrix of\nthe input data is represented by row indices and column indices of True values\ninstead of the matrix itself for memory efficiency, assuming the matrix is\nhighly sparse. Row and column indices are not guaranteed to be sorted.\n\nArgs:\ntrain_data_path (str): A file path to the training data file.\nfeature_thres (str): A threshold to filter out features whose number of\noccurances does not exceed the value.\nval_data_path (str, optional): A file path to the validation data file.\n\nReturns:\nA tuple of following items:\n- train_dataset (Dataset): The training dataset.\n- features (List[str]): The list of features.\n- val_dataset (Optional[Dataset]): The validation dataset.\nThis becomes None if val_data_path is None.", "source": "github-repos"}
{"code": "def CEscape(text, as_utf8):\n    Ord = (ord if isinstance(text, six.string_types) else (lambda x: x))\n    if as_utf8:\n        return ''.join((_cescape_utf8_to_str[Ord(c)] for c in text))\n    return ''.join((_cescape_byte_to_str[Ord(c)] for c in text))", "docstring": "Escape a bytes string for use in an ascii protocol buffer.\n\ntext.encode('string_escape') does not seem to satisfy our needs as it\nencodes unprintable characters using two-digit hex escapes whereas our\nC++ unescaping function allows hex escapes to be any length.  So,\n\"\\0011\".encode('string_escape') ends up being \"\\\\x011\", which will be\ndecoded in C++ as a single-character string with char code 0x11.\n\nArgs:\ntext: A byte string to be escaped\nas_utf8: Specifies if result should be returned in UTF-8 encoding\nReturns:\nEscaped string", "source": "codesearchnet"}
{"code": "def linear_add(self, other, scale_factor=1.0):\n        \n        if self.structure != other.structure:\n            raise ValueError(\"Adding or subtraction operations can only be \"\n                             \"performed for volumetric data with the exact \"\n                             \"same structure.\")\n        \n        data = {}\n        for k in self.data.keys():\n            data[k] = self.data[k] + scale_factor * other.data[k]\n        return VolumetricData(self.structure, data, self._distance_matrix)", "docstring": "Method to do a linear sum of volumetric objects. Used by + and -\noperators as well. Returns a VolumetricData object containing the\nlinear sum.\n\nArgs:\nother (VolumetricData): Another VolumetricData object\nscale_factor (float): Factor to scale the other data by.\n\nReturns:\nVolumetricData corresponding to self + scale_factor * other.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    document_type = event_values.get('document_type', None)\n    if document_type:\n      event_values['document_type'] = self._DOC_TYPES.get(\n          document_type, 'UNKNOWN')\n\n    shared = event_values.get('shared', False)\n    if shared:\n      event_values['shared'] = 'Shared'\n    else:\n      event_values['shared'] = 'Private'\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def job(self):\n    if (self._submitter and hasattr(self._submitter, '_job_access')):\n        return self._submitter._job_access()\n    return None", "docstring": "REST binding for the job associated with the submitted build.\n\nReturns:\nJob: REST binding for running job or ``None`` if connection information was not available or no job was submitted.", "source": "codesearchnet"}
{"code": "def _ParseAccountsData(self, account_data):\n    \n    if not account_data:\n      return {}\n    lines = [line for line in account_data.splitlines() if line]\n    user_map = {}\n    for line in lines:\n      if not all(ord(c) < 128 for c in line):\n        self.logger.info('SSH key contains non-ascii character: %s.', line)\n        continue\n      split_line = line.split(':', 1)\n      if len(split_line) != 2:\n        self.logger.info('SSH key is not a complete entry: %s.', split_line)\n        continue\n      user, key = split_line\n      if self._HasExpired(key):\n        self.logger.debug('Expired SSH key for user %s: %s.', user, key)\n        continue\n      if user not in user_map:\n        user_map[user] = []\n      user_map[user].append(key)\n    logging.debug('User accounts: %s.', user_map)\n    return user_map", "docstring": "Parse the SSH key data into a user map.\n\nArgs:\naccount_data: string, the metadata server SSH key attributes data.\n\nReturns:\ndict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.", "source": "juraj-google-style"}
{"code": "def Filter(self, function=None):\n    flat = (lambda x: (x if isinstance(x, str) else ''.join([flat(y) for y in x])))\n    if (function is None):\n        function = (lambda row: bool(flat(row.values)))\n    new_table = self.__class__()\n    new_table._table = [self.header]\n    for row in self:\n        if (function(row) is True):\n            new_table.Append(row)\n    return new_table", "docstring": "Construct Textable from the rows of which the function returns true.\n\n\nArgs:\nfunction: A function applied to each row which returns a bool. If\nfunction is None, all rows with empty column values are\nremoved.\nReturns:\nA new TextTable()\n\nRaises:\nTableError: When an invalid row entry is Append()'d", "source": "codesearchnet"}
{"code": "def paginate(db_query, items_per_page, offset=0, start_page=1):\n    return Paginator(db_query, items_per_page, offset=offset, start_page=start_page)", "docstring": "Instantiates a Paginator instance for database queries.\n\nArgs:\ndb_query: The SQLAlchemy database query to paginate.\nitems_per_page: The desired number of items per page.\noffset: The number of items to skip when paginating.\nstart_page: The number of the first page when reporting on page numbers.", "source": "codesearchnet"}
{"code": "def RawBytesToScriptHash(raw):\n    rawh = binascii.unhexlify(raw)\n    rawhashstr = binascii.unhexlify(bytes(Crypto.Hash160(rawh), encoding='utf-8'))\n    return UInt160(data=rawhashstr)", "docstring": "Get a hash of the provided raw bytes using the ripemd160 algorithm.\n\nArgs:\nraw (bytes): byte array of raw bytes. e.g. b'\\xAA\\xBB\\xCC'\n\nReturns:\nUInt160:", "source": "codesearchnet"}
{"code": "def add_note(path, filename='note.txt'):\n    path = os.path.expanduser(path)\n    assert os.path.isdir(path), '{} is not a valid directory.'.format(path)\n    filepath = os.path.join(path, filename)\n    exists = os.path.isfile(filepath)\n    try:\n        subprocess.call([EDITOR, filepath])\n    except Exception as exc:\n        logger.error('Editing note failed!')\n        raise exc\n    if exists:\n        print('Note updated at:', filepath)\n    else:\n        print('Note created at:', filepath)", "docstring": "Opens a txt file at the given path where user can add and save notes.\n\nArgs:\npath (str): Directory where note will be saved.\nfilename (str): Name of note. Defaults to \"note.txt\"", "source": "codesearchnet"}
{"code": "def create(self, document_data):\n        \n        batch = self._client.batch()\n        batch.create(self, document_data)\n        write_results = batch.commit()\n        return _first_write_result(write_results)", "docstring": "Create the current document in the Firestore database.\n\nArgs:\ndocument_data (dict): Property names and values to use for\ncreating a document.\n\nReturns:\ngoogle.cloud.firestore_v1beta1.types.WriteResult: The\nwrite result corresponding to the committed document. A write\nresult contains an ``update_time`` field.\n\nRaises:\n~google.cloud.exceptions.Conflict: If the document already exists.", "source": "juraj-google-style"}
{"code": "def keep_alive(self):\n    txn_response = self.api.http_request('POST', ('%sfcr:tx' % self.root), data=None, headers=None)\n    if (txn_response.status_code == 204):\n        logger.debug(('continuing transaction: %s' % self.root))\n        self.active = True\n        self.expires = txn_response.headers['Expires']\n        return True\n    elif (txn_response.status_code == 410):\n        logger.debug(('transaction does not exist: %s' % self.root))\n        self.active = False\n        return False\n    else:\n        raise Exception(('HTTP %s, could not continue transaction' % txn_response.status_code))", "docstring": "Keep current transaction alive, updates self.expires\n\nArgs:\nNone\n\nReturn:\nNone: sets new self.expires", "source": "codesearchnet"}
{"code": "def coarse_graining(network, state, internal_indices):\n    \n    max_phi = float('-inf')\n    max_coarse_grain = CoarseGrain((), ())\n\n    for coarse_grain in all_coarse_grains(internal_indices):\n        try:\n            subsystem = MacroSubsystem(network, state, internal_indices,\n                                       coarse_grain=coarse_grain)\n        except ConditionallyDependentError:\n            continue\n\n        phi = compute.phi(subsystem)\n        if (phi - max_phi) > constants.EPSILON:\n            max_phi = phi\n            max_coarse_grain = coarse_grain\n\n    return (max_phi, max_coarse_grain)", "docstring": "Find the maximal coarse-graining of a micro-system.\n\nArgs:\nnetwork (Network): The network in question.\nstate (tuple[int]): The state of the network.\ninternal_indices (tuple[int]): Nodes in the micro-system.\n\nReturns:\ntuple[int, CoarseGrain]: The phi-value of the maximal |CoarseGrain|.", "source": "juraj-google-style"}
{"code": "def _as_node_def_input(self):\n    assert self._op.name\n    if self.value_index == 0:\n        return self._op.name\n    else:\n        return '%s:%d' % (self._op.name, self.value_index)", "docstring": "Return a value to use for the NodeDef \"input\" attribute.\n\nThe returned string can be used in a NodeDef \"input\" attribute\nto indicate that the NodeDef uses this Tensor as input.\n\nRaises:\nValueError: if this Tensor's Operation does not have a name.\n\nReturns:\na string.", "source": "github-repos"}
{"code": "def serve(args):\n    \n    \n    port = args.serve_port or PORT\n    host = \"0.0.0.0\"\n\n    \n    dir_path = Path().absolute()\n    web_dir = dir_path / \"site\"\n\n    \n    utils.set_routes()\n\n    \n    if args.offline:\n        os.environ[\"MKINX_OFFLINE\"] = \"true\"\n        _ = subprocess.check_output(\"mkdocs build > /dev/null\", shell=True)\n        utils.make_offline()\n\n    class MkinxHTTPHandler(SimpleHTTPRequestHandler):\n        \n\n        def translate_path(self, path):\n            \n            location = str(web_dir)\n            route = location\n\n            if len(path) != 0 and path != \"/\":\n                for key, loc in utils.get_routes():\n                    if path.startswith(key):\n                        location = loc\n                        path = path[len(key) :]\n                        break\n\n            if location[-1] == \"/\" or not path or path[0] == \"/\":\n                route = location + path\n            else:\n                route = location + \"/\" + path\n\n            return route.split(\"?\")[0]\n\n    \n    success = False\n    count = 0\n    print(\"Waiting for server port...\")\n    try:\n        while not success:\n            try:\n                httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)\n                success = True\n            except OSError:\n                count += 1\n            finally:\n                if not success and count > 20:\n                    s = \"port {} seems occupied. Try with {} ? (y/n)\"\n                    if \"y\" in input(s.format(port, port + 1)):\n                        port += 1\n                        count = 0\n                    else:\n                        print(\"You can specify a custom port with mkinx serve -s\")\n                        return\n                time.sleep(0.5)\n    except KeyboardInterrupt:\n        print(\"Aborting.\")\n        return\n\n    httpd.allow_reuse_address = True\n    print(\"\\nServing at http:\n    thread = threading.Thread(target=httpd.serve_forever)\n    thread.daemon = True\n    thread.start()\n\n    \n    event_handler = utils.MkinxFileHandler(\n        patterns=[\"*.rst\", \"*.md\", \"*.yml\", \"*.yaml\"]\n    )\n    observer = Observer()\n    observer.schedule(event_handler, path=str(dir_path), recursive=True)\n    observer.start()\n\n    try:\n        while True:\n            time.sleep(1)\n    except KeyboardInterrupt:\n        observer.stop()\n        httpd.server_close()\n    observer.join()", "docstring": "Start a server which will watch .md and .rst files for changes.\nIf a md file changes, the Home Documentation is rebuilt. If a .rst\nfile changes, the updated sphinx project is rebuilt\n\nArgs:\nargs (ArgumentParser): flags from the CLI", "source": "juraj-google-style"}
{"code": "def _BuildParser():\n    parser = argparse.ArgumentParser(prog='yapf', description='Formatter for Python code.')\n    parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(__version__))\n    diff_inplace_quiet_group = parser.add_mutually_exclusive_group()\n    diff_inplace_quiet_group.add_argument('-d', '--diff', action='store_true', help='print the diff for the fixed source')\n    diff_inplace_quiet_group.add_argument('-i', '--in-place', action='store_true', help='make changes to files in place')\n    diff_inplace_quiet_group.add_argument('-q', '--quiet', action='store_true', help='output nothing and set return value')\n    lines_recursive_group = parser.add_mutually_exclusive_group()\n    lines_recursive_group.add_argument('-r', '--recursive', action='store_true', help='run recursively over directories')\n    lines_recursive_group.add_argument('-l', '--lines', metavar='START-END', action='append', default=None, help='range of lines to reformat, one-based')\n    parser.add_argument('-e', '--exclude', metavar='PATTERN', action='append', default=None, help='patterns for files to exclude from formatting')\n    parser.add_argument('--style', action='store', help='specify formatting style: either a style name (for example \"pep8\" or \"google\"), or the name of a file with style settings. The default is pep8 unless a %s or %s or %s file located in the same directory as the source or one of its parent directories (for stdin, the current directory is used).' % (style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML))\n    parser.add_argument('--style-help', action='store_true', help='show style settings and exit; this output can be saved to .style.yapf to make your settings permanent')\n    parser.add_argument('--no-local-style', action='store_true', help=\"don't search for local style definition\")\n    parser.add_argument('-p', '--parallel', action='store_true', help='run YAPF in parallel when formatting multiple files.')\n    parser.add_argument('-m', '--print-modified', action='store_true', help='print out file names of modified files')\n    parser.add_argument('-vv', '--verbose', action='store_true', help='print out file names while processing')\n    parser.add_argument('files', nargs='*', help='reads from stdin when no files are specified.')\n    return parser", "docstring": "Constructs the parser for the command line arguments.\n\nReturns:\nAn ArgumentParser instance for the CLI.", "source": "github-repos"}
{"code": "def group_pairs(pair_list):\n    groupid_to_items = defaultdict(list)\n    for (item, groupid) in pair_list:\n        groupid_to_items[groupid].append(item)\n    return groupid_to_items", "docstring": "Groups a list of items using the first element in each pair as the item and\nthe second element as the groupid.\n\nArgs:\npair_list (list): list of 2-tuples (item, groupid)\n\nReturns:\ndict: groupid_to_items: maps a groupid to a list of items\n\nSeeAlso:\ngroup_items", "source": "codesearchnet"}
{"code": "def store_work_results(self, results, collection, md5):\n        \n\n        \n        results['md5'] = md5\n        results['__time_stamp'] = datetime.datetime.utcnow()\n\n        \n        if 'mod_time' not in results:\n            results['mod_time'] = results['__time_stamp']\n\n        \n        \n        \n        try:\n            self.database[collection].update({'md5':md5}, self.clean_for_storage(results), True)\n        except pymongo.errors.OperationFailure:\n            \n            print 'Could not update exising object in capped collection, punting...'\n            print 'collection: %s md5:%s' % (collection, md5)", "docstring": "Store the output results of the worker.\n\nArgs:\nresults: a dictionary.\ncollection: the database collection to store the results in.\nmd5: the md5 of sample data to be updated.", "source": "juraj-google-style"}
{"code": "def run_pip_command(command_args, pip_version=None, python_version=None):\n    \n    pip_exe, context = find_pip(pip_version, python_version)\n    command = [pip_exe] + list(command_args)\n\n    if context is None:\n        return popen(command)\n    else:\n        return context.execute_shell(command=command, block=False)", "docstring": "Run a pip command.\n\nArgs:\ncommand_args (list of str): Args to pip.\n\nReturns:\n`subprocess.Popen`: Pip process.", "source": "juraj-google-style"}
{"code": "def get_op_or_tensor_by_name(name):\n    \n    G = tfv1.get_default_graph()\n\n    def f(n):\n        if len(n) >= 3 and n[-2] == ':':\n            return G.get_tensor_by_name(n)\n        else:\n            return G.get_operation_by_name(n)\n\n    if not isinstance(name, list):\n        return f(name)\n    else:\n        return list(map(f, name))", "docstring": "Get either tf.Operation of tf.Tensor from names.\n\nArgs:\nname (list[str] or str): names of operations or tensors.\n\nRaises:\nKeyError, if the name doesn't exist", "source": "juraj-google-style"}
{"code": "def process(self, metrics, config):\n    LOG.debug('Process called')\n    for metric in metrics:\n        metric.tags['instance-id'] = config['instance-id']\n    return metrics", "docstring": "Processes metrics.\n\nThis method is called by the Snap deamon during the process phase\nof the execution of a Snap workflow.  Examples of processing metrics\ninclude applying filtering, max, min, average functions as well as\nadding additional context to the metrics to name just a few.\n\nIn this example we are adding a tag called 'context' to every metric.\n\nArgs:\nmetrics (obj:`list` of `snap_plugin.v1.Metric`):\nList of metrics to be processed.\n\nReturns:\n:obj:`list` of `snap_plugin.v1.Metric`:\nList of processed metrics.", "source": "codesearchnet"}
{"code": "def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor:\n    max_rel_dist = int(2 * max(q_size, k_size) - 1)\n    if rel_pos.shape[0] != max_rel_dist:\n        rel_pos_resized = tf.image.resize(tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)), size=(max_rel_dist, rel_pos.shape[1]), method='bilinear')\n        rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist))\n    else:\n        rel_pos_resized = rel_pos\n    q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0)\n    k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0)\n    relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n    return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32))", "docstring": "Get relative positional embeddings according to the relative positions of\nquery and key sizes.\n\nArgs:\nq_size (int):\nsize of the query.\nk_size (int):\nsize of key k.\nrel_pos (`tf.Tensor`):\nrelative position embeddings (L, channel).\n\nReturns:\nExtracted positional embeddings according to relative positions.", "source": "github-repos"}
{"code": "def find_by_or(cls, payload):\n    if (not isinstance(payload, dict)):\n        raise ValueError(\"The 'payload' parameter must be provided a dictionary object.\")\n    url = os.path.join(cls.URL, 'find_by_or')\n    payload = {'find_by_or': payload}\n    cls.debug_logger.debug('Searching Pulsar {} for {}'.format(cls.__name__, json.dumps(payload, indent=4)))\n    res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)\n    cls.write_response_html_to_file(res, 'bob.html')\n    if res:\n        try:\n            res = res[cls.MODEL_NAME]\n        except KeyError:\n            pass\n    return res", "docstring": "Searches the model in question by OR joining the query parameters.\n\nImplements a Railsy way of looking for a record using a method by the same name and passing\nin the query as a string (for the OR operator joining to be specified).\n\nOnly the first hit is returned, and there is not particular ordering specified in the server-side\nAPI method.\n\nArgs:\npayload: `dict`. The attributes of a record to search for by using OR operator joining\nfor each query parameter.\n\nReturns:\n`dict`: The JSON serialization of the record, if any, found by the API call.\n`None`: If the API call didnt' return any results.", "source": "codesearchnet"}
{"code": "def _is_working_path(dom, path, element):\n\n    def i_or_none(el, i):\n        '\\n        Return ``el[i]`` if the list is not blank, or None otherwise.\\n\\n        Args:\\n            el (list, tuple): Any indexable object.\\n            i (int): Index.\\n\\n        Returns:\\n            obj: Element at index `i` if `el` is not blank, or ``None``.\\n        '\n        if (not el):\n            return None\n        return el[i]\n    path_functions = {'find': (lambda el, index, params: i_or_none(el.find(*params), index)), 'wfind': (lambda el, index, params: i_or_none(el.wfind(*params).childs, index)), 'match': (lambda el, index, params: i_or_none(el.match(*params), index)), 'left_neighbour_tag': (lambda el, index, neigh_data: i_or_none(el.find(neigh_data.tag_name, neigh_data.params, fn=utils.has_neigh(*neigh_data.fn_params, left=True)), index)), 'right_neighbour_tag': (lambda el, index, neigh_data: i_or_none(el.find(neigh_data.tag_name, neigh_data.params, fn=utils.has_neigh(*neigh_data.fn_params, left=False)), index))}\n    el = None\n    if isinstance(path, PathCall):\n        el = path_functions[path.call_type](dom, path.index, path.params)\n    elif isinstance(path, Chained):\n        for path in path.chain:\n            dom = path_functions[path.call_type](dom, path.index, path.params)\n            if (not dom):\n                return False\n        el = dom\n    else:\n        raise UserWarning(('Unknown type of path parameters! (%s)' % str(path)))\n    if (not el):\n        return False\n    return (el.getContent().strip() == element.getContent().strip())", "docstring": "Check whether the path is working or not.\n\nAply proper search function interpreting `path` to `dom` and check, if\nreturned object is `element`. If so, return ``True``, otherwise ``False``.\n\nArgs:\ndom (obj): HTMLElement DOM.\npath (obj): :class:`.PathCall` Instance containing informations about\npath and which function it require to obtain element the\npath is pointing to.\nelement (obj): HTMLElement instance used to decide whether `path`\npoints to correct `element` or not.\n\nReturns:\nbool: True if `path` correctly points to proper `element`.", "source": "codesearchnet"}
{"code": "def delete(self, teamId):\n        \n        check_type(teamId, basestring, may_be_none=False)\n\n        \n        self._session.delete(API_ENDPOINT + '/' + teamId)", "docstring": "Delete a team.\n\nArgs:\nteamId(basestring): The ID of the team to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"}
{"code": "def get_repeated_from_extensions(extension_list: List[message.Message], repeated_cls: Type[_T]) -> List[_T]:\n    result = []\n    if not extension_list:\n        return result\n    url = annotation_utils.get_structure_definition_url(repeated_cls.DESCRIPTOR)\n    for extension in extension_list:\n        if cast(Any, extension).url.value == url:\n            msg = extension_to_message(extension, repeated_cls)\n            result.append(msg)\n    return result", "docstring": "Extracts matching extensions from extension_list and serializes to protos.\n\nArgs:\nextension_list: The list of FHIR extensions to examine.\nrepeated_cls: The type of message to serialize to.\n\nReturns:\nA list of protos of instance repeated_cls representing the extensions within\nextension_list.", "source": "github-repos"}
{"code": "def is_symbolic_tensor(tensor) -> bool:\n    return isinstance(tensor, SymbolicTensor)", "docstring": "Test if `tensor` is a symbolic Tensor.\n\nArgs:\ntensor: a tensor-like object\n\nReturns:\nTrue if `tensor` is a symbolic tensor (not an eager tensor).", "source": "github-repos"}
{"code": "def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'):\n    total_pages = int(math.ceil((record_count / limit)))\n    next_cond = ((limit + offset) <= record_count)\n    prev_cond = (offset >= limit)\n    next_page = ((base_uri + page_nav_tpl.format(limit, (offset + limit))) if next_cond else None)\n    prev_page = ((base_uri + page_nav_tpl.format(limit, (offset - limit))) if prev_cond else None)\n    return OrderedDict([('total_count', record_count), ('total_pages', total_pages), ('next_page', next_page), ('prev_page', prev_page)])", "docstring": "Compute pagination info for collection filtering.\n\nArgs:\nlimit (int): Collection filter limit.\noffset (int): Collection filter offset.\nrecord_count (int): Collection filter total record count.\nbase_uri (str): Collection filter base uri (without limit, offset)\npage_nav_tpl (str): Pagination template.\n\nReturns:\nA mapping of pagination info.", "source": "codesearchnet"}
{"code": "def find_subclasses_in_module(base_classes, module):\n    subclasses = []\n    for _, module_member in module.__dict__.items():\n        if inspect.isclass(module_member):\n            for base_class in base_classes:\n                if issubclass(module_member, base_class):\n                    subclasses.append(module_member)\n    return subclasses", "docstring": "Finds the subclasses of the given classes in the given module.\n\nArgs:\nbase_classes: list of classes, the base classes to look for the\nsubclasses of in the module.\nmodule: module, the module to look for the subclasses in.\n\nReturns:\nA list of all of the subclasses found in the module.", "source": "github-repos"}
{"code": "def _get_value_type(cls, value):\n    type_ = cls.typeDict.get(type(value))\n    if type_ is None:\n        type_ = 'CLASS' if inspect.isclass(value) else None\n    if type_ is None and value is None:\n        type_ = 'STRING'\n    return type_", "docstring": "Infers the type of a given value.\n\nArgs:\nvalue: The value whose type needs to be inferred. For 'DURATION' and\n'TIMESTAMP', the corresponding Python type is datetime.timedelta and\ndatetime.datetime respectively. For Python classes, the API type is\njust 'STRING' at the moment.\n\nReturns:\nOne of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or\n'TIMESTAMP', depending on the type of the value.", "source": "github-repos"}
{"code": "def __init__(self, server_port, dump_dir, toggle_watch_on_core_metadata=None):\n    self.core_metadata_json_strings = []\n    self.partition_graph_defs = []\n    self.debug_tensor_values = collections.defaultdict(list)\n    self._initialize_toggle_watch_state(toggle_watch_on_core_metadata)\n    grpc_debug_server.EventListenerBaseServicer.__init__(self, server_port, functools.partial(EventListenerTestStreamHandler, dump_dir, self))\n    self._call_types = []\n    self._call_keys = []\n    self._origin_stacks = []\n    self._origin_id_to_strings = []\n    self._graph_tracebacks = []\n    self._graph_versions = []\n    self._source_files = []", "docstring": "Constructor of EventListenerTestServicer.\n\nArgs:\nserver_port: (int) The server port number.\ndump_dir: (str) The root directory to which the data files will be\ndumped. If empty or None, the received debug data will not be dumped\nto the file system: they will be stored in memory instead.\ntoggle_watch_on_core_metadata: A list of\n(node_name, output_slot, debug_op) tuples to toggle the\nwatchpoint status during the on_core_metadata calls (optional).", "source": "github-repos"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An ALBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def set_servo_angle(self, goalangle, goaltime, led):\n        \n        if (self.servomodel==0x06) or (self.servomodel == 0x04):\n            goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)\n        else:\n            goalposition = scale(goalangle, -150, 150, 21, 1002)\n\n        self.set_servo_position(goalposition, goaltime, led)", "docstring": "Sets the servo angle (in degrees)\n\nEnable torque using torque_on function before calling this\n\nArgs:\ngoalangle (int): The desired angle in degrees, range -150 to 150\ngoaltime (int): the time taken to move from present\nposition to goalposition\nled (int): the LED color\n0x00 LED off\n0x04 GREEN\n0x08 BLUE\n0x10 RED", "source": "juraj-google-style"}
{"code": "def get_request_message(cls, remote_info):  \n    \n    if remote_info in cls.__remote_info_cache:\n      return cls.__remote_info_cache[remote_info]\n    else:\n      return remote_info.request_type()", "docstring": "Gets request message or container from remote info.\n\nArgs:\nremote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding\nto a method.\n\nReturns:\nEither an instance of the request type from the remote or the\nResourceContainer that was cached with the remote method.", "source": "juraj-google-style"}
{"code": "def _initial_population_gsa(population_size, solution_size, lower_bounds, upper_bounds):\n    if ((len(lower_bounds) != solution_size) or (len(upper_bounds) != solution_size)):\n        raise ValueError('Lower and upper bounds much have a length equal to the problem size.')\n    return common.make_population(population_size, common.random_real_solution, solution_size, lower_bounds, upper_bounds)", "docstring": "Create a random initial population of floating point values.\n\nArgs:\npopulation_size: an integer representing the number of solutions in the population.\nproblem_size: the number of values in each solution.\nlower_bounds: a list, each value is a lower bound for the corresponding\npart of the solution.\nupper_bounds: a list, each value is a upper bound for the corresponding\npart of the solution.\n\nReturns:\nlist; A list of random solutions.", "source": "codesearchnet"}
{"code": "def get_node_details(self, node_id: list) -> dict:\n        \n        \n        if not self._manager:\n            raise RuntimeError('Only the Swarm manager node can '\n                               'retrieve node details.')\n\n        node = self._client.nodes.get(node_id)\n        return node.attrs", "docstring": "Get details of a node.\n\nOnly the manager nodes can retrieve details of a node\n\nArgs:\nnode_id (list): List of node ID\n\nReturns:\ndict, details of the node", "source": "juraj-google-style"}
{"code": "def from_celery(cls, broker_dict):\n    return BrokerStats(hostname=broker_dict['hostname'], port=broker_dict['port'], transport=broker_dict['transport'], virtual_host=broker_dict['virtual_host'])", "docstring": "Create a BrokerStats object from the dictionary returned by celery.\n\nArgs:\nbroker_dict (dict): The dictionary as returned by celery.\n\nReturns:\nBrokerStats: A fully initialized BrokerStats object.", "source": "codesearchnet"}
{"code": "def simple_repr(obj: Any, attrnames: List[str],\n                with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:\n    \n    elements = [\"{}={}\".format(name, repr(getattr(obj, name)))\n                for name in attrnames]\n    return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)", "docstring": "Convenience function for :func:`__repr__`.\nWorks its way through a list of attribute names, and creates a ``repr()``\nrepresentation assuming that parameters to the constructor have the same\nnames.\n\nArgs:\nobj: object to display\nattrnames: names of attributes to include\nwith_addr: include the memory address of ``obj``\njoiner: string with which to join the elements\n\nReturns:\nstring: :func:`repr`-style representation", "source": "juraj-google-style"}
{"code": "def remove_hallucinated_references(self, text: str) -> str:\n    lines = text.split('\\n')\n    if len(lines) == 0:\n        return ''\n    clean_lines = remove_numbers(lines)\n    slices = get_slices(lines, clean_lines)\n    to_delete = []\n    for slice in slices:\n        to_delete.append(remove_slice_from_lines(lines, clean_lines, slice))\n    for to_delete in reversed(to_delete):\n        text = text.replace(to_delete, '\\n\\n[MISSING_PAGE_POST]\\n\\n')\n    text = re.sub('\n    return text", "docstring": "Remove hallucinated or missing references from the text.\n\nThis function identifies and removes references that are marked as missing or hallucinated from the input text.\n\nArgs:\ntext (`str`):\nThe input text containing references.\n\nReturns:\n`str`: The text with hallucinated references removed.", "source": "github-repos"}
{"code": "def create(configs):\n    if not configs:\n        raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)\n    elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN:\n        ads = get_all_instances()\n    elif not isinstance(configs, list):\n        raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)\n    elif isinstance(configs[0], dict):\n        ads = get_instances_with_configs(configs)\n    elif isinstance(configs[0], str):\n        ads = get_instances(configs)\n    else:\n        raise Error('No valid config found in: %s' % configs)\n    _start_services_on_ads(ads)\n    return ads", "docstring": "Creates AndroidDevice controller objects.\n\nArgs:\nconfigs: Represents configurations for Android devices, this can take one of\nthe following forms:\n* str, only asterisk symbol is accepted, indicating that all connected\nAndroid devices will be used\n* A list of dict, each representing a configuration for an Android device.\n* A list of str, each representing the serial number of Android device.\n\nReturns:\nA list of AndroidDevice objects.", "source": "github-repos"}
{"code": "def fastcc(model, epsilon, solver):\n    reaction_set = set(model.reactions)\n    subset = set((reaction_id for reaction_id in reaction_set if (model.limits[reaction_id].lower >= 0)))\n    logger.info('Checking {} irreversible reactions...'.format(len(subset)))\n    logger.debug('|J| = {}, J = {}'.format(len(subset), subset))\n    p = FastcoreProblem(model, solver, epsilon=epsilon)\n    p.lp7(subset)\n    consistent_subset = set((reaction_id for reaction_id in model.reactions if (abs(p.get_flux(reaction_id)) >= (0.999 * epsilon))))\n    logger.debug('|A| = {}, A = {}'.format(len(consistent_subset), consistent_subset))\n    for reaction in (subset - consistent_subset):\n        (yield reaction)\n    subset = ((reaction_set - subset) - consistent_subset)\n    logger.info('Checking reversible reactions...')\n    logger.debug('|J| = {}, J = {}'.format(len(subset), subset))\n    flipped = False\n    singleton = False\n    while (len(subset) > 0):\n        logger.info('{} reversible reactions left to check...'.format(len(subset)))\n        if singleton:\n            reaction = next(iter(subset))\n            subset_i = {reaction}\n            logger.debug('LP3 on {}'.format(subset_i))\n            p.maximize({reaction: ((- 1) if p.is_flipped(reaction) else 1)})\n        else:\n            subset_i = subset\n            logger.debug('LP7 on {}'.format(subset_i))\n            p.lp7(subset_i)\n        consistent_subset.update((reaction_id for reaction_id in subset if abs((p.get_flux(reaction_id) >= (0.999 * epsilon)))))\n        logger.debug('|A| = {}, A = {}'.format(len(consistent_subset), consistent_subset))\n        if (not subset.isdisjoint(consistent_subset)):\n            subset -= consistent_subset\n            logger.debug('|J| = {}, J = {}'.format(len(subset), subset))\n            flipped = False\n        else:\n            subset_rev_i = (subset_i & model.reversible)\n            if (flipped or (len(subset_rev_i) == 0)):\n                flipped = False\n                if singleton:\n                    subset -= subset_rev_i\n                    for reaction in subset_rev_i:\n                        logger.info('Inconsistent: {}'.format(reaction))\n                        (yield reaction)\n                else:\n                    singleton = True\n            else:\n                p.flip(subset_rev_i)\n                flipped = True\n                logger.info('Flipped {} reactions'.format(len(subset_rev_i)))", "docstring": "Check consistency of model reactions.\n\nYield all reactions in the model that are not part of the consistent\nsubset.\n\nArgs:\nmodel: :class:`MetabolicModel` to solve.\nepsilon: Flux threshold value.\nsolver: LP solver instance to use.", "source": "codesearchnet"}
{"code": "def take_reference_screenshot(webdriver, file_name):\n        \n        folder_location = os.path.join(ProjectUtils.get_project_root(),\n                                       WebScreenShotUtil.REFERENCE_SCREEN_SHOT_LOCATION)\n\n        WebScreenShotUtil.__capture_screenshot(\n            webdriver, folder_location, file_name + \".png\")", "docstring": "Captures a screenshot as a reference screenshot.\n\nArgs:\nwebdriver (WebDriver) - Selenium webdriver.\nfile_name (str) - File name to save screenshot as.", "source": "juraj-google-style"}
{"code": "def get_course_current_grades(self, course_id):\n    resp = self.requester.get(urljoin(self.base_url, '/api/grades/v1/courses/{course_key}/'.format(course_key=course_id)))\n    resp.raise_for_status()\n    resp_json = resp.json()\n    if ('results' in resp_json):\n        grade_entries = [CurrentGrade(entry) for entry in resp_json['results']]\n        while (resp_json['next'] is not None):\n            resp = self.requester.get(resp_json['next'])\n            resp.raise_for_status()\n            resp_json = resp.json()\n            grade_entries.extend((CurrentGrade(entry) for entry in resp_json['results']))\n    else:\n        grade_entries = [CurrentGrade(entry) for entry in resp_json]\n    return CurrentGradesByCourse(grade_entries)", "docstring": "Returns a CurrentGradesByCourse object for all users in the specified course.\n\nArgs:\ncourse_id (str): an edX course ids.\n\nReturns:\nCurrentGradesByCourse: object representing the student current grades\n\nAuthorization:\nThe authenticated user must have staff permissions to see grades for all users\nin a course.", "source": "codesearchnet"}
{"code": "def extract_q_df(self, state_key, action_key):\n    q = 0.0\n    if (self.q_df is None):\n        self.save_q_df(state_key, action_key, q)\n        return q\n    q_df = self.q_df[(self.q_df.state_key == state_key)]\n    q_df = q_df[(q_df.action_key == action_key)]\n    if q_df.shape[0]:\n        q = float(q_df['q_value'])\n    else:\n        self.save_q_df(state_key, action_key, q)\n    return q", "docstring": "Extract Q-Value from `self.q_df`.\n\nArgs:\nstate_key:      The key of state.\naction_key:     The key of action.\n\nReturns:\nQ-Value.", "source": "codesearchnet"}
{"code": "def cache_value(self, api_name, key, value):\n        \n        self._cache.setdefault(api_name, {})\n        self._cache[api_name][key] = value", "docstring": "Add the value of an API call to the cache.\n\nArgs:\napi_name: a string name of the API. Keys and values are segmented by api_name.\nkey: a string key for the specific call.\nvalue: the value of the call using the specific key", "source": "juraj-google-style"}
{"code": "def completer(*commands):\n\n    def decorated_func(f):\n        f.__complete_targets__ = list(commands)\n        return f\n    return decorated_func", "docstring": "Decorate a function to be the completer function of commands.\n\nArguments:\ncommands: Names of command that should trigger this function object.\n\n------------------------------\nInterface of completer methods:\n\n@completer('some-other_command')\ndef complete_foo(self, args, text):\n'''\nArguments:\nargs: A list of arguments. The first token, i.e, the command\nitself, is not included.\ntext: The scope of text being replaced.\n\nA few examples, with '$' representing the shell prompt and\n'|' represents the cursor position:\n$ |\n$ history|\nhandled by the __driver_completer() method\n$ history |\nargs = []\ntext = ''\n$ history cle|\nargs = []\ntext = 'cle'\n$ history clear |\nargs = ['clear']\ntext = ''\n\nReturns:\nA list of candidates. If no candidate was found, return\neither [] or None.\n'''\npass", "source": "codesearchnet"}
{"code": "def tf_action_exploration(self, action, exploration, action_spec):\n        \n        action_shape = tf.shape(input=action)\n        exploration_value = exploration.tf_explore(\n            episode=self.global_episode,\n            timestep=self.global_timestep,\n            shape=action_spec['shape']\n        )\n        exploration_value = tf.expand_dims(input=exploration_value, axis=0)\n\n        if action_spec['type'] == 'bool':\n            action = tf.where(\n                condition=(tf.random_uniform(shape=action_shape) < exploration_value),\n                x=(tf.random_uniform(shape=action_shape) < 0.5),\n                y=action\n            )\n\n        elif action_spec['type'] == 'int':\n            action = tf.where(\n                condition=(tf.random_uniform(shape=action_shape) < exploration_value),\n                x=tf.random_uniform(shape=action_shape, maxval=action_spec['num_actions'], dtype=util.tf_dtype('int')),\n                y=action\n            )\n\n        elif action_spec['type'] == 'float':\n            noise = tf.random_normal(shape=action_shape, dtype=util.tf_dtype('float'))\n            action += noise * exploration_value\n            if 'min_value' in action_spec:\n                action = tf.clip_by_value(\n                    t=action,\n                    clip_value_min=action_spec['min_value'],\n                    clip_value_max=action_spec['max_value']\n                )\n\n        return action", "docstring": "Applies optional exploration to the action (post-processor for action outputs).\n\nArgs:\naction (tf.Tensor): The original output action tensor (to be post-processed).\nexploration (Exploration): The Exploration object to use.\naction_spec (dict): Dict specifying the action space.\nReturns:\nThe post-processed action output tensor.", "source": "juraj-google-style"}
{"code": "def refund(request, invoice_id):\n    current_invoice = InvoiceController.for_id_or_404(invoice_id)\n    try:\n        current_invoice.refund()\n        messages.success(request, 'This invoice has been refunded.')\n    except ValidationError as ve:\n        messages.error(request, ve)\n    return redirect('invoice', invoice_id)", "docstring": "Marks an invoice as refunded and requests a credit note for the\nfull amount paid against the invoice.\n\nThis view requires a login, and the logged in user must be staff.\n\nArguments:\ninvoice_id (castable to int): The ID of the invoice to refund.\n\nReturns:\nredirect:\nRedirects to ``invoice``.", "source": "codesearchnet"}
{"code": "def _OpenFileObject(self, path_spec):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n    fvde_volume = pyfvde.volume()\n    fvde.FVDEVolumeOpen(\n        fvde_volume, path_spec, file_object, resolver.Resolver.key_chain)\n    return fvde_volume", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nFileIO: a file-like object.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"}
{"code": "class MeanAbsoluteError(MeanMetricWrapper):\n\n    def __init__(self, name='mean_absolute_error', dtype=None):\n        super(MeanAbsoluteError, self).__init__(mean_absolute_error, name, dtype=dtype)", "docstring": "Computes the mean absolute error between the labels and predictions.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.MeanAbsoluteError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result().numpy()\n0.25\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n...                sample_weight=[1, 0])\n>>> m.result().numpy()\n0.5\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[tf.keras.metrics.MeanAbsoluteError()])\n```", "source": "github-repos"}
{"code": "def normalize_build_spec(self, build_spec):\n        \n        for cmd in build_spec:\n            if not cmd:\n                continue\n            cmd_name = cmd.keys()[0]\n            cmd_options = cmd.values()[0]\n            cmd_handler = self.get_cmd_handler(cmd_name)\n            self.build_cmds.append(cmd_handler(cmd_options))", "docstring": "Convert a build spec into a list of Command tuples.\nAfter running this command, self.build_cmds should hold all\nthe commands that should be run on the disk in self.disk_path.\n\nArgs:\nbuild_spec (dict): The buildspec part from the init file", "source": "juraj-google-style"}
{"code": "def begin_episode(self, agent_indices):\n    with tf.name_scope('begin_episode/'):\n        if (self._last_state is None):\n            reset_state = tf.no_op()\n        else:\n            reset_state = utility.reinit_nested_vars(self._last_state, agent_indices)\n        reset_buffer = self._current_episodes.clear(agent_indices)\n        with tf.control_dependencies([reset_state, reset_buffer]):\n            return tf.constant('')", "docstring": "Reset the recurrent states and stored episode.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def mul(left, right):\n    \n    from .mv_mul import MvMul\n    length = max(left, right)\n    if length == 1:\n        return Mul(left, right)\n    return MvMul(left, right)", "docstring": "Distribution multiplication.\n\nArgs:\nleft (Dist, numpy.ndarray) : left hand side.\nright (Dist, numpy.ndarray) : right hand side.", "source": "juraj-google-style"}
{"code": "def _wrap_response(self, status=None, **kwargs):\n        \n        kwargs['status'] = status if status is not None else self._status.OK\n        return kwargs", "docstring": "Convenience method to wrap a status with any key word args.\n\nArgs:\nstatus (enum): enum response status, defaults to OK\n\nReturns:\ndict: inlcudes a 'status' attribute and any key word arguments", "source": "juraj-google-style"}
{"code": "def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3):\n        \n\n        class _elementInfo(object):\n            def __init__(self, el, pos, depth=0, max_list=3):\n                self.shape = \"\"\n                self.type = type(el).__name__\n                self.dtype = \"\"\n                self.range = \"\"\n\n                self.sub_elements = []\n\n                self.ident = \" \" * (depth * 2)\n                self.pos = pos\n\n                numpy_scalar_types = list(itertools.chain(*np.sctypes.values()))\n\n                if isinstance(el, (int, float, bool)):\n                    self.range = \" with value {}\".format(el)\n                elif type(el) is np.ndarray:\n                    self.shape = \" of shape {}\".format(el.shape)\n                    self.dtype = \":{}\".format(str(el.dtype))\n                    self.range = \" in range [{}, {}]\".format(el.min(), el.max())\n                elif type(el) in numpy_scalar_types:\n                    self.range = \" with value {}\".format(el)\n                elif isinstance(el, (list)):\n                    self.shape = \" of len {}\".format(len(el))\n\n                    if depth < max_depth:\n                        for k, subel in enumerate(el):\n                            if k < max_list:\n                                self.sub_elements.append(_elementInfo(subel, k, depth + 1, max_list))\n                            else:\n                                self.sub_elements.append(\" \" * ((depth + 1) * 2) + '...')\n                                break\n                    else:\n                        if len(el) > 0:\n                            self.sub_elements.append(\" \" * ((depth + 1) * 2) + ' ...')\n\n            def __str__(self):\n                strings = []\n                vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range)\n                strings.append(\"{}{}: {}{}{}{}\".format(*vals))\n\n                for k, el in enumerate(self.sub_elements):\n                    strings.append(str(el))\n                return \"\\n\".join(strings)\n\n        return str(_elementInfo(entry, k, depth, max_list))", "docstring": "Gather useful debug information from a datapoint.\n\nArgs:\nentry: the datapoint component\nk (int): index of this component in current datapoint\ndepth (int, optional): recursion depth\nmax_depth, max_list: same as in :meth:`__init__`.\n\nReturns:\nstring: debug message", "source": "juraj-google-style"}
{"code": "def write_log(self, message):\n        \n        if self._is_write_log and self.log_file and not self.log_file.closed:\n            self.log_file.write(message + '\\n')", "docstring": "Write a line to the VM instruction log file.\n\nArgs:\nmessage (str): string message to write to file.", "source": "juraj-google-style"}
{"code": "def to_valid_density_matrix(density_matrix_rep: Union[(int, np.ndarray)], num_qubits: int, dtype: Type[np.number]=np.complex64) -> np.ndarray:\n    if (isinstance(density_matrix_rep, np.ndarray) and (density_matrix_rep.ndim == 2)):\n        if (density_matrix_rep.shape != ((2 ** num_qubits), (2 ** num_qubits))):\n            raise ValueError('Density matrix was not square and of size 2 ** num_qubit, instead was {}'.format(density_matrix_rep.shape))\n        if (not np.allclose(density_matrix_rep, np.transpose(np.conj(density_matrix_rep)))):\n            raise ValueError('The density matrix is not hermitian.')\n        if (not np.isclose(np.trace(density_matrix_rep), 1.0)):\n            raise ValueError('Density matrix did not have trace 1 but instead {}'.format(np.trace(density_matrix_rep)))\n        if (density_matrix_rep.dtype != dtype):\n            raise ValueError('Density matrix had dtype {} but expected {}'.format(density_matrix_rep.dtype, dtype))\n        if (not np.all((np.linalg.eigvalsh(density_matrix_rep) > (- 1e-08)))):\n            raise ValueError('The density matrix is not positive semidefinite.')\n        return density_matrix_rep\n    state_vector = wave_function.to_valid_state_vector(density_matrix_rep, num_qubits, dtype)\n    return np.outer(state_vector, np.conj(state_vector))", "docstring": "Verifies the density_matrix_rep is valid and converts it to ndarray form.\n\nThis method is used to support passing a matrix, a vector (wave function),\nor a computational basis state as a representation of a state.\n\nArgs:\ndensity_matrix_rep: If an numpy array, if it is of rank 2 (a matrix),\nthen this is the density matrix. If it is a numpy array of rank 1\n(a vector) then this is a wave function. If this is an int,\nthen this is the computation basis state.\nnum_qubits: The number of qubits for the density matrix. The\ndensity_matrix_rep must be valid for this number of qubits.\ndtype: The numpy dtype of the density matrix, will be used when creating\nthe state for a computational basis state (int), or validated\nagainst if density_matrix_rep is a numpy array.\n\nReturns:\nA numpy matrix corresponding to the density matrix on the given number\nof qubits.\n\nRaises:\nValueError if the density_matrix_rep is not valid.", "source": "codesearchnet"}
{"code": "def _ip_int_from_string(cls, ip_str):\n        \n        if not ip_str:\n            raise AddressValueError('Address cannot be empty')\n\n        octets = ip_str.split('.')\n        if len(octets) != 4:\n            raise AddressValueError(\"Expected 4 octets in %r\" % ip_str)\n\n        try:\n            return _compat_int_from_byte_vals(\n                map(cls._parse_octet, octets), 'big')\n        except ValueError as exc:\n            raise AddressValueError(\"%s in %r\" % (exc, ip_str))", "docstring": "Turn the given IP string into an integer for comparison.\n\nArgs:\nip_str: A string, the IP ip_str.\n\nReturns:\nThe IP ip_str as an integer.\n\nRaises:\nAddressValueError: if ip_str isn't a valid IPv4 Address.", "source": "juraj-google-style"}
{"code": "def copy_from_dict(self, attributes):\n    \n    for attribute_name, attribute_value in attributes.items():\n      \n      if attribute_name[0] == '_':\n        continue\n      setattr(self, attribute_name, attribute_value)", "docstring": "Copies the attribute container from a dictionary.\nArgs:\nattributes (dict[str, object]): attribute values per name.", "source": "juraj-google-style"}
{"code": "def filter_by_col(self, column_names):\n        \n\n        if not isinstance(column_names, (list, tuple)):\n            column_names = [column_names, ]\n\n        sheet = self.table\n        identity = self.db_sheet_cols.id\n        exists = self.db_sheet_cols.exists\n        criterion = True\n\n        for column_name in column_names:\n            _criterion = sheet.loc[:, column_name] > 0\n            _exists = sheet.loc[:, exists] > 0\n            criterion = criterion & _criterion & _exists\n\n        return sheet.loc[criterion, identity].values.astype(int)", "docstring": "filters sheet/table by columns (input is column header)\n\nThe routine returns the serial numbers with values>1 in the selected\ncolumns.\n\nArgs:\ncolumn_names (list): the column headers.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def write(self, inputdata):\n        \n        if VERBOSE:\n            _print_out('\\nDummy_serial: Writing to port. Given:' + repr(inputdata) + '\\n')\n            \n        if sys.version_info[0] > 2:\n            if not type(inputdata) == bytes:\n                raise TypeError('The input must be type bytes. Given:' + repr(inputdata))\n            inputstring = str(inputdata, encoding='latin1')\n        else:\n            inputstring = inputdata\n\n        if not self._isOpen:\n            raise IOError('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata))\n\n        \n        try:\n            response = RESPONSES[inputstring]\n        except:\n            response = DEFAULT_RESPONSE\n        self._waiting_data = response", "docstring": "Write to a port on dummy_serial.\n\nArgs:\ninputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response\nfor subsequent read operations.\n\nNote that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.", "source": "juraj-google-style"}
{"code": "def _find_mapreduce_yaml(start, checked):\n    dir = start\n    while (dir not in checked):\n        checked.add(dir)\n        for mr_yaml_name in MR_YAML_NAMES:\n            yaml_path = os.path.join(dir, mr_yaml_name)\n            if os.path.exists(yaml_path):\n                return yaml_path\n        dir = os.path.dirname(dir)\n    return None", "docstring": "Traverse the directory tree identified by start until a directory already\nin checked is encountered or the path of mapreduce.yaml is found.\n\nChecked is present both to make loop termination easy to reason about and so\nthat the same directories do not get rechecked.\n\nArgs:\nstart: the path to start in and work upward from\nchecked: the set of already examined directories\n\nReturns:\nthe path of mapreduce.yaml file or None if not found.", "source": "codesearchnet"}
{"code": "def receiveds_not_parsed(receiveds):\n    log.debug('Receiveds for this email are not parsed')\n    output = []\n    counter = Counter()\n    for i in receiveds[::(- 1)]:\n        j = {'raw': i.strip()}\n        j['hop'] = (counter['hop'] + 1)\n        counter['hop'] += 1\n        output.append(j)\n    else:\n        return output", "docstring": "If receiveds are not parsed, makes a new structure with raw\nfield. It's useful to have the same structure of receiveds\nparsed.\n\nArgs:\nreceiveds (list): list of raw receiveds headers\n\nReturns:\na list of not parsed receiveds headers with first hop in first position", "source": "codesearchnet"}
{"code": "def Decode(self, encoded_data):\n    \n    try:\n      \n      \n      \n      \n      decoded_data = base64.b64decode(encoded_data)\n    except (TypeError, binascii.Error) as exception:\n      raise errors.BackEndError(\n          'Unable to decode base64 stream with error: {0!s}.'.format(\n              exception))\n\n    return decoded_data, b''", "docstring": "Decode the encoded data.\n\nArgs:\nencoded_data (byte): encoded data.\n\nReturns:\ntuple(bytes, bytes): decoded data and remaining encoded data.\n\nRaises:\nBackEndError: if the base64 stream cannot be decoded.", "source": "juraj-google-style"}
{"code": "def __init__(self, path):\n    \n    super(SQLiteStorageFileReader, self).__init__(path)\n    self._storage_file = sqlite_file.SQLiteStorageFile()\n    self._storage_file.Open(path=path)", "docstring": "Initializes a storage reader.\n\nArgs:\npath (str): path to the input file.", "source": "juraj-google-style"}
{"code": "def func(self, volume):\n        \n        return self._func(np.array(volume), self.eos_params)", "docstring": "The equation of state function with the paramters other than volume set\nto the ones obtained from fitting.\n\nArgs:\nvolume (list/numpy.array)\n\nReturns:\nnumpy.array", "source": "juraj-google-style"}
{"code": "def get_browser(browser_name, capabilities=None, **options):\n    if (browser_name == 'chrome'):\n        return webdriver.Chrome(desired_capabilities=capabilities, **options)\n    if (browser_name == 'edge'):\n        return webdriver.Edge(capabilities=capabilities, **options)\n    if (browser_name in ['ff', 'firefox']):\n        return webdriver.Firefox(capabilities=capabilities, **options)\n    if (browser_name in ['ie', 'internet_explorer']):\n        return webdriver.Ie(capabilities=capabilities, **options)\n    if (browser_name == 'phantomjs'):\n        return webdriver.PhantomJS(desired_capabilities=capabilities, **options)\n    if (browser_name == 'remote'):\n        return webdriver.Remote(desired_capabilities=capabilities, **options)\n    if (browser_name == 'safari'):\n        return webdriver.Safari(desired_capabilities=capabilities, **options)\n    raise ValueError('unsupported browser: {}'.format(repr(browser_name)))", "docstring": "Returns an instance of the given browser with the given capabilities.\n\nArgs:\nbrowser_name (str): The name of the desired browser.\ncapabilities (Dict[str, str | bool], optional): The desired capabilities of the browser.\nDefaults to None.\noptions: Arbitrary keyword arguments for the browser-specific subclass of\n:class:`webdriver.Remote`.\n\nReturns:\nWebDriver: An instance of the desired browser.", "source": "codesearchnet"}
{"code": "def stop(self, wait=True):\n        \n        assert not self._stopped, \"Already stopped\"\n        self._stopped = True\n        self._tornado.stop(wait)\n        self._http.stop()", "docstring": "Stop the Bokeh Server.\n\nThis stops and removes all Bokeh Server ``IOLoop`` callbacks, as well\nas stops the ``HTTPServer`` that this instance was configured with.\n\nArgs:\nfast (bool):\nWhether to wait for orderly cleanup (default: True)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _kl_beta_beta(d1, d2, name=None):\n  \n  def delta(fn, is_property=True):\n    fn1 = getattr(d1, fn)\n    fn2 = getattr(d2, fn)\n    return (fn2 - fn1) if is_property else (fn2() - fn1())\n\n  with tf.name_scope(name or \"kl_beta_beta\"):\n    return (delta(\"_log_normalization\", is_property=False) -\n            tf.math.digamma(d1.concentration1) * delta(\"concentration1\") -\n            tf.math.digamma(d1.concentration0) * delta(\"concentration0\") +\n            (tf.math.digamma(d1.total_concentration) *\n             delta(\"total_concentration\")))", "docstring": "Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.\n\nArgs:\nd1: instance of a Beta distribution object.\nd2: instance of a Beta distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_beta_beta\".\n\nReturns:\nBatchwise KL(d1 || d2)", "source": "juraj-google-style"}
{"code": "def __live_receivers(signal):\n    \n    with __lock:\n        __purge()\n        receivers = [funcref() for funcref in __receivers[signal]]\n\n    return receivers", "docstring": "Return all signal handlers that are currently still alive for the\ninput `signal`.\n\nArgs:\nsignal: A signal name.\n\nReturns:\nA list of callable receivers for the input signal.", "source": "juraj-google-style"}
{"code": "def _might_have_parameter(fn_or_cls, arg_name):\n    if inspect.isclass(fn_or_cls):\n        fn = _find_class_construction_fn(fn_or_cls)\n    else:\n        fn = fn_or_cls\n    while hasattr(fn, '__wrapped__'):\n        fn = fn.__wrapped__\n    arg_spec = _get_cached_arg_spec(fn)\n    if six.PY3:\n        if arg_spec.varkw:\n            return True\n        return ((arg_name in arg_spec.args) or (arg_name in arg_spec.kwonlyargs))\n    else:\n        if arg_spec.keywords:\n            return True\n        return (arg_name in arg_spec.args)", "docstring": "Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.\n\nSpecifically, this means that `fn_or_cls` either has a parameter named\n`arg_name`, or has a `**kwargs` parameter.\n\nArgs:\nfn_or_cls: The function or class to check.\narg_name: The name fo the parameter.\n\nReturns:\nWhether `arg_name` might be a valid argument of `fn`.", "source": "codesearchnet"}
{"code": "def _get_napp_key(self, key, user=None, napp=None):\n    if (user is None):\n        user = self.user\n    if (napp is None):\n        napp = self.napp\n    kytos_json = (((self._installed / user) / napp) / 'kytos.json')\n    try:\n        with kytos_json.open() as file_descriptor:\n            meta = json.load(file_descriptor)\n            return meta[key]\n    except (FileNotFoundError, json.JSONDecodeError, KeyError):\n        return ''", "docstring": "Return a value from kytos.json.\n\nArgs:\nuser (string): A Username.\nnapp (string): A NApp name\nkey (string): Key used to get the value within kytos.json.\n\nReturns:\nmeta (object): Value stored in kytos.json.", "source": "codesearchnet"}
{"code": "def get_etree_root(doc, encoding=None):\n    tree = get_etree(doc, encoding)\n    root = tree.getroot()\n    return root", "docstring": "Returns an instance of lxml.etree._Element for the given `doc` input.\n\nArgs:\ndoc: The input XML document. Can be an instance of\n``lxml.etree._Element``, ``lxml.etree._ElementTree``, a file-like\nobject, or a string filename.\nencoding: The character encoding of `doc`. If ``None``, an attempt\nwill be made to determine the character encoding by the XML\nparser.\n\nReturns:\nAn ``lxml.etree._Element`` instance for `doc`.\n\nRaises:\nIOError: If `doc` cannot be found.\nlxml.ParseError: If `doc` is a malformed XML document.", "source": "codesearchnet"}
{"code": "def _ParseFileData(self, knowledge_base, file_object):\n    \n    text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')\n\n    if not knowledge_base.GetHostname():\n      hostname = text_file_object.readline()\n      hostname = hostname.strip()\n      if hostname:\n        hostname_artifact = artifacts.HostnameArtifact(name=hostname)\n        knowledge_base.SetHostname(hostname_artifact)", "docstring": "Parses file content (data) for a hostname preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_object (dfvfs.FileIO): file-like object that contains the artifact\nvalue data.\n\nRaises:\nerrors.PreProcessFail: if the preprocessing fails.", "source": "juraj-google-style"}
{"code": "def parse_line(line):\n    columns = line.split()\n    token = columns.pop(0)\n    values = [float(column) for column in columns]\n    return (token, values)", "docstring": "Parses a line of a text embedding file.\n\nArgs:\nline: (str) One line of the text embedding file.\n\nReturns:\nA token string and its embedding vector in floats.", "source": "codesearchnet"}
{"code": "def _compose_custom_getters(getter_a, getter_b):\n  \n  if not getter_a:\n    return getter_b\n  if not getter_b:\n    return getter_a\n\n  def getter_fn(getter, *args, **kwargs):\n    return getter_b(functools.partial(getter_a, getter), *args, **kwargs)\n\n  return getter_fn", "docstring": "Compose two custom getters.\n\nExample use:\ntf.get_variable_scope().set_custom_getter(\ncompose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))\n\nThis composes getters in the same way as creating a new variable scope with\nthe new_getter, but it does not actually create a new variable scope.\n\nArgs:\ngetter_a: a custom getter - generally from the existing variable scope.\ngetter_b: a custom getter\n\nReturns:\na custom getter", "source": "juraj-google-style"}
{"code": "def get_global_vars(func):\n    closure = getclosurevars(func)\n    if closure['nonlocal']:\n        raise TypeError((\"Can't launch a job with closure variables: %s\" % closure['nonlocals'].keys()))\n    globalvars = dict(modules={}, functions={}, vars={})\n    for (name, value) in closure['global'].items():\n        if inspect.ismodule(value):\n            globalvars['modules'][name] = value.__name__\n        elif (inspect.isfunction(value) or inspect.ismethod(value)):\n            globalvars['functions'][name] = value\n        else:\n            globalvars['vars'][name] = value\n    return globalvars", "docstring": "Store any methods or variables bound from the function's closure\n\nArgs:\nfunc (function): function to inspect\n\nReturns:\ndict: mapping of variable names to globally bound VARIABLES", "source": "codesearchnet"}
{"code": "def register_loss_scale_wrapper(optimizer_cls, wrapper_fn, wrapper_cls=None):\n    _REGISTERED_WRAPPER_OPTIMIZER_CLS[optimizer_cls] = (wrapper_fn, wrapper_cls or wrapper_fn)", "docstring": "Registers a loss scale optimizer wrapper.\n\n`tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite`\nautomatically wraps an optimizer with an optimizer wrapper that performs loss\nscaling. This function registers a\n`(base_cls, wrapper_fn, wrapper_cls)` triple\nthat is used by `enable_mixed_precision_graph_rewrite`, where\n`wrapper_fn` is called to create a `wrapper_cls` instance that wraps an\n`optimizer_cls` instance.\n\nArgs:\noptimizer_cls: A base optimizer class, e.g. `tf.keras.optimizers.Optimizer`.\nwrapper_fn: A function that takes in arguments \"optimizer\" and\n\"loss_scale\", and returns a loss scale optimizer of type \"wrapper_cls\"\nthat wraps \"optimizer\".\nwrapper_cls: A loss scale optimizer class. Defaults to `wrapper_fn`, in\nwhich case `wrapper_fn` should be a loss scale optimizer class whose\nconstructor takes in arguments \"optimizer\" and \"loss_scale\".", "source": "github-repos"}
{"code": "def _get_syslog_format(event_type):\n    \n    syslog_format_template = get_template('syslog_format.json')\n    fmt = syslog_format_template.render(\n        event_type=event_type,\n        host=dbconfig.get('instance_name', default='local')\n    )\n\n    \n    return json.dumps(json.loads(fmt))", "docstring": "Take an event type argument and return a python logging format\n\nIn order to properly format the syslog messages to current standard, load the template and perform necessary\nreplacements and return the string.\n\nArgs:\nevent_type (str): Event type name\n\nReturns:\n`str`", "source": "juraj-google-style"}
{"code": "def _get_measure_outcome(self, qubit):\n        \n        \n        axis = list(range(self._number_of_qubits))\n        axis.remove(self._number_of_qubits - 1 - qubit)\n        probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))\n        \n        random_number = self._local_random.rand()\n        if random_number < probabilities[0]:\n            return '0', probabilities[0]\n        \n        return '1', probabilities[1]", "docstring": "Simulate the outcome of measurement of a qubit.\n\nArgs:\nqubit (int): the qubit to measure\n\nReturn:\ntuple: pair (outcome, probability) where outcome is '0' or '1' and\nprobability is the probability of the returned outcome.", "source": "juraj-google-style"}
{"code": "def _nutation(date, eop_correction=True, terms=106):\n    ttt = date.change_scale('TT').julian_century\n    r = 360.0\n    epsilon_bar = (((84381.448 - (46.815 * ttt)) - (0.00059 * (ttt ** 2))) + (0.001813 * (ttt ** 3)))\n    epsilon_bar /= 3600.0\n    m_m = (((134.96298139 + (((1325 * r) + 198.8673981) * ttt)) + (0.0086972 * (ttt ** 2))) + (1.78e-05 * (ttt ** 3)))\n    m_s = (((357.52772333 + (((99 * r) + 359.05034) * ttt)) - (0.0001603 * (ttt ** 2))) - (3.3e-06 * (ttt ** 3)))\n    u_m_m = (((93.27191028 + (((1342 * r) + 82.0175381) * ttt)) - (0.0036825 * (ttt ** 2))) + (3.1e-06 * (ttt ** 3)))\n    d_s = (((297.85036306 + (((1236 * r) + 307.11148) * ttt)) - (0.0019142 * (ttt ** 2))) + (5.3e-06 * (ttt ** 3)))\n    om_m = (((125.04452222 - (((5 * r) + 134.1362608) * ttt)) + (0.0020708 * (ttt ** 2))) + (2.2e-06 * (ttt ** 3)))\n    delta_psi = 0.0\n    delta_eps = 0.0\n    for (integers, reals) in _tab(terms):\n        (a1, a2, a3, a4, a5) = integers\n        (A, B, C, D) = (np.array(list(reals)) / 36000000.0)\n        a_p = (((((a1 * m_m) + (a2 * m_s)) + (a3 * u_m_m)) + (a4 * d_s)) + (a5 * om_m))\n        delta_psi += ((A + (B * ttt)) * np.sin(np.deg2rad(a_p)))\n        delta_eps += ((C + (D * ttt)) * np.cos(np.deg2rad(a_p)))\n    if eop_correction:\n        delta_eps += (date.eop.deps / 3600000.0)\n        delta_psi += (date.eop.dpsi / 3600000.0)\n    return (epsilon_bar, delta_psi, delta_eps)", "docstring": "Model 1980 of nutation as described in Vallado p. 224\n\nArgs:\ndate (beyond.utils.date.Date)\neop_correction (bool): set to ``True`` to include model correction\nfrom 'finals' files.\nterms (int)\nReturn:\ntuple : 3-elements, all floats in degrees\n1. ̄ε\n2. Δψ\n3. Δε\n\nWarning:\nThe good version of the nutation model can be found in the **errata**\nof the 4th edition of *Fundamentals of Astrodynamics and Applications*\nby Vallado.", "source": "codesearchnet"}
{"code": "def regex_find(orig_screen_output, regex, font_attr):\n    new_screen_output = RichTextLines(orig_screen_output.lines, font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs), annotations=orig_screen_output.annotations)\n    try:\n        re_prog = re.compile(regex)\n    except re.error:\n        raise ValueError('Invalid regular expression: \"%s\"' % regex)\n    regex_match_lines = []\n    for i, line in enumerate(new_screen_output.lines):\n        find_it = re_prog.finditer(line)\n        match_segs = []\n        for match in find_it:\n            match_segs.append((match.start(), match.end(), font_attr))\n        if match_segs:\n            if i not in new_screen_output.font_attr_segs:\n                new_screen_output.font_attr_segs[i] = match_segs\n            else:\n                new_screen_output.font_attr_segs[i].extend(match_segs)\n                new_screen_output.font_attr_segs[i] = sorted(new_screen_output.font_attr_segs[i], key=lambda x: x[0])\n            regex_match_lines.append(i)\n    new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines\n    return new_screen_output", "docstring": "Perform regex match in rich text lines.\n\nProduces a new RichTextLines object with font_attr_segs containing highlighted\nregex matches.\n\nExample use cases include:\n1) search for specific items in a large list of items, and\n2) search for specific numerical values in a large tensor.\n\nArgs:\norig_screen_output: The original RichTextLines, in which the regex find\nis to be performed.\nregex: The regex used for matching.\nfont_attr: Font attribute used for highlighting the found result.\n\nReturns:\nA modified copy of orig_screen_output.\n\nRaises:\nValueError: If input str regex is not a valid regular expression.", "source": "github-repos"}
{"code": "def tersoff_potential(self, structure):\n        \n        bv = BVAnalyzer()\n        el = [site.specie.symbol for site in structure]\n        valences = bv.get_valences(structure)\n        el_val_dict = dict(zip(el, valences))\n\n        gin = \"species \\n\"\n        qerfstring = \"qerfc\\n\"\n\n        for key in el_val_dict.keys():\n            if key != \"O\" and el_val_dict[key] % 1 != 0:\n                raise SystemError(\"Oxide has mixed valence on metal\")\n            specie_string = key + \" core \" + str(el_val_dict[key]) + \"\\n\"\n            gin += specie_string\n            qerfstring += key + \" \" + key + \" 0.6000 10.0000 \\n\"\n\n        gin += \"\n        met_oxi_ters = TersoffPotential().data\n        for key in el_val_dict.keys():\n            if key != \"O\":\n                metal = key + \"(\" + str(int(el_val_dict[key])) + \")\"\n                ters_pot_str = met_oxi_ters[metal]\n                gin += ters_pot_str\n\n        gin += qerfstring\n        return gin", "docstring": "Generate the species, tersoff potential lines for an oxide structure\n\nArgs:\nstructure: pymatgen.core.structure.Structure", "source": "juraj-google-style"}
{"code": "def block_depth(self):\n    return self._block_depth", "docstring": "Depth of recursively defined circulant blocks defining this `Operator`.\n\nWith `A` the dense representation of this `Operator`,\n\n`block_depth = 1` means `A` is symmetric circulant.  For example,\n\n```\nA = |w z y x|\n|x w z y|\n|y x w z|\n|z y x w|\n```\n\n`block_depth = 2` means `A` is block symmetric circulant with symmetric\ncirculant blocks.  For example, with `W`, `X`, `Y`, `Z` symmetric circulant,\n\n```\nA = |W Z Y X|\n|X W Z Y|\n|Y X W Z|\n|Z Y X W|\n```\n\n`block_depth = 3` means `A` is block symmetric circulant with block\nsymmetric circulant blocks.\n\nReturns:\nPython `integer`.", "source": "github-repos"}
{"code": "def needle_statistics_alignio(infile):\n    alignments = list(AlignIO.parse(infile, 'emboss'))\n    if (len(alignments) > 1):\n        raise ValueError('Alignment file contains more than one pairwise alignment')\n    alignment = alignments[0]\n    with open(infile) as f:\n        line = f.readline()\n        for i in range(len(alignments)):\n            while (line.rstrip() != '\n                line = f.readline()\n                if (not line):\n                    raise StopIteration\n            while (line[0] == '\n                parts = line[1:].split(':', 1)\n                key = parts[0].lower().strip()\n                if (key == 'identity'):\n                    ident_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()\n                    ident_num = int(ident_parse[0].split('/')[0])\n                    ident_percent = float(ident_parse[1])\n                    alignment.annotations['identity'] = ident_num\n                    alignment.annotations['percent_identity'] = ident_percent\n                if (key == 'similarity'):\n                    sim_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()\n                    sim_num = int(sim_parse[0].split('/')[0])\n                    sim_percent = float(sim_parse[1])\n                    alignment.annotations['similarity'] = sim_num\n                    alignment.annotations['percent_similarity'] = sim_percent\n                if (key == 'gaps'):\n                    gap_parse = parts[1].strip().replace('(', '').replace(')', '').replace('%', '').split()\n                    gap_num = int(gap_parse[0].split('/')[0])\n                    gap_percent = float(gap_parse[1])\n                    alignment.annotations['gaps'] = gap_num\n                    alignment.annotations['percent_gaps'] = gap_percent\n                if (key == 'score'):\n                    score = float(parts[1].strip())\n                    alignment.annotations['score'] = score\n                line = f.readline()\n    return alignment", "docstring": "Reads in a needle alignment file and returns an AlignIO object with annotations\n\nArgs:\ninfile (str): Alignment file name\n\nReturns:\nAlignIO: annotated AlignIO object", "source": "codesearchnet"}
{"code": "def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):\n    spectrum = _to_complex(spectrum)\n    spectrum_shape = self._shape_to_spectrum_shape(shape)\n    domain_dimension = spectrum_shape[-1]\n    if not domain_dimension:\n        return array_ops.zeros(shape, dtype)\n    matrix_rows = []\n    for m in range(domain_dimension):\n        x = np.zeros([domain_dimension])\n        x[m] = 1.0\n        fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))\n        h_convolve_x = fft_ops.ifft(spectrum * fft_x)\n        matrix_rows.append(h_convolve_x)\n    matrix = array_ops_stack.stack(matrix_rows, axis=-1)\n    return math_ops.cast(matrix, dtype)", "docstring": "Creates a circulant matrix from a spectrum.\n\nIntentionally done in an explicit yet inefficient way.  This provides a\ncross check to the main code that uses fancy reshapes.\n\nArgs:\nspectrum: Float or complex `Tensor`.\nshape:  Python list.  Desired shape of returned matrix.\ndtype:  Type to cast the returned matrix to.\n\nReturns:\nCirculant (batch) matrix of desired `dtype`.", "source": "github-repos"}
{"code": "def serialize_to_json(self, name, datas):\n    data_object = datas.get('object', None)\n    if (data_object is None):\n        msg = \"JSON reference '{}' lacks of required 'object' variable\"\n        raise SerializerError(msg.format(name))\n    try:\n        content = json.loads(data_object, object_pairs_hook=OrderedDict)\n    except json.JSONDecodeError as e:\n        msg = \"JSON reference '{}' raised error from JSON decoder: {}\"\n        raise SerializerError(msg.format(name, e))\n    else:\n        return content", "docstring": "Serialize given datas to any object from assumed JSON string.\n\nArguments:\nname (string): Name only used inside possible exception message.\ndatas (dict): Datas to serialize.\n\nReturns:\nobject: Object depending from JSON content.", "source": "codesearchnet"}
{"code": "def _set_root(self, request):\n        \n        if request.state_root:\n            root = request.state_root\n        else:\n            head = self._get_chain_head()\n            root = head.state_root_hash\n\n        try:\n            self._tree.set_merkle_root(root)\n        except KeyError as e:\n            LOGGER.debug('Unable to find root \"%s\" in database', e)\n            raise _ResponseFailed(self._status.NO_ROOT)\n\n        return root", "docstring": "Sets the root of the merkle tree, returning any head id used.\n\nNote:\nThis method will fail if `_tree` has not been set\n\nArgs:\nrequest (object): The parsed protobuf request object\n\nReturns:\nstr: the state root of the head block used to specify the root\n\nRaises:\nResponseFailed: Failed to set the root if the merkle tree", "source": "juraj-google-style"}
{"code": "def set_consistent(self, consistent_config):\n    self.topology._add_job_control_plane()\n    self.oport.operator.consistent(consistent_config)\n    return self._make_placeable()", "docstring": "Indicates that the stream is the start of a consistent region.\n\nArgs:\nconsistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.\n\nReturns:\nStream: Returns this stream.\n\n.. versionadded:: 1.11", "source": "codesearchnet"}
{"code": "def get_filelikeobject(filename: str = None,\n                       blob: bytes = None) -> BinaryIO:\n    \n    if not filename and not blob:\n        raise ValueError(\"no filename and no blob\")\n    if filename and blob:\n        raise ValueError(\"specify either filename or blob\")\n    if filename:\n        return open(filename, 'rb')\n    else:\n        return io.BytesIO(blob)", "docstring": "Open a file-like object.\n\nGuard the use of this function with ``with``.\n\nArgs:\nfilename: for specifying via a filename\nblob: for specifying via an in-memory ``bytes`` object\n\nReturns:\na :class:`BinaryIO` object", "source": "juraj-google-style"}
{"code": "def __init__(self, min_value, max_value, scaling_type='Auto'):\n        \n        self.min_value = min_value\n        self.max_value = max_value\n        self.scaling_type = scaling_type", "docstring": "Initialize a parameter range.\n\nArgs:\nmin_value (float or int): The minimum value for the range.\nmax_value (float or int): The maximum value for the range.\nscaling_type (str): The scale used for searching the range during tuning (default: 'Auto').\nValid values: 'Auto', 'Linear', 'Logarithmic' and 'ReverseLogarithmic'.", "source": "juraj-google-style"}
{"code": "def stop_replace(self, accountID, orderID, **kwargs):\n    return self.replace(accountID, orderID, order=StopOrderRequest(**kwargs))", "docstring": "Shortcut to replace a pending Stop Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Stop Order to replace\nkwargs : The arguments to create a StopOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def get_rmsd(self, mol1, mol2):\n    (label1, label2) = self._mapper.uniform_labels(mol1, mol2)\n    if ((label1 is None) or (label2 is None)):\n        return float('Inf')\n    return self._calc_rms(mol1, mol2, label1, label2)", "docstring": "Get RMSD between two molecule with arbitrary atom order.\n\nReturns:\nRMSD if topology of the two molecules are the same\nInfinite if  the topology is different", "source": "codesearchnet"}
{"code": "def build_user(user_info):\n    \n    try:\n        email = user_info['email']\n    except KeyError as err:\n        raise KeyError(\"A user has to have a email\")\n    \n    try:\n        name = user_info['name']\n    except KeyError as err:\n        raise KeyError(\"A user has to have a name\")\n    \n    user_obj = User(email=email, name=name)\n    \n    \n    if 'roles' in user_info:\n        user_obj['roles'] = user_info['roles']\n    \n    if 'location' in user_info:\n        user_obj['location'] = user_info['location']\n    \n    if 'institutes' in user_info:\n        user_obj['institutes'] = user_info['institutes']\n    \n    return user_obj", "docstring": "Build a user object\n\nArgs:\nuser_info(dict): A dictionary with user information\n\nReturns:\nuser_obj(scout.models.User)", "source": "juraj-google-style"}
{"code": "def _fiss_agent_header(headers=None):\n    \n    _set_session()\n\n    fiss_headers = {\"User-Agent\" : FISS_USER_AGENT}\n    if headers is not None:\n        fiss_headers.update(headers)\n    return fiss_headers", "docstring": "Return request headers for fiss.\nInserts FISS as the User-Agent.\nInitializes __SESSION if it hasn't been set.\n\nArgs:\nheaders (dict): Include additional headers as key-value pairs", "source": "juraj-google-style"}
{"code": "def __init__(self, temperatures, materials):\n        \n        self._table = Table(\n            column_keys=temperatures,\n            rows_mapping=materials\n        )", "docstring": "Create a material stress table.\n\nArgs:\ntemperatures: A sequence of temperatures.\n\nmaterials: A mapping of material names to sequences of stress values\nwhich correspond to the temperatures.", "source": "juraj-google-style"}
{"code": "def _build_statistics(self, input_batch, use_batch_stats, stat_dtype):\n    if (self.MOVING_MEAN not in self._initializers):\n        self._initializers[self.MOVING_MEAN] = create_mean_initializer()\n    self._moving_mean = tf.get_variable('moving_mean', dtype=stat_dtype, shape=(self._num_channels,), collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=self._initializers[self.MOVING_MEAN], trainable=False)\n    if (self.MOVING_VARIANCE not in self._initializers):\n        self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()\n    self._moving_variance = tf.get_variable('moving_variance', dtype=stat_dtype, shape=(self._num_channels,), collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=self._initializers[self.MOVING_VARIANCE], trainable=False)\n\n    def build_batch_stats():\n        'Builds the batch statistics calculation ops.'\n        (mean, variance) = tf.nn.moments(input_batch, self._axis, keep_dims=True, name='normalize_moments')\n        return (mean, variance)\n\n    def build_moving_stats():\n        'Retrieves the moving statistics.'\n        input_dtype = input_batch.dtype.base_dtype\n        if (stat_dtype == input_dtype):\n            return (tf.identity(self._moving_mean), tf.identity(self._moving_variance))\n        else:\n            return (tf.cast(self._moving_mean, input_dtype), tf.cast(self._moving_variance, input_dtype))\n    (mean, variance) = utils.smart_cond(use_batch_stats, build_batch_stats, build_moving_stats)\n    return (mean, variance)", "docstring": "Builds the statistics part of the graph when using moving variance.\n\nArgs:\ninput_batch: Input batch Tensor.\nuse_batch_stats: Boolean to indicate if batch statistics should be\ncalculated, otherwise moving averages are returned.\nstat_dtype: TensorFlow datatype to use for the moving mean and variance.\n\nReturns:\nTuple of (mean, variance), each of the same datatype as `input_batch`.", "source": "codesearchnet"}
{"code": "def get_ip_address(domain):\n    if (':\n        domain = ('http:\n    hostname = urlparse(domain).netloc\n    if (not hostname):\n        raise ValueError(\"Can't parse hostname!\")\n    return socket.gethostbyname(hostname)", "docstring": "Get IP address for given `domain`. Try to do smart parsing.\n\nArgs:\ndomain (str): Domain or URL.\n\nReturns:\nstr: IP address.\n\nRaises:\nValueError: If can't parse the domain.", "source": "codesearchnet"}
{"code": "def _find_children_hints_in_while_loop(function_def, nodes_mapping):\n    new_nodes = []\n    for node in function_def.node_def:\n        for i, _ in enumerate(node.input):\n            if node.input[i] in nodes_mapping:\n                node.input[i] = nodes_mapping[node.input[i]]\n        new_nodes.append(_copy.deepcopy(node))\n    name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)\n    children_hints = _find_all_hints_in_nodes(new_nodes)\n    children_hints_q = []\n    for hint in children_hints.values():\n        _, output_names = hint.flattened_inputs_and_outputs()\n        seq = name_to_seq_num[output_names[0]]\n        for output_name in output_names:\n            seq = min(seq, name_to_seq_num[output_name])\n        children_hints_q.append((seq, hint))\n    children_hints_q.sort(key=lambda tup: tup[0])\n    ordered_children_hints = [x[1] for x in children_hints_q]\n    return (ordered_children_hints, new_nodes)", "docstring": "Find children hints and all nodes inside the while loop.\n\nArgs:\nfunction_def: Function def of the while loop.\nnodes_mapping: While loop input_arg : real node name.\n\nReturns:\nOrdered children hints and all re-mapped nodes inside the while loop.", "source": "github-repos"}
{"code": "def deal_with_changeset_stack_policy(self, fqn, stack_policy):\n    if stack_policy:\n        kwargs = generate_stack_policy_args(stack_policy)\n        kwargs['StackName'] = fqn\n        logger.debug('Setting stack policy on %s.', fqn)\n        self.cloudformation.set_stack_policy(**kwargs)", "docstring": "Set a stack policy when using changesets.\n\nChangeSets don't allow you to set stack policies in the same call to\nupdate them. This sets it before executing the changeset if the\nstack policy is passed in.\n\nArgs:\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.", "source": "codesearchnet"}
{"code": "def vmstat(stat):\n    out = subprocess.check_output(['vmstat', '-s'])\n    stat = stat.encode('ascii')\n    for line in out.split(b'\\n'):\n        line = line.strip()\n        if (stat in line):\n            return int(line.split(b' ')[0])\n    raise ValueError(\"Can't find {} in 'vmstat' output.\".format(stat))", "docstring": "Run vmstat and get a particular statistic.\n\nArgs:\nstat: The statistic that we are interested in retrieving.\n\nReturns:\nThe parsed output.", "source": "codesearchnet"}
{"code": "def mimic_adam_with_adafactor(hparams):\n    assert ('adam' in hparams.optimizer)\n    hparams.optimizer = 'adafactor'\n    hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1\n    hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2\n    hparams.optimizer_adafactor_multiply_by_parameter_scale = False\n    hparams.optimizer_adafactor_factored = False\n    hparams.optimizer_adafactor_clipping_threshold = None\n    hparams.optimizer_adafactor_decay_type = 'adam'", "docstring": "Switch from Adam to Adafactor, approximating the behavior of Adam.\n\nSome minor things may be different, like epsilon and beta1 correction.\n\nArgs:\nhparams: model hyperparameters where \"adam\" in hparams.optimizer", "source": "codesearchnet"}
{"code": "def parse_pv(header):\n    order_fit = parse_order_fit(header)\n\n    def parse_with_base(i):\n        key_base = ('PV%d_' % i)\n        pvi_x = [header[(key_base + '0')]]\n\n        def parse_range(lower, upper):\n            for j in range(lower, (upper + 1)):\n                pvi_x.append(header[(key_base + str(j))])\n        if (order_fit >= 1):\n            parse_range(1, 3)\n        if (order_fit >= 2):\n            parse_range(4, 6)\n        if (order_fit >= 3):\n            parse_range(7, 10)\n        return pvi_x\n    return [parse_with_base(1), parse_with_base(2)]", "docstring": "Parses the PV array from an astropy FITS header.\n\nArgs:\nheader: astropy.io.fits.header.Header\nThe header containing the PV values.\n\nReturns:\ncd: 2d array (list(list(float))\n[[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]]\nNote that N depends on the order of the fit.  For example, an\norder 3 fit goes up to PV?_10.", "source": "codesearchnet"}
{"code": "def compact_interval_string(value_list):\n  \n\n  if not value_list:\n    return ''\n\n  value_list.sort()\n\n  \n  interval_list = []\n  curr = []\n  for val in value_list:\n    if curr and (val > curr[-1] + 1):\n      interval_list.append((curr[0], curr[-1]))\n      curr = [val]\n    else:\n      curr.append(val)\n\n  if curr:\n    interval_list.append((curr[0], curr[-1]))\n\n  \n  \n  return ','.join([\n      '{}-{}'.format(pair[0], pair[1]) if pair[0] != pair[1] else str(pair[0])\n      for pair in interval_list\n  ])", "docstring": "Compact a list of integers into a comma-separated string of intervals.\n\nArgs:\nvalue_list: A list of sortable integers such as a list of numbers\n\nReturns:\nA compact string representation, such as \"1-5,8,12-15\"", "source": "juraj-google-style"}
{"code": "def execute(self, sensor_graph, scope_stack):\n        \n\n        streamer = DataStreamer(self.selector, self.dest, self.report_format, self.auto, report_type=self.report_type, with_other=self.with_other)\n        sensor_graph.add_streamer(streamer)", "docstring": "Execute this statement on the sensor_graph given the current scope tree.\n\nThis adds a single DataStreamer to the current sensor graph\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def copy_buffer(self, dst, src, size=(- 1), *, read_offset=0, write_offset=0) -> None:\n    self.mglo.copy_buffer(dst.mglo, src.mglo, size, read_offset, write_offset)", "docstring": "Copy buffer content.\n\nArgs:\ndst (Buffer): The destination buffer.\nsrc (Buffer): The source buffer.\nsize (int): The number of bytes to copy.\n\nKeyword Args:\nread_offset (int): The read offset.\nwrite_offset (int): The write offset.", "source": "codesearchnet"}
{"code": "def convert_nested_time_distributed(weights):\n    return preprocess_weights_for_loading(layer.layer, weights, original_keras_version, original_backend)", "docstring": "Converts layers nested in `TimeDistributed` wrapper.\n\nThis function uses `preprocess_weights_for_loading()` for converting nested\nlayers.\n\nArgs:\nweights: List of weights values (Numpy arrays).\n\nReturns:\nA list of weights values (Numpy arrays).", "source": "github-repos"}
{"code": "def create_training_target(self, target, run_eagerly=False):\n    if self.has_training_target():\n        raise ValueError('The training_target field for the _TrainingEndpoint instance has already been populated')\n    if run_eagerly:\n        self.training_target = _TrainingTarget(None, feedable=True, skip_target_weights=False)\n        return\n    if self.should_skip_target():\n        self.training_target = _TrainingTarget(None)\n    else:\n        if target is not None and (not backend.is_placeholder(target)):\n            feedable = False\n            skip_target_weights = True\n        else:\n            feedable = True\n            skip_target_weights = False\n        if target is None:\n            target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get(self.loss_fn, backend.dtype(self.output))\n            target = backend.placeholder(ndim=len(self.shape), name=self.output_name + '_target', sparse=backend.is_sparse(self.output), dtype=target_dtype)\n        self.training_target = _TrainingTarget(target, feedable=feedable, skip_target_weights=skip_target_weights)", "docstring": "Create training_target instance and update the self.training_target.\n\nNote that the input target should just be a tensor or None, and\ncorresponding training target will be created based on the output and\nloss_fn.\n\nArgs:\ntarget: the target tensor for the current output. Could be None.\nrun_eagerly: boolean, whether the model is in run_eagerly mode.\n\nRaises:\nValueError if the training_target field for the current instance has\nalready been populated.", "source": "github-repos"}
{"code": "def reaction_signature(eq, direction=False, stoichiometry=False):\n    \n    def compounds_sig(compounds):\n        if stoichiometry:\n            return tuple(sorted(compounds))\n        else:\n            return tuple(sorted(compound for compound, _ in compounds))\n\n    left = compounds_sig(eq.left)\n    right = compounds_sig(eq.right)\n\n    if left < right:\n        reaction_sig = left, right\n        direction_sig = eq.direction\n    else:\n        reaction_sig = right, left\n        direction_sig = eq.direction.flipped()\n\n    if direction:\n        return reaction_sig, direction_sig\n    return reaction_sig", "docstring": "Return unique signature object for :class:`Reaction`.\n\nSignature objects are hashable, and compare equal only if the reactions\nare considered the same according to the specified rules.\n\nArgs:\ndirection: Include reaction directionality when considering equality.\nstoichiometry: Include stoichiometry when considering equality.", "source": "juraj-google-style"}
{"code": "def copy_to(self, container: Container, fn_host: str, fn_container: str) -> None:\n    logger.debug('Copying file to container, %s: %s -> %s', container.uid, fn_host, fn_container)\n    if (not os.path.exists(fn_host)):\n        logger.error('Failed to copy file [%s] to [%s] in container [%s]: not found.', fn_host, fn_container, container.uid)\n        raise FileNotFound(fn_host)\n    cmd = \"docker cp '{}' '{}:{}'\".format(fn_host, container.id, fn_container)\n    try:\n        subprocess.check_output(cmd, shell=True)\n        logger.debug('Copied file to container, %s: %s -> %s', container.uid, fn_host, fn_container)\n        r = self.command(container, \"sudo chown $(whoami) '{}'\".format(fn_container))\n        if (r.code != 0):\n            m = 'failed to update permissions for container file [{}] (exit code: {}): {}'\n            m = m.format(fn_container, r.code, r.output)\n            raise BugZooException(m)\n    except subprocess.CalledProcessError:\n        logger.exception('Failed to copy file to container, %s: %s -> %s', container.uid, fn_host, fn_container)\n        raise", "docstring": "Copies a file from the host machine to a specified location inside a\ncontainer.\n\nRaises:\nFileNotFound: if the host file wasn't found.\nsubprocess.CalledProcessError: if the file could not be copied to\nthe container.", "source": "codesearchnet"}
{"code": "def move(self, delta):\n        \n        pos = self.pos\n        self.pos = (pos[0]+delta[0], pos[1]+delta[1], pos[2]+delta[0], pos[3]+delta[1])\n\n        \n        for age in self.nodes:\n            for node in age:\n                node.move(delta)", "docstring": "Move the tree.\n\nArgs:\ndelta (tupel): The adjustment of the position.", "source": "juraj-google-style"}
{"code": "def excluded_from_module_rename(module, import_rename_spec):\n    for excluded_prefix in import_rename_spec.excluded_prefixes:\n        if module.startswith(excluded_prefix):\n            return True\n    return False", "docstring": "Check if this module import should not be renamed.\n\nArgs:\nmodule: (string) module name.\nimport_rename_spec: ImportRename instance.\n\nReturns:\nTrue if this import should not be renamed according to the\nimport_rename_spec.", "source": "github-repos"}
{"code": "def __init__(self, founded_command='_silence_', score=0.0, is_new_command=False):\n    self._founded_command = founded_command\n    self._score = score\n    self._is_new_command = is_new_command", "docstring": "Construct a recognition result.\n\nArgs:\nfounded_command: A string indicating the word just founded.\nscore: A float representing the confidence of founded word.\nis_new_command: A boolean indicating if the founded command is a new one\nagainst the last one.", "source": "github-repos"}
{"code": "def parse_services(config, services):\n    enabled = 0\n    for service in services:\n        check_disabled = config.getboolean(service, 'check_disabled')\n        if (not check_disabled):\n            enabled += 1\n    return enabled", "docstring": "Parse configuration to return number of enabled service checks.\n\nArguments:\nconfig (obj): A configparser object with the configuration of\nanycast-healthchecker.\nservices (list): A list of section names which holds configuration\nfor each service check\n\nReturns:\nA number (int) of enabled service checks.", "source": "codesearchnet"}
{"code": "def get_updated(node):\n    if isinstance(node, gast.Assign):\n        return set.union(*(_get_target(target) for target in node.targets))\n    elif isinstance(node, (gast.For, gast.AugAssign)):\n        return _get_target(node.target)\n    elif isinstance(node, gast.arguments):\n        targets = set((arg.id for arg in (node.args + node.kwonlyargs)))\n        if node.vararg:\n            targets.add(node.vararg.id)\n        if node.kwarg:\n            targets.add(node.kwarg.id)\n        return targets\n    else:\n        return set()", "docstring": "Return the variable names created or mutated by this statement.\n\nThis function considers assign statements, augmented assign statements, and\nthe targets of for loops, as well as function arguments.\n\nFor example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and\n`y`, `for i in range(x)` will return `i`, etc.\n\nArgs:\nnode: An AST node\n\nReturns:\nA set of variable names (strings) of all the variables created or mutated.", "source": "codesearchnet"}
{"code": "def __set_data__(self, prop, data):\n        \n        if self.data_attr:\n            setattr(prop, self.data_attr, data)\n        else:\n            rm_idxs = []\n            for i, val in enumerate(prop):\n                if val not in data:\n                    rm_idxs.append(i)\n            for idx in sorted(rm_idxs, reverse=True):\n                prop.pop(idx)\n            for val in data:\n                if val not in prop:\n                    prop.append(val)", "docstring": "sets the processed data to the appropriated property attribute\n\nArgs:\n-----\nprop: the property being manipulated\ndata: the list of processed data", "source": "juraj-google-style"}
{"code": "def delete(filething):\n    \n\n    t = OggVorbis(filething)\n    filething.fileobj.seek(0)\n    t.delete(filething)", "docstring": "delete(filething)\n\nArguments:\nfilething (filething)\nRaises:\nmutagen.MutagenError\n\nRemove tags from a file.", "source": "juraj-google-style"}
{"code": "def run(self):\n        \n        target = getattr(self, '_Thread__target', getattr(self, '_target', None))\n        args = getattr(self, '_Thread__args', getattr(self, '_args', None))\n        kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None))\n        if target is not None:\n            self._return = target(*args, **kwargs)\n\n        return None", "docstring": "Runs the thread.\n\nArgs:\nself (ThreadReturn): the ``ThreadReturn`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def shutdown(self, ssc=None, grace_secs=0, timeout=259200):\n    logging.info('Stopping TensorFlow nodes')\n    (ps_list, worker_list, eval_list) = ([], [], [])\n    for node in self.cluster_info:\n        (ps_list if (node['job_name'] == 'ps') else (eval_list if (node['job_name'] == 'evaluator') else worker_list)).append(node)\n    if (timeout > 0):\n\n        def timeout_handler(signum, frame):\n            logging.error('TensorFlow execution timed out, exiting Spark application with error status')\n            self.sc.cancelAllJobs()\n            self.sc.stop()\n            sys.exit(1)\n        signal.signal(signal.SIGALRM, timeout_handler)\n        signal.alarm(timeout)\n    if (ssc is not None):\n        while (not ssc.awaitTerminationOrTimeout(1)):\n            if self.server.done:\n                logging.info('Server done, stopping StreamingContext')\n                ssc.stop(stopSparkContext=False, stopGraceFully=True)\n                break\n    elif (self.input_mode == InputMode.TENSORFLOW):\n        count = 0\n        while (count < 3):\n            st = self.sc.statusTracker()\n            jobs = st.getActiveJobsIds()\n            if (len(jobs) == 0):\n                break\n            stages = st.getActiveStageIds()\n            for i in stages:\n                si = st.getStageInfo(i)\n                if (si.numActiveTasks == (len(ps_list) + len(eval_list))):\n                    count += 1\n            time.sleep(5)\n    workers = len(worker_list)\n    workerRDD = self.sc.parallelize(range(workers), workers)\n    workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues))\n    time.sleep(grace_secs)\n    if ('error' in tf_status):\n        logging.error('Exiting Spark application with error status.')\n        self.sc.cancelAllJobs()\n        self.sc.stop()\n        sys.exit(1)\n    logging.info('Shutting down cluster')\n    for node in (ps_list + eval_list):\n        addr = node['addr']\n        authkey = node['authkey']\n        m = TFManager.connect(addr, authkey)\n        q = m.get_queue('control')\n        q.put(None)\n        q.join()\n    while True:\n        time.sleep(5)\n        st = self.sc.statusTracker()\n        jobs = st.getActiveJobsIds()\n        if (len(jobs) == 0):\n            break", "docstring": "Stops the distributed TensorFlow cluster.\n\nFor InputMode.SPARK, this will be executed AFTER the `TFCluster.train()` or `TFCluster.inference()` method completes.\nFor InputMode.TENSORFLOW, this will be executed IMMEDIATELY after `TFCluster.run()` and will wait until the TF worker nodes complete.\n\nArgs:\n:ssc: *For Streaming applications only*. Spark StreamingContext\n:grace_secs: Grace period to wait after all executors have completed their tasks before terminating the Spark application, e.g. to allow the chief worker to perform any final/cleanup duties like exporting or evaluating the model.  Default is 0.\n:timeout: Time in seconds to wait for TF cluster to complete before terminating the Spark application.  This can be useful if the TF code hangs for any reason.  Default is 3 days.  Use -1 to disable timeout.", "source": "codesearchnet"}
{"code": "def to_tensor_shape(spec):\n    if spec.ndim is None and spec.shape is None:\n        return tensor_shape.TensorShape(None)\n    elif spec.shape is not None:\n        return tensor_shape.TensorShape(spec.shape)\n    else:\n        shape = [None] * spec.ndim\n        for a in spec.axes:\n            shape[a] = spec.axes[a]\n        return tensor_shape.TensorShape(shape)", "docstring": "Returns a tf.TensorShape object that matches the shape specifications.\n\nIf the InputSpec's shape or ndim is defined, this method will return a fully\nor partially-known shape. Otherwise, the returned TensorShape is None.\n\nArgs:\nspec: an InputSpec object.\n\nReturns:\na tf.TensorShape object", "source": "github-repos"}
{"code": "def verify_account(self, email_address):\n    request = self._get_request()\n    resp = request.post(self.ACCOUNT_VERIFY_URL, {'email_address': email_address})\n    return ('account' in resp)", "docstring": "Verify whether a HelloSign Account exists\n\nArgs:\n\nemail_address (str): Email address for the account to verify\n\nReturns:\nTrue or False", "source": "codesearchnet"}
{"code": "def getValue(self, unit=None):\n    if (unit or self.unit):\n        r = (float((self.value * UnitToValue(self.unit))) / UnitToValue(unit))\n        return (int(round(r)) if isinstance(self.value, int) else r)\n    return self.value", "docstring": "Return the value of the feature.\n\nIf the unit is specified and the feature has a unit, the value is converted\n\nArgs:\n- unit(str,optional): A unit to convert the current feature value ('B','K','M','G')", "source": "codesearchnet"}
{"code": "def update_z(self, z, indices=None):\n        \n        z = _make_np_bool(z)\n        if indices is None:\n            if len(self._z) != len(z):\n                raise QiskitError(\"During updating whole z, you can not \"\n                                  \"change the number of qubits.\")\n            self._z = z\n        else:\n            if not isinstance(indices, list) and not isinstance(indices, np.ndarray):\n                indices = [indices]\n            for p, idx in enumerate(indices):\n                self._z[idx] = z[p]\n\n        return self", "docstring": "Update partial or entire z.\n\nArgs:\nz (numpy.ndarray or list): to-be-updated z\nindices (numpy.ndarray or list or optional): to-be-updated qubit indices\n\nReturns:\nPauli: self\n\nRaises:\nQiskitError: when updating whole z, the number of qubits must be the same.", "source": "juraj-google-style"}
{"code": "def ResultCollectionForFID(cls, flow_id):\n    \n    \n    if not isinstance(flow_id, rdfvalue.RDFURN):\n      flow_id = rdfvalue.RDFURN(flow_id)\n\n    return sequential_collection.GeneralIndexedCollection(\n        flow_id.Add(RESULTS_SUFFIX))", "docstring": "Returns the ResultCollection for the flow with a given flow_id.\n\nArgs:\nflow_id: The id of the flow, a RDFURN of the form aff4:/flows/F:123456.\n\nReturns:\nThe collection containing the results for the flow identified by the id.", "source": "juraj-google-style"}
{"code": "def __call__(self, shape, dtype=None):\n    dtype = standardize_dtype(dtype)\n    return ops.ones(shape, dtype=dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes\nare supported. If not specified, `keras.backend.floatx()`\nis used, which default to `float32` unless you configured it\notherwise (via `keras.backend.set_floatx(float_dtype)`).", "source": "github-repos"}
{"code": "def __init__(self, selector, comparison=None, argument=None):\n        \n        super(Constraint, self).__init__()\n        self.selector = selector\n        \n        if comparison and COMPARISON_COMP.match(comparison) is None:\n            raise FiqlObjectException(\n                \"'%s' is not a valid FIQL comparison\" % comparison)\n        self.comparison = comparison\n        self.argument = argument", "docstring": "Initialize instance of ``Constraint``.\n\nArgs:\nselector (string): URL decoded constraint ``selector``.\ncomparison (string, optional): Parsed/mapped ``comparison``\noperator. Defaults to ``None``.\nargument (string, optional): URL decoded constraint ``argument``.\nDefaults to ``None``.\n\nRaises:\nFiqlObjectException: Not a valid FIQL comparison.", "source": "juraj-google-style"}
{"code": "def write_transform_artifacts(self, transform_fn, location):\n    return transform_fn | 'Write Transform Artifacts' >> transform_fn_io.WriteTransformFn(location)", "docstring": "Write transform artifacts to the given location.\nArgs:\ntransform_fn: A transform_fn object.\nlocation: A location to write the artifacts.\nReturns:\nA PCollection of WriteTransformFn writing a TF transform graph.", "source": "github-repos"}
{"code": "def get_config(self, name, default=_MISSING):\n        \n\n        value = self._adapter.get_config(name, default)\n        if value is _MISSING:\n            raise ArgumentError(\"Config value did not exist\", name=name)\n\n        return value", "docstring": "Get a config value from this adapter by name\n\nArgs:\nname (string): The name of the config variable\ndefault (object): The default value to return if config is not found\n\nReturns:\nobject: the value associated with the name\n\nRaises:\nArgumentError: if the name is not found and no default is supplied", "source": "juraj-google-style"}
{"code": "def _attempt_slice_retry(self, shard_state, tstate):\n    \n    if (shard_state.slice_retries + 1 <\n        parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):\n      logging.warning(\n          \"Slice %s %s failed for the %s of up to %s attempts \"\n          \"(%s of %s taskqueue execution attempts). \"\n          \"Will retry now.\",\n          tstate.shard_id,\n          tstate.slice_id,\n          shard_state.slice_retries + 1,\n          parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,\n          self.task_retry_count() + 1,\n          parameters.config.TASK_MAX_ATTEMPTS)\n      \n      \n      \n      sys.exc_clear()\n      self._try_free_lease(shard_state, slice_retry=True)\n      return self._TASK_DIRECTIVE.RETRY_SLICE\n\n    if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:\n      logging.warning(\"Slice attempt %s exceeded %s max attempts.\",\n                      self.task_retry_count() + 1,\n                      parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)\n    return self._TASK_DIRECTIVE.RETRY_SHARD", "docstring": "Attempt to retry this slice.\n\nThis method may modify shard_state and tstate to prepare for retry or fail.\n\nArgs:\nshard_state: model.ShardState for current shard.\ntstate: model.TransientShardState for current shard.\n\nReturns:\nA _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.\nRETRY_SHARD if shard retry should be attempted.", "source": "juraj-google-style"}
{"code": "def _to_proto_sparse_tensor(sparse_tensor, nested_proto,\n                            process_leafs, already_processed):\n  \n  already_processed.add(id(sparse_tensor))\n  nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME\n  for str_key in _SPARSE_TENSOR_FIELD:\n    tensor = getattr(sparse_tensor, str_key)\n    nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)", "docstring": "Serializes a `tf.SparseTensor` into `nested_proto`.\n\nArgs:\nsparse_tensor: An instance of `tf.SparseTensor`.\nnested_proto: A `module_pb2.NestedData` instance to be filled from\n`sparse_tensor`.\nprocess_leafs: A function to be applied to the leaf valued of the nested\nstructure.\nalready_processed: Set of already processed objects (used to avoid\ninfinite recursion).", "source": "juraj-google-style"}
{"code": "def _html_checker(job_var, interval, status, header,\n                  _interval_set=False):\n    \n    job_status = job_var.status()\n    job_status_name = job_status.name\n    job_status_msg = job_status.value\n    status.value = header % (job_status_msg)\n    while job_status_name not in ['DONE', 'CANCELLED']:\n        time.sleep(interval)\n        job_status = job_var.status()\n        job_status_name = job_status.name\n        job_status_msg = job_status.value\n        if job_status_name == 'ERROR':\n            break\n        else:\n            if job_status_name == 'QUEUED':\n                job_status_msg += ' (%s)' % job_var.queue_position()\n                if not _interval_set:\n                    interval = max(job_var.queue_position(), 2)\n            else:\n                if not _interval_set:\n                    interval = 2\n            status.value = header % (job_status_msg)\n\n    status.value = header % (job_status_msg)", "docstring": "Internal function that updates the status\nof a HTML job monitor.\n\nArgs:\njob_var (BaseJob): The job to keep track of.\ninterval (int): The status check interval\nstatus (widget): HTML ipywidget for output ot screen\nheader (str): String representing HTML code for status.\n_interval_set (bool): Was interval set by user?", "source": "juraj-google-style"}
{"code": "def _ParseVValueString(self, parser_mediator, data, user_information_descriptor):\n    data_start_offset = (user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET)\n    data_end_offset = (data_start_offset + user_information_descriptor.size)\n    descriptor_data = data[data_start_offset:data_end_offset]\n    try:\n        username = descriptor_data.decode('utf-16-le')\n    except (UnicodeDecodeError, UnicodeEncodeError) as exception:\n        username = descriptor_data.decode('utf-16-le', errors='replace')\n        parser_mediator.ProduceExtractionWarning('unable to decode V value string with error: {0!s}. Characters that cannot be decoded will be replaced with \"?\" or \"\\\\ufffd\".'.format(exception))\n    return username", "docstring": "Parses a V value string.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndata (bytes): Windows Registry V value data.\nuser_information_descriptor (user_information_descriptor): V value\nuser information descriptor.\n\nReturns:\nstr: string value stored in the Windows Registry V value data.", "source": "codesearchnet"}
{"code": "def parse_GDS_columns(lines, subsets):\n    \n    data = []\n    index = []\n    for line in lines:\n        line = line.rstrip()\n        if line.startswith(\"\n            tmp = __parse_entry(line)\n            data.append(tmp[1])\n            index.append(tmp[0])\n\n    df = DataFrame(data, index=index, columns=['description'])\n    subset_ids = defaultdict(dict)\n    for subsetname, subset in iteritems(subsets):\n        for expid in subset.metadata[\"sample_id\"][0].split(\",\"):\n            try:\n                subset_type = subset.get_type()\n                subset_ids[subset_type][expid] = \\\n                    subset.metadata['description'][0]\n            except Exception as err:\n                logger.error(\"Error processing subsets: %s for subset %s\" % (\n                    subset.get_type(), subsetname))\n\n    return df.join(DataFrame(subset_ids))", "docstring": "Parse list of line with columns description from SOFT file of GDS.\n\nArgs:\nlines (:obj:`Iterable`): Iterator over the lines.\nsubsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use.\n\nReturns:\n:obj:`pandas.DataFrame`: Columns description.", "source": "juraj-google-style"}
{"code": "def send(self):\n    xml_request = self.get_xml_request()\n    if (self.connection._debug == 1):\n        print(xml_request)\n    Debug.warn(('-' * 25))\n    Debug.warn(self._command)\n    Debug.dump('doc: \\n', self._documents)\n    Debug.dump('cont: \\n', self._content)\n    Debug.dump('nest cont \\n', self._nested_content)\n    Debug.dump('Request: \\n', xml_request)\n    response = _handle_response(self.connection._send_request(xml_request), self._command, self.connection.document_id_xpath)\n    return response", "docstring": "Send an XML string version of content through the connection.\n\nReturns:\nResponse object.", "source": "codesearchnet"}
{"code": "def __init__(self, jss):\n        \n        self.jss = jss\n        self.url = \"%s/casper.jxml\" % self.jss.base_url\n        self.auth = urllib.urlencode({\"username\": self.jss.user,\n                                      \"password\": self.jss.password})\n        super(Casper, self).__init__(tag=\"Casper\")\n        self.update()", "docstring": "Initialize a Casper object.\n\nArgs:\njss: A JSS object to request the casper page from.", "source": "juraj-google-style"}
{"code": "def prepare_capstone(syntax=AsmSyntax.att, target=None):\n    if (not HAVE_CAPSTONE):\n        raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')\n    if (target is None):\n        target = pwnypack.target.target\n    if (target.arch == pwnypack.target.Target.Arch.x86):\n        if (target.bits is pwnypack.target.Target.Bits.bits_32):\n            md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)\n        else:\n            md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)\n    elif (target.arch == pwnypack.target.Target.Arch.arm):\n        mode = 0\n        if (target.bits is pwnypack.target.Target.Bits.bits_32):\n            arch = capstone.CS_ARCH_ARM\n            if (target.mode and pwnypack.target.Target.Mode.arm_thumb):\n                mode = capstone.CS_MODE_THUMB\n            else:\n                mode = capstone.CS_MODE_ARM\n                if (target.mode and pwnypack.target.Target.Mode.arm_m_class):\n                    mode |= capstone.CS_MODE_MCLASS\n            if (target.mode and pwnypack.target.Target.Mode.arm_v8):\n                mode |= capstone.CS_MODE_V8\n        else:\n            arch = capstone.CS_ARCH_ARM64\n        if (target.endian is pwnypack.target.Target.Endian.little):\n            mode |= capstone.CS_MODE_LITTLE_ENDIAN\n        else:\n            mode |= capstone.CS_MODE_BIG_ENDIAN\n        md = capstone.Cs(arch, mode)\n    else:\n        raise NotImplementedError('Only x86 is currently supported.')\n    md.skipdata = True\n    if (syntax is AsmSyntax.att):\n        md.syntax = capstone.CS_OPT_SYNTAX_ATT\n    elif (syntax is AsmSyntax.intel):\n        md.skipdata_setup(('db', None, None))\n    else:\n        raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.')\n    return md", "docstring": "Prepare a capstone disassembler instance for a given target and syntax.\n\nArgs:\nsyntax(AsmSyntax): The assembler syntax (Intel or AT&T).\ntarget(~pwnypack.target.Target): The target to create a disassembler\ninstance for. The global target is used if this argument is\n``None``.\n\nReturns:\nAn instance of the capstone disassembler.\n\nRaises:\nNotImplementedError: If the specified target isn't supported.", "source": "codesearchnet"}
{"code": "def initialize(self, request, response):\n    \n    super(TaskQueueHandler, self).initialize(request, response)\n\n    \n    if \"X-AppEngine-QueueName\" not in self.request.headers:\n      logging.error(self.request.headers)\n      logging.error(\"Task queue handler received non-task queue request\")\n      self.response.set_status(\n          403, message=\"Task queue handler received non-task queue request\")\n      return\n\n    \n    if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:\n      logging.error(\n          \"Task %s has been attempted %s times. Dropping it permanently.\",\n          self.request.headers[\"X-AppEngine-TaskName\"],\n          self.task_retry_count() + 1)\n      self._drop_gracefully()\n      return\n\n    try:\n      self._preprocess()\n      self._preprocess_success = True\n    \n    except:\n      self._preprocess_success = False\n      logging.error(\n          \"Preprocess task %s failed. Dropping it permanently.\",\n          self.request.headers[\"X-AppEngine-TaskName\"])\n      self._drop_gracefully()", "docstring": "Initialize.\n\n1. call webapp init.\n2. check request is indeed from taskqueue.\n3. check the task has not been retried too many times.\n4. run handler specific processing logic.\n5. run error handling logic if precessing failed.\n\nArgs:\nrequest: a webapp.Request instance.\nresponse: a webapp.Response instance.", "source": "juraj-google-style"}
{"code": "def _run_function_for_calibration_eager_mode(func: wrap_function.WrappedFunction, representative_dataset: rd.RepresentativeDataset) -> None:\n    _, keyword_args = func.structured_input_signature\n    sample_validator = _create_sample_validator(expected_input_keys=keyword_args.keys())\n    for sample in map(sample_validator, _log_sample_num_for_calibration(representative_dataset)):\n        func_kwargs = _convert_values_to_tf_tensors(sample)\n        func(**func_kwargs)", "docstring": "Runs the representative dataset through a function for calibration.\n\nNOTE: This is intended to be run in eager mode (TF2).\n\nArgs:\nfunc: The function to run the representative samples through.\nrepresentative_dataset: Representative dataset used for calibration. The\ninput keys and input values of the representative samples should match the\nkeyword arguments of `func`.", "source": "github-repos"}
{"code": "def register(self, method_name: str, func: Union[def_function.Function, tf_function.ConcreteFunction]):\n    raise NotImplementedError('Please use create_server method to create aconcrete subclass of Server.')", "docstring": "Method for registering tf.function on server.\n\nRegistered methods can be invoked remotely from clients.\n\nArgs:\nmethod_name: Name of the tf.function. Clients use this method_name to make\nRPCs.\nfunc: A `tf.function` or ConcreteFunction to register.", "source": "github-repos"}
{"code": "def __deepcopy__(self, memo):\n        \n        self._copy_counter += 1\n        new_dag = Dag('{}:{}'.format(self._name, self._copy_counter),\n                      autostart=self._autostart, queue=self._queue)\n        new_dag._schema = deepcopy(self._schema, memo)\n        return new_dag", "docstring": "Create a copy of the dag object.\n\nThis method keeps track of the number of copies that have been made. The number is\nappended to the name of the copy.\n\nArgs:\nmemo (dict): a dictionary that keeps track of the objects that\nhave already been copied.\n\nReturns:\nDag: a copy of the dag object", "source": "juraj-google-style"}
{"code": "def get_index_mapping(index):\n    mappings_dir = get_setting('mappings_dir')\n    filename = ('%s.json' % index)\n    path = os.path.join(mappings_dir, filename)\n    with open(path, 'r') as f:\n        return json.load(f)", "docstring": "Return the JSON mapping file for an index.\n\nMappings are stored as JSON files in the mappings subdirectory of this\napp. They must be saved as {{index}}.json.\n\nArgs:\nindex: string, the name of the index to look for.", "source": "codesearchnet"}
{"code": "def FindCheckMacro(line):\n  \n  for macro in _CHECK_MACROS:\n    i = line.find(macro)\n    if i >= 0:\n      \n      \n      \n      \n      matched = Match(r'^(.*\\b' + macro + r'\\s*)\\(', line)\n      if not matched:\n        continue\n      return (macro, len(matched.group(1)))\n  return (None, -1)", "docstring": "Find a replaceable CHECK-like macro.\n\nArgs:\nline: line to search on.\nReturns:\n(macro name, start position), or (None, -1) if no replaceable\nmacro is found.", "source": "juraj-google-style"}
{"code": "def _Open(self, path_spec=None, mode='rb'):\n    \n    if not path_spec:\n      raise ValueError('Missing path specification.')\n\n    file_system = resolver.Resolver.OpenFileSystem(\n        path_spec, resolver_context=self._resolver_context)\n\n    file_entry = file_system.GetFileEntryByPathSpec(path_spec)\n    if not file_entry:\n      file_system.Close()\n      raise IOError('Unable to retrieve file entry.')\n\n    self._file_system = file_system\n    self._cpio_archive_file = self._file_system.GetCPIOArchiveFile()\n    self._cpio_archive_file_entry = file_entry.GetCPIOArchiveFileEntry()\n\n    self._current_offset = 0", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (Optional[PathSpec]): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def GetTableView(cls, format_type, column_names=None, title=None):\n    \n    view_class = cls._TABLE_VIEW_FORMAT_CLASSES.get(format_type, None)\n    if not view_class:\n      raise ValueError('Unsupported format type: {0:s}'.format(format_type))\n\n    return view_class(column_names=column_names, title=title)", "docstring": "Retrieves a table view.\n\nArgs:\nformat_type (str): table view format type.\ncolumn_names (Optional[list[str]]): column names.\ntitle (Optional[str]): title.\n\nReturns:\nBaseTableView: table view.\n\nRaises:\nValueError: if the format type is not supported.", "source": "juraj-google-style"}
{"code": "def _ExtractResponseSummaryFields(document):\n    headers = document.childAtPath('Envelope/Header/ResponseHeader')\n    body = document.childAtPath('Envelope/Body')\n    summary_fields = {}\n    if (headers is not None):\n        summary_fields['requestId'] = headers.getChild('requestId').text\n        summary_fields['responseTime'] = headers.getChild('responseTime').text\n        service_name = headers.getChild('serviceName')\n        if (service_name is not None):\n            summary_fields['serviceName'] = service_name.text\n        method_name = headers.getChild('methodName')\n        if (method_name is not None):\n            summary_fields['methodName'] = method_name.text\n        operations = headers.getChild('operations')\n        if (operations is not None):\n            summary_fields['operations'] = operations.text\n    if (body is not None):\n        fault = body.getChild('Fault')\n        if (fault is not None):\n            summary_fields['isFault'] = True\n            summary_fields['faultMessage'] = fault.getChild('faultstring').text[:16000]\n        else:\n            summary_fields['isFault'] = False\n    return summary_fields", "docstring": "Extract logging fields from the response's suds.sax.document.Document.\n\nArgs:\ndocument: A suds.sax.document.Document instance containing the parsed\nAPI response for a given API request.\n\nReturns:\nA dict mapping logging field names to their corresponding value.", "source": "codesearchnet"}
{"code": "def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, xpaths: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, node_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n\n    def _is_valid_text_input(t):\n        if isinstance(t, str):\n            return True\n        elif isinstance(t, (list, tuple)):\n            if len(t) == 0:\n                return True\n            elif isinstance(t[0], str):\n                return True\n            elif isinstance(t[0], (list, tuple)):\n                return len(t[0]) == 0 or isinstance(t[0][0], str)\n            else:\n                return False\n        else:\n            return False\n    if text_pair is not None:\n        if not _is_valid_text_input(text):\n            raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')\n        if not isinstance(text_pair, (list, tuple)):\n            raise ValueError('Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    elif not isinstance(text, (list, tuple)):\n        raise ValueError('Nodes must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n    if text_pair is not None:\n        is_batched = isinstance(text, (list, tuple))\n    else:\n        is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))\n    nodes = text if text_pair is None else text_pair\n    assert xpaths is not None, 'You must provide corresponding xpaths'\n    if is_batched:\n        assert len(nodes) == len(xpaths), 'You must provide nodes and xpaths for an equal amount of examples'\n        for nodes_example, xpaths_example in zip(nodes, xpaths):\n            assert len(nodes_example) == len(xpaths_example), 'You must provide as many nodes as there are xpaths'\n    else:\n        assert len(nodes) == len(xpaths), 'You must provide as many nodes as there are xpaths'\n    if is_batched:\n        if text_pair is not None and len(text) != len(text_pair):\n            raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')\n        batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n        is_pair = bool(text_pair is not None)\n        return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, 
return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n    else:\n        return self.encode_plus(text=text, text_pair=text_pair, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\nsequences with nodes, xpaths and optional labels.\n\nArgs:\ntext (`str`, `List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings\n(words of a single example or questions of a batch of examples) or a list of list of strings (batch of\nwords).\ntext_pair (`List[str]`, `List[List[str]]`):\nThe sequence or batch of sequences to be encoded. Each sequence should be a list of strings\n(pretokenized string).\nxpaths (`List[List[int]]`, `List[List[List[int]]]`):\nNode-level xpaths. Each bounding box should be normalized to be on a 0-1000 scale.\nnode_labels (`List[int]`, `List[List[int]]`, *optional*):\nNode-level integer labels (for token classification tasks).", "source": "github-repos"}
{"code": "def load_csv(path):\n    \n\n    with open(path) as f:\n        line = f.readline().strip()\n\n    X = np.loadtxt(path, delimiter=',',\n                   skiprows=0 if is_number(line.split(',')[0]) else 1)\n\n    y = np.array(X[:, 0]).flatten()\n    X = X[:, 1:]\n\n    return X, y", "docstring": "Load data from a CSV file.\n\nArgs:\npath (str): A path to the CSV format file containing data.\ndense (boolean): An optional variable indicating if the return matrix\nshould be dense.  By default, it is false.\n\nReturns:\nData matrix X and target vector y", "source": "juraj-google-style"}
{"code": "def _loadCSVDataFrame(self):\n    if (self._filename and os.path.exists(self._filename)):\n        encoding = (self._encodingKey or 'UTF_8')\n        try:\n            dataFrame = superReadFile(self._filename, sep=self._delimiter, first_codec=encoding, header=self._header)\n            dataFrame = dataFrame.apply(fillNoneValues)\n            dataFrame = dataFrame.apply(convertTimestamps)\n        except Exception as err:\n            self.updateStatusBar(str(err))\n            print(err)\n            return pandas.DataFrame()\n        self.updateStatusBar('Preview generated.')\n        return dataFrame\n    self.updateStatusBar('File could not be read.')\n    return pandas.DataFrame()", "docstring": "Loads the given csv file with pandas and generate a new dataframe.\n\nThe file will be loaded with the configured encoding, delimiter\nand header.git\nIf any execptions will occur, an empty Dataframe is generated\nand a message will appear in the status bar.\n\nReturns:\npandas.DataFrame: A dataframe containing all the available\ninformation of the csv file.", "source": "codesearchnet"}
{"code": "def default_sequence_length(self) -> int:\n    return OnnxConfig.default_fixed_sequence", "docstring": "The default sequence length to use if no other indication\n\nReturns:\nInteger > 0", "source": "github-repos"}
{"code": "def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True, *, copy_xla_sharding=False):\n    if dtype is None:\n        dtype = primary.dtype\n    slot_shape = primary.get_shape()\n    if slot_shape.is_fully_defined():\n        initializer = init_ops.zeros_initializer()\n        return create_slot_with_initializer(primary, initializer, slot_shape, dtype, name, colocate_with_primary=colocate_with_primary, copy_xla_sharding=copy_xla_sharding)\n    else:\n        if isinstance(primary, variables.Variable):\n            slot_shape = array_ops.shape(cond.cond(variable_v1.is_variable_initialized(primary), primary.read_value, lambda: primary.initial_value))\n        else:\n            slot_shape = array_ops.shape(primary)\n        val = array_ops.zeros(slot_shape, dtype=dtype)\n        return create_slot(primary, val, name, colocate_with_primary=colocate_with_primary, copy_xla_sharding=copy_xla_sharding)", "docstring": "Create a slot initialized to 0 with same shape as the primary object.\n\nArgs:\nprimary: The primary `Variable` or `Tensor`.\nname: Name to use for the slot variable.\ndtype: Type of the slot variable.  Defaults to the type of `primary`.\ncolocate_with_primary: Boolean.  If True the slot is located\non the same device as `primary`.\ncopy_xla_sharding: Boolean. If True also copies XLA sharding\nfrom primary.\n\nReturns:\nA `Variable` object.", "source": "github-repos"}
{"code": "def _update_object(object_key: str, event: Event):\n    events_list_key = _keys.events_list(object_key)\n    events_data_key = _keys.events_data(object_key)\n    event_dict = deepcopy(event.config)\n    event_dict.pop('id')\n    DB.append_to_list(events_list_key, event.id, pipeline=True)\n    DB.set_hash_value(events_data_key, event.id, json.dumps(event_dict), pipeline=True)", "docstring": "Update the events list and events data for the object.\n\n- Adds the event Id to the list of events for the object.\n- Adds the event data to the hash of object event data keyed by event\nid.\n\nArgs:\nobject_key (str): Key of the object being updated.\nevent (Event): Event object", "source": "codesearchnet"}
{"code": "def get_coordination_numbers(d):\n    \n    structure = Structure.from_dict(d[\"output\"][\"crystal\"])\n    f = VoronoiNN()\n    cn = []\n    for i, s in enumerate(structure.sites):\n        try:\n            n = f.get_cn(structure, i)\n            number = int(round(n))\n            cn.append({\"site\": s.as_dict(), \"coordination\": number})\n        except Exception:\n            logger.error(\"Unable to parse coordination errors\")\n    return cn", "docstring": "Helper method to get the coordination number of all sites in the final\nstructure from a run.\n\nArgs:\nd:\nRun dict generated by VaspToDbTaskDrone.\n\nReturns:\nCoordination numbers as a list of dict of [{\"site\": site_dict,\n\"coordination\": number}, ...].", "source": "juraj-google-style"}
{"code": "def _static_value_provider_of(value_type):\n\n    def _f(value):\n        _f.__name__ = value_type.__name__\n        return StaticValueProvider(value_type, value)\n    return _f", "docstring": "Helper function to plug a ValueProvider into argparse.\n\nArgs:\nvalue_type: the type of the value. Since the type param of argparse's\nadd_argument will always be ValueProvider, we need to\npreserve the type of the actual value.\nReturns:\nA partially constructed StaticValueProvider in the form of a function.", "source": "github-repos"}
{"code": "def get_foreign_id(self, idspace='musicbrainz', cache=True):\n        \n        if not (cache and ('foreign_ids' in self.cache) and filter(lambda d: d.get('catalog') == idspace, self.cache['foreign_ids'])):\n            response = self.get_attribute('profile', bucket=['id:'+idspace])\n            foreign_ids = response['artist'].get(\"foreign_ids\", [])\n            self.cache['foreign_ids'] = self.cache.get('foreign_ids', []) + foreign_ids\n        cval = filter(lambda d: d.get('catalog') == util.map_idspace(idspace),\n                      self.cache.get('foreign_ids'))\n        return cval[0].get('foreign_id') if cval else None", "docstring": "Get the foreign id for this artist for a specific id space\n\nArgs:\n\nKwargs:\nidspace (str): A string indicating the idspace to fetch a foreign id for.\n\nReturns:\nA foreign ID string\n\nExample:\n\n>>> a = artist.Artist('fabulous')\n>>> a.get_foreign_id('7digital')\nu'7digital:artist:186042'\n>>>", "source": "juraj-google-style"}
{"code": "def list_files(base_path, ext=None):\n    \n    if not os.path.isdir(base_path):\n        raise ValueError(\"Path does not exist: %s\" % base_path)\n\n    files = []\n    for entry in os.listdir(base_path):\n        if os.path.isfile(os.path.join(base_path, entry)):\n            _, entry_ext = os.path.splitext(entry)\n            entry_ext = entry_ext.lstrip('.')\n\n            if (ext is None) or \\\n                (isinstance(ext, str) and entry_ext == ext) or \\\n                (isinstance(ext, list) and entry_ext in ext):\n                files.append(entry)\n\n    return files", "docstring": "Lists all of the files in the given base directory, optionally only\nincluding whose extension(s) match the ext string/list of strings.\nThis is non-recursive.\n\nArgs:\nbase_path: The directory in which to search.\next: The extension(s) to match in the given directory. If None, this\nmatches all file extensions.\n\nReturns:\nA list of filenames relative to the given base path.", "source": "juraj-google-style"}
{"code": "def _save_model_and_copy_assets(exported_model: exported_model_pb2.ExportedModel, src_saved_model_path: str, dst_saved_model_path: str, signature_def_map: Mapping[str, meta_graph_pb2.SignatureDef], tags: Collection[str]) -> bool:\n    save_model.save_model_v1(exported_model.graph_def, dst_saved_model_path, signature_def_map, tags, init_op_name=exported_model.init_node_name, saver_def=_get_saver_def_or_none(exported_model), checkpoint_dir=exported_model.checkpoint_dir, function_aliases=exported_model.function_aliases, asset_file_defs=exported_model.asset_file_defs)\n    _copy_assets(src_saved_model_path, dst_saved_model_path)\n    return True", "docstring": "Saves the model and copies the assets from the source model.\n\nArgs:\nexported_model: ExportedModel to save.\nsrc_saved_model_path: Path to the source SavedModel. This will be used to\ncopy the asset files to `dst_saved_model_path`.\ndst_saved_model_path: Destination path to save the exported model.\nsignature_def_map: Signature key -> SignatureDef mapping.\ntags: Tags to attach to the saved MetaGraphDef.\n\nReturns:\n`True` upon successfully saving the model.", "source": "github-repos"}
{"code": "def __init__(self, checkpoint_dir: Text, save_secs: Optional[int]=None, save_steps: Optional[int]=None, saver: Optional[saver_lib.Saver]=None, checkpoint_basename: Text='model.ckpt', scaffold: Optional[monitored_session.Scaffold]=None, listeners: Optional[List[basic_session_run_hooks.CheckpointSaverListener]]=None):\n    save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n    logging.info('Create AsyncCheckpointSaverHook saving to path\\n%s', save_path)\n    if listeners:\n        logging.info(' with %d listener(s).', len(listeners))\n    if saver is not None and scaffold is not None:\n        raise ValueError('You cannot provide both saver and scaffold.')\n    self._saver = saver\n    self._save_thread = None\n    self._write_graph_thread = None\n    self._checkpoint_dir = checkpoint_dir\n    self._save_path = save_path\n    self._scaffold = scaffold\n    self._timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)\n    self._listeners = listeners or []\n    self._steps_per_run = 1\n    self._summary_writer = None\n    self._global_step_tensor = None\n    self._last_checkpoint_step = None\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        if _END_TIME_OF_LAST_WRITE is None:\n            _END_TIME_OF_LAST_WRITE = time.time()", "docstring": "Initializes a `CheckpointSaverHook`.\n\nArgs:\ncheckpoint_dir: `str`, base directory for the checkpoint files.\nsave_secs: `int`, save every N secs.\nsave_steps: `int`, save every N steps.\nsaver: `Saver` object, used for saving.\ncheckpoint_basename: `str`, base name for the checkpoint files.\nscaffold: `Scaffold`, use to get saver object.\nlisteners: List of `CheckpointSaverListener` subclass instances. Used for\ncallbacks that run immediately before or after this hook saves the\ncheckpoint.\n\nRaises:\nValueError: One of `save_steps` or `save_secs` should be set.\nValueError: At most one of `saver` or `scaffold` should be set.", "source": "github-repos"}
{"code": "def helper_delete(access_token, oid, path):\n    full_path = ''.join([path, \"('\", oid, \"')\"])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    return do_ams_delete(endpoint, full_path_encoded, access_token)", "docstring": "Helper Function to delete a Object at a URL path.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\noid (str): An OID.\npath (str): A URL Path.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def to(self, device: Union[str, 'torch.device'], *, non_blocking: bool=False) -> 'BatchEncoding':\n    requires_backends(self, ['torch'])\n    import torch\n    if isinstance(device, str) or is_torch_device(device) or isinstance(device, int):\n        self.data = {k: v.to(device=device, non_blocking=non_blocking) if isinstance(v, torch.Tensor) else v for k, v in self.data.items()}\n    else:\n        logger.warning(f'Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.')\n    return self", "docstring": "Send all values to device by calling `v.to(device, non_blocking=non_blocking)` (PyTorch only).\n\nArgs:\ndevice (`str` or `torch.device`): The device to put the tensors on.\nnon_blocking (`bool`): Whether to perform the copy asynchronously.\n\nReturns:\n[`BatchEncoding`]: The same instance after modification.", "source": "github-repos"}
{"code": "def format(self, template='{basename}{range}{padding}{extension}'):\n    inverted = ((self.invertedFrameRange() or '') if ('{inverted}' in template) else '')\n    return template.format(basename=self.basename(), extension=self.extension(), start=self.start(), end=self.end(), length=len(self), padding=self.padding(), range=(self.frameRange() or ''), inverted=inverted, dirname=self.dirname())", "docstring": "Return the file sequence as a formatted string according to\nthe given template.\n\nUtilizes the python string format syntax.  Available keys include:\n* basename - the basename of the sequence.\n* extension - the file extension of the sequence.\n* start - the start frame.\n* end - the end frame.\n* length - the length of the frame range.\n* padding - the detecting amount of padding.\n* inverted - the inverted frame range. (returns \"\" if none)\n* dirname - the directory name.\n\nIf asking for the inverted range value, and the new inverted range\nexceeded :const:`fileseq.constants.MAX_FRAME_SIZE`, a ``MaxSizeException``\nwill be raised.\n\nArgs:\ntemplate (str):\n\nReturns:\nstr:\n\nRaises:\n:class:`fileseq.exceptions.MaxSizeException`: If frame size exceeds\n:const:`fileseq.constants.MAX_FRAME_SIZE`", "source": "codesearchnet"}
{"code": "def chart_type(self, value):\n        \n        if value not in self._allowed_charts:\n            raise ValueError(\"Not a valid chart type\")\n\n        self.options[\"chart_type\"] = value", "docstring": "Set the MetricsGraphics chart type.\nAllowed charts are: line, histogram, point, and bar\n\nArgs:\nvalue (str): chart type.\n\nRaises:\nValueError: Not a valid chart type.", "source": "juraj-google-style"}
{"code": "def _init_sampler(tc, init, num):\n\n    def func():\n        with tc.test_session():\n            return init([num]).eval()\n    return func", "docstring": "Returns a func to generate a random tensor of shape [num].\n\nArgs:\ntc: An instance of TensorFlowTestCase.\ninit: An Initializer that generates a tensor of a given shape\nnum: Size of 1D tensor to create.\n\nReturns:\nFunction to generate a random tensor.", "source": "github-repos"}
{"code": "def dice_loss(inputs, targets, num_boxes):\n    inputs = inputs.sigmoid()\n    inputs = inputs.flatten(1)\n    numerator = 2 * (inputs * targets).sum(1)\n    denominator = inputs.sum(-1) + targets.sum(-1)\n    loss = 1 - (numerator + 1) / (denominator + 1)\n    return loss.sum() / num_boxes", "docstring": "Compute the DICE loss, similar to generalized IOU for masks\n\nArgs:\ninputs: A float tensor of arbitrary shape.\nThe predictions for each example.\ntargets: A float tensor with the same shape as inputs. Stores the binary\nclassification label for each element in inputs (0 for the negative class and 1 for the positive\nclass).", "source": "github-repos"}
{"code": "def build(cls, **kwargs):\n        \n        return cls.add(cls.new(**kwargs), commit=False)", "docstring": "Similar to create. But the transaction is not committed\n\nArgs:\n\n**kwargs : The keyword arguments for the constructor\n\nReturns:\n\nA model instance which has been added to db session. But session\ntransaction has not been committed yet.", "source": "juraj-google-style"}
{"code": "def Delete(self, queue, tasks, mutation_pool=None):\n    \n    if queue is None:\n      return\n    if mutation_pool is None:\n      raise ValueError(\"Mutation pool can't be none.\")\n    mutation_pool.QueueDeleteTasks(queue, tasks)", "docstring": "Removes the tasks from the queue.\n\nNote that tasks can already have been removed. It is not an error\nto re-delete an already deleted task.\n\nArgs:\nqueue: A queue to clear.\ntasks: A list of tasks to remove. Tasks may be Task() instances or integers\nrepresenting the task_id.\nmutation_pool: A MutationPool object to schedule deletions on.\n\nRaises:\nValueError: Mutation pool was not passed in.", "source": "juraj-google-style"}
{"code": "def __init__(self, key_dtype, value_dtype):\n    self._key_dtype = dtypes.as_dtype(key_dtype)\n    self._value_dtype = dtypes.as_dtype(value_dtype)", "docstring": "Construct a table initializer object.\n\nArgs:\nkey_dtype: Type of the table keys.\nvalue_dtype: Type of the table values.", "source": "github-repos"}
{"code": "def WriteBlobsWithUnknownHashes(\n      self, blobs_data):\n    \n    blobs_ids = [rdf_objects.BlobID.FromBlobData(d) for d in blobs_data]\n    self.WriteBlobs(dict(zip(blobs_ids, blobs_data)))\n    return blobs_ids", "docstring": "Calculates hash ids and writes contents of given data blobs.\n\nArgs:\nblobs_data: An iterable of bytes.\n\nReturns:\nA list of rdf_objects.BlobID objects with each blob id corresponding\nto an element in the original blobs_data argument.", "source": "juraj-google-style"}
{"code": "def MergeAllSummaries(period=0, run_alone=False, key=None):\n    if (key is None):\n        key = tf.GraphKeys.SUMMARIES\n    period = int(period)\n    if run_alone:\n        return MergeAllSummaries_RunAlone(period, key)\n    else:\n        return MergeAllSummaries_RunWithOp(period, key)", "docstring": "This callback is enabled by default.\nEvaluate all summaries by ``tf.summary.merge_all``, and write them to logs.\n\nArgs:\nperiod (int): by default the callback summarizes once every epoch.\nThis option (if not set to 0) makes it additionally summarize every ``period`` steps.\nrun_alone (bool): whether to evaluate the summaries alone.\nIf True, summaries will be evaluated after each epoch alone.\nIf False, summaries will be evaluated together with the\n`sess.run` calls, in the last step of each epoch.\nFor :class:`SimpleTrainer`, it needs to be False because summary may\ndepend on inputs.\nkey (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``.\nDefault is ``tf.GraphKeys.SUMMARIES``.", "source": "codesearchnet"}
{"code": "def _CheckFileEntryType(self, file_entry):\n    \n    if not self._file_entry_types:\n      return None\n\n    return (\n        self._CheckIsDevice(file_entry) or self._CheckIsDirectory(file_entry) or\n        self._CheckIsFile(file_entry) or self._CheckIsLink(file_entry) or\n        self._CheckIsPipe(file_entry) or self._CheckIsSocket(file_entry))", "docstring": "Checks the file entry type find specifications.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if\nnot or None if no file entry type specification is defined.", "source": "juraj-google-style"}
{"code": "def __init__(self, cell_ctor, *args, **kwargs):\n    \n    super(RNNCellWrapper, self).__init__(\n        name=kwargs.get(\"name\"),\n        custom_getter=kwargs.pop(\"custom_getter\", None))\n\n    with self._enter_variable_scope():\n      self._cell = cell_ctor(*args, **kwargs)", "docstring": "Constructs the cell, within this module's variable scope.\n\nArgs:\ncell_ctor: Callable that instantiates a `tf.contrib.rnn.RNNCell`.\n*args: Arguments to pass to `cell_ctor`.\n**kwargs: Keyword arguments to pass to `cell_ctor`.\nIf `name` is provided, it is passed to `RNNCore.__init__` as well.\nIf `custom_getter` is provided, it is passed to `RNNCore.__init__`\nbut not to `cell_ctor`.", "source": "juraj-google-style"}
{"code": "def _GetTimestamps(self, olecf_item):\n    if (not olecf_item):\n        return (None, None)\n    try:\n        creation_time = olecf_item.get_creation_time_as_integer()\n    except OverflowError as exception:\n        logger.warning('Unable to read the creation time with error: {0!s}'.format(exception))\n        creation_time = 0\n    try:\n        modification_time = olecf_item.get_modification_time_as_integer()\n    except OverflowError as exception:\n        logger.warning('Unable to read the modification time with error: {0!s}'.format(exception))\n        modification_time = 0\n    if ((not creation_time) and (not modification_time)):\n        return (None, None)\n    if (creation_time == 18446744073709551615):\n        creation_time = 0\n    return (creation_time, modification_time)", "docstring": "Retrieves the timestamps from an OLECF item.\n\nArgs:\nolecf_item (pyolecf.item): OLECF item.\n\nReturns:\ntuple[int, int]: creation and modification FILETIME timestamp.", "source": "codesearchnet"}
{"code": "def design_stat_heating(self, value=\"Heating\"):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type str '\n                    'for field `design_stat_heating`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `design_stat_heating`')\n            vals = set()\n            vals.add(\"Heating\")\n            if value not in vals:\n                raise ValueError('value {} is not an accepted value for '\n                                 'field `design_stat_heating`'.format(value))\n\n        self._design_stat_heating = value", "docstring": "Corresponds to IDD Field `design_stat_heating`\n\nArgs:\nvalue (str): value for IDD Field `design_stat_heating`\nAccepted values are:\n- Heating\nDefault value: Heating\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def unpack(self, buff=None, offset=0):\n        \n        length = UBInt16()\n        length.unpack(buff, offset)\n        super().unpack(buff[:offset+length.value], offset)", "docstring": "Unpack *buff* into this object.\n\nThis method will convert a binary data into a readable value according\nto the attribute format.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def _generate_comparator(cls, field_names):\n    field_names = list(field_names)\n    reverses = ([1] * len(field_names))\n    for (i, field_name) in enumerate(field_names):\n        if (field_name[0] == '-'):\n            reverses[i] = (- 1)\n            field_names[i] = field_name[1:]\n    field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names]\n\n    def comparator(i1, i2):\n        v1 = attrgetter(*field_names)(i1)\n        v2 = attrgetter(*field_names)(i2)\n        if (len(field_names) == 1):\n            return (cls._cmp(v1, v2) * reverses[0])\n        order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses)\n        try:\n            return next(dropwhile(__not__, order))\n        except StopIteration:\n            return 0\n    return comparator", "docstring": "Construct a comparator function based on the field names. The comparator\nreturns the first non-zero comparison value.\n\nInputs:\nfield_names (iterable of strings): The field names to sort on.\n\nReturns:\nA comparator function.", "source": "codesearchnet"}
{"code": "def get_nmr_prize_pool(self, round_num=0, tournament=1):\n    tournaments = self.get_competitions(tournament)\n    tournaments.sort(key=(lambda t: t['number']))\n    if (round_num == 0):\n        t = tournaments[(- 1)]\n    else:\n        tournaments = [t for t in tournaments if (t['number'] == round_num)]\n        if (len(tournaments) == 0):\n            raise ValueError('invalid round number')\n        t = tournaments[0]\n    return t['prizePoolNmr']", "docstring": "Get NMR prize pool for the given round and tournament.\n\nArgs:\nround_num (int, optional): The round you are interested in,\ndefaults to current round.\ntournament (int, optional): ID of the tournament, defaults to 1\n\nReturns:\ndecimal.Decimal: prize pool in NMR\n\nRaises:\nValue Error: in case of invalid round number", "source": "codesearchnet"}
{"code": "def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):\n    if (direction not in ('in', 'out')):\n        raise ValueError('Argument `direction` should be one of \"in\" or \"out\". Got {}'.format(repr(direction)))\n    if (dead_zone >= percent):\n        raise ValueError('Argument `dead_zone` should not be greater than `percent`. dead_zoon={}, percent={}'.format(repr(dead_zone), repr(percent)))\n    (w, h) = self.get_size()\n    (x, y) = self.get_position()\n    tracks = make_pinching(direction, [x, y], [w, h], percent, dead_zone, duration)\n    speed = (((math.sqrt((w * h)) * (percent - dead_zone)) / 2) / duration)\n    ret = self.poco.apply_motion_tracks(tracks, accuracy=(speed * 0.03))\n    return ret", "docstring": "Squeezing or expanding 2 fingers on this UI with given motion range and duration.\n\nArgs:\ndirection (:py:obj:`str`): pinching direction, only \"in\" or \"out\". \"in\" for squeezing, \"out\" for expanding\npercent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI\nduration (:py:obj:`float`): time interval in which the action is performed\ndead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``\n\nRaises:\nPocoNoSuchNodeException: raised when the UI element does not exist", "source": "codesearchnet"}
{"code": "def delete_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name, '/securityRules/', nsg_rule_name, '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete network security group rule.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnsg_name (str): Name of the Network Security Group.\nnsg_rule_name (str): Name of the NSG rule.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def __init__(self, runner_capabilities: FrozenSet[str], process_bundle_descriptor: beam_fn_api_pb2.ProcessBundleDescriptor, state_handler: sdk_worker.CachingStateHandler, data_channel_factory: data_plane.DataChannelFactory, data_sampler: Optional[data_sampler.DataSampler]=None) -> None:\n    self.runner_capabilities = runner_capabilities\n    self.process_bundle_descriptor = process_bundle_descriptor\n    self.state_handler = state_handler\n    self.data_channel_factory = data_channel_factory\n    self.data_sampler = data_sampler\n    self.current_instruction_id: Optional[str] = None\n    self.consuming_received_data = False\n    _verify_descriptor_created_in_a_compatible_env(process_bundle_descriptor)\n    if self.process_bundle_descriptor.timer_api_service_descriptor.url:\n        self.timer_data_channel = data_channel_factory.create_data_channel_from_url(self.process_bundle_descriptor.timer_api_service_descriptor.url)\n    else:\n        self.timer_data_channel = None\n    self.timers_info: Dict[Tuple[str, str], TimerInfo] = {}\n    self.counter_factory = counters.CounterFactory()\n    self.state_sampler = statesampler.StateSampler('fnapi-step-%s' % self.process_bundle_descriptor.id, self.counter_factory)\n    self.ops = self.create_execution_tree(self.process_bundle_descriptor)\n    for op in reversed(self.ops.values()):\n        op.setup(self.data_sampler)\n    self.splitting_lock = threading.Lock()", "docstring": "Initialize a bundle processor.\n\nArgs:\nrunner_capabilities (``FrozenSet[str]``): The set of capabilities of the\nrunner with which we will be interacting\nprocess_bundle_descriptor (``beam_fn_api_pb2.ProcessBundleDescriptor``):\na description of the stage that this ``BundleProcessor``is to execute.\nstate_handler (CachingStateHandler).\ndata_channel_factory (``data_plane.DataChannelFactory``).", "source": "github-repos"}
{"code": "def search(self, patterns, start=30, limit=1000, include_category=False):\n        \n        api_name = 'opendns-patterns'\n        fmt_url_path = u'search/{0}'\n        start = '-{0}days'.format(start)\n        include_category = str(include_category).lower()\n        query_params = {\n            'start': start,\n            'limit': limit,\n            'includecategory': include_category,\n        }\n        return self._multi_get(api_name, fmt_url_path, patterns, query_params)", "docstring": "Performs pattern searches against the Investigate database.\n\nArgs:\npatterns: An enumerable of RegEx domain patterns to search for\nstart:   How far back results extend from in days (max is 30)\nlimit:   Number of results to show (max is 1000)\ninclude_category: Include OpenDNS security categories\nReturns:\nAn enumerable of matching domain strings", "source": "juraj-google-style"}
{"code": "def verify(self, message, signature):\n        \n        message = _helpers._to_bytes(message, encoding='utf-8')\n        return PKCS1_v1_5.new(self._pubkey).verify(\n            SHA256.new(message), signature)", "docstring": "Verifies a message against a signature.\n\nArgs:\nmessage: string or bytes, The message to verify. If string, will be\nencoded to bytes as utf-8.\nsignature: string or bytes, The signature on the message.\n\nReturns:\nTrue if message was signed by the private key associated with the\npublic key that this object was constructed with.", "source": "juraj-google-style"}
{"code": "def acquire(self, key: Text, constructor_fn: Callable[[], Any], tag: Any=None) -> Any:\n    with self._lock:\n        control_block = self._cache_map.get(key)\n        if control_block is None:\n            control_block = _SharedControlBlock()\n            self._cache_map[key] = control_block\n    result = control_block.acquire(constructor_fn, tag)\n    with self._lock:\n        self._keepalive = (key, result)\n    return result", "docstring": "Acquire a reference to a Shared object.\n\nArgs:\nkey: the key to the shared object\nconstructor_fn: function that initialises / constructs the object if not\npresent in the cache. This function should take no arguments. It should\nreturn an initialised object, or None if the object could not be\ninitialised / constructed.\ntag: an optional indentifier to store with the cached object. If\nsubsequent calls to acquire use different tags, the object will be\nreloaded rather than returned from cache.\n\nReturns:\nA reference to the initialised object, either from the cache, or\nnewly-constructed.", "source": "github-repos"}
{"code": "def connect(portname, baudrate):\n    global SERPORT\n    try:\n        SERPORT = serial.Serial(portname, baudrate, timeout=0.1)\n    except:\n        raise HerkulexError('could not open the serial port')", "docstring": "Connect to the Herkulex bus\n\nConnect to serial port to which Herkulex Servos are attatched\n\nArgs:\nportname (str): The serial port name\nbaudrate (int): The serial port baudrate\nRaises:\nSerialException: Error occured while opening serial port", "source": "codesearchnet"}
{"code": "def __init__(self, value=b''):\n        \n        super(CertificateValue, self).__init__(value, Tags.CERTIFICATE_VALUE)", "docstring": "Construct a CertificateValue byte string.\n\nArgs:\nvalue (bytes): A byte string (e.g., b'\\x00\\x01...') containing the\ncertificate bytes to store. Optional, defaults to the empty\nbyte string.", "source": "juraj-google-style"}
{"code": "def make_view(controller, context, data):\n    if isinstance(data, BlockModel):\n        view = _make_view_subclass(Block, controller, context, data)\n    elif isinstance(data, AttributeModel):\n        view = Attribute(controller, context, data)\n    elif isinstance(data, MethodModel):\n        view = Method(controller, context, data)\n    elif isinstance(data, Model):\n        view = _make_view_subclass(View, controller, context, data)\n    elif isinstance(data, dict):\n        d = OrderedDict()\n        for (k, v) in data.items():\n            d[k] = make_view(controller, context, v)\n        view = d\n    elif isinstance(data, list):\n        view = [make_view(controller, context, x) for x in data]\n    else:\n        view = data\n    return view", "docstring": "Make a View subclass containing properties specific for given data\n\nArgs:\ncontroller (Controller): The child controller that hosts the data\ncontext (Context): The context the parent has made that the View should\nuse for manipulating the data\ndata (Model): The actual data that context will be manipulating\n\nReturns:\nView: A View subclass instance that provides a user-focused API to\nthe given data", "source": "codesearchnet"}
{"code": "def _create_dag_op(self, name, params, qargs):\n    if (name == 'u0'):\n        op_class = U0Gate\n    elif (name == 'u1'):\n        op_class = U1Gate\n    elif (name == 'u2'):\n        op_class = U2Gate\n    elif (name == 'u3'):\n        op_class = U3Gate\n    elif (name == 'x'):\n        op_class = XGate\n    elif (name == 'y'):\n        op_class = YGate\n    elif (name == 'z'):\n        op_class = ZGate\n    elif (name == 't'):\n        op_class = TGate\n    elif (name == 'tdg'):\n        op_class = TdgGate\n    elif (name == 's'):\n        op_class = SGate\n    elif (name == 'sdg'):\n        op_class = SdgGate\n    elif (name == 'swap'):\n        op_class = SwapGate\n    elif (name == 'rx'):\n        op_class = RXGate\n    elif (name == 'ry'):\n        op_class = RYGate\n    elif (name == 'rz'):\n        op_class = RZGate\n    elif (name == 'rzz'):\n        op_class = RZZGate\n    elif (name == 'id'):\n        op_class = IdGate\n    elif (name == 'h'):\n        op_class = HGate\n    elif (name == 'cx'):\n        op_class = CnotGate\n    elif (name == 'cy'):\n        op_class = CyGate\n    elif (name == 'cz'):\n        op_class = CzGate\n    elif (name == 'ch'):\n        op_class = CHGate\n    elif (name == 'crz'):\n        op_class = CrzGate\n    elif (name == 'cu1'):\n        op_class = Cu1Gate\n    elif (name == 'cu3'):\n        op_class = Cu3Gate\n    elif (name == 'ccx'):\n        op_class = ToffoliGate\n    elif (name == 'cswap'):\n        op_class = FredkinGate\n    else:\n        raise QiskitError(('unknown operation for ast node name %s' % name))\n    op = op_class(*params)\n    self.dag.apply_operation_back(op, qargs, [], condition=self.condition)", "docstring": "Create a DAG node out of a parsed AST op node.\n\nArgs:\nname (str): operation name to apply to the dag.\nparams (list): op parameters\nqargs (list(QuantumRegister, int)): qubits to attach to\n\nRaises:\nQiskitError: if encountering a non-basis opaque gate", "source": "codesearchnet"}
{"code": "def process_files(self, path, recursive=False):\n    self._logger.info('Processing files in \"%s\"', path)\n    for (path, file) in files_generator(path, recursive):\n        if (not file.endswith(BATCH_EXTENSION)):\n            self.process_file(os.path.join(path, file))", "docstring": "Apply normalizations over all files in the given directory.\n\nIterate over all files in a given directory. Normalizations\nwill be applied to each file, storing the result in a new file.\nThe extension for the new file will be the one defined in\nBATCH_EXTENSION.\n\nArgs:\npath: Path to the directory.\nrecursive: Whether to find files recursively or not.", "source": "codesearchnet"}
{"code": "def encode_csv(data_dict, column_names):\n  \n  import csv\n  import six\n  values = [str(data_dict[x]) for x in column_names]\n  str_buff = six.StringIO()\n  writer = csv.writer(str_buff, lineterminator='')\n  writer.writerow(values)\n  return str_buff.getvalue()", "docstring": "Builds a csv string.\n\nArgs:\ndata_dict: dict of {column_name: 1 value}\ncolumn_names: list of column names\n\nReturns:\nA csv string version of data_dict", "source": "juraj-google-style"}
{"code": "def AddBasicOptions(self, argument_group):\n    \n    version_string = self.GetVersionInformation()\n\n    \n    argument_group.add_argument(\n        '-h', '--help', action='help',\n        help='Show this help message and exit.')\n\n    argument_group.add_argument(\n        '--troubles', dest='show_troubleshooting', action='store_true',\n        default=False, help='Show troubleshooting information.')\n\n    argument_group.add_argument(\n        '-V', '--version', dest='version', action='version',\n        version=version_string, help='Show the version information.')", "docstring": "Adds the basic options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def interact(self, banner=None):\n    \n    sys.ps1 = getattr(sys, 'ps1', '>>> ')\n    sys.ps2 = getattr(sys, 'ps2', '... ')\n    if banner is None:\n      print ('Pyringe (Python %s.%s.%s) on %s\\n%s' %\n             (sys.version_info.major, sys.version_info.minor,\n              sys.version_info.micro, sys.platform, _WELCOME_MSG))\n    else:\n      print banner\n    more = False\n    while True:\n      try:\n        if more:\n          prompt = sys.ps2\n        else:\n          prompt = self.StatusLine() + '\\n' + sys.ps1\n        try:\n          line = self.raw_input(prompt)\n        except EOFError:\n          print ''\n          break\n        else:\n          more = self.push(line)\n      except KeyboardInterrupt:\n        print '\\nKeyboardInterrupt'\n        self.resetbuffer()\n        more = False", "docstring": "Closely emulate the interactive Python console.\n\nThis method overwrites its superclass' method to specify a different help\ntext and to enable proper handling of the debugger status line.\n\nArgs:\nbanner: Text to be displayed on interpreter startup.", "source": "juraj-google-style"}
{"code": "def get_pyof_version(module_fullname):\n    ver_module_re = re.compile('(pyof\\\\.)(v0x\\\\d+)(\\\\..*)')\n    matched = ver_module_re.match(module_fullname)\n    if matched:\n        version = matched.group(2)\n        return version\n    return None", "docstring": "Get the module pyof version based on the module fullname.\n\nArgs:\nmodule_fullname (str): The fullname of the module\n(e.g.: pyof.v0x01.common.header)\n\nReturns:\nstr: openflow version.\nThe openflow version, on the format 'v0x0?' if any. Or None\nif there isn't a version on the fullname.", "source": "codesearchnet"}
{"code": "def traverse_postorder(self, leaves=True, internal=True):\n        \n        s1 = deque(); s2 = deque(); s1.append(self)\n        while len(s1) != 0:\n            n = s1.pop(); s2.append(n); s1.extend(n.children)\n        while len(s2) != 0:\n            n = s2.pop()\n            if (leaves and n.is_leaf()) or (internal and not n.is_leaf()):\n                yield n", "docstring": "Perform a postorder traversal starting at this ``Node`` object\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "juraj-google-style"}
{"code": "def execute_log(args, root_dir):\n    if args.get('keys'):\n        config_dir = os.path.join(root_dir, '.config/pueue')\n        queue_path = os.path.join(config_dir, 'queue')\n        if os.path.exists(queue_path):\n            queue_file = open(queue_path, 'rb')\n            try:\n                queue = pickle.load(queue_file)\n            except Exception:\n                print('Queue log file seems to be corrupted. Aborting.')\n                return\n            queue_file.close()\n        else:\n            print('There is no queue log file. Aborting.')\n            return\n        for key in args.get('keys'):\n            if (queue.get(key) and (queue[key]['status'] in ['failed', 'done'])):\n                entry = queue[key]\n                print('Log of entry: {}'.format(key))\n                print('Returncode: {}'.format(entry['returncode']))\n                print('Command: {}'.format(entry['command']))\n                print('Path: {}'.format(entry['path']))\n                print('Start: {}, End: {} \\n'.format(entry['start'], entry['end']))\n                if (len(entry['stderr']) > 0):\n                    print((Color('{autored}Stderr output: {/autored}\\n    ') + entry['stderr']))\n                if (len(entry['stdout']) > 0):\n                    print((Color('{autogreen}Stdout output: {/autogreen}\\n    ') + entry['stdout']))\n            else:\n                print('No finished process with key {}.'.format(key))\n    else:\n        log_path = os.path.join(root_dir, '.local/share/pueue/queue.log')\n        log_file = open(log_path, 'r')\n        print(log_file.read())", "docstring": "Print the current log file.\n\nArgs:\nargs['keys'] (int): If given, we only look at the specified processes.\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "codesearchnet"}
{"code": "def pack(self, tensors: Sequence[Any], layout: layout_lib.Layout) -> Any:\n    if not context.executing_eagerly():\n        raise RuntimeError('`pack` must be called eagerly.')\n    self._register_mesh(layout.mesh)\n    with ops.device(self.name):\n        if all((isinstance(t, sparse_tensor.SparseTensor) for t in tensors)):\n            if not all((t.shape == tensors[0].shape for t in tensors)):\n                raise TypeError('All input SparseTensors to Pack must be same shape.')\n            is_sparse = True\n            tensors = [t.indices for t in tensors] + [t.values for t in tensors] + [ops.convert_to_tensor(t.shape, dtype=dtypes.int64) for t in tensors]\n        elif any((isinstance(t, sparse_tensor.SparseTensor) for t in tensors)):\n            raise TypeError('Cannot Pack SparseTensors with Tensors.')\n        else:\n            is_sparse = False\n        try:\n            return _pywrap_dtensor_device.Pack(context.context()._handle, tensors, layout.to_string(), self._device_info, is_sparse)\n        except core._NotOkStatusException as e:\n            raise core._status_to_exception(e) from None", "docstring": "Packs tensors into a DTensor handle on this DTensor device.\n\nPacking and unpacking are inverse operations:\n\n```\n* unpack(pack(tensors)) == tensors\n* pack(unpack(dtensor)) == dtensor\n```\n\nRefer to `dtensor.pack` for more information.\n\nArgs:\ntensors: The list of tensors to pack into a DTensor.\nlayout: The layout of the DTensor to be created.\n\nReturns:\nA DTensor created from the individual component tensors.\n\nRaises:\nRuntimeError: When not called eagerly.", "source": "github-repos"}
{"code": "def generate_output_header(self, query_type='RDAP'):\n    output = '\\n{0}{1}{2} query for {3}:{4}\\n\\n'.format(ANSI['ul'], ANSI['b'], query_type, self.obj.address_str, ANSI['end'])\n    return output", "docstring": "The function for generating the CLI output header.\n\nArgs:\nquery_type (:obj:`str`): The IPWhois query type. Defaults to\n'RDAP'.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def _magic_parser(stream, magic):\n    (in_doc, fields) = (0, None)\n    for line in stream:\n        line = line.strip()\n        if line.startswith(magic):\n            keys = line.split()\n            fields = OrderedDict(((k, []) for k in keys))\n        if (fields is not None):\n            in_doc += 1\n            if (in_doc == 1):\n                continue\n            if (not line):\n                break\n            tokens = list(map(float, line.split()[1:]))\n            assert (len(tokens) == len(keys))\n            for (l, v) in zip(fields.values(), tokens):\n                l.append(v)\n    if fields:\n        return OrderedDict([(k, np.array(v)) for (k, v) in fields.items()])\n    else:\n        return None", "docstring": "Parse the section with the SCF cycle\n\nReturns:\ndict where the key are the name of columns and\nthe values are list of numbers. Note if no section was found.\n\n.. warning::\n\nThe parser is very fragile and should be replaced by YAML.", "source": "codesearchnet"}
{"code": "def get_cell_shift(flow_model):\n        \n        assert flow_model.lower() in FlowModelConst.d8_deltas\n        return FlowModelConst.d8_deltas.get(flow_model.lower())", "docstring": "Get flow direction induced cell shift dict.\nArgs:\nflow_model: Currently, \"TauDEM\", \"ArcGIS\", and \"Whitebox\" are supported.", "source": "juraj-google-style"}
{"code": "def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n        \n        temperatures = np.linspace(tmin, tmax, ntemp)\n\n        mol = \"\" if self.structure else \"-c\"\n\n        fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=\"Thermodynamic properties\", ylim=ylim,\n                                label=r\"$C_v$ (J/K/mol{})\".format(mol), **kwargs)\n        self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0],\n                          label=r\"$S$ (J/K/mol{})\".format(mol), **kwargs)\n        self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,\n                          label=r\"$\\Delta E$ (kJ/K/mol{})\".format(mol), **kwargs)\n        self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,\n                          label=r\"$\\Delta F$ (kJ/K/mol{})\".format(mol), **kwargs)\n\n        fig.axes[0].legend(loc=\"best\")\n\n        return fig", "docstring": "Plots all the thermodynamic properties in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "juraj-google-style"}
{"code": "async def forget(request):\n    auth_policy = request.get(POLICY_KEY)\n    if (auth_policy is None):\n        raise RuntimeError('auth_middleware not installed')\n    return (await auth_policy.forget(request))", "docstring": "Called to forget the userid for a request\n\nArgs:\nrequest: aiohttp Request object\n\nRaises:\nRuntimeError: Middleware is not installed", "source": "codesearchnet"}
{"code": "def update(self, item):\n    if (item.matrix not in self.data):\n        self.data[item.matrix] = []\n    result = Select(self.data[item.matrix]).where((lambda entry: (entry.stage == item.stage))).build()\n    if (len(result) > 0):\n        stage = result[0]\n        stage.status = item.status\n        stage.add(item.timestamp, item.information)\n    else:\n        stage = CollectorStage(stage=item.stage, status=item.status)\n        stage.add(item.timestamp, item.information)\n        self.data[item.matrix].append(stage)", "docstring": "Add a collector item.\n\nArgs:\nitem (CollectorUpdate): event data like stage, timestampe and status.", "source": "codesearchnet"}
{"code": "def parse_elements(elements):\n    if (not (len(elements) == 5)):\n        raise ValueError('Invalid WPL waypoint data')\n    latitude = parse_latitude(elements[0], elements[1])\n    longitude = parse_longitude(elements[2], elements[3])\n    name = elements[4]\n    return Waypoint(latitude, longitude, name)", "docstring": "Parse waypoint data elements.\n\nArgs:\nelements (list): Data values for fix\n\nReturns:\nnmea.Waypoint: Object representing data", "source": "codesearchnet"}
{"code": "def from_stat_file(cls, statfile, timestep=1, is_leap_year=False):\n        \n        stat = STAT(statfile)\n\n        \n        def check_missing(opt_data, data_name):\n            if opt_data == []:\n                raise ValueError('Stat file contains no optical data.')\n            for i, x in enumerate(opt_data):\n                if x is None:\n                    raise ValueError(\n                        'Missing optical depth data for {} at month {}'.format(\n                            data_name, i)\n                    )\n        check_missing(stat.monthly_tau_beam, 'monthly_tau_beam')\n        check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse')\n\n        return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam,\n                                                 stat.monthly_tau_diffuse, timestep,\n                                                 is_leap_year)", "docstring": "Create an ASHRAE Revised Clear Sky wea object from the monthly sky\noptical depths in a .stat file.\n\nArgs:\nstatfile: Full path to the .stat file.\ntimestep: An optional integer to set the number of time steps per\nhour. Default is 1 for one value per hour.\nis_leap_year: A boolean to indicate if values are representing a leap year.\nDefault is False.", "source": "juraj-google-style"}
{"code": "def update_mongo_compound_variants(self, bulk):\n        \n        requests = []\n        for var_id in bulk:\n            var_obj = bulk[var_id]\n            if not var_obj.get('compounds'):\n                continue\n            \n            operation = pymongo.UpdateOne(\n                {'_id': var_obj['_id']},\n                {\n                    '$set': {\n                        'compounds': var_obj['compounds']\n                    }\n                })\n            requests.append(operation)\n\n        if not requests:\n            return\n\n        try:\n            self.variant_collection.bulk_write(requests, ordered=False)\n        except BulkWriteError as err:\n            LOG.warning(\"Updating compounds failed\")\n            raise err", "docstring": "Update the compound information for a bulk of variants in the database\n\nArgs:\nbulk(dict): {'_id': scout.models.Variant}", "source": "juraj-google-style"}
{"code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n    summed_grads_and_vars = []\n    for grad, var in grads_and_vars:\n        if grad is None:\n            summed_grads_and_vars.append((grad, var))\n        else:\n            with ops.colocate_with(grad):\n                summed_grads_and_vars.append((tpu_ops.cross_replica_sum(grad, self._group_assignment), var))\n    return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)", "docstring": "Apply gradients to variables.\n\nCalls tpu_ops.cross_replica_sum() to sum gradient contributions across\nreplicas, and then applies the real optimizer.\n\nArgs:\ngrads_and_vars: List of (gradient, variable) pairs as returned by\ncompute_gradients().\nglobal_step: Optional Variable to increment by one after the\nvariables have been updated.\nname: Optional name for the returned operation.  Default to the\nname passed to the Optimizer constructor.\n\nReturns:\nAn `Operation` that applies the gradients. If `global_step` was not None,\nthat operation also increments `global_step`.\n\nRaises:\nValueError: If the grads_and_vars is malformed.", "source": "github-repos"}
{"code": "def ParseOptions(cls, options, output_module):\n    \n    if not isinstance(output_module, mysql_4n6time.MySQL4n6TimeOutputModule):\n      raise errors.BadConfigObject(\n          'Output module is not an instance of MySQL4n6TimeOutputModule')\n\n    MySQL4n6TimeDatabaseArgumentsHelper.ParseOptions(options, output_module)\n    shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.ParseOptions(\n        options, output_module)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\noutput_module (OutputModule): output module to configure.\n\nRaises:\nBadConfigObject: when the output module object is of the wrong type.", "source": "juraj-google-style"}
{"code": "def get_point(self, *position):\n        \n        \n        \n        \n        \n        array = _ffi.new(self._arrayType, position)\n        if self._useOctaves:\n            return (self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5\n        return (self._noiseFunc(self._noise, array) + 1) * 0.5", "docstring": "Return the noise value of a specific position.\n\nExample usage: value = noise.getPoint(x, y, z)\n\nArgs:\nposition (Tuple[float, ...]): The point to sample at.\n\nReturns:\nfloat: The noise value at position.\n\nThis will be a floating point in the 0.0-1.0 range.", "source": "juraj-google-style"}
{"code": "def get_json_files(files, recursive=False):\n    json_files = []\n    if (not files):\n        return json_files\n    for fn in files:\n        if os.path.isdir(fn):\n            children = list_json_files(fn, recursive)\n            json_files.extend(children)\n        elif is_json(fn):\n            json_files.append(fn)\n        else:\n            continue\n    if (not json_files):\n        raise NoJSONFileFoundError('No JSON files found!')\n    return json_files", "docstring": "Return a list of files to validate from `files`. If a member of `files`\nis a directory, its children with a ``.json`` extension will be added to\nthe return value.\n\nArgs:\nfiles: A list of file paths and/or directory paths.\nrecursive: If ``true``, this will descend into any subdirectories\nof input directories.\n\nReturns:\nA list of file paths to validate.", "source": "codesearchnet"}
{"code": "def generate_brome_config():\n    config = {}\n    for key in iter(default_config):\n        for (inner_key, value) in iter(default_config[key].items()):\n            if (key not in config):\n                config[key] = {}\n            config[key][inner_key] = value['default']\n    return config", "docstring": "Generate a brome config with default value\n\nReturns:\nconfig (dict)", "source": "codesearchnet"}
{"code": "def MultiDelete(self, urns, token=None):\n    urns = [rdfvalue.RDFURN(urn) for urn in urns]\n    if (token is None):\n        token = data_store.default_token\n    for urn in urns:\n        if (urn.Path() == '/'):\n            raise ValueError(\"Can't delete root URN. Please enter a valid URN\")\n    deletion_pool = DeletionPool(token=token)\n    deletion_pool.MultiMarkForDeletion(urns)\n    marked_root_urns = deletion_pool.root_urns_for_deletion\n    marked_urns = deletion_pool.urns_for_deletion\n    logging.debug(u'Found %d objects to remove when removing %s', len(marked_urns), urns)\n    logging.debug(u'Removing %d root objects when removing %s: %s', len(marked_root_urns), urns, marked_root_urns)\n    pool = data_store.DB.GetMutationPool()\n    for root in marked_root_urns:\n        self._DeleteChildFromIndex(root, mutation_pool=pool)\n    for urn_to_delete in marked_urns:\n        try:\n            self.intermediate_cache.ExpireObject(urn_to_delete.Path())\n        except KeyError:\n            pass\n    pool.DeleteSubjects(marked_urns)\n    pool.Flush()\n    self.Flush()\n    logging.debug('Removed %d objects', len(marked_urns))", "docstring": "Drop all the information about given objects.\n\nDANGEROUS! This recursively deletes all objects contained within the\nspecified URN.\n\nArgs:\nurns: Urns of objects to remove.\ntoken: The Security Token to use for opening this item.\n\nRaises:\nValueError: If one of the urns is too short. This is a safety check to\nensure the root is not removed.", "source": "codesearchnet"}
{"code": "def get_ax3d_fig_plt(ax=None, **kwargs):\n    import matplotlib.pyplot as plt\n    from mpl_toolkits.mplot3d import axes3d\n    if (ax is None):\n        fig = plt.figure(**kwargs)\n        ax = axes3d.Axes3D(fig)\n    else:\n        fig = plt.gcf()\n    return (ax, fig, plt)", "docstring": "Helper function used in plot functions supporting an optional Axes3D\nargument. If ax is None, we build the `matplotlib` figure and create the\nAxes3D else we return the current active figure.\n\nArgs:\nkwargs: keyword arguments are passed to plt.figure if ax is not None.\n\nReturns:\nax: :class:`Axes` object\nfigure: matplotlib figure\nplt: matplotlib pyplot module.", "source": "codesearchnet"}
{"code": "def AddContext(self, context_string, description=None):\n    if (context_string not in self.context):\n        if (context_string not in self.valid_contexts):\n            raise InvalidContextError(('Invalid context specified: %s' % context_string))\n        self.context.append(context_string)\n        self.context_descriptions[context_string] = description\n    self.FlushCache()", "docstring": "Adds a context string to the global configuration.\n\nThe context conveys information about the caller of the config system and\nallows the configuration to have specialized results for different callers.\n\nNote that the configuration file may specify conflicting options for\ndifferent contexts. In this case, later specified contexts (i.e. the later\nAddContext() calls) will trump the earlier specified contexts. This allows\nspecialized contexts to be specified on the command line which override\nnormal operating options.\n\nArgs:\ncontext_string: A string which describes the global program.\ndescription: A description as to when this context applies.\n\nRaises:\nInvalidContextError: An undefined context was specified.", "source": "codesearchnet"}
{"code": "def _build(self, one_hot_input_sequence):\n    input_shape = one_hot_input_sequence.get_shape()\n    batch_size = input_shape[1]\n    batch_embed_module = snt.BatchApply(self._embed_module)\n    input_sequence = batch_embed_module(one_hot_input_sequence)\n    input_sequence = tf.nn.relu(input_sequence)\n    initial_state = self._core.initial_state(batch_size)\n    if self._use_dynamic_rnn:\n        (output_sequence, final_state) = tf.nn.dynamic_rnn(cell=self._core, inputs=input_sequence, time_major=True, initial_state=initial_state)\n    else:\n        rnn_input_sequence = tf.unstack(input_sequence)\n        (output, final_state) = tf.contrib.rnn.static_rnn(cell=self._core, inputs=rnn_input_sequence, initial_state=initial_state)\n        output_sequence = tf.stack(output)\n    batch_output_module = snt.BatchApply(self._output_module)\n    output_sequence_logits = batch_output_module(output_sequence)\n    return (output_sequence_logits, final_state)", "docstring": "Builds the deep LSTM model sub-graph.\n\nArgs:\none_hot_input_sequence: A Tensor with the input sequence encoded as a\none-hot representation. Its dimensions should be `[truncation_length,\nbatch_size, output_size]`.\n\nReturns:\nTuple of the Tensor of output logits for the batch, with dimensions\n`[truncation_length, batch_size, output_size]`, and the\nfinal state of the unrolled core,.", "source": "codesearchnet"}
{"code": "def get_battery_info(self) -> dict:\n    (output, _) = self._execute('-s', self.device_sn, 'shell', 'dumpsys', 'battery')\n    battery_status = re.split('\\n  |: ', output[33:].strip())\n    return dict(zip(battery_status[::2], battery_status[1::2]))", "docstring": "Show device battery information.\n\nReturns:\nA dict. For example:\n\n{'AC powered': 'false',\n'Charge counter': '0',\n'Max charging current': '0',\n'Max charging voltage': '0',\n'USB powered': 'false',\n'Wireless powered': 'false',\n'health': '2',\n'level': '67',\n'present': 'true',\n'scale': '100',\n'status': '3',\n'technology': 'Li-poly',\n'temperature': '310',\n'voltage': '3965'}", "source": "codesearchnet"}
{"code": "def dict_load(self, ns_dict):\n        \n        for prefix, uri in ns_dict.items():\n            self.bind(prefix, uri, override=False, calc=False)\n        self.__make_dicts__", "docstring": "Reads a dictionary of namespaces and binds them to the manager\n\nArgs:\nns_dict: dictionary with the key as the prefix and the value\nas the uri", "source": "juraj-google-style"}
{"code": "def GetFormatterObject(cls, data_type):\n    data_type = data_type.lower()\n    if (data_type not in cls._formatter_objects):\n        formatter_object = None\n        if (data_type in cls._formatter_classes):\n            formatter_class = cls._formatter_classes[data_type]\n            formatter_object = formatter_class()\n        if (not formatter_object):\n            logger.warning('Using default formatter for data type: {0:s}'.format(data_type))\n            formatter_object = default.DefaultFormatter()\n        cls._formatter_objects[data_type] = formatter_object\n    return cls._formatter_objects[data_type]", "docstring": "Retrieves the formatter object for a specific data type.\n\nArgs:\ndata_type (str): data type.\n\nReturns:\nEventFormatter: corresponding formatter or the default formatter if\nnot available.", "source": "codesearchnet"}
{"code": "def execute(self, inputs=None, output=None, load_targets=False):\n        \n        if self == output:\n            if os.path.exists(self._dump_dirname):\n                shutil.rmtree(self._dump_dirname)\n            if os.path.exists(self._target_filename):\n                os.remove(self._target_filename)\n            os.makedirs(self._dump_dirname)\n\n        if inputs is None:\n            inputs = []\n\n        if not hasattr(self, 'result'):\n            if self in inputs or (load_targets and self.target):\n                logging.info('Loading\\n%s' % util.indent(str(self)))\n                self.load()\n            else:\n                for i in self.inputs:\n                    i.execute(inputs=inputs, output=output,\n                              load_targets=load_targets)\n\n                args = merge_results(self.inputs)\n                logging.info('Running\\n%s' % util.indent(str(self)))\n                self.result = self.run(*args.args, **args.kwargs)\n\n        if self == output:\n            logging.info('Dumping\\n%s' % util.indent(str(self)))\n            self.dump()\n            util.touch(self._target_filename)", "docstring": "Run this step, recursively running or loading inputs.\nUsed in bin/run_step.py which is run by drake.\nArgs:\ninputs: collection of steps that should be loaded\noutput: step that should be dumped after it is run\nload_targets (boolean): load all steps which are targets.\nThis argument is not used by run_step.py because target\ndoes not get serialized. But it can be useful for\nrunning steps directly.", "source": "juraj-google-style"}
{"code": "def resolve_import(self, item):\n        \n        name = item.name\n        \n        \n        short_name = None\n        if item.is_from and not item.is_star:\n            if '.' in name.lstrip('.'):\n                \n                rindex = name.rfind('.')\n            else:\n                \n                rindex = name.rfind('.') + 1\n            short_name = name[:rindex]\n\n        if import_finder.is_builtin(name):\n            filename = name + '.so'\n            return Builtin(filename, name)\n\n        filename, level = convert_to_path(name)\n        if level:\n            \n            \n            filename = os.path.normpath(\n                os.path.join(self.current_directory, filename))\n\n        files = [(name, filename)]\n        if short_name:\n            short_filename = os.path.dirname(filename)\n            files.append((short_name, short_filename))\n\n        for module_name, path in files:\n            for fs in self.fs_path:\n                f = self._find_file(fs, path)\n                if not f or f == self.current_module.path:\n                    \n                    continue\n                if item.is_relative():\n                    package_name = self.current_module.package_name\n                    if package_name is None:\n                        \n                        raise ImportException(name)\n                    module_name = get_absolute_name(package_name, module_name)\n                    if isinstance(self.current_module, System):\n                        return System(f, module_name)\n                return Local(f, module_name, fs)\n\n        \n        \n        if item.source:\n            prefix, ext = os.path.splitext(item.source)\n            mod_name = name\n            \n            if short_name:\n                mod = prefix.replace(os.path.sep, '.')\n                mod = utils.strip_suffix(mod, '.__init__')\n                if not mod.endswith(name) and mod.endswith(short_name):\n                    mod_name = short_name\n\n            if ext == '.pyc':\n                pyfile = prefix + '.py'\n                if os.path.exists(pyfile):\n                    return System(pyfile, mod_name)\n            elif not ext:\n                pyfile = os.path.join(prefix, \"__init__.py\")\n                if os.path.exists(pyfile):\n                    return System(pyfile, mod_name)\n            return System(item.source, mod_name)\n\n        raise ImportException(name)", "docstring": "Simulate how Python resolves imports.\n\nReturns the filename of the source file Python would load\nwhen processing a statement like 'import name' in the module\nwe're currently under.\n\nArgs:\nitem: An instance of ImportItem\n\nReturns:\nA filename\n\nRaises:\nImportException: If the module doesn't exist.", "source": "juraj-google-style"}
{"code": "def can_process_matrix(entry, matrix_tags):\n        \n        if len(matrix_tags) == 0:\n            return True\n\n        count = 0\n        if 'tags' in entry:\n            for tag in matrix_tags:\n                if tag in entry['tags']:\n                    count += 1\n\n        return count > 0", "docstring": "Check given matrix tags to be in the given list of matric tags.\n\nArgs:\nentry (dict): matrix item (in yaml).\nmatrix_tags (list): represents --matrix-tags defined by user in command line.\nReturns:\nbool: True when matrix entry can be processed.", "source": "juraj-google-style"}
{"code": "def load_scout(adapter, config, ped=None, update=False):\n    log.info('Check that the panels exists')\n    if (not check_panels(adapter, config.get('gene_panels', []), config.get('default_gene_panels'))):\n        raise ConfigError('Some panel(s) does not exist in the database')\n    case_obj = adapter.load_case(config, update=update)\n    return case_obj", "docstring": "Load a new case from a Scout config.\n\nArgs:\nadapter(MongoAdapter)\nconfig(dict): loading info\nped(Iterable(str)): Pedigree ingformation\nupdate(bool): If existing case should be updated", "source": "codesearchnet"}
{"code": "def trace_on(graph=True, profiler=False, profiler_outdir=None):\n    if ops.inside_function():\n        logging.warn('Cannot enable trace inside a tf.function.')\n        return\n    if not context.executing_eagerly():\n        logging.warn('Must enable trace in eager mode.')\n        return\n    global _current_trace_context\n    with _current_trace_context_lock:\n        if _current_trace_context:\n            logging.warn('Trace already enabled')\n            return\n        if graph and (not profiler):\n            context.context().enable_graph_collection()\n        if profiler:\n            if profiler_outdir is None:\n                logging.warn(\"No `profiler_outdir` passed to trace_on(). Profiler won't be enabled.\")\n            else:\n                context.context().enable_run_metadata()\n                _profiler.start(profiler_outdir)\n        _current_trace_context = _TraceContext(graph=graph, profiler=profiler)", "docstring": "Starts a trace to record computation graphs and profiling information.\n\nMust be invoked in eager mode.\n\nWhen enabled, TensorFlow runtime will collect information that can later be\nexported and consumed by TensorBoard. The trace is activated across the entire\nTensorFlow runtime and affects all threads of execution.\n\nTo stop the trace and export the collected information, use\n`tf.summary.trace_export`. To stop the trace without exporting, use\n`tf.summary.trace_off`.\n\nArgs:\ngraph: If True, enables collection of executed graphs. It includes ones from\ntf.function invocation and ones from the legacy graph mode. The default is\nTrue.\nprofiler: If True, enables the advanced profiler. Enabling profiler\nimplicitly enables the graph collection. The profiler may incur a high\nmemory overhead. The default is False.\nprofiler_outdir: Output directory for profiler. It is required when profiler\nis enabled when trace was started. Otherwise, it is ignored.", "source": "github-repos"}
{"code": "def getOrderedLinks(self, session):\n        \n        streamLinks = session.query(StreamLink).\\\n                            filter(StreamLink.channelInputFile == self).\\\n                            order_by(StreamLink.linkNumber).\\\n                            all()\n\n        return streamLinks", "docstring": "Retrieve the links in the order of the link number.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\n\nReturns:\nlist: A list of :class:`.StreamLink` objects.", "source": "juraj-google-style"}
{"code": "def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str=''):\n    ast = {}\n    sorted_keys = sorted(parsed.keys())\n    for key in sorted_keys:\n        if (parsed[key]['type'] == 'Nested'):\n            nested_component_stack = ['subject', 'object']\n    if component_type:\n        component_stack = [component_type]\n    else:\n        component_stack = ['subject', 'object']\n    for key in sorted_keys:\n        if ((parsed[key]['type'] == 'Function') and (parsed[key]['function_level'] == 'top')):\n            ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key)\n        elif ((parsed[key]['type'] == 'Relation') and ('relation' not in ast)):\n            ast['relation'] = {'name': parsed[key]['name'], 'type': 'Relation', 'span': key}\n        elif (parsed[key]['type'] == 'Nested'):\n            ast['nested'] = {}\n            for nested_key in sorted_keys:\n                if (nested_key <= key):\n                    continue\n                if ((parsed[nested_key]['type'] == 'Function') and (parsed[nested_key]['function_level'] == 'top')):\n                    ast['nested'][nested_component_stack.pop(0)] = parsed_function_to_ast(parsed, nested_key)\n                elif ((parsed[nested_key]['type'] == 'Relation') and ('relation' not in ast['nested'])):\n                    ast['nested']['relation'] = {'name': parsed[nested_key]['name'], 'type': 'Relation', 'span': parsed[nested_key]['span']}\n            return (ast, errors)\n    return (ast, errors)", "docstring": "Convert parsed data struct to AST dictionary\n\nArgs:\nparsed:\nerrors:\ncomponent_type: Empty string or 'subject' or 'object' to indicate that we\nare parsing the subject or object field input", "source": "codesearchnet"}
{"code": "def _make_pred_succ_maps(self, node):\n        \n\n        pred_map = {e[2]['wire']: e[0] for e in\n                    self._multi_graph.in_edges(nbunch=node, data=True)}\n        succ_map = {e[2]['wire']: e[1] for e in\n                    self._multi_graph.out_edges(nbunch=node, data=True)}\n        return pred_map, succ_map", "docstring": "Return predecessor and successor dictionaries.\n\nArgs:\nnode (DAGNode): reference to multi_graph node\n\nReturns:\ntuple(dict): tuple(predecessor_map, successor_map)\nThese map from wire (Register, int) to predecessor (successor)\nnodes of n.", "source": "juraj-google-style"}
{"code": "def find_elb_dns_zone_id(name='', env='dev', region='us-east-1'):\n    \n    LOG.info('Find %s ELB DNS Zone ID in %s [%s].', name, env, region)\n    client = boto3.Session(profile_name=env).client('elb', region_name=region)\n    elbs = client.describe_load_balancers(LoadBalancerNames=[name])\n    return elbs['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID']", "docstring": "Get an application's AWS elb dns zone id.\n\nArgs:\nname (str): ELB name\nenv (str): Environment/account of ELB\nregion (str): AWS Region\n\nReturns:\nstr: elb DNS zone ID", "source": "juraj-google-style"}
{"code": "def browse_stations_categories(self):\n    response = self._call(mc_calls.BrowseStationCategories)\n    station_categories = response.body.get('root', {}).get('subcategories', [])\n    return station_categories", "docstring": "Get the categories from Browse Stations.\n\nReturns:\nlist: Station categories that can contain subcategories.", "source": "codesearchnet"}
{"code": "def get_install_value(self, value_name, wanted_type=None):\n        \n        try:\n            item_value, item_type = self.__reg_query_value(self.__reg_uninstall_handle, value_name)\n        except pywintypes.error as exc:  \n            if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:\n                \n                return None\n            raise\n\n        if wanted_type and item_type not in self.__reg_types[wanted_type]:\n            item_value = None\n\n        return item_value", "docstring": "For the uninstall section of the registry return the name value.\n\nArgs:\nvalue_name (str): Registry value name.\nwanted_type (str):\nThe type of value wanted if the type does not match\nNone is return. wanted_type support values are\n``str`` ``int`` ``list`` ``bytes``.\n\nReturns:\nvalue: Value requested or None if not found.", "source": "juraj-google-style"}
{"code": "def blocking_reader(reader, input, buffer_size=_DEFAULT_BUFFER_SIZE):\n    ion_event = None\n    while True:\n        read_event = (yield ion_event)\n        ion_event = reader.send(read_event)\n        while ((ion_event is not None) and ion_event.event_type.is_stream_signal):\n            data = input.read(buffer_size)\n            if (len(data) == 0):\n                if (ion_event.event_type is IonEventType.INCOMPLETE):\n                    ion_event = reader.send(NEXT_EVENT)\n                    continue\n                else:\n                    (yield ION_STREAM_END_EVENT)\n                    return\n            ion_event = reader.send(read_data_event(data))", "docstring": "Provides an implementation of using the reader co-routine with a file-like object.\n\nArgs:\nreader(Coroutine): A reader co-routine.\ninput(BaseIO): The file-like object to read from.\nbuffer_size(Optional[int]): The optional buffer size to use.", "source": "codesearchnet"}
{"code": "def iter_non_intersecting(self, iterable, key=None, descending=False):\n    return _ContainsVersionIterator(self, iterable, key, descending, mode=_ContainsVersionIterator.MODE_NON_INTERSECTING)", "docstring": "Like `iter_intersect_test`, but returns non-intersections only.\n\nReturns:\nAn iterator that returns items from `iterable` that don't intersect.", "source": "codesearchnet"}
{"code": "def download_uniprot_file(uniprot_id, filetype, outdir='', force_rerun=False):\n    \n\n    my_file = '{}.{}'.format(uniprot_id, filetype)\n    url = 'http:\n    outfile = op.join(outdir, my_file)\n\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        urlretrieve(url, outfile)\n\n    return outfile", "docstring": "Download a UniProt file for a UniProt ID/ACC\n\nArgs:\nuniprot_id: Valid UniProt ID\nfiletype: txt, fasta, xml, rdf, or gff\noutdir: Directory to download the file\n\nReturns:\nstr: Absolute path to file", "source": "juraj-google-style"}
{"code": "def _peek(self, size=-1):\n        \n        with self._seek_lock:\n            seek = self._seek\n        with handle_os_exceptions():\n            return self._read_range(seek, seek + size)", "docstring": "Return bytes from the stream without advancing the position.\n\nArgs:\nsize (int): Number of bytes to read. -1 to read the full\nstream.\n\nReturns:\nbytes: bytes read", "source": "juraj-google-style"}
{"code": "def _ParseUpdateKeyValue(self, parser_mediator, registry_value, key_path):\n    \n    if not registry_value.DataIsString():\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported UpdateKey value data type: {0:s}'.format(\n              registry_value.data_type_string))\n      return\n\n    date_time_string = registry_value.GetDataAsObject()\n    if not date_time_string:\n      parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')\n      return\n\n    re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)\n    if not re_match:\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported UpdateKey value data: {0!s}'.format(date_time_string))\n      return\n\n    month, day_of_month, year, hours, minutes, seconds, part_of_day = (\n        re_match.groups())\n\n    try:\n      year = int(year, 10)\n      month = int(month, 10)\n      day_of_month = int(day_of_month, 10)\n      hours = int(hours, 10)\n      minutes = int(minutes, 10)\n      seconds = int(seconds, 10)\n    except (TypeError, ValueError):\n      parser_mediator.ProduceExtractionWarning(\n          'invalid UpdateKey date time value: {0!s}'.format(date_time_string))\n      return\n\n    if part_of_day == 'PM':\n      hours += 12\n\n    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n      date_time.is_local_time = True\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid UpdateKey date time value: {0!s}'.format(\n              time_elements_tuple))\n      return\n\n    event_data = CCleanerUpdateEventData()\n    event_data.key_path = key_path\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_UPDATE,\n        time_zone=parser_mediator.timezone)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the UpdateKey value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_value (dfwinreg.WinRegistryValue): Windows Registry value.\nkey_path (str): Windows Registry key path.", "source": "juraj-google-style"}
{"code": "def gunzip_file(infile, outfile=None, outdir=None, delete_original=False, force_rerun_flag=False):\n    \n    if not outfile:\n        outfile = infile.replace('.gz', '')\n\n    if not outdir:\n        outdir = ''\n    else:\n        outdir = op.dirname(infile)\n    outfile = op.join(outdir, op.basename(outfile))\n\n    if force_rerun(flag=force_rerun_flag, outfile=outfile):\n        gz = gzip.open(infile, \"rb\")\n        decoded = gz.read()\n\n        with open(outfile, \"wb\") as new_file:\n            new_file.write(decoded)\n\n        gz.close()\n        log.debug('{}: file unzipped'.format(outfile))\n    else:\n        log.debug('{}: file already unzipped'.format(outfile))\n\n    if delete_original:\n        os.remove(infile)\n\n    return outfile", "docstring": "Decompress a gzip file and optionally set output values.\n\nArgs:\ninfile: Path to .gz file\noutfile: Name of output file\noutdir: Path to output directory\ndelete_original: If original .gz file should be deleted\nforce_rerun_flag: If file should be decompressed if outfile already exists\n\nReturns:\nstr: Path to decompressed file", "source": "juraj-google-style"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    \n    fsapfs_file_entry = None\n    location = getattr(path_spec, 'location', None)\n    identifier = getattr(path_spec, 'identifier', None)\n\n    try:\n      if identifier is not None:\n        fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(\n            identifier)\n      elif location is not None:\n        fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)\n\n    except IOError as exception:\n      raise errors.BackEndError(exception)\n\n    return fsapfs_file_entry is not None", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file entry exists.\n\nRaises:\nBackEndError: if the file entry cannot be opened.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_pattern, min_bundle_size, compression_type, strip_trailing_newlines, coder: coders.Coder, buffer_size=DEFAULT_READ_BUFFER_SIZE, validate=True, skip_header_lines=0, header_processor_fns=(None, None), delimiter=None, escapechar=None):\n    super().__init__(file_pattern, min_bundle_size, compression_type=compression_type, validate=validate)\n    self._strip_trailing_newlines = strip_trailing_newlines\n    self._compression_type = compression_type\n    self._coder = coder\n    self._buffer_size = buffer_size\n    if skip_header_lines < 0:\n        raise ValueError('Cannot skip negative number of header lines: %d' % skip_header_lines)\n    elif skip_header_lines > 10:\n        _LOGGER.warning('Skipping %d header lines. Skipping large number of header lines might significantly slow down processing.')\n    self._skip_header_lines = skip_header_lines\n    self._header_matcher, self._header_processor = header_processor_fns\n    if delimiter is not None:\n        if not isinstance(delimiter, bytes) or len(delimiter) == 0:\n            raise ValueError('Delimiter must be a non-empty bytes sequence.')\n        if self._is_self_overlapping(delimiter):\n            raise ValueError('Delimiter must not self-overlap.')\n    self._delimiter = delimiter\n    if escapechar is not None:\n        if not (isinstance(escapechar, bytes) and len(escapechar) == 1):\n            raise ValueError(\"escapechar must be bytes of size 1: '%s'\" % escapechar)\n    self._escapechar = escapechar", "docstring": "Initialize a _TextSource\n\nArgs:\nheader_processor_fns (tuple): a tuple of a `header_matcher` function\nand a `header_processor` function. The `header_matcher` should\nreturn `True` for all lines at the start of the file that are part\nof the file header and `False` otherwise. These header lines will\nnot be yielded when reading records and instead passed into\n`header_processor` to be handled. If `skip_header_lines` and a\n`header_matcher` are both provided, the value of `skip_header_lines`\nlines will be skipped and the header will be processed from\nthere.\ndelimiter (bytes) Optional: delimiter to split records.\nMust not self-overlap, because self-overlapping delimiters cause\nambiguous parsing.\nescapechar (bytes) Optional: a single byte to escape the records\ndelimiter, can also escape itself.\nRaises:\nValueError: if skip_lines is negative.\n\nPlease refer to documentation in class `ReadFromText` for the rest\nof the arguments.", "source": "github-repos"}
{"code": "def add_role(self, databaseName, roleName, collectionName=None):\n        \n        role = {\"databaseName\" : databaseName,\n                \"roleName\" : roleName}\n        \n        if collectionName:\n            role[\"collectionName\"] = collectionName\n        \n        \n        if collectionName and roleName not in [RoleSpecs.read, RoleSpecs.readWrite]:\n            raise ErrRole(\"Permissions [%s] not available for a collection\" % roleName)\n        elif not collectionName and roleName not in [RoleSpecs.read, RoleSpecs.readWrite, RoleSpecs.dbAdmin] and databaseName != \"admin\":\n            raise ErrRole(\"Permissions [%s] is only available for admin database\" % roleName)\n        \n        if role not in self.roles:\n            self.roles.append(role)", "docstring": "Add one role\n\nArgs:\ndatabaseName (str): Database Name\nroleName (RoleSpecs): role\n\nKeyword Args:\ncollectionName (str): Collection\n\nRaises:\nErrRole: role not compatible with the databaseName and/or collectionName", "source": "juraj-google-style"}
{"code": "def dict_diff(d1: Dict[(Any, Any)], d2: Dict[(Any, Any)], deleted_value: Any=None) -> Dict[(Any, Any)]:\n    changes = {k: v for (k, v) in d2.items() if ((k not in d1) or (d2[k] != d1[k]))}\n    for k in d1.keys():\n        if (k not in d2):\n            changes[k] = deleted_value\n    return changes", "docstring": "Returns a representation of the changes that need to be made to ``d1`` to\ncreate ``d2``.\n\nArgs:\nd1: a dictionary\nd2: another dictionary\ndeleted_value: value to use for deleted keys; see below\n\nReturns:\ndict: a dictionary of the format ``{k: v}`` where the ``k``/``v`` pairs\nare key/value pairs that are absent from ``d1`` and present in ``d2``,\nor present in both but with different values (in which case the ``d2``\nvalue is shown). If a key ``k`` is present in ``d1`` but absent in\n``d2``, the result dictionary has the entry ``{k: deleted_value}``.", "source": "codesearchnet"}
{"code": "def get_splits(self, n_splits=1):\n    if (n_splits == 1):\n        stratify = (self.target if self._stratify else None)\n        return train_test_split(self.data, self.target, shuffle=self._shuffle, stratify=stratify)\n    else:\n        cv_class = (StratifiedKFold if self._stratify else KFold)\n        cv = cv_class(n_splits=n_splits, shuffle=self._shuffle)\n        splits = list()\n        for (train, test) in cv.split(self.data, self.target):\n            X_train = self._get_split(self.data, train)\n            y_train = self._get_split(self.target, train)\n            X_test = self._get_split(self.data, test)\n            y_test = self._get_split(self.target, test)\n            splits.append((X_train, X_test, y_train, y_test))\n        return splits", "docstring": "Return splits of this dataset ready for Cross Validation.\n\nIf n_splits is 1, a tuple containing the X for train and test\nand the y for train and test is returned.\nOtherwise, if n_splits is bigger than 1, a list of such tuples\nis returned, one for each split.\n\nArgs:\nn_splits (int): Number of times that the data needs to be splitted.\n\nReturns:\ntuple or list:\nif n_splits is 1, a tuple containing the X for train and test\nand the y for train and test is returned.\nOtherwise, if n_splits is bigger than 1, a list of such tuples\nis returned, one for each split.", "source": "codesearchnet"}
{"code": "def delete(self, name, **kwargs):\n    self.gitlab.http_delete(self.path, query_data={'name': name}, **kwargs)", "docstring": "Delete a Label on the server.\n\nArgs:\nname: The name of the label\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def searchPageFor(doc, pno, text, hit_max=16, quads=False):\n    \n\n    return doc[pno].searchFor(text, hit_max = hit_max, quads = quads)", "docstring": "Search for a string on a page.\n\nArgs:\npno: page number\ntext: string to be searched for\nhit_max: maximum hits\nquads: return quads instead of rectangles\nReturns:\na list of rectangles or quads, each containing an occurrence.", "source": "juraj-google-style"}
{"code": "def set_auth_traps_enabled(status=True):\n    \n    vname = 'EnableAuthenticationTraps'\n    current_status = get_auth_traps_enabled()\n\n    if bool(status) == current_status:\n        _LOG.debug('%s already contains the provided value.', vname)\n        return True\n\n    vdata = int(status)\n    __utils__['reg.set_value'](_HKEY, _SNMP_KEY, vname, vdata, 'REG_DWORD')\n\n    new_status = get_auth_traps_enabled()\n\n    if status == new_status:\n        _LOG.debug('Setting %s configured successfully: %s', vname, vdata)\n        return True\n    _LOG.error('Unable to configure %s with value: %s', vname, vdata)\n    return False", "docstring": "Manage the sending of authentication traps.\n\nArgs:\nstatus (bool): True to enable traps. False to disable.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_snmp.set_auth_traps_enabled status='True'", "source": "juraj-google-style"}
{"code": "def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):\n    if self.assistant_model.generation_config.num_assistant_tokens_schedule in {'heuristic', 'heuristic_transient'}:\n        if num_matches == len(scores[0]) - 1:\n            self.num_assistant_tokens += 2.0\n        else:\n            self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0)\n    if is_sklearn_available() and self.assistant_model.generation_config.assistant_confidence_threshold and (type(self) is AssistedCandidateGenerator):\n        self.matches.extend([1] * num_matches)\n        if len(self.probs) > len(self.matches):\n            self.matches.append(0)\n        excess_length = len(self.probs) - len(self.matches)\n        if excess_length > 0:\n            del self.probs[-excess_length:]\n        if len(self.probs) > 5 and {0, 1}.issubset(self.matches):\n            fpr, tpr, thresholds = roc_curve(self.matches, self.probs)\n            fnr = 1 - tpr\n            costs = fpr + 3 * fnr\n            optimal_threshold_index = np.argmin(costs)\n            best_threshold = thresholds[optimal_threshold_index]\n            self.assistant_model.generation_config.assistant_confidence_threshold = best_threshold", "docstring": "Updates the candidate generation strategy based on the outcomes.\n\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\nscores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):\nPrediction scores of a language modeling head. These can be logits for each vocabulary when not using\nbeam search or log softmax for each vocabulary token when using beam search\nnum_matches (`int`):\nThe number of matches between the candidate sequences and the model predictions.", "source": "github-repos"}
{"code": "def suggest(q='', results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None):\n    buckets = (buckets or [])\n    kwargs = {}\n    kwargs['q'] = q\n    if (max_familiarity is not None):\n        kwargs['max_familiarity'] = max_familiarity\n    if (min_familiarity is not None):\n        kwargs['min_familiarity'] = min_familiarity\n    if (max_hotttnesss is not None):\n        kwargs['max_hotttnesss'] = max_hotttnesss\n    if (min_hotttnesss is not None):\n        kwargs['min_hotttnesss'] = min_hotttnesss\n    if results:\n        kwargs['results'] = results\n    if buckets:\n        kwargs['bucket'] = buckets\n    if limit:\n        kwargs['limit'] = 'true'\n    result = util.callm(('%s/%s' % ('artist', 'suggest')), kwargs)\n    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]", "docstring": "Suggest artists based upon partial names.\n\nArgs:\n\nKwargs:\nq (str): The text to suggest artists from\n\nresults (int): An integer number of results to return\n\nbuckets (list): A list of strings specifying which buckets to retrieve\n\nlimit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets\n\nmax_familiarity (float): A float specifying the max familiarity of artists to search for\n\nmin_familiarity (float): A float specifying the min familiarity of artists to search for\n\nmax_hotttnesss (float): A float specifying the max hotttnesss of artists to search for\n\nmin_hotttnesss (float): A float specifying the max hotttnesss of artists to search for\n\nReturns:\nA list of Artist objects\n\nExample:\n\n>>> results = artist.suggest(text='rad')\n>>> results\n\n>>>", "source": "codesearchnet"}
{"code": "def param_shapes(cls, sample_shape, name='DistributionParamShapes'):\n    with ops.name_scope(name, values=[sample_shape]):\n        return cls._param_shapes(sample_shape)", "docstring": "Shapes of parameters given the desired shape of a call to `sample()`.\n\nThis is a class method that describes what key/value arguments are required\nto instantiate the given `Distribution` so that a particular shape is\nreturned for that instance's call to `sample()`.\n\nSubclasses should override class method `_param_shapes`.\n\nArgs:\nsample_shape: `Tensor` or python list/tuple. Desired shape of a call to\n`sample()`.\nname: name to prepend ops with.\n\nReturns:\n`dict` of parameter name to `Tensor` shapes.", "source": "github-repos"}
{"code": "def accepts(self, tp, converter):\n    tp = ParameterizedProperty._validate_type_param(tp)\n    self.alternatives.append((tp, converter))\n    return self", "docstring": "Declare that other types may be converted to this property type.\n\nArgs:\ntp (Property) :\nA type that may be converted automatically to this property\ntype.\n\nconverter (callable) :\nA function accepting ``value`` to perform conversion of the\nvalue to this property type.\n\nReturns:\nself", "source": "codesearchnet"}
{"code": "def add_team_member(self, account_id=None, email_address=None):\n        \n        return self._add_remove_team_member(self.TEAM_ADD_MEMBER_URL, email_address, account_id)", "docstring": "Add or invite a user to your Team\n\nArgs:\n\naccount_id (str):       The id of the account of the user to invite to your team.\n\nemail_address (str):    The email address of the account to invite to your team. The account id prevails if both account_id and email_address are provided.\n\nReturns:\nA Team object", "source": "juraj-google-style"}
{"code": "def set(self, section, option, value=None):\n        \n        try:\n            section = self.__getitem__(section)\n        except KeyError:\n            raise NoSectionError(section) from None\n        option = self.optionxform(option)\n        if option in section:\n            section[option].value = value\n        else:\n            section[option] = value\n        return self", "docstring": "Set an option.\n\nArgs:\nsection (str): section name\noption (str): option name\nvalue (str): value, default None", "source": "juraj-google-style"}
{"code": "def diffs_prof(step):\n    \n    diff, rad = diff_prof(step)\n    return _scale_prof(step, diff, rad), rad", "docstring": "Scaled diffusion.\n\nThis computation takes sphericity into account if necessary.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array`: the diffusion and the radial position\nat which it is evaluated.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, copula_dict):\n    instance = cls(copula_dict['copula_type'])\n    instance.theta = copula_dict['theta']\n    instance.tau = copula_dict['tau']\n    return instance", "docstring": "Create a new instance from the given parameters.\n\nArgs:\ncopula_dict: `dict` with the parameters to replicate the copula.\nLike the output of `Bivariate.to_dict`\n\nReturns:\nBivariate: Instance of the copula defined on the parameters.", "source": "codesearchnet"}
{"code": "def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):\n    \n    if max_results == 0:\n      max_results = Api._MAX_RESULTS\n\n    args = {'project': project_id if project_id else self._project_id, 'maxResults': max_results}\n    if projection is not None:\n      args['projection'] = projection\n    if page_token is not None:\n      args['pageToken'] = page_token\n\n    url = Api._ENDPOINT + (Api._BUCKET_PATH % '')\n    return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Issues a request to retrieve the list of buckets.\n\nArgs:\nprojection: the projection of the bucket information to retrieve.\nmax_results: an optional maximum number of objects to retrieve.\npage_token: an optional token to continue the retrieval.\nproject_id: the project whose buckets should be listed.\nReturns:\nA parsed list of bucket information dictionaries.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def _get_encoded_length(audio_length, kernel_sizes=None, strides=None, dilations=None, use_causal_conv=None):\n    cur_length = audio_length\n    if kernel_sizes is None or strides is None or dilations is None or (use_causal_conv is None):\n        return cur_length\n    for kernel_size, stride, dilation in zip(kernel_sizes, strides, dilations):\n        effective_kernel_size = (kernel_size - 1) * dilation + 1\n        padding_total = kernel_size - stride\n        padding_right = padding_total \n        padding_left = padding_total - padding_right\n        n_frames = (cur_length - effective_kernel_size + padding_total) / stride + 1\n        n_frames = math.ceil(n_frames) - 1\n        ideal_length = n_frames * stride + kernel_size - padding_total\n        extra_padding = ideal_length - cur_length\n        if use_causal_conv:\n            padding_left = padding_total\n            padding_right = extra_padding\n        else:\n            padding_left = padding_left\n            padding_right = padding_right + extra_padding\n        cur_length = cur_length + padding_left + padding_right\n        cur_length = (cur_length - dilation * (kernel_size - 1) - 1) \n    return cur_length", "docstring": "Compute the length of the encoded audio sequence.\n\nArgs:\naudio_length (int): The length of the audio sequence.\nkernel_sizes (List[int]): The kernel sizes for the convolutional layers.\nstrides (List[int]): The strides for the convolutional layers.\nuse_causal_conv (bool): Whether to use causal convolutions.", "source": "github-repos"}
{"code": "def _set_xla_sharding(self, xla_sharding):\n    if self._variable_read and (not context.executing_eagerly()):\n        logging.warning(\"This variable (%s) has already been read (ie. a ReadVariableOp has already been generated) and a new XlaShardingOp using this sharding will not be created unless it is read again. If that's not possible, please set the XLA sharding before reading the variable.\", self.name)\n    self._xla_sharding = xla_sharding", "docstring": "Annotates this `ResourceVariable` with `xla_sharding`.\n\n`xla_sharding` will be used to create an `XlaShardingOp` whenever a\n`ReadVariableOp` is created.\n\nArgs:\nxla_sharding: The xla.OpSharding proto to annotate this ResourceVariable\nwith.", "source": "github-repos"}
{"code": "def reload_data(self):\n    db.session.rollback()\n    self.__data = {}\n    try:\n        for ns in db.ConfigNamespace.all():\n            self.__data[ns.namespace_prefix] = {x.key: x.value for x in ns.config_items}\n    except SQLAlchemyError as ex:\n        if (str(ex).find('1146') != (- 1)):\n            pass", "docstring": "Reloads the configuration from the database\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def validate(self, size):\n    msg = 'scale and array size must match, but were scale: {self.scale.n_bands},  array size: {size}'\n    if (size != len(self.scale)):\n        raise ValueError(msg.format(**locals()))", "docstring": "Ensure that the size of the dimension matches the number of bands in the\nscale\n\nRaises:\nValueError: when the dimension size and number of bands don't match", "source": "codesearchnet"}
{"code": "def _ip_unnumbered_type(self, **kwargs):\n    method_name = ('interface_%s_ip_ip_config_unnumbered_ip_donor_interface_type' % kwargs['int_type'])\n    ip_unnumbered_type = getattr(self._interface, method_name)\n    config = ip_unnumbered_type(**kwargs)\n    if kwargs['delete']:\n        tag = 'ip-donor-interface-type'\n        config.find(('.\n    return config", "docstring": "Return the `ip unnumbered` donor type XML.\n\nYou should not use this method.\nYou probably want `Interface.ip_unnumbered`.\n\nArgs:\nint_type (str): Type of interface. (gigabitethernet,\ntengigabitethernet etc).\ndelete (bool): Remove the configuration if ``True``.\nip_donor_interface_type (str): The donor interface type (loopback)\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def _GetKeys(self, data, keys, depth=1):\n    keys = set(keys)\n    match = {}\n    if (depth == 1):\n        for key in keys:\n            match[key] = data[key]\n    else:\n        for (_, parsed_key, parsed_value) in self._RecurseKey(data, depth=depth):\n            if (parsed_key in keys):\n                match[parsed_key] = parsed_value\n                if (set(match.keys()) == keys):\n                    return match\n    return match", "docstring": "Helper function to return keys nested in a bencode dict.\n\nBy default this function will return the values for the named keys requested\nby a plugin in match{}. The default setting is to look a single layer down\nfrom the root (same as the check for plugin applicability). This level is\nsuitable for most cases.\n\nFor cases where there is variability in the name at the first level\n(e.g. it is the MAC addresses of a device, or a UUID) it is possible to\noverride the depth limit and use _GetKeys to fetch from a deeper level.\n\nArgs:\ndata (dict[str, object]): bencode data values.\nkeys (list[str]): keys that should be returned.\ndepth (int): how many levels deep to check for a match.\n\nReturns:\ndict[str, object]: a dictionary with just the keys requested.", "source": "codesearchnet"}
{"code": "def wait_for_prompt(self, timeout_s=None):\n    with self._cond:\n        if self._prompt:\n            if (timeout_s is None):\n                self._cond.wait(((3600 * 24) * 365))\n            else:\n                self._cond.wait(timeout_s)\n        if (self._response is None):\n            raise PromptUnansweredError\n        return self._response", "docstring": "Wait for the user to respond to the current prompt.\n\nArgs:\ntimeout_s: Seconds to wait before raising a PromptUnansweredError.\n\nReturns:\nA string response, or the empty string if text_input was False.\n\nRaises:\nPromptUnansweredError: Timed out waiting for the user to respond.", "source": "codesearchnet"}
{"code": "def cho_solve(L, b):\n    from scipy.linalg import cho_solve as sp_cho_solve\n    L = asarray(L, float)\n    b = asarray(b, float)\n    if (L.size == 0):\n        if (b.size != 0):\n            raise ValueError('Dimension mismatch between L and b.')\n        return empty(b.shape)\n    return sp_cho_solve((L, True), b, check_finite=False)", "docstring": "r\"\"\"Solve for Cholesky decomposition.\n\nSolve the linear equations :math:`\\mathrm A \\mathbf x = \\mathbf b`,\ngiven the Cholesky factorization of :math:`\\mathrm A`.\n\nArgs:\nL (array_like): Lower triangular matrix.\nb (array_like): Right-hand side.\n\nReturns:\n:class:`numpy.ndarray`: The solution to the system\n:math:`\\mathrm A \\mathbf x = \\mathbf b`.\n\nSee Also\n--------\nnumpy.linalg.cholesky : Cholesky decomposition.\nscipy.linalg.cho_solve : Solve linear equations given Cholesky\nfactorization.", "source": "codesearchnet"}
{"code": "def create_exponential(num_finite_buckets, growth_factor, scale):\n    \n    if num_finite_buckets <= 0:\n        raise ValueError(_BAD_NUM_FINITE_BUCKETS)\n    if growth_factor <= 1.0:\n        raise ValueError(_BAD_FLOAT_ARG % (u'growth factor', 1.0))\n    if scale <= 0.0:\n        raise ValueError(_BAD_FLOAT_ARG % (u'scale', 0.0))\n    return sc_messages.Distribution(\n        bucketCounts=[0] * (num_finite_buckets + 2),\n        exponentialBuckets=sc_messages.ExponentialBuckets(\n            numFiniteBuckets=num_finite_buckets,\n            growthFactor=growth_factor,\n            scale=scale))", "docstring": "Creates a new instance of distribution with exponential buckets\n\nArgs:\nnum_finite_buckets (int): initializes number of finite buckets\ngrowth_factor (float): initializes the growth factor\nscale (float): initializes the scale\n\nReturn:\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`\n\nRaises:\nValueError: if the args are invalid for creating an instance", "source": "juraj-google-style"}
{"code": "def read_value(self):\n    with ops.name_scope('Read'):\n        value = self._read_variable_op()\n    return array_ops.identity(value)", "docstring": "Constructs an op which reads the value of this variable.\n\nShould be used when there are multiple reads, or when it is desirable to\nread the value only after some condition is true.\n\nReturns:\nThe value of the variable.", "source": "github-repos"}
{"code": "def parse_indices(indices_string):\n    indices_string = re.sub('\\\\s+', '', indices_string)\n    if indices_string.startswith('[') and indices_string.endswith(']'):\n        indices_string = indices_string[1:-1]\n    return [int(element) for element in indices_string.split(',')]", "docstring": "Parse a string representing indices.\n\nFor example, if the input is \"[1, 2, 3]\", the return value will be a list of\nindices: [1, 2, 3]\n\nArgs:\nindices_string: (str) a string representing indices. Can optionally be\nsurrounded by a pair of brackets.\n\nReturns:\n(list of int): Parsed indices.", "source": "github-repos"}
{"code": "def experiment_pb(hparam_infos, metric_infos, user='', description='', time_created_secs=None):\n    if (time_created_secs is None):\n        time_created_secs = time.time()\n    experiment = api_pb2.Experiment(description=description, user=user, time_created_secs=time_created_secs, hparam_infos=hparam_infos, metric_infos=metric_infos)\n    return _summary(metadata.EXPERIMENT_TAG, plugin_data_pb2.HParamsPluginData(experiment=experiment))", "docstring": "Creates a summary that defines a hyperparameter-tuning experiment.\n\nArgs:\nhparam_infos: Array of api_pb2.HParamInfo messages. Describes the\nhyperparameters used in the experiment.\nmetric_infos: Array of api_pb2.MetricInfo messages. Describes the metrics\nused in the experiment. See the documentation at the top of this file\nfor how to populate this.\nuser: String. An id for the user running the experiment\ndescription: String. A description for the experiment. May contain markdown.\ntime_created_secs: float. The time the experiment is created in seconds\nsince the UNIX epoch. If None uses the current time.\n\nReturns:\nA summary protobuffer containing the experiment definition.", "source": "codesearchnet"}
{"code": "def _FormatServiceText(self, service):\n    \n    string_segments = [\n        service.name,\n        '\\tImage Path    = {0:s}'.format(service.image_path),\n        '\\tService Type  = {0:s}'.format(service.HumanReadableType()),\n        '\\tStart Type    = {0:s}'.format(service.HumanReadableStartType()),\n        '\\tService Dll   = {0:s}'.format(service.service_dll),\n        '\\tObject Name   = {0:s}'.format(service.object_name),\n        '\\tSources:']\n\n    for source in service.sources:\n      string_segments.append('\\t\\t{0:s}:{1:s}'.format(source[0], source[1]))\n    return '\\n'.join(string_segments)", "docstring": "Produces a human readable multi-line string representing the service.\n\nArgs:\nservice (WindowsService):  service to format.\n\nReturns:\nstr: human readable representation of a Windows Service.", "source": "juraj-google-style"}
{"code": "class DPTNeck(nn.Module):\n\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        if config.backbone_config is not None and config.backbone_config.model_type in ['swinv2']:\n            self.reassemble_stage = None\n        else:\n            self.reassemble_stage = DPTReassembleStage(config)\n        self.convs = nn.ModuleList()\n        for channel in config.neck_hidden_sizes:\n            self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))\n        self.fusion_stage = DPTFeatureFusionStage(config)\n\n    def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n        \n        if not isinstance(hidden_states, (tuple, list)):\n            raise TypeError('hidden_states should be a tuple or list of tensors')\n        if len(hidden_states) != len(self.config.neck_hidden_sizes):\n            raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n        if self.reassemble_stage is not None:\n            hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n        features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n        output = self.fusion_stage(features)\n        return output", "docstring": "DPTNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as\ninput and produces another list of tensors as output. For DPT, it includes 2 stages:\n\n* DPTReassembleStage\n* DPTFeatureFusionStage.\n\nArgs:\nconfig (dict): config dict.", "source": "github-repos"}
{"code": "def reverse_fstring(pattern: str, string: str) -> dict[str, str] | None:\n    pattern = _pattern_cache(pattern)\n    if (m := pattern.fullmatch(string)):\n        return m.groupdict()\n    else:\n        return None", "docstring": "Reverse f-string.\n\nExample:\n\n```python\nepy.reverse_fstring(\n'/home/{user}/projects/{project}',\n'/home/conchylicultor/projects/menhir'\n) == {\n'user': 'conchylicultor',\n'project': 'menhir',\n}\n```\n\nArgs:\npattern: The f-string pattern (can only contained named group)\nstring: The string to search\n\nReturns:\nThe extracted info", "source": "github-repos"}
{"code": "def minimal_selector(self, complete_selector):\n    if (complete_selector not in self._selector_map):\n        raise KeyError(\"No value with selector '{}'.\".format(complete_selector))\n    selector_components = complete_selector.split('.')\n    node = self._selector_tree\n    start = None\n    for (i, component) in enumerate(reversed(selector_components)):\n        if (len(node) == 1):\n            if (start is None):\n                start = (- i)\n        else:\n            start = None\n        node = node[component]\n    if (len(node) > 1):\n        return complete_selector\n    return '.'.join(selector_components[start:])", "docstring": "Returns the minimal selector that uniquely matches `complete_selector`.\n\nArgs:\ncomplete_selector: A complete selector stored in the map.\n\nReturns:\nA partial selector that unambiguously matches `complete_selector`.\n\nRaises:\nKeyError: If `complete_selector` is not in the map.", "source": "codesearchnet"}
{"code": "def _subtoken_ids_to_tokens(self, subtokens):\n    concatenated = ''.join([self._subtoken_id_to_subtoken_string(s) for s in subtokens])\n    split = concatenated.split('_')\n    ret = []\n    for t in split:\n        if t:\n            unescaped = _unescape_token((t + '_'))\n            if unescaped:\n                ret.append(unescaped)\n    return ret", "docstring": "Converts a list of subtoken ids to a list of tokens.\n\nArgs:\nsubtokens: a list of integers in the range [0, vocab_size)\nReturns:\na list of strings.", "source": "codesearchnet"}
{"code": "def exists(self, file_path, check_link=False):\n        \n        if check_link and self.islink(file_path):\n            return True\n        file_path = make_string_path(file_path)\n        if file_path is None:\n            raise TypeError\n        if not file_path:\n            return False\n        if file_path == self.dev_null.name:\n            return not self.is_windows_fs\n        try:\n            if self.is_filepath_ending_with_separator(file_path):\n                return False\n            file_path = self.resolve_path(file_path)\n        except (IOError, OSError):\n            return False\n        if file_path == self.root.name:\n            return True\n\n        path_components = self._path_components(file_path)\n        current_dir = self.root\n        for component in path_components:\n            current_dir = self._directory_content(current_dir, component)[1]\n            if not current_dir:\n                return False\n        return True", "docstring": "Return true if a path points to an existing file system object.\n\nArgs:\nfile_path:  The path to examine.\n\nReturns:\n(bool) True if the corresponding object exists.\n\nRaises:\nTypeError: if file_path is None.", "source": "juraj-google-style"}
{"code": "def parse_config(data: dict) -> dict:\n    return {'email': data.get('email'), 'family': data['family_id'], 'samples': [{'id': sample_id, 'type': analysis_type} for (sample_id, analysis_type) in data['analysis_type'].items()], 'config_path': data['config_file_analysis'], 'is_dryrun': (True if ('dry_run_all' in data) else False), 'log_path': data['log_file'], 'out_dir': data['outdata_dir'], 'priority': data['slurm_quality_of_service'], 'sampleinfo_path': data['sample_info_file']}", "docstring": "Parse MIP config file.\n\nArgs:\ndata (dict): raw YAML input from MIP analysis config file\n\nReturns:\ndict: parsed data", "source": "codesearchnet"}
{"code": "def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):\n    sess.run(tf.group(tf.local_variables_initializer(), tf.global_variables_initializer()))\n    if (resume and (not (logdir or checkpoint))):\n        raise ValueError('Need to specify logdir to resume a checkpoint.')\n    if logdir:\n        state = tf.train.get_checkpoint_state(logdir)\n        if checkpoint:\n            checkpoint = os.path.join(logdir, checkpoint)\n        if ((not checkpoint) and state and state.model_checkpoint_path):\n            checkpoint = state.model_checkpoint_path\n        if (checkpoint and (resume is False)):\n            message = 'Found unexpected checkpoint when starting a new run.'\n            raise RuntimeError(message)\n        if checkpoint:\n            saver.restore(sess, checkpoint)", "docstring": "Initialize or restore variables from a checkpoint if available.\n\nArgs:\nsess: Session to initialize variables in.\nsaver: Saver to restore variables.\nlogdir: Directory to search for checkpoints.\ncheckpoint: Specify what checkpoint name to use; defaults to most recent.\nresume: Whether to expect recovering a checkpoint or starting a new run.\n\nRaises:\nValueError: If resume expected but no log directory specified.\nRuntimeError: If no resume expected but a checkpoint was found.", "source": "codesearchnet"}
{"code": "def id_by_index(index, resources):\n    if ((index < 0) or (index >= len(resources))):\n        return ''\n    try:\n        return resources[index].header_signature\n    except AttributeError:\n        return resources[index].address", "docstring": "Helper method to fetch the id or address of a resource by its index\n\nArgs:\nresources (list of objects): The resources to be paginated\nindex (integer): The index of the target resource\n\nReturns:\nstr: The address or header_signature of the resource,\nreturns an empty string if not found", "source": "codesearchnet"}
{"code": "def AddBatchJob(client):\n    batch_job_service = client.GetService('BatchJobService', version='v201809')\n    batch_job_operations = [{'operand': {}, 'operator': 'ADD'}]\n    return batch_job_service.mutate(batch_job_operations)['value'][0]", "docstring": "Add a new BatchJob to upload operations to.\n\nArgs:\nclient: an instantiated AdWordsClient used to retrieve the BatchJob.\n\nReturns:\nThe new BatchJob created by the request.", "source": "codesearchnet"}
{"code": "def create(self, key, value):\n        \n        data = None\n        if key is not None:\n            key = key.strip()\n            self.tcex.log.debug(u'create variable {}'.format(key))\n            \n            \n            parsed_key = self.parse_variable(key.strip())\n            variable_type = parsed_key['type']\n            if variable_type in self.read_data_types:\n                data = self.create_data_types[variable_type](key, value)\n            else:\n                data = self.create_raw(key, value)\n        return data", "docstring": "Create method of CRUD operation for working with KeyValue DB.\n\nThis method will automatically determine the variable type and\ncall the appropriate method to write the data.  If a non standard\ntype is provided the data will be written as RAW data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result string of DB write.", "source": "juraj-google-style"}
{"code": "def get_info(self, userSpecifier, **kwargs):\n    request = Request('GET', '/v3/users/{userSpecifier}')\n    request.set_path_param('userSpecifier', userSpecifier)\n    response = self.ctx.request(request)\n    if (response.content_type is None):\n        return response\n    if (not response.content_type.startswith('application/json')):\n        return response\n    jbody = json.loads(response.raw_body)\n    parsed_body = {}\n    if (str(response.status) == '200'):\n        if (jbody.get('userInfo') is not None):\n            parsed_body['userInfo'] = self.ctx.user.UserInfo.from_dict(jbody['userInfo'], self.ctx)\n    elif (str(response.status) == '401'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '403'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '405'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    else:\n        parsed_body = jbody\n    response.body = parsed_body\n    return response", "docstring": "Fetch the user information for the specified user. This endpoint is\nintended to be used by the user themself to obtain their own\ninformation.\n\nArgs:\nuserSpecifier:\nThe User Specifier\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def get_repo_url(pypirc, repository):\n    pypirc = os.path.abspath(os.path.expanduser(pypirc))\n    pypi_config = base.PyPIConfig(pypirc)\n    repo_config = pypi_config.get_repo_config(repository)\n    if repo_config:\n        return repo_config.get_clean_url()\n    else:\n        return base.RepositoryURL(repository)", "docstring": "Fetch the RepositoryURL for a given repository, reading info from pypirc.\n\nWill try to find the repository in the .pypirc, including username/password.\n\nArgs:\npypirc (str): path to the .pypirc config file\nrepository (str): URL or alias for the repository\n\nReturns:\nbase.RepositoryURL for the repository", "source": "codesearchnet"}
{"code": "def tf_step(self, x, iteration, conjugate, residual, squared_residual):\n        \n        x, next_iteration, conjugate, residual, squared_residual = super(ConjugateGradient, self).tf_step(\n            x, iteration, conjugate, residual, squared_residual\n        )\n\n        \n        A_conjugate = self.fn_x(conjugate)\n\n        \n        if self.damping > 0.0:\n            A_conjugate = [A_conj + self.damping * conj for A_conj, conj in zip(A_conjugate, conjugate)]\n\n        \n        conjugate_A_conjugate = tf.add_n(\n            inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for conj, A_conj in zip(conjugate, A_conjugate)]\n        )\n\n        \n        alpha = squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon)\n\n        \n        next_x = [t + alpha * conj for t, conj in zip(x, conjugate)]\n\n        \n        next_residual = [res - alpha * A_conj for res, A_conj in zip(residual, A_conjugate)]\n\n        \n        next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual])\n\n        \n        beta = next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon)\n\n        \n        next_conjugate = [res + beta * conj for res, conj in zip(next_residual, conjugate)]\n\n        return next_x, next_iteration, next_conjugate, next_residual, next_squared_residual", "docstring": "Iteration loop body of the conjugate gradient algorithm.\n\nArgs:\nx: Current solution estimate $x_t$.\niteration: Current iteration counter $t$.\nconjugate: Current conjugate $c_t$.\nresidual: Current residual $r_t$.\nsquared_residual: Current squared residual $r_t^2$.\n\nReturns:\nUpdated arguments for next iteration.", "source": "juraj-google-style"}
{"code": "def Cancel(self, request, global_params=None):\n    config = self.GetMethodConfig('Cancel')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Cancels a build in progress.\n\nArgs:\nrequest: (CancelBuildRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Build) The response message.", "source": "github-repos"}
{"code": "def get_weights(self):\n    return backend.batch_get_value(self.weights)", "docstring": "Returns the current value of the weights of the optimizer.\n\nReturns:\nA list of numpy arrays.", "source": "github-repos"}
{"code": "def full_game_name(short_name):\n  \n  camel_game_name = misc_utils.snakecase_to_camelcase(short_name)\n  full_name = camel_game_name + ATARI_GAME_MODE\n  return full_name", "docstring": "CamelCase game name with mode suffix.\n\nArgs:\nshort_name: snake_case name without mode e.g \"crazy_climber\"\n\nReturns:\nfull game name e.g. \"CrazyClimberNoFrameskip-v4\"", "source": "juraj-google-style"}
{"code": "def import_aliases(alias_source):\n    \n    alias_table = get_alias_table()\n    if is_url(alias_source):\n        alias_source = retrieve_file_from_url(alias_source)\n        alias_table.read(alias_source)\n        os.remove(alias_source)\n    else:\n        alias_table.read(alias_source)\n    _commit_change(alias_table)", "docstring": "Import aliases from a file or an URL.\n\nArgs:\nalias_source: The source of the alias. It can be a filepath or an URL.", "source": "juraj-google-style"}
{"code": "def parse_structure(self, store_in_memory=False):\n    if (not self.structure_file):\n        log.error('{}: no structure file, unable to parse'.format(self.id))\n        return None\n    else:\n        structure = StructureIO(self.structure_path, self.file_type)\n        structure_chains = [x.id for x in structure.first_model.child_list]\n        self.add_chain_ids(structure_chains)\n        self.get_structure_seqs(structure.first_model)\n        if (not self.mapped_chains):\n            self.add_mapped_chain_ids(structure_chains)\n        if store_in_memory:\n            self.parsed = True\n            self.structure = structure\n        return structure", "docstring": "Read the 3D coordinates of a structure file and return it as a Biopython Structure object.\nAlso create ChainProp objects in the chains attribute for each chain in the first model.\n\nArgs:\nstore_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``.\n\nReturns:\nStructure: Biopython Structure object", "source": "codesearchnet"}
{"code": "def backend_monitor(backend):\n    \n    if not isinstance(backend, IBMQBackend):\n        raise QiskitError('Input variable is not of type IBMQBackend.')\n    config = backend.configuration().to_dict()\n    status = backend.status().to_dict()\n    config_dict = {**status, **config}\n    if not config['simulator']:\n        props = backend.properties().to_dict()\n\n    print(backend.name())\n    print('='*len(backend.name()))\n    print('Configuration')\n    print('-'*13)\n    offset = '    '\n\n    upper_list = ['n_qubits', 'operational',\n                  'status_msg', 'pending_jobs',\n                  'basis_gates', 'local', 'simulator']\n\n    lower_list = list(set(config_dict.keys()).difference(upper_list))\n    \n    lower_list.remove('gates')\n    for item in upper_list+lower_list:\n        print(offset+item+':', config_dict[item])\n\n    \n    if config['simulator']:\n        return\n\n    print()\n    qubit_header = 'Qubits [Name / Freq / T1 / T2 / U1 err / U2 err / U3 err / Readout err]'\n    print(qubit_header)\n    print('-'*len(qubit_header))\n\n    sep = ' / '\n    for qub in range(len(props['qubits'])):\n        name = 'Q%s' % qub\n        qubit_data = props['qubits'][qub]\n        gate_data = props['gates'][3*qub:3*qub+3]\n        t1_info = qubit_data[0]\n        t2_info = qubit_data[1]\n        freq_info = qubit_data[2]\n        readout_info = qubit_data[3]\n\n        freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']\n        T1 = str(round(t1_info['value'],  \n                       5))+' ' + t1_info['unit']\n        T2 = str(round(t2_info['value'],  \n                       5))+' ' + t2_info['unit']\n        \n        U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))\n        \n        U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))\n        \n        U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))\n\n        readout_error = str(round(readout_info['value'], 5))\n\n        qstr = sep.join([name, freq, T1, T2, U1, U2, U3, readout_error])\n        print(offset+qstr)\n\n    print()\n    multi_qubit_gates = props['gates'][3*config['n_qubits']:]\n    multi_header = 'Multi-Qubit Gates [Name / Type / Gate Error]'\n    print(multi_header)\n    print('-'*len(multi_header))\n\n    for gate in multi_qubit_gates:\n        name = gate['name']\n        ttype = gate['gate']\n        error = str(round(gate['parameters'][0]['value'], 5))\n        mstr = sep.join([name, ttype, error])\n        print(offset+mstr)", "docstring": "Monitor a single IBMQ backend.\n\nArgs:\nbackend (IBMQBackend): Backend to monitor.\nRaises:\nQiskitError: Input is not a IBMQ backend.", "source": "juraj-google-style"}
{"code": "def has_open_file(self, file_object):\n        \n        return (file_object in [wrappers[0].get_object()\n                                for wrappers in self.open_files if wrappers])", "docstring": "Return True if the given file object is in the list of open files.\n\nArgs:\nfile_object: The FakeFile object to be checked.\n\nReturns:\n`True` if the file is open.", "source": "juraj-google-style"}
{"code": "def recipe_dcm_to_sheets(config, auth_read, account, report_id, report_name, sheet, tab):\n    dcm(config, {'auth': auth_read, 'report': {'account': account, 'report_id': report_id, 'name': report_name}, 'out': {'sheets': {'sheet': sheet, 'tab': tab, 'range': 'A1'}}})", "docstring": "Move existing CM report into a Sheet tab.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\naccount (integer) - NA\nreport_id (integer) - NA\nreport_name (string) - NA\nsheet (string) - NA\ntab (string) - NA", "source": "github-repos"}
{"code": "def extract(self, extractor: Extractor, extractable: Extractable = None, tokenizer: Tokenizer = None,\n                joiner: str = \"  \", **options) -> List[Extraction]:\n\n        \n        if not extractable:\n            extractable = self\n\n        if not tokenizer:\n            tokenizer = self.etk.default_tokenizer\n\n        extracted_results = list()\n\n        if extractor.input_type == InputType.TOKENS:\n            if self.etk.error_policy == ErrorPolicy.PROCESS:\n                if isinstance(extractable.value, list):\n                    self.etk.log(\n                        \"Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string\",\n                        \"warning\", self.doc_id, self.url)\n                    warnings.warn(\n                        \"Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string\")\n                elif isinstance(extractable.value, dict):\n                    self.etk.log(\n                        \"Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string\",\n                        \"warning\", self.doc_id, self.url)\n                    warnings.warn(\n                        \"Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string\")\n                tokens = extractable.get_tokens(tokenizer)\n                if tokens:\n                    extracted_results = extractor.extract(tokens, **options)\n            else:\n                raise ExtractorValueError(\n                    \"Extractor needs string, tokenizer needs string to tokenize, got \" + str(type(extractable.value)))\n\n        elif extractor.input_type == InputType.TEXT:\n            if self.etk.error_policy == ErrorPolicy.PROCESS:\n                if isinstance(extractable.value, list):\n                    self.etk.log(\"Extractor needs string, got extractable value as list, converting to string\",\n                                 \"warning\", self.doc_id, self.url)\n                    warnings.warn(\"Extractor needs string, got extractable value as list, converting to string\")\n                elif isinstance(extractable.value, dict):\n                    self.etk.log(\"Extractor needs string, got extractable value as dict, converting to string\",\n                                 \"warning\", self.doc_id, self.url)\n                    warnings.warn(\"Extractor needs string, got extractable value as dict, converting to string\")\n                text = extractable.get_string(joiner)\n                if text:\n                    extracted_results = extractor.extract(text, **options)\n            else:\n                \n                \n                pass\n\n        elif extractor.input_type == InputType.OBJECT:\n            extracted_results = extractor.extract(extractable.value, **options)\n\n        elif extractor.input_type == InputType.HTML:\n            if bool(BeautifulSoup(extractable.value, \"html.parser\").find()):\n                extracted_results = extractor.extract(extractable.value, **options)\n            else:\n                \n                \n                pass\n\n        try:\n            jsonPath = extractable.full_path\n        except AttributeError:\n            jsonPath = None\n\n        for e in extracted_results:\n            \n            e.prov_id = self.provenance_id_index\n            extraction_provenance_record: ExtractionProvenanceRecord = ExtractionProvenanceRecord(\n             
   e.prov_id, jsonPath, e.provenance[\"extractor_name\"],\n                e.provenance[\"start_char\"], e.provenance[\"end_char\"], e.provenance[\"confidence\"], self,\n                extractable.prov_id)\n            self._provenances[e.prov_id] = extraction_provenance_record\n\n            \n            self.provenance_id_index_incrementer()\n            self.create_provenance(extraction_provenance_record)\n\n        return extracted_results", "docstring": "Invoke the extractor on the given extractable, accumulating all the extractions in a list.\n\nArgs:\nextractor (Extractor):\nextractable (extractable):\ntokenizer: user can pass custom tokenizer if extractor wants token\njoiner: user can pass joiner if extractor wants text\noptions: user can pass arguments as a dict to the extract() function of different extractors\n\nReturns: List of Extraction, containing all the extractions.", "source": "juraj-google-style"}
{"code": "def get_snippet(self, snippet_key = None):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.snippets_suffix\n\t\t\t\t\t\t])\n\t\tif snippet_key:\n\t\t\turi = '/'.join([\n\t\t\t\t\t\t\turi,\n\t\t\t\t\t\t\tsnippet_key\n\t\t\t\t\t\t\t])\n\n\t\tcode, data =  self._req('get', uri)\n\t\t\n\t\treturn code, data", "docstring": "Get all/one specific snippet by its key\nArgs:\nkey\t\t\tsnippet key (default: None i.e. ALL)\nreturn\t\t(status code, snippet dict or list thereof)", "source": "juraj-google-style"}
{"code": "def setContext(self, context_str):\n        \n        if (len(self.m_context) == 0) and (len(context_str) >= 7):\n            if context_str[0:7] != \"request\":\n                ekm_log(\"Context: \" + context_str)\n        self.m_context = context_str", "docstring": "Set context string for serial command.  Private setter.\n\nArgs:\ncontext_str (str): Command specific string.", "source": "juraj-google-style"}
{"code": "def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu):\n    x1 = self._CreateNumpyTensor(tensor_in_sizes)\n    x2 = self._CreateNumpyTensor(filter_in_sizes)\n    with test_util.device(use_gpu):\n        t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)\n        t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)\n        strides = [1] + strides + [1]\n        dilations = [1] + dilations + [1]\n        if isinstance(padding, (list, tuple)):\n            padding = [(0, 0)] + padding + [(0, 0)]\n        if data_format == 'NCHW':\n            t1 = test_util.NHWCToNCHW(t1)\n            strides = test_util.NHWCToNCHW(strides)\n            dilations = test_util.NHWCToNCHW(dilations)\n            if isinstance(padding, (list, tuple)):\n                padding = test_util.NHWCToNCHW(padding)\n        conv = nn_ops.conv2d(t1, t2, dilations=dilations, strides=strides, padding=padding, data_format=data_format)\n        self.assertEqual(conv.dtype, dtype)\n        if data_format == 'NCHW':\n            conv = test_util.NCHWToNHWC(conv)\n        return conv", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\ndilations: Dilated rate: [col_dilation, row_dilation]\nstrides: Stride: [col_stride, row_stride]\npadding: Padding type.\ndata_format: Format of the data tensors.\ndtype: Data type for inputs and outputs.\nuse_gpu: True if the operations should be run on GPU\n\nReturns:\nSymbolic tensor value that can be used to execute the computation", "source": "github-repos"}
{"code": "def _ParseCredentialOptions(self, options):\n    credentials = getattr(options, 'credentials', [])\n    if (not isinstance(credentials, list)):\n        raise errors.BadConfigOption('Unsupported credentials value.')\n    for credential_string in credentials:\n        (credential_type, _, credential_data) = credential_string.partition(':')\n        if ((not credential_type) or (not credential_data)):\n            raise errors.BadConfigOption('Badly formatted credential: {0:s}.'.format(credential_string))\n        if (credential_type not in self._SUPPORTED_CREDENTIAL_TYPES):\n            raise errors.BadConfigOption('Unsupported credential type for: {0:s}.'.format(credential_string))\n        if (credential_type in self._BINARY_DATA_CREDENTIAL_TYPES):\n            try:\n                credential_data = credential_data.decode('hex')\n            except TypeError:\n                raise errors.BadConfigOption('Unsupported credential data for: {0:s}.'.format(credential_string))\n        self._credentials.append((credential_type, credential_data))", "docstring": "Parses the credential options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def filter_iqr(array, lower, upper):\n    (upper, lower) = iqr(array, upper, lower)\n    new = list(array)\n    for x in new[:]:\n        if ((x < lower) or (x > upper)):\n            new.remove(x)\n    return new", "docstring": "Return elements which falls within specified interquartile range.\n\nArguments:\n\narray (list): Sequence of numbers.\nlower (float): Lower bound for IQR, in range 0 <= lower <= 1.\nupper (float): Upper bound for IQR, in range 0 <= upper <= 1.\n\nReturns:\n\nlist: Copy of original list, with elements outside of IQR\nremoved.", "source": "codesearchnet"}
{"code": "def has_progress(self, previous_perf: 'NexmarkPerf') -> bool:\n    if self.runtime_sec != previous_perf.runtime_sec or self.event_count != previous_perf.event_count or self.result_count != previous_perf.result_count:\n        return True\n    return False", "docstring": "Args:\nprevious_perf: a NexmarkPerf object to be compared to self\n\nReturns:\nTrue if there are observed pipeline activity between self and other\nNexmarkPerf values", "source": "github-repos"}
{"code": "def _generate_date_with_wildcard_query(self, date_value):\n    if date_value.endswith(ast.GenericValue.WILDCARD_TOKEN):\n        try:\n            date_value = _truncate_wildcard_from_date(date_value)\n        except ValueError:\n            return {}\n        return self._generate_range_queries(self.KEYWORD_TO_ES_FIELDNAME['date'], {ES_RANGE_EQ_OPERATOR: date_value})\n    else:\n        return {}", "docstring": "Helper for generating a date keyword query containing a wildcard.\n\nReturns:\n(dict): The date query containing the wildcard or an empty dict in case the date value is malformed.\n\nThe policy followed here is quite conservative on what it accepts as valid input. Look into\n:meth:`inspire_query_parser.utils.visitor_utils._truncate_wildcard_from_date` for more information.", "source": "codesearchnet"}
{"code": "def get_cpu_props(cls, family, arch='x86'):\n    cpus = cls.get_cpus_by_arch(arch)\n    try:\n        return cpus.xpath('model[@name=\"{0}\"]'.format(family))[0]\n    except IndexError:\n        raise LagoException('No such CPU family: {0}'.format(family))", "docstring": "Get CPU info XML\n\nArgs:\nfamily(str): CPU family\narch(str): CPU arch\n\nReturns:\nlxml.etree.Element: CPU xml\n\nRaises:\n:exc:`~LagoException`: If no such CPU family exists", "source": "codesearchnet"}
{"code": "def by_issn(issn):\n    \n    \n    old_url = aleph.ALEPH_URL\n    aleph.ALEPH_URL = NTK_ALEPH_URL\n    records = aleph.getISSNsXML(issn, base=\"STK02\")\n    aleph.ALEPH_URL = old_url\n\n    \n    for record in records:\n        marc = MARCXMLRecord(record)\n\n        \n        \n        additional_info = {\n            \"222\": marc.get(\"222\", None),\n            \"PER\": marc.get(\"PER\", None),\n            \"776\": marc.get(\"776\", None),\n            \"008\": marc.get(\"008\", None),\n            \"alt_end_date\": \"\"  \n        }\n        additional_info = {\n            key: val\n            for key, val in additional_info.iteritems()\n            if val\n        }\n\n        \n        alt_end_date = None\n        alt_creation_date = None\n        if additional_info[\"008\"]:\n            \n            alt_creation_date = additional_info[\"008\"][7:11]\n\n            \n            alt_end_date = additional_info[\"008\"][11:15]\n            if alt_end_date in [\"9999\", \"****\"]:\n                alt_creation_date += \"-\"  \n                alt_end_date = None\n\n            additional_info[\"alt_end_date\"] = alt_end_date\n\n        \n        author = Author.parse_author(marc)\n\n        model = Model(\n            url=_first_or_none(\n                marc.get(\"856u\")\n            ),\n            conspect=_first_or_none(\n                marc.get(\"072a\")\n            ),\n            annotation_tags=_first_or_none(\n                marc.get(\"520a\")\n            ),\n            periodicity=_first_or_none(\n                marc.get(\"310a\")\n            ),\n            title_tags=_first_or_none(\n                marc.get(\"222a\")\n            ),\n            subtitle_tags=_first_or_none(\n                marc.get(\"245b\")\n            ),\n            place_tags=remove_hairs(\n                _first_or_none(marc.get(\"260a\")) or \"\"\n            ),\n            author_tags=author._asdict() if author else None,\n            publisher_tags=remove_hairs(\n                (\n                    _first_or_none(marc.get(\"260b\")) or\n                    _first_or_none(marc.get(\"264b\")) or\n                    \"\",\n                ),\n                \", \"\n            ),\n            creation_dates=_first_or_none(\n                marc.get(\"260c\", [alt_creation_date])\n            ),\n            lang_tags=_first_or_none(\n                marc.get(\"040b\")\n            ),\n            keyword_tags=marc.get(\"650a07\"),\n            source_info=_first_or_none(\n                marc.get(\"500a\")\n            ),\n            original_xml=record,\n            additional_info=additional_info,\n        )\n\n        yield _add_source(model)", "docstring": "Query aleph for records with given `issn`. The lookup is directed to the\nNTK's Aleph.\n\nArgs:\nissn (str): ISSN of the periodical.\n\nReturns:\nobj: :class:`Model` instances for each record.", "source": "juraj-google-style"}
{"code": "def parse_args(argv=None):\n    \n    parent_parser = get_parent_parser()\n\n    \n    desc = \"Data Version Control\"\n    parser = DvcParser(\n        prog=\"dvc\",\n        description=desc,\n        parents=[parent_parser],\n        formatter_class=argparse.RawTextHelpFormatter,\n    )\n\n    \n    \n    parser.add_argument(\n        \"-V\",\n        \"--version\",\n        action=VersionAction,\n        nargs=0,\n        help=\"Show program's version.\",\n    )\n\n    \n    subparsers = parser.add_subparsers(\n        title=\"Available Commands\",\n        metavar=\"COMMAND\",\n        dest=\"cmd\",\n        help=\"Use dvc COMMAND --help for command-specific help.\",\n    )\n\n    fix_subparsers(subparsers)\n\n    for cmd in COMMANDS:\n        cmd.add_parser(subparsers, parent_parser)\n\n    args = parser.parse_args(argv)\n\n    return args", "docstring": "Parses CLI arguments.\n\nArgs:\nargv: optional list of arguments to parse. sys.argv is used by default.\n\nRaises:\ndvc.exceptions.DvcParserError: raised for argument parsing errors.", "source": "juraj-google-style"}
{"code": "def create(self, name):\n    \n    return Bucket(name, context=self._context).create(self._project_id)", "docstring": "Creates a new bucket.\n\nArgs:\nname: a unique name for the new bucket.\nReturns:\nThe newly created bucket.\nRaises:\nException if there was an error creating the bucket.", "source": "juraj-google-style"}
{"code": "def __init__(self, app, project):\n        \n        self.project = project\n        self.app = app\n        self.sources = set()\n        self.smart_sources = []\n        self.index = None\n        self.source_roots = OrderedSet()\n        self._created_symbols = DefaultOrderedDict(OrderedSet)\n        self.__package_root = None\n        self.__toplevel_comments = OrderedSet()\n\n        self.formatter = self._make_formatter()", "docstring": "Constructor for `Extension`.\n\nThis should never get called directly.\n\nArgs:\nproject: The `project.Project` instance which documentation\nis being generated.", "source": "juraj-google-style"}
{"code": "def sample(self, num_samples=1):\n    self.check_fit()\n    return np.random.normal(self.mean, self.std, num_samples)", "docstring": "Returns new data point based on model.\n\nArguments:\nn_samples: `int`\n\nReturns:\nnp.ndarray: Generated samples", "source": "codesearchnet"}
{"code": "class Globally(PTransform):\n\n    def __init__(self, num_quantiles, key=None, reverse=False, weighted=False, input_batched=False):\n        self._num_quantiles = num_quantiles\n        self._key = key\n        self._reverse = reverse\n        self._weighted = weighted\n        self._input_batched = input_batched\n\n    def expand(self, pcoll):\n        return pcoll | CombineGlobally(ApproximateQuantilesCombineFn.create(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched))\n\n    def display_data(self):\n        return ApproximateQuantiles._display_data(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched)", "docstring": "PTransform takes PCollection and returns a list whose single value is\napproximate N-tiles of the input collection globally.\n\nArgs:\nnum_quantiles: number of elements in the resulting quantiles values list.\nkey: (optional) Key is  a mapping of elements to a comparable key, similar\nto the key argument of Python's sorting methods.\nreverse: (optional) whether to order things smallest to largest, rather\nthan largest to smallest.\nweighted: (optional) if set to True, the transform returns weighted\nquantiles. The input PCollection is then expected to contain tuples of\ninput values with the corresponding weight.\ninput_batched: (optional) if set to True, the transform expects each\nelement of input PCollection to be a batch, which is a list of elements\nfor non-weighted case and a tuple of lists of elements and weights for\nweighted. Provides a way to accumulate multiple elements at a time more\nefficiently.", "source": "github-repos"}
{"code": "def CheckGlobalStatic(filename, clean_lines, linenum, error):\n  \n  line = clean_lines.elided[linenum]\n\n  \n  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):\n    line += clean_lines.elided[linenum + 1].strip()\n\n  \n  \n  \n  \n  \n  \n  match = Match(\n      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'\n      r'([a-zA-Z0-9_:]+)\\b(.*)',\n      line)\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  if (match and\n      not Search(r'\\bstring\\b(\\s+const)?\\s*[\\*\\&]\\s*(const\\s+)?\\w', line) and\n      not Search(r'\\boperator\\W', line) and\n      not Match(r'\\s*(<.*>)?(::[a-zA-Z0-9_]+)*\\s*\\(([^\"]|$)', match.group(4))):\n    if Search(r'\\bconst\\b', line):\n      error(filename, linenum, 'runtime/string', 4,\n            'For a static/global string constant, use a C style string '\n            'instead: \"%schar%s %s[]\".' %\n            (match.group(1), match.group(2) or '', match.group(3)))\n    else:\n      error(filename, linenum, 'runtime/string', 4,\n            'Static/global string variables are not permitted.')\n\n  if (Search(r'\\b([A-Za-z0-9_]*_)\\(\\1\\)', line) or\n      Search(r'\\b([A-Za-z0-9_]*_)\\(CHECK_NOTNULL\\(\\1\\)\\)', line)):\n    error(filename, linenum, 'runtime/init', 4,\n          'You seem to be initializing a member variable with itself.')", "docstring": "Check for unsafe global or static objects.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def Uninstall(self, package_name, keep_data=False, timeout_ms=None):\n    cmd = ['pm uninstall']\n    if keep_data:\n        cmd.append('-k')\n    cmd.append(('\"%s\"' % package_name))\n    return self.Shell(' '.join(cmd), timeout_ms=timeout_ms)", "docstring": "Removes a package from the device.\n\nArgs:\npackage_name: Package name of target package.\nkeep_data: whether to keep the data and cache directories\ntimeout_ms: Expected timeout for pushing and installing.\n\nReturns:\nThe pm uninstall output.", "source": "codesearchnet"}
{"code": "def get_catch_vars(catch):\n    \n    catch_re = re.compile(r'catch\\s+(\\${?\\S+}?),\\s*(\\${?\\S+}?)')\n    res = catch_re.match(catch)\n    if res is None:\n        err = 'Catch must have format \"catch $x, $y\", got \"{0}\"'.format(catch)\n        raise exceptions.YamlSyntaxError(err)\n    return get_var_name(res.group(1)), get_var_name(res.group(2))", "docstring": "Returns 2-tuple with names of catch control vars, e.g. for \"catch $was_exc, $exc\"\nit returns ('was_exc', 'err').\n\nArgs:\ncatch: the whole catch line\n\nReturns:\n2-tuple with names of catch control variables\n\nRaises:\nexceptions.YamlSyntaxError if the catch line is malformed", "source": "juraj-google-style"}
{"code": "def APFSUnlockVolume(fsapfs_volume, path_spec, key_chain):\n    is_locked = fsapfs_volume.is_locked()\n    if is_locked:\n        password = key_chain.GetCredential(path_spec, 'password')\n        if password:\n            fsapfs_volume.set_password(password)\n        recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')\n        if recovery_password:\n            fsapfs_volume.set_recovery_password(recovery_password)\n        is_locked = (not fsapfs_volume.unlock())\n    return (not is_locked)", "docstring": "Unlocks an APFS volume using the path specification.\n\nArgs:\nfsapfs_volume (pyapfs.volume): APFS volume.\npath_spec (PathSpec): path specification.\nkey_chain (KeyChain): key chain.\n\nReturns:\nbool: True if the volume is unlocked, False otherwise.", "source": "codesearchnet"}
{"code": "def GetKey(self, public_key_hash):\n    if (public_key_hash.ToBytes() in self._keys.keys()):\n        return self._keys[public_key_hash.ToBytes()]\n    return None", "docstring": "Get the KeyPair belonging to the public key hash.\n\nArgs:\npublic_key_hash (UInt160): a public key hash to get the KeyPair for.\n\nReturns:\nKeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None", "source": "codesearchnet"}
{"code": "def GetHashType(self, hash_str):\n    for (hash_type, hash_re) in self.hashes:\n        if hash_re.match(hash_str):\n            return hash_type\n    return 'EMPTY'", "docstring": "Identify the type of hash in a hash string.\n\nArgs:\nhash_str: A string value that may be a hash.\n\nReturns:\nA string description of the type of hash.", "source": "codesearchnet"}
{"code": "def build_uri(self, id_or_uri):\n        \n        if not id_or_uri:\n            logger.exception(RESOURCE_CLIENT_INVALID_ID)\n            raise ValueError(RESOURCE_CLIENT_INVALID_ID)\n\n        if \"/\" in id_or_uri:\n            self.validate_resource_uri(id_or_uri)\n            return id_or_uri\n        else:\n            return self._base_uri + \"/\" + id_or_uri", "docstring": "Helps to build the URI from resource id and validate the URI.\n\nArgs:\nid_or_uri: ID/URI of the resource.\n\nReturns:\nReturns a valid resource URI", "source": "juraj-google-style"}
{"code": "def max_entropy_distribution(node_indices, number_of_nodes):\n    distribution = np.ones(repertoire_shape(node_indices, number_of_nodes))\n    return (distribution / distribution.size)", "docstring": "Return the maximum entropy distribution over a set of nodes.\n\nThis is different from the network's uniform distribution because nodes\noutside ``node_indices`` are fixed and treated as if they have only 1\nstate.\n\nArgs:\nnode_indices (tuple[int]): The set of node indices over which to take\nthe distribution.\nnumber_of_nodes (int): The total number of nodes in the network.\n\nReturns:\nnp.ndarray: The maximum entropy distribution over the set of nodes.", "source": "codesearchnet"}
{"code": "def get_all_clusters_sites():\n    result = {}\n    gk = get_api_client()\n    sites = gk.sites.list()\n    for site in sites:\n        clusters = site.clusters.list()\n        result.update({c.uid: site.uid for c in clusters})\n    return result", "docstring": "Get all the cluster of all the sites.\n\nReturns:\ndict corresponding to the mapping cluster uid to python-grid5000 site", "source": "codesearchnet"}
{"code": "def isfile(self, path, follow_symlinks=True):\n        \n        return self._is_of_type(path, S_IFREG, follow_symlinks)", "docstring": "Determine if path identifies a regular file.\n\nArgs:\npath: Path to filesystem object.\n\nReturns:\n`True` if path points to a regular file (following symlinks).\n\nRaises:\nTypeError: if path is None.", "source": "juraj-google-style"}
{"code": "def convert_tensor_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:\n    mapping = {dtypes.float16: _types_pb2.FLOAT16, dtypes.float32: _types_pb2.FLOAT, dtypes.float64: _types_pb2.FLOAT64, dtypes.int8: _types_pb2.INT8, dtypes.int16: _types_pb2.INT16, dtypes.uint16: _types_pb2.UINT16, dtypes.int32: _types_pb2.INT32, dtypes.int64: _types_pb2.INT64, dtypes.uint8: _types_pb2.UINT8, dtypes.uint32: _types_pb2.UINT32, dtypes.uint64: _types_pb2.UINT64, dtypes.string: _types_pb2.STRING, dtypes.bool: _types_pb2.BOOL, dtypes.complex64: _types_pb2.COMPLEX64, dtypes.complex128: _types_pb2.COMPLEX128}\n    tflite_type = mapping.get(tf_type)\n    if tflite_type is None:\n        raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))\n    return tflite_type", "docstring": "Convert tensor type from tf type to tflite type.\n\nArgs:\ntf_type: TensorFlow type.\nusage: Text describing the reason for invoking this function.\n\nRaises:\nValueError: If `tf_type` is unsupported.\n\nReturns:\ntflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.", "source": "github-repos"}
{"code": "def patch_retry(testcase, module):\n    from mock import Mock\n    from mock import patch\n    real_retry_with_exponential_backoff = retry.with_exponential_backoff\n\n    def patched_retry_with_exponential_backoff(**kwargs):\n        \n        kwargs.update(logger=Mock(), clock=Mock())\n        return real_retry_with_exponential_backoff(**kwargs)\n    patch.object(retry, 'with_exponential_backoff', side_effect=patched_retry_with_exponential_backoff).start()\n    importlib.reload(module)\n\n    def remove_patches():\n        patch.stopall()\n        importlib.reload(module)\n    testcase.addCleanup(remove_patches)", "docstring": "A function to patch retry module to use mock clock and logger.\n\nClock and logger that defined in retry decorator will be replaced in test\nin order to skip sleep phase when retry happens.\n\nArgs:\ntestcase: An instance of unittest.TestCase that calls this function to\npatch retry module.\nmodule: The module that uses retry and need to be replaced with mock\nclock and logger in test.", "source": "github-repos"}
{"code": "def __init__(self, dataset_file_map: Mapping[str, _RepresentativeDatasetFile]) -> None:\n    self.dataset_file_map = dataset_file_map", "docstring": "Initializes TFRecord represenatative dataset loader.\n\nArgs:\ndataset_file_map: Signature key -> `RepresentativeDatasetFile` mapping.\n\nRaises:\nDecodeError: If the sample is not RepresentativeDataSample.", "source": "github-repos"}
{"code": "def _parse_logline_timestamp(t):\n    date, time = t.split(' ')\n    month, day = date.split('-')\n    h, m, s = time.split(':')\n    s, ms = s.split('.')\n    return (month, day, h, m, s, ms)", "docstring": "Parses a logline timestamp into a tuple.\n\nArgs:\nt: Timestamp in logline format.\n\nReturns:\nAn iterable of date and time elements in the order of month, day, hour,\nminute, second, microsecond.", "source": "github-repos"}
{"code": "def _Build(self, storage_file):\n    \n    self._index = {}\n    for event_tag in storage_file.GetEventTags():\n      self.SetEventTag(event_tag)", "docstring": "Builds the event tag index.\n\nArgs:\nstorage_file (BaseStorageFile): storage file.", "source": "juraj-google-style"}
{"code": "def name_based_restore(mesh: layout_lib.Mesh, checkpoint_prefix: str, name_tensor_dict: Dict[str, Union[tensor_lib.Tensor, tf_variables.Variable]]):\n    if not context.executing_eagerly():\n        raise ValueError('name based restore must run eagerly.')\n    ordered_name_tensor_dict = name_tensor_dict\n    if not isinstance(name_tensor_dict, collections.OrderedDict):\n        ordered_name_tensor_dict = collections.OrderedDict(name_tensor_dict)\n    for name, tensor in ordered_name_tensor_dict.items():\n        try:\n            if api.fetch_layout(tensor).mesh.device_type().upper() != 'CPU':\n                raise ValueError('Restoring a non CPU Tensor is not supported currently. Offending tensor name : {tensor_name}'.format(tensor_name=name))\n        except errors_impl.OpError as op_error:\n            raise ValueError('Saving/Restoring tensor must be a DTensor') from op_error\n    checkpoint_prefix = api.pack([checkpoint_prefix] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=0))\n    tensor_names = api.pack([list(ordered_name_tensor_dict.keys())] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))\n    shape_and_slices = api.pack([[''] * len(ordered_name_tensor_dict)] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))\n    input_shapes = [tensor.shape for tensor in ordered_name_tensor_dict.values()]\n    input_layouts = [api.fetch_layout(tensor).to_string() for tensor in ordered_name_tensor_dict.values()]\n    with ops.device(api.device_name()):\n        restored_cpu_tensors = gen_dtensor_ops.d_tensor_restore_v2(prefix=checkpoint_prefix, tensor_names=tensor_names, shape_and_slices=shape_and_slices, input_shapes=input_shapes, input_layouts=input_layouts, dtypes=[tensor.dtype for tensor in ordered_name_tensor_dict.values()])\n    return collections.OrderedDict(zip(ordered_name_tensor_dict.keys(), restored_cpu_tensors))", "docstring": "Restores from checkpoint_prefix to name based DTensors.\n\nIt is required to have already-initialized DTensor variables that have same\nshape/dtype for the tensors being restored.\n\nAlso, we currently only support a named based restore on a single mesh.\n\nArgs:\nmesh: The single mesh that all Tensors would be restored to.\ncheckpoint_prefix : The prefix of checkpoint to be restored.\nname_tensor_dict: A ordered dictionary of tensor_names to a DTensor. The\nDTensor shape/dtype must match the tensors being saved/restored for now.\n\nReturns:\nA dictionary of name to its restored DTensor value.", "source": "github-repos"}
{"code": "def upload(self, file_path, golden_image_info):\n        \n        uri = \"{0}?name={1}&description={2}\".format(self.URI,\n                                                    quote(golden_image_info.get('name', '')),\n                                                    quote(golden_image_info.get('description', '')))\n\n        return self._client.upload(file_path, uri)", "docstring": "Adds a Golden Image resource from the file that is uploaded from a local drive. Only the .zip format file can\nbe used for the upload.\n\nArgs:\nfile_path (str): File name to upload.\ngolden_image_info (dict): Golden Image information.\n\nReturns:\ndict: Golden Image.", "source": "juraj-google-style"}
{"code": "def list_runs(self, project, entity=None):\n        \n        query = gql()\n        return self._flatten_edges(self.gql(query, variable_values={\n            'entity': entity or self.settings('entity'),\n            'model': project or self.settings('project')})['model']['buckets'])", "docstring": "Lists runs in W&B scoped by project.\n\nArgs:\nproject (str): The project to scope the runs to\nentity (str, optional): The entity to scope this project to.  Defaults to public models\n\nReturns:\n[{\"id\",name\",\"description\"}]", "source": "juraj-google-style"}
{"code": "def user_to_uid(user):\n    \n    if user is None:\n        user = salt.utils.user.get_user()\n\n    return salt.utils.win_dacl.get_sid_string(user)", "docstring": "Convert user name to a uid\n\nArgs:\nuser (str): The user to lookup\n\nReturns:\nstr: The user id of the user\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.user_to_uid myusername", "source": "juraj-google-style"}
{"code": "def _setweights(self):\n    for name_w in self.weights:\n        raw_w = getattr(self.module, (name_w + '_raw'))\n        w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)\n        if hasattr(self.module, name_w):\n            delattr(self.module, name_w)\n        setattr(self.module, name_w, w)", "docstring": "Uses pytorch's built-in dropout function to apply dropout to the parameters of\nthe wrapped module.\n\nArgs:\nNone\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_value: Optional[Tuple[tf.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, training=False) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:\n    residual = hidden_states\n    hidden_states = self.input_layernorm(hidden_states)\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache)\n    hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout)\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.post_attention_layernorm(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (self_attn_weights,)\n    if use_cache:\n        outputs += (present_key_value,)\n    return outputs", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`, *optional*): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nuse_cache (`bool`, *optional*):\nIf set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n(see `past_key_values`).\npast_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states", "source": "github-repos"}
{"code": "def library_line(self, file_name):\n        \n        gulplib_set = lambda: 'GULP_LIB' in os.environ.keys()\n        readable = lambda f: os.path.isfile(f) and os.access(f, os.R_OK)\n\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        gin = \"\"\n        dirpath, fname = os.path.split(file_name)\n        if dirpath and readable(file_name):  \n            gin = 'library ' + file_name\n        else:\n            fpath = os.path.join(os.getcwd(), file_name)  \n            if readable(fpath):\n                gin = 'library ' + fpath\n            elif gulplib_set():         \n                fpath = os.path.join(os.environ['GULP_LIB'], file_name)\n                if readable(fpath):\n                    gin = 'library ' + file_name\n        if gin:\n            return gin + \"\\n\"\n        else:\n            raise GulpError('GULP Library not found')", "docstring": "Specifies GULP library file to read species and potential parameters.\nIf using library don't specify species and potential\nin the input file and vice versa. Make sure the elements of\nstructure are in the library file.\n\nArgs:\nfile_name: Name of GULP library file\n\nReturns:\nGULP input string specifying library option", "source": "juraj-google-style"}
{"code": "def _MakeExecutable(self, metadata_script):\n    \n    mode = os.stat(metadata_script).st_mode\n    os.chmod(metadata_script, mode | stat.S_IEXEC)", "docstring": "Add executable permissions to a file.\n\nArgs:\nmetadata_script: string, the path to the executable file.", "source": "juraj-google-style"}
{"code": "def setattr(self, name, val):\n        \n\n        nodes = self._do_query(multiple=False)\n        try:\n            return self.poco.agent.hierarchy.setAttr(nodes, name, val)\n        except UnableToSetAttributeException as e:\n            raise InvalidOperationException('\"{}\" of \"{}\"'.format(str(e), self))", "docstring": "Change the attribute value of the UI element. Not all attributes can be casted to text. If changing the\nimmutable attributes or attributes which do not exist, the InvalidOperationException exception is raised.\n\nArgs:\nname: attribute name\nval: new attribute value to cast\n\nRaises:\nInvalidOperationException: when it fails to set the attribute on UI element", "source": "juraj-google-style"}
{"code": "def _create_initial_state(self, initial_ids, initial_cache):\n    cur_index = tf.constant(0)\n    alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)\n    alive_seq = tf.expand_dims(alive_seq, axis=2)\n    initial_log_probs = tf.constant([([0.0] + ([(- float('inf'))] * (self.beam_size - 1)))])\n    alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])\n    alive_cache = nest.map_structure((lambda t: _expand_to_beam_size(t, self.beam_size)), initial_cache)\n    finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)\n    finished_scores = (tf.ones([self.batch_size, self.beam_size]) * (- INF))\n    finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)\n    state = {_StateKeys.CUR_INDEX: cur_index, _StateKeys.ALIVE_SEQ: alive_seq, _StateKeys.ALIVE_LOG_PROBS: alive_log_probs, _StateKeys.ALIVE_CACHE: alive_cache, _StateKeys.FINISHED_SEQ: finished_seq, _StateKeys.FINISHED_SCORES: finished_scores, _StateKeys.FINISHED_FLAGS: finished_flags}\n    state_shape_invariants = {_StateKeys.CUR_INDEX: tf.TensorShape([]), _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]), _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]), _StateKeys.ALIVE_CACHE: nest.map_structure(_get_shape_keep_last_dim, alive_cache), _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]), _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]), _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])}\n    return (state, state_shape_invariants)", "docstring": "Return initial state dictionary and its shape invariants.\n\nArgs:\ninitial_ids: initial ids to pass into the symbols_to_logits_fn.\nint tensor with shape [batch_size, 1]\ninitial_cache: dictionary storing values to be passed into the\nsymbols_to_logits_fn.\n\nReturns:\nstate and shape invariant dictionaries with keys from _StateKeys", "source": "codesearchnet"}
{"code": "def __init__(self, wrapper):\n        \n        self._wrapper = wrapper\n        self.make_request = self._wrapper.request_parking", "docstring": "Initialization of the API module.\n\nArgs:\nwrapper (Wrapper): Object that performs the requests to endpoints.", "source": "juraj-google-style"}
{"code": "def bool_env(varname: str, default: bool) -> bool:\n    val = os.getenv(varname, str(default))\n    val = val.lower()\n    if val in ('y', 'yes', 't', 'true', 'on', '1'):\n        return True\n    elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n        return False\n    else:\n        raise ValueError('invalid truth value %r for environment %r' % (val, varname))", "docstring": "Read an environment variable and interpret it as a boolean.\n\nTrue values are (case insensitive): 'y', 'yes', 't', 'true', 'on', and '1';\nfalse values are 'n', 'no', 'f', 'false', 'off', and '0'.\n\nArgs:\nvarname: the name of the variable\ndefault: the default boolean value\nRaises: ValueError if the environment variable is anything else.", "source": "github-repos"}
{"code": "def renew(self, requested_timeout=None):\n        \n        \n        \n\n        if self._has_been_unsubscribed:\n            raise SoCoException(\n                'Cannot renew subscription once unsubscribed')\n        if not self.is_subscribed:\n            raise SoCoException(\n                'Cannot renew subscription before subscribing')\n        if self.time_left == 0:\n            raise SoCoException(\n                'Cannot renew subscription after expiry')\n\n        \n        \n        \n        \n        headers = {\n            'SID': self.sid\n        }\n        if requested_timeout is None:\n            requested_timeout = self.requested_timeout\n        if requested_timeout is not None:\n            headers[\"TIMEOUT\"] = \"Second-{}\".format(requested_timeout)\n        response = requests.request(\n            'SUBSCRIBE',\n            self.service.base_url + self.service.event_subscription_url,\n            headers=headers)\n        response.raise_for_status()\n        timeout = response.headers['timeout']\n        \n        \n        \n        if timeout.lower() == 'infinite':\n            self.timeout = None\n        else:\n            self.timeout = int(timeout.lstrip('Second-'))\n        self._timestamp = time.time()\n        self.is_subscribed = True\n        log.info(\n            \"Renewed subscription to %s, sid: %s\",\n            self.service.base_url + self.service.event_subscription_url,\n            self.sid)", "docstring": "Renew the event subscription.\n\nYou should not try to renew a subscription which has been\nunsubscribed, or once it has expired.\n\nArgs:\nrequested_timeout (int, optional): The period for which a renewal\nrequest should be made. If None (the default), use the timeout\nrequested on subscription.", "source": "juraj-google-style"}
{"code": "def get(self, dist=None, term=None, family=None):\n    if (dist is not None):\n        if (dist not in self.dists):\n            raise ValueError((\"'%s' is not a valid distribution name.\" % dist))\n        return self._get_prior(self.dists[dist])\n    elif (term is not None):\n        if (term not in self.terms):\n            raise ValueError((\"'%s' is not a valid term type.\" % term))\n        return self._get_prior(self.terms[term])\n    elif (family is not None):\n        if (family not in self.families):\n            raise ValueError((\"'%s' is not a valid family name.\" % family))\n        _f = self.families[family]\n        prior = self._get_prior(_f['dist'])\n        return Family(family, prior, _f['link'], _f['parent'])", "docstring": "Retrieve default prior for a named distribution, term type, or family.\n\nArgs:\ndist (str): Name of desired distribution. Note that the name is\nthe key in the defaults dictionary, not the name of the\nDistribution object used to construct the prior.\nterm (str): The type of term family to retrieve defaults for.\nMust be one of 'intercept', 'fixed', or 'random'.\nfamily (str): The name of the Family to retrieve. Must be a value\ndefined internally. In the default config, this is one of\n'gaussian', 'bernoulli', 'poisson', or 't'.", "source": "codesearchnet"}
{"code": "def read_structs(fstream):\n    \n    struct = read_struct(fstream)\n    while struct is not None:\n        yield struct\n        struct = read_struct(fstream)", "docstring": "Read all structs from likwid's file stream.\n\nArgs:\nfstream: Likwid's output file stream.\n\nReturns:\nA generator that can be used to iterate over all structs in the\nfstream.", "source": "juraj-google-style"}
{"code": "def reversals(self, transfer_id, data={}, **kwargs):\n        \n        url = \"{}/{}/reversals\".format(self.base_url, transfer_id)\n        return self.get_url(url, data, **kwargs)", "docstring": "Get all Reversal Transfer from given id\n\nArgs:\ntransfer_id :\nId for which reversal transfer object has to be fetched\n\nReturns:\nTransfer Dict", "source": "juraj-google-style"}
{"code": "def match(self, request):\n    errors = []\n\n    def match(matcher):\n        try:\n            return matcher.match(request)\n        except Exception as err:\n            err = '{}: {}'.format(type(matcher).__name__, err)\n            errors.append(err)\n            return False\n    return (all([match(matcher) for matcher in self]), errors)", "docstring": "Match the given HTTP request instance against the registered\nmatcher functions in the current engine.\n\nArguments:\nrequest (pook.Request): outgoing request to match.\n\nReturns:\ntuple(bool, list[Exception]): ``True`` if all matcher tests\npasses, otherwise ``False``. Also returns an optional list\nof error exceptions.", "source": "codesearchnet"}
{"code": "def new_scope(self, new_scope={}):\n    (old_scopes, self.scopes) = (self.scopes, self.scopes.new_child(new_scope))\n    (yield)\n    self.scopes = old_scopes", "docstring": "Add a new innermost scope for the duration of the with block.\n\nArgs:\nnew_scope (dict-like): The scope to add.", "source": "codesearchnet"}
{"code": "def concatenate(x, other):\n    return type(x)(tf.TensorShape(x).concatenate(other))", "docstring": "Returns the concatenation of the dimension in `x` and `other`.\n\n*Note:* If either `x` or `other` is completely unknown, concatenation will\ndiscard information about the other shape. In future, we might support\nconcatenation that preserves this information for use with slicing.\n\nFor more details, see `help(tf.TensorShape.concatenate)`.\n\nArgs:\nx: object representing a shape; convertible to `tf.TensorShape`.\nother: object representing a shape; convertible to `tf.TensorShape`.\n\nReturns:\nnew_shape: an object like `x` whose elements are the concatenation of the\ndimensions in `x` and `other`.", "source": "codesearchnet"}
{"code": "def __init__(self, devices, group_size, options, collective_keys=None, canonicalize_devices=True):\n    if group_size % len(devices) > 0:\n        raise ValueError('group_size must be divisible by the number of devices.')\n    self._group_size = group_size\n    self._options = options\n    self._collective_keys = collective_keys or cross_device_utils.CollectiveKeys()\n    self._lock = threading.Lock()\n    if canonicalize_devices:\n        self._devices = tuple((device_util.canonicalize(d) for d in devices))\n    else:\n        self._devices = tuple((device_util.canonicalize_without_job_and_task(d) for d in devices))\n    group_key = self._collective_keys.get_group_key(self._devices)\n    self._launchers = []\n    self._limited_nccl = False\n    for device in self._devices:\n        launcher = cross_device_utils.CollectiveReplicaLauncher(group_key, group_size, self._collective_keys, device, options)\n        self._launchers.append(launcher)\n        if not launcher.can_order_nccl():\n            self._limited_nccl = True\n    super(CollectiveAllReduce, self).__init__()\n    self._canonicalize_devices = canonicalize_devices", "docstring": "Initializes the object.\n\nArgs:\ndevices: a list of device strings to run collectives on.\ngroup_size: the global group size. For between-graph replicated training\nit's the total number of devices across all workers.\noptions: a `tf.distribute.experimental.CommunicationOptions`.\ncollective_keys: an optional CollectiveKey object.\ncanonicalize_devices: Whether to canonicalize devices for workers or not.", "source": "github-repos"}
{"code": "def __init__(self, resolver_context):\n    \n    super(VShadowFile, self).__init__(resolver_context)\n    self._file_system = None\n    self._vshadow_store = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):\n    input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(input_tensor)\n    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n        return input_tensor\n    with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value)):\n        if ignore_value is None:\n            if input_tensor.dtype == dtypes.string:\n                ignore_value = ''\n            elif input_tensor.dtype.is_integer:\n                ignore_value = -1\n            else:\n                ignore_value = input_tensor.dtype.as_numpy_dtype()\n        ignore_value = math_ops.cast(ignore_value, input_tensor.dtype, name='ignore_value')\n        indices = array_ops.where(math_ops.not_equal(input_tensor, ignore_value), name='indices')\n        return sparse_tensor_lib.SparseTensor(indices=indices, values=array_ops.gather_nd(input_tensor, indices, name='values'), dense_shape=array_ops.shape(input_tensor, out_type=dtypes.int64, name='dense_shape'))", "docstring": "Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.\n\nIf `input_tensor` is already a `SparseTensor`, just return it.\n\nArgs:\ninput_tensor: A string or integer `Tensor`.\nignore_value: Entries in `dense_tensor` equal to this value will be absent\nfrom the resulting `SparseTensor`. If `None`, default value of\n`dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).\n\nReturns:\nA `SparseTensor` with the same shape as `input_tensor`.\n\nRaises:\nValueError: when `input_tensor`'s rank is `None`.", "source": "github-repos"}
{"code": "def configure(self, options):\n    self.client.api.configure_plugin(self.name, options)\n    self.reload()", "docstring": "Update the plugin's settings.\n\nArgs:\noptions (dict): A key-value mapping of options.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "class TFConvNextV2Stage(keras.layers.Layer):\n\n    def __init__(self, config: ConvNextV2Config, in_channels: int, out_channels: int, kernel_size: int=2, stride: int=2, depth: int=2, drop_path_rates: Optional[List[float]]=None, **kwargs):\n        super().__init__(**kwargs)\n        if in_channels != out_channels or stride > 1:\n            self.downsampling_layer = [keras.layers.LayerNormalization(epsilon=1e-06, name='downsampling_layer.0'), keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='downsampling_layer.1')]\n        else:\n            self.downsampling_layer = [tf.identity]\n        drop_path_rates = drop_path_rates or [0.0] * depth\n        self.layers = [TFConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j], name=f'layers.{j}') for j in range(depth)]\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.stride = stride\n\n    def call(self, hidden_states):\n        for layer in self.downsampling_layer:\n            hidden_states = layer(hidden_states)\n        for layer in self.layers:\n            hidden_states = layer(hidden_states)\n        return hidden_states\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'layers', None) is not None:\n            for layer in self.layers:\n                with tf.name_scope(layer.name):\n                    layer.build(None)\n        if self.in_channels != self.out_channels or self.stride > 1:\n            with tf.name_scope(self.downsampling_layer[0].name):\n                self.downsampling_layer[0].build([None, None, None, self.in_channels])\n            with tf.name_scope(self.downsampling_layer[1].name):\n                self.downsampling_layer[1].build([None, None, None, self.in_channels])", "docstring": "ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.\n\nArgs:\nconfig (`ConvNextV2V2Config`):\nModel configuration class.\nin_channels (`int`):\nNumber of input channels.\nout_channels (`int`):\nNumber of output channels.\ndepth (`int`):\nNumber of residual blocks.\ndrop_path_rates(`List[float]`):\nStochastic depth rates for each layer.", "source": "github-repos"}
{"code": "def should_record_backprop(tensors):\n    return pywrap_tfe.TFE_Py_TapeSetShouldRecordBackprop(tensors)", "docstring": "Returns true if any tape in the stack watches any of these tensors.\n\nOnly takes GradientTapes into account, not forward accumulators.\n\nArgs:\ntensors: Tensors to check, typically inputs to an operation.\n\nReturns:\nBoolean, whether any tape watches any of `tensors`.", "source": "github-repos"}
{"code": "def in_main_process():\n    return not _running_in_worker", "docstring": "Whether it's in the main test process.\n\nThis is normally used to prepare the test environment which should only happen\nin the main process.\n\nReturns:\nA boolean.", "source": "github-repos"}
{"code": "def render_template(cmd_derived_from_alias, pos_args_table):\n    try:\n        cmd_derived_from_alias = normalize_placeholders(cmd_derived_from_alias, inject_quotes=True)\n        template = jinja.Template(cmd_derived_from_alias)\n        rendered = shlex.split(template.render(pos_args_table))\n        if ('' in rendered):\n            check_runtime_errors(cmd_derived_from_alias, pos_args_table)\n        return rendered\n    except Exception as exception:\n        if isinstance(exception, CLIError):\n            raise\n        split_exception_message = str(exception).split()\n        error_index = split_exception_message[(- 1)]\n        if error_index.isdigit():\n            split_exception_message.insert((- 1), 'index')\n            error_msg = RENDER_TEMPLATE_ERROR.format(' '.join(split_exception_message), cmd_derived_from_alias)\n            error_msg += '\\n{}^'.format((' ' * (((len(error_msg) - len(cmd_derived_from_alias)) + int(error_index)) - 1)))\n        else:\n            exception_str = str(exception).replace('\"{{', '}}').replace('}}\"', '}}')\n            error_msg = RENDER_TEMPLATE_ERROR.format(cmd_derived_from_alias, exception_str)\n        raise CLIError(error_msg)", "docstring": "Render cmd_derived_from_alias as a Jinja template with pos_args_table as the arguments.\n\nArgs:\ncmd_derived_from_alias: The string to be injected with positional arguemnts.\npos_args_table: The dictionary used to rendered.\n\nReturns:\nA processed string with positional arguments injected.", "source": "codesearchnet"}
{"code": "def generate_plaintext_random(plain_vocab, distribution, train_samples, length):\n    if (distribution is not None):\n        assert (len(distribution) == len(plain_vocab))\n    train_indices = np.random.choice(range(len(plain_vocab)), (train_samples, length), p=distribution)\n    return train_indices", "docstring": "Generates samples of text from the provided vocabulary.\n\nArgs:\nplain_vocab: vocabulary.\ndistribution: distribution.\ntrain_samples: samples for training.\nlength: length.\n\nReturns:\ntrain_indices (np.array of Integers): random integers for training.\nshape = [num_samples, length]\ntest_indices (np.array of Integers): random integers for testing.\nshape = [num_samples, length]\nplain_vocab   (list of Integers): unique vocabularies.", "source": "codesearchnet"}
{"code": "def mutant_charts_for_feature(example_protos, feature_name, serving_bundles, viz_params):\n\n    def chart_for_index(index_to_mutate):\n        (mutant_features, mutant_examples) = make_mutant_tuples(example_protos, original_feature, index_to_mutate, viz_params)\n        charts = []\n        for serving_bundle in serving_bundles:\n            inference_result_proto = run_inference(mutant_examples, serving_bundle)\n            charts.append(make_json_formatted_for_single_chart(mutant_features, inference_result_proto, index_to_mutate))\n        return charts\n    try:\n        original_feature = parse_original_feature_from_example(example_protos[0], feature_name)\n    except ValueError as e:\n        return {'chartType': 'categorical', 'data': []}\n    indices_to_mutate = (viz_params.feature_indices or range(original_feature.length))\n    chart_type = ('categorical' if (original_feature.feature_type == 'bytes_list') else 'numeric')\n    try:\n        return {'chartType': chart_type, 'data': [chart_for_index(index_to_mutate) for index_to_mutate in indices_to_mutate]}\n    except IndexError as e:\n        raise common_utils.InvalidUserInputError(e)", "docstring": "Returns JSON formatted for rendering all charts for a feature.\n\nArgs:\nexample_proto: The example protos to mutate.\nfeature_name: The string feature name to mutate.\nserving_bundles: One `ServingBundle` object per model, that contains the\ninformation to make the serving request.\nviz_params: A `VizParams` object that contains the UI state of the request.\n\nRaises:\nInvalidUserInputError if `viz_params.feature_index_pattern` requests out of\nrange indices for `feature_name` within `example_proto`.\n\nReturns:\nA JSON-able dict for rendering a single mutant chart.  parsed in\n`tf-inference-dashboard.html`.\n{\n'chartType': 'numeric', # oneof('numeric', 'categorical')\n'data': [A list of data] # parseable by vz-line-chart or vz-bar-chart\n}", "source": "codesearchnet"}
{"code": "def select_copula(cls, X):\n    frank = Bivariate(CopulaTypes.FRANK)\n    frank.fit(X)\n    if (frank.tau <= 0):\n        selected_theta = frank.theta\n        selected_copula = CopulaTypes.FRANK\n        return (selected_copula, selected_theta)\n    copula_candidates = [frank]\n    theta_candidates = [frank.theta]\n    try:\n        clayton = Bivariate(CopulaTypes.CLAYTON)\n        clayton.fit(X)\n        copula_candidates.append(clayton)\n        theta_candidates.append(clayton.theta)\n    except ValueError:\n        pass\n    try:\n        gumbel = Bivariate(CopulaTypes.GUMBEL)\n        gumbel.fit(X)\n        copula_candidates.append(gumbel)\n        theta_candidates.append(gumbel.theta)\n    except ValueError:\n        pass\n    (z_left, L, z_right, R) = cls.compute_empirical(X)\n    (left_dependence, right_dependence) = cls.get_dependencies(copula_candidates, z_left, z_right)\n    cost_L = [np.sum(((L - l) ** 2)) for l in left_dependence]\n    cost_R = [np.sum(((R - r) ** 2)) for r in right_dependence]\n    cost_LR = np.add(cost_L, cost_R)\n    selected_copula = np.argmax(cost_LR)\n    selected_theta = theta_candidates[selected_copula]\n    return (CopulaTypes(selected_copula), selected_theta)", "docstring": "Select best copula function based on likelihood.\n\nArgs:\nX: 2-dimensional `np.ndarray`\n\nReturns:\ntuple: `tuple(CopulaType, float)` best fit and model param.", "source": "codesearchnet"}
{"code": "def _get_subcommand(name):\n    _LOGGER.debug('Accessing subcommand \"%s\".', name)\n    if (name not in settings.subcommands):\n        raise ValueError('\"{subcommand}\" is not a {command} command. \\'{command} help -a\\' lists all available subcommands.'.format(command=settings.command, subcommand=name))\n    return settings.subcommands[name]", "docstring": "Return the function for the specified subcommand.\n\nArgs:\nname: The name of a subcommand.\n\nReturns:\nThe loadable object from the entry point represented by the subcommand.", "source": "codesearchnet"}
{"code": "def word_to_vector_list(self, word, numeric=False, xsampa=False):\n    if xsampa:\n        word = self.xsampa.convert(word)\n    tensor = list(map(self.segment_to_vector, self.segs(word)))\n    if numeric:\n        return self.tensor_to_numeric(tensor)\n    else:\n        return tensor", "docstring": "Return a list of feature vectors, given a Unicode IPA word.\n\nArgs:\nword (unicode): string in IPA\nnumeric (bool): if True, return features as numeric values instead\nof strings\n\nReturns:\nlist: a list of lists of '+'/'-'/'0' or 1/-1/0", "source": "codesearchnet"}
{"code": "def le(self, other, axis=\"columns\", level=None):\n        \n        return self._binary_op(\"le\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is less than or equal to other.\n\nArgs:\nother: A DataFrame or Series or scalar to compare to.\naxis: The axis to perform the le over.\nlevel: The Multilevel index level to apply le over.\n\nReturns:\nA new DataFrame filled with Booleans.", "source": "juraj-google-style"}
{"code": "def driver_for_path(path, drivers=None):\n    \n    ext = (os.path.splitext(path)[1][1:] or path).lower()\n    drivers = drivers or ImageDriver.registry if ext else {}\n    for name, meta in drivers.items():\n        if ext == meta.get('DMD_EXTENSION', '').lower():\n            return ImageDriver(name)\n    return None", "docstring": "Returns the gdal.Driver for a path or None based on the file extension.\n\nArguments:\npath -- file path as str with a GDAL supported file extension", "source": "juraj-google-style"}
{"code": "def set_timestamp(cls, filename: str, response: HTTPResponse):\n    last_modified = response.fields.get('Last-Modified')\n    if (not last_modified):\n        return\n    try:\n        last_modified = email.utils.parsedate(last_modified)\n    except ValueError:\n        _logger.exception('Failed to parse date.')\n        return\n    last_modified = time.mktime(last_modified)\n    os.utime(filename, (time.time(), last_modified))", "docstring": "Set the Last-Modified timestamp onto the given file.\n\nArgs:\nfilename: The path of the file\nresponse: Response", "source": "codesearchnet"}
{"code": "def es_json(self, role='rdf_class', remove_empty=True, **kwargs):\n        \n        def test_idx_status(cls_inst, **kwargs):\n            \n            if kwargs.get(\"force\") == True:\n                return False\n            idx_time = cls_inst.get(\"kds_esIndexTime\", [None])[0]\n            mod_time = cls_inst.get(\"dcterm_modified\", [None])[0]\n            error_msg = cls_inst.get(\"kds_esIndexError\", [None])[0]\n            if (not idx_time) or \\\n               error_msg or \\\n               (idx_time and mod_time and idx_time < mod_time):\n               return False\n            return True\n\n        \n        \n        rtn_obj = {}\n        if kwargs.get(\"depth\"):\n            kwargs['depth'] += 1\n        else:\n            kwargs['depth'] = 1\n        if role == 'rdf_class':\n            if test_idx_status(self, **kwargs):\n                return None\n            for prop, value in self.items():\n                if prop in ['kds_esIndexTime', 'kds_esIndexError']:\n                    continue\n                new_val = value.es_json()\n                rtn_method = get_attr(self[prop], 'kds_esObjectType', [])\n                if 'kdr_Array' in rtn_method:\n                    rtn_obj[prop] = new_val\n                elif (remove_empty and new_val) or not remove_empty:\n                    if len(new_val) == 1:\n                        rtn_obj[prop] = new_val[0]\n                    else:\n                        rtn_obj[prop] = new_val\n            nested_props = None\n        else:\n            try:\n                nested_props = self.es_defs.get('kds_esNestedProps',\n                                                list(self.keys())).copy()\n            except AttributeError:\n                nested_props = list(self.keys())\n            for prop, value in self.items():\n                \n                \n                if prop in ['kds_esIndexTime', 'kds_esIndexError']:\n                    continue\n                new_val = value.es_json(**kwargs)\n                rtn_method = get_attr(self[prop], 'kds_esObjectType', [])\n                if 'kdr_Array' in rtn_method:\n                    rtn_obj[prop] = new_val\n                elif (remove_empty and new_val) or not remove_empty:\n                    if len(new_val) == 1:\n                        rtn_obj[prop] = new_val[0] \\\n                                if not isinstance(new_val, dict) \\\n                                else new_val\n                    else:\n                        rtn_obj[prop] = new_val\n        \n        \n\n        rtn_obj = get_es_label(rtn_obj, self)\n        rtn_obj = get_es_value(rtn_obj, self)\n        rtn_obj = get_es_ids(rtn_obj, self)\n        if nested_props:\n            nested_props += ['value', 'id', 'uri']\n            rtn_obj = {key: value\n                       for key, value in rtn_obj.items()\n                       if key in nested_props}\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        rml_maps = self.get_all_rml(role=role)\n        if rml_maps:\n                rtn_obj['rml_map'] = rml_maps\n        \n        \n        return rtn_obj", "docstring": "Returns a JSON object of the class for insertion into es\n\nargs:\nrole: the role states how the class data should be returned\ndepending upon whether it is used as a subject of an object.\noptions are kds_esNested or rdf_class\nremove_empty: True removes empty items from es object", "source": 
"juraj-google-style"}
{"code": "def login(self, login_type, **kwargs):\n        \n        content = {\n            \"type\": login_type\n        }\n        for key in kwargs:\n            if kwargs[key]:\n                content[key] = kwargs[key]\n\n        return self._send(\"POST\", \"/login\", content)", "docstring": "Perform /login.\n\nArgs:\nlogin_type (str): The value for the 'type' key.\n**kwargs: Additional key/values to add to the JSON submitted.", "source": "juraj-google-style"}
{"code": "def CrowdsaleRegister(self, wallet, register_addresses, from_addr=None):\n        \n        invoke_args = [self.ScriptHash.ToString(), 'crowdsale_register',\n                       [PromptUtils.parse_param(p, wallet) for p in register_addresses]]\n\n        tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True, from_addr)\n\n        return tx, fee, results", "docstring": "Register for a crowd sale.\n\nArgs:\nwallet (neo.Wallets.Wallet): a wallet instance.\nregister_addresses (list): list of public addresses to register for the sale.\n\nReturns:\ntuple:\nInvocationTransaction: the transaction.\nint: the transaction fee.\nlist: the neo VM evaluation stack results.", "source": "juraj-google-style"}
{"code": "def get_extended_surface_mesh(self, repeat=(5, 5, 1)):\n        \n        surf_str = Structure.from_sites(self.surface_sites)\n        surf_str.make_supercell(repeat)\n        return surf_str", "docstring": "Gets an extended surface mesh for to use for adsorption\nsite finding by constructing supercell of surface sites\n\nArgs:\nrepeat (3-tuple): repeat for getting extended surface mesh", "source": "juraj-google-style"}
{"code": "def _make_parser_func(sep):\n\n    def parser_func(filepath_or_buffer, sep=sep, delimiter=None, header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, iterator=False, chunksize=None, compression='infer', thousands=None, decimal=b'.', lineterminator=None, quotechar='\"', quoting=0, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=None, error_bad_lines=True, warn_bad_lines=True, skipfooter=0, doublequote=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None):\n        (_, _, _, kwargs) = inspect.getargvalues(inspect.currentframe())\n        if (not kwargs.get('sep', sep)):\n            kwargs['sep'] = '\\t'\n        return _read(**kwargs)\n    return parser_func", "docstring": "Creates a parser function from the given sep.\n\nArgs:\nsep: The separator default to use for the parser.\n\nReturns:\nA function object.", "source": "codesearchnet"}
{"code": "def parse(self, text, layers=None):\n    params = {'text': text, 'key': self.key}\n    if (layers is not None):\n        if isinstance(layers, six.string_types):\n            params['layers'] = layers\n        elif isinstance(layers, collections.Iterable):\n            params['layers'] = ','.join(layers)\n    req = requests.get(self.NLU_URL, params=params)\n    return req.json()", "docstring": "Parsing passed text to json.\n\nArgs:\ntext: Text to parse.\nlayers (optional): Special fields. Only one string\nor iterable object (e.g \"Data\", (\"Data\", \"Fio\")).\nOnly these fields will be returned.\n\n\nReturns:\nThe parsed text into a json object.", "source": "codesearchnet"}
{"code": "def get_review(review_struct):\n    review_fn = _resource_context('review.rst')\n    with open(review_fn) as f:\n        review = f.read()\n    with NamedTemporaryFile(suffix='.png') as qr_file:\n        url = pyqrcode.create(review_struct.internal_url)\n        url.png(qr_file.name, scale=5)\n        qr_file.flush()\n        qr_file.seek(0)\n        review = Template(review).substitute(content=review_struct.get_rst(), datum=time.strftime('%d.%m.%Y', time.localtime()), cas=time.strftime('%H:%M', time.localtime()), resources_path=RES_PATH, qr_path=qr_file.name)\n        return gen_pdf(review, open(_resource_context('review_style.json')).read())", "docstring": "Generate review from `review_struct`.\n\nArgs:\nreview_struct (obj): :class:`.GenerateReview` instance.\n\nReturns:\nobj: StringIO file instance containing PDF file.", "source": "codesearchnet"}
{"code": "def produce(self, X):\n        \n\n        signal = X\n\n        window_length = len(self.window)\n        anomalies = np.zeros(len(signal))\n        window_weight = sum(self.window)\n        for i in range(0, len(signal) - window_length - 1):\n            rfft = np.fft.rfft(signal[i:i + window_length] * self.window)\n            sig_freq = np.abs(rfft) / window_weight\n            anomalies[i] = 0\n            for m in range(0, int(window_length / 2) - 1):\n                if ((sig_freq[m] > self.mask_top[m]) or (sig_freq[m] < self.mask_bottom[m])):\n                    anomalies[i] = 1\n                    break\n\n        return anomalies", "docstring": "Detects anomalies in telemetry data based on its power spectral density\n\nArgs:\nX: Telemetry data\n\nReturns:\nanomalies: Data vector consisting of the anomalies detected in the telemetry data", "source": "juraj-google-style"}
{"code": "def align_long_axis(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n    output_height, output_width = (size['height'], size['width'])\n    if input_data_format is None:\n        input_data_format = infer_channel_dimension_format(image)\n    if input_data_format == ChannelDimension.LAST:\n        rot_axes = (0, 1)\n    elif input_data_format == ChannelDimension.FIRST:\n        rot_axes = (1, 2)\n    else:\n        raise ValueError(f'Unsupported data format: {input_data_format}')\n    if output_width < output_height and input_width > input_height or (output_width > output_height and input_width < input_height):\n        image = np.rot90(image, 3, axes=rot_axes)\n    if data_format is not None:\n        image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n    return image", "docstring": "Align the long axis of the image to the longest axis of the specified size.\n\nArgs:\nimage (`np.ndarray`):\nThe image to be aligned.\nsize (`Dict[str, int]`):\nThe size `{\"height\": h, \"width\": w}` to align the long axis to.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe data format of the output image. If unset, the same format as the input image is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.\n\nReturns:\n`np.ndarray`: The aligned image.", "source": "github-repos"}
{"code": "def get_class_weights(y, smooth_factor=0):\n    \n\n    from collections import Counter\n    counter = Counter(y)\n\n    if smooth_factor > 0:\n        p = max(counter.values()) * smooth_factor\n        for k in counter.keys():\n            counter[k] += p\n\n    majority = max(counter.values())\n\n    return {cls: float(majority / count) for cls, count in counter.items()}", "docstring": "Returns the weights for each class based on the frequencies of the samples.\n\nArgs:\ny: A list of true labels (the labels must be hashable).\nsmooth_factor: A factor that smooths extremely uneven weights.\n\nReturns:\nA dictionary with the weight for each class.", "source": "juraj-google-style"}
{"code": "def send_peers(self, connection_id):\n    with self._lock:\n        peer_endpoints = list(self._peers.values())\n        if self._endpoint:\n            peer_endpoints.append(self._endpoint)\n        peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)\n        try:\n            self._network.send(validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, peers_response.SerializeToString(), connection_id, one_way=True)\n        except ValueError:\n            LOGGER.debug('Connection disconnected: %s', connection_id)", "docstring": "Sends a message containing our peers to the\nconnection identified by connection_id.\n\nArgs:\nconnection_id (str): A unique identifier which identifies an\nconnection on the network server socket.", "source": "codesearchnet"}
{"code": "def __init__(self, url):\n    \n    self._url = url\n    self._last_progress_msg_print_time = time.time()\n    self._total_bytes_downloaded = 0\n    self._max_prog_str = 0", "docstring": "Creates DownloadManager responsible for downloading a TF-Hub module.\n\nArgs:\nurl: URL pointing to the TF-Hub module to download and extract.", "source": "juraj-google-style"}
{"code": "def hstack(xs):\n    if any_symbolic_tensors((xs,)):\n        return Hstack().symbolic_call(xs)\n    return backend.numpy.hstack(xs)", "docstring": "Stack tensors in sequence horizontally (column wise).\n\nThis is equivalent to concatenation along the first axis for 1-D tensors,\nand along the second axis for all other tensors.\n\nArgs:\nxs: Sequence of tensors.\n\nReturns:\nThe tensor formed by stacking the given tensors.", "source": "github-repos"}
{"code": "def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    call_type = self._GetRowValue(query_hash, row, 'type')\n    call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN')\n    duration = self._GetRowValue(query_hash, row, 'duration')\n    timestamp = self._GetRowValue(query_hash, row, 'date')\n    event_data = AndroidCallEventData()\n    event_data.call_type = call_type\n    event_data.duration = self._GetRowValue(query_hash, row, 'duration')\n    event_data.name = self._GetRowValue(query_hash, row, 'name')\n    event_data.number = self._GetRowValue(query_hash, row, 'number')\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, 'Call Started')\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    if duration:\n        if isinstance(duration, py2to3.STRING_TYPES):\n            try:\n                duration = int(duration, 10)\n            except ValueError:\n                duration = 0\n        timestamp += (duration * 1000)\n        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, 'Call Ended')\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Call record row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def expand_abbreviations(self, text):\n    if (not self.abbreviations):\n        raise LexiconError('No abbreviations in lexicon.')\n\n    def chunks(data, SIZE=25):\n        '\\n            Regex only supports 100 groups for munging callbacks. So we have to\\n            chunk the abbreviation dicitonary.\\n            '\n        it = iter(data)\n        for i in range(0, len(data), SIZE):\n            (yield {k: data[k] for k in islice(it, SIZE)})\n\n    def cb(g):\n        'Regex callback'\n        return (self.abbreviations.get(g.group(0)) or g.group(0))\n    text = re.sub('w/', 'wi', text)\n    for subdict in chunks(self.abbreviations):\n        regex = (('(\\\\b' + '\\\\b)|(\\\\b'.join(subdict.keys())) + '\\\\b)')\n        text = re.sub(regex, cb, text)\n    return text", "docstring": "Parse a piece of text and replace any abbreviations with their full\nword equivalents. Uses the lexicon.abbreviations dictionary to find\nabbreviations.\n\nArgs:\ntext (str): The text to parse.\n\nReturns:\nstr: The text with abbreviations replaced.", "source": "codesearchnet"}
{"code": "def range(self, location, distance):\n    return (segment.range(location, distance) for segment in self)", "docstring": "Test whether locations are within a given range of ``location``.\n\nArgs:\nlocation (Point): Location to test range against\ndistance (float): Distance to test location is within\n\nReturns:\nlist of list of Point: Groups of points in range per segment", "source": "codesearchnet"}
{"code": "def server_def(self):\n    return self._server_def", "docstring": "Returns the `tf.train.ServerDef` for this server.\n\nReturns:\nA `tf.train.ServerDef` protocol buffer that describes the configuration\nof this server.", "source": "github-repos"}
{"code": "def take_profit(self, accountID, **kwargs):\n    return self.create(accountID, order=TakeProfitOrderRequest(**kwargs))", "docstring": "Shortcut to create a Take Profit Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a TakeProfitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"}
{"code": "def convert_shapes(input_shape, to_tuples=True):\n\n    def _is_shape_component(value):\n        return value is None or isinstance(value, (int, tensor_shape.Dimension))\n\n    def _is_atomic_shape(input_shape):\n        if _is_shape_component(input_shape):\n            return True\n        if isinstance(input_shape, tensor_shape.TensorShape):\n            return True\n        if isinstance(input_shape, (tuple, list)) and all((_is_shape_component(ele) for ele in input_shape)):\n            return True\n        return False\n\n    def _convert_shape(input_shape):\n        input_shape = tensor_shape.TensorShape(input_shape)\n        if to_tuples:\n            input_shape = tuple(input_shape.as_list())\n        return input_shape\n    return map_structure_with_atomic(_is_atomic_shape, _convert_shape, input_shape)", "docstring": "Converts nested shape representations to desired format.\n\nPerforms:\n\nTensorShapes -> tuples if `to_tuples=True`.\ntuples of int or None -> TensorShapes if `to_tuples=False`.\n\nValid objects to be converted are:\n- TensorShapes\n- tuples with elements of type int or None.\n- ints\n- None\n\nArgs:\ninput_shape: A nested structure of objects to be converted to TensorShapes.\nto_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts\nall tuples representing shapes to TensorShapes.\n\nReturns:\nNested structure of shapes in desired format.\n\nRaises:\nValueError: when the input tensor shape can't be converted to tuples, eg\nunknown tensor shape.", "source": "github-repos"}
{"code": "def date2datestr(date, fmt='yyyymmdd'):\n    \n    if '-' in fmt:\n        if not fmt.index('d') < fmt.index('m') < fmt.index('y'):\n            raise ValueError('Invalid format string. {}'.format(\n                    VALID_DATE_FORMATS_TEXT))\n        d, m, y = fmt.split('-')\n    elif '/' in fmt:\n        if not fmt.index('m') < fmt.index('d') < fmt.index('y'):\n            raise ValueError('Invalid format string. {}'.format(\n                    VALID_DATE_FORMATS_TEXT))\n        m, d, y = fmt.split('/')\n    elif any(c not in 'dmy' for c in fmt):\n        raise ValueError('Invalid character in format string. {}'.format(\n                VALID_DATE_FORMATS_TEXT))\n    else:\n        if not fmt.index('y') < fmt.index('m') < fmt.index('d'):\n            raise ValueError('Invalid format string. {}'.format(\n                    VALID_DATE_FORMATS_TEXT))\n        y, m, d = fmt[:-4], fmt[-4:-2], fmt[-2:]\n    for string, char in ((d, 'd'), (m, 'm'), (y, 'y')):\n        if any(c != char for c in string):\n            raise ValueError('Invalid date format: {} is not {}'.\\\n                    format(char, string))\n    if len(y) == 4:\n        fmt = fmt.replace('yyyy', '%Y', 1)\n    elif len(y) == 2:\n        fmt = fmt.replace('yy', '%y', 1)\n    else:\n        raise ValueError('Invalid format string, year must have 2 or 4 digits')\n    if len(m) == 2:\n        fmt = fmt.replace('mm', '%m', 1)\n    elif len(m) == 1:\n        fmt = fmt.replace('m', 'X%m', 1)\n    else:\n        raise ValueError('Invalid format string, month must have 1 or 2 digits')\n    if len(d) == 2:\n        fmt = fmt.replace('dd', '%d', 1)\n    elif len(d) == 1:\n        fmt = fmt.replace('d', 'X%d', 1)\n    else:\n        raise ValueError('Invalid format string, day must have 1 or 2 digits')\n    return date.strftime(fmt).replace('X0','X').replace('X','')", "docstring": "Turns a datetime.date object into a string. The string must have one of the\nformats from VALID_DATE_FORMATS_TEXT to make it compatible with\ndatestr2date.\n\nArgs:\ndate (datetime.date) the date to be translated\nfmt (str) a format string.\nReturns:\n(str) that represents a date.\nRaises:\nValueError if the format is not valid.", "source": "juraj-google-style"}
{"code": "def dump_begin(self, selector_id):\n        \n\n        if self.dump_walker is not None:\n            self.storage.destroy_walker(self.dump_walker)\n\n        selector = DataStreamSelector.FromEncoded(selector_id)\n        self.dump_walker = self.storage.create_walker(selector, skip_all=False)\n\n        return Error.NO_ERROR, Error.NO_ERROR, self.dump_walker.count()", "docstring": "Start dumping a stream.\n\nArgs:\nselector_id (int): The buffered stream we want to dump.\n\nReturns:\n(int, int, int): Error code, second error code, number of available readings", "source": "juraj-google-style"}
{"code": "def remove_team_member(self, account_id=None, email_address=None):\n        \n        return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)", "docstring": "Remove a user from your Team\n\nArgs:\n\naccount_id (str):       The id of the account of the user to remove from your team.\n\nemail_address (str):    The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.\n\nReturns:\nA Team object", "source": "juraj-google-style"}
{"code": "def resolve_path(path, config_file):\n    if os.path.isabs(path):\n        return path\n    return os.path.relpath(path, os.path.dirname(config_file))", "docstring": "Resolve path relative to config file location.\n\nArgs:\npath: Path to be resolved.\nconfig_file: Path to config file, which `path` is specified\nrelative to.\n\nReturns:\nPath relative to the `config_file` location. If `path` is an\nabsolute path then it will be returned without change.", "source": "codesearchnet"}
{"code": "def gaussian_noise(x, severity=1):\n  \n  c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]\n  x = np.array(x) / 255.\n  x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Gaussian noise corruption to images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.", "source": "juraj-google-style"}
{"code": "def get_crystal_field_spin(self, coordination: str='oct', spin_config: str='high'):\n    if ((coordination not in ('oct', 'tet')) or (spin_config not in ('high', 'low'))):\n        raise ValueError('Invalid coordination or spin config.')\n    elec = self.full_electronic_structure\n    if ((len(elec) < 4) or (elec[(- 1)][1] != 's') or (elec[(- 2)][1] != 'd')):\n        raise AttributeError('Invalid element {} for crystal field calculation.'.format(self.symbol))\n    nelectrons = ((elec[(- 1)][2] + elec[(- 2)][2]) - self.oxi_state)\n    if ((nelectrons < 0) or (nelectrons > 10)):\n        raise AttributeError('Invalid oxidation state {} for element {}'.format(self.oxi_state, self.symbol))\n    if (spin_config == 'high'):\n        return (nelectrons if (nelectrons <= 5) else (10 - nelectrons))\n    elif (spin_config == 'low'):\n        if (coordination == 'oct'):\n            if (nelectrons <= 3):\n                return nelectrons\n            elif (nelectrons <= 6):\n                return (6 - nelectrons)\n            elif (nelectrons <= 8):\n                return (nelectrons - 6)\n            else:\n                return (10 - nelectrons)\n        elif (coordination == 'tet'):\n            if (nelectrons <= 2):\n                return nelectrons\n            elif (nelectrons <= 4):\n                return (4 - nelectrons)\n            elif (nelectrons <= 7):\n                return (nelectrons - 4)\n            else:\n                return (10 - nelectrons)", "docstring": "Calculate the crystal field spin based on coordination and spin\nconfiguration. Only works for transition metal species.\n\nArgs:\ncoordination (str): Only oct and tet are supported at the moment.\nspin_config (str): Supported keywords are \"high\" or \"low\".\n\nReturns:\nCrystal field spin in Bohr magneton.\n\nRaises:\nAttributeError if species is not a valid transition metal or has\nan invalid oxidation state.\nValueError if invalid coordination or spin_config.", "source": "codesearchnet"}
{"code": "def __init__(self, options=None, **kwargs):\n        \n        try:\n            env = MeCabEnv(**kwargs)\n            self.__ffi = _ffi_libmecab()\n            self.__mecab = self.__ffi.dlopen(env.libpath)\n            self.libpath = env.libpath\n\n            \n            self.__bytes2str, self.__str2bytes = string_support(env.charset)\n\n            \n            self.__split_pattern, self.__split_features = splitter_support(env.charset)\n\n            \n            op = OptionParse(env.charset)\n            self.options = op.parse_mecab_options(options)\n\n            \n            ostr = op.build_options_str(self.options)\n\n            self.model = self.__mecab.mecab_model_new2(ostr)\n            if self.model == self.__ffi.NULL:\n                logger.error(self._ERROR_NULLPTR.format('Model'))\n                raise MeCabError(self._ERROR_NULLPTR.format('Model'))\n\n            self.tagger = self.__mecab.mecab_model_new_tagger(self.model)\n            if self.tagger == self.__ffi.NULL:\n                logger.error(self._ERROR_NULLPTR.format('Tagger'))\n                raise MeCabError(self._ERROR_NULLPTR.format('Tagger'))\n\n            self.lattice = self.__mecab.mecab_model_new_lattice(self.model)\n            if self.lattice == self.__ffi.NULL:\n                logger.error(self._ERROR_NULLPTR.format('Lattice'))\n                raise MeCabError(self._ERROR_NULLPTR.format('Lattice'))\n\n            n = self.options.get('nbest', 1)\n            if n > 1:\n                req_type = self.MECAB_LATTICE_NBEST\n            else:\n                req_type = self.MECAB_LATTICE_ONE_BEST\n            self.__mecab.mecab_lattice_set_request_type(self.lattice, req_type)\n\n            if 'partial' in self.options:\n                self.__mecab.mecab_lattice_add_request_type(\n                    self.lattice, self.MECAB_LATTICE_PARTIAL)\n\n            if 'marginal' in self.options:\n                self.__mecab.mecab_lattice_add_request_type(\n                    self.lattice, self.MECAB_LATTICE_MARGINAL_PROB)\n\n            if 'all_morphs' in self.options:\n                \n                self.__mecab.mecab_lattice_add_request_type(\n                    self.lattice, self.MECAB_LATTICE_ALL_MORPHS)\n\n            if 'allocate_sentence' in self.options:\n                self.__mecab.mecab_lattice_add_request_type(\n                    self.lattice, self.MECAB_LATTICE_ALLOCATE_SENTENCE)\n\n            \n            self.dicts = []\n            dptr = self.__mecab.mecab_model_dictionary_info(self.model)\n            while dptr != self.__ffi.NULL:\n                fpath = self.__bytes2str(self.__ffi.string(dptr.filename))\n                fpath = os.path.abspath(fpath)\n                chset = self.__bytes2str(self.__ffi.string(dptr.charset))\n                self.dicts.append(DictionaryInfo(dptr, fpath, chset))\n                dptr = getattr(dptr, 'next')\n\n            \n            self.__enc = self.dicts[0].charset\n\n            \n            self.version = self.__bytes2str(\n                self.__ffi.string(self.__mecab.mecab_version()))\n        except EnvironmentError as err:\n            logger.error(self._ERROR_INIT.format(str(err)))\n            raise MeCabError(err)\n        except ValueError as verr:\n            logger.error(self._ERROR_INIT.format(str(verr)))\n            raise MeCabError(self._ERROR_INIT.format(str(verr)))", "docstring": "Initializes the MeCab instance with the given options.\n\nArgs:\noptions: Optional string or dictionary of the MeCab options 
to be\nused.\nKwargs:\ndebug (bool): Flag for outputting debug messages to stderr.\n\nRaises:\nSystemExit: An unrecognized option was passed in.\nMeCabError: An error occurred in locating the MeCab library;\nor the FFI handle to MeCab could not be created.", "source": "juraj-google-style"}
{"code": "def run_commands(commands, settings):\n    \n    sprint = settings[\"sprint\"]\n    quiet = settings[\"quiet\"]\n    error = settings[\"error\"]\n    enhanced_errors = True\n    the_shell = None\n    if settings[\"no_enhanced_errors\"]:\n        enhanced_errors = False\n    if \"shell\" in settings:\n        the_shell = settings[\"shell\"]\n    windows_p = sys.platform == \"win32\"\n\n    STDOUT = None\n    STDERR = None\n    if quiet:\n        STDOUT = PIPE\n        STDERR = PIPE\n\n    commands = commands.rstrip()\n    sprint(\"About to run commands '{}'\".format(commands), level=\"verbose\")\n    if not quiet:\n        sprint(commands)\n\n    if the_shell:\n        tmp = shlex.split(the_shell)\n        the_shell = tmp[0]\n        tmp = tmp[1:]\n        if enhanced_errors and not windows_p:\n            tmp.append(\"-e\")\n        tmp.append(commands)\n        commands = tmp\n    else:\n        if enhanced_errors and not windows_p:\n            commands = [\"-e\", commands]\n\n    p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR,\n              executable=the_shell)\n    out, err = p.communicate()\n    if p.returncode:\n        if quiet:\n            error(err.decode(locale.getpreferredencoding()))\n        error(\"Command failed to run\")\n        sys.exit(1)", "docstring": "Runs the commands supplied as an argument\nIt will exit the program if the commands return a\nnon-zero code\n\nArgs:\nthe commands to run\nThe settings dictionary", "source": "juraj-google-style"}
{"code": "def __init__(self, metric_name, metric_methods, label_length, *args):\n    self._metric_name = metric_name\n    self._metric_methods = metric_methods\n    self._label_length = label_length\n    if label_length >= len(self._metric_methods):\n        raise ValueError('Cannot create {} metric with label >= {}'.format(self._metric_name, len(self._metric_methods)))\n    self._metric = self._metric_methods[self._label_length].create(*args)", "docstring": "Creates a new metric.\n\nArgs:\nmetric_name: name of the metric class.\nmetric_methods: list of swig metric methods.\nlabel_length: length of label args.\n*args: the arguments to call create method.", "source": "github-repos"}
{"code": "def _ParseTriggerStartTime(self, parser_mediator, trigger):\n    \n    time_elements_tuple = (\n        trigger.start_date.year, trigger.start_date.month,\n        trigger.start_date.day_of_month, trigger.start_time.hours,\n        trigger.start_time.minutes, 0)\n\n    date_time = None\n    if time_elements_tuple != (0, 0, 0, 0, 0, 0):\n      try:\n        date_time = dfdatetime_time_elements.TimeElements(\n            time_elements_tuple=time_elements_tuple)\n        date_time.is_local_time = True\n        \n        date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE  \n      except ValueError:\n        parser_mediator.ProduceExtractionWarning(\n            'invalid trigger start time: {0!s}'.format(time_elements_tuple))\n\n    return date_time", "docstring": "Parses the start time from a trigger.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntrigger (job_trigger): a trigger.\n\nReturns:\ndfdatetime.DateTimeValues: last run date and time or None if not\navailable.", "source": "juraj-google-style"}
{"code": "def put(self, credentials):\n        \n        self.acquire_lock()\n        try:\n            self.locked_put(credentials)\n        finally:\n            self.release_lock()", "docstring": "Write a credential.\n\nThe Storage lock must be held when this is called.\n\nArgs:\ncredentials: Credentials, the credentials to store.", "source": "juraj-google-style"}
{"code": "def observe(self, success, failure):\n        \n        if isinstance(success, int) is False:\n            if isinstance(success, float) is False:\n                raise TypeError()\n        if isinstance(failure, int) is False:\n            if isinstance(failure, float) is False:\n                raise TypeError()\n\n        if success <= 0:\n            raise ValueError()\n        if failure <= 0:\n            raise ValueError()\n\n        self.__success += success\n        self.__failure += failure", "docstring": "Observation data.\n\nArgs:\nsuccess:      The number of success.\nfailure:      The number of failure.", "source": "juraj-google-style"}
{"code": "def save_image(imager, grid_data, grid_norm, output_file):\n    \n    \n    imager.finalise_plane(grid_data, grid_norm)\n    grid_data = numpy.real(grid_data)\n\n    \n    border = (imager.plane_size - imager.image_size) \n    if border > 0:\n        end = border + imager.image_size\n        grid_data = grid_data[border:end, border:end]\n\n    \n    hdr = fits.header.Header()\n    fits.writeto(output_file, grid_data, hdr, clobber=True)", "docstring": "Makes an image from gridded visibilities and saves it to a FITS file.\n\nArgs:\nimager (oskar.Imager):          Handle to configured imager.\ngrid_data (numpy.ndarray):      Final visibility grid.\ngrid_norm (float):              Grid normalisation to apply.\noutput_file (str):              Name of output FITS file to write.", "source": "juraj-google-style"}
{"code": "def start(host, port, profiler_stats, dont_start_browser, debug_mode):\n    stats_handler = functools.partial(StatsHandler, profiler_stats)\n    if (not debug_mode):\n        sys.stderr = open(os.devnull, 'w')\n    print('Starting HTTP server...')\n    if (not dont_start_browser):\n        webbrowser.open('http:\n    try:\n        StatsServer((host, port), stats_handler).serve_forever()\n    except KeyboardInterrupt:\n        print('Stopping...')\n        sys.exit(0)", "docstring": "Starts HTTP server with specified parameters.\n\nArgs:\nhost: Server host name.\nport: Server port.\nprofiler_stats: A dict with collected program stats.\ndont_start_browser: Whether to open browser after profiling.\ndebug_mode: Whether to redirect stderr to /dev/null.", "source": "codesearchnet"}
{"code": "def get_current_remat_mode():\n    remat_scope_stack = global_state.get_global_attribute('remat_scope_stack')\n    if not remat_scope_stack:\n        return None\n    active_scope = remat_scope_stack[-1]\n    return RematMode(active_scope.mode, active_scope.output_size_threshold, active_scope.layer_names)", "docstring": "Get the current rematerialization mode and associated settings.\n\nReturns:\nRematMode or None: The current rematerialization mode, or None if not\nset.", "source": "github-repos"}
{"code": "def _ExpandUsersVariablePathSegments(\n      cls, path_segments, path_separator, user_accounts):\n    \n    if not path_segments:\n      return []\n\n    path_segments_lower = [\n        path_segment.lower() for path_segment in path_segments]\n\n    if path_segments_lower[0] in ('%%users.homedir%%', '%%users.userprofile%%'):\n      return cls._ExpandUsersHomeDirectoryPathSegments(\n          path_segments, path_separator, user_accounts)\n\n    path_expansions = cls._PATH_EXPANSIONS_PER_USERS_VARIABLE.get(\n        path_segments[0], None)\n\n    if path_expansions:\n      expanded_paths = []\n\n      for path_expansion in path_expansions:\n        expanded_path_segments = list(path_expansion)\n        expanded_path_segments.extend(path_segments[1:])\n\n        paths = cls._ExpandUsersVariablePathSegments(\n            expanded_path_segments, path_separator, user_accounts)\n        expanded_paths.extend(paths)\n\n      return expanded_paths\n\n    if cls._IsWindowsDrivePathSegment(path_segments[0]):\n      path_segments[0] = ''\n\n    \n    path = path_separator.join(path_segments)\n    return [path]", "docstring": "Expands path segments with a users variable, e.g. %%users.homedir%%.\n\nArgs:\npath_segments (list[str]): path segments.\npath_separator (str): path segment separator.\nuser_accounts (list[UserAccountArtifact]): user accounts.\n\nReturns:\nlist[str]: paths for which the users variables have been expanded.", "source": "juraj-google-style"}
{"code": "def send(self, conn):\n    if (conn is None):\n        raise ValueError('Cannot send to connection None')\n    with (yield conn.write_lock.acquire()):\n        sent = 0\n        (yield conn.write_message(self.header_json, locked=False))\n        sent += len(self.header_json)\n        (yield conn.write_message(self.metadata_json, locked=False))\n        sent += len(self.metadata_json)\n        (yield conn.write_message(self.content_json, locked=False))\n        sent += len(self.content_json)\n        sent += (yield self.write_buffers(conn, locked=False))\n        raise gen.Return(sent)", "docstring": "Send the message on the given connection.\n\nArgs:\nconn (WebSocketHandler) : a WebSocketHandler to send messages\n\nReturns:\nint : number of bytes sent", "source": "codesearchnet"}
{"code": "def process_buffers_for_display(s, limit=40):\n    if isinstance(s, (list, tuple)):\n        return [process_buffers_for_display(elem, limit=limit) for elem in s]\n    else:\n        length = len(s)\n        if (length > limit):\n            return (binascii.b2a_qp(s[:limit]) + (b' (length-%d truncated at %d bytes)' % (length, limit)))\n        else:\n            return binascii.b2a_qp(s)", "docstring": "Process a buffer for human-readable display.\n\nThis function performs the following operation on each of the buffers in `s`.\n1. Truncate input buffer if the length of the buffer is greater than\n`limit`, to prevent large strings from overloading the frontend.\n2. Apply `binascii.b2a_qp` on the truncated buffer to make the buffer\nprintable and convertible to JSON.\n3. If truncation happened (in step 1), append a string at the end\ndescribing the original length and the truncation.\n\nArgs:\ns: The buffer to be processed, either a single buffer or a nested array of\nthem.\nlimit: Length limit for each buffer, beyond which truncation will occur.\n\nReturn:\nA single processed buffer or a nested array of processed buffers.", "source": "codesearchnet"}
{"code": "def get_default(__func: Callable, __arg: str) -> str:\n    return signature(__func).parameters[__arg].default", "docstring": "Fetch default value for a function argument\n\nArgs:\n__func: Function to inspect\n__arg: Argument to extract default value for", "source": "codesearchnet"}
{"code": "def orient_undirected_graph(self, data, graph, **kwargs):\n        \n        \n        self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]\n        self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]\n        self.arguments['{DIRECTED}'] = 'TRUE'\n        self.arguments['{ALPHA}'] = str(self.alpha)\n        self.arguments['{NJOBS}'] = str(self.nb_jobs)\n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n\n        fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())\n        fg = DataFrame(1 - fe.values)\n\n        results = self._run_pc(data, fixedEdges=fe, fixedGaps=fg, verbose=self.verbose)\n\n        return nx.relabel_nodes(nx.DiGraph(results),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Run PC on an undirected graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.Graph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution given by PC on the given skeleton.", "source": "juraj-google-style"}
{"code": "def symbolic_master_equation(self, rho=None):\n    (L, H) = (self.L, self.H)\n    if (rho is None):\n        rho = OperatorSymbol('rho', hs=self.space)\n    return (((- I) * ((H * rho) - (rho * H))) + sum(((((Lk * rho) * adjoint(Lk)) - ((((adjoint(Lk) * Lk) * rho) + ((rho * adjoint(Lk)) * Lk)) / 2)) for Lk in L.matrix.ravel())))", "docstring": "Compute the symbolic Liouvillian acting on a state rho\n\nIf no rho is given, an OperatorSymbol is created in its place.\nThis correspnds to the RHS of the master equation\nin which an average is taken over the external noise degrees of\nfreedom.\n\nArgs:\nrho (Operator): A symbolic density matrix operator\n\nReturns:\nOperator: The RHS of the master equation.", "source": "codesearchnet"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    return bde_file_entry.BDEFileEntry(\n        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nBDEFileEntry: file entry or None.", "source": "juraj-google-style"}
{"code": "def show_abierrors(self, nids=None, stream=sys.stdout):\n    lines = []\n    app = lines.append\n    for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):\n        header = (('=== ' + task.qout_file.path) + '===')\n        app(header)\n        report = task.get_event_report()\n        if (report is not None):\n            app(('num_errors: %s, num_warnings: %s, num_comments: %s' % (report.num_errors, report.num_warnings, report.num_comments)))\n            app('*** ERRORS ***')\n            app('\\n'.join((str(e) for e in report.errors)))\n            app('*** BUGS ***')\n            app('\\n'.join((str(b) for b in report.bugs)))\n        else:\n            app('get_envent_report returned None!')\n        app((('=' * len(header)) + (2 * '\\n')))\n    return stream.writelines(lines)", "docstring": "Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.\n\nArgs:\nnids: optional list of node identifiers used to filter the tasks.\nstream: File-like object. Default: sys.stdout", "source": "codesearchnet"}
{"code": "def remove_api_key(self):\n    url = (self.record_url + '/remove_api_key')\n    res = requests.patch(url=url, headers=HEADERS, verify=False)\n    res.raise_for_status()\n    self.api_key = ''", "docstring": "Removes the user's existing API key, if present, and sets the current instance's 'api_key'\nattribute to the empty string.\n\nReturns:\n`NoneType`: None.", "source": "codesearchnet"}
{"code": "def dbname(self, value):\n        \n        self._dbname = value\n        self._connectionXML.set('dbname', value)", "docstring": "Set the connection's database name property.\n\nArgs:\nvalue:  New name of the database. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def add_layout(self, obj, place='center'):\n        \n        valid_places = ['left', 'right', 'above', 'below', 'center']\n        if place not in valid_places:\n            raise ValueError(\n                \"Invalid place '%s' specified. Valid place values are: %s\" % (place, nice_join(valid_places))\n            )\n\n        getattr(self, place).append(obj)", "docstring": "Adds an object to the plot in a specified place.\n\nArgs:\nobj (Renderer) : the object to add to the Plot\nplace (str, optional) : where to add the object (default: 'center')\nValid places are: 'left', 'right', 'above', 'below', 'center'.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def run_shell_command(state, host, command, get_pty=False, timeout=None, print_output=False, **command_kwargs):\n    command = make_command(command, **command_kwargs)\n    logger.debug('Running command on {0}: (pty={1}) {2}'.format(host.name, get_pty, command))\n    if print_output:\n        print('{0}>>> {1}'.format(host.print_prefix, command))\n    (_, stdout_buffer, stderr_buffer) = host.connection.exec_command(command, get_pty=get_pty)\n    channel = stdout_buffer.channel\n    stdout_reader = gevent.spawn(read_buffer, stdout_buffer, print_output=print_output, print_func=(lambda line: '{0}{1}'.format(host.print_prefix, line)))\n    stderr_reader = gevent.spawn(read_buffer, stderr_buffer, print_output=print_output, print_func=(lambda line: '{0}{1}'.format(host.print_prefix, click.style(line, 'red'))))\n    greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)\n    if (len(greenlets) != 2):\n        stdout_reader.kill()\n        stderr_reader.kill()\n        raise timeout_error()\n    stdout = stdout_reader.get()\n    stderr = stderr_reader.get()\n    logger.debug('Waiting for exit status...')\n    exit_status = channel.recv_exit_status()\n    logger.debug('Command exit status: {0}'.format(exit_status))\n    return ((exit_status == 0), stdout, stderr)", "docstring": "Execute a command on the specified host.\n\nArgs:\nstate (``pyinfra.api.State`` obj): state object for this command\nhostname (string): hostname of the target\ncommand (string): actual command to execute\nsudo (boolean): whether to wrap the command with sudo\nsudo_user (string): user to sudo to\nget_pty (boolean): whether to get a PTY before executing the command\nenv (dict): envrionment variables to set\ntimeout (int): timeout for this command to complete before erroring\n\nReturns:\ntuple: (exit_code, stdout, stderr)\nstdout and stderr are both lists of strings from each buffer.", "source": "codesearchnet"}
{"code": "def create_mapping(record, keys):\n        \n\n        ordered = OrderedDict()\n        field_mappings = []\n\n        for key, value in record.items():\n            ordered[key] = value\n            field_mappings.append({\n                'columnNumber': len(ordered),  \n                'fieldName': key,\n                'key': key in keys,\n            })\n\n        return {\n            'field_mappings': field_mappings,\n            'data': ordered,\n            'fields': list(ordered.values()),\n        }", "docstring": "Create a field mapping for use in API updates and creates.\n\nArgs:\nrecord (BaseModel): Record that should be mapped.\nkeys (list[str]): Fields that should be mapped as keys.\n\nReturns:\ndict: Dictionary with keys:\n\n* ``field_mappings``: Field mappings as required by API.\n* ``data``: Ordered data dictionary for input record.", "source": "juraj-google-style"}
{"code": "def save(self, new_path=None):\n        \n        self.saved_in_temp = new_path is None\n        if new_path is None:\n            fd, new_path = tempfile.mkstemp()\n            os.close(fd)\n\n        if self.current_path:\n            shutil.move(self.current_path, new_path)\n        else:\n            with open(new_path, 'wb') as dest:\n                _copy_stream(self._data, dest, self._size)\n        self.current_path = new_path", "docstring": "Moves or creates the file with stream contents to a new location.\n\nArgs:\nnew_path: path to move to, if None a temporary file is created.", "source": "juraj-google-style"}
{"code": "def analyze_step_stats(self, show_dataflow: bool=True, show_memory: bool=True, op_time: str='schedule') -> StepStatsAnalysis:\n    self._preprocess_op_time(op_time)\n    self._allocate_pids()\n    self._assign_lanes()\n    self._analyze_tensors(show_memory)\n    self._show_compute(show_dataflow)\n    if show_memory:\n        self._show_memory_counters()\n    return StepStatsAnalysis(chrome_trace=self._chrome_trace, allocator_maximums=self._allocator_maximums)", "docstring": "Analyze the step stats and format it into Chrome Trace Format.\n\nArgs:\nshow_dataflow: (Optional.) If True, add flow events to the trace\nconnecting producers and consumers of tensors.\nshow_memory: (Optional.) If True, add object snapshot events to the trace\nshowing the sizes and lifetimes of tensors.\nop_time: (Optional.) How the execution time of op is shown in timeline.\nPossible values are \"schedule\", \"gpu\" and \"all\". \"schedule\" will show op\nfrom the time it is scheduled to the end of the scheduling. Notice by\nthe end of its scheduling its async kernels may not start yet. It is\nshown using the default value from step_stats. \"gpu\" will show op with\nthe execution time of its kernels on GPU. \"all\" will show op from the\nstart of its scheduling to the end of its last kernel.\n\nReturns:\nA 'StepStatsAnalysis' object.", "source": "github-repos"}
{"code": "def notify_program_learners(cls, enterprise_customer, program_details, users):\n        \n        program_name = program_details.get('title')\n        program_branding = program_details.get('type')\n        program_uuid = program_details.get('uuid')\n\n        lms_root_url = get_configuration_value_for_site(\n            enterprise_customer.site,\n            'LMS_ROOT_URL',\n            settings.LMS_ROOT_URL\n        )\n        program_path = urlquote(\n            '/dashboard/programs/{program_uuid}/?tpa_hint={tpa_hint}'.format(\n                program_uuid=program_uuid,\n                tpa_hint=enterprise_customer.identity_provider,\n            )\n        )\n        destination_url = '{site}/{login_or_register}?next={program_path}'.format(\n            site=lms_root_url,\n            login_or_register='{login_or_register}',\n            program_path=program_path\n        )\n        program_type = 'program'\n        program_start = get_earliest_start_date_from_program(program_details)\n\n        with mail.get_connection() as email_conn:\n            for user in users:\n                login_or_register = 'register' if isinstance(user, PendingEnterpriseCustomerUser) else 'login'\n                destination_url = destination_url.format(login_or_register=login_or_register)\n                send_email_notification_message(\n                    user=user,\n                    enrolled_in={\n                        'name': program_name,\n                        'url': destination_url,\n                        'type': program_type,\n                        'start': program_start,\n                        'branding': program_branding,\n                    },\n                    enterprise_customer=enterprise_customer,\n                    email_connection=email_conn\n                )", "docstring": "Notify learners about a program in which they've been enrolled.\n\nArgs:\nenterprise_customer: The EnterpriseCustomer being linked to\nprogram_details: Details about the specific program the learners were enrolled in\nusers: An iterable of the users or pending users who were enrolled", "source": "juraj-google-style"}
{"code": "def encipher_shift(plaintext, plain_vocab, shift):\n    ciphertext = []\n    cipher = ShiftEncryptionLayer(plain_vocab, shift)\n    for (_, sentence) in enumerate(plaintext):\n        cipher_sentence = []\n        for (_, character) in enumerate(sentence):\n            encrypted_char = cipher.encrypt_character(character)\n            cipher_sentence.append(encrypted_char)\n        ciphertext.append(cipher_sentence)\n    return ciphertext", "docstring": "Encrypt plain text with a single shift layer.\n\nArgs:\nplaintext (list of list of Strings): a list of plain text to encrypt.\nplain_vocab (list of Integer): unique vocabularies being used.\nshift (Integer): number of shift, shift to the right if shift is positive.\nReturns:\nciphertext (list of Strings): encrypted plain text.", "source": "codesearchnet"}
{"code": "def format_filter_value(self, element, value):\n    format_func = self.allowed_filter.get(element)\n    return format_func(value)", "docstring": "Calls the specific function to format value,\ndepending on the given element.\n\nArguments:\nelement (string): The element of the VT to be formatted.\nvalue (dictionary): The element value.\n\nReturns:\nReturns a formatted value.", "source": "codesearchnet"}
{"code": "def frombytes(data, size, bandtype=gdal.GDT_Byte):\n    \n    r = ImageDriver('MEM').raster('', size, bandtype)\n    r.frombytes(data)\n    return r", "docstring": "Returns an in-memory raster initialized from a pixel buffer.\n\nArguments:\ndata -- byte buffer of raw pixel data\nsize -- two or three-tuple of (xsize, ysize, bandcount)\nbandtype -- band data type", "source": "juraj-google-style"}
{"code": "def get_by_ip_hostname(self, ip_hostname):\n        \n        resources = self._client.get_all()\n\n        resources_filtered = [x for x in resources if x['credentials']['ip_hostname'] == ip_hostname]\n\n        if resources_filtered:\n            return resources_filtered[0]\n        else:\n            return None", "docstring": "Retrieve a storage system by its IP.\n\nWorks only with API version <= 300.\n\nArgs:\nip_hostname: Storage system IP or hostname.\n\nReturns:\ndict", "source": "juraj-google-style"}
{"code": "def run_tac(model_path, targets, output_path):\n    if not model_path:\n        raise ValueError('Invalid model_path.')\n    if not targets:\n        raise ValueError('Targets are not specified.')\n    if not output_path:\n        raise ValueError('Invalid output_path.')\n    return _pywrap_tac_wrapper.run_tac(model_path, targets, output_path)", "docstring": "Run target aware conversion for the given tflite model file.\n\nArgs:\nmodel_path: Path to the tflite model file.\ntargets: A list of string of the desired targets. E.g., ['GPU', 'CPU'].\noutput_path: The output path.\n\nReturns:\nWhether the optimization succeeded.\n\nRaises:\nValueError:\nInvalid model_path.\nTargets are not specified.\nInvalid output_path.", "source": "github-repos"}
{"code": "def make_m_psd(self, original_nu, feed_dictionary):\n    \n    feed_dict = feed_dictionary.copy()\n    _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)\n\n    lower_nu = original_nu\n    upper_nu = original_nu\n    num_iter = 0\n\n    \n    while min_eig_val_m - TOL < 0 and num_iter < (MAX_BINARY_SEARCH_ITER / 2):\n      num_iter += 1\n      upper_nu *= NU_UPDATE_CONSTANT\n      feed_dict.update({self.nu: upper_nu})\n      _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)\n\n    final_nu = upper_nu\n\n    \n    while lower_nu <= upper_nu and num_iter < MAX_BINARY_SEARCH_ITER:\n      num_iter += 1\n      mid_nu = (lower_nu + upper_nu) / 2\n      feed_dict.update({self.nu: mid_nu})\n      _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)\n      if min_eig_val_m - TOL < 0:\n        lower_nu = mid_nu\n      else:\n        upper_nu = mid_nu\n\n    final_nu = upper_nu\n\n    return final_nu", "docstring": "Run binary search to find a value for nu that makes M PSD\nArgs:\noriginal_nu: starting value of nu to do binary search on\nfeed_dictionary: dictionary of updated lambda variables to feed into M\nReturns:\nnew_nu: new value of nu", "source": "juraj-google-style"}
{"code": "def delta_hv(scatterer):\n    \n    Z = scatterer.get_Z()\n    return np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])", "docstring": "Delta_hv for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\n\nReturns:\nDelta_hv [rad].", "source": "juraj-google-style"}
{"code": "def nic_v2(msg, NICa, NICbc):\n    if ((typecode(msg) < 5) or (typecode(msg) > 22)):\n        raise RuntimeError(('%s: Not a surface position message (5<TC<8),             airborne position message (8<TC<19),             or airborne position with GNSS height (20<TC<22)' % msg))\n    tc = typecode(msg)\n    NIC = uncertainty.TC_NICv2_lookup[tc]\n    if (20 <= tc <= 22):\n        NICs = 0\n    else:\n        NICs = ((NICa * 2) + NICbc)\n    try:\n        if isinstance(NIC, dict):\n            NIC = NIC[NICs]\n        Rc = uncertainty.NICv2[NIC][NICs]['Rc']\n    except KeyError:\n        Rc = uncertainty.NA\n    return Rc", "docstring": "Calculate NIC, navigation integrity category, for ADS-B version 2\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\nNICa (int or string): NIC supplement - A\nNICbc (int or srting): NIC supplement - B or C\n\nReturns:\nint or string: Horizontal Radius of Containment", "source": "codesearchnet"}
{"code": "def orient_undirected_graph(self, data, graph):\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    self.arguments['{SCORE}'] = self.score\n    self.arguments['{BETA}'] = str(self.beta)\n    self.arguments['{OPTIM}'] = str(self.optim).upper()\n    self.arguments['{ALPHA}'] = str(self.alpha)\n    whitelist = DataFrame(list(nx.edges(graph)), columns=['from', 'to'])\n    blacklist = DataFrame(list(nx.edges(nx.DiGraph(DataFrame(((- nx.adj_matrix(graph, weight=None).to_dense()) + 1), columns=list(graph.nodes()), index=list(graph.nodes()))))), columns=['from', 'to'])\n    results = self._run_bnlearn(data, whitelist=whitelist, blacklist=blacklist, verbose=self.verbose)\n    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Run the algorithm on an undirected graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.Graph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution on the given skeleton.", "source": "codesearchnet"}
{"code": "def calculate_weighted_avg(bonds):\n    minimum_bond = min(bonds)\n    weighted_sum = 0.0\n    total_sum = 0.0\n    for entry in bonds:\n        weighted_sum += (entry * exp((1 - ((entry / minimum_bond) ** 6))))\n        total_sum += exp((1 - ((entry / minimum_bond) ** 6)))\n    return (weighted_sum / total_sum)", "docstring": "Returns the weighted average bond length given by\nHoppe's effective coordination number formula.\n\nArgs:\nbonds (list): list of floats that are the\nbond distances between a cation and its\nperipheral ions", "source": "codesearchnet"}
{"code": "def merge_entries(self, source_entry):\n    for list_attr in source_entry.attrs.values():\n        for attr in list_attr:\n            self.attrs[attr.header.attr_type_id].append(attr)\n    for stream in source_entry.data_streams:\n        dest_stream = self._find_datastream(stream.name)\n        if (dest_stream is not None):\n            dest_stream.add_from_datastream(stream)\n        else:\n            self.data_streams.append(stream)", "docstring": "Merge two entries.\n\nAllow the merging of two MFTEntries copying the attributes to the correct\nplace and the datastreams.\n\nArgs:\nsource_entry (:obj:`MFTEntry`) - Source entry where the data will be\ncopied from", "source": "codesearchnet"}
{"code": "def probabilities(input_energy: energy.BitstringEnergy):\n    all_bitstrings = tf.constant(list(itertools.product([0, 1], repeat=input_energy.num_bits)), dtype=tf.int8)\n    all_energies = input_energy(all_bitstrings)\n    energy_exp = tf.math.exp(-all_energies)\n    partition = tf.math.reduce_sum(energy_exp)\n    return energy_exp / partition", "docstring": "Returns the probabilities of the EBM.\n\nArgs:\ninput_energy: The energy function defining the EBM.", "source": "github-repos"}
{"code": "def get_cohp_by_label(self, label):\n    if (label.lower() == 'average'):\n        return Cohp(efermi=self.efermi, energies=self.energies, cohp=self.cohp, are_coops=self.are_coops, icohp=self.icohp)\n    else:\n        try:\n            return Cohp(efermi=self.efermi, energies=self.energies, cohp=self.all_cohps[label].get_cohp(spin=None, integrated=False), are_coops=self.are_coops, icohp=self.all_cohps[label].get_icohp(spin=None))\n        except KeyError:\n            print('The label does not exist')", "docstring": "Get specific COHP object.\n\nArgs:\nlabel: string (for newer Lobster versions: a number)\n\nReturns:\nReturns the COHP object to simplify plotting", "source": "codesearchnet"}
{"code": "def getContextsForTerm(self, retina_name, term, get_fingerprint=None, start_index=0, max_results=5):\n        \n\n        resourcePath = '/terms/contexts'\n        method = 'GET'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        queryParams['retina_name'] = retina_name\n        queryParams['term'] = term\n        queryParams['start_index'] = start_index\n        queryParams['max_results'] = max_results\n        queryParams['get_fingerprint'] = get_fingerprint\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n        return [context.Context(**r) for r in response.json()]", "docstring": "Get the contexts for a given term\nArgs:\nretina_name, str: The retina name (required)\nterm, str: A term in the retina (required)\nget_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)\nstart_index, int: The start-index for pagination (optional) (optional)\nmax_results, int: Max results per page (optional) (optional)\nReturns: Array[Context]", "source": "juraj-google-style"}
{"code": "def add_tensor_filter(self, filter_name, filter_callable):\n    if not isinstance(filter_name, str):\n        raise TypeError('Input argument filter_name is expected to be str, but is not.')\n    if not filter_name:\n        raise ValueError('Input argument filter_name cannot be empty.')\n    if not callable(filter_callable):\n        raise TypeError('Input argument filter_callable is expected to be callable, but is not.')\n    self._tensor_filters[filter_name] = filter_callable", "docstring": "Add a tensor filter.\n\nA tensor filter is a named callable of the signature:\nfilter_callable(dump_datum, tensor),\n\nwherein dump_datum is an instance of debug_data.DebugTensorDatum carrying\nmetadata about the dumped tensor, including tensor name, timestamps, etc.\ntensor is the value of the dumped tensor as an numpy.ndarray object.\nThe return value of the function is a bool.\nThis is the same signature as the input argument to\ndebug_data.DebugDumpDir.find().\n\nArgs:\nfilter_name: (str) name of the filter. Cannot be empty.\nfilter_callable: (callable) a filter function of the signature described\nas above.\n\nRaises:\nValueError: If filter_name is an empty str.\nTypeError: If filter_name is not a str.\nOr if filter_callable is not callable.", "source": "github-repos"}
{"code": "def GetTypeChecker(field):\n    if ((field.cpp_type == _FieldDescriptor.CPPTYPE_STRING) and (field.type == _FieldDescriptor.TYPE_STRING)):\n        return UnicodeValueChecker()\n    if (field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM):\n        if SupportsOpenEnums(field):\n            return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]\n        else:\n            return EnumValueChecker(field.enum_type)\n    return _VALUE_CHECKERS[field.cpp_type]", "docstring": "Returns a type checker for a message field of the specified types.\n\nArgs:\nfield: FieldDescriptor object for this field.\n\nReturns:\nAn instance of TypeChecker which can be used to verify the types\nof values assigned to a field of the specified type.", "source": "codesearchnet"}
{"code": "def value_loss_given_predictions(value_prediction,\n                                 rewards,\n                                 reward_mask,\n                                 gamma=0.99):\n  \n\n  B, T = rewards.shape  \n  assert (B, T) == reward_mask.shape\n  assert (B, T + 1, 1) == value_prediction.shape\n\n  value_prediction = np.squeeze(value_prediction, axis=2)  \n  value_prediction = value_prediction[:, :-1] * reward_mask  \n  r2g = rewards_to_go(rewards, reward_mask, gamma=gamma)  \n  loss = (value_prediction - r2g)**2\n\n  \n  return np.sum(loss) / np.sum(reward_mask)", "docstring": "Computes the value loss given the prediction of the value function.\n\nArgs:\nvalue_prediction: np.ndarray of shape (B, T+1, 1)\nrewards: np.ndarray of shape (B, T) of rewards.\nreward_mask: np.ndarray of shape (B, T), the mask over rewards.\ngamma: float, discount factor.\n\nReturns:\nThe average L2 value loss, averaged over instances where reward_mask is 1.", "source": "juraj-google-style"}
{"code": "def dbExec(self, query_str):\n        \n        try:\n            connection = sqlite3.connect(self.m_connection_string)\n            cursor = connection.cursor()\n            cursor.execute(query_str)\n            connection.commit()\n            cursor.close()\n            connection.close()\n            return True\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n            return False\n        pass", "docstring": "Required override of dbExec() from MeterDB(), run query.\nArgs:\nquery_str (str): query to run", "source": "juraj-google-style"}
{"code": "def fprime(self, w, *args):\n    x0 = args[0]\n    x1 = args[1]\n    n0 = x0.shape[0]\n    n1 = x1.shape[0]\n    n = (max(n0, n1) * 10)\n    idx0 = np.random.choice(range(n0), size=n)\n    idx1 = np.random.choice(range(n1), size=n)\n    b = np.ones((n, 1))\n    i1 = (self.i + 1)\n    h = self.h\n    h1 = (h + 1)\n    w2 = w[(- h1):].reshape(h1, 1)\n    w1 = w[:(- h1)].reshape(i1, h)\n    if sparse.issparse(x0):\n        x0 = x0.tocsr()[idx0]\n        x1 = x1.tocsr()[idx1]\n        xb0 = sparse.hstack((x0, b))\n        xb1 = sparse.hstack((x1, b))\n    else:\n        x0 = x0[idx0]\n        x1 = x1[idx1]\n        xb0 = np.hstack((x0, b))\n        xb1 = np.hstack((x1, b))\n    z0 = np.hstack((sigm(xb0.dot(w1)), b))\n    z1 = np.hstack((sigm(xb1.dot(w1)), b))\n    y0 = z0.dot(w2)\n    y1 = z1.dot(w2)\n    e = (1 - (y1 - y0))\n    dy = (e / n)\n    dw1 = ((- (xb1.T.dot((dy.dot(w2[:(- 1)].reshape(1, h)) * dsigm(xb1.dot(w1)))) - xb0.T.dot((dy.dot(w2[:(- 1)].reshape(1, h)) * dsigm(xb0.dot(w1))))).reshape((i1 * h))) + ((self.l1 * w[:(- h1)]) / (i1 * h)))\n    dw2 = ((- (z1 - z0).T.dot(dy).reshape(h1)) + ((self.l2 * w[(- h1):]) / h1))\n    return np.append(dw1, dw2)", "docstring": "Return the derivatives of the cost function for predictions.\n\nArgs:\nw (array of float): weight vectors such that:\nw[:-h1] -- weights between the input and h layers\nw[-h1:] -- weights between the h and output layers\nargs: features (args[0]) and target (args[1])\n\nReturns:\ngradients of the cost function for predictions", "source": "codesearchnet"}
{"code": "def _check_debug_tensor_value(self, tensor_debug_mode, debug_tensor_value, wall_time, op_type, output_slot, execution_index=None, graph_execution_trace_index=None):\n    assert tensor_debug_mode != debug_event_pb2.TensorDebugMode.FULL_TENSOR\n    if not debug_tensor_value:\n        return\n    if tensor_debug_mode == debug_event_pb2.TensorDebugMode.CURT_HEALTH:\n        _, any_nan_inf = debug_tensor_value\n        if any_nan_inf:\n            self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))\n    elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.CONCISE_HEALTH:\n        _, size, num_neg_inf, num_pos_inf, num_nan = debug_tensor_value\n        if num_neg_inf or num_pos_inf or num_nan:\n            self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, size=size, num_neg_inf=num_neg_inf, num_pos_inf=num_pos_inf, num_nan=num_nan, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))\n    elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_HEALTH:\n        _, _, _, _, size, num_neg_inf, num_pos_inf, num_nan, _, _, _ = debug_tensor_value\n        if num_neg_inf or num_pos_inf or num_nan:\n            self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, size=size, num_neg_inf=num_neg_inf, num_pos_inf=num_pos_inf, num_nan=num_nan, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))", "docstring": "Check for bad numerical values based on debug summary of tensor value.\n\nIf tensor_debug_mode is one in which debug_tensor_value does not carry\ninformation about the presence or count of inf / nan values (e.g., SHAPE),\nthis method is a no-op.\n\nWhen infs and/or nans are found, `InfNanAlert` objects are created and\nappended to `self._alerts`.\n\nArgs:\ntensor_debug_mode: TensorDebugMode proto enum.\ndebug_tensor_value: Debug tensor value as a list of numbers.\nwall_time: Wall timestamp for the tensor event.\nop_type: Type of the op that generated the tensor (e.g., \"Conv2D\").\noutput_slot: Output slot index of the tensor for the op.\nexecution_index: Top-level execution index.\ngraph_execution_trace_index: Intra-graph execution index.", "source": "github-repos"}
{"code": "def DeregisterHelper(cls, resolver_helper):\n    if (resolver_helper.type_indicator not in cls._resolver_helpers):\n        raise KeyError('Resolver helper object not set for type indicator: {0:s}.'.format(resolver_helper.type_indicator))\n    del cls._resolver_helpers[resolver_helper.type_indicator]", "docstring": "Deregisters a path specification resolver helper.\n\nArgs:\nresolver_helper (ResolverHelper): resolver helper.\n\nRaises:\nKeyError: if resolver helper object is not set for the corresponding\ntype indicator.", "source": "codesearchnet"}
{"code": "def create_function(self, vpc_config):\n    zip_file = 'lambda-holder.zip'\n    with zipfile.ZipFile(zip_file, mode='w') as zipped:\n        zipped.writestr('index.py', 'print \"Hello world\"')\n    contents = ''\n    with open('lambda-holder.zip', 'rb') as openfile:\n        contents = openfile.read()\n    LOG.info('Creating lambda function: %s', self.app_name)\n    try:\n        self.lambda_client.create_function(Environment=self.lambda_environment, FunctionName=self.app_name, Runtime=self.runtime, Role=self.role_arn, Handler=self.handler, Code={'ZipFile': contents}, Description=self.description, Timeout=int(self.timeout), MemorySize=int(self.memory), Publish=False, VpcConfig=vpc_config, Tags={'app_group': self.group, 'app_name': self.app_name})\n    except boto3.exceptions.botocore.exceptions.ClientError as error:\n        if ('CreateNetworkInterface' in error.response['Error']['Message']):\n            message = '{0} is missing \"ec2:CreateNetworkInterface\"'.format(self.role_arn)\n            LOG.critical(message)\n            raise SystemExit(message)\n        raise\n    LOG.info('Successfully created Lambda function and alias')", "docstring": "Create lambda function, configures lambda parameters.\n\nWe need to upload non-zero zip when creating function. Uploading\nhello_world python lambda function since AWS doesn't care which\nexecutable is in ZIP.\n\nArgs:\nvpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using\na VPC in lambda", "source": "codesearchnet"}
{"code": "def get_folders(cls, session, mailbox_or_id):\n    if isinstance(mailbox_or_id, Mailbox):\n        mailbox_or_id = mailbox_or_id.id\n    return cls(('/mailboxes/%d/folders.json' % mailbox_or_id), session=session, out_type=Folder)", "docstring": "List the folders for the mailbox.\n\nArgs:\nmailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID\nof the mailbox to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Folder): Folders\niterator.", "source": "codesearchnet"}
{"code": "def global_step(self):\n    return self._global_step", "docstring": "Return the global_step Tensor used by the supervisor.\n\nReturns:\nAn integer Tensor for the global_step.", "source": "github-repos"}
{"code": "def service_info(self, short_name):\n        \n\n        if short_name not in self.services:\n            raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n        info = {}\n        info['short_name'] = short_name\n        info['long_name'] = self.services[short_name]['state'].long_name\n        info['preregistered'] = self.services[short_name]['state'].preregistered\n\n        return info", "docstring": "Get static information about a service.\n\nArgs:\nshort_name (string): The short name of the service to query\n\nReturns:\ndict: A dictionary with the long_name and preregistered info\non this service.", "source": "juraj-google-style"}
{"code": "def neighbours_pattern(element):\n    \n    \n    if not element.parent:\n        return []\n\n    parent = element.parent\n\n    \n    neighbours = filter(\n        lambda x: x.isTag() and not x.isEndTag() or x.getContent().strip() \\\n                  or x is element,\n        parent.childs\n    )\n    if len(neighbours) <= 1:\n        return []\n\n    output = []\n    element_index = neighbours.index(element)\n\n    \n    if element_index >= 1:\n        output.append(\n            _neighbour_to_path_call(\n                \"left\",\n                neighbours[element_index - 1],\n                element\n            )\n        )\n\n    \n    if element_index + 1 < len(neighbours):\n        output.append(\n            _neighbour_to_path_call(\n                \"right\",\n                neighbours[element_index + 1],\n                element\n            )\n        )\n\n    return output", "docstring": "Look for negihbours of the `element`, return proper :class:`PathCall`.\n\nArgs:\nelement (obj): HTMLElement instance of the object you are looking for.\n\nReturns:\nlist: List of :class:`PathCall` instances.", "source": "juraj-google-style"}
{"code": "def _get_default_help_message(func, args, description=None, args_help=None):\n    if (description is None):\n        description = ('Argument parsing for %s' % func.__name__)\n    args_help = (args_help or {})\n    for argument in [arg_name for arg_name in args if (arg_name not in args_help)]:\n        args_help[argument] = ('Help message for %s' % argument)\n    return (description, args_help)", "docstring": "Create a default description for the parser and help message for the\nagurments if they are missing.\n\nArgs:\nfunc: the method we are creating a parser for\nargs: the argument names of the method\ndescription: a potentially existing description created from the\nfunction docstring\nargs_help: a dict {arg_name: help} with potentially missing arguments\n\nReturns:\na tuple (arg_parse_description, complete_args_help)", "source": "codesearchnet"}
{"code": "def label(self, input_grid):\n        \n        unset = 0\n        high_labels, num_labels = label(input_grid > self.high_thresh)\n        region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1]\n        output_grid = np.zeros(input_grid.shape, dtype=int)\n        stack = []\n        for rank in region_ranking:\n            label_num = rank + 1\n            label_i, label_j = np.where(high_labels == label_num)\n            for i in range(label_i.size):\n                if output_grid[label_i[i], label_j[i]] == unset:\n                    stack.append((label_i[i], label_j[i]))\n            while len(stack) > 0:\n                index = stack.pop()\n                output_grid[index] = label_num\n                for i in range(index[0] - 1, index[0] + 2):\n                    for j in range(index[1] - 1, index[1] + 2):\n                        if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]:\n                            if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset):\n                                stack.append((i, j))\n        return output_grid", "docstring": "Label input grid with hysteresis method.\n\nArgs:\ninput_grid: 2D array of values.\n\nReturns:\nLabeled output grid.", "source": "juraj-google-style"}
{"code": "def thermal_expansion_coeff(self, structure, temperature, mode='debye'):\n    soec = ElasticTensor(self[0])\n    v0 = ((structure.volume * 1e-30) / structure.num_sites)\n    if (mode == 'debye'):\n        td = soec.debye_temperature(structure)\n        t_ratio = (temperature / td)\n        integrand = (lambda x: (((x ** 4) * np.exp(x)) / ((np.exp(x) - 1) ** 2)))\n        cv = (((9 * 8.314) * (t_ratio ** 3)) * quad(integrand, 0, (t_ratio ** (- 1)))[0])\n    elif (mode == 'dulong-petit'):\n        cv = (3 * 8.314)\n    else:\n        raise ValueError('Mode must be debye or dulong-petit')\n    tgt = self.get_tgt(temperature, structure)\n    alpha = np.einsum('ijkl,ij', soec.compliance_tensor, tgt)\n    alpha *= (cv / ((1000000000.0 * v0) * 6.022e+23))\n    return SquareTensor(alpha)", "docstring": "Gets thermal expansion coefficient from third-order constants.\n\nArgs:\ntemperature (float): Temperature in kelvin, if not specified\nwill return non-cv-normalized value\nstructure (Structure): Structure to be used in directional heat\ncapacity determination, only necessary if temperature\nis specified\nmode (string): mode for finding average heat-capacity,\ncurrent supported modes are 'debye' and 'dulong-petit'", "source": "codesearchnet"}
{"code": "def _add_rank_score(self, variant_obj, info_dict):\n        \n        rank_score_entry = info_dict.get('RankScore')\n        if rank_score_entry:\n            for family_annotation in rank_score_entry.split(','):\n                rank_score = family_annotation.split(':')[-1]\n            logger.debug(\"Updating rank_score to: {0}\".format(\n                rank_score))\n            variant_obj.rank_score = float(rank_score)", "docstring": "Add the rank score if found\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ninfo_dict (dict): A info dictionary", "source": "juraj-google-style"}
{"code": "def get_args(path):\n    defaults = get_defaults(path)\n    licenses = ', '.join(os.listdir((cwd + licenses_loc)))\n    p = parser(description=('tool for adding open source licenses to your projects. available licenses: %s' % licenses))\n    _name = (False if defaults.get('name') else True)\n    _email = (False if defaults.get('email') else True)\n    _license = (False if defaults.get('license') else True)\n    p.add_argument('-n', dest='name', required=_name, help='name')\n    p.add_argument('-e', dest='email', required=_email, help='email')\n    p.add_argument('-l', dest='license', required=_license, help='license')\n    p.add_argument('-p', dest='project', required=False, help='project')\n    p.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=version))\n    p.add_argument('--txt', action='store_true', required=False, help='add .txt to filename')\n    args = p.parse_args()\n    name = (args.name if args.name else defaults.get('name'))\n    email = (args.email if args.email else defaults.get('email'))\n    license = (get_license(args.license) if args.license else defaults.get('license'))\n    project = (args.project if args.project else os.getcwd().split('/')[(- 1)])\n    ext = ('.txt' if args.txt else '')\n    year = str(date.today().year)\n    return (name, email, license, project, ext, year)", "docstring": "Parse command line args & override defaults.\n\nArguments:\n- path (str) Absolute filepath\n\nReturns:\n- (tuple) Name, email, license, project, ext, year", "source": "codesearchnet"}
{"code": "def get_config(self):\n    config = {'name': self._name}\n    if self.clipnorm is not None:\n        config['clipnorm'] = self.clipnorm\n    if self.clipvalue is not None:\n        config['clipvalue'] = self.clipvalue\n    if self.global_clipnorm is not None:\n        config['global_clipnorm'] = self.global_clipnorm\n    return config", "docstring": "Returns the config of the optimizer.\n\nAn optimizer config is a Python dictionary (serializable)\ncontaining the configuration of an optimizer.\nThe same optimizer can be reinstantiated later\n(without any saved state) from this configuration.\n\nReturns:\nPython dictionary.", "source": "github-repos"}
{"code": "def _insert_stack(stack, sample_count, call_tree):\n    curr_level = call_tree\n    for func in stack:\n        next_level_index = {node['stack']: node for node in curr_level['children']}\n        if (func not in next_level_index):\n            new_node = {'stack': func, 'children': [], 'sampleCount': 0}\n            curr_level['children'].append(new_node)\n            curr_level = new_node\n        else:\n            curr_level = next_level_index[func]\n    curr_level['sampleCount'] = sample_count", "docstring": "Inserts stack into the call tree.\n\nArgs:\nstack: Call stack.\nsample_count: Sample count of call stack.\ncall_tree: Call tree.", "source": "codesearchnet"}
{"code": "def register(self, name):\n    if (name not in settings.CODEMIRROR_SETTINGS):\n        msg = \"Given config name '{}' does not exists in 'settings.CODEMIRROR_SETTINGS'.\"\n        raise UnknowConfigError(msg.format(name))\n    parameters = copy.deepcopy(self.default_internal_config)\n    parameters.update(copy.deepcopy(settings.CODEMIRROR_SETTINGS[name]))\n    if ('css_bundle_name' not in parameters):\n        css_template_name = settings.CODEMIRROR_BUNDLE_CSS_NAME\n        parameters['css_bundle_name'] = css_template_name.format(settings_name=name)\n    if ('js_bundle_name' not in parameters):\n        js_template_name = settings.CODEMIRROR_BUNDLE_JS_NAME\n        parameters['js_bundle_name'] = js_template_name.format(settings_name=name)\n    self.registry[name] = parameters\n    return parameters", "docstring": "Register configuration for an editor instance.\n\nArguments:\nname (string): Config name from available ones in\n``settings.CODEMIRROR_SETTINGS``.\n\nRaises:\nUnknowConfigError: If given config name does not exist in\n``settings.CODEMIRROR_SETTINGS``.\n\nReturns:\ndict: Registred config dict.", "source": "codesearchnet"}
{"code": "def LogHttpFrontendAccess(self, request, source=None, message_count=None):\n    \n    \n    \n    event_id = self.GetNewEventId()\n\n    log_msg = \"%s-%s [%s]: %s %s %s %s (%d)\" % (\n        event_id, request.source_ip, source or \"<unknown>\", request.method,\n        request.url, request.user_agent, request.user, message_count or 0)\n    logging.info(log_msg)", "docstring": "Write a log entry for a Frontend or UI Request.\n\nArgs:\nrequest: A HttpRequest protobuf.\nsource: Client id of the client initiating the request. Optional.\nmessage_count: Number of messages received from the client. Optional.", "source": "juraj-google-style"}
{"code": "def write_config(config, filename=None):\n    if (not filename):\n        filename = CONFIG_DEFAULT_PATH\n    with open(filename, 'w') as f:\n        json.dump(config, f, indent=4)", "docstring": "Write the provided configuration to a specific location.\n\nArgs:\nconfig (dict): a dictionary with the configuration to load.\nfilename (str): the name of the file that will store the new configuration. Defaults to ``None``.\nIf ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.", "source": "codesearchnet"}
{"code": "def _process_pricing_schedule(self, item, feed_item):\n    if 'pricing_schedule' in feed_item and feed_item['pricing_schedule']:\n        if not 'pricingSchedule' in item:\n            item['pricingSchedule'] = {}\n        item['pricingSchedule']['pricingPeriods'] = []\n        for pricing_schedule in feed_item['pricing_schedule']:\n            item['pricingSchedule']['pricingPeriods'].append({'endDate': pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_END, None), 'startDate': pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_START, None), 'rateOrCostNanos': int(float(pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_RATE)) * 1000000000), 'units': pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_UNITS)})", "docstring": "Updates / creates pricing schedule settings.\n\nThis method updates the CM item with pricing schedule based on\nconfigurations from the Bulkdozer feed.\n\nArgs:\nitem: the CM placement object to update.\nfeed_item: The Bulkdozer feed item representing the settings to define.", "source": "github-repos"}
{"code": "def coerce(self, value):\n        \n        if not isinstance(value, compat.basestring):\n\n            value = str(value)\n\n        if not self._re.match(value):\n\n            raise ValueError(\n                \"The value {0} does not match the pattern {1}\".format(\n                    value,\n                    self.pattern,\n                )\n            )\n\n        return value", "docstring": "Convert a value into a pattern matched string value.\n\nAll string values are matched against a regex before they are\nconsidered acceptable values.\n\nArgs:\nvalue (any): The value to coerce.\n\nRaises:\nValueError: If the value is not an acceptable value.\n\nReturns:\nstr: The pattern matched value represented.", "source": "juraj-google-style"}
{"code": "def _build_colocation_attr_map(input_map, absolute_import_scope):\n    colocation_attr_map = collections.defaultdict(_ConsistentValue)\n    used_outputs_of_imported_ops = collections.defaultdict(set)\n    for (imported_tensor_name, mapped_tensor) in input_map.items():\n        imported_tensor_name = ((absolute_import_scope + '/') + imported_tensor_name)\n        (imported_op_name, imported_index) = _split_tensor_name(imported_tensor_name)\n        key = tf.compat.as_bytes(('loc:@' + imported_op_name))\n        colocation_attr_map[key].Set(mapped_tensor.op.colocation_groups(), {'reason': (\"input '%s' is substituted by '%s'\" % (imported_tensor_name, mapped_tensor.name))})\n        used_outputs_of_imported_ops[imported_op_name].add(imported_index)\n    for (imported_op_name, used_outputs) in used_outputs_of_imported_ops.items():\n        imported_op = tf_v1.get_default_graph().get_operation_by_name(imported_op_name)\n        unused_outputs = (set(range(len(imported_op.outputs))) - used_outputs)\n        if (not unused_outputs):\n            continue\n        key = tf.compat.as_bytes(('loc:@' + imported_op_name))\n        if (imported_op.colocation_groups() != [key]):\n            raise ValueError((\"Internal error: tensors from op '%s' are partially remapped in import but op.colocation_groups=%s cannot be captured in a simple rewrite rule.\" % (imported_op_name, imported_op.colocation_groups())))\n        colocation_attr_map[key].Set([key], {'reason': (\"tensor '%s:%s' is not substituted by inputs\" % (imported_op_name, ','.join((str(i) for i in sorted(unused_outputs)))))})\n    return colocation_attr_map", "docstring": "Returns a dict mapping from pre-import to post-import colocation attrs.\n\nArgs:\ninput_map: as for fix_colocation_after_import.\nabsolute_import_scope: as for fix_colocation_after_import.\n\nReturns:\nA dict that maps bytes `\"loc:@\" + absolute_import_scope + \"/foo\"`\nto _ConsistentValues set to the lists of bytes `[\"loc:@...\", ...]`\naccording to the rewriting scheme of fix_colocation_after_import.\nIn case of an inconsistent rewriting, _ConsistentValue.has_error is true.", "source": "codesearchnet"}
{"code": "def split_list_by_n(l, n):\n    \n    n = max(1, n)\n    return list(l[i:i+n] for i in range(0, len(l), n))", "docstring": "Split a list into lists of size n.\n\nArgs:\nl: List of stuff.\nn: Size of new lists.\n\nReturns:\nlist: List of lists each of size n derived from l.", "source": "juraj-google-style"}
{"code": "def sasl_plain(self, name, password, identity=None):\n    if (identity is None):\n        identity = name\n    self.sasl('plain', name, password, identity)", "docstring": "Authenticate to a server using SASL plain, or does so on connection.\n\nArgs:\nname (str): Name to auth with.\npassword (str): Password to auth with.\nidentity (str): Identity to auth with (defaults to name).", "source": "codesearchnet"}
{"code": "def __init__(self, url=None, token=None, cert=None):\n        \n        token = token or os.environ.get('VAULT_TOKEN')\n        url = url or 'http:\n        self._client = hvac.Client(url=url, token=token, cert=cert)", "docstring": "Initialize the Class properties.\n\nArgs:\nurl (string): The URL to the value server.\ntoken (string): The value token.\ncert (string): The value cert.", "source": "juraj-google-style"}
{"code": "def validate(self, bigchain, current_transactions=[]):\n        \n        input_conditions = []\n\n        if self.operation == Transaction.CREATE:\n            duplicates = any(txn for txn in current_transactions if txn.id == self.id)\n            if bigchain.is_committed(self.id) or duplicates:\n                raise DuplicateTransaction('transaction `{}` already exists'\n                                           .format(self.id))\n\n            if not self.inputs_valid(input_conditions):\n                raise InvalidSignature('Transaction signature is invalid.')\n\n        elif self.operation == Transaction.TRANSFER:\n            self.validate_transfer_inputs(bigchain, current_transactions)\n\n        return self", "docstring": "Validate transaction spend\nArgs:\nbigchain (BigchainDB): an instantiated bigchaindb.BigchainDB object.\nReturns:\nThe transaction (Transaction) if the transaction is valid else it\nraises an exception describing the reason why the transaction is\ninvalid.\nRaises:\nValidationError: If the transaction is invalid", "source": "juraj-google-style"}
{"code": "def find_files(directory, pattern, recursively=True):\n    for (root, dirs, files) in os.walk(directory):\n        for basename in files:\n            if fnmatch.fnmatch(basename, pattern):\n                (yield (root, basename))\n        if (not recursively):\n            break", "docstring": "Yield a list of files with their base directories, recursively or not.\n\nReturns:\nA list of (base_directory, filename)\n\nArgs:\ndirectory: base directory to start the search.\npattern: fnmatch pattern for filenames.\ncomplete_filename: return complete filename or not?\nrecursively: do we recurse or not?", "source": "codesearchnet"}
{"code": "def count(self, entity, files=False):\n        \n        return self._find_entity(entity).count(files)", "docstring": "Return the count of unique values or files for the named entity.\n\nArgs:\nentity (str): The name of the entity.\nfiles (bool): If True, counts the number of filenames that contain\nat least one value of the entity, rather than the number of\nunique values of the entity.", "source": "juraj-google-style"}
{"code": "def create(cls, resource_id, *, account_id, properties=None, tags=None, location=None, auto_add=True, auto_commit=False):\n    if cls.get(resource_id):\n        raise ResourceException('Resource {} already exists'.format(resource_id))\n    res = Resource()\n    res.resource_id = resource_id\n    res.account_id = account_id\n    res.location = location\n    res.resource_type_id = ResourceType.get(cls.resource_type).resource_type_id\n    if properties:\n        for (name, value) in properties.items():\n            prop = ResourceProperty()\n            prop.resource_id = res.resource_id\n            prop.name = name\n            prop.value = (value.isoformat() if (type(value) == datetime) else value)\n            res.properties.append(prop)\n            db.session.add(prop)\n    if tags:\n        for (key, value) in tags.items():\n            if (type(value) != str):\n                raise ValueError('Invalid object type for tag value: {}'.format(key))\n            tag = Tag()\n            tag.resource_id = resource_id\n            tag.key = key\n            tag.value = value\n            res.tags.append(tag)\n            db.session.add(tag)\n    if auto_add:\n        db.session.add(res)\n        if auto_commit:\n            db.session.commit()\n        return cls.get(res.resource_id)\n    else:\n        return cls(res)", "docstring": "Creates a new Resource object with the properties and tags provided\n\nArgs:\nresource_id (str): Unique identifier for the resource object\naccount_id (int): Account ID which owns the resource\nproperties (dict): Dictionary of properties for the resource object.\ntags (dict): Key / value dictionary of tags. Values must be `str` types\nlocation (str): Location of the resource, if applicable\nauto_add (bool): Automatically add the new resource to the DB session. Default: True\nauto_commit (bool): Automatically commit the change to the database. Default: False", "source": "codesearchnet"}
{"code": "def MergeMessage(\n      self, source, destination,\n      replace_message_field=False, replace_repeated_field=False):\n    \n    tree = _FieldMaskTree(self)\n    tree.MergeMessage(\n        source, destination, replace_message_field, replace_repeated_field)", "docstring": "Merges fields specified in FieldMask from source to destination.\n\nArgs:\nsource: Source message.\ndestination: The destination message to be merged into.\nreplace_message_field: Replace message field if True. Merge message\nfield if False.\nreplace_repeated_field: Replace repeated field if True. Append\nelements of repeated field if False.", "source": "juraj-google-style"}
{"code": "def reboot(self, **params):\n        \n        outlet = params['outlet']\n\n        \n        self.tn.write('\\x1b\\r\\n')\n        self.until_done()\n        \n        self.tn.write('1\\r\\n')\n        self.until_done()\n        \n        self.tn.write('2\\r\\n')\n        self.until_done()\n        \n        self.tn.write('1\\r\\n')\n        self.until_done()\n        \n        self.tn.write('%d\\r\\n' % outlet)\n        self.until_done()\n        \n        self.tn.write('1\\r\\n')\n        self.until_done()\n        \n        self.tn.write('2\\r\\n')\n        self.until('to cancel')\n        self.tn.write('YES\\r\\n')\n        self.until('to continue')\n        self.tn.write('\\r\\n')\n        self.until_done()\n\n        time.sleep(5)\n        \n        self.tn.write('1\\r\\n')\n        self.until('to cancel')\n        self.tn.write('YES\\r\\n')\n        self.until('to continue')\n        self.tn.write('\\r\\n')\n        self.until_done()", "docstring": "Reboot outlet\n\nArgs:\nparams (dict), must contain parameter \"outlet\" - outlet number\n\nExample:\nparams = {'outlet': 1}", "source": "juraj-google-style"}
{"code": "def assert_text_equal(self, selector, value, testid=None, **kwargs):\n    self.info_log(('Assert text equal selector(%s) testid(%s)' % (selector, testid)))\n    highlight = kwargs.get('highlight', BROME_CONFIG['highlight']['highlight_on_assertion_success'])\n    self.debug_log(('effective highlight: %s' % highlight))\n    wait_until_visible = kwargs.get('wait_until_visible', BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible'])\n    self.debug_log(('effective wait_until_visible: %s' % wait_until_visible))\n    element = self.find(selector, raise_exception=False, wait_until_visible=wait_until_visible)\n    if element:\n        if (element.text == value):\n            if highlight:\n                element.highlight(highlight=BROME_CONFIG['highlight']['style_on_assertion_success'])\n            if (testid is not None):\n                self.create_test_result(testid, True)\n            return True\n        else:\n            if highlight:\n                element.highlight(style=BROME_CONFIG['highlight']['style_on_assertion_failure'])\n            if (testid is not None):\n                self.create_test_result(testid, False)\n            return False\n    else:\n        if (testid is not None):\n            self.create_test_result(testid, False)\n        return False", "docstring": "Assert that the element's text is equal to the provided value\n\nArgs:\nselector (str): the selector used to find the element\nvalue (str): the value that will be compare\nwith the element.text value\ntest_id (str): the test_id or a str\n\nKwargs:\nwait_until_visible (bool)\nhighlight (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "codesearchnet"}
{"code": "def execute_scheduler(self):\n    try:\n        self.scheduler.add_job(self.schedule_jobs, trigger='interval', name='schedule_jobs', minutes=15, start_date=(datetime.now() + timedelta(seconds=1)))\n        self.scheduler.add_job(self.process_status_queue, trigger='interval', name='process_status_queue', seconds=30, start_date=(datetime.now() + timedelta(seconds=5)), max_instances=1)\n        self.scheduler.start()\n    except KeyboardInterrupt:\n        self.scheduler.shutdown()", "docstring": "Main entry point for the scheduler. This method will start two scheduled jobs, `schedule_jobs` which takes\ncare of scheduling the actual SQS messaging and `process_status_queue` which will track the current status\nof the jobs as workers are executing them\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def bestfit_func(self, bestfit_x):\n        \n        if not self.done_bestfit:\n            raise KeyError(\"Do do_bestfit first\")\n        bestfit_y = self.fit_args[1] * (bestfit_x ** self.fit_args[0])\n        return bestfit_y", "docstring": "Returns bestfit_function\n\nargs:\nbestfit_x: scalar, array_like\nx value\nreturn: scalar, array_like\nbestfit y value", "source": "juraj-google-style"}
{"code": "def fmtVersion(*vsnparts):\n    \n    if len(vsnparts) < 1:\n        raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion',\n                                mesg='Not enough version parts to form a version string with.',)\n    ret = '.'.join([str(part).lower() for part in vsnparts])\n    return ret", "docstring": "Join a string of parts together with a . separator.\n\nArgs:\n*vsnparts:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _get_variable_nodes_from_graph_def(graph_def):\n    variables = [n for n in graph_def.node if n.op == 'VarHandleOp']\n    variable_name_map = dict(((n.name, n) for n in variables))\n    child_map = collections.defaultdict(lambda: [])\n    for n in graph_def.node:\n        for inp in n.input:\n            if not inp.startswith('^'):\n                child_map[inp].append(n)\n    variables = {}\n    for v_name, v_node in variable_name_map.items():\n        queue = list(child_map[v_name])\n        processed = set([])\n        while queue:\n            n_current = queue.pop()\n            if n_current.name in processed:\n                continue\n            processed.add(n_current.name)\n            if n_current.op in _PASS_THROUGH_VARIABLE_OPS:\n                children = child_map.get(n_current.name, [])\n                queue.extend(children)\n            elif n_current.op not in _READ_ONLY_VARIABLE_OPS:\n                variables[v_name] = (v_node, True)\n                queue = []\n        if v_name not in variables:\n            variables[v_name] = (v_node, False)\n    return variables", "docstring": "Get the list of Variable nodes from `graph_def`.\n\nArgs:\ngraph_def: An instance of `GraphDef`.  This GraphDef *must*\nhave already been optimized by Grappler.  In particular, function\ninlining must have already happened.\n\nReturns:\nA dict mapping string names of variables to tuples `(node_def, modified)`,\nwhere `node_def` is the `NodeDef` corresponding to variable, and `modified`\nis a python bool describing whether the variable is modified during runtime.", "source": "github-repos"}
{"code": "def __init__(self, c_q: int, c_k: int, c_v: int, c_hidden: int, no_heads: int, gating: bool=True):\n    super().__init__()\n    self.c_q = c_q\n    self.c_k = c_k\n    self.c_v = c_v\n    self.c_hidden = c_hidden\n    self.no_heads = no_heads\n    self.gating = gating\n    self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init='glorot')\n    self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init='glorot')\n    self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init='glorot')\n    self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init='final')\n    self.linear_g = None\n    if self.gating:\n        self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init='gating')\n    self.sigmoid = nn.Sigmoid()", "docstring": "Args:\nc_q:\nInput dimension of query data\nc_k:\nInput dimension of key data\nc_v:\nInput dimension of value data\nc_hidden:\nPer-head hidden dimension\nno_heads:\nNumber of attention heads\ngating:\nWhether the output should be gated using query data", "source": "github-repos"}
{"code": "def _scrub_method_name(self, method_name):\n        \n        if method_name not in self._scrubbed_method_names:\n            self._scrubbed_method_names[method_name] = (\n                scrub_method_name(method_name))\n\n        return self._scrubbed_method_names[method_name]", "docstring": "Scrubs a method name, returning result from local cache if available.\n\nThis method wraps fitparse.utils.scrub_method_name and memoizes results,\nas scrubbing a method name is expensive.\n\nArgs:\nmethod_name: Method name to scrub.\n\nReturns:\nScrubbed method name.", "source": "juraj-google-style"}
{"code": "def create(self, msgtype, *args, **kwargs):\n        \n        if msgtype not in self._messages:\n            raise ProtocolError(\"Unknown message type %r for protocol version %s\" % (msgtype, self._version))\n        return self._messages[msgtype].create(*args, **kwargs)", "docstring": "Create a new Message instance for the given type.\n\nArgs:\nmsgtype (str) :", "source": "juraj-google-style"}
{"code": "def compute_all_metrics_statistics(all_results):\n    statistics = {}\n    decode_inds = {}\n    all_metrics = all_results.keys()\n    for key in all_metrics:\n        values = all_results[key]\n        statistics[(key + '_MEAN')] = np.mean(values, axis=0)\n        statistics[(key + '_STD')] = np.std(values, axis=0)\n        (min_stats, min_decode_ind) = reduce_to_best_decode(values, np.argmin)\n        statistics[(key + '_MIN')] = min_stats\n        decode_inds[(key + '_MIN_DECODE')] = min_decode_ind\n        (max_stats, max_decode_ind) = reduce_to_best_decode(values, np.argmax)\n        statistics[(key + '_MAX')] = max_stats\n        decode_inds[(key + '_MAX_DECODE')] = max_decode_ind\n    for key in statistics:\n        statistics[key] = np.mean(statistics[key], axis=0)\n    return (statistics, decode_inds)", "docstring": "Computes statistics of metrics across multiple decodings.\n\nArgs:\nall_results: dict of 3-D numpy arrays.\nEach array has shape=(num_decodes, num_samples, num_frames).\nReturns:\nstatistics: dict of 1-D numpy arrays, shape=(num_frames).\nFirst the statistic (max/mean/std) is computed across the\ndecodes, then the mean is taken across num_samples.\ndecode_inds: dict of 1-D numpy arrays, shape=(num_samples,)\nEach element represents the index of the decode corresponding\nto the best statistic.", "source": "codesearchnet"}
{"code": "def fit(self, X, *args, **kwargs):\n        \n\n        self.constant_value = self._get_constant_value(X)\n\n        if self.constant_value is None:\n            if self.unfittable_model:\n                self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)\n            else:\n                self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)\n\n            for name in self.METHOD_NAMES:\n                attribute = getattr(self.__class__, name)\n                if isinstance(attribute, str):\n                    setattr(self, name, getattr(self.model, attribute))\n\n                elif attribute is None:\n                    setattr(self, name, missing_method_scipy_wrapper(lambda x: x))\n\n        else:\n            self._replace_constant_methods()\n\n        self.fitted = True", "docstring": "Fit scipy model to an array of values.\n\nArgs:\nX(`np.ndarray` or `pd.DataFrame`):  Datapoints to be estimated from. Must be 1-d\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __getIp6Address(self, addressType):\n    addrType = ['link local', 'global', 'rloc', 'mesh EID']\n    addrs = []\n    globalAddr = []\n    linkLocal64Addr = ''\n    rlocAddr = ''\n    meshEIDAddr = ''\n    addrs = self.__sendCommand('ipaddr')\n    for ip6Addr in addrs:\n        if (ip6Addr == 'Done'):\n            break\n        ip6AddrPrefix = ip6Addr.split(':')[0]\n        if (ip6AddrPrefix == 'fe80'):\n            if (ip6Addr.split(':')[4] != '0'):\n                linkLocal64Addr = ip6Addr\n        elif ip6Addr.startswith(self.meshLocalPrefix):\n            if (ip6Addr.split(':')[4] == '0'):\n                rlocAddr = ip6Addr\n            else:\n                meshEIDAddr = ip6Addr\n        elif (ip6Addr != None):\n            globalAddr.append(ip6Addr)\n        else:\n            pass\n    if (addressType == addrType[0]):\n        return linkLocal64Addr\n    elif (addressType == addrType[1]):\n        return globalAddr\n    elif (addressType == addrType[2]):\n        return rlocAddr\n    elif (addressType == addrType[3]):\n        return meshEIDAddr\n    else:\n        pass", "docstring": "get specific type of IPv6 address configured on thread device\n\nArgs:\naddressType: the specific type of IPv6 address\n\nlink local: link local unicast IPv6 address that's within one-hop scope\nglobal: global unicast IPv6 address\nrloc: mesh local unicast IPv6 address for routing in thread network\nmesh EID: mesh Endpoint Identifier\n\nReturns:\nIPv6 address string", "source": "codesearchnet"}
{"code": "def _duplicate_body_captures_in_cond(cond_graph, body_graph_captures):\n    types = [t.dtype.as_datatype_enum for t in body_graph_captures]\n    with cond_graph._c_graph.get() as c_graph:\n        placeholders = c_api.TF_CreatePlaceholders(c_graph, types, compat.as_str(_build_cond_placeholders_name_prefix(cond_graph)))\n    placeholder_ops = [ops.Operation._from_c_op(ph.oper, cond_graph) for ph in placeholders]\n    tensors = []\n    for op in placeholder_ops:\n        tensors.append(op.outputs[0])\n    tuples = zip(body_graph_captures, tensors)\n    keys = [id(t) for t in body_graph_captures]\n    for k, v in zip(keys, tuples):\n        cond_graph._function_captures.add_or_replace(key=k, external=v[0], internal=v[1], is_by_ref=False)\n    cond_graph.inputs.extend(tensors)", "docstring": "Creates placeholders for body captures in cond_graph.\n\nThis is needed to match signatures of cond and body graphs.\n\nArgs:\ncond_graph: cond branch graph\nbody_graph_captures: Tensors which were captured when building the\n`body_graph`.", "source": "github-repos"}
{"code": "def load_ini(self, ini_file):\n        \n        if ini_file and not os.path.exists(ini_file):\n            self.log.critical(f\"Settings file specified but not found. {ini_file}\")\n            sys.exit(1)\n        if not ini_file:\n            ini_file = f\"{self.cwd}/settings.ini\"\n        if os.path.exists(ini_file):\n            config = configparser.RawConfigParser(allow_no_value=True)\n            config.read(ini_file)\n            for key, value in self.spec.items():\n                entry = None\n                if value['type'] == str:\n                    entry = config.get(\"settings\", option=key.lower(), fallback=None)\n                elif value['type'] == bool:\n                    entry = config.getboolean(\"settings\", option=key.lower(), fallback=None)\n                elif value['type'] == int:\n                    entry = config.getint(\"settings\", option=key.lower(), fallback=None)\n                elif value['type'] == float:\n                    entry = config.getfloat(\"settings\", option=key.lower(), fallback=None)\n                elif value['type'] in [list, dict]:\n                    entries = config.get(\"settings\", option=key.lower(), fallback=None)\n                    if entries:\n                        try:\n                            entry = json.loads(entries)\n                        except json.decoder.JSONDecodeError as _err:  \n                            self.log.critical(f\"Error parsing json from ini file. {entries}\")\n                            sys.exit(1)\n                if entry is not None:\n                    setattr(self, key.upper(), entry)", "docstring": "Load the contents from the ini file\n\nArgs:\nini_file (str): The file from which the settings should be loaded", "source": "juraj-google-style"}
{"code": "def flux_minimization(model, fixed, solver, weights={}):\n    fba = FluxBalanceProblem(model, solver)\n    for (reaction_id, value) in iteritems(fixed):\n        flux = fba.get_flux_var(reaction_id)\n        fba.prob.add_linear_constraints((flux >= value))\n    fba.minimize_l1()\n    return ((reaction_id, fba.get_flux(reaction_id)) for reaction_id in model.reactions)", "docstring": "Minimize flux of all reactions while keeping certain fluxes fixed.\n\nThe fixed reactions are given in a dictionary as reaction id\nto value mapping. The weighted L1-norm of the fluxes is minimized.\n\nArgs:\nmodel: MetabolicModel to solve.\nfixed: dict of additional lower bounds on reaction fluxes.\nsolver: LP solver instance to use.\nweights: dict of weights on the L1-norm terms.\n\nReturns:\nAn iterator of reaction ID and reaction flux pairs.", "source": "codesearchnet"}
{"code": "class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__(config)\n        self.use_return_dict = config.use_return_dict\n        self.patcher = nn.Linear(config.patch_length, config.d_model)\n        if config.use_positional_encoding:\n            self.positional_encoder = PatchTSMixerPositionalEncoding(config=config)\n        else:\n            self.positional_encoder = None\n        self.mlp_mixer_encoder = PatchTSMixerBlock(config=config)\n        if config.post_init:\n            self.post_init()\n\n    @auto_docstring\n    def forward(self, past_values: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[Tuple, PatchTSMixerEncoderOutput]:\n        \n        return_dict = return_dict if return_dict is not None else self.use_return_dict\n        patches = self.patcher(past_values)\n        if self.positional_encoder is not None:\n            patches = self.positional_encoder(patches)\n        last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states)\n        if not return_dict:\n            return tuple((v for v in [last_hidden_state, hidden_states]))\n        return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states)", "docstring": "Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def write_eval_records(bt_table, game_data, last_game):\n    \n    eval_num = last_game\n\n    \n    GAMES_PER_COMMIT = 2000\n    for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):\n        assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), \"Prev row doesn't exists\"\n        assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, \"Row already exists\"\n\n        rows = []\n        for i, metadata in enumerate(games):\n            eval_num += 1\n            row_name = EVAL_PREFIX.format(eval_num)\n            row = bt_table.row(row_name)\n            for column, value in metadata:\n                row.set_cell(METADATA, column, value)\n            rows.append(row)\n            \n            if i < 5 or i + 5 > len(games):\n                print(\"\\t\", i, row_name, metadata[6][1])\n\n        if eval_num == last_game + len(games):\n            test = input(\"Commit ('y'/'yes' required): \")\n            if test.lower() not in ('y', 'yes'):\n                break\n\n        \n        \n\n        game_num_update = bt_table.row(TABLE_STATE)\n        game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)\n        print(TABLE_STATE, eval_num)\n\n        response = bt_table.mutate_rows(rows)\n\n        \n        any_bad = False\n        for i, status in enumerate(response):\n            if status.code is not 0:\n                print(\"Row number {} failed to write {}\".format(i, status))\n                any_bad = True\n        if any_bad:\n            break\n\n        game_num_update.commit()", "docstring": "Write all eval_records to eval_table\n\nIn addition to writing new rows table_state must be updated in\nrow `table_state` columns `metadata:eval_game_counter`\n\nArgs:\nbt_table: bigtable table to add rows to.\ngame_data:  metadata pairs (column name, value) for each eval record.\nlast_game:  last_game in metadata:table_state", "source": "juraj-google-style"}
{"code": "def stringize(self, rnf_profile=RnfProfile()):\n    sorted_segments = sorted(self.segments, key=(lambda x: (((((x.genome_id * (10 ** 23)) + (x.chr_id * (10 ** 21))) + ((x.left + ((int((x.left == 0)) * x.right) - 1)) * (10 ** 11))) + (x.right * (10 ** 1))) + int((x.direction == 'F')))))\n    segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]\n    read_tuple_name = '__'.join([self.prefix, format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width), ','.join(segments_strings), self.suffix])\n    return read_tuple_name", "docstring": "Create RNF representation of this read.\n\nArgs:\nread_tuple_id_width (int): Maximal expected string length of read tuple ID.\ngenome_id_width (int): Maximal expected string length of genome ID.\nchr_id_width (int): Maximal expected string length of chromosome ID.\ncoor_width (int): Maximal expected string length of a coordinate.", "source": "codesearchnet"}
{"code": "def register_auth_system(self, auth_system):\n    auth_system_settings = dbconfig.get('auth_system')\n    if (auth_system.name not in auth_system_settings['available']):\n        auth_system_settings['available'].append(auth_system.name)\n        dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))\n    if (auth_system.name == auth_system_settings['enabled'][0]):\n        self.active_auth_system = auth_system\n        auth_system().bootstrap()\n        logger.debug('Registered {} as the active auth system'.format(auth_system.name))\n        return True\n    else:\n        logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))\n        return False", "docstring": "Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered\nas the active auth system, else `False`\n\nArgs:\nauth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register\n\nReturns:\n`bool`", "source": "codesearchnet"}
{"code": "def get_atten(self, idx=0):\n        \n        if not self.is_open:\n            raise attenuator.Error(\n                \"Connection to attenuator at %s is not open!\" %\n                self._telnet_client.host)\n        if idx + 1 > self.path_count or idx < 0:\n            raise IndexError(\"Attenuator index out of range!\", self.path_count,\n                             idx)\n        atten_val_str = self._telnet_client.cmd(\"CHAN:%s:ATT?\" % (idx + 1))\n        atten_val = float(atten_val_str)\n        return atten_val", "docstring": "This function returns the current attenuation from an attenuator at a\ngiven index in the instrument.\n\nArgs:\nidx: This zero-based index is the identifier for a particular\nattenuator in an instrument.\n\nRaises:\nError: The underlying telnet connection to the instrument is not\nopen.\n\nReturns:\nA float that is the current attenuation value.", "source": "juraj-google-style"}
{"code": "def get_lock_config(self, device_label):\n    response = None\n    try:\n        response = requests.get(urls.lockconfig(self._giid, device_label), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)})\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)\n    return json.loads(response.text)", "docstring": "Get lock configuration\n\nArgs:\ndevice_label (str): device label of lock", "source": "codesearchnet"}
{"code": "def as_videos(content: ProcessorContentTypes, *, ignore_unsupported_types: bool=False) -> list[ProcessorPart]:\n    return _as_format_helper(content, lambda mime: mime.startswith('video/'), ignore_unsupported_types)", "docstring": "Returns the video parts from the content.\n\nArgs:\ncontent: Input content.\nignore_unsupported_types: By default if content contains non-video parts a\nValueError would be raised. This argument allows ingoring such parts.\n\nReturns:\nA list of video parts.", "source": "github-repos"}
{"code": "def _DownloadUrl(self, url, dest_dir):\n    \n    dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)\n    dest_file.close()\n    dest = dest_file.name\n\n    self.logger.info('Downloading url from %s to %s.', url, dest)\n    try:\n      urlretrieve.urlretrieve(url, dest)\n      return dest\n    except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n      self.logger.warning('Could not download %s. %s.', url, str(e))\n    except Exception as e:\n      self.logger.warning('Exception downloading %s. %s.', url, str(e))\n    return None", "docstring": "Download a script from a given URL.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "juraj-google-style"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. LUKE does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    requires_backends(self.extract_flattened_patches, 'torch')\n    image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)\n    image = torch.from_numpy(image)\n    patch_height, patch_width = (patch_size['height'], patch_size['width'])\n    image_height, image_width = get_image_size(image, ChannelDimension.FIRST)\n    scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))\n    num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)\n    num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)\n    resized_height = max(num_feasible_rows * patch_height, 1)\n    resized_width = max(num_feasible_cols * patch_width, 1)\n    image = torch.nn.functional.interpolate(image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=False, antialias=True).squeeze(0)\n    patches = torch_extract_patches(image, patch_height, patch_width)\n    patches_shape = patches.shape\n    rows = patches_shape[1]\n    columns = patches_shape[2]\n    depth = patches_shape[3]\n    patches = patches.reshape([rows * columns, depth])\n    row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])\n    col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])\n    row_ids += 1\n    col_ids += 1\n    row_ids = row_ids.to(torch.float32)\n    col_ids = col_ids.to(torch.float32)\n    result = torch.cat([row_ids, col_ids, patches], -1)\n    result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - rows * columns]).float()\n    result = to_numpy_array(result)\n    return result", "docstring": "Extract flattened patches from an image.\n\nArgs:\nimage (`np.ndarray`):\nImage to extract flattened patches from.\nmax_patches (`int`):\nMaximum number of patches to extract.\npatch_size (`dict`):\nDictionary containing the patch height and width.\n\nReturns:\nresult (`np.ndarray`):\nA sequence of `max_patches` flattened patches.", "source": "github-repos"}
{"code": "def _read_from_hdx(self, object_type, value, fieldname='id',\n                       action=None, **kwargs):\n        \n        \n        if not fieldname:\n            raise HDXError('Empty %s field name!' % object_type)\n        if action is None:\n            action = self.actions()['show']\n        data = {fieldname: value}\n        data.update(kwargs)\n        try:\n            result = self.configuration.call_remoteckan(action, data)\n            return True, result\n        except NotFound:\n            return False, '%s=%s: not found!' % (fieldname, value)\n        except Exception as e:\n            raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e)", "docstring": "Makes a read call to HDX passing in given parameter.\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nvalue (str): Value of HDX field\nfieldname (str): HDX field name. Defaults to id.\naction (Optional[str]): Replacement CKAN action url to use. Defaults to None.\n**kwargs: Other fields to pass to CKAN.\n\nReturns:\nTuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)", "source": "juraj-google-style"}
{"code": "def _update_from_file(self, filename):\n    if os.path.exists(filename):\n        try:\n            with open(filename, 'r') as config_file:\n                yaml_dict = yaml.safe_load(config_file.read())\n                if (yaml_dict is not None):\n                    self._update_dict(self._config, yaml_dict)\n        except IsADirectoryError:\n            raise ConfigLoadError('The specified configuration file is a directory not a file')\n    else:\n        raise ConfigLoadError('The config file {} does not exist'.format(filename))", "docstring": "Helper method to update an existing configuration with the values from a file.\n\nLoads a configuration file and replaces all values in the existing configuration\ndictionary with the values from the file.\n\nArgs:\nfilename (str): The path and name to the configuration file.", "source": "codesearchnet"}
{"code": "def MapTuple(fn, *args, **kwargs):\n    if not callable(fn):\n        raise TypeError('MapTuple can be used only with callable objects. Received %r instead.' % fn)\n    label = 'MapTuple(%s)' % ptransform.label_from_callable(fn)\n    arg_names, defaults = get_function_args_defaults(fn)\n    num_defaults = len(defaults)\n    if num_defaults < len(args) + len(kwargs):\n        raise TypeError('Side inputs must have defaults for MapTuple.')\n    if defaults or args or kwargs:\n        wrapper = lambda x, *args, **kwargs: [fn(*tuple(x) + args, **kwargs)]\n    else:\n        wrapper = lambda x: [fn(*x)]\n    type_hints = get_type_hints(fn).with_defaults(typehints.decorators.IOTypeHints.from_callable(fn))\n    if type_hints.input_types is not None:\n        pass\n    output_hint = type_hints.simple_output_type(label)\n    if output_hint:\n        wrapper = with_output_types(typehints.Iterable[_strip_output_annotations(output_hint)])(wrapper)\n    modified_arg_names = ['tuple_element'] + arg_names[-num_defaults:]\n    modified_argspec = (modified_arg_names, defaults)\n    pardo = ParDo(CallableWrapperDoFn(wrapper, fullargspec=modified_argspec), *args, **kwargs)\n    pardo.label = label\n    return pardo", "docstring": ":func:`MapTuple` is like :func:`Map` but expects tuple inputs and\nflattens them into multiple input arguments.\n\nIn other words\n\n\"SwapKV\" >> beam.Map(lambda kv: (kv[1], kv[0]))\n\nis equivalent to\n\n\"SwapKV\" >> beam.MapTuple(lambda k, v: (v, k))\n\nThis can be useful when processing a PCollection of tuples\n(e.g. key-value pairs).\n\nArgs:\nfn (callable): a callable object.\n*args: positional arguments passed to the transform callable.\n**kwargs: keyword arguments passed to the transform callable.\n\nReturns:\n~apache_beam.pvalue.PCollection:\nA :class:`~apache_beam.pvalue.PCollection` containing the\n:func:`MapTuple` outputs.\n\nRaises:\nTypeError: If the **fn** passed as argument is not a callable.\nTypical error is to pass a :class:`DoFn` instance which is supported only\nfor :class:`ParDo`.", "source": "github-repos"}
{"code": "def subgraph(self, nodeids):\n        \n        _eps, _vars = self._eps, self._vars\n        _hcons, _icons = self._hcons, self._icons\n        top = index = xarg = None\n        eps = [_eps[nid] for nid in nodeids]\n        lbls = set(ep[2] for ep in eps)\n        hcons = []\n        icons = []\n        subvars = {}\n        if self.top:\n            top = self.top\n            tophc = _hcons.get(top, None)\n            if tophc is not None and tophc[2] in lbls:\n                subvars[top] = {}\n            elif top not in lbls:\n                top = None  \n        \n        \n        if self.xarg:\n            xarg = self.xarg\n            subvars[self.xarg] = _vars[self.xarg]['props']\n        subvars.update((lbl, {}) for lbl in lbls)\n        subvars.update(\n            (var, _vars[var]['props'])\n            for ep in eps for var in ep[3].values()\n            if var in _vars\n        )\n        if self.index in subvars:\n            index = self.index\n        \n        for var in subvars:\n            hc = _hcons.get(var, None)\n            if hc is not None and hc[2] in lbls:\n                hcons.append(hc)\n            for ic in _icons.get(var, []):\n                if ic[0] in subvars and ic[2] in subvars:\n                    icons.append(ic)\n        return Xmrs(\n            top=top, index=index, xarg=xarg,\n            eps=eps, hcons=hcons, icons=icons, vars=subvars,\n            lnk=self.lnk, surface=self.surface, identifier=self.identifier\n        )", "docstring": "Return an Xmrs object with only the specified *nodeids*.\n\nNecessary variables and arguments are also included in order to\nconnect any nodes that are connected in the original Xmrs.\n\nArgs:\nnodeids: the nodeids of the nodes/EPs to include in the\nsubgraph.\nReturns:\nAn :class:`Xmrs` object.", "source": "juraj-google-style"}
{"code": "def __init__(self, encoding=None):\n    \n    super(DSVParser, self).__init__()\n    self._encoding = encoding\n    if py2to3.PY_2:\n      self._end_of_line = b'\\n'\n    else:\n      self._end_of_line = '\\n'\n    self._maximum_line_length = (\n        len(self._end_of_line) +\n        len(self.COLUMNS) * (self.FIELD_SIZE_LIMIT + len(self.DELIMITER)))", "docstring": "Initializes a delimiter separated values (DSV) parser.\n\nArgs:\nencoding (Optional[str]): encoding used in the DSV file, where None\nindicates the codepage of the parser mediator should be used.", "source": "juraj-google-style"}
{"code": "def _replace_child_layer_functions(layer, serialization_cache):\n    original_fns = {}\n\n    def replace_layer_functions(child_layer, serialized_fns):\n        \n        original_fns[child_layer] = {'call': child_layer.call, '_activity_regularizer': child_layer._activity_regularizer}\n        with utils.no_automatic_dependency_tracking_scope(child_layer):\n            try:\n                child_layer._activity_regularizer = serialized_fns.get('activity_regularizer_fn')\n            except AttributeError:\n                pass\n            child_layer.call = utils.use_wrapped_call(child_layer, serialized_fns['call_and_return_conditional_losses'], default_training_value=False)\n\n    def replace_metric_functions(child_layer, serialized_fns):\n        \n        original_fns[child_layer] = {'__call__': child_layer.__call__, 'result': child_layer.result, 'update_state': child_layer.update_state}\n        with utils.no_automatic_dependency_tracking_scope(child_layer):\n            child_layer.__call__ = serialized_fns['__call__']\n            child_layer.result = serialized_fns['result']\n            child_layer.update_state = serialized_fns['update_state']\n    for child_layer in utils.list_all_layers(layer):\n        if isinstance(child_layer, input_layer.InputLayer):\n            continue\n        if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:\n            serialized_functions = child_layer._trackable_saved_model_saver._get_serialized_attributes(serialization_cache).functions\n        else:\n            serialized_functions = serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions\n        if not serialized_functions:\n            continue\n        if isinstance(child_layer, metrics.Metric):\n            replace_metric_functions(child_layer, serialized_functions)\n        else:\n            replace_layer_functions(child_layer, serialized_functions)\n    return original_fns", "docstring": "Replaces functions in the children layers with wrapped tf.functions.\n\nThis step allows functions from parent layers to reference the wrapped\nfunctions from their children layers instead of retracing the ops.\n\nThis function also resets all losses stored in the layer. These are stored in\nthe returned dictionary. Use `_restore_child_layer_functions` to restore\nthe original attributes.\n\nArgs:\nlayer: Keras Layer object.\nserialization_cache: Dictionary shared between all objects during\nserialization.\n\nReturns:\nDictionary mapping layer objects -> original functions and losses:\n{ Child layer 1: {\n'losses': Original losses,\n'call': Original call function\n'_activity_regularizer': Original activity regularizer},\nChild layer 2: ...\n}", "source": "github-repos"}
{"code": "def __init__(self, loaders):\n        \n        if not loaders:\n            \n            if any(self._iter_config_props()):\n                raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')\n\n        self._update_property_keys()\n\n        self.varz = {}\n        self._loaders = loaders\n        self._load()", "docstring": "Load values into the class's ConfigProperty attributes (validating types if possible)\n\nArgs:\nloaders: iterable of AbstractLoader instances\nConfigProperty values are loaded from these sources; and the order indicates\npreference.", "source": "juraj-google-style"}
{"code": "def groups_setPurpose(self, *, channel: str, purpose: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'purpose': purpose})\n    return self.api_call('groups.setPurpose', json=kwargs)", "docstring": "Sets the purpose for a private channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'G1234567890'\npurpose (str): The new purpose for the channel. e.g. 'My Purpose'", "source": "codesearchnet"}
{"code": "def get_enterprise_sso_uid(self, obj):\n        \n        \n        \n        enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()\n\n        return enterprise_learner and enterprise_learner.get_remote_id()", "docstring": "Get enterprise SSO UID.\n\nArguments:\nobj (User): Django User object\n\nReturns:\n(str): string containing UUID for enterprise customer's Identity Provider.", "source": "juraj-google-style"}
{"code": "def local_services(self):\n    if (not self._loop.inside_loop()):\n        self._state_lock.acquire()\n    try:\n        return sorted([(index, name) for (index, name) in self._name_map.items()], key=(lambda element: element[0]))\n    finally:\n        if (not self._loop.inside_loop()):\n            self._state_lock.release()", "docstring": "Get a list of id, name pairs for all of the known synced services.\n\nThis method is safe to call outside of the background event loop\nwithout any race condition.  Internally it uses a thread-safe mutex to\nprotect the local copies of supervisor data and ensure that it cannot\nchange while this method is iterating over it.\n\nReturns:\nlist (id, name): A list of tuples with id and service name sorted by id\nfrom low to high", "source": "codesearchnet"}
{"code": "def format_returnvalue(self, value):\n        \n\n        self._ensure_loaded()\n\n        if not self.return_info.is_data:\n            return None\n\n        \n        if self.return_info.type_name is not None:\n            return typeinfo.type_system.format_value(value, self.return_info.type_name, self.return_info.formatter)\n\n        \n        return self.return_info.formatter(value)", "docstring": "Format the return value of this function as a string.\n\nArgs:\nvalue (object): The return value that we are supposed to format.\n\nReturns:\nstr: The formatted return value, or None if this function indicates\nthat it does not return data", "source": "juraj-google-style"}
{"code": "def copy(src, dst):\n    \n    \n    src, src_is_storage = format_and_is_storage(src)\n    dst, dst_is_storage = format_and_is_storage(dst)\n\n    \n    if not src_is_storage and not dst_is_storage:\n        return shutil_copy(src, dst)\n\n    with handle_os_exceptions():\n        \n        if not hasattr(dst, 'read'):\n            try:\n                \n                if isdir(dst):\n                    dst = join(dst, basename(src))\n\n                \n                elif not isdir(dirname(dst)):\n                    raise IOError(\"No such file or directory: '%s'\" % dst)\n\n            except ObjectPermissionError:\n                \n                \n                pass\n\n        \n        _copy(src, dst, src_is_storage, dst_is_storage)", "docstring": "Copies a source file to a destination file or directory.\n\nEquivalent to \"shutil.copy\".\n\nSource and destination can also be binary opened file-like objects.\n\nArgs:\nsrc (path-like object or file-like object): Source file.\ndst (path-like object or file-like object):\nDestination file or directory.\n\nRaises:\nIOError: Destination directory not found.", "source": "juraj-google-style"}
{"code": "def _use_datastore(self, key, options=None):\n    \n    flag = ContextOptions.use_datastore(options)\n    if flag is None:\n      flag = self._datastore_policy(key)\n    if flag is None:\n      flag = ContextOptions.use_datastore(self._conn.config)\n    if flag is None:\n      flag = True\n    return flag", "docstring": "Return whether to use the datastore for this key.\n\nArgs:\nkey: Key instance.\noptions: ContextOptions instance, or None.\n\nReturns:\nTrue if the datastore should be used, False otherwise.", "source": "juraj-google-style"}
{"code": "def _find_codopant(target, oxidation_state, allowed_elements=None):\n    \n    ref_radius = target.ionic_radius\n    candidates = []\n    symbols = allowed_elements or [el.symbol for el in Element]\n    for sym in symbols:\n        try:\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"ignore\")\n                sp = Specie(sym, oxidation_state)\n                r = sp.ionic_radius\n                if r is not None:\n                    candidates.append((r, sp))\n        except:\n            pass\n    return min(candidates, key=lambda l: abs(l[0] / ref_radius - 1))[1]", "docstring": "Finds the element from \"allowed elements\" that (i) possesses the desired\n\"oxidation state\" and (ii) is closest in ionic radius to the target specie\n\nArgs:\ntarget: (Specie) provides target ionic radius.\noxidation_state: (float) codopant oxidation state.\nallowed_elements: ([str]) List of allowed elements. If None,\nall elements are tried.\n\nReturns:\n(Specie) with oxidation_state that has ionic radius closest to\ntarget.", "source": "juraj-google-style"}
{"code": "def restore(self, restored_tensors, restored_shapes):\n    raise ValueError('Calling an abstract method.')", "docstring": "Restores this object from 'restored_tensors'.\n\nArgs:\nrestored_tensors: the tensors that were loaded from a checkpoint\nrestored_shapes: the shapes this object should conform to after\nrestore, or None.\n\nReturns:\nAn operation that restores the state of the object.\n\nRaises:\nValueError: If the object cannot be restored using the provided\nparameters.", "source": "github-repos"}
{"code": "def all_logging_disabled(highest_level=logging.CRITICAL):\n    \n    previous_level = logging.root.manager.disable\n    logging.disable(highest_level)\n    try:\n        yield\n    finally:\n        logging.disable(previous_level)", "docstring": "Disable all logging temporarily.\n\nA context manager that will prevent any logging messages triggered during the body from being processed.\n\nArgs:\nhighest_level: the maximum logging level that is being blocked", "source": "juraj-google-style"}
{"code": "def core_assignment(self) -> np.ndarray:\n    return self._core_assignment", "docstring": "The logical to physical core mapping.\n\nReturns:\nAn integer numpy array of rank 3, with shape\n`[num_replicas, num_cores_per_replica, topology_rank]`. Maps\n(replica, logical core) pairs to physical topology coordinates.", "source": "github-repos"}
{"code": "def eval_autoregressive(self, features=None, decode_length=50):\n    \n    results = self._slow_greedy_infer(features, decode_length=decode_length)\n    return results[\"logits\"], results[\"losses\"]", "docstring": "Autoregressive eval.\n\nQuadratic time in decode_length.\n\nArgs:\nfeatures: an map of string to `Tensor`\ndecode_length: an integer.  How many additional timesteps to decode.\n\nReturns:\nlogits: `Tensor`\nlosses: a dictionary: {loss-name (string): floating point `Scalar`}.\nContains a single key \"training\".", "source": "juraj-google-style"}
{"code": "def _is_empty_observation_data(feature_ndims, observation_index_points, observations):\n    if ((observation_index_points is None) and (observations is None)):\n        return True\n    num_obs = tf.compat.dimension_value(observation_index_points.shape[(- (feature_ndims + 1))])\n    if ((num_obs is not None) and (num_obs == 0)):\n        return True\n    return False", "docstring": "Returns `True` if given observation data is empty.\n\nEmptiness means either\n1. Both `observation_index_points` and `observations` are `None`, or\n2. the \"number of observations\" shape is 0. The shape of\n`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the\nnumber of observations and the `f`s are feature dims. Thus, we look at the\nshape element just to the left of the leftmost feature dim. If that shape is\nzero, we consider the data empty.\n\nWe don't check the shape of observations; validations are checked elsewhere in\nthe calling code, to ensure these shapes are consistent.\n\nArgs:\nfeature_ndims: the number of feature dims, as reported by the GP kernel.\nobservation_index_points: the observation data locations in the index set.\nobservations: the observation data.\n\nReturns:\nis_empty: True if the data were deemed to be empty.", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:\n    residual = hidden_states\n    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n    hidden_states = residual + hidden_states\n    hidden_states = self.self_attn_layer_norm(hidden_states)\n    residual = hidden_states\n    if padding_mask is not None:\n        hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)\n    hidden_states = self.conv1(hidden_states.transpose(1, 2)).transpose(1, 2)\n    if padding_mask is not None:\n        hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)\n    hidden_states = self.activation_fn(hidden_states)\n    hidden_states = self.conv2(hidden_states.transpose(1, 2)).transpose(1, 2)\n    hidden_states = self.conv_dropout(hidden_states)\n    hidden_states = residual + hidden_states\n    hidden_states = self.conv_layer_norm(hidden_states)\n    outputs = (hidden_states, present_key_value)\n    if output_attentions:\n        outputs += self_attn_weights\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\ninput to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`):\nattention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very\nlarge negative values.\npadding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nIndicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*\nor 0 for *masked*\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def WriteTaskCompletion(self, aborted=False):\n    self._RaiseIfNotWritable()\n    if (self._storage_type != definitions.STORAGE_TYPE_TASK):\n        raise IOError('Unsupported storage type.')\n    self._task.aborted = aborted\n    task_completion = self._task.CreateTaskCompletion()\n    self._storage_file.WriteTaskCompletion(task_completion)", "docstring": "Writes task completion information.\n\nArgs:\naborted (Optional[bool]): True if the session was aborted.\n\nRaises:\nIOError: if the storage type is not supported or\nwhen the storage writer is closed.\nOSError: if the storage type is not supported or\nwhen the storage writer is closed.", "source": "codesearchnet"}
{"code": "def model(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = array_ops.gather_v2(self.embedding_w, input_tensor)\n    out = nn_ops.conv2d(out, self.conv_filters, strides=(1, 1, 2, 1), dilations=(1, 1, 1, 1), padding='SAME', data_format='NHWC')\n    if is_qat_model:\n        out = array_ops.fake_quant_with_min_max_args(out, min=-0.1, max=0.2, num_bits=8, narrow_range=False)\n        second_conv_filters = array_ops.fake_quant_with_min_max_args(self.second_conv_filters, min=-0.1, max=0.2, num_bits=8, narrow_range=True)\n    else:\n        second_conv_filters = self.second_conv_filters\n    out = nn_ops.conv2d(out, second_conv_filters, strides=(1, 1, 2, 1), dilations=(1, 1, 1, 1), padding='SAME', data_format='NHWC')\n    if is_qat_model:\n        out = array_ops.fake_quant_with_min_max_args(out, min=-0.1, max=0.2, num_bits=8, narrow_range=False)\n    return {'output': out}", "docstring": "Performs a gather and a 2D convolution operation.\n\nArgs:\ninput_tensor: Input tensor to perform operation on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def add_positional_embedding(x, max_length, name=None, positions=None):\n  \n  with tf.name_scope(\"add_positional_embedding\"):\n    _, length, depth = common_layers.shape_list(x)\n    var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)\n    if positions is None:\n      pad_length = tf.maximum(0, length - max_length)\n      sliced = tf.cond(\n          tf.less(length, max_length),\n          lambda: tf.slice(var, [0, 0], [length, -1]),\n          lambda: tf.pad(var, [[0, pad_length], [0, 0]]))\n      return x + tf.expand_dims(sliced, 0)\n    else:\n      return x + tf.gather(var, tf.to_int32(positions))", "docstring": "Adds positional embedding.\n\nArgs:\nx: Tensor with shape [batch, length, depth].\nmax_length: int representing static maximum size of any dimension.\nname: str representing name of the embedding tf.Variable.\npositions: Tensor with shape [batch, length].\n\nReturns:\nTensor of same shape as x.", "source": "juraj-google-style"}
{"code": "def _FillEventSourceHeap(\n      self, storage_writer, event_source_heap, start_with_first=False):\n    \n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('fill_event_source_heap')\n\n    if self._processing_profiler:\n      self._processing_profiler.StartTiming('get_event_source')\n\n    if start_with_first:\n      event_source = storage_writer.GetFirstWrittenEventSource()\n    else:\n      event_source = storage_writer.GetNextWrittenEventSource()\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('get_event_source')\n\n    while event_source:\n      event_source_heap.PushEventSource(event_source)\n      if event_source_heap.IsFull():\n        break\n\n      if self._processing_profiler:\n        self._processing_profiler.StartTiming('get_event_source')\n\n      event_source = storage_writer.GetNextWrittenEventSource()\n\n      if self._processing_profiler:\n        self._processing_profiler.StopTiming('get_event_source')\n\n    if self._processing_profiler:\n      self._processing_profiler.StopTiming('fill_event_source_heap')", "docstring": "Fills the event source heap with the available written event sources.\n\nArgs:\nstorage_writer (StorageWriter): storage writer for a session storage.\nevent_source_heap (_EventSourceHeap): event source heap.\nstart_with_first (Optional[bool]): True if the function should start\nwith the first written event source.", "source": "juraj-google-style"}
{"code": "def stretch_hist_equalize(self, approximate=False):\n    logger.info('Perform a histogram equalized contrast stretch.')\n    nwidth = 2048.0\n    logger.debug(('Make histogram bins having equal amount of data, ' + 'using numpy percentile function:'))\n\n    def _band_hist(band_data):\n        cdf = da.arange(0.0, 1.0, (1.0 / nwidth), chunks=nwidth)\n        if approximate:\n            flat_data = band_data.ravel()\n            bins = da.percentile(flat_data[da.notnull(flat_data)], (cdf * 100.0))\n        else:\n            bins = dask.delayed(np.nanpercentile)(band_data, (cdf * 100.0))\n            bins = da.from_delayed(bins, shape=(nwidth,), dtype=cdf.dtype)\n        res = dask.delayed(np.interp)(band_data, bins, cdf)\n        res = da.from_delayed(res, shape=band_data.shape, dtype=band_data.dtype)\n        return res\n    band_results = []\n    for band in self.data['bands'].values:\n        if (band == 'A'):\n            continue\n        band_data = self.data.sel(bands=band)\n        res = _band_hist(band_data.data)\n        band_results.append(res)\n    if ('A' in self.data.coords['bands'].values):\n        band_results.append(self.data.sel(bands='A'))\n    self.data.data = da.stack(band_results, axis=self.data.dims.index('bands'))", "docstring": "Stretch the current image's colors through histogram equalization.\n\nArgs:\napproximate (bool): Use a faster less-accurate percentile\ncalculation. At the time of writing the dask\nversion of `percentile` is not as accurate as\nthe numpy version. This will likely change in\nthe future. Current dask version 0.17.", "source": "codesearchnet"}
{"code": "def create_index(self, model, waiting_models):\n        \n        bucket_name = model._get_bucket_name()\n        bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)\n        index_name = \"%s_%s\" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)\n        bucket = bucket_type.bucket(bucket_name)\n        try:\n            client.get_search_index(index_name)\n            if not (bucket.get_property('search_index') == index_name):\n                bucket.set_property('search_index', index_name)\n                print(\"+ %s (%s) search index is created.\" % (model.__name__, index_name))\n        except RiakError:\n            try:\n                client.create_search_index(index_name, index_name, self.n_val)\n                bucket.set_property('search_index', index_name)\n                print(\"+ %s (%s) search index is created.\" % (model.__name__, index_name))\n            except RiakError:\n                print(\"+ %s (%s) search index checking operation is taken to queue.\" % (\n                model.__name__, index_name))\n                waiting_models.append(model)", "docstring": "Creates search indexes.\n\nArgs:\nmodel: model to execute\nwaiting_models: if riak can't return response immediately, model is taken to queue.\nAfter first execution session, method is executed with waiting models and controlled.\nAnd be ensured that all given models are executed properly.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def getConfig(self, section = None):\n\t\t\n\t\tdata = {}\n\t\tif section is None:\n\t\t\tfor s in self.config.sections():\n\t\t\t\tif '/' in s:\n\t\t\t\t\t\n\t\t\t\t\tparent, _s = s.split('/')\n\t\t\t\t\tdata[parent][_s] = dict(self.config.items(s))\n\t\t\t\telse:\n\t\t\t\t\tdata[s] = dict(self.config.items(s))\n\t\telse:\n\t\t\t\n\t\t\tdata = dict(self.config.items(section))\n\t\treturn data", "docstring": "Returns a dictionary which contains the current config. If a section is setted,\nonly will returns the section config\n\nArgs:\nsection (str): (Optional) Section name.\n\nReturns:\ndict: Representation of current config", "source": "juraj-google-style"}
{"code": "def _parse_state(self, config):\n        \n        value = STATE_RE.search(config).group('value')\n        return dict(state=value)", "docstring": "_parse_state scans the provided configuration block and extracts\nthe vlan state value.  The config block is expected to always return\nthe vlan state config.  The return dict is inteded to be merged into\nthe response dict.\n\nArgs:\nconfig (str): The vlan configuration block from the nodes\nrunning configuration\n\nReturns:\ndict: resource dict attribute", "source": "juraj-google-style"}
{"code": "def _validate_namespace(self, namespace):\n    if (self._namespace_regex.fullmatch(namespace) is None):\n        LOGGER.debug('Invalid namespace: %s', namespace)\n        raise _ResponseFailed(self._status.INVALID_ADDRESS)", "docstring": "Validates a namespace, raising a ResponseFailed error if invalid.\n\nArgs:\nstate_root (str): The state_root to validate\n\nRaises:\nResponseFailed: The state_root was invalid, and a status of\nINVALID_ROOT will be sent with the response.", "source": "codesearchnet"}
{"code": "def subtract_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n    pass", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `subtract_period_and_roll` with\nPeriodType.DAY. For example, subtracting 5 business days from Friday gives\nthe previous Friday (unless there are holidays on this week or previous\nFriday). Subtracting 5 days and rolling means landing on Sunday and then\nrolling either to Monday or to Friday, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the subtraction. If `roll_convention`\nis `NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\ndate_tensor: DateTensor of dates to advance from.\nnum_days: Tensor of int32 type broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def _find_bad_transition(self, mma, w_string):\n        \n        conj_out = mma.consume_input(w_string)\n        targ_out = self._membership_query(w_string)\n        \n        \n        length = min(len(conj_out), len(targ_out))\n        diff = [i for i in range(length)\n                if conj_out[i] != targ_out[i]]\n        if len(diff) == 0:\n            diff_index = len(targ_out)\n        else:\n            diff_index = diff[0]\n\n        low = 0\n        high = len(w_string)\n        while True:\n            i = (low + high) / 2\n            length = len(self._membership_query(w_string[:i]))\n            if length == diff_index + 1:\n                return w_string[:i]\n            elif length < diff_index + 1:\n                low = i + 1\n            else:\n                high = i - 1", "docstring": "Checks for bad DFA transitions using the examined string\nArgs:\nmma (DFA): The hypothesis automaton\nw_string (str): The examined string to be consumed\nReturns:\nstr: The prefix of the examined string that matches", "source": "juraj-google-style"}
{"code": "def get_pk_attrnames(obj) -> List[str]:\n    \n    return [attrname\n            for attrname, column in gen_columns(obj)\n            if column.primary_key]", "docstring": "Asks an SQLAlchemy ORM object: \"what are your primary key(s)?\"\n\nArgs:\nobj: SQLAlchemy ORM object\n\nReturns:\nlist of attribute names of primary-key columns", "source": "juraj-google-style"}
{"code": "def _get_edges(self):\n    if (self._edges is None):\n        self._edges = self._compute_edges()\n    return self._edges", "docstring": "Get the edges for the current surface.\n\nIf they haven't been computed yet, first compute and store them.\n\nThis is provided as a means for internal calls to get the edges\nwithout copying (since :attr:`.edges` copies before giving to\na user to keep the stored data immutable).\n\nReturns:\nTuple[~bezier.curve.Curve, ~bezier.curve.Curve, \\\n~bezier.curve.Curve]: The edges of\nthe surface.", "source": "codesearchnet"}
{"code": "def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):\n  \n  with tf.Session() as sess:\n    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n    if ckpt and ckpt.model_checkpoint_path:\n      print(\"ckpt.model_checkpoint_path: {0}\".format(ckpt.model_checkpoint_path))\n      saver.restore(sess, ckpt.model_checkpoint_path)\n\n      \n      \n      \n      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n      print('Successfully loaded model from %s at step=%s.' %\n            (ckpt.model_checkpoint_path, global_step))\n    else:\n      print('No checkpoint file found')\n      return\n\n    \n    coord = tf.train.Coordinator()\n    try:\n      threads = []\n      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,\n                                         start=True))\n\n      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))\n      \n      count_top_1 = 0.0\n      count_top_5 = 0.0\n      total_sample_count = num_iter * FLAGS.batch_size\n      step = 0\n\n      print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))\n      start_time = time.time()\n      while step < num_iter and not coord.should_stop():\n        top_1, top_5 = sess.run([top_1_op, top_5_op])\n        count_top_1 += np.sum(top_1)\n        count_top_5 += np.sum(top_5)\n        step += 1\n        if step % 20 == 0:\n          duration = time.time() - start_time\n          sec_per_batch = duration / 20.0\n          examples_per_sec = FLAGS.batch_size / sec_per_batch\n          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f'\n                'sec/batch)' % (datetime.now(), step, num_iter,\n                                examples_per_sec, sec_per_batch))\n          start_time = time.time()\n\n      \n      precision_at_1 = count_top_1 / total_sample_count\n      recall_at_5 = count_top_5 / total_sample_count\n      print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %\n            (datetime.now(), precision_at_1, recall_at_5, total_sample_count))\n\n      summary = tf.Summary()\n      summary.ParseFromString(sess.run(summary_op))\n      summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)\n      summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)\n      summary_writer.add_summary(summary, global_step)\n\n    except Exception as e:  \n      coord.request_stop(e)\n\n    coord.request_stop()\n    coord.join(threads, stop_grace_period_secs=10)", "docstring": "Runs Eval once.\n\nArgs:\nsaver: Saver.\nsummary_writer: Summary writer.\ntop_1_op: Top 1 op.\ntop_5_op: Top 5 op.\nsummary_op: Summary op.", "source": "juraj-google-style"}
{"code": "def _next_dna(self, dna: Optional[DNA]=None) -> Optional[DNA]:\n    if self.next_dna_fn is None:\n        cls_name = self.hyper_type or self.__class__.__name__\n        raise NotImplementedError(f'`next_dna` is not supported on {cls_name!r}.')\n    return self.next_dna_fn(dna)", "docstring": "Returns the next DNA in the space represented by this spec.\n\nArgs:\ndna: The DNA whose next will be returned. If None, `next_dna` will return\nthe first DNA.\n\nReturns:\nThe next DNA or None if there is no next DNA.", "source": "github-repos"}
{"code": "def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':\n        \n        t = tf.reshape(self.tensor, shape)\n        scope = self.scope.as_list()\n        batch = self.batch\n        return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the reshape operation with given `shape`.\n\nArgs:\nshape: The output's shape.\n\nReturns:\nA TensorFluent wrapping the reshape operation.", "source": "juraj-google-style"}
{"code": "def devno_alloc(self):\n    devno_int = self._devno_pool.alloc()\n    devno = '{:04X}'.format(devno_int)\n    return devno", "docstring": "Allocates a device number unique to this partition, in the range of\n0x8000 to 0xFFFF.\n\nReturns:\nstring: The device number as four hexadecimal digits in upper case.\n\nRaises:\nValueError: No more device numbers available in that range.", "source": "codesearchnet"}
{"code": "def set_default(self, name, value):\n    fl = self._flags()\n    if (name not in fl):\n        self._set_unknown_flag(name, value)\n        return\n    fl[name]._set_default(value)\n    self._assert_validators(fl[name].validators)", "docstring": "Changes the default value of the named flag object.\n\nThe flag's current value is also updated if the flag is currently using\nthe default value, i.e. not specified in the command line, and not set\nby FLAGS.name = value.\n\nArgs:\nname: str, the name of the flag to modify.\nvalue: The new default value.\n\nRaises:\nUnrecognizedFlagError: Raised when there is no registered flag named name.\nIllegalFlagValueError: Raised when value is not valid.", "source": "codesearchnet"}
{"code": "def get_content_of_file(self, name, full_path=False):\n    if self.handle:\n        for member in self.handle.getmembers():\n            if ((full_path and (member.name == name)) or ((not full_path) and (os.path.basename(member.name) == name))):\n                extracted = self.handle.extractfile(member)\n                return extracted.read().decode(locale.getpreferredencoding())\n    return None", "docstring": "Returns content of file from archive.\n\nIf full_path is set to False and two files with given name exist,\ncontent of one is returned (it is not specified which one that is).\nIf set to True, returns content of exactly that file.\n\nArgs:\nname: name of the file to get content of\nReturns:\nContent of the file with given name or None, if no such.", "source": "codesearchnet"}
{"code": "def _pre_action(self, action):\n    assert (len(action) == self.dof), 'environment got invalid action dimension'\n    (low, high) = self.action_spec\n    action = np.clip(action, low, high)\n    if self.has_gripper:\n        arm_action = action[:self.mujoco_robot.dof]\n        gripper_action_in = action[self.mujoco_robot.dof:(self.mujoco_robot.dof + self.gripper.dof)]\n        gripper_action_actual = self.gripper.format_action(gripper_action_in)\n        action = np.concatenate([arm_action, gripper_action_actual])\n    ctrl_range = self.sim.model.actuator_ctrlrange\n    bias = (0.5 * (ctrl_range[(:, 1)] + ctrl_range[(:, 0)]))\n    weight = (0.5 * (ctrl_range[(:, 1)] - ctrl_range[(:, 0)]))\n    applied_action = (bias + (weight * action))\n    self.sim.data.ctrl[:] = applied_action\n    self.sim.data.qfrc_applied[self._ref_joint_vel_indexes] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]\n    if self.use_indicator_object:\n        self.sim.data.qfrc_applied[self._ref_indicator_vel_low:self._ref_indicator_vel_high] = self.sim.data.qfrc_bias[self._ref_indicator_vel_low:self._ref_indicator_vel_high]", "docstring": "Overrides the superclass method to actuate the robot with the\npassed joint velocities and gripper control.\n\nArgs:\naction (numpy array): The control to apply to the robot. The first\n@self.mujoco_robot.dof dimensions should be the desired\nnormalized joint velocities and if the robot has\na gripper, the next @self.gripper.dof dimensions should be\nactuation controls for the gripper.", "source": "codesearchnet"}
{"code": "def resolve(self, method, path):\n        \n        if method in self._literal and path in self._literal[method]:\n            return self._literal[method][path], [], {}\n        else:\n            return self._resolve_non_literal_route(method, path)", "docstring": "Resolve a request to a route handler.\n\nArguments:\nmethod (str): HTTP method, e.g. GET, POST, etc. (type: str)\npath (str): Request path\n\nReturns:\ntuple or None: A tuple of three items:\n\n1. Route handler (callable)\n2. Positional arguments (list)\n3. Keyword arguments (dict)\n\n``None`` if no route matches the request.", "source": "juraj-google-style"}
{"code": "def find_in_matrix_2d(val, matrix):\n    dim = len(matrix[0])\n    item_index = 0\n    for row in matrix:\n        for i in row:\n            if (i == val):\n                break\n            item_index += 1\n        if (i == val):\n            break\n    loc = (int((item_index / dim)), (item_index % dim))\n    return loc", "docstring": "Returns a tuple representing the index of an item in a 2D matrix.\n\nArguments:\n- val (str) Value to look for\n- matrix (list) 2D matrix to search for val in\n\nReturns:\n- (tuple) Ordered pair representing location of val", "source": "codesearchnet"}
{"code": "def bulk_insert_extras(dialect_name: str,\n                       fileobj: TextIO,\n                       start: bool) -> None:\n    \n    lines = []\n    if dialect_name == SqlaDialectName.MYSQL:\n        if start:\n            lines = [\n                \"SET autocommit=0;\",\n                \"SET unique_checks=0;\",\n                \"SET foreign_key_checks=0;\",\n            ]\n        else:\n            lines = [\n                \"SET foreign_key_checks=1;\",\n                \"SET unique_checks=1;\",\n                \"COMMIT;\",\n            ]\n    writelines_nl(fileobj, lines)", "docstring": "Writes bulk ``INSERT`` preamble (start=True) or end (start=False).\n\nFor MySQL, this temporarily switches off autocommit behaviour and index/FK\nchecks, for speed, then re-enables them at the end and commits.\n\nArgs:\ndialect_name: SQLAlchemy dialect name (see :class:`SqlaDialectName`)\nfileobj: file-like object to write to\nstart: if ``True``, write preamble; if ``False``, write end", "source": "juraj-google-style"}
{"code": "def stop_ec2_instance(client, resource):\n    \n    instance = EC2Instance.get(resource.id)\n    if instance.state in ('stopped', 'terminated'):\n        return ActionStatus.IGNORED, {}\n\n    client.stop_instances(InstanceIds=[resource.id])\n    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}", "docstring": "Stop an EC2 Instance\n\nThis function will attempt to stop a running instance.\n\nArgs:\nclient (:obj:`boto3.session.Session.client`): A boto3 client object\nresource (:obj:`Resource`): The resource object to stop\n\nReturns:\n`ActionStatus`", "source": "juraj-google-style"}
{"code": "def _trychar(char, fallback, asciimode=None):  \n    \n    if asciimode is True:\n        \n        return fallback\n    if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:  \n        try:\n            char.encode(sys.stdout.encoding)\n        except Exception:  \n            pass\n        else:\n            return char\n    return fallback", "docstring": "Logic from IPython timeit to handle terminals that cant show mu\n\nArgs:\nchar (str): character, typically unicode, to try to use\nfallback (str): ascii character to use if stdout cannot encode char\nasciimode (bool): if True, always use fallback\n\nExample:\n>>> char = _trychar('µs', 'us')\n>>> print('char = {}'.format(char))\n>>> assert _trychar('µs', 'us', asciimode=True) == 'us'", "source": "juraj-google-style"}
{"code": "def _model_to_dict(model, ignore):\n    return {attr: value for (attr, value) in model.__dict__.items() if ((not attr.startswith('_')) and (attr not in ignore))}", "docstring": "Convert OSS model to dict.\n\nArgs:\nmodel (oss2.models.RequestResult): Model.\nignore (tuple of str): Keys to not insert to dict.\n\nReturns:\ndict: Model dict version.", "source": "codesearchnet"}
{"code": "def verify_link_ed25519_cot_signature(chain, link, unsigned_path, signature_path):\n    if chain.context.config['verify_cot_signature']:\n        log.debug('Verifying the {} {} {} ed25519 chain of trust signature'.format(link.name, link.task_id, link.worker_impl))\n        signature = read_from_file(signature_path, file_type='binary', exception=CoTError)\n        binary_contents = read_from_file(unsigned_path, file_type='binary', exception=CoTError)\n        errors = []\n        verify_key_seeds = chain.context.config['ed25519_public_keys'].get(link.worker_impl, [])\n        for seed in verify_key_seeds:\n            try:\n                verify_key = ed25519_public_key_from_string(seed)\n                verify_ed25519_signature(verify_key, binary_contents, signature, \"{} {}: {} ed25519 cot signature doesn't verify against {}: %(exc)s\".format(link.name, link.task_id, link.worker_impl, seed))\n                log.debug('{} {}: ed25519 cot signature verified.'.format(link.name, link.task_id))\n                break\n            except ScriptWorkerEd25519Error as exc:\n                errors.append(str(exc))\n        else:\n            errors = (errors or ['{} {}: Unknown error verifying ed25519 cot signature. worker_impl {} verify_keys {}'.format(link.name, link.task_id, link.worker_impl, verify_key_seeds)])\n            message = '\\n'.join(errors)\n            raise CoTError(message)\n    link.cot = load_json_or_yaml(unsigned_path, is_path=True, exception=CoTError, message='{} {}: Invalid unsigned cot json body! %(exc)s'.format(link.name, link.task_id))", "docstring": "Verify the ed25519 signatures of the chain of trust artifacts populated in ``download_cot``.\n\nPopulate each link.cot with the chain of trust json body.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\n\nRaises:\n(CoTError, ScriptWorkerEd25519Error): on signature verification failure.", "source": "codesearchnet"}
{"code": "def unpack_archive(*components, **kwargs) -> str:\n    path = fs.abspath(*components)\n    compression = kwargs.get('compression', 'bz2')\n    dir = kwargs.get('dir', fs.dirname(path))\n    fs.cd(dir)\n    tar = tarfile.open(path, ('r:' + compression))\n    tar.extractall()\n    tar.close()\n    fs.cdpop()\n    return dir", "docstring": "Unpack a compressed archive.\n\nArguments:\n*components (str[]): Absolute path.\n**kwargs (dict, optional): Set \"compression\" to compression type.\nDefault: bz2. Set \"dir\" to destination directory. Defaults to the\ndirectory of the archive.\n\nReturns:\nstr: Path to directory.", "source": "codesearchnet"}
{"code": "def __frontend_limit_descriptor(self, api_info):\n    if (api_info.frontend_limits is None):\n        return None\n    descriptor = {}\n    for (propname, descname) in (('unregistered_user_qps', 'unregisteredUserQps'), ('unregistered_qps', 'unregisteredQps'), ('unregistered_daily', 'unregisteredDaily')):\n        if (getattr(api_info.frontend_limits, propname) is not None):\n            descriptor[descname] = getattr(api_info.frontend_limits, propname)\n    rules = self.__frontend_limit_rules_descriptor(api_info)\n    if rules:\n        descriptor['rules'] = rules\n    return descriptor", "docstring": "Builds a frontend limit descriptor from API info.\n\nArgs:\napi_info: An _ApiInfo object.\n\nReturns:\nA dictionary with frontend limit information.", "source": "codesearchnet"}
{"code": "def Deserialize(self, reader):\n        \n        self.PrevHash = reader.ReadUInt256()\n        self.PrevIndex = reader.ReadUInt16()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def _guess_format_from_extension(ext):\n    \n    ext = ext.strip('.')\n\n    \n    \n    \n    \n    \n\n    formats = []\n    for fmt in FILE_FORMATS:\n        if ext in FILE_FORMATS[fmt]:\n            formats.append(fmt)\n\n    if formats == [] or len(formats) > 1:\n        return False\n\n    return formats[0]", "docstring": "Guess the appropriate data type from file extension.\n\nArguments:\next:        The file extension (period optional)\n\nReturns:\nString. The format (without leading period),\nor False if none was found or couldn't be guessed", "source": "juraj-google-style"}
{"code": "def handle_upnp_error(self, xml_error):\n    xml_error = xml_error.encode('utf-8')\n    error = XML.fromstring(xml_error)\n    log.debug('Error %s', xml_error)\n    error_code = error.findtext('.\n    if (error_code is not None):\n        description = self.UPNP_ERRORS.get(int(error_code), '')\n        raise SoCoUPnPException(message='UPnP Error {} received: {} from {}'.format(error_code, description, self.soco.ip_address), error_code=error_code, error_description=description, error_xml=xml_error)\n    else:\n        log.error('Unknown error received from %s', self.soco.ip_address)\n        raise UnknownSoCoException(xml_error)", "docstring": "Disect a UPnP error, and raise an appropriate exception.\n\nArgs:\nxml_error (str):  a unicode string containing the body of the\nUPnP/SOAP Fault response. Raises an exception containing the\nerror code.", "source": "codesearchnet"}
{"code": "def get_signature_def(meta_graph, signature_key):\n    signature_def_map = meta_graph.signature_def\n    signature_def_keys = set(signature_def_map.keys())\n    logging.info('The given SavedModel MetaGraphDef contains SignatureDefs with the following keys: %s', signature_def_keys)\n    if signature_key not in signature_def_keys:\n        raise ValueError(\"No '{}' in the SavedModel's SignatureDefs. Possible values are '{}'.\".format(signature_key, ','.join(signature_def_keys)))\n    return signature_def_map[signature_key]", "docstring": "Get the signature def from meta_graph with given signature_key.\n\nArgs:\nmeta_graph: meta_graph_def.\nsignature_key: signature_def in the meta_graph_def.\n\nReturns:\nThe signature_def used for tflite conversion.\n\nRaises:\nValueError: Given signature_key is not valid for this meta_graph.", "source": "github-repos"}
{"code": "def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):\n    countriesdata = cls.countriesdata(use_live=use_live)\n    m49 = countriesdata['m49iso3'].get(iso3)\n    if (m49 is not None):\n        return m49\n    if (exception is not None):\n        raise exception\n    return None", "docstring": "Get M49 from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get M49 code\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[int]: M49 code", "source": "codesearchnet"}
{"code": "def agent_version(self, value):\n        \n        if value == self._defaults['ai.internal.agentVersion'] and 'ai.internal.agentVersion' in self._values:\n            del self._values['ai.internal.agentVersion']\n        else:\n            self._values['ai.internal.agentVersion'] = value", "docstring": "The agent_version property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def byte_str(nBytes, unit='bytes', precision=2):\n    if unit.lower().startswith('b'):\n        nUnit = nBytes\n    elif unit.lower().startswith('k'):\n        nUnit = (nBytes / (2.0 ** 10))\n    elif unit.lower().startswith('m'):\n        nUnit = (nBytes / (2.0 ** 20))\n    elif unit.lower().startswith('g'):\n        nUnit = (nBytes / (2.0 ** 30))\n    elif unit.lower().startswith('t'):\n        nUnit = (nBytes / (2.0 ** 40))\n    else:\n        raise NotImplementedError(('unknown nBytes=%r unit=%r' % (nBytes, unit)))\n    return ((repr2(nUnit, precision=precision) + ' ') + unit)", "docstring": "representing the number of bytes with the chosen unit\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def report_error(self, read_tuple_name, error_name, wrong=\"\", message=\"\", warning=False):\n        \n        if (not self.report_only_first) or (error_name not in self.reported_errors):\n            print(\"\\t\".join([\"error\" if warning == False else \"warning\", read_tuple_name, error_name, wrong, message]))\n        self.reported_errors.add(error_name)\n        if warning:\n            self.warning_has_been_reported = True\n        else:\n            self.error_has_been_reported = True", "docstring": "Report an error.\n\nArgs:\nread_tuple_name (): Name of the read tuple.\nerror_name (): Name of the error.\nwrong (str): What is wrong.\nmessage (str): Additional msessage to be printed.\nwarning (bool): Warning (not an error).", "source": "juraj-google-style"}
{"code": "def activate_absence_with_duration(self, duration: int):\n        \n        data = {\"duration\": duration}\n        return self._restCall(\n            \"home/heating/activateAbsenceWithDuration\", json.dumps(data)\n        )", "docstring": "activates the absence mode for a given time\n\nArgs:\nduration(int): the absence duration in minutes", "source": "juraj-google-style"}
{"code": "def wb004(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `wb004`'.format(value))\n    self._wb004 = value", "docstring": "Corresponds to IDD Field `wb004`\nWet-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `wb004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "async def download_cot(chain):\n    \n    artifact_tasks = []\n    \n    \n    \n    for link in chain.links:\n        task_id = link.task_id\n        parent_dir = link.cot_dir\n        urls = []\n\n        unsigned_url = get_artifact_url(chain.context, task_id, 'public/chain-of-trust.json')\n        urls.append(unsigned_url)\n        if chain.context.config['verify_cot_signature']:\n            urls.append(\n                get_artifact_url(chain.context, task_id, 'public/chain-of-trust.json.sig')\n            )\n\n        artifact_tasks.append(\n            asyncio.ensure_future(\n                download_artifacts(\n                    chain.context, urls, parent_dir=parent_dir,\n                    valid_artifact_task_ids=[task_id]\n                )\n            )\n        )\n\n    artifacts_paths = await raise_future_exceptions(artifact_tasks)\n\n    for path in artifacts_paths:\n        sha = get_hash(path[0])\n        log.debug(\"{} downloaded; hash is {}\".format(path[0], sha))", "docstring": "Download the signed chain of trust artifacts.\n\nArgs:\nchain (ChainOfTrust): the chain of trust to add to.\n\nRaises:\nBaseDownloadError: on failure.", "source": "juraj-google-style"}
{"code": "def rtm(self, url: Optional[str]=None, bot_id: Optional[str]=None) -> Iterator[events.Event]:\n    while True:\n        bot_id = (bot_id or self._find_bot_id())\n        url = (url or self._find_rtm_url())\n        for event in self._incoming_from_rtm(url, bot_id):\n            (yield event)\n        url = None", "docstring": "Iterate over event from the RTM API\n\nArgs:\nurl: Websocket connection url\nbot_id: Connecting bot ID\n\nReturns:\n:class:`slack.events.Event` or :class:`slack.events.Message`", "source": "codesearchnet"}
{"code": "class InputExample:\n    guid: str\n    text_a: str\n    text_b: Optional[str] = None\n    label: Optional[str] = None\n\n    def to_json_string(self):\n        \n        return json.dumps(dataclasses.asdict(self), indent=2) + '\\n'", "docstring": "A single training/test example for simple sequence classification.\n\nArgs:\nguid: Unique id for the example.\ntext_a: string. The untokenized text of the first sequence. For single\nsequence tasks, only this sequence must be specified.\ntext_b: (Optional) string. The untokenized text of the second sequence.\nOnly must be specified for sequence pair tasks.\nlabel: (Optional) string. The label of the example. This should be\nspecified for train and dev examples, but not for test examples.", "source": "github-repos"}
{"code": "def _use_gl(objs):\n    \n    from ..models.plots import Plot\n    return _any(objs, lambda obj: isinstance(obj, Plot) and obj.output_backend == \"webgl\")", "docstring": "Whether a collection of Bokeh objects contains a plot requesting WebGL\n\nArgs:\nobjs (seq[Model or Document]) :\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def _encode_slice_definition(self, root_builder: expressions.Builder, slice_: _fhir_path_data_types.Slice) -> List[validation_pb2.SqlRequirement]:\n    if slice_.relative_path:\n        slice_builder = self._get_new_child_builder(root_builder, slice_.relative_path)\n    else:\n        slice_builder = root_builder\n    if slice_builder is None:\n        return []\n    element_constraints = []\n    for rule_path, rule_def in slice_.slice_rules:\n        if not rule_def.HasField('fixed') and (not rule_def.HasField('pattern')):\n            continue\n        slice_element_builder = root_builder\n        for path_component in rule_path.split('.'):\n            slice_element_builder = slice_element_builder.__getattr__(path_component)\n            if slice_element_builder.return_type.returns_polymorphic():\n                type_codes = _utils.element_type_codes(slice_element_builder.return_type.root_element_definition)\n                if len(type_codes) > 1:\n                    self._error_reporter.report_fhir_path_error(cast(Any, rule_def).path.value, str(slice_element_builder), f'Element `{slice_element_builder}` in slice `{cast(Any, slice_.slice_def).id.value}` is a choice type with more than one choice which is not currently supported.')\n                    return []\n                slice_element_builder = slice_element_builder.ofType(type_codes[0])\n        fixed_constraint = self._constraint_for_fixed_slice_element(slice_element_builder, rule_def)\n        if fixed_constraint is not None:\n            element_constraints.append(fixed_constraint)\n        pattern_constraint = self._constraint_for_pattern_slice_element(slice_element_builder, rule_def)\n        if pattern_constraint is not None:\n            element_constraints.append(pattern_constraint)\n    if not element_constraints:\n        return []\n    element_predicate = functools.reduce(operator.and_, element_constraints)\n    elements_in_slice = slice_builder.where(element_predicate)\n    slice_constraints = []\n    min_size: int = cast(Any, slice_.slice_def).min.value\n    max_size: str = cast(Any, slice_.slice_def).max.value\n    if str(min_size) == max_size:\n        slice_constraints.append(elements_in_slice.count() == min_size)\n    else:\n        if min_size == 1:\n            slice_constraints.append(elements_in_slice.exists())\n        elif min_size > 1:\n            slice_constraints.append(elements_in_slice.count() >= min_size)\n        if max_size.isdigit():\n            slice_constraints.append(elements_in_slice.count() <= int(max_size))\n    if not slice_constraints:\n        return []\n    slice_constraint = functools.reduce(operator.and_, slice_constraints)\n    constraint_sql = self._encode_fhir_path_builder_constraint(slice_constraint, None)\n    if constraint_sql is None:\n        return []\n    slice_id = cast(Any, slice_.slice_def).id.value\n    slice_path = self._abs_path_invocation(root_builder)\n    slice_name = cast(Any, slice_.slice_def).slice_name.value\n    column_name = f'{_path_to_sql_column_name(slice_path)}_{_path_to_sql_column_name(slice_id)}_slice_cardinality'\n    description = f'Slice {slice_id} requires at least {min_size} and at most {max_size} elements in {slice_builder} to conform to slice {slice_name}.'\n    return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=constraint_sql.sql, fhir_path_sql_expression=constraint_sql.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_CARDINALITY, 
element_path=root_builder.node.get_root_node().to_fhir_path(), description=description, fhir_path_key=column_name.replace('_', '-'), fhir_path_expression=constraint_sql.builder.fhir_path, fields_referenced_by_expression=sorted(constraint_sql.builder.node.find_paths_referenced()))]", "docstring": "Encodes constraints for slices.\n\nArgs:\nroot_builder: The builder representing a path to the structure definition\ndefining the slice.\nslice_: A slice defined by the structure definition at `root_builder`.\n\nReturns:\nA constraint enforcing the cardinality of `slice_` if `slice_` imposes a\nnon-zero or non-* min or max cardinality. Otherwise, an empty list.", "source": "github-repos"}
{"code": "def Convert(self, metadata, stat_entry, token=None):\n    \n    if stat_entry.pathspec.pathtype != rdf_paths.PathSpec.PathType.REGISTRY:\n      return []\n\n    result = ExportedRegistryKey(\n        metadata=metadata,\n        urn=stat_entry.AFF4Path(metadata.client_urn),\n        last_modified=stat_entry.st_mtime)\n\n    if (stat_entry.HasField(\"registry_type\") and\n        stat_entry.HasField(\"registry_data\")):\n\n      result.type = stat_entry.registry_type\n\n      \n      \n      \n      data = stat_entry.registry_data.GetValue()\n      if isinstance(data, bytes):\n        result.data = data\n      else:\n        result.data = str(data).encode(\"utf-8\")\n\n    return [result]", "docstring": "Converts StatEntry to ExportedRegistryKey.\n\nDoes nothing if StatEntry corresponds to a file and not a registry entry.\n\nArgs:\nmetadata: ExportedMetadata to be used for conversion.\nstat_entry: StatEntry to be converted.\ntoken: Security token.\n\nReturns:\nList or generator with resulting RDFValues. Empty list if StatEntry\ncorresponds to a file and not to a registry entry.", "source": "juraj-google-style"}
{"code": "def __init__(self, on_exception=Exception, limit=5, interval=None,\n                 validator=None):\n        \n        self.attempts = 0\n        self._on_exception = on_exception\n\n        self._setup_limit(limit)\n        self._setup_interval(interval)\n        self._setup_validator(validator)", "docstring": "Configure how a function should be retried.\n\nArgs:\non_exception (BaseException): The exception to catch. Use this to\nset which exception and it's subclasses to catch.\nlimit ()", "source": "juraj-google-style"}
{"code": "def _handle_agg_function(gb, agg_func, agg_name, *args, **kwargs):\n    if _is_associative(agg_func):\n        return _liftable_agg(agg_func)(gb, *args, **kwargs)\n    elif _is_liftable_with_sum(agg_func):\n        return _liftable_agg(agg_func, postagg_meth='sum')(gb, *args, **kwargs)\n    elif _is_unliftable(agg_func):\n        return _unliftable_agg(agg_func)(gb, *args, **kwargs)\n    elif callable(agg_func):\n        return DeferredDataFrame(expressions.ComputedExpression(agg_name, lambda gb_val: gb_val.agg(agg_func, *args, **kwargs), [gb._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton()))\n    else:\n        raise NotImplementedError(f'GroupBy.agg(func={agg_func!r})')", "docstring": "Handles the aggregation logic based on the function type passed.\n\nArgs:\ngb: The groupby instance (DeferredGroupBy).\nagg_name: The name/label of the aggregation function.\nfn: The aggregation function to apply.\n*args: Additional arguments to pass to the aggregation function.\n**kwargs: Keyword arguments to pass to the aggregation function.\n\nReturns:\nA DeferredDataFrame or the result of the aggregation function.\n\nRaises:\nNotImplementedError: If the aggregation function type is unsupported.", "source": "github-repos"}
{"code": "def register_ops_if_needed(graph_ops):\n    missing_ops = (graph_ops - set(op_def_registry.get_registered_ops().keys()))\n    if (not missing_ops):\n        return\n    p_buffer = c_api.TF_GetAllOpList()\n    cpp_op_list = op_def_pb2.OpList()\n    cpp_op_list.ParseFromString(c_api.TF_GetBuffer(p_buffer))\n    cpp_registry_ops = {op.name: op for op in cpp_op_list.op}\n    missing_op_list = op_def_pb2.OpList()\n    for missing_op in missing_ops:\n        if (missing_op not in cpp_registry_ops):\n            logging.info('Op %s is missing from both the python and C++ registry.', missing_op)\n        else:\n            missing_op_list.op.extend([cpp_registry_ops[missing_op]])\n            logging.info('Adding op %s from c++ registry to python registry.', missing_op)\n    op_def_registry.register_op_list(missing_op_list)\n    if (not (missing_ops <= set(cpp_registry_ops.keys()))):\n        raise RuntimeError(('Graph ops missing from the python registry (%s) are also absent from the c++ registry.' % missing_ops.difference(set(cpp_registry_ops.keys()))))", "docstring": "Register graph ops absent in op_def_registry, if present in c++ registry.\n\nArgs:\ngraph_ops: set with graph op names to register.\n\nRaises:\nRuntimeError: if `graph_ops` contains ops that are not in either python or\nc++ registry.", "source": "codesearchnet"}
{"code": "async def dist(self, mesg):\n        \n        if self.isfini:\n            return ()\n\n        ret = []\n        for func in self._syn_funcs.get(mesg[0], ()):\n\n            try:\n                ret.append(await s_coro.ornot(func, mesg))\n            except asyncio.CancelledError:\n                raise\n            except Exception:\n                logger.exception('base %s error with mesg %s', self, mesg)\n\n        for func in self._syn_links:\n            try:\n                ret.append(await func(mesg))\n            except asyncio.CancelledError:\n                raise\n            except Exception:\n                logger.exception('base %s error with mesg %s', self, mesg)\n\n        return ret", "docstring": "Distribute an existing event tuple.\n\nArgs:\nmesg ((str,dict)):  An event tuple.\n\nExample:\n\nawait base.dist( ('foo',{'bar':'baz'}) )", "source": "juraj-google-style"}
{"code": "def extract_tree_with(self, labels, suppress_unifurcations=True):\n    return self.extract_tree(labels, False, suppress_unifurcations)", "docstring": "Extract a copy of this ``Tree`` with only the leaves labeled by the strings in ``labels``\n\nArgs:\n``leaves`` (``set``): Set of leaf labels to include.\n\n``suppress_unifurcations`` (``bool``): ``True`` to suppress unifurcations, otherwise ``False``\n\nReturns:\nTree: Copy of this Tree, including only the leaves labeled by the strings in ``labels``", "source": "codesearchnet"}
{"code": "def _clone_layers_and_model_config(model, input_layers, layer_fn):\n    created_layers = {}\n\n    def _copy_layer(layer):\n        if layer in input_layers:\n            created_layers[layer.name] = input_layers[layer]\n        elif layer in model._input_layers:\n            created_layers[layer.name] = InputLayer(**layer.get_config())\n        else:\n            created_layers[layer.name] = layer_fn(layer)\n        return {}\n    config = functional.get_network_config(model, serialize_layer_fn=_copy_layer)\n    return (config, created_layers)", "docstring": "Clones all layers, and returns the model config without serializing layers.\n\nThis function ensures that only the node graph is retrieved when getting the\nmodel config. The `layer_fn` used to clone layers might not rely on\n`layer.get_config()`, so some custom layers do not define `get_config`.\nTrying to retrieve the config results in errors.\n\nArgs:\nmodel: A Functional model.\ninput_layers: Dictionary mapping input layers in `model` to new input layers\nlayer_fn: Function used to clone all non-input layers.\n\nReturns:\nModel config object, and a dictionary of newly created layers.", "source": "github-repos"}
{"code": "def _convert_validators_to_mapping(validators):\n    validators_mapping = {}\n    for validator in validators:\n        if (not isinstance(validator['check'], collections.Hashable)):\n            check = json.dumps(validator['check'])\n        else:\n            check = validator['check']\n        key = (check, validator['comparator'])\n        validators_mapping[key] = validator\n    return validators_mapping", "docstring": "convert validators list to mapping.\n\nArgs:\nvalidators (list): validators in list\n\nReturns:\ndict: validators mapping, use (check, comparator) as key.\n\nExamples:\n>>> validators = [\n{\"check\": \"v1\", \"expect\": 201, \"comparator\": \"eq\"},\n{\"check\": {\"b\": 1}, \"expect\": 200, \"comparator\": \"eq\"}\n]\n>>> _convert_validators_to_mapping(validators)\n{\n(\"v1\", \"eq\"): {\"check\": \"v1\", \"expect\": 201, \"comparator\": \"eq\"},\n('{\"b\": 1}', \"eq\"): {\"check\": {\"b\": 1}, \"expect\": 200, \"comparator\": \"eq\"}\n}", "source": "codesearchnet"}
{"code": "def get(self, secret_id):\n    return self.prepare_model(self.client.api.inspect_secret(secret_id))", "docstring": "Get a secret.\n\nArgs:\nsecret_id (str): Secret ID.\n\nReturns:\n(:py:class:`Secret`): The secret.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the secret does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def filing_history(self, num, transaction=None, **kwargs):\n        \n        baseuri = self._BASE_URI + \"company/{}/filing-history\".format(num)\n        if transaction is not None:\n            baseuri += \"/{}\".format(transaction)\n        res = self.session.get(baseuri, params=kwargs)\n        self.handle_http_error(res)\n        return res", "docstring": "Search for a company's filling history by company number.\n\nArgs:\nnum (str): Company number to search on.\n\ntransaction (Optional[str]): Filing record number.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "juraj-google-style"}
{"code": "def binary_arguments_to_tensors(x1, x2):\n  \n  if not isinstance(x1, Tensor) and not isinstance(x2, Tensor):\n    raise ValueError(\"at least one of x1 and x2 must be an mtf Tensor\")\n  elif isinstance(x1, Tensor) and isinstance(x2, Tensor):\n    return x1, x2\n  elif isinstance(x1, Tensor):\n    return x1, import_tf_tensor(\n        x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([]))\n  else:\n    return import_tf_tensor(x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype),\n                            Shape([])), x2", "docstring": "Convert argument of a binary operation to Tensors.\n\nArgs:\nx1: a Tensor or something convertible to a tf Scalar\nx2: a Tensor or something convertible to a tf Scalar\n\nReturns:\nnew_x1: a Tensor\nnew_x2: a Tensor\n\nRaises:\nValueError: on failure", "source": "juraj-google-style"}
{"code": "def _stride(stride_spec):\n  \n  if stride_spec is None:\n    return [1, 1, 1, 1]\n  elif isinstance(stride_spec, tf.compat.integral_types):\n    return [1, stride_spec, stride_spec, 1]\n  elif len(stride_spec) == 1:\n    return [1, stride_spec[0], stride_spec[0], 1]\n  elif len(stride_spec) == 2:\n    return [1, stride_spec[0], stride_spec[1], 1]\n  else:\n    assert len(stride_spec) == 4\n    return stride_spec", "docstring": "Expands the stride spec into a length 4 list.\n\nArgs:\nstride_spec: If length 0, 1 or 2 then assign the inner dimensions, otherwise\nreturn stride_spec if it is length 4.\nReturns:\nA length 4 list.", "source": "juraj-google-style"}
{"code": "def to_str(self, separator=''):\n    if self.closed():\n        raise ValueError('Attempt to call to_str() on a closed Queryable.')\n    return str(separator).join(self.select(str))", "docstring": "Build a string from the source sequence.\n\nThe elements of the query result will each coerced to a string and then\nthe resulting strings concatenated to return a single string. This\nallows the natural processing of character sequences as strings. An\noptional separator which will be inserted between each item may be\nspecified.\n\nNote: this method uses immediate execution.\n\nArgs:\nseparator: An optional separator which will be coerced to a string\nand inserted between each source item in the resulting string.\n\nReturns:\nA single string which is the result of stringifying each element\nand concatenating the results into a single string.\n\nRaises:\nTypeError: If any element cannot be coerced to a string.\nTypeError: If the separator cannot be coerced to a string.\nValueError: If the Queryable is closed.", "source": "codesearchnet"}
{"code": "def properties(self, value):\n        \n        if value == self._defaults['properties'] and 'properties' in self._values:\n            del self._values['properties']\n        else:\n            self._values['properties'] = value", "docstring": "The properties property.\n\nArgs:\nvalue (hash). the property value.", "source": "juraj-google-style"}
{"code": "def report_line(zipfilename: str, contentsfilename: str, line: str,\n                show_inner_file: bool) -> None:\n    \n    if show_inner_file:\n        print(\"{} [{}]: {}\".format(zipfilename, contentsfilename, line))\n    else:\n        print(\"{}: {}\".format(zipfilename, line))", "docstring": "Prints a line from a file, with the ``.zip`` filename and optionally also\nthe inner filename.\n\nArgs:\nzipfilename: filename of the ``.zip`` file\ncontentsfilename: filename of the inner file\nline: the line from the inner file\nshow_inner_file: if ``True``, show both filenames; if ``False``, show\njust the ``.zip`` filename", "source": "juraj-google-style"}
{"code": "def _send(self, **req_kwargs):\n        \n        auth_token = self._auth.getAuthToken()\n        if auth_token is None:\n            raise exception.LoginException('Not logged in')\n\n        req_kwargs.setdefault('headers', {\n            'Authorization': 'OAuth ' + auth_token\n        })\n\n        return self._session.request(**req_kwargs)", "docstring": "Send an authenticated request to a Google API.\n\nArgs:\n**req_kwargs: Arbitrary keyword arguments to pass to Requests.\n\nReturn:\nrequests.Response: The raw response.\n\nRaises:\nLoginException: If :py:meth:`login` has not been called.", "source": "juraj-google-style"}
{"code": "def _CheckGrayscaleImage(image, require_static=True):\n    try:\n        if image.get_shape().ndims is None:\n            image_shape = image.get_shape().with_rank(2)\n        else:\n            image_shape = image.get_shape().with_rank_at_least(2)\n    except ValueError:\n        raise ValueError('A grayscale image (shape %s) must be at least two-dimensional.' % image.shape)\n    if require_static and (not image_shape.is_fully_defined()):\n        raise ValueError(\"'image' must be fully defined.\")\n    if image_shape.is_fully_defined():\n        if image_shape[-1] != 1:\n            raise ValueError('Last dimension of a grayscale image should be size 1.')\n    if not image_shape.is_fully_defined():\n        return [check_ops.assert_equal(array_ops.shape(image)[-1], 1, message='Last dimension of a grayscale image should be size 1.'), check_ops.assert_greater_equal(array_ops.rank(image), 3, message='A grayscale image must be at least two-dimensional.')]\n    else:\n        return []", "docstring": "Assert that we are working with properly shaped grayscale image.\n\nArgs:\nimage: >= 2-D Tensor of size [*, 1]\nrequire_static: Boolean, whether static shape is required.\n\nRaises:\nValueError: if image.shape is not a [>= 2] vector or if\nlast dimension is not size 1.\n\nReturns:\nAn empty list, if `image` has fully defined dimensions. Otherwise, a list\ncontaining an assert op is returned.", "source": "github-repos"}
{"code": "def post_process_video_grounding(self, logits, video_durations):\n    start, end = (round(logits.tolist()[0][0] * video_durations, 1), round(logits.tolist()[0][1] * video_durations, 1))\n    return (start, end)", "docstring": "Compute the time of the video.\n\nArgs:\nlogits (`torch.Tensor`):\nThe logits output of TvpForVideoGrounding.\nvideo_durations (`float`):\nThe video's duration.\n\nReturns:\nstart (`float`):\nThe start time of the video.\nend (`float`):\nThe end time of the video.", "source": "github-repos"}
{"code": "def get_commit_tree(profile, sha):\n    \n    data = commits.get_commit(profile, sha)\n    tree = data.get(\"tree\")\n    sha = tree.get(\"sha\")\n    return sha", "docstring": "Get the SHA of a commit's tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of a commit.\n\nReturns:\nThe SHA of the commit's tree.", "source": "juraj-google-style"}
{"code": "def diffusion_mds(means, weights, d, diffusion_rounds=10):\n    for i in range(diffusion_rounds):\n        weights = (weights * weights)\n        weights = (weights / weights.sum(0))\n    X = dim_reduce(means, weights, d)\n    if (X.shape[0] == 2):\n        return X.dot(weights)\n    else:\n        return X.T.dot(weights)", "docstring": "Dimensionality reduction using MDS, while running diffusion on W.\n\nArgs:\nmeans (array): genes x clusters\nweights (array): clusters x cells\nd (int): desired dimensionality\n\nReturns:\nW_reduced (array): array of shape (d, cells)", "source": "codesearchnet"}
{"code": "def WaitUntilDone(self, timeout=None):\n    \n\n    utils.Poll(\n        generator=self.GetState,\n        condition=lambda s: s != self.__class__.STATE_RUNNING,\n        timeout=timeout)\n    self.target_file = self.target_file.Get()\n    return self", "docstring": "Wait until the operation is done.\n\nArgs:\ntimeout: timeout in seconds. None means default timeout (1 hour).\n0 means no timeout (wait forever).\nReturns:\nOperation object with refreshed target_file.\nRaises:\nPollTimeoutError: if timeout is reached.", "source": "juraj-google-style"}
{"code": "def _compile_arithmetic_expression(self,\n                                       expr: Expression,\n                                       scope: Dict[str, TensorFluent],\n                                       batch_size: Optional[int] = None,\n                                       noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:\n        \n        etype = expr.etype\n        args = expr.args\n\n        if len(args) == 1:\n            etype2op = {\n                '+': lambda x: x,\n                '-': lambda x: -x\n            }\n\n            if etype[1] not in etype2op:\n                raise ValueError('Invalid binary arithmetic expression:\\n{}'.format(expr))\n\n            op = etype2op[etype[1]]\n            x = self._compile_expression(args[0], scope, batch_size, noise)\n            fluent = op(x)\n\n        else:\n            etype2op = {\n                '+': lambda x, y: x + y,\n                '-': lambda x, y: x - y,\n                '*': lambda x, y: x * y,\n                '/': lambda x, y: x / y,\n            }\n\n            if etype[1] not in etype2op:\n                raise ValueError('Invalid binary arithmetic expression:\\n{}'.format(expr))\n\n            op = etype2op[etype[1]]\n            x = self._compile_expression(args[0], scope, batch_size, noise)\n            y = self._compile_expression(args[1], scope, batch_size, noise)\n            fluent = op(x, y)\n\n        return fluent", "docstring": "Compile an arithmetic expression `expr` into a TensorFluent\nin the given `scope` with optional batch size.\n\nArgs:\nexpr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression.\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.\nbatch_size (Optional[size]): The batch size.\n\nReturns:\n:obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.", "source": "juraj-google-style"}
{"code": "def _check_etag(self, etag):\n    if (etag is None):\n        return\n    elif (self._etag is None):\n        self._etag = etag\n    elif (self._etag != etag):\n        raise ValueError('File on GCS has changed while reading.')", "docstring": "Check if etag is the same across requests to GCS.\n\nIf self._etag is None, set it. If etag is set, check that the new\netag equals the old one.\n\nIn the __init__ method, we fire one HEAD and one GET request using\nndb tasklet. One of them would return first and set the first value.\n\nArgs:\netag: etag from a GCS HTTP response. None if etag is not part of the\nresponse header. It could be None for example in the case of GCS\ncomposite file.\n\nRaises:\nValueError: if two etags are not equal.", "source": "codesearchnet"}
{"code": "def stop_site(name):\n    ps_cmd = ['Stop-WebSite', \"'{0}'\".format(name)]\n    cmd_ret = _srvmgr(ps_cmd)\n    return (cmd_ret['retcode'] == 0)", "docstring": "Stop a Web Site in IIS.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the website to stop.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.stop_site name='My Test Site'", "source": "codesearchnet"}
{"code": "def assignSchedule(self, schedule, period, hour, minute, tariff):\n    if ((schedule not in range(Extents.Schedules)) or (period not in range(Extents.Tariffs)) or (hour < 0) or (hour > 23) or (minute < 0) or (minute > 59) or (tariff < 0)):\n        ekm_log(('Out of bounds in Schedule_' + str((schedule + 1))))\n        return False\n    period += 1\n    idx_min = ('Min_' + str(period))\n    idx_hour = ('Hour_' + str(period))\n    idx_rate = ('Tariff_' + str(period))\n    if (idx_min not in self.m_schedule_params):\n        ekm_log(('Incorrect index: ' + idx_min))\n        return False\n    if (idx_hour not in self.m_schedule_params):\n        ekm_log(('Incorrect index: ' + idx_hour))\n        return False\n    if (idx_rate not in self.m_schedule_params):\n        ekm_log(('Incorrect index: ' + idx_rate))\n        return False\n    self.m_schedule_params[idx_rate] = tariff\n    self.m_schedule_params[idx_hour] = hour\n    self.m_schedule_params[idx_min] = minute\n    self.m_schedule_params['Schedule'] = schedule\n    return True", "docstring": "Assign one schedule tariff period to meter bufffer.\n\nArgs:\nschedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).\ntariff (int): :class:`~ekmmeters.Tariffs` value or in range(Extents.Tariffs).\nhour (int): Hour from 0-23.\nminute (int): Minute from 0-59.\ntariff (int): Rate value.\n\nReturns:\nbool: True on completed assignment.", "source": "codesearchnet"}
{"code": "def disease_term(self, disease_identifier):\n    query = {}\n    try:\n        disease_identifier = int(disease_identifier)\n        query['disease_nr'] = disease_identifier\n    except ValueError:\n        query['_id'] = disease_identifier\n    return self.disease_term_collection.find_one(query)", "docstring": "Return a disease term\n\nChecks if the identifier is a disease number or a id\n\nArgs:\ndisease_identifier(str)\n\nReturns:\ndisease_obj(dict)", "source": "codesearchnet"}
{"code": "def get_pool_context(self):\n    context = {self.current.lane_id: self.current.role, 'self': self.current.role}\n    for (lane_id, role_id) in self.current.pool.items():\n        if role_id:\n            context[lane_id] = lazy_object_proxy.Proxy((lambda : self.role_model(super_context).objects.get(role_id)))\n    return context", "docstring": "Builds context for the WF pool.\n\nReturns:\nContext dict.", "source": "codesearchnet"}
{"code": "def semantic_eq(node1, node2):\n        \n        \n        if 'barrier' == node1.name == node2.name:\n            return set(node1.qargs) == set(node2.qargs)\n        return node1.data_dict == node2.data_dict", "docstring": "Check if DAG nodes are considered equivalent, e.g. as a node_match for nx.is_isomorphic.\n\nArgs:\nnode1 (DAGNode): A node to compare.\nnode2 (DAGNode): The other node to compare.\n\nReturn:\nBool: If node1 == node2", "source": "juraj-google-style"}
{"code": "def match(self, name):\n        \n        if self.method == Ex.Method.PREFIX:\n            return name.startswith(self.value)\n        elif self.method == Ex.Method.SUFFIX:\n            return name.endswith(self.value)\n        elif self.method == Ex.Method.CONTAINS:\n            return self.value in name\n        elif self.method == Ex.Method.EXACT:\n            return self.value == name\n        elif self.method == Ex.Method.REGEX:\n            return re.search(self.value, name)\n        return False", "docstring": "Check if given name matches.\n\nArgs:\nname (str): name to check.\n\nReturns:\nbool: matches name.", "source": "juraj-google-style"}
{"code": "class Rescaling(TFDataLayer):\n\n    def __init__(self, scale, offset=0.0, **kwargs):\n        super().__init__(**kwargs)\n        self.scale = scale\n        self.offset = offset\n        self.supports_masking = True\n\n    def call(self, inputs):\n        dtype = self.compute_dtype\n        scale = self.backend.cast(self.scale, dtype)\n        offset = self.backend.cast(self.offset, dtype)\n        scale_shape = self.backend.core.shape(scale)\n        if len(scale_shape) > 0 and backend.image_data_format() == 'channels_first':\n            scale = self.backend.numpy.reshape(scale, scale_shape + (1,) * (3 - len(scale_shape)))\n        return self.backend.cast(inputs, dtype) * scale + offset\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = super().get_config()\n        config.update({'scale': serialization_lib.serialize_keras_object(self.scale), 'offset': serialization_lib.serialize_keras_object(self.offset)})\n        return config\n\n    @classmethod\n    def from_config(cls, config, custom_objects=None):\n        config = config.copy()\n        config['scale'] = serialization_lib.deserialize_keras_object(config['scale'], custom_objects=custom_objects)\n        config['offset'] = serialization_lib.deserialize_keras_object(config['offset'], custom_objects=custom_objects)\n        return cls(**config)", "docstring": "A preprocessing layer which rescales input values to a new range.\n\nThis layer rescales every value of an input (often an image) by multiplying\nby `scale` and adding `offset`.\n\nFor instance:\n\n1. To rescale an input in the `[0, 255]` range\nto be in the `[0, 1]` range, you would pass `scale=1./255`.\n\n2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,\nyou would pass `scale=1./127.5, offset=-1`.\n\nThe rescaling is applied both during training and inference. Inputs can be\nof integer or floating point dtype, and by default the layer will output\nfloats.\n\n**Note:** This layer is safe to use inside a `tf.data` pipeline\n(independently of which backend you're using).\n\nArgs:\nscale: Float, the scale to apply to the inputs.\noffset: Float, the offset to apply to the inputs.\n**kwargs: Base layer keyword arguments, such as `name` and `dtype`.", "source": "github-repos"}
{"code": "def render(self, container, descender, state, space_below=0, first_line_only=False):\n    indent_first = (float(self.get_style('indent_first', container)) if state.initial else 0)\n    line_width = float(container.width)\n    line_spacing = self.get_style('line_spacing', container)\n    text_align = self.get_style('text_align', container)\n    tab_stops = self.get_style('tab_stops', container)\n    if (not tab_stops):\n        tab_width = (2 * self.get_style('font_size', container))\n        tab_stops = DefaultTabStops(tab_width)\n    saved_state = copy(state)\n    prev_state = copy(state)\n    max_line_width = 0\n\n    def typeset_line(line, last_line=False):\n        \"Typeset `line` and, if no exception is raised, update the\\n            paragraph's internal rendering state.\"\n        nonlocal state, saved_state, max_line_width, descender, space_below\n        max_line_width = max(max_line_width, line.cursor)\n        advance = (line.ascender(container) if (descender is None) else line_spacing.advance(line, descender, container))\n        descender = line.descender(container)\n        line.advance = advance\n        total_advance = ((advance + (space_below if last_line else 0)) - descender)\n        if (container.remaining_height < total_advance):\n            raise EndOfContainer(saved_state)\n        assert container.advance2(advance)\n        line.typeset(container, text_align, last_line)\n        assert container.advance2((- descender))\n        state.initial = False\n        saved_state = copy(state)\n        return Line(tab_stops, line_width, container, significant_whitespace=self.significant_whitespace)\n    first_line = line = Line(tab_stops, line_width, container, indent_first, self.significant_whitespace)\n    while True:\n        try:\n            word = state.next_word()\n        except StopIteration:\n            break\n        try:\n            if (not line.append_word(word)):\n                for (first, second) in word.hyphenate(container):\n                    if line.append_word(first):\n                        state.prepend_word(second)\n                        break\n                else:\n                    state = prev_state\n                line = typeset_line(line)\n                if first_line_only:\n                    break\n                continue\n        except NewLineException:\n            line.append(word.glyphs_span)\n            line = typeset_line(line, last_line=True)\n            if first_line_only:\n                break\n        prev_state = copy(state)\n    if line:\n        typeset_line(line, last_line=True)\n    if (self._width(container) == FlowableWidth.AUTO):\n        if (text_align == TextAlign.CENTER):\n            container.left -= (float((container.width - max_line_width)) / 2)\n        if (text_align == TextAlign.RIGHT):\n            container.left -= float((container.width - max_line_width))\n    return (max_line_width, first_line.advance, descender)", "docstring": "Typeset the paragraph\n\nThe paragraph is typeset in the given container starting below the\ncurrent cursor position of the container. 
When the end of the container\nis reached, the rendering state is preserved to continue setting the\nrest of the paragraph when this method is called with a new container.\n\nArgs:\ncontainer (Container): the container to render to\ndescender (float or None): descender height of the preceeding line\nstate (ParagraphState): the state where rendering will continue\nfirst_line_only (bool): typeset only the first line", "source": "codesearchnet"}
{"code": "def __init__(self, sender, persistence_path=''):\n        \n        if persistence_path and PersistQueue is None:\n            raise ValueError('persistence_path argument requires persist-queue dependency to be installed')\n        elif persistence_path:\n            self._queue = PersistQueue(persistence_path)\n        else:\n            self._queue = Queue()\n\n        self._persistence_path = persistence_path\n        self._max_queue_length = 500\n        self._sender = sender\n        if sender:\n            self._sender.queue = self", "docstring": "Initializes a new instance of the class.\n\nArgs:\nsender (:class:`SenderBase`) the sender object that will be used in conjunction with this queue.\npersistence_path (str) if set, persist the queue on disk into the provided directory.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_path, open_function=open):\n    self._output_path = output_path\n    self._open_function = open_function\n    self._old_enabled = None", "docstring": "Initialize.\n\nArgs:\noutput_path: The path for the metrics data.  If empty, no metrics are\ncollected.\nopen_function: A custom file opening function.", "source": "github-repos"}
{"code": "def get_credentials(self):\n    return ReadOnlyCredentials(self.access_token, self.client_id, self.client_secret, self.refresh_token)", "docstring": "Get read-only credentials.\n\nReturns:\nclass: Read-only credentials.", "source": "codesearchnet"}
{"code": "def set(self, option, value=None):\n        \n        option = self._container.optionxform(option)\n        if option in self.options():\n            self.__getitem__(option).value = value\n        else:\n            self.__setitem__(option, value)\n        return self", "docstring": "Set an option for chaining.\n\nArgs:\noption (str): option name\nvalue (str): value, default None", "source": "juraj-google-style"}
{"code": "def measures(*measurements, **kwargs):\n\n    def _maybe_make(meas):\n        'Turn strings into Measurement objects if necessary.'\n        if isinstance(meas, Measurement):\n            return meas\n        elif isinstance(meas, six.string_types):\n            return Measurement(meas, **kwargs)\n        raise InvalidMeasurementType('Expected Measurement or string', meas)\n    if (kwargs and (len(measurements) != 1)):\n        raise InvalidMeasurementType('If @measures kwargs are provided, a single measurement name must be provided as a positional arg first.')\n    if ('outcome' in kwargs):\n        raise ValueError('Cannot specify outcome in measurement declaration!')\n    measurements = [_maybe_make(meas) for meas in measurements]\n\n    def decorate(wrapped_phase):\n        'Phase decorator to be returned.'\n        phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)\n        duplicate_names = (set((m.name for m in measurements)) & set((m.name for m in phase.measurements)))\n        if duplicate_names:\n            raise DuplicateNameError('Measurement names duplicated', duplicate_names)\n        phase.measurements.extend(measurements)\n        return phase\n    return decorate", "docstring": "Decorator-maker used to declare measurements for phases.\n\nSee the measurements module docstring for examples of usage.\n\nArgs:\nmeasurements: Measurement objects to declare, or a string name from which\nto create a Measurement.\nkwargs: Keyword arguments to pass to Measurement constructor if we're\nconstructing one.  Note that if kwargs are provided, the length\nof measurements must be 1, and that value must be a string containing\nthe measurement name.  For valid kwargs, see the definition of the\nMeasurement class.\n\nReturns:\nA decorator that declares the measurement(s) for the decorated phase.", "source": "codesearchnet"}
{"code": "def _parse_request_arguments(self, request):\n    inference_addresses = request.args.get('inference_address').split(',')\n    model_names = request.args.get('model_name').split(',')\n    model_versions = request.args.get('model_version').split(',')\n    model_signatures = request.args.get('model_signature').split(',')\n    if (len(model_names) != len(inference_addresses)):\n        raise common_utils.InvalidUserInputError(('Every model should have a ' + 'name and address.'))\n    return (inference_addresses, model_names, model_versions, model_signatures)", "docstring": "Parses comma separated request arguments\n\nArgs:\nrequest: A request that should contain 'inference_address', 'model_name',\n'model_version', 'model_signature'.\n\nReturns:\nA tuple of lists for model parameters", "source": "codesearchnet"}
{"code": "def get_terminal_size():\n    try:\n        from IPython import get_ipython\n        ipython = get_ipython()\n        from ipykernel import zmqshell\n        if isinstance(ipython, zmqshell.ZMQInteractiveShell):\n            return (79, 24)\n    except Exception:\n        pass\n    try:\n        import shutil\n        (w, h) = shutil.get_terminal_size()\n        if (w and h):\n            return ((w - 1), h)\n    except Exception:\n        pass\n    try:\n        w = int(os.environ.get('COLUMNS'))\n        h = int(os.environ.get('LINES'))\n        if (w and h):\n            return (w, h)\n    except Exception:\n        pass\n    try:\n        import blessings\n        terminal = blessings.Terminal()\n        w = terminal.width\n        h = terminal.height\n        if (w and h):\n            return (w, h)\n    except Exception:\n        pass\n    try:\n        (w, h) = _get_terminal_size_linux()\n        if (w and h):\n            return (w, h)\n    except Exception:\n        pass\n    try:\n        (w, h) = _get_terminal_size_windows()\n        if (w and h):\n            return (w, h)\n    except Exception:\n        pass\n    try:\n        (w, h) = _get_terminal_size_tput()\n        if (w and h):\n            return (w, h)\n    except Exception:\n        pass\n    return (79, 24)", "docstring": "Get the current size of your terminal\n\nMultiple returns are not always a good idea, but in this case it greatly\nsimplifies the code so I believe it's justified. It's not the prettiest\nfunction but that's never really possible with cross-platform code.\n\nReturns:\nwidth, height: Two integers containing width and height", "source": "codesearchnet"}
{"code": "def find_mip(self, direction, mechanism, purview, allow_neg=False):\n    alpha_min = float('inf')\n    probability = self.probability(direction, mechanism, purview)\n    for partition in mip_partitions(mechanism, purview, self.node_labels):\n        partitioned_probability = self.partitioned_probability(direction, partition)\n        alpha = log2((probability / partitioned_probability))\n        if (utils.eq(alpha, 0) or ((alpha < 0) and (not allow_neg))):\n            return AcRepertoireIrreducibilityAnalysis(state=self.mechanism_state(direction), direction=direction, mechanism=mechanism, purview=purview, partition=partition, probability=probability, partitioned_probability=partitioned_probability, node_labels=self.node_labels, alpha=0.0)\n        if ((abs(alpha_min) - abs(alpha)) > constants.EPSILON):\n            alpha_min = alpha\n            acria = AcRepertoireIrreducibilityAnalysis(state=self.mechanism_state(direction), direction=direction, mechanism=mechanism, purview=purview, partition=partition, probability=probability, partitioned_probability=partitioned_probability, node_labels=self.node_labels, alpha=alpha_min)\n    return acria", "docstring": "Find the ratio minimum information partition for a mechanism\nover a purview.\n\nArgs:\ndirection (str): |CAUSE| or |EFFECT|\nmechanism (tuple[int]): A mechanism.\npurview (tuple[int]): A purview.\n\nKeyword Args:\nallow_neg (boolean): If true, ``alpha`` is allowed to be negative.\nOtherwise, negative values of ``alpha`` will be treated as if\nthey were 0.\n\nReturns:\nAcRepertoireIrreducibilityAnalysis: The irreducibility analysis for\nthe mechanism.", "source": "codesearchnet"}
{"code": "def del_hparam(self, name):\n    if hasattr(self, name):\n        delattr(self, name)\n        del self._hparam_types[name]", "docstring": "Removes the hyperparameter with key 'name'.\n\nDoes nothing if it isn't present.\n\nArgs:\nname: Name of the hyperparameter.", "source": "codesearchnet"}
{"code": "def __squid_to_guid(self, squid):\n        \n        if not squid:\n            return ''\n        squid_match = self.__squid_pattern.match(squid)\n        guid = ''\n        if squid_match is not None:\n            guid = '{' +\\\n                squid_match.group(1)[::-1]+'-' +\\\n                squid_match.group(2)[::-1]+'-' +\\\n                squid_match.group(3)[::-1]+'-' +\\\n                squid_match.group(4)[::-1]+squid_match.group(5)[::-1] + '-'\n            for index in range(6, 12):\n                guid += squid_match.group(index)[::-1]\n            guid += '}'\n        return guid", "docstring": "Squished GUID (SQUID) to GUID.\n\nA SQUID is a Squished/Compressed version of a GUID to use up less space\nin the registry.\n\nArgs:\nsquid (str): Squished GUID.\n\nReturns:\nstr: the GUID if a valid SQUID provided.", "source": "juraj-google-style"}
{"code": "def persist_experiment(experiment):\n    \n    from benchbuild.utils.schema import Experiment, Session\n\n    session = Session()\n\n    cfg_exp = experiment.id\n    LOG.debug(\"Using experiment ID stored in config: %s\", cfg_exp)\n    exps = session.query(Experiment).filter(Experiment.id == cfg_exp)\n    desc = str(CFG[\"experiment_description\"])\n    name = experiment.name\n\n    if exps.count() == 0:\n        newe = Experiment()\n        newe.id = cfg_exp\n        newe.name = name\n        newe.description = desc\n        session.add(newe)\n        ret = newe\n    else:\n        exps.update({'name': name, 'description': desc})\n        ret = exps.first()\n\n    try:\n        session.commit()\n    except IntegrityError:\n        session.rollback()\n        persist_experiment(experiment)\n\n    return (ret, session)", "docstring": "Persist this experiment in the benchbuild database.\n\nArgs:\nexperiment: The experiment we want to persist.", "source": "juraj-google-style"}
{"code": "def __init__(self):\n        \n        super(JLinkSpeedInfo, self).__init__()\n        self.SizeOfStruct = ctypes.sizeof(self)", "docstring": "Initializes the ``JLinkSpeedInfo`` instance.\n\nSets the size of the structure.\n\nArgs:\nself (JLinkSpeedInfo): the ``JLinkSpeedInfo`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def connect_sync(self, connection_id, connection_string):\n    calldone = threading.Event()\n    results = {}\n\n    def connect_done(callback_connid, callback_adapterid, callback_success, failure_reason):\n        results['success'] = callback_success\n        results['failure_reason'] = failure_reason\n        calldone.set()\n    self.connect_async(connection_id, connection_string, connect_done)\n    calldone.wait()\n    return results", "docstring": "Synchronously connect to a device\n\nArgs:\nconnection_id (int): A unique identifier that will refer to this connection\nconnection_string (string): A DeviceAdapter specific string that can be used to connect to\na device using this DeviceAdapter.\n\nReturns:\ndict: A dictionary with two elements\n'success': a bool with the result of the connection attempt\n'failure_reason': a string with the reason for the failure if we failed", "source": "codesearchnet"}
{"code": "def _get_programs_dict():\n    global __programs_dict\n    if (__programs_dict is not None):\n        return __programs_dict\n    d = __programs_dict = OrderedDict()\n    for pkgname in COLLABORATORS_S:\n        try:\n            package = importlib.import_module(pkgname)\n        except ImportError:\n            continue\n        path_ = os.path.join(os.path.split(package.__file__)[0], 'scripts')\n        bulk = a99.get_exe_info(path_, flag_protected=True)\n        d[pkgname] = {'description': a99.get_obj_doc0(package), 'exeinfo': bulk}\n    return __programs_dict", "docstring": "Builds and returns programs dictionary\n\nThis will have to import the packages in COLLABORATORS_S in order to get their absolute path.\n\nReturns:\ndictionary: {\"packagename\": [ExeInfo0, ...], ...}\n\n\"packagename\" examples: \"f311.explorer\", \"numpy\"", "source": "codesearchnet"}
{"code": "def add_comment(self, line):\n    if (not isinstance(self.last_item, Comment)):\n        comment = Comment(self._structure)\n        self._structure.append(comment)\n    self.last_item.add_line(line)\n    return self", "docstring": "Add a Comment object to the section\n\nUsed during initial parsing mainly\n\nArgs:\nline (str): one line in the comment", "source": "codesearchnet"}
{"code": "def from_dim_sizes(dim_sizes):\n    with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', [dim_sizes]):\n        dim_sizes = tuple((ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, name='dim_sizes') for size in dim_sizes))\n        inner_split = 0\n        for dim, dim_size in enumerate(dim_sizes):\n            if dim_size.shape.ndims == 1:\n                inner_split = dim + 1\n            elif dim_size.shape.ndims != 0:\n                raise ValueError('Each dim_size must be a scalar or a vector')\n        return RaggedTensorDynamicShape(dim_sizes[:inner_split], dim_sizes[inner_split:])", "docstring": "Constructs a ragged shape from a list of dimension sizes.\n\nThis list contains a single tensor for each dimension, where the tensor\nis a scalar if the dimension is uniform, or a vector if the dimension is\nragged.\n\nArgs:\ndim_sizes: List of int32 or int64 scalars or vectors.\n\nReturns:\nA RaggedTensorDynamicShape.", "source": "github-repos"}
{"code": "def basis(sample_paths, time_index):\n    sample_paths = tf.convert_to_tensor(sample_paths, name='sample_paths')\n    if sample_paths.shape.rank == 3:\n        sample_paths = tf.expand_dims(sample_paths, axis=0)\n    shape = tf.shape(sample_paths)\n    num_samples = shape[1]\n    batch_size = shape[0]\n    dim = sample_paths.shape[-1]\n    slice_samples = tf.slice(sample_paths, [0, 0, time_index, 0], [batch_size, num_samples, 1, dim])\n    samples_centered = slice_samples - tf.math.reduce_mean(slice_samples, axis=1, keepdims=True)\n    grid = tf.range(degree + 1, dtype=samples_centered.dtype)\n    grid = tf.meshgrid(*dim * [grid])\n    grid = tf.reshape(tf.stack(grid, -1), [-1, dim])\n    basis_expansion = tf.reduce_prod(samples_centered ** grid, axis=-1)\n    return tf.transpose(basis_expansion, [0, 2, 1])", "docstring": "Computes polynomial basis expansion at the given sample points.\n\nArgs:\nsample_paths: A `Tensor` of either `flaot32` or `float64` dtype and of\neither shape `[num_samples, num_times, dim]` or\n`[batch_size, num_samples, num_times, dim]`.\ntime_index: An integer scalar `Tensor` that corresponds to the time\ncoordinate at which the basis function is computed.\n\nReturns:\nA `Tensor`s of shape `[batch_size, (degree + 1)**dim, num_samples]`.", "source": "github-repos"}
{"code": "def TotalFees(self):\n    amount = Fixed8.Zero()\n    for tx in self.Transactions:\n        amount += tx.SystemFee()\n    return amount", "docstring": "Get the total transaction fees in the block.\n\nReturns:\nFixed8:", "source": "codesearchnet"}
{"code": "def parse_table_data(lines):\n    data = '\\n'.join([i.rstrip() for i in lines if ((not i.startswith(('^', '!', '\n    if data:\n        return read_csv(StringIO(data), index_col=None, sep='\\t')\n    else:\n        return DataFrame()", "docstring": "Parse list of lines from SOFT file into DataFrame.\n\nArgs:\nlines (:obj:`Iterable`): Iterator over the lines.\n\nReturns:\n:obj:`pandas.DataFrame`: Table data.", "source": "codesearchnet"}
{"code": "def depth_soil_density(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_soil_density`'.format(value))\n\n        self._depth_soil_density = value", "docstring": "Corresponds to IDD Field `depth_soil_density`\n\nArgs:\nvalue (float): value for IDD Field `depth_soil_density`\nUnit: kg/m3\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def _remove_files(files):\n    \n    logger.debug(\"Request for file removal (_remove_files()).\")\n\n    for fn in files:\n        if os.path.exists(fn):\n            logger.debug(\"Removing '%s'.\" % fn)\n            os.remove(fn)", "docstring": "Remove all given files.\n\nArgs:\nfiles (list): List of filenames, which will be removed.", "source": "juraj-google-style"}
{"code": "def set_seed(seed: int, deterministic: bool=False):\n    random.seed(seed)\n    np.random.seed(seed)\n    if is_torch_available():\n        torch.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n        if deterministic:\n            torch.use_deterministic_algorithms(True)\n    if is_torch_mlu_available():\n        torch.mlu.manual_seed_all(seed)\n    if is_torch_musa_available():\n        torch.musa.manual_seed_all(seed)\n    if is_torch_npu_available():\n        torch.npu.manual_seed_all(seed)\n    if is_torch_hpu_available():\n        torch.hpu.manual_seed_all(seed)\n    if is_torch_xpu_available():\n        torch.xpu.manual_seed_all(seed)\n    if is_tf_available():\n        import tensorflow as tf\n        tf.random.set_seed(seed)\n        if deterministic:\n            tf.config.experimental.enable_op_determinism()", "docstring": "Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed).\n\nArgs:\nseed (`int`):\nThe seed to set.\ndeterministic (`bool`, *optional*, defaults to `False`):\nWhether to use deterministic algorithms where available. Can slow down training.", "source": "github-repos"}
{"code": "def validate(self, graph):\n        \n        if not nx.is_directed_acyclic_graph(graph):\n            raise DirectedAcyclicGraphInvalid(graph_name=self._name)", "docstring": "Validate the graph by checking whether it is a directed acyclic graph.\n\nArgs:\ngraph (DiGraph): Reference to a DiGraph object from NetworkX.\n\nRaises:\nDirectedAcyclicGraphInvalid: If the graph is not a valid dag.", "source": "juraj-google-style"}
{"code": "def get_structure(atoms, cls=None):\n        \n        symbols = atoms.get_chemical_symbols()\n        positions = atoms.get_positions()\n        lattice = atoms.get_cell()\n\n        cls = Structure if cls is None else cls\n        return cls(lattice, symbols, positions,\n                   coords_are_cartesian=True)", "docstring": "Returns pymatgen structure from ASE Atoms.\n\nArgs:\natoms: ASE Atoms object\ncls: The Structure class to instantiate (defaults to pymatgen structure)\n\nReturns:\nEquivalent pymatgen.core.structure.Structure", "source": "juraj-google-style"}
{"code": "def Verify(self):\n    return getattr(self, self._KEY) is not None", "docstring": "We can properly index this instance into a Map.\n\nReturns:\nTrue if the value in the attribute named by self._KEY for this class\nis not None.  False otherwise.", "source": "github-repos"}
{"code": "def set_epsilon(value):\n    global _EPSILON\n    _EPSILON = value", "docstring": "Sets the value of the fuzz factor used in numeric expressions.\n\nArgs:\nvalue: float. New value of epsilon.\n\nExample:\n>>> tf.keras.backend.epsilon()\n1e-07\n>>> tf.keras.backend.set_epsilon(1e-5)\n>>> tf.keras.backend.epsilon()\n1e-05\n>>> tf.keras.backend.set_epsilon(1e-7)", "source": "github-repos"}
{"code": "def __init__(self, state_handler: sdk_worker.CachingStateHandler, transform_id: str, key_coder: coders.Coder, window_coder: coders.Coder) -> None:\n    self._state_handler = state_handler\n    self._transform_id = transform_id\n    self._key_coder = key_coder\n    self._window_coder = window_coder\n    self._timers_info: Dict[str, TimerInfo] = {}\n    self._all_states: Dict[tuple, FnApiUserRuntimeStateTypes] = {}", "docstring": "Initialize a ``FnApiUserStateContext``.\n\nArgs:\nstate_handler: A StateServicer object.\ntransform_id: The name of the PTransform that this context is associated.\nkey_coder: Coder for the key type.\nwindow_coder: Coder for the window type.", "source": "github-repos"}
{"code": "def __le__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.less_equal, tf.float32)", "docstring": "Returns a TensorFluent for the less-than-or-equal relational operator.\n\nArgs:\nself: The first operand.\nother: The second operand.", "source": "juraj-google-style"}
{"code": "def _handle_emailauth(maildomain='', message=''):\n    print('SteamGuard requires email authentication...')\n    emailauth = input(('Please enter the code sent to your mail address at \"%s\": ' % maildomain))\n    emailauth.upper()\n    return emailauth", "docstring": "Called when SteamGuard requires authentication via e-mail.\nAsks the user to enter the code.\n\nArgs:\nmaildomain: Optional. The mail domain of the e-mail address the SteamGuard\ncode is send to.\nmessage: Optional. A message from Steam service.\n\nReturns:\nA string containing the code.", "source": "codesearchnet"}
{"code": "def load_transcripts(adapter, transcripts_lines=None, build='37', ensembl_genes=None):\n    \n    \n    ensembl_genes = ensembl_genes or adapter.ensembl_genes(build)\n\n    if transcripts_lines is None:\n        transcripts_lines = fetch_ensembl_transcripts(build=build)\n\n    \n    transcripts_dict = parse_transcripts(transcripts_lines)\n    for ens_tx_id in list(transcripts_dict):\n        parsed_tx = transcripts_dict[ens_tx_id]\n        \n        ens_gene_id = parsed_tx['ensembl_gene_id']\n\n        \n        gene_obj = ensembl_genes.get(ens_gene_id)\n        \n        if not gene_obj:\n            transcripts_dict.pop(ens_tx_id)\n            LOG.debug(\"Gene %s does not exist in build %s\", ens_gene_id, build)\n            continue\n\n        \n        parsed_tx['hgnc_id'] = gene_obj['hgnc_id']\n        \n        parsed_tx['primary_transcripts'] = set(gene_obj.get('primary_transcripts', []))\n\n\n    ref_seq_transcripts = 0\n    nr_primary_transcripts = 0\n    nr_transcripts = len(transcripts_dict)\n\n    transcript_objs = []\n\n    with progressbar(transcripts_dict.values(), label=\"Building transcripts\", length=nr_transcripts) as bar:\n        for tx_data in bar:\n\n            \n            \n            \n            \n            \n            \n            \n            \n            tx_data['is_primary'] = False\n            primary_transcripts = tx_data['primary_transcripts']\n            refseq_identifier = None\n            refseq_identifiers = []\n            for category in TRANSCRIPT_CATEGORIES:\n                identifiers = tx_data[category]\n                if not identifiers:\n                    continue\n\n                for refseq_id in identifiers:\n                    \n                    refseq_identifiers.append(refseq_id)\n                    ref_seq_transcripts += 1\n\n                    if refseq_id in primary_transcripts:\n                        refseq_identifier = refseq_id\n                        tx_data['is_primary'] = True\n                        nr_primary_transcripts += 1\n                    \n                    if not refseq_identifier:\n                        refseq_identifier = refseq_id\n\n            if refseq_identifier:\n                tx_data['refseq_id'] = refseq_identifier\n            if refseq_identifiers:\n                tx_data['refseq_identifiers'] = refseq_identifiers\n\n            \n            \n            tx_obj = build_transcript(tx_data, build)\n            transcript_objs.append(tx_obj)\n\n    \n    LOG.info(\"Loading transcripts...\")\n    if len(transcript_objs) > 0:\n        adapter.load_transcript_bulk(transcript_objs)\n\n    LOG.info('Number of transcripts in build %s: %s', build, nr_transcripts)\n    LOG.info('Number of transcripts with refseq identifier: %s', ref_seq_transcripts)\n    LOG.info('Number of primary transcripts: %s', nr_primary_transcripts)\n\n    return transcript_objs", "docstring": "Load all the transcripts\n\nTranscript information is from ensembl.\n\nArgs:\nadapter(MongoAdapter)\ntranscripts_lines(iterable): iterable with ensembl transcript lines\nbuild(str)\nensembl_genes(dict): Map from ensembl_id -> HgncGene\n\nReturns:\ntranscript_objs(list): A list with all transcript objects", "source": "juraj-google-style"}
{"code": "def from_mapping(cls, mapping):\n    out = cls()\n    for (elem, count) in mapping.items():\n        out._set_count(elem, count)\n    return out", "docstring": "Create a bag from a dict of elem->count.\n\nEach key in the dict is added if the value is > 0.\n\nRaises:\nValueError: If any count is < 0.", "source": "codesearchnet"}
{"code": "def __init__(self, input_reader=None, output_writer=None):\n    \n    super(ImageExportTool, self).__init__(\n        input_reader=input_reader, output_writer=output_writer)\n    self._abort = False\n    self._artifact_definitions_path = None\n    self._artifact_filters = None\n    self._artifacts_registry = None\n    self._custom_artifacts_path = None\n    self._destination_path = None\n    self._digests = {}\n    self._filter_collection = file_entry_filters.FileEntryFilterCollection()\n    self._filter_file = None\n    self._path_spec_extractor = extractors.PathSpecExtractor()\n    self._process_memory_limit = None\n    self._resolver_context = context.Context()\n    self._skip_duplicates = True\n    self._source_type = None\n\n    self.has_filters = False\n    self.list_signature_identifiers = False", "docstring": "Initializes the CLI tool object.\n\nArgs:\ninput_reader (Optional[InputReader]): input reader, where None indicates\nthat the stdin input reader should be used.\noutput_writer (Optional[OutputWriter]): output writer, where None\nindicates that the stdout output writer should be used.", "source": "juraj-google-style"}
{"code": "def get_tld(url):\n        \n\n        if url not in URLHelper.__cache:\n            URLHelper.__cache[url] = urlparse(url)\n\n        parts = URLHelper.__cache[url].netloc.split(\".\")\n\n        if len(parts) == 1:\n            return \"\"\n        else:\n            return parts[-1]", "docstring": "Get the tld of the given URL.\n\nArgs:\nurl (str): The URL to get the tld from.\n\nReturns:\nstr: The tld", "source": "juraj-google-style"}
{"code": "def update_subscription(self, *, subscription_id, credit_card_token):\n        \n        payload = {\n            \"creditCardToken\": credit_card_token\n        }\n        fmt = 'subscriptions/{}'.format(subscription_id)\n        return self.client._put(self.url + fmt, json=payload, headers=self.get_headers())", "docstring": "Update information associated with the specified subscription. At the moment it is only possible\nto update the token of the credit card to which the charge of the subscription is made.\n\nArgs:\nsubscription_id: Identification of the subscription.\ncredit_card_token:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def calculate_part_visibility(self, ports):\n        \n        \n        \n        source_port_lookup = {}\n        for part_name, port_infos in SourcePortInfo.filter_parts(ports).items():\n            for port_info in port_infos:\n                source_port_lookup[port_info.connected_value] = (\n                    part_name, port_info.port)\n        \n        \n        for part_name, port_infos in SinkPortInfo.filter_parts(\n                ports).items():\n            for port_info in port_infos:\n                if port_info.value != port_info.disconnected_value:\n                    conn_part, port = source_port_lookup.get(\n                        port_info.value, (None, None))\n                    if conn_part and port == port_info.port:\n                        if conn_part not in self.part_visibility:\n                            self.part_visibility[conn_part] = True\n                        if part_name not in self.part_visibility:\n                            self.part_visibility[part_name] = True", "docstring": "Calculate what is connected to what\n\nArgs:\nports: {part_name: [PortInfo]} from other ports", "source": "juraj-google-style"}
{"code": "def point_dist2(p1, p2):\n    \n    v = vector(p1, p2)\n    return np.dot(v, v)", "docstring": "compute the square of the euclidian distance between two 3D points\n\nArgs:\np1, p2: indexable objects with\nindices 0, 1, 2 corresponding to 3D cartesian coordinates.\nReturns:\nThe square of the euclidian distance between the points.", "source": "juraj-google-style"}
{"code": "def _print_task_data(self, task):\n    print(' {0:s} ({1:s})'.format(task['name'], task['id']))\n    paths = task.get('saved_paths', [])\n    if (not paths):\n        return\n    for path in paths:\n        if path.endswith('worker-log.txt'):\n            continue\n        if path.endswith('{0:s}.log'.format(task.get('id'))):\n            continue\n        if path.startswith('/'):\n            continue\n        print(('   ' + path))", "docstring": "Pretty-prints task data.\n\nArgs:\ntask: Task dict generated by Turbinia.", "source": "codesearchnet"}
{"code": "def read(self, length, timeout):\n    \n    self._read_messages_until_true(\n        lambda: self._buffer_size and self._buffer_size >= length, timeout)\n\n    with self._read_buffer_lock:\n      data, push_back = ''.join(self._read_buffer), ''\n      if length:\n        data, push_back = data[:length], data[length:]\n      self._read_buffer.clear()\n      self._buffer_size = len(push_back)\n      if push_back:\n        self._read_buffer.appendleft(push_back)\n    return data", "docstring": "Read 'length' bytes from this stream transport.\n\nArgs:\nlength: If not 0, read this many bytes from the stream, otherwise read all\navailable data (at least one byte).\ntimeout: timeouts.PolledTimeout to use for this read operation.\n\nReturns:\nThe bytes read from this stream.", "source": "juraj-google-style"}
{"code": "def parse_mip_analysis(mip_config_raw: dict, qcmetrics_raw: dict, sampleinfo_raw: dict) -> dict:\n    \n    outdata = _define_output_dict()\n\n    _config(mip_config_raw, outdata)\n    _qc_metrics(outdata, qcmetrics_raw)\n    _qc_sample_info(outdata, sampleinfo_raw)\n\n    return outdata", "docstring": "Parse the output analysis files from MIP for adding info\nto trend database\n\nArgs:\nmip_config_raw (dict): raw YAML input from MIP analysis config file\nqcmetrics_raw (dict): raw YAML input from MIP analysis qc metric file\nsampleinfo_raw (dict): raw YAML input from MIP analysis qc sample info file\nReturns:\ndict: parsed data", "source": "juraj-google-style"}
{"code": "def isnan(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.unary import isnan\n    return isnan(self)", "docstring": "Returns boolean features, `True` in the NaN elements of the\n[`EventSet`][temporian.EventSet].\n\nNote that for `int` and `bool` this will always be `False` since those types\ndon't support NaNs. It only makes actual sense to use on `float` (or\n`tp.float32`) features.\n\nSee also `evset.notnan()`.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2, 3],\n...     features={\"M\":[np.nan, 5., np.nan], \"N\":  [-1, 0, 5]},\n... )\n>>> b = a.isnan()\n>>> b\nindexes: ...\n'M': [ True False True]\n'N': [False False False]\n...\n\n>>> # Count nans\n>>> b[\"M\"].cast(int).cumsum()\nindexes: ...\ntimestamps: [1. 2. 3.]\n'M': [1 1 2]\n...\n\n```\n\nReturns:\nEventSet with boolean features.", "source": "github-repos"}
{"code": "def _MergeDifferentId(self):\n    for a in self._GetIter(self.feed_merger.a_schedule):\n        for b in self._GetIter(self.feed_merger.b_schedule):\n            try:\n                self._Add(a, b, self._MergeEntities(a, b))\n                self._num_merged += 1\n            except MergeError:\n                continue\n    for a in self._GetIter(self.feed_merger.a_schedule):\n        if (a not in self.feed_merger.a_merge_map):\n            self._num_not_merged_a += 1\n            newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))\n            self._Add(a, None, self._Migrate(a, self.feed_merger.a_schedule, newid))\n    for b in self._GetIter(self.feed_merger.b_schedule):\n        if (b not in self.feed_merger.b_merge_map):\n            self._num_not_merged_b += 1\n            newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))\n            self._Add(None, b, self._Migrate(b, self.feed_merger.b_schedule, newid))\n    return self._num_merged", "docstring": "Tries to merge all possible combinations of entities.\n\nThis tries to merge every entity in the old schedule with every entity in\nthe new schedule. Unlike _MergeSameId, the ids do not need to match.\nHowever, _MergeDifferentId is much slower than _MergeSameId.\n\nThis method makes use of various methods like _Merge and _Migrate which\nare not implemented in the abstract DataSetMerger class. These method\nshould be overwritten in a subclass to allow _MergeSameId to work with\ndifferent entity types.\n\nReturns:\nThe number of merged entities.", "source": "codesearchnet"}
{"code": "def categorytree(self, category, depth=5):\n\n    def __cat_tree_rec(cat, depth, tree, level, categories, links):\n        ' recursive function to build out the tree '\n        tree[cat] = dict()\n        tree[cat]['depth'] = level\n        tree[cat]['sub-categories'] = dict()\n        tree[cat]['links'] = list()\n        tree[cat]['parent-categories'] = list()\n        parent_cats = list()\n        if (cat not in categories):\n            tries = 0\n            while True:\n                if (tries > 10):\n                    raise MediaWikiCategoryTreeError(cat)\n                try:\n                    pag = self.page('{0}:{1}'.format(self.category_prefix, cat))\n                    categories[cat] = pag\n                    parent_cats = categories[cat].categories\n                    links[cat] = self.categorymembers(cat, results=None, subcategories=True)\n                    break\n                except PageError:\n                    raise PageError('{0}:{1}'.format(self.category_prefix, cat))\n                except KeyboardInterrupt:\n                    raise\n                except Exception:\n                    tries = (tries + 1)\n                    time.sleep(1)\n        else:\n            parent_cats = categories[cat].categories\n        tree[cat]['parent-categories'].extend(parent_cats)\n        tree[cat]['links'].extend(links[cat][0])\n        if (depth and (level >= depth)):\n            for ctg in links[cat][1]:\n                tree[cat]['sub-categories'][ctg] = None\n        else:\n            for ctg in links[cat][1]:\n                __cat_tree_rec(ctg, depth, tree[cat]['sub-categories'], (level + 1), categories, links)\n    if (not isinstance(category, list)):\n        cats = [category]\n    else:\n        cats = category\n    if ((len(cats) == 1) and ((cats[0] is None) or (cats[0] == ''))):\n        msg = \"CategoryTree: Parameter 'category' must either be a list of one or more categories or a string; provided: '{}'\".format(category)\n        raise ValueError(msg)\n    if ((depth is not None) and (depth < 1)):\n        msg = \"CategoryTree: Parameter 'depth' must be either None (for the full tree) or be greater than 0\"\n        raise ValueError(msg)\n    results = dict()\n    categories = dict()\n    links = dict()\n    for cat in cats:\n        if ((cat is None) or (cat == '')):\n            continue\n        __cat_tree_rec(cat, depth, results, 0, categories, links)\n    return results", "docstring": "Generate the Category Tree for the given categories\n\nArgs:\ncategory(str or list of strings): Category name(s)\ndepth(int): Depth to traverse the tree\nReturns:\ndict: Category tree structure\nNote:\nSet depth to **None** to get the whole tree\nNote:\nReturn Data Structure: Subcategory contains the same \\\nrecursive structure\n\n>>> {\n'category': {\n'depth': Number,\n'links': list,\n'parent-categories': list,\n'sub-categories': dict\n}\n}\n\n.. versionadded:: 0.3.10", "source": "codesearchnet"}
{"code": "def find_synonym(self, word):\n    if (word and self.synonyms):\n        reverse_lookup = {}\n        for (k, v) in self.synonyms.items():\n            for i in v:\n                reverse_lookup[i.lower()] = k.lower()\n        if (word.lower() in reverse_lookup):\n            return reverse_lookup[word.lower()]\n    return word", "docstring": "Given a string and a dict of synonyms, returns the 'preferred'\nword. Case insensitive.\n\nArgs:\nword (str): A word.\n\nReturns:\nstr: The preferred word, or the input word if not found.\n\nExample:\n>>> syn = {'snake': ['python', 'adder']}\n>>> find_synonym('adder', syn)\n'snake'\n>>> find_synonym('rattler', syn)\n'rattler'\n\nTODO:\nMake it handle case, returning the same case it received.", "source": "codesearchnet"}
{"code": "def _set_label(self, which, label, **kwargs):\n        \n        prop_default = {\n            'fontsize': 18,\n        }\n\n        for prop, default in prop_default.items():\n            kwargs[prop] = kwargs.get(prop, default)\n\n        setattr(self.label, which, label)\n        setattr(self.label, which + '_kwargs', kwargs)\n        return", "docstring": "Private method for setting labels.\n\nArgs:\nwhich (str): The indicator of which part of the plots\nto adjust. This currently handles `xlabel`/`ylabel`,\nand `title`.\nlabel (str): The label to be added.\nfontsize (int, optional): Fontsize for associated label. Default\nis None.", "source": "juraj-google-style"}
{"code": "def enable_store_parameters_in_results(kernel):\n    kernel_stack = []\n    while (hasattr(kernel, 'parameters') and ('inner_kernel' in kernel.parameters)):\n        kernel_stack.append(kernel)\n        kernel = kernel.parameters['inner_kernel']\n\n    def _recreate_kernel(kernel, parameters):\n        new_parameters = kernel.parameters.copy()\n        new_parameters.update(parameters)\n        if ('store_parameters_in_results' in new_parameters):\n            new_parameters['store_parameters_in_results'] = True\n        with deprecation.silence():\n            return type(kernel)(**new_parameters)\n    if hasattr(kernel, 'parameters'):\n        kernel = _recreate_kernel(kernel, {})\n    for outer_kernel in reversed(kernel_stack):\n        outer_kernel = _recreate_kernel(outer_kernel, {'inner_kernel': kernel})\n        kernel = outer_kernel\n    return kernel", "docstring": "Enables the `store_parameters_in_results` parameter in a chain of kernels.\n\nThis is a temporary utility for use during the transition period of the\nparameter storage methods.\n\nArgs:\nkernel: A TransitionKernel.\n\nReturns:\nkernel: The same kernel, but recreated with `store_parameters_in_results`\nrecursively set to `True` in its parameters and its inner kernels (as\nappropriate).", "source": "codesearchnet"}
{"code": "def __validate_args(self, func_name, args, kwargs):\n    from pyvalid.validators import Validator\n    for (i, (arg_name, accepted_values)) in enumerate(self.accepted_args):\n        if (i < len(args)):\n            value = args[i]\n        elif (arg_name in kwargs):\n            value = kwargs[arg_name]\n        elif (i in self.optional_args):\n            continue\n        else:\n            raise InvalidArgumentNumberError(func_name)\n        is_valid = False\n        for accepted_val in accepted_values:\n            is_validator = (isinstance(accepted_val, Validator) or (isinstance(accepted_val, MethodType) and hasattr(accepted_val, '__func__') and isinstance(accepted_val.__func__, Validator)))\n            if is_validator:\n                is_valid = accepted_val(value)\n            elif isinstance(accepted_val, type):\n                is_valid = isinstance(value, accepted_val)\n            else:\n                is_valid = (value == accepted_val)\n            if is_valid:\n                break\n        if (not is_valid):\n            ord_num = self.__ordinal((i + 1))\n            raise ArgumentValidationError(ord_num, func_name, value, accepted_values)", "docstring": "Compare value of each required argument with list of\naccepted values.\n\nArgs:\nfunc_name (str): Function name.\nargs (list): Collection of the position arguments.\nkwargs (dict): Collection of the keyword arguments.\n\nRaises:\nInvalidArgumentNumberError: When position or count of the arguments\nis incorrect.\nArgumentValidationError: When encountered unexpected argument\nvalue.", "source": "codesearchnet"}
{"code": "def get_video_transcript_data(video_id, language_code):\n    \n    video_transcript = VideoTranscript.get_or_none(video_id, language_code)\n    if video_transcript:\n        try:\n            return dict(file_name=video_transcript.filename, content=video_transcript.transcript.file.read())\n        except Exception:\n            logger.exception(\n                '[edx-val] Error while retrieving transcript for video=%s -- language_code=%s',\n                video_id,\n                language_code\n            )\n            raise", "docstring": "Get video transcript data\n\nArguments:\nvideo_id(unicode): An id identifying the Video.\nlanguage_code(unicode): it will be the language code of the requested transcript.\n\nReturns:\nA dict containing transcript file name and its content.", "source": "juraj-google-style"}
{"code": "def compile_date(self):\n        \n        result = self._dll.JLINKARM_GetCompileDateTime()\n        return ctypes.cast(result, ctypes.c_char_p).value.decode()", "docstring": "Returns a string specifying the date and time at which the DLL was\ntranslated.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nDatetime string.", "source": "juraj-google-style"}
{"code": "def load(self, spec):\n        \n        if spec.template is not None:\n            return self.loader.unicode(spec.template, spec.template_encoding)\n\n        path = self._find(spec)\n\n        return self.loader.read(path, spec.template_encoding)", "docstring": "Find and return the template associated to a TemplateSpec instance.\n\nReturns the template as a unicode string.\n\nArguments:\n\nspec: a TemplateSpec instance.", "source": "juraj-google-style"}
{"code": "def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:\n    if attention_mask.dim() == 3:\n        extended_attention_mask = attention_mask[:, None, :, :]\n    elif attention_mask.dim() == 2:\n        extended_attention_mask = attention_mask[:, None, None, :]\n    else:\n        raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')\n    extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n    return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArguments:\nattention_mask (`torch.Tensor`):\nMask with ones indicating tokens to attend to, zeros for tokens to ignore.\ninput_shape (`Tuple[int]`):\nThe shape of the input to the model.\ndevice: (`torch.device`):\nThe device of the input to the model.\n\nReturns:\n`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github-repos"}
{"code": "def validate_element(self, value):\n    if (not isinstance(value, self.type)):\n        if (isinstance(value, six.integer_types) and (self.type == float)):\n            return float(value)\n        if (value is None):\n            if self.required:\n                raise ValidationError('Required field is missing')\n        else:\n            try:\n                name = self.name\n            except AttributeError:\n                raise ValidationError(('Expected type %s for %s, found %s (type %s)' % (self.type, self.__class__.__name__, value, type(value))))\n            else:\n                raise ValidationError(('Expected type %s for field %s, found %s (type %s)' % (self.type, name, value, type(value))))\n    return value", "docstring": "Validate single element of field.\n\nThis is different from validate in that it is used on individual\nvalues of repeated fields.\n\nArgs:\nvalue: Value to validate.\n\nReturns:\nThe value casted in the expected type.\n\nRaises:\nValidationError if value is not expected type.", "source": "codesearchnet"}
{"code": "def CheckTrailingSemicolon(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    match = Match('^(.*\\\\)\\\\s*)\\\\{', line)\n    if match:\n        closing_brace_pos = match.group(1).rfind(')')\n        opening_parenthesis = ReverseCloseExpression(clean_lines, linenum, closing_brace_pos)\n        if (opening_parenthesis[2] > (- 1)):\n            line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]\n            macro = Search('\\\\b([A-Z_][A-Z0-9_]*)\\\\s*$', line_prefix)\n            func = Match('^(.*\\\\])\\\\s*$', line_prefix)\n            if ((macro and (macro.group(1) not in ('TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF'))) or (func and (not Search('\\\\boperator\\\\s*\\\\[\\\\s*\\\\]', func.group(1)))) or Search('\\\\b(?:struct|union)\\\\s+alignas\\\\s*$', line_prefix) or Search('\\\\bdecltype$', line_prefix) or Search('\\\\s+=\\\\s*$', line_prefix)):\n                match = None\n        if (match and (opening_parenthesis[1] > 1) and Search('\\\\]\\\\s*$', clean_lines.elided[(opening_parenthesis[1] - 1)])):\n            match = None\n    else:\n        match = Match('^(.*(?:else|\\\\)\\\\s*const)\\\\s*)\\\\{', line)\n        if (not match):\n            prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]\n            if (prevline and Search('[;{}]\\\\s*$', prevline)):\n                match = Match('^(\\\\s*)\\\\{', line)\n    if match:\n        (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1)))\n        if ((endpos > (- 1)) and Match('^\\\\s*;', endline[endpos:])):\n            raw_lines = clean_lines.raw_lines\n            ParseNolintSuppressions(filename, raw_lines[(endlinenum - 1)], (endlinenum - 1), error)\n            ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum, error)\n            error(filename, endlinenum, 'readability/braces', 4, \"You don't need a ; after a }\")", "docstring": "Looks for redundant trailing semicolon.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def _parameter_net(self, theta, kernel_shape=9):\n        \n        with argscope(FullyConnected, nl=tf.nn.leaky_relu):\n            net = FullyConnected('fc1', theta, 64)\n            net = FullyConnected('fc2', net, 128)\n\n        pred_filter = FullyConnected('fc3', net, kernel_shape ** 2, nl=tf.identity)\n        pred_filter = tf.reshape(pred_filter, [BATCH, kernel_shape, kernel_shape, 1], name=\"pred_filter\")\n        logger.info('Parameter net output: {}'.format(pred_filter.get_shape().as_list()))\n        return pred_filter", "docstring": "Estimate filters for convolution layers\n\nArgs:\ntheta: angle of filter\nkernel_shape: size of each filter\n\nReturns:\nlearned filter as [B, k, k, 1]", "source": "juraj-google-style"}
{"code": "def _concat(self):\n    if len(self._variable_list) == 1:\n        with ops.name_scope(None):\n            return array_ops.identity(self._variable_list[0], name=self._name)\n    partition_axes = self._partition_axes()\n    if len(partition_axes) > 1:\n        raise NotImplementedError('Cannot concatenate along more than one dimension: %s.  Multi-axis partition concat is not supported' % str(partition_axes))\n    partition_ix = partition_axes[0]\n    with ops.name_scope(self._name + '/ConcatPartitions/'):\n        concatenated = array_ops.concat(self._variable_list, partition_ix)\n    with ops.name_scope(None):\n        return array_ops.identity(concatenated, name=self._name)", "docstring": "Returns the overall concatenated value as a `Tensor`.\n\nThis is different from using the partitioned variable directly as a tensor\n(through tensor conversion and `as_tensor`) in that it creates a new set of\noperations that keeps the control dependencies from its scope.\n\nReturns:\n`Tensor` containing the concatenated value.", "source": "github-repos"}
{"code": "def to_dict(self):\n    d = {}\n    d[TestResultEnums.RECORD_NAME] = self.test_name\n    d[TestResultEnums.RECORD_CLASS] = self.test_class\n    d[TestResultEnums.RECORD_BEGIN_TIME] = self.begin_time\n    d[TestResultEnums.RECORD_END_TIME] = self.end_time\n    d[TestResultEnums.RECORD_RESULT] = self.result\n    d[TestResultEnums.RECORD_UID] = self.uid\n    d[TestResultEnums.RECORD_SIGNATURE] = self.signature\n    d[TestResultEnums.RECORD_RETRY_PARENT] = self.retry_parent.signature if self.retry_parent else None\n    d[TestResultEnums.RECORD_PARENT] = {'parent': self.parent[0].signature, 'type': self.parent[1].value} if self.parent else None\n    d[TestResultEnums.RECORD_EXTRAS] = self.extras\n    d[TestResultEnums.RECORD_DETAILS] = self.details\n    d[TestResultEnums.RECORD_TERMINATION_SIGNAL_TYPE] = self.termination_signal_type\n    d[TestResultEnums.RECORD_EXTRA_ERRORS] = {key: value.to_dict() for key, value in self.extra_errors.items()}\n    d[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace\n    return d", "docstring": "Gets a dictionary representating the content of this class.\n\nReturns:\nA dictionary representating the content of this class.", "source": "github-repos"}
{"code": "def status(self, order_id):\n        \n\n        self.logger.debug('Get status of order ' + order_id)\n        url = '%(base_url)s/order/%(order_id)s' % {\n            'base_url': self.base_url, 'order_id': order_id\n        }\n        r = self.gbdx_connection.get(url)\n        r.raise_for_status()\n        return r.json().get(\"acquisitions\", {})", "docstring": "Checks imagery order status. There can be more than one image per\norder and this function returns the status of all images\nwithin the order.\n\nArgs:\norder_id (str): The id of the order placed.\n\nReturns:\nList of dictionaries, one per image. Each dictionary consists\nof the keys 'acquisition_id', 'location' and 'state'.", "source": "juraj-google-style"}
{"code": "def remove_sites_from_neighbours( self, remove_labels ):\n        \n        if type( remove_labels ) is str:\n            remove_labels = [ remove_labels ]\n        self.neighbours = set( n for n in self.neighbours if n.label not in remove_labels )", "docstring": "Removes sites from the set of neighbouring sites if these have labels in remove_labels.\n\nArgs:\nRemove_labels (List) or (Str): List of Site labels to be removed from the cluster neighbour set.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def dimension_values(self, dimension, expanded=True, flat=True):\n        \n        index = self.get_dimension_index(dimension)\n        if index == 0:\n            return np.array([self.data if np.isscalar(self.data) else self.data[index]])\n        elif index == 1:\n            return [] if np.isscalar(self.data) else np.array([self.data[1]])\n        else:\n            return super(Annotation, self).dimension_values(dimension)", "docstring": "Return the values along the requested dimension.\n\nArgs:\ndimension: The dimension to return values for\nexpanded (bool, optional): Whether to expand values\nflat (bool, optional): Whether to flatten array\n\nReturns:\nNumPy array of values along the requested dimension", "source": "juraj-google-style"}
{"code": "def Match(self, file_entry):\n    \n    if not file_entry:\n      return False\n\n    filename = file_entry.name.lower()\n    return filename == self._filename", "docstring": "Determines if a file entry matches the filter.\n\nArgs:\nfile_entry (dfvfs.FileEntry): a file entry.\n\nReturns:\nbool: True if the file entry matches the filter.", "source": "juraj-google-style"}
{"code": "def _build_request(self, verb, verb_arguments):\n        \n        method = getattr(self._component, verb)\n\n        \n        \n        \n        \n        method_args = {str(k): v for k, v in verb_arguments.items()}\n        return method(**method_args)", "docstring": "Builds HttpRequest object.\n\nArgs:\nverb (str): Request verb (ex. insert, update, delete).\nverb_arguments (dict): Arguments to be passed with the request.\n\nReturns:\nhttplib2.HttpRequest: HttpRequest to be sent to the API.", "source": "juraj-google-style"}
{"code": "def extract_lookups(value):\n    \n    lookups = set()\n    if isinstance(value, basestring):\n        lookups = lookups.union(extract_lookups_from_string(value))\n    elif isinstance(value, list):\n        for v in value:\n            lookups = lookups.union(extract_lookups(v))\n    elif isinstance(value, dict):\n        for v in value.values():\n            lookups = lookups.union(extract_lookups(v))\n    return lookups", "docstring": "Recursively extracts any stack lookups within the data structure.\n\nArgs:\nvalue (one of str, list, dict): a structure that contains lookups to\noutput values\n\nReturns:\nlist: list of lookups if any", "source": "juraj-google-style"}
{"code": "def _validate_chain_strength(sampler, chain_strength):\n    \n    properties = sampler.properties\n\n    if 'extended_j_range' in properties:\n        max_chain_strength = - min(properties['extended_j_range'])\n    elif 'j_range' in properties:\n        max_chain_strength = - min(properties['j_range'])\n    else:\n        raise ValueError(\"input sampler should have 'j_range' and/or 'extended_j_range' property.\")\n\n    if chain_strength is None:\n        chain_strength = max_chain_strength\n    elif chain_strength > max_chain_strength:\n        raise ValueError(\"Provided chain strength exceedds the allowed range.\")\n\n    return chain_strength", "docstring": "Validate the provided chain strength, checking J-ranges of the sampler's children.\n\nArgs:\nchain_strength (float) The provided chain strength.  Use None to use J-range.\n\nReturns (float):\nA valid chain strength, either provided or based on available J-range.  Positive finite float.", "source": "juraj-google-style"}
{"code": "def __init__(self, counter_factory, state_sampler):\n    self._counter_factory = counter_factory\n    self._state_sampler = state_sampler\n    self._latest_step = None\n    self.bytes_read_counter = None\n    self.scoped_state = None", "docstring": "Create a new IO read counter.\n\nArgs:\ncounter_factory: A counters.CounterFactory to create byte counters.\nstate_sampler: A statesampler.StateSampler to transition into read states.", "source": "github-repos"}
{"code": "def partial_tile(cls, tile_assignment):\n    if not isinstance(tile_assignment, _np.ndarray):\n        raise TypeError('PartialTile assignment must be of type np.ndarray')\n    dims = list(tile_assignment.shape)\n    flattened_devices = tile_assignment.reshape(-1, order='C')\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices), replicate_on_last_tile_dim=True))", "docstring": "Returns a partially tiled sharding attribute.\n\nThis is similar to tile(), but tile_assignment has one more dimension than\nthe tensor, and tiles in the last dimension of tile_assignment are\nreplicated.\n\nArgs:\ntile_assignment: An np.ndarray describing the topology of the tiling and\nwhich device will compute which part of the topology.\n\nRaises:\nTypeError: tile_assignment was not of np.array type.", "source": "github-repos"}
{"code": "def has_no_error(state, incorrect_msg='Your code generated an error. Fix it and try again!'):\n    if state.reporter.get_errors():\n        state.do_test(incorrect_msg)\n    return state", "docstring": "Check whether the submission did not generate a runtime error.\n\nSimply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.\nBy default, after the entire SCT finished executing, ``sqlwhat`` will check\nfor errors before marking the exercise as correct. You can disable this behavior\nby using ``Ex().allow_error()``.\n\nArgs:\nincorrect_msg: If specified, this overrides the automatically generated feedback message\nin case the student's query did not return a result.", "source": "codesearchnet"}
{"code": "def entry_point(__func: Callable) -> Callable:\n    \n    if __func.__module__ == '__main__':\n        import sys\n        sys.exit(__func())\n    else:\n        return __func", "docstring": "Execute function when module is run directly.\n\nNote:\nThis allows fall through for importing modules that use it.\n\nArgs:\n__func: Function to run", "source": "juraj-google-style"}
{"code": "def cmd_startstop(options):\n    \n    statelu = {\"start\": \"stopped\", \"stop\": \"running\"}\n    options.inst_state = statelu[options.command]\n    debg.dprint(\"toggle set state: \", options.inst_state)\n    (i_info, param_str) = gather_data(options)\n    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)\n    response = awsc.startstop(tar_inst, options.command)\n    responselu = {\"start\": \"StartingInstances\", \"stop\": \"StoppingInstances\"}\n    filt = responselu[options.command]\n    resp = {}\n    state_term = ('CurrentState', 'PreviousState')\n    for i, j in enumerate(state_term):\n        resp[i] = response[\"{0}\".format(filt)][0][\"{0}\".format(j)]['Name']\n    print(\"Current State: {}{}{}  -  Previous State: {}{}{}\\n\".\n          format(C_STAT[resp[0]], resp[0], C_NORM,\n                 C_STAT[resp[1]], resp[1], C_NORM))", "docstring": "Start or Stop the specified instance.\n\nFinds instances that match args and instance-state expected by the\ncommand.  Then, the target instance is determined, the action is\nperformed on the instance, and the eturn information is displayed.\n\nArgs:\noptions (object): contains args and data from parser.", "source": "juraj-google-style"}
{"code": "def predict_proba(self, a, b, idx=0, **kwargs):\n    return self.predict_dataset(DataFrame([[a, b]], columns=['A', 'B']))", "docstring": "Use Jarfo to predict the causal direction of a pair of vars.\n\nArgs:\na (numpy.ndarray): Variable 1\nb (numpy.ndarray): Variable 2\nidx (int): (optional) index number for printing purposes\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "codesearchnet"}
{"code": "def _handle_message(self, message, handle_wrte=True):\n    if (message.command == 'OKAY'):\n        self._set_or_check_remote_id(message.arg0)\n        if (not self._expecting_okay):\n            raise usb_exceptions.AdbProtocolError('%s received unexpected OKAY: %s', self, message)\n        self._expecting_okay = False\n    elif (message.command == 'CLSE'):\n        self.closed_state = self.ClosedState.CLOSED\n    elif (not handle_wrte):\n        raise usb_exceptions.AdbProtocolError('%s received WRTE before OKAY/CLSE: %s', self, message)\n    else:\n        with self._read_buffer_lock:\n            self._read_buffer.append(message.data)\n            self._buffer_size += len(message.data)", "docstring": "Handle a message that was read for this stream.\n\nFor each message type, this means:\nOKAY: Check id's and make sure we are expecting an OKAY.  Clear the\nself._expecting_okay flag so any pending write()'s know.\nCLSE: Set our internal state to closed.\nWRTE: Add the data read to our internal read buffer.  Note we don't\nreturn the actual data because it may not be this thread that needs it.\n\nArgs:\nmessage: Message that was read.\nhandle_wrte: If True, we can handle WRTE messages, otherwise raise.\n\nRaises:\nAdbProtocolError: If we get a WRTE message but handle_wrte is False.", "source": "codesearchnet"}
{"code": "def parse_date(value):\n    if (not value):\n        return None\n    if isinstance(value, datetime.date):\n        return value\n    return parse_datetime(value).date()", "docstring": "Attempts to parse `value` into an instance of ``datetime.date``. If\n`value` is ``None``, this function will return ``None``.\n\nArgs:\nvalue: A timestamp. This can be a string, datetime.date, or\ndatetime.datetime value.", "source": "codesearchnet"}
{"code": "def get_enabled_references(self, datas, meta_references):\n        \n        references = OrderedDict()\n\n        for section in meta_references:\n            references[section] = self.get_reference(datas, section)\n\n        return references", "docstring": "Get enabled manifest references declarations.\n\nEnabled references are defined through meta references declaration,\nevery other references are ignored.\n\nArguments:\ndatas (dict): Data where to search for reference declarations.\nThis is commonly the fully parsed manifest.\nmeta_references (list): List of enabled reference names.\n\nReturns:\ncollections.OrderedDict: Serialized enabled references datas.", "source": "juraj-google-style"}
{"code": "def setup_gpu(required_gpus):\n    if required_gpus == 0:\n        return\n    available_gpus = tf.config.experimental.list_physical_devices('GPU')\n    if not available_gpus:\n        raise ValueError('requires at least one physical GPU')\n    if len(available_gpus) >= required_gpus:\n        tf.config.set_visible_devices(available_gpus[:required_gpus])\n    else:\n        num_logical_gpus = required_gpus - len(available_gpus) + 1\n        logical_gpus = [tf.config.LogicalDeviceConfiguration(memory_limit=256) for _ in range(num_logical_gpus)]\n        tf.config.set_logical_device_configuration(available_gpus[0], logical_gpus)", "docstring": "Sets up the GPU devices.\n\nIf there're more available GPUs than needed, it hides the additional ones. If\nthere're less, it creates logical devices. This is to make sure the tests see\na fixed number of GPUs regardless of the environment.\n\nArgs:\nrequired_gpus: an integer. The number of GPUs required.\n\nRaises:\nValueError: if num_gpus is larger than zero but no GPU is available.", "source": "github-repos"}
{"code": "def _create_route(self, env, item):\n        \n        if item.name in env:\n            if isinstance(env[item.name], ApiRoutesByVersion):\n                if item.version in env[item.name].at_version:\n                    existing_dt = env[item.name].at_version[item.version]\n                    raise InvalidSpec(\n                        'Route %s at version %d already defined (%s:%d).' % (\n                            quote(item.name), item.version, existing_dt._ast_node.path,\n                            existing_dt._ast_node.lineno),\n                        item.lineno, item.path)\n            else:\n                existing_dt = env[item.name]\n                raise InvalidSpec(\n                    'Symbol %s already defined (%s:%d).' % (\n                        quote(item.name), existing_dt._ast_node.path,\n                        existing_dt._ast_node.lineno),\n                    item.lineno, item.path)\n        else:\n            env[item.name] = ApiRoutesByVersion()\n\n        route = ApiRoute(\n            name=item.name,\n            version=item.version,\n            ast_node=item,\n        )\n        env[route.name].at_version[route.version] = route\n        return route", "docstring": "Constructs a route and adds it to the environment.\n\nArgs:\nenv (dict): The environment of defined symbols. A new key is added\ncorresponding to the name of this new route.\nitem (AstRouteDef): Raw route definition from the parser.\n\nReturns:\nstone.api.ApiRoutesByVersion: A group of fully-defined routes indexed by versions.", "source": "juraj-google-style"}
{"code": "def __init__(self, unique_identifier=None, attribute_names=None):\n        \n        super(GetAttributesRequestPayload, self).__init__(\n            enums.Tags.REQUEST_PAYLOAD)\n\n        self._unique_identifier = None\n        self._attribute_names = list()\n\n        self.unique_identifier = unique_identifier\n        self.attribute_names = attribute_names", "docstring": "Construct a GetAttributes request payload.\n\nArgs:\nunique_identifier (string): The ID of the managed object with\nwhich the retrieved attributes should be associated. Optional,\ndefaults to None.\nattribute_names: A list of strings identifying the names of the\nattributes associated with the managed object. Optional,\ndefaults to None.", "source": "juraj-google-style"}
{"code": "def shadow_calc(data):\n    \n\n    up_shadow = abs(data.high - (max(data.open, data.close)))\n    down_shadow = abs(data.low - (min(data.open, data.close)))\n    entity = abs(data.open - data.close)\n    towards = True if data.open < data.close else False\n    print('=' * 15)\n    print('up_shadow : {}'.format(up_shadow))\n    print('down_shadow : {}'.format(down_shadow))\n    print('entity: {}'.format(entity))\n    print('towards : {}'.format(towards))\n    return up_shadow, down_shadow, entity, data.date, data.code", "docstring": "计算上下影线\n\nArguments:\ndata {DataStruct.slice} -- 输入的是一个行情切片\n\nReturns:\nup_shadow {float} -- 上影线\ndown_shdow {float} -- 下影线\nentity {float} -- 实体部分\ndate {str} -- 时间\ncode {str} -- 代码", "source": "juraj-google-style"}
{"code": "def _runOneBenchmark(self, default_device, num_iters=10, static_unroll=False, steps=10):\n\n    def loop_body(i, x):\n        with ops.device('/gpu:0'):\n            nx = nn_ops.conv2d(input=x, filter=kernel, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC', name='conv2d')\n            ni = math_ops.add(i, 1)\n            return (ni, nx)\n    ops.reset_default_graph()\n    with session.Session() as sess, ops.device(default_device):\n        i, x, kernel = self._getInitVariables()\n        self.evaluate(variables.global_variables_initializer())\n        if static_unroll:\n            for _ in range(steps):\n                i, x = loop_body(i, x)\n        else:\n            i, x = while_loop_tf.while_loop(lambda i, _: i < steps, loop_body, [i, x], parallel_iterations=steps, swap_memory=True)\n        r = math_ops.reduce_sum(x)\n        dx, dk = gradients_impl.gradients(r, [x, kernel])\n        r = control_flow_ops.group(dx, dk)\n        for _ in range(3):\n            self.evaluate(r)\n        start_time = time.time()\n        for _ in range(num_iters):\n            self.evaluate(r)\n        return (time.time() - start_time) / num_iters", "docstring": "Evaluate the while loop performance.\n\nArgs:\ndefault_device: The default device to run all ops except the loop_body.\nloop_body is always run on GPU.\nnum_iters: Number of iterations to run.\nstatic_unroll: If true, run unrolled version; otherwise, run while_loop.\nsteps: Total number of repeated steps to run the loop.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def _get_rest_doc(self, request, start_response):\n    \n    api = request.body_json['api']\n    version = request.body_json['version']\n\n    generator = discovery_generator.DiscoveryGenerator(request=request)\n    services = [s for s in self._backend.api_services if\n                s.api_info.name == api and s.api_info.api_version == version]\n    doc = generator.pretty_print_config_to_json(services)\n    if not doc:\n      error_msg = ('Failed to convert .api to discovery doc for '\n                   'version %s of api %s') % (version, api)\n      _logger.error('%s', error_msg)\n      return util.send_wsgi_error_response(error_msg, start_response)\n    return self._send_success_response(doc, start_response)", "docstring": "Sends back HTTP response with API directory.\n\nThis calls start_response and returns the response body.  It will return\nthe discovery doc for the requested api/version.\n\nArgs:\nrequest: An ApiRequest, the transformed request sent to the Discovery API.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string, the response body.", "source": "juraj-google-style"}
{"code": "def attention_mask_autoregressive(query_pos, dtype=tf.float32):\n    memory_pos = rename_length_to_memory_length(query_pos)\n    return (mtf.cast(mtf.less(query_pos, memory_pos), dtype) * (- 1000000000.0))", "docstring": "Bias for self-attention where attention to the right is disallowed.\n\nArgs:\nquery_pos: a mtf.Tensor with shape [..., length_dim]\ndtype: a tf.dtype\n\nReturns:\na mtf.Tensor with shape [..., length_dim, memory_length_dim]", "source": "codesearchnet"}
{"code": "def unwrap_or_else(self, callback: Callable[([], U)]) -> Union[(T, U)]:\n    return (self._val if self._is_some else callback())", "docstring": "Returns the contained value or computes it from ``callback``.\n\nArgs:\ncallback: The the default callback.\n\nReturns:\nThe contained value if the :py:class:`Option` is ``Some``,\notherwise ``callback()``.\n\nExamples:\n>>> Some(0).unwrap_or_else(lambda: 111)\n0\n>>> NONE.unwrap_or_else(lambda: 'ha')\n'ha'", "source": "codesearchnet"}
{"code": "def to_barrier_key(cls, barrier_index_key):\n    \n    barrier_index_path = barrier_index_key.to_path()\n\n    \n    \n    (pipeline_kind, dependent_pipeline_id,\n     unused_kind, purpose) = barrier_index_path[-4:]\n\n    barrier_record_path = (\n        pipeline_kind, dependent_pipeline_id,\n        _BarrierRecord.kind(), purpose)\n\n    return db.Key.from_path(*barrier_record_path)", "docstring": "Converts a _BarrierIndex key to a _BarrierRecord key.\n\nArgs:\nbarrier_index_key: db.Key for a _BarrierIndex entity.\n\nReturns:\ndb.Key for the corresponding _BarrierRecord entity.", "source": "juraj-google-style"}
{"code": "def clear_tc(self, owner, data, clear_type):\n    batch = self.tcex.batch(owner, action='Delete')\n    tc_type = data.get('type')\n    path = data.get('path')\n    if (tc_type in self.tcex.group_types):\n        name = self.tcex.playbook.read(data.get('name'))\n        name = self.path_data(name, path)\n        if (name is not None):\n            print('Deleting ThreatConnect Group: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, name))\n            self.log.info('[{}] Deleting ThreatConnect {} with name: {}.'.format(clear_type, tc_type, name))\n            batch.group(tc_type, name)\n    elif (tc_type in self.tcex.indicator_types):\n        if (data.get('summary') is not None):\n            summary = self.tcex.playbook.read(data.get('summary'))\n        else:\n            resource = self.tcex.resource(tc_type)\n            summary = resource.summary(data)\n        summary = self.path_data(summary, path)\n        if (summary is not None):\n            print('Deleting ThreatConnect Indicator: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, summary))\n            self.log.info('[{}] Deleting ThreatConnect {} with value: {}.'.format(clear_type, tc_type, summary))\n            batch.indicator(tc_type, summary)\n    batch_results = batch.submit()\n    self.log.debug('[{}] Batch Results: {}'.format(clear_type, batch_results))\n    for error in (batch_results.get('errors') or []):\n        self.log.error('[{}] Batch Error: {}'.format(clear_type, error))", "docstring": "Delete threat intel from ThreatConnect platform.\n\nArgs:\nowner (str): The ThreatConnect owner.\ndata (dict): The data for the threat intel to clear.\nclear_type (str): The type of clear action.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = BytearrayStream()\n\n        if self._unique_identifier:\n            self._unique_identifier.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        else:\n            raise ValueError(\n                \"Invalid struct missing the unique identifier attribute.\"\n            )\n\n        if self._cryptographic_parameters:\n            self._cryptographic_parameters.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(MACSignatureKeyInformation, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the MACSignatureKeyInformation struct to a\nstream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def _get_element_by_names(source, names):\n    \n\n    if source is None:\n        return source\n\n    else:\n        if names:\n            head, *rest = names\n            if isinstance(source, dict) and head in source:\n                return _get_element_by_names(source[head], rest)\n            elif isinstance(source, list) and head.isdigit():\n                return _get_element_by_names(source[int(head)], rest)\n            elif not names[0]:\n                pass\n            else:\n                source = None\n        return source", "docstring": "Given a dict and path '/' or '.' separated. Digs into de dict to retrieve\nthe specified element.\n\nArgs:\nsource (dict): set of nested objects in which the data will be searched\npath (list): list of attribute names", "source": "juraj-google-style"}
{"code": "def from_unknown_text(text, strict=False):\n    if text.startswith('+'):\n        crs = from_proj4(text, strict)\n    elif text.startswith(('PROJCS[', 'GEOGCS[')):\n        crs = from_unknown_wkt(text, strict)\n    elif text.startswith('EPSG:'):\n        crs = from_epsg_code(text.split(':')[1])\n    elif text.startswith('ESRI:'):\n        crs = from_esri_code(text.split(':')[1])\n    elif text.startswith('SR-ORG:'):\n        crs = from_sr_code(text.split(':')[1])\n    else:\n        raise FormatError('Could not auto-detect the type of crs format, make sure it is one of the supported formats')\n    return crs", "docstring": "Detect crs string format and parse into crs object with appropriate function.\n\nArguments:\n\n- *text*: The crs text representation of unknown type.\n- *strict* (optional): When True, the parser is strict about names having to match\nexactly with upper and lowercases. Default is not strict (False).\n\nReturns:\n\n- CRS object.", "source": "codesearchnet"}
{"code": "def from_structures(cls, structures, constant_lattice=True, **kwargs):\n        \n        frac_coords = [structure.frac_coords for structure in structures]\n        if constant_lattice:\n            lattice = structures[0].lattice.matrix\n        else:\n            lattice = [structure.lattice.matrix for structure in structures]\n        site_properties = [structure.site_properties for structure in structures]\n        return cls(lattice, structures[0].species, frac_coords, site_properties=site_properties,\n                   constant_lattice=constant_lattice, **kwargs)", "docstring": "Convenience constructor to obtain trajectory from a list of structures.\nNote: Assumes no atoms removed during simulation\n\nArgs:\nstructures (list): list of pymatgen Structure objects.\nconstant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD\nsimulation. True results in\n\nReturns:\n(Trajectory)", "source": "juraj-google-style"}
{"code": "def get(cls, ns, key):\n    return getattr(db, cls.__name__).find_one((ConfigItem.namespace_prefix == ns), (ConfigItem.key == key))", "docstring": "Fetch an item by namespace and key\n\nArgs:\nns (str): Namespace prefix\nkey (str): Item key\n\nReturns:\n:obj:`Configitem`: Returns config item object if found, else `None`", "source": "codesearchnet"}
{"code": "def search(self, trace_func: Callable[([List[LineSequence], float, float, float, bool], None)]=None) -> List[LineSequence]:\n\n    def search_trace(state: _STATE, temp: float, cost: float, probability: float, accepted: bool):\n        if trace_func:\n            (trace_seqs, _) = state\n            trace_func(trace_seqs, temp, cost, probability, accepted)\n    (seqs, _) = optimization.anneal_minimize(self._create_initial_solution(), self._quadratic_sum_cost, self._force_edges_active_move, self._rand.random_sample, trace_func=search_trace)\n    return seqs", "docstring": "Issues new linear sequence search.\n\nEach call to this method starts new search.\n\nArgs:\ntrace_func: Optional callable which will be called for each simulated\nannealing step with arguments: solution candidate (list of linear\nsequences on the chip), current temperature (float), candidate cost\n(float), probability of accepting candidate (float), and acceptance\ndecision (boolean).\n\nReturns:\nList of linear sequences on the chip found by this method.", "source": "codesearchnet"}
{"code": "def CheckCompletedBlocks(self, filename, error):\n    for obj in self.stack:\n        if isinstance(obj, _ClassInfo):\n            error(filename, obj.starting_linenum, 'build/class', 5, ('Failed to find complete declaration of class %s' % obj.name))\n        elif isinstance(obj, _NamespaceInfo):\n            error(filename, obj.starting_linenum, 'build/namespaces', 5, ('Failed to find complete declaration of namespace %s' % obj.name))", "docstring": "Checks that all classes and namespaces have been completely parsed.\n\nCall this when all lines in a file have been processed.\nArgs:\nfilename: The name of the current file.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def add_subassistants_to(cls, parser, assistant_tuple, level, alias=None):\n        \n        name = alias or assistant_tuple[0].name\n        p = parser.add_parser(name,\n                              description=assistant_tuple[0].description,\n                              argument_default=argparse.SUPPRESS)\n        for arg in assistant_tuple[0].args:\n            arg.add_argument_to(p)\n\n        if len(assistant_tuple[1]) > 0:\n            subparsers = cls._add_subparsers_required(p,\n                dest=settings.SUBASSISTANT_N_STRING.format(level),\n                title=cls.subparsers_str,\n                description=cls.subparsers_desc)\n            for subas_tuple in sorted(assistant_tuple[1], key=lambda x: x[0].name):\n                cls.add_subassistants_to(subparsers, subas_tuple, level + 1)\n        elif level == 1:\n            subparsers = cls._add_subparsers_required(p,\n                dest=settings.SUBASSISTANT_N_STRING.format(level),\n                title=cls.subparsers_str,\n                description=devassistant_argparse.ArgumentParser.no_assistants_msg)", "docstring": "Adds assistant from given part of assistant tree and all its subassistants to\na given argument parser.\n\nArgs:\nparser: instance of devassistant_argparse.ArgumentParser\nassistant_tuple: part of assistant tree (see generate_argument_parser doc)\nlevel: level of subassistants that given assistant is at", "source": "juraj-google-style"}
{"code": "def nac_v(msg):\n    tc = typecode(msg)\n    if (tc != 19):\n        raise RuntimeError(('%s: Not an airborne velocity message, expecting TC = 19' % msg))\n    msgbin = common.hex2bin(msg)\n    NACv = common.bin2int(msgbin[42:45])\n    try:\n        HFOMr = uncertainty.NACv[NACv]['HFOMr']\n        VFOMr = uncertainty.NACv[NACv]['VFOMr']\n    except KeyError:\n        (HFOMr, VFOMr) = (uncertainty.NA, uncertainty.NA)\n    return (HFOMr, VFOMr)", "docstring": "Calculate NACv, Navigation Accuracy Category - Velocity\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC = 19\n\nReturns:\nint or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit\nint or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit", "source": "codesearchnet"}
{"code": "def metta_config(quarter, num_dimensions):\n    (first_day, last_day) = quarter_boundaries(quarter)\n    return {'start_time': first_day, 'end_time': last_day, 'prediction_window': 3, 'label_name': 'onet_soc_code', 'label_type': 'categorical', 'matrix_id': 'job_postings_{}'.format(quarter), 'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)]}", "docstring": "Returns metta metadata for a quarter's SOC code classifier matrix\n\nArgs:\nquarter (str) quarter, in format '2015Q1'\nnum_dimensions (int) Number of features in matrix\n\nReturns: (dict) metadata suitable for metta.archive_train_test", "source": "codesearchnet"}
{"code": "def find_library_windows(cls):\n    dll = (cls.get_appropriate_windows_sdk_name() + '.dll')\n    root = 'C:\\\\'\n    for d in os.listdir(root):\n        dir_path = os.path.join(root, d)\n        if (d.startswith('Program Files') and os.path.isdir(dir_path)):\n            dir_path = os.path.join(dir_path, 'SEGGER')\n            if (not os.path.isdir(dir_path)):\n                continue\n            ds = filter((lambda x: x.startswith('JLink')), os.listdir(dir_path))\n            for jlink_dir in ds:\n                lib_path = os.path.join(dir_path, jlink_dir, dll)\n                if os.path.isfile(lib_path):\n                    (yield lib_path)", "docstring": "Loads the SEGGER DLL from the windows installation directory.\n\nOn Windows, these are found either under:\n- ``C:\\\\Program Files\\\\SEGGER\\\\JLink``\n- ``C:\\\\Program Files (x86)\\\\SEGGER\\\\JLink``.\n\nArgs:\ncls (Library): the ``Library`` class\n\nReturns:\nThe paths to the J-Link library files in the order that they are\nfound.", "source": "codesearchnet"}
{"code": "def values(self, column_major=False):\n    if column_major:\n        return list(map(list, zip(*self._values)))\n    return [row[:] for row in self._values]", "docstring": "Return a nested list with the worksheet values.\n\nArgs:\ncolumn_major (bool): as list of columns (default list of rows)\nReturns:\nlist: list of lists with values", "source": "codesearchnet"}
{"code": "def _notify_mutated(self, obj, old, hint=None):\n    value = self.__get__(obj, obj.__class__)\n    value = self.property.prepare_value(obj, self.name, value)\n    self._real_set(obj, old, value, hint=hint)", "docstring": "A method to call when a container is mutated \"behind our back\"\nand we detect it with our |PropertyContainer| wrappers.\n\nArgs:\nobj (HasProps) :\nThe object who's container value was mutated\n\nold (object) :\nThe \"old\" value of the container\n\nIn this case, somewhat weirdly, ``old`` is a copy and the\nnew value should already be set unless we change it due to\nvalidation.\n\nhint (event hint or None, optional)\nAn optional update event hint, e.g. ``ColumnStreamedEvent``\n(default: None)\n\nUpdate event hints are usually used at times when better\nupdate performance can be obtained by special-casing in\nsome way (e.g. streaming or patching column data sources)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _sym_missing(self) -> Dict[str, Any]:\n    missing = dict()\n    for k, v in self._sym_attributes.items():\n        if pg_typing.MISSING_VALUE != v and isinstance(v, base.Symbolic):\n            missing_child = v.sym_missing(flatten=False)\n            if missing_child:\n                missing[k] = missing_child\n    return missing", "docstring": "Returns missing values for Functor.\n\nSemantically unbound arguments are not missing, thus we only return partial\nbound arguments in `sym_missing`. As a result, a functor is partial only\nwhen any of its bound arguments is partial.\n\nReturns:\nA dict of missing key (or path) to missing value.", "source": "github-repos"}
{"code": "def code_string_to_enum_value_descriptor(code_string: str, enum_descriptor: descriptor.EnumDescriptor) -> descriptor.EnumValueDescriptor:\n    value_descriptor = _get_enum_value_descriptor_memo(enum_descriptor, code_string)\n    if value_descriptor is not None:\n        return value_descriptor\n    fhir_case_code_string = code_string.upper().replace('-', '_')\n    value_descriptor = enum_descriptor.values_by_name.get(fhir_case_code_string)\n    if value_descriptor is not None:\n        _set_enum_value_descriptor_memo(enum_descriptor, code_string, value_descriptor)\n        return value_descriptor\n    for value_descriptor in enum_descriptor.values:\n        if value_descriptor.GetOptions().HasExtension(annotations_pb2.fhir_original_code) and value_descriptor.GetOptions().Extensions[annotations_pb2.fhir_original_code] == code_string:\n            _set_enum_value_descriptor_memo(enum_descriptor, code_string, value_descriptor)\n            return value_descriptor\n    raise fhir_errors.InvalidFhirError(f'Failed to convert {code_string!r} to {enum_descriptor.full_name}. No matching enum found.')", "docstring": "Returns an EnumValueDescriptor for a provided EnumDescriptor and raw code.\n\nArgs:\ncode_string: A raw string representation of the code to retrieve.\nenum_descriptor: The EnumDescriptor the desired EnumValueDescriptor belongs\nto.\n\nReturns:\nAn instance of EnumValueDescriptor that the code_string represents.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that a conversion from\ncode_string was unsuccessful.", "source": "github-repos"}
{"code": "def _prep_noise_interpolants(self):\n    noise_lists = {}\n    self.noise_interpolants = {}\n    if isinstance(self.sensitivity_curves, str):\n        self.sensitivity_curves = [self.sensitivity_curves]\n    if isinstance(self.noise_type_in, list):\n        if (len(self.noise_type_in) != len(self.sensitivity_curves)):\n            raise ValueError((('noise_type_in must have same shape as sensitivity_curves if it is' + 'provided as a list.') + 'If all curves are of the same type, provide a string.'))\n    else:\n        assert isinstance(self.noise_type_in, str)\n        self.noise_type_in = [self.noise_type_in for _ in self.sensitivity_curves]\n    if isinstance(self.signal_type, str):\n        self.signal_type = [self.signal_type]\n    for (num, sc) in enumerate(self.sensitivity_curves):\n        if isinstance(sc, str):\n            (f, h_n) = read_noise_curve(sc, noise_type_in=self.noise_type_in[num], noise_type_out='char_strain')\n            if (sc[(- 4):] == '.txt'):\n                key = sc.split('.')[0].split('/')[(- 1)]\n            else:\n                key = sc\n        elif isinstance(sc, list):\n            (f, h_n) = sc\n            key = str(num)\n        else:\n            raise ValueError(('Sensitivity curves must either be string' + 'or list containing f_n and asd_n.'))\n        noise_lists[key] = [f, h_n]\n    if (str(self.add_wd_noise).lower() in ['true', 'both', 'yes']):\n        if isinstance(self.wd_noise, str):\n            (f_n_wd, h_n_wd) = read_noise_curve(self.wd_noise, noise_type_in=self.wd_noise_type_in, noise_type_out='char_strain')\n        elif isinstance(self, wd_noise, list):\n            (f_n_wd, h_n_wd) = self.wd_noise\n        trans_dict = {}\n        for sc in noise_lists.keys():\n            (f_n, h_n) = noise_lists[sc]\n            if (self.add_wd_noise.lower() == 'both'):\n                trans_dict[sc] = [f_n, h_n]\n            (f_n, h_n) = combine_with_wd_noise(f_n, h_n, f_n_wd, h_n_wd)\n            trans_dict[(sc + '_wd')] = [f_n, h_n]\n        noise_lists = trans_dict\n    for sc in noise_lists:\n        (f_n, h_n) = noise_lists[sc]\n        self.noise_interpolants[sc] = interpolate.interp1d(f_n, h_n, bounds_error=False, fill_value=1e+30)\n    return", "docstring": "Construct interpolated sensitivity curves\n\nThis will construct the interpolated sensitivity curves\nusing scipy.interpolate.interp1d. It will add wd noise\nif that is requested.\n\nRaises:\nValueError: ``len(noise_type_in) != len(sensitivity_curves)``\nValueError: Issue with sensitivity curve type provided.", "source": "codesearchnet"}
{"code": "def print_source(self, args, screen_info=None):\n    del screen_info\n    parsed = self._arg_parsers['print_source'].parse_args(args)\n    device_name_regex = re.compile(parsed.device_name_filter) if parsed.device_name_filter else None\n    profile_data = []\n    data_generator = self._get_profile_data_generator()\n    device_count = len(self._run_metadata.step_stats.dev_stats)\n    for index in range(device_count):\n        device_stats = self._run_metadata.step_stats.dev_stats[index]\n        if device_name_regex and (not device_name_regex.match(device_stats.device)):\n            continue\n        profile_data.extend(data_generator(device_stats))\n    source_annotation = source_utils.annotate_source_against_profile(profile_data, os.path.expanduser(parsed.source_file_path), node_name_filter=parsed.node_name_filter, op_type_filter=parsed.op_type_filter)\n    if not source_annotation:\n        return debugger_cli_common.RichTextLines(['The source file %s does not contain any profile information for the previous Session run under the following filters:' % parsed.source_file_path, '  --%s: %s' % (_DEVICE_NAME_FILTER_FLAG, parsed.device_name_filter), '  --%s: %s' % (_NODE_NAME_FILTER_FLAG, parsed.node_name_filter), '  --%s: %s' % (_OP_TYPE_FILTER_FLAG, parsed.op_type_filter)])\n    max_total_cost = 0\n    for line_index in source_annotation:\n        total_cost = self._get_total_cost(source_annotation[line_index], parsed.cost_type)\n        max_total_cost = max(max_total_cost, total_cost)\n    source_lines, line_num_width = source_utils.load_source(parsed.source_file_path)\n    cost_bar_max_length = 10\n    total_cost_head = parsed.cost_type\n    column_widths = {'cost_bar': cost_bar_max_length + 3, 'total_cost': len(total_cost_head) + 3, 'num_nodes_execs': len(self._NUM_EXECS_SUB_HEAD) + 1, 'line_number': line_num_width}\n    head = RL(' ' * column_widths['cost_bar'] + total_cost_head + ' ' * (column_widths['total_cost'] - len(total_cost_head)) + self._NUM_NODES_HEAD + ' ' * (column_widths['num_nodes_execs'] - len(self._NUM_NODES_HEAD)), font_attr=self._LINE_COST_ATTR)\n    head += RL(self._LINENO_HEAD, font_attr=self._LINE_NUM_ATTR)\n    sub_head = RL(' ' * (column_widths['cost_bar'] + column_widths['total_cost']) + self._NUM_EXECS_SUB_HEAD + ' ' * (column_widths['num_nodes_execs'] - len(self._NUM_EXECS_SUB_HEAD)) + ' ' * column_widths['line_number'], font_attr=self._LINE_COST_ATTR)\n    sub_head += RL(self._SOURCE_HEAD, font_attr='bold')\n    lines = [head, sub_head]\n    output_annotations = {}\n    for i, line in enumerate(source_lines):\n        lineno = i + 1\n        if lineno in source_annotation:\n            annotation = source_annotation[lineno]\n            cost_bar = self._render_normalized_cost_bar(self._get_total_cost(annotation, parsed.cost_type), max_total_cost, cost_bar_max_length)\n            annotated_line = cost_bar\n            annotated_line += ' ' * (column_widths['cost_bar'] - len(cost_bar))\n            total_cost = RL(cli_shared.time_to_readable_str(self._get_total_cost(annotation, parsed.cost_type), force_time_unit=parsed.time_unit), font_attr=self._LINE_COST_ATTR)\n            total_cost += ' ' * (column_widths['total_cost'] - len(total_cost))\n            annotated_line += total_cost\n            file_path_filter = re.escape(parsed.source_file_path) + '$'\n            command = 'lp --file_path_filter %s --min_lineno %d --max_lineno %d' % (file_path_filter, lineno, lineno + 1)\n            if parsed.device_name_filter:\n                command += ' 
--%s %s' % (_DEVICE_NAME_FILTER_FLAG, parsed.device_name_filter)\n            if parsed.node_name_filter:\n                command += ' --%s %s' % (_NODE_NAME_FILTER_FLAG, parsed.node_name_filter)\n            if parsed.op_type_filter:\n                command += ' --%s %s' % (_OP_TYPE_FILTER_FLAG, parsed.op_type_filter)\n            menu_item = debugger_cli_common.MenuItem(None, command)\n            num_nodes_execs = RL('%d(%d)' % (annotation.node_count, annotation.node_exec_count), font_attr=[self._LINE_COST_ATTR, menu_item])\n            num_nodes_execs += ' ' * (column_widths['num_nodes_execs'] - len(num_nodes_execs))\n            annotated_line += num_nodes_execs\n        else:\n            annotated_line = RL(' ' * sum((column_widths[col_name] for col_name in column_widths if col_name != 'line_number')))\n        line_num_column = RL(' L%d' % lineno, self._LINE_NUM_ATTR)\n        line_num_column += ' ' * (column_widths['line_number'] - len(line_num_column))\n        annotated_line += line_num_column\n        annotated_line += line\n        lines.append(annotated_line)\n        if parsed.init_line == lineno:\n            output_annotations[debugger_cli_common.INIT_SCROLL_POS_KEY] = len(lines) - 1\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines, annotations=output_annotations)", "docstring": "Print a Python source file with line-level profile information.\n\nArgs:\nargs: Command-line arguments, excluding the command prefix, as a list of\nstr.\nscreen_info: Optional dict input containing screen information such as\ncols.\n\nReturns:\nOutput text lines as a RichTextLines object.", "source": "github-repos"}
{"code": "def is_active(self, node):\n    \n    \n    \n    if (isinstance(node.value, gast.Call) and\n        anno.getanno(node.value, 'func', False) == utils.pop):\n      return True\n    for succ in gast.walk(node.value):\n      if (isinstance(succ, gast.Name) and isinstance(succ.ctx, gast.Load) and\n          succ.id in self.active_variables):\n        return True\n    return False", "docstring": "Checks whether a statement is active.\n\nAn assignment is active when its right hand side contains active\nvariables.\n\nArgs:\nnode: an instance of gast.Assign\n\nReturns:\nWhether the statement is active.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.ListContexts = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.Contexts/ListContexts',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.ListContextsRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.ListContextsResponse.FromString,\n        )\n    self.GetContext = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.Contexts/GetContext',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.GetContextRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.Context.FromString,\n        )\n    self.CreateContext = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.Contexts/CreateContext',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.CreateContextRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.Context.FromString,\n        )\n    self.UpdateContext = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.Contexts/UpdateContext',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.UpdateContextRequest.SerializeToString,\n        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.Context.FromString,\n        )\n    self.DeleteContext = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.Contexts/DeleteContext',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.DeleteContextRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.DeleteAllContexts = channel.unary_unary(\n        '/google.cloud.dialogflow.v2beta1.Contexts/DeleteAllContexts',\n        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_context__pb2.DeleteAllContextsRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def ReadSerializableArray(self, class_name, max=sys.maxsize):\n        \n        module = '.'.join(class_name.split('.')[:-1])\n        klassname = class_name.split('.')[-1]\n        klass = getattr(importlib.import_module(module), klassname)\n        length = self.ReadVarInt(max=max)\n        items = []\n        \n        try:\n            for i in range(0, length):\n                item = klass()\n                item.Deserialize(self)\n                \n                items.append(item)\n        except Exception as e:\n            logger.error(\"Couldn't deserialize %s \" % e)\n\n        return items", "docstring": "Deserialize a stream into the object specific by `class_name`.\n\nArgs:\nclass_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'\nmax (int): (Optional) maximum number of bytes to read.\n\nReturns:\nlist: list of `class_name` objects deserialized from the stream.", "source": "juraj-google-style"}
{"code": "def DetermineType(value):\n  \n\n  object_type = type(value)\n  if not hasattr(object_type, '__name__'):\n    return None\n\n  type_string = getattr(object_type, '__module__', '')\n  if type_string:\n    type_string += '.'\n\n  type_string += object_type.__name__\n  return type_string", "docstring": "Determines the type of val, returning a \"full path\" string.\n\nFor example:\nDetermineType(5) -> __builtin__.int\nDetermineType(Foo()) -> com.google.bar.Foo\n\nArgs:\nvalue: Any value, the value is irrelevant as only the type metadata\nis checked\n\nReturns:\nType path string.  None if type cannot be determined.", "source": "juraj-google-style"}
{"code": "def SetServerInformation(self, server, port):\n    \n    self._host = server\n    self._port = port\n    logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(\n        server, port))", "docstring": "Set the server information.\n\nArgs:\nserver (str): IP address or hostname of the server.\nport (int): Port number of the server.", "source": "juraj-google-style"}
{"code": "def google_maps_geoloc_link(data):\n    \n    if isinstance(data, str):\n        lat_lon = ip_geoloc(data)\n        if lat_lon is None:\n            return ''\n        lat, lon = lat_lon\n    else:\n        lat, lon = data\n    loc = '%s,%s' % (lat, lon)\n    return 'https:\n           'data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d%s!4d%s' % (\n            loc, lat, lon)", "docstring": "Get a link to google maps pointing on this IP's geolocation.\n\nArgs:\ndata (str/tuple): IP address or (latitude, longitude).\n\nReturns:\nstr: a link to google maps pointing on this IP's geolocation.", "source": "juraj-google-style"}
{"code": "def limit(self, count):\n    query = query_mod.Query(self)\n    return query.limit(count)", "docstring": "Create a limited query with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.limit` for\nmore information on this method.\n\nArgs:\ncount (int): Maximum number of documents to return that match\nthe query.\n\nReturns:\n~.firestore_v1beta1.query.Query: A limited query.", "source": "codesearchnet"}
{"code": "def upload(self, resource_id, data):\n    self.body = data\n    self.content_type = 'application/octet-stream'\n    self.resource_id(str(resource_id))\n    self._request_uri = '{}/upload'.format(self._request_uri)", "docstring": "Update the request URI to upload the a document to this resource.\n\nArgs:\nresource_id (integer): The group id.\ndata (any): The raw data to upload.", "source": "codesearchnet"}
{"code": "def get_dataset(self, dsid, dsinfo):\n    data = self[dsinfo.get('file_key', dsid.name)]\n    data.attrs.update(dsinfo)\n    data.attrs['platform_name'] = self['/attr/satellite_name']\n    data.attrs['sensor'] = self['/attr/instrument_name']\n    return data", "docstring": "Get dataset function\n\nArgs:\ndsid: Dataset ID\nparam2: Dataset Information\n\nReturns:\nDask DataArray: Data", "source": "codesearchnet"}
{"code": "def from_base_10_int(decimal, output_base=10):\n    \n    if decimal <= 0:\n        return (0,)\n    if output_base == 1:\n        return (1,) * decimal\n    length = digits(decimal, output_base)\n    converted = tuple(digit(decimal, i, output_base) for i in range(length))\n    return converted[::-1]", "docstring": "Converts a decimal integer to a specific base.\n\nArgs:\ndecimal(int) A base 10 number.\noutput_base(int) base to convert to.\n\nReturns:\nA tuple of digits in the specified base.\n\nExamples:\n>>> from_base_10_int(255)\n(2, 5, 5)\n>>> from_base_10_int(255, 16)\n(15, 15)\n>>> from_base_10_int(9988664439, 8)\n(1, 1, 2, 3, 2, 7, 5, 6, 6, 1, 6, 7)\n>>> from_base_10_int(0, 17)\n(0,)", "source": "juraj-google-style"}
{"code": "def get_ogr_driver(filepath):\n    (filename, file_extension) = os.path.splitext(filepath)\n    EXTENSION = file_extension[1:]\n    ogr_driver_count = ogr.GetDriverCount()\n    for idx in range(ogr_driver_count):\n        driver = ogr.GetDriver(idx)\n        driver_extension = (driver.GetMetadataItem(str('DMD_EXTENSION')) or '')\n        driver_extensions = (driver.GetMetadataItem(str('DMD_EXTENSIONS')) or '')\n        if ((EXTENSION == driver_extension) or (EXTENSION in driver_extensions)):\n            return driver\n    else:\n        msg = 'No driver found for the following file extension: {}'.format(EXTENSION)\n        raise ValueError(msg)", "docstring": "Get the OGR driver from the provided file extension.\n\nArgs:\nfile_extension (str): file extension\n\nReturns:\nosgeo.ogr.Driver\n\nRaises:\nValueError: no driver is found", "source": "codesearchnet"}
{"code": "def generate_exact(self, model, vcpu_num, host_cpu):\n    nested = {'Intel': 'vmx', 'AMD': 'svm'}\n    cpu = ET.Element('cpu', match='exact')\n    ET.SubElement(cpu, 'model').text = model\n    cpu.append(self.generate_topology(vcpu_num))\n    vendor = host_cpu.findtext('vendor')\n    if (not nested.get(vendor)):\n        LOGGER.debug('Unknown vendor: {0}, did not configure nested virtualization cpu flag on guest.'.format(vendor))\n        return cpu\n    model_vendor = LibvirtCPU.get_cpu_vendor(family=model)\n    if (vendor != model_vendor):\n        LOGGER.debug('Not enabling nested virtualization feature, host vendor is: {0}, guest vendor: {1}'.format(vendor, model_vendor))\n        return cpu\n    flag = nested[vendor]\n    if (host_cpu.find('feature/[@name=\"{0}\"]'.format(flag)) is not None):\n        cpu.append(self.generate_feature(name=flag))\n    else:\n        LOGGER.debug('missing {0} cpu flag on host, nested virtualization will probably not work.'.format(flag))\n    return cpu", "docstring": "Generate exact CPU model with nested virtualization CPU feature.\n\nArgs:\nmodel(str): libvirt supported CPU model\nvcpu_num(int): number of virtual cpus\nhost_cpu(lxml.etree.Element): the host CPU model\n\nReturns:\nlxml.etree.Element: CPU XML node", "source": "codesearchnet"}
{"code": "def guess_task_type(name, task_defn):\n    parts = name.split(':')\n    task_type = parts[(- 1)]\n    if (task_type == 'parent'):\n        if is_action(task_defn):\n            task_type = 'action'\n        else:\n            task_type = 'decision'\n    if (task_type not in get_valid_task_types()):\n        raise CoTError('Invalid task type for {}!'.format(name))\n    return task_type", "docstring": "Guess the task type of the task.\n\nArgs:\nname (str): the name of the task.\n\nReturns:\nstr: the task_type.\n\nRaises:\nCoTError: on invalid task_type.", "source": "codesearchnet"}
{"code": "def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None, as_ref=False):\n    if not isinstance(values, collections_abc.Iterable):\n        raise TypeError('Argument `values` must be iterable.')\n    ret = []\n    for i, value in enumerate(values):\n        if value is None:\n            ret.append(value)\n        else:\n            n = None if name is None else '%s_%d' % (name, i)\n            ret.append(internal_convert_to_tensor_or_indexed_slices(value, dtype=dtype, name=n, as_ref=as_ref))\n    return ret", "docstring": "Converts `values` to a list of `Tensor` or `IndexedSlices` objects.\n\nAny `IndexedSlices` or `SparseTensor` objects in `values` are returned\nunmodified.\n\nArgs:\nvalues: An iterable of `None`, `IndexedSlices`, `SparseTensor`, or objects\nthat can be consumed by `convert_to_tensor()`.\ndtype: (Optional.) The required `DType` of the returned `Tensor` or\n`IndexedSlices`.\nname: (Optional.) A name prefix to used when a new `Tensor` is created, in\nwhich case element `i` will be given the name `name + '_' + i`.\nas_ref: True if the caller wants the results as ref tensors.\n\nReturns:\nA list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects.\n\nRaises:\nTypeError: If no conversion function is registered for an element in\n`values`.\nRuntimeError: If a registered conversion function returns an invalid\nvalue.", "source": "github-repos"}
{"code": "def _decorate_block(self, start, end):\n        \n        color = self._get_scope_highlight_color()\n        draw_order = DRAW_ORDERS.get('codefolding')\n        d = TextDecoration(self.editor.document(), start_line=start,\n                           end_line=end+1, draw_order=draw_order)\n        d.set_background(color)\n        d.set_full_width(True, clear=False)\n        self.editor.decorations.add(d)\n        self._scope_decos.append(d)", "docstring": "Create a decoration and add it to the editor.\n\nArgs:\nstart (int) start line of the decoration\nend (int) end line of the decoration", "source": "juraj-google-style"}
{"code": "def prepare_for_send(self, full_url=False):\n    assert self.url\n    assert self.method\n    assert self.version\n    url_info = self.url_info\n    if ('Host' not in self.fields):\n        self.fields['Host'] = url_info.hostname_with_port\n    if (not full_url):\n        if url_info.query:\n            self.resource_path = '{0}?{1}'.format(url_info.path, url_info.query)\n        else:\n            self.resource_path = url_info.path\n    else:\n        self.resource_path = url_info.url", "docstring": "Modify the request to be suitable for HTTP server.\n\nArgs:\nfull_url (bool): Use full URL as the URI. By default, only\nthe path of the URL is given to the server.", "source": "codesearchnet"}
{"code": "def add_arguments(cls, parser):\n        \n\n        parser.add_argument(\n            '-t', '--title',\n            action='store',\n            nargs='?',\n            const='',\n            dest='title',\n            help=\"[issue] task/issue title.\",\n            )\n\n        parser.add_argument(\n            '-b', '--body',\n            action='store',\n            nargs='?',\n            const='',\n            dest='body',\n            help=\"[issue] task/issue body.\",\n            )\n\n        pass", "docstring": "Add arguments to the parser for collection in app.args.\n\nArgs:\nparser:\n`argparse.ArgumentParser`. Parser.\nArguments added here are server on\nself.args.", "source": "juraj-google-style"}
{"code": "def populate_ast_nsarg_orthologs(ast, species):\n    ortholog_namespace = 'EG'\n    if isinstance(ast, NSArg):\n        if re.match(ortholog_namespace, ast.canonical):\n            orthologs = bel.terms.orthologs.get_orthologs(ast.canonical, list(species.keys()))\n            for species_id in species:\n                if (species_id in orthologs):\n                    orthologs[species_id]['species_label'] = species[species_id]\n            ast.orthologs = copy.deepcopy(orthologs)\n    if hasattr(ast, 'args'):\n        for arg in ast.args:\n            populate_ast_nsarg_orthologs(arg, species)\n    return ast", "docstring": "Recursively collect NSArg orthologs for BEL AST\n\nThis requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available\n\nArgs:\nast: AST at recursive point in belobj\nspecies: dictionary of species ids vs labels for or", "source": "codesearchnet"}
{"code": "def draw_sunpath(self, hoys=None, origin=None, scale=1, sun_scale=1, annual=True, rem_night=True):\n    assert ladybug.isplus, '\"draw_sunpath\" method can only be used in the [+] libraries.'\n    hoys = (hoys or ())\n    origin = (origin or (0, 0, 0))\n    try:\n        origin = tuple(origin)\n    except TypeError as e:\n        try:\n            origin = (origin.X, origin.Y, origin.Z)\n        except AttributeError:\n            raise TypeError(str(e))\n    scale = (scale or 1)\n    sun_scale = (sun_scale or 1)\n    assert (annual or hoys), 'For daily sunpath you need to provide at least one hour.'\n    radius = (200 * scale)\n    base_curves = plus.base_curves(origin, radius, self.north_angle)\n    if annual:\n        asuns = self._analemma_suns()\n        analemma_curves = plus.analemma_curves(asuns, origin, radius)\n    else:\n        analemma_curves = ()\n    if hoys:\n        suns = tuple((self.calculate_sun_from_hoy(hour) for hour in hoys))\n    else:\n        suns = ()\n    if rem_night:\n        suns = tuple((sun for sun in suns if sun.is_during_day))\n    sun_geos = plus.sun_geometry(suns, origin, radius)\n    if annual:\n        dts = (DateTime(m, 21) for m in xrange(1, 13))\n    else:\n        dts = (sun.datetime for sun in suns)\n    dsuns = self._daily_suns(dts)\n    daily_curves = plus.daily_curves(dsuns, origin, radius)\n    SPGeo = namedtuple('SunpathGeo', ('compass_curves', 'analemma_curves', 'daily_curves', 'suns', 'sun_geos'))\n    return SPGeo(base_curves, analemma_curves, daily_curves, suns, sun_geos)", "docstring": "Create sunpath geometry. \\\nThis method should only be used from the + libraries.\n\nArgs:\nhoys: An optional list of hours of the year(default: None).\norigin: Sunpath origin(default: (0, 0, 0)).\nscale: Sunpath scale(default: 1).\nsun_scale: Scale for the sun spheres(default: 1).\nannual: Set to True to draw an annual sunpath.\nOtherwise a daily sunpath is drawn.\nrem_night: Remove suns which are under the horizon(night!).\nReturns:\nbase_curves: A collection of curves for base plot.\nanalemma_curves: A collection of analemma_curves.\ndaily_curves: A collection of daily_curves.\nsuns: A list of suns.", "source": "codesearchnet"}
{"code": "def offTagAdd(self, name, func):\n        \n        if '*' in name:\n            self.ontagaddglobs.rem(name, func)\n            return\n\n        cblist = self.ontagadds.get(name)\n        if cblist is None:\n            return\n        try:\n            cblist.remove(func)\n        except ValueError:\n            pass", "docstring": "Unregister a callback for tag addition.\n\nArgs:\nname (str): The name of the tag or tag glob.\nfunc (function): The callback func(node, tagname, tagval).", "source": "juraj-google-style"}
{"code": "def orthorhombic(a: float, b: float, c: float):\n        \n        return Lattice.from_parameters(a, b, c, 90, 90, 90)", "docstring": "Convenience constructor for an orthorhombic lattice.\n\nArgs:\na (float): *a* lattice parameter of the orthorhombic cell.\nb (float): *b* lattice parameter of the orthorhombic cell.\nc (float): *c* lattice parameter of the orthorhombic cell.\n\nReturns:\nOrthorhombic lattice of dimensions a x b x c.", "source": "juraj-google-style"}
{"code": "def apply_filter(self, structure_filter):\n\n    def test_transformed_structure(ts):\n        return structure_filter.test(ts.final_structure)\n    self.transformed_structures = list(filter(test_transformed_structure, self.transformed_structures))\n    for ts in self.transformed_structures:\n        ts.append_filter(structure_filter)", "docstring": "Applies a structure_filter to the list of TransformedStructures\nin the transmuter.\n\nArgs:\nstructure_filter: StructureFilter to apply.", "source": "codesearchnet"}
{"code": "def UnwrapPyTree(tree):\n    unwrapper = PyTreeUnwrapper()\n    unwrapper.Visit(tree)\n    llines = unwrapper.GetLogicalLines()\n    llines.sort(key=lambda x: x.lineno)\n    return llines", "docstring": "Create and return a list of logical lines from the given pytree.\n\nArguments:\ntree: the top-level pytree node to unwrap..\n\nReturns:\nA list of LogicalLine objects.", "source": "github-repos"}
{"code": "def as_operation(self, timer=datetime.utcnow):\n    now = timer()\n    op = sc_messages.Operation(endTime=timestamp.to_rfc3339(now), startTime=timestamp.to_rfc3339(now), importance=sc_messages.Operation.ImportanceValueValuesEnum.LOW)\n    if self.operation_id:\n        op.operationId = self.operation_id\n    if self.operation_name:\n        op.operationName = self.operation_name\n    if (self.api_key and self.api_key_valid):\n        op.consumerId = (u'api_key:' + self.api_key)\n    elif self.consumer_project_id:\n        op.consumerId = (u'project:' + self.consumer_project_id)\n    return op", "docstring": "Makes an ``Operation`` from this instance.\n\nReturns:\nan ``Operation``", "source": "codesearchnet"}
{"code": "def generate_rpn_proposals(boxes, scores, img_shape,\n                           pre_nms_topk, post_nms_topk=None):\n    \n    assert boxes.shape.ndims == 2, boxes.shape\n    if post_nms_topk is None:\n        post_nms_topk = pre_nms_topk\n\n    topk = tf.minimum(pre_nms_topk, tf.size(scores))\n    topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False)\n    topk_boxes = tf.gather(boxes, topk_indices)\n    topk_boxes = clip_boxes(topk_boxes, img_shape)\n\n    topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2))\n    topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes_x1y1x2y2, 2, axis=1)\n    \n    wbhb = tf.squeeze(topk_boxes_x2y2 - topk_boxes_x1y1, axis=1)\n    valid = tf.reduce_all(wbhb > cfg.RPN.MIN_SIZE, axis=1)  \n    topk_valid_boxes_x1y1x2y2 = tf.boolean_mask(topk_boxes_x1y1x2y2, valid)\n    topk_valid_scores = tf.boolean_mask(topk_scores, valid)\n\n    \n    topk_valid_boxes_y1x1y2x2 = tf.reshape(\n        tf.reverse(topk_valid_boxes_x1y1x2y2, axis=[2]),\n        (-1, 4), name='nms_input_boxes')\n    nms_indices = tf.image.non_max_suppression(\n        topk_valid_boxes_y1x1y2x2,\n        topk_valid_scores,\n        max_output_size=post_nms_topk,\n        iou_threshold=cfg.RPN.PROPOSAL_NMS_THRESH)\n\n    topk_valid_boxes = tf.reshape(topk_valid_boxes_x1y1x2y2, (-1, 4))\n    proposal_boxes = tf.gather(topk_valid_boxes, nms_indices)\n    proposal_scores = tf.gather(topk_valid_scores, nms_indices)\n    tf.sigmoid(proposal_scores, name='probs')  \n    return tf.stop_gradient(proposal_boxes, name='boxes'), tf.stop_gradient(proposal_scores, name='scores')", "docstring": "Sample RPN proposals by the following steps:\n1. Pick top k1 by scores\n2. NMS them\n3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output.\n\nArgs:\nboxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already\nscores: n float, the logits\nimg_shape: [h, w]\npre_nms_topk, post_nms_topk (int): See above.\n\nReturns:\nboxes: kx4 float\nscores: k logits", "source": "juraj-google-style"}
{"code": "def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None):\n    with tf.variable_scope(name, default_name='within_local_attention_1d', values=[q, k, v]):\n        (batch, heads, length, depth_k) = common_layers.shape_list(q)\n        depth_v = common_layers.shape_list(v)[(- 1)]\n        if isinstance(block_length, tf.Tensor):\n            const = tf.contrib.util.constant_value(block_length)\n            if (const is not None):\n                block_length = int(const)\n        original_length = length\n        padding_size = tf.mod((- length), block_length)\n        length += padding_size\n        padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]\n        q = tf.pad(q, padding)\n        k = tf.pad(k, padding)\n        v = tf.pad(v, padding)\n        num_blocks = tf.div(length, block_length)\n        q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])\n        k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])\n        v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])\n        attention = tf.matmul(q, k, transpose_b=True)\n        attention += tf.reshape(attention_bias_lower_triangle(block_length), [1, 1, 1, block_length, block_length])\n        attention = tf.nn.softmax(attention)\n        output = tf.matmul(attention, v)\n        output = tf.reshape(output, [batch, heads, (- 1), depth_v])\n        output = tf.slice(output, [0, 0, 0, 0], [(- 1), (- 1), original_length, (- 1)])\n        output.set_shape([(None if isinstance(dim, tf.Tensor) else dim) for dim in (batch, heads, length, depth_v)])\n        return output", "docstring": "Attention to the source and a neighborhood to the left within a block.\n\nThe sequence is divided into blocks of length block_length. Attention for a\ngiven query position can only see memory positions less than or equal to the\nquery position in the corresponding block.\n\nArgs:\nq: a Tensor with shape [batch, heads, length, depth_k]\nk: a Tensor with shape [batch, heads, length, depth_k]\nv: a Tensor with shape [batch, heads, length, depth_v]\nblock_length: an integer\nname: an optional string\n\nReturns:\na Tensor of shape [batch, heads, length, depth_v]", "source": "codesearchnet"}
{"code": "def bdp_bds_cache(func, tickers, flds, **kwargs) -> ToQuery:\n    \n    cache_data = []\n    log_level = kwargs.get('log', logs.LOG_LEVEL)\n    logger = logs.get_logger(bdp_bds_cache, level=log_level)\n    kwargs['has_date'] = kwargs.pop('has_date', func == 'bds')\n    kwargs['cache'] = kwargs.get('cache', True)\n\n    tickers = utils.flatten(tickers)\n    flds = utils.flatten(flds)\n    loaded = pd.DataFrame(data=0, index=tickers, columns=flds)\n\n    for ticker, fld in product(tickers, flds):\n        data_file = storage.ref_file(\n            ticker=ticker, fld=fld, ext='pkl', **{\n                k: v for k, v in kwargs.items() if k not in EXC_COLS\n            }\n        )\n        if not files.exists(data_file): continue\n        logger.debug(f'reading from {data_file} ...')\n        cache_data.append(pd.read_pickle(data_file))\n        loaded.loc[ticker, fld] = 1\n\n    to_qry = loaded.where(loaded == 0)\\\n        .dropna(how='all', axis=1).dropna(how='all', axis=0)\n\n    return ToQuery(\n        tickers=to_qry.index.tolist(), flds=to_qry.columns.tolist(),\n        cached_data=cache_data\n    )", "docstring": "Find cached `BDP` / `BDS` queries\n\nArgs:\nfunc: function name - bdp or bds\ntickers: tickers\nflds: fields\n**kwargs: other kwargs\n\nReturns:\nToQuery(ticker, flds, kwargs)", "source": "juraj-google-style"}
{"code": "def convert(self, value):\n    if (self._type is str):\n        return str(value)\n    elif (self._type is int):\n        try:\n            return int(value)\n        except (UnicodeError, ValueError):\n            raise WorkflowArgumentError('Cannot convert {} to int'.format(value))\n    elif (self._type is float):\n        try:\n            return float(value)\n        except (UnicodeError, ValueError):\n            raise WorkflowArgumentError('Cannot convert {} to float'.format(value))\n    elif (self._type is bool):\n        if isinstance(value, bool):\n            return bool(value)\n        value = value.lower()\n        if (value in ('true', '1', 'yes', 'y')):\n            return True\n        elif (value in ('false', '0', 'no', 'n')):\n            return False\n        raise WorkflowArgumentError('Cannot convert {} to bool'.format(value))\n    else:\n        return value", "docstring": "Convert the specified value to the type of the option.\n\nArgs:\nvalue: The value that should be converted.\n\nReturns:\nThe value with the type given by the option.", "source": "codesearchnet"}
{"code": "def to_FIB(self, other):\n    if (not isinstance(other, GroundedFunctionNetwork)):\n        raise TypeError(f'Expected GroundedFunctionNetwork, but got {type(other)}')\n\n    def shortname(var):\n        return var[(var.find('::') + 2):var.rfind('_')]\n\n    def shortname_vars(graph, shortname):\n        return [v for v in graph.nodes() if (shortname in v)]\n    this_var_nodes = [shortname(n) for (n, d) in self.nodes(data=True) if (d['type'] == 'variable')]\n    other_var_nodes = [shortname(n) for (n, d) in other.nodes(data=True) if (d['type'] == 'variable')]\n    shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))\n    full_shared_vars = {full_var for shared_var in shared_vars for full_var in shortname_vars(self, shared_var)}\n    return ForwardInfluenceBlanket(self, full_shared_vars)", "docstring": "Creates a ForwardInfluenceBlanket object representing the\nintersection of this model with the other input model.\n\nArgs:\nother: The GroundedFunctionNetwork object to compare this model to.\n\nReturns:\nA ForwardInfluenceBlanket object to use for model comparison.", "source": "codesearchnet"}
{"code": "def update_config(config):\n    \n\n    \n    update(bigchaindb.config, update_types(config, bigchaindb.config))\n    bigchaindb.config['CONFIGURED'] = True", "docstring": "Update bigchaindb.config with whatever is in the provided config dict,\nand then set bigchaindb.config['CONFIGURED'] = True\n\nArgs:\nconfig (dict): the config dict to read for changes\nto the default config", "source": "juraj-google-style"}
{"code": "def parse(self, argument):\n    \n    if isinstance(argument, self.enum_class):\n      return argument\n    if argument not in self.enum_class.__members__:\n      raise ValueError('value should be one of <%s>' %\n                       '|'.join(self.enum_class.__members__.keys()))\n    else:\n      return self.enum_class[argument]", "docstring": "Determines validity of argument and returns the correct element of enum.\n\nArgs:\nargument: str or Enum class member, the supplied flag value.\n\nReturns:\nThe first matching Enum class member in Enum class.\n\nRaises:\nValueError: Raised when argument didn't match anything in enum.", "source": "juraj-google-style"}
{"code": "def _update_field(self, uri, field):\n\t\t\n\t\t\n\t\tpayload = None\n\t\tif  type(field) is not StreakField:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tpayload = field.to_dict(rw = True)\n\t\n\t\t\n\t\t\n\t\t\n\t\ttry:\n\t\t\turi = '/'.join([\n\t\t\t\t\t\t\turi, \n\t\t\t\t\t\t\tfield.attributes['key']\n\t\t\t\t\t\t\t])\n\t\texcept KeyError:\n\t\t\treturn requests.codes.bad_request, None\n\t\n\t\tcode, data = self._req('post', uri , json.dumps(payload))\n\t\t\n\t\treturn code, data", "docstring": "Updates a field with the provided attributes.\nArgs:\nkey\treqiured identifier for the pipeline or box\nfield\t\t\tStreakField object\nkwargs\t\t\t{name, type} see StreakField for details\nreturn\t\t\t(status code, field dict)", "source": "juraj-google-style"}
{"code": "def expected_rs(n):\n  \n  front = (n - 0.5) / n\n  i = np.arange(1,n)\n  back = np.sum(np.sqrt((n - i) / i))\n  if n <= 340:\n    middle = math.gamma((n-1) * 0.5) / math.sqrt(math.pi) / math.gamma(n * 0.5)\n  else:\n    middle = 1.0 / math.sqrt(n * math.pi * 0.5)\n  return front * middle * back", "docstring": "Calculates the expected (R/S)_n for white noise for a given n.\n\nThis is used as a correction factor in the function hurst_rs. It uses the\nformula of Anis-Lloyd-Peters (see [h_3]_).\n\nArgs:\nn (int):\nthe value of n for which the expected (R/S)_n should be calculated\n\nReturns:\nfloat:\nexpected (R/S)_n for white noise", "source": "juraj-google-style"}
{"code": "def sample_variants(self, variants, sample_name, category='snv'):\n    LOG.info('Retrieving variants for subject : {0}'.format(sample_name))\n    has_allele = re.compile('1|2')\n    query = {'$and': [{'_id': {'$in': variants}}, {'category': category}, {'samples': {'$elemMatch': {'display_name': sample_name, 'genotype_call': {'$regex': has_allele}}}}]}\n    result = self.variant_collection.find(query)\n    return result", "docstring": "Given a list of variants get variant objects found in a specific patient\n\nArgs:\nvariants(list): a list of variant ids\nsample_name(str): a sample display name\ncategory(str): 'snv', 'sv' ..\n\nReturns:\nresult(iterable(Variant))", "source": "codesearchnet"}
{"code": "def AddrStrToScriptHash(address):\n    data = b58decode(address)\n    if (len(data) != 25):\n        raise ValueError('Not correct Address, wrong length.')\n    if (data[0] != settings.ADDRESS_VERSION):\n        raise ValueError('Not correct Coin Version')\n    checksum = Crypto.Default().Hash256(data[:21])[:4]\n    if (checksum != data[21:]):\n        raise Exception('Address format error')\n    return UInt160(data=data[1:21])", "docstring": "Convert a public address to a script hash.\n\nArgs:\naddress (str): base 58 check encoded public address.\n\nRaises:\nValueError: if the address length of address version is incorrect.\nException: if the address checksum fails.\n\nReturns:\nUInt160:", "source": "codesearchnet"}
{"code": "def query(self, s):\n    s1 = np.sort([self.order[token] for token in s if (token in self.order)])\n    logging.debug('{} original tokens and {} tokens after applying frequency order.'.format(len(s), len(s1)))\n    prefix = self._get_prefix(s1)\n    candidates = set([i for (p1, token) in enumerate(prefix) for (i, p2) in self.index[token] if self.position_filter_func(s1, self.sets[i], p1, p2, self.similarity_threshold)])\n    logging.debug('{} candidates found.'.format(len(candidates)))\n    results = deque([])\n    for i in candidates:\n        s2 = self.sets[i]\n        sim = self.similarity_func(s1, s2)\n        if (sim < self.similarity_threshold):\n            continue\n        results.append((i, sim))\n    logging.debug('{} verified sets found.'.format(len(results)))\n    return list(results)", "docstring": "Query the search index for sets similar to the query set.\n\nArgs:\ns (Iterable): the query set.\n\nReturns (list): a list of tuples `(index, similarity)` where the index\nis the index of the matching sets in the original list of sets.", "source": "codesearchnet"}
{"code": "def inv(a):\n    amean = gvar.mean(a)\n    if ((amean.ndim != 2) or (amean.shape[0] != amean.shape[1])):\n        raise ValueError(('bad matrix shape: ' + str(a.shape)))\n    da = (a - amean)\n    ainv = numpy.linalg.inv(amean)\n    return (ainv - ainv.dot(da.dot(ainv)))", "docstring": "Inverse of matrix ``a``.\n\nArgs:\na: Two-dimensional, square matrix/array of numbers\nand/or :class:`gvar.GVar`\\s.\n\nReturns:\nThe inverse of matrix ``a``.\n\nRaises:\nValueError: If matrix is not square and two-dimensional.", "source": "codesearchnet"}
{"code": "def mimic_adam_with_adafactor(hparams):\n  \n  assert \"adam\" in hparams.optimizer\n  hparams.optimizer = \"adafactor\"\n  hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1\n  hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2\n  hparams.optimizer_adafactor_multiply_by_parameter_scale = False\n  hparams.optimizer_adafactor_factored = False\n  hparams.optimizer_adafactor_clipping_threshold = None\n  hparams.optimizer_adafactor_decay_type = \"adam\"", "docstring": "Switch from Adam to Adafactor, approximating the behavior of Adam.\n\nSome minor things may be different, like epsilon and beta1 correction.\n\nArgs:\nhparams: model hyperparameters where \"adam\" in hparams.optimizer", "source": "juraj-google-style"}
{"code": "def __init__(self, artifacts_registry, knowledge_base):\n    \n    super(ArtifactDefinitionsFilterHelper, self).__init__()\n    self._artifacts_registry = artifacts_registry\n    self._knowledge_base = knowledge_base\n\n    self.file_system_artifact_names = set()\n    self.file_system_find_specs = []\n    self.registry_artifact_names = set()\n    self.registry_find_specs = []", "docstring": "Initializes an artifact definitions filter helper.\n\nArgs:\nartifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifact\ndefinitions registry.\nknowledge_base (KnowledgeBase): contains information from the source\ndata needed for filtering.", "source": "juraj-google-style"}
{"code": "def __init__(self, select2attrs=None, *args, **kwargs):\n        \n        self.select2attrs = select2attrs or {}\n        assert_msg = \"select2attrs attribute must be dict, not {}\"\n        assert isinstance(self.select2attrs, dict), assert_msg.format(\n                self.select2attrs.__class__.__name__\n        )\n        if 'width' not in self.select2attrs:\n            self.select2attrs.update({'width': '250px'})\n        super(Select2Mixin, self).__init__(*args, **kwargs)", "docstring": "Initialize default select2 attributes.\n\nIf width is not provided, sets Select2 width to 250px.\n\nArgs:\nselect2attrs: a dictionary, which then passed to\nSelect2 constructor function as options.", "source": "juraj-google-style"}
{"code": "def __init__(self, columns: list[str], name: Optional[str]=None):\n    self.name = name\n    super().__init__(columns)", "docstring": "Deduplicates each row (0th dimension) of the provided tensor.\n\nArgs:\ncolumns: A list of the columns to apply the transformation on.\nname: optional. A name for this operation.", "source": "github-repos"}
{"code": "def add_resource(self, feature_column, resource_name, resource):\n    self._cols_to_resources_map[feature_column][resource_name] = resource\n    if self._layer is not None and isinstance(resource, trackable.Trackable):\n        if feature_column.name not in self._layer._resources:\n            self._layer._resources[feature_column.name] = data_structures.Mapping()\n        if resource_name not in self._layer._resources[feature_column.name]:\n            self._layer._resources[feature_column.name][resource_name] = resource", "docstring": "Creates a new resource.\n\nResources can be things such as tables, variables, trackables, etc.\n\nArgs:\nfeature_column: A `FeatureColumn` object this resource corresponds to.\nresource_name: Name of the resource.\nresource: The resource.\n\nReturns:\nThe created resource.", "source": "github-repos"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    version_value = registry_key.GetValueByName('Version')\n    count_subkey = registry_key.GetSubkeyByName('Count')\n\n    if not version_value:\n      parser_mediator.ProduceExtractionWarning('missing version value')\n      return\n\n    if not version_value.DataIsInteger():\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported version value data type')\n      return\n\n    format_version = version_value.GetDataAsObject()\n    if format_version not in (3, 5):\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported format version: {0:d}'.format(format_version))\n      return\n\n    if not count_subkey:\n      parser_mediator.ProduceExtractionWarning('missing count subkey')\n      return\n\n    userassist_entry_index = 0\n\n    for registry_value in count_subkey.GetValues():\n      try:\n        \n        \n        value_name = codecs.decode(registry_value.name, 'rot-13')\n      except UnicodeEncodeError as exception:\n        logger.debug((\n            'Unable to decode UserAssist string: {0:s} with error: {1!s}.\\n'\n            'Attempting piecewise decoding.').format(\n                registry_value.name, exception))\n\n        characters = []\n        for char in registry_value.name:\n          if ord(char) < 128:\n            try:\n              characters.append(char.decode('rot-13'))\n            except UnicodeEncodeError:\n              characters.append(char)\n          else:\n            characters.append(char)\n\n        value_name = ''.join(characters)\n\n      if format_version == 5:\n        path_segments = value_name.split('\\\\')\n\n        for segment_index, path_segment in enumerate(path_segments):\n          \n          guid = path_segments[segment_index][1:-1]\n          path_segments[segment_index] = known_folder_ids.PATHS.get(\n              guid, path_segment)\n\n        value_name = '\\\\'.join(path_segments)\n        \n        if '%' in value_name:\n          \n          \n          environment_variables = self._knowledge_base.GetEnvironmentVariables()\n          value_name = path_helper.PathHelper.ExpandWindowsPath(\n              value_name, environment_variables)\n\n      if value_name == 'UEME_CTLSESSION':\n        continue\n\n      if format_version == 3:\n        entry_map = self._GetDataTypeMap('user_assist_entry_v3')\n      elif format_version == 5:\n        entry_map = self._GetDataTypeMap('user_assist_entry_v5')\n      else:\n        parser_mediator.ProduceExtractionWarning(\n            'unsupported format version: {0:d}'.format(format_version))\n        continue\n\n      if not registry_value.DataIsBinaryData():\n        parser_mediator.ProduceExtractionWarning(\n            'unsupported value data type: {0:s}'.format(\n                registry_value.data_type_string))\n        continue\n\n      entry_data_size = entry_map.GetByteSize()\n      value_data_size = len(registry_value.data)\n      if entry_data_size != value_data_size:\n        parser_mediator.ProduceExtractionWarning(\n            'unsupported value data size: {0:d}'.format(value_data_size))\n        continue\n\n      try:\n        user_assist_entry = self._ReadStructureFromByteStream(\n            registry_value.data, 0, entry_map)\n      except (ValueError, errors.ParseError) as exception:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to parse UserAssist entry value with error: {0!s}'.format(\n                exception))\n        continue\n\n   
   event_data = UserAssistWindowsRegistryEventData()\n      event_data.key_path = count_subkey.path\n      event_data.number_of_executions = user_assist_entry.number_of_executions\n      event_data.value_name = value_name\n\n      if format_version == 3:\n        if event_data.number_of_executions > 5:\n          event_data.number_of_executions -= 5\n\n      elif format_version == 5:\n        userassist_entry_index += 1\n\n        event_data.application_focus_count = (\n            user_assist_entry.application_focus_count)\n        event_data.application_focus_duration = (\n            user_assist_entry.application_focus_duration)\n        event_data.entry_index = userassist_entry_index\n\n      timestamp = user_assist_entry.last_execution_time\n      if not timestamp:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      else:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def add_state(self, name: str, state: State, initial: bool=False):\n    if (not issubclass(state.__class__, State)):\n        raise AttributeError('state must be subclass of spade.behaviour.State')\n    self._states[name] = state\n    if initial:\n        self.current_state = name", "docstring": "Adds a new state to the FSM.\n\nArgs:\nname (str): the name of the state, which is used as its identifier.\nstate (spade.behaviour.State): The state class\ninitial (bool, optional): wether the state is the initial state or not. (Only one initial state is allowed) (Default value = False)", "source": "codesearchnet"}
{"code": "def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n        \n        temperatures = np.linspace(tmin, tmax, ntemp)\n\n        if self.structure:\n            ylabel = r\"$\\Delta E$ (kJ/mol)\"\n        else:\n            ylabel = r\"$\\Delta E$ (kJ/mol-c)\"\n\n        fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim,\n                                factor=1e-3, **kwargs)\n\n        return fig", "docstring": "Plots the vibrational internal energy in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "juraj-google-style"}
{"code": "def _checkResponseRegisterAddress(payload, registeraddress):\n    \n    _checkString(payload, minlength=2, description='payload')\n    _checkRegisteraddress(registeraddress)\n\n    BYTERANGE_FOR_STARTADDRESS = slice(0, 2)\n\n    bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]\n    receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)\n\n    if receivedStartAddress != registeraddress:\n        raise ValueError('Wrong given write start adress: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \\\n            receivedStartAddress, registeraddress, payload))", "docstring": "Check that the start adress as given in the response is correct.\n\nThe first two bytes in the payload holds the address value.\n\nArgs:\n* payload (string): The payload\n* registeraddress (int): The register address (use decimal numbers, not hex).\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data):\n    self.arguments['{SCORE}'] = self.scores[self.score]\n    self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n    results = self._run_gies(data, verbose=self.verbose)\n    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Run the GIES algorithm.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the GIES algorithm.", "source": "codesearchnet"}
{"code": "def add_perm(self, subj_str, perm_str):\n        \n        self._assert_valid_permission(perm_str)\n        self._perm_dict.setdefault(perm_str, set()).add(subj_str)", "docstring": "Add a permission for a subject.\n\nArgs:\nsubj_str : str\nSubject for which to add permission(s)\n\nperm_str : str\nPermission to add. Implicitly adds all lower permissions. E.g., ``write``\nwill also add ``read``.", "source": "juraj-google-style"}
{"code": "def scan(self, folder, sub=None, next_=None):\n    if (not sub):\n        sub = ''\n    assert isinstance(sub, string_types)\n    assert (isinstance(next_, int) or (next_ is None))\n    return self.post('scan', params={'folder': folder, 'sub': sub, 'next': next_})", "docstring": "Request immediate rescan of a folder, or a specific path within a\nfolder.\n\nArgs:\nfolder (str): Folder ID.\nsub (str): Path relative to the folder root. If sub is omitted\nthe entire folder is scanned for changes, otherwise only\nthe given path children are scanned.\nnext_ (int): Delays Syncthing's automated rescan interval for\na given amount of seconds.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def load_yaml(path):\n    with open(path, 'rt') as f:\n        yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)\n    if (not yamldict):\n        raise LoadError(('YAML file: %s is empty!' % path))\n    return yamldict", "docstring": "Load YAML file into an ordered dictionary\n\nArgs:\npath (str): Path to YAML file\n\nReturns:\nOrderedDict: Ordered dictionary containing loaded YAML file", "source": "codesearchnet"}
{"code": "def create_slot(primary, val, name, colocate_with_primary=True, *, copy_xla_sharding=False):\n    validate_shape = val.get_shape().is_fully_defined()\n    if isinstance(primary, variables.Variable):\n        prefix = primary._shared_name\n    else:\n        prefix = primary.op.name\n    with variable_scope.variable_scope(None, prefix + '/' + name):\n        if colocate_with_primary:\n            distribution_strategy = distribute_lib.get_strategy()\n            with distribution_strategy.extended.colocate_vars_with(primary):\n                return _create_slot_var(primary, val, '', validate_shape, None, None, copy_xla_sharding=copy_xla_sharding)\n        else:\n            return _create_slot_var(primary, val, '', validate_shape, None, None, copy_xla_sharding=copy_xla_sharding)", "docstring": "Create a slot initialized to the given value.\n\nThe type of the slot is determined by the given value.\n\nArgs:\nprimary: The primary `Variable` or `Tensor`.\nval: A `Tensor` specifying the initial value of the slot.\nname: Name to use for the slot variable.\ncolocate_with_primary: Boolean.  If True the slot is located\non the same device as `primary`.\ncopy_xla_sharding: Boolean. If True also copies XLA sharding\nfrom primary.\n\nReturns:\nA `Variable` object.", "source": "github-repos"}
{"code": "def annotate(label, since, current, extra_message, custom_message=None):\n    warning_message = _WarningMessage(label=label, since=since, current=current, extra_message=extra_message, custom_message=custom_message)\n\n    def _annotate(fnc):\n        if inspect.isclass(fnc):\n            old_new = fnc.__new__\n\n            def wrapped_new(cls, *args, **kwargs):\n                warning_message.emit_warning(fnc.__name__)\n                if old_new is object.__new__:\n                    return old_new(cls)\n                return old_new(cls, *args, **kwargs)\n            fnc.__new__ = staticmethod(wrapped_new)\n            if label == 'deprecated':\n                fnc.__doc__ = _add_deprecation_notice_to_docstring(fnc.__doc__, warning_message.message.replace('%name%', fnc.__name__))\n            return fnc\n        else:\n\n            @wraps(fnc)\n            def inner(*args, **kwargs):\n                warning_message.emit_warning(fnc.__name__)\n                return fnc(*args, **kwargs)\n            if label == 'deprecated':\n                inner.__doc__ = _add_deprecation_notice_to_docstring(fnc.__doc__, warning_message.message.replace('%name%', fnc.__name__))\n            return inner\n    return _annotate", "docstring": "Decorates an API with a deprecated or experimental annotation.\n\nArgs:\nlabel: the kind of annotation ('deprecated' or 'experimental').\nsince: the version that causes the annotation.\ncurrent: the suggested replacement function.\nextra_message: an optional additional message.\ncustom_message: if the default message does not suffice, the message\ncan be changed using this argument. A string\nwhit replacement tokens.\nA replecement string is were the previus args will\nbe located on the custom message.\nThe following replacement strings can be used:\n%name% -> API.__name__\n%since% -> since (Mandatory for the decapreted annotation)\n%current% -> current\n%extra% -> extra_message\n\nReturns:\nThe decorator for the API.", "source": "github-repos"}
{"code": "def resources(self, absolute_url=None):\n        \n        if absolute_url:\n            return Resources(mode=\"server\", root_url=absolute_url + self._prefix, path_versioner=StaticHandler.append_version)\n        return Resources(mode=\"server\", root_url=self._prefix, path_versioner=StaticHandler.append_version)", "docstring": "Provide a :class:`~bokeh.resources.Resources` that specifies where\nBokeh application sessions should load BokehJS resources from.\n\nArgs:\nabsolute_url (bool):\nAn absolute URL prefix to use for locating resources. If None,\nrelative URLs are used (default: None)", "source": "juraj-google-style"}
{"code": "def __is_function_action(self, action_function):\n        \n        \n        is_function_action = True\n\n        if not hasattr(action_function, '__call__'):\n            return False\n\n        \n        try:\n            for end_string, context in action_function():\n                if not isinstance(end_string, basestring):\n                    self.log_error(\"Action function must return end of filename as a string as first argument\")\n                if not isinstance(context, dict):\n                    self.log_error(\"Action function must return context as a dict as second argument\")\n                break\n        except Exception:\n            is_function_action = False\n\n        return is_function_action", "docstring": "Detect if given function is really an action function.\n\nArgs:\naction_function: Function to test.\n\nNote:\nWe don't care if the variable refer to a function but rather if it is callable or not.", "source": "juraj-google-style"}
{"code": "def _get_status_code(self, http_status):\n    try:\n        return int(http_status.split(' ', 1)[0])\n    except TypeError:\n        _logger.warning('Unable to find status code in HTTP status %r.', http_status)\n    return 500", "docstring": "Get the HTTP status code from an HTTP status string.\n\nArgs:\nhttp_status: A string containing a HTTP status code and reason.\n\nReturns:\nAn integer with the status code number from http_status.", "source": "codesearchnet"}
{"code": "def __init__(self, project, query, checksum, timeout_secs=0):\n    if bigquery is None:\n        raise ImportError('Bigquery dependencies are not installed.')\n    if not query or not isinstance(query, str):\n        raise ValueError('Invalid argument: query. Please use non-empty string')\n    if not checksum or not isinstance(checksum, str):\n        raise ValueError('Invalid argument: checksum. Please use non-empty string')\n    self.project = project\n    self.query = query\n    self.expected_checksum = checksum\n    self.checksum = None\n    self.timeout_secs = timeout_secs", "docstring": "Initialize BigQueryMatcher object.\nArgs:\nproject: The name (string) of the project.\nquery: The query (string) to perform.\nchecksum: SHA-1 hash generated from a sorted list of lines\nread from expected output.\ntimeout_secs: Duration to retry query until checksum matches. This\nis useful for DF streaming pipelines or BQ streaming inserts. The\ndefault (0) never retries.", "source": "github-repos"}
{"code": "def reshape_data(tensor, per_example_length=1):\n    dims = [1, 0]\n    for i in xrange(2, tensor.get_shape().ndims):\n        dims.append(i)\n    return pt.wrap(tf.transpose(tensor, dims)).reshape([(- 1), per_example_length])", "docstring": "Reshapes input so that it is appropriate for sequence_lstm..\n\nThe expected format for sequence lstms is\n[timesteps * batch, per_example_length] and the data produced by the utilities\nis [batch, timestep, *optional* expected_length].  The result can be cleaved\nso that there is a Tensor per timestep.\n\nArgs:\ntensor: The tensor to reshape.\nper_example_length: The number of examples at each timestep.\nReturns:\nA Pretty Tensor that is compatible with cleave and then sequence_lstm.", "source": "codesearchnet"}
{"code": "def _Scroll(self, lines=None):\n    \n    if lines is None:\n      lines = self._cli_lines\n\n    if lines < 0:\n      self._displayed -= self._cli_lines\n      self._displayed += lines\n      if self._displayed < 0:\n        self._displayed = 0\n      self._lines_to_show = self._cli_lines\n    else:\n      self._lines_to_show = lines\n\n    self._lastscroll = lines", "docstring": "Set attributes to scroll the buffer correctly.\n\nArgs:\nlines: An int, number of lines to scroll. If None, scrolls\nby the terminal length.", "source": "juraj-google-style"}
{"code": "def __init__(self, config, http_client_session=None):\n        \n        self.verify = config.get(\"verify\", True)\n        self.output = config.get(\"output\", [])\n        self.validation_results = []\n        config_variables = config.get(\"variables\", {})\n\n        \n        testcase_setup_hooks = config.get(\"setup_hooks\", [])\n        \n        self.testcase_teardown_hooks = config.get(\"teardown_hooks\", [])\n\n        self.http_client_session = http_client_session or HttpSession()\n        self.session_context = SessionContext(config_variables)\n\n        if testcase_setup_hooks:\n            self.do_hook_actions(testcase_setup_hooks, \"setup\")", "docstring": "run testcase or testsuite.\n\nArgs:\nconfig (dict): testcase/testsuite config dict\n\n{\n\"name\": \"ABC\",\n\"variables\": {},\n\"setup_hooks\", [],\n\"teardown_hooks\", []\n}\n\nhttp_client_session (instance): requests.Session(), or locust.client.Session() instance.", "source": "juraj-google-style"}
{"code": "def parse_commit_message(commit_message: str) -> Dict[str, bool]:\n    if commit_message is None:\n        return {'skip': False, 'no_filter': False, 'test_all': False}\n    command_search = re.search('\\\\[([^\\\\]]*)\\\\]', commit_message)\n    if command_search is not None:\n        command = command_search.groups()[0]\n        command = command.lower().replace('-', ' ').replace('_', ' ')\n        skip = command in ['ci skip', 'skip ci', 'circleci skip', 'skip circleci']\n        no_filter = set(command.split(' ')) == {'no', 'filter'}\n        test_all = set(command.split(' ')) == {'test', 'all'}\n        return {'skip': skip, 'no_filter': no_filter, 'test_all': test_all}\n    else:\n        return {'skip': False, 'no_filter': False, 'test_all': False}", "docstring": "Parses the commit message to detect if a command is there to skip, force all or part of the CI.\n\nArgs:\ncommit_message (`str`): The commit message of the current commit.\n\nReturns:\n`Dict[str, bool]`: A dictionary of strings to bools with keys the following keys: `\"skip\"`,\n`\"test_all_models\"` and `\"test_all\"`.", "source": "github-repos"}
{"code": "def html2text(__html: str, *, width: int = 80,\n              ascii_replacements: bool = False) -> str:\n    \n    html2.BODY_WIDTH = width\n    html2.UNICODE_SNOB = ascii_replacements\n    return html2.html2text(__html).strip()", "docstring": "HTML to plain text renderer.\n\nSee also: :pypi:`html2text`\n\nArgs:\n__html: Text to process\nwidth: Paragraph width\nascii_replacements: Use pseudo-ASCII replacements for Unicode\nReturns:\nRendered text", "source": "juraj-google-style"}
{"code": "def get_parent(self, tree, alt=None):\n        \n        parent = self.parent_db.get(tree.path)\n\n        if not parent:\n            return alt\n\n        return list(parent)[0]", "docstring": "Get parent for given `tree` or `alt` if not found.\n\nArgs:\ntree (obj): :class:`.Tree` instance, which is already stored in DB.\nalt (obj, default None): Alternative value returned when `tree` is\nnot found.\n\nReturns:\nobj: :class:`.Tree` parent to given `tree`.", "source": "juraj-google-style"}
{"code": "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n    x = tf.nn.l2_normalize(x)\n    for _ in range(num_steps):\n        x = eig_one_step(x, learning_rate, vector_prod_fn)\n    return x", "docstring": "Computes eigenvector which corresponds to minimum eigenvalue.\n\nArgs:\nx: initial value of eigenvector.\nnum_steps: number of optimization steps.\nlearning_rate: learning rate.\nvector_prod_fn: function which takes x and returns product H*x.\n\nReturns:\napproximate value of eigenvector.\n\nThis function finds approximate value of eigenvector of matrix H which\ncorresponds to smallest (by absolute value) eigenvalue of H.\nIt works by solving optimization problem x^{T}*H*x -> min.", "source": "codesearchnet"}
{"code": "def _parse_ospf_process_id(self, config):\n        \n        match = re.search(r'^router ospf (\\d+)', config)\n        return dict(ospf_process_id=int(match.group(1)))", "docstring": "Parses config file for the OSPF proc ID\n\nArgs:\nconfig(str):  Running configuration\nReturns:\ndict: key: ospf_process_id (int)", "source": "juraj-google-style"}
{"code": "def apply_formatting_dict(obj: Any, formatting: Dict[(str, Any)]) -> Any:\n    new_obj = obj\n    if isinstance(obj, str):\n        if ('$' not in obj):\n            new_obj = string.Formatter().vformat(obj, (), formatting_dict(**formatting))\n    elif isinstance(obj, dict):\n        new_obj = {}\n        for (k, v) in obj.items():\n            new_obj[k] = apply_formatting_dict(v, formatting)\n    elif isinstance(obj, list):\n        new_obj = []\n        for (i, el) in enumerate(obj):\n            new_obj.append(apply_formatting_dict(el, formatting))\n    elif (isinstance(obj, int) or isinstance(obj, float) or (obj is None)):\n        pass\n    elif isinstance(obj, enum.Enum):\n        pass\n    else:\n        logger.debug(f\"Unrecognized obj '{obj}' of type '{type(obj)}'\")\n    return new_obj", "docstring": "Recursively apply a formatting dict to all strings in a configuration.\n\nNote that it skips applying the formatting if the string appears to contain latex (specifically,\nif it contains an \"$\"), since the formatting fails on nested brackets.\n\nArgs:\nobj: Some configuration object to recursively applying the formatting to.\nformatting (dict): String formatting options to apply to each configuration field.\nReturns:\ndict: Configuration with formatting applied to every field.", "source": "codesearchnet"}
{"code": "def apply_cut(self, cut):\n        \n        return Subsystem(self.network, self.state, self.node_indices,\n                         cut=cut, mice_cache=self._mice_cache)", "docstring": "Return a cut version of this |Subsystem|.\n\nArgs:\ncut (Cut): The cut to apply to this |Subsystem|.\n\nReturns:\nSubsystem: The cut subsystem.", "source": "juraj-google-style"}
{"code": "def filter_dict(d, exclude):\n    ret = {}\n    for (key, value) in d.items():\n        if (key not in exclude):\n            ret.update({key: value})\n    return ret", "docstring": "Return a new dict with specified keys excluded from the origional dict\n\nArgs:\nd (dict): origional dict\nexclude (list): The keys that are excluded", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    esedb_file = pyesedb.file()\n    try:\n        esedb_file.open_file_object(file_object)\n    except IOError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to open file with error: {0!s}'.format(exception))\n        return\n    cache = ESEDBCache()\n    try:\n        table_names = frozenset(self._GetTableNames(esedb_file))\n        for plugin in self._plugins:\n            if parser_mediator.abort:\n                break\n            if (not plugin.required_tables.issubset(table_names)):\n                continue\n            try:\n                plugin.UpdateChainAndProcess(parser_mediator, cache=cache, database=esedb_file)\n            except Exception as exception:\n                parser_mediator.ProduceExtractionWarning('plugin: {0:s} unable to parse ESE database with error: {1!s}'.format(plugin.NAME, exception))\n    finally:\n        esedb_file.close()", "docstring": "Parses an ESE database file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "codesearchnet"}
{"code": "def prepare_run_debug_urls(self, fetches, feed_dict):\n    self._run_counter_lock.acquire()\n    run_dir = os.path.join(self._session_root, 'run_%d_%d' % (int(time.time() * 1000000.0), self._run_counter))\n    self._run_counter += 1\n    self._run_counter_lock.release()\n    gfile.MkDir(run_dir)\n    fetches_event = event_pb2.Event()\n    fetches_event.log_message.message = repr(fetches)\n    fetches_path = os.path.join(run_dir, debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)\n    with gfile.Open(os.path.join(fetches_path), 'wb') as f:\n        f.write(fetches_event.SerializeToString())\n    feed_keys_event = event_pb2.Event()\n    feed_keys_event.log_message.message = repr(feed_dict.keys()) if feed_dict else repr(feed_dict)\n    feed_keys_path = os.path.join(run_dir, debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)\n    with gfile.Open(os.path.join(feed_keys_path), 'wb') as f:\n        f.write(feed_keys_event.SerializeToString())\n    return ['file:", "docstring": "Implementation of abstract method in superclass.\n\nSee doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`\nfor details. This implementation creates a run-specific subdirectory under\nself._session_root and stores information regarding run `fetches` and\n`feed_dict.keys()` in the subdirectory.\n\nArgs:\nfetches: Same as the `fetches` argument to `Session.run()`\nfeed_dict: Same as the `feed_dict` argument to `Session.run()`\n\nReturns:\ndebug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in\nthis `Session.run()` call.", "source": "github-repos"}
{"code": "def get_class_weights(y, smooth_factor=0):\n    from collections import Counter\n    counter = Counter(y)\n    if (smooth_factor > 0):\n        p = (max(counter.values()) * smooth_factor)\n        for k in counter.keys():\n            counter[k] += p\n    majority = max(counter.values())\n    return {cls: float((majority / count)) for (cls, count) in counter.items()}", "docstring": "Returns the weights for each class based on the frequencies of the samples.\n\nArgs:\ny: A list of true labels (the labels must be hashable).\nsmooth_factor: A factor that smooths extremely uneven weights.\n\nReturns:\nA dictionary with the weight for each class.", "source": "codesearchnet"}
{"code": "def BuildFilterFindSpecs(self, artifact_definitions_path, custom_artifacts_path, knowledge_base_object, artifact_filter_names=None, filter_file_path=None):\n    environment_variables = knowledge_base_object.GetEnvironmentVariables()\n    find_specs = None\n    if artifact_filter_names:\n        logger.debug('building find specification based on artifacts: {0:s}'.format(', '.join(artifact_filter_names)))\n        artifacts_registry_object = BaseEngine.BuildArtifactsRegistry(artifact_definitions_path, custom_artifacts_path)\n        self._artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper(artifacts_registry_object, knowledge_base_object)\n        self._artifacts_filter_helper.BuildFindSpecs(artifact_filter_names, environment_variables=environment_variables)\n        if self._artifacts_filter_helper.registry_find_specs:\n            self._artifacts_filter_helper.BuildFindSpecs(self._WINDOWS_REGISTRY_FILES_ARTIFACT_NAMES, environment_variables=environment_variables)\n        find_specs = self._artifacts_filter_helper.file_system_find_specs\n        if (not find_specs):\n            raise errors.InvalidFilter('No valid file system find specifications were built from artifacts.')\n    elif filter_file_path:\n        logger.debug('building find specification based on filter file: {0:s}'.format(filter_file_path))\n        filter_file_object = filter_file.FilterFile(filter_file_path)\n        find_specs = filter_file_object.BuildFindSpecs(environment_variables=environment_variables)\n        if (not find_specs):\n            raise errors.InvalidFilter('No valid file system find specifications were built from filter file.')\n    return find_specs", "docstring": "Builds find specifications from artifacts or filter file if available.\n\nArgs:\nartifact_definitions_path (str): path to artifact definitions file.\ncustom_artifacts_path (str): path to custom artifact definitions file.\nknowledge_base_object (KnowledgeBase): knowledge base.\nartifact_filter_names (Optional[list[str]]): names of artifact\ndefinitions that are used for filtering file system and Windows\nRegistry key paths.\nfilter_file_path (Optional[str]): path of filter file.\n\nReturns:\nlist[dfvfs.FindSpec]: find specifications for the file source type.\n\nRaises:\nInvalidFilter: if no valid FindSpecs are built.", "source": "codesearchnet"}
{"code": "def convert_dt_time(duration, return_iter=False):\n    \n    try:\n        days, hours, minutes, seconds = convert_timedelta(duration)\n        if return_iter:\n            return days, hours, minutes, seconds\n        \n        if days > 0:\n            format_string = (\n                '{} day{}, {} hour{}'.format(\n                 days, 's' if days != 1 else '', hours, 's' if hours != 1 else ''))\n        elif hours > 1:\n            format_string = (\n                '{} hour{}, {} minute{}'.format(\n                 hours, 's' if hours != 1 else '', minutes, 's' if minutes != 1 else ''))\n        else:\n            format_string = (\n                '{} minute{}, {} sec{}'.format(\n                 minutes, 's' if minutes != 1 else '', seconds, 's' if seconds != 1 else ''))\n    except AttributeError as e:\n        logger.exception(\n            '%s: Type mismatch when converting timedelta objects (Code: %s)' %\n            (inspect.stack()[0][3], str(e)))\n    except Exception as e:\n        logger.exception(\n            '%s: Unknown error when converting datetime objects (Code: %s)' %\n            (inspect.stack()[0][3], str(e)))\n    return format_string", "docstring": "Summary:\nconvert timedelta objects to human readable output\nArgs:\n:duration (datetime.timedelta): time duration to convert\n:return_iter (tuple):  tuple containing time sequence\nReturns:\ndays, hours, minutes, seconds | TYPE: tuple (integers), OR\nhuman readable, notated units | TYPE: string", "source": "juraj-google-style"}
{"code": "def preds(self, nodeids=None):\n        \n        if nodeids is None: nodeids = self._nodeids\n        _eps = self._eps\n        return [_eps[nid][1] for nid in nodeids]", "docstring": "Return the Pred objects for *nodeids*, or all Preds.\n\nArgs:\nnodeids: an iterable of nodeids of predications to return\nPreds from; if `None`, return all Preds", "source": "juraj-google-style"}
{"code": "def dataflow_to_dataset(df, types):\n    assert isinstance(df, DataFlow), df\n    assert isinstance(types, (list, tuple)), types\n    df = MapData(df, (lambda dp: tuple(dp)))\n    df.reset_state()\n    ds = tf.data.Dataset.from_generator(df.get_data, tuple(types))\n    return ds", "docstring": "Wrap a dataflow to tf.data.Dataset.\nThis function will also reset the dataflow.\n\nIf the dataflow itself is finite, the returned dataset is also finite.\nTherefore, if used for training, you'll need to add `.repeat()` on the returned\ndataset.\n\nArgs:\ndf (DataFlow): a dataflow which produces lists\ntypes([tf.DType]): list of types\n\nReturns:\n(tf.data.Dataset)", "source": "codesearchnet"}
{"code": "def get_entry_type(self, tag):\n        \n\n        if tag.findParent().get('kind') in ['class', 'struct']:\n            return u'Method'\n\n        return super(functionTagProcessor, self).get_entry_type(tag)", "docstring": "Override that returns u'Method' for class/struct methods.\n\nOverride as necessary.\n\nArgs:\ntag: A BeautifulSoup Tag for a function.\n\nReturns:\nIf this is a class/struct method, returns u'Method', otherwise\nreturns the value from the inherited implementation of\nget_entry_type (which should be u'Function').", "source": "juraj-google-style"}
{"code": "def fill_memory_slot(memory, value, index):\n    mask = tf.to_float(tf.one_hot(index, tf.shape(memory)[0])[(:, None, None, None)])\n    fill_memory = (((1 - mask) * memory) + (mask * value[(None, ...)]))\n    return fill_memory", "docstring": "Fills the memory slot at a particular index with the given value.\n\nArgs:\nmemory: a 4-d tensor [memory_size, batch, length, channel] containing\nthe state of all steps\nvalue: a 3-d tensor [batch, length, channel] as the sate\nindex: integer in [0, memory_size)\n\nReturns:\nfilled memory", "source": "codesearchnet"}
{"code": "def evalAsync(self, amplstatements, callback, **kwargs):\n        \n        if self._langext is not None:\n            amplstatements = self._langext.translate(amplstatements, **kwargs)\n\n        def async_call():\n            self._lock.acquire()\n            try:\n                self._impl.eval(amplstatements)\n                self._errorhandler_wrapper.check()\n            except Exception:\n                self._lock.release()\n                raise\n            else:\n                self._lock.release()\n                callback.run()\n        Thread(target=async_call).start()", "docstring": "Interpret the given AMPL statement asynchronously.\n\nArgs:\namplstatements: A collection of AMPL statements and declarations to\nbe passed to the interpreter.\n\ncallback: Callback to be executed when the statement has been\ninterpreted.\n\nRaises:\nRuntimeError: if the input is not a complete AMPL statement (e.g.\nif it does not end with semicolon) or if the underlying\ninterpreter is not running.", "source": "juraj-google-style"}
{"code": "def set(self, key, value):\n        \n\n        data = self._load_file()\n        data[key] = value\n        self._save_file(data)", "docstring": "Set the value of a key\n\nArgs:\nkey (string): The key used to store this value\nvalue (string): The value to store", "source": "juraj-google-style"}
{"code": "def RegisterPlugin(cls, plugin_class):\n    plugin_name = plugin_class.NAME.lower()\n    if (plugin_name in cls._plugin_classes):\n        raise KeyError('Plugin class already set for name: {0:s}.'.format(plugin_class.NAME))\n    cls._plugin_classes[plugin_name] = plugin_class", "docstring": "Registers a plugin class.\n\nThe plugin classes are identified based on their lower case name.\n\nArgs:\nplugin_class (type): class of the plugin.\n\nRaises:\nKeyError: if plugin class is already set for the corresponding name.", "source": "codesearchnet"}
{"code": "def precipitable_water(self, value=999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `precipitable_water`'.format(value))\n\n        self._precipitable_water = value", "docstring": "Corresponds to IDD Field `precipitable_water`\n\nArgs:\nvalue (float): value for IDD Field `precipitable_water`\nUnit: mm\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def case(self, case_id=None):\n        \n        cases = self.cases()\n        if case_id:\n            for case in cases:\n                if case.case_id == case_id:\n                    return case\n        else:\n            if cases:\n                return cases[0]\n\n        return None", "docstring": "Return a Case object\n\nIf no case_id is given return one case\n\nArgs:\ncase_id (str): A case id\n\nReturns:\ncase(Case): A Case object", "source": "juraj-google-style"}
{"code": "def class_logit(layer, label):\n\n    def inner(T):\n        if isinstance(label, int):\n            class_n = label\n        else:\n            class_n = T('labels').index(label)\n        logits = T(layer)\n        logit = tf.reduce_sum(logits[(:, class_n)])\n        return logit\n    return inner", "docstring": "Like channel, but for softmax layers.\n\nArgs:\nlayer: A layer name string.\nlabel: Either a string (refering to a label in model.labels) or an int\nlabel position.\n\nReturns:\nObjective maximizing a logit.", "source": "codesearchnet"}
{"code": "def configure_attributes(self, json_data):\n        \n        env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n        elbclient = env.client('elb')\n\n        elb_settings = self.properties['elb']\n        LOG.debug('Block ELB Settings Pre Configure Load Balancer Attributes:\\n%s', pformat(elb_settings))\n\n        \n        \n        for job in json.loads(json_data)['job']:\n            load_balancer_attributes = {\n                'CrossZoneLoadBalancing': {\n                    'Enabled': True\n                },\n                'AccessLog': {\n                    'Enabled': False,\n                },\n                'ConnectionDraining': {\n                    'Enabled': False,\n                },\n                'ConnectionSettings': {\n                    'IdleTimeout': 60\n                }\n            }\n            if elb_settings.get('connection_draining_timeout'):\n                connection_draining_timeout = int(elb_settings['connection_draining_timeout'])\n                LOG.info('Applying Custom Load Balancer Connection Draining Timeout: %d', connection_draining_timeout)\n                load_balancer_attributes['ConnectionDraining'] = {\n                    'Enabled': True,\n                    'Timeout': connection_draining_timeout\n                }\n            if elb_settings.get('idle_timeout'):\n                idle_timeout = int(elb_settings['idle_timeout'])\n                LOG.info('Applying Custom Load Balancer Idle Timeout: %d', idle_timeout)\n                load_balancer_attributes['ConnectionSettings'] = {'IdleTimeout': idle_timeout}\n            if elb_settings.get('access_log'):\n                access_log_bucket_name = elb_settings['access_log']['bucket_name']\n                access_log_bucket_prefix = elb_settings['access_log']['bucket_prefix']\n                access_log_emit_interval = int(elb_settings['access_log']['emit_interval'])\n                LOG.info('Applying Custom Load Balancer Access Log: %s/%s every %d minutes', access_log_bucket_name,\n                         access_log_bucket_prefix, access_log_emit_interval)\n                load_balancer_attributes['AccessLog'] = {\n                    'Enabled': True,\n                    'S3BucketName': access_log_bucket_name,\n                    'EmitInterval': access_log_emit_interval,\n                    'S3BucketPrefix': access_log_bucket_prefix\n                }\n\n            LOG.info('Applying Load Balancer Attributes')\n            LOG.debug('Load Balancer Attributes:\\n%s', pformat(load_balancer_attributes))\n            elbclient.modify_load_balancer_attributes(\n                LoadBalancerName=self.app, LoadBalancerAttributes=load_balancer_attributes)", "docstring": "Configure load balancer attributes such as idle timeout, connection draining, etc\n\nArgs:\njson_data (json): return data from ELB upsert", "source": "juraj-google-style"}
{"code": "def get_features(self, tokenizer, max_length=None, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, return_tensors=None):\n    if max_length is None:\n        max_length = tokenizer.max_len\n    label_map = {label: i for i, label in enumerate(self.labels)}\n    all_input_ids = []\n    for ex_index, example in enumerate(self.examples):\n        if ex_index % 10000 == 0:\n            logger.info(f'Tokenizing example {ex_index}')\n        input_ids = tokenizer.encode(example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len))\n        all_input_ids.append(input_ids)\n    batch_length = max((len(input_ids) for input_ids in all_input_ids))\n    features = []\n    for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):\n        if ex_index % 10000 == 0:\n            logger.info(f'Writing example {ex_index}/{len(self.examples)}')\n        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n        padding_length = batch_length - len(input_ids)\n        if pad_on_left:\n            input_ids = [pad_token] * padding_length + input_ids\n            attention_mask = [0 if mask_padding_with_zero else 1] * padding_length + attention_mask\n        else:\n            input_ids = input_ids + [pad_token] * padding_length\n            attention_mask = attention_mask + [0 if mask_padding_with_zero else 1] * padding_length\n        if len(input_ids) != batch_length:\n            raise ValueError(f'Error with input length {len(input_ids)} vs {batch_length}')\n        if len(attention_mask) != batch_length:\n            raise ValueError(f'Error with input length {len(attention_mask)} vs {batch_length}')\n        if self.mode == 'classification':\n            label = label_map[example.label]\n        elif self.mode == 'regression':\n            label = float(example.label)\n        else:\n            raise ValueError(self.mode)\n        if ex_index < 5 and self.verbose:\n            logger.info('*** Example ***')\n            logger.info(f'guid: {example.guid}')\n            logger.info(f'input_ids: {' '.join([str(x) for x in input_ids])}')\n            logger.info(f'attention_mask: {' '.join([str(x) for x in attention_mask])}')\n            logger.info(f'label: {example.label} (id = {label})')\n        features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))\n    if return_tensors is None:\n        return features\n    elif return_tensors == 'tf':\n        if not is_tf_available():\n            raise RuntimeError(\"return_tensors set to 'tf' but TensorFlow 2.0 can't be imported\")\n        import tensorflow as tf\n\n        def gen():\n            for ex in features:\n                yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask}, ex.label)\n        dataset = tf.data.Dataset.from_generator(gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])}, tf.TensorShape([])))\n        return dataset\n    elif return_tensors == 'pt':\n        if not is_torch_available():\n            raise RuntimeError(\"return_tensors set to 'pt' but PyTorch can't be imported\")\n        import torch\n        from torch.utils.data import TensorDataset\n        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n        if self.mode == 'classification':\n       
     all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n        elif self.mode == 'regression':\n            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n        dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)\n        return dataset\n    else:\n        raise ValueError(\"return_tensors should be one of 'tf' or 'pt'\")", "docstring": "Convert examples in a list of `InputFeatures`\n\nArgs:\ntokenizer: Instance of a tokenizer that will tokenize the examples\nmax_length: Maximum example length\npad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)\npad_token: Padding token\nmask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values\nand by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual\nvalues)\n\nReturns:\nIf the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the\ntask-specific features. If the input is a list of `InputExamples`, will return a list of task-specific\n`InputFeatures` which can be fed to the model.", "source": "github-repos"}
{"code": "def display_task_progress(self, instance, project, region, request_id=None, user=None, poll_interval=60):\n    total_completed = 0\n    while True:\n        task_results = self.client.get_task_data(instance, project, region, request_id=request_id, user=user)\n        tasks = {task['id']: task for task in task_results}\n        completed_tasks = set()\n        pending_tasks = set()\n        for task in tasks.values():\n            if (task.get('successful') is not None):\n                completed_tasks.add(task['id'])\n            else:\n                pending_tasks.add(task['id'])\n        if ((len(completed_tasks) > total_completed) or (not completed_tasks)):\n            total_completed = len(completed_tasks)\n            print('Task status update (completed: {0:d} | pending: {1:d})'.format(len(completed_tasks), len(pending_tasks)))\n            print('Completed tasks:')\n            for task_id in completed_tasks:\n                self._print_task_data(tasks[task_id])\n            print('Pending tasks:')\n            for task_id in pending_tasks:\n                self._print_task_data(tasks[task_id])\n        if ((len(completed_tasks) == len(task_results)) and completed_tasks):\n            print('All {0:d} Tasks completed'.format(len(task_results)))\n            return\n        time.sleep(poll_interval)", "docstring": "Displays the overall progress of tasks in a Turbinia job.\n\nArgs:\ninstance (string): The name of the Turbinia instance\nproject (string): The project containing the disk to process\nregion (string): Region where turbinia is configured.\nrequest_id (string): The request ID provided by Turbinia.\nuser (string): The username to filter tasks by.\npoll_interval (int): The interval at which to poll for new results.", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    try:\n        structure = self._LINE.parseString(line)\n    except pyparsing.ParseException:\n        logger.debug('Not a SkyDrive old log file')\n        return False\n    (day_of_month, month, year, hours, minutes, seconds, milliseconds) = structure.date_time\n    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds, milliseconds)\n    try:\n        dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple)\n    except ValueError:\n        logger.debug('Not a SkyDrive old log file, invalid date and time: {0!s}'.format(structure.date_time))\n        return False\n    return True", "docstring": "Verify that this file is a SkyDrive old log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "codesearchnet"}
{"code": "def for_executor(cls, executor: Optional[Executor]) -> 'Subsystem':\n        \n        if isinstance(executor, ThreadPoolExecutor):\n            return _ThreadingSubsystem(executor)\n        elif executor is None:\n            return _AsyncioSubsystem()\n        else:\n            raise TypeError(executor)", "docstring": "Return a subsystem based on the given executor. If ``executor`` is\nNone, use :mod:`asyncio`. If ``executor`` is a\n:class:`concurrent.futures.ThreadPoolExecutor`, use :mod:`threading`.\n\nArgs:\nexecutor: The executor in use, if any.", "source": "juraj-google-style"}
{"code": "def start(self, hostname=None, port=None, templates_path=None):\n        \n        self.hostname = hostname if hostname else \"localhost\"\n        if port:\n            self.port = port\n        elif not self.port:\n            self.port = unused_port(self.hostname)\n        if templates_path:\n            self.loaders.insert(0, jinja2.FileSystemLoader(templates_path))\n            self._set_loaders()\n        self.setup_routes()\n        self.runner = aioweb.AppRunner(self.app)\n        return self.agent.submit(start_server_in_loop(self.runner, self.hostname, self.port, self.agent))", "docstring": "Starts the web interface.\n\nArgs:\nhostname (str, optional): host name to listen from. (Default value = None)\nport (int, optional): port to listen from. (Default value = None)\ntemplates_path (str, optional): path to look for templates. (Default value = None)", "source": "juraj-google-style"}
{"code": "def _call_and_return_none_on_error(func: Callable[[], NotNoneT], error_msg: str) -> Optional[NotNoneT]:\n    try:\n        return func()\n    except Exception as ex:\n        traceback.print_exception(ex)\n        logging.error(error_msg)\n        return None", "docstring": "Calls `func` and returns `None` on error.\n\nThis is used to gracefully return the 'error status' represented as `None`, as\nraising exceptions from `PyFunctionLibrary` methods crashes the program.\n\nArgs:\nfunc: The function to run. The function should be a callable returning a\nnon-None value.\nerror_msg: The error message to log upon error. Used for debugging purposes.\n\nReturns:\n`None` if the function raises an exception. The return value of `func`\notherwise.", "source": "github-repos"}
{"code": "def get_next_as_optional(iterator):\n    return iterator.get_next_as_optional()", "docstring": "Returns a `tf.experimental.Optional` with the next element of the iterator.\n\nIf the iterator has reached the end of the sequence, the returned\n`tf.experimental.Optional` will have no value.\n\nArgs:\niterator: A `tf.data.Iterator`.\n\nReturns:\nA `tf.experimental.Optional` object which either contains the next element\nof the iterator (if it exists) or no value.", "source": "github-repos"}
{"code": "def segs(self, word):\n    return [m.group('all') for m in self.seg_regex.finditer(word)]", "docstring": "Returns a list of segments from a word\n\nArgs:\nword (unicode): input word as Unicode IPA string\n\nReturns:\nlist: list of strings corresponding to segments found in `word`", "source": "codesearchnet"}
{"code": "def alt40fms(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[13] == '0':\n        return None\n\n    alt = bin2int(d[14:26]) * 16    \n    return alt", "docstring": "Selected altitude, FMS\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS40) string\n\nReturns:\nint: altitude in feet", "source": "juraj-google-style"}
{"code": "def __init__(self, regularizer=None, activity_regularizer=None, use_operator=False, var_name='v', **kwargs):\n    self._regularizer = regularizer\n    if isinstance(regularizer, dict):\n        self._regularizer = regularizers.deserialize(regularizer, custom_objects=globals())\n    self._activity_regularizer = activity_regularizer\n    if isinstance(activity_regularizer, dict):\n        self._activity_regularizer = regularizers.deserialize(activity_regularizer, custom_objects=globals())\n    self._use_operator = use_operator\n    self._var_name = var_name\n    super(MultiplyLayer, self).__init__(activity_regularizer=self._activity_regularizer, **kwargs)", "docstring": "Initializes the MultiplyLayer.\n\nArgs:\nregularizer: The weight regularizer on the scalar variable.\nactivity_regularizer: The activity regularizer.\nuse_operator: If True, add using the * operator. If False, add using\ntf.multiply.\nvar_name: The name of the variable. It can be useful to pass a name other\nthan 'v', to test having the attribute name (self.v) being different\nfrom the variable name.\n**kwargs: Passed to AssertTypeLayer constructor.", "source": "github-repos"}
{"code": "def create(self, params):\n        \n        sh_id = params.get('id', str(uuid4()))\n        if sh_id in self:\n            raise ShardedClusterError(\n                \"Sharded cluster with id %s already exists.\" % sh_id)\n        params['id'] = sh_id\n        cluster = ShardedCluster(params)\n        self[cluster.id] = cluster\n        return cluster.id", "docstring": "create new ShardedCluster\nArgs:\nparams - dictionary with specific params for instance\nReturn cluster_id\nwhere cluster_id - id which can use to take the cluster from servers collection", "source": "juraj-google-style"}
{"code": "def set_hook_data(self, key, data):\n    if (not isinstance(data, collections.Mapping)):\n        raise ValueError(('Hook (key: %s) data must be an instance of collections.Mapping (a dictionary for example).' % key))\n    if (key in self.hook_data):\n        raise KeyError('Hook data for key %s already exists, each hook must have a unique data_key.', key)\n    self.hook_data[key] = data", "docstring": "Set hook data for the given key.\n\nArgs:\nkey(str): The key to store the hook data in.\ndata(:class:`collections.Mapping`): A dictionary of data to store,\nas returned from a hook.", "source": "codesearchnet"}
{"code": "def usermacro_get(macro=None, hostids=None, templateids=None, hostmacroids=None, globalmacroids=None, globalmacro=False, **kwargs):\n    conn_args = _login(**kwargs)\n    ret = {}\n    try:\n        if conn_args:\n            method = 'usermacro.get'\n            params = {'output': 'extend', 'filter': {}}\n            if macro:\n                if isinstance(macro, dict):\n                    macro = (('{' + six.text_type(macro.keys()[0])) + '}')\n                if ((not macro.startswith('{')) and (not macro.endswith('}'))):\n                    macro = (('{' + macro) + '}')\n                params['filter'].setdefault('macro', macro)\n            if hostids:\n                params.setdefault('hostids', hostids)\n            elif templateids:\n                params.setdefault('templateids', hostids)\n            if hostmacroids:\n                params.setdefault('hostmacroids', hostmacroids)\n            elif globalmacroids:\n                globalmacro = True\n                params.setdefault('globalmacroids', globalmacroids)\n            if globalmacro:\n                params = _params_extend(params, globalmacro=True)\n            params = _params_extend(params, **kwargs)\n            ret = _query(method, params, conn_args['url'], conn_args['auth'])\n            return (ret['result'] if ret['result'] else False)\n        else:\n            raise KeyError\n    except KeyError:\n        return ret", "docstring": "Retrieve user macros according to the given parameters.\n\nArgs:\nmacro:          name of the usermacro\nhostids:        Return macros for the given hostids\ntemplateids:    Return macros for the given templateids\nhostmacroids:   Return macros with the given hostmacroids\nglobalmacroids: Return macros with the given globalmacroids (implies globalmacro=True)\nglobalmacro:    if True, returns only global macros\n\n\noptional kwargs:\n_connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)\n_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)\n_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)\n\nReturns:\nArray with usermacro details, False if no usermacro found or on failure.\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' zabbix.usermacro_get macro='{$SNMP_COMMUNITY}'", "source": "codesearchnet"}
{"code": "def dump(self, format='ttl'):\n    return self.rdf.graph.serialize(format=format).decode('utf-8')", "docstring": "Convenience method to return RDF data for resource,\noptionally selecting serialization format.\nInspired by .dump from Samvera.\n\nArgs:\nformat (str): expecting serialization formats accepted by rdflib.serialization(format=)", "source": "codesearchnet"}
{"code": "def realtime(widget, url_name=None, url_regex=None, time_interval=None):\n    \n    if not hasattr(widget, 'get_updated_content'):\n        raise AttributeError('Widget %s must implement get_updated_content '\n                             'method.' % widget)\n    elif not callable(widget.get_updated_content):\n        raise ValueError('get_updated_content in widget %s is not callable'\n                         % widget)\n\n    if url_name is None:\n        if getattr(widget, 'url_name', None) is not None:\n            url_name = widget.url_name\n        else:\n            url_name = widget.__class__.__name__\n\n    if url_name in [w.url_name for w in REALTIME_WIDGETS]:\n        raise ValueError('URL name %s is already used by another '\n                         'real time widget.' % url_name)\n\n    if url_regex is None:\n        if getattr(widget, 'url_regex', None) is not None:\n            url_regex = widget.url_regex\n        else:\n            url_regex = sha256(url_name.encode('utf-8'))\n            url_regex = url_regex.hexdigest()[:32]\n            url_regex = 'realtime/' + url_regex\n\n    if url_regex in [w.url_regex for w in REALTIME_WIDGETS]:\n        raise ValueError('URL regex %s is already used by another '\n                         'real time widget.' % url_regex)\n\n    if time_interval is None:\n        if getattr(widget, 'time_interval', None) is not None:\n            time_interval = widget.time_interval\n        else:\n            time_interval = app_settings.default_time_interval\n\n    from django.views.generic import View\n    from braces.views import AjaxResponseMixin, JSONResponseMixin\n\n    \n    class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):\n        def get_data(self):\n            return widget.get_updated_content()\n\n        def get(self, request, *args, **kwargs):\n            return self.get_ajax(request, *args, **kwargs)\n\n        def get_ajax(self, request, *args, **kwargs):\n            return self.render_json_response(self.get_data())\n\n    PartialResponse.url_name = url_name\n    PartialResponse.url_regex = url_regex\n    PartialResponse.time_interval = time_interval\n\n    REALTIME_WIDGETS.append(PartialResponse)\n\n    if not hasattr(widget, 'url_name'):\n        widget.url_name = url_name\n    if not hasattr(widget, 'url_regex'):\n        widget.url_regex = url_regex\n    if not hasattr(widget, 'time_interval'):\n        widget.time_interval = time_interval\n\n    return widget", "docstring": "Return a widget as real-time.\n\nArgs:\nwidget (Widget): the widget to register and return as real-time.\nurl_name (str): the URL name to call to get updated content.\nurl_regex (regex): the URL regex to be matched.\ntime_interval (int): the interval of refreshment in milliseconds.\n\nReturns:\nWidget: the \"real-timed\" widget.", "source": "juraj-google-style"}
{"code": "def discount_rate(self, date: Optional[types.DateTensor]=None, time: Optional[types.FloatTensor]=None, context=None) -> tf.Tensor:\n    pass", "docstring": "Returns the discount rates to a specified set of dates.\n\nArgs:\ndate: A `DateTensor` specifying the dates at which to evaluate the\ndiscount rates. The function expects either `date` or `time` to be\nspecified.\ntime: A real `Tensor` specifying the times at which to evaluate the\ndiscount rates. The function expects either `date` or `time` to be\nspecified.\ncontext: The context object, e.g., curve_type.\n\nReturns:\nA `Tensor` of the same shape as `dates` with the corresponding discount\nrates.", "source": "github-repos"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    if self._current_offset < 0:\n      raise IOError(\n          'Invalid current offset: {0:d} value less than zero.'.format(\n              self._current_offset))\n\n    if whence == os.SEEK_CUR:\n      offset += self._current_offset\n\n    elif whence == os.SEEK_END:\n      if self._decoded_stream_size is None:\n        self._decoded_stream_size = self._GetDecodedStreamSize()\n        if self._decoded_stream_size is None:\n          raise IOError('Invalid decoded stream size.')\n\n      offset += self._decoded_stream_size\n\n    elif whence != os.SEEK_SET:\n      raise IOError('Unsupported whence.')\n\n    if offset < 0:\n      raise IOError('Invalid offset value less than zero.')\n\n    if offset != self._current_offset:\n      self._current_offset = offset\n      self._realign_offset = True", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def _CanSkipContentExtraction(self, file_entry):\n    \n    \n    \n    location = getattr(file_entry.path_spec, 'location', None)\n    if not location:\n      return False\n\n    data_stream_name = getattr(file_entry.path_spec, 'data_stream', None)\n    if data_stream_name:\n      return False\n\n    file_system = file_entry.GetFileSystem()\n\n    path_segments = file_system.SplitPath(location)\n    if not path_segments:\n      return False\n\n    if self._CHROME_CACHE_DATA_FILE_RE.match(path_segments[-1]):\n      location_segments = path_segments[:-1]\n      location_segments.append('index')\n      location = file_system.JoinPath(location_segments)\n      index_path_spec = path_spec_factory.Factory.NewPathSpec(\n          file_entry.type_indicator, location=location,\n          parent=file_entry.path_spec.parent)\n\n      if file_system.FileEntryExistsByPathSpec(index_path_spec):\n        \n        return True\n\n    elif self._FIREFOX_CACHE_DATA_FILE_RE.match(path_segments[-1]):\n      location_segments = path_segments[:-4]\n      location_segments.append('_CACHE_MAP_')\n      location = file_system.JoinPath(location_segments)\n      cache_map_path_spec = path_spec_factory.Factory.NewPathSpec(\n          file_entry.type_indicator, location=location,\n          parent=file_entry.path_spec.parent)\n\n      if file_system.FileEntryExistsByPathSpec(cache_map_path_spec):\n        \n        \n        return True\n\n    elif self._FIREFOX_CACHE2_DATA_FILE_RE.match(path_segments[-1]):\n      location_segments = path_segments[:-2]\n      location_segments.append('index')\n      location = file_system.JoinPath(location_segments)\n      index_path_spec = path_spec_factory.Factory.NewPathSpec(\n          file_entry.type_indicator, location=location,\n          parent=file_entry.path_spec.parent)\n\n      if file_system.FileEntryExistsByPathSpec(index_path_spec):\n        \n        \n        return True\n\n    elif len(path_segments) == 1 and path_segments[0].lower() in (\n        'hiberfil.sys', 'pagefile.sys', 'swapfile.sys'):\n      return True\n\n    return False", "docstring": "Determines if content extraction of a file entry can be skipped.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry of which to determine content\nextraction can be skipped.\n\nReturns:\nbool: True if content extraction can be skipped.", "source": "juraj-google-style"}
{"code": "def _filter_var(self, node, var):\n    bindings = var.Bindings(node) if len(var.bindings) > 1 else var.bindings\n    if not bindings:\n        return None\n    if len(bindings) == len(var.bindings) and (not any((isinstance(b.data, abstract.TypeParameterInstance) for b in bindings))):\n        return var\n    ret = self.ctx.program.NewVariable()\n    for binding in bindings:\n        val = binding.data\n        if isinstance(val, abstract.TypeParameterInstance):\n            var = val.instance.get_instance_type_parameter(val.name)\n            var_bindings = var.Bindings(node)\n            if var_bindings:\n                bindings.extend(var_bindings)\n            elif val.param.constraints or val.param.bound:\n                ret.PasteVariable(val.param.instantiate(node))\n            else:\n                ret.AddBinding(self.ctx.convert.empty, [], node)\n        else:\n            ret.AddBinding(val, {binding}, node)\n    if ret.bindings:\n        return ret\n    else:\n        return None", "docstring": "Filter the variable by the node.\n\nFilters the variable data, including recursively expanded type parameter\ninstances, by visibility at the node. A type parameter instance needs to be\nfiltered at the moment of access because its value may change later.\n\nArgs:\nnode: The current node.\nvar: A variable to filter.\n\nReturns:\nThe filtered variable.", "source": "github-repos"}
{"code": "def _resolve_path(obj, path=None):\n        \n        if path:\n            for attr_name in path.split('__'):\n                obj = getattr(obj, attr_name)\n        return obj", "docstring": "Resolve django-like path eg. object2__object3 for object\nArgs:\nobj: The object the view is displaying.\npath (str, optional): Description\nReturns:\nA oject at end of resolved path", "source": "juraj-google-style"}
{"code": "def replace_nones(list_, repl=(- 1)):\n    repl_list = [(repl if (item is None) else (replace_nones(item, repl) if isinstance(item, list) else item)) for item in list_]\n    return repl_list", "docstring": "r\"\"\"\nRecursively removes Nones in all lists and sublists and replaces them with\nthe repl variable\n\nArgs:\nlist_ (list):\nrepl (obj): replacement value\n\nReturns:\nlist\n\nCommandLine:\npython -m utool.util_list --test-replace_nones\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> # build test data\n>>> list_ = [None, 0, 1, 2]\n>>> repl = -1\n>>> # execute function\n>>> repl_list = replace_nones(list_, repl)\n>>> # verify results\n>>> result = str(repl_list)\n>>> print(result)\n[-1, 0, 1, 2]", "source": "codesearchnet"}
{"code": "def __init__(self, replicaset=None, ssl=None, login=None, password=None,\n                 ca_cert=None, certfile=None, keyfile=None,\n                 keyfile_passphrase=None, crlfile=None, **kwargs):\n        \n\n        super().__init__(**kwargs)\n        self.replicaset = replicaset or bigchaindb.config['database'].get('replicaset')\n        self.ssl = ssl if ssl is not None else bigchaindb.config['database'].get('ssl', False)\n        self.login = login or bigchaindb.config['database'].get('login')\n        self.password = password or bigchaindb.config['database'].get('password')\n        self.ca_cert = ca_cert or bigchaindb.config['database'].get('ca_cert', None)\n        self.certfile = certfile or bigchaindb.config['database'].get('certfile', None)\n        self.keyfile = keyfile or bigchaindb.config['database'].get('keyfile', None)\n        self.keyfile_passphrase = keyfile_passphrase or bigchaindb.config['database'].get('keyfile_passphrase', None)\n        self.crlfile = crlfile or bigchaindb.config['database'].get('crlfile', None)", "docstring": "Create a new Connection instance.\n\nArgs:\nreplicaset (str, optional): the name of the replica set to\nconnect to.\n**kwargs: arbitrary keyword arguments provided by the\nconfiguration's ``database`` settings", "source": "juraj-google-style"}
{"code": "def report_sink_lineage(path):\n    FileSystems.get_filesystem(path).report_lineage(path, Lineage.sinks())", "docstring": "Report sink :class:`~apache_beam.metrics.metric.Lineage`.\n\nArgs:\npath: string path to be reported.", "source": "github-repos"}
{"code": "def _ParseEntry(self, key, val):\n    if (key in self._repeated):\n        setting = self.section.setdefault(key, [])\n        setting.extend(val)\n    else:\n        self.section.setdefault(key, val)", "docstring": "Adds an entry for a configuration setting.\n\nArgs:\nkey: The name of the setting.\nval: The value of the setting.", "source": "codesearchnet"}
{"code": "def cumulative_distribution(self, X, U=0):\n        \n        self.check_fit()\n\n        low_bounds = self.model.dataset.mean() - (5 * self.model.dataset.std())\n        return self.model.integrate_box_1d(low_bounds, X) - U", "docstring": "Computes the integral of a 1-D pdf between two bounds\n\nArgs:\nX(float): a datapoint.\nU(float): cdf value in [0,1], only used in get_ppf\n\nReturns:\nfloat: estimated cumulative distribution.", "source": "juraj-google-style"}
{"code": "def dense_shape_and_type(matrix):\n    if not isinstance(matrix, tensor_lib.Tensor):\n        raise TypeError('matrix should be a tensor, but saw: %s' % (matrix,))\n    if matrix.dtype != dtypes.variant:\n        raise TypeError('expected matrix to be type tf.variant, but saw: %s' % (matrix.dtype,))\n    handle_data = _get_handle_data(matrix)\n    if not handle_data or not handle_data.is_set:\n        raise ValueError('matrix has missing handle data: %s' % (matrix,))\n    if len(handle_data.shape_and_type) != 1:\n        raise ValueError(\"len(matrix.handle_data.shape_and_type) != 1: '%s'\" % (handle_data.shape_and_type,))\n    return DenseShapeAndType(tensor_shape.TensorShape(handle_data.shape_and_type[0].shape), dtypes.DType(handle_data.shape_and_type[0].dtype))", "docstring": "Get dense shape and dtype of the tf.Tensor containing the matrix.\n\nArgs:\nmatrix: A `tf.Tensor` of type `tf.variant` storing a sparse matrix.\n\nReturns:\nAn instance of `ShapeAndType` with properties `shape` (a `tf.TensorShape`)\nand `dtype` (a `tf.DType`).\n\nRaises:\nTypeError: if `matrix` is not a tensor or its dtype is not variant.\nValueError: if `matrix` lacks static handle data containing the dense\nshape and dtype.", "source": "github-repos"}
{"code": "def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting lrelu ...')\n    if (names == 'short'):\n        tf_name = ('lRELU' + random_string(3))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    leakyrelu = keras.layers.LeakyReLU(alpha=params['alpha'], name=tf_name)\n    layers[scope_name] = leakyrelu(layers[inputs[0]])", "docstring": "Convert leaky relu layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def main(params=None):\n    \n    if params == None:\n        parser = getParser()\n        args = parser.parse_args(params)\n    else:\n        args = params\n\n    print(general.title(banner.text))\n\n    extraWords = args.extra_words\n\n    try:\n        if args.name == None and args.surname1 == None and args.surname2 == None and args.city == None and args.country == None and args.year == None:\n            print(\"\\nCollecting information about the profile\")\n            print(\"----------------------------------------\\n\")\n\n            args.name = raw_input(general.emphasis(\"Insert a name: \".ljust(35, \" \"))).replace(' ','')\n            args.surname1 = raw_input(general.emphasis(\"Insert the first surname: \".ljust(35, \" \"))).replace(' ','')\n            args.surname2 = raw_input(general.emphasis(\"Insert the second surname: \".ljust(35, \" \"))).replace(' ','')\n            args.year = raw_input(general.emphasis(\"Insert a year (e. g.: birthyear): \".ljust(35, \" \"))).replace(' ','')\n            args.city = raw_input(general.emphasis(\"Insert a city: \".ljust(35, \" \"))).replace(' ','')\n            args.country = raw_input(general.emphasis(\"Insert a country: \".ljust(35, \" \"))).replace(' ','')\n\n            if args.extra_words == []:\n                print(\"\\nAdditional transformations to be added\")\n                print(\"--------------------------------------\\n\")\n                inputText = raw_input(general.emphasis(\"Extra words to add (',' separated): \".ljust(35, \" \"))).replace(' ','')\n                extraWords += inputText.lower().split(',')\n    except KeyboardInterrupt:\n        print(\"\\n\\nThe user manually aborted the program. Exiting...\")\n        sys.exit(2)\n\n    lista=[]\n\n    print(\"\\nInput data:\")\n    print(\"-----------\\n\")\n    if args.name != \"\":\n        print(\"Name: \".ljust(20, \" \") + args.name)\n    if args.surname1 != \"\":\n        print(\"First Surname: \".ljust(20, \" \") + args.surname1)\n    if args.surname2 != \"\":\n        print(\"Second Surname: \".ljust(20, \" \") + args.surname2)\n    if args.year != \"\":\n        print(\"Year: \".ljust(20, \" \") + args.year)\n    if args.city != \"\":\n        print(\"City: \".ljust(20, \" \") + args.city)\n    if args.country != \"\":\n        print(\"Country: \".ljust(20, \" \") + args.country)\n\n    aliases = generate(\n        name=args.name,\n        surname1=args.surname1,\n        surname2=args.surname2,\n        city=args.city,\n        country=args.country,\n        year=args.year,\n        useNumbers=args.numbers,\n        useCommonWords=args.common_words,\n        useLeet=args.leet,\n        useLocales=args.locales,\n        extraWords=extraWords\n    )\n\n    print(\"Writing the results onto the file:\\n\\t\" + general.emphasis(args.outputFile))\n\n    oF=open(args.outputFile, \"w\")\n    for l in aliases:\n        oF.write(l+\"\\n\")\n    oF.close()\n\n\n    \n    print(banner.footer)", "docstring": "Main function to launch alias_generator.\n\nArgs:\n-----\nparams: A list with the parameters as grabbed by the terminal. It is\nNone when this is called by an entry_point. If it is called by osrf\nthe data is already parsed.", "source": "juraj-google-style"}
{"code": "def diff_text2(self, diffs):\n    \n    text = []\n    for (op, data) in diffs:\n      if op != self.DIFF_DELETE:\n        text.append(data)\n    return \"\".join(text)", "docstring": "Compute and return the destination text (all equalities and insertions).\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nDestination text.", "source": "juraj-google-style"}
{"code": "def datetimeobj_YmdHMS(value):\n    \n    i = int(value)\n    S = i\n    M = S\n    H = M\n    d = H\n    m = d\n    Y = m\n    return datetime.datetime(\n        Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, tzinfo=TZ_GMT\n    )", "docstring": "Convert timestamp string to a datetime object.\n\nTimestamps strings like '20130618120000' are able to be converted by this\nfunction.\n\nArgs:\nvalue: A timestamp string in the format '%Y%m%d%H%M%S'.\n\nReturns:\nA datetime object.\n\nRaises:\nValueError: If timestamp is invalid.\n\nNote: The timezone is assumed to be UTC/GMT.", "source": "juraj-google-style"}
{"code": "def zip_ll(data, means, M):\n    \n    genes, cells = data.shape\n    clusters = means.shape[1]\n    ll = np.zeros((cells, clusters))\n    d0 = (data==0)\n    d1 = (data>0)\n    for i in range(clusters):\n        means_i = np.tile(means[:,i], (cells, 1))\n        means_i = means_i.transpose()\n        L_i = np.tile(M[:,i], (cells, 1))\n        L_i = L_i.transpose()\n        ll_0 = np.log(L_i + (1 - L_i)*np.exp(-means_i))\n        ll_0 = np.where((L_i==0) & (means_i==0), -means_i, ll_0)\n        \n        ll_1 = np.log(1 - L_i) + xlogy(data, means_i) -  means_i\n        ll_0 = np.where(d0, ll_0, 0.0)\n        ll_1 = np.where(d1, ll_1, 0.0)\n        ll[:,i] = np.sum(ll_0 + ll_1, 0)\n    return ll", "docstring": "Calculates the zero-inflated Poisson log-likelihood.\n\nArgs:\ndata (array): genes x cells\nmeans (array): genes x k\nM (array): genes x k - this is the zero-inflation parameter.\n\nReturns:\ncells x k array of log-likelihood for each cell/cluster pair.", "source": "juraj-google-style"}
{"code": "def text(self, x, y, text):\n        \n        for i, char in enumerate(text):\n            self.point(x + i, y, char)", "docstring": "Print a text on ASCII canvas.\n\nArgs:\nx (int): x coordinate where the text should start.\ny (int): y coordinate where the text should start.\ntext (str): string that should be printed.", "source": "juraj-google-style"}
{"code": "def git_ls_remote(self, uri, ref):\n    logger.debug('Invoking git to retrieve commit id for repo %s...', uri)\n    lsremote_output = subprocess.check_output(['git', 'ls-remote', uri, ref])\n    if (b'\\t' in lsremote_output):\n        commit_id = lsremote_output.split(b'\\t')[0]\n        logger.debug('Matching commit id found: %s', commit_id)\n        return commit_id\n    else:\n        raise ValueError(('Ref \"%s\" not found for repo %s.' % (ref, uri)))", "docstring": "Determine the latest commit id for a given ref.\n\nArgs:\nuri (string): git URI\nref (string): git ref\n\nReturns:\nstr: A commit id", "source": "codesearchnet"}
{"code": "def value_splitter(self, reference, prop, value, mode):\n    items = []\n    if (mode == 'json-list'):\n        try:\n            items = json.loads(value)\n        except json.JSONDecodeError as e:\n            print(value)\n            msg = \"Reference '{ref}' raised JSON decoder error when splitting values from '{prop}': {err}'\"\n            raise SerializerError(msg.format(ref=reference, prop=prop, err=e))\n    elif (len(value) > 0):\n        items = value.split(' ')\n    return items", "docstring": "Split a string into a list items.\n\nDefault behavior is to split on white spaces.\n\n\nArguments:\nreference (string): Reference name used when raising possible\nerror.\nprop (string): Property name used when raising possible error.\nvalue (string): Property value to split.\nmode (string): Splitter mode. Default should come from\n``ManifestSerializer._DEFAULT_SPLITTER``.\n\nAvailable splitter are:\n\n* ``white-space``: Simply split a string on white spaces;\n* ``json-list``: Assume the string is a JSON list to parse;\n\nReturns:\nlist:", "source": "codesearchnet"}
{"code": "def append_transformation(self, transformation, return_alternatives=False, clear_redo=True):\n    if clear_redo:\n        self._undone = []\n    if (return_alternatives and transformation.is_one_to_many):\n        ranked_list = transformation.apply_transformation(self.final_structure, return_ranked_list=return_alternatives)\n        input_structure = self.final_structure.as_dict()\n        alts = []\n        for x in ranked_list[1:]:\n            s = x.pop('structure')\n            actual_transformation = x.pop('transformation', transformation)\n            hdict = actual_transformation.as_dict()\n            hdict['input_structure'] = input_structure\n            hdict['output_parameters'] = x\n            self.final_structure = s\n            d = self.as_dict()\n            d['history'].append(hdict)\n            d['final_structure'] = s.as_dict()\n            alts.append(TransformedStructure.from_dict(d))\n        x = ranked_list[0]\n        s = x.pop('structure')\n        actual_transformation = x.pop('transformation', transformation)\n        hdict = actual_transformation.as_dict()\n        hdict['input_structure'] = self.final_structure.as_dict()\n        hdict['output_parameters'] = x\n        self.history.append(hdict)\n        self.final_structure = s\n        return alts\n    else:\n        s = transformation.apply_transformation(self.final_structure)\n        hdict = transformation.as_dict()\n        hdict['input_structure'] = self.final_structure.as_dict()\n        hdict['output_parameters'] = {}\n        self.history.append(hdict)\n        self.final_structure = s", "docstring": "Appends a transformation to the TransformedStructure.\n\nArgs:\ntransformation: Transformation to append\nreturn_alternatives: Whether to return alternative\nTransformedStructures for one-to-many transformations.\nreturn_alternatives can be a number, which stipulates the\ntotal number of structures to return.\nclear_redo: Boolean indicating whether to clear the redo list.\nBy default, this is True, meaning any appends clears the\nhistory of undoing. However, when using append_transformation\nto do a redo, the redo list should not be cleared to allow\nmultiple redos.", "source": "codesearchnet"}
{"code": "def concatenate(self, other):\n    other = as_shape(other)\n    if self.dims is None or other.dims is None:\n        return unknown_shape()\n    else:\n        return TensorShape(self.dims + other.dims)", "docstring": "Returns the concatenation of the dimension in `self` and `other`.\n\n*N.B.* If either `self` or `other` is completely unknown,\nconcatenation will discard information about the other shape. In\nfuture, we might support concatenation that preserves this\ninformation for use with slicing.\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` whose dimensions are the concatenation of the\ndimensions in `self` and `other`.", "source": "github-repos"}
{"code": "def _bbox(nodes):\n    (left, bottom) = np.min(nodes, axis=1)\n    (right, top) = np.max(nodes, axis=1)\n    return (left, right, bottom, top)", "docstring": "Get the bounding box for set of points.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): A set of points.\n\nReturns:\nTuple[float, float, float, float]: The left, right,\nbottom and top bounds for the box.", "source": "codesearchnet"}
{"code": "def _assert_same_base_type(items, expected_type=None):\n    original_expected_type = expected_type\n    mismatch = False\n    for item in items:\n        if (item is not None):\n            item_type = base_dtype(item.dtype)\n            if (not expected_type):\n                expected_type = item_type\n            elif (expected_type != item_type):\n                mismatch = True\n                break\n    if mismatch:\n        expected_type = original_expected_type\n        original_item_str = None\n        get_name = (lambda x: (x.name if hasattr(x, 'name') else str(x)))\n        for item in items:\n            if (item is not None):\n                item_type = base_dtype(item.dtype)\n                if (not expected_type):\n                    expected_type = item_type\n                    original_item_str = get_name(item)\n                elif (expected_type != item_type):\n                    raise ValueError('{}, type={}, must be of the same type ({}){}.'.format(get_name(item), item_type, expected_type, (' as {}'.format(original_item_str) if original_item_str else '')))\n        return expected_type\n    else:\n        return expected_type", "docstring": "r\"\"\"Asserts all items are of the same base type.\n\nArgs:\nitems: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,\n`Operation`, or `IndexedSlices`). Can include `None` elements, which\nwill be ignored.\nexpected_type: Expected type. If not specified, assert all items are\nof the same base type.\n\nReturns:\nValidated type, or none if neither expected_type nor items provided.\n\nRaises:\nValueError: If any types do not match.", "source": "codesearchnet"}
{"code": "def intersect(df, other, index=False, keep='first'):\n    validate_set_ops(df, other)\n    if index:\n        df_reset_index = df.reset_index()\n        other_reset_index = other.reset_index()\n        index_cols = [col for col in df_reset_index.columns if (col not in df.columns)]\n        df_index_names = df.index.names\n        return_df = pd.merge(df_reset_index, other_reset_index, how='inner', left_on=df_reset_index.columns.values.tolist(), right_on=df_reset_index.columns.values.tolist()).set_index(index_cols)\n        return_df.index.names = df_index_names\n        return_df = return_df.drop_duplicates(keep=keep)\n        return return_df\n    else:\n        return_df = pd.merge(df, other, how='inner', left_on=df.columns.values.tolist(), right_on=df.columns.values.tolist())\n        return_df = return_df.drop_duplicates(keep=keep)\n        return return_df", "docstring": "Returns rows that appear in both DataFrames.\n\nArgs:\ndf (pandas.DataFrame): data passed in through the pipe.\nother (pandas.DataFrame): other DataFrame to use for set operation with\nthe first.\n\nKwargs:\nindex (bool): Boolean indicating whether to consider the pandas index\nas part of the set operation (default `False`).\nkeep (str): Indicates which duplicate should be kept. Options are `'first'`\nand `'last'`.", "source": "codesearchnet"}
{"code": "def wait(timeout=None, flush=True):\n    if (timeout is not None):\n        timeout = (timeout + _time.clock())\n    while True:\n        if _eventQueue:\n            return _eventQueue.pop(0)\n        if flush:\n            _tdl.flush()\n        if (timeout and (_time.clock() >= timeout)):\n            return None\n        _time.sleep(0.001)\n        _processEvents()", "docstring": "Wait for an event.\n\nArgs:\ntimeout (Optional[int]): The time in seconds that this function will\nwait before giving up and returning None.\n\nWith the default value of None, this will block forever.\nflush (bool): If True a call to :any:`tdl.flush` will be made before\nlistening for events.\n\nReturns: Type[Event]: An event, or None if the function\nhas timed out.\nAnything added via :any:`push` will also be returned.", "source": "codesearchnet"}
{"code": "def rescale(self, image: 'torch.Tensor', scale: float, offset: Optional[bool]=True, **kwargs) -> 'torch.Tensor':\n    rescaled_image = image * scale\n    if offset:\n        rescaled_image -= 1\n    return rescaled_image", "docstring": "Rescale an image by a scale factor.\n\nIf `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is\n1/127.5, the image is rescaled between [-1, 1].\nimage = image * scale - 1\n\nIf `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].\nimage = image * scale\n\nArgs:\nimage (`torch.Tensor`):\nImage to rescale.\nscale (`float`):\nThe scaling factor to rescale pixel values by.\noffset (`bool`, *optional*):\nWhether to scale the image in both negative and positive directions.\n\nReturns:\n`torch.Tensor`: The rescaled image.", "source": "github-repos"}
{"code": "def get_variable(mesh, name, shape, dtype=tf.float32, master_dtype=None, slice_dtype=None, activation_dtype=None, initializer=None, trainable=True, **kwargs):\n    if (dtype is None):\n        dtype = VariableDType(master_dtype, slice_dtype, activation_dtype)\n    elif isinstance(dtype, tf.DType):\n        dtype = VariableDType((master_dtype or dtype), (slice_dtype or dtype), (activation_dtype or dtype))\n    elif (not isinstance(dtype, VariableDType)):\n        raise ValueError('dtype should be a tf.dtype or a mtf.VariableDType')\n    scope_name = tf.get_variable_scope().name\n    if scope_name:\n        full_name = ((scope_name + '/') + name)\n    else:\n        full_name = name\n    if (full_name in mesh.graph.name_to_variable):\n        var = mesh.graph.name_to_variable[full_name]\n    else:\n        var = Variable(mesh, name, convert_to_shape(shape), dtype, initializer, trainable, **kwargs)\n        if (var.name != full_name):\n            raise ValueError(('Expected var.name == full_name.  %s vs %s' % (var.name, full_name)))\n        mesh.graph.name_to_variable[full_name] = var\n    return var.outputs[0]", "docstring": "Create a new variable or retrieve an already-created one.\n\nArgs:\nmesh: a Mesh\nname: a string (uses the existing tf.variable_scope())\nshape: a Shape\ndtype: a VariableDType or a tf.DType\nmaster_dtype: an optional tf.DType (deprecated - use dtype arg)\nslice_dtype: an optional tf.DType (deprecated - use dtype arg)\nactivation_dtype: an optional tf.DType (deprecated - use dtype arg)\ninitializer: an optional tf initializer function\ntrainable: a boolean\n**kwargs: additional keyword arguments to tf.get_variable\n\nReturns:\na Tensor with the given shape and dtype equal to dtype.activation_dtype", "source": "codesearchnet"}
{"code": "def incrementKeySequenceCounter(self, iIncrementValue=1):\n        \n        print '%s call incrementKeySequenceCounter' % self.port\n        print iIncrementValue\n        currentKeySeq = ''\n        try:\n            currentKeySeq = self.getKeySequenceCounter()\n            keySequence = int(currentKeySeq, 10) + iIncrementValue\n            print keySequence\n            return self.setKeySequenceCounter(keySequence)\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('incrementKeySequenceCounter() Error: ' + str(e))", "docstring": "increment the key sequence with a given value\n\nArgs:\niIncrementValue: specific increment value to be added\n\nReturns:\nTrue: successful to increment the key sequence with a given value\nFalse: fail to increment the key sequence with a given value", "source": "juraj-google-style"}
{"code": "def __init__(self, rs_params):\n        \n        self.server_map = {}\n        self.auth_key = rs_params.get('auth_key', None)\n        self.login = rs_params.get('login', '')\n        self.auth_source = rs_params.get('authSource', 'admin')\n        self.password = rs_params.get('password', '')\n        self.admin_added = False\n        self.repl_id = rs_params.get('id', None) or str(uuid4())\n        self._version = rs_params.get('version')\n\n        self.sslParams = rs_params.get('sslParams', {})\n        self.kwargs = {}\n        self.restart_required = self.login or self.auth_key\n        self.x509_extra_user = False\n\n        if self.sslParams:\n            self.kwargs.update(DEFAULT_SSL_OPTIONS)\n\n        members = rs_params.get('members', [])\n        \n        self.enable_ipv6 = ipv6_enabled_repl(rs_params)\n\n        config = {\"_id\": self.repl_id, \"members\": [\n            self.member_create(member, index)\n            for index, member in enumerate(members)\n        ]}\n        if 'rsSettings' in rs_params:\n            config['settings'] = rs_params['rsSettings']\n        \n        \n        \n        self._write_concern = len(\n            [m for m in members\n             if not m.get('rsParams', {}).get('arbiterOnly')]\n        )\n\n        logger.debug(\"replica config: {config}\".format(**locals()))\n        if not self.repl_init(config):\n            self.cleanup()\n            raise ReplicaSetError(\"Could not create replica set.\")\n\n        if not self.waiting_config_state():\n            raise ReplicaSetError(\n                \"Could not actualize replica set configuration.\")\n\n        if self.login:\n            \n            \n            \n            \n            \n            for member in members:\n                proc_params = member.get('procParams', {})\n                set_params = proc_params.get('setParameter', {})\n                auth_mechs = set_params.get('authenticationMechanisms', '')\n                auth_mechs = auth_mechs.split(',')\n                if len(auth_mechs) == 1 and auth_mechs[0] == 'MONGODB-X509':\n                    self.x509_extra_user = True\n                    break\n\n            if config[\"members\"]:\n                server_id = self._servers.host_to_server_id(\n                    self.member_id_to_host(0))\n                version = self._servers.version(server_id)\n            else:\n                version = (2, 4, 0)\n\n            self._add_users(self.connection()[self.auth_source], version)\n        if self.restart_required:\n            \n            for idx, member in enumerate(members):\n                server_id = self._servers.host_to_server_id(\n                    self.member_id_to_host(idx))\n                server = self._servers._storage[server_id]\n                \n                \n                if not member.get('rsParams', {}).get('arbiterOnly'):\n                    server.x509_extra_user = self.x509_extra_user\n                    server.auth_source = self.auth_source\n                    server.login = self.login\n                    server.password = self.password\n\n                def add_auth(config):\n                    if self.auth_key:\n                        config['keyFile'] = self.key_file\n                    config.update(member.get('procParams', {}))\n                    return config\n\n                server.restart(config_callback=add_auth)\n            self.restart_required = False\n\n        if not self.waiting_member_state() and 
self.waiting_config_state():\n            raise ReplicaSetError(\n                \"Could not actualize replica set configuration.\")\n        for i in range(100):\n            if self.connection().primary:\n                break\n            time.sleep(0.1)\n        else:\n            raise ReplicaSetError(\"No primary was ever elected.\")", "docstring": "create replica set according members config\nArgs:\nrs_params - replica set configuration", "source": "juraj-google-style"}
{"code": "def _generate_graph_update_dicts(self):\n    transform_dict = {}\n    pcoll_dict = {}\n    for transform_id, transform_proto in self._top_level_transforms():\n        transform_dict[transform_proto.unique_name] = {'required': transform_id in self._required_transforms}\n        for pcoll_id in transform_proto.outputs.values():\n            pcoll_dict[pcoll_id] = {'cached': pcoll_id in self._cached_pcollections, 'referenced': pcoll_id in self._referenced_pcollections}\n\n    def vertex_properties_to_attributes(vertex):\n        \n        attrs = {}\n        if 'leaf' in vertex:\n            attrs['style'] = 'invis'\n        elif vertex.get('required'):\n            attrs['color'] = 'blue'\n            attrs['fontcolor'] = 'blue'\n        else:\n            attrs['color'] = 'grey'\n        return attrs\n\n    def edge_properties_to_attributes(edge):\n        \n        attrs = {}\n        if edge.get('cached'):\n            attrs['color'] = 'red'\n        elif edge.get('referenced'):\n            attrs['color'] = 'black'\n        else:\n            attrs['color'] = 'grey'\n        return attrs\n    vertex_dict = {}\n    edge_dict = {}\n    for transform_name, transform_properties in transform_dict.items():\n        vertex_dict[transform_name] = vertex_properties_to_attributes(transform_properties)\n    for pcoll_id, pcoll_properties in pcoll_dict.items():\n        edge_dict[pcoll_id] = edge_properties_to_attributes(pcoll_properties)\n    return (vertex_dict, edge_dict)", "docstring": "Generate updates specific to interactive pipeline.\n\nReturns:\nvertex_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes\nedge_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes", "source": "github-repos"}
{"code": "def format_level_1_memory(memory):\n    formatted_memory = _list_to_complex_array(memory)\n    if (not (1 <= len(formatted_memory.shape) <= 2)):\n        raise QiskitError('Level one memory is not of correct shape.')\n    return formatted_memory", "docstring": "Format an experiment result memory object for measurement level 1.\n\nArgs:\nmemory (list): Memory from experiment with `meas_level==1`. `avg` or\n`single` will be inferred from shape of result memory.\n\nReturns:\nnp.ndarray: Measurement level 1 complex numpy array\n\nRaises:\nQiskitError: If the returned numpy array does not have 1 (avg) or 2 (single)\nindicies.", "source": "codesearchnet"}
{"code": "def __getattr__(self, item):\n        \n        if item in self.METHOD_NO_PROXY:\n            return super(AuthProxy, self).__getattr__(item)\n        attr = getattr(self.proxy_class, item)\n        if callable(attr):\n            return self.auth_proxy(attr)", "docstring": "Override attribute getter to act as a proxy for``proxy_class``.\n\nIf ``item`` is contained in ``METHOD_NO_PROXY``, it will not be\nproxied to the ``proxy_class`` and will instead return the attribute\non this object.\n\nArgs:\nitem (str): Name of attribute to get.", "source": "juraj-google-style"}
{"code": "def get_el_amount(self, element):\n        \n        return sum([self._all_comp[i][element] * abs(self._coeffs[i])\n                    for i in range(len(self._all_comp))]) / 2", "docstring": "Returns the amount of the element in the reaction.\n\nArgs:\nelement (Element/Specie): Element in the reaction\n\nReturns:\nAmount of that element in the reaction.", "source": "juraj-google-style"}
{"code": "def to_file(self, filename):\n    d = {'mass_info': self.mass_info, 'nonbond_coeffs': self.nonbond_coeffs, 'topo_coeffs': self.topo_coeffs}\n    yaml = YAML(typ='safe')\n    with open(filename, 'w') as f:\n        yaml.dump(d, f)", "docstring": "Saves object to a file in YAML format.\n\nArgs:\nfilename (str): Filename.", "source": "codesearchnet"}
{"code": "def extract_tendency_grid(self, model_grid):\n        \n        var_name = model_grid.variable + \"-tendency\"\n        self.attributes[var_name] = []\n        timesteps = np.arange(self.start_time, self.end_time + 1)\n        for ti, t in enumerate(timesteps):\n            t_index = t - model_grid.start_hour\n            self.attributes[var_name].append(\n                model_grid.data[t_index, self.i[ti], self.j[ti]] - model_grid.data[t_index - 1, self.i[ti], self.j[ti]]\n                )", "docstring": "Extracts the difference in model outputs\n\nArgs:\nmodel_grid: ModelOutput or ModelGrid object.", "source": "juraj-google-style"}
{"code": "def get(self, key, default=None):\n        \n        if key in self.__cli:\n            return self.__cli[key]\n        if key in self.__config:\n            return self.__config.get(key)\n        if key in self.__defaults:\n            return self.__defaults.get(key)\n        return default", "docstring": "Get the value for `key`.\n\nGives priority to command-line overrides.\n\nArgs:\nkey: str, the key to get the value for.\n\nReturns:\nobject: The value for `key`", "source": "juraj-google-style"}
{"code": "def _calibrate_vis(radiance, k):\n    logger.debug('Calibrating to reflectance')\n    refl = ((100 * k) * radiance)\n    return refl.clip(min=0)", "docstring": "Convert VIS radiance to reflectance\n\nNote: Angle of incident radiation and annual variation of the\nearth-sun distance is not taken into account. A value of 100%\ncorresponds to the radiance of a perfectly reflecting diffuse surface\nilluminated at normal incidence when the sun is at its annual-average\ndistance from the Earth.\n\nTODO: Take angle of incident radiation (cos sza) and annual variation\nof the earth-sun distance into account.\n\nReference: [VIS]\n\nArgs:\nradiance: Radiance [mW m-2 cm-1 sr-1]\nk: pi / H, where H is the solar spectral irradiance at\nannual-average sun-earth distance, averaged over the spectral\nresponse function of the detector). Units of k: [m2 um sr W-1]\nReturns:\nReflectance [%]", "source": "codesearchnet"}
{"code": "def transfer(self, rights_assignment_data=None, *, from_user, to_user, rights_assignment_format='jsonld'):\n    rights_assignment = RightsAssignment.from_data((rights_assignment_data or {}), plugin=self.plugin)\n    transfer_payload = rights_assignment._to_format(data_format=rights_assignment_format)\n    transfer_id = super().transfer(transfer_payload, from_user=from_user, to_user=to_user)\n    rights_assignment.persist_id = transfer_id\n    return rights_assignment", "docstring": "Transfer this Right to another owner on the backing\npersistence layer.\n\nArgs:\nrights_assignment_data (dict): Model data for the resulting\n:class:`~.RightsAssignment`\nfrom_user (any, keyword): A user based on the model specified\nby the persistence layer\nto_user (any, keyword): A user based on the model specified\nby the persistence layer\nrights_assignment_format (str, keyword, optional): Data\nformat of the created entity; must be one of:\n\n- 'jsonld' (default)\n- 'json'\n- 'ipld'\n\nReturns:\n:class:`~.RightsAssignment`: The RightsAssignment entity\ncreated from this transfer\n\nRaises:\nSee :meth:`~.TransferrableEntity.transfer`", "source": "codesearchnet"}
{"code": "def export(self, last_checkpoint, output_dir):\n    \n    logging.info('Exporting prediction graph to %s', output_dir)\n    with tf.Session(graph=tf.Graph()) as sess:\n      \n      inputs, outputs = self.build_prediction_graph()\n      signature_def_map = {\n        'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)\n      }\n      init_op = tf.global_variables_initializer()\n      sess.run(init_op)\n      self.restore_from_checkpoint(sess, self.inception_checkpoint_file,\n                                   last_checkpoint)\n      init_op_serving = control_flow_ops.group(\n          variables.local_variables_initializer(),\n          tf.tables_initializer())\n\n      builder = saved_model_builder.SavedModelBuilder(output_dir)\n      builder.add_meta_graph_and_variables(\n          sess, [tag_constants.SERVING],\n          signature_def_map=signature_def_map,\n          legacy_init_op=init_op_serving)\n      builder.save(False)", "docstring": "Builds a prediction graph and xports the model.\n\nArgs:\nlast_checkpoint: Path to the latest checkpoint file from training.\noutput_dir: Path to the folder to be used to output the model.", "source": "juraj-google-style"}
{"code": "def get_absl_log_prefix(record):\n    created_tuple = time.localtime(record.created)\n    created_microsecond = int(((record.created % 1.0) * 1000000.0))\n    critical_prefix = ''\n    level = record.levelno\n    if _is_non_absl_fatal_record(record):\n        level = logging.ERROR\n        critical_prefix = _CRITICAL_PREFIX\n    severity = converter.get_initial_for_level(level)\n    return ('%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (severity, created_tuple.tm_mon, created_tuple.tm_mday, created_tuple.tm_hour, created_tuple.tm_min, created_tuple.tm_sec, created_microsecond, _get_thread_id(), record.filename, record.lineno, critical_prefix))", "docstring": "Returns the absl log prefix for the log record.\n\nArgs:\nrecord: logging.LogRecord, the record to get prefix for.", "source": "codesearchnet"}
{"code": "def fibo(max_value=None):\n    \n    a = 1\n    b = 1\n    while True:\n        if max_value is None or a < max_value:\n            yield a\n            a, b = b, a + b\n        else:\n            yield max_value", "docstring": "Generator for fibonaccial decay.\n\nArgs:\nmax_value: The maximum value to yield. Once the value in the\ntrue fibonacci sequence exceeds this, the value\nof max_value will forever after be yielded.", "source": "juraj-google-style"}
{"code": "def flux_randomization(model, threshold, tfba, solver):\n    \n\n    optimize = {}\n    for reaction_id in model.reactions:\n        if model.is_reversible(reaction_id):\n            optimize[reaction_id] = 2*random.random() - 1.0\n        else:\n            optimize[reaction_id] = random.random()\n\n    fba = _get_fba_problem(model, tfba, solver)\n    for reaction_id, value in iteritems(threshold):\n        fba.prob.add_linear_constraints(fba.get_flux_var(reaction_id) >= value)\n\n    fba.maximize(optimize)\n    for reaction_id in model.reactions:\n        yield reaction_id, fba.get_flux(reaction_id)", "docstring": "Find a random flux solution on the boundary of the solution space.\n\nThe reactions in the threshold dictionary are constrained with the\nassociated lower bound.\n\nArgs:\nmodel: MetabolicModel to solve.\nthreshold: dict of additional lower bounds on reaction fluxes.\ntfba: If True enable thermodynamic constraints.\nsolver: LP solver instance to use.\n\nReturns:\nAn iterator of reaction ID and reaction flux pairs.", "source": "juraj-google-style"}
{"code": "def match_objects(self, set_a, set_b, time_a, time_b):\n        \n        costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100\n        min_row_costs = costs.min(axis=1)\n        min_col_costs = costs.min(axis=0)\n        good_rows = np.where(min_row_costs < 100)[0]\n        good_cols = np.where(min_col_costs < 100)[0]\n        assignments = []\n        if len(good_rows) > 0 and len(good_cols) > 0:\n            munk = Munkres()\n            initial_assignments = munk.compute(costs[tuple(np.meshgrid(good_rows, good_cols, indexing='ij'))].tolist())\n            initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]\n            for a in initial_assignments:\n                if costs[a[0], a[1]] < 100:\n                    assignments.append(a)\n        return assignments", "docstring": "Match two sets of objects at particular times.\n\nArgs:\nset_a: list of STObjects\nset_b: list of STObjects\ntime_a: time at which set_a is being evaluated for matching\ntime_b: time at which set_b is being evaluated for matching\n\nReturns:\nList of tuples containing (set_a index, set_b index) for each match", "source": "juraj-google-style"}
{"code": "def __init__(self, port_no=None, queue_id=None, tx_bytes=None,\n                 tx_packets=None, tx_errors=None):\n        \n        super().__init__()\n        self.port_no = port_no\n        self.queue_id = queue_id\n        self.tx_bytes = tx_bytes\n        self.tx_packets = tx_packets\n        self.tx_errors = tx_errors", "docstring": "Create a QueueStats with the optional parameters below.\n\nArgs:\nport_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):\nPort Number.\nqueue_id (int): Queue ID.\ntx_bytes (int): Number of transmitted bytes.\ntx_packets (int): Number of transmitted packets.\ntx_errors (int): Number of packets dropped due to overrun.", "source": "juraj-google-style"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    \n    self._last_month = 0\n    self._year_use = parser_mediator.GetEstimatedYear()\n\n    try:\n      structure = self.FIREWALL_LINE.parseString(line)\n    except pyparsing.ParseException as exception:\n      logger.debug((\n          'Unable to parse file as a Mac AppFirewall log file with error: '\n          '{0!s}').format(exception))\n      return False\n\n    if structure.action != 'creating /var/log/appfirewall.log':\n      logger.debug(\n          'Not a Mac AppFirewall log file, invalid action: {0!s}'.format(\n              structure.action))\n      return False\n\n    if structure.status != 'Error':\n      logger.debug(\n          'Not a Mac AppFirewall log file, invalid status: {0!s}'.format(\n              structure.status))\n      return False\n\n    time_elements_tuple = self._GetTimeElementsTuple(structure)\n\n    try:\n      dfdatetime_time_elements.TimeElements(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      logger.debug((\n          'Not a Mac AppFirewall log file, invalid date and time: '\n          '{0!s}').format(structure.date_time))\n      return False\n\n    self._last_month = time_elements_tuple[1]\n\n    return True", "docstring": "Verify that this file is a Mac AppFirewall log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"}
{"code": "def save(self, filename):\n        \n\n        if filename is None:\n            filename = self.filename('.b26s')\n        \n        \n        with open(filename, 'w') as outfile:\n            outfile.write(pickle.dumps(self.__dict__))", "docstring": "saves the instance of the script to a file using pickle\nArgs:\nfilename: target filename", "source": "juraj-google-style"}
{"code": "def add(x1, x2, output_shape=None, name=None):\n  \n  output_shape = convert_to_shape(output_shape)\n  if not isinstance(x2, Tensor):\n    return ScalarAddOperation(x1, x2).outputs[0]\n  with tf.name_scope(name, default_name=\"add\"):\n    x1, x2 = binary_arguments_to_tensors(x1, x2)\n    return AddOperation(\n        x1, x2, output_shape=_infer_binary_broadcast_shape(\n            x1.shape, x2.shape, output_shape)).outputs[0]", "docstring": "Binary addition with broadcsting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "juraj-google-style"}
{"code": "def search_user_directory(self, term: str) -> List[User]:\n        \n        response = self.api._send(\n            'POST',\n            '/user_directory/search',\n            {\n                'search_term': term,\n            },\n        )\n        try:\n            return [\n                User(self.api, _user['user_id'], _user['display_name'])\n                for _user in response['results']\n            ]\n        except KeyError:\n            return []", "docstring": "Search user directory for a given term, returning a list of users\nArgs:\nterm: term to be searched for\nReturns:\nuser_list: list of users returned by server-side search", "source": "juraj-google-style"}
{"code": "def _process_thread(self, client):\n    system_type = client.data.os_info.system\n    print('System type: {0:s}'.format(system_type))\n    artifact_list = []\n    if self.artifacts:\n        print('Artifacts to be collected: {0!s}'.format(self.artifacts))\n        artifact_list = self.artifacts\n    else:\n        default_artifacts = self.artifact_registry.get(system_type, None)\n        if default_artifacts:\n            print('Collecting default artifacts for {0:s}: {1:s}'.format(system_type, ', '.join(default_artifacts)))\n            artifact_list.extend(default_artifacts)\n    if self.extra_artifacts:\n        print('Throwing in an extra {0!s}'.format(self.extra_artifacts))\n        artifact_list.extend(self.extra_artifacts)\n        artifact_list = list(set(artifact_list))\n    if (not artifact_list):\n        return\n    flow_args = flows_pb2.ArtifactCollectorFlowArgs(artifact_list=artifact_list, use_tsk=self.use_tsk, ignore_interpolation_errors=True, apply_parsers=False)\n    flow_id = self._launch_flow(client, 'ArtifactCollectorFlow', flow_args)\n    self._await_flow(client, flow_id)\n    collected_flow_data = self._download_files(client, flow_id)\n    if collected_flow_data:\n        print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))\n        fqdn = client.data.os_info.fqdn.lower()\n        self.state.output.append((fqdn, collected_flow_data))", "docstring": "Process a single GRR client.\n\nArgs:\nclient: a GRR client object.", "source": "codesearchnet"}
{"code": "def get_v1_constants(module: Any) -> Sequence[str]:\n    constants_v1 = []\n    tensorflow_constants_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].constants\n    if hasattr(module, tensorflow_constants_attr_v1):\n        constants_v1.extend(getattr(module, tensorflow_constants_attr_v1))\n    return constants_v1", "docstring": "Get a list of TF 1.* constants in this module.\n\nArgs:\nmodule: TensorFlow module.\n\nReturns:\nList of all API constants under the given module.", "source": "github-repos"}
{"code": "def is_valid_transition(self, source: str, dest: str) -> bool:\n        \n        if dest not in self._states or source not in self._states:\n            raise NotValidState\n        elif dest not in self._transitions[source]:\n            raise NotValidTransition\n        return True", "docstring": "Checks if a transitions is registered in the FSM\n\nArgs:\nsource (str): the source state name\ndest (str): the destination state name\n\nReturns:\nbool: wether the transition is valid or not", "source": "juraj-google-style"}
{"code": "def run_tpm(tpm, time_scale):\n    \n    sbs_tpm = convert.state_by_node2state_by_state(tpm)\n    if sparse(tpm):\n        tpm = sparse_time(sbs_tpm, time_scale)\n    else:\n        tpm = dense_time(sbs_tpm, time_scale)\n    return convert.state_by_state2state_by_node(tpm)", "docstring": "Iterate a TPM by the specified number of time steps.\n\nArgs:\ntpm (np.ndarray): A state-by-node tpm.\ntime_scale (int): The number of steps to run the tpm.\n\nReturns:\nnp.ndarray", "source": "juraj-google-style"}
{"code": "def contains(array, ty, string):\n    \n    weld_obj = WeldObject(encoder_, decoder_)\n\n    string_obj = weld_obj.update(string)\n    if isinstance(string, WeldObject):\n        string_obj =  string.obj_id\n        weld_obj.dependencies[string_obj] = string\n\n    array_var = weld_obj.update(array)\n    if isinstance(array, WeldObject):\n        array_var = array.obj_id\n        weld_obj.dependencies[array_var] = array\n\n    (start, end) = 0, len(string)\n    \n    \n    weld_template = \n    weld_obj.weld_code = weld_template % {\"array\": array_var, \"ty\": ty,\n                                          \"start\": start, \"end\": end,\n                                          \"cmpstr\": string_obj}\n\n    return weld_obj", "docstring": "Checks if given string is contained in each string in the array.\nOutput is a vec of booleans.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nstart (int): starting index\nsize (int): length to truncate at\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"}
{"code": "def query_dict_to_string(query):\n    query_params = []\n    for (key, value) in query.items():\n        query_params.append(((key + '=') + value))\n    return '&'.join(query_params)", "docstring": "Convert an OrderedDict to a query string.\n\nArgs:\nquery (obj): The key value object with query params.\n\nReturns:\nstr: The query string.\n\nNote:\nThis method does the same as urllib.parse.urlencode except\nthat it doesn't actually encode the values.", "source": "codesearchnet"}
{"code": "def bin_to_mac(bin, size=6):\n    if (len(bin) != size):\n        raise Exception(('Invalid MAC address: %s' % bin))\n    return ':'.join([binascii.hexlify(o) for o in bin])", "docstring": "Convert 6 bytes into a MAC string.\n\nArgs:\nbin (str): hex string of lenth 6.\n\nReturns:\nstr: String representation of the MAC address in lower case.\n\nRaises:\nException: if ``len(bin)`` is not 6.", "source": "codesearchnet"}
{"code": "def BSearchCeil(a, x, lo=0, hi=None):\n    \n    if len(a) == 0: return -1\n    hi = hi if hi is not None else len(a)\n    pos = bisect_left(a, x, lo, hi)\n    return pos if pos < hi else -1", "docstring": "Returns lowest i such as a[i] >= x, or -1 if x > all elements in a\n\nSo, if x is in between two elements in a, this function will return the\nindex of the higher element, hence \"Ceil\".\n\nArguments:\na -- ordered numeric sequence\nx -- element to search within a\nlo -- lowest index to consider in search\nhi -- highest index to consider in search", "source": "juraj-google-style"}
{"code": "def validate_args(self, qubits: Sequence[Qid]) -> None:\n        \n        if len(qubits) == 0:\n            raise ValueError(\n                \"Applied a gate to an empty set of qubits. Gate: {}\".format(\n                    repr(self)))\n\n        if len(qubits) != self.num_qubits():\n            raise ValueError(\n                'Wrong number of qubits for <{!r}>. '\n                'Expected {} qubits but got <{!r}>.'.format(\n                    self,\n                    self.num_qubits(),\n                    qubits))\n\n        if any([not isinstance(qubit, Qid)\n                for qubit in qubits]):\n            raise ValueError(\n                    'Gate was called with type different than Qid.')", "docstring": "Checks if this gate can be applied to the given qubits.\n\nBy default checks if input is of type Qid and qubit count.\nChild classes can override.\n\nArgs:\nqubits: The collection of qubits to potentially apply the gate to.\n\nThrows:\nValueError: The gate can't be applied to the qubits.", "source": "juraj-google-style"}
{"code": "def update(self, task_name, task_json):\n    r = self.gbdx_connection.put(((self._base_url + '/') + task_name), json=task_json)\n    raise_for_status(r)\n    return r.json()", "docstring": "Updates a GBDX task.\n\nArgs:\ntask_name (str): Task name.\ntask_json (dict): Dictionary representing updated task definition.\n\nReturns:\nDictionary representing the updated task definition.", "source": "codesearchnet"}
{"code": "def find_field(item_list, cond, comparator, target_field):\n    \n    for item in item_list:\n        if comparator(item, cond) and target_field in item:\n            return item[target_field]\n    return None", "docstring": "Finds the value of a field in a dict object that satisfies certain\nconditions.\n\nArgs:\nitem_list: A list of dict objects.\ncond: A param that defines the condition.\ncomparator: A function that checks if an dict satisfies the condition.\ntarget_field: Name of the field whose value to be returned if an item\nsatisfies the condition.\n\nReturns:\nTarget value or None if no item satisfies the condition.", "source": "juraj-google-style"}
{"code": "def new_message_from_header(header):\n    \n    message_type = header.message_type\n    if not isinstance(message_type, Type):\n        try:\n            if isinstance(message_type, str):\n                message_type = Type[message_type]\n            elif isinstance(message_type, int):\n                message_type = Type(message_type)\n        except ValueError:\n            raise ValueError\n\n    message = new_message_from_message_type(message_type)\n    message.header.xid = header.xid\n    message.header.length = header.length\n\n    return message", "docstring": "Given an OF Header, return an empty message of header's message_type.\n\nArgs:\nheader (~pyof.v0x01.common.header.Header): Unpacked OpenFlow Header.\n\nReturns:\nEmpty OpenFlow message of the same type of message_type attribute from\nthe given header.\nThe header attribute of the message will be populated.\n\nRaises:\nKytosUndefinedMessageType: Unkown Message_Type.", "source": "juraj-google-style"}
{"code": "def ParseFileEntry(self, parser_mediator, file_entry):\n    \n    filename = parser_mediator.GetFilename()\n    database = SQLiteDatabase(\n        filename, temporary_directory=parser_mediator.temporary_directory)\n\n    file_object = file_entry.GetFileObject()\n    try:\n      database.Open(file_object)\n\n    except (IOError, ValueError, sqlite3.DatabaseError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open SQLite database with error: {0!s}'.format(exception))\n      file_object.close()\n      return\n\n    database_wal, wal_file_entry = self._OpenDatabaseWithWAL(\n        parser_mediator, file_entry, file_object, filename)\n\n    file_object.close()\n\n    \n    cache = SQLiteCache()\n    try:\n      table_names = frozenset(database.tables)\n\n      for plugin in self._plugins:\n        if not plugin.REQUIRED_TABLES.issubset(table_names):\n          continue\n\n        schema_match = plugin.CheckSchema(database)\n        if plugin.REQUIRES_SCHEMA_MATCH and not schema_match:\n          parser_mediator.ProduceExtractionWarning((\n              'plugin: {0:s} found required tables but not a matching '\n              'schema').format(plugin.NAME))\n          continue\n\n        parser_mediator.SetFileEntry(file_entry)\n        parser_mediator.AddEventAttribute('schema_match', schema_match)\n\n        try:\n          plugin.UpdateChainAndProcess(\n              parser_mediator, cache=cache, database=database,\n              database_wal=database_wal, wal_file_entry=wal_file_entry)\n\n        except Exception as exception:  \n          parser_mediator.ProduceExtractionWarning((\n              'plugin: {0:s} unable to parse SQLite database with error: '\n              '{1!s}').format(plugin.NAME, exception))\n\n        finally:\n          parser_mediator.RemoveEventAttribute('schema_match')\n\n        if not database_wal:\n          continue\n\n        schema_match = plugin.CheckSchema(database)\n\n        parser_mediator.SetFileEntry(wal_file_entry)\n        parser_mediator.AddEventAttribute('schema_match', schema_match)\n\n        try:\n          plugin.UpdateChainAndProcess(\n              parser_mediator, cache=cache, database=database,\n              database_wal=database_wal, wal_file_entry=wal_file_entry)\n\n        except Exception as exception:  \n          parser_mediator.ProduceExtractionWarning((\n              'plugin: {0:s} unable to parse SQLite database and WAL with '\n              'error: {1!s}').format(plugin.NAME, exception))\n\n        finally:\n          parser_mediator.RemoveEventAttribute('schema_match')\n\n    finally:\n      database.Close()", "docstring": "Parses a SQLite database file entry.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_entry (dfvfs.FileEntry): file entry to be parsed.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def preprocess_na(sent, label_type):\n    \n    if label_type == \"phonemes_and_tones\":\n        phonemes = True\n        tones = True\n        tgm = True\n    elif label_type == \"phonemes_and_tones_no_tgm\":\n        phonemes = True\n        tones = True\n        tgm = False\n    elif label_type == \"phonemes\":\n        phonemes = True\n        tones = False\n        tgm = False\n    elif label_type == \"tones\":\n        phonemes = False\n        tones = True\n        tgm = True\n    elif label_type == \"tones_notgm\":\n        phonemes = False\n        tones = True\n        tgm = False\n    else:\n        raise ValueError(\"Unrecognized label type: %s\" % label_type)\n\n    def pop_phoneme(sentence):\n        \n        \n\n        \n        if phonemes:\n            if sentence[:4] in [\"əəə…\", \"mmm…\"]:\n                return sentence[:4], sentence[4:]\n            if sentence.startswith(\"ə…\"):\n                return \"əəə…\", sentence[2:]\n            if sentence.startswith(\"m…\"):\n                return \"mmm…\", sentence[2:]\n            if sentence.startswith(\"mm…\"):\n                return \"mmm…\", sentence[3:]\n\n        \n        if sentence[:3] == \"wæ̃\":\n            if phonemes:\n                return \"w̃æ\", sentence[3:]\n            else:\n                return None, sentence[3:]\n        if sentence[:3] == \"ṽ̩\":\n            if phonemes:\n                return \"ṽ̩\", sentence[3:]\n            else:\n                return None, sentence[3:]\n\n        if sentence[:3] in TRI_PHNS:\n            if phonemes:\n                return sentence[:3], sentence[3:]\n            else:\n                return None, sentence[3:]\n        if sentence[:2] in BI_PHNS:\n            if phonemes:\n                return sentence[:2], sentence[2:]\n            else:\n                return None, sentence[2:]\n        if sentence[:2] == \"˧̩\":\n            return \"˧\", sentence[2:]\n        if sentence[:2] == \"˧̍\":\n            return \"˧\", sentence[2:]\n        if sentence[0] in UNI_PHNS:\n            if phonemes:\n                return sentence[0], sentence[1:]\n            else:\n                return None, sentence[1:]\n        if sentence[:2] in BI_TONES:\n            if tones:\n                return sentence[:2], sentence[2:]\n            else:\n                return None, sentence[2:]\n        if sentence[0] in UNI_TONES:\n            if tones:\n                return sentence[0], sentence[1:]\n            else:\n                return None, sentence[1:]\n        if sentence[0] in MISC_SYMBOLS:\n            \n            return None, sentence[1:]\n        if sentence[0] in BAD_NA_SYMBOLS:\n            return None, sentence[1:]\n        if sentence[0] in PUNC_SYMBOLS:\n            return None, sentence[1:]\n        if sentence[0] in [\"-\", \"ʰ\", \"/\"]:\n            return None, sentence[1:]\n        if sentence[0] in set([\"<\", \">\"]):\n            \n            \n            return None, sentence[1:]\n        if sentence[0] == \"[\":\n            \n            \n            if sentence.find(\"]\") == len(sentence)-1:\n                \n                return None, \"\"\n            else:\n                return None, sentence[sentence.find(\"]\")+1:]\n        if sentence[0] in set([\" \", \"\\t\", \"\\n\"]):\n            \n            \n            return \" \", sentence[1:]\n        if sentence[0] == \"|\" or sentence[0] == \"ǀ\" or sentence[0] == \"◊\":\n            \n            \n            \n            if tgm:\n                
return \"|\", sentence[1:]\n            else:\n                return None, sentence[1:]\n        if sentence[0] in \"()\":\n            return None, sentence[1:]\n        print(\"***\" + sentence)\n        raise ValueError(\"Next character not recognized: \" + sentence[:1])\n\n    def filter_for_phonemes(sentence):\n        \n\n        filtered_sentence = []\n        while sentence != \"\":\n            phoneme, sentence = pop_phoneme(sentence)\n            if phoneme != \" \":\n                filtered_sentence.append(phoneme)\n        filtered_sentence = [item for item in filtered_sentence if item != None]\n        return \" \".join(filtered_sentence)\n\n    \n    if \"BEGAIEMENT\" in sent:\n        return \"\"\n    sent = filter_for_phonemes(sent)\n    return sent", "docstring": "Preprocess Na sentences\n\nArgs:\nsent: A sentence\nlabel_type: The type of label provided", "source": "juraj-google-style"}
{"code": "def validate(self, profile):\n        \n\n        ij = self.load_install_json(profile.get('install_json'))\n        print('{}{}Profile: \"{}\".'.format(c.Style.BRIGHT, c.Fore.BLUE, profile.get('profile_name')))\n        for arg in self.profile_settings_args_install_json(ij, None):\n            if profile.get('args', {}).get('app', {}).get(arg) is None:\n                print('{}{}Input \"{}\" not found.'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg))", "docstring": "Check to see if any args are \"missing\" from profile.\n\nValidate all args from install.json are in the profile.  This can be helpful to validate\nthat any new args added to App are included in the profiles.\n\n.. Note:: This method does not work with layout.json Apps.\n\nArgs:\nprofile (dict): The current profile to validate.", "source": "juraj-google-style"}
{"code": "def xmoe2_dense(sz):\n    hparams = mtf_transformer.mtf_transformer_paper_lm(sz)\n    hparams.attention_dropout = 0.0\n    hparams.relu_dropout = 0.0\n    hparams.layer_prepostprocess_dropout = 0.0\n    hparams.max_length = 1024\n    hparams.batch_size = 128\n    hparams.learning_rate_schedule = 'rsqrt_decay*linear_decay'\n    hparams.learning_rate_decay_steps = 65536\n    hparams.layout = 'batch:batch;vocab:model;d_ff:model;heads:model'\n    hparams.mesh_shape = 'batch:32'\n    return hparams", "docstring": "Series of architectural experiments on language modeling.\n\nLarger models than the ones above.\n\nAll models are trained on sequences of 1024 tokens.\n\nWe assume infinite training data, so no dropout necessary.\nWe process 2^36 tokens in training = 524288 steps at batch size 128\n\nTODO(noam): find a large enough dataset for these experiments.\n\nYou can use languagemodel_wiki_noref_v32k_l1k, but this is too small,\n(1 epoch = ~46000 steps) so training will cover about 11 epochs.\n\nNote: configurations and code are likely to change without notice.\n\nRun on TPU 4x4 for 524288 steps unless otherwise indicated.\n\nArgs:\nsz: an integer\n\nReturns:\na hparams", "source": "codesearchnet"}
{"code": "def filter_values(cls, part_info):\n        \n        \n        filtered = []\n        for info_list in cls.filter_parts(part_info).values():\n            filtered += info_list\n        return filtered", "docstring": "Filter the part_info dict list looking for instances of our class\n\nArgs:\npart_info (dict): {part_name: [Info] or None} as returned from\nController.run_hook()\n\nReturns:\nlist: [info] where info is a subclass of cls", "source": "juraj-google-style"}
{"code": "def DeserializeExclusiveData(self, reader):\n        \n\n        self.Type = TransactionType.IssueTransaction\n\n        if self.Version > 1:\n            raise Exception('Invalid TX Type')", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def export_analytics_data_to_csv(data, output_folder, result_info_key, identifier_keys):\n    \n    workbook = create_excel_workbook(data, result_info_key, identifier_keys)\n\n    suffix = '.csv'\n\n    if not os.path.exists(output_folder):\n        os.makedirs(output_folder)\n\n    for worksheet in workbook.worksheets:\n        file_name = utilities.convert_title_to_snake_case(worksheet.title)\n\n        file_path = os.path.join(output_folder, file_name + suffix)\n\n        mode = 'w'\n        if sys.version_info[0] < 3:\n            mode = 'wb'\n        with io.open(file_path, mode) as output_file:\n            csv_writer = csv.writer(output_file)\n            for row in worksheet.rows:\n                csv_writer.writerow([cell.value for cell in row])\n\n    print('Saved CSV files to {}'.format(output_folder))", "docstring": "Creates CSV files containing data returned by the Analytics API.\nCreates one file per requested endpoint and saves it into the\nspecified output_folder\n\nArgs:\ndata: Analytics API data as a list of dicts\noutput_folder: Path to a folder to save the CSV files into", "source": "juraj-google-style"}
{"code": "def set_margin(self, top=40, bottom=30, left=50, right=10, buffer_size=8):\n    self.set_integer('top', top)\n    self.set_integer('bottom', bottom)\n    self.set_integer('left', left)\n    self.set_integer('right', right)\n    self.set_integer('buffer', buffer_size)", "docstring": "Set margin of the chart.\n\nArgs:\ntop (int): size of top margin in pixels.\nbottom (int): size of bottom margin in pixels.\nleft (int): size of left margin in pixels.\nright (int): size of right margin in pixels.\nbuffer_size (int): buffer size in pixels between the chart and margins.", "source": "codesearchnet"}
{"code": "def napalm_get(task: Task, getters: List[str], getters_options: GetterOptionsDict=None, **kwargs: Any) -> Result:\n    device = task.host.get_connection('napalm', task.nornir.config)\n    getters_options = (getters_options or {})\n    if isinstance(getters, str):\n        getters = [getters]\n    result = {}\n    for g in getters:\n        options = copy.deepcopy(kwargs)\n        options.update(getters_options.get(g, {}))\n        getter = (g if g.startswith('get_') else 'get_{}'.format(g))\n        method = getattr(device, getter)\n        result[g] = method(**options)\n    return Result(host=task.host, result=result)", "docstring": "Gather information from network devices using napalm\n\nArguments:\ngetters: getters to use\ngetters_options (dict of dicts): When passing multiple getters you\npass a dictionary where the outer key is the getter name\nand the included dictionary represents the options to pass\nto the getter\n**kwargs: will be passed as they are to the getters\n\nExamples:\n\nSimple example::\n\n> nr.run(task=napalm_get,\n>        getters=[\"interfaces\", \"facts\"])\n\nPassing options using ``**kwargs``::\n\n> nr.run(task=napalm_get,\n>        getters=[\"config\"],\n>        retrieve=\"all\")\n\nPassing options using ``getters_options``::\n\n> nr.run(task=napalm_get,\n>        getters=[\"config\", \"interfaces\"],\n>        getters_options={\"config\": {\"retrieve\": \"all\"}})\n\nReturns:\nResult object with the following attributes set:\n* result (``dict``): dictionary with the result of the getter", "source": "codesearchnet"}
{"code": "def tag_match(self, tags=None):\n        \n        if 'tags' not in self.database.collection_names():\n            print 'Warning: Searching on non-existance tags collection'\n            return None\n        if not tags:\n            cursor = self.database['tags'].find({}, {'_id':0, 'md5':1})\n        else:\n            cursor = self.database['tags'].find({'tags': {'$in': tags}}, {'_id':0, 'md5':1})\n\n        \n        \n        tag_md5s = set([item['md5'] for item in cursor])\n        sample_md5s = set(item['md5'] for item in self.database['samples'].find({}, {'_id':0, 'md5':1}))\n        return list(tag_md5s.intersection(sample_md5s))", "docstring": "List all samples that match the tags or all if tags are not specified.\n\nArgs:\ntags: Match samples against these tags (or all if not specified)\n\nReturns:\nList of the md5s for the matching samples", "source": "juraj-google-style"}
{"code": "def delete(self, delete_contents=False):\n    if (not self.exists()):\n        raise Exception(('Cannot delete non-existent dataset %s' % self._full_name))\n    try:\n        self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)\n    except Exception as e:\n        raise e\n    self._info = None\n    return None", "docstring": "Issues a request to delete the dataset.\n\nArgs:\ndelete_contents: if True, any tables and views in the dataset will be deleted. If False\nand the dataset is non-empty an exception will be raised.\nReturns:\nNone on success.\nRaises:\nException if the delete fails (including if table was nonexistent).", "source": "codesearchnet"}
{"code": "def symmetric_difference_update(self, other):\n    other = self._as_multiset(other)\n    elements = (set(self.distinct_elements()) | set(other.distinct_elements()))\n    for element in elements:\n        multiplicity = self[element]\n        other_count = other[element]\n        self[element] = ((multiplicity - other_count) if (multiplicity > other_count) else (other_count - multiplicity))", "docstring": "r\"\"\"Update the multiset to contain only elements in either this multiset or the other but not both.\n\n>>> ms = Multiset('aab')\n>>> ms.symmetric_difference_update('abc')\n>>> sorted(ms)\n['a', 'c']\n\nYou can also use the ``^=`` operator for the same effect. However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aabbbc')\n>>> ms ^= Multiset('abd')\n>>> sorted(ms)\n['a', 'b', 'b', 'c', 'd']\n\nFor a variant of the operation which does not modify the multiset, but returns a new\nmultiset instead see :meth:`symmetric_difference`.\n\nArgs:\nother: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].", "source": "codesearchnet"}
{"code": "def get_inventory_str(self, keys=None):\n        \n        inventory = self.get_inventory(keys)\n        lines = []\n        for name, hosts in inventory.viewitems():\n            lines.append('[{name}]'.format(name=name))\n            for host in sorted(hosts):\n                lines.append(host)\n\n        return '\\n'.join(lines)", "docstring": "Convert a dict generated by ansible.LagoAnsible.get_inventory\nto an INI-like file.\n\nArgs:\nkeys (list of str): Path to the keys that will be used to\ncreate groups.\n\nReturns:\nstr: INI-like Ansible inventory", "source": "juraj-google-style"}
{"code": "def ver(self, value):\n        \n        if value == self._defaults['ver'] and 'ver' in self._values:\n            del self._values['ver']\n        else:\n            self._values['ver'] = value", "docstring": "The ver property.\n\nArgs:\nvalue (int). the property value.", "source": "juraj-google-style"}
{"code": "def _circuit_as_layers(circuit: circuits.Circuit,\n                       grouping: _QubitGrouping) -> List[_TransformsThenCzs]:\n    \n    frontier = {q: 0 for q in circuit.all_qubits()}\n\n    layers = []\n    while True:\n        \n        any_group_matrices = False\n        group_matrices = []\n        for g in grouping.groups:\n            \n            start_frontier = {q: frontier[q] for q in g}\n            end_frontier = circuit.reachable_frontier_from(start_frontier)\n            mergeable_ops = circuit.findall_operations_between(start_frontier,\n                                                               end_frontier)\n\n            \n            for q, v in end_frontier.items():\n                frontier[q] = v\n\n            \n            group_matrix = np.eye(1 << len(g)).reshape((2, 2) * len(g))\n            if mergeable_ops:\n                any_group_matrices = True\n            for _, op in mergeable_ops:\n                group_matrix = linalg.targeted_left_multiply(\n                    left_matrix=protocols.unitary(op).reshape(\n                        (2, 2) * len(op.qubits)),\n                    right_target=group_matrix,\n                    target_axes=[grouping.loc(q)[1] for q in op.qubits])\n            group_matrices.append(np.transpose(group_matrix.reshape(\n                1 << len(g), 1 << len(g))))\n\n        \n        end_frontier = circuit.reachable_frontier_from(\n            frontier,\n            is_blocker=lambda op: grouping.all_in_same_group(*op.qubits))\n        cz_ops = circuit.findall_operations_between(frontier, end_frontier)\n\n        \n        frontier = end_frontier\n\n        \n        cz_indices = []\n        for _, cz in cz_ops:\n            a, b = cz.qubits\n            assert cz == ops.CZ(a, b)\n            cz_indices.append((grouping.ind(a), grouping.ind(b)))\n\n        \n        if not any_group_matrices and not cz_indices:\n            break\n        layer = _TransformsThenCzs(group_matrices=group_matrices,\n                                   cz_indices=cz_indices)\n        layers.append(layer)\n\n    \n    assert frontier == {q: len(circuit) for q in circuit.all_qubits()}\n\n    return layers", "docstring": "Transforms a circuit into a series of GroupMatrix+CZ layers.\n\nArgs:\ncircuit: The circuit to transform.\ngrouping: How the circuit's qubits are combined into groups.\n\nReturns:\nA list of layers. Each layer has a matrix to apply to each group of\nqubits, and a list of CZs to apply to pairs of qubits crossing\nbetween groups.", "source": "juraj-google-style"}
{"code": "def _CheckCompositeMap(self, data_type_definition):\n    if (not data_type_definition):\n        raise errors.FormatError('Missing data type definition')\n    members = getattr(data_type_definition, 'members', None)\n    if (not members):\n        raise errors.FormatError('Invalid data type definition missing members')\n    is_composite_map = False\n    last_member_byte_order = data_type_definition.byte_order\n    for member_definition in members:\n        if member_definition.IsComposite():\n            is_composite_map = True\n            break\n        if ((last_member_byte_order != definitions.BYTE_ORDER_NATIVE) and (member_definition.byte_order != definitions.BYTE_ORDER_NATIVE) and (last_member_byte_order != member_definition.byte_order)):\n            is_composite_map = True\n            break\n        last_member_byte_order = member_definition.byte_order\n    return is_composite_map", "docstring": "Determines if the data type definition needs a composite map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): structure data type definition.\n\nReturns:\nbool: True if a composite map is needed, False otherwise.\n\nRaises:\nFormatError: if a composite map is needed cannot be determined from the\ndata type definition.", "source": "codesearchnet"}
{"code": "def cluster_sites(mol, tol, give_only_index=False):\n    \n    \n    \n    dists = [[np.linalg.norm(site.coords), 0] for site in mol]\n    import scipy.cluster as spcluster\n    f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')\n    clustered_dists = defaultdict(list)\n    for i, site in enumerate(mol):\n        clustered_dists[f[i]].append(dists[i])\n    avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}\n    clustered_sites = defaultdict(list)\n    origin_site = None\n    for i, site in enumerate(mol):\n        if avg_dist[f[i]] < tol:\n            if give_only_index:\n                origin_site = i\n            else:\n                origin_site = site\n        else:\n            if give_only_index:\n                clustered_sites[\n                    (avg_dist[f[i]], site.species)].append(i)\n            else:\n                clustered_sites[\n                    (avg_dist[f[i]], site.species)].append(site)\n    return origin_site, clustered_sites", "docstring": "Cluster sites based on distance and species type.\n\nArgs:\nmol (Molecule): Molecule **with origin at center of mass**.\ntol (float): Tolerance to use.\n\nReturns:\n(origin_site, clustered_sites): origin_site is a site at the center\nof mass (None if there are no origin atoms). clustered_sites is a\ndict of {(avg_dist, species_and_occu): [list of sites]}", "source": "juraj-google-style"}
{"code": "def call(self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    use_cache = use_cache if use_cache is not None else self.config.use_cache\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    if encoder_outputs is None:\n        encoder_outputs = self.encoder(input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    elif return_dict and (not isinstance(encoder_outputs, TFBaseModelOutput)):\n        encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n    decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    if not return_dict:\n        return decoder_outputs + encoder_outputs\n    return TFSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import tensorflow as tf\n>>> from transformers import TFWhisperModel, AutoFeatureExtractor\n>>> from datasets import load_dataset\n\n>>> model = TFWhisperModel.from_pretrained(\"openai/whisper-base\")\n>>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"openai/whisper-base\")\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> inputs = feature_extractor(ds[0][\"audio\"][\"array\"], return_tensors=\"tf\")\n>>> input_features = inputs.input_features\n>>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id\n>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state\n>>> list(last_hidden_state.shape)\n[1, 2, 512]\n```", "source": "github-repos"}
{"code": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n    if input_length is None:\n        return None\n    assert padding in {'same', 'valid', 'full', 'causal'}\n    dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n    if padding in ['same', 'causal']:\n        output_length = input_length\n    elif padding == 'valid':\n        output_length = input_length - dilated_filter_size + 1\n    elif padding == 'full':\n        output_length = input_length + dilated_filter_size - 1\n    return (output_length + stride - 1)", "docstring": "Determines output length of a convolution given input length.\n\nArgs:\ninput_length: integer.\nfilter_size: integer.\npadding: one of \"same\", \"valid\", \"full\", \"causal\"\nstride: integer.\ndilation: dilation rate, integer.\n\nReturns:\nThe output length (integer).", "source": "github-repos"}
{"code": "def __call__(self, *args, **kwargs):\n    if not self._verified:\n        model = self._get_func()\n        concrete_func = model.get_concrete_function(*args, **kwargs)\n        converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func], model)\n        if self._converter_target_spec is not None:\n            converter.target_spec = self._converter_target_spec\n        if self._converter_allow_custom_ops is not None:\n            converter.allow_custom_ops = self._converter_allow_custom_ops\n        try:\n            converter.convert()\n        except convert.ConverterError as err:\n            self._decode_error(err)\n        finally:\n            self._verified = True\n    return self._get_func()(*args, **kwargs)", "docstring": "Calls decorated function object.\n\nAlso verifies if the function is compatible with TFLite.\n\nReturns:\nA execution result of the decorated function.", "source": "github-repos"}
{"code": "def _GetShortFlags(flags):\n    short_flags = [f[0] for f in flags]\n    short_flag_counts = collections.Counter(short_flags)\n    return [v for v in short_flags if short_flag_counts[v] == 1]", "docstring": "Gets a list of single-character flags that uniquely identify a flag.\n\nArgs:\nflags: list of strings representing flags\n\nReturns:\nList of single character short flags,\nwhere the character occurred at the start of a flag once.", "source": "github-repos"}
{"code": "def run(\n        self,\n        num_episodes=-1,\n        max_episode_timesteps=-1,\n        episode_finished=None,\n        summary_report=None,\n        summary_interval=0,\n        num_timesteps=None,\n        deterministic=False,\n        episodes=None,\n        max_timesteps=None,\n        testing=False,\n        sleep=None\n    ):\n        \n\n        \n        if episodes is not None:\n            num_episodes = episodes\n            warnings.warn(\"WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.\",\n                          category=DeprecationWarning)\n        assert isinstance(num_episodes, int)\n        \n        if max_timesteps is not None:\n            max_episode_timesteps = max_timesteps\n            warnings.warn(\"WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.\",\n                          category=DeprecationWarning)\n        assert isinstance(max_episode_timesteps, int)\n\n        if summary_report is not None:\n            warnings.warn(\"WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback \"\n                          \"instead to generate summaries every n episodes.\",\n                          category=DeprecationWarning)\n\n        self.reset()\n\n        \n        self.global_episode = 0\n        self.global_timestep = 0\n        self.should_stop = False\n\n        \n        threads = [threading.Thread(target=self._run_single, args=(t, self.agent[t], self.environment[t],),\n                                    kwargs={\"deterministic\": deterministic,\n                                            \"max_episode_timesteps\": max_episode_timesteps,\n                                            \"episode_finished\": episode_finished,\n                                            \"testing\": testing,\n                                            \"sleep\": sleep})\n                   for t in range(len(self.agent))]\n\n        \n        self.start_time = time.time()\n        [t.start() for t in threads]\n\n        \n        try:\n            next_summary = 0\n            next_save = 0 if self.save_frequency_unit != \"s\" else time.time()\n            while any([t.is_alive() for t in threads]) and self.global_episode < num_episodes or num_episodes == -1:\n                self.time = time.time()\n\n                \n                if summary_report is not None and self.global_episode > next_summary:\n                    summary_report(self)\n                    next_summary += summary_interval\n\n                if self.save_path and self.save_frequency is not None:\n                    do_save = True\n                    current = None\n                    if self.save_frequency_unit == \"e\" and self.global_episode > next_save:\n                        current = self.global_episode\n                    elif self.save_frequency_unit == \"s\" and self.time > next_save:\n                        current = self.time\n                    elif self.save_frequency_unit == \"t\" and self.global_timestep > next_save:\n                        current = self.global_timestep\n                    else:\n                        do_save = False\n\n                    if do_save:\n                        self.agent[0].save_model(self.save_path)\n                        \n                        while next_save < current:\n                            next_save += self.save_frequency\n                time.sleep(1)\n\n        except KeyboardInterrupt:\n            print('Keyboard 
interrupt, sending stop command to threads')\n\n        self.should_stop = True\n\n        \n        [t.join() for t in threads]\n        print('All threads stopped')", "docstring": "Executes this runner by starting all Agents in parallel (each one in one thread).\n\nArgs:\nepisodes (int): Deprecated; see num_episodes.\nmax_timesteps (int): Deprecated; see max_episode_timesteps.", "source": "juraj-google-style"}
{"code": "def easeOutBack(n, s=1.70158):\n    \n    _checkRange(n)\n    n = n - 1\n    return n * n * ((s + 1) * n + s) + 1", "docstring": "A tween function that overshoots the destination a little and then backs into the destination.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def compare_version(a, b):\n  \n  aa = string.split(a, \".\")\n  bb = string.split(b, \".\")\n  for i in range(0, 4):\n    if aa[i] != bb[i]:\n      return cmp(int(aa[i]), int(bb[i]))\n  return 0", "docstring": "Compare two version number strings of the form W.X.Y.Z.\n\nThe numbers are compared most-significant to least-significant.\nFor example, 12.345.67.89 > 2.987.88.99.\n\nArgs:\na: First version number string to compare\nb: Second version number string to compare\n\nReturns:\n0 if the numbers are identical, a positive number if 'a' is larger, and\na negative number if 'b' is larger.", "source": "juraj-google-style"}
{"code": "def log_jwt_dict_info(log, msg_str, jwt_dict):\n    \n    d = ts_to_str(jwt_dict)\n    \n    log_list = [(b, d.pop(a)) for a, b, c in CLAIM_LIST if a in d] + [\n        (k, d[k]) for k in sorted(d)\n    ]\n    list(\n        map(\n            log,\n            ['{}:'.format(msg_str)] + ['  {}: {}'.format(k, v) for k, v in log_list],\n        )\n    )", "docstring": "Dump JWT to log.\n\nArgs:\nlog: Logger\nLogger to which to write the message.\n\nmsg_str: str\nA message to write to the log before the JWT values.\n\njwt_dict: dict\nJWT containing values to log.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def histogram_pb(tag, data, buckets=None, description=None):\n    bucket_count = (DEFAULT_BUCKET_COUNT if (buckets is None) else buckets)\n    data = np.array(data).flatten().astype(float)\n    if (data.size == 0):\n        buckets = np.array([]).reshape((0, 3))\n    else:\n        min_ = np.min(data)\n        max_ = np.max(data)\n        range_ = (max_ - min_)\n        if (range_ == 0):\n            center = min_\n            buckets = np.array([[(center - 0.5), (center + 0.5), float(data.size)]])\n        else:\n            bucket_width = (range_ / bucket_count)\n            offsets = (data - min_)\n            bucket_indices = np.floor((offsets / bucket_width)).astype(int)\n            clamped_indices = np.minimum(bucket_indices, (bucket_count - 1))\n            one_hots = (np.array([clamped_indices]).transpose() == np.arange(0, bucket_count))\n            assert (one_hots.shape == (data.size, bucket_count)), (one_hots.shape, (data.size, bucket_count))\n            bucket_counts = np.sum(one_hots, axis=0)\n            edges = np.linspace(min_, max_, (bucket_count + 1))\n            left_edges = edges[:(- 1)]\n            right_edges = edges[1:]\n            buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()\n    tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64)\n    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)\n    summary = summary_pb2.Summary()\n    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)\n    return summary", "docstring": "Create a histogram summary protobuf.\n\nArguments:\ntag: String tag for the summary.\ndata: A `np.array` or array-like form of any shape. Must have type\ncastable to `float`.\nbuckets: Optional positive `int`. The output will have this\nmany buckets, except in two edge cases. If there is no data, then\nthere are no buckets. If there is data but all points have the\nsame value, then there is one bucket whose left and right\nendpoints are the same.\ndescription: Optional long-form description for this summary, as a\n`str`. Markdown is supported. Defaults to empty.\n\nReturns:\nA `summary_pb2.Summary` protobuf object.", "source": "codesearchnet"}
{"code": "def tournament_name2number(self, name):\n    tournaments = self.get_tournaments()\n    d = {t['name']: t['tournament'] for t in tournaments}\n    return d.get(name, None)", "docstring": "Translate tournament name to tournament number.\n\nArgs:\nname (str): tournament name to translate\n\nReturns:\nnumber (int): number of the tournament or `None` if unknown.\n\nExamples:\n>>> NumerAPI().tournament_name2number('delta')\n4\n>>> NumerAPI().tournament_name2number('foo')\nNone", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    \n    if 'RememberedNetworks' not in match:\n      return\n\n    for wifi in match['RememberedNetworks']:\n      ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')\n      security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')\n\n      event_data = plist_event.PlistTimeEventData()\n      event_data.desc = (\n          '[WiFi] Connected to network: <{0:s}> using security {1:s}').format(\n              ssid, security_type)\n      event_data.key = 'item'\n      event_data.root = '/RememberedNetworks'\n\n      datetime_value = wifi.get('LastConnected', None)\n      if datetime_value:\n        event = time_events.PythonDatetimeEvent(\n            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n\n      else:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts relevant Airport entries.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "juraj-google-style"}
{"code": "def predecesors_pattern(element, root):\n\n    def is_root_container(el):\n        return (el.parent.parent.getTagName() == '')\n    if ((not element.parent) or (not element.parent.parent) or is_root_container(element)):\n        return []\n    trail = [[element.parent.parent.getTagName(), _params_or_none(element.parent.parent.params)], [element.parent.getTagName(), _params_or_none(element.parent.params)], [element.getTagName(), _params_or_none(element.params)]]\n    match = root.match(*trail)\n    if (element in match):\n        return [PathCall('match', match.index(element), trail)]", "docstring": "Look for `element` by its predecesors.\n\nArgs:\nelement (obj): HTMLElement instance of the object you are looking for.\nroot (obj): Root of the `DOM`.\n\nReturns:\nlist: ``[PathCall()]`` - list with one :class:`PathCall` object (to \\\nallow use with ``.extend(predecesors_pattern())``).", "source": "codesearchnet"}
{"code": "def _read_message(self):\n    line = self._rfile.readline()\n    if (not line):\n        return None\n    content_length = self._content_length(line)\n    while (line and line.strip()):\n        line = self._rfile.readline()\n    if (not line):\n        return None\n    return self._rfile.read(content_length)", "docstring": "Reads the contents of a message.\n\nReturns:\nbody of message if parsable else None", "source": "codesearchnet"}
{"code": "def get_signatures_with_results(vcs):\n    results_dir = os.path.join(vcs.private_dir(), 'results')\n    if (not os.path.exists(results_dir)):\n        return []\n    rel_paths = os.listdir(results_dir)\n    return [p for p in rel_paths if os.path.isdir(os.path.join(results_dir, p))]", "docstring": "Returns the list of signatures for which test results are saved.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nList[str]", "source": "codesearchnet"}
{"code": "def __init__(self, object_type=None, attributes=None):\n        \n        super(ObjectDefaults, self).__init__(tag=enums.Tags.OBJECT_DEFAULTS)\n\n        self._object_type = None\n        self._attributes = None\n\n        self.object_type = object_type\n        self.attributes = attributes", "docstring": "Construct an ObjectDefaults structure.\n\nArgs:\nobject_type (enum): An ObjectType enumeration identifying the type\nto which the defaults pertain. Optional, defaults to None.\nRequired for read/write.\nattributes (structure): An Attributes structure containing\nattribute values that are defaults for an object type.\nOptional, defaults to None. Required for read/write.", "source": "juraj-google-style"}
{"code": "class AsDict(AsSideInput):\n\n    @staticmethod\n    def _from_runtime_iterable(it, options):\n        return dict(it)\n\n    def _side_input_data(self) -> SideInputData:\n        return SideInputData(common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, dict)", "docstring": "Marker specifying a PCollection to be used as an indexable side input.\n\nIntended for use in side-argument specification---the same places where\nAsSingleton and AsIter are used, but returns an interface that allows\nkey lookup.\n\nArgs:\npcoll: Input pcollection. All elements should be key-value pairs (i.e.\n2-tuples) with unique keys.\n\nReturns:\nAn AsDict-wrapper around a PCollection whose one element is a dict with\nentries for uniquely-keyed pairs in pcoll.", "source": "github-repos"}
{"code": "def GetDevicePath(device_handle):\n  \n  \n  io_service_obj = iokit.IOHIDDeviceGetService(device_handle)\n  str_buffer = ctypes.create_string_buffer(DEVICE_PATH_BUFFER_SIZE)\n  iokit.IORegistryEntryGetPath(io_service_obj, K_IO_SERVICE_PLANE, str_buffer)\n\n  return str_buffer.value", "docstring": "Obtains the unique path for the device.\n\nArgs:\ndevice_handle: reference to the device\n\nReturns:\nA unique path for the device, obtained from the IO Registry", "source": "juraj-google-style"}
{"code": "def _get_path_for_op_id(self, id: str) -> Optional[str]:\n    for (path_key, path_value) in self._get_spec()['paths'].items():\n        for method in self.METHODS:\n            if (method in path_value):\n                if (self.OPERATION_ID_KEY in path_value[method]):\n                    if (path_value[method][self.OPERATION_ID_KEY] == id):\n                        return path_key\n    return None", "docstring": "Searches the spec for a path matching the operation id.\n\nArgs:\nid: operation id\n\nReturns:\npath to the endpoint, or None if not found", "source": "codesearchnet"}
{"code": "def _update_linear_bucket_count(a_float, dist):\n    \n    buckets = dist.linearBuckets\n    if buckets is None:\n        raise ValueError(_BAD_UNSET_BUCKETS % (u'linear buckets'))\n    bucket_counts = dist.bucketCounts\n    num_finite_buckets = buckets.numFiniteBuckets\n    if len(bucket_counts) < num_finite_buckets + 2:\n        raise ValueError(_BAD_LOW_BUCKET_COUNT)\n    width = buckets.width\n    lower = buckets.offset\n    upper = lower + (num_finite_buckets * width)\n    if a_float < lower:\n        index = 0\n    elif a_float >= upper:\n        index = num_finite_buckets + 1\n    else:\n        index = 1 + int(((a_float - lower) / width))\n    bucket_counts[index] += 1\n    _logger.debug(u'upper:%f, lower:%f, width:%f, sample:%f, index:%d',\n                  upper, lower, width, a_float, index)", "docstring": "Adds `a_float` to `dist`, updating the its linear buckets.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated\n\nRaises:\nValueError: if `dist` does not already have linear buckets defined\nValueError: if there are not enough bucket count fields in `dist`", "source": "juraj-google-style"}
{"code": "def learn(self, features, labels):\n    labels = np.ravel(labels)\n    self.__learn_labels(labels)\n    if (len(labels) == 0):\n        return\n    labels = self.labels.transform(labels)\n    if ((self.feature_length > 0) and hasattr(self.clf, 'partial_fit')):\n        self.clf = self.clf.partial_fit(features, labels)\n    else:\n        self.clf = self.clf.fit(features, labels)\n        self.feature_length = len(features[0])", "docstring": "Fits the classifier\n\nIf it's state is empty, the classifier is fitted, if not\nthe classifier is partially fitted.\nSee sklearn's SGDClassifier fit and partial_fit methods.\n\nArgs:\nfeatures (:obj:`list` of :obj:`list` of :obj:`float`)\nlabels (:obj:`list` of :obj:`str`): Labels for each set of features.\nNew features are learnt.", "source": "codesearchnet"}
{"code": "def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):\n  \n  assert stop >= start\n  startRows = []\n  copyRows = []\n  ts = None\n  inc = None\n  if tsCol is None:\n    tsCol = reader.getTimestampFieldIdx()\n  for i, row in enumerate(reader):\n    \n    if ts is None:\n      ts = row[tsCol]\n    elif inc is None:\n      inc = row[tsCol] - ts\n    \n    if i >= start and i <= stop:\n      copyRows.append(row)\n    startRows.append(row)\n  \n  if insertLocation is None:\n    insertLocation = stop + 1\n  startRows[insertLocation:insertLocation] = copyRows\n  \n  for row in startRows:\n    row[tsCol] = ts\n    writer.appendRecord(row)\n    ts += inc", "docstring": "Copies a range of values to a new location in the data set.\n\nArgs:\nreader: A FileRecordStream object with input data.\nwriter: A FileRecordStream object to write output data to.\nstart: The first row in the range to copy.\nstop: The last row in the range to copy.\ninsertLocation: The location to insert the copied range. If not specified,\nthe range is inserted immediately following itself.", "source": "juraj-google-style"}
{"code": "def longest_one_seg_prefix(self, word):\n        \n        match = self.seg_regex.match(word)\n        if match:\n            return match.group(0)\n        else:\n            return ''", "docstring": "Return longest IPA Unicode prefix of `word`\n\nArgs:\nword (unicode): word as IPA string\n\nReturns:\nunicode: longest single-segment prefix of `word`", "source": "juraj-google-style"}
{"code": "def __init__(self, data=None):\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        if data is None:\n            data = {}\n        self._data = data\n        self._len = 0", "docstring": "Instantiate the histogram.\n\nArgs:\ndata (Mapping[str, int]): The data strucure to be used to store\nthe underlying data. The default is an empty dictionary.\nThis can be set to a dictionary-like object if required\n(for example, if a special object is needed for\nconcurrency reasons).", "source": "juraj-google-style"}
{"code": "def offset(self, num_to_skip):\n        \n        query = query_mod.Query(self)\n        return query.offset(num_to_skip)", "docstring": "Skip to an offset in a query with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.offset` for\nmore information on this method.\n\nArgs:\nnum_to_skip (int): The number of results to skip at the beginning\nof query results. (Must be non-negative.)\n\nReturns:\n~.firestore_v1beta1.query.Query: An offset query.", "source": "juraj-google-style"}
{"code": "def add_middleware(middleware: EFBMiddleware):\n    global middlewares\n    if isinstance(middleware, EFBMiddleware):\n        middlewares.append(middleware)\n    else:\n        raise TypeError('Middleware instance is expected')", "docstring": "Register a middleware with the coordinator.\n\nArgs:\nmiddleware (EFBMiddleware): Middleware to register", "source": "codesearchnet"}
{"code": "def _TerminateProcess(self, process):\n    \n    pid = process.pid\n    logger.warning('Terminating process: (PID: {0:d}).'.format(pid))\n    process.terminate()\n\n    \n    process.join(timeout=self._PROCESS_JOIN_TIMEOUT)\n\n    if process.is_alive():\n      logger.warning('Killing process: (PID: {0:d}).'.format(pid))\n      self._KillProcess(pid)", "docstring": "Terminate a process.\n\nArgs:\nprocess (MultiProcessBaseProcess): process to terminate.", "source": "juraj-google-style"}
{"code": "def create_cloudwatch_event(app_name, env, region, rules):\n    \n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('events')\n\n    rule_name = rules.get('rule_name')\n    schedule = rules.get('schedule')\n    rule_description = rules.get('rule_description')\n    json_input = rules.get('json_input', {})\n\n    if schedule is None:\n        LOG.critical('Schedule is required and no schedule is defined!')\n        raise InvalidEventConfiguration('Schedule is required and no schedule is defined!')\n\n    if rule_name is None:\n        LOG.critical('Rule name is required and no rule_name is defined!')\n        raise InvalidEventConfiguration('Rule name is required and no rule_name is defined!')\n    else:\n        LOG.info('%s and %s', app_name, rule_name)\n        rule_name = \"{}_{}\".format(app_name, rule_name.replace(' ', '_'))\n\n    if rule_description is None:\n        rule_description = \"{} - {}\".format(app_name, rule_name)\n\n    lambda_arn = get_lambda_arn(app=app_name, account=env, region=region)\n\n    \n    account_id = get_env_credential(env=env)['accountId']\n    principal = \"events.amazonaws.com\"\n    statement_id = '{}_cloudwatch_{}'.format(app_name, rule_name)\n    source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(region, account_id, rule_name)\n    add_lambda_permissions(\n        function=lambda_arn,\n        statement_id=statement_id,\n        action='lambda:InvokeFunction',\n        principal=principal,\n        source_arn=source_arn,\n        env=env,\n        region=region, )\n\n    \n    cloudwatch_client.put_rule(\n        Name=rule_name,\n        ScheduleExpression=schedule,\n        State='ENABLED',\n        Description=rule_description, )\n\n    targets = []\n    \n    json_payload = '{}'.format(json.dumps(json_input))\n\n    target = {\n        \"Id\": app_name,\n        \"Arn\": lambda_arn,\n        \"Input\": json_payload,\n    }\n\n    targets.append(target)\n\n    put_targets_response = cloudwatch_client.put_targets(Rule=rule_name, Targets=targets)\n    LOG.debug('Cloudwatch put targets response: %s', put_targets_response)\n\n    LOG.info('Created Cloudwatch event \"%s\" with schedule: %s', rule_name, schedule)", "docstring": "Create cloudwatch event for lambda from rules.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\nrules (dict): Trigger rules from the settings", "source": "juraj-google-style"}
{"code": "def GetMACBRepresentation(self, event):\n    data_type = getattr(event, 'data_type', None)\n    if (not data_type):\n        return '....'\n    if (data_type == 'fs:stat'):\n        descriptions = event.timestamp_desc.split(';')\n        return_characters = ['.', '.', '.', '.']\n        for description in descriptions:\n            if (description in ('mtime', definitions.TIME_DESCRIPTION_MODIFICATION)):\n                return_characters[0] = 'M'\n            elif (description in ('atime', definitions.TIME_DESCRIPTION_LAST_ACCESS)):\n                return_characters[1] = 'A'\n            elif (description in ('ctime', definitions.TIME_DESCRIPTION_CHANGE)):\n                return_characters[2] = 'C'\n            elif (description in ('crtime', definitions.TIME_DESCRIPTION_CREATION)):\n                return_characters[3] = 'B'\n        return ''.join(return_characters)\n    if (event.timestamp_desc in [definitions.TIME_DESCRIPTION_LAST_ACCESS, definitions.TIME_DESCRIPTION_ACCOUNT_CREATED, definitions.TIME_DESCRIPTION_LAST_VISITED, definitions.TIME_DESCRIPTION_START, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN, definitions.TIME_DESCRIPTION_LAST_LOGIN, definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET, definitions.TIME_DESCRIPTION_LAST_CONNECTED, definitions.TIME_DESCRIPTION_LAST_RUN, definitions.TIME_DESCRIPTION_LAST_PRINTED]):\n        return '.A..'\n    if (event.timestamp_desc in [definitions.TIME_DESCRIPTION_MODIFICATION, definitions.TIME_DESCRIPTION_WRITTEN, definitions.TIME_DESCRIPTION_DELETED]):\n        return 'M...'\n    if (event.timestamp_desc in [definitions.TIME_DESCRIPTION_CREATION, definitions.TIME_DESCRIPTION_ADDED, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED, definitions.TIME_DESCRIPTION_FIRST_CONNECTED]):\n        return '...B'\n    if (event.timestamp_desc in [definitions.TIME_DESCRIPTION_CHANGE, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION]):\n        return '..C.'\n    return '....'", "docstring": "Retrieves the MACB representation.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: MACB representation.", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n        hidden_states = outputs[0]\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.model.variables['params']['shared']['embedding']\n            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n        else:\n            lm_logits = module.lm_head(hidden_states)\n        lm_logits += module.final_logits_bias.astype(self.dtype)\n        return (lm_logits, outputs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is None:\n        lm_logits, decoder_outputs = outputs\n    else:\n        (lm_logits, decoder_outputs), past = outputs\n    if return_dict:\n        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n    else:\n        outputs = (lm_logits,) + decoder_outputs[1:]\n    if past_key_values is not None and return_dict:\n        outputs['past_key_values'] = 
unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration\n\n>>> model = FlaxMBartForConditionalGeneration.from_pretrained(\"facebook/mbart-large-cc25\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/mbart-large-cc25\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```", "source": "github-repos"}
{"code": "def prune(self, regex='.*'):\n    return filetree(self.root, ignore=self.ignore, regex=regex)", "docstring": "Prune leaves of filetree according to specified\nregular expression.\n\nArgs:\nregex (str): Regular expression to use in pruning tree.", "source": "codesearchnet"}
{"code": "def pred_to_prob(Y_h, k):\n    \n    Y_h = Y_h.clone()\n    if Y_h.dim() > 1:\n        Y_h = Y_h.squeeze()\n    assert Y_h.dim() == 1\n    assert (Y_h >= 1).all()\n    assert (Y_h <= k).all()\n    n = Y_h.shape[0]\n    Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)\n    for i, j in enumerate(Y_h):\n        Y_s[i, j - 1] = 1.0\n    return Y_s", "docstring": "Converts a 1D tensor of predicted labels into a 2D tensor of probabilistic labels\n\nArgs:\nY_h: an [n], or [n,1] tensor of predicted (int) labels in {1,...,k}\nk: the largest possible label in Y_h\nReturns:\nY_s: a torch.FloatTensor of shape [n, k] where Y_s[i, j-1] is the probabilistic\nlabel for item i and label j", "source": "juraj-google-style"}
{"code": "def _openfile(instance, filething, filename, fileobj, writable, create):\n    \n\n    assert not create or writable\n\n    \n    if isinstance(filething, FileThing):\n        filename = filething.filename\n        fileobj = filething.fileobj\n        filething = None\n\n    if filething is not None:\n        if is_fileobj(filething):\n            fileobj = filething\n        elif hasattr(filething, \"__fspath__\"):\n            filename = filething.__fspath__()\n            if not isinstance(filename, (bytes, text_type)):\n                raise TypeError(\"expected __fspath__() to return a filename\")\n        else:\n            filename = filething\n\n    if instance is not None:\n        \n        if not writable:\n            instance.filename = filename\n        elif filename is None:\n            filename = getattr(instance, \"filename\", None)\n\n    if fileobj is not None:\n        verify_fileobj(fileobj, writable=writable)\n        yield FileThing(fileobj, filename, filename or fileobj_name(fileobj))\n    elif filename is not None:\n        verify_filename(filename)\n\n        inmemory_fileobj = False\n        try:\n            fileobj = open(filename, \"rb+\" if writable else \"rb\")\n        except IOError as e:\n            if writable and e.errno == errno.EOPNOTSUPP:\n                \n                \n                \n                \n                \n                try:\n                    with open(filename, \"rb\") as fileobj:\n                        fileobj = BytesIO(fileobj.read())\n                except IOError as e2:\n                    raise MutagenError(e2)\n                inmemory_fileobj = True\n            elif create and e.errno == errno.ENOENT:\n                assert writable\n                try:\n                    fileobj = open(filename, \"wb+\")\n                except IOError as e2:\n                    raise MutagenError(e2)\n            else:\n                raise MutagenError(e)\n\n        with fileobj as fileobj:\n            yield FileThing(fileobj, filename, filename)\n\n            if inmemory_fileobj:\n                assert writable\n                data = fileobj.getvalue()\n                try:\n                    with open(filename, \"wb\") as fileobj:\n                        fileobj.write(data)\n                except IOError as e:\n                    raise MutagenError(e)\n    else:\n        raise TypeError(\"Missing filename or fileobj argument\")", "docstring": "yields a FileThing\n\nArgs:\nfilething: Either a file name, a file object or None\nfilename: Either a file name or None\nfileobj: Either a file object or None\nwritable (bool): if the file should be opened\ncreate (bool): if the file should be created if it doesn't exist.\nimplies writable\nRaises:\nMutagenError: In case opening the file failed\nTypeError: in case neither a file name or a file object is passed", "source": "juraj-google-style"}
{"code": "def add_http_endpoint(self, url, request_handler):\n    self.app.router.add_route('*', url, request_handler)", "docstring": "This method provides a programatic way of added invidual routes\nto the http server.\n\nArgs:\nurl (str): the url to be handled by the request_handler\nrequest_handler (nautilus.network.RequestHandler): The request handler", "source": "codesearchnet"}
{"code": "def unzip(input_layer, split_dim=0, num_splits=2):\n    shape = input_layer.shape\n    _check_split_dims(num_splits, split_dim, shape)\n    splits = functions.unzip(input_layer, split_dim, shape[split_dim], num_splits)\n    return input_layer.with_sequence(splits)", "docstring": "Unzips this Tensor along the split_dim into num_splits Equal chunks.\n\nExamples:\n\n* `[1, 2, 3, 4] -> [1, 3], [2, 4]`\n* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [3, 3]], [[2, 2], [4, 4]]`\n\nArgs:\ninput_layer: The chainable object, supplied.\nsplit_dim: The dimension to split along. Defaults to batch.\nnum_splits: The number of splits.\nReturns:\nA list of PrettyTensors.\nRaises:\nValueError: If split_dim is out of range or isn't divided evenly by\nnum_splits.", "source": "codesearchnet"}
{"code": "def Trim(self, flags):\n    logger.info('Trimming!')\n    flags = bytearray(flags)\n    length = (1 << (self.Depth - 1))\n    while (len(flags) < length):\n        flags.append(0)\n    MerkleTree._TrimNode(self.Root, 0, self.Depth, flags)", "docstring": "Trim the nodes from the tree keeping only the root hash.\n\nArgs:\nflags: \"0000\" for trimming, any other value for keeping the nodes.", "source": "codesearchnet"}
{"code": "def _get_graph(self):\n    with self._lock:\n        return self._graph", "docstring": "Returns pydot.Dot object for the pipeline graph.\n\nThe purpose of this method is to avoid accessing the graph while it is\nupdated. No one except for this method should be accessing _graph directly.\n\nReturns:\n(pydot.Dot)", "source": "github-repos"}
{"code": "def stChromagram(signal, fs, win, step, PLOT=False):\n    \n    win = int(win)\n    step = int(step)\n    signal = numpy.double(signal)\n    signal = signal / (2.0 ** 15)\n    DC = signal.mean()\n    MAX = (numpy.abs(signal)).max()\n    signal = (signal - DC) / (MAX - DC)\n\n    N = len(signal)        \n    cur_p = 0\n    count_fr = 0\n    nfft = int(win / 2)\n    nChroma, nFreqsPerChroma = stChromaFeaturesInit(nfft, fs)\n    chromaGram = numpy.array([], dtype=numpy.float64)\n\n    while (cur_p + win - 1 < N):\n        count_fr += 1\n        x = signal[cur_p:cur_p + win]\n        cur_p = cur_p + step\n        X = abs(fft(x))\n        X = X[0:nfft]\n        X = X / len(X)\n        chromaNames, C = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma)\n        C = C[:, 0]\n        if count_fr == 1:\n            chromaGram = C.T\n        else:\n            chromaGram = numpy.vstack((chromaGram, C.T))\n    FreqAxis = chromaNames\n    TimeAxis = [(t * step) / fs for t in range(chromaGram.shape[0])]\n\n    if (PLOT):\n        fig, ax = plt.subplots()\n        chromaGramToPlot = chromaGram.transpose()[::-1, :]\n        Ratio = int(chromaGramToPlot.shape[1] / (3*chromaGramToPlot.shape[0]))\n        if Ratio < 1:\n            Ratio = 1\n        chromaGramToPlot = numpy.repeat(chromaGramToPlot, Ratio, axis=0)\n        imgplot = plt.imshow(chromaGramToPlot)\n        fstep = int(nfft / 5.0)\n\n\n        ax.set_yticks(range(int(Ratio / 2), len(FreqAxis) * Ratio, Ratio))\n        ax.set_yticklabels(FreqAxis[::-1])\n        TStep = int(count_fr / 3)\n        TimeTicks = range(0, count_fr, TStep)\n        TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks]\n        ax.set_xticks(TimeTicks)\n        ax.set_xticklabels(TimeTicksLabels)\n        ax.set_xlabel('time (secs)')\n        imgplot.set_cmap('jet')\n        plt.colorbar()\n        plt.show()\n\n    return (chromaGram, TimeAxis, FreqAxis)", "docstring": "Short-term FFT mag for spectogram estimation:\nReturns:\na numpy array (nFFT x numOfShortTermWindows)\nARGUMENTS:\nsignal:      the input signal samples\nfs:          the sampling freq (in Hz)\nwin:         the short-term window size (in samples)\nstep:        the short-term window step (in samples)\nPLOT:        flag, 1 if results are to be ploted\nRETURNS:", "source": "juraj-google-style"}
{"code": "def from_file(cls, path, fields=None, encoding='utf-8'):\n    path = _table_filename(path)\n    if (fields is None):\n        fields = _get_relation_from_table_path(path)\n    table = cls(fields)\n    table.attach(path, encoding=encoding)\n    return table", "docstring": "Instantiate a Table from a database file.\n\nThis method instantiates a table attached to the file at *path*.\nThe file will be opened and traversed to determine the number of\nrecords, but the contents will not be stored in memory unless\nthey are modified.\n\nArgs:\npath: the path to the table file\nfields: the Relation schema for the table (loaded from the\nrelations file in the same directory if not given)\nencoding: the character encoding of the file at *path*", "source": "codesearchnet"}
{"code": "def _MergeOptional(self, a, b):\n    if (a and b):\n        if (a != b):\n            raise MergeError((\"values must be identical if both specified ('%s' vs '%s')\" % (transitfeed.EncodeUnicode(a), transitfeed.EncodeUnicode(b))))\n    return (a or b)", "docstring": "Tries to merge two values which may be None.\n\nIf both values are not None, they are required to be the same and the\nmerge is trivial. If one of the values is None and the other is not None,\nthe merge results in the one which is not None. If both are None, the merge\nresults in None.\n\nArgs:\na: The first value.\nb: The second value.\n\nReturns:\nThe merged value.\n\nRaises:\nMergeError: If both values are not None and are not the same.", "source": "codesearchnet"}
{"code": "def get_catalog(self, catalog_id):\n    return self._load_data(self.CATALOGS_ENDPOINT, default=[], resource_id=catalog_id)", "docstring": "Return specified course catalog.\n\nReturns:\ndict: catalog details if it is available for the user.", "source": "codesearchnet"}
{"code": "async def _auth_login(self, username, password):\n    mechanism = 'LOGIN'\n    (code, message) = (await self.do_cmd('AUTH', mechanism, SMTP.b64enc(username), success=(334,)))\n    try:\n        (code, message) = (await self.do_cmd(SMTP.b64enc(password), success=(235, 503)))\n    except SMTPCommandFailedError as e:\n        raise SMTPAuthenticationError(e.code, e.message, mechanism)\n    return (code, message)", "docstring": "Performs an authentication attempt using the LOGIN mechanism.\n\nProtocol:\n\n1. The username is base64-encoded ;\n2. The string 'AUTH LOGIN' and a space character are prepended to\nthe base64-encoded username and sent to the server ;\n3. If the server replies with a 334 return code, we can go on:\n\n1) The password is base64-encoded and sent to the server ;\n2) If the server replies with a 235 return code, the user is\nauthenticated.\n\nArgs:\nusername (str): Identifier of the user trying to authenticate.\npassword (str): Password for the user.\n\nRaises:\nConnectionResetError: If the connection with the server is\nunexpectedely lost.\nSMTPAuthenticationError: If the authentication attempt fails.\n\nReturns:\n(int, str): A (code, message) 2-tuple containing the server\nresponse.", "source": "codesearchnet"}
{"code": "def _create_interval_filter(interval):\n\n    def filter_fn(value):\n        if ((not isinstance(value, six.integer_types)) and (not isinstance(value, float))):\n            raise error.HParamsError(('Cannot use an interval filter for a value of type: %s, Value: %s' % (type(value), value)))\n        return ((interval.min_value <= value) and (value <= interval.max_value))\n    return filter_fn", "docstring": "Returns a function that checkes whether a number belongs to an interval.\n\nArgs:\ninterval: A tensorboard.hparams.Interval protobuf describing the interval.\nReturns:\nA function taking a number (a float or an object of a type in\nsix.integer_types) that returns True if the number belongs to (the closed)\n'interval'.", "source": "codesearchnet"}
{"code": "def find_distinct(self, collection, key):\n        \n        obj = getattr(self.db, collection)\n        result = obj.distinct(key)\n        return result", "docstring": "Search a collection for the distinct key values provided.\n\nArgs:\ncollection: The db collection. See main class documentation.\nkey: The name of the key to find distinct values. For example with\nthe indicators collection, the key could be \"type\".\nReturns:\nList of distinct values.", "source": "juraj-google-style"}
{"code": "def __init__(self, fn, fullargspec=None):\n    if not callable(fn):\n        raise TypeError('Expected a callable object instead of: %r' % fn)\n    self._fn = fn\n    self._fullargspec = fullargspec\n    if isinstance(fn, (types.BuiltinFunctionType, types.MethodType, types.FunctionType)):\n        self.process = fn\n    else:\n        self.process = lambda element: fn(element)\n    super().__init__()", "docstring": "Initializes a CallableWrapperDoFn object wrapping a callable.\n\nArgs:\nfn: A callable object.\n\nRaises:\nTypeError: if fn parameter is not a callable type.", "source": "github-repos"}
{"code": "def _GetLines(line_strings):\n    lines = []\n    for line_string in line_strings:\n        line = list(map(int, line_string.split('-', 1)))\n        if line[0] < 1:\n            raise errors.YapfError('invalid start of line range: %r' % line)\n        if line[0] > line[1]:\n            raise errors.YapfError('end comes before start in line range: %r' % line)\n        lines.append(tuple(line))\n    return lines", "docstring": "Parses the start and end lines from a line string like 'start-end'.\n\nArguments:\nline_strings: (array of string) A list of strings representing a line\nrange like 'start-end'.\n\nReturns:\nA list of tuples of the start and end line numbers.\n\nRaises:\nValueError: If the line string failed to parse or was an invalid line range.", "source": "github-repos"}
{"code": "def get_cluster_interfaces(cluster, extra_cond=(lambda nic: True)):\n    nics = get_nics(cluster)\n    nics = [(nic['device'], nic['name']) for nic in nics if (nic['mountable'] and (nic['interface'] == 'Ethernet') and (not nic['management']) and extra_cond(nic))]\n    nics = sorted(nics)\n    return nics", "docstring": "Get the network interfaces names corresponding to a criteria.\n\nNote that the cluster is passed (not the individual node names), thus it is\nassumed that all nodes in a cluster have the same interface names same\nconfiguration. In addition to ``extra_cond``, only the mountable and\nEhernet interfaces are returned.\n\nArgs:\ncluster(str): the cluster to consider\nextra_cond(lambda): boolean lambda that takes the nic(dict) as\nparameter", "source": "codesearchnet"}
{"code": "def __init__(self, forward_core, backward_core, name=\"bidir_rnn\"):\n    \n    super(BidirectionalRNN, self).__init__(name=name)\n    self._forward_core = forward_core\n    self._backward_core = backward_core\n    def _is_recurrent(core):\n      has_rnn_core_interface = (hasattr(core, \"initial_state\") and\n                                hasattr(core, \"output_size\") and\n                                hasattr(core, \"state_size\"))\n      return isinstance(core, rnn_core.RNNCore) or has_rnn_core_interface\n    if not(_is_recurrent(forward_core) and _is_recurrent(backward_core)):\n      raise ValueError(\"Forward and backward cores must both be instances of\"\n                       \"RNNCore.\")", "docstring": "Construct a Bidirectional RNN core.\n\nArgs:\nforward_core: callable RNNCore module that computes forward states.\nbackward_core: callable RNNCore module that computes backward states.\nname: name of the module.\n\nRaises:\nValueError: if not all the modules are recurrent.", "source": "juraj-google-style"}
{"code": "def _from_components(self, components):\n    raise NotImplementedError('%s._from_components()' % type(self).__name__)", "docstring": "Reconstructs a value from a nested structure of Tensor/CompositeTensor.\n\nArgs:\ncomponents: A nested structure of `tf.Tensor` or `tf.CompositeTensor`,\ncompatible with `self._component_specs`.  (Caller is responsible for\nensuring compatibility.)\n\nReturns:\nA value that is compatible with this `TypeSpec`.", "source": "github-repos"}
{"code": "def helper_list(access_token, oid, path):\n    if (oid != ''):\n        path = ''.join([path, \"('\", oid, \"')\"])\n    endpoint = ''.join([ams_rest_endpoint, path])\n    return do_ams_get(endpoint, path, access_token)", "docstring": "Helper Function to list a URL path.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\noid (str): An OID.\npath (str): A URL Path.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def unstage_signature(vcs, signature):\n    \n    evidence_path = _get_staged_history_path(vcs)\n    staged = get_staged_signatures(vcs)\n    if signature not in staged:\n        raise NotStagedError\n    staged.remove(signature)\n    string = '\\n'.join(staged)\n    with open(evidence_path, 'w') as f:\n        f.write(string)", "docstring": "Remove `signature` from the list of staged signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (basestring)\n\nRaises:\nNotStagedError", "source": "juraj-google-style"}
{"code": "def get_object(cls, api_token, id):\n        \n        load_balancer = cls(token=api_token, id=id)\n        load_balancer.load()\n        return load_balancer", "docstring": "Class method that will return a LoadBalancer object by its ID.\n\nArgs:\napi_token (str): DigitalOcean API token\nid (str): Load Balancer ID", "source": "juraj-google-style"}
{"code": "def __init__(self, message):\n        \n        super(CryptographicFailure, self).__init__(\n            reason=enums.ResultReason.CRYPTOGRAPHIC_FAILURE,\n            message=message\n        )", "docstring": "Create a CryptographicFailure exception.\n\nArgs:\nmessage (string): A string containing information about the error.", "source": "juraj-google-style"}
{"code": "def element(self, using, value):\n        \n        return self._execute(Command.FIND_CHILD_ELEMENT, {\n            'using': using,\n            'value': value\n        })", "docstring": "find an element in the current element.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nWebElement Object.\n\nRaises:\nWebDriverException.", "source": "juraj-google-style"}
{"code": "def get_pending_reboot():\n    checks = (get_pending_update, get_pending_file_rename, get_pending_servermanager, get_pending_component_servicing, get_reboot_required_witnessed, get_pending_computer_name, get_pending_domain_join)\n    for check in checks:\n        if check():\n            return True\n    return False", "docstring": "Determine whether there is a reboot pending.\n\n.. versionadded:: 2016.11.0\n\nReturns:\nbool: ``True`` if the system is pending reboot, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' system.get_pending_reboot", "source": "codesearchnet"}
{"code": "def _BuildIntersection(self, type_list):\n    type_list = tuple(type_list)\n    if len(type_list) == 1:\n        return type_list[0]\n    else:\n        return ' and '.join(type_list)", "docstring": "Builds a intersection of the types in type_list.\n\nArgs:\ntype_list: A list of strings representing types.\n\nReturns:\nA string representing the intersection of the types in type_list.\nSimplifies Intersection[X] to X and Intersection[X, None] to Optional[X].", "source": "github-repos"}
{"code": "def get_watermarks(self, applied_ptransform: AppliedPTransform) -> '_TransformWatermarks':\n    while applied_ptransform.parts:\n        applied_ptransform = applied_ptransform.parts[-1]\n    return self._transform_to_watermarks[applied_ptransform]", "docstring": "Gets the input and output watermarks for an AppliedPTransform.\n\nIf the applied_ptransform has not processed any elements, return a\nwatermark with minimum value.\n\nArgs:\napplied_ptransform: AppliedPTransform to get the watermarks for.\n\nReturns:\nA snapshot (TransformWatermarks) of the input watermark and output\nwatermark for the provided transform.", "source": "github-repos"}
{"code": "def _AssertProtoDictEquals(self, expected_dict, actual_dict, verbose=False, update_goldens=False, additional_missing_object_message='', api_version=2):\n    diffs = []\n    verbose_diffs = []\n    expected_keys = set(expected_dict.keys())\n    actual_keys = set(actual_dict.keys())\n    only_in_expected = expected_keys - actual_keys\n    only_in_actual = actual_keys - expected_keys\n    all_keys = expected_keys | actual_keys\n    updated_keys = []\n    for key in all_keys:\n        diff_message = ''\n        verbose_diff_message = ''\n        if key in only_in_expected:\n            diff_message = 'Object %s expected but not found (removed). %s' % (key, additional_missing_object_message)\n            verbose_diff_message = diff_message\n        elif key in only_in_actual:\n            diff_message = 'New object %s found (added).' % key\n            verbose_diff_message = diff_message\n        else:\n            self.maxDiff = None\n            try:\n                self.assertProtoEquals(expected_dict[key], actual_dict[key])\n            except AssertionError as e:\n                updated_keys.append(key)\n                diff_message = 'Change detected in python object: %s.' % key\n                verbose_diff_message = str(e)\n        if diff_message:\n            diffs.append(diff_message)\n            verbose_diffs.append(verbose_diff_message)\n    if diffs:\n        diff_count = len(diffs)\n        logging.error(self._test_readme_message)\n        logging.error('%d differences found between API and golden.', diff_count)\n        if update_goldens:\n            logging.warning(self._update_golden_warning)\n            for key in only_in_expected:\n                filepath = _KeyToFilePath(key, api_version)\n                file_io.delete_file(filepath)\n            for key in only_in_actual | set(updated_keys):\n                filepath = _KeyToFilePath(key, api_version)\n                file_io.write_string_to_file(filepath, text_format.MessageToString(actual_dict[key]))\n        else:\n            for d, verbose_d in zip(diffs, verbose_diffs):\n                logging.error('    %s', d)\n                logging.error('    %s', verbose_d)\n            self.fail('%d differences found between API and golden.' % diff_count)\n    else:\n        logging.info('No differences found between API and golden.')", "docstring": "Diff given dicts of protobufs and report differences a readable way.\n\nArgs:\nexpected_dict: a dict of TFAPIObject protos constructed from golden files.\nactual_dict: a dict of TFAPIObject protos constructed by reading from the\nTF package linked to the test.\nverbose: Whether to log the full diffs, or simply report which files were\ndifferent.\nupdate_goldens: Whether to update goldens when there are diffs found.\nadditional_missing_object_message: Message to print when a symbol is\nmissing.\napi_version: TensorFlow API version to test.", "source": "github-repos"}
{"code": "def wait(animation='elipses', text='', speed=0.2):\n    \n    def decorator(func):\n        func.animation = animation\n        func.speed = speed\n        func.text = text\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            animation = func.animation\n            text = func.text\n            if not isinstance(animation, (list, tuple)) and \\\n                    not hasattr(animations, animation):\n                text = animation if text == '' else text\n                animation = 'elipses'\n            wait = Wait(animation=animation, text=text, speed=func.speed)\n            wait.start()\n            try:\n                ret = func(*args, **kwargs)\n            finally:\n                wait.stop()\n            sys.stdout.write('\\n')\n            return ret\n        return wrapper\n    return decorator", "docstring": "Decorator for adding wait animation to long running\nfunctions.\n\nArgs:\nanimation (str, tuple): String reference to animation or tuple\nwith custom animation.\nspeed (float): Number of seconds each cycle of animation.\n\nExamples:\n>>> @animation.wait('bar')\n>>> def long_running_function():\n>>>     ... 5 seconds later ...\n>>>     return", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[numpy.ndarray], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    return self._inference_fn(batch, model, inference_args)", "docstring": "Runs inferences on a batch of 2d numpy arrays.\n\nArgs:\nbatch: A sequence of examples as 2d numpy arrays. Each\nrow in an array is a single example. The dimensions\nmust match the dimensions of the data used to train\nthe model.\nmodel: XGBoost booster or XBGModel (sklearn interface). Must\nimplement predict(X). Where the parameter X is a 2d numpy array.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def _get_data(self) -> BaseFrameManager:\n\n    def iloc(partition, row_internal_indices, col_internal_indices):\n        return partition.iloc[(row_internal_indices, col_internal_indices)]\n    masked_data = self.parent_data.apply_func_to_indices_both_axis(func=iloc, row_indices=self.index_map.values, col_indices=self.columns_map.values, lazy=False, keep_remaining=False)\n    return masked_data", "docstring": "Perform the map step\n\nReturns:\nA BaseFrameManager object.", "source": "codesearchnet"}
{"code": "def _wrap_definition_section(source, width):\n    index = (source.index('\\n') + 1)\n    (definitions, max_len) = _get_definitions(source[index:])\n    sep = ('\\n' + (' ' * (max_len + 4)))\n    lines = [source[:index].strip()]\n    for (arg, desc) in six.iteritems(definitions):\n        wrapped_desc = sep.join(textwrap.wrap(desc, ((width - max_len) - 4)))\n        lines.append('  {arg:{size}}  {desc}'.format(arg=arg, size=str(max_len), desc=wrapped_desc))\n    return '\\n'.join(lines)", "docstring": "Wrap the given definition section string to the current terminal size.\n\nNote:\nAuto-adjusts the spacing between terms and definitions.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "codesearchnet"}
{"code": "def proportional_char(self, action):\n        \n        actions = {'off': 0,\n                   'on': 1\n                   }\n        if action in actions:\n            self.send(chr(27)+'p'+action)\n        else:\n            raise RuntimeError('Invalid action in function proportionalChar')", "docstring": "Specifies proportional characters. When turned on, the character spacing set\nwith charSpacing.\n\nArgs:\naction: Turn proportional characters on or off.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"}
{"code": "def _delete_gridfs_data(self, data):\n        \n        if isinstance(data, ObjectId):\n            if self._gridfs.exists({\"_id\": data}):\n                self._gridfs.delete(data)\n            else:\n                raise DataStoreGridfsIdInvalid()\n        elif isinstance(data, list):\n            for item in data:\n                self._delete_gridfs_data(item)\n        elif isinstance(data, dict):\n            for key, item in data.items():\n                self._delete_gridfs_data(item)", "docstring": "Delete all GridFS data that is linked by fields in the specified data.\n\nArgs:\ndata: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object\nfor any ObjectID is deleted.", "source": "juraj-google-style"}
{"code": "def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None):\n    flow_obj = aff4.FACTORY.Open(flow_id, aff4_type=GRRFlow, mode='rw', token=token)\n    if (not flow_obj):\n        raise FlowError(('Could not terminate flow %s' % flow_id))\n    with flow_obj:\n        runner = flow_obj.GetRunner()\n        if (not runner.IsRunning()):\n            return\n        if (token is None):\n            token = access_control.ACLToken()\n        if (reason is None):\n            reason = 'Manual termination by console.'\n        runner.Error(reason, status_code=status)\n        flow_obj.Log('Terminated by user {0}. Reason: {1}'.format(token.username, reason))\n        super_token = token.SetUID()\n        children_to_kill = aff4.FACTORY.MultiOpen(flow_obj.ListChildren(), token=super_token, aff4_type=GRRFlow)\n        for child_obj in children_to_kill:\n            cls.TerminateAFF4Flow(child_obj.urn, reason='Parent flow terminated.', token=super_token)", "docstring": "Terminate a flow.\n\nArgs:\nflow_id: The flow session_id to terminate.\nreason: A reason to log.\nstatus: Status code used in the generated status message.\ntoken: The access token to be used for this request.\n\nRaises:\nFlowError: If the flow can not be found.", "source": "codesearchnet"}
{"code": "def hflip(img):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    return img.transpose(Image.FLIP_LEFT_RIGHT)", "docstring": "Horizontally flip the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be flipped.\n\nReturns:\nPIL Image:  Horizontall flipped image.", "source": "codesearchnet"}
{"code": "def process_tree(self, root_directory, output_root_directory, copy_other_files):\n    if output_root_directory == root_directory:\n        return self.process_tree_inplace(root_directory)\n    if output_root_directory and os.path.exists(output_root_directory):\n        print('Output directory %r must not already exist.' % output_root_directory)\n        sys.exit(1)\n    norm_root = os.path.split(os.path.normpath(root_directory))\n    norm_output = os.path.split(os.path.normpath(output_root_directory))\n    if norm_root == norm_output:\n        print('Output directory %r same as input directory %r' % (root_directory, output_root_directory))\n        sys.exit(1)\n    files_to_process = []\n    files_to_copy = []\n    for dir_name, _, file_list in os.walk(root_directory):\n        py_files = [f for f in file_list if f.endswith('.py')]\n        copy_files = [f for f in file_list if not f.endswith('.py')]\n        for filename in py_files:\n            fullpath = os.path.join(dir_name, filename)\n            fullpath_output = os.path.join(output_root_directory, os.path.relpath(fullpath, root_directory))\n            files_to_process.append((fullpath, fullpath_output))\n        if copy_other_files:\n            for filename in copy_files:\n                fullpath = os.path.join(dir_name, filename)\n                fullpath_output = os.path.join(output_root_directory, os.path.relpath(fullpath, root_directory))\n                files_to_copy.append((fullpath, fullpath_output))\n    file_count = 0\n    tree_errors = {}\n    report = ''\n    report += '=' * 80 + '\\n'\n    report += 'Input tree: %r\\n' % root_directory\n    report += '=' * 80 + '\\n'\n    for input_path, output_path in files_to_process:\n        output_directory = os.path.dirname(output_path)\n        if not os.path.isdir(output_directory):\n            os.makedirs(output_directory)\n        if os.path.islink(input_path):\n            link_target = os.readlink(input_path)\n            link_target_output = os.path.join(output_root_directory, os.path.relpath(link_target, root_directory))\n            if (link_target, link_target_output) in files_to_process:\n                os.symlink(link_target_output, output_path)\n            else:\n                report += 'Copying symlink %s without modifying its target %s' % (input_path, link_target)\n                os.symlink(link_target, output_path)\n            continue\n        file_count += 1\n        _, l_report, l_errors = self.process_file(input_path, output_path)\n        tree_errors[input_path] = l_errors\n        report += l_report\n    for input_path, output_path in files_to_copy:\n        output_directory = os.path.dirname(output_path)\n        if not os.path.isdir(output_directory):\n            os.makedirs(output_directory)\n        shutil.copy(input_path, output_path)\n    return (file_count, report, tree_errors)", "docstring": "Processes upgrades on an entire tree of python files in place.\n\nNote that only Python files. If you have custom code in other languages,\nyou will need to manually upgrade those.\n\nArgs:\nroot_directory: Directory to walk and process.\noutput_root_directory: Directory to use as base.\ncopy_other_files: Copy files that are not touched by this converter.\n\nReturns:\nA tuple of files processed, the report string for all files, and a dict\nmapping filenames to errors encountered in that file.", "source": "github-repos"}
{"code": "def list_all_eq_to(list_, val, strict=True):\n    if (util_type.HAVE_NUMPY and isinstance(val, np.ndarray)):\n        return all([np.all((item == val)) for item in list_])\n    try:\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', category=FutureWarning)\n            flags = [(item == val) for item in list_]\n            return all([(np.all(flag) if hasattr(flag, '__array__') else flag) for flag in flags])\n    except ValueError:\n        if (not strict):\n            return all([(repr(item) == repr(val)) for item in list_])\n        else:\n            raise", "docstring": "checks to see if list is equal everywhere to a value\n\nArgs:\nlist_ (list):\nval : value to check against\n\nReturns:\nTrue if all items in the list are equal to val", "source": "codesearchnet"}
{"code": "def read_tabular(filepath):\n    (_, fn, ext) = splitext2(filepath)\n    if (ext == '.h5'):\n        return _read_tabular_h5(filepath)\n    elif (ext == '.pkl'):\n        return _read_tabular_pickle(filepath)\n    else:\n        raise NotImplementedError", "docstring": "Read tabular object in HDF5 or pickle format\n\nArgs:\nfilepath (path-like): path to read to; must end in '.h5' or '.pkl'", "source": "codesearchnet"}
{"code": "def _deserialize(self, entity, p, unused_depth=1):\n    \n    if p.meaning() == entity_pb.Property.EMPTY_LIST:\n      self._store_value(entity, [])\n      return\n\n    val = self._db_get_value(p.value(), p)\n    if val is not None:\n      val = _BaseValue(val)\n\n    \n    \n    \n\n    \n    \n    \n    \n\n    if self._repeated:\n      if self._has_value(entity):\n        value = self._retrieve_value(entity)\n        assert isinstance(value, list), repr(value)\n        value.append(val)\n      else:\n        \n        value = [val]\n    else:\n      value = val\n    self._store_value(entity, value)", "docstring": "Internal helper to deserialize this property from a protocol buffer.\n\nSubclasses may override this method.\n\nArgs:\nentity: The entity, a Model (subclass) instance.\np: A Property Message object (a protocol buffer).\ndepth: Optional nesting depth, default 1 (unused here, but used\nby some subclasses that override this method).", "source": "juraj-google-style"}
{"code": "def save_parameters(self, path, grad_only=False):\n        \n        params = self.get_parameters(grad_only=grad_only)\n        nn.save_parameters(path, params)", "docstring": "Save all parameters into a file with the specified format.\n\nCurrently hdf5 and protobuf formats are supported.\n\nArgs:\npath : path or file object\ngrad_only (bool, optional): Return parameters with `need_grad` option as `True`.", "source": "juraj-google-style"}
{"code": "def add_gene_panel(self, panel_obj):\n        \n        panel_name = panel_obj['panel_name']\n        panel_version = panel_obj['version']\n        display_name = panel_obj.get('display_name', panel_name)\n\n        if self.gene_panel(panel_name, panel_version):\n            raise IntegrityError(\"Panel {0} with version {1} already\"\n                                 \" exist in database\".format(panel_name, panel_version))\n        LOG.info(\"loading panel {0}, version {1} to database\".format(\n            display_name, panel_version\n        ))\n        result = self.panel_collection.insert_one(panel_obj)\n        LOG.debug(\"Panel saved\")\n        return result.inserted_id", "docstring": "Add a gene panel to the database\n\nArgs:\npanel_obj(dict)", "source": "juraj-google-style"}
{"code": "def get_value_for_datastore(self, model_instance):\n    value = super(JsonProperty, self).get_value_for_datastore(model_instance)\n    if (not value):\n        return None\n    json_value = value\n    if (not isinstance(value, dict)):\n        json_value = value.to_json()\n    if (not json_value):\n        return None\n    return datastore_types.Text(json.dumps(json_value, sort_keys=True, cls=JsonEncoder))", "docstring": "Gets value for datastore.\n\nArgs:\nmodel_instance: instance of the model class.\n\nReturns:\ndatastore-compatible value.", "source": "codesearchnet"}
{"code": "def split_input(cls, job_config):\n    \n    reader_params = job_config.input_reader_params\n    bucket = reader_params[cls.BUCKET_NAME_PARAM]\n    filenames = reader_params[cls.OBJECT_NAMES_PARAM]\n    delimiter = reader_params.get(cls.DELIMITER_PARAM)\n    account_id = reader_params.get(cls._ACCOUNT_ID_PARAM)\n    buffer_size = reader_params.get(cls.BUFFER_SIZE_PARAM)\n    path_filter = reader_params.get(cls.PATH_FILTER_PARAM)\n\n    \n    all_filenames = []\n    for filename in filenames:\n      if filename.endswith(\"*\"):\n        all_filenames.extend(\n            [file_stat.filename for file_stat in cloudstorage.listbucket(\n                \"/\" + bucket + \"/\" + filename[:-1], delimiter=delimiter,\n                _account_id=account_id)])\n      else:\n        all_filenames.append(\"/%s/%s\" % (bucket, filename))\n\n    \n    readers = []\n    for shard in range(0, job_config.shard_count):\n      shard_filenames = all_filenames[shard::job_config.shard_count]\n      if shard_filenames:\n        readers.append(cls(\n            shard_filenames, buffer_size=buffer_size, _account_id=account_id,\n            delimiter=delimiter, path_filter=path_filter))\n    return readers", "docstring": "Returns a list of input readers.\n\nAn equal number of input files are assigned to each shard (+/- 1). If there\nare fewer files than shards, fewer than the requested number of shards will\nbe used. Input files are currently never split (although for some formats\ncould be and may be split in a future implementation).\n\nArgs:\njob_config: map_job.JobConfig\n\nReturns:\nA list of InputReaders. None when no input data can be found.", "source": "juraj-google-style"}
{"code": "def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:\n    context = self.context_layer_norm(context)\n    latents = self.latents_layer_norm(latents)\n    batch_size, seq_length, embed_dim = context.shape[:3]\n    q = self.q_proj(latents)\n    k = self.k_proj(torch.cat([context, latents], dim=-2))\n    v = self.v_proj(torch.cat([context, latents], dim=-2))\n    q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)]\n    if self.qk_layer_norms:\n        q = self.q_layer_norm(q)\n        k = self.k_layer_norm(k)\n    scores = torch.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k)\n    stabilized_scores = scores - scores.amax(dim=-1, keepdim=True).detach()\n    attn = stabilized_scores.softmax(dim=-1)\n    resampled = torch.einsum('... i j, ... j d -> ... i d', attn, v)\n    return self.output_proj(resampled.transpose(1, 2).flatten(-2))", "docstring": "Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!\n\nArgs:\ncontext (`torch.Tensor`):\nTensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.\nlatents (`torch.Tensor`):\nTensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.\n\nReturns:\n`torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross\nfrom context.", "source": "github-repos"}
{"code": "def _wait_after(provider, job_ids, poll_interval, stop_on_failure):\n  \n\n  \n  \n  \n  \n  \n  \n\n  \n  job_ids_to_check = {j for j in job_ids if j != dsub_util.NO_JOB}\n  error_messages = []\n  while job_ids_to_check and (not error_messages or not stop_on_failure):\n    print('Waiting for: %s.' % (', '.join(job_ids_to_check)))\n\n    \n    jobs_left = _wait_for_any_job(provider, job_ids_to_check, poll_interval)\n\n    \n    jobs_completed = job_ids_to_check.difference(jobs_left)\n\n    \n    tasks_completed = provider.lookup_job_tasks({'*'}, job_ids=jobs_completed)\n\n    \n    \n    \n    dominant_job_tasks = _dominant_task_for_jobs(tasks_completed)\n    if len(dominant_job_tasks) != len(jobs_completed):\n      \n      \n      jobs_found = dsub_util.tasks_to_job_ids(dominant_job_tasks)\n      jobs_not_found = jobs_completed.difference(jobs_found)\n      for j in jobs_not_found:\n        error = '%s: not found' % j\n        print_error('  %s' % error)\n        error_messages += [error]\n\n    \n    for t in dominant_job_tasks:\n      job_id = t.get_field('job-id')\n      status = t.get_field('task-status')\n      print('  %s: %s' % (str(job_id), str(status)))\n      if status in ['FAILURE', 'CANCELED']:\n        error_messages += [provider.get_tasks_completion_messages([t])]\n\n    job_ids_to_check = jobs_left\n\n  return error_messages", "docstring": "Print status info as we wait for those jobs.\n\nBlocks until either all of the listed jobs succeed,\nor one of them fails.\n\nArgs:\nprovider: job service provider\njob_ids: a set of job IDs (string) to wait for\npoll_interval: integer seconds to wait between iterations\nstop_on_failure: whether to stop waiting if one of the tasks fails.\n\nReturns:\nEmpty list if there was no error,\na list of error messages from the failed tasks otherwise.", "source": "juraj-google-style"}
{"code": "def clone(self, name=None):\n    \n\n    if name is None:\n      name = self.module_name + \"_clone\"\n    return MLP(\n        name=name,\n        output_sizes=self.output_sizes,\n        activation=self.activation,\n        activate_final=self.activate_final,\n        initializers=self.initializers,\n        partitioners=self.partitioners,\n        regularizers=self.regularizers,\n        use_bias=self.use_bias,\n        use_dropout=self.use_dropout)", "docstring": "Creates a new MLP with the same structure.\n\nArgs:\nname: Optional string specifying the name of the new module. The default\nname is constructed by appending \"_clone\" to the original name.\n\nReturns:\nA cloned `MLP` module.", "source": "juraj-google-style"}
{"code": "def dump_json(json_info, json_file, overwrite=True):\n    \n    if overwrite:\n        mode = \"w\"\n    else:\n        mode = \"w+\"\n\n    try:\n        with open(json_file, mode) as f:\n            f.write(json.dumps(json_info))\n    except BaseException as e:\n        logging.error(e.message)", "docstring": "Dump a whole json record into the given file.\n\nOverwrite the file if the overwrite flag set.\n\nArgs:\njson_info (dict): Information dict to be dumped.\njson_file (str): File path to be dumped to.\noverwrite(boolean)", "source": "juraj-google-style"}
{"code": "def _dump_to_pages(dump):\n  \n  pos = 0\n  ret = []\n  start_tag = u\"<page>\\n\"\n  end_tag = u\"</page>\\n\"\n  while True:\n    start_pos = dump.find(start_tag, pos)\n    if start_pos == -1:\n      break\n    start_pos += len(start_tag)\n    end_pos = dump.find(end_tag, start_pos)\n    if end_pos == -1:\n      break\n    ret.append(dump[start_pos:end_pos])\n    pos = end_pos + len(end_tag)\n  return ret", "docstring": "Extract pages from an xml dump.\n\nArgs:\ndump: a unicode string\nReturns:\na list of unicode strings", "source": "juraj-google-style"}
{"code": "def generate_output_whois_nets(self, json_data=None, hr=True, show_name=False, colorize=True):\n    if (json_data is None):\n        json_data = {}\n    output = generate_output(line='0', short=(HR_WHOIS['nets']['_short'] if hr else 'nets'), name=(HR_WHOIS['nets']['_name'] if (hr and show_name) else None), is_parent=True, colorize=colorize)\n    count = 0\n    for net in json_data['nets']:\n        if (count > 0):\n            output += self.generate_output_newline(line='1', colorize=colorize)\n        count += 1\n        output += generate_output(line='1', short=net['handle'], is_parent=True, colorize=colorize)\n        for (key, val) in net.items():\n            if (val and ('\\n' in val)):\n                output += generate_output(line='2', short=(HR_WHOIS['nets'][key]['_short'] if hr else key), name=(HR_WHOIS['nets'][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)\n                for v in val.split('\\n'):\n                    output += generate_output(line='3', value=v, colorize=colorize)\n            else:\n                output += generate_output(line='2', short=(HR_WHOIS['nets'][key]['_short'] if hr else key), name=(HR_WHOIS['nets'][key]['_name'] if (hr and show_name) else None), value=val, colorize=colorize)\n    return output", "docstring": "The function for generating CLI output Legacy Whois networks results.\n\nArgs:\njson_data (:obj:`dict`): The data to process. Defaults to None.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def set_wallpaper(image):\n\t\n\n\tdesktop_env = system.get_name()\n\n\tif desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:\n\t\turi = 'file:\n\n\t\tSCHEMA = 'org.gnome.desktop.background'\n\t\tKEY = 'picture-uri'\n\n\t\tif desktop_env == 'mate':\n\t\t\turi = image\n\n\t\t\tSCHEMA = 'org.mate.background'\n\t\t\tKEY = 'picture-filename'\n\n\t\ttry:\n\t\t\tfrom gi.repository import Gio\n\n\t\t\tgsettings = Gio.Settings.new(SCHEMA)\n\t\t\tgsettings.set_string(KEY, uri)\n\t\texcept ImportError:\n\t\t\ttry:\n\t\t\t\tgsettings_proc = sp.Popen(\n\t\t\t\t\t['gsettings', 'set', SCHEMA, KEY, uri])\n\t\t\texcept:  \n\t\t\t\tsp.Popen(['mateconftool-2',\n\t\t\t\t\t\t  '-t',\n\t\t\t\t\t\t  'string',\n\t\t\t\t\t\t  '--set',\n\t\t\t\t\t\t  '/desktop/mate/background/picture_filename',\n\t\t\t\t\t\t  '%s' % image],\n\t\t\t\t\t\t stdout=sp.PIPE)\n\t\t\tfinally:\n\t\t\t\tgsettings_proc.communicate()\n\n\t\t\t\tif gsettings_proc.returncode != 0:\n\t\t\t\t\tsp.Popen(['mateconftool-2',\n\t\t\t\t\t\t\t  '-t',\n\t\t\t\t\t\t\t  'string',\n\t\t\t\t\t\t\t  '--set',\n\t\t\t\t\t\t\t  '/desktop/mate/background/picture_filename',\n\t\t\t\t\t\t\t  '%s' % image])\n\n\telif desktop_env == 'gnome2':\n\t\tsp.Popen(\n\t\t\t['gconftool-2',\n\t\t\t '-t',\n\t\t\t 'string',\n\t\t\t '--set',\n\t\t\t '/desktop/gnome/background/picture_filename',\n\t\t\t image]\n\t\t)\n\n\telif desktop_env == 'kde':\n\t\t\n\n\t\tkde_script = dedent(\n\t\t).format(image)\n\n\t\tsp.Popen(\n\t\t\t\t['dbus-send',\n\t\t\t\t '--session',\n\t\t\t\t '--dest=org.kde.plasmashell',\n\t\t\t\t '--type=method_call',\n\t\t\t\t '/PlasmaShell',\n\t\t\t\t 'org.kde.PlasmaShell.evaluateScript',\n\t\t\t\t 'string:{}'.format(kde_script)]\n\t\t)\n\n\telif desktop_env in ['kde3', 'trinity']:\n\t\targs = 'dcop kdesktop KBackgroundIface setWallpaper 0 \"%s\" 6' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'xfce4':\n\t\t\n\n\t\tlist_of_properties = system.get_cmd_out(\n\t\t\t\t['xfconf-query',\n\t\t\t\t '-R',\n\t\t\t\t '-l',\n\t\t\t\t '-c',\n\t\t\t\t 'xfce4-desktop',\n\t\t\t\t '-p',\n\t\t\t\t '/backdrop']\n\t\t)\n\n\t\tfor i in list_of_properties.split('\\n'):\n\t\t\tif i.endswith('last-image'):\n\t\t\t\t\n\t\t\t\tsp.Popen(\n\t\t\t\t\t['xfconf-query -c xfce4-desktop -p %s -s \"%s\"' %\n\t\t\t\t\t\t(i, image)],\n\t\t\t\t\tshell=True)\n\n\t\t\t\tsp.Popen(['xfdesktop --reload'], shell=True)\n\n\telif desktop_env == 'razor-qt':\n\t\tdesktop_conf = configparser.ConfigParser()\n\t\t\n\n\t\tdesktop_conf_file = os.path.join(\n\t\t\tget_config_dir('razor')[0], 'desktop.conf')\n\n\t\tif os.path.isfile(desktop_conf_file):\n\t\t\tconfig_option = r'screens\\1\\desktops\\1\\wallpaper'\n\n\t\telse:\n\t\t\tdesktop_conf_file = os.path.join(\n\t\t\t\tos.path.expanduser('~'), '.razor/desktop.conf')\n\t\t\tconfig_option = r'desktops\\1\\wallpaper'\n\n\t\tdesktop_conf.read(os.path.join(desktop_conf_file))\n\t\ttry:\n\t\t\tif desktop_conf.has_option('razor', config_option):\n\t\t\t\tdesktop_conf.set('razor', config_option, image)\n\t\t\t\twith codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f:\n\t\t\t\t\tdesktop_conf.write(f)\n\t\texcept:\n\t\t\tpass\n\n\telif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:\n\t\ttry:\n\t\t\targs = ['feh', '--bg-scale', image]\n\t\t\tsp.Popen(args)\n\t\texcept:\n\t\t\tsys.stderr.write('Error: Failed to set wallpaper with feh!')\n\t\t\tsys.stderr.write('Please make sre that You have feh installed.')\n\n\telif desktop_env == 'icewm':\n\t\targs = ['icewmbg', 
image]\n\t\tsp.Popen(args)\n\n\telif desktop_env == 'blackbox':\n\t\targs = ['bsetbg', '-full', image]\n\t\tsp.Popen(args)\n\n\telif desktop_env == 'lxde':\n\t\targs = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'lxqt':\n\t\targs = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'windowmaker':\n\t\targs = 'wmsetbg -s -u %s' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'enlightenment':\n\t\targs = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image\n\t\tsp.Popen(args, shell=True)\n\n\telif desktop_env == 'awesome':\n\t\twith sp.Popen(\"awesome-client\", stdin=sp.PIPE) as awesome_client:\n\t\t\tcommand = ('local gears = require(\"gears\"); for s = 1,'\n\t\t\t\t\t\t' screen.count() do gears.wallpaper.maximized'\n\t\t\t\t\t\t'(\"%s\", s, true); end;') % image\n\t\t\tawesome_client.communicate(input=bytes(command, 'UTF-8'))\n\n\telif desktop_env == 'windows':\n\t\tWINDOWS_SCRIPT = dedent() % image\n\n\t\twindows_script_file = os.path.join(\n\t\t\ttempfile.gettempdir(), 'wallscript.bat')\n\n\t\twith open(windows_script_file, 'w') as f:\n\t\t\tf.write(WINDOWS_SCRIPT)\n\n\t\tsp.Popen([windows_script_file], shell=True)\n\n\t\t\n\t\t\n\n\t\tSPI_SETDESKWALLPAPER = 20\n\t\tctypes.windll.user32.SystemParametersInfoA(\n\t\t\tSPI_SETDESKWALLPAPER, 0, image, 0)\n\n\telif desktop_env == 'mac':\n\t\ttry:\n\t\t\tfrom appscript import app, mactypes\n\t\t\tapp('Finder').desktop_picture.set(mactypes.File(image))\n\t\texcept ImportError:\n\t\t\tOSX_SCRIPT = dedent(\n\t\t\t\t) % image\n\n\t\t\tsp.Popen(['osascript', OSX_SCRIPT])\n\telse:\n\t\ttry:\n\t\t\tsp.Popen(['feh', '--bg-scale', image])\n\t\t\t\n\t\texcept:\n\t\t\tpass", "docstring": "Set the desktop wallpaper.\n\nSets the desktop wallpaper to an image.\n\nArgs:\nimage (str): The path to the image to be set as wallpaper.", "source": "juraj-google-style"}
{"code": "def midpoint(self):\n    midpoints = []\n    for segment in self:\n        if (len(segment) < 2):\n            midpoints.append([])\n        else:\n            midpoints.append(segment.midpoint())\n    return midpoints", "docstring": "Calculate the midpoint between locations in segments.\n\nReturns:\nlist of Point: Groups of midpoint between points in segments", "source": "codesearchnet"}
{"code": "def get_task_ops(task_type=TaskType.ALG_CTRL):\n    try:\n        return LearnToExecuteState.TASK_TYPE_OPS[task_type]\n    except KeyError:\n        raise KeyError((\"Bad task_type '%s', check config.\" % task_type))", "docstring": "Returns an operations list based on the specified task index.\n\nArgs:\ntask_type: indicates the task type used.\n\nReturns:\nList of the eligible ops.", "source": "codesearchnet"}
{"code": "def add_action_to(cls, parser, action, subactions, level):\n        \n        p = parser.add_parser(action.name,\n                              description=action.description,\n                              argument_default=argparse.SUPPRESS)\n        for arg in action.args:\n            arg.add_argument_to(p)\n\n        if subactions:\n            subparsers = cls._add_subparsers_required(p,\n                dest=settings.SUBASSISTANT_N_STRING.format(level),\n                title=cls.subactions_str,\n                description=cls.subactions_desc)\n            for subact, subsubacts in sorted(subactions.items(), key=lambda x: x[0].name):\n                cls.add_action_to(subparsers, subact, subsubacts, level + 1)", "docstring": "Adds given action to given parser\n\nArgs:\nparser: instance of devassistant_argparse.ArgumentParser\naction: devassistant.actions.Action subclass\nsubactions: dict with subactions - {SubA: {SubB: {}}, SubC: {}}", "source": "juraj-google-style"}
{"code": "def search(self, search_phrase, limit=None):\n        \n\n        query_parts = [\n            'SELECT identifier, type, name, similarity(name, :word) AS sml',\n            'FROM identifier_index',\n            'WHERE name % :word',\n            'ORDER BY sml DESC, name']\n\n        query_params = {\n            'word': search_phrase}\n\n        if limit:\n            query_parts.append('LIMIT :limit')\n            query_params['limit'] = limit\n\n        query_parts.append(';')\n\n        query = text('\\n'.join(query_parts))\n\n        self.backend.library.database.set_connection_search_path()\n\n        results = self.execute(query, **query_params).fetchall()\n\n        for result in results:\n            vid, type, name, score = result\n            yield IdentifierSearchResult(\n                score=score, vid=vid,\n                type=type, name=name)", "docstring": "Finds identifiers by search phrase.\n\nArgs:\nsearch_phrase (str or unicode):\nlimit (int, optional): how many results to return. None means without limit.\n\nReturns:\nlist of IdentifierSearchResult instances.", "source": "juraj-google-style"}
{"code": "def expo(base=2, factor=1, max_value=None):\n    \n    n = 0\n    while True:\n        a = factor * base ** n\n        if max_value is None or a < max_value:\n            yield a\n            n += 1\n        else:\n            yield max_value", "docstring": "Generator for exponential decay.\n\nArgs:\nbase: The mathematical base of the exponentiation operation\nfactor: Factor to multiply the exponentation by.\nmax_value: The maximum value to yield. Once the value in the\ntrue exponential sequence exceeds this, the value\nof max_value will forever after be yielded.", "source": "juraj-google-style"}
{"code": "async def async_fetch(url: str, **kwargs) -> Selector:\n    kwargs.setdefault('headers', DEFAULT_HEADERS)\n    async with aiohttp.ClientSession(**kwargs) as ses:\n        async with ses.get(url, **kwargs) as res:\n            html = (await res.text())\n            tree = Selector(text=html)\n            return tree", "docstring": "Do the fetch in an async style.\n\nArgs:\nurl (str): The url of the site.\n\nReturns:\nSelector: allows you to select parts of HTML text using CSS or XPath expressions.", "source": "codesearchnet"}
{"code": "def scan_servos():\n\n    \n    servos = []\n    for servo_id in range(0x00, 0xFE):\n        model = get_model(servo_id)\n        if model:\n            servos += [(servo_id, model)]\n    return servos", "docstring": "Scan for the herkulex servos connected\n\nThis function will scan for all the herkulex servos connected\nto the bus.\n\nArgs:\nnone\nReturns:\nlist: a list of tuples of the form [(id, model)]", "source": "juraj-google-style"}
{"code": "def init_from_wave_file(wavpath):\n        \n\n        try:\n            samplerate, data =  SW.read(wavpath)\n            nframes = data.shape[0]\n        except:\n            \n            \n            try:\n                w = wave.open(wavpath)\n                samplerate = w.getframerate()\n                nframes = w.getnframes()\n            except:\n                raise Exception('Cannot decode wavefile ' + wavpath)\n\n        return SVEnv(samplerate, nframes, wavpath)", "docstring": "Init a sonic visualiser environment structure based the analysis\nof the main audio file. The audio file have to be encoded in wave\n\nArgs:\nwavpath(str): the full path to the wavfile", "source": "juraj-google-style"}
{"code": "def _get_char_input_ids(self, input_ids, subwords_batch, char_count_per_id, pad_token_id=0, unk_token_id=1):\n    if not hasattr(self.generation_config, 'char_to_id'):\n        raise ValueError(\"This model generation config doesn't have a `char_to_id` key which maps\\n                characters to character ids. Make sure to load the right generation config.\")\n    batch_size = input_ids.shape[0]\n    max_len = int(char_count_per_id.sum(1).max().item())\n    char_seqs = input_ids.new_zeros((batch_size, max_len)).fill_(pad_token_id)\n    subword_lens = input_ids.ne(pad_token_id).sum(1)\n    for batch_id in range(batch_size):\n        total = 0\n        subword_indices = input_ids[batch_id, :subword_lens[batch_id]]\n        subwords = subwords_batch[batch_id][:subword_lens[batch_id]]\n        for subword_idx, subword in zip(subword_indices, subwords):\n            if subword_idx == unk_token_id:\n                char_ids = [unk_token_id]\n            else:\n                char_ids = [self.generation_config.char_to_id.get(ch, unk_token_id) for ch in list(subword)]\n            char_seq_len = len(char_ids)\n            char_seqs[batch_id, total:total + char_seq_len] = torch.tensor(char_ids).to(char_seqs)\n            total += char_seq_len\n    return char_seqs", "docstring": "Returns the corresponding character input id for each character of `subwords_batch`.\n\nArgs:\ninput_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary.\nsubwords_batch (`List[List[str]]` of shape `(batch_size, sequence_length)`):\nCorresponding text string for each input id.\nchar_count_per_id (`torch.Tensor` of shape `(batch_size, sequence_length)`):\nNumber of characters per input id.\npad_token_id (`int`, *optional*, defaults to 0):\nThe id of the _padding_ text token. If it is encountered when calculating the length of a subword\nsample, the lengths of subsequent subwords will be set to 0.\nunk_token_id (`int`, *optional*, defaults to 1):\nThe id of the _unknown_ text token. Associated to a subword of length 1.\nReturns:\n`torch.Tensor`: Tensor of shape `(batch_size, char_sequence_length)` containing the id of each character.", "source": "github-repos"}
{"code": "def _get_colors(n):\n    \n\n    import matplotlib.pyplot as plt\n    from matplotlib.colors import rgb2hex as r2h\n    from numpy import linspace\n\n    cols = linspace(0.05, .95, n)\n    cmap = plt.get_cmap('nipy_spectral')\n    return [r2h(cmap(i)) for i in cols]", "docstring": "Returns n unique and \"evenly\" spaced colors for the backgrounds\nof the projects.\n\nArgs:\nn (int): The number of unique colors wanted.\n\nReturns:\ncolors (list of str): The colors in hex form.", "source": "juraj-google-style"}
{"code": "def convert_ini(config_dict):\n    \n    config_lines = []\n\n    for env, configs in sorted(config_dict.items()):\n        for resource, app_properties in sorted(configs.items()):\n            try:\n                for app_property, value in sorted(app_properties.items()):\n                    variable = '{env}_{resource}_{app_property}'.format(\n                        env=env, resource=resource, app_property=app_property).upper()\n\n                    if isinstance(value, (dict, DeepChainMap)):\n                        safe_value = \"'{0}'\".format(json.dumps(dict(value)))\n                    else:\n                        safe_value = json.dumps(value)\n\n                    line = \"{variable}={value}\".format(variable=variable, value=safe_value)\n\n                    LOG.debug('INI line: %s', line)\n                    config_lines.append(line)\n            except AttributeError:\n                resource = resource.upper()\n                app_properties = \"'{}'\".format(json.dumps(app_properties))\n                line = '{0}={1}'.format(resource, app_properties)\n\n                LOG.debug('INI line: %s', line)\n                config_lines.append(line)\n    return config_lines", "docstring": "Convert _config_dict_ into a list of INI formatted strings.\n\nArgs:\nconfig_dict (dict): Configuration dictionary to be flattened.\n\nReturns:\n(list) Lines to be written to a file in the format of KEY1_KEY2=value.", "source": "juraj-google-style"}
{"code": "def AsDict(self, dt=True):\n    data = {}\n    if self.body:\n        data['body'] = self.body\n    if self.posted_at:\n        data['posted_at'] = self.posted_at\n    if self.user:\n        data['user'] = self.user.AsDict()\n    return data", "docstring": "A dict representation of this Comment instance.\n\nThe return value uses the same key names as the JSON representation.\n\nArgs:\ndt (bool): If True, return dates as python datetime objects. If\nFalse, return dates as ISO strings.\n\nReturn:\nA dict representing this Comment instance", "source": "codesearchnet"}
{"code": "def num_connected_components(self, unitary_only=False):\n    reg_offset = 0\n    reg_map = {}\n    if unitary_only:\n        regs = self.qregs\n    else:\n        regs = (self.qregs + self.cregs)\n    for reg in regs:\n        reg_map[reg.name] = reg_offset\n        reg_offset += reg.size\n    sub_graphs = [[bit] for bit in range(reg_offset)]\n    num_sub_graphs = len(sub_graphs)\n    for (instr, qargs, cargs) in self.data:\n        if unitary_only:\n            args = qargs\n            num_qargs = len(args)\n        else:\n            args = (qargs + cargs)\n            num_qargs = (len(args) + (1 if instr.control else 0))\n        if ((num_qargs >= 2) and (instr.name not in ['barrier', 'snapshot'])):\n            graphs_touched = []\n            num_touched = 0\n            if (instr.control and (not unitary_only)):\n                creg = instr.control[0]\n                creg_int = reg_map[creg.name]\n                for coff in range(creg.size):\n                    temp_int = (creg_int + coff)\n                    for k in range(num_sub_graphs):\n                        if (temp_int in sub_graphs[k]):\n                            graphs_touched.append(k)\n                            num_touched += 1\n                            break\n            for item in args:\n                reg_int = (reg_map[item[0].name] + item[1])\n                for k in range(num_sub_graphs):\n                    if (reg_int in sub_graphs[k]):\n                        if (k not in graphs_touched):\n                            graphs_touched.append(k)\n                            num_touched += 1\n                            break\n            if (num_touched > 1):\n                connections = []\n                for idx in graphs_touched:\n                    connections.extend(sub_graphs[idx])\n                _sub_graphs = []\n                for idx in range(num_sub_graphs):\n                    if (idx not in graphs_touched):\n                        _sub_graphs.append(sub_graphs[idx])\n                _sub_graphs.append(connections)\n                sub_graphs = _sub_graphs\n                num_sub_graphs -= (num_touched - 1)\n        if (num_sub_graphs == 1):\n            break\n    return num_sub_graphs", "docstring": "How many non-entangled subcircuits can the circuit be factored to.\n\nArgs:\nunitary_only (bool): Compute only unitary part of graph.\n\nReturns:\nint: Number of connected components in circuit.", "source": "codesearchnet"}
{"code": "def _get_contrib_features(module):\n    if isinstance(module, types.ModuleType):\n        if hasattr(module, '__path__'):\n            (yield from _get_contrib_features_from_package(module))\n        else:\n            (yield _get_contrib_feature_from_module(module))\n    else:\n        raise ValueError('Input is not a module')", "docstring": "Get contributed features from within given module\n\nBe very careful with untrusted code. The module/package will be\nwalked, every submodule will be imported, and all the code therein will be\nexecuted. But why would you be trying to import from an untrusted package\nanyway?\n\nArgs:\ncontrib (module): module (standalone or package) that contains feature\ndefinitions\n\nReturns:\nList[Feature]: list of features", "source": "codesearchnet"}
{"code": "async def create(self, coro: Coroutine) -> asyncio.Task:\n    task = asyncio.get_event_loop().create_task(coro)\n    self._tasks.add(task)\n    return task", "docstring": "Starts execution of a coroutine.\n\nThe created asyncio.Task is returned, and added to managed tasks.\nThe scheduler guarantees that it is cancelled during application shutdown,\nregardless of whether it was already cancelled manually.\n\nArgs:\ncoro (Coroutine):\nThe coroutine to be wrapped in a task, and executed.\n\nReturns:\nasyncio.Task: An awaitable Task object.\nDuring Aiohttp shutdown, the scheduler will attempt to cancel and await this task.\nThe task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.", "source": "codesearchnet"}
{"code": "def set_all_tiers(key, value, django_cache_timeout=DEFAULT_TIMEOUT):\n    DEFAULT_REQUEST_CACHE.set(key, value)\n    django_cache.set(key, value, django_cache_timeout)", "docstring": "Caches the value for the provided key in both the request cache and the\ndjango cache.\n\nArgs:\nkey (string)\nvalue (object)\ndjango_cache_timeout (int): (Optional) Timeout used to determine\nif and for how long to cache in the django cache. A timeout of\n0 will skip the django cache. If timeout is provided, use that\ntimeout for the key; otherwise use the default cache timeout.", "source": "codesearchnet"}
{"code": "def validate(self, value):\n        \n\n        cast_callback = self.cast_callback if self.cast_callback else self.cast_type\n\n        try:\n            return value if isinstance(value, self.cast_type) else cast_callback(value)\n\n        except Exception:\n            raise NodeTypeError('Invalid value `{}` for {}.'.format(value, self.cast_type))", "docstring": "Base validation method. Check if type is valid, or try brute casting.\n\nArgs:\nvalue (object): A value for validation.\n\nReturns:\nBase_type instance.\n\nRaises:\nSchemaError, if validation or type casting fails.", "source": "juraj-google-style"}
{"code": "def prepare_loss_weights(training_endpoints, loss_weights=None):\n    if loss_weights is None:\n        for e in training_endpoints:\n            e.loss_weight = 1.0\n    elif isinstance(loss_weights, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('loss_weights', loss_weights, [e.output_name for e in training_endpoints])\n        for e in training_endpoints:\n            e.loss_weight = loss_weights.get(e.output_name, 1.0)\n    elif isinstance(loss_weights, list):\n        if len(loss_weights) != len(training_endpoints):\n            raise ValueError('When passing a list as loss_weights, it should have one entry per model output. The model has ' + str(len(training_endpoints)) + ' outputs, but you passed loss_weights=' + str(loss_weights))\n        for w, e in zip(loss_weights, training_endpoints):\n            e.loss_weight = w\n    else:\n        raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.')", "docstring": "Converts loss weights to a list of loss weights.\n\nThe result loss weights will be populated on the training endpoint.\n\nArgs:\ntraining_endpoints: List of model training endpoints.\nloss_weights: Optional list or dictionary specifying scalar coefficients\n(Python floats) to weight the loss contributions of different model\noutputs. The loss value that will be minimized by the model will then be\nthe *weighted sum* of all individual losses, weighted by the\n`loss_weights` coefficients. If a list, it is expected to have a 1:1\nmapping to the model's outputs. If a dict, it is expected to map\noutput names (strings) to scalar coefficients.\n\nRaises:\nValueError: If loss weight is a dict with key not in model output names,\nor if loss is a list with len not equal to model outputs.", "source": "github-repos"}
{"code": "def match(self, request):\n        \n        \n        for test in self.filters:\n            if not test(request, self):\n                return False\n\n        \n        for mapper in self.mappers:\n            request = mapper(request, self)\n            if not request:\n                raise ValueError('map function must return a request object')\n\n        \n        match_errors = []\n\n        \n        for mock in self.mocks[:]:\n            try:\n                \n                matches, errors = mock.match(request.copy())\n                if len(errors):\n                    match_errors += errors\n                if matches:\n                    return mock\n            except PookExpiredMock:\n                \n                self.mocks.remove(mock)\n\n        \n        if not self.should_use_network(request):\n            msg = 'pook error!\\n\\n'\n\n            msg += (\n                '=> Cannot match any mock for the '\n                'following request:\\n{}'.format(request)\n            )\n\n            \n            if self.debug:\n                err = '\\n\\n'.join([str(err) for err in match_errors])\n                if err:\n                    msg += '\\n\\n=> Detailed matching errors:\\n{}\\n'.format(err)\n\n            \n            raise PookNoMatches(msg)\n\n        \n        self.unmatched_reqs.append(request)", "docstring": "Matches a given Request instance contract against the registered mocks.\n\nIf a mock passes all the matchers, its response will be returned.\n\nArguments:\nrequest (pook.Request): Request contract to match.\n\nRaises:\npook.PookNoMatches: if networking is disabled and no mock matches\nwith the given request contract.\n\nReturns:\npook.Response: the mock response to be used by the interceptor.", "source": "juraj-google-style"}
{"code": "def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None):\n    \n    alignments = list(AlignIO.parse(alignment_file, \"emboss\"))\n    alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos'])\n\n    for alignment in alignments:\n        if not a_seq_id:\n            a_seq_id = list(alignment)[0].id\n        a_seq = str(list(alignment)[0].seq)\n        if not b_seq_id:\n            b_seq_id = list(alignment)[1].id\n        b_seq = str(list(alignment)[1].seq)\n\n        df = get_alignment_df(a_seq, b_seq, a_seq_id, b_seq_id)\n        alignment_df = alignment_df.append(df).reset_index(drop=True)\n\n    return alignment_df", "docstring": "Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.\n\nArgs:\nalignment_file:\na_seq_id: Optional specification of the ID of the reference sequence\nb_seq_id: Optional specification of the ID of the aligned sequence\n\nReturns:\nPandas DataFrame: all positions in the alignment", "source": "juraj-google-style"}
{"code": "def to_json(self):\n    cursor = self._get_cursor()\n    cursor_object = False\n    if (cursor and isinstance(cursor, datastore_query.Cursor)):\n        cursor = cursor.to_websafe_string()\n        cursor_object = True\n    return {'key_range': self._key_range.to_json(), 'query_spec': self._query_spec.to_json(), 'cursor': cursor, 'cursor_object': cursor_object}", "docstring": "Serializes all states into json form.\n\nReturns:\nall states in json-compatible map.", "source": "codesearchnet"}
{"code": "def __init__(self, minimum=-18446744073709551616, maximum=18446744073709551615, singleStep=1, parent=None):\n        \n        super(BigIntSpinboxDelegate, self).__init__(parent)\n        self.minimum = minimum\n        self.maximum = maximum\n        self.singleStep = singleStep", "docstring": "construct a new instance of a BigIntSpinboxDelegate.\n\nArgs:\nmaximum (int or long, optional): minimum allowed number in BigIntSpinbox. defaults to -18446744073709551616.\nminimum (int or long, optional): maximum allowed number in BigIntSpinbox. defaults to 18446744073709551615.\nsingleStep (int, optional): amount of steps to stepUp BigIntSpinbox. defaults to 1.", "source": "juraj-google-style"}
{"code": "def images(self, **kwargs):\n        \n        path = self._get_series_id_season_number_path('images')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the images (posters) that we have stored for a TV season by season\nnumber.\n\nArgs:\nlanguage: (optional) ISO 639 code.\ninclude_image_language: (optional) Comma separated, a valid\nISO 69-1.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def add_note(self, note):\n    notes = self.cached_json\n    if (not note.moderator):\n        note.moderator = self.r.user.me().name\n    try:\n        mod_index = notes['constants']['users'].index(note.moderator)\n    except ValueError:\n        notes['constants']['users'].append(note.moderator)\n        mod_index = notes['constants']['users'].index(note.moderator)\n    try:\n        warn_index = notes['constants']['warnings'].index(note.warning)\n    except ValueError:\n        if (note.warning in Note.warnings):\n            notes['constants']['warnings'].append(note.warning)\n            warn_index = notes['constants']['warnings'].index(note.warning)\n        else:\n            raise ValueError(('Warning type not valid: ' + note.warning))\n    new_note = {'n': note.note, 't': note.time, 'm': mod_index, 'l': note.link, 'w': warn_index}\n    try:\n        notes['users'][note.username]['ns'].insert(0, new_note)\n    except KeyError:\n        notes['users'][note.username] = {'ns': [new_note]}\n    return '\"create new note on user {}\" via puni'.format(note.username)", "docstring": "Add a note to the usernotes wiki page.\n\nArguments:\nnote: the note to be added (Note)\n\nReturns the update message for the usernotes wiki\n\nRaises:\nValueError when the warning type of the note can not be found in the\nstored list of warnings.", "source": "codesearchnet"}
{"code": "def GetFormattedSources(self, event):\n    event_formatter = self.GetEventFormatter(event)\n    if (not event_formatter):\n        return (None, None)\n    return event_formatter.GetSources(event)", "docstring": "Retrieves the formatted sources related to the event.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple: containing:\n\nstr: full source string or None if no event formatter was found.\nstr: short source string or None if no event formatter was found.", "source": "codesearchnet"}
{"code": "def create_endpoints_csv_file(self, timeout=-1):\n        \n        uri = \"{}/endpoints/\".format(self.data[\"uri\"])\n        return self._helper.do_post(uri, {}, timeout, None)", "docstring": "Creates an endpoints CSV file for a SAN.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Endpoint CSV File Response.", "source": "juraj-google-style"}
{"code": "def compute_author_match_score(x_authors, y_authors):\n    \n    if not x_authors or not y_authors:\n        return 0.0\n\n    matches = get_number_of_author_matches(x_authors, y_authors)\n    max_length = max(len(x_authors), len(y_authors))\n\n    return matches / float(max_length)", "docstring": "Return the matching score of 2 given lists of authors.\n\nArgs:\nx_authors (list(dict)): first schema-compliant list of authors.\ny_authors (list(dict)): second schema-compliant list of authors.\n\nReturns:\nfloat: matching score of authors.", "source": "juraj-google-style"}
{"code": "def set_time(self, value: float):\n    if (value < 0):\n        value = 0\n    self.controller.row = (self.rps * value)", "docstring": "Set the current time jumping in the timeline.\n\nArgs:\nvalue (float): The new time", "source": "codesearchnet"}
{"code": "def run_ui(self, init_command=None, title=None, title_color=None, enable_mouse_on_start=True):\n    raise NotImplementedError('run_ui() is not implemented in BaseUI')", "docstring": "Run the UI until user- or command- triggered exit.\n\nArgs:\ninit_command: (str) Optional command to run on CLI start up.\ntitle: (str) Optional title to display in the CLI.\ntitle_color: (str) Optional color of the title, e.g., \"yellow\".\nenable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on\nstart-up.\n\nReturns:\nAn exit token of arbitrary type. Can be None.", "source": "github-repos"}
{"code": "def _SetHashers(self, hasher_names_string):\n    if ((not hasher_names_string) or (hasher_names_string == 'none')):\n        return\n    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance('hashing')\n    analyzer_object.SetHasherNames(hasher_names_string)\n    self._analyzers.append(analyzer_object)", "docstring": "Sets the hasher names.\n\nArgs:\nhasher_names_string (str): comma separated names of the hashers\nto enable, where 'none' disables the hashing analyzer.", "source": "codesearchnet"}
{"code": "def GetHashers(cls, hasher_names):\n    \n    hashers = []\n    for hasher_name, hasher_class in iter(cls._hasher_classes.items()):\n      if hasher_name in hasher_names:\n        hashers.append(hasher_class())\n\n    return hashers", "docstring": "Retrieves instances for all the specified hashers.\n\nArgs:\nhasher_names (list[str]): names of the hashers to retrieve.\n\nReturns:\nlist[BaseHasher]: hashers.", "source": "juraj-google-style"}
{"code": "def verify_dataset(X, y):\n    (X_shape, y_shape) = (np.array(X).shape, np.array(y).shape)\n    if (len(X_shape) != 2):\n        raise exceptions.UserError('X must be 2-dimensional array')\n    if (len(y_shape) != 1):\n        raise exceptions.UserError('y must be 1-dimensional array')\n    if (X_shape[0] != y_shape[0]):\n        raise exceptions.UserError('X must have same number of elements as y')\n    return dict(features_shape=X_shape, labels_shape=y_shape)", "docstring": "Verifies if a dataset is valid for use i.e. scikit-learn format\n\nUsed to verify a dataset by returning shape and basic statistics of\nreturned data. This will also provide quick and dirty check on\ncapability of host machine to process the data.\n\nArgs:\nX (array-like): Features array\n\ny (array-like): Label array\n\nReturns:\nX_shape (2-tuple of int): Shape of X returned\n\ny_shape (1-tuple of int): Shape of y returned\n\nRaises:\nAssertionError: `X_shape` must be of length 2 and `y_shape` must be of\nlength 1. `X` must have the same number of elements as `y`\ni.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,\nan AssertionError is raised.", "source": "codesearchnet"}
{"code": "def call(self):\n    (headers, data) = self.prepare()\n    if _LOG.isEnabledFor(logging.DEBUG):\n        _LOG.debug('Sending %s, %s', headers, prettify(data))\n    response = requests.post(self.endpoint, headers=headers, data=data.encode('utf-8'), **self.request_args)\n    _LOG.debug('Received %s, %s', response.headers, response.text)\n    status = response.status_code\n    if (status == 200):\n        tree = XML.fromstring(response.content)\n        body = tree.find('{http:\n        return body\n    elif (status == 500):\n        tree = XML.fromstring(response.content)\n        fault = tree.find('.\n        if (fault is None):\n            response.raise_for_status()\n        faultcode = fault.findtext('faultcode')\n        faultstring = fault.findtext('faultstring')\n        faultdetail = fault.find('detail')\n        raise SoapFault(faultcode, faultstring, faultdetail)\n    else:\n        response.raise_for_status()\n    return None", "docstring": "Call the SOAP method on the server.\n\nReturns:\nstr: the decapusulated SOAP response from the server,\nstill encoded as utf-8.\n\nRaises:\nSoapFault: if a SOAP error occurs.\n~requests.exceptions.HTTPError: if an http error occurs.", "source": "codesearchnet"}
{"code": "def search_orcid(orcid):\n    url = 'https:\n    r = requests.get(url, headers=headers)\n    if (r.status_code != 200):\n        r.raise_for_status()\n    return r.json()", "docstring": "Search the ORCID public API\n\nSpecfically, return a dictionary with the personal details\n(name, etc.) of the person associated with the given ORCID\n\nArgs:\norcid (`str`): The ORCID to be searched\n\nReturns:\n`dict`: Dictionary with the JSON response from the API\n\nRaises:\n`~requests.HTTPError`: If the given ORCID cannot be found, an `~requests.HTTPError`\nis raised with status code 404", "source": "codesearchnet"}
{"code": "def write_grib2(self, path):\n        \n        if self.percentile is None:\n            var_type = \"mean\"\n        else:\n            var_type = \"p{0:02d}\".format(self.percentile)\n        lscale = 1e6\n        grib_id_start = [7, 0, 14, 14, 2]\n        gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype=np.int32)\n        lon_0 = self.proj_dict[\"lon_0\"]\n        sw_lon = self.grid_dict[\"sw_lon\"]\n        if lon_0 < 0:\n            lon_0 += 360\n        if sw_lon < 0:\n            sw_lon += 360\n        gdtmp1 = np.array([7, 1, self.proj_dict['a'], 1, self.proj_dict['a'], 1, self.proj_dict['b'],\n                           self.data.shape[-2], self.data.shape[-1], self.grid_dict[\"sw_lat\"] * lscale,\n                           sw_lon * lscale, 0, self.proj_dict[\"lat_0\"] * lscale,\n                           lon_0 * lscale,\n                           self.grid_dict[\"dx\"] * 1e3, self.grid_dict[\"dy\"] * 1e3, 0,\n                           self.proj_dict[\"lat_1\"] * lscale,\n                           self.proj_dict[\"lat_2\"] * lscale, 0, 0], dtype=np.int32)\n        pdtmp1 = np.array([1, 31, 2, 0, 116, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 192, 0, self.data.shape[0]], dtype=np.int32)\n        for m, member in enumerate(self.members):\n            pdtmp1[-2] = m\n            for t, time in enumerate(self.times):\n                time_list = list(time.utctimetuple()[0:6])\n                grbe = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))\n                grbe.addgrid(gdsinfo, gdtmp1)\n                pdtmp1[8] = (time.to_pydatetime() - self.run_date).total_seconds() / 3600.0\n                drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)\n                data = self.data[m, t].astype(np.float32) / 1000.0\n                masked_data = np.ma.array(data, mask=data <= 0)\n                grbe.addfield(1, pdtmp1, 0, drtmp1, masked_data)\n                grbe.end()\n                filename = path + \"{0}_{1}_mlhail_{2}_{3}.grib2\".format(self.ensemble_name.replace(\" \", \"-\"), member,\n                                                                        var_type,\n                                                                        time.to_datetime().strftime(\"%Y%m%d%H%M\"))\n                print(\"Writing to \" + filename)\n                grib_file = open(filename, \"wb\")\n                grib_file.write(grbe.msg)\n                grib_file.close()\n        return", "docstring": "Writes data to grib2 file. Currently, grib codes are set by hand to hail.\n\nArgs:\npath: Path to directory containing grib2 files.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def pack(self, value=None):\n    if (value is None):\n        self.update_header_length()\n        return super().pack()\n    elif isinstance(value, type(self)):\n        return value.pack()\n    else:\n        msg = '{} is not an instance of {}'.format(value, type(self).__name__)\n        raise PackException(msg)", "docstring": "Pack the message into a binary data.\n\nOne of the basic operations on a Message is the pack operation. During\nthe packing process, we convert all message attributes to binary\nformat.\n\nSince that this is usually used before sending the message to a switch,\nhere we also call :meth:`update_header_length`.\n\n.. seealso:: This method call its parent's :meth:`GenericStruct.pack`\nafter :meth:`update_header_length`.\n\nReturns:\nbytes: A binary data thats represents the Message.\n\nRaises:\nException: If there are validation errors.", "source": "codesearchnet"}
{"code": "def all_reduce_sum_gradients(grads_and_vars):\n    grads_and_vars = list(grads_and_vars)\n    filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n    if filtered_grads_and_vars:\n        if strategy_supports_no_merge_call():\n            grads = [pair[0] for pair in filtered_grads_and_vars]\n            reduced = distribute_lib.get_strategy().extended._replica_ctx_all_reduce(ds_reduce_util.ReduceOp.SUM, grads)\n        else:\n            reduced = distribute_lib.get_replica_context().merge_call(_all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n    else:\n        reduced = []\n    reduced_with_nones = []\n    reduced_pos = 0\n    for g, v in grads_and_vars:\n        if g is None:\n            reduced_with_nones.append((None, v))\n        else:\n            reduced_with_nones.append((reduced[reduced_pos], v))\n            reduced_pos += 1\n    assert reduced_pos == len(reduced), 'Failed to add all gradients'\n    return reduced_with_nones", "docstring": "Returns all-reduced gradients aggregated via summation.\n\nArgs:\ngrads_and_vars: List of (gradient, variable) pairs.\n\nReturns:\nList of (gradient, variable) pairs where gradients have been all-reduced.", "source": "github-repos"}
{"code": "def dataset_as_numpy(dataset):\n    if not context.executing_eagerly():\n        raise ValueError('dataset_as_numpy must be run in eager mode outside tf.function')\n    nested_ds = dataset\n    del dataset\n    flat_ds = nest.flatten(nested_ds)\n    flat_np = []\n    for ds_el in flat_ds:\n        if not isinstance(ds_el, (tensor_lib.Tensor, dataset_ops.DatasetV2)):\n            types = nest.map_structure(type, nested_ds)\n            raise ValueError('Arguments to dataset_as_numpy must be (possibly nested structure of) tf.Tensors or tf.data.Datasets. Got: %s' % types)\n    for ds_el in flat_ds:\n        if isinstance(ds_el, tensor_lib.Tensor):\n            np_el = tf_np.asarray(ds_el)\n        elif isinstance(ds_el, dataset_ops.DatasetV2):\n            np_el = _eager_dataset_iterator(ds_el)\n        else:\n            assert False\n        flat_np.append(np_el)\n    return nest.pack_sequence_as(nested_ds, flat_np)", "docstring": "Converts a `tf.data.Dataset` to an iterable of ndarrays.\n\n`dataset_as_numpy` converts a possibly nested structure of `tf.data.Dataset`s\nand `tf.Tensor`s to iterables of ndarrays and ndarrays, respectively. This\nfunction must be run in eager mode outside tf.function.\n\nArgs:\ndataset: a possibly nested structure of `tf.data.Dataset`s and/or\n`tf.Tensor`s.\n\nReturns:\nA structure matching `dataset` where `tf.data.Dataset`s are converted to\ngenerators of ndarrays and `tf.Tensor`s are converted to ndarrays.", "source": "github-repos"}
{"code": "def track_change(self, tile, property_name, value, formatter=None):\n    if (not self.tracking):\n        return\n    if ((len(self._whitelist) > 0) and ((tile, property_name) not in self._whitelist)):\n        return\n    if (formatter is None):\n        formatter = str\n    change = StateChange(monotonic(), tile, property_name, value, formatter(value))\n    with self._lock:\n        self.changes.append(change)", "docstring": "Record that a change happened on a given tile's property.\n\nThis will as a StateChange object to our list of changes if we\nare recording changes, otherwise, it will drop the change.\n\nArgs:\ntile (int): The address of the tile that the change happened on.\nproperty_name (str): The name of the property that changed.\nvalue (object): The new value assigned to the property.\nformatter (callable): Optional function to convert value to a\nstring.  This function will only be called if track_changes()\nis enabled and `name` is on the whitelist for properties that\nshould be tracked.  If `formatter` is not passed or is None,\nit will default to `str`.", "source": "codesearchnet"}
{"code": "def _init_boto3_clients(self, profile, region):\n        \n        try:\n            session = None\n            if profile and region:\n                session = boto3.session.Session(profile_name=profile, region_name=region)\n            elif profile:\n                session = boto3.session.Session(profile_name=profile)\n            elif region:\n                session = boto3.session.Session(region_name=region)\n            else:\n                session = boto3.session.Session()\n\n            self._cloud_formation = session.client('cloudformation')\n            return True\n        except Exception as wtf:\n            logging.error(wtf, exc_info=True)\n            return False", "docstring": "The utililty requires boto3 clients to CloudFormation.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False", "source": "juraj-google-style"}
{"code": "def today(self, strict=False):\n        \n        return self.on(arrow.now(), strict=strict)", "docstring": "Iterates (in chronological order) over all events that occurs today\n\nArgs:\nstrict (bool): if True events will be returned only if they are\\\nstrictly *included* in `day`.", "source": "juraj-google-style"}
{"code": "def ExamineEvent(self, mediator, event):\n    \n    \n    if event.data_type != 'fs:stat':\n      return\n\n    filename = getattr(event, 'filename', None)\n    if not filename:\n      return\n\n    \n    if 'chrome' not in filename.lower():\n      return\n\n    if not self._sep:\n      self._sep = self._GetPathSegmentSeparator(filename)\n\n    if '{0:s}Extensions{0:s}'.format(self._sep) not in filename:\n      return\n\n    \n    \n    paths = filename.split(self._sep)\n    if paths[-2] != 'Extensions':\n      return\n\n    extension_identifier = paths[-1]\n    if extension_identifier == 'Temp':\n      return\n\n    \n    user = mediator.GetUsernameForPath(filename)\n\n    \n    \n    if not user:\n      if len(filename) > 25:\n        user = 'Not found ({0:s}...)'.format(filename[0:25])\n      else:\n        user = 'Not found ({0:s})'.format(filename)\n\n    extension_string = self._GetTitleFromChromeWebStore(extension_identifier)\n    if not extension_string:\n      extension_string = extension_identifier\n\n    self._results.setdefault(user, [])\n    if (extension_string, extension_identifier) not in self._results[user]:\n      self._results[user].append((extension_string, extension_identifier))", "docstring": "Analyzes an event.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"}
{"code": "def process_ems(self, doc: Document) -> List[Document]:\n        \n        new_docs = list()\n\n        for a_em in self.em_lst:\n            if a_em.document_selector(doc):\n                self.log(\" processing with \" + str(type(a_em)) + \". Process\", \"info\", doc.doc_id, doc.url)\n                fresh_docs = a_em.process_document(doc)\n                \n                if fresh_docs:\n                    new_docs.extend(fresh_docs)\n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n            \n\n        \n        doc.insert_kg_into_cdr()\n        if not self.generate_json_ld:\n            if \"knowledge_graph\" in doc.cdr_document:\n                doc.cdr_document[\"knowledge_graph\"].pop(\"@context\", None)\n        Utility.make_json_serializable(doc.cdr_document)\n\n        if self.output_kg_only:\n            doc = doc.kg.value\n        elif not doc.doc_id:\n            doc.doc_id = Utility.create_doc_id_from_json(doc.cdr_document)\n\n        results = [doc]\n        for new_doc in new_docs:\n            results.extend(self.process_ems(new_doc))\n\n        return results", "docstring": "Factory method to wrap input JSON docs in an ETK Document object.\n\nArgs:\ndoc (Document): process on this document\n\nReturns: a Document object and a KnowledgeGraph object", "source": "juraj-google-style"}
{"code": "def pager(__text: str, *, pager: Optional[str] = 'less'):\n    \n    if pager:\n        run([pager, ], input=__text.encode())\n    else:\n        print(__text)", "docstring": "Pass output through pager.\n\nSee :manpage:`less(1)`, if you wish to configure the default pager.  For\nexample, you may wish to check ``FRSX`` options.\n\nArgs:\n__text: Text to page\npager: Pager to use", "source": "juraj-google-style"}
{"code": "def dr( self, atom1, atom2 ):\n        \n        return self.cell.dr( atom1.r, atom2.r )", "docstring": "Calculate the distance between two atoms.\n\nArgs:\natom1 (vasppy.Atom): Atom 1.\natom2 (vasppy.Atom): Atom 2.\n\nReturns:\n(float): The distance between Atom 1 and Atom 2.", "source": "juraj-google-style"}
{"code": "def from_hyperplane(basis, origin, point, internal=True):\n    basis = np.array(basis)\n    assert ((basis.shape[0] + 1) == basis.shape[1])\n    big_basis = np.zeros((basis.shape[1], basis.shape[1]))\n    big_basis[(:basis.shape[0], :basis.shape[1])] = basis\n    (u, s, vh) = np.linalg.svd(big_basis)\n    null_mask = (s <= 1e-08)\n    normal = np.compress(null_mask, vh, axis=0)[0]\n    if (np.inner((np.array(point) - np.array(origin)), normal) > 0):\n        if internal:\n            normal *= (- 1)\n    elif (not internal):\n        normal *= (- 1)\n    offset = (- np.dot(origin, normal))\n    return Halfspace(normal, offset)", "docstring": "Returns a Halfspace defined by a list of vectors parallel to the\nbounding hyperplane.\n\nArgs:\nbasis: basis for the hyperplane (array with vector rows)\norigin: point on the hyperplane\npoint: point not on the hyperplane\ninternal: whether point is inside the halfspace", "source": "codesearchnet"}
{"code": "def read(self, path):\n    with open(path, 'r') as f:\n        for line in f:\n            line = line.strip()\n            match_obj_name = re.search('^([A-Z][A-Z/ \\\\d]+),', line)\n            if (match_obj_name is not None):\n                internal_name = match_obj_name.group(1)\n                if (internal_name in self._data):\n                    self._data[internal_name] = self._create_datadict(internal_name)\n                    data_line = line[(len(internal_name) + 1):]\n                    vals = data_line.strip().split(',')\n                    self._data[internal_name].read(vals)\n            else:\n                wd = WeatherData()\n                wd.read(line.strip().split(','))\n                self.add_weatherdata(wd)", "docstring": "Read EPW weather data from path.\n\nArgs:\npath (str): path to read weather data from", "source": "codesearchnet"}
{"code": "def __init__(self, **kwds):\n        \n        self.code_objs = dict()\n        self._codes = []\n        self._functions = []\n        self._executables = []\n        self.dry_run = None\n        self.encoding = 'utf-8'\n        self.newline = None\n        if 'module' in kwds:\n            self.import_module(kwds['module'])\n        if 'code' in kwds:\n            self.append_code_expr(kwds['code'])\n        if 'function' in kwds:\n            self.append_function(kwds['function'])\n        if 'executable' in kwds:\n            self.append_executable(kwds['executable'])\n        if 'dry_run' in kwds:\n            self.dry_run = kwds['dry_run']\n        if 'encoding' in kwds:\n            self.encoding = kwds['encoding']\n        if 'newline' in kwds:\n            self.newline = kwds['newline']", "docstring": "Initialize MassEdit object.\n\nArgs:\n- code (byte code object): code to execute on input file.\n- function (str or callable): function to call on input file.\n- module (str): module name where to find the function.\n- executable (str): executable file name to execute on input file.\n- dry_run (bool): skip actual modification of input file if True.", "source": "juraj-google-style"}
{"code": "def set_white(self, brightness, colourtemp):\n        \n        if not 25 <= brightness <= 255:\n            raise ValueError(\"The brightness needs to be between 25 and 255.\")\n        if not 0 <= colourtemp <= 255:\n            raise ValueError(\"The colour temperature needs to be between 0 and 255.\")\n\n        payload = self.generate_payload(SET, {\n            self.DPS_INDEX_MODE: self.DPS_MODE_WHITE,\n            self.DPS_INDEX_BRIGHTNESS: brightness,\n            self.DPS_INDEX_COLOURTEMP: colourtemp})\n\n        data = self._send_receive(payload)\n        return data", "docstring": "Set white coloured theme of an rgb bulb.\n\nArgs:\nbrightness(int): Value for the brightness (25-255).\ncolourtemp(int): Value for the colour temperature (0-255).", "source": "juraj-google-style"}
{"code": "class MaxScore(ScoreAggregation):\n\n    def __init__(self, **kwargs):\n        super().__init__(agg_func=max, **kwargs)", "docstring": "Aggregates anomaly scores by selecting the maximum score.\n\nThis `AggregationFn` selects the highest anomaly score from a collection\nof `AnomalyPrediction` objects as the aggregated score.\n\nArgs:\n**kwargs: Additional keyword arguments to pass to the base\n`ScoreAggregation` class.", "source": "github-repos"}
{"code": "def getShareInfo(item):\n    \n    key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'\n    info = getattr(item, key, None)\n    if info is not None:\n        return info\n\n    meths = {}\n    info = {'meths': meths}\n\n    for name in dir(item):\n\n        if name.startswith('_'):\n            continue\n\n        attr = getattr(item, name, None)\n        if not callable(attr):\n            continue\n\n        \n        \n        wrapped = getattr(attr, '__syn_wrapped__', None)\n        if wrapped in unwraps:\n            real = inspect.unwrap(attr)\n            if inspect.isasyncgenfunction(real):\n                meths[name] = {'genr': True}\n                continue\n\n        if inspect.isasyncgenfunction(attr):\n            meths[name] = {'genr': True}\n\n    try:\n        setattr(item, key, info)\n    except Exception as e:  \n        logger.exception(f'Failed to set magic on {item}')\n\n    try:\n        setattr(item.__class__, key, info)\n    except Exception as e:  \n        logger.exception(f'Failed to set magic on {item.__class__}')\n\n    return info", "docstring": "Get a dictionary of special annotations for a Telepath Proxy.\n\nArgs:\nitem:  Item to inspect.\n\nNotes:\nThis will set the ``_syn_telemeth`` attribute on the item\nand the items class, so this data is only computed once.\n\nReturns:\ndict: A dictionary of methods requiring special handling by the proxy.", "source": "juraj-google-style"}
{"code": "def _parse_type_to_int(dtype, flag):\n    if dtype not in mmi_constants.TFLITE_TYPES:\n        raise ValueError(\"Unsupported value '{0}' for {1}. Only {2} are supported.\".format(dtype, flag, mmi_constants.TFLITE_TYPES))\n    dtype_str = mmi_constants.TFLITE_TO_STR_TYPES[dtype]\n    dtype_int = schema_fb.TensorType.__dict__[dtype_str]\n    return dtype_int", "docstring": "Converts a tflite type to it's integer representation.\n\nArgs:\ndtype: tf.DType representing the inference type.\nflag: str representing the flag name.\n\nReturns:\ninteger, a tflite TensorType enum value.\n\nRaises:\nValueError: Unsupported tflite type.", "source": "github-repos"}
{"code": "def _update_explicit_bucket_count(a_float, dist):\n    \n    buckets = dist.explicitBuckets\n    if buckets is None:\n        raise ValueError(_BAD_UNSET_BUCKETS % (u'explicit buckets'))\n    bucket_counts = dist.bucketCounts\n    bounds = buckets.bounds\n    if len(bucket_counts) < len(bounds) + 1:\n        raise ValueError(_BAD_LOW_BUCKET_COUNT)\n    bucket_counts[bisect.bisect(bounds, a_float)] += 1", "docstring": "Adds `a_float` to `dist`, updating its explicit buckets.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated\n\nRaises:\nValueError: if `dist` does not already have explict buckets defined\nValueError: if there are not enough bucket count fields in `dist`", "source": "juraj-google-style"}
{"code": "def infer_schema(stats_path, schema_path):\n    print('Infering schema from statistics.')\n    schema = tfdv.infer_schema(tfdv.load_statistics(stats_path), infer_feature_shape=False)\n    print(text_format.MessageToString(schema))\n    print('Writing schema to output path.')\n    file_io.write_string_to_file(schema_path, text_format.MessageToString(schema))", "docstring": "Infers a schema from stats in stats_path.\n\nArgs:\nstats_path: Location of the stats used to infer the schema.\nschema_path: Location where the inferred schema is materialized.", "source": "github-repos"}
{"code": "def _execute(self, request):\n        \n        if self._rate_limiter:\n            \n            \n            \n            with self._rate_limiter:\n                return request.execute(http=self.http,\n                                       num_retries=self._num_retries)\n        return request.execute(http=self.http,\n                               num_retries=self._num_retries)", "docstring": "Run execute with retries and rate limiting.\n\nArgs:\nrequest (object): The HttpRequest object to execute.\n\nReturns:\ndict: The response from the API.", "source": "juraj-google-style"}
{"code": "def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':\n    return cls._binary_op(x, y, tf.maximum, tf.float32)", "docstring": "Returns a TensorFluent for the maximum function.TensorFluent\n\nArgs:\nx: The first operand.\ny: The second operand.\n\nReturns:\nA TensorFluent wrapping the maximum function.", "source": "codesearchnet"}
{"code": "def read_int16(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sh' % endian), 2)", "docstring": "Read 2 byte as a signed integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def get(account):\n    account = Account.get(account)\n    if (not account):\n        return None\n    acct_type = AccountType.get(account.account_type_id).account_type\n    account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type)\n    return account_class(account)", "docstring": "Returns the class object identified by `account_id`\n\nArgs:\naccount (`int`, `str`): Unique ID of the account to load from database\n\nReturns:\n`Account` object if found, else None", "source": "codesearchnet"}
{"code": "def requires_genesis(self):\n    genesis_file = os.path.join(self._data_dir, 'genesis.batch')\n    has_genesis_batches = Path(genesis_file).is_file()\n    LOGGER.debug('genesis_batch_file: %s', (genesis_file if has_genesis_batches else 'not found'))\n    chain_head = self._block_store.chain_head\n    has_chain_head = (chain_head is not None)\n    if has_chain_head:\n        LOGGER.debug('chain_head: %s', chain_head)\n    block_chain_id = self._chain_id_manager.get_block_chain_id()\n    is_genesis_node = (block_chain_id is None)\n    LOGGER.debug('block_chain_id: %s', (block_chain_id if (not is_genesis_node) else 'not yet specified'))\n    if (has_genesis_batches and has_chain_head):\n        raise InvalidGenesisStateError('Cannot have a genesis_batch_file and an existing chain')\n    if (has_genesis_batches and (not is_genesis_node)):\n        raise InvalidGenesisStateError('Cannot have a genesis_batch_file and join an existing network')\n    if ((not has_genesis_batches) and (not has_chain_head)):\n        LOGGER.info('No chain head and not the genesis node: starting in peering mode')\n    return (has_genesis_batches and (not has_chain_head) and is_genesis_node)", "docstring": "Determines if the system should be put in genesis mode\n\nReturns:\nbool: return whether or not a genesis block is required to be\ngenerated.\n\nRaises:\nInvalidGenesisStateError: raises this error if there is invalid\ncombination of the following: genesis.batch, existing chain\nhead, and block chain id.", "source": "codesearchnet"}
{"code": "def configure_ospf(self, cmd):\n    config = self.get()\n    cmds = ['router ospf {}'.format(config['ospf_process_id'])]\n    cmds.extend(make_iterable(cmd))\n    return super(Ospf, self).configure(cmds)", "docstring": "Allows for a list of OSPF subcommands to be configured\"\n\nArgs:\ncmd: (list or str): Subcommand to be entered\nReturns:\nbool: True if all the commands completed successfully", "source": "codesearchnet"}
{"code": "def recipe_dcm_log(config, auth_read, auth_write, accounts, days, recipe_slug):\n    dataset(config, {'description': 'The dataset will hold log table, Create it exists.', 'hour': [1], 'auth': auth_write, 'dataset': recipe_slug})\n    dcm_log(config, {'description': 'Will create tables with format CM_* to hold each endpoint via a call to the API list function. Exclude reports for its own task.', 'hour': [2], 'auth': auth_read, 'accounts': {'single_cell': True, 'values': accounts}, 'days': days, 'out': {'auth': auth_write, 'dataset': recipe_slug}})", "docstring": "Downloads Campaign manager logs and allows audits.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\naccounts (integer_list) - Comma separated CM account ids.\ndays (integer) - Number of days to backfill the log, works on first run only.\nrecipe_slug (string) - Google BigQuery dataset to create tables in.", "source": "github-repos"}
{"code": "def _get_db_fields(self, obj):\n    for field in obj.indexes:\n        (yield (field, self._zeo_key(field)))", "docstring": "Return list of database dictionaries, which are used as indexes for\neach attributes.\n\nArgs:\ncached (bool, default True): Use cached connection to database.\n\nReturns:\nlist: List of OOBTree's for each item in :attr:`.COMMON_FIELDS`.", "source": "codesearchnet"}
{"code": "def include_revision(revision_num, skip_factor=1.1):\n    if (skip_factor <= 1.0):\n        return True\n    return (int((math.log1p(revision_num) / math.log(skip_factor))) != int((math.log((revision_num + 2.0)) / math.log(skip_factor))))", "docstring": "Decide whether to include a revision.\n\nIf the number of revisions is large, we exclude some revisions to avoid\na quadratic blowup in runtime, since the article is likely also large.\n\nWe make the ratio between consecutive included revision numbers\nappproximately equal to \"factor\".\n\nArgs:\nrevision_num: an integer\nskip_factor: a floating point number >= 1.0\n\nReturns:\na boolean", "source": "codesearchnet"}
{"code": "def releases(self, **kwargs):\n        \n        path = self._get_id_path('releases')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get the release date and certification information by country for a\nspecific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def _from_safe_path_param_name(safe_parameter):\n    assert safe_parameter.startswith('_')\n    safe_parameter_as_base32 = safe_parameter[1:]\n    padding_length = ((- len(safe_parameter_as_base32)) % 8)\n    padding = ('=' * padding_length)\n    return base64.b32decode((safe_parameter_as_base32 + padding))", "docstring": "Takes a safe regex group name and converts it back to the original value.\n\nOnly alphanumeric characters and underscore are allowed in variable name\ntokens, and numeric are not allowed as the first character.\n\nThe safe_parameter is a base32 representation of the actual value.\n\nArgs:\nsafe_parameter: A string that was generated by _to_safe_path_param_name.\n\nReturns:\nA string, the parameter matched from the URL template.", "source": "codesearchnet"}
{"code": "def initialized_value(self):\n    raise NotImplementedError", "docstring": "Returns the value of the initialized variable.\n\nYou should use this instead of the variable itself to initialize another\nvariable with a value that depends on the value of this variable.\n\n```python\n# Initialize 'v' with a random tensor.\nv = tf.Variable(tf.random.truncated_normal([10, 40]))\n# Use `initialized_value` to guarantee that `v` has been\n# initialized before its value is used to initialize `w`.\n# The random values are picked only once.\nw = tf.Variable(v.initialized_value() * 2.0)\n```\n\nReturns:\nA `Tensor` holding the value of this variable after its initializer\nhas run.", "source": "github-repos"}
{"code": "def trace_buffer_capacity(self):\n        \n        cmd = enums.JLinkTraceCommand.GET_CONF_CAPACITY\n        data = ctypes.c_uint32(0)\n        res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n        if (res == 1):\n            raise errors.JLinkException('Failed to get trace buffer size.')\n        return data.value", "docstring": "Retrieves the trace buffer's current capacity.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nThe current capacity of the trace buffer.  This is not necessarily\nthe maximum possible size the buffer could be configured with.", "source": "juraj-google-style"}
{"code": "def resize(self, images: 'torch.Tensor', size: SizeDict, keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, interpolation: Optional['F.InterpolationMode']=None) -> 'torch.Tensor':\n    if not size.height or not size.width:\n        raise ValueError(f\"The size dictionary must contain the keys 'height' and 'width'. Got {size}\")\n    output_size = get_resize_output_image_size(images, output_size=(size.height, size.width), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=ChannelDimension.FIRST)\n    height, width = output_size\n    resized_images = torch.nn.functional.interpolate(images, (int(height), int(width)), mode=interpolation.value, align_corners=True)\n    return resized_images", "docstring": "Resize an image or batchd images to target size `(size[\"height\"], size[\"width\"])`. If `keep_aspect_ratio` is `True`, the image\nis resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is\nset, the image is resized to a size that is a multiple of this value.\n\nArgs:\nimages (`torch.Tensor`):\nImages to resize.\nsize (`Dict[str, int]`):\nTarget size of the output image.\nkeep_aspect_ratio (`bool`, *optional*, defaults to `False`):\nIf `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.\nensure_multiple_of (`int`, *optional*, defaults to 1):\nThe image is resized to a size that is a multiple of this value.\ninterpolation (`F.InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):\nDefines the resampling filter to use if resizing the image. Otherwise, the image is resized to size\nspecified in `size`.", "source": "github-repos"}
{"code": "def expint(x, name=None):\n    with ops.name_scope(name, 'expint', [x]):\n        return gen_special_math_ops.expint(x)", "docstring": "Computes the Exponential integral of `x` element-wise.\n\nThe Exponential integral is defined as the integral of `exp(t) / t` from\n`-inf` to `x`, with the domain of definition all positive real numbers.\n\n>>> tf.math.special.expint([1., 1.1, 2.1, 4.1]).numpy()\narray([ 1.8951179,  2.1673784,  5.3332353, 21.048464], dtype=float32)\n\nThis implementation is based off of the Cephes math library.\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types:\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.expi\n@end_compatibility", "source": "github-repos"}
{"code": "def approve(self, peer_jid):\n    self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare())", "docstring": "Approve a subscription request from jid\n\nArgs:\npeer_jid (str): the JID to approve", "source": "codesearchnet"}
{"code": "def _parse(json_str: str, primitive_cls: Type[Instant]) -> Instant:\n    datetime_str, timezone_str = _primitive_time_utils.split_timezone(json_str)\n    try:\n        dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S')\n        return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.TimePrecision.SECOND, primitive_cls)\n    except ValueError:\n        pass\n    try:\n        dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S.%f')\n        if _primitive_time_utils.PRECISION_PATTERN_MILLISECOND.search(datetime_str) is not None:\n            return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.TimePrecision.MILLISECOND, primitive_cls)\n        elif _primitive_time_utils.PRECISION_PATTERN_MICROSECOND.search(datetime_str) is not None:\n            return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_utils.TimePrecision.MICROSECOND, primitive_cls)\n    except ValueError:\n        pass\n    raise fhir_errors.InvalidFhirError('Invalid Instant.')", "docstring": "Parses the json_str into an Instant FHIR primitive.\n\nArgs:\njson_str: The raw JSON string to parse.\nprimitive_cls: The FHIR primitive to parse into.\n\nReturns:\nA FHIR primitive Instant.\n\nRaises:\nfhir_errors.InvalidFhirError: In the event that no FHIR primitive Instant\nformat was able to properly parse the json_str.", "source": "github-repos"}
{"code": "def ctc_loss(target, output, target_length, output_length, mask_index=0):\n    if any_symbolic_tensors((target, output, target_length, output_length)):\n        return CTCLoss(mask_index).symbolic_call(target, output, target_length, output_length)\n    return backend.nn.ctc_loss(target, output, target_length, output_length, mask_index)", "docstring": "CTC (Connectionist Temporal Classification) loss.\n\nArgs:\ntarget: A tensor of shape `(batch_size, max_length)` containing\nthe true labels in integer format.\noutput: A tensor of shape `(batch_size, max_length, num_classes)`\ncontaining logits (the output of your model).\ntarget_length: A tensor of shape `(batch_size,)` containing the\ntrue label lengths.\noutput_length: A tensor of shape `(batch_size,)` containing the\noutput lengths.\nmask_index: The index of the mask character in the vocabulary.\nDefaults to `0`.", "source": "github-repos"}
{"code": "def upsert_variant(self, variant_obj):\n    LOG.debug('Upserting variant %s', variant_obj['_id'])\n    try:\n        result = self.variant_collection.insert_one(variant_obj)\n    except DuplicateKeyError as err:\n        LOG.debug('Variant %s already exists in database', variant_obj['_id'])\n        result = self.variant_collection.find_one_and_update({'_id': variant_obj['_id']}, {'$set': {'compounds': variant_obj.get('compounds', [])}})\n        variant = self.variant_collection.find_one({'_id': variant_obj['_id']})\n    return result", "docstring": "Load a variant object, if the object already exists update compounds.\n\nArgs:\nvariant_obj(dict)\n\nReturns:\nresult", "source": "codesearchnet"}
{"code": "def _save_states(self, state, serialized_readers_entity):\n    \n    mr_id = state.key().id_or_name()\n    fresh_state = model.MapreduceState.get_by_job_id(mr_id)\n    if not self._check_mr_state(fresh_state, mr_id):\n      return False\n    if fresh_state.active_shards != 0:\n      logging.warning(\n          \"Mapreduce %s already has active shards. Looks like spurious task \"\n          \"execution.\", mr_id)\n      return None\n    config = util.create_datastore_write_config(state.mapreduce_spec)\n    db.put([state, serialized_readers_entity], config=config)\n    return True", "docstring": "Run transaction to save state.\n\nArgs:\nstate: a model.MapreduceState entity.\nserialized_readers_entity: a model._HugeTaskPayload entity containing\njson serialized input readers.\n\nReturns:\nFalse if a fatal error is encountered and this task should be dropped\nimmediately. True if transaction is successful. None if a previous\nattempt of this same transaction has already succeeded.", "source": "juraj-google-style"}
{"code": "def db_dp004(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db_dp004`'.format(value))\n\n        self._db_dp004 = value", "docstring": "Corresponds to IDD Field `db_dp004`\nmean coincident dry-bulb temperature to\nDew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_dp004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, value, masks=None, name='X.509 Certificate'):\n        \n        super(X509Certificate, self).__init__(\n            enums.CertificateType.X_509, value, masks, name)\n\n        \n        \n\n        \n        \n        self._x509_certificate_identifier = None\n        self._x509_certificate_subject = None\n        self._x509_certificate_issuer = None\n\n        self.validate()", "docstring": "Create an X509Certificate.\n\nArgs:\nvalue(bytes): The bytes representing the certificate.\nmasks(list): A list of CryptographicUsageMask enumerations\ndefining how the certificate will be used.\nname(string): The string name of the certificate.", "source": "juraj-google-style"}
{"code": "def __DepthFirstSearch(node, hashes):\n        \n        if node.LeftChild is None:\n            hashes.add(node.Hash)\n        else:\n            MerkleTree.__DepthFirstSearch(node.LeftChild, hashes)\n            MerkleTree.__DepthFirstSearch(node.RightChild, hashes)", "docstring": "Internal helper method.\n\nArgs:\nnode (MerkleTreeNode):\nhashes (list): each item is a bytearray.", "source": "juraj-google-style"}
{"code": "def prng(s):\n    return tf_np.asarray(s, dtype=_RNG_KEY_DTYPE)", "docstring": "Creates RNG state from seed.\n\nArgs:\ns: the seed, an integer.\n\nReturns:\nAn RNG state, as a scalar array of dtype `np.int64`.", "source": "github-repos"}
{"code": "def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None):\n  \n\n  for stat_response in stat_entries:\n    if stat_response.pathspec.last.stream_name:\n      \n      \n      \n      \n      stat_response.st_mode &= ~stat_type_mask\n      stat_response.st_mode |= stat.S_IFREG\n\n  if data_store.AFF4Enabled():\n    for stat_entry in stat_entries:\n      CreateAFF4Object(\n          stat_entry,\n          client_id_urn=rdf_client.ClientURN(client_id),\n          mutation_pool=mutation_pool,\n          token=token)\n\n  if data_store.RelationalDBEnabled():\n    path_infos = [rdf_objects.PathInfo.FromStatEntry(s) for s in stat_entries]\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    data_store.REL_DB.WritePathInfos(client_id,\n                                     _FilterOutPathInfoDuplicates(path_infos))", "docstring": "Persists information about stat entries.\n\nArgs:\nstat_entries: A list of `StatEntry` instances.\nclient_id: An id of a client the stat entries come from.\nmutation_pool: A mutation pool used for writing into the AFF4 data store.\ntoken: A token used for writing into the AFF4 data store.", "source": "juraj-google-style"}
{"code": "def __init__(self, filename=None):\n    \n    self._alphabet = set()\n    self.filename = filename\n    if filename is not None:\n      self._load_from_file(filename)\n    super(SubwordTextEncoder, self).__init__()", "docstring": "Initialize and read from a file, if provided.\n\nArgs:\nfilename: filename from which to read vocab. If None, do not load a\nvocab", "source": "juraj-google-style"}
{"code": "def get_grappler_config(optimizers_list):\n    config = _config_pb2.ConfigProto()\n    rewrite_options = config.graph_options.rewrite_options\n    for optimizer in optimizers_list:\n        rewrite_options.optimizers.append(optimizer)\n    return config", "docstring": "Creates a tf.compat.v1.ConfigProto for configuring Grappler.\n\nArgs:\noptimizers_list: List of strings that represents the list of optimizers.\n\nReturns:\ntf.ConfigProto.", "source": "github-repos"}
{"code": "def authenticate(self, username, password, service='login', encoding='utf-8', resetcreds=True):\n\n    @conv_func\n    def my_conv(n_messages, messages, p_response, app_data):\n        'Simple conversation function that responds to any\\n               prompt where the echo is off with the supplied password'\n        addr = calloc(n_messages, sizeof(PamResponse))\n        response = cast(addr, POINTER(PamResponse))\n        p_response[0] = response\n        for i in range(n_messages):\n            if (messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF):\n                dst = calloc((len(password) + 1), sizeof(c_char))\n                memmove(dst, cpassword, len(password))\n                response[i].resp = dst\n                response[i].resp_retcode = 0\n        return 0\n    if (sys.version_info >= (3,)):\n        if isinstance(username, str):\n            username = username.encode(encoding)\n        if isinstance(password, str):\n            password = password.encode(encoding)\n        if isinstance(service, str):\n            service = service.encode(encoding)\n    else:\n        if isinstance(username, unicode):\n            username = username.encode(encoding)\n        if isinstance(password, unicode):\n            password = password.encode(encoding)\n        if isinstance(service, unicode):\n            service = service.encode(encoding)\n    if ((b'\\x00' in username) or (b'\\x00' in password) or (b'\\x00' in service)):\n        self.code = 4\n        self.reason = 'strings may not contain NUL'\n        return False\n    cpassword = c_char_p(password)\n    handle = PamHandle()\n    conv = PamConv(my_conv, 0)\n    retval = pam_start(service, username, byref(conv), byref(handle))\n    if (retval != 0):\n        self.code = retval\n        self.reason = 'pam_start() failed'\n        return False\n    retval = pam_authenticate(handle, 0)\n    auth_success = (retval == 0)\n    if (auth_success and resetcreds):\n        retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)\n    self.code = retval\n    self.reason = pam_strerror(handle, retval)\n    if (sys.version_info >= (3,)):\n        self.reason = self.reason.decode(encoding)\n    if hasattr(libpam, 'pam_end'):\n        pam_end(handle, retval)\n    return auth_success", "docstring": "username and password authentication for the given service.\n\nReturns True for success, or False for failure.\n\nself.code (integer) and self.reason (string) are always stored and may\nbe referenced for the reason why authentication failed. 0/'Success' will\nbe stored for success.\n\nPython3 expects bytes() for ctypes inputs.  This function will make\nnecessary conversions using the supplied encoding.\n\nInputs:\nusername: username to authenticate\npassword: password in plain text\nservice:  PAM service to authenticate against, defaults to 'login'\n\nReturns:\nsuccess:  True\nfailure:  False", "source": "codesearchnet"}
{"code": "def run_query(self, view: views.View, limit: Optional[int]=None) -> bigquery.QueryJob:\n    return self._client.query(self.to_sql(view, limit=limit))", "docstring": "Runs query for the view and returns the corresponding BigQuery job.\n\nArgs:\nview: the view that defines the query to run.\nlimit: optional limit of the number of items to return.\n\nReturns:\nbigquery.QueryJob: the job for the running query.", "source": "github-repos"}
{"code": "def __init__(\n      self, file_object, member_start_offset, uncompressed_data_offset):\n    \n    self.comment = None\n    self.modification_time = None\n    self.operating_system = None\n    self.original_filename = None\n\n    \n    \n    self._cache_start_offset = None\n    \n    \n    self._cache_end_offset = None\n    self._cache = b''\n\n    \n    self.uncompressed_data_size = None\n    \n    \n    self.uncompressed_data_offset = uncompressed_data_offset\n\n    \n    self.member_start_offset = member_start_offset\n\n    \n    self._file_object = file_object\n    self._file_object.seek(self.member_start_offset, os.SEEK_SET)\n\n    self._ReadMemberHeader(file_object)\n    \n    self._compressed_data_start = file_object.get_offset()\n\n    self._decompressor_state = _GzipDecompressorState(\n        self._compressed_data_start)\n\n    self._LoadDataIntoCache(file_object, 0, read_all_data=True)\n\n    \n    self._ReadMemberFooter(file_object)\n\n    \n    self.member_end_offset = file_object.get_offset()", "docstring": "Initializes a gzip member.\n\nArgs:\nfile_object (FileIO): file-like object, containing the gzip member.\nmember_start_offset (int): offset to the beginning of the gzip member\nin the containing file.\nuncompressed_data_offset (int): current offset into the uncompressed data\nin the containing file.", "source": "juraj-google-style"}
{"code": "def union(self, other):\n        \n        union = Rect()\n        lib.SDL_UnionRect(self._ptr, other._ptr, union._ptr)\n        return union", "docstring": "Calculate the union of this rectangle and another rectangle.\n\nArgs:\nother (Rect): The other rectangle.\n\nReturns:\nRect: The union of this rectangle and the given other rectangle.", "source": "juraj-google-style"}
{"code": "def set_hyperparameters(self, hyperparameters):\n    for (block_name, block_hyperparams) in hyperparameters.items():\n        self.blocks[block_name].set_hyperparameters(block_hyperparams)", "docstring": "Set new hyperparameter values for some blocks.\n\nArgs:\nhyperparameters (dict): A dictionary containing the block names as\nkeys and the new hyperparameters dictionary\nas values.", "source": "codesearchnet"}
{"code": "def notify_batch_pending(self, batch):\n        \n        txn_ids = {t.header_signature for t in batch.transactions}\n        with self._lock:\n            self._pending.add(batch.header_signature)\n            self._batch_info[batch.header_signature] = txn_ids\n            self._update_observers(batch.header_signature,\n                                   ClientBatchStatus.PENDING)", "docstring": "Adds a Batch id to the pending cache, with its transaction ids.\n\nArgs:\nbatch (str): The id of the pending batch", "source": "juraj-google-style"}
{"code": "def __init__(self, min_bundle_size=0, desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE, use_fastavro=True, with_filename=False, label='ReadAllFiles'):\n    source_from_file = partial(_FastAvroSource, min_bundle_size=min_bundle_size)\n    self._read_all_files = filebasedsource.ReadAllFiles(True, CompressionTypes.AUTO, desired_bundle_size, min_bundle_size, source_from_file, with_filename)\n    self.label = label", "docstring": "Initializes ``ReadAllFromAvro``.\n\nArgs:\nmin_bundle_size: the minimum size in bytes, to be considered when\nsplitting the input into bundles.\ndesired_bundle_size: the desired size in bytes, to be considered when\nsplitting the input into bundles.\nuse_fastavro (bool): This flag is left for API backwards compatibility\nand no longer has an effect. Do not use.\nwith_filename: If True, returns a Key Value with the key being the file\nname and the value being the actual data. If False, it only returns\nthe data.", "source": "github-repos"}
{"code": "def _export_debug_info(exported_graph: ops.Graph, export_dir: str):\n    debug_builder = tf_stack.GraphDebugInfoBuilder()\n    for fn_name in exported_graph._functions:\n        fn = exported_graph._get_function(fn_name)\n        if not isinstance(fn, defun.AtomicFunction):\n            continue\n        debug_builder.AppendGraphDebugInfo(fn_name, fn.graph_debug_info)\n    graph_debug_info = debug_builder.Build()\n    file_io.atomic_write_string_to_file(file_io.join(path_helpers.get_or_create_debug_dir(export_dir), constants.DEBUG_INFO_FILENAME_PB), graph_debug_info.SerializeToString(deterministic=True))", "docstring": "Exports debug information from graph to file.\n\nCreates and writes GraphDebugInfo with traces for ops in all functions of the\nexported_graph.\n\nArgs:\nexported_graph: A Graph that has been created by tracing a saveable view.\nexport_dir: SavedModel directory in which to write the debug info.", "source": "github-repos"}
{"code": "def parse_command(command):\n    command = command.strip()\n    if not command:\n        return []\n    brackets_intervals = [f.span() for f in _BRACKETS_PATTERN.finditer(command)]\n    quotes_intervals = [f.span() for f in _QUOTES_PATTERN.finditer(command)]\n    whitespaces_intervals = [f.span() for f in _WHITESPACE_PATTERN.finditer(command)]\n    if not whitespaces_intervals:\n        return [command]\n    arguments = []\n    idx0 = 0\n    for start, end in whitespaces_intervals + [(len(command), None)]:\n        if not any((interval[0] < start < interval[1] for interval in brackets_intervals + quotes_intervals)):\n            argument = command[idx0:start]\n            if argument.startswith('\"') and argument.endswith('\"') or (argument.startswith(\"'\") and argument.endswith(\"'\")):\n                argument = argument[1:-1]\n            arguments.append(argument)\n            idx0 = end\n    return arguments", "docstring": "Parse command string into a list of arguments.\n\n- Disregards whitespace inside double quotes and brackets.\n- Strips paired leading and trailing double quotes in arguments.\n- Splits the command at whitespace.\n\nNested double quotes and brackets are not handled.\n\nArgs:\ncommand: (str) Input command.\n\nReturns:\n(list of str) List of arguments.", "source": "github-repos"}
{"code": "def _extract_nn_info(self, structure, nns):\n        \n\n        \n        if self.targets is None:\n            targets = structure.composition.elements\n        else:\n            targets = self.targets\n\n        \n        siw = []\n        max_weight = max(nn[self.weight] for nn in nns.values())\n        for nstats in nns.values():\n            site = nstats['site']\n            if nstats[self.weight] > self.tol * max_weight \\\n                    and self._is_in_targets(site, targets):\n                nn_info = {'site': site,\n                           'image': self._get_image(structure, site),\n                           'weight': nstats[self.weight] / max_weight,\n                           'site_index': self._get_original_site(\n                               structure, site)}\n\n                if self.extra_nn_info:\n                    \n                    poly_info = nstats\n                    del poly_info['site']\n                    nn_info['poly_info'] = poly_info\n                siw.append(nn_info)\n        return siw", "docstring": "Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors\n\nArgs:\nstructure (Structure): Structure being evaluated\nnns ([dicts]): Nearest neighbor information for a structure\nReturns:\n(list of tuples (Site, array, float)): See nn_info", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 nlp,\n                 tokenizer,\n                 extractor_name: str) -> None:\n        \n        Extractor.__init__(self,\n                           input_type=InputType.TEXT,\n                           category=\"build_in_extractor\",\n                           name=extractor_name)\n\n        self._nlp = copy.deepcopy(nlp)\n        self._like_email_matcher = Matcher(self._nlp.vocab)\n        self._tokenizer = tokenizer", "docstring": "Initialize the extractor, storing the rule information and construct spacy rules\nArgs:\nnlp:\ntokenizer: Tokenizer\nextractor_name: str\n\nReturns:", "source": "juraj-google-style"}
{"code": "def parse_arguments(argv):\n    parser = argparse.ArgumentParser(description='online-clustering')\n    parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')\n    parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)\n    args, _ = parser.parse_known_args(args=argv)\n    return args", "docstring": "It parses the arguments passed to the command line and returns them as an object\n\nArgs:\nargv: The arguments passed to the command line.\n\nReturns:\nThe arguments that are being passed in.", "source": "github-repos"}
{"code": "def _init_from_proto(self, context_def, import_scope=None):\n    assert isinstance(context_def, control_flow_pb2.WhileContextDef)\n    g = ops.get_default_graph()\n    self._name = ops.prepend_name_scope(context_def.context_name, import_scope)\n    if context_def.maximum_iterations_name:\n        self._maximum_iterations = g.as_graph_element(ops.prepend_name_scope(context_def.maximum_iterations_name, import_scope))\n    else:\n        self._maximum_iterations = None\n    self._parallel_iterations = context_def.parallel_iterations\n    self._back_prop = context_def.back_prop\n    self._swap_memory = context_def.swap_memory\n    self._pivot_for_pred = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope))\n    self._pivot_for_body = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope))\n    self._pivot = g.as_graph_element(ops.prepend_name_scope(context_def.pivot_name, import_scope))\n    self._loop_exits = [g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope)) for exit_name in context_def.loop_exit_names]\n    self._loop_enters = [g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope)) for enter_name in context_def.loop_enter_names]\n    super(WhileContext, self).__init__(values_def=context_def.values_def, import_scope=import_scope)\n    if import_scope:\n        for tensor_name in self._values:\n            op = g.as_graph_element(tensor_name).op\n            if util.IsLoopEnter(op):\n                op._set_attr('frame_name', attr_value_pb2.AttrValue(s=compat.as_bytes(self.name)))\n    self._graph = ops.get_default_graph()", "docstring": "Creates a new `WhileContext` from protocol buffer.\n\nArgs:\ncontext_def: `WhileContextDef` protocol buffer.\nimport_scope: Optional `string`. Name scope to add.", "source": "github-repos"}
{"code": "def _xys(date):\n    (X, Y, s_xy2) = _xysxy2(date)\n    (dX, dY) = ((date.eop.dx / 1000.0), (date.eop.dy / 1000.0))\n    X = np.radians(((X + dX) / 3600.0))\n    Y = np.radians(((Y + dY) / 3600.0))\n    s = (np.radians((s_xy2 / 3600.0)) - ((X * Y) / 2))\n    return (X, Y, s)", "docstring": "Get The X, Y and s coordinates\n\nArgs:\ndate (Date):\nReturn:\n3-tuple of float: Values of X, Y and s, in radians", "source": "codesearchnet"}
{"code": "def is_union(declaration):\n    \n    if not is_class(declaration):\n        return False\n    decl = class_traits.get_declaration(declaration)\n    return decl.class_type == class_declaration.CLASS_TYPES.UNION", "docstring": "Returns True if declaration represents a C++ union\n\nArgs:\ndeclaration (declaration_t): the declaration to be checked.\n\nReturns:\nbool: True if declaration represents a C++ union", "source": "juraj-google-style"}
{"code": "def reduce(self, initial_state, reduce_func):", "docstring": "Reduces this iterable object to a single element.\n\nThe transformation calls `reduce_func` successively on each element.\nThe `initial_state` argument is used for the initial state and the final\nstate is returned as the result.\n\nArgs:\ninitial_state: An element representing the initial state of the\nreduction.\nreduce_func: A function that maps `(old_state, input_element)` to\n`new_state`. The structure of `new_state` must match the structure of\n`old_state`. For the first element, `old_state` is `initial_state`.\n\nReturns:\nThe final state of the transformation.", "source": "github-repos"}
{"code": "def _key_for_namespace(namespace, app):\n  \n  if namespace:\n    return db.Key.from_path(metadata.Namespace.KIND_NAME,\n                            namespace,\n                            _app=app)\n  else:\n    return db.Key.from_path(metadata.Namespace.KIND_NAME,\n                            metadata.Namespace.EMPTY_NAMESPACE_ID,\n                            _app=app)", "docstring": "Return the __namespace__ key for a namespace.\n\nArgs:\nnamespace: The namespace whose key is requested.\napp: The id of the application that the key belongs to.\n\nReturns:\nA db.Key representing the namespace.", "source": "juraj-google-style"}
{"code": "def state_view_for_block(block_wrapper, state_view_factory):\n    state_root_hash = (block_wrapper.state_root_hash if (block_wrapper is not None) else None)\n    return state_view_factory.create_view(state_root_hash)", "docstring": "Returns the state view for an arbitrary block.\n\nArgs:\nblock_wrapper (BlockWrapper): The block for which a state\nview is to be returned\nstate_view_factory (StateViewFactory): The state view factory\nused to create the StateView object\n\nReturns:\nStateView object associated with the block", "source": "codesearchnet"}
{"code": "def delete(self, domain, type_name, search_command):\n    return self._request(domain, type_name, search_command, 'DELETE', None)", "docstring": "Delete entry in ThreatConnect Data Store\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.", "source": "codesearchnet"}
{"code": "def upsert(self, insert_index, val, fn=None):\n        \n        fn = fn or (lambda current, passed: passed)\n        self._magnitude = 0\n        position = self.position_for_index(insert_index)\n        if position < len(self.elements) and self.elements[position] == insert_index:\n            self.elements[position + 1] = fn(self.elements[position + 1], val)\n        else:\n            self.elements.insert(position, val)\n            self.elements.insert(position, insert_index)", "docstring": "Inserts or updates an existing index within the vector.\n\nArgs:\n- insert_index (int): The index at which the element should be\ninserted.\n- val (int|float): The value to be inserted into the vector.\n- fn (callable, optional): An optional callable taking two\narguments, the current value and the passed value to generate\nthe final inserted value at the position in case of collision.", "source": "juraj-google-style"}
{"code": "def __init__(self, c_list):\n        \n        c_list = [NthOrderElasticTensor(c, check_rank=4+i*2)\n                  for i, c in enumerate(c_list)]\n        super().__init__(c_list)", "docstring": "Initialization method for ElasticTensorExpansion\n\nArgs:\nc_list (list or tuple): sequence of Tensor inputs\nor tensors from which the elastic tensor\nexpansion is constructed.", "source": "juraj-google-style"}
{"code": "def _parameter_net(self, theta, kernel_shape=9):\n    with argscope(FullyConnected, nl=tf.nn.leaky_relu):\n        net = FullyConnected('fc1', theta, 64)\n        net = FullyConnected('fc2', net, 128)\n    pred_filter = FullyConnected('fc3', net, (kernel_shape ** 2), nl=tf.identity)\n    pred_filter = tf.reshape(pred_filter, [BATCH, kernel_shape, kernel_shape, 1], name='pred_filter')\n    logger.info('Parameter net output: {}'.format(pred_filter.get_shape().as_list()))\n    return pred_filter", "docstring": "Estimate filters for convolution layers\n\nArgs:\ntheta: angle of filter\nkernel_shape: size of each filter\n\nReturns:\nlearned filter as [B, k, k, 1]", "source": "codesearchnet"}
{"code": "def create_subscription(self, *, customer_id, credit_card_token, plan_code, quantity=None, installments=None, trial_days=None, immediate_payment=None, extra1=None, extra2=None, delivery_address=None, notify_url=None, recurring_bill_items=None):\n    payload = {'quantity': quantity, 'installments': installments, 'trialDays': trial_days, 'immediatePayment': immediate_payment, 'extra1': extra1, 'extra2': extra2, 'customer': {'id': customer_id, 'creditCards': [{'token': credit_card_token}]}, 'plan': {'planCode': plan_code}, 'deliveryAddress': delivery_address, 'notifyUrl': notify_url, 'recurringBillItems': recurring_bill_items}\n    return self.client._post((self.url + 'subscriptions'), json=payload, headers=self.get_headers())", "docstring": "Creating a new subscription of a client to a plan.\n\nArgs:\ncustomer_id: Customer that will be associated to the subscription.\nYou can find more information in the \"Customer\" section of this page.\n\ncredit_card_token: Customer's credit card that is selected to make the payment.\nYou can find more information in the \"Credit card\" section of this page.\n\nplan_code: Plan that will be associated to the subscription.\nYou can find more information in the \"Plan\" section of this page.\n\nquantity: Total amount of plans that will be acquired with the subscription.\nNumeric.\n\ninstallments: Total amount of installments to defer the payment.\nNumeric.\n\ntrial_days: Total amount of trial days of the subscription.\nThis variable has preference over the plan's trial days.\nNumeric.\n\nimmediate_payment:\n\nextra1:\n\nextra2:\n\ndelivery_address:\n\nnotify_url:\n\nrecurring_bill_items:\n\nReturns:", "source": "codesearchnet"}
{"code": "def _send_request(self, url, method='get', data=None, extra_headers=None):\n    headers = {'Content-type': 'application/json'}\n    if isinstance(extra_headers, dict):\n        headers.update(extra_headers)\n    if ((not data) or ('password' not in data)):\n        logger.debug('Sending {method} request to {url} with data {data}'.format(method=method.upper(), url=url, data=data))\n    r = self.session.request(method, url, headers=headers, data=data)\n    r.raise_for_status()\n    return r.json()", "docstring": "Performs a given request and returns a json object\n\nArgs:\nurl (str): URL of the request\nmethod (str): Any of \"get\", \"post\", \"delete\"\ndata (any): Possible extra data to send with the request\nextra_headers (dict): Possible extra headers to send along in the request\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def activate(self, experiment_key, user_id, attributes=None):\n    \n\n    if not self.is_valid:\n      self.logger.error(enums.Errors.INVALID_DATAFILE.format('activate'))\n      return None\n\n    if not validator.is_non_empty_string(experiment_key):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n      return None\n\n    if not isinstance(user_id, string_types):\n      self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n      return None\n\n    variation_key = self.get_variation(experiment_key, user_id, attributes)\n\n    if not variation_key:\n      self.logger.info('Not activating user \"%s\".' % user_id)\n      return None\n\n    experiment = self.config.get_experiment_from_key(experiment_key)\n    variation = self.config.get_variation_from_key(experiment_key, variation_key)\n\n    \n    self.logger.info('Activating user \"%s\" in experiment \"%s\".' % (user_id, experiment.key))\n    self._send_impression_event(experiment, variation, user_id, attributes)\n\n    return variation.key", "docstring": "Buckets visitor and sends impression event to Optimizely.\n\nArgs:\nexperiment_key: Experiment which needs to be activated.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nVariation key representing the variation the user will be bucketed in.\nNone if user is not in experiment or if experiment is not Running.", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data, **kwargs):\n        \n        \n        self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]\n        self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]\n        self.arguments['{DIRECTED}'] = 'TRUE'\n        self.arguments['{ALPHA}'] = str(self.alpha)\n        self.arguments['{NJOBS}'] = str(self.nb_jobs)\n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n\n        results = self._run_pc(data, verbose=self.verbose)\n\n        return nx.relabel_nodes(nx.DiGraph(results),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Run the PC algorithm.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by PC on the given data.", "source": "juraj-google-style"}
{"code": "def write_all_sequences_file(self, outname, outdir=None):\n        \n\n        if not outdir:\n            outdir = self.sequence_dir\n            if not outdir:\n                raise ValueError('Output directory must be specified')\n\n        outfile = op.join(outdir, outname + '.faa')\n        SeqIO.write(self.sequences, outfile, \"fasta\")\n\n        log.info('{}: wrote all protein sequences to file'.format(outfile))\n        return outfile", "docstring": "Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs.\n\nArgs:\noutname (str): Name of the output FASTA file without the extension\noutdir (str): Path to output directory for the file, default is the sequences directory", "source": "juraj-google-style"}
{"code": "def iter_packages(self, name, range_=None, paths=None):\n        \n        for package in iter_packages(name, range_, paths):\n            if not self.excludes(package):\n                yield package", "docstring": "Same as iter_packages in packages.py, but also applies this filter.\n\nArgs:\nname (str): Name of the package, eg 'maya'.\nrange_ (VersionRange or str): If provided, limits the versions returned\nto those in `range_`.\npaths (list of str, optional): paths to search for packages, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` iterator.", "source": "juraj-google-style"}
{"code": "def receive(self, length):\n        \n\n        \n        slipDriver = sliplib.Driver()\n\n        \n        ret = self._serialPort.read(length)\n\n        \n        temp = slipDriver.receive(ret)\n        return iter(temp)", "docstring": "Reads in data from a serial port (length bytes), decodes SLIP packets\n\nA function which reads from the serial port and then uses the SlipLib\nmodule to decode the SLIP protocol packets. Each message received\nis added to a receive buffer in SlipLib which is then returned.\n\nArgs:\nlength (int): Length to receive with serialPort.read(length)\n\nReturns:\nbytes: An iterator of the receive buffer", "source": "juraj-google-style"}
{"code": "def parse(ifp, pb_cls, **kwargs):\n    \n    mode = 'rb'\n    if isinstance(ifp, str):\n        istream = open(ifp, mode=mode, **kwargs)\n    else:\n        istream = open(fileobj=ifp, mode=mode, **kwargs)\n    with istream:\n        for data in istream:\n            pb_obj = pb_cls()\n            pb_obj.ParseFromString(data)\n            yield pb_obj", "docstring": "Parse a stream.\n\nArgs:\nifp (string or file-like object): input stream.\npb_cls (protobuf.message.Message.__class__): The class object of\nthe protobuf message type encoded in the stream.", "source": "juraj-google-style"}
{"code": "def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):\n    if isinstance(ts, ops.Graph):\n        if allow_graph:\n            return get_tensors(ts)\n        else:\n            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')\n    else:\n        if not is_iterable(ts):\n            ts = [ts]\n        if not ts:\n            return []\n        if check_graph:\n            check_types = None if ignore_ops else tensor_lib.Tensor\n            get_unique_graph(ts, check_types=check_types)\n        return [t for t in ts if isinstance(t, tensor_lib.Tensor)]", "docstring": "Convert ts to a list of `tf.Tensor`.\n\nArgs:\nts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor.\ncheck_graph: if `True` check if all the tensors belong to the same graph.\nallow_graph: if `False` a `tf.Graph` cannot be converted.\nignore_ops: if `True`, silently ignore `tf.Operation`.\nReturns:\nA newly created list of `tf.Tensor`.\nRaises:\nTypeError: if `ts` cannot be converted to a list of `tf.Tensor` or,\nif `check_graph` is `True`, if all the ops do not belong to the same graph.", "source": "github-repos"}
{"code": "def set_defaults(self, defaults):\n\n    def defaults_recurse(current, defaults):\n        \"Walk the current context tree in recursive inner function.\\n\\n            On 1st iteration, current = self (i.e root of context)\\n            On subsequent recursive iterations, current is wherever you're at\\n            in the nested context hierarchy.\\n\\n            Args:\\n                current: dict. Destination of merge.\\n                defaults: dict. Add this to current if keys don't exist\\n                                already.\\n\\n            \"\n        for (k, v) in defaults.items():\n            k = self.get_formatted_string(k)\n            if (k in current):\n                if types.are_all_this_type(Mapping, current[k], v):\n                    defaults_recurse(current[k], v)\n            else:\n                current[k] = self.get_formatted_iterable(v)\n    defaults_recurse(self, defaults)", "docstring": "Set defaults in context if keys do not exist already.\n\nAdds the input dict (defaults) into the context, only where keys in\ndefaults do not already exist in context. Supports nested hierarchies.\n\nExample:\nGiven a context like this:\nkey1: value1\nkey2:\nkey2.1: value2.1\nkey3: None\n\nAnd defaults input like this:\nkey1: 'updated value here won't overwrite since it already exists'\nkey2:\nkey2.2: value2.2\nkey3: 'key 3 exists so I won't overwrite\n\nWill result in context:\nkey1: value1\nkey2:\nkey2.1: value2.1\nkey2.2: value2.2\nkey3: None\n\nArgs:\ndefaults: dict. Add this dict into context.\n\nReturns:\nNone. All operations mutate this instance of context.", "source": "codesearchnet"}
{"code": "def __init__(self, credential=None):\n    self.credential = credential", "docstring": "Initializes FormatToQido.\nArgs:\ncredential: # type: Google credential object, if it is specified, the\nHttp client will use it instead of the default one.", "source": "github-repos"}
{"code": "def GetMessages(self, soft_size_limit=None):\n    with self._lock:\n        ret = rdf_flows.MessageList()\n        ret_size = 0\n        for message in self._Generate():\n            self._total_size -= len(message)\n            ret.job.append(rdf_flows.GrrMessage.FromSerializedString(message))\n            ret_size += len(message)\n            if ((soft_size_limit is not None) and (ret_size > soft_size_limit)):\n                break\n        return ret", "docstring": "Retrieves and removes the messages from the queue.\n\nArgs:\nsoft_size_limit: int If there is more data in the queue than\nsoft_size_limit bytes, the returned list of messages will be\napproximately this large. If None (default), returns all messages\ncurrently on the queue.\n\nReturns:\nrdf_flows.MessageList A list of messages that were .Put on the queue\nearlier.", "source": "codesearchnet"}
{"code": "def softsign(x):\n    if any_symbolic_tensors((x,)):\n        return Softsign().symbolic_call(x)\n    return backend.nn.softsign(x)", "docstring": "Softsign activation function.\n\nIt is defined as `f(x) = x / (abs(x) + 1)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-0.100, -10.0, 1.0, 0.0, 100.0])\n>>> keras.ops.softsign(x)\nArray([-0.09090909, -0.90909094, 0.5, 0.0, 0.990099], dtype=float32)", "source": "github-repos"}
{"code": "def WritePathHashHistory(self, client_path, hash_entries):\n    client_path_history = ClientPathHistory()\n    for (timestamp, hash_entry) in iteritems(hash_entries):\n        client_path_history.AddHashEntry(timestamp, hash_entry)\n    self.MultiWritePathHistory({client_path: client_path_history})", "docstring": "Writes a collection of `Hash` observed for particular path.\n\nArgs:\nclient_path: A `ClientPath` instance.\nhash_entries: A dictionary with timestamps as keys and `Hash` instances as\nvalues.", "source": "codesearchnet"}
{"code": "def recommendations(self, **kwargs):\n    path = self._get_id_path('recommendations')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get a list of recommended movies for a movie.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\npage: (optional) Minimum value of 1.  Expected value is an integer.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def _ReadStructureDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        error_message = 'data type not supported as member'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    return self._ReadDataTypeDefinitionWithMembers(definitions_registry, definition_values, data_types.StructureDefinition, definition_name, supports_conditions=True)", "docstring": "Reads a structure data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nStructureDefinition: structure data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def _remove_hdxobject(self, objlist, obj, matchon='id', delete=False):\n        \n        \n        if objlist is None:\n            return False\n        if isinstance(obj, six.string_types):\n            obj_id = obj\n        elif isinstance(obj, dict) or isinstance(obj, HDXObject):\n            obj_id = obj.get(matchon)\n        else:\n            raise HDXError('Type of object not a string, dict or T<=HDXObject')\n        if not obj_id:\n            return False\n        for i, objdata in enumerate(objlist):\n            objid = objdata.get(matchon)\n            if objid and objid == obj_id:\n                if delete:\n                    objlist[i].delete_from_hdx()\n                del objlist[i]\n                return True\n        return False", "docstring": "Remove an HDX object from a list within the parent HDX object\n\nArgs:\nobjlist (List[Union[T <= HDXObject,Dict]]): list of HDX objects\nobj (Union[T <= HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary\nmatchon (str): Field to match on. Defaults to id.\ndelete (bool): Whether to delete HDX object. Defaults to False.\n\nReturns:\nbool: True if object removed, False if not", "source": "juraj-google-style"}
{"code": "def print_math(math_expression_lst, name='math.html', out='html', formatter=(lambda x: x)):\n    try:\n        shutil.rmtree('viz')\n    except:\n        pass\n    pth = (get_cur_path() + print_math_template_path)\n    shutil.copytree(pth, 'viz')\n    html_loc = None\n    if (out == 'html'):\n        html_loc = (pth + 'standalone_index.html')\n    if (out == 'notebook'):\n        from IPython.display import display, HTML\n        html_loc = (pth + 'notebook_index.html')\n    html = open(html_loc).read()\n    html = html.replace('__MATH_LIST__', json.dumps(math_expression_lst))\n    if (out == 'notebook'):\n        display(HTML(html))\n    elif (out == 'html'):\n        with open(name, 'w+') as out_f:\n            out_f.write(html)", "docstring": "Converts LaTeX math expressions into an html layout.\nCreates a html file in the directory where print_math is called\nby default. Displays math to jupyter notebook if \"notebook\" argument\nis specified.\n\nArgs:\nmath_expression_lst (list):  A list of LaTeX math (string) to be rendered by KaTeX\nout (string): {\"html\"|\"notebook\"}: HTML by default. Specifies output medium.\nformatter (function): function that cleans up the string for KaTeX.\nReturns:\nA HTML file in the directory where this function is called, or displays\nHTML output in a notebook.", "source": "codesearchnet"}
{"code": "def dot(matrix, vector, matrix_ty, vector_ty):\n    weld_obj = WeldObject(encoder_, decoder_)\n    matrix_var = weld_obj.update(matrix)\n    if isinstance(matrix, WeldObject):\n        matrix_var = matrix.obj_id\n        weld_obj.dependencies[matrix_var] = matrix\n    vector_var = weld_obj.update(vector)\n    loopsize_annotation = ''\n    if isinstance(vector, WeldObject):\n        vector_var = vector.obj_id\n        weld_obj.dependencies[vector_var] = vector\n    if isinstance(vector, np.ndarray):\n        loopsize_annotation = ('@(loopsize: %dL)' % len(vector))\n    weld_template = '\\n       map(\\n         %(matrix)s,\\n         |row: vec[%(matrix_ty)s]|\\n           result(\\n             %(loopsize_annotation)s\\n             for(\\n               result(\\n                 %(loopsize_annotation)s\\n                 for(\\n                   zip(row, %(vector)s),\\n                   appender,\\n                   |b2, i2, e2: {%(matrix_ty)s, %(vector_ty)s}|\\n                     merge(b2, f64(e2.$0 * %(matrix_ty)s(e2.$1)))\\n                 )\\n               ),\\n               merger[f64,+],\\n               |b, i, e| merge(b, e)\\n             )\\n           )\\n       )\\n    '\n    weld_obj.weld_code = (weld_template % {'matrix': matrix_var, 'vector': vector_var, 'matrix_ty': matrix_ty, 'vector_ty': vector_ty, 'loopsize_annotation': loopsize_annotation})\n    return weld_obj", "docstring": "Computes the dot product between a matrix and a vector.\n\nArgs:\nmatrix (WeldObject / Numpy.ndarray): 2-d input matrix\nvector (WeldObject / Numpy.ndarray): 1-d input vector\nty (WeldType): Type of each element in the input matrix and vector\n\nReturns:\nA WeldObject representing this computation", "source": "codesearchnet"}
{"code": "def get_enterprise_customer_for_user(auth_user):\n    EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser')\n    try:\n        return EnterpriseCustomerUser.objects.get(user_id=auth_user.id).enterprise_customer\n    except EnterpriseCustomerUser.DoesNotExist:\n        return None", "docstring": "Return enterprise customer instance for given user.\n\nSome users are associated with an enterprise customer via `EnterpriseCustomerUser` model,\n1. if given user is associated with any enterprise customer, return enterprise customer.\n2. otherwise return `None`.\n\nArguments:\nauth_user (contrib.auth.User): Django User\n\nReturns:\n(EnterpriseCustomer): enterprise customer associated with the current user.", "source": "codesearchnet"}
{"code": "def boxify(message, border_color=None):\n    lines = message.split('\\n')\n    max_width = max((_visual_width(line) for line in lines))\n    padding_horizontal = 5\n    padding_vertical = 1\n    box_size_horizontal = (max_width + (padding_horizontal * 2))\n    chars = {'corner': '+', 'horizontal': '-', 'vertical': '|', 'empty': ' '}\n    margin = '{corner}{line}{corner}\\n'.format(corner=chars['corner'], line=(chars['horizontal'] * box_size_horizontal))\n    padding_lines = [('{border}{space}{border}\\n'.format(border=colorize(chars['vertical'], color=border_color), space=(chars['empty'] * box_size_horizontal)) * padding_vertical)]\n    content_lines = ['{border}{space}{content}{space}{border}\\n'.format(border=colorize(chars['vertical'], color=border_color), space=(chars['empty'] * padding_horizontal), content=_visual_center(line, max_width)) for line in lines]\n    box_str = '{margin}{padding}{content}{padding}{margin}'.format(margin=colorize(margin, color=border_color), padding=''.join(padding_lines), content=''.join(content_lines))\n    return box_str", "docstring": "Put a message inside a box.\n\nArgs:\nmessage (unicode): message to decorate.\nborder_color (unicode): name of the color to outline the box with.", "source": "codesearchnet"}
{"code": "def get_osdp(self, id_or_uri):\n    uri = self._client.build_subresource_uri(resource_id_or_uri=id_or_uri, subresource_path='osdp')\n    return self._client.get(uri)", "docstring": "Retrieves facts about Server Profiles and Server Profile Templates that are using Deployment Plan based on the ID or URI provided.\n\nArgs:\nid_or_uri: ID or URI of the Deployment Plan.\n\nReturns:\ndict: Server Profiles and Server Profile Templates", "source": "codesearchnet"}
{"code": "def on_predict_end(self, logs=None):", "docstring": "Called at the end of prediction.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"}
{"code": "def add_headers(vcf_obj, nr_cases=None, sv=False):\n    vcf_obj.add_info_to_header({'ID': 'Obs', 'Number': '1', 'Type': 'Integer', 'Description': 'The number of observations for the variant'})\n    if (not sv):\n        vcf_obj.add_info_to_header({'ID': 'Hom', 'Number': '1', 'Type': 'Integer', 'Description': 'The number of observed homozygotes'})\n        vcf_obj.add_info_to_header({'ID': 'Hem', 'Number': '1', 'Type': 'Integer', 'Description': 'The number of observed hemizygotes'})\n    if nr_cases:\n        case_header = '\n        vcf_obj.add_to_header(case_header)\n    return", "docstring": "Add loqus specific information to a VCF header\n\nArgs:\nvcf_obj(cyvcf2.VCF)", "source": "codesearchnet"}
{"code": "def _ParseCshVariables(self, lines):\n    paths = {}\n    for line in lines:\n        if (len(line) < 2):\n            continue\n        action = line[0]\n        if (action == 'setenv'):\n            target = line[1]\n            path_vals = []\n            if line[2:]:\n                path_vals = line[2].split(':')\n            self._ExpandPath(target, path_vals, paths)\n        elif (action == 'set'):\n            set_vals = self._CSH_SET_RE.search(' '.join(line[1:]))\n            if set_vals:\n                (target, vals) = set_vals.groups()\n                if (target in ('path', 'term', 'user')):\n                    target = target.upper()\n                path_vals = vals.split()\n                self._ExpandPath(target, path_vals, paths)\n    return paths", "docstring": "Extract env_var and path values from csh derivative shells.\n\nPath attributes can be set several ways:\n- setenv takes the form \"setenv PATH_NAME COLON:SEPARATED:LIST\"\n- set takes the form \"set path_name=(space separated list)\" and is\nautomatically exported for several types of files.\n\nThe first entry in each stanza is used to decide what context to use.\nOther entries are used to identify the path name and any assigned values.\n\nArgs:\nlines: A list of lines, each of which is a list of space separated words.\n\nReturns:\na dictionary of path names and values.", "source": "codesearchnet"}
{"code": "def find_all(self, collection):\n    obj = getattr(self.db, collection)\n    result = obj.find()\n    return result", "docstring": "Search a collection for all available items.\n\nArgs:\ncollection: The db collection. See main class documentation.\nReturns:\nList of all items in the collection.", "source": "codesearchnet"}
{"code": "def pre(fqdn, parent, stackdepth, *argl, **argd):\n    global _atdepth_call, _cstack_call\n    pcres = _pre_call(_atdepth_call, parent, fqdn, (stackdepth + 1), *argl, **argd)\n    (entry, _atdepth_call, reduced, bound, ekey) = pcres\n    _cstack_call.append(fqdn)\n    return (entry, bound, ekey)", "docstring": "Adds logging for a call to the specified function that is being handled\nby an external module.\n\nArgs:\nfqdn (str): fully-qualified domain name of the function being logged.\nparent: *object* that the function belongs to.\nstackdepth (int): maximum stack depth before entries are ignored.\nargl (list): positional arguments passed to the function call.\nargd (dict): keyword arguments passed to the function call.", "source": "codesearchnet"}
{"code": "def tail(self, n):\n    if (n < 0):\n        n = max(0, (len(self.index) + n))\n    if self._is_transposed:\n        result = self.__constructor__(self.data.transpose().take(1, (- n)).transpose(), self.index[(- n):], self.columns, self._dtype_cache)\n        result._is_transposed = True\n    else:\n        result = self.__constructor__(self.data.take(0, (- n)), self.index[(- n):], self.columns, self._dtype_cache)\n    return result", "docstring": "Returns the last n rows.\n\nArgs:\nn: Integer containing the number of rows to return.\n\nReturns:\nDataManager containing the last n rows of the original DataManager.", "source": "codesearchnet"}
{"code": "def __init__(self, api_key: str, config: interfaces.Config | None=None):\n    self._config = config or interfaces.Config()\n    self._p_genai_model = genai_model.GenaiModel(api_key=api_key, model_name=self._config.topic_generator_model_name, generate_content_config={'response_mime_type': 'application/json', 'response_schema': list[interfaces.Topic]})\n    self._num_topics = self._config.num_topics\n    preamble_content = [ProcessorPart(prompts.TOPIC_GENERATION_PREAMBLE), ProcessorPart(f)]\n    if self._config.excluded_topics:\n        preamble_content.append(ProcessorPart(f'Here is a list of topics that should be excluded: {self._config.excluded_topics}'))\n    preamble_content.append(ProcessorPart('You will now be provided with the user content.'))\n    p_preamble = preamble.Preamble(content=preamble_content)\n    p_suffix = preamble.Suffix(content=[ProcessorPart(f'Return your response as Topics JSON in the format below.\\n\\nYou MUST return exactly {self._config.num_topics} topics.\\n\\nTopic\\n  topic: str\\n  relationship_to_user_content: list[str]\\n\\nTopics\\n  list[Topic]\\n\\nYour JSON:\\n')])\n    self._pipeline = p_preamble + p_suffix + self._p_genai_model", "docstring": "Initializes the TopicGenerator.\n\nArgs:\napi_key: The API key to use for the GenAI API.\nconfig: The agent configuration.", "source": "github-repos"}
{"code": "def get_critical_compositions(self, comp1, comp2):\n        \n\n        n1 = comp1.num_atoms\n        n2 = comp2.num_atoms\n        pd_els = self.elements\n\n        \n        \n        c1 = self.pd_coords(comp1)\n        c2 = self.pd_coords(comp2)\n\n        \n        \n        if np.all(c1 == c2):\n            return [comp1.copy(), comp2.copy()]\n\n        intersections = [c1, c2]\n        for sc in self.simplexes:\n            intersections.extend(sc.line_intersection(c1, c2))\n        intersections = np.array(intersections)\n\n        \n        l = (c2 - c1)\n        l /= np.sum(l ** 2) ** 0.5\n        proj = np.dot(intersections - c1, l)\n\n        \n        proj = proj[np.logical_and(proj > -self.numerical_tol,\n                                   proj < proj[1] + self.numerical_tol)]\n        proj.sort()\n\n        \n        valid = np.ones(len(proj), dtype=np.bool)\n        valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol\n        proj = proj[valid]\n\n        ints = c1 + l * proj[:, None]\n        \n        cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,\n                             ints], axis=-1)\n        \n        x = proj / np.dot(c2 - c1, l)\n        \n        x_unnormalized = x * n1 / (n2 + x * (n1 - n2))\n        num_atoms = n1 + (n2 - n1) * x_unnormalized\n        cs *= num_atoms[:, None]\n        return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]", "docstring": "Get the critical compositions along the tieline between two\ncompositions. I.e. where the decomposition products change.\nThe endpoints are also returned.\nArgs:\ncomp1, comp2 (Composition): compositions that define the tieline\nReturns:\n[(Composition)]: list of critical compositions. All are of\nthe form x * comp1 + (1-x) * comp2", "source": "juraj-google-style"}
{"code": "def __init__(self, channel: 'EFBChannel', new_chats: Iterable[str] = tuple(),\n                 removed_chats: Iterable[str] = tuple(), modified_chats: Iterable[str] = tuple()):\n        \n        self.channel: 'EFBChannel' = channel\n        self.new_chats: Iterable[str] = new_chats\n        self.removed_chats: Iterable[str] = removed_chats\n        self.modified_chats: Iterable[str] = modified_chats\n        self.destination_channel: 'EFBChannel' = coordinator.master", "docstring": "__init__(channel: EFBChannel, new_chats: Iterable[str]=tuple(), removed_chats: Iterable[str]=tuple(), modified_chats: Iterable[str]=tuple())\n\nArgs:\nchannel (:obj:`.EFBChannel`): Slave channel that issues the update\nnew_chats (Optional[Iterable[str]]): Unique ID of new chats\nremoved_chats (Optional[Iterable[str]]): Unique ID of removed chats\nmodified_chats (Optional[Iterable[str]]): Unique ID of modified chats", "source": "juraj-google-style"}
{"code": "def GetHashData(hashable):\n        \n        ms = StreamManager.GetStream()\n        writer = BinaryWriter(ms)\n        hashable.SerializeUnsigned(writer)\n        ms.flush()\n        retVal = ms.ToArray()\n        StreamManager.ReleaseStream(ms)\n        return retVal", "docstring": "Get the data used for hashing.\n\nArgs:\nhashable (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin\n\nReturns:\nbytes:", "source": "juraj-google-style"}
{"code": "def __init__(self, auth_key, auth_secret):\n        \n\n        self._auth_key = auth_key\n        self._auth_secret = auth_secret", "docstring": "Create an authentication handler for HouseCanary API V1 requests\n\nArgs:\nauth_key (string) - The HouseCanary API auth key\nauth_secret (string) - The HouseCanary API secret", "source": "juraj-google-style"}
{"code": "def getPaddingNum(chars):\n    match = PRINTF_SYNTAX_PADDING_RE.match(chars)\n    if match:\n        return int(match.group(1))\n    try:\n        return sum([PAD_MAP[char] for char in chars])\n    except KeyError:\n        msg = 'Detected an unsupported padding character: \"{}\".'\n        msg += ' Supported padding characters: {} or printf syntax padding'\n        msg += ' %<int>d'\n        raise ValueError(msg.format(char, str(PAD_MAP.keys())))", "docstring": "Given a supported group of padding characters, return the amount of padding.\n\nArgs:\nchars (str): a supported group of padding characters\n\nReturns:\nint:\n\nRaises:\nValueError: if unsupported padding character is detected", "source": "codesearchnet"}
{"code": "def insecure_channel(target, options=None, *, loop=None, executor=None, standalone_pool_for_streaming=False):\n    return Channel(_grpc.insecure_channel(target, options), loop, executor, standalone_pool_for_streaming)", "docstring": "Creates an insecure Channel to a server.\n\nArgs:\ntarget: The server address\noptions: An optional list of key-value pairs (channel args in gRPC runtime)\nto configure the channel.\n\nReturns:\nA Channel object.", "source": "codesearchnet"}
{"code": "def read_uint64(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sQ' % endian), 8)", "docstring": "Read 8 bytes as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def add_device(self, device, container):\n    if (self.findtext('is_smart') == 'false'):\n        self.add_object_to_path(device, container)\n    else:\n        raise ValueError('Devices may not be added to smart groups.')", "docstring": "Add a device to a group. Wraps JSSObject.add_object_to_path.\n\nArgs:\ndevice: A JSSObject to add (as list data), to this object.\nlocation: Element or a string path argument to find()", "source": "codesearchnet"}
{"code": "def get_by_name(self, name):\n        \n        result = self.get_by(\"name\", name)\n\n        if result:\n            data = result[0]\n            new_resource = self.new(self._connection, data)\n        else:\n            new_resource = None\n\n        return new_resource", "docstring": "Retrieves a resource by its name.\n\nArgs:\nname: Resource name.\n\nReturns:\nResource object or None if resource does not exist.", "source": "juraj-google-style"}
{"code": "def drop_if_default(self, default):\n    self._default = default\n    self._drop_if_default = True\n    return self", "docstring": "The item should be dropped if its value is equal to its default.\n\nReturns:\nReturns self.", "source": "github-repos"}
{"code": "def really_unicode(in_string):\n    \n    if isinstance(in_string, StringType):\n        for args in (('utf-8',), ('latin-1',), ('ascii', 'replace')):\n            try:\n                \n                in_string = in_string.decode(*args)\n                break\n            except UnicodeDecodeError:\n                continue\n    if not isinstance(in_string, UnicodeType):\n        raise ValueError('%s is not a string at all.' % in_string)\n    return in_string", "docstring": "Make a string unicode. Really.\n\nEnsure ``in_string`` is returned as unicode through a series of\nprogressively relaxed decodings.\n\nArgs:\nin_string (str): The string to convert.\n\nReturns:\nstr: Unicode.\n\nRaises:\nValueError", "source": "juraj-google-style"}
{"code": "def make_lines_texture(num_lines=10, resolution=50):\n    \n    x, y = np.meshgrid(\n        np.hstack([np.linspace(0, 1, resolution), np.nan]),\n        np.linspace(0, 1, num_lines),\n    )\n    \n    y[np.isnan(x)] = np.nan\n    return x.flatten(), y.flatten()", "docstring": "Makes a texture consisting of a given number of horizontal lines.\n\nArgs:\nnum_lines (int): the number of lines to draw\nresolution (int): the number of midpoints on each line\n\nReturns:\nA texture.", "source": "juraj-google-style"}
{"code": "def is_constant_jacobian(self):\n    return self._is_constant_jacobian", "docstring": "Returns true iff the Jacobian matrix is not a function of x.\n\nNote: Jacobian matrix is either constant for both forward and inverse or\nneither.\n\nReturns:\nis_constant_jacobian: Python `bool`.", "source": "github-repos"}
{"code": "def get_local_config_filepath(config_filepath, force_local=False):\n    local_config_name = (path.basename(config_filepath).split('.')[0] + '_local.cfg')\n    local_config_filepath = path.join(path.split(config_filepath)[0], local_config_name)\n    real_config_filepath = ''\n    if (path.isfile(local_config_filepath) or force_local):\n        real_config_filepath = local_config_filepath\n    else:\n        real_config_filepath = config_filepath\n    return real_config_filepath", "docstring": "helper for finding local filepath for config\n\nArgs:\nconfig_filepath (str): path to local config abspath > relpath\nforce_local (bool): force return of _local.cfg version\n\nReturns:\nstr: Path to local config, or global if path DNE", "source": "codesearchnet"}
{"code": "def as_bool(self) -> bool:\n    if len(self._messages) != 1:\n        raise ValueError('FHIRPath did not evaluate to a single boolean.')\n    return proto_utils.get_value_at_field(self._messages[0], 'value')", "docstring": "Returns the result as a boolean.\n\nRaises:\nValueError if the `EvaluationResult` is not a single boolean.", "source": "github-repos"}
{"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return len(cls + token_ids_0 + sep) * [0]\n    return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"}
{"code": "def create_mutation_file(self, list_of_tuples):\n        \n\n        self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')\n\n        idx = 1\n\n        with open(self.mutation_infile, 'w') as f:\n            for mutant_group in list_of_tuples:\n                \n                mutstring = ''.join(list(map(lambda x: '{}{}{}{};'.format(x[0], x[1], x[2], x[3]), mutant_group)))\n                f.write(mutstring + '\\n')\n\n                \n                self.mutation_index_to_group[idx] = mutant_group\n                idx += 1", "docstring": "Create the FoldX file 'individual_list.txt' to run BuildModel upon.\n\nArgs:\nlist_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example::\n\n[\n(('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')),  # Mutation group 1\n(('S', 'A', 321, 'R'), ('T', 'A', 345, 'S'))  # Mutation group 2\n]", "source": "juraj-google-style"}
{"code": "def emit(self, name, *args, **kwargs):\n        \n        e = self.__property_events.get(name)\n        if e is None:\n            e = self.__events[name]\n        return e(*args, **kwargs)", "docstring": "Dispatches an event to any subscribed listeners\n\nNote:\nIf a listener returns :obj:`False`, the event will stop dispatching to\nother listeners. Any other return value is ignored.\n\nArgs:\nname (str): The name of the :class:`Event` to dispatch\n*args (Optional): Positional arguments to be sent to listeners\n**kwargs (Optional): Keyword arguments to be sent to listeners", "source": "juraj-google-style"}
{"code": "def items(self):\n    all_items = [(k.decode('utf-8'), v.decode('utf-8')) for (k, v) in self.rdb.hgetall(self.session_hash).items()]\n    return all_items", "docstring": "Return a list of all the key, value pair tuples in the dictionary.\n\nReturns:\nlist of tuples: [(key1,value1),(key2,value2),...,(keyN,valueN)]", "source": "codesearchnet"}
{"code": "def __init__(self, logger):\n    \n    self.logger = logger\n    self.oslogin_installed = True\n    self.update_time = 0", "docstring": "Constructor.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"}
{"code": "def _parse_dtype(self, space):\n    \n    if isinstance(space, gym.spaces.Discrete):\n      return tf.int32\n    if isinstance(space, gym.spaces.Box):\n      return tf.float32\n    raise NotImplementedError()", "docstring": "Get a tensor dtype from a OpenAI Gym space.\n\nArgs:\nspace: Gym space.\n\nRaises:\nNotImplementedError: For spaces other than Box and Discrete.\n\nReturns:\nTensorFlow data type.", "source": "juraj-google-style"}
{"code": "def next_power_of_2(x):\n    \n\n    power_of_2 = 1 if x == 0 else 2 ** np.ceil(np.log2(x))\n    return power_of_2", "docstring": "Finds the next power of 2 value\n\nArgs:\nx: Input value\n\nReturns:\npower_of_2: Next power of 2 value", "source": "juraj-google-style"}
{"code": "def get_developer_package(path, format=None):\n    from rez.developer_package import DeveloperPackage\n    return DeveloperPackage.from_path(path, format=format)", "docstring": "Create a developer package.\n\nArgs:\npath (str): Path to dir containing package definition file.\nformat (str): Package definition file format, detected if None.\n\nReturns:\n`DeveloperPackage`.", "source": "codesearchnet"}
{"code": "def argmax(x, axis=None, keepdims=False):\n    if any_symbolic_tensors((x,)):\n        return Argmax(axis=axis, keepdims=keepdims).symbolic_call(x)\n    return backend.numpy.argmax(x, axis=axis, keepdims=keepdims)", "docstring": "Returns the indices of the maximum values along an axis.\n\nArgs:\nx: Input tensor.\naxis: By default, the index is into the flattened tensor, otherwise\nalong the specified axis.\nkeepdims: If this is set to `True`, the axes which are reduced are left\nin the result as dimensions with size one. Defaults to `False`.\n\nReturns:\nTensor of indices. It has the same shape as `x`, with the dimension\nalong `axis` removed.\n\nExample:\n>>> x = keras.ops.arange(6).reshape(2, 3) + 10\n>>> x\narray([[10, 11, 12],\n[13, 14, 15]], dtype=int32)\n>>> keras.ops.argmax(x)\narray(5, dtype=int32)\n>>> keras.ops.argmax(x, axis=0)\narray([1, 1, 1], dtype=int32)\n>>> keras.ops.argmax(x, axis=1)\narray([2, 2], dtype=int32)", "source": "github-repos"}
{"code": "def send_invitation(self, invitation, **kwargs):\n        \n        \n        return self.email_message(\n            invitation.invitee_identifier,\n            self.invitation_subject,\n            self.invitation_body,\n            invitation.invited_by,\n            **kwargs\n        ).send()", "docstring": "Sends an invitation message for a specific invitation.\n\nThis could be overridden to do other things, such as sending a confirmation\nemail to the sender.\n\nArgs:\ninvitation:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def valid(self, value, level=[]):\n\t\t\n\n\t\t\n\t\tself.validation_failures = []\n\n\t\t\n\t\tif value is None and self._optional:\n\t\t\treturn True\n\n\t\t\n\t\tif not isinstance(value, dict):\n\t\t\tself.validation_failures.append(('.'.join(level), str(value)))\n\t\t\treturn False\n\n\t\t\n\t\tbRet = True\n\n\t\t\n\t\tfor k,v in iteritems(value):\n\n\t\t\t\n\t\t\tlLevel = level[:]\n\t\t\tlLevel.append(k)\n\n\t\t\t\n\t\t\tif not self._key.valid(k):\n\t\t\t\tself.validation_failures.append(('.'.join(lLevel), 'invalid key: %s' % str(k)))\n\t\t\t\tbRet = False\n\t\t\t\tcontinue\n\n\t\t\t\n\t\t\tif not self._node.valid(v, lLevel):\n\t\t\t\tself.validation_failures.extend(self._node.validation_failures)\n\t\t\t\tbRet = False\n\t\t\t\tcontinue\n\n\t\t\n\t\treturn bRet", "docstring": "Valid\n\nChecks if a value is valid based on the instance's values\n\nArguments:\nvalue {mixed} -- The value to validate\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def _add_sink_state(self, states):\n        \n        cleared = []\n        for i in range(0, 128):\n            cleared.append(-1)\n        states.append(cleared)", "docstring": "This function adds a sing state in the total states\nArgs:\nstates (list): The current states\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "class TokenSpan(NamedTuple):\n    start: int\n    end: int", "docstring": "Token span in an encoded string (list of tokens).\n\nArgs:\nstart (`int`): Index of the first token in the span.\nend (`int`): Index of the token following the last token in the span.", "source": "github-repos"}
{"code": "def diffuse_horizontal_illuminance(self, value=999999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `diffuse_horizontal_illuminance`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `diffuse_horizontal_illuminance`')\n\n        self._diffuse_horizontal_illuminance = value", "docstring": "Corresponds to IDD Field `diffuse_horizontal_illuminance`\nwill be missing if >= 999900\n\nArgs:\nvalue (float): value for IDD Field `diffuse_horizontal_illuminance`\nUnit: lux\nvalue >= 0.0\nMissing value: 999999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "async def count(self, text, opts=None):\n        \n        i = 0\n        async for _ in self.cell.eval(text, opts=opts, user=self.user):\n            i += 1\n        return i", "docstring": "Count the number of nodes which result from a storm query.\n\nArgs:\ntext (str): Storm query text.\nopts (dict): Storm query options.\n\nReturns:\n(int): The number of nodes resulting from the query.", "source": "juraj-google-style"}
{"code": "def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):\n    for tag in tags:\n        meta_graph_def.meta_info_def.tags.append(tag)\n    if signature_def_map is not None:\n        for key in signature_def_map:\n            meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])\n    proto_meta_graph_def = self._saved_model.meta_graphs.add()\n    proto_meta_graph_def.CopyFrom(meta_graph_def)", "docstring": "Tags the meta graph def and adds it to the SavedModel.\n\nTags the meta graph def with the supplied tags, adds signature defs to it if\nprovided and appends the meta graph def to the SavedModel proto.\n\nArgs:\nmeta_graph_def: The meta graph def to add to the SavedModel.\ntags: The set of tags to annotate the meta graph def with.\nsignature_def_map: The map of signature defs to be added to the meta graph\ndef.", "source": "github-repos"}
{"code": "def fetcher_with_object(cls, parent_object, relationship=\"child\"):\n        \n\n        fetcher = cls()\n        fetcher.parent_object = parent_object\n        fetcher.relationship = relationship\n\n        rest_name = cls.managed_object_rest_name()\n        parent_object.register_fetcher(fetcher, rest_name)\n\n        return fetcher", "docstring": "Register the fetcher for a served object.\n\nThis method will fill the fetcher with `managed_class` instances\n\nArgs:\nparent_object: the instance of the parent object to serve\n\nReturns:\nIt returns the fetcher instance.", "source": "juraj-google-style"}
{"code": "def base_http_parser():\n    base_parser = ArgumentParser(add_help=False)\n    base_parser.add_argument('--url', type=str, help=\"identify the URL of the validator's REST API (default: http:\n    base_parser.add_argument('-u', '--user', type=str, metavar='USERNAME[:PASSWORD]', help='specify the user to authorize request')\n    return base_parser", "docstring": "Creates a parser with arguments specific to sending an HTTP request\nto the REST API.\n\nReturns:\n{ArgumentParser}: Base parser with default HTTP args", "source": "codesearchnet"}
{"code": "def sasl_plain(self, name, password, identity=None):\n        \n        if identity is None:\n            identity = name\n\n        self.sasl('plain', name, password, identity)", "docstring": "Authenticate to a server using SASL plain, or does so on connection.\n\nArgs:\nname (str): Name to auth with.\npassword (str): Password to auth with.\nidentity (str): Identity to auth with (defaults to name).", "source": "juraj-google-style"}
{"code": "def set_colour(self, r, g, b):\n        \n        if not 0 <= r <= 255:\n            raise ValueError(\"The value for red needs to be between 0 and 255.\")\n        if not 0 <= g <= 255:\n            raise ValueError(\"The value for green needs to be between 0 and 255.\")\n        if not 0 <= b <= 255:\n            raise ValueError(\"The value for blue needs to be between 0 and 255.\")\n\n        \n        hexvalue = BulbDevice._rgb_to_hexvalue(r, g, b)\n\n        payload = self.generate_payload(SET, {\n            self.DPS_INDEX_MODE: self.DPS_MODE_COLOUR,\n            self.DPS_INDEX_COLOUR: hexvalue})\n        data = self._send_receive(payload)\n        return data", "docstring": "Set colour of an rgb bulb.\n\nArgs:\nr(int): Value for the colour red as int from 0-255.\ng(int): Value for the colour green as int from 0-255.\nb(int): Value for the colour blue as int from 0-255.", "source": "juraj-google-style"}
{"code": "def __sendCommand(self, cmd):\n        \n        logging.info('%s: sendCommand[%s]', self.port, cmd)\n        if self.logThreadStatus == self.logStatus['running']:\n            self.logThreadStatus = self.logStatus['pauseReq']\n            while self.logThreadStatus != self.logStatus['paused'] and self.logThreadStatus != self.logStatus['stop']:\n                pass\n\n        ssh_stdin = None\n        ssh_stdout = None\n        ssh_stderr = None\n        try:\n            \n            retry_times = 3\n            while retry_times > 0:\n                retry_times -= 1\n                try:\n                    if self._is_net:\n                        ssh_stdin, ssh_stdout, ssh_stderr = self.handle.exec_command(cmd)\n                    else:\n                        self._sendline(cmd)\n                        self._expect(cmd)\n                except Exception as e:\n                    logging.exception('%s: failed to send command[%s]: %s', self.port, cmd, str(e))\n                    if retry_times == 0:\n                        raise\n                else:\n                    break\n\n            line = None\n            response = []\n            retry_times = 20\n            stdout_lines = []\n            stderr_lines = []\n            if self._is_net:\n                stdout_lines = ssh_stdout.readlines()\n                stderr_lines = ssh_stderr.readlines()\n                if stderr_lines:\n                    for stderr_line in stderr_lines:\n                        if re.search(r'Not\\s+Found|failed\\s+with\\s+error', stderr_line.strip(), re.M | re.I):\n                            print \"Command failed:\" + stderr_line\n                            return 'Fail'\n                        print \"Got line: \" + stderr_line\n                        logging.info('%s: the read line is[%s]', self.port, stderr_line)\n                        response.append(str(stderr_line.strip()))\n                elif stdout_lines:\n                    for stdout_line in stdout_lines:\n                        logging.info('%s: the read line is[%s]', self.port, stdout_line)\n                        if re.search(r'Not\\s+Found|failed\\s+with\\s+error', stdout_line.strip(), re.M | re.I):\n                            print \"Command failed\"\n                            return 'Fail'\n                        print \"Got line: \" + stdout_line\n                        logging.info('%s: send command[%s] done!', self.port, cmd)\n                        response.append(str(stdout_line.strip()))\n                response.append(WPAN_CARRIER_PROMPT)\n                return response\n            else:\n                while retry_times > 0:\n                    line = self._readline()\n                    print \"read line: %s\" % line\n                    logging.info('%s: the read line is[%s]', self.port, line)\n                    if line:\n                        response.append(line)\n                        if re.match(WPAN_CARRIER_PROMPT, line):\n                            break\n                        elif re.search(r'Not\\s+Found|failed\\s+with\\s+error', line, re.M | re.I):\n                            print \"Command failed\"\n                            return 'Fail'\n\n                    retry_times -= 1\n                    time.sleep(0.1)\n\n                if retry_times == 0:\n                    raise Exception('%s: failed to find end of response' % self.port)\n                logging.info('%s: send command[%s] done!', self.port, cmd)\n                return response\n       
 except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('sendCommand() Error: ' + str(e))\n            raise", "docstring": "send specific command to reference unit over serial port\n\nArgs:\ncmd: OpenThread_WpanCtl command string\n\nReturns:\nFail: Failed to send the command to reference unit and parse it\nValue: successfully retrieve the desired value from reference unit\nError: some errors occur, indicates by the followed specific error number", "source": "juraj-google-style"}
{"code": "def date_to_integer(date):\n    \n    if pd and isinstance(date, pd.Timestamp):\n        try:\n            date = date.to_datetime64()\n        except:\n            date = date.to_datetime()\n\n    if isinstance(date, np.datetime64):\n        return date.astype('datetime64[ms]').astype(float)\n    elif isinstance(date, cftime_types):\n        return cftime_to_timestamp(date, 'ms')\n\n    if hasattr(date, 'timetuple'):\n        dt_int = calendar.timegm(date.timetuple())*1000\n    else:\n        raise ValueError('Datetime type not recognized')\n    return dt_int", "docstring": "Converts support date types to milliseconds since epoch\n\nAttempts highest precision conversion of different datetime\nformats to milliseconds since the epoch (1970-01-01 00:00:00).\nIf datetime is a cftime with a non-standard calendar the\ncaveats described in hv.core.util.cftime_to_timestamp apply.\n\nArgs:\ndate: Date- or datetime-like object\n\nReturns:\nMilliseconds since 1970-01-01 00:00:00", "source": "juraj-google-style"}
{"code": "def index_worker_output(self, worker_name, md5, index_name, subfield):\n        \n\n        \n        if subfield:\n            data = self.work_request(worker_name, md5)[worker_name][subfield]\n        else:\n            data = self.work_request(worker_name, md5)[worker_name]\n\n        \n        self.indexer.index_data(data, index_name=index_name, doc_type='unknown')", "docstring": "Index worker output with the Indexer.\nArgs:\nworker_name: 'strings', 'pe_features', whatever\nmd5: the md5 of the sample\nindex_name: the name of the index\nsubfield: index just this subfield (None for all)\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def download_from_s3(context):\n    target_file = context.solid_config['target_file']\n    return context.resources.download_manager.download_file_contents(context, target_file)", "docstring": "Download an object from s3.\n\nArgs:\ninfo (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.\n\nReturns:\nstr:\nThe path to the downloaded object.", "source": "codesearchnet"}
{"code": "def __init__(self, offset, size, extent_type=EXTENT_TYPE_DATA):\n    \n    super(VolumeExtent, self).__init__()\n    self.offset = offset\n    self.size = size\n    self.extent_type = extent_type", "docstring": "Initializes a volume extent.\n\nArgs:\noffset (int): start offset of the extent, in bytes.\nsize (int): size of the extent, in bytes.\nextent_type (Optional[str]): type of extent.", "source": "juraj-google-style"}
{"code": "def sample(self, hashes):\n        \n        api_name = 'opendns-sample'\n        fmt_url_path = u'sample/{0}'\n        return self._multi_get(api_name, fmt_url_path, hashes)", "docstring": "Get the information about a sample based on its hash.\n\nArgs:\nhashes: an enumerable of strings as hashes\nReturns:\nAn enumerable of arrays which contains the information\nabout the original samples", "source": "juraj-google-style"}
{"code": "def scrape_info(self, request, response, link_type=None):\n    info = {}\n    for scraper in self._document_scrapers:\n        scrape_result = scraper.scrape(request, response, link_type)\n        info[scraper] = scrape_result\n    return info", "docstring": "Iterate the scrapers and return a dict of results.\n\nReturns:\ndict: A dict where the keys are the scrapers instances and the\nvalues are the results. That is, a mapping from\n:class:`BaseDocumentScraper` to :class:`ScrapeResult`.", "source": "codesearchnet"}
{"code": "def _Insert(cursor, table, values):\n    precondition.AssertIterableType(values, dict)\n    if (not values):\n        return\n    column_names = list(sorted(values[0]))\n    for value_dict in values:\n        if (set(column_names) != set(value_dict)):\n            raise ValueError('Given value dictionaries must have identical keys. Expecting columns {!r}, but got value {!r}'.format(column_names, value_dict))\n    query = ('INSERT IGNORE INTO %s {cols} VALUES {vals}' % table)\n    query = query.format(cols=mysql_utils.Columns(column_names), vals=mysql_utils.Placeholders(num=len(column_names), values=len(values)))\n    values_list = []\n    for values_dict in values:\n        values_list.extend((values_dict[column] for column in column_names))\n    cursor.execute(query, values_list)", "docstring": "Inserts one or multiple rows into the given table.\n\nArgs:\ncursor: The MySQL cursor to perform the insertion.\ntable: The table name, where rows should be inserted.\nvalues: A list of dicts, associating column names to values.", "source": "codesearchnet"}
{"code": "def _zip_files(files, root):\n    zip_data = StringIO()\n    with ZipFile(zip_data, 'w', ZIP_DEFLATED) as zip_file:\n        for fname in files:\n            zip_file.write(os.path.join(root, fname), fname)\n        for zip_entry in zip_file.filelist:\n            perms = ((zip_entry.external_attr & ZIP_PERMS_MASK) >> 16)\n            if ((perms & stat.S_IXUSR) != 0):\n                new_perms = 493\n            else:\n                new_perms = 420\n            if (new_perms != perms):\n                logger.debug('lambda: fixing perms: %s: %o => %o', zip_entry.filename, perms, new_perms)\n                new_attr = ((zip_entry.external_attr & (~ ZIP_PERMS_MASK)) | (new_perms << 16))\n                zip_entry.external_attr = new_attr\n    contents = zip_data.getvalue()\n    zip_data.close()\n    content_hash = _calculate_hash(files, root)\n    return (contents, content_hash)", "docstring": "Generates a ZIP file in-memory from a list of files.\n\nFiles will be stored in the archive with relative names, and have their\nUNIX permissions forced to 755 or 644 (depending on whether they are\nuser-executable in the source filesystem).\n\nArgs:\nfiles (list[str]): file names to add to the archive, relative to\n``root``.\nroot (str): base directory to retrieve files from.\n\nReturns:\nstr: content of the ZIP file as a byte string.\nstr: A calculated hash of all the files.", "source": "codesearchnet"}
{"code": "def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False):\n    oslogin_configured = self._GetStatus(two_factor=False)\n    if (oslogin_configured is None):\n        return None\n    two_factor_configured = self._GetStatus(two_factor=True)\n    two_factor_desired = (two_factor_desired and oslogin_desired)\n    if oslogin_desired:\n        params = ['activate']\n        if two_factor_desired:\n            params += ['--twofactor']\n        if (not oslogin_configured):\n            self.logger.info('Activating OS Login.')\n            return (self._RunOsLoginControl(params) or self._RunOsLoginNssCache())\n        if (two_factor_desired and (not two_factor_configured)):\n            self.logger.info('Activating OS Login two factor authentication.')\n            return (self._RunOsLoginControl(params) or self._RunOsLoginNssCache())\n        if (two_factor_configured and (not two_factor_desired)):\n            self.logger.info('Reactivating OS Login with two factor disabled.')\n            return (self._RunOsLoginControl(['deactivate']) or self._RunOsLoginControl(params))\n        current_time = time.time()\n        if ((current_time - self.update_time) > NSS_CACHE_DURATION_SEC):\n            self.update_time = current_time\n            return self._RunOsLoginNssCache()\n    elif oslogin_configured:\n        self.logger.info('Deactivating OS Login.')\n        return (self._RunOsLoginControl(['deactivate']) or self._RemoveOsLoginNssCache())\n    return 0", "docstring": "Update whether OS Login is enabled and update NSS cache if necessary.\n\nArgs:\noslogin_desired: bool, enable OS Login if True, disable if False.\ntwo_factor_desired: bool, enable two factor if True, disable if False.\n\nReturns:\nint, the return code from updating OS Login, or None if not present.", "source": "codesearchnet"}
{"code": "def most_recent(path, startswith=None, endswith=None):\n    candidate_files = []\n    for filename in all_files_in_directory(path):\n        if (startswith and (not os.path.basename(filename).startswith(startswith))):\n            continue\n        if (endswith and (not filename.endswith(endswith))):\n            continue\n        candidate_files.append({'name': filename, 'modtime': os.path.getmtime(filename)})\n    most_recent = sorted(candidate_files, key=(lambda k: k['modtime']), reverse=True)\n    return (most_recent[0]['name'] if most_recent else None)", "docstring": "Recursively inspect all files under a directory and return the most recent\n\nArgs:\npath (str): the path of the directory to traverse\nstartswith (str): the file name start with (optional)\nendswith (str): the file name ends with (optional)\nReturns:\nthe most recent file within the subdirectory", "source": "codesearchnet"}
{"code": "def discover():\n    candidate_path = os.path.abspath(os.path.join(os.curdir, os.pardir, 'data'))\n    if os.path.exists(candidate_path):\n        return Project(os.path.abspath(os.path.join(candidate_path, os.pardir)))\n    candidate_path = os.path.abspath(os.path.join(os.curdir, 'data'))\n    if os.path.exists(candidate_path):\n        return Project(os.path.abspath(os.curdir))\n    candidate_path = os.path.abspath(os.path.join(os.curdir, os.pardir, 'data'))\n    if os.path.exists(candidate_path):\n        return Project(os.path.abspath(os.path.join(candidate_path, os.pardir, os.pardir)))\n    raise ValueError('Cannot discover the structure of the project. Make sure that the data directory exists')", "docstring": "Automatically discover the paths to various data folders in this project\nand compose a Project instance.\n\nReturns:\nA constructed Project object.\n\nRaises:\nValueError: if the paths could not be figured out automatically.\nIn this case, you have to create a Project manually using the initializer.", "source": "codesearchnet"}
{"code": "def DeregisterAnalyzer(cls, analyzer_class):\n    \n    analyzer_name = analyzer_class.NAME.lower()\n    if analyzer_name not in cls._analyzer_classes:\n      raise KeyError('analyzer class not set for name: {0:s}'.format(\n          analyzer_class.NAME))\n\n    del cls._analyzer_classes[analyzer_name]", "docstring": "Deregisters a analyzer class.\n\nThe analyzer classes are identified based on their lower case name.\n\nArgs:\nanalyzer_class (type): class object of the analyzer.\n\nRaises:\nKeyError: if analyzer class is not set for the corresponding name.", "source": "juraj-google-style"}
{"code": "def read(self, size=None):\n        \n        data = EMPTY\n\n        if size == 0:\n            return data\n\n        while True:\n            if size and len(data) >= size:\n                return data\n\n            if not self.buffer:\n                self._fetch()\n                if not self.buffer:\n                    \n                    return data\n\n            if size:\n                remaining = size - len(data)\n                data += self.buffer[:remaining]\n                self.buffer = self.buffer[remaining:]\n            else:\n                data += self.buffer\n                self.buffer = EMPTY", "docstring": "Read a chunk from rfile buffer and return it.\n\nArgs:\nsize (int): amount of data to read\n\nReturns:\nbytes: Chunk from rfile, limited by size if specified.", "source": "juraj-google-style"}
{"code": "def restore_variables(self, sess, saver, import_scope=None):\n    with sess.graph.as_default():\n        if saver is None and (not variables._all_saveable_objects(scope=import_scope)):\n            tf_logging.info('The specified SavedModel has no variables; no checkpoints were restored.')\n        elif isinstance(saver, tf_saver.Saver):\n            saver.restore(sess, self._variables_path)\n        else:\n            raise ValueError('No tf.train.Saver object was passed to the function `SavedModelLoader.restore_variables`. Since there are variables in the graph, a saver is required.')", "docstring": "Restore SavedModel variable values into the session.\n\nArgs:\nsess: tf.compat.v1.Session to restore variable values.\nsaver: a tf.compat.v1.train.Saver object. Can be None if there are no\nvariables in graph. This may be the saver returned by the load_graph()\nfunction, or a default `tf.compat.v1.train.Saver()`.\nimport_scope: Optional `string` -- if specified, prepend this string\nfollowed by '/' to all loaded tensor names. This scope is applied to\ntensor instances loaded into the passed session, but it is *not* written\nthrough to the static `MetaGraphDef` protocol buffer that is returned.\n\nRaises:\nValueError: if no saver was passed to the saver argument, and there are\nvariables in the graph.", "source": "github-repos"}
{"code": "def scan_chain_len(self, scan_chain):\n    res = self._dll.JLINKARM_MeasureSCLen(scan_chain)\n    if (res < 0):\n        raise errors.JLinkException(res)\n    return res", "docstring": "Retrieves and returns the number of bits in the scan chain.\n\nArgs:\nself (JLink): the ``JLink`` instance\nscan_chain (int): scan chain to be measured\n\nReturns:\nNumber of bits in the specified scan chain.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def get_properties_of_kind(kind, start=None, end=None):\n    q = Property.query(ancestor=Property.key_for_kind(kind))\n    if ((start is not None) and (start != '')):\n        q = q.filter((Property.key >= Property.key_for_property(kind, start)))\n    if (end is not None):\n        if (end == ''):\n            return []\n        q = q.filter((Property.key < Property.key_for_property(kind, end)))\n    return [Property.key_to_property(k) for k in q.iter(keys_only=True)]", "docstring": "Return all properties of kind in the specified range.\n\nNOTE: This function does not return unindexed properties.\n\nArgs:\nkind: name of kind whose properties you want.\nstart: only return properties >= start if start is not None.\nend: only return properties < end if end is not None.\n\nReturns:\nA list of property names of kind between the (optional) start and end\nvalues.", "source": "codesearchnet"}
{"code": "def resize_num_qa_labels(self, num_labels):\n    cur_qa_logit_layer = self.get_qa_logit_layer()\n    if num_labels is None or cur_qa_logit_layer is None:\n        return\n    new_qa_logit_layer = self._resize_qa_labels(num_labels)\n    self.config.num_qa_labels = num_labels\n    self.num_qa_labels = num_labels\n    return new_qa_logit_layer", "docstring": "Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size\nwill add newly initialized weights. Reducing the size will remove weights from the end\n\nArgs:\nnum_labels (`int`, *optional*):\nNew number of labels in the linear layer weight matrix. Increasing the size will add newly initialized\nweights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just\nreturns a pointer to the qa labels ``torch.nn.Linear``` module of the model without doing anything.\n\nReturn:\n`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer", "source": "github-repos"}
{"code": "def stats_per_key(self):\n    self.raise_error_if_not_open()\n    all_stats = {}\n    for (key, data) in self._file.items():\n        data = data[()]\n        all_stats[key] = stats.DataStats(float(np.mean(data)), float(np.var(data)), np.min(data), np.max(data), data.size)\n    return all_stats", "docstring": "Return statistics calculated for each key in the container.\n\nNote:\nThe feature container has to be opened in advance.\n\nReturns:\ndict: A dictionary containing a DataStats object for each key.", "source": "codesearchnet"}
{"code": "def vflip(img):\n    \n    if not _is_pil_image(img):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n    return img.transpose(Image.FLIP_TOP_BOTTOM)", "docstring": "Vertically flip the given PIL Image.\n\nArgs:\nimg (PIL Image): Image to be flipped.\n\nReturns:\nPIL Image:  Vertically flipped image.", "source": "juraj-google-style"}
{"code": "def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):\n    arguments = self.optimizer_arguments(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals)\n    return self.optimizer.minimize(**arguments)", "docstring": "Creates the TensorFlow operations for performing an optimization update step based\non the given input states and actions batch.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nactions: Dict of action tensors.\nterminal: Terminal boolean tensor.\nreward: Reward tensor.\nnext_states: Dict of successor state tensors.\nnext_internals: List of posterior internal state tensors.\n\nReturns:\nThe optimization operation.", "source": "codesearchnet"}
{"code": "def json_set_instructions(recipe, variables):\n    if 'script' in recipe:\n        if 'instructions' in recipe['script']:\n            try:\n                recipe['script']['instructions'] = [text_set_fields(instruction, variables) for instruction in recipe['script']['instructions']]\n            except KeyError:\n                pass", "docstring": "Replaces all fields in instructions with values provided.\n\nChecks if recipe['script']['instructions'] exist.  The replaces all %(???)s\nvariables\nwith values provided.  Note: %(???)s must match { \"field\":{ \"name\":\"???\" }}\nin JSON.\n\nArgs:\nrecipe: (dict) A dictionary representation of the JSON script.\nvariables: (dict) A lookup table of all values to be replaced, key is name\nof field.\n\nReturns:\nNothig. Instructions are modified in place.", "source": "github-repos"}
{"code": "def delete(self, instance):\n        \n        \n        \n        \n        \n        \n        \n        \n        \n        \n        self.backend.storage.remove(instance)\n        \n        return DeprovisionServiceSpec(False, \"done\")", "docstring": "Delete the instance\n\nArgs:\ninstance (AtlasServiceInstance.Instance): an existing instance\n\nReturns:\nDeprovisionServiceSpec: Status", "source": "juraj-google-style"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(EnumerationDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.values = []\n    self.values_per_alias = {}\n    self.values_per_name = {}\n    self.values_per_number = {}", "docstring": "Initializes an enumeration data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def sys_check_for_event(mask: int, k: Optional[Key], m: Optional[Mouse]) -> int:\n    return int(lib.TCOD_sys_check_for_event(mask, (k.key_p if k else ffi.NULL), (m.mouse_p if m else ffi.NULL)))", "docstring": "Check for and return an event.\n\nArgs:\nmask (int): :any:`Event types` to wait for.\nk (Optional[Key]): A tcod.Key instance which might be updated with\nan event.  Can be None.\nm (Optional[Mouse]): A tcod.Mouse instance which might be updated\nwith an event.  Can be None.\n\n.. deprecated:: 9.3\nUse the :any:`tcod.event.get` function to check for events.", "source": "codesearchnet"}
{"code": "def search_device_by_id(self, deviceID) -> Device:\n        \n        for d in self.devices:\n            if d.id == deviceID:\n                return d\n        return None", "docstring": "searches a device by given id\n\nArgs:\ndeviceID(str): the device to search for\n\nReturns\nthe Device object or None if it couldn't find a device", "source": "juraj-google-style"}
{"code": "def _should_catch_error(self, error, errors=()):\n        \n\n        caught_errors = (\n            errors or\n            self.session.driver.invalid_element_errors + (ElementNotFound,))\n\n        return isinstance(error, caught_errors)", "docstring": "Returns whether to catch the given error.\n\nArgs:\nerror (Exception): The error to consider.\nerrors (Tuple[Type[Exception], ...], optional): The exception types that should be\ncaught. Defaults to :class:`ElementNotFound` plus any driver-specific invalid\nelement errors.\n\nReturns:\nbool: Whether to catch the given error.", "source": "juraj-google-style"}
{"code": "def enable_cpu_offload(self, accelerator_id: Optional[int]=0, **kwargs):\n    if is_accelerate_available():\n        from accelerate import cpu_offload_with_hook\n    else:\n        raise ImportError('`enable_model_cpu_offload` requires `accelerate`.')\n    gpu_id = kwargs.get('gpu_id', 0)\n    if gpu_id != 0:\n        warnings.warn('The argument `gpu_id` is deprecated and will be removed in version 4.54.0 of Transformers. Please use `accelerator_id` instead.', FutureWarning)\n        accelerator_id = gpu_id\n    device_type = 'cuda'\n    if is_torch_accelerator_available():\n        device_type = torch.accelerator.current_accelerator().type\n    device = torch.device(f'{device_type}:{accelerator_id}')\n    torch_accelerator_module = getattr(torch, device_type)\n    if self.device.type != 'cpu':\n        self.to('cpu')\n        torch_accelerator_module.empty_cache()\n    self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device)\n    hook = None\n    for cpu_offloaded_model in [self.semantic, self.coarse_acoustics, self.fine_acoustics]:\n        _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n    self.fine_acoustics_hook = hook\n    _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook)\n    self.codec_model_hook = hook", "docstring": "Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This\nmethod moves one whole sub-model at a time to the accelerator when it is used, and the sub-model remains in accelerator until the next sub-model runs.\n\nArgs:\naccelerator_id (`int`, *optional*, defaults to 0):\naccelerator id on which the sub-models will be loaded and offloaded. This argument is deprecated.\nkwargs (`dict`, *optional*):\nadditional keyword arguments:\n`gpu_id`: accelerator id on which the sub-models will be loaded and offloaded.", "source": "github-repos"}
{"code": "def time2timestr(time, fmt='hhmmss'):\n    if (fmt.count(':') == 2):\n        if (not (fmt.index('h') < fmt.index('m') < fmt.index('s'))):\n            raise ValueError('Invalid format string. {}'.format(VALID_TIME_FORMATS_TEXT))\n        (h, m, s) = fmt.split(':')\n    elif (fmt.count(':') == 1):\n        if (not (fmt.index('h') < fmt.index('m'))):\n            raise ValueError('Invalid format string. {}'.format(VALID_TIME_FORMATS_TEXT))\n        (h, m) = fmt.split(':')\n        s = None\n    elif (any(((c not in 'hms') for c in fmt)) or (len(fmt) != 6)):\n        raise ValueError('Invalid character in format string. {}'.format(VALID_TIME_FORMATS_TEXT))\n    else:\n        if (not (fmt.index('h') < fmt.index('m') < fmt.index('s'))):\n            raise ValueError('Invalid format string. {}'.format(VALID_TIME_FORMATS_TEXT))\n        (h, m, s) = (fmt[:(- 4)], fmt[(- 4):(- 2)], fmt[(- 2):])\n    for (string, char) in ((h, 'h'), (m, 'm'), (s, 's')):\n        if ((string is not None) and any(((c != char) for c in string))):\n            raise ValueError('Invalid date format: {} is not {}'.format(char, string))\n    if (len(h) == 2):\n        fmt = fmt.replace('hh', '%H', 1)\n    elif (len(h) == 1):\n        fmt = fmt.replace('h', 'X%H', 1)\n    else:\n        raise ValueError('Invalid format string, hour must have 1 or 2 digits')\n    if (len(m) == 2):\n        fmt = fmt.replace('mm', '%M', 1)\n    else:\n        raise ValueError('Invalid format string, minutes must have 2 digits')\n    if ((s is not None) and (len(s) == 2)):\n        fmt = fmt.replace('ss', '%S', 1)\n    elif (s is not None):\n        raise ValueError('Invalid format string, seconds must have 2 digits')\n    return time.strftime(fmt).replace('X0', 'X').replace('X', '')", "docstring": "Turns a datetime.time object into a string. The string must have one of the\nformats from VALID_TIME_FORMATS_TEXT to make it compatible with\ntimestr2time.\n\nArgs:\ntime (datetime.time) the time to be translated\nfmt (str) a format string.\nReturns:\n(str) that represents a time.\nRaises:\nValueError if the format is not valid.", "source": "codesearchnet"}
{"code": "def Hash(self):\n    if (not self.__hash):\n        ba = bytearray(binascii.unhexlify(self.GetHashData()))\n        hash = Crypto.Hash256(ba)\n        self.__hash = UInt256(data=hash)\n    return self.__hash", "docstring": "Get the hash of the transaction.\n\nReturns:\nUInt256:", "source": "codesearchnet"}
{"code": "def _refresh_http(api_request, operation_name):\n    \n    path = \"operations/{}\".format(operation_name)\n    api_response = api_request(method=\"GET\", path=path)\n    return json_format.ParseDict(api_response, operations_pb2.Operation())", "docstring": "Refresh an operation using a JSON/HTTP client.\n\nArgs:\napi_request (Callable): A callable used to make an API request. This\nshould generally be\n:meth:`google.cloud._http.Connection.api_request`.\noperation_name (str): The name of the operation.\n\nReturns:\ngoogle.longrunning.operations_pb2.Operation: The operation.", "source": "juraj-google-style"}
{"code": "def __strip_tags(self, node: yaml.Node) -> None:\n    if isinstance(node, yaml.SequenceNode):\n        for subnode in node.value:\n            self.__strip_tags(subnode)\n    elif isinstance(node, yaml.MappingNode):\n        node.tag = 'tag:yaml.org,2002:map'\n        for (key_node, value_node) in node.value:\n            self.__strip_tags(key_node)\n            self.__strip_tags(value_node)", "docstring": "Strips tags from mappings in the tree headed by node.\n\nThis keeps yaml from constructing any objects in this tree.\n\nArgs:\nnode: Head of the tree to strip", "source": "codesearchnet"}
{"code": "def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n    temperatures = np.linspace(tmin, tmax, ntemp)\n    if self.structure:\n        ylabel = '$\\\\Delta F$ (kJ/mol)'\n    else:\n        ylabel = '$\\\\Delta F$ (kJ/mol-c)'\n    fig = self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim, factor=0.001, **kwargs)\n    return fig", "docstring": "Plots the vibrational contribution to the Helmoltz free energy in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "codesearchnet"}
{"code": "def experience(self, agent_indices, observ, action, reward, unused_done, unused_nextob):\n    with tf.name_scope('experience/'):\n        return tf.cond(self._is_training, (lambda : self._define_experience(agent_indices, observ, action, reward)), str)", "docstring": "Process the transition tuple of the current step.\n\nWhen training, add the current transition tuple to the memory and update\nthe streaming statistics for observations and rewards. A summary string is\nreturned if requested at this step.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\nobserv: Batch tensor of observations.\naction: Batch tensor of actions.\nreward: Batch tensor of rewards.\nunused_done: Batch tensor of done flags.\nunused_nextob: Batch tensor of successor observations.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def _CountClientStatisticByLabel(self, statistic, day_buckets, cursor):\n    \n    day_buckets = sorted(day_buckets)\n    sum_clauses = []\n    ping_cast_clauses = []\n    timestamp_buckets = []\n    now = rdfvalue.RDFDatetime.Now()\n\n    for day_bucket in day_buckets:\n      column_name = \"days_active_{}\".format(day_bucket)\n      sum_clauses.append(\n          \"CAST(SUM({0}) AS UNSIGNED) AS {0}\".format(column_name))\n      ping_cast_clauses.append(\n          \"CAST(c.last_ping > FROM_UNIXTIME(%s) AS UNSIGNED) AS {}\".format(\n              column_name))\n      timestamp_bucket = now - rdfvalue.Duration.FromDays(day_bucket)\n      timestamp_buckets.append(\n          mysql_utils.RDFDatetimeToTimestamp(timestamp_bucket))\n\n    query = .format(\n        statistic=statistic,\n        sum_clauses=\", \".join(sum_clauses),\n        ping_cast_clauses=\", \".join(ping_cast_clauses))\n\n    cursor.execute(query, timestamp_buckets)\n\n    counts = {}\n    for response_row in cursor.fetchall():\n      statistic_value, client_label = response_row[:2]\n      for i, num_actives in enumerate(response_row[2:]):\n        if num_actives <= 0:\n          continue\n        stats_key = (statistic_value, client_label, day_buckets[i])\n        counts[stats_key] = num_actives\n    return counts", "docstring": "Returns client-activity metrics for a given statistic.\n\nArgs:\nstatistic: The name of the statistic, which should also be a column in the\n'clients' table.\nday_buckets: A set of n-day-active buckets.\ncursor: MySQL cursor for executing queries.", "source": "juraj-google-style"}
{"code": "def overlap_and_add(signal, frame_step, name=None):\n    with ops.name_scope(name, 'overlap_and_add', [signal, frame_step]):\n        signal = ops.convert_to_tensor(signal, name='signal')\n        signal.shape.with_rank_at_least(2)\n        frame_step = ops.convert_to_tensor(frame_step, name='frame_step')\n        frame_step.shape.assert_has_rank(0)\n        if not frame_step.dtype.is_integer:\n            raise ValueError('frame_step must be an integer. Got %s' % frame_step.dtype)\n        frame_step_static = tensor_util.constant_value(frame_step)\n        frame_step_is_static = frame_step_static is not None\n        frame_step = frame_step_static if frame_step_is_static else frame_step\n        signal_shape = array_ops.shape(signal)\n        signal_shape_static = tensor_util.constant_value(signal_shape)\n        if signal_shape_static is not None:\n            signal_shape = signal_shape_static\n        outer_dimensions = signal_shape[:-2]\n        outer_rank = array_ops.size(outer_dimensions)\n        outer_rank_static = tensor_util.constant_value(outer_rank)\n        if outer_rank_static is not None:\n            outer_rank = outer_rank_static\n\n        def full_shape(inner_shape):\n            return array_ops.concat([outer_dimensions, inner_shape], 0)\n        frame_length = signal_shape[-1]\n        frames = signal_shape[-2]\n        output_length = frame_length + frame_step * (frames - 1)\n        if frame_step_is_static and signal.shape.dims is not None and (frame_step == signal.shape.dims[-1].value):\n            output_shape = full_shape([output_length])\n            return array_ops.reshape(signal, output_shape, name='fast_path')\n        segments = -(-frame_length \n        paddings = [[0, segments], [0, segments * frame_step - frame_length]]\n        outer_paddings = array_ops.zeros([outer_rank, 2], dtypes.int32)\n        paddings = array_ops.concat([outer_paddings, paddings], 0)\n        signal = array_ops.pad(signal, paddings)\n        shape = full_shape([frames + segments, segments, frame_step])\n        signal = array_ops.reshape(signal, shape)\n        perm = array_ops.concat([math_ops.range(outer_rank), outer_rank + [1, 0, 2]], 0)\n        perm_static = tensor_util.constant_value(perm)\n        perm = perm_static if perm_static is not None else perm\n        signal = array_ops.transpose(signal, perm)\n        shape = full_shape([(frames + segments) * segments, frame_step])\n        signal = array_ops.reshape(signal, shape)\n        signal = signal[..., :(frames + segments - 1) * segments, :]\n        shape = full_shape([segments, frames + segments - 1, frame_step])\n        signal = array_ops.reshape(signal, shape)\n        signal = math_ops.reduce_sum(signal, -3)\n        shape = full_shape([(frames + segments - 1) * frame_step])\n        signal = array_ops.reshape(signal, shape)\n        signal = signal[..., :output_length]\n        return signal", "docstring": "Reconstructs a signal from a framed representation.\n\nAdds potentially overlapping frames of a signal with shape\n`[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.\nThe resulting tensor has shape `[..., output_size]` where\n\noutput_size = (frames - 1) * frame_step + frame_length\n\nArgs:\nsignal: A [..., frames, frame_length] `Tensor`. All dimensions may be\nunknown, and rank must be at least 2.\nframe_step: An integer or scalar `Tensor` denoting overlap offsets. 
Must be\nless than or equal to `frame_length`.\nname: An optional name for the operation.\n\nReturns:\nA `Tensor` with shape `[..., output_size]` containing the overlap-added\nframes of `signal`'s inner-most two dimensions.\n\nRaises:\nValueError: If `signal`'s rank is less than 2, or `frame_step` is not a\nscalar integer.", "source": "github-repos"}
{"code": "def get_pyof_version(module_fullname):\n        \n        ver_module_re = re.compile(r'(pyof\\.)(v0x\\d+)(\\..*)')\n        matched = ver_module_re.match(module_fullname)\n        if matched:\n            version = matched.group(2)\n            return version\n        return None", "docstring": "Get the module pyof version based on the module fullname.\n\nArgs:\nmodule_fullname (str): The fullname of the module\n(e.g.: pyof.v0x01.common.header)\n\nReturns:\nstr: openflow version.\nThe openflow version, on the format 'v0x0?' if any. Or None\nif there isn't a version on the fullname.", "source": "juraj-google-style"}
{"code": "def prepare_partitions(self) -> t.Iterator[Config]:\n    for option in itertools.product(*[self.config.selection[key] for key in self.config.partition_keys]):\n        yield self._create_partition_config(option)", "docstring": "Iterate over client parameters, partitioning over `partition_keys`.\n\nThis produces a Cartesian-Cross over the range of keys.\n\nFor example, if the keys were 'year' and 'month', it would produce\nan iterable like:\n( ('2020', '01'), ('2020', '02'), ('2020', '03'), ...)\n\nReturns:\nAn iterator of `Config`s.", "source": "github-repos"}
{"code": "def _create_complete_graph(node_ids):\n    \n    g = nx.Graph()\n    g.add_nodes_from(node_ids)\n    for (i, j) in combinations(node_ids, 2):\n        g.add_edge(i, j)\n    return g", "docstring": "Create a complete graph from the list of node ids.\n\nArgs:\nnode_ids: a list of node ids\n\nReturns:\nAn undirected graph (as a networkx.Graph)", "source": "juraj-google-style"}
{"code": "def sort_by_timestamp(self, in_place=True):\n    timestamps, values = zip(*sorted(zip(self.timestamps, self.values)))\n    if not in_place:\n        return MetricContainer(values=values, timestamps=timestamps)\n    self.timestamps, self.values = zip(*sorted(zip(self.timestamps, self.values)))", "docstring": "Sorts the metric values and timestamps in ascending order wrt timestamps.\nArgs:\nin_place: If True, sort the metric values and timestamps in place.", "source": "github-repos"}
{"code": "def _ParseIndexTable(self, parser_mediator, file_system, file_entry, index_table):\n    path_segments = file_system.SplitPath(file_entry.path_spec.location)\n    data_block_files = {}\n    for cache_address in index_table:\n        if (cache_address.filename not in data_block_files):\n            path_segments.pop()\n            path_segments.append(cache_address.filename)\n            kwargs = {}\n            if file_entry.path_spec.parent:\n                kwargs['parent'] = file_entry.path_spec.parent\n            kwargs['location'] = file_system.JoinPath(path_segments)\n            data_block_file_path_spec = path_spec_factory.Factory.NewPathSpec(file_entry.path_spec.TYPE_INDICATOR, **kwargs)\n            try:\n                data_block_file_entry = path_spec_resolver.Resolver.OpenFileEntry(data_block_file_path_spec)\n            except RuntimeError as exception:\n                message = 'Unable to open data block file: {0:s} with error: {1!s}'.format(kwargs['location'], exception)\n                parser_mediator.ProduceExtractionWarning(message)\n                data_block_file_entry = None\n            if (not data_block_file_entry):\n                message = 'Missing data block file: {0:s}'.format(cache_address.filename)\n                parser_mediator.ProduceExtractionWarning(message)\n                data_block_file_object = None\n            else:\n                data_block_file_object = data_block_file_entry.GetFileObject()\n                try:\n                    self._data_block_file_parser.ParseFileObject(parser_mediator, data_block_file_object)\n                except (IOError, errors.ParseError) as exception:\n                    message = 'Unable to parse data block file: {0:s} with error: {1!s}'.format(cache_address.filename, exception)\n                    parser_mediator.ProduceExtractionWarning(message)\n                    data_block_file_object.close()\n                    data_block_file_object = None\n            data_block_files[cache_address.filename] = data_block_file_object\n    try:\n        self._ParseCacheEntries(parser_mediator, index_table, data_block_files)\n    finally:\n        for data_block_file_object in iter(data_block_files.values()):\n            if data_block_file_object:\n                data_block_file_object.close()", "docstring": "Parses a Chrome Cache index table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_system (dfvfs.FileSystem): file system.\nfile_entry (dfvfs.FileEntry): file entry.\nindex_table (list[CacheAddress]): the cache addresses which are stored in\nthe index file.", "source": "codesearchnet"}
{"code": "def _create_RSA_private_key(self, bytes):\n    try:\n        private_key = serialization.load_pem_private_key(bytes, password=None, backend=default_backend())\n        return private_key\n    except Exception:\n        private_key = serialization.load_der_private_key(bytes, password=None, backend=default_backend())\n        return private_key", "docstring": "Instantiates an RSA key from bytes.\n\nArgs:\nbytes (byte string): Bytes of RSA private key.\nReturns:\nprivate_key\n(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):\nRSA private key created from key bytes.", "source": "codesearchnet"}
{"code": "def parse_meta(filename, data):\n    if ('.' not in filename):\n        raise MetaParsingException((\"Can't recognize type of your metadata ('%s')!\" % filename))\n    suffix = filename.rsplit('.', 1)[1].lower()\n    if (suffix not in SUPPORTED_FILES):\n        raise MetaParsingException((\"Can't parse file of type '%s'!\" % suffix))\n    fp = validator.FieldParser()\n    for (key, val) in SUPPORTED_FILES[suffix](data).items():\n        fp.process(key, val)\n    return fp.get_epublication()", "docstring": "Parse `data` to EPublication.\n\nArgs:\nfilename (str): Used to choose right parser based at suffix.\ndata (str): Content of the metadata file.\n\nReturns:\nEPublication: object.", "source": "codesearchnet"}
{"code": "def ScriptHash(self):\n    if (self._scriptHash is None):\n        self._scriptHash = Crypto.ToScriptHash(self.Script, unhex=False)\n    return self._scriptHash", "docstring": "Get the script hash.\n\nReturns:\nUInt160:", "source": "codesearchnet"}
{"code": "def filepath(self):\n        \n        if hasattr(self, 'local_path'):\n            return self.local_path\n\n        if self.scheme in ['ftp', 'http', 'https', 'globus']:\n            return self.filename\n        elif self.scheme in ['file']:\n            return self.path\n        else:\n            raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))", "docstring": "Return the resolved filepath on the side where it is called from.\n\nThe appropriate filepath will be returned when called from within\nan app running remotely as well as regular python on the client side.\n\nArgs:\n- self\nReturns:\n- filepath (string)", "source": "juraj-google-style"}
{"code": "def select_action_key(self, next_action_arr, next_q_arr):\n        \n        epsilon_greedy_flag = bool(np.random.binomial(n=1, p=self.epsilon_greedy_rate))\n        if epsilon_greedy_flag is False:\n            key = np.random.randint(low=0, high=next_action_arr.shape[0])\n        else:\n            key = next_q_arr.argmax()\n\n        return key", "docstring": "Select action by Q(state, action).\n\nArgs:\nnext_action_arr:        `np.ndarray` of actions.\nnext_q_arr:             `np.ndarray` of Q-Values.\n\nRetruns:\n`np.ndarray` of keys.", "source": "juraj-google-style"}
{"code": "class EetqConfig(QuantizationConfigMixin):\n\n    def __init__(self, weights: str='int8', modules_to_not_convert: Optional[List]=None, **kwargs):\n        self.quant_method = QuantizationMethod.EETQ\n        self.weights = weights\n        self.modules_to_not_convert = modules_to_not_convert\n        self.post_init()\n\n    def post_init(self):\n        \n        accepted_weights = ['int8']\n        if self.weights not in accepted_weights:\n            raise ValueError(f'Only support weights in {accepted_weights} but found {self.weights}')", "docstring": "This is a wrapper class about all possible attributes and features that you can play with a model that has been\nloaded using `eetq`.\n\nArgs:\nweights (`str`, *optional*, defaults to `\"int8\"`):\nThe target dtype for the weights. Supported value is only \"int8\"\nmodules_to_not_convert (`list`, *optional*, default to `None`):\nThe list of modules to not quantize, useful for quantizing models that explicitly require to have\nsome modules left in their original precision.", "source": "github-repos"}
{"code": "def set_cookie(self, key, value, domain=None, path='/', secure=False, httponly=True):\n    self._cookies[key] = value\n    if domain:\n        self._cookies[key]['domain'] = domain\n    if path:\n        self._cookies[key]['path'] = path\n    if secure:\n        self._cookies[key]['secure'] = secure\n    if httponly:\n        self._cookies[key]['httponly'] = httponly", "docstring": "Set a cookie.\n\nArgs:\nkey (:obj:`str`): Cookie name\nvalue (:obj:`str`): Cookie value\ndomain (:obj:`str`): Cookie domain\npath (:obj:`str`): Cookie value\nsecure (:obj:`bool`): True if secure, False otherwise\nhttponly (:obj:`bool`): True if it's a HTTP only cookie, False\notherwise", "source": "codesearchnet"}
{"code": "def output_selector_schema(config_cls):\n    \n    config_type = resolve_config_cls_arg(config_cls)\n    check.param_invariant(config_type.is_selector, 'config_cls')\n\n    def _wrap(func):\n        def _selector(context, config_value, runtime_value):\n            selector_key, selector_value = single_item(config_value)\n            return func(context, selector_key, selector_value, runtime_value)\n\n        return _create_output_schema(config_type, _selector)\n\n    return _wrap", "docstring": "A decorator for a annotating a function that can take the selected properties\nof a ``config_value`` and an instance of a custom type and materialize it.\n\nArgs:\nconfig_cls (Selector):", "source": "juraj-google-style"}
{"code": "def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):\n  \n  assert x.shape.ndims == 1 + num_nonbatch_dims\n  new_shape = (\n      prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:])\n  assert None not in new_shape\n  if new_shape != x.shape.as_list():\n    x = tf.reshape(x, new_shape)\n  return x", "docstring": "Reverse op of _tf_flatten_batch_dims.\n\nUn-flatten the first dimension of x to match all but the last\nnum_nonbatch_dims dimensions of prototype.\n\nArgs:\nx: a tf.Tensor with 1 + num_nonbatch_dims dimensions\nnum_nonbatch_dims: an integer\nprototype: a tf.Tensor\n\nReturns:\na tf.Tensor", "source": "juraj-google-style"}
{"code": "def titles(self, unique=False):\n        \n        if unique:\n            return tools.uniqued(s.title for s in self._items)\n        return [s.title for s in self._items]", "docstring": "Return a list of contained worksheet titles.\n\nArgs:\nunique (bool): drop duplicates\nReturns:\nlist: list of titles/name strings", "source": "juraj-google-style"}
{"code": "def _parse_version(version):\n    parsed_version = parse_version(version)\n    return (tuple((int(dot_version) for dot_version in parsed_version.base_version.split('.'))) + (parsed_version.is_prerelease,))", "docstring": "Parse a version string.\n\nArgs:\nversion (str): A string representing a version e.g. '1.9rc2'\n\nReturns:\ntuple: major, minor, patch parts cast as integer and whether or not\nit was a pre-release version.", "source": "codesearchnet"}
{"code": "def mkp(*args, **kwargs):\n    \n    mk = kwargs.pop('mk', False)\n    path = os.sep.join(list(args))\n    if mk:\n        while sep2 in path:\n            path = path.replace(sep2, os.sep)\n        try:\n            os.makedirs(path)\n        except FileExistsError:\n            pass\n    return path", "docstring": "Generate a directory path, and create it if requested.\n\n.. code-block:: Python\n\nfilepath = mkp('base', 'folder', 'file')\ndirpath = mkp('root', 'path', 'folder', mk=True)\n\nArgs:\n\\*args: File or directory path segments to be concatenated\nmk (bool): Make the directory (if it doesn't exist)\n\nReturns:\npath (str): File or directory path", "source": "juraj-google-style"}
{"code": "def add_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n    control_deps = []\n    biz_days, is_bizday = self._to_biz_space(dt.convert_to_date_tensor(date_tensor).ordinal())\n    if roll_convention == constants.BusinessDayConvention.NONE:\n        control_deps.append(tf.debugging.assert_equal(is_bizday, True, message='Non business starting day with no roll convention.'))\n    with tf.compat.v1.control_dependencies(control_deps):\n        biz_days_rolled = self._apply_roll_biz_space(date_tensor, biz_days, is_bizday, roll_convention)\n        return dt.from_ordinals(self._from_biz_space(biz_days_rolled + num_days))", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `add_period_and_roll` with\nPeriodType.DAY. For example, adding 5 business days to Monday gives the next\nMonday (unless there are holidays on this week or next Monday). Adding 5\ndays and rolling means landing on Saturday and then rolling either to next\nMonday or to Friday of the same week, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the addition. If `roll_convention` is\n`NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\ndate_tensor: `DateTensor` of dates to advance from.\nnum_days: Tensor of int32 type broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting `DateTensor`.", "source": "github-repos"}
{"code": "def pivot(self, md5, tag=''):\n        \n\n        \n        ss = self.workbench.generate_sample_set(md5)\n        if ss:\n            tag = md5 if not tag else tag\n            md5 = ss\n\n        \n        if self.workbench.is_sample_set(md5):\n\n            \n            ss = self.workbench.get_sample_set(md5)\n            if len(ss) == 1:\n                md5 = ss[0]\n            deco = '(%s:%d)' % (tag, len(ss))\n            self.ipshell.push({'prompt_deco': deco})\n        else:\n            deco = '(%s:1)' % tag\n            self.ipshell.push({'prompt_deco': deco})\n\n        \n        self.session.md5 = md5\n        self.session.short_md5 = md5[:6]\n        self.ipshell.push({'md5': self.session.md5})\n        self.ipshell.push({'short_md5': self.session.short_md5})", "docstring": "Pivot on an md5 (md5 can be a single sample or a sample_set)\nArgs:\nmd5: The md5 can be a single sample or a sample_set\ntags (optional): a tag for the sample (for the prompt)\nReturns:\nNothing but it's sets the active sample/sample_set", "source": "juraj-google-style"}
{"code": "def _request(self, resource, action, data=None, headers=None):\n        \n        url, httpmethod = res_to_url(resource, action)\n        return self.ajax(url, httpmethod, data, headers)", "docstring": "Send request\n\nArgs:\nresource: resource\naction: action\ndata: string or object which can be json.dumps\nheaders: http headers", "source": "juraj-google-style"}
{"code": "def is_valid(self, addr, protocol='http', timeout=5):\n        \n        start = time.time()\n        try:\n            r = requests.get(self.test_url[protocol],\n                             timeout=timeout,\n                             proxies={protocol: 'http:\n        except KeyboardInterrupt:\n            raise\n        except requests.exceptions.Timeout:\n            return {'valid': False, 'msg': 'timeout'}\n        except:\n            return {'valid': False, 'msg': 'exception'}\n        else:\n            if r.status_code == 200:\n                response_time = time.time() - start\n                return {'valid': True, 'response_time': response_time}\n            else:\n                return {\n                    'valid': False,\n                    'msg': 'status code: {}'.format(r.status_code)\n                }", "docstring": "Check if a proxy is valid\n\nArgs:\naddr: A string in the form of 'ip:port'\nprotocol: Either 'http' or 'https', different test urls will be used\naccording to protocol.\ntimeout: A integer indicating the timeout of connecting the test url.\n\nReturns:\ndict: If the proxy is valid, returns {'valid': True, 'response_time': xx}\notherwise returns {'valid': False, 'msg': 'xxxxxx'}.", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        try:\n            return other and \\\n                self.id == other.id and \\\n                self.name == other.name and \\\n                self.profile_image_url == other.profile_image_url and \\\n                self.about == other.about and \\\n                self.website == other.website and \\\n                self.shakes == other.shakes\n        except AttributeError:\n            return False", "docstring": "Compare two user objects against one another.\n\nArgs:\nother (User): another User object against which to compare the\ncurrent user.", "source": "juraj-google-style"}
{"code": "def is_compatible_with(self, spec_or_tensor):\n    return super(TensorSpec, self).is_compatible_with(spec_or_tensor)", "docstring": "Returns True if spec_or_tensor is compatible with this TensorSpec.\n\nTwo tensors are considered compatible if they have the same dtype\nand their shapes are compatible (see `tf.TensorShape.is_compatible_with`).\n\nArgs:\nspec_or_tensor: A tf.TensorSpec or a tf.Tensor\n\nReturns:\nTrue if spec_or_tensor is compatible with self.", "source": "github-repos"}
{"code": "def getDiskSpace(self, file_path, upload_path = '', overwrite = False):\n        \n\n        self.checkAccount()\n\n        url = nurls['checkUpload']\n\n        file_size = os.stat(file_path).st_size\n        file_name = os.path.basename(file_path)\n\n        now = datetime.datetime.now().isoformat()\n\n        data = {'userid': self.user_id,\n                'useridx': self.useridx,\n                'getlastmodified': now,\n                'dstresource': upload_path + file_name,\n                'overwrite': overwrite,\n                'uploadsize': file_size,\n               }\n        r = self.session.post(nurls['getDiskSpace'], data = data)\n\n        return resultManager(r.text)", "docstring": "getDiskSpace\n\nArgs:\nfile_path: Full path for a file you want to checkUpload\nupload_path: Ndrive path where you want to upload file\nex) /Picture/\n\nReturns:\nTrue: Possible to upload a file with a given file_size\nFalse: Impossible to upload a file with a given file_size", "source": "juraj-google-style"}
{"code": "def _parse_validators(valids):\n    outvals = []\n    for val in valids:\n        if isinstance(val, str):\n            args = []\n        elif (len(val) > 1):\n            args = val[1:]\n            val = val[0]\n        else:\n            raise ValidationError('You must pass either an n-tuple or a string to define a validator', validator=val)\n        name = ('validate_%s' % str(val))\n        outvals.append((name, args))\n    return outvals", "docstring": "Parse a list of validator names or n-tuples, checking for errors.\n\nReturns:\nlist((func_name, [args...])): A list of validator function names and a\npotentially empty list of optional parameters for each function.", "source": "codesearchnet"}
{"code": "def make_dir(self, path, relative=False):\n        \n        if not relative:\n            path = self.relpath(path)\n        self._make_dir(self.get_client_kwargs(self.ensure_dir_path(\n            path, relative=True)))", "docstring": "Make a directory.\n\nArgs:\npath (str): Path or URL.\nrelative (bool): Path is relative to current root.", "source": "juraj-google-style"}
{"code": "def _validate_instantiation_options(self, datafile, skip_json_validation):\n    if ((not skip_json_validation) and (not validator.is_datafile_valid(datafile))):\n        raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('datafile'))\n    if (not validator.is_event_dispatcher_valid(self.event_dispatcher)):\n        raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('event_dispatcher'))\n    if (not validator.is_logger_valid(self.logger)):\n        raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('logger'))\n    if (not validator.is_error_handler_valid(self.error_handler)):\n        raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('error_handler'))", "docstring": "Helper method to validate all instantiation parameters.\n\nArgs:\ndatafile: JSON string representing the project.\nskip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.\n\nRaises:\nException if provided instantiation options are valid.", "source": "codesearchnet"}
{"code": "def get_forwarding_information_base(self, filter=''):\n    uri = '{}{}'.format(self.data['uri'], self.FORWARDING_INFORMATION_PATH)\n    return self._helper.get_collection(uri, filter=filter)", "docstring": "Gets the forwarding information base data for a logical interconnect. A maximum of 100 entries is returned.\nOptional filtering criteria might be specified.\n\nArgs:\nfilter (list or str):\nFiltering criteria may be specified using supported attributes: interconnectUri, macAddress,\ninternalVlan, externalVlan, and supported relation = (Equals). macAddress is 12 hexadecimal digits with\na colon between each pair of digits (upper case or lower case).\nThe default is no filter; all resources are returned.\n\nReturns:\nlist: A set of interconnect MAC address entries.", "source": "codesearchnet"}
{"code": "def load_pluggable_device_library(library_location):\n    if os.path.exists(library_location):\n        if os.path.isdir(library_location):\n            directory_contents = os.listdir(library_location)\n            pluggable_device_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)]\n        else:\n            pluggable_device_libraries = [library_location]\n        for lib in pluggable_device_libraries:\n            py_tf.TF_LoadPluggableDeviceLibrary(lib)\n        context.context().reinitialize_physical_devices()\n    else:\n        raise OSError(errno.ENOENT, 'The file or folder to load pluggable device libraries from does not exist.', library_location)", "docstring": "Loads a TensorFlow PluggableDevice plugin.\n\n\"library_location\" can be a path to a specific shared object, or a folder.\nIf it is a folder, all shared objects will be loaded. when the library is\nloaded, devices/kernels registered in the library via StreamExecutor C API\nand Kernel/Op Registration C API are made available in TensorFlow process.\n\nArgs:\nlibrary_location: Path to the plugin or folder of plugins. Relative or\nabsolute filesystem path to a dynamic library file or folder.\n\nRaises:\nOSError: When the file to be loaded is not found.\nRuntimeError: when unable to load the library.", "source": "github-repos"}
{"code": "def compile(self, ops):\n\n    def _compile():\n        code = []\n        for op in ops:\n            if isinstance(op, SyscallInvoke):\n                code.extend(self.syscall(op))\n            elif isinstance(op, LoadRegister):\n                code.extend(self.reg_load(op.register, op.value))\n            elif isinstance(op, str):\n                code.extend(op.split('\\n'))\n            else:\n                raise ValueError(('No idea how to assemble \"%s\"' % repr(op)))\n        return [('\\t%s' % line) for line in code]\n    _compile()\n    return ('\\n'.join(self.finalize(self.data_finalizer(_compile(), self.data))) + '\\n')", "docstring": "Translate a list of operations into its assembler source.\n\nArguments:\nops(list): A list of shellcode operations.\n\nReturns:\nstr: The assembler source code that implements the shellcode.", "source": "codesearchnet"}
{"code": "def _prepare_summary_table(rows):\n  \n  if not rows:\n    return []\n\n  \n  key_field = 'job-name'\n  if key_field not in rows[0]:\n    key_field = 'job-id'\n\n  \n  grouped = collections.defaultdict(lambda: collections.defaultdict(lambda: []))\n  for row in rows:\n    grouped[row.get(key_field, '')][row.get('status', '')] += [row]\n\n  \n  \n  new_rows = []\n  for job_key in sorted(grouped.keys()):\n    group = grouped.get(job_key, None)\n    canonical_status = ['RUNNING', 'SUCCESS', 'FAILURE', 'CANCEL']\n    \n    \n    for status in canonical_status + sorted(group.keys()):\n      if status not in group:\n        continue\n      task_count = len(group[status])\n      del group[status]\n      if task_count:\n        summary_row = collections.OrderedDict()\n        summary_row[key_field] = job_key\n        summary_row['status'] = status\n        summary_row['task-count'] = task_count\n        new_rows.append(summary_row)\n\n  return new_rows", "docstring": "Create a new table that is a summary of the input rows.\n\nAll with the same (job-name or job-id, status) go together.\n\nArgs:\nrows: the input rows, a list of dictionaries.\nReturns:\nA new row set of summary information.", "source": "juraj-google-style"}
{"code": "def write(self, inputdata):\n    if VERBOSE:\n        _print_out((('\\nDummy_serial: Writing to port. Given:' + repr(inputdata)) + '\\n'))\n    if (sys.version_info[0] > 2):\n        if (not (type(inputdata) == bytes)):\n            raise TypeError(('The input must be type bytes. Given:' + repr(inputdata)))\n        inputstring = str(inputdata, encoding='latin1')\n    else:\n        inputstring = inputdata\n    if (not self._isOpen):\n        raise IOError(('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata)))\n    try:\n        response = RESPONSES[inputstring]\n    except:\n        response = DEFAULT_RESPONSE\n    self._waiting_data = response", "docstring": "Write to a port on dummy_serial.\n\nArgs:\ninputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response\nfor subsequent read operations.\n\nNote that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.", "source": "codesearchnet"}
{"code": "def check_submission_successful(self, submission_id=None):\n    status = self.submission_status(submission_id)\n    success = bool(status['concordance']['value'])\n    return success", "docstring": "Check if the last submission passes submission criteria.\n\nArgs:\nsubmission_id (str, optional): submission of interest, defaults to\nthe last submission done with the account\n\nReturn:\nbool: True if the submission passed all checks, False otherwise.\n\nExample:\n>>> api = NumerAPI(secret_key=\"..\", public_id=\"..\")\n>>> api.upload_predictions(\"predictions.csv\")\n>>> api.check_submission_successful()\nTrue", "source": "codesearchnet"}
{"code": "def _get_image(structure, site):\n    original_site = structure[NearNeighbors._get_original_site(structure, site)]\n    image = np.around(np.subtract(site.frac_coords, original_site.frac_coords))\n    image = tuple(image.astype(int))\n    return image", "docstring": "Private convenience method for get_nn_info,\ngives lattice image from provided PeriodicSite and Structure.\n\nImage is defined as displacement from original site in structure to a given site.\ni.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).\nNote that this method takes O(number of sites) due to searching an original site.\n\nArgs:\nstructure: Structure Object\nsite: PeriodicSite Object\n\nReturns:\nimage: ((int)*3) Lattice image", "source": "codesearchnet"}
{"code": "def get_config(config_schema, env=None):\n    if (env is None):\n        env = os.environ\n    return parser.parse_env(config_schema, env)", "docstring": "Parse config from the environment against a given schema\n\nArgs:\nconfig_schema:\nA dictionary mapping keys in the environment to envpy Schema\nobjects describing the expected value.\nenv:\nAn optional dictionary used to override the environment rather\nthan getting it from the os.\n\nReturns:\nA dictionary which maps the values pulled from the environment and\nparsed against the given schema.\n\nRaises:\nMissingConfigError:\nA value in the schema with no default could not be found in the\nenvironment.\nParsingError:\nA value was found in the environment but could not be parsed into\nthe given value type.", "source": "codesearchnet"}
{"code": "def read_video_decord(video_path: str, sample_indices_fn: Optional[Callable]=None, **kwargs):\n    requires_backends(read_video_decord, ['decord'])\n    from decord import VideoReader, cpu\n    vr = VideoReader(uri=video_path, ctx=cpu(0))\n    video_fps = vr.get_avg_fps()\n    total_num_frames = len(vr)\n    duration = total_num_frames / video_fps if video_fps else 0\n    metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='decord')\n    indices = sample_indices_fn(metadata=metadata, **kwargs)\n    frames = vr.get_batch(indices).asnumpy()\n    metadata.frames_indices = indices\n    return (frames, metadata)", "docstring": "Decode a video using the Decord backend.\n\nArgs:\nvideo_path (`str`):\nPath to the video file.\nsample_indices_fn (`Callable`, *optional*):\nA callable function that will return indices at which the video should be sampled. If the video has to be loaded using\nby a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.\nIf not provided, simple uniform sampling with fps is performed.\nExample:\ndef sample_indices_fn(metadata, **kwargs):\nreturn np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)\n\nReturns:\nTuple[`np.array`, `VideoMetadata`]: A tuple containing:\n- Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).\n- `VideoMetadata` object.", "source": "github-repos"}
{"code": "def start_automated_run(path, automated_run_id):\n    \n    with functions.DBContextManager(path) as session:\n        automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()\n        if not automated_run:\n            raise exceptions.UserError('Automated run {} '\n                                       'does not exist'.format(automated_run_id))\n        automated_run.job_id = get_current_job().id\n        automated_run.job_status = 'started'\n\n        session.add(automated_run)\n        session.commit()\n\n        try:\n            if automated_run.category == 'bayes':\n                automatedruns.start_naive_bayes(automated_run, session, path)\n\n            elif automated_run.category == 'tpot':\n                automatedruns.start_tpot(automated_run, session, path)\n\n            elif automated_run.category == 'greedy_ensemble_search':\n                automatedruns.start_greedy_ensemble_search(automated_run, session, path)\n\n            else:\n                raise Exception('Something went wrong. Invalid category for automated run')\n\n            automated_run.job_status = 'finished'\n            session.add(automated_run)\n            session.commit()\n\n        except:\n            session.rollback()\n            automated_run.job_status = 'errored'\n            automated_run.description['error_type'] = repr(sys.exc_info()[0])\n            automated_run.description['error_value'] = repr(sys.exc_info()[1])\n            automated_run.description['error_traceback'] = \\\n                traceback.format_exception(*sys.exc_info())\n            session.add(automated_run)\n            session.commit()\n            raise", "docstring": "Starts automated run. This will automatically create\nbase learners until the run finishes or errors out.\n\nArgs:\npath (str): Path to Xcessiv notebook\n\nautomated_run_id (str): Automated Run ID", "source": "juraj-google-style"}
{"code": "def split(path):\n    filesystem = FileSystems.get_filesystem(path)\n    return filesystem.split(path)", "docstring": "Splits the given path into two parts.\n\nSplits the path into a pair (head, tail) such that tail contains the last\ncomponent of the path and head contains everything up to that.\n\nFor file-systems other than the local file-system, head should include the\nprefix.\n\nArgs:\npath: path as a string\nReturns:\na pair of path components as strings.", "source": "github-repos"}
{"code": "def forward(self, input, tokens_per_expert):\n    return sequential_experts_gemm(input, self.weight, tokens_per_expert.cpu())", "docstring": "Perform grouped matrix multiplication.\n\nArgs:\ninput (`torch.Tensor`):\nInput tensor of shape (num_tokens, in_features).\ntokens_per_expert (`torch.Tensor`):\nNumber of tokens assigned to each expert.\n\nReturns:\ntorch.Tensor: Output tensor of shape (num_tokens, out_features).", "source": "github-repos"}
{"code": "def add_argument(self, arg_name, arg_value):\n        \n\n        if len(self._employers) > 0:\n            self._logger.log(\n                'warn',\n                'Adding an argument after the employers have been created'\n            )\n        if self._args is None:\n            self._args = {}\n        self._args[arg_name] = arg_value", "docstring": "Add an additional argument to be passed to the fitness function\nvia additional arguments dictionary; this argument/value is not tuned\n\nArgs:\narg_name (string): name/dictionary key of argument\narg_value (any): dictionary value of argument", "source": "juraj-google-style"}
{"code": "def get_storage_account(access_token, subscription_id, rgname, account_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.Storage/storageAccounts/', account_name,\n                        '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "Get the properties for the named storage account.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new storage account.\n\nReturns:\nHTTP response. JSON body of storage account properties.", "source": "juraj-google-style"}
{"code": "def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, quantized_stats=None):\n    if '.' in tensor_name:\n        splits = tensor_name.split('.')\n        for split in splits[:-1]:\n            new_module = getattr(module, split)\n            if new_module is None:\n                raise ValueError(f'{module} has no attribute {split}.')\n            module = new_module\n        tensor_name = splits[-1]\n    if tensor_name not in module._parameters and tensor_name not in module._buffers:\n        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')\n    is_buffer = tensor_name in module._buffers\n    old_value = getattr(module, tensor_name)\n    if old_value.device == torch.device('meta') and device not in ['meta', torch.device('meta')] and (value is None):\n        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')\n    prequantized_loading = quantized_stats is not None\n    if is_buffer or not is_bitsandbytes_available():\n        is_8bit = False\n        is_4bit = False\n    else:\n        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)\n        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)\n    if is_8bit or is_4bit:\n        param = module._parameters[tensor_name]\n        if param.device.type != 'cuda':\n            if value is None:\n                new_value = old_value.to(device)\n            elif isinstance(value, torch.Tensor):\n                new_value = value.to('cpu')\n            else:\n                new_value = torch.tensor(value, device='cpu')\n            if issubclass(module.source_cls, Conv1D) and (not prequantized_loading):\n                new_value = new_value.T\n            kwargs = old_value.__dict__\n            if prequantized_loading != (new_value.dtype in (torch.int8, torch.uint8)):\n                raise ValueError(f'Value dtype `{new_value.dtype}` is not compatible with parameter quantization status.')\n            if is_8bit:\n                is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse('0.37.2')\n                if new_value.dtype in (torch.int8, torch.uint8) and (not is_8bit_serializable):\n                    raise ValueError('Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.')\n                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)\n                if prequantized_loading:\n                    setattr(new_value, 'SCB', quantized_stats['SCB'].to(device))\n            elif is_4bit:\n                if prequantized_loading:\n                    is_4bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse('0.41.3')\n                    if new_value.dtype in (torch.int8, torch.uint8) and (not is_4bit_serializable):\n                        raise ValueError('Detected 4-bit weights but the version of bitsandbytes is not compatible with 4-bit serialization. Make sure to download the latest `bitsandbytes` version. 
`pip install --upgrade bitsandbytes`.')\n                    new_value = bnb.nn.Params4bit.from_prequantized(data=new_value, quantized_stats=quantized_stats, requires_grad=False, device=device, **kwargs)\n                else:\n                    new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)\n            module._parameters[tensor_name] = new_value\n    else:\n        if value is None:\n            new_value = old_value.to(device)\n        elif isinstance(value, torch.Tensor):\n            new_value = value.to(device)\n        else:\n            new_value = torch.tensor(value, device=device)\n        if is_buffer:\n            module._buffers[tensor_name] = new_value\n        else:\n            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)\n            module._parameters[tensor_name] = new_value", "docstring": "A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing\n`param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The\nfunction is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the\nclass `Int8Params` from `bitsandbytes`.\n\nArgs:\nmodule (`torch.nn.Module`):\nThe module in which the tensor we want to move lives.\ntensor_name (`str`):\nThe full name of the parameter/buffer.\ndevice (`int`, `str` or `torch.device`):\nThe device on which to set the tensor.\nvalue (`torch.Tensor`, *optional*):\nThe value of the tensor (useful when going from the meta device to any other device).\nquantized_stats (`dict[str, Any]`, *optional*):\nDict with items for either 4-bit or 8-bit serialization", "source": "github-repos"}
{"code": "def set_position_i(self, ivalue):\n        \n        ivalue_msb = int(ivalue) >> 8\n        ivalue_lsb = int(ivalue) & 0xff\n\n        data = []\n        data.append(0x0B)\n        data.append(self.servoid)\n        data.append(RAM_WRITE_REQ)\n        data.append(POSITION_KI_RAM)\n        data.append(BYTE2)\n        data.append(ivalue_lsb)\n        data.append(ivalue_msb)\n        send_data(data)", "docstring": "Set the I gain of the position PID\n\nArgs:\nivalue (int): I value", "source": "juraj-google-style"}
{"code": "def decode(self, encoded):\n        \n        encoded = super().decode(encoded)\n\n        if encoded.numel() > 1:\n            raise ValueError(\n                '``decode`` decodes one label at a time, use ``batch_decode`` instead.')\n\n        return self.itos[encoded.squeeze().item()]", "docstring": "Decodes ``encoded`` label.\n\nArgs:\nencoded (torch.Tensor): Encoded label.\n\nReturns:\nobject: Label decoded from ``encoded``.", "source": "juraj-google-style"}
{"code": "def get(self):\n    with self._not_empty:\n        while not self._queue:\n            self._not_empty.wait()\n        item = self._queue.popleft()\n        self._not_full.notify()\n        return item", "docstring": "Remove and return an item from the queue.\n\nIf the queue is empty, blocks until an item is available.\n\nReturns:\nan item from the queue", "source": "github-repos"}
{"code": "def generate_multi_set_examples(options, test_sets):\n    _prepare_dir(options)\n    multi_gen_state = MultiGenState()\n    options.multi_gen_state = multi_gen_state\n    zip_path = os.path.join(options.output_path, options.zip_to_output)\n    with zipfile.PyZipFile(zip_path, 'w') as archive:\n        multi_gen_state.archive = archive\n        for test_name in test_sets:\n            new_options = copy.copy(options)\n            multi_gen_state.test_name = re.sub('(_(|with-flex|forward-compat|mlir-quant))?$', '', test_name, count=1)\n            multi_gen_state.label_base_path = os.path.join(os.path.dirname(zip_path), test_name + '.zip')\n            generate_examples(new_options)\n        zipinfo = zipfile.ZipInfo('manifest.txt')\n        archive.writestr(zipinfo, ''.join(multi_gen_state.zip_manifest), zipfile.ZIP_DEFLATED)", "docstring": "Generate examples for test sets.\n\nArgs:\noptions: Options containing information to generate examples.\ntest_sets: List of the name of test sets to generate examples.", "source": "github-repos"}
{"code": "def upload_dict(s3_conn, s3_prefix, data_to_sync):\n    \n    bucket_name, prefix = split_s3_path(s3_prefix)\n    bucket = s3_conn.get_bucket(bucket_name)\n\n    for key, value in data_to_sync.items():\n        full_name = '{}/{}.json'.format(prefix, key)\n        s3_key = boto.s3.key.Key(\n            bucket=bucket,\n            name=full_name\n        )\n        logging.info('uploading key %s', full_name)\n        s3_key.set_contents_from_string(json.dumps(value))", "docstring": "Syncs a dictionary to an S3 bucket, serializing each value in the\ndictionary as a JSON file with the key as its name.\n\nArgs:\ns3_conn: (boto.s3.connection) an s3 connection\ns3_prefix: (str) the destination prefix\ndata_to_sync: (dict)", "source": "juraj-google-style"}
{"code": "def map_concepts_to_indicators(\n        self, n: int = 1, min_temporal_res: Optional[str] = None\n    ):\n        \n\n        for node in self.nodes(data=True):\n            query_parts = [\n                \"select Indicator from concept_to_indicator_mapping\",\n                f\"where `Concept` like '{node[0]}'\",\n            ]\n\n            \n            \n\n            query = \"  \".join(query_parts)\n            results = engine.execute(query)\n\n            if min_temporal_res is not None:\n                if min_temporal_res not in [\"month\"]:\n                    raise ValueError(\"min_temporal_res must be 'month'\")\n\n                vars_with_required_temporal_resolution = [\n                    r[0]\n                    for r in engine.execute(\n                        \"select distinct `Variable` from indicator where \"\n                        f\"`{min_temporal_res.capitalize()}` is not null\"\n                    )\n                ]\n                results = [\n                    r\n                    for r in results\n                    if r[0] in vars_with_required_temporal_resolution\n                ]\n\n            node[1][\"indicators\"] = {\n                x: Indicator(x, \"MITRE12\")\n                for x in [r[0] for r in take(n, results)]\n            }", "docstring": "Map each concept node in the AnalysisGraph instance to one or more\ntangible quantities, known as 'indicators'.\n\nArgs:\nn: Number of matches to keep\nmin_temporal_res: Minimum temporal resolution that the indicators\nmust have data for.", "source": "juraj-google-style"}
{"code": "def ready_op(self):\n    return self._ready_op", "docstring": "Return the Ready Op used by the supervisor.\n\nReturns:\nAn Op or `None`.", "source": "github-repos"}
{"code": "def GetTokenBalance(self, token, watch_only=0):\n        \n        total = Decimal(0)\n\n        if watch_only > 0:\n            for addr in self._watch_only:\n                balance = token.GetBalance(self, addr)\n                total += balance\n        else:\n            for contract in self._contracts.values():\n                balance = token.GetBalance(self, contract.Address)\n                total += balance\n        return total", "docstring": "Get the balance of the specified token.\n\nArgs:\ntoken (NEP5Token): an instance of type neo.Wallets.NEP5Token to get the balance from.\nwatch_only (bool): True, to limit to watch only wallets.\n\nReturns:\nDecimal: total balance for `token`.", "source": "juraj-google-style"}
{"code": "def predict(self, structure, icsd_vol=False):\n        \n\n        \n        std_x = np.std([site.specie.X for site in structure])\n        \n        sub_sites = []\n        \n        bp_dict = {}\n\n        for sp in list(structure.composition.keys()):\n            if sp.atomic_radius:\n                sub_sites.extend([site for site in structure\n                                  if site.specie == sp])\n            else:\n                warnings.warn(\"VolumePredictor: no atomic radius data for \"\n                              \"{}\".format(sp))\n\n            if sp.symbol not in bond_params:\n                warnings.warn(\"VolumePredictor: bond parameters not found, \"\n                              \"used atomic radii for {}\".format(sp))\n            else:\n                r, k = bond_params[sp.symbol][\"r\"], bond_params[sp.symbol][\"k\"]\n                bp_dict[sp] = float(r) + float(k) * std_x\n\n        \n        reduced_structure = Structure.from_sites(sub_sites)\n        smallest_ratio = None\n\n        for site1 in reduced_structure:\n            sp1 = site1.specie\n            neighbors = reduced_structure.get_neighbors(site1,\n                                                        sp1.atomic_radius +\n                                                        self.cutoff)\n\n            for site2, dist in neighbors:\n                sp2 = site2.specie\n\n                if sp1 in bp_dict and sp2 in bp_dict:\n                    expected_dist = bp_dict[sp1] + bp_dict[sp2]\n                else:\n                    expected_dist = sp1.atomic_radius + sp2.atomic_radius\n\n                if not smallest_ratio or dist / expected_dist < smallest_ratio:\n                    smallest_ratio = dist / expected_dist\n\n        if not smallest_ratio:\n            raise ValueError(\"Could not find any bonds within the given cutoff \"\n                             \"in this structure.\")\n\n        volume_factor = (1 / smallest_ratio) ** 3\n\n        \n        if icsd_vol:\n            volume_factor *= 1.05\n\n        if self.min_scaling:\n            volume_factor = max(self.min_scaling, volume_factor)\n        if self.max_scaling:\n            volume_factor = min(self.max_scaling, volume_factor)\n\n        return structure.volume * volume_factor", "docstring": "Given a structure, returns the predicted volume.\n\nArgs:\nstructure (Structure) : a crystal structure with an unknown volume.\nicsd_vol (bool) : True if the input structure's volume comes from\nICSD.\n\nReturns:\na float value of the predicted volume.", "source": "juraj-google-style"}
{"code": "def all_files(path_name, keyword='', ext='', full_path=True, has_date=False, date_fmt=DATE_FMT) -> list:\n    if (not os.path.exists(path=path_name)):\n        return []\n    path_name = path_name.replace('\\\\', '/')\n    if (keyword or ext):\n        keyword = (f'*{keyword}*' if keyword else '*')\n        if (not ext):\n            ext = '*'\n        files = sort_by_modified([f.replace('\\\\', '/') for f in glob.iglob(f'{path_name}/{keyword}.{ext}') if (os.path.isfile(f) and (f.replace('\\\\', '/').split('/')[(- 1)][0] != '~'))])\n    else:\n        files = sort_by_modified([f'{path_name}/{f}' for f in os.listdir(path=path_name) if (os.path.isfile(f'{path_name}/{f}') and (f[0] != '~'))])\n    if has_date:\n        files = filter_by_dates(files, date_fmt=date_fmt)\n    return (files if full_path else [f.split('/')[(- 1)] for f in files])", "docstring": "Search all files with criteria\nReturned list will be sorted by last modified\n\nArgs:\npath_name: full path name\nkeyword: keyword to search\next: file extensions, split by ','\nfull_path: whether return full path (default True)\nhas_date: whether has date in file name (default False)\ndate_fmt: date format to check for has_date parameter\n\nReturns:\nlist: all file names with criteria fulfilled", "source": "codesearchnet"}
{"code": "def depth_texture(self, size, data=None, *, samples=0, alignment=4) -> 'Texture':\n        \n\n        res = Texture.__new__(Texture)\n        res.mglo, res._glo = self.mglo.depth_texture(size, data, samples, alignment)\n        res._size = size\n        res._components = 1\n        res._samples = samples\n        res._dtype = 'f4'\n        res._depth = True\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Create a :py:class:`Texture` object.\n\nArgs:\nsize (tuple): The width and height of the texture.\ndata (bytes): Content of the texture.\n\nKeyword Args:\nsamples (int): The number of samples. Value 0 means no multisample format.\nalignment (int): The byte alignment 1, 2, 4 or 8.\n\nReturns:\n:py:class:`Texture` object", "source": "juraj-google-style"}
{"code": "def get_content(url, headers={}, decoded=True):\n    \n\n    logging.debug('get_content: %s' % url)\n\n    req = request.Request(url, headers=headers)\n    if cookies:\n        cookies.add_cookie_header(req)\n        req.headers.update(req.unredirected_hdrs)\n\n    response = urlopen_with_retry(req)\n    data = response.read()\n\n    \n    content_encoding = response.getheader('Content-Encoding')\n    if content_encoding == 'gzip':\n        data = ungzip(data)\n    elif content_encoding == 'deflate':\n        data = undeflate(data)\n\n    \n    if decoded:\n        charset = match1(\n            response.getheader('Content-Type', ''), r'charset=([\\w-]+)'\n        )\n        if charset is not None:\n            data = data.decode(charset, 'ignore')\n        else:\n            data = data.decode('utf-8', 'ignore')\n\n    return data", "docstring": "Gets the content of a URL via sending a HTTP GET request.\n\nArgs:\nurl: A URL.\nheaders: Request headers used by the client.\ndecoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\nReturns:\nThe content as a string.", "source": "juraj-google-style"}
{"code": "async def _run_and_verify(self, examples: List[Example]):\n    async with GRPCClient() as client:\n        await self._get_statuses(client, examples)\n        await self._verify_examples(client, examples, self._origin)", "docstring": "Run beam examples and keep their output.\n\nCall the backend to start code processing for the examples.\nThen receive code output.\n\nArgs:\nexamples: beam examples that should be run", "source": "github-repos"}
{"code": "class Constant(Initializer):\n\n    def __init__(self, value=0.0):\n        self.value = value\n\n    def __call__(self, shape, dtype=None):\n        dtype = standardize_dtype(dtype)\n        return ops.cast(self.value, dtype=dtype) * ops.ones(shape=shape, dtype=dtype)\n\n    def get_config(self):\n        return {'value': serialization_lib.serialize_keras_object(self.value)}\n\n    @classmethod\n    def from_config(cls, config):\n        value = serialization_lib.deserialize_keras_object(config['value'])\n        return cls(value)", "docstring": "Initializer that generates tensors with constant values.\n\nOnly scalar values are allowed.\nThe constant value provided must be convertible to the dtype requested\nwhen calling the initializer.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = Constant(10.)\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = Constant(10.)\n>>> layer = Dense(3, kernel_initializer=initializer)\n\nArgs:\nvalue: A Python scalar.", "source": "github-repos"}
{"code": "def get_range_tracker(self, start_position: Union[int, str, ObjectId]=None, stop_position: Union[int, str, ObjectId]=None) -> Union[_ObjectIdRangeTracker, OffsetRangeTracker, LexicographicKeyRangeTracker]:\n    start_position, stop_position = self._replace_none_positions(start_position, stop_position)\n    if isinstance(start_position, ObjectId):\n        return _ObjectIdRangeTracker(start_position, stop_position)\n    if isinstance(start_position, int):\n        return OffsetRangeTracker(start_position, stop_position)\n    if isinstance(start_position, str):\n        return LexicographicKeyRangeTracker(start_position, stop_position)\n    raise NotImplementedError(f'RangeTracker for {type(start_position)} not implemented!')", "docstring": "Returns a RangeTracker for a given position range depending on type.\n\nArgs:\nstart_position: starting position of the range. If 'None' default start\nposition of the source must be used.\nstop_position:  ending position of the range. If 'None' default stop\nposition of the source must be used.\nReturns:\na ``_ObjectIdRangeTracker``, ``OffsetRangeTracker``\nor ``LexicographicKeyRangeTracker`` depending on the given position range.", "source": "github-repos"}
{"code": "def route_method(method_name, extra_part=False):\n\n    def wrapper(callable_obj):\n        if (method_name.lower() not in DEFAULT_ROUTES):\n            raise HandlerHTTPMethodError('Invalid http method in method: {}'.format(method_name))\n        callable_obj.http_method = method_name.upper()\n        callable_obj.url_extra_part = (callable_obj.__name__ if extra_part else None)\n        return classmethod(callable_obj)\n    return wrapper", "docstring": "Custom handler routing decorator.\nSigns a web handler callable with the http method as attribute.\n\nArgs:\nmethod_name (str): HTTP method name (i.e GET, POST)\nextra_part (bool): Indicates if wrapped callable name should be a part\nof the actual endpoint.\n\nReturns:\nA wrapped handler callable.\n\nexamples:\n>>> @route_method('GET')\n... def method():\n...     return \"Hello!\"\n...\n>>> method.http_method\n'GET'\n>>> method.url_extra_part\nNone", "source": "codesearchnet"}
{"code": "def equals(self, rhs):\n    \n\n    try:\n      return round(rhs-self._float_value, self._places) == 0\n    except TypeError:\n      \n      return False", "docstring": "Check to see if RHS is almost equal to float_value\n\nArgs:\nrhs: the value to compare to float_value\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def extract_keywords_from_text(index_page, no_items=5):\n    index_page = MLStripper.strip_tags(index_page)\n    tokenized_index = TextBlob(index_page).lower()\n\n    def to_str(key):\n        if isinstance(key, unicode):\n            return key.encode('utf-8')\n        return key\n    present_keywords = [KEYWORDS_LOWER[key] for key in KEYWORDS_LOWER.keys() if ((len(key) > 3) and (key in tokenized_index))]\n\n    def to_source_string(key):\n        source = 'Keyword analysis'\n        try:\n            return SourceString(key, source)\n        except UnicodeEncodeError:\n            return SourceString(key.encode('utf-8'), source)\n    multi_keywords = [to_source_string(key) for key in present_keywords if (tokenized_index.words.count(key) >= 1)]\n    multi_keywords = sorted(multi_keywords, key=(lambda x: len(x)), reverse=True)\n    if (len(multi_keywords) > no_items):\n        return multi_keywords[:no_items]\n    return multi_keywords", "docstring": "Try to process text on the `index_page` deduce the keywords and then try\nto match them on the Aleph's dataset.\n\nFunction returns maximally `no_items` items, to prevent spamming the user.\n\nArgs:\nindex_page (str): Content of the page as UTF-8 string\nno_items (int, default 5): Number of items to return.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "codesearchnet"}
{"code": "async def runCmdLine(self, line):\n        \n        if self.echoline:\n            self.outp.printf(f'{self.cmdprompt}{line}')\n\n        ret = None\n\n        name = line.split(None, 1)[0]\n\n        cmdo = self.getCmdByName(name)\n        if cmdo is None:\n            self.printf('cmd not found: %s' % (name,))\n            return\n\n        try:\n\n            ret = await cmdo.runCmdLine(line)\n\n        except s_exc.CliFini:\n            await self.fini()\n\n        except asyncio.CancelledError:\n            self.printf('Cmd cancelled')\n\n        except Exception as e:\n            exctxt = traceback.format_exc()\n            self.printf(exctxt)\n            self.printf('error: %s' % e)\n\n        return ret", "docstring": "Run a single command line.\n\nArgs:\nline (str): Line to execute.\n\nExamples:\nExecute the 'woot' command with the 'help' switch:\n\nawait cli.runCmdLine('woot --help')\n\nReturns:\nobject: Arbitrary data from the cmd class.", "source": "juraj-google-style"}
{"code": "def pickle(self, path):\n    with open(os.path.expanduser(path), 'wb') as pickle:\n        cPickle.Pickler(pickle, cPickle.HIGHEST_PROTOCOL).dump(self)", "docstring": "Write objects to python pickle.\n\nPickling is Python's method for serializing/deserializing\nPython objects. This allows you to save a fully functional\nJSSObject to disk, and then load it later, without having to\nretrieve it from the JSS.\n\nThis method will pickle each item as it's current type; so\nJSSListData objects will be serialized as JSSListData, and\nJSSObjects as JSSObjects. If you want full data, do:\nmy_list.retrieve_all().pickle(\"filename\")\n\nArgs:\npath: String file path to the file you wish to (over)write.\nPath will have ~ expanded prior to opening.", "source": "codesearchnet"}
{"code": "def channels(self):\n    resp = self._rtm_client.get('v1/current_team.channels')\n    if resp.is_fail():\n        raise RTMServiceError('Failed to get channels of current team', resp)\n    return resp.data['result']", "docstring": "Gets channels of current team\n\nReturns:\nlist of Channel\n\nThrows:\nRTMServiceError when request failed", "source": "codesearchnet"}
{"code": "def read(self, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1') -> bytes:\n        \n\n        return self.mglo.read(viewport, components, attachment, alignment, dtype)", "docstring": "Read the content of the framebuffer.\n\nArgs:\nviewport (tuple): The viewport.\ncomponents (int): The number of components to read.\n\nKeyword Args:\nattachment (int): The color attachment.\nalignment (int): The byte alignment of the pixels.\ndtype (str): Data type.\n\nReturns:\nbytes", "source": "juraj-google-style"}
{"code": "def randomize_weights(model, random_seed=0, buffers_to_skip=None):\n    random.seed(random_seed)\n    buffers = model.buffers\n    buffer_ids = range(1, len(buffers))\n    if buffers_to_skip is not None:\n        buffer_ids = [idx for idx in buffer_ids if idx not in buffers_to_skip]\n    buffer_types = {}\n    for graph in model.subgraphs:\n        for op in graph.operators:\n            if op.inputs is None:\n                break\n            for input_idx in op.inputs:\n                tensor = graph.tensors[input_idx]\n                buffer_types[tensor.buffer] = type_to_name(tensor.type)\n    for i in buffer_ids:\n        buffer_i_data = buffers[i].data\n        buffer_i_size = 0 if buffer_i_data is None else buffer_i_data.size\n        if buffer_i_size == 0:\n            continue\n        buffer_type = buffer_types.get(i, 'INT8')\n        if buffer_type.startswith('FLOAT'):\n            format_code = 'e' if buffer_type == 'FLOAT16' else 'f'\n            for offset in range(0, buffer_i_size, struct.calcsize(format_code)):\n                value = random.uniform(-0.5, 0.5)\n                struct.pack_into(format_code, buffer_i_data, offset, value)\n        else:\n            for j in range(buffer_i_size):\n                buffer_i_data[j] = random.randint(0, 255)", "docstring": "Randomize weights in a model.\n\nArgs:\nmodel: The model in which to randomize weights.\nrandom_seed: The input to the random number generator (default value is 0).\nbuffers_to_skip: The list of buffer indices to skip. The weights in these\nbuffers are left unmodified.", "source": "github-repos"}
{"code": "def _CheckIsLink(self, file_entry):\n    \n    if definitions.FILE_ENTRY_TYPE_LINK not in self._file_entry_types:\n      return False\n    return file_entry.IsLink()", "docstring": "Checks the is_link find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "juraj-google-style"}
{"code": "def _WriteHeader(self, output_writer):\n    \n    header_string = ''\n    if self._title:\n      header_string = ' {0:s} '.format(self._title)\n\n    header_string = self._HEADER_FORMAT_STRING.format(header_string)\n    output_writer.Write(header_string)", "docstring": "Writes a header.\n\nArgs:\noutput_writer (OutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def InitFromAff4Object(self, aff4_obj):\n    \n    attr_blacklist = []  \n\n    self.types = []\n    for aff4_cls in aff4_obj.__class__.__mro__:\n      if not hasattr(aff4_cls, \"SchemaCls\"):\n        continue\n\n      type_repr = ApiAff4ObjectType().InitFromAff4Object(\n          aff4_obj, aff4_cls, attr_blacklist)\n\n      if type_repr.attributes:\n        self.types.append(type_repr)\n\n      \n      \n      attr_blacklist.extend([attr.name for attr in type_repr.attributes])\n\n    return self", "docstring": "Initializes the current instance from an Aff4Object.\n\nIterates the inheritance hierarchy of the given Aff4Object and adds a\nApiAff4ObjectType for each class found in the hierarchy.\n\nArgs:\naff4_obj: An Aff4Object as source for the initialization.\n\nReturns:\nA reference to the current instance.", "source": "juraj-google-style"}
{"code": "def parse_headers(cls, msg):\n    return list(email.parser.Parser().parsestr(msg).items())", "docstring": "Parse HTTP headers.\n\nArgs:\nmsg (str): HTTP message.\n\nReturns:\n(List[Tuple[str, str]): List of header tuples.", "source": "codesearchnet"}
{"code": "def delete_data(self, url, *args, **kwargs):\n        \n        res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))\n        if res.status_code == 200 or res.status_code == 202:\n            return True\n        else:\n            return False", "docstring": "Deletes data under provided url\n\nReturns status as boolean.\n\nArgs:\n**url**: address of file to be deleted\n\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nBoolean. True if request was successful. False if not.", "source": "juraj-google-style"}
{"code": "def vertex_indices_in_segments(self, segments, ret_face_indices=False):\n        \n        import numpy as np\n        import warnings\n\n        face_indices = np.array([])\n        vertex_indices = np.array([])\n        if self.segm is not None:\n            try:\n                segments = [self.segm[name] for name in segments]\n            except KeyError as e:\n                raise ValueError('Unknown segments {}. Consier using Mesh.clean_segments on segments'.format(e.args[0]))\n            face_indices = np.unique(np.concatenate(segments))\n            vertex_indices = np.unique(np.ravel(self.f[face_indices]))\n        else:\n            warnings.warn('self.segm is None, will return empty array')\n\n        if ret_face_indices:\n            return vertex_indices, face_indices\n        else:\n            return vertex_indices", "docstring": "Given a list of segment names, return an array of vertex indices for\nall the vertices in those faces.\n\nArgs:\nsegments: a list of segment names,\nret_face_indices: if it is `True`, returns face indices", "source": "juraj-google-style"}
{"code": "def volatility_fn(self):\n    return self._volatility_fn", "docstring": "Python callable calculating the instantaneous volatility.\n\nThe callable should accept two real `Tensor` arguments of the same dtype and\nshape `times_shape`. The first argument is the scalar time t, the second\nargument is the value of Ito process X - `Tensor` of shape\n`batch_shape + sample_shape + [dim]`, where `batch_shape` represents a batch\nof models and `sample_shape` represents samples for each of the models. The\nresult is value of volatility S_{ij}(t, X). The return value of the callable\nis a real `Tensor` of the same dtype as the input arguments and of shape\n`batch_shape + sample_shape + [dim, dim]`. For example, `sample_shape` can\nstand for `[num_samples]` for Monte Carlo sampling, or\n`[num_grid_points_1, ..., num_grid_points_dim]` for Finite Difference\nsolvers.\n\nReturns:\nThe instantaneous volatility callable.", "source": "github-repos"}
{"code": "def Address(self):\n    if (self._address is None):\n        self._address = Crypto.ToAddress(self.ScriptHash)\n    return self._address", "docstring": "Get the wallet address associated with the token.\n\nReturns:\nstr: base58 encoded string representing the wallet address.", "source": "codesearchnet"}
{"code": "def add_buffer(self, buf_header, buf_payload):\n    if ('num_buffers' in self._header):\n        self._header['num_buffers'] += 1\n    else:\n        self._header['num_buffers'] = 1\n    self._header_json = None\n    self._buffers.append((buf_header, buf_payload))", "docstring": "Associate a buffer header and payload with this message.\n\nArgs:\nbuf_header (``JSON``) : a buffer header\nbuf_payload (``JSON`` or bytes) : a buffer payload\n\nReturns:\nNone\n\nRaises:\nMessageError", "source": "codesearchnet"}
{"code": "def outer_graph(self):\n    current = self._weak_outer_graph()\n    if current is None:\n        return self._fallback_outer_graph\n    return current", "docstring": "The Graph this FuncGraph is nested in.\n\nFunctions may capture Tensors from graphs they are nested in (transitive).\n\nReturns:\nA Graph object. Initially set to the current default graph when the\nFuncGraph was created. If the previous `outer_graph` was deleted because\nthe function that owns it was deleted, `outer_graph` is reset to the\noutermost default graph active when the FuncGraph was created. This\nFuncGraph won't have captured anything from the new `outer_graph` (and\nlikely not from the previous setting, since that would have created a\nstrong reference), but it is returned so that FuncGraphs always have a\nparent.", "source": "github-repos"}
{"code": "def to_value_set_codes(self, fhir_context: context.FhirPathContext) -> Optional[ValueSetCodes]:\n    if self.code_values is not None:\n        return ValueSetCodes(self.value_set_url, self.value_set_version, self.code_values)\n    value_set_proto = fhir_context.get_value_set(self.value_set_url)\n    if value_set_proto is None:\n        return None\n    return ValueSetCodes(self.value_set_url, value_set_proto.version.value or None, to_code_values(value_set_proto))", "docstring": "Builds a representation of the value set given to the memberOf call.\n\nIf memberOf was called with a value set proto, returns a ValueSetCodes\nobject using the fields from that proto.\nIf memberOf was called with a URL string, attempt to retrieve a value set\nproto from `fhir_context` and use it to build the ValueSetCodes object.\nIf the URL string can not be resolved in the given `fhir_context`, returns\nNone.\n\nArgs:\nfhir_context: The context to use when looking for value set definitions.\n\nReturns:\nThe value set referenced by the memberOf call or None if the value set URL\ncan not be resolved.", "source": "github-repos"}
{"code": "def from_b58check(private_key):\n    b58dec = base58.b58decode_check(private_key)\n    version = b58dec[0]\n    assert (version in [PrivateKey.TESTNET_VERSION, PrivateKey.MAINNET_VERSION])\n    return PrivateKey(int.from_bytes(b58dec[1:], 'big'))", "docstring": "Decodes a Base58Check encoded private-key.\n\nArgs:\nprivate_key (str): A Base58Check encoded private key.\n\nReturns:\nPrivateKey: A PrivateKey object", "source": "codesearchnet"}
{"code": "def find_all(self, selector, **kwargs):\n    self.debug_log(('Finding elements with selector: %s' % selector))\n    raise_exception = kwargs.get('raise_exception', BROME_CONFIG['proxy_driver']['raise_exception'])\n    self.debug_log(('effective raise_exception: %s' % raise_exception))\n    wait_until_present = kwargs.get('wait_until_present', BROME_CONFIG['proxy_driver']['wait_until_present_before_find'])\n    self.debug_log(('effective wait_until_present: %s' % wait_until_present))\n    wait_until_visible = kwargs.get('wait_until_visible', BROME_CONFIG['proxy_driver']['wait_until_visible_before_find'])\n    self.debug_log(('effective wait_until_visible: %s' % wait_until_visible))\n    _selector = Selector(self, selector)\n    found = False\n    if wait_until_visible:\n        found = self.wait_until_visible(selector, raise_exception=False)\n    if (wait_until_present and (not found)):\n        found = self.wait_until_present(selector, raise_exception=raise_exception)\n        if (not found):\n            self.debug_log(('find_all (%s): No element found' % _selector))\n            return []\n    try:\n        elements = getattr(self._driver, _selector.find_function)(_selector.get_selector())\n    except exceptions.NoSuchElementException:\n        self.debug_log(('find_all (%s): No element found' % _selector))\n        self.print_javascript_error()\n        if raise_exception:\n            raise exceptions.NoSuchElementException(_selector)\n        else:\n            return []\n    if (type(elements) == list):\n        if len(elements):\n            self.debug_log(('find_all (%s): Element found' % _selector))\n            return ProxyElementList(elements, _selector, self)\n        else:\n            msg = ('find_all (%s): No element found' % _selector)\n            self.debug_log(msg)\n            self.print_javascript_error()\n            if raise_exception:\n                raise exceptions.NoSuchElementException(msg)\n            else:\n                return []\n    else:\n        self.debug_log(('find_all (%s): Element found' % _selector))\n        return [ProxyElement(elements, _selector, self)]", "docstring": "Return all the elements found with a selector\n\nArgs:\nselector (str): the selector used to find the element\n\nKwargs:\nwait_until_present (bool) default configurable via\nproxy_driver:wait_until_present_before_find\nwait_until_visible (bool) default configurable via\nproxy_driver:wait_until_visible_before_find\nraise_exception (bool) default configurable via\nproxy_driver:raise_exception\n\nReturns:\nempty list if no element was found\nproxy_element_list when element are found\n\nRaises:\nthis function might raise an exception depending on the\nraise_exception kwargs\nor\nthe config proxy_driver:raise_exception", "source": "codesearchnet"}
{"code": "def patch_fromText(self, textline):\n    \n    if type(textline) == unicode:\n      \n      \n      textline = textline.encode(\"ascii\")\n    patches = []\n    if not textline:\n      return patches\n    text = textline.split('\\n')\n    while len(text) != 0:\n      m = re.match(\"^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$\", text[0])\n      if not m:\n        raise ValueError(\"Invalid patch string: \" + text[0])\n      patch = patch_obj()\n      patches.append(patch)\n      patch.start1 = int(m.group(1))\n      if m.group(2) == '':\n        patch.start1 -= 1\n        patch.length1 = 1\n      elif m.group(2) == '0':\n        patch.length1 = 0\n      else:\n        patch.start1 -= 1\n        patch.length1 = int(m.group(2))\n\n      patch.start2 = int(m.group(3))\n      if m.group(4) == '':\n        patch.start2 -= 1\n        patch.length2 = 1\n      elif m.group(4) == '0':\n        patch.length2 = 0\n      else:\n        patch.start2 -= 1\n        patch.length2 = int(m.group(4))\n\n      del text[0]\n\n      while len(text) != 0:\n        if text[0]:\n          sign = text[0][0]\n        else:\n          sign = ''\n        line = urllib.unquote(text[0][1:])\n        line = line.decode(\"utf-8\")\n        if sign == '+':\n          \n          patch.diffs.append((self.DIFF_INSERT, line))\n        elif sign == '-':\n          \n          patch.diffs.append((self.DIFF_DELETE, line))\n        elif sign == ' ':\n          \n          patch.diffs.append((self.DIFF_EQUAL, line))\n        elif sign == '@':\n          \n          break\n        elif sign == '':\n          \n          pass\n        else:\n          \n          raise ValueError(\"Invalid patch mode: '%s'\\n%s\" % (sign, line))\n        del text[0]\n    return patches", "docstring": "Parse a textual representation of patches and return a list of patch\nobjects.\n\nArgs:\ntextline: Text representation of patches.\n\nReturns:\nArray of Patch objects.\n\nRaises:\nValueError: If invalid input.", "source": "juraj-google-style"}
{"code": "def extract_channel(k, cdim):\n    \n    n = cdim\n    perm = tuple(list(range(k)) + [n - 1] + list(range(k, n - 1)))\n    return CPermutation.create(perm)", "docstring": "Create a :class:`CPermutation` that extracts channel `k`\n\nReturn a permutation circuit that maps the k-th (zero-based)\ninput to the last output, while preserving the relative order of all other\nchannels.\n\nArgs:\nk (int): Extracted channel index\ncdim (int): The circuit dimension (number of channels)\n\nReturns:\nCircuit: Permutation circuit", "source": "juraj-google-style"}
{"code": "def install_requirements(self, path, index=None):\n    cmd = 'install -r {0}'.format(path)\n    if index:\n        cmd = 'install --index-url {0} -r {1}'.format(index, path)\n    self.pip(cmd)", "docstring": "Install packages from a requirements.txt file.\n\nArgs:\npath (str): The path to the requirements file.\nindex (str): The URL for a pypi index to use.", "source": "codesearchnet"}
{"code": "def eulers_totient(n):\n    if (not isinstance(n, int)):\n        raise TypeError('Expecting a strictly positive integer')\n    if (n <= 0):\n        raise ValueError('Expecting a strictly positive integer')\n    if (n == 1):\n        return 1\n    result = 0\n    for i in range(1, n):\n        if (gcd(i, n) == 1):\n            result += 1\n    return result", "docstring": "Calculate the value of Euler's totient for a given integer\n\nArgs:\nn (int): strictly positive integer\n\nReturns:\nThe value of Euler's totient for n\n\nRaises:\nTypeError: If either n or k is not an integer\nValueError: If either n or k is negative, or if k is strictly greater than n", "source": "codesearchnet"}
{"code": "def parse_arguments(*args, **options):\n    days = options.get('days', 1)\n    enterprise_customer_uuid = options.get('enterprise_customer_uuid')\n    enterprise_customer = None\n    if enterprise_customer_uuid:\n        try:\n            enterprise_customer = EnterpriseCustomer.objects.get(uuid=enterprise_customer_uuid)\n        except EnterpriseCustomer.DoesNotExist:\n            raise CommandError('Enterprise customer with uuid \"{enterprise_customer_uuid}\" does not exist.'.format(enterprise_customer_uuid=enterprise_customer_uuid))\n    return (days, enterprise_customer)", "docstring": "Parse and validate arguments for send_course_enrollments command.\n\nArguments:\n*args: Positional arguments passed to the command\n**options: optional arguments passed to the command\n\nReturns:\nA tuple containing parsed values for\n1. days (int): Integer showing number of days to lookup enterprise enrollments,\ncourse completion etc and send to xAPI LRS\n2. enterprise_customer_uuid (EnterpriseCustomer): Enterprise Customer if present then\nsend xAPI statements just for this enterprise.", "source": "codesearchnet"}
{"code": "def post_message(self, level, message, count=1, timestamp=None, now_reference=None):\n    if ((len(self.messages) > 0) and (self.messages[(- 1)].message == message)):\n        self.messages[(- 1)].count += 1\n    else:\n        msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)\n        msg_object.count = count\n        self.messages.append(msg_object)\n        self._last_message_id += 1\n    return self.messages[(- 1)]", "docstring": "Post a new message for service.\n\nArgs:\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents\ncount (int): The number of times the message has been repeated\ntimestamp (float): An optional monotonic value in seconds for when the message was created\nnow_reference (float): If timestamp is not relative to monotonic() as called from this\nmodule then this should be now() as seen by whoever created the timestamp.\n\nReturns:\nServiceMessage: The posted message", "source": "codesearchnet"}
{"code": "def get_sample(self, md5):\n        \n        \n        sample = self.data_store.get_sample(md5)\n        if not sample:\n            return {'sample_set': {'md5_list': self.get_sample_set(md5)}}\n        return {'sample': sample}", "docstring": "Get a sample from the DataStore.\nArgs:\nmd5: the md5 of the sample\nReturns:\nA dictionary of meta data about the sample which includes\na ['raw_bytes'] key that contains the raw bytes.\nRaises:\nWorkbench.DataNotFound if the sample is not found.", "source": "juraj-google-style"}
{"code": "def make_list_of_op(tops, check_graph=True, allow_graph=True, ignore_ts=False):\n    if isinstance(tops, ops.Graph):\n        if allow_graph:\n            return tops.get_operations()\n        else:\n            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')\n    else:\n        if not is_iterable(tops):\n            tops = [tops]\n        if not tops:\n            return []\n        if check_graph:\n            check_types = None if ignore_ts else ops.Operation\n            get_unique_graph(tops, check_types=check_types)\n        return [op for op in tops if isinstance(op, ops.Operation)]", "docstring": "Convert ops to a list of `tf.Operation`.\n\nArgs:\ntops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single\noperation.\ncheck_graph: if `True` check if all the operations belong to the same graph.\nallow_graph: if `False` a `tf.Graph` cannot be converted.\nignore_ts: if True, silently ignore `tf.Tensor`.\nReturns:\nA newly created list of `tf.Operation`.\nRaises:\nTypeError: if tops cannot be converted to a list of `tf.Operation` or,\nif `check_graph` is `True`, if all the ops do not belong to the\nsame graph.", "source": "github-repos"}
{"code": "def isdir(path):\n    \n    system = get_instance(path)\n\n    \n    \n    return system.isdir(system.ensure_dir_path(path))", "docstring": "Return True if path is an existing directory.\n\nEquivalent to \"os.path.isdir\".\n\nArgs:\npath (path-like object): Path or URL.\n\nReturns:\nbool: True if directory exists.", "source": "juraj-google-style"}
{"code": "def create_base_for_fuse_batchnorm(self, pattern_match_mode='MATCH_ALL'):\n    with self.cached_session() as sess:\n        data_format = 'NHWC'\n        if pattern_match_mode == 'MISMATCH_FORMAT':\n            data_format = 'NCHW'\n        inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n        input_op = constant_op.constant(np.array(inputs), shape=[1, 1, 6, 2] if data_format == 'NHWC' else [1, 2, 1, 6], dtype=dtypes.float32)\n        weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n        weights_op = constant_op.constant(np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n        conv_op = nn_ops.conv2d(input_op, weights_op, [1, 1, 1, 1], data_format=data_format, padding='SAME', name='conv_op')\n        const_op_1 = None\n        const_op_2 = constant_op.constant(1e-05, dtype=dtypes.float32)\n        const_op_3 = None\n        const_op_4 = None\n        const_op_5 = None\n        const_op_6 = None\n        if data_format == 'NHWC':\n            const_op_1 = constant_op.constant(np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)\n            const_op_3 = constant_op.constant(np.array([10, 20]), shape=[2], dtype=dtypes.float32)\n            const_op_4 = constant_op.constant(np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)\n            const_op_5 = constant_op.constant(np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)\n            const_op_6 = constant_op.constant(np.array([0.2, 0.5]), shape=[2], dtype=dtypes.float32)\n        else:\n            const_op_1 = constant_op.constant(np.array([0.25, 0.5, 0.6, 0.7, 0.8, 0.9]), shape=[6], dtype=dtypes.float32)\n            const_op_3 = constant_op.constant(np.array([10, 20, 30, 40, 50, 60]), shape=[6], dtype=dtypes.float32)\n            const_op_4 = constant_op.constant(np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6]), shape=[6], dtype=dtypes.float32)\n            const_op_5 = constant_op.constant(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), shape=[6], dtype=dtypes.float32)\n            const_op_6 = constant_op.constant(np.array([0.2, 0.4, 0.5, 0.6, 0.7, 0.8]), shape=[6], dtype=dtypes.float32)\n        add_op_1 = gen_math_ops.add(const_op_1, const_op_2)\n        rsqrt_op = math_ops.rsqrt(add_op_1)\n        variable_op = None\n        if pattern_match_mode == 'MATCH_NO_GAMMA':\n            variable_op = rsqrt_op\n        else:\n            variable_op = math_ops.multiply(rsqrt_op, const_op_5)\n        mul_op_1 = math_ops.multiply(conv_op, variable_op)\n        mul_op_2 = None\n        if pattern_match_mode == 'MISMATCH_PATTERN':\n            mul_op_2 = math_ops.multiply(const_op_3, const_op_6)\n        else:\n            mul_op_2 = math_ops.multiply(const_op_3, variable_op)\n        sub_op = math_ops.subtract(const_op_4, mul_op_2)\n        if pattern_match_mode == 'MATCH_SWITCH_ORDER':\n            gen_math_ops.add(sub_op, mul_op_1, name='output')\n        else:\n            gen_math_ops.add(mul_op_1, sub_op, name='output')\n        test_util.set_producer_version(ops.get_default_graph(), 8)\n        original_graph = sess.graph_def\n        original_result = sess.run(['output:0'])\n        return (original_graph, original_result)", "docstring": "Create testing graph and compute the result from original graph.\n\nArgs:\npattern_match_mode: A label string to indicate which batchnorm composition\npattern to create in the resulting graph. \"MATCH_ALL\" - Create a graph\nmatching the decomposed batchnorm pattern with full set of primitive\nops. 
\"MATCH_NO_GAMMA\" - Create a graph matching the decomposed batchnorm\npattern when gamma factor is 1 and multiplication with gamma is omitted.\n\"MATCH_SWITCH_ORDER\" - Create a graph matching the decomposed batchnorm\npattern with a different order of inputs to the root Add node.\n\"MISMATCH_PATTERN\" - Create a graph with same set of primitive ops which\nmakes up the decomposed batchnorm, but not matching the pattern.\n\"MISMATCH_FORMAT\" - Create a graph with NCHW format as input.\n\nReturns:\nA GraphDef as original graph to run the decomposed batchnorm test cases.\nComputation result from executing the original graph defined by GraphDef.", "source": "github-repos"}
{"code": "def _run_query(client, query, job_config=None):\n    start_time = time.time()\n    query_job = client.query(query, job_config=job_config)\n    print('Executing query with job ID: {}'.format(query_job.job_id))\n    while True:\n        print('\\rQuery executing: {:0.2f}s'.format((time.time() - start_time)), end='')\n        try:\n            query_job.result(timeout=0.5)\n            break\n        except futures.TimeoutError:\n            continue\n    print('\\nQuery complete after {:0.2f}s'.format((time.time() - start_time)))\n    return query_job", "docstring": "Runs a query while printing status updates\n\nArgs:\nclient (google.cloud.bigquery.client.Client):\nClient to bundle configuration needed for API requests.\nquery (str):\nSQL query to be executed. Defaults to the standard SQL dialect.\nUse the ``job_config`` parameter to change dialects.\njob_config (google.cloud.bigquery.job.QueryJobConfig, optional):\nExtra configuration options for the job.\n\nReturns:\ngoogle.cloud.bigquery.job.QueryJob: the query job created\n\nExample:\n>>> client = bigquery.Client()\n>>> _run_query(client, \"SELECT 17\")\nExecuting query with job ID: bf633912-af2c-4780-b568-5d868058632b\nQuery executing: 1.66s\nQuery complete after 2.07s\n'bf633912-af2c-4780-b568-5d868058632b'", "source": "codesearchnet"}
{"code": "def get_video_transcript_data(video_id, language_code):\n    video_transcript = VideoTranscript.get_or_none(video_id, language_code)\n    if video_transcript:\n        try:\n            return dict(file_name=video_transcript.filename, content=video_transcript.transcript.file.read())\n        except Exception:\n            logger.exception('[edx-val] Error while retrieving transcript for video=%s -- language_code=%s', video_id, language_code)\n            raise", "docstring": "Get video transcript data\n\nArguments:\nvideo_id(unicode): An id identifying the Video.\nlanguage_code(unicode): it will be the language code of the requested transcript.\n\nReturns:\nA dict containing transcript file name and its content.", "source": "codesearchnet"}
{"code": "def _JoinKeyPath(self, path_segments):\n    \n    \n    \n\n    \n    path_segments = [\n        segment.split(definitions.KEY_PATH_SEPARATOR)\n        for segment in path_segments]\n\n    \n    path_segments = [\n        element for sublist in path_segments for element in sublist]\n\n    \n    path_segments = filter(None, path_segments)\n\n    return definitions.KEY_PATH_SEPARATOR.join(path_segments)", "docstring": "Joins the path segments into key path.\n\nArgs:\npath_segments (list[str]): Windows Registry key path segments.\n\nReturns:\nstr: key path.", "source": "juraj-google-style"}
{"code": "def from_label(cls, label):\n    z = np.zeros(len(label), dtype=np.bool)\n    x = np.zeros(len(label), dtype=np.bool)\n    for (i, char) in enumerate(label):\n        if (char == 'X'):\n            x[((- i) - 1)] = True\n        elif (char == 'Z'):\n            z[((- i) - 1)] = True\n        elif (char == 'Y'):\n            z[((- i) - 1)] = True\n            x[((- i) - 1)] = True\n        elif (char != 'I'):\n            raise QiskitError(\"Pauli string must be only consisted of 'I', 'X', 'Y' or 'Z' but you have {}.\".format(char))\n    return cls(z=z, x=x)", "docstring": "r\"\"\"Take pauli string to construct pauli.\n\nThe qubit index of pauli label is q_{n-1} ... q_0.\nE.g., a pauli is $P_{n-1} \\otimes ... \\otimes P_0$\n\nArgs:\nlabel (str): pauli label\n\nReturns:\nPauli: the constructed pauli\n\nRaises:\nQiskitError: invalid character in the label", "source": "codesearchnet"}
{"code": "def GetName(self):\n    if (self.AssetType == AssetType.GoverningToken):\n        return 'NEO'\n    elif (self.AssetType == AssetType.UtilityToken):\n        return 'NEOGas'\n    if (type(self.Name) is bytes):\n        return self.Name.decode('utf-8')\n    return self.Name", "docstring": "Get the asset name based on its type.\n\nReturns:\nstr: 'NEO' or 'NEOGas'", "source": "codesearchnet"}
{"code": "def box(self, x0, y0, width, height):\n        \n        assert width > 1\n        assert height > 1\n\n        width -= 1\n        height -= 1\n\n        for x in range(x0, x0 + width):\n            self.point(x, y0, \"-\")\n            self.point(x, y0 + height, \"-\")\n\n        for y in range(y0, y0 + height):\n            self.point(x0, y, \"|\")\n            self.point(x0 + width, y, \"|\")\n\n        self.point(x0, y0, \"+\")\n        self.point(x0 + width, y0, \"+\")\n        self.point(x0, y0 + height, \"+\")\n        self.point(x0 + width, y0 + height, \"+\")", "docstring": "Create a box on ASCII canvas.\n\nArgs:\nx0 (int): x coordinate of the box corner.\ny0 (int): y coordinate of the box corner.\nwidth (int): box width.\nheight (int): box height.", "source": "juraj-google-style"}
{"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n    \n    \n    super(SummaryInformationOLECFPlugin, self).Process(\n        parser_mediator, **kwargs)\n\n    if not root_item:\n      raise ValueError('Root item not set.')\n\n    root_creation_time, root_modification_time = self._GetTimestamps(root_item)\n\n    for item_name in self.REQUIRED_ITEMS:\n      item = root_item.get_sub_item_by_name(item_name)\n      if not item:\n        continue\n\n      summary_information = OLECFSummaryInformation(item)\n      event_data = summary_information.GetEventData(\n          data_type='olecf:summary_info')\n      event_data.name = 'Summary Information'\n\n      for property_name, date_time in iter(\n          summary_information.date_time_properties.items()):\n        date_time_description = self._DATE_TIME_DESCRIPTIONS.get(\n            property_name, definitions.TIME_DESCRIPTION_UNKNOWN)\n        event = OLECFSummaryInformationEvent(date_time, date_time_description)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      if root_creation_time:\n        date_time = dfdatetime_filetime.Filetime(\n            timestamp=root_creation_time)\n        event = OLECFSummaryInformationEvent(\n            date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      if root_modification_time:\n        date_time = dfdatetime_filetime.Filetime(\n            timestamp=root_modification_time)\n        event = OLECFSummaryInformationEvent(\n            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a summary information OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nroot_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\nValueError: If the root item is not set.", "source": "juraj-google-style"}
{"code": "def rollaxis(a, axis, start=0):\n    \n    if isinstance(a, np.ndarray):\n        return np.rollaxis(a, axis, start)\n    if axis not in range(a.ndim):\n        raise ValueError(\n                'rollaxis: axis (%d) must be >=0 and < %d' % (axis, a.ndim))\n    if start not in range(a.ndim + 1):\n        raise ValueError(\n                'rollaxis: start (%d) must be >=0 and < %d' % (axis, a.ndim+1))\n    axes = list(range(a.ndim))\n    axes.remove(axis)\n    axes.insert(start, axis)\n    return transpose(a, axes)", "docstring": "Roll the specified axis backwards, until it lies in a given position.\n\nArgs:\na (array_like): Input array.\naxis (int): The axis to roll backwards.  The positions of the other axes\ndo not change relative to one another.\nstart (int, optional): The axis is rolled until it lies before this\nposition.  The default, 0, results in a \"complete\" roll.\n\nReturns:\nres (ndarray)", "source": "juraj-google-style"}
{"code": "def symm_group_cubic(mat):\n    \n    sym_group = np.zeros([24, 3, 3])\n    sym_group[0, :] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n    sym_group[1, :] = [[1, 0, 0], [0, -1, 0], [0, 0, -1]]\n    sym_group[2, :] = [[-1, 0, 0], [0, 1, 0], [0, 0, -1]]\n    sym_group[3, :] = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]\n    sym_group[4, :] = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]]\n    sym_group[5, :] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]\n    sym_group[6, :] = [[0, 1, 0], [-1, 0, 0], [0, 0, 1]]\n    sym_group[7, :] = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]\n    sym_group[8, :] = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]]\n    sym_group[9, :] = [[-1, 0, 0], [0, 0, 1], [0, 1, 0]]\n    sym_group[10, :] = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]\n    sym_group[11, :] = [[1, 0, 0], [0, 0, 1], [0, -1, 0]]\n    sym_group[12, :] = [[0, 1, 0], [0, 0, 1], [1, 0, 0]]\n    sym_group[13, :] = [[0, 1, 0], [0, 0, -1], [-1, 0, 0]]\n    sym_group[14, :] = [[0, -1, 0], [0, 0, 1], [-1, 0, 0]]\n    sym_group[15, :] = [[0, -1, 0], [0, 0, -1], [1, 0, 0]]\n    sym_group[16, :] = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]\n    sym_group[17, :] = [[0, 0, 1], [-1, 0, 0], [0, -1, 0]]\n    sym_group[18, :] = [[0, 0, -1], [1, 0, 0], [0, -1, 0]]\n    sym_group[19, :] = [[0, 0, -1], [-1, 0, 0], [0, 1, 0]]\n    sym_group[20, :] = [[0, 0, -1], [0, -1, 0], [-1, 0, 0]]\n    sym_group[21, :] = [[0, 0, -1], [0, 1, 0], [1, 0, 0]]\n    sym_group[22, :] = [[0, 0, 1], [0, -1, 0], [1, 0, 0]]\n    sym_group[23, :] = [[0, 0, 1], [0, 1, 0], [-1, 0, 0]]\n\n    mat = np.atleast_2d(mat)\n    all_vectors = []\n    for sym in sym_group:\n        for vec in mat:\n            all_vectors.append(np.dot(sym, vec))\n    return np.unique(np.array(all_vectors), axis=0)", "docstring": "obtain cubic symmetric eqivalents of the list of vectors.\n\nArgs:\nmatrix (lattice matrix, n by 3 array/matrix)\n\nReturn:\ncubic symmetric eqivalents of the list of vectors.", "source": "juraj-google-style"}
{"code": "def __init__(self, line, line_num=-1, var_map=None):\n    \n    self.match = ''\n    self.regex = ''\n    self.regex_obj = None\n    self.line_op = ''              \n    self.record_op = ''            \n    self.new_state = ''            \n    self.line_num = line_num\n\n    line = line.strip()\n    if not line:\n      raise TextFSMTemplateError('Null data in FSMRule. Line: %s'\n                                 % self.line_num)\n\n    \n    match_action = self.MATCH_ACTION.match(line)\n    if match_action:\n      self.match = match_action.group('match')\n    else:\n      self.match = line\n\n    \n    self.regex = self.match\n    if var_map:\n      try:\n        self.regex = string.Template(self.match).substitute(var_map)\n      except (ValueError, KeyError):\n        raise TextFSMTemplateError(\n            \"Duplicate or invalid variable substitution: '%s'. Line: %s.\" %\n            (self.match, self.line_num))\n\n    try:\n      \n      self.regex_obj = CopyableRegexObject(self.regex)\n    except re.error:\n      raise TextFSMTemplateError(\n          \"Invalid regular expression: '%s'. Line: %s.\" %\n          (self.regex, self.line_num))\n\n    \n    if not match_action:\n      return\n\n    \n    action_re = self.ACTION_RE.match(match_action.group('action'))\n    if not action_re:\n      \n      action_re = self.ACTION2_RE.match(match_action.group('action'))\n      if not action_re:\n        \n        action_re = self.ACTION3_RE.match(match_action.group('action'))\n        if not action_re:\n          \n          raise TextFSMTemplateError(\"Badly formatted rule '%s'. Line: %s.\" %\n                                     (line, self.line_num))\n\n    \n    if 'ln_op' in action_re.groupdict() and action_re.group('ln_op'):\n      self.line_op = action_re.group('ln_op')\n\n    \n    if 'rec_op' in action_re.groupdict() and action_re.group('rec_op'):\n      self.record_op = action_re.group('rec_op')\n\n    \n    if 'new_state' in action_re.groupdict() and action_re.group('new_state'):\n      self.new_state = action_re.group('new_state')\n\n    \n    \n    \n    if self.line_op == 'Continue' and self.new_state:\n      raise TextFSMTemplateError(\n          \"Action '%s' with new state %s specified. Line: %s.\"\n          % (self.line_op, self.new_state, self.line_num))\n\n    \n    if self.line_op != 'Error' and self.new_state:\n      if not re.match(r'\\w+', self.new_state):\n        raise TextFSMTemplateError(\n            'Alphanumeric characters only in state names. Line: %s.'\n            % (self.line_num))", "docstring": "Initialise a new rule object.\n\nArgs:\nline: (str), a template rule line to parse.\nline_num: (int), Optional line reference included in error reporting.\nvar_map: Map for template (${var}) substitutions.\n\nRaises:\nTextFSMTemplateError: If 'line' is not a valid format for a Value entry.", "source": "juraj-google-style"}
{"code": "def __init__(self, dataset, class_weight=None, distribution=None):\n    from keras.src.utils.module_utils import tensorflow as tf\n    if not isinstance(dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)):\n        raise ValueError(f'Expected argument `dataset` to be a tf.data.Dataset. Received: {dataset}')\n    if class_weight is not None:\n        dataset = dataset.map(make_class_weight_map_fn(class_weight)).prefetch(tf.data.AUTOTUNE)\n    if distribution is not None:\n        dataset = distribution.distribute_dataset(dataset)\n    self._dataset = dataset", "docstring": "Initialize the TFDatasetAdapter.\n\nArgs:\ndataset: The input `tf.data.Dataset` instance.\nclass_weight: A map where the keys are integer class ids and values\nare the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.\ndistribution: A `keras.distribution.Distribution` instance. Used to\nshard the input dataset into per worker/process dataset\ninstance.", "source": "github-repos"}
{"code": "def increase_volume(percentage):\n    if ((percentage > 100) or (percentage < 0)):\n        raise ValueError('percentage must be an integer between 0 and 100')\n    if (system.get_name() == 'windows'):\n        pass\n    elif (system.get_name() == 'mac'):\n        volume_int = (percentage / 10)\n        old_volume = get()\n        new_volume = (old_volume + volume_int)\n        if (new_volume > 10):\n            new_volume = 10\n        set_volume((new_volume * 10))\n    else:\n        formatted = ('%d%%+' % percentage)\n        sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()", "docstring": "Increase the volume.\n\nIncrease the volume by a given percentage.\n\nArgs:\npercentage (int): The percentage (as an integer between 0 and 100) to increase the volume by.\n\nRaises:\nValueError: if the percentage is >100 or <0.", "source": "codesearchnet"}
{"code": "def filesizes(images):\n    \n    \n    while True:\n        img = yield marv.pull(images)\n        if img is None:\n            break\n        yield marv.push(img.size)", "docstring": "Stat filesize of files.\n\nArgs:\nimages: stream of marv image files\n\nReturns:\nStream of filesizes", "source": "juraj-google-style"}
{"code": "def _print_results(file, status):\n        \n\n        file_color = c.Fore.GREEN\n        status_color = c.Fore.RED\n        if status == 'Success':\n            status_color = c.Fore.GREEN\n        elif status == 'Skipped':\n            status_color = c.Fore.YELLOW\n        print(\n            '{}{!s:<13}{}{!s:<35}{}{!s:<8}{}{}'.format(\n                c.Fore.CYAN,\n                'Downloading:',\n                file_color,\n                file,\n                c.Fore.CYAN,\n                'Status:',\n                status_color,\n                status,\n            )\n        )", "docstring": "Print the download results.\n\nArgs:\nfile (str): The filename.\nstatus (str): The file download status.", "source": "juraj-google-style"}
{"code": "def exception(self, timeout=None):\n    if (not self._completed.wait(timeout=timeout)):\n        raise exceptions.TimeoutError('Timed out waiting for result.')\n    if (self._result != self._SENTINEL):\n        return None\n    return self._exception", "docstring": "Return the exception raised by the call, if any.\n\nThis blocks until the message has successfully been published, and\nreturns the exception. If the call succeeded, return None.\n\nArgs:\ntimeout (Union[int, float]): The number of seconds before this call\ntimes out and raises TimeoutError.\n\nRaises:\nTimeoutError: If the request times out.\n\nReturns:\nException: The exception raised by the call, if any.", "source": "codesearchnet"}
{"code": "def summarize_dist_params(dist, name, name_scope=\"dist_params\"):\n  \n  with tf.compat.v1.name_scope(name_scope):\n    tf.compat.v2.summary.histogram(\n        name=\"{}/{}\".format(name, \"mean\"),\n        data=dist.mean(),\n        step=tf.compat.v1.train.get_or_create_global_step())\n    tf.compat.v2.summary.histogram(\n        name=\"{}/{}\".format(name, \"stddev\"),\n        data=dist.stddev(),\n        step=tf.compat.v1.train.get_or_create_global_step())", "docstring": "Summarize the parameters of a distribution.\n\nArgs:\ndist: A Distribution object with mean and standard deviation\nparameters.\nname: The name of the distribution.\nname_scope: The name scope of this summary.", "source": "juraj-google-style"}
{"code": "def exists(self, filename):\n    result = True\n    for repo in self._children:\n        if (not repo.exists(filename)):\n            result = False\n    return result", "docstring": "Report whether a file exists on all distribution points.\n\nDetermines file type by extension.\n\nArgs:\nfilename: Filename you wish to check. (No path! e.g.:\n\"AdobeFlashPlayer-14.0.0.176.pkg\")\n\nReturns:\nBoolean", "source": "codesearchnet"}
{"code": "def _set_mtu_to_nics(self, conf):\n    for (dom_name, dom_spec) in conf.get('domains', {}).items():\n        for (idx, nic) in enumerate(dom_spec.get('nics', [])):\n            net = self._get_net(conf, dom_name, nic)\n            mtu = net.get('mtu', 1500)\n            if (mtu != 1500):\n                nic['mtu'] = mtu", "docstring": "For all the nics of all the domains in the conf that have MTU set,\nsave the MTU on the NIC definition.\n\nArgs:\nconf (dict): Configuration spec to extract the domains from\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def transform_framerate(self, in_fps, out_fps):\n        \n        if in_fps <= 0 or out_fps <= 0:\n            raise ValueError(\"Framerates must be positive, cannot transform %f -> %f\" % (in_fps, out_fps))\n\n        ratio = in_fps / out_fps\n        for line in self:\n            line.start = int(round(line.start * ratio))\n            line.end = int(round(line.end * ratio))", "docstring": "Rescale all timestamps by ratio of in_fps/out_fps.\n\nCan be used to fix files converted from frame-based to time-based\nwith wrongly assumed framerate.\n\nArguments:\nin_fps (float)\nout_fps (float)\n\nRaises:\nValueError: Non-positive framerate given.", "source": "juraj-google-style"}
{"code": "def AssertIterableType(iterable, expected_item_type):\n    if isinstance(iterable, collections.Iterator):\n        message = 'Expected iterable container but got iterator `%s` instead'\n        message %= iterable\n        raise TypeError(message)\n    AssertType(iterable, collections.Iterable)\n    for item in iterable:\n        AssertType(item, expected_item_type)", "docstring": "Ensures that given iterable container has certain type.\n\nArgs:\niterable: An iterable container to assert the type for.\nexpected_item_type: An expected type of the container items.\n\nRaises:\nTypeError: If given container does is not an iterable or its items do not\nhave the expected type.", "source": "codesearchnet"}
{"code": "def builtin(cls, name):\n    names = {'nsdoe': LEGEND__NSDOE, 'canstrat': LEGEND__Canstrat, 'nagmdm__6_2': LEGEND__NAGMDM__6_2, 'nagmdm__6_1': LEGEND__NAGMDM__6_1, 'nagmdm__4_3': LEGEND__NAGMDM__4_3, 'sgmc': LEGEND__SGMC}\n    return cls.from_csv(text=names[name.lower()])", "docstring": "Generate a default legend.\n\nArgs:\nname (str): The name of the legend you want. Not case sensitive.\n'nsdoe': Nova Scotia Dept. of Energy\n'canstrat': Canstrat\n'nagmdm__6_2': USGS N. Am. Geol. Map Data Model 6.2\n'nagmdm__6_1': USGS N. Am. Geol. Map Data Model 6.1\n'nagmdm__4_3': USGS N. Am. Geol. Map Data Model 4.3\n'sgmc': USGS State Geologic Map Compilation\n\nDefault 'nagmdm__6_2'.\n\nReturns:\nLegend: The legend stored in `defaults.py`.", "source": "codesearchnet"}
{"code": "def GetAttributeValuesString(self):\n    attributes = []\n    for (attribute_name, attribute_value) in sorted(self.__dict__.items()):\n        if ((attribute_name[0] == '_') or (attribute_value is None)):\n            continue\n        if isinstance(attribute_value, dict):\n            attribute_value = sorted(attribute_value.items())\n        elif isinstance(attribute_value, py2to3.BYTES_TYPE):\n            attribute_value = repr(attribute_value)\n        attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)\n        attributes.append(attribute_string)\n    return ', '.join(attributes)", "docstring": "Retrieves a comparable string of the attribute values.\n\nReturns:\nstr: comparable string of the attribute values.", "source": "codesearchnet"}
{"code": "def prepare_for_translation(localization_bundle_path):\n    logging.info('Preparing for translation..')\n    for strings_file in os.listdir(os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME)):\n        if (not strings_file.endswith('.strings')):\n            continue\n        strings_path = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME, strings_file)\n        for lang_dir in os.listdir(localization_bundle_path):\n            if ((lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME) or lang_dir.startswith('.')):\n                continue\n            dest_strings_path = os.path.join(localization_bundle_path, lang_dir, strings_file)\n            pending_path = (dest_strings_path + '.pending')\n            excluded_path = (dest_strings_path + '.excluded')\n            if (not os.path.exists(dest_strings_path)):\n                open_strings_file(dest_strings_path, 'a').close()\n            logging.info('Preparing diff for %s in %s', lang_dir, pending_path)\n            localization_diff(strings_path, dest_strings_path, excluded_path, pending_path)", "docstring": "Prepares the localization bundle for translation.\n\nThis means, after creating the strings files using genstrings.sh, this will produce '.pending' files, that contain\nthe files that are yet to be translated.\n\nArgs:\nlocalization_bundle_path (str): The path to the localization bundle.", "source": "codesearchnet"}
{"code": "def __call__(self, environ, start_response):\n        \n        start_response('200 OK', [('Content-type', 'text/plain')])\n        self.last_request_uri = wsgiref.util.request_uri(environ)\n        return [self._success_message.encode('utf-8')]", "docstring": "WSGI Callable.\n\nArgs:\nenviron (Mapping[str, Any]): The WSGI environment.\nstart_response (Callable[str, list]): The WSGI start_response\ncallable.\n\nReturns:\nIterable[bytes]: The response body.", "source": "juraj-google-style"}
{"code": "def flipcheck(content):\n    \n\n    \n    punct = \n    tamperdict = str.maketrans('', '', punct)\n    tamperproof = content.translate(tamperdict)\n\n    \n    if \"(╯°□°）╯︵\" in tamperproof:\n        \n        if \"┻┻\" in tamperproof:\n            \n            length = 0\n            for letter in content:\n                if letter == \"━\":\n                    length += 1.36\n                elif letter == \"─\":\n                    length += 1\n                elif letter == \"-\":\n                    length += 0.50\n\n            \n            putitback = \"┬\"\n\n            for i in range(int(length)):\n                putitback += \"─\"\n\n            putitback += \"┬﻿ ノ( ゜-゜ノ)\"\n\n            return putitback\n\n        \n        else:\n            \n            flipdict = str.maketrans(\n                'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',\n                'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'\n            )\n\n            \n            flipstart = content.index('︵')\n            flipped = content[flipstart+1:]\n            flipped = str.lower(flipped).translate(flipdict)\n\n            putitback = ''.join(list(reversed(list(flipped))))\n\n            putitback += \"ノ( ゜-゜ノ)\"\n\n            return putitback\n    else:\n        return False", "docstring": "Checks a string for anger and soothes said anger\n\nArgs:\ncontent (str): The message to be flipchecked\n\nReturns:\nputitback (str): The righted table or text", "source": "juraj-google-style"}
{"code": "def get_metalpdb_info(metalpdb_lig_file):\n    \n\n    pdb_metals = ['CU', 'ZN', 'MN', 'FE', 'MG', 'CO', 'SE', 'YB', 'SF4', 'FES', 'F3S', 'NI', 'FE2']\n\n    \n    coordination_number = 0\n    endogenous_ligands = []\n    exogenous_ligands = []\n\n    \n    ss = StructProp(ident='metalpdb', structure_path=metalpdb_lig_file, file_type='pdb')\n\n    \n    chain_id = op.basename(metalpdb_lig_file)[5]\n    metal_id = (op.basename(metalpdb_lig_file).split('_')[2], op.basename(metalpdb_lig_file).split('_')[3])\n\n    for r in ss.parse_structure().first_model.get_residues():\n        return_id = (r.get_id(), r.get_resname())\n        \n        \n        \n        if r.get_id()[0] != ' ':\n            if not r.resname.strip() in pdb_metals and r.resname != 'HOH':\n                \n                exogenous_ligands.append(return_id)\n        else:\n            endogenous_ligands.append(return_id)\n\n        \n        for a in r.get_atom():\n            if not a.element in pdb_metals:\n                coordination_number += 1\n\n    infodict = {metal_id: {'endogenous_ligands' : endogenous_ligands,\n                           'exogenous_ligands'  : exogenous_ligands,\n                           'coordination_number': coordination_number}}\n\n    return chain_id, infodict", "docstring": "Parse a MetalPDB .lig file and return a tuple of the chain ID it represents, along with metal binding information.\n\nArgs:\nmetalpdb_lig_file (str): Path to .lig file\n\nReturns:\ntuple: (str, dict) of the chain ID and the parsed metal binding site information", "source": "juraj-google-style"}
{"code": "def create_html_from_fragment(tag):\n    try:\n        assert isinstance(tag, bs4.element.Tag)\n    except AssertionError:\n        raise TypeError\n    try:\n        assert (tag.find_all('body') == [])\n    except AssertionError:\n        raise ValueError\n    soup = BeautifulSoup('<html><head></head><body></body></html>', 'html.parser')\n    soup.body.append(tag)\n    return soup", "docstring": "Creates full html tree from a fragment. Assumes that tag should be wrapped in a body and is currently not\n\nArgs:\ntag: a bs4.element.Tag\n\nReturns:\"\nbs4.element.Tag: A bs4 tag representing a full html document", "source": "codesearchnet"}
{"code": "def repository_blob(self, sha, **kwargs):\n        \n\n        path = '/projects/%s/repository/blobs/%s' % (self.get_id(), sha)\n        return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Return a file by blob SHA.\n\nArgs:\nsha(str): ID of the blob\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request\n\nReturns:\ndict: The blob content and metadata", "source": "juraj-google-style"}
{"code": "def add_unref(self, timestamp: int) -> None:\n    self._unref_times.append(timestamp)", "docstring": "Adds an unref to this tensor with the specified timestamp.\n\nArgs:\ntimestamp:  Timestamp of object unreference as an integer.", "source": "github-repos"}
{"code": "def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):\n        \n        if not outname:\n            outname = self.project_name\n        if not outdir:\n            outdir = ''\n\n        outname = op.join(outdir, outname)\n\n        self.out_sspro = '{}.ss'.format(outname)\n        self.out_sspro8 = '{}.ss8'.format(outname)\n        self.out_accpro = '{}.acc'.format(outname)\n        self.out_accpro20 = '{}.acc20'.format(outname)\n\n        \n        ssbio.utils.command_runner(\n            shell_command='{} {} {} {}'.format(path_to_scratch, self.seq_file, outname, num_cores),\n            force_rerun_flag=force_rerun, outfile_checker='{}.ss'.format(outname))", "docstring": "Run SCRATCH on the sequence_file that was loaded into the class.\n\nArgs:\npath_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh\noutname: Prefix to name the output files\noutdir: Directory to store the output files\nforce_rerun: Flag to force rerunning of SCRATCH even if the output files exist\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _post(self, url, data, scope):\n        \n        self._create_session(scope)\n        response = self.session.post(url, data=data)\n        return response.status_code, response.text", "docstring": "Make a POST request using the session object to a Degreed endpoint.\n\nArgs:\nurl (str): The url to send a POST request to.\ndata (str): The json encoded payload to POST.\nscope (str): Must be one of the scopes Degreed expects:\n- `CONTENT_PROVIDER_SCOPE`\n- `COMPLETION_PROVIDER_SCOPE`", "source": "juraj-google-style"}
{"code": "def extract_changelog_items(text, tags):\n    \n    \n\n    patterns = {x['header']: tag_re(x['tag']) for x in tags}\n    items = {x['header']: [] for x in tags}\n    curr_tag = None\n    curr_text = ''\n\n    for line in text.splitlines():\n        if not line.strip():\n            if curr_tag is not None:\n                items[curr_tag].append(curr_text)\n                curr_text = ''\n            curr_tag = None\n\n        for tag in tags:\n            m = patterns[tag['header']].match(line)\n            if m:\n                if curr_tag is not None:\n                    items[curr_tag].append(curr_text)\n                    curr_text = ''\n\n                curr_tag = tag['header']\n                line = m.group('text')\n                break\n\n        if curr_tag is not None:\n            curr_text = '{} {}'.format(curr_text.strip(), line.strip()).strip()\n\n    if curr_tag is not None:\n        items[curr_tag].append(curr_text)\n\n    return items", "docstring": "Extract all tagged items from text.\n\nArgs:\ntext (str):\nText to extract the tagged items from. Each tagged item is a\nparagraph that starts with a tag. It can also be a text list item.\n\nReturns:\ntuple[list[str], list[str], list[str]]:\nA tuple of `(features, changes, fixes)` extracted from the given\ntext.\n\nThe tagged items are usually features/changes/fixes but it can be configured\nthrough `pelconf.yaml`.", "source": "juraj-google-style"}
{"code": "def initialize_from_assignments(assignments, k, max_assign_weight=0.75):\n    cells = len(assignments)\n    init_W = np.zeros((k, cells))\n    for (i, a) in enumerate(assignments):\n        init_W[(a, i)] = max_assign_weight\n        for a2 in range(k):\n            if (a2 != a):\n                init_W[(a2, i)] = ((1 - max_assign_weight) / (k - 1))\n    return (init_W / init_W.sum(0))", "docstring": "Creates a weight initialization matrix from Poisson clustering assignments.\n\nArgs:\nassignments (array): 1D array of integers, of length cells\nk (int): number of states/clusters\nmax_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75\n\nReturns:\ninit_W (array): k x cells", "source": "codesearchnet"}
{"code": "def ParseContainersTable(self, parser_mediator, database=None, table=None, **unused_kwargs):\n    if (database is None):\n        raise ValueError('Missing database value.')\n    if (table is None):\n        raise ValueError('Missing table value.')\n    for esedb_record in table.records:\n        if parser_mediator.abort:\n            break\n        record_values = self._GetRecordValues(parser_mediator, table.name, esedb_record)\n        event_data = MsieWebCacheContainersEventData()\n        event_data.container_identifier = record_values.get('ContainerId', None)\n        event_data.directory = record_values.get('Directory', None)\n        event_data.name = record_values.get('Name', None)\n        event_data.set_identifier = record_values.get('SetId', None)\n        timestamp = record_values.get('LastScavengeTime', None)\n        if timestamp:\n            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, 'Last Scavenge Time')\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        timestamp = record_values.get('LastAccessTime', None)\n        if timestamp:\n            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        container_identifier = record_values.get('ContainerId', None)\n        container_name = record_values.get('Name', None)\n        if ((not container_identifier) or (not container_name)):\n            continue\n        table_name = 'Container_{0:d}'.format(container_identifier)\n        esedb_table = database.get_table_by_name(table_name)\n        if (not esedb_table):\n            parser_mediator.ProduceExtractionWarning('Missing table: {0:s}'.format(table_name))\n            continue\n        self._ParseContainerTable(parser_mediator, esedb_table, container_name)", "docstring": "Parses the Containers table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.\n\nRaises:\nValueError: if the database or table value is missing.", "source": "codesearchnet"}
{"code": "def profile_update(self, profile):\n    if (profile.get('install_json') is None):\n        print('{}{}Missing install_json parameter for profile {}.'.format(c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name')))\n    self.profile_update_args_v2(profile)\n    self.profile_update_args_v3(profile)\n    self.profile_update_schema(profile)", "docstring": "Update an existing profile with new parameters or remove deprecated parameters.\n\nArgs:\nprofile (dict): The dictionary containting the profile settings.", "source": "codesearchnet"}
{"code": "def exponential(x):\n    return ops.exp(x)", "docstring": "Exponential activation function.\n\nArgs:\nx: Input tensor.", "source": "github-repos"}
{"code": "def ensure_dir_path(self, path, relative=False):\n        \n        if not relative:\n            rel_path = self.relpath(path)\n        else:\n            rel_path = path\n\n        \n        if self.is_locator(rel_path, relative=True):\n            path = path.rstrip('/')\n\n        \n        elif rel_path:\n            path = path.rstrip('/') + '/'\n        \n        return path", "docstring": "Ensure the path is a dir path.\n\nShould end with '/' except for schemes and locators.\n\nArgs:\npath (str): Path or URL.\nrelative (bool): Path is relative to current root.\n\nReturns:\npath: dir path", "source": "juraj-google-style"}
{"code": "def get_items_for_config_file_output(self, source_to_settings,\n                                         parsed_namespace):\n        \n        config_file_items = OrderedDict()\n        for source, settings in source_to_settings.items():\n            if source == _COMMAND_LINE_SOURCE_KEY:\n                _, existing_command_line_args = settings['']\n                for action in self._actions:\n                    config_file_keys = self.get_possible_config_keys(action)\n                    if config_file_keys and not action.is_positional_arg and \\\n                        already_on_command_line(existing_command_line_args,\n                                                action.option_strings):\n                        value = getattr(parsed_namespace, action.dest, None)\n                        if value is not None:\n                            if isinstance(value, bool):\n                                value = str(value).lower()\n                            config_file_items[config_file_keys[0]] = value\n\n            elif source == _ENV_VAR_SOURCE_KEY:\n                for key, (action, value) in settings.items():\n                    config_file_keys = self.get_possible_config_keys(action)\n                    if config_file_keys:\n                        value = getattr(parsed_namespace, action.dest, None)\n                        if value is not None:\n                            config_file_items[config_file_keys[0]] = value\n            elif source.startswith(_CONFIG_FILE_SOURCE_KEY):\n                for key, (action, value) in settings.items():\n                    config_file_items[key] = value\n            elif source == _DEFAULTS_SOURCE_KEY:\n                for key, (action, value) in settings.items():\n                    config_file_keys = self.get_possible_config_keys(action)\n                    if config_file_keys:\n                        value = getattr(parsed_namespace, action.dest, None)\n                        if value is not None:\n                            config_file_items[config_file_keys[0]] = value\n        return config_file_items", "docstring": "Converts the given settings back to a dictionary that can be passed\nto ConfigFormatParser.serialize(..).\n\nArgs:\nsource_to_settings: the dictionary described in parse_known_args()\nparsed_namespace: namespace object created within parse_known_args()\nReturns:\nan OrderedDict where keys are strings and values are either strings\nor lists", "source": "juraj-google-style"}
{"code": "def save(tiff_filename, numpy_data):\n    \n    \n    tiff_filename = os.path.expanduser(tiff_filename)\n\n    if type(numpy_data) is str:\n        fp = open(png_filename, \"wb\")\n        fp.write(numpy_data)\n        fp.close()\n        return png_filename\n\n    try:\n        img = tiff.imsave(tiff_filename, numpy_data)\n    except Exception as e:\n        raise ValueError(\"Could not save TIFF file {0}.\".format(tiff_filename))\n\n    return tiff_filename", "docstring": "Export a numpy array to a TIFF file.\n\nArguments:\ntiff_filename:  A filename to which to save the TIFF data\nnumpy_data:     The numpy array to save to TIFF\n\nReturns:\nString. The expanded filename that now holds the TIFF data", "source": "juraj-google-style"}
{"code": "def _ReadParserPresetValues(self, preset_definition_values):\n    if (not preset_definition_values):\n        raise errors.MalformedPresetError('Missing preset definition values.')\n    name = preset_definition_values.get('name', None)\n    if (not name):\n        raise errors.MalformedPresetError('Invalid preset definition missing name.')\n    parsers = preset_definition_values.get('parsers', None)\n    if (not parsers):\n        raise errors.MalformedPresetError('Invalid preset definition missing parsers.')\n    parser_preset = ParserPreset(name, parsers)\n    for operating_system_values in preset_definition_values.get('operating_systems', []):\n        operating_system = self._ReadOperatingSystemArtifactValues(operating_system_values)\n        parser_preset.operating_systems.append(operating_system)\n    return parser_preset", "docstring": "Reads a parser preset from a dictionary.\n\nArgs:\npreset_definition_values (dict[str, object]): preset definition values.\n\nReturns:\nParserPreset: a parser preset.\n\nRaises:\nMalformedPresetError: if the format of the preset definition is not set\nor incorrect, or the preset of a specific operating system has already\nbeen set.", "source": "codesearchnet"}
{"code": "def compute_inv_covariance(L_aug, Y, k, p):\n    return np.linalg.inv(compute_covariance(L_aug, Y, k, p))", "docstring": "Given label matrix L and labels Y, compute the covariance.\n\nArgs:\nL: (np.array) [n, d] The augmented (indicator) label matrix\nY: (np.array int) [n] The true labels in {1,...,k}", "source": "codesearchnet"}
{"code": "def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]:\n    \n\n    text = pubmed[\"title\"] + pubmed[\"abstract\"]\n\n    annotations = {}\n\n    for nsarg in pubmed[\"annotations\"]:\n        url = f'{config[\"bel_api\"][\"servers\"][\"api_url\"]}/terms/{url_path_param_quoting(nsarg)}'\n        log.info(f\"URL: {url}\")\n        r = get_url(url)\n        log.info(f\"Result: {r}\")\n        new_nsarg = \"\"\n        if r and r.status_code == 200:\n            term = r.json()\n            new_nsarg = bel_utils.convert_nsarg(term[\"id\"], decanonicalize=True)\n\n            pubmed[\"annotations\"][nsarg][\"name\"] = term[\"name\"]\n            pubmed[\"annotations\"][nsarg][\"label\"] = term[\"label\"]\n            pubmed[\"annotations\"][nsarg][\"entity_types\"] = list(\n                set(\n                    pubmed[\"annotations\"][nsarg][\"entity_types\"]\n                    + term.get(\"entity_types\", [])\n                )\n            )\n            pubmed[\"annotations\"][nsarg][\"annotation_types\"] = list(\n                set(\n                    pubmed[\"annotations\"][nsarg][\"annotation_types\"]\n                    + term.get(\"annotation_types\", [])\n                )\n            )\n\n        if new_nsarg != nsarg:\n            annotations[new_nsarg] = copy.deepcopy(pubmed[\"annotations\"][nsarg])\n        else:\n            annotations[nsarg] = copy.deepcopy(pubmed[\"annotations\"][nsarg])\n\n    for nsarg in annotations:\n        for idx, span in enumerate(annotations[nsarg][\"spans\"]):\n            string = text[span[\"begin\"] - 1 : span[\"end\"] - 1]\n            annotations[nsarg][\"spans\"][idx][\"text\"] = string\n\n    pubmed[\"annotations\"] = copy.deepcopy(annotations)\n\n    return pubmed", "docstring": "Enhance pubmed namespace IDs\n\nAdd additional entity and annotation types to annotations\nUse preferred id for namespaces as needed\nAdd strings from Title, Abstract matching Pubtator BioConcept spans\n\nNOTE - basically duplicated code with bel_api:api.services.pubmed\n\nArgs:\npubmed\n\nReturns:\npubmed object", "source": "juraj-google-style"}
{"code": "def set_server_def_retries(retries):\n    context().set_server_def_retries(retries)", "docstring": "Set the number of retries to use when calling SetServerDef.\n\nIn cases where many servers run in high-preemption environments, jobs could\nbe preempted during startup and initial connection via SetServerDef. Retries\nallow for more robust connection in these environments.\n\n\nArgs:\nretries: int specifying the number of connection retries before failing.\nRetries follow an exponential backoff waiting period with min value 1ms,\nmax value 10s, and exponent 1.3.", "source": "github-repos"}
{"code": "def create_sns_event(app_name, env, region, rules):\n    \n    session = boto3.Session(profile_name=env, region_name=region)\n    sns_client = session.client('sns')\n\n    topic_name = rules.get('topic')\n    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)\n    topic_arn = get_sns_topic_arn(topic_name=topic_name, account=env, region=region)\n    protocol = 'lambda'\n\n    statement_id = '{}_sns_{}'.format(app_name, topic_name)\n    principal = 'sns.amazonaws.com'\n    add_lambda_permissions(\n        function=lambda_alias_arn,\n        statement_id=statement_id,\n        action='lambda:InvokeFunction',\n        principal=principal,\n        source_arn=topic_arn,\n        env=env,\n        region=region)\n\n    sns_client.subscribe(TopicArn=topic_arn, Protocol=protocol, Endpoint=lambda_alias_arn)\n    LOG.debug(\"SNS Lambda event created\")\n\n    LOG.info(\"Created SNS event subscription on topic %s\", topic_name)", "docstring": "Create SNS lambda event from rules.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\nrules (str): Trigger rules from the settings", "source": "juraj-google-style"}
{"code": "def phase_crossings(ts, phi=0.0):\n    ts = ts.squeeze()\n    if (ts.ndim is not 1):\n        raise ValueError('Currently can only use on single variable timeseries')\n    ts = mod2pi((ts - phi))\n    tsa = ts[0:(- 1)]\n    tsb = ts[1:]\n    p2 = (np.pi / 2)\n    zc = (np.nonzero((((((tsa > (- p2)) & (tsa < 0)) & (tsb >= 0)) & (tsb < p2)) | ((((tsa < p2) & (tsa > 0)) & (tsb <= 0)) & (tsb > (- p2)))))[0] + 1)\n    va = ts[(zc - 1)]\n    vb = ts[zc]\n    ct = (((np.abs(vb) * ts.tspan[(zc - 1)]) + (np.abs(va) * ts.tspan[zc])) / np.abs((vb - va)))\n    if (ts[0] == 0.0):\n        zc = np.r_[(np.array([0]), zc)]\n        ct = np.r_[(np.array([ts.tspan[0]]), ct)]\n    pc = (np.nonzero((((tsa > p2) & (tsb < (- p2))) | ((tsa < (- p2)) & (tsb > p2))))[0] + 1)\n    splice = np.searchsorted(pc, zc)\n    which_zc = np.r_[(np.array([0]), (np.nonzero((splice[0:(- 1)] - splice[1:]))[0] + 1))]\n    if (ct.shape[0] is 0):\n        return ct\n    else:\n        return ct[which_zc]", "docstring": "For a single variable timeseries representing the phase of an oscillator,\nfind the times at which the phase crosses angle phi,\nwith the condition that the phase must visit phi+pi between crossings.\n\n(Thus if noise causes the phase to wander back and forth across angle phi\nwithout the oscillator doing a full revolution, then this is recorded as\na single crossing event, giving the time of the earliest arrival.)\n\nIf the timeseries begins (or ends) exactly at phi, then time zero\n(or the ending time) is also included as a crossing event,\nso that the boundaries of the first and last oscillations are included.\n\nIf the actual crossing time falls between two time steps, linear\ninterpolation is used to estimate the crossing time.\n\nArguments:\nts: Timeseries (single variable)\nThe timeseries of an angle variable (radians)\n\nphi (float): Critical phase angle (radians) at which to report crossings.\n\nReturns:\narray of float", "source": "codesearchnet"}
{"code": "def get_processors(processor_cat, prop_defs, data_attr=None):\n    processor_defs = prop_defs.get(processor_cat, [])\n    processor_list = []\n    for processor in processor_defs:\n        proc_class = PropertyProcessor[processor['rdf_type'][0]]\n        processor_list.append(proc_class(processor.get('kds_params', [{}]), data_attr))\n    return processor_list", "docstring": "reads the prop defs and adds applicable processors for the property\n\nArgs:\nprocessor_cat(str): The category of processors to retreive\nprop_defs: property defintions as defined by the rdf defintions\ndata_attr: the attr to manipulate during processing.\n\nReturns:\nlist: a list of processors", "source": "codesearchnet"}
{"code": "def load(self, cellpy_file, parent_level=\"CellpyData\"):\n        \n\n        try:\n            self.logger.debug(\"loading cellpy-file (hdf5):\")\n            self.logger.debug(cellpy_file)\n            new_datasets = self._load_hdf5(cellpy_file, parent_level)\n            self.logger.debug(\"cellpy-file loaded\")\n        except AttributeError:\n            new_datasets = []\n            self.logger.warning(\"This cellpy-file version is not supported by\"\n                                \"current reader (try to update cellpy).\")\n\n        if new_datasets:\n            for dataset in new_datasets:\n                self.datasets.append(dataset)\n        else:\n            \n            self.logger.warning(\"Could not load\")\n            self.logger.warning(str(cellpy_file))\n\n        self.number_of_datasets = len(self.datasets)\n        self.status_datasets = self._validate_datasets()\n        self._invent_a_name(cellpy_file)\n        return self", "docstring": "Loads a cellpy file.\n\nArgs:\ncellpy_file (path, str): Full path to the cellpy file.\nparent_level (str, optional): Parent level", "source": "juraj-google-style"}
{"code": "def format_info(variant, variant_type='snv'):\n    \n    \n    observations = variant.get('observations',0)\n\n    homozygotes = variant.get('homozygote')\n    hemizygotes = variant.get('hemizygote')\n\n    \n    vcf_info = f\"Obs={observations}\"\n    if homozygotes:\n        vcf_info += f\";Hom={homozygotes}\"\n    if hemizygotes:\n        vcf_info += f\";Hem={hemizygotes}\"\n\n    \n    if variant_type == 'sv':\n        end = int((variant['end_left'] + variant['end_right'])/2)\n        \n        vcf_info += f\";SVTYPE={variant['sv_type']};END={end};SVLEN={variant['length']}\"\n\n    return vcf_info", "docstring": "Format the info field for SNV variants\n\nArgs:\nvariant(dict)\nvariant_type(str): snv or sv\n\nReturns:\nvcf_info(str): A VCF formated info field", "source": "juraj-google-style"}
{"code": "def default(self, obj):\n    if isinstance(obj, datetime):\n        return obj.isoformat()\n    if issubclass(obj.__class__, Enum.__class__):\n        return obj.value\n    to_json = getattr(obj, 'to_json', None)\n    if to_json:\n        out = obj.to_json()\n        if issubclass(obj.__class__, Model):\n            out.update({'__type': obj.__class__.__name__})\n        return out\n    return JSONEncoder.default(self, obj)", "docstring": "Default object encoder function\n\nArgs:\nobj (:obj:`Any`): Object to be serialized\n\nReturns:\nJSON string", "source": "codesearchnet"}
{"code": "def umask(self, new_mask):\n        \n        if not is_int_type(new_mask):\n            raise TypeError('an integer is required')\n        old_umask = self.filesystem.umask\n        self.filesystem.umask = new_mask\n        return old_umask", "docstring": "Change the current umask.\n\nArgs:\nnew_mask: (int) The new umask value.\n\nReturns:\nThe old umask.\n\nRaises:\nTypeError: if new_mask is of an invalid type.", "source": "juraj-google-style"}
{"code": "def write_int8(self, value, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.pack('%sb' % endian, value)", "docstring": "Pack the value as a signed byte and write 1 byte to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"}
{"code": "def absent(name):\n    \n\n    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n\n    \n    comment_bridge_deleted = 'Bridge {0} deleted.'.format(name)\n    comment_bridge_notdeleted = 'Unable to delete bridge: {0}.'.format(name)\n    comment_bridge_notexists = 'Bridge {0} does not exist.'.format(name)\n    changes_bridge_deleted = {name: {'old': 'Bridge {0} exists.'.format(name),\n                                     'new': 'Bridge {0} deleted.'.format(name),\n                                     }\n                              }\n\n    bridge_exists = __salt__['openvswitch.bridge_exists'](name)\n\n    \n    if __opts__['test']:\n        if not bridge_exists:\n            ret['result'] = True\n            ret['comment'] = comment_bridge_notexists\n        else:\n            ret['result'] = None\n            ret['comment'] = comment_bridge_deleted\n\n        return ret\n\n    if not bridge_exists:\n        ret['result'] = True\n        ret['comment'] = comment_bridge_notexists\n    else:\n        bridge_delete = __salt__['openvswitch.bridge_delete'](name)\n        if bridge_delete:\n            ret['result'] = True\n            ret['comment'] = comment_bridge_deleted\n            ret['changes'] = changes_bridge_deleted\n        else:\n            ret['result'] = False\n            ret['comment'] = comment_bridge_notdeleted\n\n    return ret", "docstring": "Ensures that the named bridge does not exist, eventually deletes it.\n\nArgs:\nname: The name of the bridge.", "source": "juraj-google-style"}
{"code": "def downsample(data, percent):\n    n_genes = data.shape[0]\n    n_cells = data.shape[1]\n    new_data = data.copy()\n    total_count = float(data.sum())\n    to_remove = (total_count * percent)\n    cell_sums = data.sum(0).astype(float)\n    cell_gene_probs = (data / cell_sums)\n    cell_probs = np.array((cell_sums / total_count)).flatten()\n    cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)\n    for (i, num_selected) in enumerate(cells_selected):\n        cell_gene = np.array(cell_gene_probs[(:, i)]).flatten()\n        genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)\n        if sparse.issparse(data):\n            genes_selected = sparse.csc_matrix(genes_selected).T\n        new_data[(:, i)] -= genes_selected\n    new_data[(new_data < 0)] = 0\n    return new_data", "docstring": "downsample the data by removing a given percentage of the reads.\n\nArgs:\ndata: genes x cells array or sparse matrix\npercent: float between 0 and 1", "source": "codesearchnet"}
{"code": "def read_header(self, file_handle, nextdata_offset=0):\n    header = {'FCS format': file_handle.read(6)}\n    file_handle.read(4)\n    for field in ('text start', 'text end', 'data start', 'data end', 'analysis start', 'analysis end'):\n        s = file_handle.read(8)\n        try:\n            field_value = int(s)\n        except ValueError:\n            field_value = 0\n        header[field] = (field_value + nextdata_offset)\n    for k in ('text start', 'text end'):\n        if (header[k] == 0):\n            raise ValueError(u'The FCS file \"{}\" seems corrupted. (Parser cannot locate information about the \"{}\" segment.)'.format(self.path, k))\n        elif (header[k] > self._file_size):\n            raise ValueError(u'The FCS file \"{}\" is corrupted. \"{}\" segment is larger than file size'.format(self.path, k))\n        else:\n            pass\n    self._data_start = header['data start']\n    self._data_end = header['data start']\n    if ((header['analysis end'] - header['analysis start']) != 0):\n        warnings.warn(u'There appears to be some information in the ANALYSIS segment of file {0}. However, it might not be read correctly.'.format(self.path))\n    self.annotation['__header__'] = header", "docstring": "Read the header of the FCS file.\n\nThe header specifies where the annotation, data and analysis are located inside the binary\nfile.\n\nArgs:\nfile_handle: buffer containing FCS file.\nnextdata_offset: byte offset of a set header from file start specified by $NEXTDATA", "source": "codesearchnet"}
{"code": "def _flatten_beam_dim(tensor):\n  \n  shape = _shape_list(tensor)\n  shape[0] *= shape[1]\n  shape.pop(1)  \n  return tf.reshape(tensor, shape)", "docstring": "Reshapes first two dimensions in to single dimension.\n\nArgs:\ntensor: Tensor to reshape of shape [A, B, ...]\n\nReturns:\nReshaped tensor of shape [A*B, ...]", "source": "juraj-google-style"}
{"code": "def update_hash(a_hash, mv):\n    \n    if mv.labels:\n        signing.add_dict_to_hash(a_hash, encoding.MessageToPyValue(mv.labels))\n    money_value = mv.get_assigned_value(u'moneyValue')\n    if money_value is not None:\n        a_hash.update(b'\\x00')\n        a_hash.update(money_value.currencyCode.encode('utf-8'))", "docstring": "Adds ``mv`` to ``a_hash``\n\nArgs:\na_hash (`Hash`): the secure hash, e.g created by hashlib.md5\nmv (:class:`MetricValue`): the instance to add to the hash", "source": "juraj-google-style"}
{"code": "def CheckTaskToMerge(self, task):\n    with self._lock:\n        is_abandoned = (task.identifier in self._tasks_abandoned)\n        is_processing = (task.identifier in self._tasks_processing)\n        is_queued = (task.identifier in self._tasks_queued)\n        if ((not is_queued) and (not is_processing) and (not is_abandoned)):\n            raise KeyError('Status of task {0:s} is unknown.'.format(task.identifier))\n        return (is_queued or is_processing or (is_abandoned and (not task.has_retry)))", "docstring": "Checks if the task should be merged.\n\nArgs:\ntask (Task): task.\n\nReturns:\nbool: True if the task should be merged.\n\nRaises:\nKeyError: if the task was not queued, processing or abandoned.", "source": "codesearchnet"}
{"code": "def timestamp(method='iso8601'):\n    \n    if method == 'iso8601':\n        \n        \n        \n        \n        tz_hour = time.timezone \n        utc_offset = str(tz_hour) if tz_hour < 0 else '+' + str(tz_hour)\n        stamp = time.strftime('%Y-%m-%dT%H%M%S') + utc_offset\n        return stamp\n    else:\n        raise ValueError('only iso8601 is accepted for now')", "docstring": "make an iso8601 timestamp\n\nArgs:\nmethod (str): type of timestamp\n\nExample:\n>>> stamp = timestamp()\n>>> print('stamp = {!r}'.format(stamp))\nstamp = ...-...-...T...", "source": "juraj-google-style"}
{"code": "def collapse_phenotypes(self, input_phenotype_labels, output_phenotype_label, verbose=True):\n    if isinstance(input_phenotype_labels, str):\n        input_phenotype_labels = [input_phenotype_labels]\n    bad_phenotypes = (set(input_phenotype_labels) - set(self.phenotypes))\n    if (len(bad_phenotypes) > 0):\n        raise ValueError((('Error phenotype(s) ' + str(bad_phenotypes)) + ' are not in the data.'))\n    data = self.copy()\n    if (len(input_phenotype_labels) == 0):\n        return data\n\n    def _swap_in(d, inputs, output):\n        overlap = set(d.keys()).intersection(inputs)\n        if (len(overlap) == 0):\n            return d\n        keepers = [(k, v) for (k, v) in d.items() if (k not in inputs)]\n        return dict((keepers + [(output_phenotype_label, max([d[x] for x in overlap]))]))\n    data['phenotype_calls'] = data.apply((lambda x: _swap_in(x['phenotype_calls'], input_phenotype_labels, output_phenotype_label)), 1)\n\n    def _set_label(d):\n        vals = [k for (k, v) in d.items() if (v == 1)]\n        return (np.nan if (len(vals) == 0) else vals[0])\n    data['phenotype_label'] = data.apply((lambda x: _set_label(x['phenotype_calls'])), 1)\n    return data", "docstring": "Rename one or more input phenotypes to a single output phenotype\n\nArgs:\ninput_phenotype_labels (list): A str name or list of names to combine\noutput_phenotype_label (list): A str name to change the phenotype names to\nverbose (bool): output more details\n\nReturns:\nCellDataFrame: The CellDataFrame modified.", "source": "codesearchnet"}
{"code": "def track_metric(self, name, value, type=None, count=None, min=None, max=None, std_dev=None, properties=None):\n    dataPoint = channel.contracts.DataPoint()\n    dataPoint.name = (name or NULL_CONSTANT_STRING)\n    dataPoint.value = (value or 0)\n    dataPoint.kind = (type or channel.contracts.DataPointType.aggregation)\n    dataPoint.count = count\n    dataPoint.min = min\n    dataPoint.max = max\n    dataPoint.std_dev = std_dev\n    data = channel.contracts.MetricData()\n    data.metrics.append(dataPoint)\n    if properties:\n        data.properties = properties\n    self.track(data, self._context)", "docstring": "Send information about a single metric data point that was captured for the application.\n\nArgs:\nname (str). the name of the metric that was captured.\\n\nvalue (float). the value of the metric that was captured.\\n\ntype (:class:`channel.contracts.DataPointType`). the type of the metric. (defaults to: :func:`channel.contracts.DataPointType.aggregation`)\\n\ncount (int). the number of metrics that were aggregated into this data point. (defaults to: None)\\n\nmin (float). the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)\\n\nmax (float). the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)\\n\nstd_dev (float). the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)\\n\nproperties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)", "source": "codesearchnet"}
{"code": "def get_cytoband_coord(chrom, pos):\n    \n    chrom = chrom.strip('chr')\n    pos = int(pos)\n    result = None\n    logger.debug(\"Finding Cytoband for chrom:{0} pos:{1}\".format(chrom, pos))\n    if chrom in CYTOBANDS:\n        for interval in CYTOBANDS[chrom][pos]:\n            result = \"{0}{1}\".format(chrom, interval.data)\n\n    return result", "docstring": "Get the cytoband coordinate for a position\n\nArgs:\nchrom(str): A chromosome\npos(int): The position\n\nReturns:\ncytoband", "source": "juraj-google-style"}
{"code": "def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val,\n                   penalty_val, learning_rate_val):\n    \n    \n    step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val,\n                      self.eig_num_iter_placeholder: eig_num_iter_val,\n                      self.smooth_placeholder: smooth_val,\n                      self.penalty_placeholder: penalty_val,\n                      self.learning_rate: learning_rate_val}\n\n    if self.params['eig_type'] == 'SCIPY':\n      current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec()\n      step_feed_dict.update({\n          self.eig_vec_estimate: current_eig_vector\n      })\n    elif self.params['eig_type'] == 'LZS':\n      step_feed_dict.update({\n          self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate\n      })\n\n    self.sess.run(self.train_step, feed_dict=step_feed_dict)\n\n    [\n        _, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate\n    ] = self.sess.run([\n        self.proj_step,\n        self.eig_vec_estimate,\n        self.eig_val_estimate\n    ], feed_dict=step_feed_dict)\n\n    if self.current_step % self.params['print_stats_steps'] == 0:\n      [self.current_total_objective, self.current_unconstrained_objective,\n       self.dual_object.m_min_vec_estimate,\n       self.current_eig_val_estimate,\n       self.current_nu] = self.sess.run(\n           [self.total_objective,\n            self.dual_object.unconstrained_objective,\n            self.eig_vec_estimate,\n            self.eig_val_estimate,\n            self.dual_object.nu], feed_dict=step_feed_dict)\n\n      stats = {\n          'total_objective':\n              float(self.current_total_objective),\n          'unconstrained_objective':\n              float(self.current_unconstrained_objective),\n          'min_eig_val_estimate':\n              float(self.current_eig_val_estimate)\n      }\n      tf.logging.info('Current inner step: %d, optimization stats: %s',\n                      self.current_step, stats)\n      if self.params['stats_folder'] is not None:\n        stats = json.dumps(stats)\n        filename = os.path.join(self.params['stats_folder'],\n                                str(self.current_step) + '.json')\n        with tf.gfile.Open(filename) as file_f:\n          file_f.write(stats)\n\n    \n    if self.current_step % self.params['projection_steps'] == 0 and self.current_unconstrained_objective < 0:\n      nu = self.sess.run(self.dual_object.nu)\n      dual_feed_dict = {\n          self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate\n      }\n      _, min_eig_val_h_lz = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict)\n      projected_dual_feed_dict = {\n          self.dual_object.projected_dual.nu: nu,\n          self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz\n      }\n      if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict):\n        return True\n\n    return False", "docstring": "Run one step of gradient descent for optimization.\n\nArgs:\neig_init_vec_val: Start value for eigen value computations\neig_num_iter_val: Number of iterations to run for eigen computations\nsmooth_val: Value of smoothness parameter\npenalty_val: Value of penalty for the current step\nlearning_rate_val: Value of learning rate\nReturns:\nfound_cert: True is negative certificate is found, False otherwise", "source": "juraj-google-style"}
{"code": "async def complete_task(context, result):\n    \n    args = [get_task_id(context.claim_task), get_run_id(context.claim_task)]\n    reversed_statuses = get_reversed_statuses(context)\n    try:\n        if result == 0:\n            log.info(\"Reporting task complete...\")\n            response = await context.temp_queue.reportCompleted(*args)\n        elif result != 1 and result in reversed_statuses:\n            reason = reversed_statuses[result]\n            log.info(\"Reporting task exception {}...\".format(reason))\n            payload = {\"reason\": reason}\n            response = await context.temp_queue.reportException(*args, payload)\n        else:\n            log.info(\"Reporting task failed...\")\n            response = await context.temp_queue.reportFailed(*args)\n        log.debug(\"Task status response:\\n{}\".format(pprint.pformat(response)))\n    except taskcluster.exceptions.TaskclusterRestFailure as exc:\n        if exc.status_code == 409:\n            log.info(\"409: not reporting complete/failed.\")\n        else:\n            raise", "docstring": "Mark the task as completed in the queue.\n\nDecide whether to call reportCompleted, reportFailed, or reportException\nbased on the exit status of the script.\n\nIf the task has expired or been cancelled, we'll get a 409 status.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nRaises:\ntaskcluster.exceptions.TaskclusterRestFailure: on non-409 error.", "source": "juraj-google-style"}
{"code": "def Filter(fn, *args, **kwargs):\n    if not callable(fn):\n        raise TypeError('Filter can be used only with callable objects. Received %r instead.' % fn)\n    wrapper = lambda x, *args, **kwargs: [x] if fn(x, *args, **kwargs) else []\n    label = 'Filter(%s)' % ptransform.label_from_callable(fn)\n    if hasattr(fn, '__name__'):\n        wrapper.__name__ = fn.__name__\n    fn_type_hints = typehints.decorators.IOTypeHints.from_callable(fn)\n    if fn_type_hints is not None:\n        fn_type_hints = fn_type_hints.with_output_types()\n    type_hints = get_type_hints(fn).with_defaults(fn_type_hints)\n    if type_hints.input_types is not None:\n        wrapper = with_input_types(*type_hints.input_types[0], **type_hints.input_types[1])(wrapper)\n    output_hint = type_hints.simple_output_type(label)\n    if output_hint is None and get_type_hints(wrapper).input_types and get_type_hints(wrapper).input_types[0]:\n        output_hint = get_type_hints(wrapper).input_types[0][0]\n    if output_hint:\n        wrapper = with_output_types(typehints.Iterable[_strip_output_annotations(output_hint)])(wrapper)\n    wrapper._argspec_fn = fn\n    pardo = FlatMap(wrapper, *args, **kwargs)\n    pardo.label = label\n    return pardo", "docstring": ":func:`Filter` is a :func:`FlatMap` with its callable filtering out\nelements.\n\nFilter accepts a function that keeps elements that return True, and filters\nout the remaining elements.\n\nArgs:\nfn (``Callable[..., bool]``): a callable object. First argument will be an\nelement.\n*args: positional arguments passed to the transform callable.\n**kwargs: keyword arguments passed to the transform callable.\n\nReturns:\n~apache_beam.pvalue.PCollection:\nA :class:`~apache_beam.pvalue.PCollection` containing the\n:func:`Filter` outputs.\n\nRaises:\nTypeError: If the **fn** passed as argument is not a callable.\nTypical error is to pass a :class:`DoFn` instance which is supported only\nfor :class:`ParDo`.", "source": "github-repos"}
{"code": "def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):\n    q_ids = torch.arange(0, query_size, device=device)\n    k_ids = torch.arange(0, key_size, device=device)\n    rel_pos_ids = q_ids[:, None] - k_ids[None, :]\n    if bucket_size > 0 and max_position > 0:\n        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n    rel_pos_ids = rel_pos_ids.to(torch.long)\n    rel_pos_ids = rel_pos_ids[:query_size, :]\n    rel_pos_ids = rel_pos_ids.unsqueeze(0)\n    return rel_pos_ids", "docstring": "Build relative position according to the query and key\n\nWe assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\nP_k\\)\n\nArgs:\nquery_size (int): the length of query\nkey_size (int): the length of key\nbucket_size (int): the size of position bucket\nmax_position (int): the maximum allowed absolute position\ndevice (`torch.device`): the device on which tensors will be created.\n\nReturn:\n`torch.LongTensor`: A tensor with shape [1, query_size, key_size]", "source": "github-repos"}
{"code": "def __init__(self, coord, timer_interval_secs, target=None, args=None, kwargs=None):\n    if not isinstance(coord, Coordinator):\n        raise ValueError(\"'coord' argument must be a Coordinator: %s\" % coord)\n    super(LooperThread, self).__init__()\n    self.daemon = True\n    self._coord = coord\n    self._timer_interval_secs = timer_interval_secs\n    self._target = target\n    if self._target:\n        self._args = args or ()\n        self._kwargs = kwargs or {}\n    elif args or kwargs:\n        raise ValueError(\"'args' and 'kwargs' argument require that you also pass 'target'\")\n    self._coord.register_thread(self)", "docstring": "Create a LooperThread.\n\nArgs:\ncoord: A Coordinator.\ntimer_interval_secs: Time boundaries at which to call Run(), or None\nif it should be called back to back.\ntarget: Optional callable object that will be executed in the thread.\nargs: Optional arguments to pass to `target` when calling it.\nkwargs: Optional keyword arguments to pass to `target` when calling it.\n\nRaises:\nValueError: If one of the arguments is invalid.", "source": "github-repos"}
{"code": "def nearest_neighbors(self, word, top_k=10):\n    point = self[word]\n    diff = (self.vectors - point)\n    distances = np.linalg.norm(diff, axis=1)\n    top_ids = distances.argsort()[1:(top_k + 1)]\n    return [self.vocabulary.id_word[i] for i in top_ids]", "docstring": "Return the nearest k words to the given `word`.\n\nArgs:\nword (string): single word.\ntop_k (integer): decides how many neighbors to report.\n\nReturns:\nA list of words sorted by the distances. The closest is the first.\n\nNote:\nL2 metric is used to calculate distances.", "source": "codesearchnet"}
{"code": "def load_pickled_model(filename, dirname=None):\n    \n    if dirname is None:\n        pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet')\n        pkg_dirname = os.path.dirname(pkg_filename)\n        dirname = os.path.join(pkg_dirname, 'pickled_models', model_path)\n    filepath = os.path.join(dirname, filename)\n    return joblib.load(filepath)", "docstring": "Load a pickled ``Extractor`` model from disk.\n\nArgs:\nfilename (str): Name of pickled model file under ``dirname``.\ndirname (str): Name of directory on disk containing the pickled model.\nIf None, dragnet's default pickled model directory is used:\n/path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION]\n\nReturns:\n:class:`dragnet.extractor.Extractor`", "source": "juraj-google-style"}
{"code": "def start_vm(access_token, subscription_id, resource_group, vm_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '/start', '?api-version=', COMP_API])\n    return do_post(endpoint, '', access_token)", "docstring": "Start a virtual machine.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\n\nReturns:\nHTTP response.", "source": "codesearchnet"}
{"code": "def _padded_shape_to_tensor(padded_shape, input_component_shape):\n    try:\n        padded_shape_as_shape = tensor_shape.as_shape(padded_shape)\n        ret = ops.convert_to_tensor([dim if dim is not None else -1 for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)\n    except (TypeError, ValueError) as e:\n        ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)\n        if ret.shape.dims is not None and len(ret.shape.dims) != 1:\n            raise ValueError(f'Padded shape {padded_shape} must be a `tf.int64` vector tensor, but its shape was {ret.shape}.') from e\n        if ret.dtype != dtypes.int64:\n            raise TypeError(f'Padded shape {padded_shape} must be a `tf.int64` vector tensor, but its element type was {ret.dtype.name}.') from e\n        padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)\n    if not _is_padded_shape_compatible_with(padded_shape_as_shape, input_component_shape):\n        raise ValueError(f'The padded shape {padded_shape_as_shape} is not compatible with the shape {input_component_shape} of the corresponding input component.')\n    return ret", "docstring": "Converts `padded_shape` to a `tf.Tensor` representing that shape.\n\nArgs:\npadded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python\nsequence, or a 1-D `tf.Tensor` of `tf.int64` elements.\ninput_component_shape: A `tf.TensorShape`, with which `padded_shape` must be\ncompatible.\n\nReturns:\nA 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.\n\nRaises:\nValueError: If `padded_shape` is not a shape or not compatible with\n`input_component_shape`.\nTypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.", "source": "github-repos"}
{"code": "def env_problem(env_problem_name, **kwargs):\n    ep_cls = Registries.env_problems[env_problem_name]\n    ep = ep_cls()\n    ep.initialize(**kwargs)\n    return ep", "docstring": "Get and initialize the `EnvProblem` with the given name and batch size.\n\nArgs:\nenv_problem_name: string name of the registered env problem.\n**kwargs: forwarded to env problem's initialize method.\n\nReturns:\nan initialized EnvProblem with the given batch size.", "source": "codesearchnet"}
{"code": "def get_host(self, retry_count=3):\n    try:\n        default_timeout_set = False\n        if (not socket.getdefaulttimeout()):\n            socket.setdefaulttimeout(self.timeout)\n            default_timeout_set = True\n        log.debug('Host query for {0}'.format(self.address_str))\n        ret = socket.gethostbyaddr(self.address_str)\n        if default_timeout_set:\n            socket.setdefaulttimeout(None)\n        results = namedtuple('get_host_results', 'hostname, aliaslist, ipaddrlist')\n        return results(ret)\n    except (socket.timeout, socket.error) as e:\n        log.debug('Host query socket error: {0}'.format(e))\n        if (retry_count > 0):\n            log.debug('Host query retrying (count: {0})'.format(str(retry_count)))\n            return self.get_host((retry_count - 1))\n        else:\n            raise HostLookupError('Host lookup failed for {0}.'.format(self.address_str))\n    except:\n        raise HostLookupError('Host lookup failed for {0}.'.format(self.address_str))", "docstring": "The function for retrieving host information for an IP address.\n\nArgs:\nretry_count (:obj:`int`): The number of times to retry in case\nsocket errors, timeouts, connection resets, etc. are\nencountered. Defaults to 3.\n\nReturns:\nnamedtuple:\n\n:hostname (str): The hostname returned mapped to the given IP\naddress.\n:aliaslist (list): Alternate names for the given IP address.\n:ipaddrlist (list): IPv4/v6 addresses mapped to the same hostname.\n\nRaises:\nHostLookupError: The host lookup failed.", "source": "codesearchnet"}
{"code": "def visit(self, node):\n    \n    method = 'visit_' + node.__class__.__name__\n    if not hasattr(self, method):\n      raise ValueError('Unknown node type: %s' % node.__class__.__name__)\n    visitor = getattr(self, method)\n\n    \n    \n    if anno.hasanno(node, 'active_in'):\n      self.active_variables = anno.getanno(node, 'active_in')\n    pri, adj = visitor(node)\n\n    \n    if isinstance(pri, gast.AST):\n      anno.setdefaultanno(pri, 'adj', adj)\n    else:\n      for node in pri:\n        anno.setdefaultanno(node, 'adj', adj)\n    if isinstance(adj, gast.AST):\n      anno.setdefaultanno(adj, 'pri', pri)\n    else:\n      for node in adj:\n        anno.setdefaultanno(node, 'pri', pri)\n\n    return pri, adj", "docstring": "Visit a node.\n\nThis method is largely modelled after the ast.NodeTransformer class.\n\nArgs:\nnode: The node to visit.\n\nReturns:\nA tuple of the primal and adjoint, each of which is a node or a list of\nnodes.", "source": "juraj-google-style"}
{"code": "def PopTask(self):\n    try:\n        (_, task) = heapq.heappop(self._heap)\n    except IndexError:\n        return None\n    self._task_identifiers.remove(task.identifier)\n    return task", "docstring": "Retrieves and removes the first task from the heap.\n\nReturns:\nTask: the task or None if the heap is empty.", "source": "codesearchnet"}
{"code": "def linear_set_layer(layer_size, inputs, context=None, activation_fn=tf.nn.relu, dropout=0.0, name=None):\n    with tf.variable_scope(name, default_name='linear_set_layer', values=[inputs]):\n        outputs = conv1d(inputs, layer_size, 1, activation=None, name='set_conv')\n        if (context is not None):\n            if (len(context.get_shape().as_list()) == 2):\n                context = tf.expand_dims(context, axis=1)\n            cont_tfm = conv1d(context, layer_size, 1, activation=None, name='cont_conv')\n            outputs += cont_tfm\n        if (activation_fn is not None):\n            outputs = activation_fn(outputs)\n        if (dropout != 0.0):\n            outputs = tf.nn.dropout(outputs, (1.0 - dropout))\n        return outputs", "docstring": "Basic layer type for doing funky things with sets.\n\nApplies a linear transformation to each element in the input set.\nIf a context is supplied, it is concatenated with the inputs.\ne.g. One can use global_pool_1d to get a representation of the set which\ncan then be used as the context for the next layer.\n\nTODO: Add bias add (or control the biases used).\n\nArgs:\nlayer_size: Dimension to transform the input vectors to.\ninputs: A tensor of shape [batch_size, sequence_length, input_dims]\ncontaining the sequences of input vectors.\ncontext: A tensor of shape [batch_size, context_dims] containing a global\nstatistic about the set.\nactivation_fn: The activation function to use.\ndropout: Dropout probability.\nname: name.\n\nReturns:\nTensor of shape [batch_size, sequence_length, output_dims] containing the\nsequences of transformed vectors.", "source": "codesearchnet"}
{"code": "def complete(self, stream):\n    assert (not self.is_complete())\n    self._marker.addInputPort(outputPort=stream.oport)\n    self.stream.oport.schema = stream.oport.schema\n    self._pending_schema._set(self.stream.oport.schema)\n    stream.oport.operator._start_op = True", "docstring": "Complete the pending stream.\n\nAny connections made to :py:attr:`stream` are connected to `stream` once\nthis method returns.\n\nArgs:\nstream(Stream): Stream that completes the connection.", "source": "codesearchnet"}
{"code": "def square(x):\n    return math_ops.square(x)", "docstring": "Element-wise square.\n\nArgs:\nx: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def __init__(self, cluster_resolver=None, communication_options=None):\n    if communication_options is None:\n        communication_options = collective_util.Options()\n    super(CollectiveAllReduceStrategy, self).__init__(CollectiveAllReduceExtended(self, cluster_resolver=cluster_resolver, communication_options=communication_options))\n    distribute_lib.distribution_strategy_gauge.get_cell('V2').set('MultiWorkerMirroredStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended._num_workers)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended._num_devices_per_worker)", "docstring": "Creates the strategy.\n\nArgs:\ncluster_resolver: optional\n`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,\n`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\ncommunication_options: optional\n`tf.distribute.experimental.CommunicationOptions`. This configures the\ndefault options for cross device communications. It can be overridden by\noptions provided to the communication APIs like\n`tf.distribute.ReplicaContext.all_reduce`. See\n`tf.distribute.experimental.CommunicationOptions` for details.", "source": "github-repos"}
{"code": "def inversion(origin=(0, 0, 0)):\n        \n        mat = -np.eye(4)\n        mat[3, 3] = 1\n        mat[0:3, 3] = 2 * np.array(origin)\n        return SymmOp(mat)", "docstring": "Inversion symmetry operation about axis.\n\nArgs:\norigin (3x1 array): Origin of the inversion operation. Defaults\nto [0, 0, 0].\n\nReturns:\nSymmOp representing an inversion operation about the origin.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 tensor: bk.TensorLike,\n                 qubits: Qubits = None,\n                 memory: Dict[Addr, Any] = None) -> None:  \n        \n        if qubits is None:\n            tensor = bk.astensorproduct(tensor)\n            bits = bk.rank(tensor)\n            qubits = range(bits)\n\n        self.vec = QubitVector(tensor, qubits)\n        self._memory = memory if memory is not None else {}", "docstring": "Create a new State from a tensor of qubit amplitudes\n\nArgs:\ntensor: A vector or tensor of state amplitudes\nqubits: A sequence of qubit names.\n(Defaults to integer indices, e.g. [0, 1, 2] for 3 qubits)\nmemory: Classical memory.", "source": "juraj-google-style"}
{"code": "def __init__(self, actions=None):\n        \n        super().__init__(InstructionType.OFPIT_WRITE_ACTIONS)\n        self.actions = actions if actions else []", "docstring": "Create a InstructionWriteAction with the optional parameters below.\n\nArgs:\nactions (:class:`~.actions.ListOfActions`):\nActions associated with OFPIT_WRITE_ACTIONS.", "source": "juraj-google-style"}
{"code": "def reply(self, text):\n        \n        data = {'text': text, 'vchannel_id': self['vchannel_id']}\n        if self.is_p2p():\n            data['type'] = RTMMessageType.P2PMessage\n            data['to_uid'] = self['uid']\n        else:\n            data['type'] = RTMMessageType.ChannelMessage\n            data['channel_id'] = self['channel_id']\n        return RTMMessage(data)", "docstring": "Replys a text message\n\nArgs:\ntext(str): message content\n\nReturns:\nRTMMessage", "source": "juraj-google-style"}
{"code": "def taper_rate(p0, p1):\n    return ((2 * abs((p0[COLS.R] - p1[COLS.R]))) / point_dist(p0, p1))", "docstring": "Compute the taper rate between points p0 and p1\n\nArgs:\np0, p1: iterables with first 4 components containing (x, y, z, r)\n\nReturns:\nThe taper rate, defined as the absolute value of the difference in\nthe diameters of p0 and p1 divided by the euclidian distance\nbetween them.", "source": "codesearchnet"}
{"code": "def profile_update_schema(profile):\n        \n\n        \n        if profile.get('autoclear') is None:\n            print(\n                '{}{}Profile Update: Adding new \"autoclear\" parameter.'.format(\n                    c.Style.BRIGHT, c.Fore.YELLOW\n                )\n            )\n            profile['autoclear'] = True\n\n        \n        for validation in profile.get('validations') or []:\n            if validation.get('data_type') is None:\n                print(\n                    '{}{}Profile Update: Adding new \"data_type\" parameter.'.format(\n                        c.Style.BRIGHT, c.Fore.YELLOW\n                    )\n                )\n                validation['data_type'] = 'redis'\n\n        \n        if profile.get('install_json') is not None and profile.get('script') is not None:\n            print(\n                '{}{}Removing deprecated \"script\" parameter.'.format(c.Style.BRIGHT, c.Fore.YELLOW)\n            )\n            profile.pop('script')", "docstring": "Update profile to latest schema.\n\nArgs:\nprofile (dict): The dictionary containting the profile settings.", "source": "juraj-google-style"}
{"code": "def __init__(self, api_key: str, model: str, config: genai_types.GenerateContentConfig, output_dict: dict[EventTransition, content_api.ProcessorContentTypes | None], sensitivity: Optional[dict[EventTransition, int]]=None, max_images: int=5):\n    self._client = genai.Client(api_key=api_key)\n    self._model = model\n    self._config = config\n    if not config.response_schema:\n        raise ValueError('Response schema is required for event detection.')\n    self._sensitivity = sensitivity\n    self._output_dict = {}\n    self._init_output_dict(output_dict)\n    self._last_transition = (START_STATE, START_STATE)\n    self._transition_counter = (self._last_transition, 0)\n    self._images = collections.deque[tuple[ProcessorPart, float]](maxlen=max_images)", "docstring": "Initializes the event detection processor.\n\nArgs:\napi_key: The API key to use for the event detection model.\nmodel: The model to use for the event detection.\nconfig: The configuration to use for the event detection model. This\nconfiguration should contain the response schema for the event detection\nmodel.\noutput_dict: A dictionary of transitions between events to the output to\nreturn when the transition is detected. A transition is a pair of event\nnames `(from_event_state, to_event_state)`, where `from_event_state` can\nbe the start state `START_STATE`, an event name, or the wild card `\"*\"`\nto define all transitions from any state (including the start state) to\nthe event state. When the output is None, the transition is detected but\nno output is returned.\nsensitivity: A dictionary of transitions to the number of detection in a\nrow that should happen before the event detection processor sends a\ndetection output. By default, the sensitivity is 1 for a transition.\nmax_images: The maximum number of images to keep in the input stream.", "source": "github-repos"}
{"code": "def predict(self, df_data, graph=None, **kwargs):\n    if (graph is None):\n        return self.create_graph_from_data(df_data, **kwargs)\n    elif isinstance(graph, nx.DiGraph):\n        return self.orient_directed_graph(df_data, graph, **kwargs)\n    elif isinstance(graph, nx.Graph):\n        return self.orient_undirected_graph(df_data, graph, **kwargs)\n    else:\n        print('Unknown Graph type')\n        raise ValueError", "docstring": "Orient a graph using the method defined by the arguments.\n\nDepending on the type of `graph`, this function process to execute\ndifferent functions:\n\n1. If ``graph`` is a ``networkx.DiGraph``, then ``self.orient_directed_graph`` is executed.\n2. If ``graph`` is a ``networkx.Graph``, then ``self.orient_undirected_graph`` is executed.\n3. If ``graph`` is a ``None``, then ``self.create_graph_from_data`` is executed.\n\nArgs:\ndf_data (pandas.DataFrame): DataFrame containing the observational data.\ngraph (networkx.DiGraph or networkx.Graph or None): Prior knowledge on the causal graph.\n\n.. warning::\nRequirement : Name of the nodes in the graph must correspond to the\nname of the variables in df_data", "source": "codesearchnet"}
{"code": "async def get_mailbox(self, name: str, selected: SelectedMailbox = None) \\\n            -> Tuple[MailboxInterface, Optional[SelectedMailbox]]:\n        \n        ...", "docstring": "Retrieves a :class:`~pymap.interfaces.mailbox.MailboxInterface`\nobject corresponding to an existing mailbox owned by the user. Raises\nan exception if the mailbox does not yet exist.\n\nArgs:\nname: The name of the mailbox.\nselected: If applicable, the currently selected mailbox name.\n\nRaises:\n:class:`~pymap.exceptions.MailboxNotFound`", "source": "juraj-google-style"}
{"code": "def _GetApprovals(self,\n                    approval_type,\n                    offset,\n                    count,\n                    filter_func=None,\n                    token=None):\n    \n    approvals_base_urn = aff4.ROOT_URN.Add(\"users\").Add(\n        token.username).Add(\"approvals\").Add(approval_type)\n\n    all_children = aff4.FACTORY.RecursiveMultiListChildren([approvals_base_urn])\n\n    approvals_urns = []\n    for subject, children in all_children:\n      \n      if children:\n        continue\n      approvals_urns.append(subject)\n\n    approvals_urns.sort(key=lambda x: x.age, reverse=True)\n    approvals = list(\n        aff4.FACTORY.MultiOpen(\n            approvals_urns,\n            mode=\"r\",\n            aff4_type=aff4_security.Approval,\n            age=aff4.ALL_TIMES,\n            token=token))\n    approvals_by_urn = {}\n    for approval in approvals:\n      approvals_by_urn[approval.symlink_urn or approval.urn] = approval\n\n    cur_offset = 0\n    sorted_approvals = []\n    for approval_urn in approvals_urns:\n      try:\n        approval = approvals_by_urn[approval_urn]\n      except KeyError:\n        continue\n\n      if filter_func is not None and not filter_func(approval):\n        continue\n      cur_offset += 1\n      if cur_offset <= offset:\n        continue\n      if count and len(sorted_approvals) >= count:\n        break\n      sorted_approvals.append(approval)\n\n    subjects_urns = [a.Get(a.Schema.SUBJECT) for a in approvals]\n    subjects_by_urn = {}\n    for subject in aff4.FACTORY.MultiOpen(subjects_urns, mode=\"r\", token=token):\n      subjects_by_urn[subject.urn] = subject\n\n    return sorted_approvals, subjects_by_urn", "docstring": "Gets all approvals for a given user and approval type.\n\nArgs:\napproval_type: The type of approvals to get.\noffset: The starting index within the collection.\ncount: The number of items to return.\nfilter_func: A predicate function, returning True if a specific approval\nshould be included in the result and False otherwise.\ntoken: The token identifying the user.\n\nReturns:\nA list of approvals of the given approval type.", "source": "juraj-google-style"}
{"code": "def get_avro_schema_from_table_schema(schema):\n    dict_table_schema = get_dict_table_schema(schema)\n    return bigquery_avro_tools.get_record_schema_from_dict_table_schema('root', dict_table_schema)", "docstring": "Transform the table schema into an Avro schema.\n\nArgs:\nschema (str, dict, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema):\nThe TableSchema to convert to Avro schema. This can either be a dict or\nstring or in the TableSchema format.\n\nReturns:\nDict[str, Any]: An Avro schema, which can be used by fastavro.", "source": "github-repos"}
{"code": "def zip_file(self, app_path, app_name, tmp_path):\n    zip_file = os.path.join(app_path, self.args.outdir, app_name)\n    zip_file_zip = '{}.zip'.format(zip_file)\n    zip_file_tcx = '{}.tcx'.format(zip_file)\n    shutil.make_archive(zip_file, 'zip', tmp_path, app_name)\n    shutil.move(zip_file_zip, zip_file_tcx)\n    self._app_packages.append(zip_file_tcx)\n    self.package_data['package'].append({'action': 'App Package:', 'output': zip_file_tcx})", "docstring": "Zip the App with tcex extension.\n\nArgs:\napp_path (str): The path of the current project.\napp_name (str): The name of the App.\ntmp_path (str): The temp output path for the zip.", "source": "codesearchnet"}
{"code": "def extend(self, table, keys=None):\n        \n        if keys:\n            for k in keys:\n                if k not in self._Header():\n                    raise IndexError(\"Unknown key: '%s'\", k)\n\n        extend_with = []\n        for column in table.header:\n            if column not in self.header:\n                extend_with.append(column)\n\n        if not extend_with:\n            return\n\n        for column in extend_with:\n            self.AddColumn(column)\n\n        if not keys:\n            for row1, row2 in zip(self, table):\n                for column in extend_with:\n                    row1[column] = row2[column]\n            return\n\n        for row1 in self:\n            for row2 in table:\n                for k in keys:\n                    if row1[k] != row2[k]:\n                        break\n                else:\n                    for column in extend_with:\n                        row1[column] = row2[column]\n                    break", "docstring": "Extends all rows in the texttable.\n\nThe rows are extended with the new columns from the table.\n\nArgs:\ntable: A texttable, the table to extend this table by.\nkeys: A set, the set of columns to use as the key. If None, the\nrow index is used.\n\nRaises:\nIndexError: If key is not a valid column name.", "source": "juraj-google-style"}
{"code": "def write_journal(self, journal_file_path):\n        \n        \n        with open(journal_file_path, \"w\") as jrn_file:\n            jrn_file.write(self._journal_contents)", "docstring": "Write the constructed journal in to the provided file.\n\nArgs:\njournal_file_path (str): full path to output journal file", "source": "juraj-google-style"}
{"code": "def import_aliases(alias_source):\n    alias_table = get_alias_table()\n    if is_url(alias_source):\n        alias_source = retrieve_file_from_url(alias_source)\n        alias_table.read(alias_source)\n        os.remove(alias_source)\n    else:\n        alias_table.read(alias_source)\n    _commit_change(alias_table)", "docstring": "Import aliases from a file or an URL.\n\nArgs:\nalias_source: The source of the alias. It can be a filepath or an URL.", "source": "codesearchnet"}
{"code": "def _prepare_feed_values(model, inputs, targets, sample_weights, mode):\n    if model._distribution_strategy:\n        if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2)):\n            inputs = distributed_training_utils_v1.get_iterator(inputs, model._distribution_strategy)\n\n        def get_distributed_inputs():\n            return distributed_training_utils_v1._prepare_feed_values(model, inputs, targets, sample_weights, mode)\n        if context.executing_eagerly():\n            return get_distributed_inputs\n        else:\n            return get_distributed_inputs()\n    if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2, iterator_ops.Iterator)):\n        inputs, targets, sample_weights = model._standardize_user_data(inputs, extract_tensors_from_dataset=True)\n    inputs = training_utils_v1.ModelInputs(inputs).as_list()\n    targets = list(targets or [])\n    sample_weights = list(sample_weights or [])\n    ins = inputs + targets + sample_weights\n    if mode == ModeKeys.TRAIN and (not isinstance(backend.symbolic_learning_phase(), int)):\n        ins += [True]\n    return ins", "docstring": "Prepare feed values to the model execution function.\n\nArgs:\nmodel: Model to prepare feed values for.\ninputs: List or dict of model inputs.\ntargets: Optional list of model targets.\nsample_weights: Optional list of sample weight arrays.\nmode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.\n\nReturns:\nFeed values for the model in the given mode.", "source": "github-repos"}
{"code": "def xray_driver_removed_handler(self, unused_channel, data):\n        \n        gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(\n            data, 0)\n        driver_data = gcs_entries.Entries(0)\n        message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(\n            driver_data, 0)\n        driver_id = message.DriverId()\n        logger.info(\"Monitor: \"\n                    \"XRay Driver {} has been removed.\".format(\n                        binary_to_hex(driver_id)))\n        self._xray_clean_up_entries_for_driver(driver_id)", "docstring": "Handle a notification that a driver has been removed.\n\nArgs:\nunused_channel: The message channel.\ndata: The message data.", "source": "juraj-google-style"}
{"code": "def accept_alert(self, text=None, wait=None):\n    wait = (wait or capybara.default_max_wait_time)\n    with self.driver.accept_modal('alert', text=text, wait=wait):\n        (yield)", "docstring": "Execute the wrapped code, accepting an alert.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "codesearchnet"}
{"code": "def check_for_missing_options(config):\n    \n    for section_name, section in config:\n\n        for option_name, option in section:\n\n            if option.required and option.value is None:\n\n                raise exc.MissingRequiredOption(\n                    \"Option {0} in namespace {1} is required.\".format(\n                        option_name,\n                        section_name,\n                    )\n                )\n\n    return config", "docstring": "Iter over a config and raise if a required option is still not set.\n\nArgs:\nconfig (confpy.core.config.Configuration): The configuration object\nto validate.\n\nRaises:\nMissingRequiredOption: If any required options are not set in the\nconfiguration object.\n\nRequired options with default values are considered set and will not cause\nthis function to raise.", "source": "juraj-google-style"}
{"code": "def condensed(network, state):\n    result = []\n    covered_nodes = set()\n    for c in reversed(sorted(complexes(network, state))):\n        if (not any(((n in covered_nodes) for n in c.subsystem.node_indices))):\n            result.append(c)\n            covered_nodes = (covered_nodes | set(c.subsystem.node_indices))\n    return result", "docstring": "Return a list of maximal non-overlapping complexes.\n\nArgs:\nnetwork (Network): The |Network| of interest.\nstate (tuple[int]): The state of the network (a binary tuple).\n\nReturns:\nlist[SystemIrreducibilityAnalysis]: A list of |SIA| for non-overlapping\ncomplexes with maximal |big_phi| values.", "source": "codesearchnet"}
{"code": "def issubset(self, other):\n        \n        other = self._cast_to_frameset(other)\n        if other is NotImplemented:\n            return NotImplemented\n        return self.items <= other.items", "docstring": "Check if the contents of `self` is a subset of the contents of\n`other.`\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\nbool:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def turtle_to_texture(turtle_program, turn_amount=DEFAULT_TURN, initial_angle=DEFAULT_INITIAL_ANGLE, resolution=1):\n    generator = branching_turtle_generator(turtle_program, turn_amount, initial_angle, resolution)\n    return texture_from_generator(generator)", "docstring": "Makes a texture from a turtle program.\n\nArgs:\nturtle_program (str): a string representing the turtle program; see the\ndocstring of `branching_turtle_generator` for more details\nturn_amount (float): amount to turn in degrees\ninitial_angle (float): initial orientation of the turtle\nresolution (int): if provided, interpolation amount for visible lines\n\nReturns:\ntexture: A texture.", "source": "codesearchnet"}
{"code": "def load(self, steps_dir=None, step_file=None, step_list=None):\n        \n        self._closed()\n\n        self.steps_library.load(steps_dir=steps_dir, step_file=step_file,\n                                step_list=step_list)", "docstring": "Load CWL steps into the WorkflowGenerator's steps library.\n\nAdds steps (command line tools and workflows) to the\n``WorkflowGenerator``'s steps library. These steps can be used to\ncreate workflows.\n\nArgs:\nsteps_dir (str): path to directory containing CWL files. All CWL in\nthe directory are loaded.\nstep_file (str): path to a file containing a CWL step that will be\nadded to the steps library.", "source": "juraj-google-style"}
{"code": "def search(*, include_disabled=True, account_ids=None, account_type_id=None, properties=None, return_query=False):\n    qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)\n    if (not include_disabled):\n        qry = qry.filter((Account.enabled == 1))\n    if account_ids:\n        if (type(account_ids) not in (list, tuple)):\n            account_ids = [account_ids]\n        qry = qry.filter(Account.account_id.in_(account_ids))\n    if account_type_id:\n        qry = qry.filter((Account.account_type_id == account_type_id))\n    if properties:\n        for (prop_name, value) in properties.items():\n            alias = aliased(AccountProperty)\n            qry = qry.join(alias, (Account.account_id == alias.account_id))\n            if (type(value) == list):\n                where_clause = []\n                for item in value:\n                    where_clause.append((alias.value == item))\n                qry = qry.filter(and_((alias.name == prop_name), or_(*where_clause)).self_group())\n            else:\n                qry = qry.filter(and_((alias.name == prop_name), (alias.value == value)).self_group())\n    if return_query:\n        return qry\n    total = qry.count()\n    return (total, list(map(BaseAccount.get_typed_account, qry.all())))", "docstring": "Search for accounts based on the provided filters\n\nArgs:\ninclude_disabled (`bool`): Include disabled accounts (default: True)\naccount_ids: (`list` of `int`): List of account IDs\naccount_type_id (`int`): Account Type ID to limit results to\nproperties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list\nof strings, in which case a boolean OR search is performed on the values\nreturn_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for\nsub-classes to amend the search feature with extra conditions. The calling function must handle pagination\non its own\n\nReturns:\n`list` of `Account`, `sqlalchemy.orm.Query`", "source": "codesearchnet"}
{"code": "def raw_filter(self, filters):\n        \n        return SearchResult(self, self._api.get(self._href, **{\"filter[]\": filters}))", "docstring": "Sends all filters to the API.\n\nNo fancy, just a wrapper. Any advanced functionality shall be implemented as another method.\n\nArgs:\nfilters: List of filters (strings)\n\nReturns: :py:class:`SearchResult`", "source": "juraj-google-style"}
{"code": "def verify_response(response, status_code, content_type=None):\n    \n    status = int(response.status.split(' ', 1)[0])\n    if status != status_code:\n      return False\n\n    if content_type is None:\n      return True\n\n    for header, value in response.headers:\n      if header.lower() == 'content-type':\n        return value == content_type\n\n    \n    return False", "docstring": "Verifies that a response has the expected status and content type.\n\nArgs:\nresponse: The ResponseTuple to be checked.\nstatus_code: An int, the HTTP status code to be compared with response\nstatus.\ncontent_type: A string with the acceptable Content-Type header value.\nNone allows any content type.\n\nReturns:\nTrue if both status_code and content_type match, else False.", "source": "juraj-google-style"}
{"code": "def roll50(msg):\n    d = hex2bin(data(msg))\n    if (d[0] == '0'):\n        return None\n    sign = int(d[1])\n    value = bin2int(d[2:11])\n    if sign:\n        value = (value - 512)\n    angle = ((value * 45.0) / 256.0)\n    return round(angle, 1)", "docstring": "Roll angle, BDS 5,0 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS50) string\n\nReturns:\nfloat: angle in degrees,\nnegative->left wing down, positive->right wing down", "source": "codesearchnet"}
{"code": "def _read_wrappers(self, name):\n    io_attr = getattr(self._io, name)\n\n    def read_wrapper(*args, **kwargs):\n        \"Wrap all read calls to the stream object.\\n\\n            We do this to track the read pointer separate from the write\\n            pointer.  Anything that wants to read from the stream object\\n            while we're in append mode goes through this.\\n\\n            Args:\\n                *args: pass through args\\n                **kwargs: pass through kwargs\\n            Returns:\\n                Wrapped stream object method\\n            \"\n        self._io.seek(self._read_seek, self._read_whence)\n        ret_value = io_attr(*args, **kwargs)\n        self._read_seek = self._io.tell()\n        self._read_whence = 0\n        self._io.seek(0, 2)\n        return ret_value\n    return read_wrapper", "docstring": "Wrap a stream attribute in a read wrapper.\n\nReturns a read_wrapper which tracks our own read pointer since the\nstream object has no concept of a different read and write pointer.\n\nArgs:\nname: The name of the attribute to wrap. Should be a read call.\n\nReturns:\nThe read_wrapper function.", "source": "codesearchnet"}
{"code": "def get_if_not_set(self, addresses):\n    with self._lock:\n        results = []\n        for add in addresses:\n            results.append(self._get_if_not_set(add))\n        return results", "docstring": "Returns the value at an address if it was an input to the txn but\nnever set. It returns None if that address was never set in the\nmerkle database, or if the address is not within the context.\n\nArgs:\naddresses (list of str): The full 70 character addresses.\n\nReturns:\n(list): bytes at that address but not set within the context", "source": "codesearchnet"}
{"code": "def __init__(self, arm_id_list):\n        \n        [self.__beta_dist_dict.setdefault(key, BetaDist()) for key in arm_id_list]", "docstring": "Initialization\n\nArgs:\narm_id_list:    List of arms Master id.", "source": "juraj-google-style"}
{"code": "def daemon_mode(self, args, options):\n        \n        cws = ControlWebSocket(self, args, options)\n        cws.start()\n        if 'cmdsock' in args and args['cmdsock']:\n            lcs = LocalControlSocket(self, args, options)\n            lcs.start()\n            lcs.join()\n        cws.join()", "docstring": "Open a ControlWebSocket to SushiBar server and listend for remote commands.\nArgs:\nargs (dict): chef command line arguments\noptions (dict): additional compatibility mode options given on command line", "source": "juraj-google-style"}
{"code": "def wwpn_alloc(self):\n    wwpn_int = self._wwpn_pool.alloc()\n    wwpn = ('AFFEAFFE0000' + '{:04X}'.format(wwpn_int))\n    return wwpn", "docstring": "Allocates a WWPN unique to this partition, in the range of\n0xAFFEAFFE00008000 to 0xAFFEAFFE0000FFFF.\n\nReturns:\nstring: The WWPN as 16 hexadecimal digits in upper case.\n\nRaises:\nValueError: No more WWPNs available in that range.", "source": "codesearchnet"}
{"code": "def parameterize(\n        self,\n        country: Optional[str] = \"South Sudan\",\n        state: Optional[str] = None,\n        year: Optional[int] = None,\n        month: Optional[int] = None,\n        unit: Optional[str] = None,\n        fallback_aggaxes: List[str] = [\"year\", \"month\"],\n        aggfunc: Callable = np.mean,\n    ):\n        \n\n        valid_axes = (\"country\", \"state\", \"year\", \"month\")\n\n        if any(map(lambda axis: axis not in valid_axes, fallback_aggaxes)):\n            raise ValueError(\n                \"All elements of the fallback_aggaxes set must be one of the \"\n                f\"following: {valid_axes}\"\n            )\n\n        for n in self.nodes(data=True):\n            for indicator in n[1][\"indicators\"].values():\n                indicator.mean, indicator.unit = get_indicator_value(\n                    indicator,\n                    country,\n                    state,\n                    year,\n                    month,\n                    unit,\n                    fallback_aggaxes,\n                    aggfunc,\n                )\n                indicator.stdev = 0.1 * abs(indicator.mean)", "docstring": "Parameterize the analysis graph.\n\nArgs:\ncountry\nyear\nmonth\nfallback_aggaxes:\nAn iterable of strings denoting the axes upon which to perform\nfallback aggregation if the desired constraints cannot be met.\naggfunc: The function that will be called to perform the\naggregation if there are multiple matches.", "source": "juraj-google-style"}
{"code": "def _aspect_preserving_resize(image, smallest_side):\n  \n  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)\n\n  shape = tf.shape(image)\n  height = shape[0]\n  width = shape[1]\n  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)\n  image = tf.expand_dims(image, 0)\n  resized_image = tf.image.resize_images(\n      image, size=[new_height, new_width], method=tf.image.ResizeMethod.BICUBIC)\n\n  resized_image = tf.squeeze(resized_image)\n  resized_image.set_shape([None, None, 3])\n  return resized_image", "docstring": "Resize images preserving the original aspect ratio.\n\nArgs:\nimage: A 3-D image `Tensor`.\nsmallest_side: A python integer or scalar `Tensor` indicating the size of\nthe smallest side after resize.\n\nReturns:\nresized_image: A 3-D tensor containing the resized image.", "source": "juraj-google-style"}
{"code": "def wait_for_job(self, job, poll=5):\n        \n        desc = _wait_until_training_done(lambda last_desc: _train_done(self.sagemaker_client, job, last_desc),\n                                         None, poll)\n        self._check_job_status(job, desc, 'TrainingJobStatus')\n        return desc", "docstring": "Wait for an Amazon SageMaker training job to complete.\n\nArgs:\njob (str): Name of the training job to wait for.\npoll (int): Polling interval in seconds (default: 5).\n\nReturns:\n(dict): Return value from the ``DescribeTrainingJob`` API.\n\nRaises:\nValueError: If the training job fails.", "source": "juraj-google-style"}
{"code": "def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):\n        \n        \n        assert self.P == 1, 'Incompatible number of traits'\n        \n        assert K!=None or is_noise, 'Specify covariance structure'\n        \n        if is_noise:\n            assert self.noisPos==None, 'noise term already exists'\n            K = SP.eye(self.Nt)\n            self.noisPos = self.n_terms\n        else:\n            assert K.shape[0]==self.Nt, 'Incompatible shape'\n            assert K.shape[1]==self.Nt, 'Incompatible shape'\n    \n        if Ks!=None:\n            assert Ks.shape[0]==self.N, 'Incompatible shape'\n\n        if normalize:\n            Norm = 1/K.diagonal().mean()\n            K *= Norm\n            if Ks!=None: Ks *= Norm\n\n        self.vd.addTerm(limix.CSingleTraitTerm(K))\n        if Ks!=None: self.setKstar(self.n_terms,Ks)\n        self.n_terms+=1\n    \n        self.gp         = None\n        self.init       = False\n        self.fast       = False\n        self.optimum    = None\n\n        self.cache['Sigma']   = None\n        self.cache['Hessian'] = None\n        self.cache['Lparams'] = None\n        self.cache['paramsST']= None", "docstring": "add random effects term for single trait models (no trait-trait covariance matrix)\n\nArgs:\nK:          NxN sample covariance matrix\nis_noise:\tbool labeling the noise term (noise term has K=eye)\nnormalize:\tif True, K and Ks are scales such that K.diagonal().mean()==1\nKs:\t\t\tNxN test cross covariance for predictions", "source": "juraj-google-style"}
{"code": "def freeze(script_path, target_dir='frozen', **kw):\n    cmds = []\n    freeze_start_time = time.time()\n    logging.debug(('/\\\\%s%s Output%s/\\\\' % (('-' * 10), 'Pyinstaller', ('-' * 10))))\n    orig_dir = os.path.abspath('.')\n    script_path = os.path.abspath(script_path)\n    try:\n        os.chdir(target_dir)\n        cmds += _freeze_config()\n        pyinst_path = ('%s/thirdparty/pyinstaller' % __path__[0])\n        cur_cmd = ('python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path))\n        cmds.append(cur_cmd)\n        if _run(cur_cmd):\n            _freeze_config(force=True)\n            cur_cmd = ('python -O %s/pyinstaller.py %s' % (pyinst_path, script_path))\n            _run(cur_cmd)\n    finally:\n        os.chdir(orig_dir)\n    logging.debug(('\\\\/%s%s Output%s\\\\/' % (('-' * 10), 'Pyinstaller', ('-' * 10))))\n    logging.info(('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time)))\n    return cmds", "docstring": "Wraps pyinstaller and provides an easy to use interface\n\nArgs:\nscript_path: Absolute path to python script to be frozen.\n\nReturns:\nList of freeze commands ran\n\nRaises:\nsubprocess.CalledProcessError: Freeze error.\nOSError: Freeze not found.", "source": "codesearchnet"}
{"code": "def list(self, **kwargs):\n        \n        resp = self.client.api.secrets(**kwargs)\n        return [self.prepare_model(obj) for obj in resp]", "docstring": "List secrets. Similar to the ``docker secret ls`` command.\n\nArgs:\nfilters (dict): Server-side list filtering options.\n\nReturns:\n(list of :py:class:`Secret`): The secrets.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def get_point(self, *position):\n    array = _ffi.new(self._arrayType, position)\n    if self._useOctaves:\n        return ((self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5)\n    return ((self._noiseFunc(self._noise, array) + 1) * 0.5)", "docstring": "Return the noise value of a specific position.\n\nExample usage: value = noise.getPoint(x, y, z)\n\nArgs:\nposition (Tuple[float, ...]): The point to sample at.\n\nReturns:\nfloat: The noise value at position.\n\nThis will be a floating point in the 0.0-1.0 range.", "source": "codesearchnet"}
{"code": "def select_files(self, what='o'):\n    choices = collections.OrderedDict([('i', self.input_file), ('o', self.output_file), ('f', self.files_file), ('j', self.job_file), ('l', self.log_file), ('e', self.stderr_file), ('q', self.qout_file)])\n    if (what == 'all'):\n        return [getattr(v, 'path') for v in choices.values()]\n    selected = []\n    for c in what:\n        try:\n            selected.append(getattr(choices[c], 'path'))\n        except KeyError:\n            logger.warning(('Wrong keyword %s' % c))\n    return selected", "docstring": "Helper function used to select the files of a task.\n\nArgs:\nwhat: string with the list of characters selecting the file type\nPossible choices:\ni ==> input_file,\no ==> output_file,\nf ==> files_file,\nj ==> job_file,\nl ==> log_file,\ne ==> stderr_file,\nq ==> qout_file,\nall ==> all files.", "source": "codesearchnet"}
{"code": "def probe_services(self, handle, conn_id, callback):\n        \n\n        self._command_task.async_command(['_probe_services', handle], callback,\n                                         {'connection_id': conn_id, 'handle': handle})", "docstring": "Given a connected device, probe for its GATT services and characteristics\n\nArgs:\nhandle (int): a handle to the connection on the BLED112 dongle\nconn_id (int): a unique identifier for this connection on the DeviceManager\nthat owns this adapter.\ncallback (callable): Callback to be called when this procedure finishes", "source": "juraj-google-style"}
{"code": "def make_grid(tensor, nrow=8, padding=2, pad_value=0):\n    if (not (isinstance(tensor, np.ndarray) or (isinstance(tensor, list) and all((isinstance(t, np.ndarray) for t in tensor))))):\n        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n    if isinstance(tensor, list):\n        tensor = np.stack(tensor, 0)\n    if (tensor.ndim == 2):\n        tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1]))\n    if (tensor.ndim == 3):\n        if (tensor.shape[0] == 1):\n            tensor = np.concatenate((tensor, tensor, tensor), 0)\n        tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1], tensor.shape[2]))\n    if ((tensor.ndim == 4) and (tensor.shape[1] == 1)):\n        tensor = np.concatenate((tensor, tensor, tensor), 1)\n    if (tensor.shape[0] == 1):\n        return np.squeeze(tensor)\n    nmaps = tensor.shape[0]\n    xmaps = min(nrow, nmaps)\n    ymaps = int(math.ceil((float(nmaps) / xmaps)))\n    (height, width) = (int((tensor.shape[2] + padding)), int((tensor.shape[3] + padding)))\n    grid = (np.ones((3, ((height * ymaps) + padding), ((width * xmaps) + padding))) * pad_value)\n    k = 0\n    for y in range(ymaps):\n        for x in range(xmaps):\n            if (k >= nmaps):\n                break\n            grid[(:, ((y * height) + padding):((y + 1) * height), ((x * width) + padding):((x + 1) * width))] = tensor[k]\n            k = (k + 1)\n    return grid", "docstring": "Make a grid of images, via numpy.\n\nArgs:\ntensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\nor a list of images all of the same size.\nnrow (int, optional): Number of images displayed in each row of the grid.\nThe Final grid size is (B / nrow, nrow). Default is 8.\npadding (int, optional): amount of padding. Default is 2.\npad_value (float, optional): Value for the padded pixels.", "source": "codesearchnet"}
{"code": "def bipartition(seq):\n    \n    return [(tuple(seq[i] for i in part0_idx),\n             tuple(seq[j] for j in part1_idx))\n            for part0_idx, part1_idx in bipartition_indices(len(seq))]", "docstring": "Return a list of bipartitions for a sequence.\n\nArgs:\na (Iterable): The sequence to partition.\n\nReturns:\nlist[tuple[tuple]]: A list of tuples containing each of the two\npartitions.\n\nExample:\n>>> bipartition((1,2,3))\n[((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))]", "source": "juraj-google-style"}
{"code": "def optimize(self, sensor_graph, model):\n        \n\n        passes = self._order_pases(self._known_passes.keys())\n\n        for opt_name in passes:\n            rerun = True\n            pass_instance = self._known_passes[opt_name][0]()\n\n            while rerun:\n                rerun = pass_instance.run(sensor_graph, model=model)", "docstring": "Optimize a sensor graph by running optimization passes.\n\nThe passes are run one at a time and modify the sensor graph\nfor future passes.\n\nArgs:\nsensor_graph (SensorGraph): The graph to be optimized\nmodel (DeviceModel): The device that we are optimizing\nfor, that OptimizationPass objects are free to use\nto guide their optimizations.", "source": "juraj-google-style"}
{"code": "def definition_name(cls):\n    outer_definition_name = cls.outer_definition_name()\n    if (outer_definition_name is None):\n        return six.text_type(cls.__name__)\n    return (u'%s.%s' % (outer_definition_name, cls.__name__))", "docstring": "Helper method for creating definition name.\n\nNames will be generated to include the classes package name,\nscope (if the class is nested in another definition) and class\nname.\n\nBy default, the package name for a definition is derived from\nits module name. However, this value can be overriden by\nplacing a 'package' attribute in the module that contains the\ndefinition class. For example:\n\npackage = 'some.alternate.package'\n\nclass MyMessage(Message):\n...\n\n>>> MyMessage.definition_name()\nsome.alternate.package.MyMessage\n\nReturns:\nDot-separated fully qualified name of definition.", "source": "codesearchnet"}
{"code": "def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):\n    mtf_dimension_name_to_size_gcd = {}\n    for mtf_operation in mtf_graph.operations:\n        for mtf_tensor in mtf_operation.outputs:\n            for mtf_dimension in mtf_tensor.shape.dims:\n                mtf_dimension_name_to_size_gcd[mtf_dimension.name] = fractions.gcd(mtf_dimension_name_to_size_gcd.get(mtf_dimension.name, mtf_dimension.size), mtf_dimension.size)\n    return mtf_dimension_name_to_size_gcd", "docstring": "Initializer for self._mtf_dimension_name_to_size_gcd.\n\nArgs:\nmtf_graph: an mtf.Graph.\n\nReturns:\nA {string: int}, mapping the name of an MTF dimension to the greatest\ncommon divisor of all the sizes it has. All these sizes being evenly\ndivisible by some x is equivalent to the GCD being divisible by x.", "source": "codesearchnet"}
{"code": "def _cast_indexed_slice_indices(a, b):\n    if isinstance(a, indexed_slices.IndexedSlices) and isinstance(b, indexed_slices.IndexedSlices) and (a.indices.dtype != b.indices.dtype):\n        a._indices = math_ops.cast(a.indices, dtypes.int64)\n        b._indices = math_ops.cast(b.indices, dtypes.int64)", "docstring": "Cast IndexedSlice.indices from int32 to int64 where necessary.\n\nIf `a` and `b` are both IndexedSlices, and their indices have different\ndtypes, then cast both their dtypes to `int64` (modifies `a` and `b`\nin-place).  Otherwise, does nothing.\n\nArgs:\na: A value, which may be an IndexedSlices.\nb: A value, which may be an IndexedSlices.", "source": "github-repos"}
{"code": "def translate_pname(self, pname: PrefName, mid: ModuleId) -> QualName:\n        \n        loc, nid = self.resolve_pname(pname, mid)\n        return (loc, self.namespace(nid))", "docstring": "Translate a prefixed name to a qualified name.\nArgs:\npname: Name with an optional prefix.\nmid: Identifier of the module in which `pname` appears.\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If the prefix specified in `pname` is not declared.", "source": "juraj-google-style"}
{"code": "def get(self, field_paths=None, transaction=None):\n    if isinstance(field_paths, six.string_types):\n        raise ValueError(\"'field_paths' must be a sequence of paths, not a string.\")\n    if (field_paths is not None):\n        mask = common_pb2.DocumentMask(field_paths=sorted(field_paths))\n    else:\n        mask = None\n    firestore_api = self._client._firestore_api\n    try:\n        document_pb = firestore_api.get_document(self._document_path, mask=mask, transaction=_helpers.get_transaction_id(transaction), metadata=self._client._rpc_metadata)\n    except exceptions.NotFound:\n        data = None\n        exists = False\n        create_time = None\n        update_time = None\n    else:\n        data = _helpers.decode_dict(document_pb.fields, self._client)\n        exists = True\n        create_time = document_pb.create_time\n        update_time = document_pb.update_time\n    return DocumentSnapshot(reference=self, data=data, exists=exists, read_time=None, create_time=create_time, update_time=update_time)", "docstring": "Retrieve a snapshot of the current document.\n\nSee :meth:`~.firestore_v1beta1.client.Client.field_path` for\nmore information on **field paths**.\n\nIf a ``transaction`` is used and it already has write operations\nadded, this method cannot be used (i.e. read-after-write is not\nallowed).\n\nArgs:\nfield_paths (Optional[Iterable[str, ...]]): An iterable of field\npaths (``.``-delimited list of field names) to use as a\nprojection of document fields in the returned results. If\nno value is provided, all fields will be returned.\ntransaction (Optional[~.firestore_v1beta1.transaction.\\\nTransaction]): An existing transaction that this reference\nwill be retrieved in.\n\nReturns:\n~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of\nthe current document. If the document does not exist at\nthe time of `snapshot`, the snapshot `reference`, `data`,\n`update_time`, and `create_time` attributes will all be\n`None` and `exists` will be `False`.", "source": "codesearchnet"}
{"code": "def _is_callable(self, node, obj):\n    val = obj.data\n    if isinstance(val, abstract.AMBIGUOUS_OR_EMPTY):\n        return (node, None)\n    if isinstance(val, abstract.Class):\n        return (node, True)\n    node, ret = self.ctx.attribute_handler.get_attribute(node, val, '__call__', valself=obj)\n    return (node, ret is not None)", "docstring": "Check if the object is callable.\n\nArgs:\nnode: The given node.\nobj: A BaseValue, the arg of a callable() call.\n\nReturns:\n(node, result) where result = True if the object is callable,\nFalse if it is not, and None if it is ambiguous.", "source": "github-repos"}
{"code": "def sg_accuracy(tensor, opt):\n    r\n    assert opt.target is not None, 'target is mandatory.'\n    opt += tf.sg_opt(k=1)\n\n    \n    out = tf.identity(tf.equal(tensor.sg_argmax(), tf.cast(opt.target, tf.int64)).sg_float(), name='acc')\n    \n\n    return out", "docstring": "r\"\"\"Returns accuracy of predictions.\n\nArgs:\ntensor: A `Tensor`. Probability distributions or unscaled prediction scores.\nopt:\ntarget: A 'Tensor`. Labels.\n\nReturns:\nA `Tensor` of the same shape as `tensor`. Each value will be 1 if correct else 0.\n\nFor example,\n\n```\ntensor = [[20.1, 18, -4.2], [0.04, 21.1, 31.3]]\ntarget = [[0, 1]]\ntensor.sg_accuracy(target=target) => [[ 1.  0.]]\n```", "source": "juraj-google-style"}
{"code": "def run_with_time_limit(self, cmd, time_limit=SUBMISSION_TIME_LIMIT):\n    \n    if time_limit < 0:\n      return self.run_without_time_limit(cmd)\n    container_name = str(uuid.uuid4())\n    cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME,\n           '--detach', '--name', container_name] + cmd\n    logging.info('Docker command: %s', ' '.join(cmd))\n    logging.info('Time limit %d seconds', time_limit)\n    retval = subprocess.call(cmd)\n    start_time = time.time()\n    elapsed_time_sec = 0\n    while is_docker_still_running(container_name):\n      elapsed_time_sec = int(time.time() - start_time)\n      if elapsed_time_sec < time_limit:\n        time.sleep(1)\n      else:\n        kill_docker_container(container_name)\n        logging.warning('Submission was killed because run out of time')\n    logging.info('Elapsed time of submission: %d', elapsed_time_sec)\n    logging.info('Docker retval: %d', retval)\n    if retval != 0:\n      logging.warning('Docker returned non-zero retval: %d', retval)\n      raise WorkerError('Docker returned non-zero retval ' + str(retval))\n    return elapsed_time_sec", "docstring": "Runs docker command and enforces time limit.\n\nArgs:\ncmd: list with the command line arguments which are passed to docker\nbinary after run\ntime_limit: time limit, in seconds. Negative value means no limit.\n\nReturns:\nhow long it took to run submission in seconds\n\nRaises:\nWorkerError: if error occurred during execution of the submission", "source": "juraj-google-style"}
{"code": "def __init__(self, video, quality=None, download_dir=None, templates=None) -> None:\n        \n        self.video = video\n        self.quality = quality or DEFAULT_OPTIONS['quality']\n        self.download_dir = download_dir or DEFAULT_OPTIONS['download_dir']\n        self.templates = templates or DEFAULT_OPTIONS['templates']\n\n        if self.quality not in ('worst', 'best'):\n            raise WrongQualityError", "docstring": "Create a VideoDownloader for a given video.\n\nArgs:\nvideo (Video): Video object.\nquality (str): Quality of the video (best/worst). Audio quality defaults to best.\ndownload_dir (str): Destination directory for the downloaded video.\ntemplates (dict): Dictionary of templates needed to generate a download path.", "source": "juraj-google-style"}
{"code": "def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):\n    if (subresource_id_or_uri and ('/' in subresource_id_or_uri)):\n        return subresource_id_or_uri\n    else:\n        if (not resource_id_or_uri):\n            raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)\n        resource_uri = self.build_uri(resource_id_or_uri)\n        uri = '{}/{}/{}'.format(resource_uri, subresource_path, str((subresource_id_or_uri or '')))\n        uri = uri.replace('\n        if uri.endswith('/'):\n            uri = uri[:(- 1)]\n        return uri", "docstring": "Helps to build a URI with resource path and its sub resource path.\n\nArgs:\nresoure_id_or_uri: ID/URI of the main resource.\nsubresource_id__or_uri: ID/URI of the sub resource.\nsubresource_path: Sub resource path to be added with the URI.\n\nReturns:\nReturns URI", "source": "codesearchnet"}
{"code": "def config_tab(backend):\n    \n    status = backend.status().to_dict()\n    config = backend.configuration().to_dict()\n\n    config_dict = {**status, **config}\n\n    upper_list = ['n_qubits', 'operational',\n                  'status_msg', 'pending_jobs',\n                  'basis_gates', 'local', 'simulator']\n\n    lower_list = list(set(config_dict.keys()).difference(upper_list))\n    \n    lower_list.remove('gates')\n    upper_str = \"<table>\"\n    upper_str += \n\n    footer = \"</table>\"\n\n    \n\n    upper_str += \"<tr><th>Property</th><th>Value</th></tr>\"\n    for key in upper_list:\n        upper_str += \"<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td></tr>\" % (\n            key, config_dict[key])\n    upper_str += footer\n\n    upper_table = widgets.HTML(\n        value=upper_str, layout=widgets.Layout(width='100%', grid_area='left'))\n\n    image_widget = widgets.Output(\n        layout=widgets.Layout(display='flex-inline', grid_area='right',\n                              padding='10px 10px 10px 10px',\n                              width='auto', max_height='300px',\n                              align_items='center'))\n\n    if not config['simulator']:\n        with image_widget:\n            gate_map = plot_gate_map(backend)\n            display(gate_map)\n        plt.close(gate_map)\n\n    lower_str = \"<table>\"\n    lower_str += \n    lower_str += \"<tr><th></th><th></th></tr>\"\n    for key in lower_list:\n        if key != 'name':\n            lower_str += \"<tr><td>%s</td><td>%s</td></tr>\" % (\n                key, config_dict[key])\n    lower_str += footer\n\n    lower_table = widgets.HTML(value=lower_str,\n                               layout=widgets.Layout(\n                                   width='auto',\n                                   grid_area='bottom'))\n\n    grid = widgets.GridBox(children=[upper_table, image_widget, lower_table],\n                           layout=widgets.Layout(\n                               grid_template_rows='auto auto',\n                               grid_template_columns='25% 25% 25% 25%',\n                               grid_template_areas=,\n                               grid_gap='0px 0px'))\n\n    return grid", "docstring": "The backend configuration widget.\n\nArgs:\nbackend (IBMQbackend): The backend.\n\nReturns:\ngrid: A GridBox widget.", "source": "juraj-google-style"}
{"code": "def report_progress(stream=None):\n    \n    if stream is None:\n        stream = sys.stderr\n    for reporter in _reporters:\n        reporter(stream)", "docstring": "Report progress from any currently installed reporters.\n\nArgs:\nstream: The text stream (default: sys.stderr) to which\nprogress will be reported.", "source": "juraj-google-style"}
{"code": "def get_raw_data_feature_spec(self, input_types: dict[str, type]) -> dict[str, tf.io.VarLenFeature]:\n    raw_data_feature_spec = {}\n    for key, value in input_types.items():\n        raw_data_feature_spec[key] = self._get_raw_data_feature_spec_per_column(typ=value, col_name=key)\n    return raw_data_feature_spec", "docstring": "Return a DatasetMetadata object to be used with\ntft_beam.AnalyzeAndTransformDataset.\nArgs:\ninput_types: A dictionary of column names and types.\nReturns:\nA DatasetMetadata object.", "source": "github-repos"}
{"code": "def get_tensor(self):\n    return load_tensor_from_event_file(self.file_path)", "docstring": "Get tensor from the dump (`Event`) file.\n\nReturns:\nThe tensor loaded from the dump (`Event`) file.", "source": "github-repos"}
{"code": "def WriteValuesToJSONFile(self, state, values):\n    \n    value_counters = {}\n    max_post_size = config.CONFIG[\"BigQuery.max_file_post_size\"]\n    for value in values:\n      class_name = value.__class__.__name__\n      output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n      \n      \n      \n      \n      value_counters[class_name] = value_counters.get(class_name, -1) + 1\n      if not value_counters[class_name] % max_post_size \n\n        \n        output_tracker.gzip_filehandle.flush()\n        if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size:\n          \n          self.Flush(state)\n          value_counters[class_name] = 0\n          output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n      if not output_tracker.schema:\n        output_tracker.schema = self.RDFValueToBigQuerySchema(value)\n\n      if created:\n        \n        self._WriteJSONValue(output_tracker.gzip_filehandle, value)\n      else:\n        self._WriteJSONValue(\n            output_tracker.gzip_filehandle, value, delimiter=\"\\n\")\n\n    for output_tracker in itervalues(self.temp_output_trackers):\n      output_tracker.gzip_filehandle.flush()", "docstring": "Write newline separated JSON dicts for each value.\n\nWe write each dict separately so we don't have to hold all of the output\nstreams in memory. We open and close the JSON array manually with [].\n\nArgs:\nstate: rdf_protodict.AttributedDict with the plugin's state.\nvalues: RDF values to export.", "source": "juraj-google-style"}
{"code": "def add_rel(self, source_id, target_id, rel):\n        \n        self.neo_db.add_rel(source_id, target_id, rel)", "docstring": "Add a relationship: source, target must already exist (see add_node)\n'rel' is the name of the relationship 'contains' or whatever.\nArgs:\nsource_id: the unique node_id of the source\ntarget_id: the unique node_id of the target\nrel: name of the relationship\nReturns:\nNothing", "source": "juraj-google-style"}
{"code": "def convert_to_scl(spec, scl_options):\n    scl_options['skip_functions'] = scl_options['skip_functions'].split(',')\n    scl_options['meta_spec'] = None\n    convertor = SclConvertor(options=scl_options)\n    return str(convertor.convert(spec))", "docstring": "Convert spec into SCL-style spec file using `spec2scl`.\n\nArgs:\nspec: (str) a spec file\nscl_options: (dict) SCL options provided\nReturns:\nA converted spec file", "source": "codesearchnet"}
{"code": "def signed_to_twos_comp(val: int, n_bits: int) -> int:\n    assert ((n_bits % 8) == 0), 'Must specify a whole number of bytes'\n    n_bytes = (n_bits \n    b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=True)\n    return int.from_bytes(b, byteorder=sys.byteorder, signed=False)", "docstring": "Convert a signed integer to its \"two's complement\" representation.\n\nArgs:\nval: signed integer\nn_bits: number of bits (which must reflect a whole number of bytes)\n\nReturns:\nunsigned integer: two's complement version", "source": "codesearchnet"}
{"code": "def _validate_alias_file_path(alias_file_path):\n    \n    if not os.path.exists(alias_file_path):\n        raise CLIError(ALIAS_FILE_NOT_FOUND_ERROR)\n\n    if os.path.isdir(alias_file_path):\n        raise CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))", "docstring": "Make sure the alias file path is neither non-existant nor a directory\n\nArgs:\nThe alias file path to import aliases from.", "source": "juraj-google-style"}
{"code": "def _InitializeURL(self, upload_url, current_content_length):\n    if (current_content_length != 0):\n        return upload_url\n    headers = {'Content-Type': 'application/xml', 'Content-Length': 0, 'x-goog-resumable': 'start'}\n    req = urllib2.Request(upload_url, data={}, headers=headers)\n    resp = self._url_opener.open(req)\n    return resp.headers['location']", "docstring": "Ensures that the URL used to upload operations is properly initialized.\n\nArgs:\nupload_url: a string url.\ncurrent_content_length: an integer identifying the current content length\nof data uploaded to the Batch Job.\n\nReturns:\nAn initialized string URL, or the provided string URL if the URL has\nalready been initialized.", "source": "codesearchnet"}
{"code": "def add_dataset(self, task_name, dataset=None, *, aliases=None):\n        \n        self._datasets.append(dataset if dataset is not None else TaskData())\n        last_index = len(self._datasets) - 1\n        self._aliases[task_name] = last_index\n\n        if aliases is not None:\n            for alias in aliases:\n                self._aliases[alias] = last_index\n\n        if len(self._datasets) == 1:\n            self._default_index = 0", "docstring": "Add a new dataset to the MultiTaskData.\n\nArgs:\ntask_name (str): The name of the task from which the dataset was received.\ndataset (TaskData): The dataset that should be added.\naliases (list): A list of aliases that should be registered with the dataset.", "source": "juraj-google-style"}
{"code": "def restore_component(self, component_name, save_path):\n    component = self.get_component(component_name=component_name)\n    self._validate_savable(component=component, component_name=component_name)\n    component.restore(sess=self.session, save_path=save_path)", "docstring": "Restores a component's parameters from a save location.\n\nArgs:\ncomponent_name: The component to restore.\nsave_path: The save location.", "source": "codesearchnet"}
{"code": "def from_string(cls, s):\n        \n\n        log.debug(\"Parsing email from string\")\n        message = email.message_from_string(s)\n        return cls(message)", "docstring": "Init a new object from a string.\n\nArgs:\ns (string): raw email\n\nReturns:\nInstance of MailParser", "source": "juraj-google-style"}
{"code": "def _copy_assets(src_path: str, dst_path: str) -> None:\n    for assets_dir_name in [_ASSETS_DIR, _ASSETS_EXTRA_DIR]:\n        src_assets_path = file_io.join(src_path, assets_dir_name)\n        if not file_io.file_exists_v2(src_assets_path):\n            continue\n        dst_assets_path = file_io.join(dst_path, assets_dir_name)\n        file_io.create_dir_v2(dst_assets_path)\n        for curr_dir, _, files in file_io.walk_v2(src_assets_path):\n            for asset_file_name in files:\n                src_asset_file = file_io.join(curr_dir, asset_file_name)\n                curr_dst_dir = curr_dir.replace(src_assets_path, dst_assets_path)\n                dst_asset_file = file_io.join(curr_dst_dir, asset_file_name)\n                file_io.copy_v2(src_asset_file, dst_asset_file)\n                logging.info('Copied asset file: %s -> %s', src_asset_file, dst_asset_file)", "docstring": "Copies the assets directory of the saved model.\n\nClones the contents of the assets/ directory from the source saved model\ndirectory to the destination saved model directory. Nothing will be copied if\nthere are no assets directory in the source directory.\n\nArgs:\nsrc_path: Source saved model directory.\ndst_path: Destination saved model directory. This directory must exist.", "source": "github-repos"}
{"code": "def _LinearMapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    members_data_size = self._data_type_definition.GetByteSize()\n    self._CheckByteStreamSize(byte_stream, byte_offset, members_data_size)\n\n    try:\n      struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])\n      struct_values = []\n      for attribute_index, value in enumerate(struct_tuple):\n        data_type_map = self._data_type_maps[attribute_index]\n        member_definition = self._data_type_definition.members[attribute_index]\n\n        value = data_type_map.MapValue(value)\n\n        supported_values = getattr(member_definition, 'values', None)\n        if supported_values and value not in supported_values:\n          raise errors.MappingError(\n              'Value: {0!s} not in supported values: {1:s}'.format(\n                  value, ', '.join([\n                      '{0!s}'.format(value) for value in supported_values])))\n\n        struct_values.append(value)\n\n      mapped_value = self._structure_values_class(*struct_values)\n\n    except Exception as exception:\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: {2!s}').format(\n              self._data_type_definition.name, byte_offset, exception)\n      raise errors.MappingError(error_string)\n\n    if context:\n      context.byte_size = members_data_size\n\n    return mapped_value", "docstring": "Maps a data type sequence on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\nobject: mapped value.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def plot_histogram(self, freq=None, figsize=(15, 5), title=None, bins=20, **kwargs):\n    if (title is None):\n        title = self._get_default_plot_title(self.name, freq, 'Return Histogram')\n    ser = self._get_series(freq).to_returns().dropna()\n    plt.figure(figsize=figsize)\n    ax = ser.hist(bins=bins, figsize=figsize, normed=True, **kwargs)\n    ax.set_title(title)\n    plt.axvline(0, linewidth=4)\n    return ser.plot(kind='kde')", "docstring": "Plots a histogram of returns given a return frequency.\n\nArgs:\n* freq (str): Data frequency used for display purposes.\nThis will dictate the type of returns\n(daily returns, monthly, ...)\nRefer to pandas docs for valid period strings.\n* figsize ((x,y)): figure size\n* title (str): Title if default not appropriate\n* bins (int): number of bins for the histogram\n* kwargs: passed to pandas' hist method", "source": "codesearchnet"}
{"code": "def _remove_jmp_to_get_anext_and_merge(blocks: list[Block], processed_blocks: set[Block]) -> list[Block]:\n    op_to_block = {}\n    merge_list = []\n    for block_idx, block in enumerate(blocks):\n        for code in block.code:\n            op_to_block[code] = block_idx\n    for block_idx, block in enumerate(blocks):\n        for code in block.code:\n            if code.end_async_for_target:\n                merge_list.append((block_idx, op_to_block[code.end_async_for_target]))\n    map_target = {}\n    for block_idx, block_idx_to_merge in merge_list:\n        jump_back_op = blocks[block_idx].code.pop()\n        blocks[block_idx].code.extend(blocks[block_idx_to_merge].code)\n        map_target[jump_back_op] = blocks[block_idx_to_merge].code[0]\n        if block_idx_to_merge < len(blocks) - 1:\n            blocks[block_idx].connect_outgoing(blocks[block_idx_to_merge + 1])\n        processed_blocks.add(blocks[block_idx])\n    to_delete = sorted({to_idx for _, to_idx in merge_list}, reverse=True)\n    for block_idx in to_delete:\n        del blocks[block_idx]\n    for block in blocks:\n        replace_op = map_target.get(block.code[-1].target, None)\n        if replace_op:\n            block.code[-1].target = replace_op\n    return blocks", "docstring": "Remove JUMP_BACKWARD instructions to GET_ANEXT instructions.\n\nAnd also merge the block that contains the END_ASYNC_FOR which is part of the\nsame loop of the GET_ANEXT and JUMP_BACKWARD construct, to the JUMP_BACKWARD\ninstruction. This is to ignore the JUMP_BACKWARD because in pytype's eyes it's\nuseless (as it'll jump back to block that it already executed), and also\nthis is the way to make pytype run the code of END_ASYNC_FOR and whatever\ncomes afterwards.\n\nArgs:\nblocks: A list of Block instances.\n\nReturns:\nA list of Block instances after the removal and merge.", "source": "github-repos"}
{"code": "def HumanReadableType(self):\n    if isinstance(self.service_type, py2to3.STRING_TYPES):\n        return self.service_type\n    return human_readable_service_enums.SERVICE_ENUMS['Type'].get(self.service_type, '{0:d}'.format(self.service_type))", "docstring": "Return a human readable string describing the type value.\n\nReturns:\nstr: human readable description of the type value.", "source": "codesearchnet"}
{"code": "def __contains__(self, item):\n        \n        if item not in self._contains_cache:\n            self._contains_cache[item] = self._contains(item)\n        return self._contains_cache[item]", "docstring": "Get result of _contains, cache it and return it.\n\nArgs:\nitem (Package/Module): a package or module.\n\nReturns:\nbool: True if self contains item, False otherwise.", "source": "juraj-google-style"}
{"code": "def build_attachment(self, text, target, attachment, thread):\n    attachment = {'as_user': True, 'text': text, 'channel': target, 'attachments': [{'fallback': text, 'image_url': attachment}]}\n    if thread:\n        attachment['thread_ts'] = thread\n    return attachment", "docstring": "Builds a slack attachment.\n\nArgs:\nmessage (Legobot.Message): message w/ metadata to send.\n\nReturns:\nattachment (dict): attachment data.", "source": "codesearchnet"}
{"code": "def AddContract(self, contract):\n        \n        if not contract.PublicKeyHash.ToBytes() in self._keys.keys():\n            raise Exception('Invalid operation - public key mismatch')\n\n        self._contracts[contract.ScriptHash.ToBytes()] = contract\n        if contract.ScriptHash in self._watch_only:\n            self._watch_only.remove(contract.ScriptHash)", "docstring": "Add a contract to the wallet.\n\nArgs:\ncontract (Contract): a contract of type neo.SmartContract.Contract.\n\nRaises:\nException: Invalid operation - public key mismatch.", "source": "juraj-google-style"}
{"code": "def download_artifacts_from_gcs(bucket_name, prefix, local_path):\n    client = Client()\n    bucket = client.get_bucket(bucket_name)\n    blobs = [blob.name for blob in bucket.list_blobs(prefix=prefix)]\n    _ = transfer_manager.download_many_to_path(bucket, blobs, destination_directory=local_path)", "docstring": "Downloads artifacts from GCS to the local file system.\nArgs:\nbucket_name: The name of the GCS bucket to download from.\nprefix: Prefix of GCS objects to download.\nlocal_path: The local path to download the folder to.", "source": "github-repos"}
{"code": "class Flatten(PTransform):\n\n    def __init__(self, **kwargs):\n        super().__init__()\n        self.pipeline = kwargs.pop('pipeline', None)\n        if kwargs:\n            raise ValueError('Unexpected keyword arguments: %s' % list(kwargs))\n\n    def _extract_input_pvalues(self, pvalueish):\n        try:\n            pvalueish = tuple(pvalueish)\n        except TypeError:\n            raise ValueError('Input to Flatten must be an iterable. Got a value of type %s instead.' % type(pvalueish))\n        return (pvalueish, pvalueish)\n\n    def expand(self, pcolls):\n        windowing = self.get_windowing(pcolls)\n        for pcoll in pcolls:\n            self._check_pcollection(pcoll)\n            if pcoll.windowing != windowing:\n                _LOGGER.warning('All input pcollections must have the same window. Windowing for flatten set to %s, windowing of pcoll %s set to %s', windowing, pcoll, pcoll.windowing)\n        is_bounded = all((pcoll.is_bounded for pcoll in pcolls))\n        return pvalue.PCollection(self.pipeline, is_bounded=is_bounded)\n\n    def infer_output_type(self, input_type):\n        return input_type\n\n    def to_runner_api_parameter(self, context):\n        return (common_urns.primitives.FLATTEN.urn, None)\n\n    @staticmethod\n    def from_runner_api_parameter(unused_ptransform, unused_parameter, unused_context):\n        return Flatten()", "docstring": "Merges several PCollections into a single PCollection.\n\nCopies all elements in 0 or more PCollections into a single output\nPCollection. If there are no input PCollections, the resulting PCollection\nwill be empty (but see also kwargs below).\n\nArgs:\n**kwargs: Accepts a single named argument \"pipeline\", which specifies the\npipeline that \"owns\" this PTransform. Ordinarily Flatten can obtain this\ninformation from one of the input PCollections, but if there are none (or\nif there's a chance there may be none), this argument is the only way to\nprovide pipeline information and should be considered mandatory.", "source": "github-repos"}
{"code": "def from_file(cls, vert, frag, **kwargs):\n    vert_program = open(vert).read()\n    frag_program = open(frag).read()\n    return cls(vert=vert_program, frag=frag_program, **kwargs)", "docstring": "Reads the shader programs, given the vert and frag filenames\n\nArguments:\n- vert (str): The filename of the vertex shader program (ex: 'vertshader.vert')\n- frag (str): The filename of the fragment shader program (ex: 'fragshader.frag')\n\nReturns:\n- shader (Shader): The Shader using these files.", "source": "codesearchnet"}
{"code": "def export_warnings(self, export_file):\n        \n        warn_filepath = op.dirname(export_file)\n        warn_filename = op.splitext(op.basename(export_file))[0]\n        self._add_entry(templates.EXPORT_WARNINGS\n                                 .format(warnings_export_path=warn_filepath,\n                                         warnings_export_file=warn_filename))", "docstring": "Append an export warnings entry to the journal.\n\nThis instructs Revit to export warnings from the opened model.\nCurrently Revit will stop journal execution if the model does not\nhave any warnings and the export warnings UI button is disabled.\n\nArgs:\nexport_file (str): full path of the ouput html file", "source": "juraj-google-style"}
{"code": "def InventoryReceived(self, inventory):\n    if (inventory.Hash.ToBytes() in self._MissedBlocks):\n        self._MissedBlocks.remove(inventory.Hash.ToBytes())\n    if (inventory is MinerTransaction):\n        return False\n    if (type(inventory) is Block):\n        if (BC.Default() is None):\n            return False\n        if BC.Default().ContainsBlock(inventory.Index):\n            return False\n        if (not BC.Default().AddBlock(inventory)):\n            return False\n    elif (not inventory.Verify(self.MemPool.values())):\n        return False", "docstring": "Process a received inventory.\n\nArgs:\ninventory (neo.Network.Inventory): expect a Block type.\n\nReturns:\nbool: True if processed and verified. False otherwise.", "source": "codesearchnet"}
{"code": "def clean(deltox=False):\n    \n\n    basedir = dirname(__file__)\n\n    print(cyan('delete temp files and dirs for packaging'))\n    local(flo(\n        'rm -rf  '\n        '{basedir}/.eggs/  '\n        '{basedir}/utlz.egg-info/  '\n        '{basedir}/dist  '\n        '{basedir}/README  '\n        '{basedir}/build/  '\n    ))\n\n    print(cyan('\\ndelete temp files and dirs for editing'))\n    local(flo(\n        'rm -rf  '\n        '{basedir}/.cache  '\n        '{basedir}/.ropeproject  '\n    ))\n\n    print(cyan('\\ndelete bytecode compiled versions of the python src'))\n    \n    local(flo('find  {basedir}/utlz  {basedir}/tests  ') +\n          '\\( -name \\*pyc -o -name \\*.pyo -o -name __pycache__ '\n          '-o -name \\*.so -o -name \\*.o -o -name \\*.c \\) '\n          '-prune '\n          '-exec rm -rf {} +')\n\n    if deltox:\n        print(cyan('\\ndelete tox virual environments'))\n        local(flo('cd {basedir}  &&  rm -rf .tox/'))", "docstring": "Delete temporary files not under version control.\n\nArgs:\ndeltox: If True, delete virtual environments used by tox", "source": "juraj-google-style"}
{"code": "def transform(self, col):\n        \n\n        out = pd.DataFrame(index=col.index)\n        out[self.col_name] = col.apply(self.get_val, axis=1)\n\n        if self.subtype == 'int':\n            out[self.col_name] = out[self.col_name].astype(int)\n\n        return out", "docstring": "Prepare the transformer to convert data and return the processed table.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"}
{"code": "def serialize(loss):\n    return serialization_lib.serialize_keras_object(loss)", "docstring": "Serializes loss function or `Loss` instance.\n\nArgs:\nloss: A Keras `Loss` instance or a loss function.\n\nReturns:\nLoss configuration dictionary.", "source": "github-repos"}
{"code": "def _send_unary_request(self, request):\n    if request.ack_ids:\n        self._client.acknowledge(subscription=self._subscription, ack_ids=list(request.ack_ids))\n    if request.modify_deadline_ack_ids:\n        deadline_to_ack_ids = collections.defaultdict(list)\n        for (n, ack_id) in enumerate(request.modify_deadline_ack_ids):\n            deadline = request.modify_deadline_seconds[n]\n            deadline_to_ack_ids[deadline].append(ack_id)\n        for (deadline, ack_ids) in six.iteritems(deadline_to_ack_ids):\n            self._client.modify_ack_deadline(subscription=self._subscription, ack_ids=ack_ids, ack_deadline_seconds=deadline)\n    _LOGGER.debug('Sent request(s) over unary RPC.')", "docstring": "Send a request using a separate unary request instead of over the\nstream.\n\nArgs:\nrequest (types.StreamingPullRequest): The stream request to be\nmapped into unary requests.", "source": "codesearchnet"}
{"code": "def isplaybook(obj):\n    \n    return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))", "docstring": "Inspects the object and returns if it is a playbook\n\nArgs:\nobj (object): The object to be inspected by this function\n\nReturns:\nboolean: True if the object is a list and False if it is not", "source": "juraj-google-style"}
{"code": "def _GenerateAssertion(self):\n    now = int(time.time())\n    payload = {'aud': RpcHelper.TOKEN_ENDPOINT, 'scope': 'https:\n    return crypt.make_signed_jwt(crypt.Signer.from_string(self.service_account_key), payload)", "docstring": "Generates the signed assertion that will be used in the request.\n\nReturns:\nstring, signed Json Web Token (JWT) assertion.", "source": "codesearchnet"}
{"code": "def transition_complete(self, pipeline_key):\n\n    def txn():\n        pipeline_record = db.get(pipeline_key)\n        if (pipeline_record is None):\n            logging.warning('Tried to mark pipeline ID \"%s\" as complete but it does not exist.', pipeline_key.name())\n            raise db.Rollback()\n        if (pipeline_record.status not in (_PipelineRecord.WAITING, _PipelineRecord.RUN)):\n            logging.warning('Tried to mark pipeline ID \"%s\" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status)\n            raise db.Rollback()\n        pipeline_record.status = _PipelineRecord.DONE\n        pipeline_record.finalized_time = self._gettime()\n        pipeline_record.put()\n    db.run_in_transaction(txn)", "docstring": "Marks the given pipeline as complete.\n\nDoes nothing if the pipeline is no longer in a state that can be completed.\n\nArgs:\npipeline_key: db.Key of the _PipelineRecord that has completed.", "source": "codesearchnet"}
{"code": "def from_image(cls, filename, start, stop, legend, source='Image', col_offset=0.1, row_offset=2, tolerance=0):\n    rgb = utils.loglike_from_image(filename, col_offset)\n    loglike = np.array([utils.rgb_to_hex(t) for t in rgb])\n    (tops, hexes) = utils.tops_from_loglike(loglike, offset=row_offset)\n    nonconsecutive = np.append(np.diff(tops), 2)\n    tops = tops[(nonconsecutive > 1)]\n    hexes = hexes[(nonconsecutive > 1)]\n    hexes_reduced = list(set(hexes))\n    components = [legend.get_component(h, tolerance=tolerance) for h in hexes_reduced]\n    values = [hexes_reduced.index(i) for i in hexes]\n    basis = np.linspace(start, stop, loglike.size)\n    list_of_Intervals = cls.__intervals_from_tops(tops, values, basis, components)\n    return cls(list_of_Intervals, source='Image')", "docstring": "Read an image and generate Striplog.\n\nArgs:\nfilename (str): An image file, preferably high-res PNG.\nstart (float or int): The depth at the top of the image.\nstop (float or int): The depth at the bottom of the image.\nlegend (Legend): A legend to look up the components in.\nsource (str): A source for the data. Default: 'Image'.\ncol_offset (Number): The proportion of the way across the image\nfrom which to extract the pixel column. Default: 0.1 (ie 10%).\nrow_offset (int): The number of pixels to skip at the top of\neach change in colour. Default: 2.\ntolerance (float): The Euclidean distance between hex colours,\nwhich has a maximum (black to white) of 441.67 in base 10.\nDefault: 0.\n\nReturns:\nStriplog: The ``striplog`` object.", "source": "codesearchnet"}
{"code": "def file(self, path):\n        \n        with open(path, 'r') as f:\n            self.body(str(f.read()))", "docstring": "Reads the body to match from a disk file.\n\nArguments:\npath (str): relative or absolute path to file to read from.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"}
{"code": "def get_random_value(length=10, character_sets=[string.ascii_uppercase, string.ascii_lowercase]):\n        \n\n        return \"\".join(random.choice(\"\".join(character_sets)) for i in range(length))", "docstring": "Get a random string with the given length.\n\nArgs:\nlength (int): The length of the string to return.\ncharacter_sets list(str): The caracter sets to use.\n\nReturns:\nstr: The random string.", "source": "juraj-google-style"}
{"code": "def update_environmental_configuration(self, configuration, timeout=(- 1)):\n    uri = '{}/environmentalConfiguration'.format(self.data['uri'])\n    return self._helper.do_put(uri, configuration, timeout, None)", "docstring": "Sets the calibrated max power of an unmanaged or unsupported enclosure.\n\nArgs:\nconfiguration: Configuration\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nSettings that describe the environmental configuration.", "source": "codesearchnet"}
{"code": "def update_video_image(edx_video_id, course_id, image_data, file_name):\n    try:\n        course_video = CourseVideo.objects.select_related('video').get(course_id=course_id, video__edx_video_id=edx_video_id)\n    except ObjectDoesNotExist:\n        error_message = u'VAL: CourseVideo not found for edx_video_id: {0} and course_id: {1}'.format(edx_video_id, course_id)\n        raise ValVideoNotFoundError(error_message)\n    (video_image, _) = VideoImage.create_or_update(course_video, file_name, image_data)\n    return video_image.image_url()", "docstring": "Update video image for an existing video.\n\nNOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise\na new file name is constructed based on uuid and extension from `file_name` value.\n`image_data` will be None in case of course re-run and export.\n\nArguments:\nimage_data (InMemoryUploadedFile): Image data to be saved for a course video.\n\nReturns:\ncourse video image url\n\nRaises:\nRaises ValVideoNotFoundError if the CourseVideo cannot be retrieved.", "source": "codesearchnet"}
{"code": "def from_params(cls, params):\n    key_fn = (lambda x: id(x[1].owner))\n    streams = []\n    for (_, group) in groupby(sorted(params.items(), key=key_fn), key_fn):\n        group = list(group)\n        inst = [p.owner for (_, p) in group][0]\n        if (not isinstance(inst, param.Parameterized)):\n            continue\n        names = [p.name for (_, p) in group]\n        rename = {p.name: n for (n, p) in group}\n        streams.append(cls(inst, names, rename=rename))\n    return streams", "docstring": "Returns Params streams given a dictionary of parameters\n\nArgs:\nparams (dict): Dictionary of parameters\n\nReturns:\nList of Params streams", "source": "codesearchnet"}
{"code": "def predict(self, a, b):\n        \n        a = np.array(a).reshape((-1, 1))\n        b = np.array(b).reshape((-1, 1))\n        return sp.kendalltau(a, b)[0]", "docstring": "Compute the test statistic\n\nArgs:\na (array-like): Variable 1\nb (array-like): Variable 2\n\nReturns:\nfloat: test statistic", "source": "juraj-google-style"}
{"code": "def GetCloudPath(self, resource_id, cache, database):\n    \n    cloud_path = cache.GetResults('cloud_path')\n    if not cloud_path:\n      results = database.Query(self.CLOUD_PATH_CACHE_QUERY)\n\n      cache.CacheQueryResults(\n          results, 'cloud_path', 'resource_id', ('filename', 'parent'))\n      cloud_path = cache.GetResults('cloud_path')\n\n    if resource_id == 'folder:root':\n      return '/'\n\n    paths = []\n    parent_path, parent_id = cloud_path.get(resource_id, ['', ''])\n    while parent_path:\n      if parent_path == 'folder:root':\n        break\n      paths.append(parent_path)\n      parent_path, parent_id = cloud_path.get(parent_id, ['', ''])\n\n    if not paths:\n      return '/'\n\n    \n    \n    paths.reverse()\n    return '/{0:s}/'.format('/'.join(paths))", "docstring": "Return cloud path given a resource id.\n\nArgs:\nresource_id (str): resource identifier for the file.\ncache (SQLiteCache): cache.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nstr: full path to the resource value.", "source": "juraj-google-style"}
{"code": "def __init__(self, final_ops, final_ops_feed_dict=None):\n    self._final_ops = final_ops\n    self._final_ops_feed_dict = final_ops_feed_dict\n    self._final_ops_values = None", "docstring": "Initializes `FinalOpHook` with ops to run at the end of the session.\n\nArgs:\nfinal_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\nto `Tensors`.\nfinal_ops_feed_dict: A feed dictionary to use when running\n`final_ops_dict`.", "source": "github-repos"}
{"code": "def diff(self, a_ref, target=None, b_ref=None):\n    \n    result = {}\n    diff_dct = self.scm.get_diff_trees(a_ref, b_ref=b_ref)\n    result[DIFF_A_REF] = diff_dct[DIFF_A_REF]\n    result[DIFF_B_REF] = diff_dct[DIFF_B_REF]\n    if diff_dct[DIFF_EQUAL]:\n        result[DIFF_EQUAL] = True\n        return result\n    result[DIFF_LIST] = []\n    diff_outs = _get_diff_outs(self, diff_dct)\n    if target is None:\n        result[DIFF_LIST] = [\n            _diff_royal(self, path, diff_outs[path]) for path in diff_outs\n        ]\n    elif target in diff_outs:\n        result[DIFF_LIST] = [_diff_royal(self, target, diff_outs[target])]\n    else:\n        msg = \"Have not found file/directory '{}' in the commits\"\n        raise FileNotInCommitError(msg.format(target))\n    return result", "docstring": "Gerenates diff message string output\n\nArgs:\ntarget(str) - file/directory to check diff of\na_ref(str) - first tag\n(optional) b_ref(str) - second git tag\n\nReturns:\nstring: string of output message with diff info", "source": "juraj-google-style"}
{"code": "def cast_to_type(obj, out_type):\n    in_type = type(obj)\n    if (out_type is in_type):\n        return obj\n    else:\n        return out_type(obj)", "docstring": "Cast obj to out_type if it's not out_type already.\n\nIf the obj happens to be out_type already, it just returns obj as is.\n\nArgs:\nobj: input object\nout_type: type.\n\nReturns:\nobj cast to out_type. Usual python conversion / casting rules apply.", "source": "codesearchnet"}
{"code": "def NewOutputModule(cls, name, output_mediator):\n    output_class = cls.GetOutputClass(name)\n    return output_class(output_mediator)", "docstring": "Creates a new output module object for the specified output format.\n\nArgs:\nname (str): name of the output module.\noutput_mediator (OutputMediator): output mediator.\n\nReturns:\nOutputModule: output module.\n\nRaises:\nKeyError: if there is no output class found with the supplied name.\nValueError: if name is not a string.", "source": "codesearchnet"}
{"code": "def is60(msg):\n    if allzeros(msg):\n        return False\n    d = hex2bin(data(msg))\n    if wrongstatus(d, 1, 2, 12):\n        return False\n    if wrongstatus(d, 13, 14, 23):\n        return False\n    if wrongstatus(d, 24, 25, 34):\n        return False\n    if wrongstatus(d, 35, 36, 45):\n        return False\n    if wrongstatus(d, 46, 47, 56):\n        return False\n    ias = ias60(msg)\n    if ((ias is not None) and (ias > 500)):\n        return False\n    mach = mach60(msg)\n    if ((mach is not None) and (mach > 1)):\n        return False\n    vr_baro = vr60baro(msg)\n    if ((vr_baro is not None) and (abs(vr_baro) > 6000)):\n        return False\n    vr_ins = vr60ins(msg)\n    if ((vr_ins is not None) and (abs(vr_ins) > 6000)):\n        return False\n    return True", "docstring": "Check if a message is likely to be BDS code 6,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"}
{"code": "def register_write(self, reg_index, value):\n        \n        res = self._dll.JLINKARM_WriteReg(reg_index, value)\n        if res != 0:\n            raise errors.JLinkException('Error writing to register %d' % reg_index)\n        return value", "docstring": "Writes into an ARM register.\n\nNote:\nThe data is not immediately written, but is cached before being\ntransferred to the CPU on CPU start.\n\nArgs:\nself (JLink): the ``JLink`` instance\nreg_index (int): the ARM register to write to\nvalue (int): the value to write to the register\n\nReturns:\nThe value written to the ARM register.\n\nRaises:\nJLinkException: on write error.", "source": "juraj-google-style"}
{"code": "def plot_legend(ax, no_legend=True, legend_arg=None):\n    \n    legend_arg = dict_if_none(legend_arg)\n\n    if not no_legend:\n        ax.legend(**legend_arg)", "docstring": "Function that defines the legend options\nof a matplotlib plot.\n\nArgs:\nax: matplotlib axes\nno_legend (bool): Defines the presence of a legend in the figure\nlegend_arg (dict): Addition arguments for matplotlib.legend() call", "source": "juraj-google-style"}
{"code": "def spawn_agent(self, agent_definition, location):\n        \n        self._should_write_to_command_buffer = True\n        self._add_agents(agent_definition)\n        command_to_send = SpawnAgentCommand(location, agent_definition.name, agent_definition.type)\n        self._commands.add_command(command_to_send)", "docstring": "Queues a spawn agent command. It will be applied when `tick` or `step` is called next.\nThe agent won't be able to be used until the next frame.\n\nArgs:\nagent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn.\nlocation (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters).", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[tf.Tensor], model: tf.Module, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    inference_args = {} if not inference_args else inference_args\n    return self._inference_fn(model, batch, inference_args, self._model_uri)", "docstring": "Runs inferences on a batch of tf.Tensor and returns an Iterable of\nTensor Predictions.\n\nThis method stacks the list of Tensors in a vectorized format to optimize\nthe inference call.\n\nArgs:\nbatch: A sequence of Tensors. These Tensors should be batchable, as this\nmethod will call `tf.stack()` and pass in batched Tensors with\ndimensions (batch_size, n_features, etc.) into the model's predict()\nfunction.\nmodel: A Tensorflow model.\ninference_args: Non-batchable arguments required as inputs to the model's\nforward() function. Unlike Tensors in `batch`, these parameters will\nnot be dynamically batched\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def translate_to_histogram(self, histogram):\n    first_bucket_offset = 0\n    last_bucket_offset = 0\n    for index in range(0, self.MAX_BUCKET_SIZE):\n        if self.buckets[index] != 0:\n            first_bucket_offset = index\n            break\n    for index in range(self.MAX_BUCKET_SIZE - 1, -1, -1):\n        if self.buckets[index] != 0:\n            last_bucket_offset = index\n            break\n    histogram.firstBucketOffset = first_bucket_offset\n    histogram.bucketCounts = self.buckets[first_bucket_offset:last_bucket_offset + 1]", "docstring": "Translate buckets into Histogram.\n\nArgs:\nhistogram: apache_beam.runners.dataflow.internal.clents.dataflow.Histogram\nIdeally, only call this function when reporting counter to\ndataflow service.", "source": "github-repos"}
{"code": "def forward(self, hidden_states: torch.Tensor, pos_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False):\n    if self.macaron_style:\n        residual = hidden_states\n        if self.normalize_before:\n            hidden_states = self.ff_macaron_layer_norm(hidden_states)\n        hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states))\n        if not self.normalize_before:\n            hidden_states = self.ff_macaron_layer_norm(hidden_states)\n    residual = hidden_states\n    if self.normalize_before:\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n    attention_output, attention_scores = self.self_attn(hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions)\n    if self.concat_after:\n        x_concat = torch.cat((hidden_states, attention_output), dim=-1)\n        hidden_states = self.concat_linear(x_concat)\n        hidden_states = residual + hidden_states\n    else:\n        hidden_states = self.dropout(attention_output)\n        hidden_states = residual + hidden_states\n    if not self.normalize_before:\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n    if self.use_cnn_module:\n        residual = hidden_states\n        if self.normalize_before:\n            hidden_states = self.conv_layer_norm(hidden_states)\n        hidden_states = self.conv_module(hidden_states)\n        hidden_states = self.dropout(hidden_states)\n        hidden_states = residual + hidden_states\n        if not self.normalize_before:\n            hidden_states = self.conv_layer_norm(hidden_states)\n    residual = hidden_states\n    if self.normalize_before:\n        hidden_states = self.ff_layer_norm(hidden_states)\n    hidden_states = self.feed_forward(hidden_states)\n    hidden_states = self.dropout(hidden_states)\n    hidden_states = residual + self.ff_scale * hidden_states\n    if not self.normalize_before:\n        hidden_states = self.ff_layer_norm(hidden_states)\n    if self.conv_module is not None:\n        hidden_states = self.final_layer_norm(hidden_states)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attention_scores,)\n    return outputs", "docstring": "Compute encoded features.\n\nArgs:\nhidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.\npos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.\nattention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\nReturns:\n`torch.Tensor`: Output tensor of shape `(batch, time, size)`.", "source": "github-repos"}
{"code": "def cancel(self, accountID, orderSpecifier, **kwargs):\n    request = Request('PUT', '/v3/accounts/{accountID}/orders/{orderSpecifier}/cancel')\n    request.set_path_param('accountID', accountID)\n    request.set_path_param('orderSpecifier', orderSpecifier)\n    response = self.ctx.request(request)\n    if (response.content_type is None):\n        return response\n    if (not response.content_type.startswith('application/json')):\n        return response\n    jbody = json.loads(response.raw_body)\n    parsed_body = {}\n    if (str(response.status) == '200'):\n        if (jbody.get('orderCancelTransaction') is not None):\n            parsed_body['orderCancelTransaction'] = self.ctx.transaction.OrderCancelTransaction.from_dict(jbody['orderCancelTransaction'], self.ctx)\n        if (jbody.get('relatedTransactionIDs') is not None):\n            parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')\n        if (jbody.get('lastTransactionID') is not None):\n            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')\n    elif (str(response.status) == '401'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '404'):\n        if (jbody.get('orderCancelRejectTransaction') is not None):\n            parsed_body['orderCancelRejectTransaction'] = self.ctx.transaction.OrderCancelRejectTransaction.from_dict(jbody['orderCancelRejectTransaction'], self.ctx)\n        if (jbody.get('relatedTransactionIDs') is not None):\n            parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')\n        if (jbody.get('lastTransactionID') is not None):\n            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    elif (str(response.status) == '405'):\n        if (jbody.get('errorCode') is not None):\n            parsed_body['errorCode'] = jbody.get('errorCode')\n        if (jbody.get('errorMessage') is not None):\n            parsed_body['errorMessage'] = jbody.get('errorMessage')\n    else:\n        parsed_body = jbody\n    response.body = parsed_body\n    return response", "docstring": "Cancel a pending Order in an Account\n\nArgs:\naccountID:\nAccount Identifier\norderSpecifier:\nThe Order Specifier\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"}
{"code": "def CopyToPath(self):\n    number_of_path_segments = len(self._path_segments)\n    if (number_of_path_segments == 0):\n        return None\n    strings = [self._path_segments[0]]\n    number_of_path_segments -= 1\n    for path_segment in self._path_segments[1:]:\n        if (path_segment.endswith('\\\\') and (number_of_path_segments > 1)):\n            path_segment = path_segment[:(- 1)]\n        if ((path_segment.startswith('<') and path_segment.endswith('>')) or (len(strings) == 1)):\n            strings.append(' {0:s}'.format(path_segment))\n        elif path_segment.startswith('\\\\'):\n            strings.append('{0:s}'.format(path_segment))\n        else:\n            strings.append('\\\\{0:s}'.format(path_segment))\n        number_of_path_segments -= 1\n    return ''.join(strings)", "docstring": "Copies the shell items to a path.\n\nReturns:\nstr: converted shell item list path or None.", "source": "codesearchnet"}
{"code": "def readlines(self, n, echo=None):\n        \n\n        return [\n            self.until(b'\\n', echo)\n            for _ in range(n)\n        ]", "docstring": "Read *n* lines from channel.\n\nArgs:\nn(int): The number of lines to read.\necho(bool): Whether to write the read data to stdout.\n\nReturns:\nlist of bytes: *n* lines which include new line characters.\n\nRaises:\nEOFError: If the channel was closed before *n* lines were read.", "source": "juraj-google-style"}
{"code": "def _preprocess_input(self, inputs, error_message, expected_nesting=1, dtype=None):\n    if inputs is None:\n        return None\n    if hasattr(inputs, 'numpy'):\n        inputs = inputs.numpy().tolist()\n    valid = isinstance(inputs, list)\n    current = inputs\n    for _ in range(expected_nesting):\n        if not valid or not current:\n            break\n        valid = valid and isinstance(current[0], list)\n        current = current[0] if current else None\n    if not valid:\n        raise ValueError(error_message)\n    return [np.array(item, dtype=dtype) for item in inputs]", "docstring": "Preprocess input by converting torch tensors to numpy arrays and validating structure.\n\nArgs:\ninputs: The input to process\nerror_message: Error message if validation fails\nexpected_nesting: Expected nesting level (1 for points/labels, 2 for boxes)\ndtype: Optional data type for numpy array conversion\n\nReturns:\nProcessed input as list of numpy arrays or None", "source": "github-repos"}
{"code": "def _ScanVolumeSystemRoot(self, scan_context, scan_node, base_path_specs):\n    \n    if not scan_node or not scan_node.path_spec:\n      raise errors.ScannerError('Invalid scan node.')\n\n    if scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:\n      volume_identifiers = self._GetAPFSVolumeIdentifiers(scan_node)\n\n    elif scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:\n      volume_identifiers = self._GetVSSStoreIdentifiers(scan_node)\n      \n      volume_identifiers.reverse()\n\n    else:\n      raise errors.ScannerError(\n          'Unsupported volume system type: {0:s}.'.format(\n              scan_node.type_indicator))\n\n    for volume_identifier in volume_identifiers:\n      location = '/{0:s}'.format(volume_identifier)\n      sub_scan_node = scan_node.GetSubNodeByLocation(location)\n      if not sub_scan_node:\n        raise errors.ScannerError(\n            'Scan node missing for volume identifier: {0:s}.'.format(\n                volume_identifier))\n\n      self._ScanVolume(scan_context, sub_scan_node, base_path_specs)", "docstring": "Scans a volume system root scan node for volume and file systems.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): volume system root scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the scan node is invalid, the scan node type is not\nsupported or if a sub scan node cannot be retrieved.", "source": "juraj-google-style"}
{"code": "def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None):\n    url = ('%s/%s/%s' % (_BOKEH_GH, api_type, id))\n    options = (options or {})\n    set_classes(options)\n    node = nodes.reference(rawtext, (kind + utils.unescape(id)), refuri=url, **options)\n    return node", "docstring": "Return a link to a Bokeh Github resource.\n\nArgs:\napp (Sphinx app) : current app\nrawtext (str) : text being replaced with link node.\nrole (str) : role name\nkind (str) : resource type (issue, pull, etc.)\napi_type (str) : type for api link\nid : (str) : id of the resource to link to\noptions (dict) : options dictionary passed to role function", "source": "codesearchnet"}
{"code": "def __init__(self, baselines, scope='aggregated-baseline', summary_labels=()):\n        \n\n        self.baselines = dict()\n        for name in sorted(baselines):\n            self.baselines[name] = Baseline.from_spec(\n                spec=baselines[name],\n                kwargs=dict(summary_labels=summary_labels))\n\n        self.linear = Linear(size=1, bias=0.0, scope='prediction', summary_labels=summary_labels)\n\n        super(AggregatedBaseline, self).__init__(scope, summary_labels)", "docstring": "Aggregated baseline.\n\nArgs:\nbaselines: Dict of per-state baseline specification dicts", "source": "juraj-google-style"}
{"code": "def last_revision(self, mod: YangIdentifier) -> ModuleId:\n        \n        revs = [mn for mn in self.modules if mn[0] == mod]\n        if not revs:\n            raise ModuleNotRegistered(mod)\n        return sorted(revs, key=lambda x: x[1])[-1]", "docstring": "Return the last revision of a module that's part of the data model.\n\nArgs:\nmod: Name of a module or submodule.\n\nRaises:\nModuleNotRegistered: If the module `mod` is not present in the\ndata model.", "source": "juraj-google-style"}
{"code": "def topological_nodes(self):\n    return nx.lexicographical_topological_sort(self._multi_graph, key=(lambda x: str(x.qargs)))", "docstring": "Yield nodes in topological order.\n\nReturns:\ngenerator(DAGNode): node in topological order", "source": "codesearchnet"}
{"code": "def rsolve(A, b, epsilon=_epsilon):\n    r\n    A = asarray(A, float)\n    b = asarray(b, float)\n    if A.shape[0] == 0:\n        return zeros((A.shape[1],))\n    if A.shape[1] == 0:\n        return zeros((0,))\n    try:\n        x = lstsq(A, b, rcond=epsilon)\n        r = sum(x[3] > epsilon)\n        if r == 0:\n            return zeros(A.shape[1])\n        return x[0]\n    except (ValueError, LinAlgError) as e:\n        warnings.warn(str(e), RuntimeWarning)\n    return solve(A, b)", "docstring": "r\"\"\"Robust solve for the linear equations.\n\nArgs:\nA (array_like): Coefficient matrix.\nb (array_like): Ordinate values.\n\nReturns:\n:class:`numpy.ndarray`: Solution ``x``.", "source": "juraj-google-style"}
{"code": "def __init__(self, initial_learning_rate, decay_steps, end_learning_rate=0.0001, power=1.0, cycle=False, name=None):\n    super(PolynomialDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.end_learning_rate = end_learning_rate\n    self.power = power\n    self.cycle = cycle\n    self.name = name", "docstring": "Applies a polynomial decay to the learning rate.\n\nArgs:\ninitial_learning_rate: A scalar `float32` or `float64` `Tensor` or a\nPython number.  The initial learning rate.\ndecay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\nMust be positive.  See the decay computation above.\nend_learning_rate: A scalar `float32` or `float64` `Tensor` or a\nPython number.  The minimal end learning rate.\npower: A scalar `float32` or `float64` `Tensor` or a\nPython number.  The power of the polynomial. Defaults to linear, 1.0.\ncycle: A boolean, whether or not it should cycle beyond decay_steps.\nname: String.  Optional name of the operation. Defaults to\n'PolynomialDecay'.", "source": "github-repos"}
{"code": "def calculate_dimensionality_of_site(bonded_structure, site_index, inc_vertices=False):\n\n    def neighbours(comp_index):\n        return [(s.index, s.jimage) for s in bonded_structure.get_connected_sites(comp_index)]\n\n    def rank(vertices):\n        if (len(vertices) == 0):\n            return (- 1)\n        elif (len(vertices) == 1):\n            return 0\n        else:\n            vertices = np.array(list(vertices))\n            return np.linalg.matrix_rank((vertices[1:] - vertices[0]))\n    connected_sites = {i: neighbours(i) for i in range(bonded_structure.structure.num_sites)}\n    seen_vertices = set()\n    seen_comp_vertices = defaultdict(set)\n    queue = [(site_index, (0, 0, 0))]\n    while (len(queue) > 0):\n        (comp_i, image_i) = queue.pop(0)\n        if ((comp_i, image_i) in seen_vertices):\n            continue\n        seen_vertices.add((comp_i, image_i))\n        if (rank(seen_comp_vertices[comp_i].union({image_i})) > rank(seen_comp_vertices[comp_i])):\n            seen_comp_vertices[comp_i].add(image_i)\n        for (comp_j, image_j) in connected_sites[comp_i]:\n            image_j = tuple(np.add(image_j, image_i))\n            if ((comp_j, image_j) in seen_vertices):\n                continue\n            if (rank(seen_comp_vertices[comp_j].union({image_j})) > rank(seen_comp_vertices[comp_j])):\n                queue.append((comp_j, image_j))\n    if inc_vertices:\n        return (rank(seen_comp_vertices[site_index]), list(seen_comp_vertices[site_index]))\n    else:\n        return rank(seen_comp_vertices[site_index])", "docstring": "Calculates the dimensionality of the component containing the given site.\n\nImplements directly the modified breadth-first-search algorithm described in\nAlgorithm 1 of:\n\nP. Larsem, M. Pandey, M. Strange, K. W. Jacobsen, 2018, arXiv:1808.02114\n\nArgs:\nbonded_structure (StructureGraph): A structure with bonds, represented\nas a pymatgen structure graph. For example, generated using the\nCrystalNN.get_bonded_structure() method.\nsite_index (int): The index of a site in the component of interest.\ninc_vertices (bool, optional): Whether to return the vertices (site\nimages) of the component.\n\nReturns:\n(int or tuple): If inc_vertices is False, the dimensionality of the\ncomponent will be returned as an int. If inc_vertices is true, the\nfunction will return a tuple of (dimensionality, vertices), where\nvertices is a list of tuples. E.g. [(0, 0, 0), (1, 1, 1)].", "source": "codesearchnet"}
{"code": "class OrderedEnqueuer(SequenceEnqueuer):\n\n    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):\n        super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)\n        self.shuffle = shuffle\n\n    def _get_executor_init(self, workers):\n        \n\n        def pool_fn(seqs):\n            pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue()))\n            _DATA_POOLS.add(pool)\n            return pool\n        return pool_fn\n\n    def _wait_queue(self):\n        \n        while True:\n            time.sleep(0.1)\n            if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n                return\n\n    def _run(self):\n        \n        sequence = list(range(len(self.sequence)))\n        self._send_sequence()\n        while True:\n            if self.shuffle:\n                random.shuffle(sequence)\n            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n                for i in sequence:\n                    if self.stop_signal.is_set():\n                        return\n                    self.queue.put(executor.apply_async(get_index, (self.uid, i)), block=True)\n                self._wait_queue()\n                if self.stop_signal.is_set():\n                    return\n            self.sequence.on_epoch_end()\n            self._send_sequence()\n\n    def get(self):\n        \n        while self.is_running():\n            try:\n                inputs = self.queue.get(block=True, timeout=5).get()\n                if self.is_running():\n                    self.queue.task_done()\n                if inputs is not None:\n                    yield inputs\n            except queue.Empty:\n                pass\n            except Exception as e:\n                self.stop()\n                raise e", "docstring": "Builds a Enqueuer from a Sequence.\n\nArgs:\nsequence: A `tf.keras.utils.data_utils.Sequence` object.\nuse_multiprocessing: use multiprocessing if True, otherwise threading\nshuffle: whether to shuffle the data at the beginning of each epoch", "source": "github-repos"}
{"code": "def _update_job_info(cls, job_dir):\n        \n        meta_file = os.path.join(job_dir, JOB_META_FILE)\n        meta = parse_json(meta_file)\n\n        if meta:\n            logging.debug(\"Update job info for %s\" % meta[\"job_id\"])\n            JobRecord.objects \\\n                .filter(job_id=meta[\"job_id\"]) \\\n                .update(end_time=timestamp2date(meta[\"end_time\"]))", "docstring": "Update information for given job.\n\nMeta file will be loaded if exists, and the job information in\nin db backend will be updated.\n\nArgs:\njob_dir (str): Directory path of the job.\n\nReturn:\nUpdated dict of job meta info", "source": "juraj-google-style"}
{"code": "def _remove_subsequent_result_because_of_batch_failure(self, sig):\n        \n\n        batch = self._batches_by_txn_id[sig]\n        seen = []\n        for txn in batch.transactions:\n            txn_id = txn.header_signature\n            for poss_successor in self._scheduled.copy():\n                if not self.is_transaction_in_schedule(poss_successor):\n                    continue\n\n                if self._is_txn_to_replay(txn_id, poss_successor, seen):\n                    if self._txn_has_result(poss_successor):\n                        del self._txn_results[poss_successor]\n                        self._scheduled.remove(poss_successor)\n                        self._txns_available[poss_successor] = \\\n                            self._transactions[poss_successor]\n                    else:\n                        self._outstanding.add(poss_successor)\n                    seen.append(poss_successor)", "docstring": "Remove transactions from scheduled and txn_results for\nsuccessors of txns in a failed batch. These transactions will now,\nor in the future be rescheduled in next_transaction; giving a\nreplay ability.\n\nArgs:\nsig (str): Transaction header signature", "source": "juraj-google-style"}
{"code": "def check_valid(line0, line1):\n    data = line0.strip().split(b' ')\n    if (len(data) <= 2):\n        return False\n    try:\n        map(float, data[2:])\n    except:\n        return False\n    return True", "docstring": "Check if a file is valid Glove format.\n\nArgs:\nline0 (bytes): First line of the file\nline1 (bytes): Second line of the file\n\nReturns:\nboo: ``True`` if it is valid. ``False`` if it is invalid.", "source": "codesearchnet"}
{"code": "def load_fasta_file(filename):\n    \n\n    with open(filename, \"r\") as handle:\n        records = list(SeqIO.parse(handle, \"fasta\"))\n    return records", "docstring": "Load a FASTA file and return the sequences as a list of SeqRecords\n\nArgs:\nfilename (str): Path to the FASTA file to load\n\nReturns:\nlist: list of all sequences in the FASTA file as Biopython SeqRecord objects", "source": "juraj-google-style"}
{"code": "def get_num_bytes(self, batch: Sequence[datatable.Frame]) -> int:\n    return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch.", "source": "github-repos"}
{"code": "def is_not_negative() -> RuleChecker[Numeric]:\n\n    def _checker(value: Numeric) -> RuleOutput:\n        if value >= 0:\n            return None\n        else:\n            return 'Value is a negative number.'\n    return _checker", "docstring": "Checks if the provided numeric value IS NOT negative\ni.e. NOT positive (+) or zero (0).\n\nReturns:\n* None: if value >= 0\n* Error message, otherwise", "source": "github-repos"}
{"code": "def _merge_section(original, to_merge):\n    if (not original):\n        return (to_merge or '')\n    if (not to_merge):\n        return (original or '')\n    try:\n        index = (original.index(':') + 1)\n    except ValueError:\n        index = original.index('\\n')\n    name = original[:index].strip()\n    section = '\\n  '.join((original[(index + 1):].lstrip(), to_merge[(index + 1):].lstrip())).rstrip()\n    return '{name}\\n  {section}'.format(name=name, section=section)", "docstring": "Merge two sections together.\n\nArgs:\noriginal: The source of header and initial section lines.\nto_merge: The source for the additional section lines to append.\n\nReturns:\nA new section string that uses the header of the original argument and\nthe section lines from both.", "source": "codesearchnet"}
{"code": "def job_stories(self, raw=False, limit=None):\n        \n        job_stories = self._get_stories('jobstories', limit)\n        if raw:\n            job_stories = [story.raw for story in job_stories]\n        return job_stories", "docstring": "Returns list of item ids of latest Job stories\n\nArgs:\nlimit (int): specifies the number of stories to be returned.\nraw (bool): Flag to indicate whether to transform all\nobjects into raw json.\n\nReturns:\n`list` object containing ids of Job stories.", "source": "juraj-google-style"}
{"code": "def retweeted_tweet(self):\n    retweet = tweet_embeds.get_retweeted_tweet(self)\n    if (retweet is not None):\n        try:\n            return Tweet(retweet)\n        except NotATweetError as nate:\n            raise NotATweetError(('The retweet payload appears malformed.' + \" Failed with '{}'\".format(nate)))\n    else:\n        return None", "docstring": "The retweeted Tweet as a Tweet object\nIf the Tweet is not a Retweet, return None\nIf the Retweet payload cannot be loaded as a Tweet, this will\nraise a `NotATweetError`\n\nReturns:\nTweet: A Tweet representing the retweeted status (or None)\n(see tweet_embeds.get_retweet, this is that value as a Tweet)\n\nRaises:\nNotATweetError: if retweeted tweet is malformed", "source": "codesearchnet"}
{"code": "def FindModuleIdDefiningFlag(self, flagname, default=None):\n    \n    registered_flag = self.FlagDict().get(flagname)\n    if registered_flag is None:\n      return default\n    for module_id, flags in six.iteritems(self.FlagsByModuleIdDict()):\n      for flag in flags:\n        \n        \n        \n        if (flag.name == registered_flag.name and\n            flag.short_name == registered_flag.short_name):\n          return module_id\n    return default", "docstring": "Return the ID of the module defining this flag, or default.\n\nArgs:\nflagname: Name of the flag to lookup.\ndefault: Value to return if flagname is not defined. Defaults\nto None.\n\nReturns:\nThe ID of the module which registered the flag with this name.\nIf no such module exists (i.e. no flag with this name exists),\nwe return default.", "source": "juraj-google-style"}
{"code": "def add_recipe_folder(self, recipe_folder, whitelist=None):\n    if (whitelist is not None):\n        whitelist = set(whitelist)\n    if (recipe_folder == ''):\n        recipe_folder = '.'\n    for yaml_file in [x for x in os.listdir(recipe_folder) if x.endswith('.yaml')]:\n        if ((whitelist is not None) and (yaml_file not in whitelist)):\n            continue\n        recipe = RecipeObject.FromFile(os.path.join(recipe_folder, yaml_file), self._recipe_actions, self._recipe_resources)\n        self._recipes[recipe.name] = recipe\n    for ship_file in [x for x in os.listdir(recipe_folder) if x.endswith('.ship')]:\n        if ((whitelist is not None) and (ship_file not in whitelist)):\n            continue\n        recipe = RecipeObject.FromArchive(os.path.join(recipe_folder, ship_file), self._recipe_actions, self._recipe_resources)\n        self._recipes[recipe.name] = recipe", "docstring": "Add all recipes inside a folder to this RecipeManager with an optional whitelist.\n\nArgs:\nrecipe_folder (str): The path to the folder of recipes to add.\nwhitelist (list): Only include files whose os.basename() matches something\non the whitelist", "source": "codesearchnet"}
{"code": "def getPagePixmap(doc, pno, matrix = None, colorspace = csRGB,\n                  clip = None, alpha = True):\n    \n    return doc[pno].getPixmap(matrix = matrix, colorspace = colorspace,\n                          clip = clip, alpha = alpha)", "docstring": "Create pixmap of document page by page number.\n\nNotes:\nConvenience function calling page.getPixmap.\nArgs:\npno: (int) page number\nmatrix: Matrix for transformation (default: Identity).\ncolorspace: (str/Colorspace) rgb, rgb, gray - case ignored, default csRGB.\nclip: (irect-like) restrict rendering to this area.\nalpha: (bool) include alpha channel", "source": "juraj-google-style"}
{"code": "def from_epw_file(cls, epwfile, timestep=1):\n    is_leap_year = False\n    epw = EPW(epwfile)\n    (direct_normal, diffuse_horizontal) = cls._get_data_collections(epw.direct_normal_radiation.values, epw.diffuse_horizontal_radiation.values, epw.metadata, 1, is_leap_year)\n    if (timestep != 1):\n        print(((\"Note: timesteps greater than 1 on epw-generated Wea's \\n\" + 'are suitable for thermal models but are not recommended \\n') + 'for daylight models.'))\n        direct_normal = direct_normal.interpolate_to_timestep(timestep)\n        diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep)\n        sp = Sunpath.from_location(epw.location)\n        for (i, dt) in enumerate(cls._get_datetimes(timestep, is_leap_year)):\n            sun = sp.calculate_sun_from_date_time(dt)\n            if (sun.altitude < 0):\n                direct_normal[i] = 0\n                diffuse_horizontal[i] = 0\n    return cls(epw.location, direct_normal, diffuse_horizontal, timestep, is_leap_year)", "docstring": "Create a wea object using the solar irradiance values in an epw file.\n\nArgs:\nepwfile: Full path to epw weather file.\ntimestep: An optional integer to set the number of time steps per hour.\nDefault is 1 for one value per hour. Note that this input\nwill only do a linear interpolation over the data in the EPW\nfile.  While such linear interpolations are suitable for most\nthermal simulations, where thermal lag \"smooths over\" the effect\nof momentary increases in solar energy, it is not recommended\nfor daylight simulations, where momentary increases in solar\nenergy can mean the difference between glare and visual comfort.", "source": "codesearchnet"}
{"code": "def delete(self, json=None):\n    return self._call('delete', url=self.endpoint, json=json)", "docstring": "Send a DELETE request and return the JSON decoded result.\n\nArgs:\njson (dict, optional): Object to encode and send in request.\n\nReturns:\nmixed: JSON decoded response data.", "source": "codesearchnet"}
{"code": "def get_precursor_mz(exact_mass, precursor_type):\n    \n\n    \n    d = {'[M-H]-': -1.007276,\n         '[M+H]+': 1.007276,\n         '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949)\n         }\n\n    try:\n\n        return exact_mass + d[precursor_type]\n    except KeyError as e:\n        print(e)\n        return False", "docstring": "Calculate precursor mz based on exact mass and precursor type\n\nArgs:\nexact_mass (float): exact mass of compound of interest\nprecursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+'\n\nReturn:\nneutral mass of compound", "source": "juraj-google-style"}
{"code": "def get_channel_info(self, id: str) -> Dict[(str, Any)]:\n    return self._query(f'channels/{id}', 'GET')", "docstring": "Get a chanel's information by its id\n\nArgs:\nid: snowflake id of the chanel\n\nReturns:\nDictionary data for the chanel API object\n\nExample:\n{\n\"id\": \"41771983423143937\",\n\"guild_id\": \"41771983423143937\",\n\"name\": \"general\",\n\"type\": 0,\n\"position\": 6,\n\"permission_overwrites\": [],\n\"topic\": \"24/7 chat about how to gank Mike #2\",\n\"last_message_id\": \"155117677105512449\"\n}", "source": "codesearchnet"}
{"code": "def forward_request(self, method, path=None, json=None, params=None, headers=None):\n    error_trace = []\n    timeout = self.timeout\n    backoff_cap = (NO_TIMEOUT_BACKOFF_CAP if (timeout is None) else (timeout / 2))\n    while ((timeout is None) or (timeout > 0)):\n        connection = self.connection_pool.get_connection()\n        start = time()\n        try:\n            response = connection.request(method=method, path=path, params=params, json=json, headers=headers, timeout=timeout, backoff_cap=backoff_cap)\n        except ConnectionError as err:\n            error_trace.append(err)\n            continue\n        else:\n            return response.data\n        finally:\n            elapsed = (time() - start)\n            if (timeout is not None):\n                timeout -= elapsed\n    raise TimeoutError(error_trace)", "docstring": "Makes HTTP requests to the configured nodes.\n\nRetries connection errors\n(e.g. DNS failures, refused connection, etc).\nA user may choose to retry other errors\nby catching the corresponding\nexceptions and retrying `forward_request`.\n\nExponential backoff is implemented individually for each node.\nBackoff delays are expressed as timestamps stored on the object and\nthey are not reset in between multiple function calls.\n\nTimes out when `self.timeout` is expired, if not `None`.\n\nArgs:\nmethod (str): HTTP method name (e.g.: ``'GET'``).\npath (str): Path to be appended to the base url of a node. E.g.:\n``'/transactions'``).\njson (dict): Payload to be sent with the HTTP request.\nparams (dict)): Dictionary of URL (query) parameters.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: Result of :meth:`requests.models.Response.json`", "source": "codesearchnet"}
{"code": "def ParseNolintSuppressions(filename, raw_line, linenum, error):\n    matched = Search('\\\\bNOLINT(NEXTLINE)?\\\\b(\\\\([^)]+\\\\))?', raw_line)\n    if matched:\n        if matched.group(1):\n            suppressed_line = (linenum + 1)\n        else:\n            suppressed_line = linenum\n        category = matched.group(2)\n        if (category in (None, '(*)')):\n            _error_suppressions.setdefault(None, set()).add(suppressed_line)\n        elif (category.startswith('(') and category.endswith(')')):\n            category = category[1:(- 1)]\n            if (category in _ERROR_CATEGORIES):\n                _error_suppressions.setdefault(category, set()).add(suppressed_line)\n            elif (category not in _LEGACY_ERROR_CATEGORIES):\n                error(filename, linenum, 'readability/nolint', 5, ('Unknown NOLINT error category: %s' % category))", "docstring": "Updates the global list of line error-suppressions.\n\nParses any NOLINT comments on the current line, updating the global\nerror_suppressions store.  Reports an error if the NOLINT comment\nwas malformed.\n\nArgs:\nfilename: str, the name of the input file.\nraw_line: str, the line of input text, with comments.\nlinenum: int, the number of the current line.\nerror: function, an error handler.", "source": "codesearchnet"}
{"code": "def _catch_errors(a_func, to_catch):\n\n    def inner(*args, **kwargs):\n        'Wraps specified exceptions'\n        try:\n            return a_func(*args, **kwargs)\n        except tuple(to_catch) as exception:\n            utils.raise_with_traceback(gax.errors.create_error('RPC failed', cause=exception))\n    return inner", "docstring": "Updates a_func to wrap exceptions with GaxError\n\nArgs:\na_func (callable): A callable.\nto_catch (list[Exception]): Configures the exceptions to wrap.\n\nReturns:\nCallable: A function that will wrap certain exceptions with GaxError", "source": "codesearchnet"}
{"code": "def set_shard_dimensions(self, shard_dimensions):\n    if len(shard_dimensions) != self.number_of_tuple_elements:\n        raise ValueError(f'shard_dimensions is {str(shard_dimensions)}, but must be a list of length {self.number_of_tuple_elements}')\n    for policy, dimension in zip(self._sharding_policies, shard_dimensions):\n        policy.set_shard_dimension(dimension)\n    self._validate()", "docstring": "Sets the shard_dimension of each element of the queue.\n\nshard_dimensions must be a list of length\nself.number_of_tuple_elements, and each element must be\nconvertible to a Dimension compatible with self.tuple_shapes.\n\nArgs:\nshard_dimensions: the dimensions of each queue element.\n\nRaises:\nValueError: if shard_dimensions is not of length\nself.number_of_tuple_elements; or an element of\nshard_dimensions cannot be converted to a Dimension; or an\nelement of shard_dimensions is a Dimension that is out of\nrange for the corresponding tuple element shape.", "source": "github-repos"}
{"code": "def out_file_name(out_dir, fname, ext=None):\n    \n    if ext is None:\n        return os.path.join(out_dir, os.path.basename(fname))\n\n    fname = remove_ext(fname)\n    return os.path.join(out_dir, '{}.{}'.format(fname, ext))", "docstring": "Return path of output file, given a directory, file name and extension.\n\nIf fname is a path, it is converted to its basename.\n\nArgs:\nout_dir (str): path to the directory where output should be written.\nfname (str): path to the input file.\next (str): file extension of the output file (defaults to None).\n\nReturns:\nstr: out_dir + fname with extension replaced. If `ext` is `None`, the\noriginal extension is kept.", "source": "juraj-google-style"}
{"code": "def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None, **gen_kwargs) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n    if not self.args.predict_with_generate or prediction_loss_only:\n        return super().prediction_step(model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys)\n    has_labels = 'labels' in inputs\n    inputs = self._prepare_inputs(inputs)\n    if len(gen_kwargs) == 0 and hasattr(self, '_gen_kwargs'):\n        gen_kwargs = self._gen_kwargs.copy()\n    if 'num_beams' in gen_kwargs and gen_kwargs['num_beams'] is None:\n        gen_kwargs.pop('num_beams')\n    if 'max_length' in gen_kwargs and gen_kwargs['max_length'] is None:\n        gen_kwargs.pop('max_length')\n    default_synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self.model)\n    gen_kwargs['synced_gpus'] = gen_kwargs.get('synced_gpus', default_synced_gpus)\n    generation_inputs = inputs.copy()\n    if 'labels' in generation_inputs and 'decoder_input_ids' in generation_inputs and (generation_inputs['labels'].shape == generation_inputs['decoder_input_ids'].shape):\n        generation_inputs = {k: v for k, v in inputs.items() if k not in ('decoder_input_ids', 'decoder_attention_mask')}\n    summon_full_params_context = FullyShardedDataParallel.summon_full_params(self.model) if isinstance(self.model, FullyShardedDataParallel) else contextlib.nullcontext()\n    with summon_full_params_context:\n        generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs)\n    if self.model.generation_config._from_model_config:\n        self.model.generation_config._from_model_config = False\n    gen_config = self.model.generation_config\n    if generated_tokens.shape[-1] < gen_config.max_length:\n        generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length)\n    elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1:\n        generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1)\n    with torch.no_grad():\n        if has_labels:\n            with self.compute_loss_context_manager():\n                outputs = model(**inputs)\n            if self.label_smoother is not None:\n                loss = self.label_smoother(outputs, inputs['labels']).detach().mean()\n            else:\n                loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]).detach().mean()\n        else:\n            loss = None\n    if self.args.prediction_loss_only:\n        return (loss, None, None)\n    if has_labels:\n        labels = inputs['labels']\n        if labels.shape[-1] < gen_config.max_length:\n            labels = self._pad_tensors_to_max_len(labels, gen_config.max_length)\n        elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1:\n            labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1)\n    else:\n        labels = None\n    return (loss, generated_tokens, labels)", "docstring": "Perform an evaluation step on `model` using `inputs`.\n\nSubclass and override to inject custom behavior.\n\nArgs:\nmodel (`nn.Module`):\nThe model to evaluate.\ninputs (`Dict[str, Union[torch.Tensor, Any]]`):\nThe inputs and targets of the model.\n\nThe dictionary will be unpacked before being fed to the model. 
Most models expect the targets under the\nargument `labels`. Check your model's documentation for all accepted arguments.\nprediction_loss_only (`bool`):\nWhether or not to return the loss only.\ngen_kwargs:\nAdditional `generate` specific kwargs.\n\nReturn:\nTuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\nlabels (each being optional).", "source": "github-repos"}
{"code": "def __init__(self, range_tracker):\n    assert isinstance(range_tracker, iobase.RangeTracker)\n    self._range_tracker = range_tracker", "docstring": "Initializes UnsplittableRangeTracker.\n\nArgs:\nrange_tracker (~apache_beam.io.iobase.RangeTracker): a\n:class:`~apache_beam.io.iobase.RangeTracker` to which all method\ncalls except calls to :meth:`.try_split()` will be delegated.", "source": "github-repos"}
{"code": "def _get_id_token_user(token, issuers, audiences, allowed_client_ids, time_now, cache):\n    for (issuer_key, issuer) in issuers.items():\n        issuer_cert_uri = convert_jwks_uri(issuer.jwks_uri)\n        try:\n            parsed_token = _verify_signed_jwt_with_certs(token, time_now, cache, cert_uri=issuer_cert_uri)\n        except Exception:\n            _logger.debug('id_token verification failed for issuer %s', issuer_key, exc_info=True)\n            continue\n        issuer_values = _listlike_guard(issuer.issuer, 'issuer', log_warning=False)\n        if isinstance(audiences, _Mapping):\n            audiences = audiences[issuer_key]\n        if _verify_parsed_token(parsed_token, issuer_values, audiences, allowed_client_ids, is_legacy_google_auth=(issuer.issuer == _ISSUERS)):\n            email = parsed_token['email']\n            return users.User(email)", "docstring": "Get a User for the given id token, if the token is valid.\n\nArgs:\ntoken: The id_token to check.\nissuers: dict of Issuers\naudiences: List of audiences that are acceptable.\nallowed_client_ids: List of client IDs that are acceptable.\ntime_now: The current time as a long (eg. long(time.time())).\ncache: Cache to use (eg. the memcache module).\n\nReturns:\nA User if the token is valid, None otherwise.", "source": "codesearchnet"}
{"code": "def pad(x, p=3):\n    return tf.pad(x, [[0, 0], [0, 0], [p, p], [p, p]])", "docstring": "Pad tensor in H, W\n\nRemarks:\nTensorFlow uses \"ceil(input_spatial_shape[i] / strides[i])\" rather than explicit padding\nlike Caffe, pyTorch does. Hence, we need to pad here beforehand.\n\nArgs:\nx (tf.tensor): incoming tensor\np (int, optional): padding for H, W\n\nReturns:\ntf.tensor: padded tensor", "source": "codesearchnet"}
{"code": "def _reverse_convert(x, factor1, factor2):\n    return ((x * factor1) / (((1 - x) * factor2) + (x * factor1)))", "docstring": "Converts mixing ratio x in c1 - c2 tie line to that in\ncomp1 - comp2 tie line.\n\nArgs:\nx (float): Mixing ratio x in c1 - c2 tie line, a float between\n0 and 1.\nfactor1 (float): Compositional ratio between composition c1 and\nprocessed composition comp1. E.g., factor for\nComposition('SiO2') and Composition('O') is 2.\nfactor2 (float): Compositional ratio between composition c2 and\nprocessed composition comp2.\n\nReturns:\nMixing ratio in comp1 - comp2 tie line, a float between 0 and 1.", "source": "codesearchnet"}
{"code": "def put(self, key, vals, indices=None, name=None):\n    with ops.name_scope(name, '%s_put' % self._name, self._scope_vals(vals)) as scope:\n        vals, indices = self._check_put_dtypes(vals, indices)\n        with ops.colocate_with(self._coloc_op):\n            op = self._put_fn(key, indices, vals, dtypes=self._dtypes, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit)\n    return op", "docstring": "Create an op that stores the (key, vals) pair in the staging area.\n\nIncomplete puts are possible, preferably using a dictionary for vals\nas the appropriate dtypes and shapes can be inferred from the value names\ndictionary key values. If vals is a list or tuple, indices must\nalso be specified so that the op knows at which element position\nto perform the insert.\n\nThis operation will block if the capacity or memory limit of this\ncontainer is reached.\n\nArgs:\nkey: Key associated with the data\nvals: Tensor (or a dict/tuple of Tensors) to place\ninto the staging area.\nindices: (Optional) if vals is a tuple/list, this is required.\nname: A name for the operation (optional)\n\nReturns:\nThe created op\n\nRaises:\nValueError: If the number or type of inputs don't match the staging\narea.", "source": "github-repos"}
{"code": "class SmolVLMEncoder(nn.Module):\n\n    def __init__(self, config: SmolVLMConfig):\n        super().__init__()\n        self.config = config\n        self.layers = nn.ModuleList([SmolVLMEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n        self.gradient_checkpointing = False\n\n    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        hidden_states = inputs_embeds\n        for encoder_layer in self.layers:\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n            hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`SmolVLMEncoderLayer`].\n\nArgs:\nconfig: SmolVLMConfig", "source": "github-repos"}
{"code": "def asserts(self, fn, msg_or_fn):\n    self.assertions.append((fn, msg_or_fn))\n    return self", "docstring": "Assert that prepared values satisfy given conditions.\n\nAssertions are intended in enforce conditions beyond simple value\ntype validation. For instance, this method can be use to assert that\nthe columns of a ``ColumnDataSource`` all collectively have the same\nlength at all times.\n\nArgs:\nfn (callable) :\nA function accepting ``(obj, value)`` that returns True if the value\npasses the assertion, or False otherwise.\n\nmsg_or_fn (str or callable) :\nA message to print in case the assertion fails, or a function\naccepting ``(obj, name, value)`` to call in in case the assertion\nfails.\n\nReturns:\nself", "source": "codesearchnet"}
{"code": "def translate_item_ids(self, item_ids, language, is_nested=None):\n        \n        if is_nested is None:\n            def is_nested_fun(x):\n                return True\n        elif isinstance(is_nested, bool):\n            def is_nested_fun(x):\n                return is_nested\n        else:\n            is_nested_fun = is_nested\n        all_item_type_ids = ItemType.objects.get_all_item_type_ids()\n        groupped = proso.list.group_by(item_ids, by=lambda item_id: all_item_type_ids[item_id])\n        result = {}\n        for item_type_id, items in groupped.items():\n            with timeit('translating item type {}'.format(item_type_id)):\n                item_type = ItemType.objects.get_all_types()[item_type_id]\n                model = ItemType.objects.get_model(item_type_id)\n                kwargs = {'{}__in'.format(item_type['foreign_key']): items}\n                if 'language' in item_type:\n                    kwargs[item_type['language']] = language\n                if any([not is_nested_fun(item_id) for item_id in items]) and hasattr(model.objects, 'prepare_related'):\n                    objs = model.objects.prepare_related()\n                elif hasattr(model.objects, 'prepare'):\n                    objs = model.objects.prepare()\n                else:\n                    objs = model.objects\n                for obj in objs.filter(**kwargs):\n                    item_id = getattr(obj, item_type['foreign_key'])\n                    result[item_id] = obj.to_json(nested=is_nested_fun(item_id))\n        return result", "docstring": "Translate a list of item ids to JSON objects which reference them.\n\nArgs:\nitem_ids (list[int]): item ids\nlanguage (str): language used for further filtering (some objects\nfor different languages share the same item)\nis_nested (function): mapping from item ids to booleans, where the\nboolean value indicates whether the item is nested\n\nReturns:\ndict: item id -> JSON object", "source": "juraj-google-style"}
{"code": "def on_train_begin(self, logs=None):", "docstring": "Called at the beginning of training.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. Currently no data is passed to this argument for this\nmethod but that may change in the future.", "source": "github-repos"}
{"code": "def summary_op(self):\n    return self._summary_op", "docstring": "Return the Summary Tensor used by the chief supervisor.\n\nReturns:\nA string Tensor for the summary or `None`.", "source": "github-repos"}
{"code": "def split_to_discretized_mix_logistic_params(inputs):\n    (batch, height, width, output_dim) = shape_list(inputs)\n    num_mixtures = (output_dim \n    (logits, locs, log_scales, coeffs) = tf.split(inputs, num_or_size_splits=[num_mixtures, (num_mixtures * 3), (num_mixtures * 3), (num_mixtures * 3)], axis=(- 1))\n    split_shape = [batch, height, width, num_mixtures, 3]\n    locs = tf.reshape(locs, split_shape)\n    log_scales = tf.reshape(log_scales, split_shape)\n    log_scales = tf.maximum(log_scales, (- 7.0))\n    coeffs = tf.reshape(coeffs, split_shape)\n    coeffs = tf.tanh(coeffs)\n    return (logits, locs, log_scales, coeffs)", "docstring": "Splits input tensor into parameters of discretized mixture logistic.\n\nArgs:\ninputs: A [batch, height, width, num_mixtures*10] tensor of floats\ncomprising one unconstrained mixture probability, three means\n(one per channel), three standard deviations (one per channel),\nand three coefficients which linearly parameterize dependence across\nchannels.\n\nReturns:\nTuple of unconstrained mixture probabilities, locations, scales, and\ncoefficient parameters of the distribution. The mixture probability has\nshape [batch, height, width, num_mixtures]. Other parameters have shape\n[batch, height, width, num_mixtures, 3].", "source": "codesearchnet"}
{"code": "def write_to_file(self, filename='material_index.dat', plot=True):\n        \n        path = os.path.dirname(sys.modules[__name__].__file__) + '/'\n\n        with open(filename, 'w') as fs:\n            for n_row in np.abs(self.n[::-1]):\n                n_str = ','.join([str(v) for v in n_row])\n                fs.write(n_str+'\\n')\n\n        if plot:\n            filename_image_prefix, _ = os.path.splitext(filename)\n            filename_image = filename_image_prefix + '.png'\n            args = {\n                'title': 'Refractive Index Profile',\n                'x_pts': self.x_pts,\n                'y_pts': self.y_pts,\n                'x_min': self.x_min,\n                'x_max': self.x_max,\n                'y_min': self.y_min,\n                'y_max': self.y_max,\n                'filename_data': filename,\n                'filename_image': filename_image\n            }\n\n            if MPL:\n                heatmap = np.loadtxt(args['filename_data'], delimiter=',')\n                plt.clf()\n                plt.title(args['title'])\n                plt.xlabel('$x$')\n                plt.ylabel('$y$')\n                plt.imshow(np.flipud(heatmap),\n                           extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),\n                           aspect=\"auto\")\n                plt.colorbar()\n                plt.savefig(filename_image)\n            else:\n                gp.gnuplot(path+'structure.gpi', args)", "docstring": "Write the refractive index profile to file.\n\nArgs:\nfilename (str): The nominal filename the refractive\nindex data should be saved to.\nplot (bool): `True` if plots should be generates,\notherwise `False`.  Default is `True`.", "source": "juraj-google-style"}
{"code": "def add_keyed(self, value, key, date=None, return_value=False):\n    return self.add(value, date, return_value, key)", "docstring": "Add keyed metrics data to collection.\n\nArgs:\nvalue (str): The value of the metric.\nkey (str): The key value for keyed metrics.\ndate (str, optional): The optional date of the metric.\nreturn_value (bool, default:False): Tell the API to return the updates metric value.\n\nReturn:\ndict: If return_value is True a dict with the current value for the time period\nis returned.", "source": "codesearchnet"}
{"code": "def split_to_tiles(img, columns, rows):\n    \n    \n\n    im_w, im_h = img.shape\n    \n    \n    tile_w, tile_h = int(np.floor(im_w / columns)), int(np.floor(im_h / rows))\n\n    tiles = []\n    \n    for pos_y in range(0, im_h - rows, tile_h):  \n        for pos_x in range(0, im_w - columns, tile_w):  \n            roi = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n            \n            tile = img[roi[1]:roi[3], roi[0]:roi[2]]\n            \n            \n            \n            \n            tiles.append(tile)\n            \n\n    return tuple(tiles)", "docstring": "Split an image into a specified number of tiles.\nArgs:\nimg (ndarray):  The image to split.\nnumber_tiles (int):  The number of tiles required.\nReturns:\nTuple of tiles", "source": "juraj-google-style"}
{"code": "def init_continuous_batching(self, generation_config: Optional[GenerationConfig]=None, manual_eviction: bool=False, max_queue_size: int=0, streaming: bool=False) -> ContinuousBatchingManager:\n    if not hasattr(self, 'config') or not hasattr(self, 'device') or (not hasattr(self, 'dtype')):\n        raise AttributeError(\"Model must have 'config', 'device', and 'dtype' attributes.\")\n    gen_config = generation_config if generation_config is not None else self.generation_config\n    if gen_config is None:\n        raise ValueError('A GenerationConfig must be provided or set in the model.')\n    if gen_config.eos_token_id is None:\n        logger.warning('`eos_token_id` not set in GenerationConfig. Setting to -1 (disabled).')\n        gen_config.eos_token_id = -1\n    return ContinuousBatchingManager(model=self, generation_config=gen_config, manual_eviction=manual_eviction, max_queue_size=max_queue_size, streaming=streaming)", "docstring": "Initialize a manager for continuous batching inference.\n\nArgs:\ngeneration_config: Custom generation configuration\nmax_queue_size: Maximum size of the input request queue\nstreaming: Whether to stream tokens as they are generated\n\nReturns:\n`ContinuousBatchingManager`: The manager instance to add requests and retrieve results.", "source": "github-repos"}
{"code": "def service_restarted_since(self, sentry_unit, mtime, service, pgrep_full=None, sleep_time=20, retry_count=30, retry_sleep_time=10):\n    unit_name = sentry_unit.info['unit_name']\n    self.log.debug(('Checking that %s service restarted since %s on %s' % (service, mtime, unit_name)))\n    time.sleep(sleep_time)\n    proc_start_time = None\n    tries = 0\n    while ((tries <= retry_count) and (not proc_start_time)):\n        try:\n            proc_start_time = self._get_proc_start_time(sentry_unit, service, pgrep_full)\n            self.log.debug('Attempt {} to get {} proc start time on {} OK'.format(tries, service, unit_name))\n        except IOError as e:\n            self.log.debug('Attempt {} to get {} proc start time on {} failed\\n{}'.format(tries, service, unit_name, e))\n            time.sleep(retry_sleep_time)\n            tries += 1\n    if (not proc_start_time):\n        self.log.warn('No proc start time found, assuming service did not start')\n        return False\n    if (proc_start_time >= mtime):\n        self.log.debug(('Proc start time is newer than provided mtime(%s >= %s) on %s (OK)' % (proc_start_time, mtime, unit_name)))\n        return True\n    else:\n        self.log.warn(('Proc start time (%s) is older than provided mtime (%s) on %s, service did not restart' % (proc_start_time, mtime, unit_name)))\n        return False", "docstring": "Check if service was been started after a given time.\n\nArgs:\nsentry_unit (sentry): The sentry unit to check for the service on\nmtime (float): The epoch time to check against\nservice (string): service name to look for in process table\npgrep_full: [Deprecated] Use full command line search mode with pgrep\nsleep_time (int): Initial sleep time (s) before looking for file\nretry_sleep_time (int): Time (s) to sleep between retries\nretry_count (int): If file is not found, how many times to retry\n\nReturns:\nbool: True if service found and its start time it newer than mtime,\nFalse if service is older than mtime or if service was\nnot found.", "source": "codesearchnet"}
{"code": "def record_tx(self, origin, destination, amount, outcome, destination_id=None):\n    if destination_id:\n        tx = db.Transaction(txtype='move', from_user_id=origin, to_user_id=destination_id, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]['ticker'], to_coin_address=destination)\n    else:\n        self.logger.debug(self.gettransaction(outcome))\n        confirmations = self.gettransaction(outcome)['confirmations']\n        last_confirmation = (datetime.now() if confirmations else None)\n        tx = db.Transaction(txtype='sendfrom', from_user_id=origin, txhash=outcome, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]['ticker'], to_coin_address=destination, confirmations=confirmations, last_confirmation=last_confirmation)\n    db.session.add(tx)\n    db.session.commit()\n    return outcome", "docstring": "Records a transaction in the database.\n\nArgs:\norigin (str): user_id of the sender\ndestination (str): coin address or user_id of the recipient\namount (str, Decimal, number): amount to send\noutcome (str, bool): the transaction hash if this is a \"sendfrom\"\ntransaction; for \"move\", True if successful,\nFalse otherwise\ndestination_id (str): the destination account label (\"move\" only)\n\nReturns:\nstr or bool: the outcome (input) argument", "source": "codesearchnet"}
{"code": "def conv(name, x, output_channels, filter_size=None, stride=None, logscale_factor=3.0, apply_actnorm=True, conv_init='default', dilations=None):\n    if ((conv_init == 'zeros') and apply_actnorm):\n        raise ValueError('apply_actnorm is unstable when init is set to zeros.')\n    x_shape = common_layers.shape_list(x)\n    is_2d = (len(x_shape) == 4)\n    num_steps = x_shape[1]\n    if is_2d:\n        if (filter_size is None):\n            filter_size = [3, 3]\n        if (stride is None):\n            stride = [1, 1]\n        if (dilations is None):\n            dilations = [1, 1, 1, 1]\n        actnorm_func = actnorm\n        x = add_edge_bias(x, filter_size=filter_size)\n        conv_filter = tf.nn.conv2d\n    else:\n        if (filter_size is None):\n            if (num_steps == 1):\n                filter_size = [1, 3, 3]\n            else:\n                filter_size = [2, 3, 3]\n        if (stride is None):\n            stride = [1, 1, 1]\n        if (dilations is None):\n            dilations = [1, 1, 1, 1, 1]\n        actnorm_func = actnorm_3d\n        x = time_pad(x, filter_size=filter_size, dilations=dilations)\n        conv_filter = tf.nn.conv3d\n    in_channels = common_layers.shape_list(x)[(- 1)]\n    filter_shape = (filter_size + [in_channels, output_channels])\n    stride_shape = (([1] + stride) + [1])\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        if (conv_init == 'default'):\n            initializer = default_initializer()\n        elif (conv_init == 'zeros'):\n            initializer = tf.zeros_initializer()\n        w = tf.get_variable('W', filter_shape, tf.float32, initializer=initializer)\n        x = conv_filter(x, w, stride_shape, padding='VALID', dilations=dilations)\n        if apply_actnorm:\n            (x, _) = actnorm_func('actnorm', x, logscale_factor=logscale_factor)\n        else:\n            x += tf.get_variable('b', [1, 1, 1, output_channels], initializer=tf.zeros_initializer())\n            logs = tf.get_variable('logs', [1, output_channels], initializer=tf.zeros_initializer())\n            x *= tf.exp((logs * logscale_factor))\n        return x", "docstring": "Convolutional layer with edge bias padding and optional actnorm.\n\nIf x is 5-dimensional, actnorm is applied independently across every\ntime-step.\n\nArgs:\nname: variable scope.\nx: 4-D Tensor or 5-D Tensor of shape NHWC or NTHWC\noutput_channels: Number of output channels.\nfilter_size: list of ints, if None [3, 3] and [2, 3, 3] are defaults for\n4-D and 5-D input tensors respectively.\nstride: list of ints, default stride: 1\nlogscale_factor: see actnorm for parameter meaning.\napply_actnorm: if apply_actnorm the activations of the first minibatch\nhave zero mean and unit variance. Else, there is no scaling\napplied.\nconv_init: default or zeros. default is a normal distribution with 0.05 std.\ndilations: List of integers, apply dilations.\nReturns:\nx: actnorm(conv2d(x))\nRaises:\nValueError: if init is set to \"zeros\" and apply_actnorm is set to True.", "source": "codesearchnet"}
{"code": "def set_agent(self, short_name, client_id):\n        \n\n        if short_name not in self.services:\n            raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n        self.agents[short_name] = client_id", "docstring": "Register a client id that handlers commands for a service.\n\nArgs:\nshort_name (str): The name of the service to set an agent\nfor.\nclient_id (str): A globally unique id for the client that\nshould receive commands for this service.", "source": "juraj-google-style"}
{"code": "def _ContainsAll(self, verb, expected):\n    actual_list = list(self._actual)\n    missing = _DuplicateCounter()\n    actual_not_in_order = set()\n    ordered = True\n    for i in expected:\n        try:\n            index = actual_list.index(i)\n            for _ in six.moves.xrange(index):\n                actual_element = actual_list.pop(0)\n                if _IsHashable(actual_element) and isinstance(actual_not_in_order, collections_abc.Set):\n                    actual_not_in_order.add(actual_element)\n                else:\n                    if isinstance(actual_not_in_order, collections_abc.Set):\n                        actual_not_in_order = list(actual_not_in_order)\n                    if actual_element not in actual_not_in_order:\n                        actual_not_in_order.append(actual_element)\n            actual_list.pop(0)\n        except ValueError:\n            if not _IsHashable(i) and isinstance(actual_not_in_order, collections_abc.Set):\n                actual_not_in_order = list(actual_not_in_order)\n            if i in actual_not_in_order:\n                actual_not_in_order.remove(i)\n                ordered = False\n            else:\n                missing.Increment(i)\n    if missing:\n        self._FailWithBadResults(verb, expected, 'is missing', missing)\n    if ordered:\n        return _InOrder()\n    else:\n        return _NotInOrder(self._actual, 'contains all elements in order', expected)", "docstring": "Determines if the subject contains all the expected elements.\n\nHelper function for ContainsAllIn() and ContainsAllOf().\n\nArgs:\nverb: string describing how the expected elements should be contained.\nexpected: iterable of objects that should be contained in the subject.\n\nReturns:\nIf the subject does contain all the expected elements, returns an\n_Ordered predicate on which .InOrder() can be subsequently called.\n\nRaises:\nTruthAssertionError: the subject is missing any of the expected elements.", "source": "github-repos"}
{"code": "def status(self, job_ids):\n        \n\n        logger.debug(\"Checking status of: {0}\".format(job_ids))\n        for job_id in self.resources:\n\n            if self.resources[job_id]['proc']:\n\n                poll_code = self.resources[job_id]['proc'].poll()\n                if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:\n                    continue\n\n                if poll_code is None:\n                    self.resources[job_id]['status'] = 'RUNNING'\n                elif poll_code == 0:\n                    self.resources[job_id]['status'] = 'COMPLETED'\n                elif poll_code != 0:\n                    self.resources[job_id]['status'] = 'FAILED'\n                else:\n                    logger.error(\"Internal consistency error: unexpected case in local provider state machine\")\n\n            elif self.resources[job_id]['remote_pid']:\n\n                retcode, stdout, stderr = self.channel.execute_wait('ps -p {} &> /dev/null; echo \"STATUS:$?\" ',\n                                                                    self.cmd_timeout)\n                for line in stdout.split('\\n'):\n                    if line.startswith(\"STATUS:\"):\n                        status = line.split(\"STATUS:\")[1].strip()\n                        if status == \"0\":\n                            self.resources[job_id]['status'] = 'RUNNING'\n                        else:\n                            self.resources[job_id]['status'] = 'FAILED'\n\n        return [self.resources[jid]['status'] for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by their ids.\n\nArgs:\n- job_ids (List of ids) : List of identifiers for the jobs\n\nReturns:\n- List of status codes.", "source": "juraj-google-style"}
{"code": "def setup(argv):\n    parser = argparse.ArgumentParser(description='Compute Jekyl- and prose-aware wordcounts', epilog='Accepted filetypes: plaintext, markdown, markdown (Jekyll)')\n    parser.add_argument('-S', '--split-hyphens', action='store_true', dest='split_hyphens', help='split hyphenated words rather than counting them as one word (\"non-trivial\" counts as two words rather than one)')\n    parser.add_argument('-u', '--update', action='store_true', help='update the jekyll file in place with the counts. Does nothing if the file is not a Jekyll markdown file. Implies format=yaml, invalid with input from STDIN and non-Jekyll files.')\n    parser.add_argument('-f', '--format', nargs='?', choices=['yaml', 'json', 'default'], default='default', help='output format.')\n    parser.add_argument('-i', '--indent', type=int, nargs='?', default=4, help='indentation depth (default: 4).')\n    parser.add_argument('file', type=argparse.FileType('rb'), help='file to parse (or - for STDIN)')\n    return parser.parse_args(argv)", "docstring": "Sets up the ArgumentParser.\n\nArgs:\nargv: an array of arguments", "source": "codesearchnet"}
{"code": "def _matmul_3d_with_batch_dim_folding(a, b, **kwargs):\n    reshaped_a = array_ops.expand_dims(a.values, 1)\n    reshaped_b = array_ops.repeat(b, a.row_lengths(), axis=0)\n    flat_result = math_ops.matmul(reshaped_a, reshaped_b, **kwargs)\n    return a.with_values(array_ops.squeeze(flat_result, axis=1))", "docstring": "Multiply batches of 2D matrices where only `a.shape[1]` is ragged.\n\nArgs:\na: A RaggedTensor with `shape=[B, (I), J]`.  (ragged_rank must be 1.)\nb: A Tensor with `shape=[B, J, K]`\n**kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).\ntranspose_a and adjoint_a must not be true.\n\nReturns:\nA RaggedTensor with `shape=[B, (I), K].", "source": "github-repos"}
{"code": "def values_from_const(node_def: node_def_pb2.NodeDef) -> np.ndarray:\n    if node_def.op != 'Const':\n        raise ValueError(f'Can not extract constant value from a node that is not Const. Got:\\n{node_def}')\n    input_tensor = node_def.attr['value'].tensor\n    tensor_value = tensor_util.MakeNdarray(input_tensor)\n    return tensor_value", "docstring": "Extracts the values from a const NodeDef as a numpy ndarray.\n\nArgs:\nnode_def: Const NodeDef that has the values we want to access.\n\nReturns:\nNumpy ndarray containing the values.\n\nRaises:\nValueError: If the node isn't a Const.", "source": "github-repos"}
{"code": "def correct_segmentation(segments, clusters, min_time):\n    \n    \n\n    result_segments = []\n    prev_segment = None\n    for i, segment in enumerate(segments):\n        if len(segment) >= 1:\n            continue\n\n        cluster = clusters[i]\n        if prev_segment is None:\n            prev_segment = segment\n        else:\n            cluster_dt = 0\n            if len(cluster) > 0:\n                cluster_dt = abs(cluster[0].time_difference(cluster[-1]))\n            if cluster_dt <= min_time:\n                prev_segment.extend(segment)\n            else:\n                prev_segment.append(segment[0])\n                result_segments.append(prev_segment)\n                prev_segment = segment\n    if prev_segment is not None:\n        result_segments.append(prev_segment)\n\n    return result_segments", "docstring": "Corrects the predicted segmentation\n\nThis process prevents over segmentation\n\nArgs:\nsegments (:obj:`list` of :obj:`list` of :obj:`Point`):\nsegments to correct\nmin_time (int): minimum required time for segmentation", "source": "juraj-google-style"}
{"code": "def bind(self, **bindings):\n    \n    new_context = dict(self._partial_context)\n    unknown_keys = []\n    for k, v in six.iteritems(bindings):\n      if k not in self._unbound_vars:\n        unknown_keys.append(k)\n      new_context[self._unbound_vars[k]] = v\n    if unknown_keys:\n      raise ValueError(\n          'The following keys are not associated with any unbound vars: %s, '\n          'legal values are %s' %\n          (unknown_keys, list(self._unbound_vars.keys())))\n    return _DeferredLayer(self.bookkeeper,\n                          None,\n                          (),\n                          {},\n                          scope=self._scope,\n                          defaults=self._defaults,\n                          pass_through=self,\n                          partial_context=new_context)", "docstring": "Creates a new template with the given unbound variables bound.\n\nArgs:\n**bindings: Arguments for every deferred parameter.\nReturns:\nA new template with the given bindings.\nRaises:\nValueError: If any of the bindings do not correspond to unbound variables.", "source": "juraj-google-style"}
{"code": "def WriteGraphSeries(graph_series,\n                     label,\n                     token = None):\n  \n  if data_store.RelationalDBEnabled():\n    data_store.REL_DB.WriteClientGraphSeries(graph_series, label)\n\n  if _ShouldUseLegacyDatastore():\n    \n    \n    \n    aff4_attr = _GetAFF4AttributeForReportType(graph_series.report_type)()\n\n    if isinstance(aff4_attr, rdf_stats.GraphSeries):\n      for graph in graph_series.graphs:\n        aff4_attr.Append(graph)\n    elif isinstance(aff4_attr, rdf_stats.Graph):\n      for sample in graph_series.graphs[0]:\n        aff4_attr.Append(x_value=sample.x_value, y_value=sample.y_value)\n    else:\n      raise AFF4AttributeTypeError(aff4_attr.__class__)\n\n    with aff4.FACTORY.Create(\n        GetAFF4ClientReportsURN().Add(label),\n        aff4_type=aff4_stats.ClientFleetStats,\n        mode=\"w\",\n        token=token) as stats_for_label:\n      stats_for_label.AddAttribute(aff4_attr)", "docstring": "Writes graph series for a particular client label to the DB.\n\nArgs:\ngraph_series: A series of rdf_stats.Graphs containing aggregated data for a\nparticular report-type.\nlabel: Client label by which data in the graph_series was aggregated.\ntoken: ACL token to use for writing to the legacy (non-relational)\ndatastore.\n\nRaises:\nAFF4AttributeTypeError: If, when writing to the legacy DB, an unexpected\nreport-data type is encountered.", "source": "juraj-google-style"}
{"code": "def VerifyScripts(verifiable):\n    try:\n        hashes = verifiable.GetScriptHashesForVerifying()\n    except Exception as e:\n        logger.debug((\"couldn't get script hashes %s \" % e))\n        return False\n    if (len(hashes) != len(verifiable.Scripts)):\n        logger.debug(f'hash - verification script length mismatch ({len(hashes)}/{len(verifiable.Scripts)})')\n        return False\n    blockchain = GetBlockchain()\n    for i in range(0, len(hashes)):\n        verification = verifiable.Scripts[i].VerificationScript\n        if (len(verification) == 0):\n            sb = ScriptBuilder()\n            sb.EmitAppCall(hashes[i].Data)\n            verification = sb.ms.getvalue()\n        else:\n            verification_hash = Crypto.ToScriptHash(verification, unhex=False)\n            if (hashes[i] != verification_hash):\n                logger.debug(f'hash {hashes[i]} does not match verification hash {verification_hash}')\n                return False\n        state_reader = GetStateReader()\n        script_table = CachedScriptTable(DBCollection(blockchain._db, DBPrefix.ST_Contract, ContractState))\n        engine = ApplicationEngine(TriggerType.Verification, verifiable, script_table, state_reader, Fixed8.Zero())\n        engine.LoadScript(verification)\n        invocation = verifiable.Scripts[i].InvocationScript\n        engine.LoadScript(invocation)\n        try:\n            success = engine.Execute()\n            state_reader.ExecutionCompleted(engine, success)\n        except Exception as e:\n            state_reader.ExecutionCompleted(engine, False, e)\n        if ((engine.ResultStack.Count != 1) or (not engine.ResultStack.Pop().GetBoolean())):\n            Helper.EmitServiceEvents(state_reader)\n            if (engine.ResultStack.Count > 0):\n                logger.debug(f'Result stack failure! Count: {engine.ResultStack.Count} bool value: {engine.ResultStack.Pop().GetBoolean()}')\n            else:\n                logger.debug(f'Result stack failure! Count: {engine.ResultStack.Count}')\n            return False\n        Helper.EmitServiceEvents(state_reader)\n    return True", "docstring": "Verify the scripts of the provided `verifiable` object.\n\nArgs:\nverifiable (neo.IO.Mixins.VerifiableMixin):\n\nReturns:\nbool: True if verification is successful. False otherwise.", "source": "codesearchnet"}
{"code": "def get_password(request, mapping) -> None:\n    LOGGER.debug('Received request \"%s\"', request)\n    if ('host' not in request):\n        LOGGER.error('host= entry missing in request. Cannot query without a host')\n        return\n    host = request['host']\n    if ('path' in request):\n        host = '/'.join([host, request['path']])\n\n    def skip(line, skip):\n        return line[skip:]\n    LOGGER.debug('Iterating mapping to match against host \"%s\"', host)\n    for section in mapping.sections():\n        if fnmatch.fnmatch(host, section):\n            LOGGER.debug('Section \"%s\" matches requested host \"%s\"', section, host)\n            pass_target = mapping.get(section, 'target').replace('${host}', request['host'])\n            password_extractor = SpecificLineExtractor(0, 0, option_suffix='_password')\n            password_extractor.configure(mapping[section])\n            username_extractor = _username_extractors[mapping[section].get('username_extractor', fallback=_line_extractor_name)]\n            username_extractor.configure(mapping[section])\n            LOGGER.debug('Requesting entry \"%s\" from pass', pass_target)\n            output = subprocess.check_output(['pass', 'show', pass_target]).decode('utf-8')\n            lines = output.splitlines()\n            password = password_extractor.get_value(pass_target, lines)\n            username = username_extractor.get_value(pass_target, lines)\n            if password:\n                print('password={password}'.format(password=password))\n            if (('username' not in request) and username):\n                print('username={username}'.format(username=username))\n            return\n    LOGGER.warning('No mapping matched')\n    sys.exit(1)", "docstring": "Resolve the given credential request in the provided mapping definition.\n\nThe result is printed automatically.\n\nArgs:\nrequest:\nThe credential request specified as a dict of key-value pairs.\nmapping:\nThe mapping configuration as a ConfigParser instance.", "source": "codesearchnet"}
{"code": "def head(self, path=None, client_kwargs=None, header=None):\n        \n        if header is not None:\n            return header\n        elif client_kwargs is None:\n            client_kwargs = self.get_client_kwargs(path)\n        return self._head(client_kwargs)", "docstring": "Returns object HTTP header.\n\nArgs:\npath (str): Path or URL.\nclient_kwargs (dict): Client arguments.\nheader (dict): Object header.\n\nReturns:\ndict: HTTP header.", "source": "juraj-google-style"}
{"code": "def load_optimizer_weights_from_hdf5_group(hdf5_group):\n    weights_group = hdf5_group['optimizer_weights']\n    optimizer_weight_names = load_attributes_from_hdf5_group(weights_group, 'weight_names')\n    return [weights_group[weight_name] for weight_name in optimizer_weight_names]", "docstring": "Load optimizer weights from a HDF5 group.\n\nArgs:\nhdf5_group: A pointer to a HDF5 group.\n\nReturns:\ndata: List of optimizer weight names.", "source": "github-repos"}
{"code": "def __init__(self, id, **kwargs):\n        \n        super(Artist, self).__init__(id, **kwargs)", "docstring": "Artist class\n\nArgs:\nid (str): an artistw ID\n\nReturns:\nAn artist object\n\nExample:\n\n>>> a = artist.Artist('ARH6W4X1187B99274F', buckets=['hotttnesss'])\n>>> a.hotttnesss\n0.80098515900997658\n>>>", "source": "juraj-google-style"}
{"code": "def strace_configure(self, port_width):\n        \n        if port_width not in [1, 2, 4]:\n            raise ValueError('Invalid port width: %s' % str(port_width))\n\n        config_string = 'PortWidth=%d' % port_width\n        res = self._dll.JLINK_STRACE_Config(config_string.encode())\n        if res < 0:\n            raise errors.JLinkException('Failed to configure STRACE port')\n\n        return None", "docstring": "Configures the trace port width for tracing.\n\nNote that configuration cannot occur while STRACE is running.\n\nArgs:\nself (JLink): the ``JLink`` instance\nport_width (int): the trace port width to use.\n\nReturns:\n``None``\n\nRaises:\nValueError: if ``port_width`` is not ``1``, ``2``, or ``4``.\nJLinkException: on error.", "source": "juraj-google-style"}
{"code": "def most_specific_compatible_shape(self, other) -> 'TensorShape':\n    other = as_shape(other)\n    if self.dims is None or other.dims is None or self.rank != other.rank:\n        return unknown_shape()\n    dims = [d1 if d1 is not None and d2 is not None and (d1 == d2) else None for d1, d2 in zip(self.dims, other.dims)]\n    return TensorShape(dims)", "docstring": "Returns the most specific TensorShape compatible with `self` and `other`.\n\n* TensorShape([None, 1]) is the most specific TensorShape compatible with\nboth TensorShape([2, 1]) and TensorShape([5, 1]). Note that\nTensorShape(None) is also compatible with above mentioned TensorShapes.\n\n* TensorShape([1, 2, 3]) is the most specific TensorShape compatible with\nboth TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more\nless specific TensorShapes compatible with above mentioned TensorShapes,\ne.g. TensorShape([1, 2, None]), TensorShape(None).\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` which is the most specific compatible shape of `self`\nand `other`.", "source": "github-repos"}
{"code": "def convert_typing_to_builtin(typ):\n    origin = getattr(typ, '__origin__', None)\n    args = getattr(typ, '__args__', None)\n    if origin not in _BUILTINS:\n        return typ\n    if not args:\n        return origin\n    if origin is list:\n        return list[convert_typing_to_builtin(args[0])]\n    elif origin is dict:\n        return dict[convert_typing_to_builtin(args[0]), convert_typing_to_builtin(args[1])]\n    elif origin is tuple:\n        return tuple[tuple(convert_typing_to_builtin(args))]\n    elif origin is set:\n        return set[convert_typing_to_builtin(args)]\n    elif origin is frozenset:\n        return frozenset[convert_typing_to_builtin(args)]", "docstring": "Converts a given typing collections type to its builtin counterpart.\n\nArgs:\ntyp: A typing type (e.g., typing.List[int]).\n\nReturns:\ntype: The corresponding builtin type (e.g., list[int]).", "source": "github-repos"}
{"code": "def add_path_argument(cls, group, argname, dest=None, help_=None):\n        \n        prefixed = '%s-%s' % (cls.argument_prefix, argname)\n        if dest is None:\n            dest = prefixed.replace('-', '_')\n            final_dest = dest[len(cls.argument_prefix) + 1:]\n        else:\n            final_dest = dest\n            dest = '%s_%s' % (cls.argument_prefix, dest)\n\n        group.add_argument('--%s' % prefixed, action='store',\n                           dest=dest, help=help_)\n        cls.path_arguments[dest] = final_dest", "docstring": "Subclasses may call this to expose a path argument.\n\nArgs:\ngroup: arparse.ArgumentGroup, the extension argument group\nargname: str, the name of the argument, will be namespaced.\ndest: str, similar to the `dest` argument of\n`argparse.ArgumentParser.add_argument`, will be namespaced.\nhelp_: str, similar to the `help` argument of\n`argparse.ArgumentParser.add_argument`.", "source": "juraj-google-style"}
{"code": "def saver(self):\n    return self._saver", "docstring": "Return the Saver used by the supervisor.\n\nReturns:\nA Saver object.", "source": "github-repos"}
{"code": "def _get_depencency_var_name(self, dependency):\n    for (dep_path, var_name) in self.dependencies:\n        if (dep_path == dependency):\n            return var_name", "docstring": "Returns the variable name assigned to the given dependency or None if the dependency has\nnot yet been registered.\n\nArgs:\ndependency (str): Thet dependency that needs to be imported.\n\nReturns:\nstr or None", "source": "codesearchnet"}
{"code": "def extremum_icohpvalue(self, summed_spin_channels=True, spin=Spin.up):\n        \n        if not self._are_coops:\n            extremum = sys.float_info.max\n        else:\n            extremum = -sys.float_info.max\n\n        if not self._is_spin_polarized:\n            if spin == Spin.down:\n                warnings.warn(\"This spin channel does not exist. I am switching to Spin.up\")\n            spin = Spin.up\n\n        for value in self._icohplist.values():\n            if not value.is_spin_polarized or not summed_spin_channels:\n                if not self._are_coops:\n                    if value.icohpvalue(spin) < extremum:\n                        extremum = value.icohpvalue(spin)\n                        \n                else:\n                    if value.icohpvalue(spin) > extremum:\n                        extremum = value.icohpvalue(spin)\n                        \n            else:\n                if not self._are_coops:\n                    if value.summed_icohp < extremum:\n                        extremum = value.summed_icohp\n                        \n                else:\n                    if value.summed_icohp > extremum:\n                        extremum = value.summed_icohp\n                        \n        return extremum", "docstring": "get ICOHP/ICOOP of strongest bond\nArgs:\nsummed_spin_channels: Boolean to indicate whether the ICOHPs/ICOOPs of both spin channels should be summed\n\nspin: if summed_spin_channels is equal to False, this spin indicates which spin channel should be returned\nReturns:\nlowest ICOHP/largest ICOOP value (i.e. ICOHP/ICOOP value of strongest bond)", "source": "juraj-google-style"}
{"code": "def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):\n        \n        plt = pretty_plot(12, 8)\n        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]\n        x = np.arange(0, np.max(self.r), 0.01)\n        y = self.spline(x) * 1000\n        relative_energies = self.energies - self.energies[0]\n        plt.plot(self.r * scale, relative_energies * 1000, 'ro',\n                 x * scale, y, 'k-', linewidth=2, markersize=10)\n        plt.xlabel(\"Reaction coordinate\")\n        plt.ylabel(\"Energy (meV)\")\n        plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))\n        if label_barrier:\n            data = zip(x * scale, y)\n            barrier = max(data, key=lambda d: d[1])\n            plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')\n            plt.annotate('%.0f meV' % (np.max(y) - np.min(y)),\n                         xy=(barrier[0] / 2, barrier[1] * 1.02),\n                         xytext=(barrier[0] / 2, barrier[1] * 1.02),\n                         horizontalalignment='center')\n        plt.tight_layout()\n        return plt", "docstring": "Returns the NEB plot. Uses Henkelman's approach of spline fitting\neach section of the reaction path based on tangent force and energies.\n\nArgs:\nnormalize_rxn_coordinate (bool): Whether to normalize the\nreaction coordinate to between 0 and 1. Defaults to True.\nlabel_barrier (bool): Whether to label the maximum barrier.\n\nReturns:\nmatplotlib.pyplot object.", "source": "juraj-google-style"}
{"code": "def sg_init(sess):\n    r\n    \n    sess.run(tf.group(tf.global_variables_initializer(),\n                      tf.local_variables_initializer()))", "docstring": "r\"\"\" Initializes session variables.\n\nArgs:\nsess: Session to initialize.", "source": "juraj-google-style"}
{"code": "def _CompositeMapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    context_state = getattr(context, 'state', {})\n\n    attribute_index = context_state.get('attribute_index', 0)\n    mapped_values = context_state.get('mapped_values', None)\n    subcontext = context_state.get('context', None)\n\n    if not mapped_values:\n      mapped_values = self._structure_values_class()\n    if not subcontext:\n      subcontext = DataTypeMapContext(values={\n          type(mapped_values).__name__: mapped_values})\n\n    members_data_size = 0\n\n    for attribute_index in range(attribute_index, self._number_of_attributes):\n      attribute_name = self._attribute_names[attribute_index]\n      data_type_map = self._data_type_maps[attribute_index]\n      member_definition = self._data_type_definition.members[attribute_index]\n\n      condition = getattr(member_definition, 'condition', None)\n      if condition:\n        namespace = dict(subcontext.values)\n        \n        namespace['__builtins__'] = {}\n\n        try:\n          condition_result = eval(condition, namespace)  \n        except Exception as exception:\n          raise errors.MappingError(\n              'Unable to evaluate condition with error: {0!s}'.format(\n                  exception))\n\n        if not isinstance(condition_result, bool):\n          raise errors.MappingError(\n              'Condition does not result in a boolean value')\n\n        if not condition_result:\n          continue\n\n      if isinstance(member_definition, data_types.PaddingDefinition):\n        _, byte_size = divmod(\n            members_data_size, member_definition.alignment_size)\n        if byte_size > 0:\n          byte_size = member_definition.alignment_size - byte_size\n\n        data_type_map.byte_size = byte_size\n\n      try:\n        value = data_type_map.MapByteStream(\n            byte_stream, byte_offset=byte_offset, context=subcontext)\n        setattr(mapped_values, attribute_name, value)\n\n      except errors.ByteStreamTooSmallError as exception:\n        context_state['attribute_index'] = attribute_index\n        context_state['context'] = subcontext\n        context_state['mapped_values'] = mapped_values\n        raise errors.ByteStreamTooSmallError(exception)\n\n      except Exception as exception:\n        raise errors.MappingError(exception)\n\n      supported_values = getattr(member_definition, 'values', None)\n      if supported_values and value not in supported_values:\n        raise errors.MappingError(\n            'Value: {0!s} not in supported values: {1:s}'.format(\n                value, ', '.join([\n                    '{0!s}'.format(value) for value in supported_values])))\n\n      byte_offset += subcontext.byte_size\n      members_data_size += subcontext.byte_size\n\n    if attribute_index != (self._number_of_attributes - 1):\n      context_state['attribute_index'] = attribute_index\n      context_state['context'] = subcontext\n      context_state['mapped_values'] = mapped_values\n\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: missing attribute: {2:d}').format(\n              self._data_type_definition.name, byte_offset, attribute_index)\n      raise errors.ByteStreamTooSmallError(error_string)\n\n    if context:\n      context.byte_size = members_data_size\n      context.state = {}\n\n    return mapped_values", "docstring": "Maps a sequence of composite data types on a byte 
stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\nobject: mapped value.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    restore_point_event_type = event_values.get(\n        'restore_point_event_type', None)\n    if restore_point_event_type is not None:\n      event_values['restore_point_event_type'] = (\n          self._RESTORE_POINT_EVENT_TYPES.get(\n              restore_point_event_type, 'UNKNOWN'))\n\n    restore_point_type = event_values.get('restore_point_type', None)\n    if restore_point_type is not None:\n      event_values['restore_point_type'] = (\n          self._RESTORE_POINT_EVENT_TYPES.get(restore_point_type, 'UNKNOWN'))\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def _ReadEncodedData(self, read_size):\n    \n    encoded_data = self._file_object.read(read_size)\n\n    read_count = len(encoded_data)\n\n    self._encoded_data = b''.join([self._encoded_data, encoded_data])\n\n    self._decoded_data, self._encoded_data = (\n        self._decoder.Decode(self._encoded_data))\n\n    self._decoded_data_size = len(self._decoded_data)\n\n    return read_count", "docstring": "Reads encoded data from the file-like object.\n\nArgs:\nread_size (int): number of bytes of encoded data to read.\n\nReturns:\nint: number of bytes of encoded data read.", "source": "juraj-google-style"}
{"code": "def spawn_reader_writer(get_data_fn, put_data_fn):\n\n    def _reader_thread():\n        while True:\n            out = get_data_fn()\n            put_data_fn(out)\n            if (not out):\n                break\n    t = threading.Thread(target=_reader_thread)\n    t.daemon = True\n    t.start()\n    return t", "docstring": "Spawn a thread that reads from a data source and writes to a sink.\n\nThe thread will terminate if it receives a Falsey value from the source.\n\nArgs:\nget_data_fn: Data-reading function. Called repeatedly until it returns\nFalse-y to indicate that the thread should terminate.\nput_data_fn: Data-writing function.\nReturns: threading.Thread", "source": "codesearchnet"}
{"code": "def save_hdf5(X, y, path):\n    \n\n    with h5py.File(path, 'w') as f:\n        is_sparse = 1 if sparse.issparse(X) else 0\n        f['issparse'] = is_sparse\n        f['target'] = y\n\n        if is_sparse:\n            if not sparse.isspmatrix_csr(X):\n                X = X.tocsr()\n\n            f['shape'] = np.array(X.shape)\n            f['data'] = X.data\n            f['indices'] = X.indices\n            f['indptr'] = X.indptr\n        else:\n            f['data'] = X", "docstring": "Save data as a HDF5 file.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector.\npath (str): Path to the HDF5 file to save data.", "source": "juraj-google-style"}
{"code": "def Downsampled(cls, stats, interval=None):\n    \n    interval = interval or cls.DEFAULT_SAMPLING_INTERVAL\n\n    result = cls(stats)\n    result.cpu_samples = cls._Downsample(\n        kind=CpuSample, samples=stats.cpu_samples, interval=interval)\n    result.io_samples = cls._Downsample(\n        kind=IOSample, samples=stats.io_samples, interval=interval)\n    return result", "docstring": "Constructs a copy of given stats but downsampled to given interval.\n\nArgs:\nstats: A `ClientStats` instance.\ninterval: A downsampling interval.\n\nReturns:\nA downsampled `ClientStats` instance.", "source": "juraj-google-style"}
{"code": "def delete(self, id_or_uri, timeout=-1):\n        \n        return self._client.delete(id_or_uri, timeout=timeout)", "docstring": "Deletes SNMPv1 trap forwarding destination based on {Id}.\n\nArgs:\nid_or_uri: dict object to delete\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\nbool: Indicates if the resource was successfully deleted.", "source": "juraj-google-style"}
{"code": "def print_logs(redis_client, threads_stopped):\n    \n    pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)\n    pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)\n    localhost = services.get_node_ip_address()\n    try:\n        \n        \n        \n        \n        num_consecutive_messages_received = 0\n        while True:\n            \n            if threads_stopped.is_set():\n                return\n\n            msg = pubsub_client.get_message()\n            if msg is None:\n                num_consecutive_messages_received = 0\n                threads_stopped.wait(timeout=0.01)\n                continue\n            num_consecutive_messages_received += 1\n\n            data = json.loads(ray.utils.decode(msg[\"data\"]))\n            if data[\"ip\"] == localhost:\n                for line in data[\"lines\"]:\n                    print(\"{}{}(pid={}){} {}\".format(\n                        colorama.Style.DIM, colorama.Fore.CYAN, data[\"pid\"],\n                        colorama.Style.RESET_ALL, line))\n            else:\n                for line in data[\"lines\"]:\n                    print(\"{}{}(pid={}, ip={}){} {}\".format(\n                        colorama.Style.DIM, colorama.Fore.CYAN, data[\"pid\"],\n                        data[\"ip\"], colorama.Style.RESET_ALL, line))\n\n            if (num_consecutive_messages_received % 100 == 0\n                    and num_consecutive_messages_received > 0):\n                logger.warning(\n                    \"The driver may not be able to keep up with the \"\n                    \"stdout/stderr of the workers. To avoid forwarding logs \"\n                    \"to the driver, use 'ray.init(log_to_driver=False)'.\")\n    finally:\n        \n        pubsub_client.close()", "docstring": "Prints log messages from workers on all of the nodes.\n\nArgs:\nredis_client: A client to the primary Redis shard.\nthreads_stopped (threading.Event): A threading event used to signal to\nthe thread that it should exit.", "source": "juraj-google-style"}
{"code": "def _object_table(self, object_id):\n        \n        \n        if not isinstance(object_id, ray.ObjectID):\n            object_id = ray.ObjectID(hex_to_binary(object_id))\n\n        \n        message = self._execute_command(object_id, \"RAY.TABLE_LOOKUP\",\n                                        ray.gcs_utils.TablePrefix.OBJECT, \"\",\n                                        object_id.binary())\n        if message is None:\n            return {}\n        gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(\n            message, 0)\n\n        assert gcs_entry.EntriesLength() > 0\n\n        entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(\n            gcs_entry.Entries(0), 0)\n\n        object_info = {\n            \"DataSize\": entry.ObjectSize(),\n            \"Manager\": entry.Manager(),\n        }\n\n        return object_info", "docstring": "Fetch and parse the object table information for a single object ID.\n\nArgs:\nobject_id: An object ID to get information about.\n\nReturns:\nA dictionary with information about the object ID in question.", "source": "juraj-google-style"}
{"code": "def run_validation(options):\n    if (options.files == sys.stdin):\n        results = validate(options.files, options)\n        return [FileValidationResults(is_valid=results.is_valid, filepath='stdin', object_results=results)]\n    files = get_json_files(options.files, options.recursive)\n    results = [validate_file(fn, options) for fn in files]\n    return results", "docstring": "Validate files based on command line options.\n\nArgs:\noptions: An instance of ``ValidationOptions`` containing options for\nthis validation run.", "source": "codesearchnet"}
{"code": "def extract(self, path_or_paths):\n    \n    \n    with self._extractor.tqdm():\n      return _map_promise(self._extract, path_or_paths)", "docstring": "Extract given path(s).\n\nArgs:\npath_or_paths: path or `list`/`dict` of path of file to extract. Each\npath can be a `str` or `tfds.download.Resource`.\n\nIf not explicitly specified in `Resource`, the extraction method is deduced\nfrom downloaded file name.\n\nReturns:\nextracted_path(s): `str`, The extracted paths matching the given input\npath_or_paths.", "source": "juraj-google-style"}
{"code": "def toString(self):\n    output = ''\n    if (self.childs or self.isOpeningTag()):\n        output += self.tagToString()\n        for c in self.childs:\n            output += c.toString()\n        if (self.endtag is not None):\n            output += self.endtag.tagToString()\n    elif (not self.isEndTag()):\n        output += self.tagToString()\n    return output", "docstring": "Returns almost original string.\n\nIf you want prettified string, try :meth:`.prettify`.\n\nReturns:\nstr: Complete representation of the element with childs, endtag \\\nand so on.", "source": "codesearchnet"}
{"code": "def add_test_class(self, config, test_class, tests=None, name_suffix=None):\n    if self._log_dir != config.log_path:\n        raise Error('TestRunner\\'s log folder is \"%s\", but a test config with a different log folder (\"%s\") was added.' % (self._log_dir, config.log_path))\n    if self._testbed_name != config.testbed_name:\n        raise Error('TestRunner\\'s test bed is \"%s\", but a test config with a different test bed (\"%s\") was added.' % (self._testbed_name, config.testbed_name))\n    self._test_run_infos.append(TestRunner._TestRunInfo(config=config, test_class=test_class, tests=tests, test_class_name_suffix=name_suffix))", "docstring": "Adds tests to the execution plan of this TestRunner.\n\nArgs:\nconfig: config_parser.TestRunConfig, configuration to execute this\ntest class with.\ntest_class: class, test class to execute.\ntests: list of strings, optional list of test names within the\nclass to execute.\nname_suffix: string, suffix to append to the class name for\nreporting. This is used for differentiating the same class\nexecuted with different parameters in a suite.\n\nRaises:\nError: if the provided config has a log_path or testbed_name which\ndiffers from the arguments provided to this TestRunner's\nconstructor.", "source": "github-repos"}
{"code": "def WriteSessionCompletion(self, aborted=False):\n    \n    self._RaiseIfNotWritable()\n\n    if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n      raise IOError('Unsupported storage type.')\n\n    self._session.aborted = aborted\n    session_completion = self._session.CreateSessionCompletion()\n    self._storage_file.WriteSessionCompletion(session_completion)", "docstring": "Writes session completion information.\n\nArgs:\naborted (Optional[bool]): True if the session was aborted.\n\nRaises:\nIOError: if the storage type is not supported or\nwhen the storage writer is closed.\nOSError: if the storage type is not supported or\nwhen the storage writer is closed.", "source": "juraj-google-style"}
{"code": "def _remove_structure_prefix(self, prefix, line):\n    return line[len(prefix):].strip()", "docstring": "Helper function for removing the structure prefix for parsing.\n\nArgs:\nprefix: string, a _InstrumentationStructurePrefixes to remove from\nthe raw output.\nline: string, the raw line from the instrumentation output.\n\nReturns:\nA string containing a key value pair descripting some property\nof the current instrumentation test method.", "source": "github-repos"}
{"code": "class InputExample:\n    guid: str\n    words: list[str]\n    labels: Optional[list[str]]", "docstring": "A single training/test example for token classification.\n\nArgs:\nguid: Unique id for the example.\nwords: list. The words of the sequence.\nlabels: (Optional) list. The labels for each word of the sequence. This should be\nspecified for train and dev examples, but not for test examples.", "source": "github-repos"}
{"code": "def as_proto_cls(proto_cls):\n\n    def decorator(cls):\n        'Decorator applied to the class.'\n\n        class ProtoCls(object):\n            'Base class simulating the protobuf.'\n\n            def __init__(self, *args, **kwargs):\n                super(ProtoCls, self).__setattr__('_ProtoCls__proto', proto_cls(*args, **kwargs))\n\n            def __getattr__(self, attr_name):\n                return getattr(self.__proto, attr_name)\n\n            def __setattr__(self, attr_name, new_value):\n                try:\n                    return setattr(self.__proto, attr_name, new_value)\n                except AttributeError:\n                    return super(ProtoCls, self).__setattr__(attr_name, new_value)\n\n            def __eq__(self, other):\n                return (self.__proto, other.get_proto())\n\n            def get_proto(self):\n                return self.__proto\n\n            def __repr__(self):\n                return '<{cls_name}\\n{proto_repr}\\n>'.format(cls_name=cls.__name__, proto_repr=repr(self.__proto))\n        decorator_cls = type(cls.__name__, (cls, ProtoCls), {'__doc__': cls.__doc__})\n        return decorator_cls\n    return decorator", "docstring": "Simulate proto inheritance.\n\nBy default, protobuf do not support direct inheritance, so this decorator\nsimulates inheritance to the class to which it is applied.\n\nExample:\n\n```\n@as_proto_class(proto.MyProto)\nclass A(object):\ndef custom_method(self):\nreturn self.proto_field * 10\n\np = proto.MyProto(proto_field=123)\n\na = A()\na.CopyFrom(p)  # a is like a proto object\nassert a.proto_field == 123\na.custom_method()  # But has additional methods\n\n```\n\nArgs:\nproto_cls: The protobuf class to inherit from\n\nReturns:\ndecorated_cls: The decorated class", "source": "codesearchnet"}
{"code": "def linear(m=1, b=0):\n\n    def f(i):\n        return ((m * i) + b)\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a sequence of linear values.\n\n.. code-block:: none\n\nvalue = m * i + b\n\nArgs:\nm (float) : a slope for the linear driver\nx (float) : an offset for the linear driver", "source": "codesearchnet"}
{"code": "def _overwrite_model_variables_with_average_value(self, trainable_variables):\n    trainable_variables = [v.value if isinstance(v, backend.Variable) else v for v in trainable_variables]\n    for var, average_var in zip(trainable_variables, self._model_variables_moving_average):\n        self._distribution_strategy.extended.update(var, lambda a, b: a.assign(b), args=(average_var,))", "docstring": "Overwrite model variables with their moving average values.\n\nThis function overwrites variables on each device.\n\nArgs:\nvar_list: list of model variables.", "source": "github-repos"}
{"code": "def validate_options(options):\n  \n  if not options:\n    return\n\n  for k, v in options.iteritems():\n    if not isinstance(k, str):\n      raise TypeError('option %r should be a str.' % k)\n    if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS):\n      raise ValueError('option %s is not supported.' % k)\n    if not isinstance(v, basestring):\n      raise TypeError('value %r for option %s should be of type basestring.' %\n                      (v, k))", "docstring": "Validate Google Cloud Storage options.\n\nArgs:\noptions: a str->basestring dict of options to pass to Google Cloud Storage.\n\nRaises:\nValueError: if option is not supported.\nTypeError: if option is not of type str or value of an option\nis not of type basestring.", "source": "juraj-google-style"}
{"code": "def set_storage(self, storage):\n    if isinstance(storage, BaseStorage):\n        self.storage = storage\n    elif isinstance(storage, dict):\n        if (('backend' not in storage) and ('root_dir' in storage)):\n            storage['backend'] = 'FileSystem'\n        try:\n            backend_cls = getattr(storage_package, storage['backend'])\n        except AttributeError:\n            try:\n                backend_cls = import_module(storage['backend'])\n            except ImportError:\n                self.logger.error('cannot find backend module %s', storage['backend'])\n                sys.exit()\n        kwargs = storage.copy()\n        del kwargs['backend']\n        self.storage = backend_cls(**kwargs)\n    else:\n        raise TypeError('\"storage\" must be a storage object or dict')", "docstring": "Set storage backend for downloader\n\nFor full list of storage backend supported, please see :mod:`storage`.\n\nArgs:\nstorage (dict or BaseStorage): storage backend configuration or instance", "source": "codesearchnet"}
{"code": "def DeleteGRRTempFile(path):\n    precondition.AssertType(path, Text)\n    if (not os.path.isabs(path)):\n        raise ErrorBadPath('Path must be absolute')\n    prefix = config.CONFIG['Client.tempfile_prefix']\n    directories = [GetTempDirForRoot(root) for root in config.CONFIG['Client.tempdir_roots']]\n    if (not _CheckIfPathIsValidForDeletion(path, prefix=prefix, directories=directories)):\n        msg = \"Can't delete temp file %s. Filename must start with %s or lie within any of %s.\"\n        raise ErrorNotTempFile((msg % (path, prefix, ';'.join(directories))))\n    if os.path.exists(path):\n        files.FILE_HANDLE_CACHE.Flush()\n        os.remove(path)\n    else:\n        raise ErrorNotAFile(('%s does not exist.' % path))", "docstring": "Delete a GRR temp file.\n\nTo limit possible damage the path must be absolute and either the\nfile must be within any of the Client.tempdir_roots or the file name\nmust begin with Client.tempfile_prefix.\n\nArgs:\npath: path string to file to be deleted.\n\nRaises:\nOSError: Permission denied, or file not found.\nErrorBadPath: Path must be absolute.\nErrorNotTempFile: Filename must start with Client.tempfile_prefix.\nErrorNotAFile: File to delete does not exist.", "source": "codesearchnet"}
{"code": "def apply_with_summary(input_layer, operation, *op_args, **op_kwargs):\n    return layers.apply_activation(input_layer.bookkeeper, input_layer.tensor, operation, activation_args=op_args, activation_kwargs=op_kwargs)", "docstring": "Applies the given operation to `input_layer` and create a summary.\n\nArgs:\ninput_layer: The input layer for this op.\noperation: An operation that takes a tensor and the supplied args.\n*op_args: Extra arguments for operation.\n**op_kwargs: Keyword arguments for the operation.\nReturns:\nA new layer with operation applied.", "source": "codesearchnet"}
{"code": "def open(self, **params):\n    logger.info('opening telnet')\n    self.port = params['port']\n    self.ip = params['ip']\n    self.tn = None\n    self._init()", "docstring": "Open telnet connection\n\nArgs:\nparams (dict), must contain two parameters \"ip\" - ip address or hostname and \"port\" - port number\n\nExample:\nparams = {'port': 23, 'ip': 'localhost'}", "source": "codesearchnet"}
{"code": "def resample(self, data, cache_dir=None, mask_area=None, **kwargs):\n    if ((mask_area is None) and isinstance(self.source_geo_def, SwathDefinition)):\n        mask_area = True\n    if mask_area:\n        if isinstance(self.source_geo_def, SwathDefinition):\n            geo_dims = self.source_geo_def.lons.dims\n        else:\n            geo_dims = ('y', 'x')\n        flat_dims = [dim for dim in data.dims if (dim not in geo_dims)]\n        if np.issubdtype(data.dtype, np.integer):\n            kwargs['mask'] = (data == data.attrs.get('_FillValue', np.iinfo(data.dtype.type).max))\n        else:\n            kwargs['mask'] = data.isnull()\n        kwargs['mask'] = kwargs['mask'].all(dim=flat_dims)\n    cache_id = self.precompute(cache_dir=cache_dir, **kwargs)\n    return self.compute(data, cache_id=cache_id, **kwargs)", "docstring": "Resample `data` by calling `precompute` and `compute` methods.\n\nOnly certain resampling classes may use `cache_dir` and the `mask`\nprovided when `mask_area` is True. The return value of calling the\n`precompute` method is passed as the `cache_id` keyword argument\nof the `compute` method, but may not be used directly for caching. It\nis up to the individual resampler subclasses to determine how this\nis used.\n\nArgs:\ndata (xarray.DataArray): Data to be resampled\ncache_dir (str): directory to cache precomputed results\n(default False, optional)\nmask_area (bool): Mask geolocation data where data values are\ninvalid. This should be used when data values\nmay affect what neighbors are considered valid.\n\nReturns (xarray.DataArray): Data resampled to the target area", "source": "codesearchnet"}
{"code": "def GetNTFSFileEntryByPathSpec(self, path_spec):\n    location = getattr(path_spec, 'location', None)\n    mft_attribute = getattr(path_spec, 'mft_attribute', None)\n    mft_entry = getattr(path_spec, 'mft_entry', None)\n    if ((mft_attribute is not None) and (mft_entry is not None)):\n        fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)\n    elif (location is not None):\n        fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)\n    else:\n        raise errors.PathSpecError('Path specification missing location and MFT entry.')\n    return fsntfs_file_entry", "docstring": "Retrieves the NTFS file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\npyfsntfs.file_entry: NTFS file entry.\n\nRaises:\nPathSpecError: if the path specification is missing location and\nMFT entry.", "source": "codesearchnet"}
{"code": "def get(self, *, search, limit=0, headers=None):\n    return self.transport.forward_request(method='GET', path=self.path, params={'search': search, 'limit': limit}, headers=headers)", "docstring": "Retrieves the assets that match a given text search string.\n\nArgs:\nsearch (str): Text search string.\nlimit (int): Limit the number of returned documents. Defaults to\nzero meaning that it returns all the matching assets.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\n:obj:`list` of :obj:`dict`: List of assets that match the query.", "source": "codesearchnet"}
{"code": "def CopyTextToLabel(cls, text, prefix=''):\n    text = '{0:s}{1:s}'.format(prefix, text)\n    return cls._INVALID_LABEL_CHARACTERS_REGEX.sub('_', text)", "docstring": "Copies a string to a label.\n\nA label only supports a limited set of characters therefore\nunsupported characters are replaced with an underscore.\n\nArgs:\ntext (str): label text.\nprefix (Optional[str]): label prefix.\n\nReturns:\nstr: label.", "source": "codesearchnet"}
{"code": "def _maybe_track_assets(self, graph_def):\n    asset_tracker = {}\n    for node in graph_def.node:\n        if node.name.startswith('FileIdentity'):\n            asset_tracker[node.input[0]] = None\n    if not asset_tracker:\n        return {}\n    for node in graph_def.node:\n        if node.name in asset_tracker:\n            tensor_proto = node.attr['value'].tensor\n            with context.eager_mode(), ops.device('CPU'):\n                node_value = gen_parsing_ops.parse_tensor(tensor_proto.SerializeToString(), dtypes.string).numpy()\n            asset_tracker[node.name] = [self._track_trackable(asset.Asset(n), name=node.name + '_' + str(i), overwrite=True) for i, n in enumerate(node_value)]\n    return asset_tracker", "docstring": "Finds and tracks nodes in `graph_def` that refer to asset files.\n\nArgs:\ngraph_def: Serialized graph representation of this dataset.\n\nReturns:\nA dictionary mapping the node name of an asset constant to a tracked\n`asset.Asset` object.", "source": "github-repos"}
{"code": "def fulfill_order(self, order_number, site_code=None, email_opt_in=False):\n    \n    max_fulfillment_retries = get_configuration('MAX_FULFILLMENT_RETRIES', site_code=site_code)\n    api = get_ecommerce_client(site_code=site_code)\n    try:\n        logger.info('Requesting fulfillment of order [%s].', order_number)\n        api.orders(order_number).fulfill.put(email_opt_in=email_opt_in)\n    except exceptions.HttpClientError as exc:\n        status_code = exc.response.status_code  \n        if status_code == 406:\n            \n            logger.info('Order [%s] has already been fulfilled. Ignoring.', order_number)\n            raise Ignore()\n        else:\n            \n            logger.warning(\n                'Fulfillment of order [%s] failed because of HttpClientError. Retrying',\n                order_number,\n                exc_info=True\n            )\n            _retry_order(self, exc, max_fulfillment_retries, order_number)\n\n    except (exceptions.HttpServerError, exceptions.Timeout, SSLError) as exc:\n        \n        _retry_order(self, exc, max_fulfillment_retries, order_number)", "docstring": "Fulfills an order.\n\nArguments:\norder_number (str): Order number indicating which order to fulfill.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def replace_name_with_id(cls, name):\n    try:\n        int(name)\n        return name\n    except ValueError:\n        pass\n    if (name.split('-')[0] in Meta._MODEL_ABBREVS):\n        return int(name.split('-', 1)[1])\n    try:\n        result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)\n        if result:\n            return result['id']\n    except pulsarpy.elasticsearch_utils.MultipleHitsException as e:\n        raise\n    raise RecordNotFound(\"Name '{}' for model '{}' not found.\".format(name, cls.__name__))", "docstring": "Used to replace a foreign key reference using a name with an ID. Works by searching the\nrecord in Pulsar and expects to find exactly one hit. First, will check if the foreign key\nreference is an integer value and if so, returns that as it is presumed to be the foreign key.\n\nRaises:\n`pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search.\n`pulsarpy.models.RecordNotFound`: No results were produced from the name search.", "source": "codesearchnet"}
{"code": "def refresh(self) -> bool:\n    with self._lock:\n        min_pending_timestamp = WatermarkManager.WATERMARK_POS_INF\n        has_pending_elements = False\n        for input_bundle in self._pending:\n            for wv in input_bundle.get_elements_iterable():\n                has_pending_elements = True\n                if wv.timestamp < min_pending_timestamp:\n                    min_pending_timestamp = wv.timestamp\n        pending_holder = WatermarkManager.WATERMARK_POS_INF\n        if has_pending_elements:\n            pending_holder = min_pending_timestamp - TIME_GRANULARITY\n        input_watermarks = [tw.output_watermark for tw in self._input_transform_watermarks]\n        input_watermarks.append(WatermarkManager.WATERMARK_POS_INF)\n        producer_watermark = min(input_watermarks)\n        self._input_watermark = max(self._input_watermark, min(pending_holder, producer_watermark))\n        earliest_hold = WatermarkManager.WATERMARK_POS_INF\n        for hold in self._keyed_earliest_holds.values():\n            if hold < earliest_hold:\n                earliest_hold = hold\n        new_output_watermark = min(self._input_watermark, earliest_hold)\n        advanced = new_output_watermark > self._output_watermark\n        self._output_watermark = new_output_watermark\n        return advanced", "docstring": "Refresh the watermark for a given transform.\n\nThis method looks at the watermark coming from all input PTransforms, and\nthe timestamp of the minimum element, as well as any watermark holds.\n\nReturns:\nTrue if the watermark has advanced, and False if it has not.", "source": "github-repos"}
{"code": "def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):\n    conv = CONFIG[dim]['conv']\n    with tf.variable_scope(scope):\n        x = conv(x, output_channels, 1, strides=stride, padding='SAME', activation=None)\n        return x", "docstring": "Downsamples 'x' by `stride` using a 1x1 convolution filter.\n\nArgs:\nx: input tensor of size [N, H, W, C]\noutput_channels: Desired number of output channels.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\nstride: What stride to use. Usually 1 or 2.\nscope: Optional variable scope.\n\nReturns:\nA downsampled tensor of size [N, H/2, W/2, output_channels] if stride\nis 2, else returns a tensor of size [N, H, W, output_channels] if\nstride is 1.", "source": "codesearchnet"}
{"code": "def compile_regex_from_str(self, ft_str):\n    sequence = []\n    for m in re.finditer('\\\\[([^]]+)\\\\]', ft_str):\n        ft_mask = fts(m.group(1))\n        segs = self.all_segs_matching_fts(ft_mask)\n        sub_pat = '({})'.format('|'.join(segs))\n        sequence.append(sub_pat)\n    pattern = ''.join(sequence)\n    regex = re.compile(pattern)\n    return regex", "docstring": "Given a string describing features masks for a sequence of segments,\nreturn a regex matching the corresponding strings.\n\nArgs:\nft_str (str): feature masks, each enclosed in square brackets, in\nwhich the features are delimited by any standard delimiter.\n\nReturns:\nPattern: regular expression pattern equivalent to `ft_str`", "source": "codesearchnet"}
{"code": "def get_doc_sources(api_name):\n    if api_name == tf_export.TENSORFLOW_API_NAME:\n        return _TENSORFLOW_DOC_SOURCES\n    if api_name == tf_export.KERAS_API_NAME:\n        return _KERAS_DOC_SOURCES\n    return {}", "docstring": "Get a map from module to a DocSource object.\n\nArgs:\napi_name: API you want to generate (e.g. `tensorflow` or `estimator`).\n\nReturns:\nMap from module name to DocSource object.", "source": "github-repos"}
{"code": "def add_server(self, name, prefer=False):\n        \n        if not name or re.match(r'^[\\s]+$', name):\n            raise ValueError('ntp server name must be specified')\n        if prefer:\n            name = '%s prefer' % name\n        cmd = self.command_builder('ntp server', value=name)\n        return self.configure(cmd)", "docstring": "Add or update an NTP server entry to the node config\n\nArgs:\nname (string): The IP address or FQDN of the NTP server.\nprefer (bool): Sets the NTP server entry as preferred if True.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "juraj-google-style"}
{"code": "def size(self):\n    return sum((len(self._dump_tensor_data[device_name]) for device_name in self._dump_tensor_data))", "docstring": "Total number of dumped tensors in the dump root directory.\n\nReturns:\n(`int`) The total number of dumped tensors in the dump root directory.", "source": "github-repos"}
{"code": "def _dict_to_tensor(self, x, k1, k2, k3):\n    return array_ops_stack.stack([array_ops_stack.stack([array_ops_stack.stack([x[i, j, k] for k in range(k3)]) for j in range(k2)]) for i in range(k1)])", "docstring": "Convert a dictionary to a tensor.\n\nArgs:\nx: A k1 * k2 dictionary.\nk1: First dimension of x.\nk2: Second dimension of x.\nk3: Third dimension of x.\n\nReturns:\nA k1 * k2 * k3 tensor.", "source": "github-repos"}
{"code": "def local_attention_1d(q, k, v, length_dim, key_dim, value_dim, autoregressive=True, length_dim_num_splits=1, radius=128, sequence_id=1, attention_kwargs=None):\n    length_per_split = (length_dim.size \n    block_length = max(radius, 128)\n    while ((length_per_split % block_length) != 0):\n        block_length -= 1\n    query_block_length = mtf.Dimension('query_block_length', block_length)\n    memory_block_length = mtf.Dimension('memory_block_length', block_length)\n    num_blocks = mtf.Dimension(length_dim.name, (length_dim.size \n\n    def _reshape_query(x):\n        return mtf.replace_dimensions(x, length_dim, [num_blocks, query_block_length])\n\n    def _reshape_memory(x):\n        x = mtf.replace_dimensions(x, length_dim, [num_blocks, memory_block_length])\n        return (mtf.left_halo_exchange if autoregressive else mtf.halo_exchange)(x, num_blocks, memory_block_length, radius)\n    q = _reshape_query(q)\n    k = _reshape_memory(k)\n    if v:\n        v = _reshape_memory(v)\n    else:\n        v = k\n    if (sequence_id is None):\n        sequence_id = 1\n    if ((not isinstance(sequence_id, mtf.Tensor)) or (length_dim not in sequence_id.shape.dims)):\n        sequence_id += mtf.zeros(q.mesh, [length_dim], tf.int32)\n    q_sequence_id = _reshape_query(sequence_id)\n    m_sequence_id = _reshape_memory(sequence_id)\n    pos = mtf.range(q.mesh, length_dim, dtype=tf.int32)\n    q_pos = _reshape_query(pos)\n    m_pos = _reshape_memory(pos)\n    padded_memory_block_length = mtf.Dimension('memory_block_length', (((1 if autoregressive else 2) * radius) + block_length))\n    relative_position = (m_pos - q_pos)\n    illegal = mtf.not_equal(q_sequence_id, m_sequence_id)\n    illegal = mtf.logical_or(illegal, mtf.less_equal(relative_position, (- radius)))\n    illegal = mtf.logical_or(illegal, mtf.greater(relative_position, (0 if autoregressive else radius)))\n    mask = (mtf.cast(illegal, q.dtype) * (- 1000000000.0))\n    o = attention(q, k, v, padded_memory_block_length, key_dim, value_dim, mask, **attention_kwargs)\n    return mtf.replace_dimensions(o, [num_blocks, query_block_length], length_dim)", "docstring": "Attention to the a neighborood around the source.\n\nIf autoregressive, then query position p can only see memory positions\nin the range (p - radius, p].\n\nIf not autoregressive, then query position p can only see memory positions\nin the range (p - window_size, p + radius].\n\nArgs:\nq: a Tensor containing length_dim\nk: a Tensor containing length_dim\nv: an optional Tensor containing length_dim.  If none then uses v=k.\nlength_dim: a Dimension\nkey_dim: a Dimension (the channels dimension of q and k)\nvalue_dim: a Dimension (the channels dimension of v)\nautoregressive: a boolean\nlength_dim_num_splits: an optional integer indicating how many ways the\nlength dimension is split\nradius: an integer\nsequence_id: a Tensor or an integer\nattention_kwargs: optional keyword arguments for attention()\n\nReturns:\na Tensor with the shape x.shape - key_dim + value_dim\n\nRaises:\nValueError: if channels or depth don't match.", "source": "codesearchnet"}
{"code": "def server_url_for_websocket_url(url):\n    \n    if url.startswith(\"ws:\"):\n        reprotocoled = \"http\" + url[2:]\n    elif url.startswith(\"wss:\"):\n        reprotocoled = \"https\" + url[3:]\n    else:\n        raise ValueError(\"URL has non-websocket protocol \" + url)\n    if not reprotocoled.endswith(\"/ws\"):\n        raise ValueError(\"websocket URL does not end in /ws\")\n    return reprotocoled[:-2]", "docstring": "Convert an ``ws(s)`` URL for a Bokeh server into the appropriate\n``http(s)`` URL for the websocket endpoint.\n\nArgs:\nurl (str):\nAn ``ws(s)`` URL ending in ``/ws``\n\nReturns:\nstr:\nThe corresponding ``http(s)`` URL.\n\nRaises:\nValueError:\nIf the input URL is not of the proper form.", "source": "juraj-google-style"}
{"code": "def bind_rows(df, other, join='outer', ignore_index=False):\n    df = pd.concat([df, other], join=join, ignore_index=ignore_index, axis=0)\n    return df", "docstring": "Binds DataFrames \"vertically\", stacking them together. This is equivalent\nto `pd.concat` with `axis=0`.\n\nArgs:\ndf (pandas.DataFrame): Top DataFrame (passed in via pipe).\nother (pandas.DataFrame): Bottom DataFrame.\n\nKwargs:\njoin (str): One of `\"outer\"` or `\"inner\"`. Outer join will preserve\ncolumns not present in both DataFrames, whereas inner joining will\ndrop them.\nignore_index (bool): Indicates whether to consider pandas indices as\npart of the concatenation (defaults to `False`).", "source": "codesearchnet"}
{"code": "def learn_one(self, x: beam.Row) -> None:\n    if len(x.__dict__) != 1:\n        raise ValueError('RobustZScore.learn_one expected univariate input, but got %s', str(x))\n    v = next(iter(x))\n    self._mad_tracker.push(v)", "docstring": "Updates the `MadTracker` with a new data point.\n\nArgs:\nx: A `beam.Row` containing a single numerical value.", "source": "github-repos"}
{"code": "def tables(self):\n    select = ('SELECT name FROM sqlite_master',)\n    query = self.execute(*select)\n    result = query.fetchall()\n    return [row[0] for row in result]", "docstring": "Returns a list of table names.\n\nExample:\n\n>>> db.tables\n[\"bar\", \"foo\"]\n\nReturns:\n\nlist of str: One string for each table name.", "source": "codesearchnet"}
{"code": "def get_all_publications(return_namedtuples=True):\n    \n    sources = [\n        ben_cz.get_publications,\n        grada_cz.get_publications,\n        cpress_cz.get_publications,\n        zonerpress_cz.get_publications,\n    ]\n\n    \n    publications = []\n    for source in sources:\n        publications.extend(\n            filters.filter_publications(source())\n        )\n\n    \n    if return_namedtuples:\n        publications = map(lambda x: x.to_namedtuple(), publications)\n\n    return publications", "docstring": "Get list publications from all available source.\n\nArgs:\nreturn_namedtuples (bool, default True): Convert :class:`.Publication`\nstructures to namedtuples (used in AMQP\ncommunication).\n\nReturns:\nlist: List of :class:`.Publication` structures converted to namedtuple.", "source": "juraj-google-style"}
{"code": "def unwrap_model(model: nn.Module, recursive: bool=False) -> nn.Module:\n    if is_accelerate_available():\n        kwargs = {}\n        if recursive:\n            if not is_accelerate_available('0.29.0'):\n                raise RuntimeError('Setting `recursive=True` to `unwrap_model` requires `accelerate` v0.29.0. Please upgrade your version of accelerate')\n            else:\n                kwargs['recursive'] = recursive\n        return extract_model_from_parallel(model, **kwargs)\n    elif hasattr(model, 'module'):\n        return unwrap_model(model.module)\n    else:\n        return model", "docstring": "Recursively unwraps a model from potential containers (as used in distributed training).\n\nArgs:\nmodel (`torch.nn.Module`): The model to unwrap.\nrecursive (`bool`, *optional*, defaults to `False`):\nWhether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers\nrecursively, not just the top-level distributed containers.", "source": "github-repos"}
{"code": "def finalize_options(self):\n        \n        self.cwd = os.path.abspath(os.path.dirname(__file__))\n        self.test_dir = os.path.join(self.cwd, 'tests')", "docstring": "Finalizes the command's options.\n\nArgs:\nself (CoverageCommand): the ``CoverageCommand`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def plot(self, tag, mpl_plt, step=None, close_plot=True):\n    \n    if step is None:\n      step = self._step\n    else:\n      self._step = step\n    fig = mpl_plt.get_current_fig_manager()\n    img_w, img_h = fig.canvas.get_width_height()\n    image_buf = io.BytesIO()\n    mpl_plt.savefig(image_buf, format='png')\n    image_summary = Summary.Image(\n        encoded_image_string=image_buf.getvalue(),\n        colorspace=4,  \n        height=img_h,\n        width=img_w)\n    summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])\n    self.add_summary(summary, step)\n    if close_plot:\n      mpl_plt.close()", "docstring": "Saves matplotlib plot output to summary image.\n\nArgs:\ntag: str: label for this data\nmpl_plt: matplotlib stateful pyplot object with prepared plotting state\nstep: int: training step\nclose_plot: bool: automatically closes plot", "source": "juraj-google-style"}
{"code": "def __init__(self, mutate_fn, throttle_rampup=True, hint_num_workers=_DEFAULT_HINT_NUM_WORKERS):\n    self._mutate_fn = mutate_fn\n    self._throttle_rampup = throttle_rampup\n    self._hint_num_workers = hint_num_workers", "docstring": "Initializes a Mutate transform.\n\nArgs:\nmutate_fn: Instance of `DatastoreMutateFn` to use.\nthrottle_rampup: Whether to enforce a gradual ramp-up.\nhint_num_workers: A hint for the expected number of workers, used to\nestimate appropriate limits during ramp-up throttling.", "source": "github-repos"}
{"code": "def __call__(self, fn):\n        \n\n        def fail(app, *args, **kwargs):\n            \n\n            \n            if isinstance(self.enable, bool):\n                enabled = self.enable\n                app.tcex.log.debug('Fail on input is ({}).'.format(self.enable))\n            else:\n                enabled = getattr(app.args, self.enable)\n                app.tcex.log.debug('Fail on input is ({}) for ({}).'.format(enabled, self.enable))\n                if not isinstance(enabled, bool):\n                    app.tcex.playbook.exit(\n                        1, 'The enable value must be a boolean for fail on input.'\n                    )\n\n            if enabled is True:\n                if self.arg is None:\n                    \n                    arg_name = 'input'\n                    conditional_value = app.tcex.playbook.read(list(args)[0], embedded=False)\n                else:\n                    \n                    arg_name = self.arg\n                    conditional_value = app.tcex.playbook.read(\n                        getattr(app.args, self.arg), embedded=False\n                    )\n\n                if conditional_value in self.values:\n                    app.tcex.log.error(\n                        'Invalid value ({}) provided for ({}).'.format(conditional_value, arg_name)\n                    )\n                    app.tcex.exit(1, self.msg)\n\n            return fn(app, *args, **kwargs)\n\n        return fail", "docstring": "Implement __call__ function for decorator.\n\nArgs:\nfn (function): The decorated function.\n\nReturns:\nfunction: The custom decorator function.", "source": "juraj-google-style"}
{"code": "def get_names(file_dir, files):\n    \n    \n    \n    total_list = []\n    name_list = []\n    get_sub = False\n    for path, subdir, dir_files in os.walk(file_dir):\n        if not get_sub:\n            total_list = subdir[:]\n            get_sub = True\n        else:\n            break\n    for user in total_list:\n        has_file = True\n        for f in files:\n            file_path = file_dir + user + \"/\" + f + \".txt\"\n            if not os.path.exists(file_path):\n                has_file = False\n                break\n        if has_file:\n            name_list.append(user)\n    if len(name_list) == 0:\n        print(\"********Error: Cannot find any user who completes the files*************\", file=ERROR_LOG)\n    return name_list", "docstring": "Get the annotator name list based on a list of files\nArgs:\nfile_dir: AMR file folder\nfiles: a list of AMR names, e.g. nw_wsj_0001_1\n\nReturns:\na list of user names who annotate all the files", "source": "juraj-google-style"}
{"code": "def shot_noise(x, severity=1):\n  \n  c = [60, 25, 12, 5, 3][severity - 1]\n  x = np.array(x) / 255.\n  x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Shot noise corruption to images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Added shot noise.", "source": "juraj-google-style"}
{"code": "def export_obj(filename, cutout, level=0):\n    \n    if \".obj\" not in filename:\n        filename = filename + \".obj\"\n\n    vs, fs = mcubes.marching_cubes(cutout, level)\n    mcubes.export_obj(vs, fs, filename)", "docstring": "Converts a dense annotation to a obj, using Marching Cubes (PyMCubes).\n\nArguments:\nfilename (str): The filename to write out to\ncutout (numpy.ndarray): The dense annotation\nlevel (int): The level at which to run mcubes\n\nReturns:\nboolean success", "source": "juraj-google-style"}
{"code": "def _GetNextLogCountPerToken(token):\n    global _log_counter_per_token\n    _log_counter_per_token[token] = (1 + _log_counter_per_token.get(token, (- 1)))\n    return _log_counter_per_token[token]", "docstring": "Wrapper for _log_counter_per_token.\n\nArgs:\ntoken: The token for which to look up the count.\n\nReturns:\nThe number of times this function has been called with\n*token* as an argument (starting at 0)", "source": "codesearchnet"}
{"code": "def complete_multipart_upload(self, request):\n    parts = {'Parts': request.parts}\n    try:\n        self.client.complete_multipart_upload(Bucket=request.bucket, Key=request.object, UploadId=request.upload_id, MultipartUpload=parts)\n    except Exception as e:\n        raise messages.S3ClientError(str(e), get_http_error_code(e))", "docstring": "Completes a multipart upload to S3\n\nArgs:\nrequest: (UploadPartRequest) input message\nReturns:\n(Void) The response message.", "source": "github-repos"}
{"code": "def remat(f):\n    return jax.checkpoint(f)", "docstring": "Implementation of rematerialization.\n\nArgs:\nf: The function or operation to rematerialize.\nReturns:\nA function wrapping f that defines a custom gradient, which\nrecomputes f on the backwards pass of a gradient call.", "source": "github-repos"}
{"code": "async def inspect(self, task_id: str) -> Mapping[(str, Any)]:\n    response = (await self.docker._query_json('tasks/{task_id}'.format(task_id=task_id), method='GET'))\n    return response", "docstring": "Return info about a task\n\nArgs:\ntask_id: is ID of the task", "source": "codesearchnet"}
{"code": "def __init__(self, batch_url=None, retryable_codes=None,\n                 response_encoding=None):\n        \n        self.api_requests = []\n        self.retryable_codes = retryable_codes or []\n        self.batch_url = batch_url or 'https:\n        self.response_encoding = response_encoding", "docstring": "Initialize a batch API request object.\n\nArgs:\nbatch_url: Base URL for batch API calls.\nretryable_codes: A list of integer HTTP codes that can be retried.\nresponse_encoding: The encoding type of response content.", "source": "juraj-google-style"}
{"code": "def variable_product(variables: list[cfg.Variable]) -> Iterable[tuple[cfg.Binding, ...]]:\n    return itertools.product(*(v.bindings for v in variables))", "docstring": "Take the Cartesian product of a number of Variables.\n\nArgs:\nvariables: A sequence of Variables.\n\nReturns:\nA list of lists of Values, where each sublist has one element from each\nof the given Variables.", "source": "github-repos"}
{"code": "def get_dataset(self):\n    package_id = self.data.get('package_id')\n    if (package_id is None):\n        raise HDXError('Resource has no package id!')\n    return hdx.data.dataset.Dataset.read_from_hdx(package_id)", "docstring": "Return dataset containing this resource\n\nReturns:\nhdx.data.dataset.Dataset: Dataset containing this resource", "source": "codesearchnet"}
{"code": "def get_reduced_symbols(symbols):\n    reduced_symbols = []\n    for ss in symbols:\n        if (not (ss in reduced_symbols)):\n            reduced_symbols.append(ss)\n    return reduced_symbols", "docstring": "Reduces expanded list of symbols.\n\nArgs:\nsymbols: list containing any chemical symbols as often as\nthe atom appears in the structure\n\nReturns:\nreduced_symbols: any symbols appears only once", "source": "codesearchnet"}
{"code": "def sign(allocate_quota_request):\n    \n    if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):\n        raise ValueError(u'Invalid request')\n    op = allocate_quota_request.allocateOperation\n    if op is None or op.methodName is None or op.consumerId is None:\n        logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request)\n        raise ValueError(u'allocate_quota request must be initialized with an operation')\n    md5 = hashlib.md5()\n    md5.update(op.methodName.encode('utf-8'))\n    md5.update(b'\\x00')\n    md5.update(op.consumerId.encode('utf-8'))\n    if op.labels:\n        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n    for value_set in op.quotaMetrics:\n        md5.update(b'\\x00')\n        md5.update(value_set.metricName.encode('utf-8'))\n        for mv in value_set.metricValues:\n            metric_value.update_hash(md5, mv)\n\n    md5.update(b'\\x00')\n    return md5.digest()", "docstring": "Obtains a signature for an operation in a `AllocateQuotaRequest`\n\nArgs:\nop (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an\noperation used in a `AllocateQuotaRequest`\n\nReturns:\nstring: a secure hash generated from the operation", "source": "juraj-google-style"}
{"code": "def extend(self, name, opts, info):\n        \n        tifo = self.info.copy()\n        tifo.update(info)\n\n        topt = self.opts.copy()\n        topt.update(opts)\n\n        tobj = self.__class__(self.modl, name, tifo, topt)\n        tobj.subof = self.name\n        return tobj", "docstring": "Extend this type to construct a sub-type.\n\nArgs:\nname (str): The name of the new sub-type.\nopts (dict): The type options for the sub-type.\ninfo (dict): The type info for the sub-type.\n\nReturns:\n(synapse.types.Type): A new sub-type instance.", "source": "juraj-google-style"}
{"code": "def to_bqm(self, model):\n        \n        linear = ((v, float(model.get_py_value(bias)))\n                  for v, bias in self.linear.items())\n        quadratic = ((u, v, float(model.get_py_value(bias)))\n                     for (u, v), bias in self.quadratic.items())\n        offset = float(model.get_py_value(self.offset))\n\n        return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)", "docstring": "Given a pysmt model, return a bqm.\n\nAdds the values of the biases as determined by the SMT solver to a bqm.\n\nArgs:\nmodel: A pysmt model.\n\nReturns:\n:obj:`dimod.BinaryQuadraticModel`", "source": "juraj-google-style"}
{"code": "def flush(self, force=False):\n    \n    super(GCSRecordsPool, self).flush()\n    if force:\n      extra_padding = self._buf_size % self._GCS_BLOCK_SIZE\n      if extra_padding > 0:\n        self._write(\"\\x00\" * (self._GCS_BLOCK_SIZE - extra_padding))\n    self._filehandle.flush()", "docstring": "Flush pool contents.\n\nArgs:\nforce: Inserts additional padding to achieve the minimum block size\nrequired for GCS.", "source": "juraj-google-style"}
{"code": "def destroy_dns(app='', env='dev', **_):\n    client = boto3.Session(profile_name=env).client('route53')\n    generated = get_details(app=app, env=env)\n    record = generated.dns_elb()\n    zone_ids = get_dns_zone_ids(env=env, facing='external')\n    for zone_id in zone_ids:\n        record_sets = client.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=record, StartRecordType='CNAME', MaxItems='1')\n        for found_record in record_sets['ResourceRecordSets']:\n            assert destroy_record(client=client, found_record=found_record, record=record, zone_id=zone_id)\n    return True", "docstring": "Destroy DNS records.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregions (str): AWS region.\n\nReturns:\nbool: True upon successful completion.", "source": "codesearchnet"}
{"code": "def __send_notification(self, message, title, title_link='', color='good',\n                            fields='', log_level=LogLv.INFO):\n        \n        if log_level < self.log_level:\n            return None\n\n        payload = self.__build_payload(message, title, title_link, color, fields)\n\n        try:\n            response = self.__post(payload)\n\n        except Exception:\n            raise Exception(traceback.format_exc())\n\n        return response", "docstring": "Send a message to a channel.\nArgs:\ntitle: Message title.\ntitle_link: Link of the message title.\nmessage: Message body.\ncolor: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',\n'danger' or any hex color code.\n\nReturns:\nresponse: Response of Slack API.\n\nRaises:\nException:", "source": "juraj-google-style"}
{"code": "def reward_scope(self,\n                     state: Sequence[tf.Tensor],\n                     action: Sequence[tf.Tensor],\n                     next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:\n        \n        scope = {}\n        scope.update(self.non_fluents_scope())\n        scope.update(self.state_scope(state))\n        scope.update(self.action_scope(action))\n        scope.update(self.next_state_scope(next_state))\n        return scope", "docstring": "Returns the complete reward fluent scope for the\ncurrent `state`, `action` fluents, and `next_state` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\nnext_state (Sequence[tf.Tensor]): The next state fluents.\n\nReturns:\nA mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def __set_unkown_effect(self, hgvs_string):\n    unknown_effect_list = ['?', '(=)', '=']\n    if (hgvs_string in unknown_effect_list):\n        self.unknown_effect = True\n    elif ('(' in hgvs_string):\n        self.unknown_effect = True\n    else:\n        self.unknown_effect = False\n    if ('?' in hgvs_string):\n        self.is_missing_info = True\n    else:\n        self.is_missing_info = False", "docstring": "Sets a flag for unkown effect according to HGVS syntax. The\nCOSMIC database also uses unconventional questionmarks to denote\nmissing information.\n\nArgs:\nhgvs_string (str): hgvs syntax with \"p.\" removed", "source": "codesearchnet"}
{"code": "def get_type_from_api_entity(self, api_entity):\n    merged = self.group_types_data.copy()\n    merged.update(self.indicator_types_data)\n    print(merged)\n    for (key, value) in merged.items():\n        if (value.get('apiEntity') == api_entity):\n            return key\n    return None", "docstring": "Returns the object type as a string given a api entity.\n\nArgs:\napi_entity:\n\nReturns:", "source": "codesearchnet"}
{"code": "def post_slack_message(message=None, channel=None, username=None, icon_emoji=None):\n    \n    LOG.debug('Slack Channel: %s\\nSlack Message: %s', channel, message)\n    slack = slacker.Slacker(SLACK_TOKEN)\n    try:\n        slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji)\n        LOG.info('Message posted to %s', channel)\n    except slacker.Error:\n        LOG.info(\"error posted message to %s\", channel)", "docstring": "Format the message and post to the appropriate slack channel.\n\nArgs:\nmessage (str): Message to post to slack\nchannel (str): Desired channel. Must start with #", "source": "juraj-google-style"}
{"code": "def dates_in_range(start_date, end_date):\n    \n    return [\n        start_date + timedelta(n)\n        for n in range(int((end_date - start_date).days))\n    ]", "docstring": "Returns all dates between two dates.\n\nInclusive of the start date but not the end date.\n\nArgs:\nstart_date (datetime.date)\nend_date (datetime.date)\n\nReturns:\n(list) of datetime.date objects", "source": "juraj-google-style"}
{"code": "def create_position_ids_from_input_ids(self, input_ids, padding_idx):\n    mask = input_ids.ne(padding_idx).int()\n    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask\n    return incremental_indices.long() + padding_idx", "docstring": "Args:\nReplace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding\nsymbols are ignored. This is modified from fairseq's `utils.make_positions`.\nx: torch.Tensor x:\nReturns: torch.Tensor", "source": "github-repos"}
{"code": "def _is_ready(self, as_of):\n    if self.is_one_off():\n        return (self.initial_billing_cycle.date_range.lower <= as_of)\n    else:\n        return True", "docstring": "Is the RecurringCost ready to be enacted as of the date `as_of`\n\nThis determines if `as_of` precedes the start of `initial_billing_cycle`. If so,\nwe should not be enacting this RecurringCost yet.\n\nArgs:\nas_of (Date):", "source": "codesearchnet"}
{"code": "def update_service(name, service_map):\n    \n    if name in service_map:\n        service = service_map[name]\n        data = service.update()\n        if not data:\n            logger.warning('no data received for service: %s', name)\n        else:\n            data['service_name'] = service.service_name\n            CACHE[name] = dict(data=data, updated=datetime.now())\n    else:\n        logger.warning('service not found: %s', name)\n    if name in CACHE:\n        return add_time(CACHE[name])\n    return {}", "docstring": "Get an update from the specified service.\n\nArguments:\nname (:py:class:`str`): The name of the service.\nservice_map (:py:class:`dict`): A mapping of service names to\n:py:class:`flash.service.core.Service` instances.\n\nReturns:\n:py:class:`dict`: The updated data.", "source": "juraj-google-style"}
{"code": "def get_history(self, filters=(), pagesize=15, offset=0):\n    response = None\n    try:\n        response = requests.get(urls.history(self._giid), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)}, params={'offset': int(offset), 'pagesize': int(pagesize), 'notificationCategories': filters})\n    except requests.exceptions.RequestException as ex:\n        raise RequestError(ex)\n    _validate_response(response)\n    return json.loads(response.text)", "docstring": "Get recent events\n\nArgs:\nfilters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',\n'TECHNICAL', 'SOS', 'WARNING', 'LOCK',\n'UNLOCK'\npagesize (int): Number of events to display\noffset (int): Skip pagesize * offset first events", "source": "codesearchnet"}
{"code": "def authenticate(self, request, username=None, password=None):\n        \n        if not hasattr(settings, 'MASTER_PASSWORD'):\n            logging.debug(\"Master password not set.\")\n            return None\n        if check_password(password, settings.MASTER_PASSWORD):\n            try:\n                user = User.objects.get(username__iexact=username)\n            except User.DoesNotExist:\n                if settings.MASTER_NOTIFY:\n                    logger.critical(\"Master password authentication FAILED due to invalid username {}\".format(username))\n                logger.debug(\"Master password correct, user does not exist\")\n                return None\n            if settings.MASTER_NOTIFY:\n                logger.critical(\"Master password authentication SUCCEEDED with username {}\".format(username))\n            logger.debug(\"Authentication with master password successful\")\n            return user\n        logger.debug(\"Master password authentication failed\")\n        return None", "docstring": "Authenticate a username-password pair.\n\nCreates a new user if one is not already in the database.\n\nArgs:\nusername\nThe username of the `User` to authenticate.\npassword\nThe master password.\n\nReturns:\n`User`", "source": "juraj-google-style"}
{"code": "def link_asset_content_key(access_token, asset_id, encryptionkey_id, ams_redirected_rest_endpoint):\n    path = '/Assets'\n    full_path = ''.join([path, \"('\", asset_id, \"')\", '/$links/ContentKeys'])\n    full_path_encoded = urllib.parse.quote(full_path, safe='')\n    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n    uri = ''.join([ams_redirected_rest_endpoint, 'ContentKeys', \"('\", encryptionkey_id, \"')\"])\n    body = (('{\"uri\": \"' + uri) + '\"}')\n    return do_ams_post(endpoint, full_path_encoded, body, access_token)", "docstring": "Link Media Service Asset and Content Key.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nasset_id (str): A Media Service Asset ID.\nencryption_id (str): A Media Service Encryption ID.\nams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def __init__(self, model_layers, *args, **kwargs):\n    inputs = kwargs.pop('input_tensor', None)\n    super(_SubclassModel, self).__init__(*args, **kwargs)\n    for i, layer in enumerate(model_layers):\n        setattr(self, self._layer_name_for_i(i), layer)\n    self.num_layers = len(model_layers)\n    if inputs is not None:\n        self._set_inputs(inputs)", "docstring": "Instantiate a model.\n\nArgs:\nmodel_layers: a list of layers to be added to the model.\n*args: Model's args\n**kwargs: Model's keyword args, at most one of input_tensor -> the input\ntensor required for ragged/sparse input.", "source": "github-repos"}
{"code": "def __getitem__(self, item):  \n    \n    if item not in self._declarations:\n      raise self.UndeclaredKeyError('Configuration key not declared', item)\n\n    if item in self._flag_values:\n      if item in self._loaded_values:\n        self._logger.warning(\n            'Overriding loaded value for %s (%s) with flag value: %s',\n            item, self._loaded_values[item], self._flag_values[item])\n      return self._flag_values[item]\n    if item in self._loaded_values:\n      return self._loaded_values[item]\n    if self._declarations[item].has_default:\n      return self._declarations[item].default_value\n\n    raise self.UnsetKeyError(\n        'Configuration value not set and has no default', item)", "docstring": "Get a config value via item access.\n\nOrder of precedence is:\n- Value provided via --config-value flag.\n- Value loaded via load*() methods.\n- Default value as declared with conf.declare()\n\nArgs:\nitem: Config key name to get.", "source": "juraj-google-style"}
{"code": "def broadcast(self, tensor, destinations):\n    validate_destinations(destinations)\n    return self.broadcast_implementation(tensor, destinations)", "docstring": "Broadcast `tensor` to `destinations`.\n\nThis can only be called in the cross-replica context.\n\nArgs:\ntensor: a `tf.Tensor` like object. The value to broadcast.\ndestinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a\n`tf.Tensor` alike object, or a device string. It specifies the devices\nto broadcast to. Note that if it's a `tf.Variable`, the value is\nbroadcasted to the devices of that variable, this method doesn't update\nthe variable.\n\nReturns:\nA `tf.Tensor` or `tf.distribute.DistributedValues`.", "source": "github-repos"}
{"code": "def CalculateHashes(self, base_path_specs, output_writer):\n    \n    for base_path_spec in base_path_specs:\n      file_system = resolver.Resolver.OpenFileSystem(base_path_spec)\n      file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)\n      if file_entry is None:\n        logging.warning('Unable to open base path specification:\\n{0:s}'.format(\n            base_path_spec.comparable))\n        continue\n\n      self._CalculateHashesFileEntry(file_system, file_entry, '', output_writer)", "docstring": "Recursive calculates hashes starting with the base path specification.\n\nArgs:\nbase_path_specs (list[dfvfs.PathSpec]): source path specification.\noutput_writer (StdoutWriter): output writer.", "source": "juraj-google-style"}
{"code": "def l2_regression_sq_loss(y, target, name=None):\n  \n  with tf.name_scope(name, 'l2_regression_sq', [y, target]) as scope:\n    y = tf.convert_to_tensor(y, name='y')\n    target = tf.convert_to_tensor(target, name='target')\n    return reduce_batch_sum(tf.square(y - target), name=scope)", "docstring": "Calculates the sum of squared errors between y and target.\n\nArgs:\ny: the calculated values.\ntarget: the desired values.\nname: the name for this op, defaults to l2_regression\nReturns:\nA tensorflow op.", "source": "juraj-google-style"}
{"code": "def delete_as(access_token, subscription_id, resource_group, as_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/availabilitySets/', as_name,\n                        '?api-version=', COMP_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete availability set.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nas_name (str): Name of the availability set.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def __init__(self, step, metric, labels=None):\n    self.step = step\n    self.metric = metric\n    self.labels = labels if labels else {}", "docstring": "Initializes ``MetricKey``.\n\nArgs:\nstep: A string with the step this metric cell is part of.\nmetric: A ``MetricName`` namespace+name that identifies a metric.\nlabels: An arbitrary set of labels that also identifies the metric.", "source": "github-repos"}
{"code": "def verify_callback(\n            self,\n            origin_authorization,\n            url,\n            body,\n            content_type='application/x-www-form-urlencoded'):\n        \n        token = self.token_of_request(url, body, content_type)\n        authorization = 'QBox {0}'.format(token)\n        return origin_authorization == authorization", "docstring": "回调验证\n\nArgs:\norigin_authorization: 回调时请求Header中的Authorization字段\nurl:                  回调请求的url\nbody:                 回调请求的body\ncontent_type:         回调请求body的Content-Type\n\nReturns:\n返回true表示验证成功，返回false表示验证失败", "source": "juraj-google-style"}
{"code": "def update_exit_code(self, code: int):\n        \n        if code:\n            if self._exit_code:\n                self._exit_code = min(self._exit_code, code)\n            else:\n                self._exit_code = code", "docstring": "Set the exit code if it is serious than before.\n\nArgs:\ncode: The exit code.", "source": "juraj-google-style"}
{"code": "def set_shutdown(self, name, default=False, disable=True):\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('shutdown', value=True, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Configures the interface shutdown state\n\nDefault configuration for set_shutdown is disable=True, meaning\n'no shutdown'. Setting both default and disable to False will\neffectively enable shutdown on the interface.\n\nArgs:\nname (string): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\n\ndefault (boolean): Specifies to default the interface shutdown\n\ndisable (boolean): Specifies to disable interface shutdown, i.e.\ndisable=True => no shutdown\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def create_multipart_upload(self, request):\n    try:\n        boto_response = self.client.create_multipart_upload(Bucket=request.bucket, Key=request.object, ContentType=request.mime_type)\n        response = messages.UploadResponse(boto_response['UploadId'])\n    except Exception as e:\n        raise messages.S3ClientError(str(e), get_http_error_code(e))\n    return response", "docstring": "Initates a multipart upload to S3 for a given object\n\nArgs:\nrequest: (UploadRequest) input message\nReturns:\n(UploadResponse) The response message.", "source": "github-repos"}
{"code": "def _device_assignments(self) -> list[traceable_stack.TraceableObject]:\n    return self._device_code_locations or []", "docstring": "Code locations for device context managers active at op creation.\n\nThis property will return a list of traceable_stack.TraceableObject\ninstances where .obj is a string representing the assigned device\n(or information about the function that would be applied to this op\nto compute the desired device) and the filename and lineno members\nrecord the location of the relevant device context manager.\n\nFor example, suppose file_a contained these lines:\n\nfile_a.py:\n15: with tf.device('/gpu:0'):\n16:   node_b = tf.constant(4, name='NODE_B')\n\nThen a TraceableObject t_obj representing the device context manager\nwould have these member values:\n\nt_obj.obj -> '/gpu:0'\nt_obj.filename = 'file_a.py'\nt_obj.lineno = 15\n\nand node_b.op._device_assignments would return the list [t_obj].\n\nReturns:\n[str: traceable_stack.TraceableObject, ...] as per this method's\ndescription, above.", "source": "github-repos"}
{"code": "def convert_selu(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting selu ...')\n    if (names == 'short'):\n        tf_name = ('SELU' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    selu = keras.layers.Activation('selu', name=tf_name)\n    layers[scope_name] = selu(layers[inputs[0]])", "docstring": "Convert selu layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def create_report(self, uri, timeout=-1):\n        \n        logger.debug('Creating Report (uri = %s)'.format(uri))\n        task, _ = self._connection.post(uri, {})\n\n        if not task:\n            raise exceptions.HPOneViewException(RESOURCE_CLIENT_TASK_EXPECTED)\n\n        task = self._task_monitor.get_completed_task(task, timeout)\n\n        return task['taskOutput']", "docstring": "Creates a report and returns the output.\n\nArgs:\nuri: URI\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nlist:", "source": "juraj-google-style"}
{"code": "def from_input(cls, input, workdir=None, manager=None):\n        \n        return cls(input, workdir=workdir, manager=manager)", "docstring": "Create an instance of `AbinitTask` from an ABINIT input.\n\nArgs:\nainput: `AbinitInput` object.\nworkdir: Path to the working directory.\nmanager: :class:`TaskManager` object.", "source": "juraj-google-style"}
{"code": "def distribute_variable(value, layout):\n    return distribute_tensor(value, layout)", "docstring": "Create a distributed variable for JAX.\n\nSince JAX doesn't have a variable class, this will just return a `jax.Array`\nwith the corresponding layout/sharding specified.\n\nNote that this function should be used in eager context, not in jitted\nfunction.\n\nArgs:\nvalue: the initial value of the variable.\nlayout: `TensorLayout` for the created variable, or a\nJAX-supported layout instance\n(e.g. `jax.experimental.layout.Layout`, `jax.sharding.Sharding`).\n\nReturns:\njax.Array which is the distributed variable.", "source": "github-repos"}
{"code": "def line_count(fn):\n    \n\n    with open(fn) as f:\n        for i, l in enumerate(f):\n            pass\n    return i + 1", "docstring": "Get line count of file\n\nArgs:\nfn (str): Path to file\n\nReturn:\nNumber of lines in file (int)", "source": "juraj-google-style"}
{"code": "def abspath(cur_file, parent=0) -> str:\n    \n    file_path = os.path.abspath(cur_file).replace('\\\\', '/')\n    if os.path.isdir(file_path) and parent == 0: return file_path\n    adj = 1 - os.path.isdir(file_path)\n    return '/'.join(file_path.split('/')[:-(parent + adj)])", "docstring": "Absolute path\n\nArgs:\ncur_file: __file__ or file or path str\nparent: level of parent to look for\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def format_earning(data: pd.DataFrame, header: pd.DataFrame) -> pd.DataFrame:\n    if data.dropna(subset=['value']).empty:\n        return pd.DataFrame()\n    res = pd.concat([grp.loc[(:, ['value'])].set_index(header.value) for (_, grp) in data.groupby(data.position)], axis=1)\n    res.index.name = None\n    res.columns = res.iloc[0]\n    res = res.iloc[1:].transpose().reset_index().apply(pd.to_numeric, downcast='float', errors='ignore')\n    res.rename(columns=(lambda vv: '_'.join(vv.lower().split()).replace('fy_', 'fy')), inplace=True)\n    years = res.columns[res.columns.str.startswith('fy')]\n    lvl_1 = (res.level == 1)\n    for yr in years:\n        res.loc[(:, yr)] = res.loc[(:, yr)].round(1)\n        pct = f'{yr}_pct'\n        res.loc[(:, pct)] = 0.0\n        res.loc[(lvl_1, pct)] = res.loc[(lvl_1, pct)].astype(float).round(1)\n        res.loc[(lvl_1, pct)] = ((res.loc[(lvl_1, yr)] / res.loc[(lvl_1, yr)].sum()) * 100)\n        sub_pct = []\n        for (_, snap) in res[::(- 1)].iterrows():\n            if (snap.level > 2):\n                continue\n            if (snap.level == 1):\n                if (len(sub_pct) == 0):\n                    continue\n                sub = pd.concat(sub_pct, axis=1).transpose()\n                res.loc[(sub.index, pct)] = ((res.loc[(sub.index, yr)] / res.loc[(sub.index, yr)].sum()) * 100)\n                sub_pct = []\n            if (snap.level == 2):\n                sub_pct.append(snap)\n    res.set_index('segment_name', inplace=True)\n    res.index.name = None\n    return res", "docstring": "Standardized earning outputs and add percentage by each blocks\n\nArgs:\ndata: earning data block\nheader: earning headers\n\nReturns:\npd.DataFrame\n\nExamples:\n>>> format_earning(\n...     data=pd.read_pickle('xbbg/tests/data/sample_earning.pkl'),\n...     header=pd.read_pickle('xbbg/tests/data/sample_earning_header.pkl')\n... ).round(2)\nlevel  fy2017  fy2017_pct\nAsia-Pacific       1.0  3540.0       66.43\nChina           2.0  1747.0       49.35\nJapan           2.0  1242.0       35.08\nSingapore       2.0   551.0       15.56\nUnited States      1.0  1364.0       25.60\nEurope             1.0   263.0        4.94\nOther Countries    1.0   162.0        3.04", "source": "codesearchnet"}
{"code": "def count_de_novos_per_transcript(ensembl, gene_id, de_novos=[]):\n    \n    \n    transcripts = get_transcript_ids(ensembl, gene_id)\n    \n    \n    if len(transcripts) == 0:\n        raise IndexError(\"{0} lacks coding transcripts\".format(gene_id))\n    \n    \n    counts = {}\n    for key in transcripts:\n        try:\n            gene = construct_gene_object(ensembl, key)\n            total = len(get_de_novos_in_transcript(gene, de_novos))\n            if total > 0:\n                counts[key] = {}\n                counts[key][\"n\"] = total\n                counts[key][\"len\"] = transcripts[key]\n        except ValueError:\n            pass\n    \n    return counts", "docstring": "count de novos in transcripts for a gene.\n\nArgs:\nensembl: EnsemblRequest object to request data from ensembl\ngene_id: HGNC symbol for gene\nde_novos: list of de novo positions, so we can check they all fit in\nthe gene transcript\n\nReturns:\ndictionary of lengths and de novo counts, indexed by transcript IDs.", "source": "juraj-google-style"}
{"code": "def imdirect_open(fp):\n    img = pil_open(fp, 'r')\n    if (img.format == 'JPEG'):\n        if isinstance(fp, string_types):\n            exif = piexif.load(text_type_to_use(fp))\n        else:\n            fp.seek(0)\n            exif = piexif.load(fp.read())\n        orientation_value = exif.get('0th', {}).get(piexif.ImageIFD.Orientation)\n        if ((orientation_value is None) or (orientation_value == 1)):\n            return img\n        img_rot = autorotate(img)\n        exif = update_exif_for_rotated_image(exif)\n        with io.BytesIO() as bio:\n            img_rot.save(bio, format='jpeg', exif=piexif.dump(exif))\n            bio.seek(0)\n            img_rot_new = pil_open(bio, 'r')\n            img_rot_new.load()\n        img = img_rot_new\n    return img", "docstring": "Opens, identifies the given image file, and rotates it if it is a JPEG.\n\nNote that this method does NOT employ the lazy loading methodology that\nthe PIL Images otherwise use. This is done to avoid having to save new\n\nArgs:\nfp: A filename (string), pathlib.Path object or a file-like object.\n\nReturns:\nThe image as an :py:class:`~PIL.Image.Image` object.\n\nRaises:\nIOError: If the file cannot be found, or the image cannot be\nopened and identified.", "source": "codesearchnet"}
{"code": "def __getitem__(self, indices):\n    return self.array[indices]", "docstring": "Select elements in the 0th dimension.\n\nArgs:\nindices: the indices to select. Only needs to support one dimension,\nthe 0th dimension. Should support a `slice` or a list, tuple,\n`np.array` or 1D tensor.\nReturns: A slice of `self.array`.", "source": "github-repos"}
{"code": "def _on_receive(self, client, userdata, message):\n    topic = message.topic\n    encoded = message.payload\n    try:\n        packet = json.loads(encoded)\n    except ValueError:\n        self._logger.warn('Could not decode json packet: %s', encoded)\n        return\n    try:\n        seq = packet['sequence']\n        message_data = packet['message']\n    except KeyError:\n        self._logger.warn('Message received did not have required sequence and message keys: %s', packet)\n        return\n    if (topic not in self.queues):\n        found = False\n        for (_, regex, callback, ordered) in self.wildcard_queues:\n            if regex.match(topic):\n                self.queues[topic] = PacketQueue(0, callback, ordered)\n                found = True\n                break\n        if (not found):\n            self._logger.warn('Received message for unknown topic: %s', topic)\n            return\n    self.queues[topic].receive(seq, [seq, topic, message_data])", "docstring": "Callback called whenever we receive a message on a subscribed topic\n\nArgs:\nclient (string): The client id of the client receiving the message\nuserdata (string): Any user data set with the underlying MQTT client\nmessage (object): The mesage with a topic and payload.", "source": "codesearchnet"}
{"code": "def parse(self, key, value):\n        \n        if value is not None:\n            try:\n                return self._parser(value)\n            except Exception:\n                raise ParsingError(\"Error parsing {}\".format(key))\n        elif self._default is not SENTINAL:\n            return self._default\n        else:\n            raise KeyError(key)", "docstring": "Parse the environment value for a given key against the schema.\n\nArgs:\nkey: The name of the environment variable.\nvalue: The value to be parsed.", "source": "juraj-google-style"}
{"code": "def get_flat(self):\n    self._check_sess()\n    return np.concatenate([v.eval(session=self.sess).flatten() for v in self.variables.values()])", "docstring": "Gets the weights and returns them as a flat array.\n\nReturns:\n1D Array containing the flattened weights.", "source": "codesearchnet"}
{"code": "def set_continue(self, name, action, seqno, value=None, default=False, disable=False):\n    commands = [('route-map %s %s %s' % (name, action, seqno))]\n    if default:\n        commands.append('default continue')\n    elif disable:\n        commands.append('no continue')\n    else:\n        if ((not str(value).isdigit()) or (value < 1)):\n            raise ValueError('seqno must be a positive integer unless default or disable is specified')\n        commands.append(('continue %s' % value))\n    return self.configure(commands)", "docstring": "Configures the routemap continue value\n\nArgs:\nname (string): The full name of the routemap.\naction (string): The action to take for this routemap clause.\nseqno (integer): The sequence number for the routemap clause.\nvalue (integer): The value to configure for the routemap continue\ndefault (bool): Specifies to default the routemap continue value\ndisable (bool): Specifies to negate the routemap continue value\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def from_moy(cls, moy, leap_year=False):\n    if (not leap_year):\n        num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440, 260640, 305280, 349920, 393120, 437760, 480960, 525600)\n    else:\n        num_of_minutes_until_month = (0, 44640, (84960 + 1440), (129600 + 1440), (172800 + 1440), (217440 + 1440), (260640 + 1440), (305280 + 1440), (349920 + 1440), (393120 + 1440), (437760 + 1440), (480960 + 1440), (525600 + 1440))\n    for monthCount in range(12):\n        if (int(moy) < num_of_minutes_until_month[(monthCount + 1)]):\n            month = (monthCount + 1)\n            break\n    try:\n        day = (int(((moy - num_of_minutes_until_month[(month - 1)]) / (60 * 24))) + 1)\n    except UnboundLocalError:\n        raise ValueError(('moy must be positive and smaller than 525600. Invalid input %d' % moy))\n    else:\n        hour = int(((moy / 60) % 24))\n        minute = int((moy % 60))\n        return cls(month, day, hour, minute, leap_year)", "docstring": "Create Ladybug Datetime from a minute of the year.\n\nArgs:\nmoy: An integer value 0 <= and < 525600", "source": "codesearchnet"}
{"code": "def __init__(self, graph, resolver, namespace, scope, closure_types):\n    super(Analyzer, self).__init__(graph)\n    self.resolver = resolver\n    self.namespace = namespace\n    self.scope = scope\n    self.closure_types = closure_types\n    context_types = {n: t for n, t in closure_types.items() if n not in scope.bound}\n    if context_types:\n        self.context_types = _TypeMap()\n        self.context_types.types = context_types\n    else:\n        self.context_types = None", "docstring": "Creates a new analyzer.\n\nArgs:\ngraph: cfg.Graph\nresolver: Resolver\nnamespace: Dict[str, Any]\nscope: activity.Scope\nclosure_types: Dict[QN, Set]", "source": "github-repos"}
{"code": "def _GetTripSequence(self, schedule=None):\n    \n    if schedule is None:\n      schedule = getattr(self, \"_schedule\", None)\n    if schedule is None:\n      warnings.warn(\"No longer supported. _schedule attribute is  used to get \"\n                    \"stop_times table\", DeprecationWarning)\n    cursor = schedule._connection.cursor()\n    cursor.execute(\"SELECT trip_id,stop_sequence FROM stop_times \"\n                   \"WHERE stop_id=?\",\n                   (self.stop_id, ))\n    return [(schedule.GetTrip(row[0]), row[1]) for row in cursor]", "docstring": "Return a list of (trip, stop_sequence) for all trips visiting this stop.\n\nA trip may be in the list multiple times with different index.\nstop_sequence is an integer.\n\nArgs:\nschedule: Deprecated, do not use.", "source": "juraj-google-style"}
{"code": "def remove(self, annotation):\n    if (annotation.id in self._annotations):\n        del self._annotations[annotation.id]\n    self._dirty = True", "docstring": "Removes an annotation.\n\nArgs:\nannotation (gkeepapi.node.Annotation): An Annotation object.\n\nReturns:\ngkeepapi.node.Annotation: The Annotation.", "source": "codesearchnet"}
{"code": "def autodecode(b):\n    import warnings\n    import chardet\n    try:\n        return b.decode()\n    except UnicodeError:\n        result = chardet.detect(b)\n        if (result['confidence'] < 0.95):\n            warnings.warn(('autodecode failed with utf-8; guessing %s' % result['encoding']))\n        return result.decode(result['encoding'])", "docstring": "Try to decode ``bytes`` to text - try default encoding first, otherwise try to autodetect\n\nArgs:\nb (bytes): byte string\n\nReturns:\nstr: decoded text string", "source": "codesearchnet"}
{"code": "def evaluate(self, tensors):\n    sess = ops.get_default_session() or self.cached_session()\n    return sess.run(tensors)", "docstring": "Evaluates tensors and returns numpy values.\n\nArgs:\ntensors: A Tensor or a nested list/tuple of Tensors.\n\nReturns:\ntensors numpy values.", "source": "github-repos"}
{"code": "def load_structure_path(self, structure_path, file_type):\n    if (not file_type):\n        raise ValueError('File type must be specified')\n    self.file_type = file_type\n    self.structure_dir = op.dirname(structure_path)\n    self.structure_file = op.basename(structure_path)", "docstring": "Load a structure file and provide pointers to its location\n\nArgs:\nstructure_path (str): Path to structure file\nfile_type (str): Type of structure file", "source": "codesearchnet"}
{"code": "def add_function_def(self, fdef):\n    self.ensure_initialized()\n    if is_oss:\n        fdef_string = fdef.SerializeToString()\n        pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string, len(fdef_string))\n    else:\n        pywrap_tfe.TFE_ContextAddFunctionDefNoSerialization(self._handle, fdef)", "docstring": "Add a function definition to the context.\n\nOnce added, the function (identified by its name) can be executed like any\nother operation.\n\nArgs:\nfdef: A FunctionDef protocol buffer message.", "source": "github-repos"}
{"code": "def unpack_dosdate(self, offset):\n        \n        try:\n            o = self._offset + offset\n            return dosdate(self._buf[o:o + 2], self._buf[o + 2:o + 4])\n        except struct.error:\n            raise OverrunBufferException(o, len(self._buf))", "docstring": "Returns a datetime from the DOSDATE and DOSTIME starting at\nthe relative offset.\nArguments:\n- `offset`: The relative offset from the start of the block.\nThrows:\n- `OverrunBufferException`", "source": "juraj-google-style"}
{"code": "def setup_ui(uifile, base_instance=None):\n    \n    ui = QtCompat.loadUi(uifile)  \n    if not base_instance:\n        return ui\n    else:\n        for member in dir(ui):\n            if not member.startswith('__') and \\\n               member is not 'staticMetaObject':\n                setattr(base_instance, member, getattr(ui, member))\n        return ui", "docstring": "Load a Qt Designer .ui file and returns an instance of the user interface\n\nArgs:\nuifile (str): Absolute path to .ui file\nbase_instance (QWidget): The widget into which UI widgets are loaded\n\nReturns:\nQWidget: the base instance", "source": "juraj-google-style"}
{"code": "def name_to_vector(name):\n    if (not isinstance(name, unicode)):\n        name = name.decode('utf-8')\n    name = name.lower()\n    name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\n    name = ''.join(filter((lambda x: (x.isalpha() or (x == ' '))), list(name)))\n    return sorted(name.split(), key=(lambda x: len(x)), reverse=True)", "docstring": "Convert `name` to the ASCII vector.\n\nExample:\n>>> name_to_vector(\"ing. Franta Putšálek\")\n['putsalek', 'franta', 'ing']\n\nArgs:\nname (str): Name which will be vectorized.\n\nReturns:\nlist: Vector created from name.", "source": "codesearchnet"}
{"code": "def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):\n        \n        raise NotImplementedError(\"draw() is not implemented\")", "docstring": "Draw function called by the system every frame when the effect is active.\nThis method raises ``NotImplementedError`` unless implemented.\n\nArgs:\ntime (float): The current time in seconds.\nframetime (float): The time the previous frame used to render in seconds.\ntarget (``moderngl.Framebuffer``): The target FBO for the effect.", "source": "juraj-google-style"}
{"code": "def check_type(obj: Any, candidate_type: Any, reltype: str='invariant') -> bool:\n    if (reltype not in ['invariant', 'covariant', 'contravariant']):\n        raise ValueError(f' Variadic type {reltype} is unknown')\n    if ((type(candidate_type) == type) and (reltype in ['invariant'])):\n        return isinstance(obj, candidate_type)\n    if ((type(candidate_type) == type) and (reltype in ['covariant'])):\n        return issubclass(obj.__class__, candidate_type)\n    if ((type(candidate_type) == type) and (reltype in ['contravariant'])):\n        return issubclass(candidate_type, obj.__class__)\n    if (type(candidate_type) == type(Any)):\n        return True\n    if (type(candidate_type) == type(Union)):\n        return any((check_type(obj, t, reltype) for t in candidate_type.__args__))\n    if ((type(candidate_type) == type(Tuple)) and (tuple in candidate_type.__bases__)):\n        if (not hasattr(obj, '__len__')):\n            return False\n        if (len(candidate_type.__args__) != len(obj)):\n            return False\n        return all((check_type(o, t, reltype) for (o, t) in zip(obj, candidate_type.__args__)))\n    if ((type(candidate_type) == type(Dict)) and (dict in candidate_type.__bases__)):\n        if (type(obj) != dict):\n            return False\n        return all(((check_type(k, candidate_type.__args__[0], reltype) and check_type(v, candidate_type.__args__[1], reltype)) for (k, v) in obj.items()))\n    if ((type(candidate_type) == type(List)) and ((list in candidate_type.__bases__) or (set in candidate_type.__bases__))):\n        if (not hasattr(obj, '__len__')):\n            return False\n        return all((check_type(o, candidate_type.__args__[0], reltype) for o in obj))\n    if (type(candidate_type) == TypeVar):\n        if (not candidate_type.__constraints__):\n            return True\n        if (not (candidate_type.__covariant__ or candidate_type.__contravariant__)):\n            return any((check_type(obj, t) for t in candidate_type.__constraints__))\n    if (type(candidate_type) == type(Type)):\n        return check_type(obj, candidate_type.__args__[0], reltype='covariant')\n    if (inspect.isclass(candidate_type) and (reltype in ['invariant'])):\n        return isinstance(obj, candidate_type)\n    raise ValueError(f'Cannot check against {reltype} type {candidate_type}')", "docstring": "Tell wether a value correspond to a type,\noptionally specifying the type as contravariant or covariant.\n\nArgs:\nobj (Any): The value to check.\ncandidate_type (Any): The type to check the object against.\nreltype (:obj:`str`, optional): Variance of the type, can be contravariant,\ncovariant or invariant. By default is invariant.\nReturns:\nbool: True if the type is fine, False otherwise\n\nRaises:\nValueError: When the variance or the type are not among the ones the function can manage.", "source": "codesearchnet"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Update an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsGithubEnterpriseConfigsPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def make_processor(self, name, mappings, processor_type, **kwargs):\n    from .processor import Processor\n    if self.processors.get(name):\n        raise LookupError('processor has already been created')\n    if isinstance(mappings, list):\n        mappings = [self.get_rml(item) for item in mappings]\n    else:\n        mappings = [self.get_rml(mappings)]\n    self.processors[name] = Processor[processor_type](mappings, **kwargs)\n    self.processors[name].name = name\n    return self.processors[name]", "docstring": "Instantiates a RmlProcessor and registers it in the manager\n\nArgs:\n-----\nname: the name to register the processor\nmappings: the list RML mapping definitions to use\nprocessor_type: the name of the RML processor to use", "source": "codesearchnet"}
{"code": "def filter_out_spontaneous_genes(genes, custom_spont_id=None):\n    new_genes = DictList()\n    for gene in genes:\n        if (not is_spontaneous(gene, custom_id=custom_spont_id)):\n            new_genes.append(gene)\n    return new_genes", "docstring": "Return the DictList of genes that are not spontaneous in a model.\n\nArgs:\ngenes (DictList): Genes DictList\ncustom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nDictList: genes excluding ones that are spontaneous", "source": "codesearchnet"}
{"code": "def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device=None, dtype: torch.float=None) -> Tensor:\n    if dtype is None:\n        dtype = self.dtype\n    if not (attention_mask.dim() == 2 and self.config.is_decoder):\n        if device is not None:\n            warnings.warn('The `device` argument is deprecated and will be removed in v5 of Transformers.', FutureWarning)\n    if attention_mask.dim() == 3:\n        extended_attention_mask = attention_mask[:, None, :, :]\n    elif attention_mask.dim() == 2:\n        if self.config.is_decoder:\n            extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(input_shape, attention_mask, device)\n        else:\n            extended_attention_mask = attention_mask[:, None, None, :]\n    else:\n        raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')\n    extended_attention_mask = extended_attention_mask.to(dtype=dtype)\n    extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min\n    return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArguments:\nattention_mask (`torch.Tensor`):\nMask with ones indicating tokens to attend to, zeros for tokens to ignore.\ninput_shape (`Tuple[int]`):\nThe shape of the input to the model.\n\nReturns:\n`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.", "source": "github-repos"}
{"code": "def element_if_exists(self, using, value):\n        \n        try:\n            self._execute(Command.FIND_ELEMENT, {\n                'using': using,\n                'value': value\n            })\n            return True\n        except:\n            return False", "docstring": "Check if an element in the current context.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nReturn True if the element does exists and return False otherwise.\n\nRaises:\nWebDriverException.", "source": "juraj-google-style"}
{"code": "def _verify_structure_compatible(input_name, spec_name, input_, spec):\n    try:\n        nest.assert_same_structure(input_, spec, expand_composites=True)\n    except (ValueError, TypeError) as e:\n        raise TypeError('{} must have the same element structure as {}.\\n\\n{}'.format(input_name, spec_name, str(e))) from e\n    nest.map_structure(functools.partial(_verify_spec_compatible, input_name, spec_name), input_, spec)", "docstring": "Verifies that possibly-structured symbol has types compatible vith another.\n\nSee _verify_spec_compatible for a more concrete meaning of \"compatible\".\nUnspec _verify_spec_compatible, which handles singular Tensor-spec objects,\nverify_structures_compatible can process structures recognized by tf.nest.\n\nArgs:\ninput_name: A name to use for `input_` in error messages.\nspec_name: A name to use for `spec` in error messages.\ninput_: Any, value to verify. May, but doesn't need to, be a structure.\nspec: Any, value that `input_` must be compatible with. May, but doesn't\nneed to, be a structure.\n\nRaises:\nValueError if the two types have been determined not to be compatible.", "source": "github-repos"}
{"code": "def UpdatePreprocessor(self, line):\n    if Match('^\\\\s*\n        self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))\n    elif Match('^\\\\s*\n        if self.pp_stack:\n            if (not self.pp_stack[(- 1)].seen_else):\n                self.pp_stack[(- 1)].seen_else = True\n                self.pp_stack[(- 1)].stack_before_else = copy.deepcopy(self.stack)\n            self.stack = copy.deepcopy(self.pp_stack[(- 1)].stack_before_if)\n        else:\n            pass\n    elif Match('^\\\\s*\n        if self.pp_stack:\n            if self.pp_stack[(- 1)].seen_else:\n                self.stack = self.pp_stack[(- 1)].stack_before_else\n            self.pp_stack.pop()\n        else:\n            pass", "docstring": "Update preprocessor stack.\n\nWe need to handle preprocessors due to classes like this:\n#ifdef SWIG\nstruct ResultDetailsPageElementExtensionPoint {\n#else\nstruct ResultDetailsPageElementExtensionPoint : public Extension {\n#endif\n\nWe make the following assumptions (good enough for most files):\n- Preprocessor condition evaluates to true from #if up to first\n#else/#elif/#endif.\n\n- Preprocessor condition evaluates to false from #else/#elif up\nto #endif.  We still perform lint checks on these lines, but\nthese do not affect nesting stack.\n\nArgs:\nline: current line to check.", "source": "codesearchnet"}
{"code": "def __replaceSpecialValues(self, decisions):\n\t\t\n\t\terror = []\n\t\tfor row, line in enumerate(decisions):\n\t\t\tif '.' in line:\n\t\t\t\tfor i, element in enumerate(line):\n\t\t\t\t\tif row == 0:\n\t\t\t\t\t\terror.append(\n\t\t\t\t\t\t\t\"Row: {}colume: {}==> don't have parent value\".format(str(row).ljust(4), str(i).ljust(4)))\n\t\t\t\t\tif element == self.__parentSymbol:\n\t\t\t\t\t\tif decisions[row - 1][i] == '.':\n\t\t\t\t\t\t\terror.append(\"Row: {}Colume: {}==> don't have parent value\".format(str(row).ljust(4),\n\t\t\t\t\t\t\t                                                                   str(i).ljust(4)))\n\n\t\t\t\t\t\tdecisions[row][i] = decisions[row - 1][i]\n\n\t\tif error:\n\t\t\tview.Tli.showErrors('ReplaceSpecialValuesError', error)\n\t\telse:\n\t\t\treturn decisions", "docstring": "Will replace special values in decisions array.\n\nArgs:\ndecisions (array of array of str): Standard decision array format.\nRaises:\nValueError: Row element don't have parent value.\n\nReturns:\nNew decision array with updated values.", "source": "juraj-google-style"}
{"code": "def compile_reward(self, scope: Dict[(str, TensorFluent)]) -> TensorFluent:\n    reward_expr = self.rddl.domain.reward\n    with self.graph.as_default():\n        with tf.name_scope('reward'):\n            return self._compile_expression(reward_expr, scope)", "docstring": "Compiles the reward function given the fluent `scope`.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.\n\nReturns:\nA :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.", "source": "codesearchnet"}
{"code": "def RegisterUtility(utility_name, version_mapping=None):\n\n    def IsFunctionOrMethod(member):\n        \"Determines if given member is a function or method.\\n\\n    These two are used in combination to ensure that inspect finds all of a\\n    given utility class's methods in both Python 2 and 3.\\n\\n    Args:\\n      member: object that is a member of a class, to be determined whether it is\\n        a function or method.\\n\\n    Returns:\\n      A boolean that is True if the provided member is a function or method, or\\n      False if it isn't.\\n    \"\n        return (inspect.isfunction(member) or inspect.ismethod(member))\n\n    def MethodDecorator(utility_method, version):\n        'Decorates a method in the utility class.'\n        registry_name = (('%s/%s' % (utility_name, version)) if version else utility_name)\n\n        @wraps(utility_method)\n        def Wrapper(*args, **kwargs):\n            AddToUtilityRegistry(registry_name)\n            return utility_method(*args, **kwargs)\n        return Wrapper\n\n    def ClassDecorator(cls):\n        'Decorates a utility class.'\n        for (name, method) in inspect.getmembers(cls, predicate=IsFunctionOrMethod):\n            if (not name.startswith('_')):\n                if (not getattr(method, '__self__', None)):\n                    setattr(cls, name, MethodDecorator(method, (version_mapping.get(name) if version_mapping else None)))\n        return cls\n    return ClassDecorator", "docstring": "Decorator that registers a class with the given utility name.\n\nThis will only register the utilities being used if the UtilityRegistry is\nenabled. Note that only the utility class's public methods will cause the\nutility name to be added to the registry.\n\nArgs:\nutility_name: A str specifying the utility name associated with the class.\nversion_mapping: A dict containing optional version strings to append to the\nutility string for individual methods; where the key is the method name and\nthe value is the text to be appended as the version.\n\nReturns:\nThe decorated class.", "source": "codesearchnet"}
{"code": "def create_latin_hypercube_samples(order, dim=1):\n    \n    randoms = numpy.random.random(order*dim).reshape((dim, order))\n    for dim_ in range(dim):\n        perm = numpy.random.permutation(order)  \n        randoms[dim_] = (perm + randoms[dim_])/order\n    return randoms", "docstring": "Latin Hypercube sampling.\n\nArgs:\norder (int):\nThe order of the latin hyper-cube. Defines the number of samples.\ndim (int):\nThe number of dimensions in the latin hyper-cube.\n\nReturns (numpy.ndarray):\nLatin hyper-cube with ``shape == (dim, order)``.", "source": "juraj-google-style"}
{"code": "def get_attached_bytes_map(meta_graph):\n  \n  result = {}\n  if ATTACHMENT_COLLECTION_SAVED not in meta_graph.collection_def:\n    return result\n  collection_def = meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED]\n  if collection_def.WhichOneof(\"kind\") != \"bytes_list\":\n    raise ValueError(\n        \"Internal CollectionDef for attached messages has kind %s, \"\n        \"expected bytes_list\" % collection_def.WhichOneof(\"kind\"))\n  attachment = module_attachment_pb2.ModuleAttachment()\n  for value in collection_def.bytes_list.value:\n    attachment.ParseFromString(value)\n    result[attachment.key] = attachment.value  \n  return result", "docstring": "Returns the dict of ModuleAttachments stored in `meta_graph`.\n\nArgs:\nmeta_graph: A MetaGraphDef, as built by SavedModelHandler.add_graph_copy()\nfrom some graph.\n\nReturns:\nA dict, containing the `(key, bytes)` items passed to `attach_bytes()`\nwhen the graph had been built.\n\nRaises:\nValueError: if `meta-graph` is malformed.", "source": "juraj-google-style"}
{"code": "def get_object_metadata(self, request):\n    kwargs = {'Bucket': request.bucket, 'Key': request.object}\n    try:\n        boto_response = self.client.head_object(**kwargs)\n    except Exception as e:\n        raise messages.S3ClientError(str(e), get_http_error_code(e))\n    item = messages.Item(boto_response['ETag'], request.object, boto_response['LastModified'], boto_response['ContentLength'], boto_response['ContentType'])\n    return item", "docstring": "Retrieves an object's metadata.\n\nArgs:\nrequest: (GetRequest) input message\n\nReturns:\n(Object) The response message.", "source": "github-repos"}
{"code": "def longest_one_seg_prefix(self, word):\n        \n        for i in range(self.longest_seg, 0, -1):\n            if word[:i] in self.seg_dict:\n                return word[:i]\n        return ''", "docstring": "Return longest Unicode IPA prefix of a word\n\nArgs:\nword (unicode): input word as Unicode IPA string\n\nReturns:\nunicode: longest single-segment prefix of `word` in database", "source": "juraj-google-style"}
{"code": "def StartsWith(self, value):\n    self._awql = self._CreateSingleValueCondition(value, 'STARTS_WITH')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"starts with\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def cumulative_distribution(self, X):\n        \n        self.check_fit()\n\n        low_bounds = self.model.dataset.mean() - (5 * self.model.dataset.std())\n\n        result = []\n        for value in X:\n            result.append(self.model.integrate_box_1d(low_bounds, value))\n\n        return np.array(result)", "docstring": "Computes the integral of a 1-D pdf between two bounds\n\nArgs:\nX(numpy.array): Shaped (1, n), containing the datapoints.\n\nReturns:\nnumpy.array: estimated cumulative distribution.", "source": "juraj-google-style"}
{"code": "def Corr(poly, dist=None, **kws):\n    if isinstance(poly, distributions.Dist):\n        (poly, dist) = (polynomials.variable(len(poly)), poly)\n    else:\n        poly = polynomials.Poly(poly)\n    cov = Cov(poly, dist, **kws)\n    var = numpy.diag(cov)\n    vvar = numpy.sqrt(numpy.outer(var, var))\n    return numpy.where((vvar > 0), (cov / vvar), 0)", "docstring": "Correlation matrix of a distribution or polynomial.\n\nArgs:\npoly (Poly, Dist):\nInput to take correlation on. Must have ``len(poly)>=2``.\ndist (Dist):\nDefines the space the correlation is taken on.  It is ignored if\n``poly`` is a distribution.\n\nReturns:\n(numpy.ndarray):\nCorrelation matrix with\n``correlation.shape == poly.shape+poly.shape``.\n\nExamples:\n>>> Z = chaospy.MvNormal([3, 4], [[2, .5], [.5, 1]])\n>>> print(numpy.around(chaospy.Corr(Z), 4))\n[[1.     0.3536]\n[0.3536 1.    ]]\n\n>>> x = chaospy.variable()\n>>> Z = chaospy.Normal()\n>>> print(numpy.around(chaospy.Corr([x, x**2], Z), 4))\n[[1. 0.]\n[0. 1.]]", "source": "codesearchnet"}
{"code": "def _oai_to_xml(marc_oai):\n    record = MARCXMLRecord(marc_oai)\n    record.oai_marc = False\n    return record.to_XML()", "docstring": "Convert OAI to MARC XML.\n\nArgs:\nmarc_oai (str): String with either OAI or MARC XML.\n\nReturns:\nstr: String with MARC XML.", "source": "codesearchnet"}
{"code": "def _add_tags(self, tags):\n    alltagsadded = True\n    for tag in tags:\n        if (not self._add_tag(tag)):\n            alltagsadded = False\n    return alltagsadded", "docstring": "Add a list of tag\n\nArgs:\ntags (List[str]): list of tags to add\n\nReturns:\nbool: True if all tags added or False if any already present.", "source": "codesearchnet"}
{"code": "def matches(self, stream):\n        \n\n        if self.match_type != stream.stream_type:\n            return False\n\n        if self.match_id is not None:\n            return self.match_id == stream.stream_id\n\n        if self.match_spec == DataStreamSelector.MatchUserOnly:\n            return not stream.system\n        elif self.match_spec == DataStreamSelector.MatchSystemOnly:\n            return stream.system\n        elif self.match_spec == DataStreamSelector.MatchUserAndBreaks:\n            return (not stream.system) or (stream.system and (stream.stream_id in DataStream.KnownBreakStreams))\n\n        \n        \n        return True", "docstring": "Check if this selector matches the given stream\n\nArgs:\nstream (DataStream): The stream to check\n\nReturns:\nbool: True if this selector matches the stream", "source": "juraj-google-style"}
{"code": "def clean_title(title):\n    \n    date_pattern = re.compile(r'\\W*'\n                              r'\\d{1,2}'\n                              r'[/\\-.]'\n                              r'\\d{1,2}'\n                              r'[/\\-.]'\n                              r'(?=\\d*)(?:.{4}|.{2})'\n                              r'\\W*')\n    title = date_pattern.sub(' ', title)\n    title = re.sub(r'\\s{2,}', ' ', title)\n    title = title.strip()\n    return title", "docstring": "Clean title -> remove dates, remove duplicated spaces and strip title.\n\nArgs:\ntitle (str): Title.\n\nReturns:\nstr: Clean title without dates, duplicated, trailing and leading spaces.", "source": "juraj-google-style"}
{"code": "def may_lose_data(self, unused_windowing: core.Windowing) -> DataLossReason:\n    return DataLossReason.NO_POTENTIAL_LOSS", "docstring": "Returns whether or not this trigger could cause data loss.\n\nA trigger can cause data loss in the following scenarios:\n\n* The trigger has a chance to finish. For instance, AfterWatermark()\nwithout a late trigger would cause all late data to be lost. This\nscenario is only accounted for if the windowing strategy allows\nlate data. Otherwise, the trigger is not responsible for the data\nloss.\n\nNote that this only returns the potential for loss. It does not mean that\nthere will be data loss. It also only accounts for loss related to the\ntrigger, not other potential causes.\n\nArgs:\nwindowing: The Windowing that this trigger belongs to. It does not need\nto be the top-level trigger.\n\nReturns:\nThe DataLossReason. If there is no potential loss,\nDataLossReason.NO_POTENTIAL_LOSS is returned. Otherwise, all the\npotential reasons are returned as a single value.", "source": "github-repos"}
{"code": "def get_tqdm_kwargs(**kwargs):\n    default = dict(smoothing=0.5, dynamic_ncols=True, ascii=True, bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]')\n    try:\n        interval = float(os.environ['TENSORPACK_PROGRESS_REFRESH'])\n    except KeyError:\n        interval = _pick_tqdm_interval(kwargs.get('file', sys.stderr))\n    default['mininterval'] = interval\n    default.update(kwargs)\n    return default", "docstring": "Return default arguments to be used with tqdm.\n\nArgs:\nkwargs: extra arguments to be used.\nReturns:\ndict:", "source": "codesearchnet"}
{"code": "def execute_plan(plan):\n    \n    results = [action() for action in plan]\n    return [result for result in results if actns.step_has_failed(result)]", "docstring": "Execute the plan.\n\nArgs:\nplan (:obj:`list` of :obj:`actions.Step`): The plan we want to execute.\n\nReturns:\n(:obj:`list` of :obj:`actions.Step`): A list of failed actions.", "source": "juraj-google-style"}
{"code": "def prepare_policy_template(self, scaling_type, period_sec, server_group):\n        \n        template_kwargs = {\n            'app': self.app,\n            'env': self.env,\n            'region': self.region,\n            'server_group': server_group,\n            'period_sec': period_sec,\n            'scaling_policy': self.settings['asg']['scaling_policy'],\n        }\n        if scaling_type == 'scale_up':\n            template_kwargs['operation'] = 'increase'\n            template_kwargs['comparisonOperator'] = 'GreaterThanThreshold'\n            template_kwargs['scalingAdjustment'] = 1\n\n        elif scaling_type == 'scale_down':\n            cur_threshold = int(self.settings['asg']['scaling_policy']['threshold'])\n            self.settings['asg']['scaling_policy']['threshold'] = floor(cur_threshold * 0.5)\n            template_kwargs['operation'] = 'decrease'\n            template_kwargs['comparisonOperator'] = 'LessThanThreshold'\n            template_kwargs['scalingAdjustment'] = -1\n\n        rendered_template = get_template(template_file='infrastructure/autoscaling_policy.json.j2', **template_kwargs)\n        self.log.info('Creating a %s policy in %s for %s', scaling_type, self.env, self.app)\n        wait_for_task(rendered_template)\n        self.log.info('Successfully created a %s policy in %s for %s', scaling_type, self.env, self.app)", "docstring": "Renders scaling policy templates based on configs and variables.\nAfter rendering, POSTs the json to Spinnaker for creation.\n\nArgs:\nscaling_type (str): ``scale_up`` or ``scaling_down``. Type of policy\nperiod_sec (int): Period of time to look at metrics for determining scale\nserver_group (str): The name of the server group to render template for", "source": "juraj-google-style"}
{"code": "def similar_movies(self, **kwargs):\n    path = self._get_id_path('similar_movies')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the similar movies for a specific movie id.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "async def _on_trace_notification(self, trace_event):\n    conn_string = trace_event.get('connection_string')\n    payload = trace_event.get('payload')\n    (await self.notify_event(conn_string, 'trace', payload))", "docstring": "Callback function called when a trace chunk is received.\n\nArgs:\ntrace_chunk (dict): The received trace chunk information", "source": "codesearchnet"}
{"code": "def reset(self):\n    self._will_reset()\n    if self._has_backup:\n        self._restore()\n    else:\n        _LIB.Reset(self._env)\n    self._did_reset()\n    self.done = False\n    return self.screen", "docstring": "Reset the state of the environment and returns an initial observation.\n\nReturns:\nstate (np.ndarray): next frame as a result of the given action", "source": "codesearchnet"}
{"code": "def usufyToXlsxExport(d, fPath):\n    \n    from pyexcel_xlsx import get_data\n    try:\n        \n        \n        oldData = {\"OSRFramework\": get_data(fPath) }\n    except:\n        \n        oldData = {\"OSRFramework\":[]}\n\n    \n    tabularData = _generateTabularData(d, oldData)\n\n    from pyexcel_xlsx import save_data\n    \n    save_data(fPath, tabularData)", "docstring": "Workaround to export to a .xlsx file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "juraj-google-style"}
{"code": "def _OpenFileObject(self, path_spec):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    parent_path_spec = path_spec.parent\n\n    file_system = resolver.Resolver.OpenFileSystem(\n        parent_path_spec, resolver_context=self._resolver_context)\n\n    \n    \n    segment_file_path_specs = ewf.EWFGlobPathSpec(file_system, path_spec)\n    if not segment_file_path_specs:\n      return None\n\n    if parent_path_spec.IsSystemLevel():\n      \n      self._resolver_context.SetMaximumNumberOfFileObjects(\n          len(segment_file_path_specs) + 127)\n\n    for segment_file_path_spec in segment_file_path_specs:\n      file_object = resolver.Resolver.OpenFileObject(\n          segment_file_path_spec, resolver_context=self._resolver_context)\n      self._file_objects.append(file_object)\n\n    ewf_handle = pyewf.handle()\n    ewf_handle.open_file_objects(self._file_objects)\n    return ewf_handle", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyewf.handle: a file-like object or None.\n\nRaises:\nPathSpecError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def onkeyup(self, key, keycode, ctrl, shift, alt):\n        \n        return (key, keycode, ctrl, shift, alt)", "docstring": "Called when user types and releases a key.\nThe widget should be able to receive the focus in order to emit the event.\nAssign a 'tabindex' attribute to make it focusable.\n\nArgs:\nkey (str): the character value\nkeycode (str): the numeric char code", "source": "juraj-google-style"}
{"code": "def __init__(self, fn, job_id, *args, **kwargs):\n    \n    super(LambdaJob, self).__init__(job_id)\n    self._future = _async.async.executor.submit(fn, *args, **kwargs)", "docstring": "Initializes an instance of a Job.\n\nArgs:\nfn: the lambda function to execute asyncronously\njob_id: an optional ID for the job. If None, a UUID will be generated.", "source": "juraj-google-style"}
{"code": "def size_filter(labeled_grid, min_size):\n    out_grid = np.zeros(labeled_grid.shape, dtype=int)\n    slices = find_objects(labeled_grid)\n    j = 1\n    for (i, s) in enumerate(slices):\n        box = labeled_grid[s]\n        size = np.count_nonzero((box.ravel() == (i + 1)))\n        if ((size >= min_size) and (box.shape[0] > 1) and (box.shape[1] > 1)):\n            out_grid[np.where((labeled_grid == (i + 1)))] = j\n            j += 1\n    return out_grid", "docstring": "Remove labeled objects that do not meet size threshold criteria.\n\nArgs:\nlabeled_grid: 2D output from label method.\nmin_size: minimum size of object in pixels.\n\nReturns:\nlabeled grid with smaller objects removed.", "source": "codesearchnet"}
{"code": "def save_shared_file(self, sharekey=None):\n    endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)\n    data = self._make_request('POST', endpoint=endpoint, data=None)\n    try:\n        sf = SharedFile.NewFromJSON(data)\n        sf.saved = True\n        return sf\n    except:\n        raise Exception('{0}'.format(data['error']))", "docstring": "Save a SharedFile to your Shake.\n\nArgs:\nsharekey (str): Sharekey for the file to save.\n\nReturns:\nSharedFile saved to your shake.", "source": "codesearchnet"}
{"code": "def create_resource(self, resource_type=None, uri=None):\n\n\t\t\n\n\t\tif resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:\n\t\t\treturn resource_type(self, uri)\n\t\telse:\n\t\t\traise TypeError(\"expecting Resource type, such as BasicContainer or NonRDFSource\")", "docstring": "Convenience method for creating a new resource\n\nNote: A Resource is instantiated, but is not yet created.  Still requires resource.create().\n\nArgs:\nuri (rdflib.term.URIRef, str): uri of resource to create\nresource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer):  resource type to create\n\nReturns:\n(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type", "source": "juraj-google-style"}
{"code": "def get_file(profile, branch, file_path):\n    \n    branch_sha = get_branch_sha(profile, branch)\n    tree = get_files_in_branch(profile, branch_sha)\n    match = None\n    for item in tree:\n        if item.get(\"path\") == file_path:\n            match = item\n            break\n    file_sha = match.get(\"sha\")\n    blob = blobs.get_blob(profile, file_sha)\n    content = blob.get(\"content\")\n    decoded_content = b64decode(content)\n    return decoded_content.decode(\"utf-8\")", "docstring": "Get a file from a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch\nThe name of a branch.\n\nfile_path\nThe path of the file to fetch.\n\nReturns:\nThe (UTF-8 encoded) content of the file, as a string.", "source": "juraj-google-style"}
{"code": "def list_depth(list_, func=max, _depth=0):\n    depth_list = [list_depth(item, func=func, _depth=(_depth + 1)) for item in list_ if util_type.is_listlike(item)]\n    if (len(depth_list) > 0):\n        return func(depth_list)\n    else:\n        return _depth", "docstring": "Returns the deepest level of nesting within a list of lists\n\nArgs:\nlist_  : a nested listlike object\nfunc   : depth aggregation strategy (defaults to max)\n_depth : internal var\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_list import *  # NOQA\n>>> list_ = [[[[[1]]], [3]], [[1], [3]], [[1], [3]]]\n>>> result = (list_depth(list_, _depth=0))\n>>> print(result)", "source": "codesearchnet"}
{"code": "def _tavella_randell_nonuniform_grid(x_min, x_max, x_star, num_grid_points, alpha, dtype):\n    c1 = tf.math.asinh((x_min - x_star) / alpha)\n    c2 = tf.math.asinh((x_max - x_star) / alpha)\n    i = tf.expand_dims(tf.range(0, num_grid_points + 1, 1, dtype=dtype), axis=-1)\n    grid = x_star + alpha * tf.math.sinh(c2 * i / num_grid_points + c1 * (1 - i / num_grid_points))\n    return tf.transpose(grid)", "docstring": "Creates non-uniform grid clustered around a specified point.\n\nArgs:\nx_min: A real `Tensor` of shape `(dim,)` specifying the lower limit of the\ngrid.\nx_max: A real `Tensor` of same shape and dtype as `x_min` specifying the\nupper limit of the grid.\nx_star: A real `Tensor` of same shape and dtype as `x_min` specifying the\nlocation on the grid around which higher grid density is desired.\nnum_grid_points: A scalar integer `Tensor` specifying the number of points\non the grid.\nalpha: A scalar parameter which controls the degree of non-uniformity of the\ngrid. The smaller values of `alpha` correspond to greater degree of\nclustering around `x_star`.\ndtype: The default dtype to use when converting values to `Tensor`s.\n\nReturns:\nA real `Tensor` of shape `(dim, num_grid_points+1)` containing the\nnon-uniform grid.", "source": "github-repos"}
{"code": "def __call__(self, data: List) -> np.ndarray:\n        \n        \n        \n        max_length = max(len(x) for x in data)\n        answer = np.zeros(shape=(len(data), max_length, self.dim), dtype=int)\n        for i, sent in enumerate(data):\n            for j, word in enumerate(sent):\n                answer[i, j][self._get_word_indexes(word)] = 1\n        return answer", "docstring": "Transforms words to one-hot encoding according to the dictionary.\n\nArgs:\ndata: the batch of words\n\nReturns:\na 3D array. answer[i][j][k] = 1 iff data[i][j] is the k-th word in the dictionary.", "source": "juraj-google-style"}
{"code": "def object_upload(self, bucket, key, content, content_type):\n    \n    args = {'uploadType': 'media', 'name': key}\n    headers = {'Content-Type': content_type}\n\n    url = Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))\n    return google.datalab.utils.Http.request(url, args=args, data=content, headers=headers,\n                                             credentials=self._credentials, raw_response=True)", "docstring": "Writes text content to the object.\n\nArgs:\nbucket: the name of the bucket containing the object.\nkey: the key of the object to be written.\ncontent: the text content to be written.\ncontent_type: the type of text content.\nRaises:\nException if the object could not be written to.", "source": "juraj-google-style"}
{"code": "def receive_bytes(self, data):\n    i = 0\n    n = len(data)\n    responses = []\n    while (i < n):\n        if (not self._receiving):\n            bytes_to_read = min((4 - self._header.tell()), (n - i))\n            self._header.write(data[i:(i + bytes_to_read)])\n            i += bytes_to_read\n            if (self._header.tell() == 4):\n                self._header.seek(0)\n                nbytes = Int32.decode(self._header)\n                self._rbuffer = KafkaBytes(nbytes)\n                self._receiving = True\n            elif (self._header.tell() > 4):\n                raise Errors.KafkaError('this should not happen - are you threading?')\n        if self._receiving:\n            total_bytes = len(self._rbuffer)\n            staged_bytes = self._rbuffer.tell()\n            bytes_to_read = min((total_bytes - staged_bytes), (n - i))\n            self._rbuffer.write(data[i:(i + bytes_to_read)])\n            i += bytes_to_read\n            staged_bytes = self._rbuffer.tell()\n            if (staged_bytes > total_bytes):\n                raise Errors.KafkaError('Receive buffer has more bytes than expected?')\n            if (staged_bytes != total_bytes):\n                break\n            self._receiving = False\n            self._rbuffer.seek(0)\n            resp = self._process_response(self._rbuffer)\n            responses.append(resp)\n            self._reset_buffer()\n    return responses", "docstring": "Process bytes received from the network.\n\nArguments:\ndata (bytes): any length bytes received from a network connection\nto a kafka broker.\n\nReturns:\nresponses (list of (correlation_id, response)): any/all completed\nresponses, decoded from bytes to python objects.\n\nRaises:\nKafkaProtocolError: if the bytes received could not be decoded.\nCorrelationIdError: if the response does not match the request\ncorrelation id.", "source": "codesearchnet"}
{"code": "def get_token(self,\n                  token_name,\n                  project_name,\n                  dataset_name):\n        \n        url = self.url() + \"/nd/resource/dataset/{}\".format(dataset_name)\\\n            + \"/project/{}\".format(project_name)\\\n            + \"/token/{}/\".format(token_name)\n        req = self.remote_utils.get_url(url)\n\n        if req.status_code is not 200:\n            raise RemoteDataUploadError('Could not find {}'.format(req.text))\n        else:\n            return req.json()", "docstring": "Get a token with the given parameters.\nArguments:\nproject_name (str): Project name\ndataset_name (str): Dataset name project is based on\ntoken_name (str): Token name\nReturns:\ndict: Token info", "source": "juraj-google-style"}
{"code": "def unpack_small_tensors(tower_grads, packing):\n    if (not packing):\n        return tower_grads\n    new_tower_grads = []\n    num_devices = len(tower_grads)\n    num_packed = (len(packing.keys()) \n    for (dev_idx, gv_list) in enumerate(tower_grads):\n        new_gv_list = gv_list[num_packed:]\n        for i in xrange(0, num_packed):\n            k = ('%d:%d' % (dev_idx, i))\n            gpt = packing[k]\n            gv = unpack_grad_tuple(gv_list[i], gpt)\n            for (gi, idx) in enumerate(gpt.indices):\n                assert (idx == gpt.indices[gi])\n                new_gv_list.insert(idx, gv[gi])\n        new_tower_grads.append(new_gv_list)\n    return new_tower_grads", "docstring": "Undo the structure alterations to tower_grads done by pack_small_tensors.\n\nArgs:\ntower_grads: List of List of (grad, var) tuples.\npacking: A dict generated by pack_small_tensors describing the changes\nit made to tower_grads.\n\nReturns:\nnew_tower_grads: identical to tower_grads except that concatentations\nof small tensors have been split apart and returned to their original\npositions, paired with their original variables.", "source": "codesearchnet"}
{"code": "def console_get_alignment(con: tcod.console.Console) -> int:\n    return int(lib.TCOD_console_get_alignment(_console(con)))", "docstring": "Return this consoles current alignment mode.\n\nArgs:\ncon (Console): Any Console instance.\n\n.. deprecated:: 8.5\nCheck :any:`Console.default_alignment` instead.", "source": "codesearchnet"}
{"code": "def GetSources(self, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    return self.SOURCE_SHORT, self.SOURCE_LONG", "docstring": "Determines the the short and long source for an event object.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): short and long source string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def create_dummy_object(name: str, backend_name: str) -> str:\n    if name.isupper():\n        return DUMMY_CONSTANT.format(name)\n    elif name.islower():\n        return DUMMY_FUNCTION.format(name, backend_name)\n    else:\n        return DUMMY_CLASS.format(name, backend_name)", "docstring": "Create the code for a dummy object.\n\nArgs:\nname (`str`): The name of the object.\nbackend_name (`str`): The name of the backend required for that object.\n\nReturns:\n`str`: The code of the dummy object.", "source": "github-repos"}
{"code": "def get(query):\n    conversion_funcs = _tensor_conversion_func_cache.get(query)\n    if conversion_funcs is None:\n        with _tensor_conversion_func_lock:\n            conversion_funcs = _tensor_conversion_func_cache.get(query)\n            if conversion_funcs is None:\n                conversion_funcs = []\n                for _, funcs_at_priority in sorted(_tensor_conversion_func_registry.items()):\n                    conversion_funcs.extend(((base_type, conversion_func) for base_type, conversion_func in funcs_at_priority if issubclass(query, base_type)))\n                _tensor_conversion_func_cache[query] = conversion_funcs\n    return conversion_funcs", "docstring": "Get conversion function for objects of `cls`.\n\nArgs:\nquery: The type to query for.\n\nReturns:\nA list of conversion functions in increasing order of priority.", "source": "github-repos"}
{"code": "def DecryptMessage(self, encrypted_response):\n    try:\n        response_comms = rdf_flows.ClientCommunication.FromSerializedString(encrypted_response)\n        return self.DecodeMessages(response_comms)\n    except (rdfvalue.DecodeError, type_info.TypeValueError, ValueError, AttributeError) as e:\n        raise DecodingError(('Error while decrypting messages: %s' % e))", "docstring": "Decrypt the serialized, encrypted string.\n\nArgs:\nencrypted_response: A serialized and encrypted string.\n\nReturns:\na Packed_Message_List rdfvalue", "source": "codesearchnet"}
{"code": "def update_memo(self, task_id, task, r):\n        \n        if not self.memoize or not task['memoize']:\n            return\n\n        if task['hashsum'] in self.memo_lookup_table:\n            logger.info('Updating appCache entry with latest %s:%s call' %\n                        (task['func_name'], task_id))\n            self.memo_lookup_table[task['hashsum']] = r\n        else:\n            self.memo_lookup_table[task['hashsum']] = r", "docstring": "Updates the memoization lookup table with the result from a task.\n\nArgs:\n- task_id (int): Integer task id\n- task (dict) : A task dict from dfk.tasks\n- r (Result future): Result future\n\nA warning is issued when a hash collision occurs during the update.\nThis is not likely.", "source": "juraj-google-style"}
{"code": "def fixed_padding(inputs, kernel_size, data_format):\n    pad_total = (kernel_size - 1)\n    pad_beg = (pad_total \n    pad_end = (pad_total - pad_beg)\n    if (data_format == 'channels_first'):\n        padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n    else:\n        padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n    return padded_inputs", "docstring": "Pads the input along the spatial dimensions independently of input size.\n\nArgs:\ninputs: A tensor of size [batch, channels, height_in, width_in] or\n[batch, height_in, width_in, channels] depending on data_format.\nkernel_size: The kernel to be used in the conv2d or max_pool2d operation.\nShould be a positive integer.\ndata_format: The input format ('channels_last' or 'channels_first').\n\nReturns:\nA tensor with the same format as the input with the data either intact\n(if kernel_size == 1) or padded (if kernel_size > 1).", "source": "codesearchnet"}
{"code": "def from_parameters(cls, parameters: Dict[str, Any], dna_spec: DNASpec, use_literal_values: bool=False) -> 'DNA':\n    del use_literal_values\n    return cls.from_dict(parameters, dna_spec)", "docstring": "Create DNA from parameters based on DNASpec.\n\nDeprecated: use `from_dict` instead.\n\nArgs:\nparameters: A 1-depth dict of parameter names to parameter values.\ndna_spec: DNASpec to interpret the parameters.\nuse_literal_values: If True, parameter values are literal values from\nDNASpec.\n\nReturns:\nDNA instance bound with the DNASpec.\n\nRaises:\nValueError: If parameters are not aligned with DNA spec.", "source": "github-repos"}
{"code": "def query(botcust2, message):\n    logger.debug('Getting Mitsuku reply')\n    params = {'botid': 'f6a012073e345a08', 'amp;skin': 'chat'}\n    headers = {'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.8', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Content-Length': str((len(message) + 34)), 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': ('botcust2=' + botcust2), 'DNT': '1', 'Host': 'kakko.pandorabots.com', 'Origin': 'https:\n    data = {'botcust2': botcust2, 'message': message}\n    logger.debug('Sending POST request')\n    response = requests.post(url, params=params, headers=headers, data=data)\n    logger.debug('POST response {}'.format(response))\n    parsed = lxml.html.parse(io.StringIO(response.text)).getroot()\n    try:\n        result = parsed[1][2][0][2].tail[1:]\n        logger.debug('Getting botcust2 successful')\n    except IndexError:\n        result = False\n        logger.critical('Getting botcust2 from html failed')\n    return result", "docstring": "Sends a message to Mitsuku and retrieves the reply\n\nArgs:\nbotcust2 (str): The botcust2 identifier\nmessage (str): The message to send to Mitsuku\n\nReturns:\nreply (str): The message Mitsuku sent back", "source": "codesearchnet"}
{"code": "def _update_general_statistics(a_float, dist):\n    if (not dist.count):\n        dist.count = 1\n        dist.maximum = a_float\n        dist.minimum = a_float\n        dist.mean = a_float\n        dist.sumOfSquaredDeviation = 0\n    else:\n        old_count = dist.count\n        old_mean = dist.mean\n        new_mean = (((old_count * old_mean) + a_float) / (old_count + 1))\n        delta_sum_squares = ((a_float - old_mean) * (a_float - new_mean))\n        dist.count += 1\n        dist.mean = new_mean\n        dist.maximum = max(a_float, dist.maximum)\n        dist.minimum = min(a_float, dist.minimum)\n        dist.sumOfSquaredDeviation += delta_sum_squares", "docstring": "Adds a_float to distribution, updating the statistics fields.\n\nArgs:\na_float (float): a new value\ndist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nthe Distribution being updated", "source": "codesearchnet"}
{"code": "def node_filter(self, name, **kwargs):\n\n    def decorator(func):\n        self.filters[name] = NodeFilter(name, func, **kwargs)\n    return decorator", "docstring": "Returns a decorator function for adding a node filter.\n\nArgs:\nname (str): The name of the filter.\n**kwargs: Variable keyword arguments for the filter.\n\nReturns:\nCallable[[Callable[[Element, Any], bool]]]: A decorator function for adding a node\nfilter.", "source": "codesearchnet"}
{"code": "def get_appliance_event_after_time(self, location_id, since, per_page=None, page=None, min_power=None):\n    url = 'https:\n    headers = self.__gen_headers()\n    headers['Content-Type'] = 'application/json'\n    params = {'locationId': location_id, 'since': since}\n    if min_power:\n        params['minPower'] = min_power\n    if per_page:\n        params['perPage'] = per_page\n    if page:\n        params['page'] = page\n    url = self.__append_url_params(url, params)\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get appliance events by location Id after defined time.\n\nArgs:\nlocation_id (string): hexadecimal id of the sensor to query, e.g.\n``0x0013A20040B65FAD``\nsince (string): ISO 8601 start time for getting the events that are created or updated after it.\nMaxiumim value allowed is 1 day from the current time.\nmin_power (string): The minimum average power (in watts) for filtering.\nOnly events with an average power above this value will be returned.\n(default: 400)\nper_page (string, optional): the number of returned results per page\n(min 1, max 500) (default: 10)\npage (string, optional): the page number to return (min 1, max 100000)\n(default: 1)\n\nReturns:\nlist: dictionary objects containing appliance events meeting specified criteria", "source": "codesearchnet"}
{"code": "def metaclass(*metaclasses):\n    \n    \n\n    def _inner(cls):\n        \n        metabases = tuple(\n            collections.OrderedDict(  \n                (c, None) for c in (metaclasses + (type(cls),))\n            ).keys()\n        )\n        \n        _Meta = metabases[0]\n        for base in metabases[1:]:\n\n            class _Meta(base, _Meta):  \n                pass\n\n        return six.add_metaclass(_Meta)(cls)\n\n    return _inner", "docstring": "Create the class using all metaclasses.\n\nArgs:\nmetaclasses: A tuple of metaclasses that will be used to generate and\nreplace a specified class.\n\nReturns:\nA decorator that will recreate the class using the specified\nmetaclasses.", "source": "juraj-google-style"}
{"code": "def verify_token(id_token, request, audience=None, certs_url=_GOOGLE_OAUTH2_CERTS_URL):\n    certs = _fetch_certs(request, certs_url)\n    return jwt.decode(id_token, certs=certs, audience=audience)", "docstring": "Verifies an ID token and returns the decoded token.\n\nArgs:\nid_token (Union[str, bytes]): The encoded token.\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.\naudience (str): The audience that this token is intended for. If None\nthen the audience is not verified.\ncerts_url (str): The URL that specifies the certificates to use to\nverify the token. This URL should return JSON in the format of\n``{'key id': 'x509 certificate'}``.\n\nReturns:\nMapping[str, Any]: The decoded token.", "source": "codesearchnet"}
{"code": "def map(self, ID_s, FROM=None, TO=None, target_as_set=False, no_match_sub=None):\n\n    def io_mode(ID_s):\n        '\\n            Handles the input/output modalities of the mapping.\\n            '\n        unlist_return = False\n        list_of_lists = False\n        if isinstance(ID_s, str):\n            ID_s = [ID_s]\n            unlist_return = True\n        elif isinstance(ID_s, list):\n            if ((len(ID_s) > 0) and isinstance(ID_s[0], list)):\n                list_of_lists = True\n        return (ID_s, unlist_return, list_of_lists)\n    if (FROM == TO):\n        return ID_s\n    (ID_s, unlist_return, list_of_lists) = io_mode(ID_s)\n    if list_of_lists:\n        mapped_ids = [self.map(ID, FROM, TO, target_as_set, no_match_sub) for ID in ID_s]\n    else:\n        mapped_ids = self._map(ID_s, FROM, TO, target_as_set, no_match_sub)\n    if unlist_return:\n        return mapped_ids[0]\n    return Mapping(ID_s, mapped_ids)", "docstring": "The main method of this class and the essence of the package.\nIt allows to \"map\" stuff.\n\nArgs:\n\nID_s: Nested lists with strings as leafs (plain strings also possible)\nFROM (str): Origin key for the mapping (default: main key)\nTO (str): Destination key for the mapping (default: main key)\ntarget_as_set (bool): Whether to summarize the output as a set (removes duplicates)\nno_match_sub: Object representing the status of an ID not being able to be matched\n(default: None)\n\nReturns:\n\nMapping: a mapping object capturing the result of the mapping request", "source": "codesearchnet"}
{"code": "def whois_nameservers(self, nameservers):\n    api_name = 'opendns-whois-nameservers'\n    fmt_url_path = u'whois/nameservers/{0}'\n    return self._multi_get(api_name, fmt_url_path, nameservers)", "docstring": "Calls WHOIS Nameserver end point\n\nArgs:\nemails: An enumerable of nameservers\nReturns:\nA dict of {nameserver: domain_result}", "source": "codesearchnet"}
{"code": "class PatchTSMixerPretrainHead(nn.Module):\n\n    def __init__(self, config: PatchTSMixerConfig):\n        super().__init__()\n        self.dropout_layer = nn.Dropout(config.head_dropout)\n        self.base_pt_block = nn.Linear(config.d_model, config.patch_length)\n\n    def forward(self, hidden_features):\n        \n        hidden_features = self.dropout_layer(hidden_features)\n        forecast = self.base_pt_block(hidden_features)\n        return forecast", "docstring": "Pretraining head.\n\nArgs:\nconfig (`PatchTSMixerConfig`):\nConfiguration.", "source": "github-repos"}
{"code": "def tracers(tracersfile):\n    if (not tracersfile.is_file()):\n        return None\n    tra = {}\n    with tracersfile.open('rb') as fid:\n        readbin = partial(_readbin, fid)\n        magic = readbin()\n        if (magic > 8000):\n            magic -= 8000\n            readbin()\n            readbin = partial(readbin, file64=True)\n        if (magic < 100):\n            raise ParsingError(tracersfile, 'magic > 100 expected to get tracervar info')\n        nblk = (magic % 100)\n        readbin('f', 2)\n        readbin()\n        readbin('f')\n        ninfo = readbin()\n        ntra = readbin(nwords=nblk, unpack=False)\n        readbin('f')\n        curv = readbin()\n        if curv:\n            readbin('f')\n        infos = []\n        for _ in range(ninfo):\n            infos.append(b''.join(readbin('b', 16)).strip().decode())\n            tra[infos[(- 1)]] = []\n        if (magic > 200):\n            ntrace_elt = readbin()\n            if (ntrace_elt > 0):\n                readbin('f', ntrace_elt)\n        for ntrab in ntra:\n            data = readbin('f', (ntrab * ninfo))\n            for (idx, info) in enumerate(infos):\n                tra[info].append(data[idx::ninfo])\n    return tra", "docstring": "Extract tracers data.\n\nArgs:\ntracersfile (:class:`pathlib.Path`): path of the binary tracers file.\n\nReturns:\ndict of list of numpy.array:\nTracers data organized by attribute and block.", "source": "codesearchnet"}
{"code": "def timedelta(self, time_input1, time_input2):\n    time_input1 = self.any_to_datetime(time_input1)\n    time_input2 = self.any_to_datetime(time_input2)\n    diff = (time_input1 - time_input2)\n    delta = relativedelta(time_input1, time_input2)\n    total_months = ((delta.years * 12) + delta.months)\n    total_weeks = (((delta.years * 52) + (total_months * 4)) + delta.weeks)\n    total_days = diff.days\n    total_hours = ((total_days * 24) + delta.hours)\n    total_minutes = ((total_hours * 60) + delta.minutes)\n    total_seconds = ((total_minutes * 60) + delta.seconds)\n    total_microseconds = ((total_seconds * 1000) + delta.microseconds)\n    return {'datetime_1': time_input1.isoformat(), 'datetime_2': time_input2.isoformat(), 'years': delta.years, 'months': delta.months, 'weeks': delta.weeks, 'days': delta.days, 'hours': delta.hours, 'minutes': delta.minutes, 'seconds': delta.seconds, 'microseconds': delta.microseconds, 'total_months': total_months, 'total_weeks': total_weeks, 'total_days': total_days, 'total_hours': total_hours, 'total_minutes': total_minutes, 'total_seconds': total_seconds, 'total_microseconds': total_microseconds}", "docstring": "Calculates time delta between two time expressions.\n\nArgs:\ntime_input1 (string): The time input string (see formats above).\ntime_input2 (string): The time input string (see formats above).\n\nReturns:\n(dict): Dict with delta values.", "source": "codesearchnet"}
{"code": "def CompileReport(self, mediator):\n    \n    results = {}\n    for key, count in iter(self._counter.items()):\n      search_engine, _, search_term = key.partition(':')\n      results.setdefault(search_engine, {})\n      results[search_engine][search_term] = count\n\n    lines_of_text = []\n    for search_engine, terms in sorted(results.items()):\n      lines_of_text.append(' == ENGINE: {0:s} =='.format(search_engine))\n\n      for search_term, count in sorted(\n          terms.items(), key=lambda x: (x[1], x[0]), reverse=True):\n        lines_of_text.append('{0:d} {1:s}'.format(count, search_term))\n\n      \n      lines_of_text.append('')\n\n    lines_of_text.append('')\n    report_text = '\\n'.join(lines_of_text)\n    analysis_report = reports.AnalysisReport(\n        plugin_name=self.NAME, text=report_text)\n    analysis_report.report_array = self._search_term_timeline\n    analysis_report.report_dict = results\n    return analysis_report", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: analysis report.", "source": "juraj-google-style"}
{"code": "def start_standing_subprocess(cmd, shell=False, env=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE):\n    logging.debug('Starting standing subprocess with: %s', cmd)\n    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, shell=shell, env=env)\n    proc.stdin.close()\n    proc.stdin = None\n    logging.debug('Started standing subprocess %d', proc.pid)\n    return proc", "docstring": "Starts a long-running subprocess.\n\nThis is not a blocking call and the subprocess started by it should be\nexplicitly terminated with stop_standing_subprocess.\n\nFor short-running commands, you should use subprocess.check_call, which\nblocks.\n\nArgs:\ncmd: string, the command to start the subprocess with.\nshell: bool, True to run this command through the system shell,\nFalse to invoke it directly. See subprocess.Popen() docs.\nenv: dict, a custom environment to run the standing subprocess. If not\nspecified, inherits the current environment. See subprocess.Popen()\ndocs.\nstdout: None, subprocess.PIPE, subprocess.DEVNULL, an existing file\ndescriptor, or an existing file object. See subprocess.Popen() docs.\nstderr: None, subprocess.PIPE, subprocess.DEVNULL, an existing file\ndescriptor, or an existing file object. See subprocess.Popen() docs.\n\nReturns:\nThe subprocess that was started.", "source": "github-repos"}
{"code": "def trees_by_issn(self, issn):\n        \n        return set(\n            self.issn_db.get(issn, OOSet()).keys()\n        )", "docstring": "Search trees by `issn`.\n\nArgs:\nissn (str): :attr:`.Tree.issn` property of :class:`.Tree`.\n\nReturns:\nset: Set of matching :class:`Tree` instances.", "source": "juraj-google-style"}
{"code": "def build(self, var_list):\n    if self.built:\n        return\n    super().build(var_list)\n    self.adam_momentums = {}\n    self.adam_velocities = {}\n    self.muon_momentums = {}\n    self.muon_velocities = {}\n    for var in var_list:\n        if not self._overwrite_variable_with_gradient(var):\n            self.adam_momentums[var.path] = self.add_variable_from_reference(reference_variable=var, name='momentum')\n            if self._should_use_adamw(var):\n                self.adam_velocities[var.path] = self.add_variable_from_reference(reference_variable=var, name='velocity')", "docstring": "Initialize optimizer variables.\n\nAdam optimizer has 3 types of variables: momentums, velocities and\nvelocity_hat (only set when amsgrad is applied),\n\nArgs:\nvar_list: list of model variables to build Adam variables on.", "source": "github-repos"}
{"code": "def silence(warning, silence=True):\n    if (not isinstance(warning, int)):\n        raise ValueError('Input to silence should be a warning object - not of type {}'.format(type(warning)))\n    if silence:\n        __silencers__.add(warning)\n    elif (warning in __silencers__):\n        __silencers__.remove(warning)\n    return __silencers__", "docstring": "Silence a particular warning on all Bokeh models.\n\nArgs:\nwarning (Warning) : Bokeh warning to silence\nsilence (bool) : Whether or not to silence the warning\n\nReturns:\nA set containing the all silenced warnings\n\nThis function adds or removes warnings from a set of silencers which\nis referred to when running ``check_integrity``. If a warning\nis added to the silencers - then it will never be raised.\n\n.. code-block:: python\n\n>>> from bokeh.core.validation.warnings import EMPTY_LAYOUT\n>>> bokeh.core.validation.silence(EMPTY_LAYOUT, True)\n{1002}\n\nTo turn a warning back on use the same method but with the silence\nargument set to false\n\n.. code-block:: python\n\n>>> bokeh.core.validation.silence(EMPTY_LAYOUT, False)\nset()", "source": "codesearchnet"}
{"code": "def prange(N=1, dim=1):\n    \n    A = {}\n    r = numpy.arange(N, dtype=int)\n    key = numpy.zeros(dim, dtype=int)\n    for i in range(N):\n        key[-1] = i\n        A[tuple(key)] = 1*(r==i)\n\n    return Poly(A, dim, (N,), int)", "docstring": "Constructor to create a range of polynomials where the exponent vary.\n\nArgs:\nN (int):\nNumber of polynomials in the array.\ndim (int):\nThe dimension the polynomial should span.\n\nReturns:\n(Poly):\nA polynomial array of length N containing simple polynomials with\nincreasing exponent.\n\nExamples:\n>>> print(prange(4))\n[1, q0, q0^2, q0^3]\n>>> print(prange(4, dim=3))\n[1, q2, q2^2, q2^3]", "source": "juraj-google-style"}
{"code": "def get_server(self, name):\n        \n        mech = self.get(name)\n        return mech if isinstance(mech, ServerMechanism) else None", "docstring": "Like :meth:`.get`, but only mechanisms inheriting\n:class:`ServerMechanism` will be returned.\n\nArgs:\nname: The SASL mechanism name.\n\nReturns:\nThe mechanism object or ``None``", "source": "juraj-google-style"}
{"code": "def of(cls, msg_header: MessageHeader) -> 'MessageDecoder':\n        \n        cte_hdr = msg_header.parsed.content_transfer_encoding\n        return cls.of_cte(cte_hdr)", "docstring": "Return a decoder from the message header object.\n\nSee Also:\n:meth:`.of_cte`\n\nArgs:\nmsg_header: The message header object.", "source": "juraj-google-style"}
{"code": "def is_diagonal(matrix: np.ndarray, *, atol: float = 1e-8) -> bool:\n    \n    matrix = np.copy(matrix)\n    for i in range(min(matrix.shape)):\n        matrix[i, i] = 0\n    return tolerance.all_near_zero(matrix, atol=atol)", "docstring": "Determines if a matrix is a approximately diagonal.\n\nA matrix is diagonal if i!=j implies m[i,j]==0.\n\nArgs:\nmatrix: The matrix to check.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the matrix is diagonal within the given tolerance.", "source": "juraj-google-style"}
{"code": "def get_acmg(acmg_terms):\n    prediction = 'uncertain_significance'\n    pvs = False\n    ps_terms = []\n    pm_terms = []\n    pp_terms = []\n    ba = False\n    bs_terms = []\n    bp_terms = []\n    for term in acmg_terms:\n        if term.startswith('PVS'):\n            pvs = True\n        elif term.startswith('PS'):\n            ps_terms.append(term)\n        elif term.startswith('PM'):\n            pm_terms.append(term)\n        elif term.startswith('PP'):\n            pp_terms.append(term)\n        elif term.startswith('BA'):\n            ba = True\n        elif term.startswith('BS'):\n            bs_terms.append(term)\n        elif term.startswith('BP'):\n            bp_terms.append(term)\n    pathogenic = is_pathogenic(pvs, ps_terms, pm_terms, pp_terms)\n    likely_pathogenic = is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms)\n    benign = is_benign(ba, bs_terms)\n    likely_benign = is_likely_benign(bs_terms, bp_terms)\n    if (pathogenic or likely_pathogenic):\n        if (benign or likely_benign):\n            prediction = 'uncertain_significance'\n        elif pathogenic:\n            prediction = 'pathogenic'\n        else:\n            prediction = 'likely_pathogenic'\n    else:\n        if benign:\n            prediction = 'benign'\n        if likely_benign:\n            prediction = 'likely_benign'\n    return prediction", "docstring": "Use the algorithm described in ACMG paper to get a ACMG calssification\n\nArgs:\nacmg_terms(set(str)): A collection of prediction terms\n\nReturns:\nprediction(int):\n0 - Uncertain Significanse\n1 - Benign\n2 - Likely Benign\n3 - Likely Pathogenic\n4 - Pathogenic", "source": "codesearchnet"}
{"code": "def generate_output_asn(self, json_data=None, hr=True, show_name=False, colorize=True):\n    if (json_data is None):\n        json_data = {}\n    keys = {'asn', 'asn_cidr', 'asn_country_code', 'asn_date', 'asn_registry', 'asn_description'}.intersection(json_data)\n    output = ''\n    for key in keys:\n        output += generate_output(line='0', short=(HR_ASN[key]['_short'] if hr else key), name=(HR_ASN[key]['_name'] if (hr and show_name) else None), value=(json_data[key] if ((json_data[key] is not None) and (len(json_data[key]) > 0) and (json_data[key] != 'NA')) else 'None'), colorize=colorize)\n    return output", "docstring": "The function for generating CLI output ASN results.\n\nArgs:\njson_data (:obj:`dict`): The data to process. Defaults to None.\nhr (:obj:`bool`): Enable human readable key translations. Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"}
{"code": "def __init__(self, library, options=None):\n    if platform.python_implementation() != 'CPython':\n        raise RuntimeError('Delegates are currently only supported into CPythondue to missing immediate reference counting.')\n    self._library = ctypes.pydll.LoadLibrary(library)\n    self._library.tflite_plugin_create_delegate.argtypes = [ctypes.POINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_char_p), ctypes.c_int, ctypes.CFUNCTYPE(None, ctypes.c_char_p)]\n    self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p\n    options = options or {}\n    options_keys = (ctypes.c_char_p * len(options))()\n    options_values = (ctypes.c_char_p * len(options))()\n    for idx, (key, value) in enumerate(options.items()):\n        options_keys[idx] = str(key).encode('utf-8')\n        options_values[idx] = str(value).encode('utf-8')\n\n    class ErrorMessageCapture:\n\n        def __init__(self):\n            self.message = ''\n\n        def report(self, x):\n            self.message += x if isinstance(x, str) else x.decode('utf-8')\n    capture = ErrorMessageCapture()\n    error_capturer_cb = ctypes.CFUNCTYPE(None, ctypes.c_char_p)(capture.report)\n    self._delegate_ptr = self._library.tflite_plugin_create_delegate(options_keys, options_values, len(options), error_capturer_cb)\n    if self._delegate_ptr is None:\n        raise ValueError(capture.message)", "docstring": "Loads delegate from the shared library.\n\nArgs:\nlibrary: Shared library name.\noptions: Dictionary of options that are required to load the delegate. All\nkeys and values in the dictionary should be serializable. Consult the\ndocumentation of the specific delegate for required and legal options.\n(default None)\n\nRaises:\nRuntimeError: This is raised if the Python implementation is not CPython.", "source": "github-repos"}
{"code": "def _GetPropertyValue(self, parser_mediator, properties, property_name):\n    \n    property_value = properties.get(property_name, None)\n    if isinstance(property_value, py2to3.BYTES_TYPE):\n      try:\n        \n        property_value = property_value.decode('utf-8')\n      except UnicodeDecodeError:\n        parser_mediator.ProduceExtractionWarning(\n            'unable to decode property: {0:s}'.format(property_name))\n\n    return property_value", "docstring": "Retrieves a property value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nproperties (dict[str, object]): properties.\nproperty_name (str): name of the property.\n\nReturns:\nstr: property value.", "source": "juraj-google-style"}
{"code": "def comment(data, what):\n    \n    data = data.splitlines()\n\n    data = map(\n        lambda x: \"\n        data\n    )\n\n    return \"\\n\".join(data)", "docstring": "Comments line containing `what` in string `data`.\n\nArgs:\ndata (str): Configuration file in string.\nwhat (str): Line which will be commented out.\n\nReturns:\nstr: Configuration file with commented `what`.", "source": "juraj-google-style"}
{"code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError", "docstring": "Divide this variable by `tf.IndexedSlices`.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to divide this variable by.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def _post_process(self, feed_item, new_item):\n    for third_party_url in feed_item.get('third_party_urls', []):\n        third_party_url[FieldMap.CREATIVE_ID] = new_item['id']\n        third_party_url[FieldMap.CREATIVE_NAME] = new_item['name']\n    for association in feed_item.get('associations', []):\n        association[FieldMap.CREATIVE_ID] = self.get(association)['id']\n        association[FieldMap.CREATIVE_NAME] = self.get(association)['name']\n        dcm_association = self.creative_asset_dao.get(association, required=True)\n        if dcm_association:\n            association[FieldMap.CREATIVE_ASSET_ID] = dcm_association.get('id', None)\n            association[FieldMap.CREATIVE_ASSET_NAME] = dcm_association.get('name', None)\n            backup_lp = self.landing_page_dao.get(feed_item, column_name=FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID)\n            if backup_lp:\n                association[FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID] = backup_lp['id']\n                association[FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_NAME] = backup_lp['name']\n            backup_asset = self.creative_asset_dao.get(association, column_name=FieldMap.CREATIVE_BACKUP_ASSET_ID)\n            if backup_asset:\n                association[FieldMap.CREATIVE_BACKUP_ASSET_ID] = backup_asset['id']\n    for click_tag in feed_item.get('click_tags', []):\n        click_tag[FieldMap.CREATIVE_ID] = new_item['id']\n        click_tag[FieldMap.CREATIVE_NAME] = new_item['name']\n        click_tag_lp = self.landing_page_dao.get(click_tag, column_name=FieldMap.CLICK_TAG_LANDING_PAGE_ID)\n        if click_tag_lp:\n            click_tag[FieldMap.CLICK_TAG_LANDING_PAGE_ID] = click_tag_lp['id']\n            click_tag[FieldMap.CLICK_TAG_LANDING_PAGE_NAME] = click_tag_lp['name']\n    backup_asset = self.creative_asset_dao.get(feed_item, column_name=FieldMap.CREATIVE_BACKUP_ASSET_ID)\n    if backup_asset:\n        feed_item[FieldMap.CREATIVE_BACKUP_ASSET_ID] = backup_asset['id']\n    backup_lp = self.landing_page_dao.get(feed_item, column_name=FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID)\n    if backup_lp:\n        feed_item[FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID] = backup_lp['id']\n        feed_item[FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_NAME] = backup_lp['name']", "docstring": "Maps ids and names of related entities so they can be updated in the Bulkdozer feed.\n\nWhen Bulkdozer is done processing an item, it writes back the updated names\nand ids of related objects, this method makes sure those are updated in the\ncreative feed.\n\nArgs:\nfeed_item: Feed item representing the creative from the Bulkdozer feed.\nitem: The DCM creative being updated or created.", "source": "github-repos"}
{"code": "def convert_into_by_batch(input_dir, output_format='csv', java_options=None, **kwargs):\n    \n\n    if input_dir is None or not os.path.isdir(input_dir):\n        raise AttributeError(\"'input_dir' shoud be directory path\")\n\n    kwargs['format'] = _extract_format_for_conversion(output_format)\n\n    if java_options is None:\n        java_options = []\n\n    elif isinstance(java_options, str):\n        java_options = shlex.split(java_options)\n\n    \n    kwargs['batch'] = input_dir\n\n    _run(java_options, kwargs)", "docstring": "Convert tables from PDFs in a directory.\n\nArgs:\ninput_dir (str):\nDirectory path.\noutput_format (str, optional):\nOutput format of this function (csv, json or tsv)\njava_options (list, optional):\nSet java options like `-Xmx256m`.\nkwargs (dict):\nDictionary of option for tabula-java. Details are shown in `build_options()`\n\nReturns:\nNothing. Outputs are saved into the same directory with `input_dir`", "source": "juraj-google-style"}
{"code": "def optimize_with_repeates(self, fast=None, verbose=None, n_times=10, lambd=None, lambd_g=None, lambd_n=None):\n    verbose = dlimix.getVerbose(verbose)\n    if (not self.init):\n        self._initGP(fast)\n    opt_list = []\n    fixed0 = sp.zeros_like(self.gp.getParams()['dataTerm'])\n    for i in range(n_times):\n        scales1 = self._getScalesRand()\n        fixed1 = (0.1 * sp.randn(fixed0.shape[0], fixed0.shape[1]))\n        conv = self.trainGP(fast=fast, scales0=scales1, fixed0=fixed1, lambd=lambd, lambd_g=lambd_g, lambd_n=lambd_n)\n        if conv:\n            temp = 1\n            for j in range(len(opt_list)):\n                if sp.allclose(abs(self.getScales()), abs(opt_list[j]['scales'])):\n                    temp = 0\n                    opt_list[j]['counter'] += 1\n                    break\n            if (temp == 1):\n                opt = {}\n                opt['counter'] = 1\n                opt['LML'] = self.getLML()\n                opt['scales'] = self.getScales()\n                opt_list.append(opt)\n    LML = sp.array([opt_list[i]['LML'] for i in range(len(opt_list))])\n    index = LML.argsort()[::(- 1)]\n    out = []\n    if verbose:\n        print('\\nLocal mimima\\n')\n        print('n_times\\t\\tLML')\n        print('------------------------------------')\n    for i in range(len(opt_list)):\n        out.append(opt_list[index[i]])\n        if verbose:\n            print(('%d\\t\\t%f' % (opt_list[index[i]]['counter'], opt_list[index[i]]['LML'])))\n            print('')\n    return out", "docstring": "Train the model repeadly up to a number specified by the users with random restarts and\nreturn a list of all relative minima that have been found. This list is sorted according to\nleast likelihood. Each list term is a dictionary with keys \"counter\", \"LML\", and \"scales\".\n\nAfter running this function, the vc object will be set at the last iteration. Thus, if you\nwish to get the vc object of one of the repeats, then set the scales. For example:\n\nvc.setScales(scales=optimize_with_repeates_output[0][\"scales\"])\n\nArgs:\nfast:       Boolean. if set to True initalize kronSumGP\nverbose:    Boolean. If set to True, verbose output is produced. (default True)\nn_times:    number of re-starts of the optimization. (default 10)", "source": "codesearchnet"}
{"code": "def error(self, error_msg):\n    if (self.logger is not None):\n        self.logger.error(error_msg)\n    if (self.exc is not None):\n        raise self.exc(error_msg)", "docstring": "Outputs error message on own logger. Also raises exceptions if need be.\n\nArgs:\nerror_msg: message to output", "source": "codesearchnet"}
{"code": "def _name_search(cls, method, filters):\n    filters = cls._get_name_filters(filters)\n    return [cls.deserialize(cls._zeep_to_dict(row)) for row in method(filters)]", "docstring": "Helper for search methods that use name filters.\n\nArgs:\nmethod (callable): The Five9 API method to call with the name\nfilters.\nfilters (dict): A dictionary of search parameters, keyed by the\nname of the field to search. This should conform to the\nschema defined in :func:`five9.Five9.create_criteria`.\n\nReturns:\nlist[BaseModel]: A list of records representing the result.", "source": "codesearchnet"}
{"code": "def Svn(url, fname, to=None):\n    if (to is None):\n        to = str(CFG['tmp_dir'])\n    src_dir = (local.path(to) / fname)\n    if (not source_required(src_dir)):\n        Copy(src_dir, '.')\n        return\n    from benchbuild.utils.cmd import svn\n    svn('co', url, src_dir)\n    update_hash(src_dir)\n    Copy(src_dir, '.')", "docstring": "Checkout the SVN repo.\n\nArgs:\nurl (str): The SVN SOURCE repo.\nfname (str): The name of the repo on disk.\nto (str): The name of the TARGET folder on disk.\nDefaults to ``CFG[\"tmpdir\"]``", "source": "codesearchnet"}
{"code": "def generate_branches(scales=None, angles=None, shift_angle=0):\n    branches = []\n    for (pos, scale) in enumerate(scales):\n        angle = ((((- sum(angles)) / 2) + sum(angles[:pos])) + shift_angle)\n        branches.append([scale, angle])\n    return branches", "docstring": "Generates branches with alternative system.\n\nArgs:\nscales (tuple/array): Indicating how the branch/es length/es develop/s from age to age.\nangles (tuple/array): Holding the branch and shift angle in radians.\nshift_angle (float): Holding the rotation angle for all branches.\n\nReturns:\nbranches (2d-array): A array constits of arrays holding scale and angle for every branch.", "source": "codesearchnet"}
{"code": "def from_raw(self, raw: RawScalar) -> Optional[ScalarValue]:\n        \n        if isinstance(raw, str):\n            return raw", "docstring": "Return a cooked value of the receiver type.\n\nArgs:\nraw: Raw value obtained from JSON parser.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, soft_size_limit=None):\n    \n    with self._lock:\n      ret = rdf_flows.MessageList()\n      ret_size = 0\n      for message in self._Generate():\n        self._total_size -= len(message)\n        ret.job.append(rdf_flows.GrrMessage.FromSerializedString(message))\n        ret_size += len(message)\n        if soft_size_limit is not None and ret_size > soft_size_limit:\n          break\n\n      return ret", "docstring": "Retrieves and removes the messages from the queue.\n\nArgs:\nsoft_size_limit: int If there is more data in the queue than\nsoft_size_limit bytes, the returned list of messages will be\napproximately this large. If None (default), returns all messages\ncurrently on the queue.\n\nReturns:\nrdf_flows.MessageList A list of messages that were .Put on the queue\nearlier.", "source": "juraj-google-style"}
{"code": "def SampleTaskStatus(self, task, status):\n    if self._tasks_profiler:\n        self._tasks_profiler.Sample(task, status)", "docstring": "Takes a sample of the status of the task for profiling.\n\nArgs:\ntask (Task): a task.\nstatus (str): status.", "source": "codesearchnet"}
{"code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)", "docstring": "Returns a tensor object initialized to random normal values (truncated).\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only floating point types are\nsupported. If not specified, `tf.keras.backend.floatx()` is used, which\ndefault to `float32` unless you configured it otherwise (via\n`tf.keras.backend.set_floatx(float_dtype)`)\n**kwargs: Additional keyword arguments.", "source": "github-repos"}
{"code": "def emboss_pepstats_parser(infile):\n    \n    with open(infile) as f:\n        lines = f.read().split('\\n')\n\n    info_dict = {}\n\n    for l in lines[38:47]:\n        info = l.split('\\t')\n        cleaninfo = list(filter(lambda x: x != '', info))\n        prop = cleaninfo[0]\n        num = cleaninfo[2]\n        percent = float(cleaninfo[-1]) / float(100)\n\n        info_dict['mol_percent_' + prop.lower() + '-pepstats'] = percent\n\n    return info_dict", "docstring": "Get dictionary of pepstats results.\n\nArgs:\ninfile: Path to pepstats outfile\n\nReturns:\ndict: Parsed information from pepstats\n\nTODO:\nOnly currently parsing the bottom of the file for percentages of properties.", "source": "juraj-google-style"}
{"code": "def __init__(self, timestamp=None):\n    \n    super(Filetime, self).__init__()\n    self._precision = definitions.PRECISION_100_NANOSECONDS\n    self._timestamp = timestamp", "docstring": "Initializes a FILETIME timestamp.\n\nArgs:\ntimestamp (Optional[int]): FILETIME timestamp.", "source": "juraj-google-style"}
{"code": "def set_domain_id(self, value=None, default=False, disable=False):\n    return self._configure_mlag('domain-id', value, default, disable)", "docstring": "Configures the mlag domain-id value\n\nArgs:\nvalue (str): The value to configure the domain-id\ndefault (bool): Configures the domain-id using the default keyword\ndisable (bool): Negates the domain-id using the no keyword\n\nReturns:\nbool: Returns True if the commands complete successfully", "source": "codesearchnet"}
{"code": "def reset_for_retry(self, output_writer):\n    \n    self.input_reader = self.initial_input_reader\n    self.slice_id = 0\n    self.retries += 1\n    self.output_writer = output_writer\n    self.handler = self.mapreduce_spec.mapper.handler", "docstring": "Reset self for shard retry.\n\nArgs:\noutput_writer: new output writer that contains new output files.", "source": "juraj-google-style"}
{"code": "def plot_term_kdes(self, words, **kwargs):\n    stem = PorterStemmer().stem\n    for word in words:\n        kde = self.kde(stem(word), **kwargs)\n        plt.plot(kde)\n    plt.show()", "docstring": "Plot kernel density estimates for multiple words.\n\nArgs:\nwords (list): A list of unstemmed terms.", "source": "codesearchnet"}
{"code": "def call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any:\n  \n\n  if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args):\n    args = args  \n    return fn(*args)\n  else:\n    return fn(args)", "docstring": "Calls a transition operator with args, unpacking args if its a sequence.\n\nArgs:\nfn: A `TransitionOperator`.\nargs: Arguments to `fn`\n\nReturns:\nret: Return value of `fn`.", "source": "juraj-google-style"}
{"code": "def reconnect(self):\n        \n        if self._auth_method is \"userpass\":\n            self._mgr = manager.connect(host=self._conn[0],\n                                        port=self._conn[1],\n                                        username=self._auth[0],\n                                        password=self._auth[1],\n                                        hostkey_verify=self._hostkey_verify)\n        elif self._auth_method is \"key\":\n            self._mgr = manager.connect(host=self._conn[0],\n                                        port=self._conn[1],\n                                        username=self._auth[0],\n                                        key_filename=self._auth_key,\n                                        hostkey_verify=self._hostkey_verify)\n        else:\n            raise ValueError(\"auth_method incorrect value.\")\n        self._mgr.timeout = 600\n\n        return True", "docstring": "Reconnect session with device.\n\nArgs:\nNone\n\nReturns:\nbool: True if reconnect succeeds, False if not.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def update_submit_s3_uri(estimator, job_name):\n    if (estimator.uploaded_code is None):\n        return\n    pattern = '(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'\n    submit_uri = estimator.uploaded_code.s3_prefix\n    submit_uri = re.sub(pattern, job_name, submit_uri)\n    script_name = estimator.uploaded_code.script_name\n    estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name)", "docstring": "Updated the S3 URI of the framework source directory in given estimator.\n\nArgs:\nestimator (sagemaker.estimator.Framework): The Framework estimator to update.\njob_name (str): The new job name included in the submit S3 URI\n\nReturns:\nstr: The updated S3 URI of framework source directory", "source": "codesearchnet"}
{"code": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    if encoder_attention_mask is None:\n        batch_size, sequence_length = encoder_hidden_states.shape[:2]\n        encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n    if past_key_values is not None and return_dict:\n        outputs, past = outputs\n        outputs['past_key_values'] = unfreeze(past['cache'])\n        return outputs\n    elif past_key_values is not None and (not return_dict):\n        outputs, past = outputs\n        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration\n\n>>> model = FlaxMBartForConditionalGeneration.from_pretrained(\"facebook/mbart-large-cc25\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/mbart-large-cc25\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * 
decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```", "source": "github-repos"}
{"code": "def norm(values, min=None, max=None):\n    \n    min = np.min(values) if min is None else min\n    max = np.max(values) if max is None else max\n    return (values - min) / (max-min)", "docstring": "Unity-based normalization to scale data into 0-1 range.\n\n(values - min) / (max - min)\n\nArgs:\nvalues: Array of values to be normalized\nmin (float, optional): Lower bound of normalization range\nmax (float, optional): Upper bound of normalization range\n\nReturns:\nArray of normalized values", "source": "juraj-google-style"}
{"code": "def CheckCheck(filename, clean_lines, linenum, error):\n    lines = clean_lines.elided\n    (check_macro, start_pos) = FindCheckMacro(lines[linenum])\n    if (not check_macro):\n        return\n    (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum, start_pos)\n    if (end_pos < 0):\n        return\n    if (not Match('\\\\s*;', last_line[end_pos:])):\n        return\n    if (linenum == end_line):\n        expression = lines[linenum][(start_pos + 1):(end_pos - 1)]\n    else:\n        expression = lines[linenum][(start_pos + 1):]\n        for i in xrange((linenum + 1), end_line):\n            expression += lines[i]\n        expression += last_line[0:(end_pos - 1)]\n    lhs = ''\n    rhs = ''\n    operator = None\n    while expression:\n        matched = Match('^\\\\s*(<<|<<=|>>|>>=|->\\\\*|->|&&|\\\\|\\\\||==|!=|>=|>|<=|<|\\\\()(.*)$', expression)\n        if matched:\n            token = matched.group(1)\n            if (token == '('):\n                expression = matched.group(2)\n                (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])\n                if (end < 0):\n                    return\n                lhs += ('(' + expression[0:end])\n                expression = expression[end:]\n            elif (token in ('&&', '||')):\n                return\n            elif (token in ('<<', '<<=', '>>', '>>=', '->*', '->')):\n                lhs += token\n                expression = matched.group(2)\n            else:\n                operator = token\n                rhs = matched.group(2)\n                break\n        else:\n            matched = Match('^([^-=!<>()&|]+)(.*)$', expression)\n            if (not matched):\n                matched = Match('^(\\\\s*\\\\S)(.*)$', expression)\n                if (not matched):\n                    break\n            lhs += matched.group(1)\n            expression = matched.group(2)\n    if (not (lhs and operator and rhs)):\n        return\n    if ((rhs.find('&&') > (- 1)) or (rhs.find('||') > (- 1))):\n        return\n    lhs = lhs.strip()\n    rhs = rhs.strip()\n    match_constant = '^([-+]?(\\\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|\".*\"|\\\\\\'.*\\\\\\')$'\n    if (Match(match_constant, lhs) or Match(match_constant, rhs)):\n        error(filename, linenum, 'readability/check', 2, ('Consider using %s instead of %s(a %s b)' % (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)))", "docstring": "Checks the use of CHECK and EXPECT macros.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def predict_on_batch(self, data: Union[(list, tuple)], return_indexes: bool=False) -> List[List[str]]:\n    X = self._transform_batch(data)\n    (objects_number, lengths) = (len(X[0]), [len(elem) for elem in data[0]])\n    Y = self.model_.predict_on_batch(X)\n    labels = np.argmax(Y, axis=(- 1))\n    answer: List[List[str]] = ([None] * objects_number)\n    for (i, (elem, length)) in enumerate(zip(labels, lengths)):\n        elem = elem[:length]\n        answer[i] = (elem if return_indexes else self.tags.idxs2toks(elem))\n    return answer", "docstring": "Makes predictions on a single batch\n\nArgs:\ndata: a batch of word sequences together with additional inputs\nreturn_indexes: whether to return tag indexes in vocabulary or tags themselves\n\nReturns:\na batch of label sequences", "source": "codesearchnet"}
{"code": "def register_menu_item(self, items):\n    for itm in items:\n        if (itm.group in self.menu_items):\n            if (itm not in self.menu_items[itm.group]['items']):\n                self.menu_items[itm.group]['items'].append(itm)\n        else:\n            logger.warning('Tried registering menu item to unknown group {}'.format(itm.group))", "docstring": "Registers a views menu items into the metadata for the application. Skip if the item is already present\n\nArgs:\nitems (`list` of `MenuItem`): A list of `MenuItem`s\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def fill_empty_rows(ragged_input, default_value, name=None):\n    with ops.name_scope(name, 'RaggedFillEmptyRows', [ragged_input]):\n        if not isinstance(ragged_input, ragged_tensor.RaggedTensor):\n            raise TypeError(f'ragged_input must be RaggedTensor,             got {type(ragged_input)}')\n        default_value = ops.convert_to_tensor(default_value, dtype=ragged_input.dtype)\n        output_value_rowids, output_values, empty_row_indicator, unused_reverse_index_map = gen_ragged_array_ops.ragged_fill_empty_rows(value_rowids=ragged_input.value_rowids(), values=ragged_input.values, nrows=ragged_input.nrows(), default_value=default_value)\n        return (ragged_tensor.RaggedTensor.from_value_rowids(values=output_values, value_rowids=output_value_rowids, validate=False), empty_row_indicator)", "docstring": "Fills empty rows in the input `RaggedTensor` with rank 2 with a default\n\nvalue.\n\nThis op adds entries with the specified `default_value` for any row in the\ninput that does not already have a value.\n\nThe op also returns an indicator vector such that\n\nempty_row_indicator[i] = True iff row i was an empty row.\n\nArgs:\nragged_input: A `RaggedTensor` with rank 2.\ndefault_value: The value to fill for empty rows, with the same type as\n`ragged_input.`\nname: A name prefix for the returned tensors (optional)\n\nReturns:\nragged_ordered_output: A `RaggedTensor`with all empty rows filled in with\n`default_value`.\nempty_row_indicator: A bool vector indicating whether each input row was\nempty.\n\nRaises:\nTypeError: If `ragged_input` is not a `RaggedTensor`.", "source": "github-repos"}
{"code": "def add_file(self, path, compress):\n        \n        if not os.path.isfile(path):\n            raise ValueError('{} is not a file'.format(path))\n        self.fileobj.seek(self.last_offset)\n\n        with open(path, 'rb') as f:\n            flags = os.stat(path).st_mode & 0o777\n            self.add_fileobj(f, path, compress, flags)", "docstring": "Add a single file to the MAR file.\n\nArgs:\npath (str): path to a file to add to this MAR file.\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.", "source": "juraj-google-style"}
{"code": "def get_concatenated_pdf_from_disk(filenames: Iterable[str],\n                                   start_recto: bool = True) -> bytes:\n    \n    \n    \n    if start_recto:\n        writer = PdfFileWriter()\n        for filename in filenames:\n            if filename:\n                if writer.getNumPages() % 2 != 0:\n                    writer.addBlankPage()\n                writer.appendPagesFromReader(\n                    PdfFileReader(open(filename, 'rb')))\n        return pdf_from_writer(writer)\n    else:\n        merger = PdfFileMerger()\n        for filename in filenames:\n            if filename:\n                merger.append(open(filename, 'rb'))\n        return pdf_from_writer(merger)", "docstring": "Concatenates PDFs from disk and returns them as an in-memory binary PDF.\n\nArgs:\nfilenames: iterable of filenames of PDFs to concatenate\nstart_recto: start a new right-hand page for each new PDF?\n\nReturns:\nconcatenated PDF, as ``bytes``", "source": "juraj-google-style"}
{"code": "def merge_and_fit(self, track, pairings):\n    for (self_seg_index, track_seg_index, _) in pairings:\n        self_s = self.segments[self_seg_index]\n        ss_start = self_s.points[0]\n        track_s = track.segments[track_seg_index]\n        tt_start = track_s.points[0]\n        tt_end = track_s.points[(- 1)]\n        d_start = ss_start.distance(tt_start)\n        d_end = ss_start.distance(tt_end)\n        if (d_start > d_end):\n            track_s = track_s.copy()\n            track_s.points = list(reversed(track_s.points))\n        self_s.merge_and_fit(track_s)\n    return self", "docstring": "Merges another track with this one, ordering the points based on a\ndistance heuristic\n\nArgs:\ntrack (:obj:`Track`): Track to merge with\npairings\nReturns:\n:obj:`Segment`: self", "source": "codesearchnet"}
{"code": "def _add_results(self, results, trial_id):\n        \n        for result in results:\n            self.logger.debug(\"Appending result: %s\" % result)\n            result[\"trial_id\"] = trial_id\n            result_record = ResultRecord.from_json(result)\n            result_record.save()", "docstring": "Add a list of results into db.\n\nArgs:\nresults (list): A list of json results.\ntrial_id (str): Id of the trial.", "source": "juraj-google-style"}
{"code": "def reply_all(self, reply_comment):\n    payload = (('{ \"Comment\": \"' + reply_comment) + '\"}')\n    endpoint = 'https:\n    self._make_api_call('post', endpoint, data=payload)", "docstring": "Replies to everyone on the email, including those on the CC line.\n\nWith great power, comes great responsibility.\n\nArgs:\nreply_comment: The string comment to send to everyone on the email.", "source": "codesearchnet"}
{"code": "def plan(description, stack_action, context, tail=None, reverse=False):\n\n    def target_fn(*args, **kwargs):\n        return COMPLETE\n    steps = [Step(stack, fn=stack_action, watch_func=tail) for stack in context.get_stacks()]\n    steps += [Step(target, fn=target_fn) for target in context.get_targets()]\n    graph = build_graph(steps)\n    return build_plan(description=description, graph=graph, targets=context.stack_names, reverse=reverse)", "docstring": "A simple helper that builds a graph based plan from a set of stacks.\n\nArgs:\ndescription (str): a description of the plan.\naction (func): a function to call for each stack.\ncontext (:class:`stacker.context.Context`): a\n:class:`stacker.context.Context` to build the plan from.\ntail (func): an optional function to call to tail the stack progress.\nreverse (bool): if True, execute the graph in reverse (useful for\ndestroy actions).\n\nReturns:\n:class:`plan.Plan`: The resulting plan object", "source": "codesearchnet"}
{"code": "def clean_up_tokenization(out_string: str) -> str:\n    out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(\" ' \", \"'\").replace(\" n't\", \"n't\").replace(\" 'm\", \"'m\").replace(\" 's\", \"'s\").replace(\" 've\", \"'ve\").replace(\" 're\", \"'re\")\n    return out_string", "docstring": "Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.\n\nArgs:\nout_string (`str`): The text to clean up.\n\nReturns:\n`str`: The cleaned-up string.", "source": "github-repos"}
{"code": "def get(self, resource_id=None, resource_action=None, resource_cls=None, single_resource=False):\n    endpoint = self.endpoint\n    if (not resource_cls):\n        resource_cls = self._cls\n    if resource_id:\n        endpoint = self._build_url(endpoint, resource_id)\n    if resource_action:\n        endpoint = self._build_url(endpoint, resource_action)\n    response = self.api.execute('GET', endpoint)\n    if (not response.ok):\n        raise Error.parse(response.json())\n    if (resource_id or single_resource):\n        return resource_cls.parse(response.json())\n    return [resource_cls.parse(resource) for resource in response.json()]", "docstring": "Gets the details for one or more resources by ID\n\nArgs:\ncls - gophish.models.Model - The resource class\nresource_id - str - The endpoint (URL path) for the resource\nresource_action - str - An action to perform on the resource\nresource_cls - cls - A class to use for parsing, if different than\nthe base resource\nsingle_resource - bool - An override to tell Gophish that even\nthough we aren't requesting a single resource, we expect a\nsingle response object\n\nReturns:\nOne or more instances of cls parsed from the returned JSON", "source": "codesearchnet"}
{"code": "def getAll(self, event_name):\n        \n        raw_events = self._event_client.eventGetAll(self._id, event_name)\n        return [snippet_event.from_dict(msg) for msg in raw_events]", "docstring": "Gets all the events of a certain name that have been received so\nfar. This is a non-blocking call.\n\nArgs:\ncallback_id: The id of the callback.\nevent_name: string, the name of the event to get.\n\nReturns:\nA list of SnippetEvent, each representing an event from the Java\nside.", "source": "juraj-google-style"}
{"code": "def kill(self, exit_code: Any = None):\n        \n        self._force_kill.set()\n        if exit_code is not None:\n            self._exit_code = exit_code\n        logger.info(\"Killing behavior {0} with exit code: {1}\".format(self, exit_code))", "docstring": "Stops the behaviour\n\nArgs:\nexit_code (object, optional): the exit code of the behaviour (Default value = None)", "source": "juraj-google-style"}
{"code": "def __getitem__(self, k):\n        \n        chain = ChainMap(self.scopes, self.globals)\n        return chain.__getitem__(k)", "docstring": "Look up a variable.\n\nArgs:\nk (str): The name of the variable to look up.\n\nReturns:\nLispVal: The value assigned to the variable.\n\nRaises:\nKeyError: If the variable has not been assigned to.", "source": "juraj-google-style"}
{"code": "def load(cls, campaign_dir):\n    if (not Path(campaign_dir).is_absolute()):\n        raise ValueError('Path is not absolute')\n    if (not Path(campaign_dir).exists()):\n        raise ValueError('Directory does not exist')\n    filename = ('%s.json' % os.path.split(campaign_dir)[1])\n    filepath = os.path.join(campaign_dir, filename)\n    try:\n        tinydb = TinyDB(filepath)\n        assert (set(tinydb.table('config').all()[0].keys()) == set(['script', 'params', 'commit']))\n    except:\n        os.remove(filepath)\n        raise ValueError('Specified campaign directory seems corrupt')\n    return cls(tinydb, campaign_dir)", "docstring": "Initialize from an existing database.\n\nIt is assumed that the database json file has the same name as its\ncontaining folder.\n\nArgs:\ncampaign_dir (str): The path to the campaign directory.", "source": "codesearchnet"}
{"code": "def objects_get(self, bucket, key, projection='noAcl'):\n    \n    args = {}\n    if projection is not None:\n      args['projection'] = projection\n\n    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))\n    return datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about an object.\n\nArgs:\nbucket: the name of the bucket.\nkey: the key of the object within the bucket.\nprojection: the projection of the object to retrieve.\nReturns:\nA parsed object information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def __call__(self, data):\n        \n        \n        if _is_mutable_sequence_like(data) and len(data) > 0 and _is_sequence_like(data[0]):\n            return '\\n'.join([_CsvSerializer._serialize_row(row) for row in data])\n        return _CsvSerializer._serialize_row(data)", "docstring": "Take data of various data formats and serialize them into CSV.\n\nArgs:\ndata (object): Data to be serialized.\n\nReturns:\nobject: Sequence of bytes to be used for the request body.", "source": "juraj-google-style"}
{"code": "def expression(self, rbp=0):\n    prev_token = self.consume()\n    left = prev_token.nud(context=self)\n    while (rbp < self.current_token.lbp):\n        prev_token = self.consume()\n        left = prev_token.led(left, context=self)\n    return left", "docstring": "Extract an expression from the flow of tokens.\n\nArgs:\nrbp (int): the \"right binding power\" of the previous token.\nThis represents the (right) precedence of the previous token,\nand will be compared to the (left) precedence of next tokens.\n\nReturns:\nWhatever the led/nud functions of tokens returned.", "source": "codesearchnet"}
{"code": "def _el_orb_tuple(string):\n    \n    el_orbs = []\n    for split in string.split(','):\n        splits = split.split('.')\n        el = splits[0]\n        if len(splits) == 1:\n            el_orbs.append(el)\n        else:\n            el_orbs.append((el, tuple(splits[1:])))\n    return el_orbs", "docstring": "Parse the element and orbital argument strings.\n\nThe presence of an element without any orbitals means that we want to plot\nall of its orbitals.\n\nArgs:\nstring (`str`): The selected elements and orbitals in in the form:\n`\"Sn.s.p,O\"`.\n\nReturns:\nA list of tuples specifying which elements/orbitals to plot. The output\nfor the above example would be:\n\n`[('Sn', ('s', 'p')), 'O']`", "source": "juraj-google-style"}
{"code": "def mapped_repr(obj: Any, attributes: List[Tuple[str, str]],\n                with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:\n    \n    elements = [\"{}={}\".format(init_param_name, repr(getattr(obj, attr_name)))\n                for attr_name, init_param_name in attributes]\n    return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)", "docstring": "Convenience function for :func:`__repr__`.\nTakes attribute names and corresponding initialization parameter names\n(parameters to :func:`__init__`).\n\nArgs:\nobj: object to display\nattributes: list of tuples, each ``(attr_name, init_param_name)``.\nwith_addr: include the memory address of ``obj``\njoiner: string with which to join the elements\n\nReturns:\nstring: :func:`repr`-style representation", "source": "juraj-google-style"}
{"code": "def in_array_list(array_list, a, tol=1e-5):\n    \n    if len(array_list) == 0:\n        return False\n    axes = tuple(range(1, a.ndim + 1))\n    if not tol:\n        return np.any(np.all(np.equal(array_list, a[None, :]), axes))\n    else:\n        return np.any(np.sum(np.abs(array_list - a[None, :]), axes) < tol)", "docstring": "Extremely efficient nd-array comparison using numpy's broadcasting. This\nfunction checks if a particular array a, is present in a list of arrays.\nIt works for arrays of any size, e.g., even matrix searches.\n\nArgs:\narray_list ([array]): A list of arrays to compare to.\na (array): The test array for comparison.\ntol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is\ndone.\n\nReturns:\n(bool)", "source": "juraj-google-style"}
{"code": "def fix_image_flip_shape(image, result):\n    image_shape = image.get_shape()\n    if image_shape == tensor_shape.unknown_shape():\n        result.set_shape([None, None, None])\n    else:\n        result.set_shape(image_shape)\n    return result", "docstring": "Set the shape to 3 dimensional if we don't know anything else.\n\nArgs:\nimage: original image size\nresult: flipped or transformed image\n\nReturns:\nAn image whose shape is at least (None, None, None).", "source": "github-repos"}
{"code": "def to_diff_dict(self) -> Dict[str, Any]:\n    config_dict = self.to_dict()\n    default_config_dict = CompressedTensorsConfig().to_dict()\n    serializable_config_dict = {}\n    for key, value in config_dict.items():\n        if key not in default_config_dict or value != default_config_dict[key]:\n            serializable_config_dict[key] = value\n    return serializable_config_dict", "docstring": "Removes all attributes from config which correspond to the default config attributes for better readability and\nserializes to a Python dictionary.\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def truepath(path, real=False):\n    path = expanduser(path)\n    path = expandvars(path)\n    if real:\n        path = realpath(path)\n    else:\n        path = abspath(path)\n    path = normpath(path)\n    return path", "docstring": "Normalizes a string representation of a path and does shell-like expansion.\n\nArgs:\npath (PathLike): string representation of a path\nreal (bool): if True, all symbolic links are followed. (default: False)\n\nReturns:\nPathLike : normalized path\n\nNote:\nThis function is similar to the composition of expanduser, expandvars,\nnormpath, and (realpath if `real` else abspath). However, on windows\nbackslashes are then replaced with forward slashes to offer a\nconsistent unix-like experience across platforms.\n\nOn windows expanduser will expand environment variables formatted as\n%name%, whereas on unix, this will not occur.\n\nCommandLine:\npython -m ubelt.util_path truepath\n\nExample:\n>>> import ubelt as ub\n>>> assert ub.truepath('~/foo') == join(ub.userhome(), 'foo')\n>>> assert ub.truepath('~/foo') == ub.truepath('~/foo/bar/..')\n>>> assert ub.truepath('~/foo', real=True) == ub.truepath('~/foo')", "source": "codesearchnet"}
{"code": "def port_get_tag(port):\n    \n    cmd = 'ovs-vsctl get port {0} tag'.format(port)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    stdout = result['stdout']\n    return _stdout_list_split(retcode, stdout)", "docstring": "Lists tags of the port.\n\nArgs:\nport: A string - port name.\n\nReturns:\nList of tags (or empty list), False on failure.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_get_tag tap0", "source": "juraj-google-style"}
{"code": "def ParseEnum(field, value):\n    enum_descriptor = field.enum_type\n    try:\n        number = int(value, 0)\n    except ValueError:\n        enum_value = enum_descriptor.values_by_name.get(value, None)\n        if (enum_value is None):\n            raise ValueError(('Enum type \"%s\" has no value named %s.' % (enum_descriptor.full_name, value)))\n    else:\n        enum_value = enum_descriptor.values_by_number.get(number, None)\n        if (enum_value is None):\n            raise ValueError(('Enum type \"%s\" has no value with number %d.' % (enum_descriptor.full_name, number)))\n    return enum_value.number", "docstring": "Parse an enum value.\n\nThe value can be specified by a number (the enum value), or by\na string literal (the enum name).\n\nArgs:\nfield: Enum field descriptor.\nvalue: String value.\n\nReturns:\nEnum value number.\n\nRaises:\nValueError: If the enum value could not be parsed.", "source": "codesearchnet"}
{"code": "def _decorator(func):\n    opname = func.__name__\n    func.__doc__ = '\\n    Assert the condition `x {sym} y` holds element-wise.\\n\\n    This condition holds if for every pair of (possibly broadcast) elements\\n    `x[i]`, `y[i]`, we have `x[i] {sym} y[i]`.\\n    If both `x` and `y` are empty, this is trivially satisfied.\\n\\n    When running in graph mode, you should add a dependency on this operation\\n    to ensure that it runs. Example of adding a dependency to an operation:\\n\\n    ```python\\n    with tf.control_dependencies([tf.compat.v1.{opname}(x, y)]):\\n      output = tf.reduce_sum(x)\\n    ```\\n\\n    Args:\\n      x:  Numeric `Tensor`.\\n      y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.\\n      data:  The tensors to print out if the condition is False.  Defaults to\\n        error message and first few entries of `x`, `y`.\\n      summarize: Print this many entries of each tensor.\\n      message: A string to prefix to the default message.\\n      name: A name for this operation (optional).  Defaults to \"{opname}\".\\n\\n    Returns:\\n      Op that raises `InvalidArgumentError` if `x {sym} y` is False.\\n\\n    Raises:\\n      InvalidArgumentError: if the check can be performed immediately and\\n        `x {sym} y` is False. The check can be performed immediately during\\n        eager execution or if `x` and `y` are statically known.\\n\\n    @compatibility(TF2)\\n    `tf.compat.v1.{opname}` is compatible with eager execution and\\n    `tf.function`.\\n    Please use `tf.debugging.{opname}` instead when migrating to TF2. Apart\\n    from `data`, all arguments are supported with the same argument name.\\n\\n    If you want to ensure the assert statements run before the\\n    potentially-invalid computation, please use `tf.control_dependencies`,\\n    as tf.function auto-control dependencies are insufficient for assert\\n    statements.\\n\\n    \n    return func", "docstring": "Generated decorator that adds the appropriate docstring to the function for symbol `sym`.\n\nArgs:\nfunc: Function for a TensorFlow op\n\nReturns:\nA version of `func` with documentation attached.", "source": "github-repos"}
{"code": "def average_datetimes(dt_list):\n    \n    if sys.version_info < (3, 3):\n        \n        import time\n\n        def timestamp_func(dt):\n            return time.mktime(dt.timetuple())\n    else:\n        timestamp_func = datetime.timestamp\n\n    total = [timestamp_func(dt) for dt in dt_list]\n    return datetime.fromtimestamp(sum(total) / len(total))", "docstring": "Average a series of datetime objects.\n\n.. note::\n\nThis function assumes all datetime objects are naive and in the same\ntime zone (UTC).\n\nArgs:\ndt_list (iterable): Datetime objects to average\n\nReturns: Average datetime as a datetime object", "source": "juraj-google-style"}
{"code": "def __init__(self, path, ignoreErrors=True):\n        \n        self._name = path\n        self._members = {}\n        self._pendingError = None\n\n        try:\n            self._members = self._readZipDirectory(fileObj=open(path, 'rb'))\n\n        except Exception:\n            debug.logger & debug.flagReader and debug.logger(\n                'ZIP file %s open failure: %s' % (self._name, sys.exc_info()[1]))\n\n            if not ignoreErrors:\n                self._pendingError = error.PySmiError('file %s access error: %s' % (self._name, sys.exc_info()[1]))", "docstring": "Create an instance of *ZipReader* serving a ZIP archive.\n\nArgs:\npath (str): path to ZIP archive containing MIB files\n\nKeyword Args:\nignoreErrors (bool): ignore ZIP archive access errors", "source": "juraj-google-style"}
{"code": "def get_all_links_in_chain(self):\n    if (self.is_decision() and self.get_link(self.task_id)):\n        return self.links\n    return ([self] + self.links)", "docstring": "Return all links in the chain of trust, including the target task.\n\nBy default, we're checking a task and all its dependencies back to the\ntree, so the full chain is ``self.links`` + ``self``. However, we also\nsupport checking the decision task itself. In that case, we populate\nthe decision task as a link in ``self.links``, and we don't need to add\nanother check for ``self``.\n\nReturns:\nlist: of all ``LinkOfTrust``s to verify.", "source": "codesearchnet"}
{"code": "def extract_stack(stacklevel=1):\n    thread_key = _get_thread_key()\n    return _tf_stack.extract_stack(_source_mapper_stacks[thread_key][-1].internal_map, _source_filter_stacks[thread_key][-1].internal_set, stacklevel)", "docstring": "An eager-friendly alternative to traceback.extract_stack.\n\nArgs:\nstacklevel: number of initial frames to skip when producing the stack.\n\nReturns:\nA list-like FrameSummary containing StackFrame-like objects, which are\nnamedtuple-like objects with the following fields: filename, lineno, name,\nline, meant to masquerade as traceback.FrameSummary objects.", "source": "github-repos"}
{"code": "def _export_files(self, bq):\n    job_labels = self._get_bq_metadata().add_additional_bq_job_labels(self.bigquery_job_labels)\n    export_job_name = bigquery_tools.generate_bq_job_name(self._job_name, self._source_uuid, bigquery_tools.BigQueryJobTypes.EXPORT, '%s_%s' % (int(time.time()), random.randint(0, 1000)))\n    temp_location = self.options.view_as(GoogleCloudOptions).temp_location\n    gcs_location = bigquery_export_destination_uri(self.gcs_location, temp_location, self._source_uuid)\n    try:\n        if self.use_json_exports:\n            job_ref = bq.perform_extract_job([gcs_location], export_job_name, self.table_reference, bigquery_tools.FileFormat.JSON, project=self._get_project(), job_labels=job_labels, include_header=False)\n        else:\n            job_ref = bq.perform_extract_job([gcs_location], export_job_name, self.table_reference, bigquery_tools.FileFormat.AVRO, project=self._get_project(), include_header=False, job_labels=job_labels, use_avro_logical_types=True)\n        bq.wait_for_bq_job(job_ref)\n    except Exception as exn:\n        logging.warning('Error exporting table: %s. Note that external tables cannot be exported: https:\n        raise\n    metadata_list = FileSystems.match([gcs_location])[0].metadata_list\n    if isinstance(self.table_reference, vp.ValueProvider):\n        table_ref = bigquery_tools.parse_table_reference(self.table_reference.get(), project=self.project)\n    else:\n        table_ref = self.table_reference\n    table = bq.get_table(table_ref.projectId, table_ref.datasetId, table_ref.tableId)\n    return (table.schema, metadata_list)", "docstring": "Runs a BigQuery export job.\n\nReturns:\nbigquery.TableSchema instance, a list of FileMetadata instances", "source": "github-repos"}
{"code": "def set_metadata(self, token, data):\n    req = requests.post(self.meta_url(('metadata/ocp/set/' + token)), json=data, verify=False)\n    if (req.status_code != 200):\n        raise RemoteDataUploadError(('Could not upload metadata: ' + req.json()['message']))\n    return req.json()", "docstring": "Insert new metadata into the OCP metadata database.\n\nArguments:\ntoken (str): Token of the datum to set\ndata (str): A dictionary to insert as metadata. Include `secret`.\n\nReturns:\njson: Info of the inserted ID (convenience) or an error message.\n\nThrows:\nRemoteDataUploadError: If the token is already populated, or if\nthere is an issue with your specified `secret` key.", "source": "codesearchnet"}
{"code": "def clean_dataframes(dfs):\n    \n    if isinstance(dfs, (list)):\n        for df in dfs:\n            df = clean_dataframe(df)\n        return dfs\n    else:\n        return [clean_dataframe(dfs)]", "docstring": "Fill NaNs with the previous value, the next value or if all are NaN then 1.0\n\nTODO:\nLinear interpolation and extrapolation\n\nArguments:\ndfs (list of dataframes): list of dataframes that contain NaNs to be removed\n\nReturns:\nlist of dataframes: list of dataframes with NaNs replaced by interpolated values", "source": "juraj-google-style"}
{"code": "def _MergeMessageField(self, tokenizer, message, field):\n    \n    is_map_entry = _IsMapEntry(field)\n\n    if tokenizer.TryConsume('<'):\n      end_token = '>'\n    else:\n      tokenizer.Consume('{')\n      end_token = '}'\n\n    if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and\n        tokenizer.TryConsume('[')):\n      packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)\n      tokenizer.Consume(']')\n      tokenizer.TryConsume(':')\n      if tokenizer.TryConsume('<'):\n        expanded_any_end_token = '>'\n      else:\n        tokenizer.Consume('{')\n        expanded_any_end_token = '}'\n      if not self.descriptor_pool:\n        raise ParseError('Descriptor pool required to parse expanded Any field')\n      expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,\n                                                           self.descriptor_pool)\n      if not expanded_any_sub_message:\n        raise ParseError('Type %s not found in descriptor pool' %\n                         packed_type_name)\n      while not tokenizer.TryConsume(expanded_any_end_token):\n        if tokenizer.AtEnd():\n          raise tokenizer.ParseErrorPreviousToken('Expected \"%s\".' %\n                                                  (expanded_any_end_token,))\n        self._MergeField(tokenizer, expanded_any_sub_message)\n      if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n        any_message = getattr(message, field.name).add()\n      else:\n        any_message = getattr(message, field.name)\n      any_message.Pack(expanded_any_sub_message)\n    elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n      if field.is_extension:\n        sub_message = message.Extensions[field].add()\n      elif is_map_entry:\n        sub_message = getattr(message, field.name).GetEntryClass()()\n      else:\n        sub_message = getattr(message, field.name).add()\n    else:\n      if field.is_extension:\n        sub_message = message.Extensions[field]\n      else:\n        sub_message = getattr(message, field.name)\n      sub_message.SetInParent()\n\n    while not tokenizer.TryConsume(end_token):\n      if tokenizer.AtEnd():\n        raise tokenizer.ParseErrorPreviousToken('Expected \"%s\".' % (end_token,))\n      self._MergeField(tokenizer, sub_message)\n\n    if is_map_entry:\n      value_cpptype = field.message_type.fields_by_name['value'].cpp_type\n      if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n        value = getattr(message, field.name)[sub_message.key]\n        value.MergeFrom(sub_message.value)\n      else:\n        getattr(message, field.name)[sub_message.key] = sub_message.value", "docstring": "Merges a single scalar field into a message.\n\nArgs:\ntokenizer: A tokenizer to parse the field value.\nmessage: The message of which field is a member.\nfield: The descriptor of the field to be merged.\n\nRaises:\nParseError: In case of text parsing problems.", "source": "juraj-google-style"}
{"code": "def __add_min_max_value(parser, basename, default_min, default_max, initial, help_template):\n    help_template = Template(help_template)\n    parser.add('--{0}-min'.format(basename), default=default_min, type=float, required=False, help=help_template.substitute(mmi='min', name=basename))\n    parser.add('--{0}-max'.format(basename), default=default_max, type=float, required=False, help=help_template.substitute(mmi='max', name=basename))\n    parser.add('--{0}'.format(basename), default=initial, type=float, required=False, help=help_template.substitute(mmi='initial', name=basename))", "docstring": "Generates parser entries for options\nwith a min, max, and default value.\n\nArgs:\nparser: the parser to use.\nbasename: the base option name. Generated options will have flags\n--basename-min, --basename-max, and --basename.\ndefault_min: the default min value\ndefault_max: the default max value\ninitial: the default initial value\nhelp_template: the help string template.\n$mmi will be replaced with min, max, or initial.\n$name will be replaced with basename.", "source": "codesearchnet"}
{"code": "def __init__(self, directory, jinja2_environment, logger=None, raise_exception_on_warning=False):\n        \n        super(Generator, self).__init__()\n\n        \n        self.__logger = logger\n        self.__raise_exception_on_warning = raise_exception_on_warning\n\n        \n        if not os.path.isdir(directory):\n            self.log_error('Main directory \\'%s\\' does not exists!' % directory)\n\n        self.__root_directory = os.path.abspath(directory)   \n        self.__jinja2_environment = jinja2_environment\n\n\n\n        self.__jinja2_predefined_filters = self.__jinja2_environment.filters.keys()\n\n\n\n        self.__extensions = {}\n        self.__actions = TreeMap()\n\n        self.__default_action = None", "docstring": "Constructor of a :program:`cygenja` template machine.\n\nArgs:\ndirectory (str): Absolute or relative base directory. Everything happens in that directory and sub-directories.\njinja2_environment: :program:`Jinja2` environment.\nlogger: A logger (from the standard ``logging``) or ``None`` is no logging is wanted.\nraise_exception_on_warning (bool): If set to ``True``, raise a ``RuntimeError`` when logging a warning.", "source": "juraj-google-style"}
{"code": "def process_fidelity(channel1, channel2, require_cptp=True):\n    is_cptp1 = None\n    is_cptp2 = None\n    if isinstance(channel1, (list, np.ndarray)):\n        channel1 = Operator(channel1)\n        if require_cptp:\n            is_cptp1 = channel1.is_unitary()\n    if isinstance(channel2, (list, np.ndarray)):\n        channel2 = Operator(channel2)\n        if require_cptp:\n            is_cptp2 = channel2.is_unitary()\n    s1 = SuperOp(channel1)\n    s2 = SuperOp(channel2)\n    if require_cptp:\n        if (is_cptp1 is None):\n            is_cptp1 = s1.is_cptp()\n        if (not is_cptp1):\n            raise QiskitError('channel1 is not CPTP')\n        if (is_cptp2 is None):\n            is_cptp2 = s2.is_cptp()\n        if (not is_cptp2):\n            raise QiskitError('channel2 is not CPTP')\n    (input_dim1, output_dim1) = s1.dim\n    (input_dim2, output_dim2) = s2.dim\n    if ((input_dim1 != output_dim1) or (input_dim2 != output_dim2)):\n        raise QiskitError('Input channels must have same size input and output dimensions.')\n    if (input_dim1 != input_dim2):\n        raise QiskitError('Input channels have different dimensions.')\n    fidelity = (np.trace(s1.compose(s2.adjoint()).data) / (input_dim1 ** 2))\n    return fidelity", "docstring": "Return the process fidelity between two quantum channels.\n\nThis is given by\n\nF_p(E1, E2) = Tr[S2^dagger.S1])/dim^2\n\nwhere S1 and S2 are the SuperOp matrices for channels E1 and E2,\nand dim is the dimension of the input output statespace.\n\nArgs:\nchannel1 (QuantumChannel or matrix): a quantum channel or unitary matrix.\nchannel2 (QuantumChannel or matrix): a quantum channel or unitary matrix.\nrequire_cptp (bool): require input channels to be CPTP [Default: True].\n\nReturns:\narray_like: The state fidelity F(state1, state2).\n\nRaises:\nQiskitError: if inputs channels do not have the same dimensions,\nhave different input and output dimensions, or are not CPTP with\n`require_cptp=True`.", "source": "codesearchnet"}
{"code": "def all(self, predicate=bool):\n    if self.closed():\n        raise ValueError('Attempt to call all() on a closed Queryable.')\n    if (not is_callable(predicate)):\n        raise TypeError('all() parameter predicate={0} is not callable'.format(repr(predicate)))\n    return all(self.select(predicate))", "docstring": "Determine if all elements in the source sequence satisfy a condition.\n\nAll of the source sequence will be consumed.\n\nNote: This method uses immediate execution.\n\nArgs:\npredicate (callable): An optional single argument function used to\ntest each elements. If omitted, the bool() function is used\nresulting in the elements being tested directly.\n\nReturns:\nTrue if all elements in the sequence meet the predicate condition,\notherwise False.\n\nRaises:\nValueError: If the Queryable is closed()\nTypeError: If predicate is not callable.", "source": "codesearchnet"}
{"code": "def coco_to_pascal_voc(bboxes: np.ndarray) -> np.ndarray:\n    bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0] - 1\n    bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1] - 1\n    return bboxes", "docstring": "Converts bounding boxes from the COCO format to the Pascal VOC format.\n\nIn other words, converts from (top_left_x, top_left_y, width, height) format\nto (top_left_x, top_left_y, bottom_right_x, bottom_right_y).\n\nArgs:\nbboxes (`np.ndarray` of shape `(batch_size, 4)):\nBounding boxes in COCO format.\n\nReturns:\n`np.ndarray` of shape `(batch_size, 4) in Pascal VOC format.", "source": "github-repos"}
{"code": "def _model_ready(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n    return _ready(self._ready_op, sess, 'Model not ready')", "docstring": "Checks if the model is ready or not.\n\nArgs:\nsess: A `Session`.\n\nReturns:\nA tuple (is_ready, msg), where is_ready is True if ready and False\notherwise, and msg is `None` if the model is ready, a `String` with the\nreason why it is not ready otherwise.", "source": "github-repos"}
{"code": "def languages(self, **kwargs):\n    path = ('/projects/%s/languages' % self.get_id())\n    return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get languages used in the project with percentage value.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def kill(self, container, signal=None):\n        \n        url = self._url(\"/containers/{0}/kill\", container)\n        params = {}\n        if signal is not None:\n            if not isinstance(signal, six.string_types):\n                signal = int(signal)\n            params['signal'] = signal\n        res = self._post(url, params=params)\n\n        self._raise_for_status(res)", "docstring": "Kill a container or send a signal to a container.\n\nArgs:\ncontainer (str): The container to kill\nsignal (str or int): The signal to send. Defaults to ``SIGKILL``\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _write_init_models(self, filenames):\n        \n\n        self.write(destination=self.output_directory, filename=\"__init__.py\", template_name=\"__init_model__.py.tpl\",\n                   filenames=self._prepare_filenames(filenames),\n                   class_prefix=self._class_prefix,\n                   product_accronym=self._product_accronym,\n                   header=self.header_content)", "docstring": "Write init file\n\nArgs:\nfilenames (dict): dict of filename and classes", "source": "juraj-google-style"}
{"code": "def begin_stream(self, command: Command) -> Reply:\n    (yield from self._control_stream.write_command(command))\n    reply = (yield from self._control_stream.read_reply())\n    self.raise_if_not_match('Begin stream', (ReplyCodes.file_status_okay_about_to_open_data_connection, ReplyCodes.data_connection_already_open_transfer_starting), reply)\n    return reply", "docstring": "Start sending content on the data stream.\n\nArgs:\ncommand: A command that tells the server to send data over the\ndata connection.\n\nCoroutine.\n\nReturns:\nThe begin reply.", "source": "codesearchnet"}
{"code": "def _get_js_files(cls, extra_files):\n    return cls._get_media_files(packager=Packager(), media_packages=getattr(cls, 'js_packages', {}), media_type='js', extra_files=extra_files)", "docstring": "Return all JavaScript files from the Media class.\n\nArgs:\nextra_files (list):\nThe contents of the Media class's original :py:attr:`js`\nattribute, if one was provided.\n\nReturns:\nlist:\nThe JavaScript files to return for the :py:attr:`js` attribute.", "source": "codesearchnet"}
{"code": "def get_models(self, uniprot_acc):\n        \n        if uniprot_acc in self.all_models:\n            return self.all_models[uniprot_acc]\n        else:\n            log.error('{}: no SWISS-MODELs available'.format(uniprot_acc))\n            return None", "docstring": "Return all available models for a UniProt accession number.\n\nArgs:\nuniprot_acc (str): UniProt ACC/ID\n\nReturns:\ndict: All available models in SWISS-MODEL for this UniProt entry", "source": "juraj-google-style"}
{"code": "def _insert_stack(stack, sample_count, call_tree):\n        \n        curr_level = call_tree\n        for func in stack:\n            next_level_index = {\n                node['stack']: node for node in curr_level['children']}\n            if func not in next_level_index:\n                new_node = {'stack': func, 'children': [], 'sampleCount': 0}\n                curr_level['children'].append(new_node)\n                curr_level = new_node\n            else:\n                curr_level = next_level_index[func]\n        curr_level['sampleCount'] = sample_count", "docstring": "Inserts stack into the call tree.\n\nArgs:\nstack: Call stack.\nsample_count: Sample count of call stack.\ncall_tree: Call tree.", "source": "juraj-google-style"}
{"code": "def sample(self, num_rows):\n    sampled_values = []\n    for i in range(num_rows):\n        sampled_values.append(self._sample_row())\n    return pd.DataFrame(sampled_values, columns=self.columns)", "docstring": "Sample new rows.\n\nArgs:\nnum_rows(int): Number of rows to sample\n\nReturns:\npandas.DataFrame", "source": "codesearchnet"}
{"code": "def create_knowledge_base(project_id, display_name):\n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.KnowledgeBasesClient()\n    project_path = client.project_path(project_id)\n    knowledge_base = dialogflow.types.KnowledgeBase(display_name=display_name)\n    response = client.create_knowledge_base(project_path, knowledge_base)\n    print('Knowledge Base created:\\n')\n    print('Display Name: {}\\n'.format(response.display_name))\n    print('Knowledge ID: {}\\n'.format(response.name))", "docstring": "Creates a Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\ndisplay_name: The display name of the Knowledge base.", "source": "codesearchnet"}
{"code": "def get_alarms(zone=None):\n    \n    \n    if zone is None:\n        zone = discovery.any_soco()\n    response = zone.alarmClock.ListAlarms()\n    alarm_list = response['CurrentAlarmList']\n    tree = XML.fromstring(alarm_list.encode('utf-8'))\n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n\n    \n    alarms = tree.findall('Alarm')\n    result = set()\n    for alarm in alarms:\n        values = alarm.attrib\n        alarm_id = values['ID']\n        \n        \n        if Alarm._all_alarms.get(alarm_id):\n            instance = Alarm._all_alarms.get(alarm_id)\n        else:\n            instance = Alarm(None)\n            instance._alarm_id = alarm_id\n            Alarm._all_alarms[instance._alarm_id] = instance\n\n        instance.start_time = datetime.strptime(\n            values['StartTime'], \"%H:%M:%S\").time()  \n        \n        instance.duration = None if values['Duration'] == '' else\\\n            datetime.strptime(values['Duration'], \"%H:%M:%S\").time()\n        instance.recurrence = values['Recurrence']\n        instance.enabled = values['Enabled'] == '1'\n        instance.zone = next((z for z in zone.all_zones\n                              if z.uid == values['RoomUUID']), None)\n        \n        if instance.zone is None:\n            continue\n        instance.program_uri = None if values['ProgramURI'] ==\\\n            \"x-rincon-buzzer:0\" else values['ProgramURI']\n        instance.program_metadata = values['ProgramMetaData']\n        instance.play_mode = values['PlayMode']\n        instance.volume = values['Volume']\n        instance.include_linked_zones = values['IncludeLinkedZones'] == '1'\n\n        result.add(instance)\n    return result", "docstring": "Get a set of all alarms known to the Sonos system.\n\nArgs:\nzone (`SoCo`, optional): a SoCo instance to query. If None, a random\ninstance is used. Defaults to `None`.\n\nReturns:\nset: A set of `Alarm` instances\n\nNote:\nAny existing `Alarm` instance will have its attributes updated to those\ncurrently stored on the Sonos system.", "source": "juraj-google-style"}
{"code": "def evaluate_cut(uncut_subsystem, cut, unpartitioned_ces):\n    log.debug('Evaluating %s...', cut)\n    cut_subsystem = uncut_subsystem.apply_cut(cut)\n    if config.ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS:\n        mechanisms = unpartitioned_ces.mechanisms\n    else:\n        mechanisms = set((unpartitioned_ces.mechanisms + list(cut_subsystem.cut_mechanisms)))\n    partitioned_ces = ces(cut_subsystem, mechanisms)\n    log.debug('Finished evaluating %s.', cut)\n    phi_ = ces_distance(unpartitioned_ces, partitioned_ces)\n    return SystemIrreducibilityAnalysis(phi=phi_, ces=unpartitioned_ces, partitioned_ces=partitioned_ces, subsystem=uncut_subsystem, cut_subsystem=cut_subsystem)", "docstring": "Compute the system irreducibility for a given cut.\n\nArgs:\nuncut_subsystem (Subsystem): The subsystem without the cut applied.\ncut (Cut): The cut to evaluate.\nunpartitioned_ces (CauseEffectStructure): The cause-effect structure of\nthe uncut subsystem.\n\nReturns:\nSystemIrreducibilityAnalysis: The |SystemIrreducibilityAnalysis| for\nthat cut.", "source": "codesearchnet"}
{"code": "def _launch_flow(self, client, name, args):\n    \n    \n    flow = self._check_approval_wrapper(\n        client, client.CreateFlow, name=name, args=args)\n    flow_id = flow.flow_id\n    print('{0:s}: Scheduled'.format(flow_id))\n\n    if self.keepalive:\n      keepalive_flow = client.CreateFlow(\n          name='KeepAlive', args=flows_pb2.KeepAliveArgs())\n      print('KeepAlive Flow:{0:s} scheduled'.format(keepalive_flow.flow_id))\n\n    return flow_id", "docstring": "Create specified flow, setting KeepAlive if requested.\n\nArgs:\nclient: GRR Client object on which to launch the flow.\nname: string containing flow name.\nargs: proto (*FlowArgs) for type of flow, as defined in GRR flow proto.\n\nReturns:\nstring containing ID of launched flow", "source": "juraj-google-style"}
{"code": "def _get_endpoint(self, sub_domain):\n    storage_parameters = (self._storage_parameters or dict())\n    account_name = storage_parameters.get('account_name')\n    if (not account_name):\n        raise ValueError('\"account_name\" is required for Azure storage')\n    suffix = storage_parameters.get('endpoint_suffix', 'core.windows.net')\n    self._endpoint = ('http%s:\n    return (account_name, suffix.replace('.', '\\\\.'))", "docstring": "Get endpoint information from storage parameters.\n\nUpdate system with endpoint information and return information required\nto define roots.\n\nArgs:\nself (pycosio._core.io_system.SystemBase subclass): System.\nsub_domain (str): Azure storage sub-domain.\n\nReturns:\ntuple of str: account_name, endpoint_suffix", "source": "codesearchnet"}
{"code": "def replace_vars(config, env):\n  \n  if isinstance(config, dict):\n    for k, v in list(config.items()):\n      if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):\n        replace_vars(v, env)\n      elif isinstance(v, basestring):\n        config[k] = expand_var(v, env)\n  elif isinstance(config, list):\n    for i, v in enumerate(config):\n      if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):\n        replace_vars(v, env)\n      elif isinstance(v, basestring):\n        config[i] = expand_var(v, env)\n  elif isinstance(config, tuple):\n    \n    for v in config:\n      if isinstance(v, dict) or isinstance(v, list) or isinstance(v, tuple):\n        replace_vars(v, env)", "docstring": "Replace variable references in config using the supplied env dictionary.\n\nArgs:\nconfig: the config to parse. Can be a tuple, list or dict.\nenv: user supplied dictionary.\n\nRaises:\nException if any variable references are not found in env.", "source": "juraj-google-style"}
{"code": "def try_pick_piece_of_work(self, worker_id, submission_id=None):\n    \n    client = self._datastore_client\n    unclaimed_work_ids = None\n    if submission_id:\n      unclaimed_work_ids = [\n          k for k, v in iteritems(self.work)\n          if is_unclaimed(v) and (v['submission_id'] == submission_id)\n      ]\n    if not unclaimed_work_ids:\n      unclaimed_work_ids = [k for k, v in iteritems(self.work)\n                            if is_unclaimed(v)]\n    if unclaimed_work_ids:\n      next_work_id = random.choice(unclaimed_work_ids)\n    else:\n      return None\n    try:\n      with client.transaction() as transaction:\n        work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,\n                              KIND_WORK, next_work_id)\n        work_entity = client.get(work_key, transaction=transaction)\n        if not is_unclaimed(work_entity):\n          return None\n        work_entity['claimed_worker_id'] = worker_id\n        work_entity['claimed_worker_start_time'] = get_integer_time()\n        transaction.put(work_entity)\n    except Exception:\n      return None\n    return next_work_id", "docstring": "Tries pick next unclaimed piece of work to do.\n\nAttempt to claim work piece is done using Cloud Datastore transaction, so\nonly one worker can claim any work piece at a time.\n\nArgs:\nworker_id: ID of current worker\nsubmission_id: if not None then this method will try to pick\npiece of work for this submission\n\nReturns:\nID of the claimed work piece", "source": "juraj-google-style"}
{"code": "def is_attribute_deprecated(self, attribute):\n        \n        rule_set = self._attribute_rule_sets.get(attribute)\n        if rule_set.version_deprecated:\n            if self._version >= rule_set.version_deprecated:\n                return True\n            else:\n                return False\n        else:\n            return False", "docstring": "Check if the attribute is deprecated by the current KMIP version.\n\nArgs:\nattribute (string): The name of the attribute\n(e.g., 'Unique Identifier'). Required.", "source": "juraj-google-style"}
{"code": "def References(self):\n    if (self.__references is None):\n        refs = {}\n        for (hash, group) in groupby(self.inputs, (lambda x: x.PrevHash)):\n            (tx, height) = GetBlockchain().GetTransaction(hash.ToBytes())\n            if (tx is not None):\n                for input in group:\n                    refs[input] = tx.outputs[input.PrevIndex]\n        self.__references = refs\n    return self.__references", "docstring": "Get all references.\n\nReturns:\ndict:\nKey (UInt256): input PrevHash\nValue (TransactionOutput): object.", "source": "codesearchnet"}
{"code": "def HandleBlockHeadersReceived(self, inventory):\n        \n        try:\n            inventory = IOHelper.AsSerializableWithType(inventory, 'neo.Network.Payloads.HeadersPayload.HeadersPayload')\n            if inventory is not None:\n                logger.debug(f\"{self.prefix} received headers\")\n                self.heart_beat(HEARTBEAT_HEADERS)\n                BC.Default().AddHeaders(inventory.Headers)\n\n        except Exception as e:\n            logger.debug(f\"Error handling Block headers {e}\")", "docstring": "Process a block header inventory payload.\n\nArgs:\ninventory (neo.Network.Inventory):", "source": "juraj-google-style"}
{"code": "def upload_to_metta(train_features_path, train_labels_path, test_features_path, test_labels_path, train_quarter, test_quarter, num_dimensions):\n    train_config = metta_config(train_quarter, num_dimensions)\n    test_config = metta_config(test_quarter, num_dimensions)\n    X_train = pd.read_csv(train_features_path, sep=',')\n    X_train.columns = [('doc2vec_' + str(i)) for i in range(X_train.shape[1])]\n    Y_train = pd.read_csv(train_labels_path)\n    Y_train.columns = ['onet_soc_code']\n    train = pd.concat([X_train, Y_train], axis=1)\n    X_test = pd.read_csv(test_features_path, sep=',')\n    X_test.columns = [('doc2vec_' + str(i)) for i in range(X_test.shape[1])]\n    Y_test = pd.read_csv(test_labels_path)\n    Y_test.columns = ['onet_soc_code']\n    test = pd.concat([X_test, Y_test], axis=1)\n    metta.archive_train_test(train_config, X_train, test_config, X_test, directory='wdi')", "docstring": "Store train and test matrices using metta\n\nArgs:\ntrain_features_path (str) Path to matrix with train features\ntrain_labels_path (str) Path to matrix with train labels\ntest_features_path (str) Path to matrix with test features\ntest_labels_path (str) Path to matrix with test labels\ntrain_quarter (str) Quarter of train matrix\ntest_quarter (str) Quarter of test matrix\nnum_dimensions (int) Number of features", "source": "codesearchnet"}
{"code": "def _CompositeFoldByteStream(\n      self, mapped_value, context=None, **unused_kwargs):\n    \n    context_state = getattr(context, 'state', {})\n\n    attribute_index = context_state.get('attribute_index', 0)\n    subcontext = context_state.get('context', None)\n\n    if not subcontext:\n      subcontext = DataTypeMapContext(values={\n          type(mapped_value).__name__: mapped_value})\n\n    data_attributes = []\n\n    for attribute_index in range(attribute_index, self._number_of_attributes):\n      attribute_name = self._attribute_names[attribute_index]\n      data_type_map = self._data_type_maps[attribute_index]\n\n      member_value = getattr(mapped_value, attribute_name, None)\n      if data_type_map is None or member_value is None:\n        continue\n\n      member_data = data_type_map.FoldByteStream(\n          member_value, context=subcontext)\n      if member_data is None:\n        return None\n\n      data_attributes.append(member_data)\n\n    if context:\n      context.state = {}\n\n    return b''.join(data_attributes)", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def get_rprof(step, var):\n    if (var in step.rprof.columns):\n        rprof = step.rprof[var]\n        rad = None\n        if (var in phyvars.RPROF):\n            meta = phyvars.RPROF[var]\n        else:\n            meta = phyvars.Varr(var, None, '1')\n    elif (var in phyvars.RPROF_EXTRA):\n        meta = phyvars.RPROF_EXTRA[var]\n        (rprof, rad) = meta.description(step)\n        meta = phyvars.Varr(misc.baredoc(meta.description), meta.kind, meta.dim)\n    else:\n        raise UnknownRprofVarError(var)\n    (rprof, _) = step.sdat.scale(rprof, meta.dim)\n    if (rad is not None):\n        (rad, _) = step.sdat.scale(rad, 'm')\n    return (rprof, rad, meta)", "docstring": "Extract or compute and rescale requested radial profile.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): radial profile name, a key of :data:`stagpy.phyvars.RPROF`\nor :data:`stagpy.phyvars.RPROF_EXTRA`.\nReturns:\ntuple of :class:`numpy.array` and :class:`stagpy.phyvars.Varr`:\nrprof, rad, meta\nrprof is the requested profile, rad the radial position at which it\nis evaluated (set to None if it is the position of profiles output\nby StagYY), and meta is a :class:`stagpy.phyvars.Varr` instance\nholding metadata of the requested variable.", "source": "codesearchnet"}
{"code": "def combine_slices(self, slices, tensor_shape, device=None):\n    \n    if tensor_shape.ndims == 0:\n      return slices[0]\n\n    ret = slices[:]\n    tensor_layout = self.tensor_layout(tensor_shape)\n    for mesh_dim, tensor_axis in zip(\n        self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)):\n      slice_size = len(ret) \n      if tensor_axis is None:\n        ret = ret[:slice_size]\n      else:\n        if device:\n          devices = [device] * slice_size\n        else:\n          devices = [ret[i].device for i in xrange(slice_size)]\n        concat_inputs = []\n        for i in xrange(slice_size):\n          concat_inputs.append(\n              [ret[i + slice_size * j] for j in xrange(mesh_dim.size)])\n        ret = parallel(\n            devices, tf.concat, concat_inputs,\n            axis=[tensor_axis] * len(devices))\n    assert len(ret) == 1\n    return ret[0]", "docstring": "Turns a set of slices into a single tensor.\n\nArgs:\nslices: list of tf.Tensor with length self.size.\ntensor_shape: Shape.\ndevice: optional str. If absent, we use the devices of the slices.\n\nReturns:\ntf.Tensor.", "source": "juraj-google-style"}
{"code": "def reset_network(roles, extra_vars=None):\n    \n    logger.debug('Reset the constraints')\n\n    if not extra_vars:\n        extra_vars = {}\n\n    tmpdir = os.path.join(os.getcwd(), TMP_DIRNAME)\n\n    _check_tmpdir(tmpdir)\n    utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')\n    options = {'enos_action': 'tc_reset',\n               'tc_output_dir': tmpdir}\n    options.update(extra_vars)\n    run_ansible([utils_playbook], roles=roles, extra_vars=options)", "docstring": "Reset the network constraints (latency, bandwidth ...)\n\nRemove any filter that have been applied to shape the traffic.\n\nArgs:\nroles (dict): role->hosts mapping as returned by\n:py:meth:`enoslib.infra.provider.Provider.init`\ninventory (str): path to the inventory", "source": "juraj-google-style"}
{"code": "def get_relative_modpath(module_fpath):\n    modsubdir_list = get_module_subdir_list(module_fpath)\n    (_, ext) = splitext(module_fpath)\n    rel_modpath = (join(*modsubdir_list) + ext)\n    rel_modpath = ensure_crossplat_path(rel_modpath)\n    return rel_modpath", "docstring": "Returns path to module relative to the package root\n\nArgs:\nmodule_fpath (str): module filepath\n\nReturns:\nstr: modname\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> import utool as ut\n>>> module_fpath = ut.util_path.__file__\n>>> rel_modpath = ut.get_relative_modpath(module_fpath)\n>>> rel_modpath = rel_modpath.replace('.pyc', '.py')  # allow pyc or py\n>>> result = ensure_crossplat_path(rel_modpath)\n>>> print(result)\nutool/util_path.py", "source": "codesearchnet"}
{"code": "def delete_duplicates(seq):\n    \n    seen = set()\n    seen_add = seen.add\n    return [x for x in seq if not (x in seen or seen_add(x))]", "docstring": "Remove duplicates from an iterable, preserving the order.\n\nArgs:\nseq: Iterable of various type.\n\nReturns:\nlist: List of unique objects.", "source": "juraj-google-style"}
{"code": "def fwd(self, x_data):\n        \n        x_data = numpy.asfarray(x_data)\n        shape = x_data.shape\n        x_data = x_data.reshape(len(self), -1)\n\n        lower, upper = evaluation.evaluate_bound(self, x_data)\n        q_data = numpy.zeros(x_data.shape)\n        indices = x_data > upper\n        q_data[indices] = 1\n        indices = ~indices & (x_data >= lower)\n\n        q_data[indices] = numpy.clip(evaluation.evaluate_forward(\n            self, x_data), a_min=0, a_max=1)[indices]\n\n        q_data = q_data.reshape(shape)\n        return q_data", "docstring": "Forward Rosenblatt transformation.\n\nArgs:\nx_data (numpy.ndarray):\nLocation for the distribution function. ``x_data.shape`` must\nbe compatible with distribution shape.\n\nReturns:\n(numpy.ndarray):\nEvaluated distribution function values, where\n``out.shape==x_data.shape``.", "source": "juraj-google-style"}
{"code": "def __init__(self, requests, expert_capacity):\n    \n    self._requests = tf.to_float(requests)\n    self._expert_capacity = expert_capacity\n    expert_capacity_f = tf.to_float(expert_capacity)\n    self._batch, self._length, self._num_experts = tf.unstack(\n        tf.shape(self._requests), num=3)\n\n    \n    position_in_expert = tf.cumsum(self._requests, axis=1, exclusive=True)\n    \n    self._gates = self._requests * tf.to_float(\n        tf.less(position_in_expert, expert_capacity_f))\n    batch_index = tf.reshape(\n        tf.to_float(tf.range(self._batch)), [self._batch, 1, 1])\n    length_index = tf.reshape(\n        tf.to_float(tf.range(self._length)), [1, self._length, 1])\n    expert_index = tf.reshape(\n        tf.to_float(tf.range(self._num_experts)), [1, 1, self._num_experts])\n    \n    flat_position = (\n        position_in_expert +\n        batch_index * (tf.to_float(self._num_experts) * expert_capacity_f) +\n        expert_index * expert_capacity_f)\n    \n    \n    self._indices = tf.unsorted_segment_sum(\n        data=tf.reshape((length_index + 1.0) * self._gates, [-1]),\n        segment_ids=tf.to_int32(tf.reshape(flat_position, [-1])),\n        num_segments=self._batch * self._num_experts * expert_capacity)\n    self._indices = tf.reshape(\n        self._indices,\n        [self._batch, self._num_experts, expert_capacity])\n    \n    \n    self._nonpadding = tf.minimum(self._indices, 1.0)\n    \n    self._indices = tf.nn.relu(self._indices - 1.0)\n    \n    \n    self._flat_indices = tf.to_int32(\n        self._indices +\n        (tf.reshape(tf.to_float(tf.range(self._batch)), [-1, 1, 1])\n         * tf.to_float(self._length)))\n    self._indices = tf.to_int32(self._indices)", "docstring": "Create a TruncatingDispatcher.\n\nArgs:\nrequests: a boolean `Tensor` of shape `[batch, length, num_experts]`.\nAlternatively, a float or int Tensor containing zeros and ones.\nexpert_capacity: a Scalar - maximum number of examples per expert per\nbatch element.\n\nReturns:\na TruncatingDispatcher", "source": "juraj-google-style"}
{"code": "def seek(self, offset, whence=Seek.set):\n    _whence = int(whence)\n    if (_whence == Seek.current):\n        offset += self._pos\n    if ((_whence == Seek.current) or (_whence == Seek.set)):\n        if (offset < 0):\n            raise ValueError('Negative seek position {}'.format(offset))\n    elif (_whence == Seek.end):\n        if (offset > 0):\n            raise ValueError('Positive seek position {}'.format(offset))\n        offset += self._end\n    else:\n        raise ValueError('Invalid whence ({}, should be {}, {} or {})'.format(_whence, Seek.set, Seek.current, Seek.end))\n    if (offset < self._pos):\n        self._f = self._zip.open(self.name)\n        self._pos = 0\n    self.read((offset - self._pos))\n    return self._pos", "docstring": "Change stream position.\n\nChange the stream position to the given byte offset. The\noffset is interpreted relative to the position indicated by\n``whence``.\n\nArguments:\noffset (int): the offset to the new position, in bytes.\nwhence (int): the position reference. Possible values are:\n* `Seek.set`: start of stream (the default).\n* `Seek.current`: current position; offset may be negative.\n* `Seek.end`: end of stream; offset must be negative.\n\nReturns:\nint: the new absolute position.\n\nRaises:\nValueError: when ``whence`` is not known, or ``offset``\nis invalid.\n\nNote:\nZip compression does not support seeking, so the seeking\nis emulated. Seeking somewhere else than the current position\nwill need to either:\n* reopen the file and restart decompression\n* read and discard data to advance in the file", "source": "codesearchnet"}
{"code": "def model_from_yaml(yaml_string, custom_objects=None):\n    raise RuntimeError('Method `model_from_yaml()` has been removed due to security risk of arbitrary code execution. Please use `Model.to_json()` and `model_from_json()` instead.')", "docstring": "Parses a yaml model configuration file and returns a model instance.\n\nNote: Since TF 2.6, this method is no longer supported and will raise a\nRuntimeError.\n\nArgs:\nyaml_string: YAML string or open file encoding a model configuration.\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\n\nReturns:\nA Keras model instance (uncompiled).\n\nRaises:\nRuntimeError: announces that the method poses a security risk", "source": "github-repos"}
{"code": "def prune_layer(layer: nn.Linear | Conv1D, index: torch.LongTensor, dim: int | None=None) -> nn.Linear | Conv1D:\n    if isinstance(layer, nn.Linear):\n        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)\n    elif isinstance(layer, Conv1D):\n        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)\n    else:\n        raise ValueError(f\"Can't prune layer of class {layer.__class__}\")", "docstring": "Prune a Conv1D or linear layer to keep only entries in index.\n\nUsed to remove heads.\n\nArgs:\nlayer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.\nindex (`torch.LongTensor`): The indices to keep in the layer.\ndim (`int`, *optional*): The dimension on which to keep the indices.\n\nReturns:\n`torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.", "source": "github-repos"}
{"code": "def pixelate(x, severity=1):\n  \n  c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]\n  shape = x.shape\n  x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))\n  x = x.resize((int(shape[1] * c), int(shape[0] * c)))\n  x = x.resize((shape[1], shape[0]))\n  return np.asarray(x)", "docstring": "Pixelate images.\n\nConduct pixelating corruptions to images by first shrinking the images and\nthen resizing to original size.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Applied pixelating\ncorruption.", "source": "juraj-google-style"}
{"code": "def project_texture_on_surface(texture, surface, angle=DEFAULT_ANGLE):\n    \n    projected_surface = project_surface(surface, angle)\n    texture_x, _ = texture\n    texture_y = map_texture_to_surface(texture, projected_surface)\n    return texture_x, texture_y", "docstring": "Maps a texture onto a surface, then projects to 2D and returns a layer.\n\nArgs:\ntexture (texture): the texture to project\nsurface (surface): the surface to project onto\nangle (float): the projection angle in degrees (0 = top-down, 90 = side view)\n\nReturns:\nlayer: A layer.", "source": "juraj-google-style"}
{"code": "def turb44(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[46] == '0':\n        return None\n\n    turb = bin2int(d[47:49])\n\n    return turb", "docstring": "Turblence.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: turbulence level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj-google-style"}
{"code": "def visualize_conv_weights(filters, name):\n    \n    with tf.name_scope('visualize_w_' + name):\n        filters = tf.transpose(filters, (3, 2, 0, 1))   \n        filters = tf.unstack(filters)                   \n        filters = tf.concat(filters, 1)                 \n        filters = tf.unstack(filters)                   \n        filters = tf.concat(filters, 1)                 \n        filters = tf.expand_dims(filters, 0)\n        filters = tf.expand_dims(filters, -1)\n\n    tf.summary.image('visualize_w_' + name, filters)", "docstring": "Visualize use weights in convolution filters.\n\nArgs:\nfilters: tensor containing the weights [H,W,Cin,Cout]\nname: label for tensorboard\n\nReturns:\nimage of all weight", "source": "juraj-google-style"}
{"code": "def set(cls, values):\n        \n        cls.mrc_out_el.text = values.get(\"mrc\", \"\")\n        cls.oai_out_el.text = values.get(\"oai\", \"\")\n        cls.dc_out_el.text = values.get(\"dc\", \"\")\n        cls.filename = values.get(\"fn\", \"fn\")\n\n        cls.values = values", "docstring": "Set the elements from the data obtained from REST API.\n\nArgs:\nvalues (dict): Dict with ``mrc``, ``oai``, ``dc`` and ``fn`` keys.", "source": "juraj-google-style"}
{"code": "def get_by(self, field, value):\n    if ((field == 'userName') or (field == 'name')):\n        return self._client.get(((self.URI + '/') + value))\n    elif (field == 'role'):\n        value = value.replace(' ', '%20')\n        return self._client.get(((self.URI + '/roles/users/') + value))['members']\n    else:\n        raise HPOneViewException('Only userName, name and role can be queried for this resource.')", "docstring": "Gets all Users that match the filter.\n\nThe search is case-insensitive.\n\nArgs:\nfield: Field name to filter. Accepted values: 'name', 'userName', 'role'\nvalue: Value to filter.\n\nReturns:\nlist: A list of Users.", "source": "codesearchnet"}
{"code": "def create_test_suite(cls, name: str, path: str):\n    return type(name, (unittest.TestCase,), dict(cls.parse_test_methods(path)))", "docstring": "Dynamically creates a unittest.TestCase subclass with generated tests.\n\nThis method takes a suite name and a path (or glob pattern). It uses\n`parse_test_methods` to find YAML files at the given path and generate\nindividual test methods for each. These generated test methods are then\nadded as attributes to a new class, which is a subclass of\n`unittest.TestCase`.\n\nArgs:\nname: The desired name for the dynamically created test suite class.\npath: A string representing the path or glob pattern to search for\nYAML example files, which will be used to generate test methods.\n\nReturns:\nA new class, subclass of `unittest.TestCase`, containing dynamically\ngenerated test methods based on the YAML files found at the given path.", "source": "github-repos"}
{"code": "def get_variables(scope=None, suffix=None):\n  \n  candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]\n  if suffix is not None:\n    candidates = [var for var in candidates if var.op.name.endswith(suffix)]\n  return candidates", "docstring": "Gets the list of variables, filtered by scope and/or suffix.\n\nArgs:\nscope: an optional scope for filtering the variables to return.\nsuffix: an optional suffix for filtering the variables to return.\n\nReturns:\na copied list of variables with scope and suffix.", "source": "juraj-google-style"}
{"code": "def _merge_section(original, to_merge):\n    \n    \n    if not original:\n        return to_merge or ''\n    if not to_merge:\n        return original or ''\n    try:\n        index = original.index(':') + 1\n    except ValueError:\n        index = original.index('\\n')\n    name = original[:index].strip()\n    section = '\\n  '.join(\n        (original[index + 1:].lstrip(), to_merge[index + 1:].lstrip())\n    ).rstrip()\n    return '{name}\\n  {section}'.format(name=name, section=section)", "docstring": "Merge two sections together.\n\nArgs:\noriginal: The source of header and initial section lines.\nto_merge: The source for the additional section lines to append.\n\nReturns:\nA new section string that uses the header of the original argument and\nthe section lines from both.", "source": "juraj-google-style"}
{"code": "def parseFloat(self, words):\n\n    def pointFloat(words):\n        m = re.search('(.*) point (.*)', words)\n        if m:\n            whole = m.group(1)\n            frac = m.group(2)\n            total = 0.0\n            coeff = 0.1\n            for digit in frac.split(' '):\n                total += (coeff * self.parse(digit))\n                coeff /= 10.0\n            return (self.parseInt(whole) + total)\n        return None\n\n    def fractionFloat(words):\n        m = re.search('(.*) and (.*)', words)\n        if m:\n            whole = self.parseInt(m.group(1))\n            frac = m.group(2)\n            frac = re.sub('(\\\\w+)s(\\\\b)', '\\\\g<1>\\\\g<2>', frac)\n            frac = re.sub('(\\\\b)a(\\\\b)', '\\\\g<1>one\\\\g<2>', frac)\n            split = frac.split(' ')\n            num = split[:1]\n            denom = split[1:]\n            while denom:\n                try:\n                    num_value = self.parse(' '.join(num))\n                    denom_value = self.parse(' '.join(denom))\n                    return (whole + (float(num_value) / denom_value))\n                except:\n                    num += denom[:1]\n                    denom = denom[1:]\n        return None\n    result = pointFloat(words)\n    if result:\n        return result\n    result = fractionFloat(words)\n    if result:\n        return result\n    return self.parseInt(words)", "docstring": "Convert a floating-point number described in words to a double.\n\nSupports two kinds of descriptions: those with a 'point' (e.g.,\n\"one point two five\") and those with a fraction (e.g., \"one and\na quarter\").\n\nArgs:\nwords (str): Description of the floating-point number.\n\nReturns:\nA double representation of the words.", "source": "codesearchnet"}
{"code": "def media_download(self, mxcurl, allow_remote=True):\n    query_params = {}\n    if (not allow_remote):\n        query_params['allow_remote'] = False\n    if mxcurl.startswith('mxc:\n        return self._send('GET', mxcurl[6:], api_path='/_matrix/media/r0/download/', query_params=query_params, return_json=False)\n    else:\n        raise ValueError((\"MXC URL '%s' did not begin with 'mxc:", "docstring": "Download raw media from provided mxc URL.\n\nArgs:\nmxcurl (str): mxc media URL.\nallow_remote (bool): indicates to the server that it should not\nattempt to fetch the media if it is deemed remote. Defaults\nto true if not provided.", "source": "codesearchnet"}
{"code": "def setdim(P, dim=None):\n    P = P.copy()\n    ldim = P.dim\n    if (not dim):\n        dim = (ldim + 1)\n    if (dim == ldim):\n        return P\n    P.dim = dim\n    if (dim > ldim):\n        key = numpy.zeros(dim, dtype=int)\n        for lkey in P.keys:\n            key[:ldim] = lkey\n            P.A[tuple(key)] = P.A.pop(lkey)\n    else:\n        key = numpy.zeros(dim, dtype=int)\n        for lkey in P.keys:\n            if ((not sum(lkey[(ldim - 1):])) or (not sum(lkey))):\n                P.A[lkey[:dim]] = P.A.pop(lkey)\n            else:\n                del P.A[lkey]\n    P.keys = sorted(P.A.keys(), key=sort_key)\n    return P", "docstring": "Adjust the dimensions of a polynomial.\n\nOutput the results into Poly object\n\nArgs:\nP (Poly) : Input polynomial\ndim (int) : The dimensions of the output polynomial. If omitted,\nincrease polynomial with one dimension. If the new dim is\nsmaller then P's dimensions, variables with cut components are\nall cut.\n\nExamples:\n>>> x,y = chaospy.variable(2)\n>>> P = x*x-x*y\n>>> print(chaospy.setdim(P, 1))\nq0^2", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    output = copy.deepcopy(self.__dict__)\n    output['semantic_config'] = self.semantic_config.to_dict()\n    output['coarse_acoustics_config'] = self.coarse_acoustics_config.to_dict()\n    output['fine_acoustics_config'] = self.fine_acoustics_config.to_dict()\n    output['model_type'] = self.__class__.model_type\n    return output", "docstring": "Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\nReturns:\n`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,", "source": "github-repos"}
{"code": "def write_temporary_file(content, prefix='', suffix=''):\n    temp = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, mode='w+t', delete=False)\n    temp.writelines(content)\n    temp.close()\n    return temp.name", "docstring": "Generating a temporary file with content.\n\nArgs:\ncontent (str): file content (usually a script, Dockerfile, playbook or config file)\nprefix (str): the filename starts with this prefix (default: no prefix)\nsuffix (str): the filename ends with this suffix (default: no suffix)\n\nReturns:\nstr: name of the temporary file\n\nNote:\nYou are responsible for the deletion of the file.", "source": "codesearchnet"}
{"code": "def revnet(name, x, hparams, reverse=True):\n  \n  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n    steps = np.arange(hparams.depth)\n    if reverse:\n      steps = steps[::-1]\n\n    objective = 0.0\n    for step in steps:\n      x, curr_obj = revnet_step(\n          \"revnet_step_%d\" % step, x, hparams, reverse=reverse)\n      objective += curr_obj\n    return x, objective", "docstring": "hparams.depth' steps of generative flow.\n\nArgs:\nname: variable scope for the revnet block.\nx: 4-D Tensor, shape=(NHWC).\nhparams: HParams.\nreverse: bool, forward or backward pass.\nReturns:\nx: 4-D Tensor, shape=(NHWC).\nobjective: float.", "source": "juraj-google-style"}
{"code": "def _separate_words(string):\n    words = []\n    separator = ''\n    i = 1\n    s = 0\n    p = string[0:1]\n    was_upper = False\n    if string.isupper():\n        string = string.lower()\n        was_upper = True\n    while (i <= len(string)):\n        c = string[i:(i + 1)]\n        split = False\n        if (i < len(string)):\n            if UPPER.match(c):\n                split = True\n            elif (NOTSEP.match(c) and SEP.match(p)):\n                split = True\n            elif (SEP.match(c) and NOTSEP.match(p)):\n                split = True\n        else:\n            split = True\n        if split:\n            if NOTSEP.match(p):\n                words.append(string[s:i])\n            else:\n                if (not separator):\n                    separator = string[s:(s + 1)]\n                words.append(None)\n            s = i\n        i += 1\n        p = c\n    return (words, separator, was_upper)", "docstring": "Segment string on separator into list of words.\n\nArguments:\nstring -- the string we want to process\n\nReturns:\nwords -- list of words the string got minced to\nseparator -- the separator char intersecting words\nwas_upper -- whether string happened to be upper-case", "source": "codesearchnet"}
{"code": "def _Commit(self):\n    if not self.temp_cache_file.closed:\n        self.temp_cache_file.flush()\n        os.fsync(self.temp_cache_file.fileno())\n        self.temp_cache_file.close()\n    else:\n        self.log.debug('temp cache file was already closed before Commit')\n    try:\n        shutil.copymode(self.GetCompatFilename(), self.temp_cache_filename)\n        stat_info = os.stat(self.GetCompatFilename())\n        uid = stat_info.st_uid\n        gid = stat_info.st_gid\n        os.chown(self.temp_cache_filename, uid, gid)\n    except OSError as e:\n        if e.errno == errno.ENOENT:\n            if self.map_name == 'sshkey':\n                os.chmod(self.temp_cache_filename, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)\n            else:\n                os.chmod(self.temp_cache_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)\n    self.log.debug('committing temporary cache file %r to %r', self.temp_cache_filename, self.GetCacheFilename())\n    os.rename(self.temp_cache_filename, self.GetCacheFilename())\n    return True", "docstring": "Ensure the cache is now the active data source for NSS.\n\nPerform an atomic rename on the cache file to the location\nexpected by the NSS module.  No verification of database validity\nor consistency is performed here.\n\nReturns:\nAlways returns True", "source": "github-repos"}
{"code": "def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n    inputs = self._prepare_inputs(inputs)\n    gen_kwargs = {'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams}\n    if self.args.predict_with_generate and (not self.args.prediction_loss_only):\n        generated_tokens = self.model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], **gen_kwargs)\n        if generated_tokens.shape[-1] < gen_kwargs['max_length']:\n            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])\n    labels = inputs.pop('labels')\n    with torch.no_grad():\n        loss, logits = self._compute_loss(model, inputs, labels)\n    loss = loss.mean().detach()\n    if self.args.prediction_loss_only:\n        return (loss, None, None)\n    logits = generated_tokens if self.args.predict_with_generate else logits\n    if labels.shape[-1] < gen_kwargs['max_length']:\n        labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])\n    return (loss, logits, labels)", "docstring": "Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\nSubclass and override to inject custom behavior.\n\nArgs:\nmodel (:obj:`nn.Module`):\nThe model to evaluate.\ninputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\nThe inputs and targets of the model.\n\nThe dictionary will be unpacked before being fed to the model. Most models expect the targets under the\nargument :obj:`labels`. Check your model's documentation for all accepted arguments.\nprediction_loss_only (:obj:`bool`):\nWhether or not to return the loss only.\n\nReturn:\nTuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\nA tuple with the loss, logits and labels (each being optional).", "source": "github-repos"}
{"code": "def _do_sampling(self, logits, num_samples, sampler):\n    with test_util.use_gpu():\n        random_seed.set_random_seed(1618)\n        op = sampler(constant_op.constant(logits), num_samples)\n        d = self.evaluate(op)\n    batch_size, num_classes = logits.shape\n    freqs_mat = []\n    for i in range(batch_size):\n        cnts = dict(collections.Counter(d[i, :]))\n        self.assertLess(max(cnts.keys()), num_classes)\n        self.assertGreaterEqual(min(cnts.keys()), 0)\n        freqs = [cnts[k] * 1.0 / num_samples if k in cnts else 0 for k in range(num_classes)]\n        freqs_mat.append(freqs)\n    return freqs_mat", "docstring": "Samples using the supplied sampler and inputs.\n\nArgs:\nlogits: Numpy ndarray of shape [batch_size, num_classes].\nnum_samples: Int; number of samples to draw.\nsampler: A sampler function that takes (1) a [batch_size, num_classes]\nTensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.\n\nReturns:\nFrequencies from sampled classes; shape [batch_size, num_classes].", "source": "github-repos"}
{"code": "class TFConvNextV2Layer(keras.layers.Layer):\n\n    def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float=0.0, **kwargs):\n        super().__init__(**kwargs)\n        self.dim = dim\n        self.config = config\n        self.dwconv = keras.layers.Conv2D(filters=dim, kernel_size=7, padding='same', groups=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='dwconv')\n        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')\n        self.pwconv1 = keras.layers.Dense(units=4 * dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='pwconv1')\n        self.act = get_tf_activation(config.hidden_act)\n        self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name='grn')\n        self.pwconv2 = keras.layers.Dense(units=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='pwconv2')\n        self.drop_path = TFConvNextV2DropPath(drop_path, name='drop_path') if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')\n\n    def call(self, hidden_states, training=False):\n        input = hidden_states\n        x = self.dwconv(hidden_states)\n        x = self.layernorm(x)\n        x = self.pwconv1(x)\n        x = self.act(x)\n        x = self.grn(x)\n        x = self.pwconv2(x)\n        x = self.drop_path(x, training=training)\n        x = input + x\n        return x\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'dwconv', None) is not None:\n            with tf.name_scope(self.dwconv.name):\n                self.dwconv.build([None, None, None, self.dim])\n        if getattr(self, 'layernorm', None) is not None:\n            with tf.name_scope(self.layernorm.name):\n                self.layernorm.build([None, None, None, self.dim])\n        if getattr(self, 'pwconv1', None) is not None:\n            with tf.name_scope(self.pwconv1.name):\n                self.pwconv1.build([None, None, self.dim])\n        if getattr(self, 'grn', None) is not None:\n            with tf.name_scope(self.grn.name):\n                self.grn.build(None)\n        if getattr(self, 'pwconv2', None) is not None:\n            with tf.name_scope(self.pwconv2.name):\n                self.pwconv2.build([None, None, 4 * self.dim])\n        if getattr(self, 'drop_path', None) is not None:\n            with tf.name_scope(self.drop_path.name):\n                self.drop_path.build(None)", "docstring": "This corresponds to the `Block` class in the original implementation.\n\nThere are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,\nH, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back\n\nThe authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow\nNHWC ordering, we can just apply the operations straight-away without the permutation.\n\nArgs:\nconfig (`ConvNextV2Config`):\nModel configuration class.\ndim (`int`):\nNumber of input channels.\ndrop_path (`float`, *optional*, defaults to 0.0):\nStochastic depth rate.", "source": "github-repos"}
{"code": "def separate_resources(self):\n    self._separate_hdxobjects(self.resources, 'resources', 'name', hdx.data.resource.Resource)", "docstring": "Move contents of resources key in internal dictionary into self.resources\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def cluster_sites(mol, tol, give_only_index=False):\n    dists = [[np.linalg.norm(site.coords), 0] for site in mol]\n    import scipy.cluster as spcluster\n    f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')\n    clustered_dists = defaultdict(list)\n    for (i, site) in enumerate(mol):\n        clustered_dists[f[i]].append(dists[i])\n    avg_dist = {label: np.mean(val) for (label, val) in clustered_dists.items()}\n    clustered_sites = defaultdict(list)\n    origin_site = None\n    for (i, site) in enumerate(mol):\n        if (avg_dist[f[i]] < tol):\n            if give_only_index:\n                origin_site = i\n            else:\n                origin_site = site\n        elif give_only_index:\n            clustered_sites[(avg_dist[f[i]], site.species)].append(i)\n        else:\n            clustered_sites[(avg_dist[f[i]], site.species)].append(site)\n    return (origin_site, clustered_sites)", "docstring": "Cluster sites based on distance and species type.\n\nArgs:\nmol (Molecule): Molecule **with origin at center of mass**.\ntol (float): Tolerance to use.\n\nReturns:\n(origin_site, clustered_sites): origin_site is a site at the center\nof mass (None if there are no origin atoms). clustered_sites is a\ndict of {(avg_dist, species_and_occu): [list of sites]}", "source": "codesearchnet"}
{"code": "def new(image):\n    pointer = vips_lib.vips_region_new(image.pointer)\n    if (pointer == ffi.NULL):\n        raise Error('unable to make region')\n    return pyvips.Region(pointer)", "docstring": "Make a region on an image.\n\nReturns:\nA new :class:`.Region`.\n\nRaises:\n:class:`.Error`", "source": "codesearchnet"}
{"code": "def ResetSection(self, directive):\n    self._section = self._INITIAL_SECTION\n    self._last_header = ''\n    if (directive in ('if', 'ifdef', 'ifndef')):\n        self.include_list.append([])\n    elif (directive in ('else', 'elif')):\n        self.include_list[(- 1)] = []", "docstring": "Reset section checking for preprocessor directive.\n\nArgs:\ndirective: preprocessor directive (e.g. \"if\", \"else\").", "source": "codesearchnet"}
{"code": "def gradients(ys, xs, grad_ys=None):\n    graph = ys[0].graph\n    if (not grad_ys):\n        grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys]\n    downstream = set(xs)\n    for op in graph.operations:\n        if op.has_gradient:\n            if (set(op.inputs) & downstream):\n                downstream |= set(op.outputs)\n    tensor_to_gradient = dict(zip(ys, grad_ys))\n    for op in graph.operations[::(- 1)]:\n        grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs]\n        if (op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream)):\n            with tf.variable_scope((op.name + '/gradients')):\n                input_grads = op.gradient(grad_outputs)\n                for (inp, grad) in zip(op.inputs, input_grads):\n                    if ((inp in downstream) and (grad is not None)):\n                        if (inp in tensor_to_gradient):\n                            tensor_to_gradient[inp] += grad\n                        else:\n                            tensor_to_gradient[inp] = grad\n    return [tensor_to_gradient.get(x, None) for x in xs]", "docstring": "Compute gradients in dtf.\n\nArgs:\nys: a list of Tensors\nxs: a list of Tensors\ngrad_ys: an optional list of Tensors\n\nReturns:\ngrad_xs: a list of Tensors", "source": "codesearchnet"}
{"code": "def parse_node(self, node):\n    spec = super(CamundaProcessParser, self).parse_node(node)\n    spec.data = self._parse_input_data(node)\n    spec.data['lane_data'] = self._get_lane_properties(node)\n    spec.defines = spec.data\n    service_class = node.get(full_attr('assignee'))\n    if service_class:\n        self.parsed_nodes[node.get('id')].service_class = node.get(full_attr('assignee'))\n    return spec", "docstring": "Overrides ProcessParser.parse_node\nParses and attaches the inputOutput tags that created by Camunda Modeller\n\nArgs:\nnode: xml task node\nReturns:\nTaskSpec", "source": "codesearchnet"}
{"code": "def _add_asset_to_metagraph(meta_graph_def, asset_filename, asset_tensor):\n    asset_proto = meta_graph_def.asset_file_def.add()\n    asset_proto.filename = asset_filename\n    asset_proto.tensor_info.name = asset_tensor.name", "docstring": "Builds an asset proto and adds it to the meta graph def.\n\nArgs:\nmeta_graph_def: The meta graph def to which the asset will be added.\nasset_filename: The filename of the asset to be added.\nasset_tensor: The asset tensor used to populate the tensor info of the asset\nproto.", "source": "github-repos"}
{"code": "def retrieve_artifacts(self, compose_data, output_data_config, job_name):\n        \n        \n        \n        artifacts = os.path.join(self.container_root, 'artifacts')\n        compressed_artifacts = os.path.join(self.container_root, 'compressed_artifacts')\n        os.mkdir(artifacts)\n\n        model_artifacts = os.path.join(artifacts, 'model')\n        output_artifacts = os.path.join(artifacts, 'output')\n\n        artifact_dirs = [model_artifacts, output_artifacts, compressed_artifacts]\n        for d in artifact_dirs:\n            os.mkdir(d)\n\n        \n        for host in self.hosts:\n            volumes = compose_data['services'][str(host)]['volumes']\n            for volume in volumes:\n                host_dir, container_dir = volume.split(':')\n                if container_dir == '/opt/ml/model':\n                    sagemaker.local.utils.recursive_copy(host_dir, model_artifacts)\n                elif container_dir == '/opt/ml/output':\n                    sagemaker.local.utils.recursive_copy(host_dir, output_artifacts)\n\n        \n        model_files = [os.path.join(model_artifacts, name) for name in os.listdir(model_artifacts)]\n        output_files = [os.path.join(output_artifacts, name) for name in os.listdir(output_artifacts)]\n        sagemaker.utils.create_tar_file(model_files, os.path.join(compressed_artifacts, 'model.tar.gz'))\n        sagemaker.utils.create_tar_file(output_files, os.path.join(compressed_artifacts, 'output.tar.gz'))\n\n        if output_data_config['S3OutputPath'] == '':\n            output_data = 'file:\n        else:\n            \n            output_data = sagemaker.local.utils.move_to_destination(\n                compressed_artifacts,\n                output_data_config['S3OutputPath'],\n                job_name,\n                self.sagemaker_session)\n\n        _delete_tree(model_artifacts)\n        _delete_tree(output_artifacts)\n\n        return os.path.join(output_data, 'model.tar.gz')", "docstring": "Get the model artifacts from all the container nodes.\n\nUsed after training completes to gather the data from all the individual containers. As the\nofficial SageMaker Training Service, it will override duplicate files if multiple containers have\nthe same file names.\n\nArgs:\ncompose_data(dict): Docker-Compose configuration in dictionary format.\n\nReturns: Local path to the collected model artifacts.", "source": "juraj-google-style"}
{"code": "def latex(self, aliases=None):\n    self._initialize_latex_array(aliases)\n    self._build_latex_array(aliases)\n    header_1 = '% \\\\documentclass[preview]{standalone}\\n% If the image is too large to fit on this documentclass use\\n\\\\documentclass[draft]{beamer}\\n'\n    beamer_line = '\\\\usepackage[size=custom,height=%d,width=%d,scale=%.1f]{beamerposter}\\n'\n    header_2 = '% instead and customize the height and width (in cm) to fit.\\n% Large images may run out of memory quickly.\\n% To fix this use the LuaLaTeX compiler, which dynamically\\n% allocates memory.\\n\\\\usepackage[braket, qm]{qcircuit}\\n\\\\usepackage{amsmath}\\n\\\\pdfmapfile{+sansmathaccent.map}\\n% \\\\usepackage[landscape]{geometry}\\n% Comment out the above line if using the beamer documentclass.\\n\\\\begin{document}\\n\\\\begin{equation*}'\n    qcircuit_line = '\\n    \\\\Qcircuit @C=%.1fem @R=%.1fem @!R {\\n'\n    output = io.StringIO()\n    output.write(header_1)\n    output.write(('%% img_width = %d, img_depth = %d\\n' % (self.img_width, self.img_depth)))\n    output.write((beamer_line % self._get_beamer_page()))\n    output.write(header_2)\n    output.write((qcircuit_line % (self.column_separation, self.row_separation)))\n    for i in range(self.img_width):\n        output.write('\\t \\t')\n        for j in range((self.img_depth + 1)):\n            cell_str = self._latex[i][j]\n            if ('barrier' in cell_str):\n                output.write(cell_str)\n            else:\n                cell_str = re.sub('[-+]?\\\\d*\\\\.\\\\d{2,}|\\\\d{2,}', _truncate_float, cell_str)\n                output.write(cell_str)\n            if (j != self.img_depth):\n                output.write(' & ')\n            else:\n                output.write(('\\\\\\\\' + '\\n'))\n    output.write('\\t }\\n')\n    output.write('\\\\end{equation*}\\n\\n')\n    output.write('\\\\end{document}')\n    contents = output.getvalue()\n    output.close()\n    return contents", "docstring": "Return LaTeX string representation of circuit.\n\nThis method uses the LaTeX Qconfig package to create a graphical\nrepresentation of the circuit.\n\nReturns:\nstring: for writing to a LaTeX file.", "source": "codesearchnet"}
{"code": "def removeChild(self, child, end_tag_too=True):\n        \n        \n        if _is_iterable(child):\n            for x in child:\n                self.removeChild(child=x, end_tag_too=end_tag_too)\n            return\n\n        if not self.childs:\n            return\n\n        end_tag = None\n        if end_tag_too:\n            end_tag = child.endtag\n\n        for e in self.childs:\n            if e != child:\n                e.removeChild(child, end_tag_too)\n                continue\n\n            if end_tag_too and end_tag in self.childs:\n                self.childs.remove(end_tag)\n\n            self.childs.remove(e)", "docstring": "Remove subelement (`child`) specified by reference.\n\nNote:\nThis can't be used for removing subelements by value! If you want\nto do such thing, try::\n\nfor e in dom.find(\"value\"):\ndom.removeChild(e)\n\nArgs:\nchild (obj): :class:`HTMLElement` instance which will be removed\nfrom this element.\nend_tag_too (bool, default True): Remove also `child` endtag.", "source": "juraj-google-style"}
{"code": "def _multi_request(self, verb, urls, query_params, data, to_json=True, send_as_file=False):\n    if (not urls):\n        raise InvalidRequestError('No URL supplied')\n    request_params = self._zip_request_params(urls, query_params, data)\n    batch_of_params = [request_params[pos:(pos + self._max_requests)] for pos in range(0, len(request_params), self._max_requests)]\n    all_responses = []\n    for param_batch in batch_of_params:\n        if self._rate_limiter:\n            self._rate_limiter.make_calls(num_calls=len(param_batch))\n        prepared_requests = [self._create_request(verb, url, query_params=query_param, data=datum, send_as_file=send_as_file) for (url, query_param, datum) in param_batch]\n        responses = self._wait_for_response(prepared_requests)\n        for response in responses:\n            if response:\n                all_responses.append((self._convert_to_json(response) if to_json else response))\n            else:\n                all_responses.append(None)\n    return all_responses", "docstring": "Issues multiple batches of simultaneous HTTP requests and waits for responses.\n\nArgs:\nverb - MultiRequest._VERB_POST or MultiRequest._VERB_GET\nurls - A string URL or list of string URLs\nquery_params - None, a dict, or a list of dicts representing the query params\ndata - None, a dict or string, or a list of dicts and strings representing the data body.\nto_json - A boolean, should the responses be returned as JSON blobs\nReturns:\nIf multiple requests are made - a list of dicts if to_json, a list of requests responses otherwise\nIf a single request is made, the return is not a list\nRaises:\nInvalidRequestError - if no URL is supplied or if any of the requests returns 403 Access Forbidden response", "source": "codesearchnet"}
{"code": "def getqualifiedname(namespace, object_, max_depth=5, visited=None):\n    if visited is None:\n        visited = set()\n    namespace = dict(namespace)\n    for name in namespace:\n        if object_ is namespace[name]:\n            return name\n    parent = tf_inspect.getmodule(object_)\n    if parent is not None and parent is not object_ and (parent is not namespace):\n        parent_name = getqualifiedname(namespace, parent, max_depth=0, visited=visited)\n        if parent_name is not None:\n            name_in_parent = getqualifiedname(parent.__dict__, object_, max_depth=0, visited=visited)\n            assert name_in_parent is not None, 'An object should always be found in its owner module'\n            return '{}.{}'.format(parent_name, name_in_parent)\n    if max_depth:\n        for name in namespace.keys():\n            value = namespace[name]\n            if tf_inspect.ismodule(value) and id(value) not in visited:\n                visited.add(id(value))\n                name_in_module = getqualifiedname(value.__dict__, object_, max_depth - 1, visited)\n                if name_in_module is not None:\n                    return '{}.{}'.format(name, name_in_module)\n    return None", "docstring": "Returns the name by which a value can be referred to in a given namespace.\n\nIf the object defines a parent module, the function attempts to use it to\nlocate the object.\n\nThis function will recurse inside modules, but it will not search objects for\nattributes. The recursion depth is controlled by max_depth.\n\nArgs:\nnamespace: Dict[str, Any], the namespace to search into.\nobject_: Any, the value to search.\nmax_depth: Optional[int], a limit to the recursion depth when searching\ninside modules.\nvisited: Optional[Set[int]], ID of modules to avoid visiting.\nReturns: Union[str, None], the fully-qualified name that resolves to the value\no, or None if it couldn't be found.", "source": "github-repos"}
{"code": "def set_nsxcontroller_port(self, **kwargs):\n    name = kwargs.pop('name')\n    port = str(kwargs.pop('port'))\n    port_args = dict(name=name, port=port)\n    method_name = 'nsx_controller_connection_addr_port'\n    method_class = self._brocade_tunnels\n    nsxcontroller_attr = getattr(method_class, method_name)\n    config = nsxcontroller_attr(**port_args)\n    output = self._callback(config)\n    return output", "docstring": "Set Nsx Controller pot on the switch\n\nArgs:\nport (int): 1 to 65535.\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturn value of `callback`.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, dev):\n        \n\n        self._dev = dev\n        self._dev_handle = None\n        self._scanchain = None\n        self._jtagon = False\n        self._speed = None", "docstring": "Initialize general controller driver values with defaults.\n\nArgs:\ndev (usb1.USBDevice) - Device entry the driver will control.", "source": "juraj-google-style"}
{"code": "def get_create_batch_env_fun(batch_env_fn, time_limit):\n\n    def create_env_fun(game_name=None, sticky_actions=None):\n        del game_name, sticky_actions\n        batch_env = batch_env_fn(in_graph=False)\n        batch_env = ResizeBatchObservation(batch_env)\n        batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit)\n        return batch_env\n    return create_env_fun", "docstring": "Factory for dopamine environment initialization function.\n\nArgs:\nbatch_env_fn: function(in_graph: bool) -> batch environment.\ntime_limit: time steps limit for environment.\n\nReturns:\nfunction (with optional, unused parameters) initializing environment.", "source": "codesearchnet"}
{"code": "def find(self, package, **kwargs):\n        \n        for finder in self.finders:\n            package_spec = finder.find(package, **kwargs)\n            if package_spec:\n                return package_spec\n        return None", "docstring": "Find a package using package finders.\n\nReturn the first package found.\n\nArgs:\npackage (str): package to find.\n**kwargs (): additional keyword arguments used by finders.\n\nReturns:\nPackageSpec: if package found, else None", "source": "juraj-google-style"}
{"code": "def get_balance(self, asset_hash, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_BALANCE, params=[asset_hash], id=id, endpoint=endpoint)", "docstring": "Get balance by asset hash\nArgs:\nasset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def cumsum(x, axis=0, exclusive=False):\n  \n  if not is_xla_compiled():\n    return tf.cumsum(x, axis=axis, exclusive=exclusive)\n  x_shape = shape_list(x)\n  rank = len(x_shape)\n  length = x_shape[axis]\n  my_range = tf.range(length)\n  comparator = tf.less if exclusive else tf.less_equal\n  mask = tf.cast(\n      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),\n      x.dtype)\n  ret = tf.tensordot(x, mask, axes=[[axis], [0]])\n  if axis != rank - 1:\n    ret = tf.transpose(\n        ret,\n        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))\n  return ret", "docstring": "TPU hack for tf.cumsum.\n\nThis is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless\nthe axis dimension is very large.\n\nArgs:\nx: a Tensor\naxis: an integer\nexclusive: a boolean\n\nReturns:\nTensor of the same shape as x.", "source": "juraj-google-style"}
{"code": "def _CountStoredAttributeContainers(self, container_type):\n    \n    if not container_type in self._CONTAINER_TYPES:\n      raise ValueError('Attribute container type {0:s} is not supported'.format(\n          container_type))\n\n    if not self._HasTable(container_type):\n      return 0\n\n    \n    \n    \n    query = 'SELECT MAX(_ROWID_) FROM {0:s} LIMIT 1'.format(container_type)\n    self._cursor.execute(query)\n    row = self._cursor.fetchone()\n    if not row:\n      return 0\n\n    return row[0] or 0", "docstring": "Counts the number of attribute containers of the given type.\n\nArgs:\ncontainer_type (str): attribute container type.\n\nReturns:\nint: number of attribute containers of the given type.\n\nRaises:\nValueError: if an unsupported container_type is provided.", "source": "juraj-google-style"}
{"code": "def get_type(self, index):\n    if ((index < 0) or (index >= len(self._types))):\n        raise ValueError('Index for getting order parameter type out-of-bounds!')\n    return self._types[index]", "docstring": "Return type of order parameter at the index provided and\nrepresented by a short string.\n\nArgs:\nindex (int): index of order parameter for which type is\nto be returned.\nReturns:\nstr: OP type.", "source": "codesearchnet"}
{"code": "def __init__(self, tokens, flags=re.MULTILINE):\n    \n    self.tokens = {}\n\n    \n    for state, patterns in tokens.iteritems():\n      full_patterns = []\n      for p in patterns:\n        pat = re.compile(p[0], flags)\n        action = p[1]\n        new_state = p[2] if len(p) >= 3 else None\n\n        \n        if new_state and new_state.startswith('\n          try:\n            new_state = -int(new_state.split(':')[1])\n          except IndexError, ValueError:\n            new_state = -1\n\n        full_patterns.append((pat, action, new_state))\n      self.tokens[state] = full_patterns", "docstring": "Create a new lexer\n\nArgs:\ntokens (dict(match rules)): Hierarchical dict of states with a list of regex patterns and transitions\nflags (int): Optional regex flags", "source": "juraj-google-style"}
{"code": "def extractTimes(self, inp):\n\n    def handleMatch(time):\n        relative = False\n        if (not time):\n            return None\n        elif (time.group(1) == 'morning'):\n            h = 8\n            m = 0\n        elif (time.group(1) == 'afternoon'):\n            h = 12\n            m = 0\n        elif (time.group(1) == 'evening'):\n            h = 19\n            m = 0\n        elif (time.group(4) and time.group(5)):\n            (h, m) = (0, 0)\n            converter = NumberService()\n            try:\n                diff = converter.parse(time.group(4))\n            except:\n                return None\n            if (time.group(5) == 'hours'):\n                h += diff\n            else:\n                m += diff\n            if time.group(6):\n                converter = NumberService()\n                try:\n                    diff = converter.parse(time.group(7))\n                except:\n                    return None\n                if (time.group(8) == 'hours'):\n                    h += diff\n                else:\n                    m += diff\n            relative = True\n        else:\n            t = time.group(2)\n            (h, m) = ((int(t.split(':')[0]) % 12), int(t.split(':')[1]))\n            try:\n                if (time.group(3) == 'pm'):\n                    h += 12\n            except IndexError:\n                pass\n        if relative:\n            return (self.now + datetime.timedelta(hours=h, minutes=m))\n        else:\n            return datetime.datetime(self.now.year, self.now.month, self.now.day, h, m)\n    inp = self._preprocess(inp)\n    return [handleMatch(time) for time in self._timeRegex.finditer(inp)]", "docstring": "Extracts time-related information from an input string.\nIgnores any information related to the specific date, focusing\non the time-of-day.\n\nArgs:\ninp (str): Input string to be parsed.\n\nReturns:\nA list of datetime objects containing the extracted times from the\ninput snippet, or an empty list if none found.", "source": "codesearchnet"}
{"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(LocateResponsePayload, self).read(input_buffer, kmip_version=kmip_version)\n    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n    if self.is_tag_next(enums.Tags.LOCATED_ITEMS, local_buffer):\n        self._located_items = primitives.Integer(tag=enums.Tags.LOCATED_ITEMS)\n        self._located_items.read(local_buffer, kmip_version=kmip_version)\n    self._unique_identifiers = []\n    while self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n        unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        unique_identifier.read(local_buffer, kmip_version=kmip_version)\n        self._unique_identifiers.append(unique_identifier)\n    self.is_oversized(local_buffer)", "docstring": "Read the data encoding the Locate response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data buffer containing encoded object\ndata, supporting a read method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def generate_batch(cls, strategy, size, **kwargs):\n        \n        assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)\n        batch_action = getattr(cls, '%s_batch' % strategy)\n        return batch_action(size, **kwargs)", "docstring": "Generate a batch of instances.\n\nThe instances will be created with the given strategy (one of\nBUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).\n\nArgs:\nstrategy (str): the strategy to use for generating the instance.\nsize (int): the number of instances to generate\n\nReturns:\nobject list: the generated instances", "source": "juraj-google-style"}
{"code": "def _GetDayOfYear(self, year, month, day_of_month):\n    \n    if month not in range(1, 13):\n      raise ValueError('Month value out of bounds.')\n\n    days_per_month = self._GetDaysPerMonth(year, month)\n    if day_of_month < 1 or day_of_month > days_per_month:\n      raise ValueError('Day of month value out of bounds.')\n\n    day_of_year = day_of_month\n    for past_month in range(1, month):\n      day_of_year += self._GetDaysPerMonth(year, past_month)\n\n    return day_of_year", "docstring": "Retrieves the day of the year for a specific day of a month in a year.\n\nArgs:\nyear (int): year e.g. 1970.\nmonth (int): month, where 1 represents January.\nday_of_month (int): day of the month, where 1 represents the first day.\n\nReturns:\nint: day of year.\n\nRaises:\nValueError: if the month or day of month value is out of bounds.", "source": "juraj-google-style"}
{"code": "def all_matches(pcoll, regex):\n    regex = Regex._regex_compile(regex)\n\n    def _process(element):\n        m = regex.match(element)\n        if m:\n            yield [m.group(ix) for ix in range(m.lastindex + 1)]\n    return pcoll | FlatMap(_process)", "docstring": "Returns all matches (groups) if zero or more characters at the beginning\nof string match the regular expression.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.", "source": "github-repos"}
{"code": "def dup(node, copy_map, field_name='___pyct_anno'):\n    for n in gast.walk(node):\n        for k in copy_map:\n            if hasanno(n, k, field_name):\n                setanno(n, copy_map[k], getanno(n, k, field_name), field_name)", "docstring": "Recursively copies annotations in an AST tree.\n\nArgs:\nnode: ast.AST\ncopy_map: Dict[Hashable, Hashable], maps a source anno key to a destination\nkey. All annotations with the source key will be copied to identical\nannotations with the destination key.\nfield_name: str", "source": "github-repos"}
{"code": "def read_pattern(text_str, patterns, terminate_on_match=False, postprocess=str):\n    compiled = {key: re.compile(pattern, (re.MULTILINE | re.DOTALL)) for (key, pattern) in patterns.items()}\n    matches = defaultdict(list)\n    for (key, pattern) in compiled.items():\n        for match in pattern.finditer(text_str):\n            matches[key].append([postprocess(i) for i in match.groups()])\n            if terminate_on_match:\n                break\n    return matches", "docstring": "General pattern reading on an input string\n\nArgs:\ntext_str (str): the input string to search for patterns\npatterns (dict): A dict of patterns, e.g.,\n{\"energy\": r\"energy\\\\(sigma->0\\\\)\\\\s+=\\\\s+([\\\\d\\\\-.]+)\"}.\nterminate_on_match (bool): Whether to terminate when there is at\nleast one match in each key in pattern.\npostprocess (callable): A post processing function to convert all\nmatches. Defaults to str, i.e., no change.\n\nRenders accessible:\nAny attribute in patterns. For example,\n{\"energy\": r\"energy\\\\(sigma->0\\\\)\\\\s+=\\\\s+([\\\\d\\\\-.]+)\"} will set the\nvalue of matches[\"energy\"] = [[-1234], [-3453], ...], to the\nresults from regex and postprocess. Note that the returned values\nare lists of lists, because you can grep multiple items on one line.", "source": "codesearchnet"}
{"code": "def requires_open_handle(method):  \n  \n  @functools.wraps(method)\n  def wrapper_requiring_open_handle(self, *args, **kwargs):\n    \n    if self.is_closed():\n      raise usb_exceptions.HandleClosedError()\n    return method(self, *args, **kwargs)\n  return wrapper_requiring_open_handle", "docstring": "Decorator to ensure a handle is open for certain methods.\n\nSubclasses should decorate their Read() and Write() with this rather than\nchecking their own internal state, keeping all \"is this handle open\" logic\nin is_closed().\n\nArgs:\nmethod: A class method on a subclass of UsbHandle\n\nRaises:\nHandleClosedError: If this handle has been closed.\n\nReturns:\nA wrapper around method that ensures the handle is open before calling through\nto the wrapped method.", "source": "juraj-google-style"}
{"code": "def potential_purviews(self, direction, mechanism, purviews=False):\n        \n        system = self.system[direction]\n        return [\n            purview for purview in system.potential_purviews(\n                direction, mechanism, purviews)\n            if set(purview).issubset(self.purview_indices(direction))\n        ]", "docstring": "Return all purviews that could belong to the |MIC|/|MIE|.\n\nFilters out trivially-reducible purviews.\n\nArgs:\ndirection (str): Either |CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The mechanism of interest.\n\nKeyword Args:\npurviews (tuple[int]): Optional subset of purviews of interest.", "source": "juraj-google-style"}
{"code": "def victim_email_assets(self, main_type, sub_type, unique_id, params=None):\n        \n        params = params or {}\n\n        if not sub_type:\n            url = '/v2/{}/{}/victimAssets/emailAddresses'.format(main_type, unique_id)\n        else:\n            url = '/v2/{}/{}/{}/victimAssets/emailAddresses'.format(type, sub_type, unique_id)\n\n        for vea in self._iterate(url, params, 'victimEmail'):\n            yield vea", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def _tokenize_wordpiece(self, text):\n        \n\n        output_tokens = []\n        for token in self.basic_tokenizer._whitespace_tokenize(text):\n            chars = list(token)\n            if len(chars) > self.max_input_chars_per_word:\n                output_tokens.append(self.vocab.unknown_token)\n                continue\n            is_bad = False\n            start = 0\n            sub_tokens = []\n            while start < len(chars):\n                end = len(chars)\n                cur_substr = None\n                while start < end:\n                    substr = ''.join(chars[start:end])\n                    if start > 0:\n                        substr = '\n                    if substr in self.vocab:\n                        cur_substr = substr\n                        break\n                    end -= 1\n                if cur_substr is None:\n                    is_bad = True\n                    break\n                sub_tokens.append(cur_substr)\n                start = end\n            if is_bad:\n                output_tokens.append(self.vocab.unknown_token)\n            else:\n                output_tokens.extend(sub_tokens)\n        return output_tokens", "docstring": "Tokenizes a piece of text into its word pieces.\n\nThis uses a greedy longest-match-first algorithm to perform tokenization\nusing the given vocabulary.\n\nFor example:\ninput = \"unaffable\"\noutput = [\"un\", \"##aff\", \"##able\"]\n\nArgs:\ntext: A single token or whitespace separated tokens. This should have\nalready been passed through `BERTBasicTokenizer.\n\nReturns:\nA list of wordpiece tokens.", "source": "juraj-google-style"}
{"code": "def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):\n    if (not outdir):\n        outdir = self.data_dir\n        if (not outdir):\n            raise ValueError('Output directory must be specified')\n    outfile = op.join(outdir, (outname + '.faa'))\n    tmp = []\n    for x in self.genes_with_a_representative_sequence:\n        repseq = x.protein.representative_sequence\n        copied_seq_record = copy(repseq)\n        if set_ids_from_model:\n            copied_seq_record.id = x.id\n        tmp.append(copied_seq_record)\n    SeqIO.write(tmp, outfile, 'fasta')\n    log.info('{}: wrote all representative sequences to file'.format(outfile))\n    self.genome_path = outfile\n    return self.genome_path", "docstring": "Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.\n\nArgs:\noutname (str): Name of the output FASTA file without the extension\noutdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories\nwere not created initially\nset_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID", "source": "codesearchnet"}
{"code": "def _get_args_and_defaults(args, defaults):\n    defaults = (defaults or [])\n    args_and_defaults = [(argument, default) for (argument, default) in zip_longest(args[::(- 1)], defaults[::(- 1)], fillvalue=NoDefault)]\n    return args_and_defaults[::(- 1)]", "docstring": "Return a list of 2-tuples - the argument name and its default value or\na special value that indicates there is no default value.\n\nArgs:\nargs: list of argument name\ndefaults: tuple of default values", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context, encoding='utf-8'):\n    \n    super(CPIOFileSystem, self).__init__(resolver_context)\n    self._cpio_archive_file = None\n    self._file_object = None\n    self.encoding = encoding", "docstring": "Initializes a CPIO archive file system.\n\nArgs:\nresolver_context (Context): resolver context.\nencoding (Optional[str]): file entry name encoding.", "source": "juraj-google-style"}
{"code": "def VerifyCipherSignature(self, remote_public_key):\n    \n    if self.cipher_metadata.signature and remote_public_key:\n\n      stats_collector_instance.Get().IncrementCounter(\"grr_rsa_operations\")\n      remote_public_key.Verify(self.serialized_cipher,\n                               self.cipher_metadata.signature)\n      return True", "docstring": "Verifies the signature on the encrypted cipher block.\n\nThis method returns True if the signature verifies correctly with\nthe key given.\n\nArgs:\nremote_public_key: The remote public key.\n\nReturns:\nNone\nRaises:\nrdf_crypto.VerificationError: A signature and a key were both given but\nverification fails.", "source": "juraj-google-style"}
{"code": "def case_study_social_link_linkedin(value):\n    \n\n    parsed = parse.urlparse(value.lower())\n    if not parsed.netloc.endswith('linkedin.com'):\n        raise ValidationError(MESSAGE_NOT_LINKEDIN)", "docstring": "Confirms that the social media url is pointed at the correct domain.\n\nArgs:\nvalue (string): The url to check.\n\nRaises:\ndjango.forms.ValidationError", "source": "juraj-google-style"}
{"code": "def _request(self, method, url, headers=None, **kwargs):\n        \n        _headers = {\n            'Accept': 'application/json',\n            'Content-Type': 'application/json'\n        }\n        if headers:\n            _headers.update(headers)\n\n        if self.is_debug:\n            self.logger.debug('{} {} {} {}'.format(method, url, headers, kwargs))\n        return self._parse(requests.request(method, url, headers=_headers, timeout=60, **kwargs))", "docstring": "Normally the connection guarantees response times of 3 seconds on average,\nif there is an abnormal situation, the maximum response time is 1 minute.\nIt is highly recommended that you set “timeouts” when you connect with PayU.\n\nArgs:\nmethod:\nurl:\nheaders:\n**kwargs:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _maybe_init_experiment(self, experiment_name):\n    \n    user_id = self._maybe_init_user()\n    cursor = self._db.cursor()\n    cursor.execute(\n        ,\n        (user_id, experiment_name))\n    row = cursor.fetchone()\n    if row:\n      return row[0]\n    experiment_id = self._create_id()\n    \n    computed_time = 0\n    cursor.execute(\n        ,\n        (user_id, experiment_id, experiment_name, time.time(), computed_time,\n         False))\n    return experiment_id", "docstring": "Returns the ID for the given experiment, creating the row if needed.\n\nArgs:\nexperiment_name: name of experiment.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=None):\n        \n        super(ApplicationNamespace, self).__init__(\n            value, Tags.APPLICATION_NAMESPACE)", "docstring": "Construct an ApplicationNamespace object.\n\nArgs:\nvalue (str): A string representing a namespace. Optional, defaults\nto None.", "source": "juraj-google-style"}
{"code": "def encode(self, value: Any) -> geno.DNA:\n    children = []\n\n    def _encode(path: utils.KeyPath, template_value: Any, input_value: Any) -> Any:\n        \n        if pg_typing.MISSING_VALUE == input_value and pg_typing.MISSING_VALUE != template_value:\n            raise ValueError(f\"Value is missing from input. Path='{path}'.\")\n        if isinstance(template_value, base.HyperValue) and (not self._where or self._where(template_value)):\n            children.append(template_value.encode(input_value))\n        elif isinstance(template_value, derived.DerivedValue):\n            if self._compute_derived:\n                referenced_values = [reference_path.query(value) for _, reference_path in template_value.resolve()]\n                derived_value = template_value.derive(*referenced_values)\n                if derived_value != input_value:\n                    raise ValueError(f\"Unmatched derived value between template and input. (Path='{path}', Template={template_value!r}, ComputedValue={derived_value!r}, Input={input_value!r})\")\n        elif isinstance(template_value, symbolic.Object):\n            if type(input_value) is not type(template_value):\n                raise ValueError(f\"Unmatched Object type between template and input: (Path='{path}', Template={template_value!r}, Input={input_value!r})\")\n            template_keys = set(template_value.sym_keys())\n            value_keys = set(input_value.sym_keys())\n            if template_keys != value_keys:\n                raise ValueError(f\"Unmatched Object keys between template value and input value. (Path='{path}', TemplateOnlyKeys={template_keys - value_keys}, InputOnlyKeys={value_keys - template_keys})\")\n            for key in template_value.sym_keys():\n                utils.merge_tree(template_value.sym_getattr(key), input_value.sym_getattr(key), _encode, root_path=utils.KeyPath(key, path))\n        elif isinstance(template_value, symbolic.Dict):\n            if not isinstance(input_value, dict):\n                raise ValueError(f\"Unmatched dict between template value and input value. (Path='{path}', Template={template_value!r}, Input={input_value!r})\")\n        elif isinstance(template_value, symbolic.List):\n            if not isinstance(input_value, list) or len(input_value) != len(template_value):\n                raise ValueError(f\"Unmatched list between template value and input value. (Path='{path}', Template={template_value!r}, Input={input_value!r})\")\n            for i, template_item in enumerate(template_value):\n                utils.merge_tree(template_item, input_value[i], _encode, root_path=utils.KeyPath(i, path))\n        elif template_value != input_value:\n            raise ValueError(f\"Unmatched value between template and input. 
(Path='{path}', Template={utils.quote_if_str(template_value)}, Input={utils.quote_if_str(input_value)})\")\n        return template_value\n    utils.merge_tree(self._value, value, _encode, root_path=self._root_path)\n    return geno.DNA(None, children)", "docstring": "Encode a value into a DNA.\n\nExample::\n\n# DNA of a constant template:\ntemplate = pg.hyper.ObjectTemplate({'a': 0})\nassert template.encode({'a': 0}) == pg.DNA(None)\n# Raises: Unmatched value between template and input.\ntemplate.encode({'a': 1})\n\n# DNA of a template containing only one pg.oneof.\ntemplate = pg.hyper.ObjectTemplate({'a': pg.oneof([1, 2])})\nassert template.encode({'a': 1}) == pg.DNA(0)\n\n# DNA of a template containing only one pg.oneof.\ntemplate = pg.hyper.ObjectTemplate({'a': pg.floatv(0.1, 1.0)})\nassert template.encode({'a': 0.5}) == pg.DNA(0.5)\n\nArgs:\nvalue: Value to encode.\n\nReturns:\nEncoded DNA.\n\nRaises:\nValueError if value cannot be encoded by this template.", "source": "github-repos"}
{"code": "def list(self):\n    return self._registry.keys()", "docstring": "Lists registered items.\n\nReturns:\nA list of names of registered objects.", "source": "github-repos"}
{"code": "def find_wells_with_curve(self, mnemonic, alias=None):\n    return Project([w for w in self if (w.get_curve(mnemonic, alias=alias) is not None)])", "docstring": "Returns a new Project with only the wells which have the named curve.\n\nArgs:\nmenmonic (str): the name of the curve to look for.\nalias (dict): a welly alias dictionary.\n\nReturns:\nproject.", "source": "codesearchnet"}
{"code": "def _copy_attr(self, module, varname, cls, attrname=None):\n    if (not hasattr(module, varname)):\n        raise RuntimeError(\"Variable '{}' not found\".format(varname))\n    obj = getattr(module, varname)\n    if (not isinstance(obj, cls)):\n        raise RuntimeError(\"Expecting fobj to be a {}, not a '{}'\".format(cls.__name__, obj.__class__.__name__))\n    if (attrname is None):\n        attrname = varname\n    setattr(self, attrname, obj)", "docstring": "Copies attribute from module object to self. Raises if object not of expected class\n\nArgs:\nmodule: module object\nvarname: variable name\ncls: expected class of variable\nattrname: attribute name of self. Falls back to varname", "source": "codesearchnet"}
{"code": "def dispatch(self, state_change: StateChange) -> List[Event]:\n        \n        assert isinstance(state_change, StateChange)\n\n        \n        \n        next_state = deepcopy(self.current_state)\n\n        \n        iteration = self.state_transition(\n            next_state,\n            state_change,\n        )\n\n        assert isinstance(iteration, TransitionResult)\n\n        self.current_state = iteration.new_state\n        events = iteration.events\n\n        assert isinstance(self.current_state, (State, type(None)))\n        assert all(isinstance(e, Event) for e in events)\n\n        return events", "docstring": "Apply the `state_change` in the current machine and return the\nresulting events.\n\nArgs:\nstate_change: An object representation of a state\nchange.\n\nReturn:\nA list of events produced by the state transition.\nIt's the upper layer's responsibility to decided how to handle\nthese events.", "source": "juraj-google-style"}
{"code": "def getPoly(rCut, nMax):\n    \n    rCutVeryHard = rCut+5.0\n    rx = 0.5*rCutVeryHard*(x + 1)\n\n    basisFunctions = []\n    for i in range(1, nMax + 1):\n        basisFunctions.append(lambda rr, i=i, rCut=rCut: (rCut - np.clip(rr, 0, rCut))**(i+2))\n\n    \n    \n    \n    \n    \n    S = np.zeros((nMax, nMax))\n    for i in range(1, nMax+1):\n        for j in range(1, nMax+1):\n            S[i-1, j-1] = (2*(rCut)**(7+i+j))/((5+i+j)*(6+i+j)*(7+i+j))\n    betas = sqrtm(np.linalg.inv(S))\n\n    \n    if (betas.dtype == np.complex128):\n        raise ValueError(\n            \"Could not calculate normalization factors for the polynomial basis\"\n            \" in the domain of real numbers. Lowering the number of radial \"\n            \"basis functions is advised.\"\n        )\n\n    fs = np.zeros([nMax, len(x)])\n    for n in range(1, nMax+1):\n        fs[n-1, :] = (rCut-np.clip(rx, 0, rCut))**(n+2)\n\n    gss = np.dot(betas, fs)\n\n    return nMax, rx, gss", "docstring": "Used to calculate discrete vectors for the polynomial basis functions.\n\nArgs:\nrCut(float): Radial cutoff\nnMax(int): Number of polynomial radial functions", "source": "juraj-google-style"}
{"code": "def __init__(self, string):\n        \n\n        \n        self._raw_taf = None\n        self._taf_header = None\n        self._raw_weather_groups = []\n        self._weather_groups = []\n        self._maintenance = None\n\n        if isinstance(string, str) and string != \"\":\n            self._raw_taf = string\n        else:\n            raise MalformedTAF(\"TAF/METAR string expected\")\n\n        \n        \n        self._raw_taf = self._raw_taf.strip()\n\n        \n        self._taf_header = self._init_header(self._raw_taf)\n\n        if self._taf_header['form'] == 'metar':\n            self._weather_groups.append(self._parse_group(self._raw_taf))\n        else:\n            \n            self._raw_weather_groups = self._init_groups(self._raw_taf)\n\n            for group in self._raw_weather_groups:\n                parsed_group = self._parse_group(group)\n                self._weather_groups.append(parsed_group)\n\n        self._maintenance = self._parse_maintenance(self._raw_taf)", "docstring": "Initializes the object with TAF/METAR report text.\n\nArgs:\nstring: TAF/METAR report string\n\nRaises:\nMalformedTAF: An error parsing the TAF/METAR report", "source": "juraj-google-style"}
{"code": "def bucket(self, experiment, user_id, bucketing_id):\n    \n\n    if not experiment:\n      return None\n\n    \n    if experiment.groupPolicy in GROUP_POLICIES:\n      group = self.config.get_group(experiment.groupId)\n\n      if not group:\n        return None\n\n      user_experiment_id = self.find_bucket(bucketing_id, experiment.groupId, group.trafficAllocation)\n      if not user_experiment_id:\n        self.config.logger.info('User \"%s\" is in no experiment.' % user_id)\n        return None\n\n      if user_experiment_id != experiment.id:\n        self.config.logger.info('User \"%s\" is not in experiment \"%s\" of group %s.' % (\n          user_id,\n          experiment.key,\n          experiment.groupId\n        ))\n        return None\n\n      self.config.logger.info('User \"%s\" is in experiment %s of group %s.' % (\n        user_id,\n        experiment.key,\n        experiment.groupId\n      ))\n\n    \n    variation_id = self.find_bucket(bucketing_id, experiment.id, experiment.trafficAllocation)\n    if variation_id:\n      variation = self.config.get_variation_from_id(experiment.key, variation_id)\n      self.config.logger.info('User \"%s\" is in variation \"%s\" of experiment %s.' % (\n        user_id,\n        variation.key,\n        experiment.key\n      ))\n      return variation\n\n    self.config.logger.info('User \"%s\" is in no variation.' % user_id)\n    return None", "docstring": "For a given experiment and bucketing ID determines variation to be shown to user.\n\nArgs:\nexperiment: Object representing the experiment for which user is to be bucketed.\nuser_id: ID for user.\nbucketing_id: ID to be used for bucketing the user.\n\nReturns:\nVariation in which user with ID user_id will be put in. None if no variation.", "source": "juraj-google-style"}
{"code": "def get_intermediate_dirs(fs, dir_path):\n    intermediates = []\n    with fs.lock():\n        for path in recursepath(abspath(dir_path), reverse=True):\n            try:\n                resource = fs.getinfo(path)\n            except ResourceNotFound:\n                intermediates.append(abspath(path))\n            else:\n                if resource.is_dir:\n                    break\n                raise errors.DirectoryExpected(dir_path)\n    return intermediates[::(- 1)][:(- 1)]", "docstring": "Get a list of non-existing intermediate directories.\n\nArguments:\nfs (FS): A filesystem instance.\ndir_path (str): A path to a new directory on the filesystem.\n\nReturns:\nlist: A list of non-existing paths.\n\nRaises:\n~fs.errors.DirectoryExpected: If a path component\nreferences a file and not a directory.", "source": "codesearchnet"}
{"code": "def int_to_id(cls, number):\n    if number < 0 or number >= 1 << 96:\n        raise ValueError('number value must be within [0, %s)' % (1 << 96))\n    ints = [(number & 79228162495817593519834398720) >> 64, (number & 18446744069414584320) >> 32, number & 4294967295]\n    number_bytes = struct.pack('>III', *ints)\n    return ObjectId(number_bytes)", "docstring": "Args:\nnumber(int): The integer value to be used to convert to ObjectId.\n\nReturns: The ObjectId that has the 12 bytes binary converted from the\ninteger value.", "source": "github-repos"}
{"code": "def send(self, message_type, data, callback=None, one_way=False):\n    message = validator_pb2.Message(correlation_id=_generate_id(), content=data, message_type=message_type)\n    fut = future.Future(message.correlation_id, message.content, callback, timeout=self._connection_timeout)\n    if (not one_way):\n        self._futures.put(fut)\n    self._send_receive_thread.send_message(message)\n    return fut", "docstring": "Sends a message of message_type\n\nArgs:\nmessage_type (validator_pb2.Message): enum value\ndata (bytes): serialized protobuf\ncallback (function): a callback function to call when a\nresponse to this message is received\n\nReturns:\nfuture.Future", "source": "codesearchnet"}
{"code": "def next_event(self, event_id, prev=False):\n    i = self.events.index(self._events_dict[event_id])\n    if (prev and (i > 0)):\n        return self.events[(i - 1)]\n    elif ((not prev) and ((i + 1) < len(self.events))):\n        return self.events[(i + 1)]\n    else:\n        return None", "docstring": "Get the event following another event in this conversation.\n\nArgs:\nevent_id (str): ID of the event.\nprev (bool): If ``True``, return the previous event rather than the\nnext event. Defaults to ``False``.\n\nRaises:\nKeyError: If no such :class:`.ConversationEvent` is known.\n\nReturns:\n:class:`.ConversationEvent` or ``None`` if there is no following\nevent.", "source": "codesearchnet"}
{"code": "def get_pipe_series_output(commands: Sequence[str],\n                           stdinput: BinaryIO = None) -> bytes:\n    \n    \n    \n    \n    \n\n    \n    processes = []  \n    for i in range(len(commands)):\n        if i == 0:  \n            processes.append(\n                subprocess.Popen(\n                    shlex.split(commands[i]),\n                    stdin=subprocess.PIPE,\n                    stdout=subprocess.PIPE\n                )\n            )\n        else:  \n            processes.append(\n                subprocess.Popen(\n                    shlex.split(commands[i]),\n                    stdin=processes[i - 1].stdout,\n                    stdout=subprocess.PIPE\n                )\n            )\n    return processes[len(processes) - 1].communicate(stdinput)[0]", "docstring": "Get the output from a piped series of commands.\n\nArgs:\ncommands: sequence of command strings\nstdinput: optional ``stdin`` data to feed into the start of the pipe\n\nReturns:\n``stdout`` from the end of the pipe", "source": "juraj-google-style"}
{"code": "def _load_yaml_(file_name):\n    if (not os.path.exists(file_name)):\n        return dict()\n    with open(file_name, 'r', encoding='utf-8') as fp:\n        return YAML().load(stream=fp)", "docstring": "Load assets infomation from file\n\nArgs:\nfile_name: file name\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def forward(self, input_ids: torch.LongTensor, spkr_id: torch.Tensor, lang_id: torch.Tensor) -> Tuple[torch.Tensor]:\n    hidden_states = self.unit_embedding(input_ids).transpose(1, 2)\n    spkr = self.speaker_embedding(spkr_id).transpose(1, 2)\n    lang = self.language_embedding(lang_id).transpose(1, 2)\n    log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))\n    dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)\n    if hidden_states.size(0) == 1:\n        hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)\n    else:\n        if hidden_states.shape[0] > 1 and self.training:\n            logger.warning('`self.training=True` and you use batching. You lose parallelism during the hifigan\\n                               forward pass because the samples are interleaved.')\n        hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1) for hidden_state, duration in zip(hidden_states, dur_out)]\n        hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)\n    spkr = spkr.repeat(1, 1, hidden_states.shape[-1])\n    lang = lang.repeat(1, 1, hidden_states.shape[-1])\n    hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)\n    hidden_states = self.hifi_gan(hidden_states)\n    unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)\n    lengths = self._get_output_hifigan_lengths(unit_lengths)\n    return (hidden_states, lengths)", "docstring": "Args:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary.\n\nIndices can be obtained using [`SeamlessM4TTextToUnitForConditionalGeneration`]. [What are input\nIDs?](../glossary#input-ids)\nspkr_id (`int`, *optional*):\nThe id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.\ntgt_lang (`str`, *optional*):\nThe language id to use as target language for translation.", "source": "github-repos"}
{"code": "def console_print_ex(con: tcod.console.Console, x: int, y: int, flag: int, alignment: int, fmt: str) -> None:\n    lib.TCOD_console_printf_ex(_console(con), x, y, flag, alignment, _fmt(fmt))", "docstring": "Print a string on a console using a blend mode and alignment mode.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\n\n.. deprecated:: 8.5\nUse :any:`Console.print_` instead.", "source": "codesearchnet"}
{"code": "def get_built_in(self, language, level, data):\n        \n        \n        pp = pprint.PrettyPrinter(indent=level)\n\n        lookup = {'python' : pp.pformat(data),\n                  'json' : str(json.dumps(data, sort_keys=True, indent=level, separators=(',', ': ')))}\n\n        self.data_structure = lookup[language]", "docstring": "Gets the return string for a language that's supported by python.\nUsed in cases when python provides support for the conversion.\n\nArgs:\nlanguage: string the langage to return for.\n\nlevel: integer, the indentation level.\n\ndata: python data structure being converted (list of tuples)\n\nReturns:\nNone, updates self.data_structure", "source": "juraj-google-style"}
{"code": "def _ReadMemberFooter(self, file_object):\n    file_offset = file_object.get_offset()\n    member_footer = self._ReadStructure(file_object, file_offset, self._MEMBER_FOOTER_SIZE, self._MEMBER_FOOTER, 'member footer')\n    self.uncompressed_data_size = member_footer.uncompressed_data_size", "docstring": "Reads a member footer.\n\nArgs:\nfile_object (FileIO): file-like object to read from.\n\nRaises:\nFileFormatError: if the member footer cannot be read.", "source": "codesearchnet"}
{"code": "def __init__(self, stream_start):\n    \n    self._decompressor = zlib_decompressor.DeflateDecompressor()\n    self.last_read = stream_start\n    self.uncompressed_offset = 0\n    self._compressed_data = b''", "docstring": "Initializes a gzip member decompressor wrapper.\n\nArgs:\nstream_start (int): offset to the compressed stream within the containing\nfile object.", "source": "juraj-google-style"}
{"code": "def feed(self, url_template, keyword, offset, max_num, page_step):\n    for i in range(offset, (offset + max_num), page_step):\n        url = url_template.format(keyword, i)\n        self.out_queue.put(url)\n        self.logger.debug('put url to url_queue: {}'.format(url))", "docstring": "Feed urls once\n\nArgs:\nurl_template: A string with parameters replaced with \"{}\".\nkeyword: A string indicating the searching keyword.\noffset: An integer indicating the starting index.\nmax_num: An integer indicating the max number of images to be crawled.\npage_step: An integer added to offset after each iteration.", "source": "codesearchnet"}
{"code": "def from_string(cls, key, key_id=None):\n        \n        key = _helpers.from_bytes(key)  \n        marker_id, key_bytes = pem.readPemBlocksFromFile(\n            six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)\n\n        \n        if marker_id == 0:\n            private_key = rsa.key.PrivateKey.load_pkcs1(\n                key_bytes, format='DER')\n        \n        elif marker_id == 1:\n            key_info, remaining = decoder.decode(\n                key_bytes, asn1Spec=_PKCS8_SPEC)\n            if remaining != b'':\n                raise ValueError('Unused bytes', remaining)\n            private_key_info = key_info.getComponentByName('privateKey')\n            private_key = rsa.key.PrivateKey.load_pkcs1(\n                private_key_info.asOctets(), format='DER')\n        else:\n            raise ValueError('No key could be detected.')\n\n        return cls(private_key, key_id=key_id)", "docstring": "Construct an Signer instance from a private key in PEM format.\n\nArgs:\nkey (str): Private key in PEM format.\nkey_id (str): An optional key id used to identify the private key.\n\nReturns:\ngoogle.auth.crypt.Signer: The constructed signer.\n\nRaises:\nValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in\nPEM format.", "source": "juraj-google-style"}
{"code": "def post_attention(self, token, x):\n    \n    with tf.variable_scope(self.name + \"/post_attention\", reuse=tf.AUTO_REUSE):\n      depth = common_layers.shape_list(x)[-1]\n      actual_batch_size = common_layers.shape_list(x)[0]\n      memory_output = tf.gather(token[\"retrieved_mem\"],\n                                tf.range(actual_batch_size))\n      output = tf.add(tf.layers.dense(x, depth, use_bias=False),\n                      tf.layers.dense(memory_output, depth))\n      with tf.control_dependencies([output]):\n        with tf.control_dependencies([\n            self.write(token[\"x\"], token[\"access_logits\"])]):\n          return tf.identity(output)", "docstring": "Called after self-attention. The memory can be updated here.\n\nArgs:\ntoken: Data returned by pre_attention, which can be used to carry over\nstate related to the current memory operation.\nx: a Tensor of data after self-attention and feed-forward\nReturns:\na (possibly modified) version of the input x", "source": "juraj-google-style"}
{"code": "def define_singleton(carrier, name, cls, cls_args = {}):\n    \n    instance_name = \"__{}\".format(name)\n    setattr(carrier, instance_name, None)\n\n    def getter(self):\n        instance = getattr(carrier, instance_name)\n\n        if instance is None:\n            instance = cls(**cls_args)\n            setattr(carrier, instance_name, instance)\n\n        return instance\n\n    setattr(type(carrier), name, property(getter))", "docstring": "Creates a property with the given name, but the cls will created only with the first call\n\nArgs:\ncarrier: an instance of the class where want to reach the cls instance\nname (str): the variable name of the cls instance\ncls (type): the singleton object type\ncls_args (dict): optional dict for createing cls", "source": "juraj-google-style"}
{"code": "def power(self, n):\n        \n        if n > 0:\n            return super().power(n)\n        return Choi(SuperOp(self).power(n))", "docstring": "The matrix power of the channel.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nChoi: the matrix power of the SuperOp converted to a Choi channel.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "juraj-google-style"}
{"code": "def set_cn_energies( self, cn_energies ):\n        \n        for site in self.sites:\n            site.set_cn_occupation_energies( cn_energies[ site.label ] )\n        self.cn_energies = cn_energies", "docstring": "Set the coordination number dependent energies for this lattice.\n\nArgs:\ncn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.::\n\n{ 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } }\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def patch_index_to_coordinate(ul_idx: int, lr_idx: int, num_patches_per_side: int):\n    cell_size = 1.0 / num_patches_per_side\n    ul_x = ul_idx % num_patches_per_side\n    ul_y = ul_idx \n    lr_x = lr_idx % num_patches_per_side\n    lr_y = lr_idx \n    if ul_idx == lr_idx:\n        x1 = ul_x * cell_size\n        y1 = ul_y * cell_size\n        x2 = lr_x * cell_size + cell_size\n        y2 = lr_y * cell_size + cell_size\n    elif ul_x == lr_x or ul_y == lr_y:\n        x1 = ul_x * cell_size\n        y1 = ul_y * cell_size\n        x2 = lr_x * cell_size + cell_size\n        y2 = lr_y * cell_size + cell_size\n    else:\n        x1 = ul_x * cell_size + cell_size / 2\n        y1 = ul_y * cell_size + cell_size / 2\n        x2 = lr_x * cell_size + cell_size / 2\n        y2 = lr_y * cell_size + cell_size / 2\n    return (x1, y1, x2, y2)", "docstring": "Given a grid of length `num_patches_per_side` and the indices of the upper-left and lower-right corners of a\nbounding box, returns the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).\n\nArgs:\nul_idx (`int`): the index of the grid cell that corresponds to the upper-left corner of the bounding box.\nlr_idx (`int`): the index of the grid cell that corresponds to the lower-right corner of the bounding box.\nnum_patches_per_side (`int`): the number of patches along each side.\n\nReturns:\n`Tuple[float]`: the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).", "source": "github-repos"}
{"code": "def mask_from_embedding(emb):\n    return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))", "docstring": "Input embeddings -> padding mask.\n\nWe have hacked symbol_modality to return all-zero embeddings for padding.\nReturns a mask with 0.0 in the padding positions and 1.0 elsewhere.\n\nArgs:\nemb: a Tensor with shape [batch, width, height, depth].\nReturns:\na 0.0/1.0 Tensor with shape [batch, width, height, 1].", "source": "codesearchnet"}
{"code": "def bayesian_resnet(input_shape, num_classes=10, kernel_posterior_scale_mean=(- 9.0), kernel_posterior_scale_stddev=0.1, kernel_posterior_scale_constraint=0.2):\n    filters = [64, 128, 256, 512]\n    kernels = [3, 3, 3, 3]\n    strides = [1, 2, 2, 2]\n\n    def _untransformed_scale_constraint(t):\n        return tf.clip_by_value(t, (- 1000), tf.math.log(kernel_posterior_scale_constraint))\n    kernel_posterior_fn = tfp.layers.default_mean_field_normal_fn(untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(mean=kernel_posterior_scale_mean, stddev=kernel_posterior_scale_stddev), untransformed_scale_constraint=_untransformed_scale_constraint)\n    image = tf.keras.layers.Input(shape=input_shape, dtype='float32')\n    x = tfp.layers.Convolution2DFlipout(64, 3, strides=1, padding='same', kernel_posterior_fn=kernel_posterior_fn)(image)\n    for i in range(len(kernels)):\n        x = _resnet_block(x, filters[i], kernels[i], strides[i], kernel_posterior_fn)\n    x = tf.keras.layers.BatchNormalization()(x)\n    x = tf.keras.layers.Activation('relu')(x)\n    x = tf.keras.layers.AveragePooling2D(4, 1)(x)\n    x = tf.keras.layers.Flatten()(x)\n    x = tfp.layers.DenseFlipout(num_classes, kernel_posterior_fn=kernel_posterior_fn)(x)\n    model = tf.keras.Model(inputs=image, outputs=x, name='resnet18')\n    return model", "docstring": "Constructs a ResNet18 model.\n\nArgs:\ninput_shape: A `tuple` indicating the Tensor shape.\nnum_classes: `int` representing the number of class labels.\nkernel_posterior_scale_mean: Python `int` number for the kernel\nposterior's scale (log variance) mean. The smaller the mean the closer\nis the initialization to a deterministic network.\nkernel_posterior_scale_stddev: Python `float` number for the initial kernel\nposterior's scale stddev.\n```\nq(W|x) ~ N(mu, var),\nlog_var ~ N(kernel_posterior_scale_mean, kernel_posterior_scale_stddev)\n````\nkernel_posterior_scale_constraint: Python `float` number for the log value\nto constrain the log variance throughout training.\ni.e. log_var <= log(kernel_posterior_scale_constraint).\n\nReturns:\ntf.keras.Model.", "source": "codesearchnet"}
{"code": "def partial_derivative(self, X, y=0):\n        \n        self.check_fit()\n\n        U, V = self.split_matrix(X)\n\n        if self.theta == 1:\n            return V\n\n        else:\n            t1 = np.power(-np.log(U), self.theta)\n            t2 = np.power(-np.log(V), self.theta)\n            p1 = self.cumulative_distribution(X)\n            p2 = np.power(t1 + t2, -1 + 1.0 / self.theta)\n            p3 = np.power(-np.log(V), self.theta - 1)\n            return np.divide(np.multiply(np.multiply(p1, p2), p3), V) - y", "docstring": "Compute partial derivative :math:`C(u|v)` of cumulative density.\n\nArgs:\nX: `np.ndarray`\ny: `float`\n\nReturns:", "source": "juraj-google-style"}
{"code": "def locate_point(nodes, x_val, y_val):\n    r\n    \n    zero1 = _curve_helpers.full_reduce(nodes[[0], :]) - x_val\n    zero2 = _curve_helpers.full_reduce(nodes[[1], :]) - y_val\n    \n    \n    if zero1.shape[1] > zero2.shape[1]:\n        zero1, zero2 = zero2, zero1\n    \n    if zero1.shape[1] == 1:\n        \n        \n        zero1, zero2 = zero2, zero1\n    power_basis1 = poly_to_power_basis(zero1[0, :])\n    all_roots = roots_in_unit_interval(power_basis1)\n    if all_roots.size == 0:\n        return None\n\n    \n    \n    power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[0, :]))\n    near_zero = np.abs(polynomial.polyval(all_roots, power_basis2))\n    index = np.argmin(near_zero)\n    if near_zero[index] < _ZERO_THRESHOLD:\n        return all_roots[index]\n\n    return None", "docstring": "r\"\"\"Find the parameter corresponding to a point on a curve.\n\n.. note::\n\nThis assumes that the curve :math:`B(s, t)` defined by ``nodes``\nlives in :math:`\\mathbf{R}^2`.\n\nArgs:\nnodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.\nx_val (float): The :math:`x`-coordinate of the point.\ny_val (float): The :math:`y`-coordinate of the point.\n\nReturns:\nOptional[float]: The parameter on the curve (if it exists).", "source": "juraj-google-style"}
{"code": "def _annotate_variable_ops(func, graph_def):\n    ph_shape_map = {}\n    for ph, var in zip(func.graph.internal_captures, func.variables):\n        ph_shape_map[ph.name] = var.shape\n    name_to_node = {node.name: node for node in graph_def.node}\n    for node in graph_def.node:\n        if node.op == 'ReadVariableOp' or node.op == 'ResourceGather':\n            node_ = node\n            while name_to_node[node_.input[0]].op == 'Identity':\n                node_ = name_to_node[node_.input[0]]\n            ph_name = node_.input[0] + ':0'\n            if ph_name in ph_shape_map:\n                shape = ph_shape_map[ph_name]\n                node.attr['_shape'].shape.CopyFrom(shape.as_proto())\n            else:\n                raise RuntimeError('Not found in the function captures: {}'.format(ph_name))", "docstring": "Annotates variable operations with custom `_shape` attribute.\n\nThis is required for the converters and shape inference. The graph\ndefinition is modified in-place.\n\nArgs:\nfunc: Function represented by the graph definition.\ngraph_def: Graph definition to be annotated in-place.\n\nRaises:\nRuntimeError: if some shapes cannot be annotated.", "source": "github-repos"}
{"code": "def create(self, data, *args, **kwargs):\n    if (self.create.__func__.__module__ != self.__module__):\n        raise Exception('Child method not implemented')\n    self._MambuStruct__method = 'POST'\n    self._MambuStruct__data = data\n    self.connect(*args, **kwargs)\n    self._MambuStruct__method = 'GET'\n    self._MambuStruct__data = None", "docstring": "Creates an entity in Mambu\n\nThis method must be implemented in child classes\n\nArgs:\ndata (dictionary): dictionary with data to send, this dictionary\nis specific for each Mambu entity", "source": "codesearchnet"}
{"code": "def start_timer(self, timer_name):\n    self._timers[timer_name] = datetime.datetime.now()", "docstring": "Initializes a new timer.\n\nArgs:\ntimer_name: name of the timer to initialize, if not unique will reset\nexisting timer.", "source": "github-repos"}
{"code": "def symbolic_heisenberg_eom(\n            self, X=None, noises=None, expand_simplify=True):\n        \n        L, H = self.L, self.H\n\n        if X is None:\n            X = OperatorSymbol('X', hs=(L.space | H.space))\n\n        summands = [I * (H * X - X * H), ]\n        for Lk in L.matrix.ravel():\n            summands.append(adjoint(Lk) * X * Lk)\n            summands.append(-(adjoint(Lk) * Lk * X + X * adjoint(Lk) * Lk) / 2)\n\n        if noises is not None:\n            if not isinstance(noises, Matrix):\n                noises = Matrix(noises)\n            LambdaT = (noises.adjoint().transpose() * noises.transpose()).transpose()\n            assert noises.shape == L.shape\n            S = self.S\n            summands.append((adjoint(noises) * S.adjoint() * (X * L - L * X))\n                            .expand()[0, 0])\n            summand = (((L.adjoint() * X - X * L.adjoint()) * S * noises)\n                       .expand()[0, 0])\n            summands.append(summand)\n            if len(S.space & X.space):\n                comm = (S.adjoint() * X * S - X)\n                summands.append((comm * LambdaT).expand().trace())\n\n        ret = OperatorPlus.create(*summands)\n        if expand_simplify:\n            ret = ret.expand().simplify_scalar()\n        return ret", "docstring": "Compute the symbolic Heisenberg equations of motion of a system\noperator X.  If no X is given, an OperatorSymbol is created in its\nplace.  If no noises are given, this correspnds to the\nensemble-averaged Heisenberg equation of motion.\n\nArgs:\nX (Operator): A system operator\nnoises (Operator): A vector of noise inputs\n\nReturns:\nOperator: The RHS of the Heisenberg equations of motion of X.", "source": "juraj-google-style"}
{"code": "def collective_dr_squared(self):\n    return sum(np.square(sum([atom.dr for atom in self.atoms])))", "docstring": "Squared sum of total displacements for these atoms.\n\nArgs:\nNone\n\nReturns:\n(Float): The square of the summed total displacements for these atoms.", "source": "codesearchnet"}
{"code": "def is_pure_symbolic(x: Any) -> bool:\n\n    def _check_pure_symbolic(k, v, p):\n        del k, p\n        if isinstance(v, PureSymbolic) or (isinstance(v, Symbolic) and v.sym_puresymbolic):\n            return TraverseAction.STOP\n        else:\n            return TraverseAction.ENTER\n    return not traverse(x, _check_pure_symbolic)", "docstring": "Returns if the input value is pure symbolic.\n\nExample::\n\nclass Bar(pg.PureSymbolic):\npass\n\n@pg.symbolize\ndef foo(x, y):\npass\n\nassert not pg.is_pure_symbolic(1)\nassert not pg.is_pure_symbolic(foo(1, 2))\nassert pg.is_pure_symbolic(Bar())\nassert pg.is_pure_symbolic(foo(Bar(), 1))\nassert pg.is_pure_symbolic(foo(pg.oneof([1, 2]), 1))\n\nArgs:\nx: Value to query against.\n\nReturns:\nTrue if value itself is PureSymbolic or its child and nested\nchild fields contain PureSymbolic values.", "source": "github-repos"}
{"code": "def add_permissions(self, grp_name, resource, permissions):\n    self.project_service.set_auth(self._token_project)\n    self.project_service.add_permissions(grp_name, resource, permissions)", "docstring": "Add additional permissions for the group associated with the resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data\nmodel object to operate on.\npermissions (list): List of permissions to add to the given resource\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"}
{"code": "def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):\n    raise NotImplementedError('draw() is not implemented')", "docstring": "Draw function called by the system every frame when the effect is active.\nThis method raises ``NotImplementedError`` unless implemented.\n\nArgs:\ntime (float): The current time in seconds.\nframetime (float): The time the previous frame used to render in seconds.\ntarget (``moderngl.Framebuffer``): The target FBO for the effect.", "source": "codesearchnet"}
{"code": "def create_switch(type, settings, pin):\n\t\n\n\tswitch = None\n\tif type == \"A\":\n\t\tgroup, device = settings.split(\",\")\n\t\tswitch = pi_switch.RCSwitchA(group, device)\n\n\telif type == \"B\":\n\t\taddr, channel = settings.split(\",\")\n\t\taddr = int(addr)\n\t\tchannel = int(channel)\n\t\tswitch = pi_switch.RCSwitchB(addr, channel)\n\n\telif type == \"C\":\n\t\tfamily, group, device = settings.split(\",\")\n\t\tgroup = int(group)\n\t\tdevice = int(device)\n\t\tswitch = pi_switch.RCSwitchC(family, group, device)\n\n\telif type == \"D\":\n\t\tgroup, device = settings.split(\",\")\n\t\tdevice = int(device)\n\t\tswitch = pi_switch.RCSwitchD(group, device)\n\n\telse:\n\t\tprint \"Type %s is not supported!\" % type\n\t\tsys.exit()\n\n\tswitch.enableTransmit(pin)\n\treturn switch", "docstring": "Create a switch.\n\nArgs:\ntype: (str): type of the switch [A,B,C,D]\nsettings (str): a comma separted list\npin (int): wiringPi pin\n\nReturns:\nswitch", "source": "juraj-google-style"}
{"code": "def _add_doc_value(self, field_name: str, jsonpath: str) -> None:\n        \n        path = self.origin_doc.etk.parse_json_path(jsonpath)\n        matches = path.find(self.origin_doc.value)\n        all_valid = True\n        invalid = []\n        for a_match in matches:\n            \n            if a_match.value:\n                valid = self._add_value(field_name, a_match.value, provenance_path=str(a_match.full_path))\n                if not valid:\n                    invalid.append(field_name + \":\" + str(a_match.value))\n                all_valid = all_valid and valid\n\n        if not all_valid:\n            raise KgValueError(\"Some kg value type invalid according to schema: \" + json.dumps(invalid))", "docstring": "Add a value to knowledge graph by giving a jsonpath\n\nArgs:\nfield_name: str\njsonpath: str\n\nReturns:", "source": "juraj-google-style"}
{"code": "def publish_state(self, state):\n    message = json.dumps({'state': {'reported': state}})\n    self.client.publish(self.topic, message)\n    self._state = state", "docstring": "Publish thing state to AWS IoT.\n\nArgs:\nstate (dict): object state. Must be JSON serializable (i.e., not\nhave circular references).", "source": "codesearchnet"}
{"code": "def __init__(self, sess, grpc_debug_server_addresses, thread_name_filter=None, send_traceback_and_source_code=True):\n\n    def _gated_grpc_watch_fn(fetches, feeds):\n        del fetches, feeds\n        return framework.WatchOptions(debug_ops=['DebugIdentity(gated_grpc=true)'])\n    super().__init__(sess, grpc_debug_server_addresses, watch_fn=_gated_grpc_watch_fn, thread_name_filter=thread_name_filter)\n    self._send_traceback_and_source_code = send_traceback_and_source_code\n    self._sent_graph_version = -1\n    register_signal_handler()", "docstring": "Constructor of TensorBoardDebugWrapperSession.\n\nArgs:\nsess: The `tf.compat.v1.Session` instance to be wrapped.\ngrpc_debug_server_addresses: gRPC address(es) of debug server(s), as a\n`str` or a `list` of `str`s. E.g., \"localhost:2333\",\n\"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"].\nthread_name_filter: Optional filter for thread names.\nsend_traceback_and_source_code: Whether traceback of graph elements and\nthe source code are to be sent to the debug server(s).", "source": "github-repos"}
{"code": "def __init__(self, expr, weld_type, dim):\n        \n        self.expr = expr\n        self.weld_type = weld_type\n        self.dim = dim", "docstring": "Summary\n\nArgs:\nexpr (TYPE): Description\nweld_type (TYPE): Description\ndim (TYPE): Description", "source": "juraj-google-style"}
{"code": "def find_replace(obj, find, replace):\n    \n    try:\n        if isinstance(obj, dict):\n            return {find_replace(key,find,replace): find_replace(value,find,replace) for key, value in obj.items()}\n        elif isinstance(obj, list):\n            return [find_replace(element,find,replace) for element in obj]\n        elif obj == find:\n            return unicode_convert(replace)\n        else:\n            try:\n                return unicode_convert(find_replace_string(obj, find, replace))\n                \n                \n            except:\n                return unicode_convert(obj)\n    except:\n        line, filename, synerror = trace()\n        raise ArcRestHelperError({\n                    \"function\": \"find_replace\",\n                    \"line\": line,\n                    \"filename\":  filename,\n                    \"synerror\": synerror,\n                                    }\n                                    )\n    finally:\n        pass", "docstring": "Searches an object and performs a find and replace.\n\nArgs:\nobj (object): The object to iterate and find/replace.\nfind (str): The string to search for.\nreplace (str): The string to replace with.\nReturns:\nobject: The object with replaced strings.", "source": "juraj-google-style"}
{"code": "def mark_point(img, x, y):\n    overlay = img.copy()\n    output = img.copy()\n    alpha = 0.5\n    radius = max(5, (min(img.shape[:2]) \n    center = (int(x), int(y))\n    color = (0, 0, 255)\n    cv2.circle(overlay, center, radius, color, (- 1))\n    cv2.addWeighted(overlay, alpha, output, (1 - alpha), 0, output)\n    return output", "docstring": "Mark a point\n\nArgs:\n- img(numpy): the source image\n- x, y(int): position", "source": "codesearchnet"}
{"code": "def load_graph_from_args(pipeline_name: str, framework: str, model: str, tokenizer: Optional[str]=None, **models_kwargs) -> Pipeline:\n    if tokenizer is None:\n        tokenizer = model\n    if framework == 'pt' and (not is_torch_available()):\n        raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')\n    if framework == 'tf' and (not is_tf_available()):\n        raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')\n    print(f'Loading pipeline (model: {model}, tokenizer: {tokenizer})')\n    return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)", "docstring": "Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model\n\nArgs:\npipeline_name: The kind of pipeline to use (ner, question-answering, etc.)\nframework: The actual model to convert the pipeline from (\"pt\" or \"tf\")\nmodel: The model name which will be loaded by the pipeline\ntokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value\n\nReturns: Pipeline object", "source": "github-repos"}
{"code": "def __init__(self, e_pw, nsites, kappa=2.0, omega=0.5, mu=1.0,\n            freeparams=['kappa', 'omega', 'mu']):\n        \n        _checkParam('e_pw', e_pw, self.PARAMLIMITS, self.PARAMTYPES)\n        self.e_pw = e_pw.copy()\n        self.phi = self._calculate_correctedF3X4()\n        assert scipy.allclose(self.phi.sum(axis = 1),\\\n                scipy.ones(3, dtype='float'),atol=1e-4, rtol=5e-3),\\\n                \"The `phi` values do not sum to 1 for all `p`\"\n\n        self.Phi_x = scipy.ones(N_CODON, dtype='float')\n        self._calculate_Phi_x()\n        self._nsites = nsites\n        assert self._nsites > 0, \"There must be more than 1 site in the gene\"\n\n        \n        assert all(map(lambda x: x in self.ALLOWEDPARAMS, freeparams)),\\\n                \"Invalid entry in freeparams\\nGot: {0}\\nAllowed: {1}\".format(\n                ', '.join(freeparams), ', '.join(self.ALLOWEDPARAMS))\n        self._freeparams = list(freeparams) \n\n        \n        self._mu = mu \n        self.kappa = kappa\n        self.omega = omega\n        for (name, value) in [('kappa', self.kappa), ('omega', self.omega),\n                    ('mu', self.mu)]:\n            _checkParam(name, value, self.PARAMLIMITS, self.PARAMTYPES)\n\n        \n        \n        self.Pxy = scipy.zeros((1, N_CODON, N_CODON), dtype='float')\n        self.Pxy_no_omega = scipy.zeros((1, N_CODON, N_CODON), dtype='float')\n        self.D = scipy.zeros((1, N_CODON), dtype='float')\n        self.A = scipy.zeros((1, N_CODON, N_CODON), dtype='float')\n        self.Ainv = scipy.zeros((1, N_CODON, N_CODON), dtype='float')\n        self.dPxy = {}\n        self.B = {}\n        for param in self.freeparams:\n            if param in self.ALLOWEDPARAMS:\n                self.dPxy[param] = scipy.zeros((1, N_CODON, N_CODON),\n                        dtype='float')\n                self.B[param] = scipy.zeros((1, N_CODON, N_CODON),\n                        dtype='float')\n            else:\n                raise ValueError(\"Unrecognized param {0}\".format(param))\n\n        \n        self._diag_indices = scipy.diag_indices(N_CODON)\n        self.updateParams({}, update_all=True)", "docstring": "Initialize an `YNGKP_M0` object.\n\nArgs:\n`kappa`, `omega`, `mu`,\nModel params described in main class doc string.\n`freeparams` (list of strings)\nSpecifies free parameters.\n`e_pw`, `nsites`\nMeaning described in the main class doc string.", "source": "juraj-google-style"}
{"code": "def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):\n    assert (x.shape.ndims == (1 + num_nonbatch_dims))\n    new_shape = (prototype.shape.as_list()[:(- num_nonbatch_dims)] + x.shape.as_list()[1:])\n    assert (None not in new_shape)\n    if (new_shape != x.shape.as_list()):\n        x = tf.reshape(x, new_shape)\n    return x", "docstring": "Reverse op of _tf_flatten_batch_dims.\n\nUn-flatten the first dimension of x to match all but the last\nnum_nonbatch_dims dimensions of prototype.\n\nArgs:\nx: a tf.Tensor with 1 + num_nonbatch_dims dimensions\nnum_nonbatch_dims: an integer\nprototype: a tf.Tensor\n\nReturns:\na tf.Tensor", "source": "codesearchnet"}
{"code": "def browse_podcasts_genres(self):\n    response = self._call(mc_calls.PodcastBrowseHierarchy)\n    genres = response.body.get('groups', [])\n    return genres", "docstring": "Get the genres from the Podcasts browse tab dropdown.\n\nReturns:\nlist: Genre groups that contain sub groups.", "source": "codesearchnet"}
{"code": "def _maybe_download_corpora(tmp_dir):\n  \n  mnli_filename = \"MNLI.zip\"\n  mnli_finalpath = os.path.join(tmp_dir, \"MNLI\")\n  if not tf.gfile.Exists(mnli_finalpath):\n    zip_filepath = generator_utils.maybe_download(\n        tmp_dir, mnli_filename, _MNLI_URL)\n    zip_ref = zipfile.ZipFile(zip_filepath, \"r\")\n    zip_ref.extractall(tmp_dir)\n    zip_ref.close()\n\n  return mnli_finalpath", "docstring": "Download corpora for multinli.\n\nArgs:\ntmp_dir: a string\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def variable_op_v2(shape, dtype, name='Variable', container='', shared_name=''):\n    return gen_state_ops.variable_v2(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name)", "docstring": "Create a variable Operation.\n\nSee also variables.Variable.\n\nArgs:\nshape: The shape of the tensor managed by this variable\ndtype: The underlying type of the tensor values.\nname: optional name to use for the variable op.\ncontainer: An optional string. Defaults to \"\".\nIf non-empty, this variable is placed in the given container.\nOtherwise, a default container is used.\nshared_name: An optional string. Defaults to \"\".\nIf non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.\n\nReturns:\nA variable tensor.", "source": "github-repos"}
{"code": "def ApproximateDistanceBetweenPoints(pa, pb):\n  \n  alat, alon = pa\n  blat, blon = pb\n  sa = transitfeed.Stop(lat=alat, lng=alon)\n  sb = transitfeed.Stop(lat=blat, lng=blon)\n  return transitfeed.ApproximateDistanceBetweenStops(sa, sb)", "docstring": "Finds the distance between two points on the Earth's surface.\n\nThis is an approximate distance based on assuming that the Earth is a sphere.\nThe points are specified by their lattitude and longitude.\n\nArgs:\npa: the first (lat, lon) point tuple\npb: the second (lat, lon) point tuple\n\nReturns:\nThe distance as a float in metres.", "source": "juraj-google-style"}
{"code": "def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, Any]:\n    del path, value_spec, allow_partial, child_transform\n    return (False, self)", "docstring": "Custom apply on a value based on its original value spec.\n\nThis implements ``pg.pg_typing.CustomTyping``, allowing a pure symbolic\nvalue to be assigned to any field. To customize this behavior, override\nthis method in subclasses.\n\nArgs:\npath: KeyPath of current object under its object tree.\nvalue_spec: Original value spec for this field.\nallow_partial: Whether allow partial object to be created.\nchild_transform: Function to transform child node values into their final\nvalues. Transform function is called on leaf nodes first, then on their\nparents, recursively.\n\nReturns:\nA tuple (proceed_with_standard_apply, value_to_proceed).\nIf proceed_with_standard_apply is set to False, value_to_proceed\nwill be used as final value.\n\nRaises:\nError when the value is not compatible with the value spec.", "source": "github-repos"}
{"code": "def SafeReadBytes(self, length):\n    data = self.ReadBytes(length)\n    if (len(data) < length):\n        raise ValueError('Not enough data available')\n    else:\n        return data", "docstring": "Read exactly `length` number of bytes from the stream.\n\nRaises:\nValueError is not enough data\n\nReturns:\nbytes: `length` number of bytes", "source": "codesearchnet"}
{"code": "def _example_from_allof(self, prop_spec):\n        \n        example_dict = {}\n        for definition in prop_spec['allOf']:\n            update = self.get_example_from_prop_spec(definition, True)\n            example_dict.update(update)\n        return example_dict", "docstring": "Get the examples from an allOf section.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example dict", "source": "juraj-google-style"}
{"code": "def postprocess_monograph(marc_xml, mods, uuid, counter, url):\n    dom = double_linked_dom(mods)\n    if (not isinstance(marc_xml, MARCXMLRecord)):\n        marc_xml = MARCXMLRecord(marc_xml)\n    add_missing_xml_attributes(dom, counter)\n    fix_invalid_type_parameter(dom)\n    if uuid:\n        add_uuid(dom, uuid)\n    add_marccountry_tag(dom)\n    add_genre(dom)\n    remove_hairs_from_tags(dom)\n    fix_issuance(dom)\n    fix_location_tag(dom)\n    fix_related_item_tag(dom)\n    fix_missing_electronic_locator_tag(dom, url)\n    fix_missing_lang_tags(marc_xml, dom)\n    return dom.prettify()", "docstring": "Fix bugs in `mods` produced by XSLT template.\n\nArgs:\nmarc_xml (str): Original Aleph record.\nmods (str): XML string generated by XSLT template.\nuuid (str): UUID of the package.\ncounter (int): Number of record, is added to XML headers.\nurl (str): URL of the publication (public or not).\n\nReturns:\nstr: Updated XML.", "source": "codesearchnet"}
{"code": "def g_step(self, gen_frames, fake_logits_stop):\n    hparam_to_gen_loss = {'least_squares': gan_losses.least_squares_generator_loss, 'cross_entropy': gan_losses.modified_generator_loss, 'wasserstein': gan_losses.wasserstein_generator_loss}\n    fake_logits = self.discriminator(gen_frames)\n    mean_fake_logits = tf.reduce_mean(fake_logits)\n    tf.summary.scalar('mean_fake_logits', mean_fake_logits)\n    generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]\n    gan_g_loss_pos_d = generator_loss_func(discriminator_gen_outputs=fake_logits, add_summaries=True)\n    gan_g_loss_neg_d = (- generator_loss_func(discriminator_gen_outputs=fake_logits_stop, add_summaries=True))\n    return (gan_g_loss_pos_d, gan_g_loss_neg_d)", "docstring": "Performs the generator step in computing the GAN loss.\n\nArgs:\ngen_frames: Generated frames\nfake_logits_stop: Logits corresponding to the generated frames as per\nthe discriminator. Assumed to have a stop-gradient term.\nReturns:\ngan_g_loss_pos_d: Loss.\ngan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.", "source": "codesearchnet"}
{"code": "def verify(self, verify_locations: str) -> None:\n        \n        \n        with open(verify_locations):\n            pass\n\n        try:\n            self._ocsp_response.basic_verify(verify_locations)\n        except _nassl.OpenSSLError as e:\n            if 'certificate verify error' in str(e):\n                raise OcspResponseNotTrustedError(verify_locations)\n            raise", "docstring": "Verify that the OCSP response is trusted.\n\nArgs:\nverify_locations: The file path to a trust store containing pem-formatted certificates, to be used for\nvalidating the OCSP response.\n\nRaises OcspResponseNotTrustedError if the validation failed ie. the OCSP response is not trusted.", "source": "juraj-google-style"}
{"code": "def solve_sweep_structure(self, structures, sweep_param_list, filename='structure_n_effs.dat', plot=True, x_label='Structure number', fraction_mode_list=[]):\n    n_effs = []\n    mode_types = []\n    fractions_te = []\n    fractions_tm = []\n    for s in tqdm.tqdm(structures, ncols=70):\n        self.solve(s)\n        n_effs.append(np.real(self.n_effs))\n        mode_types.append(self._get_mode_types())\n        fractions_te.append(self.fraction_te)\n        fractions_tm.append(self.fraction_tm)\n    if filename:\n        self._write_n_effs_to_file(n_effs, (self._modes_directory + filename), sweep_param_list)\n        with open((self._modes_directory + 'mode_types.dat'), 'w') as fs:\n            header = ','.join((('Mode%i' % i) for (i, _) in enumerate(mode_types[0])))\n            fs.write((('\n            for mt in mode_types:\n                txt = ','.join((('%s %.2f' % pair) for pair in mt))\n                fs.write((txt + '\\n'))\n        with open((self._modes_directory + 'fraction_te.dat'), 'w') as fs:\n            header = 'fraction te'\n            fs.write((('\n            for (param, fte) in zip(sweep_param_list, fractions_te):\n                txt = ('%.6f,' % param)\n                txt += ','.join((('%.2f' % f) for f in fte))\n                fs.write((txt + '\\n'))\n        with open((self._modes_directory + 'fraction_tm.dat'), 'w') as fs:\n            header = 'fraction tm'\n            fs.write((('\n            for (param, ftm) in zip(sweep_param_list, fractions_tm):\n                txt = ('%.6f,' % param)\n                txt += ','.join((('%.2f' % f) for f in ftm))\n                fs.write((txt + '\\n'))\n        if plot:\n            if MPL:\n                title = ('$n_{eff}$ vs %s' % x_label)\n                y_label = '$n_{eff}$'\n            else:\n                title = ('n_{effs} vs %s' % x_label)\n                y_label = 'n_{eff}'\n            self._plot_n_effs((self._modes_directory + filename), (self._modes_directory + 'fraction_te.dat'), x_label, y_label, title)\n            title = ('TE Fraction vs %s' % x_label)\n            self._plot_fraction((self._modes_directory + 'fraction_te.dat'), x_label, 'TE Fraction [%]', title, fraction_mode_list)\n            title = ('TM Fraction vs %s' % x_label)\n            self._plot_fraction((self._modes_directory + 'fraction_tm.dat'), x_label, 'TM Fraction [%]', title, fraction_mode_list)\n    return n_effs", "docstring": "Find the modes of many structures.\n\nArgs:\nstructures (list): A list of `Structures` to find the modes\nof.\nsweep_param_list (list): A list of the parameter-sweep sweep\nthat was used.  This is for plotting purposes only.\nfilename (str): The nominal filename to use when saving the\neffective indices.  Defaults to 'structure_n_effs.dat'.\nplot (bool): `True` if plots should be generates,\notherwise `False`.  Default is `True`.\nx_label (str): x-axis text to display in the plot.\nfraction_mode_list (list): A list of mode indices of the modes\nthat should be included in the TE/TM mode fraction plot.\nIf the list is empty, all modes will be included.  The list\nis empty by default.\n\nReturns:\nlist: A list of the effective indices found for each structure.", "source": "codesearchnet"}
{"code": "def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server=\"localhost\", name=None):\n        \n        super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server)\n        valid_plot_types = {\n            \"scatter\": self.viz.scatter,\n            \"line\": self.viz.line}\n        self.plot_type = plot_type\n        \n        if plot_type not in valid_plot_types.keys():\n            raise ValueError(\"plot_type \\'{}\\' not found. Must be one of {}\".format(\n                plot_type, valid_plot_types.keys()))\n        self.chart = valid_plot_types[plot_type]", "docstring": "Multiple lines can be added to the same plot with the \"name\" attribute (see example)\nArgs:\nfields: Currently unused\nplot_type: {scatter, line}\n\nExamples:\n>>> scatter_logger = VisdomPlotLogger('line')\n>>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name=\"train\")\n>>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name=\"test\")", "source": "juraj-google-style"}
{"code": "def save_results(vcs, signature, result_path, patterns):\n    results_directory = _get_results_directory(vcs, signature)\n    if (not os.path.exists(results_directory)):\n        os.makedirs(results_directory)\n    with open(os.path.join(results_directory, 'patterns'), 'w') as f:\n        f.write('\\n'.join(patterns))\n    if (not os.path.exists(os.path.join(results_directory, 'results'))):\n        os.mkdir(os.path.join(results_directory, 'results'))\n    includes = ['--include={}'.format(x) for x in patterns]\n    cmd = ((['rsync', '-r'] + includes) + ['--exclude=*', os.path.join(result_path, ''), os.path.join(results_directory, 'results', '')])\n    subprocess.check_call(cmd)", "docstring": "Save results matching `patterns` at `result_path`.\n\nArgs:\nvcs (easyci.vcs.base.Vcs) - the VCS object for the actual project\n(not the disposable copy)\nsignature (str) - the project state signature\nresult_path (str) - the path containing the result, usually\na disposable copy of the project\npatterns (str) - `rsync`-compatible patterns matching test results\nto save.", "source": "codesearchnet"}
{"code": "def set_default_by_alias(self, alias):\n    if (alias not in self._aliases):\n        raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))\n    self._default_index = self._aliases[alias]", "docstring": "Set the default dataset by its alias.\n\nAfter changing the default dataset, all calls without explicitly specifying the\ndataset by index or alias will be redirected to this dataset.\n\nArgs:\nalias (str): The alias of the dataset that should be made the default.\n\nRaises:\nDataInvalidAlias: If the alias does not represent a valid dataset.", "source": "codesearchnet"}
{"code": "def get_reverse_dependency_tree(package_name, depth=None, paths=None, build_requires=False, private_build_requires=False):\n    pkgs_list = [[package_name]]\n    g = digraph()\n    g.add_node(package_name)\n    it = iter_package_families(paths)\n    package_names = set((x.name for x in it))\n    if (package_name not in package_names):\n        raise PackageFamilyNotFoundError(('No such package family %r' % package_name))\n    if (depth == 0):\n        return (pkgs_list, g)\n    bar = ProgressBar('Searching', len(package_names))\n    lookup = defaultdict(set)\n    for (i, package_name_) in enumerate(package_names):\n        it = iter_packages(name=package_name_, paths=paths)\n        packages = list(it)\n        if (not packages):\n            continue\n        pkg = max(packages, key=(lambda x: x.version))\n        requires = []\n        for variant in pkg.iter_variants():\n            pbr = (private_build_requires and (pkg.name == package_name))\n            requires += variant.get_requires(build_requires=build_requires, private_build_requires=pbr)\n        for req in requires:\n            if (not req.conflict):\n                lookup[req.name].add(package_name_)\n        bar.next()\n    bar.finish()\n    n = 0\n    consumed = set([package_name])\n    working_set = set([package_name])\n    node_color = '\n    node_fontsize = 10\n    node_attrs = [('fillcolor', node_color), ('style', 'filled'), ('fontsize', node_fontsize)]\n    while (working_set and ((depth is None) or (n < depth))):\n        working_set_ = set()\n        for child in working_set:\n            parents = (lookup[child] - consumed)\n            working_set_.update(parents)\n            consumed.update(parents)\n            for parent in parents:\n                g.add_node(parent, attrs=node_attrs)\n                g.add_edge((parent, child))\n        if working_set_:\n            pkgs_list.append(sorted(list(working_set_)))\n        working_set = working_set_\n        n += 1\n    return (pkgs_list, g)", "docstring": "Find packages that depend on the given package.\n\nThis is a reverse dependency lookup. A tree is constructed, showing what\npackages depend on the given package, with an optional depth limit. A\nresolve does not occur. Only the latest version of each package is used,\nand requirements from all variants of that package are used.\n\nArgs:\npackage_name (str): Name of the package depended on.\ndepth (int): Tree depth limit, unlimited if None.\npaths (list of str): paths to search for packages, defaults to\n`config.packages_path`.\nbuild_requires (bool): If True, includes packages' build_requires.\nprivate_build_requires (bool): If True, include `package_name`'s\nprivate_build_requires.\n\nReturns:\nA 2-tuple:\n- (list of list of str): Lists of package names, where each list is a\nsingle depth in the tree. The first list is always [`package_name`].\n- `pygraph.digraph` object, where nodes are package names, and\n`package_name` is always the leaf node.", "source": "codesearchnet"}
{"code": "def get_models_in_diff():\n    fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')\n    modified_files = subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()\n    relevant_modified_files = [x for x in modified_files if '/models/' in x and x.endswith('.py')]\n    model_names = set()\n    for file_path in relevant_modified_files:\n        model_name = file_path.split('/')[-2]\n        model_names.add(model_name)\n    return model_names", "docstring": "Finds all models that have been modified in the diff.\n\nReturns:\nA set containing the names of the models that have been modified (e.g. {'llama', 'whisper'}).", "source": "github-repos"}
{"code": "def get(self, key, value):\n    if (key == 'id'):\n        response = self._swimlane.request('get', 'app/{0}/record/{1}'.format(self._app.id, value))\n        return Record(self._app, response.json())\n    if (key == 'tracking_id'):\n        response = self._swimlane.request('get', 'app/{0}/record/tracking/{1}'.format(self._app.id, value))\n        return Record(self._app, response.json())", "docstring": "Get a single record by id\n\nSupports resource cache\n\n.. versionchanged:: 2.17.0\nAdded option to retrieve record by tracking_id\n\nKeyword Args:\nid (str): Full record ID\ntracking_id (str): Record Tracking ID\n\nReturns:\nRecord: Matching Record instance returned from API\n\nRaises:\nTypeError: No id argument provided", "source": "codesearchnet"}
{"code": "def deregister_instances(name, instances, region=None, key=None, keyid=None, profile=None):\n    if (isinstance(instances, six.string_types) or isinstance(instances, six.text_type)):\n        instances = [instances]\n    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n    try:\n        registered_instances = conn.deregister_instances(name, instances)\n    except boto.exception.BotoServerError as error:\n        if (error.error_code == 'InvalidInstance'):\n            log.warning('One or more of instance(s) %s are not part of ELB %s. deregister_instances not performed.', instances, name)\n            return None\n        else:\n            log.warning(error)\n            return False\n    registered_instance_ids = [instance.id for instance in registered_instances]\n    deregister_failures = set(instances).intersection(set(registered_instance_ids))\n    if deregister_failures:\n        log.warning('Instance(s): %s not deregistered from ELB %s.', list(deregister_failures), name)\n        deregister_result = False\n    else:\n        deregister_result = True\n    return deregister_result", "docstring": "Deregister instances with an ELB.  Instances is either a string\ninstance id or a list of string instance id's.\n\nReturns:\n\n- ``True``: instance(s) deregistered successfully\n- ``False``: instance(s) failed to be deregistered\n- ``None``: instance(s) not valid or not registered, no action taken\n\nCLI example:\n\n.. code-block:: bash\n\nsalt myminion boto_elb.deregister_instances myelb instance_id\nsalt myminion boto_elb.deregister_instances myelb \"[instance_id, instance_id]\"", "source": "codesearchnet"}
{"code": "def max_variance_genes(data, nbins=5, frac=0.2):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    indices = []\n    if sparse.issparse(data):\n        means, var = sparse_mean_var(data)\n    else:\n        means = data.mean(1)\n        var = data.var(1)\n    mean_indices = means.argsort()\n    n_elements = int(data.shape[0]/nbins)\n    frac_elements = int(n_elements*frac)\n    for i in range(nbins):\n        bin_i = mean_indices[i*n_elements : (i+1)*n_elements]\n        if i==nbins-1:\n            bin_i = mean_indices[i*n_elements :]\n        var_i = var[bin_i]\n        var_sorted = var_i.argsort()\n        top_var_indices = var_sorted[len(bin_i) - frac_elements:]\n        ind = bin_i[top_var_indices]\n        \n        ind = [index for index in ind if var[index]>0]\n        indices.extend(ind)\n    return indices", "docstring": "This function identifies the genes that have the max variance\nacross a number of bins sorted by mean.\n\nArgs:\ndata (array): genes x cells\nnbins (int): number of bins to sort genes by mean expression level. Default: 10.\nfrac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1\n\nReturns:\nlist of gene indices (list of ints)", "source": "juraj-google-style"}
{"code": "def schema_keys(schema):\n\n    def _get_leaf(value):\n        if isinstance(value, Schema):\n            return _get_leaf(value._schema)\n        return value\n    keys = set()\n    dict_ = schema._schema\n    assert isinstance(dict_, dict)\n    for key in dict_.iterkeys():\n        key_ = _get_leaf(key)\n        if isinstance(key_, basestring):\n            keys.add(key_)\n    return keys", "docstring": "Get the string values of keys in a dict-based schema.\n\nNon-string keys are ignored.\n\nReturns:\nSet of string keys of a schema which is in the form (eg):\n\nschema = Schema({Required(\"foo\"): int,\nOptional(\"bah\"): basestring})", "source": "codesearchnet"}
{"code": "def floodlight_rows(config, task: dict, report_id: int) -> Generator[list[str, str, str, str, str, str, int], None, None]:\n    filename, report = report_file(config, task['auth'], task['account'], report_id, None, 10)\n    rows = report_to_rows(report)\n    rows = report_clean(rows)\n    rows = rows_header_trim(rows)\n    rows = rows_to_type(rows, column=6)\n    return rows", "docstring": "Monitor a report for completion and return rows\n\nArgs:\nreport_id - the report created earlier for a specific floodlight id.\n\nReturns:\nA stream of rows, see FLOODLIGHT_* constants for definitions.", "source": "github-repos"}
{"code": "def AddBudget(self, client_customer_id, micro_amount):\n    \n    self.client.SetClientCustomerId(client_customer_id)\n\n    budget_service = self.client.GetService('BudgetService')\n\n    operations = [{\n        'operator': 'ADD',\n        'operand': {\n            'name': 'Budget \n            'amount': {\n                'microAmount': micro_amount\n            },\n            'deliveryMethod': 'STANDARD'\n        }\n    }]\n\n    return budget_service.mutate(operations)['value'][0]['budgetId']", "docstring": "Create a new Budget with the given microAmount.\n\nArgs:\nclient_customer_id: str Client Customer Id used to create Budget.\nmicro_amount: str The budget represented in micros.\n\nReturns:\nstr BudgetId of the newly created Budget.", "source": "juraj-google-style"}
{"code": "def rolldim(P, n=1):\n    dim = P.dim\n    shape = P.shape\n    dtype = P.dtype\n    A = dict((((key[n:] + key[:n]), P.A[key]) for key in P.keys))\n    return Poly(A, dim, shape, dtype)", "docstring": "Roll the axes.\n\nArgs:\nP (Poly) : Input polynomial.\nn (int) : The axis that after rolling becomes the 0th axis.\n\nReturns:\n(Poly) : Polynomial with new axis configuration.\n\nExamples:\n>>> x,y,z = variable(3)\n>>> P = x*x*x + y*y + z\n>>> print(P)\nq0^3+q1^2+q2\n>>> print(rolldim(P))\nq0^2+q2^3+q1", "source": "codesearchnet"}
{"code": "def retrieve(url):\n    try:\n        pem_data = urlopen(url).read()\n    except (ValueError, HTTPError):\n        warnings.warn('Certificate URL is invalid.')\n        return False\n    if (sys.version >= '3'):\n        try:\n            pem_data = pem_data.decode()\n        except UnicodeDecodeError:\n            warnings.warn('Certificate encoding is not utf-8.')\n            return False\n    return _parse_pem_data(pem_data)", "docstring": "Retrieve and parse PEM-encoded X.509 certificate chain.\n\nSee `validate.request` for additional info.\n\nArgs:\nurl: str. SignatureCertChainUrl header value sent by request.\n\nReturns:\nlist or bool: If url is valid, returns the certificate chain as a list\nof cryptography.hazmat.backends.openssl.x509._Certificate\ncertificates where certs[0] is the first certificate in the file; if\nurl is invalid, returns False.", "source": "codesearchnet"}
{"code": "def push(self, is_building_function, enter_context_fn, device_stack):\n    self.stack.append(ContextSwitch(is_building_function, enter_context_fn, device_stack))", "docstring": "Push metadata about a context switch onto the stack.\n\nA context switch can take any one of the two forms: installing a graph as\nthe default graph, or entering the eager context. For each context switch,\nwe record whether or not the entered context is building a function.\n\nArgs:\nis_building_function: (bool.) Whether the context is building a function.\nenter_context_fn: (function.) A callable that executes the context switch.\nFor example, `graph.as_default` or `eager_mode`.\ndevice_stack: If applicable, the device function stack for this graph.\nWhen breaking out of graphs in init_scope, the innermost nonempty device\nstack is used. Eager contexts put `None` here and the value is never\nused.", "source": "github-repos"}
{"code": "def from_config(cls, config):\n    if 'dtype' in config and isinstance(config['dtype'], dict):\n        config = config.copy()\n        policy = dtype_policies.deserialize(config['dtype'])\n        if not isinstance(policy, dtype_policies.DTypePolicyMap) and policy.quantization_mode is None:\n            policy = policy.name\n        config['dtype'] = policy\n    try:\n        return cls(**config)\n    except Exception as e:\n        raise TypeError(f\"Error when deserializing class '{cls.__name__}' using config={config}.\\n\\nException encountered: {e}\")", "docstring": "Creates an operation from its config.\n\nThis method is the reverse of `get_config`, capable of instantiating the\nsame operation from the config dictionary.\n\nNote: If you override this method, you might receive a serialized dtype\nconfig, which is a `dict`. You can deserialize it as follows:\n\n```python\nif \"dtype\" in config and isinstance(config[\"dtype\"], dict):\npolicy = dtype_policies.deserialize(config[\"dtype\"])\n```\n\nArgs:\nconfig: A Python dictionary, typically the output of `get_config`.\n\nReturns:\nAn operation instance.", "source": "github-repos"}
{"code": "def get_missing_simulations(self, param_list, runs=None):\n        \n\n        params_to_simulate = []\n\n        if runs is not None:  \n            next_runs = self.db.get_next_rngruns()\n            available_params = [r['params'] for r in self.db.get_results()]\n            for param_comb in param_list:\n                \n                \n                \n                needed_runs = runs\n                for i, p in enumerate(available_params):\n                    if param_comb == {k: p[k] for k in p.keys() if k != \"RngRun\"}:\n                        needed_runs -= 1\n                new_param_combs = []\n                for needed_run in range(needed_runs):\n                    \n                    \n                    \n                    \n                    \n                    new_param = deepcopy(param_comb)\n                    new_param['RngRun'] = next(next_runs)\n                    new_param_combs += [new_param]\n                params_to_simulate += new_param_combs\n        else:\n            for param_comb in param_list:\n                if not self.db.get_results(param_comb):\n                    params_to_simulate += [param_comb]\n\n        return params_to_simulate", "docstring": "Return a list of the simulations among the required ones that are not\navailable in the database.\n\nArgs:\nparam_list (list): a list of dictionaries containing all the\nparameters combinations.\nruns (int): an integer representing how many repetitions are wanted\nfor each parameter combination, None if the dictionaries in\nparam_list already feature the desired RngRun value.", "source": "juraj-google-style"}
{"code": "def _store_checkpoint(self, sess, saver, global_step):\n    if ((not self._logdir) or (not saver)):\n        return\n    tf.gfile.MakeDirs(self._logdir)\n    filename = os.path.join(self._logdir, 'model.ckpt')\n    saver.save(sess, filename, global_step)", "docstring": "Store a checkpoint if a log directory was provided to the constructor.\n\nThe directory will be created if needed.\n\nArgs:\nsess: Session containing variables to store.\nsaver: Saver used for checkpointing.\nglobal_step: Step number of the checkpoint name.", "source": "codesearchnet"}
{"code": "def create(cls, env, filenames, trim=False):\n        \n        import_graph = cls(env)\n        for filename in filenames:\n            import_graph.add_file_recursive(os.path.abspath(filename), trim)\n        import_graph.build()\n        return import_graph", "docstring": "Create and return a final graph.\n\nArgs:\nenv: An environment.Environment object\nfilenames: A list of filenames\ntrim: Whether to trim the dependencies of builtin and system files.\n\nReturns:\nAn immutable ImportGraph with the recursive dependencies of all the\nfiles in filenames", "source": "juraj-google-style"}
{"code": "def console_set_background_flag(con: tcod.console.Console, flag: int) -> None:\n    lib.TCOD_console_set_background_flag(_console(con), flag)", "docstring": "Change the default blend mode for this console.\n\nArgs:\ncon (Console): Any Console instance.\nflag (int): Blend mode to use by default.\n\n.. deprecated:: 8.5\nSet :any:`Console.default_bg_blend` instead.", "source": "codesearchnet"}
{"code": "def multivariate_ess(samples, batch_size_generator=None):\n    samples_generator = _get_sample_generator(samples)\n    return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))", "docstring": "r\"\"\"Estimate the multivariate Effective Sample Size for the samples of every problem.\n\nThis essentially applies :func:`estimate_multivariate_ess` to every problem.\n\nArgs:\nsamples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and\nn samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,\na generator function that yields sample arrays of shape (p, n).\nbatch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many\nbatches and of which size we use in estimating the minimum ESS.\n\nReturns:\nndarray: the multivariate ESS per problem", "source": "codesearchnet"}
{"code": "def get_test_configs():\n    test_configs = [('NHWC', False), ('NHWC', True)]\n    return test_configs", "docstring": "Get all the valid tests configs to run.\n\nReturns:\nall the valid test configs as tuples of data_format and use_gpu.", "source": "github-repos"}
{"code": "def create_deferred(self, func, input_layer, deferred_args, deferred_kwargs,\n                      name):\n    \n    my_defaults = _defaults\n\n    def _with_method_complete(*args, **kwargs):\n      input_layer = args[0]\n      with input_layer.g.as_default(), defaults_scope(**my_defaults), \\\n          tf.name_scope(name):\n        return input_layer._method_complete(func(*args, **kwargs))\n    \n    \n    full_args = [input_layer]\n    full_args.extend(deferred_args)\n    partial_context = {}\n    if isinstance(input_layer, _DeferredLayer):\n      partial_context = input_layer._partial_context\n    return _DeferredLayer(input_layer.bookkeeper,\n                          scopes.Template(None, _with_method_complete),\n                          full_args,\n                          deferred_kwargs,\n                          scope=input_layer._scope,\n                          defaults=input_layer.defaults,\n                          partial_context=partial_context)", "docstring": "Creates a deferred node with captured scope.\n\nArgs:\nfunc: The original function to call.\ninput_layer: The input_layer.\ndeferred_args: The arguments that will be used bythe deferred function.\ndeferred_kwargs: The keyword args for the deferred function.\nname: The name of this layer.\nReturns:\nA _DeferredLayer that will execute func in the correct scopes.", "source": "juraj-google-style"}
{"code": "def get_frame(self, index=None, onset=None):\n    if onset:\n        index = int((onset * self.fps))\n    return super(VideoStim, self).get_frame(index)", "docstring": "Overrides the default behavior by giving access to the onset\nargument.\n\nArgs:\nindex (int): Positional index of the desired frame.\nonset (float): Onset (in seconds) of the desired frame.", "source": "codesearchnet"}
{"code": "def is_edge_change_point(change_point_index, data_size, edge_segment_size=constants._EDGE_SEGMENT_SIZE):\n    return change_point_index > data_size - edge_segment_size", "docstring": "Removes the change points that are at the edges of the data.\nArgs:\nchange_point_index: Index of the change point.\ndata_size: Size of the data.\nedge_segment_size: Size of the edge segment.", "source": "github-repos"}
{"code": "def decodes(self, s: str) -> BioCCollection:\n    tree = etree.parse(io.BytesIO(bytes(s, encoding='UTF-8')))\n    collection = self.__parse_collection(tree.getroot())\n    collection.encoding = tree.docinfo.encoding\n    collection.standalone = tree.docinfo.standalone\n    collection.version = tree.docinfo.xml_version\n    return collection", "docstring": "Deserialize ``s`` to a BioC collection object.\n\nArgs:\ns: a \"str\" instance containing a BioC collection\n\nReturns:\nan object of BioCollection", "source": "codesearchnet"}
{"code": "def delete(self):\n    try:\n        self._api.table_delete(self._name_parts)\n    except google.datalab.utils.RequestException:\n        pass\n    except Exception as e:\n        raise e\n    return (not self.exists())", "docstring": "Delete the table.\n\nReturns:\nTrue if the Table no longer exists; False otherwise.", "source": "codesearchnet"}
{"code": "def open_phrase(self, string, pos):\n\n\t\t\n\n\t\t\n\t\tif string[pos - 1] == \"\\\\\":\n\t\t\t\n\t\t\tstring = string[:pos - 1] + string[pos:]\n\n\t\t\t\n\t\t\t\n\t\t\tpos -= 1\n\n\t\t\t\n\t\t\t\n\t\t\tif pos == 0 or string[pos - 1] != \"\\\\\":\n\t\t\t\ttag = self.meta.search(string, pos + 1)\n\n\t\t\t\treturn string, None, tag\n\n\t\tchild = Phrase(pos)\n\n\t\tescaped, child = self.parse(string[pos + 1:], child)\n\n\t\tstring = string[:pos + 1] + escaped\n\n\t\ttag = self.meta.search(string, child.closing + 1)\n\n\t\treturn string, child, tag", "docstring": "Helper function of self.parse() handling opening tags.\n\nArguments:\nstring (str): The string being parsed.\npos (int): The index/position of the opening tag in the string.\n\nReturns:\nThe (possibly) escaped string, a child phrase if the opening tag\nwas not escaped and otherwise None, and a new tag match, either\nstarting at one index passed the escaped tag or one index passed\nthe closing tag of the child.", "source": "juraj-google-style"}
{"code": "def _retrieve_object(output_dict: Dict[(str, Any)], obj: Any) -> None:\n    import ROOT\n    if (isinstance(obj, ROOT.TH1) or isinstance(obj, ROOT.THnBase)):\n        if isinstance(obj, ROOT.TH1):\n            obj.SetDirectory(0)\n        ROOT.SetOwnership(obj, False)\n        output_dict[obj.GetName()] = obj\n    if isinstance(obj, ROOT.TCollection):\n        output_dict[obj.GetName()] = {}\n        for obj_temp in list(obj):\n            _retrieve_object(output_dict[obj.GetName()], obj_temp)", "docstring": "Function to recursively retrieve histograms from a list in a ROOT file.\n\n``SetDirectory(True)`` is applied to TH1 derived hists and python is explicitly given\nownership of the retrieved objects.\n\nArgs:\noutput_dict (dict): Dict under which hists should be stored.\nobj (ROOT.TObject derived): Object(s) to be stored. If it is a collection,\nit will be recursed through.\nReturns:\nNone: Changes in the dict are reflected in the output_dict which was passed.", "source": "codesearchnet"}
{"code": "def check_network_role(self, public_key):\n        \n        state_root = self._current_root_func()\n        if state_root == INIT_ROOT_KEY:\n            LOGGER.debug(\"Chain head is not set yet. Permit all.\")\n            return True\n\n        self._cache.update_view(state_root)\n        role = self._cache.get_role(\"network\", state_root)\n\n        if role is None:\n            policy_name = \"default\"\n        else:\n            policy_name = role.policy_name\n        policy = self._cache.get_policy(policy_name, state_root)\n        if policy is not None:\n            if not self._allowed(public_key, policy):\n                LOGGER.debug(\"Node is not permitted: %s.\", public_key)\n                return False\n        return True", "docstring": "Check the public key of a node on the network to see if they are\npermitted to participate. The roles being checked are the\nfollowing, from first to last:\n\"network\"\n\"default\"\n\nThe first role that is set will be the one used to enforce if the\nnode is allowed.\n\nArgs:\npublic_key (string): The public key belonging to a node on the\nnetwork", "source": "juraj-google-style"}
{"code": "def _InitializeGraph(self, os_name, artifact_list):\n    \n    dependencies = artifact_registry.REGISTRY.SearchDependencies(\n        os_name, artifact_list)\n    artifact_names, attribute_names = dependencies\n\n    self._AddAttributeNodes(attribute_names)\n    self._AddArtifactNodesAndEdges(artifact_names)", "docstring": "Creates the nodes and directed edges of the dependency graph.\n\nArgs:\nos_name: String specifying the OS name.\nartifact_list: List of requested artifact names.", "source": "juraj-google-style"}
{"code": "def AddStorageMediaImageOptions(self, argument_group):\n    argument_group.add_argument('--partitions', '--partition', dest='partitions', action='store', type=str, default=None, help='Define partitions to be processed. A range of partitions can be defined as: \"3..5\". Multiple partitions can be defined as: \"1,3,5\" (a list of comma separated values). Ranges and lists can also be combined as: \"1,3..5\". The first partition is 1. All partitions can be specified with: \"all\".')\n    argument_group.add_argument('--volumes', '--volume', dest='volumes', action='store', type=str, default=None, help='Define volumes to be processed. A range of volumes can be defined as: \"3..5\". Multiple volumes can be defined as: \"1,3,5\" (a list of comma separated values). Ranges and lists can also be combined as: \"1,3..5\". The first volume is 1. All volumes can be specified with: \"all\".')", "docstring": "Adds the storage media image options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def _ParseProcessingOptions(self, options):\n    \n    argument_helper_names = [\n        'process_resources', 'temporary_directory', 'zeromq']\n    helpers_manager.ArgumentHelperManager.ParseOptions(\n        options, self, names=argument_helper_names)\n\n    worker_memory_limit = getattr(options, 'worker_memory_limit', None)\n\n    if worker_memory_limit and worker_memory_limit < 0:\n      raise errors.BadConfigOption(\n          'Invalid worker memory limit value cannot be negative.')\n\n    self._worker_memory_limit = worker_memory_limit", "docstring": "Parses the processing options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "juraj-google-style"}
{"code": "def deprocess_input(input_array, input_range=(0, 255)):\n    \n    \n    input_array = input_array.copy()\n    input_array -= input_array.mean()\n    input_array /= (input_array.std() + K.epsilon())\n    input_array *= 0.1\n\n    \n    input_array += 0.5\n    input_array = np.clip(input_array, 0, 1)\n\n    \n    return (input_range[1] - input_range[0]) * input_array + input_range[0]", "docstring": "Utility function to scale the `input_array` to `input_range` throwing away high frequency artifacts.\n\nArgs:\ninput_array: An N-dim numpy array.\ninput_range: Specifies the input range as a `(min, max)` tuple to rescale the `input_array`.\n\nReturns:\nThe rescaled `input_array`.", "source": "juraj-google-style"}
{"code": "def _run_graph(self, device, input_shape, perm, num_iters, datatype):\n    graph = ops.Graph()\n    with graph.as_default():\n        outputs = build_graph(device, input_shape, perm, datatype, num_iters)\n        with session_lib.Session(graph=graph) as session:\n            variables.global_variables_initializer().run()\n            session.run(outputs)\n            start_time = time.time()\n            session.run(outputs)\n            duration = (time.time() - start_time) / num_iters\n            throughput = np.prod(np.array(input_shape)) * datatype().itemsize * 2 / duration / 1000000000.0\n            print('%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s.' % (device, str(datatype), str(input_shape).replace(' ', ''), str(perm).replace(' ', ''), num_iters, duration, throughput))\n    name_template = 'transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}'\n    self.report_benchmark(name=name_template.format(device=device, dtype=str(datatype).replace(' ', ''), inputshape=str(input_shape).replace(' ', ''), perm=str(perm).replace(' ', '')).replace(' ', ''), iters=num_iters, wall_time=duration)\n    return duration", "docstring": "runs the graph and print its execution time.\n\nArgs:\ndevice: String, the device to run on.\ninput_shape: Shape of the input tensor.\nperm: A list of ints with the same length as input tensor's dimension.\nnum_iters: Number of iterations to run the benchmark.\ndatatype: numpy data type of the input tensor.\n\nReturns:\nThe duration of the run in seconds.", "source": "github-repos"}
{"code": "def start_txn(self, txn_name=None):\n    if (not txn_name):\n        txn_name = uuid.uuid4().hex\n    txn_response = self.api.http_request('POST', ('%s/fcr:tx' % self.root), data=None, headers=None)\n    if (txn_response.status_code == 201):\n        txn_uri = txn_response.headers['Location']\n        logger.debug(('spawning transaction: %s' % txn_uri))\n        txn = Transaction(self, txn_name, txn_uri, expires=txn_response.headers['Expires'])\n        self.txns[txn_name] = txn\n        return txn", "docstring": "Request new transaction from repository, init new Transaction,\nstore in self.txns\n\nArgs:\ntxn_name (str): human name for transaction\n\nReturn:\n(Transaction): returns intance of newly created transaction", "source": "codesearchnet"}
{"code": "def hist_axis_func(axis_type: enum.Enum) -> Callable[[Hist], Axis]:\n    \n    def axis_func(hist: Hist) -> Axis:\n        \n        \n        \n        \n        try:\n            \n            hist_axis_type = axis_type.value\n        except AttributeError:\n            \n            hist_axis_type = axis_type\n\n        if hasattr(hist, \"ProjectionND\") and hasattr(hist, \"Projection\"):\n            \n            \n            \n            return hist.GetAxis(hist_axis_type)\n        else:\n            \n            axis_function_map = {\n                TH1AxisType.x_axis.value: hist.GetXaxis,\n                TH1AxisType.y_axis.value: hist.GetYaxis,\n                TH1AxisType.z_axis.value: hist.GetZaxis\n            }\n\n            \n            \n            return_func = axis_function_map[hist_axis_type]\n            return return_func()\n\n    return axis_func", "docstring": "Wrapper to retrieve the axis of a given histogram.\n\nThis can be convenient outside of just projections, so it's made available in the API.\n\nArgs:\naxis_type: The type of axis to retrieve.\nReturns:\nCallable to retrieve the specified axis when given a hist.", "source": "juraj-google-style"}
{"code": "def _prevent_2nd_derivative(x):\n\n    def grad(dy):\n        return array_ops.prevent_gradient(dy, message='Second derivative is not implemented.')\n    return (tf.identity(x), grad)", "docstring": "Disables computation of the second derivatives for a tensor.\n\nNB: you need to apply a non-identity function to the output tensor for the\nexception to be raised.\n\nArguments:\nx: A tensor.\n\nReturns:\nA tensor with the same value and the same derivative as x, but that raises\nLookupError when trying to compute the second derivatives.", "source": "codesearchnet"}
{"code": "def get_unprocessed_data(self, how_many, model_settings, mode):\n    candidates = self.data_index[mode]\n    if how_many == -1:\n        sample_count = len(candidates)\n    else:\n        sample_count = how_many\n    desired_samples = model_settings['desired_samples']\n    words_list = self.words_list\n    data = np.zeros((sample_count, desired_samples))\n    labels = []\n    with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n        wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])\n        wav_loader = io_ops.read_file(wav_filename_placeholder)\n        wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1, desired_samples=desired_samples)\n        foreground_volume_placeholder = tf.compat.v1.placeholder(tf.float32, [])\n        scaled_foreground = tf.multiply(wav_decoder.audio, foreground_volume_placeholder)\n        for i in range(sample_count):\n            if how_many == -1:\n                sample_index = i\n            else:\n                sample_index = np.random.randint(len(candidates))\n            sample = candidates[sample_index]\n            input_dict = {wav_filename_placeholder: sample['file']}\n            if sample['label'] == SILENCE_LABEL:\n                input_dict[foreground_volume_placeholder] = 0\n            else:\n                input_dict[foreground_volume_placeholder] = 1\n            data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()\n            label_index = self.word_to_index[sample['label']]\n            labels.append(words_list[label_index])\n    return (data, labels)", "docstring": "Retrieve sample data for the given partition, with no transformations.\n\nArgs:\nhow_many: Desired number of samples to return. -1 means the entire\ncontents of this partition.\nmodel_settings: Information about the current model being trained.\nmode: Which partition to use, must be 'training', 'validation', or\n'testing'.\n\nReturns:\nList of sample data for the samples, and list of labels in one-hot form.", "source": "github-repos"}
{"code": "def kill(container, rm=True):\n    \n    container = get_container(container)\n    if not container:\n        raise Exception('No such container: %s' % container)\n    unbind_all(container['ip']) \n\n    sudo('docker kill %s' % container['name'])\n    if rm:\n        sudo('docker rm %s' % container['name'])", "docstring": "Kill a container\n\nArgs:\n* container: Container name or ID\n* rm=True: Remove the container or not", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_header_map = self._GetDataTypeMap('systemd_journal_file_header')\n\n    try:\n      file_header, _ = self._ReadStructureFromFileObject(\n          file_object, 0, file_header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse file header with error: {0!s}'.format(\n              exception))\n\n    if file_header.signature != self._FILE_SIGNATURE:\n      raise errors.UnableToParseFile('Invalid file signature.')\n\n    if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES:\n      raise errors.UnableToParseFile(\n          'Unsupported file header size: {0:d}.'.format(\n              file_header.header_size))\n\n    data_hash_table_end_offset = (\n        file_header.data_hash_table_offset +\n        file_header.data_hash_table_size)\n    field_hash_table_end_offset = (\n        file_header.field_hash_table_offset +\n        file_header.field_hash_table_size)\n    self._maximum_journal_file_offset = max(\n        data_hash_table_end_offset, field_hash_table_end_offset)\n\n    entry_object_offsets = self._ParseEntryObjectOffsets(\n        file_object, file_header.entry_array_offset)\n\n    for entry_object_offset in entry_object_offsets:\n      if entry_object_offset == 0:\n        continue\n\n      try:\n        fields = self._ParseJournalEntry(file_object, entry_object_offset)\n      except errors.ParseError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'Unable to parse journal entry at offset: 0x{0:08x} with '\n            'error: {1!s}').format(entry_object_offset, exception))\n        return\n\n      event_data = SystemdJournalEventData()\n\n      event_data.body = fields.get('MESSAGE', None)\n      event_data.hostname = fields.get('_HOSTNAME', None)\n      event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)\n\n      if event_data.reporter and event_data.reporter != 'kernel':\n        event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))\n\n      date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n          timestamp=fields['real_time'])\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Systemd journal file-like object.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the header cannot be parsed.", "source": "juraj-google-style"}
{"code": "def __enter__(self):\n    self._old = self._var_scope_store.current_scope\n    if isinstance(self._name_or_scope, VariableScope):\n        self._var_scope_store.open_variable_scope(self._new_name)\n        self._old_subscopes = copy.copy(self._var_scope_store.variable_scopes_count)\n        variable_scope_object = self._cached_variable_scope_object\n    else:\n        self._new_name = self._old.name + '/' + self._name_or_scope if self._old.name else self._name_or_scope\n        self._reuse = self._reuse or self._old.reuse\n        if self._old_name_scope is None:\n            name_scope = self._name_or_scope\n        else:\n            name_scope = self._old_name_scope\n        variable_scope_object = VariableScope(self._reuse, name=self._new_name, initializer=self._old.initializer, regularizer=self._old.regularizer, caching_device=self._old.caching_device, partitioner=self._old.partitioner, dtype=self._old.dtype, use_resource=self._old.use_resource, custom_getter=self._old.custom_getter, name_scope=name_scope, constraint=self._constraint)\n        if self._initializer is not None:\n            variable_scope_object.set_initializer(self._initializer)\n        if self._regularizer is not None:\n            variable_scope_object.set_regularizer(self._regularizer)\n        if self._caching_device is not None:\n            variable_scope_object.set_caching_device(self._caching_device)\n        if self._partitioner is not None:\n            variable_scope_object.set_partitioner(self._partitioner)\n        if self._custom_getter is not None:\n            variable_scope_object.set_custom_getter(_maybe_wrap_custom_getter(self._custom_getter, self._old.custom_getter))\n        if self._dtype is not None:\n            variable_scope_object.set_dtype(self._dtype)\n        if self._use_resource is not None:\n            variable_scope_object.set_use_resource(self._use_resource)\n        self._var_scope_store.open_variable_scope(self._new_name)\n    self._var_scope_store.current_scope = variable_scope_object\n    self._last_variable_scope_object = variable_scope_object\n    return variable_scope_object", "docstring": "Begins the scope block.\n\nReturns:\nA VariableScope.\nRaises:\nValueError: when trying to reuse within a create scope, or create within\na reuse scope, or if reuse is not `None` or `True`.\nTypeError: when the types of some arguments are not appropriate.", "source": "github-repos"}
{"code": "def flick(self, x, y, speed):\n        \n        self._driver.flick(self, x, y, speed)", "docstring": "Deprecated use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead.\nFlick on the touch screen using finger motion events.\nThis flickcommand starts at a particulat screen location.\n\nSupport:\niOS\n\nArgs:\nx(float}: The x offset in pixels to flick by.\ny(float): The y offset in pixels to flick by.\nspeed(float) The speed in pixels per seconds.\n\nReturns:\nWebElement object.", "source": "juraj-google-style"}
{"code": "def _callable_func(self, func, axis, *args, **kwargs):\n\n    def callable_apply_builder(df, axis=0):\n        if (not axis):\n            df.index = index\n            df.columns = pandas.RangeIndex(len(df.columns))\n        else:\n            df.columns = index\n            df.index = pandas.RangeIndex(len(df.index))\n        result = df.apply(func, *args, axis=axis, **kwargs)\n        return result\n    index = (self.index if (not axis) else self.columns)\n    func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)\n    result_data = self._map_across_full_axis(axis, func_prepared)\n    return self._post_process_apply(result_data, axis)", "docstring": "Apply callable functions across given axis.\n\nArgs:\nfunc: The functions to apply.\naxis: Target axis to apply the function along.\n\nReturns:\nA new PandasQueryCompiler.", "source": "codesearchnet"}
{"code": "def __init__(self, config=None, namespace=None):\n        \n        self.driver = get_database_instance(config)\n        self.user = generate_key_pair(get_value('secret', 'SECRET', None, config))\n        self.namespace = get_value('db.namespace', 'DB_NAMESPACE', 'namespace' if not namespace else namespace, config)\n        self.logger = logging.getLogger('Plugin')\n        logging.basicConfig(level=logging.INFO)", "docstring": "Initialize a :class:`~.Plugin` instance and connect to BigchainDB.\nArgs:\n*nodes (str): One or more URLs of BigchainDB nodes to\nconnect to as the persistence layer", "source": "juraj-google-style"}
{"code": "def _process_health_pill_event(self, node_name_set, mapping, target_step, file_path):\n    events_loader = event_file_loader.EventFileLoader(file_path)\n    for event in events_loader.Load():\n        if (not event.HasField('summary')):\n            logger.warn('An event in a debugger events file lacks a summary.')\n            continue\n        if (event.step < target_step):\n            continue\n        if (event.step > target_step):\n            return True\n        for value in event.summary.value:\n            summary_metadata = value.metadata\n            plugin_data = summary_metadata.plugin_data\n            if (plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME):\n                try:\n                    content = json.loads(tf.compat.as_text(summary_metadata.plugin_data.content))\n                except ValueError as err:\n                    logger.warn('Could not parse the JSON string containing data for the debugger plugin: %r, %r', content, err)\n                    continue\n                device_name = content['device']\n                output_slot = content['outputSlot']\n            else:\n                logger.error('No debugger plugin data found for event with tag %s and node name %s.', value.tag, value.node_name)\n                continue\n            if (not value.HasField('tensor')):\n                logger.warn('An event in a debugger events file lacks a tensor value.')\n                continue\n            match = re.match('^(.*):(\\\\d+):DebugNumericSummary$', value.node_name)\n            if (not match):\n                logger.warn('A event with a health pill has an invalid watch, (i.e., an unexpected debug op): %r', value.node_name)\n                return None\n            health_pill = self._process_health_pill_value(wall_time=event.wall_time, step=event.step, device_name=device_name, output_slot=output_slot, node_name=match.group(1), tensor_proto=value.tensor, node_name_set=node_name_set)\n            if (not health_pill):\n                continue\n            mapping[health_pill.node_name].append(health_pill)\n    return False", "docstring": "Creates health pills out of data in an event.\n\nCreates health pills out of the event and adds them to the mapping.\n\nArgs:\nnode_name_set: A set of node names that are relevant.\nmapping: The mapping from node name to HealthPillEvents.\nThis object may be destructively modified.\ntarget_step: The target step at which to obtain health pills.\nfile_path: The path to the file with health pill events.\n\nReturns:\nWhether we should stop reading events because future events are no longer\nrelevant.", "source": "codesearchnet"}
{"code": "def single_qubit_matrix_to_pauli_rotations(\n        mat: np.ndarray, atol: float = 0\n) -> List[Tuple[ops.Pauli, float]]:\n    \n\n    def is_clifford_rotation(half_turns):\n        return near_zero_mod(half_turns, 0.5, atol=atol)\n\n    def to_quarter_turns(half_turns):\n        return round(2 * half_turns) % 4\n\n    def is_quarter_turn(half_turns):\n        return (is_clifford_rotation(half_turns) and\n                to_quarter_turns(half_turns) % 2 == 1)\n\n    def is_half_turn(half_turns):\n        return (is_clifford_rotation(half_turns) and\n                to_quarter_turns(half_turns) == 2)\n\n    def is_no_turn(half_turns):\n        return (is_clifford_rotation(half_turns) and\n                to_quarter_turns(half_turns) == 0)\n\n    \n    z_rad_before, y_rad, z_rad_after = (\n        linalg.deconstruct_single_qubit_matrix_into_angles(mat))\n    z_ht_before = z_rad_before / np.pi - 0.5\n    m_ht = y_rad / np.pi\n    m_pauli = ops.pauli_gates.X  \n    z_ht_after = z_rad_after / np.pi + 0.5\n\n    \n    if is_clifford_rotation(z_ht_before):\n        if ((is_quarter_turn(z_ht_before) or is_quarter_turn(z_ht_after)) ^\n            (is_half_turn(m_ht) and is_no_turn(z_ht_before-z_ht_after))):\n            z_ht_before += 0.5\n            z_ht_after -= 0.5\n            m_pauli = ops.pauli_gates.Y\n        if is_half_turn(z_ht_before) or is_half_turn(z_ht_after):\n            z_ht_before -= 1\n            z_ht_after += 1\n            m_ht = -m_ht\n    if is_no_turn(m_ht):\n        z_ht_before += z_ht_after\n        z_ht_after = 0\n    elif is_half_turn(m_ht):\n        z_ht_after -= z_ht_before\n        z_ht_before = 0\n\n    \n    rotation_list = [\n        (ops.pauli_gates.Z, z_ht_before),\n        (m_pauli, m_ht),\n        (ops.pauli_gates.Z, z_ht_after)]\n    return [(pauli, ht) for pauli, ht in rotation_list if not is_no_turn(ht)]", "docstring": "Implements a single-qubit operation with few rotations.\n\nArgs:\nmat: The 2x2 unitary matrix of the operation to implement.\natol: A limit on the amount of absolute error introduced by the\nconstruction.\n\nReturns:\nA list of (Pauli, half_turns) tuples that, when applied in order,\nperform the desired operation.", "source": "juraj-google-style"}
{"code": "def SummaryMetadata(self, run, tag):\n    accumulator = self.GetAccumulator(run)\n    return accumulator.SummaryMetadata(tag)", "docstring": "Return the summary metadata for the given tag on the given run.\n\nArgs:\nrun: A string name of the run for which summary metadata is to be\nretrieved.\ntag: A string name of the tag whose summary metadata is to be\nretrieved.\n\nRaises:\nKeyError: If the run is not found, or the tag is not available for\nthe given run.\n\nReturns:\nA `SummaryMetadata` protobuf.", "source": "codesearchnet"}
{"code": "def confirm(statement):\n    \n    prompt = \"{statement} [y/n]\".format(statement=statement)\n    answer = _ask(prompt, limited_to=[\"yes\", \"no\", \"y\", \"n\"])\n    return answer and answer.startswith(\"y\")", "docstring": "Ask the user for confirmation about the specified statement.\n\nArgs:\nstatement (unicode): statement to ask the user confirmation about.\n\nReturns:\nbool: whether or not specified statement was confirmed.", "source": "juraj-google-style"}
{"code": "def sonority_from_fts(self, seg):\n\n    def match(m):\n        return self.fm.match(fts(m), seg)\n    minusHi = BoolTree(match('-hi'), 9, 8)\n    minusNas = BoolTree(match('-nas'), 6, 5)\n    plusVoi1 = BoolTree(match('+voi'), 4, 3)\n    plusVoi2 = BoolTree(match('+voi'), 2, 1)\n    plusCont = BoolTree(match('+cont'), plusVoi1, plusVoi2)\n    plusSon = BoolTree(match('+son'), minusNas, plusCont)\n    minusCons = BoolTree(match('-cons'), 7, plusSon)\n    plusSyl = BoolTree(match('+syl'), minusHi, minusCons)\n    return plusSyl.get_value()", "docstring": "Given a segment as features, returns the sonority on a scale of 1\nto 9.\n\nArgs:\nseg (list): collection of (value, feature) pairs representing\na segment (vowel or consonant)\n\nReturns:\nint: sonority of `seg` between 1 and 9", "source": "codesearchnet"}
{"code": "def depth_may_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_may_average_ground_temperature`'.format(value))\n\n        self._depth_may_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_may_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_may_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def __init__(self, name=None):\n    \n    if not name or name[-1] != '/':  \n      with tf.compat.v1.name_scope(name or type(self).__name__) as name:\n        pass\n    self._name = name", "docstring": "Creates the ExponentialFamily.\n\nArgs:\nname: Python `str` used as TF namescope for ops created by member\nfunctions. Default value: `None` (i.e., the subclass name).", "source": "juraj-google-style"}
{"code": "def __init__(self, name, combine_fn):\n    self.name = name\n    self.combine_fn = combine_fn\n    self.accumulator = combine_fn.create_accumulator()\n    self._add_input = self.combine_fn.add_input", "docstring": "Creates a Counter object.\n\nArgs:\nname: the name of this counter. It may be a string,\nor a CounterName object.\ncombine_fn: the CombineFn to use for aggregation", "source": "github-repos"}
{"code": "def all(script, face=True, vert=True):\n    filter_xml = ''.join(['  <filter name=\"Select All\">\\n', '    <Param name=\"allFaces\" ', 'value=\"{}\" '.format(str(face).lower()), 'description=\"DSelect all Faces\" ', 'type=\"RichBool\" ', '/>\\n', '    <Param name=\"allVerts\" ', 'value=\"{}\" '.format(str(vert).lower()), 'description=\"Select all Vertices\" ', 'type=\"RichBool\" ', '/>\\n', '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Select all the faces of the current mesh\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nfaces (bool): If True the filter will select all the faces.\nverts (bool): If True the filter will select all the vertices.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"}
{"code": "def list_groups(refresh=False):\n    if (('group.list_groups' in __context__) and (not refresh)):\n        return __context__['group.list_groups']\n    results = _get_all_groups()\n    ret = []\n    for result in results:\n        ret.append(result.Name)\n    __context__['group.list_groups'] = ret\n    return ret", "docstring": "Return a list of groups\n\nArgs:\n\nrefresh (bool):\nRefresh the info for all groups in ``__context__``. If False only\nthe groups in ``__context__`` will be returned. If True, the\n``__context__`` will be refreshed with current data and returned.\nDefault is False\n\nReturns:\nlist: A list of groups on the machine\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.list_groups", "source": "codesearchnet"}
{"code": "def set_size(self, mode):\n    return len(self.data_index[mode])", "docstring": "Calculates the number of samples in the dataset partition.\n\nArgs:\nmode: Which partition, must be 'training', 'validation', or 'testing'.\n\nReturns:\nNumber of samples in the partition.", "source": "github-repos"}
{"code": "def add_step(self, step, run_meta):\n    op_log = tfprof_logger.merge_default_with_oplog(self._graph, run_meta=run_meta)\n    self._coverage = print_mdl.AddStep(step, _graph_string(self._graph), run_meta.SerializeToString(), op_log.SerializeToString())", "docstring": "Add statistics of a step.\n\nArgs:\nstep: int, An id used to group one or more different `run_meta` together.\nWhen profiling with the profile_xxx APIs, user can use the `step` id in\nthe `options` to profile these `run_meta` together.\nrun_meta: RunMetadata proto that contains statistics of a session run.", "source": "github-repos"}
{"code": "def _get_bases(type_):\n    try:\n\n        class _(type_):\n            'Check if type_ is subclassable.'\n        BaseClass = type_\n    except TypeError:\n        BaseClass = object\n\n    class MetaClass(_ValidationMeta, BaseClass.__class__):\n        'Use the type_ meta and include base validation functionality.'\n    return (BaseClass, MetaClass)", "docstring": "Get the base and meta classes to use in creating a subclass.\n\nArgs:\ntype_: The type to subclass.\n\nReturns:\nA tuple containing two values: a base class, and a metaclass.", "source": "codesearchnet"}
{"code": "def is_sequence(value):\n    return (hasattr(value, '__iter__') and (not isinstance(value, (six.string_types, six.binary_type))))", "docstring": "Determine if a value is a sequence type.\n\nReturns:\n``True`` if `value` is a sequence type (e.g., ``list``, or ``tuple``).\nString types will return ``False``.\n\nNOTE: On Python 3, strings have the __iter__ defined, so a simple hasattr\ncheck is insufficient.", "source": "codesearchnet"}
{"code": "def sg_reverse_seq(tensor, opt):\n    opt += tf.sg_opt(axis=1)\n    seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)\n    return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name)", "docstring": "r\"\"\"Reverses variable length slices.\n\nBefore applying the pure tensorflow function tf.reverse_sequence,\nthis function calculates sequence lengths by counting non-zeros.\n\nFor example,\n\n```\ntensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]\ntensor.sg_reverse_seq()\n=> [[3 2 1 0 0]\n[5 4 0 0 0]]\n```\n\nArgs:\ntensor: A 2-D `Tensor` (automatically given by chain).\nopt:\naxis: Axis to reverse. Default is 1.\nname : If provided, it replaces current tensor's name.\n\nReturns:\nA `Tensor` with the same shape and type as `tensor`.", "source": "codesearchnet"}
{"code": "def greedy_set_cover(universe, subsets, costs):\n    \n    elements = set(e for s in subsets.keys() for e in subsets[s])\n    \n    if elements != universe:\n        return None\n\n    \n    covered = set()\n    cover_sets = []\n\n    while covered != universe:\n        min_cost_elem_ratio = float(\"inf\")\n        min_set = None\n        \n        for s, elements in subsets.items():\n            new_elements = len(elements - covered)\n            \n            \n            if new_elements != 0:\n                cost_elem_ratio = costs[s] / new_elements\n                if cost_elem_ratio < min_cost_elem_ratio:\n                    min_cost_elem_ratio = cost_elem_ratio\n                    min_set = s\n        cover_sets.append(min_set)\n        \n        covered |= subsets[min_set]\n    return cover_sets", "docstring": "Approximate greedy algorithm for set-covering. Can be used on large\ninputs - though not an optimal solution.\n\nArgs:\nuniverse (list): Universe of elements\nsubsets (dict): Subsets of U {S1:elements,S2:elements}\ncosts (dict): Costs of each subset in S - {S1:cost, S2:cost...}", "source": "juraj-google-style"}
{"code": "def save(self, project):\n    if (('id' in project) and (project['id'] is not None)):\n        self.logger.debug(('Updating existing project: ' + json.dumps(project)))\n        url = ('%(base_url)s/%(project_id)s' % {'base_url': self.base_url, 'project_id': project['id']})\n        r = self.gbdx_connection.put(url, json=project)\n        try:\n            r.raise_for_status()\n        except:\n            print(r.text)\n            raise\n        return project['id']\n    else:\n        self.logger.debug(('Creating new project: ' + json.dumps(project)))\n        url = self.base_url\n        r = self.gbdx_connection.post(url, json=project)\n        try:\n            r.raise_for_status()\n        except:\n            print(r.text)\n            raise\n        project_json = r.json()\n        return project_json['id']", "docstring": "Saves an AnswerFactory Project\n\nArgs:\nproject (dict): Dictionary specifying an AnswerFactory Project.\n\nReturns:\nAnswerFactory Project id", "source": "codesearchnet"}
{"code": "def write_temp_file(self, content, filename=None, mode='w'):\n    if (filename is None):\n        filename = str(uuid.uuid4())\n    fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)\n    with open(fqpn, mode) as fh:\n        fh.write(content)\n    return fqpn", "docstring": "Write content to a temporary file.\n\nArgs:\ncontent (bytes|str): The file content. If passing binary data the mode needs to be set\nto 'wb'.\nfilename (str, optional): The filename to use when writing the file.\nmode (str, optional): The file write mode which could be either 'w' or 'wb'.\n\nReturns:\nstr: Fully qualified path name for the file.", "source": "codesearchnet"}
{"code": "def get_copy_folder_location():\n    copy_settings_path = 'Library/Application Support/Copy Agent/config.db'\n    copy_home = None\n    copy_settings = os.path.join(os.environ['HOME'], copy_settings_path)\n    if os.path.isfile(copy_settings):\n        database = sqlite3.connect(copy_settings)\n        if database:\n            cur = database.cursor()\n            query = \"SELECT value FROM config2 WHERE option = 'csmRootPath';\"\n            cur.execute(query)\n            data = cur.fetchone()\n            copy_home = str(data[0])\n            cur.close()\n    if (not copy_home):\n        error('Unable to find your Copy install =(')\n    return copy_home", "docstring": "Try to locate the Copy folder.\n\nReturns:\n(str) Full path to the current Copy folder", "source": "codesearchnet"}
{"code": "def _write_to_zip(self, path, contents):\n        \n        if isinstance(path, list):\n            path = os.path.sep.join(path)\n        self.zf.writestr(path, contents)", "docstring": "_write_to_zip: Write file to zip\nArgs:\npath: (str) where in zip to write file\ncontents: (str) contents of file to write\nReturns: None", "source": "juraj-google-style"}
{"code": "def get_module_by_id(module_id: str) -> Union[EFBChannel, EFBMiddleware]:\n    \n    try:\n        if master.channel_id == module_id:\n            return master\n    except NameError:\n        pass\n    if module_id in slaves:\n        return slaves[module_id]\n    for i in middlewares:\n        if i.middleware_id == module_id:\n            return i\n    raise NameError(\"Module ID {} is not found\".format(module_id))", "docstring": "Return the module instance of a provided module ID\nArgs:\nmodule_id: Module ID, with instance ID if available.\n\nReturns:\nModule instance requested.\n\nRaises:\nNameError: When the module is not found.", "source": "juraj-google-style"}
{"code": "def get_tensors_by_names(names):\n    \n    ret = []\n    G = tfv1.get_default_graph()\n    for n in names:\n        opn, varn = get_op_tensor_name(n)\n        ret.append(G.get_tensor_by_name(varn))\n    return ret", "docstring": "Get a list of tensors in the default graph by a list of names.\n\nArgs:\nnames (list):", "source": "juraj-google-style"}
{"code": "def update_model_path(self, model_path: Optional[str]=None):\n    self._model = model_path if model_path else self._model", "docstring": "Updates the pretrained model used by the Hugging Face Pipeline task.\nMake sure that the new model does the same task as initial model.\n\nArgs:\nmodel_path (str): (Optional) Path to the new trained model\nfrom Hugging Face. Defaults to None.", "source": "github-repos"}
{"code": "def get_mim_phenotypes(genemap_lines):\n    phenotype_mims = set()\n    phenotypes_found = {}\n    for entry in parse_genemap2(genemap_lines):\n        hgnc_symbol = entry['hgnc_symbol']\n        for phenotype in entry['phenotypes']:\n            mim_nr = phenotype['mim_number']\n            if (mim_nr in phenotypes_found):\n                phenotype_entry = phenotypes_found[mim_nr]\n                phenotype_entry['inheritance'] = phenotype_entry['inheritance'].union(phenotype['inheritance'])\n                phenotype_entry['hgnc_symbols'].add(hgnc_symbol)\n            else:\n                phenotype['hgnc_symbols'] = set([hgnc_symbol])\n                phenotypes_found[mim_nr] = phenotype\n    return phenotypes_found", "docstring": "Get a dictionary with phenotypes\n\nUse the mim numbers for phenotypes as keys and phenotype information as\nvalues.\n\nArgs:\ngenemap_lines(iterable(str))\n\nReturns:\nphenotypes_found(dict): A dictionary with mim_numbers as keys and\ndictionaries with phenotype information as values.\n\n{\n'description': str, # Description of the phenotype\n'hgnc_symbols': set(), # Associated hgnc symbols\n'inheritance': set(),  # Associated phenotypes\n'mim_number': int, # mim number of phenotype\n}", "source": "codesearchnet"}
{"code": "def offset(self, num_to_skip):\n    query = query_mod.Query(self)\n    return query.offset(num_to_skip)", "docstring": "Skip to an offset in a query with this collection as parent.\n\nSee\n:meth:`~.firestore_v1beta1.query.Query.offset` for\nmore information on this method.\n\nArgs:\nnum_to_skip (int): The number of results to skip at the beginning\nof query results. (Must be non-negative.)\n\nReturns:\n~.firestore_v1beta1.query.Query: An offset query.", "source": "codesearchnet"}
{"code": "def stSpectogram(signal, fs, win, step, PLOT=False):\n    \n    win = int(win)\n    step = int(step)\n    signal = numpy.double(signal)\n    signal = signal / (2.0 ** 15)\n    DC = signal.mean()\n    MAX = (numpy.abs(signal)).max()\n    signal = (signal - DC) / (MAX - DC)\n\n    N = len(signal)        \n    cur_p = 0\n    count_fr = 0\n    nfft = int(win / 2)\n    specgram = numpy.array([], dtype=numpy.float64)\n\n    while (cur_p + win - 1 < N):\n        count_fr += 1\n        x = signal[cur_p:cur_p+win]\n        cur_p = cur_p + step\n        X = abs(fft(x))\n        X = X[0:nfft]\n        X = X / len(X)\n\n        if count_fr == 1:\n            specgram = X ** 2\n        else:\n            specgram = numpy.vstack((specgram, X))\n\n    FreqAxis = [float((f + 1) * fs) / (2 * nfft) for f in range(specgram.shape[1])]\n    TimeAxis = [float(t * step) / fs for t in range(specgram.shape[0])]\n\n    if (PLOT):\n        fig, ax = plt.subplots()\n        imgplot = plt.imshow(specgram.transpose()[::-1, :])\n        fstep = int(nfft / 5.0)\n        FreqTicks = range(0, int(nfft) + fstep, fstep)\n        FreqTicksLabels = [str(fs / 2 - int((f * fs) / (2 * nfft))) for f in FreqTicks]\n        ax.set_yticks(FreqTicks)\n        ax.set_yticklabels(FreqTicksLabels)\n        TStep = int(count_fr/3)\n        TimeTicks = range(0, count_fr, TStep)\n        TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks]\n        ax.set_xticks(TimeTicks)\n        ax.set_xticklabels(TimeTicksLabels)\n        ax.set_xlabel('time (secs)')\n        ax.set_ylabel('freq (Hz)')\n        imgplot.set_cmap('jet')\n        plt.colorbar()\n        plt.show()\n\n    return (specgram, TimeAxis, FreqAxis)", "docstring": "Short-term FFT mag for spectogram estimation:\nReturns:\na numpy array (nFFT x numOfShortTermWindows)\nARGUMENTS:\nsignal:      the input signal samples\nfs:          the sampling freq (in Hz)\nwin:         the short-term window size (in samples)\nstep:        the short-term window step (in samples)\nPLOT:        flag, 1 if results are to be ploted\nRETURNS:", "source": "juraj-google-style"}
{"code": "def _compose_output_rep(lhs_rep, rhs_rep, lhs_contraction, rhs_contraction, lhs_batch, rhs_batch):\n    output_rep = []\n    for dim in lhs_batch:\n        output_rep.append(lhs_rep[dim])\n    for i in _minus(range(len(lhs_rep)), lhs_batch + lhs_contraction):\n        output_rep.append(lhs_rep[i])\n    for i in _minus(range(len(rhs_rep)), rhs_batch + rhs_contraction):\n        output_rep.append(rhs_rep[i])\n    return ''.join(output_rep)", "docstring": "Compose the output string representation.\n\ne.g., ij, jk, (((1,), (0,)), ((), ())) -> ik\naij, ajk, (((2,), (1,)), ((0,), (0,))) -> aik\n\nArgs:\nlhs_rep: A string representation for the left-hand side input array\nrhs_rep: A string representation for the right-hand side input array\nlhs_contraction: Sequence[int] (the contraction dimensions of lhs)\nrhs_contraction: Sequence[int] (the contraction dimensions of rhs)\nlhs_batch: Sequence[int] (the batch dimensions of lhs)\nrhs_batch: Sequence[int] (the batch dimensions of rhs)\n\nReturns:\nA string representation of the result array.", "source": "github-repos"}
{"code": "def _ConvertValueBinaryDataToUBInt64(self, value):\n    if (not value):\n        return None\n    integer_map = self._GetDataTypeMap('uint64be')\n    try:\n        return self._ReadStructureFromByteStream(value, 0, integer_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse integer value with error: {0!s}'.format(exception))", "docstring": "Converts a binary data value into an integer.\n\nArgs:\nvalue (bytes): binary data value containing an unsigned 64-bit big-endian\ninteger.\n\nReturns:\nint: integer representation of binary data value or None if value is\nnot set.\n\nRaises:\nParseError: if the integer value cannot be parsed.", "source": "codesearchnet"}
{"code": "def debug(self, status=None, nids=None):\n        \n        nrows, ncols = get_terminal_size()\n\n        \n        sched_excfile = os.path.join(self.workdir, \"_exceptions\")\n        if os.path.exists(sched_excfile):\n            with open(sched_excfile, \"r\") as fh:\n                cprint(\"Found exceptions raised by the scheduler\", \"red\")\n                cprint(fh.read(), color=\"red\")\n                return\n\n        if status is not None:\n            tasks = list(self.iflat_tasks(status=status, nids=nids))\n        else:\n            errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))\n            qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))\n            abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))\n            tasks = errors + qcriticals + abicriticals\n\n        \n        \n        \n        \n        \n        \n        \n        ntasks = 0\n        for task in tasks:\n            print(make_banner(str(task), width=ncols, mark=\"=\"))\n            ntasks += 1\n\n            \n            for efname in [\"qerr_file\", \"stderr_file\",]:\n                err_file = getattr(task, efname)\n                if err_file.exists:\n                    s = err_file.read()\n                    if not s: continue\n                    print(make_banner(str(err_file), width=ncols, mark=\"=\"))\n                    cprint(s, color=\"red\")\n                    \n\n            \n            try:\n                report = task.get_event_report()\n                if report and report.num_errors:\n                    print(make_banner(os.path.basename(report.filename), width=ncols, mark=\"=\"))\n                    s = \"\\n\".join(str(e) for e in report.errors)\n                else:\n                    s = None\n            except Exception as exc:\n                s = str(exc)\n\n            count = 0 \n            if s is not None:\n                cprint(s, color=\"red\")\n                count += 1\n\n            if not count:\n                \n                log_files = task.tmpdir.list_filepaths(wildcard=\"*LOG_*\")\n                if not log_files:\n                    cprint(\"No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs\", color=\"magenta\")\n\n                for log_file in log_files:\n                    try:\n                        report = EventsParser().parse(log_file)\n                        if report.errors:\n                            print(report)\n                            count += 1\n                            break\n                    except Exception as exc:\n                        cprint(str(exc), color=\"red\")\n                        count += 1\n                        break\n\n            if not count:\n                cprint(\"Houston, we could not find any error message that can explain the problem\", color=\"magenta\")\n\n        print(\"Number of tasks analyzed: %d\" % ntasks)", "docstring": "This method is usually used when the flow didn't completed succesfully\nIt analyzes the files produced the tasks to facilitate debugging.\nInfo are printed to stdout.\n\nArgs:\nstatus: If not None, only the tasks with this status are selected\nnids: optional list of node identifiers used to filter the tasks.", "source": "juraj-google-style"}
{"code": "def create_string(self, key, value):\n        \n        data = None\n        if key is not None and value is not None:\n            if isinstance(value, (bool, list, int, dict)):\n                \n                value = u'{}'.format(value)\n            \n            data = self.db.create(key.strip(), u'{}'.format(json.dumps(value)))\n        else:\n            self.tcex.log.warning(u'The key or value field was None.')\n        return data", "docstring": "Create method of CRUD operation for string data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"}
{"code": "def _build_zmat(self, construction_table):\n    c_table = construction_table\n    default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']\n    optional_cols = list((set(self.columns) - {'atom', 'x', 'y', 'z'}))\n    zmat_frame = pd.DataFrame(columns=(default_cols + optional_cols), dtype='float', index=c_table.index)\n    zmat_frame.loc[(:, optional_cols)] = self.loc[(c_table.index, optional_cols)]\n    zmat_frame.loc[(:, 'atom')] = self.loc[(c_table.index, 'atom')]\n    zmat_frame.loc[(:, ['b', 'a', 'd'])] = c_table\n    zmat_values = self._calculate_zmat_values(c_table)\n    zmat_frame.loc[(:, ['bond', 'angle', 'dihedral'])] = zmat_values\n    zmatrix = Zmat(zmat_frame, metadata=self.metadata, _metadata={'last_valid_cartesian': self.copy()})\n    return zmatrix", "docstring": "Create the Zmatrix from a construction table.\n\nArgs:\nConstruction table (pd.DataFrame):\n\nReturns:\nZmat: A new instance of :class:`Zmat`.", "source": "codesearchnet"}
{"code": "def _load_activations(self, filename):\n    logger.info(('Loading activation data from %s...' % filename))\n    activations = pd.read_csv(filename, sep='\\t')\n    activations.columns = [col.lower() for col in list(activations.columns)]\n    mc = ['x', 'y', 'z', 'id', 'space']\n    if (set(mc) - set(list(activations.columns))):\n        logger.error('At least one of mandatory columns (x, y, z, id, and space) is missing from input file.')\n        return\n    spaces = activations['space'].unique()\n    xyz = activations[['x', 'y', 'z']].values\n    for s in spaces:\n        if (s != self.transformer.target):\n            inds = (activations['space'] == s)\n            xyz[inds] = self.transformer.apply(s, xyz[inds])\n    activations[['x', 'y', 'z']] = xyz\n    ijk = pd.DataFrame(transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k'])\n    activations = pd.concat([activations, ijk], axis=1)\n    return activations", "docstring": "Load activation data from a text file.\n\nArgs:\nfilename (str): a string pointing to the location of the txt file\nto read from.", "source": "codesearchnet"}
{"code": "def add_to_query(self, query):\n    self.handle = win32pdh.AddCounter(query, self.path)", "docstring": "Add the current path to the query\n\nArgs:\nquery (obj):\nThe handle to the query to add the counter", "source": "codesearchnet"}
{"code": "def matches_kv(pcoll, regex, keyGroup, valueGroup=0):\n    regex = Regex._regex_compile(regex)\n\n    def _process(element):\n        match = regex.match(element)\n        if match:\n            yield (match.group(keyGroup), match.group(valueGroup))\n    return pcoll | FlatMap(_process)", "docstring": "Returns the KV pairs if the string matches the regular expression, deriving\nthe key & value from the specified group of the regular expression.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\nkeyGroup: The Regex group to use as the key. Can be int or str.\nvalueGroup: (optional) Regex group to use the value. Can be int or str.\nThe default value \"0\" returns entire matched string.", "source": "github-repos"}
{"code": "def _init_boto3_clients(self):\n    try:\n        profile = self._config.get('environment', {}).get('profile')\n        region = self._config.get('environment', {}).get('region')\n        if profile:\n            self._b3Sess = boto3.session.Session(profile_name=profile)\n        else:\n            self._b3Sess = boto3.session.Session()\n        self._s3 = self._b3Sess.client('s3')\n        self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)\n        self._ssm = self._b3Sess.client('ssm', region_name=region)\n        return True\n    except Exception as wtf:\n        logging.error('Exception caught in intialize_session(): {}'.format(wtf))\n        traceback.print_exc(file=sys.stdout)\n        return False", "docstring": "The utililty requires boto3 clients to Cloud Formation and S3. Here is\nwhere we make them.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False", "source": "codesearchnet"}
{"code": "def __init__(self, fraction_of_second=None, time_elements_tuple=None):\n    \n    if fraction_of_second is not None:\n      if fraction_of_second < 0.0 or fraction_of_second >= 1.0:\n        raise ValueError(\n            'Fraction of second value: {0:f} out of bounds.'.format(\n                fraction_of_second))\n\n    super(TimeElementsWithFractionOfSecond, self).__init__(\n        time_elements_tuple=time_elements_tuple)\n    self._precision = None\n    self.fraction_of_second = fraction_of_second", "docstring": "Initializes time elements.\n\nArgs:\nfraction_of_second (Optional[decimal.Decimal]): fraction of second, which\nmust be a value between 0.0 and 1.0.\ntime_elements_tuple (Optional[tuple[int, int, int, int, int, int]]):\ntime elements, contains year, month, day of month, hours, minutes and\nseconds.\n\nRaises:\nValueError: if the time elements tuple is invalid or fraction of second\nvalue is out of bounds.", "source": "juraj-google-style"}
{"code": "def qc_curve_group(self, tests, alias=None):\n        \n        keys = [k for k, v in self.data.items() if isinstance(v, Curve)]\n        if not keys:\n            return {}\n\n        all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))\n        data = {test.__name__: test(self, keys, alias) for test in all_tests}\n\n        results = {}\n        for i, key in enumerate(keys):\n            this = {}\n            for test, result in data.items():\n                this[test] = result[i]\n            results[key] = this\n        return results", "docstring": "Run tests on a cohort of curves.\n\nArgs:\nalias (dict): an alias dictionary, mapping mnemonics to lists of\nmnemonics.\n\nReturns:\ndict.", "source": "juraj-google-style"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsOperationsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def replace(self, **kw):\n    if ('tzinfo' in kw):\n        if (kw['tzinfo'] is None):\n            raise TypeError('Can not remove the timezone use asdatetime()')\n        else:\n            tzinfo = kw['tzinfo']\n            del kw['tzinfo']\n    else:\n        tzinfo = None\n    is_dst = None\n    if ('is_dst' in kw):\n        is_dst = kw['is_dst']\n        del kw['is_dst']\n    else:\n        is_dst = self.is_dst\n    replaced = self.asdatetime().replace(**kw)\n    return type(self)(replaced, tzinfo=(tzinfo or self.tzinfo.zone), is_dst=is_dst)", "docstring": "Return datetime with new specified fields given as arguments.\n\nFor example, dt.replace(days=4) would return a new datetime_tz object with\nexactly the same as dt but with the days attribute equal to 4.\n\nAny attribute can be replaced, but tzinfo can not be set to None.\n\nArgs:\nAny datetime_tz attribute.\n\nReturns:\nA datetime_tz object with the attributes replaced.\n\nRaises:\nTypeError: If the given replacement is invalid.", "source": "codesearchnet"}
{"code": "def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n    temperatures = np.linspace(tmin, tmax, ntemp)\n    if self.structure:\n        ylabel = '$S$ (J/K/mol)'\n    else:\n        ylabel = '$S$ (J/K/mol-c)'\n    fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)\n    return fig", "docstring": "Plots the vibrational entrpy in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "codesearchnet"}
{"code": "def merge_value(self, json_value: Any, target: message.Message) -> None:\n    target_descriptor = target.DESCRIPTOR\n    if annotation_utils.is_primitive_type(target_descriptor):\n        if isinstance(json_value, dict):\n            self._merge_message(json_value, target)\n            extension_field = target_descriptor.fields_by_name.get('extension')\n            if extension_field is None:\n                raise ValueError(f\"Invalid primitive. No 'extension' field exists on {target_descriptor.full_name}.\")\n            primitive_has_no_value = extensions.create_primitive_has_no_value(extension_field.message_type)\n            proto_utils.append_value_at_field(target, extension_field, primitive_has_no_value)\n        else:\n            wrapper = self.primitive_handler.primitive_wrapper_from_json_value(json_value, type(target), default_timezone=self.default_timezone)\n            wrapper.merge_into(target)\n    elif annotation_utils.is_reference(target_descriptor):\n        self._merge_message(json_value, target)\n        references.split_if_relative_reference(target)\n    elif isinstance(json_value, dict):\n        self._merge_message(json_value, target)\n    elif isinstance(json_value, (tuple, list)) and len(json_value) == 1:\n        self._merge_message(json_value[0], target)\n    else:\n        raise ValueError(f'Expected a JSON object for field of type: {target_descriptor.full_name}.')", "docstring": "Merges the provided json_value into the target Message.\n\nArgs:\njson_value: A Python-native representation of JSON data.\ntarget: The target Message to merge the JSON data into.", "source": "github-repos"}
{"code": "def for_model(self, fn):\n    return ray.get(self.workers[0].for_model.remote(fn))", "docstring": "Apply the given function to a single model replica.\n\nReturns:\nResult from applying the function.", "source": "codesearchnet"}
{"code": "def __init__(self, envs, blocking):\n    \n    self._envs = envs\n    self._blocking = blocking\n    observ_space = self._envs[0].observation_space\n    if not all(env.observation_space == observ_space for env in self._envs):\n      raise ValueError('All environments must use the same observation space.')\n    action_space = self._envs[0].action_space\n    if not all(env.action_space == action_space for env in self._envs):\n      raise ValueError('All environments must use the same observation space.')", "docstring": "Combine multiple environments to step them in batch.\n\nTo step environments in parallel, environments must support a\n`blocking=False` argument to their step and reset functions that makes them\nreturn callables instead to receive the result at a later time.\n\nArgs:\nenvs: List of environments.\nblocking: Step environments after another rather than in parallel.\n\nRaises:\nValueError: Environments have different observation or action spaces.", "source": "juraj-google-style"}
{"code": "def raster(self, path, size, bandtype=gdal.GDT_Byte):\n        \n        path = getattr(path, 'name', path)\n        try:\n            is_multiband = len(size) > 2\n            nx, ny, nbands = size if is_multiband else size + (1,)\n        except (TypeError, ValueError) as exc:\n            exc.args = ('Size must be 2 or 3-item sequence',)\n            raise\n        if nx < 1 or ny < 1:\n            raise ValueError('Invalid raster size %s' % (size,))\n        \n        if not self._is_empty(path):\n            raise IOError('%s already exists, open with Raster()' % path)\n        ds = self.Create(path, nx, ny, nbands, bandtype)\n        if not ds:\n            raise ValueError(\n                'Could not create %s using %s' % (path, str(self)))\n        return Raster(ds)", "docstring": "Returns a new Raster instance.\n\ngdal.Driver.Create() does not support all formats.\n\nArguments:\npath -- file object or path as str\nsize -- two or three-tuple of (xsize, ysize, bandcount)\nbandtype -- GDAL pixel data type", "source": "juraj-google-style"}
{"code": "def as_date(dat):\n    \n    LOGGER.debug('as_date(%s)', dat)\n\n    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(\n        calendar.timegm(dat.timetuple()))", "docstring": "Return the RFC3339 UTC string representation of the given date and time.\n\nArgs:\ndat (:py:class:`datetime.date`): the object/type to be serialized.\n\nRaises:\nTypeError:\nwhen ``o`` is not an instance of ``datetime.date``.\n\nReturns:\n(str) JSON serializable type for the given object.", "source": "juraj-google-style"}
{"code": "def rated_movies(self, **kwargs):\n        \n        path = self._get_guest_session_id_path('rated_movies')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Get a list of rated moview for a specific guest session id.\n\nArgs:\npage: (optional) Minimum 1, maximum 1000.\nsort_by: (optional) 'created_at.asc' | 'created_at.desc'\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def ws010c(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `ws010c`'.format(value))\n    self._ws010c = value", "docstring": "Corresponds to IDD Field `ws010c`\nWind speed corresponding to 1.0% cumulative frequency\nof occurrence for coldest month;\n\nArgs:\nvalue (float): value for IDD Field `ws010c`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def get_variantid(variant_obj, family_id):\n    new_id = parse_document_id(chrom=variant_obj['chromosome'], pos=str(variant_obj['position']), ref=variant_obj['reference'], alt=variant_obj['alternative'], variant_type=variant_obj['variant_type'], case_id=family_id)\n    return new_id", "docstring": "Create a new variant id.\n\nArgs:\nvariant_obj(dict)\nfamily_id(str)\n\nReturns:\nnew_id(str): The new variant id", "source": "codesearchnet"}
{"code": "def get_stored_hash(self, temp_ver):\n    with open(self._prefixed(('%s.hash' % temp_ver.name))) as f:\n        return f.read().strip()", "docstring": "Retrieves the hash for the given template version from the store\n\nArgs:\ntemp_ver (TemplateVersion): template version to retrieve the hash\nfor\n\nReturns:\nstr: hash of the given template version", "source": "codesearchnet"}
{"code": "def __init__(self, path, recursive=True, ignoreErrors=True):\n        \n        self._path = os.path.normpath(path)\n        self._recursive = recursive\n        self._ignoreErrors = ignoreErrors\n        self._indexLoaded = False\n        self._mibIndex = None", "docstring": "Create an instance of *FileReader* serving a directory.\n\nArgs:\npath (str): directory to search MIB files\n\nKeyword Args:\nrecursive (bool): whether to include subdirectories\nignoreErrors (bool): ignore filesystem access errors", "source": "juraj-google-style"}
{"code": "def _get_setting(self, key, default_value=None, value_type=str):\n        \n        try:\n            state_entry = self._state_view.get(\n                SettingsView.setting_address(key))\n        except KeyError:\n            return default_value\n\n        if state_entry is not None:\n            setting = Setting()\n            setting.ParseFromString(state_entry)\n            for setting_entry in setting.entries:\n                if setting_entry.key == key:\n                    return value_type(setting_entry.value)\n\n        return default_value", "docstring": "Get the setting stored at the given key.\n\nArgs:\nkey (str): the setting key\ndefault_value (str, optional): The default value, if none is\nfound. Defaults to None.\nvalue_type (function, optional): The type of a setting value.\nDefaults to `str`.\n\nReturns:\nstr: The value of the setting if found, default_value\notherwise.", "source": "juraj-google-style"}
{"code": "def clear_operations_touching(self, qubits: Iterable[ops.Qid], moment_indices: Iterable[int]):\n    qubits = frozenset(qubits)\n    for k in moment_indices:\n        if (0 <= k < len(self._moments)):\n            self._moments[k] = self._moments[k].without_operations_touching(qubits)", "docstring": "Clears operations that are touching given qubits at given moments.\n\nArgs:\nqubits: The qubits to check for operations on.\nmoment_indices: The indices of moments to check for operations\nwithin.", "source": "codesearchnet"}
{"code": "def main(argv=None):\n    if argv is None:\n        argv = sys.argv\n    args = parse_args(argv)\n    logging.basicConfig(level=50 - args.verbosity * 10)\n    if args.diff:\n        mode = merge_pyi.Mode.DIFF\n    elif args.in_place:\n        mode = merge_pyi.Mode.OVERWRITE\n    else:\n        mode = merge_pyi.Mode.PRINT\n    backup = args.backup or None\n    changed = merge_pyi.merge_files(py_path=args.py, pyi_path=args.pyi, mode=mode, backup=backup)\n    if mode == merge_pyi.Mode.OVERWRITE:\n        if changed:\n            print(f'Merged types to {args.py} from {args.pyi}')\n        else:\n            print(f'No new types for {args.py} in {args.pyi}')", "docstring": "Merge a source file and a pyi file.\n\nArgs:\nargv: Flags and files to process.", "source": "github-repos"}
{"code": "def get_config_files():\n    apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), APPS_DIR)\n    custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)\n    config_files = set()\n    custom_files = set()\n    if os.path.isdir(custom_apps_dir):\n        for filename in os.listdir(custom_apps_dir):\n            if filename.endswith('.cfg'):\n                config_files.add(os.path.join(custom_apps_dir, filename))\n                custom_files.add(filename)\n    for filename in os.listdir(apps_dir):\n        if (filename.endswith('.cfg') and (filename not in custom_files)):\n            config_files.add(os.path.join(apps_dir, filename))\n    return config_files", "docstring": "Return the application configuration files.\n\nReturn a list of configuration files describing the apps supported by\nMackup. The files return are absolute full path to those files.\ne.g. /usr/lib/mackup/applications/bash.cfg\n\nOnly one config file per application should be returned, custom config\nhaving a priority over stock config.\n\nReturns:\nset of strings.", "source": "codesearchnet"}
{"code": "def write_string(self, registeraddress, textstring, numberOfRegisters=16):\n    _checkInt(numberOfRegisters, minvalue=1, description='number of registers for write string')\n    _checkString(textstring, 'input string', minlength=1, maxlength=(2 * numberOfRegisters))\n    self._genericCommand(16, registeraddress, textstring, numberOfRegisters=numberOfRegisters, payloadformat='string')", "docstring": "Write a string to the slave.\n\nEach 16-bit register in the slave are interpreted as two characters (1 byte = 8 bits).\nFor example 16 consecutive registers can hold 32 characters (32 bytes).\n\nUses Modbus function code 16.\n\nArgs:\n* registeraddress (int): The slave register start address  (use decimal numbers, not hex).\n* textstring (str): The string to store in the slave\n* numberOfRegisters (int): The number of registers allocated for the string.\n\nIf the textstring is longer than the 2*numberOfRegisters, an error is raised.\nShorter strings are padded with spaces.\n\nReturns:\nNone\n\nRaises:\nValueError, TypeError, IOError", "source": "codesearchnet"}
{"code": "def create_volume(self, volume_name: str, driver_spec: str = None):\n        \n        \n        if driver_spec:\n            driver = driver_spec\n        else:\n            driver = 'local'\n\n        \n        if not self._manager:\n            raise RuntimeError('Services can only be deleted '\n                               'on swarm manager nodes')\n\n        self._client.volumes.create(name=volume_name, driver=driver)", "docstring": "Create new docker volumes.\n\nOnly the manager nodes can create a volume\n\nArgs:\nvolume_name (string): Name for the new docker volume\ndriver_spec (string): Driver for the docker volume", "source": "juraj-google-style"}
{"code": "def exponential(data):\n    data = np.hstack(([0.0], np.array(data)))\n    cumm = np.cumsum(data)\n\n    def cost(s, t):\n        ' Cost function for exponential distribution with changing mean\\n\\n        Args:\\n            start (int): start index\\n            end (int): end index\\n        Returns:\\n            float: Cost, from start to end\\n        '\n        return (((- 1) * (t - s)) * (np.log((t - s)) - np.log((cumm[t] - cumm[s]))))\n    return cost", "docstring": "Creates a segment cost function for a time series with a\nexponential distribution with changing mean\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "codesearchnet"}
{"code": "def correct_entry(self, entry):\n        \n        entry.correction.update(self.get_correction(entry))\n        return entry", "docstring": "Corrects a single entry.\n\nArgs:\nentry: A DefectEntry object.\n\nReturns:\nAn processed entry.\n\nRaises:\nCompatibilityError if entry is not compatible.", "source": "juraj-google-style"}
{"code": "def abs_path(rel_path):\n    \n    \n    return os.path.abspath(\n        os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path)\n    )", "docstring": "Convert a path that is relative to the module from which this function is called,\nto an absolute path.\n\nArgs:\nrel_path: str\nPath relative to the location of the module file from which this function is called.\n\nReturns:\nstr : Absolute path to the location specified by ``rel_path``.", "source": "juraj-google-style"}
{"code": "def add_to_collections(self, names, value) -> None:\n    names = (names,) if isinstance(names, str) else set(names)\n    for name in names:\n        self.add_to_collection(name, value)", "docstring": "Stores `value` in the collections given by `names`.\n\nNote that collections are not sets, so it is possible to add a value to\na collection several times. This function makes sure that duplicates in\n`names` are ignored, but it will not check for pre-existing membership of\n`value` in any of the collections in `names`.\n\n`names` can be any iterable, but if `names` is a string, it is treated as a\nsingle collection name.\n\nArgs:\nnames: The keys for the collections to add to. The `GraphKeys` class\ncontains many standard names for collections.\nvalue: The value to add to the collections.", "source": "github-repos"}
{"code": "def check_url_filetoupload(self):\n    if (self.file_to_upload is None):\n        if ('url' in self.data):\n            if ('resource_type' not in self.data):\n                self.data['resource_type'] = 'api'\n            if ('url_type' not in self.data):\n                self.data['url_type'] = 'api'\n        else:\n            raise HDXError('Either a url or a file to upload must be supplied!')\n    else:\n        if ('url' in self.data):\n            if (self.data['url'] != hdx.data.dataset.Dataset.temporary_url):\n                raise HDXError('Either a url or a file to upload must be supplied not both!')\n        if ('resource_type' not in self.data):\n            self.data['resource_type'] = 'file.upload'\n        if ('url_type' not in self.data):\n            self.data['url_type'] = 'upload'\n        if ('tracking_summary' in self.data):\n            del self.data['tracking_summary']", "docstring": "Check if url or file to upload provided for resource and add resource_type and url_type if not supplied\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def matchall(text, patterns):\n    ret = []\n    for pattern in patterns:\n        match = re.findall(pattern, text)\n        ret += match\n    return ret", "docstring": "Scans through a string for substrings matched some patterns.\n\nArgs:\ntext: A string to be scanned.\npatterns: a list of regex pattern.\n\nReturns:\na list if matched. empty if not.", "source": "codesearchnet"}
{"code": "def FindFileContainingSymbol(self, symbol):\n    symbol = _NormalizeFullyQualifiedName(symbol)\n    try:\n        return self._descriptors[symbol].file\n    except KeyError:\n        pass\n    try:\n        return self._enum_descriptors[symbol].file\n    except KeyError:\n        pass\n    try:\n        return self._FindFileContainingSymbolInDb(symbol)\n    except KeyError:\n        pass\n    try:\n        return self._file_desc_by_toplevel_extension[symbol]\n    except KeyError:\n        pass\n    (message_name, _, extension_name) = symbol.rpartition('.')\n    try:\n        message = self.FindMessageTypeByName(message_name)\n        assert message.extensions_by_name[extension_name]\n        return message.file\n    except KeyError:\n        raise KeyError(('Cannot find a file containing %s' % symbol))", "docstring": "Gets the FileDescriptor for the file containing the specified symbol.\n\nArgs:\nsymbol: The name of the symbol to search for.\n\nReturns:\nA FileDescriptor that contains the specified symbol.\n\nRaises:\nKeyError: if the file cannot be found in the pool.", "source": "codesearchnet"}
{"code": "def get_definition(self, task_name):\n        \n        r = self.gbdx_connection.get(self._base_url + '/' + task_name)\n        raise_for_status(r)\n\n        return r.json()", "docstring": "Gets definition of a registered GBDX task.\n\nArgs:\ntask_name (str): Task name.\n\nReturns:\nDictionary representing the task definition.", "source": "juraj-google-style"}
{"code": "def bundle_for_objs_and_resources(objs, resources):\n    \n    if isinstance(resources, BaseResources):\n        js_resources = css_resources = resources\n    elif isinstance(resources, tuple) and len(resources) == 2 and all(r is None or isinstance(r, BaseResources) for r in resources):\n        js_resources, css_resources = resources\n\n        if js_resources and not css_resources:\n            warn('No Bokeh CSS Resources provided to template. If required you will need to provide them manually.')\n\n        if css_resources and not js_resources:\n            warn('No Bokeh JS Resources provided to template. If required you will need to provide them manually.')\n    else:\n        raise ValueError(\"expected Resources or a pair of optional Resources, got %r\" % resources)\n\n    from copy import deepcopy\n\n    \n    use_widgets = _use_widgets(objs) if objs else True\n    use_tables  = _use_tables(objs)  if objs else True\n    use_gl      = _use_gl(objs)      if objs else True\n\n    if js_resources:\n        js_resources = deepcopy(js_resources)\n        if not use_widgets and \"bokeh-widgets\" in js_resources.js_components:\n            js_resources.js_components.remove(\"bokeh-widgets\")\n        if not use_tables and \"bokeh-tables\" in js_resources.js_components:\n            js_resources.js_components.remove(\"bokeh-tables\")\n        if not use_gl and \"bokeh-gl\" in js_resources.js_components:\n            js_resources.js_components.remove(\"bokeh-gl\")\n        bokeh_js = js_resources.render_js()\n    else:\n        bokeh_js = None\n\n    models = [ obj.__class__ for obj in _all_objs(objs) ] if objs else None\n    custom_bundle = bundle_models(models)\n\n    if custom_bundle is not None:\n        custom_bundle = wrap_in_script_tag(custom_bundle)\n\n        if bokeh_js is not None:\n            bokeh_js += \"\\n\" + custom_bundle\n        else:\n            bokeh_js = custom_bundle\n\n    if css_resources:\n        css_resources = deepcopy(css_resources)\n        if not use_widgets and \"bokeh-widgets\" in css_resources.css_components:\n            css_resources.css_components.remove(\"bokeh-widgets\")\n        if not use_tables and \"bokeh-tables\" in css_resources.css_components:\n            css_resources.css_components.remove(\"bokeh-tables\")\n        bokeh_css = css_resources.render_css()\n    else:\n        bokeh_css = None\n\n    return bokeh_js, bokeh_css", "docstring": "Generate rendered CSS and JS resources suitable for the given\ncollection of Bokeh objects\n\nArgs:\nobjs (seq[Model or Document]) :\n\nresources (BaseResources or tuple[BaseResources])\n\nReturns:\ntuple", "source": "juraj-google-style"}
{"code": "def GetAllSubClasses(ast):\n    hierarchy = ast.Visit(pytd_visitors.ExtractSuperClasses())\n    hierarchy = {cls: list(superclasses) for cls, superclasses in hierarchy.items()}\n    return utils.invert_dict(hierarchy)", "docstring": "Compute a class->subclasses mapping.\n\nArgs:\nast: Parsed PYTD.\n\nReturns:\nA dictionary, mapping instances of pytd.Type (types) to lists of\npytd.Class (the derived classes).", "source": "github-repos"}
{"code": "def calculate_row_format(columns, keys=None):\n    row_format = ''\n    if (keys is None):\n        keys = columns.keys()\n    else:\n        keys = [key for key in keys if (key in columns)]\n    for key in keys:\n        if (len(row_format) > 0):\n            row_format += '|'\n        row_format += ('%%(%s)-%ds' % (key, columns[key]))\n    return (('|' + row_format) + '|')", "docstring": "Calculate row format.\n\nArgs:\ncolumns (dict): the keys are the column name and the value the max length.\nkeys (list): optional list of keys to order columns as well as to filter for them.\n\nReturns:\nstr: format for table row", "source": "codesearchnet"}
{"code": "def adjust_brightness(img, brightness_factor):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    enhancer = ImageEnhance.Brightness(img)\n    img = enhancer.enhance(brightness_factor)\n    return img", "docstring": "Adjust brightness of an Image.\n\nArgs:\nimg (PIL Image): PIL Image to be adjusted.\nbrightness_factor (float):  How much to adjust the brightness. Can be\nany non negative number. 0 gives a black image, 1 gives the\noriginal image while 2 increases the brightness by a factor of 2.\n\nReturns:\nPIL Image: Brightness adjusted image.", "source": "codesearchnet"}
{"code": "def load(self, filename, offset):\n    self.offset = offset\n    self.filename = filename\n    self.bootsector = BootSector(filename=filename, length=NTFS_BOOTSECTOR_SIZE, offset=self.offset)\n    self.mft_table = MftTable(mft_entry_size=self.bootsector.mft_record_size, filename=self.filename, offset=self.mft_table_offset)\n    self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)\n    self._load_volume_information()", "docstring": "Loads NTFS volume information\n\nArgs:\nfilename (str): Path to file/device to read the volume \\\ninformation from.\noffset (uint): Valid NTFS partition offset from the beginning \\\nof the file/device.\n\nRaises:\nIOError: If source file/device does not exist or is not readable", "source": "codesearchnet"}
{"code": "def save_attributes_to_hdf5_group(group, name, data):\n    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n    if bad_attributes:\n        raise RuntimeError(f'The following attributes cannot be saved to HDF5 file because they are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}')\n    data_npy = np.asarray(data)\n    num_chunks = 1\n    chunked_data = np.array_split(data_npy, num_chunks)\n    while any((x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data)):\n        num_chunks += 1\n        chunked_data = np.array_split(data_npy, num_chunks)\n    if num_chunks > 1:\n        for chunk_id, chunk_data in enumerate(chunked_data):\n            group.attrs['%s%d' % (name, chunk_id)] = chunk_data\n    else:\n        group.attrs[name] = data", "docstring": "Saves attributes (data) of the specified name into the HDF5 group.\n\nThis method deals with an inherent problem of HDF5 file which is not\nable to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\ngroup: A pointer to a HDF5 group.\nname: A name of the attributes to save.\ndata: Attributes data to store.\n\nRaises:\nRuntimeError: If any single attribute is too large to be saved.", "source": "github-repos"}
{"code": "def random_subset_by_duration(self, relative_duration, balance_labels=False, label_list_ids=None):\n    total_duration = self.corpus.total_duration\n    subset_duration = (relative_duration * total_duration)\n    utterance_durations = {utt_idx: utt.duration for (utt_idx, utt) in self.corpus.utterances.items()}\n    if balance_labels:\n        all_label_values = self.corpus.all_label_values(label_list_ids=label_list_ids)\n        label_durations = {}\n        for (utt_idx, utt) in self.corpus.utterances.items():\n            label_durations[utt_idx] = utt.label_total_duration(label_list_ids)\n        subset_utterance_ids = utils.select_balanced_subset(label_durations, subset_duration, list(all_label_values), select_count_values=utterance_durations, seed=self.rand.random())\n    else:\n        dummy_weights = {utt_idx: {'w': 1} for utt_idx in self.corpus.utterances.keys()}\n        subset_utterance_ids = utils.select_balanced_subset(dummy_weights, subset_duration, ['w'], select_count_values=utterance_durations, seed=self.rand.random())\n    filter = subview.MatchingUtteranceIdxFilter(utterance_idxs=set(subset_utterance_ids))\n    return subview.Subview(self.corpus, filter_criteria=[filter])", "docstring": "Create a subview of random utterances with a approximate duration relative to the full corpus.\nRandom utterances are selected so that the sum of all utterance durations\nequals to the relative duration of the full corpus.\n\nArgs:\nrelative_duration (float): A value between 0 and 1. (e.g. 0.5 will create a subset with approximately\n50% of the full corpus duration)\nbalance_labels (bool): If True, the labels of the selected utterances are balanced as far as possible.\nSo the count/duration of every label within the subset is equal.\nlabel_list_ids (list): List of label-list ids. If none is given, all label-lists are considered\nfor balancing. Otherwise only the ones that are in the list are considered.\n\nReturns:\nSubview: The subview representing the subset.", "source": "codesearchnet"}
{"code": "def get_provider_fn_decorations(provider_fn, default_arg_names):\n    if hasattr(provider_fn, _IS_WRAPPER_ATTR):\n        provider_decorations = getattr(provider_fn, _PROVIDER_DECORATIONS_ATTR)\n        if provider_decorations:\n            expanded_provider_decorations = []\n            for provider_decoration in provider_decorations:\n                if (provider_decoration.in_scope_id is None):\n                    provider_decoration.in_scope_id = scoping.DEFAULT_SCOPE\n                if (provider_decoration.arg_name is not None):\n                    expanded_provider_decorations.append(provider_decoration)\n                else:\n                    expanded_provider_decorations.extend([ProviderDecoration(default_arg_name, provider_decoration.annotated_with, provider_decoration.in_scope_id) for default_arg_name in default_arg_names])\n            return expanded_provider_decorations\n    return [ProviderDecoration(default_arg_name, annotated_with=None, in_scope_id=scoping.DEFAULT_SCOPE) for default_arg_name in default_arg_names]", "docstring": "Retrieves the provider method-relevant info set by decorators.\n\nIf any info wasn't set by decorators, then defaults are returned.\n\nArgs:\nprovider_fn: a (possibly decorated) provider function\ndefault_arg_names: the (possibly empty) arg names to use if none were\nspecified via @provides()\nReturns:\na sequence of ProviderDecoration", "source": "codesearchnet"}
{"code": "def __init__(self, x=0, y=0):\n        \n        self._ptr = ffi.new('SDL_Point *', [x, y])", "docstring": "Construct a new point.\n\nArgs:\nx (int): The x position of the point.\ny (int): The y position of the point.", "source": "juraj-google-style"}
{"code": "def merge_summaries(prev_summary, next_summary, epsilon):\n    merged = np.concatenate((prev_summary, next_summary), axis=1)\n    merged = np.take(merged, np.argsort(merged[0]), axis=1)\n    return compress_summary(merged, epsilon)", "docstring": "Weighted merge sort of summaries.\n\nGiven two summaries of distinct data, this function merges (and compresses)\nthem to stay within `epsilon` error tolerance.\n\nArgs:\nprev_summary: 2D `np.ndarray` summary to be merged with `next_summary`.\nnext_summary: 2D `np.ndarray` summary to be merged with `prev_summary`.\nepsilon: A float that determines the approximate desired precision.\n\nReturns:\nA 2-D `np.ndarray` that is a merged summary. First column is the\ninterpolated partition values, the second is the weights (counts).", "source": "github-repos"}
{"code": "def backend():\n    return _BACKEND", "docstring": "Publicly accessible method for determining the current backend.\n\nReturns:\nString, the name of the backend Keras is currently using. One of\n`\"tensorflow\"`, `\"torch\"`, or `\"jax\"`.\n\nExample:\n\n>>> keras.config.backend()\n'tensorflow'", "source": "github-repos"}
{"code": "def write_zip_data(self, temp_parfile, stored_resources):\n    logging.debug('Storing Files...')\n    with contextlib.closing(zipfile.ZipFile(temp_parfile, 'w', self.compression)) as z:\n        items = sorted(stored_resources.items())\n        for relative_path, resource in items:\n            assert resource.zipinfo.filename == relative_path\n            resource.store(z)", "docstring": "Write the second part of a parfile, consisting of ZIP data\n\nArgs:\nstored_resources: A dictionary mapping relative path to the\ncontent to store at that path.", "source": "github-repos"}
{"code": "def add_map(self, counters_map):\n    for counter_name in counters_map.counters:\n        self.increment(counter_name, counters_map.counters[counter_name])", "docstring": "Add all counters from the map.\n\nFor each counter in the passed map, adds its value to the counter in this\nmap.\n\nArgs:\ncounters_map: CounterMap instance to add.", "source": "codesearchnet"}
{"code": "def _ReportSameIdButNotMerged(self, entity_id, reason):\n    \n    self.feed_merger.problem_reporter.SameIdButNotMerged(self,\n                                                         entity_id,\n                                                         reason)", "docstring": "Report that two entities have the same id but could not be merged.\n\nArgs:\nentity_id: The id of the entities.\nreason: A string giving a reason why they could not be merged.", "source": "juraj-google-style"}
{"code": "def convert_hardtanh(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting hardtanh (clip) ...')\n\n    def target_layer(x, max_val=float(params['max_val']), min_val=float(params['min_val'])):\n        return tf.minimum(max_val, tf.maximum(min_val, x))\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert hardtanh layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def serialize_attrs(self, *args):\n        \n        \n        cls = type(self)\n        result = {}\n        \n        \n        \n        \n        \n        \n        for a in args:\n            if hasattr(cls, a) and a not in cls.attrs_forbidden_for_serialization():\n                val = getattr(self, a)\n                if is_list_like(val):\n                    result[a] = list(val)\n                else:\n                    result[a] = val\n        return result", "docstring": "Converts and instance to a dictionary with only the specified\nattributes as keys\n\nArgs:\n*args (list): The attributes to serialize\n\nExamples:\n\n>>> customer = Customer.create(name=\"James Bond\", email=\"007@mi.com\",\nphone=\"007\", city=\"London\")\n>>> customer.serialize_attrs('name', 'email')\n{'name': u'James Bond', 'email': u'007@mi.com'}", "source": "juraj-google-style"}
{"code": "def find_stable_entry(self, pH, V):\n        \n        energies_at_conditions = [e.normalized_energy_at_conditions(pH, V)\n                                  for e in self.stable_entries]\n        return self.stable_entries[np.argmin(energies_at_conditions)]", "docstring": "Finds stable entry at a pH,V condition\nArgs:\npH (float): pH to find stable entry\nV (float): V to find stable entry\n\nReturns:", "source": "juraj-google-style"}
{"code": "def get_formatted_as_type(self, value, default=None, out_type=str):\n        \n        if value is None:\n            value = default\n\n        if isinstance(value, SpecialTagDirective):\n            result = value.get_value(self)\n            return types.cast_to_type(result, out_type)\n        if isinstance(value, str):\n            result = self.get_formatted_string(value)\n            result_type = type(result)\n            if out_type is result_type:\n                \n                return result\n            elif out_type is bool and result_type is str:\n                \n                \n                \n                return result.lower() in ['true', '1', '1.0']\n            else:\n                return out_type(result)\n        else:\n            return out_type(value)", "docstring": "Return formatted value for input value, returns as out_type.\n\nCaveat emptor: if out_type is bool and value a string,\nreturn will be True if str is 'True'. It will be False for all other\ncases.\n\nArgs:\nvalue: the value to format\ndefault: if value is None, set to this\nout_type: cast return as this type\n\nReturns:\nFormatted value of type out_type", "source": "juraj-google-style"}
{"code": "def _AlignUncompressedDataOffset(self, uncompressed_data_offset):\n    \n    self._file_object.seek(0, os.SEEK_SET)\n\n    self._decompressor = self._GetDecompressor()\n    self._uncompressed_data = b''\n\n    compressed_data_offset = 0\n    compressed_data_size = self._file_object.get_size()\n\n    while compressed_data_offset < compressed_data_size:\n      read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)\n      if read_count == 0:\n        break\n\n      compressed_data_offset += read_count\n\n      if uncompressed_data_offset < self._uncompressed_data_size:\n        self._uncompressed_data_offset = uncompressed_data_offset\n        break\n\n      uncompressed_data_offset -= self._uncompressed_data_size", "docstring": "Aligns the compressed file with the uncompressed data offset.\n\nArgs:\nuncompressed_data_offset (int): uncompressed data offset.", "source": "juraj-google-style"}
{"code": "def get_effective_ecs(self, strain, order=2):\n    ec_sum = 0\n    for (n, ecs) in enumerate(self[(order - 2):]):\n        ec_sum += (ecs.einsum_sequence(([strain] * n)) / factorial(n))\n    return ec_sum", "docstring": "Returns the effective elastic constants\nfrom the elastic tensor expansion.\n\nArgs:\nstrain (Strain or 3x3 array-like): strain condition\nunder which to calculate the effective constants\norder (int): order of the ecs to be returned", "source": "codesearchnet"}
{"code": "def excel_to_dict(excel_filepath, encapsulate_filepath=False, **kwargs):\n    \n    result = {}\n    try:\n        callbacks = {'to_dictlist': excel_todictlist}  \n        callbacks.update(kwargs.get('alt_callbacks', {}))\n\n        \n        excel_data = callbacks.get('to_dictlist')(excel_filepath, **kwargs)\n        for sheet in excel_data.keys():\n            try:\n                kwargs['rows'] = excel_data.get(sheet, [])\n                result[sheet] = csv_to_dict(excel_filepath, **kwargs)\n            except Exception as ex:\n                logger.error('Fail to parse sheet {} - {}'.format(sheet, ex))\n                result[sheet] = []\n                continue\n\n        if encapsulate_filepath:\n            result = {excel_filepath: result}\n\n    except Exception as ex:\n        msg = 'Fail transform excel to dict - {}'.format(ex)\n        logger.error(msg, excel_filepath=excel_filepath)\n\n    return result", "docstring": "Turn excel into dict.\nArgs:\n:excel_filepath: path to excel file to turn into dict.\n:limits: path to csv file to turn into dict", "source": "juraj-google-style"}
{"code": "def reqTickers(self, *contracts: List[Contract], regulatorySnapshot: bool=False) -> List[Ticker]:\n    return self._run(self.reqTickersAsync(*contracts, regulatorySnapshot=regulatorySnapshot))", "docstring": "Request and return a list of snapshot tickers.\nThe list is returned when all tickers are ready.\n\nThis method is blocking.\n\nArgs:\ncontracts: Contracts to get tickers for.\nregulatorySnapshot: Request NBBO snapshots (may incur a fee).", "source": "codesearchnet"}
{"code": "def delete(self, file_path, branch, commit_message, **kwargs):\n    path = ('%s/%s' % (self.path, file_path.replace('/', '%2F')))\n    data = {'branch': branch, 'commit_message': commit_message}\n    self.gitlab.http_delete(path, query_data=data, **kwargs)", "docstring": "Delete a file on the server.\n\nArgs:\nfile_path (str): Path of the file to remove\nbranch (str): Branch from which the file will be removed\ncommit_message (str): Commit message for the deletion\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def info(self, **kwargs):\n    path = self._get_path('info')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the system wide configuration info.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def content(self):\n    if (not self._content_data):\n        if is_seekable(self.file):\n            with wpull.util.reset_file_offset(self.file):\n                self._content_data = self.file.read()\n        else:\n            self._content_data = self.file.read()\n    return self._content_data", "docstring": "Return the content of the file.\n\nIf this function is invoked, the contents of the entire file is read\nand cached.\n\nReturns:\n``bytes``: The entire content of the file.", "source": "codesearchnet"}
{"code": "def check_spec(self, pos_args, kwargs=None):\n    if (kwargs is None):\n        kwargs = {}\n    if ((self.varargs is not None) or (self.kwargs is not None)):\n        raise InternalError('check_spec cannot be called on a function that takes *args or **kwargs')\n    missing = object()\n    arg_vals = ([missing] * len(self.arg_names))\n    kw_indices = {name: i for (i, name) in enumerate(self.arg_names)}\n    for (i, arg) in enumerate(pos_args):\n        if (i >= len(arg_vals)):\n            raise ArgumentError(('Too many positional arguments, first excessive argument=%s' % str(arg)))\n        arg_vals[i] = arg\n    for (arg, val) in kwargs.items():\n        index = kw_indices.get(arg)\n        if (index is None):\n            raise ArgumentError(('Cannot find argument by name: %s' % arg))\n        if (arg_vals[index] is not missing):\n            raise ValidationError(('Argument %s passed twice' % arg))\n        arg_vals[index] = val\n    if (len(self.arg_defaults) > 0):\n        for i in range(0, len(self.arg_defaults)):\n            neg_index = ((- len(self.arg_defaults)) + i)\n            if (arg_vals[neg_index] is missing):\n                arg_vals[neg_index] = self.arg_defaults[i]\n    if (missing in arg_vals):\n        index = arg_vals.index(missing)\n        raise ArgumentError(('Missing a required argument (position: %d, name: %s)' % (index, self.arg_names[index])))\n    return {name: val for (name, val) in zip(self.arg_names, arg_vals)}", "docstring": "Check if there are any missing or duplicate arguments.\n\nArgs:\npos_args (list): A list of arguments that will be passed as positional\narguments.\nkwargs (dict): A dictionary of the keyword arguments that will be passed.\n\nReturns:\ndict: A dictionary of argument name to argument value, pulled from either\nthe value passed or the default value if no argument is passed.\n\nRaises:\nArgumentError: If a positional or keyword argument does not fit in the spec.\nValidationError: If an argument is passed twice.", "source": "codesearchnet"}
{"code": "def _is_device_list_single_worker(devices):\n    specs = []\n    for d in devices:\n        name = d.name if isinstance(d, context.LogicalDevice) else d\n        specs.append(tf_device.DeviceSpec.from_string(name))\n    num_workers = len({(d.job, d.task, d.replica) for d in specs})\n    all_local = all((d.job in (None, 'localhost') for d in specs))\n    any_local = any((d.job in (None, 'localhost') for d in specs))\n    if any_local and (not all_local):\n        raise ValueError(\"Local device should have only 'localhost' in the job field in device string. E.g. 'job:localhost' in /job:localhost/replica:0/task:0/device:CPU:0Devices cannot have mixed list of device strings containing both localhost and other job types such as worker, ps etc. \")\n    if num_workers == 1 and (not all_local):\n        if any((d.task is None for d in specs)):\n            raise ValueError(\"Remote device string must have task specified.E.g. 'task:0' in /job:worker/replica:0/task:0/device:CPU:0\")\n    return num_workers == 1", "docstring": "Checks whether the devices list is for single or multi-worker.\n\nArgs:\ndevices: a list of device strings or tf.config.LogicalDevice objects, for\neither local or for remote devices.\n\nReturns:\na boolean indicating whether these device strings are for local or for\nremote.\n\nRaises:\nValueError: if device strings are not consistent.", "source": "github-repos"}
{"code": "def save_target_classes_for_batch(self,\n                                    filename,\n                                    image_batches,\n                                    batch_id):\n    \n    images = image_batches.data[batch_id]['images']\n    with open(filename, 'w') as f:\n      for image_id, image_val in iteritems(images):\n        target_class = self.get_target_class(image_val['dataset_image_id'])\n        f.write('{0}.png,{1}\\n'.format(image_id, target_class))", "docstring": "Saves file with target class for given dataset batch.\n\nArgs:\nfilename: output filename\nimage_batches: instance of ImageBatchesBase with dataset batches\nbatch_id: dataset batch ID", "source": "juraj-google-style"}
{"code": "def close(self, file_des):\n        \n        file_handle = self.filesystem.get_open_file(file_des)\n        file_handle.close()", "docstring": "Close a file descriptor.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\n\nRaises:\nOSError: bad file descriptor.\nTypeError: if file descriptor is not an integer.", "source": "juraj-google-style"}
{"code": "def save_exported_model(self, dst_saved_model_path: str, exported_model_serialized: bytes, src_saved_model_path: str, tags: set[str], serialized_signature_def_map: dict[str, bytes]) -> Optional[bool]:\n    exported_model = exported_model_pb2.ExportedModel.FromString(exported_model_serialized)\n    signature_def_map = {}\n    for key, serialized_signature_def in serialized_signature_def_map.items():\n        signature_def_map[key] = meta_graph_pb2.SignatureDef.FromString(serialized_signature_def)\n    return _call_and_return_none_on_error(func=functools.partial(_save_model_and_copy_assets, exported_model, src_saved_model_path, dst_saved_model_path, signature_def_map, tags), error_msg=f'Failed to save model \"{dst_saved_model_path}\", signature_def_map: {signature_def_map}, tags: {tags}.')", "docstring": "Saves `ExportedModel` to `dst_saved_model_path` as a SavedModel.\n\nArgs:\ndst_saved_model_path: Destination path to save the exported model.\nexported_model_serialized: Exported model to export as SavedModel.\nsrc_saved_model_path: Path to the source SavedModel. This will be used to\ncopy the asset files to `dst_saved_model_path`.\ntags: Tags to attach to the saved MetaGraphDef.\nserialized_signature_def_map: Signature key -> serialized SignatureDef.\n\nReturns:\n`True` upon successful execution. `None` when an error is raised\ninternally.", "source": "github-repos"}
{"code": "def ParseCloudEntryRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):\n    query_hash = hash(query)\n    parent_resource_id = self._GetRowValue(query_hash, row, 'parent_resource_id')\n    filename = self._GetRowValue(query_hash, row, 'filename')\n    cloud_path = self.GetCloudPath(parent_resource_id, cache, database)\n    cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename)\n    event_data = GoogleDriveSnapshotCloudEntryEventData()\n    event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type')\n    event_data.path = cloud_filename\n    event_data.query = query\n    event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared'))\n    event_data.size = self._GetRowValue(query_hash, row, 'size')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    timestamp = self._GetRowValue(query_hash, row, 'modified')\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n    timestamp = self._GetRowValue(query_hash, row, 'created')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a cloud entry row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (SQLiteCache): cache.\ndatabase (SQLiteDatabase): database.", "source": "codesearchnet"}
{"code": "def poisson_objective(X, m, w):\n    \n    clusters, cells = w.shape\n    genes = X.shape[0]\n    \n    d = m.dot(w)+eps\n    \n    \n    \n    \n    return np.sum(d - X*np.log(d))/genes", "docstring": "Creates an objective function and its derivative for M, given W and X\nArgs:\nw (array): clusters x cells\nX (array): genes x cells\nselected_genes (array): array of ints - genes to be selected", "source": "juraj-google-style"}
{"code": "def InitializeDebuggeeLabels(self, flags):\n    \n    self._debuggee_labels = {}\n\n    for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):\n      \n      \n      for name in var_names:\n        value = os.environ.get(name)\n        if value:\n          \n          \n          if label == labels.Debuggee.MODULE and value == 'default':\n            break\n          self._debuggee_labels[label] = value\n          break\n\n    if flags:\n      self._debuggee_labels.update(\n          {name: value for (name, value) in six.iteritems(flags)\n           if name in _DEBUGGEE_LABELS})\n\n    self._debuggee_labels['projectid'] = self._project_id", "docstring": "Initialize debuggee labels from environment variables and flags.\n\nThe caller passes all the flags that the the debuglet got. This function\nwill only use the flags used to label the debuggee. Flags take precedence\nover environment variables.\n\nDebuggee description is formatted from available flags.\n\nArgs:\nflags: dictionary of debuglet command line flags.", "source": "juraj-google-style"}
{"code": "def probe_async(self, callback):\n    topics = MQTTTopicValidator(self.prefix)\n    self.client.publish(topics.probe, {'type': 'command', 'operation': 'probe', 'client': self.name})\n    callback(self.id, True, None)", "docstring": "Probe for visible devices connected to this DeviceAdapter.\n\nArgs:\ncallback (callable): A callback for when the probe operation has completed.\ncallback should have signature callback(adapter_id, success, failure_reason) where:\nsuccess: bool\nfailure_reason: None if success is True, otherwise a reason for why we could not probe", "source": "codesearchnet"}
{"code": "def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):\n    flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)\n    is_none_list = []\n\n    def while_body(i, *ta_list):\n        \n        fn_conv = autograph.tf_convert(loop_fn, autograph_ctx.control_status_ctx())\n        fn_output = nest.flatten(fn_conv(i))\n        if len(fn_output) != len(flat_loop_fn_dtypes):\n            raise ValueError(f'Number of expected outputs {len(flat_loop_fn_dtypes)}, does not match the number of actual outputs {len(fn_output)} from loop_fn: {loop_fn} with output {fn_output}.')\n        outputs = []\n        del is_none_list[:]\n        is_none_list.extend((x is None for x in fn_output))\n        for out, ta in zip(fn_output, ta_list):\n            if out is not None:\n                ta = ta.write(i, out)\n            outputs.append(ta)\n        return tuple([i + 1] + outputs)\n    if parallel_iterations is not None:\n        extra_args = {'parallel_iterations': parallel_iterations}\n    else:\n        extra_args = {}\n    ta_list = while_loop.while_loop(lambda i, *ta: i < iters, while_body, [0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters) for dtype in flat_loop_fn_dtypes], **extra_args)[1:]\n    output = [None if is_none else ta.stack() for ta, is_none in zip(ta_list, is_none_list)]\n    assert len(output) in (0, len(flat_loop_fn_dtypes))\n    if not output:\n        loop_var = array_ops.placeholder_with_default(0, shape=[])\n        try:\n            loop_fn_out = loop_fn(loop_var)\n            out_shapes = [[0] + ops.convert_to_tensor(x).shape for x in nest.flatten(loop_fn_out)]\n            output = [array_ops.zeros(out_shapes[i], dt) for i, dt in enumerate(flat_loop_fn_dtypes)]\n        except Exception:\n            output = [array_ops.zeros([0])]\n    return nest.pack_sequence_as(loop_fn_dtypes, output)", "docstring": "Runs `loop_fn` `iters` times and stacks the outputs.\n\n\nRuns `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and\nstacks corresponding outputs of the different runs.\n\nArgs:\nloop_fn: A function that takes an int32 scalar tf.Tensor object representing\nthe iteration number, and returns a possibly nested structure of tensor\nobjects. The shape of these outputs should not depend on the input.\nloop_fn_dtypes: dtypes for the outputs of `loop_fn`.\niters: Number of iterations for which to run `loop_fn`.\nparallel_iterations: The number of iterations that can be dispatched in\nparallel. This knob can be used to control the total memory usage.\n\nReturns:\nReturns a nested structure of stacked output tensor objects with the same\nnested structure as the output of `loop_fn`.", "source": "github-repos"}
{"code": "def write_to_file_by_name(folder, fname, data, mkdir=False):\n    if (not os.path.isdir(folder)):\n        if mkdir:\n            preparedir(folder)\n        else:\n            created = preparedir(folder, False)\n            if (not created):\n                raise ValueError(('Failed to find %s.' % folder))\n    file_path = os.path.join(folder, fname)\n    with open(file_path, writemode) as outf:\n        try:\n            outf.write(str(data))\n            return file_path\n        except Exception as e:\n            raise IOError(('Failed to write %s to file:\\n\\t%s' % (fname, str(e))))", "docstring": "Write a string of data to file by filename and folder.\n\nArgs:\nfolder: Target folder (e.g. c:/ladybug).\nfname: File name (e.g. testPts.pts).\ndata: Any data as string.\nmkdir: Set to True to create the directory if doesn't exist (Default: False).", "source": "codesearchnet"}
{"code": "def _add_string_to_commastring(self, field, string):\n    if (string in self._get_stringlist_from_commastring(field)):\n        return False\n    strings = ('%s,%s' % (self.data.get(field, ''), string))\n    if (strings[0] == ','):\n        strings = strings[1:]\n    self.data[field] = strings\n    return True", "docstring": "Add a string to a comma separated list of strings\n\nArgs:\nfield (str): Field containing comma separated list\nstring (str): String to add\n\nReturns:\nbool: True if string added or False if string already present", "source": "codesearchnet"}
{"code": "def update_hash_with_array(hash_value, int_array):\n    if int_array is not None:\n        for i in int_array:\n            hash_value = update_hash_with_primitive_value(hash_value, i)\n    return hash_value", "docstring": "Update the hash value using a TFLite int array.\n\nArgs:\nhash_value (int): The current hash value.\nint_array: A TFLite int array to incorporate into the hash.\n\nReturns:\nint: The updated hash value.", "source": "github-repos"}
{"code": "def DisplayTree(node, children, level=0):\n    value = ''\n    node_type = ''\n    if ('caseValue' in node):\n        case_value = node['caseValue']\n        node_type = case_value['ProductDimension.Type']\n        if (node_type == 'ProductCanonicalCondition'):\n            value = (case_value['condition'] if ('condition' in case_value) else 'OTHER')\n        elif (node_type == 'ProductBiddingCategory'):\n            value = ('%s(%s)' % (case_value['type'], (case_value['value'] if ('value' in case_value) else 'OTHER')))\n        else:\n            value = (case_value['value'] if ('value' in case_value) else 'OTHER')\n    print(('%sid: %s, node_type: %s, value: %s\\n' % ((' ' * level), node['id'], node_type, value)))\n    for child_node in children[node['id']]:\n        DisplayTree(child_node, children, (level + 1))", "docstring": "Recursively display a node and each of its children.\n\nArgs:\nnode: The node we're displaying the children of.\nchildren: Children of the parent node.\nlevel: How deep in the tree we are.", "source": "codesearchnet"}
{"code": "def get_block(self, block_name):\n        \n        \n        \n        \n\n        return self.new(self.data.loc[(block_name, slice(None)), :])", "docstring": "getblock 获取板块, block_name是list或者是单个str\n\nArguments:\nblock_name {[type]} -- [description]\n\nReturns:\n[type] -- [description]", "source": "juraj-google-style"}
{"code": "def year(self, value=None):\n    if (value is not None):\n        try:\n            value = int(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type int for field `year`'.format(value))\n    self._year = value", "docstring": "Corresponds to IDD Field `year`\n\nArgs:\nvalue (int): value for IDD Field `year`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def db010(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `db010`'.format(value))\n\n        self._db010 = value", "docstring": "Corresponds to IDD Field `db010`\nDry-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence (warm conditions)\n\nArgs:\nvalue (float): value for IDD Field `db010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def GetAPIScope(api_name):\n  \n  try:\n    return SCOPES[api_name]\n  except KeyError:\n    raise googleads.errors.GoogleAdsValueError(\n        'Invalid API name \"%s\" provided. Acceptable values are: %s' %\n        (api_name, SCOPES.keys()))", "docstring": "Retrieves the scope for the given API name.\n\nArgs:\napi_name: A string identifying the name of the API we want to retrieve a\nscope for.\n\nReturns:\nA string that is the scope for the given API name.\n\nRaises:\nGoogleAdsValueError: If the given api_name is invalid; accepted values are\n\"adwords\" and \"ad_manager\".", "source": "juraj-google-style"}
{"code": "def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):\n    assert not (input_ids is None and inputs_embeds is None)\n    if input_ids is not None:\n        check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n    input_shape = shape_list(inputs_embeds)[:-1]\n    if position_ids is None:\n        if input_ids is not None:\n            position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids)\n        else:\n            position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0)\n    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n    final_embeddings = inputs_embeds + position_embeds\n    final_embeddings = self.LayerNorm(inputs=final_embeddings)\n    final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n    return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"}
{"code": "def GetAllPluginInformation(cls, show_all=True):\n    \n    results = []\n    for plugin_class in iter(cls._plugin_classes.values()):\n      plugin_object = plugin_class()\n      if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:\n        continue\n\n      \n      doc_string, _, _ = plugin_class.__doc__.partition('\\n')\n      type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)\n      information_tuple = (plugin_object.plugin_name, doc_string, type_string)\n      results.append(information_tuple)\n\n    return sorted(results)", "docstring": "Retrieves a list of the registered analysis plugins.\n\nArgs:\nshow_all (Optional[bool]): True if all analysis plugin names should\nbe listed.\n\nReturns:\nlist[tuple[str, str, str]]: the name, docstring and type string of each\nanalysis plugin in alphabetical order.", "source": "juraj-google-style"}
{"code": "def add_oxidation_state_by_site(self, oxidation_states):\n    if (len(oxidation_states) != len(self.sites)):\n        raise ValueError('Oxidation states of all sites must be specified.')\n    for (site, ox) in zip(self.sites, oxidation_states):\n        new_sp = {}\n        for (el, occu) in site.species.items():\n            sym = el.symbol\n            new_sp[Specie(sym, ox)] = occu\n        site.species = new_sp", "docstring": "Add oxidation states to a structure by site.\n\nArgs:\noxidation_states (list): List of oxidation states.\nE.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]", "source": "codesearchnet"}
{"code": "def copy_submission_to_destination(self, src_filename, dst_subdir,\n                                     submission_id):\n    \n\n    extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]\n    if len(extension) != 1:\n      logging.error('Invalid submission extension: %s', src_filename)\n      return\n    dst_filename = os.path.join(self.target_dir, dst_subdir,\n                                submission_id + extension[0])\n    cmd = ['gsutil', 'cp', src_filename, dst_filename]\n    if subprocess.call(cmd) != 0:\n      logging.error('Can\\'t copy submission to destination')\n    else:\n      logging.info('Submission copied to: %s', dst_filename)", "docstring": "Copies submission to target directory.\n\nArgs:\nsrc_filename: source filename of the submission\ndst_subdir: subdirectory of the target directory where submission should\nbe copied to\nsubmission_id: ID of the submission, will be used as a new\nsubmission filename (before extension)", "source": "juraj-google-style"}
{"code": "def _create_batch(signer, transactions):\n    \n    txn_ids = [txn.header_signature for txn in transactions]\n    batch_header = BatchHeader(\n        signer_public_key=signer.get_public_key().as_hex(),\n        transaction_ids=txn_ids).SerializeToString()\n\n    return Batch(\n        header=batch_header,\n        header_signature=signer.sign(batch_header),\n        transactions=transactions)", "docstring": "Creates a batch from a list of transactions and a public key, and signs\nthe resulting batch with the given signing key.\n\nArgs:\nsigner (:obj:`Signer`): The cryptographic signer\ntransactions (list of `Transaction`): The transactions to add to the\nbatch.\n\nReturns:\n`Batch`: The constructed and signed batch.", "source": "juraj-google-style"}
{"code": "def plot_spectra_pieces_pdf(ss, aint=10, pdf_filename='pieces.pdf', setup=_default_setup):\n    import f311.explorer as ex\n    (xmin, xmax, ymin_, ymax, _, yspan) = calc_max_min(ss)\n    ymin = (ymin_ if (setup.ymin is None) else setup.ymin)\n    num_pages = int(math.ceil(((xmax - xmin) / aint)))\n    a99.format_BLB()\n    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)\n    logger = a99.get_python_logger()\n    for h in range(num_pages):\n        fig = plt.figure()\n        lambda0 = (xmin + (h * aint))\n        lambda1 = (lambda0 + aint)\n        logger.info('Printing page {0:d}/{1:d} ([{2:g}, {3:g}])'.format((h + 1), num_pages, lambda0, lambda1))\n        for (i, s) in enumerate(ss):\n            s_cut = ex.cut_spectrum(s, lambda0, lambda1)\n            ax = plt.gca()\n            ax.plot(s_cut.x, s_cut.y, label=s.title)\n        if (setup.flag_xlabel and setup.fmt_xlabel):\n            plt.xlabel('Wavelength (interval: [{0:g}, {1:g}])'.format(lambda0, lambda1))\n        xspan = (lambda1 - lambda0)\n        ax.set_xlim([(lambda0 - (xspan * _T)), (lambda1 + (xspan * _T))])\n        ax.set_ylim([(ymin - (yspan * _T)), (ymax + (yspan * _T))])\n        if setup.flag_legend:\n            leg = plt.legend(loc=0)\n            a99.format_legend(leg)\n        plt.tight_layout()\n        pdf.savefig(fig)\n        plt.close()\n    pdf.close()\n    logger.info('File {0!s} successfully created.'.format(pdf_filename))", "docstring": "Plots spectra, overlapped, in small wavelength intervals into a PDF file,\none interval per page of the PDF file.\n\nArgs:\nss: list of Spectrum objects\naint: wavelength interval for each plot\npdf_filename: name of output file\nsetup: PlotSpectrumSetup object\n\n**Note** overrides setup.fmt_xlabel; leaves y-labell and title blank", "source": "codesearchnet"}
{"code": "def remove_token(self, *, payer_id, credit_card_token_id):\n        \n        payload = {\n            \"language\": self.client.language.value,\n            \"command\": PaymentCommand.REMOVE_TOKEN.value,\n            \"merchant\": {\n                \"apiLogin\": self.client.api_login,\n                \"apiKey\": self.client.api_key\n            },\n            \"removeCreditCardToken\": {\n                \"payerId\": payer_id,\n                \"creditCardTokenId\": credit_card_token_id\n            },\n            \"test\": self.client.is_test\n        }\n        return self.client._post(self.url, json=payload)", "docstring": "This feature allows you to delete a tokenized credit card register.\n\nArgs:\npayer_id:\ncredit_card_token_id:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def coord2healpix(coords, frame, nside, nest=True):\n    if (coords.frame.name != frame):\n        c = coords.transform_to(frame)\n    else:\n        c = coords\n    if hasattr(c, 'ra'):\n        phi = c.ra.rad\n        theta = ((0.5 * np.pi) - c.dec.rad)\n        return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)\n    elif hasattr(c, 'l'):\n        phi = c.l.rad\n        theta = ((0.5 * np.pi) - c.b.rad)\n        return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)\n    elif hasattr(c, 'x'):\n        return hp.pixelfunc.vec2pix(nside, c.x.kpc, c.y.kpc, c.z.kpc, nest=nest)\n    elif hasattr(c, 'w'):\n        return hp.pixelfunc.vec2pix(nside, c.w.kpc, c.u.kpc, c.v.kpc, nest=nest)\n    else:\n        raise dustexceptions.CoordFrameError('No method to transform from coordinate frame \"{}\" to HEALPix.'.format(frame))", "docstring": "Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix\nsystem is defined on the coordinate frame ``frame``.\n\nArgs:\ncoords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates.\nframe (:obj:`str`): The frame in which the HEALPix system is defined.\nnside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2.\nnest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering\nis desired. ``False`` for ring ordering.\n\nReturns:\nAn array of pixel indices (integers), with the same shape as the input\nSkyCoord coordinates (:obj:`coords.shape`).\n\nRaises:\n:obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported.", "source": "codesearchnet"}
{"code": "def __init__(self, path):\n        \n        \n        if isinstance(path, list):\n            path = os.path.join(*path)\n\n        \n        \n        self.path = Path(path).resolve()\n\n        \n        if not self.path.is_dir():\n            log.error(\"No path exists at {}\".format(self.path))\n            err_msg = \"Path '{}' is not a directory.\".format(self.path)\n            raise NotADirectoryError(err_msg)\n\n        log.info(\"%d Serving static files out of %s\" % (id(self), self.path))", "docstring": "Construct Static method.\nArgs:\npath (str or list): The directory path to search for files.\nIf this is a list, the paths will be path-joined\nautomatically.", "source": "juraj-google-style"}
{"code": "def get_ticks(self):\n    tick_distance = []\n    tick_labels = []\n    previous_label = self._bs.qpoints[0].label\n    previous_branch = self._bs.branches[0]['name']\n    for (i, c) in enumerate(self._bs.qpoints):\n        if (c.label is not None):\n            tick_distance.append(self._bs.distance[i])\n            this_branch = None\n            for b in self._bs.branches:\n                if (b['start_index'] <= i <= b['end_index']):\n                    this_branch = b['name']\n                    break\n            if ((c.label != previous_label) and (previous_branch != this_branch)):\n                label1 = c.label\n                if (label1.startswith('\\\\') or (label1.find('_') != (- 1))):\n                    label1 = (('$' + label1) + '$')\n                label0 = previous_label\n                if (label0.startswith('\\\\') or (label0.find('_') != (- 1))):\n                    label0 = (('$' + label0) + '$')\n                tick_labels.pop()\n                tick_distance.pop()\n                tick_labels.append(((label0 + '$\\\\mid$') + label1))\n            elif (c.label.startswith('\\\\') or (c.label.find('_') != (- 1))):\n                tick_labels.append((('$' + c.label) + '$'))\n            else:\n                tick_labels.append(c.label)\n            previous_label = c.label\n            previous_branch = this_branch\n    return {'distance': tick_distance, 'label': tick_labels}", "docstring": "Get all ticks and labels for a band structure plot.\n\nReturns:\nA dict with 'distance': a list of distance at which ticks should\nbe set and 'label': a list of label for each of those ticks.", "source": "codesearchnet"}
{"code": "def temp45(msg):\n    \n    d = hex2bin(data(msg))\n\n    sign = int(d[16])\n    value = bin2int(d[17:26])\n\n    if sign:\n        value = value - 512\n\n    temp = value * 0.25   \n    temp = round(temp, 1)\n\n    return temp", "docstring": "Static air temperature.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nfloat: tmeperature in Celsius degree", "source": "juraj-google-style"}
{"code": "def calibrate_and_quantize_single(self, dataset_gen, input_type, output_type, allow_float, op_output_name, resize_input=True):\n    self._feed_tensors(dataset_gen, resize_input)\n    return self._calibrator.QuantizeModel(np.dtype(input_type.as_numpy_dtype()).num, np.dtype(output_type.as_numpy_dtype()).num, allow_float, op_output_name)", "docstring": "Calibrates the model with specified generator and then quantizes it.\n\nOnly the single op with output op_output_name will be quantized.\nThe input shapes of the calibrator are resized with the calibration data.\n\nReturns:\nA quantized model.\n\nArgs:\ndataset_gen: A generator that generates calibration samples.\ninput_type: A tf.dtype representing the desired real-value input type.\noutput_type: A tf.dtype representing the desired real-value output type.\nallow_float: A boolean. False if the resulting model cannot perform float\ncomputation, useful when targeting an integer-only backend. If False, an\nerror will be thrown if an operation cannot be quantized, otherwise the\nmodel will fallback to float ops.\nop_output_name: A string, only this op will be quantized.\nresize_input: A boolean. True if the shape of the sample data is different\nfrom the input.", "source": "github-repos"}
{"code": "def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_SAVED_MODEL)\n    if not context.executing_eagerly():\n        signature_key = None\n        if signature_keys:\n            if len(signature_keys) != 1:\n                raise ValueError('Only support a single signature key.')\n            else:\n                signature_key = signature_keys[0]\n        logging.warning('Invoking the TF1 implementation of TFLiteConverter because eager is disabled. Consider enabling eager.')\n        return TFLiteConverter.from_saved_model(saved_model_dir, signature_key=signature_key, tag_set=tags)\n    if tags is None:\n        tags = set([_tag_constants.SERVING])\n    with context.eager_mode():\n        saved_model = _load(saved_model_dir, tags)\n    if not signature_keys:\n        signature_keys = list(saved_model.signatures.keys())\n    if not signature_keys:\n        raise ValueError('Only support at least one signature key.')\n    if len(signature_keys) > 1 and hasattr(saved_model, 'serve') and (not hasattr(saved_model, '_default_save_signature')):\n        saved_model.serving_default = saved_model.serve\n        delattr(saved_model, 'serve')\n        signature_keys = ['serving_default']\n    funcs = []\n    for key in signature_keys:\n        if key not in saved_model.signatures:\n            raise ValueError(\"Invalid signature key '{}' found. Valid keys are '{}'.\".format(key, ','.join(saved_model.signatures)))\n        funcs.append(saved_model.signatures[key])\n    saved_model_converter = TFLiteSavedModelConverterV2(saved_model_dir, tags, signature_keys)\n    if saved_model_converter.saved_model_dir:\n        return saved_model_converter\n    return cls(funcs, saved_model)", "docstring": "Creates a TFLiteConverter object from a SavedModel directory.\n\nArgs:\nsaved_model_dir: SavedModel directory to convert.\nsignature_keys: List of keys identifying SignatureDef containing inputs\nand outputs. Elements should not be duplicated. By default the\n`signatures` attribute of the MetaGraphdef is used. (default\nsaved_model.signatures)\ntags: Set of tags identifying the MetaGraphDef within the SavedModel to\nanalyze. All tags in the tag set must be present. (default\n{tf.saved_model.SERVING} or {'serve'})\n\nReturns:\nTFLiteConverter object.\n\nRaises:\nInvalid signature keys.", "source": "github-repos"}
{"code": "def get_tests_dir(append_path=None):\n    caller__file__ = inspect.stack()[1][1]\n    tests_dir = os.path.abspath(os.path.dirname(caller__file__))\n    while not tests_dir.endswith('tests'):\n        tests_dir = os.path.dirname(tests_dir)\n    if append_path:\n        return os.path.join(tests_dir, append_path)\n    else:\n        return tests_dir", "docstring": "Args:\nappend_path: optional path to append to the tests dir path\n\nReturn:\nThe full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is\njoined after the `tests` dir the former is provided.", "source": "github-repos"}
{"code": "def unpack(packet):\n    \n    validate_packet(packet)\n\n    version = packet[0]\n    try:\n        pyof_lib = PYOF_VERSION_LIBS[version]\n    except KeyError:\n        raise UnpackException('Version not supported')\n\n    try:\n        message = pyof_lib.common.utils.unpack_message(packet)\n        return message\n    except (UnpackException, ValueError) as exception:\n        raise UnpackException(exception)", "docstring": "Unpack the OpenFlow Packet and returns a message.\n\nArgs:\npacket: buffer with the openflow packet.\n\nReturns:\nGenericMessage: Message unpacked based on openflow packet.\n\nRaises:\nUnpackException: if the packet can't be unpacked.", "source": "juraj-google-style"}
{"code": "def concatenate_unique(la, lb):\n    la_set = set(la)\n    for l in lb:\n        if l not in la_set:\n            la.append(l)\n            la_set.add(l)\n    return la", "docstring": "Add all the elements of `lb` to `la` if they are not there already.\n\nThe elements added to `la` maintain ordering with respect to `lb`.\n\nArgs:\nla: List of Python objects.\nlb: List of Python objects.\nReturns:\n`la`: The list `la` with missing elements from `lb`.", "source": "github-repos"}
{"code": "def IsDeletedOrDefault(clean_lines, linenum):\n  \n  open_paren = clean_lines.elided[linenum].find('(')\n  if open_paren < 0:\n    return False\n  (close_line, _, close_paren) = CloseExpression(\n      clean_lines, linenum, open_paren)\n  if close_paren < 0:\n    return False\n  return Match(r'\\s*=\\s*(?:delete|default)\\b', close_line[close_paren:])", "docstring": "Check if current constructor or operator is deleted or default.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if this is a deleted or default constructor.", "source": "juraj-google-style"}
{"code": "def register_subcommand(parser: ArgumentParser):\n    dataclass_types = (ChatArguments,)\n    chat_parser = parser.add_parser('chat', dataclass_types=dataclass_types)\n    group = chat_parser.add_argument_group('Positional arguments')\n    group.add_argument('model_name_or_path_positional', type=str, default=None, help='Name of the pre-trained model.')\n    group.add_argument('generate_flags', type=str, default=None, help=\"Flags to pass to `generate`, using a space as a separator between flags. Accepts booleans, numbers, and lists of integers, more advanced parameterization should be set through --generation-config. Example: `transformers chat <model_repo> max_new_tokens=100 do_sample=False eos_token_id=[1,2]`. If you're a new user, check this basic flag guide: https:\n    chat_parser.set_defaults(func=chat_command_factory)", "docstring": "Register this command to argparse so it's available for the transformer-cli\n\nArgs:\nparser: Root parser to register command-specific arguments", "source": "github-repos"}
{"code": "def create_backup(name):\n    if (name in list_backups()):\n        raise CommandExecutionError('Backup already present: {0}'.format(name))\n    ps_cmd = ['Backup-WebConfiguration', '-Name', \"'{0}'\".format(name)]\n    cmd_ret = _srvmgr(ps_cmd)\n    if (cmd_ret['retcode'] != 0):\n        msg = 'Unable to backup web configuration: {0}\\nError: {1}'.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n    return (name in list_backups())", "docstring": "r'''\nBackup an IIS Configuration on the System.\n\n.. versionadded:: 2017.7.0\n\n.. note::\nBackups are stored in the ``$env:Windir\\System32\\inetsrv\\backup``\nfolder.\n\nArgs:\nname (str): The name to give the backup\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.create_backup good_config_20170209", "source": "codesearchnet"}
{"code": "def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n    logits = logits.clone()\n    top_k = min(top_k, logits.size(-1))\n    if top_k > 0:\n        indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:]\n        logits[indices_to_remove] = filter_value\n    if top_p > 0.0:\n        sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)\n        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n        sorted_indices_to_remove = cumulative_probs > top_p\n        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n        sorted_indices_to_remove[..., 0] = 0\n        indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(dim=-1, index=sorted_indices, src=sorted_indices_to_remove)\n        logits[indices_to_remove] = filter_value\n    return logits", "docstring": "Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n\nArgs:\nlogits (`torch.Tensor`):\nlogits distribution shape (vocabulary size)\ntop_k (`int`, *optional*, defaults to 0):\nWhen `top_k >0` keep only top key tokens with highest probability (top-k filtering).\ntop_p (`int`, *optional*, defaults to 0):\nWhen `top_p>0.0` keep the top tokens with cumulative probability >= `top_p` (nucleus filtering).", "source": "github-repos"}
{"code": "def dp996(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dp996`'.format(value))\n\n        self._dp996 = value", "docstring": "Corresponds to IDD Field `dp996`\nDew-point temperature corresponding to 99.6% annual cumulative\nfrequency of occurrence (cold conditions)\n\nArgs:\nvalue (float): value for IDD Field `dp996`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def exit_actor():\n    worker = ray.worker.global_worker\n    if ((worker.mode == ray.WORKER_MODE) and (not worker.actor_id.is_nil())):\n        worker.raylet_client.disconnect()\n        ray.disconnect()\n        ray.global_state.disconnect()\n        sys.exit(0)\n        assert False, 'This process should have terminated.'\n    else:\n        raise Exception('exit_actor called on a non-actor worker.')", "docstring": "Intentionally exit the current actor.\n\nThis function is used to disconnect an actor and exit the worker.\n\nRaises:\nException: An exception is raised if this is a driver or this\nworker is not an actor.", "source": "codesearchnet"}
{"code": "def freeze_matrix(script, all_layers=False):\n    \n    filter_xml = ''.join([\n        '  <filter name=\"Freeze Current Matrix\">\\n',\n        '    <Param name=\"allLayers\" ',\n        'value=\"%s\" ' % str(all_layers).lower(),\n        'description=\"Apply to all visible Layers\" ',\n        'type=\"RichBool\" ',\n        '/>\\n',\n        '  </filter>\\n'])\n    util.write_filter(script, filter_xml)\n    return None", "docstring": "Freeze the current transformation matrix into the coordinates of the\nvertices of the mesh (and set this matrix to the identity).\n\nIn other words it applies in a definitive way the current matrix to the\nvertex coordinates.\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nall_layers (bool): If selected the filter will be applied to all\nvisible mesh layers.", "source": "juraj-google-style"}
{"code": "def _RawData(self, data):\n    if (not isinstance(data, dict)):\n        return data\n    result = collections.OrderedDict()\n    for (k, v) in iteritems(data):\n        result[k] = self._RawData(v)\n    return result", "docstring": "Convert data to common format.\n\nConfiguration options are normally grouped by the functional component which\ndefine it (e.g. Logging.path is the path parameter for the logging\nsubsystem). However, sometimes it is more intuitive to write the config as a\nflat string (e.g. Logging.path). In this case we group all the flat strings\nin their respective sections and create the sections automatically.\n\nArgs:\ndata: A dict of raw data.\n\nReturns:\na dict in common format. Any keys in the raw data which have a \".\" in them\nare separated into their own sections. This allows the config to be\nwritten explicitly in dot notation instead of using a section.", "source": "codesearchnet"}
{"code": "def log(level, msg, *args, **kwargs):\n    if (level > converter.ABSL_DEBUG):\n        standard_level = (converter.STANDARD_DEBUG - (level - 1))\n    else:\n        if (level < converter.ABSL_FATAL):\n            level = converter.ABSL_FATAL\n        standard_level = converter.absl_to_standard(level)\n    _absl_logger.log(standard_level, msg, *args, **kwargs)", "docstring": "Logs 'msg % args' at absl logging level 'level'.\n\nIf no args are given just print msg, ignoring any interpolation specifiers.\n\nArgs:\nlevel: int, the absl logging level at which to log the message\n(logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging\nlevel constants are also supported, callers should prefer explicit\nlogging.vlog() calls for such purpose.\n\nmsg: str, the message to be logged.\n*args: The args to be substitued into the msg.\n**kwargs: May contain exc_info to add exception traceback to message.", "source": "codesearchnet"}
{"code": "def verified(self, institute_id):\n        \n        query = {\n            'verb' : 'validate',\n            'institute' : institute_id,\n        }\n        res = []\n        validate_events = self.event_collection.find(query)\n        for validated in list(validate_events):\n            case_id = validated['case']\n            var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])\n            case_obj = self.case(case_id=case_id)\n            if not case_obj or not var_obj:\n                continue \n            var_obj['case_obj'] = {\n                'display_name' : case_obj['display_name'],\n                'individuals' : case_obj['individuals']\n            }\n            res.append(var_obj)\n\n        return res", "docstring": "Return all verified variants for a given institute\n\nArgs:\ninstitute_id(str): institute id\n\nReturns:\nres(list): a list with validated variants", "source": "juraj-google-style"}
{"code": "def on_fire(self, watermark, window, context):\n    pass", "docstring": "Called when a trigger actually fires.\n\nArgs:\nwatermark: (a lower bound on) the watermark of the system\nwindow: the window whose trigger is being fired\ncontext: a context (e.g. a TriggerContext instance) for managing state\nand setting timers\n\nReturns:\nwhether this trigger is finished", "source": "github-repos"}
{"code": "def copy_buffer(self, dst, src, size=-1, *, read_offset=0, write_offset=0) -> None:\n        \n\n        self.mglo.copy_buffer(dst.mglo, src.mglo, size, read_offset, write_offset)", "docstring": "Copy buffer content.\n\nArgs:\ndst (Buffer): The destination buffer.\nsrc (Buffer): The source buffer.\nsize (int): The number of bytes to copy.\n\nKeyword Args:\nread_offset (int): The read offset.\nwrite_offset (int): The write offset.", "source": "juraj-google-style"}
{"code": "def distance_from_point(self, pt):\n    return np.linalg.norm((np.array(pt) - self.coords))", "docstring": "Returns distance between the site and a point in space.\n\nArgs:\npt: Cartesian coordinates of point.\n\nReturns:\nDistance (float)", "source": "codesearchnet"}
{"code": "def make_rsa_keypair(bits):\n    private_key = rsa.generate_private_key(public_exponent=65537, key_size=bits, backend=default_backend())\n    private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption())\n    public_pem = private_key.public_key().public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo)\n    return (private_pem, public_pem)", "docstring": "Generate an RSA keypair.\n\nArgs:\nbits (int): number of bits to use for the key.\n\nReturns:\n(private_key, public_key) - both as PEM encoded strings", "source": "codesearchnet"}
{"code": "def _compare_constant_tuple_prefix(op, prefix, constant_tuple, reverse):\n    length = min(len(prefix), len(constant_tuple))\n    trimmed_prefix = prefix[:length]\n    trimmed_constant_tuple = constant_tuple[:length]\n    if trimmed_prefix == trimmed_constant_tuple:\n        if len(prefix) >= len(constant_tuple):\n            if reverse:\n                return op in (slots.LT, slots.LE, slots.NE)\n            else:\n                return op in (slots.NE, slots.GE, slots.GT)\n        return None\n    if reverse:\n        return _compare_constants(op, trimmed_constant_tuple, trimmed_prefix)\n    else:\n        return _compare_constants(op, trimmed_prefix, trimmed_constant_tuple)", "docstring": "Compares a tuple's constant prefix against a constant tuple.\n\nArgs:\nop: A comparison operator, such as LT (less than).\nprefix: A constant prefix of a non-constant tuple (referred to as \"left\" in\nthe inline comments). So if left=(3, 2, ...), prefix=(3, 2).\nconstant_tuple: A constant tuple (referred to as \"right\").\nreverse: Whether left and right should be reversed for the comparison.\n\nReturns:\nA bool of the comparison result if it can be determined, None otherwise.", "source": "github-repos"}
{"code": "def BSearchFloor(a, x, lo=0, hi=None):\n    \n    if len(a) == 0: return -1\n    hi = hi if hi is not None else len(a)\n    pos = bisect_left(a, x, lo, hi)\n    return pos - 1 if pos >= hi \\\n        else (pos if x == a[pos] else (pos - 1 if pos > lo else -1))", "docstring": "Returns highest i such as a[i] <= x, or -1 if x < all elements in a\n\nSo, if x is in between two elements in a, this function will return the\nindex of the lower element, hence \"Floor\".\n\nArguments:\na -- ordered numeric sequence\nx -- element to search within a\nlo -- lowest index to consider in search\nhi -- highest index to consider in search", "source": "juraj-google-style"}
{"code": "def get_model_loader(filename):\n    assert isinstance(filename, six.string_types), filename\n    filename = os.path.expanduser(filename)\n    if filename.endswith('.npy'):\n        assert tf.gfile.Exists(filename), filename\n        return DictRestore(np.load(filename, encoding='latin1').item())\n    elif filename.endswith('.npz'):\n        assert tf.gfile.Exists(filename), filename\n        obj = np.load(filename)\n        return DictRestore(dict(obj))\n    else:\n        return SaverRestore(filename)", "docstring": "Get a corresponding model loader by looking at the file name.\n\nReturns:\nSessInit: either a :class:`DictRestore` (if name ends with 'npy/npz') or\n:class:`SaverRestore` (otherwise).", "source": "codesearchnet"}
{"code": "def _calculate_page_index(index, data):\n        \n        if index > data['total_results']:\n            raise ValueError('index not in paged data')\n        page_length = len(data['results'])\n        return (index", "docstring": "Determine the location of a given index in paged data.\n\nArguments:\nindex (:py:class:`int`): The overall index.\ndata: (:py:class:`dict`) The first page of data.\n\nReturns:\n:py:class:`tuple`: The location of that index, in the format\n``(page, index_in_page)``.", "source": "juraj-google-style"}
{"code": "def get_dimension_type(self, dim):\n        \n        dim_obj = self.get_dimension(dim)\n        if dim_obj and dim_obj.type is not None:\n            return dim_obj.type\n        return self.interface.dimension_type(self, dim_obj)", "docstring": "Get the type of the requested dimension.\n\nType is determined by Dimension.type attribute or common\ntype of the dimension values, otherwise None.\n\nArgs:\ndimension: Dimension to look up by name or by index\n\nReturns:\nDeclared type of values along the dimension", "source": "juraj-google-style"}
{"code": "def copy(self, name=None):\n    cpy = copy.copy(self)\n    if name:\n        cpy.name = name\n    return cpy", "docstring": "shallow copy of the instruction.\n\nArgs:\nname (str): name to be given to the copied circuit,\nif None then the name stays the same\n\nReturns:\nInstruction: a shallow copy of the current instruction, with the name\nupdated if it was provided", "source": "codesearchnet"}
{"code": "def auto_call_functors(enabled: bool=True) -> ContextManager[None]:\n    return thread_local.thread_local_value_scope(_TLS_AUTO_CALL_FUNCTORS, enabled, False)", "docstring": "Returns a context manager to enable or disable auto call for functors.\n\n`auto_call_functors` is thread-safe and can be nested. For example::\n\n@pg.symbolize\ndef foo(x, y):\nreturn x + y\n\nwith pg.auto_call_functors(True):\na = foo(1, 2)\nassert a == 3\nwith pg.auto_call_functors(False):\nb = foo(1, 2)\nassert isinstance(b, foo)\n\nArgs:\nenabled: If True, enable auto call for functors.\nOtherwise, auto call will be disabled.\n\nReturns:\nA context manager for enabling/disabling auto call for functors.", "source": "github-repos"}
{"code": "def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:\n    \n    signature(func).bind(*args, **kwargs)\n    return func", "docstring": "Check if the request's arguments match a function's signature.\n\nRaises TypeError exception if arguments cannot be passed to a function.\n\nArgs:\nfunc: The function to check.\nargs: Positional arguments.\nkwargs: Keyword arguments.\n\nRaises:\nTypeError: If the arguments cannot be passed to the function.", "source": "juraj-google-style"}
{"code": "def _clean_options(method, provided_options):\n    provided_options = (provided_options or {})\n    default_options = get_minimizer_options(method)\n    result = {}\n    for (name, default) in default_options.items():\n        if (name in provided_options):\n            result[name] = provided_options[name]\n        else:\n            result[name] = default_options[name]\n    return result", "docstring": "Clean the given input options.\n\nThis will make sure that all options are present, either with their default values or with the given values,\nand that no other options are present then those supported.\n\nArgs:\nmethod (str): the method name\nprovided_options (dict): the given options\n\nReturns:\ndict: the resulting options dictionary", "source": "codesearchnet"}
{"code": "def minimize(self, minimize):\n    self._minimize = minimize\n    self._logger.log('debug', 'Minimize set to {}'.format(minimize))", "docstring": "Configures the ABC to minimize fitness function return value or\nderived score\n\nArgs:\nminimize (bool): if True, minimizes fitness function return value;\nif False, minimizes derived score", "source": "codesearchnet"}
{"code": "def set_metadata(self, **kwargs):\n    if self._traceme and kwargs:\n        self._traceme.SetMetadata(**kwargs)", "docstring": "Sets metadata in this trace event.\n\nArgs:\n**kwargs: metadata in key-value pairs.\n\nThis method enables setting metadata in a trace event after it is\ncreated.\n\nExample usage:\n\n```python\n\ndef call(function):\nwith tf.profiler.experimental.Trace(\"call\",\nfunction_name=function.name) as tm:\nbinary, in_cache = jit_compile(function)\ntm.set_metadata(in_cache=in_cache)\nexecute(binary)\n\n```\nIn this example, we want to trace how much time spent on\ncalling a function, which includes compilation and execution.\nThe compilation can be either getting a cached copy of the\nbinary or actually generating the binary, which is indicated\nby the boolean \"in_cache\" returned by jit_compile(). We need\nto use set_metadata() to pass in_cache because we did not know\nthe in_cache value when the trace was created (and we cannot\ncreate the trace after jit_compile(), because we want\nto measure the entire duration of call()).", "source": "github-repos"}
{"code": "def formal_cities(reverse=False):\n    output = {}\n    fname = pkg_resources.resource_filename(__name__, 'resources/Formal_City_Name_Pairs.csv')\n    with open(fname, 'rU') as csvfile:\n        reader = csv.reader(csvfile, delimiter=',')\n        for row in reader:\n            if (not reverse):\n                output[row[0]] = row[1]\n            else:\n                output[row[1]] = row[0]\n    return output", "docstring": "Get a dictionary that maps all Backpage city names to their presentable, formal names.\n\nReturns:\ndictionary of Backpage city names mapped to formal city names", "source": "codesearchnet"}
{"code": "def _process_between_filter_directive(filter_operation_info, location, context, parameters):\n    filtered_field_type = filter_operation_info.field_type\n    filtered_field_name = filter_operation_info.field_name\n    argument_inferred_type = strip_non_null_from_type(filtered_field_type)\n    (arg1_expression, arg1_non_existence) = _represent_argument(location, context, parameters[0], argument_inferred_type)\n    (arg2_expression, arg2_non_existence) = _represent_argument(location, context, parameters[1], argument_inferred_type)\n    lower_bound_clause = expressions.BinaryComposition(u'>=', expressions.LocalField(filtered_field_name), arg1_expression)\n    if (arg1_non_existence is not None):\n        lower_bound_clause = expressions.BinaryComposition(u'||', arg1_non_existence, lower_bound_clause)\n    upper_bound_clause = expressions.BinaryComposition(u'<=', expressions.LocalField(filtered_field_name), arg2_expression)\n    if (arg2_non_existence is not None):\n        upper_bound_clause = expressions.BinaryComposition(u'||', arg2_non_existence, upper_bound_clause)\n    filter_predicate = expressions.BinaryComposition(u'&&', lower_bound_clause, upper_bound_clause)\n    return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks that a field is between two values, inclusive.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). May be mutated in-place in this function!\nparameters: list of 2 elements, specifying the time range in which the data must lie;\nif either of the elements is optional and missing,\ntheir side of the check is assumed to be True\n\nReturns:\na Filter basic block that performs the range check", "source": "codesearchnet"}
{"code": "def is_seq_of(seq, expected_type, seq_type=None):\n    if (seq_type is None):\n        exp_seq_type = collections_abc.Sequence\n    else:\n        assert isinstance(seq_type, type)\n        exp_seq_type = seq_type\n    if (not isinstance(seq, exp_seq_type)):\n        return False\n    for item in seq:\n        if (not isinstance(item, expected_type)):\n            return False\n    return True", "docstring": "Check whether it is a sequence of some type.\n\nArgs:\nseq (Sequence): The sequence to be checked.\nexpected_type (type): Expected type of sequence items.\nseq_type (type, optional): Expected sequence type.\n\nReturns:\nbool: Whether the sequence is valid.", "source": "codesearchnet"}
{"code": "def item_to_mrc(code, val):\n    \n    if isinstance(val, basestring):\n        return [val_to_mrc(code, val)]\n\n    if isinstance(val, dict):\n        val = [val]\n\n    return dicts_to_mrc(code, val)", "docstring": "Convert `val` to MRC, whether it is dict or string.\n\nArgs:\ncode (str): Code of the field.\nval (str or dict): Value of the field.\n\nReturns:\nlist: MRC lines for output template.", "source": "juraj-google-style"}
{"code": "def Parse(self, parser_mediator, file_object):\n    if (not file_object):\n        raise errors.UnableToParseFile('Invalid file object')\n    if (self._INITIAL_FILE_OFFSET is not None):\n        file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET)\n    parser_mediator.AppendToParserChain(self)\n    try:\n        self.ParseFileObject(parser_mediator, file_object)\n    finally:\n        parser_mediator.PopFromParserChain()", "docstring": "Parses a single file-like object.\n\nArgs:\nparser_mediator (ParserMediator): a parser mediator.\nfile_object (dvfvs.FileIO): a file-like object to parse.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def parse_history_node(h_node):\n    if isinstance(h_node, dict):\n        return HistoryNode.from_dict(h_node)\n    else:\n        if (len(h_node) != 3):\n            raise ValueError('Invalid History node, should be dict or (name, version, description) tuple: {}'.format(h_node))\n        return HistoryNode(h_node[0], h_node[1], h_node[2])", "docstring": "Parses a History Node object from either a dict or a tuple.\n\nArgs:\nh_node: A dict with name/url/description fields or a 3-element\ntuple.\n\nReturns:\nHistory node.", "source": "codesearchnet"}
{"code": "def _validate_config(config):\n    if (not isinstance(config, list)):\n        raise TypeError('Config must be a list')\n    for config_dict in config:\n        if (not isinstance(config_dict, dict)):\n            raise TypeError('Config must be a list of dictionaries')\n        label = config_dict.keys()[0]\n        cfg = config_dict[label]\n        if (not isinstance(cfg, dict)):\n            raise TypeError('Config structure is broken')\n        if ('host' not in cfg):\n            raise TypeError('Config entries must have a value for host')\n        if ((not isinstance(cfg['host'], str)) and (not isinstance(cfg['host'], list))):\n            raise TypeError('Host must be a string or a list.')\n        if ('port' not in cfg):\n            raise TypeError('Config entries must have a value for port')\n        if (not isinstance(cfg['port'], int)):\n            raise TypeError('Port must be an int')\n        if ('dbpath' not in cfg):\n            raise TypeError('Config entries must have a value for dbpath')\n        if (not isinstance(cfg['dbpath'], str)):\n            if (not isinstance(cfg['dbpath'], list)):\n                raise TypeError('Dbpath must either a string or a list of strings')\n            for dbpath in cfg['dbpath']:\n                if (not isinstance(dbpath, str)):\n                    raise TypeError('Dbpath must either a string or a list of strings')\n        if (('read_preference' in cfg) and (not isinstance(cfg['read_preference'], str))):\n            raise TypeError('Read_preference must be a string')\n        if (('replicaSet' in cfg) and (not isinstance(cfg['replicaSet'], str))):\n            raise TypeError('replicaSet must be a string')", "docstring": "Validate that the provided configurtion is valid.\n\nEach dictionary in the configuration list must have the following\nmandatory entries :\n{label: {host(string), port(int), dbpath(string|list of strings)}}\nIt can also contain 1 optional key:\n{read_preference(string)}\n\nArgs:\nconfig: the list of configurations provided at instantiation\n\nRaises:\nTypeError: a fault in the configurations is found", "source": "codesearchnet"}
{"code": "def add_oxidation_state_by_element(self, oxidation_states):\n    try:\n        for site in self.sites:\n            new_sp = {}\n            for (el, occu) in site.species.items():\n                sym = el.symbol\n                new_sp[Specie(sym, oxidation_states[sym])] = occu\n            site.species = new_sp\n    except KeyError:\n        raise ValueError('Oxidation state of all elements must be specified in the dictionary.')", "docstring": "Add oxidation states.\n\nArgs:\noxidation_states (dict): Dict of oxidation states.\nE.g., {\"Li\":1, \"Fe\":2, \"P\":5, \"O\":-2}", "source": "codesearchnet"}
{"code": "def destroy(ads):\n    \n    for ad in ads:\n        try:\n            ad.services.stop_all()\n        except:\n            ad.log.exception('Failed to clean up properly.')", "docstring": "Cleans up AndroidDevice objects.\n\nArgs:\nads: A list of AndroidDevice objects.", "source": "juraj-google-style"}
{"code": "def get_percentile(self, percentile):\n        \n        assert 0 <= percentile <= 100, \\\n            'percentile must be between 0 and 100. Got {}'.format(percentile)\n        return self._percentile(self._values, percentile)", "docstring": "Get a value representing a the input percentile of the Data Collection.\n\nArgs:\npercentile: A float value from 0 to 100 representing the\nrequested percentile.\n\nReturn:\nThe Data Collection value at the input percentile", "source": "juraj-google-style"}
{"code": "def sub_pi_to_number(self, subpage=1, subitem=1):\n        \n        if subitem == None:\n            subitem = 0\n\n        if subpage == None:\n            return 0\n        else:\n            if subpage > 1:\n                return ((subpage - 1) * self.subpage_items) + subitem\n            else:\n                return 0 + subitem", "docstring": "Convert subpage & subitem to a integer\n\n* if page == 1, then return 0, since the item count is the true # of items\n* if page == 2, then return, page-1 * items_per_page, since we are\nreturning the # of items on a full page.\n\nArgs:\n* None\n\nReturns:\n* Integer - Which represents the number of items up to the page.", "source": "juraj-google-style"}
{"code": "def try_serialize_handler(handler):\n  \n  if (isinstance(handler, types.InstanceType) or  \n      (isinstance(handler, object) and  \n       not inspect.isfunction(handler) and\n       not inspect.ismethod(handler)) and\n      hasattr(handler, \"__call__\")):\n    return pickle.dumps(handler)\n  return None", "docstring": "Try to serialize map/reduce handler.\n\nArgs:\nhandler: handler function/instance. Handler can be a function or an\ninstance of a callable class. In the latter case, the handler will\nbe serialized across slices to allow users to save states.\n\nReturns:\nserialized handler string or None.", "source": "juraj-google-style"}
{"code": "def port_list(br):\n    \n    cmd = 'ovs-vsctl list-ports {0}'.format(br)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    stdout = result['stdout']\n    return _stdout_list_split(retcode, stdout)", "docstring": "Lists all of the ports within bridge.\n\nArgs:\nbr: A string - bridge name.\n\nReturns:\nList of bridges (or empty list), False on failure.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_list br0", "source": "juraj-google-style"}
{"code": "def UpdateFrom(self, src):\n    \n    if not isinstance(src, PathInfo):\n      raise TypeError(\"expected `%s` but got `%s`\" % (PathInfo, type(src)))\n    if self.path_type != src.path_type:\n      raise ValueError(\n          \"src [%s] does not represent the same path type as self [%s]\" %\n          (src.path_type, self.path_type))\n    if self.components != src.components:\n      raise ValueError(\"src [%s] does not represent the same path as self [%s]\"\n                       % (src.components, self.components))\n\n    if src.HasField(\"stat_entry\"):\n      self.stat_entry = src.stat_entry\n\n    self.last_stat_entry_timestamp = max(self.last_stat_entry_timestamp,\n                                         src.last_stat_entry_timestamp)\n    self.directory = self.directory or src.directory", "docstring": "Merge path info records.\n\nMerges src into self.\nArgs:\nsrc: An rdfvalues.objects.PathInfo record, will be merged into self.\n\nRaises:\nValueError: If src does not represent the same path.", "source": "juraj-google-style"}
{"code": "def __init__(self, message):\n        \n        super(IllegalOperation, self).__init__(\n            reason=enums.ResultReason.ILLEGAL_OPERATION,\n            message=message\n        )", "docstring": "Create an IllegalOperation exception.\n\nArgs:\nmessage (string): A string containing information about the error.", "source": "juraj-google-style"}
{"code": "def db_stats(self):\n    data = dict(action='db-stats')\n    jsondata = self._api_request(params=data)\n    stats = DBStats(total_clicks=int(jsondata['db-stats']['total_clicks']), total_links=int(jsondata['db-stats']['total_links']))\n    return stats", "docstring": "Get database statistics.\n\nReturns:\nDBStats: Total clicks and links statistics.\n\nRaises:\nrequests.exceptions.HTTPError: Generic HTTP Error", "source": "codesearchnet"}
{"code": "def add_word(self, word):\n    word = word.lower()\n    if (not (word.isascii() and word.isalpha())):\n        raise ValueError(\"Invalid character in word '{}'\".format(word))\n    word = word.encode(encoding='ascii')\n    result = cgaddag.gdg_add_word(self.gdg, word)\n    if (result == 1):\n        raise ValueError(\"Invalid character in word '{}'\".format(word))\n    elif (result == 2):\n        raise MemoryError('Out of memory, GADDAG is in an undefined state')", "docstring": "Add a word to the GADDAG.\n\nArgs:\nword: A word to be added to the GADDAG.", "source": "codesearchnet"}
{"code": "def insert_arguments_into_query(compilation_result, arguments):\n    \n    _ensure_arguments_are_provided(compilation_result.input_metadata, arguments)\n\n    if compilation_result.language == MATCH_LANGUAGE:\n        return insert_arguments_into_match_query(compilation_result, arguments)\n    elif compilation_result.language == GREMLIN_LANGUAGE:\n        return insert_arguments_into_gremlin_query(compilation_result, arguments)\n    elif compilation_result.language == SQL_LANGUAGE:\n        return insert_arguments_into_sql_query(compilation_result, arguments)\n    else:\n        raise AssertionError(u'Unrecognized language in compilation result: '\n                             u'{}'.format(compilation_result))", "docstring": "Insert the arguments into the compiled GraphQL query to form a complete query.\n\nArgs:\ncompilation_result: a CompilationResult object derived from the GraphQL compiler\narguments: dict, mapping argument name to its value, for every parameter the query expects.\n\nReturns:\nstring, a query in the appropriate output language, with inserted argument data", "source": "juraj-google-style"}
{"code": "def record_gradient(op_name, inputs, attrs, outputs):\n    pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs, ops.get_name_scope())", "docstring": "Explicitly record the gradient for a given op.\n\nArgs:\nop_name: The op name as listed in the `OpDef` for the op.\ninputs: A list of tensor inputs to the op.\nattrs: The op attributes as a flattened list of alternating attribute names\nand attribute values.\noutputs: A list of tensor outputs from the op.", "source": "github-repos"}
{"code": "def get_user_groups(self, dn, group_search_dn=None, _connection=None):\n    connection = _connection\n    if (not connection):\n        connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'))\n        connection.bind()\n    safe_dn = ldap3.utils.conv.escape_filter_chars(dn)\n    search_filter = '(&{group_filter}({members_attr}={user_dn}))'.format(group_filter=self.config.get('LDAP_GROUP_OBJECT_FILTER'), members_attr=self.config.get('LDAP_GROUP_MEMBERS_ATTR'), user_dn=safe_dn)\n    log.debug(\"Searching for groups for specific user with filter '{0}' , base '{1}' and scope '{2}'\".format(search_filter, (group_search_dn or self.full_group_search_dn), self.config.get('LDAP_GROUP_SEARCH_SCOPE')))\n    connection.search(search_base=(group_search_dn or self.full_group_search_dn), search_filter=search_filter, attributes=self.config.get('LDAP_GET_GROUP_ATTRIBUTES'), search_scope=getattr(ldap3, self.config.get('LDAP_GROUP_SEARCH_SCOPE')))\n    results = []\n    for item in connection.response:\n        if (('type' not in item) or (item.get('type') != 'searchResEntry')):\n            continue\n        group_data = item['attributes']\n        group_data['dn'] = item['dn']\n        results.append(group_data)\n    if (not _connection):\n        self.destroy_connection(connection)\n    return results", "docstring": "Gets a list of groups a user at dn is a member of\n\nArgs:\ndn (str): The dn of the user to find memberships for.\n_connection (ldap3.Connection): A connection object to use when\nsearching. If not given, a temporary connection will be\ncreated, and destroyed after use.\ngroup_search_dn (str): The search dn for groups. Defaults to\n``'{LDAP_GROUP_DN},{LDAP_BASE_DN}'``.\n\nReturns:\nlist: A list of LDAP groups the user is a member of.", "source": "codesearchnet"}
{"code": "def create_user(self, claims):\n        \n        \n        username_claim = settings.USERNAME_CLAIM\n        usermodel = get_user_model()\n        user, created = usermodel.objects.get_or_create(**{\n            usermodel.USERNAME_FIELD: claims[username_claim]\n        })\n        if created or not user.password:\n            user.set_unusable_password()\n            logger.debug(\"User '{}' has been created.\".format(claims[username_claim]))\n\n        return user", "docstring": "Create the user if it doesn't exist yet\n\nArgs:\nclaims (dict): claims from the access token\n\nReturns:\ndjango.contrib.auth.models.User: A Django user", "source": "juraj-google-style"}
{"code": "def __getitem__(self, index):\n        \n        getter = coordinates.Coordinates.from_string(index)\n        return getter(self._values)", "docstring": "Return the value(s) of the given cell(s).\n\nArgs:\nindex (str): cell/row/col index ('A1', '2', 'B') or slice ('A1':'C3')\nReturns:\nvalue (cell), list(col, row), or nested list (two-dimentional slice)\nRaises:\nTypeError: if ``index`` is not a string or slice of strings\nValueError: if ``index`` canot be parsed\nIndexError: if ``index`` is out of range", "source": "juraj-google-style"}
{"code": "def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, device=None, dtype=None, padding_idx=None, **kwargs) -> None:\n    if padding_idx is not None and padding_idx > num_embeddings:\n        raise ValueError(f'padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}')\n    super().__init__(num_embeddings=num_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype, padding_idx=padding_idx, **kwargs)\n    self.num_embeddings = num_embeddings\n    self.padding_idx = padding_idx\n    self.num_additional_embeddings = num_additional_embeddings\n    self.partially_freeze = partially_freeze\n    if partially_freeze:\n        self.weight.requires_grad_(False)\n    if self.num_additional_embeddings > 0:\n        self.additional_embedding = nn.Embedding(num_embeddings=self.num_additional_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype)", "docstring": "Args:\nnum_embeddings (`int`):\nSize of the dictionary of embeddings\nnum_additional_embeddings (`int`):\nNumber of additional embeddings. Only useful when you `partially_freeze=True`.\nembedding_dim (`int`):\nThe size of each embedding vector\npartially_freeze: (`bool`, *optional*, defaults to `False`):\nIf `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.\npadding_idx (`int`, *optional*):\nThe padding index (needs to be less than num_embeddings)\n\nNote: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `padding_idx`,\n`max_norm` or `norm_type`. We are not supporting these.", "source": "github-repos"}
{"code": "def Reinit(self, pid, auto_symfile_loading=True):\n    \n    self.ShutDownGdb()\n    self.__init__(pid, auto_symfile_loading, architecture=self.arch)", "docstring": "Reinitializes the object with a new pid.\n\nSince all modes might need access to this object at any time, this object\nneeds to be long-lived. To make this clear in the API, this shorthand is\nsupplied.\nArgs:\npid: the pid of the target process\nauto_symfile_loading: whether the symbol file should automatically be\nloaded by gdb.", "source": "juraj-google-style"}
{"code": "def _ScanVolumeSystemRoot(self, scan_context, scan_node, base_path_specs):\n    if ((not scan_node) or (not scan_node.path_spec)):\n        raise errors.ScannerError('Invalid scan node.')\n    if (scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):\n        volume_identifiers = self._GetAPFSVolumeIdentifiers(scan_node)\n    elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):\n        volume_identifiers = self._GetVSSStoreIdentifiers(scan_node)\n        volume_identifiers.reverse()\n    else:\n        raise errors.ScannerError('Unsupported volume system type: {0:s}.'.format(scan_node.type_indicator))\n    for volume_identifier in volume_identifiers:\n        location = '/{0:s}'.format(volume_identifier)\n        sub_scan_node = scan_node.GetSubNodeByLocation(location)\n        if (not sub_scan_node):\n            raise errors.ScannerError('Scan node missing for volume identifier: {0:s}.'.format(volume_identifier))\n        self._ScanVolume(scan_context, sub_scan_node, base_path_specs)", "docstring": "Scans a volume system root scan node for volume and file systems.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): volume system root scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the scan node is invalid, the scan node type is not\nsupported or if a sub scan node cannot be retrieved.", "source": "codesearchnet"}
{"code": "def __init__(self, project_id, instance_id, table_id, flush_count, max_row_bytes):\n    super().__init__()\n    self.beam_options = {'project_id': project_id, 'instance_id': instance_id, 'table_id': table_id, 'flush_count': flush_count, 'max_row_bytes': max_row_bytes}\n    self.table = None\n    self.batcher = None\n    self.service_call_metric = None\n    self.written = Metrics.counter(self.__class__, 'Written Row')", "docstring": "Constructor of the Write connector of Bigtable\nArgs:\nproject_id(str): GCP Project of to write the Rows\ninstance_id(str): GCP Instance to write the Rows\ntable_id(str): GCP Table to write the `DirectRows`\nflush_count(int): Max number of rows to flush\nmax_row_bytes(int) Max number of row mutations size to flush", "source": "github-repos"}
{"code": "def parse_case(config):\n    \n    if 'owner' not in config:\n        raise ConfigError(\"A case has to have a owner\")\n\n    if 'family' not in config:\n        raise ConfigError(\"A case has to have a 'family'\")\n\n    individuals = parse_individuals(config['samples'])\n    case_data = {\n        'owner': config['owner'],\n        'collaborators': [config['owner']],\n        'case_id': config['family'],\n        'display_name': config.get('family_name', config['family']),\n        'genome_build': config.get('human_genome_build'),\n        'rank_model_version': config.get('rank_model_version'),\n        'rank_score_threshold': config.get('rank_score_threshold', 0),\n        'analysis_date': config['analysis_date'],\n        'individuals': individuals,\n        'vcf_files': {\n            'vcf_snv': config.get('vcf_snv'),\n            'vcf_sv': config.get('vcf_sv'),\n            'vcf_str': config.get('vcf_str'),\n            'vcf_cancer': config.get('vcf_cancer'),\n            'vcf_snv_research': config.get('vcf_snv_research'),\n            'vcf_sv_research': config.get('vcf_sv_research'),\n            'vcf_cancer_research': config.get('vcf_cancer_research'),\n        },\n        'default_panels': config.get('default_gene_panels', []),\n        'gene_panels': config.get('gene_panels', []),\n        'assignee': config.get('assignee'),\n        'peddy_ped': config.get('peddy_ped'),\n        'peddy_sex': config.get('peddy_sex'),\n        'peddy_check': config.get('peddy_check'),\n        'delivery_report': config.get('delivery_report'),\n        'multiqc': config.get('multiqc'),\n        'track': config.get('track', 'rare'),\n    }\n\n    \n    if 'madeline' in config:\n        mad_path = Path(config['madeline'])\n        if not mad_path.exists():\n            raise ValueError(\"madeline path not found: {}\".format(mad_path))\n        with mad_path.open('r') as in_handle:\n            case_data['madeline_info'] = in_handle.read()\n    \n    if (case_data['vcf_files']['vcf_cancer'] or case_data['vcf_files']['vcf_cancer_research']):\n        case_data['track'] = 'cancer'\n    \n    return case_data", "docstring": "Parse case information from config or PED files.\n\nArgs:\nconfig (dict): case config with detailed information\n\nReturns:\ndict: parsed case data", "source": "juraj-google-style"}
{"code": "def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):\n    if (depth > rs._depth):\n        rs._warn('Deep recursion while scanning topic inheritance')\n    rs._say(((((((((('\\tCollecting trigger list for topic ' + topic) + '(depth=') + str(depth)) + '; inheritance=') + str(inheritance)) + '; ') + 'inherited=') + str(inherited)) + ')'))\n    if (not (topic in rs._topics)):\n        rs._warn(\"Inherited or included topic {} doesn't exist or has no triggers\".format(topic))\n        return []\n    triggers = []\n    inThisTopic = []\n    if (not thats):\n        if (topic in rs._topics):\n            for trigger in rs._topics[topic]:\n                inThisTopic.append([trigger['trigger'], trigger])\n    elif (topic in rs._thats.keys()):\n        for curtrig in rs._thats[topic].keys():\n            for (previous, pointer) in rs._thats[topic][curtrig].items():\n                inThisTopic.append([pointer['trigger'], pointer])\n    if (topic in rs._includes):\n        for includes in rs._includes[topic]:\n            rs._say(((('\\t\\tTopic ' + topic) + ' includes ') + includes))\n            triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))\n    if (topic in rs._lineage):\n        for inherits in rs._lineage[topic]:\n            rs._say(((('\\t\\tTopic ' + topic) + ' inherits ') + inherits))\n            triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))\n    if ((topic in rs._lineage) or inherited):\n        for trigger in inThisTopic:\n            rs._say(((('\\t\\tPrefixing trigger with {inherits=' + str(inheritance)) + '}') + trigger[0]))\n            triggers.append([((('{inherits=' + str(inheritance)) + '}') + trigger[0]), trigger[1]])\n    else:\n        triggers.extend(inThisTopic)\n    return triggers", "docstring": "Recursively scan a topic and return a list of all triggers.\n\nArguments:\nrs (RiveScript): A reference to the parent RiveScript instance.\ntopic (str): The original topic name.\nthats (bool): Are we getting triggers for 'previous' replies?\ndepth (int): Recursion step counter.\ninheritance (int): The inheritance level counter, for topics that\ninherit other topics.\ninherited (bool): Whether the current topic is inherited by others.\n\nReturns:\n[]str: List of all triggers found.", "source": "codesearchnet"}
{"code": "def _ParseTokenType(self, file_object, file_offset):\n    token_type_map = self._GetDataTypeMap('uint8')\n    (token_type, _) = self._ReadStructureFromFileObject(file_object, file_offset, token_type_map)\n    return token_type", "docstring": "Parses a token type.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\nfile_offset (int): offset of the token relative to the start of\nthe file-like object.\n\nReturns:\nint: token type", "source": "codesearchnet"}
{"code": "def __init__(self, file_path, cause):\n        \n        message = six.text_type(\"Malformed config at {}: {}\").format(\n            file_path,\n            cause\n        )\n        super(MalformedConfig, self).__init__(message)", "docstring": "Exception to be raised if pased file is invalid.\n\nArgs:\nfile_path (string): path to bad config\ncause (string): reason of failure, i.e. what exactly was the\nproblem while parsing", "source": "juraj-google-style"}
{"code": "def add_peer_parser(subparsers, parent_parser):\n    parser = subparsers.add_parser('peer', help='Displays information about validator peers', description=\"Provides a subcommand to list a validator's peers\")\n    grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')\n    grand_parsers.required = True\n    add_peer_list_parser(grand_parsers, parent_parser)", "docstring": "Adds argument parser for the peer command\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "codesearchnet"}
{"code": "def send(self, message):\n    if ('call_id' not in message):\n        message['call_id'] = self.gen_call_id()\n    self._ws.send(message.to_json())", "docstring": "Sends a RTMMessage\nShould be called after starting the loop\n\nArgs:\nmessage(RTMMessage): the sending message\n\nRaises:\nWebSocketConnectionClosedException: if the loop is closed", "source": "codesearchnet"}
{"code": "def graph_execution_traces(self, digest=False, begin=None, end=None):\n    digests = self._graph_execution_trace_digests\n    if begin is not None or end is not None:\n        begin = begin or 0\n        end = end or len(digests)\n        digests = digests[begin:end]\n    if digest:\n        return digests\n    else:\n        return [self.read_graph_execution_trace(digest) for digest in digests]", "docstring": "Get all the intra-graph execution tensor traces read so far.\n\nArgs:\ndigest: Whether the results will be returned in the more light-weight\ndigest form.\nbegin: Optional beginning index for the requested traces or their digests.\nPython-style negative indices are supported.\nend: Optional ending index for the requested traces or their digests.\nPython-style negative indices are supported.\n\nReturns:\nIf `digest`: a `list` of `GraphExecutionTraceDigest` objects.\nElse: a `list` of `GraphExecutionTrace` objects.", "source": "github-repos"}
{"code": "def extract_derivative_feature(feature):\n    first_derivative_feature = processing.derivative_extraction(feature, DeltaWindows=2)\n    second_derivative_feature = processing.derivative_extraction(first_derivative_feature, DeltaWindows=2)\n    feature_cube = np.concatenate((feature[(:, :, None)], first_derivative_feature[(:, :, None)], second_derivative_feature[(:, :, None)]), axis=2)\n    return feature_cube", "docstring": "This function extracts temporal derivative features which are\nfirst and second derivatives.\n\nArgs:\nfeature (array): The feature vector which its size is: N x M\n\nReturn:\narray: The feature cube vector which contains the static, first and second derivative features of size: N x M x 3", "source": "codesearchnet"}
{"code": "def set_timezone(tz=None, deploy=False):\n    \n\n    if not tz:\n        raise CommandExecutionError(\"Timezone name option must not be none.\")\n\n    ret = {}\n\n    query = {'type': 'config',\n             'action': 'set',\n             'xpath': '/config/devices/entry[@name=\\'localhost.localdomain\\']/deviceconfig/system/timezone',\n             'element': '<timezone>{0}</timezone>'.format(tz)}\n\n    ret.update(__proxy__['panos.call'](query))\n\n    if deploy is True:\n        ret.update(commit())\n\n    return ret", "docstring": "Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.\n\nCLI Example:\n\nArgs:\ntz (str): The name of the timezone to set.\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. code-block:: bash\n\nsalt '*' panos.set_timezone UTC\nsalt '*' panos.set_timezone UTC deploy=True", "source": "juraj-google-style"}
{"code": "def __definitions_descriptor(self):\n    result = {}\n    for (def_key, def_value) in self.__parser.schemas().iteritems():\n        if (('properties' in def_value) or ('type' in def_value)):\n            key_result = {}\n            required_keys = set()\n            if ('type' in def_value):\n                key_result['type'] = def_value['type']\n            if ('properties' in def_value):\n                for (prop_key, prop_value) in def_value['properties'].items():\n                    if (isinstance(prop_value, dict) and ('required' in prop_value)):\n                        required_keys.add(prop_key)\n                        del prop_value['required']\n                key_result['properties'] = def_value['properties']\n            if required_keys:\n                key_result['required'] = sorted(required_keys)\n            result[def_key] = key_result\n    for def_value in result.itervalues():\n        for prop_value in def_value.itervalues():\n            if isinstance(prop_value, dict):\n                if ('$ref' in prop_value):\n                    prop_value['type'] = 'object'\n                self._add_def_paths(prop_value)\n    return result", "docstring": "Describes the definitions section of the OpenAPI spec.\n\nReturns:\nDictionary describing the definitions of the spec.", "source": "codesearchnet"}
{"code": "def xpath(self, exact=None):\n    exact = (exact if (exact is not None) else self.exact)\n    if isinstance(self.expression, AbstractExpression):\n        expression = self._apply_expression_filters(self.expression)\n        return to_xpath(expression, exact=exact)\n    else:\n        return str_(self.expression)", "docstring": "Returns the XPath query for this selector.\n\nArgs:\nexact (bool, optional): Whether to exactly match text.\n\nReturns:\nstr: The XPath query for this selector.", "source": "codesearchnet"}
{"code": "def first_function(function: _evaluation.FirstFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n    del params_result\n    if operand_result is None:\n        raise ValueError('first() cannot be called without an operand.')\n    result = copy.copy(operand_result)\n    if _fhir_path_data_types.is_collection(function.parent_node.return_type):\n        return _sql_data_types.Select(select_part=result.select_part, from_part=f'(SELECT FIRST({result.sql_alias}) AS {result.sql_alias} FROM {result.to_subquery()})', sql_dialect=_sql_data_types.SqlDialect.SPARK)\n    else:\n        new_alias = result.sql_alias\n        return _sql_data_types.Select(select_part=_sql_data_types.Identifier((new_alias,), _sql_data_type=result.sql_data_type, _sql_alias=new_alias), from_part=f'(SELECT FIRST({new_alias}) AS {new_alias} FROM {result.to_subquery()})', sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Generates Spark SQL representing the FHIRPath first() function.\n\nReturns a collection with the first value of the operand collection.\n\nThe returned SQL expression is a table with cardinality 0 or 1.\n\nArgs:\nfunction: The FHIRPath AST `FirstFunction` node\noperand_result: The expression which is being evaluated\nparams_result: The parameter passed in to function\n\nReturns:\nA compiled Spark SQL expression.\n\nRaises:\nValueError: When the function is called without an operand", "source": "github-repos"}
{"code": "def Match(self, registry_key):\n    \n    value_names = frozenset([\n        registry_value.name for registry_value in registry_key.GetValues()])\n\n    return self._value_names.issubset(value_names)", "docstring": "Determines if a Windows Registry key matches the filter.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nReturns:\nbool: True if the keys match.", "source": "juraj-google-style"}
{"code": "def __init__(self, outer_index, inner_index):\n    if outer_index.batch_dims != inner_index.batch_dims:\n        raise ValueError('outer_index.batch_dims and inner_index.batch_dims must be the same.')\n    super(ProductIndexMap, self).__init__(indices=inner_index.indices + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims)\n    self.outer_index = outer_index\n    self.inner_index = inner_index", "docstring": "Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the\nintersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows\nand columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation\ncombines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to\n`outer_index.num_segements` * `inner_index.num_segments`.\n\nArgs:\nouter_index: IndexMap.\ninner_index: IndexMap, must have the same shape as `outer_index`.", "source": "github-repos"}
{"code": "def probe_async(self, callback):\n\n    def _on_finished(_name, control_info, exception):\n        if (exception is not None):\n            callback(self.id, False, str(exception))\n            return\n        self._control_info = control_info\n        try:\n            info = {'connection_string': 'direct', 'uuid': control_info.uuid, 'signal_strength': 100}\n            self._trigger_callback('on_scan', self.id, info, self.ExpirationTime)\n        finally:\n            callback(self.id, True, None)\n    self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size)", "docstring": "Send advertisements for all connected devices.\n\nArgs:\ncallback (callable): A callback for when the probe operation has completed.\ncallback should have signature callback(adapter_id, success, failure_reason) where:\nsuccess: bool\nfailure_reason: None if success is True, otherwise a reason for why we could not probe", "source": "codesearchnet"}
{"code": "def get_best_dataset_key(key, choices):\n    if ((key.wavelength is not None) and choices):\n        nearest_wl = min([_wl_dist(key.wavelength, x.wavelength) for x in choices if (x.wavelength is not None)])\n        choices = [c for c in choices if (_wl_dist(key.wavelength, c.wavelength) == nearest_wl)]\n    if ((key.modifiers is None) and choices):\n        num_modifiers = min((len((x.modifiers or tuple())) for x in choices))\n        choices = [c for c in choices if (len((c.modifiers or tuple())) == num_modifiers)]\n    if ((key.calibration is None) and choices):\n        best_cal = [x.calibration for x in choices if x.calibration]\n        if best_cal:\n            best_cal = min(best_cal, key=(lambda x: CALIBRATION_ORDER[x]))\n            choices = [c for c in choices if (c.calibration == best_cal)]\n    if ((key.resolution is None) and choices):\n        low_res = [x.resolution for x in choices if x.resolution]\n        if low_res:\n            low_res = min(low_res)\n            choices = [c for c in choices if (c.resolution == low_res)]\n    if ((key.level is None) and choices):\n        low_level = [x.level for x in choices if x.level]\n        if low_level:\n            low_level = max(low_level)\n            choices = [c for c in choices if (c.level == low_level)]\n    return choices", "docstring": "Choose the \"best\" `DatasetID` from `choices` based on `key`.\n\nThe best key is chosen based on the follow criteria:\n\n1. Central wavelength is nearest to the `key` wavelength if\nspecified.\n2. Least modified dataset if `modifiers` is `None` in `key`.\nOtherwise, the modifiers are ignored.\n3. Highest calibration if `calibration` is `None` in `key`.\nCalibration priority is chosen by `satpy.CALIBRATION_ORDER`.\n4. Best resolution (smallest number) if `resolution` is `None`\nin `key`. Otherwise, the resolution is ignored.\n\nThis function assumes `choices` has already been filtered to only\ninclude datasets that match the provided `key`.\n\nArgs:\nkey (DatasetID): Query parameters to sort `choices` by.\nchoices (iterable): `DatasetID` objects to sort through to determine\nthe best dataset.\n\nReturns: List of best `DatasetID`s from `choices`. If there is more\nthan one element this function could not choose between the\navailable datasets.", "source": "codesearchnet"}
{"code": "def _StopMonitoringProcess(self, process):\n    if (process is None):\n        raise ValueError('Missing process.')\n    pid = process.pid\n    self._RaiseIfNotMonitored(pid)\n    del self._process_information_per_pid[pid]\n    rpc_client = self._rpc_clients_per_pid.get(pid, None)\n    if rpc_client:\n        rpc_client.Close()\n        del self._rpc_clients_per_pid[pid]\n    if (pid in self._rpc_errors_per_pid):\n        del self._rpc_errors_per_pid[pid]\n    logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(process.name, pid))", "docstring": "Stops monitoring a process.\n\nArgs:\nprocess (MultiProcessBaseProcess): process.\n\nRaises:\nKeyError: if the process is not monitored.\nValueError: if the process is missing.", "source": "codesearchnet"}
{"code": "def load(self, key_filter=None, header_preproc=None):\n        \n        \n        df = pd.read_csv(self.input_file,\n                         sep='\\t',\n                         dtype=object)\n\n        if key_filter is not None:\n            \n            df = df[df[df.columns[0]].str.match(key_filter)]\n\n        \n        meta_col = df.columns[0]\n        df[meta_col] = df[meta_col].str.split(',').str[-1]\n\n        \n        for col_name in df.columns[1:]:\n            \n            stripped = df[col_name].str.replace(r'[a-z]', '')\n\n            \n            df[col_name] = pd.to_numeric(stripped, errors='coerce')\n\n        \n        if header_preproc is not None:\n            df.columns = list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]]\n\n        \n        \n        df.columns = ['key'] + [int(y) for y in df.columns[1:]]\n\n        return df", "docstring": "Load data table from tsv file, from default location\n\nArgs:\nkey_filter (str): additional filter for key column - regex matching\nkey values to include; None for no filter\n\nheader_preproc (func): function to apply to column headers to extract year numbers (as strings)\n\nReturns:\npd.DataFrame: data", "source": "juraj-google-style"}
{"code": "def _format_field_name(self, field_name) -> str:\n        \n\n        field = self._get_model_field(field_name)\n        return self.qn(field.column)", "docstring": "Formats a field's name for usage in SQL.\n\nArguments:\nfield_name:\nThe field name to format.\n\nReturns:\nThe specified field name formatted for\nusage in SQL.", "source": "juraj-google-style"}
{"code": "def draw_rects(self, *rects):\n        \n        rect_array = ffi.new('SDL_Rect[]', len(rects))\n        for i, r in enumerate(rects):\n            rect_array[i] = r._ptr[0]\n        check_int_err(lib.SDL_RenderDrawRects(self._ptr, rect_array, len(rects)))", "docstring": "Draw some number of rectangles on the current rendering target.\n\nArgs:\n*rects (Rect): The destination rectangles.\n\nRaises:\nSDLError: If an error is encountered.", "source": "juraj-google-style"}
{"code": "def get_group(self, uuid=None):\n    if (uuid is None):\n        uuid = self.uuid\n    group_data = self.get('group', params={'uuid': uuid})\n    return group_data", "docstring": "Get group data based on uuid.\n\nArgs:\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: No data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\ndict: group json", "source": "codesearchnet"}
{"code": "def retrieve_products(self, reviewer):\n        \n        if not isinstance(reviewer, self._reviewer_cls):\n            raise TypeError(\n                \"Type of given reviewer isn't acceptable:\", reviewer,\n                \", expected:\", self._reviewer_cls)\n        return list(self.graph.successors(reviewer))", "docstring": "Retrieve products reviewed by a given reviewer.\n\nArgs:\nreviewer: A reviewer.\n\nReturns:\nA list of products which the reviewer reviews.\n\nRaises:\nTypeError: when given reviewer isn't instance of specified reviewer\nclass when this graph is constructed.", "source": "juraj-google-style"}
{"code": "def use_pcm(self, pcm_params=None, solvent_key='solvent', solvent_params=None, radii_force_field=None):\n    self.params['pcm'] = dict()\n    self.params[solvent_key] = dict()\n    default_pcm_params = {'Theory': 'SSVPE', 'vdwScale': 1.1, 'Radii': 'UFF'}\n    if (not solvent_params):\n        solvent_params = {'Dielectric': 78.3553}\n    if pcm_params:\n        for (k, v) in pcm_params.items():\n            self.params['pcm'][k.lower()] = (v.lower() if isinstance(v, str) else v)\n    for (k, v) in default_pcm_params.items():\n        if (k.lower() not in self.params['pcm'].keys()):\n            self.params['pcm'][k.lower()] = (v.lower() if isinstance(v, str) else v)\n    for (k, v) in solvent_params.items():\n        self.params[solvent_key][k.lower()] = (v.lower() if isinstance(v, str) else copy.deepcopy(v))\n    self.params['rem']['solvent_method'] = 'pcm'\n    if radii_force_field:\n        self.params['pcm']['radii'] = 'bondi'\n        self.params['rem']['force_fied'] = radii_force_field.lower()", "docstring": "Set the solvent model to PCM. Default parameters are trying to comply to\ngaussian default value\n\nArgs:\npcm_params (dict): The parameters of \"$pcm\" section.\nsolvent_key (str): for versions < 4.2 the section name is \"pcm_solvent\"\nsolvent_params (dict): The parameters of solvent_key section\nradii_force_field (str): The force fied used to set the solute\nradii. Default to UFF.", "source": "codesearchnet"}
{"code": "def trigger(self, when=1):\n    tw = Window(self.stream, self._config['type'])\n    tw._config['evictPolicy'] = self._config['evictPolicy']\n    tw._config['evictConfig'] = self._config['evictConfig']\n    if (self._config['evictPolicy'] == 'TIME'):\n        tw._config['evictTimeUnit'] = 'MILLISECONDS'\n    if isinstance(when, datetime.timedelta):\n        tw._config['triggerPolicy'] = 'TIME'\n        tw._config['triggerConfig'] = int((when.total_seconds() * 1000.0))\n        tw._config['triggerTimeUnit'] = 'MILLISECONDS'\n    elif isinstance(when, int):\n        tw._config['triggerPolicy'] = 'COUNT'\n        tw._config['triggerConfig'] = when\n    else:\n        raise ValueError(when)\n    return tw", "docstring": "Declare a window with this window's size and a trigger policy.\n\nWhen the window is triggered is defined by `when`.\n\nIf `when` is an `int` then the window is triggered every\n`when` tuples.  For example, with ``when=5`` the window\nwill be triggered every five tuples.\n\nIf `when` is an `datetime.timedelta` then it is the period\nof the trigger. With a `timedelta` representing one minute\nthen the window is triggered every minute.\n\nBy default, when `trigger` has not been called on a `Window`\nit triggers for every tuple inserted into the window\n(equivalent to ``when=1``).\n\nArgs:\nwhen: The size of the window, either an `int` to define the\nnumber of tuples or `datetime.timedelta` to define the\nduration of the window.\n\nReturns:\nWindow: Window that will be triggered.\n\n.. warning:: A trigger is only supported for a sliding window\nsuch as one created by :py:meth:`last`.", "source": "codesearchnet"}
{"code": "def num_batches(self):\n    raise NotImplementedError", "docstring": "Return the size (number of batches) for the dataset created.\n\nFor certain type of the data input, the number of batches is known, eg\nfor Numpy data, the size is same as (number_of_element / batch_size).\nWhereas for dataset or python generator, the size is unknown since it\nmay or may not have an end state.\n\nReturns:\nint, the number of batches for the dataset, or None if it is\nunknown.  The caller could use this to control the loop of training,\nshow progress bar, or handle unexpected StopIteration error.", "source": "github-repos"}
{"code": "def bounding_box_from(points, i, i1, thr):\n    pi = points[i]\n    pi1 = points[i1]\n    min_lat = min(pi.lat, pi1.lat)\n    min_lon = min(pi.lon, pi1.lon)\n    max_lat = max(pi.lat, pi1.lat)\n    max_lon = max(pi.lon, pi1.lon)\n    return ((min_lat - thr), (min_lon - thr), (max_lat + thr), (max_lon + thr))", "docstring": "Creates bounding box for a line segment\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\ni (int): Line segment start, index in points array\ni1 (int): Line segment end, index in points array\nReturns:\n(float, float, float, float): with bounding box min x, min y, max x and max y", "source": "codesearchnet"}
{"code": "def _serve_audio_metadata(self, request):\n    \n    tag = request.args.get('tag')\n    run = request.args.get('run')\n    sample = int(request.args.get('sample', 0))\n\n    events = self._multiplexer.Tensors(run, tag)\n    response = self._audio_response_for_run(events, run, tag, sample)\n    return http_util.Respond(request, response, 'application/json')", "docstring": "Given a tag and list of runs, serve a list of metadata for audio.\n\nNote that the actual audio data are not sent; instead, we respond\nwith URLs to the audio. The frontend should treat these URLs as\nopaque and should not try to parse information about them or\ngenerate them itself, as the format may change.\n\nArgs:\nrequest: A werkzeug.wrappers.Request object.\n\nReturns:\nA werkzeug.Response application.", "source": "juraj-google-style"}
{"code": "def add_log_file(path):\n    \n    logfile_handler = RotatingFileHandler(\n        path, maxBytes=50000, backupCount=2)\n    formatter = logging.Formatter(\n        fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',\n        datefmt=\"%d-%b-%Y %H:%M:%S\")\n    logfile_handler.setFormatter(formatter)\n    geoparse_logger.addHandler(logfile_handler)", "docstring": "Add log file.\n\nArgs:\npath (:obj:`str`): Path to the log file.", "source": "juraj-google-style"}
{"code": "def __init__(self,\n                 unique_identifier=None,\n                 data=None):\n        \n        super(DecryptResponsePayload, self).__init__(\n            enums.Tags.RESPONSE_PAYLOAD\n        )\n\n        self._unique_identifier = None\n        self._data = None\n\n        self.unique_identifier = unique_identifier\n        self.data = data", "docstring": "Construct a Decrypt response payload struct.\n\nArgs:\nunique_identifier (string): The ID of the managed object (e.g.,\na symmetric key) used for decryption. Required for encoding\nand decoding.\ndata (bytes): The decrypted data in binary form. Required for\nencoding and decoding.", "source": "juraj-google-style"}
{"code": "def __init__(self, function_name, unique_function_id, node_name_prefix, attr_name, level=1, children_inputs_mappings=None):\n    self._function_name = function_name\n    self._unique_function_id = unique_function_id\n    self._next_global_index = 0\n    self._used_global_indices = set()\n    self._tag_to_global_index = {}\n    self._tag_to_next_sort_index = {}\n    self._node_name_prefix = node_name_prefix\n    self._attr_name = attr_name\n    self._level = level\n    self._children_inputs_mappings = children_inputs_mappings", "docstring": "Initialize ophint argument.\n\nArgs:\nfunction_name: Name of the function that this tracks arguments for.\nunique_function_id: UUID of function that this tracks arguments for.\nnode_name_prefix: How identities that are created are named.\nattr_name: Name of attribute to use to store the index for this hint.\ni.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX\nlevel: Hierarchical level of the Ophint node, a number.\nchildren_inputs_mappings: Inputs/Outputs mapping for children hints.", "source": "github-repos"}
{"code": "def getaccountaddress(self, user_id=\"\"):\n        \n        address = self.rpc.call(\"getaccountaddress\", user_id)\n        self.logger.debug(\"Your\", self.coin, \"address is\", address)\n        return address", "docstring": "Get the coin address associated with a user id.\n\nIf the specified user id does not yet have an address for this\ncoin, then generate one.\n\nArgs:\nuser_id (str): this user's unique identifier\n\nReturns:\nstr: Base58Check address for this account", "source": "juraj-google-style"}
{"code": "def _LinearFoldByteStream(self, mapped_value, **unused_kwargs):\n    \n    try:\n      attribute_values = [\n          getattr(mapped_value, attribute_name, None)\n          for attribute_name in self._attribute_names]\n      attribute_values = [\n          value for value in attribute_values if value is not None]\n      return self._operation.WriteTo(tuple(attribute_values))\n\n    except Exception as exception:\n      error_string = (\n          'Unable to write: {0:s} to byte stream with error: {1!s}').format(\n              self._data_type_definition.name, exception)\n      raise errors.FoldingError(error_string)", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def set_name(self, name):\n    if (not self._campfire.get_user().admin):\n        return False\n    result = self._connection.put(('room/%s' % self.id), {'room': {'name': name}})\n    if result['success']:\n        self._load()\n    return result['success']", "docstring": "Set the room name.\n\nArgs:\nname (str): Name\n\nReturns:\nbool. Success", "source": "codesearchnet"}
{"code": "def undo_windowing(hidden_states: torch.Tensor, shape: List[int], mask_unit_shape: List[int]) -> torch.Tensor:\n    batch_size, hidden_size = (hidden_states.shape[0], hidden_states.shape[-1])\n    num_mask_units = [s \n    hidden_states = hidden_states.view(batch_size, *num_mask_units, *mask_unit_shape, hidden_size)\n    hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5)\n    hidden_states = hidden_states.reshape(batch_size, *shape, hidden_size)\n    return hidden_states", "docstring": "Restore spatial organization by undoing windowed organization of mask units.\n\nArgs:\nhidden_states (`torch.Tensor`): The hidden states tensor of shape `[batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size]`.\nshape (`List[int]`): The original shape of the hidden states tensor before windowing.\nmask_unit_shape (`List[int]`): The shape of the mask units used for windowing.\n\nReturns:\ntorch.Tensor: The restored hidden states tensor of shape [batch_size, num_mask_unit_height*mask_unit_height, num_mask_unit_width*mask_unit_width, hidden_size].", "source": "github-repos"}
{"code": "def _handle_request(self, request):\n        \n        if request is None:\n            return Response(success=False, uid=request.uid)\n\n        action_map = {\n            'start_dag': self._handle_start_dag,\n            'stop_workflow': self._handle_stop_workflow,\n            'join_dags': self._handle_join_dags,\n            'stop_dag': self._handle_stop_dag,\n            'is_dag_stopped': self._handle_is_dag_stopped\n        }\n\n        if request.action in action_map:\n            return action_map[request.action](request)\n        else:\n            raise RequestActionUnknown()", "docstring": "Handle an incoming request by forwarding it to the appropriate method.\n\nArgs:\nrequest (Request): Reference to a request object containing the\nincoming request.\n\nRaises:\nRequestActionUnknown: If the action specified in the request is not known.\n\nReturns:\nResponse: A response object containing the response from the method handling\nthe request.", "source": "juraj-google-style"}
{"code": "class RandomUniform(Initializer):\n\n    def __init__(self, minval=-0.05, maxval=0.05, seed=None):\n        self.minval = minval\n        self.maxval = maxval\n        self.seed = seed\n        self._random_generator = _RandomGenerator(seed)\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        \n        _validate_kwargs(self.__class__.__name__, kwargs)\n        dtype = _get_dtype(dtype)\n        if not dtype.is_floating and (not dtype.is_integer):\n            raise ValueError('Expected float or integer dtype, got %s.' % dtype)\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)\n\n    def get_config(self):\n        return {'minval': self.minval, 'maxval': self.maxval, 'seed': self.seed}", "docstring": "Initializer that generates tensors with a uniform distribution.\n\nAlso available via the shortcut function\n`tf.keras.initializers.random_uniform`.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)\n>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\nArgs:\nminval: A python scalar or a scalar tensor. Lower bound of the range of\nrandom values to generate (inclusive).\nmaxval: A python scalar or a scalar tensor. Upper bound of the range of\nrandom values to generate (exclusive).\nseed: A Python integer. An initializer created with a given seed will\nalways produce the same random tensor for a given shape and dtype.", "source": "github-repos"}
{"code": "def _write_init_fetchers(self, filenames):\n        \n        destination = \"%s%s\" % (self.output_directory, self.fetchers_path)\n        self.write(destination=destination, filename=\"__init__.py\", template_name=\"__init_fetcher__.py.tpl\",\n                   filenames=self._prepare_filenames(filenames, suffix='Fetcher'),\n                   class_prefix=self._class_prefix,\n                   product_accronym=self._product_accronym,\n                   header=self.header_content)", "docstring": "Write fetcher init file\n\nArgs:\nfilenames (dict): dict of filename and classes", "source": "juraj-google-style"}
{"code": "def _StopFolderSelectionMethod(self, stop_folder):\n    if (not self.show_stop_hierarchy):\n        return (lambda stop: (stop_folder, None))\n    station_folder = self._CreateFolder(stop_folder, 'Stations')\n    platform_folder = self._CreateFolder(stop_folder, 'Platforms')\n    platform_connections = self._CreateFolder(platform_folder, 'Connections')\n    entrance_folder = self._CreateFolder(stop_folder, 'Entrances')\n    entrance_connections = self._CreateFolder(entrance_folder, 'Connections')\n    standalone_folder = self._CreateFolder(stop_folder, 'Stand-Alone')\n\n    def FolderSelectionMethod(stop):\n        if (stop.location_type == transitfeed.Stop.LOCATION_TYPE_STATION):\n            return (station_folder, None)\n        elif (stop.location_type == googletransit.Stop.LOCATION_TYPE_ENTRANCE):\n            return (entrance_folder, entrance_connections)\n        elif stop.parent_station:\n            return (platform_folder, platform_connections)\n        return (standalone_folder, None)\n    return FolderSelectionMethod", "docstring": "Create a method to determine which KML folder a stop should go in.\n\nArgs:\nstop_folder: the parent folder element for all stops.\n\nReturns:\nA function that should accept a Stop argument and return a tuple of\n(stop KML folder, pathways KML folder).\n\nGiven a Stop, we need to determine which folder the stop should go in.  In\nthe most basic case, that's the root Stops folder.  However, if\nshow_stop_hierarchy is enabled, we put a stop in a separate sub-folder\ndepending on if the stop is a station, a platform, an entrance, or just a\nplain-old stand-alone stop.  This method returns a function that is used\nto pick which folder a stop stop should go in.  It also optionally returns\na folder where any line-string connections associated with a stop (eg. to\nshow the pathway between an entrance and a station) should be added.", "source": "codesearchnet"}
{"code": "def __init__(self, hash_start=[], hash_stop=UInt256()):\n        \n        self.HashStart = hash_start\n        self.HashStop = hash_stop", "docstring": "Create an instance.\n\nArgs:\nhash_start (list): a list of hash values. Each value is of the bytearray type. Note: should actually be UInt256 objects.\nhash_stop (UInt256):", "source": "juraj-google-style"}
{"code": "def append_dims_and_file_extension(fname, data_df):\n    \n    \n    if not fname.endswith(\".gct\"):\n        out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])\n        return out_fname\n\n    \n    else:\n        basename = os.path.splitext(fname)[0]\n        out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0])\n        return out_fname", "docstring": "Append dimensions and file extension to output filename.\nN.B. Dimensions are cols x rows.\n\nArgs:\nfname (string): output filename\ndata_df (pandas df)\nReturns:\nout_fname (string): output filename with matrix dims and .gct appended", "source": "juraj-google-style"}
{"code": "def walk_dependencies(root, visitor):\n    \n    def visit(parent, visitor):\n        for d in get_dependencies(parent):\n            visitor(d, parent)\n            visit(d, visitor)\n\n    visitor(root, None)\n    visit(root, visitor)", "docstring": "Call visitor on root and all dependencies reachable from it in breadth\nfirst order.\n\nArgs:\nroot (component): component function or class\nvisitor (function): signature is `func(component, parent)`.  The\ncall on root is `visitor(root, None)`.", "source": "juraj-google-style"}
{"code": "def GetBaseFiles(self, diff):\n    files = {}\n    for line in diff.splitlines(True):\n        if (line.startswith('Index:') or line.startswith('Property changes on:')):\n            (unused, filename) = line.split(':', 1)\n            filename = to_slash(filename.strip())\n            files[filename] = self.GetBaseFile(filename)\n    return files", "docstring": "Helper that calls GetBase file for each file in the patch.\n\nReturns:\nA dictionary that maps from filename to GetBaseFile's tuple.  Filenames\nare retrieved based on lines that start with \"Index:\" or\n\"Property changes on:\".", "source": "codesearchnet"}
{"code": "def CreateTaskStart(self):\n    task_start = TaskStart()\n    task_start.identifier = self.identifier\n    task_start.session_identifier = self.session_identifier\n    task_start.timestamp = self.start_time\n    return task_start", "docstring": "Creates a task start.\n\nReturns:\nTaskStart: task start attribute container.", "source": "codesearchnet"}
{"code": "def VerifyServerPEM(self, http_object):\n    \n    try:\n      server_pem = http_object.data\n      server_url = http_object.url\n\n      if b\"BEGIN CERTIFICATE\" in server_pem:\n        \n        \n        server_certificate = rdf_crypto.RDFX509Cert(server_pem)\n        self.communicator.LoadServerCertificate(\n            server_certificate=server_certificate, ca_certificate=self.ca_cert)\n\n        logging.info(\"Server PEM re-keyed.\")\n        return True\n    except Exception as e:  \n      logging.info(\"Unable to verify server certificate at %s: %s\", server_url,\n                   e)\n\n      return False", "docstring": "Check the server PEM for validity.\n\nThis is used to determine connectivity to the server. Sometimes captive\nportals return a valid HTTP status, but the data is corrupted.\n\nArgs:\nhttp_object: The response received from the server.\n\nReturns:\nTrue if the response contains a valid server certificate.", "source": "juraj-google-style"}
{"code": "def _make_request(self, url, method='get', data=None, extra_headers=None):\n    attempts = 0\n    while (attempts < 1):\n        if (not self._is_authenticated):\n            self._authenticate()\n        try:\n            return self._send_request(url, method, data, extra_headers)\n        except HTTPError as e:\n            if (e.response.status_code == 403):\n                logger.info('Authenticated session against NetMRI timed out. Retrying.')\n                self._is_authenticated = False\n                attempts += 1\n            else:\n                raise", "docstring": "Prepares the request, checks for authentication and retries in case of issues\n\nArgs:\nurl (str): URL of the request\nmethod (str): Any of \"get\", \"post\", \"delete\"\ndata (any): Possible extra data to send with the request\nextra_headers (dict): Possible extra headers to send along in the request\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def Copy(self, name=None):\n        \n        new = copy.copy(self)\n        new.d = copy.copy(self.d)\n        new.name = name if name is not None else self.name\n        return new", "docstring": "Returns a copy.\n\nMake a shallow copy of d.  If you want a deep copy of d,\nuse copy.deepcopy on the whole object.\n\nArgs:\nname: string name for the new Hist", "source": "juraj-google-style"}
{"code": "def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):\n    dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)\n    dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()\n    dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()\n    nsmap = INKSCAPE_NSMAP\n    dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']\n    coord_columns = ['x_source', 'y_source', 'x_target', 'y_target']\n    line_layer = dwg.g(id=('layer%d' % layer_number), **{'inkscape:label': layer_name, 'inkscape:groupmode': 'layer'})\n    for (i, (x1, y1, x2, y2)) in df_endpoints[coord_columns].iterrows():\n        line_i = dwg.line((x1, y1), (x2, y2), id=('line%d' % i), style='stroke:\n        line_layer.add(line_i)\n    dwg.add(line_layer)\n    output = StringIO.StringIO()\n    dwg.write(output)\n    output.seek(0)\n    return output", "docstring": "Draw lines defined by endpoint coordinates as a layer in a SVG file.\n\nArgs:\n\ndf_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints\nof a single line, encoded through the columns: ``x_source``,\n``y_source``, ``x_target``, and ``y_target``.\nlayer_name (str) : Name of Inkscape layer.\nlayer_number (int, optional) : Z-order index of Inkscape layer.\n\nReturns\n-------\nStringIO.StringIO\nA file-like object containing SVG XML source.\n\nThe XML contains a layer named ``\"Connections\"``, which in turn\ncontains one line per row in the input :data:`df_endpoints` table.", "source": "codesearchnet"}
{"code": "def get_formatted_string(self, input_string):\n    if isinstance(input_string, str):\n        try:\n            return self.get_processed_string(input_string)\n        except KeyNotInContextError as err:\n            raise KeyNotInContextError(f\"Unable to format '{input_string}' because {err}\") from err\n    elif isinstance(input_string, SpecialTagDirective):\n        return input_string.get_value(self)\n    else:\n        raise TypeError(f'can only format on strings. {input_string} is a {type(input_string)} instead.')", "docstring": "Return formatted value for input_string.\n\nget_formatted gets a context[key] value.\nget_formatted_string is for any arbitrary string that is not in the\ncontext.\n\nOnly valid if input_string is a type string.\nReturn a string interpolated from the context dictionary.\n\nIf input_string='Piping {key1} the {key2} wild'\nAnd context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}\n\nThen this will return string: \"Piping down the valleys wild\"\n\nArgs:\ninput_string: string to parse for substitutions.\n\nReturns:\nFormatted string.\n\nRaises:\nKeyNotInContextError: context[key] has {somekey} where somekey does\nnot exist in context dictionary.\nTypeError: Attempt operation on a non-string type.", "source": "codesearchnet"}
{"code": "def runCmd(cls, cmd):\n    cit.echo(cmd, 'command')\n    result = os.system(cmd)\n    cls.checkResult(result)", "docstring": "run command and show if success or failed\n\nArgs:\ncmd: string\nReturns:\nbool: if this command run successfully", "source": "codesearchnet"}
{"code": "async def run_tasks(context):\n    running_tasks = RunTasks()\n    context.running_tasks = running_tasks\n    status = (await running_tasks.invoke(context))\n    context.running_tasks = None\n    return status", "docstring": "Run any tasks returned by claimWork.\n\nReturns the integer status of the task that was run, or None if no task was\nrun.\n\nargs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nRaises:\nException: on unexpected exception.\n\nReturns:\nint: exit status\nNone: if no task run.", "source": "codesearchnet"}
{"code": "def security_label(self, name, description=None, color=None):\n    label = SecurityLabel(name, description, color)\n    for label_data in self._labels:\n        if (label_data.name == name):\n            label = label_data\n            break\n    else:\n        self._labels.append(label)\n    return label", "docstring": "Return instance of SecurityLabel.\n\n.. note:: The provided security label will be create if it doesn't exist. If the security\nlabel already exists nothing will be changed.\n\nArgs:\nname (str): The value for this security label.\ndescription (str): A description for this security label.\ncolor (str): A color (hex value) for this security label.\n\nReturns:\nobj: An instance of SecurityLabel.", "source": "codesearchnet"}
{"code": "def scripthash_to_address(scripthash):\n    \n    sb = bytearray([ADDRESS_VERSION]) + scripthash\n    c256 = bin_dbl_sha256(sb)[0:4]\n    outb = sb + bytearray(c256)\n    return base58.b58encode(bytes(outb)).decode(\"utf-8\")", "docstring": "Convert a script hash to a public address.\n\nArgs:\nscripthash (bytes):\n\nReturns:\nstr: base58 encoded string representing the wallet address.", "source": "juraj-google-style"}
{"code": "def Update(self, attribute=None):\n    \n    \n    client_id = self.urn.Split()[0]\n\n    if attribute == \"CONTAINS\":\n      \n      flow_id = flow.StartAFF4Flow(\n          client_id=client_id,\n          \n          \n          \n          \n          flow_name=\"ListDirectory\",\n          pathspec=self.real_pathspec,\n          notify_to_user=False,\n          token=self.token)\n\n      return flow_id", "docstring": "Refresh an old attribute.\n\nNote that refreshing the attribute is asynchronous. It does not change\nanything about the current object - you need to reopen the same URN some\ntime later to get fresh data.\n\nAttributes: CONTAINS - Refresh the content of the directory listing.\nArgs:\nattribute: An attribute object as listed above.\n\nReturns:\nThe Flow ID that is pending\n\nRaises:\nIOError: If there has been an error starting the flow.", "source": "juraj-google-style"}
{"code": "def CanonicalPathToLocalPath(path):\n  r\n  \n  path = path.replace(\"/\\\\\", \"\\\\\")\n  path = path.replace(\"/\", \"\\\\\")\n  m = re.match(r\"\\\\([a-zA-Z]):(.*)$\", path)\n  if m:\n    path = \"%s:\\\\%s\" % (m.group(1), m.group(2).lstrip(\"\\\\\"))\n\n  return path", "docstring": "r\"\"\"Converts the canonical paths as used by GRR to OS specific paths.\n\nDue to the inconsistencies between handling paths in windows we need to\nconvert a path to an OS specific version prior to using it. This function\nshould be called just before any OS specific functions.\n\nCanonical paths on windows have:\n- / instead of \\.\n- Begin with /X:// where X is the drive letter.\n\nArgs:\npath: A canonical path specification.\n\nReturns:\nA windows specific path.", "source": "juraj-google-style"}
{"code": "def distance_from_point(self, pt):\n        \n        return np.linalg.norm(np.array(pt) - self.coords)", "docstring": "Returns distance between the site and a point in space.\n\nArgs:\npt: Cartesian coordinates of point.\n\nReturns:\nDistance (float)", "source": "juraj-google-style"}
{"code": "def initialize_remaining_constants(self, value=0):\n    remaining = []\n    for (node, _inputs, _outputs) in self.iterate_bfs():\n        streams = (node.input_streams() + [node.stream])\n        for stream in streams:\n            if (stream.stream_type is not DataStream.ConstantType):\n                continue\n            if (stream not in self.constant_database):\n                self.add_constant(stream, value)\n                remaining.append(stream)\n    return remaining", "docstring": "Ensure that all constant streams referenced in the sensor graph have a value.\n\nConstant streams that are automatically created by the compiler are initialized\nas part of the compilation process but it's possible that the user references\nother constant streams but never assigns them an explicit initial value.  This\nfunction will initialize them all to a default value (0 if not passed) and\nreturn the streams that were so initialized.\n\nArgs:\nvalue (int): Optional value to use to initialize all uninitialized constants.\nDefaults to 0 if not passed.\n\nReturns:\nlist(DataStream): A list of all of the constant streams that were not previously\ninitialized and were initialized to the given value in this function.", "source": "codesearchnet"}
{"code": "def Execute(self, http):\n        \n\n        self._Execute(http)\n\n        for key in self.__request_response_handlers:\n            response = self.__request_response_handlers[key].response\n            callback = self.__request_response_handlers[key].handler\n\n            exception = None\n\n            if response.status_code >= 300:\n                exception = exceptions.HttpError.FromResponse(response)\n\n            if callback is not None:\n                callback(response, exception)\n            if self.__callback is not None:\n                self.__callback(response, exception)", "docstring": "Execute all the requests as a single batched HTTP request.\n\nArgs:\nhttp: A httplib2.Http object to be used with the request.\n\nReturns:\nNone\n\nRaises:\nBatchError if the response is the wrong format.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_mediator):\n    \n    super(SharedElasticsearchOutputModule, self).__init__(output_mediator)\n    self._client = None\n    self._document_type = self._DEFAULT_DOCUMENT_TYPE\n    self._event_documents = []\n    self._flush_interval = self._DEFAULT_FLUSH_INTERVAL\n    self._host = None\n    self._index_name = None\n    self._number_of_buffered_events = 0\n    self._password = None\n    self._port = None\n    self._username = None\n    self._use_ssl = None\n    self._ca_certs = None\n    self._url_prefix = None", "docstring": "Initializes an Elasticsearch output module.\n\nArgs:\noutput_mediator (OutputMediator): mediates interactions between output\nmodules and other components, such as storage and dfvfs.", "source": "juraj-google-style"}
{"code": "def merge_files(context):\n  \n  resolver = EFTemplateResolver(\n      profile=context.profile,\n      region=context.region,\n      env=context.env,\n      service=context.service\n  )\n\n  try:\n    with open(context.template_path, 'r') as f:\n      template_body = f.read()\n      f.close()\n  except IOError as error:\n    raise IOError(\"Error loading template file: {} {}\".format(context.template_path, repr(error)))\n\n  if context.no_params is False:\n    try:\n      with open(context.param_path, 'r') as f:\n        param_body = f.read()\n        f.close()\n    except IOError as error:\n      raise IOError(\"Error loading param file: {} {}\".format(context.param_path, repr(error)))\n\n    dest = yaml.safe_load(param_body)[\"dest\"]\n\n    \n    if \"environments\" in dest:\n      if not resolver.resolved[\"ENV_SHORT\"] in dest[\"environments\"]:\n        print(\"Environment: {} not enabled for {}\".format(resolver.resolved[\"ENV_SHORT\"], context.template_path))\n        return\n\n    \n    resolver.load(template_body, param_body)\n  else:\n    resolver.load(template_body)\n  rendered_body = resolver.render()\n\n  if not resolver.resolved_ok():\n    raise RuntimeError(\"Couldn't resolve all symbols; template has leftover {{ or }}: {}\".format(resolver.unresolved_symbols()))\n\n  if context.lint:\n    if context.template_path.endswith(\".json\"):\n      try:\n        json.loads(rendered_body, strict=False)\n        print(\"JSON passed linting process.\")\n      except ValueError as e:\n        fail(\"JSON failed linting process.\", e)\n    elif context.template_path.endswith((\".yml\", \".yaml\")):\n      conf = yamllint_config.YamlLintConfig(content='extends: relaxed')\n      lint_output = yamllinter.run(rendered_body, conf)\n      lint_level = 'error'\n      lint_errors = [issue for issue in lint_output if issue.level == lint_level]\n      if lint_errors:\n        split_body = rendered_body.splitlines()\n        for error in lint_errors:\n          print(error)\n          \n          print(\"\\t\", split_body[error.line - 1])\n        fail(\"YAML failed linting process.\")\n\n  if context.verbose:\n    print(context)\n    if context.no_params:\n      print('no_params flag set to true!')\n      print('Inline template resolution based on external symbol lookup only and no destination for file write.\\n')\n    else:\n      dir_path = normpath(dirname(dest[\"path\"]))\n      print(\"make directories: {} {}\".format(dir_path, dest[\"dir_perm\"]))\n      print(\"chmod file to: \" + dest[\"file_perm\"])\n      user, group = dest[\"user_group\"].split(\":\")\n      print(\"chown last directory in path to user: {}, group: {}\".format(user, group))\n      print(\"chown file to user: {}, group: {}\\n\".format(user, group))\n\n    print(\"template body:\\n{}\\nrendered body:\\n{}\\n\".format(template_body, rendered_body))\n  elif context.silent:\n    print(\"Config template rendered successfully.\")\n  else:\n    print(rendered_body)", "docstring": "Given a context containing path to template, env, and service:\nmerge config into template and output the result to stdout\nArgs:\ncontext: a populated context object", "source": "juraj-google-style"}
{"code": "def copy_entities(self, from_namespace, from_workspace, etype, enames):\n        \n        r = fapi.copy_entities(from_namespace, from_workspace,\n                               self.namespace, self.name, etype, enames,\n                               self.api_url)\n        fapi._check_response_code(r, 201)", "docstring": "Copy entities from another workspace.\n\nArgs:\nfrom_namespace (str): Source workspace namespace\nfrom_workspace (str): Source workspace name\netype (str): Entity type\nenames (list(str)): List of entity names to copy", "source": "juraj-google-style"}
{"code": "def CacheObject(self, identifier, vfs_object):\n    if (identifier in self._values):\n        raise KeyError('Object already cached for identifier: {0:s}'.format(identifier))\n    if (len(self._values) == self._maximum_number_of_cached_values):\n        raise errors.CacheFullError('Maximum number of cached values reached.')\n    self._values[identifier] = ObjectsCacheValue(vfs_object)", "docstring": "Caches a VFS object.\n\nThis method ignores the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\nvfs_object (object): VFS object to cache.\n\nRaises:\nCacheFullError: if he maximum number of cached values is reached.\nKeyError: if the VFS object already is cached.", "source": "codesearchnet"}
{"code": "def AddItem(self, key, item, f=(lambda x: x)):\n    with self._mutex:\n        bucket = self._buckets[key]\n    bucket.AddItem(item, f)", "docstring": "Add a new item to the Reservoir with the given tag.\n\nIf the reservoir has not yet reached full size, the new item is guaranteed\nto be added. If the reservoir is full, then behavior depends on the\nalways_keep_last boolean.\n\nIf always_keep_last was set to true, the new item is guaranteed to be added\nto the reservoir, and either the previous last item will be replaced, or\n(with low probability) an older item will be replaced.\n\nIf always_keep_last was set to false, then the new item will replace an\nold item with low probability.\n\nIf f is provided, it will be applied to transform item (lazily, iff item is\ngoing to be included in the reservoir).\n\nArgs:\nkey: The key to store the item under.\nitem: The item to add to the reservoir.\nf: An optional function to transform the item prior to addition.", "source": "codesearchnet"}
{"code": "def report_validation_error(self, element_path: str, msg: str) -> None:", "docstring": "Reports the given error during FHIR validation.\n\nThis indicates that the resource does not fully comply with the FHIR\nspecification or profile.\n\nArgs:\nelement_path: The path to the field where the issue occurred.\nmsg: The error message produced.", "source": "github-repos"}
{"code": "def _get_vep_transcript(self, transcript_info):\n    transcript = Transcript(hgnc_symbol=transcript_info.get('SYMBOL'), transcript_id=transcript_info.get('Feature'), ensembl_id=transcript_info.get('Gene'), biotype=transcript_info.get('BIOTYPE'), consequence=transcript_info.get('Consequence'), strand=transcript_info.get('STRAND'), sift=transcript_info.get('SIFT'), polyphen=transcript_info.get('PolyPhen'), exon=transcript_info.get('EXON'), HGVSc=transcript_info.get('HGVSc'), HGVSp=transcript_info.get('HGVSp'), GMAF=transcript_info.get('GMAF'), ExAC_MAF=transcript_info.get('ExAC_MAF'))\n    return transcript", "docstring": "Create a Transcript based on the vep annotation\n\nArgs:\ntranscript_info (dict): A dict with vep info\n\nReturns:\ntranscript (puzzle.models.Transcript): A Transcripts", "source": "codesearchnet"}
{"code": "def get_for_type(input_type='text'):\n    if (input_type in RandomInputHelper.cache):\n        return RandomInputHelper.cache[input_type]\n    types = {'text': RandomInputHelper.get_random_value, 'hidden': RandomInputHelper.get_random_value, 'search': RandomInputHelper.get_random_value, 'color': RandomInputHelper.get_random_color, 'week': {'function': RandomInputHelper.get_random_value, 'params': [2, ['1234']]}, 'password': RandomInputHelper.get_random_password, 'number': RandomInputHelper.get_random_number, 'tel': RandomInputHelper.get_random_telephonenumber, 'url': RandomInputHelper.get_random_url, 'textarea': RandomInputHelper.get_random_text, 'email': RandomInputHelper.get_random_email}\n    if (types.get(input_type) is None):\n        return ''\n    if (type(types.get(input_type)) is dict):\n        generator = types.get(input_type)\n        value = generator.get('function')(*generator.get('params'))\n    else:\n        value = types.get(input_type)()\n    RandomInputHelper.cache[input_type] = value\n    return value", "docstring": "Get a random string for the given html input type\n\nArgs:\ninput_type (str): The input type (e.g. email).\n\nReturns:\nstr: The (cached) random value.", "source": "codesearchnet"}
{"code": "def __init__(self, value, data_type, masks=None, name='Secret Data'):\n        \n        super(SecretData, self).__init__()\n\n        self._object_type = enums.ObjectType.SECRET_DATA\n\n        self.value = value\n        self.data_type = data_type\n        self.names = [name]\n\n        if masks:\n            self.cryptographic_usage_masks = masks\n\n        \n        \n\n        \n        \n\n        self.validate()", "docstring": "Create a SecretData object.\n\nArgs:\nvalue(bytes): The bytes representing secret data.\ndata_type(SecretDataType): An enumeration defining the type of the\nsecret value.\nmasks(list): A list of CryptographicUsageMask enumerations\ndefining how the key will be used.\nname(string): The string name of the key.", "source": "juraj-google-style"}
{"code": "def GetAPFSFileEntryByPathSpec(self, path_spec):\n    \n    \n    location = getattr(path_spec, 'location', None)\n    identifier = getattr(path_spec, 'identifier', None)\n\n    if identifier is not None:\n      fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(\n          identifier)\n    elif location is not None:\n      fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)\n    else:\n      raise errors.PathSpecError(\n          'Path specification missing location and identifier.')\n\n    return fsapfs_file_entry", "docstring": "Retrieves the APFS file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\npyfsapfs.file_entry: file entry.\n\nRaises:\nPathSpecError: if the path specification is missing location and\nidentifier.", "source": "juraj-google-style"}
{"code": "def _GetDisplayPath(self, path_spec, full_path, data_stream_name):\n    display_path = ''\n    if path_spec.HasParent():\n        parent_path_spec = path_spec.parent\n        if (parent_path_spec and (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION)):\n            display_path = ''.join([display_path, parent_path_spec.location])\n    display_path = ''.join([display_path, full_path])\n    if data_stream_name:\n        display_path = ':'.join([display_path, data_stream_name])\n    return display_path", "docstring": "Retrieves a path to display.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification of the file entry.\nfull_path (str): full path of the file entry.\ndata_stream_name (str): name of the data stream.\n\nReturns:\nstr: path to display.", "source": "codesearchnet"}
{"code": "class TimesFmOutput(BaseModelOutput):\n    loc: Optional[torch.Tensor] = None\n    scale: Optional[torch.Tensor] = None", "docstring": "Args:\nloc (`torch.Tensor` of shape `(batch_size, )`):\nThe mean of the time series inputs.\nscale (`torch.Tensor` of shape `(batch_size,)`):\nThe scale of the time series inputs.", "source": "github-repos"}
{"code": "def get_card(self, id, name=None):\n    return self.create_card(dict(id=id, name=name))", "docstring": "Get a card\n\nReturns:\nCard: The card with the given `id`", "source": "codesearchnet"}
{"code": "def process_actions(self, actions):\n        \n        notices = {}\n        notification_contacts = {}\n        for action in actions:\n            resource = action['resource']\n            action_status = ActionStatus.SUCCEED\n\n            try:\n                if action['action'] == AuditActions.REMOVE:\n                    action_status = self.process_action(\n                        resource,\n                        AuditActions.REMOVE\n                    )\n                    if action_status == ActionStatus.SUCCEED:\n                        db.session.delete(action['issue'].issue)\n\n                elif action['action'] == AuditActions.STOP:\n                    action_status = self.process_action(\n                        resource,\n                        AuditActions.STOP\n                    )\n                    if action_status == ActionStatus.SUCCEED:\n                        action['issue'].update({\n                            'missing_tags': action['missing_tags'],\n                            'notes': action['notes'],\n                            'last_alert': action['last_alert'],\n                            'state': action['action']\n                        })\n\n                elif action['action'] == AuditActions.FIXED:\n                    db.session.delete(action['issue'].issue)\n\n                elif action['action'] == AuditActions.ALERT:\n                    action['issue'].update({\n                        'missing_tags': action['missing_tags'],\n                        'notes': action['notes'],\n                        'last_alert': action['last_alert'],\n                        'state': action['action']\n                    })\n\n                db.session.commit()\n\n                if action_status == ActionStatus.SUCCEED:\n                    for owner in [\n                        dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}\n                    ]:\n                        if owner['value'] not in notification_contacts:\n                            contact = NotificationContact(type=owner['type'], value=owner['value'])\n                            notification_contacts[owner['value']] = contact\n                            notices[contact] = {\n                                'fixed': [],\n                                'not_fixed': []\n                            }\n                        else:\n                            contact = notification_contacts[owner['value']]\n\n                        if action['action'] == AuditActions.FIXED:\n                            notices[contact]['fixed'].append(action)\n                        else:\n                            notices[contact]['not_fixed'].append(action)\n            except Exception as ex:\n                self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(\n                    action['resource'].account.account_name,\n                    action['resource'].id,\n                    action['resource'],\n                    ex\n                ))\n\n        return notices", "docstring": "Process the actions we want to take\n\nArgs:\nactions (`list`): List of actions we want to take\n\nReturns:\n`list` of notifications", "source": "juraj-google-style"}
{"code": "def write_file(self, filename='HEADER'):\n        \n        with open(filename, \"w\") as f:\n            f.write(str(self) + \"\\n\")", "docstring": "Writes Header into filename on disk.\n\nArgs:\nfilename: Filename and path for file to be written to disk", "source": "juraj-google-style"}
{"code": "def addColumn(self, columnName, dtype, defaultValue):\n    model = self.tableView.model()\n    if (model is not None):\n        model.addDataFrameColumn(columnName, dtype, defaultValue)\n    self.addColumnButton.setChecked(False)", "docstring": "Adds a column with the given parameters to the underlying model\n\nThis method is also a slot.\nIf no model is set, nothing happens.\n\nArgs:\ncolumnName (str): The name of the new column.\ndtype (numpy.dtype): The datatype of the new column.\ndefaultValue (object): Fill the column with this value.", "source": "codesearchnet"}
{"code": "def to_deeper_model(self, target_id, new_layer):\n        \n        self.operation_history.append((\"to_deeper_model\", target_id, new_layer))\n        input_id = self.layer_id_to_input_node_ids[target_id][0]\n        output_id = self.layer_id_to_output_node_ids[target_id][0]\n        if self.weighted:\n            if is_layer(new_layer, \"Dense\"):\n                init_dense_weight(new_layer)\n            elif is_layer(new_layer, \"Conv\"):\n                init_conv_weight(new_layer)\n            elif is_layer(new_layer, \"BatchNormalization\"):\n                init_bn_weight(new_layer)\n\n        self._insert_new_layers([new_layer], input_id, output_id)", "docstring": "Insert a relu-conv-bn block after the target block.\nArgs:\ntarget_id: A convolutional layer ID. The new block should be inserted after the block.\nnew_layer: An instance of StubLayer subclasses.", "source": "juraj-google-style"}
{"code": "def __init__(self, jid, password, verify_security=False):\n        \n        self.jid = aioxmpp.JID.fromstr(jid)\n        self.password = password\n        self.verify_security = verify_security\n\n        self.behaviours = []\n        self._values = {}\n\n        self.conn_coro = None\n        self.stream = None\n        self.client = None\n        self.message_dispatcher = None\n        self.presence = None\n        self.loop = None\n\n        self.container = Container()\n        self.container.register(self)\n\n        self.loop = self.container.loop\n\n        \n        self.web = WebApp(agent=self)\n\n        self.traces = TraceStore(size=1000)\n\n        self._alive = Event()", "docstring": "Creates an agent\n\nArgs:\njid (str): The identifier of the agent in the form username@server\npassword (str): The password to connect to the server\nverify_security (bool): Wether to verify or not the SSL certificates", "source": "juraj-google-style"}
{"code": "def shape(self):\n    nrows = self._row_partition.static_nrows\n    ncols = self._row_partition.static_uniform_row_length\n    value_shape = self._values.shape[1:]\n    return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)", "docstring": "The statically known shape of this ragged tensor.\n\nReturns:\nA `TensorShape` containing the statically known shape of this ragged\ntensor.  Ragged dimensions have a size of `None`.\n\nExamples:\n\n>>> tf.ragged.constant([[0], [1, 2]]).shape\nTensorShape([2, None])\n\n>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape\nTensorShape([2, None, 2])", "source": "github-repos"}
{"code": "def increase_route_count(self, crawled_request):\n    for route in self.__routing_options.routes:\n        if re.compile(route).match(crawled_request.url):\n            count_key = (str(route) + crawled_request.method)\n            if (count_key in self.__routing_count.keys()):\n                self.__routing_count[count_key] += 1\n            else:\n                self.__routing_count[count_key] = 1\n            break", "docstring": "Increase the count that determines how many times a URL of a certain route has been crawled.\n\nArgs:\ncrawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route.", "source": "codesearchnet"}
{"code": "def __init__(self, operation: Type[Operation], *expressions: Expression) -> None:\n        \n        self.operation = operation\n        self.length = len(expressions)\n\n        self.constant = Multiset()  \n        self.syntactic = Multiset()  \n        self.sequence_variables = Multiset()  \n        self.sequence_variable_infos = dict()\n        self.fixed_variables = Multiset()  \n        self.fixed_variable_infos = dict()\n        self.rest = Multiset()  \n\n        self.sequence_variable_min_length = 0\n        self.fixed_variable_length = 0\n        self.wildcard_min_length = 0\n        self.optional_count = 0\n        self.wildcard_fixed = None\n\n        for expression in expressions:\n            expression = expression\n            if is_constant(expression):\n                self.constant[expression] += 1\n            elif isinstance(expression, Wildcard):\n                wc = cast(Wildcard, expression)\n                if wc.variable_name:\n                    name = wc.variable_name\n                    if wc.fixed_size:\n                        self.fixed_variables[name] += 1\n                        symbol_type = getattr(wc, 'symbol_type', None)\n                        self._update_var_info(self.fixed_variable_infos, name, wc.min_count, symbol_type, wc.optional)\n                        if wc.optional is None:\n                            self.fixed_variable_length += wc.min_count\n                        else:\n                            self.optional_count += 1\n                    else:\n                        self.sequence_variables[name] += 1\n                        self._update_var_info(self.sequence_variable_infos, name, wc.min_count, None, wc.optional)\n                        if wc.optional is None:\n                            self.sequence_variable_min_length += wc.min_count\n                else:\n                    self.wildcard_min_length += wc.min_count\n                    if self.wildcard_fixed is None:\n                        self.wildcard_fixed = wc.fixed_size\n                    else:\n                        self.wildcard_fixed = self.wildcard_fixed and wc.fixed_size\n            elif is_syntactic(expression):\n                self.syntactic[expression] += 1\n            else:\n                self.rest[expression] += 1", "docstring": "Create a CommutativePatternsParts instance.\n\nArgs:\noperation:\nThe type of the commutative operation. Must be a subclass of :class:`.Operation` with\n:attr:`~.Operation.commutative` set to ``True``.\n*expressions:\nThe operands of the commutative operation.", "source": "juraj-google-style"}
{"code": "def _build_encryption_key_information(self, value):\n    if (value is None):\n        return None\n    if (not isinstance(value, dict)):\n        raise TypeError('Encryption key information must be a dictionary.')\n    cryptographic_parameters = value.get('cryptographic_parameters')\n    if cryptographic_parameters:\n        cryptographic_parameters = self._build_cryptographic_parameters(cryptographic_parameters)\n    encryption_key_information = cobjects.EncryptionKeyInformation(unique_identifier=value.get('unique_identifier'), cryptographic_parameters=cryptographic_parameters)\n    return encryption_key_information", "docstring": "Build an EncryptionKeyInformation struct from a dictionary.\n\nArgs:\nvalue (dict): A dictionary containing the key/value pairs for a\nEncryptionKeyInformation struct.\n\nReturns:\nEncryptionKeyInformation: an EncryptionKeyInformation struct\n\nRaises:\nTypeError: if the input argument is invalid", "source": "codesearchnet"}
{"code": "def albedo(self, value=999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `albedo`'.format(value))\n\n        self._albedo = value", "docstring": "Corresponds to IDD Field `albedo`\n\nArgs:\nvalue (float): value for IDD Field `albedo`\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    \n    if key != 'logline':\n      logger.warning(\n          'Unable to parse record, unknown structure: {0:s}'.format(key))\n      return\n\n    try:\n      timestamp = int(structure.timestamp)\n    except ValueError:\n      logger.debug('Invalid timestamp string {0:s}, skipping record'.format(\n          structure.timestamp))\n      return\n\n    try:\n      nickname, text = self._StripThenGetNicknameAndText(structure.text)\n    except pyparsing.ParseException:\n      logger.debug('Error parsing entry at offset {0:d}'.format(self._offset))\n      return\n\n    event_data = XChatScrollbackEventData()\n    event_data.nickname = nickname\n    event_data.offset = self._offset\n    event_data.text = text\n\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a log record structure.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): structure parsed from the log file.", "source": "juraj-google-style"}
{"code": "def experimental_from_proto(cls, proto: struct_pb2.TypeSpecProto) -> 'TypeSpec':\n    return nested_structure_coder.decode_proto(struct_pb2.StructuredValue(type_spec_value=proto))", "docstring": "Returns a TypeSpec instance based on the serialized proto.\n\nDo NOT override for custom non-TF types.\n\nArgs:\nproto: Proto generated using 'experimental_as_proto'.", "source": "github-repos"}
{"code": "def dump(destination, ms, single=False, pretty_print=False, **kwargs):\n    text = dumps(ms, single=single, pretty_print=pretty_print, **kwargs)\n    if hasattr(destination, 'write'):\n        print(text, file=destination)\n    else:\n        with open(destination, 'w') as fh:\n            print(text, file=fh)", "docstring": "Serialize Xmrs objects to the Prolog representation and write to a file.\n\nArgs:\ndestination: filename or file object where data will be written\nms: an iterator of Xmrs objects to serialize (unless the\n*single* option is `True`)\nsingle: if `True`, treat *ms* as a single Xmrs object\ninstead of as an iterator\npretty_print: if `True`, add newlines and indentation", "source": "codesearchnet"}
{"code": "def DeterminePeakMemoryUsage(self, item):\n    return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster)", "docstring": "Returns a snapshot of the peak memory usage.\n\nArgs:\nitem: The item for which to measure the costs.\nReturns: A hashtable indexed by device name.", "source": "github-repos"}
{"code": "def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):\n    \n    ds = ds.batch(batch_size)\n    \n    ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))\n    iterator = ds.make_initializable_iterator()\n    sess.run(iterator.initializer)\n    get_next = iterator.get_next()\n    h = collections.Counter()\n    try:\n        while True:\n            h.update(sess.run(get_next))\n    except tf.errors.OutOfRangeError:\n        pass\n    \n    return h", "docstring": "Given dataset of key names, return histogram of moves/game.\n\nMove counts are written by the game players, so\nthis is mostly useful for repair or backfill.\n\nArgs:\nsess:  TF session\nds:  TF dataset containing game move keys.\nbatch_size:  performance tuning parameter", "source": "juraj-google-style"}
{"code": "def ValidateAccessAndSubjects(requested_access, subjects):\n    if (not requested_access):\n        raise access_control.UnauthorizedAccess(('Must specify requested access type for %s' % subjects))\n    for s in requested_access:\n        if (s not in 'rwq'):\n            raise ValueError(('Invalid access requested for %s: %s' % (subjects, requested_access)))\n    if (('q' in requested_access) and ('r' not in requested_access)):\n        raise access_control.UnauthorizedAccess(('Invalid access request: query permissions require read permissions for %s' % subjects), requested_access=requested_access)\n    return True", "docstring": "Does basic requested access validation.\n\nArgs:\nrequested_access: String consisting or 'r', 'w' and 'q' characters.\nsubjects: A list of subjects that are about to be accessed with a given\nrequested_access. Used for logging purposes only.\n\nReturns:\nTrue if requested_access is valid.\n\nRaises:\naccess_control.UnauthorizedAccess: if requested_access is not valid.\nValueError: if subjects list is empty.", "source": "codesearchnet"}
{"code": "def mark_flags_as_mutual_exclusive(flag_names, required=False,\n                                   flag_values=FLAGS):\n  \n\n  def validate_mutual_exclusion(flags_dict):\n    flag_count = sum(1 for val in flags_dict.values() if val is not None)\n    if flag_count == 1 or (not required and flag_count == 0):\n      return True\n    message = ('%s one of (%s) must be specified.' %\n               ('Exactly' if required else 'At most', ', '.join(flag_names)))\n    raise ValidationError(message)\n\n  register_multi_flags_validator(\n      flag_names, validate_mutual_exclusion, flag_values=flag_values)", "docstring": "Ensures that only one flag among flag_names is set.\n\nArgs:\nflag_names: [str], a list of the flag names to be checked.\nrequired: Boolean, if set, exactly one of the flags must be set.\nOtherwise, it is also valid for none of the flags to be set.\nflag_values: An optional FlagValues instance to validate against.", "source": "juraj-google-style"}
{"code": "def list_files(root, suffix, prefix=False):\n    \n    root = os.path.expanduser(root)\n    files = list(\n        filter(\n            lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),\n            os.listdir(root)\n        )\n    )\n\n    if prefix is True:\n        files = [os.path.join(root, d) for d in files]\n\n    return files", "docstring": "List all files ending with a suffix at a given root\n\nArgs:\nroot (str): Path to directory whose folders need to be listed\nsuffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').\nIt uses the Python \"str.endswith\" method and is passed directly\nprefix (bool, optional): If true, prepends the path to each result, otherwise\nonly returns the name of the files found", "source": "juraj-google-style"}
{"code": "def validate(self):\n    endpoint = '/validate'\n    payload = dict(accessToken=self.access_token)\n    rep = self._ygg_req(endpoint, payload)\n    return (not bool(rep))", "docstring": "Check if an access token is valid\n\nReturns:\ndict: Empty or error dict", "source": "codesearchnet"}
{"code": "def removedirs(self, target_directory):\n    target_directory = self.filesystem.absnormpath(target_directory)\n    directory = self.filesystem.confirmdir(target_directory)\n    if directory.contents:\n        self.filesystem.raise_os_error(errno.ENOTEMPTY, self.path.basename(target_directory))\n    else:\n        self.rmdir(target_directory)\n    (head, tail) = self.path.split(target_directory)\n    if (not tail):\n        (head, tail) = self.path.split(head)\n    while (head and tail):\n        head_dir = self.filesystem.confirmdir(head)\n        if head_dir.contents:\n            break\n        self.filesystem.rmdir(head, allow_symlink=True)\n        (head, tail) = self.path.split(head)", "docstring": "Remove a leaf fake directory and all empty intermediate ones.\n\nArgs:\ntarget_directory: the directory to be removed.\n\nRaises:\nOSError: if target_directory does not exist or is not a directory.\nOSError: if target_directory is not empty.", "source": "codesearchnet"}
{"code": "def with_subject(self, subject):\n        \n        return self.__class__(\n            self._signer,\n            service_account_email=self._service_account_email,\n            scopes=self._scopes,\n            token_uri=self._token_uri,\n            subject=subject,\n            project_id=self._project_id,\n            additional_claims=self._additional_claims.copy())", "docstring": "Create a copy of these credentials with the specified subject.\n\nArgs:\nsubject (str): The subject claim.\n\nReturns:\ngoogle.auth.service_account.Credentials: A new credentials\ninstance.", "source": "juraj-google-style"}
{"code": "def report_proto_path(self):\n    return self._report_proto_path", "docstring": "Getter for path where tensor_tracer.proto object should be written.\n\nReturns:\nA string path.", "source": "github-repos"}
{"code": "def recipe_google_ads_segmentology(config, auth_read, customer_id, developer_token, login_id, auth_write, recipe_slug):\n    dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n    bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n    google_api(config, {'auth': auth_read, 'api': 'googleads', 'version': 'v8', 'function': 'customers.googleAds.search', 'kwargs': {'customerId': customer_id, 'body': {'query': 'SELECT\\n         campaign.name,\\n         ad_group.name,\\n         segments.geo_target_postal_code,\\n         metrics.impressions,\\n         metrics.clicks,\\n         metrics.conversions,\\n         metrics.interactions\\n         FROM user_location_view         '}}, 'headers': {'developer-token': developer_token, 'login-customer-id': login_id}, 'iterate': True, 'results': {'bigquery': {'dataset': recipe_slug, 'table': 'GoogleAds_KPI', 'schema': [{'name': 'userLocationView', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'segments', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'geoTargetPostalCode', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'metrics', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'interactions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'impressions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'conversions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'clicks', 'type': 'INTEGER', 'mode': 'NULLABLE'}]}, {'name': 'adGroup', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'campaign', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}]}}})\n    bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\\n          campaign.name AS Campaign,\\n          adGRoup.name AS Ad_Group,\\n          segments.geoTargetPostalCode AS Postal_Code,\\n          SAFE_DIVIDE(metrics.impressions, SUM(metrics.impressions) OVER()) AS Impression,\\n          SAFE_DIVIDE(metrics.clicks, metrics.impressions) AS Click,\\n          SAFE_DIVIDE(metrics.conversions, metrics.impressions) AS Conversion,\\n          SAFE_DIVIDE(metrics.interactions, metrics.impressions) AS Interaction,\\n          metrics.impressions AS Impressions          FROM\\n          `{dataset}.GoogleAds_KPI`;        ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'GoogleAds_KPI_Normalized'}})\n    census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n    census(config, {'auth': auth_write, 'correlate': {'join': 'Postal_Code', 'pass': ['Campaign', 'Ad_Group'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion', 'Interaction'], 'dataset': recipe_slug, 'table': 'GoogleAds_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "GoogleAds funnel analysis using Census data.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\ncustomer_id (string) - Google Ads customer.\ndeveloper_token (string) - Google Ads developer token.\nlogin_id 
(string) - Google Ads login.\nauth_write (authentication) - Authorization used for writing data.\nrecipe_slug (string) - Name of Google BigQuery dataset to create.", "source": "github-repos"}
{"code": "def stat_v2(path):\n    return _pywrap_file_io.Stat(compat.path_to_str(path))", "docstring": "Returns file statistics for a given path.\n\nArgs:\npath: string, path to a file\n\nReturns:\nFileStatistics struct that contains information about the path\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def _find_furthest_new_line(read_buffer):\n    new_line_positions = [read_buffer.rfind(n) for n in new_lines_bytes]\n    return max(new_line_positions)", "docstring": "Return -1 if read_buffer does not contain new line otherwise the position of the rightmost newline.\n\nArgs:\nread_buffer (bytestring)\n\nReturns:\nint: The right most position of new line character in read_buffer if found, else -1", "source": "codesearchnet"}
{"code": "def set_installed_version(vcs, version):\n    version_path = _get_version_path(vcs)\n    with open(version_path, 'w') as f:\n        f.write(version)", "docstring": "Set the installed version for this project.\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nversion (str)", "source": "codesearchnet"}
{"code": "def tags(pode, leaf=False):\n    \n    fulltags = [tag for tag in pode[1]['tags']]\n    if not leaf:\n        return fulltags\n\n    \n    retn = []\n\n    \n    for size, tag in sorted([(len(t), t) for t in fulltags], reverse=True):\n        look = tag + '.'\n        if any([r.startswith(look) for r in retn]):\n            continue\n        retn.append(tag)\n    return retn", "docstring": "Get all the tags for a given node.\n\nArgs:\npode (tuple): A packed node.\nleaf (bool): If True, only return the full tags.\n\nReturns:\nlist: A list of tag strings.", "source": "juraj-google-style"}
{"code": "def _GetEventData(\n      self, parser_mediator, record_index, evt_record, recovered=False):\n    \n    event_data = WinEvtRecordEventData()\n\n    try:\n      event_data.record_number = evt_record.identifier\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read record identifier from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n    try:\n      event_identifier = evt_record.event_identifier\n    except OverflowError as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'unable to read event identifier from event record: {0:d} '\n          'with error: {1!s}').format(record_index, exception))\n\n      event_identifier = None\n\n    event_data.offset = evt_record.offset\n    event_data.recovered = recovered\n\n    \n    \n    if event_identifier is not None:\n      event_data.event_identifier = event_identifier & 0xffff\n      event_data.facility = (event_identifier >> 16) & 0x0fff\n      event_data.severity = event_identifier >> 30\n      event_data.message_identifier = event_identifier\n\n    event_data.event_type = evt_record.event_type\n    event_data.event_category = evt_record.event_category\n    event_data.source_name = evt_record.source_name\n\n    \n    \n    event_data.computer_name = evt_record.computer_name\n    event_data.user_sid = evt_record.user_security_identifier\n\n    event_data.strings = list(evt_record.strings)\n\n    return event_data", "docstring": "Retrieves event data from the Windows EventLog (EVT) record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrecord_index (int): event record index.\nevt_record (pyevt.record): event record.\nrecovered (Optional[bool]): True if the record was recovered.\n\nReturns:\nWinEvtRecordEventData: event data.", "source": "juraj-google-style"}
{"code": "def get_all_pipelines(app=''):\n    url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)\n    pipelines = response.json()\n    LOG.debug('Pipelines:\\n%s', pipelines)\n    return pipelines", "docstring": "Get a list of all the Pipelines in _app_.\n\nArgs:\napp (str): Name of Spinnaker Application.\n\nReturns:\nrequests.models.Response: Response from Gate containing Pipelines.", "source": "codesearchnet"}
{"code": "def coalescence_times(self, backward=True):\n    if (not isinstance(backward, bool)):\n        raise TypeError('backward must be a bool')\n    for dist in sorted((d for (n, d) in self.distances_from_root() if (len(n.children) > 1)), reverse=backward):\n        (yield dist)", "docstring": "Generator over the times of successive coalescence events\n\nArgs:\n``backward`` (``bool``): ``True`` to go backward in time (i.e., leaves to root), otherwise ``False``", "source": "codesearchnet"}
{"code": "def _GetGradSource(op_or_tensor):\n    name_tokens = op_or_tensor.name.split('/')\n    grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith('gradients')]\n    if not grad_pos:\n        raise ValueError(f\"Expected op/tensor name to start with gradients (excluding scope), got: {op_or_tensor.name}. This means that a tf.gradients op with this op in its dependency path has a custom name that does not start with 'gradients'. Please make sure all calls to tf.gradients that have non-empty `name` arguments use names that start with 'gradients'.\")\n    return '/'.join(name_tokens[:grad_pos[-1] + 1])", "docstring": "Identify which call to tf.gradients created this gradient op or tensor.\n\nTensorArray gradient calls use an accumulator TensorArray object.  If\nmultiple gradients are calculated and run in the same session, the multiple\ngradient nodes may accidentally flow through the same accumulator TensorArray.\nThis double counting breaks the TensorArray gradient flow.\n\nThe solution is to identify which gradient call this particular\nTensorArray*Grad is being called in, by looking at the input gradient\ntensor's name, and create or lookup an accumulator gradient TensorArray\nassociated with this specific call.  This solves any confusion and ensures\ndifferent gradients from the same forward graph get their own accumulators.\n\nThis function creates the unique label associated with the tf.gradients call\nthat is used to create the gradient TensorArray.\n\nArgs:\nop_or_tensor: `Tensor` or `Operation` which is an input to a\nTensorArray*Grad call.\n\nReturns:\nA python string, the unique label associated with this particular\ngradients calculation.\n\nRaises:\nValueError: If not called within a gradients calculation.", "source": "github-repos"}
{"code": "def predict(self, a, b, sig=[-1, -1], maxpnt=500):\n        \n        a = (a - np.mean(a)) / np.std(a)\n        b = (b - np.mean(b)) / np.std(b)\n\n        return FastHsicTestGamma(a, b, sig, maxpnt)", "docstring": "Compute the test statistic\n\nArgs:\na (array-like): Variable 1\nb (array-like): Variable 2\nsig (list): [0] (resp [1]) is kernel size for a(resp b) (set to median distance if -1)\nmaxpnt (int): maximum number of points used, for computational time\n\nReturns:\nfloat: test statistic", "source": "juraj-google-style"}
{"code": "def default_num_choices(self) -> int:\n    return OnnxConfig.default_fixed_num_choices", "docstring": "The default number of choices to use if no other indication\n\nReturns:\nInteger > 0", "source": "github-repos"}
{"code": "def to_json_str(self):\n    adict = dict(vars(self), sort_keys=True)\n    adict['type'] = self.__class__.__name__\n    return json.dumps(adict)", "docstring": "Convert data to json string representation.\n\nReturns:\njson representation as string.", "source": "codesearchnet"}
{"code": "def get_organisation(self, **query_params):\n    organisation_json = self.get_organisations_json(self.base_uri, query_params=query_params)\n    return self.create_organisation(organisation_json)", "docstring": "Get the Organisation for this board. Returns Organisation object.\n\nReturns:\nlist(Organisation): The organisation attached to this board", "source": "codesearchnet"}
{"code": "def get_airport_details(self, iata, page=1, limit=100):\n    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)\n    details = self._fr24.get_airport_details(url)\n    weather = self._fr24.get_airport_weather(url)\n    details['position']['elevation'] = weather['elevation']\n    return details", "docstring": "Retrieve the details of an airport\n\nGiven the IATA code of an airport, this method returns the detailed information like lat lon, full name, URL, codes etc.\n\nArgs:\niata (str): The IATA code for an airport, e.g. HYD\npage (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data\nlimit (int): Optional limit on number of records returned\n\nReturns:\nA list of dicts with the data; one dict for each row of data from flightradar24\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_airport_details('HYD')\nf.get_airport_details('HYD',page=1,limit=10)", "source": "codesearchnet"}
{"code": "def create(self, name, passphrase=None, wallet_data=None):\n    if (not self.application):\n        raise RoundError('User accounts are limited to one wallet. Make an account or shoot us an email <dev@gem.co> if you have a compelling use case for more.')\n    if ((not passphrase) and (not wallet_data)):\n        raise ValueError('Usage: wallets.create(name, passphrase [, wallet_data])')\n    elif passphrase:\n        wallet_data = generate(passphrase, trees=(['primary', 'backup'] if self.application else ['primary']))\n    wallet = dict(primary_private_seed=wallet_data['primary']['encrypted_seed'], primary_public_seed=wallet_data['primary']['public_seed'], name=name)\n    if self.application:\n        wallet['backup_public_seed'] = wallet_data['backup']['public_seed']\n    resource = self.resource.create(wallet)\n    wallet = self.wrap(resource)\n    return ((wallet_data['backup']['private_seed'], self.add(wallet)) if self.application else self.add(wallet))", "docstring": "Create a new Wallet object and add it to this Wallets collection.\nThis is only available in this library for Application wallets. Users\nmust add additional wallets in their User Console\n\nArgs:\nname (str): wallet name\npassphrase (str, optional): A passphrase with which to encrypt a user\nwallet. If not supplied, wallet_data is mandatory.\nwallet_data (dict): Output from wallets.generate.\nFor User Wallets, only the primary tree is used.\nFor Application Wallets, the primary and backup trees are used.\n\nReturns:\nA tuple of the (backup_private_seed, round.Wallet).", "source": "codesearchnet"}
{"code": "def merge(self, workdir, pot_files, out_dvdb, delete_source=True):\n    pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]\n    if (not os.path.isabs(out_dvdb)):\n        out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))\n    if self.verbose:\n        print(('Will merge %d files into output DVDB %s' % (len(pot_files), out_dvdb)))\n        for (i, f) in enumerate(pot_files):\n            print((' [%d] %s' % (i, f)))\n    if (len(pot_files) == 1):\n        with open(pot_files[0], 'r') as inh, open(out_dvdb, 'w') as out:\n            for line in inh:\n                out.write(line)\n        return out_dvdb\n    (self.stdin_fname, self.stdout_fname, self.stderr_fname) = map(os.path.join, (3 * [os.path.abspath(workdir)]), ['mrgdvdb.stdin', 'mrgdvdb.stdout', 'mrgdvdb.stderr'])\n    inp = StringIO()\n    inp.write((out_dvdb + '\\n'))\n    inp.write((str(len(pot_files)) + '\\n'))\n    for fname in pot_files:\n        inp.write((fname + '\\n'))\n    self.stdin_data = [s for s in inp.getvalue()]\n    with open(self.stdin_fname, 'wt') as fh:\n        fh.writelines(self.stdin_data)\n        fh.flush()\n        os.fsync(fh.fileno())\n    retcode = self.execute(workdir)\n    if ((retcode == 0) and delete_source):\n        for f in pot_files:\n            try:\n                os.remove(f)\n            except IOError:\n                pass\n    return out_dvdb", "docstring": "Merge POT files containing 1st order DFPT potential\nreturn the absolute path of the new database in workdir.\n\nArgs:\ndelete_source: True if POT1 files should be removed after (successful) merge.", "source": "codesearchnet"}
{"code": "def kill_raylet_monitor(self, check_alive=True):\n    self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive)", "docstring": "Kill the raylet monitor.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "codesearchnet"}
{"code": "def period_end_day(self, value=None):\n    if (value is not None):\n        try:\n            value = str(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type str for field `period_end_day`'.format(value))\n        if (',' in value):\n            raise ValueError('value should not contain a comma for field `period_end_day`')\n    self._period_end_day = value", "docstring": "Corresponds to IDD Field `period_end_day`\n\nArgs:\nvalue (str): value for IDD Field `period_end_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def label(self, name):\n    if isinstance(name, str):\n        self._label = name\n    else:\n        raise TypeError('label expects a string')", "docstring": "Set snapshot label to name\n\nArgs:\nname (str or None): label to assign unitary\n\nRaises:\nTypeError: name is not string or None.", "source": "codesearchnet"}
{"code": "def get_unique_variable(name):\n  \n  candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)\n  if not candidates:\n    raise ValueError('Couldnt find variable %s' % name)\n\n  for candidate in candidates:\n    if candidate.op.name == name:\n      return candidate\n  raise ValueError('Variable %s does not uniquely identify a variable', name)", "docstring": "Gets the variable uniquely identified by that name.\n\nArgs:\nname: a name that uniquely identifies the variable.\n\nReturns:\na tensorflow variable.\n\nRaises:\nValueError: if no variable uniquely identified by the name exists.", "source": "juraj-google-style"}
{"code": "def save(self, output_saved_model_dir):\n    assert self._converted\n    if self._need_calibration:\n        assert self._calibration_data_collected\n    if self._input_graph_def:\n        raise ValueError('Not able to save to a SavedModel since input is a GraphDef')\n\n    def _restore_collections(dest_graph, src_meta_graph_def, collection_keys):\n        \n        scope = ''\n        for key in collection_keys:\n            collection_def = src_meta_graph_def.collection_def[key]\n            kind = collection_def.WhichOneof('kind')\n            if kind is None:\n                logging.error('Cannot identify data type for collection %s. Skipping.', key)\n                continue\n            from_proto = ops.get_from_proto_function(key)\n            if from_proto and kind == 'bytes_list':\n                proto_type = ops.get_collection_proto_type(key)\n                for value in collection_def.bytes_list.value:\n                    proto = proto_type()\n                    proto.ParseFromString(value)\n                    try:\n                        new_value = from_proto(proto, import_scope=scope)\n                    except:\n                        continue\n                    dest_graph.add_to_collection(key, new_value)\n            else:\n                field = getattr(collection_def, kind)\n                if kind == 'node_list':\n                    for value in field.value:\n                        name = ops.prepend_name_scope(value, scope)\n                        try:\n                            col_op = dest_graph.as_graph_element(name)\n                        except (TypeError, ValueError, KeyError):\n                            continue\n                        dest_graph.add_to_collection(key, col_op)\n                elif kind == 'int64_list':\n                    for value in field.value:\n                        dest_graph.add_to_collection(key, int(value))\n                else:\n                    for value in field.value:\n                        dest_graph.add_to_collection(key, ops.prepend_name_scope(value, scope))\n    saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir)\n    with ops.Graph().as_default():\n        importer.import_graph_def(self._converted_graph_def, name='')\n        _restore_collections(ops.get_default_graph(), self._grappler_meta_graph_def, self._collections_to_keep(self._grappler_meta_graph_def.collection_def))\n        with session.Session() as sess:\n            saved_model_builder.add_meta_graph_and_variables(sess, self._input_saved_model_tags, signature_def_map=self._grappler_meta_graph_def.signature_def)\n    saved_model_builder.save()", "docstring": "Save the converted graph as a SavedModel.\n\nArgs:\noutput_saved_model_dir: construct a SavedModel using the converted\nGraphDef and save it to the specified directory. This option only works\nwhen the input graph is loaded from a SavedModel, i.e. when\ninput_saved_model_dir is specified and input_graph_def is None in\n__init__().\n\nRaises:\nValueError: if the input to the converter is a GraphDef instead of a\nSavedModel.", "source": "github-repos"}
{"code": "def track(self, event_key, user_id, attributes=None, event_tags=None):\n    if (not self.is_valid):\n        self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))\n        return\n    if (not validator.is_non_empty_string(event_key)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))\n        return\n    if (not isinstance(user_id, string_types)):\n        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n        return\n    if (not self._validate_user_inputs(attributes, event_tags)):\n        return\n    event = self.config.get_event(event_key)\n    if (not event):\n        self.logger.info(('Not tracking user \"%s\" for event \"%s\".' % (user_id, event_key)))\n        return\n    conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)\n    self.logger.info(('Tracking event \"%s\" for user \"%s\".' % (event_key, user_id)))\n    self.logger.debug(('Dispatching conversion event to URL %s with params %s.' % (conversion_event.url, conversion_event.params)))\n    try:\n        self.event_dispatcher.dispatch_event(conversion_event)\n    except:\n        self.logger.exception('Unable to dispatch conversion event!')\n    self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id, attributes, event_tags, conversion_event)", "docstring": "Send conversion event to Optimizely.\n\nArgs:\nevent_key: Event key representing the event which needs to be recorded.\nuser_id: ID for user.\nattributes: Dict representing visitor attributes and values which need to be recorded.\nevent_tags: Dict representing metadata associated with the event.", "source": "codesearchnet"}
{"code": "def GetCodeObjectAtLine(module, line):\n    if (not hasattr(module, '__file__')):\n        return (False, (None, None))\n    prev_line = 0\n    next_line = six.MAXSIZE\n    for code_object in _GetModuleCodeObjects(module):\n        for co_line_number in _GetLineNumbers(code_object):\n            if (co_line_number == line):\n                return (True, code_object)\n            elif (co_line_number < line):\n                prev_line = max(prev_line, co_line_number)\n            elif (co_line_number > line):\n                next_line = min(next_line, co_line_number)\n                break\n    prev_line = (None if (prev_line == 0) else prev_line)\n    next_line = (None if (next_line == six.MAXSIZE) else next_line)\n    return (False, (prev_line, next_line))", "docstring": "Searches for a code object at the specified line in the specified module.\n\nArgs:\nmodule: module to explore.\nline: 1-based line number of the statement.\n\nReturns:\n(True, Code object) on success or (False, (prev_line, next_line)) on\nfailure, where prev_line and next_line are the closest lines with code above\nand below the specified line, or None if they do not exist.", "source": "codesearchnet"}
{"code": "def __init__(self, identifier, text_format=False):\n    \n    super(FormatSpecification, self).__init__()\n    self._text_format = text_format\n    self.identifier = identifier\n    self.signatures = []", "docstring": "Initializes a format specification.\n\nArgs:\nidentifier (str): unique name for the format.\ntext_format (Optional[bool]): True if the format is a text format,\nFalse otherwise.", "source": "juraj-google-style"}
{"code": "def _get_column_alias(builder: column_expression_builder.ColumnExpressionBuilder) -> str:\n    if builder.column_name:\n        return builder.column_name\n    else:\n        invoke_node = builder.node\n        while invoke_node and (not hasattr(invoke_node, 'identifier') or not invoke_node.identifier):\n            invoke_node = invoke_node.parent_node\n        if _fhir_path_data_types.returns_collection(invoke_node.return_type):\n            return f'{invoke_node.identifier}_element_'\n        else:\n            return invoke_node.identifier", "docstring": "Determine the column alias based on the builder's state.\n\nArgs:\nbuilder: A ColumnExpressionBuilder object.\n\nReturns:\nA string representing the column alias.", "source": "github-repos"}
{"code": "def CreateBudget(client):\n  \n  budget_service = client.GetService('BudgetService', version='v201809')\n\n  \n  budget = {\n      'name': 'Interplanetary Cruise App Budget \n      'amount': {\n          'microAmount': '50000000'\n      },\n      'deliveryMethod': 'STANDARD',\n      'isExplicitlyShared': False\n  }\n\n  budget_operations = [{\n      'operator': 'ADD',\n      'operand': budget\n  }]\n\n  \n  budget_id = budget_service.mutate(budget_operations)['value'][0]['budgetId']\n\n  return budget_id", "docstring": "Creates a budget and returns its budgetId.\n\nArgs:\nclient: An AdWordsClient instance.\n\nReturns:\nAn int budgetId for the created Budget.", "source": "juraj-google-style"}
{"code": "def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str],\n                                      Callable[[str, str, int], str]]:\n    \n    package_locale = path.join(path.dirname(__pkg.__file__), 'locale')\n    gettext.install(__pkg.__name__, package_locale)\n\n    return gettext.gettext, gettext.ngettext", "docstring": "Configure ``gettext`` for given package.\n\nArgs:\n__pkg: Package to use as location for :program:`gettext` files\nReturns:\n:program:`gettext` functions for singular and plural translations", "source": "juraj-google-style"}
{"code": "def minutes(start, end=None):\n    return iterate.between(start, datetime.timedelta(minutes=1), end)", "docstring": "Iterate over the minutes between the given datetime_tzs.\n\nArgs:\nstart: datetime_tz to start from.\nend: (Optional) Date to end at, if not given the iterator will never\nterminate.\n\nReturns:\nAn iterator which generates datetime_tz objects a minute apart.", "source": "codesearchnet"}
{"code": "def _GetClientLibCallback(args, client_func=_GetClientLib):\n  \n  client_paths = client_func(\n      args.service, args.language, args.output, args.build_system,\n      hostname=args.hostname, application_path=args.application)\n\n  for client_path in client_paths:\n    print 'API client library written to %s' % client_path", "docstring": "Generate discovery docs and client libraries to files.\n\nArgs:\nargs: An argparse.Namespace object to extract parameters from.\nclient_func: A function that generates client libraries and stores them to\nfiles, accepting a list of service names, a client library language,\nan output directory, a build system for the client library language, and\na hostname.", "source": "juraj-google-style"}
{"code": "def _bfd_rx(self, **kwargs):\n        \n        method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \\\n                      'bfd_interval_min_rx'\n        bfd_rx = getattr(self._rbridge, method_name)\n        config = bfd_rx(**kwargs)\n        if kwargs['delete']:\n            tag = 'min-rx'\n            config.find('.\n            pass\n        return config", "docstring": "Return the BFD minimum receive interval XML.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\nmin_rx (str): BFD receive interval in milliseconds (300, 500, etc)\ndelete (bool): Remove the configuration if ``True``.\n\nReturns:\nXML to be passed to the switch.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):\n    \n    system_configuration = artifacts.SystemConfigurationArtifact()\n\n    system_configuration.code_page = self.GetValue(\n        'codepage', default_value=self._codepage)\n\n    system_configuration.hostname = self._hostnames.get(\n        session_identifier, None)\n\n    system_configuration.keyboard_layout = self.GetValue('keyboard_layout')\n    system_configuration.operating_system = self.GetValue('operating_system')\n    system_configuration.operating_system_product = self.GetValue(\n        'operating_system_product')\n    system_configuration.operating_system_version = self.GetValue(\n        'operating_system_version')\n\n    date_time = datetime.datetime(2017, 1, 1)\n    time_zone = self._time_zone.tzname(date_time)\n\n    if time_zone and isinstance(time_zone, py2to3.BYTES_TYPE):\n      time_zone = time_zone.decode('ascii')\n\n    system_configuration.time_zone = time_zone\n\n    user_accounts = self._user_accounts.get(session_identifier, {})\n    \n    \n    system_configuration.user_accounts = list(user_accounts.values())\n\n    return system_configuration", "docstring": "Retrieves the knowledge base as a system configuration artifact.\n\nArgs:\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nReturns:\nSystemConfigurationArtifact: system configuration artifact.", "source": "juraj-google-style"}
{"code": "def build_info(self):\n    if self.is_bootloader:\n        self.log.error('Device is in fastboot mode, could not get build info.')\n        return\n    if self._build_info is None or self._is_rebooting:\n        info = {}\n        build_info = self.adb.getprops(CACHED_SYSTEM_PROPS)\n        for build_info_constant in BuildInfoConstants:\n            info[build_info_constant.build_info_key] = build_info.get(build_info_constant.system_prop_key, '')\n        self._build_info = info\n        return info\n    return self._build_info", "docstring": "Gets the build info of this Android device, including build id and type.\n\nThis is not available if the device is in bootloader mode.\n\nReturns:\nA dict with the build info of this Android device, or None if the\ndevice is in bootloader mode.", "source": "github-repos"}
{"code": "def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:\n    v1 = (self[k].coords - self[l].coords)\n    v2 = (self[j].coords - self[k].coords)\n    v3 = (self[i].coords - self[j].coords)\n    v23 = np.cross(v2, v3)\n    v12 = np.cross(v1, v2)\n    return math.degrees(math.atan2((np.linalg.norm(v2) * np.dot(v1, v23)), np.dot(v12, v23)))", "docstring": "Returns dihedral angle specified by four sites.\n\nArgs:\ni: Index of first site\nj: Index of second site\nk: Index of third site\nl: Index of fourth site\n\nReturns:\nDihedral angle in degrees.", "source": "codesearchnet"}
{"code": "def parse_received(received):\n    \n\n    values_by_clause = {}\n    for pattern in RECEIVED_COMPILED_LIST:\n        matches = [match for match in pattern.finditer(received)]\n\n        if len(matches) == 0:\n            \n            log.debug(\"No matches found for %s in %s\" % (\n                pattern.pattern, received))\n            continue\n        elif len(matches) > 1:\n            \n            \n            msg = \"More than one match found for %s in %s\" % (\n                pattern.pattern, received)\n            log.error(msg)\n            raise MailParserReceivedParsingError(msg)\n        else:\n            \n            log.debug(\"Found one match for %s in %s\" % (\n                pattern.pattern, received))\n            match = matches[0].groupdict()\n            if six.PY2:\n                values_by_clause[match.keys()[0]] = match.values()[0]\n            elif six.PY3:\n                key = list(match.keys())[0]\n                value = list(match.values())[0]\n                values_by_clause[key] = value\n\n    if len(values_by_clause) == 0:\n        \n        msg = \"Unable to match any clauses in %s\" % (received)\n        log.error(msg)\n        raise MailParserReceivedParsingError(msg)\n    return values_by_clause", "docstring": "Parse a single received header.\nReturn a dictionary of values by clause.\n\nArguments:\nreceived {str} -- single received header\n\nRaises:\nMailParserReceivedParsingError -- Raised when a\nreceived header cannot be parsed\n\nReturns:\ndict -- values by clause", "source": "juraj-google-style"}
{"code": "def mknod(self, filename, mode=None, device=None, dir_fd=None):\n    if self.filesystem.is_windows_fs:\n        raise (AttributeError, \"module 'os' has no attribute 'mknode'\")\n    if (mode is None):\n        mode = (S_IFREG | 384)\n    if (device or ((not (mode & S_IFREG)) and (not is_root()))):\n        self.filesystem.raise_os_error(errno.EPERM)\n    filename = self._path_with_dir_fd(filename, self.mknod, dir_fd)\n    (head, tail) = self.path.split(filename)\n    if (not tail):\n        if self.filesystem.exists(head, check_link=True):\n            self.filesystem.raise_os_error(errno.EEXIST, filename)\n        self.filesystem.raise_os_error(errno.ENOENT, filename)\n    if (tail in (b'.', u'.', b'..', u'..')):\n        self.filesystem.raise_os_error(errno.ENOENT, filename)\n    if self.filesystem.exists(filename, check_link=True):\n        self.filesystem.raise_os_error(errno.EEXIST, filename)\n    try:\n        self.filesystem.add_object(head, FakeFile(tail, (mode & (~ self.filesystem.umask)), filesystem=self.filesystem))\n    except IOError as e:\n        self.filesystem.raise_os_error(e.errno, filename)", "docstring": "Create a filesystem node named 'filename'.\n\nDoes not support device special files or named pipes as the real os\nmodule does.\n\nArgs:\nfilename: (str) Name of the file to create\nmode: (int) Permissions to use and type of file to be created.\nDefault permissions are 0o666.  Only the stat.S_IFREG file type\nis supported by the fake implementation.  The umask is applied\nto this mode.\ndevice: not supported in fake implementation\ndir_fd: If not `None`, the file descriptor of a directory,\nwith `filename` being relative to this directory.\nNew in Python 3.3.\n\nRaises:\nOSError: if called with unsupported options or the file can not be\ncreated.", "source": "codesearchnet"}
{"code": "def add_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n    pass", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `add_period_and_roll` with\nPeriodType.DAY. For example, adding 5 business days to Monday gives the next\nMonday (unless there are holidays on this week or next Monday). Adding 5\ndays and rolling means landing on Saturday and then rolling either to next\nMonday or to Friday of the same week, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the addition. If `roll_convention` is\n`NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\ndate_tensor: DateTensor of dates to advance from.\nnum_days: Tensor of int32 type broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting DateTensor.", "source": "github-repos"}
{"code": "def assert_corofunction(**kw):\n    \n    for name, value in kw.items():\n        if not asyncio.iscoroutinefunction(value):\n            raise TypeError(\n                'paco: {} must be a coroutine function'.format(name))", "docstring": "Asserts if a given values are a coroutine function.\n\nArguments:\n**kw (mixed): value to check if it is an iterable.\n\nRaises:\nTypeError: if assertion fails.", "source": "juraj-google-style"}
{"code": "def gen_pdf(rst_content, style_text, header=None, footer=FOOTER):\n    out_file_obj = StringIO()\n    with NamedTemporaryFile() as f:\n        f.write(style_text)\n        f.flush()\n        pdf = _init_pdf(f.name, header, footer)\n    pdf.createPdf(text=rst_content, output=out_file_obj, compressed=True)\n    out_file_obj.seek(0)\n    return out_file_obj", "docstring": "Create PDF file from `rst_content` using `style_text` as style.\n\nOptinally, add `header` or `footer`.\n\nArgs:\nrst_content (str): Content of the PDF file in restructured text markup.\nstyle_text (str): Style for the :mod:`rst2pdf` module.\nheader (str, default None): Header which will be rendered to each page.\nfooter (str, default FOOTER): Footer, which will be rendered to each\npage. See :attr:`FOOTER` for details.\n\nReturns:\nobj: StringIO file instance containing PDF file.", "source": "codesearchnet"}
{"code": "def _check_root_tag(self, root):\n        \n        supported = self.supported_tags()\n        if root.tag in supported:\n            return\n\n        error = \"Document root element ({0}) not one of ({1})\"\n        raise UnsupportedRootElementError(\n            message=error.format(root.tag, supported),\n            expected=supported,\n            found=root.tag,\n        )", "docstring": "Check that the XML element tree has a supported root element.\n\nArgs:\nroot (etree.Element)\n\nRaises:\nUnsupportedRootElementError", "source": "juraj-google-style"}
{"code": "def get_instances_with_configs(configs):\n    \n    results = []\n    for c in configs:\n        try:\n            serial = c.pop('serial')\n        except KeyError:\n            raise Error(\n                'Required value \"serial\" is missing in AndroidDevice config %s.'\n                % c)\n        is_required = c.get(KEY_DEVICE_REQUIRED, True)\n        try:\n            ad = AndroidDevice(serial)\n            ad.load_config(c)\n        except Exception:\n            if is_required:\n                raise\n            ad.log.exception('Skipping this optional device due to error.')\n            continue\n        results.append(ad)\n    return results", "docstring": "Create AndroidDevice instances from a list of dict configs.\n\nEach config should have the required key-value pair 'serial'.\n\nArgs:\nconfigs: A list of dicts each representing the configuration of one\nandroid device.\n\nReturns:\nA list of AndroidDevice objects.", "source": "juraj-google-style"}
{"code": "def run(self, args):\n    jlink = pylink.JLink()\n    if args.test:\n        if jlink.test():\n            print('Self-test succeeded.')\n        else:\n            print('Self-test failed.')\n    elif ((args.list is None) or (args.list in ['usb', 'ip'])):\n        host = pylink.JLinkHost.USB_OR_IP\n        if (args.list == 'usb'):\n            host = pylink.JLinkHost.USB\n        elif (args.list == 'ip'):\n            host = pylink.JLinkHost.IP\n        emulators = jlink.connected_emulators(host)\n        for (index, emulator) in enumerate(emulators):\n            if (index > 0):\n                print('')\n            print(('Product Name: %s' % emulator.acProduct.decode()))\n            print(('Serial Number: %s' % emulator.SerialNumber))\n            usb = bool(emulator.Connection)\n            if (not usb):\n                print(('Nickname: %s' % emulator.acNickname.decode()))\n                print(('Firmware: %s' % emulator.acFWString.decode()))\n            print(('Connection: %s' % ('USB' if usb else 'IP')))\n            if (not usb):\n                print(('IP Address: %s' % emulator.aIPAddr))\n    elif (args.supported is not None):\n        device = args.supported[0]\n        num_supported_devices = jlink.num_supported_devices()\n        for i in range(num_supported_devices):\n            found_device = jlink.supported_device(i)\n            if (device.lower() == found_device.name.lower()):\n                print(('Device Name: %s' % device))\n                print(('Core ID: %s' % found_device.CoreId))\n                print(('Flash Address: %s' % found_device.FlashAddr))\n                print(('Flash Size: %s bytes' % found_device.FlashSize))\n                print(('RAM Address: %s' % found_device.RAMAddr))\n                print(('RAM Size: %s bytes' % found_device.RAMSize))\n                print(('Manufacturer: %s' % found_device.manufacturer))\n                break\n        else:\n            print(('%s is not supported :(' % device))\n    return None", "docstring": "Runs the emulator command.\n\nArgs:\nself (EmulatorCommand): the ``EmulatorCommand`` instance\nargs (Namespace): arguments to parse\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def expected_h(nvals, fit='RANSAC'):\n    rsvals = [expected_rs(n) for n in nvals]\n    poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)\n    return poly[0]", "docstring": "Uses expected_rs to calculate the expected value for the Hurst exponent h\nbased on the values of n used for the calculation.\n\nArgs:\nnvals (iterable of int):\nthe values of n used to calculate the individual (R/S)_n\n\nKWargs:\nfit (str):\nthe fitting method to use for the line fit, either 'poly' for normal\nleast squares polynomial fitting or 'RANSAC' for RANSAC-fitting which\nis more robust to outliers\n\nReturns:\nfloat:\nexpected h for white noise", "source": "codesearchnet"}
{"code": "def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):\n    tensorshape_util.assert_has_rank(mixture_weight_vector.shape, 2)\n    if (not tensorshape_util.is_compatible_with(mean_vector.shape, mixture_weight_vector.shape)):\n        raise ValueError('Expecting means to have same shape as mixture weights.')\n    if (not tensorshape_util.is_compatible_with(stddev_vector.shape, mixture_weight_vector.shape)):\n        raise ValueError('Expecting stddevs to have same shape as mixture weights.')\n    pi_for_dot_prod = tf.expand_dims(mixture_weight_vector, axis=1)\n    mu_for_dot_prod = tf.expand_dims(mean_vector, axis=2)\n    sigma_for_dot_prod = tf.expand_dims(stddev_vector, axis=2)\n    mean_wa = tf.matmul(pi_for_dot_prod, mu_for_dot_prod)\n    mean_wa = tf.reshape(mean_wa, ((- 1),))\n    var_wa = tf.matmul(pi_for_dot_prod, tf.square(sigma_for_dot_prod))\n    var_wa = tf.reshape(var_wa, ((- 1),))\n    sq_mean_wa = tf.matmul(pi_for_dot_prod, tf.square(mu_for_dot_prod))\n    sq_mean_wa = tf.reshape(sq_mean_wa, ((- 1),))\n    mixture_variance = ((var_wa + sq_mean_wa) - tf.square(mean_wa))\n    return tf.sqrt(mixture_variance)", "docstring": "Computes the standard deviation of a mixture distribution.\n\nThis function works regardless of the component distribution, so long as\neach component's mean and standard deviation can be provided.\n\nArgs:\nmixture_weight_vector: A 2D tensor with shape [batch_size, num_components]\nmean_vector: A 2D tensor of mixture component means. Has shape `[batch_size,\nnum_components]`.\nstddev_vector: A 2D tensor of mixture component standard deviations. Has\nshape `[batch_size, num_components]`.\n\nReturns:\nA 1D tensor of shape `[batch_size]` representing the standard deviation of\nthe mixture distribution with given weights and component means and standard\ndeviations.\nRaises:\nValueError: If the shapes of the input tensors are not as expected.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n    if token_ids_1 is not None:\n        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)\n    return [1] + [0] * len(token_ids_0)", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`): List of IDs.\ntoken_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def _parse_config(self):\n    config = self.get_block('mlag configuration')\n    cfg = dict()\n    cfg.update(self._parse_domain_id(config))\n    cfg.update(self._parse_local_interface(config))\n    cfg.update(self._parse_peer_address(config))\n    cfg.update(self._parse_peer_link(config))\n    cfg.update(self._parse_shutdown(config))\n    return dict(config=cfg)", "docstring": "Parses the mlag global configuration\n\nReturns:\ndict: A dict object that is intended to be merged into the\nresource dict", "source": "codesearchnet"}
{"code": "def _register_notification_callback(self, connection_handle, attribute_handle, callback, once=False):\n    notification_id = (connection_handle, attribute_handle)\n    with self.notification_callbacks_lock:\n        self.notification_callbacks[notification_id] = (callback, once)", "docstring": "Register a callback as a notification callback. It will be called if a notification with the matching\nconnection_handle and attribute_handle is received.\n\nArgs:\nconnection_handle (int): The connection handle to watch\nattribute_handle (int): The attribute handle to watch\ncallback (func): The callback function to call once the notification has been received\nonce (bool): Should the callback only be called once (and then removed from the notification callbacks)", "source": "codesearchnet"}
{"code": "def derivative_extraction(feat, DeltaWindows):\n    \n\n    \n    rows, cols = feat.shape\n\n    \n    DIF = np.zeros(feat.shape, dtype=feat.dtype)\n    Scale = 0\n\n    \n    FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')\n    for i in range(DeltaWindows):\n        \n        offset = DeltaWindows\n\n        \n        Range = i + 1\n\n        dif = Range * FEAT[:, offset + Range:offset + Range + cols]\n        - FEAT[:, offset - Range:offset - Range + cols]\n        Scale += 2 * np.power(Range, 2)\n        DIF += dif\n\n    return DIF / Scale", "docstring": "This function the derivative features.\n\nArgs:\nfeat (array): The main feature vector(For returning the second\norder derivative it can be first-order derivative).\nDeltaWindows (int): The value of  DeltaWindows is set using\nthe configuration parameter DELTAWINDOW.\n\nReturns:\narray: Derivative feature vector - A NUMFRAMESxNUMFEATURES numpy\narray which is the derivative features along the features.", "source": "juraj-google-style"}
{"code": "def datasets_insert(self, dataset_name, friendly_name=None, description=None):\n    url = (Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, '')))\n    data = {'kind': 'bigquery\n    if friendly_name:\n        data['friendlyName'] = friendly_name\n    if description:\n        data['description'] = description\n    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)", "docstring": "Issues a request to create a dataset.\n\nArgs:\ndataset_name: the name of the dataset to create.\nfriendly_name: (optional) the friendly name for the dataset\ndescription: (optional) a description for the dataset\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def options(self, section):\n        \n        if not self.has_section(section):\n            raise NoSectionError(section) from None\n        return self.__getitem__(section).options()", "docstring": "Returns list of configuration options for the named section.\n\nArgs:\nsection (str): name of section\n\nReturns:\nlist: list of option names", "source": "juraj-google-style"}
{"code": "def call_remoteckan(self, *args, **kwargs):\n        \n        \n        requests_kwargs = kwargs.get('requests_kwargs', dict())\n        credentials = self._get_credentials()\n        if credentials:\n            requests_kwargs['auth'] = credentials\n        kwargs['requests_kwargs'] = requests_kwargs\n        apikey = kwargs.get('apikey', self.get_api_key())\n        kwargs['apikey'] = apikey\n        return self.remoteckan().call_action(*args, **kwargs)", "docstring": "Calls the remote CKAN\n\nArgs:\n*args: Arguments to pass to remote CKAN call_action method\n**kwargs: Keyword arguments to pass to remote CKAN call_action method\n\nReturns:\nDict: The response from the remote CKAN call_action method", "source": "juraj-google-style"}
{"code": "def export_model(module_spec, class_count, saved_model_dir):\n    (sess, in_image, _, _, _, _) = build_eval_session(module_spec, class_count)\n    with sess.graph.as_default() as graph:\n        tf.saved_model.simple_save(sess, saved_model_dir, inputs={'image': in_image}, outputs={'prediction': graph.get_tensor_by_name('final_result:0')}, legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op'))", "docstring": "Exports model for serving.\n\nArgs:\nmodule_spec: The hub.ModuleSpec for the image module being used.\nclass_count: The number of classes.\nsaved_model_dir: Directory in which to save exported model and variables.", "source": "codesearchnet"}
{"code": "def SignBuffer(self, in_buffer):\n    precondition.AssertType(in_buffer, bytes)\n    with tempfile.NamedTemporaryFile() as temp_in:\n        temp_in.write(in_buffer)\n        temp_in.seek(0)\n        outfile = self.SignFile(temp_in.name)\n        with io.open(outfile, 'rb') as filedesc:\n            return filedesc.read()", "docstring": "Sign a buffer via temp files.\n\nOur signing tool can't sign a buffer, so we work around it using temporary\nfiles.\n\nArgs:\nin_buffer: data to sign\n\nReturns:\nsigned data", "source": "codesearchnet"}
{"code": "def _parse_description(self, config):\n        \n        value = None\n        match = re.search(r'description (.+)$', config, re.M)\n        if match:\n            value = match.group(1)\n        return dict(description=value)", "docstring": "Scans the specified config block and returns the description value\n\nArgs:\nconfig (str): The interface config block to scan\n\nReturns:\ndict: Returns a dict object with the description value retrieved\nfrom the config block.  If the description value is not\nconfigured, None is returned as the value.  The returned dict\nis intended to be merged into the interface resource dict.", "source": "juraj-google-style"}
{"code": "def load_checkpoints(self, checkpointDirs):\n    self.memo_lookup_table = None\n    if (not checkpointDirs):\n        return {}\n    if (type(checkpointDirs) is not list):\n        raise BadCheckpoint('checkpointDirs expects a list of checkpoints')\n    return self._load_checkpoints(checkpointDirs)", "docstring": "Load checkpoints from the checkpoint files into a dictionary.\n\nThe results are used to pre-populate the memoizer's lookup_table\n\nKwargs:\n- checkpointDirs (list) : List of run folder to use as checkpoints\nEg. ['runinfo/001', 'runinfo/002']\n\nReturns:\n- dict containing, hashed -> future mappings", "source": "codesearchnet"}
{"code": "def indicators(self, indicator_type=None, filters=None, params=None):\n    indicator = self._tcex.ti.indicator(indicator_type)\n    for i in self.tc_requests.indicators_from_tag(indicator, self.name, filters=filters, params=params):\n        (yield i)", "docstring": "Gets all indicators from a tag.\n\nArgs:\nparams:\nfilters:\nindicator_type:", "source": "codesearchnet"}
{"code": "def sever_sink_ports(self, context, ports, connected_to=None):\n        \n        \n        \n        if connected_to:\n            \n            source_port_lookup = self._source_port_lookup(\n                ports.get(connected_to, []))\n        else:\n            source_port_lookup = True\n\n        \n        sink_ports = self._get_flowgraph_ports(ports, SinkPortInfo)\n\n        \n        if sink_ports and source_port_lookup:\n            child = context.block_view(self.mri)\n            attribute_values = {}\n            for name, port_info in sink_ports.items():\n                if source_port_lookup is True or source_port_lookup.get(\n                        child[name].value, None) == port_info.port:\n                    attribute_values[name] = port_info.disconnected_value\n            child.put_attribute_values(attribute_values)", "docstring": "Conditionally sever Sink Ports of the child. If connected_to\nis then None then sever all, otherwise restrict to connected_to's\nSource Ports\n\nArgs:\ncontext (Context): The context to use\nports (dict): {part_name: [PortInfo]}\nconnected_to (str): Restrict severing to this part", "source": "juraj-google-style"}
{"code": "def get_nn(self, structure, n):\n        \n\n        return [e['site'] for e in self.get_nn_info(structure, n)]", "docstring": "Get near neighbors of site with index n in structure.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site in structure for which to determine\nneighbors.\nReturns:\nsites (list of Site objects): near neighbors.", "source": "juraj-google-style"}
{"code": "def tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant=False, ordered_input_arrays=None, ordered_output_arrays=None):\n    pywrap_mlir.experimental_tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant, ordered_input_arrays, ordered_output_arrays)", "docstring": "Converts TFLite flatbuffer to TOSA dialect in MLIR bytecode.\n\nArgs:\nflatbuffer: Path to flatbuffer.\nbytecode: Path to output bytecode.\nuse_external_constant: Whether to create `tfl.external_const` instead of\n`tfl.const`.\nordered_input_arrays:\nordered_output_arrays: If ordered_output_arrays is not empty, then the\nfunction will only return nodes in ordered_output_arrays in the same order", "source": "github-repos"}
{"code": "def op_priority(op_type):\n    if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range', 'VariableShape', 'Fill', 'OneHot', 'ShapeN'):\n        return 7\n    if op_type in ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient', 'PreventGradient', 'Squeeze', 'Gather', 'GatherNd'):\n        return 6\n    if op_type in ('ConcatV2', 'Concat', 'StridedSlice', 'Slice', 'Pack', 'Tile', 'CollectivePermute', 'SplitV', 'DynamicPartition'):\n        return 5\n    if op_type in ('Pad', 'RandomUniformInt', 'GreaterEqual'):\n        return 4\n    if op_type in ('Sum', 'AddV2', 'Add', 'AddN', 'BiasAdd', 'CrossReplicaSum'):\n        return 3\n    if op_type in ('Neg', 'Sub'):\n        return 2\n    if op_type in ('Mul', 'Square', 'MatMul', 'RandomUniform', 'Select', 'Maximum', 'Mean', 'Variance', 'Exp', 'Rsqrt'):\n        return 1\n    return 2", "docstring": "Returns the priority of the op.\n\nIf the priority of the op is k, it will be traced if trace_level>=k.\nArgs:\nop_type: String name of the operation type.\nReturns:\nInteger value corresponding the priority of the op.", "source": "github-repos"}
{"code": "def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False):\n    del assert_items_equal\n    self.gen_outputs(ds_fn, [], num_outputs, verify_exhausted=True, sparse_tensors=sparse_tensors)\n    actual = self.gen_outputs(ds_fn, [], 0, ckpt_saved=True, verify_exhausted=True, sparse_tensors=sparse_tensors)\n    self.assertLen(actual, 0)", "docstring": "Verifies that saving and restoring an exhausted iterator works.\n\nAn exhausted iterator is one which has returned an OutOfRange error.\n\nArgs:\nds_fn: 0-argument function that returns a Dataset.\nnum_outputs: Total number of outputs expected from this Dataset.\nsparse_tensors: Whether dataset is built from SparseTensor(s).\nassert_items_equal: Tests the output has the expected elements regardless\nof order.\n\nRaises:\nAssertionError if any test fails.", "source": "github-repos"}
{"code": "def _catch_errors(a_func, to_catch):\n    \n    def inner(*args, **kwargs):\n        \n        try:\n            return a_func(*args, **kwargs)\n        \n        except tuple(to_catch) as exception:\n            utils.raise_with_traceback(\n                gax.errors.create_error('RPC failed', cause=exception))\n\n    return inner", "docstring": "Updates a_func to wrap exceptions with GaxError\n\nArgs:\na_func (callable): A callable.\nto_catch (list[Exception]): Configures the exceptions to wrap.\n\nReturns:\nCallable: A function that will wrap certain exceptions with GaxError", "source": "juraj-google-style"}
{"code": "def multithread_predict_dataflow(dataflows, model_funcs):\n    \n    num_worker = len(model_funcs)\n    assert len(dataflows) == num_worker\n    if num_worker == 1:\n        return predict_dataflow(dataflows[0], model_funcs[0])\n    kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {}\n    with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \\\n            tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:\n        futures = []\n        for dataflow, pred in zip(dataflows, model_funcs):\n            futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))\n        all_results = list(itertools.chain(*[fut.result() for fut in futures]))\n        return all_results", "docstring": "Running multiple `predict_dataflow` in multiple threads, and aggregate the results.\n\nArgs:\ndataflows: a list of DataFlow to be used in :func:`predict_dataflow`\nmodel_funcs: a list of callable to be used in :func:`predict_dataflow`\n\nReturns:\nlist of dict, in the format used by\n`DetectionDataset.eval_or_save_inference_results`", "source": "juraj-google-style"}
{"code": "def get_blob(profile, sha):\n    resource = ('/blobs/' + sha)\n    data = api.get_request(profile, resource)\n    return prepare(data)", "docstring": "Fetch a blob.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of the blob to fetch.\n\nReturns:\nA dict with data about the blob.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwargs):\n    \n    prefix_chars = kwargs.get('prefix_chars', '-')\n    if prefix_chars != '-':\n      raise ValueError(\n          'argparse_flags.ArgumentParser only supports \"-\" as the prefix '\n          'character, found \"{}\".'.format(prefix_chars))\n\n    \n    self._inherited_absl_flags = kwargs.pop('inherited_absl_flags', flags.FLAGS)\n    \n    \n    super(ArgumentParser, self).__init__(**kwargs)\n\n    if self.add_help:\n      \n      \n      self.add_argument(\n          \n          '--helpshort', action='help',\n          default=argparse.SUPPRESS, help=argparse.SUPPRESS)\n      self.add_argument(\n          '--helpfull', action=_HelpFullAction,\n          default=argparse.SUPPRESS, help='show full help message and exit')\n\n    if self._inherited_absl_flags:\n      self.add_argument('--undefok', help=argparse.SUPPRESS)\n      self._define_absl_flags(self._inherited_absl_flags)", "docstring": "Initializes ArgumentParser.\n\nArgs:\n**kwargs: same as argparse.ArgumentParser, except:\n1. It also accepts `inherited_absl_flags`: the absl flags to inherit.\nThe default is the global absl.flags.FLAGS instance. Pass None to\nignore absl flags.\n2. The `prefix_chars` argument must be the default value '-'.\n\nRaises:\nValueError: Raised when prefix_chars is not '-'.", "source": "juraj-google-style"}
{"code": "def char(self, c: str) -> None:\n        \n        if self.peek() == c:\n            self.offset += 1\n        else:\n            raise UnexpectedInput(self, f\"char '{c}'\")", "docstring": "Parse the specified character.\n\nArgs:\nc: One-character string.\n\nRaises:\nEndOfInput: If past the end of `self.input`.\nUnexpectedInput: If the next character is different from `c`.", "source": "juraj-google-style"}
{"code": "def _transform(transformer_chain: Sequence[Tuple[(DataTransformer, Type)]], data: S, context: PipelineContext=None) -> T:\n    for (transformer, target_type) in transformer_chain:\n        data = transformer.transform(target_type, data, context)\n    return data", "docstring": "Transform data to a new type.\n\nArgs:\ntransformer_chain: A sequence of (transformer, type) pairs to convert the data.\ndata: The data to be transformed.\ncontext: The context of the transformations (mutable).\n\nReturns:\nThe transformed data.", "source": "codesearchnet"}
{"code": "def task_address(self, job_name, task_index):\n    try:\n        job = self._cluster_spec[job_name]\n    except KeyError:\n        raise ValueError('No such job in cluster: %r' % job_name)\n    try:\n        return job[task_index]\n    except KeyError:\n        raise ValueError('No task with index %r in job %r' % (task_index, job_name))", "docstring": "Returns the address of the given task in the given job.\n\nArgs:\njob_name: The string name of a job in this cluster.\ntask_index: A non-negative integer.\n\nReturns:\nThe address of the given task in the given job.\n\nRaises:\nValueError: If `job_name` does not name a job in this cluster,\nor no task with index `task_index` is defined in that job.", "source": "github-repos"}
{"code": "def has_open_file(self, file_object):\n    return (file_object in [wrappers[0].get_object() for wrappers in self.open_files if wrappers])", "docstring": "Return True if the given file object is in the list of open files.\n\nArgs:\nfile_object: The FakeFile object to be checked.\n\nReturns:\n`True` if the file is open.", "source": "codesearchnet"}
{"code": "def _submitQuery(self, gitquery, gitvars={}, verbose=False, rest=False):\n    errOut = (DEVNULL if (not verbose) else None)\n    authhead = ('Authorization: bearer ' + self.__githubApiToken)\n    bashcurl = ('curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql' if (not rest) else ('curl -iH TMPauthhead https://api.github.com' + gitquery))\n    bashcurl_list = bashcurl.split()\n    bashcurl_list[2] = authhead\n    if (not rest):\n        gitqueryJSON = json.dumps({'query': gitquery, 'variables': json.dumps(gitvars)})\n        bashcurl_list[6] = gitqueryJSON\n    fullResponse = check_output(bashcurl_list, stderr=errOut).decode()\n    _vPrint(verbose, ('\\n' + fullResponse))\n    fullResponse = fullResponse.split('\\r\\n\\r\\n')\n    heads = fullResponse[0].split('\\r\\n')\n    if (len(fullResponse) > 1):\n        result = fullResponse[1]\n    else:\n        result = ''\n    http = heads[0].split()\n    statusNum = int(http[1])\n    headDict = {}\n    headDict['http'] = heads[0]\n    for header in heads[1:]:\n        h = header.split(': ')\n        headDict[h[0]] = h[1]\n    linkDict = None\n    if ('Link' in headDict):\n        linkProperties = headDict['Link'].split(', ')\n        propDict = {}\n        for item in linkProperties:\n            divided = re.split('<https://api.github.com|>; rel=\"|\"', item)\n            propDict[divided[2]] = divided[1]\n        linkDict = propDict\n    return {'statusNum': statusNum, 'headDict': headDict, 'linkDict': linkDict, 'result': result}", "docstring": "Send a curl request to GitHub.\n\nArgs:\ngitquery (str): The query or endpoint itself.\nExamples:\nquery: 'query { viewer { login } }'\nendpoint: '/user'\ngitvars (Optional[Dict]): All query variables.\nDefaults to empty.\nverbose (Optional[bool]): If False, stderr prints will be\nsuppressed. Defaults to False.\nrest (Optional[bool]): If True, uses the REST API instead\nof GraphQL. Defaults to False.\n\nReturns:\n{\n'statusNum' (int): The HTTP status code.\n'headDict' (Dict[str]): The response headers.\n'linkDict' (Dict[int]): Link based pagination data.\n'result' (str): The body of the response.\n}", "source": "codesearchnet"}
{"code": "def basis(sample_paths):\n    samples = tf.convert_to_tensor(sample_paths)\n    dim = samples.shape.as_list()[-1]\n    grid = tf.range(0, degree + 1, dtype=samples.dtype)\n    samples_centered = samples - tf.math.reduce_mean(samples, axis=0)\n    samples_centered = tf.expand_dims(samples_centered, -2)\n    grid = tf.meshgrid(*dim * [grid])\n    grid = tf.reshape(tf.stack(grid, -1), [-1, dim])\n    basis_expansion = tf.reduce_prod(samples_centered ** grid, -1)\n    return tf.transpose(basis_expansion)", "docstring": "Computes polynomial basis expansion at the given sample points.\n\nArgs:\nsample_paths: A `Tensor` of either `float32` or `float64` dtype and of\nshape `[num_samples, dim]` where `dim` has to be statically known.\n\nReturns:\nA `Tensor` of shape `[degree * dim, num_samples]`.", "source": "github-repos"}
{"code": "def register_views(self, app):\n    self.add_resource(LoginRedirectView, '/auth/login')\n    self.add_resource(LogoutRedirectView, '/auth/logout')\n    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:\n        cls = entry_point.load()\n        app.available_auth_systems[cls.name] = cls\n        if app.register_auth_system(cls):\n            for vcls in cls.views:\n                self.add_resource(vcls, *vcls.URLS)\n                logger.debug('Registered auth system view {} for paths: {}'.format(cls.__name__, ', '.join(vcls.URLS)))\n    if (not app.active_auth_system):\n        logger.error('No auth systems active, please enable an auth system and then start the system again')\n        sys.exit((- 1))\n    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']:\n        view = entry_point.load()\n        self.add_resource(view, *view.URLS)\n        app.register_menu_item(view.MENU_ITEMS)\n        logger.debug('Registered view {} for paths: {}'.format(view.__name__, ', '.join(view.URLS)))", "docstring": "Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask\n\nArgs:\napp (`CINQFlask`): CINQFlask object to register views for\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def Match(self, event):\n    if (not self._matcher):\n        return True\n    self._decision = self._matcher.Matches(event)\n    return self._decision", "docstring": "Determines if an event matches the filter.\n\nArgs:\nevent (EventObject): an event.\n\nReturns:\nbool: True if the event matches the filter.", "source": "codesearchnet"}
{"code": "def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:\n    \n    try:  \n        return expr_or_pattern.match(expr)\n    except AttributeError:  \n        if expr_or_pattern == expr:\n            return MatchDict()  \n        else:\n            res = MatchDict()\n            res.success = False\n            res.reason = \"Expressions '%s' and '%s' are not the same\" % (\n                          repr(expr_or_pattern), repr(expr))\n            return res", "docstring": "Recursively match `expr` with the given `expr_or_pattern`\n\nArgs:\nexpr_or_pattern: either a direct expression (equal to `expr` for a\nsuccessful match), or an instance of :class:`Pattern`.\nexpr: the expression to be matched", "source": "juraj-google-style"}
{"code": "def eigvals(tensor, name=None):\n    if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:\n        out_dtype = dtypes.complex64\n    elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:\n        out_dtype = dtypes.complex128\n    e, _ = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=False, name=name)\n    return e", "docstring": "Computes the eigenvalues of one or more matrices.\n\nNote: If your program backpropagates through this function, you should replace\nit with a call to tf.linalg.eig (possibly ignoring the second output) to\navoid computing the eigen decomposition twice. This is because the\neigenvectors are used to compute the gradient w.r.t. the eigenvalues. See\n_SelfAdjointEigV2Grad in linalg_grad.py.\n\nArgs:\ntensor: `Tensor` of shape `[..., N, N]`.\nname: string, optional name of the operation.\n\nReturns:\ne: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`\neigenvalues of `tensor[..., :, :]`.", "source": "github-repos"}
{"code": "def return_type(type_name, formatter=None):\n    \n\n    def _returns(func):\n        annotated(func)\n        func.metadata.typed_returnvalue(type_name, formatter)\n        return func\n\n    return _returns", "docstring": "Specify that this function returns a typed value.\n\nArgs:\ntype_name (str): A type name known to the global typedargs type system\nformatter (str): An optional name of a formatting function specified\nfor the type given in type_name.", "source": "juraj-google-style"}
{"code": "def _ConvertCollectionsCounterToDict(cls, collections_counter):\n    if (not isinstance(collections_counter, collections.Counter)):\n        raise TypeError\n    json_dict = {'__type__': 'collections.Counter'}\n    for (attribute_name, attribute_value) in iter(collections_counter.items()):\n        if (attribute_value is None):\n            continue\n        if isinstance(attribute_value, py2to3.BYTES_TYPE):\n            attribute_value = {'__type__': 'bytes', 'stream': '{0:s}'.format(binascii.b2a_qp(attribute_value))}\n        json_dict[attribute_name] = attribute_value\n    return json_dict", "docstring": "Converts a collections.Counter object into a JSON dictionary.\n\nThe resulting dictionary of the JSON serialized objects consists of:\n{\n'__type__': 'collections.Counter'\n...\n}\n\nHere '__type__' indicates the object base type. In this case\n'collections.Counter'. The rest of the elements of the dictionary make up\nthe collections.Counter object attributes.\n\nArgs:\ncollections_counter (collections.Counter): counter.\n\nReturns:\ndict[str, object]: JSON serialized objects.\n\nRaises:\nTypeError: if not an instance of collections.Counter.", "source": "codesearchnet"}
{"code": "def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:\n    raise NotImplementedError", "docstring": "For each query in the batch, retrieves `n_docs` documents.\n\nArgs:\nquestion_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):\nAn array of query vectors.\nn_docs (`int`):\nThe number of docs retrieved per query.\n\nReturns:\n`np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of\nshape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents.", "source": "github-repos"}
{"code": "def _set_advertising_data(self, packet_type, data):\n        \n\n        payload = struct.pack(\"<BB%ss\" % (len(data)), packet_type, len(data), bytes(data))\n        response = self._send_command(6, 9, payload)\n\n        result, = unpack(\"<H\", response.payload)\n        if result != 0:\n            return False, {'reason': 'Error code from BLED112 setting advertising data', 'code': result}\n\n        return True, None", "docstring": "Set the advertising data for advertisements sent out by this bled112\n\nArgs:\npacket_type (int): 0 for advertisement, 1 for scan response\ndata (bytearray): the data to set", "source": "juraj-google-style"}
{"code": "def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):\n    \n    logger.info('Starting analysis plugins.')\n\n    for analysis_plugin in analysis_plugins.values():\n      self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin\n\n      process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writer)\n      if not process:\n        logger.error('Unable to create analysis process: {0:s}'.format(\n            analysis_plugin.NAME))\n\n    logger.info('Analysis plugins running')", "docstring": "Starts the analysis processes.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\nanalysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that\nshould be run and their names.", "source": "juraj-google-style"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    self._vslvm_logical_volume.seek(offset, whence)", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def __generate_localization_dictionary_from_file(file_path, localization_entry_attribute_name_for_key):\n    \n    localization_dictionary = {}\n    f = open_strings_file(file_path, \"r+\")\n    header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)\n\n    if len(header_comment_key_value_tuples) == 0:\n        logging.warning(\"Couldn't find any strings in file '%s'. Check encoding and format.\" % file_path)\n\n    for header_comment, comments, key, value in header_comment_key_value_tuples:\n        localization_entry = LocalizationEntry(comments, key, value)\n        localization_dictionary[\n            localization_entry.__getattribute__(localization_entry_attribute_name_for_key)] = localization_entry\n    f.close()\n    return localization_dictionary", "docstring": "Generates a dictionary mapping between keys (defined by the given attribute name) and localization entries.\n\nArgs:\nfile_path (str): The strings file path.\nlocalization_entry_attribute_name_for_key: The name of the attribute of LocalizationEntry to use as key.\n\nReturns:\ndict: A dictionary mapping between keys (defined by the given attribute name) and localization entries.", "source": "juraj-google-style"}
{"code": "def GetMissingChunks(self, fd, length, offset):\n    start_chunk = (offset // fd.chunksize)\n    end_chunk = (((offset + length) - 1) // fd.chunksize)\n    relevant_chunks = range(start_chunk, (end_chunk + 1))\n    missing_chunks = set(relevant_chunks)\n    for (idx, metadata) in iteritems(fd.ChunksMetadata(relevant_chunks)):\n        if (not self.DataRefreshRequired(last=metadata.get('last', None))):\n            missing_chunks.remove(idx)\n    return sorted(missing_chunks)", "docstring": "Return which chunks a file doesn't have.\n\nSpecifically, we return a list of the chunks specified by a\nlength-offset range which are not in the datastore.\n\nArgs:\nfd: The database object to read chunks from.\nlength: Length to read.\noffset: File offset to read from.\n\nReturns:\nA list of chunk numbers.", "source": "codesearchnet"}
{"code": "def description(self, force_refresh=False):\n        \n        if force_refresh:\n            self.clear_cache()\n        if not self._tuning_job_describe_result:\n            self._tuning_job_describe_result = self._sage_client.describe_hyper_parameter_tuning_job(\n                HyperParameterTuningJobName=self.name\n            )\n        return self._tuning_job_describe_result", "docstring": "Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job.\n\nArgs:\nforce_refresh (bool): Set to True to fetch the latest data from SageMaker API.\n\nReturns:\ndict: The Amazon SageMaker response for ``DescribeHyperParameterTuningJob``.", "source": "juraj-google-style"}
{"code": "def add_field_with_label(self, key, label_description, field):\n    self.inputs[key] = field\n    label = Label(label_description)\n    label.style['margin'] = '0px 5px'\n    label.style['min-width'] = '30%'\n    container = HBox()\n    container.style.update({'justify-content': 'space-between', 'overflow': 'auto', 'padding': '3px'})\n    container.append(label, key=('lbl' + key))\n    container.append(self.inputs[key], key=key)\n    self.container.append(container, key=key)", "docstring": "Adds a field to the dialog together with a descriptive label and a unique identifier.\n\nNote: You can access to the fields content calling the function GenericDialog.get_field(key).\n\nArgs:\nkey (str): The unique identifier for the field.\nlabel_description (str): The string content of the description label.\nfield (Widget): The instance of the field Widget. It can be for example a TextInput or maybe\na custom widget.", "source": "codesearchnet"}
{"code": "def draw_mask(im, mask, alpha=0.5, color=None):\n    \n    if color is None:\n        color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]\n    im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),\n                  im * (1 - alpha) + color * alpha, im)\n    im = im.astype('uint8')\n    return im", "docstring": "Overlay a mask on top of the image.\n\nArgs:\nim: a 3-channel uint8 image in BGR\nmask: a binary 1-channel image of the same size\ncolor: if None, will choose automatically", "source": "juraj-google-style"}
{"code": "def stop(self, timeout_s=None):\n    self.stopped.set()\n    if self.thread:\n        self.thread.join(timeout_s)\n        return (not self.thread.isAlive())\n    else:\n        return True", "docstring": "Stops the interval.\n\nIf a timeout is provided and stop returns False then the thread is\neffectively abandoned in whatever state it was in (presumably dead-locked).\n\nArgs:\ntimeout_s: The time in seconds to wait on the thread to finish.  By\ndefault it's forever.\nReturns:\nFalse if a timeout was provided and we timed out.", "source": "codesearchnet"}
{"code": "def with_input_types(self, input_type_hint):\n    input_type_hint = native_type_compatibility.convert_to_beam_type(input_type_hint)\n    validate_composite_type_param(input_type_hint, 'Type hints for a PTransform')\n    return super().with_input_types(input_type_hint)", "docstring": "Annotates the input type of a :class:`PTransform` with a type-hint.\n\nArgs:\ninput_type_hint (type): An instance of an allowed built-in type, a custom\nclass, or an instance of a\n:class:`~apache_beam.typehints.typehints.TypeConstraint`.\n\nRaises:\nTypeError: If **input_type_hint** is not a valid type-hint.\nSee\n:obj:`apache_beam.typehints.typehints.validate_composite_type_param()`\nfor further details.\n\nReturns:\nPTransform: A reference to the instance of this particular\n:class:`PTransform` object. This allows chaining type-hinting related\nmethods.", "source": "github-repos"}
{"code": "def _SetPath(self, path):\n    old_path = self._path\n    if (old_path and (not io_wrapper.IsCloudPath(old_path))):\n        try:\n            size = tf.io.gfile.stat(old_path).length\n            logger.debug('Setting latest size of %s to %d', old_path, size)\n            self._finalized_sizes[old_path] = size\n        except tf.errors.OpError as e:\n            logger.error('Unable to get size of %s: %s', old_path, e)\n    self._path = path\n    self._loader = self._loader_factory(path)", "docstring": "Sets the current path to watch for new events.\n\nThis also records the size of the old path, if any. If the size can't be\nfound, an error is logged.\n\nArgs:\npath: The full path of the file to watch.", "source": "codesearchnet"}
{"code": "def sym_get(self, path: Union[utils.KeyPath, str, int], default: Any=RAISE_IF_NOT_FOUND, use_inferred: bool=False) -> Any:\n    path = utils.KeyPath.from_value(path)\n    if default is RAISE_IF_NOT_FOUND:\n        return path.query(self, use_inferred=use_inferred)\n    else:\n        return path.get(self, default, use_inferred=use_inferred)", "docstring": "Returns a sub-node by path.\n\nNOTE: there is no `sym_set`, use `sym_rebind`.\n\nArgs:\npath: A KeyPath object or equivalence.\ndefault: Default value if path does not exists. If absent, `KeyError` will\nbe thrown.\nuse_inferred: If True, return inferred value instead of the symbolic form\nof `pg.Inferential` objects.\n\nReturns:\nValue of symbolic attribute specified by path if found, otherwise the\ndefault value if it's specified.\n\nRaises:\nKeyError if `path` does not exist and `default` is not specified.", "source": "github-repos"}
{"code": "def _get_relationships(self, dna: pg.DNA) -> Tuple[List[pg.DNA], List[Optional[pg.DNA]], List[Optional[int]]]:\n\n    def is_mutable_node(obj):\n        return self._is_mutable_node(obj)\n    results = pg.query(dna, where=is_mutable_node, enter_selected=True)\n    child_nodes = list(results.values())\n    parent_nodes = [n.parent_dna for n in child_nodes]\n    child_indexes = [n.sym_path.key if n.parent_dna else None for n in child_nodes]\n    return (child_nodes, parent_nodes, child_indexes)", "docstring": "Extracts the parent-child node relationships in a DNA.\n\nNote that PyGlove represents the nodes in a DNA instance as DNA instances\nthemselves.\n\nArgs:\ndna: the DNA that will be mutated.\n\nReturns:\nA tuple of 3 lists of the same length with corresponding elements:\n-child_nodes: a list of every node in the DNA.\n-parent_nodes: a list of the parent node of the corresponding node in\n`child_nodes`.\n-child_indexes: a list of indexes. For all j, child_nodes[j] is the i-th\nchild of parent_nodes[j], where i = child_indexes[j].\nNote that the root is included as a \"child\" with a `None` parent.", "source": "github-repos"}
{"code": "def patch_deepCopy(self, patches):\n    patchesCopy = []\n    for patch in patches:\n        patchCopy = patch_obj()\n        patchCopy.diffs = patch.diffs[:]\n        patchCopy.start1 = patch.start1\n        patchCopy.start2 = patch.start2\n        patchCopy.length1 = patch.length1\n        patchCopy.length2 = patch.length2\n        patchesCopy.append(patchCopy)\n    return patchesCopy", "docstring": "Given an array of patches, return another array that is identical.\n\nArgs:\npatches: Array of Patch objects.\n\nReturns:\nArray of Patch objects.", "source": "codesearchnet"}
{"code": "def _FormatDateTime(self, event):\n    if (not event.timestamp):\n        return 'N/A'\n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=event.timestamp)\n    (year, month, day_of_month) = date_time.GetDate()\n    (hours, minutes, seconds) = date_time.GetTimeOfDay()\n    try:\n        return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(year, month, day_of_month, hours, minutes, seconds)\n    except (TypeError, ValueError):\n        self._ReportEventError(event, 'unable to copy timestamp: {0!s} to a human readable date and time. Defaulting to: \"0000-00-00 00:00:00\"'.format(event.timestamp))\n        return '0000-00-00 00:00:00'", "docstring": "Formats the date and time.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: date and time string or \"N/A\" if no event timestamp is available.", "source": "codesearchnet"}
{"code": "def _get_common_params(self, user_id, attributes):\n    \n    commonParams = {}\n\n    commonParams[self.EventParams.PROJECT_ID] = self._get_project_id()\n    commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id()\n\n    visitor = {}\n    visitor[self.EventParams.END_USER_ID] = user_id\n    visitor[self.EventParams.SNAPSHOTS] = []\n\n    commonParams[self.EventParams.USERS] = []\n    commonParams[self.EventParams.USERS].append(visitor)\n    commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes)\n\n    commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk'\n    commonParams[self.EventParams.ENRICH_DECISIONS] = True\n    commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__\n    commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip()\n    commonParams[self.EventParams.REVISION] = self._get_revision()\n\n    return commonParams", "docstring": "Get params which are used same in both conversion and impression events.\n\nArgs:\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nDict consisting of parameters common to both impression and conversion events.", "source": "juraj-google-style"}
{"code": "def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):\n    return list(filter(None, key_path.split(path_separator)))", "docstring": "Splits the key path into path segments.\n\nArgs:\nkey_path (str): key path.\npath_separator (Optional[str]): path separator.\n\nReturns:\nlist[str]: key path segments without the root path segment, which is an\nempty string.", "source": "codesearchnet"}
{"code": "def create_channels(self, dataset, token, new_channels_data):\n        \n        channels = {}\n        for channel_new in new_channels_data:\n\n            self._check_channel(channel_new.name)\n\n            if channel_new.channel_type not in ['image', 'annotation']:\n                raise ValueError('Channel type must be ' +\n                                 'neuroRemote.IMAGE or ' +\n                                 'neuroRemote.ANNOTATION.')\n\n            if channel_new.readonly * 1 not in [0, 1]:\n                raise ValueError(\"readonly must be 0 (False) or 1 (True).\")\n\n            channels[channel_new.name] = {\n                \"channel_name\": channel_new.name,\n                \"channel_type\": channel_new.channel_type,\n                \"datatype\": channel_new.dtype,\n                \"readonly\": channel_new.readonly * 1\n            }\n        req = requests.post(self.url(\"/{}/project/\".format(dataset) +\n                                     \"{}\".format(token)),\n                            json={\"channels\": channels}, verify=False)\n\n        if req.status_code != 201:\n            raise RemoteDataUploadError('Could not upload {}'.format(req.text))\n        else:\n            return True", "docstring": "Creates channels given a dictionary in 'new_channels_data',\n'dataset' name, and 'token' (project) name.\n\nArguments:\ntoken (str): Token to identify project\ndataset (str): Dataset name to identify dataset to download from\nnew_channels_data (dict): New channel data to upload into new\nchannels\n\nReturns:\nbool: Process completed successfully or not", "source": "juraj-google-style"}
{"code": "def Instance(reactor=None):\n    if (NodeLeader._LEAD is None):\n        NodeLeader._LEAD = NodeLeader(reactor)\n    return NodeLeader._LEAD", "docstring": "Get the local node instance.\n\nArgs:\nreactor: (optional) custom reactor to use in NodeLeader.\n\nReturns:\nNodeLeader: instance.", "source": "codesearchnet"}
{"code": "def supported_features_mapping(*supported_features: str, onnx_config_cls: Optional[str]=None) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:\n    if onnx_config_cls is None:\n        raise ValueError('A OnnxConfig class must be provided')\n    config_cls = transformers\n    for attr_name in onnx_config_cls.split('.'):\n        config_cls = getattr(config_cls, attr_name)\n    mapping = {}\n    for feature in supported_features:\n        if '-with-past' in feature:\n            task = feature.replace('-with-past', '')\n            mapping[feature] = partial(config_cls.with_past, task=task)\n        else:\n            mapping[feature] = partial(config_cls.from_model_config, task=feature)\n    return mapping", "docstring": "Generate the mapping between supported the features and their corresponding OnnxConfig for a given model.\n\nArgs:\n*supported_features: The names of the supported features.\nonnx_config_cls: The OnnxConfig full name corresponding to the model.\n\nReturns:\nThe dictionary mapping a feature to an OnnxConfig constructor.", "source": "github-repos"}
{"code": "def ToJson(self, index):\n        \n        return {\n            'n': index,\n            'asset': self.AssetId.To0xString(),\n            'value': self.Value.ToNeoJsonString(),\n            'address': self.Address\n        }", "docstring": "Convert object members to a dictionary that can be parsed as JSON.\nArgs:\nindex (int): The index of the output in a transaction\n\nReturns:\ndict:", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, location=None, parent=None, part_index=None, start_offset=None,\n      **kwargs):\n    \n    if not parent:\n      raise ValueError('Missing parent value.')\n\n    super(TSKPartitionPathSpec, self).__init__(parent=parent, **kwargs)\n    self.location = location\n    self.part_index = part_index\n    self.start_offset = start_offset", "docstring": "Initializes a path specification.\n\nNote that the TSK partition path specification must have a parent.\n\nArgs:\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\npart_index (Optional[int]): part index.\nstart_offset (Optional[int]): start offset.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, allow_comments=False, directory=None, **kwargs):\n\t\t\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.allow_comments = allow_comments\n\t\tself.dir = directory", "docstring": "Constructor. Also see Entry.__init__.\n\nArgs:\nallow_comments (bool): Whether to allow comments. Default False.\ndirectory (str): Optional. If the page should live in a subdirectory\ninstead of at the web root, specify it here instead of making it\npart of the slug.", "source": "juraj-google-style"}
{"code": "def open_repository(path, spor_dir='.spor'):\n    root = _find_root_dir(path, spor_dir)\n    return Repository(root, spor_dir)", "docstring": "Open an existing repository.\n\nArgs:\npath: Path to any file or directory within the repository.\nspor_dir: The name of the directory containing spor data.\n\nReturns: A `Repository` instance.\n\nRaises:\nValueError: No repository is found.", "source": "codesearchnet"}
{"code": "def zoom_blur(x, severity=1):\n  \n  c = [\n      np.arange(1, 1.11, 0.01),\n      np.arange(1, 1.16, 0.01),\n      np.arange(1, 1.21, 0.02),\n      np.arange(1, 1.26, 0.02),\n      np.arange(1, 1.31, 0.03)\n  ][severity - 1]\n  x = (np.array(x) / 255.).astype(np.float32)\n  out = np.zeros_like(x)\n  for zoom_factor in c:\n    out += clipped_zoom(x, zoom_factor)\n  x = (x + out) / (len(c) + 1)\n  x_clip = np.clip(x, 0, 1) * 255\n  return around_and_astype(x_clip)", "docstring": "Zoom blurring to images.\n\nApplying zoom blurring to images by zooming the central part of the images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Applied zoom blur.", "source": "juraj-google-style"}
{"code": "def console_get_height_rect(con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str) -> int:\n    return int(lib.TCOD_console_get_height_rect_fmt(_console(con), x, y, w, h, _fmt(fmt)))", "docstring": "Return the height of this text once word-wrapped into this rectangle.\n\nReturns:\nint: The number of lines of text once word-wrapped.\n\n.. deprecated:: 8.5\nUse :any:`Console.get_height_rect` instead.", "source": "codesearchnet"}
{"code": "def inspect(self, nids=None, wslice=None, **kwargs):\n        \n        figs = []\n        for task in self.select_tasks(nids=nids, wslice=wslice):\n            if hasattr(task, \"inspect\"):\n                fig = task.inspect(**kwargs)\n                if fig is None:\n                    cprint(\"Cannot inspect Task %s\" % task, color=\"blue\")\n                else:\n                    figs.append(fig)\n            else:\n                cprint(\"Task %s does not provide an inspect method\" % task, color=\"blue\")\n\n        return figs", "docstring": "Inspect the tasks (SCF iterations, Structural relaxation ...) and\nproduces matplotlib plots.\n\nArgs:\nnids: List of node identifiers.\nwslice: Slice object used to select works.\nkwargs: keyword arguments passed to `task.inspect` method.\n\n.. note::\n\nnids and wslice are mutually exclusive.\nIf nids and wslice are both None, all tasks in self are inspected.\n\nReturns:\nList of `matplotlib` figures.", "source": "juraj-google-style"}
{"code": "def get_broker() -> 'Broker':\n    global global_broker\n    if (global_broker is None):\n        from .brokers.rabbitmq import RabbitmqBroker\n        set_broker(RabbitmqBroker(host='127.0.0.1', port=5672, heartbeat=5, connection_attempts=5, blocked_connection_timeout=30))\n    return global_broker", "docstring": "Get the global broker instance.  If no global broker is set,\nthis initializes a RabbitmqBroker and returns it.\n\nReturns:\nBroker: The default Broker.", "source": "codesearchnet"}
{"code": "def MetaGraph(self):\n    if (self._meta_graph is None):\n        raise ValueError('There is no metagraph in this EventAccumulator')\n    meta_graph = meta_graph_pb2.MetaGraphDef()\n    meta_graph.ParseFromString(self._meta_graph)\n    return meta_graph", "docstring": "Return the metagraph definition, if there is one.\n\nRaises:\nValueError: If there is no metagraph for this run.\n\nReturns:\nThe `meta_graph_def` proto.", "source": "codesearchnet"}
{"code": "def get_snpeff_info(snpeff_string, snpeff_header):\n    snpeff_annotations = [dict(zip(snpeff_header, snpeff_annotation.split('|'))) for snpeff_annotation in snpeff_string.split(',')]\n    return snpeff_annotations", "docstring": "Make the snpeff annotations into dictionaries\n\nA snpeff dictionary will have the snpeff column names as keys and\nthe snpeff annotations as values.\nThe dictionaries are stored in a list.\nOne dictionary for each transcript.\n\nArgs:\nsnpeff_string (string): A string with the ANN annotation\nsnpeff_header (list): A list with the snpeff header\n\nReturn:\nsnpeff_annotations (list): A list of snpeff dicts", "source": "codesearchnet"}
{"code": "def __init__(self, n, key=None, reverse=False):\n    super().__init__()\n    self._n = n\n    self._key = key\n    self._reverse = reverse", "docstring": "Creates a global Top operation.\n\nThe arguments 'key' and 'reverse' may be passed as keyword arguments,\nand have the same meaning as for Python's sort functions.\n\nArgs:\nn: number of elements to extract from pcoll.\nkey: (optional) a mapping of elements to a comparable key, similar to\nthe key argument of Python's sorting methods.\nreverse: (optional) whether to order things smallest to largest, rather\nthan largest to smallest", "source": "github-repos"}
{"code": "def __toString(self, values):\n    for key in values:\n        if (not isinstance(values[key], str)):\n            values[key] = str(values[key])\n    return values", "docstring": "Will replace dict values with string values\n\nArgs:\nvalues (dict): Dictionary of values\n\nReturns:\nUpdated values dict", "source": "codesearchnet"}
{"code": "def path_of_module(self, mod: nn.Module) -> str:\n    try:\n        return super().path_of_module(mod)\n    except NameError as e:\n        if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and (len(list(mod.buffers())) == 0):\n            path = self._insert_module_as_submodule(mod)\n            return path\n        raise e", "docstring": "Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has\na submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the\nstring \"foo.bar\".\n\nArgs:\nmod (str): The `Module` to retrieve the qualified name for.", "source": "github-repos"}
{"code": "def _process_debug_op_state_changes(self, event_reply=None):\n    if event_reply is None:\n        event_reply = debug_service_pb2.EventReply()\n    while not self._debug_ops_state_change_queue.empty():\n        state_change = self._debug_ops_state_change_queue.get()\n        debug_node_key = (state_change.node_name, state_change.output_slot, state_change.debug_op)\n        if state_change.state == debug_service_pb2.EventReply.DebugOpStateChange.READ_WRITE:\n            logging.info('Adding breakpoint %s:%d:%s', state_change.node_name, state_change.output_slot, state_change.debug_op)\n            self._breakpoints.add(debug_node_key)\n        elif state_change.state == debug_service_pb2.EventReply.DebugOpStateChange.READ_ONLY:\n            logging.info('Adding watchpoint %s:%d:%s', state_change.node_name, state_change.output_slot, state_change.debug_op)\n            if debug_node_key in self._breakpoints:\n                self._breakpoints.discard(debug_node_key)\n        elif state_change.state == debug_service_pb2.EventReply.DebugOpStateChange.DISABLED:\n            logging.info('Removing watchpoint or breakpoint: %s:%d:%s', state_change.node_name, state_change.output_slot, state_change.debug_op)\n            if debug_node_key in self._breakpoints:\n                self._breakpoints.discard(debug_node_key)\n            else:\n                logging.warn('Attempting to remove a non-existent debug node key: %s', debug_node_key)\n        new_state_change = event_reply.debug_op_state_changes.add()\n        new_state_change.CopyFrom(state_change)\n    return event_reply", "docstring": "Dequeue and process all the queued debug-op state change protos.\n\nInclude all the debug-op state change protos in a `EventReply` proto.\n\nArgs:\nevent_reply: An `EventReply` to add the `DebugOpStateChange` protos to,\nor `None`.\n\nReturns:\nAn `EventReply` proto with the dequeued `DebugOpStateChange` protos (if\nany) added.", "source": "github-repos"}
{"code": "def from_der(der):\n        \n        d = get_bytes(der)\n        \n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n        if len(d) < 8:\n            raise ValueError(\"DER signature string is too short.\")\n        \n        if len(d) > 72:\n            raise ValueError(\"DER signature string is too long.\")\n        if d[0] != 0x30:\n            raise ValueError(\"DER signature does not start with 0x30.\")\n        if d[1] != len(d[2:]):\n            raise ValueError(\"DER signature length incorrect.\")\n\n        total_length = d[1]\n\n        if d[2] != 0x02:\n            raise ValueError(\"DER signature no 1st int marker.\")\n        if d[3] <= 0 or d[3] > (total_length - 7):\n            raise ValueError(\"DER signature incorrect R length.\")\n\n        \n        rlen = d[3]\n        s_magic_index = 4 + rlen\n        rb = d[4:s_magic_index]\n\n        if rb[0] & 0x80 != 0:\n            raise ValueError(\"DER signature R is negative.\")\n        if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80:\n            raise ValueError(\"DER signature R is excessively padded.\")\n\n        r = int.from_bytes(rb, 'big')\n\n        \n        if d[s_magic_index] != 0x02:\n            raise ValueError(\"DER signature no 2nd int marker.\")\n        slen_index = s_magic_index + 1\n        slen = d[slen_index]\n        if slen <= 0 or slen > len(d) - (slen_index + 1):\n            raise ValueError(\"DER signature incorrect S length.\")\n\n        sb = d[slen_index + 1:]\n\n        if sb[0] & 0x80 != 0:\n            raise ValueError(\"DER signature S is negative.\")\n        if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80:\n            raise ValueError(\"DER signature S is excessively padded.\")\n\n        s = int.from_bytes(sb, 'big')\n\n        if r < 1 or r >= bitcoin_curve.n:\n            raise ValueError(\"DER signature R is not between 1 and N - 1.\")\n        if s < 1 or s >= bitcoin_curve.n:\n            raise ValueError(\"DER signature S is not between 1 and N - 1.\")\n\n        return Signature(r, s)", "docstring": "Decodes a Signature that was DER-encoded.\n\nArgs:\nder (bytes or str): The DER encoding to be decoded.\n\nReturns:\nSignature: The deserialized signature.", "source": "juraj-google-style"}
{"code": "def WriteEventBody(self, event):\n    \n    inode = getattr(event, 'inode', None)\n    if inode is None:\n      event.inode = 0\n\n    json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event)\n    json_string = json.dumps(json_dict, sort_keys=True)\n\n    if self._event_counter != 0:\n      self._output_writer.Write(', ')\n\n    line = '\"event_{0:d}\": {1:s}\\n'.format(self._event_counter, json_string)\n    self._output_writer.Write(line)\n\n    self._event_counter += 1", "docstring": "Writes the body of an event object to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def _ReadFileEntries(self, file_object):\n    \n    self._file_entries = {}\n\n    file_offset = 0\n    while file_offset < self._file_size or self._file_size == 0:\n      file_entry = self._ReadFileEntry(file_object, file_offset)\n      file_offset += file_entry.size\n      if file_entry.path == 'TRAILER!!!':\n        break\n\n      if file_entry.path in self._file_entries:\n        \n        continue\n\n      self._file_entries[file_entry.path] = file_entry", "docstring": "Reads the file entries from the cpio archive.\n\nArgs:\nfile_object (FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def _GenerateSection(self, problem_type):\n    if (problem_type == transitfeed.TYPE_WARNING):\n        dataset_problems = self._dataset_warnings\n        heading = 'Warnings'\n    else:\n        dataset_problems = self._dataset_errors\n        heading = 'Errors'\n    if (not dataset_problems):\n        return ''\n    prefix = ('<h2 class=\"issueHeader\">%s:</h2>' % heading)\n    dataset_sections = []\n    for (dataset_merger, problems) in dataset_problems.items():\n        dataset_sections.append(('<h3>%s</h3><ol>%s</ol>' % (dataset_merger.FILE_NAME, '\\n'.join(problems))))\n    body = '\\n'.join(dataset_sections)\n    return (prefix + body)", "docstring": "Generate a listing of the given type of problems.\n\nArgs:\nproblem_type: The type of problem. This is one of the problem type\nconstants from transitfeed.\n\nReturns:\nThe generated HTML as a string.", "source": "codesearchnet"}
{"code": "def get_service_name(self, service_id: str) -> str:\n        \n        \n        if not self._manager:\n            raise RuntimeError('Only the Swarm manager node can retrieve all'\n                               ' the services details.')\n\n        service = self._client.services.get(service_id)\n        return service.name", "docstring": "Get the name of the docker service.\n\nOnly the manager nodes can retrieve service name\n\nArgs:\nservice_id (string): List of service ID\n\nReturns:\nstring, name of the docker service", "source": "juraj-google-style"}
{"code": "def terminate(self, nowait=False):\n        \n        logger.debug(\"Acquiring lock for service termination\")\n        with self.lock:\n            logger.debug(\"Terminating service\")\n\n            if not self.listener:\n                logger.warning(\"Service already stopped.\")\n                return\n\n            self.listener.stop(nowait)\n\n            try:\n                if not nowait:\n                    self._post_log_batch()\n            except Exception:\n                if self.error_handler:\n                    self.error_handler(sys.exc_info())\n                else:\n                    raise\n            finally:\n                self.queue = None\n                self.listener = None", "docstring": "Finalize and stop service\n\nArgs:\nnowait: set to True to terminate immediately and skip processing\nmessages still in the queue", "source": "juraj-google-style"}
{"code": "def get_axis(self, undefined=np.zeros(3)):\n    tolerance = 1e-17\n    self._normalise()\n    norm = np.linalg.norm(self.vector)\n    if (norm < tolerance):\n        return undefined\n    else:\n        return (self.vector / norm)", "docstring": "Get the axis or vector about which the quaternion rotation occurs\n\nFor a null rotation (a purely real quaternion), the rotation angle will\nalways be `0`, but the rotation axis is undefined.\nIt is by default assumed to be `[0, 0, 0]`.\n\nParams:\nundefined: [optional] specify the axis vector that should define a null rotation.\nThis is geometrically meaningless, and could be any of an infinite set of vectors,\nbut can be specified if the default (`[0, 0, 0]`) causes undesired behaviour.\n\nReturns:\nA Numpy unit 3-vector describing the Quaternion object's axis of rotation.\n\nNote:\nThis feature only makes sense when referring to a unit quaternion.\nCalling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.", "source": "codesearchnet"}
{"code": "def is_method_call(func, types=(), methods=()):\n    return (isinstance(func, astroid.BoundMethod) and isinstance(func.bound, astroid.Instance) and ((func.bound.name in types) if types else True) and ((func.name in methods) if methods else True))", "docstring": "Determines if a BoundMethod node represents a method call.\n\nArgs:\nfunc (astroid.BoundMethod): The BoundMethod AST node to check.\ntypes (Optional[String]): Optional sequence of caller type names to restrict check.\nmethods (Optional[String]): Optional sequence of method names to restrict check.\n\nReturns:\nbool: true if the node represents a method call for the given type and\nmethod names, False otherwise.", "source": "codesearchnet"}
{"code": "def get(self, file_path, ref, **kwargs):\n    file_path = file_path.replace('/', '%2F')\n    return GetMixin.get(self, file_path, ref=ref, **kwargs)", "docstring": "Retrieve a single file.\n\nArgs:\nfile_path (str): Path of the file to retrieve\nref (str): Name of the branch, tag or commit\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the file could not be retrieved\n\nReturns:\nobject: The generated RESTObject", "source": "codesearchnet"}
{"code": "def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int=0) -> int:\n    if dimension <= 0:\n        dimension = fixed_dimension\n    dimension -= num_token_to_add\n    return dimension", "docstring": "Args:\ndimension:\nfixed_dimension:\nnum_token_to_add:\n\nReturns:", "source": "github-repos"}
{"code": "def GetMACBRepresentationFromDescriptions(self, timestamp_descriptions):\n    macb_representation = []\n    if (('mtime' in timestamp_descriptions) or (definitions.TIME_DESCRIPTION_MODIFICATION in timestamp_descriptions)):\n        macb_representation.append('M')\n    else:\n        macb_representation.append('.')\n    if (('atime' in timestamp_descriptions) or (definitions.TIME_DESCRIPTION_LAST_ACCESS in timestamp_descriptions)):\n        macb_representation.append('A')\n    else:\n        macb_representation.append('.')\n    if (('ctime' in timestamp_descriptions) or (definitions.TIME_DESCRIPTION_CHANGE in timestamp_descriptions)):\n        macb_representation.append('C')\n    else:\n        macb_representation.append('.')\n    if (('crtime' in timestamp_descriptions) or (definitions.TIME_DESCRIPTION_CREATION in timestamp_descriptions)):\n        macb_representation.append('B')\n    else:\n        macb_representation.append('.')\n    return ''.join(macb_representation)", "docstring": "Determines the MACB representation from the timestamp descriptions.\n\nMACB representation is a shorthand for representing one or more of\nmodification, access, change, birth timestamp descriptions as the letters\n\"MACB\" or a \".\" if the corresponding timestamp is not set.\n\nNote that this is an output format shorthand and does not guarantee that\nthe timestamps represent the same occurrence.\n\nArgs:\ntimestamp_descriptions (list[str]): timestamp descriptions, which are\ndefined in definitions.TIME_DESCRIPTIONS.\n\nReturns:\nstr: MACB representation.", "source": "codesearchnet"}
{"code": "def default_storable(python_type, exposes=None, version=None, storable_type=None, peek=default_peek):\n    if (not exposes):\n        for extension in expose_extensions:\n            try:\n                exposes = extension(python_type)\n            except (SystemExit, KeyboardInterrupt):\n                raise\n            except:\n                pass\n            else:\n                if exposes:\n                    break\n        if (not exposes):\n            raise AttributeError('`exposes` required for type: {!r}'.format(python_type))\n    return Storable(python_type, key=storable_type, handlers=StorableHandler(version=version, exposes=exposes, poke=poke(exposes), peek=peek(python_type, exposes)))", "docstring": "Default mechanics for building the storable instance for a type.\n\nArguments:\n\npython_type (type): type.\n\nexposes (iterable): attributes exposed by the type.\n\nversion (tuple): version number.\n\nstorable_type (str): universal string identifier for the type.\n\npeek (callable): peeking routine.\n\nReturns:\n\nStorable: storable instance.", "source": "codesearchnet"}
{"code": "def docs(recreate, gen_index, run_doctests):\n    build_dir = conf.get_path('build_dir', '.build')\n    docs_dir = conf.get_path('docs.path', 'docs')\n    refdoc_paths = conf.get('docs.reference', [])\n    docs_html_dir = conf.get_path('docs.out', os.path.join(docs_dir, 'html'))\n    docs_tests_dir = conf.get_path('docs.tests_out', os.path.join(docs_dir, 'doctest'))\n    docs_build_dir = os.path.join(build_dir, 'docs')\n    if recreate:\n        for path in (docs_html_dir, docs_build_dir):\n            if os.path.exists(path):\n                log.info('<91>Deleting <94>{}'.format(path))\n                shutil.rmtree(path)\n    if refdoc_paths:\n        gen_ref_docs(gen_index)\n    else:\n        log.err('Not generating any reference documentation - No docs.reference specified in config')\n    with conf.within_proj_dir(docs_dir):\n        log.info('Building docs')\n        shell.run('sphinx-build -b html -d {build} {docs} {out}'.format(build=docs_build_dir, docs=docs_dir, out=docs_html_dir))\n        if run_doctests:\n            log.info('Running doctests')\n            shell.run('sphinx-build -b doctest -d {build} {docs} {out}'.format(build=docs_build_dir, docs=docs_dir, out=docs_tests_dir))\n        log.info('You can view the docs by browsing to <34>file://{}'.format(docs_html_dir))", "docstring": "Build the documentation for the project.\n\nArgs:\nrecreate (bool):\nIf set to **True**, the build and output directories will be cleared\nprior to generating the docs.\ngen_index (bool):\nIf set to **True**, it will generate top-level index file for the\nreference documentation.\nrun_doctests (bool):\nSet to **True** if you want to run doctests after the documentation\nis generated.\npretend (bool):\nIf set to **True**, do not actually execute any shell commands, just\nprint the command that would be executed.", "source": "codesearchnet"}
{"code": "def get_output_slot(element_name):\n    _, output_slot = parse_node_or_tensor_name(element_name)\n    return output_slot if output_slot is not None else 0", "docstring": "Get the output slot number from the name of a graph element.\n\nIf element_name is a node name without output slot at the end, 0 will be\nassumed.\n\nArgs:\nelement_name: (`str`) name of the graph element in question.\n\nReturns:\n(`int`) output slot number.", "source": "github-repos"}
{"code": "def compute_mu(L_aug, Y, k, p):\n    \n    n, d = L_aug.shape\n    assert Y.shape[0] == n\n\n    \n    mu = np.zeros((d, k))\n    for y in range(1, k + 1):\n        L_y = L_aug[Y == y]\n        mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]\n    return mu", "docstring": "Given label matrix L_aug and labels Y, compute the true mu params.\n\nArgs:\nL: (np.array {0,1}) [n, d] The augmented (indicator) label matrix\nY: (np.array int) [n] The true labels in {1,...,k}\nk: (int) Cardinality\np: (np.array float) [k] The class balance", "source": "juraj-google-style"}
{"code": "def Query(self, query, parameters=None):\n    if parameters:\n        self._cursor.execute(query, parameters)\n    else:\n        self._cursor.execute(query)\n    return self._cursor.fetchall()", "docstring": "Queries the database file.\n\nArgs:\nquery (str): SQL query.\nparameters (Optional[dict|tuple]): query parameters.\n\nReturns:\nlist[sqlite3.Row]: rows resulting from the query.", "source": "codesearchnet"}
{"code": "def filter_data(self, field, filter_value, filter_operator, field_converter=None):\n    data = []\n    if (self._indexes.get(field) is not None):\n        data = self._index_filter(self._indexes.get(field), filter_value, filter_operator, field_converter)\n    return set(data)", "docstring": "Filter the data given the provided.\n\nArgs:\nfield (string): The field to filter on.\nfilter_value (string | list): The value to match.\nfilter_operator (string): The operator for comparison.\nfield_converter (method): A method used to convert the field before comparison.\n\nReturns:\n(set): List of matching data objects", "source": "codesearchnet"}
{"code": "def refresh_access_token(self, refresh_token):\n        \n        request = self._get_request()\n        response = request.post(self.OAUTH_TOKEN_URL, {\n            \"grant_type\": \"refresh_token\",\n            \"refresh_token\": refresh_token\n        })\n        self.auth = HSAccessTokenAuth.from_response(response)\n        return self.auth.access_token", "docstring": "Refreshes the current access token.\n\nGets a new access token, updates client auth and returns it.\n\nArgs:\n\nrefresh_token (str): Refresh token to use\n\nReturns:\nThe new access token", "source": "juraj-google-style"}
{"code": "def delete_all(self, filter=None, timeout=-1):\n        \n        return self._client.delete_all(filter=filter, timeout=timeout)", "docstring": "Delete an SNMPv3 User based on User name specified in filter. The user will be deleted only if it has no associated destinations.\n\nArgs:\nusername: ID or URI of SNMPv3 user.\nfilter: A general filter/query string to narrow the list of items returned.\nThe default is no filter - all resources are returned.\n\nReturns:\nbool: Indicates if the resource was successfully deleted.", "source": "juraj-google-style"}
{"code": "def encode(cls, command):\n    args = []\n    for arg in command.args:\n        if (not isinstance(arg, str)):\n            arg = str(arg)\n        if ((',' in arg) or arg.startswith(' ') or arg.endswith(' ') or arg.startswith('hex:')):\n            arg = 'hex:{}'.format(hexlify(arg.encode('utf-8')).decode('utf-8'))\n        args.append(arg)\n    argstr = ''\n    if (len(args) > 0):\n        argstr = ((' {' + ','.join(args)) + '}')\n    return (command.name + argstr)", "docstring": "Encode a command as an unambiguous string.\n\nArgs:\ncommand (Command): The command to encode.\n\nReturns:\nstr: The encoded command", "source": "codesearchnet"}
{"code": "def repeat(self, count=None, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import repeat_op\n    return repeat_op._repeat(self, count, name)", "docstring": "Repeats this dataset so each original value is seen `count` times.\n\n>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])\n>>> dataset = dataset.repeat(3)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[1, 2, 3, 1, 2, 3, 1, 2, 3]\n\nNote: If the input dataset depends on global state (e.g. a random number\ngenerator) or its output is non-deterministic (e.g. because of upstream\n`shuffle`), then different repetitions may produce different elements.\n\nArgs:\ncount: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\nnumber of times the dataset should be repeated. The default behavior (if\n`count` is `None` or `-1`) is for the dataset be repeated indefinitely.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA new `Dataset` with the transformation applied as described above.", "source": "github-repos"}
{"code": "def _wrap_section(source, width):\n    \n    \n    if _get_section('usage', source):\n        return _wrap_usage_section(source, width)\n    if _is_definition_section(source):\n        return _wrap_definition_section(source, width)\n    lines = inspect.cleandoc(source).splitlines()\n    paragraphs = (textwrap.wrap(line, width, replace_whitespace=False)\n                  for line in lines)\n    return '\\n'.join(line for paragraph in paragraphs for line in paragraph)", "docstring": "Wrap the given section string to the current terminal size.\n\nIntelligently wraps the section string to the given width. When wrapping\nsection lines, it auto-adjusts the spacing between terms and definitions.\nIt also adjusts commands the fit the correct length for the arguments.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "juraj-google-style"}
{"code": "def sign_hash(private_key, hash, hash_algo):\n    hash_algo = _hash_algorithms[hash_algo]\n    return get_privatekey(private_key).sign(hash, padding.PKCS1v15(), utils.Prehashed(hash_algo))", "docstring": "Sign the given hash with the given private key.\n\nArgs:\nprivate_key (str): PEM enoded private key\nhash (byte str): hash to sign\nhash_algo (str): name of hash algorithm used\n\nReturns:\nbyte string representing the signature", "source": "codesearchnet"}
{"code": "def aggregate_scores(weights: typing.List[str]) -> typing.Dict[str, typing.Dict[str, float]]:\n    decision_trees: typing.Dict[str, typing.Dict[str, float]] = dict()\n    for row in weights:\n        row = row.strip()\n        if not row:\n            continue\n        feature = row.split('\\t')[0]\n        feature_group, feature_content = feature.split(':', 1)\n        score = float(row.split('\\t')[1])\n        decision_trees.setdefault(feature_group, {})\n        decision_trees[feature_group].setdefault(feature_content, 0)\n        decision_trees[feature_group][feature_content] += score\n    return decision_trees", "docstring": "Exports the model by aggregating the weight scores.\n\nArgs:\nweights (List[str]): The lines of exported weight score file.\n\nReturns:\nmodel (Dict[string, Dict[string, float]]) The exported model.", "source": "github-repos"}
{"code": "def init_app(self, app):\n        \n        self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)\n\n        if not self._key:\n            return\n\n        self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)\n        sender = AsynchronousSender(self._endpoint_uri)\n\n        queue = AsynchronousQueue(sender)\n        self._channel = TelemetryChannel(None, queue)\n\n        self._init_request_logging(app)\n        self._init_trace_logging(app)\n        self._init_exception_logging(app)", "docstring": "Initializes the extension for the provided Flask application.\n\nArgs:\napp (flask.Flask): the Flask application for which to initialize the extension.", "source": "juraj-google-style"}
{"code": "def submit_snl(self, snl):\n        \n        try:\n            snl = snl if isinstance(snl, list) else [snl]\n            jsondata = [s.as_dict() for s in snl]\n            payload = {\"snl\": json.dumps(jsondata, cls=MontyEncoder)}\n            response = self.session.post(\"{}/snl/submit\".format(self.preamble),\n                                         data=payload)\n            if response.status_code in [200, 400]:\n                resp = json.loads(response.text, cls=MontyDecoder)\n                if resp[\"valid_response\"]:\n                    if resp.get(\"warning\"):\n                        warnings.warn(resp[\"warning\"])\n                    return resp['inserted_ids']\n                else:\n                    raise MPRestError(resp[\"error\"])\n\n            raise MPRestError(\"REST error with status code {} and error {}\"\n                              .format(response.status_code, response.text))\n\n        except Exception as ex:\n            raise MPRestError(str(ex))", "docstring": "Submits a list of StructureNL to the Materials Project site.\n\n.. note::\n\nAs of now, this MP REST feature is open only to a select group of\nusers. Opening up submissions to all users is being planned for\nthe future.\n\nArgs:\nsnl (StructureNL/[StructureNL]): A single StructureNL, or a list\nof StructureNL objects\n\nReturns:\nA list of inserted submission ids.\n\nRaises:\nMPRestError", "source": "juraj-google-style"}
{"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n    known_args, pipeline_args = parse_known_args(argv)\n    pipeline_options = PipelineOptions(pipeline_args)\n    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n\n    class OnnxNoBatchModelHandler(OnnxModelHandlerNumpy):\n        \n\n        def batch_elements_kwargs(self):\n            return {'max_batch_size': 1}\n    model_handler = OnnxNoBatchModelHandler(model_uri=known_args.model_uri)\n    pipeline = test_pipeline\n    if not test_pipeline:\n        pipeline = beam.Pipeline(options=pipeline_options)\n    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n    text = pipeline | 'ReadSentences' >> beam.io.ReadFromText(known_args.input)\n    text_and_tokenized_text_tuple = text | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines) | 'TokenizeSentence' >> beam.Map(lambda x: tokenize_sentence(x, tokenizer))\n    output = text_and_tokenized_text_tuple | 'PyTorchRunInference' >> RunInference(KeyedModelHandler(model_handler)) | 'ProcessOutput' >> beam.ParDo(PostProcessor())\n    _ = output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n    result = pipeline.run()\n    result.wait_until_finish()\n    return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"}
{"code": "def vectorize(self, token_list):\n        \n        vector_list = [self.__collection.tf_idf(token, self.__collection) for token in token_list]\n        return vector_list", "docstring": "Vectorize the token list.\n\nArgs:\ntoken_list:   The list of tokens.\n\nReturns:\n[vector of token, vector of token, vector of token, ...]", "source": "juraj-google-style"}
{"code": "def xmon_op_from_proto_dict(proto_dict: Dict) -> ops.Operation:\n\n    def raise_missing_fields(gate_name: str):\n        raise ValueError('{} missing required fields: {}'.format(gate_name, proto_dict))\n    param = _parameterized_value_from_proto_dict\n    qubit = devices.GridQubit.from_proto_dict\n    if ('exp_w' in proto_dict):\n        exp_w = proto_dict['exp_w']\n        if (('half_turns' not in exp_w) or ('axis_half_turns' not in exp_w) or ('target' not in exp_w)):\n            raise_missing_fields('ExpW')\n        return ops.PhasedXPowGate(exponent=param(exp_w['half_turns']), phase_exponent=param(exp_w['axis_half_turns'])).on(qubit(exp_w['target']))\n    elif ('exp_z' in proto_dict):\n        exp_z = proto_dict['exp_z']\n        if (('half_turns' not in exp_z) or ('target' not in exp_z)):\n            raise_missing_fields('ExpZ')\n        return (ops.Z(qubit(exp_z['target'])) ** param(exp_z['half_turns']))\n    elif ('exp_11' in proto_dict):\n        exp_11 = proto_dict['exp_11']\n        if (('half_turns' not in exp_11) or ('target1' not in exp_11) or ('target2' not in exp_11)):\n            raise_missing_fields('Exp11')\n        return (ops.CZ(qubit(exp_11['target1']), qubit(exp_11['target2'])) ** param(exp_11['half_turns']))\n    elif ('measurement' in proto_dict):\n        meas = proto_dict['measurement']\n        invert_mask = cast(Tuple[(Any, ...)], ())\n        if ('invert_mask' in meas):\n            invert_mask = tuple((json.loads(x) for x in meas['invert_mask']))\n        if (('key' not in meas) or ('targets' not in meas)):\n            raise_missing_fields('Measurement')\n        return ops.MeasurementGate(num_qubits=len(meas['targets']), key=meas['key'], invert_mask=invert_mask).on(*[qubit(q) for q in meas['targets']])\n    else:\n        raise ValueError('invalid operation: {}'.format(proto_dict))", "docstring": "Convert the proto dictionary to the corresponding operation.\n\nSee protos in api/google/v1 for specification of the protos.\n\nArgs:\nproto_dict: Dictionary representing the proto. Keys are always\nstrings, but values may be types correspond to a raw proto type\nor another dictionary (for messages).\n\nReturns:\nThe operation.\n\nRaises:\nValueError if the dictionary does not contain required values\ncorresponding to the proto.", "source": "codesearchnet"}
{"code": "def get_block_hash(self, height, id=None, endpoint=None):\n        \n        return self._call_endpoint(GET_BLOCK_HASH, params=[height], id=id, endpoint=endpoint)", "docstring": "Get hash of a block by its height\nArgs:\nheight: (int) height of the block to lookup\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"}
{"code": "def output_file(self, filename, title='Bokeh Plot', mode='cdn', root_dir=None):\n    self._file = {'filename': filename, 'resources': Resources(mode=mode, root_dir=root_dir), 'title': title}\n    if os.path.isfile(filename):\n        log.info((\"Session output file '%s' already exists, will be overwritten.\" % filename))", "docstring": "Configure output to a standalone HTML file.\n\nCalling ``output_file`` does not clear the effects of any other calls to\n``output_notebook``, etc. It adds an additional output destination\n(publishing to HTML files). Any other active output modes continue\nto be active.\n\nArgs:\nfilename (str) : a filename for saving the HTML document\n\ntitle (str, optional) : a title for the HTML document\n\nmode (str, optional) : how to include BokehJS (default: ``'cdn'``)\n\nOne of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or\n``'absolute(-dev)'``. See :class:`~bokeh.resources.Resources`\nfor more details.\n\nroot_dir (str, optional) : root dir to use for absolute resources\n(default: None)\n\nThis value is ignored for other resource types, e.g. ``INLINE`` or ``CDN``.\n\n.. warning::\nThe specified output file will be overwritten on every save, e.g.,\nevery time ``show()`` or ``save()`` is called.", "source": "codesearchnet"}
{"code": "def _io_write_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n    if (pipeline := test_spec.get('pipeline', None)):\n        for transform in pipeline.get('transforms', []):\n            if transform.get('type', '').startswith('WriteTo'):\n                transform['type'] = 'LogForTesting'\n                transform['config'] = {k: v for k, v in transform.get('config', {}).items() if k.startswith('__') or k == 'error_handling'}\n    return test_spec", "docstring": "Preprocessor for tests that involve writing to IO.\n\nThis preprocessor replaces any WriteTo transform with a LogForTesting\ntransform. This allows the test to verify the data being written without\nactually writing to an external system.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with WriteTo transforms replaced.", "source": "github-repos"}
{"code": "def get_all_resource_ids_in_datastore(configuration=None):\n    resource = Resource(configuration=configuration)\n    (success, result) = resource._read_from_hdx('datastore', '_table_metadata', 'resource_id', Resource.actions()['datastore_search'], limit=10000)\n    resource_ids = list()\n    if (not success):\n        logger.debug(result)\n    else:\n        for record in result['records']:\n            resource_ids.append(record['name'])\n    return resource_ids", "docstring": "Get list of resources that have a datastore returning their ids.\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nList[str]: List of resource ids that are in the datastore", "source": "codesearchnet"}
{"code": "def _from_base_type(self, value):\n    if (not value):\n        return None\n    try:\n        credentials = client.Credentials.new_from_json(value)\n    except ValueError:\n        credentials = None\n    return credentials", "docstring": "Converts our stored JSON string back to the desired type.\n\nArgs:\nvalue: A value from the datastore to be converted to the\ndesired type.\n\nReturns:\nA deserialized Credentials (or subclass) object, else None if\nthe value can't be parsed.", "source": "codesearchnet"}
{"code": "def clinsig_query(self, query, mongo_query):\n    LOG.debug('clinsig is a query parameter')\n    trusted_revision_level = ['mult', 'single', 'exp', 'guideline']\n    rank = []\n    str_rank = []\n    clnsig_query = {}\n    for item in query['clinsig']:\n        rank.append(int(item))\n        rank.append(CLINSIG_MAP[int(item)])\n        str_rank.append(CLINSIG_MAP[int(item)])\n    if (query.get('clinsig_confident_always_returned') == True):\n        LOG.debug('add CLINSIG filter with trusted_revision_level')\n        clnsig_query = {'clnsig': {'$elemMatch': {'$or': [{'$and': [{'value': {'$in': rank}}, {'revstat': {'$in': trusted_revision_level}}]}, {'$and': [{'value': re.compile('|'.join(str_rank))}, {'revstat': re.compile('|'.join(trusted_revision_level))}]}]}}}\n    else:\n        LOG.debug(('add CLINSIG filter for rank: %s' % ', '.join(str(query['clinsig']))))\n        clnsig_query = {'clnsig': {'$elemMatch': {'$or': [{'value': {'$in': rank}}, {'value': re.compile('|'.join(str_rank))}]}}}\n    return clnsig_query", "docstring": "Add clinsig filter values to the mongo query object\n\nArgs:\nquery(dict): a dictionary of query filters specified by the users\nmongo_query(dict): the query that is going to be submitted to the database\n\nReturns:\nclinsig_query(dict): a dictionary with clinsig key-values", "source": "codesearchnet"}
{"code": "def ValidateToken(token, targets):\n\n    def GetSubjectForError():\n        if (len(targets) == 1):\n            return list(targets)[0]\n        else:\n            return None\n    if (not token):\n        raise access_control.UnauthorizedAccess(('Must give an authorization token for %s' % targets), subject=GetSubjectForError())\n    token.CheckExpiry()\n    if (not token.username):\n        raise access_control.UnauthorizedAccess(('Must specify a username for access to %s.' % targets), subject=GetSubjectForError())\n    return True", "docstring": "Does basic token validation.\n\nArgs:\ntoken: User's credentials as access_control.ACLToken.\ntargets: List of targets that were meant to be accessed by the token. This\nis used for logging purposes only.\n\nReturns:\nTrue if token is valid.\n\nRaises:\naccess_control.UnauthorizedAccess: if token is not valid.\nValueError: if targets list is empty.", "source": "codesearchnet"}
{"code": "def collection(self, *collection_path):\n    if (len(collection_path) == 1):\n        path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)\n    else:\n        path = collection_path\n    return CollectionReference(*path, client=self)", "docstring": "Get a reference to a collection.\n\nFor a top-level collection:\n\n.. code-block:: python\n\n>>> client.collection('top')\n\nFor a sub-collection:\n\n.. code-block:: python\n\n>>> client.collection('mydocs/doc/subcol')\n>>> # is the same as\n>>> client.collection('mydocs', 'doc', 'subcol')\n\nSub-collections can be nested deeper in a similar fashion.\n\nArgs:\ncollection_path (Tuple[str, ...]): Can either be\n\n* A single ``/``-delimited path to a collection\n* A tuple of collection path segments\n\nReturns:\n~.firestore_v1beta1.collection.CollectionReference: A reference\nto a collection in the Firestore database.", "source": "codesearchnet"}
{"code": "def __build_cmd_maps(cls):\n    cmd_map_all = {}\n    cmd_map_visible = {}\n    cmd_map_internal = {}\n    for name in dir(cls):\n        obj = getattr(cls, name)\n        if iscommand(obj):\n            for cmd in getcommands(obj):\n                if (cmd in cmd_map_all.keys()):\n                    raise PyShellError(\"The command '{}' already has cmd method '{}', cannot register a second method '{}'.\".format(cmd, cmd_map_all[cmd], obj.__name__))\n                cmd_map_all[cmd] = obj.__name__\n                if isvisiblecommand(obj):\n                    cmd_map_visible[cmd] = obj.__name__\n                if isinternalcommand(obj):\n                    cmd_map_internal[cmd] = obj.__name__\n    return (cmd_map_all, cmd_map_visible, cmd_map_internal)", "docstring": "Build the mapping from command names to method names.\n\nOne command name maps to at most one method.\nMultiple command names can map to the same method.\n\nOnly used by __init__() to initialize self._cmd_map. MUST NOT be used\nelsewhere.\n\nReturns:\nA tuple (cmd_map, hidden_cmd_map, internal_cmd_map).", "source": "codesearchnet"}
{"code": "def get_seed(op_seed):\n    eager = context.executing_eagerly()\n    if eager:\n        global_seed = context.global_seed()\n    else:\n        global_seed = ops.get_default_graph().seed\n    if global_seed is not None:\n        if op_seed is None:\n            if hasattr(ops.get_default_graph(), '_seed_used'):\n                ops.get_default_graph()._seed_used = True\n            if eager:\n                op_seed = context.internal_operation_seed()\n            else:\n                op_seed = _graph_to_seed_dict.setdefault(ops.get_default_graph(), 0)\n                _graph_to_seed_dict[ops.get_default_graph()] += 1\n        seeds = (_truncate_seed(global_seed), _truncate_seed(op_seed))\n    elif op_seed is not None:\n        seeds = (DEFAULT_GRAPH_SEED, _truncate_seed(op_seed))\n    else:\n        seeds = (None, None)\n    if seeds == (None, None) and config.is_op_determinism_enabled():\n        raise RuntimeError('Random ops require a seed to be set when determinism is enabled. Please set a seed before running the op, e.g. by calling tf.random.set_seed(1).')\n    if seeds == (0, 0):\n        return (0, _MAXINT32)\n    return seeds", "docstring": "Returns the local seeds an operation should use given an op-specific seed.\n\nGiven operation-specific seed, `op_seed`, this helper function returns two\nseeds derived from graph-level and op-level seeds. Many random operations\ninternally use the two seeds to allow user to change the seed globally for a\ngraph, or for only specific operations.\n\nFor details on how the graph-level seed interacts with op seeds, see\n`tf.compat.v1.random.set_random_seed`.\n\nArgs:\nop_seed: integer.\n\nReturns:\nA tuple of two integers that should be used for the local seed of this\noperation.", "source": "github-repos"}
{"code": "def take_while(self, predicate):\n    if self.closed():\n        raise ValueError('Attempt to call take_while() on a closed Queryable.')\n    if (not is_callable(predicate)):\n        raise TypeError('take_while() parameter predicate={0} is not callable'.format(repr(predicate)))\n    return self._create(self._generate_take_while_result(predicate))", "docstring": "Returns elements from the start while the predicate is True.\n\nNote: This method uses deferred execution.\n\nArgs:\npredicate: A function returning True or False with which elements\nwill be tested.\n\nReturns:\nA Queryable over the elements from the beginning of the source\nsequence for which predicate is True.\n\nRaises:\nValueError: If the Queryable is closed()\nTypeError: If the predicate is not callable.", "source": "codesearchnet"}
{"code": "def render(self, link_url, image_url, **kwargs):\n    path = ('%s/render' % self.path)\n    data = {'link_url': link_url, 'image_url': image_url}\n    return self.gitlab.http_get(path, data, **kwargs)", "docstring": "Preview link_url and image_url after interpolation.\n\nArgs:\nlink_url (str): URL of the badge link\nimage_url (str): URL of the badge image\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabRenderError: If the rendering failed\n\nReturns:\ndict: The rendering properties", "source": "codesearchnet"}
{"code": "def put_archive(self, path, data):\n    return self.client.api.put_archive(self.id, path, data)", "docstring": "Insert a file or folder in this container using a tar archive as\nsource.\n\nArgs:\npath (str): Path inside the container where the file(s) will be\nextracted. Must exist.\ndata (bytes): tar data to be extracted\n\nReturns:\n(bool): True if the call succeeds.\n\nRaises:\n:py:class:`~docker.errors.APIError` If an error occurs.", "source": "codesearchnet"}
{"code": "def __fetch_route53_zone_records(self, zone_id):\n        \n        route53 = self.session.client('route53')\n\n        done = False\n        nextName = nextType = None\n        records = {}\n\n        try:\n            while not done:\n                if nextName and nextType:\n                    response = route53.list_resource_record_sets(\n                        HostedZoneId=zone_id,\n                        StartRecordName=nextName,\n                        StartRecordType=nextType\n                    )\n                else:\n                    response = route53.list_resource_record_sets(HostedZoneId=zone_id)\n\n                if response['IsTruncated']:\n                    nextName = response['NextRecordName']\n                    nextType = response['NextRecordType']\n                else:\n                    done = True\n\n                if 'ResourceRecordSets' in response:\n                    for record in response['ResourceRecordSets']:\n                        \n                        \n                        \n                        record_id = self._get_resource_hash(zone_id, record)\n                        if 'AliasTarget' in record:\n                            value = record['AliasTarget']['DNSName']\n                            records[record_id] = {\n                                'id': record_id,\n                                'name': record['Name'].rstrip('.'),\n                                'type': 'ALIAS',\n                                'ttl': 0,\n                                'value': [value]\n                            }\n                        else:\n                            value = [y['Value'] for y in record['ResourceRecords']]\n                            records[record_id] = {\n                                'id': record_id,\n                                'name': record['Name'].rstrip('.'),\n                                'type': record['Type'],\n                                'ttl': record['TTL'],\n                                'value': value\n                            }\n\n            return list(records.values())\n        finally:\n            del route53", "docstring": "Return all resource records for a specific Route53 zone\n\nArgs:\nzone_id (`str`): Name / ID of the hosted zone\n\nReturns:\n`dict`", "source": "juraj-google-style"}
{"code": "def __sweeten(self, dumper: 'Dumper', class_: Type, node: Node) -> None:\n        \n        for base_class in class_.__bases__:\n            if base_class in dumper.yaml_representers:\n                logger.debug('Sweetening for class {}'.format(\n                    self.class_.__name__))\n                self.__sweeten(dumper, base_class, node)\n        if hasattr(class_, 'yatiml_sweeten'):\n            class_.yatiml_sweeten(node)", "docstring": "Applies the user's yatiml_sweeten() function(s), if any.\n\nSweetening is done for the base classes first, then for the \\\nderived classes, down the hierarchy to the class we're \\\nconstructing.\n\nArgs:\ndumper: The dumper that is dumping this object.\nclass_: The type of the object to be dumped.\nrepresented_object: The object to be dumped.", "source": "juraj-google-style"}
{"code": "def get_node_list(self) -> list:\n    nodes = []\n    if (not self._manager):\n        raise RuntimeError('Only the Swarm manager node can retrieve all the nodes.')\n    node_list = self._client.nodes.list()\n    for n_list in node_list:\n        nodes.append(n_list.id)\n    return nodes", "docstring": "Get a list of nodes.\n\nOnly the manager nodes can retrieve all the nodes\n\nReturns:\nlist, all the ids of the nodes in swarm", "source": "codesearchnet"}
{"code": "def _create_single_feature_method(feature):\n    \n    \n    fx_name = feature.name.lower()\n    if \"detection\" in fx_name:\n        fx_doc = \"Perform {0}.\".format(fx_name.replace(\"_\", \" \"))\n    else:\n        fx_doc = \"Return {desc} information.\".format(desc=fx_name.replace(\"_\", \" \"))\n\n    \n    \n    fx_doc += '\\n\\nArgs:\\n    image: The image to analyze.\\n    max_results (int): Maximum number of results to return.\\n    retry: Retry configuration for the request.\\n    timeout: Time in seconds to wait for the response.\\n    kwargs: Additional properties passed to annotate_image.\\n\\nReturns:\\n    The API response.'\n\n    \n    feature_value = {\"type\": feature}\n\n    \n    def inner(self, image, max_results=None, retry=None, timeout=None, **kwargs):\n        \n        copied_features = feature_value.copy()\n        if max_results is not None:\n            copied_features[\"max_results\"] = max_results\n        request = dict(image=image, features=[copied_features], **kwargs)\n        response = self.annotate_image(request, retry=retry, timeout=timeout)\n        return response\n\n    \n    inner.__name__ = fx_name\n    inner.__doc__ = fx_doc\n\n    \n    return inner", "docstring": "Return a function that will detect a single feature.\n\nArgs:\nfeature (enum): A specific feature defined as a member of\n:class:`~enums.Feature.Type`.\n\nReturns:\nfunction: A helper function to detect just that feature.", "source": "juraj-google-style"}
{"code": "def log_warning(self, msg):\n        \n        if self.__logger:\n            self.__logger.warning(msg)\n\n        if self.__raise_exception_on_warning:\n            raise RuntimeError(msg)", "docstring": "Log a warning if ``logger`` exists.\n\nArgs:\nmsg: Warning to log.\n\nWarning:\nCan raise a ``RuntimeError`` if this was asked in the constructor.", "source": "juraj-google-style"}
{"code": "def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic):\n    if ((type(aggregate_reports) == dict) or (type(aggregate_reports) == OrderedDict)):\n        aggregate_reports = [aggregate_reports]\n    if (len(aggregate_reports) < 1):\n        return\n    for report in aggregate_reports:\n        report['date_range'] = self.generate_daterange(report)\n        report = self.strip_metadata(report)\n        for slice in report['records']:\n            slice['date_range'] = report['date_range']\n            slice['org_name'] = report['org_name']\n            slice['org_email'] = report['org_email']\n            slice['policy_published'] = report['policy_published']\n            slice['report_id'] = report['report_id']\n            logger.debug('Sending slice.')\n            try:\n                logger.debug('Saving aggregate report to Kafka')\n                self.producer.send(aggregate_topic, slice)\n            except UnknownTopicOrPartitionError:\n                raise KafkaError('Kafka error: Unknown topic or partition on broker')\n            except Exception as e:\n                raise KafkaError('Kafka error: {0}'.format(e.__str__()))\n            try:\n                self.producer.flush()\n            except Exception as e:\n                raise KafkaError('Kafka error: {0}'.format(e.__str__()))", "docstring": "Saves aggregate DMARC reports to Kafka\n\nArgs:\naggregate_reports (list):  A list of aggregate report dictionaries\nto save to Kafka\naggregate_topic (str): The name of the Kafka topic", "source": "codesearchnet"}
{"code": "def weCanCheckTheseDomains(email):\n    notWorking = ['@aol.com', '@bk.ru', '@breakthru.com', '@gmx.', '@hotmail.co', '@inbox.com', '@latinmail.com', '@libero.it', '@mail.ru', '@mail2tor.com', '@outlook.com', '@rambler.ru', '@rocketmail.com', '@starmedia.com', '@ukr.net', '@yahoo.', '@ymail.']\n    for n in notWorking:\n        if (n in email):\n            print(\"\\t[*] Verification of '{}' aborted. Details:\\n\\t\\t{}\".format(general.warning(email), 'This domain CANNOT be verified using mailfy.'))\n            return False\n    emailDomains = EMAIL_DOMAINS\n    safe = False\n    for e in EMAIL_DOMAINS:\n        if (e in email):\n            safe = True\n    if (not safe):\n        print(\"\\t[*] Verification of '{}' aborted. Details:\\n\\t\\t{}\".format(general.warning(email), 'This domain CANNOT be verified using mailfy.'))\n        return False\n    return True", "docstring": "Method that verifies if a domain can be safely verified.\n\nArgs:\n-----\nemail: the email whose domain will be verified.\n\nReturns:\n--------\nbool: it represents whether the domain can be verified.", "source": "codesearchnet"}
{"code": "def new_cells(self, name=None, formula=None):\n    return self._impl.new_cells(name, formula).interface", "docstring": "Create a cells in the space.\n\nArgs:\nname: If omitted, the cells is named automatically ``CellsN``,\nwhere ``N`` is an available number.\nformula: The function to define the formula of the cells.\n\nReturns:\nThe new cells.", "source": "codesearchnet"}
{"code": "def get_attr_info(binary_view):\n    global _ATTR_BASIC\n    (attr_type, attr_len, non_resident) = _ATTR_BASIC.unpack(binary_view[:9])\n    return (AttrTypes(attr_type), attr_len, bool(non_resident))", "docstring": "Gets basic information from a binary stream to allow correct processing of\nthe attribute header.\n\nThis function allows the interpretation of the Attribute type, attribute length\nand if the attribute is non resident.\n\nArgs:\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\n\nReturns:\nA tuple with the attribute type, the attribute length, in bytes, and\nif the attribute is resident or not.", "source": "codesearchnet"}
{"code": "def get_namespaces(start=None, end=None):\n  \n  q = Namespace.query()\n  if start is not None:\n    q = q.filter(Namespace.key >= Namespace.key_for_namespace(start))\n  if end is not None:\n    q = q.filter(Namespace.key < Namespace.key_for_namespace(end))\n  return [x.namespace_name for x in q]", "docstring": "Return all namespaces in the specified range.\n\nArgs:\nstart: only return namespaces >= start if start is not None.\nend: only return namespaces < end if end is not None.\n\nReturns:\nA list of namespace names between the (optional) start and end values.", "source": "juraj-google-style"}
{"code": "def create_submission(self, user_id, institute_id):\n        \n\n        submission_obj = {\n            'status' : 'open',\n            'created_at' : datetime.now(),\n            'user_id' : user_id,\n            'institute_id' : institute_id\n        }\n        LOG.info(\"Creating a new clinvar submission for user '%s' and institute %s\", user_id, institute_id)\n        result = self.clinvar_submission_collection.insert_one(submission_obj)\n        return result.inserted_id", "docstring": "Create an open clinvar submission for a user and an institute\nArgs:\nuser_id(str): a user ID\ninstitute_id(str): an institute ID\n\nreturns:\nsubmission(obj): an open clinvar submission object", "source": "juraj-google-style"}
{"code": "def expand_source_files(filenames, cwd=None):\n    out = []\n    for f in expand_paths(filenames, cwd):\n        if os.path.isdir(f):\n            out += collect_files(f, '.py')\n        elif f.endswith('.py'):\n            out.append(f)\n    return sorted(set(out))", "docstring": "Expand a list of filenames passed in as sources.\n\nThis is a helper function for handling command line arguments that specify a\nlist of source files and directories.\n\nAny directories in filenames will be scanned recursively for .py files.\nAny files that do not end with \".py\" will be dropped.\n\nArgs:\nfilenames: A list of filenames to process.\ncwd: An optional working directory to expand relative paths\nReturns:\nA list of sorted full paths to .py files", "source": "codesearchnet"}
{"code": "def format_page(text):\n    \n    width = max(map(len, text.splitlines()))\n    page = \"+-\" + \"-\" * width + \"-+\\n\"\n    for line in text.splitlines():\n        page += \"| \" + line.ljust(width) + \" |\\n\"\n    page += \"+-\" + \"-\" * width + \"-+\\n\"\n    return page", "docstring": "Format the text for output adding ASCII frame around the text.\n\nArgs:\ntext (str): Text that needs to be formatted.\n\nReturns:\nstr: Formatted string.", "source": "juraj-google-style"}
{"code": "def assert_title(self, title, **kwargs):\n    query = TitleQuery(title, **kwargs)\n\n    @self.synchronize(wait=query.wait)\n    def assert_title():\n        if (not query.resolves_for(self)):\n            raise ExpectationNotMet(query.failure_message)\n        return True\n    return assert_title()", "docstring": "Asserts that the page has the given title.\n\nArgs:\ntitle (str | RegexObject): The string or regex that the title should match.\n**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "codesearchnet"}
{"code": "def _GetResolverHelper(cls, type_indicator):\n    \n    if not cls._resolver_helpers_manager:\n      \n      \n      from dfvfs.resolver_helpers import manager\n\n      cls._resolver_helpers_manager = manager.ResolverHelperManager\n\n    return cls._resolver_helpers_manager.GetHelper(type_indicator)", "docstring": "Retrieves the path specification resolver helper for the specified type.\n\nArgs:\ntype_indicator (str): type indicator.\n\nReturns:\nResolverHelper: a resolver helper.", "source": "juraj-google-style"}
{"code": "def add_case(self, case, update=False):\n        \n        existing_case = self.case(case)\n        if existing_case and not update:\n            raise CaseError(\"Case {} already exists\".format(case['case_id']))\n        if existing_case:\n            self.db.case.find_one_and_replace(\n                {'case_id': case['case_id']},\n                case,\n            )\n        else:\n            self.db.case.insert_one(case)\n\n        return case", "docstring": "Add a case to the case collection\n\nIf the case exists and update is False raise error.\n\nArgs:\ncase (dict): A case dictionary\nupdate(bool): If existing case should be updated\n\nReturns:\nmongo_case_id(ObjectId)", "source": "juraj-google-style"}
{"code": "def register_repeating_metric(self, metric_name, frequency, getter):\n        \n        l = task.LoopingCall(self._publish_repeating_metric, metric_name, getter)\n        repeating_metric_handle = RepeatingMetricHandle(l, frequency)\n        self._repeating_metric_handles.append(repeating_metric_handle)\n        if self.running:\n            repeating_metric_handle.start()\n        return repeating_metric_handle", "docstring": "Record hits to a metric at a specified interval.\n\nArgs:\nmetric_name: The name of the metric to record with Carbon.\nfrequency: The frequency with which to poll the getter and record the value with Carbon.\ngetter: A function which takes no arguments and returns the value to record with Carbon.\n\nReturns:\nRepeatingMetricHandle instance. Call .stop() on it to stop recording the metric.", "source": "juraj-google-style"}
{"code": "def count_true_positive(truth, recommend):\n    tp = 0\n    for r in recommend:\n        if (r in truth):\n            tp += 1\n    return tp", "docstring": "Count number of true positives from given sets of samples.\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\n\nReturns:\nint: Number of true positives.", "source": "codesearchnet"}
{"code": "def __init__(self, caller: Caller[RequestT, ResponseT], timeout: Optional[float]=DEFAULT_TIMEOUT_SECS, should_backoff: Optional[ShouldBackOff]=None, repeater: Repeater=ExponentialBackOffRepeater(), cache: Optional[Cache]=None, throttler: PreCallThrottler=DefaultThrottler()):\n    self._caller = caller\n    self._timeout = timeout\n    self._should_backoff = should_backoff\n    if repeater:\n        self._repeater = repeater\n    else:\n        self._repeater = NoOpsRepeater()\n    self._cache = cache\n    self._throttler = throttler\n    self._batching_kwargs = self._caller.batch_elements_kwargs()", "docstring": "Instantiates a RequestResponseIO transform.\n\nArgs:\ncaller: an implementation of\n`Caller` object that makes call to the API.\ntimeout (float): timeout value in seconds to wait for response from API.\nshould_backoff: (Optional) provides methods for backoff.\nrepeater: provides method to repeat failed requests to API due to service\nerrors. Defaults to\n:class:`apache_beam.io.requestresponse.ExponentialBackOffRepeater` to\nrepeat requests with exponential backoff.\ncache: (Optional) a `~apache_beam.io.requestresponse.Cache` object\nto use the appropriate cache.\nthrottler: provides methods to pre-throttle a request. Defaults to\n:class:`apache_beam.io.requestresponse.DefaultThrottler` for\nclient-side adaptive throttling using\n:class:`apache_beam.io.components.adaptive_throttler.AdaptiveThrottler`", "source": "github-repos"}
{"code": "def _call_with_flat_signature(self, args, kwargs):\n    if len(args) > self._num_positional_args:\n        raise TypeError(f'{self._flat_signature_summary()} takes {self._num_positional_args} positional arguments, got {len(args)}.')\n    args = list(args)\n    kwargs = dict(kwargs)\n    kwargs = {function_type_lib.sanitize_arg_name(k): v for k, v in kwargs.items()}\n    for keyword in self._arg_keywords[len(args):]:\n        try:\n            args.append(kwargs.pop(function_type_lib.sanitize_arg_name(compat.as_str(keyword))))\n        except KeyError:\n            specified_keywords = list(self._arg_keywords[:len(args)]) + list(kwargs.keys())\n            missing_required_args = sorted(set(self._arg_keywords) - set(specified_keywords))\n            raise TypeError(f'{self._flat_signature_summary()} missing required arguments: {', '.join(missing_required_args)}.')\n    if kwargs:\n        positional_arg_keywords = set(self._arg_keywords[:len(args)])\n        for unused_key in kwargs:\n            if unused_key in positional_arg_keywords:\n                raise TypeError(f\"{self._flat_signature_summary()} got two values for '{unused_key}'.\")\n        raise TypeError(f'{self._flat_signature_summary()} got unexpected keyword arguments: {', '.join(sorted(kwargs))}.')\n    for i, arg in enumerate(args):\n        if not isinstance(arg, (tensor_lib.Tensor, resource_variable_ops.BaseResourceVariable)):\n            raise TypeError(f'{self._flat_signature_summary()}: expected argument #{i}(zero-based) to be a Tensor; got {type(arg).__name__} ({arg!r}).')\n    return self._call_flat(args, self.captured_inputs)", "docstring": "Executes the wrapped function with the flat signature.\n\nArgs:\nargs: Positional arguments to the concrete function.\nkwargs: Keyword arguments to the concrete function.\n\nReturns:\nThe result of applying the function on the Tensors/Variables contained in\n`args` and `kwargs`.\nRaises:\nTypeError: if `args` and `kwargs` do not match the flat signature of this\n`ConcreteFunction`.", "source": "github-repos"}
{"code": "def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides, padding):\n    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n\n    def _setup_val(data_format, use_gpu):\n        with test_util.device(use_gpu):\n            t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n            t2 = constant_op.constant(x2, shape=filter_in_sizes)\n            strides = [1] + conv_strides + [1]\n            if data_format == 'NCHW':\n                t1 = test_util.NHWCToNCHW(t1)\n                strides = test_util.NHWCToNCHW(strides)\n            conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding, data_format=data_format)\n            if data_format == 'NCHW':\n                conv = test_util.NCHWToNHWC(conv)\n            return conv\n    tensors = []\n    for data_format, use_gpu in get_test_configs():\n        tensors.append(_setup_val(data_format, use_gpu))\n    values = self.evaluate(tensors)\n    for i in range(1, len(values)):\n        self.assertAllClose(values[0], values[i], rtol=0.001, atol=0.001)", "docstring": "Verifies that CPU and GPU produce the same values.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\nconv_strides: [row_stride, col_stride] for the convolution;\npadding: Padding type.", "source": "github-repos"}
{"code": "def _ScanVolumeSystemRootNode(self, scan_context, scan_node, auto_recurse=True):\n    if (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):\n        path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)\n        if path_spec:\n            scan_context.AddScanNode(path_spec, scan_node.parent_node)\n    file_entry = resolver.Resolver.OpenFileEntry(scan_node.path_spec, resolver_context=self._resolver_context)\n    for sub_file_entry in file_entry.sub_file_entries:\n        sub_scan_node = scan_context.AddScanNode(sub_file_entry.path_spec, scan_node)\n        if (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):\n            continue\n        if (auto_recurse or (not scan_context.updated)):\n            self._ScanNode(scan_context, sub_scan_node, auto_recurse=auto_recurse)", "docstring": "Scans a volume system root node for supported formats.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): source scan node.\nauto_recurse (Optional[bool]): True if the scan should automatically\nrecurse as far as possible.\n\nRaises:\nValueError: if the scan context or scan node is invalid.", "source": "codesearchnet"}
{"code": "def concatenate(xs, axis=0):\n    if any_symbolic_tensors(xs):\n        return Concatenate(axis=axis).symbolic_call(xs)\n    return backend.numpy.concatenate(xs, axis=axis)", "docstring": "Join a sequence of tensors along an existing axis.\n\nArgs:\nxs: The sequence of tensors to concatenate.\naxis: The axis along which the tensors will be joined. Defaults to `0`.\n\nReturns:\nThe concatenated tensor.", "source": "github-repos"}
{"code": "def impersonate(self, user, enterprise):\n        \n\n        if not user or not enterprise:\n            raise ValueError('You must set a user name and an enterprise name to begin impersonification')\n\n        self._is_impersonating = True\n        self._impersonation = \"%s@%s\" % (user, enterprise)", "docstring": "Impersonate a user in an enterprise\n\nArgs:\nuser: the name of the user to impersonate\nenterprise: the name of the enterprise where to use impersonation", "source": "juraj-google-style"}
{"code": "def run(self, inputs=None, warmup_iterations: int=10, benchmark_iterations: int=100, enable_gpu: bool=True) -> TestResult:", "docstring": "Runs the model with provided or randomly generated input tensors.\n\nArgs:\ninputs: Mapping from names to input ndarrays in TF1, or a sequence of\ntensors in TF2. If `None`, ramdomly generated inputs will be used\ninstead.\nwarmup_iterations: Number of inferences to warm up the runtime.\nbenchmark_iterations: Number of inferences to measure the latency.\nenable_gpu: Whether it is allowed to use GPU or not.\n\nReturns:\n`TestResult` summarizing latency and numerics information.", "source": "github-repos"}
{"code": "def _HandleMetadataUpdate(self, metadata_key='', recursive=True, wait=True, timeout=None, retry=True):\n    exception = None\n    while True:\n        try:\n            return self._GetMetadataUpdate(metadata_key=metadata_key, recursive=recursive, wait=wait, timeout=timeout)\n        except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n            if (not isinstance(e, type(exception))):\n                exception = e\n                self.logger.error('GET request error retrieving metadata. %s.', e)\n            if retry:\n                continue\n            else:\n                break", "docstring": "Wait for a successful metadata response.\n\nArgs:\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\nwait: bool, True if we should wait for a metadata change.\ntimeout: int, timeout in seconds for returning metadata output.\nretry: bool, True if we should retry on failure.\n\nReturns:\njson, the deserialized contents of the metadata server.", "source": "codesearchnet"}
{"code": "def is_dir(self, follow_symlinks=True):\n        \n        try:\n            return (self._system.isdir(\n                path=self._path, client_kwargs=self._client_kwargs,\n                virtual_dir=False) or\n\n                \n                \n                bool(S_ISDIR(self.stat().st_mode)))\n\n        except ObjectPermissionError:\n            \n            \n            return True", "docstring": "Return True if this entry is a directory or a symbolic link pointing to\na directory; return False if the entry is or points to any other kind\nof file, or if it doesn’t exist anymore.\n\nThe result is cached on the os.DirEntry object.\n\nArgs:\nfollow_symlinks (bool): Follow symlinks.\nNot supported on cloud storage objects.\n\nReturns:\nbool: True if directory exists.", "source": "juraj-google-style"}
{"code": "def find_elements_by_class(self, class_, update=False) -> Elements:\n        \n        return self.find_elements(by=By.CLASS, value=class_, update=update)", "docstring": "Finds multiple elements by class.\n\nArgs:\nclass_: The class of the elements to be found.\nupdate: If the interface has changed, this option should be True.\n\nReturns:\nA list with elements if any was found. An empty list if not.\n\nRaises:\nNoSuchElementException - If the element wasn't found.\n\nUsage:\nelements = driver.find_elements_by_class('foo')", "source": "juraj-google-style"}
{"code": "def prob(self, value, name='prob'):\n    return self._call_prob(value, name)", "docstring": "Probability density/mass function.\n\nArgs:\nvalue: `float` or `double` `Tensor`.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\nprob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with\nvalues of type `self.dtype`.", "source": "github-repos"}
{"code": "def SetServerInformation(self, server, port):\n    self._host = server\n    self._port = port", "docstring": "Sets the server information.\n\nArgs:\nserver (str): hostname or IP address of the database server.\nport (int): port number of the database server.", "source": "codesearchnet"}
{"code": "def has_unitary(val: Any) -> bool:\n    from cirq.protocols.decompose import decompose_once, decompose_once_with_qubits\n    from cirq import Gate, Operation, LineQubit\n    getter = getattr(val, '_has_unitary_', None)\n    result = (NotImplemented if (getter is None) else getter())\n    if (result is not NotImplemented):\n        return result\n    unitary_getter = getattr(val, '_unitary_', None)\n    if ((unitary_getter is not None) and (unitary_getter() is not NotImplemented)):\n        return True\n    if isinstance(val, Gate):\n        decomposed_val = decompose_once_with_qubits(val, LineQubit.range(val.num_qubits()), default=None)\n        if (decomposed_val is not None):\n            return all((has_unitary(v) for v in decomposed_val))\n    elif isinstance(val, Operation):\n        decomposed_val = decompose_once(val, default=None)\n        if (decomposed_val is not None):\n            return all((has_unitary(v) for v in decomposed_val))\n    return (unitary(val, None) is not None)", "docstring": "Returns whether the value has a unitary matrix representation.\n\nReturns:\nIf `val` has a _has_unitary_ method and its result is not\nNotImplemented, that result is returned. Otherwise, if `val` is a\ncirq.Gate or cirq.Operation, a decomposition is attempted and the\nresulting unitary is returned if has_unitary is True for all operations\nof the decomposition. Otherwise, if the value has a _unitary_ method,\nreturn whether that has a non-default value. Returns False if neither\nfunction exists.", "source": "codesearchnet"}
{"code": "def __ge__(self, other):\n        \n        if self.index_type is not None:\n            expr = grizzly_impl.get_field(self.expr, 1)\n        else:\n            expr = self.expr\n        return SeriesWeld(\n            grizzly_impl.compare(\n                expr,\n                other,\n                \">=\",\n                self.weld_type\n            ),\n            WeldBit(),\n            self.df,\n            self.column_name\n        )", "docstring": "Summary\n\nArgs:\nother (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "def __init__(self, window_fn, main_receivers, tagged_receivers, per_element_output_counter, output_batch_converter, process_yields_batches, process_batch_yields_elements):\n    self.window_fn = window_fn\n    self.main_receivers = main_receivers\n    self.tagged_receivers = tagged_receivers\n    if per_element_output_counter is not None and per_element_output_counter.is_cythonized:\n        self.per_element_output_counter = per_element_output_counter\n    else:\n        self.per_element_output_counter = None\n    self.output_batch_converter = output_batch_converter\n    self._process_yields_batches = process_yields_batches\n    self._process_batch_yields_elements = process_batch_yields_elements", "docstring": "Initializes ``_OutputHandler``.\n\nArgs:\nwindow_fn: a windowing function (WindowFn).\nmain_receivers: the main receiver object.\ntagged_receivers: a dict of tag name to Receiver objects.\nper_element_output_counter: per_element_output_counter of one work_item.\nCould be None if the experimental flag is turned off.", "source": "github-repos"}
{"code": "def normpath(self, path):\n    path = self.normcase(path)\n    (drive, path) = self.splitdrive(path)\n    sep = self._path_separator(path)\n    is_absolute_path = path.startswith(sep)\n    path_components = path.split(sep)\n    collapsed_path_components = []\n    dot = self._matching_string(path, '.')\n    dotdot = self._matching_string(path, '..')\n    for component in path_components:\n        if ((not component) or (component == dot)):\n            continue\n        if (component == dotdot):\n            if (collapsed_path_components and (collapsed_path_components[(- 1)] != dotdot)):\n                collapsed_path_components.pop()\n                continue\n            elif is_absolute_path:\n                continue\n        collapsed_path_components.append(component)\n    collapsed_path = sep.join(collapsed_path_components)\n    if is_absolute_path:\n        collapsed_path = (sep + collapsed_path)\n    return ((drive + collapsed_path) or dot)", "docstring": "Mimic os.path.normpath using the specified path_separator.\n\nMimics os.path.normpath using the path_separator that was specified\nfor this FakeFilesystem. Normalizes the path, but unlike the method\nabsnormpath, does not make it absolute.  Eliminates dot components\n(. and ..) and combines repeated path separators (//).  Initial ..\ncomponents are left in place for relative paths.\nIf the result is an empty path, '.' is returned instead.\n\nThis also replaces alternative path separator with path separator.\nThat is, it behaves like the real os.path.normpath on Windows if\ninitialized with '\\\\' as path separator and  '/' as alternative\nseparator.\n\nArgs:\npath:  (str) The path to normalize.\n\nReturns:\n(str) A copy of path with empty components and dot components\nremoved.", "source": "codesearchnet"}
{"code": "def load_from_file(filepath, format_=FileFormat.py, update_data_callback=None,\n                   disable_memcache=False):\n    \n    filepath = os.path.realpath(filepath)\n    cache_filepath = file_cache.get(filepath)\n\n    if cache_filepath:\n        \n        \n        return _load_file(filepath=cache_filepath,\n                          format_=format_,\n                          update_data_callback=update_data_callback,\n                          original_filepath=filepath)\n    elif disable_memcache:\n        return _load_file(filepath=filepath,\n                          format_=format_,\n                          update_data_callback=update_data_callback)\n    else:\n        return _load_from_file(filepath=filepath,\n                               format_=format_,\n                               update_data_callback=update_data_callback)", "docstring": "Load data from a file.\n\nNote:\nAny functions from a .py file will be converted to `SourceCode` objects.\n\nArgs:\nfilepath (str): File to load.\nformat_ (`FileFormat`): Format of file contents.\nupdate_data_callback (callable): Used to change data before it is\nreturned or cached.\ndisable_memcache (bool): If True, don't r/w to memcache.\n\nReturns:\ndict.", "source": "juraj-google-style"}
{"code": "def package_in_memory(cls, workflow_name, workflow_files):\n    s = StringIO()\n    p = cls(s, workflow_name, meta_data=[])\n    p.add_bpmn_files_by_glob(workflow_files)\n    p.create_package()\n    return s.getvalue()", "docstring": "Generates wf packages from workflow diagrams.\n\nArgs:\nworkflow_name: Name of wf\nworkflow_files:  Diagram  file.\n\nReturns:\nWorkflow package (file like) object", "source": "codesearchnet"}
{"code": "def find_call(self, path, method):\n        \n        if not path.endswith('/'):\n            path += '/'\n        path = path.split('/')[1:]\n        return self._recursive_route_match(self._routes, path, method, [])", "docstring": "Find callable for the specified URL path and HTTP method.\n\nArgs:\npath (:obj:`str`): URL path to match\nmethod (:obj:`str`): HTTP method\n\nNote:\nA trailing '/' is always assumed in the path.", "source": "juraj-google-style"}
{"code": "def get_path_from_query_string(req):\n    if (req.args.get('path') is None):\n        raise exceptions.UserError('Path not found in query string')\n    return req.args.get('path')", "docstring": "Gets path from query string\n\nArgs:\nreq (flask.request): Request object from Flask\n\nReturns:\npath (str): Value of \"path\" parameter from query string\n\nRaises:\nexceptions.UserError: If \"path\" is not found in query string", "source": "codesearchnet"}
{"code": "def _shapes(tensor_list_list, shapes, enqueue_many):\n    if shapes is None:\n        len0 = len(tensor_list_list[0])\n        for tl in tensor_list_list:\n            for i in range(len0):\n                if tl[i].shape.ndims is None:\n                    raise ValueError(\"Cannot infer Tensor's rank: %s\" % tl[i])\n        shapes = [_merge_shapes([tl[i].shape.as_list() for tl in tensor_list_list], enqueue_many) for i in range(len0)]\n    return shapes", "docstring": "Calculate and merge the shapes of incoming tensors.\n\nArgs:\ntensor_list_list: List of tensor lists.\nshapes: List of shape tuples corresponding to tensors within the lists.\nenqueue_many: Boolean describing whether shapes will be enqueued as\nbatches or individual entries.\n\nReturns:\nA list of shapes aggregating shape inference info from `tensor_list_list`,\nor returning `shapes` if it is not `None`.\n\nRaises:\nValueError: If any of the inferred shapes in `tensor_list_list` lack a\nwell defined rank.", "source": "github-repos"}
{"code": "def _get_access_from_refresh(self) -> Tuple[str, float]:\n        \n        headers = self._get_authorization_headers()\n        data = {\n            'grant_type': 'refresh_token',\n            'refresh_token': self.refresh_token\n        }\n        r = self.session.post(self.TOKEN_URL, headers=headers, data=data)\n        response_data = r.json()\n        return (response_data['access_token'], response_data['expires_in'])", "docstring": "Uses the stored refresh token to get a new access token.\n\nThis method assumes that the refresh token exists.\n\nArgs:\nNone\n\nReturns:\nnew access token and expiration time (from now)", "source": "juraj-google-style"}
{"code": "def __init__(self, message, exc=None):\n    \n    super(WorkerError, self).__init__()\n    self.msg = message\n    self.exc = exc", "docstring": "Initializes WorkerError.\n\nArgs:\nmessage: error message\nexc: optional underlying exception.", "source": "juraj-google-style"}
{"code": "def _emit_with_loc(self, op_str, node=None):\n    loc = ''\n    if node:\n        loc = self._create_mlir_loc(anno.getanno(node, anno.Basic.ORIGIN, default=None))\n    self.emit(op_str + ' ' + loc)", "docstring": "Emit the mlir operation with the location associated with the node.\n\nArgs:\nop_str: The mlir operation string to be emitted.\nnode: The node of the AST tree, the mlir operation translated from.", "source": "github-repos"}
{"code": "def balance(self, as_of=None, raw=False, leg_query=None, **kwargs):\n    balances = [account.simple_balance(as_of=as_of, raw=raw, leg_query=leg_query, **kwargs) for account in self.get_descendants(include_self=True)]\n    return sum(balances, Balance())", "docstring": "Get the balance for this account, including child accounts\n\nArgs:\nas_of (Date): Only include transactions on or before this date\nraw (bool): If true the returned balance should not have its sign\nadjusted for display purposes.\nkwargs (dict): Will be used to filter the transaction legs\n\nReturns:\nBalance\n\nSee Also:\n:meth:`simple_balance()`", "source": "codesearchnet"}
{"code": "def snyder_ac(self, structure):\n        \n        nsites = structure.num_sites\n        volume = structure.volume\n        natoms = structure.composition.num_atoms\n        num_density = 1e30 * nsites / volume\n        tot_mass = sum([e.atomic_mass for e in structure.species])\n        avg_mass = 1.6605e-27 * tot_mass / natoms\n        return 0.38483*avg_mass * \\\n            ((self.long_v(structure) + 2.*self.trans_v(structure))/3.) ** 3.\\\n            / (300.*num_density ** (-2./3.) * nsites ** (1./3.))", "docstring": "Calculates Snyder's acoustic sound velocity (in SI units)\n\nArgs:\nstructure: pymatgen structure object\n\nReturns: Snyder's acoustic sound velocity (in SI units)", "source": "juraj-google-style"}
{"code": "def error_messages(self, driver_id=None):\n    if (driver_id is not None):\n        assert isinstance(driver_id, ray.DriverID)\n        return self._error_messages(driver_id)\n    error_table_keys = self.redis_client.keys((ray.gcs_utils.TablePrefix_ERROR_INFO_string + '*'))\n    driver_ids = [key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):] for key in error_table_keys]\n    return {binary_to_hex(driver_id): self._error_messages(ray.DriverID(driver_id)) for driver_id in driver_ids}", "docstring": "Get the error messages for all drivers or a specific driver.\n\nArgs:\ndriver_id: The specific driver to get the errors for. If this is\nNone, then this method retrieves the errors for all drivers.\n\nReturns:\nA dictionary mapping driver ID to a list of the error messages for\nthat driver.", "source": "codesearchnet"}
{"code": "def ms_to_times(ms):\n    \n    ms = int(round(ms))\n    h, ms = divmod(ms, 3600000)\n    m, ms = divmod(ms, 60000)\n    s, ms = divmod(ms, 1000)\n    return Times(h, m, s, ms)", "docstring": "Convert milliseconds to normalized tuple (h, m, s, ms).\n\nArguments:\nms: Number of milliseconds (may be int, float or other numeric class).\nShould be non-negative.\n\nReturns:\nNamed tuple (h, m, s, ms) of ints.\nInvariants: ``ms in range(1000) and s in range(60) and m in range(60)``", "source": "juraj-google-style"}
{"code": "def json(self, ondemand=False):\n    self._request_entity = 'indicator'\n    self._request_uri = '{}/{}'.format(self._api_uri, 'json')\n    self._stream = True\n    if ondemand:\n        self._request.add_payload('runNow', True)", "docstring": "Update request URI to return JSON data.\n\nFor onDemand bulk generation to work it must first be enabled in the\nThreatConnect platform under System settings.\n\nArgs:\nondemand (boolean): Enable on demand bulk generation.", "source": "codesearchnet"}
{"code": "def add(self, obj):\n        \n        if not isinstance(obj, dict):\n            raise TypeError(\"Add object should be a dict object\")\n        obj = self.validation(obj)\n        obj[\"id\"] = self.maxId + 1\n        obj = self._cast_model(obj)\n        self.model.db.append(obj)\n\n        if not self._batch.enable.is_set():\n            self.model.save_db()\n        return obj", "docstring": "Add a object\nArgs:\nObject: Object will be added\nReturns:\nObject: Object with id\nRaises:\nTypeError: If add object is not a dict\nMultipleInvalid: If input object is invaild", "source": "juraj-google-style"}
{"code": "def infer_from_frame_stack(self, ob_stack):\n    \n    logits, vf = self.sess.run([self.logits_t, self.value_function_t],\n                               feed_dict={self.obs_t: ob_stack})\n    return logits, vf", "docstring": "Infer policy from stack of observations.\n\nArgs:\nob_stack: array of shape (1, frame_stack_size, height, width, channels)\n\nReturns:\nlogits and vf.", "source": "juraj-google-style"}
{"code": "def get_batch_strategy_instance(strategy, splitter):\n    \n    if strategy == 'SingleRecord':\n        return SingleRecordStrategy(splitter)\n    elif strategy == 'MultiRecord':\n        return MultiRecordStrategy(splitter)\n    else:\n        raise ValueError('Invalid Batch Strategy: %s - Valid Strategies: \"SingleRecord\", \"MultiRecord\"')", "docstring": "Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy`\n\nArgs:\nstrategy (str): Either 'SingleRecord' or 'MultiRecord'\nsplitter (:class:`sagemaker.local.data.Splitter): splitter to get the data from.\n\nReturns\n:class:`sagemaker.local.data.BatchStrategy`: an Instance of a BatchStrategy", "source": "juraj-google-style"}
{"code": "def get_meshes_fld(step, var):\n    \n    fld = step.fields[var]\n    if step.geom.twod_xz:\n        xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]\n        fld = fld[:, 0, :, 0]\n    elif step.geom.cartesian and step.geom.twod_yz:\n        xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]\n        fld = fld[0, :, :, 0]\n    else:  \n        xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]\n        fld = fld[0, :, :, 0]\n    return xmesh, ymesh, fld", "docstring": "Return scalar field along with coordinates meshes.\n\nOnly works properly in 2D geometry.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): scalar field name.\nReturns:\ntuple of :class:`numpy.array`: xmesh, ymesh, fld\n2D arrays containing respectively the x position, y position, and\nthe value of the requested field.", "source": "juraj-google-style"}
{"code": "def __del__(self):\n        \n        if self._initialized:\n            if self.connected():\n                if self.swo_enabled():\n                    self.swo_stop()\n\n            if self.opened():\n                self.close()", "docstring": "Destructor for the ``JLink`` instance.  Closes the J-Link connection\nif one exists.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def __init__(self, datastore_client, storage_client, round_name):\n    \n    self._datastore_client = datastore_client\n    self._storage_client = storage_client\n    self._round_name = round_name\n    \n    \n    \n    self._attacks = None\n    self._targeted_attacks = None\n    self._defenses = None", "docstring": "Initializes CompetitionSubmissions.\n\nArgs:\ndatastore_client: instance of CompetitionDatastoreClient\nstorage_client: instance of CompetitionStorageClient\nround_name: name of the round", "source": "juraj-google-style"}
{"code": "def InternalSendApdu(self, apdu_to_send):\n    response = None\n    if (not self.use_legacy_format):\n        response = apdu.ResponseApdu(self.transport.SendMsgBytes(apdu_to_send.ToByteArray()))\n        if ((response.sw1 == 103) and (response.sw2 == 0)):\n            self.use_legacy_format = True\n            return self.InternalSendApdu(apdu_to_send)\n    else:\n        response = apdu.ResponseApdu(self.transport.SendMsgBytes(apdu_to_send.ToLegacyU2FByteArray()))\n    return response", "docstring": "Send an APDU to the device.\n\nSends an APDU to the device, possibly falling back to the legacy\nencoding format that is not ISO7816-4 compatible.\n\nArgs:\napdu_to_send: The CommandApdu object to send\n\nReturns:\nThe ResponseApdu object constructed out of the devices reply.", "source": "codesearchnet"}
{"code": "def generate_skip_gram_data_set(self, token_list):\n        \n        n_gram_tuple_zip = self.generate_tuple_zip(token_list, 3)\n        skip_gram_list = []\n        for pre, point, post in n_gram_tuple_zip:\n            skip_gram_list.append((point, pre))\n            skip_gram_list.append((point, post))\n        return zip(skip_gram_list)", "docstring": "Generate the Skip-gram's pair.\n\nArgs:\ntoken_list:     The list of tokens.\n\nReturns:\nzip of Tuple(Training N-gram data, Target N-gram data)", "source": "juraj-google-style"}
{"code": "def __find_incongruities(self, op, index):\n        \n        if len(self) == 1:\n            return\n\n        hits = []\n        intervals = []\n\n        if self.order == 'depth':\n            one, two = 'base', 'top'\n        else:\n            one, two = 'top', 'base'\n\n        for i, iv in enumerate(self[:-1]):\n            next_iv = self[i+1]\n            if op(getattr(iv, one), getattr(next_iv, two)):\n                hits.append(i)\n\n                top = getattr(iv, one)\n                base = getattr(next_iv, two)\n                iv_gap = Interval(top, base)\n                intervals.append(iv_gap)\n\n        if index and hits:\n            return hits\n        elif intervals:\n            return Striplog(intervals)\n        else:\n            return", "docstring": "Private method. Finds gaps and overlaps in a striplog. Called by\nfind_gaps() and find_overlaps().\n\nArgs:\nop (operator): ``operator.gt`` or ``operator.lt``\nindex (bool): If ``True``, returns indices of intervals with\ngaps after them.\n\nReturns:\nStriplog: A striplog of all the gaps. A sort of anti-striplog.", "source": "juraj-google-style"}
{"code": "def tv_list(self, **kwargs):\n    path = self._get_path('tv_list')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of TV genres.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def __new__(cls, *args, **kwargs) -> Any:\n    dynamic_evaluate_fn = get_dynamic_evaluate_fn()\n    if dynamic_evaluate_fn is None:\n        return super().__new__(cls)\n    else:\n        hyper_value = object.__new__(cls)\n        cls.__init__(hyper_value, *args, **kwargs)\n        return dynamic_evaluate_fn(hyper_value)", "docstring": "Overrides __new__ for supporting dynamic evaluation mode.\n\nArgs:\n*args: Positional arguments passed to init the custom hyper.\n**kwargs: Keyword arguments passed to init the custom hyper.\n\nReturns:\nA dynamic evaluated value according to current `dynamic_evaluate` context.", "source": "github-repos"}
{"code": "def __init__(self, hunt_obj, runner_args=None, token=None):\n    \n    self.token = token or hunt_obj.token\n\n    self.queue_manager = queue_manager.QueueManager(token=self.token)\n\n    self.outbound_lock = threading.Lock()\n    self.hunt_obj = hunt_obj\n\n    \n    if runner_args is not None:\n      self.runner_args = runner_args\n      self.session_id = self.GetNewSessionID()\n      self.hunt_obj.urn = self.session_id\n\n      \n      self.context = self.InitializeContext(runner_args)\n      self.hunt_obj.context = self.context\n      self.context.session_id = self.session_id\n\n    else:\n      \n      \n      \n      self.context = self.hunt_obj.context\n\n      self.runner_args = self.hunt_obj.runner_args\n\n    \n    self.hunt_obj.urn = self.session_id = self.context.session_id", "docstring": "Constructor for the Hunt Runner.\n\nArgs:\nhunt_obj: The hunt object this runner will run states for.\nrunner_args: A HuntRunnerArgs() instance containing initial values. If not\nspecified, we use the runner_args from the hunt_obj.\ntoken: An instance of access_control.ACLToken security token.", "source": "juraj-google-style"}
{"code": "def compute(i, tas):\n    elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]\n    elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature)\n    elems_value = elems_unflatten(elems_value_flat)\n    ag_ctx = autograph_ctx.control_status_ctx()\n    autographed_fn = autograph.tf_convert(fn, ag_ctx)\n    result_value = autographed_fn(elems_value)\n    nest.assert_same_structure(fn_output_signature or elems, result_value)\n    result_value_flat = nest.flatten(result_value)\n    result_value_batchable = _result_value_flat_to_batchable(result_value_flat, result_flat_signature)\n    tas = [ta.write(i, value) for ta, value in zip(tas, result_value_batchable)]\n    return (i + 1, tas)", "docstring": "The loop body of map_fn.\n\nArgs:\ni: the loop counter\ntas: the flat TensorArray accumulator list\n\nReturns:\n(i + 1, tas): the updated counter + updated TensorArrays\n\nRaises:\nTypeError: if fn_output_signature and result_value structure don't match\nValueType: if fn_output_signature and result_value lengths don't match", "source": "github-repos"}
{"code": "def loads(text):\n    if text.startswith('CCSDS_OEM_VERS'):\n        func = _read_oem\n    elif text.startswith('CCSDS_OPM_VERS'):\n        func = _read_opm\n    else:\n        raise ValueError('Unknown CCSDS type')\n    return func(text)", "docstring": "Read CCSDS from a string, and provide the beyond class corresponding;\nOrbit or list of Orbit if it's an OPM, Ephem if it's an OEM.\n\nArgs:\ntext (str):\nReturn:\nOrbit or Ephem\nRaise:\nValueError: when the text is not a recognizable CCSDS format", "source": "codesearchnet"}
{"code": "def __init__(self, fraction):\n        \n        self.fraction = fraction\n        super().__init__('Fraction should be in (0,1] (received {})'\n                         .format(fraction))", "docstring": "Initialization of instances:\n\nArgs:\nfraction (float): the invalid fraction.\n\nAttributes:\nfraction (float): the invalid fraction.", "source": "juraj-google-style"}
{"code": "def prompt_for_test_start(\n    message='Enter a DUT ID in order to start the test.', timeout_s=60*60*24,\n    validator=lambda sn: sn, cli_color=''):\n  \n\n  @PhaseOptions(timeout_s=timeout_s)\n  @plugs.plug(prompts=UserInput)\n  def trigger_phase(test, prompts):\n    \n    dut_id = prompts.prompt(\n        message, text_input=True, timeout_s=timeout_s, cli_color=cli_color)\n    test.test_record.dut_id = validator(dut_id)\n\n  return trigger_phase", "docstring": "Returns an OpenHTF phase for use as a prompt-based start trigger.\n\nArgs:\nmessage: The message to display to the user.\ntimeout_s: Seconds to wait before raising a PromptUnansweredError.\nvalidator: Function used to validate or modify the serial number.\ncli_color: An ANSI color code, or the empty string.", "source": "juraj-google-style"}
{"code": "def _Replacement(node):\n    value = node.id\n    if value in ('True', 'False', 'None'):\n        return node\n    return _StrNode(value)", "docstring": "Returns a node to use in place of the supplied node in the AST.\n\nArgs:\nnode: A node of type Name. Could be a variable, or builtin constant.\nReturns:\nA node to use in place of the supplied Node. Either the same node, or a\nString node whose value matches the Name node's id.", "source": "github-repos"}
{"code": "def FromStream(cls, stream):\n    if stream.system:\n        specifier = DataStreamSelector.MatchSystemOnly\n    else:\n        specifier = DataStreamSelector.MatchUserOnly\n    return DataStreamSelector(stream.stream_type, stream.stream_id, specifier)", "docstring": "Create a DataStreamSelector from a DataStream.\n\nArgs:\nstream (DataStream): The data stream that we want to convert.", "source": "codesearchnet"}
{"code": "def deepcopy_dict(data):\n    \n    try:\n        return copy.deepcopy(data)\n    except TypeError:\n        copied_data = {}\n        for key, value in data.items():\n            if isinstance(value, dict):\n                copied_data[key] = deepcopy_dict(value)\n            else:\n                try:\n                    copied_data[key] = copy.deepcopy(value)\n                except TypeError:\n                    copied_data[key] = value\n\n        return copied_data", "docstring": "deepcopy dict data, ignore file object (_io.BufferedReader)\n\nArgs:\ndata (dict): dict data structure\n{\n'a': 1,\n'b': [2, 4],\n'c': lambda x: x+1,\n'd': open('LICENSE'),\n'f': {\n'f1': {'a1': 2},\n'f2': io.open('LICENSE', 'rb'),\n}\n}\n\nReturns:\ndict: deep copied dict data, with file object unchanged.", "source": "juraj-google-style"}
{"code": "def flatten(inputs, name=None, data_format='channels_last'):\n    warnings.warn('`tf.layers.flatten` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Flatten` instead.')\n    layer = Flatten(name=name, data_format=data_format)\n    return layer.apply(inputs)", "docstring": "Flattens an input tensor while preserving the batch axis (axis 0).\n\nArgs:\ninputs: Tensor input.\nname: The name of the layer (string).\ndata_format: A string, one of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, height, width, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, height, width)`.\n\nReturns:\nReshaped tensor.\n\nExamples:\n\n```\nx = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')\ny = flatten(x)\n# now `y` has shape `(None, 16)`\n\nx = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')\ny = flatten(x)\n# now `y` has shape `(None, None)`\n```", "source": "github-repos"}
{"code": "def validate_word(self, word):\n    while word:\n        match = self.seg_regex.match(word)\n        if match:\n            word = word[len(match.group(0)):]\n        else:\n            return False\n    return True", "docstring": "Returns True if `word` consists exhaustively of valid IPA segments\n\nArgs:\nword (unicode): input word as Unicode IPA string\n\nReturns:\nbool: True if `word` can be divided exhaustively into IPA segments\nthat exist in the database", "source": "codesearchnet"}
{"code": "def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=FLAGS):\n\n    def validate_mutual_exclusion(flags_dict):\n        flag_count = sum((1 for val in flags_dict.values() if (val is not None)))\n        if ((flag_count == 1) or ((not required) and (flag_count == 0))):\n            return True\n        message = ('%s one of (%s) must be specified.' % (('Exactly' if required else 'At most'), ', '.join(flag_names)))\n        raise ValidationError(message)\n    register_multi_flags_validator(flag_names, validate_mutual_exclusion, flag_values=flag_values)", "docstring": "Ensures that only one flag among flag_names is set.\n\nArgs:\nflag_names: [str], a list of the flag names to be checked.\nrequired: Boolean, if set, exactly one of the flags must be set.\nOtherwise, it is also valid for none of the flags to be set.\nflag_values: An optional FlagValues instance to validate against.", "source": "codesearchnet"}
{"code": "def get_name(node):\n  \n  if isinstance(node, gast.Name):\n    return node.id\n  elif isinstance(node, (gast.Subscript, gast.Attribute)):\n    return get_name(node.value)\n  else:\n    raise TypeError", "docstring": "Get the name of a variable.\n\nArgs:\nnode: A `Name`, `Subscript` or `Attribute` node.\n\nReturns:\nThe name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.", "source": "juraj-google-style"}
{"code": "def _create_complete_graph(node_ids):\n    g = nx.Graph()\n    g.add_nodes_from(node_ids)\n    for (i, j) in combinations(node_ids, 2):\n        g.add_edge(i, j)\n    return g", "docstring": "Create a complete graph from the list of node ids.\n\nArgs:\nnode_ids: a list of node ids\n\nReturns:\nAn undirected graph (as a networkx.Graph)", "source": "codesearchnet"}
{"code": "def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None,\n                tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program':\n        \n\n        if type(varyings) is str:\n            varyings = (varyings,)\n\n        varyings = tuple(varyings)\n\n        res = Program.__new__(Program)\n        res.mglo, ls1, ls2, ls3, ls4, ls5, res._subroutines, res._geom, res._glo = self.mglo.program(\n            vertex_shader, fragment_shader, geometry_shader, tess_control_shader, tess_evaluation_shader,\n            varyings\n        )\n\n        members = {}\n\n        for item in ls1:\n            obj = Attribute.__new__(Attribute)\n            obj.mglo, obj._location, obj._array_length, obj._dimension, obj._shape, obj._name = item\n            members[obj.name] = obj\n\n        for item in ls2:\n            obj = Varying.__new__(Varying)\n            obj._number, obj._array_length, obj._dimension, obj._name = item\n            members[obj.name] = obj\n\n        for item in ls3:\n            obj = Uniform.__new__(Uniform)\n            obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item\n            members[obj.name] = obj\n\n        for item in ls4:\n            obj = UniformBlock.__new__(UniformBlock)\n            obj.mglo, obj._index, obj._size, obj._name = item\n            members[obj.name] = obj\n\n        for item in ls5:\n            obj = Subroutine.__new__(Subroutine)\n            obj._index, obj._name = item\n            members[obj.name] = obj\n\n        res._members = members\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Create a :py:class:`Program` object.\n\nOnly linked programs will be returned.\n\nA single shader in the `shaders` parameter is also accepted.\nThe varyings are only used when a transform program is created.\n\nArgs:\nshaders (list): A list of :py:class:`Shader` objects.\nvaryings (list): A list of varying names.\n\nReturns:\n:py:class:`Program` object", "source": "juraj-google-style"}
{"code": "def pb(name, data, bucket_count=None, display_name=None, description=None):\n    import tensorflow.compat.v1 as tf\n    if (bucket_count is None):\n        bucket_count = summary_v2.DEFAULT_BUCKET_COUNT\n    data = np.array(data).flatten().astype(float)\n    if (data.size == 0):\n        buckets = np.array([]).reshape((0, 3))\n    else:\n        min_ = np.min(data)\n        max_ = np.max(data)\n        range_ = (max_ - min_)\n        if (range_ == 0):\n            center = min_\n            buckets = np.array([[(center - 0.5), (center + 0.5), float(data.size)]])\n        else:\n            bucket_width = (range_ / bucket_count)\n            offsets = (data - min_)\n            bucket_indices = np.floor((offsets / bucket_width)).astype(int)\n            clamped_indices = np.minimum(bucket_indices, (bucket_count - 1))\n            one_hots = (np.array([clamped_indices]).transpose() == np.arange(0, bucket_count))\n            assert (one_hots.shape == (data.size, bucket_count)), (one_hots.shape, (data.size, bucket_count))\n            bucket_counts = np.sum(one_hots, axis=0)\n            edges = np.linspace(min_, max_, (bucket_count + 1))\n            left_edges = edges[:(- 1)]\n            right_edges = edges[1:]\n            buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()\n    tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)\n    if (display_name is None):\n        display_name = name\n    summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)\n    tf_summary_metadata = tf.SummaryMetadata.FromString(summary_metadata.SerializeToString())\n    summary = tf.Summary()\n    summary.value.add(tag=('%s/histogram_summary' % name), metadata=tf_summary_metadata, tensor=tensor)\n    return summary", "docstring": "Create a legacy histogram summary protobuf.\n\nArguments:\nname: A unique name for the generated summary, including any desired\nname scopes.\ndata: A `np.array` or array-like form of any shape. Must have type\ncastable to `float`.\nbucket_count: Optional positive `int`. The output will have this\nmany buckets, except in two edge cases. If there is no data, then\nthere are no buckets. If there is data but all points have the\nsame value, then there is one bucket whose left and right\nendpoints are the same.\ndisplay_name: Optional name for this summary in TensorBoard, as a\n`str`. Defaults to `name`.\ndescription: Optional long-form description for this summary, as a\n`str`. Markdown is supported. Defaults to empty.\n\nReturns:\nA `tf.Summary` protobuf object.", "source": "codesearchnet"}
{"code": "def _validate(self):\n    if self.tuple_shapes is not None:\n        for policy, shape in zip(self._sharding_policies, self._tuple_shapes):\n            _ = policy.get_sharded_shape(shape)", "docstring": "Checks that the configuration is self-consistent.\n\nRaises:\nValueError: if the shapes and sharding policies don't match.", "source": "github-repos"}
{"code": "def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs):\n    options = {'log_level': log_level}\n    if (vcodec is None):\n        options['vcodec'] = 'copy'\n    if (acodec is None):\n        options['acodec'] = 'copy'\n    if start:\n        options['ss'] = start\n    else:\n        start = 0\n    if end:\n        options['t'] = (end - start)\n    convert_video(in_file, out_file, print_cmd, **options)", "docstring": "Cut a clip from a video.\n\nArgs:\nin_file (str): Input video filename.\nout_file (str): Output video filename.\nstart (None or float): Start time (in seconds).\nend (None or float): End time (in seconds).\nvcodec (None or str): Output video codec, None for unchanged.\nacodec (None or str): Output audio codec, None for unchanged.\nlog_level (str): Logging level of ffmpeg.\nprint_cmd (bool): Whether to print the final ffmpeg command.", "source": "codesearchnet"}
{"code": "def GetBalance(self, asset_id, watch_only=0):\n        \n        total = Fixed8(0)\n\n        if type(asset_id) is NEP5Token.NEP5Token:\n            return self.GetTokenBalance(asset_id, watch_only)\n\n        for coin in self.GetCoins():\n            if coin.Output.AssetId == asset_id:\n                if coin.State & CoinState.Confirmed > 0 and \\\n                        coin.State & CoinState.Spent == 0 and \\\n                        coin.State & CoinState.Locked == 0 and \\\n                        coin.State & CoinState.Frozen == 0 and \\\n                        coin.State & CoinState.WatchOnly == watch_only:\n                    total = total + coin.Output.Value\n\n        return total", "docstring": "Get the balance of a specific token by its asset id.\n\nArgs:\nasset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.\nwatch_only (bool): True, to limit to watch only wallets.\n\nReturns:\nFixed8: total balance.", "source": "juraj-google-style"}
{"code": "def _WriteCacheFile(self, cache_filename, scopes):\n        \n        \n        creds = {'scopes': sorted(list(scopes)),\n                 'svc_acct_name': self.__service_account_name}\n        creds_str = json.dumps(creds)\n        cache_file = _MultiProcessCacheFile(cache_filename)\n        try:\n            cache_file.LockedWrite(creds_str)\n        except KeyboardInterrupt:\n            raise\n        except:  \n            \n            pass", "docstring": "Writes the credential metadata to the cache file.\n\nThis does not save the credentials themselves (CredentialStore class\noptionally handles that after this class is initialized).\n\nArgs:\ncache_filename: Cache filename to check.\nscopes: Scopes for the desired credentials.", "source": "juraj-google-style"}
{"code": "def BuildAdGroupCriterionOperations(adgroup_operations, number_of_keywords=1):\n  \n  criterion_operations = [\n      {\n          \n          \n          \n          \n          \n          'xsi_type': 'AdGroupCriterionOperation',\n          'operand': {\n              'xsi_type': 'BiddableAdGroupCriterion',\n              'adGroupId': adgroup_operation['operand']['id'],\n              'criterion': {\n                  'xsi_type': 'Keyword',\n                  \n                  'text': 'mars%s%s' % (i, '!!!' if i % 2 == 0 else ''),\n                  'matchType': 'BROAD'\n              }\n          },\n          'operator': 'ADD'\n      }\n      for adgroup_operation in adgroup_operations\n      for i in range(number_of_keywords)]\n\n  return criterion_operations", "docstring": "Builds the operations adding a Keyword Criterion to each AdGroup.\n\nArgs:\nadgroup_operations: a list containing the operations that will add AdGroups.\nnumber_of_keywords: an int defining the number of Keywords to be created.\n\nReturns:\na list containing the operations that will create a new Keyword Criterion\nassociated with each provided AdGroup.", "source": "juraj-google-style"}
{"code": "def __init__(self, AssetId=None, Value=None, script_hash=None):\n        \n        super(TransactionOutput, self).__init__()\n        self.AssetId = AssetId\n        self.Value = Value\n        self.ScriptHash = script_hash", "docstring": "Create an instance.\n\nArgs:\nAssetId (UInt256):\nValue (Fixed8):\nscript_hash (UInt160):", "source": "juraj-google-style"}
{"code": "def GetDecrypter(cls, encryption_method, **kwargs):\n    \n    encryption_method = encryption_method.lower()\n    decrypter = cls._decrypters.get(encryption_method, None)\n    if not decrypter:\n      return None\n\n    return decrypter(**kwargs)", "docstring": "Retrieves the decrypter object for a specific encryption method.\n\nArgs:\nencryption_method (str): encryption method identifier.\nkwargs (dict): keyword arguments depending on the decrypter.\n\nReturns:\nDecrypter: decrypter or None if the encryption method does not exists.\n\nRaises:\nCredentialError: if the necessary credentials are missing.", "source": "juraj-google-style"}
{"code": "def list_worker_processes(apppool):\n    ps_cmd = ['Get-ChildItem', \"'IIS:\\\\AppPools\\\\{0}\\\\WorkerProcesses'\".format(apppool)]\n    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n    try:\n        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n    except ValueError:\n        raise CommandExecutionError('Unable to parse return data as Json.')\n    ret = dict()\n    for item in items:\n        ret[item['processId']] = item['appPoolName']\n    if (not ret):\n        log.warning('No backups found in output: %s', cmd_ret)\n    return ret", "docstring": "Returns a list of worker processes that correspond to the passed\napplication pool.\n\n.. versionadded:: 2017.7.0\n\nArgs:\napppool (str): The application pool to query\n\nReturns:\ndict: A dictionary of worker processes with their process IDs\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_worker_processes 'My App Pool'", "source": "codesearchnet"}
{"code": "def get_min_max_value(self) -> tuple[float, float]:\n    if self._num_bins > 512:\n        logging.warning('num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests all histogram mid value pairs, so it may take a long time.', self._num_bins)\n    mse_min = (float('inf'), float('inf'), float('inf'))\n    for left, right in itertools.combinations(range(self._num_bins), 2):\n        quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right])\n        mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max)\n        mse_min = min(mse_tuple, mse_min)\n    min_value, max_value = (mse_min[1], mse_min[2])\n    return (min_value, max_value)", "docstring": "Finds the optimal quant_min and quant_max by testing all possible cases.\n\nIt guarantees optimal quant_min and quant_max for the representative\ndataset, but not for the test dataset.\n\nReturns:\n(min_value, max_value): Min and max calculated using\nHistogramMseBruteforce.", "source": "github-repos"}
{"code": "def prepare_axes(axes, title, size, cmap=None):\n    if (axes is None):\n        return None\n    axes.set_xlim([0, size[1]])\n    axes.set_ylim([size[0], 0])\n    axes.set_aspect('equal')\n    axes.axis('off')\n    if isinstance(cmap, str):\n        title = '{} (cmap: {})'.format(title, cmap)\n    axes.set_title(title)\n    axes_image = image.AxesImage(axes, cmap=cmap, extent=(0, size[1], size[0], 0))\n    axes_image.set_data(np.random.random((size[0], size[1], 3)))\n    axes.add_image(axes_image)\n    return axes_image", "docstring": "Prepares an axes object for clean plotting.\n\nRemoves x and y axes labels and ticks, sets the aspect ratio to be\nequal, uses the size to determine the drawing area and fills the image\nwith random colors as visual feedback.\n\nCreates an AxesImage to be shown inside the axes object and sets the\nneeded properties.\n\nArgs:\naxes:  The axes object to modify.\ntitle: The title.\nsize:  The size of the expected image.\ncmap:  The colormap if a custom color map is needed.\n(Default: None)\nReturns:\nThe AxesImage's handle.", "source": "codesearchnet"}
{"code": "def inject_params(self, params):\n        \n\n        for arg, value in params.items():\n            cli_arg = '--{}'.format(arg)\n            if cli_arg in sys.argv:\n                \n                self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg))\n                continue\n\n            \n            \n            \n            param_data = self.tcex.install_json_params.get(arg) or {}\n            if param_data.get('type', '').lower() == 'multichoice':\n                \n                \n                value = value.split('|')\n            elif param_data.get('type', '').lower() == 'boolean':\n                \n                value = self.tcex.utils.to_bool(value)\n            elif arg in self.tc_bool_args:\n                value = self.tcex.utils.to_bool(value)\n\n            if isinstance(value, (bool)):\n                \n                if value is True:\n                    sys.argv.append(cli_arg)\n            elif isinstance(value, (list)):\n                for mcv in value:\n                    sys.argv.append('{}={}'.format(cli_arg, mcv))\n            else:\n                sys.argv.append('{}={}'.format(cli_arg, value))\n\n        \n        self._default_args, unknown = self.parser.parse_known_args()  \n\n        \n        self.tcex._logger()", "docstring": "Inject params into sys.argv from secureParams API, AOT, or user provided.\n\nArgs:\nparams (dict): A dictionary containing all parameters that need to be injected as args.", "source": "juraj-google-style"}
{"code": "def patch_masks(patches: dict) -> None:\n    for patch in patches:\n        patch_mask(patch)", "docstring": "Wraps patch mask function for list of patches.\n\nModifies in place. Executes patch_mask for multiple patches.\n\nArgs:\npatches: A list of patch objects to annotate.", "source": "github-repos"}
{"code": "def _as_log_entry(self, name, now):\n    d = {u'http_response_code': self.response_code, u'timestamp': time.mktime(now.timetuple())}\n    severity = _SEVERITY.INFO\n    if (self.response_code >= 400):\n        severity = _SEVERITY.ERROR\n        d[u'error_cause'] = self.error_cause.name\n    if (self.request_size > 0):\n        d[u'request_size'] = self.request_size\n    if (self.response_size > 0):\n        d[u'response_size'] = self.response_size\n    if self.method:\n        d[u'http_method'] = self.method\n    if self.request_time:\n        d[u'request_latency_in_ms'] = (self.request_time.total_seconds() * 1000)\n    for key in self.COPYABLE_LOG_FIELDS:\n        value = getattr(self, key, None)\n        if value:\n            d[key] = value\n    return sc_messages.LogEntry(name=name, timestamp=timestamp.to_rfc3339(now), severity=severity, structPayload=_struct_payload_from(d))", "docstring": "Makes a `LogEntry` from this instance for the given log_name.\n\nArgs:\nrules (:class:`ReportingRules`): determines what labels, metrics and\nlogs to include in the report request.\nnow (:class:`datetime.DateTime`): the current time\n\nReturn:\na ``LogEntry`` generated from this instance with the given name\nand timestamp\n\nRaises:\nValueError: if the fields in this instance are insufficient to\nto create a valid ``ServicecontrolServicesReportRequest``", "source": "codesearchnet"}
{"code": "def _ParseLastRunTime(self, parser_mediator, fixed_length_section):\n    systemtime_struct = fixed_length_section.last_run_time\n    system_time_tuple = (systemtime_struct.year, systemtime_struct.month, systemtime_struct.weekday, systemtime_struct.day_of_month, systemtime_struct.hours, systemtime_struct.minutes, systemtime_struct.seconds, systemtime_struct.milliseconds)\n    date_time = None\n    if (system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE):\n        try:\n            date_time = dfdatetime_systemtime.Systemtime(system_time_tuple=system_time_tuple)\n        except ValueError:\n            parser_mediator.ProduceExtractionWarning('invalid last run time: {0!s}'.format(system_time_tuple))\n    return date_time", "docstring": "Parses the last run time from a fixed-length data section.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfixed_length_section (job_fixed_length_data_section): a Windows\nScheduled Task job fixed-length data section.\n\nReturns:\ndfdatetime.DateTimeValues: last run date and time or None if not\navailable.", "source": "codesearchnet"}
{"code": "def _get_default_configurable_parameter_values(fn, whitelist, blacklist):\n  \n  arg_vals = _ARG_DEFAULTS_CACHE.get(fn)\n  if arg_vals is not None:\n    return arg_vals.copy()\n\n  \n  arg_spec = _get_cached_arg_spec(fn)\n  if arg_spec.defaults:\n    default_kwarg_names = arg_spec.args[-len(arg_spec.defaults):]\n    arg_vals = dict(zip(default_kwarg_names, arg_spec.defaults))\n  else:\n    arg_vals = {}\n\n  if six.PY3 and arg_spec.kwonlydefaults:\n    arg_vals.update(arg_spec.kwonlydefaults)\n\n  \n  \n  for k in list(six.iterkeys(arg_vals)):\n    whitelist_fail = whitelist and k not in whitelist\n    blacklist_fail = blacklist and k in blacklist\n    representable = _is_literally_representable(arg_vals[k])\n    if whitelist_fail or blacklist_fail or not representable:\n      del arg_vals[k]\n\n  _ARG_DEFAULTS_CACHE[fn] = arg_vals\n  return arg_vals.copy()", "docstring": "Retrieve all default values for configurable parameters of a function.\n\nAny parameters included in the supplied blacklist, or not included in the\nsupplied whitelist, are excluded.\n\nArgs:\nfn: The function whose parameter values should be retrieved.\nwhitelist: The whitelist (or `None`) associated with the function.\nblacklist: The blacklist (or `None`) associated with the function.\n\nReturns:\nA dictionary mapping configurable parameter names to their default values.", "source": "juraj-google-style"}
{"code": "def _load_json_module():\n    first_import_error = None\n    for module_name in ['json', 'simplejson']:\n        try:\n            module = __import__(module_name, {}, {}, 'json')\n            if (not hasattr(module, 'JSONEncoder')):\n                message = ('json library \"%s\" is not compatible with ProtoRPC' % module_name)\n                logging.warning(message)\n                raise ImportError(message)\n            else:\n                return module\n        except ImportError as err:\n            if (not first_import_error):\n                first_import_error = err\n    logging.error('Must use valid json library (json or simplejson)')\n    raise first_import_error", "docstring": "Try to load a valid json module.\n\nThere are more than one json modules that might be installed.  They are\nmostly compatible with one another but some versions may be different.\nThis function attempts to load various json modules in a preferred order.\nIt does a basic check to guess if a loaded version of json is compatible.\n\nReturns:\nCompatible json module.\n\nRaises:\nImportError if there are no json modules or the loaded json module is\nnot compatible with ProtoRPC.", "source": "codesearchnet"}
{"code": "def get_logging_metric_hook(benchmark_log_dir=None, tensors_to_log=None, every_n_secs=600, **kwargs):\n    if (benchmark_log_dir is None):\n        raise ValueError('metric_log_dir should be provided to use metric logger')\n    if (tensors_to_log is None):\n        tensors_to_log = _TENSORS_TO_LOG\n    return metric_hook.LoggingMetricHook(tensors=tensors_to_log, log_dir=benchmark_log_dir, every_n_secs=every_n_secs)", "docstring": "Function to get LoggingMetricHook.\n\nArgs:\nbenchmark_log_dir: `string`, directory path to save the metric log.\ntensors_to_log: List of tensor names or dictionary mapping labels to tensor\nnames. If not set, log _TENSORS_TO_LOG by default.\nevery_n_secs: `int`, the frequency for logging the metric. Default to every\n10 mins.\n\nReturns:\nReturns a ProfilerHook that writes out timelines that can be loaded into\nprofiling tools like chrome://tracing.", "source": "codesearchnet"}
{"code": "def is_special_unitary(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:\n    return (is_unitary(matrix, rtol=rtol, atol=atol) and ((matrix.shape[0] == 0) or np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))", "docstring": "Determines if a matrix is approximately unitary with unit determinant.\n\nA matrix is special-unitary if it is square and its adjoint is its inverse\nand its determinant is one.\n\nArgs:\nmatrix: The matrix to check.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\nReturns:\nWhether the matrix is unitary with unit determinant within the given\ntolerance.", "source": "codesearchnet"}
{"code": "def raw_state(self):\n    try:\n        return self._get_domain().state()\n    except libvirt.libvirtError as e:\n        raise vm_plugin.LagoFailedToGetVMStateError(str(e))", "docstring": "Return the state of the domain in Libvirt's terms\n\nRetruns:\ntuple of ints: The state and its reason\n\nRaises:\n:exc:`~lago.plugins.vm.LagoVMDoesNotExistError`:\nIf the VM of this provider doesn't exist.\n:exc:`~lago.plugins.vm.LagoFailedToGetVMStateError:\nIf the VM exist, but the query returned an error.", "source": "codesearchnet"}
{"code": "def get_sig(ir, name):\n    \n    sig = '{}({})'\n\n    \n    argss = convert_arguments(ir.arguments)\n    return [sig.format(name, ','.join(args)) for args in argss]", "docstring": "Return a list of potential signature\nIt is a list, as Constant variables can be converted to int256\nArgs:\nir (slithIR.operation)\nReturns:\nlist(str)", "source": "juraj-google-style"}
{"code": "def LockScanNode(self, path_spec):\n    \n    scan_node = self._scan_nodes.get(path_spec, None)\n    if not scan_node:\n      raise KeyError('Scan node does not exist.')\n\n    self._locked_scan_nodes[path_spec] = scan_node", "docstring": "Marks a scan node as locked.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nRaises:\nKeyError: if the scan node does not exists.", "source": "juraj-google-style"}
{"code": "def save_feature_list(self, obj, set_id, feature_list_id):\n        \n\n        save(obj, self.features_dir + 'X_{}_{}.pickle'.format(set_id, feature_list_id))", "docstring": "Pickle the specified feature list to a file.\nExample: `save_feature_list(project, X_tfidf_train, 'train', 'tfidf')`.\n\nArgs:\nobj: The object to pickle (e.g., a numpy array or a Pandas dataframe)\nproject: An instance of pygoose project.\nset_id: The id of the subset (e.g., 'train' or 'test')\nfeature_list_id: The name for this feature list.", "source": "juraj-google-style"}
{"code": "def transpose(self, name=None, activate_final=None):\n    \n    if name is None:\n      name = self.module_name + \"_transpose\"\n    if activate_final is None:\n      activate_final = self.activate_final\n    output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]\n    output_sizes.reverse()\n    return MLP(\n        name=name,\n        output_sizes=output_sizes,\n        activation=self.activation,\n        activate_final=activate_final,\n        initializers=self.initializers,\n        partitioners=self.partitioners,\n        regularizers=self.regularizers,\n        use_bias=self.use_bias,\n        use_dropout=self.use_dropout)", "docstring": "Returns transposed `MLP`.\n\nArgs:\nname: Optional string specifying the name of the transposed module. The\ndefault name is constructed by appending \"_transpose\"\nto `self.module_name`.\nactivate_final: Optional boolean determining if the activation and batch\nnormalization, if turned on, are applied to the final layer.\n\nReturns:\nMatching transposed `MLP` module.", "source": "juraj-google-style"}
{"code": "def _force_edge_active_move(self, state: _STATE) -> _STATE:\n    (seqs, edges) = state\n    unused_edges = edges.copy()\n    for seq in seqs:\n        for i in range(1, len(seq)):\n            unused_edges.remove(self._normalize_edge((seq[(i - 1)], seq[i])))\n    edge = self._choose_random_edge(unused_edges)\n    if (not edge):\n        return (seqs, edges)\n    return (self._force_edge_active(seqs, edge, (lambda : bool(self._rand.randint(2)))), edges)", "docstring": "Move which forces a random edge to appear on some sequence.\n\nThis move chooses random edge from the edges which do not belong to any\nsequence and modifies state in such a way, that this chosen edge\nappears on some sequence of the search state.\n\nArgs:\nstate: Search state, not mutated.\n\nReturns:\nNew search state with one of the unused edges appearing in some\nsequence.", "source": "codesearchnet"}
{"code": "def _feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:", "docstring": "Actual feedback method which should be implemented by the child class.\n\nThe default implementation is no-op.\n\nArgs:\ndna: a DNA object.\nreward: reward for the DNA. It is a float if `self.multi_objective`\nreturns False, otherwise it's a tuple of floats.", "source": "github-repos"}
{"code": "def _get_name_filter(package, context='decorate', reparse=False):\n    global name_filters\n    pkey = (package, context)\n    if ((pkey in name_filters) and (not reparse)):\n        return name_filters[pkey]\n    from acorn.config import settings\n    spack = settings(package)\n    sections = {'decorate': ['tracking', 'acorn.tracking'], 'time': ['timing', 'acorn.timing'], 'analyze': ['analysis', 'acorn.analysis']}\n    (filters, rfilters) = (None, None)\n    import re\n    if (context in sections):\n        (filters, rfilters) = ([], [])\n        (ignores, rignores) = ([], [])\n        for section in sections[context]:\n            if spack.has_section(section):\n                options = spack.options(section)\n                if ('filter' in options):\n                    filters.extend(re.split('\\\\s*\\\\$\\\\s*', spack.get(section, 'filter')))\n                if ('rfilter' in options):\n                    pfilters = re.split('\\\\s*\\\\$\\\\s*', spack.get(section, 'rfilter'))\n                    rfilters.extend([re.compile(p, re.I) for p in pfilters])\n                if ('ignore' in options):\n                    ignores.extend(re.split('\\\\s*\\\\$\\\\s*', spack.get(section, 'ignore')))\n                if ('rignore' in options):\n                    pignores = re.split('\\\\s*\\\\$\\\\s*', spack.get(section, 'rignore'))\n                    rignores.extend([re.compile(p, re.I) for p in pfilters])\n        name_filters[pkey] = {'filters': filters, 'rfilters': rfilters, 'ignores': ignores, 'rignores': rignores}\n    else:\n        name_filters[pkey] = None\n    return name_filters[pkey]", "docstring": "Makes sure that the name filters for the specified package have been\nloaded.\n\nArgs:\npackage (str): name of the package that this method belongs to.\ncontext (str): one of ['decorate', 'time', 'analyze']; specifies which\nsection of the configuration settings to check.", "source": "codesearchnet"}
{"code": "def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions):\n    actions_to_send = defaultdict(dict)\n    for env_id in active_envs:\n        actions_to_send[env_id] = {}\n    for (policy_id, eval_data) in to_eval.items():\n        rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])\n        (actions, rnn_out_cols, pi_info_cols) = eval_results[policy_id]\n        if (len(rnn_in_cols) != len(rnn_out_cols)):\n            raise ValueError('Length of RNN in did not match RNN out, got: {} vs {}'.format(rnn_in_cols, rnn_out_cols))\n        for (f_i, column) in enumerate(rnn_in_cols):\n            pi_info_cols['state_in_{}'.format(f_i)] = column\n        for (f_i, column) in enumerate(rnn_out_cols):\n            pi_info_cols['state_out_{}'.format(f_i)] = column\n        actions = _unbatch_tuple_actions(actions)\n        policy = _get_or_raise(policies, policy_id)\n        for (i, action) in enumerate(actions):\n            env_id = eval_data[i].env_id\n            agent_id = eval_data[i].agent_id\n            if clip_actions:\n                actions_to_send[env_id][agent_id] = clip_action(action, policy.action_space)\n            else:\n                actions_to_send[env_id][agent_id] = action\n            episode = active_episodes[env_id]\n            episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])\n            episode._set_last_pi_info(agent_id, {k: v[i] for (k, v) in pi_info_cols.items()})\n            if ((env_id in off_policy_actions) and (agent_id in off_policy_actions[env_id])):\n                episode._set_last_action(agent_id, off_policy_actions[env_id][agent_id])\n            else:\n                episode._set_last_action(agent_id, action)\n    return actions_to_send", "docstring": "Process the output of policy neural network evaluation.\n\nRecords policy evaluation results into the given episode objects and\nreturns replies to send back to agents in the env.\n\nReturns:\nactions_to_send: nested dict of env id -> agent id -> agent replies.", "source": "codesearchnet"}
{"code": "def AddAttribute(self, attribute, value=None, age=None):\n    if ('w' not in self.mode):\n        raise IOError(('Writing attribute %s to read only object.' % attribute))\n    if (value is None):\n        value = attribute\n        attribute = value.attribute_instance\n    if ((self.mode != 'w') and attribute.lock_protected and (not self.transaction)):\n        raise IOError(('Object must be locked to write attribute %s.' % attribute))\n    self._CheckAttribute(attribute, value)\n    if attribute.versioned:\n        if attribute.creates_new_object_version:\n            self._new_version = True\n        if age:\n            value.age = age\n        else:\n            value.age = rdfvalue.RDFDatetime.Now()\n    else:\n        self._to_delete.add(attribute)\n        self.synced_attributes.pop(attribute, None)\n        self.new_attributes.pop(attribute, None)\n        value.age = 0\n    self._AddAttributeToCache(attribute, value, self.new_attributes)\n    self._dirty = True", "docstring": "Add an additional attribute to this object.\n\nIf value is None, attribute is expected to be already initialized with a\nvalue. For example:\n\nfd.AddAttribute(fd.Schema.CONTAINS(\"some data\"))\n\nArgs:\nattribute: The attribute name or an RDFValue derived from the attribute.\nvalue: The value the attribute will be set to.\nage: Age (timestamp) of the attribute. If None, current time is used.\n\nRaises:\nIOError: If this object is read only.", "source": "codesearchnet"}
{"code": "def _stream_output(process):\n    exit_code = None\n    while (exit_code is None):\n        stdout = process.stdout.readline().decode('utf-8')\n        sys.stdout.write(stdout)\n        exit_code = process.poll()\n    if (exit_code != 0):\n        raise RuntimeError(('Process exited with code: %s' % exit_code))\n    return exit_code", "docstring": "Stream the output of a process to stdout\n\nThis function takes an existing process that will be polled for output. Only stdout\nwill be polled and sent to sys.stdout.\n\nArgs:\nprocess(subprocess.Popen): a process that has been started with\nstdout=PIPE and stderr=STDOUT\n\nReturns (int): process exit code", "source": "codesearchnet"}
{"code": "def getParameter(self, name):\n        \n        return lock_and_call(\n            lambda: Parameter(self._impl.getParameter(name)),\n            self._lock\n        )", "docstring": "Get the parameter with the corresponding name.\n\nArgs:\nname: Name of the parameter to be found.\n\nRaises:\nTypeError: if the specified parameter does not exist.", "source": "juraj-google-style"}
{"code": "def json(self) -> dict:\n    content = {}\n    if self.text:\n        content['text'] = self.text\n    content['controls'] = [control.json() for control in self.content]\n    self.control_json['content'] = content\n    return self.control_json", "docstring": "Returns json compatible state of the ButtonsFrame instance.\n\nReturns json compatible state of the ButtonsFrame instance including\nall nested buttons.\n\nReturns:\ncontrol_json: Json representation of ButtonsFrame state.", "source": "codesearchnet"}
{"code": "def should_stop(self):\n    if self._check_stop():\n        return True\n    if self._sess:\n        return self._wrapped_is_stoppable and self._sess.should_stop()\n    return True", "docstring": "Return true if this session should not be used anymore.\n\nAlways return True if the session was closed.\n\nReturns:\nTrue if the session should stop, False otherwise.", "source": "github-repos"}
{"code": "def get_creation_date_tags(url, domain, as_dicts=False):\n    \n    creation_date_tags = [\n        mementoweb_api_tags(url),\n        get_whois_tags(domain),\n    ]\n\n    creation_date_tags = sorted(\n        sum(creation_date_tags, []),\n        key=lambda x: x.date\n    )\n\n    if not as_dicts:\n        return creation_date_tags\n\n    return [\n        item._as_dict()\n        for item in creation_date_tags\n    ]", "docstring": "Put together all data sources in this module and return it's output.\n\nArgs:\nurl (str): URL of the web. With relative paths and so on.\ndomain (str): Just the domain of the web.\nas_dicts (bool, default False): Convert output to dictionaries\ncompatible with :class:`.SourceString`?\n\nReturns:\nlist: Sorted list of :class:`TimeResource` objects or dicts.", "source": "juraj-google-style"}
{"code": "def annotate_test_file(self, test_file: Iterator[str]) -> Iterator[str]:\n    transformed_tests, run_directives = self.for_each_test_case(test_file, self.annotate_test_case, num_outputs=2)\n    return itertools.chain([_BANNER_COMMENT_LINE], run_directives, ['\\n'], transformed_tests)", "docstring": "Inserts FileCheck directives above each test case in an HLO test file.\n\nArgs:\ntest_file: An iterator over the lines of an HLO test file.\n\nReturns:\nAn iterator over the lines of the transformed HLO test file. Each test\ncase is preceded by FileCheck directives describing the expected output\nof the optimizer on that test case.", "source": "github-repos"}
{"code": "def add_op(state, op_func, *args, **kwargs):\n    frameinfo = get_caller_frameinfo()\n    kwargs['frameinfo'] = frameinfo\n    for host in state.inventory:\n        op_func(state, host, *args, **kwargs)", "docstring": "Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts.\n\nArgs:\nstate (``pyinfra.api.State`` obj): the deploy state to add the operation\nto op_func (function): the operation function from one of the modules,\nie ``server.user``\nargs/kwargs: passed to the operation function", "source": "codesearchnet"}
{"code": "def make_sharded_variable_creator(hosts: List[Text]) -> Callable[..., TPUEmbeddingVariable]:\n\n    def sharded_variable_creator(next_creator: Callable[..., tf_variables.Variable], *args, **kwargs):\n        \n        kwargs['skip_mirrored_creator'] = True\n        num_hosts = len(hosts)\n        name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs)\n        initial_value = kwargs['initial_value']\n        rows = shape[0]\n        cols = shape[1]\n        partial_partition = rows % num_hosts\n        full_rows_per_host = rows \n        partitions = [full_rows_per_host + 1] * partial_partition + [full_rows_per_host] * (num_hosts - partial_partition)\n        variables = []\n        sharding_aware = 'shard_info' in tf_inspect.getargspec(initial_value).args\n        offset = 0\n        kwargs['dtype'] = dtype\n        for i, p in enumerate(partitions):\n            if p == 0:\n                continue\n            with ops.device(hosts[i]):\n                kwargs['name'] = '{}_{}'.format(name, i)\n                kwargs['shape'] = (p, cols)\n                if sharding_aware:\n                    shard_info = base.ShardInfo(kwargs['shape'], (offset, 0))\n                    kwargs['initial_value'] = functools.partial(initial_value, shard_info=shard_info)\n                    offset += p\n                else:\n                    kwargs['initial_value'] = functools.partial(unwrapped_initial_value, kwargs['shape'], dtype=dtype)\n                variables.append(next_creator(*args, **kwargs))\n        return TPUEmbeddingVariable(variables, name=name)\n    return sharded_variable_creator", "docstring": "Makes a sharded variable creator given a list of hosts.\n\nArgs:\nhosts: a list of tensorflow devices on which to shard the tensors.\n\nReturns:\nA variable creator function.", "source": "github-repos"}
{"code": "def _read_protocol_line(self):\n    self._server_start_stdout = []\n    while True:\n        line = self._proc.stdout.readline().decode('utf-8')\n        if not line:\n            raise errors.ServerStartError(self._device, 'Unexpected EOF when waiting for server to start.')\n        line = line.strip()\n        if line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET '):\n            self.log.debug('Accepted line from instrumentation output: \"%s\"', line)\n            return line\n        self._server_start_stdout.append(line)\n        self.log.debug('Discarded line from instrumentation output: \"%s\"', line)", "docstring": "Reads the next line of instrumentation output relevant to snippets.\n\nThis method will skip over lines that don't start with 'SNIPPET ' or\n'INSTRUMENTATION_RESULT:'.\n\nReturns:\nA string for the next line of snippet-related instrumentation output,\nstripped.\n\nRaises:\nerrors.ServerStartError: If EOF is reached without any protocol lines\nbeing read.", "source": "github-repos"}
{"code": "async def get_movie(self, id_):\n        \n        url = self.url_builder(\n            'movie/{movie_id}',\n            dict(movie_id=id_),\n            url_params=OrderedDict(append_to_response='credits'),\n        )\n        data = await self.get_data(url)\n        if data is None:\n            return\n        return Movie.from_json(data, self.config['data'].get('images'))", "docstring": "Retrieve movie data by ID.\n\nArguments:\nid_ (:py:class:`int`): The movie's TMDb ID.\n\nReturns:\n:py:class:`~.Movie`: The requested movie.", "source": "juraj-google-style"}
{"code": "def __init__(self, header, values, datetimes):\n        \n        assert isinstance(header, Header), \\\n            'header must be a Ladybug Header object. Got {}'.format(type(header))\n        assert isinstance(datetimes, Iterable) \\\n            and not isinstance(datetimes, (str, dict, bytes, bytearray)), \\\n            'datetimes should be a list or tuple. Got {}'.format(type(datetimes))\n\n        self._header = header\n        self._datetimes = tuple(datetimes)\n        self.values = values\n        self._validated_a_period = False", "docstring": "Initialize base collection.\n\nArgs:\nheader: A Ladybug Header object.\nvalues: A list of values.\ndatetimes: A list of Ladybug DateTime objects that aligns with\nthe list of values.", "source": "juraj-google-style"}
{"code": "def get_qa_logit_layer(self) -> nn.Module:\n    if hasattr(self, 'answer_head'):\n        return self.answer_head.logit_fc[-1]", "docstring": "Returns the linear layer that produces question answering logits\n\nReturns:\n`nn.Module`: A torch module mapping the question answering prediction hidden states. `None`: A NoneType\nobject if Lxmert does not have the visual answering head.", "source": "github-repos"}
{"code": "def from_dict(cls, parameters):\n        \n        instance = cls()\n        instance.fitted = parameters['fitted']\n        instance.constant_value = parameters['constant_value']\n\n        if instance.fitted and instance.constant_value is None:\n            instance.model = scipy.stats.truncnorm(parameters['a'], parameters['b'])\n\n        return instance", "docstring": "Set attributes with provided values.\n\nArgs:\nparameters(dict): Dictionary containing instance parameters.\n\nReturns:\nTruncnorm: Instance populated with given parameters.", "source": "juraj-google-style"}
{"code": "def make_single_array(ds, batch_size=8*1024):\n    \n    if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):\n        raise ValueError('Dataset must have a single type and shape')\n    nshapes = len(ds.output_shapes)\n    if nshapes > 0:\n        raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')\n    batches = []\n    with tf.Session() as sess:\n        ds = ds.batch(batch_size)\n        iterator = ds.make_initializable_iterator()\n        sess.run(iterator.initializer)\n        get_next = iterator.get_next()\n        with tqdm(desc='Elements', unit_scale=1) as pbar:\n            try:\n                while True:\n                    batches.append(sess.run(get_next))\n                    pbar.update(len(batches[-1]))\n            except tf.errors.OutOfRangeError:\n                pass\n    if batches:\n        return np.concatenate(batches)\n    return np.array([], dtype=ds.output_types.as_numpy_dtype)", "docstring": "Create a single numpy array from a dataset.\n\nThe dataset must have only one dimension, that is,\nthe length of its `output_shapes` and `output_types`\nis 1, and its output shape must be `[]`, that is,\nevery tensor in the dataset must be a scalar.\n\nArgs:\nds:  a TF Dataset.\nbatch_size:  how many elements to read per pass\n\nReturns:\na single numpy array.", "source": "juraj-google-style"}
{"code": "def __init__(self, bytes_per_pack=0, timeout_seconds=None):\n    pass", "docstring": "Creates a CollectiveHints.\n\nArgs:\nbytes_per_pack: a non-negative integer. Breaks collective operations into\npacks of certain size. If it's zero, the value is determined\nautomatically. This only applies to all-reduce with\n`MultiWorkerMirroredStrategy` currently.\ntimeout_seconds: a float or None, timeout in seconds. If not None, the\ncollective raises `tf.errors.DeadlineExceededError` if it takes longer\nthan this timeout. This can be useful when debugging hanging issues.\nThis should only be used for debugging since it creates a new thread for\neach collective, i.e. an overhead of `timeout_seconds *\nnum_collectives_per_second` more threads.  This only works for\n`tf.distribute.experimental.MultiWorkerMirroredStrategy`.\n\nRaises:\nValueError: When arguments have invalid value.", "source": "github-repos"}
{"code": "def status(self, job_ids):\n        \n\n        logging.debug(\"Checking status of : {0}\".format(job_ids))\n        for job_id in self.resources:\n            poll_code = self.resources[job_id]['proc'].poll()\n            if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:\n                continue\n\n            if poll_code is None:\n                self.resources[job_id]['status'] = 'RUNNING'\n            elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':\n                self.resources[job_id]['status'] = 'COMPLETED'\n            elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':\n                self.resources[job_id]['status'] = 'FAILED'\n\n        return [self.resources[jid]['status'] for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by their ids.\n\nArgs:\n- job_ids (List of ids) : List of identifiers for the jobs\n\nReturns:\n- List of status codes.", "source": "juraj-google-style"}
{"code": "def validate_probability(p: float, p_str: str) -> float:\n    \n    if p < 0:\n        raise ValueError('{} was less than 0.'.format(p_str))\n    elif p > 1:\n        raise ValueError('{} was greater than 1.'.format(p_str))\n    return p", "docstring": "Validates that a probability is between 0 and 1 inclusively.\n\nArgs:\np: The value to validate.\np_str: What to call the probability in error messages.\n\nReturns:\nThe probability p if the probability if valid.\n\nRaises:\nValueError if the probability is invalid.", "source": "juraj-google-style"}
{"code": "def __init__(self, mediator=None):\n    \n    super(WindowsVolumeScanner, self).__init__(mediator=mediator)\n    self._file_system = None\n    self._path_resolver = None\n    self._windows_directory = None", "docstring": "Initializes a Windows volume scanner.\n\nArgs:\nmediator (VolumeScannerMediator): a volume scanner mediator.", "source": "juraj-google-style"}
{"code": "def broadcast_shapes(shape1, shape2):\n    shape1 = list(shape1)\n    shape2 = list(shape2)\n    origin_shape1 = shape1\n    origin_shape2 = shape2\n    if len(shape1) > len(shape2):\n        shape2 = [1] * (len(shape1) - len(shape2)) + shape2\n    if len(shape1) < len(shape2):\n        shape1 = [1] * (len(shape2) - len(shape1)) + shape1\n    output_shape = list(shape1)\n    for i in range(len(shape1)):\n        if shape1[i] == 1:\n            output_shape[i] = shape2[i]\n        elif shape1[i] is None:\n            output_shape[i] = None if shape2[i] == 1 else shape2[i]\n        elif shape2[i] == 1 or shape2[i] is None or shape2[i] == shape1[i]:\n            output_shape[i] = shape1[i]\n        else:\n            raise ValueError(f'Cannot broadcast shape, the failure dim has value {shape1[i]}, which cannot be broadcasted to {shape2[i]}. Input shapes are: {origin_shape1} and {origin_shape2}.')\n    return output_shape", "docstring": "Broadcast input shapes to a unified shape.\n\nConvert to list for mutability.\n\nArgs:\nshape1: A tuple or list of integers.\nshape2: A tuple or list of integers.\n\nReturns:\noutput_shape (list of integers or `None`): The broadcasted shape.\n\nExample:\n>>> broadcast_shapes((5, 3), (1, 3))\n[5, 3]", "source": "github-repos"}
{"code": "def probabilistic_collocation(order, dist, subset=0.1):\n    (abscissas, weights) = chaospy.quad.collection.golub_welsch(order, dist)\n    likelihood = dist.pdf(abscissas)\n    alpha = numpy.random.random(len(weights))\n    alpha = (likelihood > ((alpha * subset) * numpy.max(likelihood)))\n    abscissas = abscissas.T[alpha].T\n    weights = weights[alpha]\n    return (abscissas, weights)", "docstring": "Probabilistic collocation method.\n\nArgs:\norder (int, numpy.ndarray) : Quadrature order along each axis.\ndist (Dist) : Distribution to generate samples from.\nsubset (float) : Rate of which to removed samples.", "source": "codesearchnet"}
{"code": "def ProcessGlobalSuppresions(lines):\n    for line in lines:\n        if _SEARCH_C_FILE.search(line):\n            for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:\n                _global_error_suppressions[category] = True\n        if _SEARCH_KERNEL_FILE.search(line):\n            for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:\n                _global_error_suppressions[category] = True", "docstring": "Updates the list of global error suppressions.\n\nParses any lint directives in the file that have global effect.\n\nArgs:\nlines: An array of strings, each representing a line of the file, with the\nlast element being empty if the file is terminated with a newline.", "source": "codesearchnet"}
{"code": "def _try_refresh_access_token(self) -> None:\n    if self.refresh_token:\n        if ((not self.access_token) or self._is_access_token_expired()):\n            (self.access_token, self.access_expiration) = self._get_access_from_refresh()\n            self.access_expiration = (time.time() + self.access_expiration)", "docstring": "Attempts to get a new access token using the refresh token, if needed.\n\nIf the access token is expired and this instance has a stored refresh token,\nthen the refresh token is in the API call to get a new access token. If\nsuccessful, this instance is modified in-place with that new access token.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def Match(self, artifact=None, os_name=None, cpe=None, label=None):\n    \n    return [\n        c for c in self.conditions if c.Match(artifact, os_name, cpe, label)\n    ]", "docstring": "Test if host data should trigger a check.\n\nArgs:\nartifact: An artifact name.\nos_name: An OS string.\ncpe: A CPE string.\nlabel: A label string.\n\nReturns:\nA list of conditions that match.", "source": "juraj-google-style"}
{"code": "def _build_hash_string(self):\n    if ((self.site_name in SITE_LIST) or self.hash_string):\n        if (self.username and self.password):\n            try:\n                hash_string = self.hash_string.format(self.password)\n            except TypeError:\n                raise PybooruError(\"Pybooru can't add 'password' to 'hash_string'\")\n            self.password_hash = hashlib.sha1(hash_string.encode('utf-8')).hexdigest()\n        else:\n            raise PybooruError(\"Specify the 'username' and 'password' parameters of the Pybooru object, for setting 'password_hash' attribute.\")\n    else:\n        raise PybooruError(\"Specify the 'hash_string' parameter of the Pybooru object, for the functions that requires login.\")", "docstring": "Function for build password hash string.\n\nRaises:\nPybooruError: When isn't provide hash string.\nPybooruError: When aren't provide username or password.\nPybooruError: When Pybooru can't add password to hash strring.", "source": "codesearchnet"}
{"code": "def load_validation_plugin(name=None):\n    if (not name):\n        return BaseValidationRules\n    plugin = None\n    for entry_point in iter_entry_points('bigchaindb.validation', name):\n        plugin = entry_point.load()\n    if (not plugin):\n        raise ResolutionError('No plugin found in group `bigchaindb.validation` with name `{}`'.format(name))\n    if (not issubclass(plugin, (BaseValidationRules,))):\n        raise TypeError('object of type \"{}\" does not implement `bigchaindb.validation.BaseValidationRules`'.format(type(plugin)))\n    return plugin", "docstring": "Find and load the chosen validation plugin.\n\nArgs:\nname (string): the name of the entry_point, as advertised in the\nsetup.py of the providing package.\n\nReturns:\nan uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules``", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, line):\n    self._last_month = 0\n    self._year_use = parser_mediator.GetEstimatedYear()\n    try:\n        structure = self.SECURITYD_LINE.parseString(line)\n    except pyparsing.ParseException:\n        logger.debug('Not a MacOS securityd log file')\n        return False\n    time_elements_tuple = self._GetTimeElementsTuple(structure)\n    try:\n        dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)\n    except ValueError:\n        logger.debug('Not a MacOS securityd log file, invalid date and time: {0!s}'.format(structure.date_time))\n        return False\n    self._last_month = time_elements_tuple[1]\n    return True", "docstring": "Verify that this file is a securityd log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "codesearchnet"}
{"code": "def get_palette(num_colors=256):\n    \n    pallete = [0]*(num_colors*3)\n    for j in range(0, num_colors):\n        lab = j\n        pallete[j*3+0] = 0\n        pallete[j*3+1] = 0\n        pallete[j*3+2] = 0\n        i = 0\n        while (lab > 0):\n            pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))\n            pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))\n            pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))\n            i = i + 1\n            lab >>= 3\n    return pallete", "docstring": "generates the colormap for visualizing the segmentation mask\nArgs:\nnum_colors (int): the number of colors to generate in the output palette\n\nReturns:\nstring: the supplied extension, if assertion is successful.", "source": "juraj-google-style"}
{"code": "def check_requirements_file(req_file, skip_packages):\n    reqs = read_requirements(req_file)\n    if (skip_packages is not None):\n        reqs = [req for req in reqs if (req.name not in skip_packages)]\n    outdated_reqs = filter(None, [check_req(req) for req in reqs])\n    return outdated_reqs", "docstring": "Return list of outdated requirements.\n\nArgs:\nreq_file (str): Filename of requirements file\nskip_packages (list): List of package names to ignore.", "source": "codesearchnet"}
{"code": "def _get_filename_from_url(url):\n    \n    parse = urlparse(url)\n    return os.path.basename(parse.path)", "docstring": "Return a filename from a URL\n\nArgs:\nurl (str): URL to extract filename from\n\nReturns:\n(str): Filename in URL", "source": "juraj-google-style"}
{"code": "def _get_dominant_angle(lines, domination_type=MEDIAN):\n    if (domination_type == MEDIAN):\n        return _get_median_angle(lines)\n    elif (domination_type == MEAN):\n        return _get_mean_angle(lines)\n    else:\n        raise ValueError(('Unknown domination type provided: %s' % domination_type))", "docstring": "Picks dominant angle of a set of lines.\n\nArgs:\nlines: iterable of (x1, y1, x2, y2) tuples that define lines.\ndomination_type: either MEDIAN or MEAN.\n\nReturns:\nDominant angle value in radians.\n\nRaises:\nValueError: on unknown domination_type.", "source": "codesearchnet"}
{"code": "def _reduce_output(self, outputs, seq_lengths):\n    batch_size = outputs.shape[0]\n    reduced = []\n    for i in range(batch_size):\n        if (self.lstm_reduction == 'mean'):\n            reduced.append(outputs[(i, :seq_lengths[i], :)].mean(dim=0))\n        elif (self.lstm_reduction == 'max'):\n            reduced.append(outputs[(i, :seq_lengths[i], :)].max(dim=0)[0])\n        elif (self.lstm_reduction == 'last'):\n            reduced.append(outputs[(i, (seq_lengths[i] - 1), :)])\n        elif (self.lstm_reduction == 'attention'):\n            reduced.append(self._attention(outputs[(i, :seq_lengths[i], :)]))\n        else:\n            msg = f\"Did not recognize lstm kwarg 'lstm_reduction' == {self.lstm_reduction}\"\n            raise ValueError(msg)\n    return torch.stack(reduced, dim=0)", "docstring": "Reduces the output of an LSTM step\n\nArgs:\noutputs: (torch.FloatTensor) the hidden state outputs from the\nlstm, with shape [batch_size, max_seq_length, hidden_size]", "source": "codesearchnet"}
{"code": "def get(self, key, default=None, cast=True):\n        \n        tablename, _, key = key.rpartition(':')\n        if tablename and tablename not in self.fields.name.split('+'):\n            raise ItsdbError('column requested from wrong table: {}'\n                             .format(tablename))\n        try:\n            index = self.fields.index(key)\n            value = list.__getitem__(self, index)\n        except (KeyError, IndexError):\n            value = default\n        else:\n            if cast:\n                field = self.fields[index]\n                value = _cast_to_datatype(value, field)\n        return value", "docstring": "Return the field data given by field name *key*.\n\nArgs:\nkey: the field name of the data to return\ndefault: the value to return if *key* is not in the row", "source": "juraj-google-style"}
{"code": "def UpdateIncludeState(filename, include_dict, io=codecs):\n  \n  headerfile = None\n  try:\n    headerfile = io.open(filename, 'r', 'utf8', 'replace')\n  except IOError:\n    return False\n  linenum = 0\n  for line in headerfile:\n    linenum += 1\n    clean_line = CleanseComments(line)\n    match = _RE_PATTERN_INCLUDE.search(clean_line)\n    if match:\n      include = match.group(2)\n      include_dict.setdefault(include, linenum)\n  return True", "docstring": "Fill up the include_dict with new includes found from the file.\n\nArgs:\nfilename: the name of the header to read.\ninclude_dict: a dictionary in which the headers are inserted.\nio: The io factory to use to read the file. Provided for testability.\n\nReturns:\nTrue if a header was successfully added. False otherwise.", "source": "juraj-google-style"}
{"code": "def get_frame(self, index):\n        \n\n        frame_num = self.frame_index[index]\n        onset = float(frame_num) / self.fps\n\n        if index < self.n_frames - 1:\n            next_frame_num = self.frame_index[index + 1]\n            end = float(next_frame_num) / self.fps\n        else:\n            end = float(self.duration)\n\n        duration = end - onset if end > onset else 0.0\n\n        return VideoFrameStim(self, frame_num,\n                              data=self.clip.get_frame(onset),\n                              duration=duration)", "docstring": "Get video frame at the specified index.\n\nArgs:\nindex (int): Positional index of the desired frame.", "source": "juraj-google-style"}
{"code": "def add_member_to_list(self, username, listname, member_type='USER'):\n    return self.client.service.addMemberToList(listname, username, member_type, self.proxy_id)", "docstring": "Add a member to an existing list.\n\nArgs:\nusername (str): The username of the user to add\nlistname (str): The name of the list to add the user to\nmember_type (str): Normally, this should be \"USER\".\nIf you are adding a list as a member of another list,\nset this to \"LIST\", instead.", "source": "codesearchnet"}
{"code": "def pull(self, platform=None):\n        \n        repository, _ = parse_repository_tag(self.image_name)\n        return self.collection.pull(repository, tag=self.id, platform=platform)", "docstring": "Pull the image digest.\n\nArgs:\nplatform (str): The platform to pull the image for.\nDefault: ``None``\n\nReturns:\n(:py:class:`Image`): A reference to the pulled image.", "source": "juraj-google-style"}
{"code": "def render_template_inplace(template_path, info, dry_run=False, extra_filters=None, resolver=None):\n    filters = {}\n    if (resolver is not None):\n        filters['find_product'] = _create_resolver_filter(resolver)\n    if (extra_filters is not None):\n        filters.update(extra_filters)\n    basedir = os.path.dirname(template_path)\n    template_name = os.path.basename(template_path)\n    if (not template_name.endswith('.tpl')):\n        raise ArgumentError('You must specify a filename that ends in .tpl', filepath=template_path)\n    out_path = os.path.join(basedir, template_name[:(- 4)])\n    if (basedir == ''):\n        basedir = '.'\n    env = Environment(loader=FileSystemLoader(basedir), trim_blocks=True, lstrip_blocks=True)\n    for (name, func) in filters.items():\n        env.filters[name] = func\n    template = env.get_template(template_name)\n    result = template.render(info)\n    if (not dry_run):\n        with open(out_path, 'wb') as outfile:\n            outfile.write(result.encode('utf-8'))\n    return out_path", "docstring": "Render a template file in place.\n\nThis function expects template path to be a path to a file\nthat ends in .tpl.  It will be rendered to a file in the\nsame directory with the .tpl suffix removed.\n\nArgs:\ntemplate_path (str): The path to the template file\nthat we want to render in place.\ninfo (dict): A dictionary of variables passed into the template to\nperform substitutions.\ndry_run (bool): Whether to actually render the output file or just return\nthe file path that would be generated.\nextra_filters (dict of str -> callable): An optional group of filters that\nwill be made available to the template.  The dict key will be the\nname at which callable is made available.\nresolver (ProductResolver): The specific ProductResolver class to use in the\nfind_product filter.\n\nReturns:\nstr: The path to the output file generated.", "source": "codesearchnet"}
{"code": "def print_start_command(self, command):\n        \n        size = len(command)\n        if size > 20:\n            raise RuntimeError('Command too long')\n        n1 = size/10\n        n2 = size%10\n        self.send('^PS'+chr(n1)+chr(n2)+command)", "docstring": "Set print command\n\nArgs:\ncommand: the type of command you desire.\nReturns:\nNone\nRaises:\nRuntimeError: Command too long.", "source": "juraj-google-style"}
{"code": "def db(self, entity, query_filters='size=10'):\n    if (self.entity_api_key == ''):\n        return {'status': 'failure', 'response': 'No API key found in request'}\n    historic_url = ((self.base_url + 'api/0.1.0/historicData?') + query_filters)\n    historic_headers = {'apikey': self.entity_api_key, 'Content-Type': 'application/json'}\n    historic_query_data = json.dumps({'query': {'match': {'key': entity}}})\n    with self.no_ssl_verification():\n        r = requests.get(historic_url, data=historic_query_data, headers=historic_headers)\n    response = dict()\n    if ('No API key' in str(r.content.decode('utf-8'))):\n        response['status'] = 'failure'\n    else:\n        r = r.content.decode('utf-8')\n        response = r\n    return response", "docstring": "This function allows an entity to access the historic data.\n\nArgs:\nentity        (string): Name of the device to listen to\nquery_filters (string): Elastic search response format string\nexample, \"pretty=true&size=10\"", "source": "codesearchnet"}
{"code": "def decorate_set_on_listener(prototype):\n\n    def add_annotation(method):\n        method._event_info = {}\n        method._event_info['name'] = method.__name__\n        method._event_info['prototype'] = prototype\n        return method\n    return add_annotation", "docstring": "Private decorator for use in the editor.\nAllows the Editor to create listener methods.\n\nArgs:\nparams (str): The list of parameters for the listener\nmethod (es. \"(self, new_value)\")", "source": "codesearchnet"}
{"code": "def __call__(self, **kwargs):\n    if len(kwargs) != len(self._inputs):\n        raise ValueError('Invalid number of inputs provided for running a SignatureDef, expected %s vs provided %s' % (len(self._inputs), len(kwargs)))\n    for input_name, value in kwargs.items():\n        if input_name not in self._inputs:\n            raise ValueError('Invalid Input name (%s) for SignatureDef' % input_name)\n        self._interpreter_wrapper.ResizeInputTensor(self._inputs[input_name], np.array(value.shape, dtype=np.int32), False, self._subgraph_index)\n    self._interpreter_wrapper.AllocateTensors(self._subgraph_index)\n    for input_name, value in kwargs.items():\n        self._interpreter_wrapper.SetTensor(self._inputs[input_name], value, self._subgraph_index)\n    self._interpreter_wrapper.Invoke(self._subgraph_index)\n    result = {}\n    for output_name, output_index in self._outputs:\n        result[output_name] = self._interpreter_wrapper.GetTensor(output_index, self._subgraph_index)\n    return result", "docstring": "Runs the SignatureDef given the provided inputs in arguments.\n\nArgs:\n**kwargs: key,value for inputs to the model. Key is the SignatureDef input\nname. Value is numpy array with the value.\n\nReturns:\ndictionary of the results from the model invoke.\nKey in the dictionary is SignatureDef output name.\nValue is the result Tensor.", "source": "github-repos"}
{"code": "def getContext(self, context_name = 'default'):\n\t\t\n\t\tif context_name == 'default' and 'default' not in self.contexts:\n\t\t\tself('default')\n\n\t\treturn self.contexts[context_name]", "docstring": "Get a context by name, create the default context if it does not exist\n\nParams:\ncontext_name (string):\nContext name\n\nRaises:\nKeyError:\nIf the context name does not exist\n\nReturns:\nbubbler.Bubbler:\nNamed context", "source": "juraj-google-style"}
{"code": "def added_tokens_decoder(self) -> dict[int, AddedToken]:\n    return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0]))", "docstring": "Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.\n\nReturns:\n`Dict[str, int]`: The added tokens.", "source": "github-repos"}
{"code": "def _ssl_context_factory(parameters):\n    client_cert = None\n    ca_cert = None\n    key = config.conf['tls']['keyfile']\n    cert = config.conf['tls']['certfile']\n    ca_file = config.conf['tls']['ca_cert']\n    if ca_file:\n        with open(ca_file, 'rb') as fd:\n            ca_cert = ssl.Certificate.loadPEM(fd.read())\n    if (key and cert):\n        with open(key) as fd:\n            client_keypair = fd.read()\n        with open(cert) as fd:\n            client_keypair += fd.read()\n        client_cert = ssl.PrivateCertificate.loadPEM(client_keypair)\n    hostname = parameters.host\n    if (not isinstance(hostname, six.text_type)):\n        hostname = hostname.decode(locale.getdefaultlocale()[1])\n    try:\n        context_factory = ssl.optionsForClientTLS(hostname, trustRoot=(ca_cert or ssl.platformTrust()), clientCertificate=client_cert, extraCertificateOptions={'raiseMinimumTo': ssl.TLSVersion.TLSv1_2})\n    except AttributeError:\n        context_factory = ssl.CertificateOptions(certificate=client_cert.original, privateKey=client_cert.privateKey.original, caCerts=([ca_cert.original] or ssl.platformTrust()), verify=True, requireCertificate=True, verifyOnce=False, enableSessions=False)\n    return context_factory", "docstring": "Produce a Twisted SSL context object from a pika connection parameter object.\nThis is necessary as Twisted manages the connection, not Pika.\n\nArgs:\nparameters (pika.ConnectionParameters): The connection parameters built\nfrom the fedora_messaging configuration.", "source": "codesearchnet"}
{"code": "def register_mbr_plugin(self, fs_id, plugin):\n        \n        self.logger.debug('MBR: {}, FS ID: {}'\n                          .format(self.__get_plugin_name(plugin), fs_id))\n        self.__mbr_plugins[fs_id].append(plugin)", "docstring": "Used in plugin's registration routine,\nto associate it's detection method with given filesystem id\n\nArgs:\nfs_id: filesystem id that is read from MBR partition entry\nplugin: plugin that supports this filesystem", "source": "juraj-google-style"}
{"code": "def __init__(self, configuration, provider=None):\n        \n        self._configuration = configuration\n        self._provider = provider", "docstring": "Base class for backends.\n\nThis method should initialize the module and its configuration, and\nraise an exception if a component of the module is\nnot available.\n\nArgs:\nconfiguration (BackendConfiguration): backend configuration\nprovider (BaseProvider): provider responsible for this backend\n\nRaises:\nFileNotFoundError if backend executable is not available.\nQiskitError: if there is no name in the configuration", "source": "juraj-google-style"}
{"code": "def plot_ax(self, ax=None, fontsize=12, **kwargs):\n        \n        ax, fig, plt = get_ax_fig_plt(ax=ax)\n\n        color = kwargs.get(\"color\", \"r\")\n        label = kwargs.get(\"label\", \"{} fit\".format(self.__class__.__name__))\n        lines = [\"Equation of State: %s\" % self.__class__.__name__,\n                 \"Minimum energy = %1.2f eV\" % self.e0,\n                 \"Minimum or reference volume = %1.2f Ang^3\" % self.v0,\n                 \"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa\" %\n                 (self.b0, self.b0_GPa),\n                 \"Derivative of bulk modulus wrt pressure = %1.2f\" % self.b1]\n        text = \"\\n\".join(lines)\n        text = kwargs.get(\"text\", text)\n\n        \n        ax.plot(self.volumes, self.energies, linestyle=\"None\", marker=\"o\", color=color)\n\n        \n        vmin, vmax = min(self.volumes), max(self.volumes)\n        vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))\n        vfit = np.linspace(vmin, vmax, 100)\n\n        ax.plot(vfit, self.func(vfit), linestyle=\"dashed\", color=color, label=label)\n\n        ax.grid(True)\n        ax.set_xlabel(\"Volume $\\\\AA^3$\")\n        ax.set_ylabel(\"Energy (eV)\")\n        ax.legend(loc=\"best\", shadow=True)\n        \n        ax.text(0.5, 0.5, text, fontsize=fontsize, horizontalalignment='center',\n            verticalalignment='center', transform=ax.transAxes)\n\n        return fig", "docstring": "Plot the equation of state on axis `ax`\n\nArgs:\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nfontsize: Legend fontsize.\ncolor (str): plot color.\nlabel (str): Plot label\ntext (str): Legend text (options)\n\nReturns:\nMatplotlib figure object.", "source": "juraj-google-style"}
{"code": "def _get_backend_instance(self, backend_cls):\n    try:\n        backend_instance = backend_cls(provider=self)\n    except Exception as err:\n        raise QiskitError(('Backend %s could not be instantiated: %s' % (backend_cls, err)))\n    return backend_instance", "docstring": "Return an instance of a backend from its class.\n\nArgs:\nbackend_cls (class): Backend class.\nReturns:\nBaseBackend: a backend instance.\nRaises:\nQiskitError: if the backend could not be instantiated.", "source": "codesearchnet"}
{"code": "def write(self, offset, data):\n    if (not isinstance(offset, (int, long))):\n        raise TypeError('Invalid offset type, should be integer.')\n    if (not isinstance(data, (bytes, bytearray, list))):\n        raise TypeError('Invalid data type, expected bytes, bytearray, or list.')\n    offset = self._adjust_offset(offset)\n    self._validate_offset(offset, len(data))\n    data = bytes(bytearray(data))\n    self.mapping[offset:(offset + len(data))] = data", "docstring": "Write a string of bytes to the specified `offset` in bytes, relative\nto the base physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\ndata (bytes, bytearray, list): a byte array or list of 8-bit\nintegers to write.\n\nRaises:\nTypeError: if `offset` or `data` type are invalid.\nValueError: if `offset` is out of bounds, or if data is not valid bytes.", "source": "codesearchnet"}
{"code": "def _compress_json(self, j):\n        \n        compressed_json = copy.copy(j)\n        compressed_json.pop('users', None)\n\n        compressed_data = zlib.compress(\n            json.dumps(j['users']).encode('utf-8'),\n            self.zlib_compression_strength\n        )\n        b64_data = base64.b64encode(compressed_data).decode('utf-8')\n\n        compressed_json['blob'] = b64_data\n\n        return compressed_json", "docstring": "Compress the BLOB data portion of the usernotes.\n\nArguments:\nj: the JSON in Schema v5 format (dict)\n\nReturns a dict with the 'users' key removed and 'blob' key added", "source": "juraj-google-style"}
{"code": "def get_first_model_with_rest_name(cls, rest_name):\n        \n\n        models = cls.get_models_with_rest_name(rest_name)\n\n        if len(models) > 0:\n            return models[0]\n\n        return None", "docstring": "Get the first model corresponding to a rest_name\n\nArgs:\nrest_name: the rest name", "source": "juraj-google-style"}
{"code": "def RemoveEventAttribute(self, attribute_name):\n    if (attribute_name not in self._extra_event_attributes):\n        raise KeyError('Event attribute: {0:s} not set'.format(attribute_name))\n    del self._extra_event_attributes[attribute_name]", "docstring": "Removes an attribute from being set on all events produced.\n\nArgs:\nattribute_name (str): name of the attribute to remove.\n\nRaises:\nKeyError: if the event attribute is not set.", "source": "codesearchnet"}
{"code": "def GetEntries(\n      self, parser_mediator, cookie_data=None, url=None, **kwargs):\n    \n    fields = cookie_data.split('.')\n    number_of_fields = len(fields)\n\n    if number_of_fields not in (1, 4):\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported number of fields: {0:d} in cookie: {1:s}'.format(\n              number_of_fields, self.COOKIE_NAME))\n      return\n\n    if number_of_fields == 1:\n      domain_hash = None\n\n      try:\n        \n        last_visit_posix_time = int(fields[0], 10) / 10000000\n      except ValueError:\n        last_visit_posix_time = None\n\n      number_of_pages_viewed = None\n\n    elif number_of_fields == 4:\n      domain_hash = fields[0]\n\n      try:\n        number_of_pages_viewed = int(fields[1], 10)\n      except ValueError:\n        number_of_pages_viewed = None\n\n      try:\n        if fields[2] in ('8', '9'):\n          \n          last_visit_posix_time = int(fields[3], 10) / 1000\n        else:\n          last_visit_posix_time = int(fields[3], 10)\n      except ValueError:\n        last_visit_posix_time = None\n\n    if last_visit_posix_time is not None:\n      date_time = dfdatetime_posix_time.PosixTime(\n          timestamp=last_visit_posix_time)\n      timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED\n    else:\n      date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n\n    event_data = GoogleAnalyticsEventData('utmb')\n    event_data.cookie_name = self.COOKIE_NAME\n    event_data.domain_hash = domain_hash\n    event_data.pages_viewed = number_of_pages_viewed\n    event_data.url = url\n\n    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts event objects from the cookie.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ncookie_data (bytes): cookie data.\nurl (str): URL or path where the cookie got set.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(UUIDDefinition, self).__init__(\n        name, aliases=aliases, description=description, urls=urls)\n    self.size = 16", "docstring": "Initializes an UUID data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def read_raw(self, key):\n    data = None\n    if (key is not None):\n        data = self.db.read(key.strip())\n    else:\n        self.tcex.log.warning(u'The key field was None.')\n    return data", "docstring": "Read method of CRUD operation for raw data.\n\nArgs:\nkey (string): The variable to read from the DB.\n\nReturns:\n(any): Results retrieved from DB.", "source": "codesearchnet"}
{"code": "def console_from_file(filename: str) -> tcod.console.Console:\n    return tcod.console.Console._from_cdata(lib.TCOD_console_from_file(filename.encode('utf-8')))", "docstring": "Return a new console object from a filename.\n\nThe file format is automactially determined.  This can load REXPaint `.xp`,\nASCII Paint `.apf`, or Non-delimited ASCII `.asc` files.\n\nArgs:\nfilename (Text): The path to the file, as a string.\n\nReturns: A new :any`Console` instance.", "source": "codesearchnet"}
{"code": "def create_endpoint(self, endpoint_name, config_name, tags=None, wait=True):\n    LOGGER.info('Creating endpoint with name {}'.format(endpoint_name))\n    tags = (tags or [])\n    self.sagemaker_client.create_endpoint(EndpointName=endpoint_name, EndpointConfigName=config_name, Tags=tags)\n    if wait:\n        self.wait_for_endpoint(endpoint_name)\n    return endpoint_name", "docstring": "Create an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request.\n\nOnce the ``Endpoint`` is created, client applications can send requests to obtain inferences.\nThe endpoint configuration is created using the ``CreateEndpointConfig`` API.\n\nArgs:\nendpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` being created.\nconfig_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.\nwait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True).\n\nReturns:\nstr: Name of the Amazon SageMaker ``Endpoint`` created.", "source": "codesearchnet"}
{"code": "def jwt_is_expired(self, access_token=None, leeway=0):\n    if (access_token is not None):\n        exp = self._decode_exp(access_token)\n    else:\n        exp = self.jwt_exp\n    now = time()\n    if (exp < (now - leeway)):\n        return True\n    return False", "docstring": "Validate JWT access token expiration.\n\nArgs:\naccess_token (str): Access token to validate. Defaults to ``None``.\nleeway (float): Time in seconds to adjust for local clock skew. Defaults to 0.\n\nReturns:\nbool: ``True`` if expired, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def _DropCommonSuffixes(filename):\n    for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'):\n        if (filename.endswith(suffix) and (len(filename) > len(suffix)) and (filename[((- len(suffix)) - 1)] in ('-', '_'))):\n            return filename[:((- len(suffix)) - 1)]\n    return os.path.splitext(filename)[0]", "docstring": "Drops common suffixes like _test.cc or -inl.h from filename.\n\nFor example:\n>>> _DropCommonSuffixes('foo/foo-inl.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/bar/foo.cc')\n'foo/bar/foo'\n>>> _DropCommonSuffixes('foo/foo_internal.h')\n'foo/foo'\n>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')\n'foo/foo_unusualinternal'\n\nArgs:\nfilename: The input filename.\n\nReturns:\nThe filename with the common suffix removed.", "source": "codesearchnet"}
{"code": "def __edit_distance_alt(self, words):\n        \n        words = [x.lower() for x in words]\n        return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]", "docstring": "Compute all strings that are 1 edits away from all the words using\nonly the letters in the corpus\n\nArgs:\nwords (list): The words for which to calculate the edit distance\nReturns:\nset: The set of strings that are edit distance two from the \\\nprovided words", "source": "juraj-google-style"}
{"code": "def fit(self, x, y):\n        \n        train = np.vstack((np.array([self.featurize_row(row.iloc[0],\n                                                        row.iloc[1]) for idx, row in x.iterrows()]),\n                           np.array([self.featurize_row(row.iloc[1],\n                                                        row.iloc[0]) for idx, row in x.iterrows()])))\n        labels = np.vstack((y, -y)).ravel()\n        verbose = 1 if self.verbose else 0\n        self.clf = CLF(verbose=verbose,\n                       min_samples_leaf=self.L,\n                       n_estimators=self.E,\n                       max_depth=self.max_depth,\n                       n_jobs=self.n_jobs).fit(train, labels)", "docstring": "Train the model.\n\nArgs:\nx_tr (pd.DataFrame): CEPC format dataframe containing the pairs\ny_tr (pd.DataFrame or np.ndarray): labels associated to the pairs", "source": "juraj-google-style"}
{"code": "def compilable_sources(self, sourcedir, absolute=False, recursive=True, excludes=[]):\n    filepaths = []\n    for (root, dirs, files) in os.walk(sourcedir):\n        dirs.sort()\n        files.sort()\n        for item in files:\n            relative_dir = os.path.relpath(root, sourcedir)\n            if (relative_dir == '.'):\n                relative_dir = ''\n            absolute_filepath = os.path.join(root, item)\n            conditions = {'sourcedir': sourcedir, 'nopartial': True, 'exclude_patterns': excludes, 'excluded_libdirs': []}\n            if self.match_conditions(absolute_filepath, **conditions):\n                relative_filepath = os.path.join(relative_dir, item)\n                if absolute:\n                    filepath = absolute_filepath\n                else:\n                    filepath = relative_filepath\n                filepaths.append(filepath)\n        if (not recursive):\n            break\n    return filepaths", "docstring": "Find all scss sources that should be compiled, aka all sources that\nare not \"partials\" Sass sources.\n\n\nArgs:\nsourcedir (str): Directory path to scan.\n\nKeyword Arguments:\nabsolute (bool): Returned paths will be absolute using\n``sourcedir`` argument (if True), else return relative paths.\nrecursive (bool): Switch to enabled recursive finding (if True).\nDefault to True.\nexcludes (list): A list of excluding patterns (glob patterns).\nPatterns are matched against the relative filepath (from its\nsourcedir).\n\nReturns:\nlist: List of source paths.", "source": "codesearchnet"}
{"code": "def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT):\n    PackageBuilder(target_folder, parent_package, indent).from_classes_with_refs(classes)", "docstring": "This function can be used to build a python package representation of pyschema classes.\nOne module is created per namespace in a package matching the namespace hierarchy.\n\nArgs:\nclasses: A collection of classes to build the package from\ntarget_folder: Root folder of the package\nparent_package: Prepended on all import statements in order to support absolute imports.\nparent_package is not used when building the package file structure\nindent: Indent level. Defaults to 4 spaces", "source": "codesearchnet"}
{"code": "def get_equivalent_atoms(self, tolerance=0.3):\n    PA = self._get_point_group_analyzer(tolerance=tolerance)\n    eq = PA.get_equivalent_atoms()\n    self._convert_eq(eq)\n    return eq", "docstring": "Returns sets of equivalent atoms with symmetry operations\n\nArgs:\ntolerance (float): Tolerance to generate the full set of symmetry\noperations.\n\nReturns:\ndict: The returned dictionary has two possible keys:\n\n``eq_sets``:\nA dictionary of indices mapping to sets of indices,\neach key maps to indices of all equivalent atoms.\nThe keys are guaranteed to be not equivalent.\n\n``sym_ops``:\nTwofold nested dictionary.\n``operations[i][j]`` gives the symmetry operation\nthat maps atom ``i`` unto ``j``.", "source": "codesearchnet"}
{"code": "def create_threads(self, sess, coord=None, daemon=False, start=False):\n    with self._lock:\n        try:\n            if self._runs_per_session[sess] > 0:\n                return []\n        except KeyError:\n            pass\n        self._runs_per_session[sess] = len(self._enqueue_ops)\n        self._exceptions_raised = []\n    ret_threads = []\n    for op in self._enqueue_ops:\n        name = 'QueueRunnerThread-{}-{}'.format(self.name, op.name)\n        ret_threads.append(threading.Thread(target=self._run, args=(sess, op, coord), name=name))\n    if coord:\n        name = 'QueueRunnerThread-{}-close_on_stop'.format(self.name)\n        ret_threads.append(threading.Thread(target=self._close_on_stop, args=(sess, self._cancel_op, coord), name=name))\n    for t in ret_threads:\n        if coord:\n            coord.register_thread(t)\n        if daemon:\n            t.daemon = True\n        if start:\n            t.start()\n    return ret_threads", "docstring": "Create threads to run the enqueue ops for the given session.\n\nThis method requires a session in which the graph was launched.  It creates\na list of threads, optionally starting them.  There is one thread for each\nop passed in `enqueue_ops`.\n\nThe `coord` argument is an optional coordinator that the threads will use\nto terminate together and report exceptions.  If a coordinator is given,\nthis method starts an additional thread to close the queue when the\ncoordinator requests a stop.\n\nIf previously created threads for the given session are still running, no\nnew threads will be created.\n\nArgs:\nsess: A `Session`.\ncoord: Optional `Coordinator` object for reporting errors and checking\nstop conditions.\ndaemon: Boolean.  If `True` make the threads daemon threads.\nstart: Boolean.  If `True` starts the threads.  If `False` the\ncaller must call the `start()` method of the returned threads.\n\nReturns:\nA list of threads.", "source": "github-repos"}
{"code": "def rand_ascii_str(length):\n    letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]\n    return ''.join(letters)", "docstring": "Generates a random string of specified length, composed of ascii letters\nand digits.\n\nArgs:\nlength: The number of characters in the string.\n\nReturns:\nThe random string generated.", "source": "codesearchnet"}
{"code": "def __init__(self, wrapped_list):\n    self._non_append_mutation_value = False\n    self._external_modification_value = False\n    super().__init__(wrapped_list)\n    self._last_wrapped_list_snapshot = list(self._storage)", "docstring": "Construct a new list wrapper.\n\nArgs:\nwrapped_list: The initial value of the data structure. A shallow copy may\nbe maintained for error checking. `wrapped_list` itself should not be\nmodified directly after constructing the `ListWrapper`, and if changes\nare detected the `ListWrapper` will throw an exception on save.", "source": "github-repos"}
{"code": "async def stop_tasks(self, address):\n        \n\n        tasks = self._tasks.get(address, [])\n        for task in tasks:\n            task.cancel()\n\n        asyncio.gather(*tasks, return_exceptions=True)\n        self._tasks[address] = []", "docstring": "Clear all tasks pertaining to a tile.\n\nThis coroutine will synchronously cancel all running tasks that were\nattached to the given tile and wait for them to stop before returning.\n\nArgs:\naddress (int): The address of the tile we should stop.", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        images_kwargs = LlavaProcessorKwargs._defaults.get('images_kwargs', {})\n        images_kwargs.update(kwargs)\n        crop_size = images_kwargs.get('crop_size', None) or self.image_processor.crop_size\n        resized_height, resized_width = (crop_size['height'], crop_size['width'])\n        num_image_tokens = resized_height \n        num_image_tokens += self.num_additional_image_tokens\n        if self.vision_feature_select_strategy == 'default':\n            num_image_tokens -= 1\n        num_image_tokens = [num_image_tokens] * len(image_sizes)\n        num_image_patches = [1] * len(image_sizes)\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def __init__(self, name=None):\n    rr = gen_io_ops.identity_reader_v2(name=name)\n    super(IdentityReader, self).__init__(rr, supports_serialize=True)", "docstring": "Create a IdentityReader.\n\nArgs:\nname: A name for the operation (optional).", "source": "github-repos"}
{"code": "def to_frame(self, **kwargs):\n    df = export.write_dataframe(self._values, **kwargs)\n    df.name = self.title\n    return df", "docstring": "r\"\"\"Return a pandas DataFrame loaded from the worksheet data.\n\nArgs:\n\\**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``)\nReturns:\npandas.DataFrame: new ``DataFrame`` instance", "source": "codesearchnet"}
{"code": "def to_pandas(self):\n    df = self.data.to_pandas(is_transposed=self._is_transposed)\n    if df.empty:\n        if (len(self.columns) != 0):\n            df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)\n        else:\n            df = pandas.DataFrame(columns=self.columns, index=self.index)\n    else:\n        ErrorMessage.catch_bugs_and_request_email(((len(df.index) != len(self.index)) or (len(df.columns) != len(self.columns))))\n        df.index = self.index\n        df.columns = self.columns\n    return df", "docstring": "Converts Modin DataFrame to Pandas DataFrame.\n\nReturns:\nPandas DataFrame of the DataManager.", "source": "codesearchnet"}
{"code": "def write_info_file(tensorboard_info):\n    payload = ('%s\\n' % _info_to_string(tensorboard_info))\n    with open(_get_info_file_path(), 'w') as outfile:\n        outfile.write(payload)", "docstring": "Write TensorBoardInfo to the current process's info file.\n\nThis should be called by `main` once the server is ready. When the\nserver shuts down, `remove_info_file` should be called.\n\nArgs:\ntensorboard_info: A valid `TensorBoardInfo` object.\n\nRaises:\nValueError: If any field on `info` is not of the correct type.", "source": "codesearchnet"}
{"code": "def __init__(self, dump):\n    self._dump = dump\n    self._cached_tensor_values = {}", "docstring": "Constructor of ExpressionEvaluator.\n\nArgs:\ndump: an instance of `DebugDumpDir`.", "source": "github-repos"}
{"code": "def RunJob(self, job):\n    if (not job.leased_until):\n        raise LockError('CronJob must be leased for Run() to be called.')\n    if (job.leased_until < rdfvalue.RDFDatetime.Now()):\n        raise LockError(('CronJob lease expired for %s.' % job.cron_job_id))\n    logging.info('Starting cron job: %s', job.cron_job_id)\n    if (job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION):\n        cls_name = job.args.system_cron_action.job_class_name\n        job_cls = registry.SystemCronJobRegistry.CronJobClassByName(cls_name)\n        name = ('%s runner' % cls_name)\n    elif (job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION):\n        job_cls = registry.CronJobRegistry.CronJobClassByName('RunHunt')\n        name = 'Hunt runner'\n    else:\n        raise ValueError((\"CronJob %s doesn't have a valid args type set.\" % job.cron_job_id))\n    run_state = rdf_cronjobs.CronJobRun(cron_job_id=job.cron_job_id, status='RUNNING')\n    run_state.GenerateRunId()\n    run_obj = job_cls(run_state, job)\n    (wait_for_start_event, signal_event, wait_for_write_event) = (threading.Event(), threading.Event(), threading.Event())\n    try:\n        self._GetThreadPool().AddTask(target=run_obj.StartRun, args=(wait_for_start_event, signal_event, wait_for_write_event), name=name, blocking=False, inline=False)\n        if (not wait_for_start_event.wait(TASK_STARTUP_WAIT)):\n            logging.error('Cron job run task for %s is too slow to start.', job.cron_job_id)\n            return False\n        signal_event.set()\n        wait_for_write_event.wait(TASK_STARTUP_WAIT)\n        return True\n    except threadpool.Full:\n        return False", "docstring": "Does the actual work of the Cron, if the job is due to run.\n\nArgs:\njob: The cronjob rdfvalue that should be run. Must be leased.\n\nReturns:\nA boolean indicating if this cron job was started or not. False may\nbe returned when the threadpool is already full.\n\nRaises:\nLockError: if the object is not locked.\nValueError: If the job argument is invalid.", "source": "codesearchnet"}
{"code": "def sync_trial_info(self, job_path, expr_dir_name):\n    expr_name = expr_dir_name[(- 8):]\n    expr_path = os.path.join(job_path, expr_dir_name)\n    if (expr_name not in self._monitored_trials):\n        self._create_trial_info(expr_path)\n        self._monitored_trials.add(expr_name)\n    else:\n        self._update_trial_info(expr_path)", "docstring": "Load information of the trial from the given experiment directory.\n\nCreate or update the trial information, together with the trial\nmeta file.\n\nArgs:\njob_path(str)\nexpr_dir_name(str)", "source": "codesearchnet"}
{"code": "def connect(self, timeout=600):\n        \n        \n        if self.socket:\n            raise TensorForceError(\"Already connected to {}:{}. Only one connection allowed at a time. \" +\n                                   \"Close first by calling `close`!\".format(self.host, self.port))\n        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n        if timeout < 5 or timeout is None:\n            timeout = 5\n\n        err = 0\n        start_time = time.time()\n        while time.time() - start_time < timeout:\n            self.socket.settimeout(5)\n            err = self.socket.connect_ex((self.host, self.port))\n            if err == 0:\n                break\n            time.sleep(1)\n        if err != 0:\n            raise TensorForceError(\"Error when trying to connect to {}:{}: errno={} errcode='{}' '{}'\".\n                                   format(self.host, self.port, err, errno.errorcode[err], os.strerror(err)))", "docstring": "Starts the server tcp connection on the given host:port.\n\nArgs:\ntimeout (int): The time (in seconds) for which we will attempt a connection to the remote\n(every 5sec). After that (or if timeout is None or 0), an error is raised.", "source": "juraj-google-style"}
{"code": "def get_source_event_declaration(self, event):\n    return next((x.source_mapping for x in self.events if (x.name == event)))", "docstring": "Return the source mapping where the event is declared\n\nArgs:\nevent (str): event name\nReturns:\n(dict): sourceMapping", "source": "codesearchnet"}
{"code": "def drag_and_drop(self, source_selector, destination_selector, **kwargs):\n    self.info_log(('Drag and drop: source (%s); destination (%s)' % (source_selector, destination_selector)))\n    use_javascript_dnd = kwargs.get('use_javascript_dnd', 'proxy_driver:use_javascript_dnd')\n    source_el = self.find(source_selector)\n    destination_el = self.find(destination_selector)\n    if use_javascript_dnd:\n        try:\n            dnd_script = [\"function simulate(f,c,d,e){var b,a=null;for(b in eventMatchers)if(eventMatchers[b].test(c)){a=b;break}if(!a)return!1;document.createEvent?(b=document.createEvent(a),a=='HTMLEvents'?b.initEvent(c,!0,!0):b.initMouseEvent(c,!0,!0,document.defaultView,0,d,e,d,e,!1,!1,!1,!1,0,null),f.dispatchEvent(b)):(a=document.createEventObject(),a.detail=0,a.screenX=d,a.screenY=e,a.clientX=d,a.clientY=e,a.ctrlKey=!1,a.altKey=!1,a.shiftKey=!1,a.metaKey=!1,a.button=1,f.fireEvent('on'+c,a));return!0} var eventMatchers={HTMLEvents:/^(?:load|unload|abort|error|select|change|submit|reset|focus|blur|resize|scroll)$/,MouseEvents:/^(?:click|dblclick|mouse(?:down|up|over|move|out))$/};\", 'var source = arguments[0],destination = arguments[1];', \"simulate(source, 'mousedown', 0, 0);\", \"simulate(source, 'mousemove', destination.offsetLeft, destination.offsetTop);\", \"simulate(source, 'mouseup', destination.offsetLeft, destination.offsetTop);\"]\n            self._driver.execute_script('\\n'.join(dnd_script), source_el._element, destination_el._element)\n        except Exception as e:\n            self.error_log((u'drag_and_drop exception: %s' % str(e)))\n            raise\n    else:\n        try:\n            ActionChains(self._driver).drag_and_drop(source_el, destination_el).perform()\n        except Exception as e:\n            self.error_log((u'drag_and_drop exception: %s' % str(e)))\n            raise", "docstring": "Drag and drop\n\nArgs:\nsource_selector: (str)\ndestination_selector: (str)\n\nKwargs:\nuse_javascript_dnd: bool; default:\nconfig proxy_driver:use_javascript_dnd", "source": "codesearchnet"}
{"code": "def multiplicative_jitter(x, epsilon=0.01):\n    if (epsilon == 0):\n        return x\n    return (x * mtf.random_uniform(x.mesh, x.shape, minval=(1.0 - epsilon), maxval=(1.0 + epsilon), dtype=x.dtype))", "docstring": "Multiply values by a random number between 1-epsilon and 1+epsilon.\n\nMakes models more resilient to rounding errors introduced by bfloat16.\nThis seems particularly important for logits.\n\nArgs:\nx: a mtf.Tensor\nepsilon: a floating point value\n\nReturns:\na mtf.Tensor with the same type and shape as x.", "source": "codesearchnet"}
{"code": "def delete_by_file(self, file_obj):\n        \n        BalancedDiscStorage._check_interface(file_obj)\n\n        file_hash = self._get_hash(file_obj)\n\n        return self.delete_by_hash(file_hash)", "docstring": "Remove file from the storage. File is identified by opened `file_obj`,\nfrom which the hashes / path are computed.\n\nArgs:\nfile_obj (file): Opened file-like object, which is used to compute\nhashes.\n\nRaises:\nIOError: If the `file_obj` is not in storage.", "source": "juraj-google-style"}
{"code": "def pull(self, arm_id, success, failure):\n        \n        self.__beta_dist_dict[arm_id].observe(success, failure)", "docstring": "Pull arms.\n\nArgs:\narm_id:     Arms master id.\nsuccess:    The number of success.\nfailure:    The number of failure.", "source": "juraj-google-style"}
{"code": "def consume_input(self, mystr, stack=[], state=1, curchar=0, depth=0):\n        \n        mystrsplit = mystr.split(' ')\n        if self.s[state].type == 1:\n            stack.append(self.s[state].sym)\n            if len(self.s[state].trans) > 0:\n                state = self.s[state].trans[0]\n                if self.parse(\n                        mystr,\n                        stack=stack,\n                        state=state,\n                        curchar=curchar,\n                        depth=depth + 1) == 1:\n                    return True\n            return False\n        if self.s[state].type == 2:\n            if len(stack) == 0:\n                return False\n            sym = stack.pop()\n            for key in self.s[state].trans:\n                if sym in self.s[state].trans[key]:\n                    if self.parse(\n                            mystr,\n                            stack=stack,\n                            state=key,\n                            curchar=curchar,\n                            depth=depth + 1) == 1:\n                        return True\n            return False\n        if self.s[state].type == 3:\n            for key in self.s[state].trans:\n                if mystrsplit[curchar] in self.s[state].trans[key]:\n                    \n                    if curchar + 1 == len(mystrsplit) \\\n                            and 'closing' in self.s[key].trans:\n                        return True\n                    elif curchar + 1 == len(mystrsplit):\n                        return False\n\n                    \n                    if self.parse(\n                            mystr,\n                            stack=stack,\n                            state=key,\n                            curchar=curchar + 1,\n                            depth=depth + 1) == 1:\n                        return True\n            return False", "docstring": "Consumes an input and validates if it is accepted\nArgs:\nmystr (str): the input string to be consumes\nstack (list): the stack of symbols\nstate (int): the current state of the PDA\ncurchar (int): the index of the consumed character\ndepth (int): the depth of the function call in the stack\nReturns:\nbool: A value indicating the correct or erroneous execution", "source": "juraj-google-style"}
{"code": "def __init__(self, log_dir, testbed_name):\n    self._log_dir = log_dir\n    self._testbed_name = testbed_name\n    self.results = records.TestResult()\n    self._test_run_infos = []\n    self._test_run_metadata = TestRunner._TestRunMetaData(log_dir, testbed_name)", "docstring": "Constructor for TestRunner.\n\nArgs:\nlog_dir: string, root folder where to write logs\ntestbed_name: string, name of the testbed to run tests on", "source": "github-repos"}
{"code": "def parse_fields_whois(self, response):\n    try:\n        temp = response.split('|')\n        ret = {'asn_registry': temp[4].strip(' \\n')}\n        if (ret['asn_registry'] not in self.rir_whois.keys()):\n            raise ASNRegistryError('ASN registry {0} is not known.'.format(ret['asn_registry']))\n        ret['asn'] = temp[0].strip(' \\n')\n        ret['asn_cidr'] = temp[2].strip(' \\n')\n        ret['asn_country_code'] = temp[3].strip(' \\n').upper()\n        ret['asn_date'] = temp[5].strip(' \\n')\n        ret['asn_description'] = temp[6].strip(' \\n')\n    except ASNRegistryError:\n        raise\n    except Exception as e:\n        raise ASNParseError('Parsing failed for \"{0}\" with exception: {1}.'.format(response, e)[:100])\n    return ret", "docstring": "The function for parsing ASN fields from a whois response.\n\nArgs:\nresponse (:obj:`str`): The response from the ASN whois server.\n\nReturns:\ndict: The ASN lookup results\n\n::\n\n{\n'asn' (str) - The Autonomous System Number\n'asn_date' (str) - The ASN Allocation date\n'asn_registry' (str) - The assigned ASN registry\n'asn_cidr' (str) - The assigned ASN CIDR\n'asn_country_code' (str) - The assigned ASN country code\n'asn_description' (str) - The ASN description\n}\n\nRaises:\nASNRegistryError: The ASN registry is not known.\nASNParseError: ASN parsing failed.", "source": "codesearchnet"}
{"code": "def events_filter(self, topics: List[str]=None, from_block: BlockSpecification=None, to_block: BlockSpecification=None) -> StatelessFilter:\n    return self.client.new_filter(self.address, topics=topics, from_block=from_block, to_block=to_block)", "docstring": "Install a new filter for an array of topics emitted by the contract.\n\nArgs:\ntopics: A list of event ids to filter for. Can also be None,\nin which case all events are queried.\nfrom_block: The block number at which to start looking for events.\nto_block: The block number at which to stop looking for events.\nReturn:\nFilter: The filter instance.", "source": "codesearchnet"}
{"code": "def get_propagator(name):\n    \n\n    from .sgp4 import Sgp4\n    from .sgp4beta import Sgp4Beta\n\n    scope = locals().copy()\n    scope.update(globals())\n\n    if name not in scope:\n        raise UnknownPropagatorError(name)\n\n    return scope[name]", "docstring": "Retrieve a named propagator\n\nArgs:\nname (str): Name of the desired propagator\nReturn:\nPropagator class", "source": "juraj-google-style"}
{"code": "def refl(scatterer, h_pol=True):\n    \n    return scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * \\\n        radar_xsect(scatterer, h_pol)", "docstring": "Reflectivity (with number concentration N=1) for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe reflectivity.\n\nNOTE: To compute reflectivity in dBZ, give the particle diameter and\nwavelength in [mm], then take 10*log10(Zi).", "source": "juraj-google-style"}
{"code": "def make_trace_api(client):\n    generated = trace_service_client.TraceServiceClient(credentials=client._credentials, client_info=_CLIENT_INFO)\n    return _TraceAPI(generated, client)", "docstring": "Create an instance of the gapic Trace API.\n\nArgs:\nclient (~google.cloud.trace.client.Client): The client that holds\nconfiguration details.\n\nReturns:\nA :class:`~google.cloud.trace._gapic._TraceAPI` instance with the\nproper configurations.", "source": "codesearchnet"}
{"code": "def empty(shape, dtype=None, **kwargs):\n    \n    data = np.empty(shape, dtype)\n    return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, without initializing entries.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array without initializing entries.", "source": "juraj-google-style"}
{"code": "def timestamp(method='iso8601'):\n    if (method == 'iso8601'):\n        tz_hour = (time.timezone \n        utc_offset = (str(tz_hour) if (tz_hour < 0) else ('+' + str(tz_hour)))\n        stamp = (time.strftime('%Y-%m-%dT%H%M%S') + utc_offset)\n        return stamp\n    else:\n        raise ValueError('only iso8601 is accepted for now')", "docstring": "make an iso8601 timestamp\n\nArgs:\nmethod (str): type of timestamp\n\nExample:\n>>> stamp = timestamp()\n>>> print('stamp = {!r}'.format(stamp))\nstamp = ...-...-...T...", "source": "codesearchnet"}
{"code": "def reindex(self):\n    _map = dict(zip(self.micro_indices, reindex(self.micro_indices)))\n    partition = tuple((tuple((_map[index] for index in group)) for group in self.partition))\n    return CoarseGrain(partition, self.grouping)", "docstring": "Re-index this coarse graining to use squeezed indices.\n\nThe output grouping is translated to use indices ``0..n``, where ``n``\nis the number of micro indices in the coarse-graining. Re-indexing does\nnot effect the state grouping, which is already index-independent.\n\nReturns:\nCoarseGrain: A new |CoarseGrain| object, indexed from ``0..n``.\n\nExample:\n>>> partition = ((1, 2),)\n>>> grouping = (((0,), (1, 2)),)\n>>> coarse_grain = CoarseGrain(partition, grouping)\n>>> coarse_grain.reindex()\nCoarseGrain(partition=((0, 1),), grouping=(((0,), (1, 2)),))", "source": "codesearchnet"}
{"code": "def GetSubkeyByName(self, name):\n    \n    if not self._registry_key and self._registry:\n      self._GetKeyFromRegistry()\n\n    return self._subkeys.get(name.upper(), None)", "docstring": "Retrieves a subkey by name.\n\nArgs:\nname (str): name of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.", "source": "juraj-google-style"}
{"code": "def select_sites( self, site_labels ):\n        \n        if type( site_labels ) in ( list, set ):\n            selected_sites = [ s for s in self.sites if s.label in site_labels ]\n        elif type( site_labels ) is str:\n            selected_sites = [ s for s in self.sites if s.label is site_labels ]\n        else:\n            raise ValueError( str( site_labels ) )\n        return selected_sites", "docstring": "Selects sites in the lattice with specified labels.\n\nArgs:\nsite_labels (List(Str)|Set(Str)|Str): Labels of sites to select.\nThis can be a List [ 'A', 'B' ], a Set ( 'A', 'B' ), or a String 'A'.\n\nReturns:\n(List(Site)): List of sites with labels given by `site_labels`.", "source": "juraj-google-style"}
{"code": "def get_holodeck_path():\n    if (('HOLODECKPATH' in os.environ) and (os.environ['HOLODECKPATH'] != '')):\n        return os.environ['HOLODECKPATH']\n    if (os.name == 'posix'):\n        return os.path.expanduser('~/.local/share/holodeck')\n    elif (os.name == 'nt'):\n        return os.path.expanduser('~\\\\AppData\\\\Local\\\\holodeck')\n    else:\n        raise NotImplementedError('holodeck is only supported for Linux and Windows')", "docstring": "Gets the path of the holodeck environment\n\nReturns:\n(str): path to the current holodeck environment", "source": "codesearchnet"}
{"code": "def compress_dir(path, compression=\"gz\"):\n    \n    for parent, subdirs, files in os.walk(path):\n        for f in files:\n            compress_file(os.path.join(parent, f), compression=compression)", "docstring": "Recursively compresses all files in a directory. Note that this\ncompresses all files singly, i.e., it does not create a tar archive. For\nthat, just use Python tarfile class.\n\nArgs:\npath (str): Path to parent directory.\ncompression (str): A compression mode. Valid options are \"gz\" or\n\"bz2\". Defaults to gz.", "source": "juraj-google-style"}
{"code": "def authenticate_direct_bind(self, username, password):\n    bind_user = '{rdn}={username},{user_search_dn}'.format(rdn=self.config.get('LDAP_USER_RDN_ATTR'), username=username, user_search_dn=self.full_user_search_dn)\n    connection = self._make_connection(bind_user=bind_user, bind_password=password)\n    response = AuthenticationResponse()\n    try:\n        connection.bind()\n        log.debug(\"Authentication was successful for user '{0}'\".format(username))\n        response.status = AuthenticationResponseStatus.success\n        user_info = self.get_user_info(dn=bind_user, _connection=connection)\n        response.user_dn = bind_user\n        response.user_id = username\n        response.user_info = user_info\n        if self.config.get('LDAP_SEARCH_FOR_GROUPS'):\n            response.user_groups = self.get_user_groups(dn=bind_user, _connection=connection)\n    except ldap3.core.exceptions.LDAPInvalidCredentialsResult:\n        log.debug(\"Authentication was not successful for user '{0}'\".format(username))\n        response.status = AuthenticationResponseStatus.fail\n    except Exception as e:\n        log.error(e)\n        response.status = AuthenticationResponseStatus.fail\n    self.destroy_connection(connection)\n    return response", "docstring": "Performs a direct bind. We can do this since the RDN is the same\nas the login attribute. Hence we just string together a dn to find\nthis user with.\n\nArgs:\nusername (str): Username of the user to bind (the field specified\nas LDAP_BIND_RDN_ATTR)\npassword (str): User's password to bind with.\n\nReturns:\nAuthenticationResponse", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:\n    if already_has_special_tokens:\n        if token_ids_1 is not None:\n            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n        return [1 if token in self.all_special_ids else 0 for token in token_ids_0]\n    mask = [1] + [0] * len(token_ids_0) + [1]\n    if token_ids_1 is not None:\n        mask += [0] * len(token_ids_1) + [1]\n    return mask", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids of the first sequence.\ntoken_ids_1 (`List[int]`, *optional*):\nList of ids of the second sequence.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\nA list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def GetRequestXML(self, method, *args):\n    \n    self.suds_client.set_options(nosend=True)\n    service_request = (getattr(self, method))(*args).envelope\n    self.suds_client.set_options(nosend=False)\n    return lxml.etree.fromstring(service_request)", "docstring": "Get the raw SOAP XML for a request.\n\nArgs:\nmethod: The method name.\n*args: A list of arguments to be passed to the method.\n\nReturns:\nAn element containing the raw XML that would be sent as the request.", "source": "juraj-google-style"}
{"code": "def download_write_file(self, metadata, out_dir=None):\n        \n        fileName = metadata['name']\n        path = os.path.join(out_dir or wandb_dir(), fileName)\n        if self.file_current(fileName, metadata['md5']):\n            return path, None\n\n        size, response = self.download_file(metadata['url'])\n\n        with open(path, \"wb\") as file:\n            for data in response.iter_content(chunk_size=1024):\n                file.write(data)\n\n        return path, response", "docstring": "Download a file from a run and write it to wandb/\n\nArgs:\nmetadata (obj): The metadata object for the file to download. Comes from Api.download_urls().\n\nReturns:\nA tuple of the file's local path and the streaming response. The streaming response is None if the file already existed and was up to date.", "source": "juraj-google-style"}
{"code": "def Webhook(self, request, global_params=None):\n    config = self.GetMethodConfig('Webhook')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "ReceiveTriggerWebhook [Experimental] is called when the API receives a webhook request targeted at a specific trigger.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersWebhookRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ReceiveTriggerWebhookResponse) The response message.", "source": "github-repos"}
{"code": "def set_document_type(loader_cls: Type, type_: Type) -> None:\n    loader_cls.document_type = type_\n    if (not hasattr(loader_cls, '_registered_classes')):\n        loader_cls._registered_classes = dict()", "docstring": "Set the type corresponding to the whole document.\n\nArgs:\nloader_cls: The loader class to set the document type for.\ntype_: The type to loader should process the document into.", "source": "codesearchnet"}
{"code": "def validate_document(self, definition):\n    initial_document = {}\n    try:\n        initial_document = Loader.load(definition)\n    except RuntimeError as exception:\n        self.logger.error(str(exception))\n        sys.exit(1)\n    document = Validator().validate(initial_document)\n    if (document is None):\n        self.logger.info(\"Schema validation for '%s' has failed\", definition)\n        sys.exit(1)\n    self.logger.info(\"Schema validation for '%s' succeeded\", definition)\n    return document", "docstring": "Validate given pipeline document.\n\nThe method is trying to load, parse and validate the spline document.\nThe validator verifies the Python structure B{not} the file format.\n\nArgs:\ndefinition (str): path and filename of a yaml file containing a valid spline definition.\n\nReturns:\ndict: loaded and validated spline document.\n\nNote:\nif validation fails the application does exit!\n\nSee Also:\nspline.validation.Validator", "source": "codesearchnet"}
{"code": "def get_class(class_key):\n    if (class_key not in CLASSES):\n        for basecls in (MediaMetadata, MediaCollection):\n            if class_key.startswith(basecls.__name__):\n                class_name = ('MS' + class_key.replace(basecls.__name__, ''))\n                if (sys.version_info[0] == 2):\n                    class_name = class_name.encode('ascii')\n                CLASSES[class_key] = type(class_name, (basecls,), {})\n                _LOG.info('Class %s created', CLASSES[class_key])\n    return CLASSES[class_key]", "docstring": "Form a music service data structure class from the class key\n\nArgs:\nclass_key (str): A concatenation of the base class (e.g. MediaMetadata)\nand the class name\n\nReturns:\nclass: Subclass of MusicServiceItem", "source": "codesearchnet"}
{"code": "def path(self, goal):\n    if (goal == self.name):\n        return [self]\n    if (goal not in self.routes):\n        raise ValueError(\"Unknown '{0}'\".format(goal))\n    obj = self\n    path = [obj]\n    while True:\n        obj = obj.routes[goal].direction\n        path.append(obj)\n        if (obj.name == goal):\n            break\n    return path", "docstring": "Get the shortest way between two nodes of the graph\n\nArgs:\ngoal (str): Name of the targeted node\nReturn:\nlist of Node", "source": "codesearchnet"}
{"code": "def notify(self, notices):\n        \n        issues_html = get_template('unattached_ebs_volume.html')\n        issues_text = get_template('unattached_ebs_volume.txt')\n\n        for recipient, issues in list(notices.items()):\n            if issues:\n                message_html = issues_html.render(issues=issues)\n                message_text = issues_text.render(issues=issues)\n\n                send_notification(\n                    subsystem=self.name,\n                    recipients=[recipient],\n                    subject=self.subject,\n                    body_html=message_html,\n                    body_text=message_text\n                )", "docstring": "Send notifications to the users via. the provided methods\n\nArgs:\nnotices (:obj:`dict` of `str`: `dict`): List of the notifications to send\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def _convert_to_compatible_tensor(value, target, error_prefix):\n  \n  try:\n    tensor = tf_v1.convert_to_tensor_or_indexed_slices(value, target.dtype)\n  except TypeError as e:\n    raise TypeError(\"%s: %s\" % (error_prefix, e))\n  if _is_sparse(tensor) != _is_sparse(target):\n    if _is_sparse(tensor):\n      raise TypeError(\"%s: Is sparse. Expected dense.\" % error_prefix)\n    else:\n      raise TypeError(\"%s: Is dense. Expected sparse.\" % error_prefix)\n  if not tensor.get_shape().is_compatible_with(target.get_shape()):\n    raise TypeError(\"%s: Shape %r is incompatible with %r\" %\n                    (error_prefix, tensor.get_shape(), target.get_shape()))\n  return tensor", "docstring": "Converts `value` into a tensor that can be feed into `tensor_info`.\n\nArgs:\nvalue: A value to convert into Tensor or SparseTensor.\ntarget: An object returned by `parse_tensor_info_map`.\nerror_prefix: A string to prefix on raised TypeErrors.\n\nRaises:\nTypeError: If it fails to convert.\n\nReturns:\nA Tensor or SparseTensor compatible with tensor_info.", "source": "juraj-google-style"}
{"code": "def _setup(self, delete=True):\n        \n        if delete:\n            self.clear()\n        with nn.context_scope(self.ctx):\n            outputs = self.func(\n                *(self.inputs_f + self.func_args), **self.func_kwargs)\n            if not hasattr(outputs, '__iter__'):\n                self.outputs = [outputs]\n            else:\n                self.outputs = outputs\n        self.func_ins = self.outputs[0].parent\n        self.inputs = self.func_ins.inputs", "docstring": "Create a function instance and execute setup.\n\nArgs:\ndelete (bool): Delete buffered variables.", "source": "juraj-google-style"}
{"code": "def run(self, input_dir, output_file_path):\n    logging.info('Running defense %s', self.submission_id)\n    tmp_run_dir = self.temp_copy_extracted_submission()\n    output_dir = os.path.dirname(output_file_path)\n    output_filename = os.path.basename(output_file_path)\n    cmd = ['--network=none', '-m=24g', '--cpus=3.75', '-v', '{0}:/input_images:ro'.format(input_dir), '-v', '{0}:/output_data'.format(output_dir), '-v', '{0}:/code'.format(tmp_run_dir), '-w', '/code', self.container_name, ('./' + self.entry_point), '/input_images', ('/output_data/' + output_filename)]\n    elapsed_time_sec = self.run_with_time_limit(cmd)\n    sudo_remove_dirtree(tmp_run_dir)\n    return elapsed_time_sec", "docstring": "Runs defense inside Docker.\n\nArgs:\ninput_dir: directory with input (adversarial images).\noutput_file_path: path of the output file.\n\nReturns:\nhow long it took to run submission in seconds", "source": "codesearchnet"}
{"code": "def save_index(self, filename):\n        \n        data = {}\n        for f in self.files.values():\n            entities = {v.entity.id: v.value for k, v in f.tags.items()}\n            data[f.path] = {'domains': f.domains, 'entities': entities}\n        with open(filename, 'w') as outfile:\n            json.dump(data, outfile)", "docstring": "Save the current Layout's index to a .json file.\n\nArgs:\nfilename (str): Filename to write to.\n\nNote: At the moment, this won't serialize directory-specific config\nfiles. This means reconstructed indexes will only work properly in\ncases where there aren't multiple layout specs within a project.", "source": "juraj-google-style"}
{"code": "def _convert_int(self, value):\n    try:\n        return int(value)\n    except:\n        return None", "docstring": "Converts a value into a integer.\n\nArgs:\nvalue: String representation of a field from the Bulkdozer feed.\n\nReturns:\nIf possible to convert value into an integer, returns the integer\nrepresentation, otherwise None.", "source": "github-repos"}
{"code": "def _maybe_expand_labels(labels, predictions):\n    with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:\n        labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n        if isinstance(labels, sparse_tensor.SparseTensor):\n            return cond.cond(math_ops.equal(array_ops.rank(predictions), array_ops.size(labels.dense_shape) + 1), lambda: sparse_ops.sparse_reshape(labels, shape=array_ops.concat((labels.dense_shape, (1,)), 0), name=scope), lambda: labels)\n        labels_rank = labels.get_shape().ndims\n        if labels_rank is not None:\n            predictions_rank = predictions.get_shape().ndims\n            if predictions_rank is not None:\n                if predictions_rank == labels_rank:\n                    return labels\n                if predictions_rank == labels_rank + 1:\n                    return array_ops.expand_dims(labels, -1, name=scope)\n                raise ValueError(f'Unexpected labels shape {labels.get_shape()} for predictions shape {predictions.get_shape()}. Predictions rank should be the same rank as labels rank or labels rank plus one .')\n        return cond.cond(math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1), lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)", "docstring": "If necessary, expand `labels` along last dimension to match `predictions`.\n\nArgs:\nlabels: `Tensor` or `SparseTensor` with shape\n[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies\nnum_labels=1, in which case the result is an expanded `labels` with shape\n[D1, ... DN, 1].\npredictions: `Tensor` with shape [D1, ... DN, num_classes].\n\nReturns:\n`labels` with the same rank as `predictions`.\n\nRaises:\nValueError: if `labels` has invalid shape.", "source": "github-repos"}
{"code": "def export(self, remote_function):\n    if (self._worker.mode is None):\n        self._functions_to_export.append(remote_function)\n        return\n    if (self._worker.mode != ray.worker.SCRIPT_MODE):\n        return\n    self._do_export(remote_function)", "docstring": "Export a remote function.\n\nArgs:\nremote_function: the RemoteFunction object.", "source": "codesearchnet"}
{"code": "def _activation_summary(x):\n  \n  \n  \n  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n  tf.summary.histogram(tensor_name + '/activations', x)\n  tf.summary.scalar(tensor_name + '/sparsity',\n                                       tf.nn.zero_fraction(x))", "docstring": "Helper to create summaries for activations.\n\nCreates a summary that provides a histogram of activations.\nCreates a summary that measures the sparsity of activations.\n\nArgs:\nx: Tensor\nReturns:\nnothing", "source": "juraj-google-style"}
{"code": "def _find_dependencies(self, dataset_key, **dfilter):\n        \n        \n        try:\n            node = self.getitem(dataset_key)\n            LOG.trace(\"Found exact dataset already loaded: {}\".format(node.name))\n            return node, set()\n        except KeyError:\n            \n            LOG.trace(\"Exact dataset {} isn't loaded, will try reader...\".format(dataset_key))\n\n        \n        try:\n            node = self._find_reader_dataset(dataset_key, **dfilter)\n        except TooManyResults:\n            LOG.warning(\"Too many possible datasets to load for {}\".format(dataset_key))\n            return None, set([dataset_key])\n        if node is not None:\n            LOG.trace(\"Found reader provided dataset:\\n\\tRequested: {}\\n\\tFound: {}\".format(dataset_key, node.name))\n            return node, set()\n        LOG.trace(\"Could not find dataset in reader: {}\".format(dataset_key))\n\n        \n        try:\n            \n            \n            \n            node = self[dataset_key]\n            LOG.trace(\"Composite already loaded:\\n\\tRequested: {}\\n\\tFound: {}\".format(dataset_key, node.name))\n            return node, set()\n        except KeyError:\n            \n            LOG.trace(\"Composite hasn't been loaded yet, will load: {}\".format(dataset_key))\n\n        \n        try:\n            node, unknowns = self._find_compositor(dataset_key, **dfilter)\n            LOG.trace(\"Found composite:\\n\\tRequested: {}\\n\\tFound: {}\".format(dataset_key, node and node.name))\n        except KeyError:\n            node = None\n            unknowns = set([dataset_key])\n            LOG.trace(\"Composite not found: {}\".format(dataset_key))\n\n        return node, unknowns", "docstring": "Find the dependencies for *dataset_key*.\n\nArgs:\ndataset_key (str, float, DatasetID): Dataset identifier to locate\nand find any additional\ndependencies for.\n**dfilter (dict): Additional filter parameters. See\n`satpy.readers.get_key` for more details.", "source": "juraj-google-style"}
{"code": "def __parameter_descriptor(self, subfield_list):\n    descriptor = {}\n    final_subfield = subfield_list[(- 1)]\n    if all((subfield.required for subfield in subfield_list)):\n        descriptor['required'] = True\n    descriptor['type'] = self.__field_to_parameter_type(final_subfield)\n    default = self.__parameter_default(final_subfield)\n    if (default is not None):\n        descriptor['default'] = default\n    if any((subfield.repeated for subfield in subfield_list)):\n        descriptor['repeated'] = True\n    enum_descriptor = self.__parameter_enum(final_subfield)\n    if (enum_descriptor is not None):\n        descriptor['enum'] = enum_descriptor\n    return descriptor", "docstring": "Creates descriptor for a parameter using the subfields that define it.\n\nEach parameter is defined by a list of fields, with all but the last being\na message field and the final being a simple (non-message) field.\n\nMany of the fields in the descriptor are determined solely by the simple\nfield at the end, though some (such as repeated and required) take the whole\nchain of fields into consideration.\n\nArgs:\nsubfield_list: List of fields describing the parameter.\n\nReturns:\nDictionary containing a descriptor for the parameter described by the list\nof fields.", "source": "codesearchnet"}
{"code": "def find_files(paths, file_predicate):\n    file_list = []\n    for path in paths:\n        p = abs_path(path)\n        for (dirPath, _, fileList) in os.walk(p):\n            for fname in fileList:\n                (name, ext) = os.path.splitext(fname)\n                if file_predicate(name, ext):\n                    file_list.append((dirPath, name, ext))\n    return file_list", "docstring": "Locate files whose names and extensions match the given predicate in\nthe specified directories.\n\nArgs:\npaths: A list of directory paths where to find the files.\nfile_predicate: A function that returns True if the file name and\nextension are desired.\n\nReturns:\nA list of files that match the predicate.", "source": "codesearchnet"}
{"code": "def remove(self, path, dir_fd=None):\n        \n        path = self._path_with_dir_fd(path, self.remove, dir_fd)\n        self.filesystem.remove(path)", "docstring": "Remove the FakeFile object at the specified file path.\n\nArgs:\npath: Path to file to be removed.\ndir_fd: If not `None`, the file descriptor of a directory,\nwith `path` being relative to this directory.\nNew in Python 3.3.\n\nRaises:\nOSError: if path points to a directory.\nOSError: if path does not exist.\nOSError: if removal failed.", "source": "juraj-google-style"}
{"code": "def add_operator(self, operator):\n    if (not isinstance(operator, Operator)):\n        raise FiqlObjectException(('%s is not a valid element type' % operator.__class__))\n    if (not self._working_fragment.operator):\n        self._working_fragment.operator = operator\n    elif (operator > self._working_fragment.operator):\n        last_constraint = self._working_fragment.elements.pop()\n        self._working_fragment = self._working_fragment.create_nested_expression()\n        self._working_fragment.add_element(last_constraint)\n        self._working_fragment.add_operator(operator)\n    elif (operator < self._working_fragment.operator):\n        if self._working_fragment.parent:\n            return self._working_fragment.parent.add_operator(operator)\n        else:\n            return Expression().add_element(self._working_fragment).add_operator(operator)\n    return self", "docstring": "Add an ``Operator`` to the ``Expression``.\n\nThe ``Operator`` may result in a new ``Expression`` if an ``Operator``\nalready exists and is of a different precedence.\n\nThere are three possibilities when adding an ``Operator`` to an\n``Expression`` depending on whether or not an ``Operator`` already\nexists:\n\n- No ``Operator`` on the working ``Expression``; Simply set the\n``Operator`` and return ``self``.\n- ``Operator`` already exists and is higher in precedence; The\n``Operator`` and last ``Constraint`` belong in a sub-expression of\nthe working ``Expression``.\n- ``Operator`` already exists and is lower in precedence; The\n``Operator`` belongs to the parent of the working ``Expression``\nwhether one currently exists or not. To remain in the context of\nthe top ``Expression``, this method will return the parent here\nrather than ``self``.\n\nArgs:\noperator (Operator): What we are adding.\n\nReturns:\nExpression: ``self`` or related ``Expression``.\n\nRaises:\nFiqlObjectExpression: Operator is not a valid ``Operator``.", "source": "codesearchnet"}
{"code": "def on_graph_def(self, graph_def, device_name, wall_time):\n    del wall_time\n    self._graph_defs[device_name] = graph_def\n    if (not self._graph_defs_arrive_first):\n        self._add_graph_def(device_name, graph_def)\n        self._incoming_channel.get()", "docstring": "Implementation of the GraphDef-carrying Event proto callback.\n\nArgs:\ngraph_def: A GraphDef proto. N.B.: The GraphDef is from\nthe core runtime of a debugged Session::Run() call, after graph\npartition. Therefore it may differ from the GraphDef available to\nthe general TensorBoard. For example, the GraphDef in general\nTensorBoard may get partitioned for multiple devices (CPUs and GPUs),\neach of which will generate a GraphDef event proto sent to this\nmethod.\ndevice_name: Name of the device on which the graph was created.\nwall_time: An epoch timestamp (in microseconds) for the graph.", "source": "codesearchnet"}
{"code": "def get_signature(self, base_commit=None):\n        \n        if base_commit is None:\n            base_commit = 'HEAD'\n        self.run('add', '-A', self.path)\n        sha = self.run('rev-parse', '--verify', base_commit).strip()\n        diff = self.run('diff', sha).strip()\n        if len(diff) == 0:\n            try:\n                return self.get_signature(base_commit + '~1')\n            except CommandError:\n                pass\n        h = hashlib.sha1()\n        h.update(sha)\n        h.update(diff)\n        return h.hexdigest()", "docstring": "Get the signature of the current state of the repository\n\nTODO right now `get_signature` is an effectful process in that\nit adds all untracked file to staging. This is the only way to get\naccruate diff on new files. This is ok because we only use it on a\ndisposable copy of the repo.\n\nArgs:\nbase_commit - the base commit ('HEAD', sha, etc.)\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):\n    try:\n        fd = aff4.FACTORY.Open(aff4_urn, token=token)\n        filepath = os.path.join(target_dir, fd.urn.Path()[1:])\n        if isinstance(fd, standard.VFSDirectory):\n            try:\n                os.makedirs(filepath)\n            except OSError:\n                pass\n            return None\n        elif isinstance(fd, aff4.AFF4Stream):\n            if (not os.path.isfile(filepath)):\n                try:\n                    os.makedirs(os.path.dirname(filepath))\n                except OSError:\n                    pass\n                DownloadFile(fd, filepath)\n            elif ((os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE)) or overwrite):\n                DownloadFile(fd, filepath)\n            else:\n                logging.info('File %s exists, skipping', filepath)\n            return filepath\n        else:\n            raise ValueError(('Opened urn is neither a downloaded file nor a directory: %s' % aff4_urn))\n    except IOError as e:\n        logging.exception('Failed to read %s due to %s', aff4_urn, e)\n        raise", "docstring": "Copy an AFF4 object that supports a read interface to local filesystem.\n\nArgs:\naff4_urn: URN of thing to copy.\ntarget_dir: Directory to copy the file to.\ntoken: Auth token.\noverwrite: If True overwrite the file if it exists.\n\nReturns:\nIf aff4_urn points to a file, returns path to the downloaded file.\nOtherwise returns None.\n\nBy default file will only be overwritten if file size differs.", "source": "codesearchnet"}
{"code": "def noise_op(latents, hparams):\n    if ((hparams.latent_noise == 0) or (hparams.mode != tf.estimator.ModeKeys.TRAIN)):\n        return latents\n    latent_shape = common_layers.shape_list(latents)\n    return (latents + tf.random_normal(latent_shape, stddev=hparams.latent_noise))", "docstring": "Adds isotropic gaussian-noise to each latent.\n\nArgs:\nlatents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).\nhparams: HParams.\nReturns:\nlatents: latents with isotropic gaussian noise appended.", "source": "codesearchnet"}
{"code": "def S2_surface(self, sizes, bounds, presets, covers, use_torch=False, num_samples=10):\n    args = self.inputs\n    Si = self.sobol_analysis(num_samples, {'num_vars': len(args), 'names': args, 'bounds': [bounds[arg] for arg in args]}, covers)\n    S2 = Si['S2']\n    (s2_max, v1, v2) = get_max_s2_sensitivity(S2)\n    x_var = args[v1]\n    y_var = args[v2]\n    search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])]\n    preset_vals = {arg: presets[arg] for (i, arg) in enumerate(args) if ((i != v1) and (i != v2))}\n    X = np.linspace(*search_space[0][1], sizes[0])\n    Y = np.linspace(*search_space[1][1], sizes[1])\n    if use_torch:\n        (Xm, Ym) = torch.meshgrid(torch.tensor(X), torch.tensor(Y))\n        inputs = {n: torch.full_like(Xm, v) for (n, v) in presets.items()}\n        inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym})\n        Z = self.run(inputs, covers).numpy()\n    else:\n        (Xm, Ym) = np.meshgrid(X, Y)\n        Z = np.zeros((len(X), len(Y)))\n        for (x, y) in itertools.product(range(len(X)), range(len(Y))):\n            inputs = {n: v for (n, v) in presets.items()}\n            inputs.update({search_space[0][0]: x, search_space[1][0]: y})\n            Z[x][y] = self.run(inputs, covers)\n    return (X, Y, Z, x_var, y_var)", "docstring": "Calculates the sensitivity surface of a GrFN for the two variables with\nthe highest S2 index.\n\nArgs:\nnum_samples: Number of samples for sensitivity analysis.\nsizes: Tuple of (number of x inputs, number of y inputs).\nbounds: Set of bounds for GrFN inputs.\npresets: Set of standard values for GrFN inputs.\n\nReturns:\nTuple:\nTuple: The names of the two variables that were selected\nTuple: The X, Y vectors of eval values\nZ: The numpy matrix of output evaluations", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    \n    \n    if not self._encoding:\n      self._encoding = parser_mediator.codepage\n\n    try:\n      if not self._HasExpectedLineLength(file_object):\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile((\n            '[{0:s}] Unable to parse DSV file: {1:s} with error: '\n            'unexpected line length.').format(self.NAME, display_name))\n    except UnicodeDecodeError as exception:\n      display_name = parser_mediator.GetDisplayName()\n      raise errors.UnableToParseFile(\n          '[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(\n              self.NAME, display_name, exception))\n\n    try:\n      line_reader = self._CreateLineReader(file_object)\n      reader = self._CreateDictReader(line_reader)\n      row_offset = line_reader.tell()\n      row = next(reader)\n    except (StopIteration, csv.Error, UnicodeDecodeError) as exception:\n      display_name = parser_mediator.GetDisplayName()\n      raise errors.UnableToParseFile(\n          '[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(\n              self.NAME, display_name, exception))\n\n    number_of_columns = len(self.COLUMNS)\n    number_of_records = len(row)\n\n    if number_of_records != number_of_columns:\n      display_name = parser_mediator.GetDisplayName()\n      raise errors.UnableToParseFile((\n          '[{0:s}] Unable to parse DSV file: {1:s}. Wrong number of '\n          'records (expected: {2:d}, got: {3:d})').format(\n              self.NAME, display_name, number_of_columns,\n              number_of_records))\n\n    for key, value in row.items():\n      if self._MAGIC_TEST_STRING in (key, value):\n        display_name = parser_mediator.GetDisplayName()\n        raise errors.UnableToParseFile((\n            '[{0:s}] Unable to parse DSV file: {1:s}. Signature '\n            'mismatch.').format(self.NAME, display_name))\n\n    row = self._ConvertRowToUnicode(parser_mediator, row)\n\n    if not self.VerifyRow(parser_mediator, row):\n      display_name = parser_mediator.GetDisplayName()\n      raise errors.UnableToParseFile((\n          '[{0:s}] Unable to parse DSV file: {1:s}. Verification '\n          'failed.').format(self.NAME, display_name))\n\n    self.ParseRow(parser_mediator, row_offset, row)\n    row_offset = line_reader.tell()\n\n    for row in reader:\n      if parser_mediator.abort:\n        break\n      row = self._ConvertRowToUnicode(parser_mediator, row)\n      self.ParseRow(parser_mediator, row_offset, row)\n      row_offset = line_reader.tell()", "docstring": "Parses a DSV text file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def _try_guard_against_uninitialized_dependencies(name, initial_value):\n    if not isinstance(initial_value, tensor_lib.Tensor):\n        raise TypeError('initial_value needs to be a Tensor: %s' % initial_value)\n    if _has_cycle(initial_value.op, state={}):\n        return initial_value\n    return _safe_initial_value_from_tensor(name, initial_value, op_cache={})", "docstring": "Attempt to guard against dependencies on uninitialized variables.\n\nReplace references to variables in `initial_value` with references to the\nvariable's initialized values. The initialized values are essentially\nconditional TensorFlow graphs that return a variable's value if it is\ninitialized or its `initial_value` if it hasn't been initialized. This\nreplacement is done on a best effort basis:\n\n- If the `initial_value` graph contains cycles, we don't do any\nreplacements for that graph.\n- If the variables that `initial_value` depends on are not present in the\n`GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.\n\nIn these cases, it is up to the caller to ensure that the `initial_value`\ngraph uses initialized variables or that they guard access to variables\nusing their `initialized_value` method.\n\nArgs:\nname: Variable name.\ninitial_value: `Tensor`. The initial value.\n\nReturns:\nA `Tensor` suitable to initialize a variable.\nRaises:\nTypeError: If `initial_value` is not a `Tensor`.", "source": "github-repos"}
{"code": "def deprecated(replacement=None, message=None):\n    \n\n    def wrap(old):\n        def wrapped(*args, **kwargs):\n            msg = \"%s is deprecated\" % old.__name__\n            if replacement is not None:\n                if isinstance(replacement, property):\n                    r = replacement.fget\n                elif isinstance(replacement, (classmethod, staticmethod)):\n                    r = replacement.__func__\n                else:\n                    r = replacement\n                msg += \"; use %s in %s instead.\" % (r.__name__, r.__module__)\n            if message is not None:\n                msg += \"\\n\" + message\n            warnings.simplefilter('default')\n            warnings.warn(msg, DeprecationWarning, stacklevel=2)\n            return old(*args, **kwargs)\n\n        return wrapped\n\n    return wrap", "docstring": "Decorator to mark classes or functions as deprecated,\nwith a possible replacement.\n\nArgs:\nreplacement (callable): A replacement class or method.\nmessage (str): A warning message to be displayed.\n\nReturns:\nOriginal function, but with a warning to use the updated class.", "source": "juraj-google-style"}
{"code": "def create(cls, extension_name=None, extension_tag=None, extension_type=None):\n    extension_name = ExtensionName(extension_name)\n    extension_tag = ExtensionTag(extension_tag)\n    extension_type = ExtensionType(extension_type)\n    return ExtensionInformation(extension_name=extension_name, extension_tag=extension_tag, extension_type=extension_type)", "docstring": "Construct an ExtensionInformation object from provided extension\nvalues.\n\nArgs:\nextension_name (str): The name of the extension. Optional,\ndefaults to None.\nextension_tag (int): The tag number of the extension. Optional,\ndefaults to None.\nextension_type (int): The type index of the extension. Optional,\ndefaults to None.\n\nReturns:\nExtensionInformation: The newly created set of extension\ninformation.\n\nExample:\n>>> x = ExtensionInformation.create('extension', 1, 1)\n>>> x.extension_name.value\nExtensionName(value='extension')\n>>> x.extension_tag.value\nExtensionTag(value=1)\n>>> x.extension_type.value\nExtensionType(value=1)", "source": "codesearchnet"}
{"code": "def _match_against_protocol(self, left, other_type, subst, view):\n    if isinstance(left.cls, abstract.AMBIGUOUS_OR_EMPTY):\n        return subst\n    elif left.cls.is_dynamic:\n        return self._subst_with_type_parameters_from(subst, other_type)\n    elif other_type.full_name == 'typing.Sequence' and any((cls.full_name == 'typing.Mapping' for cls in left.cls.mro)):\n        return None\n    left_attributes = self._get_attribute_names(left)\n    missing = other_type.protocol_attributes - left_attributes\n    if missing:\n        self._protocol_error = error_types.ProtocolMissingAttributesError(left.cls, other_type, missing)\n        return None\n    key = (left.cls, other_type)\n    if key in self._protocol_cache:\n        return subst\n    self._protocol_cache.add(key)\n    new_substs = []\n    for attribute in other_type.protocol_attributes:\n        new_subst = self._match_protocol_attribute(left, other_type, attribute, subst, view)\n        if new_subst is None:\n            return None\n        new_substs.append(new_subst)\n    return self._merge_substs(subst, new_substs)", "docstring": "Checks whether a type is compatible with a protocol.\n\nArgs:\nleft: An instance of a type.\nother_type: A protocol.\nsubst: The current type parameter assignment.\nview: The current mapping of Variable to Value.\n\nReturns:\nA new type parameter assignment if the matching succeeded, None otherwise.", "source": "github-repos"}
{"code": "def get_graph_element_name(elem):\n    return elem.name if hasattr(elem, 'name') else str(elem)", "docstring": "Obtain the name or string representation of a graph element.\n\nIf the graph element has the attribute \"name\", return name. Otherwise, return\na __str__ representation of the graph element. Certain graph elements, such as\n`SparseTensor`s, do not have the attribute \"name\".\n\nArgs:\nelem: The graph element in question.\n\nReturns:\nIf the attribute 'name' is available, return the name. Otherwise, return\nstr(fetch).", "source": "github-repos"}
{"code": "def reset(self, ms=0, halt=True):\n    self._dll.JLINKARM_SetResetDelay(ms)\n    res = self._dll.JLINKARM_Reset()\n    if (res < 0):\n        raise errors.JLinkException(res)\n    elif (not halt):\n        self._dll.JLINKARM_Go()\n    return res", "docstring": "Resets the target.\n\nThis method resets the target, and by default toggles the RESET and\nTRST pins.\n\nArgs:\nself (JLink): the ``JLink`` instance\nms (int): Amount of milliseconds to delay after reset (default: 0)\nhalt (bool): if the CPU should halt after reset (default: True)\n\nReturns:\nNumber of bytes read.", "source": "codesearchnet"}
{"code": "def dot(self, y, t=None, A=None, U=None, V=None, kernel=None, check_sorted=True):\n    if (kernel is None):\n        kernel = self.kernel\n    if (t is not None):\n        t = np.atleast_1d(t)\n        if (check_sorted and np.any((np.diff(t) < 0.0))):\n            raise ValueError('the input coordinates must be sorted')\n        if (check_sorted and (len(t.shape) > 1)):\n            raise ValueError('dimension mismatch')\n        A = (np.empty(0) if (A is None) else A)\n        U = (np.empty((0, 0)) if (U is None) else U)\n        V = (np.empty((0, 0)) if (V is None) else V)\n    else:\n        if (not self.computed):\n            raise RuntimeError(\"you must call 'compute' first\")\n        t = self._t\n        A = self._A\n        U = self._U\n        V = self._V\n    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = kernel.coefficients\n    return self.solver.dot(kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, A, U, V, t, np.ascontiguousarray(y, dtype=float))", "docstring": "Dot the covariance matrix into a vector or matrix\n\nCompute ``K.y`` where ``K`` is the covariance matrix of the GP without\nthe white noise or ``yerr`` values on the diagonal.\n\nArgs:\ny (array[n] or array[n, nrhs]): The vector or matrix ``y``\ndescribed above.\nkernel (Optional[terms.Term]): A different kernel can optionally\nbe provided to compute the matrix ``K`` from a different\nkernel than the ``kernel`` property on this object.\n\nReturns:\narray[n] or array[n, nrhs]: The dot product ``K.y`` as described\nabove. This will have the same shape as ``y``.\n\nRaises:\nValueError: For mismatched dimensions.", "source": "codesearchnet"}
{"code": "def ParseContainersTable(\n      self, parser_mediator, database=None, table=None, **unused_kwargs):\n    \n    if database is None:\n      raise ValueError('Missing database value.')\n\n    if table is None:\n      raise ValueError('Missing table value.')\n\n    for esedb_record in table.records:\n      if parser_mediator.abort:\n        break\n\n      record_values = self._GetRecordValues(\n          parser_mediator, table.name, esedb_record)\n\n      event_data = MsieWebCacheContainersEventData()\n      event_data.container_identifier = record_values.get('ContainerId', None)\n      event_data.directory = record_values.get('Directory', None)\n      event_data.name = record_values.get('Name', None)\n      event_data.set_identifier = record_values.get('SetId', None)\n\n      timestamp = record_values.get('LastScavengeTime', None)\n      if timestamp:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(\n            date_time, 'Last Scavenge Time')\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      timestamp = record_values.get('LastAccessTime', None)\n      if timestamp:\n        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(\n            date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      container_identifier = record_values.get('ContainerId', None)\n      container_name = record_values.get('Name', None)\n\n      if not container_identifier or not container_name:\n        continue\n\n      table_name = 'Container_{0:d}'.format(container_identifier)\n      esedb_table = database.get_table_by_name(table_name)\n      if not esedb_table:\n        parser_mediator.ProduceExtractionWarning(\n            'Missing table: {0:s}'.format(table_name))\n        continue\n\n      self._ParseContainerTable(parser_mediator, esedb_table, container_name)", "docstring": "Parses the Containers table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.\n\nRaises:\nValueError: if the database or table value is missing.", "source": "juraj-google-style"}
{"code": "def unpack_byte(self, offset):\n        \n        o = self._offset + offset\n        try:\n            return struct.unpack_from(\"<B\", self._buf, o)[0]\n        except struct.error:\n            raise OverrunBufferException(o, len(self._buf))", "docstring": "Returns a little-endian unsigned byte from the relative offset.\nArguments:\n- `offset`: The relative offset from the start of the block.\nThrows:\n- `OverrunBufferException`", "source": "juraj-google-style"}
{"code": "def consolidate(self, args):\n    result = dict(args)\n    for opt in self:\n        if (opt.name in result):\n            result[opt.name] = opt.convert(result[opt.name])\n        elif (opt.default is not None):\n            result[opt.name] = opt.convert(opt.default)\n    return result", "docstring": "Consolidate the provided arguments.\n\nIf the provided arguments have matching options, this performs a type conversion.\nFor any option that has a default value and is not present in the provided\narguments, the default value is added.\n\nArgs:\nargs (dict): A dictionary of the provided arguments.\n\nReturns:\ndict: A dictionary with the type converted and with default options enriched\narguments.", "source": "codesearchnet"}
{"code": "def prange(N=1, dim=1):\n    A = {}\n    r = numpy.arange(N, dtype=int)\n    key = numpy.zeros(dim, dtype=int)\n    for i in range(N):\n        key[(- 1)] = i\n        A[tuple(key)] = (1 * (r == i))\n    return Poly(A, dim, (N,), int)", "docstring": "Constructor to create a range of polynomials where the exponent vary.\n\nArgs:\nN (int):\nNumber of polynomials in the array.\ndim (int):\nThe dimension the polynomial should span.\n\nReturns:\n(Poly):\nA polynomial array of length N containing simple polynomials with\nincreasing exponent.\n\nExamples:\n>>> print(prange(4))\n[1, q0, q0^2, q0^3]\n>>> print(prange(4, dim=3))\n[1, q2, q2^2, q2^3]", "source": "codesearchnet"}
{"code": "def resize_tensor_input(self, input_index, tensor_size, strict=False):\n    self._ensure_safe()\n    tensor_size = np.array(tensor_size, dtype=np.int32)\n    self._interpreter.ResizeInputTensor(input_index, tensor_size, strict)", "docstring": "Resizes an input tensor.\n\nArgs:\ninput_index: Tensor index of input to set. This value can be gotten from\nthe 'index' field in get_input_details.\ntensor_size: The tensor_shape to resize the input to.\nstrict: Only unknown dimensions can be resized when `strict` is True.\nUnknown dimensions are indicated as `-1` in the `shape_signature`\nattribute of a given tensor. (default False)\n\nRaises:\nValueError: If the interpreter could not resize the input tensor.\n\nUsage:\n```\ninterpreter = Interpreter(model_content=tflite_model)\ninterpreter.resize_tensor_input(0, [num_test_images, 224, 224, 3])\ninterpreter.allocate_tensors()\ninterpreter.set_tensor(0, test_images)\ninterpreter.invoke()\n```", "source": "github-repos"}
{"code": "def WriteGraphSeries(graph_series, label, token=None):\n    if data_store.RelationalDBEnabled():\n        data_store.REL_DB.WriteClientGraphSeries(graph_series, label)\n    if _ShouldUseLegacyDatastore():\n        aff4_attr = _GetAFF4AttributeForReportType(graph_series.report_type)()\n        if isinstance(aff4_attr, rdf_stats.GraphSeries):\n            for graph in graph_series.graphs:\n                aff4_attr.Append(graph)\n        elif isinstance(aff4_attr, rdf_stats.Graph):\n            for sample in graph_series.graphs[0]:\n                aff4_attr.Append(x_value=sample.x_value, y_value=sample.y_value)\n        else:\n            raise AFF4AttributeTypeError(aff4_attr.__class__)\n        with aff4.FACTORY.Create(GetAFF4ClientReportsURN().Add(label), aff4_type=aff4_stats.ClientFleetStats, mode='w', token=token) as stats_for_label:\n            stats_for_label.AddAttribute(aff4_attr)", "docstring": "Writes graph series for a particular client label to the DB.\n\nArgs:\ngraph_series: A series of rdf_stats.Graphs containing aggregated data for a\nparticular report-type.\nlabel: Client label by which data in the graph_series was aggregated.\ntoken: ACL token to use for writing to the legacy (non-relational)\ndatastore.\n\nRaises:\nAFF4AttributeTypeError: If, when writing to the legacy DB, an unexpected\nreport-data type is encountered.", "source": "codesearchnet"}
{"code": "def unpack_inputs(func):\n    original_signature = inspect.signature(func)\n\n    @functools.wraps(func)\n    def run_call_with_unpacked_inputs(self, *args, **kwargs):\n        kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)}\n        fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call}\n        fn_args_and_kwargs.update({'kwargs_call': kwargs_call})\n        fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args)))\n        if 'EncoderDecoder' in self.__class__.__name__:\n            config = None\n        else:\n            config = self.config\n        unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs)\n        return func(self, **unpacked_inputs)\n    run_call_with_unpacked_inputs.__signature__ = original_signature\n    return run_call_with_unpacked_inputs", "docstring": "Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables\ndownstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input\n(common case in Keras).\n\nArgs:\nfunc (`callable`):\nThe callable function of the TensorFlow model.\n\n\nReturns:\nA callable that wraps the original `func` with the behavior described above.", "source": "github-repos"}
{"code": "def _url_format(self, service):\n        \n        base_service_url = '{base}{service}'.format(\n            base=self.urlbase,\n            service=service\n        )\n        return base_service_url", "docstring": "Generate URL from urlbase and service.\n\nArgs:\nservice (str): The endpoint service to use, i.e. gradebook\nReturns:\nstr: URL to where the request should be made", "source": "juraj-google-style"}
{"code": "def create_from_binary(cls, binary_view):\n    (attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, content_len, content_offset, indexed_flag) = cls._REPR.unpack(binary_view[:cls._REPR.size])\n    if name_len:\n        name = binary_view[name_offset:(name_offset + (2 * name_len))].tobytes().decode('utf_16_le')\n    else:\n        name = None\n    nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (content_len, content_offset, indexed_flag))\n    return nw_obj", "docstring": "Creates a new object AttributeHeader from a binary stream. The binary\nstream can be represented by a byte string, bytearray or a memoryview of the\nbytearray.\n\nArgs:\nbinary_view (memoryview of bytearray) - A binary stream with the\ninformation of the attribute\n\nReturns:\nAttributeHeader: New object using hte binary stream as source", "source": "codesearchnet"}
{"code": "def __init__(self, tcex, name, to, from_addr, subject, body, header, owner=None, **kwargs):\n        \n        super(Email, self).__init__(tcex, 'emails', name, owner, **kwargs)\n        self.api_entity = 'email'\n        self._data['to'] = to or kwargs.get('to')\n        self._data['from'] = from_addr or kwargs.get('from_addr')\n        self._data['subject'] = subject or kwargs.get('subject')\n        self._data['body'] = body or kwargs.get('body')\n        self._data['header'] = header or kwargs.get('header')\n        self._data['score'] = kwargs.get('score', 0)", "docstring": "Initialize Class Properties.\n\nArgs:\nname (str): The name for this Group.\nsubject (str): The subject for this Email.\nheader (str): The header for this Email.\nbody (str): The body for this Email.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nfrom_addr (str, kwargs): The **from** address for this Email.\nto (str, kwargs): The **to** address for this Email.", "source": "juraj-google-style"}
{"code": "def register(self, task_json=None, json_filename=None):\n        \n        if not task_json and not json_filename:\n            raise Exception(\"Both task json and filename can't be none.\")\n\n        if task_json and json_filename:\n            raise Exception(\"Both task json and filename can't be provided.\")\n\n        if json_filename:\n            task_json = json.load(open(json_filename, 'r'))\n\n        r = self.gbdx_connection.post(self._base_url, json=task_json)\n        raise_for_status(r)\n\n        return r.text", "docstring": "Registers a new GBDX task.\n\nArgs:\ntask_json (dict): Dictionary representing task definition.\njson_filename (str): A full path of a file with json representing the task definition.\nOnly one out of task_json and json_filename should be provided.\nReturns:\nResponse (str).", "source": "juraj-google-style"}
{"code": "def AtMaximumDepth(self, search_depth):\n    \n    if self._key_path_segments is not None:\n      if search_depth >= self._number_of_key_path_segments:\n        return True\n\n    return False", "docstring": "Determines if the find specification is at maximum depth.\n\nArgs:\nsearch_depth (int): number of key path segments to compare.\n\nReturns:\nbool: True if at maximum depth, False if not.", "source": "juraj-google-style"}
{"code": "def binomial_coefficient(n, k):\n    if ((not isinstance(k, int)) or (not isinstance(n, int))):\n        raise TypeError('Expecting positive integers')\n    if (k > n):\n        raise ValueError('k must be lower or equal than n')\n    if ((k < 0) or (n < 0)):\n        raise ValueError('Expecting positive integers')\n    return (factorial(n)", "docstring": "Calculate the binomial coefficient indexed by n and k.\n\nArgs:\nn (int): positive integer\nk (int): positive integer\n\nReturns:\nThe binomial coefficient indexed by n and k\n\nRaises:\nTypeError: If either n or k is not an integer\nValueError: If either n or k is negative, or if k is strictly greater than n", "source": "codesearchnet"}
{"code": "async def get_jsone_context_and_template(chain, parent_link, decision_link, tasks_for):\n    if (tasks_for == 'action'):\n        (jsone_context, tmpl) = (await get_action_context_and_template(chain, parent_link, decision_link))\n    else:\n        tmpl = (await get_in_tree_template(decision_link))\n        jsone_context = (await populate_jsone_context(chain, parent_link, decision_link, tasks_for))\n    return (jsone_context, tmpl)", "docstring": "Get the appropriate json-e context and template for any parent task.\n\nArgs:\nchain (ChainOfTrust): the chain of trust.\nparent_link (LinkOfTrust): the parent link to test.\ndecision_link (LinkOfTrust): the parent link's decision task link.\ntasks_for (str): the reason the parent link was created (cron,\nhg-push, action)\n\nReturns:\n(dict, dict): the json-e context and template.", "source": "codesearchnet"}
{"code": "def topdown(cls):\n    return tuple(unique_everseen((r for r in cls._instances.values() if (r.direction == 'topdown'))))", "docstring": "Get all topdown `Relationship` instances.\n\nReturns:\n:obj:`generator`\n\nExample:\n\n>>> from pronto import Relationship\n>>> for r in Relationship.topdown():\n...    print(r)\nRelationship('can_be')\nRelationship('has_part')", "source": "codesearchnet"}
{"code": "def target_code_to_name(code):\n    TARGET_NAMES = {v: k for (k, v) in TARGET_CODES.items()}\n    return TARGET_NAMES[code]", "docstring": "Converts an int target code to a target name\n\nSince self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup\nto get the more readable name.\n\nArgs:\ncode: Value from self.TARGET_CODES\n\nReturns:\nString target name corresponding to the given code.", "source": "codesearchnet"}
{"code": "def cluster_info(cpu, cfg):\n    cpus = cpu.cpu_count\n    pods_per_core = cfg.doc.find('pods-per-core')\n    pods_per_core_int = (int(pods_per_core.value) if pods_per_core else PODS_PER_CORE)\n    cfg_max_pods = cfg.doc.find('max-pods')\n    cfg_max_pods_int = (int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS)\n    calc_max_pods = (cpus * pods_per_core_int)\n    return {'cpu_count': cpus, 'pods_per_core': pods_per_core_int, 'pods_per_core_customized': bool(pods_per_core), 'max_pods': min(cfg_max_pods_int, calc_max_pods), 'max_pods_customized': bool(cfg_max_pods)}", "docstring": "Collects fact for each host\n\nCollects the cpu and node configuration facts to be used by the rule.\n\nArguments:\ncpu (CpuInfo): Parser object for the cpu info.\ncfg (NodeConfig): Parser object for the node configuration.\n\nReturns:\ndict: Dictionary of fact information including the keys\n``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``,\n``max_pods``, and ``max_pods_customized``.", "source": "codesearchnet"}
{"code": "def decode_from_file(estimator, vocabulary, model_type, batch_size, sequence_length, checkpoint_path='', input_filename=gin.REQUIRED, output_filename=gin.REQUIRED, eos_id=1):\n    with tf.gfile.Open(input_filename) as f:\n        text = f.read()\n    records = text.split('\\n')\n    inputs = [record.strip() for record in records]\n    if (not inputs[(- 1)]):\n        inputs.pop()\n    n = len(inputs)\n    all_input_ids = []\n    for line in inputs:\n        ids = inputs_vocabulary(vocabulary).encode(line.strip())\n        if (model_type != 'lm'):\n            ids += [eos_id]\n        if (len(ids) > sequence_length):\n            ids = ids[:sequence_length]\n        else:\n            ids.extend(([0] * (sequence_length - len(ids))))\n            all_input_ids.append(ids)\n    all_input_ids.extend(([all_input_ids[0]] * ((- n) % batch_size)))\n    padded_n = len(all_input_ids)\n    all_input_ids = np.array(all_input_ids, dtype=np.int32)\n\n    def input_fn(params):\n        del params\n        dataset = tf.data.Dataset.from_tensor_slices({'inputs': all_input_ids})\n        dataset = dataset.batch(batch_size, drop_remainder=True)\n        return dataset\n    result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)\n    vocab_size = targets_vocabulary(vocabulary).vocab_size\n    decodes = []\n    for (i, result) in enumerate(result_iter):\n        output_ids = clean_decodes(list(result['outputs']), vocab_size)\n        output_string = targets_vocabulary(vocabulary).decode([int(x) for x in output_ids])\n        decodes.append(output_string)\n        if ((i & (i - 1)) == 0):\n            if (i < len(inputs)):\n                tf.logging.info(('decode %d input = %s' % (i, inputs[i])))\n                tf.logging.info(('          output = %s' % output_string))\n    if (len(decodes) == padded_n):\n        tf.logging.info('number of decodes matches number of inputs')\n    elif ((len(decodes) % padded_n) == 0):\n        num_cores = (len(decodes) \n        tf.logging.info('output is repeated num_cores times - removing extras')\n\n        def keep(i):\n            return ((i % (batch_size * num_cores)) < batch_size)\n        decodes = [d for (i, d) in enumerate(decodes) if keep(i)]\n    else:\n        raise ValueError('unexpected number of outputs')\n    output_file = tf.gfile.Open(output_filename, 'w')\n    decodes = decodes[:n]\n    for d in decodes:\n        output_file.write(d)\n        output_file.write('\\n')\n    output_file.close()", "docstring": "Decode from a text file.\n\nArgs:\nestimator: a TPUEstimator\nvocabulary: a mtf.transformer.vocabulary.Vocabulary\nmodel_type: a string\nbatch_size: an integer\nsequence_length: an integer (maximum decode length)\ncheckpoint_path: an optional string\ninput_filename: a string\noutput_filename: a string\neos_id: EOS id", "source": "codesearchnet"}
{"code": "def multi_frontier_two_objective_reward(example):\n    int_val = int(example * 10)\n    if int_val >= 0 and int_val < 3:\n        return [int_val, 10 - int_val]\n    elif int_val >= 3 and int_val < 7:\n        return [int_val * 10, 100 - int_val * 10]\n    else:\n        return [int_val, 10 - int_val]", "docstring": "Reward for the trivial search space.\n\nThe reward (i.e. fitness) is a 2-element list. The goal of the search,\ntherefore, is to find the pareto frontier in\nmulti_frontier_two_objective_pareto function.\n\nArgs:\nexample: a materialized value.\n\nReturns:\nA 2-element list.", "source": "github-repos"}
{"code": "def add_chunk(self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes, index=None) -> None:\n    if self._parent_splitter is not None:\n        self._parent_splitter.add_chunk(chunk, self._fields_in_parent + field_tags, index)\n    else:\n        assert self._chunks is not None\n        assert self._chunked_message is not None\n        field = self._chunked_message.chunked_fields.add(field_tag=util.get_field_tag(self._proto, field_tags))\n        new_chunk_index = len(self._chunks)\n        field.message.chunk_index = new_chunk_index\n        self._add_chunk_order.append(id(chunk))\n        if index is None:\n            self._chunks.append(chunk)\n        else:\n            self._chunks.insert(index, chunk)\n            self._fix_chunk_order = True", "docstring": "Adds a new chunk and updates the ChunkedMessage proto.\n\nArgs:\nchunk: Proto message or bytes.\nfield_tags: Field information about the placement of the chunked data\nwithin self._proto.\nindex: Optional index at which to insert the chunk. The chunk ordering is\nimportant for merging.", "source": "github-repos"}
{"code": "def acquire(self, constructor_fn: Callable[[], Any], tag: Any=None) -> Any:\n    with self._lock:\n        if self._ref is None or self._ref() is None or self._tag != tag:\n            result = constructor_fn()\n            if result is None:\n                return None\n            self._ref = weakref.ref(result)\n            self._tag = tag\n        else:\n            result = self._ref()\n    return result", "docstring": "Acquire a reference to the object this shared control block manages.\n\nArgs:\nconstructor_fn: function that initialises / constructs the object if not\npresent in the cache. This function should take no arguments. It should\nreturn an initialised object, or None if the object could not be\ninitialised / constructed.\ntag: an optional indentifier to store with the cached object. If\nsubsequent calls to acquire use different tags, the object will be\nreloaded rather than returned from cache.\n\nReturns:\nAn initialised object, either from a previous initialisation, or\nnewly-constructed.", "source": "github-repos"}
{"code": "def activate_async(fn, _engine):\n\n    @coroutine\n    @functools.wraps(fn)\n    def wrapper(*args, **kw):\n        _engine.activate()\n        try:\n            if iscoroutinefunction(fn):\n                (yield from fn(*args, **kw))\n            else:\n                fn(*args, **kw)\n        finally:\n            _engine.disable()\n    return wrapper", "docstring": "Async version of activate decorator\n\nArguments:\nfn (function): function that be wrapped by decorator.\n_engine (Engine): pook engine instance\n\nReturns:\nfunction: decorator wrapper function.", "source": "codesearchnet"}
{"code": "def create_game(self, map_name):\n    map_inst = maps.get(map_name)\n    map_data = map_inst.data(self._run_config)\n    if (map_name not in self._saved_maps):\n        for controller in self._controllers:\n            controller.save_map(map_inst.path, map_data)\n        self._saved_maps.add(map_name)\n    create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(map_path=map_inst.path), disable_fog=False)\n    for _ in range(self._num_agents):\n        create.player_setup.add(type=sc_pb.Participant)\n    self._controllers[0].create_game(create)", "docstring": "Create a game for the agents to join.\n\nArgs:\nmap_name: The map to use.", "source": "codesearchnet"}
{"code": "def from_primitive(cls, primitive: message.Message, context: Context) -> 'PrimitiveWrapper':\n    result = cls(primitive, context)\n    result.validate_wrapped()\n    return result", "docstring": "Instantiates a new version of PrimitiveWrapper wrapping primitive.\n\nArgs:\nprimitive: The FHIR primitive message to wrap and validate.\ncontext: Related primitive information to use for printing/parsing a\nwrapped primitive.\n\nReturns:\nAn instance of PrimitiveWrapper.", "source": "github-repos"}
{"code": "def apply_sync(fn: StreamFn, content: Iterable[_T]) -> list[_T]:\n\n    async def run_with_context():\n        async with context.context():\n            as_async = streams.stream_content(content)\n            return await streams.gather_stream(fn(as_async))\n    return asyncio.run(run_with_context())", "docstring": "Applies a part function synchronously.\n\nArgs:\nfn: the part function to apply to the content.\ncontent: a collection of inputs/parts on which to apply the function.\n\nReturns:\nthe content, with the function `fn` applied to each input/part.", "source": "github-repos"}
{"code": "def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):\n    if (max_results == 0):\n        max_results = Api._MAX_RESULTS\n    args = {'project': (project_id if project_id else self._project_id), 'maxResults': max_results}\n    if (projection is not None):\n        args['projection'] = projection\n    if (page_token is not None):\n        args['pageToken'] = page_token\n    url = (Api._ENDPOINT + (Api._BUCKET_PATH % ''))\n    return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials)", "docstring": "Issues a request to retrieve the list of buckets.\n\nArgs:\nprojection: the projection of the bucket information to retrieve.\nmax_results: an optional maximum number of objects to retrieve.\npage_token: an optional token to continue the retrieval.\nproject_id: the project whose buckets should be listed.\nReturns:\nA parsed list of bucket information dictionaries.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"}
{"code": "def filter(self, versions, key=lambda x: x):\n        \n\n        return [x for x in versions if self.check(key(x))]", "docstring": "Filter all of the versions in an iterable that match this version range\n\nArgs:\nversions (iterable): An iterable of SemanticVersion objects\n\nReturns:\nlist: A list of the SemanticVersion objects that matched this range", "source": "juraj-google-style"}
{"code": "def make_tar_stream(build_context, buffer):\n    \n    tf = tarfile.TarFile(fileobj=buffer, mode='w')\n    for context_path, fileobj in build_context.items():\n        if getattr(fileobj, 'localpath', None) is not None:\n            tf.add(fileobj.localpath, arcname=context_path)\n        else:\n            tar_add_bytes(tf, context_path, fileobj.read('rb'))\n    tf.close()", "docstring": "Write a tar stream of the build context to the provided buffer\n\nArgs:\nbuild_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping filenames to file references\nbuffer (io.BytesIO): writable binary mode buffer", "source": "juraj-google-style"}
{"code": "def remove_interceptor(self, name):\n    for (index, interceptor) in enumerate(self.interceptors):\n        matches = ((type(interceptor).__name__ == name) or (getattr(interceptor, 'name') == name))\n        if matches:\n            self.interceptors.pop(index)\n            return True\n    return False", "docstring": "Removes a specific interceptor by name.\n\nArguments:\nname (str): interceptor name to disable.\n\nReturns:\nbool: `True` if the interceptor was disabled, otherwise `False`.", "source": "codesearchnet"}
{"code": "def deep_update(d, u):\n  \n\n  for k, v in u.items():\n    if isinstance(v, Mapping):\n      d[k] = deep_update(d.get(k, {}), v)\n    elif isinstance(v, list):\n      existing_elements = d.get(k, [])\n      d[k] = existing_elements + [ele for ele in v if ele not in existing_elements]\n    else:\n      d[k] = v\n\n  return d", "docstring": "Deeply updates a dictionary. List values are concatenated.\n\nArgs:\nd (dict): First dictionary which will be updated\nu (dict): Second dictionary use to extend the first one\n\nReturns:\ndict: The merge dictionary", "source": "juraj-google-style"}
{"code": "def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):\n    try:\n        student_item = StudentItem.objects.get(student_id=student_id, course_id=course_id, item_id=item_id)\n    except StudentItem.DoesNotExist:\n        return\n    try:\n        score = Score.create_reset_score(student_item)\n        if emit_signal:\n            score_reset.send(sender=None, anonymous_user_id=student_id, course_id=course_id, item_id=item_id, created_at=score.created_at)\n        if clear_state:\n            for sub in student_item.submission_set.all():\n                sub.status = Submission.DELETED\n                sub.save(update_fields=['status'])\n                cache_key = Submission.get_cache_key(sub.uuid)\n                cache.delete(cache_key)\n    except DatabaseError:\n        msg = u'Error occurred while reseting scores for item {item_id} in course {course_id} for student {student_id}'.format(item_id=item_id, course_id=course_id, student_id=student_id)\n        logger.exception(msg)\n        raise SubmissionInternalError(msg)\n    else:\n        msg = u'Score reset for item {item_id} in course {course_id} for student {student_id}'.format(item_id=item_id, course_id=course_id, student_id=student_id)\n        logger.info(msg)", "docstring": "Reset scores for a specific student on a specific problem.\n\nNote: this does *not* delete `Score` models from the database,\nsince these are immutable.  It simply creates a new score with\nthe \"reset\" flag set to True.\n\nArgs:\nstudent_id (unicode): The ID of the student for whom to reset scores.\ncourse_id (unicode): The ID of the course containing the item to reset.\nitem_id (unicode): The ID of the item for which to reset scores.\nclear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem\n\nReturns:\nNone\n\nRaises:\nSubmissionInternalError: An unexpected error occurred while resetting scores.", "source": "codesearchnet"}
{"code": "def _get_default_help_message(func, args, description=None, args_help=None):\n    \n    if description is None:\n        description = \"Argument parsing for %s\" % func.__name__\n    args_help = args_help or {}\n    \n    for argument in [arg_name for arg_name in args\n                     if arg_name not in args_help]:\n        args_help[argument] = \"Help message for %s\" % argument\n    return (description, args_help)", "docstring": "Create a default description for the parser and help message for the\nagurments if they are missing.\n\nArgs:\nfunc: the method we are creating a parser for\nargs: the argument names of the method\ndescription: a potentially existing description created from the\nfunction docstring\nargs_help: a dict {arg_name: help} with potentially missing arguments\n\nReturns:\na tuple (arg_parse_description, complete_args_help)", "source": "juraj-google-style"}
{"code": "def _force_close(self, file_length=None):\n    \n    if file_length is None:\n      file_length = self._get_offset_from_gcs() + 1\n    self._send_data('', 0, file_length)", "docstring": "Close this buffer on file_length.\n\nFinalize this upload immediately on file_length.\nContents that are still in memory will not be uploaded.\n\nThis is a utility method that does not modify self.\n\nArgs:\nfile_length: file length. Must match what has been uploaded. If None,\nit will be queried from GCS.", "source": "juraj-google-style"}
{"code": "def compress(d, output, fmt='gz', logger=None):\n    \n    if not logger:\n        logger = log.get_logger('s3')\n    if type(d) not in [list, tuple]:\n        d = [d, ]\n    d = [os.path.expanduser(_d) for _d in d]\n    print_compress_info(d, output, compress, logger)\n    if fmt.lower() == 'none':\n        fmt = ''\n    elif fmt.lower() not in ['gz', 'bz2']:\n        logger.info('Compression option (\"{}\") is invalid.\\nFalling back to uncompressed.'.format(fmt))\n        fmt = ''\n    output = os.path.expanduser(output)\n    tar = tarfile.open(output, 'w:{}'.format(fmt))\n    for obj in d:\n        tar.add(obj)\n    tar.close()\n    return output", "docstring": "Creates a compressed/uncompressed tar file.\n\nArgs:\n\nd: Can be one of three things:\n\n1. the path to a single file, as a string\n\n2. the path to a single directory, as a string\n\n3. an iterable of file or directory paths\n\noutput (str): Output file path.\n\nfmt: Compression method. Options are ``'gz'`` (gzip),\n``'bz2'`` (bzip2) and ``'none'`` (uncompressed). Default is ``'gz'``.", "source": "juraj-google-style"}
{"code": "def show(self, progress, msg=None):\n    if (self.whole_tag.style.display == 'none'):\n        self.whole_tag.style.display = 'block'\n    if (isinstance(progress, int) or isinstance(progress, float)):\n        percentage = progress\n    else:\n        percentage = self.__class__._compute_percentage(progress)\n    self.tag.class_name = 'progress-bar'\n    if (percentage < 100):\n        self.tag.class_name += ' progress-bar-striped active'\n    else:\n        msg = 'Hotovo'\n    self.tag.aria_valuemin = percentage\n    self.tag.style.width = '{}%'.format(percentage)\n    if msg:\n        self.tag.text = msg", "docstring": "Show the progress bar and set it to `progress` tuple or value.\n\nArgs:\nprogress (tuple / int / float): Tuple ``(done / len(all))`` or\nthe direct percentage value as int / float.\nmsg (str, default None): Alternative background description.", "source": "codesearchnet"}
{"code": "def related(self, *, exclude_self=False):\n    manager = type(self)._default_manager\n    queryset = manager.related_to(self)\n    if exclude_self:\n        queryset = queryset.exclude(id=self.id)\n    return queryset", "docstring": "Get a QuerySet for all trigger log objects for the same connected model.\n\nArgs:\nexclude_self (bool): Whether to exclude this log object from the result list", "source": "codesearchnet"}
{"code": "def __init__(self, message, callback, color=''):\n    \n    super(ConsolePrompt, self).__init__()\n    self.daemon = True\n    self._message = message\n    self._callback = callback\n    self._color = color\n    self._stop_event = threading.Event()\n    self._answered = False", "docstring": "Initializes a ConsolePrompt.\n\nArgs:\nmessage: A string to be presented to the user.\ncallback: A function to be called with the response string.\ncolor: An ANSI color code, or the empty string.", "source": "juraj-google-style"}
{"code": "def CreateSignatureScanner(cls, specification_store):\n    \n    scanner_object = pysigscan.scanner()\n\n    for format_specification in specification_store.specifications:\n      for signature in format_specification.signatures:\n        pattern_offset = signature.offset\n\n        if pattern_offset is None:\n          signature_flags = pysigscan.signature_flags.NO_OFFSET\n        elif pattern_offset < 0:\n          pattern_offset *= -1\n          signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END\n        else:\n          signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START\n\n        scanner_object.add_signature(\n            signature.identifier, pattern_offset, signature.pattern,\n            signature_flags)\n\n    return scanner_object", "docstring": "Creates a signature scanner for format specifications with signatures.\n\nArgs:\nspecification_store (FormatSpecificationStore): format specifications\nwith signatures.\n\nReturns:\npysigscan.scanner: signature scanner.", "source": "juraj-google-style"}
{"code": "def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):\n    return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`AlignConfig`] (or a derived class) from align text model configuration and align vision model\nconfiguration.\n\nReturns:\n[`AlignConfig`]: An instance of a configuration object", "source": "github-repos"}
{"code": "def str2dict_values(str_in):\n    tmp_dict = str2dict(str_in)\n    if (tmp_dict is None):\n        return None\n    return [tmp_dict[key] for key in sorted((k for k in tmp_dict))]", "docstring": "Extracts the values from a string that represents a dict and returns them\nsorted by key.\n\nArgs:\nstr_in (string) that contains python dict\nReturns:\n(list) with values or None if no valid dict was found\nRaises:\n-", "source": "codesearchnet"}
{"code": "def to_zmat(self, buf=None, upper_triangle=True, implicit_index=True, float_format='{:.6f}'.format, overwrite=True, header=False):\n    out = self.copy()\n    if implicit_index:\n        out = out.change_numbering(new_index=range(1, (len(self) + 1)))\n    if (not upper_triangle):\n        out = out._remove_upper_triangle()\n    output = out.to_string(index=(not implicit_index), float_format=float_format, header=header)\n    if (buf is not None):\n        if overwrite:\n            with open(buf, mode='w') as f:\n                f.write(output)\n        else:\n            with open(buf, mode='x') as f:\n                f.write(output)\n    else:\n        return output", "docstring": "Write zmat-file\n\nArgs:\nbuf (str): StringIO-like, optional buffer to write to\nimplicit_index (bool): If implicit_index is set, the zmat indexing\nis changed to ``range(1, len(self) + 1)``.\nUsing :meth:`~chemcoord.Zmat.change_numbering`\nBesides the index is omitted while writing which means,\nthat the index is given\nimplicitly by the row number.\nfloat_format (one-parameter function): Formatter function\nto apply to column’s elements if they are floats.\nThe result of this function must be a unicode string.\noverwrite (bool): May overwrite existing files.\n\nReturns:\nformatted : string (or unicode, depending on data and options)", "source": "codesearchnet"}
{"code": "def create_iam_role(self, account):\n    try:\n        iam = self.session.client('iam')\n        trust = get_template('vpc_flow_logs_iam_role_trust.json').render()\n        policy = get_template('vpc_flow_logs_role_policy.json').render()\n        newrole = iam.create_role(Path='/', RoleName=self.role_name, AssumeRolePolicyDocument=trust)['Role']['Arn']\n        iam.put_role_policy(RoleName=self.role_name, PolicyName='VpcFlowPolicy', PolicyDocument=policy)\n        self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))\n        auditlog(event='vpc_flow_logs.create_iam_role', actor=self.ns, data={'account': account.account_name, 'roleName': self.role_name, 'trustRelationship': trust, 'inlinePolicy': policy})\n        return newrole\n    except Exception:\n        self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))", "docstring": "Create a new IAM role. Returns the ARN of the newly created role\n\nArgs:\naccount (:obj:`Account`): Account where to create the IAM role\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "def load_obj(fn):\n  \n  position = [np.zeros(3, dtype=np.float32)]\n  normal = [np.zeros(3, dtype=np.float32)]\n  uv = [np.zeros(2, dtype=np.float32)]\n  \n  tuple2idx = OrderedDict()\n  trinagle_indices = []\n  \n  input_file = open(fn) if isinstance(fn, str) else fn\n  for line in input_file:\n    line = line.strip()\n    if not line or line[0] == '\n      continue\n    line = line.split(' ', 1)\n    tag = line[0]\n    if len(line) > 1:\n      line = line[1]\n    else:\n      line = ''\n    if tag == 'v':\n      position.append(np.fromstring(line, sep=' '))\n    elif tag == 'vt':\n      uv.append(np.fromstring(line, sep=' '))\n    elif tag == 'vn':\n      normal.append(np.fromstring(line, sep=' '))\n    elif tag == 'f':\n      output_face_indices = []\n      for chunk in line.split():\n        \n        vt = _parse_vertex_tuple(chunk)\n        if vt not in tuple2idx:  \n          tuple2idx[vt] = len(tuple2idx)\n        output_face_indices.append(tuple2idx[vt])\n      \n      for i in range(1, len(output_face_indices)-1):\n        for vi in [0, i, i+1]:\n          trinagle_indices.append(output_face_indices[vi])\n  \n  outputs = {}\n  outputs['face'] = np.int32(trinagle_indices)\n  pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T\n  if np.any(pos_idx):\n    outputs['position'] = _unify_rows(position)[pos_idx]\n  if np.any(uv_idx):\n    outputs['uv'] = _unify_rows(uv)[uv_idx]\n  if np.any(normal_idx):\n    outputs['normal'] = _unify_rows(normal)[normal_idx]\n  return outputs", "docstring": "Load 3d mesh form .obj' file.\n\nArgs:\nfn: Input file name or file-like object.\n\nReturns:\ndictionary with the following keys (some of which may be missing):\nposition: np.float32, (n, 3) array, vertex positions\nuv: np.float32, (n, 2) array, vertex uv coordinates\nnormal: np.float32, (n, 3) array, vertex uv normals\nface: np.int32, (k*3,) traingular face indices", "source": "juraj-google-style"}
{"code": "def comparator(objective):\n    \n\n    if isinstance(objective, Minimum):\n        return lambda l, r: l < r\n    else:\n        return lambda l, r: l > r", "docstring": "Higher order function creating a compare function for objectives.\n\nArgs:\nobjective (cipy.algorithms.core.Objective): The objective to create a\ncompare for.\n\nReturns:\ncallable: Function accepting two objectives to compare.\n\nExamples:\n>>> a = Minimum(0.1)\n>>> b = Minimum(0.2)\n>>> compare = comparator(a)\n>>> comparison = compare(a, b) # False", "source": "juraj-google-style"}
{"code": "def highway_core_with_recurrent_dropout(\n    hidden_size,\n    num_layers,\n    keep_prob=0.5,\n    **kwargs):\n  \n\n  core = HighwayCore(hidden_size, num_layers, **kwargs)\n  return RecurrentDropoutWrapper(core, keep_prob), core", "docstring": "Highway core with recurrent dropout.\n\nArgs:\nhidden_size: (int) Hidden size dimensionality.\nnum_layers: (int) Number of highway layers.\nkeep_prob: the probability to keep an entry when applying dropout.\n**kwargs: Extra keyword arguments to pass to the highway core.\n\nReturns:\nA tuple (train_core, test_core) where train_core is a higway core with\nrecurrent dropout enabled to be used for training and test_core is the\nsame highway core without recurrent dropout.", "source": "juraj-google-style"}
{"code": "def make_one_shot_iterator(self) -> Union[iterator_ops.Iterator, iterator_ops.OwnedIterator]:\n    return self._make_one_shot_iterator()", "docstring": "Creates an iterator for elements of this dataset.\n\nNote: The returned iterator will be initialized automatically.\nA \"one-shot\" iterator does not currently support re-initialization. For\nthat see `make_initializable_iterator`.\n\nExample:\n\n```python\n# Building graph ...\ndataset = ...\nnext_value = dataset.make_one_shot_iterator().get_next()\n\n# ... from within a session ...\ntry:\nwhile True:\nvalue = sess.run(next_value)\n...\nexcept tf.errors.OutOfRangeError:\npass\n```\n\nReturns:\nAn `tf.data.Iterator` for elements of this dataset.", "source": "github-repos"}
{"code": "def compute_attr_metadata(self, own_attrs: list[Attribute], decorator: str) -> Sequence[Attribute]:\n    assert decorator in _METADATA_KEYS, f'No metadata key for {decorator}'\n    key = _METADATA_KEYS[decorator]\n    attrs = self._get_attrs_from_mro(own_attrs, key)\n    self.metadata[key] = attrs\n    return attrs", "docstring": "Sets combined metadata based on inherited and own attrs.\n\nArgs:\nown_attrs: The attrs defined explicitly in this class\ndecorator: The fully qualified decorator name\n\nReturns:\nThe list of combined attrs.", "source": "github-repos"}
{"code": "def list_keyvaults(access_token, subscription_id, rgname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API])\n    return do_get_next(endpoint, access_token)", "docstring": "Lists key vaults in the named resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. 200 OK.", "source": "codesearchnet"}
{"code": "def dummy_inputs(self):\n    if self.config.use_lang_emb and self.config.n_langs > 1:\n        return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), 'langs': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)}\n    else:\n        return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)}", "docstring": "Dummy inputs to build the network.\n\nReturns:\ntf.Tensor with dummy inputs", "source": "github-repos"}
{"code": "def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> list[int]:\n    if isinstance(tensor, np.ndarray):\n        return list(tensor.shape)\n    dynamic = tf.shape(tensor)\n    if tensor.shape == tf.TensorShape(None):\n        return dynamic\n    static = tensor.shape.as_list()\n    return [dynamic[i] if s is None else s for i, s in enumerate(static)]", "docstring": "Deal with dynamic shape in tensorflow cleanly.\n\nArgs:\ntensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of.\n\nReturns:\n`List[int]`: The shape of the tensor as a list.", "source": "github-repos"}
{"code": "def live(self):\n    session = self._session\n    url = '{}/live'.format(self._base_url)\n    supported_params = frozenset(['filter[port]'])\n    params = {k: v for (k, v) in iteritems(self._params) if (k in supported_params)}\n    return session.live(url, self._datapoint_class, {'is_aggregate': self._is_aggregate}, params=params)", "docstring": "Get a live stream of timeseries readings.\n\nThis returns an Iterable over a live stream of readings. Note\nthat the result will need to be closed since the system can\nnot tell when you'll be done with it.\n\nYou can either call ``close`` on the endpoint when you're or\nuse the context management facilities of the endpoint.\n\n\n.. code-block:: python\n\n# Fetch a sensor\ntimeseries = sensor.timeseries()\n\n# ensure live endpoint closed\nwith timeseries.live() as live:\n# Wait for 10 readings\nfirst10 = list(islice(live, 10))\n\nReturns:", "source": "codesearchnet"}
{"code": "def record_request_completion(self, created_time: float, request_id: str) -> None:\n    if not _has_opentelemetry:\n        return\n    latency_ms = (time.time() - created_time) * 1000.0\n    try:\n        self.request_latency_histogram.record(latency_ms)\n        logger.debug(f'Recorded request completion for {request_id}: {latency_ms:.2f}ms')\n    except Exception as e:\n        logger.warning(f'Failed to record request completion metric: {e}')", "docstring": "Record metrics about a completed request.\n\nArgs:\ncreated_time: The time the request was created\nrequest_id: The ID of the request", "source": "github-repos"}
{"code": "def disqualified(self, num, natural=True, **kwargs):\n    search_type = ('natural' if natural else 'corporate')\n    baseuri = (self._BASE_URI + 'disqualified-officers/{}/{}'.format(search_type, num))\n    res = self.session.get(baseuri, params=kwargs)\n    self.handle_http_error(res)\n    return res", "docstring": "Search for disqualified officers by officer ID.\n\nSearches for natural disqualifications by default. Specify\nnatural=False to search for corporate disqualifications.\n\nArgs:\nnum (str): Company number to search on.\nnatural (Optional[bool]): Natural or corporate search\nkwargs (dict): additional keywords passed into\nrequests.session.get *params* keyword.", "source": "codesearchnet"}
{"code": "def _read_addr_resolve(self, length, htype):\n    if (htype == 1):\n        _byte = self._read_fileng(6)\n        _addr = '-'.join(textwrap.wrap(_byte.hex(), 2))\n    else:\n        _addr = self._read_fileng(length)\n    return _addr", "docstring": "Resolve MAC address according to protocol.\n\nPositional arguments:\n* length -- int, hardware address length\n* htype -- int, hardware type\n\nReturns:\n* str -- MAC address", "source": "codesearchnet"}
{"code": "def _has_extras(ctx):\n    if (not ctx.index.entries):\n        return False\n    return ((ctx.data_offset > 8) and (ctx.data_offset > (ctx.signatures.offset_end + 8)))", "docstring": "Determine if a MAR file has an additional section block or not.\n\nIt does this by looking at where file data starts in the file. If this\nstarts immediately after the signature data, then no additional sections are present.\n\nArgs:\nctx (context): construct parsing context\n\nReturns:\nTrue if the MAR file has an additional section block\nFalse otherwise", "source": "codesearchnet"}
{"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    size = get_size_dict(size, default_to_square=False)\n    if 'height' in size and 'width' in size:\n        output_size = (size['height'], size['width'])\n    elif 'longest_edge' in size:\n        output_size = get_resize_output_image_size(image, size['longest_edge'], input_data_format)\n    else:\n        raise ValueError(f\"Size must have 'height' and 'width' or 'longest_edge' as keys. Got {size.keys()}\")\n    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\nhave the size `(h, w)`. If `size` is of the form `{\"longest_edge\": s}`, the output image will have its\nlongest edge of length `s` while keeping the aspect ratio of the original image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"}
{"code": "def with_wget(url_dict=None, target_file=None):\n\n    def wget_decorator(cls):\n\n        def download_impl(self):\n            'Download the selected version from the url_dict value.'\n            t_file = (target_file if target_file else self.SRC_FILE)\n            t_version = url_dict[self.version]\n            Wget(t_version, t_file)\n\n        @staticmethod\n        def versions_impl():\n            'Return a list of versions from the url_dict keys.'\n            return list(url_dict.keys())\n        cls.versions = versions_impl\n        cls.download = download_impl\n        return cls\n    return wget_decorator", "docstring": "Decorate a project class with wget-based version information.\n\nThis adds two attributes to a project class:\n- A `versions` method that returns a list of available versions\nfor this project.\n- A `repository` attribute that provides a repository string to\ndownload from later.\nWe use the `git rev-list` subcommand to list available versions.\n\nArgs:\nurl_dict (dict): A dictionary that assigns a version to a download URL.\ntarget_file (str): An optional path where we should put the clone.\nIf unspecified, we will use the `SRC_FILE` attribute of\nthe decorated class.", "source": "codesearchnet"}
{"code": "def _remove_hdxobject(self, objlist, obj, matchon='id', delete=False):\n    if (objlist is None):\n        return False\n    if isinstance(obj, six.string_types):\n        obj_id = obj\n    elif (isinstance(obj, dict) or isinstance(obj, HDXObject)):\n        obj_id = obj.get(matchon)\n    else:\n        raise HDXError('Type of object not a string, dict or T<=HDXObject')\n    if (not obj_id):\n        return False\n    for (i, objdata) in enumerate(objlist):\n        objid = objdata.get(matchon)\n        if (objid and (objid == obj_id)):\n            if delete:\n                objlist[i].delete_from_hdx()\n            del objlist[i]\n            return True\n    return False", "docstring": "Remove an HDX object from a list within the parent HDX object\n\nArgs:\nobjlist (List[Union[T <= HDXObject,Dict]]): list of HDX objects\nobj (Union[T <= HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary\nmatchon (str): Field to match on. Defaults to id.\ndelete (bool): Whether to delete HDX object. Defaults to False.\n\nReturns:\nbool: True if object removed, False if not", "source": "codesearchnet"}
{"code": "def put(self):\n    return self.manager.put(id=self.id, name=self.name, description=self.description, whitelisted_container_task_types=self.whitelisted_container_task_types, whitelisted_executable_task_types=self.whitelisted_executable_task_types)", "docstring": "Updates this task whitelist on the saltant server.\n\nReturns:\n:class:`saltant.models.task_whitelist.TaskWhitelist`:\nA task whitelist model instance representing the task\nwhitelist just updated.", "source": "codesearchnet"}
{"code": "def port_create_vlan(br, port, id, internal=False):\n    interfaces = __salt__['network.interfaces']()\n    if (not (0 <= id <= 4095)):\n        return False\n    elif (not bridge_exists(br)):\n        return False\n    elif ((not internal) and (port not in interfaces)):\n        return False\n    elif (port in port_list(br)):\n        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)\n        if internal:\n            cmd += ' -- set interface {0} type=internal'.format(port)\n        result = __salt__['cmd.run_all'](cmd)\n        return _retcode_to_bool(result['retcode'])\n    else:\n        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)\n        if internal:\n            cmd += ' -- set interface {0} type=internal'.format(port)\n        result = __salt__['cmd.run_all'](cmd)\n        return _retcode_to_bool(result['retcode'])", "docstring": "Isolate VM traffic using VLANs.\n\nArgs:\nbr: A string - bridge name.\nport: A string - port name.\nid: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.\ninternal: A boolean to create an internal interface if one does not exist.\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_create_vlan br0 tap0 100", "source": "codesearchnet"}
{"code": "def configs(self, filters=None):\n        \n        url = self._url('/configs')\n        params = {}\n        if filters:\n            params['filters'] = utils.convert_filters(filters)\n        return self._result(self._get(url, params=params), True)", "docstring": "List configs\n\nArgs:\nfilters (dict): A map of filters to process on the configs\nlist. Available filters: ``names``\n\nReturns (list): A list of configs", "source": "juraj-google-style"}
{"code": "def _GenerateNotices(self):\n    items = []\n    for e in self._notices:\n        d = e.GetDictToFormat()\n        if ('url' in d.keys()):\n            d['url'] = ('<a href=\"%(url)s\">%(url)s</a>' % d)\n        items.append(('<li class=\"notice\">%s</li>' % e.FormatProblem(d).replace('\\n', '<br>')))\n    if items:\n        return ('<h2>Notices:</h2>\\n<ul>%s</ul>\\n' % '\\n'.join(items))\n    else:\n        return ''", "docstring": "Generate a summary of any notices.\n\nReturns:\nThe generated HTML as a string.", "source": "codesearchnet"}
{"code": "def __getDecision(self, result, multiple=False, **values):\n\t\t\n\n\t\tvalues = self.__toString(values)\n\t\t__valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)\n\n\t\terrors = self.__checkDecisionParameters(result, **values)\n\t\tif errors:\n\t\t\tview.Tli.showErrors('ParametersError', errors)\n\n\t\tmachingData = {}\n\t\tfor line in self.decisions:\n\n\t\t\tmatch = True\n\n\t\t\tfor index in __valueKeyWithHeaderIndex:\n\t\t\t\tif line[index] != __valueKeyWithHeaderIndex[index]:\n\t\t\t\t\tif line[index] != self.__wildcardSymbol:\n\t\t\t\t\t\tmatch = False\n\t\t\t\t\t\tbreak\n\n\t\t\tif match:\n\t\t\t\tif multiple:\n\t\t\t\t\tfor header in result:\n\t\t\t\t\t\tif header not in machingData:\n\t\t\t\t\t\t\tmachingData[header] = [line[self.header.index(header)]]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmachingData[header].append(line[self.header.index(header)])\n\t\t\t\telse:\n\t\t\t\t\tfor header in result:\n\t\t\t\t\t\tmachingData[header] = line[self.header.index(header)]\n\t\t\t\t\treturn machingData\n\n\t\tif multiple:\n\t\t\tif machingData:\n\t\t\t\treturn machingData\n\n\t\t\n\t\t\n\t\treturn dict((key, None) for key in result)", "docstring": "The main method for decision picking.\n\nArgs:\nresult (array of str): What values you want to get in return array.\nmultiple (bolean, optional): Do you want multiple result if it finds many maching decisions.\n**values (dict): What should finder look for, (headerString : value).\n\nReturns: Maped result values with finded elements in row/row.", "source": "juraj-google-style"}
{"code": "def __init__(self, speaker, audio_format, key, lang=\"ru-RU\", **kwargs):\n        \n        self.__params = {\n            \"speaker\": speaker,\n            \"format\": audio_format,\n            \"key\": key,\n            \"lang\": lang,\n        }\n        self.__params.update(kwargs)\n        self._data = None", "docstring": "Class for generate of speech.\n\nArgs:\nspeaker: Speaker.\naudio_format: Audio file format.\nkey: API-key for Yandex speech kit.\nlang (optional): Language. Defaults to \"ru-RU\".\nemotion (optional): The color of the voice. Defaults to \"normal\".\nspeed (optional): Speech tempo. Defaults to 1.0.", "source": "juraj-google-style"}
{"code": "def get_registered_object(name, custom_objects=None, module_objects=None):\n    custom_objects_scope_dict = global_state.get_global_attribute('custom_objects_scope_dict', {})\n    if name in custom_objects_scope_dict:\n        return custom_objects_scope_dict[name]\n    elif name in GLOBAL_CUSTOM_OBJECTS:\n        return GLOBAL_CUSTOM_OBJECTS[name]\n    elif custom_objects and name in custom_objects:\n        return custom_objects[name]\n    elif module_objects and name in module_objects:\n        return module_objects[name]\n    return None", "docstring": "Returns the class associated with `name` if it is registered with Keras.\n\nThis function is part of the Keras serialization and deserialization\nframework. It maps strings to the objects associated with them for\nserialization/deserialization.\n\nExample:\n\n```python\ndef from_config(cls, config, custom_objects=None):\nif 'my_custom_object_name' in config:\nconfig['hidden_cls'] = tf.keras.saving.get_registered_object(\nconfig['my_custom_object_name'], custom_objects=custom_objects)\n```\n\nArgs:\nname: The name to look up.\ncustom_objects: A dictionary of custom objects to look the name up in.\nGenerally, custom_objects is provided by the user.\nmodule_objects: A dictionary of custom objects to look the name up in.\nGenerally, module_objects is provided by midlevel library\nimplementers.\n\nReturns:\nAn instantiable class associated with `name`, or `None` if no such class\nexists.", "source": "github-repos"}
{"code": "def _grouper(iterable, n, fillvalue=0):\n        \n        args = [iter(iterable)] * n\n        return zip_longest(fillvalue=fillvalue, *args)", "docstring": "Collect data into fixed-length chunks or blocks.\n\nArgs:\nn (int): The size of the chunk.\nfillvalue (int): The fill value.\n\nReturns:\niterator: An iterator over the chunks.", "source": "juraj-google-style"}
{"code": "def nsarg_completions(\n    completion_text: str,\n    entity_types: list,\n    bel_spec: BELSpec,\n    namespace: str,\n    species_id: str,\n    bel_fmt: str,\n    size: int,\n):\n    \n\n    minimal_nsarg_completion_len = 1\n\n    species = [species_id]\n    namespaces = [namespace]\n    replace_list = []\n\n    if len(completion_text) >= minimal_nsarg_completion_len:\n        \n        \n\n        url = f'{config[\"bel_api\"][\"servers\"][\"api_url\"]}/terms/completions/{url_path_param_quoting(completion_text)}'\n        params = {\n            \"size\": size,\n            \"entity_types\": entity_types,\n            \"namespaces\": namespaces,\n            \"species\": species,\n        }\n        r = get_url(url, params=params)\n\n        if r.status_code == 200:\n            ns_completions = r.json()\n        else:\n            log.error(f\"Status code of {r.status_code} for {url}\")\n            ns_completions = {}\n\n        for complete in ns_completions.get(\"completions\", []):\n            replace_list.append(\n                {\n                    \"replacement\": complete[\"id\"],\n                    \"label\": f\"{complete['id']} ({complete['label']})\",\n                    \"highlight\": complete[\"highlight\"][-1],\n                    \"type\": \"NSArg\",\n                }\n            )\n\n    \n    for entity_type in entity_types:\n        default_namespace = bel_spec[\"namespaces\"].get(entity_type, [])\n        if default_namespace:\n            for obj in default_namespace[\"info\"]:\n                replacement = None\n                if bel_fmt == \"long\" and re.match(\n                    completion_text, obj[\"name\"], re.IGNORECASE\n                ):\n                    replacement = obj[\"name\"]\n                elif bel_fmt in [\"short\", \"medium\"] and re.match(\n                    completion_text, obj[\"abbreviation\"], re.IGNORECASE\n                ):\n                    replacement = obj[\"abbreviation\"]\n\n                if replacement:\n                    highlight = replacement.replace(\n                        completion_text, f\"<em>{completion_text}</em>\"\n                    )\n                    replace_list.insert(\n                        0,\n                        {\n                            \"replacement\": replacement,\n                            \"label\": replacement,\n                            \"highlight\": highlight,\n                            \"type\": \"NSArg\",\n                        },\n                    )\n\n    return replace_list[:size]", "docstring": "Namespace completions\n\nArgs:\ncompletion_text\nentity_types: used to filter namespace search results\nbel_spec: used to search default namespaces\nnamespace: used to filter namespace search results\nspecies_id: used to filter namespace search results\nbel_fmt: used to select full name or abbrev for default namespaces\nsize: how many completions to return\n\nResults:\nlist of replacement text objects", "source": "juraj-google-style"}
{"code": "def setHolidayDates(self, cmd_dict=None, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setHolidayDates\")\n        if not cmd_dict:\n            cmd_dict = self.m_holiday_date_params\n\n        try:\n            if not self.request(False):\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_table = \"\"\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_1_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_1_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_2_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_2_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_3_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_3_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_4_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_4_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_5_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_5_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_6_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_6_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_7_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_7_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_8_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_8_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_9_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_9_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_10_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_10_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_11_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_11_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_12_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_12_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_13_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_13_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_14_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_14_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_15_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_15_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_16_Month\"]).zfill(2))\n                    req_table += 
binascii.hexlify(str(cmd_dict[\"Holiday_16_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_17_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_17_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_18_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_18_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_19_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_19_Day\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_20_Month\"]).zfill(2))\n                    req_table += binascii.hexlify(str(cmd_dict[\"Holiday_20_Day\"]).zfill(2))\n                    req_str = \"015731023030423028\" + req_table + \"2903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success(setHolidayDates: 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial call to set holiday list.\n\nIf a buffer dictionary is not supplied, the method will use\nthe class object buffer populated with assignHolidayDate.\n\nArgs:\ncmd_dict (dict): Optional dictionary of holidays.\npassword (str): Optional password.\n\nReturns:\nbool: True on completion.", "source": "juraj-google-style"}
{"code": "def compare(expr, value, regex_expr=False):\n    \n    \n    if expr == value:\n        return True\n\n    \n    negate = False\n    if isinstance(expr, str):\n        negate = expr.startswith(NEGATE)\n        expr = strip_negate(expr) if negate else expr\n\n    try:\n        \n        test(expr, value, regex_expr=regex_expr)\n    except Exception as err:\n        if negate:\n            return True\n        else:\n            raise err\n\n    return True", "docstring": "Compares an string or regular expression againast a given value.\n\nArguments:\nexpr (str|regex): string or regular expression value to compare.\nvalue (str): value to compare against to.\nregex_expr (bool, optional): enables string based regex matching.\n\nRaises:\nAssertionError: in case of assertion error.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def get_db_row(db, start, size):\n    type_ = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte]\n    data = client.db_read(db, start, type_, size)\n    return data", "docstring": "Here you see and example of readying out a part of a DB\n\nArgs:\ndb (int): The db to use\nstart (int): The index of where to start in db data\nsize (int): The size of the db data to read", "source": "codesearchnet"}
{"code": "def _InternalUnpackAny(msg):\n  \n  type_url = msg.type_url\n  db = symbol_database.Default()\n\n  if not type_url:\n    return None\n\n  \n  \n  type_name = type_url.split(\"/\")[-1]\n  descriptor = db.pool.FindMessageTypeByName(type_name)\n\n  if descriptor is None:\n    return None\n\n  message_class = db.GetPrototype(descriptor)\n  message = message_class()\n\n  message.ParseFromString(msg.value)\n  return message", "docstring": "Unpacks Any message and returns the unpacked message.\n\nThis internal method is differnt from public Any Unpack method which takes\nthe target message as argument. _InternalUnpackAny method does not have\ntarget message type and need to find the message type in descriptor pool.\n\nArgs:\nmsg: An Any message to be unpacked.\n\nReturns:\nThe unpacked message.", "source": "juraj-google-style"}
{"code": "def load_morfessor_model(lang=\"en\", version=\"2\"):\n  \n  src_dir = \"morph{}\".format(version)\n  p = locate_resource(src_dir, lang)\n  file_handler = _open(p)\n  tmp_file_ = NamedTemporaryFile(delete=False)\n  tmp_file_.write(file_handler.read())\n  tmp_file_.close()\n  io = morfessor.MorfessorIO()\n  model = io.read_any_model(tmp_file_.name)\n  os.remove(tmp_file_.name)\n  return model", "docstring": "Return a morfessor model for `lang` and of version `version`\n\nArgs:\nlang (string): language code.\nversion (string): version of the parameters to be used.", "source": "juraj-google-style"}
{"code": "def write_file(self, file_name, vasp4_compatible=False):\n\n    def _print_fortran_float(f):\n        '\\n            Fortran codes print floats with a leading zero in scientific\\n            notation. When writing CHGCAR files, we adopt this convention\\n            to ensure written CHGCAR files are byte-to-byte identical to\\n            their input files as far as possible.\\n            :param f: float\\n            :return: str\\n            '\n        s = '{:.10E}'.format(f)\n        if (f >= 0):\n            return (((('0.' + s[0]) + s[2:12]) + 'E') + '{:+03}'.format((int(s[13:]) + 1)))\n        else:\n            return (((('-.' + s[1]) + s[3:13]) + 'E') + '{:+03}'.format((int(s[14:]) + 1)))\n    with zopen(file_name, 'wt') as f:\n        p = Poscar(self.structure)\n        comment = getattr(self, 'name', p.comment)\n        lines = (comment + '\\n')\n        lines += '   1.00000000000000\\n'\n        latt = self.structure.lattice.matrix\n        lines += (' %12.6f%12.6f%12.6f\\n' % tuple(latt[(0, :)]))\n        lines += (' %12.6f%12.6f%12.6f\\n' % tuple(latt[(1, :)]))\n        lines += (' %12.6f%12.6f%12.6f\\n' % tuple(latt[(2, :)]))\n        if (not vasp4_compatible):\n            lines += (''.join([('%5s' % s) for s in p.site_symbols]) + '\\n')\n        lines += (''.join([('%6d' % x) for x in p.natoms]) + '\\n')\n        lines += 'Direct\\n'\n        for site in self.structure:\n            lines += ('%10.6f%10.6f%10.6f\\n' % tuple(site.frac_coords))\n        lines += ' \\n'\n        f.write(lines)\n        a = self.dim\n\n        def write_spin(data_type):\n            lines = []\n            count = 0\n            f.write('   {}   {}   {}\\n'.format(a[0], a[1], a[2]))\n            for (k, j, i) in itertools.product(list(range(a[2])), list(range(a[1])), list(range(a[0]))):\n                lines.append(_print_fortran_float(self.data[data_type][(i, j, k)]))\n                count += 1\n                if ((count % 5) == 0):\n                    f.write(((' ' + ''.join(lines)) + '\\n'))\n                    lines = []\n                else:\n                    lines.append(' ')\n            f.write(((' ' + ''.join(lines)) + ' \\n'))\n            f.write(''.join(self.data_aug.get(data_type, [])))\n        write_spin('total')\n        if (self.is_spin_polarized and self.is_soc):\n            write_spin('diff_x')\n            write_spin('diff_y')\n            write_spin('diff_z')\n        elif self.is_spin_polarized:\n            write_spin('diff')", "docstring": "Write the VolumetricData object to a vasp compatible file.\n\nArgs:\nfile_name (str): Path to a file\nvasp4_compatible (bool): True if the format is vasp4 compatible", "source": "codesearchnet"}
{"code": "def substitute_symbol_table(table, version, max_id):\n    if (not table.table_type.is_shared):\n        raise ValueError('Symbol table to substitute from must be a shared table')\n    if (version <= 0):\n        raise ValueError(('Version must be grater than or equal to 1: %s' % version))\n    if (max_id < 0):\n        raise ValueError(('Max ID must be zero or positive: %s' % max_id))\n    if (max_id <= table.max_id):\n        symbols = (token.text for token in islice(table, max_id))\n    else:\n        symbols = chain((token.text for token in table), repeat(None, (max_id - table.max_id)))\n    return SymbolTable(table_type=SHARED_TABLE_TYPE, symbols=symbols, name=table.name, version=version, is_substitute=True)", "docstring": "Substitutes a given shared symbol table for another version.\n\n* If the given table has **more** symbols than the requested substitute, then the generated\nsymbol table will be a subset of the given table.\n* If the given table has **less** symbols than the requested substitute, then the generated\nsymbol table will have symbols with unknown text generated for the difference.\n\nArgs:\ntable (SymbolTable): The shared table to derive from.\nversion (int): The version to target.\nmax_id (int): The maximum ID allocated by the substitute, must be ``>= 0``.\n\nReturns:\nSymbolTable: The synthesized table.", "source": "codesearchnet"}
{"code": "def add_entry(self, path_object):\n    if ((not is_root()) and (not (self.st_mode & PERM_WRITE)) and (not self.filesystem.is_windows_fs)):\n        exception = (IOError if IS_PY2 else OSError)\n        raise exception(errno.EACCES, 'Permission Denied', self.path)\n    if (path_object.name in self.contents):\n        self.filesystem.raise_os_error(errno.EEXIST, self.path)\n    self.contents[path_object.name] = path_object\n    path_object.parent_dir = self\n    self.st_nlink += 1\n    path_object.st_nlink += 1\n    path_object.st_dev = self.st_dev\n    if (path_object.st_nlink == 1):\n        self.filesystem.change_disk_usage(path_object.size, path_object.name, self.st_dev)", "docstring": "Adds a child FakeFile to this directory.\n\nArgs:\npath_object: FakeFile instance to add as a child of this directory.\n\nRaises:\nOSError: if the directory has no write permission (Posix only)\nOSError: if the file or directory to be added already exists", "source": "codesearchnet"}
{"code": "def nested_update(d, u):\n    for (k, v) in list(u.items()):\n        if isinstance(v, collections.Mapping):\n            r = nested_update(d.get(k, {}), v)\n            d[k] = r\n        else:\n            d[k] = u[k]\n    return d", "docstring": "Merge two nested dicts.\n\nNested dicts are sometimes used for representing various recursive structures. When\nupdating such a structure, it may be convenient to present the updated data as a\ncorresponding recursive structure. This function will then apply the update.\n\nArgs:\nd: dict\ndict that will be updated in-place. May or may not contain nested dicts.\n\nu: dict\ndict with contents that will be merged into ``d``. May or may not contain\nnested dicts.", "source": "codesearchnet"}
{"code": "def __init__(\n      self, cipher_mode=None, initialization_vector=None, key=None, **kwargs):\n    \n    if not key:\n      raise ValueError('Missing key.')\n\n    cipher_mode = self.ENCRYPTION_MODES.get(cipher_mode, None)\n    if cipher_mode is None:\n      raise ValueError('Unsupported cipher mode: {0!s}'.format(cipher_mode))\n\n    if cipher_mode != AES.MODE_ECB and not initialization_vector:\n      \n      \n      raise ValueError('Missing initialization vector.')\n\n    super(AESDecrypter, self).__init__()\n    if cipher_mode == AES.MODE_ECB:\n      self._aes_cipher = AES.new(key, mode=cipher_mode)\n    else:\n      self._aes_cipher = AES.new(\n          key, IV=initialization_vector, mode=cipher_mode)", "docstring": "Initializes a decrypter.\n\nArgs:\ncipher_mode (Optional[str]): cipher mode.\ninitialization_vector (Optional[bytes]): initialization vector.\nkey (Optional[bytes]): key.\nkwargs (dict): keyword arguments depending on the decrypter.\n\nRaises:\nValueError: when key is not set, block cipher mode is not supported,\nor initialization_vector is required and not set.", "source": "juraj-google-style"}
{"code": "def _format_ase2clusgeo(obj, all_atomtypes=None):\n    totalAN = len(obj)\n    if (all_atomtypes is not None):\n        atomtype_set = set(all_atomtypes)\n    else:\n        atomtype_set = set(obj.get_atomic_numbers())\n    atomtype_lst = np.sort(list(atomtype_set))\n    n_atoms_per_type_lst = []\n    pos_lst = []\n    for atomtype in atomtype_lst:\n        condition = (obj.get_atomic_numbers() == atomtype)\n        pos_onetype = obj.get_positions()[condition]\n        n_onetype = pos_onetype.shape[0]\n        pos_lst.append(pos_onetype)\n        n_atoms_per_type_lst.append(n_onetype)\n    typeNs = n_atoms_per_type_lst\n    Ntypes = len(n_atoms_per_type_lst)\n    atomtype_lst\n    Apos = np.concatenate(pos_lst).ravel()\n    return (Apos, typeNs, Ntypes, atomtype_lst, totalAN)", "docstring": "Takes an ase Atoms object and returns numpy arrays and integers\nwhich are read by the internal clusgeo. Apos is currently a flattened\nout numpy array\n\nArgs:\nobj():\nall_atomtypes():\nsort():", "source": "codesearchnet"}
{"code": "def __init__(self, *others):\n        \n        selectors = list()\n        heads = collections.defaultdict(set)\n        for other in others:\n            if isinstance(other, MultiFieldSelector):\n                for head, tail in other.heads.iteritems():\n                    heads[head].add(tail)\n            elif isinstance(other, FieldSelector):\n                selectors.append(other)\n            else:\n                selectors.append(self.FieldSelector(other))\n\n        for selector in selectors:\n            chain = selector.selectors\n            if chain:\n                head = chain[0]\n                tail = self.FieldSelector(chain[1:]) if len(chain) > 1 else all\n                heads[head].add(tail)\n            else:\n                heads[None].add(all)\n\n        self.heads = dict(\n            (head, all if all in tail else MultiFieldSelector(*tail))\n            for head, tail in heads.iteritems()\n        ) if None not in heads or heads[None] is not all else {None: all}\n\n        \n        head_types = set(type(x) for x in self.heads)\n        self.has_int = int in head_types or long in head_types\n        self.has_string = any(issubclass(x, basestring) for x in head_types)\n        self.has_none = types.NoneType in head_types\n        self.complete = self.has_none and self.heads[None] is all\n        if self.has_none and (self.has_int or self.has_string):\n            \n            raise ValueError(\n                \"MultiFieldSelector cannot yet specify a list and a hash/\"\n                \"object at the same level: %r\" % self.heads.keys()\n            )", "docstring": "Returns a MultiFieldSelector based on combining the passed-in\nFieldSelector and MultiFieldSelector objects.\n\nargs:\n\n``*others=``\\ *FieldSelector*\\ \\|\\ *iterable*\n\nEach argument is interpreted as either a FieldSelector, or a\nFieldSelector constructor.", "source": "juraj-google-style"}
{"code": "def modify_module(channel, module_name, module_state):\n    gui = ui_embed.UI(channel, '{} updated'.format(module_name), '{} is now {}'.format(module_name, ('activated' if module_state else 'deactivated')), modulename=modulename)\n    return gui", "docstring": "Creates an embed UI containing the module modified message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nmodule_name (str): The name of the module that was updated\nmodule_state (bool): The current state of the module\n\nReturns:\nembed: The created embed", "source": "codesearchnet"}
{"code": "def to_sigproc_keyword(keyword, value=None):\n    \n\n    keyword = bytes(keyword)\n\n    if value is None:\n        return np.int32(len(keyword)).tostring() + keyword\n    else:\n        dtype = header_keyword_types[keyword]\n\n        dtype_to_type = {b'<l'  : np.int32,\n                         b'str' : str,\n                         b'<d'  : np.float64,\n                         b'angle' : to_sigproc_angle}\n\n        value_dtype = dtype_to_type[dtype]\n\n        if value_dtype is str:\n            return np.int32(len(keyword)).tostring() + keyword + np.int32(len(value)).tostring() + value\n        else:\n            return np.int32(len(keyword)).tostring() + keyword + value_dtype(value).tostring()", "docstring": "Generate a serialized string for a sigproc keyword:value pair\n\nIf value=None, just the keyword will be written with no payload.\nData type is inferred by keyword name (via a lookup table)\n\nArgs:\nkeyword (str): Keyword to write\nvalue (None, float, str, double or angle): value to write to file\n\nReturns:\nvalue_str (str): serialized string to write to file.", "source": "juraj-google-style"}
{"code": "def run(self, blocking: bool=True):\n        \n        if not self._run_control_loop:\n            err = (\"`run` called, but not using the internal control loop. Use\"\n                   \" `start` instead\")\n\n            raise RuntimeError(err)\n\n        self._setup()\n        self._heartbeat_reciever.start()\n\n        if blocking:\n            return self.loop.start()\n        else:\n            self._run_thread = _threading.Thread(target=self.loop.start,\n                                                 daemon=True)\n\n            self._thread.run()", "docstring": "Run the internal control loop.\nArgs:\nblocking (bool): Defaults to `True`. If set to `False`, will\nintialize a thread to run the control loop.\nRaises:\nRuntimeError: If called and not using the internal control loop\nvia `self._run_control_loop`, set in the intializer of the\nclass", "source": "juraj-google-style"}
{"code": "def has_cwd(state, dir, incorrect_msg='Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there.'):\n    expr = \"[[ $PWD == '{}' ]]\".format(dir)\n    _msg = state.build_message(incorrect_msg, fmt_kwargs={'dir': dir})\n    has_expr_exit_code(state, expr, output='0', incorrect_msg=_msg)\n    return state", "docstring": "Check whether the student is in the expected directory.\n\nThis check is typically used before using ``has_expr_output()``\nto make sure the student didn't navigate somewhere else.\n\nArgs:\nstate: State instance describing student and solution code. Can be omitted if used with ``Ex()``.\ndir: Directory that the student should be in. Always use the absolute path.\nincorrect_msg: If specified, this overrides the automatically generated message in\ncase the student is not in the expected directory.\n\n:Example:\n\nIf you want to be sure that the student is in ``/home/repl/my_dir``: ::\n\nEx().has_cwd('/home/repl/my_dir')", "source": "codesearchnet"}
{"code": "def call_for_each_replica(self, fn, args=(), kwargs=None):\n    distribute_lib._require_cross_replica_or_default_context_extended(self)\n    if kwargs is None:\n        kwargs = {}\n    map_fn = functools.partial(dtensor_util.convert_inputs_to_dtensor, mesh=self._mesh)\n    d_args = nest.map_structure(map_fn, args)\n    d_kwargs = nest.map_structure(map_fn, kwargs)\n    with self._container_strategy().scope():\n        with dtensor_util.DTensorReplicaContext(self._container_strategy()):\n            dtensor_result = fn(*d_args, **d_kwargs)\n    return nest.map_structure(dtensor_util.DTensorDistributedValue, dtensor_result)", "docstring": "Run `fn` once per replica.\n\nThis is a method that expected by the strategy base class in its `run()`.\n\nArgs:\nfn: function to run (will be run once per replica).\nargs: Tuple or list with positional arguments for `fn`.\nkwargs: Dict with keyword arguments for `fn`.\n\nReturns:\nMerged return value of `fn` across all replicas.", "source": "github-repos"}
{"code": "def torque_on(self):\n    data = []\n    data.append(10)\n    data.append(self.servoid)\n    data.append(RAM_WRITE_REQ)\n    data.append(TORQUE_CONTROL_RAM)\n    data.append(1)\n    data.append(96)\n    send_data(data)", "docstring": "Enable the torques of Herkulex\n\nIn this mode, position control and velocity control\nwill work.\n\nArgs:\nnone", "source": "codesearchnet"}
{"code": "def dump_session_params(path):\n    \n    \n    var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n    var.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))\n    \n    assert len(set(var)) == len(var), \"TRAINABLE and MODEL variables have duplication!\"\n    gvars = set([k.name for k in tf.global_variables()])\n    var = [v for v in var if v.name in gvars]\n    result = {}\n    for v in var:\n        result[v.name] = v.eval()\n    save_chkpt_vars(result, path)", "docstring": "Dump value of all TRAINABLE + MODEL variables to a dict, and save as\nnpz format (loadable by :func:`sessinit.get_model_loader`).\n\nArgs:\npath(str): the file name to save the parameters. Must ends with npz.", "source": "juraj-google-style"}
{"code": "def create(self, resource):\n        \n        return self.service.create(\n            resource, self.url_prefix, self.auth, self.session,\n            self.session_send_opts)", "docstring": "Create the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource): Create a data model object with attributes matching those of the resource.\n\nReturns:\n(intern.resource.boss.BossResource): Returns resource of type requested on success.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def print_info(self, buf=None, format_=FileFormat.yaml,\n                   skip_attributes=None, include_release=False):\n        \n        data = self.validated_data().copy()\n\n        \n        \n        \n        \n        data.pop(\"config\", None)\n        if self.config:\n            if isinstance(self, Package):\n                config_dict = self.data.get(\"config\")\n            else:\n                config_dict = self.parent.data.get(\"config\")\n            data[\"config\"] = config_dict\n\n        if not include_release:\n            skip_attributes = list(skip_attributes or []) + list(package_release_keys)\n\n        buf = buf or sys.stdout\n        dump_package_data(data, buf=buf, format_=format_,\n                          skip_attributes=skip_attributes)", "docstring": "Print the contents of the package.\n\nArgs:\nbuf (file-like object): Stream to write to.\nformat_ (`FileFormat`): Format to write in.\nskip_attributes (list of str): List of attributes to not print.\ninclude_release (bool): If True, include release-related attributes,\nsuch as 'timestamp' and 'changelog'", "source": "juraj-google-style"}
{"code": "def testBasic(self, count, batch_size, drop_remainder, num_parallel_calls):\n    components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7))\n\n    def _map_fn(x, y, z):\n        return (math_ops.square(x), math_ops.square(y), math_ops.square(z))\n    dataset = dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn).repeat(count).batch(batch_size, drop_remainder, num_parallel_calls)\n    get_next = self.getNext(dataset)\n    if drop_remainder:\n        dim0 = batch_size\n    else:\n        dim0 = None\n    self.assertEqual([ts.as_list() for ts in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))], [[dim0] + list(c.shape[1:]) for c in components])\n    num_full_batches = count * 7 \n    for i in range(num_full_batches):\n        result = self.evaluate(get_next())\n        for component, result_component in zip(components, result):\n            for j in range(batch_size):\n                self.assertAllEqual(component[(i * batch_size + j) % 7] ** 2, result_component[j])\n    if not drop_remainder and count * 7 % batch_size > 0:\n        result = self.evaluate(get_next())\n        for component, result_component in zip(components, result):\n            for j in range(count * 7 % batch_size):\n                self.assertAllEqual(component[(num_full_batches * batch_size + j) % 7] ** 2, result_component[j])\n    with self.assertRaises(errors.OutOfRangeError):\n        result = self.evaluate(get_next())", "docstring": "Tests the batch dataset logic for various input configurations.\n\nArgs:\ncount: the number of input elements\nbatch_size: the batch size\ndrop_remainder: whether a smaller batch size should be produced if batch\nsize does not divide number of inputs evenly\nnum_parallel_calls: the number batches to process asynchronously in\nparallel", "source": "github-repos"}
{"code": "def process_openxml_file(filename: str,\n                         print_good: bool,\n                         delete_if_bad: bool) -> None:\n    \n    print_bad = not print_good\n    try:\n        file_good = is_openxml_good(filename)\n        file_bad = not file_good\n        if (print_good and file_good) or (print_bad and file_bad):\n            print(filename)\n        if delete_if_bad and file_bad:\n            log.warning(\"Deleting: {}\", filename)\n            os.remove(filename)\n    except Exception as e:\n        \n        \n        log.critical(\"Uncaught error in subprocess: {!r}\\n{}\", e,\n                     traceback.format_exc())\n        raise", "docstring": "Prints the filename of, or deletes, an OpenXML file depending on whether\nit is corrupt or not.\n\nArgs:\nfilename: filename to check\nprint_good: if ``True``, then prints the filename if the file\nappears good.\ndelete_if_bad: if ``True``, then deletes the file if the file\nappears corrupt.", "source": "juraj-google-style"}
{"code": "def ensure_tensor_on_device(self, **inputs):\n    return self._ensure_tensor_on_device(inputs, self.device)", "docstring": "Ensure PyTorch tensors are on the specified device.\n\nArgs:\ninputs (keyword arguments that should be `torch.Tensor`, the rest is ignored):\nThe tensors to place on `self.device`.\nRecursive on lists **only**.\n\nReturn:\n`Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device.", "source": "github-repos"}
{"code": "def insert(self, **fields):\n        \n\n        if self.conflict_target or self.conflict_action:\n            compiler = self._build_insert_compiler([fields])\n            rows = compiler.execute_sql(return_id=True)\n\n            pk_field_name = self.model._meta.pk.name\n            return rows[0][pk_field_name]\n\n        \n        return super().create(**fields).pk", "docstring": "Creates a new record in the database.\n\nThis allows specifying custom conflict behavior using .on_conflict().\nIf no special behavior was specified, this uses the normal Django create(..)\n\nArguments:\nfields:\nThe fields of the row to create.\n\nReturns:\nThe primary key of the record that was created.", "source": "juraj-google-style"}
{"code": "def find_next(self, *strings, **kwargs):\n    start = kwargs.pop('start', None)\n    keys_only = kwargs.pop('keys_only', False)\n    staht = (start if (start is not None) else self.cursor)\n    for (start, stop) in [(staht, len(self)), (0, staht)]:\n        for i in range(start, stop):\n            for string in strings:\n                if (string in self[i]):\n                    tup = (i, self[i])\n                    self.cursor = (i + 1)\n                    if keys_only:\n                        return i\n                    return tup", "docstring": "From the editor's current cursor position find the next instance of the\ngiven string.\n\nArgs:\nstrings (iterable): String or strings to search for\n\nReturns:\ntup (tuple): Tuple of cursor position and line or None if not found\n\nNote:\nThis function cycles the entire editor (i.e. cursor to length of\neditor to zero and back to cursor position).", "source": "codesearchnet"}
{"code": "def get_qubit_los(self, user_lo_config):\n        \n        try:\n            _q_los = self.default_qubit_los.copy()\n        except KeyError:\n            raise PulseError('Qubit default frequencies not exist.')\n\n        for channel, lo_freq in user_lo_config.qubit_lo_dict().items():\n            _q_los[channel.index] = lo_freq\n\n        if _q_los == self.default_qubit_los:\n            return None\n        return _q_los", "docstring": "Embed default qubit LO frequencies from backend and format them to list object.\nIf configured lo frequency is the same as default, this method returns `None`.\n\nArgs:\nuser_lo_config (LoConfig): A dictionary of LOs to format.\n\nReturns:\nlist: A list of qubit LOs.\n\nRaises:\nPulseError: when LO frequencies are missing.", "source": "juraj-google-style"}
{"code": "def GetSysFeeAmountByHeight(self, height):\n    hash = self.GetBlockHash(height)\n    return self.GetSysFeeAmount(hash)", "docstring": "Get the system fee for the specified block.\n\nArgs:\nheight (int): block height.\n\nReturns:\nint:", "source": "codesearchnet"}
{"code": "def __init__(self, orig_image, dpi, save_image):\n        \n\n        self._shreds = None\n        self.orig_img = orig_image\n\n        self.save_image = save_image\n\n        self._fg_mask = None\n        self._shreds = None\n\n        if dpi is None:\n            self.res_x, self.res_y = self._guess_dpi()\n        else:\n            self.res_x, self.res_y = dpi", "docstring": "Initializes a Sheet instance.\n\nArgs:\norig_image: cv.Mat instance with the original sheet image.\ndpi: optional (x resolution, y resolution) tuple or None.\nIf set to None, will try to guess dpi.\nsave_image: A callback to save debug images with args (name, img)", "source": "juraj-google-style"}
{"code": "def getindex(self, child, recursive=True, ignore=True):\n    for (i, c) in enumerate(self.data):\n        if (c is child):\n            return i\n    if recursive:\n        for (i, c) in enumerate(self.data):\n            if (ignore is True):\n                try:\n                    if (not c.auth):\n                        continue\n                except AttributeError:\n                    pass\n            elif ignore:\n                doignore = False\n                for e in ignore:\n                    if (e is True):\n                        try:\n                            if (not c.auth):\n                                doignore = True\n                                break\n                        except AttributeError:\n                            pass\n                    elif ((e == c.__class__) or issubclass(c.__class__, e)):\n                        doignore = True\n                        break\n                if doignore:\n                    continue\n            if isinstance(c, AbstractElement):\n                j = c.getindex(child, recursive)\n                if (j != (- 1)):\n                    return i\n    return (- 1)", "docstring": "Get the index at which an element occurs, recursive by default!\n\nReturns:\nint", "source": "codesearchnet"}
{"code": "def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TangoAndroidContactEventData()\n\n    first_name = self._GetRowValue(query_hash, row, 'first_name')\n    try:\n      decoded_text = base64_decode(first_name)\n      event_data.first_name = codecs.decode(decoded_text, 'utf-8')\n    except ValueError:\n      event_data.first_name = first_name\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse first name: {0:s}'.format(first_name))\n\n    last_name = self._GetRowValue(query_hash, row, 'last_name')\n    try:\n      decoded_text = base64_decode(last_name)\n      event_data.last_name = codecs.decode(decoded_text, 'utf-8')\n    except ValueError:\n      event_data.last_name = last_name\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse last name: {0:s}'.format(last_name))\n\n    event_data.birthday = self._GetRowValue(query_hash, row, 'birthday')\n    event_data.gender = self._GetRowValue(query_hash, row, 'gender')\n\n    status = self._GetRowValue(query_hash, row, 'status')\n    try:\n      decoded_text = base64_decode(status)\n      event_data.status = codecs.decode(decoded_text, 'utf-8')\n    except ValueError:\n      event_data.status = status\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse status: {0:s}'.format(status))\n\n    event_data.distance = self._GetRowValue(query_hash, row, 'distance')\n\n    is_friend = self._GetRowValue(query_hash, row, 'friend')\n    event_data.is_friend = False\n    if is_friend:\n      event_data.is_friend = True\n\n    event_data.friend_request_type = self._GetRowValue(\n        query_hash, row, 'friend_request_type')\n\n    friend_request_message = self._GetRowValue(\n        query_hash, row, 'friend_request_message')\n    try:\n      decoded_text = base64_decode(friend_request_message)\n      event_data.friend_request_message = codecs.decode(decoded_text, 'utf-8')\n    except ValueError:\n      event_data.friend_request_message = friend_request_message\n      parser_mediator.ProduceExtractionWarning(\n          'unable to parse status: {0:s}'.format(friend_request_message))\n\n    timestamp = self._GetRowValue(query_hash, row, 'last_active_time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_ACTIVE)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'last_access_time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'friend_request_time')\n    if timestamp:\n      date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_SENT)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a contact row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, dtype, use_gpu, op_name):\n    x1 = self._CreateNumpyTensor(tensor_in_sizes)\n    x2 = self._CreateNumpyTensor(filter_in_sizes)\n    with test_util.device(use_gpu):\n        t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)\n        t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)\n        strides = [1] + strides + [1]\n        dilations = [1] + dilations + [1]\n        if isinstance(padding, (list, tuple)):\n            padding = [(0, 0)] + padding + [(0, 0)]\n        if data_format == 'NCHW':\n            t1 = test_util.NHWCToNCHW(t1)\n            strides = test_util.NHWCToNCHW(strides)\n            dilations = test_util.NHWCToNCHW(dilations)\n            if isinstance(padding, (list, tuple)):\n                padding = test_util.NHWCToNCHW(padding)\n        if op_name == 'Conv2D':\n            conv = nn_ops.conv2d(t1, t2, dilations=dilations, strides=strides, padding=padding, data_format=data_format)\n        elif op_name == 'Conv':\n            conv_format = 'CHANNELS_LAST' if data_format == 'NHWC' else 'CHANNELS_FIRST'\n            conv_padding, explicit_paddings = nn_ops.convert_padding(padding)\n            conv = gen_nn_ops.conv(t1, t2, strides=strides, padding=conv_padding, explicit_paddings=explicit_paddings, data_format=conv_format, dilations=dilations)\n        else:\n            raise ValueError('Invalid op name: %s' % op_name)\n        self.assertEqual(conv.dtype, dtype)\n        if data_format == 'NCHW':\n            conv = test_util.NCHWToNHWC(conv)\n        return conv", "docstring": "Verifies the output values of the convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions in [batch, input_rows,\ninput_cols, input_depth].\nfilter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\ndilations: Dilated rate: [col_dilation, row_dilation]\nstrides: Stride: [col_stride, row_stride]\npadding: Padding type.\ndata_format: Format of the data tensors.\ndtype: Data type for inputs and outputs.\nuse_gpu: True if the operations should be run on GPU\nop_name: Name of the op to be tested\n\nReturns:\nSymbolic tensor value that can be used to execute the computation", "source": "github-repos"}
{"code": "def run_inference(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:\n    while self.throttler.throttle_request(time.time() * _MILLISECOND_TO_SECOND):\n        self.logger.info('Delaying request for %d seconds due to previous failures', self.throttle_delay_secs)\n        time.sleep(self.throttle_delay_secs)\n        self.throttled_secs.inc(self.throttle_delay_secs)\n    try:\n        req_time = time.time()\n        predictions = self.request(batch, model, inference_args)\n        self.throttler.successful_request(req_time * _MILLISECOND_TO_SECOND)\n        return predictions\n    except Exception as e:\n        self.logger.error('exception raised as part of request, got %s', e)\n        raise", "docstring": "Runs inferences on a batch of examples. Calls a remote model for\npredictions and will retry if a retryable exception is raised.\n\nArgs:\nbatch: A sequence of examples or features.\nmodel: The model used to make inferences.\ninference_args: Extra arguments for models whose inference call requires\nextra parameters.\n\nReturns:\nAn Iterable of Predictions.", "source": "github-repos"}
{"code": "def version(msg):\n    \n    tc = typecode(msg)\n\n    if tc != 31:\n        raise RuntimeError(\"%s: Not a status operation message, expecting TC = 31\" % msg)\n\n    msgbin = common.hex2bin(msg)\n    version = common.bin2int(msgbin[72:75])\n\n    return version", "docstring": "ADS-B Version\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC = 31\n\nReturns:\nint: version number", "source": "juraj-google-style"}
{"code": "def ConfigureLogging(\n    debug_output=False, filename=None, mode='w', quiet_mode=False):\n  \n  \n  \n  for handler in logging.root.handlers:\n    logging.root.removeHandler(handler)\n\n  logger = logging.getLogger()\n\n  if filename and filename.endswith('.gz'):\n    handler = CompressedFileHandler(filename, mode=mode)\n  elif filename:\n    handler = logging.FileHandler(filename, mode=mode)\n  else:\n    handler = logging.StreamHandler()\n\n  format_string = (\n      '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '\n      '<%(module)s> %(message)s')\n\n  formatter = logging.Formatter(format_string)\n  handler.setFormatter(formatter)\n\n  if debug_output:\n    level = logging.DEBUG\n  elif quiet_mode:\n    level = logging.WARNING\n  else:\n    level = logging.INFO\n\n  logger.setLevel(level)\n  handler.setLevel(level)\n\n  logger.addHandler(handler)", "docstring": "Configures the logging root logger.\n\nArgs:\ndebug_output (Optional[bool]): True if the logging should include debug\noutput.\nfilename (Optional[str]): log filename.\nmode (Optional[str]): log file access mode.\nquiet_mode (Optional[bool]): True if the logging should not include\ninformation output. Note that debug_output takes precedence over\nquiet_mode.", "source": "juraj-google-style"}
{"code": "def mask_from_embedding(emb):\n  \n  return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True))", "docstring": "Input embeddings -> padding mask.\n\nWe have hacked symbol_modality to return all-zero embeddings for padding.\nReturns a mask with 0.0 in the padding positions and 1.0 elsewhere.\n\nArgs:\nemb: a Tensor with shape [batch, width, height, depth].\nReturns:\na 0.0/1.0 Tensor with shape [batch, width, height, 1].", "source": "juraj-google-style"}
{"code": "def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):\n        \n\n        self.freqs = self.populate_freqs()\n        self.timestamps = self.populate_timestamps()\n\n        if f_start is None:\n            f_start = self.freqs[0]\n        if f_stop is None:\n            f_stop = self.freqs[-1]\n\n        i0 = np.argmin(np.abs(self.freqs - f_start))\n        i1 = np.argmin(np.abs(self.freqs - f_stop))\n\n        if i0 < i1:\n            plot_f    = self.freqs[i0:i1 + 1]\n            plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])\n        else:\n            plot_f    = self.freqs[i1:i0 + 1]\n            plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])\n\n        return plot_f, plot_data", "docstring": "Extract a portion of data by frequency range.\n\nArgs:\nf_start (float): start frequency in MHz\nf_stop (float): stop frequency in MHz\nif_id (int): IF input identification (req. when multiple IFs in file)\n\nReturns:\n(freqs, data) (np.arrays): frequency axis in MHz and data subset", "source": "juraj-google-style"}
{"code": "def get_svg_layers(svg_sources):\n    \n    layers = []\n    width, height = None, None\n\n    def extract_length(attr):\n        'Extract length in pixels.'\n        match = CRE_MM_LENGTH.match(attr)\n        if match:\n            \n            return INKSCAPE_PPmm.magnitude * float(match.group('length'))\n        else:\n            return float(attr)\n\n    for svg_source_i in svg_sources:\n        \n        xml_root = etree.parse(svg_source_i)\n        svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]\n        width = max(extract_length(svg_root.attrib['width']), width)\n        height = max(extract_length(svg_root.attrib['height']), height)\n        layers += svg_root.xpath('\n                                 namespaces=INKSCAPE_NSMAP)\n\n    for i, layer_i in enumerate(layers):\n        layer_i.attrib['id'] = 'layer%d' % (i + 1)\n    return (width, height), layers", "docstring": "Collect layers from input svg sources.\n\nArgs:\n\nsvg_sources (list) : A list of file-like objects, each containing\none or more XML layers.\n\nReturns\n-------\n(width, height), layers : (int, int), list\nThe first item in the tuple is the shape of the largest layer, and the\nsecond item is a list of ``Element`` objects (from :mod:`lxml.etree`\nmodule), one per SVG layer.", "source": "juraj-google-style"}
{"code": "def generate_sb(date: datetime.datetime, project: str, programme_block: str) -> dict:\n    date = date.strftime('%Y%m%d')\n    instance_id = randint(0, 9999)\n    sb_id = 'SB-{}-{}-{:04d}'.format(date, project, instance_id)\n    return dict(id=sb_id, project=project, programme_block=programme_block)", "docstring": "Generate a Scheduling Block data object.\n\nArgs:\ndate (datetime.datetime): UTC date of the SBI\nproject (str): Project Name\nprogramme_block (str): Programme\n\nReturns:\nstr, Scheduling Block Instance (SBI) ID.", "source": "codesearchnet"}
{"code": "def eventFilter(self, object, event):\n        \n        if (object is self.tree_scripts):\n            \n            \n            if (event.type() == QtCore.QEvent.ChildAdded):\n                item = self.tree_scripts.selectedItems()[0]\n                if not isinstance(item.value, Script):\n                    print('ONLY SCRIPTS CAN BE DRAGGED')\n                    return False\n                print(('XXX ChildAdded', self.tree_scripts.selectedItems()[0].name))\n\n\n\n                \n                \n                \n                \n                \n                \n            if (event.type() == QtCore.QEvent.ChildRemoved):\n                print(('XXX ChildRemoved', self.tree_scripts.selectedItems()[0].name))\n            if (event.type() == QtCore.QEvent.Drop):\n                print('XXX Drop')\n                \n                \n                \n                \n                \n            return False  \n\n        return False", "docstring": "TEMPORARY / UNDER DEVELOPMENT\n\nTHIS IS TO ALLOW COPYING OF PARAMETERS VIA DRAP AND DROP\n\nArgs:\nobject:\nevent:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _AddToTree(self, x, prevx):\n    self.s.add(x)\n    self.prev[x] = prevx\n    for y in self.right:\n        slack = self._CalcSlack(x, y)\n        if slack < self.slack[y]:\n            self.slack[y] = slack\n            self.slackx[y] = x", "docstring": "Adds |x| to the current augmenting tree.\n\nx is a node which has already been matched to a node y in Right (which is\nitself connected to prevx via a non-matching edge in the equality subgraph).\nWe indicate prevx comes before x in the tree so we can trace the path later.\n\nArgs:\nx: Node which has already been matched to a node y in right\nprevx: Previous node in Left along the path.", "source": "github-repos"}
{"code": "def get_all_without_ethernet(self, start=0, count=(- 1), filter='', sort=''):\n    without_ethernet_client = ResourceClient(self._connection, '/rest/logical-downlinks/withoutEthernet')\n    return without_ethernet_client.get_all(start, count, filter=filter, sort=sort)", "docstring": "Gets a paginated collection of logical downlinks without ethernet. The collection is\nbased on optional sorting and filtering and is constrained by start and count parameters.\n\nArgs:\nstart:\nThe first item to return, using 0-based indexing.\nIf not specified, the default is 0 - start with the first available item.\ncount:\nThe number of resources to return. A count of -1 requests all items.\nThe actual number of items in the response might differ from the requested\ncount if the sum of start and count exceeds the total number of items.\nfilter (list or str):\nA general filter/query string to narrow the list of items returned. The\ndefault is no filter; all resources are returned.\nsort:\nThe sort order of the returned data set. By default, the sort order is based\non create time with the oldest entry first.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def plot_path(line, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax3d_fig_plt(ax)\n    if ('color' not in kwargs):\n        kwargs['color'] = 'r'\n    if ('linewidth' not in kwargs):\n        kwargs['linewidth'] = 3\n    for k in range(1, len(line)):\n        vertex1 = line[(k - 1)]\n        vertex2 = line[k]\n        if (not coords_are_cartesian):\n            if (lattice is None):\n                raise ValueError('coords_are_cartesian False requires the lattice')\n            vertex1 = lattice.get_cartesian_coords(vertex1)\n            vertex2 = lattice.get_cartesian_coords(vertex2)\n        ax.plot(*zip(vertex1, vertex2), **kwargs)\n    return (fig, ax)", "docstring": "Adds a line passing through the coordinates listed in 'line' to a matplotlib Axes\n\nArgs:\nline: list of coordinates.\nlattice: Lattice object used to convert from reciprocal to cartesian coordinates\ncoords_are_cartesian: Set to True if you are providing\ncoordinates in cartesian coordinates. Defaults to False.\nRequires lattice if False.\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to red\nand linewidth to 3.\n\nReturns:\nmatplotlib figure and matplotlib ax", "source": "codesearchnet"}
{"code": "def _maybe_init_run(self, experiment_name, run_name):\n    \n    experiment_id = self._maybe_init_experiment(experiment_name)\n    cursor = self._db.cursor()\n    cursor.execute(\n        ,\n        (experiment_id, run_name))\n    row = cursor.fetchone()\n    if row:\n      return row[0]\n    run_id = self._create_id()\n    \n    started_time = 0\n    cursor.execute(\n        ,\n        (experiment_id, run_id, run_name, time.time(), started_time))\n    return run_id", "docstring": "Returns the ID for the given run, creating the row if needed.\n\nArgs:\nexperiment_name: name of experiment containing this run.\nrun_name: name of run.", "source": "juraj-google-style"}
{"code": "def format(self, exclude_class=False):\n    if exclude_class:\n        msg = self.msg\n    else:\n        msg = ('%s: %s' % (self.__class__.__name__, self.msg))\n    if (len(self.params) != 0):\n        paramstring = '\\n'.join([((str(key) + ': ') + str(val)) for (key, val) in self.params.items()])\n        msg += ('\\nAdditional Information:\\n' + paramstring)\n    return msg", "docstring": "Format this exception as a string including class name.\n\nArgs:\nexclude_class (bool): Whether to exclude the exception class\nname when formatting this exception\n\nReturns:\nstring: a multiline string with the message, class name and\nkey value parameters passed to create the exception.", "source": "codesearchnet"}
{"code": "def _process_image_files(name, filenames, texts, labels, num_shards):\n    assert (len(filenames) == len(texts))\n    assert (len(filenames) == len(labels))\n    spacing = np.linspace(0, len(filenames), (FLAGS.num_threads + 1)).astype(np.int)\n    ranges = []\n    for i in range((len(spacing) - 1)):\n        ranges.append([spacing[i], spacing[(i + 1)]])\n    print(('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)))\n    sys.stdout.flush()\n    coord = tf.train.Coordinator()\n    coder = ImageCoder()\n    threads = []\n    for thread_index in range(len(ranges)):\n        args = (coder, thread_index, ranges, name, filenames, texts, labels, num_shards)\n        t = threading.Thread(target=_process_image_files_batch, args=args)\n        t.start()\n        threads.append(t)\n    coord.join(threads)\n    print(('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames))))\n    sys.stdout.flush()", "docstring": "Process and save list of images as TFRecord of Example protos.\n\nArgs:\nname: string, unique identifier specifying the data set\nfilenames: list of strings; each string is a path to an image file\ntexts: list of strings; each string is human readable, e.g. 'dog'\nlabels: list of integer; each integer identifies the ground truth\nnum_shards: integer number of shards for this data set.", "source": "codesearchnet"}
{"code": "def delete_permissions(self, grp_name, resource):\n        \n        self.project_service.set_auth(self._token_project)\n        self.project_service.delete_permissions(grp_name, resource)", "docstring": "Removes permissions from the group for the given resource.\n\nArgs:\ngrp_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data\nmodel object to operate on.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def get_range(self, name_prefix, vlan_id_range):\n    filter = '\"\\'name\\' matches \\'{}\\\\_%\\'\"'.format(name_prefix)\n    ethernet_networks = self.get_all(filter=filter, sort='vlanId:ascending')\n    vlan_ids = self.dissociate_values_or_ranges(vlan_id_range)\n    for net in ethernet_networks[:]:\n        if (int(net['vlanId']) not in vlan_ids):\n            ethernet_networks.remove(net)\n    return ethernet_networks", "docstring": "Gets a list of Ethernet Networks that match the 'given name_prefix' and the 'vlan_id_range'.\n\nExamples:\n>>> enet.get_range('Enet_name', '1-2,5')\n# The result contains the ethernet network with names:\n['Enet_name_1', 'Enet_name_2', 'Enet_name_5']\n\n>>> enet.get_range('Enet_name', '2')\n# The result contains the ethernet network with names:\n['Enet_name_1', 'Enet_name_2']\n\nArgs:\nname_prefix: The Ethernet Network prefix\nvlan_id_range: A combination of values or ranges to be retrieved. For example, '1-10,50,51,500-700'.\n\nReturns:\nlist: A list of Ethernet Networks.", "source": "codesearchnet"}
{"code": "def _get_element_attr_or_none(document, selector, attribute):\n    element = document.cssselect(selector)\n    if element:\n        return element[0].get(attribute)\n    return None", "docstring": "Using a CSS selector, get the element and return the given attribute value, or None if no element.\n\nArgs:\ndocument (HTMLElement) - HTMLElement document\nselector (str) - CSS selector\nattribute (str) - The attribute to get from the element", "source": "codesearchnet"}
{"code": "def _set_bearer_user_vars_local(token, allowed_client_ids, scopes):\n  \n  \n  result = urlfetch.fetch(\n      '%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token})))\n  if result.status_code != 200:\n    try:\n      error_description = json.loads(result.content)['error_description']\n    except (ValueError, KeyError):\n      error_description = ''\n    _logger.error('Token info endpoint returned status %s: %s',\n                  result.status_code, error_description)\n    return\n  token_info = json.loads(result.content)\n\n  \n  if 'email' not in token_info:\n    _logger.warning('Oauth token doesn\\'t include an email address.')\n    return\n  if token_info.get('email_verified') != 'true':\n    _logger.warning('Oauth token email isn\\'t verified.')\n    return\n\n  \n  client_id = token_info.get('azp')\n  if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and\n      client_id not in allowed_client_ids):\n    _logger.warning('Client ID is not allowed: %s', client_id)\n    return\n\n  \n  _, sufficient_scopes = _process_scopes(scopes)\n  authorized_scopes = token_info.get('scope', '').split(' ')\n  if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes):\n    _logger.warning('Oauth token scopes don\\'t match any acceptable scopes.')\n    return\n\n  os.environ[_ENV_AUTH_EMAIL] = token_info['email']\n  os.environ[_ENV_AUTH_DOMAIN] = ''\n  _logger.debug('Local dev returning user from token.')", "docstring": "Validate the oauth bearer token on the dev server.\n\nSince the functions in the oauth module return only example results in local\ndevelopment, this hits the tokeninfo endpoint and attempts to validate the\ntoken.  If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we\ncan get the user from the token.\n\nArgs:\ntoken: String with the oauth token to validate.\nallowed_client_ids: List of client IDs that are acceptable.\nscopes: List of acceptable scopes.", "source": "juraj-google-style"}
{"code": "def search(self, scope, search, **kwargs):\n    data = {'scope': scope, 'search': search}\n    return self.http_list('/search', query_data=data, **kwargs)", "docstring": "Search GitLab resources matching the provided string.'\n\nArgs:\nscope (str): Scope of the search\nsearch (str): Search string\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabSearchError: If the server failed to perform the request\n\nReturns:\nGitlabList: A list of dicts describing the resources found.", "source": "codesearchnet"}
{"code": "def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True, use_time_reversal=True, comment=None):\n    return cls(kpts=[ngkpt], kpt_shifts=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak, comment=(comment if comment else 'Monkhorst-Pack scheme with user-specified shiftk'))", "docstring": "Convenient static constructor for a Monkhorst-Pack mesh.\n\nArgs:\nngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.\nshiftk: Shift to be applied to the kpoints.\nuse_symmetries: Use spatial symmetries to reduce the number of k-points.\nuse_time_reversal: Use time-reversal symmetry to reduce the number of k-points.\n\nReturns:\n:class:`KSampling` object.", "source": "codesearchnet"}
{"code": "def save_args(conditions, out_path):\n    if isinstance(conditions, argparse.Namespace):\n        args = vars(conditions)\n    else:\n        args = conditions\n    try:\n        os.makedirs(out_path)\n    except OSError:\n        pass\n    with tempdir(prefix='args', dir=out_path) as tempd:\n        path = os.path.join(tempd, 'args.json')\n        with open(path, 'w') as f:\n            json.dump(args, f, indent=4)\n        new_path = os.path.join(out_path, 'args')\n        shutil.move(path, new_path)", "docstring": "A util function to save experiment condition for job table.\n\nArgs:\nconditions (:class:`argparse.Namespace` or dict): Experiment conditions\nto show on a job table. Keys are show as table header and values\nare show at a job row.\nout_path (str): Output directory name to save conditions.", "source": "codesearchnet"}
{"code": "def _optimize_tf_model(self, graph_def, input_tensors, output_tensors, quant_mode):\n    if self.saved_model_dir or quant_mode.is_quantization_aware_trained_model():\n        return graph_def\n    try:\n        graph = _convert_to_constants.disable_lower_using_switch_merge(graph_def)\n        optimized_graph = _run_graph_optimizations(graph, input_tensors, output_tensors, config=self._grappler_config(['function']))\n        return optimized_graph\n    except Exception:\n        return graph_def", "docstring": "Run a Grappler pass to optimize the TensorFlow graph.\n\nArgs:\ngraph_def: Frozen GraphDef to be optimized.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.\nquant_mode: the quantization mode.\n\nReturns:\nThe optimized TensorFlow graph.", "source": "github-repos"}
{"code": "def __init__(self, message, raises=False):\n        \n        super(CustodianError, self).__init__(message)\n        self.raises = raises\n        self.message = message", "docstring": "Initializes the error with a message.\n\nArgs:\nmessage (str): Message passed to Exception\nraises (bool): Whether this should be raised outside custodian", "source": "juraj-google-style"}
{"code": "def __init__(self, dfk, *args, threshold=20, interval=5):\n        \n        self.dfk = dfk\n        self.threshold = threshold\n        self.interval = interval\n        self.cb_args = args\n        self.strategy = Strategy(dfk)\n        self.callback = self.strategy.strategize\n        self._handle = None\n        self._event_count = 0\n        self._event_buffer = []\n        self._wake_up_time = time.time() + 1\n        self._kill_event = threading.Event()\n        self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))\n        self._thread.daemon = True\n        self._thread.start()", "docstring": "Initialize the flowcontrol object.\n\nWe start the timer thread here\n\nArgs:\n- dfk (DataFlowKernel) : DFK object to track parsl progress\n\nKWargs:\n- threshold (int) : Tasks after which the callback is triggered\n- interval (int) : seconds after which timer expires", "source": "juraj-google-style"}
{"code": "def send_to_prv_exchange(self, user_id, message=None):\n    exchange = ('prv_%s' % user_id.lower())\n    msg = json.dumps(message, cls=ZEngineJSONEncoder)\n    log.debug(('Sending following users \"%s\" exchange:\\n%s ' % (exchange, msg)))\n    self.get_channel().publish(exchange=exchange, routing_key='', body=msg)", "docstring": "Send messages through logged in users private exchange.\n\nArgs:\nuser_id string: User key\nmessage dict: Message object", "source": "codesearchnet"}
{"code": "def __init__(self, graph, name=None):\n        \n        if not isinstance(graph, BipartiteGraph):\n            raise ValueError(\n                \"Given graph is not instance of Bipartite:\", graph)\n\n        self._graph = graph\n        if name:\n            self.name = name\n        else:\n            self.name = super(_Node, self).__str__()\n        self._hash = None", "docstring": "Construct a new node.\n\nArgs:\nname: Specifying the name of this node.\nIf not given, use strings returned from __str__ method.", "source": "juraj-google-style"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    return encrypted_stream_file_entry.EncryptedStreamFileEntry(\n        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nEncryptedStreamFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def segment(self, source, language=None):\n    \n    if language and not language in self.supported_languages:\n      raise ValueError(\n          'Language {} is not supported by NLAPI segmenter'.format(language))\n\n    chunks = ChunkList()\n    results = tinysegmenter.tokenize(source)\n    seek = 0\n    for word in results:\n      word = word.strip()\n      if not word:\n        continue\n      if source[seek: seek + len(word)] != word:\n        assert source[seek] == ' '\n        assert source[seek + 1: seek + len(word) + 1] == word\n        chunks.append(Chunk.space())\n        seek += 1\n\n      dependency = None\n      if word in _PARTICLES or word in _AUX_VERBS or is_hiragana(word):\n        dependency = False\n\n      chunk = Chunk(word, dependency=dependency)\n      if chunk.is_punct():\n        chunk.dependency = chunk.is_open_punct()\n      chunks.append(chunk)\n      seek += len(word)\n    chunks.resolve_dependencies()\n    return chunks", "docstring": "Returns a chunk list from the given sentence.\n\nArgs:\nsource (str): Source string to segment.\nlanguage (:obj:`str`, optional): A language code.\n\nReturns:\nA chunk list. (:obj:`budou.chunk.ChunkList`)\n\nRaises:\nValueError: If :obj:`language` is given and it is not included in\n:obj:`supported_languages`.", "source": "juraj-google-style"}
{"code": "def GreaterThanOrEqualTo(self, value):\n    self._awql = self._CreateSingleValueCondition(value, '>=')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"greater than or equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"}
{"code": "def setValues(self, values):\n        \n        ncols = self.getNumCols()\n        nindices = self.getNumIndices()\n        for key, value in values.items():\n            key = Utils.convToList(key)\n            assert len(key) == nindices\n            value = Utils.convToList(value)\n            assert len(value) == ncols-nindices\n            self.addRow(key + value)", "docstring": "Set the values of a DataFrame from a dictionary.\n\nArgs:\nvalues: Dictionary with the values to set.", "source": "juraj-google-style"}
{"code": "def _GetGdbThreadMapping(self, position):\n    if (len(gdb.selected_inferior().threads()) == 1):\n        return {position[1]: 1}\n    thread_line_regexp = '\\\\s*\\\\**\\\\s*([0-9]+)\\\\s+[a-zA-Z]+\\\\s+([x0-9a-fA-F]+)\\\\s.*'\n    output = gdb.execute('info threads', to_string=True)\n    matches = [re.match(thread_line_regexp, line) for line in output.split('\\n')[1:]]\n    return {int(match.group(2), 16): int(match.group(1)) for match in matches if match}", "docstring": "Gets a mapping from python tid to gdb thread num.\n\nThere's no way to get the thread ident from a gdb thread.  We only get the\n\"ID of the thread, as assigned by GDB\", which is completely useless for\neverything except talking to gdb.  So in order to translate between these\ntwo, we have to execute 'info threads' and parse its output. Note that this\nmay only work on linux, and only when python was compiled to use pthreads.\nIt may work elsewhere, but we won't guarantee it.\n\nArgs:\nposition: array of pid, tid, framedepth specifying the requested position.\nReturns:\nA dictionary of the form {python_tid: gdb_threadnum}.", "source": "codesearchnet"}
{"code": "def int64_user_gauge(namespace, name, metric, ptransform=None) -> metrics_pb2.MonitoringInfo:\n    labels = create_labels(ptransform=ptransform, namespace=namespace, name=name)\n    if isinstance(metric, GaugeData):\n        coder = coders.VarIntCoder()\n        value = metric.value\n        timestamp = metric.timestamp\n    else:\n        raise TypeError('Expected GaugeData metric type but received %s with value %s' % (type(metric), metric))\n    payload = _encode_gauge(coder, timestamp, value)\n    return create_monitoring_info(USER_GAUGE_URN, LATEST_INT64_TYPE, payload, labels)", "docstring": "Return the gauge monitoring info for the URN, metric and labels.\n\nArgs:\nnamespace: User-defined namespace of gauge metric.\nname: Name of gauge metric.\nmetric: The GaugeData containing the metrics.\nptransform: The ptransform id used as a label.", "source": "github-repos"}
{"code": "def start(self, wait=False):\n        \n        if self._status is not TaskStatus.IDLE:\n            raise RuntimeError(\"Cannot start %s in state %s\" %\n                               (self, self._status))\n        self._status = TaskStatus.STARTED\n        STARTED_TASKS.add(self)\n        self._start()\n\n        if wait:\n            self.wait()\n\n        return self.return_values", "docstring": "Start a task.\n\nThis function depends on the underlying implementation of _start, which\nany subclass of ``Task`` should implement.\n\nArgs:\nwait (bool): Whether or not to wait on the task to finish before\nreturning from this function. Default `False`.\n\nRaises:\nRuntimeError: If the task has already been started without a\nsubsequent call to ``reset()``.", "source": "juraj-google-style"}
{"code": "def get_random_transform(self, img_shape, seed=None):\n    img_row_axis = self.row_axis - 1\n    img_col_axis = self.col_axis - 1\n    if seed is not None:\n        np.random.seed(seed)\n    if self.rotation_range:\n        theta = np.random.uniform(-self.rotation_range, self.rotation_range)\n    else:\n        theta = 0\n    if self.height_shift_range:\n        try:\n            tx = np.random.choice(self.height_shift_range)\n            tx *= np.random.choice([-1, 1])\n        except ValueError:\n            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range)\n        if np.max(self.height_shift_range) < 1:\n            tx *= img_shape[img_row_axis]\n    else:\n        tx = 0\n    if self.width_shift_range:\n        try:\n            ty = np.random.choice(self.width_shift_range)\n            ty *= np.random.choice([-1, 1])\n        except ValueError:\n            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range)\n        if np.max(self.width_shift_range) < 1:\n            ty *= img_shape[img_col_axis]\n    else:\n        ty = 0\n    if self.shear_range:\n        shear = np.random.uniform(-self.shear_range, self.shear_range)\n    else:\n        shear = 0\n    if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n        zx, zy = (1, 1)\n    else:\n        zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)\n    flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n    flip_vertical = (np.random.random() < 0.5) * self.vertical_flip\n    channel_shift_intensity = None\n    if self.channel_shift_range != 0:\n        channel_shift_intensity = np.random.uniform(-self.channel_shift_range, self.channel_shift_range)\n    brightness = None\n    if self.brightness_range is not None:\n        brightness = np.random.uniform(self.brightness_range[0], self.brightness_range[1])\n    transform_parameters = {'theta': theta, 'tx': tx, 'ty': ty, 'shear': shear, 'zx': zx, 'zy': zy, 'flip_horizontal': flip_horizontal, 'flip_vertical': flip_vertical, 'channel_shift_intensity': channel_shift_intensity, 'brightness': brightness}\n    return transform_parameters", "docstring": "Generates random parameters for a transformation.\n\nArgs:\nimg_shape: Tuple of integers.\nShape of the image that is transformed.\nseed: Random seed.\n\nReturns:\nA dictionary containing randomly chosen parameters describing the\ntransformation.", "source": "github-repos"}
{"code": "def remove(self, layers):\n        \n        if not isinstance(layers, list):\n            layers = [layers]\n        for l in layers:\n            if isinstance(l, string_types):\n                if l not in self.layers:\n                    raise ValueError(\"There's no image/layer named '%s' in \"\n                                     \"the masking stack!\" % l)\n                self.stack.remove(l)\n            else:\n                l = self.stack.pop(l)\n            del self.layers[l]\n\n        self.set_mask()", "docstring": "Remove one or more layers from the stack of masking layers.\nArgs:\nlayers: An int, string or list of strings and/or ints. Ints are\ninterpreted as indices in the stack to remove; strings are\ninterpreted as names of layers to remove. Negative ints will\nalso work--i.e., remove(-1) will drop the last layer added.", "source": "juraj-google-style"}
{"code": "def _FormatMessages(self, format_string, short_format_string, event_values):\n    \n    message_string = self._FormatMessage(format_string, event_values)\n\n    if short_format_string:\n      short_message_string = self._FormatMessage(\n          short_format_string, event_values)\n    else:\n      short_message_string = message_string\n\n    \n    if len(short_message_string) > 80:\n      short_message_string = '{0:s}...'.format(short_message_string[:77])\n\n    return message_string, short_message_string", "docstring": "Determines the formatted message strings.\n\nArgs:\nformat_string (str): message format string.\nshort_format_string (str): short message format string.\nevent_values (dict[str, object]): event values.\n\nReturns:\ntuple(str, str): formatted message string and short message string.", "source": "juraj-google-style"}
{"code": "def _plot_cwt(ts, coefs, freqs, tsize=1024, fsize=512):\n    \n    import matplotlib.style\n    import matplotlib as mpl\n    mpl.style.use('classic')\n    import matplotlib.pyplot as plt\n    from scipy import interpolate\n    channels = ts.shape[1]\n    fig = plt.figure()\n    for i in range(channels):\n        rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1, \n                0.8, 0.85/channels)\n        ax = fig.add_axes(rect)\n        logpowers = np.log((coefs[:, :, i] * coefs[:, :, i].conj()).real)\n        tmin, tmax = ts.tspan[0], ts.tspan[-1]\n        fmin, fmax = freqs[0], freqs[-1]\n        tgrid, fgrid = np.mgrid[tmin:tmax:tsize*1j, fmin:fmax:fsize*1j]\n        gd = interpolate.interpn((ts.tspan, freqs), logpowers, \n                                 (tgrid, fgrid)).T\n        ax.imshow(gd, cmap='gnuplot2', aspect='auto', origin='lower',\n                   extent=(tmin, tmax, fmin, fmax))\n        ax.set_ylabel('freq (Hz)')\n    fig.axes[0].set_title(u'log(power spectral density)')\n    fig.axes[channels - 1].set_xlabel('time (s)')\n    fig.show()", "docstring": "Plot time resolved power spectral density from cwt results\nArgs:\nts: the original Timeseries\ncoefs:  continuous wavelet transform coefficients as calculated by cwt()\nfreqs: list of frequencies (in Hz) corresponding to coefs.\ntsize, fsize: size of the plot (time axis and frequency axis, in pixels)", "source": "juraj-google-style"}
{"code": "def init_from_adversarial_batches(self, adv_batches):\n    \n    for idx, (adv_batch_id, adv_batch_val) in enumerate(iteritems(adv_batches)):\n      work_id = ATTACK_WORK_ID_PATTERN.format(idx)\n      self.work[work_id] = {\n          'claimed_worker_id': None,\n          'claimed_worker_start_time': None,\n          'is_completed': False,\n          'error': None,\n          'elapsed_time': None,\n          'submission_id': adv_batch_val['submission_id'],\n          'shard_id': None,\n          'output_adversarial_batch_id': adv_batch_id,\n      }", "docstring": "Initializes work pieces from adversarial batches.\n\nArgs:\nadv_batches: dict with adversarial batches,\ncould be obtained as AversarialBatches.data", "source": "juraj-google-style"}
{"code": "def mark_causative(self, institute, case, user, link, variant):\n        \n        display_name = variant['display_name']\n        LOG.info(\"Mark variant {0} as causative in the case {1}\".format(\n            display_name, case['display_name']))\n\n        LOG.info(\"Adding variant to causatives in case {0}\".format(\n            case['display_name']))\n\n        LOG.info(\"Marking case {0} as solved\".format(\n            case['display_name']))\n\n        updated_case = self.case_collection.find_one_and_update(\n            {'_id': case['_id']},\n            {\n                '$push': {'causatives': variant['_id']},\n                '$set': {'status': 'solved'}\n            },\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n\n        LOG.info(\"Creating case event for marking {0}\" \\\n                    \" causative\".format(variant['display_name']))\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='case',\n            verb='mark_causative',\n            variant=variant,\n            subject=variant['display_name'],\n        )\n\n        LOG.info(\"Creating variant event for marking {0}\" \\\n                    \" causative\".format(case['display_name']))\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='variant',\n            verb='mark_causative',\n            variant=variant,\n            subject=variant['display_name'],\n        )\n        return updated_case", "docstring": "Create an event for marking a variant causative.\n\nArguments:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (str): The url to be used in the event\nvariant (variant): A variant object\n\nReturns:\nupdated_case(dict)", "source": "juraj-google-style"}
{"code": "def noisy_moments(self, moments: 'Iterable[cirq.Moment]', system_qubits: Sequence['cirq.Qid']) -> Sequence['cirq.OP_TREE']:\n    if (not hasattr(self.noisy_moment, '_not_overridden')):\n        result = []\n        for moment in moments:\n            result.append(self.noisy_moment(moment, system_qubits))\n        return result\n    if (not hasattr(self.noisy_operation, '_not_overridden')):\n        result = []\n        for moment in moments:\n            result.append([self.noisy_operation(op) for op in moment])\n        return result\n    assert False, 'Should be unreachable.'", "docstring": "Adds possibly stateful noise to a series of moments.\n\nArgs:\nmoments: The moments to add noise to.\nsystem_qubits: A list of all qubits in the system.\n\nReturns:\nA sequence of OP_TREEs, with the k'th tree corresponding to the\nnoisy operations for the k'th moment.", "source": "codesearchnet"}
{"code": "def market_open(self, session, mins) -> Session:\n        \n        if session not in self.exch: return SessNA\n        start_time = self.exch[session][0]\n        return Session(start_time, shift_time(start_time, int(mins)))", "docstring": "Time intervals for market open\n\nArgs:\nsession: [allday, day, am, pm, night]\nmins: mintues after open\n\nReturns:\nSession of start_time and end_time", "source": "juraj-google-style"}
{"code": "def sg_sum(tensor, opt):\n    r\n    return tf.reduce_sum(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", "docstring": "r\"\"\"Computes the sum of elements across axis of a tensor.\n\nSee `tf.reduce_sum()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` with zero-padding (automatically given by chain).\nopt:\naxis: A tuple/list of integers or an integer. The axis to reduce.\nkeep_dims: If true, retains reduced dimensions with length 1.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"}
{"code": "def FullTransactions(self):\n    is_trimmed = False\n    try:\n        tx = self.Transactions[0]\n        if (type(tx) is str):\n            is_trimmed = True\n    except Exception as e:\n        pass\n    if (not is_trimmed):\n        return self.Transactions\n    txs = []\n    for hash in self.Transactions:\n        (tx, height) = GetBlockchain().GetTransaction(hash)\n        txs.append(tx)\n    self.Transactions = txs\n    return self.Transactions", "docstring": "Get the list of full Transaction objects.\n\nNote: Transactions can be trimmed to contain only the header and the hash. This will get the full data if\ntrimmed transactions are found.\n\nReturns:\nlist: of neo.Core.TX.Transaction.Transaction objects.", "source": "codesearchnet"}
{"code": "def get_camera_imageseries(self, number_of_imageseries=10, offset=0):\n        \n        response = None\n        try:\n            response = requests.get(\n                urls.get_imageseries(self._giid),\n                headers={\n                    'Accept': 'application/json, text/javascript, */*; q=0.01',\n                    'Cookie': 'vid={}'.format(self._vid)},\n                params={\n                    \"numberOfImageSeries\": int(number_of_imageseries),\n                    \"offset\": int(offset),\n                    \"fromDate\": \"\",\n                    \"toDate\": \"\",\n                    \"onlyNotViewed\": \"\",\n                    \"_\": self._giid})\n        except requests.exceptions.RequestException as ex:\n            raise RequestError(ex)\n        _validate_response(response)\n        return json.loads(response.text)", "docstring": "Get smartcam image series\n\nArgs:\nnumber_of_imageseries (int): number of image series to get\noffset (int): skip offset amount of image series", "source": "juraj-google-style"}
{"code": "def on_graph_def(self, graph_def, device_name, wall_time):\n    raise NotImplementedError('on_graph_def() is not implemented in the base servicer class')", "docstring": "Callback for Event proto received through the gRPC stream.\n\nThis Event proto carries a GraphDef, encoded as bytes, in its graph_def\nfield.\n\nArgs:\ngraph_def: A GraphDef object.\ndevice_name: Name of the device on which the graph was created.\nwall_time: An epoch timestamp (in microseconds) for the graph.\n\nReturns:\n`None` or an `EventReply` proto to be sent back to the client. If `None`,\nan `EventReply` proto construct with the default no-arg constructor will\nbe sent back to the client.", "source": "github-repos"}
{"code": "def get_plot(self, xlim=None, ylim=None):\n        \n\n        plt = pretty_plot(12, 8)\n        base = 0.0\n        i = 0\n        for key, sp in self._spectra.items():\n            if not self.stack:\n                plt.plot(sp.x, sp.y + self.yshift * i, color=self.colors[i],\n                         label=str(key), linewidth=3)\n            else:\n                plt.fill_between(sp.x, base, sp.y + self.yshift * i,\n                                 color=self.colors[i],\n                                 label=str(key), linewidth=3)\n                base = sp.y + base\n            plt.xlabel(sp.XLABEL)\n            plt.ylabel(sp.YLABEL)\n            i += 1\n\n        if xlim:\n            plt.xlim(xlim)\n        if ylim:\n            plt.ylim(ylim)\n\n        plt.legend()\n        leg = plt.gca().get_legend()\n        ltext = leg.get_texts()  \n        plt.setp(ltext, fontsize=30)\n        plt.tight_layout()\n        return plt", "docstring": "Get a matplotlib plot showing the DOS.\n\nArgs:\nxlim: Specifies the x-axis limits. Set to None for automatic\ndetermination.\nylim: Specifies the y-axis limits.", "source": "juraj-google-style"}
{"code": "def getEstTraitCovar(self, term_i=None):\n    assert (self.P > 1), 'Trait covars not defined for single trait analysis'\n    if (term_i == None):\n        RV = SP.zeros((self.P, self.P))\n        for term_i in range(self.n_terms):\n            RV += self.vd.getTerm(term_i).getTraitCovar().K()\n    else:\n        assert (term_i < self.n_terms), 'Term index non valid'\n        RV = self.vd.getTerm(term_i).getTraitCovar().K()\n    return RV", "docstring": "Returns explicitly the estimated trait covariance matrix\n\nArgs:\nterm_i:     index of the term we are interested in", "source": "codesearchnet"}
{"code": "def is_displayed(target):\n    is_displayed = getattr(target, 'is_displayed', None)\n    if ((not is_displayed) or (not callable(is_displayed))):\n        raise TypeError(\"Target has no attribute 'is_displayed' or not callable\")\n    if (not is_displayed()):\n        raise WebDriverException('element not visible')", "docstring": "Assert whether the target is displayed\n\nArgs:\ntarget(WebElement): WebElement Object.\n\nReturns:\nReturn True if the element is displayed or return False otherwise.", "source": "codesearchnet"}
{"code": "def read_uint64(self, little_endian=True):\n        \n        if little_endian:\n            endian = \"<\"\n        else:\n            endian = \">\"\n        return self.unpack('%sQ' % endian, 8)", "docstring": "Read 8 bytes as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint:", "source": "juraj-google-style"}
{"code": "def get_crypt_class(self):\n    crypt_type = getattr(settings, 'ENCRYPTED_FIELD_MODE', 'DECRYPT_AND_ENCRYPT')\n    if (crypt_type == 'ENCRYPT'):\n        crypt_class_name = 'Encrypter'\n    elif (crypt_type == 'DECRYPT_AND_ENCRYPT'):\n        crypt_class_name = 'Crypter'\n    else:\n        raise ImproperlyConfigured(('ENCRYPTED_FIELD_MODE must be either DECRYPT_AND_ENCRYPT or ENCRYPT, not %s.' % crypt_type))\n    return getattr(keyczar, crypt_class_name)", "docstring": "Get the Keyczar class to use.\n\nThe class can be customized with the ENCRYPTED_FIELD_MODE setting. By default,\nthis setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to disable decryption.\nThis is necessary if you are only providing public keys to Keyczar.\n\nReturns:\nkeyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.\nkeyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.\n\nOverride this method to customize the type of Keyczar class returned.", "source": "codesearchnet"}
{"code": "async def pull(\n        self,\n        from_image: str,\n        *,\n        auth: Optional[Union[MutableMapping, str, bytes]] = None,\n        tag: str = None,\n        repo: str = None,\n        stream: bool = False\n    ) -> Mapping:\n        \n        image = from_image  \n        params = {\"fromImage\": image}\n        headers = {}\n        if repo:\n            params[\"repo\"] = repo\n        if tag:\n            params[\"tag\"] = tag\n        if auth is not None:\n            registry, has_registry_host, _ = image.partition(\"/\")\n            if not has_registry_host:\n                raise ValueError(\n                    \"Image should have registry host \"\n                    \"when auth information is provided\"\n                )\n            \n            headers[\"X-Registry-Auth\"] = compose_auth_header(auth, registry)\n        response = await self.docker._query(\n            \"images/create\", \"POST\", params=params, headers=headers\n        )\n        return await json_stream_result(response, stream=stream)", "docstring": "Similar to `docker pull`, pull an image locally\n\nArgs:\nfromImage: name of the image to pull\nrepo: repository name given to an image when it is imported\ntag: if empty when pulling an image all tags\nfor the given image to be pulled\nauth: special {'auth': base64} pull private repo", "source": "juraj-google-style"}
{"code": "def __init__(self, devices=None, cross_device_ops=None, *, mesh=None):\n    self._validate_init_args(mesh, devices)\n    if not mesh:\n        mesh = self._build_mesh_from_device_list(devices)\n    extended = dtensor_strategy_extended.DTensorStrategyExtended(container_strategy=self, mesh=mesh)\n    super().__init__(extended)\n    self._mesh = mesh\n    self._devices = devices", "docstring": "Synchronous training across multiple replicas on one machine.\n\nArgs:\ndevices: a list of device strings, such as ['/gpu:0', '/gpu:1']. If both\n`mesh` and `devices` are None, all the available GPU/TPU will be used.\nIf no accelerators are found, CPU is used.\ncross_device_ops: optional, a descendant of `CrossDeviceOps`. The value is\nignored at the moment, and support will be added later.\nmesh: optional DTensor mesh for the computation. Note that either `mesh`\nor `devices` should be provided, and not both. The mesh should be 1D,\nand will be used to split the input data among that dimension.", "source": "github-repos"}
{"code": "def invert_dict(d):\n    inverted = collections.defaultdict(list)\n    for key, value_list in d.items():\n        for val in value_list:\n            inverted[val].append(key)\n    return inverted", "docstring": "Invert a dictionary.\n\nConverts a dictionary (mapping strings to lists of strings) to a dictionary\nthat maps into the other direction.\n\nArguments:\nd: Dictionary to be inverted\n\nReturns:\nA dictionary n with the property that if \"y in d[x]\", then \"x in n[y]\".", "source": "github-repos"}
{"code": "def _parse_date(dataset_date, date_format):\n        \n        \n        if date_format is None:\n            try:\n                return parser.parse(dataset_date)\n            except (ValueError, OverflowError) as e:\n                raisefrom(HDXError, 'Invalid dataset date!', e)\n        else:\n            try:\n                return datetime.strptime(dataset_date, date_format)\n            except ValueError as e:\n                raisefrom(HDXError, 'Invalid dataset date!', e)", "docstring": "Parse dataset date from string using specified format. If no format is supplied, the function will guess.\nFor unambiguous formats, this should be fine.\n\nArgs:\ndataset_date (str): Dataset date string\ndate_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.\n\nReturns:\ndatetime.datetime", "source": "juraj-google-style"}
{"code": "def phase_histogram(dts, times=None, nbins=30, colormap=mpl.cm.Blues):\n    \n    if times is None:\n        times = np.linspace(dts.tspan[0], dts.tspan[-1], num=4)\n    elif isinstance(times, numbers.Number):\n        times = np.array([times], dtype=np.float64)\n    indices = distob.gather(dts.tspan.searchsorted(times))\n    if indices[-1] == len(dts.tspan):\n        indices[-1] -= 1\n    nplots = len(indices)\n    fig = plt.figure()\n    n = np.zeros((nbins, nplots))\n    for i in range(nplots):\n        index = indices[i]\n        time = dts.tspan[index]\n        phases = distob.gather(dts.mod2pi()[index, 0, :])\n        ax = fig.add_subplot(1, nplots, i + 1, projection='polar')\n        n[:,i], bins, patches = ax.hist(phases, nbins, (-np.pi, np.pi), \n                                        density=True, histtype='bar')\n        ax.set_title('time = %d s' % time)\n        ax.set_xticklabels(['0', r'$\\frac{\\pi}{4}$', r'$\\frac{\\pi}{2}$', \n                            r'$\\frac{3\\pi}{4}$', r'$\\pi$', r'$\\frac{-3\\pi}{4}$',\n                            r'$\\frac{-\\pi}{2}$', r'$\\frac{-\\pi}{4}$'])\n    nmin, nmax = n.min(), n.max()\n    \n    norm = mpl.colors.Normalize(1.2*nmin - 0.2*nmax, \n                                0.6*nmin + 0.4*nmax, clip=True)\n    for i in range(nplots):\n        ax = fig.get_axes()[i]\n        ax.set_ylim(0, nmax)\n        for this_n, thispatch in zip(n[:,i], ax.patches):\n            color = colormap(norm(this_n))\n            thispatch.set_facecolor(color)\n            thispatch.set_edgecolor(color)\n    fig.show()", "docstring": "Plot a polar histogram of a phase variable's probability distribution\nArgs:\ndts: DistTimeseries with axis 2 ranging over separate instances of an\noscillator (time series values are assumed to represent an angle)\ntimes (float or sequence of floats): The target times at which\nto plot the distribution\nnbins (int): number of histogram bins\ncolormap", "source": "juraj-google-style"}
{"code": "def _LogForwardedIpChanges(self, configured, desired, to_add, to_remove, interface):\n    if ((not to_add) and (not to_remove)):\n        return\n    self.logger.info('Changing %s IPs from %s to %s by adding %s and removing %s.', interface, (configured or None), (desired or None), (to_add or None), (to_remove or None))", "docstring": "Log the planned IP address changes.\n\nArgs:\nconfigured: list, the IP address strings already configured.\ndesired: list, the IP address strings that will be configured.\nto_add: list, the forwarded IP address strings to configure.\nto_remove: list, the forwarded IP address strings to delete.\ninterface: string, the output device to modify.", "source": "codesearchnet"}
{"code": "def update_pipeline_and_auto_class_table(table: Dict[str, Tuple[str, str]]) -> Dict[str, Tuple[str, str]]:\n    auto_modules = [transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto]\n    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:\n        model_mappings = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']\n        auto_classes = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']\n        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):\n            if not hasattr(module, mapping):\n                continue\n            model_names = []\n            for name in getattr(module, mapping).values():\n                if isinstance(name, str):\n                    model_names.append(name)\n                else:\n                    model_names.extend(list(name))\n            table.update(dict.fromkeys(model_names, (pipeline_tag, cls)))\n    return table", "docstring": "Update the table mapping models to pipelines and auto classes without removing old keys if they don't exist anymore.\n\nArgs:\ntable (`Dict[str, Tuple[str, str]]`):\nThe existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with\nwhich they should be used.\n\nReturns:\n`Dict[str, Tuple[str, str]]`: The updated table in the same format.", "source": "github-repos"}
{"code": "def fit(self, col):\n    dates = self.safe_datetime_cast(col)\n    self.default_val = (dates.groupby(dates).count().index[0].timestamp() * 1000000000.0)", "docstring": "Prepare the transformer to convert data.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def ParseNumericOption(self, options, name, base=10, default_value=None):\n    numeric_value = getattr(options, name, None)\n    if (not numeric_value):\n        return default_value\n    try:\n        return int(numeric_value, base)\n    except (TypeError, ValueError):\n        name = name.replace('_', ' ')\n        raise errors.BadConfigOption('Unsupported numeric value {0:s}: {1!s}.'.format(name, numeric_value))", "docstring": "Parses a numeric option.\n\nIf the option is not set the default value is returned.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\nname (str): name of the numeric option.\nbase (Optional[int]): base of the numeric value.\ndefault_value (Optional[object]): default value.\n\nReturns:\nint: numeric value.\n\nRaises:\nBadConfigOption: if the options are invalid.", "source": "codesearchnet"}
{"code": "def get_caching_key(self, user_context):\n    raise NotImplementedError('subclasses must override this')", "docstring": "Returns a unique key to use for caching.\n\nSubclasses must override this.\n\nCalls made to `transform_function` with functions that have the same code\nobject and caching key will return a cached instance on subsequent\ninvocations.\n\nArgs:\nuser_context: The context object which was passed to `transform`.\n\nReturns:\nextra_locals: A hashable.", "source": "github-repos"}
{"code": "def __convertLongToString(self, iValue):\n        \n        string = ''\n        strValue = str(hex(iValue))\n\n        string = strValue.lstrip('0x')\n        string = string.rstrip('L')\n\n        return string", "docstring": "convert a long hex integer to string\nremove '0x' and 'L' return string\n\nArgs:\niValue: long integer in hex format\n\nReturns:\nstring of this long integer without \"0x\" and \"L\"", "source": "juraj-google-style"}
{"code": "def ToName(param_type):\n    \n    items = inspect.getmembers(ContractParameterType)\n\n    if type(param_type) is bytes:\n        param_type = int.from_bytes(param_type, 'little')\n\n    for item in items:\n        name = item[0]\n        val = int(item[1].value)\n\n        if val == param_type:\n            return name\n\n    return None", "docstring": "Gets the name of a ContractParameterType based on its value\nArgs:\nparam_type (ContractParameterType): type to get the name of\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def _query(self, url, xpath):\n    return self.session.query(CachedRequest).filter((CachedRequest.url == url)).filter((CachedRequest.xpath == xpath))", "docstring": "Base query for an url and xpath\n\nArgs:\nurl (str): URL to search\nxpath (str): xpath to search (may be ``None``)", "source": "codesearchnet"}
{"code": "def MapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    data_type_size = self._data_type_definition.GetByteSize()\n    self._CheckByteStreamSize(byte_stream, byte_offset, data_type_size)\n\n    try:\n      if self._byte_order == definitions.BYTE_ORDER_BIG_ENDIAN:\n        mapped_value = uuid.UUID(\n            bytes=byte_stream[byte_offset:byte_offset + 16])\n      elif self._byte_order == definitions.BYTE_ORDER_LITTLE_ENDIAN:\n        mapped_value = uuid.UUID(\n            bytes_le=byte_stream[byte_offset:byte_offset + 16])\n\n    except Exception as exception:\n      error_string = (\n          'Unable to read: {0:s} from byte stream at offset: {1:d} '\n          'with error: {2!s}').format(\n              self._data_type_definition.name, byte_offset, exception)\n      raise errors.MappingError(error_string)\n\n    if context:\n      context.byte_size = data_type_size\n\n    return mapped_value", "docstring": "Maps the data type on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\nuuid.UUID: mapped value.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def firmware_version(self):\n        \n        namespace = \"urn:brocade.com:mgmt:brocade-firmware-ext\"\n\n        request_ver = ET.Element(\"show-firmware-version\", xmlns=namespace)\n\n        ver = self._callback(request_ver, handler='get')\n        return ver.find('.", "docstring": "Returns firmware version.\n\nArgs:\nNone\n\nReturns:\nDictionary\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def preprocess_input(x, data_format=None):\n    return x", "docstring": "A placeholder method for backward compatibility.\n\nThe preprocessing logic has been included in the efficientnet model\nimplementation. Users are no longer required to call this method to\nnormalize the input data. This method does nothing and only kept as a\nplaceholder to align the API surface between old and new version of model.\n\nArgs:\nx: A floating point `numpy.array` or a tensor.\ndata_format: Optional data format of the image tensor/array. `None`\nmeans the global setting `keras.backend.image_data_format()`\nis used (unless you changed it, it uses `\"channels_last\"`).\nDefaults to `None`.\n\nReturns:\nUnchanged `numpy.array` or tensor.", "source": "github-repos"}
{"code": "def build_panel(panel_info, adapter):\n    panel_name = panel_info.get('panel_id', panel_info.get('panel_name'))\n    if (not panel_name):\n        raise KeyError('Panel has to have a id')\n    panel_obj = dict(panel_name=panel_name)\n    LOG.info('Building panel with name: {0}'.format(panel_name))\n    try:\n        institute_id = panel_info['institute']\n    except KeyError as err:\n        raise KeyError('Panel has to have a institute')\n    if (adapter.institute(institute_id) is None):\n        raise IntegrityError(('Institute %s could not be found' % institute_id))\n    panel_obj['institute'] = panel_info['institute']\n    panel_obj['version'] = float(panel_info['version'])\n    try:\n        panel_obj['date'] = panel_info['date']\n    except KeyError as err:\n        raise KeyError('Panel has to have a date')\n    panel_obj['display_name'] = panel_info.get('display_name', panel_obj['panel_name'])\n    gene_objs = []\n    fail = False\n    for gene_info in panel_info.get('genes', []):\n        try:\n            gene_obj = build_gene(gene_info, adapter)\n            gene_objs.append(gene_obj)\n        except IntegrityError as err:\n            LOG.warning(err)\n            fail = True\n    if fail:\n        raise IntegrityError('Some genes did not exist in database. Please see log messages.')\n    panel_obj['genes'] = gene_objs\n    return panel_obj", "docstring": "Build a gene_panel object\n\nArgs:\npanel_info(dict): A dictionary with panel information\nadapter (scout.adapter.MongoAdapter)\n\nReturns:\npanel_obj(dict)\n\ngene_panel = dict(\npanel_id = str, # required\ninstitute = str, # institute_id, required\nversion = float, # required\ndate = datetime, # required\ndisplay_name = str, # default is panel_name\ngenes = list, # list of panel genes, sorted on panel_gene['symbol']\n)", "source": "codesearchnet"}
{"code": "def run(self, dag):\n        \n        for node in dag.threeQ_or_more_gates():\n            \n            rule = node.op.definition\n            if not rule:\n                raise QiskitError(\"Cannot unroll all 3q or more gates. \"\n                                  \"No rule to expand instruction %s.\" %\n                                  node.op.name)\n\n            \n            \n            decomposition = DAGCircuit()\n            decomposition.add_qreg(rule[0][1][0][0])\n            for inst in rule:\n                decomposition.apply_operation_back(*inst)\n            decomposition = self.run(decomposition)  \n            dag.substitute_node_with_dag(node, decomposition)\n        return dag", "docstring": "Expand 3+ qubit gates using their decomposition rules.\n\nArgs:\ndag(DAGCircuit): input dag\nReturns:\nDAGCircuit: output dag with maximum node degrees of 2\nRaises:\nQiskitError: if a 3q+ gate is not decomposable", "source": "juraj-google-style"}
{"code": "def to_proj4(self, as_dict=False, toplevel=True):\n        \n        \n        \n        if toplevel:\n            string = \"+proj=longlat %s %s +nodef\" % (self.datum.to_proj4(), self.prime_mer.to_proj4())\n        else:\n            string = \"%s %s\" % (self.datum.to_proj4(), self.prime_mer.to_proj4())\n        if as_dict:\n            return dict([\n                        entry.lstrip('+').split('=')\n                        for entry in string.split()\n                        if entry != \"+no_defs\"\n                         ])\n        else:\n            return string", "docstring": "Returns the CS as a proj4 formatted string or dict.\n\nArguments:\n\n- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).\n- **toplevel** (optional): If True, treats this CS as the final toplevel CS and adds the necessary proj4 elements (defaults to True).", "source": "juraj-google-style"}
{"code": "def right_shift_blockwise(x, query_shape, name=None):\n    with tf.variable_scope(name, default_name='right_shift_blockwise', values=[x]):\n        x_list_shape = x.get_shape().as_list()\n        x_shape = common_layers.shape_list(x)\n        x = tf.expand_dims(x, axis=1)\n        x = pad_to_multiple_2d(x, query_shape)\n        padded_x_shape = common_layers.shape_list(x)\n        x_indices = gather_indices_2d(x, query_shape, query_shape)\n        x_new = get_shifted_center_blocks(x, x_indices)\n        output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)\n        output = tf.squeeze(output, axis=1)\n        output = tf.slice(output, [0, 0, 0, 0], [(- 1), x_shape[1], x_shape[2], (- 1)])\n        output.set_shape(x_list_shape)\n        return output", "docstring": "Right shifts once in every block.\n\nArgs:\nx: a tensor of shape [batch, height, width, depth]\nquery_shape: A 2d tuple of ints\nname: a string\n\nReturns:\noutput: a tensor of the same shape as x", "source": "codesearchnet"}
{"code": "def get_service_name(*args):\n    raw_services = _get_services()\n    services = dict()\n    for raw_service in raw_services:\n        if args:\n            if ((raw_service['DisplayName'] in args) or (raw_service['ServiceName'] in args) or (raw_service['ServiceName'].lower() in args)):\n                services[raw_service['DisplayName']] = raw_service['ServiceName']\n        else:\n            services[raw_service['DisplayName']] = raw_service['ServiceName']\n    return services", "docstring": "The Display Name is what is displayed in Windows when services.msc is\nexecuted.  Each Display Name has an associated Service Name which is the\nactual name of the service.  This function allows you to discover the\nService Name by returning a dictionary of Display Names and Service Names,\nor filter by adding arguments of Display Names.\n\nIf no args are passed, return a dict of all services where the keys are the\nservice Display Names and the values are the Service Names.\n\nIf arguments are passed, create a dict of Display Names and Service Names\n\nReturns:\ndict: A dictionary of display names and service names\n\nCLI Examples:\n\n.. code-block:: bash\n\nsalt '*' service.get_service_name\nsalt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'", "source": "codesearchnet"}
{"code": "class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(MaxPooling1D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Max Pooling layer for 1D inputs.\n\nArgs:\npool_size: An integer or tuple/list of a single integer,\nrepresenting the size of the pooling window.\nstrides: An integer or tuple/list of a single integer, specifying the\nstrides of the pooling operation.\npadding: A string. The padding method, either 'valid' or 'same'.\nCase-insensitive.\ndata_format: A string, one of `channels_last` (default) or `channels_first`.\nThe ordering of the dimensions in the inputs.\n`channels_last` corresponds to inputs with shape\n`(batch, length, channels)` while `channels_first` corresponds to\ninputs with shape `(batch, channels, length)`.\nname: A string, the name of the layer.", "source": "github-repos"}
{"code": "def hash_stream(self, url):\n    md5hash = hashlib.md5()\n    try:\n        for chunk in self.response.iter_content(chunk_size=10240):\n            if chunk:\n                md5hash.update(chunk)\n        return md5hash.hexdigest()\n    except Exception as e:\n        raisefrom(DownloadError, ('Download of %s failed in retrieval of stream!' % url), e)", "docstring": "Stream file from url and hash it using MD5. Must call setup method first.\n\nArgs:\nurl (str): URL to download\n\nReturns:\nstr: MD5 hash of file", "source": "codesearchnet"}
{"code": "def _get_arg_parser(func, types, args_and_defaults, delimiter_chars):\n    _LOG.debug(\"Creating ArgumentParser for '%s'\", func.__name__)\n    (description, arg_help) = _prepare_doc(func, [x for (x, _) in args_and_defaults], delimiter_chars)\n    parser = argparse.ArgumentParser(description=description)\n    for ((arg, default), arg_type) in zip_longest(args_and_defaults, types):\n        help_msg = arg_help[arg]\n        if (default is NoDefault):\n            arg_type = (arg_type or identity_type)\n            if (arg_type == bool):\n                _LOG.debug('Adding optional flag %s.%s', func.__name__, arg)\n                parser.add_argument(('--%s' % arg), default=True, required=False, action='store_false', help=('%s. Defaults to True if not specified' % help_msg))\n            else:\n                _LOG.debug('Adding positional argument %s.%s', func.__name__, arg)\n                parser.add_argument(arg, help=help_msg, type=arg_type)\n        else:\n            if ((default is None) and (arg_type is None)):\n                raise ParseThisError(\"To use default value of 'None' you need to specify the type of the argument '{}' for the method '{}'\".format(arg, func.__name__))\n            arg_type = (arg_type or type(default))\n            if (arg_type == bool):\n                action = ('store_false' if default else 'store_true')\n                _LOG.debug('Adding optional flag %s.%s', func.__name__, arg)\n                parser.add_argument(('--%s' % arg), help=help_msg, default=default, action=action)\n            else:\n                _LOG.debug('Adding optional argument %s.%s', func.__name__, arg)\n                parser.add_argument(('--%s' % arg), help=help_msg, default=default, type=arg_type)\n    return parser", "docstring": "Return an ArgumentParser for the given function. Arguments are defined\nfrom the function arguments and their associated defaults.\n\nArgs:\nfunc: function for which we want an ArgumentParser\ntypes: types to which the command line arguments should be converted to\nargs_and_defaults: list of 2-tuples (arg_name, arg_default)\ndelimiter_chars: characters used to separate the parameters from their\nhelp message in the docstring", "source": "codesearchnet"}
{"code": "def _parse_state(self, config):\n    value = STATE_RE.search(config).group('value')\n    return dict(state=value)", "docstring": "_parse_state scans the provided configuration block and extracts\nthe vlan state value.  The config block is expected to always return\nthe vlan state config.  The return dict is inteded to be merged into\nthe response dict.\n\nArgs:\nconfig (str): The vlan configuration block from the nodes\nrunning configuration\n\nReturns:\ndict: resource dict attribute", "source": "codesearchnet"}
{"code": "def is_str(string):\n    if (sys.version_info[:2] >= (3, 0)):\n        return isinstance(string, str)\n    return isinstance(string, basestring)", "docstring": "Python 2 and 3 compatible string checker.\n\nArgs:\nstring (str | basestring): the string to check\n\nReturns:\nbool: True or False", "source": "codesearchnet"}
{"code": "def notify(self, method_name: str, *args: Any, trim_log_values: Optional[bool]=None, validate_against_schema: Optional[bool]=None, **kwargs: Any) -> Response:\n    return self.send(Notification(method_name, *args, **kwargs), trim_log_values=trim_log_values, validate_against_schema=validate_against_schema)", "docstring": "Send a JSON-RPC request, without expecting a response.\n\nArgs:\nmethod_name: The remote procedure's method name.\nargs: Positional arguments passed to the remote procedure.\nkwargs: Keyword arguments passed to the remote procedure.\ntrim_log_values: Abbreviate the log entries of requests and responses.\nvalidate_against_schema: Validate response against the JSON-RPC schema.", "source": "codesearchnet"}
{"code": "def execute_with_cancellation(op_name, num_outputs, inputs, attrs, ctx, cancellation_manager, name=None):\n    device_name = ctx.device_name\n    try:\n        ctx.ensure_initialized()\n        tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(ctx._handle, device_name, op_name, inputs, attrs, cancellation_manager._impl, num_outputs)\n    except core._NotOkStatusException as e:\n        if name is not None:\n            e.message += ' name: ' + name\n        raise core._status_to_exception(e) from None\n    except TypeError as e:\n        keras_symbolic_tensors = [x for x in inputs if _is_keras_symbolic_tensor(x)]\n        if keras_symbolic_tensors:\n            raise core._SymbolicException('Inputs to eager execution function cannot be Keras symbolic tensors, but found {}'.format(keras_symbolic_tensors))\n        raise e\n    return tensors", "docstring": "Execute a TensorFlow operation.\n\nArgs:\nop_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to\nexecute.\nnum_outputs: The number of outputs of the operation to fetch. (Explicitly\nprovided instead of being inferred for performance reasons).\ninputs: A list of inputs to the operation. Each entry should be a Tensor, or\na value which can be passed to the Tensor constructor to create one.\nattrs: A tuple with alternating string attr names and attr values for this\noperation.\nctx: The value of context.context().\ncancellation_manager: a `CancellationManager` object that can be used to\ncancel the operation.\nname: Customized name for the operation.\n\nReturns:\nList of output Tensor objects. The list is empty if there are no outputs\n\nRaises:\nAn exception on error.", "source": "github-repos"}
{"code": "def make_decoder(activation, latent_size, output_shape, base_depth):\n  \n  deconv = functools.partial(\n      tf.keras.layers.Conv2DTranspose, padding=\"SAME\", activation=activation)\n  conv = functools.partial(\n      tf.keras.layers.Conv2D, padding=\"SAME\", activation=activation)\n\n  decoder_net = tf.keras.Sequential([\n      deconv(2 * base_depth, 7, padding=\"VALID\"),\n      deconv(2 * base_depth, 5),\n      deconv(2 * base_depth, 5, 2),\n      deconv(base_depth, 5),\n      deconv(base_depth, 5, 2),\n      deconv(base_depth, 5),\n      conv(output_shape[-1], 5, activation=None),\n  ])\n\n  def decoder(codes):\n    original_shape = tf.shape(input=codes)\n    \n    \n    codes = tf.reshape(codes, (-1, 1, 1, latent_size))\n    logits = decoder_net(codes)\n    logits = tf.reshape(\n        logits, shape=tf.concat([original_shape[:-1], output_shape], axis=0))\n    return tfd.Independent(tfd.Bernoulli(logits=logits),\n                           reinterpreted_batch_ndims=len(output_shape),\n                           name=\"image\")\n\n  return decoder", "docstring": "Creates the decoder function.\n\nArgs:\nactivation: Activation function in hidden layers.\nlatent_size: Dimensionality of the encoding.\noutput_shape: The output image shape.\nbase_depth: Smallest depth for a layer.\n\nReturns:\ndecoder: A `callable` mapping a `Tensor` of encodings to a\n`tfd.Distribution` instance over images.", "source": "juraj-google-style"}
{"code": "def is_type(url: str, message_or_descriptor: annotation_utils.MessageOrDescriptorBase) -> bool:\n    return annotation_utils.get_structure_definition_url(message_or_descriptor) == url", "docstring": "Returns True if message_or_descriptor has a structure definition of url.\n\nArgs:\nurl: The FHIR structure definition URL to compare against.\nmessage_or_descriptor: The Message or Descriptor to examine.\n\nReturns:\nTrue if message_or_descriptor has a structure definition equal to url.", "source": "github-repos"}
{"code": "def bulk_write(self, metrics):\n        \n        actions = []\n        index = self.get_index()\n        for metric in metrics:\n            actions.append({'index': {'_index': index, '_type': self.doc_type}})\n            actions.append(metric)\n        try:\n            self.client.bulk(actions)\n        except TransportError as exc:\n            logger.warning('bulk_write metrics %r failure %r', metrics, exc)", "docstring": "Write multiple metrics to elasticsearch in one request\n\nArgs:\nmetrics (list): data with mappings to send to elasticsearch", "source": "juraj-google-style"}
{"code": "def get_owner_emails(self, partial_owner_match=True):\n    for tag in self.tags:\n        if (tag.key.lower() == 'owner'):\n            rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)\n            if partial_owner_match:\n                match = rgx.findall(tag.value)\n                if match:\n                    return [NotificationContact('email', email) for email in match]\n            else:\n                match = rgx.match(tag.value)\n                if match:\n                    return [NotificationContact('email', email) for email in match.groups()]\n    return None", "docstring": "Return a list of email addresses associated with the instance, based on tags\n\nReturns:\nList of email addresses if any, else None", "source": "codesearchnet"}
{"code": "class CombinePerKey(PTransformWithSideInputs):\n\n    def with_hot_key_fanout(self, fanout):\n        \n        from apache_beam.transforms.combiners import curry_combine_fn\n        if fanout is None:\n            return self\n        else:\n            return _CombinePerKeyWithHotKeyFanout(curry_combine_fn(self.fn, self.args, self.kwargs), fanout)\n\n    def display_data(self):\n        return {'combine_fn': DisplayDataItem(self.fn.__class__, label='Combine Function'), 'combine_fn_dd': self.fn}\n\n    def make_fn(self, fn, has_side_inputs):\n        self._fn_label = ptransform.label_from_callable(fn)\n        return CombineFn.maybe_from_callable(fn, has_side_inputs)\n\n    def default_label(self):\n        return '%s(%s)' % (self.__class__.__name__, self._fn_label)\n\n    def _process_argspec_fn(self):\n        return lambda element, *args, **kwargs: None\n\n    def expand(self, pcoll):\n        args, kwargs = util.insert_values_in_args(self.args, self.kwargs, self.side_inputs)\n        return pcoll | GroupByKey() | 'Combine' >> CombineValues(self.fn, *args, **kwargs)\n\n    def default_type_hints(self):\n        result = self.fn.get_type_hints()\n        k = typehints.TypeVariable('K')\n        if result.input_types:\n            args, kwargs = result.input_types\n            args = (typehints.Tuple[k, args[0]],) + args[1:]\n            result = result.with_input_types(*args, **kwargs)\n        else:\n            result = result.with_input_types(typehints.Tuple[k, typehints.Any])\n        if result.output_types:\n            main_output_type = result.simple_output_type('')\n            result = result.with_output_types(typehints.Tuple[k, main_output_type])\n        else:\n            result = result.with_output_types(typehints.Tuple[k, typehints.Any])\n        return result\n\n    def to_runner_api_parameter(self, context):\n        if self.args or self.kwargs:\n            from apache_beam.transforms.combiners import curry_combine_fn\n            combine_fn = curry_combine_fn(self.fn, self.args, self.kwargs)\n        else:\n            combine_fn = self.fn\n        return (common_urns.composites.COMBINE_PER_KEY.urn, _combine_payload(combine_fn, context))\n\n    @staticmethod\n    @PTransform.register_urn(common_urns.composites.COMBINE_PER_KEY.urn, beam_runner_api_pb2.CombinePayload)\n    def from_runner_api_parameter(unused_ptransform, combine_payload, context):\n        return CombinePerKey(CombineFn.from_runner_api(combine_payload.combine_fn, context))\n\n    def runner_api_requires_keyed_input(self):\n        return True", "docstring": "A per-key Combine transform.\n\nIdentifies sets of values associated with the same key in the input\nPCollection, then applies a CombineFn to condense those sets to single\nvalues. See documentation in CombineFn for details on the specifics on how\nCombineFns are applied.\n\nArgs:\npcoll: input pcollection.\nfn: instance of CombineFn to apply to all values under the same key in\npcoll, or a callable whose signature is ``f(iterable, *args, **kwargs)``\n(e.g., sum, max).\n*args: arguments and side inputs, passed directly to the CombineFn.\n**kwargs: arguments and side inputs, passed directly to the CombineFn.\n\nReturns:\nA PObject holding the result of the combine operation.", "source": "github-repos"}
{"code": "def AddEventTags(self, event_tags):\n    self._RaiseIfNotWritable()\n    for event_tag in event_tags:\n        self.AddEventTag(event_tag)", "docstring": "Adds event tags.\n\nArgs:\nevent_tags (list[EventTag]): event tags.\n\nRaises:\nIOError: when the storage file is closed or read-only or\nif the event tags cannot be serialized.\nOSError: when the storage file is closed or read-only or\nif the event tags cannot be serialized.", "source": "codesearchnet"}
{"code": "def __init__(self, sources, stacker_cache_dir=None):\n        \n        if not stacker_cache_dir:\n            stacker_cache_dir = os.path.expanduser(\"~/.stacker\")\n        package_cache_dir = os.path.join(stacker_cache_dir, 'packages')\n        self.stacker_cache_dir = stacker_cache_dir\n        self.package_cache_dir = package_cache_dir\n        self.sources = sources\n        self.configs_to_merge = []\n        self.create_cache_directories()", "docstring": "Process a config's defined package sources.\n\nArgs:\nsources (dict): Package sources from Stacker config dictionary\nstacker_cache_dir (string): Path where remote sources will be\ncached.", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = utils.BytearrayStream()\n\n        if self._unique_identifier is not None:\n            self._unique_identifier.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._key_format_type is not None:\n            self._key_format_type.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._key_compression_type is not None:\n            self._key_compression_type.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n        if self._key_wrapping_specification is not None:\n            self._key_wrapping_specification.write(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        self.length = local_stream.length()\n        super(GetRequestPayload, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Get request payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def tf_retrieve_indices(self, indices):\n    states = dict()\n    for name in sorted(self.states_memory):\n        states[name] = tf.gather(params=self.states_memory[name], indices=indices)\n    internals = dict()\n    for name in sorted(self.internals_memory):\n        internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)\n    actions = dict()\n    for name in sorted(self.actions_memory):\n        actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)\n    terminal = tf.gather(params=self.terminal_memory, indices=indices)\n    reward = tf.gather(params=self.reward_memory, indices=indices)\n    if self.include_next_states:\n        assert (util.rank(indices) == 1)\n        next_indices = ((indices + 1) % self.capacity)\n        next_states = dict()\n        for name in sorted(self.states_memory):\n            next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)\n        next_internals = dict()\n        for name in sorted(self.internals_memory):\n            next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)\n        return dict(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals)\n    else:\n        return dict(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward)", "docstring": "Fetches experiences for given indices.\n\nArgs:\nindices: Index tensor\n\nReturns: Batch of experiences", "source": "codesearchnet"}
{"code": "def get_framework(model, revision: Optional[str]=None):\n    warnings.warn('`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.', FutureWarning)\n    if not is_tf_available() and (not is_torch_available()):\n        raise RuntimeError('At least one of TensorFlow 2.0 or PyTorch should be installed. To install TensorFlow 2.0, read the instructions at https:\n    if isinstance(model, str):\n        if is_torch_available() and (not is_tf_available()):\n            model = AutoModel.from_pretrained(model, revision=revision)\n        elif is_tf_available() and (not is_torch_available()):\n            model = TFAutoModel.from_pretrained(model, revision=revision)\n        else:\n            try:\n                model = AutoModel.from_pretrained(model, revision=revision)\n            except OSError:\n                model = TFAutoModel.from_pretrained(model, revision=revision)\n    framework = infer_framework(model.__class__)\n    return framework", "docstring": "Select framework (TensorFlow or PyTorch) to use.\n\nArgs:\nmodel (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`):\nIf both frameworks are installed, picks the one corresponding to the model passed (either a model class or\nthe model name). If no specific model is provided, defaults to using PyTorch.", "source": "github-repos"}
{"code": "def get_data(img_path):\n    \n    mean = np.array([123.68, 116.779, 103.939])  \n    img = Image.open(img_path)\n    img = np.array(img, dtype=np.float32)\n    reshaped_mean = mean.reshape(1, 1, 3)\n    img = img - reshaped_mean\n    img = np.swapaxes(img, 0, 2)\n    img = np.swapaxes(img, 1, 2)\n    img = np.expand_dims(img, axis=0)\n    return img", "docstring": "get the (1, 3, h, w) np.array data for the supplied image\nArgs:\nimg_path (string): the input image path\n\nReturns:\nnp.array: image data in a (1, 3, h, w) shape", "source": "juraj-google-style"}
{"code": "def normalize(self, text, normalizations=None):\n        \n        for normalization, kwargs in self._parse_normalizations(\n                normalizations or self._config.normalizations):\n            try:\n                text = getattr(self, normalization)(text, **kwargs)\n            except AttributeError as e:\n                self._logger.debug('Invalid normalization: %s', e)\n\n\n        return text", "docstring": "Normalize a given text applying all normalizations.\n\nNormalizations to apply can be specified through a list of\nparameters and will be executed in that order.\n\nArgs:\ntext: The text to be processed.\nnormalizations: List of normalizations to apply.\n\nReturns:\nThe text normalized.", "source": "juraj-google-style"}
{"code": "def update_configuration(self):\n    uri = '{}/configuration'.format(self.data['uri'])\n    result = self._helper.update({}, uri)\n    self.refresh()\n    return result", "docstring": "Asynchronously applies or re-applies the SAS Logical Interconnect configuration to all managed interconnects\nof a SAS Logical Interconnect.\n\nReturns:\ndict: SAS Logical Interconnect.", "source": "codesearchnet"}
{"code": "def call_fn(fn, args):\n  \n\n  if expand_as_args(args):\n    return fn(*args)\n  elif _expand_as_kwargs(args):\n    return fn(**args)\n  else:\n    return fn(args)", "docstring": "Calls `fn` with `args`, possibly expanding `args`.\n\nUse this function when calling a user-provided callable using user-provided\narguments.\n\nThe expansion rules are as follows:\n\n`fn(*args)` if `args` is a `list` or a `tuple`, but not a `namedtuple`.\n`fn(**args)` if `args` is a `dict`.\n`fn(args)` otherwise.\n\nArgs:\nfn: A callable that takes either `args` as an argument(s).\nargs: Arguments to `fn`.\n\nReturns:\nresult: Return value of `fn`.", "source": "juraj-google-style"}
{"code": "def setup(__pkg: str) -> jinja2.Environment:\n    \n    dirs = [path.join(d, 'templates')\n            for d in xdg_basedir.get_data_dirs(__pkg)]\n\n    env = jinja2.Environment(\n        autoescape=jinja2.select_autoescape(['html', 'xml']),\n        loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs]))\n    env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))\n    env.filters.update(FILTERS)\n\n    return env", "docstring": "Configure a new Jinja environment with our filters.\n\nArgs:\n__pkg: Package name to use as base for templates searches\nReturns:\nConfigured Jinja environment", "source": "juraj-google-style"}
{"code": "def allele_clusters(dists, t=0.025):\n    \n    clusters = fcluster(linkage(dists), 0.025, criterion='distance')\n    cluster_idx = defaultdict(list)\n    for idx, cl in enumerate(clusters):\n        cluster_idx[cl].append(idx)\n    return cluster_idx", "docstring": "Flat clusters from distance matrix\n\nArgs:\ndists (numpy.array): pdist distance matrix\nt (float): fcluster (tree cutting) distance threshold\n\nReturns:\ndict of lists: cluster number to list of indices of distances in cluster", "source": "juraj-google-style"}
{"code": "def generate_states(state_count, process_matrix, process_covariance,\n                    initial_state=None):\n    \n    \n    process_matrix = np.atleast_2d(process_matrix)\n    process_covariance = np.atleast_2d(process_covariance)\n    state_dim = process_matrix.shape[0]\n\n    if process_matrix.shape != (state_dim, state_dim):\n        raise ValueError(\"Process matrix has inconsistent shape: {}\".format(\n            process_matrix.shape))\n\n    if process_covariance.shape != (state_dim, state_dim):\n        raise ValueError(\"Process covariance has inconsistent shape: {}\".format(\n            process_covariance.shape))\n\n    if initial_state is None:\n        initial_state = np.zeros(process_matrix.shape[0])\n\n    states = [initial_state]\n    while len(states) < state_count:\n        states.append(\n            process_matrix.dot(states[-1]) + np.random.multivariate_normal(\n                mean=np.zeros(state_dim), cov=process_covariance\n            )\n        )\n\n    return np.vstack(states)", "docstring": "Generate states by simulating a linear system with constant process matrix\nand process noise covariance.\n\nArgs:\nstate_count (int): Number of states to generate.\nprocess_matrix (array): Square array\nprocess_covariance (array): Square array specifying process noise\ncovariance.\ninitial_state (array or None): If omitted, use zero-filled vector as\ninitial state.", "source": "juraj-google-style"}
{"code": "def persist(self, status=None):\n    self._persist = (status if (type(status) is bool) else True)", "docstring": "Enables persistent mode for the current mock.\n\nReturns:\nself: current Mock instance.", "source": "codesearchnet"}
{"code": "def get_dict_definition(self, dict, get_list=False):\n    list_def_candidate = []\n    for definition_name in self.specification['definitions'].keys():\n        if self.validate_definition(definition_name, dict):\n            if (not get_list):\n                return definition_name\n            list_def_candidate.append(definition_name)\n    if get_list:\n        return list_def_candidate\n    return None", "docstring": "Get the definition name of the given dict.\n\nArgs:\ndict: dict to test.\nget_list: if set to true, return a list of definition that match the body.\nif False, only return the first.\n\nReturns:\nThe definition name or None if the dict does not match any definition.\nIf get_list is True, return a list of definition_name.", "source": "codesearchnet"}
{"code": "def call(poly, args):\n    \n    args = list(args)\n\n    \n    if len(args) < poly.dim:\n        args = args + [np.nan]*(poly.dim-len(args))\n\n    elif len(args) > poly.dim:\n        raise ValueError(\"too many arguments\")\n\n    \n    x0, x1 = [], []\n    for idx, arg in enumerate(args):\n\n        if isinstance(arg, Poly):\n            poly_ = Poly({\n                tuple(np.eye(poly.dim)[idx]): np.array(1)\n            })\n            x0.append(poly_)\n            x1.append(arg)\n            args[idx] = np.nan\n    if x0:\n        poly = call(poly, args)\n        return substitute(poly, x0, x1)\n\n    \n    masks = np.zeros(len(args), dtype=bool)\n    for idx, arg in enumerate(args):\n        if np.ma.is_masked(arg) or np.any(np.isnan(arg)):\n            masks[idx] = True\n            args[idx] = 0\n\n    shape = np.array(\n        args[\n            np.argmax(\n                [np.prod(np.array(arg).shape) for arg in args]\n            )\n        ]\n    ).shape\n    args = np.array([np.ones(shape, dtype=int)*arg for arg in args])\n\n    A = {}\n    for key in poly.keys:\n\n        key_ = np.array(key)*(1-masks)\n        val = np.outer(poly.A[key], np.prod((args.T**key_).T, \\\n                axis=0))\n        val = np.reshape(val, poly.shape + tuple(shape))\n        val = np.where(val != val, 0, val)\n\n        mkey = tuple(np.array(key)*(masks))\n        if not mkey in A:\n            A[mkey] = val\n        else:\n            A[mkey] = A[mkey] + val\n\n    out = Poly(A, poly.dim, None, None)\n    if out.keys and not np.sum(out.keys):\n        out = out.A[out.keys[0]]\n    elif not out.keys:\n        out = np.zeros(out.shape, dtype=out.dtype)\n    return out", "docstring": "Evaluate a polynomial along specified axes.\n\nArgs:\npoly (Poly):\nInput polynomial.\nargs (numpy.ndarray):\nArgument to be evaluated. Masked values keeps the variable intact.\n\nReturns:\n(Poly, numpy.ndarray):\nIf masked values are used the Poly is returned. Else an numpy array\nmatching the polynomial's shape is returned.", "source": "juraj-google-style"}
{"code": "def __init__(self, jss, data, **kwargs):\n        \n        self.jss = jss\n        if isinstance(data, basestring):\n            super(JSSObject, self).__init__(tag=self.list_type)\n            self._new(data, **kwargs)\n        elif isinstance(data, ElementTree.Element):\n            super(JSSObject, self).__init__(tag=data.tag)\n            for child in data.getchildren():\n                self.append(child)\n        else:\n            raise TypeError(\"JSSObjects data argument must be of type \"\n                            \"xml.etree.ElemenTree.Element, or a string for the\"\n                            \" name.\")", "docstring": "Initialize a new JSSObject\n\nArgs:\njss: JSS object.\ndata: xml.etree.ElementTree.Element data to use for\ncreating the object OR a string name to use for creating\na new object (provided it has an implemented _new()\nmethod.", "source": "juraj-google-style"}
{"code": "def scan_servos():\n    servos = []\n    for servo_id in range(0, 254):\n        model = get_model(servo_id)\n        if model:\n            servos += [(servo_id, model)]\n    return servos", "docstring": "Scan for the herkulex servos connected\n\nThis function will scan for all the herkulex servos connected\nto the bus.\n\nArgs:\nnone\nReturns:\nlist: a list of tuples of the form [(id, model)]", "source": "codesearchnet"}
{"code": "def GetMap(self, cache_info, data):\n    for obj in json.loads(cache_info.read()):\n        key = obj.get('Key', '')\n        value = obj.get('Value', '')\n        if not value or not key:\n            continue\n        map_entry = self._ReadEntry(key, value)\n        if map_entry is None:\n            self.log.warning('Could not create entry from line %r in cache, skipping', value)\n            continue\n        if not data.Add(map_entry):\n            self.log.warning('Could not add entry %r read from line %r in cache', map_entry, value)\n    return data", "docstring": "Returns a map from a cache.\n\nArgs:\ncache_info: file like object containing the cache.\ndata: a Map to populate.\nReturns:\nA child of Map containing the cache data.", "source": "github-repos"}
{"code": "def get_data_path(self, filename, env_prefix=None):\n        \n        if env_prefix == None:\n            target_file = filename\n        else:\n            target_file = os.path.join(env_prefix, filename)\n\n        if os.path.exists(os.path.join(self._data_path, target_file)):\n            return os.path.join(self._data_path, target_file)\n        else:\n            raise DataNotFoundError(\n                u(\"Cannot find data file: {0}\").format(target_file))", "docstring": "Get data path.\n\nArgs:\nfilename (string) : Name of file inside of /data folder to retrieve.\n\nKwargs:\nenv_prefix (string) : Name of subfolder, ex: 'qa' will find files in /data/qa\n\nReturns:\nString - path to file.\n\nUsage::\n\nopen(WTF_DATA_MANAGER.get_data_path('testdata.csv')\n\nNote: WTF_DATA_MANAGER is a provided global instance of DataManager", "source": "juraj-google-style"}
{"code": "def _convert_value(value, expected_type, path, context=_ConversionContext.VALUE):\n    assert isinstance(path, tuple)\n    if expected_type is None:\n        expected_type = _NoneType\n    if expected_type is tensor.Tensor:\n        return _convert_tensor(value, path, context)\n    elif isinstance(expected_type, type) and _issubclass(expected_type, composite_tensor.CompositeTensor):\n        return _convert_composite_tensor(value, expected_type, path, context)\n    elif expected_type is tensor_shape.TensorShape:\n        try:\n            return tensor_shape.as_shape(value)\n        except TypeError as e:\n            raise TypeError(f\"{''.join(path)}: expected 'tf.TensorShape', got {type(value).__name__!r}\") from e\n    elif expected_type is dtypes.DType:\n        try:\n            return dtypes.as_dtype(value)\n        except TypeError as e:\n            raise TypeError(f\"{''.join(path)}: expected 'tf.DType', got {type(value).__name__!r}\") from e\n    elif expected_type in (int, float, bool, str, bytes, _NoneType):\n        if not isinstance(value, expected_type):\n            raise TypeError(f'{''.join(path)}: expected {expected_type.__name__!r}, got {type(value).__name__!r}')\n        return value\n    elif type_annotations.is_generic_tuple(expected_type):\n        return _convert_tuple(value, expected_type, path, context)\n    elif type_annotations.is_generic_mapping(expected_type):\n        return _convert_mapping(value, expected_type, path, context)\n    elif type_annotations.is_generic_union(expected_type):\n        return _convert_union(value, expected_type, path, context)\n    else:\n        raise TypeError(f'{''.join(path)}: Unsupported type annotation {expected_type!r}')", "docstring": "Type-checks and converts a value.\n\nArgs:\nvalue: The value to type-check.\nexpected_type: The expected type for the value.\npath: Tuple of `str` naming the value (used for exception messages).\ncontext: _ConversionContext, indicates what kind of value we are converting.\n\nReturns:\nA copy of `value`, converted to the expected type.\n\nRaises:\nTypeError: If `value` can not be converted to the expected type.", "source": "github-repos"}
{"code": "def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):\n  \n  if mkdir and not os.path.exists(path):\n    os.mkdir(path, mode or 0o777)\n  elif mode:\n    os.chmod(path, mode)\n  if uid and gid:\n    os.chown(path, uid, gid)\n  _SetSELinuxContext(path)", "docstring": "Set the permissions and ownership of a path.\n\nArgs:\npath: string, the path for which owner ID and group ID needs to be setup.\nmode: octal string, the permissions to set on the path.\nuid: int, the owner ID to be set for the path.\ngid: int, the group ID to be set for the path.\nmkdir: bool, True if the directory needs to be created.", "source": "juraj-google-style"}
{"code": "def ExpectingFunctionArgs(clean_lines, linenum):\n    line = clean_lines.elided[linenum]\n    return (Match('^\\\\s*MOCK_(CONST_)?METHOD\\\\d+(_T)?\\\\(', line) or ((linenum >= 2) and (Match('^\\\\s*MOCK_(?:CONST_)?METHOD\\\\d+(?:_T)?\\\\((?:\\\\S+,)?\\\\s*$', clean_lines.elided[(linenum - 1)]) or Match('^\\\\s*MOCK_(?:CONST_)?METHOD\\\\d+(?:_T)?\\\\(\\\\s*$', clean_lines.elided[(linenum - 2)]) or Search('\\\\bstd::m?function\\\\s*\\\\<\\\\s*$', clean_lines.elided[(linenum - 1)]))))", "docstring": "Checks whether where function type arguments are expected.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\n\nReturns:\nTrue if the line at 'linenum' is inside something that expects arguments\nof function types.", "source": "codesearchnet"}
{"code": "def get_config(self, key, default=MISSING):\n    keyname = ('config:' + key)\n    try:\n        return self.kvstore.get(keyname)\n    except KeyError:\n        if (default is MISSING):\n            raise ArgumentError('No config value found for key', key=key)\n        return default", "docstring": "Get the value of a persistent config key from the registry\n\nIf no default is specified and the key is not found ArgumentError is raised.\n\nArgs:\nkey (string): The key name to fetch\ndefault (string): an optional value to be returned if key cannot be found\n\nReturns:\nstring: the key's value", "source": "codesearchnet"}
{"code": "def _force_float(v):\n    \n    try:\n        return float(v)\n    except Exception as exc:\n        return float('nan')\n        logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))", "docstring": "Converts given argument to float. On fail logs warning and returns 0.0.\n\nArgs:\nv (any): value to convert to float\n\nReturns:\nfloat: converted v or 0.0 if conversion failed.", "source": "juraj-google-style"}
{"code": "def __init__(self, axis=-1, validate_args=False, name=\"cumsum\"):\n    \n    if not isinstance(axis, int) or axis >= 0:\n      raise ValueError(\"`axis` must be a negative integer.\")\n    self._axis = axis\n\n    super(Cumsum, self).__init__(\n        is_constant_jacobian=True,\n        forward_min_event_ndims=-axis,  \n        validate_args=validate_args,\n        name=name)", "docstring": "Instantiates the `Cumsum` bijector.\n\nArgs:\naxis: Negative Python `int` indicating the axis along which to compute the\ncumulative sum. Note that positive (and zero) values are not supported.\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.\n\nRaises:\nValueError: If `axis` is not a negative `int`.", "source": "juraj-google-style"}
{"code": "def indicator_associations_types(\n        self, indicator_type, api_entity=None, api_branch=None, params=None\n    ):\n        \n        if params is None:\n            params = {}\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        target = self._tcex.ti.indicator(indicator_type)\n        for at in self.tc_requests.indicator_associations_types(\n            self.api_type,\n            self.api_sub_type,\n            self.unique_id,\n            target,\n            api_entity=api_entity,\n            api_branch=api_branch,\n            owner=self.owner,\n            params=params,\n        ):\n            yield at", "docstring": "Gets the indicator association from a Indicator/Group/Victim\n\nArgs:\nindicator_type:\napi_entity:\napi_branch:\nparams:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def execute_show(args, root_dir):\n    \n    key = None\n    if args.get('key'):\n        key = args['key']\n        status = command_factory('status')({}, root_dir=root_dir)\n        if key not in status['data'] or status['data'][key]['status'] != 'running':\n            print('No running process with this key, use `log` to show finished processes.')\n            return\n\n    \n    else:\n        status = command_factory('status')({}, root_dir=root_dir)\n        if isinstance(status['data'], str):\n            print(status['data'])\n            return\n        for k in sorted(status['data'].keys()):\n            if status['data'][k]['status'] == 'running':\n                key = k\n                break\n        if key is None:\n            print('No running process, use `log` to show finished processes.')\n            return\n\n    config_dir = os.path.join(root_dir, '.config/pueue')\n    \n    stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))\n    stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))\n    stdoutDescriptor = open(stdoutFile, 'r')\n    stderrDescriptor = open(stderrFile, 'r')\n    running = True\n    \n    if args['watch']:\n        \n        stdscr = curses.initscr()\n        curses.noecho()\n        curses.cbreak()\n        curses.curs_set(2)\n        stdscr.keypad(True)\n        stdscr.refresh()\n\n        try:\n            \n            while running:\n                stdscr.clear()\n                stdoutDescriptor.seek(0)\n                message = stdoutDescriptor.read()\n                stdscr.addstr(0, 0, message)\n                stdscr.refresh()\n                time.sleep(2)\n        except Exception:\n            \n            curses.nocbreak()\n            stdscr.keypad(False)\n            curses.echo()\n            curses.endwin()\n    else:\n        print('Stdout output:\\n')\n        stdoutDescriptor.seek(0)\n        print(get_descriptor_output(stdoutDescriptor, key))\n        print('\\n\\nStderr output:\\n')\n        stderrDescriptor.seek(0)\n        print(get_descriptor_output(stderrDescriptor, key))", "docstring": "Print stderr and stdout of the current running process.\n\nArgs:\nargs['watch'] (bool): If True, we open a curses session and tail\nthe output live in the console.\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "juraj-google-style"}
{"code": "def is_macos_gfortran(f90_compiler):\n    from numpy.distutils.fcompiler import gnu\n    if (sys.platform != MAC_OS):\n        return False\n    if (not isinstance(f90_compiler, gnu.Gnu95FCompiler)):\n        return False\n    return True", "docstring": "Checks if the current build is ``gfortran`` on macOS.\n\nArgs:\nf90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler\ninstance.\n\nReturns:\nbool: Only :data:`True` if\n\n* Current OS is macOS (checked via ``sys.platform``).\n* ``f90_compiler`` corresponds to ``gfortran``.", "source": "codesearchnet"}
{"code": "def _read_single(parser, filepath):\n    from os import path\n    global packages\n    if path.isfile(filepath):\n        parser.readfp(open(filepath))", "docstring": "Reads a single config file into the parser, silently failing if the file\ndoes not exist.\n\nArgs:\nparser (ConfigParser): parser to read the file into.\nfilepath (str): full path to the config file.", "source": "codesearchnet"}
{"code": "def import_string_code_as_module(code):\n    sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()\n    module = imp.new_module(sha256)\n    try:\n        exec_(code, module.__dict__)\n    except Exception as e:\n        raise exceptions.UserError('User code exception', exception_message=str(e))\n    sys.modules[sha256] = module\n    return module", "docstring": "Used to run arbitrary passed code as a module\n\nArgs:\ncode (string): Python code to import as module\n\nReturns:\nmodule: Python module", "source": "codesearchnet"}
{"code": "def cumulative_distribution(self, X):\n        \n        self.check_fit()\n        return norm.cdf(X, loc=self.mean, scale=self.std)", "docstring": "Cumulative distribution function for gaussian distribution.\n\nArguments:\nX: `np.ndarray` of shape (n, 1).\n\nReturns:\nnp.ndarray: Cumulative density for X.", "source": "juraj-google-style"}
{"code": "def get_blob(profile, sha):\n    \n    resource = \"/blobs/\" + sha\n    data = api.get_request(profile, resource)\n    return prepare(data)", "docstring": "Fetch a blob.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of the blob to fetch.\n\nReturns:\nA dict with data about the blob.", "source": "juraj-google-style"}
{"code": "def from_string(header_str):\n        \n        lines = tuple(clean_lines(header_str.split(\"\\n\"), False))\n        comment1 = lines[0]\n        feffpmg = comment1.find(\"pymatgen\")\n\n        if feffpmg:\n            comment2 = ' '.join(lines[1].split()[2:])\n\n            source = ' '.join(lines[2].split()[2:])\n            basis_vec = lines[6].split(\":\")[-1].split()\n            \n            a = float(basis_vec[0])\n            b = float(basis_vec[1])\n            c = float(basis_vec[2])\n            lengths = [a, b, c]\n            \n            basis_ang = lines[7].split(\":\")[-1].split()\n            alpha = float(basis_ang[0])\n            beta = float(basis_ang[1])\n            gamma = float(basis_ang[2])\n            angles = [alpha, beta, gamma]\n\n            lattice = Lattice.from_lengths_and_angles(lengths, angles)\n\n            natoms = int(lines[8].split(\":\")[-1].split()[0])\n\n            atomic_symbols = []\n            for i in range(9, 9 + natoms):\n                atomic_symbols.append(lines[i].split()[2])\n\n            \n            coords = []\n            for i in range(natoms):\n                toks = lines[i + 9].split()\n                coords.append([float(s) for s in toks[3:]])\n\n            struct = Structure(lattice, atomic_symbols, coords, False,\n                               False, False)\n\n            h = Header(struct, source, comment2)\n\n            return h\n        else:\n            return \"Header not generated by pymatgen, cannot return header object\"", "docstring": "Reads Header string and returns Header object if header was\ngenerated by pymatgen.\nNote: Checks to see if generated by pymatgen, if not it is impossible\nto generate structure object so it is not possible to generate\nheader object and routine ends\n\nArgs:\nheader_str: pymatgen generated feff.inp header\n\nReturns:\nStructure object.", "source": "juraj-google-style"}
{"code": "def set_doc_ids(self, doc_ids):\n        \n        if isinstance(doc_ids, list):\n            self.set_documents(dict.fromkeys(doc_ids))\n        else:\n            self.set_documents({doc_ids: None})", "docstring": "Build xml documents from a list of document ids.\n\nArgs:\ndoc_ids -- A document id or a lost of those.", "source": "juraj-google-style"}
{"code": "def from_las3(cls, string, lexicon=None, source='LAS', dlm=',', abbreviations=False):\n    f = (re.DOTALL | re.IGNORECASE)\n    regex = '\\\\~\\\\w+?_Data.+?\\\\n(.+?)(?:\\\\n\\\\n+|\\\\n*\\\\~|\\\\n*$)'\n    pattern = re.compile(regex, flags=f)\n    text = pattern.search(string).group(1)\n    s = re.search('\\\\.(.+?)\\\\: ?.+?source', string)\n    if s:\n        source = s.group(1).strip()\n    return cls.from_descriptions(text, lexicon, source=source, dlm=dlm, abbreviations=abbreviations)", "docstring": "Turn LAS3 'lithology' section into a Striplog.\n\nArgs:\nstring (str): A section from an LAS3 file.\nlexicon (Lexicon): The language for conversion to components.\nsource (str): A source for the data.\ndlm (str): The delimiter.\nabbreviations (bool): Whether to expand abbreviations.\n\nReturns:\nStriplog: The ``striplog`` object.\n\nNote:\nHandles multiple 'Data' sections. It would be smarter for it\nto handle one at a time, and to deal with parsing the multiple\nsections in the Well object.\n\nDoes not read an actual LAS file. Use the Well object for that.", "source": "codesearchnet"}
{"code": "def _get_napp_key(self, key, user=None, napp=None):\n        \n        if user is None:\n            user = self.user\n        if napp is None:\n            napp = self.napp\n        kytos_json = self._installed / user / napp / 'kytos.json'\n        try:\n            with kytos_json.open() as file_descriptor:\n                meta = json.load(file_descriptor)\n                return meta[key]\n        except (FileNotFoundError, json.JSONDecodeError, KeyError):\n            return ''", "docstring": "Return a value from kytos.json.\n\nArgs:\nuser (string): A Username.\nnapp (string): A NApp name\nkey (string): Key used to get the value within kytos.json.\n\nReturns:\nmeta (object): Value stored in kytos.json.", "source": "juraj-google-style"}
{"code": "def _CheckPenalties(self, tree, list_of_expected):\n\n    def FlattenRec(tree):\n        if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS:\n            return []\n        if isinstance(tree, pytree.Leaf):\n            return [(tree.value, pytree_utils.GetNodeAnnotation(tree, pytree_utils.Annotation.SPLIT_PENALTY))]\n        nodes = []\n        for node in tree.children:\n            nodes += FlattenRec(node)\n        return nodes\n    self.assertEqual(list_of_expected, FlattenRec(tree))", "docstring": "Check that the tokens in the tree have the correct penalties.\n\nArgs:\ntree: the pytree.\nlist_of_expected: list of (name, penalty) pairs. Non-semantic tokens are\nfiltered out from the expected values.", "source": "github-repos"}
{"code": "def from_iterable(cls, frames, sort=False):\n        \n        return FrameSet(sorted(frames) if sort else frames)", "docstring": "Build a :class:`FrameSet` from an iterable of frames.\n\nArgs:\nframes (collections.Iterable): an iterable object containing frames as integers\nsort (bool): True to sort frames before creation, default is False\n\nReturns:\n:class:`FrameSet`:", "source": "juraj-google-style"}
{"code": "def get_dfa_conjecture(self):\n        \n        dfa = DFA(self.alphabet)\n        for s in self.observation_table.sm_vector:\n            for i in self.alphabet:\n                dst = self.observation_table.equiv_classes[s + i]\n                \n                if dst == None:\n                    logging.debug('Conjecture attempt on non closed table.')\n                    return None\n                obsrv = self.observation_table[s, i]\n                src_id = self.observation_table.sm_vector.index(s)\n                dst_id = self.observation_table.sm_vector.index(dst)\n                dfa.add_arc(src_id, dst_id, i, obsrv)\n\n        \n        i = 0\n        for s in self.observation_table.sm_vector:\n            dfa[i].final = self.observation_table[s, self.epsilon]\n            i += 1\n        return dfa", "docstring": "Utilize the observation table to construct a Mealy Machine.\nThe library used for representing the Mealy Machine is the python\nbindings of the openFST library (pyFST).\nArgs:\nNone\nReturns:\nMealyMachine: A mealy machine build based on a closed and consistent\nobservation table.", "source": "juraj-google-style"}
{"code": "def restore(self, x):\n    \n    with tf.name_scope(\"pad_reduce/restore\"):\n      x = tf.scatter_nd(\n          indices=self.nonpad_ids,\n          updates=x,\n          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),\n      )\n    return x", "docstring": "Add padding back to the given tensor.\n\nArgs:\nx (tf.Tensor): of shape [dim_compressed,...]\n\nReturns:\na tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The\ndim is restored from the original reference tensor", "source": "juraj-google-style"}
{"code": "def _publish_scan_response(self, client):\n    devices = self._manager.scanned_devices\n    converted_devs = []\n    for (uuid, info) in devices.items():\n        slug = self._build_device_slug(uuid)\n        message = {}\n        message['uuid'] = uuid\n        if (uuid in self._connections):\n            message['user_connected'] = True\n        elif ('user_connected' in info):\n            message['user_connected'] = info['user_connected']\n        else:\n            message['user_connected'] = False\n        message['connection_string'] = slug\n        message['signal_strength'] = info['signal_strength']\n        converted_devs.append({x: y for (x, y) in message.items()})\n        message['type'] = 'notification'\n        message['operation'] = 'advertisement'\n        self.client.publish(self.topics.gateway_topic(slug, 'data/advertisement'), message)\n    probe_message = {}\n    probe_message['type'] = 'response'\n    probe_message['client'] = client\n    probe_message['success'] = True\n    probe_message['devices'] = converted_devs\n    self.client.publish(self.topics.status, probe_message)", "docstring": "Publish a scan response message\n\nThe message contains all of the devices that are currently known\nto this agent.  Connection strings for direct connections are\ntranslated to what is appropriate for this agent.\n\nArgs:\nclient (string): A unique id for the client that made this request", "source": "codesearchnet"}
{"code": "def get_instances_with_configs(configs):\n    results = []\n    for c in configs:\n        try:\n            serial = c.pop('serial')\n        except KeyError:\n            raise Error(('Required value \"serial\" is missing in AndroidDevice config %s.' % c))\n        is_required = c.get(KEY_DEVICE_REQUIRED, True)\n        try:\n            ad = AndroidDevice(serial)\n            ad.load_config(c)\n        except Exception:\n            if is_required:\n                raise\n            ad.log.exception('Skipping this optional device due to error.')\n            continue\n        results.append(ad)\n    return results", "docstring": "Create AndroidDevice instances from a list of dict configs.\n\nEach config should have the required key-value pair 'serial'.\n\nArgs:\nconfigs: A list of dicts each representing the configuration of one\nandroid device.\n\nReturns:\nA list of AndroidDevice objects.", "source": "codesearchnet"}
{"code": "def set(self, name, permission):\n        \n        assert isinstance(permission, BasePermission), 'Only permission instances can be added to the set'\n\n        self._permissions[name] = permission", "docstring": "Adds permission with the given name to the set. Permission with the same name will be overridden.\nArgs:\nname: name of the permission\npermission: permission instance", "source": "juraj-google-style"}
{"code": "def create_audit_event(self, code='AUDIT'):\n    event = self._meta.event_model(code=code, model=self.__class__.__name__)\n    if current_user:\n        event.created_by = current_user.get_id()\n    self.copy_foreign_keys(event)\n    self.populate_audit_fields(event)\n    return event", "docstring": "Creates a generic auditing Event logging the changes between saves\nand the initial data in creates.\n\nKwargs:\ncode (str): The code to set the new Event to.\n\nReturns:\nEvent: A new event with relevant info inserted into it", "source": "codesearchnet"}
{"code": "def read(self, length=(- 1)):\n    if (0 <= length < len(self)):\n        newpos = (self.pos + length)\n        data = self.buf[self.pos:newpos]\n        self.pos = newpos\n        self.__discard()\n        return data\n    data = self.buf[self.pos:]\n    self.clear()\n    return data", "docstring": "Reads from the FIFO.\n\nReads as much data as possible from the FIFO up to the specified\nlength. If the length argument is negative or ommited all data\ncurrently available in the FIFO will be read. If there is no data\navailable in the FIFO an empty string is returned.\n\nArgs:\nlength: The amount of data to read from the FIFO. Defaults to -1.", "source": "codesearchnet"}
{"code": "def run_gpu_or_tpu(func: _F) -> _F:\n    if tf_inspect.isclass(func):\n        raise ValueError('`run_gpu_or_tpu` only supports test methods.')\n\n    def decorated(self: 'TensorFlowTestCase', *args, **kwargs):\n        if config.list_physical_devices('GPU'):\n            return func(self, 'GPU', *args, **kwargs)\n        if config.list_physical_devices('TPU'):\n            return func(self, 'TPU', *args, **kwargs)\n        self.skipTest('Test requires GPU or TPU')\n    return decorated", "docstring": "Execute the decorated test only if a physical GPU or TPU is available.\n\nThis function is intended to be applied to tests that require the presence\nof a physical GPU or TPU. It complies with the following rules:\n- If a GPU is available, the test will run on the GPU.\n- If a GPU is absent and a TPU is available, the test will run on the TPU.\n- If both GPU and TPU are absent, the test will be skipped.\n\nArgs:\nfunc: function to be annotated.\n\nReturns:\nReturns a function that will conditionally skip the decorated test method.", "source": "github-repos"}
{"code": "def FromDBInstance(db_token):\n    hash_ar = bytearray(binascii.unhexlify(db_token.ContractHash))\n    hash_ar.reverse()\n    hash = UInt160(data=hash_ar)\n    token = NEP5Token(script=None)\n    token.SetScriptHash(hash)\n    token.name = db_token.Name\n    token.symbol = db_token.Symbol\n    token.decimals = db_token.Decimals\n    return token", "docstring": "Get a NEP5Token instance from a database token.\n\nArgs:\ndb_token (neo.Implementations.Wallets.peewee.Models.NEP5Token):\n\nReturns:\nNEP5Token: self.", "source": "codesearchnet"}
{"code": "def AddArguments(cls, argument_group):\n    \n    storage_formats = sorted(definitions.STORAGE_FORMATS)\n\n    argument_group.add_argument(\n        '--storage_format', '--storage-format', action='store',\n        choices=storage_formats, dest='storage_format', type=str,\n        metavar='FORMAT', default=definitions.DEFAULT_STORAGE_FORMAT, help=(\n            'Format of the storage file, the default is: {0:s}. Supported '\n            'options: {1:s}'.format(\n                definitions.DEFAULT_STORAGE_FORMAT,\n                ', '.join(storage_formats))))", "docstring": "Adds command line arguments to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def map_across_full_axis(self, axis, map_func):\n        \n        \n        \n        \n        num_splits = self._compute_num_partitions()\n        preprocessed_map_func = self.preprocess_func(map_func)\n        partitions = self.column_partitions if not axis else self.row_partitions\n        \n        \n        \n        \n        result_blocks = np.array(\n            [\n                part.apply(preprocessed_map_func, num_splits=num_splits)\n                for part in partitions\n            ]\n        )\n        \n        \n        \n        return (\n            self.__constructor__(result_blocks.T)\n            if not axis\n            else self.__constructor__(result_blocks)\n        )", "docstring": "Applies `map_func` to every partition.\n\nNote: This method should be used in the case that `map_func` relies on\nsome global information about the axis.\n\nArgs:\naxis: The axis to perform the map across (0 - index, 1 - columns).\nmap_func: The function to apply.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "juraj-google-style"}
{"code": "def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):\n        \n        source = vtk.vtkLineSource()\n        source.SetPoint1(start)\n        source.SetPoint2(end)\n\n        vertexIDs = vtk.vtkStringArray()\n        vertexIDs.SetNumberOfComponents(1)\n        vertexIDs.SetName(\"VertexIDs\")\n        \n        vertexIDs.InsertNextValue(\"a\")\n        vertexIDs.InsertNextValue(\"b\")\n        source.GetOutput().GetPointData().AddArray(vertexIDs)\n\n        mapper = vtk.vtkPolyDataMapper()\n        mapper.SetInputConnection(source.GetOutputPort())\n        actor = vtk.vtkActor()\n        actor.SetMapper(mapper)\n        actor.GetProperty().SetColor(color)\n        actor.GetProperty().SetLineWidth(width)\n        self.ren.AddActor(actor)", "docstring": "Adds a line.\n\nArgs:\nstart: Starting coordinates for line.\nend: Ending coordinates for line.\ncolor: Color for text as RGB. Defaults to grey.\nwidth: Width of line. Defaults to 1.", "source": "juraj-google-style"}
{"code": "def sget_timestamp(self, cycle, step, dataset_number=None):\n        \n\n        dataset_number = self._validate_dataset_number(dataset_number)\n        if dataset_number is None:\n            self._report_empty_dataset()\n            return\n        cycle_index_header = self.headers_normal.cycle_index_txt\n        timestamp_header = self.headers_normal.test_time_txt\n        step_index_header = self.headers_normal.step_index_txt\n        test = self.datasets[dataset_number].dfdata\n\n        if isinstance(step, (list, tuple)):\n            warnings.warn(f\"The varialbe step is a list.\"\n                          f\"Should be an integer.\"\n                          f\"{step}\")\n            step = step[0]\n\n        c = test[(test[cycle_index_header] == cycle) &\n                 (test[step_index_header] == step)]\n        if not self.is_empty(c):\n            t = c[timestamp_header]\n            return t\n        else:\n            return pd.Series()", "docstring": "Returns timestamp for cycle, step.\n\nConvinience function; same as issuing\ndfdata[(dfdata[cycle_index_header] == cycle) &\n(dfdata[step_index_header] == step)][timestamp_header]\n\nArgs:\ncycle: cycle number\nstep: step number\ndataset_number: the dataset number (automatic selection if None)\n\nReturns:\npandas.Series", "source": "juraj-google-style"}
{"code": "def condense(input_string):\n    \n    try:\n        assert isinstance(input_string, basestring)\n    except AssertionError:\n        raise TypeError\n    removed_leading_whitespace = re.sub('>\\s+', '>', input_string).strip()\n    removed_trailing_whitespace = re.sub('\\s+<', '<', removed_leading_whitespace).strip()\n    return removed_trailing_whitespace", "docstring": "Trims leadings and trailing whitespace between tags in an html document\n\nArgs:\ninput_string: A (possible unicode) string representing HTML.\n\nReturns:\nA (possibly unicode) string representing HTML.\n\nRaises:\nTypeError: Raised if input_string isn't a unicode string or string.", "source": "juraj-google-style"}
{"code": "def DataRefreshRequired(self, path=None, last=None):\n    if (last is None):\n        if (path is None):\n            raise type_info.TypeValueError(\"Either 'path' or 'last' must be supplied as an argument.\")\n        fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)\n        stat_obj = fd.Get(fd.Schema.STAT)\n        if stat_obj:\n            last = stat_obj.age\n        else:\n            last = rdfvalue.RDFDatetime(0)\n    if (last is None):\n        return True\n    last = last.AsDatetime()\n    return ((datetime.datetime.utcnow() - last) > self.max_age_before_refresh)", "docstring": "True if we need to update this path from the client.\n\nArgs:\npath: The path relative to the root to check freshness of.\nlast: An aff4:last attribute to check freshness of.\n\nAt least one of path or last must be supplied.\n\nReturns:\nTrue if the path hasn't been updated in the last\nself.max_age_before_refresh seconds, else False.\n\nRaises:\ntype_info.TypeValueError: If no arguments are supplied.", "source": "codesearchnet"}
{"code": "def start(self, **kwargs):\n    return self.client.api.start(self.id, **kwargs)", "docstring": "Start this container. Similar to the ``docker start`` command, but\ndoesn't support attach options.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def _make_query_from_terms(self, terms):\n        \n\n        match_query = ''\n\n        expanded_terms = self._expand_terms(terms)\n        if expanded_terms['doc']:\n            match_query = self.backend._and_join(expanded_terms['doc'])\n\n        if expanded_terms['keywords']:\n            if match_query:\n                match_query = self.backend._and_join(\n                    [match_query, self.backend._join_keywords(expanded_terms['keywords'])])\n            else:\n                match_query = self.backend._join_keywords(expanded_terms['keywords'])\n\n        if match_query:\n            query = text()\n            query_params = {\n                'match_query': match_query}\n        else:\n            query = text()\n            query_params = {}\n\n        return query, query_params", "docstring": "Creates a query for partition from decomposed search terms.\n\nArgs:\nterms (dict or unicode or string):\n\nReturns:\ntuple of (str, dict): First element is str with FTS query, second is parameters of the query.", "source": "juraj-google-style"}
{"code": "def get_continent(self, callsign, timestamp=timestamp_now):\n    return self.get_all(callsign, timestamp)[const.CONTINENT]", "docstring": "Returns the continent Identifier of a callsign\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nstr: continent identified\n\nRaises:\nKeyError: No Continent found for callsign\n\nNote:\nThe following continent identifiers are used:\n\n- EU: Europe\n- NA: North America\n- SA: South America\n- AS: Asia\n- AF: Africa\n- OC: Oceania\n- AN: Antarctica", "source": "codesearchnet"}
{"code": "def parse(response_text: str, *, batch: bool, validate_against_schema: bool=True) -> Union[(JSONRPCResponse, List[JSONRPCResponse])]:\n    if (not response_text):\n        if batch:\n            return []\n        else:\n            return NotificationResponse()\n    deserialized = deserialize(response_text)\n    if validate_against_schema:\n        jsonschema.validate(deserialized, schema)\n    if isinstance(deserialized, list):\n        return [get_response(r) for r in deserialized if ('id' in r)]\n    return get_response(deserialized)", "docstring": "Parses response text, returning JSONRPCResponse objects.\n\nArgs:\nresponse_text: JSON-RPC response string.\nbatch: If the response_text is an empty string, this determines how to parse.\nvalidate_against_schema: Validate against the json-rpc schema.\n\nReturns:\nEither a JSONRPCResponse, or a list of them.\n\nRaises:\njson.JSONDecodeError: The response was not valid JSON.\njsonschema.ValidationError: The response was not a valid JSON-RPC response\nobject.", "source": "codesearchnet"}
{"code": "def from_stream(credential_filename):\n        \n        if credential_filename and os.path.isfile(credential_filename):\n            try:\n                return _get_application_default_credential_from_file(\n                    credential_filename)\n            except (ApplicationDefaultCredentialsError, ValueError) as error:\n                extra_help = (' (provided as parameter to the '\n                              'from_stream() method)')\n                _raise_exception_for_reading_json(credential_filename,\n                                                  extra_help,\n                                                  error)\n        else:\n            raise ApplicationDefaultCredentialsError(\n                'The parameter passed to the from_stream() '\n                'method should point to a file.')", "docstring": "Create a Credentials object by reading information from a file.\n\nIt returns an object of type GoogleCredentials.\n\nArgs:\ncredential_filename: the path to the file from where the\ncredentials are to be read\n\nRaises:\nApplicationDefaultCredentialsError: raised when the credentials\nfail to be retrieved.", "source": "juraj-google-style"}
{"code": "def _to_json(self, strip, to_serialize=None):\n    curr_type = self.__class__\n    if (to_serialize is None):\n        to_serialize = copy.copy(self.__dict__)\n    else:\n        to_serialize = copy.copy(to_serialize)\n    for member in strip:\n        if (member in to_serialize):\n            del to_serialize[member]\n    to_serialize['token_expiry'] = _parse_expiry(to_serialize.get('token_expiry'))\n    to_serialize['_class'] = curr_type.__name__\n    to_serialize['_module'] = curr_type.__module__\n    for (key, val) in to_serialize.items():\n        if isinstance(val, bytes):\n            to_serialize[key] = val.decode('utf-8')\n        if isinstance(val, set):\n            to_serialize[key] = list(val)\n    return json.dumps(to_serialize)", "docstring": "Utility function that creates JSON repr. of a Credentials object.\n\nArgs:\nstrip: array, An array of names of members to exclude from the\nJSON.\nto_serialize: dict, (Optional) The properties for this object\nthat will be serialized. This allows callers to\nmodify before serializing.\n\nReturns:\nstring, a JSON representation of this instance, suitable to pass to\nfrom_json().", "source": "codesearchnet"}
{"code": "def insert_tile(self, tile_info):\n    for (i, tile) in enumerate(self.registered_tiles):\n        if (tile.slot == tile_info.slot):\n            self.registered_tiles[i] = tile_info\n            return\n    self.registered_tiles.append(tile_info)", "docstring": "Add or replace an entry in the tile cache.\n\nArgs:\ntile_info (TileInfo): The newly registered tile.", "source": "codesearchnet"}
{"code": "def _create_or_get_tensor_history_values_cache(self, cache_name, graph, shape=None, dtype=dtypes.float32):\n    if graph is None:\n        raise ValueError('Invalid graph.')\n    if graph not in self._history_value_cache:\n        self._history_value_cache[graph] = {}\n    if cache_name not in self._history_value_cache[graph]:\n        if shape is None:\n            raise ValueError('shape must be provided at cache creation.')\n        if dtype.is_integer:\n            init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)\n        else:\n            init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE\n        with graph.as_default() as g, g.name_scope(None):\n            self._history_value_cache[graph][cache_name] = variable_scope.get_variable('tt_history' + '_' + self._escape_namescopes(cache_name), shape=shape, dtype=dtype, initializer=init_ops.constant_initializer(init_val), trainable=False, use_resource=True, collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])\n    return self._history_value_cache[graph][cache_name]", "docstring": "Creates a variable as the cache to store historic intermediate tensor values.\n\nArgs:\ncache_name: Name to be given to the cache (an instance of tf.variable).\ngraph: Tensorflow graph.\nshape: A list of dimensions.\ndtype: Data type of created cache.\nReturns:\nA ref to newly created or existing cache with the given dimensions.\nRaises:\nValueError:\n(1) If graph is None, or\n(2) shape is None when a new cache needs to be created.", "source": "github-repos"}
{"code": "def dotd(A, B):\n    r\n    A = asarray(A, float)\n    B = asarray(B, float)\n    if A.ndim == 1 and B.ndim == 1:\n        return dot(A, B)\n\n    out = empty((A.shape[0],), float)\n    out[:] = sum(A * B.T, axis=1)\n\n    return out", "docstring": "r\"\"\"Diagonal of :math:`\\mathrm A\\mathrm B^\\intercal`.\n\nIf ``A`` is :math:`n\\times p` and ``B`` is :math:`p\\times n`, it is done in\n:math:`O(pn)`.\n\nArgs:\nA (array_like): Left matrix.\nB (array_like): Right matrix.\n\nReturns:\n:class:`numpy.ndarray`: Resulting diagonal.", "source": "juraj-google-style"}
{"code": "def parse_uri(self, uri=None):\n\n\t\t\n\n\t\t\n\t\tif not uri:\n\t\t\treturn rdflib.term.URIRef(self.root)\n\n\t\t\n\t\telif type(uri) == str:\n\n\t\t\t\n\t\t\tif type(uri) == str and not uri.startswith('http'):\n\t\t\t\treturn rdflib.term.URIRef(\"%s%s\" % (self.root, uri))\n\n\t\t\t\n\t\t\telse:\n\t\t\t\treturn rdflib.term.URIRef(uri)\n\n\t\t\n\t\telif type(uri) == rdflib.term.URIRef:\n\t\t\treturn uri\n\n\t\t\n\t\telse:\n\t\t\traise TypeError('invalid URI input')", "docstring": "parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef\n\nArgs:\nuri (rdflib.term.URIRef,str): input URI\n\nReturns:\nrdflib.term.URIRef", "source": "juraj-google-style"}
{"code": "def download_and_install(uri, name=DEFAULT_MODULE_NAME, cache=True):\n    should_use_cache = (cache and exists(name))\n    if (not should_use_cache):\n        with _files.tmpdir() as tmpdir:\n            if uri.startswith('s3:\n                dst = os.path.join(tmpdir, 'tar_file')\n                _files.s3_download(uri, dst)\n                module_path = os.path.join(tmpdir, 'module_dir')\n                os.makedirs(module_path)\n                with tarfile.open(name=dst, mode='r:gz') as t:\n                    t.extractall(path=module_path)\n            else:\n                module_path = uri\n            prepare(module_path, name)\n            install(module_path)", "docstring": "Download, prepare and install a compressed tar file from S3 or local directory as a module.\n\nThe SageMaker Python SDK saves the user provided scripts as compressed tar files in S3.\nThis function downloads this compressed file and, if provided, transforms it\ninto a module before installing it.\n\nThis method is the predecessor of :meth:`~sagemaker_containers.beta.framework.files.download_and_extract`\nand has been kept for backward-compatibility purposes.\n\nArgs:\nname (str): name of the script or module.\nuri (str): the location of the module.\ncache (bool): defaults to True. It will not download and install the module again if it is already installed.", "source": "codesearchnet"}
{"code": "def generate_sigproc_header(f):\n    \n\n    header_string = b''\n    header_string += to_sigproc_keyword(b'HEADER_START')\n\n    for keyword in f.header.keys():\n        if keyword == b'src_raj':\n            header_string += to_sigproc_keyword(b'src_raj')  + to_sigproc_angle(f.header[b'src_raj'])\n        elif keyword == b'src_dej':\n            header_string += to_sigproc_keyword(b'src_dej')  + to_sigproc_angle(f.header[b'src_dej'])\n        elif keyword == b'az_start' or keyword == b'za_start':\n            header_string += to_sigproc_keyword(keyword)  + np.float64(f.header[keyword]).tostring()\n        elif keyword not in header_keyword_types.keys():\n            pass\n        else:\n            header_string += to_sigproc_keyword(keyword, f.header[keyword])\n\n    header_string += to_sigproc_keyword(b'HEADER_END')\n    return header_string", "docstring": "Generate a serialzed sigproc header which can be written to disk.\n\nArgs:\nf (Filterbank object): Filterbank object for which to generate header\n\nReturns:\nheader_str (str): Serialized string corresponding to header", "source": "juraj-google-style"}
{"code": "def get_size(fileobj):\n    old_pos = fileobj.tell()\n    try:\n        fileobj.seek(0, 2)\n        return fileobj.tell()\n    finally:\n        fileobj.seek(old_pos, 0)", "docstring": "Returns the size of the file.\nThe position when passed in will be preserved if no error occurs.\n\nArgs:\nfileobj (fileobj)\nReturns:\nint: The size of the file\nRaises:\nIOError", "source": "codesearchnet"}
{"code": "def get_max_recv_data_size(self, target):\n    fname = 'get_max_recv_data_size'\n    cname = ((self.__class__.__module__ + '.') + self.__class__.__name__)\n    raise NotImplementedError(('%s.%s() is required' % (cname, fname)))", "docstring": "Returns the maximum number of data bytes for receiving.\n\nThe maximum number of data bytes acceptable for receiving with\neither :meth:`send_cmd_recv_rsp` or :meth:`send_rsp_recv_cmd`.\nThe value reflects the local device capabilities for receiving\nin the mode determined by *target*. It does not relate to any\nprotocol capabilities and negotiations.\n\nArguments:\n\ntarget (nfc.clf.Target): The current local or remote\ncommunication target.\n\nReturns:\n\nint: Maximum number of data bytes supported for receiving.", "source": "codesearchnet"}
{"code": "def scores2recos(self, scores, candidates, rev=False):\n    sorted_indices = np.argsort(scores)\n    if rev:\n        sorted_indices = sorted_indices[::(- 1)]\n    return (candidates[sorted_indices], scores[sorted_indices])", "docstring": "Get recommendation list for a user u_index based on scores.\n\nArgs:\nscores (numpy array; (n_target_items,)):\nScores for the target items. Smaller score indicates a promising item.\ncandidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.\nrev (bool): If true, return items in an descending order. A ascending order (i.e., smaller scores are more promising) is default.\n\nReturns:\n(numpy array, numpy array) : (Sorted list of items, Sorted scores).", "source": "codesearchnet"}
{"code": "def convert_outlook_msg(msg_bytes):\n    if (not is_outlook_msg(msg_bytes)):\n        raise ValueError('The supplied bytes are not an Outlook MSG file')\n    orig_dir = os.getcwd()\n    tmp_dir = tempfile.mkdtemp()\n    os.chdir(tmp_dir)\n    with open('sample.msg', 'wb') as msg_file:\n        msg_file.write(msg_bytes)\n    try:\n        subprocess.check_call(['msgconvert', 'sample.msg'], stdout=null_file, stderr=null_file)\n        eml_path = 'sample.eml'\n        with open(eml_path, 'rb') as eml_file:\n            rfc822 = eml_file.read()\n    except FileNotFoundError:\n        raise EmailParserError('Failed to convert Outlook MSG: msgconvert utility not found')\n    finally:\n        os.chdir(orig_dir)\n        shutil.rmtree(tmp_dir)\n    return rfc822", "docstring": "Uses the ``msgconvert`` Perl utility to convert an Outlook MS file to\nstandard RFC 822 format\n\nArgs:\nmsg_bytes (bytes): the content of the .msg file\n\nReturns:\nA RFC 822 string", "source": "codesearchnet"}
{"code": "def check_for_wdiff():\n    cmd = ['which', CMD_WDIFF]\n    DEVNULL = open(os.devnull, 'wb')\n    proc = sub.Popen(cmd, stdout=DEVNULL)\n    proc.wait()\n    DEVNULL.close()\n    if (proc.returncode != 0):\n        msg = \"the `{}` command can't be found\".format(CMD_WDIFF)\n        raise WdiffNotFoundError(msg)", "docstring": "Checks if the `wdiff` command can be found.\n\nRaises:\n\nWdiffNotFoundError: if ``wdiff`` is not found.", "source": "codesearchnet"}
{"code": "def start(self, extra_args='', tag=''):\n    if self.started:\n        return\n    utils.create_dir(self.log_path)\n    if tag:\n        tag = (tag + ',')\n    out_file_name = 'IPerfServer,{},{}{}.log'.format(self.port, tag, len(self.log_files))\n    full_out_path = os.path.join(self.log_path, out_file_name)\n    cmd = ('%s %s > %s' % (self.iperf_str, extra_args, full_out_path))\n    self.iperf_process = utils.start_standing_subprocess(cmd, shell=True)\n    self.log_files.append(full_out_path)\n    self.started = True", "docstring": "Starts iperf server on specified port.\n\nArgs:\nextra_args: A string representing extra arguments to start iperf\nserver with.\ntag: Appended to log file name to identify logs from different\niperf runs.", "source": "codesearchnet"}
{"code": "def find_container_traits(cls_or_string):\n    \n\n    if utils.is_str(cls_or_string):\n        if not templates.is_instantiation(cls_or_string):\n            return None\n        name = templates.name(cls_or_string)\n        if name.startswith('std::'):\n            name = name[len('std::'):]\n        if name.startswith('std::tr1::'):\n            name = name[len('std::tr1::'):]\n        for cls_traits in all_container_traits:\n            if cls_traits.name() == name:\n                return cls_traits\n    else:\n\n        if isinstance(cls_or_string, class_declaration.class_types):\n            \n            if cls_or_string.cache.container_traits is not None:\n                return cls_or_string.cache.container_traits\n\n        \n        for cls_traits in all_container_traits:\n            if cls_traits.is_my_case(cls_or_string):\n                \n                if isinstance(cls_or_string, class_declaration.class_types):\n                    cls_or_string.cache.container_traits = cls_traits\n                return cls_traits", "docstring": "Find the container traits type of a declaration.\n\nArgs:\ncls_or_string (str | declarations.declaration_t): a string\n\nReturns:\ndeclarations.container_traits: a container traits", "source": "juraj-google-style"}
{"code": "def self(self) -> 'EFBChat':\n    self.chat_name = 'You'\n    self.chat_alias = None\n    self.chat_uid = EFBChat.SELF_ID\n    self.chat_type = ChatType.User\n    return self", "docstring": "Set the chat as yourself.\nIn this context, \"yourself\" means the user behind the master channel.\nEvery channel should relate this to the corresponding target.\n\nReturns:\nEFBChat: This object.", "source": "codesearchnet"}
{"code": "def _DisableNetworkManager(self, interfaces, logger):\n    \n    for interface in interfaces:\n      interface_config = os.path.join(\n          self.network_path, 'ifcfg-%s' % interface)\n      if os.path.exists(interface_config):\n        self._ModifyInterface(\n            interface_config, 'DEVICE', interface, replace=False)\n        self._ModifyInterface(\n            interface_config, 'NM_CONTROLLED', 'no', replace=True)\n      else:\n        with open(interface_config, 'w') as interface_file:\n          interface_content = [\n              '\n              'BOOTPROTO=none',\n              'DEFROUTE=no',\n              'DEVICE=%s' % interface,\n              'IPV6INIT=no',\n              'NM_CONTROLLED=no',\n              'NOZEROCONF=yes',\n              '',\n          ]\n          interface_file.write('\\n'.join(interface_content))\n        logger.info('Created config file for interface %s.', interface)", "docstring": "Disable network manager management on a list of network interfaces.\n\nArgs:\ninterfaces: list of string, the output device names enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"}
{"code": "def all_near_zero_mod(a: Union[(float, complex, Iterable[float], np.ndarray)], period: float, *, atol: float=1e-08) -> bool:\n    b = (((np.asarray(a) + (period / 2)) % period) - (period / 2))\n    return np.all(np.less_equal(np.abs(b), atol))", "docstring": "Checks if the tensor's elements are all near multiples of the period.\n\nArgs:\na: Tensor of elements that could all be near multiples of the period.\nperiod: The period, e.g. 2 pi when working in radians.\natol: Absolute tolerance.", "source": "codesearchnet"}
{"code": "def send_state_event(self, event_type, content, state_key=\"\"):\n        \n        return self.client.api.send_state_event(\n            self.room_id,\n            event_type,\n            content,\n            state_key\n        )", "docstring": "Send a state event to the room.\n\nArgs:\nevent_type (str): The type of event that you are sending.\ncontent (): An object with the content of the message.\nstate_key (str, optional): A unique key to identify the state.", "source": "juraj-google-style"}
{"code": "def altcode(msg):\n    \n\n    if df(msg) not in [0, 4, 16, 20]:\n        raise RuntimeError(\"Message must be Downlink Format 0, 4, 16, or 20.\")\n\n    \n    mbin = hex2bin(msg)\n\n    mbit = mbin[25]   \n    qbit = mbin[27]   \n\n\n    if mbit == '0':         \n        if qbit == '1':     \n            vbin = mbin[19:25] + mbin[26] + mbin[28:32]\n            alt = bin2int(vbin) * 25 - 1000\n        if qbit == '0':     \n            C1 = mbin[19]\n            A1 = mbin[20]\n            C2 = mbin[21]\n            A2 = mbin[22]\n            C4 = mbin[23]\n            A4 = mbin[24]\n            \n            B1 = mbin[26]\n            \n            B2 = mbin[28]\n            D2 = mbin[29]\n            B4 = mbin[30]\n            D4 = mbin[31]\n\n            graystr =  D2 + D4 + A1 + A2 + A4 + B1 + B2 + B4 + C1 + C2 + C4\n            alt = gray2alt(graystr)\n\n    if mbit == '1':         \n        vbin = mbin[19:25] + mbin[26:31]\n        alt = int(bin2int(vbin) * 3.28084)  \n\n    return alt", "docstring": "Computes the altitude from DF4 or DF20 message, bit 20-32.\ncredit: @fbyrkjeland\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: altitude in ft", "source": "juraj-google-style"}
{"code": "def to_dict(mapreduce_yaml):\n    \n    all_configs = []\n    for config in mapreduce_yaml.mapreduce:\n      out = {\n          \"name\": config.name,\n          \"mapper_input_reader\": config.mapper.input_reader,\n          \"mapper_handler\": config.mapper.handler,\n      }\n      if config.mapper.params_validator:\n        out[\"mapper_params_validator\"] = config.mapper.params_validator\n      if config.mapper.params:\n        param_defaults = {}\n        for param in config.mapper.params:\n          param_defaults[param.name] = param.default or param.value\n        out[\"mapper_params\"] = param_defaults\n      if config.params:\n        param_defaults = {}\n        for param in config.params:\n          param_defaults[param.name] = param.default or param.value\n        out[\"params\"] = param_defaults\n      if config.mapper.output_writer:\n        out[\"mapper_output_writer\"] = config.mapper.output_writer\n      all_configs.append(out)\n\n    return all_configs", "docstring": "Converts a MapReduceYaml file into a JSON-encodable dictionary.\n\nFor use in user-visible UI and internal methods for interfacing with\nuser code (like param validation). as a list\n\nArgs:\nmapreduce_yaml: The Pyton representation of the mapreduce.yaml document.\n\nReturns:\nA list of configuration dictionaries.", "source": "juraj-google-style"}
{"code": "def _get_colors(n):\n    import matplotlib.pyplot as plt\n    from matplotlib.colors import rgb2hex as r2h\n    from numpy import linspace\n    cols = linspace(0.05, 0.95, n)\n    cmap = plt.get_cmap('nipy_spectral')\n    return [r2h(cmap(i)) for i in cols]", "docstring": "Returns n unique and \"evenly\" spaced colors for the backgrounds\nof the projects.\n\nArgs:\nn (int): The number of unique colors wanted.\n\nReturns:\ncolors (list of str): The colors in hex form.", "source": "codesearchnet"}
{"code": "def format_unitary(mat, decimals=None):\n    \n    num_basis = len(mat)\n    mat_complex = np.zeros((num_basis, num_basis), dtype=complex)\n    for i, vec in enumerate(mat):\n        mat_complex[i] = format_statevector(vec, decimals)\n    return mat_complex", "docstring": "Format unitary coming from the backend to present to the Qiskit user.\n\nArgs:\nmat (list[list]): a list of list of [re, im] complex numbers\ndecimals (int): the number of decimals in the statevector.\nIf None, no rounding is done.\n\nReturns:\nlist[list[complex]]: a matrix of complex numbers", "source": "juraj-google-style"}
{"code": "def run(self):\n    return self._test_suite", "docstring": "Runs the dynamically generated test suite.\n\nThis method simply returns the test suite class created during\ninitialization. The test runner (e.g., unittest.main()) can then be used\nto discover and run the tests within this suite.\n\nReturns:\nThe dynamically created unittest.TestCase subclass.", "source": "github-repos"}
{"code": "def _GetConfigValue(self, config_parser, section_name, value_name):\n    try:\n        return config_parser.get(section_name, value_name)\n    except configparser.NoOptionError:\n        return None", "docstring": "Retrieves a value from the config parser.\n\nArgs:\nconfig_parser (ConfigParser): configuration parser.\nsection_name (str): name of the section that contains the value.\nvalue_name (str): name of the value.\n\nReturns:\nobject: configuration value or None if the value does not exists.", "source": "codesearchnet"}
{"code": "def copy_pkg(self, filename, _):\n    basename = os.path.basename(filename)\n    self._copy(filename, os.path.join(self.connection['mount_point'], 'Packages', basename))", "docstring": "Copy a package to the repo's Package subdirectory.\n\nArgs:\nfilename: Path for file to copy.\n_: Ignored. Used for compatibility with JDS repos.", "source": "codesearchnet"}
{"code": "def run(data, base_logdir, session_id, group_id, hparams):\n  \n  model = model_fn(hparams=hparams, seed=session_id)\n  logdir = os.path.join(base_logdir, session_id)\n\n  callback = tf.keras.callbacks.TensorBoard(\n      logdir,\n      update_freq=flags.FLAGS.summary_freq,\n      profile_batch=0,  \n  )\n  hparams_callback = hp.KerasCallback(logdir, hparams, group_name=group_id)\n  ((x_train, y_train), (x_test, y_test)) = data\n  result = model.fit(\n      x=x_train,\n      y=y_train,\n      epochs=flags.FLAGS.num_epochs,\n      shuffle=False,\n      validation_data=(x_test, y_test),\n      callbacks=[callback, hparams_callback],\n  )", "docstring": "Run a training/validation session.\n\nFlags must have been parsed for this function to behave.\n\nArgs:\ndata: The data as loaded by `prepare_data()`.\nbase_logdir: The top-level logdir to which to write summary data.\nsession_id: A unique string ID for this session.\ngroup_id: The string ID of the session group that includes this\nsession.\nhparams: A dict mapping hyperparameters in `HPARAMS` to values.", "source": "juraj-google-style"}
{"code": "def predict_features(self, df_features, df_target, idx=0, C=.1, **kwargs):\n        \n        lsvc = LinearSVR(C=C).fit(df_features.values, df_target.values)\n\n        return np.abs(lsvc.coef_)", "docstring": "For one variable, predict its neighbouring nodes.\n\nArgs:\ndf_features (pandas.DataFrame):\ndf_target (pandas.Series):\nidx (int): (optional) for printing purposes\nkwargs (dict): additional options for algorithms\nC (float): Penalty parameter of the error term\n\nReturns:\nlist: scores of each feature relatively to the target", "source": "juraj-google-style"}
{"code": "def _add_step(self, step):\n        \n        self._closed()\n\n        self.has_workflow_step = self.has_workflow_step or step.is_workflow\n        self.wf_steps[step.name_in_workflow] = step", "docstring": "Add a step to the workflow.\n\nArgs:\nstep (Step): a step from the steps library.", "source": "juraj-google-style"}
{"code": "def ensure_app_data_dir(appname, *args):\n    from ubelt import util_path\n    dpath = get_app_data_dir(appname, *args)\n    util_path.ensuredir(dpath)\n    return dpath", "docstring": "Calls `get_app_data_dir` but ensures the directory exists.\n\nArgs:\nappname (str): the name of the application\n*args: any other subdirectories may be specified\n\nSeeAlso:\nget_app_data_dir\n\nExample:\n>>> import ubelt as ub\n>>> dpath = ub.ensure_app_data_dir('ubelt')\n>>> assert exists(dpath)", "source": "codesearchnet"}
{"code": "def GetFailedTasks(self):\n    with self._lock:\n        return [task for task in self._tasks_abandoned.values() if (not task.has_retry)]", "docstring": "Retrieves all failed tasks.\n\nFailed tasks are tasks that were abandoned and have no retry task once\nthe foreman is done processing.\n\nReturns:\nlist[Task]: tasks.", "source": "codesearchnet"}
{"code": "def decorate(fn):\n    if (not isfunction(fn)):\n        raise TypeError('paco: fn must be a callable object')\n\n    @functools.wraps(fn)\n    def decorator(*args, **kw):\n        for arg in args:\n            if iscoro_or_corofunc(arg):\n                return fn(*args, **kw)\n        if (len(args) and (args[0] is None)):\n            raise TypeError('paco: first argument cannot be empty')\n\n        def wrapper(coro, *_args, **_kw):\n            if (not iscoro_or_corofunc(coro)):\n                raise TypeError('paco: first argument must be a coroutine or coroutine function')\n            _args = ((coro,) + (args + _args))\n            kw.update(_kw)\n            return fn(*_args, **kw)\n        return wrapper\n    return decorator", "docstring": "Generic decorator for coroutines helper functions allowing\nmultiple variadic initialization arguments.\n\nThis function is intended to be used internally.\n\nArguments:\nfn (function): target function to decorate.\n\nRaises:\nTypeError: if function or coroutine function is not provided.\n\nReturns:\nfunction: decorated function.", "source": "codesearchnet"}
{"code": "def easeInOutCubic(n):\n    _checkRange(n)\n    n = (2 * n)\n    if (n < 1):\n        return (0.5 * (n ** 3))\n    else:\n        n = (n - 2)\n        return (0.5 * ((n ** 3) + 2))", "docstring": "A cubic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def recipe_fred_series_to_bigquery(config, auth, fred_api_key, fred_series_id, fred_units, fred_frequency, fred_aggregation_method, project, dataset):\n    fred(config, {'auth': auth, 'api_key': fred_api_key, 'frequency': fred_frequency, 'series': [{'series_id': fred_series_id, 'units': fred_units, 'aggregation_method': fred_aggregation_method}], 'out': {'bigquery': {'project': project, 'dataset': dataset}}})", "docstring": "Download federal reserve series.\n\nArgs:\nauth (authentication) - Credentials used for writing data.\nfred_api_key (string) - 32 character alpha-numeric lowercase string.\nfred_series_id (string) - Series ID to pull data from.\nfred_units (choice) - A key that indicates a data value transformation.\nfred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to.\nfred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation.\nproject (string) - Existing BigQuery project.\ndataset (string) - Existing BigQuery dataset.", "source": "github-repos"}
{"code": "def show_constant(val: types.BaseValue) -> str:\n\n    def _ellipsis_printer(v):\n        if isinstance(v, types.PythonConstant):\n            return v.str_of_constant(_ellipsis_printer)\n        return '...'\n    return _ellipsis_printer(val)", "docstring": "Pretty-print a value if it is a constant.\n\nRecurses into a constant, printing the underlying Python value for constants\nand just using \"...\" for everything else (e.g., Variables). This is useful\nfor generating clear error messages that show the exact values related to an\nerror while preventing implementation details from leaking into the message.\n\nArgs:\nval: an abstract value.\n\nReturns:\nA string of the pretty-printed constant.", "source": "github-repos"}
{"code": "def get_player_stats(self, player_key, board_key):\n        \n        player_stats_url = self.api_path + 'player/' + player_key + '/league/' + board_key + '/stats/'\n        response = self.get_response(player_stats_url)\n        return response", "docstring": "Calling the Player Stats API\nArgs:\nplayer_key: Key of the player\nboard_key: key of the board\nReturn:\njson data", "source": "juraj-google-style"}
{"code": "class XLNetPoolerEndLogits(nn.Module):\n\n    def __init__(self, config: XLNetConfig):\n        super().__init__()\n        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n        self.activation = nn.Tanh()\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dense_1 = nn.Linear(config.hidden_size, 1)\n\n    def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n        \n        assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n        if start_positions is not None:\n            slen, hsz = hidden_states.shape[-2:]\n            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n            start_states = hidden_states.gather(-2, start_positions)\n            start_states = start_states.expand(-1, slen, -1)\n        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))\n        x = self.activation(x)\n        x = self.LayerNorm(x)\n        x = self.dense_1(x).squeeze(-1)\n        if p_mask is not None:\n            if p_mask.dtype == torch.float16:\n                x = x * (1 - p_mask) - 65500 * p_mask\n            else:\n                x = x * (1 - p_mask) - 1e+30 * p_mask\n        return x", "docstring": "Compute SQuAD end logits from sequence hidden states.\n\nArgs:\nconfig ([`XLNetConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`\nto use.", "source": "github-repos"}
{"code": "def update_ref(profile, ref, sha):\n    \n    resource = \"/refs/\" + ref\n    payload = {\"sha\": sha}\n    data = api.patch_request(profile, resource, payload)\n    return prepare(data)", "docstring": "Point a ref to a new SHA.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nref\nThe ref to update, e.g., ``heads/my-feature-branch``.\n\nsha\nThe SHA of the commit to point the ref to.\n\nReturns\nA dict with data about the ref.", "source": "juraj-google-style"}
{"code": "def _format_param_val(self, param_val):\n    if isinstance(param_val, list):\n        return ' '.join((str(x) for x in param_val))\n    else:\n        return str(param_val)", "docstring": "Internal method to format values in the packmol parameter dictionaries\n\nArgs:\nparam_val:\nSome object to turn into String\n\nReturns:\nstring representation of the object", "source": "codesearchnet"}
{"code": "def Eq(left: str, right: str) -> BooleanTerm:\n    if left == right:\n        return TRUE\n    elif left > right:\n        return _Eq(left, right)\n    else:\n        return _Eq(right, left)", "docstring": "Create an equality or its simplified equivalent.\n\nThis will ensure that left > right. (For left == right, it'll just return\nTRUE).\n\nArgs:\nleft: A string. Left side of the equality. This will get sorted, so it might\nend up on the right.\nright: A string. Right side of the equality. This will get sorted, so it\nmight end up on the left.\n\nReturns:\nA BooleanTerm.", "source": "github-repos"}
{"code": "def get_attrs(obj: object) -> dict[str, object]:\n    attrs = {}\n    for k in dir(obj) + object.__dir__(obj):\n        if k in attrs:\n            continue\n        try:\n            v = getattr(obj, k)\n        except Exception as e:\n            v = ExceptionWrapper(e)\n        attrs[k] = v\n    return attrs", "docstring": "Parse all attributes from an object.\n\nLimitation:\n\n* Descriptor will be resolved, so all properties are executed (some can\nhave side effects, or take a lot of time to compute)\n\nArgs:\nobj: Object to inspect\n\nReturns:\nDict mapping attribute name to values.", "source": "github-repos"}
{"code": "def grating_coupler_period(wavelength, n_eff, n_clad, incidence_angle_deg, diffration_order=1):\n    k0 = ((2.0 * np.pi) / wavelength)\n    beta = (n_eff.real * k0)\n    n_inc = n_clad\n    grating_period = (((2.0 * np.pi) * diffration_order) / (beta - ((k0 * n_inc) * np.sin(np.radians(incidence_angle_deg)))))\n    return grating_period", "docstring": "Calculate the period needed for a grating coupler.\n\nArgs:\nwavelength (float): The target wavelength for the\ngrating coupler.\nn_eff (float): The effective index of the mode\nof a waveguide with the width of the grating\ncoupler.\nn_clad (float): The refractive index of the cladding.\nincidence_angle_deg (float): The incidence angle\nthe grating coupler should operate at [degrees].\ndiffration_order (int): The grating order the coupler\nshould work at.  Default is 1st order (1).\n\nReturns:\nfloat: The period needed for the grating coupler\nin the same units as the wavelength was given at.", "source": "codesearchnet"}
{"code": "def add_spectrum(self, label, spectrum, color=None):\n    self._spectra[label] = spectrum\n    self.colors.append((color or self.colors_cycle[(len(self._spectra) % len(self.colors_cycle))]))", "docstring": "Adds a Spectrum for plotting.\n\nArgs:\nlabel (str): Label for the Spectrum. Must be unique.\nspectrum: Spectrum object\ncolor (str): This is passed on to matplotlib. E.g., \"k--\" indicates\na dashed black line. If None, a color will be chosen based on\nthe default color cycle.", "source": "codesearchnet"}
{"code": "def files(self, request, id):\n        \n        gist = self.send(request, id).json()\n        return gist['files']", "docstring": "Returns a list of files in the gist\n\nArguments:\nrequest: an initial request object\nid:      the gist identifier\n\nReturns:\nA list of the files", "source": "juraj-google-style"}
{"code": "def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):\n    \n    for artifact_definition in artifacts_reader.ReadDirectory(\n        path, extension=extension):\n      self.RegisterDefinition(artifact_definition)", "docstring": "Reads artifact definitions into the registry from files in a directory.\n\nThis function does not recurse sub directories.\n\nArgs:\nartifacts_reader (ArtifactsReader): an artifacts reader.\npath (str): path of the directory to read from.\nextension (Optional[str]): extension of the filenames to read.\n\nRaises:\nKeyError: if a duplicate artifact definition is encountered.", "source": "juraj-google-style"}
{"code": "def install(self, updates):\n    if (updates.count() == 0):\n        ret = {'Success': False, 'Updates': 'Nothing to install'}\n        return ret\n    installer = self._session.CreateUpdateInstaller()\n    self._session.ClientApplicationID = 'Salt: Install Update'\n    with salt.utils.winapi.Com():\n        install_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')\n    ret = {'Updates': {}}\n    for update in updates.updates:\n        uid = update.Identity.UpdateID\n        ret['Updates'][uid] = {}\n        ret['Updates'][uid]['Title'] = update.Title\n        ret['Updates'][uid]['AlreadyInstalled'] = bool(update.IsInstalled)\n        if (not salt.utils.data.is_true(update.IsInstalled)):\n            log.debug('To Be Installed: %s', uid)\n            log.debug('\\tTitle: %s', update.Title)\n            install_list.Add(update)\n    if (install_list.Count == 0):\n        ret = {'Success': True, 'Updates': 'Nothing to install'}\n        return ret\n    installer.Updates = install_list\n    try:\n        log.debug('Installing Updates')\n        result = installer.Install()\n    except pywintypes.com_error as error:\n        (hr, msg, exc, arg) = error.args\n        try:\n            failure_code = self.fail_codes[exc[5]]\n        except KeyError:\n            failure_code = 'Unknown Failure: {0}'.format(error)\n        log.error('Install Failed: %s', failure_code)\n        raise CommandExecutionError(failure_code)\n    result_code = {0: 'Installation Not Started', 1: 'Installation In Progress', 2: 'Installation Succeeded', 3: 'Installation Succeeded With Errors', 4: 'Installation Failed', 5: 'Installation Aborted'}\n    log.debug('Install Complete')\n    log.debug(result_code[result.ResultCode])\n    ret['Message'] = result_code[result.ResultCode]\n    if (result.ResultCode in [2, 3]):\n        ret['Success'] = True\n        ret['NeedsReboot'] = result.RebootRequired\n        log.debug('NeedsReboot: %s', result.RebootRequired)\n    else:\n        log.debug('Install Failed')\n        ret['Success'] = False\n    reboot = {0: 'Never Reboot', 1: 'Always Reboot', 2: 'Poss Reboot'}\n    for i in range(install_list.Count):\n        uid = install_list.Item(i).Identity.UpdateID\n        ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]\n        ret['Updates'][uid]['RebootBehavior'] = reboot[install_list.Item(i).InstallationBehavior.RebootBehavior]\n    return ret", "docstring": "Install the updates passed in the updates collection. Load the updates\ncollection using the ``search`` or ``available`` functions. If the\nupdates need to be downloaded, use the ``download`` function.\n\nArgs:\n\nupdates (Updates): An instance of the Updates class containing a\nthe updates to be installed.\n\nReturns:\ndict: A dictionary containing the results of the installation\n\nCode Example:\n\n.. code-block:: python\n\nimport salt.utils.win_update\nwua = salt.utils.win_update.WindowsUpdateAgent()\n\n# install KB3195454\nupdates = wua.search('KB3195454')\nresults = wua.download(updates)\nresults = wua.install(updates)", "source": "codesearchnet"}
{"code": "async def get(self, request):\n        \n        ticket = await self.get_ticket(request)\n        if ticket is None:\n            return None\n\n        try:\n            \n            now = time.time()\n            fields = self._ticket.validate(ticket, self._get_ip(request), now)\n\n            \n            if (self._reissue_time is not None and\n                now >= (fields.valid_until - self._reissue_time)):\n\n                \n                request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)\n\n            return fields.user_id\n\n        except TicketError as e:\n            return None", "docstring": "Gets the user_id for the request.\n\nGets the ticket for the request using the get_ticket() function, and\nauthenticates the ticket.\n\nArgs:\nrequest: aiohttp Request object.\n\nReturns:\nThe userid for the request, or None if the ticket is not\nauthenticated.", "source": "juraj-google-style"}
{"code": "def _encode_required_fields(self, builder: expressions.Builder) -> List[validation_pb2.SqlRequirement]:\n    if not isinstance(builder.return_type, _fhir_path_data_types.StructureDataType):\n        return []\n    if builder.return_type.element_type == 'Extension':\n        return []\n    encoded_requirements: List[validation_pb2.SqlRequirement] = []\n    for name, desc_message in builder.return_type.iter_all_descendants():\n        containing_type_builder = builder\n        child_builder = containing_type_builder\n        paths = name.split('.')\n        for path in paths:\n            if isinstance(child_builder.return_type, _fhir_path_data_types.StructureDataType):\n                containing_type_builder = child_builder\n            child_builder = self._get_new_child_builder(child_builder, path)\n            if not child_builder:\n                break\n        if not child_builder:\n            continue\n        name = paths[-1]\n        requirement = self._encode_required_field(name, containing_type_builder, child_builder, desc_message)\n        if requirement:\n            encoded_requirements.append(requirement)\n    return encoded_requirements", "docstring": "Returns `SqlRequirement`s for all required fields in `ElementDefinition`.\n\nArgs:\nbuilder: The builder containing the element to encode required fields for.\n\nReturns:\nA list of `SqlRequirement`s representing requirements generated from\nrequired fields on the element.", "source": "github-repos"}
{"code": "def _get_userprofile_from_registry(user, sid):\n    profile_dir = __utils__['reg.read_value']('HKEY_LOCAL_MACHINE', 'SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\ProfileList\\\\{0}'.format(sid), 'ProfileImagePath')['vdata']\n    log.debug('user %s with sid=%s profile is located at \"%s\"', user, sid, profile_dir)\n    return profile_dir", "docstring": "In case net user doesn't return the userprofile we can get it from the\nregistry\n\nArgs:\nuser (str): The user name, used in debug message\n\nsid (str): The sid to lookup in the registry\n\nReturns:\nstr: Profile directory", "source": "codesearchnet"}
{"code": "def _code_search(query, github_user=None):\n    github_client = temple.utils.GithubClient()\n    headers = {'Accept': 'application/vnd.github.v3.text-match+json'}\n    resp = github_client.get('/search/code', params={'q': query, 'per_page': 100}, headers=headers)\n    if ((resp.status_code == requests.codes.unprocessable_entity) and github_user):\n        raise temple.exceptions.InvalidGithubUserError('Invalid Github user or org - \"{}\"'.format(github_user))\n    resp.raise_for_status()\n    resp_data = resp.json()\n    repositories = collections.defaultdict(dict)\n    while True:\n        repositories.update({'git@github.com:{}.git'.format(repo['repository']['full_name']): repo['repository'] for repo in resp_data['items']})\n        next_url = _parse_link_header(resp.headers).get('next')\n        if next_url:\n            resp = requests.get(next_url, headers=headers)\n            resp.raise_for_status()\n            resp_data = resp.json()\n        else:\n            break\n    return repositories", "docstring": "Performs a Github API code search\n\nArgs:\nquery (str): The query sent to Github's code search\ngithub_user (str, optional): The Github user being searched in the query string\n\nReturns:\ndict: A dictionary of repository information keyed on the git SSH url\n\nRaises:\n`InvalidGithubUserError`: When ``github_user`` is invalid", "source": "codesearchnet"}
{"code": "def _prefix_from_ip_int(self, ip_int):\n        \n        trailing_zeroes = _count_righthand_zero_bits(ip_int,\n                                                     self._max_prefixlen)\n        prefixlen = self._max_prefixlen - trailing_zeroes\n        leading_ones = ip_int >> trailing_zeroes\n        all_ones = (1 << prefixlen) - 1\n        if leading_ones != all_ones:\n            byteslen = self._max_prefixlen \n            details = _int_to_bytes(ip_int, byteslen, 'big')\n            msg = 'Netmask pattern %r mixes zeroes & ones'\n            raise ValueError(msg % details)\n        return prefixlen", "docstring": "Return prefix length from the bitwise netmask.\n\nArgs:\nip_int: An integer, the netmask in expanded bitwise format\n\nReturns:\nAn integer, the prefix length.\n\nRaises:\nValueError: If the input intermingles zeroes & ones", "source": "juraj-google-style"}
{"code": "def __init__(self, connect_func, max_size=10):\n    \n    self.connect_func = connect_func\n    self.limiter = threading.BoundedSemaphore(max_size)\n    self.idle_conns = []  \n    self.closed = False", "docstring": "Creates a ConnectionPool.\n\nArgs:\nconnect_func: A closure which returns a new connection to the underlying\ndatabase, i.e. a MySQLdb.Connection. Should raise or block if the\ndatabase is unavailable.\nmax_size: The maximum number of simultaneous connections.", "source": "juraj-google-style"}
{"code": "def top_kth_iterative(x, k):\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  def next_x(cur_x, _):\n    top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True)\n    return cur_x * to_float(cur_x < top_x)\n  \n  fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x),\n                   parallel_iterations=2, back_prop=False)\n  return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True))", "docstring": "Compute the k-th top element of x on the last axis iteratively.\n\nThis assumes values in x are non-negative, rescale if needed.\nIt is often faster than tf.nn.top_k for small k, especially if k < 30.\nNote: this does not support back-propagation, it stops gradients!\n\nArgs:\nx: a Tensor of non-negative numbers of type float.\nk: a python integer.\n\nReturns:\na float tensor of the same shape as x but with 1 on the last axis\nthat contains the k-th largest number in x.", "source": "juraj-google-style"}
{"code": "def bind(self, devices_to_bind):\n        \n        if self.entity_api_key == \"\":\n            return {'status': 'failure', 'response': 'No API key found in request'}\n        url = self.base_url + \"api/0.1.0/subscribe/bind\"\n        headers = {\"apikey\": self.entity_api_key}\n        data = {\n            \"exchange\": \"amq.topic\",\n            \"keys\": devices_to_bind,\n            \"queue\": self.entity_id\n        }\n\n        with self.no_ssl_verification():\n            r = requests.post(url, json=data, headers=headers)\n        response = dict()\n        if \"No API key\" in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"failure\"\n            r = json.loads(r.content.decode(\"utf-8\"))['message']\n        elif 'bind queue ok' in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"success\"\n            r = r.content.decode(\"utf-8\")\n        else:\n            response[\"status\"] = \"failure\"\n            r = r.content.decode(\"utf-8\")\n        response[\"response\"] = str(r)\n        return response", "docstring": "This function allows an entity to list the devices to subscribe for data. This function must be called\nat least once, before doing a subscribe. Subscribe function will listen to devices that are bound here.\n\nArgs:\ndevices_to_bind  (list): an array of devices to listen to.\nExample bind([\"test100\",\"testDemo\"])", "source": "juraj-google-style"}
{"code": "def run(self, dag):\n        \n        if self.layout is None:\n            if self.property_set[\"layout\"]:\n                self.layout = self.property_set[\"layout\"]\n            else:\n                self.layout = Layout.generate_trivial_layout(*dag.qregs.values())\n\n        self.property_set['is_swap_mapped'] = True\n\n        for gate in dag.twoQ_gates():\n            physical_q0 = self.layout[gate.qargs[0]]\n            physical_q1 = self.layout[gate.qargs[1]]\n\n            if self.coupling_map.distance(physical_q0, physical_q1) != 1:\n                self.property_set['is_swap_mapped'] = False\n                return", "docstring": "If `dag` is mapped to `coupling_map`, the property\n`is_swap_mapped` is set to True (or to False otherwise).\n\nArgs:\ndag (DAGCircuit): DAG to map.", "source": "juraj-google-style"}
{"code": "def Normalize(self, fraction=1.0):\n    if self.log:\n        raise ValueError('Pmf is under a log transform')\n    total = self.Total()\n    if (total == 0.0):\n        raise ValueError('total probability is zero.')\n        logging.warning('Normalize: total probability is zero.')\n        return total\n    factor = (float(fraction) / total)\n    for x in self.d:\n        self.d[x] *= factor\n    return total", "docstring": "Normalizes this PMF so the sum of all probs is fraction.\n\nArgs:\nfraction: what the total should be after normalization\n\nReturns: the total probability before normalizing", "source": "codesearchnet"}
{"code": "def _get_bucket_attribute(bucket, query_param, xml_response_tag, retry_params=None, _account_id=None):\n    api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id)\n    common.validate_bucket_path(bucket)\n    (status, headers, content) = api.get_bucket(('%s?%s' % (bucket, query_param)))\n    errors.check_status(status, [200], bucket, resp_headers=headers, body=content)\n    root = ET.fromstring(content)\n    if ((root.tag == xml_response_tag) and root.text):\n        return root.text\n    return None", "docstring": "Helper method to request a bucket parameter and parse the response.\n\nArgs:\nbucket: A Google Cloud Storage bucket of form '/bucket'.\nquery_param: The query parameter to include in the get bucket request.\nxml_response_tag: The expected tag in the xml response.\nretry_params: An api_utils.RetryParams for this call to GCS. If None,\nthe default one is used.\n_account_id: Internal-use only.\n\nReturns:\nThe xml value as a string.  None if the returned xml does not match expected\nformat.\n\nRaises:\nerrors.AuthorizationError: if authorization failed.\nerrors.NotFoundError: if the bucket does not exist.", "source": "codesearchnet"}
{"code": "def create_sequence_pretty_tensor(sequence_input, shape=None, save_state=True):\n  \n  inputs = prettytensor.wrap_sequence(sequence_input.inputs, tensor_shape=shape)\n  targets = prettytensor.wrap_sequence(sequence_input.targets)\n  if save_state:\n    bookkeeper.set_recurrent_state_saver(sequence_input)\n  return inputs, targets", "docstring": "Creates a PrettyTensor object for the given sequence.\n\nThe first dimension is treated as a time-dimension * batch and a default is\nset for `unroll` and `state_saver`.\n\nTODO(eiderman): Remove shape.\n\nArgs:\nsequence_input: A SequenceInput or StateSavingSequenceInput\nshape: The shape of each item in the sequence (including batch).\nsave_state: If true, use the sequence_input's state and save_state methods.\nReturns:\n2 Layers: inputs, targets", "source": "juraj-google-style"}
{"code": "def pow(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"pow\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Pow this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the pow against this.\naxis: The axis to pow over.\nlevel: The Multilevel index level to apply pow over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the Pow applied.", "source": "juraj-google-style"}
{"code": "def read(self, key):\n    key = quote(key, safe='~')\n    url = '/internal/playbooks/keyValue/{}'.format(key)\n    r = self.tcex.session.get(url)\n    data = r.content\n    if ((data is not None) and (not isinstance(data, str))):\n        data = str(r.content, 'utf-8')\n    return data", "docstring": "Read data from remote KV store for the provided key.\n\nArgs:\nkey (string): The key to read in remote KV store.\n\nReturns:\n(any): The response data from the remote KV store.", "source": "codesearchnet"}
{"code": "def __init__(self, prefs, g, kappa=2.0, omega=0.5, beta=1.0, mu=1.0,\n            freeparams=['kappa', 'omega', 'beta', 'mu']):\n        \n\n        _checkParam('g', g, self.PARAMLIMITS, self.PARAMTYPES)\n        assert abs(1 - g.sum()) <= ALMOST_ZERO, \"g doesn't sum to 1\"\n        self.g = g.copy()\n        self.g /= self.g.sum()\n\n        super(ExpCM_empirical_phi, self).__init__(prefs, kappa=kappa,\n                omega=omega, beta=beta, mu=mu, freeparams=freeparams)", "docstring": "Initialize an `ExpCM_empirical_phi` object.\n\nArgs:\n`prefs`, `kappa`, `omega`, `beta`, `mu`, `freeparams`\nSame meaning as for an `ExpCM`\n`g`\nHas the meaning described in the main class doc string.", "source": "juraj-google-style"}
{"code": "def get_section_header(self, section):\n    self._ensure_section_headers_loaded()\n    if (type(section) is int):\n        return self._section_headers_by_index[section]\n    else:\n        return self._section_headers_by_name[section]", "docstring": "Get a specific section header by index or name.\n\nArgs:\nsection(int or str): The index or name of the section header to return.\n\nReturns:\n:class:`~ELF.SectionHeader`: The section header.\n\nRaises:\nKeyError: The requested section header does not exist.", "source": "codesearchnet"}
{"code": "def export_warnings(self, export_file):\n    warn_filepath = op.dirname(export_file)\n    warn_filename = op.splitext(op.basename(export_file))[0]\n    self._add_entry(templates.EXPORT_WARNINGS.format(warnings_export_path=warn_filepath, warnings_export_file=warn_filename))", "docstring": "Append an export warnings entry to the journal.\n\nThis instructs Revit to export warnings from the opened model.\nCurrently Revit will stop journal execution if the model does not\nhave any warnings and the export warnings UI button is disabled.\n\nArgs:\nexport_file (str): full path of the ouput html file", "source": "codesearchnet"}
{"code": "def rank_dated_files(pattern, dir, descending=True):\n    files = glob.glob(op.join(dir, pattern))\n    return sorted(files, reverse=descending)", "docstring": "Search a directory for files that match a pattern. Return an ordered list of these files by filename.\n\nArgs:\npattern: The glob pattern to search for.\ndir: Path to directory where the files will be searched for.\ndescending: Default True, will sort alphabetically by descending order.\n\nReturns:\nlist: Rank-ordered list by filename.", "source": "codesearchnet"}
{"code": "def nextindx(self):\n    indx = 0\n    with s_lmdbslab.Scan(self.slab, self.db) as curs:\n        last_key = curs.last_key()\n        if (last_key is not None):\n            indx = (s_common.int64un(last_key) + 1)\n    return indx", "docstring": "Determine the next insert offset according to storage.\n\nReturns:\nint: The next insert offset.", "source": "codesearchnet"}
{"code": "def _preprocess_numpy_input(x, data_format, mode):\n    if not issubclass(x.dtype.type, np.floating):\n        x = x.astype(backend.floatx(), copy=False)\n    if mode == 'tf':\n        x /= 127.5\n        x -= 1.0\n        return x\n    elif mode == 'torch':\n        x /= 255.0\n        mean = [0.485, 0.456, 0.406]\n        std = [0.229, 0.224, 0.225]\n    else:\n        if data_format == 'channels_first':\n            if len(x.shape) == 3:\n                x = x[::-1, ...]\n            else:\n                x = x[:, ::-1, ...]\n        else:\n            x = x[..., ::-1]\n        mean = [103.939, 116.779, 123.68]\n        std = None\n    if data_format == 'channels_first':\n        if len(x.shape) == 3:\n            x[0, :, :] -= mean[0]\n            x[1, :, :] -= mean[1]\n            x[2, :, :] -= mean[2]\n            if std is not None:\n                x[0, :, :] /= std[0]\n                x[1, :, :] /= std[1]\n                x[2, :, :] /= std[2]\n        else:\n            x[:, 0, :, :] -= mean[0]\n            x[:, 1, :, :] -= mean[1]\n            x[:, 2, :, :] -= mean[2]\n            if std is not None:\n                x[:, 0, :, :] /= std[0]\n                x[:, 1, :, :] /= std[1]\n                x[:, 2, :, :] /= std[2]\n    else:\n        x[..., 0] -= mean[0]\n        x[..., 1] -= mean[1]\n        x[..., 2] -= mean[2]\n        if std is not None:\n            x[..., 0] /= std[0]\n            x[..., 1] /= std[1]\n            x[..., 2] /= std[2]\n    return x", "docstring": "Preprocesses a NumPy array encoding a batch of images.\n\nArgs:\nx: Input array, 3D or 4D.\ndata_format: Data format of the image array.\nmode: One of \"caffe\", \"tf\" or \"torch\".\n- caffe: will convert the images from RGB to BGR,\nthen will zero-center each color channel with\nrespect to the ImageNet dataset,\nwithout scaling.\n- tf: will scale pixels between -1 and 1,\nsample-wise.\n- torch: will scale pixels between 0 and 1 and then\nwill normalize each channel with respect to the\nImageNet dataset.\n\nReturns:\nPreprocessed Numpy array.", "source": "github-repos"}
{"code": "async def post(self):\n    logging.debug('\\n\\n[+] -- Account debugging. ')\n    if settings.SIGNATURE_VERIFICATION:\n        super().verify()\n    try:\n        data = json.loads(self.request.body)\n    except:\n        self.set_status(400)\n        self.write({'error': 400, 'reason': 'Unexpected data format. JSON required'})\n        raise tornado.web.Finish\n    message = data['message']\n    new_account = (await self.account.createaccount(**data))\n    logging.debug('\\n\\n [+] -- New account debugging.')\n    logging.debug(new_account['id'])\n    if ('error' in new_account.keys()):\n        self.set_status(new_account['error'])\n        self.write(new_account)\n        raise tornado.web.Finish\n    wallets = (await self.account.balance.get_wallets(uid=new_account['id']))\n    if isinstance(wallets, dict):\n        if ('error' in wallets.keys()):\n            self.set_status(wallets['error'])\n            self.write(wallets)\n            raise tornado.web.Finish\n    new_account.update({'href': ((settings.ENDPOINTS['ams'] + '/') + new_account['public_key']), 'wallets': json.dumps(wallets['wallets'])})\n    if new_account.get('email'):\n        email_data = {'to': new_account['email'], 'subject': 'Robin8 Support', 'optional': (('Your account was created on %s' % settings.domain) + new_account['href'])}\n        (await self.account.mailer.sendmail(**email_data))\n    self.write(new_account)", "docstring": "Creates new account\n\nAccepts:\n- message (signed dict):\n- \"device_id\" - str\n- \"email\" - str\n- \"phone\" - str\n- \"public_key\" - str\n- \"signature\" - str\n\nReturns:\ndictionary with following fields:\n- \"device_id\" - str\n- \"phone\" - str\n- \"public_key\" - str\n- \"count\" - int  ( wallets amount )\n- \"level\" - int (2 by default)\n- \"news_count\" - int (0 by default)\n- \"email\" - str\n- \"href\" - str\n- \"wallets\" - list\n\nVerified: True", "source": "codesearchnet"}
{"code": "def set_membership(self, room_id, user_id, membership, reason=\"\", profile=None,\n                       timestamp=None):\n        \n        if profile is None:\n            profile = {}\n        body = {\n            \"membership\": membership,\n            \"reason\": reason\n        }\n        if 'displayname' in profile:\n            body[\"displayname\"] = profile[\"displayname\"]\n        if 'avatar_url' in profile:\n            body[\"avatar_url\"] = profile[\"avatar_url\"]\n\n        return self.send_state_event(room_id, \"m.room.member\", body, state_key=user_id,\n                                     timestamp=timestamp)", "docstring": "Perform PUT /rooms/$room_id/state/m.room.member/$user_id\n\nArgs:\nroom_id (str): The room ID\nuser_id (str): The user ID\nmembership (str): New membership value\nreason (str): The reason\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def __sid_to_username(sid):\n        \n        if sid is None or sid == '':\n            return ''\n        try:\n            sid_bin = win32security.GetBinarySid(sid)  \n        except pywintypes.error as exc:  \n            raise ValueError(\n                    'pkg: Software owned by {0} is not valid: [{1}] {2}'.format(sid, exc.winerror, exc.strerror)\n                )\n        try:\n            name, domain, _account_type = win32security.LookupAccountSid(None, sid_bin)  \n            user_name = '{0}\\\\{1}'.format(domain, name)\n        except pywintypes.error as exc:  \n            \n            \n            \n            if exc.winerror == winerror.ERROR_NONE_MAPPED:  \n                \n                \n                return sid\n            else:\n                raise ValueError(\n                          'Failed looking up sid \\'{0}\\' username: [{1}] {2}'.format(sid, exc.winerror, exc.strerror)\n                        )\n        try:\n            user_principal = win32security.TranslateName(  \n                            user_name,\n                            win32api.NameSamCompatible,  \n                            win32api.NameUserPrincipal)  \n        except pywintypes.error as exc:  \n            \n            \n            \n            \n            \n            if exc.winerror in (winerror.ERROR_NO_SUCH_DOMAIN,\n                                winerror.ERROR_INVALID_DOMAINNAME,\n                                winerror.ERROR_NONE_MAPPED):\n                return '{0}@{1}'.format(name.lower(), domain.lower())\n            else:\n                raise\n        return user_principal", "docstring": "Provided with a valid Windows Security Identifier (SID) and returns a Username\n\nArgs:\nsid (str): Security Identifier (SID).\n\nReturns:\nstr: Username in the format of username@realm or username@computer.", "source": "juraj-google-style"}
{"code": "def set_hasher(self, hash, rounds=None):\n    hash = hash.replace('-', '_')\n    if (hash not in VALID_HASHERS):\n        raise WrongHashAlgorithm(WRONG_HASH_MESSAGE)\n    hasher = getattr(ph, hash)\n    utils.test_hasher(hasher)\n    default_rounds = getattr(hasher, 'default_rounds', 1)\n    min_rounds = getattr(hasher, 'min_rounds', 1)\n    max_rounds = getattr(hasher, 'max_rounds', float('inf'))\n    rounds = min(max((rounds or default_rounds), min_rounds), max_rounds)\n    op = {'schemes': (VALID_HASHERS + DEPRECATED_HASHERS), 'deprecated': DEPRECATED_HASHERS, 'default': hash, (hash + '__default_rounds'): rounds}\n    self.hasher = CryptContext(**op)\n    self.hash = hash.replace('_', '-')\n    self.rounds = rounds", "docstring": "Updates the has algorithm and, optionally, the number of rounds\nto use.\n\nRaises:\n`~WrongHashAlgorithm` if new algorithm isn't one of the three\nrecomended options.", "source": "codesearchnet"}
{"code": "def _open_script_interface(self, connection_id, callback):\n        \n\n        try:\n            context = self.connections.get_context(connection_id)\n        except ArgumentError:\n            callback(connection_id, self.id, False, \"Could not find connection information\")\n            return\n\n        success = HighSpeedChar in context['services'][TileBusService]\n        reason = None\n        if not success:\n            reason = 'Could not find high speed streaming characteristic'\n\n        callback(connection_id, self.id, success, reason)", "docstring": "Enable script streaming interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def _get_values(self, data_blob, dtype_enum, shape_string):\n    \n    buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)\n    return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()", "docstring": "Obtains values for histogram data given blob and dtype enum.\nArgs:\ndata_blob: The blob obtained from the database.\ndtype_enum: The enum representing the dtype.\nshape_string: A comma-separated string of numbers denoting shape.\nReturns:\nThe histogram values as a list served to the frontend.", "source": "juraj-google-style"}
{"code": "def send_log_messages(self, messages: List[LogMessage]) -> None:\n    pass", "docstring": "Sends multiple log messages to be handled.\n\nArgs:\n* messages: list of LogMessage dictionaries\n\nReturns:\n* None", "source": "github-repos"}
{"code": "def resolve_backend_name(name, backends, deprecated, aliased):\n    available = [backend.name() for backend in backends]\n    resolved_name = deprecated.get(name, aliased.get(name, name))\n    if isinstance(resolved_name, list):\n        resolved_name = next((b for b in resolved_name if (b in available)), '')\n    if (resolved_name not in available):\n        raise LookupError(\"backend '{}' not found.\".format(name))\n    if (name in deprecated):\n        logger.warning(\"WARNING: '%s' is deprecated. Use '%s'.\", name, resolved_name)\n    return resolved_name", "docstring": "Resolve backend name from a deprecated name or an alias.\n\nA group will be resolved in order of member priorities, depending on\navailability.\n\nArgs:\nname (str): name of backend to resolve\nbackends (list[BaseBackend]): list of available backends.\ndeprecated (dict[str: str]): dict of deprecated names.\naliased (dict[str: list[str]]): dict of aliased names.\n\nReturns:\nstr: resolved name (name of an available backend)\n\nRaises:\nLookupError: if name cannot be resolved through regular available\nnames, nor deprecated, nor alias names.", "source": "codesearchnet"}
{"code": "def _reconstruct_sequence_inputs(op_def, inputs, attrs) -> list[Union[tensor_lib.Tensor, list[tensor_lib.Tensor]]]:\n    grouped_inputs = []\n    i = 0\n    for input_arg in op_def.input_arg:\n        if input_arg.number_attr:\n            input_len = attrs[input_arg.number_attr].i\n            is_sequence = True\n        elif input_arg.type_list_attr:\n            input_len = len(attrs[input_arg.type_list_attr].list.type)\n            is_sequence = True\n        else:\n            input_len = 1\n            is_sequence = False\n        if is_sequence:\n            grouped_inputs.append(inputs[i:i + input_len])\n        else:\n            grouped_inputs.append(inputs[i])\n        i += input_len\n    assert i == len(inputs)\n    return grouped_inputs", "docstring": "Regroups a flat list of input tensors into scalar and sequence inputs.\n\nArgs:\nop_def: The `op_def_pb2.OpDef` (for knowing the input types)\ninputs: a list of input `Tensor`s to the op.\nattrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define\nhow long each sequence is)\n\nReturns:\nA list of `Tensor`s (corresponding to scalar inputs) and lists of\n`Tensor`s (corresponding to sequence inputs).", "source": "github-repos"}
{"code": "def get_create_agent(agent_kwargs):\n\n    def create_agent(sess, environment, summary_writer=None):\n        'Creates a DQN agent.\\n\\n    Simplified version of `dopamine.discrete_domains.train.create_agent`\\n\\n    Args:\\n      sess: a session\\n      environment: an environment\\n      summary_writer: a summary writer.\\n\\n    Returns:\\n      a DQN agent.\\n    '\n        return BatchDQNAgent(env_batch_size=environment.batch_size, sess=sess, num_actions=environment.action_space.n, summary_writer=summary_writer, tf_device='/gpu:*', **agent_kwargs)\n    return create_agent", "docstring": "Factory for dopamine agent initialization.\n\nArgs:\nagent_kwargs: dict of BatchDQNAgent parameters\n\nReturns:\nFunction(sess, environment, summary_writer) -> BatchDQNAgent instance.", "source": "codesearchnet"}
{"code": "def with_attributes(name, checkpointable_objects=None, functions=None, copy_from=None):\n    checkpointable_objects = checkpointable_objects or []\n    functions = functions or []\n    if copy_from is not None:\n        for cls in copy_from:\n            checkpointable_objects.extend(cls.all_checkpointable_objects)\n            functions.extend(cls.all_functions)\n    classdict = {'all_checkpointable_objects': set(checkpointable_objects), 'all_functions': set(functions)}\n    return type(name, (SerializedAttributes,), classdict)", "docstring": "Creates a subclass with all attributes as specified in the arguments.\n\nArgs:\nname: Name of subclass\ncheckpointable_objects: List of checkpointable objects to be serialized\nin the SavedModel.\nfunctions: List of functions to be serialized in the SavedModel.\ncopy_from: List of other SerializedAttributes subclasses. The returned\nclass will copy checkpoint objects/functions from each subclass.\n\nReturns:\nChild class with attributes as defined in the `checkpointable_objects`\nand `functions` lists.", "source": "github-repos"}
{"code": "def _ParseRecordLogline(self, parser_mediator, structure):\n    \n    date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()\n\n    try:\n      datetime_iso8601 = self._GetISO8601String(structure.date_time)\n      date_time.CopyFromStringISO8601(datetime_iso8601)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    event_data = GoogleDriveSyncLogEventData()\n    event_data.log_level = structure.log_level\n    event_data.pid = structure.pid\n    event_data.thread = structure.thread\n    event_data.source_code = structure.source_code\n    \n    event_data.message = structure.message.replace('\\n', ' ')\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a logline record structure and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def _shard_num_args(self, constant_dict: Dict[(str, Any)]=None) -> List[Dict[(str, Any)]]:\n    args = []\n    for shard_num in range(self._num_shards):\n        append_dict = (dict(constant_dict) if constant_dict else {})\n        append_dict['shard_num'] = shard_num\n        append_dict['num_shards'] = self._num_shards\n        append_dict['num_shard_qubits'] = self._num_shard_qubits\n        append_dict.update(self._shared_mem_dict)\n        args.append(append_dict)\n    return args", "docstring": "Helper that returns a list of dicts including a num_shard entry.\n\nThe dict for each entry also includes shared_mem_dict, the number of\nshards, the number of shard qubits, and the supplied constant dict.\n\nArgs:\nconstant_dict: Dictionary that will be updated to every element of\nthe returned list of dictionaries.\n\nReturns:\nA list of dictionaries. Each dictionary is constant except for the\n'shard_num' key which ranges from 0 to number of shards - 1.\nIncluded keys are 'num_shards' and 'num_shard_qubits' along with\nall the keys in constant_dict.", "source": "codesearchnet"}
{"code": "def _async_open(self, session_id, proto_version):\n    try:\n        (yield self.application_context.create_session_if_needed(session_id, self.request))\n        session = self.application_context.get_session(session_id)\n        protocol = Protocol(proto_version)\n        self.receiver = Receiver(protocol)\n        log.debug('Receiver created for %r', protocol)\n        self.handler = ProtocolHandler()\n        log.debug('ProtocolHandler created for %r', protocol)\n        self.connection = self.application.new_connection(protocol, self, self.application_context, session)\n        log.info('ServerConnection created')\n    except ProtocolError as e:\n        log.error('Could not create new server session, reason: %s', e)\n        self.close()\n        raise e\n    msg = self.connection.protocol.create('ACK')\n    (yield self.send_message(msg))\n    raise gen.Return(None)", "docstring": "Perform the specific steps needed to open a connection to a Bokeh session\n\nSpecifically, this method coordinates:\n\n* Getting a session for a session ID (creating a new one if needed)\n* Creating a protocol receiver and hander\n* Opening a new ServerConnection and sending it an ACK\n\nArgs:\nsession_id (str) :\nA session ID to for a session to connect to\n\nIf no session exists with the given ID, a new session is made\n\nproto_version (str):\nThe protocol version requested by the connecting client.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def _concatenate_inner(self, direction):\n    tmp_bucket = []\n    source_chunks = (self if direction else self[::(- 1)])\n    target_chunks = ChunkList()\n    for chunk in source_chunks:\n        if ((chunk.dependency == direction) or ((direction is False) and chunk.is_space())):\n            tmp_bucket.append(chunk)\n            continue\n        tmp_bucket.append(chunk)\n        if (not direction):\n            tmp_bucket = tmp_bucket[::(- 1)]\n        new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])\n        new_chunk = Chunk(new_word, pos=chunk.pos, label=chunk.label, dependency=chunk.dependency)\n        target_chunks.append(new_chunk)\n        tmp_bucket = ChunkList()\n    if tmp_bucket:\n        target_chunks += tmp_bucket\n    if (not direction):\n        target_chunks = target_chunks[::(- 1)]\n    self.list = target_chunks", "docstring": "Concatenates chunks based on each chunk's dependency.\n\nArgs:\ndirection (bool): Direction of concatenation process. True for forward.", "source": "codesearchnet"}
{"code": "def get_strategy() -> 'StrategyBase':\n    return _get_per_thread_mode().strategy", "docstring": "Returns the current `tf.distribute.Strategy` object.\n\nTypically only used in a cross-replica context:\n\n```\nif tf.distribute.in_cross_replica_context():\nstrategy = tf.distribute.get_strategy()\n...\n```\n\nReturns:\nA `tf.distribute.Strategy` object. Inside a `with strategy.scope()` block,\nit returns `strategy`, otherwise it returns the default (single-replica)\n`tf.distribute.Strategy` object.", "source": "github-repos"}
{"code": "def _transpile_circuit(circuit_config_tuple):\n    (circuit, transpile_config) = circuit_config_tuple\n    if transpile_config.pass_manager:\n        pass_manager = transpile_config.pass_manager\n    elif transpile_config.coupling_map:\n        pass_manager = default_pass_manager(transpile_config.basis_gates, transpile_config.coupling_map, transpile_config.initial_layout, transpile_config.seed_transpiler)\n    else:\n        pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)\n    return pass_manager.run(circuit)", "docstring": "Select a PassManager and run a single circuit through it.\n\nArgs:\ncircuit_config_tuple (tuple):\ncircuit (QuantumCircuit): circuit to transpile\ntranspile_config (TranspileConfig): configuration dictating how to transpile\n\nReturns:\nQuantumCircuit: transpiled circuit", "source": "codesearchnet"}
{"code": "def _avro_rows(block, avro_schema):\n    blockio = six.BytesIO(block.avro_rows.serialized_binary_rows)\n    while True:\n        try:\n            (yield fastavro.schemaless_reader(blockio, avro_schema))\n        except StopIteration:\n            break", "docstring": "Parse all rows in a stream block.\n\nArgs:\nblock ( \\\n~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \\\n):\nA block containing Avro bytes to parse into rows.\navro_schema (fastavro.schema):\nA parsed Avro schema, used to deserialized the bytes in the\nblock.\n\nReturns:\nIterable[Mapping]:\nA sequence of rows, represented as dictionaries.", "source": "codesearchnet"}
{"code": "def can_fetch(self, request: Request, file=None) -> bool:\n    try:\n        return self.can_fetch_pool(request)\n    except NotInPoolError:\n        pass\n    (yield from self.fetch_robots_txt(request, file=file))\n    return self.can_fetch_pool(request)", "docstring": "Return whether the request can fetched.\n\nArgs:\nrequest: Request.\nfile: A file object to where the robots.txt contents are written.\n\nCoroutine.", "source": "codesearchnet"}
{"code": "def append(\n            self,\n            moment_or_operation_tree: Union[ops.Moment, ops.OP_TREE],\n            strategy: InsertStrategy = InsertStrategy.EARLIEST):\n        \n        self.insert(len(self._moments), moment_or_operation_tree, strategy)", "docstring": "Appends operations onto the end of the circuit.\n\nMoments within the operation tree are appended intact.\n\nArgs:\nmoment_or_operation_tree: The moment or operation tree to append.\nstrategy: How to pick/create the moment to put operations into.", "source": "juraj-google-style"}
{"code": "def flatten(sequence):\n    flat_sequence = nest.flatten(sequence, expand_composites=True)\n    return [item.flow if isinstance(item, tensor_array_ops.TensorArray) else item for item in flat_sequence]", "docstring": "Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.\n\nArgs:\nsequence: A nested structure of Tensors, CompositeTensors, and TensorArrays.\n\nReturns:\nA list of tensors.", "source": "github-repos"}
{"code": "def AddWatchOnly(self, script_hash):\n        \n        if script_hash in self._contracts:\n            logger.error(\"Address already in contracts\")\n            return\n\n        self._watch_only.append(script_hash)", "docstring": "Add a watch only address to the wallet.\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20) representing the public key.\n\nNote:\nPrints a warning to the console if the address already exists in the wallet.", "source": "juraj-google-style"}
{"code": "def check_annotation_type_mismatch(self, node, name, typ, value, stack, allow_none, details=None):\n    if not typ or not value:\n        return\n    if value.data == [self.convert.ellipsis] or (allow_none and value.data == [self.convert.none]):\n        return\n    contained_type = abstract_utils.match_type_container(typ, ('typing.ClassVar', 'dataclasses.InitVar'))\n    if contained_type:\n        typ = contained_type\n    bad = self.matcher(node).compute_one_match(value, typ).bad_matches\n    for match in bad:\n        self.errorlog.annotation_type_mismatch(stack, match.expected.typ, match.actual_binding, name, match.error_details, details)", "docstring": "Checks for a mismatch between a variable's annotation and value.\n\nArgs:\nnode: node\nname: variable name\ntyp: variable annotation\nvalue: variable value\nstack: a frame stack for error reporting\nallow_none: whether a value of None is allowed for any type\ndetails: any additional details to add to the error message", "source": "github-repos"}
{"code": "def _case_create_default_action(predicates, actions):\n    k = len(predicates) - 1\n    predicate, action = (predicates[k], actions[k])\n    other_predicates, other_actions = (predicates[:k], actions[:k])\n\n    def default_action():\n        others_msg = 'Implementation error: selected default action \n        default_msg = ('Input error: None of conditions evaluated as True:', array_ops_stack.stack(predicates, name='preds_c'))\n        with ops.control_dependencies([_assert_at_most_n_true(other_predicates, n=0, msg=others_msg), control_flow_assert.Assert(predicate, data=default_msg)]):\n            return action()\n    return (default_action, other_predicates, other_actions)", "docstring": "Creates default action for a list of actions and their predicates.\n\nIt uses the input actions to select an arbitrary as default and makes sure\nthat corresponding predicates have valid values.\n\nArgs:\npredicates: a list of bool scalar tensors\nactions: a list of callable objects which return tensors.\n\nReturns:\na callable", "source": "github-repos"}
{"code": "def RemoveConnectedPeer(self, peer):\n    if (peer in self.Peers):\n        self.Peers.remove(peer)", "docstring": "Remove a connected peer from the known peers list.\n\nArgs:\npeer (NeoNode): instance.", "source": "codesearchnet"}
{"code": "def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor:\n    for processor in self:\n        function_args = inspect.signature(processor.__call__).parameters\n        if len(function_args) > 2:\n            if not all((arg in kwargs for arg in list(function_args.keys())[2:])):\n                raise ValueError(f'Make sure that all the required parameters: {list(function_args.keys())} for {processor.__class__} are passed to the logits processor.')\n            scores = processor(input_ids, scores, **kwargs)\n        else:\n            scores = processor(input_ids, scores)\n    return scores", "docstring": "Args:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\nscores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\nPrediction scores of a language modeling head. These can be logits for each vocabulary when not using\nbeam search or log softmax for each vocabulary token when using beam search\nkwargs (`Dict[str, Any]`, *optional*):\nAdditional kwargs that are specific to a logits processor.\n\nReturn:\n`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:\nThe processed prediction scores.", "source": "github-repos"}
{"code": "def parquet_to_df(filename, use_threads=1):\n    \n    try:\n        return pq.read_table(filename, use_threads=use_threads).to_pandas()\n    except pa.lib.ArrowIOError:\n        print('Could not read parquet file {:s}'.format(filename))\n        return None", "docstring": "parquet_to_df: Reads a Parquet file into a Pandas DataFrame\nArgs:\nfilename (string): The full path to the filename for the Parquet file\nntreads (int): The number of threads to use (defaults to 1)", "source": "juraj-google-style"}
{"code": "def resumeProducing(self):\n    self._running = True\n    for consumer in self._consumers.values():\n        (queue_object, _) = (yield consumer.channel.basic_consume(queue=consumer.queue, consumer_tag=consumer.tag))\n        deferred = self._read(queue_object, consumer)\n        deferred.addErrback((lambda f: _legacy_twisted_log.msg), '_read failed on consumer {c}', c=consumer, logLevel=logging.ERROR)\n    _legacy_twisted_log.msg('AMQP connection successfully established')", "docstring": "Starts or resumes the retrieval of messages from the server queue.\n\nThis method starts receiving messages from the server, they will be\npassed to the consumer callback.\n\n.. note:: This is called automatically when :meth:`.consume` is called,\nso users should not need to call this unless :meth:`.pauseProducing`\nhas been called.\n\nReturns:\ndefer.Deferred: fired when the production is ready to start", "source": "codesearchnet"}
{"code": "def create_pipeline_box(self, pipeline_key, name, **kwargs):\n\t\t\n\t\t\n\t\tif not (pipeline_key and name):\n\t\t\treturn requests.codes.bad_request, None\n\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline_key,\n\t\t\t\t\t\tself.boxes_suffix\n\t\t\t\t\t\t]) \n\n\t\tkwargs.update({'name':name})\n\n\t\tnew_box = StreakBox(**kwargs)\n\t\t\n\t\tcode, data = self._req('put', uri, new_box.to_dict(rw = True))\n\t\t\n\t\treturn code, data", "docstring": "Creates a box int the pipeline specified with the provided attributes.\nArgs:\nname\trequired name string\nkwargs\t{...} see StreakBox object for details\nreturn\t(status code, box dict)", "source": "juraj-google-style"}
{"code": "def display_arr(screen, arr, video_size, transpose):\n    if transpose:\n        pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))\n    else:\n        pyg_img = arr\n    pyg_img = pygame.transform.scale(pyg_img, video_size)\n    screen.blit(pyg_img, (0, 0))", "docstring": "Display an image to the pygame screen.\n\nArgs:\nscreen (pygame.Surface): the pygame surface to write frames to\narr (np.ndarray): numpy array representing a single frame of gameplay\nvideo_size (tuple): the size to render the frame as\ntranspose (bool): whether to transpose the frame before displaying\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "async def export_image(self, name: str):\n        \n        response = await self.docker._query(\n            \"images/{name}/get\".format(name=name), \"GET\"\n        )\n        return response.content", "docstring": "Get a tarball of an image by name or id.\n\nArgs:\nname: name/id of the image to be exported\n\nReturns:\nStreamreader of tarball image", "source": "juraj-google-style"}
{"code": "def execute_no_wait(self, cmd, walltime=2, envs={}):\n    (stdin, stdout, stderr) = self.ssh_client.exec_command(self.prepend_envs(cmd, envs), bufsize=(- 1), timeout=walltime)\n    return (None, stdout, stderr)", "docstring": "Execute asynchronousely without waiting for exitcode\n\nArgs:\n- cmd (string): Commandline string to be executed on the remote side\n- walltime (int): timeout to exec_command\n\nKWargs:\n- envs (dict): A dictionary of env variables\n\nReturns:\n- None, stdout (readable stream), stderr (readable stream)\n\nRaises:\n- ChannelExecFailed (reason)", "source": "codesearchnet"}
{"code": "def run(self, *args, backend=None, **kwargs):\n    if (backend is None):\n        if (self._default_backend is None):\n            backend = self.__get_backend(DEFAULT_BACKEND_NAME)\n        else:\n            backend = self.__get_backend(self._default_backend)\n    elif isinstance(backend, str):\n        backend = self.__get_backend(backend)\n    return backend.run(self.ops, self.n_qubits, *args, **kwargs)", "docstring": "Run the circuit.\n\n`Circuit` have several backends. When `backend` parameter is specified,\nuse specified backend, and otherwise, default backend is used.\nOther parameters are passed to the backend.\n\nThe meaning of parameters are depends on the backend specifications.\nHowever, following parameters are commonly used.\n\nCommonly used args (Depends on backend):\nshots (int, optional): The number of sampling the circuit.\nreturns (str, optional):  The category of returns value.\ne.g. \"statevector\" returns the state vector after run the circuit.\n\"shots\" returns the counter of measured value.\ntoken, url (str, optional): The token and URL for cloud resource.\n\nReturns:\nDepends on backend.\n\nRaises:\nDepends on backend.", "source": "codesearchnet"}
{"code": "def split(self, grouper):\n        \n        data = self.to_df(condition=True, entities=True)\n        data = data.drop('condition', axis=1)\n\n        subsets = []\n        for i, (name, g) in enumerate(data.groupby(grouper)):\n            name = '%s.%s' % (self.name, name)\n            col = self.__class__(name=name, data=g, source=self.source,\n                                 run_info=getattr(self, 'run_info', None))\n            subsets.append(col)\n        return subsets", "docstring": "Split the current SparseRunVariable into multiple columns.\n\nArgs:\ngrouper (iterable): list to groupby, where each unique value will\nbe taken as the name of the resulting column.\n\nReturns:\nA list of SparseRunVariables, one per unique value in the\ngrouper.", "source": "juraj-google-style"}
{"code": "def shapes_match(a, b):\n    if (isinstance(a, (tuple, list)) and isinstance(b, (tuple, list))):\n        if (len(a) != len(b)):\n            return False\n        return all([shapes_match(ia, ib) for (ia, ib) in zip(a, b)])\n    elif (isinstance(a, dict) and isinstance(b, dict)):\n        if (len(a) != len(b)):\n            return False\n        match = True\n        for ((ak, av), (bk, bv)) in zip(a.items(), b.items()):\n            match = (match and all([((ak == bk) and shapes_match(av, bv))]))\n        return match\n    else:\n        shape_checker = shape_checkers[(type(a), type(b))]\n        return shape_checker(a, b)", "docstring": "Recursively check if shapes of object `a` and `b` match.\n\nWill walk lists, tuples and dicts.\n\nArgs:\na: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict)\nto check for matching shapes against `b`.\nb: object to check for matching shape against `a`.\n\nReturns:\nA boolean indicating whether the shapes of `a` and `b` match.", "source": "codesearchnet"}
{"code": "def get_connected_client(self):\n    if (self.__sem is not None):\n        (yield self.__sem.acquire())\n    client = None\n    (newly_created, client) = self._get_client_from_pool_or_make_it()\n    if newly_created:\n        res = (yield client.connect())\n        if (not res):\n            LOG.warning(\"can't connect to %s\", client.title)\n            raise tornado.gen.Return(ClientError((\"can't connect to %s\" % client.title)))\n    raise tornado.gen.Return(client)", "docstring": "Gets a connected Client object.\n\nIf max_size is reached, this method will block until a new client\nobject is available.\n\nReturns:\nA Future object with connected Client instance as a result\n(or ClientError if there was a connection problem)", "source": "codesearchnet"}
{"code": "def get_bucket(self, key, rate=None, capacity=None, **kwargs):\n    return buckets.Bucket(key=key, rate=(rate or self.rate), capacity=(capacity or self.capacity), storate=self.storate, **kwargs)", "docstring": "Fetch a Bucket for the given key.\n\nrate and capacity might be overridden from the Throttler defaults.\n\nArgs:\nrate (float): Units regenerated by second, or None to keep\nThrottler defaults\ncapacity (int): Maximum units available, or None to keep Throttler\ndefaults", "source": "codesearchnet"}
{"code": "def _recurse(self, matrix, m_list, indices, output_m_list=[]):\n    if self._finished:\n        return\n    while (m_list[(- 1)][1] == 0):\n        m_list = copy(m_list)\n        m_list.pop()\n        if (not m_list):\n            matrix_sum = np.sum(matrix)\n            if (matrix_sum < self._current_minimum):\n                self.add_m_list(matrix_sum, output_m_list)\n            return\n    if (m_list[(- 1)][1] > len(indices.intersection(m_list[(- 1)][2]))):\n        return\n    if ((len(m_list) == 1) or (m_list[(- 1)][1] > 1)):\n        if (self.best_case(matrix, m_list, indices) > self._current_minimum):\n            return\n    index = self.get_next_index(matrix, m_list[(- 1)], indices)\n    m_list[(- 1)][2].remove(index)\n    matrix2 = np.copy(matrix)\n    m_list2 = deepcopy(m_list)\n    output_m_list2 = copy(output_m_list)\n    matrix2[(index, :)] *= m_list[(- 1)][0]\n    matrix2[(:, index)] *= m_list[(- 1)][0]\n    output_m_list2.append([index, m_list[(- 1)][3]])\n    indices2 = copy(indices)\n    indices2.remove(index)\n    m_list2[(- 1)][1] -= 1\n    self._recurse(matrix2, m_list2, indices2, output_m_list2)\n    self._recurse(matrix, m_list, indices, output_m_list)", "docstring": "This method recursively finds the minimal permutations using a binary\ntree search strategy.\n\nArgs:\nmatrix: The current matrix (with some permutations already\nperformed).\nm_list: The list of permutations still to be performed\nindices: Set of indices which haven't had a permutation\nperformed on them.", "source": "codesearchnet"}
{"code": "def __init__(self, d: Dict, nlp) -> None:\n        \n\n        self.dependencies = d[\"dependencies\"] if \"dependencies\" in d else []\n        self.description = d[\"description\"] if \"description\" in d else \"\"\n        self.active = tf_transfer(d[\"is_active\"])\n        self.identifier = d[\"identifier\"]\n        self.output_format = d[\"output_format\"]\n        self.polarity = tf_transfer(d[\"polarity\"])\n        self.patterns = []\n        for pattern_idx, a_pattern in enumerate(d[\"pattern\"]):\n            this_pattern = Pattern(a_pattern, nlp)\n            self.patterns.append(this_pattern)", "docstring": "Storing information for each Rule, create list of Pattern for a rule\nArgs:\nd: Dict\nnlp\n\nReturns:", "source": "juraj-google-style"}
{"code": "def select_rows(self, rows):\n        \n        self.values = self.values.iloc[rows]\n        self.index = self.index.iloc[rows, :]\n        for prop in self._property_columns:\n            vals = getattr(self, prop)[rows]\n            setattr(self, prop, vals)", "docstring": "Truncate internal arrays to keep only the specified rows.\n\nArgs:\nrows (array): An integer or boolean array identifying the indices\nof rows to keep.", "source": "juraj-google-style"}
{"code": "def serialize(self, datas):\n        \n        self._metas = OrderedDict({\n            'references': self.get_meta_references(datas),\n        })\n\n        return self.get_enabled_references(datas, self._metas['references'])", "docstring": "Serialize datas to manifest structure with metas and references.\n\nOnly references are returned, metas are assigned to attribute\n``ManifestSerializer._metas``.\n\nArguments:\ndatas (dict): Data where to search for reference declarations. This\nis commonly the fully parsed manifest.\n\nReturns:\ncollections.OrderedDict: Serialized enabled references datas.", "source": "juraj-google-style"}
{"code": "def __mul__(self, right: torch.Tensor) -> Rigid:\n    if not isinstance(right, torch.Tensor):\n        raise TypeError('The other multiplicand must be a Tensor')\n    new_rots = self._rots * right\n    new_trans = self._trans * right[..., None]\n    return Rigid(new_rots, new_trans)", "docstring": "Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.\n\nArgs:\nright:\nThe tensor multiplicand\nReturns:\nThe product", "source": "github-repos"}
{"code": "def _maybe_cast_inputs(self, inputs, input_list=None):\n    if not input_list:\n        input_list = nest.flatten(inputs)\n    compute_dtype_object = self._compute_dtype_object\n    should_autocast = self._autocast and compute_dtype_object and compute_dtype_object.is_floating\n    if should_autocast and any(map(self._should_cast_single_input, input_list)):\n        return nest.map_structure(self._cast_single_input, inputs)\n    else:\n        return inputs", "docstring": "Maybe casts the inputs to the compute dtype.\n\nIf self._compute_dtype is floating-point, and self_autocast is True,\nfloating-point inputs are casted to self._compute_dtype.\n\nArgs:\ninputs: Input tensor, or structure of input tensors.\ninput_list: Flat list of input tensors.\n\nReturns:\n`inputs`, but tensors may have been casted to self._compute_dtype", "source": "github-repos"}
{"code": "def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):\n    shannon_radii = []\n    for s in species_list:\n        try:\n            radius = s.get_shannon_radius(cn_roman)\n            shannon_radii.append({'species': s, 'radius': radius, 'radii_diff': (radius - radius_to_compare)})\n        except KeyError:\n            pass\n    return shannon_radii", "docstring": "Utility func to get Shannon radii for a particular coordination number.\n\nAs the Shannon radii depends on charge state and coordination number,\nspecies without an entry for a particular coordination number will\nbe skipped.\n\nArgs:\nspecies_list (list): A list of Species to get the Shannon radii for.\ncn_roman (str): The coordination number as a roman numeral. See\nSpecie.get_shannon_radius for more details.\nradius_to_compare (float, optional): If set, the data will be returned\nwith a \"radii_diff\" key, containing the difference between the\nshannon radii and this radius.\n\nReturns:\n(list of dict): The Shannon radii for all Species in species. Formatted\nas a list of dictionaries, with the keys:\n\n- \"species\": The species with charge state.\n- \"radius\": The Shannon radius for the species.\n- \"radius_diff\": The difference between the Shannon radius and the\nradius_to_compare optional argument.", "source": "codesearchnet"}
{"code": "def _merge_with(self, other: 'DynamicRaggedShape') -> 'DynamicRaggedShape':\n    max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n    a = self._with_num_row_partitions(max_num_row_partitions)\n    b = other._with_num_row_partitions(max_num_row_partitions)\n    new_row_partitions = [rp_a._merge_precomputed_encodings(rp_b) for rp_a, rp_b in zip(a._row_partitions, b._row_partitions)]\n    new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n    new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n    new_inner_shape = a._inner_shape\n    return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, True, new_static_inner_shape)", "docstring": "Merge two shapes that are equal modulo num_row_partitions.\n\nThe resulting num_row_partitions is the maximum of the two\nnum_row_partitions.\n\nArgs:\nother: a DynamicRaggedShape representing the same shape with a possibly\ndifferent number of row partitions.\n\nReturns:\nA DynamicRaggedShape with the same shape and the maximum of the\nnum_row_partitions of the two shapes.", "source": "github-repos"}
{"code": "def get_interpolated_value(self, x):\n    if (len(self.ydim) == 1):\n        return get_linear_interpolated_value(self.x, self.y, x)\n    else:\n        return [get_linear_interpolated_value(self.x, self.y[(:, k)], x) for k in range(self.ydim[1])]", "docstring": "Returns an interpolated y value for a particular x value.\n\nArgs:\nx: x value to return the y value for\n\nReturns:\nValue of y at x", "source": "codesearchnet"}
{"code": "def _MakeTimestamp(self, start=None, end=None):\n    \n    mysql_unsigned_bigint_max = 18446744073709551615\n    ts_start = int(start or 0)\n    if end is None:\n      ts_end = mysql_unsigned_bigint_max\n    else:\n      ts_end = int(end)\n    if ts_start == 0 and ts_end == mysql_unsigned_bigint_max:\n      return None\n    else:\n      return (ts_start, ts_end)", "docstring": "Create a timestamp using a start and end time.\n\nArgs:\nstart: Start timestamp.\nend: End timestamp.\n\nReturns:\nA tuple (start, end) of converted timestamps or None for all time.", "source": "juraj-google-style"}
{"code": "def run_bidirectional_blast(reference, other_genome, dbtype, outdir=''):\n    if (dbtype == 'nucl'):\n        command = 'blastn'\n    elif (dbtype == 'prot'):\n        command = 'blastp'\n    else:\n        raise ValueError('dbtype must be \"nucl\" or \"prot\"')\n    (r_folder, r_name, r_ext) = utils.split_folder_and_path(reference)\n    (g_folder, g_name, g_ext) = utils.split_folder_and_path(other_genome)\n    run_makeblastdb(infile=reference, dbtype=dbtype, outdir=r_folder)\n    run_makeblastdb(infile=other_genome, dbtype=dbtype, outdir=g_folder)\n    r_vs_g = (((r_name + '_vs_') + g_name) + '_blast.out')\n    r_vs_g = op.join(outdir, r_vs_g)\n    if (op.exists(r_vs_g) and (os.stat(r_vs_g).st_size != 0)):\n        log.debug('{} vs {} BLAST already run'.format(r_name, g_name))\n    else:\n        cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, op.join(g_folder, g_name), r_vs_g)\n        log.debug('Running: {}'.format(cmd))\n        retval = subprocess.call(cmd, shell=True)\n        if (retval == 0):\n            log.debug('BLASTed {} vs {}'.format(g_name, r_name))\n        else:\n            log.error('Error running {}, exit code {}'.format(command, retval))\n    g_vs_r = (((g_name + '_vs_') + r_name) + '_blast.out')\n    g_vs_r = op.join(outdir, g_vs_r)\n    if (op.exists(g_vs_r) and (os.stat(g_vs_r).st_size != 0)):\n        log.debug('{} vs {} BLAST already run'.format(g_name, r_name))\n    else:\n        cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, op.join(r_folder, r_name), g_vs_r)\n        log.debug('Running: {}'.format(cmd))\n        retval = subprocess.call(cmd, shell=True)\n        if (retval == 0):\n            log.debug('BLASTed {} vs {}'.format(g_name, r_name))\n        else:\n            log.error('Error running {}, exit code {}'.format(command, retval))\n    return (r_vs_g, g_vs_r)", "docstring": "BLAST a genome against another, and vice versa.\n\nThis function requires BLAST to be installed, do so by running:\nsudo apt install ncbi-blast+\n\nArgs:\nreference (str): path to \"reference\" genome, aka your \"base strain\"\nother_genome (str): path to other genome which will be BLASTed to the reference\ndbtype (str): \"nucl\" or \"prot\" - what format your genome files are in\noutdir (str): path to folder where BLAST outputs should be placed\n\nReturns:\nPaths to BLAST output files.\n(reference_vs_othergenome.out, othergenome_vs_reference.out)", "source": "codesearchnet"}
{"code": "def gill_king(mat, eps=1e-16):\n    if (not scipy.sparse.issparse(mat)):\n        mat = numpy.asfarray(mat)\n    assert numpy.allclose(mat, mat.T)\n    size = mat.shape[0]\n    mat_diag = mat.diagonal()\n    gamma = abs(mat_diag).max()\n    off_diag = abs((mat - numpy.diag(mat_diag))).max()\n    delta = (eps * max((gamma + off_diag), 1))\n    beta = numpy.sqrt(max(gamma, (off_diag / size), eps))\n    lowtri = _gill_king(mat, beta, delta)\n    return lowtri", "docstring": "Gill-King algorithm for modified cholesky decomposition.\n\nArgs:\nmat (numpy.ndarray):\nMust be a non-singular and symmetric matrix.  If sparse, the result\nwill also be sparse.\neps (float):\nError tolerance used in algorithm.\n\n\nReturns:\n(numpy.ndarray):\nLower triangular Cholesky factor.\n\nExamples:\n>>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]\n>>> lowtri = gill_king(mat)\n>>> print(numpy.around(lowtri, 4))\n[[2.     0.     0.    ]\n[1.     2.2361 0.    ]\n[0.5    1.118  1.2264]]\n>>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))\n[[4.    2.    1.   ]\n[2.    6.    3.   ]\n[1.    3.    3.004]]", "source": "codesearchnet"}
{"code": "def _CreateOutputFileHandles(self, output_type):\n    \n    gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type)\n    gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, \"wb\",\n                                    self.GZIP_COMPRESSION_LEVEL,\n                                    gzip_filehandle_parent)\n    self.temp_output_trackers[output_type] = TempOutputTracker(\n        output_type=output_type,\n        gzip_filehandle=gzip_filehandle,\n        gzip_filehandle_parent=gzip_filehandle_parent)\n    return self.temp_output_trackers[output_type]", "docstring": "Creates a new gzipped output tempfile for the output type.\n\nWe write to JSON data to gzip_filehandle to get compressed data. We hold a\nreference to the original filehandle (gzip_filehandle_parent) so we can pass\nthe gzip data to bigquery.\n\nArgs:\noutput_type: string of export type to be used in filename. e.g.\nExportedFile\n\nReturns:\nA TempOutputTracker object", "source": "juraj-google-style"}
{"code": "async def info(self):\n    stat = self._items.stat()\n    return {'indx': self._items.index(), 'metrics': self._metrics.index(), 'stat': stat}", "docstring": "Returns information about the CryoTank instance.\n\nReturns:\ndict: A dict containing items and metrics indexes.", "source": "codesearchnet"}
{"code": "def compute(self, x_arr, y_arr):\n        \n        return np.linalg.norm(x_arr - y_arr, axis=-1)", "docstring": "Compute distance.\n\nArgs:\nx_arr:      `np.ndarray` of vectors.\ny_arr:      `np.ndarray` of vectors.\n\nRetruns:\n`np.ndarray` of distances.", "source": "juraj-google-style"}
{"code": "def _unary_op(cls, x: 'TensorFluent', op: Callable[([tf.Tensor], tf.Tensor)], dtype: tf.DType) -> 'TensorFluent':\n    x = x.cast(dtype)\n    t = op(x.tensor)\n    scope = x.scope.as_list()\n    batch = x.batch\n    return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the unary `op` applied to fluent `x`.\n\nArgs:\nx: The input fluent.\nop: The unary operation.\ndtype: The output's data type.\n\nReturns:\nA TensorFluent wrapping the unary operator's output.", "source": "codesearchnet"}
{"code": "def DownloadPqlResultToList(self, pql_query, values=None):\n    results = []\n    self._PageThroughPqlSet(pql_query, results.append, values)\n    return results", "docstring": "Downloads the results of a PQL query to a list.\n\nArgs:\npql_query: str a statement filter to apply (the query should not include\nthe limit or the offset)\n[optional]\nvalues: A dict of python objects or a list of raw SOAP values to bind\nto the pql_query.\n\nReturns:\na list of lists with the first being the header row and each subsequent\nlist being a row of results.", "source": "codesearchnet"}
{"code": "def is_unknown(input, model_file=None, model_proto=None, name=None):\n  \n\n  return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(\n      input, model_file=model_file, model_proto=model_proto, name=name,\n      piece_type=0)", "docstring": "Returns true if input id is unknown piece.\n\nArgs:\ninput: An arbitrary tensor of int32.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of bool with the same shape as input.", "source": "juraj-google-style"}
{"code": "def get_labels_encoder(self, data_dir):\n    \n    label_filepath = os.path.join(data_dir, self.vocab_filename)\n    return text_encoder.TokenTextEncoder(label_filepath)", "docstring": "Builds encoder for the given class labels.\n\nArgs:\ndata_dir: data directory\n\nReturns:\nAn encoder for class labels.", "source": "juraj-google-style"}
{"code": "def define_simulation_graph(batch_env, algo_cls, config):\n    step = tf.Variable(0, False, dtype=tf.int32, name='global_step')\n    is_training = tf.placeholder(tf.bool, name='is_training')\n    should_log = tf.placeholder(tf.bool, name='should_log')\n    do_report = tf.placeholder(tf.bool, name='do_report')\n    force_reset = tf.placeholder(tf.bool, name='force_reset')\n    algo = algo_cls(batch_env, step, is_training, should_log, config)\n    (done, score, summary) = tools.simulate(batch_env, algo, should_log, force_reset)\n    message = 'Graph contains {} trainable variables.'\n    tf.logging.info(message.format(tools.count_weights()))\n    return tools.AttrDict(locals())", "docstring": "Define the algorithm and environment interaction.\n\nArgs:\nbatch_env: In-graph environments object.\nalgo_cls: Constructor of a batch algorithm.\nconfig: Configuration object for the algorithm.\n\nReturns:\nObject providing graph elements via attributes.", "source": "codesearchnet"}
{"code": "def _build(self, inputs):\n    if (self._axis is None):\n        axis = list(range(1, inputs.shape.ndims))\n    else:\n        axis = self._axis\n    original_dtype = inputs.dtype\n    if (original_dtype in [tf.float16, tf.bfloat16]):\n        inputs = tf.cast(inputs, tf.float32)\n    if (inputs.get_shape().ndims < 2):\n        raise base.NotSupportedError('Layer normalization expects inputs of at least rank 2. Got inputs of rank {}.'.format(inputs.get_shape().ndims))\n    params_shape = inputs.get_shape()[(- 1):]\n    if self._scale:\n        if (self.GAMMA not in self._initializers):\n            self._initializers[self.GAMMA] = create_gamma_initializer()\n        self._gamma = tf.get_variable(self.GAMMA, shape=params_shape, dtype=inputs.dtype, initializer=self._initializers[self.GAMMA], partitioner=self._partitioners.get(self.GAMMA), regularizer=self._regularizers.get(self.GAMMA))\n    else:\n        self._gamma = None\n    if self._offset:\n        if (self.BETA not in self._initializers):\n            self._initializers[self.BETA] = create_beta_initializer()\n        self._beta = tf.get_variable(self.BETA, shape=params_shape, dtype=inputs.dtype, initializer=self._initializers[self.BETA], partitioner=self._partitioners.get(self.BETA), regularizer=self._regularizers.get(self.BETA))\n    else:\n        self._beta = None\n    (mean, var) = tf.nn.moments(inputs, axis, keep_dims=True)\n    normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta, self._gamma, self._eps)\n    if (original_dtype in [tf.float16, tf.bfloat16]):\n        normalized = tf.cast(normalized, dtype=original_dtype)\n    return normalized", "docstring": "Connects the LayerNorm module into the graph.\n\nArgs:\ninputs: a Tensor of dimensionality >= 2.\n\nReturns:\nnormalized: layer normalized outputs with same shape as inputs.\n\nRaises:\nbase.NotSupportedError: If `inputs` has less than 2 dimensions.", "source": "codesearchnet"}
{"code": "def __init__(self, augmented_graph_view: _AugmentedGraphView, options: save_options.SaveOptions):\n    self.augmented_graph_view = augmented_graph_view\n    self.options = options\n    self._trackable_objects, self.node_paths, self.node_ids, self._slot_variables, self.object_names = checkpoint_util.objects_ids_and_slot_variables_and_paths(self.augmented_graph_view)\n    untraced_functions = self.augmented_graph_view.untraced_functions\n    if untraced_functions:\n        logging.info('Found untraced functions such as %s while saving (showing %d of %d). These functions will not be directly callable after loading.', ', '.join(untraced_functions[:_NUM_DISPLAY_UNTRACED_FUNCTIONS]), min(_NUM_DISPLAY_UNTRACED_FUNCTIONS, len(untraced_functions)), len(untraced_functions))\n    self._initialize_save_and_restore_functions()\n    self._initialize_nodes_and_concrete_functions()\n    self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()", "docstring": "Initializes a SaveableView.\n\nArgs:\naugmented_graph_view: A GraphView object.\noptions: A SaveOptions instance.", "source": "github-repos"}
{"code": "def _get_trainable_state(self):\n    trainable_state = weakref.WeakKeyDictionary()\n    for layer in self._flatten_layers():\n        trainable_state[layer] = layer.trainable\n    return trainable_state", "docstring": "Get the `trainable` state of each sublayer.\n\nReturns:\nA dict mapping all sublayers to their `trainable` value.", "source": "github-repos"}
{"code": "def n_choose_k(n, k):\n    \n    if n == 0:\n        return 0\n    return reduce(lambda x, y: x * y[0] / y[1],\n                  zip(range(n - k + 1, n + 1),\n                      range(1, k + 1)), 1)", "docstring": "Return the number of combinations for n choose k.\n\nArgs:\nn (int): the total number of options .\nk (int): The number of elements.\n\nReturns:\nint: returns the binomial coefficient", "source": "juraj-google-style"}
{"code": "def check_secret(self, secret):\n        \n        try:\n            return hmac.compare_digest(secret, self.secret)\n        except AttributeError:  \n            return secret == self.secret", "docstring": "Checks if the secret string used in the authentication attempt\nmatches the \"known\" secret string. Some mechanisms will override this\nmethod to control how this comparison is made.\n\nArgs:\nsecret: The secret string to compare against what was used in the\nauthentication attempt.\n\nReturns:\nTrue if the given secret matches the authentication attempt.", "source": "juraj-google-style"}
{"code": "def _compute_sum_image(features, max_area_width, max_area_height=1, height=1,\n                       name=None):\n  \n  with tf.name_scope(name, default_name=\"compute_sum_image\"):\n    feature_shape = common_layers.shape_list(features)\n    batch_size = feature_shape[0]\n    length = feature_shape[-2]\n    depth = feature_shape[-1]\n    width = length \n    features_2d = tf.reshape(features, [batch_size, height, width, depth])\n    width_cum = tf.cumsum(features_2d, axis=-2, name=\"compute_integral_h\")\n    integral_image = tf.cumsum(width_cum, axis=-3, name=\"compute_integral_v\")\n    padded_image = tf.pad(\n        integral_image, [[0, 0], [1, 0], [1, 0], [0, 0]], constant_values=0)\n    height_list = []\n    width_list = []\n    dst_images = []\n    src_images_diag = []\n    src_images_h = []\n    src_images_v = []\n    size_tensor = tf.ones_like(padded_image[:, :, :, 0],\n                               dtype=tf.int32)\n    for area_height in range(max_area_height):\n      for area_width in range(max_area_width):\n        dst_images.append(\n            tf.reshape(\n                padded_image[:, area_height + 1:, area_width + 1:, :],\n                [batch_size, -1, depth]))\n        src_images_diag.append(\n            tf.reshape(\n                padded_image[:, :-area_height - 1, :-area_width - 1, :],\n                [batch_size, -1, depth]))\n        src_images_h.append(\n            tf.reshape(\n                padded_image[:, area_height + 1:, :-area_width - 1, :],\n                [batch_size, -1, depth]))\n        src_images_v.append(\n            tf.reshape(\n                padded_image[:, :-area_height - 1, area_width + 1:, :],\n                [batch_size, -1, depth]))\n        height_list.append(\n            tf.reshape(\n                size_tensor[:, area_height + 1:, area_width + 1:] *\\\n                (area_height + 1), [batch_size, -1]))\n        width_list.append(\n            tf.reshape(\n                size_tensor[:, area_height + 1:, area_width + 1:] *\\\n                (area_width + 1), [batch_size, -1]))\n    sum_image = tf.subtract(\n        tf.concat(dst_images, axis=1) + tf.concat(src_images_diag, axis=1),\n        tf.concat(src_images_v, axis=1) + tf.concat(src_images_h, axis=1))\n    area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2)\n    area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2)\n  return sum_image, area_heights, area_widths", "docstring": "Computes area sums for features.\n\nArgs:\nfeatures: a Tensor in a shape of [batch_size, height * width, depth].\nmax_area_width: the max width allowed for an area.\nmax_area_height: the max height allowed for an area.\nheight: the height of the image.\nname: the namescope.\nReturns:\nsum_image: A Tensor of shape [batch_size, num_areas, depth]\narea_heights: A Tensor of shape [batch_size, num_areas, 1]\narea_widths: A Tensor of shape [batch_size, num_areas, 1]", "source": "juraj-google-style"}
{"code": "def labels_to_dataset(labels, label_mode, num_classes):\n    label_ds = tf.data.Dataset.from_tensor_slices(labels)\n    if label_mode == 'binary':\n        label_ds = label_ds.map(lambda x: tf.expand_dims(tf.cast(x, 'float32'), axis=-1), num_parallel_calls=tf.data.AUTOTUNE)\n    elif label_mode == 'categorical':\n        label_ds = label_ds.map(lambda x: tf.one_hot(x, num_classes), num_parallel_calls=tf.data.AUTOTUNE)\n    return label_ds", "docstring": "Create a `tf.data.Dataset` from the list/tuple of labels.\n\nArgs:\nlabels: list/tuple of labels to be converted into a `tf.data.Dataset`.\nlabel_mode: String describing the encoding of `labels`. Options are:\n- `\"binary\"` indicates that the labels (there can be only 2) are encoded\nas `float32` scalars with values 0 or 1\n(e.g. for `binary_crossentropy`).\n- `\"categorical\"` means that the labels are mapped into a categorical\nvector.  (e.g. for `categorical_crossentropy` loss).\nnum_classes: number of classes of labels.\n\nReturns:\nA `tf.data.Dataset` instance.", "source": "github-repos"}
{"code": "def _value_to_pb(value, proto_type):\n    \n    data_type_pb = getattr(google_dot_protobuf_dot_wrappers__pb2, proto_type)()\n    ParseDict(value, data_type_pb)\n    return data_type_pb", "docstring": "Convert a value to protobuf. e.g. BoolValue, Int32Value.\n\nArgs:\nvalue (dict): A dict that needs to be converted to protobuf.\nproto_type (str): The type of the Protobuf.\n\nReturns:\nAn instance of the specified protobuf.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=True, tag=enums.Tags.DEFAULT):\n        \n        super(Boolean, self).__init__(tag, type=enums.Types.BOOLEAN)\n        self.logger = logging.getLogger(__name__)\n        self.value = value\n        self.length = self.LENGTH\n\n        self.validate()", "docstring": "Create a Boolean object.\n\nArgs:\nvalue (bool): The value of the Boolean. Optional, defaults to True.\ntag (Tags): An enumeration defining the tag of the Boolean object.\nOptional, defaults to Tags.DEFAULT.", "source": "juraj-google-style"}
{"code": "def from_json_file(cls, file_name):\n    with open(file_name) as json_data:\n        config = json.load(json_data)\n    return cls(config)", "docstring": "Construct OneViewClient using a json file.\n\nArgs:\nfile_name: json full path.\n\nReturns:\nOneViewClient:", "source": "codesearchnet"}
{"code": "def _ReadUnionDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    return self._ReadDataTypeDefinitionWithMembers(definitions_registry, definition_values, data_types.UnionDefinition, definition_name, supports_conditions=False)", "docstring": "Reads an union data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nUnionDefinition: union data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def reserve_ids(self, token, channel, quantity):\n    quantity = str(quantity)\n    url = self.url('{}/{}/reserve/{}/'.format(token, channel, quantity))\n    req = self.remote_utils.get_url(url)\n    if (req.status_code is not 200):\n        raise RemoteDataNotFoundError(('Invalid req: ' + req.status_code))\n    out = req.json()\n    return [(out[0] + i) for i in range(out[1])]", "docstring": "Requests a list of next-available-IDs from the server.\n\nArguments:\nquantity (int): The number of IDs to reserve\n\nReturns:\nint[quantity]: List of IDs you've been granted", "source": "codesearchnet"}
{"code": "def metrics(self):\n    collected_metrics = []\n    for layer in self._flatten_layers():\n        with layer._metrics_lock:\n            collected_metrics.extend(layer._metrics)\n    return collected_metrics", "docstring": "List of metrics added using the `add_metric()` API.\n\nExample:\n\n>>> input = tf.keras.layers.Input(shape=(3,))\n>>> d = tf.keras.layers.Dense(2)\n>>> output = d(input)\n>>> d.add_metric(tf.reduce_max(output), name='max')\n>>> d.add_metric(tf.reduce_min(output), name='min')\n>>> [m.name for m in d.metrics]\n['max', 'min']\n\nReturns:\nA list of `Metric` objects.", "source": "github-repos"}
{"code": "def drop_scored_calls(self, names):\n\n    def _remove(calls, names):\n        d = dict([(k, v) for (k, v) in calls.items() if (k not in names)])\n        return d\n    if isinstance(names, str):\n        names = [names]\n    output = self.copy()\n    output['scored_calls'] = output['scored_calls'].apply((lambda x: _remove(x, names)))\n    return output", "docstring": "Take a name or list of scored call names and drop those from the scored calls\n\nArgs:\nnames (list): list of names to drop or a single string name to drop\n\nReturns:\nCellDataFrame: The CellDataFrame modified.", "source": "codesearchnet"}
{"code": "def find_invalid_filenames(filenames, repository_root):\n    \n    errors = []\n    for filename in filenames:\n        if not os.path.abspath(filename).startswith(repository_root):\n            errors.append((filename, 'Error: File %s does not belong to '\n                           'repository %s' % (filename, repository_root)))\n        if not os.path.exists(filename):\n            errors.append((filename,\n                           'Error: File %s does not exist' % (filename, )))\n        if os.path.isdir(filename):\n            errors.append((filename,\n                           'Error: %s is a directory. Directories are'\n                           ' not yet supported' % (filename, )))\n\n    return errors", "docstring": "Find files that does not exist, are not in the repo or are directories.\n\nArgs:\nfilenames: list of filenames to check\nrepository_root: the absolute path of the repository's root.\n\nReturns: A list of errors.", "source": "juraj-google-style"}
{"code": "def get_collection(self, uri=None, filter='', path=''):\n        \n        if not uri:\n            uri = self._base_uri\n\n        if filter:\n            filter = self.make_query_filter(filter)\n            filter = \"?\" + filter[1:]\n\n        uri = \"{uri}{path}{filter}\".format(uri=uri, path=path, filter=filter)\n        logger.debug('Get resource collection (uri = %s)' % uri)\n\n        response = self._connection.get(uri)\n\n        return self.get_members(response)", "docstring": "Retrieves a collection of resources.\n\nUse this function when the 'start' and 'count' parameters are not allowed in the GET call.\nOtherwise, use get_all instead.\n\nOptional filtering criteria may be specified.\n\nArgs:\nfilter (list or str): General filter/query string.\npath (str): path to be added with base URI\n\nReturns:\nCollection of the requested resource.", "source": "juraj-google-style"}
{"code": "def join(self, basepath, *paths):\n    if not basepath.startswith(S3FileSystem.S3_PREFIX):\n        raise ValueError('Basepath %r must be S3 path.' % basepath)\n    path = basepath\n    for p in paths:\n        path = path.rstrip('/') + '/' + p.lstrip('/')\n    return path", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\nbasepath: string path of the first component of the path\npaths: path components to be added\n\nReturns: full path after combining all of the return nulled components", "source": "github-repos"}
{"code": "def truncate(text, length=255):\n    lines = []\n    i = 0\n    while (i < (len(text) - 1)):\n        try:\n            lines.append(text[i:(i + length)])\n            i += length\n        except IndexError as e:\n            lines.append(text[i:])\n    return lines", "docstring": "Splits the message into a list of strings of of length `length`\n\nArgs:\ntext (str): The text to be divided\nlength (int, optional): The length of the chunks of text. \\\nDefaults to 255.\n\nReturns:\nlist: Text divided into chunks of length `length`", "source": "codesearchnet"}
{"code": "def deprecated_endpoints(*args):\n\n    def deprecated_wrapper(func):\n        if '_tf_deprecated_api_names' in func.__dict__:\n            raise DeprecatedNamesAlreadySetError(f'Cannot set deprecated names for {func.__name__} to {args}. Deprecated names are already set to {func._tf_deprecated_api_names}.')\n        func._tf_deprecated_api_names = args\n        return func\n    return deprecated_wrapper", "docstring": "Decorator for marking endpoints deprecated.\n\nThis decorator does not print deprecation messages.\nTODO(annarev): eventually start printing deprecation warnings when\n@deprecation_endpoints decorator is added.\n\nArgs:\n*args: Deprecated endpoint names.\n\nReturns:\nA function that takes symbol as an argument and adds\n_tf_deprecated_api_names to that symbol.\n_tf_deprecated_api_names would be set to a list of deprecated\nendpoint names for the symbol.", "source": "github-repos"}
{"code": "def all_downstreams(self, node):\n        \n        nodes = [node]\n        nodes_seen = set()\n        i = 0\n        while i < len(nodes):\n            downstreams = self.downstream(nodes[i])\n            for downstream_node in downstreams:\n                if downstream_node not in nodes_seen:\n                    nodes_seen.add(downstream_node)\n                    nodes.append(downstream_node)\n            i += 1\n        return [\n            node_ for node_ in self.topological_sort() if node_ in nodes_seen\n        ]", "docstring": "Returns a list of all nodes ultimately downstream\nof the given node in the dependency graph, in\ntopological order.\n\nArgs:\nnode (str): The node whose downstream nodes you want to find.\n\nReturns:\nlist: A list of nodes that are downstream from the node.", "source": "juraj-google-style"}
{"code": "def get_pipeline_path(pipeline_name, working_directory):\n    \n    logger.debug(\"starting\")\n\n    \n    logger.debug(f\"current directory is {working_directory}\")\n\n    \n    pipeline_path = os.path.abspath(os.path.join(\n        working_directory,\n        'pipelines',\n        pipeline_name + '.yaml'))\n\n    if os.path.isfile(pipeline_path):\n        logger.debug(f\"Found {pipeline_path}\")\n    else:\n        logger.debug(f\"{pipeline_name} not found in current \"\n                     \"directory/pipelines folder. Looking in pypyr install \"\n                     \"directory instead.\")\n        pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        logger.debug(f\"pypyr installation directory is: {pypyr_dir}\")\n        pipeline_path = os.path.abspath(os.path.join(\n            pypyr_dir,\n            'pipelines',\n            pipeline_name + '.yaml'))\n\n        if os.path.isfile(pipeline_path):\n            logger.debug(f\"Found {pipeline_path}\")\n        else:\n            raise PipelineNotFoundError(f\"{pipeline_name}.yaml not found in \"\n                                        f\"either \"\n                                        f\"{working_directory}/pipelines \"\n                                        f\"or {pypyr_dir}/pipelines\")\n\n    logger.debug(\"done\")\n    return pipeline_path", "docstring": "Look for the pipeline in the various places it could be.\n\nFirst checks the cwd. Then checks pypyr/pipelines dir.\n\nArgs:\npipeline_name: string. Name of pipeline to find\nworking_directory: string. Path in which to look for pipeline_name.yaml\n\nReturns:\nAbsolute path to the pipeline_name.yaml file\n\nRaises:\nPipelineNotFoundError: if pipeline_name.yaml not found in working_dir\nor in {pypyr install dir}/pipelines.", "source": "juraj-google-style"}
{"code": "def edit(self, customer_id, data={}, **kwargs):\n    url = '{}/{}'.format(self.base_url, customer_id)\n    return self.put_url(url, data, **kwargs)", "docstring": "Edit Customer information from given dict\n\nReturns:\nCustomer Dict which was edited", "source": "codesearchnet"}
{"code": "def __init__(self, filepath, eps=10, max_rows=None):\n        \n\n        \n        \n        \n        \n        \n        self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])\n\n        \n        self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)\n\n        \n        self.max_rows = max_rows", "docstring": "Initialization for the LiveSimulator Class\nArgs:\neps (int): Events Per Second that the simulator will emit events (default = 10)\nmax_rows (int): The maximum number of rows to generate (default = None (go forever))", "source": "juraj-google-style"}
{"code": "def show_fields(self, block=None):\n    mapping = self._mapping()\n    if (block is None):\n        return mapping\n    elif (block == 'top'):\n        blocks = set()\n        for key in mapping.keys():\n            blocks.add(key.split('.')[0])\n        block_map = {}\n        for b in blocks:\n            block_map[b] = 'object'\n    else:\n        block_map = {}\n        for (key, value) in mapping.items():\n            if key.startswith(block):\n                block_map[key] = value\n    return block_map", "docstring": "Retrieve and return the mapping for the given metadata block.\n\nArguments:\nblock (str): The top-level field to fetch the mapping for (for example, ``\"mdf\"``),\nor the special values ``None`` for everything or ``\"top\"`` for just the\ntop-level fields.\n**Default:** ``None``.\nindex (str): The Search index to map. **Default:** The current index.\n\nReturns:\ndict: ``field:datatype`` pairs.", "source": "codesearchnet"}
{"code": "def fn(x: list[Union[int, float]], y: Optional[Union[int, str]]=None):\n    return x", "docstring": "Test function\n\nArgs:\nx: The input\ny: Also the input", "source": "github-repos"}
{"code": "def add_tile(self, address, tile):\n    if (address in self._tiles):\n        raise ArgumentError('Tried to add two tiles at the same address', address=address)\n    self._tiles[address] = tile", "docstring": "Add a tile to handle all RPCs at a given address.\n\nArgs:\naddress (int): The address of the tile\ntile (RPCDispatcher): A tile object that inherits from RPCDispatcher", "source": "codesearchnet"}
{"code": "def delete_endpoint_config(self, endpoint_config_name):\n        \n        LOGGER.info('Deleting endpoint configuration with name: {}'.format(endpoint_config_name))\n        self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)", "docstring": "Delete an Amazon SageMaker endpoint configuration.\n\nArgs:\nendpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.", "source": "juraj-google-style"}
{"code": "def get_schedule_distribution(schedule, global_step=None):\n  \n  interpolation, steps, pmfs = schedule\n  if len(pmfs) == 1:\n    \n    \n    \n    return pmfs[0]\n  if global_step is None:\n    global_step = tf.train.get_or_create_global_step()\n  if interpolation == 'step':\n    interpolation_fn = step_interpolation\n  elif interpolation == 'linear':\n    interpolation_fn = linear_interpolation\n  else:\n    raise ValueError('Invalid interpolation strategy: %s' % interpolation)\n  return tf.reshape(\n      tf.py_func(\n          func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)),\n          inp=[global_step], Tout=tf.float32), [len(pmfs[0])])", "docstring": "Computes the pmf of a schedule given the global_step.\n\nArgs:\nschedule: A schedule tuple, see encode_schedule for details.\nglobal_step: A scalar tensor, the step to query the schedule.\n\nReturns:\nA 1-D tensor of probs, the sampling distribution of the global_step.", "source": "juraj-google-style"}
{"code": "def rationalize(flt: float, denominators: Set[int] = None) -> Fraction:\n    \n    if denominators is None:\n        denominators = _DENOMINATORS\n    frac = Fraction.from_float(flt).limit_denominator()\n    if frac.denominator not in denominators:\n        raise ValueError('Cannot rationalize')\n    return frac", "docstring": "Convert a floating point number to a Fraction with a small\ndenominator.\n\nArgs:\nflt:            A floating point number\ndenominators:   Collection of standard denominators. Default is\n1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512,\n1024, 2048, 4096, 8192\n\nRaises:\nValueError:     If cannot rationalize float", "source": "juraj-google-style"}
{"code": "def __call__(self, name, value):\n        \n        super(ObjectTypeChecker, self).__call__(name, value)", "docstring": "Call method.\n\nArgs:\nname (str): the value's name.\nvalue (str): the value to check.\n\nRaises:\nValueError: if value is not type str.", "source": "juraj-google-style"}
{"code": "def history(self, samples=500, pandas=True, stream=\"default\"):\n        \n        node = \"history\" if stream == \"default\" else \"events\"\n        query = gql( % node)\n\n        response = self._exec(query, samples=samples)\n        lines = [json.loads(line)\n                 for line in response['project']['run'][node]]\n        if pandas:\n            pandas = util.get_module(\"pandas\")\n            if pandas:\n                lines = pandas.DataFrame.from_records(lines)\n            else:\n                print(\"Unable to load pandas, call history with pandas=False\")\n        return lines", "docstring": "Return history metrics for a run\n\nArgs:\nsamples (int, optional): The number of samples to return\npandas (bool, optional): Return a pandas dataframe\nstream (str, optional): \"default\" for metrics, \"system\" for machine metrics", "source": "juraj-google-style"}
{"code": "def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'):\n    \n    if not path_specification:\n      return None\n\n    file_entry = self._file_system.GetFileEntryByPathSpec(path_specification)\n    if file_entry is None:\n      return None\n\n    file_object = file_entry.GetFileObject()\n    if file_object is None:\n      return None\n\n    registry_file = dfwinreg_regf.REGFWinRegistryFile(\n        ascii_codepage=ascii_codepage)\n\n    try:\n      registry_file.Open(file_object)\n    except IOError as exception:\n      logger.warning(\n          'Unable to open Windows Registry file with error: {0!s}'.format(\n              exception))\n      file_object.close()\n      return None\n\n    return registry_file", "docstring": "Opens the Windows Registry file specified by the path specification.\n\nArgs:\npath_specification (dfvfs.PathSpec): path specification.\nascii_codepage (Optional[str]): ASCII string codepage.\n\nReturns:\nWinRegistryFile: Windows Registry file or None.", "source": "juraj-google-style"}
{"code": "def unwrap(tensor):\n    while isinstance(tensor, (PrettyTensor, Loss)):\n        tensor = tensor.tensor\n    return tensor", "docstring": "Returns the underlying tensor if tensor is wrapped or tensor.\n\nArgs:\ntensor: The tensor to unwrap.\nReturns:\nTensor or if it is a pretty tensor, the unwrapped version.\nRaises:\nValueError: if tensor holds a sequence.", "source": "codesearchnet"}
{"code": "def to_dict(self):\n    return self._base(((key, (value.to_dict() if isinstance(value, AutoDict) else value)) for (key, value) in self.items()))", "docstring": "Recursively casts a AutoDict into a regular dictionary. All nested\nAutoDict values are also converted.\n\nReturns:\ndict: a copy of this dict without autovivification\n\nExample:\n>>> from ubelt.util_dict import AutoDict\n>>> auto = AutoDict()\n>>> auto[1] = 1\n>>> auto['n1'] = AutoDict()\n>>> static = auto.to_dict()\n>>> assert not isinstance(static, AutoDict)\n>>> assert not isinstance(static['n1'], AutoDict)", "source": "codesearchnet"}
{"code": "def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None):\n    \n    course_videos = (CourseVideo.objects.select_related('video')\n                     .prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')\n                     .filter(video__encoded_videos__profile__profile_name='youtube')\n                     .order_by('id')\n                     .distinct())\n\n    if course_ids:\n        course_videos = course_videos.filter(course_id__in=course_ids)\n\n    course_videos = course_videos.values_list('course_id', 'video__edx_video_id')\n    if limit is not None and offset is not None:\n        course_videos = course_videos[offset: offset+limit]\n\n    course_videos_with_yt_profile = []\n    for course_id, edx_video_id in course_videos:\n        yt_profile = EncodedVideo.objects.filter(\n            video__edx_video_id=edx_video_id,\n            profile__profile_name='youtube'\n        ).first()\n\n        if yt_profile:\n            course_videos_with_yt_profile.append((\n                course_id, edx_video_id, yt_profile.url\n            ))\n\n    return course_videos_with_yt_profile", "docstring": "Returns a list that contains all the course ids and video ids with the youtube profile\n\nArgs:\ncourse_ids (list): valid course ids\nlimit (int): batch records limit\noffset (int): an offset for selecting a batch\nReturns:\n(list): Tuples of course_id, edx_video_id and youtube video url", "source": "juraj-google-style"}
{"code": "def push(self,message,message_type):\n        \n            \n        super(Producer,self).send(message,message_type)", "docstring": "Send a reply message of the given type\n\nArgs:\n- message: the message to publish\n- message_type: the type of message being sent", "source": "juraj-google-style"}
{"code": "def constant(value, dtype=None, shape=None, name=None):\n    if dtype is None:\n        dtype = floatx()\n    return constant_op.constant(value, dtype=dtype, shape=shape, name=name)", "docstring": "Creates a constant tensor.\n\nArgs:\nvalue: A constant value (or list)\ndtype: The type of the elements of the resulting tensor.\nshape: Optional dimensions of resulting tensor.\nname: Optional name for the tensor.\n\nReturns:\nA Constant Tensor.", "source": "github-repos"}
{"code": "def InitializeDownload(self, http_request, http=None, client=None):\n    self.EnsureUninitialized()\n    if ((http is None) and (client is None)):\n        raise exceptions.UserError('Must provide client or http.')\n    http = (http or client.http)\n    if (client is not None):\n        http_request.url = client.FinalizeTransferUrl(http_request.url)\n    url = http_request.url\n    if self.auto_transfer:\n        end_byte = self.__ComputeEndByte(0)\n        self.__SetRangeHeader(http_request, 0, end_byte)\n        response = http_wrapper.MakeRequest((self.bytes_http or http), http_request)\n        if (response.status_code not in self._ACCEPTABLE_STATUSES):\n            raise exceptions.HttpError.FromResponse(response)\n        self.__initial_response = response\n        self.__SetTotal(response.info)\n        url = response.info.get('content-location', response.request_url)\n    if (client is not None):\n        url = client.FinalizeTransferUrl(url)\n    self._Initialize(http, url)\n    if self.auto_transfer:\n        self.StreamInChunks()", "docstring": "Initialize this download by making a request.\n\nArgs:\nhttp_request: The HttpRequest to use to initialize this download.\nhttp: The httplib2.Http instance for this request.\nclient: If provided, let this client process the final URL before\nsending any additional requests. If client is provided and\nhttp is not, client.http will be used instead.", "source": "codesearchnet"}
{"code": "def render(self,\n            trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],\n            batch: Optional[int] = None) -> None:\n        \n\n        non_fluents, initial_state, states, actions, interms, rewards = trajectories\n\n        non_fluents = dict(non_fluents)\n        states  = dict((name, fluent[0]) for name, fluent in states)\n        actions = dict((name, fluent[0]) for name, fluent in actions)\n        rewards = rewards[0]\n\n        idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1')\n\n        start = initial_state[idx][0]\n        g = non_fluents['GOAL/1']\n        path = states['location/1']\n        deltas = actions['move/1']\n\n        centers = non_fluents['DECELERATION_ZONE_CENTER/2']\n        decays = non_fluents['DECELERATION_ZONE_DECAY/1']\n        zones = [(x, y, d) for (x, y), d in zip(centers, decays)]\n\n        self._ax1 = plt.gca()\n\n        self._render_state_space()\n        self._render_start_and_goal_positions(start, g)\n        self._render_deceleration_zones(zones)\n        self._render_state_action_trajectory(start, path, deltas)\n\n        plt.title('Navigation', fontweight='bold')\n        plt.legend(loc='lower right')\n        plt.show()", "docstring": "Render the simulated state-action `trajectories` for Navigation domain.\n\nArgs:\nstats: Performance statistics.\ntrajectories: NonFluents, states, actions, interms and rewards.\nbatch: Number of batches to render.", "source": "juraj-google-style"}
{"code": "def from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes=None):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_GRAPH_DEF)\n    with _ops.Graph().as_default():\n        with _session.Session() as sess:\n            if not gfile.Exists(graph_def_file):\n                raise IOError(\"File '{0}' does not exist.\".format(graph_def_file))\n            with gfile.GFile(graph_def_file, 'rb') as f:\n                file_content = f.read()\n            try:\n                graph_def = _graph_pb2.GraphDef()\n                graph_def.ParseFromString(file_content)\n            except (_text_format.ParseError, DecodeError):\n                try:\n                    print(\"Ignore 'tcmalloc: large alloc' warnings.\")\n                    if not isinstance(file_content, str):\n                        file_content = file_content.decode('utf-8')\n                    graph_def = _graph_pb2.GraphDef()\n                    _text_format.Merge(file_content, graph_def)\n                except (_text_format.ParseError, DecodeError):\n                    raise IOError(\"Unable to parse input file '{}'.\".format(graph_def_file))\n            if sys.byteorder == 'big':\n                bst.swap_tensor_content_in_graph_node(graph_def, 'little', 'big')\n            load_model_in_session = True\n            try:\n                _import_graph_def(graph_def, name='')\n            except _NotFoundError:\n                load_model_in_session = False\n            if load_model_in_session:\n                if not _is_frozen_graph(sess):\n                    raise ValueError('Please freeze the graph using freeze_graph.py.')\n                input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)\n                output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)\n                _set_tensor_shapes(input_tensors, input_shapes)\n                return cls(sess.graph_def, input_tensors, output_tensors)\n            else:\n                if not input_shapes:\n                    raise ValueError('input_shapes must be defined for this model.')\n                if set(input_arrays) != set(input_shapes.keys()):\n                    raise ValueError('input_shapes must contain a value for each item in input_array.')\n                input_arrays_with_shape = [(name, input_shapes[name]) for name in input_arrays]\n                return cls(graph_def, input_tensors=None, output_tensors=None, input_arrays_with_shape=input_arrays_with_shape, output_arrays=output_arrays)", "docstring": "Creates a TFLiteConverter class from a file containing a frozen GraphDef.\n\nArgs:\ngraph_def_file: Full filepath of file containing frozen GraphDef.\ninput_arrays: List of input tensors to freeze graph with.\noutput_arrays: List of output tensors to freeze graph with.\ninput_shapes: Dict of strings representing input tensor names to list of\nintegers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}).\nAutomatically determined when input shapes is None (e.g., {\"foo\" :\nNone}). (default None)\n\nReturns:\nTFLiteConverter class.\n\nRaises:\nIOError:\nFile not found.\nUnable to parse input file.\nValueError:\nThe graph is not frozen.\ninput_arrays or output_arrays contains an invalid tensor name.\ninput_shapes is not correctly defined when required", "source": "github-repos"}
{"code": "def convert_argument(self, arg_name, arg_value):\n    self._ensure_loaded()\n    type_name = self.param_type(arg_name)\n    if (type_name is None):\n        return arg_value\n    val = typeinfo.type_system.convert_to_type(arg_value, type_name)\n    validators = self.annotated_params[arg_name].validators\n    if (len(validators) == 0):\n        return val\n    type_obj = typeinfo.type_system.get_type(type_name)\n    try:\n        for (validator_name, extra_args) in validators:\n            if (not hasattr(type_obj, validator_name)):\n                raise ValidationError('Could not find validator specified for argument', argument=arg_name, validator_name=validator_name, type=str(type_obj), method=dir(type_obj))\n            validator = getattr(type_obj, validator_name)\n            validator(val, *extra_args)\n    except (ValueError, TypeError) as exc:\n        raise ValidationError(exc.args[0], argument=arg_name, arg_value=val)\n    return val", "docstring": "Given a parameter with type information, convert and validate it.\n\nArgs:\narg_name (str): The name of the argument to convert and validate\narg_value (object): The value to convert and validate\n\nReturns:\nobject: The converted value.", "source": "codesearchnet"}
{"code": "def _compress_url(link):\n        \n        comment_re = re.compile(r'/comments/([A-Za-z\\d]{2,})(?:/[^\\s]+/([A-Za-z\\d]+))?')\n        message_re = re.compile(r'/message/messages/([A-Za-z\\d]+)')\n        matches = re.findall(comment_re, link)\n\n        if len(matches) == 0:\n            matches = re.findall(message_re, link)\n\n            if len(matches) == 0:\n                return None\n            else:\n                return 'm,' + matches[0]\n        else:\n            if matches[0][1] == '':\n                return 'l,' + matches[0][0]\n            else:\n                return 'l,' + matches[0][0] + ',' + matches[0][1]", "docstring": "Convert a reddit URL into the short-hand used by usernotes.\n\nArguments:\nlink: a link to a comment, submission, or message (str)\n\nReturns a String of the shorthand URL", "source": "juraj-google-style"}
{"code": "def rms(x):\n    \n    try:\n        return (np.array(x) ** 2).mean() ** 0.5\n    except:\n        x = np.array(dropna(x))\n        invN = 1.0 / len(x)\n        return (sum(invN * (x_i ** 2) for x_i in x)) ** .5", "docstring": "Root Mean Square\"\n\nArguments:\nx (seq of float): A sequence of numerical values\n\nReturns:\nThe square root of the average of the squares of the values\n\nmath.sqrt(sum(x_i**2 for x_i in x) / len(x))\n\nor\n\nreturn (np.array(x) ** 2).mean() ** 0.5\n\n>>> rms([0, 2, 4, 4])\n3.0", "source": "juraj-google-style"}
{"code": "def set(self, key, val):\n    self._create_file_if_none_exists()\n    with open(self.filename, 'r+b') as file_object:\n        cache_pickle = pickle.load(file_object)\n        cache_pickle[key] = val\n        file_object.seek(0)\n        pickle.dump(cache_pickle, file_object)", "docstring": "Sets a value in a key.\n\nArgs:\nkey (str): Key for the value.\nval: Value to set.\n\nReturns:\nRetrieved value.", "source": "codesearchnet"}
{"code": "def isinf(x):\n    if any_symbolic_tensors((x,)):\n        return Isinf().symbolic_call(x)\n    return backend.numpy.isinf(x)", "docstring": "Test element-wise for positive or negative infinity.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput boolean tensor.", "source": "github-repos"}
{"code": "def _broadcast_shape_helper(shape_x, shape_y):\n    broadcasted_dims = reversed(list(itertools.zip_longest(reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tensor_shape.Dimension(1))))\n    return_dims = []\n    for dim_x, dim_y in broadcasted_dims:\n        if dim_x.value is None or dim_y.value is None:\n            if dim_x.value is not None and dim_x.value > 1:\n                return_dims.append(dim_x)\n            elif dim_y.value is not None and dim_y.value > 1:\n                return_dims.append(dim_y)\n            else:\n                return_dims.append(None)\n        elif dim_x.value == 1:\n            return_dims.append(dim_y)\n        elif dim_y.value == 1:\n            return_dims.append(dim_x)\n        elif dim_x.value == dim_y.value:\n            return_dims.append(dim_x.merge_with(dim_y))\n        else:\n            return None\n    return return_dims", "docstring": "Helper functions for is_broadcast_compatible and broadcast_shape.\n\nArgs:\nshape_x: A `TensorShape`\nshape_y: A `TensorShape`\n\nReturns:\nReturns None if the shapes are not broadcast compatible,\na list of the broadcast dimensions otherwise.", "source": "github-repos"}
{"code": "def create_table(cls, table_name, schema_fields, table_data):\n    table_schema = bigquery.TableSchema()\n    for field_def in schema_fields:\n        field = bigquery.TableFieldSchema()\n        field.name = field_def[0]\n        field.type = field_def[1]\n        if len(field_def) > 2:\n            field.mode = field_def[2]\n            if len(field_def) > 3:\n                for subfield_def in field_def[3]:\n                    subfield = bigquery.TableFieldSchema()\n                    subfield.name = subfield_def[0]\n                    subfield.type = subfield_def[1]\n                    field.fields.append(subfield)\n        table_schema.fields.append(field)\n    table = bigquery.Table(tableReference=bigquery.TableReference(projectId=cls.project, datasetId=cls.dataset_id, tableId=table_name), schema=table_schema)\n    request = bigquery.BigqueryTablesInsertRequest(projectId=cls.project, datasetId=cls.dataset_id, table=table)\n    cls.bigquery_client.client.tables.Insert(request)\n    cls.bigquery_client.insert_rows(cls.project, cls.dataset_id, table_name, table_data)\n    return f'{cls.project}.{cls.dataset_id}.{table_name}'", "docstring": "Create a BigQuery table with the specified schema and data.\n\nArgs:\ntable_name: Name of the table to create\nschema_fields: List of field definitions in the format:\n(name, type, [mode, [subfields]])\ntable_data: List of dictionaries containing the data to insert\n\nReturns:\nFully qualified table name (project.dataset.table)", "source": "github-repos"}
{"code": "def resolve(self, host: str) -> ResolveResult:\n        \n\n        _logger.debug(__('Lookup address {0}.', host))\n\n        try:\n            host = self.hook_dispatcher.call(PluginFunctions.resolve_dns, host\n                                             ) or host\n        except HookDisconnected:\n            pass\n\n        cache_key = (host, self._family)\n\n        if self._cache and cache_key in self._cache:\n            resolve_result = self._cache[cache_key]\n            _logger.debug(__('Return by cache {0}.', resolve_result))\n\n            if self._rotate:\n                resolve_result.rotate()\n\n            return resolve_result\n\n        address_infos = []\n        dns_infos = []\n\n        if not self.dns_python_enabled:\n            families = ()\n        elif self._family == IPFamilyPreference.any:\n            families = (socket.AF_INET, socket.AF_INET6)\n        elif self._family == IPFamilyPreference.ipv4_only:\n            families = (socket.AF_INET, )\n        else:\n            families = (socket.AF_INET6, )\n\n        for family in families:\n            datetime_now = datetime.datetime.utcnow()\n            try:\n                answer = yield from self._query_dns(host, family)\n            except DNSNotFound:\n                continue\n            else:\n                dns_infos.append(DNSInfo(datetime_now, answer.response.answer))\n                address_infos.extend(self._convert_dns_answer(answer))\n\n        if not address_infos:\n            \n\n            if self._family == IPFamilyPreference.any:\n                family = socket.AF_UNSPEC\n            elif self._family == IPFamilyPreference.ipv4_only:\n                family = socket.AF_INET\n            else:\n                family = socket.AF_INET6\n\n            results = yield from self._getaddrinfo(host, family)\n            address_infos.extend(self._convert_addrinfo(results))\n\n        _logger.debug(__('Resolved addresses: {0}.', address_infos))\n\n        resolve_result = ResolveResult(address_infos, dns_infos)\n\n        if self._cache:\n            self._cache[cache_key] = resolve_result\n\n        self.event_dispatcher.notify(PluginFunctions.resolve_dns_result, host, resolve_result)\n\n        if self._rotate:\n            resolve_result.shuffle()\n\n        return resolve_result", "docstring": "Resolve hostname.\n\nArgs:\nhost: Hostname.\n\nReturns:\nResolved IP addresses.\n\nRaises:\nDNSNotFound if the hostname could not be resolved or\nNetworkError if there was an error connecting to DNS servers.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def __init__(self, option_strings, dest, copyright_text=None, nargs=None,\n                 **kwargs):\n        \n\n        \n        if nargs is not None:\n            raise ValueError('nargs not allowed for CopyRight')\n\n        self.copyright = copyright_text\n\n        \n        super(CopyRight, self).__init__(option_strings, dest, nargs=0,\n                                        **kwargs)", "docstring": "Initialize class and spawn self as Base Class w/o nargs\n\nArgs:\noption_strings (list): list of str giving command line flags that\ncall this action\n\ndest (str): namespace reference to value\n\ncopyright_text (str): str to print\n\nnargs (str): number of args as special char or int\n\n**kwargs (various): optional arguments to pass to super call", "source": "juraj-google-style"}
{"code": "def __init__(self, manager):\n        \n        self.manager = manager\n        self._var_cache = dict((k, EnvironmentVariable(k, self))\n                               for k in manager.parent_environ.iterkeys())", "docstring": "Creates an `EnvironmentDict`.\n\nArgs:\noverride_existing_lists (bool): If True, the first call to append\nor prepend will override the value in `environ` and effectively\nact as a setenv operation. If False, pre-existing values will\nbe appended/prepended to as usual.", "source": "juraj-google-style"}
{"code": "def circle(y_true, y_pred, ref_labels=None, ref_embeddings=None, remove_diagonal=True, gamma=80, margin=0.4):\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true = ops.cast(y_true, 'int32')\n    ref_embeddings = y_pred if ref_embeddings is None else ops.convert_to_tensor(ref_embeddings)\n    ref_labels = y_true if ref_labels is None else ops.cast(ref_labels, 'int32')\n    optim_pos = margin\n    optim_neg = 1 + margin\n    delta_pos = margin\n    delta_neg = 1 - margin\n    pairwise_cosine_distances = 1 - ops.matmul(y_pred, ops.transpose(ref_embeddings))\n    pairwise_cosine_distances = ops.maximum(pairwise_cosine_distances, 0.0)\n    positive_mask, negative_mask = build_pos_neg_masks(y_true, ref_labels, remove_diagonal=remove_diagonal)\n    positive_mask = ops.cast(positive_mask, dtype=pairwise_cosine_distances.dtype)\n    negative_mask = ops.cast(negative_mask, dtype=pairwise_cosine_distances.dtype)\n    pos_weights = optim_pos + pairwise_cosine_distances\n    pos_weights = pos_weights * positive_mask\n    pos_weights = ops.maximum(pos_weights, 0.0)\n    neg_weights = optim_neg - pairwise_cosine_distances\n    neg_weights = neg_weights * negative_mask\n    neg_weights = ops.maximum(neg_weights, 0.0)\n    pos_dists = delta_pos - pairwise_cosine_distances\n    neg_dists = delta_neg - pairwise_cosine_distances\n    pos_wdists = -1 * gamma * pos_weights * pos_dists\n    neg_wdists = gamma * neg_weights * neg_dists\n    p_loss = ops.logsumexp(ops.where(positive_mask, pos_wdists, float('-inf')), axis=1)\n    n_loss = ops.logsumexp(ops.where(negative_mask, neg_wdists, float('-inf')), axis=1)\n    circle_loss = ops.softplus(p_loss + n_loss)\n    backend.set_keras_mask(circle_loss, circle_loss > 0)\n    return circle_loss", "docstring": "Computes the Circle loss.\n\nIt is designed to minimize within-class distances and maximize between-class\ndistances in L2 normalized embedding space.\n\nArgs:\ny_true: Tensor with ground truth labels in integer format.\ny_pred: Tensor with predicted L2 normalized embeddings.\nref_labels: Optional integer tensor with labels for reference\nembeddings. If `None`, defaults to `y_true`.\nref_embeddings: Optional tensor with L2 normalized reference embeddings.\nIf `None`, defaults to `y_pred`.\nremove_diagonal: Boolean, whether to remove self-similarities from\npositive mask. Defaults to `True`.\ngamma: Float, scaling factor for the loss. Defaults to `80`.\nmargin: Float, relaxation factor for the loss. Defaults to `0.4`.\n\nReturns:\nCircle loss value.", "source": "github-repos"}
{"code": "def draw_points(self, *points):\n    point_array = ffi.new('SDL_Point[]', len(points))\n    for (i, p) in enumerate(points):\n        point_array[i] = p._ptr[0]\n    check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points)))", "docstring": "Draw multiple points on the current rendering target.\n\nArgs:\n*points (Point): The points to draw.\n\nRaises:\nSDLError: If an error is encountered.", "source": "codesearchnet"}
{"code": "def launch(self):\n    server = self._make_server()\n    thread = threading.Thread(target=server.serve_forever, name='TensorBoard')\n    thread.daemon = True\n    thread.start()\n    return server.get_url()", "docstring": "Python API for launching TensorBoard.\n\nThis method is the same as main() except it launches TensorBoard in\na separate permanent thread. The configure() method must be called\nfirst.\n\nReturns:\nThe URL of the TensorBoard web server.\n\n:rtype: str", "source": "codesearchnet"}
{"code": "def _get_parsed_args(command_name, doc, argv):\n    \n    \n    _LOGGER.debug('Parsing docstring:  with arguments %s.', doc, argv)\n    args = docopt(doc, argv=argv)\n    if command_name == settings.command:\n        args[command_name] = True\n    return args", "docstring": "Parse the docstring with docopt.\n\nArgs:\ncommand_name: The name of the subcommand to parse.\ndoc: A docopt-parseable string.\nargv: The list of arguments to pass to docopt during parsing.\n\nReturns:\nThe docopt results dictionary. If the subcommand has the same name as\nthe primary command, the subcommand value will be added to the\ndictionary.", "source": "juraj-google-style"}
{"code": "def is_tensor_on_canonical_device(self, tensor_name):\n    \n    device = self.get_tensor_device(tensor_name)\n    return not device or device == self.canonical_device", "docstring": "Whether the tensor is on the first (canonical) device.\n\nTensors not assigned to a device are assumed to be on all devices, including\nthe canonical device.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\n\nReturns:\na boolean indicating whether the tensor is on the first device.", "source": "juraj-google-style"}
{"code": "def enroll_user(cls, enterprise_customer, user, course_mode, *course_ids):\n    (enterprise_customer_user, __) = EnterpriseCustomerUser.objects.get_or_create(enterprise_customer=enterprise_customer, user_id=user.id)\n    enrollment_client = EnrollmentApiClient()\n    succeeded = True\n    for course_id in course_ids:\n        try:\n            enrollment_client.enroll_user_in_course(user.username, course_id, course_mode)\n        except HttpClientError as exc:\n            if cls.is_user_enrolled(user, course_id, course_mode):\n                succeeded = True\n            else:\n                succeeded = False\n                default_message = 'No error message provided'\n                try:\n                    error_message = json.loads(exc.content.decode()).get('message', default_message)\n                except ValueError:\n                    error_message = default_message\n                logging.error('Error while enrolling user %(user)s: %(message)s', dict(user=user.username, message=error_message))\n        if succeeded:\n            (__, created) = EnterpriseCourseEnrollment.objects.get_or_create(enterprise_customer_user=enterprise_customer_user, course_id=course_id)\n            if created:\n                track_enrollment('admin-enrollment', user.id, course_id)\n    return succeeded", "docstring": "Enroll a single user in any number of courses using a particular course mode.\n\nArgs:\nenterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\nuser: The user who needs to be enrolled in the course\ncourse_mode: The mode with which the enrollment should be created\n*course_ids: An iterable containing any number of course IDs to eventually enroll the user in.\n\nReturns:\nBoolean: Whether or not enrollment succeeded for all courses specified", "source": "codesearchnet"}
{"code": "def contextual_override(*, cascade: bool=False, override_attrs: bool=False, **variables) -> ContextManager[dict[str, ContextualOverride]]:\n    vs = {}\n    for k, v in variables.items():\n        if not isinstance(v, ContextualOverride):\n            v = ContextualOverride(v, cascade, override_attrs)\n        vs[k] = v\n    return contextual_scope(_global_contextual_overrides, **vs)", "docstring": "Context manager to provide contextual values under a scope.\n\nPlease be aware that contextual value override are per-thread. If you want\nto propagate the contextual value override to other threads, please obtain\na wrapper function for a user function using\n`pg.with_contextual_override(func)`.\n\nArgs:\ncascade: If True, this override will apply to both current scope and nested\nscope, meaning that this `pg.contextual_override` will take precedence\nover all nested `pg.contextual_override` on the overriden variables.\noverride_attrs: If True, this override will apply to attributes that already\nhave values. Otherwise overridden variables will only be used for\ncontextual attributes whose values are not present.\n**variables: Key/values as override for contextual attributes.\n\nReturns:\nA dict of attribute names to their contextual overrides.", "source": "github-repos"}
{"code": "def get_shared_file(self, sharekey=None):\n        \n        if not sharekey:\n            raise Exception(\"You must specify a sharekey.\")\n        endpoint = '/api/sharedfile/{0}'.format(sharekey)\n        data = self._make_request('GET', endpoint)\n        return SharedFile.NewFromJSON(data)", "docstring": "Returns a SharedFile object given by the sharekey.\n\nArgs:\nsharekey (str): Sharekey of the SharedFile you want to retrieve.\n\nReturns:\nSharedFile", "source": "juraj-google-style"}
{"code": "def verify_ed25519_signature_cmdln(args=None, exception=SystemExit):\n    \n    args = args or sys.argv[1:]\n    parser = argparse.ArgumentParser(\n        description=)\n    parser.add_argument('--pubkey', help='path to a base64-encoded ed25519 pubkey, optional')\n    parser.add_argument('file_path')\n    parser.add_argument('sig_path')\n    opts = parser.parse_args(args)\n    log = logging.getLogger('scriptworker')\n    log.setLevel(logging.DEBUG)\n    logging.basicConfig()\n    pubkeys = {}\n    if opts.pubkey:\n        pubkeys['cmdln'] = [read_from_file(opts.pubkey)]\n    pubkeys.update(dict(DEFAULT_CONFIG['ed25519_public_keys']))\n    contents = read_from_file(opts.file_path, file_type='binary')\n    signature = read_from_file(opts.sig_path, file_type='binary')\n    for key_type, seeds in pubkeys.items():\n        for seed in seeds:\n            try:\n                verify_ed25519_signature(\n                    ed25519_public_key_from_string(seed), contents, signature,\n                    \"didn't work with {}\".format(seed)\n                )\n                log.info(\"Verified good with {} seed {} !\".format(\n                    key_type, seed\n                ))\n                sys.exit(0)\n            except ScriptWorkerEd25519Error:\n                pass\n    raise exception(\"This is not a valid signature!\")", "docstring": "Verify an ed25519 signature from the command line.\n\nArgs:\nargs (list, optional): the commandline args to parse. If ``None``, use\n``sys.argv[1:]``. Defaults to ``None``.\nexception (Exception, optional): the exception to raise on failure.\nDefaults to ``SystemExit``.", "source": "juraj-google-style"}
{"code": "def visit_indexer(self, indexer: _evaluation.IndexerNode) -> _sql_data_types.Select:\n    collection_result = self.visit(indexer.collection)\n    index_result = self.visit(indexer.index)\n    sql_alias = f'indexed_{collection_result.sql_alias}'\n    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(f'element_at(COLLECT_LIST({collection_result.sql_alias}),{index_result.as_operand()} + 1)', collection_result.sql_data_type, _sql_alias=sql_alias), from_part=f'{collection_result.to_subquery()}', sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath indexer expression to Spark SQL.\n\nArgs:\nindexer: The `_Indexer` Expression node.\n\nReturns:\nA compiled Spark SQL expression.", "source": "github-repos"}
{"code": "def Patch(self, request, global_params=None):\n    config = self.GetMethodConfig('Patch')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Updates a `WorkerPool`.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsWorkerPoolsPatchRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Operation) The response message.", "source": "github-repos"}
{"code": "def solve(self):\n    if self.assignments:\n        return self.assignments\n    self._complete()\n    assignments = {var: self._get_nonfalse_values(var) for var in self.variables}\n    ground_pivots = self.ground_truth.simplify(assignments).extract_pivots(assignments)\n    for pivot, possible_values in ground_pivots.items():\n        if pivot in assignments:\n            assignments[pivot] &= set(possible_values)\n    something_changed = True\n    while something_changed:\n        something_changed = False\n        and_terms = []\n        for var in self.variables:\n            or_terms = []\n            for value in assignments[var].copy():\n                implication = self.implications[var][value].simplify(assignments)\n                if implication is FALSE:\n                    assignments[var].remove(value)\n                    something_changed = True\n                else:\n                    or_terms.append(implication)\n                self.implications[var][value] = implication\n            and_terms.append(Or(or_terms))\n        d = And(and_terms)\n        for pivot, possible_values in d.extract_pivots(assignments).items():\n            if pivot in assignments:\n                length_before = len(assignments[pivot])\n                assignments[pivot] &= set(possible_values)\n                length_after = len(assignments[pivot])\n                something_changed |= length_before != length_after\n    self.register_variable = pytd_utils.disabled_function\n    self.implies = pytd_utils.disabled_function\n    self.assignments = assignments\n    return assignments", "docstring": "Solve the system of equations.\n\nReturns:\nAn assignment, mapping strings (variables) to sets of strings (values).", "source": "github-repos"}
{"code": "def delete_vm(access_token, subscription_id, resource_group, vm_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Compute/virtualMachines/', vm_name,\n                        '?api-version=', COMP_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a virtual machine.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def prefer_static_value(x):\n    static_x = tensor_util.constant_value(x)\n    if static_x is not None:\n        return static_x\n    return x", "docstring": "Return static value of tensor `x` if available, else `x`.\n\nArgs:\nx: `Tensor` (already converted).\n\nReturns:\nNumpy array (if static value is obtainable), else `Tensor`.", "source": "github-repos"}
{"code": "def prod(self, vars_list: List[str]) -> 'TensorFluent':\n    operand = self\n    if (operand.dtype == tf.bool):\n        operand = operand.cast(tf.float32)\n    return self._aggregation_op(tf.reduce_prod, operand, vars_list)", "docstring": "Returns the TensorFluent for the prod aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the prod aggregation function.", "source": "codesearchnet"}
{"code": "def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None:\n        \n\n        if type(buffer) is Buffer:\n            buffer = buffer.mglo\n\n        return self.mglo.read_into(buffer, face, alignment, write_offset)", "docstring": "Read a face from the cubemap texture.\n\nArgs:\nbuffer (bytearray): The buffer that will receive the pixels.\nface (int): The face to read.\n\nKeyword Args:\nalignment (int): The byte alignment of the pixels.\nwrite_offset (int): The write offset.", "source": "juraj-google-style"}
{"code": "def imcrop(img, bboxes, scale=1.0, pad_fill=None):\n    \n    chn = 1 if img.ndim == 2 else img.shape[2]\n    if pad_fill is not None:\n        if isinstance(pad_fill, (int, float)):\n            pad_fill = [pad_fill for _ in range(chn)]\n        assert len(pad_fill) == chn\n\n    _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes\n    scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)\n    clipped_bbox = bbox_clip(scaled_bboxes, img.shape)\n\n    patches = []\n    for i in range(clipped_bbox.shape[0]):\n        x1, y1, x2, y2 = tuple(clipped_bbox[i, :])\n        if pad_fill is None:\n            patch = img[y1:y2 + 1, x1:x2 + 1, ...]\n        else:\n            _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])\n            if chn == 2:\n                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)\n            else:\n                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)\n            patch = np.array(\n                pad_fill, dtype=img.dtype) * np.ones(\n                    patch_shape, dtype=img.dtype)\n            x_start = 0 if _x1 >= 0 else -_x1\n            y_start = 0 if _y1 >= 0 else -_y1\n            w = x2 - x1 + 1\n            h = y2 - y1 + 1\n            patch[y_start:y_start + h, x_start:x_start +\n                  w, ...] = img[y1:y1 + h, x1:x1 + w, ...]\n        patches.append(patch)\n\n    if bboxes.ndim == 1:\n        return patches[0]\n    else:\n        return patches", "docstring": "Crop image patches.\n\n3 steps: scale the bboxes -> clip bboxes -> crop and pad.\n\nArgs:\nimg (ndarray): Image to be cropped.\nbboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.\nscale (float, optional): Scale ratio of bboxes, the default value\n1.0 means no padding.\npad_fill (number or list): Value to be filled for padding, None for\nno padding.\n\nReturns:\nlist or ndarray: The cropped image patches.", "source": "juraj-google-style"}
{"code": "def dict_strip(d):\n    \n    _d = deepcopy(d)\n    for k, v in iteritems(d):\n        if isinstance(v, str):\n            _d[k] = v.strip()\n        elif isinstance(v, dict):\n            _d[k] = dict_strip(v)\n\n    return _d", "docstring": "Strips whitespace from the string values of the given dictionary (recursively).\n\nArgs:\nd: A dictionary object.\n\nReturns:\nA new dictionary object, whose string values' whitespace has been stripped out.", "source": "juraj-google-style"}
{"code": "def lock(self, key, client):\n        \n\n        self.key = key\n        self.client = client", "docstring": "Set the key that will be used to ensure messages come from one party\n\nArgs:\nkey (string): The key used to validate future messages\nclient (string): A string that will be returned to indicate who\nlocked this device.", "source": "juraj-google-style"}
{"code": "def instantiate_interface(virtual_iface, config, loop):\n    if (virtual_iface == 'null'):\n        return StandardDeviceServer(None, {}, loop=loop)\n    conf = {}\n    if ('interface' in config):\n        conf = config['interface']\n    try:\n        reg = ComponentRegistry()\n        if virtual_iface.endswith('.py'):\n            (_name, iface) = reg.load_extension(virtual_iface, class_filter=AbstractDeviceServer, unique=True)\n        else:\n            (_name, iface) = reg.load_extensions('iotile.device_server', name_filter=virtual_iface, class_filter=AbstractDeviceServer, unique=True)\n        return iface(None, conf, loop=loop)\n    except ArgumentError as err:\n        print(('ERROR: Could not load device_server (%s): %s' % (virtual_iface, err.msg)))\n        sys.exit(1)", "docstring": "Find a virtual interface by name and instantiate it\n\nArgs:\nvirtual_iface (string): The name of the pkg_resources entry point corresponding to\nthe interface.  It should be in group iotile.virtual_interface\nconfig (dict): A dictionary with a 'interface' key with the config info for configuring\nthis virtual interface.  This is optional.\n\nReturns:\nVirtualInterface: The instantiated subclass of VirtualInterface", "source": "codesearchnet"}
{"code": "def exportData(self, datfile):\n\n    def ampl_set(name, values):\n\n        def format_entry(e):\n            return repr(e).replace(' ', '')\n        return 'set {0} := {1};'.format(name, ','.join((format_entry(e) for e in values)))\n\n    def ampl_param(name, values):\n\n        def format_entry(k, v):\n            k = repr(k).strip('()').replace(' ', '')\n            if (v == inf):\n                v = 'Infinity'\n            elif (v == (- inf)):\n                v = '-Infinity'\n            else:\n                v = repr(v).strip('()').replace(' ', '')\n            return '[{0}]{1}'.format(k, v)\n        return 'param {0} := {1};'.format(name, ''.join((format_entry(k, v) for (k, v) in values.items())))\n    with open(datfile, 'w') as f:\n        for (name, entity) in self.getSets():\n            values = entity.getValues().toList()\n            print(ampl_set(name, values), file=f)\n        for (name, entity) in self.getParameters():\n            if entity.isScalar():\n                print('param {} := {};'.format(name, entity.value()), file=f)\n            else:\n                values = entity.getValues().toDict()\n                print(ampl_param(name, values), file=f)", "docstring": "Create a .dat file with the data that has been loaded.\n\nArgs:\ndatfile: Path to the file (Relative to the current working\ndirectory or absolute).", "source": "codesearchnet"}
{"code": "def match_term(self, value, required=True, new_group=False):\n    if self.initialized:\n        if required:\n            self._and_join(new_group)\n        else:\n            self._or_join(new_group)\n    self._term(value)\n    return self", "docstring": "Add a fulltext search term to the query.\n\nWarning:\nDo not use this method with any other query-building helpers. This method\nis only for building fulltext queries (in non-advanced mode). Using other\nhelpers, such as ``match_field()``, will cause the query to run in advanced mode.\nIf a fulltext term query is run in advanced mode, it will have unexpected\nresults.\n\nArguments:\nvalue (str): The term to match.\nrequired (bool): If ``True``, will add term with ``AND``.\nIf ``False``, will use ``OR``. **Default:** ``True``.\nnew_group (bool): If ``True``, will separate the term into a new parenthetical group.\nIf ``False``, will not.\n**Default:** ``False``.\n\nReturns:\nSearchHelper: Self", "source": "codesearchnet"}
{"code": "def validate(\n        self, nanopub: Mapping[str, Any]\n    ) -> Tuple[bool, List[Tuple[str, str]]]:\n        \n\n        \n        (is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)\n        if not is_valid:\n            return messages\n\n        \n        if nanopub[\"nanopub\"][\"type\"][\"name\"].upper() == \"BEL\":\n            bel_version = nanopub[\"nanopub\"][\"type\"][\"version\"]\n        else:\n            is_valid = False\n            return (\n                is_valid,\n                f\"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}\",\n            )\n\n        all_messages = []\n        \n        bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)\n        for edge in nanopub[\"nanopub\"][\"edges\"]:\n            bel_statement = f\"{edge['subject']} {edge['relation']} {edge['object']}\"\n            parse_obj = bel_obj.parse(bel_statement)\n            if not parse_obj.valid:\n                all_messages.extend(\n                    (\n                        \"ERROR\",\n                        f\"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}\",\n                    )\n                )\n\n        \n        for context in nanopub[\"nanopub\"][\"context\"]:\n            (is_valid, messages) = self.validate_context(context)\n            all_messages.extend(messages)\n\n        is_valid = True\n        for _type, msg in all_messages:\n            if _type == \"ERROR\":\n                is_valid = False\n\n        return (is_valid, all_messages)", "docstring": "Validates using the nanopub schema\n\nArgs:\nnanopub (Mapping[str, Any]): nanopub dict\n\nReturns:\nTuple[bool, List[Tuple[str, str]]]:\nbool: Is valid?  Yes = True, No = False\nList[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)\ne.g. [('WARNING', \"Context ID not found\")]", "source": "juraj-google-style"}
{"code": "def _PrintAnalysisStatusUpdateWindow(self, processing_status):\n    \n    if self._stdout_output_writer:\n      self._ClearScreen()\n\n    output_text = 'plaso - {0:s} version {1:s}\\n\\n'.format(\n        self._tool_name, plaso.__version__)\n    self._output_writer.Write(output_text)\n\n    self._PrintAnalysisStatusHeader(processing_status)\n\n    table_view = views.CLITabularTableView(column_names=[\n        'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',\n        'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])\n\n    self._AddsAnalysisProcessStatusTableRow(\n        processing_status.foreman_status, table_view)\n\n    for worker_status in processing_status.workers_status:\n      self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)\n\n    table_view.Write(self._output_writer)\n    self._output_writer.Write('\\n')\n\n    if processing_status.aborted:\n      self._output_writer.Write(\n          'Processing aborted - waiting for clean up.\\n\\n')\n\n    if self._stdout_output_writer:\n      \n      sys.stdout.flush()", "docstring": "Prints an analysis status update in window mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"}
{"code": "def learn(self, grad_arr, fix_opt_flag=False):\n        \n        if grad_arr.ndim > 3:\n            grad_arr = grad_arr.reshape((\n                grad_arr.shape[0],\n                grad_arr.shape[1],\n                -1\n            ))\n\n        delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, grad_arr)\n\n        if fix_opt_flag is False:\n            self.__lstm_model.optimize(\n                grads_list,\n                self.__learning_rate,\n                1\n            )\n\n        return delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\ngrad_arr:       `np.ndarray` of gradients.\nfix_opt_flag:   If `False`, no optimization in this model will be done.\n\nReturns:\n`np.ndarray` of delta or gradients.", "source": "juraj-google-style"}
{"code": "def available_writers(as_dict=False):\n    writers = []\n    for writer_configs in configs_for_writer():\n        try:\n            writer_info = read_writer_config(writer_configs)\n        except (KeyError, IOError, yaml.YAMLError):\n            LOG.warning('Could not import writer config from: %s', writer_configs)\n            LOG.debug('Error loading YAML', exc_info=True)\n            continue\n        writers.append((writer_info if as_dict else writer_info['name']))\n    return writers", "docstring": "Available writers based on current configuration.\n\nArgs:\nas_dict (bool): Optionally return writer information as a dictionary.\nDefault: False\n\nReturns: List of available writer names. If `as_dict` is `True` then\na list of dictionaries including additionally writer information\nis returned.", "source": "codesearchnet"}
{"code": "def poke(exposes):\n\n    def _poke(store, objname, obj, container, visited=None, _stack=None):\n        try:\n            sub_container = store.newContainer(objname, obj, container)\n        except (SystemExit, KeyboardInterrupt):\n            raise\n        except:\n            raise ValueError('generic poke not supported by store')\n        for iobjname in exposes:\n            try:\n                iobj = getattr(obj, iobjname)\n            except AttributeError:\n                pass\n            else:\n                store.poke(iobjname, iobj, sub_container, visited=visited, _stack=_stack)\n    return _poke", "docstring": "Default serializer factory.\n\nArguments:\n\nexposes (iterable): attributes to serialized.\n\nReturns:\n\ncallable: serializer (`poke` routine).", "source": "codesearchnet"}
{"code": "def createDomains(tlds, nicks=None, nicksFile=None):\n    domain_candidates = []\n    if (nicks != None):\n        for n in nicks:\n            for t in tlds:\n                tmp = {'domain': (n + t['tld']), 'type': t['type'], 'tld': t['tld']}\n                domain_candidates.append(tmp)\n    elif (nicksFile != None):\n        with open(nicksFile, 'r') as iF:\n            nicks = iF.read().splitlines()\n            for n in nicks:\n                for t in tlds:\n                    tmp = {'domain': (n + t['tld']), 'type': t['type'], 'tld': t['tld']}\n                    domain_candidates.append(tmp)\n    return domain_candidates", "docstring": "Method that globally permits to generate the domains to be checked.\n\nArgs:\n-----\ntlds: List of tlds.\nnicks: List of aliases.\nnicksFile: The filepath to the aliases file.\n\nReturns:\n--------\nlist: list of domains to be checked.", "source": "codesearchnet"}
{"code": "def RemoveTransaction(self, tx):\n    if (BC.Default() is None):\n        return False\n    if (not BC.Default().ContainsTransaction(tx.Hash)):\n        return False\n    if (tx.Hash.ToBytes() in self.MemPool):\n        del self.MemPool[tx.Hash.ToBytes()]\n        return True\n    return False", "docstring": "Remove a transaction from the memory pool if it is found on the blockchain.\n\nArgs:\ntx (neo.Core.TX.Transaction): instance.\n\nReturns:\nbool: True if successfully removed. False otherwise.", "source": "codesearchnet"}
{"code": "def GetDateRange(self):\n    start = self.start_date\n    end = self.end_date\n    for (date, (exception_type, _)) in self.date_exceptions.items():\n        if (exception_type == self._EXCEPTION_TYPE_REMOVE):\n            continue\n        if ((not start) or (date < start)):\n            start = date\n        if ((not end) or (date > end)):\n            end = date\n    if (start is None):\n        start = end\n    elif (end is None):\n        end = start\n    return (start, end)", "docstring": "Return the range over which this ServicePeriod is valid.\n\nThe range includes exception dates that add service outside of\n(start_date, end_date), but doesn't shrink the range if exception\ndates take away service at the edges of the range.\n\nReturns:\nA tuple of \"YYYYMMDD\" strings, (start date, end date) or (None, None) if\nno dates have been given.", "source": "codesearchnet"}
{"code": "def group(self, group_type=None, owner=None, **kwargs):\n    group = None\n    if (not group_type):\n        return Group(self.tcex, None, None, owner=owner, **kwargs)\n    name = kwargs.pop('name', None)\n    group_type = group_type.upper()\n    if (group_type == 'ADVERSARY'):\n        group = Adversary(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'CAMPAIGN'):\n        group = Campaign(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'DOCUMENT'):\n        group = Document(self.tcex, name, kwargs.pop('file_name', None), owner=owner, **kwargs)\n    if (group_type == 'EVENT'):\n        group = Event(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'EMAIL'):\n        group = Email(self.tcex, name, kwargs.pop('to', None), kwargs.pop('from_addr', None), kwargs.pop('subject', None), kwargs.pop('body', None), kwargs.pop('header', None), owner=owner, **kwargs)\n    if (group_type == 'INCIDENT'):\n        group = Incident(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'INTRUSION SET'):\n        group = IntrusionSet(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'REPORT'):\n        group = Report(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'SIGNATURE'):\n        group = Signature(self.tcex, name, kwargs.pop('file_name', None), kwargs.pop('file_type', None), kwargs.pop('file_text', None), owner=owner, **kwargs)\n    if (group_type == 'THREAT'):\n        group = Threat(self.tcex, name, owner=owner, **kwargs)\n    if (group_type == 'TASK'):\n        group = Task(self.tcex, name, kwargs.pop('status', 'Not Started'), kwargs.pop('due_date', None), kwargs.pop('reminder_date', None), kwargs.pop('escalation_date', None), owner=owner, **kwargs)\n    return group", "docstring": "Create the Group TI object.\n\nArgs:\nowner:\ngroup_type:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def init_feed_dict(self):\n    return self._init_feed_dict", "docstring": "Return the feed dictionary used when evaluating the `init_op`.\n\nReturns:\nA feed dictionary or `None`.", "source": "github-repos"}
{"code": "def rmse(y, p):\n    \n\n    \n    assert y.shape == p.shape\n\n    return np.sqrt(mse(y, p))", "docstring": "Root Mean Squared Error (RMSE).\n\nArgs:\ny (numpy.array): target\np (numpy.array): prediction\n\nReturns:\ne (numpy.float64): RMSE", "source": "juraj-google-style"}
{"code": "def get_tag_html(tag_id):\n    tag_data = get_lazy_tag_data(tag_id)\n    tag = tag_data['tag']\n    args = tag_data['args']\n    kwargs = tag_data['kwargs']\n    (lib, tag_name) = get_lib_and_tag_name(tag)\n    args_str = ''\n    if args:\n        for arg in args:\n            if isinstance(arg, six.string_types):\n                args_str += \"'{0}' \".format(arg)\n            else:\n                args_str += '{0} '.format(arg)\n    kwargs_str = ''\n    if kwargs:\n        for (name, value) in kwargs.items():\n            if isinstance(value, six.string_types):\n                kwargs_str += \"{0}='{1}' \".format(name, value)\n            else:\n                kwargs_str += '{0}={1} '.format(name, value)\n    html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)\n    return html", "docstring": "Returns the Django HTML to load the tag library and render the tag.\n\nArgs:\ntag_id (str): The tag id for the to return the HTML for.", "source": "codesearchnet"}
{"code": "def __init__(self, pqc: tf.Tensor, qubits: List[cirq.GridQubit], symbol_names: tf.Tensor, value_layers_inputs: List[Union[tf.Variable, List[tf.Variable]]], value_layers: List[List[tf.keras.layers.Layer]], name: Union[None, str]=None):\n    super().__init__(name=name)\n    self._pqc = pqc\n    self._qubits = sorted(qubits)\n    self._symbol_names = symbol_names\n    self._value_layers = value_layers\n    self._value_layers_inputs = value_layers_inputs\n    raw_bit_circuit = circuit_utils.bit_circuit(self.qubits)\n    bit_symbol_names = list(sorted(tfq.util.get_circuit_symbols(raw_bit_circuit)))\n    self._bit_symbol_names = tf.constant([str(x) for x in bit_symbol_names])\n    self._bit_circuit = tfq.convert_to_tensor([raw_bit_circuit])", "docstring": "Initializes a QuantumCircuit.\n\nArgs:\npqc: TFQ string representation of a parameterized quantum circuit.\nqubits: The qubits on which `pqc` acts.\nsymbol_names: Strings which are used to specify the order in which the\nvalues in `self.symbol_values` should be placed inside of the circuit.\nvalue_layers_inputs: Inputs to the `value_layers` argument.\nvalue_layers: The concatenation of the layers in entry `i` yields a\ntrainable map from `value_layers_inputs[i]` to the `i` entry in the list\nof intermediate values.  The list of intermediate values is concatenated\nto yield the values to substitute into the circuit.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    \n    if self._VERIFICATION_REGEX.match(lines):\n      return True\n\n    return False", "docstring": "Verifies whether content corresponds to a Zsh extended_history file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if the line was successfully parsed.", "source": "juraj-google-style"}
{"code": "def __init__(self, _args):\n        \n        super(TcExInit, self).__init__(_args)\n\n        \n        self.base_url = (\n            'https:\n        ).format(self.args.branch)", "docstring": "Initialize Class properties.\n\nArgs:\n_args (namespace): The argparser args Namespace.", "source": "juraj-google-style"}
{"code": "def _CheckMacOSPaths(self, filename, artifact_definition, source, paths):\n    \n    result = True\n\n    paths_with_private = []\n    paths_with_symbolic_link_to_private = []\n\n    for path in paths:\n      path_lower = path.lower()\n      path_segments = path_lower.split(source.separator)\n      if not path_segments:\n        logging.warning((\n            'Empty path defined by artifact definition: {0:s} in file: '\n            '{1:s}').format(artifact_definition.name, filename))\n        result = False\n\n      elif len(path_segments) == 1:\n        continue\n\n      elif path_segments[1] in self._MACOS_PRIVATE_SUB_PATHS:\n        paths_with_symbolic_link_to_private.append(path)\n\n      elif path_segments[1] == 'private' and len(path_segments) >= 2:\n        if path_segments[2] in self._MACOS_PRIVATE_SUB_PATHS:\n          paths_with_private.append(path)\n\n        else:\n          logging.warning((\n              'Unsupported private path: {0:s} defined by artifact definition: '\n              '{1:s} in file: {2:s}').format(\n                  path, artifact_definition.name, filename))\n          result = False\n\n    for private_path in paths_with_private:\n      if private_path[8:] not in paths_with_symbolic_link_to_private:\n        logging.warning((\n            'Missing symbolic link: {0:s} for path: {1:s} defined by artifact '\n            'definition: {2:s} in file: {3:s}').format(\n                private_path[8:], private_path, artifact_definition.name,\n                filename))\n        result = False\n\n    for path in paths_with_symbolic_link_to_private:\n      private_path = '/private{0:s}'.format(path)\n      if private_path not in paths_with_private:\n        logging.warning((\n            'Missing path: {0:s} for symbolic link: {1:s} defined by artifact '\n            'definition: {2:s} in file: {3:s}').format(\n                private_path, path, artifact_definition.name, filename))\n        result = False\n\n    return result", "docstring": "Checks if the paths are valid MacOS paths.\n\nArgs:\nfilename (str): name of the artifacts definition file.\nartifact_definition (ArtifactDefinition): artifact definition.\nsource (SourceType): source definition.\npaths (list[str]): paths to validate.\n\nReturns:\nbool: True if the MacOS paths is valid.", "source": "juraj-google-style"}
{"code": "def num_samples(self, sr=None):\n        \n        native_sr = self.sampling_rate\n        num_samples = units.seconds_to_sample(self.duration, native_sr)\n\n        if sr is not None:\n            ratio = float(sr) / native_sr\n            num_samples = int(np.ceil(num_samples * ratio))\n\n        return num_samples", "docstring": "Return the number of samples.\n\nArgs:\nsr (int): Calculate the number of samples with the given\nsampling-rate. If None use the native sampling-rate.\n\nReturns:\nint: Number of samples", "source": "juraj-google-style"}
{"code": "def np_doc_only(np_fun_name, np_fun=None):\n    np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)\n\n    def decorator(f):\n        f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)\n        return f\n    return decorator", "docstring": "Attachs numpy docstring to a function.\n\nThis differs from np_doc in that it doesn't check for a match in signature.\n\nArgs:\nnp_fun_name: name for the np_fun symbol. At least one of np_fun or\nnp_fun_name shoud be set.\nnp_fun: (optional) the numpy function whose docstring will be used.\n\nReturns:\nA function decorator that attaches the docstring from `np_fun` to the\ndecorated function.", "source": "github-repos"}
{"code": "def expand(self, url):\n        \n        expand_url = f'{self.api_url}v3/expand'\n        params = {\n            'shortUrl': url,\n            'access_token': self.api_key,\n            'format': 'txt',\n        }\n        response = self._get(expand_url, params=params)\n        if response.ok:\n            return response.text.strip()\n        raise ExpandingErrorException(response.content)", "docstring": "Expand implementation for Bit.ly\nArgs:\nurl: the URL you want to shorten\n\nReturns:\nA string containing the expanded URL\n\nRaises:\nExpandingErrorException: If the API Returns an error as response", "source": "juraj-google-style"}
{"code": "def simple_stack(self, opcode=None):\n    if opcode is not None:\n        return (frame_state.SimpleFrame(opcode),)\n    elif self.frame:\n        return (frame_state.SimpleFrame(self.frame.current_opcode),)\n    else:\n        return ()", "docstring": "Get a stack of simple frames.\n\nArgs:\nopcode: Optionally, an opcode to create a stack for.\n\nReturns:\nIf an opcode is provided, a stack with a single frame at that opcode.\nOtherwise, the VM's current stack converted to simple frames.", "source": "github-repos"}
{"code": "def json(cls, message):\n    if (type(message) is OrderedDict):\n        pprint(dict(message))\n    else:\n        pprint(message)", "docstring": "Print a nice JSON output\n\nArgs:\nmessage: the message to print", "source": "codesearchnet"}
{"code": "def sketch_fasta(fasta_path, outdir):\n    \n    genome_name = genome_name_from_fasta_path(fasta_path)\n    outpath = os.path.join(outdir, genome_name)\n    args = ['mash', 'sketch', '-o', outpath, fasta_path]\n    logging.info('Running Mash sketch with command: %s', ' '.join(args))\n    p = Popen(args)\n    p.wait()\n    sketch_path = outpath + '.msh'\n    assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(\n        genome_name,\n        sketch_path)\n    return sketch_path", "docstring": "Create a Mash sketch from an input fasta file\n\nArgs:\nfasta_path (str): input fasta file path. Genome name in fasta filename\noutdir (str): output directory path to write Mash sketch file to\n\nReturns:\nstr: output Mash sketch file path", "source": "juraj-google-style"}
{"code": "def _Aff4Read(aff4_obj, offset, length):\n    length = (length or (_Aff4Size(aff4_obj) - offset))\n    aff4_obj.Seek(offset)\n    return aff4_obj.Read(length)", "docstring": "Reads contents of given AFF4 file.\n\nArgs:\naff4_obj: An AFF4 stream instance to retrieve contents for.\noffset: An offset to start the reading from.\nlength: A number of bytes to read. Reads the whole file if 0.\n\nReturns:\nContents of specified AFF4 stream.\n\nRaises:\nTypeError: If `aff4_obj` is not an instance of AFF4 stream.", "source": "codesearchnet"}
{"code": "def build_relative_position(query_layer, key_layer, bucket_size: int=-1, max_position: int=-1):\n    query_size = query_layer.size(-2)\n    key_size = key_layer.size(-2)\n    q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device)\n    k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device)\n    rel_pos_ids = q_ids[:, None] - k_ids[None, :]\n    if bucket_size > 0 and max_position > 0:\n        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n    rel_pos_ids = rel_pos_ids.to(torch.long)\n    rel_pos_ids = rel_pos_ids[:query_size, :]\n    rel_pos_ids = rel_pos_ids.unsqueeze(0)\n    return rel_pos_ids", "docstring": "Build relative position according to the query and key\n\nWe assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\nP_k\\)\n\nArgs:\nquery_size (int): the length of query\nkey_size (int): the length of key\nbucket_size (int): the size of position bucket\nmax_position (int): the maximum allowed absolute position\ndevice (`torch.device`): the device on which tensors will be created.\n\nReturn:\n`torch.LongTensor`: A tensor with shape [1, query_size, key_size]", "source": "github-repos"}
{"code": "def open(self, mode='r', encoding=None):\n        \n        access_type = self._get_access_type(mode)\n\n        if access_type == 't' and encoding is not None and encoding != self.encoded_with:\n            warnings.warn('Attempting to decode %s as \"%s\", but encoding is declared as \"%s\"'\n                          % (self, encoding, self.encoded_with))\n\n        if encoding is None:\n            encoding = self.encoded_with\n\n        buffer = io.BytesIO(self._contents)\n        if access_type == 'b':\n            return buffer\n        else:\n            return io.TextIOWrapper(buffer, encoding=encoding)", "docstring": "Return file-like object\n\nArgs:\nmode (str): access mode (only reading modes are supported)\nencoding (str): text decoding method for text access (default: system default)\n\nReturns:\nio.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters", "source": "juraj-google-style"}
{"code": "def __init__(self, platform, device):\n        \n        self._platform = platform\n        self._device = device\n\n        if (self._platform, self._device) not in _context_cache:\n            context = cl.Context([device])\n            _context_cache[(self._platform, self._device)] = context\n\n        self._context = _context_cache[(self._platform, self._device)]\n        self._queue = cl.CommandQueue(self._context, device=device)", "docstring": "Storage unit for an OpenCL environment.\n\nArgs:\nplatform (pyopencl platform): An PyOpenCL platform.\ndevice (pyopencl device): An PyOpenCL device", "source": "juraj-google-style"}
{"code": "def rt_is_equiv_dense(rt):\n    return math_ops.reduce_all([math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0])) for row_lens in rt.nested_row_lengths()])", "docstring": "Returns true if this RaggedTensor has the same row_lengths across\n\nall ragged dimensions and thus can be converted to a dense tensor\nwithout loss of information.\n\nArgs:\nrt: RaggedTensor.", "source": "github-repos"}
{"code": "def execute_before(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n        alloc = parent.allocator\n\n        \n        \n        \n        connect_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)\n        disconnect_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)\n        latch_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)\n        latch_on_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)\n        latch_off_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)\n\n        sensor_graph.add_node(u\"({} always) => {} using copy_latest_a\".format(user_connected, connect_stream))\n        sensor_graph.add_node(u\"({} always) => {} using copy_latest_a\".format(user_disconnected, disconnect_stream))\n\n        sensor_graph.add_node(u\"({} always && {} when value=={}) => {} using copy_latest_a\".format(latch_on_stream, connect_stream, self.slot_id.address, latch_stream))\n        sensor_graph.add_node(u\"({} always && {} when value=={}) => {} using copy_latest_a\".format(latch_off_stream, disconnect_stream, self.slot_id.address, latch_stream))\n\n        sensor_graph.add_constant(latch_on_stream, 1)\n        sensor_graph.add_constant(latch_off_stream, 0)\n        sensor_graph.add_constant(latch_stream, 0)\n\n        new_scope = GatedClockScope(sensor_graph, scope_stack, (latch_stream, InputTrigger(u'value', u'==', 1)))\n\n        \n        new_scope.add_identifier('connect', connect_stream)\n        new_scope.add_identifier('disconnect', disconnect_stream)\n        scope_stack.append(new_scope)", "docstring": "Execute statement before children are executed.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def simulate(self, action):\n    \n    with tf.name_scope(\"environment/simulate\"):\n      if action.dtype in (tf.float16, tf.float32, tf.float64):\n        action = tf.check_numerics(action, \"action\")\n      def step(action):\n        step_response = self._batch_env.step(action)\n        \n        \n        \n        if len(step_response) == 3:\n          (observ, reward, done) = step_response\n        else:\n          (observ, reward, done, _) = step_response\n        return (observ, reward.astype(np.float32), done)\n      observ, reward, done = tf.py_func(\n          step, [action],\n          [self.observ_dtype, tf.float32, tf.bool], name=\"step\")\n      reward = tf.check_numerics(reward, \"reward\")\n      reward.set_shape((len(self),))\n      done.set_shape((len(self),))\n      with tf.control_dependencies([self._observ.assign(observ)]):\n        return tf.identity(reward), tf.identity(done)", "docstring": "Step the batch of environments.\n\nThe results of the step can be accessed from the variables defined below.\n\nArgs:\naction: Tensor holding the batch of actions to apply.\n\nReturns:\nOperation.", "source": "juraj-google-style"}
{"code": "def rewrite_filters_in_optional_blocks(ir_blocks):\n    new_ir_blocks = []\n    optional_context_depth = 0\n    for block in ir_blocks:\n        new_block = block\n        if isinstance(block, CoerceType):\n            raise AssertionError(u'Found a CoerceType block after all such blocks should have been lowered to Filter blocks: {}'.format(ir_blocks))\n        elif (isinstance(block, Traverse) and block.optional):\n            optional_context_depth += 1\n        elif (isinstance(block, Backtrack) and block.optional):\n            optional_context_depth -= 1\n            if (optional_context_depth < 0):\n                raise AssertionError(u'Reached negative optional context depth for blocks: {}'.format(ir_blocks))\n        elif (isinstance(block, Filter) and (optional_context_depth > 0)):\n            null_check = BinaryComposition(u'=', LocalField('@this'), NullLiteral)\n            new_block = Filter(BinaryComposition(u'||', null_check, block.predicate))\n        else:\n            pass\n        new_ir_blocks.append(new_block)\n    return new_ir_blocks", "docstring": "In optional contexts, add a check for null that allows non-existent optional data through.\n\nOptional traversals in Gremlin represent missing optional data by setting the current vertex\nto null until the exit from the optional scope. Therefore, filtering and type coercions\n(which should have been lowered into filters by this point) must check for null before\napplying their filtering predicates. Since missing optional data isn't filtered,\nthe new filtering predicate should be \"(it == null) || existing_predicate\".\n\nArgs:\nir_blocks: list of IR blocks to lower into Gremlin-compatible form\n\nReturns:\nnew list of IR blocks with this lowering step applied", "source": "codesearchnet"}
{"code": "def find_slot(self, wanted, slots=None):\n    for slot in self.find_slots(wanted, slots):\n        return slot\n    return None", "docstring": "Searches the given slots or, if not given,\nactive hotbar slot, hotbar, inventory, open window in this order.\n\nArgs:\nwanted: function(Slot) or Slot or itemID or (itemID, metadata)\n\nReturns:\nOptional[Slot]: The first slot containing the item\nor None if not found.", "source": "codesearchnet"}
{"code": "def find_executable(executable):\n    logger = logging.getLogger(__name__)\n    logger.debug(\"Checking executable '%s'...\", executable)\n    executable_path = _find_executable(executable)\n    found = (executable_path is not None)\n    if found:\n        logger.debug(\"Executable '%s' found: '%s'\", executable, executable_path)\n    else:\n        logger.debug(\"Executable '%s' not found\", executable)\n    return executable_path", "docstring": "Finds executable in PATH\n\nReturns:\nstring or None", "source": "codesearchnet"}
{"code": "def build_hpo_term(hpo_info):\n    \n    \n    try:\n        hpo_id = hpo_info['hpo_id']\n    except KeyError:\n        raise KeyError(\"Hpo terms has to have a hpo_id\")\n\n    LOG.debug(\"Building hpo term %s\", hpo_id)\n\n    \n    try:\n        description = hpo_info['description']\n    except KeyError:\n        raise KeyError(\"Hpo terms has to have a description\")\n\n    hpo_obj = HpoTerm(\n        hpo_id = hpo_id,\n        description = description\n    )\n    \n    \n    hgnc_ids = hpo_info.get('genes', set())\n    if hgnc_ids:\n        hpo_obj['genes'] = list(hgnc_ids)\n    \n    return hpo_obj", "docstring": "Build a hpo_term object\n\nCheck that the information is correct and add the correct hgnc ids to the\narray of genes.\n\nArgs:\nhpo_info(dict)\n\nReturns:\nhpo_obj(scout.models.HpoTerm): A dictionary with hpo information", "source": "juraj-google-style"}
{"code": "def __init__(self, usage=None, data=None):\n        \n        super(TransactionAttribute, self).__init__()\n        self.Usage = usage\n        self.Data = data", "docstring": "Create an instance.\n\nArgs:\nusage (neo.Core.TX.TransactionAttribute.TransactionAttributeUsage):\ndata (bytes):", "source": "juraj-google-style"}
{"code": "def ParseArguments(args):\n  \n  try:\n    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',\n                                                 'counting=',\n                                                 'filter=',\n                                                 'root=',\n                                                 'linelength=',\n                                                 'extensions='])\n  except getopt.GetoptError:\n    PrintUsage('Invalid arguments.')\n\n  verbosity = _VerboseLevel()\n  output_format = _OutputFormat()\n  filters = ''\n  counting_style = ''\n\n  for (opt, val) in opts:\n    if opt == '--help':\n      PrintUsage(None)\n    elif opt == '--output':\n      if val not in ('emacs', 'vs7', 'eclipse'):\n        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')\n      output_format = val\n    elif opt == '--verbose':\n      verbosity = int(val)\n    elif opt == '--filter':\n      filters = val\n      if not filters:\n        PrintCategories()\n    elif opt == '--counting':\n      if val not in ('total', 'toplevel', 'detailed'):\n        PrintUsage('Valid counting options are total, toplevel, and detailed')\n      counting_style = val\n    elif opt == '--root':\n      global _root\n      _root = val\n    elif opt == '--linelength':\n      global _line_length\n      try:\n          _line_length = int(val)\n      except ValueError:\n          PrintUsage('Line length must be digits.')\n    elif opt == '--extensions':\n      global _valid_extensions\n      try:\n          _valid_extensions = set(val.split(','))\n      except ValueError:\n          PrintUsage('Extensions must be comma seperated list.')\n\n  if not filenames:\n    PrintUsage('No files were specified.')\n\n  _SetOutputFormat(output_format)\n  _SetVerboseLevel(verbosity)\n  _SetFilters(filters)\n  _SetCountingStyle(counting_style)\n\n  return filenames", "docstring": "Parses the command line arguments.\n\nThis may set the output format and verbosity level as side-effects.\n\nArgs:\nargs: The command line arguments:\n\nReturns:\nThe list of filenames to lint.", "source": "juraj-google-style"}
{"code": "def make_timebar(progress=0, duration=0):\n    \n\n    duration_string = api_music.duration_to_string(duration)\n    if duration <= 0:\n        return \"---\"\n\n    time_counts = int(round((progress / duration) * TIMEBAR_LENGTH))\n    if time_counts > TIMEBAR_LENGTH:\n        time_counts = TIMEBAR_LENGTH\n\n    if duration > 0:\n        bar = \"│\" + (TIMEBAR_PCHAR * time_counts) + (TIMEBAR_ECHAR * (TIMEBAR_LENGTH - time_counts)) + \"│\"\n        time_bar = \"{} {}\".format(bar, duration_string)\n    else:\n        time_bar = duration_string\n\n    return time_bar", "docstring": "Makes a new time bar string\n\nArgs:\nprogress: How far through the current song we are (in seconds)\nduration: The duration of the current song (in seconds)\n\nReturns:\ntimebar (str): The time bar string", "source": "juraj-google-style"}
{"code": "def ensure_mingw_drive(win32_path):\n    (win32_drive, _path) = splitdrive(win32_path)\n    mingw_drive = ('/' + win32_drive[:(- 1)].lower())\n    mingw_path = (mingw_drive + _path)\n    return mingw_path", "docstring": "r\"\"\" replaces windows drives with mingw style drives\n\nArgs:\nwin32_path (str):\n\nCommandLine:\npython -m utool.util_path --test-ensure_mingw_drive\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_path import *  # NOQA\n>>> win32_path = r'C:/Program Files/Foobar'\n>>> result = ensure_mingw_drive(win32_path)\n>>> print(result)\n/c/Program Files/Foobar", "source": "codesearchnet"}
{"code": "def _refresh_grpc(operations_stub, operation_name):\n    \n    request_pb = operations_pb2.GetOperationRequest(name=operation_name)\n    return operations_stub.GetOperation(request_pb)", "docstring": "Refresh an operation using a gRPC client.\n\nArgs:\noperations_stub (google.longrunning.operations_pb2.OperationsStub):\nThe gRPC operations stub.\noperation_name (str): The name of the operation.\n\nReturns:\ngoogle.longrunning.operations_pb2.Operation: The operation.", "source": "juraj-google-style"}
{"code": "def list(self):\n    self._initialize_list()\n    interested = True\n    response = self._cloudFormation.list_stacks()\n    print('Stack(s):')\n    while interested:\n        if ('StackSummaries' in response):\n            for stack in response['StackSummaries']:\n                stack_status = stack['StackStatus']\n                if (stack_status != 'DELETE_COMPLETE'):\n                    print('    [{}] - {}'.format(stack['StackStatus'], stack['StackName']))\n        next_token = response.get('NextToken', None)\n        if next_token:\n            response = self._cloudFormation.list_stacks(NextToken=next_token)\n        else:\n            interested = False\n    return True", "docstring": "List the existing stacks in the indicated region\n\nArgs:\nNone\n\nReturns:\nTrue if True\n\nTodo:\nFigure out what could go wrong and take steps\nto hanlde problems.", "source": "codesearchnet"}
{"code": "def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):\n    if not is_tf_available():\n        raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')\n    print(\"/!\\\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\\\\")\n    try:\n        import tensorflow as tf\n        import tf2onnx\n        from tf2onnx import __version__ as t2ov\n        print(f'Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}')\n        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, 'tf')\n        nlp.model.predict(tokens.data)\n        input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()]\n        model_proto, _ = tf2onnx.convert.from_keras(nlp.model, input_signature, opset=opset, output_path=output.as_posix())\n    except ImportError as e:\n        raise Exception(f'Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first. {e}')", "docstring": "Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)\n\nArgs:\nnlp: The pipeline to be exported\nopset: The actual version of the ONNX operator set to use\noutput: Path where will be stored the generated ONNX model\n\nNotes: TensorFlow cannot export model bigger than 2GB due to internal constraint from TensorFlow", "source": "github-repos"}
{"code": "def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:\n    \n    content_type = headers.get(\"content-type\")\n    if not content_type:\n        return None, \"utf-8\"\n    else:\n        type_, parameters = cgi.parse_header(content_type)\n        encoding = parameters.get(\"charset\", \"utf-8\")\n        return type_, encoding", "docstring": "Find content-type and encoding of the response\n\nArgs:\nheaders: Response headers\n\nReturns:\n:py:class:`tuple` (content-type, encoding)", "source": "juraj-google-style"}
{"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    if attention_mask is None:\n        attention_mask = jnp.ones_like(input_ids)\n    if position_ids is None:\n        batch_size, sequence_length = input_ids.shape\n        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n        encode_module = module._get_encoder_module()\n        return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration\n\n>>> model = FlaxMBartForConditionalGeneration.from_pretrained(\"facebook/mbart-large-cc25\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/mbart-large-cc25\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"}
{"code": "def decode(pieces, sequence_length, model_file=None, model_proto=None, reverse=False, name=None):\n    return _gen_sentencepiece_processor_op.sentencepiece_decode(pieces, sequence_length, model_file=model_file, model_proto=model_proto, reverse=reverse, name=name)", "docstring": "Decode pieces into postprocessed text.\n\nArgs:\npieces: A 2D int32 or string tensor [batch_size x max_length] of\nencoded sequences.\nsequence_length: A 1D int32 tensor [batch_size] representing the\nlength of pieces.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nreverse: Reverses the tokenized sequence (Default = false)\nname: The name argument that is passed to the op function.\n\nReturns:\ntext: A 1D string tensor of decoded string.", "source": "codesearchnet"}
{"code": "def send_message(channel_id, message):\n    \n\n    channel = client.get_channel(channel_id)\n\n    if channel is None:\n        logger.info(\"{} is not a channel\".format(channel_id))\n        return\n\n    \n    data = datatools.get_data()\n    if not data[\"discord\"][\"servers\"][channel.server.id][modulename][\"activated\"]:\n        logger.info(\"This module has been disabled in {} ({})\".format(channel.server.name, channel.server.id))\n\n    try:\n        runcoro(client.send_message(channel, message))\n    except Exception as e:\n        logger.exception(e)", "docstring": "Send a message to a channel\n\nArgs:\nchannel_id (str): The id of the channel to send the message to\nmessage (str): The message to send to the channel", "source": "juraj-google-style"}
{"code": "def count_moves_in_game_range(self, game_begin, game_end):\n        \n        rows = self.bt_table.read_rows(\n            ROWCOUNT_PREFIX.format(game_begin),\n            ROWCOUNT_PREFIX.format(game_end),\n            filter_=bigtable_row_filters.ColumnRangeFilter(\n                METADATA, MOVE_COUNT, MOVE_COUNT))\n        return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])", "docstring": "Count the total moves in a game range.\n\nArgs:\ngame_begin:  integer, starting game\ngame_end:  integer, ending game\n\nUses the `ct_` keyspace for rapid move summary.", "source": "juraj-google-style"}
{"code": "def format(self, compact: bool=False, verbose: bool=True, root_indent: int=0, **kwargs) -> str:", "docstring": "Formats this object into a string representation.\n\nArgs:\ncompact: If True, this object will be formatted into a single line.\nverbose: If True, this object will be formatted with verbosity.\nSubclasses should define `verbosity` on their own.\nroot_indent: The start indent level for this object if the output is a\nmulti-line string.\n**kwargs: Subclass specific keyword arguments.\n\nReturns:\nA string of formatted object.", "source": "github-repos"}
{"code": "def scale(self, width: int, height: int) -> None:\n    lib.TCOD_image_scale(self.image_c, width, height)\n    (self.width, self.height) = (width, height)", "docstring": "Scale this Image to the new width and height.\n\nArgs:\nwidth (int): The new width of the Image after scaling.\nheight (int): The new height of the Image after scaling.", "source": "codesearchnet"}
{"code": "def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):\n    if (len(self.segments) > 0):\n        return (self.segments[0].points[0].time.strftime(name_format) + '.gpx')\n    else:\n        return 'EmptyTrack'", "docstring": "Generates a name for the track\n\nThe name is generated based on the date of the first point of the\ntrack, or in case it doesn't exist, \"EmptyTrack\"\n\nArgs:\nname_format (str, optional): Name formar to give to the track, based on\nits start time. Defaults to DEFAULT_FILE_NAME_FORMAT\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _remove_curly_braces(text):\n  \n  current_pos = 0\n  depth = 0\n  ret = \"\"\n  for match in re.finditer(\"[{}]\", text):\n    if depth == 0:\n      ret += text[current_pos:match.start()]\n    depth += 1 if text[match.start()] == \"{\" else -1\n    current_pos = match.end()\n  if depth != 0:\n    \n    \n    pass\n  else:\n    ret += text[current_pos:]\n  return ret", "docstring": "Remove everything in curly braces.\n\nCurly braces may be nested, so we keep track of depth.\n\nArgs:\ntext: a string\nReturns:\na string", "source": "juraj-google-style"}
{"code": "def dump_migration_session_state(raw):\n\n    class BlockStyle(str):\n        pass\n\n    class SessionDumper(yaml.SafeDumper):\n        pass\n\n    def str_block_formatter(dumper, data):\n        return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n    SessionDumper.add_representer(BlockStyle, str_block_formatter)\n    raw = deepcopy(raw)\n    for step in raw:\n        step['output'] = BlockStyle(step['output'])\n        step['traceback'] = BlockStyle(step['traceback'])\n    return yaml.dump(raw, Dumper=SessionDumper)", "docstring": "Serialize a migration session state to yaml using nicer formatting\n\nArgs:\nraw: object to serialize\nReturns: string (of yaml)\n\nSpecifically, this forces the \"output\" member of state step dicts (e.g.\nstate[0]['output']) to use block formatting. For example, rather than this:\n\n- migration: [app, migration_name]\noutput: \"line 1\\nline2\\nline3\"\n\nYou get this:\n\n- migration: [app, migration_name]\noutput: |\nline 1\nline 2\nline 3", "source": "codesearchnet"}
{"code": "def find_all(self, kw: YangIdentifier,\n                 pref: YangIdentifier = None) -> List[\"Statement\"]:\n        \n        return [c for c in self.substatements\n                if c.keyword == kw and c.prefix == pref]", "docstring": "Return the list all substatements with the given keyword and prefix.\n\nArgs:\nkw: Statement keyword (local part for extensions).\npref: Keyword prefix (``None`` for built-in statements).", "source": "juraj-google-style"}
{"code": "def _set_root(self, request):\n    if request.state_root:\n        root = request.state_root\n    else:\n        head = self._get_chain_head()\n        root = head.state_root_hash\n    try:\n        self._tree.set_merkle_root(root)\n    except KeyError as e:\n        LOGGER.debug('Unable to find root \"%s\" in database', e)\n        raise _ResponseFailed(self._status.NO_ROOT)\n    return root", "docstring": "Sets the root of the merkle tree, returning any head id used.\n\nNote:\nThis method will fail if `_tree` has not been set\n\nArgs:\nrequest (object): The parsed protobuf request object\n\nReturns:\nstr: the state root of the head block used to specify the root\n\nRaises:\nResponseFailed: Failed to set the root if the merkle tree", "source": "codesearchnet"}
{"code": "def assert_proper_iterable(values):\n    unintentional_iterables = (tensor_lib.Tensor, sparse_tensor.SparseTensor, np.ndarray) + compat.bytes_or_text_types\n    if isinstance(values, unintentional_iterables):\n        raise TypeError('Expected argument \"values\" to be a \"proper\" iterable.  Found: %s' % type(values))\n    if not hasattr(values, '__iter__'):\n        raise TypeError('Expected argument \"values\" to be iterable.  Found: %s' % type(values))", "docstring": "Static assert that values is a \"proper\" iterable.\n\n`Ops` that expect iterables of `Tensor` can call this to validate input.\nUseful since `Tensor`, `ndarray`, byte/text type are all iterables themselves.\n\nArgs:\nvalues:  Object to be checked.\n\nRaises:\nTypeError:  If `values` is not iterable or is one of\n`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.", "source": "github-repos"}
{"code": "def create_from_json(cls, json_data):\n        \n        block = Block()\n        block_info = json_data[\"block_info\"]\n        block.block_id = block_info[\"block_id\"]\n        block.num_bins = block_info[\"num_bins\"] if \"num_bins\" in block_info else None\n        block.property_type = block_info[\"property_type\"] if \"property_type\" in block_info else None\n        block.meta = json_data[\"meta\"] if \"meta\" in json_data else None\n\n        block.component_results = _create_component_results(json_data, \"block_info\")\n\n        return block", "docstring": "Deserialize block json data into a Block object\n\nArgs:\njson_data (dict): The json data for this block\n\nReturns:\nBlock object", "source": "juraj-google-style"}
{"code": "def complete_acquaintance_strategy(qubit_order: Sequence[ops.Qid], acquaintance_size: int=0) -> circuits.Circuit:\n    if (acquaintance_size < 0):\n        raise ValueError('acquaintance_size must be non-negative.')\n    elif (acquaintance_size == 0):\n        return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)\n    if (acquaintance_size > len(qubit_order)):\n        return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)\n    if (acquaintance_size == len(qubit_order)):\n        return circuits.Circuit.from_ops(acquaint(*qubit_order), device=UnconstrainedAcquaintanceDevice)\n    strategy = circuits.Circuit.from_ops((acquaint(q) for q in qubit_order), device=UnconstrainedAcquaintanceDevice)\n    for size_to_acquaint in range(2, (acquaintance_size + 1)):\n        expose_acquaintance_gates(strategy)\n        replace_acquaintance_with_swap_network(strategy, qubit_order, size_to_acquaint)\n    return strategy", "docstring": "Returns an acquaintance strategy capable of executing a gate corresponding\nto any set of at most acquaintance_size qubits.\n\nArgs:\nqubit_order: The qubits on which the strategy should be defined.\nacquaintance_size: The maximum number of qubits to be acted on by\nan operation.\n\nReturns:\nAn circuit capable of implementing any set of k-local\noperation.", "source": "codesearchnet"}
{"code": "def workflow_stages(self) -> List[WorkflowStage]:\n    workflow_stages = []\n    stages = DB.get_hash_value(self.key, 'workflow_stages')\n    for index in range(len(ast.literal_eval(stages))):\n        workflow_stages.append(WorkflowStage(self.id, index))\n    return workflow_stages", "docstring": "Return list of workflow stages.\n\nReturns:\ndict, resources of a specified pb", "source": "codesearchnet"}
{"code": "async def create_artifact(context, path, target_path, content_type, content_encoding, storage_type='s3', expires=None):\n    payload = {'storageType': storage_type, 'expires': (expires or get_expiration_arrow(context).isoformat()), 'contentType': content_type}\n    args = [get_task_id(context.claim_task), get_run_id(context.claim_task), target_path, payload]\n    tc_response = (await context.temp_queue.createArtifact(*args))\n    skip_auto_headers = [aiohttp.hdrs.CONTENT_TYPE]\n    loggable_url = get_loggable_url(tc_response['putUrl'])\n    log.info('uploading {path} to {url}...'.format(path=path, url=loggable_url))\n    with open(path, 'rb') as fh:\n        async with async_timeout.timeout(context.config['artifact_upload_timeout']):\n            async with context.session.put(tc_response['putUrl'], data=fh, headers=_craft_artifact_put_headers(content_type, content_encoding), skip_auto_headers=skip_auto_headers, compress=False) as resp:\n                log.info('create_artifact {}: {}'.format(path, resp.status))\n                response_text = (await resp.text())\n                log.info(response_text)\n                if (resp.status not in (200, 204)):\n                    raise ScriptWorkerRetryException('Bad status {}'.format(resp.status))", "docstring": "Create an artifact and upload it.\n\nThis should support s3 and azure out of the box; we'll need some tweaking\nif we want to support redirect/error artifacts.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\npath (str): the path of the file to upload.\ntarget_path (str):\ncontent_type (str): Content type (MIME type) of the artifact. Values can be found via\nscriptworker.artifacts.guess_content_type_and_encoding()\ncontent_encoding (str): Encoding (per mimetypes' library) of the artifact. None is for no encoding. Values can\nbe found via scriptworker.artifacts.guess_content_type_and_encoding()\nstorage_type (str, optional): the taskcluster storage type to use.\nDefaults to 's3'\nexpires (str, optional): datestring of when the artifact expires.\nDefaults to None.\n\nRaises:\nScriptWorkerRetryException: on failure.", "source": "codesearchnet"}
{"code": "def _get_batches_of_transformed_samples(self, index_array):\n    raise NotImplementedError", "docstring": "Gets a batch of transformed samples.\n\nArgs:\nindex_array: Array of sample indices to include in batch.\nReturns:\nA batch of transformed samples.", "source": "github-repos"}
{"code": "def send(msg_type, send_async=False, *args, **kwargs):\n    message = message_factory(msg_type, *args, **kwargs)\n    try:\n        if send_async:\n            message.send_async()\n        else:\n            message.send()\n    except MessageSendError as e:\n        err_exit('Unable to send message: ', e)", "docstring": "Constructs a message class and sends the message.\nDefaults to sending synchronously.  Set send_async=True to send\nasynchronously.\n\nArgs:\n:msg_type: (str) the type of message to send, i.e. 'Email'\n:send_async: (bool) default is False, set True to send asynchronously.\n:kwargs: (dict) keywords arguments that are required for the\nvarious message types.  See docstrings for each type.\ni.e. help(messages.Email), help(messages.Twilio), etc.\n\nExample:\n>>> kwargs = {\nfrom_: 'me@here.com',\nto: 'you@there.com',\nauth: 'yourPassword',\nsubject: 'Email Subject',\nbody: 'Your message to send',\nattachments: ['filepath1', 'filepath2'],\n}\n>>> messages.send('email', **kwargs)\nMessage sent...", "source": "codesearchnet"}
{"code": "def create_model(text_in, timesteps, phase):\n    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=1e-05):\n        with tf.device('/cpu:0'):\n            embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])\n        lstm = embedded.cleave_sequence(timesteps).sequence_lstm(LOWER).sequence_lstm(UPPER)\n        return lstm.squash_sequence().dropout(keep_prob=0.8, phase=phase).fully_connected(CHARS, activation_fn=None)", "docstring": "Creates a 2 layer LSTM model with dropout.\n\nArgs:\ntext_in: The input text as ASCII ordinals in a Tensor.\ntimesteps: The number of timesteps in the sequence.\nphase: Phase controls whether or not dropout is active.  In training mode\nwe want to perform dropout, but in test we want to disable it.\nReturns:\nThe logits.", "source": "codesearchnet"}
{"code": "def update(self, resource, id_or_uri):\n    return self._client.update(resource=resource, uri=id_or_uri)", "docstring": "Updates a registered Device Manager.\n\nArgs:\nresource (dict): Object to update.\nid_or_uri: Can be either the Device manager ID or URI.\n\nReturns:\ndict: The device manager resource.", "source": "codesearchnet"}
{"code": "def get_hostname(url):\n    if (url not in URLHelper.__cache):\n        URLHelper.__cache[url] = urlparse(url)\n    parts = URLHelper.__cache[url].netloc.split('.')\n    if (len(parts) == 1):\n        return parts[0]\n    else:\n        return '.'.join(parts[(- 2):(- 1)])", "docstring": "Get the hostname of the given URL.\n\nArgs:\nurl (str): The URL to get the hostname from.\n\nReturns:\nstr: The hostname", "source": "codesearchnet"}
{"code": "def __batch_update(self, train_events, test_events, n_epoch):\n        \n        for epoch in range(n_epoch):\n            \n            \n            \n            if n_epoch != 1:\n                np.random.shuffle(train_events)\n\n            \n            for e in train_events:\n                self.rec.update(e, batch_train=True)\n\n            \n            MPR = self.__batch_evaluate(test_events)\n            if self.debug:\n                logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))", "docstring": "Batch update called by the fitting method.\n\nArgs:\ntrain_events (list of Event): Positive training events.\ntest_events (list of Event): Test events.\nn_epoch (int): Number of epochs for the batch training.", "source": "juraj-google-style"}
{"code": "def similar(self, **kwargs):\n    path = self._get_id_path('similar')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the similar TV series for a specific TV series id.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any TV method.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def check_valid_cpc_status(method, uri, cpc):\n    status = cpc.properties.get('status', None)\n    if (status is None):\n        return\n    valid_statuses = ['active', 'service-required', 'degraded', 'exceptions']\n    if (status not in valid_statuses):\n        if uri.startswith(cpc.uri):\n            raise ConflictError(method, uri, reason=1, message='The operation cannot be performed because the targeted CPC {} has a status that is not valid for the operation: {}'.format(cpc.name, status))\n        else:\n            raise ConflictError(method, uri, reason=6, message='The operation cannot be performed because CPC {} hosting the targeted resource has a status that is not valid for the operation: {}'.format(cpc.name, status))", "docstring": "Check that the CPC is in a valid status, as indicated by its 'status'\nproperty.\n\nIf the Cpc object does not have a 'status' property set, this function does\nnothing (in order to make the mock support easy to use).\n\nRaises:\nConflictError with reason 1: The CPC itself has been targeted by the\noperation.\nConflictError with reason 6: The CPC is hosting the resource targeted by\nthe operation.", "source": "codesearchnet"}
{"code": "def _get_arg_parser(func, types, args_and_defaults, delimiter_chars):\n    \n    _LOG.debug(\"Creating ArgumentParser for '%s'\", func.__name__)\n    (description, arg_help) = _prepare_doc(\n        func, [x for (x, _) in args_and_defaults], delimiter_chars)\n    parser = argparse.ArgumentParser(description=description)\n    for ((arg, default), arg_type) in zip_longest(args_and_defaults, types):\n        help_msg = arg_help[arg]\n        if default is NoDefault:\n            arg_type = arg_type or identity_type\n            if arg_type == bool:\n                _LOG.debug(\"Adding optional flag %s.%s\", func.__name__, arg)\n                parser.add_argument(\"--%s\" % arg, default=True, required=False,\n                                    action=\"store_false\",\n                                    help=\"%s. Defaults to True if not specified\"\n                                    % help_msg)\n            else:\n                _LOG.debug(\"Adding positional argument %s.%s\", func.__name__,\n                           arg)\n                parser.add_argument(arg, help=help_msg, type=arg_type)\n        else:\n            if default is None and arg_type is None:\n                raise ParseThisError(\"To use default value of 'None' you need \"\n                                     \"to specify the type of the argument '{}' \"\n                                     \"for the method '{}'\"\n                                     .format(arg, func.__name__))\n            arg_type = arg_type or type(default)\n            if arg_type == bool:\n                action = \"store_false\" if default else \"store_true\"\n                _LOG.debug(\"Adding optional flag %s.%s\", func.__name__, arg)\n                parser.add_argument(\"--%s\" % arg, help=help_msg,\n                                    default=default, action=action)\n            else:\n                _LOG.debug(\n                    \"Adding optional argument %s.%s\", func.__name__, arg)\n                parser.add_argument(\"--%s\" % arg, help=help_msg,\n                                    default=default, type=arg_type)\n    return parser", "docstring": "Return an ArgumentParser for the given function. Arguments are defined\nfrom the function arguments and their associated defaults.\n\nArgs:\nfunc: function for which we want an ArgumentParser\ntypes: types to which the command line arguments should be converted to\nargs_and_defaults: list of 2-tuples (arg_name, arg_default)\ndelimiter_chars: characters used to separate the parameters from their\nhelp message in the docstring", "source": "juraj-google-style"}
{"code": "def coarse_grain(G, ncg):\n        \n        if ncg <= 1:\n            return G\n        G = numpy.asarray(G)\n        nbin, remainder = divmod(G.shape[-1], ncg)\n        if remainder != 0:\n            nbin += 1\n        return numpy.transpose([\n            numpy.sum(G[..., i:i+ncg], axis=-1) / G[..., i:i+ncg].shape[-1]\n            for i in numpy.arange(0, ncg * nbin, ncg)\n            ])", "docstring": "Coarse-grain last index of array ``G``.\n\nBin the last index of array ``G`` in bins of width ``ncg``, and\nreplace each bin by its average. Return the binned results.\n\nArgs:\nG: Array to be coarse-grained.\nncg: Bin width for coarse-graining.", "source": "juraj-google-style"}
{"code": "def parse_flux_bounds(entry):\n    \n    lower_bound = None\n    upper_bound = None\n    for parameter in entry.kinetic_law_reaction_parameters:\n        pid, name, value, units = parameter\n        if pid == 'UPPER_BOUND' or name == 'UPPER_BOUND':\n            upper_bound = value\n        elif pid == 'LOWER_BOUND' or name == 'LOWER_BOUND':\n            lower_bound = value\n\n    return lower_bound, upper_bound", "docstring": "Return flux bounds for reaction entry.\n\nDetect flux bounds that are specified using the non-standardized\nkinetic law parameters which are used by many pre-FBC SBML models. The\nflux bounds are returned as a pair of lower, upper bounds. The returned\nbound is None if undefined.\n\nArgs:\nentry: :class:`SBMLReactionEntry`.", "source": "juraj-google-style"}
{"code": "def _probe_characteristics_finished(self, result):\n    handle = result['context']['handle']\n    conn_id = result['context']['connection_id']\n    conndata = self._get_connection(handle, 'preparing')\n    if (conndata is None):\n        self._logger.info('Connection disconnected before probe_char... finished, conn_id=%d', conn_id)\n        return\n    callback = conndata['callback']\n    if (result['result'] is False):\n        conndata['failed'] = True\n        conndata['failure_reason'] = 'Could not probe GATT characteristics'\n        self.disconnect_async(conn_id, self._on_connection_failed)\n        return\n    services = result['return_value']['services']\n    if (TileBusService not in services):\n        conndata['failed'] = True\n        conndata['failure_reason'] = 'TileBus service not present in GATT services'\n        self.disconnect_async(conn_id, self._on_connection_failed)\n        return\n    conndata['chars_done_time'] = time.time()\n    service_time = (conndata['services_done_time'] - conndata['connect_time'])\n    char_time = (conndata['chars_done_time'] - conndata['services_done_time'])\n    total_time = (service_time + char_time)\n    conndata['state'] = 'connected'\n    conndata['services'] = services\n    conndata['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)\n    conndata['parser'].context = conn_id\n    del conndata['disconnect_handler']\n    with self.count_lock:\n        self.connecting_count -= 1\n    self._logger.info('Total time to connect to device: %.3f (%.3f enumerating services, %.3f enumerating chars)', total_time, service_time, char_time)\n    callback(conndata['connection_id'], self.id, True, None)", "docstring": "Callback when BLE adapter has finished probing services and characteristics for a device\n\nArgs:\nresult (dict): Result from the probe_characteristics command", "source": "codesearchnet"}
{"code": "def process_response(self, req, resp, resource):\n        \n        if isinstance(resp.body, dict):\n            try:\n                resp.body = json.dumps(resp.body)\n            except(nameError):\n                resp.status = falcon.HTTP_500", "docstring": "Post-processing of the response (after routing).\n\nArgs:\nreq: Request object.\nresp: Response object.\nresource: Resource object to which the request was\nrouted. May be None if no route was found\nfor the request.", "source": "juraj-google-style"}
{"code": "def parse_individual(sample):\n    ind_info = {}\n    if ('sample_id' not in sample):\n        raise PedigreeError(\"One sample is missing 'sample_id'\")\n    sample_id = sample['sample_id']\n    if ('sex' not in sample):\n        raise PedigreeError((\"Sample %s is missing 'sex'\" % sample_id))\n    sex = sample['sex']\n    if (sex not in REV_SEX_MAP):\n        log.warning(\"'sex' is only allowed to have values from {}\".format(', '.join(list(REV_SEX_MAP.keys()))))\n        raise PedigreeError(('Individual %s has wrong formated sex' % sample_id))\n    if ('phenotype' not in sample):\n        raise PedigreeError((\"Sample %s is missing 'phenotype'\" % sample_id))\n    phenotype = sample['phenotype']\n    if (phenotype not in REV_PHENOTYPE_MAP):\n        log.warning(\"'phenotype' is only allowed to have values from {}\".format(', '.join(list(REV_PHENOTYPE_MAP.keys()))))\n        raise PedigreeError(('Individual %s has wrong formated phenotype' % sample_id))\n    ind_info['individual_id'] = sample_id\n    ind_info['display_name'] = sample.get('sample_name', sample['sample_id'])\n    ind_info['sex'] = sex\n    ind_info['phenotype'] = phenotype\n    ind_info['father'] = sample.get('father')\n    ind_info['mother'] = sample.get('mother')\n    ind_info['confirmed_parent'] = sample.get('confirmed_parent')\n    ind_info['confirmed_sex'] = sample.get('confirmed_sex')\n    ind_info['predicted_ancestry'] = sample.get('predicted_ancestry')\n    bam_file = sample.get('bam_path')\n    if bam_file:\n        ind_info['bam_file'] = bam_file\n    mt_bam = sample.get('mt_bam')\n    if mt_bam:\n        ind_info['mt_bam'] = mt_bam\n    analysis_type = sample.get('analysis_type')\n    if analysis_type:\n        ind_info['analysis_type'] = analysis_type\n    ind_info['capture_kits'] = ([sample.get('capture_kit')] if ('capture_kit' in sample) else [])\n    vcf2cytosure = sample.get('vcf2cytosure')\n    if vcf2cytosure:\n        ind_info['vcf2cytosure'] = vcf2cytosure\n    tumor_type = sample.get('tumor_type')\n    if tumor_type:\n        ind_info['tumor_type'] = tumor_type\n    tumor_mutational_burden = sample.get('tmb')\n    if tumor_mutational_burden:\n        ind_info['tmb'] = tumor_mutational_burden\n    msi = sample.get('msi')\n    if msi:\n        ind_info['msi'] = msi\n    tumor_purity = sample.get('tumor_purity')\n    if tumor_purity:\n        ind_info['tumor_purity'] = tumor_purity\n    return ind_info", "docstring": "Parse individual information\n\nArgs:\nsample (dict)\n\nReturns:\n{\n'individual_id': str,\n'father': str,\n'mother': str,\n'display_name': str,\n'sex': str,\n'phenotype': str,\n'bam_file': str,\n'vcf2cytosure': str,\n'analysis_type': str,\n'capture_kits': list(str),\n}", "source": "codesearchnet"}
{"code": "def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True):\n    num_pages_for_vocab_generation = (approx_vocab_size \n    vocab_file = vocab_filename(approx_vocab_size, strip)\n\n    def my_generator(data_prefix):\n        'Line generator for vocab.'\n        count = 0\n        for page in corpus_page_generator(all_corpus_files(data_prefix)[::(- 1)], tmp_dir, max_page_size_exp):\n            revisions = page['revisions']\n            if revisions:\n                text = get_text(revisions[(- 1)], strip=strip)\n                (yield text)\n                count += 1\n                if ((count % 100) == 0):\n                    tf.logging.info(('reading pages for vocab %d' % count))\n                if (count > num_pages_for_vocab_generation):\n                    break\n    return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, approx_vocab_size, my_generator(data_prefix))", "docstring": "Get or generate the vocabulary.\n\nArgs:\ndata_dir: a string\ntmp_dir: a string\ndata_prefix: a string\nmax_page_size_exp: an integer\napprox_vocab_size: an integer\nstrip: a boolean\n\nReturns:\na TextEncoder", "source": "codesearchnet"}
{"code": "def _format_src_url(self, path, caller_system):\n    path = ('%s/%s' % (self._endpoint, self.relpath(path)))\n    if (caller_system is not self):\n        try:\n            path = ('%s?%s' % (path, self._storage_parameters['sas_token']))\n        except KeyError:\n            pass\n    return path", "docstring": "Ensure path is absolute and use the correct URL format for use with\ncross Azure storage account copy function.\n\nArgs:\npath (str): Path or URL.\ncaller_system (pycosio.storage.azure._AzureBaseSystem subclass):\nSystem calling this method (Can be another Azure system).\n\nReturns:\nstr: URL.", "source": "codesearchnet"}
{"code": "def HandleSimpleResponses(\n            self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):\n        \n        return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms)", "docstring": "Accepts normal responses from the device.\n\nArgs:\ntimeout_ms: Timeout in milliseconds to wait for each response.\ninfo_cb: Optional callback for text sent from the bootloader.\n\nReturns:\nOKAY packet's message.", "source": "juraj-google-style"}
{"code": "def check_array_lengths(inputs, targets, weights=None):\n\n    def is_tensor_or_composite_tensor(x):\n        return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)\n\n    def set_of_lengths(x):\n        if x is None:\n            return {}\n        else:\n            return set([y.shape[0] for y in x if y is not None and (not is_tensor_or_composite_tensor(y))])\n    set_x = set_of_lengths(inputs)\n    set_y = set_of_lengths(targets)\n    set_w = set_of_lengths(weights)\n    if len(set_x) > 1:\n        raise ValueError('All input arrays (x) should have the same number of samples. Got array shapes: ' + str([x.shape for x in inputs]))\n    if len(set_y) > 1:\n        raise ValueError('All target arrays (y) should have the same number of samples. Got array shapes: ' + str([y.shape for y in targets]))\n    if set_x and set_y and (list(set_x)[0] != list(set_y)[0]):\n        raise ValueError('Input arrays should have the same number of samples as target arrays. Found ' + str(list(set_x)[0]) + ' input samples and ' + str(list(set_y)[0]) + ' target samples.')\n    if len(set_w) > 1:\n        raise ValueError('All sample_weight arrays should have the same number of samples. Got array shapes: ' + str([w.shape for w in weights]))\n    if set_y and set_w and (list(set_y)[0] != list(set_w)[0]):\n        raise ValueError('Sample_weight arrays should have the same number of samples as target arrays. Got ' + str(list(set_y)[0]) + ' input samples and ' + str(list(set_w)[0]) + ' target samples.')", "docstring": "Does user input validation for numpy arrays.\n\nArgs:\ninputs: list of Numpy arrays of inputs.\ntargets: list of Numpy arrays of targets.\nweights: list of Numpy arrays of sample weights.\n\nRaises:\nValueError: in case of incorrectly formatted data.", "source": "github-repos"}
{"code": "def users(self, proc):\n    ret = {}\n    if (self.first_column in ['USER', 'UID']):\n        for row in self.data:\n            if (proc == row[self.command_name]):\n                if (row[self.first_column] not in ret):\n                    ret[row[self.first_column]] = []\n                ret[row[self.first_column]].append(row['PID'])\n    return ret", "docstring": "Searches for all users running a given command.\n\nReturns:\ndict: each username as a key to a list of PIDs (as strings) that\nare running the given process.\n``{}`` if  neither ``USER`` nor ``UID`` is found or ``proc`` is not found.\n\n.. note::\n'proc' must match the entire command and arguments.", "source": "codesearchnet"}
{"code": "def check_channel(fcn):\n\n    def wrapper(*args, **kwargs):\n        if (not isinstance(args[1], ChannelResource)):\n            raise RuntimeError('resource must be an instance of intern.resource.boss.ChannelResource.')\n        if (not args[1].cutout_ready):\n            raise PartialChannelResourceError('ChannelResource not fully initialized.  Use intern.remote.BossRemote.get_channel({}, {}, {})'.format(args[1].name, args[1].coll_name, args[1].exp_name))\n        return fcn(*args, **kwargs)\n    return wrapper", "docstring": "Decorator that ensures a valid channel passed in.\n\nArgs:\nfcn (function): Function that has a ChannelResource as its second argument.\n\nReturns:\n(function): Wraps given function with one that checks for a valid channel.", "source": "codesearchnet"}
{"code": "def bessel_i0(x, name=None):\n    with ops.name_scope(name, 'bessel_i0', [x]):\n        return gen_special_math_ops.bessel_i0(x)", "docstring": "Computes the Bessel i0 function of `x` element-wise.\n\nModified Bessel function of order 0.\n\nIt is preferable to use the numerically stabler function `i0e(x)` instead.\n\n>>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy()\narray([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32)\n\nArgs:\nx: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n`float32`, `float64`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.i0\n@end_compatibility", "source": "github-repos"}
{"code": "def differential(P, Q):\n    \n    P, Q = Poly(P), Poly(Q)\n\n    if not chaospy.poly.is_decomposed(Q):\n        differential(chaospy.poly.decompose(Q)).sum(0)\n\n    if Q.shape:\n        return Poly([differential(P, q) for q in Q])\n\n    if Q.dim>P.dim:\n        P = chaospy.poly.setdim(P, Q.dim)\n    else:\n        Q = chaospy.poly.setdim(Q, P.dim)\n\n    qkey = Q.keys[0]\n\n    A = {}\n    for key in P.keys:\n\n        newkey = numpy.array(key) - numpy.array(qkey)\n\n        if numpy.any(newkey<0):\n            continue\n\n        A[tuple(newkey)] = P.A[key]*numpy.prod([fac(key[i], \\\n            exact=True)/fac(newkey[i], exact=True) \\\n            for i in range(P.dim)])\n\n    return Poly(B, P.dim, P.shape, P.dtype)", "docstring": "Polynomial differential operator.\n\nArgs:\nP (Poly):\nPolynomial to be differentiated.\nQ (Poly):\nPolynomial to differentiate by. Must be decomposed. If polynomial\narray, the output is the Jacobian matrix.", "source": "juraj-google-style"}
{"code": "def _extract_namespace_ast_node(self, desc):\n    if ((len(desc) == 0) or (not isinstance(desc[0], AstNamespace))):\n        if self._debug:\n            self._logger.info('Description: %r', desc)\n        raise InvalidSpec('First declaration in a stone must be a namespace. Possibly caused by preceding errors.', desc[0].lineno, desc[0].path)\n    for item in desc[1:]:\n        if isinstance(item, AstNamespace):\n            raise InvalidSpec('Only one namespace declaration per file.', item[0].lineno, item[0].path)\n    return desc.pop(0)", "docstring": "Checks that the namespace is declared first in the spec, and that only\none namespace is declared.\n\nArgs:\ndesc (List[stone.stone.parser.ASTNode]): All AST nodes in a spec\nfile in the order they were defined.\n\nReturn:\nstone.frontend.ast.AstNamespace: The namespace AST node.", "source": "codesearchnet"}
{"code": "def modify_lattice(self, new_lattice):\n    self._lattice = new_lattice\n    for site in self._sites:\n        site.lattice = new_lattice", "docstring": "Modify the lattice of the structure.  Mainly used for changing the\nbasis.\n\nArgs:\nnew_lattice (Lattice): New lattice", "source": "codesearchnet"}
{"code": "def FindEnumTypeByName(self, full_name):\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if (full_name not in self._enum_descriptors):\n        self.FindFileContainingSymbol(full_name)\n    return self._enum_descriptors[full_name]", "docstring": "Loads the named enum descriptor from the pool.\n\nArgs:\nfull_name: The full name of the enum descriptor to load.\n\nReturns:\nThe enum descriptor for the named type.", "source": "codesearchnet"}
{"code": "def run_board(args):\n    init_config(args)\n    from backend.collector import CollectorService\n    service = CollectorService(args.logdir, args.reload_interval, standalone=False, log_level=args.log_level)\n    service.run()\n    logger.info(('Try to start automlboard on port %s\\n' % args.port))\n    command = [os.path.join(root_path, 'manage.py'), 'runserver', ('0.0.0.0:%s' % args.port), '--noreload']\n    execute_from_command_line(command)", "docstring": "Run main entry for AutoMLBoard.\n\nArgs:\nargs: args parsed from command line", "source": "codesearchnet"}
{"code": "async def game(self, short_name, *, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None):\n    result = types.InputBotInlineResultGame(id=(id or ''), short_name=short_name, send_message=(await self._message(text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons)))\n    if (id is None):\n        result.id = hashlib.sha256(bytes(result)).hexdigest()\n    return result", "docstring": "Creates a new inline result of game type.\n\nArgs:\nshort_name (`str`):\nThe short name of the game to use.", "source": "codesearchnet"}
{"code": "def save_array_types(self, fname):\n    \n    type_defs = {'arrays': sorted(list(self.array_types))}\n    with open(fname, 'wt') as fh:\n      pprint(type_defs, stream=fh)", "docstring": "Save array type registry to a file\n\nArgs:\nfname (str): Name of file to save array database to", "source": "juraj-google-style"}
{"code": "def encode_bqm_as_qp(solver, linear, quadratic):\n    \n    active = active_qubits(linear, quadratic)\n\n    \n    \n    \n    \n    \n    \n    nan = float('nan')\n    lin = [uniform_get(linear, qubit, 0 if qubit in active else nan)\n           for qubit in solver._encoding_qubits]\n    lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin))\n\n    \n    \n    \n    quad = [quadratic.get((q1,q2), 0) + quadratic.get((q2,q1), 0)\n            for (q1,q2) in solver._encoding_couplers\n            if q1 in active and q2 in active]\n    quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad))\n\n    \n    \n    return {\n        'format': 'qp',\n        'lin': lin.decode('utf-8'),\n        'quad': quad.decode('utf-8')\n    }", "docstring": "Encode the binary quadratic problem for submission to a given solver,\nusing the `qp` format for data.\n\nArgs:\nsolver (:class:`dwave.cloud.solver.Solver`):\nThe solver used.\n\nlinear (dict[variable, bias]/list[variable, bias]):\nLinear terms of the model.\n\nquadratic (dict[(variable, variable), bias]):\nQuadratic terms of the model.\n\nReturns:\nencoded submission dictionary", "source": "juraj-google-style"}
{"code": "def unbroadcast_tfe_to(tensor, shape):\n  \n  axis = utils.create_unbroadcast_axis(shape, shape_as_list(tensor))\n  return tf.reshape(tf.reduce_sum(tensor, axis=axis), shape)", "docstring": "Reverse the broadcasting operation.\n\nSee utils.py.\n\nArgs:\ntensor: A Tensor.\nshape: A shape that could have been broadcasted to the shape of tensor.\n\nReturns:\nTensor with dimensions summed to match `shape`.", "source": "juraj-google-style"}
{"code": "def id_to_piece(input, model_file=None, model_proto=None, name=None):\n    return _gen_sentencepiece_processor_op.sentencepiece_id_to_piece(input, model_file=model_file, model_proto=model_proto, name=name)", "docstring": "Converts vocabulary id into piece.\n\nArgs:\ninput: An arbitrary tensor of int32.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of string with the same shape as input.", "source": "codesearchnet"}
{"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(RevokeResponsePayload, self).read(\n            istream,\n            kmip_version=kmip_version\n        )\n        tstream = BytearrayStream(istream.read(self.length))\n\n        self.unique_identifier = attributes.UniqueIdentifier()\n        self.unique_identifier.read(tstream, kmip_version=kmip_version)\n\n        self.is_oversized(tstream)\n        self.validate()", "docstring": "Read the data encoding the RevokeResponsePayload object and decode it\ninto its constituent parts.\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def variant_export_lines(store, case_obj, variants_query):\n    \n\n    export_variants = []\n\n    for variant in variants_query:\n        variant_line = []\n        position = variant['position']\n        change = variant['reference']+'>'+variant['alternative']\n        variant_line.append(variant['rank_score'])\n        variant_line.append(variant['chromosome'])\n        variant_line.append(position)\n        variant_line.append(change)\n        variant_line.append('_'.join([str(position), change]))\n\n        \n        gene_list = variant.get('genes') \n        gene_ids = []\n        gene_names = []\n        hgvs_c = []\n\n        \n        if len(gene_list) > 0:\n            for gene_obj in gene_list:\n                hgnc_id = gene_obj['hgnc_id']\n                gene_name = gene(store, hgnc_id)['symbol']\n\n                gene_ids.append(hgnc_id)\n                gene_names.append(gene_name)\n\n                hgvs_nucleotide = '-'\n                \n                transcripts_list = gene_obj.get('transcripts')\n                for transcript_obj in transcripts_list:\n                    if transcript_obj.get('is_canonical') and transcript_obj.get('is_canonical') is True:\n                        hgvs_nucleotide = str(transcript_obj.get('coding_sequence_name'))\n                hgvs_c.append(hgvs_nucleotide)\n\n            variant_line.append(';'.join( str(x) for x in  gene_ids))\n            variant_line.append(';'.join( str(x) for x in  gene_names))\n            variant_line.append(';'.join( str(x) for x in  hgvs_c))\n        else:\n            while i < 4:\n                variant_line.append('-') \n                i = i+1\n\n        variant_gts = variant['samples'] \n        for individual in case_obj['individuals']:\n            for variant_gt in variant_gts:\n                if individual['individual_id'] == variant_gt['sample_id']:\n                    \n                    variant_line.append(variant_gt['allele_depths'][0]) \n                    variant_line.append(variant_gt['allele_depths'][1]) \n                    \n                    variant_line.append(variant_gt['genotype_quality'])\n\n        variant_line = [str(i) for i in variant_line]\n        export_variants.append(\",\".join(variant_line))\n\n    return export_variants", "docstring": "Get variants info to be exported to file, one list (line) per variant.\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ncase_obj(scout.models.Case)\nvariants_query: a list of variant objects, each one is a dictionary\n\nReturns:\nexport_variants: a list of strings. Each string  of the list corresponding to the fields\nof a variant to be exported to file, separated by comma", "source": "juraj-google-style"}
{"code": "def save(f, arr, vocab):\n    itr = iter(vocab)\n    (word, idx) = next(itr)\n    _write_line(f, arr[idx], word)\n    for (word, idx) in itr:\n        f.write(b'\\n')\n        _write_line(f, arr[idx], word)", "docstring": "Save word embedding file.\n\nArgs:\nf (File): File to write the vectors. File should be open for writing\nascii.\narr (numpy.array): Numpy array with ``float`` dtype.\nvocab (iterable): Each element is pair of a word (``bytes``) and ``arr``\nindex (``int``). Word should be encoded to str apriori.", "source": "codesearchnet"}
{"code": "def remove(self, force=False):\n    return self.client.api.remove_plugin(self.name, force=force)", "docstring": "Remove the plugin from the server.\n\nArgs:\nforce (bool): Remove even if the plugin is enabled.\nDefault: False\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def mtf_range(mesh, dim, dtype, name=None):\n    dim = convert_to_dimension(dim)\n    with tf.variable_scope(name, default_name='range'):\n        if (dtype == tf.bfloat16):\n            tf_range = tf.cast(tf.range(dim.size), tf.bfloat16)\n        else:\n            tf_range = tf.range(dim.size, dtype=dtype)\n        return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))", "docstring": "Create a 1d mesh tensor with a range from [0, dim.size).\n\nCall externally as mtf.range()\n\nArgs:\nmesh: a Mesh\ndim: a Dimension\ndtype: a tf.DType\nname: an optional string\n\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def action(elem, doc):\n    if isinstance(elem, pf.CodeBlock):\n        doc.listings_counter += 1\n        elems = ([elem] if ('hide' not in elem.classes) else [])\n        if ('file' in elem.attributes):\n            elem.text = read_file(elem.attributes['file'])\n            filename = trimpath(elem.attributes)\n            prefix = pf.Emph(pf.Str('File:'))\n        if ('exec' in elem.classes):\n            if (('interactive' in elem.classes) or (elem.text[:4] == '>>> ')):\n                elem.text = execute_interactive_code(elem, doc)\n            else:\n                result = execute_code_block(elem, doc)\n                if ('hideimports' in elem.classes):\n                    elem.text = remove_import_statements(elem.text)\n                if (('plt' in elem.attributes) or ('plt' in elem.classes)):\n                    doc.plot_found = True\n                    result = maybe_center_plot(result)\n                    block = pf.RawBlock(result, format='latex')\n                else:\n                    block = pf.CodeBlock(result, classes=['changelog'])\n                elems += [pf.Para(pf.Emph(pf.Str('Output:'))), block]\n        if ('lines' in elem.attributes):\n            elem.text = filter_lines(elem.text, elem.attributes['lines'])\n        label = elem.attributes.get('label', f'cl:{doc.listings_counter}')\n        if ('caption' in elem.attributes.keys()):\n            doc.caption_found = True\n            cap = pf.convert_text(elem.attributes['caption'], output_format='latex')\n            if ('shortcaption' in elem.attributes.keys()):\n                shortcap = pf.convert_text(elem.attributes['shortcaption'], output_format='latex')\n            else:\n                shortcap = cap\n            if ('file' in elem.attributes.keys()):\n                cap += pf.convert_text(f'&nbsp;(`{filename}`)', output_format='latex')\n            elems = make_codelisting(elems, cap, label, shortcaption=shortcap, above=('capbelow' not in elem.classes))\n        elif ('caption' in elem.classes):\n            doc.caption_found = True\n            cap = ''\n            if ('file' in elem.attributes.keys()):\n                cap = pf.convert_text(f'`{filename}`', output_format='latex')\n            elems = make_codelisting(elems, cap, label, above=('capbelow' not in elem.classes))\n        elif ('file' in elem.attributes.keys()):\n            elems.insert(0, pf.Para(prefix, pf.Space, pf.Code(filename)))\n        return elems", "docstring": "Processes pf.CodeBlocks.\n\nFor details and a specification of how each command should behave,\ncheck the example files (especially the md and pdf)!\n\nArgs:\nelem: The element to process.\ndoc:  The document.\n\nReturns:\nA changed element or None.", "source": "codesearchnet"}
{"code": "def enable_eager_execution_internal(config=None, device_policy=None, execution_mode=None, server_def=None) -> None:\n    if config is not None and (not isinstance(config, config_pb2.ConfigProto)):\n        raise TypeError('config must be a tf.ConfigProto, but got %s' % type(config))\n    if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT, context.DEVICE_PLACEMENT_WARN, context.DEVICE_PLACEMENT_SILENT, context.DEVICE_PLACEMENT_SILENT_FOR_INT32):\n        raise ValueError('device_policy must be one of None, DEVICE_PLACEMENT_*')\n    if execution_mode not in (None, context.SYNC, context.ASYNC):\n        raise ValueError('execution_mode must be one of None, SYNC, ASYNC')\n    if context.default_execution_mode == context.GRAPH_MODE:\n        graph_mode_has_been_used = _default_graph_stack._global_default_graph is not None\n        if graph_mode_has_been_used:\n            raise ValueError('tf.enable_eager_execution must be called at program startup.')\n    context.default_execution_mode = context.EAGER_MODE\n    with context._context_lock:\n        if context._context is None:\n            context._set_context_locked(context.Context(config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=server_def))\n        elif config is not None and config is not context._context._config or (device_policy is not None and device_policy is not context._context._device_policy) or (execution_mode is not None and execution_mode is not context._context._execution_mode):\n            raise ValueError('Trying to change the options of an active eager execution. Context config: %s, specified config: %s. Context device policy: %s, specified device policy: %s. Context execution mode: %s,  specified execution mode %s.' % (context._context._config, config, context._context._device_policy, device_policy, context._context._execution_mode, execution_mode))\n        else:\n            context._context._thread_local_data.is_eager = True\n    context.context = context.context_safe", "docstring": "Enables eager execution for the lifetime of this program.\n\nMost of the doc string for enable_eager_execution is relevant here as well.\n\nArgs:\nconfig: See enable_eager_execution doc string\ndevice_policy: See enable_eager_execution doc string\nexecution_mode: See enable_eager_execution doc string\nserver_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on\nremote devices. GrpcServers need to be started by creating an identical\nserver_def to this, and setting the appropriate task_indexes, so that the\nservers can communicate. It will then be possible to execute operations on\nremote devices.\n\nRaises:\nValueError", "source": "github-repos"}
{"code": "def squid_to_guid(squid):\n    \n    squid_pattern = re.compile(r'^(\\w{8})(\\w{4})(\\w{4})(\\w\\w)(\\w\\w)(\\w\\w)(\\w\\w)(\\w\\w)(\\w\\w)(\\w\\w)(\\w\\w)$')\n    squid_match = squid_pattern.match(squid)\n    guid = ''\n    if squid_match is not None:\n        guid = '{' + \\\n               squid_match.group(1)[::-1]+'-' + \\\n               squid_match.group(2)[::-1]+'-' + \\\n               squid_match.group(3)[::-1]+'-' + \\\n               squid_match.group(4)[::-1]+squid_match.group(5)[::-1] + '-'\n        for index in range(6, 12):\n            guid += squid_match.group(index)[::-1]\n        guid += '}'\n    return guid", "docstring": "Converts a compressed GUID (SQUID) back into a GUID\n\nArgs:\n\nsquid (str): A valid compressed GUID\n\nReturns:\nstr: A valid GUID", "source": "juraj-google-style"}
{"code": "def xor_bytes(a, b):\n    \n    assert isinstance(a, bytes)\n    assert isinstance(b, bytes)\n    assert len(a) == len(b)\n    res = bytearray()\n    for i in range(len(a)):\n        res.append(a[i] ^ b[i])\n    return bytes(res)", "docstring": "XOR on two bytes objects\n\nArgs:\na (bytes): object 1\nb (bytes): object 2\n\nReturns:\nbytes: The XOR result", "source": "juraj-google-style"}
{"code": "def generate_masks_with_special_tokens_and_transfer_map(input_ids: torch.LongTensor) -> Tuple[Tensor, Tensor]:\n    batch_size, num_token = input_ids.shape\n    special_tokens_mask = torch.zeros((batch_size, num_token), device=input_ids.device).bool()\n    for special_token in SPECIAL_TOKENS:\n        special_tokens_mask |= input_ids == special_token\n    idxs = torch.nonzero(special_tokens_mask)\n    attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(batch_size, 1, 1)\n    position_ids = torch.zeros((batch_size, num_token), device=input_ids.device)\n    previous_col = 0\n    for i in range(idxs.shape[0]):\n        row, col = idxs[i]\n        if col == 0 or col == num_token - 1:\n            attention_mask[row, col, col] = True\n            position_ids[row, col] = 0\n        else:\n            attention_mask[row, previous_col + 1:col + 1, previous_col + 1:col + 1] = True\n            position_ids[row, previous_col + 1:col + 1] = torch.arange(0, col - previous_col, device=input_ids.device)\n        previous_col = col\n    return (attention_mask, position_ids.to(torch.long))", "docstring": "Generate attention mask between each pair of special tokens and positional ids.\nArgs:\ninput_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\nIndices of input sequence tokens in the vocabulary.\nReturns:\n`tuple(torch.Tensor)` comprising attention mask between each special tokens and position_ids:\n- **attention_mask** (`torch.BoolTensor` of shape `(batch_size, sequence_length, sequence_length)`)\n- **position_ids** (`torch.LongTensor` of shape `(batch_size, sequence_length)`)", "source": "github-repos"}
{"code": "def IsOutOfLineMethodDefinition(clean_lines, linenum):\n    for i in xrange(linenum, max((- 1), (linenum - 10)), (- 1)):\n        if Match('^([^()]*\\\\w+)\\\\(', clean_lines.elided[i]):\n            return (Match('^[^()]*\\\\w+::\\\\w+\\\\(', clean_lines.elided[i]) is not None)\n    return False", "docstring": "Check if current line contains an out-of-line method definition.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if current line contains an out-of-line method definition.", "source": "codesearchnet"}
{"code": "def adafactor_decay_rate_adam(beta2):\n  \n  t = tf.to_float(tf.train.get_or_create_global_step()) + 1.0\n  decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t))\n  \n  return decay", "docstring": "Second-moment decay rate like Adam, subsuming the correction factor.\n\nArgs:\nbeta2: a float between 0 and 1\nReturns:\na scalar", "source": "juraj-google-style"}
{"code": "def l2_regularizer(weight=1.0, scope=None):\n  \n  def regularizer(tensor):\n    with tf.name_scope(scope, 'L2Regularizer', [tensor]):\n      l2_weight = tf.convert_to_tensor(weight,\n                                       dtype=tensor.dtype.base_dtype,\n                                       name='weight')\n      return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')\n  return regularizer", "docstring": "Define a L2 regularizer.\n\nArgs:\nweight: scale the loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\na regularizer function.", "source": "juraj-google-style"}
{"code": "def ExpectedEnginesToBuild(self, run_params):\n    return [f'TRTEngineOp_{seq_id:03d}' for seq_id in range(len(self.max_batch_sizes))]", "docstring": "Checks that the expected engine is built.\n\nArgs:\nrun_params: the run parameters.\n\nReturns:\nthe expected engines to build.\n\nThere shall be engines generated for each maximum batch size.", "source": "github-repos"}
{"code": "def start_instance(self):\n    start_url = self._get_url('start_path')\n    res = self.rest_client.session.put(start_url, json={})\n    _handle_http_errors(res)\n    return res.json()", "docstring": "Start the instance for this Streaming Analytics service.\n\nReturns:\ndict: JSON response for the instance start operation.", "source": "codesearchnet"}
{"code": "def add_output(self, name, value):\n    self.template.add_output(Output(name, Value=value))", "docstring": "Simple helper for adding outputs.\n\nArgs:\nname (str): The name of the output to create.\nvalue (str): The value to put in the output.", "source": "codesearchnet"}
{"code": "def _callback_main(self, call, handler='edit_config', target='running', source='startup'):\n    try:\n        if (handler == 'get_config'):\n            call = ET.tostring(call.getchildren()[0])\n            return self._mgr.get(filter=('subtree', call))\n        call = ET.tostring(call)\n        if (handler == 'get'):\n            call_element = xml_.to_ele(call)\n            return ET.fromstring(str(self._mgr.dispatch(call_element)))\n        if (handler == 'edit_config'):\n            self._mgr.edit_config(target=target, config=call)\n        if (handler == 'delete_config'):\n            self._mgr.delete_config(target=target)\n        if (handler == 'copy_config'):\n            self._mgr.copy_config(target=target, source=source)\n    except (ncclient.transport.TransportError, ncclient.transport.SessionCloseError, ncclient.transport.SSHError, ncclient.transport.AuthenticationError, ncclient.transport.SSHUnknownHostError) as error:\n        logging.error(error)\n        raise DeviceCommError", "docstring": "Callback for NETCONF calls.\n\nArgs:\ncall: An Element Tree element containing the XML of the NETCONF\ncall you intend to make to the device.\nhandler: Type of ncclient call to make.\nget_config: NETCONF standard get config.\nget: ncclient dispatch. For custom RPCs.\nedit_config: NETCONF standard edit.\ndelete_config: NETCONF standard delete.\ncopy_config: NETCONF standard copy.\ntarget: Target configuration location for action. Only used for\nedit_config, delete_config, and copy_config.\nsource: Source of configuration information for copying\nconfiguration. Only used for copy_config.\n\nReturns:\nNone\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def __init__(self, structure, defect_site, charge=0.):\n        \n        self._structure = structure\n        self._charge = charge\n        self._defect_site = defect_site\n        if structure.lattice != defect_site.lattice:\n            raise ValueError(\"defect_site lattice must be same as structure lattice.\")", "docstring": "Initializes an abstract defect\n\nArgs:\nstructure: Pymatgen Structure without any defects\ndefect_site (Site): site for defect within structure\nmust have same lattice as structure\ncharge: (int or float) defect charge\ndefault is zero, meaning no change to NELECT after defect is created in the structure\n(assuming use_structure_charge=True in vasp input set)", "source": "juraj-google-style"}
{"code": "def dbmin50years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmin50years`'.format(value))\n\n        self._dbmin50years = value", "docstring": "Corresponds to IDD Field `dbmin50years`\n50-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin50years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def watch(self, path, recursive=False):\n        \n        self._logger.info('Initializing watcher for path \"%s\"', path)\n\n        handler = FileHandler(self)\n        self._observer = Observer()\n        self._observer.schedule(handler, path, recursive)\n\n        self._logger.info('Starting watcher')\n        self._observer.start()\n        self._watch = True\n\n        try:\n            self._logger.info('Waiting for file events')\n            while self._watch:\n                time.sleep(1)\n        except KeyboardInterrupt: \n            self.stop_watching()\n\n        self._observer.join()", "docstring": "Watch for files in a directory and apply normalizations.\n\nWatch for new or changed files in a directory and apply\nnormalizations over them.\n\nArgs:\npath: Path to the directory.\nrecursive: Whether to find files recursively or not.", "source": "juraj-google-style"}
{"code": "def cancelMktDepth(self, contract: Contract, isSmartDepth=False):\n    ticker = self.ticker(contract)\n    reqId = self.wrapper.endTicker(ticker, 'mktDepth')\n    if reqId:\n        self.client.cancelMktDepth(reqId, isSmartDepth)\n    else:\n        self._logger.error(f'cancelMktDepth: No reqId found for contract {contract}')", "docstring": "Unsubscribe from market depth data.\n\nArgs:\ncontract: The exact contract object that was used to\nsubscribe with.", "source": "codesearchnet"}
{"code": "def _nested_from_proto(nested_proto, process_leafs):\n    if (not isinstance(nested_proto, module_pb2.NestedData)):\n        raise base_errors.ModuleInfoError('Expected module_pb2.NestedData.')\n    if nested_proto.HasField('value'):\n        value = nested_proto.value\n        if (not value):\n            value = _UnserializableObject()\n        else:\n            value = process_leafs(value)\n        return value\n    elif nested_proto.HasField('list'):\n        return [_nested_from_proto(child, process_leafs) for child in nested_proto.list.list]\n    elif nested_proto.HasField('tuple'):\n        return tuple((_nested_from_proto(child, process_leafs) for child in nested_proto.tuple.list))\n    elif nested_proto.HasField('dict'):\n        return {name: _nested_from_proto(child, process_leafs) for (name, child) in six.iteritems(nested_proto.dict.map)}\n    elif nested_proto.HasField('named_tuple'):\n        tmp_dict = {name: _nested_from_proto(child, process_leafs) for (name, child) in six.iteritems(nested_proto.named_tuple.map)}\n        NamedTuple = collections.namedtuple(nested_proto.named_tuple.name, tmp_dict.keys())\n        return NamedTuple(**tmp_dict)\n    elif nested_proto.HasField('special_type'):\n        if (nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES):\n            return _UnserializableObject()\n        type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name]\n        return type_info.from_proto(nested_proto.special_type.object, process_leafs)\n    else:\n        raise base_errors.ModuleInfoError('Cannot deserialize a `ModuleInfo` protobuf with no fields.')", "docstring": "Deserializes `nested_proto`.\n\nArgs:\nnested_proto: An instance of `module_pb2.NestedData`.\nprocess_leafs: A function to be applied to the leaf values of the nested\nstructure.\n\nReturns:\nAn instance of `string`, `tuple`, `dict` or `namedtuple`.\n\nRaises:\nbase_errors.ModuleInfoError: If the probobuf is of the wrong type or\nif some of its fields are missing.", "source": "codesearchnet"}
{"code": "def tensor_layout(self, tensor_shape, mesh_shape):\n    \n    ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape)\n           for d in tensor_shape]\n    not_nones = [a for a in ret if a is not None]\n    if len(not_nones) != len(set(not_nones)):\n      raise ValueError(\n          \"Two Tensor Dimensions may not map to the same Mesh Dimension:\"\n          \" layout=%s tensor_shape=%s mesh_shape=%s \" %\n          (self, tensor_shape, mesh_shape))\n    return TensorLayout(ret)", "docstring": "Computes TensorLayout given a Tensor Shape and a Mesh Shape.\n\nArgs:\ntensor_shape: Shape.\nmesh_shape: Shape.\n\nReturns:\nTensorLayout.\n\nRaises:\nValueError: If two Tensor Dimensions map to the same Mesh Dimensions.", "source": "juraj-google-style"}
{"code": "def fresh(t, non_generic):\n    \n\n    mappings = {}  \n\n    def freshrec(tp):\n        p = prune(tp)\n        if isinstance(p, TypeVariable):\n            if is_generic(p, non_generic):\n                if p not in mappings:\n                    mappings[p] = TypeVariable()\n                return mappings[p]\n            else:\n                return p\n        elif isinstance(p, dict):\n            return p  \n        elif isinstance(p, Collection):\n            return Collection(*[freshrec(x) for x in p.types])\n        elif isinstance(p, Scalar):\n            return Scalar([freshrec(x) for x in p.types])\n        elif isinstance(p, TypeOperator):\n            return TypeOperator(p.name, [freshrec(x) for x in p.types])\n        elif isinstance(p, MultiType):\n            return MultiType([freshrec(x) for x in p.types])\n        else:\n            assert False, \"missing freshrec case {}\".format(type(p))\n\n    return freshrec(t)", "docstring": "Makes a copy of a type expression.\n\nThe type t is copied. The generic variables are duplicated and the\nnon_generic variables are shared.\n\nArgs:\nt: A type to be copied.\nnon_generic: A set of non-generic TypeVariables", "source": "juraj-google-style"}
{"code": "def list_current_jobs(self):\n    jobs = {}\n    for job in self.scheduler.get_jobs():\n        if (job.name not in ('schedule_jobs', 'process_status_queue')):\n            jobs[job.name] = job\n    return jobs", "docstring": "Return a list of the currently scheduled jobs in APScheduler\n\nReturns:\n`dict` of `str`: :obj:`apscheduler/job:Job`", "source": "codesearchnet"}
{"code": "def Detect(self, str_in):\n    components = SplitIntoComponents(str_in)\n    extracted_paths = set()\n    for extractor in self.extractors:\n        extracted_paths.update(extractor.Extract(components))\n    results = set(extracted_paths)\n    for post_processor in self.post_processors:\n        processed_results = set()\n        for result in results:\n            processed_results.update(post_processor.Process(result))\n        results = processed_results\n    return results", "docstring": "Detects paths in a given string.\n\nArgs:\nstr_in: String where the paths should be detected.\n\nReturns:\nA list of paths (as strings) detected inside the given string.", "source": "codesearchnet"}
{"code": "def isInstalled(value):\n\t\t\n\n\t\tfunction = \n\t\tcommand = .format(f = function, arg=value)\n\t\tcmd = CommandHelper(command)\n\t\tcmd.execute()\n\n\t\treturn \"1\" in cmd.output", "docstring": "Check if a software is installed into machine.\n\nArgs:\nvalue (str): Software's name\n\nReturns:\nbool: True if the software is installed. False else", "source": "juraj-google-style"}
{"code": "def stream_reader_statements(stream_arn):\n    action_type = get_stream_action_type(stream_arn)\n    arn_parts = stream_arn.split('/')\n    wildcard_arn_parts = arn_parts[:(- 1)]\n    wildcard_arn_parts.append('*')\n    wildcard_arn = '/'.join(wildcard_arn_parts)\n    return [Statement(Effect=Allow, Resource=[stream_arn], Action=[action_type('DescribeStream'), action_type('GetRecords'), action_type('GetShardIterator')]), Statement(Effect=Allow, Resource=[wildcard_arn], Action=[action_type('ListStreams')])]", "docstring": "Returns statements to allow Lambda to read from a stream.\n\nHandles both DynamoDB & Kinesis streams. Automatically figures out the\ntype of stream, and provides the correct actions from the supplied Arn.\n\nArg:\nstream_arn (str): A kinesis or dynamodb stream arn.\n\nReturns:\nlist: A list of statements.", "source": "codesearchnet"}
{"code": "def fastcc_consistent_subset(model, epsilon, solver):\n    \n    reaction_set = set(model.reactions)\n    return reaction_set.difference(fastcc(model, epsilon, solver))", "docstring": "Return consistent subset of model.\n\nThe largest consistent subset is returned as\na set of reaction names.\n\nArgs:\nmodel: :class:`MetabolicModel` to solve.\nepsilon: Flux threshold value.\nsolver: LP solver instance to use.\n\nReturns:\nSet of reaction IDs in the consistent reaction subset.", "source": "juraj-google-style"}
{"code": "def run_validate_program_main(self, program_main):\n    program_language = self.profile.get('install_json').get('programLanguage', 'python').lower()\n    if ((program_language == 'python') and (not os.path.isfile('{}.py'.format(program_main)))):\n        print('{}{}Could not find program main file ({}).'.format(c.Style.BRIGHT, c.Fore.RED, program_main))\n        sys.exit(1)", "docstring": "Validate the program main file exists.\n\nArgs:\nprogram_main (str): The executable name.", "source": "codesearchnet"}
{"code": "def get_ss_class(pdb_file, dssp_file, chain):\n    prag = pr.parsePDB(pdb_file)\n    pr.parseDSSP(dssp_file, prag)\n    (alpha, threeTen, beta) = get_dssp_ss_content_multiplechains(prag, chain)\n    if ((alpha == 0) and (beta > 0)):\n        classification = 'all-beta'\n    elif ((beta == 0) and (alpha > 0)):\n        classification = 'all-alpha'\n    elif ((beta == 0) and (alpha == 0)):\n        classification = 'mixed'\n    elif ((float(alpha) / beta) >= 20):\n        classification = 'all-alpha'\n    else:\n        classification = 'mixed'\n    return classification", "docstring": "Define the secondary structure class of a PDB file at the specific chain\n\nArgs:\npdb_file:\ndssp_file:\nchain:\n\nReturns:", "source": "codesearchnet"}
{"code": "def retry_auth_check(exception):\n  \n  if isinstance(exception, apiclient.errors.HttpError):\n    if exception.resp.status in HTTP_AUTH_ERROR_CODES:\n      _print_error('Retrying...')\n      return True\n\n  return False", "docstring": "Specific check for auth error codes.\n\nReturn True if we should retry.\n\nFalse otherwise.\nArgs:\nexception: An exception to test for transience.\n\nReturns:\nTrue if we should retry. False otherwise.", "source": "juraj-google-style"}
{"code": "def get_vcenter(self, **kwargs):\n    config = ET.Element('config')\n    urn = 'urn:brocade.com:mgmt:brocade-vswitch'\n    ET.SubElement(config, 'vcenter', xmlns=urn)\n    output = self._callback(config, handler='get_config')\n    result = []\n    element = ET.fromstring(str(output))\n    for vcenter in element.iter(('{%s}vcenter' % urn)):\n        vc = {}\n        vc['name'] = vcenter.find(('{%s}id' % urn)).text\n        vc['url'] = vcenter.find(('{%s}credentials' % urn)).find(('{%s}url' % urn)).text\n        isactive = vcenter.find(('{%s}activate' % urn))\n        if (isactive is None):\n            vc['isactive'] = False\n        else:\n            vc['isactive'] = True\n        result.append(vc)\n    return result", "docstring": "Get vCenter hosts on the switch\n\nArgs:\n\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns:\nReturns a list of vcenters\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def get(self, *index):\n    assert (self.wrapFunction is not None)\n    if ((len(index) == 1) and isinstance(index[0], (tuple, list))):\n        index = index[0]\n    if (len(index) == 0):\n        return self.wrapFunction(self._impl.get())\n    else:\n        return self.wrapFunction(self._impl.get(Tuple(index)._impl))", "docstring": "Get the instance with the specified index.\n\nReturns:\nThe corresponding instance.", "source": "codesearchnet"}
{"code": "def clear(self, size=-1, *, offset=0, chunk=None) -> None:\n        \n\n        self.mglo.clear(size, offset, chunk)", "docstring": "Clear the content.\n\nArgs:\nsize (int): The size. Value ``-1`` means all.\n\nKeyword Args:\noffset (int): The offset.\nchunk (bytes): The chunk to use repeatedly.", "source": "juraj-google-style"}
{"code": "def MultiDestroyFlowStates(self, session_ids, request_limit=None):\n    \n\n    subjects = [session_id.Add(\"state\") for session_id in session_ids]\n    to_delete = []\n    deleted_requests = []\n\n    for subject, values in self.MultiResolvePrefix(\n        subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit):\n      for _, serialized, _ in values:\n\n        request = rdf_flow_runner.RequestState.FromSerializedString(serialized)\n        deleted_requests.append(request)\n\n        \n        response_subject = self.GetFlowResponseSubject(request.session_id,\n                                                       request.id)\n        to_delete.append(response_subject)\n\n      \n      to_delete.append(subject)\n\n    \n    self.DeleteSubjects(to_delete, sync=True)\n    return deleted_requests", "docstring": "Deletes all requests and responses for the given flows.\n\nArgs:\nsession_ids: A lists of flows to destroy.\nrequest_limit: A limit on the number of requests to delete.\n\nReturns:\nA list of requests that were deleted.", "source": "juraj-google-style"}
{"code": "def get_template_list(self, page=1, page_size=None, account_id=None, query=None):\n    request = self._get_request()\n    parameters = {'page': page, 'page_size': page_size, 'account_id': account_id, 'query': query}\n    return request.get(self.TEMPLATE_GET_LIST_URL, parameters=parameters)", "docstring": "Lists your Templates\n\nArgs:\n\npage (int, optional):           Page number of the template List to return. Defaults to 1.\npage_size (int, optional):      Number of objects to be returned per page, must be between 1 and 100, default is 20.\naccount_id (str, optional):     Which account to return Templates for. Must be a team member. Use \"all\" to indicate all team members. Defaults to your account.\nquery (str, optional):          String that includes search terms and/or fields to be used to filter the Template objects.\n\nReturns:\nA ResourceList object", "source": "codesearchnet"}
{"code": "def save(self, file_prefix: str, options: Optional[checkpoint_options.CheckpointOptions]=None) -> Optional[ops.Operation]:\n    if options is not None and options.experimental_io_device is not None:\n        raise ValueError('Specified experimental_io_device in DTensor checkpoint is not supported.')\n    del options\n    tensor_names = []\n    tensors = []\n    tensor_slices = []\n    for saveable in self._saveable_objects:\n        for spec in saveable.specs:\n            tensor = spec.tensor\n            if tensor is not None:\n                if api.device_name() != spec.device:\n                    tensor = api.pack([tensor] * self._mesh.host_mesh().num_local_devices(), layout.Layout.replicated(self._mesh.host_mesh(), rank=tensor.shape.rank))\n                tensor_names.append(spec.name)\n                tensors.append(tensor)\n                tensor_slices.append(spec.slice_spec)\n    return save_restore.sharded_save(self._mesh, file_prefix, tensor_names, tensor_slices, tensors)", "docstring": "Saves the saveable objects to a checkpoint with `file_prefix`.\n\nAlso query the generated shards from the distributed DTensor SaveV2 ops and\ndo a MergeV2 on those. Each op here is backed by a global_barrier to avoid\nracing from multiple clients.\n\nArgs:\nfile_prefix: A string or scalar string Tensor containing the prefix to\nsave under.\noptions: Optional `CheckpointOptions` object. This is unused in DTensor.\n\nReturns:\nAn `Operation`, or None when executing eagerly.", "source": "github-repos"}
{"code": "def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):\n    if (not quantize):\n        with open(filename, 'wb') as f:\n            f.write('PIEH'.encode('utf-8'))\n            np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)\n            flow = flow.astype(np.float32)\n            flow.tofile(f)\n            f.flush()\n    else:\n        assert (concat_axis in [0, 1])\n        (dx, dy) = quantize_flow(flow, *args, **kwargs)\n        dxdy = np.concatenate((dx, dy), axis=concat_axis)\n        imwrite(dxdy, filename)", "docstring": "Write optical flow to file.\n\nIf the flow is not quantized, it will be saved as a .flo file losslessly,\notherwise a jpeg image which is lossy but of much smaller size. (dx and dy\nwill be concatenated horizontally into a single image if quantize is True.)\n\nArgs:\nflow (ndarray): (h, w, 2) array of optical flow.\nfilename (str): Output filepath.\nquantize (bool): Whether to quantize the flow and save it to 2 jpeg\nimages. If set to True, remaining args will be passed to\n:func:`quantize_flow`.\nconcat_axis (int): The axis that dx and dy are concatenated,\ncan be either 0 or 1. Ignored if quantize is False.", "source": "codesearchnet"}
{"code": "def apply_mutation(module_path, operator, occurrence):\n    module_ast = get_ast(module_path, python_version=operator.python_version)\n    original_code = module_ast.get_code()\n    visitor = MutationVisitor(occurrence, operator)\n    mutated_ast = visitor.walk(module_ast)\n    mutated_code = None\n    if visitor.mutation_applied:\n        mutated_code = mutated_ast.get_code()\n        with module_path.open(mode='wt', encoding='utf-8') as handle:\n            handle.write(mutated_code)\n            handle.flush()\n    return (original_code, mutated_code)", "docstring": "Apply a specific mutation to a file on disk.\n\nArgs:\nmodule_path: The path to the module to mutate.\noperator: The `operator` instance to use.\noccurrence: The occurrence of the operator to apply.\n\nReturns: A `(unmutated-code, mutated-code)` tuple to the with-block. If there was\nno mutation performed, the `mutated-code` is `None`.", "source": "codesearchnet"}
{"code": "def get_estimator(output_dir, train_config, args):\n  \n\n  \n  target_name = train_config['target_column']\n  if is_classification_model(args.model_type) and target_name not in \\\n          train_config['categorical_columns']:\n    raise ValueError('When using a classification model, the target must be a '\n                     'categorical variable.')\n  if is_regression_model(args.model_type) and target_name not in \\\n          train_config['numerical_columns']:\n    raise ValueError('When using a regression model, the target must be a '\n                     'numerical variable.')\n\n  \n  if is_dnn_model(args.model_type) and not args.layer_sizes:\n    raise ValueError('--layer-size* must be used with DNN models')\n  if is_linear_model(args.model_type) and args.layer_sizes:\n    raise ValueError('--layer-size* cannot be used with linear models')\n\n  \n  feature_columns = _tflearn_features(train_config, args)\n\n  \n  config = tf.contrib.learn.RunConfig(\n      save_checkpoints_secs=args.save_checkpoints_secs)\n\n  train_dir = os.path.join(output_dir, 'train')\n  if args.model_type == 'dnn_regression':\n    estimator = tf.contrib.learn.DNNRegressor(\n        feature_columns=feature_columns,\n        hidden_units=args.layer_sizes,\n        config=config,\n        model_dir=train_dir,\n        optimizer=tf.train.AdamOptimizer(\n            args.learning_rate, epsilon=args.epsilon))\n  elif args.model_type == 'linear_regression':\n    estimator = tf.contrib.learn.LinearRegressor(\n        feature_columns=feature_columns,\n        config=config,\n        model_dir=train_dir,\n        optimizer=tf.train.AdamOptimizer(\n            args.learning_rate, epsilon=args.epsilon))\n  elif args.model_type == 'dnn_classification':\n    estimator = tf.contrib.learn.DNNClassifier(\n        feature_columns=feature_columns,\n        hidden_units=args.layer_sizes,\n        n_classes=train_config['vocab_stats'][target_name]['n_classes'],\n        config=config,\n        model_dir=train_dir,\n        optimizer=tf.train.AdamOptimizer(\n            args.learning_rate, epsilon=args.epsilon))\n  elif args.model_type == 'linear_classification':\n    estimator = tf.contrib.learn.LinearClassifier(\n        feature_columns=feature_columns,\n        n_classes=train_config['vocab_stats'][target_name]['n_classes'],\n        config=config,\n        model_dir=train_dir,\n        optimizer=tf.train.AdamOptimizer(\n            args.learning_rate, epsilon=args.epsilon))\n  else:\n    raise ValueError('bad --model-type value')\n\n  return estimator", "docstring": "Returns a tf learn estimator.\n\nWe only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is\ncontrolled by the values of model_type in the args.\n\nArgs:\noutput_dir: Modes are saved into outputdir/train\ntrain_config: our training config\nargs: command line parameters\n\nReturns:\nTF lean estimator\n\nRaises:\nValueError: if config is wrong.", "source": "juraj-google-style"}
{"code": "def _ragged_tensor_to_string(string_tensor, summarize):\n    if string_tensor.shape.rank == 1:\n        pieces = string_tensor\n    else:\n        pieces = map_fn_lib.map_fn(lambda s: _ragged_tensor_to_string(s, summarize), string_tensor, fn_output_signature=tensor_lib.TensorSpec(None, dtypes.string))\n    if summarize not in (-1, None):\n        pieces = cond.cond(_nrows(string_tensor) <= 2 * summarize, lambda: pieces, lambda: array_ops.concat([pieces[:summarize], ['...'], pieces[-summarize:]], axis=0))\n    return '[' + string_ops.reduce_join(pieces, separator=', ') + ']'", "docstring": "Returns a scalar string tensor with the contents of `string_tensor`.\n\nArgs:\nstring_tensor: A potentially ragged tensor with dtype=string.\nsummarize: Include only the first and last `summarize` elements of each\ndimension.  If `-1` or `None`, then include all elements.\n\nReturns:\nA scalar string Tensor.", "source": "github-repos"}
{"code": "def dump(ofp, *pb_objs, **kwargs):\n    \n    mode = 'wb'\n    if isinstance(ofp, str):\n        ostream = open(ofp, mode=mode, **kwargs)\n    else:\n        ostream = open(fileobj=ofp, mode=mode, **kwargs)\n    with ostream:\n        ostream.write(*pb_objs)", "docstring": "Write to a stream.\n\nArgs:\nofp (string or file-like object): output stream.\npb_objs (*protobuf.message.Message): list of protobuf message objects\nto be written.", "source": "juraj-google-style"}
{"code": "def collective_diffusion_coefficient( self ):\n        \n        if self.has_run:\n            return self.atoms.collective_dr_squared() / ( 6.0 * self.lattice.time )\n        else:\n            return None", "docstring": "Returns the collective or \"jump\" diffusion coefficient, D_J.\n\nArgs:\nNone\n\nReturns:\n(Float): The collective diffusion coefficient, D_J.", "source": "juraj-google-style"}
{"code": "def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n    raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Aggregates batch-level results into total results.\n\nArgs:\nbatch_outs: A list of batch-level outputs.\nbatch_start: The start index of this batch. Always `None` if `use_steps`\nis `True`.\nbatch_end: The end index of this batch. Always `None` if `use_steps` is\n`True`.", "source": "github-repos"}
{"code": "def __init__(self, dimensions, hidden_size):\n    \n    super(LearnableMultivariateNormalDiagCell, self).__init__()\n    self.dimensions = dimensions\n    self.hidden_size = hidden_size\n    self.lstm_cell = tf.keras.layers.LSTMCell(hidden_size)\n    self.output_layer = tf.keras.layers.Dense(2*dimensions)", "docstring": "Constructs a learnable multivariate diagonal normal cell.\n\nArgs:\ndimensions: An integer corresponding to the dimensionality of the\ndistribution.\nhidden_size: Dimensionality of the LSTM function parameters.", "source": "juraj-google-style"}
{"code": "def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting transpose ...')\n    if params['perm'][0] != 0:\n        if inputs[0] in layers:\n            print('!!! Cannot permute batch dimension. Result may be wrong !!!')\n            layers[scope_name] = layers[inputs[0]]\n        else:\n            print('Skip weight matrix transpose, result may be wrong.')\n    else:\n        if names:\n            tf_name = 'PERM' + random_string(4)\n        else:\n            tf_name = w_name + str(random.random())\n        permute = keras.layers.Permute(params['perm'][1:], name=tf_name)\n        layers[scope_name] = permute(layers[inputs[0]])", "docstring": "Convert transpose layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def pp_hex(raw, reverse=True):\n    \n    if not reverse:\n        return ''.join(['{:02x}'.format(v) for v in bytearray(raw)])\n    return ''.join(reversed(['{:02x}'.format(v) for v in bytearray(raw)]))", "docstring": "Return a pretty-printed (hex style) version of a binary string.\n\nArgs:\nraw (bytes): any sequence of bytes\nreverse (bool): True if output should be in reverse order.\n\nReturns:\nHex string corresponding to input byte sequence.", "source": "juraj-google-style"}
{"code": "def _FormatReturnOrExitToken(self, token_data):\n    error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN')\n    return {'error': error_string, 'token_status': token_data.status, 'call_status': token_data.return_value}", "docstring": "Formats a return or exit token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_exit|bsm_token_data_return32|\nbsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or\nAUT_RETURN64 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"}
{"code": "def _extract_params(self, kwargs, hyperparameters):\n    init_params = dict()\n    fit_params = dict()\n    produce_params = dict()\n    for (name, param) in hyperparameters.get('fixed', dict()).items():\n        if (name in kwargs):\n            value = kwargs.pop(name)\n        elif ('default' in param):\n            value = param['default']\n        else:\n            raise TypeError(\"{} required argument '{}' not found\".format(self.name, name))\n        init_params[name] = value\n    for (name, param) in hyperparameters.get('tunable', dict()).items():\n        if (name in kwargs):\n            init_params[name] = kwargs.pop(name)\n    fit_args = [arg['name'] for arg in self.fit_args]\n    produce_args = [arg['name'] for arg in self.produce_args]\n    for name in list(kwargs.keys()):\n        if (name in fit_args):\n            fit_params[name] = kwargs.pop(name)\n        elif (name in produce_args):\n            produce_params[name] = kwargs.pop(name)\n    if kwargs:\n        error = \"Unexpected hyperparameters '{}'\".format(', '.join(kwargs.keys()))\n        raise TypeError(error)\n    return (init_params, fit_params, produce_params)", "docstring": "Extract init, fit and produce params from kwargs.\n\nThe `init_params`, `fit_params` and `produce_params` are extracted\nfrom the passed `kwargs` taking the metadata hyperparameters as a\nreference.\n\nDuring this extraction, make sure that all the required hyperparameters\nhave been given and that nothing unexpected exists in the input.\n\nArgs:\nkwargs (dict): dict containing the Keyword arguments that have\nbeen passed to the `__init__` method upon\ninitialization.\nhyperparameters (dict): hyperparameters dictionary, as found in\nthe JSON annotation.\n\nRaises:\nTypeError: A `TypeError` is raised if a required argument is not\nfound in the `kwargs` dict, or if an unexpected\nargument has been given.", "source": "codesearchnet"}
{"code": "def tagscleanupdicts(configuration=None, url=None, keycolumn=5, failchained=True):\n    if (not Tags._tags_dict):\n        if (configuration is None):\n            configuration = Configuration.read()\n        with Download(full_agent=configuration.get_user_agent()) as downloader:\n            if (url is None):\n                url = configuration['tags_cleanup_url']\n            Tags._tags_dict = downloader.download_tabular_rows_as_dicts(url, keycolumn=keycolumn)\n            keys = Tags._tags_dict.keys()\n            chainerror = False\n            for (i, tag) in enumerate(keys):\n                whattodo = Tags._tags_dict[tag]\n                action = whattodo[u'action']\n                final_tags = whattodo[u'final tags (semicolon separated)']\n                for final_tag in final_tags.split(';'):\n                    if (final_tag in keys):\n                        index = list(keys).index(final_tag)\n                        if (index != i):\n                            whattodo2 = Tags._tags_dict[final_tag]\n                            action2 = whattodo2[u'action']\n                            if ((action2 != 'OK') and (action2 != 'Other')):\n                                final_tags2 = whattodo2[u'final tags (semicolon separated)']\n                                if (final_tag not in final_tags2.split(';')):\n                                    chainerror = True\n                                    if failchained:\n                                        logger.error(('Chained rules: %s (%s -> %s) | %s (%s -> %s)' % (action, tag, final_tags, action2, final_tag, final_tags2)))\n            if (failchained and chainerror):\n                raise ChainRuleError('Chained rules for tags detected!')\n            Tags._wildcard_tags = list()\n            for tag in Tags._tags_dict:\n                if ('*' in tag):\n                    Tags._wildcard_tags.append(tag)\n    return (Tags._tags_dict, Tags._wildcard_tags)", "docstring": "Get tags cleanup dictionaries\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\nurl (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter).\nkeycolumn (int): Column number of tag column in spreadsheet. Defaults to 5.\nfailchained (bool): Fail if chained rules found. Defaults to True.\n\nReturns:\nTuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False, **kwargs):\n    residual = hidden_states\n    if self.normalize_before:\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    if not self.normalize_before:\n        hidden_states = self.self_attn_layer_norm(hidden_states)\n    if self.normalize_before:\n        hidden_states = self.final_layer_norm(hidden_states)\n    residual = hidden_states\n    hidden_states = self.activation_fn(self.fc1(hidden_states))\n    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n    hidden_states = self.fc2(hidden_states)\n    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n    hidden_states = residual + hidden_states\n    if not self.normalize_before:\n        hidden_states = self.final_layer_norm(hidden_states)\n    if self.training:\n        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n            clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attn_weights,)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\nvalues.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nObject queries (also called content embeddings), to be added to the hidden states.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def download_software_version(version=None, synch=False):\n    \n    if not version:\n        raise CommandExecutionError(\"Version option must not be none.\")\n\n    if not isinstance(synch, bool):\n        raise CommandExecutionError(\"Synch option must be boolean..\")\n\n    if synch is True:\n        query = {'type': 'op',\n                 'cmd': '<request><system><software><download>'\n                        '<version>{0}</version></download></software></system></request>'.format(version)}\n    else:\n        query = {'type': 'op',\n                 'cmd': '<request><system><software><download><sync-to-peer>yes</sync-to-peer>'\n                        '<version>{0}</version></download></software></system></request>'.format(version)}\n\n    return _get_job_results(query)", "docstring": "Download software packages by version number.\n\nArgs:\nversion(str): The version of the PANOS file to download.\n\nsynch (bool): If true then the file will synch to the peer unit.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' panos.download_software_version 8.0.0\nsalt '*' panos.download_software_version 8.0.0 True", "source": "juraj-google-style"}
{"code": "def __init__(self, n_clusters: int, batch_size: int, is_batched: bool=False):\n    super().__init__()\n    self.n_clusters = n_clusters\n    self.batch_size = batch_size\n    self.is_batched = is_batched", "docstring": "Preprocessing for Clustering Transformation\nThe clustering transform expects batches for performance reasons,\ntherefore this batches the data and converts it to numpy arrays,\nwhich are accepted by sklearn. This transform also adds the same key\nto all batches, such that only 1 state is created and updated during\nclustering updates.\n\nExample Usage::\n\npcoll | ClusteringPreprocessing(\nn_clusters=8,\nbatch_size=1024,\nis_batched=False)\n\nArgs:\nn_clusters: number of clusters used by the algorithm\nbatch_size: size of the data batches\nis_batched: boolean value that marks if the collection is already\nbatched and thus doesn't need to be batched by this transform", "source": "github-repos"}
{"code": "def read_stream(self, file: IO, data_stream: DataStream) -> Reply:\n    (yield from data_stream.read_file(file=file))\n    reply = (yield from self._control_stream.read_reply())\n    self.raise_if_not_match('End stream', ReplyCodes.closing_data_connection, reply)\n    data_stream.close()\n    return reply", "docstring": "Read from the data stream.\n\nArgs:\nfile: A destination file object or a stream writer.\ndata_stream: The stream of which to read from.\n\nCoroutine.\n\nReturns:\nReply: The final reply.", "source": "codesearchnet"}
{"code": "def _build_zmat(self, construction_table):\n        \n        c_table = construction_table\n        default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']\n        optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})\n\n        zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,\n                                  dtype='float', index=c_table.index)\n\n        zmat_frame.loc[:, optional_cols] = self.loc[c_table.index,\n                                                    optional_cols]\n\n        zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']\n        zmat_frame.loc[:, ['b', 'a', 'd']] = c_table\n\n        zmat_values = self._calculate_zmat_values(c_table)\n        zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values\n\n        zmatrix = Zmat(zmat_frame, metadata=self.metadata,\n                       _metadata={'last_valid_cartesian': self.copy()})\n        return zmatrix", "docstring": "Create the Zmatrix from a construction table.\n\nArgs:\nConstruction table (pd.DataFrame):\n\nReturns:\nZmat: A new instance of :class:`Zmat`.", "source": "juraj-google-style"}
{"code": "def write(self, data):\n        \n\n        self._process.poll()\n        if self._process.returncode is not None:\n            raise EOFError('Process ended')\n        self._process.stdin.write(data)", "docstring": "Write *n* bytes to the subprocess' input channel.\n\nArgs:\ndata(bytes): The data to write.\n\nRaises:\nEOFError: If the process exited.", "source": "juraj-google-style"}
{"code": "def extract_possible_actions(self, state_arr):\n        \n        agent_x, agent_y = np.where(state_arr[-1] == 1)\n        agent_x, agent_y = agent_x[0], agent_y[0]\n\n        possible_action_arr = None\n        for x, y in [\n            (-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)\n        ]:\n            next_x = agent_x + x\n            if next_x < 0 or next_x >= state_arr[-1].shape[1]:\n                continue\n            next_y = agent_y + y\n            if next_y < 0 or next_y >= state_arr[-1].shape[0]:\n                continue\n\n            wall_flag = False\n            if x > 0:\n                for add_x in range(1, x):\n                    if self.__map_arr[agent_x + add_x, next_y] == self.WALL:\n                        wall_flag = True\n            elif x < 0:\n                for add_x in range(x, 0):\n                    if self.__map_arr[agent_x + add_x, next_y] == self.WALL:\n                        wall_flag = True\n                    \n            if wall_flag is True:\n                continue\n\n            if y > 0:\n                for add_y in range(1, y):\n                    if self.__map_arr[next_x, agent_y + add_y] == self.WALL:\n                        wall_flag = True\n            elif y < 0:\n                for add_y in range(y, 0):\n                    if self.__map_arr[next_x, agent_y + add_y] == self.WALL:\n                        wall_flag = True\n\n            if wall_flag is True:\n                continue\n\n            if self.__map_arr[next_x, next_y] == self.WALL:\n                continue\n\n            if (next_x, next_y) in self.__route_memory_list:\n                continue\n\n            next_action_arr = np.zeros((\n                 3 + self.__enemy_num,\n                 state_arr[-1].shape[0],\n                 state_arr[-1].shape[1]\n            ))\n            next_action_arr[0][agent_x, agent_y] = 1\n            next_action_arr[1] = self.__map_arr\n            next_action_arr[-1][next_x, next_y] = 1\n\n            for e in range(self.__enemy_num):\n                enemy_state_arr = np.zeros(state_arr[0].shape)\n                enemy_state_arr[self.__enemy_pos_list[e][0], self.__enemy_pos_list[e][1]] = 1\n                next_action_arr[2 + e] = enemy_state_arr\n\n            next_action_arr = np.expand_dims(next_action_arr, axis=0)\n            if possible_action_arr is None:\n                possible_action_arr = next_action_arr\n            else:\n                possible_action_arr = np.r_[possible_action_arr, next_action_arr]\n\n        if possible_action_arr is not None:\n            while possible_action_arr.shape[0] < self.__batch_size:\n                key = np.random.randint(low=0, high=possible_action_arr.shape[0])\n                possible_action_arr = np.r_[\n                    possible_action_arr,\n                    np.expand_dims(possible_action_arr[key], axis=0)\n                ]\n        else:\n            \n            self.__route_memory_list = self.__route_memory_list[1:]\n            possible_action_arr = self.extract_possible_actions(state_arr)\n\n        return possible_action_arr", "docstring": "Extract possible actions.\n\nArgs:\nstate_arr:  `np.ndarray` of state.\n\nReturns:\n`np.ndarray` of actions.\nThe shape is:(\n`batch size corresponded to each action key`,\n`channel that is 1`,\n`feature points1`,\n`feature points2`\n)", "source": "juraj-google-style"}
{"code": "def by_geopoint(self, lat, long):\n        \n\n        header, content = self._http_request(self.BASE_URL, lat=lat, long=long)\t\t\n        return json.loads(content)", "docstring": "Perform a Yelp Neighborhood API Search based on a geopoint.\n\nArgs:\nlat      - geopoint latitude\nlong     - geopoint longitude", "source": "juraj-google-style"}
{"code": "def unhide_tool(self, context_name, tool_name):\n        \n        data = self._context(context_name)\n        hidden_tools = data[\"hidden_tools\"]\n        if tool_name in hidden_tools:\n            hidden_tools.remove(tool_name)\n            self._flush_tools()", "docstring": "Unhide a tool so that it may be exposed in a suite.\n\nNote that unhiding a tool doesn't guarantee it can be seen - a tool of\nthe same name from a different context may be overriding it.\n\nArgs:\ncontext_name (str): Context containing the tool.\ntool_name (str): Name of tool to unhide.", "source": "juraj-google-style"}
{"code": "def attach_socket(self, **kwargs):\n        \n        return self.client.api.attach_socket(self.id, **kwargs)", "docstring": "Like :py:meth:`attach`, but returns the underlying socket-like object\nfor the HTTP request.\n\nArgs:\nparams (dict): Dictionary of request parameters (e.g. ``stdout``,\n``stderr``, ``stream``).\nws (bool): Use websockets instead of raw HTTP.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _show_all(saved_model_dir):\n    saved_model = saved_model_utils.read_saved_model(saved_model_dir)\n    for meta_graph_def in sorted(saved_model.meta_graphs, key=lambda meta_graph_def: list(meta_graph_def.meta_info_def.tags)):\n        tag_set = meta_graph_def.meta_info_def.tags\n        print(\"\\nMetaGraphDef with tag-set: '%s' contains the following SignatureDefs:\" % ', '.join(tag_set))\n        tag_set = ','.join(tag_set)\n        signature_def_map = meta_graph_def.signature_def\n        for signature_def_key in sorted(signature_def_map.keys()):\n            print(\"\\nsignature_def['\" + signature_def_key + \"']:\")\n            _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent=1)\n        _show_ops_in_metagraph_mgd(meta_graph_def)\n    _show_defined_functions(saved_model_dir, saved_model.meta_graphs)", "docstring": "Prints tag-set, ops, SignatureDef, and Inputs/Outputs of SavedModel.\n\nPrints all tag-set, ops, SignatureDef and Inputs/Outputs information stored in\nSavedModel directory.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel to inspect.", "source": "github-repos"}
{"code": "def remove_child(self, c: 'AbstractSyntaxTree') -> None:\n    if self._children is None:\n        raise ValueError(f'No children belonging to {self!r}.')\n    self._children.remove(c)\n    c.parent = None", "docstring": "Removes a child from the reciever and sets its parent to `None`.\n\nArgs:\nc: The child to remove. By default, compared using pointer equality.\n\nRaises:\nValueError in the event that the child does not being to the underlying\nlist of children.", "source": "github-repos"}
{"code": "def PublishEvent(cls, event_name, msg, token=None):\n    \n    cls.PublishMultipleEvents({event_name: [msg]}, token=token)", "docstring": "Publish the message into all listeners of the event.\n\nWe send the message to all event handlers which contain this\nstring in their EVENT static member. This allows the event to be\nsent to multiple interested listeners.\n\nArgs:\nevent_name: An event name.\nmsg: The message to send to the event handler.\ntoken: ACL token.\n\nRaises:\nValueError: If the message is invalid. The message must be a Semantic\nValue (instance of RDFValue) or a full GrrMessage.", "source": "juraj-google-style"}
{"code": "def alwaysThrew(self, error_type=None): \n        \n        if self.callCount == 0:\n            return False\n        if not error_type:\n            return True if len(self.exceptions) == self.callCount else False\n        else:\n            return uch.obj_in_list_always(self.exceptions, error_type)", "docstring": "Determining whether the specified exception is the ONLY thrown exception\nArgs:\nerror_type:\nNone: checking without specified exception\nSpecified Exception\nReturn: Boolean", "source": "juraj-google-style"}
{"code": "def load_hat(self, path):  \n        \n        hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n        if hat is None:\n            raise ValueError('No hat image found at `{}`'.format(path))\n        b, g, r, a = cv2.split(hat)\n        return cv2.merge((r, g, b, a))", "docstring": "Loads the hat from a picture at path.\n\nArgs:\npath: The path to load from\n\nReturns:\nThe hat data.", "source": "juraj-google-style"}
{"code": "def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):\n    if (cifar_version == 'cifar10'):\n        url = _CIFAR10_URL\n        train_files = _CIFAR10_TRAIN_FILES\n        test_files = _CIFAR10_TEST_FILES\n        prefix = _CIFAR10_PREFIX\n        image_size = _CIFAR10_IMAGE_SIZE\n        label_key = 'labels'\n    elif ((cifar_version == 'cifar100') or (cifar_version == 'cifar20')):\n        url = _CIFAR100_URL\n        train_files = _CIFAR100_TRAIN_FILES\n        test_files = _CIFAR100_TEST_FILES\n        prefix = _CIFAR100_PREFIX\n        image_size = _CIFAR100_IMAGE_SIZE\n        if (cifar_version == 'cifar100'):\n            label_key = 'fine_labels'\n        else:\n            label_key = 'coarse_labels'\n    _get_cifar(tmp_dir, url)\n    data_files = (train_files if training else test_files)\n    (all_images, all_labels) = ([], [])\n    for filename in data_files:\n        path = os.path.join(tmp_dir, prefix, filename)\n        with tf.gfile.Open(path, 'rb') as f:\n            if six.PY2:\n                data = cPickle.load(f)\n            else:\n                data = cPickle.load(f, encoding='latin1')\n        images = data['data']\n        num_images = images.shape[0]\n        images = images.reshape((num_images, 3, image_size, image_size))\n        all_images.extend([np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)])\n        labels = data[label_key]\n        all_labels.extend([labels[j] for j in range(num_images)])\n    return image_utils.image_generator(all_images[start_from:(start_from + how_many)], all_labels[start_from:(start_from + how_many)])", "docstring": "Image generator for CIFAR-10 and 100.\n\nArgs:\ncifar_version: string; one of \"cifar10\" or \"cifar100\"\ntmp_dir: path to temporary storage directory.\ntraining: a Boolean; if true, we use the train set, otherwise the test set.\nhow_many: how many images and labels to generate.\nstart_from: from which image to start.\n\nReturns:\nAn instance of image_generator that produces CIFAR-10 images and labels.", "source": "codesearchnet"}
{"code": "def __init__(self, max_iterations, unroll_loop=False):\n        \n        assert max_iterations >= 0\n        self.max_iterations = max_iterations\n\n        assert isinstance(unroll_loop, bool)\n        self.unroll_loop = unroll_loop\n\n        super(Iterative, self).__init__()\n\n        \n        self.initialize = tf.make_template(name_='initialize', func_=self.tf_initialize)\n        self.step = tf.make_template(name_='step', func_=self.tf_step)\n        self.next_step = tf.make_template(name_='next-step', func_=self.tf_next_step)", "docstring": "Creates a new iterative solver instance.\n\nArgs:\nmax_iterations: Maximum number of iterations before termination.\nunroll_loop: Unrolls the TensorFlow while loop if true.", "source": "juraj-google-style"}
{"code": "def email_address(self, address, owner=None, **kwargs):\n    return EmailAddress(self.tcex, address, owner=owner, **kwargs)", "docstring": "Create the Email Address TI object.\n\nArgs:\nowner:\naddress:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def html_for_modules_method(method_name, *args, **kwargs):\n    \n    method = getattr(modules, method_name)\n    value = method(*args, **kwargs)\n    return KEY_VALUE_TEMPLATE.format(method_name, value)", "docstring": "Returns an HTML snippet for a Modules API method.\n\nArgs:\nmethod_name: A string containing a Modules API method.\nargs: Positional arguments to be passed to the method.\nkwargs: Keyword arguments to be passed to the method.\n\nReturns:\nString HTML representing the Modules API method and value.", "source": "juraj-google-style"}
{"code": "def _begin_operation_action(self, action):\n        \n\n        conn_key = action.data['id']\n        callback = action.data['callback']\n\n        if self._get_connection_state(conn_key) != self.Idle:\n            callback(conn_key, self.id, False, 'Cannot start operation, connection is not idle')\n            return\n\n        data = self._get_connection(conn_key)\n        data['state'] = self.InProgress\n        data['microstate'] = action.data['operation_name']\n        data['action'] = action", "docstring": "Begin an attempted operation.\n\nArgs:\naction (ConnectionAction): the action object describing what we are\noperating on", "source": "juraj-google-style"}
{"code": "def _add_arg_java(self, key, value, mask=False):\n    if isinstance(value, bool):\n        value = int(value)\n    self._data[key] = value\n    self._args.append('{}{}={}'.format('-D', key, value))\n    self._args_quoted.append(self.quote('{}{}={}'.format('-D', key, value)))\n    if mask:\n        value = ('x' * len(str(value)))\n    self._args_masked.append('{}{}={}'.format('-D', key, value))", "docstring": "Add CLI Arg formatted specifically for Java.\n\nArgs:\nkey (string): The CLI Args key (e.g., --name).\nvalue (string): The CLI Args value (e.g., bob).\nmask (boolean, default:False): Indicates whether no mask value.", "source": "codesearchnet"}
{"code": "def read_label_list(path):\n    \n    ll = annotations.LabelList()\n\n    for record in read_label_file(path):\n        ll.add(annotations.Label(record[2], start=record[0], end=record[1]))\n\n    return ll", "docstring": "Reads labels from an Audacity label file\nand returns them wrapped in a :py:class:`audiomate.annotations.LabelList`.\n\nArgs:\npath (str): Path to the Audacity label file\n\nReturns:\naudiomate.annotations.LabelList: Label list containing the labels", "source": "juraj-google-style"}
{"code": "def __init__(self, project_name, instance_name, table_name):\n        \n        self.btspec = BigtableSpec(project_name, instance_name, table_name)\n        self.bt_table = bigtable.Client(\n            self.btspec.project, admin=True).instance(\n                self.btspec.instance).table(self.btspec.table)\n        self.tf_table = tf.contrib.cloud.BigtableClient(\n            self.btspec.project,\n            self.btspec.instance).table(self.btspec.table)", "docstring": "Constructor.\n\nArgs:\nproject_name:  string name of GCP project having table.\ninstance_name:  string name of CBT instance in project.\ntable_name:  string name of CBT table in instance.", "source": "juraj-google-style"}
{"code": "def tell(self):\n    self._checkClosed()\n    return self._position", "docstring": "Tell the stream's current offset.\n\nReturns:\ncurrent offset in reading this stream.\n\nRaises:\n``ValueError``: When this stream is closed.", "source": "github-repos"}
{"code": "def get_intersection(self, range_):\n    result = []\n    for entry in self.entries:\n        (package, value) = entry\n        if (value is None):\n            continue\n        if (package.version not in range_):\n            continue\n        if isinstance(value, list):\n            variants = value\n            entry_ = _PackageEntry(package, variants, self.solver)\n            result.append(entry_)\n            continue\n        if self.solver.package_filter:\n            rule = self.solver.package_filter.excludes(package)\n            if rule:\n                if config.debug_package_exclusions:\n                    print_debug((\"Package '%s' was excluded by rule '%s'\" % (package.qualified_name, str(rule))))\n                entry[1] = None\n                continue\n        if self.solver.package_load_callback:\n            self.solver.package_load_callback(package)\n        variants_ = []\n        for var in package.iter_variants():\n            variant = PackageVariant(var, self.solver.building)\n            variants_.append(variant)\n        entry[1] = variants_\n        entry_ = _PackageEntry(package, variants_, self.solver)\n        result.append(entry_)\n    return (result or None)", "docstring": "Get a list of variants that intersect with the given range.\n\nArgs:\nrange_ (`VersionRange`): Package version range.\n\nReturns:\nList of `_PackageEntry` objects.", "source": "codesearchnet"}
{"code": "def register_event(self, name, callback, validator):\n\n    async def _validate_and_call(message):\n        payload = message.get('payload')\n        try:\n            payload = validator.verify(payload)\n        except ValidationError:\n            self._logger.warning('Dropping invalid payload for event %s, payload=%s', name, payload)\n            return\n        try:\n            result = callback(payload)\n            if inspect.isawaitable(result):\n                (await result)\n        except:\n            self._logger.error('Error calling callback for event %s, payload=%s', name, payload, exc_info=True)\n    self._manager.every_match(_validate_and_call, type='event', name=name)", "docstring": "Register a callback to receive events.\n\nEvery event with the matching name will have its payload validated\nusing validator and then will be passed to callback if validation\nsucceeds.\n\nCallback must be a normal callback function, coroutines are not\nallowed.  If you need to run a coroutine you are free to schedule it\nfrom your callback.\n\nArgs:\nname (str): The name of the event that we are listening\nfor\ncallback (callable): The function that should be called\nwhen a message that matches validator is received.\nvalidator (Verifier): A schema verifier that will\nvalidate a received message uniquely", "source": "codesearchnet"}
{"code": "def _peer_get_bfd(self, tx, rx, multiplier):\n        \n        tx = self._callback(tx, handler='get_config')\n        rx = self._callback(rx, handler='get_config')\n        multiplier = self._callback(multiplier, handler='get_config')\n        tx = pynos.utilities.return_xml(str(tx))\n        rx = pynos.utilities.return_xml(str(rx))\n        multiplier = pynos.utilities.return_xml(str(multiplier))\n        config = pynos.utilities.merge_xml(tx, rx)\n        return pynos.utilities.merge_xml(config, multiplier)", "docstring": "Get and merge the `bfd` config from global BGP.\n\nYou should not use this method.\nYou probably want `BGP.bfd`.\n\nArgs:\ntx: XML document with the XML to get the transmit interval.\nrx: XML document with the XML to get the receive interval.\nmultiplier: XML document with the XML to get the interval\nmultiplier.\n\nReturns:\nMerged XML document.\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def load_institute(adapter, internal_id, display_name, sanger_recipients=None):\n    institute_obj = build_institute(internal_id=internal_id, display_name=display_name, sanger_recipients=sanger_recipients)\n    log.info('Loading institute {0} with display name {1} into database'.format(internal_id, display_name))\n    adapter.add_institute(institute_obj)", "docstring": "Load a institute into the database\n\nArgs:\nadapter(MongoAdapter)\ninternal_id(str)\ndisplay_name(str)\nsanger_recipients(list(email))", "source": "codesearchnet"}
{"code": "def wait_for_stateful_block_init(context, mri, timeout=DEFAULT_TIMEOUT):\n    context.when_matches([mri, 'state', 'value'], StatefulStates.READY, bad_values=[StatefulStates.FAULT, StatefulStates.DISABLED], timeout=timeout)", "docstring": "Wait until a Block backed by a StatefulController has initialized\n\nArgs:\ncontext (Context): The context to use to make the child block\nmri (str): The mri of the child block\ntimeout (float): The maximum time to wait", "source": "codesearchnet"}
{"code": "def add_attribute_label(self, attribute_id, label):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    return self.tc_requests.add_attribute_label(self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner)", "docstring": "Adds a security labels to a attribute\n\nArgs:\nattribute_id:\nlabel:\n\nReturns: A response json", "source": "codesearchnet"}
{"code": "def _map_args(self, node: 'cfg.CFGNode', args: function.Args) -> 'dict[str, cfg.Variable]':\n    posargs = [u.AssignToNewVariable(node) for u in args.posargs]\n    kws = {k: u.AssignToNewVariable(node) for k, u in args.namedargs.items()}\n    sig = self.signature\n    callargs = {name: self.ctx.program.NewVariable(default.data, [], node) for name, default in sig.defaults.items()}\n    positional = dict(zip(sig.param_names, posargs))\n    posonly_names = set(sig.posonly_params)\n    for key in set(positional) - posonly_names:\n        if key in kws:\n            raise error_types.DuplicateKeyword(sig, args, self.ctx, key)\n    kwnames = set(kws)\n    extra_kws = kwnames.difference(sig.param_names + sig.kwonly_params)\n    if extra_kws and (not sig.kwargs_name):\n        if function.has_visible_namedarg(node, args, extra_kws):\n            raise error_types.WrongKeywordArgs(sig, args, self.ctx, extra_kws)\n    posonly_kws = kwnames & posonly_names\n    if posonly_kws and (not sig.kwargs_name):\n        raise error_types.WrongKeywordArgs(sig, args, self.ctx, posonly_kws)\n    callargs.update(positional)\n    callargs.update(kws)\n    for key, kwonly in itertools.chain(self.get_nondefault_params(), ((key, True) for key in sig.kwonly_params)):\n        if key not in callargs:\n            if args.starstarargs or (args.starargs and (not kwonly)):\n                callargs[key] = self.ctx.new_unsolvable(node)\n            else:\n                raise error_types.MissingParameter(sig, args, self.ctx, key)\n    if sig.varargs_name:\n        varargs_name = sig.varargs_name\n        extraneous = posargs[self.argcount(node):]\n        if args.starargs:\n            if extraneous:\n                log.warning('Not adding extra params to *%s', varargs_name)\n            callargs[varargs_name] = args.starargs.AssignToNewVariable(node)\n        else:\n            callargs[varargs_name] = self.ctx.convert.build_tuple(node, extraneous)\n    elif len(posargs) > self.argcount(node):\n        raise error_types.WrongArgCount(sig, args, self.ctx)\n    if sig.kwargs_name:\n        kwargs_name = sig.kwargs_name\n        if args.starstarargs:\n            callargs[kwargs_name] = args.starstarargs.AssignToNewVariable(node)\n        else:\n            omit = sig.param_names + sig.kwonly_params\n            k = _instances.Dict(self.ctx)\n            k.update(node, args.namedargs, omit=omit)\n            callargs[kwargs_name] = k.to_variable(node)\n    return callargs", "docstring": "Map call args to function args.\n\nThis emulates how Python would map arguments of function calls. It takes\ncare of keyword parameters, default parameters, and *args and **kwargs.\n\nArgs:\nnode: The current CFG node.\nargs: The arguments.\n\nReturns:\nA dictionary, mapping strings (parameter names) to cfg.Variable.\n\nRaises:\nfunction.FailedFunctionCall: If the caller supplied incorrect arguments.", "source": "github-repos"}
{"code": "def urlEncodeAndJoin(self, seq, sepr=','):\n        \n        try:\n            from urllib.parse import quote_plus as encode\n            return sepr.join([encode(x, encoding=CHARSET_UTF8) for x in seq])\n        except ImportError:\n            from urllib import quote as encode\n            return sepr.join([i for i in map(lambda x: encode(x), seq)])", "docstring": "sepr.join(urlencode(seq))\nArgs:\nseq: string list to be urlencoded\nsepr: join seq with sepr\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def patchify(self, pixel_values, interpolate_pos_encoding: bool=False):\n    patch_size, num_channels = (self.config.patch_size, self.config.num_channels)\n    if shape_list(pixel_values)[1] == num_channels:\n        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))\n    if not interpolate_pos_encoding:\n        tf.debugging.assert_equal(shape_list(pixel_values)[1], shape_list(pixel_values)[2], message='Make sure the pixel values have a squared size')\n    tf.debugging.assert_equal(shape_list(pixel_values)[1] % patch_size, 0, message='Make sure the pixel values have a size that is divisible by the patch size')\n    tf.debugging.assert_equal(shape_list(pixel_values)[3], num_channels, message='Make sure the number of channels of the pixel values is equal to the one set in the configuration')\n    batch_size = shape_list(pixel_values)[0]\n    num_patches_h = shape_list(pixel_values)[1] \n    num_patches_w = shape_list(pixel_values)[2] \n    patchified_pixel_values = tf.reshape(pixel_values, (batch_size, num_patches_h, patch_size, num_patches_w, patch_size, num_channels))\n    patchified_pixel_values = tf.einsum('nhpwqc->nhwpqc', patchified_pixel_values)\n    patchified_pixel_values = tf.reshape(patchified_pixel_values, (batch_size, num_patches_h * num_patches_w, patch_size ** 2 * num_channels))\n    return patchified_pixel_values", "docstring": "Args:\npixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)` or `(batch_size, num_channels, height, width)`):\nPixel values.\ninterpolate_pos_encoding (`bool`, default `False`):\ninterpolation flag passed during the forward pass.\n\nReturns:\n`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\nPatchified pixel values.", "source": "github-repos"}
{"code": "def lstat(self, entry_path, dir_fd=None):\n        \n        \n        entry_path = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)\n        return self.filesystem.stat(entry_path, follow_symlinks=False)", "docstring": "Return the os.stat-like tuple for entry_path, not following symlinks.\n\nArgs:\nentry_path:  path to filesystem object to retrieve.\ndir_fd: If not `None`, the file descriptor of a directory, with\n`entry_path` being relative to this directory.\nNew in Python 3.3.\n\nReturns:\nthe FakeStatResult object corresponding to `entry_path`.\n\nRaises:\nOSError: if the filesystem object doesn't exist.", "source": "juraj-google-style"}
{"code": "def __init__(self, prefix):\n        \n        self.bed = PyPlink(prefix)\n        self.bim = self.bed.get_bim()\n        self.fam = self.bed.get_fam()\n\n        \n        self.bim[\"multiallelic\"] = False\n        self.bim.loc[\n            self.bim.duplicated([\"chrom\", \"pos\"], keep=False),\n            \"multiallelic\"\n        ] = True\n\n        \n        try:\n            self.fam = self.fam.set_index(\"iid\", verify_integrity=True)\n        except ValueError:\n            logging.info(\n                \"Setting the index as 'fid_iid' because the individual IDs \"\n                \"are not unique.\"\n            )\n\n            self.fam[\"fid_iid\"] = [\n                \"{fid}_{iid}\".format(fid=fid, iid=iid)\n                for fid, iid in zip(self.fam.fid, self.fam.iid)\n            ]\n            self.fam = self.fam.set_index(\"fid_iid\", verify_integrity=True)", "docstring": "Binary plink file reader.\nArgs:\nprefix (str): the prefix of the Plink binary files.", "source": "juraj-google-style"}
{"code": "def assert_global_step(global_step_tensor):\n    if not (isinstance(global_step_tensor, variables.Variable) or isinstance(global_step_tensor, tensor.Tensor) or resource_variable_ops.is_resource_variable(global_step_tensor)):\n        raise TypeError('Existing \"global_step\" must be a Variable or Tensor: %s.' % global_step_tensor)\n    if not global_step_tensor.dtype.base_dtype.is_integer:\n        raise TypeError('Existing \"global_step\" does not have integer type: %s' % global_step_tensor.dtype)\n    if global_step_tensor.get_shape().ndims != 0 and global_step_tensor.get_shape().is_fully_defined():\n        raise TypeError('Existing \"global_step\" is not scalar: %s' % global_step_tensor.get_shape())", "docstring": "Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.\n\nArgs:\nglobal_step_tensor: `Tensor` to test.", "source": "github-repos"}
{"code": "def set_state(self, vid, value=None, default=False, disable=False):\n    cmds = self.command_builder('state', value=value, default=default, disable=disable)\n    return self.configure_vlan(vid, cmds)", "docstring": "Configures the VLAN state\n\nEosVersion:\n4.13.7M\n\nArgs:\nvid (str): The VLAN ID to configure\nvalue (str): The value to set the vlan state to\ndefault (bool): Configures the vlan state to its default value\ndisable (bool): Negates the vlan state\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "codesearchnet"}
{"code": "def tar_add_bytes(tf, filename, bytestring):\n    \n    if not isinstance(bytestring, bytes):  \n        bytestring = bytestring.encode('ascii')\n    buff = io.BytesIO(bytestring)\n    tarinfo = tarfile.TarInfo(filename)\n    tarinfo.size = len(bytestring)\n    tf.addfile(tarinfo, buff)", "docstring": "Add a file to a tar archive\n\nArgs:\ntf (tarfile.TarFile): tarfile to add the file to\nfilename (str): path within the tar file\nbytestring (bytes or str): file contents. Must be :class:`bytes` or\nascii-encodable :class:`str`", "source": "juraj-google-style"}
{"code": "def timeRange(\n        start: datetime.time, end: datetime.time,\n        step: float) -> Iterator[datetime.datetime]:\n    \n    assert step > 0\n    start = _fillDate(start)\n    end = _fillDate(end)\n    delta = datetime.timedelta(seconds=step)\n    t = start\n    while t < datetime.datetime.now():\n        t += delta\n    while t <= end:\n        waitUntil(t)\n        yield t\n        t += delta", "docstring": "Iterator that waits periodically until certain time points are\nreached while yielding those time points.\n\nArgs:\nstart: Start time, can be specified as datetime.datetime,\nor as datetime.time in which case today is used as the date\nend: End time, can be specified as datetime.datetime,\nor as datetime.time in which case today is used as the date\nstep (float): The number of seconds of each period", "source": "juraj-google-style"}
{"code": "def __call__(self, *args):\n        \n        if len(self.formatters) == 0:\n            self.setup(*args)\n\n        row_cells = []\n\n        if self.rownum:\n            row_cells.append(0)\n        if self.timestamp:\n            row_cells.append(datetime.datetime.now())\n        if self.time_diff:\n            row_cells.append(0)\n\n        row_cells.extend(args)\n\n        if len(row_cells) != len(self.formatters):\n            raise ValueError('Expected number of columns is {}. Got {}.'.format(\n                len(self.formatters), len(row_cells)))\n\n        line = self.format_row(*row_cells)\n        self.print_line(line)", "docstring": "Prints a formatted row\n\nArgs:\nargs: row cells", "source": "juraj-google-style"}
{"code": "def create_query(self, fields=None):\n        \n        if fields is None:\n            return Query(self.fields)\n\n        non_contained_fields = set(fields) - set(self.fields)\n        if non_contained_fields:\n            raise BaseLunrException(\n                \"Fields {} are not part of the index\", non_contained_fields\n            )\n\n        return Query(fields)", "docstring": "Convenience method to create a Query with the Index's fields.\n\nArgs:\nfields (iterable, optional): The fields to include in the Query,\ndefaults to the Index's `all_fields`.\n\nReturns:\nQuery: With the specified fields or all the fields in the Index.", "source": "juraj-google-style"}
{"code": "def make_supercells_with_defects(self, scaling_matrix):\n        \n        scs = []\n        sc = self._structure.copy()\n        sc.make_supercell(scaling_matrix)\n        scs.append(sc)\n        for ids, defect_site in enumerate(self._defect_sites):\n            sc_with_inter = sc.copy()\n            sc_with_inter.append(\n                defect_site.species_string,\n                defect_site.frac_coords,\n                coords_are_cartesian=False,\n                validate_proximity=False,\n                properties=None)\n            if not sc_with_inter:\n                raise RuntimeError(\n                    \"could not generate supercell with\" \" interstitial {}\".format(\n                        ids + 1))\n            scs.append(sc_with_inter.copy())\n        return scs", "docstring": "Generate a sequence of supercells\nin which each supercell contains a single interstitial,\nexcept for the first supercell in the sequence\nwhich is a copy of the defect-free input structure.\n\nArgs:\nscaling_matrix (3x3 integer array): scaling matrix\nto transform the lattice vectors.\nReturns:\nscs ([Structure]): sequence of supercells.", "source": "juraj-google-style"}
{"code": "def CopyToDict(self):\n    result_dict = {'labels': self.labels}\n    if self.comment:\n        result_dict['comment'] = self.comment\n    return result_dict", "docstring": "Copies the event tag to a dictionary.\n\nReturns:\ndict[str, object]: event tag attributes.", "source": "codesearchnet"}
{"code": "def convert_timedelta(duration):\n    \n    days, seconds = duration.days, duration.seconds\n    hours = seconds \n    minutes = (seconds % 3600) \n    seconds = (seconds % 60)\n    return days, hours, minutes, seconds", "docstring": "Summary:\nConvert duration into component time units\nArgs:\n:duration (datetime.timedelta): time duration to convert\nReturns:\ndays, hours, minutes, seconds | TYPE: tuple (integers)", "source": "juraj-google-style"}
{"code": "def _original_path(self, path):\n\n    def components_to_path():\n        if (len(path_components) > len(normalized_components)):\n            normalized_components.extend(path_components[len(normalized_components):])\n        sep = self._path_separator(path)\n        normalized_path = sep.join(normalized_components)\n        if (path.startswith(sep) and (not normalized_path.startswith(sep))):\n            normalized_path = (sep + normalized_path)\n        return normalized_path\n    if (self.is_case_sensitive or (not path)):\n        return path\n    path_components = self._path_components(path)\n    normalized_components = []\n    current_dir = self.root\n    for component in path_components:\n        if (not isinstance(current_dir, FakeDirectory)):\n            return components_to_path()\n        (dir_name, current_dir) = self._directory_content(current_dir, component)\n        if ((current_dir is None) or (isinstance(current_dir, FakeDirectory) and (current_dir._byte_contents is None) and (current_dir.st_size == 0))):\n            return components_to_path()\n        normalized_components.append(dir_name)\n    return components_to_path()", "docstring": "Return a normalized case version of the given path for\ncase-insensitive file systems. For case-sensitive file systems,\nreturn path unchanged.\n\nArgs:\npath: the file path to be transformed\n\nReturns:\nA version of path matching the case of existing path elements.", "source": "codesearchnet"}
{"code": "def generate_json_schema(cls, schema, context=DEFAULT_DICT):\n        \n        schema = cls._get_schema(schema)\n\n        \n        return cls(context=context).dump(schema).data", "docstring": "Generate a JSON Schema from a Marshmallow schema.\n\nArgs:\nschema (marshmallow.Schema|str): The Marshmallow schema, or the\nPython path to one, to create the JSON schema for.\n\nKeyword Args:\nfile_pointer (file, optional): The path or pointer to the file\nto write this schema to. If not provided, the schema will be\ndumped to ``sys.stdout``.\n\nReturns:\ndict: The JSON schema in dictionary form.", "source": "juraj-google-style"}
{"code": "def plot(self, **plot_kwargs: Any) -> None:\n        \n        fig = plt.figure()\n        plt.plot(self._rabi_angles, self._excited_state_probs, 'ro-',\n                 figure=fig, **plot_kwargs)\n        plt.xlabel(r\"Rabi Angle (Radian)\", figure=fig)\n        plt.ylabel('Excited State Probability', figure=fig)\n        fig.show()", "docstring": "Plots excited state probability vs the Rabi angle (angle of rotation\naround the x-axis).\n\nArgs:\n**plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.", "source": "juraj-google-style"}
{"code": "def Print(self, output_writer):\n    \n    if self._date_time_ranges:\n      for date_time_range in self._date_time_ranges:\n        if date_time_range.start_date_time is None:\n          end_time_string = date_time_range.end_date_time.CopyToDateTimeString()\n          output_writer.Write('\\t{0:s} after {1:s}\\n'.format(\n              date_time_range.time_value, end_time_string))\n\n        elif date_time_range.end_date_time is None:\n          start_time_string = (\n              date_time_range.start_date_time.CopyToDateTimeString())\n          output_writer.Write('\\t{0:s} before {1:s}\\n'.format(\n              date_time_range.time_value, start_time_string))\n\n        else:\n          start_time_string = (\n              date_time_range.start_date_time.CopyToDateTimeString())\n          end_time_string = date_time_range.end_date_time.CopyToDateTimeString()\n          output_writer.Write('\\t{0:s} between {1:s} and {2:s}\\n'.format(\n              date_time_range.time_value, start_time_string,\n              end_time_string))", "docstring": "Prints a human readable version of the filter.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def ones(shape, dtype=None, **kwargs):\n    data = np.ones(shape, dtype)\n    return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, filled with ones.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array filled with ones.", "source": "codesearchnet"}
{"code": "async def gather(self, *cmds: str) -> Tuple[int]:\n    subprocs = self.spawn(*cmds)\n    subproc_wait_coros = [subproc.wait_done() for subproc in subprocs]\n    return (await asyncio.gather(*subproc_wait_coros))", "docstring": "Coroutine to spawn subprocesses and block until completion.\n\nNote:\nThe same `max_concurrency` restriction that applies to `spawn`\nalso applies here.\n\nReturns:\nThe exit codes of the spawned subprocesses, in the order they were\npassed.", "source": "codesearchnet"}
{"code": "def compile_protofile(proto_file_path):\n    \n    out_file = tempfile.mkstemp()[1]\n    try:\n        subprocess.check_output(['protoc', '--include_source_info',\n                                 '--descriptor_set_out', out_file,\n                                 proto_file_path])\n    except subprocess.CalledProcessError as e:\n        sys.exit('protoc returned status {}'.format(e.returncode))\n    return out_file", "docstring": "Compile proto file to descriptor set.\n\nArgs:\nproto_file_path: Path to proto file to compile.\n\nReturns:\nPath to file containing compiled descriptor set.\n\nRaises:\nSystemExit if the compilation fails.", "source": "juraj-google-style"}
{"code": "def _click(x, y, button):\n    \n    if button == 'left':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y)\n        except (PermissionError, OSError): \n            pass\n    elif button == 'middle':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y)\n        except (PermissionError, OSError): \n            pass\n    elif button == 'right':\n        try:\n            _sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y)\n        except (PermissionError, OSError): \n            pass\n    else:\n        assert False, \"button argument not in ('left', 'middle', 'right')\"", "docstring": "Send the mouse click event to Windows by calling the mouse_event() win32\nfunction.\n\nArgs:\nbutton (str): The mouse button, either 'left', 'middle', or 'right'\nx (int): The x position of the mouse event.\ny (int): The y position of the mouse event.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def get_package_for_module(module):\n    if isinstance(module, six.string_types):\n        try:\n            module = sys.modules[module]\n        except KeyError:\n            return None\n    try:\n        return six.text_type(module.package)\n    except AttributeError:\n        if (module.__name__ == '__main__'):\n            try:\n                file_name = module.__file__\n            except AttributeError:\n                pass\n            else:\n                base_name = os.path.basename(file_name)\n                split_name = os.path.splitext(base_name)\n                if (len(split_name) == 1):\n                    return six.text_type(base_name)\n                return u'.'.join(split_name[:(- 1)])\n        return six.text_type(module.__name__)", "docstring": "Get package name for a module.\n\nHelper calculates the package name of a module.\n\nArgs:\nmodule: Module to get name for.  If module is a string, try to find\nmodule in sys.modules.\n\nReturns:\nIf module contains 'package' attribute, uses that as package name.\nElse, if module is not the '__main__' module, the module __name__.\nElse, the base name of the module file name.  Else None.", "source": "codesearchnet"}
{"code": "def _CheckForOutOfOrderStepAndMaybePurge(self, event):\n    \n    if event.step < self.most_recent_step and event.HasField('summary'):\n      self._Purge(event, by_tags=True)\n    else:\n      self.most_recent_step = event.step\n      self.most_recent_wall_time = event.wall_time", "docstring": "Check for out-of-order event.step and discard expired events for tags.\n\nCheck if the event is out of order relative to the global most recent step.\nIf it is, purge outdated summaries for tags that the event contains.\n\nArgs:\nevent: The event to use as reference. If the event is out-of-order, all\nevents with the same tags, but with a greater event.step will be purged.", "source": "juraj-google-style"}
{"code": "def _allocate_ips_to_nics(self, conf):\n    for (dom_name, dom_spec) in conf.get('domains', {}).items():\n        for (idx, nic) in enumerate(dom_spec.get('nics', [])):\n            if ('ip' in nic):\n                continue\n            net = self._get_net(conf, dom_name, nic)\n            if (net['type'] != 'nat'):\n                continue\n            allocated = net['mapping'].values()\n            vacant = _create_ip(net['gw'], set(range(2, 255)).difference(set([int(ip.split('.')[(- 1)]) for ip in allocated])).pop())\n            nic['ip'] = vacant\n            self._add_nic_to_mapping(net, dom_spec, nic)", "docstring": "For all the nics of all the domains in the conf that have dynamic ip,\nallocate one and addit to the network mapping\n\nArgs:\nconf (dict): Configuration spec to extract the domains from\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def coroutine(func):\n\n    def wrapper(*args, **kwargs):\n        gen = func(*args, **kwargs)\n        val = next(gen)\n        if (val != None):\n            raise TypeError('Unexpected value from start of coroutine')\n        return gen\n    wrapper.__name__ = func.__name__\n    wrapper.__doc__ = func.__doc__\n    return wrapper", "docstring": "Wraps a PEP-342 enhanced generator in a way that avoids boilerplate of the \"priming\" call to ``next``.\n\nArgs:\nfunc (Callable): The function constructing a generator to decorate.\n\nReturns:\nCallable: The decorated generator.", "source": "codesearchnet"}
{"code": "def get_tensor_from_node(node):\n    with ops.init_scope():\n        if getattr(node, 'is_distributed_variable', False):\n            return node\n        elif getattr(node, 'is_distributed_table', False):\n            return node\n        elif getattr(node, 'is_sharded_variable', False):\n            return node\n        elif resource_variable_ops.is_resource_variable(node):\n            return node.handle\n        elif isinstance(node, asset.Asset):\n            return node.asset_path\n        elif tensor_util.is_tf_type(node):\n            return node\n        elif isinstance(node, resource.CapturableResource):\n            return node.resource_handle\n        raise ValueError(f'Cannot convert node {node} to tensor.')", "docstring": "Resolves a saved model graph node into a tensor to be captured.\n\nArgs:\nnode: a tensor, variable, or resource to be resolved into a capturable\ntensor\n\nReturns:\nA list of tensors.\nRaises:\nValueError: if the node cannot be converted into a tensor.", "source": "github-repos"}
{"code": "def dot(poly1, poly2):\n    if ((not isinstance(poly1, Poly)) and (not isinstance(poly2, Poly))):\n        return numpy.dot(poly1, poly2)\n    poly1 = Poly(poly1)\n    poly2 = Poly(poly2)\n    poly = (poly1 * poly2)\n    if ((numpy.prod(poly1.shape) <= 1) or (numpy.prod(poly2.shape) <= 1)):\n        return poly\n    return chaospy.poly.sum(poly, 0)", "docstring": "Dot product of polynomial vectors.\n\nArgs:\npoly1 (Poly) : left part of product.\npoly2 (Poly) : right part of product.\n\nReturns:\n(Poly) : product of poly1 and poly2.\n\nExamples:\n>>> poly = cp.prange(3, 1)\n>>> print(poly)\n[1, q0, q0^2]\n>>> print(cp.dot(poly, numpy.arange(3)))\n2q0^2+q0\n>>> print(cp.dot(poly, poly))\nq0^4+q0^2+1", "source": "codesearchnet"}
{"code": "def resorted(values):\n    \n    if not values:\n        return values\n\n    values = sorted(values)\n\n    \n    first_word = next(\n        (cnt for cnt, val in enumerate(values)\n             if val and not val[0].isdigit()),\n        None\n    )\n\n    \n    if first_word is None:\n        return values\n\n    words = values[first_word:]\n    numbers = values[:first_word]\n\n    return words + numbers", "docstring": "Sort values, but put numbers after alphabetically sorted words.\n\nThis function is here to make outputs diff-compatible with Aleph.\n\nExample::\n>>> sorted([\"b\", \"1\", \"a\"])\n['1', 'a', 'b']\n>>> resorted([\"b\", \"1\", \"a\"])\n['a', 'b', '1']\n\nArgs:\nvalues (iterable): any iterable object/list/tuple/whatever.\n\nReturns:\nlist of sorted values, but with numbers after words", "source": "juraj-google-style"}
{"code": "def _sample_cell(args, cell_body):\n    env = datalab.utils.commands.notebook_environment()\n    query = None\n    table = None\n    view = None\n    if args['query']:\n        query = _get_query_argument(args, cell_body, env)\n    elif args['table']:\n        table = _get_table(args['table'])\n    elif args['view']:\n        view = datalab.utils.commands.get_notebook_item(args['view'])\n        if (not isinstance(view, datalab.bigquery.View)):\n            raise Exception(('%s is not a view' % args['view']))\n    else:\n        query = datalab.bigquery.Query(cell_body, values=env)\n    count = args['count']\n    method = args['method']\n    if (method == 'random'):\n        sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)\n    elif (method == 'hashed'):\n        sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'], percent=args['percent'], count=count)\n    elif (method == 'sorted'):\n        ascending = (args['order'] == 'ascending')\n        sampling = datalab.bigquery.Sampling.sorted(args['field'], ascending=ascending, count=count)\n    elif (method == 'limit'):\n        sampling = datalab.bigquery.Sampling.default(count=count)\n    else:\n        sampling = datalab.bigquery.Sampling.default(count=count)\n    if query:\n        results = query.sample(sampling=sampling, dialect=args['dialect'], billing_tier=args['billing'])\n    elif view:\n        results = view.sample(sampling=sampling)\n    else:\n        results = table.sample(sampling=sampling)\n    if args['verbose']:\n        print(results.sql)\n    if args['profile']:\n        return datalab.utils.commands.profile_df(results.to_dataframe())\n    else:\n        return results", "docstring": "Implements the bigquery sample cell magic for ipython notebooks.\n\nArgs:\nargs: the optional arguments following '%%bigquery sample'.\ncell_body: optional contents of the cell interpreted as SQL, YAML or JSON.\nReturns:\nThe results of executing the sampling query, or a profile of the sample data.", "source": "codesearchnet"}
{"code": "def start_naive_bayes(automated_run, session, path):\n    module = functions.import_string_code_as_module(automated_run.source)\n    random_state = (8 if (not hasattr(module, 'random_state')) else module.random_state)\n    assert (module.metric_to_optimize in automated_run.base_learner_origin.metric_generators)\n    base_estimator = automated_run.base_learner_origin.return_estimator()\n    base_estimator.set_params(**module.default_params)\n    default_params = functions.make_serializable(base_estimator.get_params())\n    non_searchable_params = dict(((key, val) for (key, val) in iteritems(default_params) if (key not in module.pbounds)))\n    existing_base_learners = []\n    for base_learner in automated_run.base_learner_origin.base_learners:\n        if (not (base_learner.job_status == 'finished')):\n            continue\n        in_search_space = True\n        for (key, val) in iteritems(non_searchable_params):\n            if (base_learner.hyperparameters[key] != val):\n                in_search_space = False\n                break\n        if in_search_space:\n            existing_base_learners.append(base_learner)\n    target = []\n    initialization_dict = dict(((key, list()) for key in module.pbounds.keys()))\n    for base_learner in existing_base_learners:\n        all_numerical = True\n        for key in module.pbounds.keys():\n            if (not isinstance(base_learner.hyperparameters[key], numbers.Number)):\n                all_numerical = False\n                break\n        if (not all_numerical):\n            continue\n        for key in module.pbounds.keys():\n            initialization_dict[key].append(base_learner.hyperparameters[key])\n        target.append(base_learner.individual_score[module.metric_to_optimize])\n    initialization_dict['target'] = (target if (not module.invert_metric) else list(map((lambda x: (- x)), target)))\n    print('{} existing in initialization dictionary'.format(len(initialization_dict['target'])))\n    func_to_optimize = return_func_to_optimize(path, session, automated_run.base_learner_origin, module.default_params, module.metric_to_optimize, module.invert_metric, set(module.integers))\n    bo = BayesianOptimization(func_to_optimize, module.pbounds)\n    bo.initialize(initialization_dict)\n    np.random.seed(random_state)\n    bo.maximize(**module.maximize_config)", "docstring": "Starts naive bayes automated run\n\nArgs:\nautomated_run (xcessiv.models.AutomatedRun): Automated run object\n\nsession: Valid SQLAlchemy session\n\npath (str, unicode): Path to project folder", "source": "codesearchnet"}
{"code": "def decode(self, fp: TextIO) -> BioCCollection:\n        \n        \n        tree = etree.parse(fp)\n        collection = self.__parse_collection(tree.getroot())\n        collection.encoding = tree.docinfo.encoding\n        collection.standalone = tree.docinfo.standalone\n        collection.version = tree.docinfo.xml_version\n        return collection", "docstring": "Deserialize ``fp`` to a BioC collection object.\n\nArgs:\nfp: a ``.read()``-supporting file-like object containing a BioC collection\n\nReturns:\nan object of BioCollection", "source": "juraj-google-style"}
{"code": "def builder(name, **builder_init_kwargs):\n    (name, builder_kwargs) = _dataset_name_and_kwargs_from_name_str(name)\n    builder_kwargs.update(builder_init_kwargs)\n    if (name in _ABSTRACT_DATASET_REGISTRY):\n        raise DatasetNotFoundError(name, is_abstract=True)\n    if (name in _IN_DEVELOPMENT_REGISTRY):\n        raise DatasetNotFoundError(name, in_development=True)\n    if (name not in _DATASET_REGISTRY):\n        raise DatasetNotFoundError(name)\n    try:\n        return _DATASET_REGISTRY[name](**builder_kwargs)\n    except BaseException:\n        logging.error('Failed to construct dataset %s', name)\n        raise", "docstring": "Fetches a `tfds.core.DatasetBuilder` by string name.\n\nArgs:\nname: `str`, the registered name of the `DatasetBuilder` (the snake case\nversion of the class name). This can be either `\"dataset_name\"` or\n`\"dataset_name/config_name\"` for datasets with `BuilderConfig`s.\nAs a convenience, this string may contain comma-separated keyword\narguments for the builder. For example `\"foo_bar/a=True,b=3\"` would use\nthe `FooBar` dataset passing the keyword arguments `a=True` and `b=3`\n(for builders with configs, it would be `\"foo_bar/zoo/a=True,b=3\"` to\nuse the `\"zoo\"` config and pass to the builder keyword arguments `a=True`\nand `b=3`).\n**builder_init_kwargs: `dict` of keyword arguments passed to the\n`DatasetBuilder`. These will override keyword arguments passed in `name`,\nif any.\n\nReturns:\nA `tfds.core.DatasetBuilder`.\n\nRaises:\nDatasetNotFoundError: if `name` is unrecognized.", "source": "codesearchnet"}
{"code": "def from_table(table, fields=None):\n    if (fields is None):\n        fields = '*'\n    elif isinstance(fields, list):\n        fields = ','.join(fields)\n    return Query(('SELECT %s FROM %s' % (fields, table._repr_sql_())))", "docstring": "Return a Query for the given Table object\n\nArgs:\ntable: the Table object to construct a Query out of\nfields: the fields to return. If None, all fields will be returned. This can be a string\nwhich will be injected into the Query after SELECT, or a list of field names.\n\nReturns:\nA Query object that will return the specified fields from the records in the Table.", "source": "codesearchnet"}
{"code": "def WriteEventBody(self, event):\n    \n    latitude = getattr(event, 'latitude', None)\n    longitude = getattr(event, 'longitude', None)\n    if latitude is not None and longitude is not None:\n      placemark_xml_element = ElementTree.Element('Placemark')\n\n      name_xml_element = ElementTree.SubElement(placemark_xml_element, 'name')\n\n      name_xml_element.text = 'PLACEHOLDER FOR EVENT IDENTIFIER'\n\n      description_xml_element = ElementTree.SubElement(\n          placemark_xml_element, 'description')\n      \n      description_xml_element.text = (\n          rawpy.NativePythonFormatterHelper.GetFormattedEventObject(event))\n\n      point_xml_element = ElementTree.SubElement(\n          placemark_xml_element, 'Point')\n\n      coordinates_xml_element = ElementTree.SubElement(\n          point_xml_element, 'coordinates')\n      coordinates_xml_element.text = '{0!s},{1!s}'.format(longitude, latitude)\n\n      \n      \n      xml_string = ElementTree.tostring(placemark_xml_element)\n\n      output_text = codecs.decode(xml_string, self._output_mediator.encoding)\n      self._output_writer.Write(output_text)", "docstring": "Writes the body of an event to the output.\n\nArgs:\nevent (EventObject): event.", "source": "juraj-google-style"}
{"code": "def make_dataset_from_selfplay(data_extracts):\n    \n    tf_examples = (make_tf_example(features_lib.extract_features(pos), pi, result)\n                   for pos, pi, result in data_extracts)\n    return tf_examples", "docstring": "Returns an iterable of tf.Examples.\nArgs:\ndata_extracts: An iterable of (position, pi, result) tuples", "source": "juraj-google-style"}
{"code": "def add_trial(self, trial):\n    trial.set_verbose(self._verbose)\n    self._trials.append(trial)\n    with warn_if_slow('scheduler.on_trial_add'):\n        self._scheduler_alg.on_trial_add(self, trial)\n    self.trial_executor.try_checkpoint_metadata(trial)", "docstring": "Adds a new trial to this TrialRunner.\n\nTrials may be added at any time.\n\nArgs:\ntrial (Trial): Trial to queue.", "source": "codesearchnet"}
{"code": "def set_value(self, text):\n        \n        if self.single_line:\n            text = text.replace('\\n', '')\n        self.set_text(text)", "docstring": "Sets the text content.\n\nArgs:\ntext (str): The string content that have to be appended as standard child identified by the key 'text'", "source": "juraj-google-style"}
{"code": "def CheckPosixThreading(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    for (single_thread_func, multithread_safe_func, pattern) in _THREADING_LIST:\n        if Search(pattern, line):\n            error(filename, linenum, 'runtime/threadsafe_fn', 2, (((('Consider using ' + multithread_safe_func) + '...) instead of ') + single_thread_func) + '...) for improved thread safety.'))", "docstring": "Checks for calls to thread-unsafe functions.\n\nMuch code has been originally written without consideration of\nmulti-threading. Also, engineers are relying on their old experience;\nthey have learned posix before threading extensions were added. These\ntests guide the engineers to use thread-safe functions (when using\nposix directly).\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def clean_strings(iterable):\n    retval = []\n    for val in iterable:\n        try:\n            retval.append(val.strip())\n        except AttributeError:\n            retval.append(val)\n    return retval", "docstring": "Take a list of strings and clear whitespace\non each one. If a value in the list is not a\nstring pass it through untouched.\n\nArgs:\niterable: mixed list\n\nReturns:\nmixed list", "source": "codesearchnet"}
{"code": "def ones(shape, dtype=None, name=None):\n    with ops.init_scope():\n        if dtype is None:\n            dtype = floatx()\n        tf_dtype = dtypes_module.as_dtype(dtype)\n        v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)\n        if py_all(v.shape.as_list()):\n            return variable(v, dtype=dtype, name=name)\n        return v", "docstring": "Instantiates an all-ones variable and returns it.\n\nArgs:\nshape: Tuple of integers, shape of returned Keras variable.\ndtype: String, data type of returned Keras variable.\nname: String, name of returned Keras variable.\n\nReturns:\nA Keras variable, filled with `1.0`.\nNote that if `shape` was symbolic, we cannot return a variable,\nand will return a dynamically-shaped tensor instead.\n\nExample:\n\n\n>>> kvar = tf.keras.backend.ones((3,4))\n>>> tf.keras.backend.eval(kvar)\narray([[1.,  1.,  1.,  1.],\n[1.,  1.,  1.,  1.],\n[1.,  1.,  1.,  1.]], dtype=float32)", "source": "github-repos"}
{"code": "def predict_proba(self, X):\n        \n        return collections.deque(self.iter_predict_proba(X), maxlen=1).pop()", "docstring": "Returns the predicted probabilities for ``X``.\n\nArguments:\nX (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.\nSparse matrices are accepted only if they are supported by the weak model.\n\nReturns:\narray of shape (n_samples, n_classes) containing the predicted probabilities.", "source": "juraj-google-style"}
{"code": "def _Consumers(t, func_graphs):\n    consumers = t.consumers()\n    for func in func_graphs:\n        for input_t, placeholder in _Captures(func):\n            if input_t is t:\n                consumers.extend(_Consumers(placeholder, func_graphs))\n    return consumers", "docstring": "Returns the consumers of t, crossing closure boundaries where necessary.\n\nArgs:\nt: Tensor\nfunc_graphs: a list of FuncGraphs that may have captured t.\n\nReturns:\nA list of tensors. The tensors will be from the current graph and/or\nfunc_graphs.", "source": "github-repos"}
{"code": "def WriteValuesToJSONFile(self, state, values):\n    value_counters = {}\n    max_post_size = config.CONFIG['BigQuery.max_file_post_size']\n    for value in values:\n        class_name = value.__class__.__name__\n        (output_tracker, created) = self._GetTempOutputFileHandles(class_name)\n        value_counters[class_name] = (value_counters.get(class_name, (- 1)) + 1)\n        if (not ((value_counters[class_name] % max_post_size) \n            output_tracker.gzip_filehandle.flush()\n            if (os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size):\n                self.Flush(state)\n                value_counters[class_name] = 0\n                (output_tracker, created) = self._GetTempOutputFileHandles(class_name)\n        if (not output_tracker.schema):\n            output_tracker.schema = self.RDFValueToBigQuerySchema(value)\n        if created:\n            self._WriteJSONValue(output_tracker.gzip_filehandle, value)\n        else:\n            self._WriteJSONValue(output_tracker.gzip_filehandle, value, delimiter='\\n')\n    for output_tracker in itervalues(self.temp_output_trackers):\n        output_tracker.gzip_filehandle.flush()", "docstring": "Write newline separated JSON dicts for each value.\n\nWe write each dict separately so we don't have to hold all of the output\nstreams in memory. We open and close the JSON array manually with [].\n\nArgs:\nstate: rdf_protodict.AttributedDict with the plugin's state.\nvalues: RDF values to export.", "source": "codesearchnet"}
{"code": "def load_ipython_extension(ip):\n    \n    decor = InteractiveDecorator(ip)\n    ip.events.register('post_run_cell', decor.post_run_cell)\n\n    \n    \n    \n    \n    \n    newhist = AcornHistoryManager(ip.history_manager, decor)\n    ip.history_manager = newhist", "docstring": "Loads the interacting decorator that ships with `acorn` into the ipython\ninteractive shell.\n\nArgs:\nip (IPython.core.interactiveshell.InteractiveShell): ipython shell instance\nfor interacting with the shell variables.", "source": "juraj-google-style"}
{"code": "def expm1(x):\n    if any_symbolic_tensors((x,)):\n        return Expm1().symbolic_call(x)\n    return backend.numpy.expm1(x)", "docstring": "Calculate `exp(x) - 1` for all elements in the tensor.\n\nArgs:\nx: Input values.\n\nReturns:\nOutput tensor, element-wise exponential minus one.", "source": "github-repos"}
{"code": "def encode(self, value: Any) -> geno.DNA:", "docstring": "Encode a value into a DNA.\n\nArgs:\nvalue: A value that conforms to the hyper value definition.\n\nReturns:\nDNA for the value.", "source": "github-repos"}
{"code": "def set_dna(self, dna: geno.DNA) -> None:\n    self._dna = dna\n    self._decoded_value = None", "docstring": "Use this DNA to generate value.\n\nNOTE(daiyip): self._dna is only used in __call__.\nThus 'set_dna' can be called multiple times to generate different values.\n\nArgs:\ndna: DNA to use to decode the value.", "source": "github-repos"}
{"code": "def expand_indicators(indicator):\n        \n        if indicator.count(' : ') > 0:\n            \n            indicator_list = []\n\n            \n            iregx_pattern = r'^(.*?(?=\\s\\:\\s|$))?'\n            iregx_pattern += r'(?:\\s\\:\\s)?'  \n            \n            \n            iregx_pattern += r'((?<=\\s\\:\\s).*?(?=(?:\\s)?\\:\\s|$))?'\n            iregx_pattern += r'(?:(?:\\s)?\\:\\s)?'  \n            \n            \n            iregx_pattern += r'((?<=\\s\\:\\s).*?(?=$))?$'\n            iregx = re.compile(iregx_pattern)\n\n            indicators = iregx.search(indicator)\n            if indicators is not None:\n                indicator_list = list(indicators.groups())\n        else:\n            \n            indicator_list = [indicator]\n\n        return indicator_list", "docstring": "Process indicators expanding file hashes/custom indicators into multiple entries.\n\nArgs:\nindicator (string): \" : \" delimited string\nReturns:\n(list): a list of indicators split on \" : \".", "source": "juraj-google-style"}
{"code": "def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses):\n    \n    ret = []\n    \n    pool = {int(k): v for k, v in pool.items()}\n    total_in_pool = len(seeded_answers)\n    merged_pool = convert_seeded_answers(seeded_answers)\n    student_id = get_student_item_dict()['student_id']\n    \n    for key in pool:\n        total_in_pool += len(pool[key])\n        \n        \n        if student_id in pool[key].keys():\n            total_in_pool -= 1\n        if key in merged_pool:\n            merged_pool[key].update(pool[key].items())\n        else:\n            merged_pool[key] = pool[key]\n\n    \n    selected = []\n\n    \n    while len(ret) < min(num_responses, total_in_pool):\n        for option, students in merged_pool.items():\n            student = student_id\n            i = 0\n            while (student == student_id or i > 100) and (str(option) + student) not in selected:\n                \n                \n                \n                \n                student = random.choice(students.keys())\n                i += 1\n            selected.append(str(option)+student)\n            if student.startswith('seeded'):\n                \n                rationale = students[student]\n            else:\n                student_item = get_student_item_dict(student)\n                submission = sas_api.get_answers_for_student(student_item)\n                rationale = submission.get_rationale(0)\n            ret.append({'option': option, 'rationale': rationale})\n\n            \n            if len(ret) >= min(num_responses, total_in_pool):\n                break\n\n    return {\"answers\": ret}", "docstring": "Get answers from others with simple algorithm, which picks one answer for each option.\n\nArgs:\nsee `get_other_answers`\nnum_responses (int): the number of responses to be returned. This value may not be\nrespected if there is not enough answers to return\n\nReturns:\ndict: answers based on the selection algorithm", "source": "juraj-google-style"}
{"code": "def __init__(self, dims):\n    if isinstance(dims, (tuple, list)):\n        self._dims = tuple((as_dimension(d).value for d in dims))\n    elif dims is None:\n        self._dims = None\n    elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):\n        if dims.unknown_rank:\n            self._dims = None\n        else:\n            self._dims = tuple((dim.size if dim.size != -1 else None for dim in dims.dim))\n    elif isinstance(dims, TensorShape):\n        self._dims = dims._dims\n    else:\n        try:\n            dims_iter = iter(dims)\n        except TypeError:\n            self._dims = (as_dimension(dims).value,)\n        else:\n            self._dims = []\n            for d in dims_iter:\n                try:\n                    self._dims.append(as_dimension(d).value)\n                except TypeError as e:\n                    raise TypeError(\"Failed to convert '{0!r}' to a shape: '{1!r}'could not be converted to a dimension. A shape should either be single dimension (e.g. 10), or an iterable of dimensions (e.g. [1, 10, None]).\".format(dims, d)) from e\n            self._dims = tuple(self._dims)", "docstring": "Creates a new TensorShape with the given dimensions.\n\nArgs:\ndims: A list of Dimensions, or None if the shape is unspecified.\n\nRaises:\nTypeError: If dims cannot be converted to a list of dimensions.", "source": "github-repos"}
{"code": "def parse_matches(patient_id, match_objs):\n    LOG.info('Parsing MatchMaker matches for patient {}'.format(patient_id))\n    parsed_matches = []\n    for match_obj in match_objs:\n        milliseconds_date = match_obj['created']['$date']\n        mdate = datetime.datetime.fromtimestamp((milliseconds_date / 1000.0))\n        match_type = 'external'\n        matching_patients = []\n        parsed_match = {'match_oid': match_obj['_id']['$oid'], 'match_date': mdate}\n        if (match_obj['data']['patient']['id'] == patient_id):\n            match_results = match_obj['results']\n            for node_result in match_results:\n                if (match_obj['match_type'] == 'internal'):\n                    match_type = 'internal'\n                for patient in node_result['patients']:\n                    match_patient = {'patient_id': patient['patient']['id'], 'score': patient['score'], 'patient': patient['patient'], 'node': node_result['node']}\n                    matching_patients.append(match_patient)\n        else:\n            m_patient = match_obj['data']['patient']\n            contact_institution = m_patient['contact'].get('institution')\n            if (contact_institution and ('Scout software user' in contact_institution)):\n                match_type = 'internal'\n            score = None\n            for res in match_obj['results']:\n                for patient in res['patients']:\n                    LOG.info('Looping in else, patient:{}'.format(patient['patient']['id']))\n                    if (patient['patient']['id'] == patient_id):\n                        score = patient['score']\n                        match_patient = {'patient_id': m_patient['id'], 'score': score, 'patient': m_patient, 'node': res['node']}\n                        matching_patients.append(match_patient)\n        parsed_match['match_type'] = match_type\n        parsed_match['patients'] = matching_patients\n        parsed_matches.append(parsed_match)\n    parsed_matches = sorted(parsed_matches, key=(lambda k: k['match_date']), reverse=True)\n    return parsed_matches", "docstring": "Parse a list of matchmaker matches objects and returns\na readable list of matches to display in matchmaker matches view.\n\nArgs:\npatient_id(str): id of a mme patient\nmatch_objs(list): list of match objs returned by MME server for the patient\n# match_objs looks like this:\n[\n{\n'node' : { id : node_id , label: node_label},\n'patients' : [\n{ 'patient': {patient1_data} },\n{ 'patient': {patient2_data} },\n..\n]\n},\n..\n]\n\nReturns:\nparsed_matches(list): a list of parsed match objects", "source": "codesearchnet"}
{"code": "def from_authorized_user_info(cls, info, scopes=None):\n    keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n    missing = keys_needed.difference(six.iterkeys(info))\n    if missing:\n        raise ValueError('Authorized user info was not in the expected format, missing fields {}.'.format(', '.join(missing)))\n    return Credentials(None, refresh_token=info['refresh_token'], token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT, scopes=scopes, client_id=info['client_id'], client_secret=info['client_secret'])", "docstring": "Creates a Credentials instance from parsed authorized user info.\n\nArgs:\ninfo (Mapping[str, str]): The authorized user info in Google\nformat.\nscopes (Sequence[str]): Optional list of scopes to include in the\ncredentials.\n\nReturns:\ngoogle.oauth2.credentials.Credentials: The constructed\ncredentials.\n\nRaises:\nValueError: If the info is not in the expected format.", "source": "codesearchnet"}
{"code": "def _draw_frame(self, framedata):\n    original = self.read_frame()\n    if (original is None):\n        self.update_info(self.info_string(message='Finished.', frame=framedata))\n        return\n    if (self.original is not None):\n        processed = self.process_frame(original.copy())\n        if (self.cmap_original is not None):\n            original = to_gray(original)\n        elif (not is_color_image(original)):\n            self.original.set_cmap('gray')\n        self.original.set_data(original)\n    else:\n        processed = self.process_frame(original)\n    if (self.cmap_processed is not None):\n        processed = to_gray(processed)\n    elif (not is_color_image(processed)):\n        self.processed.set_cmap('gray')\n    if self.annotations:\n        self.annotate(framedata)\n    self.processed.set_data(processed)\n    self.update_info(self.info_string(frame=framedata))", "docstring": "Reads, processes and draws the frames.\n\nIf needed for color maps, conversions to gray scale are performed. In\ncase the images are no color images and no custom color maps are\ndefined, the colormap `gray` is applied.\n\nThis function is called by TimedAnimation.\n\nArgs:\nframedata: The frame data.", "source": "codesearchnet"}
{"code": "def watchlist_movies(self, **kwargs):\n    path = self._get_id_path('watchlist_movies')\n    kwargs.update({'session_id': self.session_id})\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of movies on an account watchlist.\n\nArgs:\npage: (optional) Minimum 1, maximum 1000.\nsort_by: (optional) 'created_at.asc' | 'created_at.desc'\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def dict_of_lists_add(dictionary, key, value):\n    \n    \n    list_objs = dictionary.get(key, list())\n    list_objs.append(value)\n    dictionary[key] = list_objs", "docstring": "Add value to a list in a dictionary by key\n\nArgs:\ndictionary (DictUpperBound): Dictionary to which to add values\nkey (Any): Key within dictionary\nvalue (Any): Value to add to list in dictionary\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def add_ldap_group_link(self, cn, group_access, provider, **kwargs):\n    path = ('/groups/%s/ldap_group_links' % self.get_id())\n    data = {'cn': cn, 'group_access': group_access, 'provider': provider}\n    self.manager.gitlab.http_post(path, post_data=data, **kwargs)", "docstring": "Add an LDAP group link.\n\nArgs:\ncn (str): CN of the LDAP group\ngroup_access (int): Minimum access level for members of the LDAP\ngroup\nprovider (str): LDAP provider for the LDAP group\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server cannot perform the request", "source": "codesearchnet"}
{"code": "def is_chief(self):\n    return self._is_chief", "docstring": "Return True if this is a chief supervisor.\n\nReturns:\nA bool.", "source": "github-repos"}
{"code": "def put_pixel(self, x: int, y: int, color: Tuple[int, int, int]) -> None:\n        \n        lib.TCOD_image_put_pixel(self.image_c, x, y, color)", "docstring": "Change a pixel on this Image.\n\nArgs:\nx (int): X pixel of the Image.  Starting from the left at 0.\ny (int): Y pixel of the Image.  Starting from the top at 0.\ncolor (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.", "source": "juraj-google-style"}
{"code": "def IsAllSpent(self):\n    for item in self.Items:\n        if (item == CoinState.Confirmed):\n            return False\n    return True", "docstring": "Flag indicating if all balance is spend.\n\nReturns:\nbool:", "source": "codesearchnet"}
{"code": "def _get_block_publisher(self, state_hash):\n    state_view = self._state_view_factory.create_view(state_hash)\n    try:\n\n        class BatchPublisher():\n\n            def send(self, transactions):\n                raise InvalidGenesisConsensusError('Consensus cannot send transactions during genesis.')\n        consensus = ConsensusFactory.get_configured_consensus_module(NULL_BLOCK_IDENTIFIER, state_view)\n        return consensus.BlockPublisher(BlockCache(self._block_store), state_view_factory=self._state_view_factory, batch_publisher=BatchPublisher(), data_dir=self._data_dir, config_dir=self._config_dir, validator_id=self._identity_signer.get_public_key().as_hex())\n    except UnknownConsensusModuleError as e:\n        raise InvalidGenesisStateError(e)", "docstring": "Returns the block publisher based on the consensus module set by the\n\"sawtooth_settings\" transaction family.\n\nArgs:\nstate_hash (str): The current state root hash for reading settings.\n\nRaises:\nInvalidGenesisStateError: if any errors occur getting the\nBlockPublisher.", "source": "codesearchnet"}
{"code": "def mat2euler(rmat, axes=\"sxyz\"):\n    \n    try:\n        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n    except (AttributeError, KeyError):\n        firstaxis, parity, repetition, frame = axes\n\n    i = firstaxis\n    j = _NEXT_AXIS[i + parity]\n    k = _NEXT_AXIS[i - parity + 1]\n\n    M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]\n    if repetition:\n        sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k])\n        if sy > EPS:\n            ax = math.atan2(M[i, j], M[i, k])\n            ay = math.atan2(sy, M[i, i])\n            az = math.atan2(M[j, i], -M[k, i])\n        else:\n            ax = math.atan2(-M[j, k], M[j, j])\n            ay = math.atan2(sy, M[i, i])\n            az = 0.0\n    else:\n        cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i])\n        if cy > EPS:\n            ax = math.atan2(M[k, j], M[k, k])\n            ay = math.atan2(-M[k, i], cy)\n            az = math.atan2(M[j, i], M[i, i])\n        else:\n            ax = math.atan2(-M[j, k], M[j, j])\n            ay = math.atan2(-M[k, i], cy)\n            az = 0.0\n\n    if parity:\n        ax, ay, az = -ax, -ay, -az\n    if frame:\n        ax, az = az, ax\n    return vec((ax, ay, az))", "docstring": "Converts given rotation matrix to euler angles in radian.\n\nArgs:\nrmat: 3x3 rotation matrix\naxes: One of 24 axis sequences as string or encoded tuple\n\nReturns:\nconverted euler angles in radian vec3 float", "source": "juraj-google-style"}
{"code": "def import_module(name):\n    parts = name.split('.')\n    path = None\n    module_name = ''\n    fhandle = None\n    for (index, part) in enumerate(parts):\n        module_name = (part if (index == 0) else ('%s.%s' % (module_name, part)))\n        path = ([path] if (path is not None) else path)\n        try:\n            (fhandle, path, descr) = imp.find_module(part, path)\n            if (module_name in sys.modules):\n                mod = sys.modules[module_name]\n            else:\n                mod = imp.load_module(module_name, fhandle, path, descr)\n        finally:\n            if fhandle:\n                fhandle.close()\n    return mod", "docstring": "Imports a module into the current runtime environment\n\nThis function emulates the Python import system that allows for\nimporting full path modules.  It will break down the module and\nimport each part (or skip if it is already loaded in cache).\n\nArgs:\nname (str): The name of the module to import.  This should be\nthe full path of the module\n\nReturns:\nThe module that was imported", "source": "codesearchnet"}
{"code": "def _login(self, max_tries=2):\n    \n\n    if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL):\n      raise BrowserError(\n          'Current url \"%s\" is not a signin url (\"%s\")' %\n          (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL))\n\n    email_field_loaded = lambda br: br.find_elements_by_id('ap_email')\n    self._wait().until(email_field_loaded)\n    tries = 0\n    while tries < max_tries:\n      \n      email_elem = self.find_element_by_id('ap_email')\n      email_elem.clear()\n      email_elem.send_keys(self._uname)\n\n      \n      pword_elem = self.find_element_by_id('ap_password')\n      pword_elem.clear()\n      pword_elem.send_keys(self._pword)\n\n      def creds_entered(_):\n        \n\n        email_ok = email_elem.get_attribute('value') == self._uname\n        pword_ok = pword_elem.get_attribute('value') == self._pword\n        return email_ok and pword_ok\n\n      kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader'\n      try:\n        self._wait(5).until(creds_entered)\n        self.find_element_by_id('signInSubmit-input').click()\n        self._wait(5).until(kcr_page_loaded)\n      except TimeoutException:\n        tries += 1\n      else:\n        return\n\n    raise LoginError", "docstring": "Logs in to Kindle Cloud Reader.\n\nArgs:\nmax_tries: The maximum number of login attempts that will be made.\n\nRaises:\nBrowserError: If method called when browser not at a signin URL.\nLoginError: If login unsuccessful after `max_tries` attempts.", "source": "juraj-google-style"}
{"code": "def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD):\n    l_a = len(A.points)\n    l_b = len(B.points)\n    idx = index.Index()\n    dex = 0\n    for i in range((l_a - 1)):\n        idx.insert(dex, bounding_box_from(A.points, i, (i + 1), T), obj=[A.points[i], A.points[(i + 1)]])\n        dex = (dex + 1)\n    prox_acc = []\n    for i in range((l_b - 1)):\n        ti = B.points[i].gen2arr()\n        ti1 = B.points[(i + 1)].gen2arr()\n        bb = bounding_box_from(B.points, i, (i + 1), T)\n        intersects = idx.intersection(bb, objects=True)\n        n_prox = []\n        i_prox = 0\n        a = 0\n        for x in intersects:\n            a = (a + 1)\n            pi = x.object[0].gen2arr()\n            pi1 = x.object[1].gen2arr()\n            prox = line_similarity(ti, ti1, pi, pi1, T)\n            i_prox = (i_prox + prox)\n            n_prox.append(prox)\n        if (a != 0):\n            prox_acc.append((i_prox / a))\n        else:\n            prox_acc.append(0)\n    return (np.mean(prox_acc), prox_acc)", "docstring": "Computes the similarity between two segments\n\nArgs:\nA (:obj:`Segment`)\nB (:obj:`Segment`)\nReturns:\nfloat: between 0 and 1. Where 1 is very similar and 0 is completely different", "source": "codesearchnet"}
{"code": "def _begin_connection_action(self, action):\n        \n\n        conn_id = action.data['connection_id']\n        int_id = action.data['internal_id']\n        callback = action.data['callback']\n\n        \n        if self._get_connection_state(conn_id) != self.Disconnected:\n            print(self._connections[conn_id])\n            callback(conn_id, self.id, False, 'Connection ID is already in use for another connection')\n            return\n\n        if self._get_connection_state(int_id) != self.Disconnected:\n            callback(conn_id, self.id, False, 'Internal ID is already in use for another connection')\n            return\n\n        conn_data = {\n            'state': self.Connecting,\n            'microstate': None,\n            'conn_id': conn_id,\n            'int_id': int_id,\n            'callback': callback,\n            'timeout': action.timeout,\n            'context': action.data['context']\n        }\n\n        self._connections[conn_id] = conn_data\n        self._int_connections[int_id] = conn_data", "docstring": "Begin a connection attempt\n\nArgs:\naction (ConnectionAction): the action object describing what we are\nconnecting to", "source": "juraj-google-style"}
{"code": "def func_callsig(func, with_name=True):\n    import inspect\n    argspec = inspect.getargspec(func)\n    (args, varargs, varkw, defaults) = argspec\n    callsig = inspect.formatargspec(*argspec[0:3])\n    if with_name:\n        callsig = (get_callable_name(func) + callsig)\n    return callsig", "docstring": "String of function call signature\n\nArgs:\nfunc (function): live python function\n\nReturns:\nstr: callsig\n\nCommandLine:\npython -m utool.util_str --exec-func_callsig\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import *  # NOQA\n>>> func = func_str\n>>> callsig = func_callsig(func)\n>>> result = str(callsig)\n>>> print(result)\nfunc_str(func, args, kwargs, type_aliases, packed, packkw, truncate)", "source": "codesearchnet"}
{"code": "def update_(self, conf_dict, conf_arg=True):\n        \n        for section, secdict in conf_dict.items():\n            self[section].update_(secdict, conf_arg)", "docstring": "Update values of configuration options with dict.\n\nArgs:\nconf_dict (dict): dict of dict indexed with section and option\nnames.\nconf_arg (bool): if True, only options that can be set in a config\nfile are updated.", "source": "juraj-google-style"}
{"code": "def rollapply(data, window, fn):\n    \n    res = data.copy()\n    res[:] = np.nan\n    n = len(data)\n\n    if window > n:\n        return res\n\n    for i in range(window - 1, n):\n        res.iloc[i] = fn(data.iloc[i - window + 1:i + 1])\n\n    return res", "docstring": "Apply a function fn over a rolling window of size window.\n\nArgs:\n* data (Series or DataFrame): Series or DataFrame\n* window (int): Window size\n* fn (function): Function to apply over the rolling window.\nFor a series, the return value is expected to be a single\nnumber. For a DataFrame, it shuold return a new row.\n\nReturns:\n* Object of same dimensions as data", "source": "juraj-google-style"}
{"code": "def WriteScanContext(self, scan_context, scan_step=None):\n    if (scan_step is not None):\n        print('Scan step: {0:d}'.format(scan_step))\n    print('Source type\\t\\t: {0:s}'.format(scan_context.source_type))\n    print('')\n    scan_node = scan_context.GetRootScanNode()\n    self.WriteScanNode(scan_context, scan_node)\n    print('')", "docstring": "Writes the source scanner context to stdout.\n\nArgs:\nscan_context (SourceScannerContext): the source scanner context.\nscan_step (Optional[int]): the scan step, where None represents no step.", "source": "codesearchnet"}
{"code": "def get(app: web.Application, feature_type: Type[Any]=None, key: Hashable=None) -> Any:\n    key = (key or feature_type)\n    if (not key):\n        raise AssertionError('No feature identifier provided')\n    try:\n        found = app[FEATURES_KEY][key]\n    except KeyError:\n        raise KeyError(f'No feature found for \"{key}\"')\n    if (feature_type and (not isinstance(found, feature_type))):\n        raise AssertionError(f'Found {found} did not match type \"{feature_type}\"')\n    return found", "docstring": "Finds declared feature.\nIdentification is done based on feature type and key.\n\nArgs:\napp (web.Application):\nThe current Aiohttp application.\n\nfeature_type (Type[Any]):\nThe Python type of the desired feature.\nIf specified, it will be checked against the found feature.\n\nkey (Hashable):\nA specific identifier for the desired feature.\nDefaults to `feature_type`\n\nReturns:\nAny: The feature found for the combination of `feature_type` and `key`", "source": "codesearchnet"}
{"code": "def write(self, index, value, name=None):\n    return self._implementation.write(index, value, name=name)", "docstring": "Write `value` into index `index` of the TensorArray.\n\nArgs:\nindex: 0-D.  int32 scalar with the index to write to.\nvalue: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.\nname: A name for the operation (optional).\n\nReturns:\nA new TensorArray object with flow that ensures the write occurs.\nUse this object for all subsequent operations.\n\nRaises:\nValueError: if there are more writers than specified.", "source": "github-repos"}
{"code": "def _finished_callback(self, batch_fut, todo):\n    self._running.remove(batch_fut)\n    err = batch_fut.get_exception()\n    if (err is not None):\n        tb = batch_fut.get_traceback()\n        for (fut, _) in todo:\n            if (not fut.done()):\n                fut.set_exception(err, tb)", "docstring": "Passes exception along.\n\nArgs:\nbatch_fut: the batch future returned by running todo_tasklet.\ntodo: (fut, option) pair. fut is the future return by each add() call.\n\nIf the batch fut was successful, it has already called fut.set_result()\non other individual futs. This method only handles when the batch fut\nencountered an exception.", "source": "codesearchnet"}
{"code": "def convert_shape(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting shape ...')\n\n    def target_layer(x):\n        import tensorflow as tf\n        return tf.shape(x)\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert shape operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def ch_stop_time(self, *channels: List[Channel]) -> int:\n        \n        return self.timeslots.ch_stop_time(*channels)", "docstring": "Return maximum start time for supplied channels.\n\nArgs:\n*channels: Supplied channels", "source": "juraj-google-style"}
{"code": "def acquire(self, host: str, port: int, use_ssl: bool=False,\n                host_key: Optional[Any]=None) \\\n            -> Union[Connection, SSLConnection]:\n        \n        assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))\n        assert not self._closed\n\n        yield from self._process_no_wait_releases()\n\n        if use_ssl:\n            connection_factory = functools.partial(\n                self._ssl_connection_factory, hostname=host)\n        else:\n            connection_factory = functools.partial(\n                self._connection_factory, hostname=host)\n\n        connection_factory = functools.partial(\n            HappyEyeballsConnection, (host, port), connection_factory,\n            self._resolver, self._happy_eyeballs_table,\n            is_ssl=use_ssl\n        )\n\n        key = host_key or (host, port, use_ssl)\n\n        with (yield from self._host_pools_lock):\n            if key not in self._host_pools:\n                host_pool = self._host_pools[key] = HostPool(\n                    connection_factory,\n                    max_connections=self._max_host_count\n                )\n                self._host_pool_waiters[key] = 1\n            else:\n                host_pool = self._host_pools[key]\n                self._host_pool_waiters[key] += 1\n\n        _logger.debug('Check out %s', key)\n\n        connection = yield from host_pool.acquire()\n        connection.key = key\n\n        \n        \n        \n        \n\n        with (yield from self._host_pools_lock):\n            self._host_pool_waiters[key] -= 1\n\n        return connection", "docstring": "Return an available connection.\n\nArgs:\nhost: A hostname or IP address.\nport: Port number.\nuse_ssl: Whether to return a SSL connection.\nhost_key: If provided, it overrides the key used for per-host\nconnection pooling. This is useful for proxies for example.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def tasks(self):\n    if (not self.__tasks):\n        self.__tasks = Tasks(self.__connection)\n    return self.__tasks", "docstring": "Gets the Tasks API client.\n\nReturns:\nTasks:", "source": "codesearchnet"}
{"code": "def shutdown(self, message=None):\n    for (name, server) in self.servers.items():\n        server.quit(message)", "docstring": "Disconnect all servers with a message.\n\nArgs:\nmessage (str): Quit message to use on each connection.", "source": "codesearchnet"}
{"code": "def fix_variable(self, v, value):\n    variables = self.variables\n    try:\n        idx = variables.index(v)\n    except ValueError:\n        raise ValueError('given variable {} is not part of the constraint'.format(v))\n    if (value not in self.vartype.value):\n        raise ValueError('expected value to be in {}, received {} instead'.format(self.vartype.value, value))\n    configurations = frozenset(((config[:idx] + config[(idx + 1):]) for config in self.configurations if (config[idx] == value)))\n    if (not configurations):\n        raise UnsatError('fixing {} to {} makes this constraint unsatisfiable'.format(v, value))\n    variables = (variables[:idx] + variables[(idx + 1):])\n    self.configurations = configurations\n    self.variables = variables\n\n    def func(*args):\n        return (args in configurations)\n    self.func = func\n    self.name = '{} ({} fixed to {})'.format(self.name, v, value)", "docstring": "Fix the value of a variable and remove it from the constraint.\n\nArgs:\nv (variable):\nVariable in the constraint to be set to a constant value.\n\nval (int):\nValue assigned to the variable. Values must match the :class:`.Vartype` of the\nconstraint.\n\nExamples:\nThis example creates a constraint that :math:`a \\\\ne b` on binary variables,\nfixes variable a to 0, and tests two candidate solutions.\n\n>>> import dwavebinarycsp\n>>> const = dwavebinarycsp.Constraint.from_func(operator.ne,\n...             ['a', 'b'], dwavebinarycsp.BINARY)\n>>> const.fix_variable('a', 0)\n>>> const.check({'b': 1})\nTrue\n>>> const.check({'b': 0})\nFalse", "source": "codesearchnet"}
{"code": "def entropy(rho: Density, base: float=None) -> float:\n    op = asarray(rho.asoperator())\n    probs = np.linalg.eigvalsh(op)\n    probs = np.maximum(probs, 0.0)\n    return scipy.stats.entropy(probs, base=base)", "docstring": "Returns the von-Neumann entropy of a mixed quantum state.\n\nArgs:\nrho:    A density matrix\nbase:   Optional logarithm base. Default is base e, and entropy is\nmeasures in nats. For bits set base to 2.\n\nReturns:\nThe von-Neumann entropy of rho", "source": "codesearchnet"}
{"code": "def write(self, data, timeout_ms=None):\n    timeout = timeouts.PolledTimeout.from_millis(timeout_ms)\n    while data:\n        self._transport.write(data[:self._transport.adb_connection.maxdata], timeout)\n        data = data[self._transport.adb_connection.maxdata:]", "docstring": "Write data to this stream.\n\nArgs:\ndata: Data to write.\ntimeout_ms: Timeout to use for the write/Ack transaction, in\nmilliseconds (or as a PolledTimeout object).\n\nRaises:\nAdbProtocolError: If an ACK is not received.\nAdbStreamClosedError: If the stream is already closed, or gets closed\nbefore the write completes.", "source": "codesearchnet"}
{"code": "def MergeOrAddUser(self, kb_user):\n    \n\n    user = self.GetUser(\n        sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)\n    new_attrs = []\n    merge_conflicts = []  \n    if not user:\n      new_attrs = self._CreateNewUser(kb_user)\n    else:\n      for key, val in iteritems(kb_user.AsDict()):\n        if user.Get(key) and user.Get(key) != val:\n          merge_conflicts.append((key, user.Get(key), val))\n        user.Set(key, val)\n        new_attrs.append(\"users.%s\" % key)\n\n    return new_attrs, merge_conflicts", "docstring": "Merge a user into existing users or add new if it doesn't exist.\n\nArgs:\nkb_user: A User rdfvalue.\n\nReturns:\nA list of strings with the set attribute names, e.g. [\"users.sid\"]", "source": "juraj-google-style"}
{"code": "def get_bounds(changeset):\n    \n    try:\n        return Polygon([\n            (float(changeset.get('min_lon')), float(changeset.get('min_lat'))),\n            (float(changeset.get('max_lon')), float(changeset.get('min_lat'))),\n            (float(changeset.get('max_lon')), float(changeset.get('max_lat'))),\n            (float(changeset.get('min_lon')), float(changeset.get('max_lat'))),\n            (float(changeset.get('min_lon')), float(changeset.get('min_lat'))),\n            ])\n    except TypeError:\n        return Polygon()", "docstring": "Get the bounds of the changeset and return it as a Polygon object. If\nthe changeset has not coordinates (case of the changesets that deal only\nwith relations), it returns an empty Polygon.\n\nArgs:\nchangeset: the XML string of the changeset.", "source": "juraj-google-style"}
{"code": "def poly_energy(sample_like, poly):\n    \n\n    msg = (\"poly_energy is deprecated and will be removed in dimod 0.9.0.\"\n           \"In the future, use BinaryPolynomial.energy\")\n    warnings.warn(msg, DeprecationWarning)\n    \n    \n    return BinaryPolynomial(poly, 'SPIN').energy(sample_like)", "docstring": "Calculates energy of a sample from a higher order polynomial.\n\nArgs:\nsample (samples_like):\nA raw sample. `samples_like` is an extension of NumPy's\narray_like structure. See :func:`.as_samples`.\n\npoly (dict):\nPolynomial as a dict of form {term: bias, ...}, where `term` is a\ntuple of variables and `bias` the associated bias.\n\nReturns:\nfloat: The energy of the sample.", "source": "juraj-google-style"}
{"code": "def __init__(self, filename, asarfile, files, baseoffset):\n        \n\n        self.filename = filename\n        self.asarfile = asarfile\n        self.files = files\n        self.baseoffset = baseoffset", "docstring": "Initializes a new instance of the :see AsarArchive class.\n\nArgs:\nfilename (str):\nThe path to the *.asar file to read/write from/to.\n\nasarfile (File):\nA open *.asar file object.\n\nfiles (dict):\nDictionary of files contained in the archive.\n(The header that was read from the file).\n\nbaseoffset (int):\nBase offset, indicates where in the file the header ends.", "source": "juraj-google-style"}
{"code": "def postprocess(chunks: typing.List[str]) -> typing.List[str]:\n    chunks = break_before_sequence(chunks, '（')\n    chunks = break_before_sequence(chunks, 'もら')\n    return chunks", "docstring": "Applies some processes to modify the extracted chunks.\n\nArgs:\nchunks (List[str]): Source chunks.\n\nReturns:\nProcessed chunks.", "source": "github-repos"}
{"code": "def notify_on_change(enabled: bool=True) -> ContextManager[None]:\n    return thread_local.thread_local_value_scope(_TLS_ENABLE_CHANGE_NOTIFICATION, enabled, True)", "docstring": "Returns a context manager to enable or disable notification upon change.\n\n`notify_on_change` is thread-safe and can be nested. For example, in the\nfollowing code, `_on_change` (thus `_on_bound`) method of `a` will be\ntriggered due to the rebind in the inner `with` statement, and those of `b`\nwill not be triggered as the outer `with` statement disables the\nnotification::\n\nwith pg.notify_on_change(False):\nwith pg.notify_on_change(True):\na.rebind(b=1)\nb.rebind(x=2)\n\nArgs:\nenabled: If True, enable change notification in current scope.\nOtherwise, disable notification.\n\nReturns:\nA context manager for allowing/disallowing change notification in scope.", "source": "github-repos"}
{"code": "def template_string(\n    task: Task, template: str, jinja_filters: FiltersDict = None, **kwargs: Any\n) -> Result:\n    \n    jinja_filters = jinja_filters or {} or task.nornir.config.jinja2.filters\n    text = jinja_helper.render_from_string(\n        template=template, host=task.host, jinja_filters=jinja_filters, **kwargs\n    )\n    return Result(host=task.host, result=text)", "docstring": "Renders a string with jinja2. All the host data is available in the template\n\nArguments:\ntemplate (string): template string\njinja_filters (dict): jinja filters to enable. Defaults to nornir.config.jinja2.filters\n**kwargs: additional data to pass to the template\n\nReturns:\nResult object with the following attributes set:\n* result (``string``): rendered string", "source": "juraj-google-style"}
{"code": "def delete_resource(self, resource, delete=True):\n        \n        \n        if isinstance(resource, str):\n            if is_valid_uuid(resource) is False:\n                raise HDXError('%s is not a valid resource id!' % resource)\n        return self._remove_hdxobject(self.resources, resource, delete=delete)", "docstring": "Delete a resource from the dataset and also from HDX by default\n\nArgs:\nresource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary\ndelete (bool): Whetehr to delete the resource from HDX (not just the dataset). Defaults to True.\n\nReturns:\nbool: True if resource removed or False if not", "source": "juraj-google-style"}
{"code": "def unique(ar):\n    import dask.array as da\n    if isinstance(ar, da.core.Array):\n        return da.unique(ar)\n    return _unique(ar)", "docstring": "r\"\"\"Find the unique elements of an array.\n\nIt uses ``dask.array.unique`` if necessary.\n\nArgs:\nar (array_like): Input array.\n\nReturns:\narray_like: the sorted unique elements.", "source": "codesearchnet"}
{"code": "def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):\n    if (ignore_index is not None):\n        assert (tensor.size() == tensor_other.size())\n        mask_arr = tensor.ne(ignore_index)\n        tensor = tensor.masked_select(mask_arr)\n        tensor_other = tensor_other.masked_select(mask_arr)\n    return torch.equal(tensor, tensor_other)", "docstring": "Compute ``torch.equal`` with the optional mask parameter.\n\nArgs:\nignore_index (int, optional): Specifies a ``tensor`` index that is ignored.\n\nReturns:\n(bool) Returns ``True`` if target and prediction are equal.", "source": "codesearchnet"}
{"code": "def normalize_build_spec(self, build_spec):\n    for cmd in build_spec:\n        if (not cmd):\n            continue\n        cmd_name = cmd.keys()[0]\n        cmd_options = cmd.values()[0]\n        cmd_handler = self.get_cmd_handler(cmd_name)\n        self.build_cmds.append(cmd_handler(cmd_options))", "docstring": "Convert a build spec into a list of Command tuples.\nAfter running this command, self.build_cmds should hold all\nthe commands that should be run on the disk in self.disk_path.\n\nArgs:\nbuild_spec (dict): The buildspec part from the init file", "source": "codesearchnet"}
{"code": "def list_bindings(site):\n    \n    ret = dict()\n    sites = list_sites()\n\n    if site not in sites:\n        log.warning('Site not found: %s', site)\n        return ret\n\n    ret = sites[site]['bindings']\n\n    if not ret:\n        log.warning('No bindings found for site: %s', site)\n\n    return ret", "docstring": "Get all configured IIS bindings for the specified site.\n\nArgs:\nsite (str): The name if the IIS Site\n\nReturns:\ndict: A dictionary of the binding names and properties.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_bindings site", "source": "juraj-google-style"}
{"code": "def _extract_gcs_api_response_error(message):\n  \n  try:\n    if len(message) == 3:\n      \n      data = json.loads(message[2])\n      return data['error']['errors'][0]['message']\n  except Exception:\n    pass\n  return message", "docstring": "A helper function to extract user-friendly error messages from service exceptions.\n\nArgs:\nmessage: An error message from an exception. If this is from our HTTP client code, it\nwill actually be a tuple.\n\nReturns:\nA modified version of the message that is less cryptic.", "source": "juraj-google-style"}
{"code": "def freeze(script_path, target_dir='frozen', **kw):\n    \n    cmds = []\n    freeze_start_time = time.time()\n    logging.debug('/\\\\%s%s Output%s/\\\\' % ('-' * 10, 'Pyinstaller', '-' * 10))\n    orig_dir = os.path.abspath('.')\n    script_path = os.path.abspath(script_path)\n    try:\n        os.chdir(target_dir)\n        cmds += _freeze_config()\n        pyinst_path = '%s/thirdparty/pyinstaller' % __path__[0]\n        cur_cmd = 'python -O %s/pyinstaller.py %s --skip-configure' % (pyinst_path, script_path)\n        cmds.append(cur_cmd)\n        if _run(cur_cmd):  \n            _freeze_config(force=True)\n            cur_cmd = 'python -O %s/pyinstaller.py %s' % (pyinst_path, script_path)\n            _run(cur_cmd)\n    finally:\n        os.chdir(orig_dir)\n    logging.debug('\\\\/%s%s Output%s\\\\/' % ('-' * 10, 'Pyinstaller', '-' * 10))\n    logging.info('Pyinstaller took [%f] seconds' % (time.time() - freeze_start_time))\n    return cmds", "docstring": "Wraps pyinstaller and provides an easy to use interface\n\nArgs:\nscript_path: Absolute path to python script to be frozen.\n\nReturns:\nList of freeze commands ran\n\nRaises:\nsubprocess.CalledProcessError: Freeze error.\nOSError: Freeze not found.", "source": "juraj-google-style"}
{"code": "def parse_string_to_constructor(ctor_string):\n    orig_ctor_string = ctor_string\n    if ('.' not in ctor_string):\n        ctor_string = ('sonnet.' + ctor_string)\n    if ctor_string.startswith('snt.'):\n        ctor_string = ('sonnet.' + ctor_string[len('snt.'):])\n    (package_name, rest) = ctor_string.split('.', 1)\n    package = importlib.import_module(package_name)\n    try:\n        return _recursive_getattr(package, rest)\n    except AttributeError:\n        raise ValueError('could not find `{}`, after normalizing to `{}`'.format(orig_ctor_string, ctor_string))", "docstring": "Returns a callable which corresponds to the constructor string.\n\nVarious modules (eg, ConvNet2D) take constructor arguments which are\ncallables, indicating a submodule to build. These can be passed as\nactual constructors, eg `snt.LayerNorm`, however that makes the config\nfor that module not trivially serializable. This function tries to map\na string representation to the underlying callable, allowing configs to\nremain serializable where necessary.\n\nArgs:\nctor_string: string representing some module in Sonnet. If the string is\nprovided with no dots, we assume it is a member of Sonnet available at\ntop level, i.e. \"LayerNorm\" maps to `snt.LayerNorm`.\n\nRaises:\nValueError: if no matching constructor can be found.\n\nReturns:\nCallable constructor which corresponds to `ctor_string`.", "source": "codesearchnet"}
{"code": "def set_suite_info(self, suite_info=None):\n    self._suite_info = suite_info or {}", "docstring": "Interface for sub-classes to set user defined extra info to test summary.\n\nArgs:\nsuite_info: dict, A dict of suite information. Keys and values must be\nserializable.", "source": "github-repos"}
{"code": "def register_subcommand(parser: ArgumentParser):\n    train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')\n    train_parser.add_argument('--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.')\n    train_parser.add_argument('--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.')\n    train_parser.add_argument('--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.')\n    train_parser.add_argument('--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.')\n    train_parser.add_argument('--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).')\n    train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')\n    train_parser.add_argument('--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.')\n    train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')\n    train_parser.add_argument('--task', type=str, default='text_classification', help='Task to train the model on.')\n    train_parser.add_argument('--model', type=str, default='google-bert/bert-base-uncased', help=\"Model's name or path to stored model.\")\n    train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')\n    train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')\n    train_parser.add_argument('--learning_rate', type=float, default=3e-05, help='Learning rate.')\n    train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')\n    train_parser.set_defaults(func=train_command_factory)", "docstring": "Register this command to argparse so it's available for the transformer-cli\n\nArgs:\nparser: Root parser to register command-specific arguments", "source": "github-repos"}
{"code": "def set_spacing(self, space):\n        \n        self.figure.spacing = space\n        if 'subplots_adjust_kwargs' not in self.figure.__dict__:\n            self.figure.subplots_adjust_kwargs = {}\n        if space == 'wide':\n            self.figure.subplots_adjust_kwargs['hspace'] = 0.3\n            self.figure.subplots_adjust_kwargs['wspace'] = 0.3\n        else:\n            self.figure.subplots_adjust_kwargs['hspace'] = 0.0\n            self.figure.subplots_adjust_kwargs['wspace'] = 0.0\n\n        return", "docstring": "Set the figure spacing.\n\nSets whether in general there is space between subplots.\nIf all axes are shared, this can be `tight`. Default in code is `wide`.\n\nThe main difference is the tick labels extend to the ends if space==`wide`.\nIf space==`tight`, the edge tick labels are cut off for clearity.\n\nArgs:\nspace (str): Sets spacing for subplots. Either `wide` or `tight`.", "source": "juraj-google-style"}
{"code": "def _CopyTimeFromString(self, time_string):\n    time_string_length = len(time_string)\n    if (time_string_length < 8):\n        raise ValueError('Time string too short.')\n    if ((time_string[2] != ':') or (time_string[5] != ':')):\n        raise ValueError('Invalid time string.')\n    try:\n        hours = int(time_string[0:2], 10)\n    except ValueError:\n        raise ValueError('Unable to parse hours.')\n    if (hours not in range(0, 24)):\n        raise ValueError('Hours value: {0:d} out of bounds.'.format(hours))\n    try:\n        minutes = int(time_string[3:5], 10)\n    except ValueError:\n        raise ValueError('Unable to parse minutes.')\n    if (minutes not in range(0, 60)):\n        raise ValueError('Minutes value: {0:d} out of bounds.'.format(minutes))\n    try:\n        seconds = int(time_string[6:8], 10)\n    except ValueError:\n        raise ValueError('Unable to parse day of seconds.')\n    if (seconds not in range(0, 60)):\n        raise ValueError('Seconds value: {0:d} out of bounds.'.format(seconds))\n    microseconds = None\n    time_zone_offset = None\n    time_zone_string_index = 8\n    while (time_zone_string_index < time_string_length):\n        if (time_string[time_zone_string_index] in ('+', '-')):\n            break\n        time_zone_string_index += 1\n    if (time_zone_string_index == (time_string_length - 1)):\n        time_zone_string_index += 1\n    if ((time_string_length > 8) and (time_string[8] == '.')):\n        time_fraction_length = (time_zone_string_index - 9)\n        if (time_fraction_length not in (3, 6)):\n            raise ValueError('Invalid time string.')\n        try:\n            time_fraction = time_string[9:time_zone_string_index]\n            time_fraction = int(time_fraction, 10)\n        except ValueError:\n            raise ValueError('Unable to parse time fraction.')\n        if (time_fraction_length == 3):\n            time_fraction *= 1000\n        microseconds = time_fraction\n    if (time_zone_string_index < time_string_length):\n        if (((time_string_length - time_zone_string_index) != 6) or (time_string[(time_zone_string_index + 3)] != ':')):\n            raise ValueError('Invalid time string.')\n        try:\n            hours_from_utc = int(time_string[(time_zone_string_index + 1):(time_zone_string_index + 3)])\n        except ValueError:\n            raise ValueError('Unable to parse time zone hours offset.')\n        if (hours_from_utc not in range(0, 15)):\n            raise ValueError('Time zone hours offset value out of bounds.')\n        try:\n            minutes_from_utc = int(time_string[(time_zone_string_index + 4):(time_zone_string_index + 6)])\n        except ValueError:\n            raise ValueError('Unable to parse time zone minutes offset.')\n        if (minutes_from_utc not in range(0, 60)):\n            raise ValueError('Time zone minutes offset value out of bounds.')\n        time_zone_offset = ((hours_from_utc * 60) + minutes_from_utc)\n        if (time_string[time_zone_string_index] != '-'):\n            time_zone_offset = (- time_zone_offset)\n    return (hours, minutes, seconds, microseconds, time_zone_offset)", "docstring": "Copies a time from a string.\n\nArgs:\ntime_string (str): time value formatted as:\nhh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. 
The seconds fraction and\ntime zone offset are optional.\n\nReturns:\ntuple[int, int, int, int, int]: hours, minutes, seconds, microseconds,\ntime zone offset in minutes.\n\nRaises:\nValueError: if the time string is invalid or not supported.", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 object_type=None,\n                 template_attribute=None):\n        \n        super(CreateRequestPayload, self).__init__(\n            tag=enums.Tags.REQUEST_PAYLOAD\n        )\n\n        self._object_type = None\n        self._template_attribute = None\n\n        self.object_type = object_type\n        self.template_attribute = template_attribute", "docstring": "Construct a Create request payload structure.\n\nArgs:\nobject_type (enum): An ObjectType enumeration specifying the type\nof object to create. Optional, defaults to None. Required for\nread/write.\ntemplate_attribute (TemplateAttribute): A TemplateAttribute\nstructure containing a set of attributes to set on the new\nobject. Optional, defaults to None. Required for read/write.", "source": "juraj-google-style"}
{"code": "def new_product(self, name):\n        \n        n = self._product_cls(self, name, summary_cls=self._summary_cls)\n        self.graph.add_node(n)\n        self.products.append(n)\n        return n", "docstring": "Create a new product.\n\nArgs:\nname: name of the new product.\n\nReturns:\nA new product instance.", "source": "juraj-google-style"}
{"code": "def _update_exit_code_from_error(self, error):\n    for (error_type, exit_code) in self.ERROR_CODE_MAP.items():\n        if isinstance(error, error_type):\n            self.update_exit_code(exit_code)\n            break\n    else:\n        self.update_exit_code(ExitStatus.generic_error)", "docstring": "Set the exit code based on the error type.\n\nArgs:\nerror (:class:`Exception`): An exception instance.", "source": "codesearchnet"}
{"code": "def _expand_sequence(self, seq: List[GridQubit]) -> List[GridQubit]:\n    i = 1\n    while (i < len(seq)):\n        path = self._find_path_between(seq[(i - 1)], seq[i], set(seq))\n        if path:\n            seq = ((seq[:i] + path) + seq[i:])\n        else:\n            i += 1\n    return seq", "docstring": "Tries to expand given sequence with more qubits.\n\nArgs:\nseq: Linear sequence of qubits.\n\nReturns:\nNew continuous linear sequence which contains all the qubits from\nseq and possibly new qubits inserted in between.", "source": "codesearchnet"}
{"code": "def l2_distance(t1, t2, epsilon=1e-12, name=None):\n  \n  with tf.name_scope(name, 'l2_distance', [t1, t2]) as scope:\n    t1 = tf.convert_to_tensor(t1, name='t1')\n    t2 = tf.convert_to_tensor(t2, name='t2')\n    return tf.sqrt(tf.maximum(l2_distance_sq(t1, t2, scope), epsilon))", "docstring": "l2 distance between t1 and t2 and caps the gradient of the Square Root.\n\nArgs:\nt1: A tensor.\nt2: A tensor that is the same size as t1.\nepsilon: A lower bound for distance, useful to avoid sqrt of very small\nvalues that can blow up gradients.\nname: Optional name for this op.\nReturns:\nThe l2 distance between t1 and t2.", "source": "juraj-google-style"}
{"code": "def load_array_types(self, fname):\n    \n    type_defs = ''\n    with open(fname, 'rt') as fh:\n      type_defs = fh.read()\n\n    try:\n      type_defs = ast.literal_eval(type_defs)\n    except SyntaxError:\n      type_defs = {}\n\n    self._add_array_types(type_defs)", "docstring": "Load file of previously extracted data types\n\nArgs:\nfname (str): Name of file to load array database from", "source": "juraj-google-style"}
{"code": "def get_parameter_vector(self, include_frozen=False):\n    if include_frozen:\n        return self.parameter_vector\n    return self.parameter_vector[self.unfrozen_mask]", "docstring": "Get an array of the parameter values in the correct order\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "codesearchnet"}
{"code": "def version_info(self):\n    if (self._api_version is None):\n        self.query_api_version()\n    return (self._api_version['api-major-version'], self._api_version['api-minor-version'])", "docstring": "Returns API version information for the HMC.\n\nThis operation does not require authentication.\n\nReturns:\n\n:term:`HMC API version`: The HMC API version supported by the HMC.\n\nRaises:\n\n:exc:`~zhmcclient.HTTPError`\n:exc:`~zhmcclient.ParseError`\n:exc:`~zhmcclient.ConnectionError`", "source": "codesearchnet"}
{"code": "def delete(self, service):\n    url = self._url_format(service)\n    return self.rest_action(self._session.delete, url)", "docstring": "Generic DELETE operation for Learning Modules API.\n\nArgs:\nservice (str): The endpoint service to use, i.e. gradebook\n\nRaises:\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\nlist: the json-encoded content of the response", "source": "codesearchnet"}
{"code": "def parse_selinux(parts):\n    \n\n    owner, group = parts[:2]\n    selinux = parts[2].split(\":\")\n    lsel = len(selinux)\n    path, link = parse_path(parts[-1])\n    result = {\n        \"owner\": owner,\n        \"group\": group,\n        \"se_user\": selinux[0],\n        \"se_role\": selinux[1] if lsel > 1 else None,\n        \"se_type\": selinux[2] if lsel > 2 else None,\n        \"se_mls\": selinux[3] if lsel > 3 else None,\n        \"name\": path\n    }\n    if link:\n        result[\"link\"] = link\n    return result", "docstring": "Parse part of an ls output line that is selinux.\n\nArgs:\nparts (list): A four element list of strings representing the initial\nparts of an ls line after the permission bits. The parts are owner\ngroup, selinux info, and the path.\n\nReturns:\nA dict containing owner, group, se_user, se_role, se_type, se_mls, and\nname. If the raw name was a symbolic link, link is always included.", "source": "juraj-google-style"}
{"code": "def _analemma_suns(self):\n    for h in xrange(0, 24):\n        if (self._analemma_position(h) < 0):\n            continue\n        elif (self._analemma_position(h) == 0):\n            chours = []\n            prevhour = (self.latitude <= 0)\n            num_of_days = (8760 if (not self.is_leap_year) else (8760 + 24))\n            for hoy in xrange(h, num_of_days, 24):\n                thishour = self.calculate_sun_from_hoy(hoy).is_during_day\n                if (thishour != prevhour):\n                    if (not thishour):\n                        hoy -= 24\n                    dt = DateTime.from_hoy(hoy, self.is_leap_year)\n                    chours.append((dt.month, dt.day, dt.hour))\n                prevhour = thishour\n            tt = []\n            for hcount in range(int((len(chours) / 2))):\n                st = chours[(2 * hcount)]\n                en = chours[((2 * hcount) + 1)]\n                if (self.latitude >= 0):\n                    tt = (((([self.calculate_sun(*st)] + [self.calculate_sun(st[0], d, h) for d in xrange((st[1] + 1), 29, 7)]) + [self.calculate_sun(m, d, h) for m in xrange((st[0] + 1), en[0]) for d in xrange(3, 29, 7)]) + [self.calculate_sun(en[0], d, h) for d in xrange(3, en[1], 7)]) + [self.calculate_sun(*en)])\n                else:\n                    tt = ((((([self.calculate_sun(*en)] + [self.calculate_sun(en[0], d, h) for d in xrange((en[1] + 1), 29, 7)]) + [self.calculate_sun(m, d, h) for m in xrange((en[0] + 1), 13) for d in xrange(3, 29, 7)]) + [self.calculate_sun(m, d, h) for m in xrange(1, st[0]) for d in xrange(3, 29, 7)]) + [self.calculate_sun(st[0], d, h) for d in xrange(3, st[1], 7)]) + [self.calculate_sun(*st)])\n                (yield tt)\n        else:\n            (yield tuple((self.calculate_sun(((m % 12) + 1), d, h) for m in xrange(0, 13) for d in (7, 14, 21)))[:(- 2)])", "docstring": "Calculate times that should be used for drawing analemma_curves.\n\nReturns:\nA list of list of analemma suns.", "source": "codesearchnet"}
{"code": "def median(series):\n    if np.issubdtype(series.dtype, np.number):\n        return series.median()\n    else:\n        return np.nan", "docstring": "Returns the median value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.", "source": "codesearchnet"}
{"code": "def set_white(self, brightness, colourtemp):\n    if (not (25 <= brightness <= 255)):\n        raise ValueError('The brightness needs to be between 25 and 255.')\n    if (not (0 <= colourtemp <= 255)):\n        raise ValueError('The colour temperature needs to be between 0 and 255.')\n    payload = self.generate_payload(SET, {self.DPS_INDEX_MODE: self.DPS_MODE_WHITE, self.DPS_INDEX_BRIGHTNESS: brightness, self.DPS_INDEX_COLOURTEMP: colourtemp})\n    data = self._send_receive(payload)\n    return data", "docstring": "Set white coloured theme of an rgb bulb.\n\nArgs:\nbrightness(int): Value for the brightness (25-255).\ncolourtemp(int): Value for the colour temperature (0-255).", "source": "codesearchnet"}
{"code": "def eq(left: Any, right: Any) -> bool:\n    if left is right:\n        return True\n    if isinstance(left, list) and isinstance(right, list) or (isinstance(left, tuple) and isinstance(right, tuple)):\n        if len(left) != len(right):\n            return False\n        for x, y in zip(left, right):\n            if ne(x, y):\n                return False\n        return True\n    elif isinstance(left, dict):\n        if not isinstance(right, dict) or len(left) != len(right) or set(left.keys()) != set(right.keys()):\n            return False\n        left_items = left.sym_items if isinstance(left, Symbolic) else left.items\n        right_item = right.sym_getattr if isinstance(right, Symbolic) else right.__getitem__\n        for k, v in left_items():\n            if ne(v, right_item(k)):\n                return False\n        return True\n    elif hasattr(left, 'sym_eq') and (not inspect.isclass(left)) and (left.sym_eq.__code__ is not Symbolic.sym_eq.__code__):\n        return left.sym_eq(right)\n    elif hasattr(right, 'sym_eq') and (not inspect.isclass(right)) and (right.sym_eq.__code__ is not Symbolic.sym_eq.__code__):\n        return right.sym_eq(left)\n    return pg_typing.callable_eq(left, right)", "docstring": "Compares if two values are equal. Use symbolic equality if possible.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Any())\n])\nclass A(pg.Object):\ndef sym_eq(self, right):\nif super().sym_eq(right):\nreturn True\nreturn pg.eq(self.x, right)\n\nclass B:\npass\n\nassert pg.eq(1, 1)\nassert pg.eq(A(1), A(1))\n# This is True since A has override `sym_eq`.\nassert pg.eq(A(1), 1)\n# Objects of B are compared by references.\nassert not pg.eq(A(B()), A(B()))\n\nArgs:\nleft: The left-hand value to compare.\nright: The right-hand value to compare.\n\nReturns:\nTrue if left and right is equal or symbolically equal. Otherwise False.", "source": "github-repos"}
{"code": "def _convert_to_eval_metric(metric_fn):\n  \n  def problem_metric_fn(*args):\n    \n    (scores, weights) = metric_fn(*args)\n\n    \n    return tf.metrics.mean(scores, weights)\n  return problem_metric_fn", "docstring": "Wrap a metric fn that returns scores and weights as an eval metric fn.\n\nThe input metric_fn returns values for the current batch. The wrapper\naggregates the return values collected over all of the batches evaluated.\n\nArgs:\nmetric_fn: function that returns scores and weights for the current batch's\nlogits and predicted labels.\n\nReturns:\nfunction that aggregates the scores and weights from metric_fn.", "source": "juraj-google-style"}
{"code": "def diversity(layer):\n\n    def inner(T):\n        layer_t = T(layer)\n        (batch_n, _, _, channels) = layer_t.get_shape().as_list()\n        flattened = tf.reshape(layer_t, [batch_n, (- 1), channels])\n        grams = tf.matmul(flattened, flattened, transpose_a=True)\n        grams = tf.nn.l2_normalize(grams, axis=[1, 2], epsilon=1e-10)\n        return (sum([sum([tf.reduce_sum((grams[i] * grams[j])) for j in range(batch_n) if (j != i)]) for i in range(batch_n)]) / batch_n)\n    return inner", "docstring": "Encourage diversity between each batch element.\n\nA neural net feature often responds to multiple things, but naive feature\nvisualization often only shows us one. If you optimize a batch of images,\nthis objective will encourage them all to be different.\n\nIn particular, it caculuates the correlation matrix of activations at layer\nfor each image, and then penalizes cossine similarity between them. This is\nvery similar to ideas in style transfer, except we're *penalizing* style\nsimilarity instead of encouraging it.\n\nArgs:\nlayer: layer to evaluate activation correlations on.\n\nReturns:\nObjective.", "source": "codesearchnet"}
{"code": "def apply_transformation(self, structure):\n    if structure.is_ordered:\n        return structure\n    species = [dict(sp) for sp in structure.species_and_occu]\n    for sp in species:\n        for (k, v) in sp.items():\n            old_occ = sp[k]\n            new_occ = float(Fraction(old_occ).limit_denominator(self.max_denominator))\n            if self.fix_denominator:\n                new_occ = (around((old_occ * self.max_denominator)) / self.max_denominator)\n            if (round(abs((old_occ - new_occ)), 6) > self.tol):\n                raise RuntimeError('Cannot discretize structure within tolerance!')\n            sp[k] = new_occ\n    return Structure(structure.lattice, species, structure.frac_coords)", "docstring": "Discretizes the site occupancies in the structure.\n\nArgs:\nstructure: disordered Structure to discretize occupancies\n\nReturns:\nA new disordered Structure with occupancies discretized", "source": "codesearchnet"}
{"code": "def get_device_topology(self, id_or_uri):\n    uri = (self._client.build_uri(id_or_uri) + '/deviceTopology')\n    return self._client.get(uri)", "docstring": "Retrieves the topology information for the rack resource specified by ID or URI.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\n\nReturn:\ndict: Device topology.", "source": "codesearchnet"}
{"code": "def __init__(self, serial=None, **kwargs):\n        \n        self.__display = None\n        serial = serial or getenvs('ATX_ADB_SERIALNO', 'ANDROID_SERIAL')\n        self._host = kwargs.get('host') or getenvs(\n            'ATX_ADB_HOST', 'ANDROID_ADB_SERVER_HOST') or '127.0.0.1'\n        self._port = int(kwargs.get('port') or getenvs(\n            'ATX_ADB_PORT', 'ANDROID_ADB_SERVER_PORT') or 5037)\n\n        self._adb_client = adbkit.Client(self._host, self._port)\n        self._adb_device = self._adb_client.device(serial)\n        \n\n        \n        self._uiauto = uiautomator2.connect_usb(serial)\n        if not self._uiauto.alive:\n            self._uiauto.healthcheck(unlock=False)\n\n        DeviceMixin.__init__(self)\n        self._randid = base.id_generator(5)\n\n        self.screen_rotation = None\n\n        \n        self.swipe = self._uiauto.swipe\n        self.drag = self._uiauto.drag\n        self.press = self._uiauto.press\n        self.long_click = self._uiauto.long_click\n        self.dump = self._uiauto.dump_hierarchy", "docstring": "Initial AndroidDevice\nArgs:\nserial (str): serial or wlan ip\n\nReturns:\nAndroidDevice object\n\nRaises:\nEnvironmentError", "source": "juraj-google-style"}
{"code": "def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode:\n        \n        \n        logger.info('Representing {} of class {}'.format(\n            data, self.class_.__name__))\n        if hasattr(data, 'yatiml_attributes'):\n            logger.debug('Found yatiml_attributes()')\n            attributes = data.yatiml_attributes()\n            if attributes is None:\n                raise RuntimeError(('{}.yatiml_attributes() returned None,'\n                                    ' where a dict was expected.').format(\n                                        self.class_.__name__))\n        else:\n            logger.debug(\n                'No yatiml_attributes() found, using public attributes')\n            argspec = inspect.getfullargspec(data.__init__)\n            attribute_names = list(argspec.args[1:])\n            attrs = [(name, getattr(data, name)) for name in attribute_names\n                     if name != 'yatiml_extra']\n            if 'yatiml_extra' in attribute_names:\n                if not hasattr(data, 'yatiml_extra'):\n                    raise RuntimeError(\n                        ('Class {} takes yatiml_extra but has '\n                         ' no yatiml_extra attribute, and no '\n                         ' yatiml_attributes().').format(self.class_.__name__))\n                attrs.extend(data.yatiml_extra.items())\n            attributes = yaml.comments.CommentedMap(attrs)\n\n        \n        represented = dumper.represent_mapping('tag:yaml.org,2002:map',\n                                               attributes)\n\n        \n        cnode = Node(represented)\n        self.__sweeten(dumper, self.class_, cnode)\n        represented = cnode.yaml_node\n\n        logger.debug('End representing {}'.format(data))\n        return represented", "docstring": "Represents the class as a MappingNode.\n\nArgs:\ndumper: The dumper to use.\ndata: The user-defined object to dump.\n\nReturns:\nA yaml.Node representing the object.", "source": "juraj-google-style"}
{"code": "def assertShapeEqual(self, input_a, input_b, msg=None):\n    if not isinstance(input_a, (np.ndarray, np.generic, tensor_lib.Tensor)):\n        raise TypeError(f'input_a must be a Numpy ndarray, Numpy scalar, or a Tensor.Instead received {type(input_a)}')\n    if not isinstance(input_b, (np.ndarray, np.generic, tensor_lib.Tensor)):\n        raise TypeError(f'input_b must be a Numpy ndarray, Numpy scalar, or a Tensor.Instead received {type(input_b)}')\n    shape_a = input_a.get_shape().as_list() if isinstance(input_a, tensor_lib.Tensor) else input_a.shape\n    shape_b = input_b.get_shape().as_list() if isinstance(input_b, tensor_lib.Tensor) else input_b.shape\n    self.assertAllEqual(shape_a, shape_b, msg=msg)", "docstring": "Asserts that two Numpy or TensorFlow objects have the same shape.\n\nFor Tensors, this compares statically known shapes at compile time, not\ndynamic shapes at runtime.\n\nArgs:\ninput_a: A Numpy ndarray, Numpy scalar, or a Tensor.\ninput_b: A Numpy ndarray, Numpy scalar, or a Tensor.\nmsg: Optional message to report on failure.\n\nRaises:\nTypeError: If the arguments have the wrong type.", "source": "github-repos"}
{"code": "def recommend(self, limit=10):\n    expected_list = [(arm_id, beta_dist.expected_value()) for (arm_id, beta_dist) in self.__beta_dist_dict.items()]\n    expected_list = sorted(expected_list, key=(lambda x: x[1]), reverse=True)\n    return expected_list[:limit]", "docstring": "Listup arms and expected value.\n\nArgs:\nlimit:      Length of the list.\n\nReturns:\n[Tuple(`Arms master id`, `expected value`)]", "source": "codesearchnet"}
{"code": "def setup(self, *args, **kwargs):\n    pass", "docstring": "Called to prepare an instance for combining.\n\nThis method can be useful if there is some state that needs to be loaded\nbefore executing any of the other methods. The resources can then be\ndisposed of in ``CombineFn.teardown``.\n\nIf you are using Dataflow, you need to enable Dataflow Runner V2\nbefore using this feature.\n\nArgs:\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"}
{"code": "def put(self, item):\n    with self._not_full:\n        if self._closed:\n            raise QueueClosedError()\n        if self._maxsize > 0:\n            while len(self._queue) == self._maxsize:\n                self._not_full.wait()\n                if self._closed:\n                    raise QueueClosedError()\n        self._queue.append(item)\n        self._not_empty.notify()", "docstring": "Put an item into the queue.\n\nIf the queue is closed, fails immediately.\n\nIf the queue is full, blocks until space is available or until the queue\nis closed by a call to close(), at which point this call fails.\n\nArgs:\nitem: an item to add to the queue\n\nRaises:\nQueueClosedError: if insertion failed because the queue is closed", "source": "github-repos"}
{"code": "def get_path_str(self, sep=os.path.sep, type_str=None):\n    return sep.join(list(reversed([v.label_str for v in self.parent_gen if (type_str in (None, v.type_str))])))", "docstring": "Get path from root to this node.\n\nArgs:\nsep: str\nOne or more characters to insert between each element in the path.\nDefaults to \"/\" on Unix and \"\\\" on Windows.\n\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include\ninformation from nodes of that type.\n\nReturns:\nstr: String describing the path from the root to this node.", "source": "codesearchnet"}
{"code": "def _format_param_val(self, param_val):\n        \n        if isinstance(param_val, list):\n            return ' '.join(str(x) for x in param_val)\n        else:\n            return str(param_val)", "docstring": "Internal method to format values in the packmol parameter dictionaries\n\nArgs:\nparam_val:\nSome object to turn into String\n\nReturns:\nstring representation of the object", "source": "juraj-google-style"}
{"code": "def iterable_source(iterable, target):\n    it = iter(iterable)\n    for item in it:\n        try:\n            target.send(item)\n        except StopIteration:\n            return prepend(item, it)\n    return empty_iter()", "docstring": "Convert an iterable into a stream of events.\n\nArgs:\niterable: A series of items which will be sent to the target one by one.\ntarget: The target coroutine or sink.\n\nReturns:\nAn iterator over any remaining items.", "source": "codesearchnet"}
{"code": "def choose_1_from_each(lists):\n    if (len(lists) == 0):\n        (yield [])\n    else:\n        for el in lists[0]:\n            for next_list in choose_1_from_each(lists[1:]):\n                (yield ([el] + next_list))", "docstring": "Takes a list of lists and returns a list of lists with one item\nfrom each list.  This new list should be the length of each list multiplied\nby the others.  18 for an list with lists of 3, 2 and 3.  Also the lenght\nof each sub list should be same as the length of lists passed in.\n\nArgs:\nlists(list of Lists):  A list of lists\n\nReturns:\nlist of lists: returns a list of lists constructions of one item from each\nlist in lists.", "source": "codesearchnet"}
{"code": "def garbage_collect_exports(export_dir_base, exports_to_keep):\n  \n  if exports_to_keep is None:\n    return\n  version_paths = []  \n  for filename in tf_v1.gfile.ListDirectory(export_dir_base):\n    path = os.path.join(\n        tf.compat.as_bytes(export_dir_base),\n        tf.compat.as_bytes(filename))\n    if len(filename) == 10 and filename.isdigit():\n      version_paths.append((int(filename), path))\n\n  oldest_version_path = sorted(version_paths)[:-exports_to_keep]\n  for _, path in oldest_version_path:\n    try:\n      tf_v1.gfile.DeleteRecursively(path)\n    except tf.errors.NotFoundError as e:\n      logging.warn(\"Can not delete %s recursively: %s\", path, e)", "docstring": "Deletes older exports, retaining only a given number of the most recent.\n\nExport subdirectories are assumed to be named with monotonically increasing\nintegers; the most recent are taken to be those with the largest values.\n\nArgs:\nexport_dir_base: the base directory under which each export is in a\nversioned subdirectory.\nexports_to_keep: Number of exports to keep. Older exports will be garbage\ncollected. Set to None to disable.", "source": "juraj-google-style"}
{"code": "def load(filename):\n    \n    if not os.path.exists(filename):\n        LOG.error(\"load object - File '%s' does not exist.\", filename)\n        return None\n\n    obj = None\n    with open(filename, 'rb') as obj_file:\n        obj = dill.load(obj_file)\n    return obj", "docstring": "Load a pickled obj from the filesystem.\n\nYou better know what you expect from the given pickle, because we don't check it.\n\nArgs:\nfilename (str): The filename we load the object from.\n\nReturns:\nThe object we were able to unpickle, else None.", "source": "juraj-google-style"}
{"code": "def forward(self, input_ids: torch.Tensor, cache_position: torch.Tensor) -> torch.Tensor:\n    batch_size = input_ids.shape[0]\n    position_ids = cache_position.unsqueeze(0).expand(batch_size, -1)\n    outputs = self.model(input_ids=input_ids, attention_mask=None, position_ids=position_ids, past_key_values=self.cache, use_cache=True, cache_position=cache_position)\n    return outputs.logits", "docstring": "Forward pass of the module, which is compatible with the ExecuTorch llm runner.\n\nArgs:\ninput_ids (`torch.Tensor`): Tensor representing current input token id to the module.\ncache_position (`torch.Tensor`): Tensor representing current input position in the cache.\n\nReturns:\ntorch.Tensor: Logits output from the model.", "source": "github-repos"}
{"code": "def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:\n    try:\n        return self._task_and_cores_to_replicas[task_id][logical_core]\n    except KeyError:\n        raise ValueError('Can not find any replica in task: {} contains logical_core: {} '.format(task_id, logical_core))", "docstring": "Lookup replica ids by task number and logical core.\n\nArgs:\ntask_id: TensorFlow task number.\nlogical_core: An integer, identifying a logical core.\nReturns:\nA sorted list of the replicas that are attached to that task and\nlogical_core.\nRaises:\nValueError: If no replica exists in the task which contains the logical\ncore.", "source": "github-repos"}
{"code": "def add_listener(self, callback, event_type=None):\n    listener_uid = uuid4()\n    self.listeners.append({'uid': listener_uid, 'callback': callback, 'event_type': event_type})\n    return listener_uid", "docstring": "Add a listener that will send a callback when the client recieves\nan event.\n\nArgs:\ncallback (func(roomchunk)): Callback called when an event arrives.\nevent_type (str): The event_type to filter for.\n\nReturns:\nuuid.UUID: Unique id of the listener, can be used to identify the listener.", "source": "codesearchnet"}
{"code": "def get_requirements(requirements_file=\"requirements.txt\"):\n    \n    with open(requirements_file) as fd:\n        lines = fd.readlines()\n    dependencies = []\n    for line in lines:\n        maybe_dep = line.strip()\n        if maybe_dep.startswith(\"\n            \n            continue\n        if maybe_dep.startswith(\"git+\"):\n            \n            \n            __, __, maybe_dep = maybe_dep.rpartition(\"\n        else:\n            \n            maybe_dep, __, __ = maybe_dep.partition(\"\n        \n        maybe_dep = maybe_dep.strip()\n        if maybe_dep:\n            dependencies.append(maybe_dep)\n    return dependencies", "docstring": "Get the contents of a file listing the requirements.\n\nArgs:\nrequirements_file (str): The path to the requirements file, relative\nto this file.\n\nReturns:\nlist: the list of requirements, or an empty list if\n``requirements_file`` could not be opened or read.", "source": "juraj-google-style"}
{"code": "def length_of_overlap(first_start, first_end, second_start, second_end):\n    \n    if first_end <= second_start or first_start >= second_end:\n        return 0.0\n\n    if first_start < second_start:\n        if first_end < second_end:\n            return abs(first_end - second_start)\n        else:\n            return abs(second_end - second_start)\n\n    if first_start > second_start:\n        if first_end > second_end:\n            return abs(second_end - first_start)\n        else:\n            return abs(first_end - first_start)", "docstring": "Find the length of the overlapping part of two segments.\n\nArgs:\nfirst_start (float): Start of the first segment.\nfirst_end (float): End of the first segment.\nsecond_start (float): Start of the second segment.\nsecond_end (float): End of the second segment.\n\nReturn:\nfloat: The amount of overlap or 0 if they don't overlap at all.", "source": "juraj-google-style"}
{"code": "def fetch_tokens(self, client_id=None, client_secret=None, code=None,\n                     redirect_uri=None, **kwargs):\n        \n        client_id = client_id or self.client_id\n        client_secret = client_secret or self.client_secret\n        redirect_uri = redirect_uri or self.redirect_uri\n        data = {\n           'grant_type': 'authorization_code',\n           'client_id': client_id,\n           'client_secret': client_secret,\n           'code': code,\n           'redirect_uri': redirect_uri\n        }\n        r = self._httpclient.request(\n            method='POST',\n            url=self.token_url,\n            json=data,\n            path='/api/oauth2/RequestToken',\n            auth=None,\n            **kwargs\n        )\n        if not r.ok:\n            raise PanCloudError(\n                '%s %s: %s' % (r.status_code, r.reason, r.text)\n            )\n        try:\n            r_json = r.json()\n        except ValueError as e:\n            raise PanCloudError(\"Invalid JSON: %s\" % e)\n        else:\n            if r.json().get(\n                'error_description'\n            ) or r.json().get(\n                'error'\n            ):\n                raise PanCloudError(r.text)\n            self.access_token = r_json.get('access_token')\n            self.jwt_exp = self._decode_exp(self.access_token_)\n            self.refresh_token = r_json.get('refresh_token')\n            self.write_credentials()\n            return r_json", "docstring": "Exchange authorization code for token.\n\nArgs:\nclient_id (str): OAuth2 client ID. Defaults to ``None``.\nclient_secret (str): OAuth2 client secret. Defaults to ``None``.\ncode (str): Authorization code. Defaults to ``None``.\nredirect_uri (str): Redirect URI. Defaults to ``None``.\n\nReturns:\ndict: Response from token URL.", "source": "juraj-google-style"}
{"code": "def subscribe(object_type: str, subscriber: str, callback_handler: Callable=None) -> EventQueue:\n    key = _keys.subscribers(object_type)\n    DB.remove_from_list(key, subscriber)\n    DB.append_to_list(key, subscriber)\n    return EventQueue(object_type, subscriber, callback_handler)", "docstring": "Subscribe to the specified object type.\n\nReturns an EventQueue object which can be used to query events\nassociated with the object type for this subscriber.\n\nArgs:\nobject_type (str): Object type\nsubscriber (str): Subscriber name\ncallback_handler (function, optional): Callback handler function.\n\nReturns:\nEventQueue, event queue object.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    win_registry_reader = FileObjectWinRegistryFileReader()\n\n    try:\n      registry_file = win_registry_reader.Open(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open Windows Registry file with error: {0!s}'.format(\n              exception))\n      return\n\n    win_registry = dfwinreg_registry.WinRegistry()\n\n    key_path_prefix = win_registry.GetRegistryFileMapping(registry_file)\n    registry_file.SetKeyPathPrefix(key_path_prefix)\n    root_key = registry_file.GetRootKey()\n    if not root_key:\n      return\n\n    registry_find_specs = getattr(\n        parser_mediator.artifacts_filter_helper, 'registry_find_specs', None)\n\n    if not registry_find_specs:\n      try:\n        self._ParseRecurseKeys(parser_mediator, root_key)\n      except IOError as exception:\n        parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))\n\n    else:\n      artifacts_filter_helper = artifact_filters.ArtifactDefinitionsFilterHelper\n      if not artifacts_filter_helper.CheckKeyCompatibility(key_path_prefix):\n        logger.warning((\n            'Artifacts filters are not supported for Windows Registry file '\n            'with key path prefix: \"{0:s}\".').format(key_path_prefix))\n\n      else:\n        try:\n          win_registry.MapFile(key_path_prefix, registry_file)\n          self._ParseKeysFromFindSpecs(\n              parser_mediator, win_registry, registry_find_specs)\n        except IOError as exception:\n          parser_mediator.ProduceExtractionWarning('{0!s}'.format(exception))", "docstring": "Parses a Windows Registry file-like object.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nfile_object (dfvfs.FileIO): a file-like object.", "source": "juraj-google-style"}
{"code": "def get_conflicting_tools(self, request_only=False):\n    from collections import defaultdict\n    tool_sets = defaultdict(set)\n    tools_dict = self.get_tools(request_only=request_only)\n    for (variant, tools) in tools_dict.itervalues():\n        for tool in tools:\n            tool_sets[tool].add(variant)\n    conflicts = dict(((k, v) for (k, v) in tool_sets.iteritems() if (len(v) > 1)))\n    return conflicts", "docstring": "Returns tools of the same name provided by more than one package.\n\nArgs:\nrequest_only: If True, only return the key from resolved packages\nthat were also present in the request.\n\nReturns:\nDict of {tool-name: set([Variant])}.", "source": "codesearchnet"}
{"code": "def scalar_projection(v1, v2):\n    \n    return np.dot(v1, v2) / np.linalg.norm(v2)", "docstring": "compute the scalar projection of v1 upon v2\n\nArgs:\nv1, v2: iterable\nindices 0, 1, 2 corresponding to cartesian coordinates\n\nReturns:\n3-vector of the projection of point p onto the direction of v", "source": "juraj-google-style"}
{"code": "def setup_engines(client=None):\n    if (not client):\n        try:\n            client = ipyparallel.Client()\n        except:\n            raise DistobClusterError(u\"Could not connect to an ipyparallel cluster. Make\\n                 sure a cluster is started (e.g. to use the CPUs of a\\n                 single computer, can type 'ipcluster start')\")\n    eids = client.ids\n    if (not eids):\n        raise DistobClusterError(u'No ipyparallel compute engines are available')\n    nengines = len(eids)\n    dv = client[eids]\n    dv.use_dill()\n    with dv.sync_imports(quiet=True):\n        import distob\n    ars = []\n    for i in eids:\n        dv.targets = i\n        ars.append(dv.apply_async(_remote_setup_engine, i, nengines))\n    dv.wait(ars)\n    for ar in ars:\n        if (not ar.successful()):\n            raise ar.r\n    if (distob.engine is None):\n        distob.engine = ObjectHub((- 1), client)", "docstring": "Prepare all iPython engines for distributed object processing.\n\nArgs:\nclient (ipyparallel.Client, optional): If None, will create a client\nusing the default ipyparallel profile.", "source": "codesearchnet"}
{"code": "def remove_behaviour(self, behaviour):\n        \n        if not self.has_behaviour(behaviour):\n            raise ValueError(\"This behaviour is not registered\")\n        index = self.behaviours.index(behaviour)\n        self.behaviours[index].kill()\n        self.behaviours.pop(index)", "docstring": "Removes a behaviour from the agent.\nThe behaviour is first killed.\n\nArgs:\nbehaviour (spade.behaviour.CyclicBehaviour): the behaviour instance to be removed", "source": "juraj-google-style"}
{"code": "def gallery_section(images, title):\n    \n    \n    imgs = []\n    while True:\n        img = yield marv.pull(images)\n        if img is None:\n            break\n        imgs.append({'src': img.relpath})\n    if not imgs:\n        return\n\n    \n    widget = {'title': images.title, 'gallery': {'images': imgs}}\n    section = {'title': title, 'widgets': [widget]}\n    yield marv.push(section)", "docstring": "Create detail section with gallery.\n\nArgs:\ntitle (str): Title to be displayed for detail section.\nimages: stream of marv image files\n\nReturns\nOne detail section.", "source": "juraj-google-style"}
{"code": "class JetMoeMoA(nn.Module):\n\n    def __init__(self, config: JetMoeConfig):\n        super(JetMoeMoA, self).__init__()\n        self.num_experts = config.num_local_experts\n        self.input_size = config.hidden_size\n        self.hidden_size = config.kv_channels * config.num_key_value_heads\n        self.top_k = config.num_experts_per_tok\n        self.bias = torch.nn.Parameter(torch.empty(self.input_size))\n        self.input_linear = JetMoeParallelExperts(self.num_experts, self.input_size, self.hidden_size)\n        self.output_linear = JetMoeParallelExperts(self.num_experts, self.hidden_size, self.input_size)\n        self.router = JetMoeTopKGating(input_size=self.input_size, num_experts=self.num_experts, top_k=self.top_k)\n\n    def map(self, layer_input):\n        \n        bsz, length, emb_size = layer_input.size()\n        layer_input = layer_input.reshape(-1, emb_size)\n        index_sorted_experts, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n        topo_info = (index_sorted_experts, batch_index, batch_gates, expert_size)\n        expert_inputs = layer_input[batch_index]\n        expert_outputs = self.input_linear(expert_inputs, expert_size)\n        zeros = torch.zeros((bsz * length * self.top_k, self.hidden_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n        layer_output = zeros.index_add(0, index_sorted_experts, expert_outputs)\n        layer_output = layer_output.view(bsz, length, self.top_k, -1)\n        return (layer_output, router_logits, topo_info)\n\n    def reduce(self, layer_input, topo_info):\n        \n        bsz, length, k, hidden_size = layer_input.size()\n        layer_input = layer_input.reshape(-1, hidden_size)\n        index_sorted_experts, batch_index, batch_gates, expert_size = topo_info\n        expert_inputs = layer_input[index_sorted_experts]\n        expert_outputs = self.output_linear(expert_inputs, expert_size)\n        expert_outputs = expert_outputs * batch_gates[:, None]\n        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n        layer_output = zeros.index_add(0, batch_index, expert_outputs)\n        layer_output = layer_output.view(bsz, length, self.input_size)\n        layer_output = layer_output + self.bias\n        return layer_output\n\n    def forward(self, layer_input):\n        raise NotImplementedError(\"This module doesn't support call and forward.\")", "docstring": "A Sparsely gated mixture of attention layer with pairs of query- and output-projections as experts.\n\nArgs:\nconfig:\nConfiguration object with model hyperparameters.", "source": "github-repos"}
{"code": "def dot(inputs, axes, normalize=False, **kwargs):\n    return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)", "docstring": "Functional interface to the `Dot` layer.\n\nArgs:\ninputs: A list of input tensors (at least 2).\naxes: Integer or tuple of integers,\naxis or axes along which to take the dot product.\nnormalize: Whether to L2-normalize samples along the\ndot product axis before taking the dot product.\nIf set to True, then the output of the dot product\nis the cosine proximity between the two samples.\n**kwargs: Standard layer keyword arguments.\n\nReturns:\nA tensor, the dot product of the samples from the inputs.", "source": "github-repos"}
{"code": "def print_probabilities(state: State, ndigits: int = 4,\n                        file: TextIO = None) -> None:\n    \n    prob = bk.evaluate(state.probabilities())\n    for index, prob in np.ndenumerate(prob):\n        prob = round(prob, ndigits)\n        if prob == 0.0:\n            continue\n        ket = \"\".join([str(n) for n in index])\n        print(ket, \":\", prob, file=file)", "docstring": "Pretty print state probabilities.\n\nArgs:\nstate:\nndigits: Number of digits of accuracy\nfile: Output stream (Defaults to stdout)", "source": "juraj-google-style"}
{"code": "def get_summary_dict(self, include_msd_t=False, include_mscd_t=False):\n    d = {'D': self.diffusivity, 'D_sigma': self.diffusivity_std_dev, 'D_charge': self.chg_diffusivity, 'D_charge_sigma': self.chg_diffusivity_std_dev, 'S': self.conductivity, 'S_sigma': self.conductivity_std_dev, 'S_charge': self.chg_conductivity, 'D_components': self.diffusivity_components.tolist(), 'S_components': self.conductivity_components.tolist(), 'D_components_sigma': self.diffusivity_components_std_dev.tolist(), 'S_components_sigma': self.conductivity_components_std_dev.tolist(), 'specie': str(self.specie), 'step_skip': self.step_skip, 'time_step': self.time_step, 'temperature': self.temperature, 'max_framework_displacement': self.max_framework_displacement, 'Haven_ratio': self.haven_ratio}\n    if include_msd_t:\n        d['msd'] = self.msd.tolist()\n        d['msd_components'] = self.msd_components.tolist()\n        d['dt'] = self.dt.tolist()\n    if include_mscd_t:\n        d['mscd'] = self.mscd.tolist()\n    return d", "docstring": "Provides a summary of diffusion information.\n\nArgs:\ninclude_msd_t (bool): Whether to include mean square displace and\ntime data with the data.\ninclude_msd_t (bool): Whether to include mean square charge displace and\ntime data with the data.\n\nReturns:\n(dict) of diffusion and conductivity data.", "source": "codesearchnet"}
{"code": "def get_iso3_country_code(cls, country, use_live=True, exception=None):\n        \n        \n        countriesdata = cls.countriesdata(use_live=use_live)\n        countryupper = country.upper()\n        len_countryupper = len(countryupper)\n        if len_countryupper == 3:\n            if countryupper in countriesdata['countries']:\n                return countryupper\n        elif len_countryupper == 2:\n            iso3 = countriesdata['iso2iso3'].get(countryupper)\n            if iso3 is not None:\n                return iso3\n\n        iso3 = countriesdata['countrynames2iso3'].get(countryupper)\n        if iso3 is not None:\n            return iso3\n\n        for candidate in cls.expand_countryname_abbrevs(countryupper):\n            iso3 = countriesdata['countrynames2iso3'].get(candidate)\n            if iso3 is not None:\n                return iso3\n\n        if exception is not None:\n            raise exception\n        return None", "docstring": "Get ISO3 code for cls. Only exact matches or None are returned.\n\nArgs:\ncountry (str): Country for which to get ISO3 code\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: ISO3 country code or None", "source": "juraj-google-style"}
{"code": "def optimize(\n        self,\n        re_encoder_grads_list,\n        decoder_grads_list,\n        encoder_grads_list,\n        learning_rate,\n        epoch\n    ):\n        \n        self.__retrospective_encoder.optimize(re_encoder_grads_list, learning_rate, epoch)\n        self.__encoder_decoder_controller.optimize(\n            decoder_grads_list,\n            encoder_grads_list,\n            learning_rate,\n            epoch\n        )", "docstring": "Back propagation.\n\nArgs:\nre_encoder_grads_list:  re-encoder's `list` of graduations.\ndecoder_grads_list:     decoder's `list` of graduations.\nencoder_grads_list:     encoder's `list` of graduations.\nlearning_rate:          Learning rate.\nepoch:                  Now epoch.", "source": "juraj-google-style"}
{"code": "def vrp_solver(path_graph, initial_solution=None, runtime_seconds=60):\n    \n    \n    \n    routing = pywrapcp.RoutingModel(path_graph.num_nodes(),\n                                    1, path_graph.ORIGIN)\n\n    \n    \n    for disjunction in path_graph.iter_disjunctions():\n        routing.AddDisjunction(disjunction)\n\n    \n    \n    \n    COST_MULTIPLIER = 1e4\n\n    def distance(i, j):\n        return int(path_graph.cost(i, j) * COST_MULTIPLIER)\n    routing.SetArcCostEvaluatorOfAllVehicles(distance)\n\n    start_time = time()\n\n    def found_solution():\n        t = time() - start_time\n        cost = routing.CostVar().Max() / COST_MULTIPLIER\n        print('\\rBest solution at {} seconds has cost {}        '.format(\n            int(t), cost), end='')\n    routing.AddAtSolutionCallback(found_solution)\n\n    \n    \n    if not initial_solution:\n        initial_solution = [i for i, _ in path_graph.iter_disjunctions()]\n\n    \n    \n    initial_assignment = routing.ReadAssignmentFromRoutes([initial_solution],\n                                                          True)\n    \n    \n\n    \n    search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()\n    search_parameters.time_limit_ms = runtime_seconds * 1000\n    search_parameters.local_search_metaheuristic = (\n        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n\n    \n    assignment = routing.SolveFromAssignmentWithParameters(initial_assignment,\n                                                           search_parameters)\n    print()\n    \n\n    \n    solution = []\n    index = routing.Start(0)\n    while not routing.IsEnd(index):\n        index = assignment.Value(routing.NextVar(index))\n        node = routing.IndexToNode(index)\n        if node != 0:\n            \n            solution.append(node)\n    return solution", "docstring": "Solve a path using or-tools' Vehicle Routing Problem solver.\nParams:\npath_graph        the PathGraph representing the problem\ninitial_solution  a solution to start with (list of indices, not\nincluding the origin)\nruntime_seconds   how long to search before returning\n\nReturns: an ordered list of indices in the graph representing a\nsolution.", "source": "juraj-google-style"}
{"code": "def _publish_actor_class_to_key(self, key, actor_class_info):\n    self._worker.redis_client.hmset(key, actor_class_info)\n    self._worker.redis_client.rpush('Exports', key)", "docstring": "Push an actor class definition to Redis.\n\nThe is factored out as a separate function because it is also called\non cached actor class definitions when a worker connects for the first\ntime.\n\nArgs:\nkey: The key to store the actor class info at.\nactor_class_info: Information about the actor class.", "source": "codesearchnet"}
{"code": "def _get_operand_name_and_index(self, numeric_verify_name: str) -> Tuple[str, int]:\n    tensor_name, tensor_idx = numeric_verify_name.rsplit(':', 1)\n    float_tensor_name = tensor_name[len(_NUMERIC_VERIFY_OP_NAME) + 1:]\n    if re.match('\\\\d', float_tensor_name[-1]):\n        float_tensor_name = float_tensor_name[:-1]\n    return (float_tensor_name, int(tensor_idx))", "docstring": "Gets the index and name of NumericVerify Op's quantized input tensor.\n\nArgs:\nnumeric_verify_name: name of the NumericVerify op's output tensor. It has\nformat of `NumericVerify/{quantized_tensor_name}:{quantized_tensor_idx}`\n\nReturns:\nTuple of (tensor_name, tensor_idx) for quantized op's output tensor.", "source": "github-repos"}
{"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n        \n        init_params = super(MXNet, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)\n        image_name = init_params.pop('image')\n        framework, py_version, tag, _ = framework_name_from_image(image_name)\n\n        if not framework:\n            \n            \n            init_params['image_name'] = image_name\n            return init_params\n\n        init_params['py_version'] = py_version\n\n        \n        \n        \n        \n        init_params['framework_version'] = '0.12' if tag == '1.0' else framework_version_from_tag(tag)\n\n        training_job_name = init_params['base_job_name']\n\n        if framework != cls.__framework_name__:\n            raise ValueError(\"Training job: {} didn't use image for requested framework\".format(training_job_name))\n\n        return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be downloaded.\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"}
{"code": "def constant_value_as_shape(tensor):\n    if isinstance(tensor, core.Value):\n        return tensor_shape.TensorShape([dim if dim != -1 else None for dim in tensor.numpy()])\n    if tensor.get_shape().ndims == 0:\n        value = constant_value(tensor)\n        if value is None:\n            raise ValueError(\"Received a scalar with unknown value as shape; require a statically known scalar with value '-1' to describe an unknown shape.\")\n        if value != -1:\n            raise ValueError(f\"Received a scalar value '{value}' as shape; require a statically known scalar with value '-1' to describe an unknown shape.\")\n        return tensor_shape.unknown_shape()\n    shape = tensor.get_shape().with_rank(1)\n    if shape == [0]:\n        return tensor_shape.TensorShape([])\n    elif tensor.op.type == 'Cast':\n        pre_cast = constant_value_as_shape(tensor.op.inputs[0])\n        if pre_cast.dims is None:\n            return pre_cast\n        cast_dtype = dtypes.as_dtype(tensor.op.get_attr('DstT'))\n        if cast_dtype not in (dtypes.int32, dtypes.int64):\n            return tensor_shape.unknown_shape(shape.dims[0].value)\n        dest_dtype_shape_array = np.array([x if x is not None else -1 for x in pre_cast.as_list()]).astype(cast_dtype.as_numpy_dtype)\n        return tensor_shape.TensorShape([x if x >= 0 else None for x in dest_dtype_shape_array])\n    elif tensor.op.type == 'Shape':\n        return tensor.op.inputs[0].get_shape()\n    elif tensor.op.type == 'Pack':\n        ret = tensor_shape.TensorShape([])\n        assert tensor.op.get_attr('axis') == 0\n        for pack_input in tensor.op.inputs:\n            pack_input_val = constant_value(pack_input)\n            if pack_input_val is None or pack_input_val < 0:\n                new_dim = tensor_shape.Dimension(None)\n            else:\n                new_dim = tensor_shape.Dimension(pack_input_val)\n            ret = ret.concatenate([new_dim])\n        return ret\n    elif tensor.op.type == 'Concat':\n        ret = tensor_shape.TensorShape([])\n        for concat_input in tensor.op.inputs[1:]:\n            ret = ret.concatenate(constant_value_as_shape(concat_input))\n        return ret\n    elif tensor.op.type == 'ConcatV2':\n        ret = tensor_shape.TensorShape([])\n        for concat_input in tensor.op.inputs[:-1]:\n            ret = ret.concatenate(constant_value_as_shape(concat_input))\n        return ret\n    elif tensor.op.type == 'StridedSlice':\n        try:\n            begin = constant_value(tensor.op.inputs[1])\n            end = constant_value(tensor.op.inputs[2])\n            strides = constant_value(tensor.op.inputs[3])\n            if begin is not None and end is not None and (strides is not None):\n                begin = begin[0]\n                end = end[0]\n                strides = strides[0]\n                begin_mask = tensor.op.get_attr('begin_mask')\n                if begin_mask == 1:\n                    begin = None\n                end_mask = tensor.op.get_attr('end_mask')\n                if end_mask == 1:\n                    end = None\n                ellipsis_mask = tensor.op.get_attr('ellipsis_mask')\n                new_axis_mask = tensor.op.get_attr('new_axis_mask')\n                shrink_axis_mask = tensor.op.get_attr('shrink_axis_mask')\n                valid_attributes = not ellipsis_mask and (not new_axis_mask) and (not shrink_axis_mask) and (not begin_mask or begin_mask == 1) and (not end_mask or end_mask == 1)\n                if valid_attributes:\n                    
prev = constant_value_as_shape(tensor.op.inputs[0])\n                    prev = prev[begin:end:strides]\n                    ret = tensor_shape.TensorShape(prev)\n                    return ret\n        except ValueError:\n            pass\n        except TypeError:\n            pass\n    elif tensor.op.type == 'Placeholder' and tensor.op.graph.building_function and hasattr(tensor.op.graph, 'internal_captures'):\n        for i, capture in enumerate(tensor.op.graph.internal_captures):\n            if capture is tensor:\n                external_capture = tensor.op.graph.external_captures[i]\n                return constant_value_as_shape(external_capture)\n    ret = tensor_shape.unknown_shape(shape.dims[0].value)\n    value = constant_value(tensor)\n    if value is not None:\n        ret = ret.merge_with(tensor_shape.TensorShape([d if d >= 0 else None for d in value]))\n    return ret", "docstring": "A version of `constant_value()` that returns a `TensorShape`.\n\nThis version should be used when a constant tensor value is\ninterpreted as a (possibly partial) shape, e.g. in the shape\nfunction for `tf.reshape()`. By explicitly requesting a\n`TensorShape` as the return value, it is possible to represent\nunknown dimensions; by contrast, `constant_value()` is\nall-or-nothing.\n\nArgs:\ntensor: The rank-0 or rank-1 Tensor to be evaluated.\n\nReturns:\nA `TensorShape` based on the constant value of the given `tensor`.\n\nRaises:\nValueError: If the shape is rank-0 and is not statically known to be -1.", "source": "github-repos"}
{"code": "def GetTARInfo(self):\n    if (not self._tar_info):\n        location = getattr(self.path_spec, 'location', None)\n        if (location is None):\n            raise errors.PathSpecError('Path specification missing location.')\n        if (not location.startswith(self._file_system.LOCATION_ROOT)):\n            raise errors.PathSpecError('Invalid location in path specification.')\n        if (len(location) == 1):\n            return None\n        tar_file = self._file_system.GetTARFile()\n        try:\n            self._tar_info = tar_file.getmember(location[1:])\n        except KeyError:\n            pass\n    return self._tar_info", "docstring": "Retrieves the TAR info.\n\nReturns:\ntarfile.TARInfo: TAR info or None if it does not exist.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def to_hdf(self, path, key, mode='a'):\n    pd.DataFrame(self.serialize()).to_hdf(path, key, mode=mode, format='table', complib='zlib', complevel=9)\n    f = h5py.File(path, 'r+')\n    f[key].attrs['microns_per_pixel'] = (float(self.microns_per_pixel) if (self.microns_per_pixel is not None) else np.nan)\n    f.close()", "docstring": "Save the CellDataFrame to an hdf5 file.\n\nArgs:\npath (str): the path to save to\nkey (str): the name of the location to save it to\nmode (str): write mode", "source": "codesearchnet"}
{"code": "def _ParseVValueString(\n      self, parser_mediator, data, user_information_descriptor):\n    \n    data_start_offset = (\n        user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET)\n    data_end_offset = data_start_offset + user_information_descriptor.size\n    descriptor_data = data[data_start_offset:data_end_offset]\n\n    try:\n      username = descriptor_data.decode('utf-16-le')\n    except (UnicodeDecodeError, UnicodeEncodeError) as exception:\n      username = descriptor_data.decode('utf-16-le', errors='replace')\n      parser_mediator.ProduceExtractionWarning((\n          'unable to decode V value string with error: {0!s}. Characters '\n          'that cannot be decoded will be replaced with \"?\" or '\n          '\"\\\\ufffd\".').format(exception))\n\n    return username", "docstring": "Parses a V value string.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndata (bytes): Windows Registry V value data.\nuser_information_descriptor (user_information_descriptor): V value\nuser information descriptor.\n\nReturns:\nstr: string value stored in the Windows Registry V value data.", "source": "juraj-google-style"}
{"code": "def get_step():\n    return _summary_state.step", "docstring": "Returns the default summary step for the current thread.\n\nReturns:\nThe step set by `tf.summary.experimental.set_step()` if one has been set,\notherwise None.", "source": "github-repos"}
{"code": "def _merge_assets_key_collection(saved_model_proto, path):\n    for meta_graph in saved_model_proto.meta_graphs:\n        node_asset_map = {}\n        if (tf_v1.saved_model.constants.ASSETS_KEY in meta_graph.collection_def):\n            assets_any_proto = meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY].any_list.value\n            for asset_any_proto in assets_any_proto:\n                asset_proto = meta_graph_pb2.AssetFileDef()\n                asset_any_proto.Unpack(asset_proto)\n                asset_filename = _get_asset_filename(path, asset_proto.filename)\n                node_asset_map[_get_node_name_from_tensor(asset_proto.tensor_info.name)] = asset_filename\n            del meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY]\n        for node in meta_graph.graph_def.node:\n            asset_filepath = node_asset_map.get(node.name)\n            if asset_filepath:\n                _check_asset_node_def(node)\n                node.attr['value'].tensor.string_val[0] = asset_filepath", "docstring": "Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto.\n\nRemoves the ASSETS_KEY collection from the GraphDefs in the SavedModel and\nmodifies nodes with the assets filenames to point to the assets in `path`.\nAfter this transformation, the SavedModel GraphDefs can be used without\nfeeding asset tensors.\n\nArgs:\nsaved_model_proto: SavedModel proto to be modified.\npath: path where the SavedModel is being loaded from.", "source": "codesearchnet"}
{"code": "def lxml(self):\n    import lxml.etree\n    return lxml.etree.fromstring((e_views.XML_HEADER + self.xml()).encode('utf-8'))", "docstring": "render the record into a lxml document.\nthis is useful for querying data from the record using xpath, etc.\n\nnote: lxml must be installed.\n\nReturns:\nlxml.etree.ElementTree: the rendered and parsed xml document.\n\nRaises:\nImportError: if lxml is not installed.", "source": "codesearchnet"}
{"code": "def GetConfiguredUsers(self):\n    if os.path.exists(self.google_users_file):\n        users = open(self.google_users_file).readlines()\n    else:\n        users = []\n    return [user.strip() for user in users]", "docstring": "Retrieve the list of configured Google user accounts.\n\nReturns:\nlist, the username strings of users congfigured by Google.", "source": "codesearchnet"}
{"code": "def _bind_length_handlers(tids, user_handler, lns):\n    \n    for tid in tids:\n        for ln in lns:\n            type_octet = _gen_type_octet(tid, ln)\n            ion_type = _TID_VALUE_TYPE_TABLE[tid]\n            if ln == 1 and ion_type is IonType.STRUCT:\n                handler = partial(_ordered_struct_start_handler, partial(user_handler, ion_type))\n            elif ln < _LENGTH_FIELD_FOLLOWS:\n                \n                handler = partial(user_handler, ion_type, ln)\n            else:\n                \n                handler = partial(_var_uint_field_handler, partial(user_handler, ion_type))\n            _HANDLER_DISPATCH_TABLE[type_octet] = handler", "docstring": "Binds a set of handlers with the given factory.\n\nArgs:\ntids (Sequence[int]): The Type IDs to bind to.\nuser_handler (Callable): A function that takes as its parameters\n:class:`IonType`, ``length``, and the ``ctx`` context\nreturning a co-routine.\nlns (Sequence[int]): The low-nibble lengths to bind to.", "source": "juraj-google-style"}
{"code": "def get_self_attention_bias(x):\n  \n\n  x_shape = common_layers.shape_list(x)\n  self_attention_bias = common_attention.attention_bias_lower_triangle(\n      x_shape[1])\n  return self_attention_bias", "docstring": "Creates masked self attention bias.\n\nArgs:\nx: A tensor of shape [batch, length, depth]\n\nReturns:\nself_attention_bias: A tensor of shape [length, length, 1]", "source": "juraj-google-style"}
{"code": "def execute_script(self, script, *args):\n        \n\n        args = [arg.base if isinstance(arg, Base) else arg for arg in args]\n        self.driver.execute_script(script, *args)", "docstring": "Execute the given script, not returning a result. This is useful for scripts that return\ncomplex objects, such as jQuery statements. ``execute_script`` should be used over\n:meth:`evaluate_script` whenever possible.\n\nArgs:\nscript (str): A string of JavaScript to execute.\n*args: Variable length argument list to pass to the executed JavaScript string.", "source": "juraj-google-style"}
{"code": "def convert_strtime_datetime(dt_str):\n    \n    dt, _, us = dt_str.partition(\".\")\n    dt = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S\")\n    us = int(us.rstrip(\"Z\"), 10)\n    return dt + datetime.timedelta(microseconds=us)", "docstring": "Converts datetime isoformat string to datetime (dt) object\n\nArgs:\n:dt_str (str): input string in '2017-12-30T18:48:00.353Z' form\nor similar\nReturns:\nTYPE:  datetime object", "source": "juraj-google-style"}
{"code": "def encoding_specs(self, spec):\n    return spec._component_specs", "docstring": "Returns a list of `TensorSpec`(s) describing the encoding for `spec`.\n\nSee `encode` for a description of the default encoding.  Subclasses may\noverride this default definition, when necessary.\n\nArgs:\nspec: The TypeSpec whose encoding should be described.\n\nReturns:\nA nest (as defined by `tf.nest) of `tf.TypeSpec`, describing the values\nthat are returned by `self.encode(spec, ...)`.  All TypeSpecs in this\nnest must be batchable.", "source": "github-repos"}
{"code": "def to(self, new_unit):\n        \n        return FloatWithUnit(\n            self * self.unit.get_conversion_factor(new_unit),\n            unit_type=self._unit_type,\n            unit=new_unit)", "docstring": "Conversion to a new_unit. Right now, only supports 1 to 1 mapping of\nunits of each type.\n\nArgs:\nnew_unit: New unit type.\n\nReturns:\nA FloatWithUnit object in the new units.\n\nExample usage:\n>>> e = Energy(1.1, \"eV\")\n>>> e = Energy(1.1, \"Ha\")\n>>> e.to(\"eV\")\n29.932522246 eV", "source": "juraj-google-style"}
{"code": "def info(self, code, message, compressed=False):\n    return ''.join([x for x in self.info_gen(code, message, compressed)])", "docstring": "The complete content of an info response.\n\nThis should only used for commands that return small or known amounts of\ndata.\n\nReturns:\nA the complete content of a textual response.", "source": "codesearchnet"}
{"code": "def ProcessConfigOverrides(filename):\n  \n\n  abs_filename = os.path.abspath(filename)\n  cfg_filters = []\n  keep_looking = True\n  while keep_looking:\n    abs_path, base_name = os.path.split(abs_filename)\n    if not base_name:\n      break  \n\n    cfg_file = os.path.join(abs_path, \"CPPLINT.cfg\")\n    abs_filename = abs_path\n    if not os.path.isfile(cfg_file):\n      continue\n\n    try:\n      with open(cfg_file) as file_handle:\n        for line in file_handle:\n          line, _, _ = line.partition('\n          if not line.strip():\n            continue\n\n          name, _, val = line.partition('=')\n          name = name.strip()\n          val = val.strip()\n          if name == 'set noparent':\n            keep_looking = False\n          elif name == 'filter':\n            cfg_filters.append(val)\n          elif name == 'exclude_files':\n            \n            \n            \n            \n            \n            \n            if base_name:\n              pattern = re.compile(val)\n              if pattern.match(base_name):\n                sys.stderr.write('Ignoring \"%s\": file excluded by \"%s\". '\n                                 'File path component \"%s\" matches '\n                                 'pattern \"%s\"\\n' %\n                                 (filename, cfg_file, base_name, val))\n                return False\n          elif name == 'linelength':\n            global _line_length\n            try:\n                _line_length = int(val)\n            except ValueError:\n                sys.stderr.write('Line length must be numeric.')\n          else:\n            sys.stderr.write(\n                'Invalid configuration option (%s) in file %s\\n' %\n                (name, cfg_file))\n\n    except IOError:\n      sys.stderr.write(\n          \"Skipping config file '%s': Can't open for reading\\n\" % cfg_file)\n      keep_looking = False\n\n  \n  \n  for filter in reversed(cfg_filters):\n     _AddFilters(filter)\n\n  return True", "docstring": "Loads the configuration files and processes the config overrides.\n\nArgs:\nfilename: The name of the file being processed by the linter.\n\nReturns:\nFalse if the current |filename| should not be processed further.", "source": "juraj-google-style"}
{"code": "def list_jobs(config, *, status=JobStatus.Active, filter_by_type=None, filter_by_worker=None):\n    celery_app = create_app(config)\n    if (filter_by_worker is not None):\n        inspect = celery_app.control.inspect(destination=(filter_by_worker if isinstance(filter_by_worker, list) else [filter_by_worker]))\n    else:\n        inspect = celery_app.control.inspect()\n    if (status == JobStatus.Active):\n        job_map = inspect.active()\n    elif (status == JobStatus.Registered):\n        job_map = inspect.registered()\n    elif (status == JobStatus.Reserved):\n        job_map = inspect.reserved()\n    elif (status == JobStatus.Scheduled):\n        job_map = inspect.scheduled()\n    else:\n        job_map = None\n    if (job_map is None):\n        return []\n    result = []\n    for (worker_name, jobs) in job_map.items():\n        for job in jobs:\n            try:\n                job_stats = JobStats.from_celery(worker_name, job, celery_app)\n                if ((filter_by_type is None) or (job_stats.type == filter_by_type)):\n                    result.append(job_stats)\n            except JobStatInvalid:\n                pass\n    return result", "docstring": "Return a list of Celery jobs.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings are retrieved.\nstatus (JobStatus): The status of the jobs that should be returned.\nfilter_by_type (list): Restrict the returned jobs to the types in this list.\nfilter_by_worker (list): Only return jobs that were registered, reserved or are\nrunning on the workers given in this list of worker names. Using\nthis option will increase the performance.\n\nReturns:\nlist: A list of JobStats.", "source": "codesearchnet"}
{"code": "def minimum(self, vars_list: List[str]) -> 'TensorFluent':\n    return self._aggregation_op(tf.reduce_min, self, vars_list)", "docstring": "Returns the TensorFluent for the minimum aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the minimum aggregation function.", "source": "codesearchnet"}
{"code": "def relabel_variables(self, mapping, inplace=True):\n    if (not inplace):\n        return self.copy().relabel_variables(mapping, inplace=True)\n    try:\n        old_labels = set(mapping)\n        new_labels = set(mapping.values())\n    except TypeError:\n        raise ValueError('mapping targets must be hashable objects')\n    variables = self.variables\n    for v in new_labels:\n        if ((v in variables) and (v not in old_labels)):\n            raise ValueError('A variable cannot be relabeled \"{}\" without also relabeling the existing variable of the same name'.format(v))\n    shared = (old_labels & new_labels)\n    if shared:\n        (old_to_intermediate, intermediate_to_new) = resolve_label_conflict(mapping, old_labels, new_labels)\n        self.relabel_variables(old_to_intermediate, inplace=True)\n        self.relabel_variables(intermediate_to_new, inplace=True)\n        return self\n    for (oldterm, bias) in list(self.items()):\n        newterm = frozenset((mapping.get(v, v) for v in oldterm))\n        if (newterm != oldterm):\n            self[newterm] = bias\n            del self[oldterm]\n    return self", "docstring": "Relabel variables of a binary polynomial as specified by mapping.\n\nArgs:\nmapping (dict):\nDict mapping current variable labels to new ones. If an\nincomplete mapping is provided, unmapped variables retain their\ncurrent labels.\n\ninplace (bool, optional, default=True):\nIf True, the binary polynomial is updated in-place; otherwise, a\nnew binary polynomial is returned.\n\nReturns:\n:class:`.BinaryPolynomial`: A binary polynomial with the variables\nrelabeled. If `inplace` is set to True, returns itself.", "source": "codesearchnet"}
{"code": "async def _verify_examples(self, client: GRPCClient, examples: List[Example], origin: Origin):\n    count_of_verified = 0\n    verify_status_failed = False\n    default_examples = []\n    for example in examples:\n        if example.tag.default_example:\n            default_examples.append(example)\n        if example.status not in Config.ERROR_STATUSES:\n            count_of_verified += 1\n            continue\n        if example.status == STATUS_VALIDATION_ERROR:\n            logging.error('Example: %s has validation error', example.filepath)\n        elif example.status == STATUS_PREPARATION_ERROR:\n            logging.error('Example: %s has preparation error', example.filepath)\n        elif example.status == STATUS_ERROR:\n            logging.error('Example: %s has error during setup run builder', example.filepath)\n        elif example.status == STATUS_RUN_TIMEOUT:\n            logging.error('Example: %s failed because of timeout', example.filepath)\n        elif example.status == STATUS_COMPILE_ERROR:\n            err = await client.get_compile_output(example.pipeline_id)\n            logging.error('Example: %s has compilation error: %s', example.filepath, err)\n        elif example.status == STATUS_RUN_ERROR:\n            err = await client.get_run_error(example.pipeline_id)\n            logging.error('Example: %s has execution error: %s', example.filepath, err)\n        verify_status_failed = True\n    logging.info('Number of verified Playground examples: %s / %s', count_of_verified, len(examples))\n    logging.info('Number of Playground examples with some error: %s / %s', len(examples) - count_of_verified, len(examples))\n    if origin == Origin.PG_EXAMPLES:\n        if len(default_examples) == 0:\n            logging.error('Default example not found')\n            raise VerifyException('CI step failed due to finding an incorrect number of default examples. Default example not found')\n        if len(default_examples) > 1:\n            logging.error('Many default examples found')\n            logging.error('Examples where the default_example field is true:')\n            for example in default_examples:\n                logging.error(example.filepath)\n            raise VerifyException('CI step failed due to finding an incorrect number of default examples. Many default examples found')\n    if verify_status_failed:\n        raise VerifyException('CI step failed due to errors in the examples')", "docstring": "Verify statuses of beam examples and the number of found default examples.\n\nCheck example.status for each examples. If the status of the example is:\n- STATUS_VALIDATION_ERROR/STATUS_PREPARATION_ERROR\n/STATUS_ERROR/STATUS_RUN_TIMEOUT: log error\n- STATUS_COMPILE_ERROR: get logs using GetCompileOutput request and\nlog them with error.\n- STATUS_RUN_ERROR: get logs using GetRunError request and\nlog them with error.\n\nArgs:\nexamples: beam examples that should be verified", "source": "github-repos"}
{"code": "def assert_no_current_path(self, path, **kwargs):\n    query = CurrentPathQuery(path, **kwargs)\n\n    @self.document.synchronize\n    def assert_no_current_path():\n        if query.resolves_for(self):\n            raise ExpectationNotMet(query.negative_failure_message)\n    assert_no_current_path()\n    return True", "docstring": "Asserts that the page doesn't have the given path.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nTrue\n\nRaises:\nExpectationNotMet: If the assertion hasn't succeeded during the wait time.", "source": "codesearchnet"}
{"code": "def attach(self, engine, metric_names=None, output_transform=None, event_name=Events.ITERATION_COMPLETED, closing_event_name=Events.EPOCH_COMPLETED):\n    desc = self.tqdm_kwargs.get('desc', 'Epoch')\n    if (not ((event_name in Events) and (closing_event_name in Events))):\n        raise ValueError('Logging and closing events should be only ignite.engine.Events')\n    if (not self._compare_lt(event_name, closing_event_name)):\n        raise ValueError('Logging event {} should be called before closing event {}'.format(event_name, closing_event_name))\n    log_handler = _OutputHandler(desc, metric_names, output_transform, event_name=event_name, closing_event_name=closing_event_name)\n    super(ProgressBar, self).attach(engine, log_handler, event_name)\n    engine.add_event_handler(closing_event_name, self._close)", "docstring": "Attaches the progress bar to an engine object.\n\nArgs:\nengine (Engine): engine object.\nmetric_names (list, optional): list of the metrics names to log as the bar progresses\noutput_transform (callable, optional): a function to select what you want to print from the engine's\noutput. This function may return either a dictionary with entries in the format of ``{name: value}``,\nor a single scalar, which will be displayed with the default name `output`.\nevent_name: event's name on which the progress bar advances. Valid events are from\n:class:`~ignite.engine.Events`.\nclosing_event_name: event's name on which the progress bar is closed. Valid events are from\n:class:`~ignite.engine.Events`.", "source": "codesearchnet"}
{"code": "def add_package(self, pkg, action_type=\"Install\"):\n        \n        if isinstance(pkg, Package):\n            if action_type not in (\"Install\", \"Cache\", \"Install Cached\"):\n                raise ValueError\n            package = self.add_object_to_path(\n                pkg, \"package_configuration/packages\")\n            \n            \n            action = package.find(\"action\")\n            if not action:\n                action = ElementTree.SubElement(package, \"action\")\n            action.text = action_type\n        else:\n            raise ValueError(\"Please pass a Package object to parameter: \"\n                             \"pkg.\")", "docstring": "Add a Package object to the policy with action=install.\n\nArgs:\npkg: A Package object to add.\naction_type (str, optional): One of \"Install\", \"Cache\", or\n\"Install Cached\".  Defaults to \"Install\".", "source": "juraj-google-style"}
{"code": "def produce_csv_output(filehandle: TextIO,\n                       fields: Sequence[str],\n                       values: Iterable[str]) -> None:\n    \n    output_csv(filehandle, fields)\n    for row in values:\n        output_csv(filehandle, row)", "docstring": "Produce CSV output, without using ``csv.writer``, so the log can be used\nfor lots of things.\n\n- ... eh? What was I talking about?\n- POOR; DEPRECATED.\n\nArgs:\nfilehandle: file to write to\nfields: field names\nvalues: values", "source": "juraj-google-style"}
{"code": "def default_onnx_opset(self) -> int:\n    return DEFAULT_ONNX_OPSET", "docstring": "Which onnx opset to use when exporting the model\n\nReturns:\nInteger ONNX Opset version", "source": "github-repos"}
{"code": "def create_with_secret(self, name, secret, encryption):\n        \n        try:\n            encryption = encryption or DEFAULT_ENCRYPTION\n            enc = ENCRYPTION_MAP[encryption]\n        except KeyError:\n            raise TypeError('encryption must be one of \"cleartext\", \"md5\"'\n                            ' or \"sha512\"')\n\n        cmd = 'username %s secret %s %s' % (name, enc, secret)\n        return self.configure(cmd)", "docstring": "Creates a new user on the local node\n\nArgs:\nname (str): The name of the user to craete\n\nsecret (str): The secret (password) to assign to this user\n\nencryption (str): Specifies how the secret is encoded.  Valid\nvalues are \"cleartext\", \"md5\", \"sha512\".  The default is\n\"cleartext\"\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "juraj-google-style"}
{"code": "def clone(self, spec=None, **overrides):\n        \n        settings = dict(self.get_param_values(), **overrides)\n\n        if spec is None:\n            spec = (self.name, overrides.get('label', self.label))\n        if 'label' in overrides and isinstance(spec, basestring) :\n            spec = (spec, overrides['label'])\n        elif 'label' in overrides and isinstance(spec, tuple) :\n            if overrides['label'] != spec[1]:\n                self.param.warning(\n                    'Using label as supplied by keyword ({!r}), ignoring '\n                    'tuple value {!r}'.format(overrides['label'], spec[1]))\n            spec = (spec[0],  overrides['label'])\n        return self.__class__(spec, **{k:v for k,v in settings.items()\n                                       if k not in ['name', 'label']})", "docstring": "Clones the Dimension with new parameters\n\nDerive a new Dimension that inherits existing parameters\nexcept for the supplied, explicit overrides\n\nArgs:\nspec (tuple, optional): Dimension tuple specification\n**overrides: Dimension parameter overrides\n\nReturns:\nCloned Dimension object", "source": "juraj-google-style"}
{"code": "def _is_propertyable(names, attrs, annotations, attr):\n    return ((attr in annotations) and (not attr.startswith('_')) and (not attr.isupper()) and ('__{}'.format(attr) not in names) and (not isinstance(getattr(attrs, attr, None), types.MethodType)))", "docstring": "Determine if an attribute can be replaced with a property.\n\nArgs:\nnames: The complete list of all attribute names for the class.\nattrs: The attribute dict returned by __prepare__.\nannotations: A mapping of all defined annotations for the class.\nattr: The attribute to test.\n\nReturns:\nTrue if the attribute can be replaced with a property; else False.", "source": "codesearchnet"}
{"code": "def _in_multi_worker_mode(self):\n    strategy = self._distribution_strategy\n    if not strategy and distribute_lib.has_strategy():\n        strategy = distribute_lib.get_strategy()\n    return strategy and strategy.extended._in_multi_worker_mode()", "docstring": "Method to infer if this `Model` is working in multi-worker settings.\n\nMulti-worker training refers to the setup where the training is\ndistributed across multiple workers, as opposed to the case where\nonly a local process performs the training. This function is\nused to infer for example whether or not a distribute coordinator\nshould be run, and thus TensorFlow servers should be started for\ncommunication with other servers in the cluster, or whether or not\nsaving/restoring checkpoints is relevant for preemption fault tolerance.\n\nExperimental. Signature and implementation are subject to change.\n\nReturns:\nWhether this model indicates it's working in multi-worker settings.", "source": "github-repos"}
{"code": "def check_the_end_flag(self, state_arr):\n        \n        if self.__check_goal_flag(state_arr) is True or self.__check_crash_flag(state_arr):\n            return True\n        else:\n            return False", "docstring": "Check the end flag.\n\nIf this return value is `True`, the learning is end.\n\nAs a rule, the learning can not be stopped.\nThis method should be overrided for concreate usecases.\n\nArgs:\nstate_arr:    `np.ndarray` of state in `self.t`.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def add(self, resource, provider_uri_or_id, timeout=-1):\n        \n        uri = self._provider_client.build_uri(provider_uri_or_id) + \"/device-managers\"\n        return self._client.create(resource=resource, uri=uri, timeout=timeout)", "docstring": "Adds a Device Manager under the specified provider.\n\nArgs:\nresource (dict): Object to add.\nprovider_uri_or_id: ID or URI of provider.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView, just stop waiting for its completion.\n\nReturns:\ndict: Added SAN Manager.", "source": "juraj-google-style"}
{"code": "def AddPerformanceOptions(self, argument_group):\n    \n    argument_group.add_argument(\n        '--buffer_size', '--buffer-size', '--bs', dest='buffer_size',\n        action='store', default=0, help=(\n            'The buffer size for the output (defaults to 196MiB).'))\n\n    argument_group.add_argument(\n        '--queue_size', '--queue-size', dest='queue_size', action='store',\n        default=0, help=(\n            'The maximum number of queued items per worker '\n            '(defaults to {0:d})').format(self._DEFAULT_QUEUE_SIZE))", "docstring": "Adds the performance options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"}
{"code": "def set_status(self, status: Status, increment_try_count: bool=True,\n                   filename: str=None):\n        \n        url = self.url_record.url\n        assert not self._try_count_incremented, (url, status)\n\n        if increment_try_count:\n            self._try_count_incremented = True\n\n        _logger.debug(__('Marking URL {0} status {1}.', url, status))\n\n        url_result = URLResult()\n        url_result.filename = filename\n\n        self.app_session.factory['URLTable'].check_in(\n            url,\n            status,\n            increment_try_count=increment_try_count,\n            url_result=url_result,\n        )\n\n        self._processed = True", "docstring": "Mark the item with the given status.\n\nArgs:\nstatus: a value from :class:`Status`.\nincrement_try_count: if True, increment the ``try_count``\nvalue", "source": "juraj-google-style"}
{"code": "def html_for_cgi_argument(argument, form):\n    \n    value = form[argument].value if argument in form else None\n    return KEY_VALUE_TEMPLATE.format(argument, value)", "docstring": "Returns an HTML snippet for a CGI argument.\n\nArgs:\nargument: A string representing an CGI argument name in a form.\nform: A CGI FieldStorage object.\n\nReturns:\nString HTML representing the CGI value and variable.", "source": "juraj-google-style"}
{"code": "def get(self, params=None):\n        \n        return self._call('get', url=self.endpoint, params=params)", "docstring": "Send a POST request and return the JSON decoded result.\n\nArgs:\nparams (dict, optional): Mapping of parameters to send in request.\n\nReturns:\nmixed: JSON decoded response data.", "source": "juraj-google-style"}
{"code": "def from_params(cls, params):\n        \n        key_fn = lambda x: id(x[1].owner)\n        streams = []\n        for _, group in groupby(sorted(params.items(), key=key_fn), key_fn):\n            group = list(group)\n            inst = [p.owner for _, p in group][0]\n            if not isinstance(inst, param.Parameterized):\n                continue\n            names = [p.name for _, p in group]\n            rename = {p.name: n for n, p in group}\n            streams.append(cls(inst, names, rename=rename))\n        return streams", "docstring": "Returns Params streams given a dictionary of parameters\n\nArgs:\nparams (dict): Dictionary of parameters\n\nReturns:\nList of Params streams", "source": "juraj-google-style"}
{"code": "def get_tensors(object_):\n    \n    if torch.is_tensor(object_):\n        return [object_]\n    elif isinstance(object_, (str, float, int)):\n        return []\n\n    tensors = set()\n\n    if isinstance(object_, collections.abc.Mapping):\n        for value in object_.values():\n            tensors.update(get_tensors(value))\n    elif isinstance(object_, collections.abc.Iterable):\n        for value in object_:\n            tensors.update(get_tensors(value))\n    else:\n        members = [\n            value for key, value in inspect.getmembers(object_)\n            if not isinstance(value, (collections.abc.Callable, type(None)))\n        ]\n        tensors.update(get_tensors(members))\n\n    return tensors", "docstring": "Get all tensors associated with ``object_``\n\nArgs:\nobject_ (any): Any object to look for tensors.\n\nReturns:\n(list of torch.tensor): List of tensors that are associated with ``object_``.", "source": "juraj-google-style"}
{"code": "def _collect_grades_data(self, enterprise_enrollment, course_details):\n    if (self.grades_api is None):\n        self.grades_api = GradesApiClient(self.user)\n    course_id = enterprise_enrollment.course_id\n    username = enterprise_enrollment.enterprise_customer_user.user.username\n    try:\n        grades_data = self.grades_api.get_course_grade(course_id, username)\n    except HttpNotFoundError as error:\n        if hasattr(error, 'content'):\n            response_content = json.loads(error.content)\n            if (response_content.get('error_code', '') == 'user_not_enrolled'):\n                LOGGER.info('User [%s] not enrolled in course [%s], enterprise enrollment [%d]', username, course_id, enterprise_enrollment.pk)\n                return (None, None, None)\n        LOGGER.error('No grades data found for [%d]: [%s], [%s]', enterprise_enrollment.pk, course_id, username)\n        return (None, None, None)\n    course_end_date = course_details.get('end')\n    if (course_end_date is not None):\n        course_end_date = parse_datetime(course_end_date)\n    now = timezone.now()\n    is_passing = grades_data.get('passed')\n    if ((course_end_date is not None) and (course_end_date < now)):\n        completed_date = course_end_date\n        grade = (self.grade_passing if is_passing else self.grade_failing)\n    elif is_passing:\n        completed_date = now\n        grade = self.grade_passing\n    else:\n        completed_date = None\n        grade = self.grade_incomplete\n    return (completed_date, grade, is_passing)", "docstring": "Collect the learner completion data from the Grades API.\n\nUsed for self-paced courses.\n\nArgs:\nenterprise_enrollment (EnterpriseCourseEnrollment): the enterprise enrollment record for which we need to\ncollect completion/grade data\ncourse_details (dict): the course details for the course in the enterprise enrollment record.\n\nReturns:\ncompleted_date: Date the course was completed, this is None if course has not been completed.\ngrade: Current grade in the course.\nis_passing: Boolean indicating if the grade is a passing grade or not.", "source": "codesearchnet"}
{"code": "def _input_optional(inp):\n        \n        if 'default' in inp.keys():\n            return True\n\n        typ = inp.get('type')\n        if isinstance(typ, six.string_types):\n            return typ.endswith('?')\n        elif isinstance(typ, dict):\n            \n            return False\n        elif isinstance(typ, list):\n            \n            \n            return bool(u'null' in typ)\n        else:\n            raise ValueError('Invalid input \"{}\"'.format(inp.get['id']))", "docstring": "Returns True if a step input parameter is optional.\n\nArgs:\ninp (dict): a dictionary representation of an input.\n\nRaises:\nValueError: The inp provided is not valid.", "source": "juraj-google-style"}
{"code": "def _split_bytecode(bytecode: list[opcodes.Opcode], processed_blocks: set[Block], python_version) -> list[Block]:\n    targets = {op.target for op in bytecode if op.target}\n    blocks = []\n    code = []\n    prev_block: Block = None\n    i = 0\n    while i < len(bytecode):\n        op = bytecode[i]\n        if python_version >= (3, 12) and isinstance(op, opcodes.SEND):\n            if code:\n                prev_block = Block(code)\n                blocks.append(prev_block)\n                code = []\n            new_blocks, i = _preprocess_async_for_and_yield(i, bytecode, prev_block, processed_blocks)\n            blocks.extend(new_blocks)\n            prev_block = blocks[-1]\n            continue\n        code.append(op)\n        if op.no_next() or op.does_jump() or op.pops_block() or (op.next is None) or (op.next in targets and (not isinstance(op.next, opcodes.GET_ANEXT) or python_version < (3, 12))):\n            prev_block = Block(code)\n            blocks.append(prev_block)\n            code = []\n        i += 1\n    return blocks", "docstring": "Given a sequence of bytecodes, return basic blocks.\n\nThis will split the code at \"basic block boundaries\". These occur at\nevery instruction that is jumped to, and after every instruction that jumps\nsomewhere else (or returns / aborts).\n\nArgs:\nbytecode: A list of instances of opcodes.Opcode. (E.g. returned from\nopcodes.dis())\n\nReturns:\nA list of _Block instances.", "source": "github-repos"}
{"code": "def _tf_predict(model_dir, input_csvlines):\n  \n\n  with tf.Graph().as_default(), tf.Session() as sess:\n    input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)\n    csv_tensor_name = list(input_alias_map.values())[0]\n    results = sess.run(fetches=output_alias_map,\n                       feed_dict={csv_tensor_name: input_csvlines})\n\n  \n  \n  \n  if len(input_csvlines) == 1:\n    for k, v in six.iteritems(results):\n      if not isinstance(v, (list, np.ndarray)):\n        results[k] = [v]\n\n  \n  for k, v in six.iteritems(results):\n    if any(isinstance(x, bytes) for x in v):\n      results[k] = [x.decode('utf-8') for x in v]\n\n  return results", "docstring": "Prediction with a tf savedmodel.\n\nArgs:\nmodel_dir: directory that contains a saved model\ninput_csvlines: list of csv strings\n\nReturns:\nDict in the form tensor_name:prediction_list. Note that the value is always\na list, even if there was only 1 row in input_csvlines.", "source": "juraj-google-style"}
{"code": "def moving_average_variables(scope=None):\n    return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)", "docstring": "Returns all variables that maintain their moving averages.\n\nIf an `ExponentialMovingAverage` object is created and the `apply()`\nmethod is called on a list of variables, these variables will\nbe added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.\nThis convenience function returns the contents of that collection.\n\nArgs:\nscope: (Optional.) A string. If supplied, the resulting list is filtered to\ninclude only items whose `name` attribute matches `scope` using\n`re.match`. Items without a `name` attribute are never returned if a scope\nis supplied. The choice of `re.match` means that a `scope` without special\ntokens filters by prefix.\n\nReturns:\nA list of Variable objects.", "source": "github-repos"}
{"code": "def protocol(alias_name, default=None, allow_none=False):\n    \n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    try:\n        return _split_docker_link(alias_name)[0]\n    except KeyError as err:\n        if default or allow_none:\n            return default\n        else:\n            raise err", "docstring": "Get the protocol from the docker link alias or return the default.\n\nArgs:\nalias_name: The docker link alias\ndefault: The default value if the link isn't available\nallow_none: If the return value can be `None` (i.e. optional)\n\nExamples:\nAssuming a Docker link was created with ``docker --link postgres:db``\nand the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.\n\n>>> envitro.docker.protocol('DB')\ntcp", "source": "juraj-google-style"}
{"code": "def compress_dir(path, compression='gz'):\n    for (parent, subdirs, files) in os.walk(path):\n        for f in files:\n            compress_file(os.path.join(parent, f), compression=compression)", "docstring": "Recursively compresses all files in a directory. Note that this\ncompresses all files singly, i.e., it does not create a tar archive. For\nthat, just use Python tarfile class.\n\nArgs:\npath (str): Path to parent directory.\ncompression (str): A compression mode. Valid options are \"gz\" or\n\"bz2\". Defaults to gz.", "source": "codesearchnet"}
{"code": "def WriteTaskStart(self):\n    self._RaiseIfNotWritable()\n    if (self._storage_type != definitions.STORAGE_TYPE_TASK):\n        raise IOError('Unsupported storage type.')\n    task_start = self._task.CreateTaskStart()\n    self._storage_file.WriteTaskStart(task_start)", "docstring": "Writes task start information.\n\nRaises:\nIOError: if the storage type is not supported or\nwhen the storage writer is closed.\nOSError: if the storage type is not supported or\nwhen the storage writer is closed.", "source": "codesearchnet"}
{"code": "def extract_version(exepath, version_arg, word_index=-1, version_rank=3):\n    \n    if isinstance(version_arg, basestring):\n        version_arg = [version_arg]\n    args = [exepath] + version_arg\n\n    stdout, stderr, returncode = _run_command(args)\n    if returncode:\n        raise RezBindError(\"failed to execute %s: %s\\n(error code %d)\"\n                           % (exepath, stderr, returncode))\n\n    stdout = stdout.strip().split('\\n')[0].strip()\n    log(\"extracting version from output: '%s'\" % stdout)\n\n    try:\n        strver = stdout.split()[word_index]\n        toks = strver.replace('.', ' ').replace('-', ' ').split()\n        strver = '.'.join(toks[:version_rank])\n        version = Version(strver)\n    except Exception as e:\n        raise RezBindError(\"failed to parse version from output '%s': %s\"\n                           % (stdout, str(e)))\n\n    log(\"extracted version: '%s'\" % str(version))\n    return version", "docstring": "Run an executable and get the program version.\n\nArgs:\nexepath: Filepath to executable.\nversion_arg: Arg to pass to program, eg \"-V\". Can also be a list.\nword_index: Expect the Nth word of output to be the version.\nversion_rank: Cap the version to this many tokens.\n\nReturns:\n`Version` object.", "source": "juraj-google-style"}
{"code": "def init_args(cls):\n    \n    \n    \n    \n    try:\n        \n        argspec = getargspec(cls)\n    except TypeError:\n        \n        argspec = getargspec(cls.__init__)\n    args = argspec.args\n\n    \n    \n    if args[0] == 'self':\n        args.remove('self')\n\n    return args", "docstring": "Return the __init__ args (minus 'self') for @cls\n\nArgs:\ncls: class, instance or callable\nReturns:\nlist of str, the arguments minus 'self'", "source": "juraj-google-style"}
{"code": "def element_at(self, index):\n    if self.closed():\n        raise ValueError('Attempt to call element_at() on a closed Queryable.')\n    if (index < 0):\n        raise OutOfRangeError('Attempt to use negative index.')\n    try:\n        return self._iterable[index]\n    except IndexError:\n        raise OutOfRangeError('Index out of range.')\n    except TypeError:\n        pass\n    for (i, item) in enumerate(self):\n        if (i == index):\n            return item\n    raise OutOfRangeError('element_at(index) out of range.')", "docstring": "Return the element at ordinal index.\n\nNote: This method uses immediate execution.\n\nArgs:\nindex: The index of the element to be returned.\n\nReturns:\nThe element at ordinal index in the source sequence.\n\nRaises:\nValueError: If the Queryable is closed().\nValueError: If index is out of range.", "source": "codesearchnet"}
{"code": "def HasDefinition(self, name):\n    return name in self.consts or name in self.roles or name in self.states or (name in self.qualifiers) or (name in self.messages) or (name in self.events) or (name in self.transitions)", "docstring": "Whether this module has a named object |name|.\n\nArgs:\nname: The string name of the object to look for.\n\nReturns:\nTrue if this module has an object with name |name|, False otherwise.", "source": "github-repos"}
{"code": "def __init__(self, token_provider):\n    \n    if token_provider is None:\n      raise ValueError(\"token_provider is required\")\n    if not isinstance(token_provider, TokenProvider):\n      raise ValueError(\"token_provider must be instance of TokenProvider\")\n\n    self.__token = token_provider.get_token()", "docstring": "The Neurio API client.\n\nArgs:\ntoken_provider (TokenProvider): object providing authentication services", "source": "juraj-google-style"}
{"code": "def _as_indexed_slices_list(inputs, optimize=True):\n    if not isinstance(inputs, (list, tuple)):\n        raise TypeError(f'Expected a list or tuple, not {type(inputs)}.')\n    outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]\n    with_int32_index = [o.indices for o in outputs if o.indices.dtype == dtypes.int32]\n    if not with_int32_index or len(with_int32_index) == len(outputs):\n        return outputs\n    casted_outputs = []\n    for o in outputs:\n        if o.indices.dtype == dtypes.int32:\n            casted_outputs.append(indexed_slices.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape))\n        else:\n            casted_outputs.append(o)\n    return casted_outputs", "docstring": "Convert all elements of 'inputs' to IndexedSlices.\n\nAdditionally, homogenize the types of all the indices to\neither int32 or int64.\n\nArgs:\ninputs: List containing either Tensor or IndexedSlices objects.\noptimize: if true, attempt to optimize the conversion of each input.\n\nReturns:\nA list of IndexedSlices objects.\n\nRaises:\nTypeError: If 'inputs' is not a list or a tuple.", "source": "github-repos"}
{"code": "def user_warning(channel, user, warnings, max_warnings):\n    \n\n    username = user.name\n    if isinstance(user, discord.Member):\n        if user.nick is not None:\n            username = user.nick\n\n    warning_count_text = \"warnings\" if warnings != 1 else \"warning\"\n    warning_text = \"{} {}\".format(warnings, warning_count_text)\n    result_text = \"at {} you will be banned\".format(max_warnings)\n    if warnings >= max_warnings:\n        result_text = \"you are being banned because you have more than the maximum warnings\"\n\n    \n    gui = ui_embed.UI(\n        channel,\n        \"Warning {}\".format(username),\n        \"You now have {} {}, {}\".format(warning_text, username, result_text),\n        modulename=modulename\n    )\n\n    return gui", "docstring": "Creates an embed UI containing an user warning message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\nuser (discord.User): The user to warn\nwarnings (str): The warnings for the user\nmax_warnings (str): The maximum warnings for the user\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def click_slot(self, slot, right=False):\n    if isinstance(slot, int):\n        slot = self.window.slots[slot]\n    button = (constants.INV_BUTTON_RIGHT if right else constants.INV_BUTTON_LEFT)\n    return self.send_click(windows.SingleClick(slot, button))", "docstring": "Left-click or right-click the slot.\n\nArgs:\nslot (Slot): The clicked slot. Can be ``Slot`` instance or integer.\nSet to ``inventory.cursor_slot``\nfor clicking outside the window.", "source": "codesearchnet"}
{"code": "def read_init() -> Dict[str, List[str]]:\n    with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r', encoding='utf-8', newline='\\n') as f:\n        lines = f.readlines()\n    line_index = 0\n    while not lines[line_index].startswith('if TYPE_CHECKING'):\n        line_index += 1\n    backend_specific_objects = {}\n    while line_index < len(lines):\n        backend = find_backend(lines[line_index])\n        if backend is not None:\n            while not lines[line_index].startswith('    else:'):\n                line_index += 1\n            line_index += 1\n            objects = []\n            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):\n                line = lines[line_index]\n                single_line_import_search = _re_single_line_import.search(line)\n                if single_line_import_search is not None:\n                    objects.extend(single_line_import_search.groups()[0].split(', '))\n                elif line.startswith(' ' * 12):\n                    objects.append(line[12:-2])\n                line_index += 1\n            backend_specific_objects[backend] = objects\n        else:\n            line_index += 1\n    return backend_specific_objects", "docstring": "Read the init and extract backend-specific objects.\n\nReturns:\nDict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend.", "source": "github-repos"}
{"code": "def set(self, time):\n    self._time = time\n    self._pb.sec = int(self._time)\n    self._pb.nsec = int(((self._time - self._pb.sec) * (10 ** 9)))", "docstring": "Sets time in seconds since Epoch\n\nArgs:\ntime (:obj:`float`): time in seconds since Epoch (see time.time())\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def volume( self ):\n        \n        return np.dot( self.matrix[0], np.cross( self.matrix[1], self.matrix[2] ) )", "docstring": "The cell volume.\n\nArgs:\nNone\n\nReturns:\n(float): The cell volume.", "source": "juraj-google-style"}
{"code": "def _is_png(contents, name=None):\n    with ops.name_scope(name, 'is_png'):\n        substr = string_ops.substr(contents, 0, 3)\n        return math_ops.equal(substr, b'\\x89PN', name=name)", "docstring": "Convenience function to check if the 'contents' encodes a PNG image.\n\nArgs:\ncontents: 0-D `string`. The encoded image bytes.\nname: A name for the operation (optional)\n\nReturns:\nA scalar boolean tensor indicating if 'contents' may be a PNG image.\nis_png is susceptible to false positives.", "source": "github-repos"}
{"code": "def join(self, basepath, *paths):\n    return os.path.join(basepath, *paths)", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\nbasepath: string path of the first component of the path\npaths: path components to be added\n\nReturns: full path after combining all the passed components", "source": "github-repos"}
{"code": "def get_config(self):\n    raise NotImplementedError(f'{self} does not implement get_config()')", "docstring": "Returns the config of the regularizer.\n\nAn regularizer config is a Python dictionary (serializable)\ncontaining all configuration parameters of the regularizer.\nThe same regularizer can be reinstantiated later\n(without any saved state) from this configuration.\n\nThis method is optional if you are just training and executing models,\nexporting to and from SavedModels, or using weight checkpoints.\n\nThis method is required for Keras `model_to_estimator`, saving and\nloading models to HDF5 formats, Keras model cloning, some visualization\nutilities, and exporting models to and from JSON.\n\nReturns:\nPython dictionary.", "source": "github-repos"}
{"code": "def ExpandSuperClasses(self, t):\n    superclasses = set()\n    self._CollectSuperclasses(t, superclasses)\n    return superclasses", "docstring": "Generate a list of all (known) superclasses for a type.\n\nArguments:\nt: A type name. E.g. \"int\".\n\nReturns:\nA set of types. This set includes t as well as all its superclasses. For\nexample, this will return \"bool\", \"int\" and \"object\" for \"bool\".", "source": "github-repos"}
{"code": "def truncate_repetitions(text: str, min_len: int=30) -> str:\n    text_lower = text.lower()\n    text_length = len(text_lower)\n    if text_length < 2 * min_len:\n        return text\n    max_repetition_length = None\n    for repetition_length in range(min_len, int(text_length / 2)):\n        same = True\n        for i in range(0, repetition_length):\n            if text_lower[text_length - repetition_length - i - 1] != text_lower[text_length - i - 1]:\n                same = False\n                break\n        if same:\n            max_repetition_length = repetition_length\n    if max_repetition_length is None:\n        return text\n    lcs = text_lower[-max_repetition_length:]\n    substituted_text = text\n    substituted_text_lower = text_lower\n    while substituted_text_lower.endswith(lcs):\n        substituted_text = substituted_text[:-max_repetition_length]\n        substituted_text_lower = substituted_text_lower[:-max_repetition_length]\n    repeating_tail = text_lower[len(substituted_text_lower):]\n    substituted_text_lower_out = substituted_text_lower\n    while True:\n        sentence_end = find_next_punctuation(text_lower, len(substituted_text_lower_out))\n        sentence_start = find_next_punctuation(text_lower[::-1], len(substituted_text_lower_out))\n        if sentence_end and sentence_start:\n            sentence = text_lower[sentence_start:sentence_end]\n            substituted_text_lower_out = text_lower[:sentence_end + 1]\n            if sentence in repeating_tail:\n                break\n        else:\n            break\n    text_out = text[:len(substituted_text_lower_out)]\n    return text_out", "docstring": "Attempt to truncate repeating segments in the input string.\n\nThis function looks for the longest repeating substring at the end of the input string and truncates it to appear\nonly once. To be considered for removal, repetitions need to be continuous.\n\nArgs:\ntext (`str`):\nThe input raw prediction to be truncated.\nmin_len (int):\nThe minimum length of the repeating segment.\n\nReturns:\n`str`: The input string with repeated segments truncated.", "source": "github-repos"}
{"code": "def __init__(self, time: Timestamp, duration: Union[Duration, timedelta],\n                 operation: ops.Operation) -> None:\n        \n        self.time = time\n        self.duration = Duration.create(duration)\n        self.operation = operation", "docstring": "Initializes the scheduled operation.\n\nArgs:\ntime: When the operation starts.\nduration: How long the operation lasts.\noperation: The operation.", "source": "juraj-google-style"}
{"code": "def _ParseKey(self, parser_mediator, registry_key):\n    \n    matching_plugin = None\n\n    normalized_key_path = self._NormalizeKeyPath(registry_key.path)\n    if self._path_filter.CheckPath(normalized_key_path):\n      matching_plugin = self._plugin_per_key_path[normalized_key_path]\n    else:\n      for plugin in self._plugins_without_key_paths:\n        if self._CanProcessKeyWithPlugin(registry_key, plugin):\n          matching_plugin = plugin\n          break\n\n    if not matching_plugin:\n      matching_plugin = self._default_plugin\n\n    if matching_plugin:\n      self._ParseKeyWithPlugin(parser_mediator, registry_key, matching_plugin)", "docstring": "Parses the Registry key with a specific plugin.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nregistry_key (dfwinreg.WinRegistryKey): Windwos Registry key.", "source": "juraj-google-style"}
{"code": "def _compute_valid(self):\n    if (self._dimension != 2):\n        raise NotImplementedError('Validity check only implemented in R^2')\n    poly_sign = None\n    if (self._degree == 1):\n        first_deriv = (self._nodes[(:, 1:)] - self._nodes[(:, :(- 1))])\n        poly_sign = _SIGN(np.linalg.det(first_deriv))\n    elif (self._degree == 2):\n        bernstein = _surface_helpers.quadratic_jacobian_polynomial(self._nodes)\n        poly_sign = _surface_helpers.polynomial_sign(bernstein, 2)\n    elif (self._degree == 3):\n        bernstein = _surface_helpers.cubic_jacobian_polynomial(self._nodes)\n        poly_sign = _surface_helpers.polynomial_sign(bernstein, 4)\n    else:\n        raise _helpers.UnsupportedDegree(self._degree, supported=(1, 2, 3))\n    return (poly_sign == 1)", "docstring": "r\"\"\"Determines if the current surface is \"valid\".\n\nDoes this by checking if the Jacobian of the map from the\nreference triangle is everywhere positive.\n\nReturns:\nbool: Flag indicating if the current surface is valid.\n\nRaises:\nNotImplementedError: If the surface is in a dimension other\nthan :math:`\\mathbf{R}^2`.\n.UnsupportedDegree: If the degree is not 1, 2 or 3.", "source": "codesearchnet"}
{"code": "def _parse_config(self, requires_cfg=True):\n        \n        if len(self.config_paths) > 0:\n            try:\n                self._find_config()\n            except BisonError:\n                if not requires_cfg:\n                    return\n                raise\n            try:\n                with open(self.config_file, 'r') as f:\n                    parsed = self._fmt_to_parser[self.config_format](f)\n            except Exception as e:\n                raise BisonError(\n                    'Failed to parse config file: {}'.format(self.config_file)\n                ) from e\n\n            \n            self._full_config = None\n            self._config = parsed", "docstring": "Parse the configuration file, if one is configured, and add it to\nthe `Bison` state.\n\nArgs:\nrequires_cfg (bool): Specify whether or not parsing should fail\nif a config file is not found. (default: True)", "source": "juraj-google-style"}
{"code": "def save_data_files(vr, bs, prefix=None, directory=None):\n    filename = ('{}_band.dat'.format(prefix) if prefix else 'band.dat')\n    directory = (directory if directory else '.')\n    filename = os.path.join(directory, filename)\n    if bs.is_metal():\n        zero = vr.efermi\n    else:\n        zero = bs.get_vbm()['energy']\n    with open(filename, 'w') as f:\n        header = '\n        f.write(header)\n        for band in bs.bands[Spin.up]:\n            for (d, e) in zip(bs.distance, band):\n                f.write('{:.8f} {:.8f}\\n'.format(d, (e - zero)))\n            f.write('\\n')\n        if bs.is_spin_polarized:\n            for band in bs.bands[Spin.down]:\n                for (d, e) in zip(bs.distance, band):\n                    f.write('{:.8f} {:.8f}\\n'.format(d, (e - zero)))\n                f.write('\\n')\n    return filename", "docstring": "Write the band structure data files to disk.\n\nArgs:\nvs (`Vasprun`): Pymatgen `Vasprun` object.\nbs (`BandStructureSymmLine`): Calculated band structure.\nprefix (`str`, optional): Prefix for data file.\ndirectory (`str`, optional): Directory in which to save the data.\n\nReturns:\nThe filename of the written data file.", "source": "codesearchnet"}
{"code": "def _merge_tensors(t1, t2, name, validate):\n    if t1 is None:\n        return (t2, False)\n    elif t2 is None:\n        return (t1, False)\n    elif t1 is t2:\n        return (t1, True)\n    else:\n        err_msg = 'RowPartition._merge_precomputed_encodings: partitions have incompatible %s' % name\n        if not t1.shape.is_compatible_with(t2.shape):\n            raise ValueError(err_msg)\n        if validate:\n            checks = [check_ops.assert_equal(t1, t2, message=err_msg)]\n            return (control_flow_ops.with_dependencies(checks, t1), True)\n        else:\n            return (t1, False)", "docstring": "Merge two optional Tensors with equal values into a single Tensor.\n\nArgs:\nt1: tf.Tensor or None\nt2: tf.Tensor or None\nname: A name for the tensors (for error messages)\nvalidate: If true, then check that `t1` is compatible with `t2` (if both are\nnon-None).\n\nReturns:\nA pair `(merged_value, validated)`:\n* `merged_value` is `t1` if it is not None; or `t2` otherwise.\n* `validated` is true if we validated that t1 and t2 are equal (either\nby adding a check, or because t1 is t2).", "source": "github-repos"}
{"code": "def _get_combined_properties(self, dev):\n    return (dev.job if dev.job is not None else self.job, dev.replica if dev.replica is not None else self.replica, dev.task if dev.task is not None else self.task, dev.device_type if dev.device_type is not None else self.device_type, dev.device_index if dev.device_index is not None else self.device_index)", "docstring": "Combine the current DeviceSpec with another DeviceSpec.\n\nThe combination of DeviceSpecs is will give priority to dev.\n\nArgs:\ndev: a `DeviceSpec`\n\nReturns:\nA tuple of (job, replica, task, device_type, device_index) which\nrepresents the combination of self and dev.", "source": "github-repos"}
{"code": "def lookup(self, iterable, index=0, gather=False, edit_distance=0, max_edit_distance=0, match_threshold=0.0, matched_length=0):\n        \n        if self.is_terminal:\n            if index == len(iterable) or \\\n                    (gather and index < len(iterable) and iterable[index] == ' '):  \n                confidence = float(len(self.key) - edit_distance) / float(max(len(self.key), index))\n                if confidence > match_threshold:\n                    yield {\n                        'key': self.key,\n                        'match': iterable[:index],\n                        'data': self.data,\n                        'confidence': confidence * self.weight\n                    }\n\n        if index < len(iterable) and iterable[index] in self.children:\n            for result in self.children[iterable[index]]\\\n                    .lookup(iterable, index + 1, gather=gather,\n                            edit_distance=edit_distance, max_edit_distance=max_edit_distance, matched_length=matched_length + 1):\n                yield result\n\n        \n        potential_confidence = float(index - edit_distance + (max_edit_distance - edit_distance)) / \\\n                               (float(index) + (max_edit_distance - edit_distance)) if index + max_edit_distance - edit_distance > 0 else 0.0\n        if edit_distance < max_edit_distance and potential_confidence > match_threshold:\n            for child in list(self.children):\n                if index >= len(iterable) or child != iterable[index]:\n                    \n                    for result in self.children[child]\\\n                        .lookup(iterable, index + 1, gather=gather,\n                                edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):\n                        yield result\n                    \n                    for result in self.children[child]\\\n                        .lookup(iterable, index + 2, gather=gather,\n                                edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):\n                        yield result\n                    \n                    for result in self.children[child]\\\n                        .lookup(iterable, index, gather=gather,\n                                edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):\n                        yield result", "docstring": "TODO: Implement trie lookup with edit distance\n\nArgs:\niterable(list?): key used to find what is requested this could\nbe a generator.\nindex(int): index of what is requested\ngather(bool): of weather to gather or not\nedit_distance(int): the distance -- currently not used\nmax_edit_distance(int): the max distance -- not currently used\n\nyields:\nobject: yields the results of the search", "source": "juraj-google-style"}
{"code": "def prepare_for_tokenization(self, text: str, is_split_into_words: bool=False, **kwargs) -> tuple[str, dict[str, Any]]:\n    return (text, kwargs)", "docstring": "Performs any necessary transformations before tokenization.\n\nThis method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the\n`kwargs` at the end of the encoding process to be sure all the arguments have been used.\n\nArgs:\ntext (`str`):\nThe text to prepare.\nis_split_into_words (`bool`, *optional*, defaults to `False`):\nWhether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the\ntokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)\nwhich it will tokenize. This is useful for NER or token classification.\nkwargs (`Dict[str, Any]`, *optional*):\nKeyword arguments to use for the tokenization.\n\nReturns:\n`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.", "source": "github-repos"}
{"code": "def get_user_roles(self, user):\n        \n        self.project_service.set_auth(self._token_project)\n        return self.project_service.get_user_roles(user)", "docstring": "Get roles associated with the given user.\n\nArgs:\nuser (string): User name.\n\nReturns:\n(list): List of roles that user has.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"}
{"code": "def retrieve(self, block_height, headers=None):\n        \n        path = self.path + block_height\n        return self.transport.forward_request(\n            method='GET', path=path, headers=None)", "docstring": "Retrieves the block with the given ``block_height``.\n\nArgs:\nblock_height (str): height of the block to retrieve.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: The block with the given ``block_height``.", "source": "juraj-google-style"}
{"code": "def _is_node_return_ended(self, node):\n    if isinstance(node, astroid.Return):\n        return True\n    if isinstance(node, astroid.Call):\n        try:\n            funcdef_node = node.func.inferred()[0]\n            if self._is_function_def_never_returning(funcdef_node):\n                return True\n        except astroid.InferenceError:\n            pass\n    if isinstance(node, astroid.While):\n        return True\n    if isinstance(node, astroid.Raise):\n        if (not node.exc):\n            return True\n        if (not utils.is_node_inside_try_except(node)):\n            return True\n        exc = utils.safe_infer(node.exc)\n        if ((exc is None) or (exc is astroid.Uninferable)):\n            return False\n        exc_name = exc.pytype().split('.')[(- 1)]\n        handlers = utils.get_exception_handlers(node, exc_name)\n        handlers = (list(handlers) if (handlers is not None) else [])\n        if handlers:\n            return any((self._is_node_return_ended(_handler) for _handler in handlers))\n        return True\n    if isinstance(node, astroid.If):\n        is_orelse_returning = any((self._is_node_return_ended(_ore) for _ore in node.orelse if (not isinstance(_ore, astroid.FunctionDef))))\n        is_if_returning = any((self._is_node_return_ended(_ifn) for _ifn in node.body if (not isinstance(_ifn, astroid.FunctionDef))))\n        return (is_if_returning and is_orelse_returning)\n    return any((self._is_node_return_ended(_child) for _child in node.get_children() if (not isinstance(_child, astroid.ExceptHandler))))", "docstring": "Check if the node ends with an explicit return statement.\n\nArgs:\nnode (astroid.NodeNG): node to be checked.\n\nReturns:\nbool: True if the node ends with an explicit statement, False otherwise.", "source": "codesearchnet"}
{"code": "def __init__(self, token_list):\n        \n        self.__token_arr = np.array(list(set(token_list)))", "docstring": "Initialize.\n\nArgs:\ntoken_list:    The list of all tokens.", "source": "juraj-google-style"}
{"code": "def shared_symbol_table(name, version, symbols, imports=None):\n    return SymbolTable(table_type=SHARED_TABLE_TYPE, symbols=symbols, name=name, version=version, imports=imports)", "docstring": "Constructs a shared symbol table.\n\nArgs:\nname (unicode): The name of the shared symbol table.\nversion (int): The version of the shared symbol table.\nsymbols (Iterable[unicode]): The symbols to associate with the table.\nimports (Optional[Iterable[SymbolTable]): The shared symbol tables to inject into this one.\n\nReturns:\nSymbolTable: The constructed table.", "source": "codesearchnet"}
{"code": "def GrabFileSystem(self, path_spec):\n    \n    identifier = self._GetFileSystemCacheIdentifier(path_spec)\n    self._file_system_cache.GrabObject(identifier)", "docstring": "Grabs a cached file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.", "source": "juraj-google-style"}
{"code": "def image_format(value):\n    \n\n    if value.image.format.upper() not in constants.ALLOWED_IMAGE_FORMATS:\n        raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT)", "docstring": "Confirms that the uploaded image is of supported format.\n\nArgs:\nvalue (File): The file with an `image` property containing the image\n\nRaises:\ndjango.forms.ValidationError", "source": "juraj-google-style"}
{"code": "def add_variable(self, shape, initializer='zeros', dtype=None, aggregation='none', layout=None, name=None):\n    self._check_super_called()\n    initializer = initializers.get(initializer)\n    with backend.name_scope(self.name, caller=self):\n        variable = backend.Variable(initializer=initializer, shape=shape, dtype=dtype, trainable=False, aggregation=aggregation, layout=layout, name=name)\n    self._track_variable(variable)\n    return variable", "docstring": "Add a variable to the optimizer.\n\nArgs:\nshape: Shape tuple for the variable. Must be fully-defined\n(no `None` entries).\ninitializer: Initializer object to use to populate the initial\nvariable value, or string name of a built-in initializer\n(e.g. `\"random_normal\"`). Defaults to `\"zeros\"`.\ndtype: Dtype of the variable to create, e.g. `\"float32\"`. If\nunspecified, defaults to the `keras.backend.floatx()`.\naggregation: Optional string, one of `None`, `\"none\"`, `\"mean\"`,\n`\"sum\"` or `\"only_first_replica\"`. Annotates the variable with\nthe type of multi-replica aggregation to be used for this\nvariable when writing custom data parallel training loops.\nDefaults to `\"none\"`.\nlayout: Optional tensor layout.  Defaults to `None`.\nname: String name of the variable. Useful for debugging purposes.\n\nReturns:\nAn optimizer variable, in the format of `keras.Variable`.", "source": "github-repos"}
{"code": "def assert_true(expr, msg, extras=None):\n    if not expr:\n        fail(msg, extras)", "docstring": "Assert an expression evaluates to true, otherwise fail the test.\n\nArgs:\nexpr: The expression that is evaluated.\nmsg: A string explaining the details in case of failure.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"}
{"code": "def status(self, targets, jobs=None, remote=None, show_checksums=False):\n    cloud = self._get_cloud(remote, 'status')\n    return self.repo.cache.local.status(targets, jobs=jobs, remote=cloud, show_checksums=show_checksums)", "docstring": "Check status of data items in a cloud-agnostic way.\n\nArgs:\ntargets (list): list of targets to check status for.\njobs (int): number of jobs that can be running simultaneously.\nremote (dvc.remote.base.RemoteBase): optional remote to compare\ntargets to. By default remote from core.remote config option\nis used.\nshow_checksums (bool): show checksums instead of file names in\ninformation messages.", "source": "codesearchnet"}
{"code": "def validate_sqs_policy(self, accounts):\n        \n        sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns)\n        sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns)\n        sqs_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns))\n        session = get_aws_session(sqs_account)\n\n        sqs = session.client('sqs', region_name=sqs_queue_region)\n        sqs_queue_url = sqs.get_queue_url(QueueName=sqs_queue_name, QueueOwnerAWSAccountId=sqs_account.account_number)\n        sqs_attribs = sqs.get_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy'])\n\n        policy = json.loads(sqs_attribs['Attributes']['Policy'])\n\n        for account in accounts:\n            arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name)\n            if arn not in policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']:\n                self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))\n                policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn'].append(arn)\n\n        sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], Attributes={'Policy': json.dumps(policy)})", "docstring": "Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue\n\nArgs:\naccounts (`list` of :obj:`Account`): List of accounts\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def get_orbital_derivative_between_states(self, band_i, band_j, kpoint, spin, cart_dir):\n        \n        if band_i < 0 or band_i > self.nbands - 1 or band_j < 0 or band_j > self.nelect - 1:\n            raise ValueError(\"Band index out of bounds\")\n        if kpoint > self.nkpoints:\n            raise ValueError(\"K-point index out of bounds\")\n        if cart_dir > 2 or cart_dir < 0:\n            raise ValueError(\"cart_dir index out of bounds\")\n\n        return self.cder_data[band_i, band_j, kpoint, spin, cart_dir]", "docstring": "Method returning a value\nbetween bands band_i and band_j for k-point index, spin-channel and cartesian direction.\nArgs:\nband_i (Integer): Index of band i\nband_j (Integer): Index of band j\nkpoint (Integer): Index of k-point\nspin   (Integer): Index of spin-channel (0 or 1)\ncart_dir (Integer): Index of cartesian direction (0,1,2)\n\nReturns:\na float value", "source": "juraj-google-style"}
{"code": "def get_graph(self, run_key, device_name, debug=False):\n    return self.get_graphs(run_key, debug=debug).get(device_name, None)", "docstring": "Get the runtime GraphDef proto associated with a run key and a device.\n\nArgs:\nrun_key: A Session.run kay.\ndevice_name: Name of the device in question.\ndebug: Whether the debugger-decoratedgraph is to be retrieved.\n\nReturns:\nA `GraphDef` proto.", "source": "codesearchnet"}
{"code": "def ws45(msg):\n    \n    d = hex2bin(data(msg))\n    if d[3] == '0':\n        return None\n\n    ws = bin2int(d[4:6])\n    return ws", "docstring": "Wind shear.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: Wind shear level. 0=NIL, 1=Light, 2=Moderate, 3=Severe", "source": "juraj-google-style"}
{"code": "def request_file(link, outfile, force_rerun_flag=False):\n    \n    if force_rerun(flag=force_rerun_flag, outfile=outfile):\n        req = requests.get(link)\n        if req.status_code == 200:\n            with open(outfile, 'w') as f:\n                f.write(req.text)\n            log.debug('Loaded and saved {} to {}'.format(link, outfile))\n        else:\n            log.error('{}: request error {}'.format(link, req.status_code))\n    return outfile", "docstring": "Download a file given a URL if the outfile does not exist already.\n\nArgs:\nlink (str): Link to download file.\noutfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does\nexist, unless force_rerun_flag is True.\nforce_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.\n\nReturns:\nstr: Path to downloaded file.", "source": "juraj-google-style"}
{"code": "def bpe_decode(self, sequences):\n    return self.bpe_tokenizer.batch_decode(sequences)", "docstring": "Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer.\n\nArgs:\nsequences (`torch.Tensor`):\nList of tokenized input ids.\nReturns:\n`List[str]`: The list of bpe decoded sentences.", "source": "github-repos"}
{"code": "def pnl(self, account='', modelCode='') -> List[PnL]:\n        \n        return [v for v in self.wrapper.pnls.values() if\n                (not account or v.account == account) and\n                (not modelCode or v.modelCode == modelCode)]", "docstring": "List of subscribed :class:`.PnL` objects (profit and loss),\noptionally filtered by account and/or modelCode.\n\nThe :class:`.PnL` objects are kept live updated.\n\nArgs:\naccount: If specified, filter for this account name.\nmodelCode: If specified, filter for this account model.", "source": "juraj-google-style"}
{"code": "def inverse_event_shape(self, output_shape):\n    return self._inverse_event_shape(output_shape)", "docstring": "Shape of a single sample from a single batch as a `TensorShape`.\n\nSame meaning as `inverse_event_shape_tensor`. May be only partially defined.\n\nArgs:\noutput_shape: `TensorShape` indicating event-portion shape passed into\n`inverse` function.\n\nReturns:\ninverse_event_shape_tensor: `TensorShape` indicating event-portion shape\nafter applying `inverse`. Possibly unknown.", "source": "github-repos"}
{"code": "def matches_any(patterns: List[Pattern[str]], line: str) -> bool:\n    stripped_line = line.strip()\n    for pattern in patterns:\n        if pattern.match(stripped_line):\n            return True\n    return False", "docstring": "Checks if the line matches any of the given patterns.\n\nArgs:\npatterns: A list of compiled regular expression patterns.\nline: The line to check for matches.\n\nReturns:\nTrue if the line matches any of the patterns, False otherwise.", "source": "github-repos"}
{"code": "def read_lines(self, max_lines=None):\n    if (max_lines is None):\n        return self.read_stream().split('\\n')\n    max_to_read = self.metadata.size\n    bytes_to_read = min((100 * max_lines), self.metadata.size)\n    while True:\n        content = self.read_stream(byte_count=bytes_to_read)\n        lines = content.split('\\n')\n        if ((len(lines) > max_lines) or (bytes_to_read >= max_to_read)):\n            break\n        bytes_to_read = min((bytes_to_read * 10), max_to_read)\n    del lines[(- 1)]\n    return lines[0:max_lines]", "docstring": "Reads the content of this object as text, and return a list of lines up to some max.\n\nArgs:\nmax_lines: max number of lines to return. If None, return all lines.\nReturns:\nThe text content of the object as a list of lines.\nRaises:\nException if there was an error requesting the object's content.", "source": "codesearchnet"}
{"code": "def _common_prefix(self, m):\n    if not m:\n        return ''\n    s1 = min(m)\n    s2 = max(m)\n    for i, c in enumerate(s1):\n        if c != s2[i]:\n            return s1[:i]\n    return s1", "docstring": "Given a list of str, returns the longest common prefix.\n\nArgs:\nm: (list of str) A list of strings.\n\nReturns:\n(str) The longest common prefix.", "source": "github-repos"}
{"code": "def files_upload(\n        self, *, file: Union[str, IOBase] = None, content: str = None, **kwargs\n    ) -> SlackResponse:\n        \n        if file is None and content is None:\n            raise e.SlackRequestError(\"The file or content argument must be specified.\")\n        if file is not None and content is not None:\n            raise e.SlackRequestError(\n                \"You cannot specify both the file and the content argument.\"\n            )\n\n        if file:\n            return self.api_call(\"files.upload\", files={\"file\": file}, data=kwargs)\n        elif content:\n            data = kwargs.copy()\n            data.update({\"content\": content})\n            return self.api_call(\"files.upload\", data=data)", "docstring": "Uploads or creates a file.\n\nArgs:\nfile (str): Supply a file path.\nwhen you'd like to upload a specific file. e.g. 'dramacat.gif'\ncontent (str): Supply content when you'd like to create an\neditable text file containing the specified text. e.g. 'launch plan'\nRaises:\nSlackRequestError: If niether or both the `file` and `content` args are specified.", "source": "juraj-google-style"}
{"code": "def _cast_dict(self, data_dict):\n    for (key, value) in data_dict.iteritems():\n        data_dict[key] = self._cast_value(value)\n    if ('resp_body_data' in data_dict):\n        del data_dict['resp_body_data']\n    return data_dict", "docstring": "Internal method that makes sure any dictionary elements\nare properly cast into the correct types, instead of\njust treating everything like a string from the csv file.\n\nArgs:\ndata_dict: dictionary containing bro log data.\n\nReturns:\nCleaned Data dict.", "source": "codesearchnet"}
{"code": "def register_date_conversion_handler(date_specifier_patterns):\n\n    def _decorator(func):\n        global DATE_SPECIFIERS_CONVERSION_HANDLERS\n        DATE_SPECIFIERS_CONVERSION_HANDLERS[DATE_SPECIFIERS_REGEXES[date_specifier_patterns]] = func\n        return func\n    return _decorator", "docstring": "Decorator for registering handlers that convert text dates to dates.\n\nArgs:\ndate_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered", "source": "codesearchnet"}
{"code": "def set_trunk_groups(self, intf, value=None, default=False, disable=False):\n    if default:\n        cmd = 'default switchport trunk group'\n        return self.configure_interface(intf, cmd)\n    if disable:\n        cmd = 'no switchport trunk group'\n        return self.configure_interface(intf, cmd)\n    current_value = self.get(intf)['trunk_groups']\n    failure = False\n    value = make_iterable(value)\n    for name in set(value).difference(current_value):\n        if (not self.add_trunk_group(intf, name)):\n            failure = True\n    for name in set(current_value).difference(value):\n        if (not self.remove_trunk_group(intf, name)):\n            failure = True\n    return (not failure)", "docstring": "Configures the switchport trunk group value\n\nArgs:\nintf (str): The interface identifier to configure.\nvalue (str): The set of values to configure the trunk group\ndefault (bool): Configures the trunk group default value\ndisable (bool): Negates all trunk group settings\n\nReturns:\nTrue if the config operation succeeds otherwise False", "source": "codesearchnet"}
{"code": "def __init__(self, ethertype=None):\n        \n        super().__init__(action_type=ActionType.OFPAT_POP_MPLS)\n        self.ethertype = ethertype", "docstring": "Create an ActionPopMPLS with the optional parameters below.\n\nArgs:\nethertype (int): indicates the Ethertype of the payload.", "source": "juraj-google-style"}
{"code": "def _expand_dims(x, input_shape, output_shape):\n  \n  verify_no_new_dims([output_shape], input_shape)\n  if input_shape == output_shape or input_shape.ndims == 0:\n    return x\n  perm = [input_shape.dims.index(d) for d in output_shape.dims\n          if d in input_shape.dims]\n  x = tf.transpose(x, perm)\n  for i, d in enumerate(output_shape.dims):\n    if d not in input_shape.dims:\n      x = tf.expand_dims(x, i)\n  return x", "docstring": "Expand dimensions and transpose if necessary.\n\nArgs:\nx: a tf.Tensor\ninput_shape: a Shape\noutput_shape: a Shape whose dimensions are a superset of\nthose in input_shape\n\nReturns:\na tf.Tensor", "source": "juraj-google-style"}
{"code": "def format(self, exclude_class=False):\n        \n\n        if exclude_class:\n            msg = self.msg\n        else:\n            msg = \"%s: %s\" % (self.__class__.__name__, self.msg)\n\n        if len(self.params) != 0:\n            paramstring = \"\\n\".join([str(key) + \": \" + str(val) for key, val in self.params.items()])\n            msg += \"\\nAdditional Information:\\n\" + paramstring\n\n        return msg", "docstring": "Format this exception as a string including class name.\n\nArgs:\nexclude_class (bool): Whether to exclude the exception class\nname when formatting this exception\n\nReturns:\nstring: a multiline string with the message, class name and\nkey value parameters passed to create the exception.", "source": "juraj-google-style"}
{"code": "def websocket_url_for_server_url(url):\n    \n    if url.startswith(\"http:\"):\n        reprotocoled = \"ws\" + url[4:]\n    elif url.startswith(\"https:\"):\n        reprotocoled = \"wss\" + url[5:]\n    else:\n        raise ValueError(\"URL has unknown protocol \" + url)\n    if reprotocoled.endswith(\"/\"):\n        return reprotocoled + \"ws\"\n    else:\n        return reprotocoled + \"/ws\"", "docstring": "Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into\nthe appropriate ``ws(s)`` URL\n\nArgs:\nurl (str):\nAn ``http(s)`` URL\n\nReturns:\nstr:\nThe corresponding ``ws(s)`` URL ending in ``/ws``\n\nRaises:\nValueError:\nIf the input URL is not of the proper form.", "source": "juraj-google-style"}
{"code": "def get_program(self, program_resource_name: str) -> Dict:\n        \n        return self.service.projects().programs().get(\n            name=program_resource_name).execute()", "docstring": "Returns the previously created quantum program.\n\nParams:\nprogram_resource_name: A string of the form\n`projects/project_id/programs/program_id`.\n\nReturns:\nA dictionary containing the metadata and the program.", "source": "juraj-google-style"}
{"code": "def get(self, recipe_id):\n        \n        self.logger.debug('Retrieving recipe by id: ' + recipe_id)\n        url = '%(base_url)s/recipe/%(recipe_id)s' % {\n            'base_url': self.base_url, 'recipe_id': recipe_id\n        }\n        r = self.gbdx_connection.get(url)\n        r.raise_for_status()\n        return r.json()", "docstring": "Retrieves an AnswerFactory Recipe by id\n\nArgs:\nrecipe_id The id of the recipe\n\nReturns:\nA JSON representation of the recipe", "source": "juraj-google-style"}
{"code": "def load_json(filename, **kwargs):\n    \n\n    with open(filename, 'r', encoding='utf-8') as f:\n        return json.load(f, **kwargs)", "docstring": "Load a JSON object from the specified file.\n\nArgs:\nfilename: Path to the input JSON file.\n**kwargs: Additional arguments to `json.load`.\n\nReturns:\nThe object deserialized from JSON.", "source": "juraj-google-style"}
{"code": "def checksum(self, path):\n    try:\n        return self._blobstorageIO().checksum(path)\n    except Exception as e:\n        raise BeamIOError('Checksum operation failed', {path, e})", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nArgs:\npath: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def with_params(self, **kwargs):\n    \n    if _TEST_MODE:\n      logging.info(\n          'Setting runtime parameters for %s\n          self, self.pipeline_id, kwargs)\n      return self\n\n    if self.pipeline_id is not None:\n      raise UnexpectedPipelineError(\n          'May only call with_params() on a Pipeline that has not yet '\n          'been scheduled for execution.')\n\n    ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')\n    for name, value in kwargs.iteritems():\n      if name not in ALLOWED:\n        raise TypeError('Unexpected keyword: %s=%r' % (name, value))\n      setattr(self, name, value)\n    return self", "docstring": "Modify various execution parameters of a Pipeline before it runs.\n\nThis method has no effect in test mode.\n\nArgs:\nkwargs: Attributes to modify on this Pipeline instance before it has\nbeen executed.\n\nReturns:\nThis Pipeline instance, for easy chaining.", "source": "juraj-google-style"}
{"code": "def orthonormalize_righthanded(basis):\n    (v1, v2) = (basis[(:, 0)], basis[(:, 1)])\n    e1 = normalize(v1)\n    e3 = normalize(np.cross(e1, v2))\n    e2 = normalize(np.cross(e3, e1))\n    return np.array([e1, e2, e3]).T", "docstring": "Orthonormalizes righthandedly a given 3D basis.\n\nThis functions returns a right handed orthonormalize_righthandedd basis.\nSince only the first two vectors in the basis are used, it does not matter\nif you give two or three vectors.\n\nRight handed means, that:\n\n.. math::\n\n\\\\vec{e_1} \\\\times \\\\vec{e_2} &= \\\\vec{e_3} \\\\\\\\\n\\\\vec{e_2} \\\\times \\\\vec{e_3} &= \\\\vec{e_1} \\\\\\\\\n\\\\vec{e_3} \\\\times \\\\vec{e_1} &= \\\\vec{e_2} \\\\\\\\\n\nArgs:\nbasis (np.array): An array of shape = (3,2) or (3,3)\n\nReturns:\nnew_basis (np.array): A right handed orthonormalized basis.", "source": "codesearchnet"}
{"code": "def _unflatten_dict(flat_dict, prefixes):\n    original_dict = {}\n    for (key, value) in flat_dict.items():\n        prefix_found = False\n        for prefix in prefixes:\n            full_prefix = (('__' + prefix) + '_')\n            if key.startswith(full_prefix):\n                if (prefix not in original_dict):\n                    original_dict[prefix] = {}\n                original_dict[prefix][key[len(full_prefix):]] = value\n                prefix_found = True\n                break\n        if (not prefix_found):\n            original_dict[key] = value\n    return original_dict", "docstring": "Returns a dict of dicts if any prefixes match keys in the flat dict.\n\nThe function handles the case where the prefix may not be a dict.\n\nArgs:\nflat_dict: A dict without any nesting.\nprefixes: A list of strings which may have been dicts in the\noriginal structure.", "source": "codesearchnet"}
{"code": "def from_string(cls, model_id, default_project=None):\n    (proj, dset, model) = _helpers._parse_3_part_id(model_id, default_project=default_project, property_name='model_id')\n    return cls.from_api_repr({'projectId': proj, 'datasetId': dset, 'modelId': model})", "docstring": "Construct a model reference from model ID string.\n\nArgs:\nmodel_id (str):\nA model ID in standard SQL format. If ``default_project``\nis not specified, this must included a project ID, dataset\nID, and model ID, each separated by ``.``.\ndefault_project (str):\nOptional. The project ID to use when ``model_id`` does not\ninclude a project ID.\n\nReturns:\ngoogle.cloud.bigquery.model.ModelReference:\nModel reference parsed from ``model_id``.\n\nRaises:\nValueError:\nIf ``model_id`` is not a fully-qualified table ID in\nstandard SQL format.", "source": "codesearchnet"}
{"code": "def get_pourbaix_plot(self, limits=None, title=\"\",\n                          label_domains=True, plt=None):\n        \n        if limits is None:\n            limits = [[-2, 16], [-3, 3]]\n\n        plt = plt or pretty_plot(16)\n\n        xlim = limits[0]\n        ylim = limits[1]\n\n        h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],\n                               [xlim[1], -xlim[1] * PREFAC]])\n        o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],\n                               [xlim[1], -xlim[1] * PREFAC + 1.23]])\n        neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])\n        V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])\n\n        ax = plt.gca()\n        ax.set_xlim(xlim)\n        ax.set_ylim(ylim)\n        lw = 3\n        plt.plot(h_line[0], h_line[1], \"r--\", linewidth=lw)\n        plt.plot(o_line[0], o_line[1], \"r--\", linewidth=lw)\n        plt.plot(neutral_line[0], neutral_line[1], \"k-.\", linewidth=lw)\n        plt.plot(V0_line[0], V0_line[1], \"k-.\", linewidth=lw)\n\n        for entry, vertices in self._pd._stable_domain_vertices.items():\n            center = np.average(vertices, axis=0)\n            x, y = np.transpose(np.vstack([vertices, vertices[0]]))\n            plt.plot(x, y, 'k-', linewidth=lw)\n            if label_domains:\n                plt.annotate(generate_entry_label(entry), center, ha='center',\n                             va='center', fontsize=20, color=\"b\")\n\n        plt.xlabel(\"pH\")\n        plt.ylabel(\"E (V)\")\n        plt.title(title, fontsize=20, fontweight='bold')\n        return plt", "docstring": "Plot Pourbaix diagram.\n\nArgs:\nlimits: 2D list containing limits of the Pourbaix diagram\nof the form [[xlo, xhi], [ylo, yhi]]\ntitle (str): Title to display on plot\nlabel_domains (bool): whether to label pourbaix domains\nplt (pyplot): Pyplot instance for plotting\n\nReturns:\nplt (pyplot) - matplotlib plot object with pourbaix diagram", "source": "juraj-google-style"}
{"code": "def texture_cube(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'TextureCube':\n        \n\n        res = TextureCube.__new__(TextureCube)\n        res.mglo, res._glo = self.mglo.texture_cube(size, components, data, alignment, dtype)\n        res._size = size\n        res._components = components\n        res._dtype = dtype\n        res.ctx = self\n        res.extra = None\n        return res", "docstring": "Create a :py:class:`TextureCube` object.\n\nArgs:\nsize (tuple): The width, height of the texture. Each side of the cube will have this size.\ncomponents (int): The number of components 1, 2, 3 or 4.\ndata (bytes): Content of the texture.\n\nKeyword Args:\nalignment (int): The byte alignment 1, 2, 4 or 8.\ndtype (str): Data type.\n\nReturns:\n:py:class:`TextureCube` object", "source": "juraj-google-style"}
{"code": "def split_folder_and_path(filepath):\n    dirname = op.dirname(filepath)\n    filename = op.basename(filepath)\n    splitext = op.splitext(filename)\n    filename_without_extension = splitext[0]\n    extension = splitext[1]\n    return (dirname, filename_without_extension, extension)", "docstring": "Split a file path into its folder, filename, and extension\n\nArgs:\npath (str): Path to a file\n\nReturns:\ntuple: of (folder, filename (without extension), extension)", "source": "codesearchnet"}
{"code": "def include(filename, hosts=False, when=True):\n    \n\n    if not pyinfra.is_cli:\n        raise PyinfraError('local.include is only available in CLI mode.')\n\n    if not when:\n        return\n\n    if hosts is not False:\n        hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory)\n        if pseudo_host not in hosts:\n            return\n\n    if pseudo_state.deploy_dir:\n        filename = path.join(pseudo_state.deploy_dir, filename)\n\n    frameinfo = get_caller_frameinfo()\n\n    logger.debug('Including local file: {0}'.format(filename))\n\n    try:\n        \n        \n        \n        \n\n        from pyinfra_cli.config import extract_file_config\n        from pyinfra_cli.util import exec_file\n\n        \n        config_data = extract_file_config(filename)\n        kwargs = {\n            key.lower(): value\n            for key, value in six.iteritems(config_data)\n            if key in [\n                'SUDO', 'SUDO_USER', 'SU_USER',\n                'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS',\n            ]\n        }\n        with pseudo_state.deploy(\n            filename, kwargs, None, frameinfo.lineno,\n            in_deploy=False,\n        ):\n            exec_file(filename)\n\n        \n        \n\n    except IOError as e:\n        raise PyinfraError(\n            'Could not include local file: {0}\\n{1}'.format(filename, e),\n        )", "docstring": "Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``\ndirectory.\n\nArgs:\nhosts (string, list): group name or list of hosts to limit this include to\nwhen (bool): indicate whether to trigger operations in this include", "source": "juraj-google-style"}
{"code": "def set_userdata(self, key: str, value: Any, cloneable: bool=False) -> 'DNA':\n    self._userdata[key] = value\n    if cloneable:\n        self._cloneable_userdata_keys.add(key)\n    return self", "docstring": "Sets user data associated with a key.\n\nUser data associated with the DNA will live only within current process,\nand is not carried over during serialization/deserialization, which is\ndifferent from DNA metadata. (See `set_metadata` for more details.)\n\nArgs:\nkey: Key of the user data.\nvalue: Value of the user data.\ncloneable: If True, the key/value will be carry over to the cloned DNA.\n\nReturns:\nSelf.", "source": "github-repos"}
{"code": "def CaptureVariableInternal(self, value, depth, limits, can_enqueue=True):\n    \n    if depth == limits.max_depth:\n      return {'varTableIndex': 0}  \n\n    if value is None:\n      self._total_size += 4\n      return {'value': 'None'}\n\n    if isinstance(value, _PRIMITIVE_TYPES):\n      r = _TrimString(repr(value),  \n                      min(limits.max_value_len,\n                          self.max_size - self._total_size))\n      self._total_size += len(r)\n      return {'value': r, 'type': type(value).__name__}\n\n    if isinstance(value, _DATE_TYPES):\n      r = str(value)  \n      self._total_size += len(r)\n      return {'value': r, 'type': 'datetime.'+ type(value).__name__}\n\n    if isinstance(value, dict):\n      \n      \n      \n      items = [(repr(k), v) for (k, v) in value.items()]\n      return {'members':\n              self.CaptureVariablesList(items, depth + 1,\n                                        EMPTY_DICTIONARY, limits),\n              'type': 'dict'}\n\n    if isinstance(value, _VECTOR_TYPES):\n      fields = self.CaptureVariablesList(\n          (('[%d]' % i, x) for i, x in enumerate(value)),\n          depth + 1, EMPTY_COLLECTION, limits)\n      return {'members': fields, 'type': type(value).__name__}\n\n    if isinstance(value, types.FunctionType):\n      self._total_size += len(value.__name__)\n      \n      return {'value': 'function ' + value.__name__}\n\n    if isinstance(value, Exception):\n      fields = self.CaptureVariablesList(\n          (('[%d]' % i, x) for i, x in enumerate(value.args)),\n          depth + 1, EMPTY_COLLECTION, limits)\n      return {'members': fields, 'type': type(value).__name__}\n\n    if can_enqueue:\n      index = self._var_table_index.get(id(value))\n      if index is None:\n        index = len(self._var_table)\n        self._var_table_index[id(value)] = index\n        self._var_table.append(value)\n      self._total_size += 4  \n      return {'varTableIndex': index}\n\n    for pretty_printer in CaptureCollector.pretty_printers:\n      pretty_value = pretty_printer(value)\n      if not pretty_value:\n        continue\n\n      fields, object_type = pretty_value\n      return {'members':\n              self.CaptureVariablesList(fields, depth + 1, OBJECT_HAS_NO_FIELDS,\n                                        limits),\n              'type': object_type}\n\n    if not hasattr(value, '__dict__'):\n      \n      r = str(type(value))\n      self._total_size += len(r)\n      return {'value': r}\n\n    \n    items = value.__dict__.items()\n    if six.PY3:\n      \n      \n      \n      \n      items = list(itertools.islice(items, limits.max_list_items + 1))\n    members = self.CaptureVariablesList(items, depth + 2,\n                                        OBJECT_HAS_NO_FIELDS, limits)\n    v = {'members': members}\n\n    type_string = DetermineType(value)\n    if type_string:\n      v['type'] = type_string\n\n    return v", "docstring": "Captures a single nameless object into Variable message.\n\nTODO(vlif): safely evaluate iterable types.\nTODO(vlif): safely call str(value)\n\nArgs:\nvalue: data to capture\ndepth: nested depth of dictionaries and vectors so far.\nlimits: Per-object limits for capturing variable data.\ncan_enqueue: allows referencing the object in variables table.\n\nReturns:\nFormatted captured data as per Variable proto.", "source": "juraj-google-style"}
{"code": "def update(self, puts, deletes):\n        \n        with self._lmdb.begin(write=True, buffers=True) as txn:\n            cursor = txn.cursor(self._main_db)\n            \n            \n            for key in deletes:\n                if not cursor.set_key(key.encode()):\n                    \n                    continue\n\n                value = self._deserializer(bytes(cursor.value()))\n                cursor.delete()\n\n                for (index_db, index_key_fn) in self._indexes.values():\n                    index_keys = index_key_fn(value)\n                    index_cursor = txn.cursor(index_db)\n                    for idx_key in index_keys:\n                        if index_cursor.set_key(idx_key):\n                            index_cursor.delete()\n\n            \n            for key, value in puts:\n                packed = self._serializer(value)\n\n                cursor.put(key.encode(), packed, overwrite=True)\n\n                for (index_db, index_key_fn) in self._indexes.values():\n                    index_keys = index_key_fn(value)\n                    index_cursor = txn.cursor(index_db)\n                    for idx_key in index_keys:\n                        index_cursor.put(idx_key, key.encode())\n\n        self.sync()", "docstring": "Applies the given puts and deletes atomically.\n\nArgs:\nputs (:iterable:`tuple`): an iterable of key/value pairs to insert\ndeletes (:iterable:str:) an iterable of keys to delete", "source": "juraj-google-style"}
{"code": "def _get_return_value(self, tensors, indices):\n    tensors = self._create_device_transfers(tensors)\n    for output, i in zip(tensors, indices):\n        output.set_shape(self._shapes[i])\n    if self._names:\n        return {self._names[i]: t for t, i in zip(tensors, indices)}\n    return tensors", "docstring": "Return the value to return from a get op.\n\nIf the staging area has names, return a dictionary with the\nnames as keys.  Otherwise return either a single tensor\nor a list of tensors depending on the length of `tensors`.\n\nArgs:\ntensors: List of tensors from the get op.\nindices: Indices of associated names and shapes\n\nReturns:\nA single tensor, a list of tensors, or a dictionary\nof tensors.", "source": "github-repos"}
{"code": "def recode_dwgsim_reads(dwgsim_prefix, fastq_rnf_fo, fai_fo, genome_id, estimate_unknown_values, number_of_read_tuples=(10 ** 9)):\n    dwgsim_pattern = re.compile('@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)')\n    fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)\n    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))\n    read_tuple_id = 0\n    last_read_tuple_name = None\n    old_fq = '{}.bfast.fastq'.format(dwgsim_prefix)\n    fq_creator = rnftools.rnfformat.FqCreator(fastq_fo=fastq_rnf_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator='dwgsim')\n    i = 0\n    with open(old_fq, 'r+') as f1:\n        for line in f1:\n            if ((i % 4) == 0):\n                read_tuple_name = line[1:].strip()\n                if (read_tuple_name != last_read_tuple_name):\n                    new_tuple = True\n                    if (last_read_tuple_name is not None):\n                        read_tuple_id += 1\n                else:\n                    new_tuple = False\n                last_read_tuple_name = read_tuple_name\n                m = dwgsim_pattern.search(line)\n                if (m is None):\n                    rnftools.utils.error(\"Read tuple '{}' was not created by DwgSim.\".format(line[1:]), program='RNFtools', subprogram='MIShmash', exception=ValueError)\n                contig_name = m.group(1)\n                start_1 = int(m.group(2))\n                start_2 = int(m.group(3))\n                direction_1 = ('F' if (int(m.group(4)) == 0) else 'R')\n                direction_2 = ('F' if (int(m.group(5)) == 0) else 'R')\n                chr_id = (fai_index.dict_chr_ids[contig_name] if (fai_index.dict_chr_ids != {}) else '0')\n            elif ((i % 4) == 1):\n                bases = line.strip()\n                if new_tuple:\n                    segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction_1, left=start_1, right=(((start_1 + len(bases)) - 1) if estimate_unknown_values else 0))\n                else:\n                    segment = rnftools.rnfformat.Segment(genome_id=genome_id, chr_id=chr_id, direction=direction_2, left=start_2, right=(((start_2 + len(bases)) - 1) if estimate_unknown_values else 0))\n            elif ((i % 4) == 2):\n                pass\n            elif ((i % 4) == 3):\n                qualities = line.strip()\n                fq_creator.add_read(read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment])\n            i += 1\n    fq_creator.flush_read_tuple()", "docstring": "Convert DwgSim FASTQ file to RNF FASTQ file.\n\nArgs:\ndwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).\nfastq_rnf_fo (file): File object of RNF FASTQ.\nfai_fo (file): File object for FAI file of the reference genome.\ngenome_id (int): RNF genome ID to be used.\nestimate_unknown_values (bool): Estimate unknown values (right coordinate of each end).\nnumber_of_read_tuples (int): Estimate of number of simulated read tuples (to set width).", "source": "codesearchnet"}
{"code": "def diffuse_horizontal_radiation(self, value=9999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `diffuse_horizontal_radiation`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `diffuse_horizontal_radiation`')\n    self._diffuse_horizontal_radiation = value", "docstring": "Corresponds to IDD Field `diffuse_horizontal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `diffuse_horizontal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _get_fields(mcs, bases, namespace):\n        \n        fields = [\n            (name, namespace.pop(name))\n            for name, attribute\n            in list(namespace.items())\n            if isinstance(attribute, BaseField)\n        ]\n\n        for base in reversed(bases):\n            if hasattr(base, mcs._fields_storage_key):\n                fields = list(\n                    getattr(base, mcs._fields_storage_key).items()\n                ) + fields\n\n        return OrderedDict(fields)", "docstring": "Create fields dictionary to be used in resource class namespace.\n\nPop all field objects from attributes dict (namespace) and store them\nunder _field_storage_key atrribute. Also collect all fields from base\nclasses in order that ensures fields can be overriden.\n\nArgs:\nbases: all base classes of created serializer class\nnamespace (dict): namespace as dictionary of attributes", "source": "juraj-google-style"}
{"code": "def __set_type(self, obj, prop):\n        \n        if TypeHandler.is_pure(obj, prop):\n            self.args_type = \"PURE\"\n            self.pure = SinonBase.Pure()\n            setattr(self.pure, \"func\", Wrapper.empty_function)\n            self.orig_func = None\n        elif TypeHandler.is_module_function(obj, prop):\n            self.args_type = \"MODULE_FUNCTION\"\n            self.orig_func = None\n        elif TypeHandler.is_function(obj):\n            self.args_type = \"FUNCTION\"\n            self.orig_func = None\n        elif TypeHandler.is_module(obj):\n            self.args_type = \"MODULE\"\n        elif TypeHandler.is_instance(obj):\n            obj = obj.__class__\n            self.args_type = \"MODULE\"", "docstring": "Triage type based on arguments\nHere are four types of base: PURE, MODULE, MODULE_FUNCTION, FUNCTION\nArgs:\nobj: None, FunctionType, ModuleType, Class, Instance\nprop: None, string", "source": "juraj-google-style"}
{"code": "def _encode_required_field(self, name: str, containing_type_builder: expressions.Builder, builder: expressions.Builder, element_definition: message.Message) -> Optional[validation_pb2.SqlRequirement]:\n    element = cast(Any, element_definition)\n    if not _is_elem_supported(element):\n        return None\n    field_name = _last_path_token(builder)\n    min_size = element.min.value\n    max_size = element.max.value\n    element_count = builder.count()\n    query_list = []\n    if _fhir_path_data_types.is_collection(builder.return_type) and max_size.isdigit():\n        query_list.append(element_count <= int(max_size))\n    if min_size == 1:\n        query_list.append(builder.exists())\n    elif min_size > 0:\n        query_list.append(element_count >= min_size)\n    if not query_list:\n        return None\n    constraint_key = f'{name}-cardinality-is-valid'\n    description = f'The length of {name} must be maximum {max_size} and minimum {min_size}.'\n    fhir_path_builder = query_list[0]\n    for query in query_list[1:]:\n        fhir_path_builder = fhir_path_builder & query\n    if constraint_key in self._options.skip_keys:\n        return None\n    type_codes = _utils.element_type_codes(element)\n    if 'Reference' not in type_codes and (not _SKIP_TYPE_CODES.isdisjoint(type_codes)):\n        return None\n    result = self._encode_fhir_path_builder_constraint(fhir_path_builder, containing_type_builder)\n    if result is None:\n        return None\n    element_definition_path = self._abs_path_invocation(containing_type_builder)\n    constraint_key_column_name: str = _key_to_sql_column_name(_path_to_sql_column_name(constraint_key))\n    column_name_base: str = _path_to_sql_column_name(element_definition_path)\n    column_name = f'{column_name_base}_{constraint_key_column_name}'\n    requirement = validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_CARDINALITY, element_path=element_definition_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[field_name])\n    return requirement", "docstring": "Returns `SqlRequirement` for the required field passed.\n\nArgs:\nname: name of the constraint key.\ncontaining_type_builder: The builder of the Structure definition for the\nrequired field.\nbuilder: The builder containing the element to encode required field for.\nelement_definition: Element definition of the builder.\n\nReturns:\nA `SqlRequirement` representing the requirement generated from\nthe element.", "source": "github-repos"}
{"code": "def infer_return_type(c, input_types, debug=False, depth=5):\n    try:\n        if hashable(c) and c in known_return_types:\n            return known_return_types[c]\n        elif isinstance(c, types.FunctionType):\n            return infer_return_type_func(c, input_types, debug, depth)\n        elif isinstance(c, types.MethodType):\n            if c.__self__ is not None:\n                input_types = [Const(c.__self__)] + input_types\n            return infer_return_type_func(c.__func__, input_types, debug, depth)\n        elif isinstance(c, BoundMethod):\n            input_types = [c.type] + input_types\n            return infer_return_type_func(c.func, input_types, debug, depth)\n        elif inspect.isclass(c):\n            if c in typehints.DISALLOWED_PRIMITIVE_TYPES:\n                return {list: typehints.List[Any], set: typehints.Set[Any], frozenset: typehints.FrozenSet[Any], tuple: typehints.Tuple[Any, ...], dict: typehints.Dict[Any, Any]}[c]\n            return c\n        elif c == getattr and len(input_types) == 2 and isinstance(input_types[1], Const):\n            from apache_beam.typehints import opcodes\n            return opcodes._getattr(input_types[0], input_types[1].value)\n        elif isinstance(c, python_callable.PythonCallableWithSource):\n            return infer_return_type(c._callable, input_types, debug, depth)\n        else:\n            return Any\n    except TypeInferenceError:\n        if debug:\n            traceback.print_exc()\n        return Any\n    except Exception:\n        if debug:\n            sys.stdout.flush()\n            raise\n        else:\n            return Any", "docstring": "Analyses a callable to deduce its return type.\n\nArgs:\nc: A Python callable to infer the return type of.\ninput_types: A sequence of inputs corresponding to the input types.\ndebug: Whether to print verbose debugging information.\ndepth: Maximum inspection depth during type inference.\n\nReturns:\nA TypeConstraint that that the return value of this function will (likely)\nsatisfy given the specified inputs.", "source": "github-repos"}
{"code": "def extended_capabilities(self):\n        \n        buf = (ctypes.c_uint8 * 32)()\n        self._dll.JLINKARM_GetEmuCapsEx(buf, 32)\n        return list(buf)", "docstring": "Gets the capabilities of the connected emulator as a list.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nList of 32 integers which define the extended capabilities based on\ntheir value and index within the list.", "source": "juraj-google-style"}
{"code": "def wheel_delta(self):\n    delta = self._libinput.libinput_event_tablet_tool_get_wheel_delta(self._handle)\n    changed = self._libinput.libinput_event_tablet_tool_wheel_has_changed(self._handle)\n    return (delta, changed)", "docstring": "The delta for the wheel in degrees and whether it has changed in\nthis event.\n\nReturns:\n(float, bool): The delta of the wheel, in degrees, compared to\nthe last event and whether it has changed.", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_buffer = utils.BytearrayStream()\n\n        if self._located_items:\n            self._located_items.write(local_buffer, kmip_version=kmip_version)\n\n        if self._unique_identifiers:\n            for unique_identifier in self._unique_identifiers:\n                unique_identifier.write(\n                    local_buffer,\n                    kmip_version=kmip_version\n                )\n\n        self.length = local_buffer.length()\n        super(LocateResponsePayload, self).write(\n            output_buffer,\n            kmip_version=kmip_version\n        )\n        output_buffer.write(local_buffer.buffer)", "docstring": "Write the data encoding the Locate response payload to a buffer.\n\nArgs:\noutput_buffer (stream): A data buffer in which to encode object\ndata, supporting a write method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def get_content(self, url):\n        \n        cache_path = self._url_to_path(url)\n        try:\n            with open(cache_path, 'rb') as f:\n                return f.read()\n        except IOError:\n            return None", "docstring": "Returns the content of a cached resource.\n\nArgs:\nurl: The url of the resource\n\nReturns:\nThe content of the cached resource or None if not in the cache", "source": "juraj-google-style"}
{"code": "def __random_density_bures(N, rank=None, seed=None):\n    P = (np.eye(N) + random_unitary(N).data)\n    G = P.dot(__ginibre_matrix(N, rank, seed))\n    G = G.dot(G.conj().T)\n    return (G / np.trace(G))", "docstring": "Generate a random density matrix from the Bures metric.\n\nArgs:\nN (int): the length of the density matrix.\nrank (int or None): the rank of the density matrix. The default\nvalue is full-rank.\nseed (int): Optional. To set a random seed.\nReturns:\nndarray: rho (N,N) a density matrix.", "source": "codesearchnet"}
{"code": "def _should_get_another_batch(self, content):\n    if (('max-keys' in self._options) and (self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT)):\n        return False\n    elements = self._find_elements(content, set([common._T_IS_TRUNCATED, common._T_NEXT_MARKER]))\n    if (elements.get(common._T_IS_TRUNCATED, 'false').lower() != 'true'):\n        return False\n    next_marker = elements.get(common._T_NEXT_MARKER)\n    if (next_marker is None):\n        self._options.pop('marker', None)\n        return False\n    self._options['marker'] = next_marker\n    return True", "docstring": "Whether to issue another GET bucket call.\n\nArgs:\ncontent: response XML.\n\nReturns:\nTrue if should, also update self._options for the next request.\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def empty(cls, base_uri=None, draft=AUTO):\n        \n        return cls.from_object({}, base_uri=base_uri, draft=draft)", "docstring": "Returns an empty ``Document``.\n\nArguments:\n\n- ``base_uri``: optional URL used as the basis when expanding\nrelative URLs in the document.\n- ``draft``: a ``Draft`` instance that selects the version of the spec\nto which the document should conform. Defaults to\n``drafts.AUTO``.", "source": "juraj-google-style"}
{"code": "def text(self, x, y, text):\n    for (i, char) in enumerate(text):\n        self.point((x + i), y, char)", "docstring": "Print a text on ASCII canvas.\n\nArgs:\nx (int): x coordinate where the text should start.\ny (int): y coordinate where the text should start.\ntext (str): string that should be printed.", "source": "codesearchnet"}
{"code": "def mkdir(path):\n    \n    try:\n        os.makedirs(path)\n        \n        if not os.path.isdir(path):  \n            raise IOError('path is not a directory')\n    except OSError as e:\n        \n        if e.errno == 17 and os.path.isdir(path):\n            return\n        raise", "docstring": "Make a directory and its parents.\n\nArgs:\npath (str): path to create\n\nReturns:\nNone\n\nRaises:\nOSError if the directory cannot be created.", "source": "juraj-google-style"}
{"code": "def position(string, index):\n\t\n\n\tif not string:\n\t\treturn None\n\n\tif index < 0 or index >= len(string):\n\t\traise InternalError(\"Out-of-range index passed to errors.position!\")\n\n\tlines = string.split(\"\\n\")\n\n\t\n\t\n\tif len(lines) == 1:\n\t\treturn str(index)\n\n\tbefore = n = 0\n\n\tfor n, line in enumerate(lines):\n\t\tfuture = before + len(line) + 1 \n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\tif future > index:\n\t\t\tbreak\n\t\tbefore = future\n\n\t\n\t\n\treturn \"{0}:{1}\".format(n, index - before)", "docstring": "Returns a helpful position description for an index in a\n(multi-line) string using the format line:column.\n\nArguments:\nstring (str): The string to which the index refers.\nindex (int): The index of the character in question.\n\nReturns:\nA string with the format line:column where line refers to the\n1-indexed row/line in which the character is found within the\nstring and column to the position of the character within\n(relative to) that  line.", "source": "juraj-google-style"}
{"code": "def serialize(self, items, default_flow_style=False):\n        \n\n        \n        yaml = self._load_yaml()\n\n        \n        items = dict(items)\n        return yaml.dump(items, default_flow_style=default_flow_style)", "docstring": "Does the inverse of config parsing by taking parsed values and\nconverting them back to a string representing config file contents.\n\nArgs:\ndefault_flow_style: defines serialization format (see PyYAML docs)", "source": "juraj-google-style"}
{"code": "def _finalize_job(cls, mapreduce_spec, mapreduce_state):\n    \n    config = util.create_datastore_write_config(mapreduce_spec)\n    queue_name = util.get_queue_name(mapreduce_spec.params.get(\n        model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))\n    done_callback = mapreduce_spec.params.get(\n        model.MapreduceSpec.PARAM_DONE_CALLBACK)\n    done_task = None\n    if done_callback:\n      done_task = taskqueue.Task(\n          url=done_callback,\n          headers=util._get_task_headers(mapreduce_spec.mapreduce_id,\n                                         util.CALLBACK_MR_ID_TASK_HEADER),\n          method=mapreduce_spec.params.get(\"done_callback_method\", \"POST\"))\n\n    @db.transactional(retries=5)\n    def _put_state():\n      \n      fresh_state = model.MapreduceState.get_by_job_id(\n          mapreduce_spec.mapreduce_id)\n      if not fresh_state.active:\n        logging.warning(\n            \"Job %s is not active. Looks like spurious task execution. \"\n            \"Dropping task.\", mapreduce_spec.mapreduce_id)\n        return\n      mapreduce_state.put(config=config)\n      \n      if done_task and not _run_task_hook(\n          mapreduce_spec.get_hooks(),\n          \"enqueue_done_task\",\n          done_task,\n          queue_name):\n        done_task.add(queue_name, transactional=True)\n\n    _put_state()\n    logging.info(\"Final result for job '%s' is '%s'\",\n                 mapreduce_spec.mapreduce_id, mapreduce_state.result_status)\n    cls._clean_up_mr(mapreduce_spec)", "docstring": "Finalize job execution.\n\nInvokes done callback and save mapreduce state in a transaction,\nand schedule necessary clean ups. This method is idempotent.\n\nArgs:\nmapreduce_spec: an instance of MapreduceSpec\nmapreduce_state: an instance of MapreduceState", "source": "juraj-google-style"}
{"code": "def piola_kirchoff_1(self, def_grad):\n        \n        if not self.is_symmetric:\n            raise ValueError(\"The stress tensor is not symmetric, \\\n                             PK stress is based on a symmetric stress tensor.\")\n        def_grad = SquareTensor(def_grad)\n        return def_grad.det*np.dot(self, def_grad.inv.trans)", "docstring": "calculates the first Piola-Kirchoff stress\n\nArgs:\ndef_grad (3x3 array-like): deformation gradient tensor", "source": "juraj-google-style"}
{"code": "def FindExtensionByName(self, full_name):\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    (message_name, _, extension_name) = full_name.rpartition('.')\n    try:\n        scope = self.FindMessageTypeByName(message_name)\n    except KeyError:\n        scope = self.FindFileContainingSymbol(full_name)\n    return scope.extensions_by_name[extension_name]", "docstring": "Loads the named extension descriptor from the pool.\n\nArgs:\nfull_name: The full name of the extension descriptor to load.\n\nReturns:\nA FieldDescriptor, describing the named extension.", "source": "codesearchnet"}
{"code": "def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):\n    if (book_id is None):\n        book_id = str(uuid.uuid4())\n    return os.path.join(prefix, book_id)", "docstring": "Return package path. Use uuid to generate package's directory name.\n\nArgs:\nbook_id (str, default None): UUID of the book.\nprefix (str, default settings.TEMP_DIR): Where the package will be\nstored. Default :attr:`settings.TEMP_DIR`.\n\nReturns:\nstr: Path to the root directory.", "source": "codesearchnet"}
{"code": "def reset_sequence(cls, value=None, force=False):\n    cls._meta.reset_sequence(value, force=force)", "docstring": "Reset the sequence counter.\n\nArgs:\nvalue (int or None): the new 'next' sequence value; if None,\nrecompute the next value from _setup_next_sequence().\nforce (bool): whether to force-reset parent sequence counters\nin a factory inheritance chain.", "source": "codesearchnet"}
{"code": "def find(lst, a, case_sensitive=True):\n    a = force_list(a)\n    if (not case_sensitive):\n        lst = [x.lower() for x in lst]\n        a = [y.lower() for y in a]\n    return [i for (i, x) in enumerate(lst) if (x in a)]", "docstring": "Return indices of a list which have elements that match an object or list of objects\n\nArgs:\nlst: list of values\na: object(s) to check equality\ncase_sensitive: if the search should be case sensitive\n\nReturns:\nlist: list of indicies of lst which equal a", "source": "codesearchnet"}
{"code": "def json_to_numpy(string_like, dtype=None):\n    data = json.loads(string_like)\n    return np.array(data, dtype=dtype)", "docstring": "Convert a JSON object to a numpy array.\n\nArgs:\nstring_like (str): JSON string.\ndtype (dtype, optional):  Data type of the resulting array. If None, the dtypes will be determined by the\ncontents of each column, individually. This argument can only be used to\n'upcast' the array.  For downcasting, use the .astype(t) method.\nReturns:\n(np.array): numpy array", "source": "codesearchnet"}
{"code": "def _redirect_with_params(url_name, *args, **kwargs):\n    url = urlresolvers.reverse(url_name, args=args)\n    params = parse.urlencode(kwargs, True)\n    return '{0}?{1}'.format(url, params)", "docstring": "Helper method to create a redirect response with URL params.\n\nThis builds a redirect string that converts kwargs into a\nquery string.\n\nArgs:\nurl_name: The name of the url to redirect to.\nkwargs: the query string param and their values to build.\n\nReturns:\nA properly formatted redirect string.", "source": "codesearchnet"}
{"code": "def _export(self, path, variables_saver):\n    \n    self._saved_model_handler.export(path, variables_saver=variables_saver)\n\n    module_def_proto = module_def_pb2.ModuleDef()\n    module_def_proto.format = module_def_pb2.ModuleDef.FORMAT_V3\n    module_def_filename = get_module_proto_path(path)\n    tf_utils.atomic_write_string_to_file(\n        module_def_filename,\n        module_def_proto.SerializeToString(),\n        overwrite=False)\n    logging.info(\"Exported TF-Hub module to: %s\", path)", "docstring": "Internal.\n\nArgs:\npath: string where to export the module to.\nvariables_saver: an unary-function that writes the module variables\ncheckpoint on the given path.", "source": "juraj-google-style"}
{"code": "def i2le_script(number):\n    \n    if number == 0:\n        return '00'\n    for i in range(80):\n        try:\n            return number.to_bytes(\n                length=i,  \n                byteorder='little',\n                signed=True).hex()\n        except Exception:\n            continue", "docstring": "Convert int to signed little endian (l.e.) hex for scripts\nArgs:\nnumber  (int): int value to convert to bytes in l.e. format\nReturns:\n(str): the hex-encoded signed LE number", "source": "juraj-google-style"}
{"code": "def _extract_id_token(id_token):\n    if (type(id_token) == bytes):\n        segments = id_token.split(b'.')\n    else:\n        segments = id_token.split(u'.')\n    if (len(segments) != 3):\n        raise VerifyJwtTokenError('Wrong number of segments in token: {0}'.format(id_token))\n    return json.loads(_helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1])))", "docstring": "Extract the JSON payload from a JWT.\n\nDoes the extraction w/o checking the signature.\n\nArgs:\nid_token: string or bytestring, OAuth 2.0 id_token.\n\nReturns:\nobject, The deserialized JSON payload.", "source": "codesearchnet"}
{"code": "def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True):\n    \n    conversion = cell\n\n    \n    if isinstance(cell, (int, float)):\n        pass\n    \n    elif isinstance(cell, basestring):\n        \n        if not cell:\n            conversion = None\n        else:\n            conversion = auto_convert_string_cell(flagable, cell, position, worksheet,\n                                                  flags, units, parens_as_neg=parens_as_neg)\n    \n    elif cell != None:\n        \n        \n        flagable.flag_change(flags, 'warning', position, worksheet,\n                             flagable.FLAGS['unknown-to-string'])\n        conversion = str(cell)\n        \n        if not conversion:\n            conversion = None\n    else:\n        \n        pass\n\n    return conversion", "docstring": "Performs a first step conversion of the cell to check\nit's type or try to convert if a valid conversion exists.\n\nArgs:\nparens_as_neg: Converts numerics surrounded by parens to negative values", "source": "juraj-google-style"}
{"code": "def get_shell_code(self, shell=None, parent_environ=None, style=OutputStyle.file):\n        \n        executor = self._create_executor(interpreter=create_shell(shell),\n                                         parent_environ=parent_environ)\n\n        if self.load_path and os.path.isfile(self.load_path):\n            executor.env.REZ_RXT_FILE = self.load_path\n\n        self._execute(executor)\n        return executor.get_output(style)", "docstring": "Get the shell code resulting from intepreting this context.\n\nArgs:\nshell (str): Shell type, for eg 'bash'. If None, the current shell\ntype is used.\nparent_environ (dict): Environment to interpret the context within,\ndefaults to os.environ if None.\nstyle (): Style to format shell code in.", "source": "juraj-google-style"}
{"code": "def _set_values_internal(self, context, pipeline_key, root_pipeline_key, outputs, result_status):\n    self._context = context\n    self._pipeline_key = pipeline_key\n    self._root_pipeline_key = root_pipeline_key\n    self._result_status = result_status\n    self.outputs = outputs", "docstring": "Sets the user-visible values provided as an API by this class.\n\nArgs:\ncontext: The _PipelineContext used for this Pipeline.\npipeline_key: The db.Key of this pipeline.\nroot_pipeline_key: The db.Key of the root pipeline.\noutputs: The PipelineFuture for this pipeline.\nresult_status: The result status of this pipeline.", "source": "codesearchnet"}
{"code": "def random_bernoulli(shape, p=0.0, dtype=None, seed=None):\n    if dtype is None:\n        dtype = floatx()\n    if seed is None:\n        seed = np.random.randint(10000000.0)\n    return array_ops.where_v2(random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p, array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))", "docstring": "Returns a tensor with random bernoulli distribution of values.\n\nArgs:\nshape: A tuple of integers, the shape of tensor to create.\np: A float, `0. <= p <= 1`, probability of bernoulli distribution.\ndtype: String, dtype of returned tensor.\nseed: Integer, random seed.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def with_mfa(self, mfa_token):\n    if hasattr(mfa_token, '__call__'):\n        self.context.mfa_token = mfa_token.__call__()\n    else:\n        self.context.mfa_token = mfa_token\n    return self", "docstring": "Set the MFA token for the next request.\n`mfa_token`s are only good for one request. Use this method to chain into\nthe protected action you want to perform.\n\nNote: Only useful for Application authentication.\nUsage:\naccount.with_mfa(application.totp.now()).pay(...)\n\nArgs:\nmfa_token (str/function, optional): TOTP token for the Application\nOR a callable/function which will generate such a token when called.\n\nReturns:\nself", "source": "codesearchnet"}
{"code": "def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None,\n           timeout=None):\n    \n    transport = DataFilesyncTransport(self.stream)\n    transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout)\n\n    try:\n      while True:\n        data = src_file.read(MAX_PUSH_DATA_BYTES)\n        if not data:\n          break\n        transport.write_data('DATA', data, timeout)\n\n      mtime = mtime or int(time.time())\n      transport.write_message(\n          FilesyncMessageTypes.DoneMessage('DONE', mtime), timeout)\n    except usb_exceptions.AdbStreamClosedError:\n      \n      \n      \n      self._check_for_fail_message(transport, sys.exc_info(), timeout)\n\n    data_msg = transport.read_message(timeout)\n    data_msg.assert_command_is('OKAY')", "docstring": "Push a file-like object to the device.\n\nArgs:\nsrc_file: File-like object for reading from\nfilename: Filename to push to on the device\nst_mode: stat mode for filename on the device\nmtime: modification time to set for the file on the device\ntimeout: Timeout to use for the send operation.\n\nRaises:\nAdbProtocolError: If we get an unexpected response.\nAdbRemoteError: If there's a remote error (but valid protocol).", "source": "juraj-google-style"}
{"code": "def filter(self, versions, key=(lambda x: x)):\n    return [x for x in versions if self.check(key(x))]", "docstring": "Filter all of the versions in an iterable that match this version range\n\nArgs:\nversions (iterable): An iterable of SemanticVersion objects\n\nReturns:\nlist: A list of the SemanticVersion objects that matched this range", "source": "codesearchnet"}
{"code": "def profile_update_args_v3(self, profile):\n    ij = self.load_install_json(profile.get('install_json', 'install.json'))\n    ijp = self.install_json_params(ij)\n    if ((profile.get('args', {}).get('app', {}).get('optional') is None) and (profile.get('args', {}).get('app', {}).get('required') is None)):\n        app_args = profile['args'].pop('app')\n        profile['args']['app'] = {}\n        profile['args']['app']['optional'] = {}\n        profile['args']['app']['required'] = {}\n        for arg in self.profile_settings_args_install_json(ij, None):\n            required = ijp.get(arg).get('required', False)\n            try:\n                if required:\n                    profile['args']['app']['required'][arg] = app_args.pop(arg)\n                else:\n                    profile['args']['app']['optional'][arg] = app_args.pop(arg)\n            except KeyError:\n                if self.args.verbose:\n                    print('{}{}Input \"{}\" not found in profile \"{}\".'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg, profile.get('profile_name')))\n        print('{}{}Updating args section to v3 schema for profile {}.'.format(c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name')))", "docstring": "Update v1 profile args to v3 schema for args.\n\n.. code-block:: javascript\n\n\"args\": {\n\"app\": {\n\"required\": {\n\"input_strings\": \"capitalize\",\n\"tc_action\": \"Capitalize\"\n},\n\"optional\": {\n\"fail_on_error\": true\n}\n}\n},\n\"default\": {\n\"api_access_id\": \"$env.API_ACCESS_ID\",\n\"api_default_org\": \"$env.API_DEFAULT_ORG\",\n},\n\nArgs:\nprofile (dict): The dictionary containting the profile settings.", "source": "codesearchnet"}
{"code": "def write_filepath(filepath, strategy):\n    dirpath = os.path.dirname(filepath)\n    base = os.path.basename(filepath)\n    return os.path.join(write_dirpath(dirpath, strategy), base)", "docstring": "Returns the writing file path to be used to save file distributedly.\n\nDirectory to contain `filepath` would be created if it doesn't exist.\n\nArgs:\nfilepath: Original filepath that would be used without distribution.\nstrategy: The tf.distribute strategy object currently used.\n\nReturns:\nThe writing filepath that should be used to save file with distribution.", "source": "github-repos"}
{"code": "def __init__(self, name, type_var, impl_type, type_checker):\n        \n        assert isinstance(name, str), repr(name)\n        assert isinstance(impl_type, type), repr(impl_type)\n        assert not isinstance(impl_type, TypingMeta), repr(impl_type)\n        assert isinstance(type_var, (type, _TypingBase)), repr(type_var)\n        self.name = name\n        self.type_var = type_var\n        self.impl_type = impl_type\n        self.type_checker = type_checker", "docstring": "Initializer.\n\nArgs:\nname: The name, e.g. 'Pattern'.\ntype_var: The type parameter, e.g. AnyStr, or the\nspecific type, e.g. str.\nimpl_type: The implementation type.\ntype_checker: Function that takes an impl_type instance.\nand returns a value that should be a type_var instance.", "source": "juraj-google-style"}
{"code": "def pbc_diff(fcoords1, fcoords2):\n    fdist = np.subtract(fcoords1, fcoords2)\n    return (fdist - np.round(fdist))", "docstring": "Returns the 'fractional distance' between two coordinates taking into\naccount periodic boundary conditions.\n\nArgs:\nfcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,\n0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single\ncoord or any array of coords.\nfcoords2: Second set of fractional coordinates.\n\nReturns:\nFractional distance. Each coordinate must have the property that\nabs(a) <= 0.5. Examples:\npbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]\npbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]", "source": "codesearchnet"}
{"code": "def serialize_to_json(self, name, datas):\n        \n        data_object = datas.get('object', None)\n\n        if data_object is None:\n            msg = (\"JSON reference '{}' lacks of required 'object' variable\")\n            raise SerializerError(msg.format(name))\n\n        try:\n            content = json.loads(data_object, object_pairs_hook=OrderedDict)\n        except json.JSONDecodeError as e:\n            msg = \"JSON reference '{}' raised error from JSON decoder: {}\"\n            raise SerializerError(msg.format(name, e))\n        else:\n            return content", "docstring": "Serialize given datas to any object from assumed JSON string.\n\nArguments:\nname (string): Name only used inside possible exception message.\ndatas (dict): Datas to serialize.\n\nReturns:\nobject: Object depending from JSON content.", "source": "juraj-google-style"}
{"code": "def from_path_and_array(cls, path, folder, y, classes=None, val_idxs=None, test_name=None, num_workers=8, tfms=(None, None), bs=64):\n    assert (not ((tfms[0] is None) or (tfms[1] is None))), 'please provide transformations for your train and validation sets'\n    assert (not os.path.isabs(folder)), 'folder needs to be a relative path'\n    fnames = np.core.defchararray.add(f'{folder}/', sorted(os.listdir(f'{path}{folder}')))\n    return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name, num_workers=num_workers, tfms=tfms, bs=bs)", "docstring": "Read in images given a sub-folder and their labels given a numpy array\n\nArguments:\npath: a root path of the data (used for storing trained models, precomputed values, etc)\nfolder: a name of the folder in which training images are contained.\ny: numpy array which contains target labels ordered by filenames.\nbs: batch size\ntfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\nval_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.\nIf None, default arguments to get_cv_idxs are used.\ntest_name: a name of the folder which contains test images.\nnum_workers: number of workers\n\nReturns:\nImageClassifierData", "source": "codesearchnet"}
{"code": "def _fits_surface(self, width, height):\n    assert ((width > 0) and (height > 0))\n    if (self.rot and ((width > self.width) or (height > self.height))):\n        (width, height) = (height, width)\n    if ((width > self.width) or (height > self.height)):\n        return False\n    else:\n        return True", "docstring": "Test surface is big enough to place a rectangle\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\n\nReturns:\nboolean: True if it could be placed, False otherwise", "source": "codesearchnet"}
{"code": "def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):\n    if (not _is_pil_image(img)):\n        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n    assert (isinstance(translate, (tuple, list)) and (len(translate) == 2)), 'Argument translate should be a list or tuple of length 2'\n    assert (scale > 0.0), 'Argument scale should be positive'\n    output_size = img.size\n    center = (((img.size[0] * 0.5) + 0.5), ((img.size[1] * 0.5) + 0.5))\n    matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)\n    kwargs = ({'fillcolor': fillcolor} if (PILLOW_VERSION[0] == '5') else {})\n    return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)", "docstring": "Apply affine transformation on the image keeping image center invariant\n\nArgs:\nimg (PIL Image): PIL Image to be rotated.\nangle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.\ntranslate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)\nscale (float): overall scale\nshear (float): shear angle value in degrees between -180 to 180, clockwise direction.\nresample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):\nAn optional resampling filter.\nSee `filters`_ for more information.\nIf omitted, or if the image has mode \"1\" or \"P\", it is set to ``PIL.Image.NEAREST``.\nfillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)", "source": "codesearchnet"}
{"code": "def serialize(self, user=None):\n    return {'content': self.body, 'type': self.typ, 'updated_at': self.updated_at, 'timestamp': self.updated_at, 'is_update': (not hasattr(self, 'unsaved')), 'attachments': [attachment.serialize() for attachment in self.attachment_set], 'title': self.msg_title, 'url': self.url, 'sender_name': self.sender.full_name, 'sender_key': self.sender.key, 'channel_key': self.channel.key, 'cmd': 'message', 'avatar_url': self.sender.avatar, 'key': self.key}", "docstring": "Serializes message for given user.\n\nNote:\nShould be called before first save(). Otherwise \"is_update\" will get wrong value.\n\nArgs:\nuser: User object\n\nReturns:\nDict. JSON serialization ready dictionary object", "source": "codesearchnet"}
{"code": "def get_data_node(self, path: DataPath) -> Optional[DataNode]:\n        \n        addr = self.schema_data.path2route(path)\n        node = self.schema\n        for p in addr:\n            node = node.get_data_child(*p)\n            if node is None:\n                return None\n        return node", "docstring": "Return the data node addressed by a data path.\n\nArgs:\npath: Data path.\n\nReturns:\nData node if found in the schema, or ``None``.\n\nRaises:\nInvalidSchemaPath: If the schema path is invalid.", "source": "juraj-google-style"}
{"code": "def read_mutiple_items(f, container_type, item_type, separator=' '):\n    return __read(f, (lambda line: container_type((item_type(item) for item in line.split(separator)))))", "docstring": "Extract an iterable from the current line of a file-like object.\n\nArgs:\nf (file): the file-like object to read from\ncontainer_type (type): type of the iterable that will be returned\nitem_type (type): type of the values that will be elements of the returned iterable\nseparator (str): the separator between two consecutive items\n\nReturns:\nThe extracted iterable\n\nExample:\nThe file \"a.input\" contains three lines and three comma-separated digits on each::\n\n>>> with open(\"a.input\") as f:\n...     print(utools.files.read_multiple_items(f, list, int, separator=\",\"))\n...     print(utools.files.read_multiple_items(f, set, str, separator=\",\"))\n...     print(utools.files.read_multiple_items(f, tuple, float, separator=\",\"))\n...\n[1, 2, 3]\n{\"4\", \"5\", \"6\"}\n(7.0, 8.0, 9.0)", "source": "codesearchnet"}
{"code": "def read(self, size=None):\n    if (size is not None):\n        read_size = min(size, self.__remaining_bytes)\n    else:\n        read_size = self.__remaining_bytes\n    data = self.__stream.read(read_size)\n    if ((read_size > 0) and (not data)):\n        raise exceptions.StreamExhausted(('Not enough bytes in stream; expected %d, exhausted after %d' % (self.__max_bytes, (self.__max_bytes - self.__remaining_bytes))))\n    self.__remaining_bytes -= len(data)\n    return data", "docstring": "Read at most size bytes from this slice.\n\nCompared to other streams, there is one case where we may\nunexpectedly raise an exception on read: if the underlying stream\nis exhausted (i.e. returns no bytes on read), and the size of this\nslice indicates we should still be able to read more bytes, we\nraise exceptions.StreamExhausted.\n\nArgs:\nsize: If provided, read no more than size bytes from the stream.\n\nReturns:\nThe bytes read from this slice.\n\nRaises:\nexceptions.StreamExhausted", "source": "codesearchnet"}
{"code": "def _get_source(link):\n    \n    if link.startswith(\"http:\n        down = httpkie.Downloader()\n        return down.download(link)\n\n    if os.path.exists(link):\n        with open(link) as f:\n            return f.read()\n\n    raise UserWarning(\"html: '%s' is neither URL or data!\" % link)", "docstring": "Return source of the `link` whether it is filename or url.\n\nArgs:\nlink (str): Filename or URL.\n\nReturns:\nstr: Content.\n\nRaises:\nUserWarning: When the `link` couldn't be resolved.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n    cls = [self.cls_token_id]\n    sep = [self.sep_token_id]\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A Electra sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def MethodCalled(self, mock_method):\n    \n\n    \n    \n    for method in self._methods:\n      if method == mock_method:\n        \n        \n        \n        \n        self._methods.remove(mock_method)\n\n        \n        if not self.IsSatisfied():\n          mock_method._call_queue.appendleft(self)\n\n        return self, method\n\n    raise UnexpectedMethodCallError(mock_method, self)", "docstring": "Remove a method call from the group.\n\nIf the method is not in the set, an UnexpectedMethodCallError will be\nraised.\n\nArgs:\nmock_method: a mock method that should be equal to a method in the group.\n\nReturns:\nThe mock method from the group\n\nRaises:\nUnexpectedMethodCallError if the mock_method was not in the group.", "source": "juraj-google-style"}
{"code": "def sg_parallel(func):\n    r\n    @wraps(func)\n    def wrapper(**kwargs):\n        r\n        \n        opt = tf.sg_opt(kwargs)\n\n        \n        res = []\n        for i in range(sg_gpus()):\n            \n            with tf.device('/gpu:%d' % i):\n                \n                with tf.name_scope('gpu_%d' % i):\n                    \n                    with sg_context(reuse=(True if i > 0 else False)):\n                        \n                        res.append(func(opt * tf.sg_opt(gpu_index=i)))\n\n        return res\n\n    return wrapper", "docstring": "r\"\"\"Decorates function as multiple gpu support towers.\nArgs:\nfunc: function to decorate", "source": "juraj-google-style"}
{"code": "def parent_callback(self, parent_fu):\n    if (parent_fu.done() is True):\n        e = parent_fu._exception\n        if e:\n            super().set_exception(e)\n        else:\n            super().set_result(self.file_obj)\n    return", "docstring": "Callback from executor future to update the parent.\n\nArgs:\n- parent_fu (Future): Future returned by the executor along with callback\n\nReturns:\n- None\n\nUpdates the super() with the result() or exception()", "source": "codesearchnet"}
{"code": "def CopyFromDateTimeString(self, time_string):\n    date_time_values = self._CopyDateTimeFromString(time_string)\n    self._CopyFromDateTimeValues(date_time_values)", "docstring": "Copies time elements from a date and time string.\n\nArgs:\ntime_string (str): date and time value formatted as:\nYYYY-MM-DD hh:mm:ss.######[+-]##:##\n\nWhere # are numeric digits ranging from 0 to 9 and the seconds\nfraction can be either 3 or 6 digits. The time of day, seconds\nfraction and time zone offset are optional. The default time zone\nis UTC.", "source": "codesearchnet"}
{"code": "def incoming(self, messages):\n    if self._observers:\n        campfire = self._room.get_campfire()\n        for message in messages:\n            for observer in self._observers:\n                observer(Message(campfire, message))", "docstring": "Called when incoming messages arrive.\n\nArgs:\nmessages (tuple): Messages (each message is a dict)", "source": "codesearchnet"}
{"code": "def fill_dataset_tree(self, tree, data_sets):\n        \n\n        tree.model().removeRows(0, tree.model().rowCount())\n        for index, (time, script) in enumerate(data_sets.items()):\n            name = script.settings['tag']\n            type = script.name\n\n            item_time = QtGui.QStandardItem(str(time))\n            item_name = QtGui.QStandardItem(str(name))\n            item_type = QtGui.QStandardItem(str(type))\n\n            item_time.setSelectable(False)\n            item_time.setEditable(False)\n            item_type.setSelectable(False)\n            item_type.setEditable(False)\n\n            tree.model().appendRow([item_time, item_name, item_type])", "docstring": "fills the tree with data sets where datasets is a dictionary of the form\nArgs:\ntree:\ndata_sets: a dataset\n\nReturns:", "source": "juraj-google-style"}
{"code": "def clamp(value, maximum=None):\n    value = max(value, 0)\n    if (maximum is not None):\n        return min(value, maximum)\n    else:\n        return value", "docstring": "Clamp numeric values to be non-negative, an optionally, less than a\ngiven maximum.\n\nArgs:\nvalue (float) :\nA number to clamp.\n\nmaximum (float, optional) :\nA max bound to to clamp to. If None, there is no upper bound,\nand values are only clamped to be non-negative. (default: None)\n\nReturns:\nfloat", "source": "codesearchnet"}
{"code": "def get_string(self, significant_figures=6):\n    ph = ('{:.%df}' % significant_figures)\n    lines = []\n    for (bound, d) in zip(self.bounds, 'xyz'):\n        fillers = (bound + ([d] * 2))\n        bound_format = ' '.join((([ph] * 2) + [' {}lo {}hi']))\n        lines.append(bound_format.format(*fillers))\n    if self.tilt:\n        tilt_format = ' '.join((([ph] * 3) + [' xy xz yz']))\n        lines.append(tilt_format.format(*self.tilt))\n    return '\\n'.join(lines)", "docstring": "Returns the string representation of simulation box in LAMMPS\ndata file format.\n\nArgs:\nsignificant_figures (int): No. of significant figures to\noutput for box settings. Default to 6.\n\nReturns:\nString representation", "source": "codesearchnet"}
{"code": "def iso_date(d) -> str:\n        \n        if isinstance(d, datetime):\n            return d.isoformat()\n        elif isinstance(d, date):\n            return datetime.combine(d, datetime.min.time()).isoformat()\n        else:\n            try:\n                datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')\n                return d\n            except ValueError:\n                try:\n                    datetime.strptime(d, '%Y-%m-%d')\n                    return d + \"T00:00:00\"\n                except ValueError:\n                    pass\n        raise ISODateError(\"Can not convert value to ISO format for kg\")", "docstring": "Return iso format of a date\n\nArgs:\nd:\nReturns: str", "source": "juraj-google-style"}
{"code": "def get_config(self):\n    raise NotImplementedError(f'{self} does not implement get_config()')", "docstring": "Returns the config of the quantizer.\n\nA quantizer config is a Python dictionary (serializable)\ncontaining all configuration parameters of the quantizer.\nThe same quantizer can be reinstantiated later\n(without any saved state) from this configuration.\n\nThis method is optional if you are just training and executing models,\nexporting to and from SavedModels, or using weight checkpoints.\n\nThis method is required for Keras `model_to_estimator`, saving and\nloading models to HDF5 formats, Keras model cloning, some visualization\nutilities, and exporting models to and from JSON.\n\nReturns:\nPython dictionary.", "source": "github-repos"}
{"code": "def predict_on_batch(self, x):\n    raise NotImplementedError", "docstring": "Returns predictions for a single batch of samples.\n\nArgs:\nx: Input data. It must be array-like.\n\nReturns:\nNumPy array(s) of predictions.", "source": "github-repos"}
{"code": "def _read_hip_para(self, length, *, version):\n    counter = 0\n    optkind = list()\n    options = dict()\n    while (counter < length):\n        kind = self._read_binary(2)\n        if (not kind):\n            break\n        code = int(kind, base=2)\n        cbit = (True if int(kind[15], base=2) else False)\n        clen = self._read_unpack(2)\n        plen = ((11 + clen) - ((clen + 3) % 8))\n        dscp = _HIP_PARA.get(code, 'Unassigned')\n        data = _HIP_PROC(dscp)(self, code, cbit, clen, desc=dscp, length=plen, version=version)\n        counter += plen\n        if (dscp in optkind):\n            if isinstance(options[dscp], tuple):\n                options[dscp] += (Info(data),)\n            else:\n                options[dscp] = (Info(options[dscp]), Info(data))\n        else:\n            optkind.append(dscp)\n            options[dscp] = data\n    if (counter != length):\n        raise ProtocolError(f'HIPv{version}: invalid format')\n    return (tuple(optkind), options)", "docstring": "Read HIP parameters.\n\nPositional arguments:\n* length -- int, length of parameters\n\nKeyword arguments:\n* version -- int, HIP version\n\nReturns:\n* dict -- extracted HIP parameters", "source": "codesearchnet"}
{"code": "def MatchBestComponentName(self, component):\n    \n    fd = self.OpenAsContainer()\n\n    \n    file_listing = set(fd.ListNames())\n\n    \n    if component not in file_listing:\n      \n      lower_component = component.lower()\n      for x in file_listing:\n        if lower_component == x.lower():\n          component = x\n          break\n\n    if fd.supported_pathtype != self.pathspec.pathtype:\n      new_pathspec = rdf_paths.PathSpec(\n          path=component, pathtype=fd.supported_pathtype)\n    else:\n      new_pathspec = self.pathspec.last.Copy()\n      new_pathspec.path = component\n\n    return new_pathspec", "docstring": "Returns the name of the component which matches best our base listing.\n\nIn order to do the best case insensitive matching we list the files in the\nbase handler and return the base match for this component.\n\nArgs:\ncomponent: A component name which should be present in this directory.\n\nReturns:\nthe best component name.", "source": "juraj-google-style"}
{"code": "def _update_record(self, identifier, rtype=None, name=None, content=None):\n    if (identifier is not None):\n        identifier = int(identifier)\n        records = self._list_records_internal(identifier=identifier)\n    else:\n        records = self._list_records_internal(name=name, rtype=rtype)\n    LOGGER.debug('Records to update (%d): %s', len(records), records)\n    assert records, 'No record found to update'\n    success = True\n    for record in records:\n        name = (name if (name is not None) else record['name'])\n        rtype = (rtype if (rtype is not None) else record['type'])\n        content = (content if (content is not None) else record['content'])\n        success = (success and self._create_record_internal(rtype, name, content, record['id']))\n    return success", "docstring": "Update a DNS entry identified by identifier or name in the domain zone.\nAny non given argument will leave the current value of the DNS entry.\n\nArgs:\nidentifier (str): The easyname id of the DNS entry to update.\n[rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.\n[name] (str): The name of the new DNS entry, e.g the domain for which\na MX entry shall be valid.\n[content] (str): The content of the new DNS entry, e.g. the mail\nserver hostname for a MX entry.\n\nReturns:\nbool: True if the record was updated successfully, False otherwise.\n\nRaises:\nAssertionError: When a request returns unexpected or unknown data.", "source": "codesearchnet"}
{"code": "def unescape(cls, text: str) -> str:\n        \n        chop = text.split(\"\\\\\", 1)\n        try:\n            return (chop[0] if len(chop) == 1\n                    else chop[0] + cls.unescape_map[chop[1][0]] +\n                    cls.unescape(chop[1][1:]))\n        except KeyError:\n            raise InvalidArgument(text) from None", "docstring": "Replace escape sequence with corresponding characters.\n\nArgs:\ntext: Text to unescape.", "source": "juraj-google-style"}
{"code": "def _ImageDimensions(image, rank):\n    if image.get_shape().is_fully_defined():\n        return image.get_shape().as_list()\n    else:\n        static_shape = image.get_shape().with_rank(rank).as_list()\n        dynamic_shape = array_ops_stack.unstack(array_ops.shape(image), rank)\n        return [s if s is not None else d for s, d in zip(static_shape, dynamic_shape)]", "docstring": "Returns the dimensions of an image tensor.\n\nArgs:\nimage: A rank-D Tensor. For 3-D  of shape: `[height, width, channels]`.\nrank: The expected rank of the image\n\nReturns:\nA list of corresponding to the dimensions of the\ninput image.  Dimensions that are statically known are python integers,\notherwise, they are integer scalar tensors.", "source": "github-repos"}
{"code": "def GetFileAndLine(component):\n    if inspect.isbuiltin(component):\n        return (None, None)\n    try:\n        filename = inspect.getsourcefile(component)\n    except TypeError:\n        return (None, None)\n    try:\n        unused_code, lineindex = inspect.findsource(component)\n        lineno = lineindex + 1\n    except (OSError, IndexError):\n        lineno = None\n    return (filename, lineno)", "docstring": "Returns the filename and line number of component.\n\nArgs:\ncomponent: A component to find the source information for, usually a class\nor routine.\nReturns:\nfilename: The name of the file where component is defined.\nlineno: The line number where component is defined.", "source": "github-repos"}
{"code": "def strip_prefix_from_items(prefix, items):\n    items_no_prefix = []\n    for item in items:\n        if item.startswith(prefix):\n            items_no_prefix.append(item[len(prefix):])\n        else:\n            items_no_prefix.append(item)\n    return items_no_prefix", "docstring": "Strips out the prefix from each of the items if it is present.\n\nArgs:\nprefix: the string for that you wish to strip from the beginning of each\nof the items.\nitems: a list of strings that may or may not contain the prefix you want\nto strip out.\n\nReturns:\nitems_no_prefix: a copy of the list of items (same order) without the\nprefix (if present).", "source": "codesearchnet"}
{"code": "def encode(self, s):\n    \n    \n    \n    \n    if s.endswith(\".mp3\"):\n      \n      out_filepath = s[:-4] + \".wav\"\n      call([\n          \"sox\", \"--guard\", s, \"-r\", \"16k\", \"-b\", \"16\", \"-c\", \"1\", out_filepath\n      ])\n      s = out_filepath\n    elif not s.endswith(\".wav\"):\n      out_filepath = s + \".wav\"\n      if not os.path.exists(out_filepath):\n        call([\"sox\", \"-r\", \"16k\", \"-b\", \"16\", \"-c\", \"1\", s, out_filepath])\n      s = out_filepath\n    rate, data = wavfile.read(s)\n    assert rate == self._sample_rate\n    assert len(data.shape) == 1\n    if data.dtype not in [np.float32, np.float64]:\n      data = data.astype(np.float32) / np.iinfo(data.dtype).max\n    return data.tolist()", "docstring": "Transform a string with a filename into a list of float32.\n\nArgs:\ns: path to the file with a waveform.\n\nReturns:\nsamples: list of int16s", "source": "juraj-google-style"}
{"code": "def start(self, name: str, increment_count: bool=True) -> None:\n    if (not self._timing):\n        return\n    now = get_now_utc_pendulum()\n    if self._stack:\n        last = self._stack[(- 1)]\n        self._totaldurations[last] += (now - self._starttimes[last])\n    if (name not in self._starttimes):\n        self._totaldurations[name] = datetime.timedelta()\n        self._count[name] = 0\n    self._starttimes[name] = now\n    if increment_count:\n        self._count[name] += 1\n    self._stack.append(name)", "docstring": "Start a named timer.\n\nArgs:\nname: name of the timer\nincrement_count: increment the start count for this timer", "source": "codesearchnet"}
{"code": "def __init__(self, rfile, maxlen, bufsize=8192):\n        \n        self.rfile = rfile\n        self.maxlen = maxlen\n        self.bytes_read = 0\n        self.buffer = EMPTY\n        self.bufsize = bufsize\n        self.closed = False", "docstring": "Initialize ChunkedRFile instance.\n\nArgs:\nrfile (file): file encoded with the 'chunked' transfer encoding\nmaxlen (int): maximum length of the file being read\nbufsize (int): size of the buffer used to read the file", "source": "juraj-google-style"}
{"code": "def collect_members(module_to_name):\n    members = {}\n    for (module, module_name) in module_to_name.items():\n        all_names = getattr(module, '__all__', None)\n        for (name, member) in inspect.getmembers(module):\n            if ((inspect.isfunction(member) or inspect.isclass(member)) and (not _always_drop_symbol_re.match(name)) and ((all_names is None) or (name in all_names))):\n                fullname = ('%s.%s' % (module_name, name))\n                if (name in members):\n                    (other_fullname, other_member) = members[name]\n                    if (member is not other_member):\n                        raise RuntimeError(('Short name collision between %s and %s' % (fullname, other_fullname)))\n                    if (len(fullname) == len(other_fullname)):\n                        raise RuntimeError((\"Can't decide whether to use %s or %s for %s: both full names have length %d\" % (fullname, other_fullname, name, len(fullname))))\n                    if (len(fullname) > len(other_fullname)):\n                        continue\n                members[name] = (fullname, member)\n    return members", "docstring": "Collect all symbols from a list of modules.\n\nArgs:\nmodule_to_name: Dictionary mapping modules to short names.\n\nReturns:\nDictionary mapping name to (fullname, member) pairs.", "source": "codesearchnet"}
{"code": "def delete_tag(self, tag_name, **kwargs):\n    resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), **kwargs)\n    resp.raise_for_status()\n    return resp", "docstring": "delete a tag by name\n\nArgs:\ntag_name (string): name of tag to delete", "source": "codesearchnet"}
{"code": "def _normalize_hparams(hparams):\n  \n  result = {}\n  for (k, v) in six.iteritems(hparams):\n    if isinstance(k, HParam):\n      k = k.name\n    if k in result:\n      raise ValueError(\"multiple values specified for hparam %r\" % (k,))\n    result[k] = v\n  return result", "docstring": "Normalize a dict keyed by `HParam`s and/or raw strings.\n\nArgs:\nhparams: A `dict` whose keys are `HParam` objects and/or strings\nrepresenting hyperparameter names, and whose values are\nhyperparameter values. No two keys may have the same name.\n\nReturns:\nA `dict` whose keys are hyperparameter names (as strings) and whose\nvalues are the corresponding hyperparameter values.\n\nRaises:\nValueError: If two entries in `hparams` share the same\nhyperparameter name.", "source": "juraj-google-style"}
{"code": "def _GetDateTime(self, filetime):\n    if (filetime == 0):\n        return dfdatetime_semantic_time.SemanticTime('Not set')\n    return dfdatetime_filetime.Filetime(timestamp=filetime)", "docstring": "Retrieves the date and time from a FILETIME timestamp.\n\nArgs:\nfiletime (int): FILETIME timestamp.\n\nReturns:\ndfdatetime.DateTimeValues: date and time.", "source": "codesearchnet"}
{"code": "def list(self, pattern='*'):\n    if (self._descriptors is None):\n        self._descriptors = self._client.list_metric_descriptors(filter_string=self._filter_string, type_prefix=self._type_prefix)\n    return [metric for metric in self._descriptors if fnmatch.fnmatch(metric.type, pattern)]", "docstring": "Returns a list of metric descriptors that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the descriptors. This can\ninclude Unix shell-style wildcards. E.g. ``\"compute*\"``,\n``\"*cpu/load_??m\"``.\n\nReturns:\nA list of MetricDescriptor objects that match the filters.", "source": "codesearchnet"}
{"code": "def byte_adaptor(fbuffer):\n    \n    if six.PY3:\n        strings = fbuffer.read().decode('latin-1')\n        fbuffer = six.StringIO(strings)\n        return fbuffer\n    else:\n        return fbuffer", "docstring": "provides py3 compatibility by converting byte based\nfile stream to string based file stream\n\nArguments:\nfbuffer: file like objects containing bytes\n\nReturns:\nstring buffer", "source": "juraj-google-style"}
{"code": "def check(self, dsm, simplicity_factor=2, **kwargs):\n    economy_of_mechanism = False\n    message = ''\n    data = dsm.data\n    categories = dsm.categories\n    dsm_size = dsm.size[0]\n    if (not categories):\n        categories = (['appmodule'] * dsm_size)\n    dependency_number = 0\n    for i in range(0, dsm_size):\n        for j in range(0, dsm_size):\n            if ((categories[i] not in ('framework', 'corelib')) and (categories[j] not in ('framework', 'corelib')) and (data[i][j] > 0)):\n                dependency_number += 1\n    if (dependency_number < (dsm_size * simplicity_factor)):\n        economy_of_mechanism = True\n    else:\n        message = ' '.join([('Number of dependencies (%s)' % dependency_number), ('> number of rows (%s)' % dsm_size), ('* simplicity factor (%s) = %s' % (simplicity_factor, (dsm_size * simplicity_factor)))])\n    return (economy_of_mechanism, message)", "docstring": "Check economy of mechanism.\n\nAs first abstraction, number of dependencies between two modules\n< 2 * the number of modules\n(dependencies to the framework are NOT considered).\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\nsimplicity_factor (int): simplicity factor.\n\nReturns:\nbool: True if economic, else False", "source": "codesearchnet"}
{"code": "def open_shards(glob_pattern, mode='rt', encoding='utf-8'):\n    if 'b' in mode:\n        encoding = None\n    with tempfile.NamedTemporaryFile(delete=False) as out_file:\n        for shard in glob.glob(glob_pattern):\n            with open(shard, 'rb') as in_file:\n                out_file.write(in_file.read())\n        concatenated_file_name = out_file.name\n    return io.open(concatenated_file_name, mode, encoding=encoding)", "docstring": "Returns a composite file of all shards matching the given glob pattern.\n\nArgs:\nglob_pattern (str): Pattern used to match files which should be opened.\nmode (str): Specify the mode in which the file should be opened. For\navailable modes, check io.open() documentation.\nencoding (str): Name of the encoding used to decode or encode the file.\nThis should only be used in text mode.\n\nReturns:\nA stream with the contents of the opened files.", "source": "github-repos"}
{"code": "def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):\n        \n        done = False\n        records = {}\n        page = 1\n\n        while not done:\n            kwargs['page'] = page\n            response = self.__cloudflare_request(\n                account=account,\n                path='/zones/{}/dns_records'.format(zoneID),\n                args=kwargs\n            )\n            info = response['result_info']\n\n            \n            if 'total_pages' not in info or page >= info['total_pages']:\n                done = True\n            else:\n                page += 1\n\n            for record in response['result']:\n                if record['name'] in records:\n                    records[record['name']]['value'] = sorted(records[record['name']]['value'] + [record['content']])\n                else:\n                    records[record['name']] = {\n                        'name': record['name'],\n                        'value': sorted([record['content']]),\n                        'type': record['type']\n                    }\n\n        return list(records.values())", "docstring": "Helper function to list all records on a CloudFlare DNS Zone. Returns a `dict` containing the records and\ntheir information.\n\nArgs:\naccount (:obj:`CloudFlareAccount`): A CloudFlare Account object\nzoneID (`int`): Internal CloudFlare ID of the DNS zone\n**kwargs (`dict`): Additional arguments to be consumed by the API endpoint\n\nReturns:\n:obj:`dict` of `str`: `dict`", "source": "juraj-google-style"}
{"code": "def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms')\n    if phone_number:\n      phone_number = phone_number.replace(' ', '')\n\n    event_data = SkypeSMSEventData()\n    event_data.number = phone_number\n    event_data.query = query\n    event_data.text = self._GetRowValue(query_hash, row, 'msg_sms')\n\n    timestamp = self._GetRowValue(query_hash, row, 'time_sms')\n    if timestamp:\n      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype')\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses an SMS.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def _orthogonal_matrix(self, n):\n    a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)\n    if self.seed:\n        self.seed += 1\n    q, r = gen_linalg_ops.qr(a)\n    d = array_ops.diag_part(r)\n    q *= math_ops.sign(d)\n    return q", "docstring": "Construct an n x n orthogonal matrix.\n\nArgs:\nn: Dimension.\n\nReturns:\nA n x n orthogonal matrix.", "source": "github-repos"}
{"code": "def where(condition, x1=None, x2=None):\n    if x1 is None and x2 is not None or (x1 is not None and x2 is None):\n        raise ValueError('`x1` and `x2` either both should be `None` or both should have non-None value.')\n    if any_symbolic_tensors((condition, x1, x2)):\n        return Where().symbolic_call(condition, x1, x2)\n    return backend.numpy.where(condition, x1, x2)", "docstring": "Return elements chosen from `x1` or `x2` depending on `condition`.\n\nArgs:\ncondition: Where `True`, yield `x1`, otherwise yield `x2`.\nx1: Values from which to choose when `condition` is `True`.\nx2: Values from which to choose when `condition` is `False`.\n\nReturns:\nA tensor with elements from `x1` where `condition` is `True`, and\nelements from `x2` where `condition` is `False`.", "source": "github-repos"}
{"code": "def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs):\n        \n        n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs)\n        if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions']\n        else: n.measured_regions = self.get_measured_regions()\n        if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes']\n        else: n.measured_phenotypes = self.phenotypes\n        n.microns_per_pixel = self.microns_per_pixel\n        return n", "docstring": "Return a class that can be used to create honeycomb plots\n\nArgs:\nsubsets (list): list of SubsetLogic objects\nstep_pixels (int): distance between hexagons\nmax_distance_pixels (int): the distance from each point by which to caclulate the quanitty of the phenotype for that area\n\nReturns:\nCartesian: returns a class that holds the layout of the points to plot.", "source": "juraj-google-style"}
{"code": "def categorize(self, categories, default=None):\n        \n        return dim(self, categorize, categories=categories, default=default)", "docstring": "Replaces discrete values with supplied categories\n\nReplaces discrete values in input array into a fixed set of\ncategories defined either as a list or dictionary.\n\nArgs:\ncategories: List or dict of categories to map inputs to\ndefault: Default value to assign if value not in categories", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        local_stream = utils.BytearrayStream()\n\n        if len(self._credentials) == 0:\n            raise ValueError(\"Authentication struct missing credentials.\")\n        for credential in self._credentials:\n            credential.write(local_stream, kmip_version=kmip_version)\n\n        self.length = local_stream.length()\n        super(Authentication, self).write(\n            output_stream,\n            kmip_version=kmip_version\n        )\n        output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Authentication struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def getTickTock(self, vals):\n        \n        val0, val1 = vals\n\n        try:\n            _tick = self._getLiftValu(val0)\n        except ValueError as e:\n            raise s_exc.BadTypeValu(name=self.name, valu=val0,\n                                    mesg='Unable to process the value for val0 in getTickTock.')\n\n        sortval = False\n        if isinstance(val1, str):\n            if val1.startswith(('+-', '-+')):\n                sortval = True\n                delt = s_time.delta(val1[2:])\n                \n                _tock = _tick + delt\n                _tick = _tick - delt\n            elif val1.startswith('-'):\n                sortval = True\n                _tock = self._getLiftValu(val1, relto=_tick)\n            else:\n                _tock = self._getLiftValu(val1, relto=_tick)\n        else:\n            _tock = self._getLiftValu(val1, relto=_tick)\n\n        if sortval and _tick >= _tock:\n            tick = min(_tick, _tock)\n            tock = max(_tick, _tock)\n            return tick, tock\n\n        return _tick, _tock", "docstring": "Get a tick, tock time pair.\n\nArgs:\nvals (list): A pair of values to norm.\n\nReturns:\n(int, int): A ordered pair of integers.", "source": "juraj-google-style"}
{"code": "async def count(self, text, opts=None):\n    i = 0\n    async for _ in self.cell.eval(text, opts=opts, user=self.user):\n        i += 1\n    return i", "docstring": "Count the number of nodes which result from a storm query.\n\nArgs:\ntext (str): Storm query text.\nopts (dict): Storm query options.\n\nReturns:\n(int): The number of nodes resulting from the query.", "source": "codesearchnet"}
{"code": "def rpow(self, other, axis=\"columns\", level=None, fill_value=None):\n        \n        return self._binary_op(\n            \"rpow\", other, axis=axis, level=level, fill_value=fill_value\n        )", "docstring": "Pow this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the pow against this.\naxis: The axis to pow over.\nlevel: The Multilevel index level to apply pow over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the Pow applied.", "source": "juraj-google-style"}
{"code": "def roc_auc_score(y_true: Union[List[List[float]], List[List[int]], np.ndarray],\n                  y_pred: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:\n    \n    try:\n        return sklearn.metrics.roc_auc_score(np.squeeze(np.array(y_true)),\n                                             np.squeeze(np.array(y_pred)), average=\"macro\")\n    except ValueError:\n        return 0.", "docstring": "Compute Area Under the Curve (AUC) from prediction scores.\n\nArgs:\ny_true: true binary labels\ny_pred: target scores, can either be probability estimates of the positive class\n\nReturns:\nArea Under the Curve (AUC) from prediction scores", "source": "juraj-google-style"}
{"code": "def get_backend_engine(self, name, **kwargs):\n        \n        if name not in self._engines:\n            msg = \"Given settings backend is unknowed: {}\"\n            raise SettingsBackendError(msg.format(name))\n\n        return self._engines[name](**kwargs)", "docstring": "Get backend engine from given name.\n\nArgs:\n(string): Path to validate.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If given backend name\ndoes not match any available engine.\n\nReturns:\nobject: Instance of selected backend engine.", "source": "juraj-google-style"}
{"code": "def get_sample_window(self, type_tag, size=10):\n        \n\n        \n        size = size * 1024 * 1024\n\n        \n        cursor = self.database[self.sample_collection].find({'type_tag': type_tag},\n            {'md5': 1,'length': 1}).sort('import_time',pymongo.DESCENDING)\n        total_size = 0\n        md5_list = []\n        for item in cursor:\n            if total_size > size:\n                return md5_list\n            md5_list.append(item['md5'])\n            total_size += item['length']\n\n        \n        \n        return md5_list", "docstring": "Get a window of samples not to exceed size (in MB).\n\nArgs:\ntype_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...).\nsize: Size of samples in MBs.\n\nReturns:\na list of md5s.", "source": "juraj-google-style"}
{"code": "def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False, rotate_manager_token=False):\n    url = self._url('/swarm/update')\n    response = self._post_json(url, data=swarm_spec, params={'rotateWorkerToken': rotate_worker_token, 'rotateManagerToken': rotate_manager_token, 'version': version})\n    self._raise_for_status(response)\n    return True", "docstring": "Update the Swarm's configuration\n\nArgs:\nversion (int): The version number of the swarm object being\nupdated. This is required to avoid conflicting writes.\nswarm_spec (dict): Configuration settings to update. Use\n:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to\ngenerate a valid configuration. Default: ``None``.\nrotate_worker_token (bool): Rotate the worker join token. Default:\n``False``.\nrotate_manager_token (bool): Rotate the manager join token.\nDefault: ``False``.\n\nReturns:\n``True`` if the request went through.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "async def verify_docker_worker_task(chain, link):\n    \n    if chain != link:\n        \n        \n        \n        \n        check_interactive_docker_worker(link)\n        verify_docker_image_sha(chain, link)", "docstring": "Docker-worker specific checks.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\nlink (ChainOfTrust or LinkOfTrust): the trust object for the signing task.\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def all(self, data={}, **kwargs):\n    return super(Subscription, self).all(data, **kwargs)", "docstring": "Fetch all Subscription entities\n\nReturns:\nDictionary of Subscription data", "source": "codesearchnet"}
{"code": "def reduce_by(self, package_request):\n    if self.pr:\n        reqstr = _short_req_str(package_request)\n        self.pr.passive('reducing %s wrt %s...', self, reqstr)\n    if self.solver.optimised:\n        if (package_request in self.been_reduced_by):\n            return (self, [])\n    if ((package_request.range is None) or (package_request.name not in self.fam_requires)):\n        return (self, [])\n    with self.solver.timed(self.solver.reduction_time):\n        return self._reduce_by(package_request)", "docstring": "Remove variants whos dependencies conflict with the given package\nrequest.\n\nReturns:\n(VariantSlice, [Reduction]) tuple, where slice may be None if all\nvariants were reduced.", "source": "codesearchnet"}
{"code": "def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):\n    \n\n    \n    outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)\n\n    \n    program = 'pepstats'\n    pepstats_args = '-sequence=\"{}\" -outfile=\"{}\"'.format(infile, outfile)\n    cmd_string = '{} {}'.format(program, pepstats_args)\n    ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=True)\n\n    return outfile", "docstring": "Run EMBOSS pepstats on a FASTA file.\n\nArgs:\ninfile: Path to FASTA file\noutfile: Name of output file without extension\noutdir: Path to output directory\noutext: Extension of results file, default is \".pepstats\"\nforce_rerun: Flag to rerun pepstats\n\nReturns:\nstr: Path to output file.", "source": "juraj-google-style"}
{"code": "def GetApprovalForObject(object_urn, token=None, username=\"\"):\n    \n    if token is None:\n      raise access_control.UnauthorizedAccess(\n          \"No token given, cannot authenticate.\")\n\n    if not username:\n      username = token.username\n\n    approvals_root_urn = aff4.ROOT_URN.Add(\"ACL\").Add(\n        object_urn.Path()).Add(username)\n\n    children_urns = list(aff4.FACTORY.ListChildren(approvals_root_urn))\n    if not children_urns:\n      raise access_control.UnauthorizedAccess(\n          \"No approval found for user %s\" % utils.SmartStr(username),\n          subject=object_urn)\n\n    last_error = None\n    approvals = aff4.FACTORY.MultiOpen(\n        children_urns,\n        mode=\"r\",\n        aff4_type=Approval,\n        age=aff4.ALL_TIMES,\n        token=token)\n    for approval in approvals:\n      try:\n        test_token = access_control.ACLToken(\n            username=username, reason=approval.Get(approval.Schema.REASON))\n        approval.CheckAccess(test_token)\n\n        return test_token\n      except access_control.UnauthorizedAccess as e:\n        last_error = e\n\n    if last_error:\n      \n      raise access_control.UnauthorizedAccess(last_error, subject=object_urn)\n    else:\n      \n      \n      \n      raise access_control.UnauthorizedAccess(\n          \"Couldn't open any of %d approvals \"\n          \"for user %s\" % (len(children_urns), utils.SmartStr(username)),\n          subject=object_urn)", "docstring": "Looks for approvals for an object and returns available valid tokens.\n\nArgs:\nobject_urn: Urn of the object we want access to.\n\ntoken: The token to use to lookup the ACLs.\n\nusername: The user to get the approval for, if \"\" we get it from the\ntoken.\n\nReturns:\nA token for access to the object on success, otherwise raises.\n\nRaises:\nUnauthorizedAccess: If there are no valid approvals available.", "source": "juraj-google-style"}
{"code": "def checksum1(data, stringlength):\n    \n    value_buffer = 0\n    for count in range(0, stringlength):\n        value_buffer = value_buffer ^ data[count]\n    return value_buffer&0xFE", "docstring": "Calculate Checksum 1\n\nCalculate the ckecksum 1 required for the herkulex data packet\n\nArgs:\ndata (list): the data of which checksum is to be calculated\nstringlength (int): the length of the data\n\nReturns:\nint:  The calculated checksum 1", "source": "juraj-google-style"}
{"code": "def from_json(cls, json):\n    \n    if json[cls.KEY_RANGE_PARAM] is None:\n      \n      key_ranges = None\n    else:\n      key_ranges = []\n      for k in json[cls.KEY_RANGE_PARAM]:\n        if k:\n          key_ranges.append(key_range.KeyRange.from_json(k))\n        else:\n          key_ranges.append(None)\n\n    if json[cls.NAMESPACE_RANGE_PARAM] is None:\n      ns_range = None\n    else:\n      ns_range = namespace_range.NamespaceRange.from_json_object(\n          json[cls.NAMESPACE_RANGE_PARAM])\n\n    if json[cls.CURRENT_KEY_RANGE_PARAM] is None:\n      current_key_range = None\n    else:\n      current_key_range = key_range.KeyRange.from_json(\n          json[cls.CURRENT_KEY_RANGE_PARAM])\n\n    return cls(\n        json[cls.ENTITY_KIND_PARAM],\n        key_ranges,\n        ns_range,\n        json[cls.BATCH_SIZE_PARAM],\n        current_key_range,\n        filters=json.get(cls.FILTERS_PARAM))", "docstring": "Create new DatastoreInputReader from the json, encoded by to_json.\n\nArgs:\njson: json map representation of DatastoreInputReader.\n\nReturns:\nan instance of DatastoreInputReader with all data deserialized from json.", "source": "juraj-google-style"}
{"code": "def substitute(self, var_map):\n    if (self in var_map):\n        return var_map[self]\n    return self._substitute(var_map)", "docstring": "Substitute sub-expressions\n\nArgs:\nvar_map (dict): Dictionary with entries of the form\n``{expr: substitution}``", "source": "codesearchnet"}
{"code": "def initialize(self):\n    if ops.executing_eagerly_outside_functions():\n        self._iterator._eager_reset()\n        return []\n    else:\n        return [self._iterator.initializer]", "docstring": "Initialize underlying iterator.\n\nIn eager execution, this simply recreates the underlying iterator.\nIn graph execution, it returns the initializer ops for the underlying\niterator.\n\nReturns:\nA list of any initializer ops that should be run.", "source": "github-repos"}
{"code": "def get_catalog_results(self, content_filter_query, query_params=None, traverse_pagination=False):\n        \n        query_params = query_params or {}\n\n        try:\n            endpoint = getattr(self.client, self.SEARCH_ALL_ENDPOINT)\n            response = endpoint().post(data=content_filter_query, **query_params)\n            if traverse_pagination:\n                response['results'] = self.traverse_pagination(response, endpoint, content_filter_query, query_params)\n                response['next'] = response['previous'] = None\n        except Exception as ex:  \n            LOGGER.exception(\n                'Attempted to call course-discovery search/all/ endpoint with the following parameters: '\n                'content_filter_query: %s, query_params: %s, traverse_pagination: %s. '\n                'Failed to retrieve data from the catalog API. content -- [%s]',\n                content_filter_query,\n                query_params,\n                traverse_pagination,\n                getattr(ex, 'content', '')\n            )\n            \n            raise ex\n\n        return response", "docstring": "Return results from the discovery service's search/all endpoint.\n\nArguments:\ncontent_filter_query (dict): query parameters used to filter catalog results.\nquery_params (dict): query parameters used to paginate results.\ntraverse_pagination (bool): True to return all results, False to return the paginated response.\nDefaults to False.\n\nReturns:\ndict: Paginated response or all the records.", "source": "juraj-google-style"}
{"code": "def samefile(path1, path2):\n    (path1, path1_is_storage) = format_and_is_storage(path1)\n    (path2, path2_is_storage) = format_and_is_storage(path2)\n    if ((not path1_is_storage) and (not path2_is_storage)):\n        return os_path_samefile(path1, path2)\n    if ((not path1_is_storage) or (not path2_is_storage)):\n        return False\n    with handle_os_exceptions():\n        system = get_instance(path1)\n        if (system is not get_instance(path2)):\n            return False\n        elif (system.relpath(path1) != system.relpath(path2)):\n            return False\n    return True", "docstring": "Return True if both pathname arguments refer to the same file or directory.\n\nEquivalent to \"os.path.samefile\".\n\nArgs:\npath1 (path-like object): Path or URL.\npath2 (path-like object): Path or URL.\n\nReturns:\nbool: True if same file or directory.", "source": "codesearchnet"}
{"code": "def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'):\n    source_string = open(source, 'r').read()\n    return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=group_marker_closing)", "docstring": "Read a string from a file and derive a ``Graph`` from it.\n\nThis is a convenience function for opening a file and passing its\ncontents to ``Graph.from_string()`` (see that for more detail)\n\nArgs:\nsource (str): the file to read and derive the graph from\ndistance_weights (dict): dict of relative indices corresponding\nwith word weights. See ``Graph.from_string`` for more detail.\nmerge_same_words (bool): whether nodes which have the same value\nshould be merged or not.\ngroup_marker_opening (str): The string used to mark the beginning\nof word groups.\ngroup_marker_closing (str): The string used to mark the end\nof word groups.\n\nReturns: Graph\n\nExample:\n>>> graph = Graph.from_file('cage.txt')            # doctest: +SKIP\n>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP\n'poetry i have nothing to say and i'", "source": "codesearchnet"}
{"code": "def schedule(self, function, args, kwargs):\n    closure = Closure(function, self.closure_queue._cancellation_mgr, args=args, kwargs=kwargs)\n    ret = closure.build_output_remote_value()\n    self.closure_queue.put(closure)\n    return ret", "docstring": "Schedules `function` to be dispatched to a worker for execution.\n\nArgs:\nfunction: The function to be dispatched to a worker for execution\nasynchronously.\nargs: Positional arguments for `fn`.\nkwargs: Keyword arguments for `fn`.\n\nReturns:\nA `RemoteValue` object.", "source": "github-repos"}
{"code": "def get_organisation(self, id, name=None):\n    return self.create_organisation(dict(id=id, name=name))", "docstring": "Get an organisation\n\nReturns:\nOrganisation: The organisation with the given `id`", "source": "codesearchnet"}
{"code": "def _split_list_into_bundles(self, output_pcollection, elements, max_element_per_bundle, element_size_fn):\n    bundle = self._evaluation_context.create_bundle(output_pcollection)\n    bundle_size = 0\n    bundles = [bundle]\n    for element in elements:\n        if max_element_per_bundle and bundle_size >= max_element_per_bundle:\n            bundle = self._evaluation_context.create_bundle(output_pcollection)\n            bundle_size = 0\n            bundles.append(bundle)\n        bundle.output(element)\n        bundle_size += element_size_fn(element)\n    return bundles", "docstring": "Splits elements, an iterable, into multiple output bundles.\n\nArgs:\noutput_pcollection: PCollection that the elements belong to.\nelements: elements to be chunked into bundles.\nmax_element_per_bundle: (approximately) the maximum element per bundle.\nIf it is None, only a single bundle will be produced.\nelement_size_fn: Function to return the size of a given element.\n\nReturns:\nList of output uncommitted bundles with at least one bundle.", "source": "github-repos"}
{"code": "def metadata(self, url):\n    _, path = self._parse_url(url)\n    status = self._hdfs_client.status(path, strict=False)\n    if status is None:\n        raise BeamIOError('File not found: %s' % url)\n    return FileMetadata(url, status[_FILE_STATUS_LENGTH], status[_FILE_STATUS_UPDATED] / 1000.0)", "docstring": "Fetch metadata fields of a file on the FileSystem.\n\nArgs:\nurl: string url of a file.\n\nReturns:\n:class:`~apache_beam.io.filesystem.FileMetadata`.\n\nRaises:\n``BeamIOError``: if url doesn't exist.", "source": "github-repos"}
{"code": "def exportUsufy(data, ext, fileH):\n    if (ext == 'csv'):\n        usufyToCsvExport(data, ((fileH + '.') + ext))\n    elif (ext == 'gml'):\n        usufyToGmlExport(data, ((fileH + '.') + ext))\n    elif (ext == 'json'):\n        usufyToJsonExport(data, ((fileH + '.') + ext))\n    elif (ext == 'ods'):\n        usufyToOdsExport(data, ((fileH + '.') + ext))\n    elif (ext == 'png'):\n        usufyToPngExport(data, ((fileH + '.') + ext))\n    elif (ext == 'txt'):\n        usufyToTextExport(data, ((fileH + '.') + ext))\n    elif (ext == 'xls'):\n        usufyToXlsExport(data, ((fileH + '.') + ext))\n    elif (ext == 'xlsx'):\n        usufyToXlsxExport(data, ((fileH + '.') + ext))", "docstring": "Method that exports the different structures onto different formats.\n\nArgs:\n-----\ndata: Data to export.\next: One of the following: csv, excel, json, ods.\nfileH: Fileheader for the output files.\n\nReturns:\n--------\nPerforms the export as requested by parameter.", "source": "codesearchnet"}
{"code": "def CreateTask(self, session_identifier):\n    \n    task = tasks.Task(session_identifier)\n    logger.debug('Created task: {0:s}.'.format(task.identifier))\n\n    with self._lock:\n      self._tasks_queued[task.identifier] = task\n      self._total_number_of_tasks += 1\n\n      self.SampleTaskStatus(task, 'created')\n\n    return task", "docstring": "Creates a task.\n\nArgs:\nsession_identifier (str): the identifier of the session the task is\npart of.\n\nReturns:\nTask: task attribute container.", "source": "juraj-google-style"}
{"code": "def delete(self, key):\n    key = self._service_key(key)\n    self._service_ops['delete'](key)", "docstring": "Removes the object named by `key` in `service`.\n\nArgs:\nkey: Key naming the object to remove.", "source": "codesearchnet"}
{"code": "def build_all_reduce_device_prefixes(job_name, num_tasks):\n    if (job_name != 'localhost'):\n        return [('/job:%s/task:%d' % (job_name, d)) for d in range(0, num_tasks)]\n    else:\n        assert (num_tasks == 1)\n        return [('/job:%s' % job_name)]", "docstring": "Build list of device prefix names for all_reduce.\n\nArgs:\njob_name: \"worker\", \"ps\" or \"localhost\".\nnum_tasks: number of jobs across which device names should be generated.\n\nReturns:\nA list of device name prefix strings. Each element spells out the full\nhost name without adding the device.\ne.g. \"/job:worker/task:0\"", "source": "codesearchnet"}
{"code": "def get_package(name, version, paths=None):\n    \n    if isinstance(version, basestring):\n        range_ = VersionRange(\"==%s\" % version)\n    else:\n        range_ = VersionRange.from_version(version, \"==\")\n\n    it = iter_packages(name, range_, paths)\n    try:\n        return it.next()\n    except StopIteration:\n        return None", "docstring": "Get an exact version of a package.\n\nArgs:\nname (str): Name of the package, eg 'maya'.\nversion (Version or str): Version of the package, eg '1.0.0'\npaths (list of str, optional): paths to search for package, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` object, or None if the package was not found.", "source": "juraj-google-style"}
{"code": "def new_cells(self, name=None, formula=None):\n        \n        \n        return self._impl.new_cells(name, formula).interface", "docstring": "Create a cells in the space.\n\nArgs:\nname: If omitted, the model is named automatically ``CellsN``,\nwhere ``N`` is an available number.\nfunc: The function to define the formula of the cells.\n\nReturns:\nThe new cells.", "source": "juraj-google-style"}
{"code": "def Get(self, request, global_params=None):\n    config = self.GetMethodConfig('Get')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets the specified model resource by model ID.\n\nArgs:\nrequest: (BigqueryModelsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Model) The response message.", "source": "github-repos"}
{"code": "def _full_reduce(self, axis, map_func, reduce_func=None):\n        \n        if reduce_func is None:\n            reduce_func = map_func\n\n        mapped_parts = self.data.map_across_blocks(map_func)\n        full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)\n        if axis == 0:\n            columns = self.columns\n            return self.__constructor__(\n                full_frame, index=[\"__reduced__\"], columns=columns\n            )\n        else:\n            index = self.index\n            return self.__constructor__(\n                full_frame, index=index, columns=[\"__reduced__\"]\n            )", "docstring": "Apply function that will reduce the data to a Pandas Series.\n\nArgs:\naxis: 0 for columns and 1 for rows. Default is 0.\nmap_func: Callable function to map the dataframe.\nreduce_func: Callable function to reduce the dataframe. If none,\nthen apply map_func twice.\n\nReturn:\nA new QueryCompiler object containing the results from map_func and\nreduce_func.", "source": "juraj-google-style"}
{"code": "def swd_read16(self, offset):\n        \n        value = self._dll.JLINK_SWD_GetU16(offset)\n        return ctypes.c_uint16(value).value", "docstring": "Gets a unit of ``16`` bits from the input buffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\noffset (int): the offset (in bits) from which to start reading\n\nReturns:\nThe integer read from the input buffer.", "source": "juraj-google-style"}
{"code": "def slice(inputs, start_indices, shape):\n    if any_symbolic_tensors((inputs, start_indices)):\n        return Slice(shape=shape).symbolic_call(inputs, start_indices)\n    return backend.core.slice(inputs, start_indices, shape)", "docstring": "Return a slice of an input tensor.\n\nAt a high level, this operation is an explicit replacement for array slicing\ne.g. `inputs[start_indices: start_indices + shape]`.\nUnlike slicing via brackets, this operation will accept tensor start\nindices on all backends, which is useful when indices dynamically computed\nvia other tensor operations.\n\n```python\ninputs = np.zeros((5, 5))\nstart_indices = np.array([3, 3])\nshape = np.array([2, 2])\ninputs = keras.ops.slice(inputs, start_indices, shape)\n```\n\nArgs:\ninputs: A tensor, the tensor to be updated.\nstart_indices: A list/tuple of shape `(inputs.ndim,)`, specifying\nthe starting indices for updating.\nshape: The full shape of the returned slice.\n\nReturns:\nA tensor, has the same shape and dtype as `inputs`.", "source": "github-repos"}
{"code": "def get_usb_serial(self, port_num):\n    port = self.port_map[str(port_num)]\n    arg = ''.join(['DEVICE INFO,', self._addr, '.', port])\n    cmd = ['esuit64', '-t', arg]\n    info = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n    serial = None\n    if ('SERIAL' in info):\n        serial_info = info.split('SERIAL:')[1]\n        serial = serial_info.split('\\n')[0].strip()\n        use_info = info.split('BY')[1].split(' ')[1]\n        if (use_info == 'NO'):\n            cmd = ['esuit64', '-t', 'AUTO USE ALL']\n            subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n            time.sleep((50.0 / 1000.0))\n    else:\n        raise ValueError('No USB device detected')\n    return serial", "docstring": "Get the device serial number\n\nArgs:\nport_num: port number on the Cambrionix unit\n\nReturn:\nusb device serial number", "source": "codesearchnet"}
{"code": "def string_to_scopes(scopes):\n    \n    if not scopes:\n        return []\n    elif isinstance(scopes, six.string_types):\n        return scopes.split(' ')\n    else:\n        return scopes", "docstring": "Converts stringifed scope value to a list.\n\nIf scopes is a list then it is simply passed through. If scopes is an\nstring then a list of each individual scope is returned.\n\nArgs:\nscopes: a string or iterable of strings, the scopes.\n\nReturns:\nThe scopes in a list.", "source": "juraj-google-style"}
{"code": "def make_single_array(ds, batch_size=(8 * 1024)):\n    if (isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple)):\n        raise ValueError('Dataset must have a single type and shape')\n    nshapes = len(ds.output_shapes)\n    if (nshapes > 0):\n        raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')\n    batches = []\n    with tf.Session() as sess:\n        ds = ds.batch(batch_size)\n        iterator = ds.make_initializable_iterator()\n        sess.run(iterator.initializer)\n        get_next = iterator.get_next()\n        with tqdm(desc='Elements', unit_scale=1) as pbar:\n            try:\n                while True:\n                    batches.append(sess.run(get_next))\n                    pbar.update(len(batches[(- 1)]))\n            except tf.errors.OutOfRangeError:\n                pass\n    if batches:\n        return np.concatenate(batches)\n    return np.array([], dtype=ds.output_types.as_numpy_dtype)", "docstring": "Create a single numpy array from a dataset.\n\nThe dataset must have only one dimension, that is,\nthe length of its `output_shapes` and `output_types`\nis 1, and its output shape must be `[]`, that is,\nevery tensor in the dataset must be a scalar.\n\nArgs:\nds:  a TF Dataset.\nbatch_size:  how many elements to read per pass\n\nReturns:\na single numpy array.", "source": "codesearchnet"}
{"code": "def na_if(series, *values):\n    series = pd.Series(series)\n    series[series.isin(values)] = np.nan\n    return series", "docstring": "If values in a series match a specified value, change them to `np.nan`.\n\nArgs:\nseries: Series or vector, often symbolic.\n*values: Value(s) to convert to `np.nan` in the series.", "source": "codesearchnet"}
{"code": "def from_api_repr(cls, resource):\n        \n        if (\n            \"datasetReference\" not in resource\n            or \"datasetId\" not in resource[\"datasetReference\"]\n        ):\n            raise KeyError(\n                \"Resource lacks required identity information:\"\n                '[\"datasetReference\"][\"datasetId\"]'\n            )\n        project_id = resource[\"datasetReference\"][\"projectId\"]\n        dataset_id = resource[\"datasetReference\"][\"datasetId\"]\n        dataset = cls(DatasetReference(project_id, dataset_id))\n        dataset._properties = copy.deepcopy(resource)\n        return dataset", "docstring": "Factory: construct a dataset given its API representation\n\nArgs:\nresource (Dict[str: object]):\nDataset resource representation returned from the API\n\nReturns:\ngoogle.cloud.bigquery.dataset.Dataset:\nDataset parsed from ``resource``.", "source": "juraj-google-style"}
{"code": "def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, 'Dict']:\n    proceed_with_standard_apply = True\n    if self._value_spec:\n        if value_spec and (not value_spec.is_compatible(self._value_spec)):\n            raise ValueError(utils.message_on_path(f'Dict (spec={self._value_spec!r}) cannot be assigned to an incompatible field (spec={value_spec!r}).', path))\n        if self._allow_partial == allow_partial:\n            proceed_with_standard_apply = False\n        else:\n            self._allow_partial = allow_partial\n    elif isinstance(value_spec, pg_typing.Dict):\n        self._value_spec = value_spec\n    return (proceed_with_standard_apply, self)", "docstring": "Implement pg.typing.CustomTyping interface.\n\nArgs:\npath: KeyPath of current object.\nvalue_spec: Origin value spec of the field.\nallow_partial: Whether allow partial object to be created.\nchild_transform: Function to transform child node values in dict_obj into\ntheir final values. Transform function is called on leaf nodes first,\nthen on their containers, recursively.\n\nReturns:\nA tuple (proceed_with_standard_apply, transformed value)", "source": "github-repos"}
{"code": "def dict_of_sets_add(dictionary, key, value):\n    \n    \n    set_objs = dictionary.get(key, set())\n    set_objs.add(value)\n    dictionary[key] = set_objs", "docstring": "Add value to a set in a dictionary by key\n\nArgs:\ndictionary (DictUpperBound): Dictionary to which to add values\nkey (Any): Key within dictionary\nvalue (Any): Value to add to set in dictionary\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _binary_assert_doc_v2(sym, opname, test_var):\n\n    def _decorator(func):\n        \n        func.__doc__ = '\\n    Assert the condition `x {sym} y` holds element-wise.\\n\\n    This Op checks that `x[i] {sym} y[i]` holds for every pair of (possibly\\n    broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\\n    trivially satisfied.\\n\\n    If `x` {sym} `y` does not hold, `message`, as well as the first `summarize`\\n    entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.\\n\\n    When using inside `tf.function`, this API takes effects during execution.\\n    It\\'s recommended to use this API with `tf.control_dependencies` to\\n    ensure the correct execution order.\\n\\n    In the following example, without `tf.control_dependencies`, errors may\\n    not be raised at all.\\n    Check `tf.control_dependencies` for more details.\\n\\n    >>> def check_size(x):\\n    ...   with tf.control_dependencies([\\n    ...       tf.debugging.{opname}(tf.size(x), {test_var},\\n    ...                       message=\\'Bad tensor size\\')]):\\n    ...     return x\\n\\n    >>> check_size(tf.ones([2, 3], tf.float32))\\n    Traceback (most recent call last):\\n       ...\\n    InvalidArgumentError: ...\\n\\n    Args:\\n      x:  Numeric `Tensor`.\\n      y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.\\n      message: A string to prefix to the default message. (optional)\\n      summarize: Print this many entries of each tensor. (optional)\\n      name: A name for this operation (optional).  Defaults to \"{opname}\".\\n\\n    Returns:\\n      Op that raises `InvalidArgumentError` if `x {sym} y` is False. This can\\n        be used with `tf.control_dependencies` inside of `tf.function`s to\\n        block followup computation until the check has executed.\\n      @compatibility(eager)\\n      returns None\\n      @end_compatibility\\n\\n    Raises:\\n      InvalidArgumentError: if the check can be performed immediately and\\n        `x == y` is False. The check can be performed immediately during eager\\n        execution or if `x` and `y` are statically known.\\n    '.format(sym=sym, opname=opname, test_var=test_var)\n        return func\n    return _decorator", "docstring": "Common docstring for v2 assert_* ops that compare two tensors element-wise.\n\nArgs:\nsym: Binary operation symbol, i.e. \"==\"\nopname: Name for the symbol, i.e. \"assert_equal\"\ntest_var: A number used in the docstring example\n\nReturns:\nDecorator that adds the appropriate docstring to the function for\nsymbol `sym`.", "source": "github-repos"}
{"code": "def inspect_edge(G: AnalysisGraph, source: str, target: str):\n    \n\n    return create_statement_inspection_table(\n        G[source][target][\"InfluenceStatements\"]\n    )", "docstring": "'Drill down' into an edge in the analysis graph and inspect its\nprovenance. This function prints the provenance.\n\nArgs:\nG\nsource\ntarget", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        return SeriesWeld(\n            grizzly_impl.compare(\n                self.expr,\n                other,\n                \"==\",\n                self.weld_type\n            ),\n            WeldBit(),\n            self.df,\n            self.column_name\n        )", "docstring": "Summary\n\nArgs:\nother (TYPE): Description\n\nReturns:\nTYPE: Description", "source": "juraj-google-style"}
{"code": "async def get_ticket(self, request):\n    session = (await get_session(request))\n    return session.get(self.cookie_name)", "docstring": "Called to return the ticket for a request.\n\nArgs:\nrequest: aiohttp Request object.\n\nReturns:\nA ticket (string like) object, or None if no ticket is available\nfor the passed request.", "source": "codesearchnet"}
{"code": "def get_all_artifacts_per_task_id(chain, upstream_artifacts):\n    \n    all_artifacts_per_task_id = {}\n    for link in chain.links:\n        \n        if link.task_type in PARENT_TASK_TYPES:\n            add_enumerable_item_to_dict(\n                dict_=all_artifacts_per_task_id, key=link.task_id, item='public/task-graph.json'\n            )\n        \n        if link.task_type in DECISION_TASK_TYPES:\n            add_enumerable_item_to_dict(\n                dict_=all_artifacts_per_task_id, key=link.task_id, item='public/actions.json'\n            )\n            add_enumerable_item_to_dict(\n                dict_=all_artifacts_per_task_id, key=link.task_id, item='public/parameters.yml'\n            )\n\n    if upstream_artifacts:\n        for upstream_dict in upstream_artifacts:\n            add_enumerable_item_to_dict(\n                dict_=all_artifacts_per_task_id, key=upstream_dict['taskId'], item=upstream_dict['paths']\n            )\n\n    \n    for task_id, paths in all_artifacts_per_task_id.items():\n        all_artifacts_per_task_id[task_id] = sorted(set(paths))\n\n    return all_artifacts_per_task_id", "docstring": "Return every artifact to download, including the Chain Of Trust Artifacts.\n\nArgs:\nchain (ChainOfTrust): the chain of trust object\nupstream_artifacts: the list of upstream artifact definitions\n\nReturns:\ndict: sorted list of paths to downloaded artifacts ordered by taskId", "source": "juraj-google-style"}
{"code": "def from_api_repr(cls, resource):\n        \n        entry = resource.copy()\n        role = entry.pop(\"role\", None)\n        entity_type, entity_id = entry.popitem()\n        if len(entry) != 0:\n            raise ValueError(\"Entry has unexpected keys remaining.\", entry)\n        return cls(role, entity_type, entity_id)", "docstring": "Factory: construct an access entry given its API representation\n\nArgs:\nresource (Dict[str, object]):\nAccess entry resource representation returned from the API\n\nReturns:\ngoogle.cloud.bigquery.dataset.AccessEntry:\nAccess entry parsed from ``resource``.\n\nRaises:\nValueError:\nIf the resource has more keys than ``role`` and one additional\nkey.", "source": "juraj-google-style"}
{"code": "def sent_request(self, value):\n        \n        if value == self._defaults['sentRequest'] and 'sentRequest' in self._values:\n            del self._values['sentRequest']\n        else:\n            self._values['sentRequest'] = value", "docstring": "The sent_request property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def filter_def_file(def_file: str, filter_file: str, filtered_file: str) -> None:\n    with open(filter_file, 'r', encoding='utf-8') as filter_file_handle:\n        filter_json: Dict[str, Any] = json.load(filter_file_handle)\n        inclusion_patterns: List[str] = filter_json['global'] + ['EXPORTS', '*;*']\n        incl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\\\*', '.*')) for p in inclusion_patterns]\n        exclusion_patterns: List[str] = filter_json['local']\n        excl_patterns: List[Pattern[str]] = [re.compile(re.escape(p).replace('\\\\*', '.*')) for p in exclusion_patterns]\n    with open(def_file, 'r') as orig_file, open(filtered_file, 'w') as filt_file:\n        for l in orig_file:\n            if not matches_any(excl_patterns, l) or matches_any(incl_patterns, l):\n                filt_file.write(l)", "docstring": "Filters a windows .def file based on a filter .json.\n\nArgs:\ndef_file: The path to the input windows .def file.\nfilter_file: The path to the filter file (JSON format).\nfiltered_file: The path to the output filtered windows .def file.", "source": "github-repos"}
{"code": "def BuildAdGroupOperations(batch_job_helper, campaign_operations, number_of_adgroups=1):\n    adgroup_operations = [{'xsi_type': 'AdGroupOperation', 'operand': {'campaignId': campaign_operation['operand']['id'], 'id': batch_job_helper.GetId(), 'name': ('Batch Ad Group \n    return adgroup_operations", "docstring": "Builds the operations adding desired number of AdGroups to given Campaigns.\n\nNote: When the AdGroups are created, they will have a different Id than those\ngenerated here as a temporary Id. This is just used to identify them in the\nBatchJobService.\n\nArgs:\nbatch_job_helper: a BatchJobHelper instance.\ncampaign_operations: a list containing the operations that will add\nCampaigns.\nnumber_of_adgroups: an int defining the number of AdGroups to be created per\nCampaign.\n\nReturns:\na list containing the operations that will add the desired number of\nAdGroups to each of the provided Campaigns.", "source": "codesearchnet"}
{"code": "def __init__(self, job, runner, options=None):\n    self._job = job\n    self._runner = runner\n    self._options = options\n    self.metric_results = None", "docstring": "Initialize a new DataflowPipelineResult instance.\n\nArgs:\njob: Job message from the Dataflow API. Could be :data:`None` if a job\nrequest was not sent to Dataflow service (e.g. template jobs).\nrunner: DataflowRunner instance.", "source": "github-repos"}
{"code": "def create_autocast_variable(variable):\n    if not distributed_training_utils.is_distributed_variable(variable):\n        return AutoCastVariable(variable)\n\n    class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):\n        \n\n        def __repr__(self):\n            return '<AutoCastDistributedVariable dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name} inner_variable={v._variable}>'.format(v=self)\n    return AutoCastDistributedVariable(variable)", "docstring": "Creates an AutoCastVariable that wraps another variable.\n\nThis typically just returns `AutoCastVariable(variable)`. But, if the variable\nis a DistributedVariable or one of its subclasses, we instead dynamically\ncreate a class that subclasses from both AutoCastVariable and\nvariable.__class__. This is so the returned variable will still pass\n`isinstance(variable, variable.__class__)`, which is required for\nDistributedVariables and its subclasses to work properly.\n\nArgs:\nvariable: A floating-point resource variable to wrap.\n\nReturns:\nAn AutoCastVariable that wraps the variable.", "source": "github-repos"}
{"code": "def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True):\n    reference_length = 0\n    translation_length = 0\n    bp = 1.0\n    geo_mean = 0\n    matches_by_order = ([0] * max_order)\n    possible_matches_by_order = ([0] * max_order)\n    precisions = []\n    for (references, translations) in zip(reference_corpus, translation_corpus):\n        reference_length += len(references)\n        translation_length += len(translations)\n        ref_ngram_counts = _get_ngrams_with_counter(references, max_order)\n        translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)\n        overlap = dict(((ngram, min(count, translation_ngram_counts[ngram])) for (ngram, count) in ref_ngram_counts.items()))\n        for ngram in overlap:\n            matches_by_order[(len(ngram) - 1)] += overlap[ngram]\n        for ngram in translation_ngram_counts:\n            possible_matches_by_order[(len(ngram) - 1)] += translation_ngram_counts[ngram]\n    precisions = ([0] * max_order)\n    smooth = 1.0\n    for i in xrange(0, max_order):\n        if (possible_matches_by_order[i] > 0):\n            precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])\n            if (matches_by_order[i] > 0):\n                precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])\n            else:\n                smooth *= 2\n                precisions[i] = (1.0 / (smooth * possible_matches_by_order[i]))\n        else:\n            precisions[i] = 0.0\n    if (max(precisions) > 0):\n        p_log_sum = sum((math.log(p) for p in precisions if p))\n        geo_mean = math.exp((p_log_sum / max_order))\n    if use_bp:\n        ratio = (translation_length / reference_length)\n        bp = (math.exp((1 - (1.0 / ratio))) if (ratio < 1.0) else 1.0)\n    bleu = (geo_mean * bp)\n    return np.float32(bleu)", "docstring": "Computes BLEU score of translated segments against one or more references.\n\nArgs:\nreference_corpus: list of references for each translation. Each\nreference should be tokenized into a list of tokens.\ntranslation_corpus: list of translations to score. Each translation\nshould be tokenized into a list of tokens.\nmax_order: Maximum n-gram order to use when computing BLEU score.\nuse_bp: boolean, whether to apply brevity penalty.\n\nReturns:\nBLEU score.", "source": "codesearchnet"}
{"code": "def _ring_2d(m, n):\n    if (m == 1):\n        return [(0, i) for i in range(n)]\n    if (n == 1):\n        return [(i, 0) for i in range(m)]\n    if ((m % 2) != 0):\n        tf.logging.warning('Odd dimension')\n        return [((i % m), (i \n    ret = [(0, 0)]\n    for i in range((m \n        for j in range(1, n):\n            ret.append(((2 * i), j))\n        for j in range((n - 1), 0, (- 1)):\n            ret.append((((2 * i) + 1), j))\n    for i in range((m - 1), 0, (- 1)):\n        ret.append((i, 0))\n    return ret", "docstring": "Ring-order of a mxn mesh.\n\nArgs:\nm: an integer\nn: an integer\nReturns:\na list of mxn pairs", "source": "codesearchnet"}
{"code": "def to_json_string(self, use_diff: bool=True) -> str:\n    if use_diff is True:\n        config_dict = self.to_diff_dict()\n    else:\n        config_dict = self.to_dict()\n    return json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nArgs:\nuse_diff (`bool`, *optional*, defaults to `True`):\nIf set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\nis serialized to JSON string.\n\nReturns:\n`str`: String containing all the attributes that make up this configuration instance in JSON format.", "source": "github-repos"}
{"code": "def get_aligned_output_features_output_indices(out_features: Optional[list[str]], out_indices: Optional[Union[list[int], tuple[int]]], stage_names: list[str]) -> tuple[list[str], list[int]]:\n    out_indices = list(out_indices) if out_indices is not None else None\n    verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)\n    output_features, output_indices = _align_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)\n    verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names)\n    return (output_features, output_indices)", "docstring": "Get the `out_features` and `out_indices` so that they are aligned.\n\nThe logic is as follows:\n- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the\n`out_indices`.\n- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the\n`out_features`.\n- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.\n- `out_indices` and `out_features` set: they are verified to be aligned.\n\nArgs:\nout_features (`List[str]`): The names of the features for the backbone to output.\nout_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.\nstage_names (`List[str]`): The names of the stages of the backbone.", "source": "github-repos"}
{"code": "def get_output_embeddings(self) -> nn.Module:\n    return None", "docstring": "Returns the model's output embeddings.\n\nReturns:\n`nn.Module`: A torch module mapping hidden states to vocabulary.", "source": "github-repos"}
{"code": "def bulk_insert(self, rows, return_model=False):\n    if (self.conflict_target or self.conflict_action):\n        compiler = self._build_insert_compiler(rows)\n        objs = compiler.execute_sql(return_id=True)\n        if return_model:\n            return [self.model(**dict(r, **k)) for (r, k) in zip(rows, objs)]\n        else:\n            return [dict(r, **k) for (r, k) in zip(rows, objs)]\n    return super().bulk_create([self.model(**fields) for fields in rows])", "docstring": "Creates multiple new records in the database.\n\nThis allows specifying custom conflict behavior using .on_conflict().\nIf no special behavior was specified, this uses the normal Django create(..)\n\nArguments:\nrows:\nAn array of dictionaries, where each dictionary\ndescribes the fields to insert.\n\nreturn_model (default: False):\nIf model instances should be returned rather than\njust dicts.\n\nReturns:\nA list of either the dicts of the rows inserted, including the pk or\nthe models of the rows inserted with defaults for any fields not specified", "source": "codesearchnet"}
{"code": "def get_header(message, name):\n    header = message.get(name)\n    log.debug('Getting header {!r}: {!r}'.format(name, header))\n    if header:\n        return decode_header_part(header)\n    return six.text_type()", "docstring": "Gets an email.message.Message and a header name and returns\nthe mail header decoded with the correct charset.\n\nArgs:\nmessage (email.message.Message): email message object\nname (string): header to get\n\nReturns:\ndecoded header", "source": "codesearchnet"}
{"code": "def flip_variable(self, v):\n    try:\n        idx = self.variables.index(v)\n    except ValueError:\n        raise ValueError('variable {} is not a variable in constraint {}'.format(v, self.name))\n    if (self.vartype is dimod.BINARY):\n        original_func = self.func\n\n        def func(*args):\n            new_args = list(args)\n            new_args[idx] = (1 - new_args[idx])\n            return original_func(*new_args)\n        self.func = func\n        self.configurations = frozenset((((config[:idx] + ((1 - config[idx]),)) + config[(idx + 1):]) for config in self.configurations))\n    else:\n        original_func = self.func\n\n        def func(*args):\n            new_args = list(args)\n            new_args[idx] = (- new_args[idx])\n            return original_func(*new_args)\n        self.func = func\n        self.configurations = frozenset((((config[:idx] + ((- config[idx]),)) + config[(idx + 1):]) for config in self.configurations))\n    self.name = '{} ({} flipped)'.format(self.name, v)", "docstring": "Flip a variable in the constraint.\n\nArgs:\nv (variable):\nVariable in the constraint to take the complementary value of its\nconstruction value.\n\nExamples:\nThis example creates a constraint that :math:`a = b` on binary variables\nand flips variable a.\n\n>>> import dwavebinarycsp\n>>> const = dwavebinarycsp.Constraint.from_func(operator.eq,\n...             ['a', 'b'], dwavebinarycsp.BINARY)\n>>> const.check({'a': 0, 'b': 0})\nTrue\n>>> const.flip_variable('a')\n>>> const.check({'a': 1, 'b': 0})\nTrue\n>>> const.check({'a': 0, 'b': 0})\nFalse", "source": "codesearchnet"}
{"code": "def add(self, other_op):\n    self._op.logEntries.extend(other_op.logEntries)\n    self._merge_timestamps(other_op)\n    self._merge_metric_values(other_op)", "docstring": "Combines `other_op` with the operation held by this aggregator.\n\nN.B. It merges the operations log entries and metric values, but makes\nthe assumption the operation is consistent.  It's the callers\nresponsibility to ensure consistency\n\nArgs:\nother_op (\nclass:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):\nan operation merge into this one", "source": "codesearchnet"}
{"code": "def ParsePageVisitedRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):\n    query_hash = hash(query)\n    from_visit = self._GetRowValue(query_hash, row, 'from_visit')\n    hidden = self._GetRowValue(query_hash, row, 'hidden')\n    rev_host = self._GetRowValue(query_hash, row, 'rev_host')\n    typed = self._GetRowValue(query_hash, row, 'typed')\n    extras = []\n    if from_visit:\n        extras.append('visited from: {0:s}'.format(self._GetUrl(from_visit, cache, database)))\n    if (hidden == '1'):\n        extras.append('(url hidden)')\n    if (typed == '1'):\n        extras.append('(directly typed)')\n    else:\n        extras.append('(URL not typed directly)')\n    event_data = FirefoxPlacesPageVisitedEventData()\n    event_data.host = self._ReverseHostname(rev_host)\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')\n    event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')\n    if extras:\n        event_data.extra = extras\n    timestamp = self._GetRowValue(query_hash, row, 'visit_date')\n    if timestamp:\n        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a page visited row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.\ncache (Optional[SQLiteCache]): cache.\ndatabase (Optional[SQLiteDatabase]): database.", "source": "codesearchnet"}
{"code": "def AddEvent(self, event):\n    \n    self._RaiseIfNotWritable()\n\n    \n    \n    event_data_identifier = event.GetEventDataIdentifier()\n    if event_data_identifier:\n      if not isinstance(event_data_identifier, identifiers.SQLTableIdentifier):\n        raise IOError('Unsupported event data identifier type: {0:s}'.format(\n            type(event_data_identifier)))\n\n      event.event_data_row_identifier = event_data_identifier.row_identifier\n\n    self._AddSerializedEvent(event)", "docstring": "Adds an event.\n\nArgs:\nevent (EventObject): event.\n\nRaises:\nIOError: when the storage file is closed or read-only or\nif the event data identifier type is not supported.\nOSError: when the storage file is closed or read-only or\nif the event data identifier type is not supported.", "source": "juraj-google-style"}
{"code": "def getGrid(self, use_mask=True):\n    grid_card_name = 'WATERSHED_MASK'\n    if (not use_mask):\n        grid_card_name = 'ELEVATION'\n    return self.getGridByCard(grid_card_name)", "docstring": "Returns GDALGrid object of GSSHA model bounds\n\nParameters:\nuse_mask(bool): If True, uses the watershed mask. Otherwise, it uses the elevation grid.\n\nReturns:\nGDALGrid", "source": "codesearchnet"}
{"code": "def on_predict_batch_end(self, batch, logs=None):\n    if self._should_call_predict_batch_hooks:\n        self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)", "docstring": "Calls the `on_predict_batch_end` methods of its callbacks.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def _gauss(mean: int, sigma: int) -> int:\n    return int(random.gauss(mean, sigma))", "docstring": "Creates a variation from a base value\n\nArgs:\nmean: base value\nsigma: gaussian sigma\n\nReturns: random value", "source": "codesearchnet"}
{"code": "def get_identifier(identifier, module_globals, module_name):\n    if isinstance(identifier, six.string_types):\n        fn = module_globals.get(identifier)\n        if (fn is None):\n            raise ValueError('Unknown {}: {}'.format(module_name, identifier))\n        return fn\n    elif callable(identifier):\n        return identifier\n    else:\n        raise ValueError('Could not interpret identifier')", "docstring": "Helper utility to retrieve the callable function associated with a string identifier.\n\nArgs:\nidentifier: The identifier. Could be a string or function.\nmodule_globals: The global objects of the module.\nmodule_name: The module name\n\nReturns:\nThe callable associated with the identifier.", "source": "codesearchnet"}
{"code": "def set_intrusion_alert_through_smoke_detectors(self, activate: bool = True):\n        \n        data = {\"intrusionAlertThroughSmokeDetectors\": activate}\n        return self._restCall(\n            \"home/security/setIntrusionAlertThroughSmokeDetectors\", json.dumps(data)\n        )", "docstring": "activate or deactivate if smoke detectors should \"ring\" during an alarm\n\nArgs:\nactivate(bool): True will let the smoke detectors \"ring\" during an alarm", "source": "juraj-google-style"}
{"code": "def add_arguments(self, parser, bootstrap=False):\n    [item.add_argument(parser, bootstrap) for item in self._get_items(bootstrap=False)]", "docstring": "Adds all items to the parser passed in.\n\nArgs:\nparser (argparse.ArgumentParser): The parser to add all items to.\nbootstrap (bool): Flag to indicate whether you only want to mark\nbootstrapped items as required on the command-line.", "source": "codesearchnet"}
{"code": "def datacenters(self):\n    if (not self.__datacenters):\n        self.__datacenters = Datacenters(self.__connection)\n    return self.__datacenters", "docstring": "Gets the Datacenters API client.\n\nReturns:\nDatacenters:", "source": "codesearchnet"}
{"code": "def from_dict(cls, copula_dict):\n        \n        instance = cls(copula_dict['copula_type'])\n        instance.theta = copula_dict['theta']\n        instance.tau = copula_dict['tau']\n        return instance", "docstring": "Create a new instance from the given parameters.\n\nArgs:\ncopula_dict: `dict` with the parameters to replicate the copula.\nLike the output of `Bivariate.to_dict`\n\nReturns:\nBivariate: Instance of the copula defined on the parameters.", "source": "juraj-google-style"}
{"code": "def should_submit(stack):\n    \n    if stack.enabled:\n        return True\n\n    logger.debug(\"Stack %s is not enabled.  Skipping.\", stack.name)\n    return False", "docstring": "Tests whether a stack should be submitted to CF for update/create\n\nArgs:\nstack (:class:`stacker.stack.Stack`): The stack object to check.\n\nReturns:\nbool: If the stack should be submitted, return True.", "source": "juraj-google-style"}
{"code": "def findLabel(self, query, create=False):\n        \n        if isinstance(query, six.string_types):\n            query = query.lower()\n\n        for label in self._labels.values():\n            if (isinstance(query, six.string_types) and query == label.name.lower()) or \\\n                (isinstance(query, Pattern) and query.search(label.name)):\n                return label\n\n        return self.createLabel(query) if create and isinstance(query, six.string_types) else None", "docstring": "Find a label with the given name.\n\nArgs:\nquery (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name.\ncreate (bool): Whether to create the label if it doesn't exist (only if query is a str).\n\nReturns:\nUnion[gkeepapi.node.Label, None]: The label, or None if it was not found.", "source": "juraj-google-style"}
{"code": "def __eof_qubit(rho):\n    \n    c = concurrence(rho)\n    c = 0.5 + 0.5 * np.sqrt(1 - c * c)\n    return shannon_entropy([c, 1 - c])", "docstring": "Compute the Entanglement of Formation of a 2-qubit density matrix.\n\nArgs:\nrho (array_like): (4,4) array_like, input density matrix.\n\nReturns:\nfloat: The entanglement of formation.", "source": "juraj-google-style"}
{"code": "def convert_variables_to_constants_v2(func, lower_control_flow=True, aggressive_inlining=False):\n    converter_data = _FunctionConverterDataInEager(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining)\n    output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n    return _construct_concrete_function(func, output_graph_def, converted_input_indices)", "docstring": "Replaces all the variables in a graph with constants of the same values.\n\nTensorFlow 2.0 function for converting all Variable ops into Const ops holding\nthe same values. This makes it possible to describe the network fully with a\nsingle GraphDef file, and allows the removal of a lot of ops related to\nloading and saving the variables. This function runs Grappler's function\ninlining optimization in order to return a single subgraph.\n\nThe current implementation only works for graphs that do not contain any\ncontrol flow or embedding related ops.\n\nArgs:\nfunc: ConcreteFunction.\nlower_control_flow: Boolean indicating whether or not to lower control flow\nops such as If and While. (default True)\naggressive_inlining: Boolean indicating whether or not to do aggressive\nfunction inlining (might be unsafe if function has stateful ops, not\nproperly connected to control outputs). (default False)\n\nReturns:\nConcreteFunction containing a simplified version of the original.", "source": "github-repos"}
{"code": "def strip_string(self, string, *args):\n    res = string\n    for r in args:\n        res = re.sub(r, '', res.strip(), flags=(re.IGNORECASE | re.MULTILINE))\n    return res.strip()", "docstring": "Strips matching regular expressions from string\n\nKeyword arguments:\nstring -- The given string, that will be stripped\n*args -- List of regex strings, that are used in parsing\n\nReturns:\nString with *args removed from string", "source": "codesearchnet"}
{"code": "def calculate_expiration(self, token):\n        \n        if not token:\n            return None\n        now = datetime.utcnow()\n        time_to_live = self.config[\"expiration\"]\n        if \"exp\" not in token:\n            return now + timedelta(seconds=time_to_live)\n        elif self.config[\"refresh\"]:\n            exp = datetime.utcfromtimestamp(token[\"exp\"])\n            \n            if exp - now < timedelta(seconds=0.5 * time_to_live):\n                return now + timedelta(seconds=time_to_live)\n        return None", "docstring": "Calculate token expiration\n\nreturn expiration if the token need to set expiration or refresh,\notherwise return None.\n\nArgs:\ntoken (dict): a decoded token", "source": "juraj-google-style"}
{"code": "def _AcceptRPC(self):\n    request = self._ReadObject()\n    if (request['func'] == '__kill__'):\n        self.ClearBreakpoints()\n        self._WriteObject('__kill_ack__')\n        return False\n    if (('func' not in request) or request['func'].startswith('_')):\n        raise RpcException('Not a valid public API function.')\n    rpc_result = getattr(self, request['func'])(*request['args'])\n    self._WriteObject(rpc_result)\n    return True", "docstring": "Reads RPC request from stdin and processes it, writing result to stdout.\n\nReturns:\nTrue as long as execution is to be continued, False otherwise.\nRaises:\nRpcException: if no function was specified in the RPC or no such API\nfunction exists.", "source": "codesearchnet"}
{"code": "def delete_with_casper_admin_save(self, pkg):\n        \n        \n        if pkg.__class__.__name__ == \"Package\":\n            package_to_delete = pkg.id\n        elif isinstance(pkg, int):\n            package_to_delete = pkg\n        elif isinstance(pkg, str):\n            package_to_delete = self.connection[\"jss\"].Package(pkg).id\n        else:\n            raise TypeError\n\n        data_dict = {\"username\": self.connection[\"jss\"].user,\n                     \"password\": self.connection[\"jss\"].password,\n                     \"deletedPackageID\": package_to_delete}\n        self.connection[\"jss\"].session.post(url=self.connection[\"delete_url\"],\n                                            data=data_dict)", "docstring": "Delete a pkg from the distribution server.\n\nArgs:\npkg: Can be a jss.Package object, an int ID of a package, or\na filename.", "source": "juraj-google-style"}
{"code": "def call_requests(requests: Union[(Request, Iterable[Request])], methods: Methods, debug: bool) -> Response:\n    if isinstance(requests, collections.Iterable):\n        return BatchResponse((safe_call(r, methods, debug=debug) for r in requests))\n    return safe_call(requests, methods, debug=debug)", "docstring": "Takes a request or list of Requests and calls them.\n\nArgs:\nrequests: Request object, or a collection of them.\nmethods: The list of methods that can be called.\ndebug: Include more information in error responses.", "source": "codesearchnet"}
{"code": "def print_stats(self, reset=True):\n        \n        if not self.ncalls:\n            return\n\n        stats = self.stats\n        code = self.fn.__code__\n        print('--- Function Profiling ---')\n        print('File \"{}\", line {}, function {}'.format(\n            code.co_filename,\n            code.co_firstlineno,\n            self.fn.__name__))\n        stats.sort_stats(*self.sort_keys)\n        stats.print_stats(*self.print_restrictions)\n        print('--------------------------')\n        if reset:\n            self.reset_stats()", "docstring": "Manually print profiling result.\n\nArgs:\nreset (bool): If False is specified, the profiling statistics so\nfar is maintained. If ``True`` (default),\n:obj:`~reset_stats`\nis called to reset the profiling statistics.", "source": "juraj-google-style"}
{"code": "def update(self, other, **kwargs):\n    assert isinstance(other, type(self)), 'Must have the same DataManager subclass to perform this operation'\n\n    def update_builder(df, other, **kwargs):\n        df = df.copy()\n        df.update(other, **kwargs)\n        return df\n    return self._inter_df_op_handler(update_builder, other, **kwargs)", "docstring": "Uses other manager to update corresponding values in this manager.\n\nArgs:\nother: The other manager.\n\nReturns:\nNew DataManager with updated data and index.", "source": "codesearchnet"}
{"code": "def __init__(self, obj_to_invoke, method_name):\n    if not isinstance(obj_to_invoke, (DoFn, RestrictionProvider, WatermarkEstimatorProvider)):\n        raise ValueError(\"'obj_to_invoke' has to be either a 'DoFn' or a 'RestrictionProvider'. Received %r instead.\" % obj_to_invoke)\n    self.args, self.defaults = core.get_function_arguments(obj_to_invoke, method_name)\n    self.method_value = getattr(obj_to_invoke, method_name)\n    self.method_name = method_name\n    self.has_userstate_arguments = False\n    self.state_args_to_replace = {}\n    self.timer_args_to_replace = {}\n    self.timestamp_arg_name = None\n    self.window_arg_name = None\n    self.key_arg_name = None\n    self.restriction_provider = None\n    self.restriction_provider_arg_name = None\n    self.watermark_estimator_provider = None\n    self.watermark_estimator_provider_arg_name = None\n    self.dynamic_timer_tag_arg_name = None\n    if hasattr(self.method_value, 'unbounded_per_element'):\n        self.unbounded_per_element = True\n    else:\n        self.unbounded_per_element = False\n    for kw, v in zip(self.args[-len(self.defaults):], self.defaults):\n        if isinstance(v, core.DoFn.StateParam):\n            self.state_args_to_replace[kw] = v.state_spec\n            self.has_userstate_arguments = True\n        elif isinstance(v, core.DoFn.TimerParam):\n            self.timer_args_to_replace[kw] = v.timer_spec\n            self.has_userstate_arguments = True\n        elif core.DoFn.TimestampParam == v:\n            self.timestamp_arg_name = kw\n        elif core.DoFn.WindowParam == v:\n            self.window_arg_name = kw\n        elif core.DoFn.WindowedValueParam == v:\n            self.window_arg_name = kw\n        elif core.DoFn.KeyParam == v:\n            self.key_arg_name = kw\n        elif isinstance(v, core.DoFn.RestrictionParam):\n            self.restriction_provider = v.restriction_provider or obj_to_invoke\n            self.restriction_provider_arg_name = kw\n        elif isinstance(v, core.DoFn.WatermarkEstimatorParam):\n            self.watermark_estimator_provider = v.watermark_estimator_provider or obj_to_invoke\n            self.watermark_estimator_provider_arg_name = kw\n        elif core.DoFn.DynamicTimerTagParam == v:\n            self.dynamic_timer_tag_arg_name = kw\n    if self.watermark_estimator_provider is None:\n        self.watermark_estimator_provider = NoOpWatermarkEstimatorProvider()", "docstring": "Initiates a ``MethodWrapper``.\n\nArgs:\nobj_to_invoke: the object that contains the method. Has to either be a\n`DoFn` object or a `RestrictionProvider` object.\nmethod_name: name of the method as a string.", "source": "github-repos"}
{"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n    logits = outputs.logits\n    if target_sizes is not None:\n        if len(logits) != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n        if is_torch_tensor(target_sizes):\n            target_sizes = target_sizes.numpy()\n        semantic_segmentation = []\n        for idx in range(len(logits)):\n            resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n            semantic_map = resized_logits[0].argmax(dim=0)\n            semantic_segmentation.append(semantic_map)\n    else:\n        semantic_segmentation = logits.argmax(dim=1)\n        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n    return semantic_segmentation", "docstring": "Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`DPTForSemanticSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple]` of length `batch_size`, *optional*):\nList of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\npredictions will not be resized.\n\nReturns:\nsemantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\nsegmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\nspecified). Each entry of each `torch.Tensor` correspond to a semantic class id.", "source": "github-repos"}
{"code": "def parse_tmhmm_long(tmhmm_results):\n    with open(tmhmm_results) as f:\n        lines = f.read().splitlines()\n\n    infodict = defaultdict(dict)\n\n    for l in lines:\n        if 'Number of predicted TMHs:' in l:\n            gene = l.split(' Number')[0].strip('# ')\n            infodict[gene]['num_tm_helices'] = int(l.split(': ')[1])\n\n        if 'WARNING' in l:\n            log.warning('{}: no TMHMM predictions'.format(l))\n            continue\n\n        if '#' not in l:\n            stuff = l.split()\n            if stuff[1] == 'TMHMM2.0':\n\n                gene = stuff[0]\n                region = stuff[2]\n                region_start = stuff[3]\n                region_end = stuff[4]\n\n                if 'sequence' in infodict[gene]:\n                    tm_seq = infodict[gene]['sequence']\n                else:\n                    tm_seq = ''\n\n                if region == 'outside':\n                    info = 'O'\n                elif region == 'inside':\n                    info = 'I'\n                elif region == 'TMhelix':\n                    info = 'T'\n                else:\n                    log.error('{}: unknown region type'.format(region))\n                    info = '-'\n\n                for r in range(int(region_start), int(region_end) + 1):\n                    tm_seq += info\n\n                infodict[gene]['sequence'] = tm_seq\n\n    return infodict", "docstring": "Parse the 'long' output format of TMHMM and return a dictionary of ``{sequence_ID: TMHMM_prediction}``.\n\nArgs:\ntmhmm_results (str): Path to long format TMHMM output.\n\nReturns:\ndict: Dictionary of ``{sequence_ID: TMHMM_prediction}``", "source": "juraj-google-style"}
{"code": "def _repeat(values, count):\n    return [[value] * value for value in np.tile(values, count)]", "docstring": "Produces a list of lists suitable for testing interleave.\n\nArgs:\nvalues: for each element `x` the result contains `[x] * x`\ncount: determines how many times to repeat `[x] * x` in the result\n\nReturns:\nA list of lists of values suitable for testing interleave.", "source": "github-repos"}
{"code": "def _write_except_dispatcher(self, exc, tb, handlers):\n    \n    handler_labels = []\n    for i, except_node in enumerate(handlers):\n      handler_labels.append(self.block.genlabel())\n      if except_node.type:\n        with self.visit_expr(except_node.type) as type_,\\\n            self.block.alloc_temp('bool') as is_inst:\n          self.writer.write_checked_call2(\n              is_inst, 'πg.IsInstance(πF, {}.ToObject(), {})', exc, type_.expr)\n          self.writer.write_tmpl(textwrap.dedent(), is_inst=is_inst.expr, label=handler_labels[-1])\n      else:\n        \n        if i != len(handlers) - 1:\n          msg = \"default 'except:' must be last\"\n          raise util.ParseError(except_node, msg)\n        self.writer.write('goto Label{}'.format(handler_labels[-1]))\n    if handlers[-1].type:\n      \n      self.writer.write(\n          'πE = πF.Raise({}.ToObject(), nil, {}.ToObject())'.format(exc, tb))\n      self.writer.write('continue')\n    return handler_labels", "docstring": "Outputs a Go code that jumps to the appropriate except handler.\n\nArgs:\nexc: Go variable holding the current exception.\ntb: Go variable holding the current exception's traceback.\nhandlers: A list of ast.ExceptHandler nodes.\n\nReturns:\nA list of Go labels indexes corresponding to the exception handlers.\n\nRaises:\nParseError: Except handlers are in an invalid order.", "source": "juraj-google-style"}
{"code": "def display_upstream_structure(structure_dict):\n    \n    graph = _create_graph(structure_dict)\n    plt = Image(graph.create_png())\n    display(plt)", "docstring": "Displays pipeline structure in the jupyter notebook.\n\nArgs:\nstructure_dict (dict): dict returned by\n:func:`~steppy.base.Step.upstream_structure`.", "source": "juraj-google-style"}
{"code": "def find_mrms_tracks(self):\n    obs_objects = []\n    tracked_obs_objects = []\n    if (self.mrms_ew is not None):\n        self.mrms_grid.load_data()\n        if (len(self.mrms_grid.data) != len(self.hours)):\n            print('Less than 24 hours of observation data found')\n            return tracked_obs_objects\n        for (h, hour) in enumerate(self.hours):\n            mrms_data = np.zeros(self.mrms_grid.data[h].shape)\n            mrms_data[:] = np.array(self.mrms_grid.data[h])\n            mrms_data[(mrms_data < 0)] = 0\n            hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data, self.gaussian_window)), self.size_filter)\n            hour_labels[(mrms_data < self.mrms_ew.min_thresh)] = 0\n            obj_slices = find_objects(hour_labels)\n            num_slices = len(obj_slices)\n            obs_objects.append([])\n            if (num_slices > 0):\n                for sl in obj_slices:\n                    obs_objects[(- 1)].append(STObject(mrms_data[sl], np.where((hour_labels[sl] > 0), 1, 0), self.model_grid.x[sl], self.model_grid.y[sl], self.model_grid.i[sl], self.model_grid.j[sl], hour, hour, dx=self.model_grid.dx))\n                    if (h > 0):\n                        dims = obs_objects[(- 1)][(- 1)].timesteps[0].shape\n                        obs_objects[(- 1)][(- 1)].estimate_motion(hour, self.mrms_grid.data[(h - 1)], dims[1], dims[0])\n        for (h, hour) in enumerate(self.hours):\n            past_time_objs = []\n            for obj in tracked_obs_objects:\n                if (obj.end_time == (hour - 1)):\n                    past_time_objs.append(obj)\n            if (len(past_time_objs) == 0):\n                tracked_obs_objects.extend(obs_objects[h])\n            elif ((len(past_time_objs) > 0) and (len(obs_objects[h]) > 0)):\n                assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], (hour - 1), hour)\n                unpaired = list(range(len(obs_objects[h])))\n                for pair in assignments:\n                    past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])\n                    unpaired.remove(pair[1])\n                if (len(unpaired) > 0):\n                    for up in unpaired:\n                        tracked_obs_objects.append(obs_objects[h][up])\n            print('Tracked Obs Objects: {0:03d} Hour: {1:02d}'.format(len(tracked_obs_objects), hour))\n    return tracked_obs_objects", "docstring": "Identify objects from MRMS timesteps and link them together with object matching.\n\nReturns:\nList of STObjects containing MESH track information.", "source": "codesearchnet"}
{"code": "def set_parameter(self, name, value):\n        \n        i = self.get_parameter_names(include_frozen=True).index(name)\n        v = self.get_parameter_vector(include_frozen=True)\n        v[i] = value\n        self.set_parameter_vector(v, include_frozen=True)", "docstring": "Set a parameter value by name\n\nArgs:\nname: The name of the parameter\nvalue (float): The new value for the parameter", "source": "juraj-google-style"}
{"code": "def regex_check(equation_str):\n    \n    match1 = re.match(\n        r'^(([xy+\\-*/()0-9. ]+|sin\\(|cos\\(|exp\\(|log\\()?)+$',\n        equation_str\n    )\n    match2 = re.match(r'^.*([xy]) *([xy]).*$', equation_str)\n    if match1 and not match2:\n        return True\n    raise BadInputError('Cannot parse entered equation')", "docstring": "A quick regular expression check to see that the input is sane\n\nArgs:\nequation_str (str): String of equation to be parsed by sympify\nfunction. Expected to be valid Python.\n\nRaises:\nBadInputError: If input does not look safe to parse as an equation.", "source": "juraj-google-style"}
{"code": "def process(self, batch, *args, **kwargs):\n    if (self.postprocessing is not None):\n        batch = self.postprocessing(batch)\n    return batch", "docstring": "Process a list of examples to create a batch.\n\nPostprocess the batch with user-provided Pipeline.\n\nArgs:\nbatch (list(object)): A list of object from a batch of examples.\nReturns:\nobject: Processed object given the input and custom\npostprocessing Pipeline.", "source": "codesearchnet"}
{"code": "def state_name(self):\n    if (self.state == 1):\n        return 'New Issue'\n    elif (self.state == 2):\n        return 'Shutdown in 1 week'\n    elif (self.state == 3):\n        return 'Shutdown in 1 day'\n    elif (self.state == 4):\n        return 'Pending Shutdown'\n    elif (self.state == 5):\n        return 'Stopped, delete in 12 weeks'\n    elif (self.state == 6):\n        return 'Instance deleted'\n    else:\n        raise ValueError('Invalid state: {}'.format(self.state))", "docstring": "Get a human-readable value of the state\n\nReturns:\nstr: Name of the current state", "source": "codesearchnet"}
{"code": "def _authenticate(self):\n    csrf_token = self._get_csrf_token()\n    self._login(csrf_token)\n    domain_text_element = self._get_domain_text_of_authoritative_zone()\n    self.domain_id = self._get_domain_id(domain_text_element)\n    LOGGER.debug('Easyname domain ID: %s', self.domain_id)\n    return True", "docstring": "Authenticates against the Easyname website and tries to find out the domain\nid.\nEasyname uses a CSRF token in its login form, so two requests are\nnecessary to actually log in.\n\nReturns:\nbool: True if domain id was found.\n\nRaises:\nAssertionError: When a request returns unexpected or unknown data.\nValueError: When login data is wrong or the domain does not exist.", "source": "codesearchnet"}
{"code": "def _get_resource_hash(zone_name, record):\n    record_data = defaultdict(int, record)\n    if (type(record_data['GeoLocation']) == dict):\n        record_data['GeoLocation'] = ':'.join(['{}={}'.format(k, v) for (k, v) in record_data['GeoLocation'].items()])\n    args = [zone_name, record_data['Name'], record_data['Type'], record_data['Weight'], record_data['Region'], record_data['GeoLocation'], record_data['Failover'], record_data['HealthCheckId'], record_data['TrafficPolicyInstanceId']]\n    return get_resource_id('r53r', args)", "docstring": "Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique\nresource IDs\n\nArgs:\nzone_name (`str`): The name of the DNS Zone the record belongs to\nrecord (`dict`): A record dict to generate the hash from\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "def create_bmi_config_file(self, filename: str = \"bmi_config.txt\") -> None:\n        \n        s0 = self.construct_default_initial_state()\n        s0.to_csv(filename, index_label=\"variable\")", "docstring": "Create a BMI config file to initialize the model.\n\nArgs:\nfilename: The filename with which the config file should be saved.", "source": "juraj-google-style"}
{"code": "def last_timestamp(self, event_key=None):\n    if (event_key is None):\n        timestamps = [self._trackers[key].last_timestamp for key in self._trackers]\n        return max((timestamp for timestamp in timestamps if (timestamp >= 0)))\n    else:\n        return self._trackers[event_key].last_timestamp", "docstring": "Obtain the last timestamp.\n\nArgs:\nevent_key: the type key of the sought events (e.g., constants.NAN_KEY). If\nNone, includes all event type keys.\n\nReturns:\nLast (latest) timestamp of all the events of the given type (or all\nevent types if event_key is None).", "source": "codesearchnet"}
{"code": "def page_length(self, length):\n        \n        mH = length/256\n        mL = length%256\n        if length < 12000:\n            self.send(chr(27)+'('+'C'+chr(2)+chr(0)+chr(mL)+chr(mH))\n        else:\n            raise RuntimeError('Length must be less than 12000.')", "docstring": "Specifies page length. This command is only valid with continuous length labels.\n\nArgs:\nlength: The length of the page, in dots. Can't exceed 12000.\nReturns:\nNone\nRaises:\nRuntimeError: Length must be less than 12000.", "source": "juraj-google-style"}
{"code": "def ModulePath(module_name):\n  \n  module = importlib.import_module(module_name)\n  path = inspect.getfile(module)\n  \n  \n  if compatibility.PY2:\n    path = path.decode(\"utf-8\")\n\n  \n  \n  if os.path.basename(path).startswith(\"__init__.\"):\n    path = os.path.dirname(path)\n\n  \n  if path.endswith(\".pyc\"):\n    path = path[:-4] + \".py\"\n\n  return path", "docstring": "Computes a path to the specified module.\n\nArgs:\nmodule_name: A name of the module to get the path for.\n\nReturns:\nA path to the specified module.\n\nRaises:\nImportError: If specified module cannot be imported.", "source": "juraj-google-style"}
{"code": "def of_type(self, classinfo):\n    if self.closed():\n        raise ValueError('Attempt to call of_type() on a closed Queryable.')\n    if (not is_type(classinfo)):\n        raise TypeError('of_type() parameter classinfo={0} is not a class object or a type object or a tuple of class or type objects.'.format(classinfo))\n    return self.where((lambda x: isinstance(x, classinfo)))", "docstring": "Filters elements according to whether they are of a certain type.\n\nNote: This method uses deferred execution.\n\nArgs:\nclassinfo: If classinfo is neither a class object nor a type object\nit may be a tuple of class or type objects, or may recursively\ncontain other such tuples (other sequence types are not\naccepted).\n\nReturns:\nA Queryable over those elements of the source sequence for which\nthe predicate is True.\n\nRaises:\nValueError: If the Queryable is closed.\nTypeError: If classinfo is not a class, type, or tuple of classes,\ntypes, and such tuples.", "source": "codesearchnet"}
{"code": "def __init__(self, drop_ffi_call_fn, initialized_ptr=None):\n        \n        if initialized_ptr is not None:\n            self._ptr = initialized_ptr\n        else:\n            self._ptr = ctypes.c_void_p()\n\n        self._drop_ffi_fn = drop_ffi_call_fn", "docstring": "Constructs an owned pointer.\nInitializing the pointer is left to the extending classes\n\nArgs:\ndrop_ffi_call_fn (str): the name of the FFI function to call on\ndrop or garbage collection.\ninitialized_ptr (ctypes.c_void_p:optional): a preinitialized\npointer to the native memory", "source": "juraj-google-style"}
{"code": "def add(app: web.Application, feature: Any, key: Hashable=None, exist_ok: bool=False):\n    if (FEATURES_KEY not in app):\n        app[FEATURES_KEY] = dict()\n    key = (key or type(feature))\n    if (key in app[FEATURES_KEY]):\n        if exist_ok:\n            return\n        else:\n            raise KeyError(f'Feature \"{key}\" already registered')\n    app[FEATURES_KEY][key] = feature", "docstring": "Adds a new feature to the app.\n\nFeatures can either be registered as the default feature for the class,\nor be given an explicit name.\n\nArgs:\napp (web.Application):\nThe current Aiohttp application.\n\nfeature (Any):\nThe new feature that should be registered.\nIt is recommended, but not required to use a `ServiceFeature`.\n\nkey (Hashable, optional):\nThe key under which the feature should be registered.\nDefaults to `type(feature)`.\n\nexist_ok (bool):\nIf truthy, this function will do nothing if a feature was already registered for `key`.\nOtherwise, an exception is raised.", "source": "codesearchnet"}
{"code": "def _assert_gcs_files(files):\n  \n\n  if sys.version_info.major > 2:\n    string_type = (str, bytes)\n  else:\n    string_type = basestring\n\n  if isinstance(files, string_type):\n    files = [files]\n\n  for f in files:\n    if f is not None and not f.startswith('gs://'):\n      raise ValueError('File %s is not a gcs path' % f)", "docstring": "Check files start with gs://.\n\nArgs:\nfiles: string to file path, or list of file paths.", "source": "juraj-google-style"}
{"code": "def cd(new_directory, clean_up=(lambda : True)):\n    previous_directory = os.getcwd()\n    os.chdir(os.path.expanduser(new_directory))\n    try:\n        (yield)\n    finally:\n        os.chdir(previous_directory)\n        clean_up()", "docstring": "Changes into a given directory and cleans up after it is done\n\nArgs:\nnew_directory: The directory to change to\nclean_up: A method to clean up the working directory once done", "source": "codesearchnet"}
{"code": "async def _async_supervisor(func, animation_, step, *args, **kwargs):\n    with ThreadPoolExecutor(max_workers=2) as pool:\n        with _terminating_event() as event:\n            pool.submit(animate_cli, animation_, step, event)\n            result = (await func(*args, **kwargs))\n    return result", "docstring": "Supervisor for running an animation with an asynchronous function.\n\nArgs:\nfunc: A function to be run alongside an animation.\nanimation_: An infinite generator that produces\nstrings for the animation.\nstep: Seconds between each animation frame.\n*args: Arguments for func.\n**kwargs: Keyword arguments for func.\nReturns:\nThe result of func(*args, **kwargs)\nRaises:\nAny exception that is thrown when executing func.", "source": "codesearchnet"}
{"code": "def SerializeExclusiveData(self, writer):\n        \n        writer.WriteByte(self.AssetType)\n        writer.WriteVarString(self.Name)\n        writer.WriteFixed8(self.Amount)\n        writer.WriteByte(self.Precision)\n\n        self.Owner.Serialize(writer)\n\n        writer.WriteUInt160(self.Admin)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"}
{"code": "def recognize(self, node: yaml.Node, expected_type: Type) -> RecResult:\n    logger.debug('Recognizing {} as a {}'.format(node, expected_type))\n    recognized_types = None\n    if (expected_type in [str, int, float, bool, bool_union_fix, datetime, None, type(None)]):\n        (recognized_types, message) = self.__recognize_scalar(node, expected_type)\n    elif is_generic_union(expected_type):\n        (recognized_types, message) = self.__recognize_union(node, expected_type)\n    elif is_generic_list(expected_type):\n        (recognized_types, message) = self.__recognize_list(node, expected_type)\n    elif is_generic_dict(expected_type):\n        (recognized_types, message) = self.__recognize_dict(node, expected_type)\n    elif (expected_type in self.__registered_classes.values()):\n        (recognized_types, message) = self.__recognize_user_classes(node, expected_type)\n    if (recognized_types is None):\n        raise RecognitionError('Could not recognize for type {}, is it registered?'.format(expected_type))\n    logger.debug('Recognized types {} matching {}'.format(recognized_types, expected_type))\n    return (recognized_types, message)", "docstring": "Figure out how to interpret this node.\n\nThis is not quite a type check. This function makes a list of \\\nall types that match the expected type and also the node, and \\\nreturns that list. The goal here is not to test validity, but \\\nto determine how to process this node further.\n\nThat said, it will recognize built-in types only in case of \\\nan exact match.\n\nArgs:\nnode: The YAML node to recognize.\nexpected_type: The type we expect this node to be, based \\\non the context provided by our type definitions.\n\nReturns:\nA list of matching types.", "source": "codesearchnet"}
{"code": "def __init__(self, api, endpoint=None, cls=None):\n        \n        self.api = api\n        self.endpoint = endpoint\n        self._cls = cls", "docstring": "Creates an instance of the APIEndpoint class.\n\nArgs:\napi - Gophish.client - The authenticated REST client\nendpoint - str - The URL path to the resource endpoint\ncls - gophish.models.Model - The Class to use when parsing results", "source": "juraj-google-style"}
{"code": "def most_frequent(self, k):\n    word_count = {w: self.word_count[w] for w in self.words[:k]}\n    return CountedVocabulary(word_count=word_count)", "docstring": "Returns a vocabulary with the most frequent `k` words.\n\nArgs:\nk (integer): specifies the top k most frequent words to be returned.", "source": "codesearchnet"}
{"code": "def install_package(tar_url, folder, md5_url='{tar_url}.md5', on_download=(lambda : None), on_complete=(lambda : None)):\n    data_file = join(folder, basename(tar_url))\n    md5_url = md5_url.format(tar_url=tar_url)\n    try:\n        remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]\n    except (UnicodeDecodeError, URLError):\n        raise ValueError(('Invalid MD5 url: ' + md5_url))\n    if (remote_md5 != calc_md5(data_file)):\n        on_download()\n        if isfile(data_file):\n            try:\n                with tarfile.open(data_file) as tar:\n                    for i in reversed(list(tar)):\n                        try:\n                            os.remove(join(folder, i.path))\n                        except OSError:\n                            pass\n            except (OSError, EOFError):\n                pass\n        download_extract_tar(tar_url, folder, data_file)\n        on_complete()\n        if (remote_md5 != calc_md5(data_file)):\n            raise ValueError(('MD5 url does not match tar: ' + md5_url))\n        return True\n    return False", "docstring": "Install or update a tar package that has an md5\n\nArgs:\ntar_url (str): URL of package to download\nfolder (str): Location to extract tar. Will be created if doesn't exist\nmd5_url (str): URL of md5 to use to check for updates\non_download (Callable): Function that gets called when downloading a new update\non_complete (Callable): Function that gets called when a new download is complete\n\nReturns:\nbool: Whether the package was updated", "source": "codesearchnet"}
{"code": "def _ParseFile(self, file_obj, line_parser):\n    lines = [l.strip() for l in utils.ReadFileBytesAsUnicode(file_obj).splitlines()]\n    try:\n        for (index, line) in enumerate(lines):\n            if line:\n                line_parser(line)\n    except (IndexError, KeyError) as e:\n        raise parser.ParseError(('Invalid file at line %d: %s' % ((index + 1), e)))", "docstring": "Process a file line by line.\n\nArgs:\nfile_obj: The file to parse.\nline_parser: The parser method used to process and store line content.\n\nRaises:\nparser.ParseError if the parser is unable to process the line.", "source": "codesearchnet"}
{"code": "def ee_initialize(use_personal_account: bool=False, enforce_high_volume: bool=False, service_account: t.Optional[str]=None, private_key: t.Optional[str]=None, project_id: t.Optional[str]=None) -> None:\n    creds = get_creds(use_personal_account, service_account, private_key)\n    on_compute_engine = is_compute_engine()\n    if on_compute_engine:\n        if project_id is None and use_personal_account:\n            raise RuntimeError('Project_name should not be None!')\n        params = {'credentials': creds, 'opt_url': 'https://earthengine-highvolume.googleapis.com'}\n        if project_id:\n            params['project'] = project_id\n        ee.Initialize(**params)\n    elif enforce_high_volume and (not on_compute_engine):\n        raise RuntimeError('Must run on a compute engine VM to use the high volume earth engine api.')\n    else:\n        ee.Initialize(creds)", "docstring": "Initializes earth engine with the high volume API when using a compute engine VM.\n\nArgs:\nuse_personal_account: A flag to use personal account for ee authentication. Default: False.\nenforce_high_volume: A flag to use the high volume API when using a compute engine VM. Default: False.\nservice_account: Service account address when using a private key for earth engine authentication.\nprivate_key: A private key path to authenticate earth engine using private key. Default: None.\nproject_id: An identifier that represents the name of a project present in Earth Engine.\n\nRaises:\nRuntimeError: Earth Engine did not initialize.", "source": "github-repos"}
{"code": "def validate_start_end_range(range_tuple):\n    \n\n    start, end = range_tuple\n\n    if (start and end) and (start > end):\n        raise ValueError(_(\"Start after end!\"))\n\n    return range_tuple", "docstring": "Perform basic sanity checks on a timeframe.\n\nArgs:\nrange_tuple (tuple): ``(start, end)`` tuple as returned by\n``complete_timeframe``.\n\nRaises:\nValueError: If start > end.\n\nReturns:\ntuple: ``(start, end)`` tuple that passed validation.\n\nNote:\n``timeframes`` may be incomplete, especially if ``complete_timeframe(partial=True)`` has\nbeen used to construct them.", "source": "juraj-google-style"}
{"code": "def remat(f):\n    return tf.recompute_grad(f)", "docstring": "Implementation of rematerialization.\n\nArgs:\nf: The function or operation to rematerialize.\nReturns:\nA function wrapping f that defines a custom gradient, which\nrecomputes f on the backwards pass of a gradient call.", "source": "github-repos"}
{"code": "def expand_groups(grp):\n    p = re.compile('(?P<name>.+)\\\\[(?P<start>\\\\d+)-(?P<end>\\\\d+)\\\\]')\n    m = p.match(grp)\n    if (m is not None):\n        s = int(m.group('start'))\n        e = int(m.group('end'))\n        n = m.group('name')\n        return list(map((lambda x: (n + str(x))), range(s, (e + 1))))\n    else:\n        return [grp]", "docstring": "Expand group names.\n\nArgs:\ngrp (string): group names to expand\n\nReturns:\nlist of groups\n\nExamples:\n\n* grp[1-3] will be expanded to [grp1, grp2, grp3]\n* grp1 will be expanded to [grp1]", "source": "codesearchnet"}
{"code": "def __init__(self, launchdjobs):\n    \n    self.launchdjobs = launchdjobs\n\n    self.blacklist_regex = [\n        re.compile(r\"^0x[a-z0-9]+\\.anonymous\\..+$\"),\n        re.compile(r\"^0x[a-z0-9]+\\.mach_init\\.(crash_inspector|Inspector)$\"),\n    ]", "docstring": "Initialize.\n\nArgs:\nlaunchdjobs: NSCFArray of NSCFDictionarys containing launchd job data from\nthe ServiceManagement framework.", "source": "juraj-google-style"}
{"code": "def graph_key_from_tag(tag, entity_index):\n    \n    start_token = tag.get('start_token')\n    entity = tag.get('entities', [])[entity_index]\n    return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence'))", "docstring": "Returns a key from a tag entity\n\nArgs:\ntag (tag) : this is the tag selected to get the key from\nentity_index (int) : this is the index of the tagged entity\n\nReturns:\nstr : String representing the key for the given tagged entity.", "source": "juraj-google-style"}
{"code": "def substitute_globals(config_dict):\n    \n    constants = get_all_constants()\n\n    if type(config_dict) != dict:\n        return\n\n    for key in config_dict.keys():\n        if key in constants and type(config_dict[key]) in _ALLOWED:\n            globals()[key] = config_dict[key]", "docstring": "Set global variables to values defined in `config_dict`.\n\nArgs:\nconfig_dict (dict): dictionary with data, which are used to set \\\n`globals`.\n\nNote:\n`config_dict` have to be dictionary, or it is ignored. Also all\nvariables, that are not already in globals, or are not types defined in\n:attr:`_ALLOWED` (str, int, float) or starts with ``_`` are silently\nignored.", "source": "juraj-google-style"}
{"code": "def _parse_data_fields(self, fields, tag_id=\"tag\", sub_id=\"code\"):\n        \n        for field in fields:\n            params = field.params\n\n            if tag_id not in params:\n                continue\n\n            \n            field_repr = OrderedDict([\n                [self.i1_name, params.get(self.i1_name, \" \")],\n                [self.i2_name, params.get(self.i2_name, \" \")],\n            ])\n\n            \n            for subfield in field.find(\"subfield\"):\n                if sub_id not in subfield.params:\n                    continue\n\n                content = MARCSubrecord(\n                    val=subfield.getContent().strip(),\n                    i1=field_repr[self.i1_name],\n                    i2=field_repr[self.i2_name],\n                    other_subfields=field_repr\n                )\n\n                \n                code = subfield.params[sub_id]\n                if code in field_repr:\n                    field_repr[code].append(content)\n                else:\n                    field_repr[code] = [content]\n\n            tag = params[tag_id]\n            if tag in self.datafields:\n                self.datafields[tag].append(field_repr)\n            else:\n                self.datafields[tag] = [field_repr]", "docstring": "Parse data fields.\n\nArgs:\nfields (list): of HTMLElements\ntag_id (str): parameter name, which holds the information, about\nfield name this is normally \"tag\", but in case of\noai_marc \"id\"\nsub_id (str): id of parameter, which holds informations about\nsubfield name this is normally \"code\" but in case of\noai_marc \"label\"", "source": "juraj-google-style"}
{"code": "def _get_media_files(cls, packager, media_packages, media_type, extra_files):\n    source_files = list(extra_files)\n    if ((not settings.PIPELINE_ENABLED) and settings.PIPELINE_COLLECTOR_ENABLED):\n        default_collector.collect()\n    for media_package in media_packages:\n        package = packager.package_for(media_type, media_package)\n        if settings.PIPELINE_ENABLED:\n            source_files.append(staticfiles_storage.url(package.output_filename))\n        else:\n            source_files += packager.compile(package.paths)\n    return source_files", "docstring": "Return source or output media files for a list of packages.\n\nThis will go through the media files belonging to the provided list\nof packages referenced in a Media class and return the output files\n(if Pipeline is enabled) or the source files (if not enabled).\n\nArgs:\npackager (pipeline.packager.Packager):\nThe packager responsible for media compilation for this type\nof package.\n\nmedia_packages (list of unicode):\nThe list of media packages referenced in Media to compile or\nreturn.\n\nextra_files (list of unicode):\nThe list of extra files to include in the result. This would\nbe the list stored in the Media class's original :py:attr:`css`\nor :py:attr:`js` attributes.\n\nReturns:\nlist:\nThe list of media files for the given packages.", "source": "codesearchnet"}
{"code": "def get_transcript_lengths(ensembl, transcript_ids):\n    \n    \n    transcripts = {}\n    for transcript_id in transcript_ids:\n        \n        try:\n            seq = ensembl.get_protein_seq_for_transcript(transcript_id)\n        except ValueError:\n            continue\n        \n        transcripts[transcript_id] = len(seq)\n    \n    return transcripts", "docstring": "finds the protein length for ensembl transcript IDs for a gene\n\nArgs:\nensembl: EnsemblRequest object to request sequences and data\nfrom the ensembl REST API\ntranscript_ids: list of transcript IDs for a single gene\n\nReturns:\ndictionary of lengths (in amino acids), indexed by transcript IDs", "source": "juraj-google-style"}
{"code": "def _as_serialized_graph(self, allow_stateful=None, strip_device_assignment=None, external_state_policy=options_lib.ExternalStatePolicy.WARN):\n    if external_state_policy:\n        policy = external_state_policy.value\n        return gen_dataset_ops.dataset_to_graph_v2(self._variant_tensor, external_state_policy=policy, strip_device_assignment=strip_device_assignment)\n    if strip_device_assignment:\n        return gen_dataset_ops.dataset_to_graph(self._variant_tensor, allow_stateful=allow_stateful, strip_device_assignment=strip_device_assignment)\n    return gen_dataset_ops.dataset_to_graph(self._variant_tensor, allow_stateful=allow_stateful)", "docstring": "Produces serialized graph representation of the dataset.\n\nArgs:\nallow_stateful: If true, we allow stateful ops to be present in the graph\ndef. In that case, the state in these ops would be thrown away.\nstrip_device_assignment: If true, non-local (i.e. job and task) device\nassignment is stripped from ops in the serialized graph.\nexternal_state_policy: The ExternalStatePolicy enum that determines how we\nhandle input pipelines that depend on external state. By default, its\nset to WARN.\n\nReturns:\nA scalar `tf.Tensor` of `tf.string` type, representing this dataset as a\nserialized graph.", "source": "github-repos"}
{"code": "def resolve_image_as_pil(self, image_url, coords=None):\n        \n        files = self.mets.find_files(url=image_url)\n        if files:\n            image_filename = self.download_file(files[0]).local_filename\n        else:\n            image_filename = self.download_url(image_url)\n\n        if image_url not in self.image_cache['pil']:\n            self.image_cache['pil'][image_url] = Image.open(image_filename)\n\n        pil_image = self.image_cache['pil'][image_url]\n\n        if coords is None:\n            return pil_image\n        if image_url not in self.image_cache['cv2']:\n            log.debug(\"Converting PIL to OpenCV: %s\", image_url)\n            color_conversion = cv2.COLOR_GRAY2BGR if pil_image.mode in ('1', 'L') else  cv2.COLOR_RGB2BGR\n            pil_as_np_array = np.array(pil_image).astype('uint8') if pil_image.mode == '1' else np.array(pil_image)\n            self.image_cache['cv2'][image_url] = cv2.cvtColor(pil_as_np_array, color_conversion)\n        cv2_image = self.image_cache['cv2'][image_url]\n        poly = np.array(coords, np.int32)\n        log.debug(\"Cutting region %s from %s\", coords, image_url)\n        region_cut = cv2_image[\n            np.min(poly[:, 1]):np.max(poly[:, 1]),\n            np.min(poly[:, 0]):np.max(poly[:, 0])\n        ]\n        return Image.fromarray(region_cut)", "docstring": "Resolve an image URL to a PIL image.\n\nArgs:\ncoords (list) : Coordinates of the bounding box to cut from the image\n\nReturns:\nImage or region in image as PIL.Image", "source": "juraj-google-style"}
{"code": "def toy_logistic_data(num_examples, input_size=2, weights_prior_stddev=5.0):\n    random_weights = (weights_prior_stddev * np.random.randn(input_size))\n    random_bias = np.random.randn()\n    design_matrix = ((np.random.rand(num_examples, input_size) * 2) - 1)\n    logits = np.reshape((np.dot(design_matrix, random_weights) + random_bias), ((- 1), 1))\n    p_labels = (1.0 / (1 + np.exp((- logits))))\n    labels = np.int32((p_labels > np.random.rand(num_examples, 1)))\n    return (random_weights, random_bias, np.float32(design_matrix), labels)", "docstring": "Generates synthetic data for binary classification.\n\nArgs:\nnum_examples: The number of samples to generate (scalar Python `int`).\ninput_size: The input space dimension (scalar Python `int`).\nweights_prior_stddev: The prior standard deviation of the weight\nvector. (scalar Python `float`).\n\nReturns:\nrandom_weights: Sampled weights as a Numpy `array` of shape\n`[input_size]`.\nrandom_bias: Sampled bias as a scalar Python `float`.\ndesign_matrix: Points sampled uniformly from the cube `[-1,\n1]^{input_size}`, as a Numpy `array` of shape `(num_examples,\ninput_size)`.\nlabels: Labels sampled from the logistic model `p(label=1) =\nlogistic(dot(features, random_weights) + random_bias)`, as a Numpy\n`int32` `array` of shape `(num_examples, 1)`.", "source": "codesearchnet"}
{"code": "def are_you_sure(msg=''):\n    print(msg)\n    from utool import util_arg\n    from utool import util_str\n    override = util_arg.get_argflag(('--yes', '--y', '-y'))\n    if override:\n        print('accepting based on command line flag')\n        return True\n    valid_ans = ['yes', 'y']\n    valid_prompt = util_str.conj_phrase(valid_ans, 'or')\n    ans = input('Are you sure?\\n Enter %s to accept\\n' % valid_prompt)\n    return ans.lower() in valid_ans", "docstring": "Prompts user to accept or checks command line for -y\n\nArgs:\nmsg (str):\n\nReturns:\nbool: accept or not", "source": "juraj-google-style"}
{"code": "def _astimezone_ts(self, timezone):\n    \n    if self.created.tzinfo is timezone:\n        return self\n    else:\n        nw_obj = Timestamps((None,)*4)\n        nw_obj.created = self.created.astimezone(timezone)\n        nw_obj.changed = self.changed.astimezone(timezone)\n        nw_obj.mft_changed = self.mft_changed.astimezone(timezone)\n        nw_obj.accessed = self.accessed.astimezone(timezone)\n\n        return nw_obj", "docstring": "Changes the time zones of all timestamps.\n\nReceives a new timezone and applies to all timestamps, if necessary.\n\nArgs:\ntimezone (:obj:`tzinfo`): Time zone to be applied\n\nReturns:\nA new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.", "source": "juraj-google-style"}
{"code": "def key_swap(d, cls, marshal):\n    dname = '_{}marshal_key_swap'.format(('' if marshal else 'un'))\n    if hasattr(cls, dname):\n        key_swap = getattr(cls, dname)\n        return {(key_swap[k] if (k in key_swap) else k): v for (k, v) in d.items()}\n    else:\n        return d", "docstring": "Swap the keys in a dictionary\n\nArgs:\nd:       dict, The dict to swap keys in\ncls:     class, If the class has a staticly defined\n_marshal_key_swap and/or _unmarshal_key_swap dict,\nthe keys will be swapped.\nOtherwise @d is returned\nmarshal: bool, True if marshalling class to JSON,\nFalse if unmarshalling JSON to class\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def _run_conversion(self, meta_graph_def):\n    grappler_session_config = config_pb2.ConfigProto()\n    custom_rewriter_config = _get_tensorrt_rewriter_config(conversion_params=self._conversion_params._replace(allow_build_at_runtime=True), is_dynamic_op=True, max_batch_size=None, disable_non_trt_optimizers=self._test_only_disable_non_trt_optimizers, use_implicit_batch=not self._use_dynamic_shape, profile_strategy=self._profile_strategy)\n    grappler_session_config.graph_options.rewrite_options.CopyFrom(custom_rewriter_config)\n    return tf_optimizer.OptimizeGraph(grappler_session_config, meta_graph_def, graph_id=b'tf_graph')", "docstring": "Run Grappler's OptimizeGraph() tool to convert the graph.\n\nArgs:\nmeta_graph_def: the MetaGraphDef instance to run the optimizations on.\n\nReturns:\nThe optimized GraphDef.", "source": "github-repos"}
{"code": "def Decode(data, encoding=None):\n    if data is None:\n        return None\n    if isinstance(data, str) or isinstance(data, bytes):\n        string = data\n    else:\n        string = str(data)\n    if isinstance(string, str):\n        return string\n    try:\n        return string.decode('ascii')\n    except UnicodeError:\n        pass\n    if encoding:\n        try:\n            return string.decode(encoding)\n        except UnicodeError:\n            pass\n    try:\n        return string.decode('utf8')\n    except UnicodeError:\n        pass\n    try:\n        return string.decode(sys.getfilesystemencoding())\n    except UnicodeError:\n        pass\n    try:\n        return string.decode(sys.getdefaultencoding())\n    except UnicodeError:\n        pass\n    return string.decode('iso-8859-1')", "docstring": "Returns string with non-ascii characters decoded to UNICODE.\n\nUTF-8, the suggested encoding, and the usual suspects will be attempted in\norder.\n\nArgs:\ndata: A string or object that has str() and unicode() methods that may\ncontain an encoding incompatible with the standard output encoding.\nencoding: The suggested encoding if known.\n\nReturns:\nA text string representing the decoded byte string.", "source": "github-repos"}
{"code": "def transpose(a, axes=None):\n    \n    if isinstance(a, np.ndarray):\n        return np.transpose(a, axes)\n    elif isinstance(a, RemoteArray):\n        return a.transpose(*axes)\n    elif isinstance(a, Remote):\n        return _remote_to_array(a).transpose(*axes)\n    elif isinstance(a, DistArray):\n        if axes is None:\n            axes = range(a.ndim - 1, -1, -1)\n        axes = list(axes)\n        if len(set(axes)) < len(axes):\n            raise ValueError(\"repeated axis in transpose\")\n        if sorted(axes) != list(range(a.ndim)):\n            raise ValueError(\"axes don't match array\")\n        distaxis = a._distaxis\n        new_distaxis = axes.index(distaxis)\n        new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]\n        return DistArray(new_subarrays, new_distaxis)\n    else:\n        return np.transpose(a, axes)", "docstring": "Returns a view of the array with axes transposed.\n\nFor a 1-D array, this has no effect.\nFor a 2-D array, this is the usual matrix transpose.\nFor an n-D array, if axes are given, their order indicates how the\naxes are permuted\n\nArgs:\na (array_like): Input array.\naxes (list of int, optional): By default, reverse the dimensions,\notherwise permute the axes according to the values given.", "source": "juraj-google-style"}
{"code": "def push(self, stream, value):\n        \n\n        raise ArgumentError(\"Attempting to push reading to an invalid stream walker that cannot hold data\", selector=self.selector, stream=stream)", "docstring": "Update this stream walker with a new responsive reading.\n\nArgs:\nstream (DataStream): The stream that we're pushing\nvalue (IOTileReading): The reading that we're pushing", "source": "juraj-google-style"}
{"code": "def exit_code(self, code):\n    if ((code is not None) and (code in [0, 1, 3])):\n        self._exit_code = code\n    else:\n        self.log.warning(u'Invalid exit code')", "docstring": "Set the App exit code.\n\nFor TC Exchange Apps there are 3 supported exit codes.\n* 0 indicates a normal exit\n* 1 indicates a failure during execution\n* 3 indicates a partial failure\n\nArgs:\ncode (integer): The exit code value for the app.", "source": "codesearchnet"}
{"code": "def _add_function(self, func, identify_observed):\n    key = self.make_key(func)\n    if (key not in self.observers):\n        self.observers[key] = ObserverFunction(func, identify_observed, (key, self.observers))\n        return True\n    else:\n        return False", "docstring": "Add a function as an observer.\n\nArgs:\nfunc: The function to register as an observer.\nidentify_observed: See docstring for add_observer.\n\nReturns:\nTrue if the function is added, otherwise False.", "source": "codesearchnet"}
{"code": "def getHostCertPath(self, name):\n    path = s_common.genpath(self.certdir, 'hosts', ('%s.crt' % name))\n    if (not os.path.isfile(path)):\n        return None\n    return path", "docstring": "Gets the path to a host certificate.\n\nArgs:\nname (str): The name of the host keypair.\n\nExamples:\nGet the path to the host certificate for the host \"myhost\":\n\nmypath = cdir.getHostCertPath('myhost')\n\nReturns:\nstr: The path if exists.", "source": "codesearchnet"}
{"code": "def get_next_base26(prev=None):\n    if (not prev):\n        return 'a'\n    r = re.compile('^[a-z]*$')\n    if (not r.match(prev)):\n        raise ValueError('Invalid base26')\n    if (not prev.endswith('z')):\n        return (prev[:(- 1)] + chr((ord(prev[(- 1)]) + 1)))\n    return (get_next_base26(prev[:(- 1)]) + 'a')", "docstring": "Increment letter-based IDs.\n\nGenerates IDs like ['a', 'b', ..., 'z', 'aa', ab', ..., 'az', 'ba', ...]\n\nReturns:\nstr: Next base-26 ID.", "source": "codesearchnet"}
{"code": "def process_input(self, stream, value, rpc_executor):\n        \n\n        self.sensor_log.push(stream, value)\n\n        \n        if stream.important:\n            associated_output = stream.associated_stream()\n            self.sensor_log.push(associated_output, value)\n\n        to_check = deque([x for x in self.roots])\n\n        while len(to_check) > 0:\n            node = to_check.popleft()\n            if node.triggered():\n                try:\n                    results = node.process(rpc_executor, self.mark_streamer)\n                    for result in results:\n                        result.raw_time = value.raw_time\n                        self.sensor_log.push(node.stream, result)\n                except:\n                    self._logger.exception(\"Unhandled exception in graph node processing function for node %s\", str(node))\n\n                \n                \n                if len(results) > 0:\n                    to_check.extend(node.outputs)", "docstring": "Process an input through this sensor graph.\n\nThe tick information in value should be correct and is transfered\nto all results produced by nodes acting on this tick.\n\nArgs:\nstream (DataStream): The stream the input is part of\nvalue (IOTileReading): The value to process\nrpc_executor (RPCExecutor): An object capable of executing RPCs\nin case we need to do that.", "source": "juraj-google-style"}
{"code": "def set_timing(self, timing: bool, reset: bool=False) -> None:\n    self._timing = timing\n    if reset:\n        self.reset()", "docstring": "Manually set the ``timing`` parameter, and optionally reset the timers.\n\nArgs:\ntiming: should we be timing?\nreset: reset the timers?", "source": "codesearchnet"}
{"code": "def get_object_id_from_graph(access_token=None):\n    if (access_token is None):\n        access_token = get_graph_token_from_msi()\n    endpoint = (('https:\n    headers = {'Authorization': ('Bearer ' + access_token), 'Host': GRAPH_RESOURCE_HOST}\n    ret = requests.get(endpoint, headers=headers)\n    return ret.json()['id']", "docstring": "Return the object ID for the Graph user who owns the access token.\n\nArgs:\naccess_token (str): A Microsoft Graph access token. (Not an Azure access token.)\nIf not provided, attempt to get it from MSI_ENDPOINT.\n\nReturns:\nAn object ID string for a user or service principal.", "source": "codesearchnet"}
{"code": "def get_definition(self, name: YangIdentifier, kw: YangIdentifier) -> Optional['Statement']:\n    stmt = self.superstmt\n    while stmt:\n        res = stmt.find1(kw, name)\n        if res:\n            return res\n        stmt = stmt.superstmt\n    return None", "docstring": "Search ancestor statements for a definition.\n\nArgs:\nname: Name of a grouping or datatype (with no prefix).\nkw: ``grouping`` or ``typedef``.\n\nRaises:\nDefinitionNotFound: If the definition is not found.", "source": "codesearchnet"}
{"code": "def parse_panel_app_panel(panel_info, hgnc_map, institute='cust000', panel_type='clinical'):\n    \n    date_format = \"%Y-%m-%dT%H:%M:%S.%f\"\n    \n    gene_panel = {}\n    gene_panel['version'] = float(panel_info['version'])\n    gene_panel['date'] = get_date(panel_info['Created'][:-1], date_format=date_format)\n    gene_panel['display_name'] = panel_info['SpecificDiseaseName']\n    gene_panel['institute'] = institute\n    gene_panel['panel_type'] = panel_type\n    \n    LOG.info(\"Parsing panel %s\", gene_panel['display_name'])\n    \n    gene_panel['genes'] = []\n    \n    nr_low_confidence = 1\n    nr_genes = 0\n    for nr_genes, gene in enumerate(panel_info['Genes'],1):\n        gene_info = parse_panel_app_gene(gene, hgnc_map)\n        if not gene_info:\n            nr_low_confidence += 1\n            continue\n        gene_panel['genes'].append(gene_info)\n    \n    LOG.info(\"Number of genes in panel %s\", nr_genes)\n    LOG.info(\"Number of low confidence genes in panel %s\", nr_low_confidence)\n    \n    return gene_panel", "docstring": "Parse a PanelApp panel\n\nArgs:\npanel_info(dict)\nhgnc_map(dict): Map from symbol to hgnc ids\ninstitute(str)\npanel_type(str)\n\nReturns:\ngene_panel(dict)", "source": "juraj-google-style"}
{"code": "def find_newline(self, size=-1):\n    \n    if size < 0:\n      return self._buffer.find('\\n', self._offset)\n    return self._buffer.find('\\n', self._offset, self._offset + size)", "docstring": "Search for newline char in buffer starting from current offset.\n\nArgs:\nsize: number of bytes to search. -1 means all.\n\nReturns:\noffset of newline char in buffer. -1 if doesn't exist.", "source": "juraj-google-style"}
{"code": "def _traceback_to_alignment(tb, a, b):\n    \n    \n    \n    \n    for idx, direction in tb:\n        if direction == Direction.DIAG:\n            yield (idx[0] - 1, idx[1] - 1)\n        elif direction == Direction.UP:\n            yield (idx[0] - 1, None)\n        elif direction == Direction.LEFT:\n            yield (None, idx[1] - 1)", "docstring": "Convert a traceback (i.e. as returned by `tracebacks()`) into an alignment\n(i.e. as returned by `align`).\n\nArguments:\ntb: A traceback.\na: the sequence defining the rows in the traceback matrix.\nb: the sequence defining the columns in the traceback matrix.\n\nReturns: An iterable of (index, index) tupless where ether (but not both)\ntuples can be `None`.", "source": "juraj-google-style"}
{"code": "def create_graph_from_data(self, data, **kwargs):\n        \n        \n        self.arguments['{SCORE}'] = self.scores[self.score]\n        self.arguments['{CUTOFF}'] = str(self.cutoff)\n        self.arguments['{VARSEL}'] = str(self.variablesel).upper()\n        self.arguments['{SELMETHOD}'] = self.var_selection[self.selmethod]\n        self.arguments['{PRUNING}'] = str(self.pruning).upper()\n        self.arguments['{PRUNMETHOD}'] = self.var_selection[self.prunmethod]\n        self.arguments['{NJOBS}'] = str(self.nb_jobs)\n        self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n        results = self._run_cam(data, verbose=self.verbose)\n\n        return nx.relabel_nodes(nx.DiGraph(results),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Apply causal discovery on observational data using CAM.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by the CAM algorithm.", "source": "juraj-google-style"}
{"code": "def _get_entities(self, text, language=''):\n    body = {'document': {'type': 'PLAIN_TEXT', 'content': text}, 'encodingType': 'UTF32'}\n    if language:\n        body['document']['language'] = language\n    request = self.service.documents().analyzeEntities(body=body)\n    response = request.execute()\n    result = []\n    for entity in response.get('entities', []):\n        mentions = entity.get('mentions', [])\n        if (not mentions):\n            continue\n        entity_text = mentions[0]['text']\n        offset = entity_text['beginOffset']\n        for word in entity_text['content'].split():\n            result.append({'content': word, 'beginOffset': offset})\n            offset += len(word)\n    return result", "docstring": "Returns the list of entities retrieved from the given text.\n\nArgs:\ntext (str): Input text.\nlanguage (:obj:`str`, optional): Language code.\n\nReturns:\nList of entities.", "source": "codesearchnet"}
{"code": "def root(self):\n    node = self\n    while (node.package is not None):\n        node = node.package\n    return node", "docstring": "Property to return the root of this node.\n\nReturns:\nPackage: this node's root package.", "source": "codesearchnet"}
{"code": "def extract_jtl_string_pairs_from_text_file(results_dict, file_path):\n    \n    result_pairs = re.findall(JTL_REGEX, open(file_path).read())\n    for result_key, result_comment in result_pairs:\n        results_dict[result_key] = result_comment\n    return results_dict", "docstring": "Extracts all string pairs matching the JTL pattern from given text file.\n\nThis can be used as an \"extract_func\" argument in the extract_string_pairs_in_directory method.\n\nArgs:\nresults_dict (dict): The dict to add the the string pairs to.\nfile_path (str): The path of the file from which to extract the string pairs.", "source": "juraj-google-style"}
{"code": "def Pack(cls, obj, version):\n    \n    if isinstance(obj, (datetime.datetime, datetime.date)):\n      return cls.AdManagerDateTimePacker(obj, version)\n    return obj", "docstring": "Pack the given object using Ad Manager-specific logic.\n\nArgs:\nobj: an object to be packed for SOAP using Ad Manager-specific logic, if\napplicable.\nversion: the version of the current API, e.g. 'v201811'\n\nReturns:\nThe given object packed with Ad Manager-specific logic for SOAP,\nif applicable. Otherwise, returns the given object unmodified.", "source": "juraj-google-style"}
{"code": "def read(self, directory, filename, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None, **kwargs):\n    path = os.path.join(directory, filename)\n    filename_split = filename.split('.')\n    name = filename_split[0]\n    extension = ''\n    if (len(filename_split) >= 2):\n        extension = filename_split[(- 1)]\n    if os.path.isfile(path):\n        session.add(self)\n        self._read(directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile, **kwargs)\n        self._commit(session, self.COMMIT_ERROR_MESSAGE)\n    else:\n        session.rollback()\n        log.warning('Could not find file named {0}. File not read.'.format(filename))", "docstring": "Generic read file into database method.\n\nArgs:\ndirectory (str): Directory containing the file to be read.\nfilename (str): Name of the file which will be read (e.g.: 'example.prj').\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.\nspatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.\nDefaults to False.\nspatialReferenceID (int, optional): Integer id of spatial reference system for the model. Required if\nspatial is True. Defaults to srid 4236.\nreplaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if\nthe file you are reading contains replacement parameters.", "source": "codesearchnet"}
{"code": "def seek(self, offset, whence=os.SEEK_SET):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    \n    \n    \n    if whence not in [os.SEEK_SET, os.SEEK_CUR, os.SEEK_END]:\n      raise IOError('Unsupported whence.')\n\n    self._file_object.seek(offset, whence)", "docstring": "Seeks to an offset within the file-like object.\n\nArgs:\noffset (int): offset to seek to.\nwhence (Optional(int)): value that indicates whether offset is an absolute\nor relative position within the file.\n\nRaises:\nIOError: if the seek failed.\nOSError: if the seek failed.", "source": "juraj-google-style"}
{"code": "def xzhdr(self, header, msgid_range=None):\n    args = header\n    if (msgid_range is not None):\n        args += (' ' + utils.unparse_msgid_range(msgid_range))\n    (code, message) = self.command('XZHDR', args)\n    if (code != 221):\n        raise NNTPReplyError(code, message)\n    return self.info(code, message, compressed=True)", "docstring": "XZHDR command.\n\nArgs:\nmsgid_range: A message-id as a string, or an article number as an\ninteger, or a tuple of specifying a range of article numbers in\nthe form (first, [last]) - if last is omitted then all articles\nafter first are included. A msgid_range of None (the default)\nuses the current article.", "source": "codesearchnet"}
{"code": "def diff_dictionaries(old_dict, new_dict):\n    old_set = set(old_dict)\n    new_set = set(new_dict)\n    added_set = (new_set - old_set)\n    removed_set = (old_set - new_set)\n    common_set = (old_set & new_set)\n    changes = 0\n    output = []\n    for key in added_set:\n        changes += 1\n        output.append(DictValue(key, None, new_dict[key]))\n    for key in removed_set:\n        changes += 1\n        output.append(DictValue(key, old_dict[key], None))\n    for key in common_set:\n        output.append(DictValue(key, old_dict[key], new_dict[key]))\n        if (str(old_dict[key]) != str(new_dict[key])):\n            changes += 1\n    output.sort(key=attrgetter('key'))\n    return [changes, output]", "docstring": "Diffs two single dimension dictionaries\n\nReturns the number of changes and an unordered list\nexpressing the common entries and changes.\n\nArgs:\nold_dict(dict): old dictionary\nnew_dict(dict): new dictionary\n\nReturns: list()\nint: number of changed records\nlist: [DictValue]", "source": "codesearchnet"}
{"code": "def manual_payment(request, invoice_id):\n    FORM_PREFIX = 'manual_payment'\n    current_invoice = InvoiceController.for_id_or_404(invoice_id)\n    form = forms.ManualPaymentForm((request.POST or None), prefix=FORM_PREFIX)\n    if (request.POST and form.is_valid()):\n        form.instance.invoice = current_invoice.invoice\n        form.instance.entered_by = request.user\n        form.save()\n        current_invoice.update_status()\n        form = forms.ManualPaymentForm(prefix=FORM_PREFIX)\n    data = {'invoice': current_invoice.invoice, 'form': form}\n    return render(request, 'registrasion/manual_payment.html', data)", "docstring": "Allows staff to make manual payments or refunds on an invoice.\n\nThis form requires a login, and the logged in user needs to be staff.\n\nArguments:\ninvoice_id (castable to int): The invoice ID to be paid\n\nReturns:\nrender:\nRenders ``registrasion/manual_payment.html`` with the following\ndata::\n\n{\n\"invoice\": models.commerce.Invoice(),\n\"form\": form,   # A form that saves a ``ManualPayment``\n# object.\n}", "source": "codesearchnet"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    read_receipt = event_values.get('read_receipt', None)\n    if read_receipt is not None:\n      event_values['read_receipt'] = (\n          self._READ_RECEIPT.get(read_receipt, 'UNKNOWN'))\n\n    message_type = event_values.get('message_type', None)\n    if message_type is not None:\n      event_values['message_type'] = (\n          self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def list(self, keyword=None, arg=None):\n    return [x for x in self.list_gen(keyword, arg)]", "docstring": "LIST command.\n\nA wrapper for all of the other list commands. The output of this command\ndepends on the keyword specified. The output format for each keyword can\nbe found in the list function that corresponds to the keyword.\n\nArgs:\nkeyword: Information requested.\narg: Pattern or keyword specific argument.\n\nNote: Keywords supported by this function are include ACTIVE,\nACTIVE.TIMES, DISTRIB.PATS, HEADERS, NEWSGROUPS, OVERVIEW.FMT and\nEXTENSIONS.\n\nRaises:\nNotImplementedError: For unsupported keywords.", "source": "codesearchnet"}
{"code": "def push(self, files, run=None, entity=None, project=None, description=None, force=True, progress=False):\n    if (project is None):\n        project = self.get_project()\n    if (project is None):\n        raise CommError('No project configured.')\n    if (run is None):\n        run = self.current_run_id\n    (run_id, result) = self.upload_urls(project, files, run, entity, description)\n    responses = []\n    for (file_name, file_info) in result.items():\n        try:\n            normal_name = os.path.join(*file_name.split('/'))\n            open_file = (files[normal_name] if isinstance(files, dict) else open(normal_name, 'rb'))\n        except IOError:\n            print(('%s does not exist' % file_name))\n            continue\n        if progress:\n            if hasattr(progress, '__call__'):\n                responses.append(self.upload_file_retry(file_info['url'], open_file, progress))\n            else:\n                length = os.fstat(open_file.fileno()).st_size\n                with click.progressbar(file=progress, length=length, label=('Uploading file: %s' % file_name), fill_char=click.style('&', fg='green')) as bar:\n                    responses.append(self.upload_file_retry(file_info['url'], open_file, (lambda bites, _: bar.update(bites))))\n        else:\n            responses.append(self.upload_file_retry(file_info['url'], open_file))\n        open_file.close()\n    return responses", "docstring": "Uploads multiple files to W&B\n\nArgs:\nfiles (list or dict): The filenames to upload\nrun (str, optional): The run to upload to\nentity (str, optional): The entity to scope this project to.  Defaults to wandb models\nproject (str, optional): The name of the project to upload to. Defaults to the one in settings.\ndescription (str, optional): The description of the changes\nforce (bool, optional): Whether to prevent push if git has uncommitted changes\nprogress (callable, or stream): If callable, will be called with (chunk_bytes,\ntotal_bytes) as argument else if True, renders a progress bar to stream.\n\nReturns:\nThe requests library response object", "source": "codesearchnet"}
{"code": "def from_url(url, format=None):\n    \n    \n    string = urllib2.urlopen(url).read()\n    \n    if PY3 is True:\n        \n        string = string.decode('utf-8')\n\n    \n    if format:\n        \n        format = format.lower().replace(\" \", \"_\")\n        func = parse.__getattr__(\"from_%s\" % format)\n    else:\n        \n        func = parse.from_unknown_text\n\n    \n    crs = func(string)\n    return crs", "docstring": "Returns the crs object from a string interpreted as a specified format, located at a given url site.\n\nArguments:\n\n- *url*: The url where the crs string is to be read from.\n- *format* (optional): Which format to parse the crs string as. One of \"ogc wkt\", \"esri wkt\", or \"proj4\".\nIf None, tries to autodetect the format for you (default).\n\nReturns:\n\n- CRS object.", "source": "juraj-google-style"}
{"code": "def device(self, idx):\n\n    class GpuDevice(Structure):\n        pass\n    c_nvmlDevice_t = POINTER(GpuDevice)\n    c_index = c_uint(idx)\n    device = c_nvmlDevice_t()\n    _check_return(_NVML.get_function('nvmlDeviceGetHandleByIndex_v2')(c_index, byref(device)))\n    return NvidiaDevice(device)", "docstring": "Get a specific GPU device\n\nArgs:\nidx: index of device\n\nReturns:\nNvidiaDevice: single GPU device", "source": "codesearchnet"}
{"code": "def console_set_default_background(con: tcod.console.Console, col: Tuple[(int, int, int)]) -> None:\n    lib.TCOD_console_set_default_background(_console(con), col)", "docstring": "Change the default background color for a console.\n\nArgs:\ncon (Console): Any Console instance.\ncol (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\n\n.. deprecated:: 8.5\nUse :any:`Console.default_bg` instead.", "source": "codesearchnet"}
{"code": "def read_zmat(cls, inputfile, implicit_index=True):\n        \n        cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']\n        if implicit_index:\n            zmat_frame = pd.read_table(inputfile, comment='\n                                       delim_whitespace=True,\n                                       names=cols)\n            zmat_frame.index = range(1, len(zmat_frame) + 1)\n        else:\n            zmat_frame = pd.read_table(inputfile, comment='\n                                       delim_whitespace=True,\n                                       names=['temp_index'] + cols)\n            zmat_frame.set_index('temp_index', drop=True, inplace=True)\n            zmat_frame.index.name = None\n        if pd.isnull(zmat_frame.iloc[0, 1]):\n            zmat_values = [1.27, 127., 127.]\n            zmat_refs = [constants.int_label[x] for x in\n                         ['origin', 'e_z', 'e_x']]\n            for row, i in enumerate(zmat_frame.index[:3]):\n                cols = ['b', 'a', 'd']\n                zmat_frame.loc[:, cols] = zmat_frame.loc[:, cols].astype('O')\n                if row < 2:\n                    zmat_frame.loc[i, cols[row:]] = zmat_refs[row:]\n                    zmat_frame.loc[i, ['bond', 'angle', 'dihedral'][row:]\n                                   ] = zmat_values[row:]\n                else:\n                    zmat_frame.loc[i, 'd'] = zmat_refs[2]\n                    zmat_frame.loc[i, 'dihedral'] = zmat_values[2]\n\n        elif zmat_frame.iloc[0, 1] in constants.int_label.keys():\n            zmat_frame = zmat_frame.replace(\n                {col: constants.int_label for col in ['b', 'a', 'd']})\n        zmat_frame = cls._cast_correct_types(zmat_frame)\n        try:\n            Zmat = cls(zmat_frame)\n        except InvalidReference:\n            raise UndefinedCoordinateSystem(\n                'Your zmatrix cannot be transformed to cartesian coordinates')\n        return Zmat", "docstring": "Reads a zmat file.\n\nLines beginning with ``#`` are ignored.\n\nArgs:\ninputfile (str):\nimplicit_index (bool): If this option is true the first column\nhas to be the element symbols for the atoms.\nThe row number is used to determine the index.\n\nReturns:\nZmat:", "source": "juraj-google-style"}
{"code": "def _construct(self, context):\n    with self.g.as_default():\n        if self._pass_through:\n            return self._pass_through._construct(context)\n        current_value = context.get(self, None)\n        assert (current_value is not _unspecified), 'Circular dependency'\n        if (current_value is not None):\n            return current_value\n        context[self] = _unspecified\n        method_args = self._replace_deferred(self._method_args, context)\n        method_kwargs = self._replace_deferred(self._method_kwargs, context)\n        result = self._method(*method_args, **method_kwargs)\n        _strip_unnecessary_contents_from_stack(result, set())\n        context[self] = result\n        return result", "docstring": "Constructs this by calling the deferred method.\n\nThis assumes that all unbound_vars have been specified in context and if\nthis layer has already been computed in this context, then the previously\nconstructed value will be returned.\n\nArgs:\ncontext: A dict of UnboundVariables/_DeferredLayers to their values.\nReturns:\nThe result of calling the given method on this layer.", "source": "codesearchnet"}
{"code": "def append_transformation(self, transformation, extend_collection=False, clear_redo=True):\n    if (self.ncores and transformation.use_multiprocessing):\n        p = Pool(self.ncores)\n        z = map((lambda x: (x, transformation, extend_collection, clear_redo)), self.transformed_structures)\n        new_tstructs = p.map(_apply_transformation, z, 1)\n        self.transformed_structures = []\n        for ts in new_tstructs:\n            self.transformed_structures.extend(ts)\n    else:\n        new_structures = []\n        for x in self.transformed_structures:\n            new = x.append_transformation(transformation, extend_collection, clear_redo=clear_redo)\n            if (new is not None):\n                new_structures.extend(new)\n        self.transformed_structures.extend(new_structures)", "docstring": "Appends a transformation to all TransformedStructures.\n\nArgs:\ntransformation: Transformation to append\nextend_collection: Whether to use more than one output structure\nfrom one-to-many transformations. extend_collection can be a\nnumber, which determines the maximum branching for each\ntransformation.\nclear_redo (bool): Whether to clear the redo list. By default,\nthis is True, meaning any appends clears the history of\nundoing. However, when using append_transformation to do a\nredo, the redo list should not be cleared to allow multiple\nredos.\n\nReturns:\nList of booleans corresponding to initial transformed structures\neach boolean describes whether the transformation altered the\nstructure", "source": "codesearchnet"}
{"code": "def compute_batch_size(dataset):\n\n    def get_static_batch_dim(type_spec):\n        try:\n            output_shape = type_spec._to_legacy_output_shapes()\n        except NotImplementedError:\n            return None\n        if not isinstance(output_shape, tensor_shape.TensorShape):\n            return None\n        if output_shape.rank is None:\n            return None\n        return output_shape.dims[0].value\n    batch_dims = [get_static_batch_dim(type_spec) for type_spec in nest.flatten(dataset_ops.get_structure(dataset))]\n    if all((d is not None for d in batch_dims)):\n        if all((d == batch_dims[0] for d in batch_dims)):\n            batch_dim = batch_dims[0]\n        else:\n            batch_dim = -1\n        return constant_op.constant(batch_dim, dtype=dtypes.int64, name='static_batch_size')\n    return ged_ops.compute_batch_size(dataset._variant_tensor)", "docstring": "An operation that returns the batch size of the dataset.\n\nThis op tries to infer the batch size statically by walking up the dataset\ntree from the final dataset node and returning the batch size of the first\nbatching dataset (such as from .batch() and .padded_batch()) that it\nencounters. This differs from using the `element_spec` of a dataset in that it\ndoes not account for partial batches.\n\nThis operation may fail if it encounters contradictory batch sizes (for\nexample, if the dataset is created by zipping together two datasets with\ndifferent batch sizes), if there are no explicit batching transformations, or\nif there are operations downstream from the batching transformation that may\nmodify its batch size. In these cases, it returns a -1.\n\nArgs:\ndataset: A `tf.data.Dataset` object.\n\nReturns:\nA `tf.int64` Tensor representing the batch size of the dataset sans partial\nbatches. If this cannot be inferred statically, the value of this tensor\nwill be -1.", "source": "github-repos"}
{"code": "def filter_by_months(self, months):\n        \n        _filt_values = []\n        _filt_datetimes = []\n        for i, d in enumerate(self.datetimes):\n            if d in months:\n                _filt_datetimes.append(d)\n                _filt_values.append(self._values[i])\n        _filt_header = self.header.duplicate()\n        return MonthlyCollection(_filt_header, _filt_values, _filt_datetimes)", "docstring": "Filter the Data Collection based on a list of months of the year (as integers).\n\nArgs:\nmonths: A List of months of the year [1..12]\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def _add_sync_queues_and_barrier(self, name, dependencies):\n        \n        self._sync_queue_counter += 1\n        with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):\n            sync_queues = [\n                tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],\n                             shared_name='%s%s' % (name, i))\n                for i in range(self.num_worker)]\n            queue_ops = []\n            \n            token = tf.constant(False)\n            with tf.control_dependencies(dependencies):\n                for i, q in enumerate(sync_queues):\n                    if i != self.task_index:\n                        queue_ops.append(q.enqueue(token))\n\n            \n            queue_ops.append(\n                sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))\n\n            return tf.group(*queue_ops, name=name)", "docstring": "Adds ops to enqueue on all worker queues.\n\nArgs:\nname: prefixed for the shared_name of ops.\ndependencies: control dependency from ops.\n\nReturns:\nan op that should be used as control dependency before starting next step.", "source": "juraj-google-style"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    if registry_key is None:\n      return\n\n    values_dict = {}\n    for value_name in self._VALUE_NAMES:\n      registry_value = registry_key.GetValueByName(value_name)\n      if not registry_value:\n        continue\n\n      value_data = registry_value.GetDataAsObject()\n      if value_data is None:\n        continue\n\n      values_dict[value_name] = value_data\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def easeInOutCirc(n):\n    _checkRange(n)\n    n = (n * 2)\n    if (n < 1):\n        return ((- 0.5) * (math.sqrt((1 - (n ** 2))) - 1))\n    else:\n        n = (n - 2)\n        return (0.5 * (math.sqrt((1 - (n ** 2))) + 1))", "docstring": "A circular tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"}
{"code": "def parse(self, line, cell, namespace=None):\n    if (namespace is None):\n        ipy = IPython.get_ipython()\n        namespace = ipy.user_ns\n    args = CommandParser.create_args(line, namespace)\n    sub_parsers_progs = [x.prog for x in self._get_subparsers()]\n    matched_progs = []\n    for prog in sub_parsers_progs:\n        match = prog.split()[1:]\n        for i in range(len(args)):\n            if (args[i:(i + len(match))] == match):\n                matched_progs.append(prog)\n                break\n    matched_prog = None\n    if matched_progs:\n        matched_prog = max(matched_progs, key=(lambda x: len(x.split())))\n    line_args = self._get_subparser_line_args(matched_prog)\n    if line_args:\n        cell_config = None\n        try:\n            (cell_config, cell) = google.datalab.utils.commands.parse_config_for_selected_keys(cell, line_args)\n        except:\n            pass\n        if cell_config:\n            google.datalab.utils.commands.replace_vars(cell_config, namespace)\n            for arg_name in cell_config:\n                arg_value = cell_config[arg_name]\n                if (arg_value is None):\n                    continue\n                if (('--' + arg_name) in args):\n                    raise ValueError(('config item \"%s\" is specified in both cell and line.' % arg_name))\n                if isinstance(arg_value, bool):\n                    if arg_value:\n                        line += (' --%s' % arg_name)\n                else:\n                    line += (' --%s %s' % (arg_name, str(cell_config[arg_name])))\n    args = CommandParser.create_args(line, namespace)\n    args = vars(self.parse_args(args))\n    cell_config = None\n    cell_args = self._get_subparser_cell_args(matched_prog)\n    if cell_args:\n        try:\n            (cell_config, _) = google.datalab.utils.commands.parse_config_for_selected_keys(cell, cell_args)\n        except:\n            pass\n        if cell_config:\n            google.datalab.utils.commands.replace_vars(cell_config, namespace)\n        for arg in cell_args:\n            if (cell_args[arg]['required'] and ((cell_config is None) or (cell_config.get(arg, None) is None))):\n                raise ValueError(('Cell config \"%s\" is required.' % arg))\n    if cell_config:\n        args.update(cell_config)\n    return (args, cell)", "docstring": "Parses a line and cell into a dictionary of arguments, expanding variables from a namespace.\n\nFor each line parameters beginning with --, it also checks the cell content and see if it exists\nthere. For example, if \"--config1\" is a line parameter, it checks to see if cell dict contains\n\"config1\" item, and if so, use the cell value. The \"config1\" item will also be removed from\ncell content.\n\nArgs:\nline: line content.\ncell: cell content.\nnamespace: user namespace. If None, IPython's user namespace is used.\n\nReturns:\nA tuple of: 1. parsed config dict. 2. remaining cell after line parameters are extracted.", "source": "codesearchnet"}
{"code": "def onScreen(x, y=None):\n    (x, y) = _unpackXY(x, y)\n    x = int(x)\n    y = int(y)\n    (width, height) = platformModule._size()\n    return ((0 <= x < width) and (0 <= y < height))", "docstring": "Returns whether the given xy coordinates are on the screen or not.\n\nArgs:\nEither the arguments are two separate values, first arg for x and second\nfor y, or there is a single argument of a sequence with two values, the\nfirst x and the second y.\nExample: onScreen(x, y) or onScreen([x, y])\n\nReturns:\nbool: True if the xy coordinates are on the screen at its current\nresolution, otherwise False.", "source": "codesearchnet"}
{"code": "def buckets_get(self, bucket, projection='noAcl'):\n    \n    args = {'projection': projection}\n    url = Api._ENDPOINT + (Api._BUCKET_PATH % bucket)\n    return google.datalab.utils.Http.request(url, credentials=self._credentials, args=args)", "docstring": "Issues a request to retrieve information about a bucket.\n\nArgs:\nbucket: the name of the bucket.\nprojection: the projection of the bucket information to retrieve.\nReturns:\nA parsed bucket information dictionary.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def add_batch_parser(subparsers, parent_parser):\n    \n    parser = subparsers.add_parser(\n        'batch',\n        help='Displays information about batches and submit new batches',\n        description='Provides subcommands to display Batch information and '\n        'submit Batches to the validator via the REST API.')\n\n    grand_parsers = parser.add_subparsers(title='subcommands',\n                                          dest='subcommand')\n    grand_parsers.required = True\n    add_batch_list_parser(grand_parsers, parent_parser)\n    add_batch_show_parser(grand_parsers, parent_parser)\n    add_batch_status_parser(grand_parsers, parent_parser)\n    add_batch_submit_parser(grand_parsers, parent_parser)", "docstring": "Adds arguments parsers for the batch list, batch show and batch status\ncommands\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "juraj-google-style"}
{"code": "def main(pipeline_name, pipeline_context_input, working_dir, log_level, log_path):\n    pypyr.log.logger.set_root_logger(log_level, log_path)\n    logger.debug('starting pypyr')\n    pypyr.moduleloader.set_working_directory(working_dir)\n    load_and_run_pipeline(pipeline_name=pipeline_name, pipeline_context_input=pipeline_context_input, working_dir=working_dir)\n    logger.debug('pypyr done')", "docstring": "Entry point for pypyr pipeline runner.\n\nCall this once per pypyr run. Call me if you want to run a pypyr pipeline\nfrom your own code. This function does some one-off 1st time initialization\nbefore running the actual pipeline.\n\npipeline_name.yaml should be in the working_dir/pipelines/ directory.\n\nArgs:\npipeline_name: string. Name of pipeline, sans .yaml at end.\npipeline_context_input: string. Initialize the pypyr context with this\nstring.\nworking_dir: path. looks for ./pipelines and modules in this directory.\nlog_level: int. Standard python log level enumerated value.\nlog_path: os.path. Append log to this path.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def list():\n    kbs = []\n    ret = _pshell_json('Get-HotFix | Select HotFixID')\n    for item in ret:\n        kbs.append(item['HotFixID'])\n    return kbs", "docstring": "Get a list of updates installed on the machine\n\nReturns:\nlist: A list of installed updates\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' wusa.list", "source": "codesearchnet"}
{"code": "def sbi_ids(self) -> List[str]:\n    return ast.literal_eval(DB.get_hash_value(self._key, 'sbi_ids'))", "docstring": "Get the list of SBI Ids.\n\nReturns:\nlist, list of SBI ids associated with this subarray.", "source": "codesearchnet"}
{"code": "def create_public_ip(access_token, subscription_id, resource_group, public_ip_name, dns_label, location):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name, '?api-version=', NETWORK_API])\n    ip_body = {'location': location}\n    properties = {'publicIPAllocationMethod': 'Dynamic'}\n    properties['dnsSettings'] = {'domainNameLabel': dns_label}\n    ip_body['properties'] = properties\n    body = json.dumps(ip_body)\n    return do_put(endpoint, body, access_token)", "docstring": "Create a public ip address.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\npublic_ip_name (str): Name of the new public ip address resource.\ndns_label (str): DNS label to apply to the IP address.\nlocation (str): Azure data center location. E.g. westus.\n\nReturns:\nHTTP response. Public IP address JSON body.", "source": "codesearchnet"}
{"code": "def copy2(src, dst, metadata=None, retry_params=None):\n  \n  common.validate_file_path(src)\n  common.validate_file_path(dst)\n\n  if metadata is None:\n    metadata = {}\n    copy_meta = 'COPY'\n  else:\n    copy_meta = 'REPLACE'\n  metadata.update({'x-goog-copy-source': src,\n                   'x-goog-metadata-directive': copy_meta})\n\n  api = storage_api._get_storage_api(retry_params=retry_params)\n  status, resp_headers, content = api.put_object(\n      api_utils._quote_filename(dst), headers=metadata)\n  errors.check_status(status, [200], src, metadata, resp_headers, body=content)", "docstring": "Copy the file content from src to dst.\n\nArgs:\nsrc: /bucket/filename\ndst: /bucket/filename\nmetadata: a dict of metadata for this copy. If None, old metadata is copied.\nFor example, {'x-goog-meta-foo': 'bar'}.\nretry_params: An api_utils.RetryParams for this call to GCS. If None,\nthe default one is used.\n\nRaises:\nerrors.AuthorizationError: if authorization failed.\nerrors.NotFoundError: if an object that's expected to exist doesn't.", "source": "juraj-google-style"}
{"code": "def validate(self, value):\n    if (value == ''):\n        if self.kwargs.get('nullable', __nullable__):\n            value = None\n        else:\n            value = 0\n    if (not isinstance(value, Model)):\n        return super(ReferenceProperty, self).validate(value)\n    if (not value.is_saved()):\n        raise BadValueError(('%s instance must be saved before it can be stored as a reference' % self.reference_class.__class__.__name__))\n    if (not isinstance(value, self.reference_class)):\n        raise KindError(('Property %s must be an instance of %s' % (self.name, self.reference_class.__class__.__name__)))\n    return value", "docstring": "Validate reference.\n\nReturns:\nA valid value.\n\nRaises:\nBadValueError for the following reasons:\n- Value is not saved.\n- Object not of correct model type for reference.", "source": "codesearchnet"}
{"code": "def get_list_subtask_positions_objs(client, list_id):\n    params = {'list_id': int(list_id)}\n    response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)\n    return response.json()", "docstring": "Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful.\n\nReturns:\nList of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list", "source": "codesearchnet"}
{"code": "def __random_density_hs(N, rank=None, seed=None):\n    \n    G = __ginibre_matrix(N, rank, seed)\n    G = G.dot(G.conj().T)\n    return G / np.trace(G)", "docstring": "Generate a random density matrix from the Hilbert-Schmidt metric.\n\nArgs:\nN (int): the length of the density matrix.\nrank (int or None): the rank of the density matrix. The default\nvalue is full-rank.\nseed (int): Optional. To set a random seed.\nReturns:\nndarray: rho (N,N  a density matrix.", "source": "juraj-google-style"}
{"code": "def decrypt(key, ciphertext, shift_function=shift_case_english):\n    \n    return [shift_function(key, symbol) for symbol in ciphertext]", "docstring": "Decrypt Shift enciphered ``ciphertext`` using ``key``.\n\nExamples:\n>>> ''.join(decrypt(3, \"KHOOR\"))\nHELLO\n\n>> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes)\n[0xde, 0xad, 0xbe, 0xef]\n\nArgs:\nkey (int): The shift to use\nciphertext (iterable): The symbols to decrypt\nshift_function (function (shift, symbol)): Shift function to apply to symbols in the ciphertext\n\nReturns:\nDecrypted ciphertext, list of plaintext symbols", "source": "juraj-google-style"}
{"code": "def matches(self, msg_seq: int, msg: MessageInterface) -> bool:\n    return all((crit.matches(msg_seq, msg) for crit in self.all_criteria))", "docstring": "The message matches if all the defined search key criteria match.\n\nArgs:\nmsg_seq: The message sequence ID.\nmsg: The message object.", "source": "codesearchnet"}
{"code": "def predict(self, data, alpha=0.01, max_iter=2000, **kwargs):\n        \n        edge_model = GraphLasso(alpha=alpha, max_iter=max_iter)\n        edge_model.fit(data.values)\n\n        return nx.relabel_nodes(nx.DiGraph(edge_model.get_precision()),\n                                {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Predict the graph skeleton.\n\nArgs:\ndata (pandas.DataFrame): observational data\nalpha (float): regularization parameter\nmax_iter (int): maximum number of iterations\n\nReturns:\nnetworkx.Graph: Graph skeleton", "source": "juraj-google-style"}
{"code": "def _create_sample_validator(expected_input_keys: Collection[str]) -> Callable[[rd.RepresentativeSample], rd.RepresentativeSample]:\n\n    def validator(sample: rd.RepresentativeSample) -> rd.RepresentativeSample:\n        \n        if not isinstance(sample, Mapping):\n            raise ValueError(f'Invalid representative sample type. Provide a mapping (usually a dict) of {{input_key: input_value}}. Got type: {type(sample)} instead.')\n        if set(sample.keys()) != expected_input_keys:\n            raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(sample.keys())}. Please provide correct input keys for representative samples.')\n        return sample\n    return validator", "docstring": "Creates a validator function for a representative sample.\n\nArgs:\nexpected_input_keys: Input keys (keyword argument names) that the function\nthe sample will be used for is expecting to receive.\n\nReturns:\nA callable that validates a `RepresentativeSample`.", "source": "github-repos"}
{"code": "def _validate_iss(claims, issuer=None):\n    \n\n    if issuer is not None:\n        if isinstance(issuer, string_types):\n            issuer = (issuer,)\n        if claims.get('iss') not in issuer:\n            raise JWTClaimsError('Invalid issuer')", "docstring": "Validates that the 'iss' claim is valid.\n\nThe \"iss\" (issuer) claim identifies the principal that issued the\nJWT.  The processing of this claim is generally application specific.\nThe \"iss\" value is a case-sensitive string containing a StringOrURI\nvalue.  Use of this claim is OPTIONAL.\n\nArgs:\nclaims (dict): The claims dictionary to validate.\nissuer (str or iterable): Acceptable value(s) for the issuer that\nsigned the token.", "source": "juraj-google-style"}
{"code": "def _get_required_fn(fn, root_path):\n    if (not fn.startswith(root_path)):\n        raise ValueError('Both paths have to be absolute or local!')\n    replacer = ('/' if root_path.endswith('/') else '')\n    return fn.replace(root_path, replacer, 1)", "docstring": "Definition of the MD5 file requires, that all paths will be absolute\nfor the package directory, not for the filesystem.\n\nThis function converts filesystem-absolute paths to package-absolute paths.\n\nArgs:\nfn (str): Local/absolute path to the file.\nroot_path (str): Local/absolute path to the package directory.\n\nReturns:\nstr: Package-absolute path to the file.\n\nRaises:\nValueError: When `fn` is absolute and `root_path` relative or \\\nconversely.", "source": "codesearchnet"}
{"code": "def get_hours_description(self):\n    expression = self._expression_parts[2]\n    return self.get_segment_description(expression, _('every hour'), (lambda s: self.format_time(s, '0')), (lambda s: _('every {0} hours').format(s)), (lambda s: _('between {0} and {1}')), (lambda s: _('at {0}')))", "docstring": "Generates a description for only the HOUR portion of the expression\n\nReturns:\nThe HOUR description", "source": "codesearchnet"}
{"code": "def pathcase(string):\n    \n    string = snakecase(string)\n    if not string:\n        return string\n    return re.sub(r\"_\", \"/\", string)", "docstring": "Convert string into path case.\nJoin punctuation with slash.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Path cased string.", "source": "juraj-google-style"}
{"code": "def compile_source(self, sourcepath):\n    relpath = os.path.relpath(sourcepath, self.settings.SOURCES_PATH)\n    conditions = {'sourcedir': None, 'nopartial': True, 'exclude_patterns': self.settings.EXCLUDES, 'excluded_libdirs': self.settings.LIBRARY_PATHS}\n    if self.finder.match_conditions(sourcepath, **conditions):\n        destination = self.finder.get_destination(relpath, targetdir=self.settings.TARGET_PATH)\n        self.logger.debug(u'Compile: {}'.format(sourcepath))\n        (success, message) = self.compiler.safe_compile(self.settings, sourcepath, destination)\n        if success:\n            self.logger.info(u'Output: {}'.format(message))\n        else:\n            self.logger.error(message)\n        return (sourcepath, destination)\n    return None", "docstring": "Compile source to its destination\n\nCheck if the source is eligible to compile (not partial and allowed\nfrom exclude patterns)\n\nArgs:\nsourcepath (string): Sass source path to compile to its\ndestination using project settings.\n\nReturns:\ntuple or None: A pair of (sourcepath, destination), if source has\nbeen compiled (or at least tried). If the source was not\neligible to compile, return will be ``None``.", "source": "codesearchnet"}
{"code": "def format_statevector(vec, decimals=None):\n    num_basis = len(vec)\n    vec_complex = np.zeros(num_basis, dtype=complex)\n    for i in range(num_basis):\n        vec_complex[i] = (vec[i][0] + (1j * vec[i][1]))\n    if decimals:\n        vec_complex = np.around(vec_complex, decimals=decimals)\n    return vec_complex", "docstring": "Format statevector coming from the backend to present to the Qiskit user.\n\nArgs:\nvec (list): a list of [re, im] complex numbers.\ndecimals (int): the number of decimals in the statevector.\nIf None, no rounding is done.\n\nReturns:\nlist[complex]: a list of python complex numbers.", "source": "codesearchnet"}
{"code": "def load_tf_sharded_weights_from_safetensors(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None):\n    unexpected_keys = set()\n    all_missing_keys = []\n    mismatched_keys = set()\n    for shard_file in shard_files:\n        missing_layers, unexpected_layers, mismatched_layers = load_tf_weights_from_safetensors(model, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix)\n        all_missing_keys.append(set(missing_layers))\n        unexpected_keys.update(unexpected_layers)\n        mismatched_keys.update(mismatched_layers)\n        gc.collect()\n    missing_keys = set.intersection(*all_missing_keys)\n    if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):\n        error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}'\n        if len(missing_keys) > 0:\n            str_missing_keys = ','.join([f'\"{k}\"' for k in missing_keys])\n            error_message += f'\\nMissing key(s): {str_missing_keys}.'\n        if len(unexpected_keys) > 0:\n            str_unexpected_keys = ','.join([f'\"{k}\"' for k in unexpected_keys])\n            error_message += f'\\nMissing key(s): {str_unexpected_keys}.'\n        raise RuntimeError(error_message)\n    return (missing_keys, unexpected_keys, mismatched_keys)", "docstring": "This is the same as `load_tf_weights_from_safetensors` but for a sharded TF-format safetensors checkpoint.\nDetect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and\nshapes.\n\nThis load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being\nloaded in the model.\n\nArgs:\nmodel (`keras.models.Model`): The model in which to load the checkpoint.\nshard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names.\nignore_mismatched_sizes`bool`, *optional`, defaults to `True`):\nWhether or not to ignore the mismatch between the sizes\nstrict (`bool`, *optional*, defaults to `True`):\nWhether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.\n\nReturns:\nThree lists, one for the missing layers, another one for the unexpected layers, and a last one for the\nmismatched layers.", "source": "github-repos"}
{"code": "def attach(cls, transform_job_name, sagemaker_session=None):\n    sagemaker_session = (sagemaker_session or Session())\n    job_details = sagemaker_session.sagemaker_client.describe_transform_job(TransformJobName=transform_job_name)\n    init_params = cls._prepare_init_params_from_job_description(job_details)\n    transformer = cls(sagemaker_session=sagemaker_session, **init_params)\n    transformer.latest_transform_job = _TransformJob(sagemaker_session=sagemaker_session, job_name=init_params['base_transform_job_name'])\n    return transformer", "docstring": "Attach an existing transform job to a new Transformer instance\n\nArgs:\ntransform_job_name (str): Name for the transform job to be attached.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, one will be created\nusing the default AWS configuration chain.\n\nReturns:\nsagemaker.transformer.Transformer: The Transformer instance with the specified transform job attached.", "source": "codesearchnet"}
{"code": "def _on_receive(self, client, userdata, message):\n        \n\n        topic = message.topic\n        encoded = message.payload\n\n        try:\n            packet = json.loads(encoded)\n        except ValueError:\n            self._logger.warn(\"Could not decode json packet: %s\", encoded)\n            return\n\n        try:\n            seq = packet['sequence']\n            message_data = packet['message']\n        except KeyError:\n            self._logger.warn(\"Message received did not have required sequence and message keys: %s\", packet)\n            return\n\n        \n        \n        if topic not in self.queues:\n            found = False\n            for _, regex, callback, ordered in self.wildcard_queues:\n                if regex.match(topic):\n                    self.queues[topic] = PacketQueue(0, callback, ordered)\n                    found = True\n                    break\n\n            if not found:\n                self._logger.warn(\"Received message for unknown topic: %s\", topic)\n                return\n\n        self.queues[topic].receive(seq, [seq, topic, message_data])", "docstring": "Callback called whenever we receive a message on a subscribed topic\n\nArgs:\nclient (string): The client id of the client receiving the message\nuserdata (string): Any user data set with the underlying MQTT client\nmessage (object): The mesage with a topic and payload.", "source": "juraj-google-style"}
{"code": "def _apply(self, ctx: ExtensionContext) -> AugmentedDict:\n\n    def process(pattern: Pattern[str], _str: str) -> Any:\n        _match = pattern.match(_str)\n        if (_match is None):\n            return _str\n        (placeholder, external_path) = (_match.group(1), _match.group(2))\n        with open(self.locator(external_path, (cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None))) as fhandle:\n            content = fhandle.read()\n        return _str.replace(placeholder, content)\n    (node_key, node_value) = ctx.node\n    _pattern = re.compile(self.__pattern__)\n    return {node_key: process(_pattern, node_value)}", "docstring": "Performs the actual loading of an external resource into the current model.\n\nArgs:\nctx: The processing context.\n\nReturns:\nReturns a dictionary that gets incorporated into the actual model.", "source": "codesearchnet"}
{"code": "def get_item_concept_mapping(self, lang):\n    concepts = self.filter(active=True, lang=lang)\n    return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))", "docstring": "Get mapping of items_ids to concepts containing these items\n\nArgs:\nlang (str): language of concepts\n\nReturns:\ndict: item (int) -> set of concepts (int)", "source": "codesearchnet"}
{"code": "def Open(self, file_object):\n    if (not file_object):\n        raise ValueError('Missing file-like object.')\n    file_object.seek(0, os.SEEK_SET)\n    data = file_object.read(len(self._HEADER_SIGNATURE))\n    if (data != self._HEADER_SIGNATURE):\n        file_object.close()\n        raise IOError('Unsupported SQLite database signature.')\n    with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n        self._temp_file_path = temp_file.name\n        while data:\n            temp_file.write(data)\n            data = file_object.read(self._COPY_BUFFER_SIZE)\n    self._connection = sqlite3.connect(self._temp_file_path)\n    self._connection.text_factory = bytes\n    self._cursor = self._connection.cursor()", "docstring": "Opens the database file object.\n\nArgs:\nfile_object (FileIO): file-like object.\n\nRaises:\nIOError: if the SQLite database signature does not match.\nOSError: if the SQLite database signature does not match.\nValueError: if the file-like object is invalid.", "source": "codesearchnet"}
{"code": "def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):\n        \n        return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values())))", "docstring": "Get all leaves reachable from the given set of items. Leaves having\ninactive relations to other items are omitted.\n\nArgs:\nitem_ids (list): items which are taken as roots for the reachability\nlanguage (str): if specified, filter out items which are not\navailable in the given language\n\nReturns:\nset: leaf items which are reachable from the given set of items", "source": "juraj-google-style"}
{"code": "def get_angle(v1, v2, units=\"degrees\"):\n    \n    d = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n    d = min(d, 1)\n    d = max(d, -1)\n    angle = math.acos(d)\n    if units == \"degrees\":\n        return math.degrees(angle)\n    elif units == \"radians\":\n        return angle\n    else:\n        raise ValueError(\"Invalid units {}\".format(units))", "docstring": "Calculates the angle between two vectors.\n\nArgs:\nv1: Vector 1\nv2: Vector 2\nunits: \"degrees\" or \"radians\". Defaults to \"degrees\".\n\nReturns:\nAngle between them in degrees.", "source": "juraj-google-style"}
{"code": "def ParseInteger(text, is_signed=False, is_long=False):\n    result = _ParseAbstractInteger(text, is_long=is_long)\n    checker = _INTEGER_CHECKERS[((2 * int(is_long)) + int(is_signed))]\n    checker.CheckValue(result)\n    return result", "docstring": "Parses an integer.\n\nArgs:\ntext: The text to parse.\nis_signed: True if a signed integer must be parsed.\nis_long: True if a long integer must be parsed.\n\nReturns:\nThe integer value.\n\nRaises:\nValueError: Thrown Iff the text is not a valid integer.", "source": "codesearchnet"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('Invalid struct missing the unique identifier attribute.')\n    if self._cryptographic_parameters:\n        self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(MACSignatureKeyInformation, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the MACSignatureKeyInformation struct to a\nstream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def make_coordinated_read_dataset(self, cluster, num_consumers, sharding_policy=data_service_ops.ShardingPolicy.OFF):\n    if sharding_policy not in [data_service_ops.ShardingPolicy.OFF, data_service_ops.ShardingPolicy.DYNAMIC]:\n        raise ValueError(f'Unsupported sharding policy: {sharding_policy}')\n    ds = dataset_ops.Dataset.from_tensors(math_ops.cast(0, dtypes.int64))\n    ds = ds.concatenate(dataset_ops.Dataset.random())\n\n    def make_group(x):\n        x = x % 2 ** 32\n        return dataset_ops.Dataset.range(x * num_consumers, (x + 1) * num_consumers)\n    ds = ds.flat_map(make_group)\n    consumers = []\n    for consumer_index in range(num_consumers):\n        consumers.append(self.make_distributed_dataset(ds, cluster, job_name='test', processing_mode=sharding_policy, consumer_index=consumer_index, num_consumers=num_consumers))\n    ds = dataset_ops.Dataset.from_tensor_slices(consumers)\n    ds = ds.interleave(lambda x: x, cycle_length=num_consumers, num_parallel_calls=num_consumers)\n    return ds", "docstring": "Creates a dataset that performs coordinated reads.\n\nThe dataset simulates `num_consumers` consumers by using parallel\ninterleave to read with `num_consumers` threads, one for each consumer. The\nnth element of the dataset is produced by consumer `n % num_consumers`.\n\nThe dataset executed on each worker will produce groups of `num_consumers`\nsequentially increasing numbers. For example, if `num_consumers=3` a worker\ndataset could produce [0, 1, 2, 9, 10, 11, 21, 22, 23]. This enables\n`checkCoordinatedReadGroups` below to assess whether the values received in\neach step came from the same group.\n\nArgs:\ncluster: A tf.data service `TestCluster`.\nnum_consumers: The number of consumers to simulate.\nsharding_policy: The sharding policy to use. Currently only OFF and\nDYNAMIC are supported.\n\nReturns:\nA dataset that simulates reading with `num_consumers` consumers.", "source": "github-repos"}
{"code": "def make_hash(self, task):\n    t = [serialize_object(task['func_name'])[0], serialize_object(task['fn_hash'])[0], serialize_object(task['args'])[0], serialize_object(task['kwargs'])[0], serialize_object(task['env'])[0]]\n    x = b''.join(t)\n    hashedsum = hashlib.md5(x).hexdigest()\n    return hashedsum", "docstring": "Create a hash of the task inputs.\n\nThis uses a serialization library borrowed from ipyparallel.\nIf this fails here, then all ipp calls are also likely to fail due to failure\nat serialization.\n\nArgs:\n- task (dict) : Task dictionary from dfk.tasks\n\nReturns:\n- hash (str) : A unique hash string", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n        \n        self.GetGroup = channel.unary_unary(\n            \"/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/GetGroup\",\n            request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__group__service__pb2.GetGroupRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2.ErrorGroup.FromString,\n        )\n        self.UpdateGroup = channel.unary_unary(\n            \"/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/UpdateGroup\",\n            request_serializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_error__group__service__pb2.UpdateGroupRequest.SerializeToString,\n            response_deserializer=google_dot_devtools_dot_clouderrorreporting__v1beta1_dot_proto_dot_common__pb2.ErrorGroup.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def sketch_fasta(fasta_path, outdir):\n    genome_name = genome_name_from_fasta_path(fasta_path)\n    outpath = os.path.join(outdir, genome_name)\n    args = ['mash', 'sketch', '-o', outpath, fasta_path]\n    logging.info('Running Mash sketch with command: %s', ' '.join(args))\n    p = Popen(args)\n    p.wait()\n    sketch_path = (outpath + '.msh')\n    assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(genome_name, sketch_path)\n    return sketch_path", "docstring": "Create a Mash sketch from an input fasta file\n\nArgs:\nfasta_path (str): input fasta file path. Genome name in fasta filename\noutdir (str): output directory path to write Mash sketch file to\n\nReturns:\nstr: output Mash sketch file path", "source": "codesearchnet"}
{"code": "def distort_color(image, thread_id=0, scope=None):\n  \n  with tf.name_scope(values=[image], name=scope, default_name='distort_color'):\n    color_ordering = thread_id % 2\n\n    if color_ordering == 0:\n      image = tf.image.random_brightness(image, max_delta=32. / 255.)\n      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n      image = tf.image.random_hue(image, max_delta=0.2)\n      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n    elif color_ordering == 1:\n      image = tf.image.random_brightness(image, max_delta=32. / 255.)\n      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n      image = tf.image.random_hue(image, max_delta=0.2)\n\n    \n    image = tf.clip_by_value(image, 0.0, 1.0)\n    return image", "docstring": "Distort the color of the image.\n\nEach color distortion is non-commutative and thus ordering of the color ops\nmatters. Ideally we would randomly permute the ordering of the color ops.\nRather then adding that level of complication, we select a distinct ordering\nof color ops for each preprocessing thread.\n\nArgs:\nimage: Tensor containing single image.\nthread_id: preprocessing thread ID.\nscope: Optional scope for name_scope.\nReturns:\ncolor-distorted image", "source": "juraj-google-style"}
{"code": "def ParseRecord(self, parser_mediator, key, structure):\n    if (key not in ('log_entry', 'log_entry_at_end', 'log_entry_offset', 'log_entry_offset_at_end')):\n        raise errors.ParseError('Unable to parse record, unknown structure: {0:s}'.format(key))\n    try:\n        date_time_string = self._GetISO8601String(structure)\n    except ValueError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to determine date time string with error: {0!s}'.format(exception))\n    fraction_of_second_length = len(structure.fraction_of_second)\n    if (fraction_of_second_length == 3):\n        date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()\n    elif (fraction_of_second_length in (6, 7)):\n        date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n    try:\n        date_time.CopyFromStringISO8601(date_time_string)\n    except ValueError as exception:\n        parser_mediator.ProduceExtractionWarning('unable to parse date time value: {0:s} with error: {1!s}'.format(date_time_string, exception))\n        return\n    event_data = SCCMLogEventData()\n    event_data.component = structure.component\n    event_data.offset = 0\n    event_data.text = structure.text\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parse the record and return an SCCM log event object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.\n\nRaises:\nParseError: when the structure type is unknown.", "source": "codesearchnet"}
{"code": "def find_local_maxima(self, input_grid):\n    (pixels, q_data) = self.quantize(input_grid)\n    centers = OrderedDict()\n    for p in pixels.keys():\n        centers[p] = []\n    marked = (np.ones(q_data.shape, dtype=int) * self.UNMARKED)\n    MIN_INFL = int(np.round((1 + (0.5 * np.sqrt(self.max_size)))))\n    MAX_INFL = (2 * MIN_INFL)\n    marked_so_far = []\n    for b in sorted(pixels.keys(), reverse=True):\n        infl_dist = (MIN_INFL + int(np.round(((float(b) / self.max_bin) * (MAX_INFL - MIN_INFL)))))\n        for p in pixels[b]:\n            if (marked[p] == self.UNMARKED):\n                ok = False\n                del marked_so_far[:]\n                for ((i, j), v) in np.ndenumerate(marked[((p[0] - infl_dist):((p[0] + infl_dist) + 1), (p[1] - infl_dist):((p[1] + infl_dist) + 1))]):\n                    if (v == self.UNMARKED):\n                        ok = True\n                        marked[(((i - infl_dist) + p[0]), ((j - infl_dist) + p[1]))] = b\n                        marked_so_far.append((((i - infl_dist) + p[0]), ((j - infl_dist) + p[1])))\n                    else:\n                        ok = False\n                        break\n                if ok:\n                    centers[b].append(p)\n                else:\n                    for m in marked_so_far:\n                        marked[m] = self.UNMARKED\n    marked[(:, :)] = self.UNMARKED\n    deferred_from_last = []\n    deferred_to_next = []\n    for delta in range(0, (self.delta + 1)):\n        for b in sorted(centers.keys(), reverse=True):\n            bin_lower = (b - delta)\n            deferred_from_last[:] = deferred_to_next[:]\n            del deferred_to_next[:]\n            foothills = []\n            n_centers = len(centers[b])\n            tot_centers = (n_centers + len(deferred_from_last))\n            for i in range(tot_centers):\n                if (i < n_centers):\n                    center = centers[b][i]\n                else:\n                    center = deferred_from_last[(i - n_centers)]\n                if (bin_lower < 0):\n                    bin_lower = 0\n                if (marked[center] == self.UNMARKED):\n                    captured = self.set_maximum(q_data, marked, center, bin_lower, foothills)\n                    if (not captured):\n                        deferred_to_next.append(center)\n                    else:\n                        pass\n            self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills)\n        del deferred_from_last[:]\n        del deferred_to_next[:]\n    return marked", "docstring": "Finds the local maxima in the inputGrid and perform region growing to identify objects.\n\nArgs:\ninput_grid: Raw input data.\n\nReturns:\narray with labeled objects.", "source": "codesearchnet"}
{"code": "def compute_weighted_loss(losses, sample_weight=None, reduction=ReductionV2.SUM_OVER_BATCH_SIZE, name=None):\n    ReductionV2.validate(reduction)\n    if reduction == ReductionV2.AUTO:\n        reduction = ReductionV2.SUM_OVER_BATCH_SIZE\n    if sample_weight is None:\n        sample_weight = 1.0\n    with backend.name_scope(name or 'weighted_loss'):\n        ops.get_default_graph()._last_loss_reduction = reduction\n        if not isinstance(losses, (keras_tensor.KerasTensor, ragged_tensor.RaggedTensor)):\n            losses = tensor_conversion.convert_to_tensor_v2_with_dispatch(losses)\n        input_dtype = losses.dtype\n        if not isinstance(sample_weight, keras_tensor.KerasTensor):\n            sample_weight = tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight)\n        losses = math_ops.cast(losses, 'float32')\n        sample_weight = math_ops.cast(sample_weight, 'float32')\n        losses, _, sample_weight = squeeze_or_expand_dimensions(losses, None, sample_weight)\n        weighted_losses = math_ops.multiply(losses, sample_weight)\n        loss = reduce_weighted_loss(weighted_losses, reduction)\n        loss = math_ops.cast(loss, input_dtype)\n        return loss", "docstring": "Computes the weighted loss.\n\nArgs:\nlosses: `Tensor` of shape `[batch_size, d1, ... dN]`.\nsample_weight: Optional `Tensor` whose rank is either 0, or the same rank as\n`losses`, or be broadcastable to `losses`.\nreduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.\nDefault value is `SUM_OVER_BATCH_SIZE`.\nname: Optional name for the op.\n\nRaises:\nValueError: If the shape of `sample_weight` is not compatible with `losses`.\n\nReturns:\nWeighted loss `Tensor` of the same type as `losses`. If `reduction` is\n`NONE`, this has the same shape as `losses`; otherwise, it is scalar.", "source": "github-repos"}
{"code": "def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5, from_sorted_ids=False):\n    reshape_matches = False\n    y_pred = ops.convert_to_tensor(y_pred)\n    y_true_dtype = y_pred.dtype if from_sorted_ids else 'int32'\n    y_true = ops.convert_to_tensor(y_true, dtype=y_true_dtype)\n    y_true_rank = len(y_true.shape)\n    y_pred_rank = len(y_pred.shape)\n    y_true_org_shape = ops.shape(y_true)\n    if y_true_rank is not None and y_pred_rank is not None:\n        if y_pred_rank > 2:\n            y_pred = ops.reshape(y_pred, [-1, y_pred.shape[-1]])\n        if y_true_rank > 1:\n            reshape_matches = True\n            y_true = ops.reshape(y_true, [-1])\n    if from_sorted_ids:\n        matches = ops.any(ops.equal(ops.expand_dims(y_true, axis=1), y_pred[:, :k]), axis=1)\n    else:\n        matches = ops.in_top_k(y_true, y_pred, k=k)\n    matches = ops.cast(matches, dtype=backend.floatx())\n    if reshape_matches:\n        matches = ops.reshape(matches, y_true_org_shape)\n    return matches", "docstring": "Computes how often integer targets are in the top `K` predictions.\n\nArgs:\ny_true: A tensor of shape `(batch_size)` representing indices or IDs of\ntrue categories.\ny_pred: If `from_sorted_ids=False`, a tensor of shape\n`(batch_size, num_categories)` containing the scores for each sample\nfor all possible categories. If `from_sorted_ids=True`, a tensor of\nshape `(batch_size, N)` containing indices or IDs of the top `N`\ncategories in order from highest score to lowest score.\nk: (Optional) Number of top elements to look at for computing accuracy.\nDefaults to `5`.\nfrom_sorted_ids: (Optional) Whether `y_pred` is sorted category IDs or\nscores for all categories (the default).\n\nReturns:\nA tensor with the same shape as `y_true` containing ones where `y_true`\nis in the top `k` and zeros elsewhere.", "source": "github-repos"}
{"code": "def load_install_json(self, filename=None):\n    if (filename is None):\n        filename = 'install.json'\n    file_fqpn = os.path.join(self.app_path, filename)\n    install_json = None\n    if os.path.isfile(file_fqpn):\n        try:\n            with open(file_fqpn, 'r') as fh:\n                install_json = json.load(fh)\n        except ValueError as e:\n            self.handle_error('Failed to load \"{}\" file ({}).'.format(file_fqpn, e))\n    else:\n        self.handle_error('File \"{}\" could not be found.'.format(file_fqpn))\n    return install_json", "docstring": "Return install.json data.\n\nArgs:\nfilename (str, optional): Defaults to None. The install.json filename (for bundled\nApps).\n\nReturns:\ndict: The contents of the install.json file.", "source": "codesearchnet"}
{"code": "def is_supported(cls, file=None, request=None, response=None, url_info=None):\n    tests = ((response, cls.is_response), (file, cls.is_file), (request, cls.is_request), (url_info, cls.is_url))\n    for (instance, method) in tests:\n        if instance:\n            try:\n                result = method(instance)\n            except NotImplementedError:\n                pass\n            else:\n                if result:\n                    return True\n                elif (result is VeryFalse):\n                    return VeryFalse", "docstring": "Given the hints, return whether the document is supported.\n\nArgs:\nfile: A file object containing the document.\nrequest (:class:`.http.request.Request`): An HTTP request.\nresponse (:class:`.http.request.Response`): An HTTP response.\nurl_info (:class:`.url.URLInfo`): A URLInfo.\n\nReturns:\nbool: If True, the reader should be able to read it.", "source": "codesearchnet"}
{"code": "def get_named_parent(decl):\n    if (not decl):\n        return None\n    parent = decl.parent\n    while (parent and ((not parent.name) or (parent.name == '::'))):\n        parent = parent.parent\n    return parent", "docstring": "Returns a reference to a named parent declaration.\n\nArgs:\ndecl (declaration_t): the child declaration\n\nReturns:\ndeclaration_t: the declaration or None if not found.", "source": "codesearchnet"}
{"code": "def unlock(self):\n    if (not unlockers.unlock(self, self._device.manufacturer)):\n        raise errors.JLinkException('Failed to unlock device.')\n    return True", "docstring": "Unlocks the device connected to the J-Link.\n\nUnlocking a device allows for access to read/writing memory, as well as\nflash programming.\n\nNote:\nUnlock is not supported on all devices.\n\nSupported Devices:\nKinetis\n\nReturns:\n``True``.\n\nRaises:\nJLinkException: if the device fails to unlock.", "source": "codesearchnet"}
{"code": "def is_file(self, follow_symlinks=True):\n        \n        return self._system.isfile(\n            path=self._path, client_kwargs=self._client_kwargs)", "docstring": "Return True if this entry is a file or a symbolic link pointing to a\nfile; return False if the entry is or points to a directory or other\nnon-file entry, or if it doesn’t exist anymore.\n\nThe result is cached on the os.DirEntry object.\n\nArgs:\nfollow_symlinks (bool): Follow symlinks.\nNot supported on cloud storage objects.\n\nReturns:\nbool: True if directory exists.", "source": "juraj-google-style"}
{"code": "def _ensure_list(tensor_or_list):\n  \n  if isinstance(tensor_or_list, (list, tuple)):\n    return list(tensor_or_list), True\n  return [tensor_or_list], False", "docstring": "Converts the input arg to a list if it is not a list already.\n\nArgs:\ntensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to\nconvert to a list of `Tensor`s.\n\nReturns:\nA tuple of two elements. The first is a Python list of `Tensor`s containing\nthe original arguments. The second is a boolean indicating whether\nthe original argument was a list or tuple already.", "source": "juraj-google-style"}
{"code": "def assert_input_compatibility(input_spec, inputs, layer_name):\n    if not input_spec:\n        return\n    input_spec = nest.flatten(input_spec)\n    if isinstance(inputs, dict):\n        names = [spec.name for spec in input_spec]\n        if all(names):\n            list_inputs = []\n            for name in names:\n                if name not in inputs:\n                    raise ValueError('Missing data for input \"%s\". You passed a data dictionary with keys %s. Expected the following keys: %s' % (name, list(inputs.keys()), names))\n                list_inputs.append(inputs[name])\n            inputs = list_inputs\n    inputs = nest.flatten(inputs)\n    for x in inputs:\n        if not hasattr(x, 'shape'):\n            raise TypeError('Inputs to a layer should be tensors. Got: %s' % (x,))\n    if len(inputs) != len(input_spec):\n        raise ValueError('Layer ' + layer_name + ' expects ' + str(len(input_spec)) + ' input(s), but it received ' + str(len(inputs)) + ' input tensors. Inputs received: ' + str(inputs))\n    for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n        if spec is None:\n            continue\n        shape = tensor_shape.TensorShape(x.shape)\n        if shape.rank is None:\n            return\n        if spec.ndim is not None and (not spec.allow_last_axis_squeeze):\n            ndim = shape.rank\n            if ndim != spec.ndim:\n                raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected ndim=' + str(spec.ndim) + ', found ndim=' + str(ndim) + '. Full shape received: ' + str(tuple(shape)))\n        if spec.max_ndim is not None:\n            ndim = x.shape.rank\n            if ndim is not None and ndim > spec.max_ndim:\n                raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected max_ndim=' + str(spec.max_ndim) + ', found ndim=' + str(ndim))\n        if spec.min_ndim is not None:\n            ndim = x.shape.rank\n            if ndim is not None and ndim < spec.min_ndim:\n                raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: : expected min_ndim=' + str(spec.min_ndim) + ', found ndim=' + str(ndim) + '. 
Full shape received: ' + str(tuple(shape)))\n        if spec.dtype is not None:\n            if x.dtype.name != spec.dtype:\n                raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected dtype=' + str(spec.dtype) + ', found dtype=' + str(x.dtype))\n        shape_as_list = shape.as_list()\n        if spec.axes:\n            for axis, value in spec.axes.items():\n                if hasattr(value, 'value'):\n                    value = value.value\n                if value is not None and shape_as_list[int(axis)] not in {value, None}:\n                    raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected axis ' + str(axis) + ' of input shape to have value ' + str(value) + ' but received input with shape ' + display_shape(x.shape))\n        if spec.shape is not None and shape.rank is not None:\n            spec_shape = spec.shape\n            if spec.allow_last_axis_squeeze:\n                if shape_as_list and shape_as_list[-1] == 1:\n                    shape_as_list = shape_as_list[:-1]\n                if spec_shape and spec_shape[-1] == 1:\n                    spec_shape = spec_shape[:-1]\n            for spec_dim, dim in zip(spec_shape, shape_as_list):\n                if spec_dim is not None and dim is not None:\n                    if spec_dim != dim:\n                        raise ValueError('Input ' + str(input_index) + ' is incompatible with layer ' + layer_name + ': expected shape=' + str(spec.shape) + ', found shape=' + display_shape(x.shape))", "docstring": "Checks compatibility between the layer and provided inputs.\n\nThis checks that the tensor(s) `inputs` verify the input assumptions\nof a layer (if any). If not, a clear and actional exception gets raised.\n\nArgs:\ninput_spec: An InputSpec instance, list of InputSpec instances, a nested\nstructure of InputSpec instances, or None.\ninputs: Input tensor, list of input tensors, or a nested structure of\ninput tensors.\nlayer_name: String, name of the layer (for error message formatting).\n\nRaises:\nValueError: in case of mismatch between\nthe provided inputs and the expectations of the layer.", "source": "github-repos"}
{"code": "def macro_state(self, micro_state):\n    assert (len(micro_state) == len(self.micro_indices))\n    reindexed = self.reindex()\n    micro_state = np.array(micro_state)\n    return tuple(((0 if (sum(micro_state[list(reindexed.partition[i])]) in self.grouping[i][0]) else 1) for i in self.macro_indices))", "docstring": "Translate a micro state to a macro state\n\nArgs:\nmicro_state (tuple[int]): The state of the micro nodes in this\ncoarse-graining.\n\nReturns:\ntuple[int]: The state of the macro system, translated as specified\nby this coarse-graining.\n\nExample:\n>>> coarse_grain = CoarseGrain(((1, 2),), (((0,), (1, 2)),))\n>>> coarse_grain.macro_state((0, 0))\n(0,)\n>>> coarse_grain.macro_state((1, 0))\n(1,)\n>>> coarse_grain.macro_state((1, 1))\n(1,)", "source": "codesearchnet"}
{"code": "def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):\n    if (kmip_version < enums.KMIPVersion.KMIP_1_3):\n        raise exceptions.VersionNotSupported('KMIP {} does not support the CapabilityInformation object.'.format(kmip_version.value))\n    local_buffer = BytearrayStream()\n    if self._streaming_capability:\n        self._streaming_capability.write(local_buffer, kmip_version=kmip_version)\n    if self._asynchronous_capability:\n        self._asynchronous_capability.write(local_buffer, kmip_version=kmip_version)\n    if self._attestation_capability:\n        self._attestation_capability.write(local_buffer, kmip_version=kmip_version)\n    if (kmip_version >= enums.KMIPVersion.KMIP_1_4):\n        if self._batch_undo_capability:\n            self._batch_undo_capability.write(local_buffer, kmip_version=kmip_version)\n        if self._batch_continue_capability:\n            self._batch_continue_capability.write(local_buffer, kmip_version=kmip_version)\n    if self._unwrap_mode:\n        self._unwrap_mode.write(local_buffer, kmip_version=kmip_version)\n    if self._destroy_action:\n        self._destroy_action.write(local_buffer, kmip_version=kmip_version)\n    if self._shredding_algorithm:\n        self._shredding_algorithm.write(local_buffer, kmip_version=kmip_version)\n    if self._rng_mode:\n        self._rng_mode.write(local_buffer, kmip_version=kmip_version)\n    self.length = local_buffer.length()\n    super(CapabilityInformation, self).write(output_buffer, kmip_version=kmip_version)\n    output_buffer.write(local_buffer.buffer)", "docstring": "Write the CapabilityInformation structure encoding to the data stream.\n\nArgs:\noutput_buffer (stream): A data stream in which to encode\nCapabilityInformation structure data, supporting a write\nmethod.\nkmip_version (enum): A KMIPVersion enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 2.0.\n\nRaises:\nVersionNotSupported: Raised when a KMIP version is provided that\ndoes not support the CapabilityInformation structure.", "source": "codesearchnet"}
{"code": "def disambiguate_text(self, text, language=None, entities=None):\n        \n\n        body = {\n            \"text\": text,\n            \"entities\": [],\n            \"onlyNER\": \"false\",\n            \"customisation\": \"generic\"\n        }\n\n        if language:\n            body['language'] = {\"lang\": language}\n\n        if entities:\n            body['entities'] = entities\n\n        result, status_code = self._process_query(body)\n\n        if status_code != 200:\n            logger.debug('Disambiguation failed.')\n\n        return result, status_code", "docstring": "Call the disambiguation service in order to get meanings.\n\nArgs:\ntext (str): Text to be disambiguated.\nlanguage (str): language of text (if known)\nentities (list): list of entities or mentions to be supplied by\nthe user.\n\nReturns:\ndict, int: API response and API status.", "source": "juraj-google-style"}
{"code": "def generate_nearest_neighbour_lookup_table(self):\n    self.jump_probability = {}\n    for site_label_1 in self.connected_site_pairs:\n        self.jump_probability[site_label_1] = {}\n        for site_label_2 in self.connected_site_pairs[site_label_1]:\n            self.jump_probability[site_label_1][site_label_2] = {}\n            for coordination_1 in range(self.max_coordination_per_site[site_label_1]):\n                self.jump_probability[site_label_1][site_label_2][coordination_1] = {}\n                for coordination_2 in range(1, (self.max_coordination_per_site[site_label_2] + 1)):\n                    self.jump_probability[site_label_1][site_label_2][coordination_1][coordination_2] = self.relative_probability(site_label_1, site_label_2, coordination_1, coordination_2)", "docstring": "Construct a look-up table of relative jump probabilities for a nearest-neighbour interaction Hamiltonian.\n\nArgs:\nNone.\n\nReturns:\nNone.", "source": "codesearchnet"}
{"code": "def wait_until_page_ready(page_object, timeout=WTF_TIMEOUT_MANAGER.NORMAL):\n        \n        try:\n            do_until(lambda: page_object.webdriver.execute_script(\"return document.readyState\").lower()\n                     == 'complete', timeout)\n        except wait_utils.OperationTimeoutError:\n            raise PageUtilOperationTimeoutError(\n                \"Timeout occurred while waiting for page to be ready.\")", "docstring": "Waits until document.readyState == Complete (e.g. ready to execute javascript commands)\n\nArgs:\npage_object (PageObject) : PageObject class\n\nKwargs:\ntimeout (number) : timeout period", "source": "juraj-google-style"}
{"code": "def infer_from_frame_stack(self, ob_stack):\n    (logits, vf) = self.sess.run([self.logits_t, self.value_function_t], feed_dict={self.obs_t: ob_stack})\n    return (logits, vf)", "docstring": "Infer policy from stack of observations.\n\nArgs:\nob_stack: array of shape (1, frame_stack_size, height, width, channels)\n\nReturns:\nlogits and vf.", "source": "codesearchnet"}
{"code": "def update(self, measurement, measurement_matrix):\n        \n        \n        measurement_matrix = np.atleast_2d(measurement_matrix)\n        expected_meas_mat_shape = (measurement.mean.shape[0], self.state_length)\n        if measurement_matrix.shape != expected_meas_mat_shape:\n            raise ValueError(\"Measurement matrix is wrong shape ({}). \" \\\n                    \"Expected: {}\".format(\n                        measurement_matrix.shape, expected_meas_mat_shape))\n\n        \n        self.measurements[-1].append(measurement)\n        self.measurement_matrices[-1].append(measurement_matrix)\n\n        \n        \n        prior = self.posterior_state_estimates[-1]\n\n        \n        innovation = measurement.mean - measurement_matrix.dot(prior.mean)\n        innovation_cov = measurement_matrix.dot(prior.cov).dot(\n            measurement_matrix.T)\n        innovation_cov += measurement.cov\n        kalman_gain = prior.cov.dot(measurement_matrix.T).dot(\n            np.linalg.inv(innovation_cov))\n\n        \n        post = self.posterior_state_estimates[-1]\n        self.posterior_state_estimates[-1] = MultivariateNormal(\n            mean=post.mean + kalman_gain.dot(innovation),\n            cov=post.cov - kalman_gain.dot(measurement_matrix).dot(prior.cov)\n        )", "docstring": "After each :py:meth:`predict`, this method may be called repeatedly to\nprovide additional measurements for each time step.\n\nArgs:\nmeasurement (MultivariateNormal): Measurement for this\ntime step with specified mean and covariance.\nmeasurement_matrix (array): Measurement matrix for this measurement.", "source": "juraj-google-style"}
{"code": "def find(self, predicate, first_n=0, device_name=None, exclude_node_names=None):\n    if exclude_node_names:\n        exclude_node_names = re.compile(exclude_node_names)\n    matched_data = []\n    for device in self._dump_tensor_data if device_name is None else (self._dump_tensor_data[device_name],):\n        for datum in self._dump_tensor_data[device]:\n            if exclude_node_names and exclude_node_names.match(datum.node_name):\n                continue\n            if predicate(datum, datum.get_tensor()):\n                matched_data.append(datum)\n                if first_n > 0 and len(matched_data) >= first_n:\n                    return matched_data\n    return matched_data", "docstring": "Find dumped tensor data by a certain predicate.\n\nArgs:\npredicate: A callable that takes two input arguments:\n\n```python\ndef predicate(debug_tensor_datum, tensor):\n# returns a bool\n```\n\nwhere `debug_tensor_datum` is an instance of `DebugTensorDatum`, which\ncarries the metadata, such as the `Tensor`'s node name, output slot\ntimestamp, debug op name, etc.; and `tensor` is the dumped tensor value\nas a `numpy.ndarray`.\nfirst_n: (`int`) return only the first n `DebugTensotDatum` instances (in\ntime order) for which the predicate returns True. To return all the\n`DebugTensotDatum` instances, let first_n be <= 0.\ndevice_name: optional device name.\nexclude_node_names: Optional regular expression to exclude nodes with\nnames matching the regular expression.\n\nReturns:\nA list of all `DebugTensorDatum` objects in this `DebugDumpDir` object\nfor which predicate returns True, sorted in ascending order of the\ntimestamp.", "source": "github-repos"}
{"code": "def select_copula(cls, X):\n        \n        frank = Bivariate(CopulaTypes.FRANK)\n        frank.fit(X)\n\n        if frank.tau <= 0:\n            selected_theta = frank.theta\n            selected_copula = CopulaTypes.FRANK\n            return selected_copula, selected_theta\n\n        copula_candidates = [frank]\n        theta_candidates = [frank.theta]\n\n        try:\n            clayton = Bivariate(CopulaTypes.CLAYTON)\n            clayton.fit(X)\n            copula_candidates.append(clayton)\n            theta_candidates.append(clayton.theta)\n        except ValueError:\n            \n            pass\n\n        try:\n            gumbel = Bivariate(CopulaTypes.GUMBEL)\n            gumbel.fit(X)\n            copula_candidates.append(gumbel)\n            theta_candidates.append(gumbel.theta)\n        except ValueError:\n            \n            pass\n\n        z_left, L, z_right, R = cls.compute_empirical(X)\n        left_dependence, right_dependence = cls.get_dependencies(\n            copula_candidates, z_left, z_right)\n\n        \n        cost_L = [np.sum((L - l) ** 2) for l in left_dependence]\n        cost_R = [np.sum((R - r) ** 2) for r in right_dependence]\n        cost_LR = np.add(cost_L, cost_R)\n\n        selected_copula = np.argmax(cost_LR)\n        selected_theta = theta_candidates[selected_copula]\n        return CopulaTypes(selected_copula), selected_theta", "docstring": "Select best copula function based on likelihood.\n\nArgs:\nX: 2-dimensional `np.ndarray`\n\nReturns:\ntuple: `tuple(CopulaType, float)` best fit and model param.", "source": "juraj-google-style"}
{"code": "def are_equal_elements(a_el, b_el):\n    if (a_el.tagName != b_el.tagName):\n        return False\n    if (sorted(a_el.attributes.items()) != sorted(b_el.attributes.items())):\n        return False\n    if (len(a_el.childNodes) != len(b_el.childNodes)):\n        return False\n    for (a_child_el, b_child_el) in zip(a_el.childNodes, b_el.childNodes):\n        if (a_child_el.nodeType != b_child_el.nodeType):\n            return False\n        if ((a_child_el.nodeType == a_child_el.TEXT_NODE) and (a_child_el.data != b_child_el.data)):\n            return False\n        if ((a_child_el.nodeType == a_child_el.ELEMENT_NODE) and (not are_equal_elements(a_child_el, b_child_el))):\n            return False\n    return True", "docstring": "Normalize and compare ElementTrees for equality.\n\nArgs:\na_el: ElementTree\nb_el: ElementTree\nElementTrees to compare for equality.\n\nReturns:\nbool: ``True`` if the ElementTrees are semantically equivalent.", "source": "codesearchnet"}
{"code": "def get_unspent_outputs(self):\n    cursor = backend.query.get_unspent_outputs(self.connection)\n    return (record for record in cursor)", "docstring": "Get the utxoset.\n\nReturns:\ngenerator of unspent_outputs.", "source": "codesearchnet"}
{"code": "def pull(self, project, run=None, entity=None):\n        \n        project, run = self.parse_slug(project, run=run)\n        urls = self.download_urls(project, run, entity)\n        responses = []\n        for fileName in urls:\n            _, response = self.download_write_file(urls[fileName])\n            if response:\n                responses.append(response)\n\n        return responses", "docstring": "Download files from W&B\n\nArgs:\nproject (str): The project to download\nrun (str, optional): The run to upload to\nentity (str, optional): The entity to scope this project to.  Defaults to wandb models\n\nReturns:\nThe requests library response object", "source": "juraj-google-style"}
{"code": "def obtain_all_variant_tensor_ops(dataset):\n    return _traverse(dataset, lambda op: op.outputs[0].dtype == dtypes.variant)", "docstring": "Given an input dataset, finds all dataset ops used for construction.\n\nA series of transformations would have created this dataset with each\ntransformation including zero or more Dataset ops, each producing a dataset\nvariant tensor. This method outputs all of them.\n\nArgs:\ndataset: Dataset to find variant tensors for.\n\nReturns:\nA list of variant_tensor producing dataset ops used to construct this\ndataset.", "source": "github-repos"}
{"code": "def download_supplementary_files(self, directory='./', download_sra=True, email=None, sra_kwargs=None):\n    directory_path = os.path.abspath(os.path.join(directory, ('%s_%s_%s' % ('Supp', self.get_accession(), re.sub('[\\\\s\\\\*\\\\?\\\\(\\\\),\\\\.;]', '_', self.metadata['title'][0])))))\n    utils.mkdir_p(os.path.abspath(directory_path))\n    downloaded_paths = dict()\n    if (sra_kwargs is None):\n        sra_kwargs = {}\n    blacklist = ('NONE',)\n    for (metakey, metavalue) in iteritems(self.metadata):\n        if ('supplementary_file' in metakey):\n            assert ((len(metavalue) == 1) and (metavalue != ''))\n            if (metavalue[0] in blacklist):\n                logger.warn((\"%s value is blacklisted as '%s' - skipping\" % (metakey, metavalue[0])))\n                continue\n            if ('sra' not in metavalue[0]):\n                download_path = os.path.abspath(os.path.join(directory, os.path.join(directory_path, metavalue[0].split('/')[(- 1)])))\n                try:\n                    utils.download_from_url(metavalue[0], download_path)\n                    downloaded_paths[metavalue[0]] = download_path\n                except Exception as err:\n                    logger.error(('Cannot download %s supplementary file (%s)' % (self.get_accession(), err)))\n    if download_sra:\n        try:\n            downloaded_files = self.download_SRA(email, directory=directory, **sra_kwargs)\n            downloaded_paths.update(downloaded_files)\n        except Exception as err:\n            logger.error(('Cannot download %s SRA file (%s)' % (self.get_accession(), err)))\n    return downloaded_paths", "docstring": "Download all supplementary data available for the sample.\n\nArgs:\ndirectory (:obj:`str`): Directory to download the data (in this directory\nfunction will create new directory with the files).\nDefaults to \"./\".\ndownload_sra (:obj:`bool`): Indicates whether to download SRA raw\ndata too. Defaults to True.\nemail (:obj:`str`): E-mail that will be provided to the Entrez.\nIt is mandatory if download_sra=True. Defaults to None.\nsra_kwargs (:obj:`dict`, optional): Kwargs passed to the\ndownload_SRA method. Defaults to None.\n\nReturns:\n:obj:`dict`: A key-value pair of name taken from the metadata and\npaths downloaded, in the case of SRA files the key is ``SRA``.", "source": "codesearchnet"}
{"code": "def make_iaf_stack(total_event_size,\n                   num_hidden_layers=2,\n                   seed=None,\n                   dtype=tf.float32):\n  \n\n  seed = tfd.SeedStream(seed, 'make_iaf_stack')\n\n  def make_iaf():\n    \n    initializer = tf.compat.v2.keras.initializers.VarianceScaling(\n        2 * 0.01, seed=seed() % (2**31 - 1))\n\n    made = tfb.AutoregressiveLayer(\n        params=2,\n        event_shape=[total_event_size],\n        hidden_units=[total_event_size] * num_hidden_layers,\n        activation=tf.nn.elu,\n        kernel_initializer=initializer,\n        dtype=dtype)\n\n    def shift_and_scale(x):\n      \n      x.set_shape(\n          x.shape.merge_with([None] * (x.shape.ndims - 1) + [total_event_size]))\n      return tf.unstack(made(x), num=2, axis=-1)\n\n    return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))\n\n  def make_swap():\n    \n    permutation = list(reversed(range(total_event_size)))\n    return tfb.Permute(permutation)\n\n  bijector = make_iaf()\n  bijector = make_swap()(bijector)\n  bijector = make_iaf()(bijector)\n  bijector = make_swap()(bijector)\n  bijector = make_iaf()(bijector)\n  bijector = make_swap()(bijector)\n\n  return bijector", "docstring": "Creates an stacked IAF bijector.\n\nThis bijector operates on vector-valued events.\n\nArgs:\ntotal_event_size: Number of dimensions to operate over.\nnum_hidden_layers: How many hidden layers to use in each IAF.\nseed: Random seed for the initializers.\ndtype: DType for the variables.\n\nReturns:\nbijector: The created bijector.", "source": "juraj-google-style"}
{"code": "def addColumn(self, header, values=[]):\n    if (len(values) == 0):\n        self._impl.addColumn(header)\n    else:\n        assert (len(values) == self.getNumRows())\n        if any((isinstance(value, basestring) for value in values)):\n            values = list(map(str, values))\n            self._impl.addColumnStr(header, values)\n        elif all((isinstance(value, Real) for value in values)):\n            values = list(map(float, values))\n            self._impl.addColumnDbl(header, values)\n        else:\n            raise NotImplementedError", "docstring": "Add a new column with the corresponding header and values to the\ndataframe.\n\nArgs:\nheader: The name of the new column.\n\nvalues: A list of size :func:`~amplpy.DataFrame.getNumRows` with\nall the values of the new column.", "source": "codesearchnet"}
{"code": "def unpack(self, buff, offset=0):\n        \n        try:\n            self._value = struct.unpack_from(self._fmt, buff, offset)[0]\n            if self.enum_ref:\n                self._value = self.enum_ref(self._value)\n        except (struct.error, TypeError, ValueError) as exception:\n            msg = '{}; fmt = {}, buff = {}, offset = {}.'.format(exception,\n                                                                 self._fmt,\n                                                                 buff, offset)\n            raise UnpackException(msg)", "docstring": "Unpack *buff* into this object.\n\nThis method will convert a binary data into a readable value according\nto the attribute format.\n\nArgs:\nbuff (bytes): Binary buffer.\noffset (int): Where to begin unpacking.\n\nRaises:\n:exc:`~.exceptions.UnpackException`: If unpack fails.", "source": "juraj-google-style"}
{"code": "def ldap_sync(self, **kwargs):\n        \n        path = '/groups/%s/ldap_sync' % self.get_id()\n        self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Sync LDAP groups.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def ensure_value_spec(value_spec: class_schema.ValueSpec, src_spec: class_schema.ValueSpec, root_path: typing.Optional[utils.KeyPath]=None) -> typing.Optional[class_schema.ValueSpec]:\n    if isinstance(value_spec, Union):\n        value_spec = value_spec.get_candidate(src_spec)\n    if isinstance(value_spec, Any):\n        return None\n    if not src_spec.is_compatible(value_spec):\n        raise TypeError(utils.message_on_path(f'Source spec {src_spec} is not compatible with destination spec {value_spec}.', root_path))\n    return value_spec", "docstring": "Extract counter part from value spec that matches dest spec type.\n\nArgs:\nvalue_spec: Value spec.\nsrc_spec: Destination value spec.\nroot_path: An optional path for the value to include in error message.\n\nReturns:\nvalue_spec of src_spec_type\n\nRaises:\nTypeError: When value_spec cannot match src_spec_type.", "source": "github-repos"}
{"code": "def _SetUnknownFlag(self, name, value):\n    \n    setter = self.__dict__['__set_unknown']\n    if setter:\n      try:\n        setter(name, value)\n        return value\n      except (TypeError, ValueError):  \n        raise exceptions.IllegalFlagValueError('\"{1}\" is not valid for --{0}'\n                                               .format(name, value))\n      except NameError:  \n        pass\n    raise exceptions.UnrecognizedFlagError(name, value)", "docstring": "Returns value if setting flag |name| to |value| returned True.\n\nArgs:\nname: Name of the flag to set.\nvalue: Value to set.\n\nReturns:\nFlag value on successful call.\n\nRaises:\nUnrecognizedFlagError\nIllegalFlagValueError", "source": "juraj-google-style"}
{"code": "def from_composition_and_entries(comp, entries_in_chemsys, working_ion_symbol='Li'):\n    pd = PhaseDiagram(entries_in_chemsys)\n    return ConversionElectrode.from_composition_and_pd(comp, pd, working_ion_symbol)", "docstring": "Convenience constructor to make a ConversionElectrode from a\ncomposition and all entries in a chemical system.\n\nArgs:\ncomp: Starting composition for ConversionElectrode, e.g.,\nComposition(\"FeF3\")\nentries_in_chemsys: Sequence containing all entries in a\nchemical system. E.g., all Li-Fe-F containing entries.\nworking_ion_symbol: Element symbol of working ion. Defaults to Li.", "source": "codesearchnet"}
{"code": "def adb_cmd(self, command, **kwargs):\n        \n        kwargs['timeout'] = kwargs.get('timeout', self._adb_shell_timeout)\n        if isinstance(command, list) or isinstance(command, tuple):\n            return self.adb_device.run_cmd(*list(command), **kwargs)\n        return self.adb_device.run_cmd(command, **kwargs)", "docstring": "Run adb command, for example: adb(['pull', '/data/local/tmp/a.png'])\n\nArgs:\ncommand: string or list of string\n\nReturns:\ncommand output", "source": "juraj-google-style"}
{"code": "def from_xmrs(cls, xmrs, **kwargs):\n    x = cls()\n    x.__dict__.update(xmrs.__dict__)\n    return x", "docstring": "Facilitate conversion among subclasses.\n\nArgs:\nxmrs (:class:`Xmrs`): instance to convert from; possibly\nan instance of a subclass, such as :class:`Mrs` or\n:class:`Dmrs`\n**kwargs: additional keyword arguments that may be used\nby a subclass's redefinition of :meth:`from_xmrs`.", "source": "codesearchnet"}
{"code": "def get_nodes_lines(self, **kwargs):\n    params = {'Nodes': util.ints_to_string(kwargs.get('nodes', []))}\n    result = self.make_request('bus', 'get_nodes_lines', **params)\n    if (not util.check_result(result)):\n        return (False, result.get('resultDescription', 'UNKNOWN ERROR'))\n    values = util.response_list(result, 'resultValues')\n    return (True, [emtype.NodeLinesItem(**a) for a in values])", "docstring": "Obtain stop IDs, coordinates and line information.\n\nArgs:\nnodes (list[int] | int): nodes to query, may be empty to get\nall nodes.\n\nReturns:\nStatus boolean and parsed response (list[NodeLinesItem]), or message\nstring in case of error.", "source": "codesearchnet"}
{"code": "def google_api_initilaize(config, api_call, alias=None):\n    if api_call['function'].endswith('list') or alias == 'list':\n        api_call['iterate'] = True\n    if api_call['api'] == 'dfareporting':\n        if not api_call['function'].startswith('userProfiles'):\n            is_superuser, profile_id = get_profile_for_api(config, api_call['auth'], api_call['kwargs']['id'] if api_call['function'] == 'accounts.get' else api_call['kwargs']['accountId'])\n            api_call['kwargs']['profileId'] = profile_id\n            if is_superuser:\n                api_call['version'] = 'prerelease'\n            elif 'accountId' in api_call['kwargs']:\n                del api_call['kwargs']['accountId']", "docstring": "Some Google API calls require a lookup or pre-call, add it here.\n\nModifies the API call before actual execution with any data\nspecifically required by an endpoint.  Currently:\n\n> dfa-reporting - look up user profile and add to call.\n\nArgs:\napi_call (dict): the JSON for the API call as defined in recipe.\nalias (string): mostly used to signal a list behavior (change to iterate in future?)\n\nReturns (dict):\nA modified JSON with additional API values added.\nCurrently mostly used by dfareporting API to add profile and account.\n\nRaises:\nValueError: If a required key in the recipe is missing.", "source": "github-repos"}
{"code": "def __truediv__(self, other):\n        \n        return self.__class__(self.x, self.y.__truediv__(other), *self._args,\n                              **self._kwargs)", "docstring": "True division of y\nArgs:\nother: The divisor\n\nReturns:\nSpectrum object with y values divided", "source": "juraj-google-style"}
{"code": "def bridge_create(br, may_exist=True, parent=None, vlan=None):\n    param_may_exist = _param_may_exist(may_exist)\n    if ((parent is not None) and (vlan is None)):\n        raise ArgumentValueError('If parent is specified, vlan must also be specified.')\n    if ((vlan is not None) and (parent is None)):\n        raise ArgumentValueError('If vlan is specified, parent must also be specified.')\n    param_parent = ('' if (parent is None) else ' {0}'.format(parent))\n    param_vlan = ('' if (vlan is None) else ' {0}'.format(vlan))\n    cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent, param_vlan)\n    result = __salt__['cmd.run_all'](cmd)\n    return _retcode_to_bool(result['retcode'])", "docstring": "Creates a new bridge.\n\nArgs:\nbr: A string - bridge name\nmay_exist: Bool, if False - attempting to create a bridge that exists returns False.\nparent: String, the name of the parent bridge (if the bridge shall be\ncreated as a fake bridge). If specified, vlan must also be\nspecified.\nvlan: Int, the VLAN ID of the bridge (if the bridge shall be created as\na fake bridge). If specified, parent must also be specified.\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_create br0", "source": "codesearchnet"}
{"code": "def enableEditing(self, enabled):\n        \n        for button in self.buttons[1:]:\n            button.setEnabled(enabled)\n            if button.isChecked():\n                button.setChecked(False)\n\n        model = self.tableView.model()\n\n        if model is not None:\n            model.enableEditing(enabled)", "docstring": "Enable the editing buttons to add/remove rows/columns and to edit the data.\n\nThis method is also a slot.\nIn addition, the data of model will be made editable,\nif the `enabled` parameter is true.\n\nArgs:\nenabled (bool): This flag indicates, if the buttons\nshall be activated.", "source": "juraj-google-style"}
{"code": "def destringize(self, string):\n    m = read_tuple_destr_pattern.match(string)\n    if (not m):\n        smbl.messages.error(\"'{}' is not a valid read name with respect to the RNF specification\".format(string), program='RNFtools', subprogram='RNF format', exception=ValueError)\n    groups = m.groups()\n    self.prefix = groups[0]\n    read_tuple_id = groups[1]\n    self.read_tuple_id = int(read_tuple_id, 16)\n    self.segments = []\n    segments_str = groups[2:(- 1)]\n    for b_str in segments_str:\n        if (b_str is not None):\n            if (b_str[0] == ','):\n                b_str = b_str[1:]\n            b = rnftools.rnfformat.Segment()\n            b.destringize(b_str)\n            self.segments.append(b)\n    self.suffix = groups[(- 1)]", "docstring": "Get RNF values for this read from its textual representation and save them\ninto this object.\n\nArgs:\nstring(str): Textual representation of a read.\n\nRaises:\nValueError", "source": "codesearchnet"}
{"code": "def doMove(self, orgresource, dstresource, dummy = 56184, stresource = 'F', bShareFireCopy = 'false'):\n        \n\n        url = nurls['doMove']\n        \n        data = {'userid': self.user_id,\n                'useridx': self.useridx,\n                'dummy': dummy,\n                'orgresource': orgresource,\n                'dstresource': dstresource,\n                'overwrite': overwrite,\n                'bShareFireCopy': bShareFireCopy,\n                }\n\n        r = self.session.post(url = url, data = data)\n\n        try:\n            j = json.loads(r.text)\n        except:\n            print '[*] Success checkUpload: 0 result'\n\n            return False\n\n        return self.resultManager(r.text)", "docstring": "DoMove\n\nArgs:\ndummy: ???\norgresource: Path for a file which you want to move\ndstresource: Destination path\nbShareFireCopy: ???\n\nReturns:\nTrue: Move success\nFalse: Move failed", "source": "juraj-google-style"}
{"code": "def list_classes(mod_name):\n    \n    mod = sys.modules[mod_name]\n    return [cls.__name__ for cls in mod.__dict__.values()\n            if is_mod_class(mod, cls)]", "docstring": "Lists all classes declared in a module.\n\nArgs:\nmod_name: the module name\nReturns:\nA list of functions declared in that module.", "source": "juraj-google-style"}
{"code": "def write_input(self, output_dir, make_dir_if_not_present=True, include_cif=False):\n    vinput = self.get_vasp_input()\n    vinput.write_input(output_dir, make_dir_if_not_present=make_dir_if_not_present)\n    if include_cif:\n        s = vinput['POSCAR'].structure\n        fname = (Path(output_dir) / ('%s.cif' % re.sub('\\\\s', '', s.formula)))\n        s.to(filename=fname)", "docstring": "Writes a set of VASP input to a directory.\n\nArgs:\noutput_dir (str): Directory to output the VASP input files\nmake_dir_if_not_present (bool): Set to True if you want the\ndirectory (and the whole path) to be created if it is not\npresent.\ninclude_cif (bool): Whether to write a CIF file in the output\ndirectory for easier opening by VESTA.", "source": "codesearchnet"}
{"code": "def validate_key(self, key):\n    if (not models.PasswordResetToken.valid_tokens.filter(key=key).exists()):\n        raise serializers.ValidationError(_('The provided reset token does not exist, or is expired.'))\n    return key", "docstring": "Validate the provided reset key.\n\nReturns:\nThe validated key.\n\nRaises:\nserializers.ValidationError:\nIf the provided key does not exist.", "source": "codesearchnet"}
{"code": "def _is_part_processor_protocol(obj: Any) -> bool:\n\n    def _full_name(obj: Any) -> str:\n        \n        return obj.__module__ + '.' + getattr(obj, '__qualname__', '')\n    if not callable(obj):\n        return False\n    if isinstance(obj, types.FunctionType):\n        type_hint = typing.get_type_hints(obj)\n    else:\n        type_hint = typing.get_type_hints(obj.__call__)\n    if 'return' not in type_hint:\n        return False\n    return_type = type_hint.pop('return')\n    if len(type_hint) != 1:\n        return False\n    if len(typing.get_args(return_type)) != 1:\n        return False\n    if return_type.__qualname__ != 'AsyncIterable' or _full_name(typing.get_args(return_type)[0]) != _full_name(content_api.ProcessorPart):\n        return False\n    if _full_name(next(iter(type_hint.values()))) != _full_name(content_api.ProcessorPart):\n        return False\n    return True", "docstring": "Returns True if `obj` implements PartProcessorFn.\n\nThis function is needed as Processors and PartProcessors are Protocols and do\nnot have proper runtime type checking.\n\nArgs:\nobj: any object or function", "source": "github-repos"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec)\n\n    fvde_volume = pyfvde.volume()\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      fvde.FVDEVolumeOpen(\n          fvde_volume, path_spec, file_object, resolver.Resolver.key_chain)\n    except:\n      file_object.close()\n      raise\n\n    self._fvde_volume = fvde_volume\n    self._file_object = file_object", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode. The default is 'rb'\nread-only binary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def update_user_groups(self, user, claims):\n    if (settings.GROUPS_CLAIM is not None):\n        django_groups = [group.name for group in user.groups.all()]\n        if (settings.GROUPS_CLAIM in claims):\n            claim_groups = claims[settings.GROUPS_CLAIM]\n            if (not isinstance(claim_groups, list)):\n                claim_groups = [claim_groups]\n        else:\n            logger.debug(\"The configured groups claim '{}' was not found in the access token\".format(settings.GROUPS_CLAIM))\n            claim_groups = []\n        groups_to_remove = (set(django_groups) - set(claim_groups))\n        groups_to_add = (set(claim_groups) - set(django_groups))\n        for group_name in groups_to_remove:\n            group = Group.objects.get(name=group_name)\n            user.groups.remove(group)\n            logger.debug(\"User removed from group '{}'\".format(group_name))\n        for group_name in groups_to_add:\n            try:\n                if settings.MIRROR_GROUPS:\n                    (group, _) = Group.objects.get_or_create(name=group_name)\n                    logger.debug(\"Created group '{}'\".format(group_name))\n                else:\n                    group = Group.objects.get(name=group_name)\n                user.groups.add(group)\n                logger.debug(\"User added to group '{}'\".format(group_name))\n            except ObjectDoesNotExist:\n                pass", "docstring": "Updates user group memberships based on the GROUPS_CLAIM setting.\n\nArgs:\nuser (django.contrib.auth.models.User): User model instance\nclaims (dict): Claims from the access token", "source": "codesearchnet"}
{"code": "def is_valid_op(self, symmop):\n    coords = self.centered_mol.cart_coords\n    for site in self.centered_mol:\n        coord = symmop.operate(site.coords)\n        ind = find_in_coord_list(coords, coord, self.tol)\n        if (not ((len(ind) == 1) and (self.centered_mol[ind[0]].species == site.species))):\n            return False\n    return True", "docstring": "Check if a particular symmetry operation is a valid symmetry operation\nfor a molecule, i.e., the operation maps all atoms to another\nequivalent atom.\n\nArgs:\nsymmop (SymmOp): Symmetry operation to test.\n\nReturns:\n(bool): Whether SymmOp is valid for Molecule.", "source": "codesearchnet"}
{"code": "def multiply(self, other):\n        \n        if not isinstance(other, Number):\n            raise QiskitError(\"other is not a number\")\n        return Chi(other * self._data, self._input_dims, self._output_dims)", "docstring": "Return the QuantumChannel self + other.\n\nArgs:\nother (complex): a complex number.\n\nReturns:\nChi: the scalar multiplication other * self as a Chi object.\n\nRaises:\nQiskitError: if other is not a valid scalar.", "source": "juraj-google-style"}
{"code": "def get_nested_plot_frame(obj, key_map, cached=False):\n    \n    clone = obj.map(lambda x: x)\n\n    \n    \n    for it1, it2 in zip(obj.traverse(lambda x: x), clone.traverse(lambda x: x)):\n        if isinstance(it1, DynamicMap):\n            with disable_constant(it2.callback):\n                it2.callback.inputs = it1.callback.inputs\n    with item_check(False):\n        return clone.map(lambda x: get_plot_frame(x, key_map, cached=cached),\n                         [DynamicMap, HoloMap], clone=False)", "docstring": "Extracts a single frame from a nested object.\n\nReplaces any HoloMap or DynamicMap in the nested data structure,\nwith the item corresponding to the supplied key.\n\nArgs:\nobj: Nested Dimensioned object\nkey_map: Dictionary mapping between dimensions and key value\ncached: Whether to allow looking up key in cache\n\nReturns:\nNested datastructure where maps are replaced with single frames", "source": "juraj-google-style"}
{"code": "def get_board(self, id, name=None):\n    return self.create_board(dict(id=id, name=name))", "docstring": "Get a board\n\nReturns:\nBoard: The board with the given `id`", "source": "codesearchnet"}
{"code": "def list(cls, session, mailbox):\n    endpoint = ('/mailboxes/%d/conversations.json' % mailbox.id)\n    return super(Conversations, cls).list(session, endpoint)", "docstring": "Return conversations in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox to list.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "codesearchnet"}
{"code": "def _GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):\n    value_name = value.name\n    curr_ctxt = ops.get_default_graph()._get_control_flow_context()\n    curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else ''\n    max_size = constant_op.constant(1)\n    while while_ctxt not in (None, curr_ctxt):\n        max_iter = while_ctxt.maximum_iterations\n        if max_iter is None:\n            raise ValueError(\"Cannot create a gradient accumulator for tensor '%s' inside XLA while_loop because maximum_iterations was not passed to the tf.while_loop call ('%s').\" % (value_name, while_ctxt.name))\n        max_iter_ctxt = max_iter.op._get_control_flow_context()\n        if util.IsContainingContext(curr_ctxt, max_iter_ctxt):\n            max_size *= max_iter\n        else:\n            const_max_iter = tensor_util.constant_value(max_iter)\n            if const_max_iter is None:\n                raise ValueError(\"Cannot create a gradient accumulator for tensor '%s' inside XLA while_loop. maximum_iterations tensor '%s' for while_loop context '%s' must be statically known (e.g. a constant value or known shape dimension), or be defined at or outside the while loop context '%s' (currently defined in '%s').\" % (value_name, max_iter.name, while_ctxt.name, curr_ctxt_name, max_iter_ctxt.name))\n            max_size *= const_max_iter\n        while_ctxt = util.GetContainingWhileContext(while_ctxt.outer_context, stop_ctxt=curr_ctxt)\n    return max_size", "docstring": "Calculate a max_size for use by stack ops inside an XLA while_loop.\n\nArgs:\nvalue: The value inside the while_loop forward context.  Used for printing\nerror messages.\nwhile_ctxt: The forward context inside which value resides.  This does not\nalways match the value's immediate context, as `value` may be inside e.g.\na cond context inside the while_loop.\n\nReturns:\nA tensor containing the `max_size` to feed to a Stack initializer.\n\nRaises:\nValueError: If `value` is nested inside a `while_loop` that either\nlacks a `maximum_iterations` parameter, or the `maximum_iterations`\nparameter:\n\n- is inside a `while_loop` that is a parent of the calling context, and\n- cannot be evaluated at graph build time to a constant.", "source": "github-repos"}
{"code": "def isempty(self, tables=None):\n    tables = (tables or self.tables)\n    for table in tables:\n        if (self.num_rows(table) > 0):\n            return False\n    return True", "docstring": "Return whether a table or the entire database is empty.\n\nA database is empty is if it has no tables. A table is empty\nif it has no rows.\n\nArguments:\n\ntables (sequence of str, optional): If provided, check\nthat the named tables are empty. If not provided, check\nthat all tables are empty.\n\nReturns:\n\nbool: True if tables are empty, else false.\n\nRaises:\n\nsql.OperationalError: If one or more of the tables do not\nexist.", "source": "codesearchnet"}
{"code": "def remote_upload(self, remote_url, folder_id=None, headers=None):\n    kwargs = {'folder': folder_id, 'headers': headers}\n    params = {'url': remote_url}\n    params.update({key: value for (key, value) in kwargs.items() if value})\n    return self._get('remotedl/add', params=params)", "docstring": "Used to make a remote file upload to openload.co\n\nNote:\nIf folder_id is not provided, the file will be uploaded to ``Home`` folder.\n\nArgs:\nremote_url (str): direct link of file to be remotely downloaded.\nfolder_id (:obj:`str`, optional): folder-ID to upload to.\nheaders (:obj:`dict`, optional): additional HTTP headers (e.g. Cookies or HTTP Basic-Auth)\n\nReturns:\ndict: dictionary containing (\"id\": uploaded file id, \"folderid\"). ::\n\n{\n\"id\": \"12\",\n\"folderid\": \"4248\"\n}", "source": "codesearchnet"}
{"code": "def disassemble_instruction(self, instruction):\n        \n        if not util.is_integer(instruction):\n            raise TypeError('Expected instruction to be an integer.')\n\n        buf_size = self.MAX_BUF_SIZE\n        buf = (ctypes.c_char * buf_size)()\n        res = self._dll.JLINKARM_DisassembleInst(ctypes.byref(buf), buf_size, instruction)\n        if res < 0:\n            raise errors.JLinkException('Failed to disassemble instruction.')\n\n        return ctypes.string_at(buf).decode()", "docstring": "Disassembles and returns the assembly instruction string.\n\nArgs:\nself (JLink): the ``JLink`` instance.\ninstruction (int): the instruction address.\n\nReturns:\nA string corresponding to the assembly instruction string at the\ngiven instruction address.\n\nRaises:\nJLinkException: on error.\nTypeError: if ``instruction`` is not a number.", "source": "juraj-google-style"}
{"code": "def remove_send_message(self, connection):\n    if (connection in self._send_message):\n        del self._send_message[connection]\n        LOGGER.debug('Removed send_message function for connection %s', connection)\n    else:\n        LOGGER.warning('Attempted to remove send_message function for connection %s, but no send_message function was registered', connection)", "docstring": "Removes a send_message function previously registered\nwith the Dispatcher.\n\nArgs:\nconnection (str): A locally unique identifier provided\nby the receiver of messages.", "source": "codesearchnet"}
{"code": "def adjust_internal_tacking_values(self,\n                                       min_non_zero_index,\n                                       max_index,\n                                       total_added):\n        \n        if max_index >= 0:\n            max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))\n            self.max_value = max(self.max_value, max_value)\n        if min_non_zero_index >= 0:\n            min_value = self.get_value_from_index(min_non_zero_index)\n            self.min_value = min(self.min_value, min_value)\n        self.total_count += total_added", "docstring": "Called during decoding and add to adjust the new min/max value and\ntotal count\n\nArgs:\nmin_non_zero_index min nonzero index of all added counts (-1 if none)\nmax_index max index of all added counts (-1 if none)", "source": "juraj-google-style"}
{"code": "def run_trials(runs):\n    inside_runs = 0\n    for _ in range(runs):\n        x = random.uniform(0, 1)\n        y = random.uniform(0, 1)\n        inside_runs += 1 if x * x + y * y <= 1.0 else 0\n    return (runs, inside_runs, 0)", "docstring": "Run trials and return a 3-tuple representing the results.\n\nArgs:\nruns: Number of trial runs to be executed.\n\nReturns:\nA 3-tuple (total trials, inside trials, 0).\n\nThe final zero is needed solely to make sure that the combine_results function\nhas same type for inputs and outputs (a requirement for combiner functions).", "source": "github-repos"}
{"code": "def significant_control(self, num, entity_id, entity_type='individual', **kwargs):\n    entities = {'individual': 'individual', 'corporate': 'corporate-entity', 'legal': 'legal-person', 'statements': 'persons-with-significant-control-statements', 'secure': 'super-secure'}\n    try:\n        entity = entities[entity_type]\n    except KeyError as e:\n        msg = ('Wrong entity_type supplied. Please choose from ' + 'individual, corporate, legal, statements or secure')\n        raise Exception(msg) from e\n    baseuri = ((self._BASE_URI + 'company/{}/persons-with-significant-control/'.format(num)) + '{}/{}'.format(entity, entity_id))\n    res = self.session.get(baseuri, params=kwargs)\n    self.handle_http_error(res)\n    return res", "docstring": "Get details of a specific entity with significant control.\n\nArgs:\nnum (str, int): Company number to search on.\nentity_id (str, int): Entity id to request details for\nentity_type (str, int): What type of entity to search for. Defaults\nto 'individual'. Other possible opetions are\n'corporate' (for corporate entitys), 'legal' (for legal\npersons), 'statements' (for a person with significant control\nstatement) and 'secure' (for a super secure person).\nkwargs (dict): additional keywords passed into requests.session.get\n*params* keyword.", "source": "codesearchnet"}
{"code": "def createCategoryFilter(self, positiveExamples):\n        \n        categoryFilter = self._fullClient.createCategoryFilter(\"CategoryFilter\", positiveExamples)\n        return categoryFilter.positions", "docstring": "Creates a filter fingerprint.\nArgs:\npositiveExamples, list(str): The list of positive example texts.\nReturns:\nlist of int: the positions representing the filter representing the texts\nRaises:\nCorticalioException: if the request was not successful", "source": "juraj-google-style"}
{"code": "def __init__(self, port_no=None, queue_id=None):\n        \n        super().__init__()\n        self.port_no = port_no\n        self.queue_id = queue_id", "docstring": "Create a QueueStatsRequest with the optional parameters below.\n\nArgs:\nport_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):\nAll ports if :attr:`.Port.OFPP_ALL`.\nqueue_id (int): All queues if OFPQ_ALL (``0xfffffff``).", "source": "juraj-google-style"}
{"code": "def get_angle(v1, v2, units='degrees'):\n    d = ((np.dot(v1, v2) / np.linalg.norm(v1)) / np.linalg.norm(v2))\n    d = min(d, 1)\n    d = max(d, (- 1))\n    angle = math.acos(d)\n    if (units == 'degrees'):\n        return math.degrees(angle)\n    elif (units == 'radians'):\n        return angle\n    else:\n        raise ValueError('Invalid units {}'.format(units))", "docstring": "Calculates the angle between two vectors.\n\nArgs:\nv1: Vector 1\nv2: Vector 2\nunits: \"degrees\" or \"radians\". Defaults to \"degrees\".\n\nReturns:\nAngle between them in degrees.", "source": "codesearchnet"}
{"code": "def __init__(self, name, context=None):\n    \n    if context is None:\n      context = google.datalab.Context.default()\n    self._context = context\n    self._api = _api.Api(context)\n    self._name_parts = _utils.parse_dataset_name(name, self._api.project_id)\n    self._full_name = '%s.%s' % self._name_parts\n    self._info = None\n    try:\n      self._info = self._get_info()\n    except google.datalab.utils.RequestException:\n      pass", "docstring": "Initializes an instance of a Dataset.\n\nArgs:\nname: the name of the dataset, as a string or (project_id, dataset_id) tuple.\ncontext: an optional Context object providing project_id and credentials. If a specific\nproject id or credentials are unspecified, the default ones configured at the global\nlevel are used.\nRaises:\nException if the name is invalid.", "source": "juraj-google-style"}
{"code": "def to_script(self, wf_name='wf'):\n        \n        self._closed()\n\n        script = []\n\n        \n        \n        \n        \n        \n        \n        \n        \n\n        \n        params = []\n        returns = []\n        for name, typ in self.wf_inputs.items():\n            params.append('{}=\\'{}\\''.format(name, typ))\n            returns.append(name)\n        script.append('{} = {}.add_inputs({})'.format(\n            ', '.join(returns), wf_name, ', '.join(params)))\n\n        \n        returns = []\n        for name, step in self.wf_steps.items():\n            pyname = step.python_name\n            returns = ['{}_{}'.format(pyname, o) for o in step['out']]\n            params = ['{}={}'.format(name, python_name(param))\n                      for name, param in step['in'].items()]\n            script.append('{} = {}.{}({})'.format(\n                ', '.join(returns), wf_name, pyname, ', '.join(params)))\n\n        \n        params = []\n        for name, details in self.wf_outputs.items():\n            params.append('{}={}'.format(\n                name, python_name(details['outputSource'])))\n        script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))\n\n        return '\\n'.join(script)", "docstring": "Generated and print the scriptcwl script for the currunt workflow.\n\nArgs:\nwf_name (str): string used for the WorkflowGenerator object in the\ngenerated script (default: ``wf``).", "source": "juraj-google-style"}
{"code": "def url_to_text(self, url):\n        \n        path, headers = urllib.request.urlretrieve(url)\n        return self.path_to_text(path)", "docstring": "Download PDF file and transform its document to string.\n\nArgs:\nurl:   PDF url.\n\nReturns:\nstring.", "source": "juraj-google-style"}
{"code": "def _validate_compose_list(destination_file, file_list, files_metadata=None, number_of_files=32):\n    common.validate_file_path(destination_file)\n    bucket = destination_file[0:(destination_file.index('/', 1) + 1)]\n    try:\n        if isinstance(file_list, types.StringTypes):\n            raise TypeError\n        list_len = len(file_list)\n    except TypeError:\n        raise TypeError('file_list must be a list')\n    if (list_len > number_of_files):\n        raise ValueError(('Compose attempted to create composite with too many(%i) components; limit is (%i).' % (list_len, number_of_files)))\n    if (list_len <= 0):\n        raise ValueError('Compose operation requires at least one component; 0 provided.')\n    if (files_metadata is None):\n        files_metadata = []\n    elif (len(files_metadata) > list_len):\n        raise ValueError(('files_metadata contains more entries(%i) than file_list(%i)' % (len(files_metadata), list_len)))\n    list_of_files = []\n    for (source_file, meta_data) in itertools.izip_longest(file_list, files_metadata):\n        if (not isinstance(source_file, str)):\n            raise TypeError('Each item of file_list must be a string')\n        if source_file.startswith('/'):\n            logging.warn('Detected a \"/\" at the start of the file, Unless the file name contains a \"/\" it  may cause files to be misread')\n        if source_file.startswith(bucket):\n            logging.warn('Detected bucket name at the start of the file, must not specify the bucket when listing file_names. May cause files to be misread')\n        common.validate_file_path((bucket + source_file))\n        list_entry = {}\n        if (meta_data is not None):\n            list_entry.update(meta_data)\n        list_entry['Name'] = source_file\n        list_of_files.append(list_entry)\n    return (list_of_files, bucket)", "docstring": "Validates the file_list and merges the file_list, files_metadata.\n\nArgs:\ndestination: Path to the file (ie. /destination_bucket/destination_file).\nfile_list: List of files to compose, see compose for details.\nfiles_metadata: Meta details for each file in the file_list.\nnumber_of_files: Maximum number of files allowed in the list.\n\nReturns:\nA tuple (list_of_files, bucket):\nlist_of_files: Ready to use dict version of the list.\nbucket: bucket name extracted from the file paths.", "source": "codesearchnet"}
{"code": "def is_github_repo_owner_the_official_one(context, repo_owner):\n    official_repo_owner = context.config['official_github_repos_owner']\n    if (not official_repo_owner):\n        raise ConfigError('This worker does not have a defined owner for official GitHub repositories. Given \"official_github_repos_owner\": {}'.format(official_repo_owner))\n    return (official_repo_owner == repo_owner)", "docstring": "Given a repo_owner, check if it matches the one configured to be the official one.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nrepo_owner (str): the repo_owner to verify\n\nRaises:\nscriptworker.exceptions.ConfigError: when no official owner was defined\n\nReturns:\nbool: True when ``repo_owner`` matches the one configured to be the official one", "source": "codesearchnet"}
{"code": "def get_key(self, key, request_only=False):\n    values = {}\n    requested_names = [x.name for x in self._package_requests if (not x.conflict)]\n    for pkg in self.resolved_packages:\n        if ((not request_only) or (pkg.name in requested_names)):\n            value = getattr(pkg, key)\n            if (value is not None):\n                values[pkg.name] = (pkg, value)\n    return values", "docstring": "Get a data key value for each resolved package.\n\nArgs:\nkey (str): String key of property, eg 'tools'.\nrequest_only (bool): If True, only return the key from resolved\npackages that were also present in the request.\n\nReturns:\nDict of {pkg-name: (variant, value)}.", "source": "codesearchnet"}
{"code": "def validate(self, config):\n    if (not isinstance(config, dict)):\n        raise errors.SchemeValidationError('Scheme can only validate a dictionary config, but was given {} (type: {})'.format(config, type(config)))\n    for arg in self.args:\n        if (arg.name in config):\n            arg.validate(config[arg.name])\n        elif arg.required:\n            raise errors.SchemeValidationError('Option \"{}\" is required, but not found.'.format(arg.name))", "docstring": "Validate the given config against the `Scheme`.\n\nArgs:\nconfig (dict): The configuration to validate.\n\nRaises:\nerrors.SchemeValidationError: The configuration fails\nvalidation against the `Schema`.", "source": "codesearchnet"}
{"code": "def get_label_set(self, type_str=None):\n        \n        return {v.label_str for v in self.node_gen if type_str in (None, v.type_str)}", "docstring": "Get a set of label_str for the tree rooted at this node.\n\nArgs:\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include\ninformation from nodes of that type.\n\nReturns:\nset: The labels of the nodes leading up to this node from the root.", "source": "juraj-google-style"}
{"code": "def test_src_dir_path(relative_path):\n    return os.path.join(os.environ['TEST_SRCDIR'], 'org_tensorflow/tensorflow', relative_path)", "docstring": "Creates an absolute test srcdir path given a relative path.\n\nArgs:\nrelative_path: a path relative to tensorflow root.\ne.g. \"contrib/session_bundle/example\".\n\nReturns:\nAn absolute path to the linked in runfiles.", "source": "github-repos"}
{"code": "def extractSchedule(self, schedule, period):\n    ret = namedtuple('ret', ['Hour', 'Min', 'Tariff', 'Period', 'Schedule'])\n    work_table = self.m_schd_1_to_4\n    if (Schedules.Schedule_5 <= schedule <= Schedules.Schedule_6):\n        work_table = self.m_schd_5_to_6\n    period += 1\n    schedule += 1\n    ret.Period = str(period)\n    ret.Schedule = str(schedule)\n    if ((schedule < 1) or (schedule > Extents.Schedules) or (period < 0) or (period > Extents.Periods)):\n        ekm_log(((('Out of bounds: tariff ' + str(period)) + ' for schedule ') + str(schedule)))\n        ret.Hour = ret.Min = ret.Tariff = str(0)\n        return ret\n    idxhr = (((('Schedule_' + str(schedule)) + '_Period_') + str(period)) + '_Hour')\n    idxmin = (((('Schedule_' + str(schedule)) + '_Period_') + str(period)) + '_Min')\n    idxrate = (((('Schedule_' + str(schedule)) + '_Period_') + str(period)) + '_Tariff')\n    if (idxhr not in work_table):\n        ekm_log(('Incorrect index: ' + idxhr))\n        ret.Hour = ret.Min = ret.Tariff = str(0)\n        return ret\n    if (idxmin not in work_table):\n        ekm_log(('Incorrect index: ' + idxmin))\n        ret.Hour = ret.Min = ret.Tariff = str(0)\n        return ret\n    if (idxrate not in work_table):\n        ekm_log(('Incorrect index: ' + idxrate))\n        ret.Hour = ret.Min = ret.Tariff = str(0)\n        return ret\n    ret.Hour = work_table[idxhr][MeterData.StringValue]\n    ret.Min = work_table[idxmin][MeterData.StringValue].zfill(2)\n    ret.Tariff = work_table[idxrate][MeterData.StringValue]\n    return ret", "docstring": "Read a single schedule tariff from meter object buffer.\n\nArgs:\nschedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extent.Schedules).\ntariff (int): A :class:`~ekmmeters.Tariffs` value or in range(Extent.Tariffs).\n\nReturns:\nbool: True on completion.", "source": "codesearchnet"}
{"code": "def _parse_logging(log_values: dict, service_config: dict):\n        \n        for log_key, log_value in log_values.items():\n            if 'driver' in log_key:\n                service_config['log_driver'] = log_value\n            if 'options' in log_key:\n                service_config['log_driver_options'] = log_value", "docstring": "Parse log key.\n\nArgs:\nlog_values (dict): logging configuration values\nservice_config (dict): Service specification", "source": "juraj-google-style"}
{"code": "def getWindow(title, exact=False):\n    titles = getWindows()\n    hwnd = titles.get(title, None)\n    if ((not hwnd) and (not exact)):\n        for (k, v) in titles.items():\n            if (title in k):\n                hwnd = v\n                break\n    if hwnd:\n        return Window(hwnd)\n    else:\n        return None", "docstring": "Return Window object if 'title' or its part found in visible windows titles, else return None\n\nReturn only 1 window found first\nArgs:\ntitle: unicode string\nexact (bool): True if search only exact match", "source": "codesearchnet"}
{"code": "def connectivity_array(self):\n    cart_coords = np.array(self.s.cart_coords)\n    all_sites = (cart_coords[(:, None, :)] + self.cart_offsets[(None, :, :)])\n    vt = Voronoi(all_sites.reshape(((- 1), 3)))\n    n_images = all_sites.shape[1]\n    cs = (len(self.s), len(self.s), len(self.cart_offsets))\n    connectivity = np.zeros(cs)\n    vts = np.array(vt.vertices)\n    for ((ki, kj), v) in vt.ridge_dict.items():\n        atomi = (ki \n        atomj = (kj \n        imagei = (ki % n_images)\n        imagej = (kj % n_images)\n        if ((imagei != (n_images \n            continue\n        if (imagei == (n_images \n            val = solid_angle(vt.points[ki], vts[v])\n            connectivity[(atomi, atomj, imagej)] = val\n        if (imagej == (n_images \n            val = solid_angle(vt.points[kj], vts[v])\n            connectivity[(atomj, atomi, imagei)] = val\n        if ((- 10.101) in vts[v]):\n            warn('Found connectivity with infinite vertex. Cutoff is too low, and results may be incorrect')\n    return connectivity", "docstring": "Provides connectivity array.\n\nReturns:\nconnectivity: An array of shape [atomi, atomj, imagej]. atomi is\nthe index of the atom in the input structure. Since the second\natom can be outside of the unit cell, it must be described\nby both an atom index and an image index. Array data is the\nsolid angle of polygon between atomi and imagej of atomj", "source": "codesearchnet"}
{"code": "def run_config_diagnostics(config_path=CONFIG_PATH):\n    config = read_config(config_path)\n    missing_sections = set()\n    malformed_entries = defaultdict(set)\n    for (section, expected_section_keys) in SECTION_KEYS.items():\n        section_content = config.get(section)\n        if (not section_content):\n            missing_sections.add(section)\n        else:\n            for option in expected_section_keys:\n                option_value = section_content.get(option)\n                if (not option_value):\n                    malformed_entries[section].add(option)\n    return (config_path, missing_sections, malformed_entries)", "docstring": "Run diagnostics on the configuration file.\n\nArgs:\nconfig_path (str): Path to the configuration file.\nReturns:\nstr, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing\nsections and a dict that maps each section to the entries that have either missing or empty\noptions.", "source": "codesearchnet"}
{"code": "def add_arguments(self, parser):\n        \n        parser.add_argument('name', nargs=1, choices=['kinetis'],\n                            help='name of MCU to unlock')\n        return self.add_common_arguments(parser, True)", "docstring": "Adds the unlock command arguments to the parser.\n\nArgs:\nself (UnlockCommand): the ``UnlockCommand`` instance\nparser (argparse.ArgumentParser): the parser to add the arguments to\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def acquaint_insides(swap_gate: ops.Gate, acquaintance_gate: ops.Operation, qubits: Sequence[ops.Qid], before: bool, layers: Layers, mapping: Dict[(ops.Qid, int)]) -> None:\n    max_reach = _get_max_reach(len(qubits), round_up=before)\n    reaches = itertools.chain(range(1, (max_reach + 1)), range(max_reach, (- 1), (- 1)))\n    offsets = ((0, 1) * max_reach)\n    swap_gate = SwapPermutationGate(swap_gate)\n    ops = []\n    for (offset, reach) in zip(offsets, reaches):\n        if (offset == before):\n            ops.append(acquaintance_gate)\n        for dr in range(offset, reach, 2):\n            ops.append(swap_gate(*qubits[dr:(dr + 2)]))\n    intrastitial_layer = getattr(layers, ('pre' if before else 'post'))\n    intrastitial_layer += ops\n    interstitial_layer = getattr(layers, (('prior' if before else 'posterior') + '_interstitial'))\n    interstitial_layer.append(acquaintance_gate)\n    reached_qubits = qubits[:(max_reach + 1)]\n    positions = list((mapping[q] for q in reached_qubits))\n    mapping.update(zip(reached_qubits, reversed(positions)))", "docstring": "Acquaints each of the qubits with another set specified by an\nacquaintance gate.\n\nArgs:\nqubits: The list of qubits of which half are individually acquainted\nwith another list of qubits.\nlayers: The layers to put gates into.\nacquaintance_gate: The acquaintance gate that acquaints the end qubit\nwith another list of qubits.\nbefore: Whether the acquainting is done before the shift.\nswap_gate: The gate used to swap logical indices.\nmapping: The mapping from qubits to logical indices. Used to keep track\nof the effect of inside-acquainting swaps.", "source": "codesearchnet"}
{"code": "def log(self, metric):\n    message = self.LOGFMT.format(**metric)\n    if metric['context']:\n        message += ' context: {context}'.format(context=metric['context'])\n    self._logger.log(self.level, message)", "docstring": "Format and output metric.\n\nArgs:\nmetric (dict): Complete metric.", "source": "codesearchnet"}
{"code": "def split_input(cls, mapper_spec):\n    \n    batch_size = int(_get_params(mapper_spec).get(\n        cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))\n    shard_count = mapper_spec.shard_count\n    namespace_ranges = namespace_range.NamespaceRange.split(shard_count,\n                                                            contiguous=True)\n    return [NamespaceInputReader(ns_range, batch_size)\n            for ns_range in namespace_ranges]", "docstring": "Returns a list of input readers for the input spec.\n\nArgs:\nmapper_spec: The MapperSpec for this InputReader.\n\nReturns:\nA list of InputReaders.", "source": "juraj-google-style"}
{"code": "def Validate(self, expression):\n    \n    parsed = self._Load(expression)\n\n    if not parsed:\n      raise DefinitionError(\"Empty StatFilter expression.\")\n\n    bad_keys = set(parsed) - self._KEYS\n    if bad_keys:\n      raise DefinitionError(\"Invalid parameters: %s\" % \",\".join(bad_keys))\n\n    if self.cfg.mask and not self.cfg.mode:\n      raise DefinitionError(\"mode can only be set when mask is also defined.\")\n\n    if self.cfg.mask:\n      if len(self.cfg.mask) > 1:\n        raise DefinitionError(\"Too many mask values defined.\")\n      if not self._PERM_RE.match(self.cfg.mask[0]):\n        raise DefinitionError(\"mask=%s is not octal, e.g. 0600\" % self.cfg.mask)\n\n    if self.cfg.mode:\n      if len(self.cfg.mode) > 1:\n        raise DefinitionError(\"Too many mode values defined.\")\n      if not self._PERM_RE.match(self.cfg.mode[0]):\n        raise DefinitionError(\"mode=%s is not octal, e.g. 0600\" % self.cfg.mode)\n\n    if self.cfg.gid:\n      for gid in self.cfg.gid:\n        matched = self._UID_GID_RE.match(gid)\n        if not matched:\n          raise DefinitionError(\"gid: %s is not an integer preceded by \"\n                                \"!, >, < or =.\" % gid)\n\n    if self.cfg.uid:\n      for uid in self.cfg.uid:\n        matched = self._UID_GID_RE.match(uid)\n        if not matched:\n          raise DefinitionError(\"uid: %s is not an integer preceded by \"\n                                \"!, >, < or =.\" % uid)\n\n    if self.cfg.file_re:\n      if len(self.cfg.file_re) > 1:\n        raise DefinitionError(\"Too many regexes defined: %s\" % self.cfg.file_re)\n      try:\n        self.file_re = re.compile(self.cfg.file_re[0])\n      except (re.error, TypeError) as e:\n        raise DefinitionError(\"Invalid file regex: %s\" % e)\n\n    if self.cfg.path_re:\n      if len(self.cfg.path_re) > 1:\n        raise DefinitionError(\"Too many regexes defined: %s\" % self.cfg.path_re)\n      try:\n        self.path_re = re.compile(self.cfg.path_re[0])\n      except (re.error, TypeError) as e:\n        raise DefinitionError(\"Invalid path regex: %s\" % e)\n\n    if self.cfg.file_type:\n      if len(self.cfg.file_type) > 1:\n        raise DefinitionError(\n            \"Too many file types defined: %s\" % self.cfg.file_type)\n      file_type = self.cfg.file_type[0].upper()\n      if file_type not in self._TYPES:\n        raise DefinitionError(\"Unsupported file type %s\" % file_type)\n\n    self._Initialize()\n    if not self.matchers:\n      raise DefinitionError(\"StatFilter has no actions: %s\" % expression)\n    return True", "docstring": "Validates that a parsed rule entry is valid for fschecker.\n\nArgs:\nexpression: A rule expression.\n\nRaises:\nDefinitionError: If the filter definition could not be validated.\n\nReturns:\nTrue if the expression validated OK.", "source": "juraj-google-style"}
{"code": "def gpio_get(self, pins=None):\n    if (pins is None):\n        pins = range(4)\n    size = len(pins)\n    indices = (ctypes.c_uint8 * size)(*pins)\n    statuses = (ctypes.c_uint8 * size)()\n    result = self._dll.JLINK_EMU_GPIO_GetState(ctypes.byref(indices), ctypes.byref(statuses), size)\n    if (result < 0):\n        raise errors.JLinkException(result)\n    return list(statuses)", "docstring": "Returns a list of states for the given pins.\n\nDefaults to the first four pins if an argument is not given.\n\nArgs:\nself (JLink): the ``JLink`` instance\npins (list): indices of the GPIO pins whose states are requested\n\nReturns:\nA list of states.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def add_primitives_path(path):\n    \n    if path not in _PRIMITIVES_PATHS:\n        if not os.path.isdir(path):\n            raise ValueError('Invalid path: {}'.format(path))\n\n        LOGGER.debug('Adding new primitives path %s', path)\n        _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))", "docstring": "Add a new path to look for primitives.\n\nThe new path will be inserted in the first place of the list,\nso any primitive found in this new folder will take precedence\nover any other primitive with the same name that existed in the\nsystem before.\n\nArgs:\npath (str): path to add\n\nRaises:\nValueError: A `ValueError` will be raised if the path is not valid.", "source": "juraj-google-style"}
{"code": "def is45(msg):\n    \n    if allzeros(msg):\n        return False\n\n    d = hex2bin(data(msg))\n\n    \n    if wrongstatus(d, 1, 2, 3):\n        return False\n\n    if wrongstatus(d, 4, 5, 6):\n        return False\n\n    if wrongstatus(d, 7, 8, 9):\n        return False\n\n    if wrongstatus(d, 10, 11, 12):\n        return False\n\n    if wrongstatus(d, 13, 14, 15):\n        return False\n\n    if wrongstatus(d, 16, 17, 26):\n        return False\n\n    if wrongstatus(d, 27, 28, 38):\n        return False\n\n    if wrongstatus(d, 39, 40, 51):\n        return False\n\n    \n    if bin2int(d[51:56]) != 0:\n        return False\n\n    temp = temp45(msg)\n    if temp:\n        if temp > 60 or temp < -80:\n            return False\n\n    return True", "docstring": "Check if a message is likely to be BDS code 4,5.\n\nMeteorological hazard report\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"}
{"code": "def get_reduced_structure(self, reduction_algo='niggli'):\n    if (reduction_algo == 'niggli'):\n        reduced_latt = self._lattice.get_niggli_reduced_lattice()\n    elif (reduction_algo == 'LLL'):\n        reduced_latt = self._lattice.get_lll_reduced_lattice()\n    else:\n        raise ValueError('Invalid reduction algo : {}'.format(reduction_algo))\n    if (reduced_latt != self.lattice):\n        return self.__class__(reduced_latt, self.species_and_occu, self.cart_coords, coords_are_cartesian=True, to_unit_cell=True, site_properties=self.site_properties, charge=self._charge)\n    else:\n        return self.copy()", "docstring": "Get a reduced structure.\n\nArgs:\nreduction_algo (str): The lattice reduction algorithm to use.\nCurrently supported options are \"niggli\" or \"LLL\".", "source": "codesearchnet"}
{"code": "def trace(src, options=None):\n    options = options or config.Options.create()\n    with config.verbosity_from(options):\n        loader = load_pytd.create_loader(options)\n        ret = analyze.infer_types(src=src, options=options, loader=loader)\n        pytd_module = ret.ast\n        raw_traces = []\n        for op, symbol, data in ret.context.vm.opcode_traces:\n            raw_traces.append((op, symbol, tuple((_to_pytd(d, loader, pytd_module) for d in data))))\n    return source.Code(src, raw_traces, TypeTrace, options.input)", "docstring": "Generates type traces for the given source code.\n\nArgs:\nsrc: The source text.\noptions: A pytype.config.Options object that can be used to specify options\nsuch as the target Python version.\n\nReturns:\nA source.Code object.", "source": "github-repos"}
{"code": "def get_attr_location(self, name, location):\n    line, _ = location\n    src_line = self.line(line)\n    attr = name.split('.')[-1]\n    dot_attr = '.' + attr\n    if dot_attr in src_line:\n        col = src_line.index(dot_attr)\n        return (Location(line, col + 1), len(attr))\n    else:\n        attr_loc = self._get_multiline_location(location, 5, dot_attr)\n        if attr_loc:\n            return (Location(attr_loc.line, attr_loc.column + 1), len(attr))\n        else:\n            for l in self.get_closest_line_range(line, line + 5):\n                if self.line(l).endswith('.'):\n                    next_line = self.next_non_comment_line(l)\n                    text = self.line(next_line)\n                    if text.lstrip().startswith(attr):\n                        c = text.index(attr)\n                        return (Location(next_line, c), len(attr))\n        return (location, len(name))", "docstring": "Returns the location and span of the attribute in an attribute access.\n\nArgs:\nname: The attribute name.\nlocation: The location of the value the attribute is accessed on.", "source": "github-repos"}
{"code": "def __init__(self, server_id):\n        \n\n        data = datatools.get_data()\n        \n        self.server_id = server_id\n        self.logger = logging.getLogger(\"{}.{}\".format(__name__, self.server_id))\n        \n        self.songcache_dir = \"{}/{}\".format(_root_songcache_dir, self.server_id)\n        self.songcache_next_dir = \"{}/{}/next\".format(_root_songcache_dir, self.server_id)\n        self.output_format = \"{}/{}\".format(self.songcache_dir, file_format)\n        self.output_format_next = \"{}/{}\".format(self.songcache_next_dir, file_format)\n\n        \n        self.vchannel = None\n        self.vclient = None\n        self.streamer = None\n        self.current_duration = 0\n        self.current_download_elapsed = 0\n        self.is_live = False\n        self.queue = []\n        self.prev_queue = []\n        self.prev_queue_max = 500\n        self.volume = 20\n        \n        self.vclient_starttime = None\n        self.vclient_task = None\n        self.pause_time = None\n        self.prev_time = \"\"\n        \n        self.loop_type = 'off'\n\n        \n        self.mready = False\n        self.vready = False\n        self.state = 'off'\n\n        \n        self.mchannel = None\n        self.embed = None\n        self.queue_display = 9\n        self.nowplayinglog = logging.getLogger(\"{}.{}.nowplaying\".format(__name__, self.server_id))\n        self.nowplayinglog.setLevel(\"DEBUG\")\n        self.nowplayingauthorlog = logging.getLogger(\"{}.{}.nowplayingauthor\".format(__name__, self.server_id))\n        self.nowplayingauthorlog.setLevel(\"DEBUG\")\n        self.nowplayingsourcelog = logging.getLogger(\"{}.{}.nowplayingsource\".format(__name__, self.server_id))\n        self.nowplayingsourcelog.setLevel(\"DEBUG\")\n        self.timelog = logging.getLogger(\"{}.{}.time\".format(__name__, self.server_id))\n        self.timelog.setLevel(\"DEBUG\")\n        self.timelog.propagate = False\n        self.queuelog = logging.getLogger(\"{}.{}.queue\".format(__name__, self.server_id))\n        self.queuelog.setLevel(\"DEBUG\")\n        self.queuelog.propagate = False\n        self.queuelenlog = logging.getLogger(\"{}.{}.queuelen\".format(__name__, self.server_id))\n        self.queuelenlog.setLevel(\"DEBUG\")\n        self.queuelenlog.propagate = False\n        self.volumelog = logging.getLogger(\"{}.{}.volume\".format(__name__, self.server_id))\n        self.volumelog.setLevel(\"DEBUG\")\n        self.statuslog = logging.getLogger(\"{}.{}.status\".format(__name__, self.server_id))\n        self.statuslog.setLevel(\"DEBUG\")\n        self.statustimer = None\n\n        \n        self.clear_cache()\n\n        \n        self.topic = \"\"\n        self.topicchannel = None\n        \n        if \"topic_id\" in data[\"discord\"][\"servers\"][self.server_id][_data.modulename]:\n            topic_id = data[\"discord\"][\"servers\"][self.server_id][_data.modulename][\"topic_id\"]\n            if topic_id is not None and topic_id != \"\":\n                logger.debug(\"Topic channel id: {}\".format(topic_id))\n                self.topicchannel = client.get_channel(topic_id)\n        \n        if \"volume\" in data[\"discord\"][\"servers\"][self.server_id][_data.modulename]:\n            self.volume = data[\"discord\"][\"servers\"][self.server_id][_data.modulename][\"volume\"]\n        else:\n            self.write_volume()", "docstring": "Locks onto a server for easy management of various UIs\n\nArgs:\nserver_id (str): The Discord ID of the server to lock on to", "source": 
"juraj-google-style"}
{"code": "def flip_channel_order(self, image):\n    self._ensure_format_supported(image)\n    if isinstance(image, PIL.Image.Image):\n        image = self.to_numpy_array(image)\n    return image[::-1, :, :]", "docstring": "Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of\n`image` to a NumPy array if it's a PIL Image.\n\nArgs:\nimage (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\nThe image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should\nbe first.", "source": "github-repos"}
{"code": "def get_random_value(length=10, character_sets=[string.ascii_uppercase, string.ascii_lowercase]):\n    return ''.join((random.choice(''.join(character_sets)) for i in range(length)))", "docstring": "Get a random string with the given length.\n\nArgs:\nlength (int): The length of the string to return.\ncharacter_sets list(str): The caracter sets to use.\n\nReturns:\nstr: The random string.", "source": "codesearchnet"}
{"code": "def _DownloadAuthUrl(self, url, dest_dir):\n    \n    dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)\n    dest_file.close()\n    dest = dest_file.name\n\n    self.logger.info(\n        'Downloading url from %s to %s using authentication token.', url, dest)\n\n    if not self.token:\n      response = self.watcher.GetMetadata(\n          self.token_metadata_key, recursive=False, retry=False)\n\n      if not response:\n        self.logger.info(\n            'Authentication token not found. Attempting unauthenticated '\n            'download.')\n        return self._DownloadUrl(url, dest_dir)\n\n      self.token = '%s %s' % (\n          response.get('token_type', ''), response.get('access_token', ''))\n\n    try:\n      request = urlrequest.Request(url)\n      request.add_unredirected_header('Metadata-Flavor', 'Google')\n      request.add_unredirected_header('Authorization', self.token)\n      content = urlrequest.urlopen(request).read().decode('utf-8')\n    except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n      self.logger.warning('Could not download %s. %s.', url, str(e))\n      return None\n\n    with open(dest, 'wb') as f:\n      f.write(content)\n\n    return dest", "docstring": "Download a Google Storage URL using an authentication token.\n\nIf the token cannot be fetched, fallback to unauthenticated download.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "juraj-google-style"}
{"code": "def uses_star_args_in_call(node):\n    if sys.version_info[:2] >= (3, 5):\n        for arg in node.args:\n            if isinstance(arg, ast.Starred):\n                return True\n    elif node.starargs:\n        return True\n    return False", "docstring": "Check if an ast.Call node uses arbitrary-length positional *args.\n\nThis function works with the AST call node format of Python3.5+\nas well as the different AST format of earlier versions of Python.\n\nArgs:\nnode: The ast.Call node to check arg values for.\n\nReturns:\nTrue if the node uses starred variadic positional args or keyword args.\nFalse if it does not.", "source": "github-repos"}
{"code": "def sign(self, byts):\n    chosen_hash = c_hashes.SHA256()\n    hasher = c_hashes.Hash(chosen_hash, default_backend())\n    hasher.update(byts)\n    digest = hasher.finalize()\n    return self.priv.sign(digest, c_ec.ECDSA(c_utils.Prehashed(chosen_hash)))", "docstring": "Compute the ECC signature for the given bytestream.\n\nArgs:\nbyts (bytes): The bytes to sign.\n\nReturns:\nbytes: The RSA Signature bytes.", "source": "codesearchnet"}
{"code": "def load_metrics(event_dir, epoch):\n    metrics = {}\n    for filename in tf.gfile.ListDirectory(event_dir):\n        path = os.path.join(event_dir, filename)\n        for event in tf.train.summary_iterator(path):\n            if ((event.step == epoch) and event.HasField('summary')):\n                value = event.summary.value[0]\n                metrics[value.tag] = value.simple_value\n    return metrics", "docstring": "Loads metrics for this epoch if they have already been written.\n\nThis reads the entire event file but it's small with just per-epoch metrics.\n\nArgs:\nevent_dir: TODO(koz4k): Document this.\nepoch: TODO(koz4k): Document this.\n\nReturns:\nmetrics.", "source": "codesearchnet"}
{"code": "def RegisterMessage(self, message):\n    \n\n    desc = message.DESCRIPTOR\n    self._classes[desc.full_name] = message\n    self.pool.AddDescriptor(desc)\n    return message", "docstring": "Registers the given message type in the local database.\n\nCalls to GetSymbol() and GetMessages() will return messages registered here.\n\nArgs:\nmessage: a message.Message, to be registered.\n\nReturns:\nThe provided message.", "source": "juraj-google-style"}
{"code": "def GetSectionByIndex(self, section_index):\n    \n    if not self._is_parsed:\n      self._Parse()\n      self._is_parsed = True\n\n    if section_index < 0 or section_index >= len(self._sections):\n      return None\n\n    return self._sections[section_index]", "docstring": "Retrieves a specific section based on the index.\n\nArgs:\nsection_index (int): index of the section.\n\nReturns:\nVolumeExtent: a volume extent or None if not available.", "source": "juraj-google-style"}
{"code": "def run_commands(self, commands, encoding='json', send_enable=True, **kwargs):\n    commands = make_iterable(commands)\n    commands = [({'cmd': c.split('MULTILINE:')[0], 'input': ('%s\\n' % c.split('MULTILINE:')[1].strip())} if ('MULTILINE:' in c) else c) for c in commands]\n    if send_enable:\n        if self._enablepwd:\n            commands.insert(0, {'cmd': 'enable', 'input': self._enablepwd})\n        else:\n            commands.insert(0, 'enable')\n    response = self._connection.execute(commands, encoding, **kwargs)\n    if send_enable:\n        response['result'].pop(0)\n    return response['result']", "docstring": "Sends the commands over the transport to the device\n\nThis method sends the commands to the device using the nodes\ntransport.  This is a lower layer function that shouldn't normally\nneed to be used, preferring instead to use config() or enable().\n\nArgs:\ncommands (list): The ordered list of commands to send to the\ndevice using the transport\nencoding (str): The encoding method to use for the request and\nexcpected response.\nsend_enable (bool): If True the enable command will be\nprepended to the command list automatically.\n**kwargs: Additional keyword arguments for expanded eAPI\nfunctionality. Only supported eAPI params are used in building\nthe request\n\nReturns:\nThis method will return the raw response from the connection\nwhich is a Python dictionary object.", "source": "codesearchnet"}
{"code": "def parse(self, filepath, content):\n        \n        try:\n            parsed = yaml.load(content)\n        except yaml.YAMLError as exc:\n            msg = \"No YAML object could be decoded from file: {}\\n{}\"\n            raise SettingsBackendError(msg.format(filepath, exc))\n        return parsed", "docstring": "Parse opened settings content using YAML parser.\n\nArgs:\nfilepath (str): Settings object, depends from backend\ncontent (str): Settings content from opened file, depends from\nbackend.\n\nRaises:\nboussole.exceptions.SettingsBackendError: If parser can not decode\na valid YAML object.\n\nReturns:\ndict: Dictionnary containing parsed setting elements.", "source": "juraj-google-style"}
{"code": "def __get_state_by_id(cls, job_id):\n    \n    state = model.MapreduceState.get_by_job_id(job_id)\n    if state is None:\n      raise ValueError(\"Job state for job %s is missing.\" % job_id)\n    return state", "docstring": "Get job state by id.\n\nArgs:\njob_id: job id.\n\nReturns:\nmodel.MapreduceState for the job.\n\nRaises:\nValueError: if the job state is missing.", "source": "juraj-google-style"}
{"code": "def _print_test_names(test_classes):\n    for test_class in test_classes:\n        cls = test_class(config_parser.TestRunConfig())\n        test_names = []\n        try:\n            cls._pre_run()\n            if cls.tests:\n                test_names = list(cls.tests)\n            else:\n                test_names = cls.get_existing_test_names()\n        except Exception:\n            logging.exception('Failed to retrieve generated tests.')\n        finally:\n            cls._clean_up()\n        print('==========> %s <==========' % cls.TAG)\n        for name in test_names:\n            print(f'{cls.TAG}.{name}')", "docstring": "Prints the names of all the tests in all test classes.\nArgs:\ntest_classes: classes, the test classes to print names from.", "source": "github-repos"}
{"code": "def _get_prop_from_modelclass(modelclass, name):\n  \n  if name == '__key__':\n    return modelclass._key\n\n  parts = name.split('.')\n  part, more = parts[0], parts[1:]\n  prop = modelclass._properties.get(part)\n  if prop is None:\n    if issubclass(modelclass, model.Expando):\n      prop = model.GenericProperty(part)\n    else:\n      raise TypeError('Model %s has no property named %r' %\n                      (modelclass._get_kind(), part))\n\n  while more:\n    part = more.pop(0)\n    if not isinstance(prop, model.StructuredProperty):\n      raise TypeError('Model %s has no property named %r' %\n                      (modelclass._get_kind(), part))\n    maybe = getattr(prop, part, None)\n    if isinstance(maybe, model.Property) and maybe._name == part:\n      prop = maybe\n    else:\n      maybe = prop._modelclass._properties.get(part)\n      if maybe is not None:\n        \n        \n        prop = getattr(prop, maybe._code_name)\n      else:\n        if issubclass(prop._modelclass, model.Expando) and not more:\n          prop = model.GenericProperty()\n          prop._name = name  \n        else:\n          raise KeyError('Model %s has no property named %r' %\n                         (prop._modelclass._get_kind(), part))\n\n  return prop", "docstring": "Helper for FQL parsing to turn a property name into a property object.\n\nArgs:\nmodelclass: The model class specified in the query.\nname: The property name.  This may contain dots which indicate\nsub-properties of structured properties.\n\nReturns:\nA Property object.\n\nRaises:\nKeyError if the property doesn't exist and the model clas doesn't\nderive from Expando.", "source": "juraj-google-style"}
{"code": "def run_idle(self):\n    if ((not self.idlers) or (self.inactive >= len(self.idlers))):\n        return False\n    idler = self.idlers.popleft()\n    (callback, args, kwds) = idler\n    _logging_debug('idler: %s', callback.__name__)\n    res = callback(*args, **kwds)\n    if (res is not None):\n        if res:\n            self.inactive = 0\n        else:\n            self.inactive += 1\n        self.idlers.append(idler)\n    else:\n        _logging_debug('idler %s removed', callback.__name__)\n    return True", "docstring": "Run one of the idle callbacks.\n\nReturns:\nTrue if one was called, False if no idle callback was called.", "source": "codesearchnet"}
{"code": "def _MakeFieldDescriptor(self, field_proto, message_name, index, is_extension=False):\n    if message_name:\n        full_name = '.'.join((message_name, field_proto.name))\n    else:\n        full_name = field_proto.name\n    return descriptor.FieldDescriptor(name=field_proto.name, full_name=full_name, index=index, number=field_proto.number, type=field_proto.type, cpp_type=None, message_type=None, enum_type=None, containing_type=None, label=field_proto.label, has_default_value=False, default_value=None, is_extension=is_extension, extension_scope=None, options=field_proto.options)", "docstring": "Creates a field descriptor from a FieldDescriptorProto.\n\nFor message and enum type fields, this method will do a look up\nin the pool for the appropriate descriptor for that type. If it\nis unavailable, it will fall back to the _source function to\ncreate it. If this type is still unavailable, construction will\nfail.\n\nArgs:\nfield_proto: The proto describing the field.\nmessage_name: The name of the containing message.\nindex: Index of the field\nis_extension: Indication that this field is for an extension.\n\nReturns:\nAn initialized FieldDescriptor object", "source": "codesearchnet"}
{"code": "def __init__(self, qobj_model, **run_config):\n        \n        self._qobj_model = qobj_model\n        self._run_config = run_config", "docstring": "Create new converter.\n\nArgs:\nqobj_model (QobjInstruction): marshmallow model to serialize to object.\nrun_config (dict): experimental configuration.", "source": "juraj-google-style"}
{"code": "def __init__(self, column_names=None, title=None):\n    \n    super(CLITableView, self).__init__(column_names=column_names, title=title)\n    if self._columns:\n      self._column_width = len(self._columns[0])\n    else:\n      self._column_width = 0", "docstring": "Initializes a command line table view.\n\nArgs:\ncolumn_names (Optional[list[str]]): column names.\ntitle (Optional[str]): title.", "source": "juraj-google-style"}
{"code": "def xldate_as_datetime(xldate, datemode=0, option=\"to_datetime\"):\n    \n\n    \n\n    if option == \"to_float\":\n        d = (xldate - 25589) * 86400.0\n    else:\n        try:\n            d = datetime.datetime(1899, 12, 30) + \\\n                datetime.timedelta(days=xldate + 1462 * datemode)\n            \n            \n            if option == \"to_string\":\n                date_format = \"%Y-%m-%d %H:%M:%S\"  \n                d = d.strftime(date_format)\n        except TypeError:\n            logging.info(f'The date is not of correct type [{xldate}]')\n            d = xldate\n    return d", "docstring": "Converts a xls date stamp to a more sensible format.\n\nArgs:\nxldate (str): date stamp in Excel format.\ndatemode (int): 0 for 1900-based, 1 for 1904-based.\noption (str): option in (\"to_datetime\", \"to_float\", \"to_string\"),\nreturn value\n\nReturns:\ndatetime (datetime object, float, or string).", "source": "juraj-google-style"}
{"code": "def table_chains(self, table='filter'):\n    return dict(((c['name'], self.get_chain(c['name'], table)) for c in self.get_table(table)))", "docstring": "Get a dict where the keys are all the chains for the given table\nand each value is the set of rules defined for the given chain.\n\nArgs:\ntable (str): table name, defaults to ``filter``\n\nReturns:\ndict: chains with set of defined rules", "source": "codesearchnet"}
{"code": "def cutting_indices(self, independent_decision_points: List[pg.geno.DecisionPoint], global_state: pg.geno.AttributeDict, step: int) -> List[int]:", "docstring": "Implementation of getting the indices of the cutting points.\n\nArgs:\nindependent_decision_points: A list of independent decision points.\nglobal_state: An optional keyword argument as the global state. Subclass\ncan omit.\nstep: An optional keyword argument as the curent step. Subclass can omit.\n\nReturns:\nA list of integers as the cutting points.", "source": "github-repos"}
{"code": "def scroll(clicks, x=None, y=None, pause=None, _pause=True):\n    _failSafeCheck()\n    if (type(x) in (tuple, list)):\n        (x, y) = (x[0], x[1])\n    (x, y) = position(x, y)\n    platformModule._scroll(clicks, x, y)\n    _autoPause(pause, _pause)", "docstring": "Performs a scroll of the mouse scroll wheel.\n\nWhether this is a vertical or horizontal scroll depends on the underlying\noperating system.\n\nThe x and y parameters detail where the mouse event happens. If None, the\ncurrent mouse position is used. If a float value, it is rounded down. If\noutside the boundaries of the screen, the event happens at edge of the\nscreen.\n\nArgs:\nclicks (int, float): The amount of scrolling to perform.\nx (int, float, None, tuple, optional): The x position on the screen where the\nclick happens. None by default. If tuple, this is used for x and y.\ny (int, float, None, optional): The y position on the screen where the\nclick happens. None by default.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def sample(self, bqm, num_reads=10):\n        \n        values = tuple(bqm.vartype.value)\n\n        def _itersample():\n            for __ in range(num_reads):\n                sample = {v: choice(values) for v in bqm.linear}\n                energy = bqm.energy(sample)\n\n                yield sample, energy\n\n        samples, energies = zip(*_itersample())\n\n        return SampleSet.from_samples(samples, bqm.vartype, energies)", "docstring": "Give random samples for a binary quadratic model.\n\nVariable assignments are chosen by coin flip.\n\nArgs:\nbqm (:obj:`.BinaryQuadraticModel`):\nBinary quadratic model to be sampled from.\n\nnum_reads (int, optional, default=10):\nNumber of reads.\n\nReturns:\n:obj:`.SampleSet`", "source": "juraj-google-style"}
{"code": "def get_property_dict(entity_proto):\n    return dict(((p.key, p.value) for p in entity_proto.property))", "docstring": "Convert datastore.Entity to a dict of property name -> datastore.Value.\n\nArgs:\nentity_proto: datastore.Entity proto message.\n\nUsage:\n>>> get_property_dict(entity_proto)\n{'foo': {string_value='a'}, 'bar': {integer_value=2}}\n\nReturns:\ndict of entity properties.", "source": "codesearchnet"}
{"code": "def _read_mptcp_join(self, bits, size, kind):\n    if (self._syn and self._ack):\n        return self._read_join_synack(bits, size, kind)\n    elif self._syn:\n        return self._read_join_syn(bits, size, kind)\n    elif self._ack:\n        return self._read_join_ack(bits, size, kind)\n    else:\n        temp = self._read_fileng(size)\n        data = dict(kind=kind, length=(size + 1), subtype='MP_JOIN-Unknown', data=(bytes(chr(int(bits[:4], base=2)), encoding='utf-8') + temp))\n        return data", "docstring": "Read Join Connection option.\n\nPositional arguments:\n* bits - str, 4-bit data\n* size - int, length of option\n* kind - int, 30 (Multipath TCP)\n\nReturns:\n* dict -- extracted Join Connection (MP_JOIN) option\n\nStructure of MP_JOIN [RFC 6824]:\nOctets      Bits        Name                        Description\n0           0     tcp.mp.kind                 Kind (30)\n1           8     tcp.mp.length               Length\n2          16     tcp.mp.subtype              Subtype (1)\n2          20     tcp.mp.data                 Handshake-specific Data", "source": "codesearchnet"}
{"code": "def op_nodes(self, op=None):\n        \n        nodes = []\n        for node in self._multi_graph.nodes():\n            if node.type == \"op\":\n                if op is None or isinstance(node.op, op):\n                    nodes.append(node)\n        return nodes", "docstring": "Get the list of \"op\" nodes in the dag.\n\nArgs:\nop (Type): Instruction subclass op nodes to return. if op=None, return\nall op nodes.\nReturns:\nlist[DAGNode]: the list of node ids containing the given op.", "source": "juraj-google-style"}
{"code": "def get_plugin_asset(plugin_asset_cls, graph=None):\n    if graph is None:\n        graph = ops.get_default_graph()\n    if not plugin_asset_cls.plugin_name:\n        raise ValueError('Class %s has no plugin_name' % plugin_asset_cls.__name__)\n    name = _PLUGIN_ASSET_PREFIX + plugin_asset_cls.plugin_name\n    container = graph.get_collection(name)\n    if container:\n        if len(container) != 1:\n            raise ValueError('Collection for %s had %d items, expected 1' % (name, len(container)))\n        instance = container[0]\n        if not isinstance(instance, plugin_asset_cls):\n            raise ValueError('Plugin name collision between classes %s and %s' % (plugin_asset_cls.__name__, instance.__class__.__name__))\n    else:\n        instance = plugin_asset_cls()\n        graph.add_to_collection(name, instance)\n        graph.add_to_collection(_PLUGIN_ASSET_PREFIX, plugin_asset_cls.plugin_name)\n    return instance", "docstring": "Acquire singleton PluginAsset instance from a graph.\n\nPluginAssets are always singletons, and are stored in tf Graph collections.\nThis way, they can be defined anywhere the graph is being constructed, and\nif the same plugin is configured at many different points, the user can always\nmodify the same instance.\n\nArgs:\nplugin_asset_cls: The PluginAsset class\ngraph: (optional) The graph to retrieve the instance from. If not specified,\nthe default graph is used.\n\nReturns:\nAn instance of the plugin_asset_class\n\nRaises:\nValueError: If we have a plugin name collision, or if we unexpectedly find\nthe wrong number of items in a collection.", "source": "github-repos"}
{"code": "def __init__(self, loss_scale_value):\n    super(FixedLossScale, self).__init__()\n    if not isinstance(loss_scale_value, (int, float)):\n        raise ValueError('loss_scale_value must be a Python int or float.')\n    if loss_scale_value < 1:\n        raise ValueError('loss_scale_value must be at least 1.')\n    self._loss_scale_value = float(loss_scale_value)", "docstring": "Creates the fixed loss scale.\n\nArgs:\nloss_scale_value: A Python float. Its ideal value varies depending on\nmodels to run. Choosing a too small loss_scale might affect model\nquality; a too big loss_scale might cause inf or nan. There is no single\nright loss_scale to apply. There is no harm choosing a relatively big\nnumber as long as no nan or inf is encountered in training.\n\nRaises:\nValueError: If loss_scale_value is less than 1.", "source": "github-repos"}
{"code": "def _print_download_progress_msg(self, msg, flush=False):\n    if self._interactive_mode():\n        self._max_prog_str = max(self._max_prog_str, len(msg))\n        sys.stdout.write(('\\r%-{}s'.format(self._max_prog_str) % msg))\n        sys.stdout.flush()\n        if flush:\n            print('\\n')\n    else:\n        logging.info(msg)", "docstring": "Prints a message about download progress either to the console or TF log.\n\nArgs:\nmsg: Message to print.\nflush: Indicates whether to flush the output (only used in interactive\nmode).", "source": "codesearchnet"}
{"code": "def _address_content(self, x):\n    \n    mem_keys = tf.layers.dense(self.mem_vals, self.key_depth,\n                               bias_initializer=tf.constant_initializer(1.0),\n                               name=\"mem_key\")\n    mem_query = tf.layers.dense(x, self.key_depth,\n                                bias_initializer=tf.constant_initializer(1.0),\n                                name=\"mem_query\")\n    norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys),\n                     transpose_b=True)\n    dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)\n    cos_dist = tf.div(dot_product, norm + 1e-7, name=\"cos_dist\")\n    access_logits = self.sharpen_factor * cos_dist\n    return access_logits", "docstring": "Address the memory based on content similarity.\n\nArgs:\nx: a tensor in the shape of [batch_size, length, depth].\nReturns:\nthe logits for each memory entry [batch_size, length, memory_size].", "source": "juraj-google-style"}
{"code": "def register_user(self, user):\n        \n        self.users[user.index] = {'known_items': set()}\n        self.n_user += 1", "docstring": "For new users, append their information into the dictionaries.\n\nArgs:\nuser (User): User.", "source": "juraj-google-style"}
{"code": "def write_fasta_file_from_dict(indict, outname, outdir=None, outext='.faa', force_rerun=False):\n    if (not outdir):\n        outdir = ''\n    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)\n    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n        seqs = []\n        for (i, s) in indict.items():\n            seq = ssbio.protein.sequence.utils.cast_to_seq_record(s, id=i)\n            seqs.append(seq)\n        SeqIO.write(seqs, outfile, 'fasta')\n    return outfile", "docstring": "Write a FASTA file for a dictionary of IDs and their sequence strings.\n\nArgs:\nindict: Input dictionary with keys as IDs and values as sequence strings\noutname: Name of the output file which will have outext appended to it\noutdir: Path to directory to output sequences to\noutext: Extension of FASTA file, default \".faa\"\nforce_rerun: If file should be overwritten if it exists\n\nReturns:\nstr: Path to output FASTA file.", "source": "codesearchnet"}
{"code": "def Push(cls, connection, datafile, filename,\n             st_mode=DEFAULT_PUSH_MODE, mtime=0, progress_callback=None):\n        \n\n        fileinfo = ('{},{}'.format(filename, int(st_mode))).encode('utf-8')\n\n        cnxn = FileSyncConnection(connection, b'<2I')\n        cnxn.Send(b'SEND', fileinfo)\n\n        if progress_callback:\n            total_bytes = os.fstat(datafile.fileno()).st_size if isinstance(datafile, file) else -1\n            progress = cls._HandleProgress(lambda current: progress_callback(filename, current, total_bytes))\n            next(progress)\n\n        while True:\n            data = datafile.read(MAX_PUSH_DATA)\n            if data:\n                cnxn.Send(b'DATA', data)\n\n                if progress_callback:\n                    progress.send(len(data))\n            else:\n                break\n\n        if mtime == 0:\n            mtime = int(time.time())\n        \n        \n        cnxn.Send(b'DONE', size=mtime)\n        for cmd_id, _, data in cnxn.ReadUntil((), b'OKAY', b'FAIL'):\n            if cmd_id == b'OKAY':\n                return\n            raise PushFailedError(data)", "docstring": "Push a file-like object to the device.\n\nArgs:\nconnection: ADB connection\ndatafile: File-like object for reading from\nfilename: Filename to push to\nst_mode: stat mode for filename\nmtime: modification time\nprogress_callback: callback method that accepts filename, bytes_written and total_bytes\n\nRaises:\nPushFailedError: Raised on push failure.", "source": "juraj-google-style"}
{"code": "def _tuple_of_big_endian_int(bit_groups: Tuple[(np.ndarray, ...)]) -> Tuple[(int, ...)]:\n    return tuple((_big_endian_int(bits) for bits in bit_groups))", "docstring": "Returns the big-endian integers specified by groups of bits.\n\nArgs:\nbit_groups: Groups of descending bits, each specifying a big endian\ninteger with the 1s bit at the end.\n\nReturns:\nA tuple containing the integer for each group.", "source": "codesearchnet"}
{"code": "def register_for_auto_class(cls, auto_class='AutoProcessor'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class. This should only be used for custom feature extractors as the ones\nin the library are already mapped with `AutoProcessor`.\n\n\n\nArgs:\nauto_class (`str` or `type`, *optional*, defaults to `\"AutoProcessor\"`):\nThe auto class to register this new feature extractor with.", "source": "github-repos"}
{"code": "def exe_cmd(*cmds, timeout=DEFAULT_TIMEOUT_SEC):\n    cmd = ' '.join(cmds)\n    ret, out, err = utils.run_command(cmd=cmd, stdout=PIPE, stderr=PIPE, shell=True, timeout=timeout)\n    logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(cmds), out, err, ret)\n    if not err:\n        return out\n    return err", "docstring": "Executes commands in a new shell. Directing stderr to PIPE, with timeout.\n\nThis is fastboot's own exe_cmd because of its peculiar way of writing\nnon-error info to stderr.\n\nArgs:\ncmds: A sequence of commands and arguments.\ntimeout: The number of seconds to wait before timing out.\n\nReturns:\nThe output of the command run, in bytes.\n\nRaises:\nException: An error occurred during the command execution or\nthe command timed out.", "source": "github-repos"}
{"code": "def is_distributing_by_cloning(model):\n    if backend.is_tpu_strategy(model._distribution_strategy) and context.executing_eagerly:\n        return False\n    elif ops.executing_eagerly_outside_functions():\n        return bool(model._compile_distribution)\n    return True", "docstring": "Decide whether this model is going to be distributed via cloning.\n\nWe are going to distribute the model by cloning in graph mode.\n\nArgs:\nmodel: Keras model to distribute.\n\nReturns:\nTrue if the `model` is going to be distributed using cloning and False\notherwise.", "source": "github-repos"}
{"code": "def has_chosen(state, correct, msgs):\n    ctxt = {}\n    exec(state.student_code, globals(), ctxt)\n    sel_indx = ctxt['selected_option']\n    if (sel_indx != correct):\n        state.report(Feedback(msgs[(sel_indx - 1)]))\n    else:\n        state.reporter.success_msg = msgs[(correct - 1)]\n    return state", "docstring": "Verify exercises of the type MultipleChoiceExercise\n\nArgs:\nstate:    State instance describing student and solution code. Can be omitted if used with Ex().\ncorrect:  index of correct option, where 1 is the first option.\nmsgs  :    list of feedback messages corresponding to each option.\n\n:Example:\nThe following SCT is for a multiple choice exercise with 2 options, the first\nof which is correct.::\n\nEx().has_chosen(1, ['Correct!', 'Incorrect. Try again!'])", "source": "codesearchnet"}
{"code": "def gather(strategy, value):\n    return nest.map_structure(functools.partial(_gather, strategy), value)", "docstring": "Gathers value from all workers.\n\nThis is intended for tests before we implement an official all-gather API.\n\nArgs:\nstrategy: a `tf.distribute.Strategy`.\nvalue: a nested structure of n-dim `tf.distribute.DistributedValue` of\n`tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica.\nCannot contain tf.sparse.SparseTensor.\n\nReturns:\na (n+1)-dim `tf.Tensor`.", "source": "github-repos"}
{"code": "def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting dropout ...')\n\n    if names == 'short':\n        tf_name = 'DO' + random_string(6)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    dropout = keras.layers.Dropout(rate=params['ratio'], name=tf_name)\n    layers[scope_name] = dropout(layers[inputs[0]])", "docstring": "Convert dropout.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def update_defaults(self, new_defaults, respect_none=False):\n        \n        for key, value in six.iteritems(new_defaults):\n            item = self.get_item(key)\n            if item is None:\n                raise YapconfItemNotFound(\"Cannot update default for {0}, \"\n                                          \"there is no config item by the \"\n                                          \"name of {1}\".format(key, key), None)\n\n            item.update_default(value, respect_none)", "docstring": "Update items defaults to the values in the new_defaults dict.\n\nArgs:\nnew_defaults (dict): A key-value pair of new defaults to be\napplied.\nrespect_none (bool): Flag to indicate if ``None`` values should\nconstitute an update to the default.", "source": "juraj-google-style"}
{"code": "def cumulative_probabilities(self):\n    partition_function = np.sum(self.p)\n    return (np.cumsum(self.p) / partition_function)", "docstring": "Cumulative sum of the relative probabilities for all possible jumps.\n\nArgs:\nNone\n\nReturns:\n(np.array): Cumulative sum of relative jump probabilities.", "source": "codesearchnet"}
{"code": "def Append(self, item):\n    \n    if self._index >= self._size:\n      self._index = self._index % self._size\n\n    try:\n      self._list[self._index] = item\n    except IndexError:\n      self._list.append(item)\n    self._index += 1", "docstring": "Add an item to the list.\n\nArgs:\nitem (object): item.", "source": "juraj-google-style"}
{"code": "def where(self, *constraints: column_expression_builder.ColumnExpressionBuilder) -> 'View':\n    for constraint in constraints:\n        if constraint.node.return_type != _fhir_path_data_types.Boolean:\n            raise ValueError(('view `where` expressions must be boolean predicates', f' got `{constraint.node.to_fhir_path()}`'))\n    return View(self._context, self._root_resource, self._fields, self._constraints + tuple(constraints), self._handler)", "docstring": "Returns a new View instance with these added constraints.\n\nArgs:\n*constraints: a list of FHIRPath expressions to conjuctively constrain the\nunderlying data.  The returned view will apply the both the current and\nadditional constraints defined here.", "source": "github-repos"}
{"code": "def _add_bound_method(self, bound_method, identify_observed):\n    inst = bound_method.__self__\n    method_name = bound_method.__name__\n    key = self.make_key(bound_method)\n    if (key not in self.observers):\n        self.observers[key] = ObserverBoundMethod(inst, method_name, identify_observed, (key, self.observers))\n        return True\n    else:\n        return False", "docstring": "Add an bound method as an observer.\n\nArgs:\nbound_method: The bound method to add as an observer.\nidentify_observed: See the docstring for add_observer.\n\nReturns:\nTrue if the bound method is added, otherwise False.", "source": "codesearchnet"}
{"code": "def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    encoder_states = () if output_hidden_states else None\n    all_attentions = () if output_attentions else None\n    hidden_states = inputs_embeds\n    for idx, encoder_layer in enumerate(self.layers):\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if self.gradient_checkpointing and self.training:\n            layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, output_attentions)\n        else:\n            layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions)\n        hidden_states = layer_outputs[0]\n        if output_attentions:\n            all_attentions = all_attentions + (layer_outputs[1],)\n    if output_hidden_states:\n        encoder_states = encoder_states + (hidden_states,)\n    if not return_dict:\n        return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n    return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\ninputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nOptionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\nThis is useful if you want more control over how to convert `input_ids` indices into associated vectors\nthan the model's internal embedding lookup matrix.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\nfor more detail.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"}
{"code": "def filter(self, *query_filter):\n    for query in query_filter:\n        self.query.append(query)\n    return self", "docstring": "Set the query filter to perform the query with\n\nArgs:\n*query_filter: Simplified Query Language filter", "source": "codesearchnet"}
{"code": "def _CreateAdGroup(client, campaign_id):\n  \n  ad_group_service = client.GetService('AdGroupService')\n\n  operations = [{\n      'operator': 'ADD',\n      'operand': {\n          'campaignId': campaign_id,\n          'adGroupType': 'SEARCH_DYNAMIC_ADS',\n          'name': 'Earth to Mars Cruises \n          'status': 'PAUSED',\n          'biddingStrategyConfiguration': {\n              'bids': [{\n                  'xsi_type': 'CpcBid',\n                  'bid': {\n                      'microAmount': '3000000'\n                  },\n              }]\n          }\n      }\n  }]\n\n  ad_group = ad_group_service.mutate(operations)['value'][0]\n  ad_group_id = ad_group['id']\n\n  print 'Ad group with ID \"%d\" and name \"%s\" was created.' % (\n      ad_group_id, ad_group['name'])\n\n  return ad_group_id", "docstring": "Creates an ad group.\n\nArgs:\nclient: an AdWordsClient instance.\ncampaign_id: an integer campaign ID.\n\nReturns:\nAn integer ad group ID.", "source": "juraj-google-style"}
{"code": "def inverse_transform(self, y):\n    sklearn.base.check_is_fitted(self)\n    xp, _ = sklearn.utils._array_api.get_namespace(y)\n    if self.ndim_ == 1 and y.ndim == 2:\n        return xp.squeeze(y, axis=1)\n    return y", "docstring": "Revert the transformation of transform.\n\nArgs:\ny: np.ndarray\nTransformed numpy array.\n\nReturns:\nnp.ndarray\nIf the transformer was fit to a 1D numpy array,\nand a 2D numpy array with a singleton second dimension\nis passed, it will be squeezed back to 1D. Otherwise, it\nwill eb left untouched.", "source": "github-repos"}
{"code": "def prepare_soap_body(self, method, parameters, namespace):\n    tags = []\n    for (name, value) in parameters:\n        tag = '<{name}>{value}</{name}>'.format(name=name, value=escape(('%s' % value), {'\"': '&quot;'}))\n        tags.append(tag)\n    wrapped_params = ''.join(tags)\n    if (namespace is not None):\n        soap_body = '<{method} xmlns=\"{namespace}\">{params}</{method}>'.format(method=method, params=wrapped_params, namespace=namespace)\n    else:\n        soap_body = '<{method}>{params}</{method}>'.format(method=method, params=wrapped_params)\n    return soap_body", "docstring": "Prepare the SOAP message body for sending.\n\nArgs:\nmethod (str): The name of the method to call.\nparameters (list): A list of (name, value) tuples containing\nthe parameters to pass to the method.\nnamespace (str): tThe XML namespace to use for the method.\n\nReturns:\nstr: A properly formatted SOAP Body.", "source": "codesearchnet"}
{"code": "class Iterator(PyDataset):\n    white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff')\n\n    def __init__(self, n, batch_size, shuffle, seed):\n        self.n = n\n        self.batch_size = batch_size\n        self.seed = seed\n        self.shuffle = shuffle\n        self.batch_index = 0\n        self.total_batches_seen = 0\n        self.lock = threading.Lock()\n        self.index_array = None\n        self.index_generator = self._flow_index()\n\n    def _set_index_array(self):\n        self.index_array = np.arange(self.n)\n        if self.shuffle:\n            self.index_array = np.random.permutation(self.n)\n\n    def __getitem__(self, idx):\n        if idx >= len(self):\n            raise ValueError('Asked to retrieve element {idx}, but the Sequence has length {length}'.format(idx=idx, length=len(self)))\n        if self.seed is not None:\n            np.random.seed(self.seed + self.total_batches_seen)\n        self.total_batches_seen += 1\n        if self.index_array is None:\n            self._set_index_array()\n        index_array = self.index_array[self.batch_size * idx:self.batch_size * (idx + 1)]\n        return self._get_batches_of_transformed_samples(index_array)\n\n    def __len__(self):\n        return (self.n + self.batch_size - 1) \n\n    def on_epoch_end(self):\n        self._set_index_array()\n\n    def reset(self):\n        self.batch_index = 0\n\n    def _flow_index(self):\n        self.reset()\n        while 1:\n            if self.seed is not None:\n                np.random.seed(self.seed + self.total_batches_seen)\n            if self.batch_index == 0:\n                self._set_index_array()\n            if self.n == 0:\n                current_index = 0\n            else:\n                current_index = self.batch_index * self.batch_size % self.n\n            if self.n > current_index + self.batch_size:\n                self.batch_index += 1\n            else:\n                self.batch_index = 0\n            self.total_batches_seen += 1\n            yield self.index_array[current_index:current_index + self.batch_size]\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        with self.lock:\n            index_array = next(self.index_generator)\n        return self._get_batches_of_transformed_samples(index_array)\n\n    def _get_batches_of_transformed_samples(self, index_array):\n        \n        raise NotImplementedError", "docstring": "Base class for image data iterators.\n\nDEPRECATED.\n\nEvery `Iterator` must implement the `_get_batches_of_transformed_samples`\nmethod.\n\nArgs:\nn: Integer, total number of samples in the dataset to loop over.\nbatch_size: Integer, size of a batch.\nshuffle: Boolean, whether to shuffle the data between epochs.\nseed: Random seeding for data shuffling.", "source": "github-repos"}
{"code": "def main(args):\n    if (not args):\n        raise Exception('Please specify at least one JSON config path')\n    inputs = []\n    program = []\n    outputs = []\n    for arg in args:\n        with open(arg) as fd:\n            config = json.load(fd)\n        inputs.extend(config.get('inputs', []))\n        program.extend(config.get('program', []))\n        outputs.extend(config.get('outputs', []))\n    if (not program):\n        raise Exception('Please specify a program')\n    return run(inputs, program, outputs)", "docstring": "Invokes run function using a JSON file config.\n\nArgs:\nargs: CLI args, which can be a JSON file containing an object whose\nattributes are the parameters to the run function. If multiple JSON\nfiles are passed, their contents are concatenated.\nReturns:\n0 if succeeded or nonzero if failed.\nRaises:\nException: If input data is missing.", "source": "codesearchnet"}
{"code": "def request(self,message,message_type):\n        \n        if message_type == MULTIPART:\n            raise Exception(\"Unsupported request type\")\n            \n        super(Requestor,self).send(message,message_type)", "docstring": "Send a request message of the given type\n\nArgs:\n- message: the message to publish\n- message_type: the type of message being sent", "source": "juraj-google-style"}
{"code": "def GetSourceStrings(cls, event):\n    \n    \n    \n    formatter_object = cls.GetFormatterObject(event.data_type)\n    return formatter_object.GetSources(event)", "docstring": "Retrieves the formatted source strings for a specific event object.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nlist[str, str]: short and long version of the source of the event.", "source": "juraj-google-style"}
{"code": "def qrandom(n):\n    import quantumrandom\n    return np.concatenate([quantumrandom.get_data(data_type='uint16', array_length=1024) for i in range(int(np.ceil((n / 1024.0))))])[:n]", "docstring": "Creates an array of n true random numbers obtained from the quantum random\nnumber generator at qrng.anu.edu.au\n\nThis function requires the package quantumrandom and an internet connection.\n\nArgs:\nn (int):\nlength of the random array\n\nReturn:\narray of ints:\narray of truly random unsigned 16 bit int values", "source": "codesearchnet"}
{"code": "def convert_slice(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting slice ...')\n\n    if len(params['axes']) > 1:\n        raise AssertionError('Cannot convert slice by multiple dimensions')\n\n    if params['axes'][0] not in [0, 1, 2, 3]:\n        raise AssertionError('Slice by dimension more than 3 or less than 0 is not supported')\n\n    def target_layer(x, axis=int(params['axes'][0]), start=int(params['starts'][0]), end=int(params['ends'][0])):\n        if axis == 0:\n            return x[start:end]\n        elif axis == 1:\n            return x[:, start:end]\n        elif axis == 2:\n            return x[:, :, start:end]\n        elif axis == 3:\n            return x[:, :, :, start:end]\n\n    lambda_layer = keras.layers.Lambda(target_layer)\n    layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert slice operation.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def make_bubble_surface(dims=DEFAULT_DIMS, repeat=3):\n    \n    gradients = make_gradients(dims)\n    return (\n        np.sin((gradients[0] - 0.5) * repeat * np.pi) *\n        np.sin((gradients[1] - 0.5) * repeat * np.pi))", "docstring": "Makes a surface from the product of sine functions on each axis.\n\nArgs:\ndims (pair): the dimensions of the surface to create\nrepeat (int): the frequency of the waves is set to ensure this many\nrepetitions of the function\n\nReturns:\nsurface: A surface.", "source": "juraj-google-style"}
{"code": "def discovery(self, logfile=None, tracefile=None):\n    self._enable_logging(logfile=logfile, tracefile=tracefile)\n    self.log(\"'discovery' method is deprecated. Please 'connect' with force_discovery=True.\")\n    self.log('Device discovery process started')\n    self.connect(logfile=logfile, force_discovery=True, tracefile=tracefile)\n    self.disconnect()", "docstring": "Discover the device details.\n\nThis method discover several device attributes.\n\nArgs:\nlogfile (file): Optional file descriptor for session logging. The file must be open for write.\nThe session is logged only if ``log_session=True`` was passed to the constructor.\nIt the parameter is not passed then the default *session.log* file is created in `log_dir`.", "source": "codesearchnet"}
{"code": "def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None):\n    num_labels = _get_dim(logits, 2)\n    max_label_seq_length = _get_dim(labels, 1)\n    ilabel_log_probs = nn_ops.log_softmax(logits)\n    state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs)\n    state_trans_probs = _ctc_state_trans(labels)\n    initial_state_log_probs, final_state_log_probs = ctc_state_log_probs(label_length, max_label_seq_length)\n    fwd_bwd_log_probs, log_likelihood = _forward_backward_log(state_trans_log_probs=math_ops.log(state_trans_probs), initial_state_log_probs=initial_state_log_probs, final_state_log_probs=final_state_log_probs, observed_log_probs=state_log_probs, sequence_length=logit_length)\n    if unique:\n        olabel_log_probs = _state_to_olabel_unique(labels, num_labels, fwd_bwd_log_probs, unique)\n    else:\n        olabel_log_probs = _state_to_olabel(labels, num_labels, fwd_bwd_log_probs)\n    grad = math_ops.exp(ilabel_log_probs) - math_ops.exp(olabel_log_probs)\n    max_logit_length = _get_dim(logits, 0)\n    logit_mask = array_ops.sequence_mask(logit_length, max_logit_length, dtypes.float32)\n    logit_mask = array_ops.transpose(logit_mask, perm=[1, 0])\n    logit_mask = array_ops.expand_dims(logit_mask, axis=2)\n    grad *= logit_mask\n    loss = -log_likelihood\n    return (loss, grad)", "docstring": "Computes the CTC loss and gradients.\n\nMost users will want fwd_bwd.ctc_loss\n\nThis function returns the computed gradient, it does not have a gradient\nof its own defined.\n\nArgs:\nlogits: tensor of shape [frames, batch_size, num_labels]\nlabels: tensor of shape [batch_size, max_label_seq_length]\nlabel_length: tensor of shape [batch_size] Length of reference label\nsequence in labels.\nlogit_length: tensor of shape [batch_size] Length of input sequence in\nlogits.\nunique: (optional) unique label indices as computed by unique(labels) If\nsupplied, enables an implementation that is faster and more memory\nefficient on TPU.\n\nReturns:\nloss: tensor of shape [batch_size]\ngradient: tensor of shape [frames, batch_size, num_labels]", "source": "github-repos"}
{"code": "def _chunk_query(l, n, cn, conn, table, db_type):\n    \n    \n    [insert_query_m(l[i:i + n], table, conn, cn, db_type) for i in range(0, len(l), n)]", "docstring": "Call for inserting SQL query in chunks based on n rows\n\nArgs:\nl (list): List of tuples\nn (int): Number of rows\ncn (str): Column names\nconn (connection object): Database connection object\ntable (str): Table name\ndb_type (str): If \"sqlite\" or \"mysql\"", "source": "juraj-google-style"}
{"code": "def correlation_vector(self, value):\n        \n        if value == self._defaults['ai.operation.correlationVector'] and 'ai.operation.correlationVector' in self._values:\n            del self._values['ai.operation.correlationVector']\n        else:\n            self._values['ai.operation.correlationVector'] = value", "docstring": "The correlation_vector property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def emit(self, record):\n        \n        \n        if record.levelno < logging.getLevelName(self.min_level):\n            return\n\n        evt = LogEvent()\n        evt.level = record.levelname\n        evt.levelno = record.levelno\n        evt.timestamp = datetime.fromtimestamp(record.created)\n        evt.message = record.message\n        evt.filename = record.filename\n        evt.lineno = record.lineno\n        evt.module = record.module\n        evt.funcname = record.funcName\n        evt.pathname = record.pathname\n        evt.process_id = record.process\n\n        \n        if record.levelno >= 40:\n            evt.stacktrace = traceback.format_exc()\n\n        try:\n            db.session.add(evt)\n            db.session.commit()\n        except Exception:\n            db.session.rollback()", "docstring": "Persist a record into the database\n\nArgs:\nrecord (`logging.Record`): The logging.Record object to store\n\nReturns:\n`None`", "source": "juraj-google-style"}
{"code": "def _ot_make_closed(self, access_string):\n        \n        self.observation_table.sm_vector.append(access_string)\n        for i in self.alphabet:\n            self.observation_table.smi_vector.append(access_string + i)\n            for e in self.observation_table.em_vector:\n                self._fill_table_entry(access_string + i, e)", "docstring": "Given a state input_string in Smi that is not equivalent with any state in Sm\nthis method will move that state in Sm create a corresponding Smi\nstate and fill the corresponding entries in the table.\nArgs:\naccess_string (str): State access string\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def rpc(self, address, rpc_id, *args, **kwargs):\n    if isinstance(rpc_id, RPCDeclaration):\n        arg_format = rpc_id.arg_format\n        resp_format = rpc_id.resp_format\n        rpc_id = rpc_id.rpc_id\n    else:\n        arg_format = kwargs.get('arg_format', None)\n        resp_format = kwargs.get('resp_format', None)\n    arg_payload = b''\n    if (arg_format is not None):\n        arg_payload = pack_rpc_payload(arg_format, args)\n    self._logger.debug('Sending rpc to %d:%04X, payload=%s', address, rpc_id, args)\n    resp_payload = self.call_rpc(address, rpc_id, arg_payload)\n    if (resp_format is None):\n        return []\n    resp = unpack_rpc_payload(resp_format, resp_payload)\n    return resp", "docstring": "Immediately dispatch an RPC inside this EmulatedDevice.\n\nThis function is meant to be used for testing purposes as well as by\ntiles inside a complex EmulatedDevice subclass that need to\ncommunicate with each other.  It should only be called from the main\nvirtual device thread where start() was called from.\n\n**Background workers may not call this method since it may cause them to deadlock.**\n\nArgs:\naddress (int): The address of the tile that has the RPC.\nrpc_id (int): The 16-bit id of the rpc we want to call\n*args: Any required arguments for the RPC as python objects.\n**kwargs: Only two keyword arguments are supported:\n- arg_format: A format specifier for the argument list\n- result_format: A format specifier for the result\n\nReturns:\nlist: A list of the decoded response members from the RPC.", "source": "codesearchnet"}
{"code": "def label(self, main_type, sub_type, unique_id, label, action='ADD', owner=None, params=None):\n        \n        params = params or {}\n\n        if owner:\n            params['owner'] = owner\n\n        action = action.upper()\n\n        if not sub_type:\n            url = '/v2/{}/{}/securityLabels/{}'.format(main_type, unique_id, quote(label))\n        else:\n            url = '/v2/{}/{}/{}/securityLabels/{}'.format(\n                main_type, sub_type, unique_id, quote(label)\n            )\n\n        if action == 'ADD':\n            return self.tcex.session.post(url, params=params)\n\n        if action == 'DELETE':\n            return self.tcex.session.delete(url, params=params)\n\n        if action == 'GET':\n            return self.tcex.session.get(url, params=params)\n\n        return None", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\nlabel:\naction:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def from_key_counter(cls, key, counter, alg):\n    counter = _convert_to_state_tensor(counter)\n    key = _convert_to_state_tensor(key)\n    alg = random_ops_util.convert_alg_to_int(alg)\n    counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])\n    key.shape.assert_is_compatible_with([])\n    key = array_ops.reshape(key, [1])\n    state = array_ops.concat([counter, key], 0)\n    return cls(state=state, alg=alg)", "docstring": "Creates a generator from a key and a counter.\n\nThis constructor only applies if the algorithm is a counter-based algorithm.\nSee method `key` for the meaning of \"key\" and \"counter\".\n\nArgs:\nkey: the key for the RNG, a scalar of type STATE_TYPE.\ncounter: a vector of dtype STATE_TYPE representing the initial counter for\nthe RNG, whose length is algorithm-specific.,\nalg: the RNG algorithm. If None, it will be auto-selected. See\n`__init__` for its possible values.\n\nReturns:\nThe new generator.", "source": "github-repos"}
{"code": "def HandleBlockReceived(self, inventory):\n        \n        block = IOHelper.AsSerializableWithType(inventory, 'neo.Core.Block.Block')\n        if not block:\n            return\n\n        blockhash = block.Hash.ToBytes()\n        try:\n            if blockhash in BC.Default().BlockRequests:\n                BC.Default().BlockRequests.remove(blockhash)\n        except KeyError:\n            pass\n        try:\n            if blockhash in self.myblockrequests:\n                \n                self.heart_beat(HEARTBEAT_BLOCKS)\n                self.myblockrequests.remove(blockhash)\n        except KeyError:\n            pass\n        self.leader.InventoryReceived(block)", "docstring": "Process a Block inventory payload.\n\nArgs:\ninventory (neo.Network.Inventory):", "source": "juraj-google-style"}
{"code": "def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:\n    (sx, sz) = size\n    (rx, rz) = resolution\n    (dx, dz) = ((sx / rx), (sz / rz))\n    (ox, oz) = (((- sx) / 2), ((- sz) / 2))\n\n    def gen_pos():\n        for z in range(rz):\n            for x in range(rx):\n                (yield (ox + (x * dx)))\n                (yield 0)\n                (yield (oz + (z * dz)))\n\n    def gen_uv():\n        for z in range(rz):\n            for x in range(rx):\n                (yield (x / (rx - 1)))\n                (yield (1 - (z / (rz - 1))))\n\n    def gen_normal():\n        for _ in range((rx * rz)):\n            (yield 0.0)\n            (yield 1.0)\n            (yield 0.0)\n\n    def gen_index():\n        for z in range((rz - 1)):\n            for x in range((rx - 1)):\n                (yield (((z * rz) + x) + 1))\n                (yield ((z * rz) + x))\n                (yield (((z * rz) + x) + rx))\n                (yield (((z * rz) + x) + 1))\n                (yield (((z * rz) + x) + rx))\n                (yield ((((z * rz) + x) + rx) + 1))\n    pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)\n    uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)\n    normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)\n    index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)\n    vao = VAO('plane_xz', mode=moderngl.TRIANGLES)\n    vao.buffer(pos_data, '3f', ['in_position'])\n    vao.buffer(uv_data, '2f', ['in_uv'])\n    vao.buffer(normal_data, '3f', ['in_normal'])\n    vao.index_buffer(index_data, index_element_size=4)\n    return vao", "docstring": "Generates a plane on the xz axis of a specific size and resolution.\nNormals and texture coordinates are also included.\n\nArgs:\nsize: (x, y) tuple\nresolution: (x, y) tuple\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance", "source": "codesearchnet"}
{"code": "def clear_errors():\n    \n    data = []\n    data.append(0x0B)\n    data.append(BROADCAST_ID)\n    data.append(RAM_WRITE_REQ)\n    data.append(STATUS_ERROR_RAM)\n    data.append(BYTE2)\n    data.append(0x00)\n    data.append(0x00)\n    send_data(data)", "docstring": "Clears the errors register of all Herkulex servos\n\nArgs:\nnone", "source": "juraj-google-style"}
{"code": "def _decorate_run_options_for_debug(self, run_options, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n    run_options.output_partition_graphs = True\n    debug_utils.watch_graph(run_options, self._sess.graph, debug_urls=debug_urls, debug_ops=debug_ops, node_name_regex_allowlist=node_name_regex_allowlist, op_type_regex_allowlist=op_type_regex_allowlist, tensor_dtype_regex_allowlist=tensor_dtype_regex_allowlist, tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures, reset_disk_byte_usage=self._run_call_count == 1 or self._is_disk_usage_reset_each_run())", "docstring": "Modify a RunOptions object for debug tensor watching.\n\nSpecifies request for outputting partition graphs. Adds\ndebug_tensor_watch_opts with proper debug URLs.\n\nArgs:\nrun_options: (RunOptions) the modified RunOptions object.\ndebug_urls: (list of str) debug URLs to be entered in run_options.\ndebug_tensor_watch_opts.\ndebug_ops: (str or list of str) debug op(s) to be used by the debugger.\nnode_name_regex_allowlist: Regular-expression allowlist for node\nname.\nop_type_regex_allowlist: Regular-expression allowlist for op type.\ntensor_dtype_regex_allowlist: Regular-expression allowlist for tensor\ndtype.\ntolerate_debug_op_creation_failures: Whether debug op creation failures\nare to be tolerated.", "source": "github-repos"}
{"code": "def parse_arguments(argv):\n  \n  parser = argparse.ArgumentParser(\n      formatter_class=argparse.RawDescriptionHelpFormatter,\n      description=textwrap.dedent())\n  parser.add_argument('--cloud',\n                      action='store_true',\n                      help='Analysis will use cloud services.')\n  parser.add_argument('--output',\n                      metavar='DIR',\n                      type=str,\n                      required=True,\n                      help='GCS or local folder')\n\n  input_group = parser.add_argument_group(\n      title='Data Source Parameters',\n      description='schema is only needed if using --csv')\n\n  \n  input_group.add_argument('--csv',\n                           metavar='FILE',\n                           type=str,\n                           required=False,\n                           action='append',\n                           help='Input CSV absolute file paths. May contain a '\n                                'file pattern.')\n  input_group.add_argument('--schema',\n                           metavar='FILE',\n                           type=str,\n                           required=False,\n                           help='Schema file path. Only required if using csv files')\n\n  \n  input_group.add_argument('--bigquery',\n                           metavar='PROJECT_ID.DATASET.TABLE_NAME',\n                           type=str,\n                           required=False,\n                           help=('Must be in the form project.dataset.table_name'))\n\n  parser.add_argument('--features',\n                      metavar='FILE',\n                      type=str,\n                      required=True,\n                      help='Features file path')\n\n  args = parser.parse_args(args=argv[1:])\n\n  if args.cloud:\n    if not args.output.startswith('gs:\n      raise ValueError('--output must point to a location on GCS')\n    if (args.csv and\n       not all(x.startswith('gs:\n      raise ValueError('--csv must point to a location on GCS')\n    if args.schema and not args.schema.startswith('gs:\n      raise ValueError('--schema must point to a location on GCS')\n\n  if not args.cloud and args.bigquery:\n    raise ValueError('--bigquery must be used with --cloud')\n\n  if not ((args.bigquery and args.csv is None and\n           args.schema is None) or\n          (args.bigquery is None and args.csv and\n           args.schema)):\n    raise ValueError('either --csv and --schema must both'\n                     ' be set or just --bigquery is set')\n\n  return args", "docstring": "Parse command line arguments.\n\nArgs:\nargv: list of command line arguments, including program name.\n\nReturns:\nAn argparse Namespace object.\n\nRaises:\nValueError: for bad parameters", "source": "juraj-google-style"}
{"code": "def create_struct(name):\n    \n    sid = idc.GetStrucIdByName(name)\n    if sid != idaapi.BADADDR:\n        \n        raise exceptions.SarkStructAlreadyExists(\"A struct names {!r} already exists.\".format(name))\n\n    sid = idc.AddStrucEx(-1, name, 0)\n    if sid == idaapi.BADADDR:\n        raise exceptions.SarkStructCreationFailed(\"Struct creation failed.\")\n\n    return sid", "docstring": "Create a structure.\n\nArgs:\nname: The structure's name\n\nReturns:\nThe sturct ID\n\nRaises:\nexceptions.SarkStructAlreadyExists: A struct with the same name already exists\nexceptions.SarkCreationFailed:  Struct creation failed", "source": "juraj-google-style"}
{"code": "def deal_with_changeset_stack_policy(self, fqn, stack_policy):\n        \n        if stack_policy:\n            kwargs = generate_stack_policy_args(stack_policy)\n            kwargs[\"StackName\"] = fqn\n            logger.debug(\"Setting stack policy on %s.\", fqn)\n            self.cloudformation.set_stack_policy(**kwargs)", "docstring": "Set a stack policy when using changesets.\n\nChangeSets don't allow you to set stack policies in the same call to\nupdate them. This sets it before executing the changeset if the\nstack policy is passed in.\n\nArgs:\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.", "source": "juraj-google-style"}
{"code": "def get_review(review_struct):\n    \n    review_fn = _resource_context(\"review.rst\")\n\n    \n    with open(review_fn) as f:\n        review = f.read()\n\n    \n    with NamedTemporaryFile(suffix=\".png\") as qr_file:\n        url = pyqrcode.create(review_struct.internal_url)\n        url.png(qr_file.name, scale=5)\n\n        \n        qr_file.flush()\n        qr_file.seek(0)\n\n        \n        review = Template(review).substitute(\n            content=review_struct.get_rst(),\n            datum=time.strftime(\"%d.%m.%Y\", time.localtime()),\n            cas=time.strftime(\"%H:%M\", time.localtime()),\n            resources_path=RES_PATH,\n            qr_path=qr_file.name,\n        )\n\n        return gen_pdf(\n            review,\n            open(_resource_context(\"review_style.json\")).read(),\n        )", "docstring": "Generate review from `review_struct`.\n\nArgs:\nreview_struct (obj): :class:`.GenerateReview` instance.\n\nReturns:\nobj: StringIO file instance containing PDF file.", "source": "juraj-google-style"}
{"code": "def get_dataset_split(tmp_dir, split, use_control_set):\n    if (not use_control_set):\n        dataset_split = {problem.DatasetSplit.TRAIN: [f for f in tf.gfile.Glob(os.path.join(tmp_dir, 'train-novels*.txt'))], problem.DatasetSplit.EVAL: [os.path.join(tmp_dir, 'lambada_control_test_data_plain_text.txt')]}\n    return dataset_split[split]", "docstring": "Gives the file paths with regards to the given split.\n\nArgs:\ntmp_dir: temp directory\nsplit: dataset split\nuse_control_set: uses control dataset if true.\n\nReturns:\nlist of file paths.", "source": "codesearchnet"}
{"code": "def summarize_tensors(tensor_dict, tag=None):\n    if (tag is None):\n        tag = 'tensors/'\n    for t_name in list(tensor_dict):\n        t = tensor_dict[t_name]\n        tf.summary.histogram((tag + t_name), t)", "docstring": "Summarize the tensors.\n\nArgs:\ntensor_dict: a dictionary of tensors.\ntag: name scope of the summary; defaults to tensors/.", "source": "codesearchnet"}
{"code": "def rename(self, container, name):\n        \n        url = self._url(\"/containers/{0}/rename\", container)\n        params = {'name': name}\n        res = self._post(url, params=params)\n        self._raise_for_status(res)", "docstring": "Rename a container. Similar to the ``docker rename`` command.\n\nArgs:\ncontainer (str): ID of the container to rename\nname (str): New name for the container\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def predict_array(self, arr):\n        \n        precompute = self.precompute\n        self.precompute = False\n        pred = super().predict_array(arr)\n        self.precompute = precompute\n        return pred", "docstring": "This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called\nwith precompute set to true\n\nArgs:\narr: a numpy array to be used as input to the model for prediction purposes\nReturns:\na numpy array containing the predictions from the model", "source": "juraj-google-style"}
{"code": "def play_alert(zones, alert_uri, alert_volume=20, alert_duration=0, fade_back=False):\n    for zone in zones:\n        zone.snap = Snapshot(zone)\n        zone.snap.snapshot()\n        print('snapshot of zone: {}'.format(zone.player_name))\n    for zone in zones:\n        if zone.is_coordinator:\n            if (not zone.is_playing_tv):\n                trans_state = zone.get_current_transport_info()\n                if (trans_state['current_transport_state'] == 'PLAYING'):\n                    zone.pause()\n        zone.volume = alert_volume\n        zone.mute = False\n    print('will play: {} on all coordinators'.format(alert_uri))\n    for zone in zones:\n        if zone.is_coordinator:\n            zone.play_uri(uri=alert_uri, title='Sonos Alert')\n    time.sleep(alert_duration)\n    for zone in zones:\n        print('restoring {}'.format(zone.player_name))\n        zone.snap.restore(fade=fade_back)", "docstring": "Demo function using soco.snapshot across multiple Sonos players.\n\nArgs:\nzones (set): a set of SoCo objects\nalert_uri (str): uri that Sonos can play as an alert\nalert_volume (int): volume level for playing alert (0 tp 100)\nalert_duration (int): length of alert (if zero then length of track)\nfade_back (bool): on reinstating the zones fade up the sound?", "source": "codesearchnet"}
{"code": "def result(self):\n    self.wait()\n    if self._fatal_error:\n        raise self._fatal_error\n    return self._result", "docstring": "Get the result for a job. This will block if the job is incomplete.\n\nReturns:\nThe result for the Job.\n\nRaises:\nAn exception if the Job resulted in an exception.", "source": "codesearchnet"}
{"code": "def _GeneratePathString(self, mediator, pathspec, hashes):\n    display_name = mediator.GetDisplayNameForPathSpec(pathspec)\n    path_string = '{0:s}:'.format(display_name)\n    for (hash_name, hash_value) in sorted(hashes.items()):\n        path_string = '{0:s} {1:s}={2:s}'.format(path_string, hash_name, hash_value)\n    return path_string", "docstring": "Generates a string containing a pathspec and its hashes.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\npathspec (dfvfs.Pathspec): the path specification) to generate a string\nfor.\nhashes (dict[str, str]): mapping of hash attribute names to the value of\nthat hash for the path specification being processed.\n\nReturns:\nstr: string of the form \"display_name: hash_type=hash_value\". For example,\n\"OS:/path/spec: test_hash=4 other_hash=5\".", "source": "codesearchnet"}
{"code": "def describe_enum(enum_definition):\n    enum_descriptor = EnumDescriptor()\n    enum_descriptor.name = enum_definition.definition_name().split('.')[(- 1)]\n    values = []\n    for number in enum_definition.numbers():\n        value = enum_definition.lookup_by_number(number)\n        values.append(describe_enum_value(value))\n    if values:\n        enum_descriptor.values = values\n    return enum_descriptor", "docstring": "Build descriptor for Enum class.\n\nArgs:\nenum_definition: Enum class to provide descriptor for.\n\nReturns:\nInitialized EnumDescriptor instance describing the Enum class.", "source": "codesearchnet"}
{"code": "def import_mapping(connection_id, mapping):\n    \n    url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,\n                       'connections', connection_id, 'actions', 'import')\n\n    response = requests.post(\n        url=url,\n        json=mapping,\n        headers=_get_authorization_headers()\n    )\n    response.raise_for_status()", "docstring": "Import Heroku Connection mapping for given connection.\n\nArgs:\nconnection_id (str): Heroku Connection connection ID.\nmapping (dict): Heroku Connect mapping.\n\nRaises:\nrequests.HTTPError: If an error occurs uploading the mapping.\nValueError: If the mapping is not JSON serializable.", "source": "juraj-google-style"}
{"code": "def GetFeedMapping(client, feed, placeholder_type):\n    feed_mapping_service = client.GetService('FeedMappingService', 'v201809')\n    attribute_mappings = {}\n    more_pages = True\n    selector = {'fields': ['FeedMappingId', 'AttributeFieldMappings'], 'predicates': [{'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]}, {'field': 'PlaceholderType', 'operator': 'EQUALS', 'values': [placeholder_type]}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}\n    while more_pages:\n        page = feed_mapping_service.get(selector)\n        if ('entries' in page):\n            for feed_mapping in page['entries']:\n                for attribute_mapping in feed_mapping['attributeFieldMappings']:\n                    if (attribute_mapping['feedAttributeId'] in attribute_mappings):\n                        attribute_mappings[attribute_mapping['feedAttributeId']].append(attribute_mapping['fieldId'])\n                    else:\n                        attribute_mappings[attribute_mapping['feedAttributeId']] = [attribute_mapping['fieldId']]\n        selector['paging']['startIndex'] += PAGE_SIZE\n        more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))\n    return attribute_mappings", "docstring": "Gets the Feed Mapping for a given Feed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed: the Feed we are retrieving the Feed Mapping for.\nplaceholder_type: the Placeholder Type we are looking for.\nReturns:\nA dictionary containing the Feed Mapping.", "source": "codesearchnet"}
{"code": "def _calculate_hash(files, root):\n    \n    file_hash = hashlib.md5()\n    for fname in sorted(files):\n        f = os.path.join(root, fname)\n        file_hash.update((fname + \"\\0\").encode())\n        with open(f, \"rb\") as fd:\n            for chunk in iter(lambda: fd.read(4096), \"\"):\n                if not chunk:\n                    break\n                file_hash.update(chunk)\n            file_hash.update(\"\\0\".encode())\n\n    return file_hash.hexdigest()", "docstring": "Returns a hash of all of the given files at the given root.\n\nArgs:\nfiles (list[str]): file names to include in the hash calculation,\nrelative to ``root``.\nroot (str): base directory to analyze files in.\n\nReturns:\nstr: A hash of the hashes of the given files.", "source": "juraj-google-style"}
{"code": "def _init_boto3_clients(self):\n        \n        try:\n            profile = self._config.get('environment', {}).get('profile')\n            region = self._config.get('environment', {}).get('region')\n            if profile:\n                self._b3Sess = boto3.session.Session(profile_name=profile)\n            else:\n                self._b3Sess = boto3.session.Session()\n\n            self._s3 = self._b3Sess.client('s3')\n            self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)\n            self._ssm = self._b3Sess.client('ssm', region_name=region)\n\n            return True\n        except Exception as wtf:\n            logging.error('Exception caught in intialize_session(): {}'.format(wtf))\n            traceback.print_exc(file=sys.stdout)\n            return False", "docstring": "The utililty requires boto3 clients to Cloud Formation and S3. Here is\nwhere we make them.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n    optional_keys = ('city', 'state', 'country', 'latitude', 'longitude', 'time_zone', 'elevation', 'station_id', 'source')\n    for key in optional_keys:\n        if (key not in data):\n            data[key] = None\n    return cls(data['city'], data['state'], data['country'], data['latitude'], data['longitude'], data['time_zone'], data['elevation'], data['station_id'], data['source'])", "docstring": "Create a location from a dictionary.\n\nArgs:\ndata: {\n\"city\": \"-\",\n\"latitude\": 0,\n\"longitude\": 0,\n\"time_zone\": 0,\n\"elevation\": 0}", "source": "codesearchnet"}
{"code": "def _fdopen_ver2(self, file_des, mode='r', bufsize=None):\n    if (not is_int_type(file_des)):\n        raise TypeError('an integer is required')\n    try:\n        return FakeFileOpen(self.filesystem).call(file_des, mode=mode)\n    except IOError as exc:\n        self.filesystem.raise_os_error(exc.errno, exc.filename)", "docstring": "Returns an open file object connected to the file descriptor\nfile_des.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\nmode: Additional file flags. Currently checks to see if the mode\nmatches the mode of the requested file object.\nbufsize: ignored. (Used for signature compliance with\n__builtin__.fdopen)\n\nReturns:\nFile object corresponding to file_des.\n\nRaises:\nOSError: if bad file descriptor or incompatible mode is given.\nTypeError: if file descriptor is not an integer.", "source": "codesearchnet"}
{"code": "def AddServiceDescriptor(self, service_desc):\n    \n\n    if not isinstance(service_desc, descriptor.ServiceDescriptor):\n      raise TypeError('Expected instance of descriptor.ServiceDescriptor.')\n\n    self._service_descriptors[service_desc.full_name] = service_desc", "docstring": "Adds a ServiceDescriptor to the pool.\n\nArgs:\nservice_desc: A ServiceDescriptor.", "source": "juraj-google-style"}
{"code": "def __resource_descriptor(self, resource_path, methods):\n    \n    descriptor = {}\n    method_map = {}\n    sub_resource_index = collections.defaultdict(list)\n    sub_resource_map = {}\n\n    resource_path_tokens = resource_path.split('.')\n    for service, protorpc_meth_info in methods:\n      method_info = getattr(protorpc_meth_info, 'method_info', None)\n      path = method_info.get_path(service.api_info)\n      method_id = method_info.method_id(service.api_info)\n      canonical_method_id = self._get_canonical_method_id(method_id)\n\n      current_resource_path = self._get_resource_path(method_id)\n\n      \n      if (current_resource_path[:len(resource_path_tokens)] !=\n          resource_path_tokens):\n        raise api_exceptions.ToolError(\n            'Internal consistency error in resource path {0}'.format(\n                current_resource_path))\n\n      \n      \n      effective_resource_path = current_resource_path[\n          len(resource_path_tokens):]\n\n      \n      if effective_resource_path:\n        sub_resource_name = effective_resource_path[0]\n        new_resource_path = '.'.join([resource_path, sub_resource_name])\n        sub_resource_index[new_resource_path].append(\n            (service, protorpc_meth_info))\n      else:\n        method_map[canonical_method_id] = self.__method_descriptor(\n            service, method_info, protorpc_meth_info)\n\n    \n    for sub_resource, sub_resource_methods in sub_resource_index.items():\n      sub_resource_name = sub_resource.split('.')[-1]\n      sub_resource_map[sub_resource_name] = self.__resource_descriptor(\n          sub_resource, sub_resource_methods)\n\n    if method_map:\n      descriptor['methods'] = method_map\n\n    if sub_resource_map:\n      descriptor['resources'] = sub_resource_map\n\n    return descriptor", "docstring": "Describes a resource.\n\nArgs:\nresource_path: string, the path of the resource (e.g., 'entries.items')\nmethods: list of tuples of type\n(endpoints.Service, protorpc.remote._RemoteMethodInfo), the methods\nthat serve this resource.\n\nReturns:\nDictionary describing the resource.", "source": "juraj-google-style"}
{"code": "def PrepareMatches(self, file_system):\n    \n    if self._location is not None:\n      self._location_segments = self._SplitPath(\n          self._location, file_system.PATH_SEPARATOR)\n\n    elif self._location_regex is not None:\n      path_separator = file_system.PATH_SEPARATOR\n      if path_separator == '\\\\':\n        \n        path_separator = '\\\\\\\\'\n\n      self._location_segments = self._SplitPath(\n          self._location_regex, path_separator)\n\n    if self._location_segments is not None:\n      self._number_of_location_segments = len(self._location_segments)", "docstring": "Prepare find specification for matching.\n\nArgs:\nfile_system (FileSystem): file system.", "source": "juraj-google-style"}
{"code": "def _ParseAndValidateRecord(self, parser_mediator, text_file_object):\n    \n    try:\n      title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n      url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n      timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n      popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)\n    except UnicodeDecodeError:\n      return False\n\n    if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\\n':\n      return False\n\n    if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\\n':\n      return False\n\n    if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\\n':\n      return False\n\n    if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and\n        popularity_index[-1] != '\\n'):\n      return False\n\n    title = title.strip()\n    url = url.strip()\n    timestamp = timestamp.strip()\n    popularity_index = popularity_index.strip()\n\n    if not title or not url or not timestamp or not popularity_index:\n      return False\n\n    event_data = OperaGlobalHistoryEventData()\n\n    if not self._IsValidUrl(url):\n      return False\n\n    event_data.url = url\n    if title != url:\n      event_data.title = title\n\n    try:\n      event_data.popularity_index = int(popularity_index, 10)\n      timestamp = int(timestamp, 10)\n    except ValueError:\n      return False\n\n    if event_data.popularity_index < 0:\n      event_data.description = 'First and Only Visit'\n    else:\n      event_data.description = 'Last Visit'\n\n    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    return True", "docstring": "Parses and validates an Opera global history record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntext_file_object (dfvfs.TextFile): text file.\n\nReturns:\nbool: True if the record was successfully parsed.", "source": "juraj-google-style"}
{"code": "def cleanup(context):\n    for name in ('work_dir', 'artifact_dir', 'task_log_dir'):\n        path = context.config[name]\n        if os.path.exists(path):\n            log.debug('rm({})'.format(path))\n            rm(path)\n        makedirs(path)", "docstring": "Clean up the work_dir and artifact_dir between task runs, then recreate.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.", "source": "codesearchnet"}
{"code": "def vr60baro(msg):\n    \n    d = hex2bin(data(msg))\n\n    if d[34] == '0':\n        return None\n\n    sign = int(d[35])    \n    value = bin2int(d[36:45])\n\n    if value == 0 or value == 511:  \n        return 0\n\n    value = value - 512 if sign else value\n\n    roc = value * 32   \n    return roc", "docstring": "Vertical rate from barometric measurement, this value may be very noisy.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nint: vertical rate in feet/minutes", "source": "juraj-google-style"}
{"code": "def memory_zones(self):\n        \n        count = self.num_memory_zones()\n        if count == 0:\n            return list()\n\n        buf = (structs.JLinkMemoryZone * count)()\n        res = self._dll.JLINK_GetMemZones(buf, count)\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return list(buf)", "docstring": "Gets all memory zones supported by the current target.\n\nSome targets support multiple memory zones.  This function provides the\nability to get a list of all the memory zones to facilate using the\nmemory zone routing functions.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of all the memory zones as ``JLinkMemoryZone`` structures.\n\nRaises:\nJLinkException: on hardware errors.", "source": "juraj-google-style"}
{"code": "def create_ltp_package(aleph_record, book_id, ebook_fn, data, url, urn_nbn=None):\n    (root_dir, orig_dir, meta_dir) = _create_package_hierarchy(book_id=book_id)\n    original_fn = os.path.join(orig_dir, fn_composers.original_fn(book_id, ebook_fn))\n    with open(original_fn, 'wb') as f:\n        f.write(data)\n    metadata_filenames = []\n    records = marcxml2mods(marc_xml=aleph_record, uuid=book_id, url=url)\n    for (cnt, mods_record) in enumerate(records):\n        fn = os.path.join(meta_dir, fn_composers.volume_fn(cnt))\n        with open(fn, 'w') as f:\n            f.write(mods_record)\n        metadata_filenames.append(fn)\n    md5_fn = os.path.join(root_dir, fn_composers.checksum_fn(book_id))\n    checksums = checksum_generator.generate_hashfile(root_dir)\n    with open(md5_fn, 'w') as f:\n        f.write(checksums)\n    info_fn = os.path.join(root_dir, fn_composers.info_fn(book_id))\n    with open(info_fn, 'w') as f:\n        f.write(info_composer.compose_info(root_dir=root_dir, files=([original_fn] + metadata_filenames), hash_fn=md5_fn, aleph_record=aleph_record, urn_nbn=urn_nbn))\n    return root_dir", "docstring": "Create LTP package as it is specified in specification v1.0 as I understand\nit.\n\nArgs:\naleph_record (str): XML containing full aleph record.\nbook_id (str): UUID of the book.\nebook_fn (str): Original filename of the ebook.\ndata (str/bytes): Ebook's content.\nurl (str): URL of the publication used when the URL can't be found in\n`aleph_record`.\nurn_nbn (str, default None): URN:NBN.\n\nReturns:\nstr: Name of the package's directory in ``/tmp``.", "source": "codesearchnet"}
{"code": "def identify(text):\n    filtered_text = set(list(text)).intersection(ALL_CHARS)\n    if (len(filtered_text) is 0):\n        return None\n    if filtered_text.issubset(SHARED_CHARS):\n        return EITHER\n    if filtered_text.issubset(TRAD_CHARS):\n        return TRAD\n    if filtered_text.issubset(SIMP_CHARS):\n        return SIMP\n    if filtered_text.difference(TRAD_CHARS).issubset(SIMP_CHARS):\n        return BOTH", "docstring": "Identify whether a string is simplified or traditional Chinese.\n\nReturns:\nNone: if there are no recognizd Chinese characters.\nEITHER: if the test is inconclusive.\nTRAD: if the text is traditional.\nSIMP: if the text is simplified.\nBOTH: the text has characters recognized as being solely traditional\nand other characters recognized as being solely simplified.", "source": "codesearchnet"}
{"code": "def test_noninlined_funcdef(self, mode):\n    self._maybe_skip(mode)\n    with ops.device(_get_device(mode)):\n        random_seed.set_random_seed(0)\n        x = _input([8, 8])\n        y = _matmul_act(x)\n        y = _example_noninlined_funcdef(y)\n        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01)\n        g = optimizer.compute_gradients(y, [x])\n        output = (g, y)\n    output_val_ref, output_val, cost_graph = self._run(mode, output)\n    node_map = _build_node_map(cost_graph.node)\n    self._assert_output_f16(mode, node_map, 'MatMul')\n    tol = 0.01 if mode == 'mkl' else 0.001\n    atol = 0.01 if test.is_built_with_rocm() else tol\n    self.assertAllClose(output_val_ref, output_val, atol=atol, rtol=tol)", "docstring": "Test graph with non-inlined function subgraph.\n\nThis requires the grappler pass to handle an OpDef that only appears in the\ngraph's function registry instead of the global op registry.\n\nArgs:\nmode: Either 'cuda' or 'mkl'.", "source": "github-repos"}
{"code": "def run_function_on_all_workers(self, function, run_on_other_drivers=False):\n    if (self.mode is None):\n        self.cached_functions_to_run.append(function)\n    else:\n        pickled_function = pickle.dumps(function)\n        function_to_run_id = hashlib.sha1(pickled_function).digest()\n        key = (b'FunctionsToRun:' + function_to_run_id)\n        function({'worker': self})\n        function_exported = self.redis_client.setnx((b'Lock:' + key), 1)\n        if (not function_exported):\n            return\n        check_oversized_pickle(pickled_function, function.__name__, 'function', self)\n        self.redis_client.hmset(key, {'driver_id': self.task_driver_id.binary(), 'function_id': function_to_run_id, 'function': pickled_function, 'run_on_other_drivers': str(run_on_other_drivers)})\n        self.redis_client.rpush('Exports', key)", "docstring": "Run arbitrary code on all of the workers.\n\nThis function will first be run on the driver, and then it will be\nexported to all of the workers to be run. It will also be run on any\nnew workers that register later. If ray.init has not been called yet,\nthen cache the function and export it later.\n\nArgs:\nfunction (Callable): The function to run on all of the workers. It\ntakes only one argument, a worker info dict. If it returns\nanything, its return values will not be used.\nrun_on_other_drivers: The boolean that indicates whether we want to\nrun this function on other drivers. One case is we may need to\nshare objects across drivers.", "source": "codesearchnet"}
{"code": "def scrape(self, url):\n        \n        if isinstance(url, str) is False:\n            raise TypeError(\"The type of url must be str.\")\n\n        if self.readable_web_pdf is not None and self.readable_web_pdf.is_pdf_url(url) is True:\n            web_data = self.readable_web_pdf.url_to_text(url)\n        else:\n            web_data = \"\"\n            req = urllib.request.Request(url=url)\n            with urllib.request.urlopen(req) as f:\n                web = f.read().decode('utf-8')\n                dom = pq(web)\n                [dom(remove_object).remove() for remove_object in self.__remove_object_list]\n\n                for dom_object in self.__dom_object_list:\n                    web_data += dom(dom_object).text()\n\n        sleep(1)\n        return web_data", "docstring": "Execute Web-Scraping.\nThe target dom objects are in self.__dom_object_list.\n\nArgs:\nurl:    Web site url.\n\nReturns:\nThe result. this is a string.\n\n@TODO(chimera0): check URLs format.", "source": "juraj-google-style"}
{"code": "def _normalize_field_name(self, field_name) -> str:\n        \n\n        if isinstance(field_name, tuple):\n            field_name, _ = field_name\n\n        return field_name", "docstring": "Normalizes a field name into a string by\nextracting the field name if it was specified\nas a reference to a HStore key (as a tuple).\n\nArguments:\nfield_name:\nThe field name to normalize.\n\nReturns:\nThe normalized field name.", "source": "juraj-google-style"}
{"code": "def model_fn(self, x: core.Tensor) -> Mapping[str, core.Tensor]:\n    if math_ops.reduce_sum(x) > 10.0:\n        out = math_ops.matmul(x, self.filters_0)\n        out = nn_ops.bias_add(out, self.bias_0)\n        return {'output': out}\n    out = math_ops.matmul(x, self.filters_1)\n    out = nn_ops.bias_add(out, self.bias_1)\n    return {'output': out}", "docstring": "Runs the input tensor to a branched operations.\n\nThe graph is branched by a condition whether the sum of elements of `x`\nis greater than 10.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def sample_from_likelihood(self, n_timesteps=10):\n        \n\n        self.latent_state_sequences = lmap(\n            lambda A: ltake(\n                n_timesteps,\n                iterate(\n                    lambda s: pd.Series(A @ s.values, index=s.index), self.s0\n                ),\n            ),\n            self.transition_matrix_collection,\n        )\n\n        self.observed_state_sequences = [\n            [self.sample_observed_state(s) for s in latent_state_sequence]\n            for latent_state_sequence in self.latent_state_sequences\n        ]", "docstring": "Sample a collection of observed state sequences from the likelihood\nmodel given a collection of transition matrices.\n\nArgs:\nn_timesteps: The number of timesteps for the sequences.", "source": "juraj-google-style"}
{"code": "def __init__(self, request, async, callback=None, callbacks=dict(), root_object=None):\n        \n\n        self._uses_authentication = True\n        self._has_timeouted = False\n        \n        self._ignore_request_idle = False\n        self._xhr_timeout = 3000\n        self._response = None\n        self._error_message = None\n        self._transaction_id = uuid.uuid4().hex\n\n        self._request = request\n        self._async = async\n        self._callback = callback\n        self._callbacks = callbacks\n        self._user_info = None\n        self._object_last_action_timer = None\n        self._root_object = root_object", "docstring": "Intializes a new connection for a given request\n\nNURESTConnection object is in charge of the HTTP call. It relies on request library\n\nArgs:\nrequest: the NURESTRequest to send\ncallback: the method that will be fired after sending\ncallbacks: a dictionary of user callbacks. Should contains local and remote callbacks", "source": "juraj-google-style"}
{"code": "def get_sendback(self, uuid, key):\n        \n        def send_back_callback(data):\n            self.sendResponse(\n                serializers.serialize(data),\n                uuid,\n                key\n            )\n\n        return send_back_callback", "docstring": "Return function for sending progress messages back to original caller.\n\nArgs:\nuuid (str): UUID of the received message.\nkey (str): Routing key.\n\nReturns:\nfn reference: Reference to function which takes only one data \\\nargument.", "source": "juraj-google-style"}
{"code": "def get_capture_handler_config_by_name(self, name):\n    handler_confs = []\n    for (address, stream_capturer) in self._stream_capturers.iteritems():\n        handler_data = stream_capturer[0].dump_handler_config_data()\n        for h in handler_data:\n            if (h['handler']['name'] == name):\n                handler_confs.append(h)\n    return handler_confs", "docstring": "Return data for handlers of a given name.\n\nArgs:\nname:\nName of the capture handler(s) to return config data for.\n\nReturns:\nDictionary dump from the named capture handler as given by\nthe :func:`SocketStreamCapturer.dump_handler_config_data` method.", "source": "codesearchnet"}
{"code": "def __init__(self, file_handle):\n    if not file_handle.writable():\n        raise ValueError('Output stream must be writable')\n    self._file_handle = file_handle\n    self._coder = RowAsDictJsonCoder()", "docstring": "Initialize an JsonRowWriter.\n\nArgs:\nfile_handle (io.IOBase): Output stream to write to.", "source": "github-repos"}
{"code": "def precision(truth, recommend, k=None):\n    \n    if len(recommend) == 0:\n        if len(truth) == 0:\n            return 1.\n        return 0.\n\n    if k is None:\n        k = len(recommend)\n    return count_true_positive(truth, recommend[:k]) / float(k)", "docstring": "Precision@k.\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\nk (int): Top-k items in `recommend` will be recommended.\n\nReturns:\nfloat: Precision@k.", "source": "juraj-google-style"}
{"code": "def _validate_isvalid_orcid(self, isvalid_orcid, field, value):\n        \n        if isvalid_orcid and 'ORCID' in value:\n            try:\n                res = search_orcid(value['ORCID'])\n            except ConnectionError:\n                warn('network not available, ORCID not validated.')\n                return\n            except HTTPError:\n                self._error(field, 'ORCID incorrect or invalid for ' +\n                            value['name']\n                            )\n                return\n\n            family_name = res['name']['family-name']['value']\n            given_name = res['name']['given-names']['value']\n            if not compare_name(given_name, family_name, value['name']):\n                self._error(field, 'Name and ORCID do not match. Name supplied: ' +\n                            value['name'] + '. Name associated with ORCID: ' +\n                            ' '.join([given_name, family_name])\n                            )", "docstring": "Checks for valid ORCID if given.\n\nArgs:\nisvalid_orcid (`bool`): flag from schema indicating ORCID to be checked.\nfield (`str`): 'author'\nvalue (`dict`): dictionary of author metadata.\n\nThe rule's arguments are validated against this schema:\n{'isvalid_orcid': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "juraj-google-style"}
{"code": "def direct_normal_illuminance(self, value=999999.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `direct_normal_illuminance`'.format(value))\n        if (value < 0.0):\n            raise ValueError('value need to be greater or equal 0.0 for field `direct_normal_illuminance`')\n    self._direct_normal_illuminance = value", "docstring": "Corresponds to IDD Field `direct_normal_illuminance`\nwill be missing if >= 999900\n\nArgs:\nvalue (float): value for IDD Field `direct_normal_illuminance`\nUnit: lux\nvalue >= 0.0\nMissing value: 999999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def run(self, xml, **kwargs):\n    kwargs['output'] = self.__graph__()\n    if isinstance(xml, str):\n        try:\n            self.source = etree.XML(xml)\n        except ValueError:\n            try:\n                self.source = etree.XML(xml.encode())\n            except:\n                raise ValueError('Cannot run error {}'.format(sys.exc_info()[0]))\n    else:\n        self.source = xml\n    super(XMLProcessor, self).run(**kwargs)\n    self.output = kwargs['output']\n    return kwargs['output']", "docstring": "Method takes either an etree.ElementTree or raw XML text\nas the first argument.\n\nArgs:\nxml(etree.ElementTree or text", "source": "codesearchnet"}
{"code": "def db_insert_record(self, table_name, columns):\n        \n        bindings = ('?,' * len(columns)).strip(',')\n        values = [None] * len(columns)\n        sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table_name, ', '.join(columns), bindings)\n        cur = self.db_conn.cursor()\n        cur.execute(sql, values)", "docstring": "Insert records into DB.\n\nArgs:\ntable_name (str): The name of the table.\ncolumns (list): List of columns for insert statement.", "source": "juraj-google-style"}
{"code": "def __init__(self, message=None, host=None):\n        \n        self.message = message\n        self.hostname = str(host) if host else None", "docstring": "Initialize the GeneralError object.\n\nArgs:\nmessage (str): Custom message to be passed to the exceptions. Defaults to *None*.\nIf *None* then the general class *__doc__* is used.\nhost (str): Custom string which can be used to enhance the exception message by adding the \"`host`: \"\nprefix to the message string. Defaults to *None*. If `host` is *None* then message stays unchanged.", "source": "juraj-google-style"}
{"code": "async def getTempCortex(mods=None):\n    \n    with s_common.getTempDir() as dirn:\n\n        async with await Cortex.anit(dirn) as core:\n            if mods:\n                for mod in mods:\n                    await core.loadCoreModule(mod)\n            async with core.getLocalProxy() as prox:\n                yield prox", "docstring": "Get a proxy to a cortex backed by a temporary directory.\n\nArgs:\nmods (list): A list of modules which are loaded into the cortex.\n\nNotes:\nThe cortex and temporary directory are town down on exit.\nThis should only be called from synchronous code.\n\nReturns:\nProxy to the cortex.", "source": "juraj-google-style"}
{"code": "def RegisterDecoder(cls, decoder):\n    encoding_method = decoder.ENCODING_METHOD.lower()\n    if (encoding_method in cls._decoders):\n        raise KeyError('Decoder for encoding method: {0:s} already set.'.format(decoder.ENCODING_METHOD))\n    cls._decoders[encoding_method] = decoder", "docstring": "Registers a decoder for a specific encoding method.\n\nArgs:\ndecoder (type): decoder class.\n\nRaises:\nKeyError: if the corresponding decoder is already set.", "source": "codesearchnet"}
{"code": "def _add_parameters(self, parameter_map, parameter_list):\n    for parameter in parameter_list:\n        if parameter.get('$ref'):\n            parameter = self.specification['parameters'].get(parameter.get('$ref').split('/')[(- 1)])\n        parameter_map[parameter['name']] = parameter", "docstring": "Populates the given parameter map with the list of parameters provided, resolving any reference objects encountered.\n\nArgs:\nparameter_map: mapping from parameter names to parameter objects\nparameter_list: list of either parameter objects or reference objects", "source": "codesearchnet"}
{"code": "def get_ccc_handle_from_uuid(self, uuid):\n        \n\n        if uuid in self.uuid_cccds:\n            return self.uuid_cccds[uuid].handle\n        \n        char = self.get_characteristic_from_uuid(uuid)\n        if char is None:\n            return None\n\n        ccc = char.get_descriptor_by_uuid(UUID_GATT_CCC)\n        if ccc is not None:\n            self.uuid_cccds[uuid] = ccc\n        return None if ccc is None else ccc.handle", "docstring": "Utility function to retrieve the client characteristic configuration\ndescriptor handle for a given characteristic.\n\nArgs:\nuuid (str): a string containing the hex-encoded UUID\n\nReturns:\nNone if an error occurs, otherwise an integer handle.", "source": "juraj-google-style"}
{"code": "def _GetElementDataTypeDefinition(self, data_type_definition):\n    \n    if not data_type_definition:\n      raise errors.FormatError('Missing data type definition')\n\n    element_data_type_definition = getattr(\n        data_type_definition, 'element_data_type_definition', None)\n    if not element_data_type_definition:\n      raise errors.FormatError(\n          'Invalid data type definition missing element')\n\n    return element_data_type_definition", "docstring": "Retrieves the element data type definition.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nReturns:\nDataTypeDefinition: element data type definition.\n\nRaises:\nFormatError: if the element data type cannot be determined from the data\ntype definition.", "source": "juraj-google-style"}
{"code": "def Lease(self, request, global_params=None):\n    config = self.GetMethodConfig('Lease')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "Leases a dataflow WorkItem to run.\n\nArgs:\nrequest: (DataflowProjectsLocationsJobsWorkItemsLeaseRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(LeaseWorkItemResponse) The response message.", "source": "github-repos"}
{"code": "def get_pending_computer_name():\n    current = get_computer_name()\n    pending = __utils__['reg.read_value']('HKLM', 'SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters', 'NV Hostname')['vdata']\n    if pending:\n        return (pending if (pending != current) else None)\n    return False", "docstring": "Get a pending computer name. If the computer name has been changed, and the\nchange is pending a system reboot, this function will return the pending\ncomputer name. Otherwise, ``None`` will be returned. If there was an error\nretrieving the pending computer name, ``False`` will be returned, and an\nerror message will be logged to the minion log.\n\nReturns:\nstr:\nReturns the pending name if pending restart. Returns ``None`` if not\npending restart.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.get_pending_computer_name", "source": "codesearchnet"}
{"code": "def configure_and_build(self, show_progress=True, optimized=True,\n                            skip_configuration=False):\n        \n\n        \n        if not skip_configuration:\n            configuration_command = ['python', 'waf', 'configure', '--enable-examples',\n                                     '--disable-gtk', '--disable-python']\n\n            if optimized:\n                configuration_command += ['--build-profile=optimized',\n                                          '--out=build/optimized']\n\n            \n            subprocess.call(configuration_command, cwd=self.path,\n                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n        \n        build_process = subprocess.Popen(['python', 'waf', 'build'], cwd=self.path,\n                                         stdout=subprocess.PIPE,\n                                         stderr=subprocess.PIPE)\n\n        \n        if show_progress:\n            line_iterator = self.get_build_output(build_process)\n            pbar = None\n            try:\n                [initial, total] = next(line_iterator)\n                pbar = tqdm(line_iterator, initial=initial, total=total,\n                            unit='file', desc='Building ns-3', smoothing=0)\n                for current, total in pbar:\n                    pbar.n = current\n            except (StopIteration):\n                if pbar is not None:\n                    pbar.n = pbar.total\n        else:  \n            build_process.communicate()", "docstring": "Configure and build the ns-3 code.\n\nArgs:\nshow_progress (bool): whether or not to display a progress bar\nduring compilation.\noptimized (bool): whether to use an optimized build. If False, use\na standard ./waf configure.\nskip_configuration (bool): whether to skip the configuration step,\nand only perform compilation.", "source": "juraj-google-style"}
{"code": "def get_security_group_id(name='', env='', region=''):\n    vpc_id = get_vpc_id(env, region)\n    LOG.info('Find %s sg in %s [%s] in %s', name, env, region, vpc_id)\n    url = '{0}/securityGroups/{1}/{2}/{3}?vpcId={4}'.format(API_URL, env, region, name, vpc_id)\n    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n    assert response.ok\n    result = response.json()\n    try:\n        security_group_id = result['id']\n    except KeyError:\n        msg = 'Security group ({0}) not found'.format(name)\n        raise SpinnakerSecurityGroupError(msg)\n    LOG.info('Found: %s', security_group_id)\n    return security_group_id", "docstring": "Get a security group ID.\n\nArgs:\nname (str): Security Group name to find.\nenv (str): Deployment environment to search.\nregion (str): AWS Region to search.\n\nReturns:\nstr: ID of Security Group, e.g. sg-xxxx.\n\nRaises:\nAssertionError: Call to Gate API was not successful.\nSpinnakerSecurityGroupError: Security Group _name_ was not found for\n_env_ in _region_.", "source": "codesearchnet"}
{"code": "def run_bottleneck_on_image(sess, image_data, image_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor):\n    resized_input_values = sess.run(decoded_image_tensor, {image_data_tensor: image_data})\n    bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: resized_input_values})\n    bottleneck_values = np.squeeze(bottleneck_values)\n    return bottleneck_values", "docstring": "Runs inference on an image to extract the 'bottleneck' summary layer.\n\nArgs:\nsess: Current active TensorFlow Session.\nimage_data: String of raw JPEG data.\nimage_data_tensor: Input data layer in the graph.\ndecoded_image_tensor: Output of initial image resizing and preprocessing.\nresized_input_tensor: The input node of the recognition graph.\nbottleneck_tensor: Layer before the final softmax.\n\nReturns:\nNumpy array of bottleneck values.", "source": "codesearchnet"}
{"code": "def supervised_to_dict(dataset, text2self):\n\n    def my_fn(inputs, targets):\n        if text2self:\n            return {'targets': targets}\n        else:\n            return {'inputs': inputs, 'targets': targets}\n    return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)", "docstring": "Turns a supervised dataset into a dataset with a feature dictionary.\n\nif text2self, then the features dictionary contains a \"targets\" key.\nelse, the features dictionary contains \"inputs\" and \"targets\" keys.\n\nArgs:\ndataset: a tf.data.Dataset\ntext2self: a boolean\nReturns:\na tf.data.Dataset", "source": "codesearchnet"}
{"code": "def process_equities(equities: List[str], mask: types.IntTensor=None) -> Tuple[List[str], List[int]]:\n    equity_list = cashflow_streams.to_list(equities)\n    if mask is not None:\n        return (equity_list, mask)\n    mask, mask_map, num_unique_equities = cashflow_streams.create_mask(equity_list)\n    equity_types = [mask_map[i] for i in range(num_unique_equities)]\n    return (equity_types, mask)", "docstring": "Extracts unique equities and computes an integer mask.\n\n#### Example\n\n```python\nprocess_equities([\"GOOG\", \"MSFT\", \"GOOG\", \"GOOG\"])\n# Returns\n(['GOOG', 'MSFT'], [0, 1, 0, 0])\n```\n\nArgs:\nequities: A list of equity names.\nmask: An optional integer mask for the sorted equity sequence. If supplied,\nbecomes a no-op.\n\nReturns:\nA Tuple of `(equities, mask)` where  `equities` is a list of unique sorted\nequities and `mask` is a list of integers which is the mask for `equities`.", "source": "github-repos"}
{"code": "def mark_typed_object(self, name, type_object):\n    if (not hasattr(type_object, 'dump')):\n        raise ArgumentError(('The passed type object %s is missing required method: dump()' % type_object))\n    if (not hasattr(type_object, 'Restore')):\n        raise ArgumentError(('The passed type object %s is missing required method: Restore()' % type_object))\n\n    def _dump_obj(obj):\n        if (obj is None):\n            return None\n        return obj.dump()\n\n    def _restore_obj(obj):\n        if (obj is None):\n            return obj\n        return type_object.Restore(obj)\n    self.mark_complex(name, _dump_obj, _restore_obj)", "docstring": "Mark a property as containing a serializable object.\n\nThis convenience method allows you to avoid having to call\n``mark_complex()`` whenever you need to serialize a complex object.\nThis method requires that property ``name`` be a single class that\ncontains a dump() method and a Restore() class method where\ntype_object.Restore(x.dump()) == x.\n\nArgs:\nname (str): The name of the complex property.\ntype_object: The class object that will be contained inside\nthis property.", "source": "codesearchnet"}
{"code": "def update_hash(src_file):\n    hash_file = (local.path(src_file) + '.hash')\n    new_hash = 0\n    with open(hash_file, 'w') as h_file:\n        new_hash = get_hash_of_dirs(src_file)\n        h_file.write(str(new_hash))\n    return new_hash", "docstring": "Update the hash for the given file.\n\nArgs:\nsrc: The file name.\nroot: The path of the given file.", "source": "codesearchnet"}
{"code": "def _stream_data(self, chunk=None):\n    self._stream_sm_running = True\n    if (chunk is None):\n        chunk = self._next_streaming_chunk(20)\n    if ((chunk is None) or (len(chunk) == 0)):\n        self._stream_sm_running = False\n        return\n    try:\n        self._send_notification(StreamingChar.value_handle, chunk)\n        self._defer(self._stream_data)\n    except bable_interface.BaBLEException as err:\n        if (err.packet.status == 'Rejected'):\n            time.sleep(0.05)\n            self._defer(self._stream_data, [chunk])\n        else:\n            self._audit('ErrorStreamingReport')\n            self._logger.exception('Error while streaming data')", "docstring": "Stream reports to the ble client in 20 byte chunks\n\nArgs:\nchunk (bytearray): A chunk that should be sent instead of requesting a\nnew chunk from the pending reports.", "source": "codesearchnet"}
{"code": "def get_commits(self, since_sha=None):\n    assert self.tempdir\n    cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]\n    if since_sha:\n        commits = [self.get_commit(since_sha)]\n        cmd.append('{}..HEAD'.format(since_sha))\n    else:\n        commits = []\n        cmd.append('HEAD')\n    output = cmd_output(*cmd, cwd=self.tempdir)\n    for (sha, date) in chunk_iter(output.splitlines(), 2):\n        commits.append(Commit(sha, int(date)))\n    return commits", "docstring": "Returns a list of Commit objects.\n\nArgs:\nsince_sha - (optional) A sha to search from", "source": "codesearchnet"}
{"code": "def normalize(model: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Dict[str, int]]:\n    is_old_format = all([isinstance(v, int) for v in model.values()])\n    if is_old_format:\n        output = {}\n        sorted_items = sorted(model.items(), key=lambda x: x[0])\n        groups = itertools.groupby(sorted_items, key=lambda x: x[0].split(':')[0])\n        for group in groups:\n            output[group[0]] = dict(((item[0].split(':')[-1], item[1]) for item in group[1]))\n        return output\n    try:\n        assert all([isinstance(v, int) for groups in model.values() for v in groups.values()]), 'Scores should be integers'\n    except (AssertionError, AttributeError) as e:\n        raise Exception('Unsupported model format:', e)\n    else:\n        return model", "docstring": "Updates a model to the latest format. Does nothing if it's updated already.\n\nArgs:\nmodel: A model.\nReturns:\nAn updated model.", "source": "github-repos"}
{"code": "def add(self, decorations):\n    added = 0\n    if isinstance(decorations, list):\n        not_repeated = (set(decorations) - set(self._decorations))\n        self._decorations.extend(list(not_repeated))\n        added = len(not_repeated)\n    elif (decorations not in self._decorations):\n        self._decorations.append(decorations)\n        added = 1\n    if (added > 0):\n        self._order_decorations()\n        self.update()\n    return added", "docstring": "Add text decorations on a CodeEditor instance.\n\nDon't add duplicated decorations, and order decorations according\ndraw_order and the size of the selection.\n\nArgs:\ndecorations (sourcecode.api.TextDecoration) (could be a list)\nReturns:\nint: Amount of decorations added.", "source": "codesearchnet"}
{"code": "def user_has_access(self, user):\n    if (ROLE_ADMIN in user.roles):\n        return True\n    if self.enabled:\n        if (not self.required_roles):\n            return True\n        for role in self.required_roles:\n            if (role in user.roles):\n                return True\n    return False", "docstring": "Check if a user has access to view information for the account\n\nArgs:\nuser (:obj:`User`): User object to check\n\nReturns:\nTrue if user has access to the account, else false", "source": "codesearchnet"}
{"code": "def create_dir(path):\n    full_path = abs_path(path)\n    if not os.path.exists(full_path):\n        try:\n            os.makedirs(full_path)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise", "docstring": "Creates a directory if it does not exist already.\n\nArgs:\npath: The path of the directory to create.", "source": "github-repos"}
{"code": "def prepare_subprocess_cmd(subprocess_cmd):\n    \n    help_cmd = subprocess_cmd + ['--helpfull']\n    help_output = subprocess.run(help_cmd, stdout=subprocess.PIPE).stdout\n    help_output = help_output.decode('ascii')\n    if 'python' in subprocess_cmd[0]:\n        valid_flags = parse_helpfull_output(help_output)\n    else:\n        valid_flags = parse_helpfull_output(help_output, regex=FLAG_HELP_RE_CC)\n    parsed_flags = flags.FlagValues().read_flags_from_files(subprocess_cmd[1:])\n\n    filtered_flags = filter_flags(parsed_flags, valid_flags)\n    return [subprocess_cmd[0]] + filtered_flags", "docstring": "Prepares a subprocess command by running --helpfull and masking flags.\n\nArgs:\nsubprocess_cmd: List[str], what would be passed into subprocess.call()\ni.e. ['python', 'train.py', '--flagfile=flags']\n\nReturns:\n['python', 'train.py', '--train_flag=blah', '--more_flags']", "source": "juraj-google-style"}
{"code": "def obtain(self, dest):\n        \n        \n        url, rev_options = self.get_url_rev_options(self.url)\n\n        if not os.path.exists(dest):\n            self.fetch_new(dest, url, rev_options)\n            return\n\n        rev_display = rev_options.to_display()\n        if self.is_repository_directory(dest):\n            existing_url = self.get_remote_url(dest)\n            if self.compare_urls(existing_url, url):\n                logger.debug(\n                    '%s in %s exists, and has correct URL (%s)',\n                    self.repo_name.title(),\n                    display_path(dest),\n                    url,\n                )\n                if not self.is_commit_id_equal(dest, rev_options.rev):\n                    logger.info(\n                        'Updating %s %s%s',\n                        display_path(dest),\n                        self.repo_name,\n                        rev_display,\n                    )\n                    self.update(dest, url, rev_options)\n                else:\n                    logger.info('Skipping because already up-to-date.')\n                return\n\n            logger.warning(\n                '%s %s in %s exists with URL %s',\n                self.name,\n                self.repo_name,\n                display_path(dest),\n                existing_url,\n            )\n            prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',\n                      ('s', 'i', 'w', 'b'))\n        else:\n            logger.warning(\n                'Directory %s already exists, and is not a %s %s.',\n                dest,\n                self.name,\n                self.repo_name,\n            )\n            \n            prompt = ('(i)gnore, (w)ipe, (b)ackup ',  \n                      ('i', 'w', 'b'))\n\n        logger.warning(\n            'The plan is to install the %s repository %s',\n            self.name,\n            url,\n        )\n        response = ask_path_exists('What to do?  %s' % prompt[0], prompt[1])\n\n        if response == 'a':\n            sys.exit(-1)\n\n        if response == 'w':\n            logger.warning('Deleting %s', display_path(dest))\n            rmtree(dest)\n            self.fetch_new(dest, url, rev_options)\n            return\n\n        if response == 'b':\n            dest_dir = backup_dir(dest)\n            logger.warning(\n                'Backing up %s to %s', display_path(dest), dest_dir,\n            )\n            shutil.move(dest, dest_dir)\n            self.fetch_new(dest, url, rev_options)\n            return\n\n        \n        if response == 's':\n            logger.info(\n                'Switching %s %s to %s%s',\n                self.repo_name,\n                display_path(dest),\n                url,\n                rev_display,\n            )\n            self.switch(dest, url, rev_options)", "docstring": "Install or update in editable mode the package represented by this\nVersionControl object.\n\nArgs:\ndest: the repository directory in which to install or update.", "source": "juraj-google-style"}
{"code": "def compute(self, x):\n        \n        q_learning = copy(self.__greedy_q_learning)\n        q_learning.epsilon_greedy_rate = x[0]\n        q_learning.alpha_value = x[1]\n        q_learning.gamma_value = x[2]\n        if self.__init_state_key is not None:\n            q_learning.learn(state_key=self.__init_state_key, limit=int(x[3]))\n        else:\n            q_learning.learn(limit=x[3])\n        q_sum = q_learning.q_df.q_value.sum()\n        if q_sum != 0:\n            cost = q_learning.q_df.shape[0] / q_sum\n        else:\n            cost = q_learning.q_df.shape[0] / 1e-4\n\n        return cost", "docstring": "Compute cost.\n\nArgs:\nx:    `np.ndarray` of explanatory variables.\n\nReturns:\ncost", "source": "juraj-google-style"}
{"code": "def Write(self, output_writer):\n    \n    if self._title:\n      output_writer.Write('\n\n    if not self._columns:\n      self._columns = ['' for _ in range(0, self._number_of_columns)]\n\n    output_writer.Write(' | '.join(self._columns))\n    output_writer.Write('\\n')\n\n    output_writer.Write(' | '.join(['---' for _ in self._columns]))\n    output_writer.Write('\\n')\n\n    for values in self._rows:\n      values = ['{0!s}'.format(value) for value in values]\n      output_writer.Write(' | '.join(values))\n      output_writer.Write('\\n')\n\n    output_writer.Write('\\n')", "docstring": "Writes the table to the output writer.\n\nArgs:\noutput_writer (OutputWriter): output writer.", "source": "juraj-google-style"}
{"code": "def word_matches(s1, s2, n=3):\n    return __matches(s1, s2, word_ngrams, n=n)", "docstring": "Word-level n-grams that match between two strings\n\nArgs:\ns1: a string\ns2: another string\nn: an int for the n in n-gram\n\nReturns:\nset: the n-grams found in both strings", "source": "codesearchnet"}
{"code": "def get_video_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):\n    batch_size, frames, channel, height, width = pixel_values.shape\n    pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)\n    vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)\n    image_embeds = vision_outputs[0]\n    image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n    query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n    query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)\n    if qformer_attention_mask is None:\n        qformer_attention_mask = torch.ones_like(qformer_input_ids)\n    qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)\n    qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)\n    qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)\n    query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)\n    query_output = query_outputs[0][:, :query_tokens.size(1), :]\n    language_model_inputs = self.language_projection(query_output)\n    language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)\n    if return_dict:\n        return (language_model_inputs, vision_outputs, query_outputs)\n    return language_model_inputs", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def symm_reduce(self, coords_set, threshold=1e-6):\n        \n        surf_sg = SpacegroupAnalyzer(self.slab, 0.1)\n        symm_ops = surf_sg.get_symmetry_operations()\n        unique_coords = []\n        \n        coords_set = [self.slab.lattice.get_fractional_coords(coords)\n                      for coords in coords_set]\n        for coords in coords_set:\n            incoord = False\n            for op in symm_ops:\n                if in_coord_list_pbc(unique_coords, op.operate(coords),\n                                     atol=threshold):\n                    incoord = True\n                    break\n            if not incoord:\n                unique_coords += [coords]\n        \n        return [self.slab.lattice.get_cartesian_coords(coords)\n                for coords in unique_coords]", "docstring": "Reduces the set of adsorbate sites by finding removing\nsymmetrically equivalent duplicates\n\nArgs:\ncoords_set: coordinate set in cartesian coordinates\nthreshold: tolerance for distance equivalence, used\nas input to in_coord_list_pbc for dupl. checking", "source": "juraj-google-style"}
{"code": "def from_file(cls, filename):\n        \n        with zopen(filename) as f:\n            return cls.from_string(f.read())", "docstring": "Read an Fiesta input from a file. Currently tested to work with\nfiles generated from this class itself.\n\nArgs:\nfilename: Filename to parse.\n\nReturns:\nFiestaInput object", "source": "juraj-google-style"}
{"code": "def _ReadMemberHeader(self, file_object):\n    file_offset = file_object.get_offset()\n    member_header = self._ReadStructure(file_object, file_offset, self._MEMBER_HEADER_SIZE, self._MEMBER_HEADER, 'member header')\n    if (member_header.signature != self._GZIP_SIGNATURE):\n        raise errors.FileFormatError('Unsupported signature: 0x{0:04x}.'.format(member_header.signature))\n    if (member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE):\n        raise errors.FileFormatError('Unsupported compression method: {0:d}.'.format(member_header.compression_method))\n    self.modification_time = member_header.modification_time\n    self.operating_system = member_header.operating_system\n    if (member_header.flags & self._FLAG_FEXTRA):\n        file_offset = file_object.get_offset()\n        extra_field_data_size = self._ReadStructure(file_object, file_offset, self._UINT16LE_SIZE, self._UINT16LE, 'extra field data size')\n        file_object.seek(extra_field_data_size, os.SEEK_CUR)\n    if (member_header.flags & self._FLAG_FNAME):\n        file_offset = file_object.get_offset()\n        string_value = self._ReadString(file_object, file_offset, self._CSTRING, 'original filename')\n        self.original_filename = string_value.rstrip('\\x00')\n    if (member_header.flags & self._FLAG_FCOMMENT):\n        file_offset = file_object.get_offset()\n        string_value = self._ReadString(file_object, file_offset, self._CSTRING, 'comment')\n        self.comment = string_value.rstrip('\\x00')\n    if (member_header.flags & self._FLAG_FHCRC):\n        file_object.read(2)", "docstring": "Reads a member header.\n\nArgs:\nfile_object (FileIO): file-like object to read from.\n\nRaises:\nFileFormatError: if the member header cannot be read.", "source": "codesearchnet"}
{"code": "def to_barrier_key(cls, barrier_index_key):\n    barrier_index_path = barrier_index_key.to_path()\n    (pipeline_kind, dependent_pipeline_id, unused_kind, purpose) = barrier_index_path[(- 4):]\n    barrier_record_path = (pipeline_kind, dependent_pipeline_id, _BarrierRecord.kind(), purpose)\n    return db.Key.from_path(*barrier_record_path)", "docstring": "Converts a _BarrierIndex key to a _BarrierRecord key.\n\nArgs:\nbarrier_index_key: db.Key for a _BarrierIndex entity.\n\nReturns:\ndb.Key for the corresponding _BarrierRecord entity.", "source": "codesearchnet"}
{"code": "def compose_tree_path(tree, issn=False):\n    \n    if issn:\n        return join(\n            \"/\",\n            ISSN_DOWNLOAD_KEY,\n            basename(tree.issn)\n        )\n\n    return join(\n        \"/\",\n        PATH_DOWNLOAD_KEY,\n        quote_plus(tree.path).replace(\"%2F\", \"/\"),\n    )", "docstring": "Compose absolute path for given `tree`.\n\nArgs:\npub (obj): :class:`.Tree` instance.\nissn (bool, default False): Compose URL using ISSN.\n\nReturns:\nstr: Absolute path of the tree, without server's address and protocol.", "source": "juraj-google-style"}
{"code": "def proto_refactor_files(dest_dir, namespace, namespace_path):\n    for (dn, dns, fns) in os.walk(dest_dir):\n        for fn in fns:\n            fn = os.path.join(dn, fn)\n            if fnmatch.fnmatch(fn, '*.proto'):\n                data = proto_refactor(fn, namespace, namespace_path)\n                with open(fn, 'w') as f:\n                    f.write(data)", "docstring": "This method runs the refactoring on all the Protobuf files in the\nDropsonde repo.\n\nArgs:\ndest_dir (str): directory where the Protobuf files lives.\nnamespace (str): the desired package name (i.e. \"dropsonde.py2\")\nnamespace_path (str): the desired path corresponding to the package\nname (i.e. \"dropsonde/py2\")", "source": "codesearchnet"}
{"code": "def Add(self, file_desc_proto):\n    proto_name = file_desc_proto.name\n    if (proto_name not in self._file_desc_protos_by_file):\n        self._file_desc_protos_by_file[proto_name] = file_desc_proto\n    elif (self._file_desc_protos_by_file[proto_name] != file_desc_proto):\n        raise DescriptorDatabaseConflictingDefinitionError(('%s already added, but with different descriptor.' % proto_name))\n    package = file_desc_proto.package\n    for message in file_desc_proto.message_type:\n        self._file_desc_protos_by_symbol.update(((name, file_desc_proto) for name in _ExtractSymbols(message, package)))\n    for enum in file_desc_proto.enum_type:\n        self._file_desc_protos_by_symbol['.'.join((package, enum.name))] = file_desc_proto\n    for extension in file_desc_proto.extension:\n        self._file_desc_protos_by_symbol['.'.join((package, extension.name))] = file_desc_proto\n    for service in file_desc_proto.service:\n        self._file_desc_protos_by_symbol['.'.join((package, service.name))] = file_desc_proto", "docstring": "Adds the FileDescriptorProto and its types to this database.\n\nArgs:\nfile_desc_proto: The FileDescriptorProto to add.\nRaises:\nDescriptorDatabaseConflictingDefinitionError: if an attempt is made to\nadd a proto with the same name but different definition than an\nexisiting proto in the database.", "source": "codesearchnet"}
{"code": "def SendSourceFiles(self, request, context):\n    return debug_service_pb2.EventReply()", "docstring": "Base implementation of the handling of SendSourceFiles calls.\n\nThe base implementation does nothing with the incoming request.\nOverride in an implementation of the server if necessary.\n\nArgs:\nrequest: A `DebuggedSourceFiles` proto, containing the path, content, size\nand last-modified timestamp of source files.\ncontext: Server context.\n\nReturns:\nA `EventReply` proto.", "source": "github-repos"}
{"code": "def _abort_workflow(pb: ProcessingBlock, workflow_stage_dict: dict,\n                    docker: DockerSwarmClient):\n    \n    \n    _abort_flag = False\n    if _abort_flag:\n        for workflow_stage in pb.workflow_stages:\n            for service_id, _ in \\\n                    workflow_stage_dict[workflow_stage.id]['services'].items():\n                docker.delete_service(service_id)\n                LOG.info(\"Deleted Service Id %s\", service_id)\n        return True\n    return False", "docstring": "Abort the workflow.\n\nTODO(BMo): This function currently does nothing as the abort flag\nis hardcoded to False!\n\nThis function is used by `execute_processing_block`.\n\nArgs:\npb (ProcessingBlock): Configuration database Processing block object.\nworkflow_stage_dict (dict): Workflow stage metadata dictionary.\ndocker (DockerClient): Docker Swarm Client object.\n\nReturns:\nbool, True if the stage is aborted, otherwise False.", "source": "juraj-google-style"}
{"code": "def CreateCampaignWithBiddingStrategy(client, bidding_strategy_id, budget_id):\n    campaign_service = client.GetService('CampaignService', version='v201809')\n    campaign = {'name': ('Interplanetary Cruise \n    operation = {'operator': 'ADD', 'operand': campaign}\n    response = campaign_service.mutate([operation])\n    new_campaign = response['value'][0]\n    print(('Campaign with name \"%s\", ID \"%s\" and bidding scheme ID \"%s\" was created.' % (new_campaign['name'], new_campaign['id'], new_campaign['biddingStrategyConfiguration']['biddingStrategyId'])))\n    return new_campaign", "docstring": "Create a Campaign with a Shared Bidding Strategy.\n\nArgs:\nclient: AdWordsClient the client to run the example with.\nbidding_strategy_id: string the bidding strategy ID to use.\nbudget_id: string the shared budget ID to use.\n\nReturns:\ndict An object representing a campaign.", "source": "codesearchnet"}
{"code": "def snapshot(self, name):\n        \n        return self.get_data(\n            \"volumes/%s/snapshots/\" % self.id,\n            type=POST,\n            params={\"name\": name}\n        )", "docstring": "Create a snapshot of the volume.\n\nArgs:\nname: string - a human-readable name for the snapshot", "source": "juraj-google-style"}
{"code": "def get_dos(self, partial_dos=False, npts_mu=10000, T=None):\n    spin = (self.data.spin if isinstance(self.data.spin, int) else 1)\n    (energies, densities, vvdos, cdos) = BL.BTPDOS(self.eband, self.vvband, npts=npts_mu)\n    if (T is not None):\n        densities = BL.smoothen_DOS(energies, densities, T)\n    tdos = Dos((self.efermi / units.eV), (energies / units.eV), {Spin(spin): densities})\n    if partial_dos:\n        tdos = self.get_partial_doses(tdos=tdos, npts_mu=npts_mu, T=T)\n    return tdos", "docstring": "Return a Dos object interpolating bands\n\nArgs:\npartial_dos: if True, projections will be interpolated as well\nand partial doses will be return. Projections must be available\nin the loader.\nnpts_mu: number of energy points of the Dos\nT: parameter used to smooth the Dos", "source": "codesearchnet"}
{"code": "def __wizard(rho, epsilon=None):\n    \n    if epsilon is None:\n        epsilon = 0.  \n\n    dim = len(rho)\n    rho_wizard = np.zeros([dim, dim])\n    v, w = np.linalg.eigh(rho)  \n    for j in range(dim):\n        if v[j] < epsilon:\n            tmp = v[j]\n            v[j] = 0.\n            \n            x = 0.\n            for k in range(j + 1, dim):\n                x += tmp / (dim - (j + 1))\n                v[k] = v[k] + tmp / (dim - (j + 1))\n    for j in range(dim):\n        rho_wizard = rho_wizard + v[j] * outer(w[:, j])\n    return rho_wizard", "docstring": "Returns the nearest positive semidefinite operator to an operator.\n\nThis method is based on reference [1]. It constrains positivity\nby setting negative eigenvalues to zero and rescaling the positive\neigenvalues.\n\nArgs:\nrho (array_like): the input operator.\nepsilon(float or None): threshold (>=0) for truncating small\neigenvalues values to zero.\n\nReturns:\nnumpy.array: A positive semidefinite numpy array.", "source": "juraj-google-style"}
{"code": "def tensordot(x1, x2, axes=2):\n    if any_symbolic_tensors((x1, x2)):\n        return Tensordot(axes=axes).symbolic_call(x1, x2)\n    return backend.numpy.tensordot(x1, x2, axes=axes)", "docstring": "Compute the tensor dot product along specified axes.\n\nArgs:\nx1: First tensor.\nx2: Second tensor.\naxes: - If an integer, N, sum over the last N axes of `x1` and the\nfirst N axes of `x2` in order. The sizes of the corresponding\naxes must match.\n- Or, a list of axes to be summed over, first sequence applying\nto `x1`, second to `x2`. Both sequences must be of the\nsame length.\n\nReturns:\nThe tensor dot product of the inputs.", "source": "github-repos"}
{"code": "def has_checked_field(self, locator, **kwargs):\n        \n\n        kwargs[\"checked\"] = True\n        return self.has_selector(\"field\", locator, **kwargs)", "docstring": "Checks if the page or current node has a radio button or checkbox with the given label,\nvalue, or id, that is currently checked.\n\nArgs:\nlocator (str): The label, name, or id of a checked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it exists.", "source": "juraj-google-style"}
{"code": "def read(self, size=None):\n    \n    if not self._is_open:\n      raise IOError('Not opened.')\n\n    return self._vshadow_store.read(size)", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"}
{"code": "def _ConvertBool(value, require_str):\n  \n  if require_str:\n    if value == 'true':\n      return True\n    elif value == 'false':\n      return False\n    else:\n      raise ParseError('Expected \"true\" or \"false\", not {0}.'.format(value))\n\n  if not isinstance(value, bool):\n    raise ParseError('Expected true or false without quotes.')\n  return value", "docstring": "Convert a boolean value.\n\nArgs:\nvalue: A scalar value to convert.\nrequire_str: If True, value must be a str.\n\nReturns:\nThe bool parsed.\n\nRaises:\nParseError: If a boolean value couldn't be consumed.", "source": "juraj-google-style"}
{"code": "def LockedWrite(self, cache_data):\n    if isinstance(cache_data, six.text_type):\n        cache_data = cache_data.encode(encoding=self._encoding)\n    with self._thread_lock:\n        if (not self._EnsureFileExists()):\n            return False\n        with self._process_lock_getter() as acquired_plock:\n            if (not acquired_plock):\n                return False\n            with open(self._filename, 'wb') as f:\n                f.write(cache_data)\n            return True", "docstring": "Acquire an interprocess lock and write a string.\n\nThis method safely acquires the locks then writes a string\nto the cache file. If the string is written successfully\nthe function will return True, if the write fails for any\nreason it will return False.\n\nArgs:\ncache_data: string or bytes to write.\n\nReturns:\nbool: success", "source": "codesearchnet"}
{"code": "def spec_filled(self, pos_args, kw_args):\n    req_names = self.arg_names\n    if (len(self.arg_defaults) > 0):\n        req_names = req_names[:(- len(self.arg_defaults))]\n    req = [x for x in req_names if (x not in kw_args)]\n    return (len(req) <= len(pos_args))", "docstring": "Check if we have enough arguments to call this function.\n\nArgs:\npos_args (list): A list of all the positional values we have.\nkw_args (dict): A dict of all of the keyword args we have.\n\nReturns:\nbool: True if we have a filled spec, False otherwise.", "source": "codesearchnet"}
{"code": "def dismiss_prompt(self, text=None, wait=None):\n        \n\n        with self.driver.dismiss_modal(\"prompt\", text=text, wait=wait):\n            yield", "docstring": "Execute the wrapped code, dismissing a prompt.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "juraj-google-style"}
{"code": "def generate_argument_parser(cls, tree, actions={}):\n    (cur_as, cur_subas) = tree\n    parser = devassistant_argparse.ArgumentParser(argument_default=argparse.SUPPRESS, usage=argparse.SUPPRESS, add_help=False)\n    cls.add_default_arguments_to(parser)\n    for arg in cur_as.args:\n        arg.add_argument_to(parser)\n    if (cur_subas or actions):\n        subparsers = cls._add_subparsers_required(parser, dest=settings.SUBASSISTANT_N_STRING.format('0'))\n        for subas in sorted(cur_subas, key=(lambda x: x[0].name)):\n            for alias in ([subas[0].name] + getattr(subas[0], 'aliases', [])):\n                cls.add_subassistants_to(subparsers, subas, level=1, alias=alias)\n        for (action, subactions) in sorted(actions.items(), key=(lambda x: x[0].name)):\n            cls.add_action_to(subparsers, action, subactions, level=1)\n    return parser", "docstring": "Generates argument parser for given assistant tree and actions.\n\nArgs:\ntree: assistant tree as returned by\ndevassistant.assistant_base.AssistantBase.get_subassistant_tree\nactions: dict mapping actions (devassistant.actions.Action subclasses) to their\nsubaction dicts\nReturns:\ninstance of devassistant_argparse.ArgumentParser (subclass of argparse.ArgumentParser)", "source": "codesearchnet"}
{"code": "def ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, chunk_selector, mode):\n    return layers.Serial(layers.Residual(layers.Map(layers.LayerNorm()), layers.ChunkedCausalMultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, chunk_selector=chunk_selector, mode=mode), layers.Map(layers.Dropout(rate=dropout, mode=mode))), layers.Map(ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)))", "docstring": "Transformer decoder layer operating on chunks.\n\nArgs:\nfeature_depth: int:  depth of embedding\nfeedforward_depth: int: depth of feed-forward layer\nnum_heads: int: number of attention heads\ndropout: float: dropout rate (how much to drop out)\nchunk_selector: a function from chunk number to list of chunks to attend.\nmode: str: 'train' or 'eval'\n\nReturns:\nthe layer.", "source": "codesearchnet"}
{"code": "def get_gui_hint(self, hint):\n        \n        if hint == 'type':\n            \n            \n            if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0:\n                return 'bool'\n            \n            elif self.kwargs.get('action') == 'store_const':\n                return 'const'\n            return self.gui_hints.get('type', 'str')\n        elif hint == 'default':\n            hint_type = self.get_gui_hint('type')\n            hint_default = self.gui_hints.get('default', None)\n            arg_default = self.kwargs.get('default', None)\n            preserved_value = None\n            if 'preserved' in self.kwargs:\n                preserved_value = config_manager.get_config_value(self.kwargs['preserved'])\n\n            if hint_type == 'path':\n                if preserved_value is not None:\n                    default = preserved_value\n                elif hint_default is not None:\n                    default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir())\n                else:\n                    default = arg_default or '~'\n                return os.path.abspath(os.path.expanduser(default))\n            elif hint_type == 'bool':\n                return hint_default or arg_default or False\n            elif hint_type == 'const':\n                return hint_default or arg_default\n            else:\n                if hint_default == '$(whoami)':\n                    hint_default = getpass.getuser()\n                return preserved_value or hint_default or arg_default or ''", "docstring": "Returns the value for specified gui hint (or a sensible default value,\nif this argument doesn't specify the hint).\n\nArgs:\nhint: name of the hint to get value for\nReturns:\nvalue of the hint specified in yaml or a sensible default", "source": "juraj-google-style"}
{"code": "def _combine_eq_sets(eq_sets, operations):\n    UNIT = np.eye(3)\n\n    def all_equivalent_atoms_of_i(i, eq_sets, ops):\n        'WORKS INPLACE on operations\\n            '\n        visited = set([i])\n        tmp_eq_sets = {j: (eq_sets[j] - visited) for j in eq_sets[i]}\n        while tmp_eq_sets:\n            new_tmp_eq_sets = {}\n            for j in tmp_eq_sets:\n                if (j in visited):\n                    continue\n                visited.add(j)\n                for k in tmp_eq_sets[j]:\n                    new_tmp_eq_sets[k] = (eq_sets[k] - visited)\n                    if (i not in ops[k]):\n                        ops[k][i] = (np.dot(ops[j][i], ops[k][j]) if (k != i) else UNIT)\n                    ops[i][k] = ops[k][i].T\n            tmp_eq_sets = new_tmp_eq_sets\n        return (visited, ops)\n    eq_sets = copy.deepcopy(eq_sets)\n    new_eq_sets = {}\n    ops = copy.deepcopy(operations)\n    to_be_deleted = set()\n    for i in eq_sets:\n        if (i in to_be_deleted):\n            continue\n        (visited, ops) = all_equivalent_atoms_of_i(i, eq_sets, ops)\n        to_be_deleted |= (visited - {i})\n    for k in to_be_deleted:\n        eq_sets.pop(k, None)\n    return {'eq_sets': eq_sets, 'sym_ops': ops}", "docstring": "Combines the dicts of _get_equivalent_atom_dicts into one\n\nArgs:\neq_sets (dict)\noperations (dict)\n\nReturns:\ndict: The returned dictionary has two possible keys:\n\n``eq_sets``:\nA dictionary of indices mapping to sets of indices,\neach key maps to indices of all equivalent atoms.\nThe keys are guaranteed to be not equivalent.\n\n``sym_ops``:\nTwofold nested dictionary.\n``operations[i][j]`` gives the symmetry operation\nthat maps atom ``i`` unto ``j``.", "source": "codesearchnet"}
{"code": "def frame(self, locator=None, *args, **kwargs):\n        \n\n        self.switch_to_frame(self._find_frame(locator, *args, **kwargs))\n        try:\n            yield\n        finally:\n            self.switch_to_frame(\"parent\")", "docstring": "Execute the wrapped code within the given iframe using the given frame or frame name/id.\nMay not be supported by all drivers.\n\nArgs:\nlocator (str | Element, optional): The name/id of the frame or the frame's element.\nDefaults to the only frame in the document.", "source": "juraj-google-style"}
{"code": "def stop(self, **kwargs):\n        \n        return self.client.api.stop(self.id, **kwargs)", "docstring": "Stops a container. Similar to the ``docker stop`` command.\n\nArgs:\ntimeout (int): Timeout in seconds to wait for the container to\nstop before sending a ``SIGKILL``. Default: 10\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"}
{"code": "def _get_other_names(self, line):\n        \n        m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE)\n        if m:\n            self.other_names.append(m.group(1).strip())", "docstring": "Parse and extract any other names that might be recorded for the compound\n\nArgs:\nline (str): line of the msp file", "source": "juraj-google-style"}
{"code": "def AddExtensionDescriptor(self, extension):\n    if (not (isinstance(extension, descriptor.FieldDescriptor) and extension.is_extension)):\n        raise TypeError('Expected an extension descriptor.')\n    if (extension.extension_scope is None):\n        self._toplevel_extensions[extension.full_name] = extension\n    try:\n        existing_desc = self._extensions_by_number[extension.containing_type][extension.number]\n    except KeyError:\n        pass\n    else:\n        if (extension is not existing_desc):\n            raise AssertionError(('Extensions \"%s\" and \"%s\" both try to extend message type \"%s\" with field number %d.' % (extension.full_name, existing_desc.full_name, extension.containing_type.full_name, extension.number)))\n    self._extensions_by_number[extension.containing_type][extension.number] = extension\n    self._extensions_by_name[extension.containing_type][extension.full_name] = extension\n    if _IsMessageSetExtension(extension):\n        self._extensions_by_name[extension.containing_type][extension.message_type.full_name] = extension", "docstring": "Adds a FieldDescriptor describing an extension to the pool.\n\nArgs:\nextension: A FieldDescriptor.\n\nRaises:\nAssertionError: when another extension with the same number extends the\nsame message.\nTypeError: when the specified extension is not a\ndescriptor.FieldDescriptor.", "source": "codesearchnet"}
{"code": "def send_notice(self, room_id, text_content, timestamp=None):\n        \n        body = {\n            \"msgtype\": \"m.notice\",\n            \"body\": text_content\n        }\n        return self.send_message_event(room_id, \"m.room.message\", body,\n                                       timestamp=timestamp)", "docstring": "Perform PUT /rooms/$room_id/send/m.room.message with m.notice msgtype\n\nArgs:\nroom_id (str): The room ID to send the event in.\ntext_content (str): The m.notice body to send.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def imag(input, name=None):\n    with ops.name_scope(name, 'Imag', [input]) as name:\n        input = ops.convert_to_tensor(input, name='input')\n        if input.dtype.is_complex:\n            return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n        else:\n            return array_ops.zeros_like(input)", "docstring": "Returns the imaginary part of a complex (or real) tensor.\n\nGiven a tensor `input`, this operation returns a tensor of type `float` that\nis the imaginary part of each element in `input` considered as a complex\nnumber. If `input` is real, a tensor of all zeros is returned.\n\nFor example:\n\n```python\nx = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\ntf.math.imag(x)  # [4.75, 5.75]\n```\n\nArgs:\ninput: A `Tensor`. Must be one of the following types: `float`, `double`,\n`complex64`, `complex128`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of type `float32` or `float64`.", "source": "github-repos"}
{"code": "def readCmd(cls, cmd):\n    args = shlex.split(cmd)\n    proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n    (proc_stdout, proc_stderr) = proc.communicate(input=None)\n    return proc_stdout.decode()", "docstring": "run command and return the str format stdout\n\nArgs:\ncmd: string\nReturns:\nstr: what the command's echo", "source": "codesearchnet"}
{"code": "def register_sub_command(self, sub_command, additional_ids=[]):\n        \n        self.__register_sub_command(sub_command, sub_command.command_desc().command)\n        self.__additional_ids.update(additional_ids)\n        for id in additional_ids:\n            self.__register_sub_command(sub_command, id)", "docstring": "Register a command as a subcommand.\nIt will have it's CommandDesc.command string used as id. Additional ids can be provided.\n\nArgs:\nsub_command (CommandBase): Subcommand to register.\nadditional_ids (List[str]): List of additional ids. Can be empty.", "source": "juraj-google-style"}
{"code": "def conv_block(x, growth_rate, name):\n    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n    x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(x)\n    x1 = layers.Activation('relu', name=name + '_0_relu')(x1)\n    x1 = layers.Conv2D(4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(x1)\n    x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x1)\n    x1 = layers.Activation('relu', name=name + '_1_relu')(x1)\n    x1 = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(x1)\n    x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])\n    return x", "docstring": "A building block for a dense block.\n\nArgs:\nx: input tensor.\ngrowth_rate: float, growth rate at dense layers.\nname: string, block label.\n\nReturns:\nOutput tensor for the block.", "source": "github-repos"}
{"code": "def is45(msg):\n    if allzeros(msg):\n        return False\n    d = hex2bin(data(msg))\n    if wrongstatus(d, 1, 2, 3):\n        return False\n    if wrongstatus(d, 4, 5, 6):\n        return False\n    if wrongstatus(d, 7, 8, 9):\n        return False\n    if wrongstatus(d, 10, 11, 12):\n        return False\n    if wrongstatus(d, 13, 14, 15):\n        return False\n    if wrongstatus(d, 16, 17, 26):\n        return False\n    if wrongstatus(d, 27, 28, 38):\n        return False\n    if wrongstatus(d, 39, 40, 51):\n        return False\n    if (bin2int(d[51:56]) != 0):\n        return False\n    temp = temp45(msg)\n    if temp:\n        if ((temp > 60) or (temp < (- 80))):\n            return False\n    return True", "docstring": "Check if a message is likely to be BDS code 4,5.\n\nMeteorological hazard report\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"}
{"code": "def __fill_buffer(self, size=0):\n    \n    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)\n\n    self.__buffer = fetch_data(self.__blob_key, self.__position,\n                               self.__position + read_size - 1)\n    self.__buffer_position = 0\n    self.__eof = len(self.__buffer) < read_size", "docstring": "Fills the internal buffer.\n\nArgs:\nsize: Number of bytes to read. Will be clamped to\n[self.__buffer_size, MAX_BLOB_FETCH_SIZE].", "source": "juraj-google-style"}
{"code": "def fleet_id_to_slug(did):\n    try:\n        fleet_slug = IOTileFleetSlug(did)\n    except ValueError:\n        raise ArgumentError('Unable to recognize {} as a fleet id'.format(did))\n    return str(fleet_slug)", "docstring": "Converts a fleet id into a correct fleet slug.\n\nArgs:\ndid (long) : A fleet id\ndid (string) : A device slug in the form of XXXX, XXXX-XXXX-XXXX, g--XXXX, g--XXXX-XXXX-XXXX\nReturns:\nstr: The device slug in the g--XXXX-XXXX-XXX format\nRaises:\nArgumentError: if the ID is not in the [1, 16**12] range, or if not a valid string", "source": "codesearchnet"}
{"code": "def _should_invoke_v2_op():\n    if not _ops.executing_eagerly_outside_functions():\n        return False\n    if not _summary_ops_v2.has_default_writer():\n        warnings.warn('Cannot activate TF2 compatibility support for TF1 summary ops: default summary writer not found.')\n        return False\n    if _get_step_for_v2() is None:\n        warnings.warn('Cannot activate TF2 compatibility support for TF1 summary ops: global step not set. To set step for summary writer, use `tf.summary.SummaryWriter.as_default(step=_)`, `tf.summary.experimental.set_step()` or `tf.compat.v1.train.create_global_step()`.')\n        return False\n    return True", "docstring": "Check if v2 op can be invoked.\n\nWhen calling TF1 summary op in eager mode, if the following conditions are\nmet, v2 op will be invoked:\n- The outermost context is eager mode.\n- A default TF2 summary writer is present.\n- A step is set for the writer (using `tf.summary.SummaryWriter.as_default`,\n`tf.summary.experimental.set_step` or\n`tf.compat.v1.train.create_global_step`).\n\nReturns:\nA boolean indicating whether v2 summary op should be invoked.", "source": "github-repos"}
{"code": "def anm_score(self, x, y):\n        \n        gp = GaussianProcessRegressor().fit(x, y)\n        y_predict = gp.predict(x)\n        indepscore = normalized_hsic(y_predict - y, x)\n\n        return indepscore", "docstring": "Compute the fitness score of the ANM model in the x->y direction.\n\nArgs:\na (numpy.ndarray): Variable seen as cause\nb (numpy.ndarray): Variable seen as effect\n\nReturns:\nfloat: ANM fit score", "source": "juraj-google-style"}
{"code": "def _SkipFieldValue(tokenizer):\n  \n  \n  \n  if tokenizer.TryConsumeByteString():\n    while tokenizer.TryConsumeByteString():\n      pass\n    return\n\n  if (not tokenizer.TryConsumeIdentifier() and\n      not tokenizer.TryConsumeInt64() and\n      not tokenizer.TryConsumeUint64() and\n      not tokenizer.TryConsumeFloat()):\n    raise ParseError('Invalid field value: ' + tokenizer.token)", "docstring": "Skips over a field value.\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.\n\nRaises:\nParseError: In case an invalid field value is found.", "source": "juraj-google-style"}
{"code": "def _matrix_conv(self, m1, m2):\n    n = m1[0, 0, 0].shape.as_list()[0]\n    if n != m2[0, 0, 0].shape.as_list()[0]:\n        raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0, 0].shape={m1[0, 0, 0].shape} and m2[0, 0, 0].shape={m2[0, 0, 0].shape}.')\n    k = int(np.cbrt(len(m1)))\n    l = int(np.cbrt(len(m2)))\n    result = {}\n    size = k + l - 1\n    for i in range(size):\n        for j in range(size):\n            for r in range(size):\n                result[i, j, r] = array_ops.zeros([n, n], self.dtype)\n                for index1 in range(min(k, i + 1)):\n                    for index2 in range(min(k, j + 1)):\n                        for index3 in range(min(k, r + 1)):\n                            if i - index1 < l and j - index2 < l and (r - index3 < l):\n                                result[i, j, r] += math_ops.matmul(m1[index1, index2, index3], m2[i - index1, j - index2, r - index3])\n    return result", "docstring": "Matrix convolution.\n\nArgs:\nm1: is a k x k x k  dictionary, each element is a n x n matrix.\nm2: is a l x l x l dictionary, each element is a n x n matrix.\n\nReturns:\n(k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each\nelement is a n x n matrix.\nRaises:\nValueError: if the entries of m1 and m2 are of different dimensions.", "source": "github-repos"}
{"code": "def upsert_and_get(self, conflict_target: List, fields: Dict, index_predicate: str=None):\n        \n\n        return self.get_queryset().upsert_and_get(conflict_target, fields, index_predicate)", "docstring": "Creates a new record or updates the existing one\nwith the specified data and then gets the row.\n\nArguments:\nconflict_target:\nFields to pass into the ON CONFLICT clause.\n\nfields:\nFields to insert/update.\n\nindex_predicate:\nThe index predicate to satisfy an arbiter partial index.\n\nReturns:\nThe model instance representing the row\nthat was created/updated.", "source": "juraj-google-style"}
{"code": "def combine_reducers(reducers):\n    \n    final_reducers = {key: reducer\n                      for key, reducer in reducers.items()\n                      if hasattr(reducer, '__call__')}\n\n    sanity_error = None\n    try:\n        assert_reducer_sanity(final_reducers)\n    except Exception as e:\n        sanity_error = e\n\n    def combination(state=None, action=None):\n        if state is None:\n            state = {}\n        if sanity_error:\n            raise sanity_error\n\n        has_changed = False\n        next_state = {}\n        for key, reducer in final_reducers.items():\n            previous_state_for_key = state.get(key)\n            next_state_for_key = reducer(previous_state_for_key, action)\n            if next_state_for_key is None:\n                msg = get_undefined_state_error_message(key, action)\n                raise Exception(msg)\n            next_state[key] = next_state_for_key\n            has_changed = (has_changed or\n                           next_state_for_key != previous_state_for_key)\n        return next_state if has_changed else state\n\n    return combination", "docstring": "composition tool for creating reducer trees.\n\nArgs:\nreducers: dict with state keys and reducer functions\nthat are responsible for each key\n\nReturns:\na new, combined reducer function", "source": "juraj-google-style"}
{"code": "def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):\n    output = []\n    if not inputs_normalized:\n        with ops.colocate_with(clusters, ignore_existing=True):\n            clusters = nn_impl.l2_normalize(clusters, axis=1)\n    for inp in inputs:\n        with ops.colocate_with(inp, ignore_existing=True):\n            if not inputs_normalized:\n                inp = nn_impl.l2_normalize(inp, axis=1)\n            output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))\n    return output", "docstring": "Computes cosine distance between each input and each cluster center.\n\nArgs:\ninputs: list of input Tensor.\nclusters: cluster Tensor\ninputs_normalized: if True, it assumes that inp and clusters are\nnormalized and computes the dot product which is equivalent to the\ncosine distance. Else it L2 normalizes the inputs first.\n\nReturns:\nlist of Tensors, where each element corresponds to each element in inp.\nThe value is the distance of each row to all the cluster centers.", "source": "github-repos"}
{"code": "def _VerifyValues(self, pool_func, input_sizes, window, strides, padding, expected):\n    total_size = 1\n    for s in input_sizes:\n        total_size *= s\n    x = np.arange(1.0, total_size + 1, dtype=np.float32)\n    x = x.reshape(input_sizes)\n    with self.session() as sess, self.test_scope():\n        inputs = array_ops.placeholder(dtypes.float32)\n        t = pool_func(inputs, ksize=[1] + window + [1], strides=[1] + strides + [1], padding=padding)\n        vals = sess.run(t, {inputs: x})\n    actual = vals.flatten()\n    self.assertAllClose(expected, actual)", "docstring": "Verifies the output values of the pooling function.\n\nArgs:\npool_func: Function to be called: co.MaxPool, co.AvgPool.\ninput_sizes: Input tensor dimensions.\nwindow: Tuple of kernel dims: planes, rows, cols.\nstrides: Tuple of strides for dims: planes, rows, cols.\npadding: Padding type.\nexpected: An array containing the expected operation outputs.", "source": "github-repos"}
{"code": "def get_conda_root():\n    try:\n        conda_root = _import_conda_root()\n    except ImportError:\n        envs_dir = dirname(CONDA_PREFIX)\n        if (basename(envs_dir) == 'envs'):\n            conda_root = dirname(envs_dir)\n        else:\n            conda_root = _conda_root_from_conda_info()\n    return conda_root", "docstring": "Get the PREFIX of the conda installation.\n\nReturns:\nstr: the ROOT_PREFIX of the conda installation", "source": "codesearchnet"}
{"code": "def setViewModel(self, model):\n    if isinstance(model, DataFrameModel):\n        self.enableEditing(False)\n        self.uncheckButton()\n        selectionModel = self.tableView.selectionModel()\n        self.tableView.setModel(model)\n        model.dtypeChanged.connect(self.updateDelegate)\n        model.dataChanged.connect(self.updateDelegates)\n        del selectionModel", "docstring": "Sets the model for the enclosed TableView in this widget.\n\nArgs:\nmodel (DataFrameModel): The model to be displayed by\nthe Table View.", "source": "codesearchnet"}
{"code": "def _on_connection_open(self, connection):\n        \n        _log.info(\"Successfully opened connection to %s\", connection.params.host)\n        self._channel = connection.channel(on_open_callback=self._on_channel_open)", "docstring": "Callback invoked when the connection is successfully established.\n\nArgs:\nconnection (pika.connection.SelectConnection): The newly-estabilished\nconnection.", "source": "juraj-google-style"}
{"code": "def release(self):\n    if (not self.acquired):\n        return False\n    os.close(self.fd)\n    if os.path.exists(self.path):\n        os.remove(self.path)\n    self.acquired = False\n    return True", "docstring": "Cleans up the lockfile if it was acquired.\n\nArgs:\nself (JLock): the ``JLock`` instance\n\nReturns:\n``False`` if the lock was not released or the lock is not acquired,\notherwise ``True``.", "source": "codesearchnet"}
{"code": "def prepare_context(pipeline, context_in_string, context):\n    \n    logger.debug(\"starting\")\n\n    parsed_context = get_parsed_context(\n        pipeline=pipeline,\n        context_in_string=context_in_string)\n\n    context.update(parsed_context)\n\n    logger.debug(\"done\")", "docstring": "Prepare context for pipeline run.\n\nArgs:\npipeline: dict. Dictionary representing the pipeline.\ncontext_in_string: string. Argument string used to initialize context.\ncontext: pypyr.context.Context. Merge any new context generated from\ncontext_in_string into this context instance.\n\nReturns:\nNone. The context instance to use for the pipeline run is contained\nin the context arg, it's not passed back as a function return.", "source": "juraj-google-style"}
{"code": "def trace(self, data, callback=None):\n        \n\n        conn_id = self._find_connection(self.conn_string)\n\n        if conn_id is not None:\n            self.adapter.notify_event_nowait(self.conn_string, 'trace', data)\n\n        if callback is not None:\n            callback(conn_id is not None)", "docstring": "Queue data for tracing\n\nArgs:\ndata (bytearray, string): Unstructured data to trace to any\nconnected client.\ncallback (callable): An optional callback that will be called with\na bool value of True when this data actually gets traced.\nIf the client disconnects and the data is dropped instead,\ncallback will be called with False.", "source": "juraj-google-style"}
{"code": "def multilayer_fully_connected(images, labels):\n    images = pt.wrap(images)\n    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=1e-05):\n        return images.flatten().fully_connected(100).fully_connected(100).softmax_classifier(10, labels)", "docstring": "Creates a multi layer network of fully_connected layers.\n\nEach layer is 100 neurons.  Please change this to experiment with\narchitectures.\n\nArgs:\nimages: The input images.\nlabels: The labels as dense one-hot vectors.\nReturns:\nA softmax result.", "source": "codesearchnet"}
{"code": "def set_reconnect_parameters(self, interval, attempts, restore_state=True):\n    self._reconnect_attempts = max(0, attempts)\n    self._reconnect_interval = max(0, interval)\n    self._reconnect_restore_state = restore_state", "docstring": "Sets the behaviour of the automatic reconnect feature.\n\nWhen a connected SK8 is disconnected unexpectedly (in other words not by a\nuser-triggered action), an automatic attempt to reconnect to the device\ncan be made. If successful this will typically resume the connection with\nan interruption of only a few seconds.\n\nThis method allows the application to configure some aspects of the automatic\nreconnect functionality.\n\nArgs:\ninterval (float): time in seconds between successive attempts to reconnect.\nAlso applies to the delay between the initial disconnection and the first\nattempt to reconnect.\nattempts (int): the number of attempts to make to recreate the connection. This\ncan be set to zero in order to disable the reconnection feature.\nrestore_state (bool): if True, the streaming state of the device will also be\nrestored if possible. For example, the IMU configuration will be re-applied\nafter the reconnection attempt succeeds, to return the SK8 to the same\nstate it was in before the disconnection occurred.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def count_weights(scope=None, exclude=None, graph=None):\n    if scope:\n        scope = (scope if scope.endswith('/') else (scope + '/'))\n    graph = (graph or tf.get_default_graph())\n    vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n    if scope:\n        vars_ = [var for var in vars_ if var.name.startswith(scope)]\n    if exclude:\n        exclude = re.compile(exclude)\n        vars_ = [var for var in vars_ if (not exclude.match(var.name))]\n    shapes = [var.get_shape().as_list() for var in vars_]\n    return int(sum((np.prod(shape) for shape in shapes)))", "docstring": "Count learnable parameters.\n\nArgs:\nscope: Restrict the count to a variable scope.\nexclude: Regex to match variable names to exclude.\ngraph: Operate on a graph other than the current default graph.\n\nReturns:\nNumber of learnable parameters as integer.", "source": "codesearchnet"}
{"code": "def _get_resource_view(self, resource_view):\n    if isinstance(resource_view, dict):\n        resource_view = ResourceView(resource_view, configuration=self.configuration)\n    if isinstance(resource_view, ResourceView):\n        return resource_view\n    raise HDXError(('Type %s is not a valid resource view!' % type(resource_view).__name__))", "docstring": "Get resource view id\n\nArgs:\nresource_view (Union[ResourceView,Dict]): ResourceView metadata from a ResourceView object or dictionary\n\nReturns:\nResourceView: ResourceView object", "source": "codesearchnet"}
{"code": "def from_latents(self, latents: torch.Tensor):\n    quantized_representation = 0\n    quantized_latents = []\n    codes = []\n    codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers])\n    dims = torch.cumsum(codebook_dims_tensor, dim=0)\n    n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0]\n    for i in range(n_codebooks):\n        hidden_dim_j, hidden_dim_k = (dims[i], dims[i + 1])\n        quantized_latents_i, codes_i = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :])\n        quantized_latents.append(quantized_latents_i)\n        codes.append(codes_i)\n        quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i)\n        quantized_representation = quantized_representation + quantized_representation_i\n    return (quantized_representation, torch.cat(quantized_latents, dim=1))", "docstring": "Reconstructs the quantized representation from unquantized latents.\n\nArgs:\nlatents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`):\nContinuous representation of input after projection.\n\nReturns:\nquantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):\nQuantized representation of the full-projected space.\nquantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):\nQuantized representation of the latent space (continuous representation before quantization).", "source": "github-repos"}
{"code": "def RemoveTask(self, task):\n    with self._lock:\n        if (task.identifier not in self._tasks_abandoned):\n            raise KeyError('Task {0:s} was not abandoned.'.format(task.identifier))\n        if (not task.has_retry):\n            raise KeyError('Will not remove a task {0:s} without retry task.'.format(task.identifier))\n        del self._tasks_abandoned[task.identifier]\n        logger.debug('Removed task {0:s}.'.format(task.identifier))", "docstring": "Removes an abandoned task.\n\nArgs:\ntask (Task): task.\n\nRaises:\nKeyError: if the task was not abandoned or the task was abandoned and\nwas not retried.", "source": "codesearchnet"}
{"code": "def _filters_pb(self):\n    num_filters = len(self._field_filters)\n    if (num_filters == 0):\n        return None\n    elif (num_filters == 1):\n        return _filter_pb(self._field_filters[0])\n    else:\n        composite_filter = query_pb2.StructuredQuery.CompositeFilter(op=enums.StructuredQuery.CompositeFilter.Operator.AND, filters=[_filter_pb(filter_) for filter_ in self._field_filters])\n        return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter)", "docstring": "Convert all the filters into a single generic Filter protobuf.\n\nThis may be a lone field filter or unary filter, may be a composite\nfilter or may be :data:`None`.\n\nReturns:\ngoogle.cloud.firestore_v1beta1.types.\\\nStructuredQuery.Filter: A \"generic\" filter representing the\ncurrent query's filters.", "source": "codesearchnet"}
{"code": "def get_event_report(self, source=\"log\"):\n        \n        \n        ofile = {\n            \"output\": self.output_file,\n            \"log\": self.log_file}[source]\n\n        parser = events.EventsParser()\n\n        if not ofile.exists:\n            if not self.mpiabort_file.exists:\n                return None\n            else:\n                \n                abort_report = parser.parse(self.mpiabort_file.path)\n                return abort_report\n\n        try:\n            report = parser.parse(ofile.path)\n            \n\n            \n            if self.mpiabort_file.exists:\n                logger.critical(\"Found ABI_MPIABORTFILE!!!!!\")\n                abort_report = parser.parse(self.mpiabort_file.path)\n                if len(abort_report) != 1:\n                    logger.critical(\"Found more than one event in ABI_MPIABORTFILE\")\n\n                \n                \n                \n\n                \n                \n                last_abort_event = abort_report[-1]\n                if report and last_abort_event != report[-1]:\n                    report.append(last_abort_event)\n                else:\n                    report.append(last_abort_event)\n\n            return report\n\n        \n        except Exception as exc:\n            \n            msg = \"%s: Exception while parsing ABINIT events:\\n %s\" % (ofile, str(exc))\n            self.set_status(self.S_ABICRITICAL, msg=msg)\n            return parser.report_exception(ofile.path, exc)", "docstring": "Analyzes the main logfile of the calculation for possible Errors or Warnings.\nIf the ABINIT abort file is found, the error found in this file are added to\nthe output report.\n\nArgs:\nsource: \"output\" for the main output file,\"log\" for the log file.\n\nReturns:\n:class:`EventReport` instance or None if the source file file does not exist.", "source": "juraj-google-style"}
{"code": "def ParseFileEntry(self, parser_mediator, file_entry):\n    \n    index_file_parser = ChromeCacheIndexFileParser()\n\n    file_object = file_entry.GetFileObject()\n    try:\n      index_file_parser.ParseFileObject(parser_mediator, file_object)\n    except (IOError, errors.ParseError) as exception:\n      file_object.close()\n\n      display_name = parser_mediator.GetDisplayName()\n      raise errors.UnableToParseFile(\n          '[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format(\n              self.NAME, display_name, exception))\n\n    \n\n    try:\n      file_system = file_entry.GetFileSystem()\n      self._ParseIndexTable(\n          parser_mediator, file_system, file_entry,\n          index_file_parser.index_table)\n    finally:\n      file_object.close()", "docstring": "Parses Chrome Cache files.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_entry (dfvfs.FileEntry): file entry.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def __init__(self, name=None):\n    rr = gen_io_ops.whole_file_reader_v2(name=name)\n    super(WholeFileReader, self).__init__(rr, supports_serialize=True)", "docstring": "Create a WholeFileReader.\n\nArgs:\nname: A name for the operation (optional).", "source": "github-repos"}
{"code": "def profile_settings_args_install_json(self, ij, required):\n        \n\n        profile_args = {}\n        \n        for p in ij.get('params') or []:\n            \n            if p.get('required', False) != required and required is not None:\n                continue\n            if p.get('type').lower() == 'boolean':\n                profile_args[p.get('name')] = self._to_bool(p.get('default', False))\n            elif p.get('type').lower() == 'choice':\n                valid_values = '|'.join(self.expand_valid_values(p.get('validValues', [])))\n                profile_args[p.get('name')] = '[{}]'.format(valid_values)\n            elif p.get('type').lower() == 'multichoice':\n                profile_args[p.get('name')] = p.get('validValues', [])\n            elif p.get('name') in ['api_access_id', 'api_secret_key']:\n                \n                pass\n            else:\n                types = '|'.join(p.get('playbookDataType', []))\n                if types:\n                    profile_args[p.get('name')] = p.get('default', '<{}>'.format(types))\n                else:\n                    profile_args[p.get('name')] = p.get('default', '')\n        return profile_args", "docstring": "Return args based on install.json params.\n\nArgs:\nij (dict): The install.json contents.\nrequired (bool): If True only required args will be returned.\n\nReturns:\ndict: Dictionary of required or optional App args.", "source": "juraj-google-style"}
{"code": "def list_directories_in_directory(full_directory_path):\n    \n    directories = list()\n    for directory_name in __os.listdir(full_directory_path):\n        if __os.path.isdir(__os.path.join(full_directory_path, directory_name)):\n            directories.append(directory_name)\n    return directories", "docstring": "List the directories in a specified directory\nArgs:\nfull_directory_path: The full directory path to check, derive from the os module\n\nReturns: returns a list of directories", "source": "juraj-google-style"}
{"code": "def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):\n    \n    N_input = N_input or 1\n    N_output = N_output or 1\n    N_hidden = N_hidden or tuple()\n    if isinstance(N_hidden, (int, float, basestring)):\n        N_hidden = (int(N_hidden),)\n\n    hidden_layer_type = hidden_layer_type or tuple()\n    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))\n\n    if verbosity > 0:\n        print(N_hidden, ' layers of type ', hidden_layer_type)\n\n    assert(len(N_hidden) == len(hidden_layer_type))\n    nn = pb.structure.FeedForwardNetwork()\n\n    \n    nn.addInputModule(pb.structure.BiasUnit(name='bias'))\n    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))\n    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):\n        Nhid = int(Nhid)\n        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))\n    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))\n\n    \n    nn.addConnection(pb.structure.FullConnection(nn['bias'],  nn['hidden'] if N_hidden else nn['output']))\n    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))\n    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):\n        Nhid = int(Nhid)\n        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],\n                         nn['hidden-{}'.format(i + 1)]))\n    i = len(N_hidden) - 1\n    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))\n\n    nn.sortModules()\n    if FAST:\n        try:\n            nn.convertToFastNetwork()\n        except:\n            if verbosity > 0:\n                print('Unable to convert slow PyBrain NN to a fast ARAC network...')\n    if verbosity > 0:\n        print(nn.connections)\n    return nn", "docstring": "Build a neural net with the indicated input, hidden, and outout dimensions\n\nArguments:\nparams (dict or PyBrainParams namedtuple):\ndefault: {'N_hidden': 6}\n(this is the only parameter that affects the NN build)\n\nReturns:\nFeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers", "source": "juraj-google-style"}
{"code": "def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename='model.ckpt', scaffold=None, listeners=None, save_graph_def=True):\n    logging.info('Create CheckpointSaverHook.')\n    if saver is not None and scaffold is not None:\n        raise ValueError('You cannot provide both saver and scaffold.')\n    self._saver = saver\n    self._checkpoint_dir = checkpoint_dir\n    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n    self._scaffold = scaffold\n    self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)\n    self._listeners = listeners or []\n    self._steps_per_run = 1000000\n    self._save_graph_def = save_graph_def", "docstring": "Initializes a `CheckpointSaverHook`.\n\nArgs:\ncheckpoint_dir: `str`, base directory for the checkpoint files.\nsave_secs: `int`, save every N secs.\nsave_steps: `int`, save every N steps.\nsaver: `Saver` object, used for saving.\ncheckpoint_basename: `str`, base name for the checkpoint files.\nscaffold: `Scaffold`, use to get saver object.\nlisteners: List of `CheckpointSaverListener` subclass instances. Used for\ncallbacks that run immediately before or after this hook saves the\ncheckpoint.\nsave_graph_def: Whether to save the GraphDef and MetaGraphDef to\n`checkpoint_dir`. The GraphDef is saved after the session is created as\n`graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as\n`model.ckpt-*.meta`.\n\nRaises:\nValueError: One of `save_steps` or `save_secs` should be set.\nValueError: At most one of `saver` or `scaffold` should be set.", "source": "github-repos"}
{"code": "def _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata):\n    feeds = dict(((t.deref()._as_tf_output(), v) for t, v in feed_dict.items()))\n    fetches = [t._as_tf_output() for t in fetch_list]\n    targets = [op._c_op for op in target_list]\n\n    def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):\n        self._extend_graph()\n        return self._call_tf_sessionrun(options, feed_dict, fetch_list, target_list, run_metadata)\n\n    def _prun_fn(handle, feed_dict, fetch_list):\n        if target_list:\n            raise RuntimeError(f'partial_run() requires empty `target_list`. Received: target_list={target_list} (non-empty)')\n        return self._call_tf_sessionprun(handle, feed_dict, fetch_list)\n    if handle is None:\n        return self._do_call(_run_fn, feeds, fetches, targets, options, run_metadata)\n    else:\n        return self._do_call(_prun_fn, handle, feeds, fetches)", "docstring": "Runs a step based on the given fetches and feeds.\n\nArgs:\nhandle: a handle for partial_run. None if this is just a call to run().\ntarget_list: A list of operations to be run, but not fetched.\nfetch_list: A list of tensors to be fetched.\nfeed_dict: A dictionary that maps tensors to numpy ndarrays.\noptions: A (pointer to a) [`RunOptions`] protocol buffer, or None\nrun_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None\n\nReturns:\nA list of numpy ndarrays, corresponding to the elements of\n`fetch_list`.  If the ith element of `fetch_list` contains the\nname of an operation, the first Tensor output of that operation\nwill be returned for that element.\n\nRaises:\ntf.errors.OpError: Or one of its subclasses on error.", "source": "github-repos"}
{"code": "def GetDataStreamByPathSpec(self, path_spec):\n    file_entry = self.GetFileEntryByPathSpec(path_spec)\n    if (not file_entry):\n        return None\n    data_stream_name = getattr(path_spec, 'data_stream', None)\n    return file_entry.GetDataStream(data_stream_name)", "docstring": "Retrieves a data stream for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nDataStream: a data stream or None if not available.", "source": "codesearchnet"}
{"code": "def create_media_asset(access_token, name, options='0'):\n    path = '/Assets'\n    endpoint = ''.join([ams_rest_endpoint, path])\n    body = (((('{\"Name\": \"' + name) + '\", \"Options\": \"') + str(options)) + '\"}')\n    return do_ams_post(endpoint, path, body, access_token)", "docstring": "Create Media Service Asset.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nname (str): Media Service Asset Name.\noptions (str): Media Service Options.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"}
{"code": "def _process_v1_graph_mode_tensor(self, op_type, tensor, debug_tensor, tensor_debug_mode):\n    if op_type in ('Placeholder', 'PlaceholderWithDefault'):\n        self._placeholder_to_debug_tensor[tensor] = debug_tensor\n        return tensor\n    elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR and op_type != 'Const':\n        self._tensor_aliases[debug_tensor.name] = tensor.name\n        return debug_tensor\n    else:\n        with self._symbolic_tensor_counter_lock:\n            identity_name = 'tfdbg_identity_%d' % self._symbolic_tensor_counter\n        identity = array_ops.identity(tensor, name=identity_name)\n        identity.op._add_control_input(debug_tensor.op)\n        self._tensor_aliases[identity.name] = tensor.name\n        return identity", "docstring": "For V1 graph mode, determine what tensor to output from callback.\n\nArgs:\nop_type: Type of the op that outputs the original symbolic tensor.\ntensor: The original output symbolic tensor.\ndebug_tensor: The debugger-instrumented tensor.\ntensor_debug_mode: Debug mode used, a tfdbg TensorDebugMode enum.\n\nReturns:\nA symbolic tensor to be returned by the dumping op_callback.", "source": "github-repos"}
{"code": "def _get_api_call(self, function_name, *args):\n    \n    api_call = dedent() % {\n        'api_call': function_name,\n        'args': ', '.join(args)\n    }\n    script = '\\n'.join((api.API_SCRIPT, api_call))\n    try:\n      return self._browser.execute_async_script(script)\n    except TimeoutException:\n      \n      raise APIError", "docstring": "Runs an api call with javascript-formatted arguments.\n\nArgs:\nfunction_name: The name of the KindleAPI call to run.\n*args: Javascript-formatted arguments to pass to the API call.\n\nReturns:\nThe result of the API call.\n\nRaises:\nAPIError: If the API call fails or times out.", "source": "juraj-google-style"}
{"code": "def __lt__(self, other: 'TensorFluent') -> 'TensorFluent':\n        \n        return self._binary_op(self, other, tf.less, tf.float32)", "docstring": "Returns a TensorFluent for the less-then relational operator.\n\nArgs:\nself: The first operand.\nother: The second operand.", "source": "juraj-google-style"}
{"code": "def __toString(self, values):\n\t\t\n\t\tfor key in values:\n\t\t\tif not values[key] is str:\n\t\t\t\tvalues[key] = str(values[key])\n\t\treturn values", "docstring": "Will replace dict values with string values\n\nArgs:\nvalues (dict): Dictionary of values\n\nReturns:\nUpdated values dict", "source": "juraj-google-style"}
{"code": "def get_gdb_response(self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True):\n    self.verify_valid_gdb_subprocess()\n    if (timeout_sec < 0):\n        self.logger.warning('timeout_sec was negative, replacing with 0')\n        timeout_sec = 0\n    if USING_WINDOWS:\n        retval = self._get_responses_windows(timeout_sec)\n    else:\n        retval = self._get_responses_unix(timeout_sec)\n    if ((not retval) and raise_error_on_timeout):\n        raise GdbTimeoutError(('Did not get response from gdb after %s seconds' % timeout_sec))\n    else:\n        return retval", "docstring": "Get response from GDB, and block while doing so. If GDB does not have any response ready to be read\nby timeout_sec, an exception is raised.\n\nArgs:\ntimeout_sec (float): Maximum time to wait for reponse. Must be >= 0. Will return after\nraise_error_on_timeout (bool): Whether an exception should be raised if no response was found\nafter timeout_sec\n\nReturns:\nList of parsed GDB responses, returned from gdbmiparser.parse_response, with the\nadditional key 'stream' which is either 'stdout' or 'stderr'\n\nRaises:\nGdbTimeoutError if response is not received within timeout_sec\nValueError if select returned unexpected file number\nNoGdbProcessError if there is no gdb subprocess running", "source": "codesearchnet"}
{"code": "def splitext(path):\n    \n    \n    parent_path, pathname = split(path)\n    if pathname.startswith(\".\") and pathname.count(\".\") == 1:\n        return path, \"\"\n    if \".\" not in pathname:\n        return path, \"\"\n    pathname, ext = pathname.rsplit(\".\", 1)\n    path = join(parent_path, pathname)\n    return path, \".\" + ext", "docstring": "Split the extension from the path.\n\nArguments:\npath (str): A path to split.\n\nReturns:\n(str, str): A tuple containing the path and the extension.\n\nExample:\n>>> splitext('baz.txt')\n('baz', '.txt')\n>>> splitext('foo/bar/baz.txt')\n('foo/bar/baz', '.txt')\n>>> splitext('foo/bar/.foo')\n('foo/bar/.foo', '')", "source": "juraj-google-style"}
{"code": "def _call_api(self, method, params=None):\n        \n        url = self.url.format(method=method)\n        if not params:\n            params = {'token': self.token}\n        else:\n            params['token'] = self.token\n        logger.debug('Send request to %s', url)\n        response = requests.get(url, params=params).json()\n        if self.verify:\n            if not response['ok']:\n                msg = 'For {url} API returned this bad response {response}'\n                raise Exception(msg.format(url=url, response=response))\n        return response", "docstring": "Low-level method to call the Slack API.\n\nArgs:\nmethod: {str} method name to call\nparams: {dict} GET parameters\nThe token will always be added", "source": "juraj-google-style"}
{"code": "def _on_queue_declareok(self, frame):\n    _log.info('Successfully declared the %s queue', frame.method.queue)\n    for binding in self._bindings:\n        if (binding['queue'] == frame.method.queue):\n            for key in binding['routing_keys']:\n                _log.info('Asserting %s is bound to %s with the %s key', binding['queue'], binding['exchange'], key)\n                self._channel.queue_bind(callback=None, queue=binding['queue'], exchange=binding['exchange'], routing_key=key)\n            bc_args = dict(queue=frame.method.queue)\n            if (_pika_version < pkg_resources.parse_version('1.0.0b1')):\n                bc_args['consumer_callback'] = self._on_message\n            else:\n                bc_args['on_message_callback'] = self._on_message\n            tag = self._channel.basic_consume(**bc_args)\n            self._consumers[tag] = binding['queue']", "docstring": "Callback invoked when a queue is successfully declared.\n\nArgs:\nframe (pika.frame.Method): The message sent from the server.", "source": "codesearchnet"}
{"code": "def PrepareMatches(self, file_system):\n    if (self._location is not None):\n        self._location_segments = self._SplitPath(self._location, file_system.PATH_SEPARATOR)\n    elif (self._location_regex is not None):\n        path_separator = file_system.PATH_SEPARATOR\n        if (path_separator == '\\\\'):\n            path_separator = '\\\\\\\\'\n        self._location_segments = self._SplitPath(self._location_regex, path_separator)\n    if (self._location_segments is not None):\n        self._number_of_location_segments = len(self._location_segments)", "docstring": "Prepare find specification for matching.\n\nArgs:\nfile_system (FileSystem): file system.", "source": "codesearchnet"}
{"code": "def point_line_distance(point, start, end):\n    if (start == end):\n        return distance(point, start)\n    else:\n        un_dist = abs((((end.lat - start.lat) * (start.lon - point.lon)) - ((start.lat - point.lat) * (end.lon - start.lon))))\n        n_dist = sqrt((((end.lat - start.lat) ** 2) + ((end.lon - start.lon) ** 2)))\n        if (n_dist == 0):\n            return 0\n        else:\n            return (un_dist / n_dist)", "docstring": "Distance from a point to a line, formed by two points\n\nArgs:\npoint (:obj:`Point`)\nstart (:obj:`Point`): line point\nend (:obj:`Point`): line point\nReturns:\nfloat: distance to line, in degrees", "source": "codesearchnet"}
{"code": "def dry_bulb_temperature(self, value=99.9):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `dry_bulb_temperature`'.format(value))\n            if value <= -70.0:\n                raise ValueError('value need to be greater -70.0 '\n                                 'for field `dry_bulb_temperature`')\n            if value >= 70.0:\n                raise ValueError('value need to be smaller 70.0 '\n                                 'for field `dry_bulb_temperature`')\n\n        self._dry_bulb_temperature = value", "docstring": "Corresponds to IDD Field `dry_bulb_temperature`\n\nArgs:\nvalue (float): value for IDD Field `dry_bulb_temperature`\nUnit: C\nvalue > -70.0\nvalue < 70.0\nMissing value: 99.9\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def value_shape(self):\n    for serialized_tensor in self.object_proto.attributes:\n        if serialized_tensor.name == constants.VARIABLE_VALUE_KEY:\n            return self._checkpoint.shape_map[serialized_tensor.checkpoint_key]\n    return None", "docstring": "The shape of the VARIABLE_VALUE tensor.\n\nReturns:\nIf found a TensorShape object, otherwise None.", "source": "github-repos"}
{"code": "def get_by(self, field, value):\n        \n        return self._client.get_by(field=field, value=value)", "docstring": "Gets all drive enclosures that match the filter.\n\nThe search is case-insensitive.\n\nArgs:\nField: field name to filter.\nValue: value to filter.\n\nReturns:\nlist: A list of drive enclosures.", "source": "juraj-google-style"}
{"code": "def get_permissions(cls):\n    perms = []\n    for (kls_name, kls) in cls.registry.items():\n        for method_name in cls.__dict__.keys():\n            if method_name.endswith('_view'):\n                perms.append(('%s.%s' % (kls_name, method_name)))\n    return perms", "docstring": "Generates permissions for all CrudView based class methods.\n\nReturns:\nList of Permission objects.", "source": "codesearchnet"}
{"code": "def remove(self, x):\n    \n    with tf.name_scope(\"pad_reduce/remove\"):\n      x_shape = x.get_shape().as_list()\n      x = tf.gather_nd(\n          x,\n          indices=self.nonpad_ids,\n      )\n      if not tf.executing_eagerly():\n        \n        \n        x.set_shape([None] + x_shape[1:])\n    return x", "docstring": "Remove padding from the given tensor.\n\nArgs:\nx (tf.Tensor): of shape [dim_origin,...]\n\nReturns:\na tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin", "source": "juraj-google-style"}
{"code": "def create(self, rs_params):\n        \n        repl_id = rs_params.get('id', None)\n        if repl_id is not None and repl_id in self:\n            raise ReplicaSetError(\n                \"replica set with id={id} already exists\".format(id=repl_id))\n        repl = ReplicaSet(rs_params)\n        self[repl.repl_id] = repl\n        return repl.repl_id", "docstring": "create new replica set\nArgs:\nrs_params - replica set configuration\nReturn repl_id which can use to take the replica set", "source": "juraj-google-style"}
{"code": "def FindFileContainingSymbol(self, symbol):\n    \n\n    symbol = _NormalizeFullyQualifiedName(symbol)\n    try:\n      return self._descriptors[symbol].file\n    except KeyError:\n      pass\n\n    try:\n      return self._enum_descriptors[symbol].file\n    except KeyError:\n      pass\n\n    try:\n      return self._FindFileContainingSymbolInDb(symbol)\n    except KeyError:\n      pass\n\n    try:\n      return self._file_desc_by_toplevel_extension[symbol]\n    except KeyError:\n      pass\n\n    \n    message_name, _, extension_name = symbol.rpartition('.')\n    try:\n      message = self.FindMessageTypeByName(message_name)\n      assert message.extensions_by_name[extension_name]\n      return message.file\n\n    except KeyError:\n      raise KeyError('Cannot find a file containing %s' % symbol)", "docstring": "Gets the FileDescriptor for the file containing the specified symbol.\n\nArgs:\nsymbol: The name of the symbol to search for.\n\nReturns:\nA FileDescriptor that contains the specified symbol.\n\nRaises:\nKeyError: if the file cannot be found in the pool.", "source": "juraj-google-style"}
{"code": "def frombase(path1, path2):\n    \n    \n    if not isparent(path1, path2):\n        raise ValueError(\"path1 must be a prefix of path2\")\n    return path2[len(path1) :]", "docstring": "Get the final path of ``path2`` that isn't in ``path1``.\n\nArguments:\npath1 (str): A PyFilesytem path.\npath2 (str): A PyFilesytem path.\n\nReturns:\nstr: the final part of ``path2``.\n\nExample:\n>>> frombase('foo/bar/', 'foo/bar/baz/egg')\n'baz/egg'", "source": "juraj-google-style"}
{"code": "def raw_decrypt(self, ciphertext):\n    if (not isinstance(ciphertext, int)):\n        raise TypeError(('Expected ciphertext to be an int, not: %s' % type(ciphertext)))\n    decrypt_to_p = ((self.l_function(powmod(ciphertext, (self.p - 1), self.psquare), self.p) * self.hp) % self.p)\n    decrypt_to_q = ((self.l_function(powmod(ciphertext, (self.q - 1), self.qsquare), self.q) * self.hq) % self.q)\n    return self.crt(decrypt_to_p, decrypt_to_q)", "docstring": "Decrypt raw ciphertext and return raw plaintext.\n\nArgs:\nciphertext (int): (usually from :meth:`EncryptedNumber.ciphertext()`)\nthat is to be Paillier decrypted.\n\nReturns:\nint: Paillier decryption of ciphertext. This is a positive\ninteger < :attr:`public_key.n`.\n\nRaises:\nTypeError: if ciphertext is not an int.", "source": "codesearchnet"}
{"code": "def all(self, data={}, **kwargs):\n    return super(Payment, self).all(data, **kwargs)", "docstring": "Fetch all Payment entities\n\nReturns:\nDictionary of Payment data", "source": "codesearchnet"}
{"code": "def CreateCustomizerFeed(client, feed_name):\n    ad_customizer_feed_service = client.GetService('AdCustomizerFeedService', 'v201809')\n    customizer_feed = {'feedName': feed_name, 'feedAttributes': [{'type': 'STRING', 'name': 'Name'}, {'type': 'STRING', 'name': 'Price'}, {'type': 'DATE_TIME', 'name': 'Date'}]}\n    feed_service_operation = {'operator': 'ADD', 'operand': customizer_feed}\n    response = ad_customizer_feed_service.mutate([feed_service_operation])\n    if (response and ('value' in response)):\n        feed = response['value'][0]\n        feed_data = {'feedId': feed['feedId'], 'nameId': feed['feedAttributes'][0]['id'], 'priceId': feed['feedAttributes'][1]['id'], 'dateId': feed['feedAttributes'][2]['id']}\n        (print('Feed with name \"%s\" and ID %s was added with:\\n\\tName attribute ID %s and price attribute ID %s and date attributeID %s') % (feed['feedName'], feed['feedId'], feed_data['nameId'], feed_data['priceId'], feed_data['dateId']))\n        return feed\n    else:\n        raise errors.GoogleAdsError('No feeds were added')", "docstring": "Creates a new AdCustomizerFeed.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_name: the name for the new AdCustomizerFeed.\n\nReturns:\nThe new AdCustomizerFeed.", "source": "codesearchnet"}
{"code": "def __init__(self, iterable=None, raise_on_duplicate=False):\n\t\t\n\t\tself._list = list()\n\t\tself._dict = dict()\n\t\tif iterable:\n\t\t\tif raise_on_duplicate:\n\t\t\t\tself._extend(iterable)\n\t\t\telse:\n\t\t\t\tself._update(iterable)", "docstring": "Create a setlist, initializing from iterable if present.\n\nArgs:\niterable (Iterable): Values to initialize the setlist with.\nraise_on_duplicate: Raise a ValueError if any duplicate values\nare present.", "source": "juraj-google-style"}
{"code": "def get(self, dash_id):\n        \n        data = json.loads(r_db.hmget(config.DASH_CONTENT_KEY, dash_id)[0])\n        return build_response(dict(data=data, code=200))", "docstring": "Read dashboard content.\n\nArgs:\ndash_id: dashboard id.\n\nReturns:\nA dict containing the content of that dashboard, not include the meta info.", "source": "juraj-google-style"}
{"code": "def _generate(cls, strategy, params):\n        \n        if cls._meta.abstract:\n            raise errors.FactoryError(\n                \"Cannot generate instances of abstract factory %(f)s; \"\n                \"Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract \"\n                \"is either not set or False.\" % dict(f=cls.__name__))\n\n        step = builder.StepBuilder(cls._meta, params, strategy)\n        return step.build()", "docstring": "generate the object.\n\nArgs:\nparams (dict): attributes to use for generating the object\nstrategy: the strategy to use", "source": "juraj-google-style"}
{"code": "def _RetryRequest(self, timeout=None, **request_args):\n    while True:\n        try:\n            now = time.time()\n            if (not timeout):\n                timeout = config.CONFIG['Client.http_timeout']\n            result = requests.request(**request_args)\n            result.raise_for_status()\n            if (not result.ok):\n                raise requests.RequestException(response=result)\n            return ((time.time() - now), result)\n        except IOError as e:\n            self.consecutive_connection_errors += 1\n            if (self.active_base_url is not None):\n                response = getattr(e, 'response', None)\n                if (getattr(response, 'status_code', None) == 406):\n                    raise\n                if (self.consecutive_connection_errors >= self.retry_error_limit):\n                    logging.info('Too many connection errors to %s, retrying another URL', self.active_base_url)\n                    self.active_base_url = None\n                    raise e\n                logging.debug('Unable to connect to frontend. Backing off %s seconds.', self.error_poll_min)\n                self.Wait(self.error_poll_min)\n            else:\n                raise e", "docstring": "Retry the request a few times before we determine it failed.\n\nSometimes the frontend becomes loaded and issues a 500 error to throttle the\nclients. We wait Client.error_poll_min seconds between each attempt to back\noff the frontend. Note that this does not affect any timing algorithm in the\nclient itself which is controlled by the Timer() class.\n\nArgs:\ntimeout: Timeout for retry.\n**request_args: Args to the requests.request call.\n\nReturns:\na tuple of duration, urllib.request.urlopen response.", "source": "codesearchnet"}
{"code": "def __init__(self, wmin, hmin,\n                 wmax=None, hmax=None,\n                 max_aspect_ratio=None):\n        \n        if max_aspect_ratio is None:\n            max_aspect_ratio = 9999999\n        self._init(locals())", "docstring": "Randomly crop a box of shape (h, w), sampled from [min, max] (both inclusive).\nIf max is None, will use the input image shape.\n\nArgs:\nwmin, hmin, wmax, hmax: range to sample shape.\nmax_aspect_ratio (float): the upper bound of ``max(w,h)/min(w,h)``.", "source": "juraj-google-style"}
{"code": "def get_node_ip_address(address=\"8.8.8.8:53\"):\n    \n    ip_address, port = address.split(\":\")\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        \n        \n        s.connect((ip_address, int(port)))\n        node_ip_address = s.getsockname()[0]\n    except Exception as e:\n        node_ip_address = \"127.0.0.1\"\n        \n        if e.errno == 101:\n            try:\n                \n                host_name = socket.getfqdn(socket.gethostname())\n                node_ip_address = socket.gethostbyname(host_name)\n            except Exception:\n                pass\n    finally:\n        s.close()\n\n    return node_ip_address", "docstring": "Determine the IP address of the local node.\n\nArgs:\naddress (str): The IP address and port of any known live service on the\nnetwork you care about.\n\nReturns:\nThe IP address of the current node.", "source": "juraj-google-style"}
{"code": "def build_graph(device, input_shape, axes, num_layers, mode, scale, train):\n    moment_shape = []\n    keep_dims = mode == 'py' or mode == 'slow'\n    if keep_dims:\n        for axis in range(len(input_shape)):\n            if axis in axes:\n                moment_shape.append(1)\n            else:\n                moment_shape.append(input_shape[axis])\n    else:\n        for axis in range(len(input_shape)):\n            if axis not in axes:\n                moment_shape.append(input_shape[axis])\n    with ops.device('/%s:0' % device):\n        tensor = variables.Variable(random_ops.truncated_normal(input_shape))\n        for _ in range(num_layers):\n            if train:\n                mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)\n            else:\n                mean = array_ops.zeros(moment_shape)\n                variance = array_ops.ones(moment_shape)\n            beta = variables.Variable(array_ops.zeros(moment_shape))\n            gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))\n            if mode == 'py':\n                tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)\n            elif mode == 'op':\n                tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)\n            elif mode == 'slow':\n                tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)\n        if train:\n            return gradients_impl.gradients([tensor], variables.trainable_variables())\n        else:\n            return [tensor]", "docstring": "Build a graph containing a sequence of batch normalizations.\n\nArgs:\ndevice: string, the device to run on.\ninput_shape: shape of the input tensor.\naxes: axes that are to be normalized across.\nnum_layers: number of batch normalization layers in the graph.\nmode: \"op\", \"py\" or \"slow\" depending on the implementation.\nscale: scale after normalization.\ntrain: if true, also run backprop.\n\nReturns:\nAn array of tensors to run()", "source": "github-repos"}
{"code": "def import_certificate(self, certificate_data, bay_number=None):\n        \n        uri = \"{}/https/certificaterequest\".format(self.data['uri'])\n\n        if bay_number:\n            uri += \"?bayNumber=%d\" % (bay_number)\n\n        headers = {'Content-Type': 'application/json'}\n        return self._helper.do_put(uri, certificate_data, -1, headers)", "docstring": "Imports a signed server certificate into the enclosure.\n\nArgs:\ncertificate_data: Dictionary with Signed certificate and type.\nbay_number: OA to which the signed certificate will be imported.\n\nReturns:\nEnclosure.", "source": "juraj-google-style"}
{"code": "def apply(\n        self,\n        func,\n        axis=0,\n        broadcast=None,\n        raw=False,\n        reduce=None,\n        result_type=None,\n        convert_dtype=True,\n        args=(),\n        **kwds\n    ):\n        \n        axis = self._get_axis_number(axis)\n        ErrorMessage.non_verified_udf()\n        if isinstance(func, string_types):\n            if axis == 1:\n                kwds[\"axis\"] = axis\n            result = self._string_function(func, *args, **kwds)\n            \n            if isinstance(result, BasePandasDataset):\n                return result._query_compiler\n            return result\n        elif isinstance(func, dict):\n            if axis == 1:\n                raise TypeError(\n                    \"(\\\"'dict' object is not callable\\\", \"\n                    \"'occurred at index {0}'\".format(self.index[0])\n                )\n            if len(self.columns) != len(set(self.columns)):\n                warnings.warn(\n                    \"duplicate column names not supported with apply().\",\n                    FutureWarning,\n                    stacklevel=2,\n                )\n        elif not callable(func) and not is_list_like(func):\n            raise TypeError(\"{} object is not callable\".format(type(func)))\n        query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)\n        return query_compiler", "docstring": "Apply a function along input axis of DataFrame.\n\nArgs:\nfunc: The function to apply\naxis: The axis over which to apply the func.\nbroadcast: Whether or not to broadcast.\nraw: Whether or not to convert to a Series.\nreduce: Whether or not to try to apply reduction procedures.\n\nReturns:\nSeries or DataFrame, depending on func.", "source": "juraj-google-style"}
{"code": "def commutator(A, B=None):\n    \n    if B:\n        return A * B - B * A\n    return SPre(A) - SPost(A)", "docstring": "Commutator of `A` and `B`\n\nIf ``B != None``, return the commutator :math:`[A,B]`, otherwise return\nthe super-operator :math:`[A,\\cdot]`.  The super-operator :math:`[A,\\cdot]`\nmaps any other operator ``B`` to the commutator :math:`[A, B] = A B - B A`.\n\nArgs:\nA: The first operator to form the commutator of.\nB: The second operator to form the commutator of, or None.\n\nReturns:\nSuperOperator: The linear superoperator :math:`[A,\\cdot]`", "source": "juraj-google-style"}
{"code": "def create_in_hdx(self):\n    self.check_required_fields()\n    if (not self._update_resource_view(log=True)):\n        self._save_to_hdx('create', 'title')", "docstring": "Check if resource view exists in HDX and if so, update it, otherwise create resource view\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def setEditorData(self, spinBox, index):\n        \n        if index.isValid():\n            value = index.model().data(index, QtCore.Qt.EditRole)\n            spinBox.setValue(value)", "docstring": "Sets the data to be displayed and edited by the editor from the data model item specified by the model index.\n\nArgs:\nspinBox (BigIntSpinbox): editor widget.\nindex (QModelIndex): model data index.", "source": "juraj-google-style"}
{"code": "def decode_function_result(self, function_name, data):\n        \n        description = self.function_data[function_name]\n        arguments = decode_abi(description['decode_types'], data)\n        return arguments", "docstring": "Return the function call result decoded.\n\nArgs:\nfunction_name (str): One of the existing functions described in the\ncontract interface.\ndata (bin): The encoded result from calling `function_name`.\n\nReturn:\nList[object]: The values returned by the call to `function_name`.", "source": "juraj-google-style"}
{"code": "def incident(self, name, owner=None, **kwargs):\n    return Incident(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Incident TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"}
{"code": "def _build(self, input_sequence, state):\n    input_shape = input_sequence.get_shape()\n    if (input_shape[0] is None):\n        raise ValueError('Time dimension of input (dim 0) must be staticallyknown.')\n    seq_length = int(input_shape[0])\n    (forward_state, backward_state) = state\n    output_sequence_f = []\n    output_sequence_b = []\n    with tf.name_scope('forward_rnn'):\n        core_state = forward_state\n        for i in six.moves.range(seq_length):\n            (core_output, core_state) = self._forward_core(input_sequence[(i, :)], core_state)\n            output_sequence_f.append((core_output, core_state))\n        output_sequence_f = nest.map_structure((lambda *vals: tf.stack(vals)), *output_sequence_f)\n    with tf.name_scope('backward_rnn'):\n        core_state = backward_state\n        for i in six.moves.range((seq_length - 1), (- 1), (- 1)):\n            (core_output, core_state) = self._backward_core(input_sequence[(i, :)], core_state)\n            output_sequence_b.append((core_output, core_state))\n        output_sequence_b = nest.map_structure((lambda *vals: tf.stack(vals)), *output_sequence_b)\n    return {'outputs': {'forward': output_sequence_f[0], 'backward': output_sequence_b[0]}, 'state': {'forward': output_sequence_f[1], 'backward': output_sequence_b[1]}}", "docstring": "Connects the BidirectionalRNN module into the graph.\n\nArgs:\ninput_sequence: tensor (time, batch, [feature_1, ..]). It must be\ntime_major.\nstate: tuple of states for the forward and backward cores.\n\nReturns:\nA dict with forward/backard states and output sequences:\n\n\"outputs\":{\n\"forward\": ...,\n\"backward\": ...},\n\"state\": {\n\"forward\": ...,\n\"backward\": ...}\n\nRaises:\nValueError: in case time dimension is not statically known.", "source": "codesearchnet"}
{"code": "def charspan(cls, start, end):\n    return cls(Lnk.CHARSPAN, (int(start), int(end)))", "docstring": "Create a Lnk object for a character span.\n\nArgs:\nstart: the initial character position (cfrom)\nend: the final character position (cto)", "source": "codesearchnet"}
{"code": "def get_signature_defs(tflite_model):\n    model = tflite_model\n    if not isinstance(tflite_model, bytearray):\n        model = bytearray(tflite_model)\n    serialized_signature_def_map = signature_def_util.GetSignatureDefMap(model)\n\n    def _deserialize(serialized):\n        signature_def = meta_graph_pb2.SignatureDef()\n        signature_def.ParseFromString(serialized)\n        return signature_def\n    return {k: _deserialize(v) for k, v in serialized_signature_def_map.items()}", "docstring": "Get SignatureDef dict from the Metadata of a TfLite flatbuffer buffer.\n\nArgs:\ntflite_model: TFLite model buffer to get the signature_def.\n\nReturns:\ndict containing serving names to SignatureDefs if exists, otherwise, empty\ndict.\n\nRaises:\nValueError:\ntflite_model buffer does not contain a valid TFLite model.\nDecodeError:\nSignatureDef cannot be parsed from TfLite SignatureDef metadata.", "source": "github-repos"}
{"code": "def send_invitation(self, invitation, **kwargs):\n    return self.email_message(invitation.invitee_identifier, self.invitation_subject, self.invitation_body, invitation.invited_by, **kwargs).send()", "docstring": "Sends an invitation message for a specific invitation.\n\nThis could be overridden to do other things, such as sending a confirmation\nemail to the sender.\n\nArgs:\ninvitation:\n\nReturns:", "source": "codesearchnet"}
{"code": "def imflip(img, direction='horizontal'):\n    assert (direction in ['horizontal', 'vertical'])\n    if (direction == 'horizontal'):\n        return np.flip(img, axis=1)\n    else:\n        return np.flip(img, axis=0)", "docstring": "Flip an image horizontally or vertically.\n\nArgs:\nimg (ndarray): Image to be flipped.\ndirection (str): The flip direction, either \"horizontal\" or \"vertical\".\n\nReturns:\nndarray: The flipped image.", "source": "codesearchnet"}
{"code": "def stop_app(self, package_name, clear=False):\n        \n        if clear:\n            self.adb_shell(['pm', 'clear', package_name])\n        else:\n            self.adb_shell(['am', 'force-stop', package_name])\n        return self", "docstring": "Stop application\n\nArgs:\npackage_name: string like com.example.app1\nclear: bool, remove user data\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def download(self, file: Optional[IO[bytes]]=None,\n                 duration_timeout: Optional[float]=None):\n        \n        yield from \\\n            self._current_session.download(file, duration_timeout=duration_timeout)", "docstring": "Download content.\n\nArgs:\nfile: An optional file object for the document contents.\nduration_timeout: Maximum time in seconds of which the\nentire file must be read.\n\nReturns:\nResponse: An instance of :class:`.http.request.Response`.\n\nSee :meth:`WebClient.session` for proper usage of this function.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def to_representation(self, instance):\n    if self.id_only():\n        return instance.pk\n    pk = getattr(instance, 'pk', None)\n    if ((not settings.ENABLE_SERIALIZER_OBJECT_CACHE) or (pk is None)):\n        return self._to_representation(instance)\n    else:\n        if (pk not in self.obj_cache):\n            self.obj_cache[pk] = self._to_representation(instance)\n        return self.obj_cache[pk]", "docstring": "Modified to_representation method. Optionally may cache objects.\n\nArguments:\ninstance: A model instance or data object.\nReturns:\nInstance ID if the serializer is meant to represent its ID.\nOtherwise, a tagged data dict representation.", "source": "codesearchnet"}
{"code": "def __init__(self, param_specs, non_tensor_params, prefer_static_fields):\n    self._param_specs = param_specs\n    self._non_tensor_params = non_tensor_params\n    self._prefer_static_fields = prefer_static_fields", "docstring": "Initializes a new `_LinearOperatorSpec`.\n\nArgs:\nparam_specs: Python `dict` of `tf.TypeSpec` instances that describe\nkwargs to the `LinearOperator`'s constructor that are `Tensor`-like or\n`CompositeTensor` subclasses.\nnon_tensor_params: Python `dict` containing non-`Tensor` and non-\n`CompositeTensor` kwargs to the `LinearOperator`'s constructor.\nprefer_static_fields: Python `tuple` of strings corresponding to the names\nof `Tensor`-like args to the `LinearOperator`s constructor that may be\nstored as static values, if known. These are typically shapes, indices,\nor axis values.", "source": "github-repos"}
{"code": "def greater(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Greater().symbolic_call(x1, x2)\n    return backend.numpy.greater(x1, x2)", "docstring": "Return the truth value of `x1 > x2` element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, element-wise comparison of `x1` and `x2`.", "source": "github-repos"}
{"code": "def _execute(self, command, params=None):\n        \n        if not params:\n            params = {}\n        params['id'] = self._id\n        return self._parent.execute(command, params)", "docstring": "Executes a command against the underlying HTML element.\n\nArgs:\ncommand: The name of the command to _execute as a string.\nparams: A dictionary of named parameters to send with the command.\n\nReturns:\nThe command's JSON response loaded into a dictionary object.", "source": "juraj-google-style"}
{"code": "def getmtime(self, path):\n        \n        try:\n            file_obj = self.filesystem.resolve(path)\n            return file_obj.st_mtime\n        except IOError:\n            self.filesystem.raise_os_error(errno.ENOENT, winerror=3)", "docstring": "Returns the modification time of the fake file.\n\nArgs:\npath: the path to fake file.\n\nReturns:\n(int, float) the modification time of the fake file\nin number of seconds since the epoch.\n\nRaises:\nOSError: if the file does not exist.", "source": "juraj-google-style"}
{"code": "def get_results(self) -> Iterable[PluginScanResult]:\n    for _ in range(self._get_current_processes_nb()):\n        self._task_queue.put(None)\n    for (hostname, hostname_queue) in self._hostname_queues_dict.items():\n        for i in range(len(self._processes_dict[hostname])):\n            hostname_queue.put(None)\n    received_task_results = 0\n    expected_task_results = (self._queued_tasks_nb + self._get_current_processes_nb())\n    while (received_task_results != expected_task_results):\n        result = self._result_queue.get()\n        self._result_queue.task_done()\n        received_task_results += 1\n        if (result is None):\n            pass\n        else:\n            (yield result)\n    self._task_queue.join()\n    self._result_queue.join()\n    for hostname_queue in self._hostname_queues_dict.values():\n        hostname_queue.join()\n    for process_list in self._processes_dict.values():\n        for process in process_list:\n            process.join()", "docstring": "Return the result of previously queued scan commands; new commands cannot be queued once this is called.\n\nReturns:\nThe results of all the scan commands previously queued. Each result will be an instance of the scan\ncorresponding command's PluginScanResult subclass. If there was an unexpected error while running the scan\ncommand, it will be a 'PluginRaisedExceptionScanResult' instance instead.", "source": "codesearchnet"}
{"code": "def after_request(response):\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')\n    return response", "docstring": "Modifies the response object prior to sending it to the client. Used to add CORS headers to the request\n\nArgs:\nresponse (response): Flask response object\n\nReturns:\n`None`", "source": "codesearchnet"}
{"code": "def build_dataset(instruction_dicts, dataset_from_file_fn, shuffle_files=False, parallel_reads=64):\n    if _no_examples_skipped(instruction_dicts):\n        instruction_ds = tf.data.Dataset.from_tensor_slices([d['filepath'] for d in instruction_dicts])\n        build_ds_from_instruction = dataset_from_file_fn\n    else:\n        instruction_ds = _build_instruction_ds(instruction_dicts)\n        build_ds_from_instruction = functools.partial(_build_ds_from_instruction, ds_from_file_fn=dataset_from_file_fn)\n    if shuffle_files:\n        instruction_ds = instruction_ds.shuffle(len(instruction_dicts))\n    ds = instruction_ds.interleave(build_ds_from_instruction, cycle_length=parallel_reads, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n    return ds", "docstring": "Constructs a `tf.data.Dataset` from TFRecord files.\n\nArgs:\ninstruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}\ncontaining the information about which files and which examples to use.\nThe boolean mask will be repeated and zipped with the examples from\nfilepath.\ndataset_from_file_fn: function returning a `tf.data.Dataset` given a\nfilename.\nshuffle_files: `bool`, Whether to shuffle the input filenames.\nparallel_reads: `int`, how many files to read in parallel.\n\nReturns:\n`tf.data.Dataset`", "source": "codesearchnet"}
{"code": "def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance):\n    \n    path = instance_to_path_map[config_instance]\n\n    \n    group = prop_tree\n    for elem in path[:-1]:\n        group = getattr(group, elem)\n\n    assert group._key == config_instance.parent.key\n    setattr(group, config_instance.key, config_instance.value)\n\n    \n    \n    \n    \n    term = getattr(group, config_instance.key)\n    try:\n        if hasattr(term, '_term'):\n            \n            term._term._config = config_instance\n            return\n    except KeyError:\n        \n        pass\n\n    try:\n        if hasattr(term, '_config'):\n            term._config = config_instance\n            return\n    except KeyError:\n        \n        pass\n    else:\n        pass", "docstring": "Finds appropriate term in the prop_tree and sets its value from config_instance.\n\nArgs:\nconfigs_map (dict): key is id of the config, value is Config instance (AKA cache of the configs)\nprop_tree (PropertyDictTree): poperty tree to populate.\nconfig_instance (Config):", "source": "juraj-google-style"}
{"code": "def _add_session(self, session, start_info, groups_by_name):\n    group_name = (start_info.group_name or session.name)\n    if (group_name in groups_by_name):\n        groups_by_name[group_name].sessions.extend([session])\n    else:\n        group = api_pb2.SessionGroup(name=group_name, sessions=[session], monitor_url=start_info.monitor_url)\n        for (key, value) in six.iteritems(start_info.hparams):\n            group.hparams[key].CopyFrom(value)\n        groups_by_name[group_name] = group", "docstring": "Adds a new Session protobuffer to the 'groups_by_name' dictionary.\n\nCalled by _build_session_groups when we encounter a new session. Creates\nthe Session protobuffer and adds it to the relevant group in the\n'groups_by_name' dict. Creates the session group if this is the first time\nwe encounter it.\n\nArgs:\nsession: api_pb2.Session. The session to add.\nstart_info: The SessionStartInfo protobuffer associated with the session.\ngroups_by_name: A str to SessionGroup protobuffer dict. Representing the\nsession groups and sessions found so far.", "source": "codesearchnet"}
{"code": "def call(self, sequence_output: tf.Tensor) -> tf.Tensor:\n    logits = (tf.einsum('bsj,j->bs', sequence_output, self.output_weights) + self.output_bias) / self.temperature\n    return logits", "docstring": "Computes logits per token\n\nArgs:\nsequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\nAlso known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the\nmodel.\n\nReturns:\nlogits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token.", "source": "github-repos"}
{"code": "def retrieve_bazel_version():\n    bazel_executable = shutil.which('bazel')\n    if bazel_executable is None:\n        bazel_executable = shutil.which('bazelisk')\n        if bazel_executable is None:\n            print('Cannot find bazel. Please install bazel/bazelisk.')\n            sys.exit(1)\n    stderr = open(os.devnull, 'wb')\n    curr_version = run_shell([bazel_executable, '--version'], allow_non_zero=True, stderr=stderr)\n    if curr_version.startswith('bazel '):\n        curr_version = curr_version.split('bazel ')[1]\n    curr_version_int = convert_version_to_int(curr_version)\n    if not curr_version_int:\n        print('WARNING: current bazel installation is not a release version.')\n        return curr_version\n    print('You have bazel %s installed.' % curr_version)\n    return curr_version", "docstring": "Retrieve installed bazel version (or bazelisk).\n\nReturns:\nThe bazel version detected.", "source": "github-repos"}
{"code": "def to_graphviz(self) -> str:\n    graph = 'digraph finite_state_machine { rankdir=LR; node [fixedsize=true];'\n    for (origin, dest) in self._transitions.items():\n        origin = origin.replace(' ', '_')\n        for d in dest:\n            d = d.replace(' ', '_')\n            graph += '{0} -> {1};'.format(origin, d)\n    graph += '}'\n    return graph", "docstring": "Converts the FSM behaviour structure to Graphviz syntax\n\nReturns:\nstr: the graph in Graphviz syntax", "source": "codesearchnet"}
{"code": "def add_node(self, binary_descriptor):\n        \n\n        try:\n            node_string = parse_binary_descriptor(binary_descriptor)\n        except:\n            self._logger.exception(\"Error parsing binary node descriptor: %s\", binary_descriptor)\n            return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM)  \n\n        try:\n            self.graph.add_node(node_string)\n        except NodeConnectionError:\n            return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE)\n        except ProcessingFunctionError:\n            return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION)\n        except ResourceUsageError:\n            return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE)\n\n        return Error.NO_ERROR", "docstring": "Add a node to the sensor_graph using a binary node descriptor.\n\nArgs:\nbinary_descriptor (bytes): An encoded binary node descriptor.\n\nReturns:\nint: A packed error code.", "source": "juraj-google-style"}
{"code": "def decode_dict(value_fields, client):\n    return {key: decode_value(value, client) for (key, value) in six.iteritems(value_fields)}", "docstring": "Converts a protobuf map of Firestore ``Value``-s.\n\nArgs:\nvalue_fields (google.protobuf.pyext._message.MessageMapContainer): A\nprotobuf map of Firestore ``Value``-s.\nclient (~.firestore_v1beta1.client.Client): A client that has\na document factory.\n\nReturns:\nDict[str, Union[NoneType, bool, int, float, datetime.datetime, \\\nstr, bytes, dict, ~google.cloud.Firestore.GeoPoint]]: A dictionary\nof native Python values converted from the ``value_fields``.", "source": "codesearchnet"}
{"code": "def format_sympy_expr(sympy_expr, functions=None):\n    if (functions is None):\n        functions = {}\n    str_expr = str(sympy_expr)\n    result = str_expr.replace(' ', '')\n    for (fn_name, char) in six.iteritems(functions):\n        result = result.replace(fn_name, char)\n    return result", "docstring": "Convert sympy expression into a string which can be encoded.\n\nArgs:\nsympy_expr: Any sympy expression tree or string.\nfunctions: Defines special functions. A dict mapping human readable string\nnames, like \"log\", \"exp\", \"sin\", \"cos\", etc., to single chars. Each\nfunction gets a unique token, like \"L\" for \"log\".\n\nReturns:\nA string representation of the expression suitable for encoding as a\nsequence input.", "source": "codesearchnet"}
{"code": "def get_params(width, height, distortion_scale):\n    half_height = int((height / 2))\n    half_width = int((width / 2))\n    topleft = (random.randint(0, int((distortion_scale * half_width))), random.randint(0, int((distortion_scale * half_height))))\n    topright = (random.randint(((width - int((distortion_scale * half_width))) - 1), (width - 1)), random.randint(0, int((distortion_scale * half_height))))\n    botright = (random.randint(((width - int((distortion_scale * half_width))) - 1), (width - 1)), random.randint(((height - int((distortion_scale * half_height))) - 1), (height - 1)))\n    botleft = (random.randint(0, int((distortion_scale * half_width))), random.randint(((height - int((distortion_scale * half_height))) - 1), (height - 1)))\n    startpoints = [(0, 0), ((width - 1), 0), ((width - 1), (height - 1)), (0, (height - 1))]\n    endpoints = [topleft, topright, botright, botleft]\n    return (startpoints, endpoints)", "docstring": "Get parameters for ``perspective`` for a random perspective transform.\n\nArgs:\nwidth : width of the image.\nheight : height of the image.\n\nReturns:\nList containing [top-left, top-right, bottom-right, bottom-left] of the orignal image,\nList containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.", "source": "codesearchnet"}
{"code": "def cdf(self, value, name='cdf'):\n    return self._call_cdf(value, name)", "docstring": "Cumulative distribution function.\n\nGiven random variable `X`, the cumulative distribution function `cdf` is:\n\n```none\ncdf(x) := P[X <= x]\n```\n\nArgs:\nvalue: `float` or `double` `Tensor`.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\ncdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with\nvalues of type `self.dtype`.", "source": "github-repos"}
{"code": "def take_at_most_n_seconds(time_s, func, *args, **kwargs):\n  \n  thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n  thread.start()\n  thread.join(time_s)\n  if thread.is_alive():\n    return False\n  return True", "docstring": "A function that returns whether a function call took less than time_s.\n\nNOTE: The function call is not killed and will run indefinitely if hung.\n\nArgs:\ntime_s: Maximum amount of time to take.\nfunc: Function to call.\n*args: Arguments to call the function with.\n**kwargs: Keyword arguments to call the function with.\nReturns:\nTrue if the function finished in less than time_s seconds.", "source": "juraj-google-style"}
{"code": "def simple_lmdb_settings(path, map_size=1e9, user_supplied_id=False):\n    \n\n    def decorator(cls):\n        provider = \\\n            ff.UserSpecifiedIdProvider(key='_id') \\\n            if user_supplied_id else ff.UuidProvider()\n\n        class Settings(ff.PersistenceSettings):\n            id_provider = provider\n            key_builder = ff.StringDelimitedKeyBuilder('|')\n            database = ff.LmdbDatabase(\n                    path, key_builder=key_builder, map_size=map_size)\n\n        class Model(cls, Settings):\n            pass\n\n        Model.__name__ = cls.__name__\n        Model.__module__ = cls.__module__\n        return Model\n\n    return decorator", "docstring": "Creates a decorator that can be used to configure sane default LMDB\npersistence settings for a model\n\nArgs:\npath (str): The path where the LMDB database files will be created\nmap_size (int): The amount of space to allot for the database", "source": "juraj-google-style"}
{"code": "def ProcessNewBlock(self, block):\n        \n        added = set()\n        changed = set()\n        deleted = set()\n\n        try:\n            \n            \n            for tx in block.FullTransactions:\n\n                for index, output in enumerate(tx.outputs):\n\n                    \n                    state = self.CheckAddressState(output.ScriptHash)\n\n                    if state & AddressState.InWallet > 0:\n\n                        \n                        key = CoinReference(tx.Hash, index)\n\n                        \n                        if key in self._coins.keys():\n                            coin = self._coins[key]\n                            coin.State |= CoinState.Confirmed\n                            changed.add(coin)\n                        else:\n                            newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Confirmed, transaction=tx)\n                            self._coins[key] = newcoin\n                            added.add(newcoin)\n\n                        if state & AddressState.WatchOnly > 0:\n                            self._coins[key].State |= CoinState.WatchOnly\n                            changed.add(self._coins[key])\n\n            \n            for tx in block.FullTransactions:\n\n                for input in tx.inputs:\n\n                    if input in self._coins.keys():\n                        if self._coins[input].Output.AssetId == Blockchain.SystemShare().Hash:\n                            coin = self._coins[input]\n                            coin.State |= CoinState.Spent | CoinState.Confirmed\n                            changed.add(coin)\n\n                        else:\n                            deleted.add(self._coins[input])\n                            del self._coins[input]\n\n            for claimTx in [tx for tx in block.Transactions if tx.Type == TransactionType.ClaimTransaction]:\n\n                for ref in claimTx.Claims:\n                    if ref in self._coins.keys():\n                        deleted.add(self._coins[ref])\n                        del self._coins[ref]\n\n            \n            self._current_height += 1\n\n            \n            \n            \n            self.OnProcessNewBlock(block, added, changed, deleted)\n\n            \n            \n            \n            if len(added) + len(deleted) + len(changed) > 0:\n                self.BalanceChanged()\n\n        except Exception as e:\n            traceback.print_stack()\n            traceback.print_exc()\n            logger.error(\"could not process %s \" % e)", "docstring": "Processes a block on the blockchain.  This should be done in a sequential order, ie block 4 should be\nonly processed after block 3.\n\nArgs:\nblock: (neo.Core.Block) a block on the blockchain.", "source": "juraj-google-style"}
{"code": "def _copy_trackable_to_cpu(self, object_map):\n    del object_map\n    raise NotImplementedError('Need to implement _copy_trackable_to_cpu() if the Trackable requires AsyncCheckpoint support.')", "docstring": "Creates a copy of this object onto CPU, also copies values over.\n\nNeeds to be overridden if the `Trackable` requires AsyncCheckpoint support.\nThe method first checks whether a copy of `self` is already created in\n`object_map`, and creates one if not already created. Then the method copies\nthe **values** of itself over to its copy mapped by `object_map`.\n\nArgs:\nobject_map: A dictionary that maps original Trackables to the copied\nTrackables, which reside in the CPU.", "source": "github-repos"}
{"code": "def GetFilter(cls, filter_name):\n    try:\n        filt_cls = cls.GetPlugin(filter_name)\n    except KeyError:\n        raise DefinitionError(('Filter %s does not exist.' % filter_name))\n    return filt_cls()", "docstring": "Return an initialized filter. Only initialize filters once.\n\nArgs:\nfilter_name: The name of the filter, as a string.\n\nReturns:\nan initialized instance of the filter.\n\nRaises:\nDefinitionError if the type of filter has not been defined.", "source": "codesearchnet"}
{"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n    assert already_has_special_tokens and token_ids_1 is None, 'You cannot use ``already_has_special_tokens=False`` with this tokenizer. Please use a slow (full python) tokenizer to activate this argument. Or set `return_special_tokens_mask=True` when calling the encoding method to get the special tokens mask in any tokenizer. '\n    all_special_ids = self.all_special_ids\n    special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]\n    return special_tokens_mask", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids of the first sequence.\ntoken_ids_1 (`List[int]`, *optional*):\nList of ids of the second sequence.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\nA list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"}
{"code": "def add_trunk_group(self, intf, value):\n        \n        string = 'switchport trunk group {}'.format(value)\n        return self.configure_interface(intf, string)", "docstring": "Adds the specified trunk group to the interface\n\nArgs:\nintf (str): The interface name to apply the trunk group to\nvalue (str): The trunk group value to apply to the interface\n\nReturns:\nTrue if the operation as successfully applied otherwise false", "source": "juraj-google-style"}
{"code": "def get_decomposition_energy(self, entry, pH, V):\n        \n        \n        if self._multielement and not isinstance(entry, MultiEntry):\n            possible_entries = self._generate_multielement_entries(\n                self._filtered_entries, forced_include=[entry])\n\n            \n            if entry.phase_type == \"solid\":\n                possible_entries = [e for e in possible_entries\n                                    if e.phase_type.count(\"Solid\") == 1]\n            possible_energies = [e.normalized_energy_at_conditions(pH, V)\n                                 for e in possible_entries]\n        else:\n            possible_energies = [entry.normalized_energy_at_conditions(pH, V)]\n\n        min_energy = np.min(possible_energies, axis=0)\n\n        \n        hull = self.get_hull_energy(pH, V)\n        return min_energy - hull", "docstring": "Finds decomposition to most stable entry\n\nArgs:\nentry (PourbaixEntry): PourbaixEntry corresponding to\ncompound to find the decomposition for\npH (float): pH at which to find the decomposition\nV (float): voltage at which to find the decomposition\n\nReturns:\nreaction corresponding to the decomposition", "source": "juraj-google-style"}
{"code": "def extend(self, base: 'KeySpec') -> 'KeySpec':", "docstring": "Extend base key specification and returns self.\n\nNOTE(daiyip): When a ``Field`` extends a base Field (from a base schema),\nit calls ``extend`` on both its ``KeySpec`` and ``ValueSpec``.\n``KeySpec.extend`` is to determine whether the ``Field`` key is allowed to\nbe extended, and ``ValueSpec.extend`` is to determine the final\n``ValueSpec`` after extension.\n\nArgs:\nbase: A base ``KeySpec`` object.\n\nReturns:\nAn ``KeySpec`` object derived from this key spec by extending the base.", "source": "github-repos"}
{"code": "def get_summary_description(node_def):\n    if node_def.op != 'TensorSummary':\n        raise ValueError(\"Can't get_summary_description on %s\" % node_def.op)\n    description_str = _compat.as_str_any(node_def.attr['description'].s)\n    summary_description = SummaryDescription()\n    _json_format.Parse(description_str, summary_description)\n    return summary_description", "docstring": "Given a TensorSummary node_def, retrieve its SummaryDescription.\n\nWhen a Summary op is instantiated, a SummaryDescription of associated\nmetadata is stored in its NodeDef. This method retrieves the description.\n\nArgs:\nnode_def: the node_def_pb2.NodeDef of a TensorSummary op\n\nReturns:\na summary_pb2.SummaryDescription\n\nRaises:\nValueError: if the node is not a summary op.\n\n@compatibility(eager)\nNot compatible with eager execution. To write TensorBoard\nsummaries under eager execution, use `tf.contrib.summary` instead.\n@end_compatibility", "source": "github-repos"}
{"code": "def gaussian(duration: int, amp: complex, sigma: float, name: str=None) -> SamplePulse:\n    center = (duration / 2)\n    zeroed_width = (duration + 2)\n    return _sampled_gaussian_pulse(duration, amp, center, sigma, zeroed_width=zeroed_width, rescale_amp=True, name=name)", "docstring": "r\"\"\"Generates unnormalized gaussian `SamplePulse`.\n\nCentered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nIntegrated area under curve is $\\Omega_g(amp, sigma) = amp \\times np.sqrt(2\\pi \\sigma^2)$\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude at `duration/2`.\nsigma: Width (standard deviation) of pulse.\nname: Name of pulse.", "source": "codesearchnet"}
{"code": "def _get_model_info(func, parent_class):\n    from transformers.models import auto as auto_module\n    if parent_class is not None:\n        model_name_lowercase = get_model_name(parent_class)\n    else:\n        model_name_lowercase = get_model_name(func)\n    if model_name_lowercase and model_name_lowercase not in getattr(getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE['config_class'][0]), PLACEHOLDER_TO_AUTO_MODULE['config_class'][1]):\n        model_name_lowercase = model_name_lowercase.replace('_', '-')\n    class_name = func.__qualname__.split('.')[0]\n    if model_name_lowercase is None:\n        config_class = None\n    else:\n        try:\n            config_class = getattr(getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE['config_class'][0]), PLACEHOLDER_TO_AUTO_MODULE['config_class'][1])[model_name_lowercase]\n        except KeyError:\n            if model_name_lowercase in HARDCODED_CONFIG_FOR_MODELS:\n                config_class = HARDCODED_CONFIG_FOR_MODELS[model_name_lowercase]\n            else:\n                config_class = 'ModelConfig'\n                print(f'🚨 Config not found for {model_name_lowercase}. You can manually add it to HARDCODED_CONFIG_FOR_MODELS in utils/args_doc.py')\n    return (model_name_lowercase, class_name, config_class)", "docstring": "Extract model information from a function or its parent class.\n\nArgs:\nfunc (`function`): The function to extract information from\nparent_class (`class`): Optional parent class of the function", "source": "github-repos"}
{"code": "def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:\n    for layer, heads in heads_to_prune.items():\n        self.encoder.layer[layer].attention.prune_heads(heads)", "docstring": "Prunes heads of the model.\n\nArgs:\nheads_to_prune (`dict`):\nSee base class `PreTrainedModel`. The input dictionary must have the following format: {layer_num:\nlist of heads to prune in this layer}", "source": "github-repos"}
{"code": "def highlight(__text: str, *, lexer: str = 'diff',\n              formatter: str = 'terminal') -> str:\n    \n    if sys.stdout.isatty():\n        lexer = get_lexer_by_name(lexer)\n        formatter = get_formatter_by_name(formatter)\n        __text = pyg_highlight(__text, lexer, formatter)\n    return __text", "docstring": "Highlight text highlighted using ``pygments``.\n\nReturns text untouched if colour output is not enabled.\n\nSee also: :pypi:`Pygments`\n\nArgs:\n__text: Text to highlight\nlexer: Jinja lexer to use\nformatter: Jinja formatter to use\nReturns:\nSyntax highlighted output, when possible", "source": "juraj-google-style"}
{"code": "def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent':\n        \n        if permutation == []:\n            return self\n        t = tf.transpose(self.tensor, permutation) if permutation != [] else self.tensor\n        scope = self.scope.as_list()\n        batch = self.batch\n        return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the transpose operation with given `permutation`.\n\nArgs:\npermutation: The output's shape permutation.\n\nReturns:\nA TensorFluent wrapping the transpose operation.", "source": "juraj-google-style"}
{"code": "def restore_state(self, state):\n        \n\n        super(ReferenceController, self).restore_state(state)\n\n        state_name = state.get('state_name')\n        state_version = state.get('state_version')\n\n        if state_name != self.STATE_NAME or state_version != self.STATE_VERSION:\n            raise ArgumentError(\"Invalid emulated device state name or version\", found=(state_name, state_version),\n                                expected=(self.STATE_NAME, self.STATE_VERSION))\n\n        self.app_info = state.get('app_info', (0, \"0.0\"))\n        self.os_info = state.get('os_info', (0, \"0.0\"))\n\n        \n        self.sensor_log.prepare_for_restore()\n\n        \n        self.remote_bridge.restore(state.get('remote_bridge', {}))\n        self.tile_manager.restore(state.get('tile_manager', {}))\n        self.config_database.restore(state.get('config_database', {}))\n        self.sensor_log.restore(state.get('sensor_log', {}))", "docstring": "Restore the current state of this emulated object.\n\nArgs:\nstate (dict): A previously dumped state produced by dump_state.", "source": "juraj-google-style"}
{"code": "def logistic(x: Union[(float, np.ndarray)], k: float, theta: float) -> Optional[float]:\n    if ((x is None) or (k is None) or (theta is None)):\n        return None\n    return (1 / (1 + np.exp(((- k) * (x - theta)))))", "docstring": "r\"\"\"\nStandard logistic function.\n\n.. math::\n\ny = \\frac {1} {1 + e^{-k (x - \\theta)}}\n\nArgs:\nx: :math:`x`\nk: :math:`k`\ntheta: :math:`\\theta`\n\nReturns:\n:math:`y`", "source": "codesearchnet"}
{"code": "def CanonicalPathToLocalPath(path):\n    path = path.replace('/\\\\', '\\\\')\n    path = path.replace('/', '\\\\')\n    m = re.match('\\\\\\\\([a-zA-Z]):(.*)$', path)\n    if m:\n        path = ('%s:\\\\%s' % (m.group(1), m.group(2).lstrip('\\\\')))\n    return path", "docstring": "r\"\"\"Converts the canonical paths as used by GRR to OS specific paths.\n\nDue to the inconsistencies between handling paths in windows we need to\nconvert a path to an OS specific version prior to using it. This function\nshould be called just before any OS specific functions.\n\nCanonical paths on windows have:\n- / instead of \\.\n- Begin with /X:// where X is the drive letter.\n\nArgs:\npath: A canonical path specification.\n\nReturns:\nA windows specific path.", "source": "codesearchnet"}
{"code": "def retrieve_reviewers(self, product):\n        \n        if not isinstance(product, self._product_cls):\n            raise TypeError(\n                \"Type of given product isn't acceptable:\", product,\n                \", expected:\", self._product_cls)\n        return list(self.graph.predecessors(product))", "docstring": "Retrieve reviewers who reviewed a given product.\n\nArgs:\nproduct: A product specifying reviewers.\n\nReturns:\nA list of reviewers who review the product.\n\nRaises:\nTypeError: when given product isn't instance of specified product\nclass when this graph is constructed.", "source": "juraj-google-style"}
{"code": "def titles(self, unique=False):\n    if unique:\n        return tools.uniqued((title for (_, title) in self.iterfiles()))\n    return [title for (_, title) in self.iterfiles()]", "docstring": "Return a list of all available spreadsheet titles.\n\nArgs:\nunique (bool): drop duplicates\nReturns:\nlist: list of title/name strings", "source": "codesearchnet"}
{"code": "def has_types(self, types, all_=True):\n        \n        func = all if all_ else any\n        return func([self.get_stim(t) for t in listify(types)])", "docstring": "Check whether the current component list matches all Stim types\nin the types argument.\n\nArgs:\ntypes (Stim, list): a Stim class or iterable of Stim classes.\nall_ (bool): if True, all input types must match; if False, at\nleast one input type must match.\n\nReturns:\nTrue if all passed types match at least one Stim in the component\nlist, otherwise False.", "source": "juraj-google-style"}
{"code": "def make_new_node(self, distance, angle):\n    return Node((((cos((- angle)) * distance) + self.pos[0]), ((sin((- angle)) * distance) + self.pos[1])))", "docstring": "Make a new node from an existing one.\n\nThis method creates a new node with a distance and angle given.\nThe position of the new node is calculated with:\nx2 = cos(-angle)*distance+x1\ny2 = sin(-angle)*distance+y1\n\nArgs:\ndistance (float): The distance of the original node to the new node.\nangle (rad): The angle between the old and new node, relative to the horizont.\n\nReturns:\nobject: The node with calculated poistion.", "source": "codesearchnet"}
{"code": "def validate_field_value_type(value_type, in_mapping_key=False, allow_forward_references=False):\n    if isinstance(value_type, str) or type_annotations.is_forward_ref(value_type):\n        if allow_forward_references:\n            return\n        else:\n            raise TypeError(f'Unresolved forward reference {value_type!r}')\n    if value_type in (int, float, str, bytes, bool, None, _NoneType, dtypes.DType):\n        return\n    elif value_type in (tensor.Tensor, tensor_shape.TensorShape) or (isinstance(value_type, type) and _issubclass(value_type, composite_tensor.CompositeTensor)):\n        if in_mapping_key:\n            raise TypeError(f'Mapping had a key {value_type.__name__!r} with type {type(value_type).__name__!r}')\n    elif type_annotations.is_generic_tuple(value_type) or type_annotations.is_generic_union(value_type):\n        type_args = type_annotations.get_generic_type_args(value_type)\n        if len(type_args) == 2 and type_args[1] is Ellipsis and type_annotations.is_generic_tuple(value_type):\n            validate_field_value_type(type_args[0], in_mapping_key, allow_forward_references)\n        else:\n            for arg in type_annotations.get_generic_type_args(value_type):\n                validate_field_value_type(arg, in_mapping_key, allow_forward_references)\n    elif type_annotations.is_generic_mapping(value_type):\n        key_type, value_type = type_annotations.get_generic_type_args(value_type)\n        validate_field_value_type(key_type, True, allow_forward_references)\n        validate_field_value_type(value_type, in_mapping_key, allow_forward_references)\n    elif isinstance(value_type, type):\n        raise TypeError(f'Unsupported type annotation {value_type.__name__!r}')\n    else:\n        raise TypeError(f'Unsupported type annotation {value_type!r}')", "docstring": "Checks that `value_type` contains only supported type annotations.\n\nArgs:\nvalue_type: The type annotation to check.\nin_mapping_key: True if `value_type` is nested in the key of a mapping.\nallow_forward_references: If false, then raise an exception if a\n`value_type` contains a forward reference (i.e., a string literal).\n\nRaises:\nTypeError: If `value_type` contains an unsupported type annotation.", "source": "github-repos"}
{"code": "def CallState(self, messages=None, next_state='', client_id=None, request_data=None, start_time=None):\n    if (messages is None):\n        messages = []\n    if (not next_state):\n        raise ValueError(\"next_state can't be empty.\")\n    request_state = rdf_flow_runner.RequestState(id=random.UInt32(), session_id=self.context.session_id, client_id=client_id, next_state=next_state)\n    if request_data:\n        request_state.data = rdf_protodict.Dict().FromDict(request_data)\n    self.QueueRequest(request_state, timestamp=start_time)\n    if ((not messages) or (not isinstance(messages[(- 1)], rdf_flows.GrrStatus))):\n        messages.append(rdf_flows.GrrStatus())\n    for (i, payload) in enumerate(messages):\n        if isinstance(payload, rdfvalue.RDFValue):\n            msg = rdf_flows.GrrMessage(session_id=self.session_id, request_id=request_state.id, response_id=(1 + i), auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED, payload=payload, type=rdf_flows.GrrMessage.Type.MESSAGE)\n            if isinstance(payload, rdf_flows.GrrStatus):\n                msg.type = rdf_flows.GrrMessage.Type.STATUS\n        else:\n            raise flow_runner.FlowRunnerError(('Bad message %s of type %s.' % (payload, type(payload))))\n        self.QueueResponse(msg, timestamp=start_time)\n    self.QueueNotification(session_id=self.session_id, timestamp=start_time)", "docstring": "This method is used to asynchronously schedule a new hunt state.\n\nThe state will be invoked in a later time and receive all the messages\nwe send.\n\nArgs:\nmessages: A list of rdfvalues to send. If the last one is not a GrrStatus,\nwe append an OK Status.\nnext_state: The state in this hunt to be invoked with the responses.\nclient_id: ClientURN to use in scheduled requests.\nrequest_data: Any dict provided here will be available in the RequestState\nprotobuf. The Responses object maintains a reference to this protobuf\nfor use in the execution of the state method. (so you can access this\ndata by responses.request).\nstart_time: Schedule the state at this time. This delays notification and\nmessages for processing into the future.\n\nRaises:\nValueError: on arguments error.", "source": "codesearchnet"}
{"code": "def remove_plugin(self, f):\n        \n        if f.endswith('.py'):\n            plugin_name = os.path.splitext(os.path.basename(f))[0]\n            print '- %s %sREMOVED' % (plugin_name, color.Red)\n            print '\\t%sNote: still in memory, restart Workbench to remove...%s' % \\\n                  (color.Yellow, color.Normal)", "docstring": "Remvoing a deleted plugin.\n\nArgs:\nf: the filepath for the plugin.", "source": "juraj-google-style"}
{"code": "def message_to_extension(msg: message.Message, extension_cls: Type[_T]) -> _T:\n    extension = extension_cls()\n    add_message_to_extension(msg, extension)\n    return extension", "docstring": "Converts an Extension profile into a generic Extension type.\n\nArgs:\nmsg: The Message to convert.\nextension_cls: The type of FHIR Extension to convert to.\n\nReturns:\nA an instance of extension_cls.", "source": "github-repos"}
{"code": "def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:\n        \n        return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))", "docstring": "Returns a partial scope with current action-fluents.\n\nArgs:\naction_fluents (Sequence[tf.Tensor]): The action fluents.\n\nReturns:\nA mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def lcm(*numbers):\n    \n    n = 1\n    for i in numbers:\n        n = (i * n) \n    return n", "docstring": "Return lowest common multiple of a sequence of numbers.\n\nArgs:\n\\*numbers: Sequence of numbers.\n\nReturns:\n(int) Lowest common multiple of numbers.", "source": "juraj-google-style"}
{"code": "def _get_cert_expiration_time(headers):\n    cache_control = headers.get('Cache-Control', '')\n    for entry in cache_control.split(','):\n        match = _MAX_AGE_REGEX.match(entry)\n        if match:\n            cache_time_seconds = int(match.group(1))\n            break\n    else:\n        return 0\n    age = headers.get('Age')\n    if (age is not None):\n        try:\n            age = int(age)\n        except ValueError:\n            age = 0\n        cache_time_seconds -= age\n    return max(0, cache_time_seconds)", "docstring": "Get the expiration time for a cert, given the response headers.\n\nGet expiration time from the headers in the result.  If we can't get\na time from the headers, this returns 0, indicating that the cert\nshouldn't be cached.\n\nArgs:\nheaders: A dict containing the response headers from the request to get\ncerts.\n\nReturns:\nAn integer with the number of seconds the cert should be cached.  This\nvalue is guaranteed to be >= 0.", "source": "codesearchnet"}
{"code": "def ProduceAnalysisReport(self, plugin):\n    \n    analysis_report = plugin.CompileReport(self)\n    if not analysis_report:\n      return\n\n    analysis_report.time_compiled = timelib.Timestamp.GetNow()\n\n    plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name)\n    if plugin_name:\n      analysis_report.plugin_name = plugin_name\n\n    if self._event_filter_expression:\n      \n      analysis_report.filter_string = self._event_filter_expression\n\n    self._storage_writer.AddAnalysisReport(analysis_report)\n\n    self.number_of_produced_analysis_reports += 1\n    self.number_of_produced_event_tags = (\n        self._storage_writer.number_of_event_tags)\n\n    self.last_activity_timestamp = time.time()", "docstring": "Produces an analysis report.\n\nArgs:\nplugin (AnalysisPlugin): plugin.", "source": "juraj-google-style"}
{"code": "def direct_normal_illuminance(self, value=999999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `direct_normal_illuminance`'.format(value))\n            if value < 0.0:\n                raise ValueError('value need to be greater or equal 0.0 '\n                                 'for field `direct_normal_illuminance`')\n\n        self._direct_normal_illuminance = value", "docstring": "Corresponds to IDD Field `direct_normal_illuminance`\nwill be missing if >= 999900\n\nArgs:\nvalue (float): value for IDD Field `direct_normal_illuminance`\nUnit: lux\nvalue >= 0.0\nMissing value: 999999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def hash32(data: Any, seed=0) -> int:\n    \n    with MultiTimerContext(timer, TIMING_HASH):\n        c_data = to_str(data)\n        if mmh3:\n            return mmh3.hash(c_data, seed=seed)\n        py_data = to_bytes(c_data)\n        py_unsigned = murmur3_x86_32(py_data, seed=seed)\n        return twos_comp_to_signed(py_unsigned, n_bits=32)", "docstring": "Non-cryptographic, deterministic, fast hash.\n\nArgs:\ndata: data to hash\nseed: seed\n\nReturns:\nsigned 32-bit integer", "source": "juraj-google-style"}
{"code": "def apply_actions(self, actions):\n        \n        modified = []\n        for a in actions:\n            if \"dict\" in a:\n                k = a[\"dict\"]\n                modified.append(k)\n                self.vi[k] = self.modify_object(a[\"action\"], self.vi[k])\n            elif \"file\" in a:\n                self.modify(a[\"action\"], a[\"file\"])\n            else:\n                raise ValueError(\"Unrecognized format: {}\".format(a))\n        for f in modified:\n            self.vi[f].write_file(f)", "docstring": "Applies a list of actions to the Vasp Input Set and rewrites modified\nfiles.\nArgs:\nactions [dict]: A list of actions of the form {'file': filename,\n'action': moddermodification} or {'dict': vaspinput_key,\n'action': moddermodification}", "source": "juraj-google-style"}
{"code": "def fail_api(channel):\n    \n\n    gui = ui_embed.UI(\n        channel,\n        \"Couldn't get stats off RLTrackerNetwork.\",\n        \"Maybe the API changed, please tell Infraxion.\",\n        modulename=modulename,\n        colour=0x0088FF\n    )\n\n    return gui", "docstring": "Creates an embed UI for when the API call didn't work\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "juraj-google-style"}
{"code": "def process_alias_export_namespace(namespace):\n    namespace.export_path = os.path.abspath(namespace.export_path)\n    if os.path.isfile(namespace.export_path):\n        raise CLIError(FILE_ALREADY_EXISTS_ERROR.format(namespace.export_path))\n    export_path_dir = os.path.dirname(namespace.export_path)\n    if (not os.path.isdir(export_path_dir)):\n        os.makedirs(export_path_dir)\n    if os.path.isdir(namespace.export_path):\n        namespace.export_path = os.path.join(namespace.export_path, ALIAS_FILE_NAME)", "docstring": "Validate input arguments when the user invokes 'az alias export'.\n\nArgs:\nnamespace: argparse namespace object.", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, match=None, **unused_kwargs):\n    format_version = match.get('WebHistoryFileVersion', None)\n    if (format_version != 1):\n        parser_mediator.ProduceExtractionWarning('unsupported Safari history version: {0!s}'.format(format_version))\n        return\n    if ('WebHistoryDates' not in match):\n        return\n    for history_entry in match.get('WebHistoryDates', {}):\n        last_visited_date = history_entry.get('lastVisitedDate', None)\n        if (last_visited_date is None):\n            parser_mediator.ProduceExtractionWarning('missing last visited date')\n            continue\n        try:\n            timestamp = float(last_visited_date)\n        except (TypeError, ValueError):\n            parser_mediator.ProduceExtractionWarning('unable to convert last visited date {0:s}'.format(last_visited_date))\n            continue\n        display_title = history_entry.get('displayTitle', None)\n        event_data = SafariHistoryEventData()\n        if (display_title != event_data.title):\n            event_data.display_title = display_title\n        event_data.title = history_entry.get('title', None)\n        event_data.url = history_entry.get('', None)\n        event_data.visit_count = history_entry.get('visitCount', None)\n        event_data.was_http_non_get = history_entry.get('lastVisitWasHTTPNonGet', None)\n        timestamp = int(timestamp)\n        date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n        parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts Safari history items.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmatch (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.", "source": "codesearchnet"}
{"code": "def rmtree(self, exclude_wildcard=''):\n    if (not exclude_wildcard):\n        shutil.rmtree(self.workdir)\n    else:\n        w = WildCard(exclude_wildcard)\n        for (dirpath, dirnames, filenames) in os.walk(self.workdir):\n            for fname in filenames:\n                path = os.path.join(dirpath, fname)\n                if (not w.match(fname)):\n                    os.remove(path)", "docstring": "Remove all files and directories in the working directory\n\nArgs:\nexclude_wildcard: Optional string with regular expressions separated by `|`.\nFiles matching one of the regular expressions will be preserved.\nexample: exclude_wildard=\"*.nc|*.txt\" preserves all the files\nwhose extension is in [\"nc\", \"txt\"].", "source": "codesearchnet"}
{"code": "def upload(self, file_path, uri=None, timeout=-1):\n        \n        if not uri:\n            uri = self._uri\n\n        upload_file_name = os.path.basename(file_path)\n        task, entity = self._connection.post_multipart_with_response_handling(uri, file_path, upload_file_name)\n\n        if not task:\n            return entity\n\n        return self._task_monitor.wait_for_task(task, timeout)", "docstring": "Makes a multipart request.\n\nArgs:\nfile_path:\nFile to upload.\nuri:\nA specific URI (optional).\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Response body.", "source": "juraj-google-style"}
{"code": "def _uses_buffer_offset(model: schema_fb.ModelT) -> bool:\n    if not model.metadata:\n        return False\n    return any(map(lambda metadata: metadata.name.decode('utf-8') == 'buffer_location', model.metadata))", "docstring": "Determines whether the model is using an offset buffer.\n\nArgs:\nmodel: A TFLite model.\n\nReturns:\nTrue iff the model is using offset buffers. Offset buffers are enabled by\nthe flag `_experimental_use_buffer_offset`.", "source": "github-repos"}
{"code": "def extract_attribute_grid(self, model_grid, potential=False, future=False):\n        \n\n        if potential:\n            var_name = model_grid.variable + \"-potential\"\n            timesteps = np.arange(self.start_time - 1, self.end_time)\n        elif future:\n            var_name = model_grid.variable + \"-future\"\n            timesteps = np.arange(self.start_time + 1, self.end_time + 2)\n        else:\n            var_name = model_grid.variable\n            timesteps = np.arange(self.start_time, self.end_time + 1)\n        self.attributes[var_name] = []\n        for ti, t in enumerate(timesteps):\n            self.attributes[var_name].append(\n                model_grid.data[t - model_grid.start_hour, self.i[ti], self.j[ti]])", "docstring": "Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject.\n\nArgs:\nmodel_grid: A ModelGrid or ModelOutput Object\npotential: Extracts from the time before instead of the same time as the object", "source": "juraj-google-style"}
{"code": "def easeInOutQuad(n):\n    \n    _checkRange(n)\n    if n < 0.5:\n        return 2 * n**2\n    else:\n        n = n * 2 - 1\n        return -0.5 * (n*(n-2) - 1)", "docstring": "A quadratic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"}
{"code": "def __init__(self, location=None, **kwargs):\n    \n    if not location:\n      raise ValueError('Missing location value.')\n\n    parent = None\n    if 'parent' in kwargs:\n      parent = kwargs['parent']\n      del kwargs['parent']\n\n    if parent:\n      raise ValueError('Parent value set.')\n\n    \n    location = os.path.abspath(location)\n\n    super(OSPathSpec, self).__init__(location=location, parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the operating system path specification cannot have a parent.\n\nArgs:\nlocation (Optional[str]): operating specific location string e.g.\n/opt/dfvfs or C:\\\\Opt\\\\dfvfs.\n\nRaises:\nValueError: when location is not set or parent is set.", "source": "juraj-google-style"}
{"code": "def dequeue(self) -> Tuple[(int, TItem)]:\n    if (self._len == 0):\n        raise ValueError('BucketPriorityQueue is empty.')\n    while (self._buckets and (not self._buckets[0])):\n        self._buckets.pop(0)\n        self._offset += 1\n    item = self._buckets[0].pop(0)\n    priority = self._offset\n    self._len -= 1\n    if (self._drop_set is not None):\n        self._drop_set.remove((priority, item))\n    return (priority, item)", "docstring": "Removes and returns an item from the priority queue.\n\nReturns:\nA tuple whose first element is the priority of the dequeued item\nand whose second element is the dequeued item.\n\nRaises:\nValueError:\nThe queue is empty.", "source": "codesearchnet"}
{"code": "def register_peer(self, connection_id, endpoint):\n        \n        with self._lock:\n            if len(self._peers) < self._maximum_peer_connectivity:\n                self._peers[connection_id] = endpoint\n                self._topology.set_connection_status(connection_id,\n                                                     PeerStatus.PEER)\n                LOGGER.debug(\"Added connection_id %s with endpoint %s, \"\n                             \"connected identities are now %s\",\n                             connection_id, endpoint, self._peers)\n            else:\n                raise PeeringException(\n                    \"At maximum configured number of peers: {} \"\n                    \"Rejecting peering request from {}.\".format(\n                        self._maximum_peer_connectivity,\n                        endpoint))\n\n        public_key = self.peer_to_public_key(connection_id)\n        if public_key:\n            self._consensus_notifier.notify_peer_connected(public_key)", "docstring": "Registers a connected connection_id.\n\nArgs:\nconnection_id (str): A unique identifier which identifies an\nconnection on the network server socket.\nendpoint (str): The publically reachable endpoint of the new\npeer", "source": "juraj-google-style"}
{"code": "def __init__(self, unique_identifier=None, attributes=None):\n        \n        super(GetAttributesResponsePayload, self).__init__(\n            enums.Tags.RESPONSE_PAYLOAD)\n\n        self._unique_identifier = None\n        self._attributes = list()\n\n        self.unique_identifier = unique_identifier\n        self.attributes = attributes", "docstring": "Construct a GetAttributes response payload.\n\nArgs:\nunique_identifier (string): The ID of the managed object with\nwhich the retrieved attributes should be associated. Optional,\ndefaults to None.\nattributes (list): A list of attribute structures associated with\nthe managed object. Optional, defaults to None.", "source": "juraj-google-style"}
{"code": "def all_prod(tensors):\n    return _apply_all_reduce('prod', tensors)", "docstring": "Returns a list of tensors with the all-reduce product across `tensors`.\n\nThe computation is done with an all-reduce operation, so if only some of the\nreturned tensors are evaluated then the computation will hang.\n\nArgs:\ntensors: The input tensors across which to multiply; must be assigned\nto GPU devices.\n\nReturns:\nList of tensors, each with the product of the input tensors, where tensor i\nhas the same device as `tensors[i]`.", "source": "github-repos"}
{"code": "def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):\n    assert isinstance(crop_size, int), crop_size\n    boxes = tf.stop_gradient(boxes)\n    if pad_border:\n        image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')\n        boxes = (boxes + 1)\n\n    @under_name_scope()\n    def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):\n        '\\n        The way tf.image.crop_and_resize works (with normalized box):\\n        Initial point (the value of output[0]): x0_box * (W_img - 1)\\n        Spacing: w_box * (W_img - 1) / (W_crop - 1)\\n        Use the above grid to bilinear sample.\\n\\n        However, what we want is (with fpcoor box):\\n        Spacing: w_box / W_crop\\n        Initial point: x0_box + spacing/2 - 0.5\\n        (-0.5 because bilinear sample (in my definition) assumes floating point coordinate\\n         (0.0, 0.0) is the same as pixel value (0, 0))\\n\\n        This function transform fpcoor boxes to a format to be used by tf.image.crop_and_resize\\n\\n        Returns:\\n            y1x1y2x2\\n        '\n        (x0, y0, x1, y1) = tf.split(boxes, 4, axis=1)\n        spacing_w = ((x1 - x0) / tf.cast(crop_shape[1], tf.float32))\n        spacing_h = ((y1 - y0) / tf.cast(crop_shape[0], tf.float32))\n        imshape = [tf.cast((image_shape[0] - 1), tf.float32), tf.cast((image_shape[1] - 1), tf.float32)]\n        nx0 = (((x0 + (spacing_w / 2)) - 0.5) / imshape[1])\n        ny0 = (((y0 + (spacing_h / 2)) - 0.5) / imshape[0])\n        nw = ((spacing_w * tf.cast((crop_shape[1] - 1), tf.float32)) / imshape[1])\n        nh = ((spacing_h * tf.cast((crop_shape[0] - 1), tf.float32)) / imshape[0])\n        return tf.concat([ny0, nx0, (ny0 + nh), (nx0 + nw)], axis=1)\n    image_shape = tf.shape(image)[2:]\n    boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])\n    image = tf.transpose(image, [0, 2, 3, 1])\n    ret = tf.image.crop_and_resize(image, boxes, tf.cast(box_ind, tf.int32), crop_size=[crop_size, crop_size])\n    ret = tf.transpose(ret, [0, 3, 1, 2])\n    return ret", "docstring": "Aligned version of tf.image.crop_and_resize, following our definition of floating point boxes.\n\nArgs:\nimage: NCHW\nboxes: nx4, x1y1x2y2\nbox_ind: (n,)\ncrop_size (int):\nReturns:\nn,C,size,size", "source": "codesearchnet"}
{"code": "def is_control(input, model_file=None, model_proto=None, name=None):\n    return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(input, model_file=model_file, model_proto=model_proto, name=name, piece_type=1)", "docstring": "Returns true if input id is control piece.\n\nArgs:\ninput: An arbitrary tensor of int32.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of bool with the same shape as input.", "source": "codesearchnet"}
{"code": "def upload(self, params={}):\n        \n        if self.upload_token is not None:\n            \n            status = self.check()\n            if status['status'] != 4:\n                return self.commit()\n            else:\n                self.new_slice()\n                while self.slice_task_id != 0:\n                    self.upload_slice()\n                return self.commit()\n        else:\n            \n            self.create(self.prepare_video_params(**params))\n            self.create_file()\n            self.new_slice()\n            while self.slice_task_id != 0:\n                self.upload_slice()\n            return self.commit()", "docstring": "start uploading the file until upload is complete or error.\nThis is the main method to used, If you do not care about\nstate of process.\n\nArgs:\nparams: a dict object describe video info, eg title,\ntags, description, category.\nall video params see the doc of prepare_video_params.\n\nReturns:\nreturn video_id if upload successfully", "source": "juraj-google-style"}
{"code": "def validate_and_decode(jwt_bu64, cert_obj):\n    \n    try:\n        return jwt.decode(\n            jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True\n        )\n    except jwt.InvalidTokenError as e:\n        raise JwtException('Signature is invalid. error=\"{}\"'.format(str(e)))", "docstring": "Validate the JWT and return as a dict.\n\n- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and\nreturns it as a dict.\n\nArgs:\njwt_bu64: bytes\nThe JWT encoded using a a URL safe flavor of Base64.\n\ncert_obj: cryptography.Certificate\nPublic certificate used for signing the JWT (typically the CN cert).\n\nRaises:\nJwtException: If validation fails.\n\nReturns:\ndict: Values embedded in the JWT.", "source": "juraj-google-style"}
{"code": "def _MergeEventTag(self, storage_writer, attribute_container):\n    if (attribute_container.CONTAINER_TYPE != 'event_tag'):\n        return\n    event_identifier = attribute_container.GetEventIdentifier()\n    if (not event_identifier):\n        return\n    stored_event_tag = self._event_tag_index.GetEventTagByIdentifier(storage_writer, event_identifier)\n    if stored_event_tag:\n        attribute_container.AddComment(stored_event_tag.comment)\n        attribute_container.AddLabels(stored_event_tag.labels)\n    self._event_tag_index.SetEventTag(attribute_container)", "docstring": "Merges an event tag with the last stored event tag.\n\nIf there is an existing event the provided event tag is updated with\nthe contents of the existing one. After which the event tag index is\nupdated.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.\nattribute_container (AttributeContainer): container.", "source": "codesearchnet"}
{"code": "def GetContract(self, script_hash):\n    if (script_hash.ToBytes() in self._contracts.keys()):\n        return self._contracts[script_hash.ToBytes()]\n    return None", "docstring": "Get contract for specified script_hash.\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20).\n\nReturns:\nContract: if a contract was found matching the provided script hash, otherwise None", "source": "codesearchnet"}
{"code": "def _populate_quantization_options_default_values(quantization_options: _QuantizationOptions) -> None:\n    if quantization_options.op_set == quant_opts_pb2.OpSet.OP_SET_UNSPECIFIED:\n        quantization_options.op_set = quant_opts_pb2.OpSet.XLA\n    if not quantization_options.tags:\n        quantization_options.tags.append(tag_constants.SERVING)\n    if not quantization_options.signature_keys:\n        quantization_options.signature_keys.append(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n    if not quantization_options.HasField('freeze_all_variables'):\n        quantization_options.freeze_all_variables = True\n    if quantization_options.enable_legacy_weight_only:\n        raise ValueError('Legacy weight-only is deprecated. Use weight-only quantization method.')\n    if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_UNSPECIFIED:\n        logging.debug('\"preset_method\" for QuantizationMethod is not specified.Static range quantization is used by default.')\n        quantization_options.quantization_method.preset_method = _PresetMethod.METHOD_STATIC_RANGE_INT8\n    if quantization_options.min_num_elements_for_weights == 0:\n        quantization_options.min_num_elements_for_weights = _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS\n        logging.warning('QuantizationOptions.min_num_elements_for_weights is not set (0). Setting to the default value: %d.', _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS)\n    if not quantization_options.HasField('enable_per_channel_quantization'):\n        quantization_options.enable_per_channel_quantization = False\n    if quantization_options.enable_per_channel_quantization and (not ((quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED or quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8) or (quantization_options.op_set in (quant_opts_pb2.OpSet.XLA, quant_opts_pb2.OpSet.STABLEHLO) and quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8))):\n        raise ValueError('Currently, per-channel quantization is supported for Uniform Quantized opset, weight only quantization, or XLA/StableHLO opset with static range quantization.')\n    if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 and (quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED or quantization_options.op_set == quant_opts_pb2.OpSet.TF):\n        raise ValueError('TF/Uniform quantized opset does not support weight-only.')\n    if quantization_options.op_set == quant_opts_pb2.OpSet.STABLEHLO and (quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_INT8 and quantization_options.quantization_method.preset_method != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8):\n        raise ValueError('StableHLO quantized opset currently only supports static range quantization and weight-only quantizationvia TF Quantizer.')\n    logging.debug('Setting `force_graph_mode_calibration = True` to ensure the calibration mode is executed properly.')\n    quantization_options.force_graph_mode_calibration = True\n    if quantization_options.HasField('debugger_config'):\n        if not quantization_options.debugger_config.log_dir_path:\n            quantization_options.debugger_config.log_dir_path = '/tmp/dumps'\n        if quantization_options.debugger_config.debugger_type == 
stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED:\n            raise ValueError('Debugger is enabled but debugger type was not specified.')\n        if quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL and (not quantization_options.debugger_config.unquantized_dump_model_path):\n            raise ValueError('Debugger type whole model verify was used but unquantized_dump_model_path was not specified.')\n    _populate_quantization_component_spec(quantization_options.quantization_method)\n    _populate_unitwise_quantization_specs(quantization_options)\n    if quantization_options.quantization_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8:\n        _populate_calibration_options(quantization_options)", "docstring": "Populates default values for QuantizationOptions.\n\nPopulates unspecified or unset fields of QuantizationOptions with the default\nvalues.\n\n* If `op_set` is unspecified, it defaults to `OpSet.XLA`.\n* If `freeze_all_variables` is not set, it defaults to `True`.\n* Check if configurations are set correctly:\n- Per-channel quantization is supported for Uniform Quantized opset only.\n\nArgs:\nquantization_options: An instance of QuantizationOptions.", "source": "github-repos"}
{"code": "def connect_container_to_network(self, container, net_id, ipv4_address=None, ipv6_address=None, aliases=None, links=None, link_local_ips=None):\n    data = {'Container': container, 'EndpointConfig': self.create_endpoint_config(aliases=aliases, links=links, ipv4_address=ipv4_address, ipv6_address=ipv6_address, link_local_ips=link_local_ips)}\n    url = self._url('/networks/{0}/connect', net_id)\n    res = self._post_json(url, data=data)\n    self._raise_for_status(res)", "docstring": "Connect a container to a network.\n\nArgs:\ncontainer (str): container-id/name to be connected to the network\nnet_id (str): network id\naliases (:py:class:`list`): A list of aliases for this endpoint.\nNames in that list can be used within the network to reach the\ncontainer. Defaults to ``None``.\nlinks (:py:class:`list`): A list of links for this endpoint.\nContainers declared in this list will be linked to this\ncontainer. Defaults to ``None``.\nipv4_address (str): The IP address of this container on the\nnetwork, using the IPv4 protocol. Defaults to ``None``.\nipv6_address (str): The IP address of this container on the\nnetwork, using the IPv6 protocol. Defaults to ``None``.\nlink_local_ips (:py:class:`list`): A list of link-local\n(IPv4/IPv6) addresses.", "source": "codesearchnet"}
{"code": "def _block_qargs_to_indices(self, block_qargs, global_index_map):\n        \n        block_indices = [global_index_map[q] for q in block_qargs]\n        ordered_block_indices = sorted(block_indices)\n        block_positions = {q: ordered_block_indices.index(global_index_map[q])\n                           for q in block_qargs}\n        return block_positions", "docstring": "Map each qubit in block_qargs to its wire position among the block's wires.\nArgs:\nblock_qargs (list): list of qubits that a block acts on\nglobal_index_map (dict): mapping from each qubit in the\ncircuit to its wire position within that circuit\nReturns:\ndict: mapping from qarg to position in block", "source": "juraj-google-style"}
{"code": "def get_action(self, action_id):\n        \n        return Action.get_object(\n            api_token=self.token,\n            action_id=action_id\n        )", "docstring": "Returns a specific Action by its ID.\n\nArgs:\naction_id (int): id of action", "source": "juraj-google-style"}
{"code": "def ContainsKey(self, public_key):\n    return self.ContainsKeyHash(Crypto.ToScriptHash(public_key.encode_point(True), unhex=True))", "docstring": "Test if the wallet contains the supplied public key.\n\nArgs:\npublic_key (edcsa.Curve.point): a public key to test for its existance. e.g. KeyPair.PublicKey\n\nReturns:\nbool: True if exists, False otherwise.", "source": "codesearchnet"}
{"code": "def TrimVariableTable(self, new_size):\n\n    def ProcessBufferFull(variables):\n        for variable in variables:\n            var_index = variable.get('varTableIndex')\n            if ((var_index is not None) and (var_index >= new_size)):\n                variable['varTableIndex'] = 0\n            members = variable.get('members')\n            if (members is not None):\n                ProcessBufferFull(members)\n    del self._var_table[new_size:]\n    ProcessBufferFull(self.breakpoint['evaluatedExpressions'])\n    for stack_frame in self.breakpoint['stackFrames']:\n        ProcessBufferFull(stack_frame['arguments'])\n        ProcessBufferFull(stack_frame['locals'])\n    ProcessBufferFull(self._var_table)", "docstring": "Trims the variable table in the formatted breakpoint message.\n\nRemoves trailing entries in variables table. Then scans the entire\nbreakpoint message and replaces references to the trimmed variables to\npoint to var_index of 0 (\"buffer full\").\n\nArgs:\nnew_size: desired size of variables table.", "source": "codesearchnet"}
{"code": "def _set_state_variables(self, updates):\n    if not self.built:\n        raise RuntimeError('_set_state_variables() must be called after build().')\n    with ops.init_scope():\n        for var_name, value in updates.items():\n            self.state_variables[var_name].assign(value)", "docstring": "Directly update the internal state of this Layer.\n\nThis method expects a string-keyed dict of {state_variable_name: state}. The\nprecise nature of the state, and the names associated, are describe by\nthe subclasses of CombinerPreprocessingLayer.\n\nArgs:\nupdates: A string keyed dict of weights to update.\n\nRaises:\nRuntimeError: if 'build()' was not called before 'set_processing_state'.", "source": "github-repos"}
{"code": "def find_input(self, stream):\n    for (i, input_x) in enumerate(self.inputs):\n        if input_x[0].matches(stream):\n            return i", "docstring": "Find the input that responds to this stream.\n\nArgs:\nstream (DataStream): The stream to find\n\nReturns:\n(index, None): The index if found or None", "source": "codesearchnet"}
{"code": "def _generate_composite(self, comp_node, keepables):\n        \n        if comp_node.name in self.datasets:\n            \n            return\n        compositor, prereqs, optional_prereqs = comp_node.data\n\n        try:\n            prereq_datasets = self._get_prereq_datasets(\n                comp_node.name,\n                prereqs,\n                keepables,\n            )\n        except KeyError:\n            return\n\n        optional_datasets = self._get_prereq_datasets(\n            comp_node.name,\n            optional_prereqs,\n            keepables,\n            skip=True\n        )\n\n        try:\n            composite = compositor(prereq_datasets,\n                                   optional_datasets=optional_datasets,\n                                   **self.attrs)\n\n            cid = DatasetID.from_dict(composite.attrs)\n\n            self.datasets[cid] = composite\n            \n            if comp_node.name in self.wishlist:\n                self.wishlist.remove(comp_node.name)\n                self.wishlist.add(cid)\n            comp_node.name = cid\n        except IncompatibleAreas:\n            LOG.debug(\"Delaying generation of %s because of incompatible areas\", str(compositor.id))\n            preservable_datasets = set(self.datasets.keys())\n            prereq_ids = set(p.name for p in prereqs)\n            opt_prereq_ids = set(p.name for p in optional_prereqs)\n            keepables |= preservable_datasets & (prereq_ids | opt_prereq_ids)\n            \n            \n            keepables.add(comp_node.name)\n            return", "docstring": "Collect all composite prereqs and create the specified composite.\n\nArgs:\ncomp_node (Node): Composite Node to generate a Dataset for\nkeepables (set): `set` to update if any datasets are needed\nwhen generation is continued later. This can\nhappen if generation is delayed to incompatible\nareas which would require resampling first.", "source": "juraj-google-style"}
{"code": "def _get_spec(self) -> dict:\n    if self.spec:\n        return self.spec\n    self.spec = requests.get(self.SPEC_URL.format(self.version)).json()\n    return self.spec", "docstring": "Fetches the OpenAPI spec from the server.\n\nIf the spec has already been fetched, the cached version is returned instead.\n\nArgS:\nNone\n\nReturns:\nOpenAPI spec data", "source": "codesearchnet"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      tsk_image_object = tsk_image.TSKFileSystemImage(file_object)\n      tsk_file_system = pytsk3.FS_Info(tsk_image_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._tsk_file_system = tsk_file_system", "docstring": "Opens the file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def transfer_project(self, to_namespace, **kwargs):\n        \n        path = '/projects/%s/transfer' % (self.id,)\n        self.manager.gitlab.http_put(path,\n                                     post_data={\"namespace\": to_namespace},\n                                     **kwargs)", "docstring": "Transfer a project to the given namespace ID\n\nArgs:\nto_namespace (str): ID or path of the namespace to transfer the\nproject to\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTransferProjectError: If the project could not be transfered", "source": "juraj-google-style"}
{"code": "def plot_labels(ax, label_fontsize=14,\n                xlabel=None, xlabel_arg=None,\n                ylabel=None, ylabel_arg=None,\n                zlabel=None, zlabel_arg=None):\n    \n    xlabel = xlabel if xlabel is not None else ax.get_xlabel() or 'X'\n    ylabel = ylabel if ylabel is not None else ax.get_ylabel() or 'Y'\n\n    xlabel_arg = dict_if_none(xlabel_arg)\n    ylabel_arg = dict_if_none(ylabel_arg)\n\n    ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)\n    ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)\n\n    if hasattr(ax, 'zaxis'):\n        zlabel = zlabel if zlabel is not None else ax.get_zlabel() or 'Z'\n        zlabel_arg = dict_if_none(zlabel_arg)\n        ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)", "docstring": "Sets the labels options of a matplotlib plot\n\nArgs:\nax: matplotlib axes\nlabel_fontsize(int): Size of the labels' font\nxlabel(str): The xlabel for the figure\nxlabel_arg(dict):  Passsed into matplotlib as xlabel arguments\nylabel(str): The ylabel for the figure\nylabel_arg(dict):  Passsed into matplotlib as ylabel arguments\nzlabel(str): The zlabel for the figure\nzlabel_arg(dict):  Passsed into matplotlib as zlabel arguments", "source": "juraj-google-style"}
{"code": "def table_exists(client, table_reference):\n    from google.cloud.exceptions import NotFound\n    try:\n        client.get_table(table_reference)\n        return True\n    except NotFound:\n        return False", "docstring": "Return if a table exists.\n\nArgs:\nclient (google.cloud.bigquery.client.Client):\nA client to connect to the BigQuery API.\ntable_reference (google.cloud.bigquery.table.TableReference):\nA reference to the table to look for.\n\nReturns:\nbool: ``True`` if the table exists, ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def from_file(cls, jss, filename):\n    tree = ElementTree.parse(filename)\n    root = tree.getroot()\n    return cls(jss, root)", "docstring": "Create a new JSSObject from an external XML file.\n\nArgs:\njss: A JSS object.\nfilename: String path to an XML file.", "source": "codesearchnet"}
{"code": "def date_range(start, end, boo):\n  \n  earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d')\n  latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d')\n  num_days = (latest - earliest).days + 1\n  all_days = [latest - timedelta(days=x) for x in range(num_days)]\n  all_days.reverse()\n\n  output = []\n\n  if boo:\n    \n    for d in all_days:\n      output.append(int(str(d).replace('-', '')[:8]))\n  else:\n    \n    for d in all_days:\n      output.append(str(d)[:10])\n  return output", "docstring": "Return list of dates within a specified range, inclusive.\n\nArgs:\nstart: earliest date to include, String (\"2015-11-25\")\nend: latest date to include, String (\"2015-12-01\")\nboo: if true, output list contains Numbers (20151230); if false, list contains Strings (\"2015-12-30\")\nReturns:\nlist of either Numbers or Strings", "source": "juraj-google-style"}
{"code": "def exists(self, vars_list: List[str]) -> 'TensorFluent':\n    return self._aggregation_op(tf.reduce_any, self, vars_list)", "docstring": "Returns the TensorFluent for the exists aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the exists aggregation function.", "source": "codesearchnet"}
{"code": "def assertProtoEquals(self, expected_message_maybe_ascii, validate_message, msg=None, relative_tolerance=None):\n    if isinstance(expected_message_maybe_ascii, type(validate_message)):\n        expected_message = expected_message_maybe_ascii\n        self._AssertProtoEquals(expected_message, validate_message, msg=msg, relative_tolerance=relative_tolerance)\n    elif isinstance(expected_message_maybe_ascii, (str, bytes)):\n        expected_message = type(validate_message)()\n        text_format.Merge(expected_message_maybe_ascii, expected_message, descriptor_pool=descriptor_pool.Default())\n        self._AssertProtoEquals(expected_message, validate_message, msg=msg, relative_tolerance=relative_tolerance)\n    else:\n        assert False, \"Can't compare protos of type %s and %s.\" % (type(expected_message_maybe_ascii), type(validate_message))", "docstring": "Asserts that message is same as parsed expected_message_ascii.\n\nCreates another prototype of message, reads the ascii message into it and\nthen compares them using self._AssertProtoEqual().\n\nArgs:\nexpected_message_maybe_ascii: proto message in original or ascii form.\nvalidate_message: the message to validate.\nmsg: Optional message to report on failure.\nrelative_tolerance: float. The allowable difference between the two values\nbeing compared is determined by multiplying the relative tolerance by\nthe maximum of the two values. If this is not provided, then all floats\nare compared using string comparison.", "source": "github-repos"}
{"code": "def get(self, url, headers=None, params=None):\n    merged_headers = self._merge_headers(headers)\n    if ('Accept' not in merged_headers):\n        merged_headers['Accept'] = MEDIA_TYPE_TAXII_V20\n    accept = merged_headers['Accept']\n    resp = self.session.get(url, headers=merged_headers, params=params)\n    resp.raise_for_status()\n    content_type = resp.headers['Content-Type']\n    if (not self.valid_content_type(content_type=content_type, accept=accept)):\n        msg = \"Unexpected Response. Got Content-Type: '{}' for Accept: '{}'\"\n        raise TAXIIServiceException(msg.format(content_type, accept))\n    return _to_json(resp)", "docstring": "Perform an HTTP GET, using the saved requests.Session and auth info.\nIf \"Accept\" isn't one of the given headers, a default TAXII mime type is\nused.  Regardless, the response type is checked against the accept\nheader value, and an exception is raised if they don't match.\n\nArgs:\nurl (str): URL to retrieve\nheaders (dict): Any other headers to be added to the request.\nparams: dictionary or bytes to be sent in the query string for the\nrequest. (optional)", "source": "codesearchnet"}
{"code": "def filter_by(cls, **kwargs):\n    limit = kwargs.pop('limit', None)\n    reverse = kwargs.pop('reverse', False)\n    q = cls.query.filter_by(**kwargs)\n    if reverse:\n        q = q.order_by(cls.id.desc())\n    if limit:\n        q = q.limit(limit)\n    return q", "docstring": "Same as SQLAlchemy's filter_by. Additionally this accepts\ntwo special keyword arguments `limit` and `reverse` for limiting\nthe results and reversing the order respectively.\n\nArgs:\n\n**kwargs: filter parameters\n\nExamples:\n\n>>> user = User.filter_by(email=\"new@x.com\")\n\n>>> shipments = Shipment.filter_by(country=\"India\", limit=3, reverse=True)", "source": "codesearchnet"}
{"code": "def get_loggable_url(url):\n    \n    loggable_url = url or \"\"\n    for secret_string in (\"bewit=\", \"AWSAccessKeyId=\", \"access_token=\"):\n        parts = loggable_url.split(secret_string)\n        loggable_url = parts[0]\n    if loggable_url != url:\n        loggable_url = \"{}<snip>\".format(loggable_url)\n    return loggable_url", "docstring": "Strip out secrets from taskcluster urls.\n\nArgs:\nurl (str): the url to strip\n\nReturns:\nstr: the loggable url", "source": "juraj-google-style"}
{"code": "def get_entry_by_material_id(self, material_id, compatible_only=True, inc_structure=None, property_data=None, conventional_unit_cell=False):\n    data = self.get_entries(material_id, compatible_only=compatible_only, inc_structure=inc_structure, property_data=property_data, conventional_unit_cell=conventional_unit_cell)\n    return data[0]", "docstring": "Get a ComputedEntry corresponding to a material_id.\n\nArgs:\nmaterial_id (str): Materials Project material_id (a string,\ne.g., mp-1234).\ncompatible_only (bool): Whether to return only \"compatible\"\nentries. Compatible entries are entries that have been\nprocessed using the MaterialsProjectCompatibility class,\nwhich performs adjustments to allow mixing of GGA and GGA+U\ncalculations for more accurate phase diagrams and reaction\nenergies.\ninc_structure (str): If None, entries returned are\nComputedEntries. If inc_structure=\"final\",\nComputedStructureEntries with final structures are returned.\nOtherwise, ComputedStructureEntries with initial structures\nare returned.\nproperty_data (list): Specify additional properties to include in\nentry.data. If None, no data. Should be a subset of\nsupported_properties.\nconventional_unit_cell (bool): Whether to get the standard\nconventional unit cell\n\nReturns:\nComputedEntry or ComputedStructureEntry object.", "source": "codesearchnet"}
{"code": "def update(self, id=None, new_data={}, **kwargs):\n        \n\n        if id is None:\n            path = self.path\n        else:\n            path = '%s/%s' % (self.path, id)\n\n        self._check_missing_update_attrs(new_data)\n        files = {}\n\n        \n        types = getattr(self, '_types', {})\n        if types:\n            \n            new_data = new_data.copy()\n            for attr_name, type_cls in types.items():\n                if attr_name in new_data.keys():\n                    type_obj = type_cls(new_data[attr_name])\n\n                    \n                    \n                    if issubclass(type_cls, g_types.FileAttribute):\n                        k = type_obj.get_file_name(attr_name)\n                        files[attr_name] = (k, new_data.pop(attr_name))\n                    else:\n                        new_data[attr_name] = type_obj.get_for_api()\n\n        http_method = self._get_update_method()\n        return http_method(path, post_data=new_data, files=files, **kwargs)", "docstring": "Update an object on the server.\n\nArgs:\nid: ID of the object to update (can be None if not required)\nnew_data: the update data for the object\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nReturns:\ndict: The new object data (*not* a RESTObject)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server cannot perform the request", "source": "juraj-google-style"}
{"code": "def _StopAnalysisProcesses(self, abort=False):\n    logger.debug('Stopping analysis processes.')\n    self._StopMonitoringProcesses()\n    if abort:\n        self._AbortTerminate()\n    if (not self._use_zeromq):\n        logger.debug('Emptying queues.')\n        for event_queue in self._event_queues.values():\n            event_queue.Empty()\n    for event_queue in self._event_queues.values():\n        event_queue.PushItem(plaso_queue.QueueAbort(), block=False)\n    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n    for event_queue in self._event_queues.values():\n        event_queue.Close(abort=abort)\n    if abort:\n        self._AbortKill()\n    else:\n        self._AbortTerminate()\n        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)\n        for event_queue in self._event_queues.values():\n            event_queue.Close(abort=True)", "docstring": "Stops the analysis processes.\n\nArgs:\nabort (bool): True to indicated the stop is issued on abort.", "source": "codesearchnet"}
{"code": "def _get_scatter_keys(client, query, num_splits):\n    scatter_point_query = _create_scatter_query(query, num_splits)\n    client_query = scatter_point_query._to_client_query(client)\n    client_key_splits = [client_entity.key for client_entity in client_query.fetch(client=client, limit=scatter_point_query.limit)]\n    client_key_splits.sort(key=client_key_sort_key)\n    return client_key_splits", "docstring": "Gets a list of split keys given a desired number of splits.\n\nThis list will contain multiple split keys for each split. Only a single split\nkey will be chosen as the split point, however providing multiple keys allows\nfor more uniform sharding.\n\nArgs:\nclient: the client to datastore containing the data.\nquery: the user query.\nnum_splits: the number of desired splits.\n\nReturns:\nA list of scatter keys returned by Datastore.", "source": "github-repos"}
{"code": "def _FormatDataToken(self, token_data):\n    \n    format_string = bsmtoken.BSM_TOKEN_DATA_PRINT.get(\n        token_data.data_format, 'UNKNOWN')\n\n    if token_data.data_format == 4:\n      data = bytes(bytearray(token_data.data)).split(b'\\x00')[0]\n      data = data.decode('utf-8')\n    else:\n      data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])\n    return {\n        'format': format_string,\n        'data': data}", "docstring": "Formats a data token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_data): AUT_DATA token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"}
{"code": "def StringEscape(self, string, match, **unused_kwargs):\n    \n    if match.group(1) in '\\'\"rnbt':\n      self.string += string.decode('unicode_escape')\n    else:\n      self.string += string", "docstring": "Escape backslashes found inside a string quote.\n\nBackslashes followed by anything other than ['\"rnbt] will just be included\nin the string.\n\nArgs:\nstring: The string that matched.\nmatch: the match object (instance of re.MatchObject).\nWhere match.group(1) contains the escaped code.", "source": "juraj-google-style"}
{"code": "def rules(cls, attr=None):\n        \n        try:\n            if attr is None:\n                attr = cls._rules_attr()\n            return getattr(cls, attr).keys()\n        except TypeError:\n            return ()", "docstring": "Iterable of rule names used by :meth:`create`\n\nArgs:\nattr (None or str): Name of the class attribute to which to get the\nnames. If None, one of ``'_rules'``, ``'_binary_rules'`` is\nautomatically chosen", "source": "juraj-google-style"}
{"code": "def changes(self, **kwargs):\n    path = self._get_id_path('changes')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the changes for a specific movie id.\n\nChanges are grouped by key, and ordered by date in descending order.\nBy default, only the last 24 hours of changes are returned. The\nmaximum number of days that can be returned in a single request is 14.\nThe language is present on fields that are translatable.\n\nArgs:\nstart_date: (optional) Expected format is 'YYYY-MM-DD'.\nend_date: (optional) Expected format is 'YYYY-MM-DD'.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def close_session(self, commit=True):\n        \n        if self._session is not None:\n            if commit:\n                self._session.commit()\n            self._session.close()\n            self._session = None", "docstring": "Commit and close the DB session associated with this task (no error is raised if None is open)\n\nArgs:\ncommit (bool): commit session before closing (default=True)", "source": "juraj-google-style"}
{"code": "def has_completed_result_block_format(self, error_message):\n    extras = self._get_extras()\n    if _InstrumentationResultSignals.PASS in extras:\n        return True\n    elif _InstrumentationResultSignals.FAIL in extras:\n        return False\n    else:\n        raise signals.TestError(details=error_message, extras=extras)", "docstring": "Checks the instrumentation result block for a signal indicating\nnormal completion.\n\nArgs:\nerror_message: string, the error message to give if the\ninstrumentation run did not complete successfully.-\n\nReturns:\nA boolean indicating whether or not the instrumentation run passed\nor failed overall.\n\nRaises:\nsignals.TestError: Error raised if the instrumentation run did not\ncomplete because of a crash or some other issue.", "source": "github-repos"}
{"code": "def get_el(el):\n        \n        tag_name = el.elt.tagName.lower()\n        if tag_name in {\"input\", \"textarea\", \"select\"}:\n            return el.value\n        else:\n            raise ValueError(\n                \"Getter for %s (%s) not implemented!\" % (tag_name, el.id)\n            )", "docstring": "Get value of given `el` tag element.\n\nAutomatically choose proper method to set the `value` based on the type\nof the `el`.\n\nArgs:\nel (obj): Element reference to the input you want to convert to\ntypeahead.\n\nReturns:\nstr: Value of the object.", "source": "juraj-google-style"}
{"code": "def inner_horizontal_border(self):\n    return u'{lm}{lv}{hz}{rv}'.format(lm=(' ' * self.margins.left), lv=self.border_style.outer_vertical_inner_right, rv=self.border_style.outer_vertical_inner_left, hz=self.inner_horizontals())", "docstring": "The complete inner horizontal border section, including the left and right border verticals.\n\nReturns:\nstr: The complete inner horizontal border.", "source": "codesearchnet"}
{"code": "def _ValidateFleetspeakServiceConfig(self, config_path):\n    with open(config_path, 'rb') as f:\n        pool = descriptor_pool.DescriptorPool()\n        pool.AddDescriptor(fs_config_pb2.Config.DESCRIPTOR)\n        parsed_config = text_format.Parse(f.read(), fs_system_pb2.ClientServiceConfig(), descriptor_pool=pool)\n        if (parsed_config.factory != 'Daemon'):\n            raise BuildError('Fleetspeak config does not have the expected factory type.')\n        daemon_cfg = fs_config_pb2.Config()\n        parsed_config.config.Unpack(daemon_cfg)\n        if (not daemon_cfg.argv):\n            raise BuildError('Fleetspeak daemon service config does not specify command line args.')", "docstring": "Validates a Fleetspeak service config.\n\nChecks that the given file is a valid TextFormat representation of\na Fleetspeak service config proto.\n\nArgs:\nconfig_path: Path to the config file.\n\nRaises:\nBuildError: If the config is not valid.", "source": "codesearchnet"}
{"code": "def restore(self):\n    self.read_checkpoint_manager.restore_or_initialize()", "docstring": "Restore the training state from the backed up checkpoint file.\n\nReturns:\nTrue if the training state is successfully restored. False if the training\nstate doesn't need to be restored, or error occurred so it can't.", "source": "github-repos"}
{"code": "def write_if_allowed(filename: str,\n                     content: str,\n                     overwrite: bool = False,\n                     mock: bool = False) -> None:\n    \n    \n    if not overwrite and exists(filename):\n        fail(\"File exists, not overwriting: {!r}\".format(filename))\n\n    \n    directory = dirname(filename)\n    if not mock:\n        mkdir_p(directory)\n\n    \n    log.info(\"Writing to {!r}\", filename)\n    if mock:\n        log.warning(\"Skipping writes as in mock mode\")\n    else:\n        with open(filename, \"wt\") as outfile:\n            outfile.write(content)", "docstring": "Writes the contents to a file, if permitted.\n\nArgs:\nfilename: filename to write\ncontent: contents to write\noverwrite: permit overwrites?\nmock: pretend to write, but don't\n\nRaises:\nRuntimeError: if file exists but overwriting not permitted", "source": "juraj-google-style"}
{"code": "def save_scatter_table(self, fn, description=\"\"):\n        \n        data = {\n           \"description\": description,\n           \"time\": datetime.now(),\n           \"psd_scatter\": (self.num_points, self.D_max, self._psd_D, \n                self._S_table, self._Z_table, self._angular_table, \n                self._m_table, self.geometries),\n           \"version\": tmatrix_aux.VERSION\n           }\n        pickle.dump(data, file(fn, 'w'), pickle.HIGHEST_PROTOCOL)", "docstring": "Save the scattering lookup tables.\n\nSave the state of the scattering lookup tables to a file.\nThis can be loaded later with load_scatter_table.\n\nOther variables will not be saved, but this does not matter because\nthe results of the computations are based only on the contents\nof the table.\n\nArgs:\nfn: The name of the scattering table file.\ndescription (optional): A description of the table.", "source": "juraj-google-style"}
{"code": "def get_charge_transfer(self, atom_index):\n        \n        if self.potcar is None:\n            raise ValueError(\"POTCAR must be supplied in order to calculate \"\n                             \"charge transfer!\")\n        potcar_indices = []\n        for i, v in enumerate(self.natoms):\n            potcar_indices += [i] * v\n        nelect = self.potcar[potcar_indices[atom_index]].nelectrons\n        return self.data[atom_index][\"charge\"] - nelect", "docstring": "Returns the charge transferred for a particular atom. Requires POTCAR\nto be supplied.\n\nArgs:\natom_index:\nIndex of atom.\n\nReturns:\nCharge transfer associated with atom from the Bader analysis.\nGiven by final charge on atom - nelectrons in POTCAR for\nassociated atom.", "source": "juraj-google-style"}
{"code": "def text_colour_for_hex(hexx, percent=50, dark='\n    \n    return light if hex_is_dark(hexx, percent=percent) else dark", "docstring": "Function to decide what colour to use for a given hex colour.\n\nArgs:\nhexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\nbool: The colour's brightness is less than the given percent.", "source": "juraj-google-style"}
{"code": "def type_spec_from_value(element, use_fallback=True):\n    spec = type_spec._type_spec_from_value(element)\n    if spec is not None:\n        return spec\n    if isinstance(element, collections_abc.Mapping):\n        if isinstance(element, collections.defaultdict):\n            ctor = lambda items: type(element)(element.default_factory, items)\n        else:\n            ctor = type(element)\n        return ctor([(k, type_spec_from_value(v)) for k, v in element.items()])\n    if isinstance(element, tuple):\n        if hasattr(element, '_fields') and isinstance(element._fields, collections_abc.Sequence) and all((isinstance(f, str) for f in element._fields)):\n            if isinstance(element, wrapt.ObjectProxy):\n                element_type = type(element.__wrapped__)\n            else:\n                element_type = type(element)\n            return element_type(*[type_spec_from_value(v) for v in element])\n        return tuple([type_spec_from_value(v) for v in element])\n    if hasattr(element.__class__, '__attrs_attrs__'):\n        attrs = getattr(element.__class__, '__attrs_attrs__')\n        return type(element)(*[type_spec_from_value(getattr(element, a.name)) for a in attrs])\n    if isinstance(element, CustomNestProtocol):\n        metadata, children = element.__tf_flatten__()\n        return element.__tf_unflatten__(metadata, type_spec_from_value(children))\n    if use_fallback:\n        try:\n            tensor = ops.convert_to_tensor(element)\n            spec = type_spec_from_value(tensor)\n            if spec is not None:\n                return spec\n        except (ValueError, TypeError) as e:\n            logging.vlog(3, 'Failed to convert %r to tensor: %s' % (type(element).__name__, e))\n    raise TypeError('Could not build a `TypeSpec` for {} with type {}'.format(element, type(element).__name__))", "docstring": "Creates a type specification for the given value.\n\nArgs:\nelement: The element to create the type specification for.\nuse_fallback: Whether to fall back to converting the element to a tensor\nin order to compute its `TypeSpec`.\n\nReturns:\nA nested structure of `TypeSpec`s that represents the type specification\nof `element`.\n\nRaises:\nTypeError: If a `TypeSpec` cannot be built for `element`, because its type\nis not supported.", "source": "github-repos"}
{"code": "def _trigger(self, obj, old, value, hint=None, setter=None):\n    if hasattr(obj, 'trigger'):\n        obj.trigger(self.name, old, value, hint, setter)", "docstring": "Unconditionally send a change event notification for the property.\n\nArgs:\nobj (HasProps)\nThe object the property is being set on.\n\nold (obj) :\nThe previous value of the property\n\nnew (obj) :\nThe new value of the property\n\nhint (event hint or None, optional)\nAn optional update event hint, e.g. ``ColumnStreamedEvent``\n(default: None)\n\nUpdate event hints are usually used at times when better\nupdate performance can be obtained by special-casing in\nsome way (e.g. streaming or patching column data sources)\n\nsetter (ClientSession or ServerSession or None, optional) :\nThis is used to prevent \"boomerang\" updates to Bokeh apps.\n(default: None)\n\nIn the context of a Bokeh server application, incoming updates\nto properties will be annotated with the session that is\ndoing the updating. This value is propagated through any\nsubsequent change notifications that the update triggers.\nThe session can compare the event setter to itself, and\nsuppress any updates that originate from itself.\n\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def sqlInsert(def_buf, raw_a, raw_b):\n        \n        count = 0\n        qry_str = \"INSERT INTO  Meter_Reads ( \\n\\t\"\n        for fld in def_buf:\n            if count > 0:\n                qry_str += \", \\n\\t\"\n            qry_str = qry_str + fld\n            count += 1\n        qry_str += (\",\\n\\t\" + Field.Time_Stamp + \", \\n\\t\" +\n                    \"Raw_A,\\n\\t\" +\n                    \"Raw_B\\n) \\n\" +\n                    \"VALUES( \\n\\t\")\n        count = 0\n        for fld in def_buf:\n            if count > 0:\n                qry_str += \", \\n\\t\"\n            fld_type = def_buf[fld][MeterData.TypeValue]\n            fld_str_content = def_buf[fld][MeterData.StringValue]\n            delim = \"\"\n            if (fld_type == FieldType.Hex) or \\\n                    (fld_type == FieldType.String) or \\\n                    (fld_type == FieldType.PowerFactor):\n                delim = \"'\"\n            qry_str = qry_str + delim + fld_str_content + delim\n            count += 1\n        time_val = int(time.time() * 1000)\n        qry_str = (qry_str + \",\\n\\t\" + str(time_val) + \",\\n\\t'\" +\n                   binascii.b2a_hex(raw_a) + \"'\" + \",\\n\\t'\" +\n                   binascii.b2a_hex(raw_b) + \"'\\n);\")\n        ekm_log(qry_str, 4)\n        return qry_str", "docstring": "Reasonably portable SQL INSERT for from combined read buffer.\nArgs:\ndef_buf (SerialBlock): Database only serial block of all fields.\nraw_a (str): Raw A read as hex string.\nraw_b (str): Raw B read (if exists, otherwise empty) as hex string.\n\nReturns:\nstr: SQL insert for passed read buffer", "source": "juraj-google-style"}
{"code": "def modify_dict(data, key, value, create_if_missing=False):\n    \n    data_copy = copy.deepcopy(data)\n    key_copy = copy.deepcopy(key)\n\n    delver = data_copy\n    current_key = key_copy\n    last_key = \"Root\"\n\n    \n    while len(current_key) > 1:\n        if current_key[0] not in delver:\n            raise KeyError(\"ModifyJsonStep Key Couldn't find Subkey {} in {}.\".format(current_key[0], last_key))\n\n        if len(current_key) > 2 and not isinstance(delver[current_key[0]], dict):\n            raise ValueError(\"ModifyJsonStep The Value of {} is a {}, not a dict\".format(current_key[0], type(delver[current_key[0]])))\n\n        last_key = current_key[0]\n        delver = delver[current_key[0]]\n        current_key.pop(0)\n\n    if current_key[0] not in delver and not create_if_missing:\n        raise KeyError(\"ModifyJsonStep Key Couldn't find Subkey {} in {}.\".format(current_key[0], last_key))\n\n    delver[current_key[0]] = value\n\n    return data_copy", "docstring": "Change (or add) a json key/value pair.\n\nArgs:\ndata (dict): The original data. This will not be modified.\nkey (list): A list of keys and subkeys specifing the key to change (list can be one)\nvalue (str): The value to change for the above key\ncreate_if_missing (bool): Set to true to create key if the last key in the list is not found\nOtherwise the function will throw a KeyError\nReturns:\n(dict): the final modified dict", "source": "juraj-google-style"}
{"code": "def plot_wigner_seitz(lattice, ax=None, **kwargs):\n    (ax, fig, plt) = get_ax3d_fig_plt(ax)\n    if ('color' not in kwargs):\n        kwargs['color'] = 'k'\n    if ('linewidth' not in kwargs):\n        kwargs['linewidth'] = 1\n    bz = lattice.get_wigner_seitz_cell()\n    (ax, fig, plt) = get_ax3d_fig_plt(ax)\n    for iface in range(len(bz)):\n        for line in itertools.combinations(bz[iface], 2):\n            for jface in range(len(bz)):\n                if ((iface < jface) and any((np.all((line[0] == x)) for x in bz[jface])) and any((np.all((line[1] == x)) for x in bz[jface]))):\n                    ax.plot(*zip(line[0], line[1]), **kwargs)\n    return (fig, ax)", "docstring": "Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes\n\nArgs:\nlattice: Lattice object\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black\nand linewidth to 1.\n\nReturns:\nmatplotlib figure and matplotlib ax", "source": "codesearchnet"}
{"code": "def is_unlikely_link(text):\n    if ((text[:1] in ',;+:') or (text[(- 1):] in '.,;+:')):\n        return True\n    if re.search('[\\\\\\\\$()\\'\"[\\\\]{}|<>`]', text):\n        return True\n    if ((text[:1] == '.') and (not text.startswith('./')) and (not text.startswith('../'))):\n        return True\n    if (text in ('/', '\n        return True\n    if (('\n        return True\n    if (text in MIMETYPES):\n        return True\n    (tag_1, dummy, tag_2) = text.partition('.')\n    if ((tag_1 in HTML_TAGS) and (tag_2 != 'html')):\n        return True\n    if FIRST_PART_TLD_PATTERN.match(text):\n        return True", "docstring": "Return whether the text is likely to cause false positives.\n\nThis function assumes that leading/trailing whitespace has already been\nremoved.\n\nReturns:\nbool", "source": "codesearchnet"}
{"code": "def inference(self, limit=1000):\n        \n        route_list = []\n        memory_list = []\n        state_key = self.__start_point_tuple\n        x, y = state_key\n        end_x, end_y = self.__end_point_tuple\n        for i in range(limit):\n            q_df = self.q_df[self.q_df.state_key == state_key]\n            if len(memory_list):\n                q_df = q_df[~q_df.action_key.isin(memory_list)]\n            if q_df.shape[0] > 1:\n                q_df = q_df.sort_values(by=[\"q_value\"], ascending=False)\n                action_key = q_df.iloc[0, :][\"action_key\"]\n                q_value = q_df.iloc[0, :][\"q_value\"]\n            elif q_df.shape[0] == 1:\n                action_key = q_df.action_key.values[0]\n                q_value = q_df.q_value.values[0]\n            else:\n                action_key_list = self.extract_possible_actions(state_key)\n                action_key_list = [v for v in action_key_list if v not in memory_list]\n                q_value = 0.0\n                if len(action_key_list):\n                    action_key = random.choice(action_key_list)\n                    _q_df = q_df[q_df.action_key == action_key]\n                    if _q_df.shape[0]:\n                        q_value = _q_df.q_value.values[0]\n\n            state_key = self.update_state(\n                state_key=state_key,\n                action_key=action_key\n            )\n            x, y = state_key\n            route_list.append((x, y, q_value))\n            memory_list.append(state_key)\n            if self.check_the_end_flag(state_key) is True:\n                break\n\n        return route_list", "docstring": "Inference route.\n\nArgs:\nlimit:    the number of inferencing.\n\nReturns:\n[(x_1, y_1), (x_2, y_2), ...]", "source": "juraj-google-style"}
{"code": "def import_tf_tensor(self, x, tf_x):\n    return self.LaidOutTensor(self.make_slices(tf_x, x.shape))", "docstring": "Import a tf.Tensor, producing a LaidOutTensor.\n\nArgs:\nx: a Tensor\ntf_x: a tf.Tensor\nReturns:\na LaidOutTensor", "source": "codesearchnet"}
{"code": "def create(self, key, value):\n    data = None\n    if (key is not None):\n        key = key.strip()\n        self.tcex.log.debug(u'create variable {}'.format(key))\n        parsed_key = self.parse_variable(key.strip())\n        variable_type = parsed_key['type']\n        if (variable_type in self.read_data_types):\n            data = self.create_data_types[variable_type](key, value)\n        else:\n            data = self.create_raw(key, value)\n    return data", "docstring": "Create method of CRUD operation for working with KeyValue DB.\n\nThis method will automatically determine the variable type and\ncall the appropriate method to write the data.  If a non standard\ntype is provided the data will be written as RAW data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result string of DB write.", "source": "codesearchnet"}
{"code": "def load_file_system_library(library_filename):\n    py_tf.TF_LoadLibrary(library_filename)", "docstring": "Loads a TensorFlow plugin, containing file system implementation.\n\nPass `library_filename` to a platform-specific mechanism for dynamically\nloading a library. The rules for determining the exact location of the\nlibrary are platform-specific and are not documented here.\n\nArgs:\nlibrary_filename: Path to the plugin.\nRelative or absolute filesystem path to a dynamic library file.\n\nReturns:\nNone.\n\nRaises:\nRuntimeError: when unable to load the library.", "source": "github-repos"}
{"code": "def get_filetypes_info(editor_quote=\"`\", flag_leaf=True):\n    \n    NONE_REPL = \"\"\n    import f311\n    data = []  \n\n    for attr in f311.classes_file(flag_leaf):\n        description = a99.get_obj_doc0(attr)\n\n        def_ = NONE_REPL if attr.default_filename is None else attr.default_filename\n        ee = attr.editors\n        if ee is None:\n            ee = NONE_REPL\n        else:\n            \n            ee = \", \".join([\"{0}{1}{0}\".format(editor_quote, x, editor_quote) for x in ee])\n\n        data.append({\"description\": description, \"default_filename\": def_, \"classname\": attr.__name__,\n                     \"editors\": ee, \"class\": attr, \"txtbin\": \"text\" if attr.flag_txt else \"binary\"})\n\n    data.sort(key=lambda x: x[\"description\"])\n\n    return data", "docstring": "Reports available data types\n\nArgs:\neditor_quote: character to enclose the name of the editor script between.\nflag_leaf: see tabulate_filetypes_rest()\n\nReturns:\nlist: list of FileTypeInfo", "source": "juraj-google-style"}
{"code": "def _sia(cache_key, subsystem):\n    log.info('Calculating big-phi data for %s...', subsystem)\n    if (not subsystem):\n        log.info('Subsystem %s is empty; returning null SIA immediately.', subsystem)\n        return _null_sia(subsystem)\n    if (not connectivity.is_strong(subsystem.cm, subsystem.node_indices)):\n        log.info('%s is not strongly connected; returning null SIA immediately.', subsystem)\n        return _null_sia(subsystem)\n    if (len(subsystem.cut_indices) == 1):\n        if (not subsystem.cm[subsystem.node_indices][subsystem.node_indices]):\n            log.info('Single micro nodes %s without selfloops cannot have phi; returning null SIA immediately.', subsystem)\n            return _null_sia(subsystem)\n        elif (not config.SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI):\n            log.info('Single micro nodes %s with selfloops cannot have phi; returning null SIA immediately.', subsystem)\n            return _null_sia(subsystem)\n    log.debug('Finding unpartitioned CauseEffectStructure...')\n    unpartitioned_ces = _ces(subsystem)\n    if (not unpartitioned_ces):\n        log.info('Empty unpartitioned CauseEffectStructure; returning null SIA immediately.')\n        return _null_sia(subsystem)\n    log.debug('Found unpartitioned CauseEffectStructure.')\n    if (len(subsystem.cut_indices) == 1):\n        cuts = [Cut(subsystem.cut_indices, subsystem.cut_indices, subsystem.cut_node_labels)]\n    else:\n        cuts = sia_bipartitions(subsystem.cut_indices, subsystem.cut_node_labels)\n    engine = ComputeSystemIrreducibility(cuts, subsystem, unpartitioned_ces)\n    result = engine.run(config.PARALLEL_CUT_EVALUATION)\n    if config.CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA:\n        log.debug('Clearing subsystem caches.')\n        subsystem.clear_caches()\n    log.info('Finished calculating big-phi data for %s.', subsystem)\n    return result", "docstring": "Return the minimal information partition of a subsystem.\n\nArgs:\nsubsystem (Subsystem): The candidate set of nodes.\n\nReturns:\nSystemIrreducibilityAnalysis: A nested structure containing all the\ndata from the intermediate calculations. The top level contains the\nbasic irreducibility information for the given subsystem.", "source": "codesearchnet"}
{"code": "def format_var_name(variable, var_list):\n        \n        z_index = None\n        if variable in var_list:\n            var_name = variable\n        elif variable.ljust(6, \"_\") in var_list:\n            var_name = variable.ljust(6, \"_\")\n        elif any([variable in v_sub.split(\"_\") for v_sub in var_list]):\n            var_name = var_list[[variable in v_sub.split(\"_\") for v_sub in var_list].index(True)]\n            z_index = var_name.split(\"_\").index(variable)\n        else:\n            raise KeyError(\"{0} not found in {1}\".format(variable, var_list))\n        return var_name, z_index", "docstring": "Searches var list for variable name, checks other variable name format options.\n\nArgs:\nvariable (str): Variable being loaded\nvar_list (list): List of variables in file.\n\nReturns:\nName of variable in file containing relevant data, and index of variable z-level if multiple variables\ncontained in same array in file.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_path, expected_checksum, sleep_secs=None):\n    if sleep_secs is not None:\n        if isinstance(sleep_secs, int):\n            self.sleep_secs = sleep_secs\n        else:\n            raise ValueError('Sleep seconds, if received, must be int. But received: %r, %s' % (sleep_secs, type(sleep_secs)))\n    else:\n        self.sleep_secs = None\n    self.file_path = file_path\n    self.expected_checksum = expected_checksum", "docstring": "Initialize a FileChecksumMatcher object\n\nArgs:\nfile_path : A string that is the full path of output file. This path\ncan contain globs.\nexpected_checksum : A hash string that is computed from expected\nresult.\nsleep_secs : Number of seconds to wait before verification start.\nExtra time are given to make sure output files are ready on FS.", "source": "github-repos"}
{"code": "def flatten(l):\n    \n    for el in l:\n        \n        if isinstance(el, Iterable) and not isinstance(\n                el, (str, bytes)) and not isinstance(el, dict):\n            yield from flatten(el)\n        else:\n            yield el", "docstring": "Flatten a multi-deminision list and return a iterable\n\nNote that dict and str will not be expanded, instead, they will be kept as a single element.\n\nArgs:\nl (list): The list needs to be flattened\n\nReturns:\nA iterable of flattened list. To have a list instead use ``list(flatten(l))``", "source": "juraj-google-style"}
{"code": "def _get_edge_sentences(G: AnalysisGraph, source: str, target: str) -> List[str]:\n    return chain.from_iterable([[repr(e.text) for e in s.evidence] for s in G.edges[(source, target)]['InfluenceStatements']])", "docstring": "Return the sentences that led to the construction of a specified edge.\n\nArgs:\nG\nsource: The source of the edge.\ntarget: The target of the edge.", "source": "codesearchnet"}
{"code": "def console_get_height(con: tcod.console.Console) -> int:\n    \n    return int(lib.TCOD_console_get_height(_console(con)))", "docstring": "Return the height of a console.\n\nArgs:\ncon (Console): Any Console instance.\n\nReturns:\nint: The height of a Console.\n\n.. deprecated:: 2.0\nUse `Console.height` instead.", "source": "juraj-google-style"}
{"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:\n    if labels is not None:\n        labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n        use_cache = False\n        if decoder_input_ids is None and decoder_inputs_embeds is None:\n            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n    lm_logits = self.bias_layer(lm_logits)\n    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n    if not return_dict:\n        output = (lm_logits,) + outputs[1:]\n        return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):\nLabels for computing the masked language modeling loss. Indices should either be in `[0, ...,\nconfig.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:", "source": "github-repos"}
{"code": "def future(self, request_iterator, timeout=None, metadata=None, credentials=None):\n    return _utils.wrap_future_call(self._inner.future(_utils.WrappedAsyncIterator(request_iterator, self._loop), timeout, metadata, credentials), self._loop, self._executor)", "docstring": "Asynchronously invokes the underlying RPC on the client.\n\nArgs:\nrequest_iterator: An ASYNC iterator that yields request values for the RPC.\ntimeout: An optional duration of time in seconds to allow for the RPC.\nIf None, the timeout is considered infinite.\nmetadata: Optional :term:`metadata` to be transmitted to the\nservice-side of the RPC.\ncredentials: An optional CallCredentials for the RPC.\n\nReturns:\nAn object that is both a Call for the RPC and a Future. In the event of\nRPC completion, the return Call-Future's result value will be the\nresponse message of the RPC. Should the event terminate with non-OK\nstatus, the returned Call-Future's exception value will be an RpcError.", "source": "codesearchnet"}
{"code": "def __init__(self, target, converter_target_spec=None, converter_allow_custom_ops=None, raise_exception=False):\n    functools.update_wrapper(self, target)\n    self._func = target\n    self._obj_func = None\n    self._verified = False\n    self._log_messages = []\n    self._raise_exception = raise_exception\n    self._converter_target_spec = converter_target_spec\n    self._converter_allow_custom_ops = converter_allow_custom_ops", "docstring": "Initialize the decorator object.\n\nHere is the description of the object variables.\n- _func     : decorated function.\n- _obj_func : for class object, we need to use this object to provide `self`\ninstance as 1 first argument.\n- _verified : whether the compatibility is checked or not.\n\nArgs:\ntarget: decorated function.\nconverter_target_spec : target_spec of TFLite converter parameter.\nconverter_allow_custom_ops : allow_custom_ops of TFLite converter\nparameter.\nraise_exception : to raise an exception on compatibility issues.\nUser need to use get_compatibility_log() to check details.", "source": "github-repos"}
{"code": "def get_and_check_project(valid_vcs_rules, source_url):\n    \n    project_path = match_url_regex(valid_vcs_rules, source_url, match_url_path_callback)\n    if project_path is None:\n        raise ValueError(\"Unknown repo for source url {}!\".format(source_url))\n    project = project_path.split('/')[-1]\n    return project", "docstring": "Given vcs rules and a source_url, return the project.\n\nThe project is in the path, but is the repo name.\n`releases/mozilla-beta` is the path; `mozilla-beta` is the project.\n\nArgs:\nvalid_vcs_rules (tuple of frozendicts): the valid vcs rules, per\n``match_url_regex``.\nsource_url (str): the source url to find the project for.\n\nRaises:\nRuntimeError: on failure to find the project.\n\nReturns:\nstr: the project.", "source": "juraj-google-style"}
{"code": "def format_as_single_line(self, prefix=None, divider=' | ', enabled_item_attrs=None, disabled_item_attrs=None):\n    if enabled_item_attrs is not None and (not isinstance(enabled_item_attrs, list)):\n        enabled_item_attrs = [enabled_item_attrs]\n    if disabled_item_attrs is not None and (not isinstance(disabled_item_attrs, list)):\n        disabled_item_attrs = [disabled_item_attrs]\n    menu_line = prefix if prefix is not None else ''\n    attr_segs = []\n    for item in self._items:\n        menu_line += item.caption\n        item_name_begin = len(menu_line) - len(item.caption)\n        if item.is_enabled():\n            final_attrs = [item]\n            if enabled_item_attrs:\n                final_attrs.extend(enabled_item_attrs)\n            attr_segs.append((item_name_begin, len(menu_line), final_attrs))\n        elif disabled_item_attrs:\n            attr_segs.append((item_name_begin, len(menu_line), disabled_item_attrs))\n        menu_line += divider\n    return RichTextLines(menu_line, font_attr_segs={0: attr_segs})", "docstring": "Format the menu as a single-line RichTextLines object.\n\nArgs:\nprefix: (str) String added to the beginning of the line.\ndivider: (str) The dividing string between the menu items.\nenabled_item_attrs: (list or str) Attributes applied to each enabled\nmenu item, e.g., [\"bold\", \"underline\"].\ndisabled_item_attrs: (list or str) Attributes applied to each\ndisabled menu item, e.g., [\"red\"].\n\nReturns:\n(RichTextLines) A single-line output representing the menu, with\nfont_attr_segs marking the individual menu items.", "source": "github-repos"}
{"code": "def node_device(self, node_name):\n    if not self._debug_graphs:\n        raise LookupError('Node devices are not loaded from partition graphs yet.')\n    if node_name not in self._node_devices:\n        raise ValueError(\"Node '%s' does not exist in partition graphs.\" % node_name)\n    output = list(self._node_devices[node_name])\n    return output[0] if len(output) == 1 else output", "docstring": "Get the names of the devices that has nodes of the specified name.\n\nArgs:\nnode_name: (`str`) name of the node.\n\nReturns:\n(`str` or `list` of `str`) name of the device(s) on which the node of the\ngiven name is found. Returns a `str` if there is only one such device,\notherwise return a `list` of `str`.\n\nRaises:\nLookupError: If node inputs and control inputs have not been loaded\nfrom partition graphs yet.\nValueError: If the node does not exist in partition graphs.", "source": "github-repos"}
{"code": "def dv(self, orb):\n        \n\n        orb = orb.copy(form=\"cartesian\")\n\n        if self.frame == \"QSW\":\n            mat = to_qsw(orb).T\n        elif self.frame == \"TNW\":\n            mat = to_tnw(orb).T\n        else:\n            mat = np.identity(3)\n\n        \n        return mat @ self._dv", "docstring": "Computation of the velocity increment in the reference frame of the orbit\n\nArgs:\norb (Orbit):\nReturn:\nnumpy.array: Velocity increment, length 3", "source": "juraj-google-style"}
{"code": "def __init__(self, weight_shape: Sequence[int]) -> None:\n    self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)\n    if bias_fn is not None:\n        self.bias = np.random.uniform(low=-1.0, high=1.0, size=weight_shape[-1])", "docstring": "Initializes a MatmulModel.\n\nArgs:\nweight_shape: Shape of the weight tensor.", "source": "github-repos"}
{"code": "def GetAutomountMasterMap(self):\n    master_map = self.GetAutomountMap(location='auto.master')\n    for map_entry in master_map:\n        map_entry.location = os.path.split(map_entry.location)[1]\n        self.log.debug('master map has: %s' % map_entry.location)\n    return master_map", "docstring": "Return the autmount master map from this source.\n\nReturns:\nan instance of automount.AutomountMap", "source": "github-repos"}
{"code": "def Parse(self, text):\n    self.parser.parse(text)", "docstring": "Parse |text| and store the parsed information in self.global_env.\n\nArgs:\ntext: The text to parse.", "source": "github-repos"}
{"code": "def __init__(self, image_processor: AutoImageProcessor, id2label: Mapping[int, str], threshold: float=0.0):\n    self.image_processor = image_processor\n    self.id2label = id2label\n    self.threshold = threshold\n    self.metric = self.get_metric()", "docstring": "Initialize evaluator with image processor, id2label mapping and threshold for filtering predictions.\n\nArgs:\nimage_processor (AutoImageProcessor): Image processor for\n`post_process_instance_segmentation` method.\nid2label (Mapping[int, str]): Mapping from class id to class name.\nthreshold (float): Threshold to filter predicted boxes by confidence. Defaults to 0.0.", "source": "github-repos"}
{"code": "def ParseNolintSuppressions(filename, raw_line, linenum, error):\n  \n  matched = Search(r'\\bNOLINT(NEXTLINE)?\\b(\\([^)]+\\))?', raw_line)\n  if matched:\n    if matched.group(1):\n      suppressed_line = linenum + 1\n    else:\n      suppressed_line = linenum\n    category = matched.group(2)\n    if category in (None, '(*)'):  \n      _error_suppressions.setdefault(None, set()).add(suppressed_line)\n    else:\n      if category.startswith('(') and category.endswith(')'):\n        category = category[1:-1]\n        if category in _ERROR_CATEGORIES:\n          _error_suppressions.setdefault(category, set()).add(suppressed_line)\n        elif category not in _LEGACY_ERROR_CATEGORIES:\n          error(filename, linenum, 'readability/nolint', 5,\n                'Unknown NOLINT error category: %s' % category)", "docstring": "Updates the global list of line error-suppressions.\n\nParses any NOLINT comments on the current line, updating the global\nerror_suppressions store.  Reports an error if the NOLINT comment\nwas malformed.\n\nArgs:\nfilename: str, the name of the input file.\nraw_line: str, the line of input text, with comments.\nlinenum: int, the number of the current line.\nerror: function, an error handler.", "source": "juraj-google-style"}
{"code": "def set_structure(self, structure, reset_camera=True, to_unit_cell=True):\n        \n        self.ren.RemoveAllViewProps()\n\n        has_lattice = hasattr(structure, \"lattice\")\n\n        if has_lattice:\n            s = Structure.from_sites(structure, to_unit_cell=to_unit_cell)\n            s.make_supercell(self.supercell, to_unit_cell=to_unit_cell)\n        else:\n            s = structure\n\n        inc_coords = []\n        for site in s:\n            self.add_site(site)\n            inc_coords.append(site.coords)\n\n        count = 0\n        labels = [\"a\", \"b\", \"c\"]\n        colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]\n\n        if has_lattice:\n            matrix = s.lattice.matrix\n\n        if self.show_unit_cell and has_lattice:\n            \n            self.add_text([0, 0, 0], \"o\")\n            for vec in matrix:\n                self.add_line((0, 0, 0), vec, colors[count])\n                self.add_text(vec, labels[count], colors[count])\n                count += 1\n            for (vec1, vec2) in itertools.permutations(matrix, 2):\n                self.add_line(vec1, vec1 + vec2)\n            for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):\n                self.add_line(vec1 + vec2, vec1 + vec2 + vec3)\n\n        if self.show_bonds or self.show_polyhedron:\n            elements = sorted(s.composition.elements, key=lambda a: a.X)\n            anion = elements[-1]\n\n            def contains_anion(site):\n                for sp in site.species.keys():\n                    if sp.symbol == anion.symbol:\n                        return True\n                return False\n\n            anion_radius = anion.average_ionic_radius\n            for site in s:\n                exclude = False\n                max_radius = 0\n                color = np.array([0, 0, 0])\n                for sp, occu in site.species.items():\n                    if sp.symbol in self.excluded_bonding_elements \\\n                            or sp == anion:\n                        exclude = True\n                        break\n                    max_radius = max(max_radius, sp.average_ionic_radius)\n                    color = color + \\\n                            occu * np.array(self.el_color_mapping.get(sp.symbol,\n                                                                      [0, 0, 0]))\n\n                if not exclude:\n                    max_radius = (1 + self.poly_radii_tol_factor) * \\\n                                 (max_radius + anion_radius)\n                    nn = structure.get_neighbors(site, float(max_radius))\n                    nn_sites = []\n                    for nnsite, dist in nn:\n                        if contains_anion(nnsite):\n                            nn_sites.append(nnsite)\n                            if not in_coord_list(inc_coords, nnsite.coords):\n                                self.add_site(nnsite)\n                    if self.show_bonds:\n                        self.add_bonds(nn_sites, site)\n                    if self.show_polyhedron:\n                        color = [i / 255 for i in color]\n                        self.add_polyhedron(nn_sites, site, color)\n\n        if self.show_help:\n            self.helptxt_actor = vtk.vtkActor2D()\n            self.helptxt_actor.VisibilityOn()\n            self.helptxt_actor.SetMapper(self.helptxt_mapper)\n            self.ren.AddActor(self.helptxt_actor)\n            self.display_help()\n\n        camera = self.ren.GetActiveCamera()\n        if reset_camera:\n            if 
has_lattice:\n                \n                lengths = s.lattice.abc\n                pos = (matrix[1] + matrix[2]) * 0.5 + \\\n                      matrix[0] * max(lengths) / lengths[0] * 3.5\n                camera.SetPosition(pos)\n                camera.SetViewUp(matrix[2])\n                camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)\n            else:\n                origin = s.center_of_mass\n                max_site = max(\n                    s, key=lambda site: site.distance_from_point(origin))\n                camera.SetPosition(origin + 5 * (max_site.coords - origin))\n                camera.SetFocalPoint(s.center_of_mass)\n\n        self.structure = structure\n        self.title = s.composition.formula", "docstring": "Add a structure to the visualizer.\n\nArgs:\nstructure: structure to visualize\nreset_camera: Set to True to reset the camera to a default\ndetermined based on the structure.\nto_unit_cell: Whether or not to fall back sites into the unit cell.", "source": "juraj-google-style"}
{"code": "def _respond(self, channel, text):\n        \n        result = self._format_message(channel, text)\n        if result is not None:\n            logger.info(\n                'Sending message: %r',\n                truncate(result, max_len=50),\n            )\n        self.socket.send_str(result)", "docstring": "Respond to a message on the current socket.\n\nArgs:\nchannel (:py:class:`str`): The channel to send to.\ntext (:py:class:`str`): The message text to send.", "source": "juraj-google-style"}
{"code": "def start_with(self, request):\n        \n\n        HTTPRequestHelper.patch_with_options(request, self.__options)\n        self.queue.add_request(request)\n\n        self.__crawler_start()", "docstring": "Start the crawler using the given request.\n\nArgs:\nrequest (:class:`nyawc.http.Request`): The startpoint for the crawler.", "source": "juraj-google-style"}
{"code": "def credits(self, **kwargs):\n    path = self._get_id_path('credits')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the cast and crew information for a specific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def create_blob(profile, content):\n    resource = '/blobs'\n    payload = {'content': content}\n    data = api.post_request(profile, resource, payload)\n    return data", "docstring": "Create a blob.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\ncontent\nThe (UTF-8 encoded) content to create in the blob.\n\nReturns:\nA dict with data about the newly created blob.", "source": "codesearchnet"}
{"code": "def _extract_hunt_results(self, output_file_path):\n    \n    \n    collection_paths = []\n    client_ids = set()\n    client_id_to_fqdn = {}\n    hunt_dir = None\n    try:\n      with zipfile.ZipFile(output_file_path) as archive:\n        items = archive.infolist()\n        for f in items:\n\n          if not hunt_dir:\n            hunt_dir = f.filename.split('/')[0]\n\n          \n          \n          if f.filename.split('/')[-1] == 'client_info.yaml':\n            client_id, fqdn = self._get_client_fqdn(archive.read(f))\n            client_id_to_fqdn[client_id] = fqdn\n            continue\n\n          client_id = f.filename.split('/')[1]\n          if client_id.startswith('C.'):\n            if client_id not in client_ids:\n              client_directory = os.path.join(self.output_path,\n                                              hunt_dir, client_id)\n              collection_paths.append((client_id, client_directory))\n              client_ids.add(client_id)\n            try:\n              archive.extract(f, self.output_path)\n            except KeyError as exception:\n              print('Extraction error: {0:s}'.format(exception))\n              return []\n\n    except OSError as exception:\n      msg = 'Error manipulating file {0:s}: {1!s}'.format(\n          output_file_path, exception)\n      self.state.add_error(msg, critical=True)\n      return []\n    except zipfile.BadZipfile as exception:\n      msg = 'Bad zipfile {0:s}: {1!s}'.format(\n          output_file_path, exception)\n      self.state.add_error(msg, critical=True)\n      return []\n\n    try:\n      os.remove(output_file_path)\n    except OSError as exception:\n      print('Output path {0:s} could not be removed: {1:s}'.format(\n          output_file_path, exception))\n\n    \n    \n    fqdn_collection_paths = []\n    for client_id, path in collection_paths:\n      fqdn = client_id_to_fqdn.get(client_id, client_id)\n      fqdn_collection_paths.append((fqdn, path))\n\n    if not fqdn_collection_paths:\n      self.state.add_error('Nothing was extracted from the hunt archive',\n                           critical=True)\n      return []\n\n    return fqdn_collection_paths", "docstring": "Open a hunt output archive and extract files.\n\nArgs:\noutput_file_path: The path where the hunt archive is downloaded to.\n\nReturns:\nlist: tuples containing:\nstr: The name of the client from where the files were downloaded.\nstr: The directory where the files were downloaded to.", "source": "juraj-google-style"}
{"code": "def get_mimetype(url):\n    filename = url.split('?')[0]\n    filename = filename.split('\n    (content_type, _) = mimetypes.guess_type(filename)\n    return (url, content_type)", "docstring": "Guess based on the file extension.\n\nArgs:\nurl (text): Web url that was linked to by a reddit submission.\n\nReturns:\nmodified_url (text): The url (or filename) that will be used when\nconstructing the command to run.\ncontent_type (text): The mime-type that will be used when\nconstructing the command to run. If the mime-type is unknown,\nreturn None and the program will fallback to using the web\nbrowser.", "source": "codesearchnet"}
{"code": "def list_projects(self, dataset_name):\n    url = ((self.url() + '/nd/resource/dataset/{}'.format(dataset_name)) + '/project/')\n    req = self.remote_utils.get_url(url)\n    if (req.status_code is not 200):\n        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n    else:\n        return req.json()", "docstring": "Lists a set of projects related to a dataset.\n\nArguments:\ndataset_name (str): Dataset name to search projects for\n\nReturns:\ndict: Projects found based on dataset query", "source": "codesearchnet"}
{"code": "def forward_tcp(self, host, port):\n        \n\n        return self.transport.open_channel(\n            'direct-tcpip',\n            (host, port),\n            self.transport.getpeername()\n        )", "docstring": "Open a connection to host:port via an ssh tunnel.\n\nArgs:\nhost (str): The host to connect to.\nport (int): The port to connect to.\n\nReturns:\nA socket-like object that is connected to the provided host:port.", "source": "juraj-google-style"}
{"code": "def group_by_reducer(key_func, reducer):\n\n    def _apply_fn(dataset):\n        \n        return _GroupByReducerDataset(dataset, key_func, reducer)\n    return _apply_fn", "docstring": "A transformation that groups elements and performs a reduction.\n\nThis transformation maps element of a dataset to a key using `key_func` and\ngroups the elements by key. The `reducer` is used to process each group; its\n`init_func` is used to initialize state for each group when it is created, the\n`reduce_func` is used to update the state every time an element is mapped to\nthe matching group, and the `finalize_func` is used to map the final state to\nan output value.\n\nArgs:\nkey_func: A function mapping a nested structure of tensors\n(having shapes and types defined by `self.output_shapes` and\n`self.output_types`) to a scalar `tf.int64` tensor.\nreducer: An instance of `Reducer`, which captures the reduction logic using\nthe `init_func`, `reduce_func`, and `finalize_func` functions.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"}
{"code": "def update_script(self, information, timeout=(- 1)):\n    uri = '{}/script'.format(self.data['uri'])\n    return self._helper.update(information, uri=uri, timeout=timeout)", "docstring": "Updates the configuration script of the logical enclosure and on all enclosures in the logical enclosure with\nthe specified ID.\n\nArgs:\ninformation: Updated script.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturn:\nConfiguration script.", "source": "codesearchnet"}
{"code": "def check_missing(self, args):\n    return [opt.name for opt in self if ((opt.name not in args) and (opt.default is None))]", "docstring": "Returns the names of all options that are required but were not specified.\n\nAll options that don't have a default value are required in order to run the\nworkflow.\n\nArgs:\nargs (dict): A dictionary of the provided arguments that is checked for\nmissing options.\n\nReturns:\nlist: A list with the names of the options that are missing from the\nprovided arguments.", "source": "codesearchnet"}
{"code": "def find_or_build(cls, **kwargs):\n        \n        keys = kwargs.pop('keys') if 'keys' in kwargs else []\n        return cls.first(**subdict(kwargs, keys)) or cls.build(**kwargs)", "docstring": "Checks if an instance already exists in db with these kwargs else\nreturns a new, saved instance of the service's model class.\n\nArgs:\n**kwargs: instance parameters", "source": "juraj-google-style"}
{"code": "def FindFieldByName(self, full_name):\n    \n    full_name = _NormalizeFullyQualifiedName(full_name)\n    message_name, _, field_name = full_name.rpartition('.')\n    message_descriptor = self.FindMessageTypeByName(message_name)\n    return message_descriptor.fields_by_name[field_name]", "docstring": "Loads the named field descriptor from the pool.\n\nArgs:\nfull_name: The full name of the field descriptor to load.\n\nReturns:\nThe field descriptor for the named field.\n\nRaises:\nKeyError: if the field cannot be found in the pool.", "source": "juraj-google-style"}
{"code": "def mirror_sources(self, sourcedir, targetdir=None, recursive=True, excludes=[]):\n    sources = self.compilable_sources(sourcedir, absolute=False, recursive=recursive, excludes=excludes)\n    maplist = []\n    for filepath in sources:\n        src = filepath\n        dst = self.get_destination(src, targetdir=targetdir)\n        if targetdir:\n            src = os.path.join(sourcedir, src)\n        maplist.append((src, dst))\n    return maplist", "docstring": "Mirroring compilable sources filepaths to their targets.\n\nArgs:\nsourcedir (str): Directory path to scan.\n\nKeyword Arguments:\nabsolute (bool): Returned paths will be absolute using\n``sourcedir`` argument (if True), else return relative paths.\nrecursive (bool): Switch to enabled recursive finding (if True).\nDefault to True.\nexcludes (list): A list of excluding patterns (glob patterns).\nPatterns are matched against the relative filepath (from its\nsourcedir).\n\nReturns:\nlist: A list of pairs ``(source, target)``. Where ``target`` is the\n``source`` path but renamed with ``.css`` extension. Relative\ndirectory from source dir is left unchanged but if given,\nreturned paths will be absolute (using ``sourcedir`` for\nsources and ``targetdir`` for targets).", "source": "codesearchnet"}
{"code": "def _parse_octet(self, octet_str):\n        \n        if not octet_str:\n            raise ValueError(\"Empty octet not permitted\")\n        \n        if not self._DECIMAL_DIGITS.issuperset(octet_str):\n            msg = \"Only decimal digits permitted in %r\"\n            raise ValueError(msg % octet_str)\n        \n        \n        if len(octet_str) > 3:\n            msg = \"At most 3 characters permitted in %r\"\n            raise ValueError(msg % octet_str)\n        \n        octet_int = int(octet_str, 10)\n        \n        \n        \n        if octet_int > 7 and octet_str[0] == '0':\n            msg = \"Ambiguous (octal/decimal) value in %r not permitted\"\n            raise ValueError(msg % octet_str)\n        if octet_int > 255:\n            raise ValueError(\"Octet %d (> 255) not permitted\" % octet_int)\n        return octet_int", "docstring": "Convert a decimal octet into an integer.\n\nArgs:\noctet_str: A string, the number to parse.\n\nReturns:\nThe octet as an integer.\n\nRaises:\nValueError: if the octet isn't strictly a decimal from [0..255].", "source": "juraj-google-style"}
{"code": "def setup_privnet(self, host=None):\n        \n        self.setup(FILENAME_SETTINGS_PRIVNET)\n        if isinstance(host, str):\n            if \":\" in host:\n                raise Exception(\"No protocol prefix or port allowed in host, use just the IP or domain.\")\n            print(\"Using custom privatenet host:\", host)\n            self.SEED_LIST = [\"%s:20333\" % host]\n            self.RPC_LIST = [\"http:\n            print(\"- P2P:\", \", \".join(self.SEED_LIST))\n            print(\"- RPC:\", \", \".join(self.RPC_LIST))\n        self.check_privatenet()", "docstring": "Load settings from the privnet JSON config file\n\nArgs:\nhost (string, optional): if supplied, uses this IP or domain as neo nodes. The host must\nuse these standard ports: P2P 20333, RPC 30333.", "source": "juraj-google-style"}
{"code": "def csv_to_dict(csv_filepath, **kwargs):\n    \n    callbacks = {'to_list': csv_tolist,\n                 'row_csv_limiter': row_csv_limiter,\n                 'csv_row_cleaner': csv_row_cleaner,\n                 'row_headers_count': row_headers_count,\n                 'get_col_header': get_csv_col_headers,\n                 'get_row_headers': get_row_headers,\n                 'populate_headers': populate_headers,\n                 'csv_column_header_cleaner': csv_column_header_cleaner,\n                 'csv_column_cleaner': csv_column_cleaner,\n                 'retrieve_csv_data': retrieve_csv_data}\n\n    callbacks.update(kwargs.get('alt_callbacks', {}))\n    rows = kwargs.get('rows', [])\n\n    if not rows:\n        \n        rows = callbacks.get('to_list')(csv_filepath, **kwargs)\n\n        if not rows:\n            msg = 'Empty rows obtained from {}'.format(csv_filepath)\n            logger.warning(msg)\n            raise ValueError(msg)\n\n    \n    rows = callbacks.get('row_csv_limiter')(\n        rows, kwargs.get('limits', [None, None]))\n\n    \n    rows = callbacks.get('csv_row_cleaner')(rows)\n\n    \n    rows = callbacks.get('csv_column_cleaner')(rows)\n\n    \n    num_row_headers = callbacks.get('row_headers_count')(rows)\n\n    \n    c_headers_raw = callbacks.get('get_col_header')(rows, num_row_headers)\n\n    \n    r_headers = callbacks.get('get_row_headers')(\n        rows, num_row_headers, len(c_headers_raw))\n\n    \n    c_headers_dirty = callbacks.get('populate_headers')(\n        c_headers_raw) if len(c_headers_raw) > 1 else c_headers_raw[0]\n\n    \n    c_headers = callbacks.get('csv_column_header_cleaner')(c_headers_dirty)\n\n    \n    csv_data = callbacks.get('retrieve_csv_data')(\n        rows,\n        column_header=len(c_headers_raw),\n        row_header=num_row_headers,\n        limit_column=len(c_headers) - len(c_headers_dirty) or None)\n\n    \n    if csv_data:\n        assert len(c_headers) == len(csv_data[0])\n\n    \n    if r_headers:\n        assert len(r_headers) == len(csv_data)\n\n    \n    kwargs.pop('rows', None)\n    result = csv_format(csv_data, c_headers, r_headers, rows, **kwargs)\n\n    return result", "docstring": "Turn csv into dict.\nArgs:\n:csv_filepath: path to csv file to turn into dict.\n:limits: path to csv file to turn into dict", "source": "juraj-google-style"}
{"code": "def temporal_segmentation(segments, min_time):\n    final_segments = []\n    for segment in segments:\n        final_segments.append([])\n        for point in segment:\n            if (point.dt > min_time):\n                final_segments.append([])\n            final_segments[(- 1)].append(point)\n    return final_segments", "docstring": "Segments based on time distant points\n\nArgs:\nsegments (:obj:`list` of :obj:`list` of :obj:`Point`): segment points\nmin_time (int): minimum required time for segmentation", "source": "codesearchnet"}
{"code": "def _decompress(self, compressed_payload):\n    if self._data:\n        raise RuntimeError('Cannot decompress to an instance with payload')\n    self._data = zlib.decompress(compressed_payload)\n    len_data = len(self._data)\n    counts_size = (len_data - payload_header_size)\n    if (payload_header_size > counts_size > MAX_COUNTS_SIZE):\n        raise HdrLengthException(('Invalid size:' + str(len_data)))\n    self.payload = PayloadHeader.from_buffer_copy(self._data)\n    cookie = self.payload.cookie\n    if (get_cookie_base(cookie) != V2_ENCODING_COOKIE_BASE):\n        raise HdrCookieException(('Invalid cookie: %x' % cookie))\n    word_size = get_word_size_in_bytes_from_cookie(cookie)\n    if (word_size != V2_MAX_WORD_SIZE_IN_BYTES):\n        raise HdrCookieException(('Invalid V2 cookie: %x' % cookie))", "docstring": "Decompress a compressed payload into this payload wrapper.\nNote that the decompressed buffer is saved in self._data and the\ncounts array is not yet allocated.\n\nArgs:\ncompressed_payload (string) a payload in zlib compressed form\nException:\nHdrCookieException:\nthe compressed payload has an invalid cookie\nHdrLengthException:\nthe decompressed size is too small for the HdrPayload structure\nor is not aligned or is too large for the passed payload class\nHdrHistogramSettingsException:\nmismatch in the significant figures, lowest and highest\ntrackable value", "source": "codesearchnet"}
{"code": "def batch_size(self):\n    raise NotImplementedError", "docstring": "Return the batch size of the dataset created.\n\nFor certain type of the data input, the batch size is known, and even\nrequired, like numpy array. Whereas for dataset, the batch is unknown\nunless we take a peek.\n\nReturns:\nint, the batch size of the dataset, or None if it is unknown.", "source": "github-repos"}
{"code": "def __init__(self, decode_module, methodName='runTest'):\n    super(DecodeProtoOpTestBase, self).__init__(methodName)\n    self._decode_module = decode_module", "docstring": "DecodeProtoOpTestBase initializer.\n\nArgs:\ndecode_module: a module containing the `decode_proto_op` method\nmethodName: the name of the test method (same as for test.TestCase)", "source": "github-repos"}
{"code": "def RunOnce(self, names=None, token=None):\n    \n    del token\n\n    leased_jobs = data_store.REL_DB.LeaseCronJobs(\n        cronjob_ids=names, lease_time=rdfvalue.Duration(\"10m\"))\n    logging.info(\"Leased %d cron jobs for processing.\", len(leased_jobs))\n    if not leased_jobs:\n      return\n\n    errors = {}\n    processed_count = 0\n    for job in sorted(leased_jobs, key=lambda j: j.cron_job_id):\n      if self.TerminateStuckRunIfNeeded(job):\n        continue\n\n      if not self.JobDueToRun(job):\n        continue\n\n      try:\n        if self.RunJob(job):\n          processed_count += 1\n        else:\n          logging.info(\n              \"Can't schedule cron job %s on a thread pool \"\n              \"(all threads are busy or CPU load is high)\", job.cron_job_id)\n          break\n      except Exception as e:  \n        logging.exception(\"Cron job %s has failed: %s\", job.cron_job_id, e)\n        errors[job.cron_job_id] = e\n\n    logging.info(\"Processed %d cron jobs.\", processed_count)\n    data_store.REL_DB.ReturnLeasedCronJobs(leased_jobs)\n\n    if errors:\n      raise OneOrMoreCronJobsFailedError(errors)", "docstring": "Tries to lock and run cron jobs.\n\nArgs:\nnames: List of cron jobs to run.  If unset, run them all.\ntoken: security token.\n\nRaises:\nOneOrMoreCronJobsFailedError: if one or more individual cron jobs fail.\nNote: a failure of a single cron job doesn't preclude other cron jobs\nfrom running.", "source": "juraj-google-style"}
{"code": "def get_all_users(configuration=None, **kwargs):\n    user = User(configuration=configuration)\n    user['id'] = 'all users'\n    result = user._write_to_hdx('list', kwargs, 'id')\n    users = list()\n    if result:\n        for userdict in result:\n            user = User(userdict, configuration=configuration)\n            users.append(user)\n    else:\n        logger.debug(result)\n    return users", "docstring": "Get all users in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n**kwargs: See below\nq (str): Restrict to names containing a string. Defaults to all users.\norder_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'.\n\nReturns:\nList[User]: List of all users in HDX", "source": "codesearchnet"}
{"code": "def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu, tensor_trace_order, graph):\n    if not tensor_trace_order.traced_tensors:\n        logging.warn('No tensor values being traced. No flush cache op added.')\n        return tensor_fetches\n    with ops.control_dependencies(op_fetches + [tensor.op for tensor in tensor_fetches]):\n        flush_cache_op = self._generate_flush_cache_op(self._tt_config.num_replicas, on_tpu, tensor_trace_order, graph)\n        return control_flow_ops.tuple(tensor_fetches, control_inputs=[flush_cache_op])", "docstring": "Flushes the intermediate tensor values in the graph to the cache.\n\nArgs:\ntensor_fetches: list of tensor results returned by the model_fn.\nop_fetches: list of ops that are returned by the model_fn, e.g., train_op.\non_tpu: if the graph is executed on TPU.\ntensor_trace_order: TensorTraceOrder object holding tensorname to id map.\ngraph: TensorFlow graph.\n\nReturns:\nAn identical copy of tensor_fetches.", "source": "github-repos"}
{"code": "def recipe_sheets_to_bigquery(config, auth_read, auth_write, sheets_url, sheets_tab, sheets_range, dataset, table, sheets_header):\n    sheets(config, {'auth': auth_read, 'sheet': sheets_url, 'tab': sheets_tab, 'range': sheets_range, 'header': sheets_header, 'out': {'auth': auth_write, 'bigquery': {'dataset': dataset, 'table': table}}})", "docstring": "Import data from a sheet and move it to a BigQuery table.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Credentials used for writing data.\nsheets_url (string) - NA\nsheets_tab (string) - NA\nsheets_range (string) - NA\ndataset (string) - NA\ntable (string) - NA\nsheets_header (boolean) - NA", "source": "github-repos"}
{"code": "def list_keyvaults_sub(access_token, subscription_id):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API])\n    return do_get_next(endpoint, access_token)", "docstring": "Lists key vaults belonging to this subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. 200 OK.", "source": "codesearchnet"}
{"code": "def add_average_summary(self, var, tag=None, decay=0.999, ignore_nan=True):\n    if (not self.summary_collections):\n        return\n    with self.g.as_default():\n        if ((decay < 0.9) or (decay >= 1.0)):\n            raise ValueError(('Decay is %5.2f, but has to be in [0, 1).' % decay))\n        avg_var = self.exponential_moving_average(var, decay=decay, ignore_nan=ignore_nan)\n        if (tag is None):\n            tag = _bare_var_name(avg_var)\n            tag = self.g.unique_name(tag)\n        self.add_scalar_summary(avg_var, tag)\n        return avg_var", "docstring": "Add a summary with the moving average of var.\n\nAdds a variable to keep track of the exponential moving average and adds an\nupdate operation to the bookkeeper. The name of the variable is\n'%s_average' % name prefixed with the current variable scope.\n\nArgs:\nvar: The variable for which a moving average should be computed.\ntag: The tag of the summary. If None var.name[:-2] is used to strip off\nthe ':0' that is added by TF.\ndecay: How much history to use in the moving average.\nHigher, means more history values [0.9, 1) accepted.\nignore_nan: If the value is NaN or Inf, skip it. Note that this default\nis different than the exponential_moving_average one.\nReturns:\nThe averaged variable.\nRaises:\nValueError: if decay is not in [0.9, 1).", "source": "codesearchnet"}
{"code": "def find_emails_by_subject(self, subject, limit=50, match_recipient=None):\n    self._mail.select('inbox')\n    matching_uids = self.__search_email_by_subject(subject, match_recipient)\n    return matching_uids", "docstring": "Searches for Email by Subject.  Returns email's imap message IDs\nas a list if matching subjects is found.\n\nArgs:\nsubject (str) - Subject to search for.\n\nKwargs:\nlimit (int) - Limit search to X number of matches, default 50\nmatch_recipient (str) - Recipient to exactly (don't care if not specified)\n\nReturns:\nlist - List of Integers representing imap message UIDs.", "source": "codesearchnet"}
{"code": "def QueryAndOwn(self, queue, lease_seconds=10, limit=1):\n    \n    with self.data_store.GetMutationPool() as mutation_pool:\n      return mutation_pool.QueueQueryAndOwn(queue, lease_seconds, limit,\n                                            self.frozen_timestamp)", "docstring": "Returns a list of Tasks leased for a certain time.\n\nArgs:\nqueue: The queue to query from.\nlease_seconds: The tasks will be leased for this long.\nlimit: Number of values to fetch.\n\nReturns:\nA list of GrrMessage() objects leased.", "source": "juraj-google-style"}
{"code": "def _from_record(data):\n    if isinstance(data, dict):\n        return Schema._from_dict_record(data)\n    elif isinstance(data, list):\n        return Schema._from_list_record(data)\n    else:\n        raise Exception(('Cannot create a schema from record %s' % str(data)))", "docstring": "Infer a BigQuery table schema from a list of fields or a dictionary. The typeof the elements\nis used. For a list, the field names are simply 'Column1', 'Column2', etc.\n\nArgs:\ndata: The list of fields or dictionary.\nReturns:\nA list of dictionaries containing field 'name' and 'type' entries, suitable for use in a\nBigQuery Tables resource schema.", "source": "codesearchnet"}
{"code": "def next_weekday(date):\n    \n    n_days = 7 - date.weekday()\n    if n_days > 3:\n        n_days = 1\n    return date + datetime.timedelta(days=n_days)", "docstring": "Return the first weekday after date\n\nArgs:\ndate (datetime or datetime.date)\nReturns:\n(datetime or datetime.date)\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def define_saver(exclude=None):\n    variables = []\n    exclude = (exclude or [])\n    exclude = [re.compile(regex) for regex in exclude]\n    for variable in tf.global_variables():\n        if any((regex.match(variable.name) for regex in exclude)):\n            continue\n        variables.append(variable)\n    saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)\n    return saver", "docstring": "Create a saver for the variables we want to checkpoint.\n\nArgs:\nexclude: List of regexes to match variable names to exclude.\n\nReturns:\nSaver object.", "source": "codesearchnet"}
{"code": "def setPollingValues(self, max_waits, wait_sleep):\n    self.m_max_waits = max_waits\n    self.m_wait_sleep = wait_sleep", "docstring": "Optional polling loop control\n\nArgs:\nmax_waits (int):   waits\nwait_sleep (int):  ms per wait", "source": "codesearchnet"}
{"code": "def lint(exclude, skip_untracked, commit_only):\n    exclude = (list(exclude) + conf.get('lint.exclude', []))\n    runner = LintRunner(exclude, skip_untracked, commit_only)\n    if (not runner.run()):\n        exit(1)", "docstring": "Lint python files.\n\nArgs:\nexclude (list[str]):\nA list of glob string patterns to test against. If the file/path\nmatches any of those patters, it will be filtered out.\nskip_untracked (bool):\nIf set to **True** it will skip all files not tracked by git.\ncommit_only (bool):\nOnly lint files that are staged for commit.", "source": "codesearchnet"}
{"code": "def store_object(self, obj):\n    self._check_obj_properties(obj)\n    with transaction.manager:\n        self._put_into_indexes(obj)", "docstring": "Save `obj` into database and into proper indexes.\n\nAttr:\nobj (obj): Indexable object.\n\nRaises:\nInvalidType: When the `obj` doesn't have right properties.\nUnindexableobjlication: When there is no indexes defined.", "source": "codesearchnet"}
{"code": "def add_parser(self, func=None, name=None, **kwargs):\n    if func:\n        if (not func.__doc__):\n            raise ValueError('No docstrings given in {0}'.format(func.__name__))\n        info = _parse_doc(func.__doc__)\n        if ((_HELP not in kwargs) or (not kwargs[_HELP])):\n            kwargs[_HELP] = info['headline']\n        if ((_DESCRIPTION not in kwargs) or (not kwargs[_DESCRIPTION])):\n            kwargs[_DESCRIPTION] = info['description']\n        if ((_FORMAT_CLASS not in kwargs) or (not kwargs[_FORMAT_CLASS])):\n            kwargs[_FORMAT_CLASS] = argparse.RawTextHelpFormatter\n        if (not name):\n            name = (func.__name__ if hasattr(func, '__name__') else func)\n        res = self.__delegate.add_parser(name, argmap=info['args'], **kwargs)\n        res.set_defaults(cmd=func)\n    else:\n        res = self.__delegate.add_parser(name, **kwargs)\n    return res", "docstring": "Add parser.\n\nThis method makes a new sub command parser. It takes same arguments\nas add_parser() of the action class made by\nargparse.ArgumentParser.add_subparsers.\n\nIn addition to, it takes one positional argument `func`, which is the\nfunction implements process of this sub command. The `func` will be used\nto determine the name, help, and description of this sub command. The\nfunction `func` will also be set as a default value of `cmd` attribute.\n\nIf you want to choose name of this sub command, use keyword argument\n`name`.\n\nArgs:\nfunc: function implements the process of this command.\nname: name of this command. If not give, the function name is used.\n\nReturns:\nnew ArgumentParser object.\n\nRaises:\nValueError: if the given function does not have docstrings.", "source": "codesearchnet"}
{"code": "def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:\n        \n        series = table[self.name]\n\n        self._check_series_name(series)\n\n        validators = self.validators\n\n        results = pd.DataFrame({validator: series for validator in validators}, index=series.index)\n\n        for name, func in validators.items():\n            results[name] = func(results[name])\n\n        results['dtype'] = self._validate_series_dtype(series)\n\n        if self.unique:\n            results['unique'] = v.funcs.unique(series)\n\n        if failed_only:\n            results = find_failed_rows(results)\n\n        return results", "docstring": "Return a dataframe of validation results for the appropriate series vs the vector of validators.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply validation logic.\nfailed_only (bool): If ``True``: return only the indexes that failed to validate.", "source": "juraj-google-style"}
{"code": "def raster_to_asc(raster_f, asc_f):\n    raster_r = RasterUtilClass.read_raster(raster_f)\n    RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows, raster_r.geotrans, raster_r.noDataValue)", "docstring": "Converting Raster format to ASCII raster.\n\nArgs:\nraster_f: raster file.\nasc_f: output ASCII file.", "source": "codesearchnet"}
{"code": "def convert_field(field_names: Optional[List[str]]=None):\n    if field_names is None:\n        field_names = []\n\n    def convert_field_decorator(convert_method):\n        convert_method.convert_field_names = field_names\n\n        @functools.wraps(convert_method)\n        def convert_field_wrapper(self, src_proto, dest_proto):\n            convert_method(self, src_proto, dest_proto)\n        return convert_field_wrapper\n    return convert_field_decorator", "docstring": "Decorator that converts proto fields.\n\nArgs:\nfield_names: list of field names from src proto this function handles.\n\nReturns:\nconvert_field_decorator\n\nTypical usage example:\n\n@converter.convert_field(field_names=[\"hello\"])\ndef hello_convert_function(self, src_proto, dest_proto):\n...", "source": "github-repos"}
{"code": "def as_pyplot_figure(self, label=1, **kwargs):\n    import matplotlib.pyplot as plt\n    exp = self.as_list(label=label, **kwargs)\n    fig = plt.figure()\n    vals = [x[1] for x in exp]\n    names = [x[0] for x in exp]\n    vals.reverse()\n    names.reverse()\n    colors = [('green' if (x > 0) else 'red') for x in vals]\n    pos = (np.arange(len(exp)) + 0.5)\n    plt.barh(pos, vals, align='center', color=colors)\n    plt.yticks(pos, names)\n    if (self.mode == 'classification'):\n        title = ('Local explanation for class %s' % self.class_names[label])\n    else:\n        title = 'Local explanation'\n    plt.title(title)\n    return fig", "docstring": "Returns the explanation as a pyplot figure.\n\nWill throw an error if you don't have matplotlib installed\nArgs:\nlabel: desired label. If you ask for a label for which an\nexplanation wasn't computed, will throw an exception.\nWill be ignored for regression explanations.\nkwargs: keyword arguments, passed to domain_mapper\n\nReturns:\npyplot figure (barchart).", "source": "codesearchnet"}
{"code": "def metadata(self, path):\n    try:\n        file_metadata = s3io.S3IO(options=self._options)._status(path)\n        return FileMetadata(path, file_metadata['size'], file_metadata['last_updated'])\n    except Exception as e:\n        raise BeamIOError('Metadata operation failed', {path: e})", "docstring": "Fetch metadata fields of a file on the FileSystem.\n\nArgs:\npath: string path of a file.\n\nReturns:\n:class:`~apache_beam.io.filesystem.FileMetadata`.\n\nRaises:\n``BeamIOError``: if path isn't a file or doesn't exist.", "source": "github-repos"}
{"code": "def FillDeviceAttributes(device, descriptor):\n  \n  attributes = HidAttributes()\n  result = hid.HidD_GetAttributes(device, ctypes.byref(attributes))\n  if not result:\n    raise ctypes.WinError()\n\n  buf = ctypes.create_string_buffer(1024)\n  result = hid.HidD_GetProductString(device, buf, 1024)\n\n  if not result:\n    raise ctypes.WinError()\n\n  descriptor.vendor_id = attributes.VendorID\n  descriptor.product_id = attributes.ProductID\n  descriptor.product_string = ctypes.wstring_at(buf)", "docstring": "Fill out the attributes of the device.\n\nFills the devices HidAttributes and product string\ninto the descriptor.\n\nArgs:\ndevice: A handle to the open device\ndescriptor: The DeviceDescriptor to populate with the\nattributes.\n\nReturns:\nNone\n\nRaises:\nWindowsError when unable to obtain attributes or product\nstring.", "source": "juraj-google-style"}
{"code": "def __init__(self, start, size, name=\"merge_dims\"):\n    \n    super(MergeDims, self).__init__(name=name)\n    self._start = start\n    self._size = size\n\n    \n    if size <= 1:\n      raise ValueError(\"`size` should be strictly greater than 1.\")", "docstring": "Constructs the MergeDims module.\n\nArgs:\nstart: Start of the range of dimensions to merge.\nsize: Size the range of dimensions to merge.\nname: The name of the module.\n\nRaises:\nValueError: If `size` is not strictly greater than 1.", "source": "juraj-google-style"}
{"code": "def _filter_valid_filepaths(self, df, x_col):\n    filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname))\n    mask = filepaths.apply(validate_filename, args=(self.white_list_formats,))\n    n_invalid = (~mask).sum()\n    if n_invalid:\n        warnings.warn('Found {} invalid image filename(s) in x_col=\"{}\". These filename(s) will be ignored.'.format(n_invalid, x_col))\n    return df[mask]", "docstring": "Keep only dataframe rows with valid filenames.\n\nArgs:\ndf: Pandas dataframe containing filenames in a column\nx_col: string, column in `df` that contains the filenames or\nfilepaths\nReturns:\nabsolute paths to image files", "source": "github-repos"}
{"code": "def ParseNotificationcenterRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = MacNotificationCenterEventData()\n    event_data.bundle_name = self._GetRowValue(query_hash, row, 'bundle_name')\n    event_data.presented = self._GetRowValue(query_hash, row, 'presented')\n\n    blob = self._GetRowValue(query_hash, row, 'dataBlob')\n\n    try:\n      full_biplist = biplist.readPlistFromString(blob)\n      \n      \n      req = full_biplist['req']\n\n    except (biplist.InvalidPlistException, KeyError) as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to read plist from database with error: {0!s}'.format(\n              exception))\n      return\n\n    event_data.title = req.get('titl', None)\n    event_data.subtitle = req.get('subt', None)\n    event_data.body = req.get('body', None)\n\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a message row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def from_dict(cls, metadata):\n    hyperparameters = metadata.get('hyperparameters')\n    tunable = metadata.get('tunable_hyperparameters')\n    pipeline = cls(metadata['primitives'], metadata.get('init_params'), metadata.get('input_names'), metadata.get('output_names'))\n    if hyperparameters:\n        pipeline.set_hyperparameters(hyperparameters)\n    if (tunable is not None):\n        pipeline._tunable_hyperparameters = tunable\n    return pipeline", "docstring": "Create a new MLPipeline from a dict specification.\n\nThe dict structure is the same as the one created by the `to_dict` method.\n\nArgs:\nmetadata (dict): Dictionary containing the pipeline specification.\n\nReturns:\nMLPipeline:\nA new MLPipeline instance with the details found in the\ngiven specification dictionary.", "source": "codesearchnet"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):\n    vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)\n    image_embeds = vision_outputs[0]\n    image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n    query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n    query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)\n    query_output = query_outputs[0]\n    if query_output.dtype != image_embeds.dtype:\n        query_output = query_output.to(image_embeds.dtype)\n    language_model_inputs = self.language_projection(query_output)\n    if return_dict:\n        return (language_model_inputs, vision_outputs, query_outputs)\n    return language_model_inputs", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.", "source": "github-repos"}
{"code": "def _parse_single_video(self, example_proto):\n    \n    context_features = {\n        \"game_duration_loops\": tf.io.FixedLenFeature([1], tf.int64),\n        \"game_duration_seconds\": tf.io.FixedLenFeature([1], tf.float32),\n        \"n_steps\": tf.io.FixedLenFeature([1], tf.int64),\n        \"screen_size\": tf.io.FixedLenFeature([2], tf.int64),\n    }\n\n    sequence_features = {\n        \"rgb_screen\": tf.io.FixedLenSequenceFeature([], tf.string),\n    }\n\n    _, seq_feat = tf.io.parse_single_sequence_example(\n        example_proto,\n        context_features=context_features,\n        sequence_features=sequence_features)\n\n    video_frames = tf.map_fn(\n        tf.image.decode_png, seq_feat[\"rgb_screen\"], dtype=tf.uint8)\n    return video_frames", "docstring": "Parses single video from the input tfrecords.\n\nArgs:\nexample_proto: tfExample proto with a single video.\n\nReturns:\ndict with all frames, positions and actions.", "source": "juraj-google-style"}
{"code": "def auth_user(self, username, password):\n    response = self._post((self.rest_url + '/authentication'), data=json.dumps({'value': password}), params={'username': username})\n    if (not response.ok):\n        return None\n    return response.json()", "docstring": "Authenticate a user account against the Crowd server.\n\nAttempts to authenticate the user against the Crowd server.\n\nArgs:\nusername: The account username.\n\npassword: The account password.\n\nReturns:\ndict:\nA dict mapping of user attributes if the application\nauthentication was successful. See the Crowd documentation\nfor the authoritative list of attributes.\n\nNone: If authentication failed.", "source": "codesearchnet"}
{"code": "def compute_k(self, memory_antecedent):\n    if self.shared_kv:\n        raise ValueError('compute_k cannot be called with shared_kv')\n    ret = mtf.einsum([memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])\n    if self.combine_dims:\n        ret = mtf.replace_dimensions(ret, ret.shape.dims[(- 1)], self.k_dims)\n    return ret", "docstring": "Compute key Tensor k.\n\nArgs:\nmemory_antecedent: a Tensor with dimensions\n{memory_input_dim} + other_dims\nReturns:\na Tensor with dimensions\nmemory_heads_dims + {key_dim} + other_dims", "source": "codesearchnet"}
{"code": "def most_exposes(python_type):\n    \n    _exposes = set()\n    try:\n        \n        do_not_expose = set(python_type.__dir__(object) + \\\n            ['__slots__', '__module__', '__weakref__']) \n        empty = python_type.__new__(python_type) \n    except AttributeError: \n        try:\n            _exposes = python_type.__slots__\n        except AttributeError:\n            pass\n    except TypeError: \n        for _workaround in storable_workarounds:\n            try:\n                _exposes = _workaround(python_type)\n            except (SystemExit, KeyboardInterrupt):\n                raise\n            except:\n                pass\n            else:\n                break\n    else:\n        \n        \n        all_members = empty.__dir__() \n        for attr in all_members:\n            if attr in do_not_expose:\n                \n                continue\n            try: \n                getattr(empty, attr)\n            except AttributeError as e: \n                \n                \n                if e.args:\n                    msg = e.args[0]\n                    if msg == attr or msg.endswith(\"' object has no attribute '{}'\".format(attr)):\n                        _exposes.add(attr)\n            except (SystemExit, KeyboardInterrupt):\n                raise\n            except:\n                pass\n        for attr in ('__dict__',):\n            if attr in all_members:\n                _exposes.add(attr)\n    return list(_exposes)", "docstring": "Core engine for the automatic generation of storable instances.\n\nFinds the attributes exposed by the objects of a given type.\n\nMostly Python3-only.\nDoes not handle types which `__new__` method requires extra arguments either.\n\nArguments:\n\npython_type (type): object type.\n\nReturns:\n\nlist: attributes exposed.", "source": "juraj-google-style"}
{"code": "def set_tensor_final(self, tensor_name):\n    \n    tensor = self._name_to_tensor(tensor_name)\n    self._final_tensors.add(tensor)", "docstring": "Denotes a tensor as a final output of the computation.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.", "source": "juraj-google-style"}
{"code": "def get_context(self, max_frames=None, missing_entities=[]):\n        \n        if not max_frames or max_frames > len(self.frame_stack):\n            max_frames = len(self.frame_stack)\n\n        missing_entities = list(missing_entities)\n        context = []\n        for i in xrange(max_frames):\n            frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]\n            for entity in frame_entities:\n                entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i)\n            context += frame_entities\n\n        result = []\n        if len(missing_entities) > 0:\n            for entity in context:\n                if entity.get('data') in missing_entities:\n                    result.append(entity)\n                    \n                    \n                    \n                    \n                    missing_entities.remove(entity.get('data'))\n        else:\n            result = context\n\n        return result", "docstring": "Constructs a list of entities from the context.\n\nArgs:\nmax_frames(int): maximum number of frames to look back\nmissing_entities(list of str): a list or set of tag names, as strings\n\nReturns:\nlist: a list of entities", "source": "juraj-google-style"}
{"code": "def nth(series, n, order_by=None):\n    \n\n    if order_by is not None:\n        series = order_series_by(series, order_by)\n    try:\n        return series.iloc[n]\n    except:\n        return np.nan", "docstring": "Returns the nth value of a series.\n\nArgs:\nseries (pandas.Series): column to summarize.\nn (integer): position of desired value. Returns `NaN` if out of range.\n\nKwargs:\norder_by: a pandas.Series or list of series (can be symbolic) to order\nthe input series by before summarization.", "source": "juraj-google-style"}
{"code": "def getProperty(self, orgresource, dummy = 56184):\n        \n\n        url = nurls['getProperty']\n\n        data = {'userid': self.user_id,\n                'useridx': self.useridx,\n                'dummy': dummy,\n                'orgresource': orgresource,\n                }\n\n        r = self.session.post(url = url, data = data)\n        j = json.loads(r.text)\n\n        if self.resultManager(r.text):\n            f = FileInfo()\n            result = j['resultvalue']\n\n            f.resourcetype = result['resourcetype']\n            f.resourceno = result['resourceno']\n\n            return f\n\n        else:\n            return False", "docstring": "GetProperty\n\nArgs:\ndummy: ???\norgresource: File path\n\nReturns:\nFileInfo object:\nFalse: Failed to get property", "source": "juraj-google-style"}
{"code": "def fetch_github_pull_request(destination_directory: str,\n                              repository: github_repository.GithubRepository,\n                              pull_request_number: int,\n                              verbose: bool\n                              ) -> prepared_env.PreparedEnv:\n    \n\n    branch = 'pull/{}/head'.format(pull_request_number)\n    os.chdir(destination_directory)\n    print('chdir', destination_directory, file=sys.stderr)\n\n    shell_tools.run_cmd(\n        'git',\n        'init',\n        None if verbose else '--quiet',\n        out=sys.stderr)\n    result = _git_fetch_for_comparison(remote=repository.as_remote(),\n                                       actual_branch=branch,\n                                       compare_branch='master',\n                                       verbose=verbose)\n    shell_tools.run_cmd(\n        'git',\n        'branch',\n        None if verbose else '--quiet',\n        'compare_commit',\n        result.compare_commit_id,\n        log_run_to_stderr=verbose)\n    shell_tools.run_cmd(\n        'git',\n        'checkout',\n        None if verbose else '--quiet',\n        '-b',\n        'actual_commit',\n        result.actual_commit_id,\n        log_run_to_stderr=verbose)\n    return prepared_env.PreparedEnv(\n        github_repo=repository,\n        actual_commit_id=result.actual_commit_id,\n        compare_commit_id=result.compare_commit_id,\n        destination_directory=destination_directory,\n        virtual_env_path=None)", "docstring": "Uses content from github to create a dir for testing and comparisons.\n\nArgs:\ndestination_directory: The location to fetch the contents into.\nrepository: The github repository that the commit lives under.\npull_request_number: The id of the pull request to clone. If None, then\nthe master branch is cloned instead.\nverbose: When set, more progress output is produced.\n\nReturns:\nCommit ids corresponding to content to test/compare.", "source": "juraj-google-style"}
{"code": "def get_send_request_correct_body(self, path, action):\n    (path_name, path_spec) = self.get_path_spec(path)\n    if ((path_spec is not None) and (action in path_spec.keys())):\n        for (name, spec) in path_spec[action]['parameters'].items():\n            if (spec['in'] == 'body'):\n                if ('type' in spec.keys()):\n                    return self.get_example_from_prop_spec(spec)\n                elif ('schema' in spec.keys()):\n                    if (('type' in spec['schema'].keys()) and (spec['schema']['type'] == 'array')):\n                        if ('$ref' in spec['schema']['items']):\n                            definition_name = self.get_definition_name_from_ref(spec['schema']['items']['$ref'])\n                            return [self.definitions_example[definition_name]]\n                        else:\n                            definition_name = self.get_definition_name_from_ref(spec['schema']['items']['type'])\n                            return [definition_name]\n                    elif ('type' in spec['schema'].keys()):\n                        return self.get_example_from_prop_spec(spec['schema'])\n                    else:\n                        definition_name = self.get_definition_name_from_ref(spec['schema']['$ref'])\n                        return self.definitions_example[definition_name]", "docstring": "Get an example body which is correct to send to the given path with the given action.\n\nArgs:\npath: path of the request\naction: action of the request (get, post, put, delete)\n\nReturns:\nA dict representing a correct body for the request or None if no\nbody is required.", "source": "codesearchnet"}
{"code": "def assert_key_has_value(self, key, caller):\n        \n        assert key, (\"key parameter must be specified.\")\n        self.assert_key_exists(key, caller)\n\n        if self[key] is None:\n            raise KeyInContextHasNoValueError(\n                f\"context['{key}'] must have a value for {caller}.\")", "docstring": "Assert that context contains key which also has a value.\n\nArgs:\nkey: validate this key exists in context AND has a value that isn't\nNone.\ncaller: string. calling function name - this used to construct\nerror messages\n\nRaises:\nKeyNotInContextError: Key doesn't exist\nKeyInContextHasNoValueError: context[key] is None\nAssertionError: if key is None", "source": "juraj-google-style"}
{"code": "def is_valid(draco_query: List[str], debug=False) -> bool:\n    \n    _, stdout = run_clingo(\n        draco_query,\n        files=[\"define.lp\", \"hard.lp\", \"hard-integrity.lp\"],\n        silence_warnings=True,\n        debug=debug,\n    )\n\n    return json.loads(stdout)[\"Result\"] != \"UNSATISFIABLE\"", "docstring": "Check a task.\nArgs:\ndraco_query: a list of facts\nReturns:\nwhether the task is valid", "source": "juraj-google-style"}
{"code": "def unique(seen, *iterables):\n    _add = seen.add\n    return (i for i in chain(*iterables) if ((i not in seen) and (not _add(i))))", "docstring": "Get the unique items in iterables while preserving order.  Note that this\nmutates the seen set provided only when the returned generator is used.\n\nArgs:\nseen (set): either an empty set, or the set of things already seen\n*iterables: one or more iterable lists to chain together\n\nReturns:\ngenerator:", "source": "codesearchnet"}
{"code": "def find_furious_yaml(config_file=__file__):\n    checked = set()\n    result = _find_furious_yaml(os.path.dirname(config_file), checked)\n    if (not result):\n        result = _find_furious_yaml(os.getcwd(), checked)\n    return result", "docstring": "Traverse directory trees to find a furious.yaml file\n\nBegins with the location of this file then checks the\nworking directory if not found\n\nArgs:\nconfig_file: location of this file, override for\ntesting\nReturns:\nthe path of furious.yaml or None if not found", "source": "codesearchnet"}
{"code": "def setAccessPolicy(self, pid, accessPolicy, serialVersion, vendorSpecific=None):\n        \n        response = self.setAccessPolicyResponse(\n            pid, accessPolicy, serialVersion, vendorSpecific\n        )\n        return self._read_boolean_response(response)", "docstring": "See Also: setAccessPolicyResponse()\n\nArgs:\npid:\naccessPolicy:\nserialVersion:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _sign_input(cls, input_, message, key_pairs):\n    if isinstance(input_.fulfillment, Ed25519Sha256):\n        return cls._sign_simple_signature_fulfillment(input_, message, key_pairs)\n    elif isinstance(input_.fulfillment, ThresholdSha256):\n        return cls._sign_threshold_signature_fulfillment(input_, message, key_pairs)\n    else:\n        raise ValueError(\"Fulfillment couldn't be matched to Cryptocondition fulfillment type.\")", "docstring": "Signs a single Input.\n\nNote:\nThis method works only for the following Cryptoconditions\ncurrently:\n- Ed25519Fulfillment\n- ThresholdSha256.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The Input to be signed.\nmessage (str): The message to be signed\nkey_pairs (dict): The keys to sign the Transaction with.", "source": "codesearchnet"}
{"code": "def _FormatMessageShort(self, event):\n    \n    _, message_short = self._output_mediator.GetFormattedMessages(event)\n    if message_short is None:\n      data_type = getattr(event, 'data_type', 'UNKNOWN')\n      raise errors.NoFormatterFound(\n          'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n    return message_short", "docstring": "Formats the short message.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: short message field.\n\nRaises:\nNoFormatterFound: if no event formatter can be found to match the data\ntype in the event.", "source": "juraj-google-style"}
{"code": "def FlagCxx11Features(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    include = Match('\\\\s*\n    if (include and (include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error'))):\n        error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.' % include.group(1)))\n    if (Match('\\\\s*\n        return\n    for top_name in ('alignment_of', 'aligned_union'):\n        if Search(('\\\\bstd::%s\\\\b' % top_name), line):\n            error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function.  Send c-style an example of where it would make your code more readable, and they may let you use it.' % top_name))", "docstring": "Flag those c++11 features that we only allow in certain places.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def load_users(path=settings.LOGIN_FILE):\n    if (not os.path.exists(path)):\n        return {}\n    data = ''\n    with open(path) as f:\n        data = f.read().splitlines()\n    users = {}\n    cnt = 1\n    for line in data:\n        line = line.split(':')\n        assert (len(line) == 7), (\"Bad number of fields in '%s', at line %d!\" % (path, cnt))\n        users[line[0]] = {'pass_hash': line[1], 'uid': line[2], 'gid': line[3], 'full_name': line[4], 'home': line[5], 'shell': line[6]}\n        cnt += 1\n    return users", "docstring": "Read passwd file and return dict with users and all their settings.\n\nArgs:\npath (str, default settings.LOGIN_FILE): path of the file,\nwhich will be loaded (default :attr:`ftp.settings.LOGIN_FILE`).\n\nReturns:\n(dict): username: {pass_hash, uid, gid, full_name, home, shell}\n\nExample of returned data::\n\n{\n\"xex\": {\n\"pass_hash\": \"$asd$aiosjdaiosjdásghwasdjo\",\n\"uid\": \"2000\",\n\"gid\": \"2000\",\n\"full_name\": \"ftftf\",\n\"home\": \"/home/ftp/xex\",\n\"shell\": \"/bin/false\"\n}\n}", "source": "codesearchnet"}
{"code": "def GetFileEntryByPathSpec(self, path_spec):\n    \n    return compressed_stream_file_entry.CompressedStreamFileEntry(\n        self._resolver_context, self, path_spec, is_root=True, is_virtual=True)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nCompressedStreamFileEntry: a file entry or None if not available.", "source": "juraj-google-style"}
{"code": "def python_graph(self):\n    return self._python_graph", "docstring": "Get the Python graph.\n\nReturns:\nIf the Python graph has been set, returns a `tf.Graph` object. Otherwise,\nreturns None.", "source": "github-repos"}
{"code": "class AsList(AsSideInput):\n\n    @staticmethod\n    def _from_runtime_iterable(it, options):\n        return list(it)\n\n    def _side_input_data(self) -> SideInputData:\n        return SideInputData(common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, list)", "docstring": "Marker specifying that an entire PCollection is to be used as a side input.\n\nIntended for use in side-argument specification---the same places where\nAsSingleton and AsIter are used, but forces materialization of this\nPCollection as a list.\n\nArgs:\npcoll: Input pcollection.\n\nReturns:\nAn AsList-wrapper around a PCollection whose one element is a list\ncontaining all elements in pcoll.", "source": "github-repos"}
{"code": "def avg(self, vars_list: List[str]) -> 'TensorFluent':\n        \n        operand = self\n        if operand.dtype == tf.bool:\n            operand = operand.cast(tf.float32)\n        return self._aggregation_op(tf.reduce_mean, operand, vars_list)", "docstring": "Returns the TensorFluent for the avg aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the avg aggregation function.", "source": "juraj-google-style"}
{"code": "def get_subgraph_for_concept(self, concept: str, depth: int=1, reverse: bool=False):\n    nodeset = {concept}\n    if reverse:\n        func = self.predecessors\n    else:\n        func = self.successors\n    for i in range(depth):\n        nodeset.update(chain.from_iterable([list(func(n)) for n in nodeset]))\n    return AnalysisGraph(self.subgraph(nodeset).copy())", "docstring": "Returns a new subgraph of the analysis graph for a single concept.\n\nArgs:\nconcept: The concept that the subgraph will be centered around.\ndepth: The depth to which the depth-first search must be performed.\n\nreverse: Sets the direction of causal influence flow to examine.\nSetting this to False (default) will search for upstream causal\ninfluences, and setting it to True will search for\ndownstream causal influences.\n\nReturns:\nAnalysisGraph", "source": "codesearchnet"}
{"code": "def convert_obatoms_to_molecule(self, atoms, residue_name=None, site_property=\"ff_map\"):\n        \n\n        restore_site_props = True if residue_name is not None else False\n\n        if restore_site_props and not hasattr(self, \"map_residue_to_mol\"):\n            self._set_residue_map()\n\n        coords = []\n        zs = []\n        for atm in atoms:\n            coords.append(list(atm.coords))\n            zs.append(atm.atomicnum)\n\n        mol = Molecule(zs, coords)\n\n        if restore_site_props:\n\n            props = []\n\n            ref = self.map_residue_to_mol[residue_name].copy()\n\n            \n            assert len(mol) == len(ref)\n            assert ref.formula == mol.formula\n\n            \n            for i, site in enumerate(mol):\n                assert site.specie.symbol == ref[i].specie.symbol\n                props.append(getattr(ref[i], site_property))\n\n            mol.add_site_property(site_property, props)\n\n        return mol", "docstring": "Convert list of openbabel atoms to MOlecule.\n\nArgs:\natoms ([OBAtom]): list of OBAtom objects\nresidue_name (str): the key in self.map_residue_to_mol. Usec to\nrestore the site properties in the final packed molecule.\nsite_property (str): the site property to be restored.\n\nReturns:\nMolecule object", "source": "juraj-google-style"}
{"code": "def build_configuration(self):\n    configuration = config.Configuration()\n    pegtree = pegnode.parse(self.filestring)\n    for section_node in pegtree:\n        if isinstance(section_node, pegnode.GlobalSection):\n            configuration.globall = self.build_global(section_node)\n        elif isinstance(section_node, pegnode.FrontendSection):\n            configuration.frontends.append(self.build_frontend(section_node))\n        elif isinstance(section_node, pegnode.DefaultsSection):\n            configuration.defaults.append(self.build_defaults(section_node))\n        elif isinstance(section_node, pegnode.ListenSection):\n            configuration.listens.append(self.build_listen(section_node))\n        elif isinstance(section_node, pegnode.UserlistSection):\n            configuration.userlists.append(self.build_userlist(section_node))\n        elif isinstance(section_node, pegnode.BackendSection):\n            configuration.backends.append(self.build_backend(section_node))\n    return configuration", "docstring": "Parse the haproxy config file\n\nRaises:\nException: when there are unsupported section\n\nReturns:\nconfig.Configuration: haproxy config object", "source": "codesearchnet"}
{"code": "def get_configuration(self, uri):\n        \n\n        req_headers = {\n            'Accept': 'application/vnd.onshape.v1+json',\n            'Content-Type': 'application/json'\n        }\n        return self._api.request('get', '/api/partstudios/d/' + uri[\"did\"] + '/' + uri[\"wvm_type\"] + '/' + uri[\"wvm\"] + '/e/' + uri[\"eid\"] + '/configuration', headers=req_headers)", "docstring": "get the configuration of a PartStudio\n\nArgs:\n- uri (dict): points to a particular element\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"}
{"code": "def AddMapping(self, filename, new_mapping):\n    \n    for field in self._REQUIRED_MAPPING_FIELDS:\n      if field not in new_mapping:\n        raise problems.InvalidMapping(field)\n    if filename in self.GetKnownFilenames():\n      raise problems.DuplicateMapping(filename)\n    self._file_mapping[filename] = new_mapping", "docstring": "Adds an entry to the list of known filenames.\n\nArgs:\nfilename: The filename whose mapping is being added.\nnew_mapping: A dictionary with the mapping to add. Must contain all\nfields in _REQUIRED_MAPPING_FIELDS.\nRaises:\nDuplicateMapping if the filename already exists in the mapping\nInvalidMapping if not all required fields are present", "source": "juraj-google-style"}
{"code": "def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n    output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states\n    output_attentions = output_attentions if output_attentions else self.config.output_attentions\n    return_dict = return_dict if return_dict else self.config.return_dict\n    outputs = self.wav2vec2(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n    return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoProcessor, TFWav2Vec2Model\n>>> from datasets import load_dataset\n>>> import soundfile as sf\n\n>>> processor = AutoProcessor.from_pretrained(\"facebook/wav2vec2-base-960h\")\n>>> model = TFWav2Vec2Model.from_pretrained(\"facebook/wav2vec2-base-960h\")\n\n\n>>> def map_to_array(batch):\n...     speech, _ = sf.read(batch[\"file\"])\n...     batch[\"speech\"] = speech\n...     return batch\n\n\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> ds = ds.map(map_to_array)\n\n>>> input_values = processor(ds[\"speech\"][0], return_tensors=\"tf\").input_values  # Batch size 1\n>>> hidden_states = model(input_values).last_hidden_state\n```", "source": "github-repos"}
{"code": "def EnumValueName(self, enum, value):\n    \n    return self.enum_types_by_name[enum].values_by_number[value].name", "docstring": "Returns the string name of an enum value.\n\nThis is just a small helper method to simplify a common operation.\n\nArgs:\nenum: string name of the Enum.\nvalue: int, value of the enum.\n\nReturns:\nstring name of the enum value.\n\nRaises:\nKeyError if either the Enum doesn't exist or the value is not a valid\nvalue for the enum.", "source": "juraj-google-style"}
{"code": "def get_weights(self):\n    self._check_sess()\n    return {k: v.eval(session=self.sess) for (k, v) in self.variables.items()}", "docstring": "Returns a dictionary containing the weights of the network.\n\nReturns:\nDictionary mapping variable names to their weights.", "source": "codesearchnet"}
{"code": "def __init__(self, **kwinfo):\n        \n        self._author_fakename = getpass.getuser()\n        self._author_truename = ProjectInfo.find_pakcage_info(\n            'author', SRC_FOLDER, PROJECT_NAME, '__init__.py')\n        self._email = ProjectInfo.find_pakcage_info(\n            'email', SRC_FOLDER, PROJECT_NAME, '__init__.py')\n        self._project_name = os.path.basename(\n            os.path.dirname(os.path.realpath(__file__)))\n        self._project_version = ProjectInfo.find_pakcage_info(\n            'version', SRC_FOLDER, PROJECT_NAME, '__init__.py')\n\n        for key, info in kwinfo.items():\n            key = '_' + key\n            setattr(self, key, info)", "docstring": "init project info\n\nArgs:\nauthor_fakename (str): TODO\nauthor_truename (str): TODO\nemail (str): TODO\nproject_name (str): TODO\nproject_version (str): TODO", "source": "juraj-google-style"}
{"code": "def mean_centroid(candidates):\n    sum_x = 0.0\n    sum_y = 0.0\n    for (centroid_x, centroid_y, _, _) in candidates:\n        sum_x += centroid_x\n        sum_y += centroid_y\n    denom = (3.0 * len(candidates))\n    return ((sum_x / denom), (sum_y / denom))", "docstring": "Take the mean of all centroids in set of reference triangles.\n\n.. note::\n\nThis is used **only** as a helper for :func:`locate_point`.\n\nArgs:\ncandidates (List[Tuple[float, float, float, numpy.ndarray]): List of\n4-tuples, each of which has been produced by :func:`locate_point`.\nEach 4-tuple contains\n\n* Three times centroid ``x``-value\n* Three times centroid ``y``-value\n* \"Width\" of a parameter space for a surface\n* Control points for a surface\n\nWe only use the first two values, which are triple the desired\nvalue so that we can put off division by three until summing in\nour average. We don't use the other two values, they are just an\nartifact of the way ``candidates`` is constructed by the caller.\n\nReturns:\nTuple[float, float]: The mean of all centroids.", "source": "codesearchnet"}
{"code": "def wrefs(self, index = None, recurse=True):\n        \n        targets =[]\n        self._helper_wrefs(targets, recurse)\n        if index is None:\n            return targets\n        else:\n            return targets[index]", "docstring": "Returns a list of word references, these can be Words but also Morphemes or Phonemes.\n\nArguments:\nindex (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all", "source": "juraj-google-style"}
{"code": "def _CompareStores(self, storage_reader, compare_storage_reader):\n    \n    storage_counters = self._CalculateStorageCounters(storage_reader)\n    compare_storage_counters = self._CalculateStorageCounters(\n        compare_storage_reader)\n\n    \n\n    return storage_counters == compare_storage_counters", "docstring": "Compares the contents of two stores.\n\nArgs:\nstorage_reader (StorageReader): storage reader.\ncompare_storage_reader (StorageReader): storage to compare against.\n\nReturns:\nbool: True if the content of the stores is identical.", "source": "juraj-google-style"}
{"code": "def softmax(x, axis=-1):\n    if isinstance(axis, int) and x.shape[axis] == 1:\n        warnings.warn(f'You are using a softmax over axis {axis} of a tensor of shape {x.shape}. This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?')\n    if any_symbolic_tensors((x,)):\n        return Softmax(axis).symbolic_call(x)\n    if isinstance(axis, tuple):\n        axis_to_keep = [v for v in range(len(x.shape)) if v not in axis]\n        x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis))\n        x_reshaped = backend.numpy.reshape(x_transposed, (*[x.shape[v] for v in axis_to_keep], -1))\n        x = backend.nn.softmax(x_reshaped, axis=-1)\n        x = backend.numpy.reshape(x, x_transposed.shape)\n        x = backend.numpy.transpose(x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis])))\n        return x\n    else:\n        return backend.nn.softmax(x, axis=axis)", "docstring": "Softmax activation function.\n\nThe elements of the output vector lie within the range `(0, 1)`, and their\ntotal sum is exactly 1 (excluding the floating point rounding error).\n\nEach vector is processed independently. The `axis` argument specifies the\naxis along which the function is applied within the input.\n\nIt is defined as:\n`f(x) = exp(x) / sum(exp(x))`\n\nArgs:\nx: Input tensor.\naxis: Integer, axis along which the softmax is applied.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = np.array([-1., 0., 1.])\n>>> x_softmax = keras.ops.softmax(x)\n>>> print(x_softmax)\narray([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64)", "source": "github-repos"}
{"code": "def recipe_dbm_to_sheets(config, auth_read, report_id, report_name, sheet, tab):\n    dbm(config, {'auth': auth_read, 'report': {'report_id': report_id, 'name': report_name}, 'out': {'sheets': {'sheet': sheet, 'tab': tab, 'range': 'A1'}}})", "docstring": "Move existing DV360 report into a Sheets tab.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nreport_id (integer) - DV360 report ID given in UI, not needed if name used.\nreport_name (string) - Name of report, not needed if ID used.\nsheet (string) - Full URL to sheet being written to.\ntab (string) - Existing tab in sheet to write to.", "source": "github-repos"}
{"code": "def LookupClients(self, keywords):\n    \n    if isinstance(keywords, string_types):\n      raise ValueError(\n          \"Keywords should be an iterable, not a string (got %s).\" % keywords)\n\n    start_time, end_time, filtered_keywords, unversioned_keywords = (\n        self._AnalyzeKeywords(keywords))\n\n    last_seen_map = None\n    if unversioned_keywords:\n      last_seen_map = {}\n\n    \n    \n\n    raw_results = self.Lookup(\n        list(map(self._NormalizeKeyword, filtered_keywords)),\n        start_time=start_time.AsMicrosecondsSinceEpoch(),\n        end_time=end_time.AsMicrosecondsSinceEpoch(),\n        last_seen_map=last_seen_map)\n    if not raw_results:\n      return []\n\n    if unversioned_keywords:\n      universal_last_seen_raw = {}\n      self.ReadPostingLists(\n          list(map(self._NormalizeKeyword, raw_results)),\n          start_time=start_time.AsMicrosecondsSinceEpoch(),\n          end_time=end_time.AsMicrosecondsSinceEpoch(),\n          last_seen_map=universal_last_seen_raw)\n\n      universal_last_seen = {}\n      for (_, client_id), ts in iteritems(universal_last_seen_raw):\n        universal_last_seen[client_id] = ts\n\n      old_results = set()\n      for keyword in unversioned_keywords:\n        for result in raw_results:\n          if last_seen_map[(keyword, result)] < universal_last_seen[result]:\n            old_results.add(result)\n      raw_results -= old_results\n\n    return [rdf_client.ClientURN(result) for result in raw_results]", "docstring": "Returns a list of client URNs associated with keywords.\n\nArgs:\nkeywords: The list of keywords to search by.\n\nReturns:\nA list of client URNs.\n\nRaises:\nValueError: A string (single keyword) was passed instead of an iterable.", "source": "juraj-google-style"}
{"code": "def cellsiter_to_dataframe(cellsiter, args, drop_allna=True):\n    from modelx.core.cells import shareable_parameters\n    if len(args):\n        indexes = shareable_parameters(cellsiter)\n    else:\n        indexes = get_all_params(cellsiter.values())\n    result = None\n    for cells in cellsiter.values():\n        df = cells_to_dataframe(cells, args)\n        if (drop_allna and df.isnull().all().all()):\n            continue\n        if (df.index.names != [None]):\n            if isinstance(df.index, pd.MultiIndex):\n                if (_pd_ver < (0, 20)):\n                    df = _reset_naindex(df)\n            df = df.reset_index()\n        missing_params = (set(indexes) - set(df))\n        for params in missing_params:\n            df[params] = np.nan\n        if (result is None):\n            result = df\n        else:\n            try:\n                result = pd.merge(result, df, how='outer')\n            except MergeError:\n                result = pd.concat([result, df], axis=1)\n            except ValueError:\n                cols = (set(result.columns) & set(df.columns))\n                for col in cols:\n                    if (len([str(frame[col].dtype) for frame in (result, df) if (str(frame[col].dtype) == 'object')]) == 1):\n                        if (str(result[col].dtype) == 'object'):\n                            frame = df\n                        else:\n                            frame = result\n                        frame[[col]] = frame[col].astype('object')\n                result = pd.merge(result, df, how='outer')\n    if (result is None):\n        return pd.DataFrame()\n    else:\n        return (result.set_index(indexes) if indexes else result)", "docstring": "Convert multiple cells to a frame.\n\nIf args is an empty sequence, all values are included.\nIf args is specified, cellsiter must have shareable parameters.\n\nArgs:\ncellsiter: A mapping from cells names to CellsImpl objects.\nargs: A sequence of arguments", "source": "codesearchnet"}
{"code": "def explain_text(self, labels, instance, column_name=None, num_features=10, num_samples=5000):\n    from lime.lime_text import LimeTextExplainer\n    if ((len(self._text_columns) > 1) and (not column_name)):\n        raise ValueError(('There are multiple text columns in the input of the model. ' + 'Please specify \"column_name\".'))\n    elif (column_name and (column_name not in self._text_columns)):\n        raise ValueError(('Specified column_name \"%s\" not found in the model input.' % column_name))\n    text_column_name = (column_name if column_name else self._text_columns[0])\n    if isinstance(instance, six.string_types):\n        instance = next(csv.DictReader([instance], fieldnames=self._headers))\n    predict_fn = self._make_text_predict_fn(labels, instance, text_column_name)\n    explainer = LimeTextExplainer(class_names=labels)\n    exp = explainer.explain_instance(instance[text_column_name], predict_fn, labels=range(len(labels)), num_features=num_features, num_samples=num_samples)\n    return exp", "docstring": "Explain a text field of a prediction.\n\nIt analyze the prediction by LIME, and returns a report of which words are most impactful\nin contributing to certain labels.\n\nArgs:\nlabels: a list of labels to explain.\ninstance: the prediction instance. It needs to conform to model's input. Can be a csv\nline string, or a dict.\ncolumn_name: which text column to explain. Can be None if there is only one text column\nin the model input.\nnum_features: maximum number of words (features) to analyze. Passed to\nLIME LimeTextExplainer directly.\nnum_samples: size of the neighborhood to learn the linear model. Passed to\nLIME LimeTextExplainer directly.\n\nReturns:\nA LIME's lime.explanation.Explanation.\n\nThrows:\nValueError if the given text column is not found in model input or column_name is None\nbut there are multiple text columns in model input.", "source": "codesearchnet"}
{"code": "def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):\n    \n    headers = {'Metadata-Flavor': 'Google'}\n    params = urlparse.urlencode(params or {})\n    url = '%s?%s' % (metadata_url, params)\n    request = urlrequest.Request(url, headers=headers)\n    request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))\n    timeout = timeout or self.timeout\n    return request_opener.open(request, timeout=timeout*1.1)", "docstring": "Performs a GET request with the metadata headers.\n\nArgs:\nmetadata_url: string, the URL to perform a GET request on.\nparams: dictionary, the query parameters in the GET request.\ntimeout: int, timeout in seconds for metadata requests.\n\nReturns:\nHTTP response from the GET request.\n\nRaises:\nurlerror.HTTPError: raises when the GET request fails.", "source": "juraj-google-style"}
{"code": "def plot(self, ax_list=None, fontsize=12, **kwargs):\n        \n        history = self.history\n\n        \n        num_plots, ncols, nrows = len(history), 1, 1\n        if num_plots > 1:\n            ncols = 2\n            nrows = num_plots \n\n        ax_list, fig, plot = get_axarray_fig_plt(ax_list, nrows=nrows, ncols=ncols,\n                                                 sharex=True, sharey=False, squeeze=False)\n        ax_list = np.array(ax_list).ravel()\n\n        iter_num = np.array(list(range(self.num_iterations))) + 1\n        label = kwargs.pop(\"label\", None)\n\n        for i, ((key, values), ax) in enumerate(zip(history.items(), ax_list)):\n            ax.grid(True)\n            ax.set_xlabel('Relaxation Step')\n            ax.set_xticks(iter_num, minor=False)\n            ax.set_ylabel(key)\n\n            xx, yy = iter_num, values\n            if not kwargs and label is None:\n                ax.plot(xx, yy, \"-o\", lw=2.0)\n            else:\n                ax.plot(xx, yy, label=label if i == 0 else None, **kwargs)\n\n            if key in _VARS_SUPPORTING_LOGSCALE and np.all(yy > 1e-22):\n                ax.set_yscale(\"log\")\n\n            if key in _VARS_WITH_YRANGE:\n                ymin, ymax = _VARS_WITH_YRANGE[key]\n                val_min, val_max = np.min(yy), np.max(yy)\n                if abs(val_max - val_min) > abs(ymax - ymin):\n                    ax.set_ylim(ymin, ymax)\n\n            if label is not None:\n                ax.legend(loc=\"best\", fontsize=fontsize, shadow=True)\n\n        \n        if num_plots % ncols != 0:\n            ax_list[-1].plot(xx, yy, lw=0.0)\n            ax_list[-1].axis('off')\n\n        return fig", "docstring": "Plot relaxation history i.e. the results of the last iteration of each SCF cycle.\n\nArgs:\nax_list: List of axes. If None a new figure is produced.\nfontsize: legend fontsize.\nkwargs: keyword arguments are passed to ax.plot\n\nReturns: matplotlib figure", "source": "juraj-google-style"}
{"code": "def _executeMassiveMethod(path, method, args=None, classArgs=None):\n    response = {}\n    if (args is None):\n        args = {}\n    if (classArgs is None):\n        classArgs = {}\n    sys.path.append(path)\n    exclude = ['__init__.py', 'base.py']\n    for f in AtomShieldsScanner._getFiles(path, '*.py', exclude=exclude):\n        try:\n            instance = AtomShieldsScanner._getClassInstance(path=f, args=classArgs)\n            if (instance is not None):\n                if callable(method):\n                    args['instance'] = instance\n                    output = method(**args)\n                    response[instance.__class__.NAME] = output\n                elif hasattr(instance, method):\n                    output = getattr(instance, method)(**args)\n                    response[instance.__class__.NAME] = output\n                else:\n                    continue\n        except Exception as e:\n            AtomShieldsScanner._debug(('[!] %s' % e))\n    sys.path.remove(path)\n    return response", "docstring": "Execute an specific method for each class instance located in path\n\nArgs:\npath (str): Absolute path which contains the .py files\nmethod (str): Method to execute into class instance\n\nReturns:\ndict: Dictionary which contains the response for every class instance.\nThe dictionary keys are the value of 'NAME' class variable.", "source": "codesearchnet"}
{"code": "def _ListActiveBreakpoints(self, service):\n    \n    try:\n      response = service.debuggees().breakpoints().list(\n          debuggeeId=self._debuggee_id, waitToken=self._wait_token,\n          successOnTimeout=True).execute()\n      if not response.get('waitExpired'):\n        self._wait_token = response.get('nextWaitToken')\n        breakpoints = response.get('breakpoints') or []\n        if self._breakpoints != breakpoints:\n          self._breakpoints = breakpoints\n          native.LogInfo(\n              'Breakpoints list changed, %d active, wait token: %s' % (\n                  len(self._breakpoints), self._wait_token))\n          self.on_active_breakpoints_changed(copy.deepcopy(self._breakpoints))\n    except BaseException:\n      native.LogInfo('Failed to query active breakpoints: ' +\n                     traceback.format_exc())\n\n      \n      \n      self._debuggee_id = None\n\n      return (True, self.list_backoff.Failed())\n\n    self.list_backoff.Succeeded()\n    return (False, 0)", "docstring": "Single attempt query the list of active breakpoints.\n\nMust not be called before the debuggee has been registered. If the request\nfails, this function resets self._debuggee_id, which triggers repeated\ndebuggee registration.\n\nArgs:\nservice: client to use for API calls\n\nReturns:\n(registration_required, delay) tuple", "source": "juraj-google-style"}
{"code": "def query(self, sql_query, return_as='dataframe'):\n    if isinstance(sql_query, str):\n        pass\n    elif isinstance(sql_query, unicode):\n        sql_query = str(sql_query)\n    else:\n        raise QueryDbError('query() requires a str or unicode input.')\n    query = sqlalchemy.sql.text(sql_query)\n    if (return_as.upper() in ['DF', 'DATAFRAME']):\n        return self._to_df(query, self._engine)\n    elif (return_as.upper() in ['RESULT', 'RESULTPROXY']):\n        with self._engine.connect() as conn:\n            result = conn.execute(query)\n            return result\n    else:\n        raise QueryDbError('Other return types not implemented.')", "docstring": "Execute a raw SQL query against the the SQL DB.\n\nArgs:\nsql_query (str): A raw SQL query to execute.\n\nKwargs:\nreturn_as (str): Specify what type of object should be\nreturned. The following are acceptable types:\n- \"dataframe\": pandas.DataFrame or None if no matching query\n- \"result\": sqlalchemy.engine.result.ResultProxy\n\nReturns:\nresult (pandas.DataFrame or sqlalchemy ResultProxy): Query result\nas a DataFrame (default) or sqlalchemy result (specified with\nreturn_as=\"result\")\n\nRaises:\nQueryDbError", "source": "codesearchnet"}
{"code": "def receive(self, sequence, args):\n    if (not self._reorder):\n        self._callback(*args)\n        return\n    if ((self._next_expected is not None) and (sequence < self._next_expected)):\n        print(('Dropping out of order packet, seq=%d' % sequence))\n        return\n    self._out_of_order.append((sequence, args))\n    self._out_of_order.sort(key=(lambda x: x[0]))\n    while (len(self._out_of_order) > 0):\n        (seq, args) = self._out_of_order[0]\n        if ((self._next_expected is not None) and (seq != self._next_expected)):\n            return\n        self._callback(*args)\n        self._out_of_order.pop(0)\n        self._next_expected = (seq + 1)", "docstring": "Receive one packet\n\nIf the sequence number is one we've already seen before, it is dropped.\n\nIf it is not the next expected sequence number, it is put into the\n_out_of_order queue to be processed once the holes in sequence number\nare filled in.\n\nArgs:\nsequence (int): The sequence number of the received packet\nargs (list): The list of packet contents that will be passed to callback\nas callback(*args)", "source": "codesearchnet"}
{"code": "def get_energy(self, composition, strict=True):\n        \n        if strict and set(composition.keys()) > set(self.keys()):\n            s = set(composition.keys()) - set(self.keys())\n            raise ValueError(\"Potentials not specified for {}\".format(s))\n        return sum(self.get(k, 0) * v for k, v in composition.items())", "docstring": "Calculates the energy of a composition.\n\nArgs:\ncomposition (Composition): input composition\nstrict (bool): Whether all potentials must be specified", "source": "juraj-google-style"}
{"code": "def add_request(self, input_ids: List[int], request_id: Optional[str]=None, max_new_tokens: Optional[int]=None) -> str:\n    if request_id is None:\n        with self._request_lock:\n            request_id = f'req_{self._request_counter}'\n            self._request_counter += 1\n    max_new_tokens = self.generation_config.max_new_tokens if max_new_tokens is None else max_new_tokens\n    state = RequestState(request_id=request_id, prompt_ids=list(input_ids), full_prompt_ids=list(input_ids), max_new_tokens=max_new_tokens, eos_token_id=self.generation_config.eos_token_id)\n    self.input_queue.put(state, block=True, timeout=10)\n    logger.debug(f'Added request {request_id} to queue.')\n    return request_id", "docstring": "Add a new generation request to the queue.\n\nArgs:\ninput_ids: Input token IDs to use as prompt\nrequest_id: Optional custom request ID (auto-generated if None)\n**kwargs: Additional generation parameters\n\nReturns:\nstr: The request ID", "source": "github-repos"}
{"code": "def recipe_ga360_segmentology(config, auth_write, auth_read, view, recipe_slug):\n    dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n    bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n    ga(config, {'auth': auth_read, 'kwargs': {'reportRequests': [{'viewId': view, 'dateRanges': [{'startDate': '90daysAgo', 'endDate': 'today'}], 'dimensions': [{'name': 'ga:userType'}, {'name': 'ga:userDefinedValue'}, {'name': 'ga:latitude'}, {'name': 'ga:longitude'}], 'metrics': [{'expression': 'ga:users'}, {'expression': 'ga:sessionsPerUser'}, {'expression': 'ga:bounces'}, {'expression': 'ga:timeOnPage'}, {'expression': 'ga:pageviews'}]}], 'useResourceQuotas': False}, 'out': {'bigquery': {'dataset': recipe_slug, 'table': 'GA360_KPI'}}})\n    bigquery(config, {'auth': auth_write, 'from': {'query': 'WITH GA360_SUM AS (\\n         SELECT\\n            A.Dimensions.userType AS User_Type,\\n           A.Dimensions.userDefinedValue AS User_Value,\\n           B.zip_code AS Zip,\\n           SUM(Metrics.users) AS Users,\\n           SUM(Metrics.sessionsPerUser) AS Sessions,\\n           SUM(Metrics.timeOnPage) AS Time_On_Site,\\n           SUM(Metrics.bounces) AS Bounces,\\n           SUM(Metrics.pageviews) AS Page_Views\\n         FROM `{dataset}.GA360_KPI` AS A\\n          JOIN `bigquery-public-data.geo_us_boundaries.zip_codes` AS B\\n         ON ST_WITHIN(ST_GEOGPOINT(A.Dimensions.longitude, A.Dimensions.latitude), B.zip_code_geom)\\n         GROUP BY 1,2,3\\n         )\\n         SELECT\\n           User_Type,\\n           User_Value,\\n           Zip,\\n           Users,\\n           SAFE_DIVIDE(Users, SUM(Users) OVER()) AS User_Percent,\\n           SAFE_DIVIDE(Sessions, SUM(Sessions) OVER()) AS Impression_Percent,\\n           SAFE_DIVIDE(Time_On_Site, SUM(Time_On_Site) OVER()) AS Time_On_Site_Percent,\\n           SAFE_DIVIDE(Bounces, SUM(Bounces) OVER()) AS Bounce_Percent,\\n           SAFE_DIVIDE(Page_Views, SUM(Page_Views) OVER()) AS Page_View_Percent\\n         FROM GA360_SUM        ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'GA360_KPI_Normalized'}})\n    census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n    census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['User_Type', 'User_Value'], 'sum': ['Users'], 'correlate': ['User_Percent', 'Impression_Percent', 'Time_On_Site_Percent', 'Bounce_Percent', 'Page_View_Percent'], 'dataset': recipe_slug, 'table': 'GA360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "GA360 funnel analysis using Census data.\n\nArgs:\nauth_write (authentication) - Authorization used for writing data.\nauth_read (authentication) - Authorization for reading GA360.\nview (string) - View Id\nrecipe_slug (string) - Name of Google BigQuery dataset to create.", "source": "github-repos"}
{"code": "def custom_returnvalue(self, printer, desc=None):\n    self.return_info = ReturnInfo(None, printer, True, desc)", "docstring": "Use a custom function to print the return value.\n\nArgs:\nprinter (callable): A function that should take in the return\nvalue and convert it to a string.\ndesc (str): An optional description of the return value.", "source": "codesearchnet"}
{"code": "def __init__(self, start, end):\n    \n    if start > end:\n      raise ValueError(\n          \"Invalid time-range: %s > %s.\" % (start.AsMicrosecondsSinceEpoch(),\n                                            end.AsMicrosecondsSinceEpoch()))\n    self._start = start\n    self._end = end", "docstring": "Initializes a TimeRange.\n\nArgs:\nstart: An RDFDatetime that indicates the beginning of the time-range.\nend: An RDFDatetime that indicates the end of the time-range.\n\nRaises:\nValueError: If the beginning of the time range is at a future time as\ncompared to the end of the time-range.", "source": "juraj-google-style"}
{"code": "def inplace_add(x, i, v):\n    return alias_inplace_add(gen_array_ops.deep_copy(x), i, v)", "docstring": "Applies an inplace add on input x at index i with value v.\n\nNote that this function is not actually inplace - it allocates\na copy of x.  The utility is not avoiding memory copies but rather\nspecifying a sparse update.\n\nIf i is None, x and v must be the same shape. Computes\ny = x; y += v;\nIf i is a scalar, x has a rank 1 higher than v's. Computes\ny = x; y[i, :] += v;\nOtherwise, x and v must have the same rank. Computes\ny = x; y[i, :] += v;\n\nArgs:\nx: A Tensor.\ni: None, a scalar or a vector.\nv: A Tensor.\n\nReturns:\nReturns y, which is guaranteed not to be an alias of x.", "source": "github-repos"}
{"code": "def get_ecommerce_client(url_postfix='', site_code=None):\n    \n    ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code)\n    signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code)\n    issuer = get_configuration('JWT_ISSUER', site_code=site_code)\n    service_username = get_configuration('ECOMMERCE_SERVICE_USERNAME', site_code=site_code)\n    return EdxRestApiClient(\n        ecommerce_api_root + url_postfix, signing_key=signing_key, issuer=issuer, username=service_username)", "docstring": "Get client for fetching data from ecommerce API.\nArguments:\nsite_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values\nurl_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value.\n\nReturns:\nEdxRestApiClient object", "source": "juraj-google-style"}
{"code": "def unstack(df, level=-1, reset_index=True):\n    \n    df = df.unstack(level=level)\n    if reset_index:\n        df = df.reset_index()\n        df.columns = df.columns.map(_join_names)\n\n    return df", "docstring": "pd.DataFrame.unstack adapter.\n\nCall the `df.unstack` method using the indicated level and afterwards\njoin the column names using an underscore.\n\nArgs:\ndf (pandas.DataFrame): DataFrame to unstack.\nlevel (str, int or list): Level(s) of index to unstack, can pass level name\nreset_index (bool): Whether to reset the index after unstacking\n\nReturns:\npandas.Dataframe: unstacked dataframe", "source": "juraj-google-style"}
{"code": "def get(self, name):\n        \n\n        \n        interface = name\n        if not interface:\n            raise ValueError(\"Vrrp.get(): interface must contain a value.\")\n\n        \n        \n        config = self.get_block('interface %s' % interface)\n        if config is None:\n            return config\n\n        \n        \n        match = set(re.findall(r'^\\s+(?:no |)vrrp (\\d+)', config, re.M))\n        if not match:\n            return None\n\n        \n        result = dict()\n\n        for vrid in match:\n            subd = dict()\n\n            \n            subd.update(self._parse_delay_reload(config, vrid))\n            subd.update(self._parse_description(config, vrid))\n            subd.update(self._parse_enable(config, vrid))\n            subd.update(self._parse_ip_version(config, vrid))\n            subd.update(self._parse_mac_addr_adv_interval(config, vrid))\n            subd.update(self._parse_preempt(config, vrid))\n            subd.update(self._parse_preempt_delay_min(config, vrid))\n            subd.update(self._parse_preempt_delay_reload(config, vrid))\n            subd.update(self._parse_primary_ip(config, vrid))\n            subd.update(self._parse_priority(config, vrid))\n            subd.update(self._parse_secondary_ip(config, vrid))\n            subd.update(self._parse_timers_advertise(config, vrid))\n            subd.update(self._parse_track(config, vrid))\n            subd.update(self._parse_bfd_ip(config, vrid))\n\n            result.update({int(vrid): subd})\n\n        \n        return result if result else None", "docstring": "Get the vrrp configurations for a single node interface\n\nArgs:\nname (string): The name of the interface for which vrrp\nconfigurations will be retrieved.\n\nReturns:\nA dictionary containing the vrrp configurations on the interface.\nReturns None if no vrrp configurations are defined or\nif the interface is not configured.", "source": "juraj-google-style"}
{"code": "def Add(self, rdf_value, mutation_pool=None):\n    \n    self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool)", "docstring": "Adds an rdf value to the queue.\n\nAdds an rdf value to the queue. Does not require that the queue be locked.\n\nArgs:\nrdf_value: The rdf value to add to the queue.\n\nmutation_pool: A MutationPool object to write to.\n\nRaises:\nValueError: rdf_value has unexpected type.", "source": "juraj-google-style"}
{"code": "def get_appliance(self, appliance_id):\n    url = ('https:\n    headers = self.__gen_headers()\n    headers['Content-Type'] = 'application/json'\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get the information for a specified appliance\n\nArgs:\nappliance_id (string): identifiying string of appliance\n\nReturns:\nlist: dictionary object containing information about the specified appliance", "source": "codesearchnet"}
{"code": "def GetHashType(self, hash_str):\n    \n    \n    for hash_type, hash_re in self.hashes:\n      if hash_re.match(hash_str):\n        return hash_type\n    \n    return \"EMPTY\"", "docstring": "Identify the type of hash in a hash string.\n\nArgs:\nhash_str: A string value that may be a hash.\n\nReturns:\nA string description of the type of hash.", "source": "juraj-google-style"}
{"code": "def resume(resume_delay=0):\n    return ProcessContinuation(resume_delay=resume_delay)", "docstring": "A convenient method that produces a ``ProcessContinuation``.\n\nArgs:\nresume_delay: delay after which processing current element should be\nresumed.\nReturns: a ``ProcessContinuation`` for signalling the runner that current\ninput element has not been fully processed and should be resumed later.", "source": "github-repos"}
{"code": "def __init__(self, file_object=None):\n    \n    super(SelfFeederMixIn, self).__init__()\n    self.file_object = file_object", "docstring": "Initializes the lexer feeder min object.\n\nArgs:\nfile_object: Optional file-like object.", "source": "juraj-google-style"}
{"code": "def clean_single_dict(indict, prepend_to_keys=None, remove_keys_containing=None):\n    if (not prepend_to_keys):\n        prepend_to_keys = ''\n    outdict = {}\n    for (k, v) in indict.items():\n        if remove_keys_containing:\n            if (remove_keys_containing in k):\n                continue\n        outdict[(prepend_to_keys + k)] = v[0]\n    return outdict", "docstring": "Clean a dict with values that contain single item iterators to single items\n\nArgs:\nindict (dict): Dictionary to be cleaned\nprepend_to_keys (str): String to prepend to all keys\nremove_keys_containing (str): Text to check for in keys to ignore\n\nReturns:\ndict: Cleaned dictionary\n\nExamples:\n>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']})\n{'test1': 1, 'test2': 'H'}\n\n>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_')\n{'struct_test1': 1, 'struct_test2': 'H'}\n\n>>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore')\n{'struct_test1': 1}", "source": "codesearchnet"}
{"code": "def interconnect_link_topologies(self):\n    if (not self.__interconnect_link_topologies):\n        self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)\n    return self.__interconnect_link_topologies", "docstring": "Gets the InterconnectLinkTopologies API client.\n\nReturns:\nInterconnectLinkTopologies:", "source": "codesearchnet"}
{"code": "def check_termination(self) -> None:\n    if self._is_thread_joined:\n        if self.is_alive():\n            raise RuntimeError('Thread was not joined with main thread, and is still running when the test finished.')\n    else:\n        self._testcase.fail('A checked thread was not joined.')", "docstring": "Returns whether the checked thread was properly used and did terminate.\n\nEvery checked thread should be \"join\"ed after starting, and before the\ntest tears down. If it is not joined, it is possible the thread will hang\nand cause flaky failures in tests.\n\nRaises:\nself._testcase.failureException: If check_termination was called before\nthread was joined.\n\nRuntimeError: If the thread is not terminated. This means thread was not\njoined with the main thread.", "source": "github-repos"}
{"code": "def get_pipeline_options(project: str, job_name: str, mode: str, device: str, num_workers: int=cfg.NUM_WORKERS, **kwargs: Any) -> PipelineOptions:\n    job_name = f'{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}'\n    staging_bucket = f'gs:\n    dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': cfg.REGION, 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py'}\n    flags = []\n    if device == 'GPU':\n        flags = ['--experiment=worker_accelerator=type:nvidia-tesla-p4;count:1;install-nvidia-driver']\n        dataflow_options.update({'sdk_container_image': cfg.DOCKER_IMG, 'machine_type': 'n1-standard-4'})\n    if num_workers:\n        dataflow_options.update({'num_workers': num_workers})\n    return PipelineOptions(flags=flags, **dataflow_options)", "docstring": "Function to retrieve the pipeline options.\nArgs:\nproject: GCP project to run on\nmode: Indicator to run local, cloud or template\nnum_workers: Number of Workers for running the job parallely\nReturns:\nDataflow pipeline options", "source": "github-repos"}
{"code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n    if distribute_lib.in_cross_replica_context():\n        raise ValueError('apply_gradients() must be called in a replica context.')\n    if not self._doing_dynamic_loss_scaling():\n        return self._optimizer.apply_gradients(grads_and_vars, global_step, name)\n    replica_context = distribute_lib.get_replica_context()\n    grads_and_vars = tuple(grads_and_vars)\n    return replica_context.merge_call(self._distributed_apply, args=(grads_and_vars, global_step, name))", "docstring": "Apply gradients to variables.\n\nThis is the second part of `minimize()`. It returns an `Operation` that\nconditionally applies gradients if all gradient values are finite.\nOtherwise no update is performed (nor is `global_step` incremented).\n\nArgs:\ngrads_and_vars: List of (gradient, variable) pairs as returned by\n`compute_gradients()`.\nglobal_step: Optional `Variable` to increment by one after the variables\nhave been updated.\nname: Optional name for the returned operation.  Default to the name\npassed to the `Optimizer` constructor.\n\nReturns:\nAn `Operation` that conditionally applies the specified gradients. If\n`global_step` was not None, that operation also increments `global_step`.\n\nRaises:\nRuntimeError: If you should use `_distributed_apply()` instead.", "source": "github-repos"}
{"code": "def _copy_attr(self, module, varname, cls, attrname=None):\n        \n\n        if not hasattr(module, varname):\n            raise RuntimeError(\"Variable '{}' not found\".format(varname))\n\n        obj = getattr(module, varname)\n\n        if not isinstance(obj, cls):\n            raise RuntimeError(\n                \"Expecting fobj to be a {}, not a '{}'\".format(cls.__name__, obj.__class__.__name__))\n\n        if attrname is None:\n            attrname = varname\n\n        setattr(self, attrname, obj)", "docstring": "Copies attribute from module object to self. Raises if object not of expected class\n\nArgs:\nmodule: module object\nvarname: variable name\ncls: expected class of variable\nattrname: attribute name of self. Falls back to varname", "source": "juraj-google-style"}
{"code": "def _escaped_token_to_subtoken_strings(self, escaped_token):\n    \n    \n    \n    ret = []\n    start = 0\n    token_len = len(escaped_token)\n    while start < token_len:\n      for end in range(\n          min(token_len, start + self._max_subtoken_len), start, -1):\n        subtoken = escaped_token[start:end]\n        if subtoken in self._subtoken_string_to_id:\n          ret.append(subtoken)\n          start = end\n          break\n\n      else:  \n        \n        \n        \n        assert False, \"Token substring not found in subtoken vocabulary.\"\n\n    return ret", "docstring": "Converts an escaped token string to a list of subtoken strings.\n\nArgs:\nescaped_token: An escaped token as a unicode string.\nReturns:\nA list of subtokens as unicode strings.", "source": "juraj-google-style"}
{"code": "def get_language_stemmer(language):\n    from lunr.languages import SUPPORTED_LANGUAGES\n    from nltk.stem.snowball import SnowballStemmer\n    return SnowballStemmer(SUPPORTED_LANGUAGES[language])", "docstring": "Retrieves the SnowballStemmer for a particular language.\n\nArgs:\nlanguage (str): ISO-639-1 code of the language.", "source": "codesearchnet"}
{"code": "def get_cached_item(cache_key, alternative_cache_key, *func_args, **func_kwargs):\n    \n    key = get_cache_key(cache_key, func, *func_args, **func_kwargs)\n    return cache.get(key)", "docstring": "Not a decorator, but a helper function to retrieve the cached\nitem for a key created via get_cache_key.\nArgs:\n- cache_key: if there was a specific cache key used to cache the\nfunction, it should be provided here. If not this should be None\n- func: the function which was cache\n- *func_args: arguments of the function\n- **func_kwargs: keyword arguments of this function", "source": "juraj-google-style"}
{"code": "def init_logger(self, log_dir=None, level=logging.INFO):\n    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level)\n    logger = logging.getLogger(__name__)\n    if (log_dir and (self.rank == 0)):\n        filename = '{}.log'.format(self.timestamp)\n        log_file = osp.join(log_dir, filename)\n        self._add_file_handler(logger, log_file, level=level)\n    return logger", "docstring": "Init the logger.\n\nArgs:\nlog_dir(str, optional): Log file directory. If not specified, no\nlog file will be used.\nlevel (int or str): See the built-in python logging module.\n\nReturns:\n:obj:`~logging.Logger`: Python logger.", "source": "codesearchnet"}
{"code": "def update_args(self, args):\n        \n\n        for arg in vars(args):\n            if self.get(arg) and getattr(args, arg) is not None:\n                self._config[self.root_section][arg] = getattr(args, arg)", "docstring": "Update config dictionary with parsed args, as resolved by argparse.\nOnly root positional arguments that already exist will overridden.\n\nArgs:\nargs (namespace): args parsed by argparse", "source": "juraj-google-style"}
{"code": "def getThumbnailForItem(self, itemId, fileName, filePath):\n        \n        admin = None\n        item = None\n\n        try:\n            admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n            item = admin.content.getItem(itemId = itemId)\n            return item.saveThumbnail(fileName=fileName,filePath=filePath)\n        except:\n            line, filename, synerror = trace()\n            raise common.ArcRestHelperError({\n                        \"function\": \"getThumbnailForItem\",\n                        \"line\": line,\n                        \"filename\":  filename,\n                        \"synerror\": synerror,\n                                        }\n                                        )\n        finally:\n            admin = None\n            item = None\n            del admin\n            del item\n            gc.collect()", "docstring": "Gets an item's thumbnail and saves it to disk.\n\nArgs:\nitemId (str): The item's ID.\nfileName (str): The name of the output image.\nfileName (str): The directory on disk where to save the thumbnail.\nReturns:\ndict: The result from :py:func:`arcrest.manageorg._content.UserItem.saveThumbnail`", "source": "juraj-google-style"}
{"code": "def distribute_data_input(per_process_batch, layout, batch_dim_name):\n    from keras.src.distribution import TensorLayout\n    if isinstance(layout, TensorLayout):\n        layout = layout.backend_layout\n    return jax.make_array_from_process_local_data(layout, per_process_batch)", "docstring": "Distribute the input data with the corresponding layout.\n\nNote that the inputs here is a local worker batch. Within the local worker,\nthe data need to be further partitioned to map to each of the devices.\n\nArgs:\ninputs: `jax.Array` that is already sharded to a local process size.\nlayout: `TensorLayout` for the distribution information, or a\n`jax.sharding.Sharding` instance.\n\nReturns:\nA global batch distributed according to `layout`.", "source": "github-repos"}
{"code": "def GetUnavailableBonus(self):\n    height = (Blockchain.Default().Height + 1)\n    unspents = self.FindUnspentCoinsByAsset(Blockchain.SystemShare().Hash)\n    refs = [coin.Reference for coin in unspents]\n    try:\n        unavailable_bonus = Blockchain.CalculateBonus(refs, height_end=height)\n        return unavailable_bonus\n    except Exception as e:\n        pass\n    return Fixed8(0)", "docstring": "Gets the total claimable amount of Gas in the wallet that is not available to claim\nbecause it has not yet been spent.\n\nReturns:\nFixed8: the amount of Gas unavailable to claim.", "source": "codesearchnet"}
{"code": "def cross_section(verts, tris, plane_orig, plane_normal, **kwargs):\n    mesh = TriangleMesh(verts, tris)\n    plane = Plane(plane_orig, plane_normal)\n    return cross_section_mesh(mesh, plane, **kwargs)", "docstring": "Compute the planar cross section of a mesh. This returns a set of\npolylines.\n\nArgs:\nverts: Nx3 array of the vertices position\nfaces: Nx3 array of the faces, containing vertex indices\nplane_orig: 3-vector indicating the plane origin\nplane_normal: 3-vector indicating the plane normal\n\nReturns:\nA list of Nx3 arrays, each representing a disconnected portion\nof the cross section as a polyline", "source": "codesearchnet"}
{"code": "def set_position(self, x, y):\n    self.attributes['x'] = str(x)\n    self.attributes['y'] = str(y)", "docstring": "Sets the shape position.\n\nArgs:\nx (int): the x coordinate\ny (int): the y coordinate", "source": "codesearchnet"}
{"code": "def _convert(value, dtype=None):\n    result = numpy_compat.np_asarray(value, dtype=dtype, order='C')\n    if result.dtype.char == 'S' and result is not value:\n        return numpy_compat.np_asarray(value, order='C', dtype=object)\n    elif result.dtype.char == 'U' and result is not value:\n        value = np.vectorize(lambda x: x.encode('utf8'))(value)\n        return numpy_compat.np_asarray(value, order='C', dtype=object)\n    elif result.dtype.char == 'U':\n        return result.astype(np.bytes_)\n    else:\n        return result", "docstring": "Converts an arg to numpy, avoiding dangerous string and unicode dtypes.\n\nNumpy pads with zeros when using string and unicode dtypes if different\ncomponents of a tensor have different lengths.  This is bad: ignoring the\npadding is wrong for text data, and removing the padding is wrong for binary\ndata.  To avoid this bug, we redo the conversion using an object dtype.\nAdditionally, we convert unicode strings to (byte-)strings for\ncompatibility.\n\nArgs:\nvalue: Value to convert to a numpy array.\ndtype: (Optional.) Desired NumPy type for the returned value.\n\nReturns:\nA numpy array.", "source": "github-repos"}
{"code": "def _verify_pair(prev, curr):\n        \n        if prev._dimension != 2:\n            raise ValueError(\"Curve not in R^2\", prev)\n\n        end = prev._nodes[:, -1]\n        start = curr._nodes[:, 0]\n        if not _helpers.vector_close(end, start):\n            raise ValueError(\n                \"Not sufficiently close\",\n                \"Consecutive sides do not have common endpoint\",\n                prev,\n                curr,\n            )", "docstring": "Verify a pair of sides share an endpoint.\n\n.. note::\n\nThis currently checks that edge endpoints match **exactly**\nbut allowing some roundoff may be desired.\n\nArgs:\nprev (.Curve): \"Previous\" curve at piecewise junction.\ncurr (.Curve): \"Next\" curve at piecewise junction.\n\nRaises:\nValueError: If the previous side is not in 2D.\nValueError: If consecutive sides don't share an endpoint.", "source": "juraj-google-style"}
{"code": "def apply_rules(self, rules, recursive=True):\n    if recursive:\n        new_args = [_apply_rules(arg, rules) for arg in self.args]\n        new_kwargs = {key: _apply_rules(val, rules) for (key, val) in self.kwargs.items()}\n    else:\n        new_args = self.args\n        new_kwargs = self.kwargs\n    simplified = self.create(*new_args, **new_kwargs)\n    return _apply_rules_no_recurse(simplified, rules)", "docstring": "Rebuild the expression while applying a list of rules\n\nThe rules are applied against the instantiated expression, and any\nsub-expressions if `recursive` is True. Rule application is best though\nof as a pattern-based substitution. This is different from the\n*automatic* rules that :meth:`create` uses (see :meth:`add_rule`),\nwhich are applied *before* expressions are instantiated.\n\nArgs:\nrules (list or ~collections.OrderedDict): List of rules or\ndictionary mapping names to rules, where each rule is a tuple\n(:class:`Pattern`, replacement callable), cf.\n:meth:`apply_rule`\nrecursive (bool): If true (default), apply rules to all arguments\nand keyword arguments of the expression. Otherwise, only the\nexpression itself will be re-instantiated.\n\nIf `rules` is a dictionary, the keys (rules names) are used only for\ndebug logging, to allow an analysis of which rules lead to the final\nform of an expression.", "source": "codesearchnet"}
{"code": "def create_checksum_object_from_iterator(\n    itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM\n):\n    \n    checksum_str = calculate_checksum_on_iterator(itr, algorithm)\n    checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str)\n    checksum_pyxb.algorithm = algorithm\n    return checksum_pyxb", "docstring": "Calculate the checksum of an iterator.\n\nArgs:\nitr: iterable\nObject which supports the iterator protocol.\n\nalgorithm: str\nChecksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.\n\nReturns:\nPopulated Checksum PyXB object.", "source": "juraj-google-style"}
{"code": "def service_configuration_check(config):\n    \n    ipv4_enabled = config.getboolean('daemon', 'ipv4')\n    ipv6_enabled = config.getboolean('daemon', 'ipv6')\n    services = config.sections()\n    \n    services.remove('daemon')\n    ip_prefixes = []\n\n    for service in services:\n        for option, getter in SERVICE_OPTIONS_TYPE.items():\n            try:\n                getattr(config, getter)(service, option)\n            except configparser.NoOptionError as error:\n                if option not in SERVICE_OPTIONAL_OPTIONS:\n                    raise ValueError(error)\n            except configparser.Error as error:\n                raise ValueError(error)\n            except ValueError as exc:\n                msg = (\"invalid data for '{opt}' option in service check \"\n                       \"{name}: {err}\"\n                       .format(opt=option, name=service, err=exc))\n                raise ValueError(msg)\n\n        if (config.get(service, 'on_disabled') != 'withdraw' and\n                config.get(service, 'on_disabled') != 'advertise'):\n            msg = (\"'on_disabled' option has invalid value ({val}) for \"\n                   \"service check {name}, 'on_disabled option should be set \"\n                   \"either to 'withdraw' or to 'advertise'\"\n                   .format(name=service,\n                           val=config.get(service, 'on_disabled')))\n            raise ValueError(msg)\n\n        ip_prefixes.append(config.get(service, 'ip_prefix'))\n\n        if not valid_ip_prefix(config.get(service, 'ip_prefix')):\n            msg = (\"invalid value ({val}) for 'ip_prefix' option in service \"\n                   \"check {name}. It should be an IP PREFIX in form of \"\n                   \"ip/prefixlen.\"\n                   .format(name=service, val=config.get(service, 'ip_prefix')))\n            raise ValueError(msg)\n\n        _ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))\n        if not ipv6_enabled and _ip_prefix.version == 6:\n            raise ValueError(\"IPv6 support is disabled in \"\n                             \"anycast-healthchecker while there is an IPv6 \"\n                             \"prefix configured for {name} service check\"\n                             .format(name=service))\n        if not ipv4_enabled and _ip_prefix.version == 4:\n            raise ValueError(\"IPv4 support is disabled in \"\n                             \"anycast-healthchecker while there is an IPv4 \"\n                             \"prefix configured for {name} service check\"\n                             .format(name=service))\n\n        cmd = shlex.split(config.get(service, 'check_cmd'))\n        try:\n            proc = subprocess.Popen(cmd)\n            proc.kill()\n        except (OSError, subprocess.SubprocessError) as exc:\n            msg = (\"failed to run check command '{cmd}' for service check \"\n                   \"{name}: {err}\"\n                   .format(name=service,\n                           cmd=config.get(service, 'check_cmd'),\n                           err=exc))\n            raise ValueError(msg)\n\n    occurrences_of_ip_prefixes = Counter(ip_prefixes)\n    for ip_prefix, counter in occurrences_of_ip_prefixes.items():\n        if counter > 1:\n            raise ValueError(\"{ip} is used by {c} service checks\"\n                             .format(ip=ip_prefix, c=counter))", "docstring": "Perform a sanity check against options for each service check.\n\nArguments:\nconfig (obj): A configparser object 
which holds our configuration.\n\nReturns:\nNone if all sanity checks are successfully passed otherwise raises a\nValueError exception.", "source": "juraj-google-style"}
{"code": "def make_tests(self, sdkobject, testcase):\n        \n        tests = dict()\n        attributes = sdkobject.get_attributes()\n\n        for attribute in attributes:\n\n            if attribute.local_name in self.IGNORED_ATTRIBUTES:\n                continue\n\n            for function_name, conditions in self._attributes_registry.items():\n                if self.does_attribute_meet_condition(attribute, conditions):\n                    (test_name, test_func) = self._create_test(testcase=testcase, sdkobject=sdkobject, function_name=function_name, attribute=attribute)\n                    tests[test_name] = test_func\n\n        for function_name, infos in self._object_registry.items():\n            (test_name, test_func) = self._create_test(testcase=testcase, sdkobject=sdkobject, function_name=function_name)\n            tests[test_name] = test_func\n\n        return tests", "docstring": "Make all tests that should be run for the given object in the specified testcase\n\nArgs:\nsdkobject: the sdk object\ntestcase: the test case\n\nReturns:\nIt returns a dictionary of all tests to run", "source": "juraj-google-style"}
{"code": "def ones(shape, dtype=None, **kwargs):\n    \n    data = np.ones(shape, dtype)\n    return dc.array(data, **kwargs)", "docstring": "Create an array of given shape and type, filled with ones.\n\nArgs:\nshape (sequence of ints): 2D shape of the array.\ndtype (data-type, optional): Desired data-type for the array.\nkwargs (optional): Other arguments of the array (*coords, attrs, and name).\n\nReturns:\narray (decode.array): Decode array filled with ones.", "source": "juraj-google-style"}
{"code": "def flatten_top_level_keys(data, top_level_keys):\n    \n    flattened_data = {}\n\n    for top_level_key in top_level_keys:\n        if data[top_level_key] is None:\n            flattened_data[top_level_key] = None\n        else:\n            for key in data[top_level_key]:\n                flattened_data['{}_-_{}'.format(top_level_key, key)] = data[top_level_key][key]\n\n    return flattened_data", "docstring": "Helper method to flatten a nested dict of dicts (one level)\n\nExample:\n{'a': {'b': 'bbb'}} becomes {'a_-_b': 'bbb'}\n\nThe separator '_-_' gets formatted later for the column headers\n\nArgs:\ndata: the dict to flatten\ntop_level_keys: a list of the top level keys to flatten ('a' in the example above)", "source": "juraj-google-style"}
{"code": "def AsDict(self):\n    sources = []\n    for source in self.sources:\n        source_definition = {'type': source.type_indicator, 'attributes': source.AsDict()}\n        if source.supported_os:\n            source_definition['supported_os'] = source.supported_os\n        if source.conditions:\n            source_definition['conditions'] = source.conditions\n        sources.append(source_definition)\n    artifact_definition = {'name': self.name, 'doc': self.description, 'sources': sources}\n    if self.labels:\n        artifact_definition['labels'] = self.labels\n    if self.supported_os:\n        artifact_definition['supported_os'] = self.supported_os\n    if self.provides:\n        artifact_definition['provides'] = self.provides\n    if self.conditions:\n        artifact_definition['conditions'] = self.conditions\n    if self.urls:\n        artifact_definition['urls'] = self.urls\n    return artifact_definition", "docstring": "Represents an artifact as a dictionary.\n\nReturns:\ndict[str, object]: artifact attributes.", "source": "codesearchnet"}
{"code": "def update_port_monitor(self, resource, timeout=-1):\n        \n        data = resource.copy()\n        if 'type' not in data:\n            data['type'] = 'port-monitor'\n\n        uri = \"{}{}\".format(self.data[\"uri\"], self.PORT_MONITOR_PATH)\n        return self._helper.update(data, uri=uri, timeout=timeout)", "docstring": "Updates the port monitor configuration of a logical interconnect.\n\nArgs:\nresource: Port monitor configuration.\n\nReturns:\ndict: Port monitor configuration.", "source": "juraj-google-style"}
{"code": "def get_signature_request_list(self, page=1, ux_version=None):\n        \n\n        request = self._get_request()\n        parameters = {\n            \"page\": page\n        }\n\n        if ux_version is not None:\n            parameters['ux_version'] = ux_version\n\n        return request.get(self.SIGNATURE_REQUEST_LIST_URL, parameters=parameters)", "docstring": "Get a list of SignatureRequest that you can access\n\nThis includes SignatureRequests you have sent as well as received, but\nnot ones that you have been CCed on.\n\nArgs:\n\npage (int, optional):   Which page number of the SignatureRequest list to return. Defaults to 1.\n\nux_version (int):       UX version, either 1 (default) or 2.\n\nReturns:\nA ResourceList object", "source": "juraj-google-style"}
{"code": "def consume(self, source):\n        \n        manifest = OrderedDict()\n\n        rules = parse_stylesheet(\n            source,\n            skip_comments=True,\n            skip_whitespace=True,\n        )\n\n        for rule in rules:\n            \n            name = self.digest_prelude(rule)\n\n            \n            if not name.startswith(RULE_BASE_PREFIX):\n                continue\n\n            properties = self.digest_content(rule)\n            manifest[name] = properties\n\n        return manifest", "docstring": "Parse source and consume tokens from tinycss2.\n\nArguments:\nsource (string): Source content to parse.\n\nReturns:\ndict: Retrieved rules.", "source": "juraj-google-style"}
{"code": "def _validate_isvalid_orcid(self, isvalid_orcid, field, value):\n    if (isvalid_orcid and ('ORCID' in value)):\n        try:\n            res = search_orcid(value['ORCID'])\n        except ConnectionError:\n            warn('network not available, ORCID not validated.')\n            return\n        except HTTPError:\n            self._error(field, ('ORCID incorrect or invalid for ' + value['name']))\n            return\n        family_name = res['name']['family-name']['value']\n        given_name = res['name']['given-names']['value']\n        if (not compare_name(given_name, family_name, value['name'])):\n            self._error(field, ((('Name and ORCID do not match. Name supplied: ' + value['name']) + '. Name associated with ORCID: ') + ' '.join([given_name, family_name])))", "docstring": "Checks for valid ORCID if given.\n\nArgs:\nisvalid_orcid (`bool`): flag from schema indicating ORCID to be checked.\nfield (`str`): 'author'\nvalue (`dict`): dictionary of author metadata.\n\nThe rule's arguments are validated against this schema:\n{'isvalid_orcid': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "codesearchnet"}
{"code": "def parse_variable(self, variable):\n    data = None\n    if (variable is not None):\n        variable = variable.strip()\n        if re.match(self._variable_match, variable):\n            var = re.search(self._variable_parse, variable)\n            data = {'root': var.group(0), 'job_id': var.group(2), 'name': var.group(3), 'type': var.group(4)}\n    return data", "docstring": "Method to parse an input or output variable.\n\n**Example Variable**::\n\n#App:1234:output!String\n\nArgs:\nvariable (string): The variable name to parse.\n\nReturns:\n(dictionary): Result of parsed string.", "source": "codesearchnet"}
{"code": "def distribute_equally(daily_data, divide=False):\n    index = hourly_index(daily_data.index)\n    hourly_data = daily_data.reindex(index)\n    hourly_data = hourly_data.groupby(hourly_data.index.day).transform((lambda x: x.fillna(method='ffill', limit=23)))\n    if divide:\n        hourly_data /= 24\n    return hourly_data", "docstring": "Obtains hourly values by equally distributing the daily values.\n\nArgs:\ndaily_data: daily values\ndivide: if True, divide resulting values by the number of hours in\norder to preserve the daily sum (required e.g. for precipitation).\n\nReturns:\nEqually distributed hourly values.", "source": "codesearchnet"}
{"code": "def get_parameter_names(self, include_frozen=False):\n    if include_frozen:\n        return self.parameter_names\n    return tuple((p for (p, f) in zip(self.parameter_names, self.unfrozen_mask) if f))", "docstring": "Get a list of the parameter names\n\nArgs:\ninclude_frozen (Optional[bool]): Should the frozen parameters be\nincluded in the returned value? (default: ``False``)", "source": "codesearchnet"}
{"code": "def redraw(self, reset_camera=False):\n    self.ren.RemoveAllViewProps()\n    self.picker = None\n    self.add_picker_fixed()\n    self.helptxt_mapper = vtk.vtkTextMapper()\n    tprops = self.helptxt_mapper.GetTextProperty()\n    tprops.SetFontSize(14)\n    tprops.SetFontFamilyToTimes()\n    tprops.SetColor(0, 0, 0)\n    if (self.structure is not None):\n        self.set_structure(self.structure, reset_camera)\n    self.ren_win.Render()", "docstring": "Redraw the render window.\n\nArgs:\nreset_camera: Set to True to reset the camera to a\npre-determined default for each structure.  Defaults to False.", "source": "codesearchnet"}
{"code": "def Convert(self, metadata, stat_entry, token=None):\n    \n    return self.BatchConvert([(metadata, stat_entry)], token=token)", "docstring": "Converts StatEntry to ExportedFile.\n\nDoes nothing if StatEntry corresponds to a registry entry and not to a file.\n\nArgs:\nmetadata: ExportedMetadata to be used for conversion.\nstat_entry: StatEntry to be converted.\ntoken: Security token.\n\nReturns:\nList or generator with resulting RDFValues. Empty list if StatEntry\ncorresponds to a registry entry and not to a file.", "source": "juraj-google-style"}
{"code": "def port_get_tag(port):\n    cmd = 'ovs-vsctl get port {0} tag'.format(port)\n    result = __salt__['cmd.run_all'](cmd)\n    retcode = result['retcode']\n    stdout = result['stdout']\n    return _stdout_list_split(retcode, stdout)", "docstring": "Lists tags of the port.\n\nArgs:\nport: A string - port name.\n\nReturns:\nList of tags (or empty list), False on failure.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_get_tag tap0", "source": "codesearchnet"}
{"code": "def _process_rules(self, rules):\n    cidr = []\n    non_cidr = []\n    for rule in rules:\n        if ('.' in rule['app']):\n            self.log.debug('Custom CIDR rule: %s', rule)\n            self._validate_cidr(rule)\n            cidr.append(rule)\n        else:\n            self.log.debug('SG reference rule: %s', rule)\n            non_cidr.append(rule)\n    self.log.debug('Custom CIDR rules: %s', cidr)\n    self.log.debug('SG reference rules: %s', non_cidr)\n    return (non_cidr, cidr)", "docstring": "Process rules into cidr and non-cidr lists.\n\nArgs:\nrules (list): Allowed Security Group ports and protocols.\n\nReturns:\n(list, list): Security Group reference rules and custom CIDR rules.", "source": "codesearchnet"}
{"code": "def Dump(obj):\n    text = yaml.safe_dump(obj, default_flow_style=False, allow_unicode=True)\n    if compatibility.PY2:\n        text = text.decode('utf-8')\n    return text", "docstring": "Stringifies a Python object into its YAML representation.\n\nArgs:\nobj: A Python object to convert to YAML.\n\nReturns:\nA YAML representation of the given object.", "source": "codesearchnet"}
{"code": "def score_braycurtis(self, term1, term2, **kwargs):\n\n        \n\n        t1_kde = self.kde(term1, **kwargs)\n        t2_kde = self.kde(term2, **kwargs)\n\n        return 1-distance.braycurtis(t1_kde, t2_kde)", "docstring": "Compute a weighting score based on the \"City Block\" distance between\nthe kernel density estimates of two terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns: float", "source": "juraj-google-style"}
{"code": "def dagify_min_edge(g):\n    while (not nx.is_directed_acyclic_graph(g)):\n        cycle = next(nx.simple_cycles(g))\n        scores = []\n        edges = []\n        for (i, j) in zip(cycle[:1], cycle[:1]):\n            edges.append((i, j))\n            scores.append(g[i][j]['weight'])\n        (i, j) = edges[scores.index(min(scores))]\n        gc = deepcopy(g)\n        gc.remove_edge(i, j)\n        gc.add_edge(j, i)\n        if (len(list(nx.simple_cycles(gc))) < len(list(nx.simple_cycles(g)))):\n            g.add_edge(j, i, weight=min(scores))\n        g.remove_edge(i, j)\n    return g", "docstring": "Input a graph and output a DAG.\n\nThe heuristic is to reverse the edge with the lowest score of the cycle\nif possible, else remove it.\n\nArgs:\ng (networkx.DiGraph): Graph to modify to output a DAG\n\nReturns:\nnetworkx.DiGraph: DAG made out of the input graph.", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    values_dict = {}\n\n    if registry_key.number_of_values == 0:\n      values_dict['Value'] = 'No values stored in key.'\n\n    else:\n      for registry_value in registry_key.GetValues():\n        value_name = registry_value.name or '(default)'\n\n        if registry_value.data is None:\n          value_string = '[{0:s}] Empty'.format(\n              registry_value.data_type_string)\n\n        elif registry_value.DataIsString():\n          value_string = registry_value.GetDataAsObject()\n          value_string = '[{0:s}] {1:s}'.format(\n              registry_value.data_type_string, value_string)\n\n        elif registry_value.DataIsInteger():\n          value_integer = registry_value.GetDataAsObject()\n          value_string = '[{0:s}] {1:d}'.format(\n              registry_value.data_type_string, value_integer)\n\n        elif registry_value.DataIsMultiString():\n          multi_string = registry_value.GetDataAsObject()\n          if not isinstance(multi_string, (list, tuple)):\n            value_string = '[{0:s}]'.format(registry_value.data_type_string)\n            \n          else:\n            value_string = '[{0:s}] {1:s}'.format(\n                registry_value.data_type_string, ''.join(multi_string))\n\n        else:\n          value_string = '[{0:s}]'.format(registry_value.data_type_string)\n\n        values_dict[value_name] = value_string\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):\n    min_patches = images_kwargs.get('min_patches', None) or self.min_patches\n    max_patches = images_kwargs.get('max_patches', None) or self.max_patches\n    patch_size = images_kwargs.get('size', None) or self.size\n    crop_to_patches = images_kwargs.get('crop_to_patches', None) or self.crop_to_patches\n    num_patches = 1\n    if crop_to_patches and max_patches > 1:\n        num_columns, num_rows = get_optimal_tiled_canvas((height, width), (patch_size['height'], patch_size['width']), min_patches, max_patches)\n        num_patches += num_columns * num_rows\n    return num_patches", "docstring": "A utility that returns number patches for a given image size.\n\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nimages_kwargs (`dict`, *optional*)\nAny kwargs to override defaults of the image processor.\nReturns:\n`int`: Number of patches per image.", "source": "github-repos"}
{"code": "def add_relationship(self, txn_id, predecessors):\n        \n\n        all_pred = set(predecessors)\n        for pred in predecessors:\n            all_pred.update(self._predecessors_by_id[pred])\n\n        self._predecessors_by_id[txn_id] = all_pred", "docstring": "Add a predecessor-successor relationship between one txn id and\na set of predecessors.\n\nArgs:\ntxn_id (str): The transaction id of the transaction.\npredecessors (set): The transaction ids of the\ntransaction's predecessors\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None):\n    query_dict = {}\n    search_term = None\n    if query:\n        query_dict = {'$or': [{'hpo_id': {'$regex': query, '$options': 'i'}}, {'description': {'$regex': query, '$options': 'i'}}]}\n        search_term = query\n    elif text:\n        new_string = ''\n        for (i, word) in enumerate(text.split(' ')):\n            if (i == 0):\n                new_string += word\n            else:\n                new_string += ' \"{0}\"'.format(word)\n        LOG.info('Search HPO terms with %s', new_string)\n        query_dict['$text'] = {'$search': new_string}\n        search_term = text\n    elif hpo_term:\n        query_dict['hpo_id'] = hpo_term\n        search_term = hpo_term\n    limit = (limit or int(100000000000.0))\n    res = self.hpo_term_collection.find(query_dict).limit(limit).sort('hpo_number', ASCENDING)\n    LOG.info('Found {0} terms with search word {1}'.format(res.count(), search_term))\n    return res", "docstring": "Return all HPO terms\n\nIf a query is sent hpo_terms will try to match with regex on term or\ndescription.\n\nArgs:\nquery(str): Part of a hpoterm or description\nhpo_term(str): Search for a specific hpo term\nlimit(int): the number of desired results\n\nReturns:\nresult(pymongo.Cursor): A cursor with hpo terms", "source": "codesearchnet"}
{"code": "def eval(self, expr, **kwargs):\n        \n        columns = self.index if self._is_transposed else self.columns\n        index = self.columns if self._is_transposed else self.index\n\n        \n        \n        columns_copy = pandas.DataFrame(columns=self.columns)\n        columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)\n        expect_series = isinstance(columns_copy, pandas.Series)\n\n        def eval_builder(df, **kwargs):\n            \n            \n            kwargs.pop(\"axis\", None)\n            df.columns = columns\n            result = df.eval(expr, inplace=False, **kwargs)\n            return result\n\n        func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)\n        new_data = self._map_across_full_axis(1, func)\n\n        if expect_series:\n            new_columns = [columns_copy.name]\n            new_index = index\n        else:\n            new_columns = columns_copy.columns\n            new_index = self.index\n        return self.__constructor__(new_data, new_index, new_columns)", "docstring": "Returns a new QueryCompiler with expr evaluated on columns.\n\nArgs:\nexpr: The string expression to evaluate.\n\nReturns:\nA new QueryCompiler with new columns after applying expr.", "source": "juraj-google-style"}
{"code": "def str_to_etree(xml_str, encoding='utf-8'):\n    parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n    return xml.etree.ElementTree.fromstring(xml_str, parser=parser)", "docstring": "Deserialize API XML doc to an ElementTree.\n\nArgs:\nxml_str: bytes\nDataONE API XML doc\n\nencoding: str\nDecoder to use when converting the XML doc ``bytes`` to a Unicode str.\n\nReturns:\nElementTree: Matching the API version of the XML doc.", "source": "codesearchnet"}
{"code": "def summarize_variables(var_list=None, tag=None):\n  \n  if var_list is None:\n    var_list = tf.trainable_variables()\n  if tag is None:\n    tag = \"training_variables/\"\n\n  name_to_var = {v.name: v for v in var_list}\n  for v_name in list(name_to_var):\n    v = name_to_var[v_name]\n    tf.summary.histogram(tag + v_name, v)", "docstring": "Summarize the variables.\n\nArgs:\nvar_list: a list of variables; defaults to trainable_variables.\ntag: name scope of the summary; defaults to training_variables/.", "source": "juraj-google-style"}
{"code": "def assertAllGreater(self, a, comparison_target):\n    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)\n    a = self._GetNdArray(a)\n    self.assertGreater(np.min(a), comparison_target)", "docstring": "Assert element values are all greater than a target value.\n\nArgs:\na: The numpy `ndarray`, or anything that can be converted into a numpy\n`ndarray` (including Tensor).\ncomparison_target: The target value of comparison.", "source": "github-repos"}
{"code": "def requested_test_names_dict(self):\n    return {'Requested Tests': copy.deepcopy(self.requested)}", "docstring": "Gets the requested test names of a test run in a dict format.\n\nNote a test can be requested multiple times, so there can be duplicated\nvalues\n\nReturns:\nA dict with a key and the list of strings.", "source": "github-repos"}
{"code": "def _truncate(self, new_rank: int) -> 'DynamicRaggedShape.Spec':\n    if self.rank is None:\n        return self._set_rank_if_unknown(new_rank)._truncate(new_rank)\n    if new_rank == 0:\n        return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype)\n    if new_rank == 1:\n        vector_size = self._dimension(0)\n        return DynamicRaggedShape.Spec._from_tensor_shape([vector_size], 0, self.dtype)\n    if new_rank < self.num_row_partitions + 1:\n        new_row_partitions = self._row_partitions[:new_rank - 1]\n        new_static_inner_shape = tensor_shape.TensorShape([new_row_partitions[-1].nvals])\n        return DynamicRaggedShape.Spec(row_partitions=new_row_partitions, static_inner_shape=new_static_inner_shape, dtype=self.dtype)\n    else:\n        remainder = new_rank - self.num_row_partitions\n        new_static_inner_shape = self._static_inner_shape[:remainder]\n        return DynamicRaggedShape.Spec(row_partitions=self._row_partitions, static_inner_shape=new_static_inner_shape, dtype=self.dtype)", "docstring": "Truncate a ragged shape spec.\n\nFor example, if the original spec s was for a shape:\n[3, [4, 1], 2, 7]\n\nThen truncate_dynamic_ragged_shape_spec(s, 3) is a spec for:\n[3, [4, 1], 2]\n\nArgs:\nnew_rank: the new rank\n\nReturns:\nA truncated DynamicRaggedShape.Spec.", "source": "github-repos"}
{"code": "def getKeyName(username, date, blob_key):\n    sep = FileMetadata.__SEP\n    return str(((((username + sep) + str(date)) + sep) + blob_key))", "docstring": "Returns the internal key for a particular item in the database.\n\nOur items are stored with keys of the form 'user/date/blob_key' ('/' is\nnot the real separator, but __SEP is).\n\nArgs:\nusername: The given user's e-mail address.\ndate: A datetime object representing the date and time that an input\nfile was uploaded to this app.\nblob_key: The blob key corresponding to the location of the input file\nin the Blobstore.\nReturns:\nThe internal key for the item specified by (username, date, blob_key).", "source": "codesearchnet"}
{"code": "def terminate_ec2_instance(client, resource):\n    instance = EC2Instance.get(resource.id)\n    if (instance.state == 'terminated'):\n        return (ActionStatus.IGNORED, {})\n    client.terminate_instances(InstanceIds=[resource.id])\n    return (ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip})", "docstring": "Terminate an EC2 Instance\n\nThis function will terminate an EC2 Instance.\n\nArgs:\nclient (:obj:`boto3.session.Session.client`): A boto3 client object\nresource (:obj:`Resource`): The resource object to terminate\n\nReturns:\n`ActionStatus`", "source": "codesearchnet"}
{"code": "def with_min_execution_time(self, min_micros=0, min_accelerator_micros=0, min_cpu_micros=0):\n    self._options['min_micros'] = min_micros\n    self._options['min_accelerator_micros'] = min_accelerator_micros\n    self._options['min_cpu_micros'] = min_cpu_micros\n    return self", "docstring": "Only show profiler nodes consuming no less than 'min_micros'.\n\nArgs:\nmin_micros: Only show profiler nodes with execution time\nno less than this. It sums accelerator and cpu times.\nmin_accelerator_micros: Only show profiler nodes spend no less than\nthis time on accelerator (e.g. GPU).\nmin_cpu_micros: Only show profiler nodes spend no less than\nthis time on cpu.\nReturns:\nself", "source": "github-repos"}
{"code": "def read(self, size=None):\n    data = b''\n    while ((size and (len(data) < size)) and (self._current_offset < self.uncompressed_data_size)):\n        member = self._GetMemberForOffset(self._current_offset)\n        member_offset = (self._current_offset - member.uncompressed_data_offset)\n        data_read = member.ReadAtOffset(member_offset, size)\n        if data_read:\n            self._current_offset += len(data_read)\n            data = b''.join([data, data_read])\n    return data", "docstring": "Reads a byte string from the gzip file at the current offset.\n\nThe function will read a byte string up to the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "codesearchnet"}
{"code": "def rebin(d, n_x, n_y=None):\n    if (d.ndim == 2):\n        if (n_y is None):\n            n_y = 1\n        if (n_x is None):\n            n_x = 1\n        d = d[(:(int((d.shape[0] \n        d = d.reshape(((d.shape[0] \n        d = d.mean(axis=3)\n        d = d.mean(axis=1)\n    elif (d.ndim == 1):\n        d = d[:(int((d.shape[0] \n        d = d.reshape(((d.shape[0] \n        d = d.mean(axis=1)\n    else:\n        raise RuntimeError('Only NDIM <= 2 supported')\n    return d", "docstring": "Rebin data by averaging bins together\n\nArgs:\nd (np.array): data\nn_x (int): number of bins in x dir to rebin into one\nn_y (int): number of bins in y dir to rebin into one\n\nReturns:\nd: rebinned data with shape (n_x, n_y)", "source": "codesearchnet"}
{"code": "def load_glove(file):\n    \n    model = {}\n    with open(file, encoding=\"utf8\", errors='ignore') as f:\n        for line in f:\n            line = line.split(' ')\n            word = line[0]\n            vector = np.array([float(val) for val in line[1:]])\n            model[word] = vector\n\n    return model", "docstring": "Loads GloVe vectors in numpy array.\n\nArgs:\nfile (str): a path to a glove file.\n\nReturn:\ndict: a dict of numpy arrays.", "source": "juraj-google-style"}
{"code": "def get_all_options(self, drop_default=False, add_extra_args_fn: Optional[Callable[[_BeamArgumentParser], None]]=None, retain_unknown_options=False) -> Dict[str, Any]:\n    subset = {}\n    parser = _BeamArgumentParser(allow_abbrev=False)\n    for cls in PipelineOptions.__subclasses__():\n        subset[str(cls)] = cls\n    for cls in subset.values():\n        cls._add_argparse_args(parser)\n    if add_extra_args_fn:\n        add_extra_args_fn(parser)\n    known_args, unknown_args = parser.parse_known_args(self._flags)\n    if retain_unknown_options:\n        if unknown_args:\n            _LOGGER.warning('Unknown pipeline options received: %s. Ignore if flags are used for internal purposes.' % ','.join(unknown_args))\n        seen = set()\n\n        def add_new_arg(arg, **kwargs):\n            if arg not in seen:\n                parser.add_argument(arg, **kwargs)\n            seen.add(arg)\n        i = 0\n        while i < len(unknown_args):\n            if unknown_args[i] == '--':\n                break\n            if not unknown_args[i].startswith('-'):\n                i += 1\n                continue\n            if i + 1 >= len(unknown_args) or unknown_args[i + 1].startswith('-'):\n                split = unknown_args[i].split('=', 1)\n                if len(split) == 1:\n                    add_new_arg(unknown_args[i], action='store_true')\n                else:\n                    add_new_arg(split[0], type=str)\n                i += 1\n            elif unknown_args[i].startswith('--'):\n                add_new_arg(unknown_args[i], type=str)\n                i += 2\n            else:\n                _LOGGER.warning('Discarding flag %s, single dash flags are not allowed.', unknown_args[i])\n                i += 2\n                continue\n        parsed_args, _ = parser.parse_known_args(self._flags)\n    else:\n        if unknown_args:\n            _LOGGER.warning('Discarding unparseable args: %s', unknown_args)\n        parsed_args = known_args\n    result = vars(parsed_args)\n    overrides = self._all_options.copy()\n    for k in list(result):\n        overrides.pop(k, None)\n        if k in self._all_options:\n            result[k] = self._all_options[k]\n        if drop_default and parser.get_default(k) == result[k] and (not isinstance(parser.get_default(k), ValueProvider)):\n            del result[k]\n    if overrides:\n        if retain_unknown_options:\n            result.update(overrides)\n        else:\n            _LOGGER.warning('Discarding invalid overrides: %s', overrides)\n    return result", "docstring": "Returns a dictionary of all defined arguments.\n\nReturns a dictionary of all defined arguments (arguments that are defined in\nany subclass of PipelineOptions) into a dictionary.\n\nArgs:\ndrop_default: If set to true, options that are equal to their default\nvalues, are not returned as part of the result dictionary.\nadd_extra_args_fn: Callback to populate additional arguments, can be used\nby runner to supply otherwise unknown args.\nretain_unknown_options: If set to true, options not recognized by any\nknown pipeline options class will still be included in the result. If\nset to false, they will be discarded.\n\nReturns:\nDictionary of all args and values.", "source": "github-repos"}
{"code": "def add_user(self, user_obj):\n        \n        LOG.info(\"Adding user %s to the database\", user_obj['email'])\n        if not '_id' in user_obj:\n            user_obj['_id'] = user_obj['email']\n    \n        try:\n            self.user_collection.insert_one(user_obj)\n            LOG.debug(\"User inserted\")\n        except DuplicateKeyError as err:\n            raise IntegrityError(\"User {} already exists in database\".format(user_obj['email']))\n\n        return user_obj", "docstring": "Add a user object to the database\n\nArgs:\nuser_obj(scout.models.User): A dictionary with user information\n\nReturns:\nuser_info(dict): a copy of what was inserted", "source": "juraj-google-style"}
{"code": "def get_models(self, model, page=None):\n        \n        if page is not None:\n            return self._store.find_all(self._get_model_class(model), params={'page': int(page)})\n        else:\n            return self._store.find_all(self._get_model_class(model))", "docstring": "Get all the models from the server.\n\nArgs:\nmodel (string): The class as a string.\npage (string, optional): The page number as a string\n\nReturns:\nlist: A list of instances of the requested model.", "source": "juraj-google-style"}
{"code": "def add_number_parameters(self, number):\n        \n        if isinstance(number, list):\n            for x in number:\n                self.add_number_parameters(x)\n            return\n        self._parameters.append(\"{ \\\"value\\\": \" + str(number) + \" }\")", "docstring": "Add given number parameters to the internal list.\n\nArgs:\nnumber (list of int or list of float): A number or list of numbers to add to the parameters.", "source": "juraj-google-style"}
{"code": "def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n    flattened = self.flatten_nodes()\n    if self.aggregation == OpHint.AGGREGATE_FIRST or self.aggregation == OpHint.AGGREGATE_LAST:\n        assert len(flattened) == 1\n    if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:\n        temp_op = _LiteSingleOperand(flattened[0])\n        return temp_op.aggregate_and_return_name_for_output(fused_op_name, output_index, out_graphdef)\n    else:\n        stack_node = _node_def_pb2.NodeDef()\n        stack_node.op = 'Unpack'\n        stack_node.name = 'OpHintUnstack-%s' % flattened[0].name\n        stack_node.attr['num'].i = len(flattened)\n        output_type = flattened[0].attr['T'].type\n        stack_node.attr['T'].type = output_type\n        stack_node.input.append(_tensorflow_output_name(fused_op_name, output_index))\n        out_graphdef.node.extend([stack_node])\n        for idx, discrete in enumerate(flattened):\n            output_node = _copy.deepcopy(discrete)\n            del output_node.input[:]\n            output_node.input.append(_tensorflow_output_name(stack_node.name, idx))\n            out_graphdef.node.extend([output_node])\n        return output_type", "docstring": "This adds to `out_graphdef` all the unaggregated outputs.\n\nI.e. we are outputting from a fused stub, but we need to make it compatible\nwith the unfused original graph so we insert an unpack. Ideally in a later\nstage the unpack -> pack sequences will be removed.\n\nArgs:\nfused_op_name: The name of the stub we are in the process of fusing.\noutput_index: The output output_index this object represents.\nout_graphdef: The graphdef we are in the process of buildings\n\nReturns:\nThe type of the aggregated output (so we can finish building the stub\nop).", "source": "github-repos"}
{"code": "def fa_peft_integration_check(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, target_dtype: Optional[torch.dtype]=None):\n    if target_dtype is None:\n        return (query, key, value)\n    input_dtype = query.dtype\n    if input_dtype == torch.float32:\n        logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')\n        query = query.to(target_dtype)\n        key = key.to(target_dtype)\n        value = value.to(target_dtype)\n    return (query, key, value)", "docstring": "PEFT usually casts the layer norms in float32 for training stability reasons\ntherefore the input hidden states gets silently casted in float32. Hence, we need\ncast them back in float16 / bfloat16 just to be sure everything works as expected.\nThis might slowdown training & inference so it is recommended to not cast the LayerNorms!\n\nArgs:\nquery (`torch.Tensor`):\nInput query states to be passed to Flash Attention API\nkey (`torch.Tensor`):\nInput key states to be passed to Flash Attention API\nvalue (`torch.Tensor`):\nInput value states to be passed to Flash Attention API\ntarget_dtype (`torch.dtype`, *optional*):\nThe dtype to convert the attention tensors to. Conversion can be ignored by\nnot providing the target dtype.", "source": "github-repos"}
{"code": "def group_by(self, key, field=(lambda x: x.xfer)):\n    return Transactions([t for t in self.trans if (field(t) == key)])", "docstring": "Returns all transactions whose given ``field`` matches ``key``.\n\nReturns:\nA ``Transactions`` object.", "source": "codesearchnet"}
{"code": "def add_server(self, name, prefer=False):\n    if ((not name) or re.match('^[\\\\s]+$', name)):\n        raise ValueError('ntp server name must be specified')\n    if prefer:\n        name = ('%s prefer' % name)\n    cmd = self.command_builder('ntp server', value=name)\n    return self.configure(cmd)", "docstring": "Add or update an NTP server entry to the node config\n\nArgs:\nname (string): The IP address or FQDN of the NTP server.\nprefer (bool): Sets the NTP server entry as preferred if True.\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"}
{"code": "def poll_for_job_completion(runner, result, duration, state_update_callback=None):\n    if result.state == PipelineState.DONE:\n        return\n    last_message_time = None\n    current_seen_messages = set()\n    last_error_rank = float('-inf')\n    last_error_msg = None\n    last_job_state = None\n    final_countdown_timer_secs = 50.0\n    sleep_secs = 5.0\n\n    def rank_error(msg):\n        if 'work item was attempted' in msg:\n            return -1\n        elif 'Traceback' in msg:\n            return 1\n        return 0\n    if duration:\n        start_secs = time.time()\n        duration_secs = duration \n    job_id = result.job_id()\n    while True:\n        response = runner.dataflow_client.get_job(job_id)\n        if response.currentState is not None:\n            if response.currentState != last_job_state:\n                if state_update_callback:\n                    state_update_callback(response.currentState)\n                _LOGGER.info('Job %s is in state %s', job_id, response.currentState)\n                last_job_state = response.currentState\n            if str(response.currentState) != 'JOB_STATE_RUNNING':\n                if final_countdown_timer_secs <= 0.0 or last_error_msg is not None or str(response.currentState) == 'JOB_STATE_DONE' or (str(response.currentState) == 'JOB_STATE_CANCELLED') or (str(response.currentState) == 'JOB_STATE_UPDATED') or (str(response.currentState) == 'JOB_STATE_DRAINED'):\n                    break\n                if str(response.currentState) not in ('JOB_STATE_PENDING', 'JOB_STATE_QUEUED'):\n                    sleep_secs = 1.0\n                    final_countdown_timer_secs -= sleep_secs\n        time.sleep(sleep_secs)\n        page_token = None\n        while True:\n            messages, page_token = runner.dataflow_client.list_messages(job_id, page_token=page_token, start_time=last_message_time)\n            for m in messages:\n                message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)\n                if not last_message_time or m.time > last_message_time:\n                    last_message_time = m.time\n                    current_seen_messages = set()\n                if message in current_seen_messages:\n                    continue\n                else:\n                    current_seen_messages.add(message)\n                if m.messageImportance is None:\n                    continue\n                message_importance = str(m.messageImportance)\n                if message_importance == 'JOB_MESSAGE_DEBUG' or message_importance == 'JOB_MESSAGE_DETAILED':\n                    _LOGGER.debug(message)\n                elif message_importance == 'JOB_MESSAGE_BASIC':\n                    _LOGGER.info(message)\n                elif message_importance == 'JOB_MESSAGE_WARNING':\n                    _LOGGER.warning(message)\n                elif message_importance == 'JOB_MESSAGE_ERROR':\n                    _LOGGER.error(message)\n                    if rank_error(m.messageText) >= last_error_rank:\n                        last_error_rank = rank_error(m.messageText)\n                        last_error_msg = m.messageText\n                else:\n                    _LOGGER.info(message)\n            if not page_token:\n                break\n        if duration:\n            passed_secs = time.time() - start_secs\n            if passed_secs > duration_secs:\n                _LOGGER.warning('Timing out on waiting for job %s after %d seconds', job_id, passed_secs)\n                break\n    
result._job = response\n    runner.last_error_msg = last_error_msg", "docstring": "Polls for the specified job to finish running (successfully or not).\n\nUpdates the result with the new job information before returning.\n\nArgs:\nrunner: DataflowRunner instance to use for polling job state.\nresult: DataflowPipelineResult instance used for job information.\nduration (int): The time to wait (in milliseconds) for job to finish.\nIf it is set to :data:`None`, it will wait indefinitely until the job\nis finished.", "source": "github-repos"}
{"code": "def set_conf_str(conf, optstrs):\n    falsy = ['0', 'no', 'n', 'off', 'false', 'f']\n    bool_actions = ['store_true', 'store_false', internal.Switch]\n    for optstr in optstrs:\n        (opt, val) = optstr.split('=', 1)\n        (sec, opt) = opt.split('.', 1)\n        if (sec not in conf):\n            raise error.SectionError(sec)\n        if (opt not in conf[sec]):\n            raise error.OptionError(opt)\n        meta = conf[sec].def_[opt]\n        if (meta.default is None):\n            if ('type' in meta.cmd_kwargs):\n                cast = meta.cmd_kwargs['type']\n            else:\n                act = meta.cmd_kwargs.get('action')\n                cast = (bool if (act in bool_actions) else str)\n        else:\n            cast = type(meta.default)\n        if ((cast is bool) and (val.lower() in falsy)):\n            val = ''\n        conf[sec][opt] = cast(val)", "docstring": "Set options from a list of section.option=value string.\n\nArgs:\nconf (:class:`~loam.manager.ConfigurationManager`): the conf to update.\noptstrs (list of str): the list of 'section.option=value' formatted\nstring.", "source": "codesearchnet"}
{"code": "def deconstruct_single_qubit_matrix_into_angles(mat: np.ndarray) -> Tuple[(float, float, float)]:\n    right_phase = (cmath.phase((mat[(0, 1)] * np.conj(mat[(0, 0)]))) + math.pi)\n    mat = np.dot(mat, _phase_matrix((- right_phase)))\n    bottom_phase = cmath.phase((mat[(1, 0)] * np.conj(mat[(0, 0)])))\n    mat = np.dot(_phase_matrix((- bottom_phase)), mat)\n    rotation = math.atan2(abs(mat[(1, 0)]), abs(mat[(0, 0)]))\n    mat = np.dot(_rotation_matrix((- rotation)), mat)\n    diagonal_phase = cmath.phase((mat[(1, 1)] * np.conj(mat[(0, 0)])))\n    return ((right_phase + diagonal_phase), (rotation * 2), bottom_phase)", "docstring": "Breaks down a 2x2 unitary into more useful ZYZ angle parameters.\n\nArgs:\nmat: The 2x2 unitary matrix to break down.\n\nReturns:\nA tuple containing the amount to phase around Z, then rotate around Y,\nthen phase around Z (all in radians).", "source": "codesearchnet"}
{"code": "def variable_shape(handle, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    handle_data = get_eager_safe_handle_data(handle)\n    if handle_data is None or not handle_data.is_set:\n        return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)\n    shape_proto = handle_data.shape_and_type[0].shape\n    if shape_proto.unknown_rank or any((x.size == -1 for x in shape_proto.dim)):\n        return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)\n    return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type)", "docstring": "Returns the shape of the variable from the handle.\n\nIf the output shape dtype is not specified, it will be set to int64 if\ntf_shape_default_int64 is enabled, otherwise it will be set to int32.\n\nArgs:\nhandle: The handle of the variable.\nout_type: The dtype of the output shape.\n\nReturns:\nThe shape of the variable.", "source": "github-repos"}
{"code": "def delete(self, resource, timeout=(- 1)):\n    if (type(resource) is dict):\n        headers = {'If-Match': resource.get('eTag', '*')}\n    else:\n        headers = {'If-Match': '*'}\n    return self._client.delete(resource, timeout=timeout, custom_headers=headers)", "docstring": "Deletes a Scope.\n\nArgs:\nresource: dict object to delete\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nbool: Indicates if the resource was successfully deleted.", "source": "codesearchnet"}
{"code": "def Gamma(cls, shape: 'TensorFluent', scale: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:\n    if (shape.scope != scale.scope):\n        raise ValueError('Gamma distribution: parameters must have same scope!')\n    concentration = shape.tensor\n    rate = (1 / scale.tensor)\n    dist = tf.distributions.Gamma(concentration, rate)\n    batch = (shape.batch or scale.batch)\n    if ((not batch) and (batch_size is not None)):\n        t = dist.sample(batch_size)\n        batch = True\n    else:\n        t = dist.sample()\n    scope = shape.scope.as_list()\n    return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters.\n\nArgs:\nshape: The shape parameter of the Gamma distribution.\nscale: The scale parameter of the Gamma distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Gamma distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\nValueError: If parameters do not have the same scope.", "source": "codesearchnet"}
{"code": "def gym_space_spec(gym_space):\n  \n  \n  try:\n    tf_dtype = tf.as_dtype(gym_space.dtype)\n  except TypeError as e:\n    tf.logging.error(\"Cannot convert space's type [%s] to tf.dtype\",\n                     gym_space.dtype)\n    raise e\n\n  \n  if isinstance(gym_space, Box):\n    return box_space_spec(gym_space, tf_dtype)\n  elif isinstance(gym_space, Discrete):\n    return discrete_space_spec(gym_space, tf_dtype)\n  else:\n    raise NotImplementedError", "docstring": "Returns a reading spec of a gym space.\n\nNOTE: Only implemented currently for Box and Discrete.\n\nArgs:\ngym_space: instance of gym.spaces whose spec we want.\n\nReturns:\nReading spec for that space.\n\nRaises:\nNotImplementedError: For spaces whose reading spec we haven't implemented.", "source": "juraj-google-style"}
{"code": "def send(self, config, log, obs_id, beam_id):\n    log.info('Starting Pulsar Data Transfer...')\n    socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id))\n    socket.send(json.dumps(config).encode())\n    socket.send(bytearray((1000 * 1000)))\n    config['metadata']['name'] = 'candidate_two'\n    socket.send(json.dumps(config).encode())\n    socket.send(bytearray((1000 * 1000)))\n    socket.close()\n    log.info('Pulsar Data Transfer Completed...')", "docstring": "Send the pulsar data to the ftp server\n\nArgs:\nconfig (dict): Dictionary of settings\nlog (logging.Logger): Python logging object\nobs_id: observation id\nbeam_id: beam id", "source": "codesearchnet"}
{"code": "def reinit_nested_vars(variables, indices=None):\n  \n  if isinstance(variables, (tuple, list)):\n    return tf.group(*[\n        reinit_nested_vars(variable, indices) for variable in variables])\n  if indices is None:\n    return variables.assign(tf.zeros_like(variables))\n  else:\n    zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())\n    return tf.scatter_update(variables, indices, zeros)", "docstring": "Reset all variables in a nested tuple to zeros.\n\nArgs:\nvariables: Nested tuple or list of variables.\nindices: Batch indices to reset, defaults to all.\n\nReturns:\nOperation.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    self.SendEvents = channel.stream_stream('/tensorflow.EventListener/SendEvents', request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)\n    self.SendTracebacks = channel.unary_unary('/tensorflow.EventListener/SendTracebacks', request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)\n    self.SendSourceFiles = channel.unary_unary('/tensorflow.EventListener/SendSourceFiles', request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "github-repos"}
{"code": "def update_function_configuration(self, vpc_config):\n        \n        LOG.info('Updating configuration for lambda function: %s', self.app_name)\n\n        try:\n            self.lambda_client.update_function_configuration(\n                Environment=self.lambda_environment,\n                FunctionName=self.app_name,\n                Runtime=self.runtime,\n                Role=self.role_arn,\n                Handler=self.handler,\n                Description=self.description,\n                Timeout=int(self.timeout),\n                MemorySize=int(self.memory),\n                VpcConfig=vpc_config)\n\n            if self.concurrency_limit:\n                self.lambda_client.put_function_concurrency(\n                    FunctionName=self.app_name,\n                    ReservedConcurrentExecutions=self.concurrency_limit\n                )\n            else:\n                self.lambda_client.delete_function_concurrency(FunctionName=self.app_name)\n\n        except boto3.exceptions.botocore.exceptions.ClientError as error:\n            if 'CreateNetworkInterface' in error.response['Error']['Message']:\n                message = '{0} is missing \"ec2:CreateNetworkInterface\"'.format(self.role_arn)\n                LOG.debug(message)\n                raise SystemExit(message)\n\n            raise\n        LOG.info('Updating Lambda function tags')\n\n        lambda_arn = get_lambda_arn(self.app_name, self.env, self.region)\n        self.lambda_client.tag_resource(Resource=lambda_arn, Tags={'app_group': self.group, 'app_name': self.app_name})\n\n        LOG.info(\"Successfully updated Lambda configuration.\")", "docstring": "Update existing Lambda function configuration.\n\nArgs:\nvpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using\na VPC in lambda", "source": "juraj-google-style"}
{"code": "async def _try_catch_coro(emitter, event, listener, coro):\n    try:\n        (await coro)\n    except Exception as exc:\n        if (event == emitter.LISTENER_ERROR_EVENT):\n            raise\n        emitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc)", "docstring": "Coroutine wrapper to catch errors after async scheduling.\n\nArgs:\nemitter (EventEmitter): The event emitter that is attempting to\ncall a listener.\nevent (str): The event that triggered the emitter.\nlistener (async def): The async def that was used to generate the coro.\ncoro (coroutine): The coroutine that should be tried.\n\nIf an exception is caught the function will use the emitter to emit the\nfailure event. If, however, the current event _is_ the failure event then\nthe method reraises. The reraised exception may show in debug mode for the\nevent loop but is otherwise silently dropped.", "source": "codesearchnet"}
{"code": "def _build(self, input_batch, is_training, test_local_stats=True):\n    input_shape = input_batch.get_shape()\n    if (self._axis is not None):\n        if (len(self._axis) > len(input_shape)):\n            raise base.IncompatibleShapeError('Too many indices specified in axis: len({}) > len({}).'.format(self._axis, input_shape))\n        if (max(self._axis) >= len(input_shape)):\n            raise base.IncompatibleShapeError('One or more index in axis is too large for input shape: {} >= {:d}.'.format(self._axis, len(input_shape)))\n        if (min(self._axis) < 0):\n            raise base.IncompatibleShapeError('Indices in axis must be non-negative: {} < 0.'.format(self._axis))\n        axis = self._axis\n    else:\n        axis = tuple(range(len(input_shape))[:(- 1)])\n    dtype = input_batch.dtype.base_dtype\n    if (self._fused and (dtype == tf.bfloat16)):\n        raise base.NotSupportedError('Fused batch norm does not support tf.bfloat16.')\n    stat_dtype = (tf.float32 if (dtype in [tf.float16, tf.bfloat16]) else dtype)\n    self._mean_shape = input_batch.get_shape().as_list()\n    for index in axis:\n        self._mean_shape[index] = 1\n    use_batch_stats = (is_training | test_local_stats)\n    (mean, variance) = self._build_statistics(input_batch, axis, use_batch_stats, stat_dtype)\n    self._build_scale_offset(dtype)\n    (out, mean, variance) = self._batch_norm_op(input_batch, mean, variance, use_batch_stats, stat_dtype)\n    update_ops = self._build_update_ops(mean, variance, is_training)\n    if update_ops:\n        if self._update_ops_collection:\n            for update_op in update_ops:\n                tf.add_to_collection(self._update_ops_collection, update_op)\n        else:\n            with tf.control_dependencies(update_ops):\n                out = tf.identity(out)\n    return out", "docstring": "Connects the BatchNorm module into the graph.\n\nArgs:\ninput_batch: A Tensor of arbitrary dimension. By default, the final\ndimension is not reduced over when computing the minibatch statistics.\nis_training: A boolean to indicate if the module should be connected in\ntraining mode, meaning the moving averages are updated. Can be a Tensor.\ntest_local_stats: A boolean to indicate if local batch statistics should\nbe used when `is_training=False`. If not, moving averages are used.\nBy default `True`. Can be a Tensor.\n\nReturns:\nA tensor with the same shape as `input_batch`.\n\nRaises:\nbase.IncompatibleShapeError: If `axis` is not valid for the\ninput shape or has negative entries.\nbase.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`.", "source": "codesearchnet"}
{"code": "def median(data):\n    \n    ordered = sorted(data)\n    length = len(ordered)\n    if length % 2 == 0:\n        return (\n            ordered[math.floor(length / 2) - 1] + ordered[math.floor(length / 2)]\n        ) / 2.0\n\n    elif length % 2 != 0:\n        return ordered[math.floor(length / 2)]", "docstring": "Calculates  the median of a list of integers or floating point numbers.\n\nArgs:\ndata: A list of integers or floating point numbers\n\nReturns:\nSorts the list numerically and returns the middle number if the list has an odd number\nof items. If the list contains an even number of items the mean of the two middle numbers\nis returned.", "source": "juraj-google-style"}
{"code": "def get_permissions(obj_name, principal=None, obj_type='file'):\n    obj_dacl = dacl(obj_name, obj_type)\n    if (principal is None):\n        return obj_dacl.list_aces()\n    return obj_dacl.get_ace(principal)", "docstring": "Get the permissions for the passed object\n\nArgs:\n\nobj_name (str):\nThe name of or path to the object.\n\nprincipal (Optional[str]):\nThe name of the user or group for which to get permissions. Can also\npass a SID. If None, all ACEs defined on the object will be\nreturned. Default is None\n\nobj_type (Optional[str]):\nThe type of object for which to get permissions.\n\nReturns:\ndict: A dictionary representing the object permissions\n\nUsage:\n\n.. code-block:: python\n\nsalt.utils.win_dacl.get_permissions('C:\\\\Temp')", "source": "codesearchnet"}
{"code": "def _shard_counts(layout: layout_lib.Layout, batch_dim: Optional[str]=None) -> List[int]:\n    shard_counts = []\n    for spec in layout.sharding_specs:\n        if spec in (batch_dim, layout_lib.UNSHARDED):\n            shard_counts.append(1)\n        else:\n            shard_counts.append(layout.mesh.dim_size(spec))\n    return shard_counts", "docstring": "Computes a list of the number of shards in each dimension of the layout.\n\nThe shard counts are used to slice each dataset element. The batch dimension's\ncount is overridden to 1 since we only consider how many shards to make\nlocally (within each local replica). Sharding across clients is handled by\neither tf.data.Dataset's shard transformation (in the single-client case) or\ntf.data service's distribute function (in the multi-client case).\n\nArgs:\nlayout: the layout to compute the shard counts for.\nbatch_dim: the name of the batch dimension of the layout, if present.\n\nReturns:\nA list of shard counts, one element per dimension of the layout.", "source": "github-repos"}
{"code": "def sequence_accuracy(labels, outputs):\n  \n  \n  all_correct = tf.reduce_all(\n      tf.logical_or(tf.equal(labels, outputs), tf.equal(labels, 0)), axis=-1\n  )\n  return tf.metrics.mean(all_correct)", "docstring": "Compute the sequence-level accuracy.\n\nA sequence is only considered correct if all of its entries were predicted\ncorrectly.\n\nArgs:\nlabels: ground-truth labels, shape=(batch, packed_seq_length)\noutputs: predicted tokens, shape=(batch, seq_length)\nReturns:\nTwo ops, one for getting the current average accuracy and another for\nupdating the running average estimate.", "source": "juraj-google-style"}
{"code": "def locked_get(self):\n    query = {self.key_name: self.key_value}\n    entities = self.model_class.objects.filter(**query)\n    if (len(entities) > 0):\n        credential = getattr(entities[0], self.property_name)\n        if (getattr(credential, 'set_store', None) is not None):\n            credential.set_store(self)\n        return credential\n    else:\n        return None", "docstring": "Retrieve stored credential from the Django ORM.\n\nReturns:\noauth2client.Credentials retrieved from the Django ORM, associated\nwith the ``model``, ``key_value``->``key_name`` pair used to query\nfor the model, and ``property_name`` identifying the\n``CredentialsProperty`` field, all of which are defined in the\nconstructor for this Storage object.", "source": "codesearchnet"}
{"code": "def get_timestamp(self, url, xpath=None):\n        \n        if not path.exists(self.db_path):\n            return None\n\n        if self._query(url, xpath).count() > 0:\n            return self._query(url, xpath).one().queried_on", "docstring": "Get time stamp of cached query result.\n\nIf DB has not yet been initialized or url/xpath has not been queried yet, return None.\n\nArgs:\nurl (str): If given, clear specific item only. Otherwise remove the DB file.\nxpath (str): xpath to search (may be ``None``)\n\nReturns:\ndatetime.datetime: cached response timestamp, None if not available", "source": "juraj-google-style"}
{"code": "def flip_channel_order(image: np.ndarray, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n    input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format\n    if input_data_format == ChannelDimension.LAST:\n        image = image[..., ::-1]\n    elif input_data_format == ChannelDimension.FIRST:\n        image = image[::-1, ...]\n    else:\n        raise ValueError(f'Unsupported channel dimension: {input_data_format}')\n    if data_format is not None:\n        image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n    return image", "docstring": "Flips the channel order of the image.\n\nIf the image is in RGB format, it will be converted to BGR and vice versa.\n\nArgs:\nimage (`np.ndarray`):\nThe image to flip.\ndata_format (`ChannelDimension`, *optional*):\nThe channel dimension format for the output image. Can be one of:\n- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `ChannelDimension.LAST`: image in (height, width, num_channels) format.\nIf unset, will use same as the input image.\ninput_data_format (`ChannelDimension`, *optional*):\nThe channel dimension format for the input image. Can be one of:\n- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `ChannelDimension.LAST`: image in (height, width, num_channels) format.\nIf unset, will use the inferred format of the input image.", "source": "github-repos"}
{"code": "def expect_false(condition, msg, extras=None):\n    try:\n        asserts.assert_false(condition, msg, extras)\n    except signals.TestSignal as e:\n        logging.exception('Expected a `False` value, got `True`.')\n        recorder.add_error(e)", "docstring": "Expects an expression evaluates to False.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nArgs:\nexpr: The expression that is evaluated.\nmsg: A string explaining the details in case of failure.\nextras: An optional field for extra information to be included in test\nresult.", "source": "codesearchnet"}
{"code": "def UserAgentFragment(self):\n    if self.operating_system == OperatingSystem.LINUX:\n        return '({name} {version})'.format(name=self.operating_system.name, version=platform.release())\n    elif self.operating_system == OperatingSystem.WINDOWS:\n        return '({name} NT {version})'.format(name=self.operating_system.name, version=platform.version())\n    elif self.operating_system == OperatingSystem.MACOSX:\n        format_string = '(Macintosh; {name} Mac OS X {version})'\n        arch_string = self.architecture.name if self.architecture == Architecture.ppc else 'Intel'\n        return format_string.format(name=arch_string, version=platform.release())\n    else:\n        return '()'", "docstring": "Generates the fragment of the User-Agent that represents the OS.\n\nExamples:\n(Linux 3.2.5-gg1236)\n(Windows NT 6.1.7601)\n(Macintosh; PPC Mac OS X 12.4.0)\n(Macintosh; Intel Mac OS X 12.4.0)\n\nReturns:\nstr, The fragment of the User-Agent string.", "source": "github-repos"}
{"code": "def is_reading_in_conditional_node(self, variable):\n        \n        variables_read = [n.variables_read for n in self.nodes if n.contains_if()]\n        variables_read = [item for sublist in variables_read for item in sublist]\n        return variable in variables_read", "docstring": "Check if the function reads the variable in a IF node\nArgs:\nvariable (Variable):\nReturns:\nbool: True if the variable is read", "source": "juraj-google-style"}
{"code": "def  set_led(self, colorcode):\n        \n        data = []\n        data.append(0x0A)\n        data.append(self.servoid)\n        data.append(RAM_WRITE_REQ)\n        data.append(LED_CONTROL_RAM)\n        data.append(0x01)\n        data.append(colorcode)\n        send_data(data)", "docstring": "Set the LED Color of Herkulex\n\nArgs:\ncolorcode (int): The code for colors\n(0x00-OFF\n0x02-BLUE\n0x03-CYAN\n0x04-RED\n0x05-ORANGE\n0x06-VIOLET\n0x07-WHITE", "source": "juraj-google-style"}
{"code": "def get_sample(self, md5):\n        \n\n        \n        if len(md5) < 32:\n            md5 = self.get_full_md5(md5, self.sample_collection)\n\n        \n        sample_info = self.database[self.sample_collection].find_one({'md5': md5})\n        if not sample_info:\n            return None\n\n        \n        try:\n            grid_fs_id = sample_info['__grid_fs']\n            sample_info = self.clean_for_serialization(sample_info)\n            sample_info.update({'raw_bytes':self.gridfs_handle.get(grid_fs_id).read()})\n            return sample_info\n        except gridfs.errors.CorruptGridFile:\n            \n            self.database[self.sample_collection].update({'md5': md5}, {'md5': None})\n            return None", "docstring": "Get the sample from the data store.\n\nThis method first fetches the data from datastore, then cleans it for serialization\nand then updates it with 'raw_bytes' item.\n\nArgs:\nmd5: The md5 digest of the sample to be fetched from datastore.\n\nReturns:\nThe sample dictionary or None", "source": "juraj-google-style"}
{"code": "def _create_unicode_map():\n    unicode_map = {}\n    for (beta, uni) in _map.BETACODE_MAP.items():\n        norm = unicodedata.normalize('NFC', uni)\n        unicode_map[norm] = beta\n        unicode_map[uni] = beta\n    final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA)\n    unicode_map[final_sigma_norm] = 's'\n    unicode_map[_FINAL_LC_SIGMA] = 's'\n    return unicode_map", "docstring": "Create the inverse map from unicode to betacode.\n\nReturns:\nThe hash map to convert unicode characters to the beta code representation.", "source": "codesearchnet"}
{"code": "def __init__(self, clustering_algorithm, n_clusters: int, cluster_args: dict, checkpoints_path: str, batch_size: int=1024, is_batched: bool=False):\n    super().__init__()\n    self.clustering_algorithm = clustering_algorithm\n    self.n_clusters = n_clusters\n    self.batch_size = batch_size\n    self.cluster_args = cluster_args\n    self.checkpoints_path = checkpoints_path\n    self.is_batched = is_batched", "docstring": "Clustering transformation itself, it first preprocesses the data,\nthen it applies the clustering transformation step by step on each\nof the batches.\n\nExample Usage::\n\npcoll | OnlineClustering(\nclustering_algorithm=OnlineKMeansClustering\nbatch_size=1024,\nn_clusters=6\ncluster_args={}))\n\nArgs:\nclustering_algorithm: Clustering algorithm (DoFn)\nn_clusters: Number of clusters\ncluster_args: Arguments for the sklearn clustering algorithm\n(check sklearn documentation for more information)\nbatch_size: size of the data batches\nis_batched: boolean value that marks if the collection is already\nbatched and thus doesn't need to be batched by this transform", "source": "github-repos"}
{"code": "def _construct_context_for_args(args):\n    global_default_context = google.datalab.Context.default()\n    config = {}\n    for key in global_default_context.config:\n        config[key] = global_default_context.config[key]\n    billing_tier_arg = args.get('billing', None)\n    if billing_tier_arg:\n        config['bigquery_billing_tier'] = billing_tier_arg\n    return google.datalab.Context(project_id=global_default_context.project_id, credentials=global_default_context.credentials, config=config)", "docstring": "Construct a new Context for the parsed arguments.\n\nArgs:\nargs: the dictionary of magic arguments.\nReturns:\nA new Context based on the current default context, but with any explicitly\nspecified arguments overriding the default's config.", "source": "codesearchnet"}
{"code": "def transform(geom, to_sref):\n    \n    \n    try:\n        geom = getattr(geom, 'polygon', Envelope(geom).polygon)\n    except (TypeError, ValueError):\n        pass\n    else:\n        geom.AssignSpatialReference(to_sref)\n    try:\n        geom_sref = geom.GetSpatialReference()\n    except AttributeError:\n        return transform(Geometry(geom), to_sref)\n    if geom_sref is None:\n        raise Exception('Cannot transform from unknown spatial reference')\n    \n    if not geom_sref.IsSame(to_sref):\n        geom = geom.Clone()\n        geom.TransformTo(to_sref)\n    return geom", "docstring": "Returns a transformed Geometry.\n\nArguments:\ngeom -- any coercible Geometry value or Envelope\nto_sref -- SpatialReference or EPSG ID as int", "source": "juraj-google-style"}
{"code": "def remove(self, name):\n    try:\n        del self.data[name]\n    except (ValueError, KeyError):\n        import warnings\n        warnings.warn((\"Unable to find column '%s' in data source\" % name))", "docstring": "Remove a column of data.\n\nArgs:\nname (str) : name of the column to remove\n\nReturns:\nNone\n\n.. note::\nIf the column name does not exist, a warning is issued.", "source": "codesearchnet"}
{"code": "def _ConvertFile(cls, path):\n    with open(path) as f:\n        src = f.read()\n    short_path = os.path.basename(path)\n    assertions = 0\n    for assertion_re in (cls.ASSERTION_RE, cls.MOCK_METHOD_CALL_RE):\n        start = 0\n        match = assertion_re.search(src, start)\n        while match:\n            assertion_start = match.start('assertion')\n            i = assertion_start + len(match.group('assertion'))\n            last_comma = i - 1\n            args = []\n            depth_round = 1\n            depth_curly = 0\n            depth_square = 0\n            while depth_round:\n                if i == len(src):\n                    line = src[:assertion_start].count('\\n') + 1\n                    snippet = src[assertion_start:src.find('\\n', assertion_start)]\n                    logging.error('Unbalanced parentheses at %s:%d: %s', short_path, line, snippet)\n                    return False\n                elif cls.QUOTE_RE.match(src[i]):\n                    start_quote = src[i]\n                    i += 1\n                    while src[i] != start_quote or src[i - 1] == '\\\\':\n                        i += 1\n                elif src[i] == '\n                    while src[i] != '\\n':\n                        i += 1\n                elif src[i] == '(':\n                    depth_round += 1\n                elif src[i] == ')':\n                    depth_round -= 1\n                elif src[i] == '{':\n                    depth_curly += 1\n                elif src[i] == '}':\n                    depth_curly -= 1\n                elif src[i] == '[':\n                    depth_square += 1\n                elif src[i] == ']':\n                    depth_square -= 1\n                if not depth_curly and (not depth_square) and (src[i] == ',' and depth_round == 1 or (src[i] == ')' and (not depth_round))):\n                    arg = src[last_comma + 1:i].strip()\n                    if arg:\n                        args.append(arg)\n                    last_comma = i\n                i += 1\n            end = i\n            indentation, akey = match.group('indent', 'akey')\n            if akey not in cls.MOCK_METHOD_ASSERTIONS and (not akey.startswith('Raises')):\n                args = args[:2]\n            if 'method' in match.groupdict():\n                args.insert(0, match.group('method'))\n            replacement = cls._GetReplacement(indentation, akey, args)\n            logging.debug((start, end, replacement))\n            src = ''.join((src[:assertion_start], replacement, src[end:]))\n            assertions += 1\n            start = assertion_start + len(replacement)\n            match = assertion_re.search(src, start)\n    output_path = FLAGS.output and os.path.expanduser(FLAGS.output) or path\n    with open(output_path, 'w') as f:\n        f.write(src)\n    logging.info('Converted %s (%d assertion%s)', short_path, assertions, '' if assertions == 1 else 's')\n    return True", "docstring": "Converts a single file from unittest to PyTruth.\n\nArgs:\npath: string, the path of file to be converted.\n\nReturns:\nBoolean: True if the file was successfully converted, otherwise False.", "source": "github-repos"}
{"code": "def get_transaction(self, transaction_id):\n    payload = self._get_data_by_id(transaction_id, 'commit_store_get_transaction')\n    txn = Transaction()\n    txn.ParseFromString(payload)\n    return txn", "docstring": "Returns a Transaction object from the block store by its id.\n\nParams:\ntransaction_id (str): The header_signature of the desired txn\n\nReturns:\nTransaction: The specified transaction\n\nRaises:\nValueError: The transaction is not in the block store", "source": "codesearchnet"}
{"code": "def write_events(self, events):\n        \n        with self.write_lock, self.conn:\n            self.conn.executemany(\n                'INSERT INTO state_events('\n                '   identifier, source_statechange_id, log_time, data'\n                ') VALUES(?, ?, ?, ?)',\n                events,\n            )", "docstring": "Save events.\n\nArgs:\nstate_change_identifier: Id of the state change that generate these events.\nevents: List of Event objects.", "source": "juraj-google-style"}
{"code": "def returns_true_or_raises(f):\n\n    @functools.wraps(f)\n    def wrapped(*args, **kwargs):\n        ret = f(*args, **kwargs)\n        if (ret is not True):\n            raise RuntimeError(('Unexpected return value %r' % ret))\n        return True\n    return wrapped", "docstring": "A safety net.\n\nDecorator for functions that are only allowed to return True or raise\nan exception.\n\nArgs:\nf: A function whose only expected return value is True.\n\nReturns:\nA wrapped functions whose guaranteed only return value is True.", "source": "codesearchnet"}
{"code": "def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):\n  \n  for idx, func in enumerate(iterator):\n    assert callable(func), 'Test generators must yield callables, got %r' % (\n        func,)\n    if getattr(func, '__x_use_name__', False):\n      new_name = func.__name__\n    else:\n      new_name = '%s%s%d' % (name, _SEPARATOR, idx)\n    assert new_name not in dct, (\n        'Name of parameterized test case \"%s\" not unique' % (new_name,))\n    dct[new_name] = func\n    id_suffix[new_name] = getattr(func, '__x_extra_id__', '')", "docstring": "Adds individual test cases to a dictionary.\n\nArgs:\ndct: The target dictionary.\nid_suffix: The dictionary for mapping names to test IDs.\nname: The original name of the test case.\niterator: The iterator generating the individual test cases.", "source": "juraj-google-style"}
{"code": "def FromId(os_id, error_on_unknown=True):\n    if not os_id:\n        return None\n    for operating_system in OperatingSystem._ALL:\n        if operating_system.id == os_id:\n            return operating_system\n    if error_on_unknown:\n        raise InvalidEnumValue(os_id, 'Operating System', [value.id for value in OperatingSystem._ALL])\n    return None", "docstring": "Gets the enum corresponding to the given operating system id.\n\nArgs:\nos_id: str, The operating system id to parse\nerror_on_unknown: bool, True to raise an exception if the id is unknown,\nFalse to just return None.\n\nRaises:\nInvalidEnumValue: If the given value cannot be parsed.\n\nReturns:\nOperatingSystemTuple, One of the OperatingSystem constants or None if the\ninput is None.", "source": "github-repos"}
{"code": "def nav_to_vcf_dir(ftp, build):\n    if (build == 'b37'):\n        ftp.cwd(DIR_CLINVAR_VCF_B37)\n    elif (build == 'b38'):\n        ftp.cwd(DIR_CLINVAR_VCF_B38)\n    else:\n        raise IOError('Genome build not recognized.')", "docstring": "Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files.\n\nArgs:\nftp:   (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov\nbuild: (type: string) genome build, either 'b37' or 'b38'", "source": "codesearchnet"}
{"code": "def _AddWebPageCriteria(client, ad_group_id):\n  \n  ad_group_criterion_service = client.GetService('AdGroupCriterionService',\n                                                 version='v201809')\n\n  operations = [{\n      'operator': 'ADD',\n      \n      'operand': {\n          'xsi_type': 'BiddableAdGroupCriterion',\n          'adGroupId': ad_group_id,\n          \n          'criterion': {\n              'xsi_type': 'Webpage',\n              'parameter': {\n                  'criterionName': 'Special offers for children.',\n                  'conditions': [\n                      {\n                          'operand': 'URL',\n                          'argument': '/marscruise/children'\n                      },\n                      {\n                          'operand': 'PAGE_TITLE',\n                          'argument': 'Special Offer'\n                      }\n                  ]\n              }\n          },\n          'userStatus': 'PAUSED',\n          \n          'biddingStrategyConfiguration': {\n              'bids': [{\n                  'xsi_type': 'CpcBid',\n                  'bid': {\n                      'microAmount': 10000000L\n                  }\n              }]\n          }\n      }\n  }]\n\n  criterion = ad_group_criterion_service.mutate(operations)['value'][0]\n\n  print 'Webpage criterion with ID \"%d\" was added to ad group ID \"%d\".' % (\n      criterion['criterion']['id'], criterion['adGroupId'])", "docstring": "Adds a web page criterion to target Dynamic Search Ads.\n\nArgs:\nclient: an AdWordsClient instance.\nad_group_id: an integer ID of the ad group the criteria is being added to.", "source": "juraj-google-style"}
{"code": "def set_lacp_mode(self, name, mode):\n        \n        if mode not in ['on', 'passive', 'active']:\n            return False\n\n        grpid = re.search(r'(\\d+)', name).group()\n\n        remove_commands = list()\n        add_commands = list()\n\n        for member in self.get_members(name):\n            remove_commands.append('interface %s' % member)\n            remove_commands.append('no channel-group %s' % grpid)\n            add_commands.append('interface %s' % member)\n            add_commands.append('channel-group %s mode %s' % (grpid, mode))\n\n        return self.configure(remove_commands + add_commands)", "docstring": "Configures the LACP mode of the member interfaces\n\nArgs:\nname(str): The Port-Channel interface name to configure the\nLACP mode\n\nmode(str): The LACP mode to configure the member interfaces to.\nValid values are 'on, 'passive', 'active'\n\nReturns:\nTrue if the operation succeeds otherwise False", "source": "juraj-google-style"}
{"code": "def sync(self, since=None, timeout_ms=30000, filter=None,\n             full_state=None, set_presence=None):\n        \n\n        request = {\n            \n            \"timeout\": int(timeout_ms)\n        }\n\n        if since:\n            request[\"since\"] = since\n\n        if filter:\n            request[\"filter\"] = filter\n\n        if full_state:\n            request[\"full_state\"] = json.dumps(full_state)\n\n        if set_presence:\n            request[\"set_presence\"] = set_presence\n\n        return self._send(\"GET\", \"/sync\", query_params=request,\n                          api_path=MATRIX_V2_API_PATH)", "docstring": "Perform a sync request.\n\nArgs:\nsince (str): Optional. A token which specifies where to continue a sync from.\ntimeout_ms (int): Optional. The time in milliseconds to wait.\nfilter (int|str): Either a Filter ID or a JSON string.\nfull_state (bool): Return the full state for every room the user has joined\nDefaults to false.\nset_presence (str): Should the client be marked as \"online\" or\" offline\"", "source": "juraj-google-style"}
{"code": "def email_has_role(self, email, role_name, uuid=None):\n    mbr_data = self.get_membership(uuid=uuid)\n    docs = []\n    try:\n        docs = mbr_data['response']['docs']\n    except KeyError:\n        failure_message = 'KeyError in membership data - got {0}'.format(mbr_data)\n        log.exception(failure_message)\n        raise PyLmodUnexpectedData(failure_message)\n    if (len(docs) == 0):\n        return False\n    has_role = any((((x.get('email') == email) and (x.get('roleType') == role_name)) for x in docs))\n    if has_role:\n        return True\n    return False", "docstring": "Determine if an email is associated with a role.\n\nArgs:\nemail (str): user email\nrole_name (str): user role\nuuid (str): optional uuid. defaults to self.cuuid\n\nRaises:\nPyLmodUnexpectedData: Unexpected data was returned.\nrequests.RequestException: Exception connection error\n\nReturns:\nbool: True or False if email has role_name", "source": "codesearchnet"}
{"code": "def upload(target):\n    log.info('Uploading to pypi server <33>{}'.format(target))\n    with conf.within_proj_dir():\n        shell.run('python setup.py sdist register -r \"{}\"'.format(target))\n        shell.run('python setup.py sdist upload -r \"{}\"'.format(target))", "docstring": "Upload the release to a pypi server.\n\nTODO: Make sure the git directory is clean before allowing a release.\n\nArgs:\ntarget (str):\npypi target as defined in ~/.pypirc", "source": "codesearchnet"}
{"code": "def codemirror_field_js_bundle(field):\n    manifesto = CodemirrorAssetTagRender()\n    manifesto.register_from_fields(field)\n    try:\n        bundle_name = manifesto.js_bundle_names()[0]\n    except IndexError:\n        msg = \"Given field with configuration name '{}' does not have a Javascript bundle name\"\n        raise CodeMirrorFieldBundleError(msg.format(field.config_name))\n    return bundle_name", "docstring": "Filter to get CodeMirror Javascript bundle name needed for a single field.\n\nExample:\n::\n\n{% load djangocodemirror_tags %}\n{{ form.myfield|codemirror_field_js_bundle }}\n\nArguments:\nfield (django.forms.fields.Field): A form field that contains a widget\n:class:`djangocodemirror.widget.CodeMirrorWidget`.\n\nRaises:\nCodeMirrorFieldBundleError: If Codemirror configuration form field\ndoes not have a bundle name.\n\nReturns:\nstring: Bundle name to load with webassets.", "source": "codesearchnet"}
{"code": "def stringize(\n        self,\n        rnf_profile,\n    ):\n        \n\n        coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right)))\n        return \"({},{},{},{},{})\".format(\n            str(self.genome_id).zfill(rnf_profile.genome_id_width),\n            str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction,\n            str(self.left).zfill(coor_width),\n            str(self.right).zfill(coor_width)\n        )", "docstring": "Create RNF representation of this segment.\n\nArgs:\nrnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).", "source": "juraj-google-style"}
{"code": "def _format_batch_statuses(statuses, batch_ids, tracker):\n    proto_statuses = []\n    for batch_id in batch_ids:\n        if (statuses[batch_id] == client_batch_submit_pb2.ClientBatchStatus.INVALID):\n            invalid_txns = tracker.get_invalid_txn_info(batch_id)\n            for txn_info in invalid_txns:\n                try:\n                    txn_info['transaction_id'] = txn_info.pop('id')\n                except KeyError as e:\n                    LOGGER.debug(e)\n        else:\n            invalid_txns = None\n        proto_statuses.append(client_batch_submit_pb2.ClientBatchStatus(batch_id=batch_id, status=statuses[batch_id], invalid_transactions=invalid_txns))\n    return proto_statuses", "docstring": "Takes a statuses dict and formats it for transmission with Protobuf and\nZMQ.\n\nArgs:\nstatuses (dict of int): Dict with batch ids as the key, status as value\nbatch_ids (list of str): The batch ids in their original order\ntracker (BatchTracker): A batch tracker with access to invalid info", "source": "codesearchnet"}
{"code": "def add_migrations(self, migrations):\n    if self.__closed:\n        raise MigrationSessionError(\"Can't change applied session\")\n    self._to_apply.extend(migrations)", "docstring": "Add migrations to be applied.\n\nArgs:\nmigrations: a list of migrations to add of the form [(app, migration_name), ...]\nRaises:\nMigrationSessionError if called on a closed MigrationSession", "source": "codesearchnet"}
{"code": "def get_victim_email_asset(self, main_type, sub_type, unique_id, asset_id, params=None):\n        \n        params = params or {}\n\n        return self.victim_email_asset(main_type, sub_type, unique_id, asset_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nasset_id:\nparams:\n\nReturn:", "source": "juraj-google-style"}
{"code": "def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None):\n    if mapping is None:\n        mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs)\n    elif any((arg is not None for arg in [x, y, ildj_map, kwargs])):\n        raise ValueError('Cannot simultaneously specify mapping and individual arguments.')\n    return _Mapping(x=self._merge(self.x, mapping.x), y=self._merge(self.y, mapping.y), ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map), kwargs=self._merge(self.kwargs, mapping.kwargs))", "docstring": "Returns new _Mapping with args merged with self.\n\nArgs:\nx: `Tensor`. Forward.\ny: `Tensor`. Inverse.\nildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor`\nrepresenting the inverse log det jacobian.\nkwargs: Python dictionary. Extra args supplied to\nforward/inverse/etc functions.\nmapping: Instance of _Mapping to merge. Can only be specified if no other\narg is specified.\n\nReturns:\nmapping: New instance of `_Mapping` which has inputs merged with self.\n\nRaises:\nValueError: if mapping and any other arg is not `None`.", "source": "github-repos"}
{"code": "def capture_widget(widget, path=None):\n    if use_qt5:\n        pixmap = widget.grab()\n    else:\n        pixmap = QtGui.QPixmap.grabWidget(widget)\n    if path:\n        pixmap.save(path)\n    else:\n        image_buffer = QtCore.QBuffer()\n        image_buffer.open(QtCore.QIODevice.ReadWrite)\n        pixmap.save(image_buffer, 'PNG')\n        return image_buffer.data().data()", "docstring": "Grab an image of a Qt widget\n\nArgs:\nwidget: The Qt Widget to capture\npath (optional): The path to save to. If not provided - will return image data.\n\nReturns:\nIf a path is provided, the image will be saved to it.\nIf not, the PNG buffer will be returned.", "source": "codesearchnet"}
{"code": "def _peek(self, chars=1):\n    line = self._socket.recv(chars, socket.MSG_PEEK)\n    logger.debug(('Server sent (peek): ' + line.rstrip()))\n    return line", "docstring": "Peek at the data in the server response.\n\nPeeking should only be done when the response can be predicted.\nMake sure that the socket will not block by requesting too\nmuch data from it while peeking.\n\nArgs:\nchars -- the number of characters to peek.", "source": "codesearchnet"}
{"code": "def GetControlSequenceLen(self, buf):\n    if not self._csi or not buf.startswith(self._csi):\n        return 0\n    n = 0\n    for c in buf:\n        n += 1\n        if c.isalpha():\n            break\n    return n", "docstring": "Returns the control sequence length at the beginning of buf.\n\nUsed in display width computations. Control sequences have display width 0.\n\nArgs:\nbuf: The string to check for a control sequence.\n\nReturns:\nThe control sequence length at the beginning of buf or 0 if buf does not\nstart with a control sequence.", "source": "github-repos"}
{"code": "def set_style(self, column, style):\n    column_idx = None\n    while (len(self.headers) > len(self.__style_list)):\n        self.__style_list.append(None)\n    if isinstance(column, six.integer_types):\n        column_idx = column\n    elif isinstance(column, six.string_types):\n        try:\n            column_idx = self.headers.index(column)\n        except ValueError:\n            pass\n    if (column_idx is not None):\n        self.__style_list[column_idx] = style\n        self.__clear_preprocess()\n        self._dp_extractor.format_flags_list = [_ts_to_flag[self.__get_thousand_separator(col_idx)] for col_idx in range(len(self.__style_list))]\n        return\n    raise ValueError('column must be an int or string: actual={}'.format(column))", "docstring": "Set |Style| for a specific column.\n\nArgs:\ncolumn (|int| or |str|):\nColumn specifier. column index or header name correlated with the column.\nstyle (|Style|):\nStyle value to be set to the column.\n\nRaises:\nValueError: If the column specifier is invalid.", "source": "codesearchnet"}
{"code": "def AddKeyByPath(self, key_path, registry_key):\n    \n    if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):\n      raise ValueError('Key path does not start with: {0:s}'.format(\n          definitions.KEY_PATH_SEPARATOR))\n\n    if not self._root_key:\n      self._root_key = FakeWinRegistryKey(self._key_path_prefix)\n\n    path_segments = key_paths.SplitKeyPath(key_path)\n    parent_key = self._root_key\n    for path_segment in path_segments:\n      try:\n        subkey = FakeWinRegistryKey(path_segment)\n        parent_key.AddSubkey(subkey)\n      except KeyError:\n        subkey = parent_key.GetSubkeyByName(path_segment)\n\n      parent_key = subkey\n\n    parent_key.AddSubkey(registry_key)", "docstring": "Adds a Windows Registry key for a specific key path.\n\nArgs:\nkey_path (str): Windows Registry key path to add the key.\nregistry_key (WinRegistryKey): Windows Registry key.\n\nRaises:\nKeyError: if the subkey already exists.\nValueError: if the Windows Registry key cannot be added.", "source": "juraj-google-style"}
{"code": "def build_signature_def(inputs=None, outputs=None, method_name=None, defaults=None):\n    signature_def = meta_graph_pb2.SignatureDef()\n    if inputs is not None:\n        for item in inputs:\n            signature_def.inputs[item].CopyFrom(inputs[item])\n    if outputs is not None:\n        for item in outputs:\n            signature_def.outputs[item].CopyFrom(outputs[item])\n    if method_name is not None:\n        signature_def.method_name = method_name\n    if defaults is not None:\n        for arg_name, default in defaults.items():\n            if isinstance(default, ops.EagerTensor):\n                signature_def.defaults[arg_name].CopyFrom(tensor_util.make_tensor_proto(default.numpy()))\n            elif default.op.type == 'Const':\n                signature_def.defaults[arg_name].CopyFrom(default.op.get_attr('value'))\n            else:\n                raise ValueError(f'Unable to convert object {str(default)} of type {type(default)} to TensorProto.')\n    return signature_def", "docstring": "Utility function to build a SignatureDef protocol buffer.\n\nArgs:\ninputs: Inputs of the SignatureDef defined as a proto map of string to\ntensor info.\noutputs: Outputs of the SignatureDef defined as a proto map of string to\ntensor info.\nmethod_name: Method name of the SignatureDef as a string.\ndefaults: Defaults of the SignatureDef defined as a proto map of string to\nTensorProto.\n\nReturns:\nA SignatureDef protocol buffer constructed based on the supplied arguments.", "source": "github-repos"}
{"code": "def max_range(ranges, combined=True):\n    \n    try:\n        with warnings.catch_warnings():\n            warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')\n            values = [tuple(np.NaN if v is None else v for v in r) for r in ranges]\n            if pd and any(isinstance(v, datetime_types) and not isinstance(v, cftime_types)\n                          for r in values for v in r):\n                converted = []\n                for l, h in values:\n                    if isinstance(l, datetime_types) and isinstance(h, datetime_types):\n                        l, h = (pd.Timestamp(l).to_datetime64(),\n                                pd.Timestamp(h).to_datetime64())\n                    converted.append((l, h))\n                values = converted\n\n            arr = np.array(values)\n            if not len(arr):\n                return np.NaN, np.NaN\n            elif arr.dtype.kind in 'OSU':\n                arr = list(python2sort([\n                    v for r in values for v in r\n                    if not is_nan(v) and v is not None]))\n                return arr[0], arr[-1]\n            elif arr.dtype.kind in 'M':\n                return ((arr.min(), arr.max()) if combined else\n                        (arr[:, 0].min(), arr[:, 1].min()))\n\n            if combined:\n                return (np.nanmin(arr), np.nanmax(arr))\n            else:\n                return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))\n    except:\n        return (np.NaN, np.NaN)", "docstring": "Computes the maximal lower and upper bounds from a list bounds.\n\nArgs:\nranges (list of tuples): A list of range tuples\ncombined (boolean, optional): Whether to combine bounds\nWhether range should be computed on lower and upper bound\nindependently or both at once\n\nReturns:\nThe maximum range as a single tuple", "source": "juraj-google-style"}
{"code": "def __init__(self, api_key: str, config: interfaces.Config | None=None):\n    self._config = config or interfaces.Config()\n    self._genai_processor = genai_model.GenaiModel(api_key=api_key, model_name=self._config.topic_researcher_model_name, generate_content_config=types.GenerateContentConfig(tools=self._config.enabled_research_tools))\n    p_preamble = preamble.Preamble(content=[ProcessorPart(prompts.TOPIC_RESEARCH_PREAMBLE), ProcessorPart('Topic to research: ')])\n    p_verbalizer = topic_verbalizer.TopicVerbalizer(config=self._config)\n    p_suffix = preamble.Suffix(content=[ProcessorPart('Your research: ')])\n    self._pipeline = p_verbalizer + p_preamble + p_suffix + self._genai_processor", "docstring": "Initializes the TopicResearcher.\n\nArgs:\napi_key: The API key to use for the GenAI API.\nconfig: The agent configuration.", "source": "github-repos"}
{"code": "def _file_changed_nilrt(full_filepath):\n    rs_state_dir = '/var/lib/salt/restartcheck_state'\n    base_filename = os.path.basename(full_filepath)\n    timestamp_file = os.path.join(rs_state_dir, '{0}.timestamp'.format(base_filename))\n    md5sum_file = os.path.join(rs_state_dir, '{0}.md5sum'.format(base_filename))\n    if ((not os.path.exists(timestamp_file)) or (not os.path.exists(md5sum_file))):\n        return True\n    prev_timestamp = __salt__['file.read'](timestamp_file).rstrip()\n    cur_timestamp = str(int(os.path.getmtime(full_filepath)))\n    if (prev_timestamp != cur_timestamp):\n        return True\n    return bool(__salt__['cmd.retcode']('md5sum -cs {0}'.format(md5sum_file), output_loglevel='quiet'))", "docstring": "Detect whether a file changed in an NILinuxRT system using md5sum and timestamp\nfiles from a state directory.\n\nReturns:\n- False if md5sum/timestamp state files don't exist\n- True/False depending if ``base_filename`` got modified/touched", "source": "codesearchnet"}
{"code": "def poll(\n            self,\n            transaction_hash: bytes,\n    ):\n        \n        if len(transaction_hash) != 32:\n            raise ValueError(\n                'transaction_hash must be a 32 byte hash',\n            )\n\n        transaction_hash = encode_hex(transaction_hash)\n\n        \n        \n        \n        \n        \n        \n        \n        last_result = None\n\n        while True:\n            \n            \n            transaction = self.web3.eth.getTransaction(transaction_hash)\n\n            \n            if transaction is None and last_result is not None:\n                raise Exception('invalid transaction, check gas price')\n\n            \n            if transaction and transaction['blockNumber'] is not None:\n                last_result = transaction\n\n                \n                transaction_block = transaction['blockNumber']\n                confirmation_block = transaction_block + self.default_block_num_confirmations\n\n                block_number = self.block_number()\n\n                if block_number >= confirmation_block:\n                    return transaction\n\n            gevent.sleep(1.0)", "docstring": "Wait until the `transaction_hash` is applied or rejected.\n\nArgs:\ntransaction_hash: Transaction hash that we are waiting for.", "source": "juraj-google-style"}
{"code": "def cancel(self, request, *args, **kwargs):  \n        \n        status = self.get_object()\n        status.cancel()\n        serializer = StatusSerializer(status, context={'request': request})\n        return Response(serializer.data)", "docstring": "Cancel the task associated with the specified status record.\n\nArguments:\nrequest (Request): A POST including a task status record ID\n\nReturns\n-------\nResponse: A JSON response indicating whether the cancellation succeeded or not", "source": "juraj-google-style"}
{"code": "def _api_scrape(json_inp, ndx):\n    \n\n    try:\n        headers = json_inp['resultSets'][ndx]['headers']\n        values = json_inp['resultSets'][ndx]['rowSet']\n    except KeyError:\n        \n        \n        try:\n            headers = json_inp['resultSet'][ndx]['headers']\n            values = json_inp['resultSet'][ndx]['rowSet']\n        except KeyError:\n            \n            headers = json_inp['resultSet']['headers']\n            values = json_inp['resultSet']['rowSet']\n    if HAS_PANDAS:\n        return DataFrame(values, columns=headers)\n    else:\n        \n        return [dict(zip(headers, value)) for value in values]", "docstring": "Internal method to streamline the getting of data from the json\n\nArgs:\njson_inp (json): json input from our caller\nndx (int): index where the data is located in the api\n\nReturns:\nIf pandas is present:\nDataFrame (pandas.DataFrame): data set from ndx within the\nAPI's json\nelse:\nA dictionary of both headers and values from the page", "source": "juraj-google-style"}
{"code": "def chmod_r(root: str, permission: int) -> None:\n    \n    os.chmod(root, permission)\n    for dirpath, dirnames, filenames in os.walk(root):\n        for d in dirnames:\n            os.chmod(os.path.join(dirpath, d), permission)\n        for f in filenames:\n            os.chmod(os.path.join(dirpath, f), permission)", "docstring": "Recursive ``chmod``.\n\nArgs:\nroot: directory to walk down\npermission: e.g. ``e.g. stat.S_IWUSR``", "source": "juraj-google-style"}
{"code": "def get_tensor_sharding(tensor):\n    if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():\n        sharding = tensor._get_xla_sharding()\n        if sharding is None:\n            return None\n        else:\n            return sharding.SerializeToString()\n    try:\n        return get_op_sharding(tensor.op)\n    except AttributeError:\n        return None", "docstring": "Returns sharding attribute of a Tensor.\n\nArgs:\ntensor: a Tensor.\n\nReturns:\nThe attribute representing XLA sharding on tensor's op.", "source": "github-repos"}
{"code": "def update_tag(self, tag_name, description=None, custom_properties=None, **kwargs):\n    data = {'description': (description or ''), 'customProperties': (custom_properties or {})}\n    resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), data=data, **kwargs)\n    resp.raise_for_status()\n    return resp.json()", "docstring": "update a tag by name\n\nArgs:\ntag_name (string): name of tag to update\ndescription (optional[string]): a description\ncustom_properties (optional[dict]): dictionary of custom properties", "source": "codesearchnet"}
{"code": "def split_by_sparsity(values):\n    dense_values = []\n    dense_indices = []\n    sparse_values = []\n    sparse_indices = []\n    for i, v in enumerate(values):\n        if is_indexed_slices(v):\n            sparse_values.append(v)\n            sparse_indices.append(i)\n        else:\n            dense_values.append(v)\n            dense_indices.append(i)\n    return (dense_values, dense_indices, sparse_values, sparse_indices)", "docstring": "Split values into dense and sparse values.\n\nArgs:\nvalues: a list of tensors or `PerReplica`s.\n\nReturns:\nFour lists:\na list of dense values, a list of their indices in `values` and\na list of sparse values, a list of their indices in `values`.", "source": "github-repos"}
{"code": "def standardize_tuple(value, n, name, allow_zero=False):\n    error_msg = f'The `{name}` argument must be a tuple of {n} integers. Received {name}={value}'\n    if isinstance(value, int):\n        value_tuple = (value,) * n\n    else:\n        try:\n            value_tuple = tuple(value)\n        except TypeError:\n            raise ValueError(error_msg)\n        if len(value_tuple) != n:\n            raise ValueError(error_msg)\n        for single_value in value_tuple:\n            try:\n                int(single_value)\n            except (ValueError, TypeError):\n                error_msg += f'including element {single_value} of type {type(single_value)}'\n                raise ValueError(error_msg)\n    if allow_zero:\n        unqualified_values = {v for v in value_tuple if v < 0}\n        req_msg = '>= 0'\n    else:\n        unqualified_values = {v for v in value_tuple if v <= 0}\n        req_msg = '> 0'\n    if unqualified_values:\n        error_msg += f', including values {unqualified_values} that do not satisfy `value {req_msg}`'\n        raise ValueError(error_msg)\n    return value_tuple", "docstring": "Transforms non-negative/positive integer/integers into an integer tuple.\n\nArgs:\nvalue: int or iterable of ints. The value to validate and convert.\nn: int. The size of the tuple to be returned.\nname: string. The name of the argument being validated, e.g. \"strides\"\nor \"kernel_size\". This is only used to format error messages.\nallow_zero: bool, defaults to `False`. A `ValueError` will raised\nif zero is received and this argument is `False`.\n\nReturns:\nA tuple of n integers.", "source": "github-repos"}
{"code": "def halt(self):\n    if self.is_closed:\n        _std_log.info('Disconnect requested, but AMQP connection already gone')\n        self._channel = None\n        return\n    _std_log.info('Waiting for %d consumer(s) to finish processing before halting', len(self._consumers))\n    pending_cancels = []\n    for c in list(self._consumers.values()):\n        pending_cancels.append(c.cancel())\n    (yield defer.gatherResults(pending_cancels))\n    _std_log.info('Finished canceling %d consumers', len(self._consumers))\n    try:\n        (yield self.close())\n    except pika.exceptions.ConnectionWrongStateError:\n        pass\n    self._consumers = {}\n    self._channel = None", "docstring": "Signal to consumers they should stop after finishing any messages\ncurrently being processed, then close the connection.\n\nReturns:\ndefer.Deferred: fired when all consumers have successfully stopped\nand the connection is closed.", "source": "codesearchnet"}
{"code": "def quote_xml(text):\n    text = _coerce_unicode(text)\n    if text.startswith(CDATA_START):\n        return text\n    return saxutils.escape(text)", "docstring": "Format a value for display as an XML text node.\n\nReturns:\nUnicode string (str on Python 3, unicode on Python 2)", "source": "codesearchnet"}
{"code": "def removeColumns(self, columnNames):\n    model = self.tableView.model()\n    if (model is not None):\n        model.removeDataFrameColumns(columnNames)\n    self.removeColumnButton.setChecked(False)", "docstring": "Removes one or multiple columns from the model.\n\nThis method is also a slot.\n\nArgs:\ncolumnNames (list): A list of columns, which shall\nbe removed from the model.", "source": "codesearchnet"}
{"code": "def _all_number_groups_are_exactly_present(numobj, normalized_candidate, formatted_number_groups):\n    \n    candidate_groups = re.split(NON_DIGITS_PATTERN, normalized_candidate)\n    \n    if numobj.extension is not None:\n        candidate_number_group_index = len(candidate_groups) - 2\n    else:\n        candidate_number_group_index = len(candidate_groups) - 1\n    \n    \n    \n    \n    if (len(candidate_groups) == 1 or\n        candidate_groups[candidate_number_group_index].find(national_significant_number(numobj)) != -1):\n        return True\n    \n    \n    formatted_number_group_index = len(formatted_number_groups) - 1\n    while (formatted_number_group_index > 0 and candidate_number_group_index >= 0):\n        if (candidate_groups[candidate_number_group_index] !=\n            formatted_number_groups[formatted_number_group_index]):\n            return False\n        formatted_number_group_index -= 1\n        candidate_number_group_index -= 1\n    \n    \n    return (candidate_number_group_index >= 0 and\n            candidate_groups[candidate_number_group_index].endswith(formatted_number_groups[0]))", "docstring": "Returns True if the groups of digits found in our candidate phone number match our\nexpectations.\n\nArguments:\nnumobj -- the original number we found when parsing\nnormalized_candidate -- the candidate number, normalized to only contain ASCII digits,\nbut with non-digits (spaces etc) retained\nexpected_number_groups -- the groups of digits that we would expect to see if we\nformatted this number\nReturns True if expectations matched.", "source": "juraj-google-style"}
{"code": "def find_indices(lst, element):\n    result = []\n    offset = (- 1)\n    while True:\n        try:\n            offset = lst.index(element, (offset + 1))\n        except ValueError:\n            return result\n        result.append(offset)", "docstring": "Returns the indices for all occurrences of 'element' in 'lst'.\n\nArgs:\nlst (list): List to search.\nelement:  Element to find.\n\nReturns:\nlist: List of indices or values", "source": "codesearchnet"}
{"code": "def get_all_datasets(cls, configuration=None, page_size=1000, check_duplicates=True, **kwargs):\n    dataset = Dataset(configuration=configuration)\n    dataset['id'] = 'all datasets'\n    total_rows = kwargs.get('limit', cls.max_int)\n    start = kwargs.get('offset', 0)\n    all_datasets = None\n    attempts = 0\n    while ((attempts < cls.max_attempts) and (all_datasets is None)):\n        all_datasets = list()\n        for page in range(((total_rows \n            pagetimespagesize = (page * page_size)\n            kwargs['offset'] = (start + pagetimespagesize)\n            rows_left = (total_rows - pagetimespagesize)\n            rows = min(rows_left, page_size)\n            kwargs['limit'] = rows\n            result = dataset._write_to_hdx('all', kwargs, 'id')\n            datasets = list()\n            if isinstance(result, list):\n                no_results = len(result)\n                if ((no_results == 0) and (page == 0)):\n                    all_datasets = None\n                    break\n                for datasetdict in result:\n                    dataset = Dataset(configuration=configuration)\n                    dataset.old_data = dict()\n                    dataset.data = datasetdict\n                    dataset._dataset_create_resources()\n                    datasets.append(dataset)\n                all_datasets += datasets\n                if (no_results < rows):\n                    break\n            else:\n                logger.debug(result)\n        if (all_datasets is None):\n            attempts += 1\n        elif check_duplicates:\n            names_list = [dataset['name'] for dataset in all_datasets]\n            names = set(names_list)\n            if (len(names_list) != len(names)):\n                all_datasets = None\n                attempts += 1\n    if ((attempts == cls.max_attempts) and (all_datasets is None)):\n        raise HDXError('Maximum attempts reached for getting all datasets!')\n    return all_datasets", "docstring": "Get all datasets in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\npage_size (int): Size of page to return. Defaults to 1000.\ncheck_duplicates (bool): Whether to check for duplicate datasets. Defaults to True.\n**kwargs: See below\nlimit (int): Number of rows to return. Defaults to all datasets (sys.maxsize)\noffset (int): Offset in the complete result for where the set of returned datasets should begin\n\nReturns:\nList[Dataset]: list of all datasets in HDX", "source": "codesearchnet"}
{"code": "def resource_action(client, action='', log_format='item: %(key)s', **kwargs):\n    result = None\n    try:\n        result = getattr(client, action)(**kwargs)\n        LOG.info(log_format, kwargs)\n    except botocore.exceptions.ClientError as error:\n        error_code = error.response['Error']['Code']\n        if (error_code == 'AccessDenied'):\n            LOG.fatal(error)\n            raise\n        elif (error_code == 'EntityAlreadyExists'):\n            LOG.info(' '.join(('Found', log_format)), kwargs)\n        else:\n            LOG.fatal(error)\n    return result", "docstring": "Call _action_ using boto3 _client_ with _kwargs_.\n\nThis is meant for _action_ methods that will create or implicitely prove a\ngiven Resource exists. The _log_failure_ flag is available for methods that\nshould always succeed, but will occasionally fail due to unknown AWS\nissues.\n\nArgs:\nclient (botocore.client.IAM): boto3 client object.\naction (str): Client method to call.\nlog_format (str): Generic log message format, 'Added' or 'Found' will\nbe prepended depending on the scenario.\nprefix (str): Prefix word to use in successful INFO message.\n**kwargs: Keyword arguments to pass to _action_ method.\n\nReturns:\ndict: boto3 response.", "source": "codesearchnet"}
{"code": "def get_soa_record(client, zone_id, zone_name):\n    \n\n    response = client.list_resource_record_sets(HostedZoneId=zone_id,\n                                                StartRecordName=zone_name,\n                                                StartRecordType=\"SOA\",\n                                                MaxItems=\"1\")\n    return SOARecord(response[\"ResourceRecordSets\"][0])", "docstring": "Gets the SOA record for zone_name from zone_id.\n\nArgs:\nclient (:class:`botocore.client.Route53`): The connection used to\ninteract with Route53's API.\nzone_id (string): The AWS Route53 zone id of the hosted zone to query.\nzone_name (string): The name of the DNS hosted zone to create.\n\nReturns:\n:class:`stacker.util.SOARecord`: An object representing the parsed SOA\nrecord returned from AWS Route53.", "source": "juraj-google-style"}
{"code": "def localopt(self, forcefield='mmff94', steps=500):\n    pbmol = pb.Molecule(self._obmol)\n    pbmol.localopt(forcefield=forcefield, steps=steps)\n    self._obmol = pbmol.OBMol", "docstring": "A wrapper to pybel's localopt method to optimize a Molecule.\n\nArgs:\nforcefield: Default is mmff94. Options are 'gaff', 'ghemical',\n'mmff94', 'mmff94s', and 'uff'.\nsteps: Default is 500.", "source": "codesearchnet"}
{"code": "def set_query_parameter(url, param_name, param_value):\n    \n    scheme, netloc, path, query_string, fragment = urlsplit(url)\n    query_params = parse_qs(query_string)\n\n    query_params[param_name] = [param_value]\n    new_query_string = urlencode(query_params, doseq=True)\n\n    return urlunsplit((scheme, netloc, path, new_query_string, fragment))", "docstring": "Given a URL, set or replace a query parameter and return the modified URL.\n\nArgs:\nurl: a given  URL\nparam_name: the parameter name to add\nparam_value: the parameter value\nReturns:\nURL with the added parameter", "source": "juraj-google-style"}
{"code": "def load_ui_type(uifile):\n    import pysideuic\n    import xml.etree.ElementTree as ElementTree\n    from cStringIO import StringIO\n    parsed = ElementTree.parse(uifile)\n    widget_class = parsed.find('widget').get('class')\n    form_class = parsed.find('class').text\n    with open(uifile, 'r') as f:\n        o = StringIO()\n        frame = {}\n        pysideuic.compileUi(f, o, indent=0)\n        pyc = compile(o.getvalue(), '<string>', 'exec')\n        (exec(pyc) in frame)\n        form_class = frame[('Ui_%s' % form_class)]\n        base_class = eval(('QtWidgets.%s' % widget_class))\n    return (form_class, base_class)", "docstring": "Pyside equivalent for the loadUiType function in PyQt.\n\nFrom the PyQt4 documentation:\nLoad a Qt Designer .ui file and return a tuple of the generated form\nclass and the Qt base class. These can then be used to create any\nnumber of instances of the user interface without having to parse the\n.ui file more than once.\n\nNote:\nPyside lacks the \"loadUiType\" command, so we have to convert the ui\nfile to py code in-memory first and then execute it in a special frame\nto retrieve the form_class.\n\nArgs:\nuifile (str): Absolute path to .ui file\n\n\nReturns:\ntuple: the generated form class, the Qt base class", "source": "codesearchnet"}
{"code": "def days_until(self, target_date_tensor):\n    return target_date_tensor.ordinal() - self._ordinals", "docstring": "Computes the number of days until the target dates.\n\nArgs:\ntarget_date_tensor: A DateTensor object broadcastable to the shape of\n\"self\".\n\nReturns:\nAn int32 tensor with numbers of days until the target dates.\n\n#### Example\n\n```python\ndates = tff.datetime.dates_from_tuples([(2020, 1, 25), (2020, 3, 2)])\ntarget = tff.datetime.dates_from_tuples([(2020, 3, 5)])\ndates.days_until(target) # [40, 3]\n\ntargets = tff.datetime.dates_from_tuples([(2020, 2, 5), (2020, 3, 5)])\ndates.days_until(targets)  # [11, 3]\n```", "source": "github-repos"}
{"code": "def floodlight_email(config, task: dict, day: str, alerts: dict[str, list[str, str, str, str, int, str]]) -> None:\n    for email, table in alerts.items():\n        t = EmailTemplate()\n        t.align('center')\n        t.section(True)\n        issues = sum((1 for row in table if row[5] != 'NORMAL'))\n        if issues > 0:\n            subject = '%d Floodlight Alerts For %s' % (issues, day)\n        else:\n            subject = 'All Floodlights Normal For %s' % day\n        t.header(subject)\n        t.paragraph('The following floodlights are being monitored.  A status of LOW or HIGH inidcates impressions have changed significantly for the day.  A status of NORMAL means impressions are close to the average for the past 7 days.')\n        t.table([{'name': 'Date', 'type': 'STRING'}, {'name': 'Floodlight', 'type': 'STRING'}, {'name': 'Activity Id', 'type': 'STRING'}, {'name': 'Activity', 'type': 'STRING'}, {'name': 'Impressions', 'type': 'INTEGER'}, {'name': 'Status', 'type': 'STRING'}], table)\n        t.paragraph('Your monitored floodlights and recipients are listed in the sheet below.')\n        t.button('Floodlight Monitoring Sheet', sheets_url(config, task['auth'], task['sheet']['sheet']), big=True)\n        t.section(False)\n        if config.verbose:\n            print('FLOODLIGHT MONITOR EMAIL ALERTS', email, len(table))\n        send_email(config, task['auth'], email, None, None, subject, t.get_text(), t.get_html())", "docstring": "Send an email to each alert group with status of all activities.\n\nThe email template will contain all activities for each email address specified in the input sheet.\n\nArgs:\nday - the latest day that was present in all combined reports, used for title of email.\nalerts - Each email in the sheet with a list of activities and statuses.\n\nReturns:\nNothing.", "source": "github-repos"}
{"code": "def get_config_parameter_multiline(config: ConfigParser,\n                                   section: str,\n                                   param: str,\n                                   default: List[str]) -> List[str]:\n    \n    try:\n        multiline = config.get(section, param)\n        lines = [x.strip() for x in multiline.splitlines()]\n        return [line for line in lines if line]\n    except (TypeError, ValueError, NoOptionError):\n        log.warning(\n            \"Configuration variable {} not found or improper in section [{}]; \"\n            \"using default of {!r}\", param, section, default)\n        return default", "docstring": "Get multi-line string parameter from ``configparser`` ``.INI`` file,\nas a list of strings (one per line, ignoring blank lines).\n\nArgs:\nconfig: :class:`ConfigParser` object\nsection: section name within config file\nparam: name of parameter within section\ndefault: default value\nReturns:\nparameter value, or default", "source": "juraj-google-style"}
{"code": "def upsert(self, insert_index, val, fn=None):\n    fn = (fn or (lambda current, passed: passed))\n    self._magnitude = 0\n    position = self.position_for_index(insert_index)\n    if ((position < len(self.elements)) and (self.elements[position] == insert_index)):\n        self.elements[(position + 1)] = fn(self.elements[(position + 1)], val)\n    else:\n        self.elements.insert(position, val)\n        self.elements.insert(position, insert_index)", "docstring": "Inserts or updates an existing index within the vector.\n\nArgs:\n- insert_index (int): The index at which the element should be\ninserted.\n- val (int|float): The value to be inserted into the vector.\n- fn (callable, optional): An optional callable taking two\narguments, the current value and the passed value to generate\nthe final inserted value at the position in case of collision.", "source": "codesearchnet"}
{"code": "def _to_snake_case(string):\n        \n        sub_string = r'\\1_\\2'\n        string = REGEX_CAMEL_FIRST.sub(sub_string, string)\n        return REGEX_CAMEL_SECOND.sub(sub_string, string).lower()", "docstring": "Return a snake cased version of the input string.\n\nArgs:\nstring (str): A camel cased string.\n\nReturns:\nstr: A snake cased string.", "source": "juraj-google-style"}
{"code": "def attention_lm_moe_small():\n    hparams = attention_lm_moe_base()\n    hparams.num_hidden_layers = 4\n    hparams.hidden_size = 512\n    hparams.filter_size = 2048\n    hparams.moe_num_experts = 128\n    hparams.moe_layers = '2'\n    return hparams", "docstring": "Cheap model for single-gpu training.\n\non lm1b_32k:\n~312M params\n1.6 steps/sec on  [GeForce GTX TITAN X]\nAfter 50K steps on 8 GPUs (synchronous):\neval_log_ppl_per_token = 3.31\n\nReturns:\nan hparams object.", "source": "codesearchnet"}
{"code": "def execute_edit(args, root_dir=None):\n    \n    \n    EDITOR = os.environ.get('EDITOR', 'vim')\n    \n    key = args['key']\n    status = command_factory('status')({}, root_dir=root_dir)\n\n    \n    if not isinstance(status['data'], str) and key in status['data']:\n        if status['data'][key]['status'] in ['queued', 'stashed']:\n            command = status['data'][key]['command']\n        else:\n            print(\"Entry is not 'queued' or 'stashed'\")\n            sys.exit(1)\n    else:\n        print('No entry with this key')\n        sys.exit(1)\n\n    with tempfile.NamedTemporaryFile(suffix=\".tmp\") as tf:\n        tf.write(command.encode('utf-8'))\n        tf.flush()\n        call([EDITOR, tf.name])\n\n        \n        \n        tf.seek(0)\n        edited_command = tf.read().decode('utf-8')\n\n    print_command_factory('edit')({\n        'key': key,\n        'command': edited_command,\n    }, root_dir=root_dir)", "docstring": "Edit a existing queue command in the daemon.\n\nArgs:\nargs['key'] int: The key of the queue entry to be edited\nroot_dir (string): The path to the root directory the daemon is running in.", "source": "juraj-google-style"}
{"code": "def _DepthwiseConv2dNativeBackpropInputGrad(op: ops.Operation, grad):\n    return [None, gen_nn_ops.depthwise_conv2d_native_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), data_format=op.get_attr('data_format')), gen_nn_ops.depthwise_conv2d_native(grad, op.inputs[1], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), data_format=op.get_attr('data_format'))]", "docstring": "The derivatives for deconvolution.\n\nArgs:\nop: the Deconvolution op.\ngrad: the tensor representing the gradient w.r.t. the output\n\nReturns:\nthe gradients w.r.t. the input and the filter", "source": "github-repos"}
{"code": "def get_definition(self, stmt: Statement, sctx: SchemaContext) -> Tuple[(Statement, SchemaContext)]:\n    if (stmt.keyword == 'uses'):\n        kw = 'grouping'\n    elif (stmt.keyword == 'type'):\n        kw = 'typedef'\n    else:\n        raise ValueError(\"not a 'uses' or 'type' statement\")\n    (loc, did) = self.resolve_pname(stmt.argument, sctx.text_mid)\n    if (did == sctx.text_mid):\n        dstmt = stmt.get_definition(loc, kw)\n        if dstmt:\n            return (dstmt, sctx)\n    else:\n        dstmt = self.modules[did].statement.find1(kw, loc)\n        if dstmt:\n            return (dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, did))\n    for sid in self.modules[did].submodules:\n        dstmt = self.modules[sid].statement.find1(kw, loc)\n        if dstmt:\n            return (dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, sid))\n    raise DefinitionNotFound(kw, stmt.argument)", "docstring": "Find the statement defining a grouping or derived type.\n\nArgs:\nstmt: YANG \"uses\" or \"type\" statement.\nsctx: Schema context where the definition is used.\n\nReturns:\nA tuple consisting of the definition statement ('grouping' or\n'typedef') and schema context of the definition.\n\nRaises:\nValueError: If `stmt` is neither \"uses\" nor \"type\" statement.\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If the prefix specified in the argument of `stmt`\nis not declared.\nDefinitionNotFound: If the corresponding definition is not found.", "source": "codesearchnet"}
{"code": "def get_windows_if_list(extended=False):\n    \n    \n    def _get_mac(x):\n        size = x[\"physical_address_length\"]\n        if size != 6:\n            return \"\"\n        data = bytearray(x[\"physical_address\"])\n        return str2mac(bytes(data)[:size])\n\n    def _get_ips(x):\n        unicast = x['first_unicast_address']\n        anycast = x['first_anycast_address']\n        multicast = x['first_multicast_address']\n\n        def _resolve_ips(y):\n            if not isinstance(y, list):\n                return []\n            ips = []\n            for ip in y:\n                addr = ip['address']['address'].contents\n                if addr.si_family == socket.AF_INET6:\n                    ip_key = \"Ipv6\"\n                    si_key = \"sin6_addr\"\n                else:\n                    ip_key = \"Ipv4\"\n                    si_key = \"sin_addr\"\n                data = getattr(addr, ip_key)\n                data = getattr(data, si_key)\n                data = bytes(bytearray(data.byte))\n                \n                if data:\n                    ips.append(inet_ntop(addr.si_family, data))\n            return ips\n\n        ips = []\n        ips.extend(_resolve_ips(unicast))\n        if extended:\n            ips.extend(_resolve_ips(anycast))\n            ips.extend(_resolve_ips(multicast))\n        return ips\n\n    if six.PY2:\n        _str_decode = lambda x: x.encode('utf8', errors='ignore')\n    else:\n        _str_decode = plain_str\n    return [\n        {\n            \"name\": _str_decode(x[\"friendly_name\"]),\n            \"win_index\": x[\"interface_index\"],\n            \"description\": _str_decode(x[\"description\"]),\n            \"guid\": _str_decode(x[\"adapter_name\"]),\n            \"mac\": _get_mac(x),\n            \"ipv4_metric\": 0 if WINDOWS_XP else x[\"ipv4_metric\"],\n            \"ipv6_metric\": 0 if WINDOWS_XP else x[\"ipv6_metric\"],\n            \"ips\": _get_ips(x)\n        } for x in GetAdaptersAddresses()\n    ]", "docstring": "Returns windows interfaces through GetAdaptersAddresses.\n\nparams:\n- extended: include anycast and multicast IPv6 (default False)", "source": "juraj-google-style"}
{"code": "def _print_reference(self, reference: message.Message) -> None:\n    set_oneof = reference.WhichOneof('reference')\n    if self.json_format == _FhirJsonFormat.PURE and set_oneof is not None and (set_oneof != 'uri'):\n        standardized_reference = copy.copy(reference)\n        new_uri = proto_utils.get_value_at_field(standardized_reference, 'uri')\n        proto_utils.set_value_at_field(new_uri, 'value', references.reference_to_string(reference))\n        self._print_message(standardized_reference)\n    else:\n        self._print_message(reference)", "docstring": "Standardizes and prints the provided reference.\n\nNote that \"standardization\" in the case of PURE FHIR JSON refers to\nun-typing the typed-reference prior to printing.\n\nArgs:\nreference: The reference to print.", "source": "github-repos"}
{"code": "def handle(self, connection_id, message_content):\n        \n        try:\n            request = self._request_proto()\n            request.ParseFromString(message_content)\n        except DecodeError:\n            LOGGER.info('Protobuf %s failed to deserialize', request)\n            return self._wrap_result(self._status.INTERNAL_ERROR)\n\n        try:\n            response = self._respond(request)\n        except _ResponseFailed as e:\n            response = e.status\n\n        return self._wrap_result(response)", "docstring": "Handles parsing incoming requests, and wrapping the final response.\n\nArgs:\nconnection_id (str): ZMQ identity sent over ZMQ socket\nmessage_content (bytes): Byte encoded request protobuf to be parsed\n\nReturns:\nHandlerResult: result to be sent in response back to client", "source": "juraj-google-style"}
{"code": "def _add_value_to_extension(msg: message.Message, extension: message.Message, is_choice_type: bool) -> None:\n    if is_choice_type:\n        oneofs = msg.DESCRIPTOR.oneofs\n        if not oneofs:\n            raise fhir_errors.InvalidFhirError(f'Choice type is missing a oneof: {msg.DESCRIPTOR.full_name}')\n        value_field_name = msg.WhichOneof(oneofs[0].name)\n        if value_field_name is None:\n            raise ValueError(f'Choice type has no value set: {msg.DESCRIPTOR.full_name}')\n        value_field = msg.DESCRIPTOR.fields_by_name[value_field_name]\n        _verify_field_is_proto_message_type(value_field)\n        _add_value_to_extension(proto_utils.get_value_at_field(msg, value_field), extension, False)\n    else:\n        value_field_mapping = _get_value_field_mapping_for_extension(extension)\n        value_field = value_field_mapping.get(msg.DESCRIPTOR.full_name)\n        if value_field is not None:\n            proto_utils.set_value_at_field(cast(Any, extension).value, cast(Any, value_field), msg)\n        elif annotation_utils.has_fhir_valueset_url(msg):\n            codes.copy_code(msg, cast(Any, extension).value.code)\n        elif fhir_types.is_type_or_profile_of_coding(msg):\n            codes.copy_coding(msg, cast(Any, extension).value.coding)\n        else:\n            _add_fields_to_extension(msg, extension)", "docstring": "Adds the fields from msg to a generic Extension.\n\nAttempts are first made to set the \"value\" field of the generic Extension\nbased on the type of field set on message. If this fails, checks are made\nagainst the generic Code and Coding types, and finally we fall back to adding\nthe message's fields as sub-extensions.\n\nArgs:\nmsg: The message whose values to add to extension.\nextension: The generic Extension to populate.\nis_choice_type: Whether or not the provided message represents a \"choice\"\ntype.", "source": "github-repos"}
{"code": "def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):\n    if ((frames is None) and (fps is None)):\n        return times_to_ms(h, m, s, ms)\n    elif ((frames is not None) and (fps is not None)):\n        return frames_to_ms(frames, fps)\n    else:\n        raise ValueError('Both fps and frames must be specified')", "docstring": "Convert time to milliseconds.\n\nSee :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified,\n:func:`pysubs2.time.frames_to_ms()` is called instead.\n\nRaises:\nValueError: Invalid fps, or one of frames/fps is missing.\n\nExample:\n>>> make_time(s=1.5)\n1500\n>>> make_time(frames=50, fps=25)\n2000", "source": "codesearchnet"}
{"code": "def has_atomic_move(path):\n    try:\n        return _pywrap_file_io.HasAtomicMove(compat.path_to_bytes(path))\n    except errors.OpError:\n        return True", "docstring": "Checks whether the file system supports atomic moves.\n\nReturns whether or not the file system of the given path supports the atomic\nmove operation for a file or folder.  If atomic move is supported, it is\nrecommended to use a temp location for writing and then move to the final\nlocation.\n\nArgs:\npath: string, path to a file\n\nReturns:\nTrue, if the path is on a file system that supports atomic move\nFalse, if the file system does not support atomic move. In such cases\nwe need to be careful about using moves. In some cases it is safer\nnot to use temporary locations in this case.", "source": "github-repos"}
{"code": "def diff_text1(self, diffs):\n    \n    text = []\n    for (op, data) in diffs:\n      if op != self.DIFF_INSERT:\n        text.append(data)\n    return \"\".join(text)", "docstring": "Compute and return the source text (all equalities and deletions).\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nSource text.", "source": "juraj-google-style"}
{"code": "def setup_engines(client=None):\n    \n    if not client:\n        try:\n            client = ipyparallel.Client()\n        except:\n            raise DistobClusterError(\n                u)\n    eids = client.ids\n    if not eids:\n        raise DistobClusterError(\n                u'No ipyparallel compute engines are available')\n    nengines = len(eids)\n    dv = client[eids]\n    dv.use_dill()\n    with dv.sync_imports(quiet=True):\n        import distob\n    \n    ars = []\n    for i in eids:\n        dv.targets = i\n        ars.append(dv.apply_async(_remote_setup_engine, i, nengines))\n    dv.wait(ars)\n    for ar in ars:\n        if not ar.successful():\n            raise ar.r\n    \n    if distob.engine is None:\n        distob.engine = ObjectHub(-1, client)", "docstring": "Prepare all iPython engines for distributed object processing.\n\nArgs:\nclient (ipyparallel.Client, optional): If None, will create a client\nusing the default ipyparallel profile.", "source": "juraj-google-style"}
{"code": "def get_atlas_per_gene_mutation_df(self, gene_id):\n        \n        \n        \n        \n        \n        \n        \n\n        g = self.reference_gempro.genes.get_by_id(gene_id)\n\n        single, fingerprint = g.protein.sequence_mutation_summary(alignment_type='seqalign')\n\n        structure_type_suffix = 'NA'\n        appender = []\n\n        for k, strains in single.items():\n            \n            to_append = {}\n            orig_res = k[0]\n            resnum = int(k[1])\n            mutated_res = k[2]\n            num_strains_mutated = len(strains)\n            strain_ids = [str(x.split(g.id + '_')[1]) for x in strains]\n            to_append['ref_residue'] = orig_res\n            to_append['ref_resnum'] = resnum\n            to_append['strain_residue'] = mutated_res\n            to_append['num_strains_mutated'] = num_strains_mutated\n            to_append['strains_mutated'] = ';'.join(strain_ids)\n            to_append['at_disulfide_bridge'] = False\n\n            \n            origres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(orig_res)\n            mutres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(mutated_res)\n            to_append['ref_residue_prop'] = origres_props\n            to_append['strain_residue_prop'] = mutres_props\n\n            \n            grantham_s, grantham_txt = ssbio.protein.sequence.properties.residues.grantham_score(orig_res, mutated_res)\n            to_append['grantham_score'] = grantham_s\n            to_append['grantham_annotation'] = grantham_txt\n\n            \n            to_append.update(g.protein.get_residue_annotations(seq_resnum=resnum, use_representatives=True))\n\n            \n            if g.protein.representative_structure:\n                if g.protein.representative_structure.is_experimental:\n                    to_append['structure_type'] = 'EXP'\n                else:\n                    to_append['structure_type'] = 'HOM'\n\n                \n                repchain = g.protein.representative_chain\n                repchain_annotations = g.protein.representative_structure.chains.get_by_id(repchain).seq_record.annotations\n                if 'SSBOND-biopython' in repchain_annotations:\n                    structure_resnum = g.protein.map_seqprop_resnums_to_structprop_resnums(resnums=resnum,\n                                                                                           use_representatives=True)\n                    if resnum in structure_resnum:\n                        ssbonds = repchain_annotations['SSBOND-biopython']\n                        ssbonds_res = []\n                        for x in ssbonds:\n                            ssbonds_res.append(x[0])\n                            ssbonds_res.append(x[1])\n\n                        if structure_resnum in ssbonds_res:\n                            to_append['at_disulfide_bridge'] = True\n\n            appender.append(to_append)\n\n        if not appender:\n            return pd.DataFrame()\n\n        cols = ['ref_residue', 'ref_resnum', 'strain_residue', 'num_strains_mutated', 'strains_mutated',\n                'ref_residue_prop', 'strain_residue_prop', 'grantham_score', 'grantham_annotation',\n                'at_disulfide_bridge',\n                'seq_SS-sspro', 'seq_SS-sspro8', 'seq_RSA-accpro', 'seq_RSA-accpro20', 'seq_TM-tmhmm',\n                'struct_SS-dssp', 'struct_RSA-dssp', 'struct_ASA-dssp',\n                'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms',\n 
               'struct_PHI-dssp', 'struct_PSI-dssp',\n                'struct_resnum', 'struct_residue'\n                'strains_mutated']\n\n        df_gene_summary = pd.DataFrame.from_records(appender, columns=cols)\n\n        \n        df_gene_summary.dropna(axis=1, how='all', inplace=True)\n\n        df_gene_summary.sort_values(by='ref_resnum', inplace=True)\n        df_gene_summary = df_gene_summary.set_index('ref_resnum')\n        return df_gene_summary", "docstring": "Create a single data frame which summarizes a gene and its mutations.\n\nArgs:\ngene_id (str): Gene ID in the base model\n\nReturns:\nDataFrame: Pandas DataFrame of the results", "source": "juraj-google-style"}
{"code": "def loop_until_timeout_or_true(timeout_s, function, sleep_s=1):\n    return loop_until_timeout_or_valid(timeout_s, function, (lambda x: x), sleep_s)", "docstring": "Loops until the specified function returns True or a timeout is reached.\n\nNote: The function may return anything which evaluates to implicit True.  This\nfunction will loop calling it as long as it continues to return something\nwhich evaluates to False.  We ensure this method is called at least once\nregardless of timeout.\n\nArgs:\ntimeout_s: The number of seconds to wait until a timeout condition is\nreached. As a convenience, this accepts None to mean never timeout.  Can\nalso be passed a PolledTimeout object instead of an integer.\nfunction: The function to call each iteration.\nsleep_s: The number of seconds to wait after calling the function.\n\nReturns:\nWhatever the function returned last.", "source": "codesearchnet"}
{"code": "def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):\n    return ImportLaidOutTensorOperation(mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]", "docstring": "Import a laid_out_tensor.\n\nFor expert users.\nThe input must be laid out appropriately given the eventual MeshImpl,\nand layout.\n\nArgs:\nmesh: a Mesh\nlaid_out_tensor: a LaidOutTensor\nshape: a mtf.Shape\nname: an optional string\n\nReturns:\na mtf.Tensor", "source": "codesearchnet"}
{"code": "def content_ratings(self, **kwargs):\n    path = self._get_id_path('content_ratings')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the content ratings for a TV Series.\n\nArgs:\nlanguage: (optional) ISO 639 code.\nappend_to_response: (optional) Comma separated, any collection\nmethod.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def create_output_excerpts(self, test_info):\n    dest_path = test_info.output_path\n    utils.create_dir(dest_path)\n    filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info, 'txt')\n    excerpt_file_path = os.path.join(dest_path, filename)\n    with open(excerpt_file_path, 'w', encoding='utf-8', errors='replace', newline='') as out:\n        while self._adb_logcat_file_obj:\n            line = self._adb_logcat_file_obj.readline()\n            if not line:\n                break\n            out.write(line)\n    self._ad.log.debug('logcat excerpt created at: %s', excerpt_file_path)\n    return [excerpt_file_path]", "docstring": "Convenient method for creating excerpts of adb logcat.\n\nThis copies logcat lines from self.adb_logcat_file_path to an excerpt\nfile, starting from the location where the previous excerpt ended.\n\nCall this method at the end of: `setup_class`, `teardown_test`, and\n`teardown_class`.\n\nArgs:\ntest_info: `self.current_test_info` in a Mobly test.\n\nReturns:\nList of strings, the absolute paths to excerpt files.", "source": "github-repos"}
{"code": "def set(self, name, value):\n        \n        if name not in self._options:\n\n            self.register(name, self._generator())\n\n        return self._options[name].__set__(self, value)", "docstring": "Set an option value.\n\nArgs:\nname (str): The name of the option.\nvalue: The value to set the option to.\n\nRaises:\nTypeError: If the value is not a string or appropriate native type.\nValueError: If the value is a string but cannot be coerced.\n\nIf the name is not registered a new option will be created using the\noption generator.", "source": "juraj-google-style"}
{"code": "def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n    raise NotImplementedError", "docstring": "Subtracts a value from this variable.\n\nThis is essentially a shortcut for `assign_sub(self, delta)`.\n\nArgs:\ndelta: A `Tensor`. The value to subtract from this variable.\nuse_locking: If `True`, use locking during the operation.\nname: The name of the operation to be created\nread_value: if True, will return something which evaluates to the new\nvalue of the variable; if False will return the assign op.\n\nReturns:\nThe updated variable. If `read_value` is false, instead returns None in\nEager mode and the assign op in graph mode.", "source": "github-repos"}
{"code": "def all_pairs(sets, similarity_func_name='jaccard', similarity_threshold=0.5):\n    if ((not isinstance(sets, list)) or (len(sets) == 0)):\n        raise ValueError('Input parameter sets must be a non-empty list.')\n    if (similarity_func_name not in _similarity_funcs):\n        raise ValueError('Similarity function {} is not supported.'.format(similarity_func_name))\n    if ((similarity_threshold < 0) or (similarity_threshold > 1.0)):\n        raise ValueError('Similarity threshold must be in the range [0, 1].')\n    if (similarity_func_name not in _symmetric_similarity_funcs):\n        raise ValueError('The similarity function must be symmetric ({})'.format(', '.join(_symmetric_similarity_funcs)))\n    similarity_func = _similarity_funcs[similarity_func_name]\n    overlap_threshold_func = _overlap_threshold_funcs[similarity_func_name]\n    position_filter_func = _position_filter_funcs[similarity_func_name]\n    (sets, _) = _frequency_order_transform(sets)\n    index = defaultdict(list)\n    logging.debug('Find all pairs with similarities >= {}...'.format(similarity_threshold))\n    count = 0\n    for x1 in np.argsort([len(s) for s in sets]):\n        s1 = sets[x1]\n        t = overlap_threshold_func(len(s1), similarity_threshold)\n        prefix_size = ((len(s1) - t) + 1)\n        prefix = s1[:prefix_size]\n        candidates = set([x2 for (p1, token) in enumerate(prefix) for (x2, p2) in index[token] if position_filter_func(s1, sets[x2], p1, p2, similarity_threshold)])\n        for x2 in candidates:\n            s2 = sets[x2]\n            sim = similarity_func(s1, s2)\n            if (sim < similarity_threshold):\n                continue\n            (yield tuple((sorted([x1, x2], reverse=True) + [sim])))\n            count += 1\n        for (j, token) in enumerate(prefix):\n            index[token].append((x1, j))\n    logging.debug('{} pairs found.'.format(count))", "docstring": "Find all pairs of sets with similarity greater than a threshold.\nThis is an implementation of the All-Pair-Binary algorithm in the paper\n\"Scaling Up All Pairs Similarity Search\" by Bayardo et al., with\nposition filter enhancement.\n\nArgs:\nsets (list): a list of sets, each entry is an iterable representing a\nset.\nsimilarity_func_name (str): the name of the similarity function used;\nthis function currently supports `\"jaccard\"` and `\"cosine\"`.\nsimilarity_threshold (float): the threshold used, must be a float\nbetween 0 and 1.0.\n\nReturns:\npairs (Iterator[tuple]): an iterator of tuples `(x, y, similarity)`\nwhere `x` and `y` are the indices of sets in the input list `sets`.", "source": "codesearchnet"}
{"code": "def size(x):\n    if any_symbolic_tensors((x,)):\n        return Size().symbolic_call(x)\n    return backend.numpy.size(x)", "docstring": "Return the number of elements in a tensor.\n\nArgs:\nx: Input tensor.\n\nReturns:\nNumber of elements in `x`.", "source": "github-repos"}
{"code": "def GetSubkeyByName(self, name):\n    \n    pyregf_key = self._pyregf_key.get_sub_key_by_name(name)\n    if not pyregf_key:\n      return None\n\n    key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])\n    return REGFWinRegistryKey(pyregf_key, key_path=key_path)", "docstring": "Retrieves a subkey by name.\n\nArgs:\nname (str): name of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.", "source": "juraj-google-style"}
{"code": "def topological_sort_operations(operations):\n    in_degrees = collections.OrderedDict()\n    for op in reversed(operations):\n        if op not in in_degrees:\n            in_degrees[op] = 0\n        for next_op in reversed(_op_dependencies(op)):\n            in_degrees[next_op] = in_degrees.get(next_op, 0) + 1\n    nexts = []\n    for op, in_degree in in_degrees.items():\n        if in_degree == 0:\n            nexts.append(op)\n    order = {}\n    next_order = 0\n    while nexts:\n        op, nexts = (nexts[0], nexts[1:])\n        order[op] = next_order\n        next_order += 1\n        for next_op in reversed(_op_dependencies(op)):\n            in_degrees[next_op] -= 1\n            if in_degrees[next_op] == 0:\n                nexts.append(next_op)\n    assert len(order) == len(operations)\n    return order", "docstring": "Topological sorts a list of operations.\n\nThis does a topological sort of the operations in a graph. The edges include\nboth data dependencies and control dependencies. Note that the edge goes from\nan operation to its dependencies.\n\nThe sort is intentionally unstable, reversing orders of operations and\ndependencies on ties.\n\nArgs:\noperations: a list of tf.Operation in the same graph.\n\nReturns:\nA map from a tf.Operation to its topological order.", "source": "github-repos"}
{"code": "def _record_local(self, node, op, name, typ, orig_val=None, final=None):\n    if orig_val:\n        self.current_local_ops.append(LocalOp(name, LocalOp.Op.ASSIGN))\n    if typ:\n        self.current_local_ops.append(LocalOp(name, LocalOp.Op.ANNOTATE))\n    self._update_annotations_dict(node, op, name, typ, orig_val, self.current_annotated_locals, final=final)", "docstring": "Record a type annotation on a local variable.\n\nThis method records three types of local operations:\n- An annotation, e.g., `x: int`. In this case, `typ` is PyTDClass(int) and\n`orig_val` is None.\n- An assignment, e.g., `x = 0`. In this case, `typ` is None and `orig_val`\nis Instance(int).\n- An annotated assignment, e.g., `x: int = None`. In this case, `typ` is\nPyTDClass(int) and `orig_val` is Instance(None).\n\nArgs:\nnode: The current node.\nop: The current opcode.\nname: The variable name.\ntyp: The annotation.\norig_val: The original value, if any.\nfinal: Whether the annotation is tagged Final (None to preserve any\nexisting Final tag when updating an existing annotation).", "source": "github-repos"}
{"code": "def queryize(terms, exclude_screen_name=None):\n    \n    ors = ' OR '.join('\"{}\"'.format(x) for x in terms if not x.startswith('-'))\n    nots = ' '.join('-\"{}\"'.format(x[1:]) for x in terms if x.startswith('-'))\n    sn = \"-from:{}\".format(exclude_screen_name) if exclude_screen_name else ''\n    return ' '.join((ors, nots, sn))", "docstring": "Create query from list of terms, using OR\nbut intelligently excluding terms beginning with '-' (Twitter's NOT operator).\nOptionally add -from:exclude_screen_name.\n\n>>> helpers.queryize(['apple', 'orange', '-peach'])\nu'apple OR orange -peach'\n\nArgs:\nterms (list): Search terms.\nexclude_screen_name (str): A single screen name to exclude from the search.\n\nReturns:\nA string ready to be passed to tweepy.API.search", "source": "juraj-google-style"}
{"code": "def addSearchers(self, *searchers):\n        \n        self._searchers.extend(searchers)\n\n        debug.logger & debug.flagCompiler and debug.logger(\n            'current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers]))\n\n        return self", "docstring": "Add more transformed MIBs repositories.\n\nMibCompiler.compile will invoke each of configured searcher objects\nin order of their addition asking each if already transformed MIB\nmodule already exists and is more recent than specified.\n\nArgs:\nsearchers: searcher object(s)\n\nReturns:\nreference to itself (can be used for call chaining)", "source": "juraj-google-style"}
{"code": "def get(self,identity,params=None, headers=None):\n        \n        path = self._sub_url_params('/creditor_bank_accounts/:identity', {\n          \n            'identity': identity,\n          })\n        \n\n        response = self._perform_request('GET', path, params, headers,\n                                         retry_failures=True)\n        return self._resource_for(response)", "docstring": "Get a single creditor bank account.\n\nRetrieves the details of an existing creditor bank account.\n\nArgs:\nidentity (string): Unique identifier, beginning with \"BA\".\nparams (dict, optional): Query string parameters.\n\nReturns:\nListResponse of CreditorBankAccount instances", "source": "juraj-google-style"}
{"code": "def stage_tc_indicator_entity(self, indicator_data):\n    path = '@.{value: summary, '\n    path += 'type: type, '\n    path += 'ownerName: ownerName, '\n    path += 'confidence: confidence || `0`, '\n    path += 'rating: rating || `0`}'\n    return self.path_data(indicator_data, path)", "docstring": "Convert JSON data to TCEntity.\n\nArgs:\nindicator_data (str): [description]\n\nReturns:\n[type]: [description]", "source": "codesearchnet"}
{"code": "def correct_structure(self, atol=1e-08):\n    return np.allclose(self.structure.lattice.matrix, self.prim.lattice.matrix, atol=atol)", "docstring": "Determine if the structure matches the standard primitive structure.\n\nThe standard primitive will be different between seekpath and pymatgen\nhigh-symmetry paths, but this is handled by the specific subclasses.\n\nArgs:\natol (:obj:`float`, optional): Absolute tolerance used to compare\nthe input structure with the primitive standard structure.\n\nReturns:\nbool: ``True`` if the structure is the same as the standard\nprimitive, otherwise ``False``.", "source": "codesearchnet"}
{"code": "def _OpenParentFile(self, file_system, path_spec, vhdi_file):\n    location = getattr(path_spec, 'location', None)\n    if (not location):\n        raise errors.PathSpecError('Unsupported path specification without location.')\n    location_path_segments = file_system.SplitPath(location)\n    parent_filename = vhdi_file.parent_filename\n    (_, _, parent_filename) = parent_filename.rpartition('\\\\')\n    location_path_segments.pop()\n    location_path_segments.append(parent_filename)\n    parent_file_location = file_system.JoinPath(location_path_segments)\n    kwargs = path_spec_factory.Factory.GetProperties(path_spec)\n    kwargs['location'] = parent_file_location\n    if (path_spec.parent is not None):\n        kwargs['parent'] = path_spec.parent\n    parent_file_path_spec = path_spec_factory.Factory.NewPathSpec(path_spec.type_indicator, **kwargs)\n    if (not file_system.FileEntryExistsByPathSpec(parent_file_path_spec)):\n        return\n    file_object = resolver.Resolver.OpenFileObject(parent_file_path_spec, resolver_context=self._resolver_context)\n    vhdi_parent_file = pyvhdi.file()\n    vhdi_parent_file.open_file_object(file_object)\n    if vhdi_parent_file.parent_identifier:\n        self._OpenParentFile(file_system, parent_file_path_spec, vhdi_parent_file)\n    vhdi_file.set_parent(vhdi_parent_file)\n    self._parent_vhdi_files.append(vhdi_parent_file)\n    self._sub_file_objects.append(file_object)", "docstring": "Opens the parent file.\n\nArgs:\nfile_system (FileSystem): file system of the VHDI file.\npath_spec (PathSpec): path specification of the VHDI file.\nvhdi_file (pyvhdi.file): VHDI file.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "codesearchnet"}
{"code": "def update_mapping(mapping: Dict[ops.Qid, LogicalIndex],\n                   operations: ops.OP_TREE\n                   ) -> None:\n    \n    for op in ops.flatten_op_tree(operations):\n        if (isinstance(op, ops.GateOperation) and\n            isinstance(op.gate, PermutationGate)):\n            op.gate.update_mapping(mapping, op.qubits)", "docstring": "Updates a mapping (in place) from qubits to logical indices according to\na set of permutation gates. Any gates other than permutation gates are\nignored.\n\nArgs:\nmapping: The mapping to update.\noperations: The operations to update according to.", "source": "juraj-google-style"}
{"code": "def chat(self, id):\n    json = self.skype.conn('GET', '{0}/users/ME/conversations/{1}'.format(self.skype.conn.msgsHost, id), auth=SkypeConnection.Auth.RegToken, params={'view': 'msnp24Equivalent'}).json()\n    cls = SkypeSingleChat\n    if ('threadProperties' in json):\n        info = self.skype.conn('GET', '{0}/threads/{1}'.format(self.skype.conn.msgsHost, json.get('id')), auth=SkypeConnection.Auth.RegToken, params={'view': 'msnp24Equivalent'}).json()\n        json.update(info)\n        cls = SkypeGroupChat\n    return self.merge(cls.fromRaw(self.skype, json))", "docstring": "Get a single conversation by identifier.\n\nArgs:\nid (str): single or group chat identifier", "source": "codesearchnet"}
{"code": "def set_vrf(self, name, vrf, default=False, disable=False):\n    commands = [('interface %s' % name)]\n    commands.append(self.command_builder('vrf forwarding', vrf, default=default, disable=disable))\n    return self.configure(commands)", "docstring": "Applies a VRF to the interface\n\nNote: VRF being applied to interface must already exist in switch\nconfig. Ethernet port must be in routed mode. This functionality\ncan also be handled in the VRF api.\n\nArgs:\nname (str): The interface identifier.  It must be a full\ninterface name (ie Ethernet, not Et)\nvrf (str): The vrf name to be applied to the interface\ndefault (bool): Specifies the default value for VRF\ndisable (bool): Specifies to disable VRF\n\nReturns:\nTrue if the operation succeeds otherwise False is returned", "source": "codesearchnet"}
{"code": "def generate(self, model, outfolder, *, exclude=None):\n        \n        with pythonic_names():\n            super().generate(model, outfolder)\n\n            check_dependency = self.with_dependencies and model.eResource\n            if check_dependency:\n                if exclude is None:\n                    exclude = set()\n                resource = model.eResource\n                \n                exclude.add(resource)\n                rset = resource.resource_set\n                direct_resources = {r for r in rset.resources.values() if r not in exclude}\n                for resource in direct_resources:\n                    self.generate(resource.contents[0], outfolder, exclude=exclude)", "docstring": "Generate model code.\n\nArgs:\nmodel: The meta-model to generate code for.\noutfolder: Path to the directoty that will contain the generated code.\nexclude: List of referenced resources for which code was already generated\n(to prevent regeneration).", "source": "juraj-google-style"}
{"code": "def save_to_text_file(monsoon_data, file_path):\n    if (not monsoon_data):\n        raise MonsoonError('Attempting to write empty Monsoon data to file, abort')\n    utils.create_dir(os.path.dirname(file_path))\n    with io.open(file_path, 'w', encoding='utf-8') as f:\n        for md in monsoon_data:\n            f.write(str(md))\n            f.write(MonsoonData.delimiter)", "docstring": "Save multiple MonsoonData objects to a text file.\n\nArgs:\nmonsoon_data: A list of MonsoonData objects to write to a text\nfile.\nfile_path: The full path of the file to save to, including the file\nname.", "source": "codesearchnet"}
{"code": "def should_stop(self):\n    return self._coord.should_stop()", "docstring": "Check if the coordinator was told to stop.\n\nSee `Coordinator.should_stop()`.\n\nReturns:\nTrue if the coordinator was told to stop, False otherwise.", "source": "github-repos"}
{"code": "def VerifyRow(self, parser_mediator, row):\n    \n    if len(row) < self.MIN_COLUMNS:\n      return False\n\n    \n    \n    try:\n      timestamp = self._ConvertToTimestamp(row['date'], row['time'])\n    except (ValueError, TypeError):\n      return False\n\n    if timestamp is None:\n      return False\n\n    \n    try:\n      action = int(row['action'], 10)\n    except (ValueError, TypeError):\n      return False\n\n    if action not in formatter.SCAN_RESULTS:\n      return False\n    return True", "docstring": "Verifies if a line of the file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"}
{"code": "def from_monitoring_infos(monitoring_info_list, user_metrics_only=False):\n    counters = {}\n    distributions = {}\n    gauges = {}\n    string_sets = {}\n    bounded_tries = {}\n    for mi in monitoring_info_list:\n        if user_metrics_only and (not monitoring_infos.is_user_monitoring_info(mi)):\n            continue\n        try:\n            key = _create_metric_key(mi)\n        except ValueError as e:\n            _LOGGER.debug(str(e))\n            continue\n        metric_result = monitoring_infos.extract_metric_result_map_value(mi)\n        if monitoring_infos.is_counter(mi):\n            counters[key] = metric_result\n        elif monitoring_infos.is_distribution(mi):\n            distributions[key] = metric_result\n        elif monitoring_infos.is_gauge(mi):\n            gauges[key] = metric_result\n        elif monitoring_infos.is_string_set(mi):\n            string_sets[key] = metric_result\n        elif monitoring_infos.is_bounded_trie(mi):\n            bounded_tries[key] = metric_result\n    return (counters, distributions, gauges, string_sets, bounded_tries)", "docstring": "Groups MonitoringInfo objects into counters, distributions, gauges and\nstring sets\n\nArgs:\nmonitoring_info_list: An iterable of MonitoringInfo objects.\nuser_metrics_only: If true, includes user metrics only.\nReturns:\nA tuple containing three dictionaries: counters, distributions, gauges and\nstring set, respectively. Each dictionary contains (MetricKey, metric\nresult) pairs.", "source": "github-repos"}
{"code": "def rank(self, **kwargs):\n    axis = kwargs.get('axis', 0)\n    numeric_only = (True if axis else kwargs.get('numeric_only', False))\n    func = self._prepare_method(pandas.DataFrame.rank, **kwargs)\n    new_data = self._map_across_full_axis(axis, func)\n    if numeric_only:\n        new_columns = self.compute_index(1, new_data, True)\n    else:\n        new_columns = self.columns\n    new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)\n    return self.__constructor__(new_data, self.index, new_columns, new_dtypes)", "docstring": "Computes numerical rank along axis. Equal values are set to the average.\n\nReturns:\nDataManager containing the ranks of the values along an axis.", "source": "codesearchnet"}
{"code": "def TryConsume(self, token):\n    if (self.token == token):\n        self.NextToken()\n        return True\n    return False", "docstring": "Tries to consume a given piece of text.\n\nArgs:\ntoken: Text to consume.\n\nReturns:\nTrue iff the text was consumed.", "source": "codesearchnet"}
{"code": "def __recognize_list(self, node: yaml.Node, expected_type: Type) -> RecResult:\n    logger.debug('Recognizing as a list')\n    if (not isinstance(node, yaml.SequenceNode)):\n        message = '{}{}Expected a list here.'.format(node.start_mark, os.linesep)\n        return ([], message)\n    item_type = generic_type_args(expected_type)[0]\n    for item in node.value:\n        (recognized_types, message) = self.recognize(item, item_type)\n        if (len(recognized_types) == 0):\n            return ([], message)\n        if (len(recognized_types) > 1):\n            recognized_types = [List[t] for t in recognized_types]\n            return (recognized_types, message)\n    return ([expected_type], '')", "docstring": "Recognize a node that we expect to be a list of some kind.\n\nArgs:\nnode: The node to recognize.\nexpected_type: List[...something...]\n\nReturns\nexpected_type and the empty string if it was recognized,\n[] and an error message otherwise.", "source": "codesearchnet"}
{"code": "def __init__(self, output_path):\n    self._output_path = output_path\n    self._profile = cProfile.Profile() if self._output_path else None", "docstring": "Initialize.\n\nArgs:\noutput_path: A pathname for the profiler output.  An empty string\nindicates that no profiling should be done.", "source": "github-repos"}
{"code": "def _to_numpy(a):\n    if isinstance(a, ops.EagerTensor):\n        return a.numpy()\n    if isinstance(a, tensor.Tensor):\n        sess = ops.get_default_session()\n        return sess.run(a)\n    if isinstance(a, indexed_slices.IndexedSlicesValue):\n        arr = np.zeros(a.dense_shape)\n        assert len(a.values) == len(a.indices), 'IndexedSlicesValue has %s value slices but %s indices\\n%s' % (a.values, a.indices, a)\n        for values_slice, index in zip(a.values, a.indices):\n            assert 0 <= index < len(arr), 'IndexedSlicesValue has invalid index %s\\n%s' % (index, a)\n            arr[index] += values_slice\n        return arr\n    return a", "docstring": "Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays.\n\nArgs:\na: any value.\n\nReturns:\nIf a is EagerTensor or Tensor, returns the evaluation of a by calling\nnumpy() or run(). If a is IndexedSlicesValue, constructs the corresponding\ndense numpy array. Otherwise returns a unchanged.", "source": "github-repos"}
{"code": "def StartProfiling(self, configuration, identifier):\n    \n    if not configuration:\n      return\n\n    if configuration.HaveProfileTasks():\n      self._tasks_profiler = profilers.TasksProfiler(identifier, configuration)\n      self._tasks_profiler.Start()", "docstring": "Starts profiling.\n\nArgs:\nconfiguration (ProfilingConfiguration): profiling configuration.\nidentifier (str): identifier of the profiling session used to create\nthe sample filename.", "source": "juraj-google-style"}
{"code": "def get_geno_marker(self, marker, return_index=False):\n        \n        if self._mode != \"r\":\n            raise UnsupportedOperation(\"not available in 'w' mode\")\n\n        \n        if marker not in self._bim.index:\n            raise ValueError(\"{}: marker not in BIM\".format(marker))\n\n        \n        seek_index = self._bim.loc[marker, \"i\"]\n        self.seek(seek_index)\n\n        if return_index:\n            return self._read_current_marker(), seek_index\n        return self._read_current_marker()", "docstring": "Gets the genotypes for a given marker.\n\nArgs:\nmarker (str): The name of the marker.\nreturn_index (bool): Wether to return the marker's index or not.\n\nReturns:\nnumpy.ndarray: The genotypes of the marker (additive format).", "source": "juraj-google-style"}
{"code": "def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1):\n    with ScratchDir('.'):\n        name = 'temp_zeo'\n        zeo_inp_filename = (name + '.cssr')\n        ZeoCssr(structure).write_file(zeo_inp_filename)\n        rad_file = None\n        if rad_dict:\n            rad_file = (name + '.rad')\n            with open(rad_file, 'w') as fp:\n                for el in rad_dict.keys():\n                    fp.write('{0}     {1}'.format(el, rad_dict[el]))\n        atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)\n        vol_str = volume(atmnet, 0.3, probe_rad, 10000)\n        sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)\n        vol = None\n        sa = None\n        for line in vol_str.split('\\n'):\n            if ('Number_of_pockets' in line):\n                fields = line.split()\n                if (float(fields[1]) > 1):\n                    vol = (- 1.0)\n                    break\n                if (float(fields[1]) == 0):\n                    vol = (- 1.0)\n                    break\n                vol = float(fields[3])\n        for line in sa_str.split('\\n'):\n            if ('Number_of_pockets' in line):\n                fields = line.split()\n                if (float(fields[1]) > 1):\n                    sa = (- 1.0)\n                    break\n                if (float(fields[1]) == 0):\n                    sa = (- 1.0)\n                    break\n                sa = float(fields[3])\n    if ((not vol) or (not sa)):\n        raise ValueError('Error in zeo++ output stream')\n    return (vol, sa)", "docstring": "Computes the volume and surface area of isolated void using Zeo++.\nUseful to compute the volume and surface area of vacant site.\n\nArgs:\nstructure: pymatgen Structure containing vacancy\nrad_dict(optional): Dictionary with short name of elements and their\nradii.\nchan_rad(optional): Minimum channel Radius.\nprobe_rad(optional): Probe radius for Monte Carlo sampling.\n\nReturns:\nvolume: floating number representing the volume of void", "source": "codesearchnet"}
{"code": "def equals(self, actual_seq):\n    \n\n    try:\n      expected = dict([(element, None) for element in self._expected_seq])\n      actual = dict([(element, None) for element in actual_seq])\n    except TypeError:\n      \n      expected = list(self._expected_seq)\n      actual = list(actual_seq)\n      expected.sort()\n      actual.sort()\n    return expected == actual", "docstring": "Check to see whether actual_seq has same elements as expected_seq.\n\nArgs:\nactual_seq: sequence\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def call(self, inputs):\n    net = self.encoder_net(tf.cast(inputs, tf.float32))\n    return ed.MultivariateNormalDiag(loc=net[(..., :self.latent_size)], scale_diag=tf.nn.softplus(net[(..., self.latent_size:)]), name='latent_code_posterior')", "docstring": "Runs the model forward to return a stochastic encoding.\n\nArgs:\ninputs: Tensor of shape [1, num_productions, num_production_rules]. It is\na sequence of productions of length `num_productions`. Each production\nis a one-hot vector of length `num_production_rules`: it determines\nwhich production rule the production corresponds to.\n\nReturns:\nlatent_code_posterior: A random variable capturing a sample from the\nvariational distribution, of shape [1, self.latent_size].", "source": "codesearchnet"}
{"code": "def broadcast_change():\n    (_, res) = win32gui.SendMessageTimeout(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0, win32con.SMTO_ABORTIFHUNG, 5000)\n    return (not bool(res))", "docstring": "Refresh the windows environment.\n\n.. note::\nThis will only effect new processes and windows. Services will not see\nthe change until the system restarts.\n\nReturns:\nbool: True if successful, otherwise False\n\nUsage:\n\n.. code-block:: python\n\nimport salt.utils.win_reg\nwinreg.broadcast_change()", "source": "codesearchnet"}
{"code": "def s(self, data, errors='strict'):\n        \n        try:\n            if data is None or isinstance(data, (int, list, dict)):\n                pass  \n            elif isinstance(data, unicode):\n                try:\n                    data.decode('utf-8')\n                except UnicodeEncodeError:  \n                    \n                    data = str(data.encode('utf-8').strip(), errors=errors)\n                    self.log.warning(u'Encoding poorly encoded string ({})'.format(data))\n                except AttributeError:\n                    pass  \n            else:\n                data = str(data, 'utf-8', errors=errors)  \n        except NameError:\n            pass  \n        return data", "docstring": "Decode value using correct Python 2/3 method.\n\nThis method is intended to replace the :py:meth:`~tcex.tcex.TcEx.to_string` method with\nbetter logic to handle poorly encoded unicode data in Python2 and still work in Python3.\n\nArgs:\ndata (any): Data to ve validated and (de)encoded\nerrors (string): What method to use when dealing with errors.\n\nReturns:\n(string): Return decoded data", "source": "juraj-google-style"}
{"code": "def __init__(self, file_entry, bytes_per_sector):\n    \n    super(TSKVolume, self).__init__(file_entry.name)\n    self._file_entry = file_entry\n    self._bytes_per_sector = bytes_per_sector", "docstring": "Initializes a volume.\n\nArgs:\nfile_entry (TSKPartitionFileEntry): a TSK partition file entry.\nbytes_per_sector (int): number of bytes per sector.", "source": "juraj-google-style"}
{"code": "def connect_to(self, vertex, weight=1):\n    for edge in self.edges_out:\n        if (vertex == edge.vertex_in):\n            return edge\n    return Edge(self, vertex, weight)", "docstring": "Connect this vertex to another one.\n\nArgs:\nvertex (Vertex): vertex to connect to.\nweight (int): weight of the edge.\n\nReturns:\nEdge: the newly created edge.", "source": "codesearchnet"}
{"code": "def report_error_to_cluster(self, error_code, error_message):\n    if self._context_handle:\n        pywrap_tfe.TFE_ReportErrorToCluster(self._context_handle, error_code, error_message)\n    else:\n        raise ValueError('Context is not initialized.')", "docstring": "Report error to other members in a multi-client cluster.\n\nArgs:\nerror_code: a `tf.errors` error code.\nerror_message: a string. The error message.", "source": "github-repos"}
{"code": "def _infer_num_gpus_per_worker(devices):\n    if _is_device_list_single_worker(devices):\n        return sum((1 for d in devices if _is_gpu_device(d)))\n    else:\n        device_dict = _group_device_list(devices)\n        num_gpus = None\n        for _, devices_in_task in device_dict.items():\n            for device_in_task in devices_in_task:\n                if num_gpus is None:\n                    num_gpus = sum((1 for d in device_in_task if _is_gpu_device(d)))\n                elif num_gpus != sum((1 for d in device_in_task if _is_gpu_device(d))):\n                    raise ValueError('All workers should have the same number of GPUs.')\n                for d in device_in_task:\n                    d_spec = tf_device.DeviceSpec.from_string(d)\n                    if d_spec.device_type == 'GPU' and d_spec.device_index >= num_gpus:\n                        raise ValueError('GPU `device_index` on a worker should be consecutive and start from 0.')\n        return num_gpus", "docstring": "Infers the number of GPUs on each worker.\n\nCurrently to make multi-worker cross device ops work, we need all workers to\nhave the same number of GPUs.\n\nArgs:\ndevices: a list of device strings, can be either local devices or remote\ndevices.\n\nReturns:\nnumber of GPUs per worker.\n\nRaises:\nValueError if workers have different number of GPUs or GPU indices are not\nconsecutive and starting from 0.", "source": "github-repos"}
{"code": "def atomic_download(handle, download_fn, module_dir, lock_file_timeout_sec=(10 * 60)):\n    lock_file = _lock_filename(module_dir)\n    task_uid = uuid.uuid4().hex\n    lock_contents = _lock_file_contents(task_uid)\n    tmp_dir = _temp_download_dir(module_dir, task_uid)\n    try:\n        while True:\n            try:\n                tf_utils.atomic_write_string_to_file(lock_file, lock_contents, overwrite=False)\n                if tf_v1.gfile.Exists(module_dir):\n                    return module_dir\n                break\n            except tf.errors.OpError:\n                pass\n            _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec)\n        logging.info(\"Downloading TF-Hub Module '%s'.\", handle)\n        tf_v1.gfile.MakeDirs(tmp_dir)\n        download_fn(handle, tmp_dir)\n        _write_module_descriptor_file(handle, module_dir)\n        try:\n            tf_v1.gfile.Rename(tmp_dir, module_dir)\n            logging.info(\"Downloaded TF-Hub Module '%s'.\", handle)\n        except tf.errors.AlreadyExistsError:\n            logging.warning('Module already exists in %s', module_dir)\n    finally:\n        try:\n            tf_v1.gfile.DeleteRecursively(tmp_dir)\n        except tf.errors.NotFoundError:\n            pass\n        try:\n            contents = tf_utils.read_file_to_string(lock_file)\n        except tf.errors.NotFoundError:\n            contents = ''\n        if (contents == lock_contents):\n            try:\n                tf_v1.gfile.Remove(lock_file)\n            except tf.errors.NotFoundError:\n                pass\n    return module_dir", "docstring": "Returns the path to a Module directory for a given TF-Hub Module handle.\n\nArgs:\nhandle: (string) Location of a TF-Hub Module.\ndownload_fn: Callback function that actually performs download. The callback\nreceives two arguments, handle and the location of a temporary\ndirectory to download the content into.\nmodule_dir: Directory where to download the module files to.\nlock_file_timeout_sec: The amount of time we give the current holder of\nthe lock to make progress in downloading a module.\nIf no progress is made, the lock is revoked.\n\nReturns:\nA string containing the path to a TF-Hub Module directory.\n\nRaises:\nValueError: if the Module is not found.", "source": "codesearchnet"}
{"code": "def _create_L_ind(self, L):\n    if issparse(L[0]):\n        L = [L_t.todense() for L_t in L]\n    L = self._to_numpy(L)\n    L_ind = np.ones((self.n, (self.m * self.k)))\n    for (yi, y) in enumerate(self.task_graph.feasible_set()):\n        for t in range(self.t):\n            L_ind[(:, yi::self.k)] *= np.where(np.logical_or((L[t] == y[t]), (L[t] == 0)), 1, 0)\n        L_ind[(:, yi::self.k)] *= np.where((sum(L) != 0), 1, 0)\n    return L_ind", "docstring": "Convert T label matrices with labels in 0...K_t to a one-hot format\n\nHere we can view e.g. the $(i,j)$ entries of the $T$ label matrices as\na _label vector_ emitted by LF j for data point i.\n\nArgs:\nL: a T-length list of [n,m] scipy.sparse label matrices with values\nin {0,1,...,k}\n\nReturns:\nL_ind: An [n,m*k] dense np.ndarray with values in {0,1}\n\nNote that no column is required for 0 (abstain) labels.", "source": "codesearchnet"}
{"code": "def append(self, value, key=''):\n        \n        if isinstance(value, type('')) or isinstance(value, type(u'')):\n            value = ListItem(value)\n\n        keys = super(ListView, self).append(value, key=key)\n        if type(value) in (list, tuple, dict):\n            for k in keys:\n                if not self.EVENT_ONCLICK in self.children[k].attributes:\n                    self.children[k].onclick.connect(self.onselection)\n                self.children[k].attributes['selected'] = False\n        else:\n            \n            if not self.EVENT_ONCLICK in value.attributes:\n                value.onclick.connect(self.onselection)\n            value.attributes['selected'] = False\n        return keys", "docstring": "Appends child items to the ListView. The items are accessible by list.children[key].\n\nArgs:\nvalue (ListItem, or iterable of ListItems): The child to be appended. In case of a dictionary,\neach item's key is used as 'key' param for the single append.\nkey (str): The unique string identifier for the child. Ignored in case of iterable 'value'\nparam.", "source": "juraj-google-style"}
{"code": "def ConsumeInteger(self, is_long=False):\n    try:\n        result = _ParseAbstractInteger(self.token, is_long=is_long)\n    except ValueError as e:\n        raise self.ParseError(str(e))\n    self.NextToken()\n    return result", "docstring": "Consumes an integer number.\n\nArgs:\nis_long: True if the value should be returned as a long integer.\nReturns:\nThe integer parsed.\n\nRaises:\nParseError: If an integer couldn't be consumed.", "source": "codesearchnet"}
{"code": "def _SetValues(self, values):\n    \n\n    def _ToStr(value):\n      \n      if isinstance(value, (list, tuple)):\n        result = []\n        for val in value:\n          result.append(str(val))\n        return result\n      else:\n        return str(value)\n\n    \n    if isinstance(values, Row):\n      if self._keys != values.header:\n        raise TypeError('Attempt to append row with mismatched header.')\n      self._values = copy.deepcopy(values.values)\n\n    elif isinstance(values, dict):\n      for key in self._keys:\n        if key not in values:\n          raise TypeError('Dictionary key mismatch with row.')\n      for key in self._keys:\n        self[key] = _ToStr(values[key])\n\n    elif isinstance(values, list) or isinstance(values, tuple):\n      if len(values) != len(self._values):\n        raise TypeError('Supplied list length != row length')\n      for (index, value) in enumerate(values):\n        self._values[index] = _ToStr(value)\n\n    else:\n      raise TypeError('Supplied argument must be Row, dict or list, not %s',\n                      type(values))", "docstring": "Set values from supplied dictionary or list.\n\nArgs:\nvalues: A Row, dict indexed by column name, or list.\n\nRaises:\nTypeError: Argument is not a list or dict, or list is not equal row\nlength or dictionary keys don't match.", "source": "juraj-google-style"}
{"code": "def normalize_audio_buffer(buf, volume_percentage, sample_width=2):\n    if (sample_width != 2):\n        raise Exception('unsupported sample width:', sample_width)\n    scale = (math.pow(2, ((1.0 * volume_percentage) / 100)) - 1)\n    arr = array.array('h', buf)\n    for idx in range(0, len(arr)):\n        arr[idx] = int((arr[idx] * scale))\n    buf = arr.tostring()\n    return buf", "docstring": "Adjusts the loudness of the audio data in the given buffer.\n\nVolume normalization is done by scaling the amplitude of the audio\nin the buffer by a scale factor of 2^(volume_percentage/100)-1.\nFor example, 50% volume scales the amplitude by a factor of 0.414,\nand 75% volume scales the amplitude by a factor of 0.681.\nFor now we only sample_width 2.\n\nArgs:\nbuf: byte string containing audio data to normalize.\nvolume_percentage: volume setting as an integer percentage (1-100).\nsample_width: size of a single sample in bytes.", "source": "codesearchnet"}
{"code": "def google_api_execute(config, auth, api_call, results, errors, append=None):\n    try:\n        rows = API(config, api_call).execute()\n        if results:\n            if isinstance(rows, dict):\n                rows = [rows]\n            elif results.get('bigquery', {}).get('format', 'JSON') == 'CSV':\n                rows = [[r] for r in rows]\n            if config.verbose:\n                print('.', end='', flush=True)\n            if append:\n                rows = google_api_append(append, api_call['kwargs'], rows)\n            yield from map(lambda r: Discovery_To_BigQuery.clean(r), rows)\n    except HttpError as e:\n        if errors:\n            rows = [{'Error': str(e), 'Parameters': [{'Key': k, 'Value': str(v)} for k, v in api_call['kwargs'].items()]}]\n            put_rows(config, auth, errors, rows)\n            if 'bigquery' in errors:\n                errors['bigquery']['disposition'] = 'WRITE_APPEND'\n        else:\n            raise e", "docstring": "Execute the actual API call and write to the end points defined.\n\nThe API call is completely defined at this point.\nThe results and error definition is optional.\n\nArgs:\nauth (string): either \"user\" or \"service\" to make the API call.\napi_call (dict): the JSON for the API call as defined in recipe.\nresults (dict): defines where the data will be written\nerrors (dict): defines where the errors will be written\nappend (dict): optional parameters to append to each row, given as BQ schema\n\nReturns (dict):\nNone, all data is transfered between API / BigQuery\n\nRaises:\nValueError: If a required key in the recipe is missing.", "source": "github-repos"}
{"code": "def symmetric_kl_divergence(predicted, actual):\n    epsilon = tf.constant(1e-07, dtype=tf.float32, name='epsilon')\n    p = tf.math.maximum(predicted, epsilon)\n    q = tf.math.maximum(actual, epsilon)\n    kld_1 = tf.math.reduce_sum(tf.math.multiply(p, tf.math.log(tf.math.divide(p, q))))\n    kld_2 = tf.math.reduce_sum(tf.math.multiply(q, tf.math.log(tf.math.divide(q, p))))\n    return tf.add(kld_1, kld_2)", "docstring": "Calculate symmetric KL-divergence over two classification tensors.\n\nNote that here the classifications do not form a probability distribution.\nThey are, however normalized to 0..1 and calculating a KL-divergence over them\ngives reasonable numerical results.\n\nShape of the two inputs must be the same at inference time but is otherwise\nunconstrained.\n\nArgs:\npredicted: classification outputs from model\nactual: golden classification outputs\n\nReturns:\nSingle scalar tensor with symmetric KL-divergence between predicted and\nactual.", "source": "github-repos"}
{"code": "def put_pixel(self, x: int, y: int, color: Tuple[(int, int, int)]) -> None:\n    lib.TCOD_image_put_pixel(self.image_c, x, y, color)", "docstring": "Change a pixel on this Image.\n\nArgs:\nx (int): X pixel of the Image.  Starting from the left at 0.\ny (int): Y pixel of the Image.  Starting from the top at 0.\ncolor (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.", "source": "codesearchnet"}
{"code": "def setupSerialPort(loopback, port):\n    if loopback:\n        testSerial = SerialTestClass()\n        serialPort = testSerial.serialPort\n    else:\n        serialPort = serial.Serial(port, 115200, timeout=0)\n    return serialPort", "docstring": "Sets up serial port by connecting to phsyical or software port.\n\nDepending on command line options, this function will either connect to a\nSerialTestClass() port for loopback testing or to the specified port from\nthe command line option. If loopback is True it overrides the physical port\nspecification.\n\nArgs:\nloopback: argparse option\nport: argparse option\n\nReturns:\nserialPort: Pyserial serial port instance", "source": "codesearchnet"}
{"code": "def create_unbroadcast_axis(shape, broadcast_shape):\n  \n  return tuple(\n      -(1 + i)\n      for i in range(len(broadcast_shape))\n      if i >= len(shape) or broadcast_shape[-(1 + i)] > shape[-(1 + i)])", "docstring": "Creates the reduction axis for unbroadcasting.\n\nArgs:\nshape: A list. The shape after the broadcast operation.\nbroadcast_shape: A list. The original shape the array being unbroadcast\nhad.\nReturns:\nA list. The axes along which the array needs to be reduced. These axes will\nbe distributed evenly into the original shape.", "source": "juraj-google-style"}
{"code": "def write_rst(self,\n                  prefix: str = \"\",\n                  suffix: str = \"\",\n                  heading_underline_char: str = \"=\",\n                  method: AutodocMethod = None,\n                  overwrite: bool = False,\n                  mock: bool = False) -> None:\n        \n        content = self.rst_content(\n            prefix=prefix,\n            suffix=suffix,\n            heading_underline_char=heading_underline_char,\n            method=method\n        )\n        write_if_allowed(self.target_rst_filename, content,\n                         overwrite=overwrite, mock=mock)", "docstring": "Writes the RST file to our destination RST filename, making any\nnecessary directories.\n\nArgs:\nprefix: as for :func:`rst_content`\nsuffix: as for :func:`rst_content`\nheading_underline_char: as for :func:`rst_content`\nmethod: as for :func:`rst_content`\noverwrite: overwrite the file if it exists already?\nmock: pretend to write, but don't", "source": "juraj-google-style"}
{"code": "def csv_to_dict(file_name, file_location):\n    \n    file = __os.path.join(file_location, file_name)\n    try:\n        csv_read = open(file, \"r\")\n    except Exception as e:\n        LOGGER.critical('Function csv_to_dict Error {error} ignoring any errors'.format(error=e))\n        print('Error {error} ignoring any errors'.format(error=e))\n        csv_read = open(file, \"r\", errors='ignore')\n    data_row = __csv.DictReader(csv_read, dialect=\"excel\")\n    dict_key = 1\n    temp_dict = dict()\n    for row in data_row:\n        temp_dict[dict_key] = row\n        dict_key += 1\n    csv_read.close()\n    return temp_dict", "docstring": "Function to import a csv as a dictionary\nArgs:\nfile_name: The name of the csv file\nfile_location: The location of the file, derive from the os module\n\nReturns: returns a dictionary", "source": "juraj-google-style"}
{"code": "def ParseDestList(self, parser_mediator, olecf_item):\n    \n    header_map = self._GetDataTypeMap('dest_list_header')\n\n    try:\n      header, entry_offset = self._ReadStructureFromFileObject(\n          olecf_item, 0, header_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile(\n          'Unable to parse DestList header with error: {0!s}'.format(\n              exception))\n\n    if header.format_version == 1:\n      entry_map = self._GetDataTypeMap('dest_list_entry_v1')\n    elif header.format_version in (3, 4):\n      entry_map = self._GetDataTypeMap('dest_list_entry_v3')\n    else:\n      parser_mediator.ProduceExtractionWarning(\n          'unsupported format version: {0:d}.'.format(header.format_version))\n      return\n\n    while entry_offset < olecf_item.size:\n      try:\n        entry, entry_data_size = self._ReadStructureFromFileObject(\n            olecf_item, entry_offset, entry_map)\n      except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile(\n            'Unable to parse DestList entry with error: {0!s}'.format(\n                exception))\n\n      display_name = 'DestList entry at offset: 0x{0:08x}'.format(entry_offset)\n\n      try:\n        droid_volume_identifier = self._ParseDistributedTrackingIdentifier(\n            parser_mediator, entry.droid_volume_identifier, display_name)\n\n      except (TypeError, ValueError) as exception:\n        droid_volume_identifier = ''\n        parser_mediator.ProduceExtractionWarning(\n            'unable to read droid volume identifier with error: {0!s}'.format(\n                exception))\n\n      try:\n        droid_file_identifier = self._ParseDistributedTrackingIdentifier(\n            parser_mediator, entry.droid_file_identifier, display_name)\n\n      except (TypeError, ValueError) as exception:\n        droid_file_identifier = ''\n        parser_mediator.ProduceExtractionWarning(\n            'unable to read droid file identifier with error: {0!s}'.format(\n                exception))\n\n      try:\n        birth_droid_volume_identifier = (\n            self._ParseDistributedTrackingIdentifier(\n                parser_mediator, entry.birth_droid_volume_identifier,\n                display_name))\n\n      except (TypeError, ValueError) as exception:\n        birth_droid_volume_identifier = ''\n        parser_mediator.ProduceExtractionWarning((\n            'unable to read birth droid volume identifier with error: '\n            '{0:s}').format(\n                exception))\n\n      try:\n        birth_droid_file_identifier = self._ParseDistributedTrackingIdentifier(\n            parser_mediator, entry.birth_droid_file_identifier, display_name)\n\n      except (TypeError, ValueError) as exception:\n        birth_droid_file_identifier = ''\n        parser_mediator.ProduceExtractionWarning((\n            'unable to read birth droid file identifier with error: '\n            '{0:s}').format(\n                exception))\n\n      if entry.last_modification_time == 0:\n        date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n      else:\n        date_time = dfdatetime_filetime.Filetime(\n            timestamp=entry.last_modification_time)\n\n      event_data = AutomaticDestinationsDestListEntryEventData()\n      event_data.birth_droid_file_identifier = birth_droid_file_identifier\n      event_data.birth_droid_volume_identifier = birth_droid_volume_identifier\n      event_data.droid_file_identifier = 
droid_file_identifier\n      event_data.droid_volume_identifier = droid_volume_identifier\n      event_data.entry_number = entry.entry_number\n      event_data.hostname = entry.hostname.rstrip('\\x00')\n      event_data.offset = entry_offset\n      event_data.path = entry.path.rstrip('\\x00')\n      event_data.pin_status = entry.pin_status\n\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n      entry_offset += entry_data_size", "docstring": "Parses the DestList OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nolecf_item (pyolecf.item): OLECF item.\n\nRaises:\nUnableToParseFile: if the DestList cannot be parsed.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, description, *labels):\n    super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods, len(labels), name, description, *labels)", "docstring": "Creates a new BoolGauge.\n\nArgs:\nname: name of the new metric.\ndescription: description of the new metric.\n*labels: The label list of the new metric.", "source": "github-repos"}
{"code": "def timestamp_YmdHMS(value):\n    \n    i = int(value)\n    S = i\n    M = S\n    H = M\n    d = H\n    m = d\n    Y = m\n    return int(calendar.timegm((\n        Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, 0, 0, 0)\n    ))", "docstring": "Convert timestamp string to time in seconds since epoch.\n\nTimestamps strings like '20130618120000' are able to be converted by this\nfunction.\n\nArgs:\nvalue: A timestamp string in the format '%Y%m%d%H%M%S'.\n\nReturns:\nThe time in seconds since epoch as an integer.\n\nRaises:\nValueError: If timestamp is invalid.\n\nNote: The timezone is assumed to be UTC/GMT.", "source": "juraj-google-style"}
{"code": "def _create_in_hdx(self, object_type, id_field_name, name_field_name, file_to_upload=None):\n    self.check_required_fields()\n    if ((id_field_name in self.data) and self._load_from_hdx(object_type, self.data[id_field_name])):\n        logger.warning(('%s exists. Updating %s' % (object_type, self.data[id_field_name])))\n        self._merge_hdx_update(object_type, id_field_name, file_to_upload)\n    else:\n        self._save_to_hdx('create', name_field_name, file_to_upload)", "docstring": "Helper method to check if resource exists in HDX and if so, update it, otherwise create it\n\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\nname_field_name (str): Name of field containing HDX object name\nfile_to_upload (Optional[str]): File to upload to HDX (if url not supplied)\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:\n    \n    try:\n        certs_chain_get = requests.get(signature_chain_url)\n    except requests.exceptions.ConnectionError as e:\n        log.error(f'Amazon signature chain get error: {e}')\n        return None\n\n    certs_chain_txt = certs_chain_get.text\n    certs_chain = extract_certs(certs_chain_txt)\n\n    amazon_cert: crypto.X509 = certs_chain.pop(0)\n\n    \n    sc_url_verification = verify_sc_url(signature_chain_url)\n    if not sc_url_verification:\n        log.error(f'Amazon signature url {signature_chain_url} was not verified')\n\n    \n    expired_verification = not amazon_cert.has_expired()\n    if not expired_verification:\n        log.error(f'Amazon certificate ({signature_chain_url}) expired')\n\n    \n    sans_verification = verify_sans(amazon_cert)\n    if not sans_verification:\n        log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')\n\n    \n    chain_verification = verify_certs_chain(certs_chain, amazon_cert)\n    if not chain_verification:\n        log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')\n\n    result = (sc_url_verification and expired_verification and sans_verification and chain_verification)\n\n    return amazon_cert if result else None", "docstring": "Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.\n\nArgs:\nsignature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.\nReturns:\nresult: Amazon certificate if verification was successful, None if not.", "source": "juraj-google-style"}
{"code": "def flush(writer=None, name=None):\n    del name\n    if writer is None:\n        writer = _summary_state.writer\n        if writer is None:\n            return control_flow_ops.no_op()\n    if isinstance(writer, SummaryWriter):\n        return writer.flush()\n    raise ValueError('Invalid argument to flush(): %r' % (writer,))", "docstring": "Forces summary writer to send any buffered data to storage.\n\nThis operation blocks until that finishes.\n\nArgs:\nwriter: The `tf.summary.SummaryWriter` to flush. If None, the current\ndefault writer will be used instead; if there is no current writer, this\nreturns `tf.no_op`.\nname: Ignored legacy argument for a name for the operation.\n\nReturns:\nThe created `tf.Operation`.", "source": "github-repos"}
{"code": "def _Stat(self, path, ext_attrs=False):\n    local_path = client_utils.CanonicalPathToLocalPath(path)\n    result = client_utils.StatEntryFromPath(local_path, self.pathspec, ext_attrs=ext_attrs)\n    try:\n        result.symlink = utils.SmartUnicode(os.readlink(local_path))\n    except (OSError, AttributeError):\n        pass\n    return result", "docstring": "Returns stat information of a specific path.\n\nArgs:\npath: A unicode string containing the path.\next_attrs: Whether the call should also collect extended attributes.\n\nReturns:\na StatResponse proto\n\nRaises:\nIOError when call to os.stat() fails", "source": "codesearchnet"}
{"code": "def _ParseLogLine(self, parser_mediator, key, structure):\n    \n    time_elements_tuple = self._GetTimeElementsTuple(key, structure)\n\n    try:\n      date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(\n          time_elements_tuple=time_elements_tuple)\n    except ValueError:\n      parser_mediator.ProduceExtractionWarning(\n          'invalid date time value: {0!s}'.format(structure.date_time))\n      return\n\n    self._last_month = time_elements_tuple[1]\n\n    event_data = MacWifiLogEventData()\n    event_data.agent = structure.agent\n    \n    \n    event_data.function = structure.function.strip()\n    event_data.text = structure.text\n\n    if key == 'known_function_logline':\n      event_data.action = self._GetAction(\n          event_data.function, event_data.text)\n\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parse a single log line and produce an event object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the parsed structure.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "juraj-google-style"}
{"code": "def get_distance(self, node):\n    delta = ((node.pos[0] - self.pos[0]), (node.pos[1] - self.pos[1]))\n    return sqrt(((delta[0] ** 2) + (delta[1] ** 2)))", "docstring": "Get the distance between 2 nodes\n\nArgs:\nnode (object): The other node.", "source": "codesearchnet"}
{"code": "def ephemeris(self, **kwargs):\n        \n\n        for orb in self.iter(inclusive=True, **kwargs):\n            yield orb", "docstring": "Generator giving the propagation of the orbit at different dates\n\nArgs:\nstart (Date)\nstop (Date or timedelta)\nstep (timedelta)\nYield:\nOrbit", "source": "juraj-google-style"}
{"code": "def copy_cwl_files(from_dir=CWL_PATH, to_dir=None):\n    cwl_files = glob.glob('{}{}*.cwl'.format(from_dir, os.sep))\n    if (len(cwl_files) > 0):\n        create_dirs(to_dir)\n    for fi in cwl_files:\n        fo = os.path.join(to_dir, os.path.basename(fi))\n        shutil.copy2(fi, fo)\n    return len(cwl_files)", "docstring": "Copy cwl files to a directory where the cwl-runner can find them.\n\nArgs:\nfrom_dir (str): Path to directory where to copy files from (default:\nthe cwl directory of nlppln).\nto_dir (str): Path to directory where the files should be copied to\n(e.g., the CWL working directory).", "source": "codesearchnet"}
{"code": "def _validate_namespace(self, namespace):\n        \n        if self._namespace_regex.fullmatch(namespace) is None:\n            LOGGER.debug('Invalid namespace: %s', namespace)\n            raise _ResponseFailed(self._status.INVALID_ADDRESS)", "docstring": "Validates a namespace, raising a ResponseFailed error if invalid.\n\nArgs:\nnamespace (str): The namespace to validate\n\nRaises:\nResponseFailed: The namespace was invalid, and a status of\nINVALID_ADDRESS will be sent with the response.", "source": "juraj-google-style"}
{"code": "class BridgeTowerProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = 'BridgeTowerImageProcessor'\n    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')\n\n    def __init__(self, image_processor, tokenizer):\n        super().__init__(image_processor, tokenizer)\n\n    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[BridgeTowerProcessorKwargs]) -> BatchEncoding:\n        \n        output_kwargs = self._merge_kwargs(BridgeTowerProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n        encoding = self.tokenizer(text=text, **output_kwargs['text_kwargs'])\n        encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs'])\n        encoding.update(encoding_image_processor)\n        return encoding\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        image_processor_input_names = self.image_processor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))", "docstring": "Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single\nprocessor.\n\n[`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and\n[`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and\n[`~BridgeTowerProcessor.decode`] for more information.\n\nArgs:\nimage_processor (`BridgeTowerImageProcessor`):\nAn instance of [`BridgeTowerImageProcessor`]. The image processor is a required input.\ntokenizer (`RobertaTokenizerFast`):\nAn instance of ['RobertaTokenizerFast`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def update_video(self, video_id, title=\"\", description=\"\", keywords=\"\", access_control=AccessControl.Unlisted):\n        \n\n        \n        if not self.authenticated:\n            raise ApiError(_(\"Authentication is required\"))\n\n        entry = self.fetch_video(video_id)\n\n        \n        extension = self._access_control(access_control)\n        if extension:\n            entry.extension_elements = extension\n\n        if title:\n            entry.media.title.text = title\n\n        if description:\n            entry.media.description.text = description\n\n        \n        \n\n        success = Api.yt_service.UpdateVideoEntry(entry)\n        return success", "docstring": "Updates the video\n\nAuthentication is required\n\nArgs:\nvideo_id: id of the video to update\ntitle: string\ndescription: string\nkeywords: string\naccess_control: one of the AccessControl values\n\nReturns:\nthe updated video entry on success\nNone otherwise", "source": "juraj-google-style"}
{"code": "def attention_bias_prepend_inputs_full_attention(padding):\n    in_target = tf.cumsum(padding, axis=1, exclusive=True)\n    target_pos = tf.cumsum(in_target, axis=1)\n    illegal_connections = tf.greater(tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2))\n    bias = (tf.to_float(illegal_connections) * (- 1000000000.0))\n    bias = tf.expand_dims(bias, 1)\n    return bias", "docstring": "Create a bias tensor for prepend_mode=\"prepend_inputs_full_attention\".\n\nSee prepend_inputs in common_hparams.py.\n\nProduces a bias tensor to be used in self-attention.\n\nThis bias tensor allows for full connectivity in the \"inputs\" part of\nthe sequence and masked connectivity in the targets part.\n\nArgs:\npadding: a float `Tensor` with shape [batch, length] with\nones in positions corresponding to padding.  In each row, a single\npadding position separates the input part from the target part.\n\nReturns:\na `Tensor` with shape [batch, 1, length, length].", "source": "codesearchnet"}
{"code": "def append(self, value, key=''):\n        \n        if type(value) in (list, tuple, dict):\n            if type(value)==dict:\n                for k in value.keys():\n                    self.append(value[k], k)\n                return value.keys()\n            keys = []\n            for child in value:\n                keys.append( self.append(child) )\n            return keys\n        \n        key = str(key)\n        if not isinstance(value, Widget):\n            raise ValueError('value should be a Widget (otherwise use add_child(key, other))')\n\n        if 'left' in value.style.keys():\n            del value.style['left']\n        if 'right' in value.style.keys():\n            del value.style['right']\n\n        if not 'order' in value.style.keys():\n            value.style.update({'position':'static', 'order':'-1'})\n\n        if key.isdigit():\n            value.style['order'] = key\n\n        key = value.identifier if key == '' else key\n        self.add_child(key, value)\n\n        return key", "docstring": "Adds child widgets to this widget.\nThe key allows access to the specific child via widget.children[key].\nThe key has to be numeric and determines the child's order in the layout.\n\nArgs:\nvalue (Widget): Child instance to be appended.\nkey (str): Unique identifier for the child. If key.isdigit()==True ('0', '1', ...), the value determines the order\nin the layout", "source": "juraj-google-style"}
{"code": "def rename(self, name):\n    return self.client.api.rename(self.id, name)", "docstring": "Rename this container. Similar to the ``docker rename`` command.\n\nArgs:\nname (str): New name for the container\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "def parse_document_id(chrom, pos, ref, alt, variant_type, case_id):\n    return generate_md5_key([chrom, pos, ref, alt, variant_type, case_id])", "docstring": "Parse the unique document id for a variant.\n\nThis will always be unique in the database.\n\nArgs:\nchrom(str)\npos(str)\nref(str)\nalt(str)\nvariant_type(str): 'clinical' or 'research'\ncase_id(str): unique family id\n\nReturns:\ndocument_id(str): The unique document id in an md5 string", "source": "codesearchnet"}
{"code": "def _fetch(self, method, url=None, post_data=None, parse_data=True, key=None, parameters=None, listener=None, full_return=False):\n    headers = self.get_headers()\n    headers['Content-Type'] = 'application/json'\n    handlers = []\n    debuglevel = int(self._settings['debug'])\n    handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel))\n    if hasattr(httplib, 'HTTPS'):\n        handlers.append(urllib2.HTTPSHandler(debuglevel=debuglevel))\n    handlers.append(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))\n    password_url = self._get_password_url()\n    if (password_url and ('Authorization' not in headers)):\n        pwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()\n        pwd_manager.add_password(None, password_url, self._settings['user'], self._settings['password'])\n        handlers.append(HTTPBasicAuthHandler(pwd_manager))\n    opener = urllib2.build_opener(*handlers)\n    if (post_data is not None):\n        post_data = json.dumps(post_data)\n    uri = self._url(url, parameters)\n    request = RESTRequest(uri, method=method, headers=headers)\n    if (post_data is not None):\n        request.add_data(post_data)\n    response = None\n    try:\n        response = opener.open(request)\n        body = response.read()\n        if (password_url and (password_url not in self._settings['authorizations']) and request.has_header('Authorization')):\n            self._settings['authorizations'][password_url] = request.get_header('Authorization')\n    except urllib2.HTTPError as e:\n        if (e.code == 401):\n            raise AuthenticationError(('Access denied while trying to access %s' % uri))\n        elif (e.code == 404):\n            raise ConnectionError(('URL not found: %s' % uri))\n        else:\n            raise\n    except urllib2.URLError as e:\n        raise ConnectionError(('Error while fetching from %s: %s' % (uri, e)))\n    finally:\n        if response:\n            response.close()\n        opener.close()\n    data = None\n    if parse_data:\n        if (not key):\n            key = string.split(url, '/')[0]\n        data = self.parse(body, key)\n    if full_return:\n        info = (response.info() if response else None)\n        status = (int(string.split(info['status'])[0]) if (info and ('status' in info)) else None)\n        return {'success': ((status >= 200) and (status < 300)), 'data': data, 'info': info, 'body': body}\n    return data", "docstring": "Issue a request.\n\nArgs:\nmethod (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None\n\nKwargs:\nurl (str): Destination URL\npost_data (str): A string of what to POST\nparse_data (bool): If true, parse response data\nkey (string): If parse_data==True, look for this key when parsing data\nparameters (dict): Additional GET parameters to append to the URL\nlistener (func): callback called when uploading a file\nfull_return (bool): If set to True, get a full response (with success, data, info, body)\n\nReturns:\ndict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data\n\nRaises:\nAuthenticationError, ConnectionError, urllib2.HTTPError, ValueError", "source": "codesearchnet"}
{"code": "def getPageType(name, number=False):\n    if (not (name in pageNames())):\n        return None\n    pageType = PyOrigin.Pages(name).GetType()\n    if number:\n        return str(pageType)\n    if (pageType == 1):\n        return 'matrix'\n    if (pageType == 2):\n        return 'book'\n    if (pageType == 3):\n        return 'graph'\n    if (pageType == 4):\n        return 'layout'\n    if (pageType == 5):\n        return 'notes'", "docstring": "Returns the type of the page with that name.\nIf that name doesn't exist, None is returned.\n\nArgs:\nname (str): name of the page to get the folder from\nnumber (bool): if True, return numbers (i.e., a graph will be 3)\nif False, return words where appropriate (i.e, \"graph\")\n\nReturns:\nstring of the type of object the page is", "source": "codesearchnet"}
{"code": "def get_event(self, event_name, event_history=None):\n        \n        if event_history is None:\n            event_history = event_name + '_history'\n        return self._db.rpoplpush(event_name, event_history)", "docstring": "Get an event from the database.\n\nGets an event from the named event list removing the event and\nadding it to the event history.\n\nArgs:\nevent_name (str): Event list key.\nevent_history (str, optional): Event history list.\n\nReturns:\nstr: string representation of the event object", "source": "juraj-google-style"}
{"code": "def _resolve_grad_captures(body_graph, body_grad_graph, while_op):\n    new_capture_inputs = []\n    for t in body_grad_graph.external_captures:\n        if t.graph == body_graph:\n            for i, output in enumerate(t.graph.outputs):\n                if output is t:\n                    t = while_op.outputs[i]\n                    break\n            assert t.graph == body_graph.outer_graph\n        new_capture_inputs.append(t)\n    return new_capture_inputs", "docstring": "Returns the tensors to pass as captured inputs to `body_grad_graph`.\n\n`body_grad_graph` may have external references to:\n1. Its outer graph containing the input gradients. These are left as-is.\n2. Accumulators captured from the forward-pass graph. These should have been\nadded as `while_op` outputs after the gradient graph was built. We replace\nthese with the corresponding output of `while_op`, i.e. a tensor in\n`body_graph.outer_graph`. In the case of nested control flow or functions,\nthe gradient logic handling `body_grad_graph.outer_graph` will make sure\nthe tensor from `body_graph.outer_graph` is also correctly captured.\n\nArgs:\nbody_graph: FuncGraph. The forward-pass body function.\nbody_grad_graph: FuncGraph. The body gradients function.\nwhile_op: The forward-pass While Operation calling `body_graph`.\n\nReturns:\nA list of input tensors to be passed as the captured inputs to\n`body_grad_graph`.", "source": "github-repos"}
{"code": "def get_document(project_id, knowledge_base_id, document_id):\n    import dialogflow_v2beta1 as dialogflow\n    client = dialogflow.DocumentsClient()\n    document_path = client.document_path(project_id, knowledge_base_id, document_id)\n    response = client.get_document(document_path)\n    print('Got Document:')\n    print(' - Display Name: {}'.format(response.display_name))\n    print(' - Knowledge ID: {}'.format(response.name))\n    print(' - MIME Type: {}'.format(response.mime_type))\n    print(' - Knowledge Types:')\n    for knowledge_type in response.knowledge_types:\n        print('    - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))\n    print(' - Source: {}\\n'.format(response.content_uri))", "docstring": "Gets a Document.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.\ndocument_id: Id of the Document.", "source": "codesearchnet"}
{"code": "def __init__(self, org=None, course=None, run=None, branch=None, version_guid=None, deprecated=False, **kwargs):\n        \n        offering_arg = kwargs.pop('offering', None)\n        if offering_arg:\n            warnings.warn(\n                \"offering is deprecated! Use course and run instead.\",\n                DeprecationWarning,\n                stacklevel=2\n            )\n            course, __, run = offering_arg.partition(\"/\")\n\n        if deprecated:\n            for part in (org, course, run):\n                self._check_location_part(part, self.INVALID_CHARS_DEPRECATED)\n\n            fields = [org, course]\n            \n            if run:\n                fields.append(run)\n            if branch is not None:\n                fields.append(branch)\n            if not all(self.DEPRECATED_ALLOWED_ID_RE.match(field) for field in fields):\n                raise InvalidKeyError(self.__class__, fields)\n\n        else:\n            if version_guid:\n                version_guid = self.as_object_id(version_guid)\n\n            for name, value in [['org', org], ['course', course], ['run', run], ['branch', branch]]:\n                if not (value is None or self.ALLOWED_ID_RE.match(value)):\n                    raise InvalidKeyError(self.__class__,\n                                          u\"Special characters not allowed in field {}: '{}'\".format(name, value))\n\n        super(CourseLocator, self).__init__(\n            org=org,\n            course=course,\n            run=run,\n            branch=branch,\n            version_guid=version_guid,\n            deprecated=deprecated,\n            **kwargs\n        )\n\n        if self.deprecated and (self.org is None or self.course is None):\n            raise InvalidKeyError(self.__class__, \"Deprecated strings must set both org and course.\")\n\n        if not self.deprecated and self.version_guid is None and \\\n                (self.org is None or self.course is None or self.run is None):\n            raise InvalidKeyError(self.__class__, \"Either version_guid or org, course, and run should be set\")", "docstring": "Construct a CourseLocator\n\nArgs:\nversion_guid (string or ObjectId): optional unique id for the version\norg, course, run (string): the standard definition. Optional only if version_guid given\nbranch (string): the branch such as 'draft', 'published', 'staged', 'beta'", "source": "juraj-google-style"}
{"code": "def get_unstable_entries(self, charge_to_discharge=True):\n        \n        list_copy = list(self._unstable_entries)\n        return list_copy if charge_to_discharge else list(reversed(list_copy))", "docstring": "Returns the unstable entries for the electrode.\n\nArgs:\ncharge_to_discharge: Order from most charge to most discharged\nstate? Defaults to True.\n\nReturns:\nA list of unstable entries in the electrode, ordered by amount of\nthe working ion.", "source": "juraj-google-style"}
{"code": "def _check_conversion_params(conversion_params, is_v2=False):\n    supported_precision_modes = TrtPrecisionMode.supported_precision_modes()\n    if conversion_params.precision_mode not in supported_precision_modes:\n        raise ValueError(\"precision mode '{}' is not supported. It should be one of {}\".format(conversion_params.precision_mode, supported_precision_modes))\n    if conversion_params.minimum_segment_size <= 0 and conversion_params.minimum_segment_size != -1:\n        raise ValueError('minimum segment size should be positive or -1 (to disable main graph conversion).')", "docstring": "Validate the provided TrtConversionParams.\n\nArgs:\nconversion_params: a TrtConversionParams instance.\nis_v2: whether we're getting a RewriterConfig for TF 2.0.\n\nRaises:\nTypeError: if any of the parameters are of unexpected type.\nValueError: if any of the parameters are of unexpected value.", "source": "github-repos"}
{"code": "def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):\n    result_path = self.meta_data['results_resource']\n    conn = Qubole.agent()\n    include_header = 'false'\n    if (len(arguments) == 1):\n        include_header = arguments.pop(0)\n        if (include_header not in ('true', 'false')):\n            raise ParseError('include_header can be either true or false')\n    r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})\n    if r.get('inline'):\n        raw_results = r['results']\n        encoded_results = raw_results.encode('utf8')\n        if (sys.version_info < (3, 0, 0)):\n            fp.write(encoded_results)\n        else:\n            import io\n            if isinstance(fp, io.TextIOBase):\n                if hasattr(fp, 'buffer'):\n                    fp.buffer.write(encoded_results)\n                else:\n                    fp.write(raw_results)\n            elif (isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase)):\n                fp.write(encoded_results)\n            else:\n                pass\n    elif fetch:\n        storage_credentials = conn.get(Account.credentials_rest_entity_path)\n        if (storage_credentials['region_endpoint'] is not None):\n            boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'], aws_secret_access_key=storage_credentials['storage_secret_key'], security_token=storage_credentials['session_token'], host=storage_credentials['region_endpoint'])\n        else:\n            boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'], aws_secret_access_key=storage_credentials['storage_secret_key'], security_token=storage_credentials['session_token'])\n        log.info(('Starting download from result locations: [%s]' % ','.join(r['result_location'])))\n        num_result_dir = Command.find(self.id).num_result_dir\n        if ((include_header.lower() == 'true') and (qlog is not None)):\n            write_headers(qlog, fp)\n        for s3_path in r['result_location']:\n            _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)\n    else:\n        fp.write(','.join(r['result_location']))", "docstring": "Fetches the result for the command represented by this object\n\nget_results will retrieve results of the command and write to stdout by default.\nOptionally one can write to a filestream specified in `fp`. The `inline` argument\ndecides whether the result can be returned as a CRLF separated string. In cases where\nthe results are greater than 20MB, get_results will attempt to read from s3 and write\nto fp. The retrieval of results from s3 can be turned off by the `fetch` argument\n\nArgs:\n`fp`: a file object to write the results to directly\n`inline`: whether or not results are returned inline as CRLF separated string\n`fetch`: True to fetch the result even if it is greater than 20MB, False to\nonly get the result location on s3", "source": "codesearchnet"}
{"code": "def print_result_for_plain_cgi_script_from_tuple(contenttype_headers_content: WSGI_TUPLE_TYPE, status: str='200 OK') -> None:\n    (contenttype, headers, content) = contenttype_headers_content\n    print_result_for_plain_cgi_script(contenttype, headers, content, status)", "docstring": "Writes HTTP result to stdout.\n\nArgs:\ncontenttype_headers_content:\nthe tuple ``(contenttype, extraheaders, data)``\nstatus:\nHTTP status message (default ``\"200 OK\"``)", "source": "codesearchnet"}
{"code": "def measure_topology(script):\n    \n    filter_xml = '  <xmlfilter name=\"Compute Topological Measures\"/>\\n'\n    util.write_filter(script, filter_xml)\n    if isinstance(script, mlx.FilterScript):\n        script.parse_topology = True\n    return None", "docstring": "Compute a set of topological measures over a mesh\n\nArgs:\nscript: the mlx.FilterScript object or script filename to write\nthe filter to.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"}
{"code": "def extract_keywords_from_text(index_page, no_items=5):\n    \n    index_page = MLStripper.strip_tags(index_page)\n    tokenized_index = TextBlob(index_page).lower()\n\n    def to_str(key):\n        if isinstance(key, unicode):\n            return key.encode(\"utf-8\")\n\n        return key\n\n    present_keywords = [\n        KEYWORDS_LOWER[key]\n        for key in KEYWORDS_LOWER.keys()\n        if len(key) > 3 and key in tokenized_index\n    ]\n\n    def to_source_string(key):\n        source = \"Keyword analysis\"\n        try:\n            return SourceString(key, source)\n        except UnicodeEncodeError:\n            return SourceString(key.encode(\"utf-8\"), source)\n\n    multi_keywords = [\n        to_source_string(key)\n        for key in present_keywords\n        if tokenized_index.words.count(key) >= 1\n    ]\n\n    multi_keywords = sorted(multi_keywords, key=lambda x: len(x), reverse=True)\n\n    if len(multi_keywords) > no_items:\n        return multi_keywords[:no_items]\n\n    return multi_keywords", "docstring": "Try to process text on the `index_page` deduce the keywords and then try\nto match them on the Aleph's dataset.\n\nFunction returns maximally `no_items` items, to prevent spamming the user.\n\nArgs:\nindex_page (str): Content of the page as UTF-8 string\nno_items (int, default 5): Number of items to return.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "juraj-google-style"}
{"code": "def get_wulff_shape(self, material_id):\n        \n        from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n        from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str\n\n        structure = self.get_structure_by_material_id(material_id)\n        surfaces = self.get_surface_data(material_id)[\"surfaces\"]\n        lattice = (SpacegroupAnalyzer(structure)\n                   .get_conventional_standard_structure().lattice)\n        miller_energy_map = {}\n        for surf in surfaces:\n            miller = tuple(surf[\"miller_index\"])\n            \n            if (miller not in miller_energy_map) or surf[\"is_reconstructed\"]:\n                miller_energy_map[miller] = surf[\"surface_energy\"]\n        millers, energies = zip(*miller_energy_map.items())\n        return WulffShape(lattice, millers, energies)", "docstring": "Constructs a Wulff shape for a material.\n\nArgs:\nmaterial_id (str): Materials Project material_id, e.g. 'mp-123'.\nReturns:\npymatgen.analysis.wulff.WulffShape", "source": "juraj-google-style"}
{"code": "def _client_receive(self):\n    try:\n        return self._client.readline()\n    except socket.error as e:\n        raise errors.Error(self._device, f'Encountered socket error \"{e}\" reading RPC response') from e", "docstring": "Receives the server's response of an RPC message.\n\nReturns:\nRaw bytes of the response.\n\nRaises:\nerrors.Error: if a socket error occurred during the read.", "source": "github-repos"}
{"code": "def get_book_links(links):\n    book_links = []\n    for link in links:\n        data = DOWNER.download((link + '1'))\n        dom = dhtmlparser.parseString(data)\n        book_links.extend(_parse_book_links(dom))\n        max_page = _get_max_page(dom)\n        if (max_page == 1):\n            continue\n        for i in range((max_page - 1)):\n            data = DOWNER.download((link + str((i + 2))))\n            book_links.extend(_parse_book_links(dhtmlparser.parseString(data)))\n    return book_links", "docstring": "Go thru `links` to categories and return list to all publications in all\ngiven categories.\n\nArgs:\nlinks (list): List of strings (absolute links to categories).\n\nReturns:\nlist: List of strings / absolute links to book details.", "source": "codesearchnet"}
{"code": "def print_str(self, string):\n    (x, y) = self._cursor\n    for char in string:\n        if (char == '\\n'):\n            x = 0\n            y += 1\n            continue\n        if (char == '\\r'):\n            x = 0\n            continue\n        (x, y) = self._normalizeCursor(x, y)\n        self.draw_char(x, y, char, self._fg, self._bg)\n        x += 1\n    self._cursor = (x, y)", "docstring": "Print a string at the virtual cursor.\n\nHandles special characters such as '\\\\n' and '\\\\r'.\nPrinting past the bottom of the console will scroll everything upwards\nif :any:`set_mode` is set to 'scroll'.\n\nColors can be set with :any:`set_colors` and the virtual cursor can\nbe moved with :any:`move`.\n\nArgs:\nstring (Text): The text to print.\n\n.. seealso:: :any:`draw_str`, :any:`move`, :any:`set_colors`,\n:any:`set_mode`, :any:`write`, :any:`Window`", "source": "codesearchnet"}
{"code": "def get_first(self, status):\n    items = self.get_all(status)\n    if items:\n        return list(items.items())[0][1]\n    return None", "docstring": "Get the first item in the queue that has the given status.\n\nArgs:\nstatus (str): return the first item with this status.\n\nReturns:\n:class:`nyawc.QueueItem`: The first queue item with the given status.", "source": "codesearchnet"}
{"code": "def FileEntryExistsByPathSpec(self, path_spec):\n    \n    location = getattr(path_spec, 'location', None)\n    if location is None or not location.startswith(self.LOCATION_ROOT):\n      return False\n\n    if len(location) == 1:\n      return True\n\n    return self._cpio_archive_file.FileEntryExistsByPath(location[1:])", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"}
{"code": "def db_for_write(self, model, **hints):\n    try:\n        if (model.sf_access == READ_ONLY):\n            raise WriteNotSupportedError(('%r is a read-only model.' % model))\n    except AttributeError:\n        pass\n    return None", "docstring": "Prevent write actions on read-only tables.\n\nRaises:\nWriteNotSupportedError: If models.sf_access is ``read_only``.", "source": "codesearchnet"}
{"code": "def __init__(\n            self,\n            epsilon,\n            alphabet=None):\n        \n        self.bookeeping = None\n        self.groups = None\n        self.epsilon = epsilon\n        if alphabet is None:\n            alphabet = createalphabet()\n        self.alphabet = alphabet", "docstring": "Initialization Function\nArgs:\nepsilon (str): The epsilon symbol\nalphabet (list): The DFA Alphabet\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def run_init_ops(self, sess, tags, import_scope=None):\n    meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n    with sess.graph.as_default():\n        asset_tensors_dictionary = get_asset_tensors(self._export_dir, meta_graph_def, import_scope=import_scope)\n        init_op = get_init_op(meta_graph_def, import_scope)\n        if init_op is not None:\n            sess.run(fetches=[init_op], feed_dict=asset_tensors_dictionary)", "docstring": "Run initialization ops defined in the `MetaGraphDef`.\n\nArgs:\nsess: tf.compat.v1.Session to restore variable values.\ntags: a set of string tags identifying a MetaGraphDef.\nimport_scope: Optional `string` -- if specified, prepend this string\nfollowed by '/' to all loaded tensor names. This scope is applied to\ntensor instances loaded into the passed session, but it is *not* written\nthrough to the static `MetaGraphDef` protocol buffer that is returned.", "source": "github-repos"}
{"code": "def update_reminder(self, reminder):\n\t\t\n\t\turi = '/'.join([self.api_uri,\n\t\t\t\t\t\tself.reminders_suffix,\n\t\t\t\t\t\t])\n\t\t\n\t\tpayload = None\n\t\tif  type(reminder) is not StreakReminder:\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tpayload = reminder.to_dict(rw = True)\n\t\n\t\ttry:\n\t\t\turi = '/'.join([uri, reminder.attributes['key']])\n\t\texcept KeyError:\n\t\t\treturn requests.codes.bad_request, None\n\t\n\t\tcode, data = self._req('post', uri , json.dumps(payload))\n\t\t\n\t\treturn code, data", "docstring": "Updates a reminder with the provided attributes.\nArgs:\nreminder\t\tupdated reminder of StreakReminder type\nreturn\t\t\t(status code, reminder dict)", "source": "juraj-google-style"}
{"code": "def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \\\n            -> Tuple[bool, str, dict]:\n        \n        if not self._url_filter:\n            return True, 'nofilters', None\n\n        test_info = self._url_filter.test_info(url_info, url_record)\n\n        verdict = test_info['verdict']\n\n        if verdict:\n            reason = 'filters'\n        elif is_redirect and self.is_only_span_hosts_failed(test_info):\n            verdict = True\n            reason = 'redirect'\n        else:\n            reason = 'filters'\n\n        return verdict, reason, test_info", "docstring": "Consult the URL filter.\n\nArgs:\nurl_info: The URL info.\nurl_record: The URL record.\nis_redirect: Whether the request is a redirect and it is\ndesired that it spans hosts.\n\nReturns\ntuple:\n\n1. bool: The verdict\n2. str: A short reason string: nofilters, filters, redirect\n3. dict: The result from :func:`DemuxURLFilter.test_info`", "source": "juraj-google-style"}
{"code": "def get_instance(cls, device):\n        \n\n        if cls._nuis.get(device) is None:\n            cls._nuis[device] = AndroidUiautomationPoco(device)\n        return cls._nuis[device]", "docstring": "This is only a slot to store and get already initialized poco instance rather than initializing again. You can\nsimply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.\nIf no such AndroidUiautomationPoco instance, a new instance will be created and stored.\n\nArgs:\ndevice (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``\n\nReturns:\npoco instance", "source": "juraj-google-style"}
{"code": "def remove_alias(alias_names):\n    alias_table = get_alias_table()\n    for alias_name in alias_names:\n        if (alias_name not in alias_table.sections()):\n            raise CLIError(ALIAS_NOT_FOUND_ERROR.format(alias_name))\n        alias_table.remove_section(alias_name)\n    _commit_change(alias_table)", "docstring": "Remove one or more aliases.\n\nArgs:\nalias_names: The names of the aliases to be removed.", "source": "codesearchnet"}
{"code": "def __init__(self, value, indices=None, name=None):\n    del name\n    super(CSRSparseMatrix, self).__init__()\n    if isinstance(value, sparse_tensor.SparseTensor):\n        if indices is not None:\n            raise ValueError('indices must be None if value is a SparseTensor.')\n        self._dtype = value.dtype\n        self._csr_matrix = sm_ops.sparse_tensor_to_csr_sparse_matrix(indices=value.indices, values=value.values, dense_shape=value.dense_shape)\n    else:\n        value = ops.convert_to_tensor(value)\n        self._dtype = value.dtype\n        if indices is not None:\n            indices = ops.convert_to_tensor(indices, dtype=dtypes.int64)\n        else:\n            indices = array_ops.stop_gradient(array_ops.where(value))\n        self._csr_matrix = sm_ops.dense_to_csr_sparse_matrix(value, indices)\n    if self._eager_mode:\n        self._csr_matrix._handle_data = _make_handle_data(value)", "docstring": "Construct a CSRSparseMatrix from a dense matrix or SparseTensor.\n\nArgs:\nvalue: A dense `2D` or `3D` Tensor or `SparseTensor`.\nindices: The nonzero indices of `value`\n(if `value` is not a `SparseTensor`).\nname: Optional op name.\n\nRaises:\nValueError: if `value` is a `SparseTensor` and `indices` is not `None`.", "source": "github-repos"}
{"code": "def _build_instruction_ds(instructions):\n  \n  \n  tensor_inputs = {\n      \n      k: np.array(vals, dtype=np.int64) if k == \"mask_offset\" else list(vals)\n      for k, vals in utils.zip_dict(*instructions)\n  }\n  return tf.data.Dataset.from_tensor_slices(tensor_inputs)", "docstring": "Create a dataset containing individual instruction for each shard.\n\nEach instruction is a dict:\n```\n{\n\"filepath\": tf.Tensor(shape=(), dtype=tf.string),\n\"mask_offset\": tf.Tensor(shape=(), dtype=tf.int64),\n\"mask\": tf.Tensor(shape=(100,), dtype=tf.bool),\n}\n```\n\nArgs:\ninstructions: `list[dict]`, the list of instruction dict\n\nReturns:\ninstruction_ds: The dataset containing the instruction. The dataset size is\nthe number of shard.", "source": "juraj-google-style"}
{"code": "def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool=False) -> np.ndarray:\n    num_entries = max(mapping.values()) + 1\n    if sorted(set(mapping.values())) != list(range(num_entries)):\n        raise ValueError('The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s' % sorted(mapping.values()))\n    one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)\n    for aa_index, aa_type in enumerate(sequence):\n        if map_unknown_to_x:\n            if aa_type.isalpha() and aa_type.isupper():\n                aa_id = mapping.get(aa_type, mapping['X'])\n            else:\n                raise ValueError(f'Invalid character in the sequence: {aa_type}')\n        else:\n            aa_id = mapping[aa_type]\n        one_hot_arr[aa_index, aa_id] = 1\n    return one_hot_arr", "docstring": "Maps the given sequence into a one-hot encoded matrix.\n\nArgs:\nsequence: An amino acid sequence.\nmapping: A dictionary mapping amino acids to integers.\nmap_unknown_to_x: If True, any amino acid that is not in the mapping will be\nmapped to the unknown amino acid 'X'. If the mapping doesn't contain amino acid 'X', an error will be thrown.\nIf False, any amino acid not in the mapping will throw an error.\n\nReturns:\nA numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of the sequence.\n\nRaises:\nValueError: If the mapping doesn't contain values from 0 to\nnum_unique_aas - 1 without any gaps.", "source": "github-repos"}
{"code": "def index(self, name=None):\n    try:\n        return self.header.index(name)\n    except ValueError:\n        raise TableError(('Unknown index name %s.' % name))", "docstring": "Returns index number of supplied column name.\n\nArgs:\nname: string of column name.\n\nRaises:\nTableError: If name not found.\n\nReturns:\nIndex of the specified header entry.", "source": "codesearchnet"}
{"code": "def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):\n    try:\n        result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)\n    except ValueError as e:\n        raise tokenizer.ParseError(str(e))\n    tokenizer.NextToken()\n    return result", "docstring": "Consumes an integer number from tokenizer.\n\nArgs:\ntokenizer: A tokenizer used to parse the number.\nis_signed: True if a signed integer must be parsed.\nis_long: True if a long integer must be parsed.\n\nReturns:\nThe integer parsed.\n\nRaises:\nParseError: If an integer with given characteristics couldn't be consumed.", "source": "codesearchnet"}
{"code": "def _check_instance_type(type_constraint, instance, var_name=None, verbose=False):\n    hint_type = \"argument: '%s'\" % var_name if var_name is not None else 'return type'\n    try:\n        check_constraint(type_constraint, instance)\n    except SimpleTypeHintError:\n        if verbose:\n            verbose_instance = '%s, ' % instance\n        else:\n            verbose_instance = ''\n        raise TypeCheckError('Type-hint for %s violated. Expected an instance of %s, instead found %san instance of %s.' % (hint_type, type_constraint, verbose_instance, type(instance)))\n    except CompositeTypeHintError as e:\n        raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e))", "docstring": "A helper function to report type-hint constraint violations.\n\nArgs:\ntype_constraint: An instance of a 'TypeConstraint' or a built-in Python\ntype.\ninstance: The candidate object which will be checked by to satisfy\n'type_constraint'.\nvar_name: If 'instance' is an argument, then the actual name for the\nparameter in the original function definition.\n\nRaises:\nTypeCheckError: If 'instance' fails to meet the type-constraint of\n'type_constraint'.", "source": "github-repos"}
{"code": "def repeat(n: int, body: Callable[..., Union[core_types.TensorLike, Iterable]], inputs: Optional[List[core_types.TensorLike]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, name: Any=None) -> List[core_types.TensorLike]:\n\n    def _convert_to_list(xs):\n        if not isinstance(xs, (list, tuple)):\n            return [xs]\n        else:\n            return list(xs)\n\n    def cond(i, *args):\n        del args\n        return i < n\n\n    def body_wrapper(i, *args):\n        return [i + 1] + _convert_to_list(body(*args))\n    inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)\n    outputs = while_loop(cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)\n    outputs = _convert_to_list(outputs)\n    if len(outputs) == 1:\n        return outputs[0].op\n    else:\n        return outputs[1:]", "docstring": "Builds a training loop that executes a fixed number of iterations.\n\nThe set of loop-carried tensors correspond to `inputs`.\n`body` must be a function that takes and returns the values of the\nloop-carried tensors.\n\nArgs:\nn: the number of loop iterations\nbody: a Python function that builds the loop body.\ninputs: a list of initial values passed into the training loop or None\n(equivalent to an empty list).\ninfeed_queue: if not None, the infeed queue from which to append a tuple of\narguments as inputs to condition.\nname: (Deprecated) Does nothing.\n\nReturns:\nThe final values of the loop-carried tensors.\nRaises:\nValueError: if there is a type error.", "source": "github-repos"}
{"code": "def wire(self, name, receive=None, send=None, respond=None, **kwargs):\n    if (hasattr(self, name) and (name != 'main')):\n        raise AttributeError(\"cannot use '%s' as name for wire, attribute already exists\" % name)\n    if send:\n        self.log_debug((\"Wiring '%s'.send: %s\" % (name, send)))\n    if respond:\n        self.log_debug((\"Wiring '%s'.respond: %s\" % (name, respond)))\n    if receive:\n        self.log_debug((\"Wiring '%s'.receive: %s\" % (name, receive)))\n    wire = Wire(receive=receive, send=send, respond=respond)\n    wire.name = ('%s.%s' % (self.name, name))\n    wire.meta = kwargs.get('meta', {})\n    wire.on('receive', self.on_receive)\n    setattr(self, name, wire)\n    if (not self.main):\n        self.main = wire\n    return wire", "docstring": "Wires the link to a connection. Can be called multiple\ntimes to set up wires to different connections\n\nAfter creation wire will be accessible on the link via its name\nas an attribute.\n\nYou can undo this action with the cut() method\n\nArguments:\n\n- name (str): unique name for the wire\n\nKeyword Arguments:\n\n- receive (Connection): wire receiver to this connection\n- respond (Connection): wire responder to this connection\n- send (Connection): wire sender to this connection\n- meta (dict): attach these meta variables to any message\nsent from this wire\n\nReturns:\n\n- Wire: the created wire instance", "source": "codesearchnet"}
{"code": "def from_voigt(cls, voigt_input):\n        \n        voigt_input = np.array(voigt_input)\n        rank = sum(voigt_input.shape) \n        t = cls(np.zeros([3] * rank))\n        if voigt_input.shape != t._vscale.shape:\n            raise ValueError(\"Invalid shape for voigt matrix\")\n        voigt_input = voigt_input / t._vscale\n        this_voigt_map = t.get_voigt_dict(rank)\n        for ind in this_voigt_map:\n            t[ind] = voigt_input[this_voigt_map[ind]]\n        return cls(t)", "docstring": "Constructor based on the voigt notation vector or matrix.\n\nArgs:\nvoigt_input (array-like): voigt input for a given tensor", "source": "juraj-google-style"}
{"code": "def find_amplitude(chunk):\n    return (abs(int((chunk.max() - chunk.min()))) / config.SAMPLE_RANGE)", "docstring": "Calculate the 0-1 amplitude of an ndarray chunk of audio samples.\n\nSamples in the ndarray chunk are signed int16 values oscillating\nanywhere between -32768 and 32767. Find the amplitude between 0 and 1\nby taking the absolute difference between the maximum and minimum, and\ndividing by the sample range.\n\nArgs:\nchunk (numpy.ndarray): An array of int16 audio samples\n\nReturns:\nfloat: The amplitude of the sample between 0 and 1.\nNote that this is not a decibel representation of\nthe amplitude.", "source": "codesearchnet"}
{"code": "def relpath(path, start=None):\n    relative = get_instance(path).relpath(path)\n    if start:\n        return os_path_relpath(relative, start=start).replace('\\\\', '/')\n    return relative", "docstring": "Return a relative file path to path either from the\ncurrent directory or from an optional start directory.\n\nFor storage objects, \"path\" and \"start\" are relative to\nstorage root.\n\n\"/\" are not stripped on storage objects path. The ending slash is required\non some storage to signify that target is a directory.\n\nEquivalent to \"os.path.relpath\".\n\nArgs:\npath (path-like object): Path or URL.\nstart (path-like object): Relative from this optional directory.\nDefault to \"os.curdir\" for local files.\n\nReturns:\nstr: Relative path.", "source": "codesearchnet"}
{"code": "def create_keras_history(tensors):\n    _, created_layers = _create_keras_history_helper(tensors, set(), [])\n    return created_layers", "docstring": "Wraps TensorFlow Operations for compatibility with the Functional API.\n\nThis method checks to see if a Tensor in `tensors` is missing Keras metadata\nand has its origin in a Keras `Input` Layer. If so, this method will replace\nthe raw TensorFlow Operations that created this tensor with\n`TensorFlowOpLayer` instances that create identical operations.\n\nAny Tensors not originating from a Keras `Input` Layer will be treated as\nconstants when constructing `TensorFlowOpLayer` instances.\n\nArgs:\ntensors: A structure of Tensors, some of which come from raw TensorFlow\noperations and need to have Keras metadata assigned to them.\n\nReturns:\ncreated_layers: List. The `TensorFlowOpLayer` instances created to wrap\nthe raw Tensorflow operations.", "source": "github-repos"}
{"code": "def new(namespace, name, wdl, synopsis, documentation=None, api_url=fapi.PROD_API_ROOT):\n    r = fapi.update_workflow(namespace, name, synopsis, wdl, documentation, api_url)\n    fapi._check_response_code(r, 201)\n    d = r.json()\n    return Method(namespace, name, d['snapshotId'])", "docstring": "Create new FireCloud method.\n\nIf the namespace + name already exists, a new snapshot is created.\n\nArgs:\nnamespace (str): Method namespace for this method\nname (str): Method name\nwdl (file): WDL description\nsynopsis (str): Short description of task\ndocumentation (file): Extra documentation for method", "source": "codesearchnet"}
{"code": "def port_create_gre(br, port, id, remote):\n    if (not (0 <= id < (2 ** 32))):\n        return False\n    elif (not __salt__['dig.check_ip'](remote)):\n        return False\n    elif (not bridge_exists(br)):\n        return False\n    elif (port in port_list(br)):\n        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)\n        result = __salt__['cmd.run_all'](cmd)\n        return _retcode_to_bool(result['retcode'])\n    else:\n        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} options:key={3}'.format(br, port, remote, id)\n        result = __salt__['cmd.run_all'](cmd)\n        return _retcode_to_bool(result['retcode'])", "docstring": "Generic Routing Encapsulation - creates GRE tunnel between endpoints.\n\nArgs:\nbr: A string - bridge name.\nport: A string - port name.\nid: An integer - unsigned 32-bit number, tunnel's key.\nremote: A string - remote endpoint's IP address.\n\nReturns:\nTrue on success, else False.\n\n.. versionadded:: 2016.3.0\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10", "source": "codesearchnet"}
{"code": "def do_youtube_dl(worker, site, page):\n    with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:\n        ydl = _build_youtube_dl(worker, tempdir, site)\n        ie_result = _try_youtube_dl(worker, ydl, site, page)\n        outlinks = set()\n        if (ie_result and (ie_result.get('extractor') == 'youtube:playlist')):\n            outlinks = {('https://www.youtube.com/watch?v=' + e['id']) for e in ie_result.get('entries', [])}\n        return (ydl.fetch_spy.fetches, outlinks)", "docstring": "Runs youtube-dl configured for `worker` and `site` to download videos from\n`page`.\n\nArgs:\nworker (brozzler.BrozzlerWorker): the calling brozzler worker\nsite (brozzler.Site): the site we are brozzling\npage (brozzler.Page): the page we are brozzling\n\nReturns:\ntuple with two entries:\n`list` of `dict`: with info about urls fetched:\n[{\n'url': ...,\n'method': ...,\n'response_code': ...,\n'response_headers': ...,\n}, ...]\n`list` of `str`: outlink urls", "source": "codesearchnet"}
{"code": "def get_details(self, ids):\n        \n        if isinstance(ids, list):\n            if len(ids) > 5:\n                ids = ids[:5]\n            id_param = ';'.join(ids) + '/'\n        else:\n            ids = str(ids)\n            id_param = ids + '/'\n\n        header, content = self._http_request(id_param)\n        resp = json.loads(content)\n        if not self._is_http_response_ok(header):\n            error = resp.get('error_message', 'Unknown Error')\n            raise HttpException(header.status, header.reason, error) \n        return resp", "docstring": "Locu Venue Details API Call Wrapper\n\nArgs:\nlist of ids : ids of a particular venues to get insights about. Can process up to 5 ids", "source": "juraj-google-style"}
{"code": "def sawtooth(duration: int, amp: complex, period: float = None,\n             phase: float = 0, name: str = None) -> SamplePulse:\n    \n    if period is None:\n        period = duration\n\n    return _sampled_sawtooth_pulse(duration, amp, period, phase=phase, name=name)", "docstring": "Generates sawtooth wave `SamplePulse`.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude. Wave range is [-amp, amp].\nperiod: Pulse period, units of dt. If `None` defaults to single cycle.\nphase: Pulse phase.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def delete_recursively(dirname):\n    delete_recursively_v2(dirname)", "docstring": "Deletes everything under dirname recursively.\n\nArgs:\ndirname: string, a path to a directory\n\nRaises:\nerrors.OpError: If the operation fails.", "source": "github-repos"}
{"code": "def oauth_access(\n        self, *, client_id: str, client_secret: str, code: str, **kwargs\n    ) -> SlackResponse:\n        \n        kwargs.update(\n            {\"client_id\": client_id, \"client_secret\": client_secret, \"code\": code}\n        )\n        return self.api_call(\"oauth.access\", data=kwargs)", "docstring": "Exchanges a temporary OAuth verifier code for an access token.\n\nArgs:\nclient_id (str): Issued when you created your application. e.g. '4b39e9-752c4'\nclient_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'\ncode (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'", "source": "juraj-google-style"}
{"code": "def libdmtx_function(fname, restype, *args):\n    prototype = CFUNCTYPE(restype, *args)\n    return prototype((fname, load_libdmtx()))", "docstring": "Returns a foreign function exported by `libdmtx`.\n\nArgs:\nfname (:obj:`str`): Name of the exported function as string.\nrestype (:obj:): Return type - one of the `ctypes` primitive C data\ntypes.\n*args: Arguments - a sequence of `ctypes` primitive C data types.\n\nReturns:\ncddl.CFunctionType: A wrapper around the function.", "source": "codesearchnet"}
{"code": "def _command_template(self, switches, objectInput=None):\n    command = ['java', '-jar', self.file_jar, '-eUTF-8']\n    if self.memory_allocation:\n        command.append('-Xmx{}'.format(self.memory_allocation))\n    command.extend(switches)\n    if (not objectInput):\n        objectInput = subprocess.PIPE\n    log.debug('Subprocess command: {}'.format(', '.join(command)))\n    if six.PY2:\n        with open(os.devnull, 'w') as devnull:\n            out = subprocess.Popen(command, stdin=objectInput, stdout=subprocess.PIPE, stderr=devnull)\n    elif six.PY3:\n        out = subprocess.Popen(command, stdin=objectInput, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n    (stdoutdata, _) = out.communicate()\n    return stdoutdata.decode('utf-8').strip()", "docstring": "Template for Tika app commands\n\nArgs:\nswitches (list): list of switches to Tika app Jar\nobjectInput (object): file object/standard input to analyze\n\nReturn:\nStandard output data (unicode Python 2, str Python 3)", "source": "codesearchnet"}
{"code": "def cumsum(x, axis=0):\n    return math_ops.cumsum(x, axis=axis)", "docstring": "Cumulative sum of the values in a tensor, alongside the specified axis.\n\nArgs:\nx: A tensor or variable.\naxis: An integer, the axis to compute the sum.\n\nReturns:\nA tensor of the cumulative sum of values of `x` along `axis`.", "source": "github-repos"}
{"code": "def get_value_by_xy(self, x, y):\n    if ((x < self.xMin) or (x > self.xMax) or (y < self.yMin) or (y > self.yMax)):\n        return None\n    else:\n        row = (self.nRows - int(numpy.ceil(((y - self.yMin) / self.dx))))\n        col = int(numpy.floor(((x - self.xMin) / self.dx)))\n        value = self.data[row][col]\n        if (value == self.noDataValue):\n            return None\n        else:\n            return value", "docstring": "Get raster value by xy coordinates.\n\nArgs:\nx: X Coordinate.\ny: Y Coordinate.\n\nReturns:\nraster value, None if the input are invalid.", "source": "codesearchnet"}
{"code": "def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):\n    generated_texts = self.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs)\n    return [self.post_process_generation(text, cleanup_and_extract=False) for text in generated_texts]", "docstring": "Post-process the output of the model to decode the text.\n\nArgs:\ngenerated_outputs (`torch.Tensor` or `np.ndarray`):\nThe output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`\nor `(sequence_length,)`.\nskip_special_tokens (`bool`, *optional*, defaults to `True`):\nWhether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.\n**kwargs:\nAdditional arguments to be passed to the tokenizer's `batch_decode method`.\n\nReturns:\n`List[str]`: The decoded text.", "source": "github-repos"}
{"code": "def db_dp004(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `db_dp004`'.format(value))\n    self._db_dp004 = value", "docstring": "Corresponds to IDD Field `db_dp004`\nmean coincident dry-bulb temperature to\nDew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_dp004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "async def register_agent(self, short_name):\n        \n\n        await self.send_command(OPERATIONS.CMD_SET_AGENT, {'name': short_name},\n                                MESSAGES.SetAgentResponse)", "docstring": "Register to act as the RPC agent for this service.\n\nAfter this call succeeds, all requests to send RPCs to this service\nwill be routed through this agent.\n\nArgs:\nshort_name (str): A unique short name for this service that functions\nas an id", "source": "juraj-google-style"}
{"code": "def __init__(self, submit_timestamp, metric_id, value, metric=None, label=None):\n    self.submit_timestamp = submit_timestamp\n    self.metric_id = metric_id\n    self.label = label or metric.key.metric.namespace + '_' + parse_step(metric.key.step) + '_' + metric.key.metric.name\n    self.value = value", "docstring": "Initializes :class:`Metric`\n\nArgs:\nmetric (object): object of metric result\nsubmit_timestamp (float): date-time of saving metric to database\nmetric_id (uuid): unique id to identify test run\nvalue: value of metric\nlabel: custom metric name to be saved in database", "source": "github-repos"}
{"code": "def extract(self, log, basis, name, function=None):\n    intervals = {}\n    previous_ix = (- 1)\n    for (i, z) in enumerate(basis):\n        ix = self.read_at(z, index=True)\n        if (ix is None):\n            continue\n        if (ix == previous_ix):\n            intervals[ix].append(log[i])\n        else:\n            intervals[ix] = [log[i]]\n        previous_ix = ix\n    for (ix, data) in intervals.items():\n        f = (function or utils.null)\n        d = f(np.array(data))\n        self[ix].data[name] = d\n    return None", "docstring": "'Extract' a log into the components of a striplog.\n\nArgs:\nlog (array_like). A log or other 1D data.\nbasis (array_like). The depths or elevations of the log samples.\nname (str). The name of the attribute to store in the components.\nfunction (function). A function that takes an array as the only\ninput, and returns whatever you want to store in the 'name'\nattribute of the primary component.\nReturns:\nNone. The function works on the striplog in place.", "source": "codesearchnet"}
{"code": "def request(self, batch: Sequence[ExampleT], model: ModelT, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionT]:\n    raise NotImplementedError(type(self))", "docstring": "Makes a request to a remote inference service and returns the response.\nShould raise an exception of some kind if there is an error to enable the\nretry and client-side throttling logic to work. Returns an iterable of the\ndesired prediction type. This method should return the values directly, as\nhandling return values as a generator can prevent the retry logic from\nfunctioning correctly.\n\nArgs:\nbatch: A sequence of examples or features.\nmodel: The model used to make inferences.\ninference_args: Extra arguments for models whose inference call requires\nextra parameters.\n\nReturns:\nAn Iterable of Predictions.", "source": "github-repos"}
{"code": "def _narrow_unichr(code_point):\n    try:\n        if (len(code_point.char) > 1):\n            return code_point.char\n    except AttributeError:\n        pass\n    return six.unichr(code_point)", "docstring": "Retrieves the unicode character representing any given code point, in a way that won't break on narrow builds.\n\nThis is necessary because the built-in unichr function will fail for ordinals above 0xFFFF on narrow builds (UCS2);\nordinals above 0xFFFF would require recalculating and combining surrogate pairs. This avoids that by retrieving the\nunicode character that was initially read.\n\nArgs:\ncode_point (int|CodePoint): An int or a subclass of int that contains the unicode character representing its\ncode point in an attribute named 'char'.", "source": "codesearchnet"}
{"code": "def __clone_function(f, name=None):\n    if (not isinstance(f, types.FunctionType)):\n        raise SimTypeError('Given parameter is not a function.')\n    if (name is None):\n        name = f.__name__\n    newglobals = f.__globals__.copy()\n    globals_used = [x for x in f.__globals__ if (x in f.__code__.co_names)]\n    for x in globals_used:\n        gv = f.__globals__[x]\n        if isinstance(gv, types.FunctionType):\n            newglobals[x] = __clone_function(gv)\n        elif isinstance(gv, types.ModuleType):\n            newglobals[x] = gv\n        else:\n            newglobals[x] = copy.deepcopy(gv)\n    newfunc = types.FunctionType(f.__code__, newglobals, name, f.__defaults__, f.__closure__)\n    return newfunc", "docstring": "Make a new version of a function that has its own independent copy\nof any globals that it uses directly, and has its own name.\nAll other attributes are assigned from the original function.\n\nArgs:\nf: the function to clone\nname (str):  the name for the new function (if None, keep the same name)\n\nReturns:\nA copy of the function f, having its own copy of any globals used\n\nRaises:\nSimValueError", "source": "codesearchnet"}
{"code": "def _check_cores_output_sizes(self):\n    for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):\n        first_core_list = core_sizes[0][1:]\n        for (i, core_list) in enumerate(core_sizes[1:]):\n            if (core_list[1:] != first_core_list):\n                raise ValueError(('The outputs of the provided cores are not able to be concatenated along the first feature dimension. Core 0 has shape %s, whereas Core %d has shape %s - these must only differ in the first dimension' % (core_sizes[0], (i + 1), core_list)))", "docstring": "Checks the output_sizes of the cores of the DeepRNN module.\n\nRaises:\nValueError: if the outputs of the cores cannot be concatenated along their\nfirst dimension.", "source": "codesearchnet"}
{"code": "def _post_process_apply(self, result_data, axis, try_scale=True):\n    if try_scale:\n        try:\n            internal_index = self.compute_index(0, result_data, True)\n        except IndexError:\n            internal_index = self.compute_index(0, result_data, False)\n        try:\n            internal_columns = self.compute_index(1, result_data, True)\n        except IndexError:\n            internal_columns = self.compute_index(1, result_data, False)\n    else:\n        internal_index = self.compute_index(0, result_data, False)\n        internal_columns = self.compute_index(1, result_data, False)\n    if (not axis):\n        index = internal_index\n        if (len(internal_columns) != len(self.columns)):\n            columns = internal_columns\n        else:\n            columns = self.columns\n    else:\n        columns = internal_columns\n        if (len(internal_index) != len(self.index)):\n            index = internal_index\n        else:\n            index = self.index\n    return self.__constructor__(result_data, index, columns)", "docstring": "Recompute the index after applying function.\n\nArgs:\nresult_data: a BaseFrameManager object.\naxis: Target axis along which function was applied.\n\nReturns:\nA new PandasQueryCompiler.", "source": "codesearchnet"}
{"code": "def tensor_name(self):\n    return _get_tensor_name(self.node_name, self.output_slot)", "docstring": "Name of the tensor watched by the debug op.\n\nReturns:\n(`str`) `Tensor` name, in the form of `node_name`:`output_slot`", "source": "github-repos"}
{"code": "def find_function(self, context, funname):\n    if (funname in self.builtins):\n        return self.builtins[funname]\n    func = None\n    if isinstance(context, dict):\n        if (funname in context):\n            func = context[funname]\n            if isinstance(func, str):\n                func = self._deferred_add(func)\n                context[funname] = func\n    elif hasattr(context, funname):\n        func = getattr(context, funname)\n    if (func is None):\n        raise NotFoundError('Function not found', function=funname)\n    return func", "docstring": "Find a function in the given context by name.\n\nThis function will first search the list of builtins and if the\ndesired function is not a builtin, it will continue to search\nthe given context.\n\nArgs:\ncontext (object): A dict or class that is a typedargs context\nfunname (str): The name of the function to find\n\nReturns:\ncallable: The found function.", "source": "codesearchnet"}
{"code": "def update_conversation(self, conversation):\n        \n        \n        \n        \n        \n\n        new_state = conversation.self_conversation_state\n        old_state = self._conversation.self_conversation_state\n        self._conversation = conversation\n\n        \n        if not new_state.delivery_medium_option:\n            new_state.delivery_medium_option.extend(\n                old_state.delivery_medium_option\n            )\n\n        \n        old_timestamp = old_state.self_read_state.latest_read_timestamp\n        new_timestamp = new_state.self_read_state.latest_read_timestamp\n        if new_timestamp == 0:\n            new_state.self_read_state.latest_read_timestamp = old_timestamp\n\n        \n        for new_entry in conversation.read_state:\n            tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp)\n            if tstamp == 0:\n                continue\n            uid = parsers.from_participantid(new_entry.participant_id)\n            if uid not in self._watermarks or self._watermarks[uid] < tstamp:\n                self._watermarks[uid] = tstamp", "docstring": "Update the internal state of the conversation.\n\nThis method is used by :class:`.ConversationList` to maintain this\ninstance.\n\nArgs:\nconversation: ``Conversation`` message.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_object):\n    \n    if not file_object:\n      raise ValueError('Missing file-like object.')\n\n    \n    self._file_object = file_object\n    \n    \n    \n    tsk_img_type = getattr(\n        pytsk3, 'TSK_IMG_TYPE_EXTERNAL', pytsk3.TSK_IMG_TYPE_RAW)\n    \n    \n    pytsk3.Img_Info.__init__(self, url='', type=tsk_img_type)", "docstring": "Initializes an image object.\n\nArgs:\nfile_object (FileIO): file-like object.\n\nRaises:\nValueError: if the file-like object is invalid.", "source": "juraj-google-style"}
{"code": "def decode_metar(self, metar):\n    try:\n        from metar import Metar\n    except:\n        return 'Unable to parse metars. Please install parser from https:\n    m = Metar.Metar(metar)\n    return m.string()", "docstring": "Simple method that decodes a given metar string.\n\nArgs:\nmetar (str): The metar data\n\nReturns:\nThe metar data in readable format\n\nExample::\n\nfrom pyflightdata import FlightData\nf=FlightData()\nf.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')", "source": "codesearchnet"}
{"code": "def GetLVMLogicalVolumeByPathSpec(self, path_spec):\n    \n    volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)\n    if volume_index is None:\n      return None\n    return self._vslvm_volume_group.get_logical_volume(volume_index)", "docstring": "Retrieves a LVM logical volume for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyvslvm.logical_volume: a LVM logical volume or None if not available.", "source": "juraj-google-style"}
{"code": "def __init__(self, file_system, mount_point):\n    \n    if not file_system or not mount_point:\n      raise ValueError('Missing file system or mount point value.')\n\n    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(\n        file_system.type_indicator):\n      if not hasattr(mount_point, 'location'):\n        raise errors.PathSpecError(\n            'Mount point path specification missing location.')\n\n    super(FileSystemSearcher, self).__init__()\n    self._file_system = file_system\n    self._mount_point = mount_point", "docstring": "Initializes a file system searcher.\n\nArgs:\nfile_system (FileSystem): file system.\nmount_point (PathSpec): mount point path specification that refers\nto the base location of the file system.\n\nRaises:\nPathSpecError: if the mount point path specification is incorrect.\nValueError: when file system or mount point is not set.", "source": "juraj-google-style"}
{"code": "def _dump_to_pages(dump):\n    pos = 0\n    ret = []\n    start_tag = u'<page>\\n'\n    end_tag = u'</page>\\n'\n    while True:\n        start_pos = dump.find(start_tag, pos)\n        if (start_pos == (- 1)):\n            break\n        start_pos += len(start_tag)\n        end_pos = dump.find(end_tag, start_pos)\n        if (end_pos == (- 1)):\n            break\n        ret.append(dump[start_pos:end_pos])\n        pos = (end_pos + len(end_tag))\n    return ret", "docstring": "Extract pages from an xml dump.\n\nArgs:\ndump: a unicode string\nReturns:\na list of unicode strings", "source": "codesearchnet"}
{"code": "def trace2(A, B):\n    r\n    A = asarray(A, float)\n    B = asarray(B, float)\n\n    layout_error = \"Wrong matrix layout.\"\n\n    if not (len(A.shape) == 2 and len(B.shape) == 2):\n        raise ValueError(layout_error)\n\n    if not (A.shape[1] == B.shape[0] and A.shape[0] == B.shape[1]):\n        raise ValueError(layout_error)\n\n    return _sum(A.T * B)", "docstring": "r\"\"\"Trace of :math:`\\mathrm A \\mathrm B^\\intercal`.\n\nArgs:\nA (array_like): Left-hand side.\nB (array_like): Right-hand side.\n\nReturns:\nfloat: Trace of :math:`\\mathrm A \\mathrm B^\\intercal`.", "source": "juraj-google-style"}
{"code": "def thread_safe_client(client, lock=None):\n    \n    if lock is None:\n        lock = threading.Lock()\n    return _ThreadSafeProxy(client, lock)", "docstring": "Create a thread-safe proxy which locks every method call\nfor the given client.\n\nArgs:\nclient: the client object to be guarded.\nlock: the lock object that will be used to lock client's methods.\nIf None, a new lock will be used.\n\nReturns:\nA thread-safe proxy for the given client.", "source": "juraj-google-style"}
{"code": "def as_list(self):\n    if self._dims is None:\n        raise ValueError('as_list() is not defined on an unknown TensorShape.')\n    return list(self._dims)", "docstring": "Returns a list of integers or `None` for each dimension.\n\nReturns:\nA list of integers or `None` for each dimension.\n\nRaises:\nValueError: If `self` is an unknown shape with an unknown rank.", "source": "github-repos"}
{"code": "def _to_df(self, result, handle_annotations=None):\n    annotations = result._data\n    if (handle_annotations == 'first'):\n        annotations = [annotations[0]]\n    face_results = []\n    for (i, annotation) in enumerate(annotations):\n        data_dict = {}\n        for (field, val) in annotation.items():\n            if ('Confidence' in field):\n                data_dict[('face_' + field)] = val\n            elif ('oundingPoly' in field):\n                for (j, vertex) in enumerate(val['vertices']):\n                    for dim in ['x', 'y']:\n                        name = ('%s_vertex%d_%s' % (field, (j + 1), dim))\n                        val = (vertex[dim] if (dim in vertex) else np.nan)\n                        data_dict[name] = val\n            elif (field == 'landmarks'):\n                for lm in val:\n                    name = (('landmark_' + lm['type']) + '_%s')\n                    lm_pos = {(name % k): v for (k, v) in lm['position'].items()}\n                    data_dict.update(lm_pos)\n            else:\n                data_dict[field] = val\n        face_results.append(data_dict)\n    return pd.DataFrame(face_results)", "docstring": "Converts a Google API Face JSON response into a Pandas Dataframe.\n\nArgs:\nresult (ExtractorResult): Result object from which to parse out a\nDataframe.\nhandle_annotations (str): How returned face annotations should be\nhandled in cases where there are multiple faces.\n'first' indicates to only use the first face JSON object, all\nother values will default to including every face.", "source": "codesearchnet"}
{"code": "def expand_docstring(**kwargs):\n  \n  def _fn_wrapped(fn):\n    \n    doc = inspect.cleandoc(fn.__doc__)\n    for k, v in six.iteritems(kwargs):\n      \n      \n      \n      pattern = r'\\$\\{' + str(k) + r'\\}'\n      doc = re.sub(pattern, lambda match: v, doc)  \n    fn.__doc__ = doc\n    return fn\n  return _fn_wrapped", "docstring": "Decorator to programmatically expand the docstring.\n\nArgs:\n**kwargs: Keyword arguments to set. For each key-value pair `k` and `v`,\nthe key is found as `${k}` in the docstring and replaced with `v`.\n\nReturns:\nDecorated function.", "source": "juraj-google-style"}
{"code": "def _match_dbname(self, dbname):\n        \n        for config in self._clusters:\n            if re.match(config['pattern'], dbname):\n                return config\n        raise Exception('No such database %s.' % dbname)", "docstring": "Map a database name to the Cluster that holds the database.\n\nArgs:\ndbname: A database name.\n\nReturns:\nA dict containing the information about the Cluster that holds the\ndatabase.", "source": "juraj-google-style"}
{"code": "def get_arrive_stop(self, **kwargs):\n    params = {'idStop': kwargs.get('stop_number'), 'cultureInfo': util.language_code(kwargs.get('lang'))}\n    result = self.make_request('geo', 'get_arrive_stop', **params)\n    if (not util.check_result(result, 'arrives')):\n        return (False, 'UNKNOWN ERROR')\n    values = util.response_list(result, 'arrives')\n    return (True, [emtype.Arrival(**a) for a in values])", "docstring": "Obtain bus arrival info in target stop.\n\nArgs:\nstop_number (int): Stop number to query.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Arrival]), or message string\nin case of error.", "source": "codesearchnet"}
{"code": "def is_active(self):\n    if ((not self._is_active) and self._is_active_lock.acquire(False)):\n        if self._is_active:\n            self._is_active_lock.release()\n        else:\n\n            def compute_is_active():\n                self._is_active = any(self.generate_run_to_tools())\n                self._is_active_lock.release()\n            new_thread = threading.Thread(target=compute_is_active, name='ProfilePluginIsActiveThread')\n            new_thread.start()\n    return self._is_active", "docstring": "Whether this plugin is active and has any profile data to show.\n\nDetecting profile data is expensive, so this process runs asynchronously\nand the value reported by this method is the cached value and may be stale.\n\nReturns:\nWhether any run has profile data.", "source": "codesearchnet"}
{"code": "def _get_help_for_command_prefix(self, cmd_prefix):\n    lines = []\n    resolved_prefix = self._resolve_prefix(cmd_prefix)\n    if not resolved_prefix:\n        lines.append('Invalid command prefix: \"%s\"' % cmd_prefix)\n        return lines\n    lines.append(resolved_prefix)\n    if resolved_prefix in self._prefix_to_aliases:\n        lines.append(HELP_INDENT + 'Aliases: ' + ', '.join(self._prefix_to_aliases[resolved_prefix]))\n    lines.append('')\n    help_lines = self._prefix_to_help[resolved_prefix].split('\\n')\n    for line in help_lines:\n        lines.append(HELP_INDENT + line)\n    return lines", "docstring": "Compile the help information for a given command prefix.\n\nArgs:\ncmd_prefix: Command prefix, as the prefix itself or one of its aliases.\n\nReturns:\nA list of str as the help information for cmd_prefix. If the cmd_prefix\ndoes not exist, the returned list of str will indicate that.", "source": "github-repos"}
{"code": "def projection_name(self, **kwargs: Dict[(str, Any)]) -> str:\n    return self.projection_name_format.format(**kwargs)", "docstring": "Define the projection name for this projector.\n\nNote:\nThis function is just a basic placeholder and likely should be overridden.\n\nArgs:\nkwargs: Projection information dict combined with additional arguments passed to the\nprojection function.\nReturns:\nProjection name string formatted with the passed options. By default, it returns\n``projection_name_format`` formatted with the arguments to this function.", "source": "codesearchnet"}
{"code": "def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray]=None, chain_index: Optional[np.ndarray]=None, remark: Optional[str]=None, parents: Optional[Sequence[str]]=None, parents_chain_index: Optional[Sequence[int]]=None) -> Protein:\n    return Protein(aatype=features['aatype'], atom_positions=result['final_atom_positions'], atom_mask=result['final_atom_mask'], residue_index=features['residue_index'] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index)", "docstring": "Assembles a protein from a prediction.\n\nArgs:\nfeatures: Dictionary holding model inputs.\nresult: Dictionary holding model outputs.\nb_factors: (Optional) B-factors to use for the protein.\nchain_index: (Optional) Chain indices for multi-chain predictions\nremark: (Optional) Remark about the prediction\nparents: (Optional) List of template names\nReturns:\nA protein instance.", "source": "github-repos"}
{"code": "def values(self, column_major=False):\n        \n        if column_major:\n            return list(map(list, zip(*self._values)))\n        return [row[:] for row in self._values]", "docstring": "Return a nested list with the worksheet values.\n\nArgs:\ncolumn_major (bool): as list of columns (default list of rows)\nReturns:\nlist: list of lists with values", "source": "juraj-google-style"}
{"code": "def plot_carriers(self, temp=300):\n        \n        import matplotlib.pyplot as plt\n        plt.semilogy(self._bz.mu_steps,\n                     abs(self._bz._carrier_conc[temp] / (self._bz.vol * 1e-24)),\n                     linewidth=3.0, color='r')\n        self._plot_bg_limits()\n        self._plot_doping(temp)\n        plt.xlim(-0.5, self._bz.gap + 0.5)\n        plt.ylim(1e14, 1e22)\n        plt.ylabel(\"carrier concentration (cm-3)\", fontsize=30.0)\n        plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30)\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n        return plt", "docstring": "Plot the carrier concentration in function of Fermi level\n\nArgs:\ntemp: the temperature\n\nReturns:\na matplotlib object", "source": "juraj-google-style"}
{"code": "def _getlatest_ami_id(context):\n  \n  try:\n    response = context.aws_client(\"ec2\").describe_images(\n        Filters=[\n            {\"Name\": \"is-public\", \"Values\": [\"false\"]},\n            {\"Name\": \"name\", \"Values\": [context.service_name + EFConfig.AMI_SUFFIX + \"*\"]}\n        ])\n  except:\n    return None\n  if len(response[\"Images\"]) > 0:\n    return sorted(response[\"Images\"], key=itemgetter('CreationDate'), reverse=True)[0][\"ImageId\"]\n  else:\n    return None", "docstring": "Get the most recent AMI ID for a service\nArgs:\ncontext: a populated EFVersionContext object\nReturns:\nImageId or None if no images exist or on error", "source": "juraj-google-style"}
{"code": "def read_avro(file_path_or_buffer, schema=None, **kwargs):\n    \n    if isinstance(file_path_or_buffer, six.string_types):\n        with open(file_path_or_buffer, 'rb') as f:\n            return __file_to_dataframe(f, schema, **kwargs)\n    else:\n        return __file_to_dataframe(file_path_or_buffer, schema, **kwargs)", "docstring": "Avro file reader.\n\nArgs:\nfile_path_or_buffer: Input file path or file-like object.\nschema: Avro schema.\n**kwargs: Keyword argument to pandas.DataFrame.from_records.\n\nReturns:\nClass of pd.DataFrame.", "source": "juraj-google-style"}
{"code": "def MultiOpenOrdered(self, urns, **kwargs):\n    precondition.AssertIterableType(urns, rdfvalue.RDFURN)\n    urn_filedescs = {}\n    for filedesc in self.MultiOpen(urns, **kwargs):\n        urn_filedescs[filedesc.urn] = filedesc\n    filedescs = []\n    for urn in urns:\n        try:\n            filedescs.append(urn_filedescs[urn])\n        except KeyError:\n            raise IOError(('No associated AFF4 object for `%s`' % urn))\n    return filedescs", "docstring": "Opens many URNs and returns handles in the same order.\n\n`MultiOpen` can return file handles in arbitrary order. This makes it more\nefficient and in most cases the order does not matter. However, there are\ncases where order is important and this function should be used instead.\n\nArgs:\nurns: A list of URNs to open.\n**kwargs: Same keyword arguments as in `MultiOpen`.\n\nReturns:\nA list of file-like objects corresponding to the specified URNs.\n\nRaises:\nIOError: If one of the specified URNs does not correspond to the AFF4\nobject.", "source": "codesearchnet"}
{"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n    \n    super(DataTypeDefinition, self).__init__()\n    self.aliases = aliases or []\n    self.description = description\n    self.name = name\n    self.urls = urls", "docstring": "Initializes a data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"}
{"code": "def cvt2frames(self,\n                   frame_dir,\n                   file_start=0,\n                   filename_tmpl='{:06d}.jpg',\n                   start=0,\n                   max_num=0,\n                   show_progress=True):\n        \n        mkdir_or_exist(frame_dir)\n        if max_num == 0:\n            task_num = self.frame_cnt - start\n        else:\n            task_num = min(self.frame_cnt - start, max_num)\n        if task_num <= 0:\n            raise ValueError('start must be less than total frame number')\n        if start > 0:\n            self._set_real_position(start)\n\n        def write_frame(file_idx):\n            img = self.read()\n            filename = osp.join(frame_dir, filename_tmpl.format(file_idx))\n            cv2.imwrite(filename, img)\n\n        if show_progress:\n            track_progress(write_frame, range(file_start,\n                                              file_start + task_num))\n        else:\n            for i in range(task_num):\n                img = self.read()\n                if img is None:\n                    break\n                filename = osp.join(frame_dir,\n                                    filename_tmpl.format(i + file_start))\n                cv2.imwrite(filename, img)", "docstring": "Convert a video to frame images\n\nArgs:\nframe_dir (str): Output directory to store all the frame images.\nfile_start (int): Filenames will start from the specified number.\nfilename_tmpl (str): Filename template with the index as the\nplaceholder.\nstart (int): The starting frame index.\nmax_num (int): Maximum number of frames to be written.\nshow_progress (bool): Whether to show a progress bar.", "source": "juraj-google-style"}
{"code": "def __init__(self, x=0, y=0, w=0, h=0):\n        \n        self._ptr = ffi.new('SDL_Rect *', [x, y, w, h])", "docstring": "Construct a new Rect with the given position and size.\n\nArgs:\nx (int): The x position of the upper left corner of the rectangle.\ny (int): The y position of the upper left corner of the rectangle.\nw (int): The width of the rectangle.\nh (int): The height of the rectangle.", "source": "juraj-google-style"}
{"code": "def run(self, source, **kwargs):\n    kwargs['output'] = self.__graph__()\n    if isinstance(source, str):\n        import json\n        source = json.loads(source)\n    self.source = source\n    super(JSONProcessor, self).run(**kwargs)\n    self.output = kwargs['output']\n    return output", "docstring": "Method takes a JSON source and any keywords and transforms from\nJSON to Lean BIBFRAME 2.0 triples\n\nArgs:\n\n----\nsource: str, dict", "source": "codesearchnet"}
{"code": "def process_and_frame(self, doc: Document):\n        \n        nested_docs = self.process_ems(doc)\n        parent_kg = doc.cdr_document.get('knowledge_graph', None)\n        if parent_kg:\n            if nested_docs and len(nested_docs) > 0:\n                for nested_doc in nested_docs:\n                    json_doc = nested_doc.cdr_document\n                    doc_id = json_doc['doc_id']\n                    if doc_id != doc.doc_id:\n                        for field_name in list(parent_kg):\n                            field_extractions = parent_kg[field_name]\n                            if not isinstance(field_extractions, list):\n                                field_extractions = [field_extractions]\n                            for i in range(0, len(field_extractions)):\n                                field_extraction = field_extractions[i]\n                                if 'value' in field_extraction and field_extraction['value'] == doc_id:\n                                    del field_extractions[i]\n                                    field_extractions.append(\n                                        {'value': json_doc, 'key': field_extraction['key'], 'is_nested': True})", "docstring": "Processes a document and if it has child docs, embeds them in the parent document. Only works for 1 level of\nnesting. Kind of hack, will implement properly later\nArgs:\ndoc: input document to be run etk modules on\n\nReturns:", "source": "juraj-google-style"}
{"code": "def deployment_groups(self):\n    if (not self.__deployment_groups):\n        self.__deployment_groups = DeploymentGroups(self.__connection)\n    return self.__deployment_groups", "docstring": "Gets the Deployment Groups API client.\n\nReturns:\nDeploymentGroups:", "source": "codesearchnet"}
{"code": "def reduce(x, op='sum'):\n    \n    import warnings\n    warnings.warn(\n        \"Deprecated API. Use ``sum`` or ``mean`` instead.\", DeprecationWarning)\n    from .function_bases import reduce_sum, reduce_mean\n    if op == 'sum':\n        return reduce_sum(x)\n    elif op == 'mean':\n        return reduce_mean(x)\n    raise ValueError()", "docstring": "Reduction function with given operation.\n\nArgs:\nx (Variable): An input.\nop (str): 'sum' or 'mean'.\n\nNote:\nThis is deprecated. Use ``mean`` or ``sum`` instead.", "source": "juraj-google-style"}
{"code": "def filter_with_legacy_function(self, predicate) -> 'DatasetV2':\n    from tensorflow.python.data.ops import filter_op\n    return filter_op._FilterDataset(self, predicate, use_legacy_function=True)", "docstring": "Filters this dataset according to `predicate`.\n\nNote: This is an escape hatch for existing uses of `filter` that do not work\nwith V2 functions. New uses are strongly discouraged and existing uses\nshould migrate to `filter` as this method will be removed in V2.\n\nArgs:\npredicate: A function mapping a (nested) structure of tensors (having\nshapes and types defined by `self.output_shapes` and\n`self.output_types`) to a scalar `tf.bool` tensor.\n\nReturns:\nDataset: The `Dataset` containing the elements of this dataset for which\n`predicate` is `True`.", "source": "github-repos"}
{"code": "def skip(self, delta):\n\n    def update_fn(v):\n        return self._skip_single_var(v, delta)\n    if values_util.is_saving_non_distributed():\n        return update_fn(self.state)\n    if self._distribution_strategy is not None:\n        with distribute_lib.enter_or_assert_strategy(self._distribution_strategy):\n            if distribute_lib.in_cross_replica_context():\n                values_util.mark_as_unsaveable()\n            if distribute_lib.in_cross_replica_context() or 'CentralStorage' in type(self._distribution_strategy).__name__:\n                return distribute_lib.get_strategy().extended.update(self.state, update_fn)\n    return update_fn(self.state)", "docstring": "Advance the counter of a counter-based RNG.\n\nArgs:\ndelta: the amount of advancement. The state of the RNG after\n`skip(n)` will be the same as that after `normal([n])`\n(or any other distribution). The actual increment added to the\ncounter is an unspecified implementation detail.\n\nReturns:\nA `Tensor` of type `int64`.", "source": "github-repos"}
{"code": "def transpose(self, name=None, activate_final=None):\n    if (name is None):\n        name = (self.module_name + '_transpose')\n    if (activate_final is None):\n        activate_final = self.activate_final\n    output_sizes = [(lambda l=layer: l.input_shape[1]) for layer in self._layers]\n    output_sizes.reverse()\n    return MLP(name=name, output_sizes=output_sizes, activation=self.activation, activate_final=activate_final, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, use_bias=self.use_bias, use_dropout=self.use_dropout)", "docstring": "Returns transposed `MLP`.\n\nArgs:\nname: Optional string specifying the name of the transposed module. The\ndefault name is constructed by appending \"_transpose\"\nto `self.module_name`.\nactivate_final: Optional boolean determining if the activation and batch\nnormalization, if turned on, are applied to the final layer.\n\nReturns:\nMatching transposed `MLP` module.", "source": "codesearchnet"}
{"code": "def clean_df(df, fill_nan=True, drop_empty_columns=True):\n    \n    if fill_nan:\n        df = df.fillna(value=np.nan)\n    if drop_empty_columns:\n        df = df.dropna(axis=1, how='all')\n    return df.sort_index()", "docstring": "Clean a pandas dataframe by:\n1. Filling empty values with Nan\n2. Dropping columns with all empty values\n\nArgs:\ndf: Pandas DataFrame\nfill_nan (bool): If any empty values (strings, None, etc) should be replaced with NaN\ndrop_empty_columns (bool): If columns whose values are all empty should be dropped\n\nReturns:\nDataFrame: cleaned DataFrame", "source": "juraj-google-style"}
{"code": "def _arguments(code, module):\n    arg_parser = CommandParser.create('')\n    try:\n        builtins = {'source': _table, 'datestring': _datestring}\n        env = {}\n        env.update(builtins)\n        exec(code, env)\n        for key in env:\n            if ((key in builtins) or (key[0] == '_')):\n                continue\n            val = env[key]\n            key = ('--%s' % key)\n            if isinstance(val, bool):\n                if val:\n                    arg_parser.add_argument(key, default=val, action='store_true')\n                else:\n                    arg_parser.add_argument(key, default=val, action='store_false')\n            elif (isinstance(val, basestring) or isinstance(val, int) or isinstance(val, float) or isinstance(val, int)):\n                arg_parser.add_argument(key, default=val)\n            elif isinstance(val, list):\n                arg_parser.add_argument(key, default=val, nargs='+')\n            elif isinstance(val, tuple):\n                arg_parser.add_argument(key, default=list(val), nargs='+')\n            elif (isinstance(val, dict) and ('type' in val)):\n                if (val['type'] == 'datestring'):\n                    arg_parser.add_argument(key, default='', type=_make_string_formatter(val['format'], offset=val['offset']))\n                elif (val['type'] == 'table'):\n                    if (val['format'] is not None):\n                        arg_parser.add_argument(key, default='', type=_make_table_formatter(val['format'], offset=val['offset']))\n                    else:\n                        arg_parser.add_argument(key, default=val['name'], type=_make_table)\n                else:\n                    raise Exception(('Cannot generate argument for %s of type %s' % (key, type(val))))\n            else:\n                raise Exception(('Cannot generate argument for %s of type %s' % (key, type(val))))\n    except Exception as e:\n        print((\"%%sql arguments: %s from code '%s'\" % (str(e), str(code))))\n    return arg_parser", "docstring": "Define pipeline arguments.\n\nArgs:\ncode: the Python code to execute that defines the arguments.", "source": "codesearchnet"}
{"code": "def cost_matrix(self, set_a, set_b, time_a, time_b):\n        \n        costs = np.zeros((len(set_a), len(set_b)))\n        for a, item_a in enumerate(set_a):\n            for b, item_b in enumerate(set_b):\n                costs[a, b] = self.total_cost_function(item_a, item_b, time_a, time_b)\n        return costs", "docstring": "Calculates the costs (distances) between the items in set a and set b at the specified times.\n\nArgs:\nset_a: List of STObjects\nset_b: List of STObjects\ntime_a: time at which objects in set_a are evaluated\ntime_b: time at whcih object in set_b are evaluated\n\nReturns:\nA numpy array with shape [len(set_a), len(set_b)] containing the cost matrix between the items in set a\nand the items in set b.", "source": "juraj-google-style"}
{"code": "def rebalance(self, weight, child, base=np.nan, update=True):\n    if (weight == 0):\n        if (child in self.children):\n            return self.close(child)\n        else:\n            return\n    if np.isnan(base):\n        base = self.value\n    if (child not in self.children):\n        c = SecurityBase(child)\n        c.setup(self._universe)\n        c.update(self.now)\n        self._add_child(c)\n    c = self.children[child]\n    delta = (weight - c.weight)\n    c.allocate((delta * base))", "docstring": "Rebalance a child to a given weight.\n\nThis is a helper method to simplify code logic. This method is used\nwhen we want to se the weight of a particular child to a set amount.\nIt is similar to allocate, but it calculates the appropriate allocation\nbased on the current weight.\n\nArgs:\n* weight (float): The target weight. Usually between -1.0 and 1.0.\n* child (str): child to allocate to - specified by name.\n* base (float): If specified, this is the base amount all weight\ndelta calculations will be based off of. This is useful when we\ndetermine a set of weights and want to rebalance each child\ngiven these new weights. However, as we iterate through each\nchild and call this method, the base (which is by default the\ncurrent value) will change. Therefore, we can set this base to\nthe original value before the iteration to ensure the proper\nallocations are made.\n* update (bool): Force update?", "source": "codesearchnet"}
{"code": "def DeleteGRRTempFile(path):\n  \n  precondition.AssertType(path, Text)\n\n  if not os.path.isabs(path):\n    raise ErrorBadPath(\"Path must be absolute\")\n\n  prefix = config.CONFIG[\"Client.tempfile_prefix\"]\n  directories = [\n      GetTempDirForRoot(root) for root in config.CONFIG[\"Client.tempdir_roots\"]\n  ]\n  if not _CheckIfPathIsValidForDeletion(\n      path, prefix=prefix, directories=directories):\n    msg = (\"Can't delete temp file %s. Filename must start with %s \"\n           \"or lie within any of %s.\")\n    raise ErrorNotTempFile(msg % (path, prefix, \";\".join(directories)))\n\n  if os.path.exists(path):\n    \n    files.FILE_HANDLE_CACHE.Flush()\n    os.remove(path)\n  else:\n    raise ErrorNotAFile(\"%s does not exist.\" % path)", "docstring": "Delete a GRR temp file.\n\nTo limit possible damage the path must be absolute and either the\nfile must be within any of the Client.tempdir_roots or the file name\nmust begin with Client.tempfile_prefix.\n\nArgs:\npath: path string to file to be deleted.\n\nRaises:\nOSError: Permission denied, or file not found.\nErrorBadPath: Path must be absolute.\nErrorNotTempFile: Filename must start with Client.tempfile_prefix.\nErrorNotAFile: File to delete does not exist.", "source": "juraj-google-style"}
{"code": "def find_usbserial(vendor, product):\n  \n  if platform.system() == 'Linux':\n    vendor, product = [('%04x' % (x)).strip() for x in (vendor, product)]\n    return linux_find_usbserial(vendor, product)\n  elif platform.system() == 'Darwin':\n    return osx_find_usbserial(vendor, product)\n  else:\n    raise NotImplementedError('Cannot find serial ports on %s'\n                              % platform.system())", "docstring": "Find the tty device for a given usbserial devices identifiers.\n\nArgs:\nvendor: (int) something like 0x0000\nproduct: (int) something like 0x0000\n\nReturns:\nString, like /dev/ttyACM0 or /dev/tty.usb...", "source": "juraj-google-style"}
{"code": "def CreateStorageWriter(cls, storage_format, session, path):\n    \n    if storage_format == definitions.STORAGE_FORMAT_SQLITE:\n      return sqlite_writer.SQLiteStorageFileWriter(session, path)\n\n    return None", "docstring": "Creates a storage writer.\n\nArgs:\nsession (Session): session the storage changes are part of.\npath (str): path to the storage file.\nstorage_format (str): storage format.\n\nReturns:\nStorageWriter: a storage writer or None if the storage file cannot be\nopened or the storage format is not supported.", "source": "juraj-google-style"}
{"code": "def convex_hull_collide(nodes1, nodes2):\n    \n    polygon1 = _helpers.simple_convex_hull(nodes1)\n    _, polygon_size1 = polygon1.shape\n    polygon2 = _helpers.simple_convex_hull(nodes2)\n    _, polygon_size2 = polygon2.shape\n    if polygon_size1 == 2 and polygon_size2 == 2:\n        return line_line_collide(polygon1, polygon2)\n\n    else:\n        return _helpers.polygon_collide(polygon1, polygon2)", "docstring": "Determine if the convex hulls of two curves collide.\n\n.. note::\n\nThis is a helper for :func:`from_linearized`.\n\nArgs:\nnodes1 (numpy.ndarray): Control points of a first curve.\nnodes2 (numpy.ndarray): Control points of a second curve.\n\nReturns:\nbool: Indicating if the convex hulls collide.", "source": "juraj-google-style"}
{"code": "def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):\n    output_image = np.zeros(input_matrix.shape[1])\n    row_max = row_seq[-1]\n    for i in range(row_seq.shape[0] - 1):\n        row_start = row_seq[i]\n        row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]\n        row_end = min(row_end, row_max)\n        output_image = np.vstack((output_image, np.mean(input_matrix[row_start:row_end, :], axis=0)))\n    return output_image[1:, :]", "docstring": "Perform average pool along row of a 2-D matrix based on row_seq.\n\nArgs:\ninput_matrix: A 2-D matrix.\nrow_seq: Cumulative pooling sequence along row.\noverlapping: Whether or not use overlapping when pooling.\n\nReturns:\nA 2-D matrix, with\n* num_rows = len(row_seq)-1\n* num_cols = input_matrix.num_cols.", "source": "github-repos"}
{"code": "def _format_time(self, time_per_unit, unit_name):\n    formatted = ''\n    if time_per_unit >= 1 or time_per_unit == 0:\n        formatted += f' {time_per_unit:.0f}s/{unit_name}'\n    elif time_per_unit >= 0.001:\n        formatted += f' {time_per_unit * 1000.0:.0f}ms/{unit_name}'\n    else:\n        formatted += f' {time_per_unit * 1000000.0:.0f}us/{unit_name}'\n    return formatted", "docstring": "format a given duration to display to the user.\n\nGiven the duration, this function formats it in either milliseconds\nor seconds and displays the unit (i.e. ms/step or s/epoch).\n\nArgs:\ntime_per_unit: the duration to display\nunit_name: the name of the unit to display\n\nReturns:\nA string with the correctly formatted duration and units", "source": "github-repos"}
{"code": "def __init__(self, hash=None, height=None, items=None):\n        \n        self.TransactionHash = hash\n        self.TransactionHeight = height\n        if items is None:\n            self.Items = []\n        else:\n            self.Items = items", "docstring": "Create an instance.\n\nArgs:\nhash (UInt256):\nheight (int):\nitems (list):", "source": "juraj-google-style"}
{"code": "def has_current_path(self, path, **kwargs):\n        \n\n        try:\n            return self.assert_current_path(path, **kwargs)\n        except ExpectationNotMet:\n            return False", "docstring": "Checks if the page has the given path.\n\nArgs:\npath (str | RegexObject): The string or regex that the current \"path\" should match.\n**kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`.\n\nReturns:\nbool: Whether it matches.", "source": "juraj-google-style"}
{"code": "def list_datasets(self):\n\n    def _row_gen(attributes):\n        for attr in attributes.values():\n            (yield (attr.name, attr.display_name))\n    return pd.DataFrame.from_records(_row_gen(self.datasets), columns=['name', 'display_name'])", "docstring": "Lists available datasets in a readable DataFrame format.\n\nReturns:\npd.DataFrame: Frame listing available datasets.", "source": "codesearchnet"}
{"code": "def byte_swap_string_content(buffer, from_endiness, to_endiness):\n    num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)\n    string_content = bytearray(buffer.data[4 * (num_of_strings + 2):])\n    prefix_data = b''.join([int.from_bytes(buffer.data[i:i + 4], from_endiness).to_bytes(4, to_endiness) for i in range(0, (num_of_strings + 1) * 4 + 1, 4)])\n    buffer.data = prefix_data + string_content", "docstring": "Helper function for byte-swapping the string buffer.\n\nArgs:\nbuffer: TFLite string buffer of from_endiness format.\nfrom_endiness: The original endianness format of the string buffer.\nto_endiness: The destined endianness format of the string buffer.", "source": "github-repos"}
{"code": "def get_transcript_ids_for_ensembl_gene_ids(self, gene_ids, hgnc_symbols):\n        \n        \n        chroms = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \\\n             \"12\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"19\", \"20\", \"21\", \"22\", \\\n              \"X\", \"Y\"}\n        \n        headers = {\"content-type\": \"application/json\"}\n        \n        transcript_ids = []\n        for gene_id in gene_ids:\n            self.attempt = 0\n            ext = \"/overlap/id/{}?feature=transcript\".format(gene_id)\n            r = self.ensembl_request(ext, headers)\n            \n            for item in json.loads(r):\n                \n                if item[\"biotype\"] not in [\"protein_coding\", \"polymorphic_pseudogene\"]:\n                    continue\n                \n                \n                \n                \n                if item[\"Parent\"] != gene_id or item[\"seq_region_name\"] not in \\\n                        chroms or \\\n                        all([symbol not in item[\"external_name\"] for symbol in hgnc_symbols]):\n                    continue\n                transcript_ids.append(item[\"id\"])\n        \n        return transcript_ids", "docstring": "fetch the ensembl transcript IDs for a given ensembl gene ID.\n\nArgs:\ngene_ids: list of Ensembl gene IDs for the gene\nhgnc_symbols: list of possible HGNC symbols for gene", "source": "juraj-google-style"}
{"code": "def random_inputs(num_devices, input_shape=gin.REQUIRED, input_dtype=np.int32, input_range=(0, 255), output_shape=gin.REQUIRED, output_dtype=np.int32, output_range=(0, 9)):\n    if ((input_shape[0] % num_devices) != 0):\n        tf.logging.fatal('num_devices[%d] should divide the first dimension of input_shape[%s]', num_devices, input_shape)\n    if ((output_shape[0] % num_devices) != 0):\n        tf.logging.fatal('num_devices[%d] should divide the first dimension of output_shape[%s]', num_devices, output_shape)\n\n    def random_minibatches():\n        'Generate a stream of random mini-batches.'\n        if (input_dtype in [np.float16, np.float32, np.float64]):\n            rand = np.random.uniform\n        else:\n            rand = np.random.random_integers\n        while True:\n            inp = rand(input_range[0], input_range[1], input_shape)\n            inp = inp.astype(input_dtype)\n            out = rand(output_range[0], output_range[1], output_shape)\n            out = out.astype(output_dtype)\n            (yield (inp, out))\n    input_shape_without_batch = list(input_shape)[1:]\n    return Inputs(train_stream=random_minibatches, train_eval_stream=random_minibatches, eval_stream=random_minibatches, input_shape=input_shape_without_batch)", "docstring": "Make random Inputs for debugging.\n\nArgs:\nnum_devices: how many devices to build the inputs for.\ninput_shape: the shape of inputs (including batch dimension).\ninput_dtype: the type of the inputs (int32 by default).\ninput_range: the range of inputs (defaults to (0, 255)).\noutput_shape: the shape of outputs (including batch dimension).\noutput_dtype: the type of the outputs (int32 by default).\noutput_range: the range of outputs (defaults to (0, 9)).\n\nReturns:\ntrax.inputs.Inputs", "source": "codesearchnet"}
{"code": "def occurrence(self, file_name=None, path=None, date=None):\n        \n        if self._indicator_data.get('type') != 'File':\n            \n            return None\n\n        occurrence_obj = FileOccurrence(file_name, path, date)\n        self._occurrences.append(occurrence_obj)\n        return occurrence_obj", "docstring": "Add a file Occurrence.\n\nArgs:\nfile_name (str, optional): The file name for this occurrence.\npath (str, optional): The file path for this occurrence.\ndate (str, optional): The datetime expression for this occurrence.\n\nReturns:\nobj: An instance of Occurrence.", "source": "juraj-google-style"}
{"code": "def _ReadUUIDDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    return self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.UUIDDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16, is_member=is_member, supported_size_values=(16,))", "docstring": "Reads an UUID data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nUUIDDataTypeDefinition: UUID data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def get_recipe(self, recipe_name):\n    if recipe_name.endswith('.yaml'):\n        recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)\n    else:\n        recipe = self._recipes.get(recipe_name)\n    if (recipe is None):\n        raise RecipeNotFoundError('Could not find recipe', recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])\n    return recipe", "docstring": "Get a recipe by name.\n\nArgs:\nrecipe_name (str): The name of the recipe to fetch. Can be either the\nyaml file name or the name of the recipe.", "source": "codesearchnet"}
{"code": "def connection_required(func):\n        \n        @functools.wraps(func)\n        def wrapper(self, *args, **kwargs):\n            \n            if not self.target_connected():\n                raise errors.JLinkException('Target is not connected.')\n            return func(self, *args, **kwargs)\n        return wrapper", "docstring": "Decorator to specify that a target connection is required in order\nfor the given method to be used.\n\nArgs:\nfunc (function): function being decorated\n\nReturns:\nThe wrapper function.", "source": "juraj-google-style"}
{"code": "def unsplat(f: Callable[[Iterable], A]) -> Callable[..., A]:\n    \n\n    def unsplatted(*args):\n        return f(args)\n\n    return unsplatted", "docstring": "Convert a function taking a single iterable argument into a function taking multiple arguments.\n\nArgs:\nf: Any function taking a single iterable argument\n\nReturns:\nA function that accepts multiple arguments. Each argument of this function is passed as an element of an\niterable to ``f``.\n\nExample:\n$ def f(a):\n$     return a[0] + a[1] + a[2]\n$\n$ f([1, 2, 3])  # 6\n$ g = unsplat(f)\n$ g(1, 2, 3)  # 6", "source": "juraj-google-style"}
{"code": "def assert_not_visible(self, selector, testid=None, **kwargs):\n        \n        self.info_log(\n            \"Assert not visible selector(%s) testid(%s)\" % (selector, testid)\n        )\n\n        highlight = kwargs.get(\n            'highlight',\n            BROME_CONFIG['highlight']['highlight_on_assertion_failure']\n        )\n        self.debug_log(\"effective highlight: %s\" % highlight)\n\n        wait_until_not_visible = kwargs.get(\n            'wait_until_not_visible',\n            BROME_CONFIG['proxy_driver']['wait_until_not_visible_before_assert_not_visible']  \n        )\n        self.debug_log(\n            \"effective wait_until_not_visible: %s\" % wait_until_not_visible\n        )\n\n        if wait_until_not_visible:\n            self.wait_until_not_visible(selector, raise_exception=False)\n\n        element = self.find(\n            selector,\n            raise_exception=False,\n            wait_until_visible=False,\n            wait_until_present=False\n        )\n        if element and element.is_displayed(raise_exception=False):\n            data = self.execute_script(\n                \"return arguments[0].getBoundingClientRect();\",\n                element._element\n            )\n\n            if highlight:\n                element.highlight(\n                    style=BROME_CONFIG['highlight']['style_on_assertion_failure']  \n                )\n            if testid is not None:\n                self.create_test_result(testid, False, extra_data={\n                    'bounding_client_rect': data,\n                    'video_x_offset': self.browser_config.get('video_x_offset', 0),  \n                    'video_y_offset': self.browser_config.get('video_y_offset', 0)  \n                })\n\n            return False\n        else:\n            if testid is not None:\n                self.create_test_result(testid, True)\n\n            return True", "docstring": "Assert that the element is not visible in the dom\n\nArgs:\nselector (str): the selector used to find the element\ntest_id (str): the test_id or a str\n\nKwargs:\nwait_until_not_visible (bool)\nhighlight (bool)\n\nReturns:\nbool: True is the assertion succeed; False otherwise.", "source": "juraj-google-style"}
{"code": "def to_step_result(func):\n    \n\n    @ft.wraps(func)\n    def wrapper(*args, **kwargs):\n        \n        res = func(*args, **kwargs)\n        if not res:\n            res = [StepResult.OK]\n\n        if not hasattr(res, \"__iter__\"):\n            res = [res]\n        return res\n\n    return wrapper", "docstring": "Convert a function return to a list of StepResults.\n\nAll Step subclasses automatically wrap the result of their\n__call__ method's result with this wrapper.\nIf the result is not a list of StepResult values, one will\nbe generated.\n\nresult of `[StepResult.OK]`, or convert the given result into\na list.\n\nArgs:\nfunc: The function to wrap.", "source": "juraj-google-style"}
{"code": "def greater(x, y):\n    return math_ops.greater(x, y)", "docstring": "Element-wise truth value of (x > y).\n\nArgs:\nx: Tensor or variable.\ny: Tensor or variable.\n\nReturns:\nA bool tensor.", "source": "github-repos"}
{"code": "def register_file_reader(*args):\n\n    def do_registration(file_reader_fn, is_readable_fn):\n        if (file_reader_fn not in list(zip(*_FILE_READERS))[0]):\n            _FILE_READERS.append((file_reader_fn, is_readable_fn))\n    if (len(args) == 1):\n        return functools.partial(do_registration, is_readable_fn=args[0])\n    elif (len(args) == 2):\n        do_registration(*args)\n    else:\n        err_str = 'register_file_reader() takes 1 or 2 arguments ({} given)'\n        raise TypeError(err_str.format(len(args)))", "docstring": "Register a file reader for use in parse_config_file.\n\nRegistered file readers will be used to try reading files passed to\n`parse_config_file`. All file readers (beginning with the default `open`) will\nbe tried until one of them succeeds at opening the file.\n\nThis function may also be be used used as a decorator. For example:\n\n@register_file_reader(IOError)\ndef exotic_data_source(filename):\n...\n\nArgs:\n*args: (When used as a decorator, only the existence check is supplied.)\n- file_reader_fn: The file reader function to register. This should be a\nfunction that can be used as a context manager to open a file and\nprovide a file-like object, similar to Python's built-in `open`.\n- is_readable_fn: A function taking the file path and returning a boolean\nindicating whether the file can be read by `file_reader_fn`.\n\nReturns:\n`None`, or when used as a decorator, a function that will perform the\nregistration using the supplied readability predicate.", "source": "codesearchnet"}
{"code": "def _validate_config(config):\n        \n        if not isinstance(config, list):\n            raise TypeError('Config must be a list')\n\n        for config_dict in config:\n            if not isinstance(config_dict, dict):\n                raise TypeError('Config must be a list of dictionaries')\n            label = config_dict.keys()[0]\n            cfg = config_dict[label]\n            if not isinstance(cfg, dict):\n                raise TypeError('Config structure is broken')\n\n            if 'host' not in cfg:\n                raise TypeError('Config entries must have a value for host')\n            if not isinstance(cfg['host'], str) and not isinstance(cfg['host'], list):\n                raise TypeError('Host must be a string or a list.')\n\n            if 'port' not in cfg:\n                raise TypeError('Config entries must have a value for port')\n            if not isinstance(cfg['port'], int):\n                raise TypeError('Port must be an int')\n\n            if 'dbpath' not in cfg:\n                raise TypeError('Config entries must have a value for dbpath')\n            if not isinstance(cfg['dbpath'], str):\n                if not isinstance(cfg['dbpath'], list):\n                    raise TypeError('Dbpath must either a string or a list of '\n                                    'strings')\n                for dbpath in cfg['dbpath']:\n                    if not isinstance(dbpath, str):\n                        raise TypeError('Dbpath must either a string or a list '\n                                        'of strings')\n\n            if ('read_preference' in cfg and\n                not isinstance(cfg['read_preference'], str)):\n                raise TypeError('Read_preference must be a string')\n\n            if ('replicaSet' in cfg and\n                not isinstance(cfg['replicaSet'], str)):\n                raise TypeError('replicaSet must be a string')", "docstring": "Validate that the provided configurtion is valid.\n\nEach dictionary in the configuration list must have the following\nmandatory entries :\n{label: {host(string), port(int), dbpath(string|list of strings)}}\nIt can also contain 1 optional key:\n{read_preference(string)}\n\nArgs:\nconfig: the list of configurations provided at instantiation\n\nRaises:\nTypeError: a fault in the configurations is found", "source": "juraj-google-style"}
{"code": "def push_error_to_driver(worker, error_type, message, driver_id=None):\n    \n    if driver_id is None:\n        driver_id = ray.DriverID.nil()\n    worker.raylet_client.push_error(driver_id, error_type, message,\n                                    time.time())", "docstring": "Push an error message to the driver to be printed in the background.\n\nArgs:\nworker: The worker to use.\nerror_type (str): The type of the error.\nmessage (str): The message that will be printed in the background\non the driver.\ndriver_id: The ID of the driver to push the error message to. If this\nis None, then the message will be pushed to all drivers.", "source": "juraj-google-style"}
{"code": "def apply_grad_cartesian_tensor(grad_X, zmat_dist):\n    \n    columns = ['bond', 'angle', 'dihedral']\n    C_dist = zmat_dist.loc[:, columns].values.T\n    try:\n        C_dist = C_dist.astype('f8')\n        C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])\n    except (TypeError, AttributeError):\n        C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])\n    cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T\n    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian\n    return Cartesian(atoms=zmat_dist['atom'],\n                     coords=cart_dist, index=zmat_dist.index)", "docstring": "Apply the gradient for transformation to cartesian space onto zmat_dist.\n\nArgs:\ngrad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.\nThe mathematical details of the index layout is explained in\n:meth:`~chemcoord.Cartesian.get_grad_zmat()`.\nzmat_dist (:class:`~chemcoord.Zmat`):\nDistortions in Zmatrix space.\n\nReturns:\n:class:`~chemcoord.Cartesian`: Distortions in cartesian space.", "source": "juraj-google-style"}
{"code": "def has_no_narrow_start(neuron, frac=0.9):\n    \n    bad_ids = [(neurite.root_node.id, [neurite.root_node.points[1]])\n               for neurite in neuron.neurites\n               if neurite.root_node.points[1][COLS.R] < frac * neurite.root_node.points[2][COLS.R]]\n    return CheckResult(len(bad_ids) == 0, bad_ids)", "docstring": "Check if neurites have a narrow start\n\nArguments:\nneuron(Neuron): The neuron object to test\nfrac(float): Ratio that the second point must be smaller than the first\n\nReturns:\nCheckResult with a list of all first segments of neurites with a narrow start", "source": "juraj-google-style"}
{"code": "def _get_args(cls, args):\n        \n        \n        if not isinstance(args, tuple) or not len(args) == 2:\n            raise TypeError(\n                \"{}[...] takes exactly two arguments.\".format(cls.__name__)\n            )\n        return super(_LengthBoundedMeta, cls)._get_args(args + (len,))", "docstring": "Return the parameters necessary to check type boundaries.\n\nArgs:\nargs: A tuple with two parameters: a type, and a slice representing\nthe minimum and maximum lengths allowed for values of that\ntype.\n\nReturns:\nA tuple with three parameters: a type, a slice, and the len\nfunction.", "source": "juraj-google-style"}
{"code": "def write_content(self, content, destination):\n    directory = os.path.dirname(destination)\n    if (directory and (not os.path.exists(directory))):\n        os.makedirs(directory)\n    with io.open(destination, 'w', encoding='utf-8') as f:\n        f.write(content)\n    return destination", "docstring": "Write given content to destination path.\n\nIt will create needed directory structure first if it contain some\ndirectories that does not allready exists.\n\nArgs:\ncontent (str): Content to write to target file.\ndestination (str): Destination path for target file.\n\nReturns:\nstr: Path where target file has been written.", "source": "codesearchnet"}
{"code": "def find_from(path):\n    realpath = os.path.realpath(path)\n    config_path = os.path.join(realpath, '.ensime')\n    if os.path.isfile(config_path):\n        return config_path\n    elif (realpath == os.path.abspath('/')):\n        return None\n    else:\n        dirname = os.path.dirname(realpath)\n        return ProjectConfig.find_from(dirname)", "docstring": "Find path of an .ensime config, searching recursively upward from path.\n\nArgs:\npath (str): Path of a file or directory from where to start searching.\n\nReturns:\nstr: Canonical path of nearest ``.ensime``, or ``None`` if not found.", "source": "codesearchnet"}
{"code": "def enable_argscope_for_module(module, log_shape=True):\n    \n    if is_tfv2() and module == tf.layers:\n        module = tf.compat.v1.layers\n    for name, obj in getmembers(module):\n        if isfunction(obj):\n            setattr(module, name, enable_argscope_for_function(obj,\n                    log_shape=log_shape))", "docstring": "Overwrite all functions of a given module to support argscope.\nNote that this function monkey-patches the module and therefore could\nhave unexpected consequences.\nIt has been only tested to work well with ``tf.layers`` module.\n\nExample:\n\n.. code-block:: python\n\nimport tensorflow as tf\nenable_argscope_for_module(tf.layers)\n\nArgs:\nlog_shape (bool): print input/output shapes of each function.", "source": "juraj-google-style"}
{"code": "def now_playing(self, **kwargs):\n    path = self._get_path('now_playing')\n    response = self._GET(path, kwargs)\n    self._set_attrs_to_values(response)\n    return response", "docstring": "Get the list of movies playing in theatres. This list refreshes\nevery day. The maximum number of items this list will include is 100.\n\nArgs:\npage: (optional) Minimum value of 1.  Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"}
{"code": "def _from_any_pb(pb_type, any_pb):\n    \n    msg = pb_type()\n    if not any_pb.Unpack(msg):\n        raise TypeError(\n            \"Could not convert {} to {}\".format(\n                any_pb.__class__.__name__, pb_type.__name__\n            )\n        )\n\n    return msg", "docstring": "Converts an Any protobuf to the specified message type\n\nArgs:\npb_type (type): the type of the message that any_pb stores an instance\nof.\nany_pb (google.protobuf.any_pb2.Any): the object to be converted.\n\nReturns:\npb_type: An instance of the pb_type message.\n\nRaises:\nTypeError: if the message could not be converted.", "source": "juraj-google-style"}
{"code": "def _convert_service_account_credentials(credentials):\n    \n    info = credentials.serialization_data.copy()\n    info['token_uri'] = credentials.token_uri\n    return google.oauth2.service_account.Credentials.from_service_account_info(\n        info)", "docstring": "Converts to :class:`google.oauth2.service_account.Credentials`.\n\nArgs:\ncredentials (Union[\noauth2client.service_account.ServiceAccountCredentials,\noauth2client.service_account._JWTAccessCredentials]): The\ncredentials to convert.\n\nReturns:\ngoogle.oauth2.service_account.Credentials: The converted credentials.", "source": "juraj-google-style"}
{"code": "class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):\n\n    def __init__(self, config: PerceiverConfig, output_shape: List[int], position_encoding_type: str, **decoder_kwargs) -> None:\n        super().__init__()\n        if len(output_shape) != 4:\n            raise ValueError(f'Expected rank 4 output_shape, got {output_shape}.')\n        self.output_shape = output_shape\n        self.output_num_channels = decoder_kwargs['output_num_channels']\n        self.decoder = PerceiverBasicDecoder(config, output_index_dims=self.output_shape[1:4], position_encoding_type=position_encoding_type, **decoder_kwargs)\n\n    @property\n    def num_query_channels(self) -> int:\n        return self.decoder.num_query_channels\n\n    def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):\n        return self.decoder.decoder_query(inputs, modality_sizes=modality_sizes, inputs_without_pos=inputs_without_pos, subsampled_points=subsampled_points)\n\n    def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None) -> PerceiverDecoderOutput:\n        decoder_outputs = self.decoder(query, z)\n        logits = decoder_outputs.logits\n        logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]])\n        return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)", "docstring": "Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video\nreshaping logic.\n\nArgs:\nconfig ([*PerceiverConfig*]):\nModel configuration.\noutput_shape (`List[int]`):\nShape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.\nposition_encoding_type (`str`):\nThe type of position encoding to use. Can be either \"trainable\", \"fourier\", or \"none\".", "source": "github-repos"}
{"code": "def get_hash_of_dirs(directory):\n    import hashlib\n    sha = hashlib.sha512()\n    if (not os.path.exists(directory)):\n        return (- 1)\n    for (root, _, files) in os.walk(directory):\n        for name in files:\n            filepath = (local.path(root) / name)\n            if filepath.exists():\n                with open(filepath, 'rb') as next_file:\n                    for line in next_file:\n                        sha.update(line)\n    return sha.hexdigest()", "docstring": "Recursively hash the contents of the given directory.\n\nArgs:\ndirectory (str): The root directory we want to hash.\n\nReturns:\nA hash of all the contents in the directory.", "source": "codesearchnet"}
{"code": "def add_device(self, device, container):\n        \n        \n        \n        if self.findtext(\"is_smart\") == \"false\":\n            self.add_object_to_path(device, container)\n        else:\n            \n            \n            raise ValueError(\"Devices may not be added to smart groups.\")", "docstring": "Add a device to a group. Wraps JSSObject.add_object_to_path.\n\nArgs:\ndevice: A JSSObject to add (as list data), to this object.\nlocation: Element or a string path argument to find()", "source": "juraj-google-style"}
{"code": "def get_object(self, object_ids):\n        \n        \n        for object_id in object_ids:\n            if not isinstance(object_id, ObjectID):\n                raise TypeError(\n                    \"Attempting to call `get` on the value {}, \"\n                    \"which is not an ray.ObjectID.\".format(object_id))\n        \n        \n        \n        plain_object_ids = [\n            plasma.ObjectID(object_id.binary()) for object_id in object_ids\n        ]\n        for i in range(0, len(object_ids),\n                       ray._config.worker_fetch_request_size()):\n            self.raylet_client.fetch_or_reconstruct(\n                object_ids[i:(i + ray._config.worker_fetch_request_size())],\n                True)\n\n        \n        final_results = self.retrieve_and_deserialize(plain_object_ids, 0)\n        \n        \n        unready_ids = {\n            plain_object_ids[i].binary(): i\n            for (i, val) in enumerate(final_results)\n            if val is plasma.ObjectNotAvailable\n        }\n\n        if len(unready_ids) > 0:\n            \n            \n            \n            while len(unready_ids) > 0:\n                object_ids_to_fetch = [\n                    plasma.ObjectID(unready_id)\n                    for unready_id in unready_ids.keys()\n                ]\n                ray_object_ids_to_fetch = [\n                    ObjectID(unready_id) for unready_id in unready_ids.keys()\n                ]\n                fetch_request_size = ray._config.worker_fetch_request_size()\n                for i in range(0, len(object_ids_to_fetch),\n                               fetch_request_size):\n                    self.raylet_client.fetch_or_reconstruct(\n                        ray_object_ids_to_fetch[i:(i + fetch_request_size)],\n                        False,\n                        self.current_task_id,\n                    )\n                results = self.retrieve_and_deserialize(\n                    object_ids_to_fetch,\n                    max([\n                        ray._config.get_timeout_milliseconds(),\n                        int(0.01 * len(unready_ids)),\n                    ]),\n                )\n                \n                \n                for i, val in enumerate(results):\n                    if val is not plasma.ObjectNotAvailable:\n                        object_id = object_ids_to_fetch[i].binary()\n                        index = unready_ids[object_id]\n                        final_results[index] = val\n                        unready_ids.pop(object_id)\n\n            \n            \n            self.raylet_client.notify_unblocked(self.current_task_id)\n\n        assert len(final_results) == len(object_ids)\n        return final_results", "docstring": "Get the value or values in the object store associated with the IDs.\n\nReturn the values from the local object store for object_ids. This will\nblock until all the values for object_ids have been written to the\nlocal object store.\n\nArgs:\nobject_ids (List[object_id.ObjectID]): A list of the object IDs\nwhose values should be retrieved.", "source": "juraj-google-style"}
{"code": "def v4_int_to_packed(address):\n    if (address > _BaseV4._ALL_ONES):\n        raise ValueError('Address too large for IPv4')\n    return Bytes(struct.pack('!I', address))", "docstring": "The binary representation of this address.\n\nArgs:\naddress: An integer representation of an IPv4 IP address.\n\nReturns:\nThe binary representation of this address.\n\nRaises:\nValueError: If the integer is too large to be an IPv4 IP\naddress.", "source": "codesearchnet"}
{"code": "def get_embedded_tweet(tweet):\n    \n    if tweet.retweeted_tweet is not None:\n        return tweet.retweeted_tweet\n    elif tweet.quoted_tweet is not None:\n        return tweet.quoted_tweet\n    else:\n        return None", "docstring": "Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary\n\nArgs:\ntweet (Tweet): A Tweet object (not simply a dict)\n\nReturns:\ndict (or None, if the Tweet is neither a quote tweet or a Retweet):\na dictionary representing the quote Tweet or the Retweet", "source": "juraj-google-style"}
{"code": "def Serialize(self, writer):\n        \n        writer.WriteByte(self.Usage)\n\n        if isinstance(self.Data, UIntBase):\n            self.Data = self.Data.Data\n\n        length = len(self.Data)\n\n        if length > self.MAX_ATTR_DATA_SIZE:\n            raise Exception(\"Invalid transaction attribute\")\n\n        if self.Usage == TransactionAttributeUsage.ContractHash or self.Usage == TransactionAttributeUsage.Vote or \\\n                (self.Usage >= TransactionAttributeUsage.Hash1 and self.Usage <= TransactionAttributeUsage.Hash15):\n            writer.WriteBytes(self.Data)\n\n        elif self.Usage == TransactionAttributeUsage.ECDH02 or self.Usage == TransactionAttributeUsage.ECDH03:\n            writer.WriteBytes(self.Data[1:33])\n\n        elif self.Usage == TransactionAttributeUsage.Script:\n            writer.WriteBytes(self.Data)\n\n        elif self.Usage == TransactionAttributeUsage.DescriptionUrl:\n            writer.WriteVarString(self.Data)\n\n        elif self.Usage == TransactionAttributeUsage.Description or self.Usage >= TransactionAttributeUsage.Remark:\n            writer.WriteVarString(self.Data)\n        else:\n            logger.error(\"format error!!!\")", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):\n\nRaises:\nException: if the length exceeds the maximum allowed number of attributes in a transaction.", "source": "juraj-google-style"}
{"code": "def shutdown_tpu_system(cluster_resolver=None):\n    tpu_strategy_util.shutdown_tpu_system_impl(cluster_resolver, TPUClusterResolver)", "docstring": "Shuts down the TPU devices.\n\nThis will clear all caches, even those that are maintained through sequential\ncalls to tf.tpu.experimental.initialize_tpu_system, such as the compilation\ncache.\n\nArgs:\ncluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\nwhich provides information about the TPU cluster.\n\nRaises:\nRuntimeError: If no TPU devices found for eager execution or if run in a\ntf.function.", "source": "github-repos"}
{"code": "def start(self, device):\n        \n\n        super(NativeBLEVirtualInterface, self).start(device)\n        self.set_advertising(True)", "docstring": "Start serving access to this VirtualIOTileDevice\n\nArgs:\ndevice (VirtualIOTileDevice): The device we will be providing access to", "source": "juraj-google-style"}
{"code": "def register_subclass(cls, typeid):\n        \n        def decorator(subclass):\n            cls._subcls_lookup[typeid] = subclass\n            subclass.typeid = typeid\n            return subclass\n        return decorator", "docstring": "Register a subclass so from_dict() works\n\nArgs:\ntypeid (str): Type identifier for subclass", "source": "juraj-google-style"}
{"code": "def _expand_place_ids(self, terms):\n        \n\n        place_vids = []\n        first_type = None\n\n        for result in self.backend.identifier_index.search(terms):\n\n            if not first_type:\n                first_type = result.type\n\n            if result.type != first_type:\n                \n                continue\n\n            place_vids.append(result.vid)\n\n        if place_vids:\n            \n            all_set = set(itertools.chain.from_iterable(iallval(GVid.parse(x)) for x in place_vids))\n            place_vids += list(str(x) for x in all_set)\n            return place_vids\n        else:\n            return terms", "docstring": "Lookups all of the place identifiers to get gvids\n\nArgs:\nterms (str or unicode): terms to lookup\n\nReturns:\nstr or list: given terms if no identifiers found, otherwise list of identifiers.", "source": "juraj-google-style"}
{"code": "def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes):\n    nmr_params = parameters.shape[1]\n    initial_step = np.zeros_like(parameters)\n    if (max_step_sizes is None):\n        max_step_sizes = 0.1\n    if isinstance(max_step_sizes, Number):\n        max_step_sizes = ([max_step_sizes] * nmr_params)\n    max_step_sizes = np.array(max_step_sizes)\n    for ind in range(parameters.shape[1]):\n        minimum_allowed_step = np.minimum(np.abs((parameters[(:, ind)] - lower_bounds[ind])), np.abs((upper_bounds[ind] - parameters[(:, ind)])))\n        initial_step[(:, ind)] = np.minimum(minimum_allowed_step, max_step_sizes[ind])\n    return (initial_step / 2.0)", "docstring": "Get an initial step size to use for every parameter.\n\nThis chooses the step sizes based on the maximum step size and the lower and upper bounds.\n\nArgs:\nparameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,\np parameters and n samples.\nlower_bounds (list): lower bounds\nupper_bounds (list): upper bounds\nmax_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1\n\nReturns:\nndarray: for every problem instance the vector with the initial step size for each parameter.", "source": "codesearchnet"}
{"code": "def inverse(self, name=None):\n    if (self._num_coeff != 6):\n        raise tf.errors.UnimplementedError('AffineGridWarper currently supportsinversion only for the 2D case.')\n\n    def _affine_grid_warper_inverse(inputs):\n        'Assembles network to compute inverse affine transformation.\\n\\n      Each `inputs` row potentially contains [a, b, tx, c, d, ty]\\n      corresponding to an affine matrix:\\n\\n        A = [a, b, tx],\\n            [c, d, ty]\\n\\n      We want to generate a tensor containing the coefficients of the\\n      corresponding inverse affine transformation in a constraints-aware\\n      fashion.\\n      Calling M:\\n\\n        M = [a, b]\\n            [c, d]\\n\\n      the affine matrix for the inverse transform is:\\n\\n         A_in = [M^(-1), M^-1 * [-tx, -tx]^T]\\n\\n      where\\n\\n        M^(-1) = (ad - bc)^(-1) * [ d, -b]\\n                                  [-c,  a]\\n\\n      Args:\\n        inputs: Tensor containing a batch of transformation parameters.\\n\\n      Returns:\\n        A tensorflow graph performing the inverse affine transformation\\n        parametrized by the input coefficients.\\n      '\n        batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)\n        constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)\n        index = iter(range(6))\n\n        def get_variable(constraint):\n            if (constraint is None):\n                i = next(index)\n                return inputs[(:, i:(i + 1))]\n            else:\n                return tf.fill(constant_shape, tf.constant(constraint, dtype=inputs.dtype))\n        constraints = chain.from_iterable(self.constraints)\n        (a, b, tx, c, d, ty) = (get_variable(constr) for constr in constraints)\n        det = ((a * d) - (b * c))\n        a_inv = (d / det)\n        b_inv = ((- b) / det)\n        c_inv = ((- c) / det)\n        d_inv = (a / det)\n        m_inv = basic.BatchReshape([2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))\n        txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)\n        txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))\n        tx_inv = txy_inv[(:, 0:1)]\n        ty_inv = txy_inv[(:, 1:2)]\n        inverse_gw_inputs = tf.concat([a_inv, b_inv, (- tx_inv), c_inv, d_inv, (- ty_inv)], 1)\n        agw = AffineGridWarper(self.output_shape, self.source_shape)\n        return agw(inverse_gw_inputs)\n    if (name is None):\n        name = (self.module_name + '_inverse')\n    return base.Module(_affine_grid_warper_inverse, name=name)", "docstring": "Returns a `sonnet` module to compute inverse affine transforms.\n\nThe function first assembles a network that given the constraints of the\ncurrent AffineGridWarper and a set of input parameters, retrieves the\ncoefficients of the corresponding inverse affine transform, then feeds its\noutput into a new AffineGridWarper setup to correctly warp the `output`\nspace into the `source` space.\n\nArgs:\nname: Name of module implementing the inverse grid transformation.\n\nReturns:\nA `sonnet` module performing the inverse affine transform of a reference\ngrid of points via an AffineGridWarper module.\n\nRaises:\ntf.errors.UnimplementedError: If the function is called on a non 2D\ninstance of AffineGridWarper.", "source": "codesearchnet"}
{"code": "def multi_choice_spec(self) -> Optional['DecisionPoint']:\n    self._ensure_dna_spec()\n    multi_choice_spec = None\n    if self.children:\n        child_spec = self.children[0].spec\n        if child_spec.is_subchoice:\n            multi_choice_spec = child_spec.parent_spec\n    return multi_choice_spec", "docstring": "Returns the multi-choice spec for child DNAs.\n\nReturns:\nIf the children of this DNA are decisions of a multi-choice's subchoices,\nreturn the multi-choice spec (`pg.geno.Choices`). Otherwise returns None.", "source": "github-repos"}
{"code": "def live_processes(self):\n    result = []\n    for (process_type, process_infos) in self.all_processes.items():\n        for process_info in process_infos:\n            if (process_info.process.poll() is None):\n                result.append((process_type, process_info.process))\n    return result", "docstring": "Return a list of the live processes.\n\nReturns:\nA list of the live processes.", "source": "codesearchnet"}
{"code": "def variants(self, case_id, skip=0, count=1000, filters=None):\n    filters = (filters or {})\n    case_obj = self.case(case_id=case_id)\n    limit = (count + skip)\n    genes = set()\n    if filters.get('gene_ids'):\n        genes = set([gene_id.strip() for gene_id in filters['gene_ids']])\n    frequency = None\n    if filters.get('frequency'):\n        frequency = float(filters['frequency'])\n    cadd = None\n    if filters.get('cadd'):\n        cadd = float(filters['cadd'])\n    genetic_models = None\n    if filters.get('genetic_models'):\n        genetic_models = set(filters['genetic_models'])\n    sv_len = None\n    if filters.get('sv_len'):\n        sv_len = float(filters['sv_len'])\n    impact_severities = None\n    if filters.get('impact_severities'):\n        impact_severities = set(filters['impact_severities'])\n    vcf_file_path = case_obj.variant_source\n    self.head = get_header(vcf_file_path)\n    self.vep_header = self.head.vep_columns\n    self.snpeff_header = self.head.snpeff_columns\n    variants = self._get_filtered_variants(vcf_file_path, filters)\n    result = []\n    skip_index = 0\n    for (index, variant) in enumerate(variants):\n        index += 1\n        if (skip_index >= skip):\n            variant_obj = self._format_variants(variant=variant, index=index, case_obj=case_obj)\n            if (genes and variant_obj):\n                if (not set(variant_obj['gene_symbols']).intersection(genes)):\n                    variant_obj = None\n            if (impact_severities and variant_obj):\n                if (not (variant_obj['impact_severity'] in impact_severities)):\n                    variant_obj = None\n            if (frequency and variant_obj):\n                if (variant_obj.max_freq > frequency):\n                    variant_obj = None\n            if (cadd and variant_obj):\n                if (variant_obj['cadd_score'] < cadd):\n                    variant_obj = None\n            if (genetic_models and variant_obj):\n                models = set(variant_obj.genetic_models)\n                if (not models.intersection(genetic_models)):\n                    variant_obj = None\n            if (sv_len and variant_obj):\n                if (variant_obj.sv_len < sv_len):\n                    variant_obj = None\n            if variant_obj:\n                skip_index += 1\n                if (skip_index <= limit):\n                    result.append(variant_obj)\n                else:\n                    break\n        else:\n            skip_index += 1\n    return Results(result, len(result))", "docstring": "Return all variants in the VCF.\n\nThis function will apply the given filter and return the 'count' first\nvariants. If skip the first 'skip' variants will not be regarded.\n\nArgs:\ncase_id (str): Path to a vcf file (for this adapter)\nskip (int): Skip first variants\ncount (int): The number of variants to return\nfilters (dict): A dictionary with filters. Currently this will\nlook like: {\ngene_list: [] (list of hgnc ids),\nfrequency: None (float),\ncadd: None (float),\nsv_len: None (float),\nconsequence: [] (list of consequences),\nis_lof: None (Bool),\ngenetic_models [] (list of genetic models)\nsv_type: List (list of sv types),\n}\nReturns:\npuzzle.constants.Results : Named tuple with variants and\nnr_of_variants", "source": "codesearchnet"}
{"code": "def to_python(self, value: Union[dict, str, None]) -> LocalizedValue:\n        \n\n        \n        \n        try:\n            deserialized_value = super(LocalizedField, self).to_python(value)\n        except json.JSONDecodeError:\n            deserialized_value = value\n\n        if not deserialized_value:\n            return self.attr_class()\n\n        return self.attr_class(deserialized_value)", "docstring": "Turns the specified database value into its Python\nequivalent.\n\nArguments:\nvalue:\nThe value that is stored in the database and\nneeds to be converted to its Python equivalent.\n\nReturns:\nA :see:LocalizedValue instance containing the\ndata extracted from the database.", "source": "juraj-google-style"}
{"code": "def maybe_get_static_value(x, dtype=None):\n    if x is None:\n        return x\n    try:\n        x_ = tensor_util.constant_value(x)\n    except TypeError:\n        x_ = x\n    if x_ is None or dtype is None:\n        return x_\n    return np.array(x_, dtype)", "docstring": "Helper which tries to return a static value.\n\nGiven `x`, extract it's value statically, optionally casting to a specific\ndtype. If this is not possible, None is returned.\n\nArgs:\nx: `Tensor` for which to extract a value statically.\ndtype: Optional dtype to cast to.\n\nReturns:\nStatically inferred value if possible, otherwise None.", "source": "github-repos"}
{"code": "def build_metagraph_list(self):\n    ops = []\n    self.ignore_unknown_dtypes = True\n    for key in sorted(self.meta_params):\n        value = self.convert_data_to_string(self.meta_params[key])\n        if (len(value) == 0):\n            continue\n        if isinstance(value, str):\n            ops.append(tf.contrib.summary.generic(name=key, tensor=tf.convert_to_tensor(str(value))))\n        else:\n            ops.append(tf.contrib.summary.generic(name=key, tensor=tf.as_string(tf.convert_to_tensor(value))))\n    return ops", "docstring": "Convert MetaParams into TF Summary Format and create summary_op.\n\nReturns:\nMerged TF Op for TEXT summary elements, should only be executed once to reduce data duplication.", "source": "codesearchnet"}
{"code": "def _make_unique_slug(slug: str, language: str, is_unique: Callable[([str], bool)]) -> str:\n    index = 1\n    unique_slug = slug\n    while (not is_unique(unique_slug, language)):\n        unique_slug = ('%s-%d' % (slug, index))\n        index += 1\n    return unique_slug", "docstring": "Guarentees that the specified slug is unique by appending\na number until it is unique.\n\nArguments:\nslug:\nThe slug to make unique.\n\nis_unique:\nFunction that can be called to verify\nwhether the generate slug is unique.\n\nReturns:\nA guarenteed unique slug.", "source": "codesearchnet"}
{"code": "def original(self, index=None):\n    if (index is None):\n        try:\n            return next(self.select(Original, None, False, False))\n        except StopIteration:\n            raise NoSuchAnnotation\n    else:\n        for e in self.select(Original, None, False, False):\n            return e[index]\n        raise NoSuchAnnotation", "docstring": "Get the old annotation prior to correction.\n\nThis returns only one annotation if multiple exist, use `index` to select another in the sequence.\n\nReturns:\nan annotation element (:class:`AbstractElement`)\n\nRaises:\n:class:`NoSuchAnnotation`", "source": "codesearchnet"}
{"code": "def get_psd(self, omega):\n        \n        w = np.asarray(omega)\n        (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,\n         beta_complex_real, beta_complex_imag) = self.coefficients\n        p = get_psd_value(\n            alpha_real, beta_real,\n            alpha_complex_real, alpha_complex_imag,\n            beta_complex_real, beta_complex_imag,\n            w.flatten(),\n        )\n        return p.reshape(w.shape)", "docstring": "Compute the PSD of the term for an array of angular frequencies\n\nArgs:\nomega (array[...]): An array of frequencies where the PSD should\nbe evaluated.\n\nReturns:\nThe value of the PSD for each ``omega``. This will have the same\nshape as ``omega``.", "source": "juraj-google-style"}
{"code": "def read_cifar10(filename_queue):\n\n    class CIFAR10Record(object):\n        pass\n    result = CIFAR10Record()\n    label_bytes = 1\n    result.height = 32\n    result.width = 32\n    result.depth = 3\n    image_bytes = ((result.height * result.width) * result.depth)\n    record_bytes = (label_bytes + image_bytes)\n    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n    (result.key, value) = reader.read(filename_queue)\n    record_bytes = tf.decode_raw(value, tf.uint8)\n    result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)\n    depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes], [(label_bytes + image_bytes)]), [result.depth, result.height, result.width])\n    result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n    return result", "docstring": "Reads and parses examples from CIFAR10 data files.\n\nRecommendation: if you want N-way read parallelism, call this function\nN times.  This will give you N independent Readers reading different\nfiles & positions within those files, which will give better mixing of\nexamples.\n\nArgs:\nfilename_queue: A queue of strings with the filenames to read from.\n\nReturns:\nAn object representing a single example, with the following fields:\nheight: number of rows in the result (32)\nwidth: number of columns in the result (32)\ndepth: number of color channels in the result (3)\nkey: a scalar string Tensor describing the filename & record number\nfor this example.\nlabel: an int32 Tensor with the label in the range 0..9.\nuint8image: a [height, width, depth] uint8 Tensor with the image data", "source": "codesearchnet"}
{"code": "def CreateFile(filename):\n  \n  with gcs.open(filename, 'w') as f:\n    f.write('abcde\\n')\n\n  blobstore_filename = '/gs' + filename\n  return blobstore.create_gs_key(blobstore_filename)", "docstring": "Create a GCS file with GCS client lib.\n\nArgs:\nfilename: GCS filename.\n\nReturns:\nThe corresponding string blobkey for this GCS file.", "source": "juraj-google-style"}
{"code": "def parse_selinux(parts):\n    (owner, group) = parts[:2]\n    selinux = parts[2].split(':')\n    lsel = len(selinux)\n    (path, link) = parse_path(parts[(- 1)])\n    result = {'owner': owner, 'group': group, 'se_user': selinux[0], 'se_role': (selinux[1] if (lsel > 1) else None), 'se_type': (selinux[2] if (lsel > 2) else None), 'se_mls': (selinux[3] if (lsel > 3) else None), 'name': path}\n    if link:\n        result['link'] = link\n    return result", "docstring": "Parse part of an ls output line that is selinux.\n\nArgs:\nparts (list): A four element list of strings representing the initial\nparts of an ls line after the permission bits. The parts are owner\ngroup, selinux info, and the path.\n\nReturns:\nA dict containing owner, group, se_user, se_role, se_type, se_mls, and\nname. If the raw name was a symbolic link, link is always included.", "source": "codesearchnet"}
{"code": "def poisson_ll(data, means):\n    if sparse.issparse(data):\n        return sparse_poisson_ll(data, means)\n    (genes, cells) = data.shape\n    clusters = means.shape[1]\n    ll = np.zeros((cells, clusters))\n    for i in range(clusters):\n        means_i = np.tile(means[(:, i)], (cells, 1))\n        means_i = (means_i.transpose() + eps)\n        ll[(:, i)] = np.sum((xlogy(data, means_i) - means_i), 0)\n    return ll", "docstring": "Calculates the Poisson log-likelihood.\n\nArgs:\ndata (array): 2d numpy array of genes x cells\nmeans (array): 2d numpy array of genes x k\n\nReturns:\ncells x k array of log-likelihood for each cell/cluster pair", "source": "codesearchnet"}
{"code": "def _MergeField(self, tokenizer, message):\n    \n    message_descriptor = message.DESCRIPTOR\n    if (hasattr(message_descriptor, 'syntax') and\n        message_descriptor.syntax == 'proto3'):\n      \n      \n      self._allow_multiple_scalars = True\n    if tokenizer.TryConsume('['):\n      name = [tokenizer.ConsumeIdentifier()]\n      while tokenizer.TryConsume('.'):\n        name.append(tokenizer.ConsumeIdentifier())\n      name = '.'.join(name)\n\n      if not message_descriptor.is_extendable:\n        raise tokenizer.ParseErrorPreviousToken(\n            'Message type \"%s\" does not have extensions.' %\n            message_descriptor.full_name)\n      \n      field = message.Extensions._FindExtensionByName(name)\n      \n      if not field:\n        if self.allow_unknown_extension:\n          field = None\n        else:\n          raise tokenizer.ParseErrorPreviousToken(\n              'Extension \"%s\" not registered.' % name)\n      elif message_descriptor != field.containing_type:\n        raise tokenizer.ParseErrorPreviousToken(\n            'Extension \"%s\" does not extend message type \"%s\".' %\n            (name, message_descriptor.full_name))\n\n      tokenizer.Consume(']')\n\n    else:\n      name = tokenizer.ConsumeIdentifierOrNumber()\n      if self.allow_field_number and name.isdigit():\n        number = ParseInteger(name, True, True)\n        field = message_descriptor.fields_by_number.get(number, None)\n        if not field and message_descriptor.is_extendable:\n          field = message.Extensions._FindExtensionByNumber(number)\n      else:\n        field = message_descriptor.fields_by_name.get(name, None)\n\n        \n        \n        \n        if not field:\n          field = message_descriptor.fields_by_name.get(name.lower(), None)\n          if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:\n            field = None\n\n        if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and\n            field.message_type.name != name):\n          field = None\n\n      if not field:\n        raise tokenizer.ParseErrorPreviousToken(\n            'Message type \"%s\" has no field named \"%s\".' %\n            (message_descriptor.full_name, name))\n\n    if field:\n      if not self._allow_multiple_scalars and field.containing_oneof:\n        \n        \n        \n        which_oneof = message.WhichOneof(field.containing_oneof.name)\n        if which_oneof is not None and which_oneof != field.name:\n          raise tokenizer.ParseErrorPreviousToken(\n              'Field \"%s\" is specified along with field \"%s\", another member '\n              'of oneof \"%s\" for message type \"%s\".' 
%\n              (field.name, which_oneof, field.containing_oneof.name,\n               message_descriptor.full_name))\n\n      if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n        tokenizer.TryConsume(':')\n        merger = self._MergeMessageField\n      else:\n        tokenizer.Consume(':')\n        merger = self._MergeScalarField\n\n      if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and\n          tokenizer.TryConsume('[')):\n        \n        while True:\n          merger(tokenizer, message, field)\n          if tokenizer.TryConsume(']'):\n            break\n          tokenizer.Consume(',')\n\n      else:\n        merger(tokenizer, message, field)\n\n    else:  \n      assert self.allow_unknown_extension\n      _SkipFieldContents(tokenizer)\n\n    \n    \n    if not tokenizer.TryConsume(','):\n      tokenizer.TryConsume(';')", "docstring": "Merges a single protocol message field into a message.\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.\nmessage: A protocol message to record the data.\n\nRaises:\nParseError: In case of text parsing problems.", "source": "juraj-google-style"}
{"code": "def convert_seeded_answers(answers):\n    converted = {}\n    for (index, answer) in enumerate(answers):\n        converted.setdefault(answer['answer'], {})\n        converted[answer['answer']][('seeded' + str(index))] = answer['rationale']\n    return converted", "docstring": "Convert seeded answers into the format that can be merged into student answers.\n\nArgs:\nanswers (list): seeded answers\n\nReturns:\ndict: seeded answers with student answers format:\n{\n0: {\n'seeded0': 'rationaleA'\n}\n1: {\n'seeded1': 'rationaleB'\n}\n}", "source": "codesearchnet"}
{"code": "def usergroups_users_update(self, *, usergroup: str, users: List[str], **kwargs) -> SlackResponse:\n    self._validate_xoxp_token()\n    kwargs.update({'usergroup': usergroup, 'users': users})\n    return self.api_call('usergroups.users.update', json=kwargs)", "docstring": "Update the list of users for a User Group\n\nArgs:\nusergroup (str): The encoded ID of the User Group to update.\ne.g. 'S0604QSJC'\nusers (list): A list user IDs that represent the entire list of\nusers for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']", "source": "codesearchnet"}
{"code": "def create(self, specify_uri=False, ignore_tombstone=False, serialization_format=None, stream=False, auto_refresh=None):\n    if self.exists:\n        raise Exception('resource exists attribute True, aborting')\n    else:\n        if specify_uri:\n            verb = 'PUT'\n        else:\n            verb = 'POST'\n        logger.debug(('creating resource %s with verb %s' % (self.uri, verb)))\n        if issubclass(type(self), NonRDFSource):\n            self.binary._prep_binary()\n            data = self.binary.data\n        else:\n            if (not serialization_format):\n                serialization_format = self.repo.default_serialization\n            data = self.rdf.graph.serialize(format=serialization_format)\n            logger.debug('Serialized graph used for resource creation:')\n            logger.debug(data.decode('utf-8'))\n            self.headers['Content-Type'] = serialization_format\n        response = self.repo.api.http_request(verb, self.uri, data=data, headers=self.headers, stream=stream)\n        return self._handle_create(response, ignore_tombstone, auto_refresh)", "docstring": "Primary method to create resources.\n\nArgs:\nspecify_uri (bool): If True, uses PUT verb and sets the URI during creation.  If False, uses POST and gets repository minted URI\nignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry\nserialization_format(str): Content-Type header / mimetype that will be used to serialize self.rdf.graph, and set headers for PUT/POST requests\nauto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh", "source": "codesearchnet"}
{"code": "def single_qubit_state_tomography(sampler: sim.Sampler, qubit: devices.GridQubit, circuit: circuits.Circuit, repetitions: int=1000) -> TomographyResult:\n    circuit_z = (circuit + circuits.Circuit.from_ops(ops.measure(qubit, key='z')))\n    results = sampler.run(circuit_z, repetitions=repetitions)\n    rho_11 = np.mean(results.measurements['z'])\n    rho_00 = (1.0 - rho_11)\n    circuit_x = circuits.Circuit.from_ops(circuit, (ops.X(qubit) ** 0.5), ops.measure(qubit, key='z'))\n    results = sampler.run(circuit_x, repetitions=repetitions)\n    rho_01_im = (np.mean(results.measurements['z']) - 0.5)\n    circuit_y = circuits.Circuit.from_ops(circuit, (ops.Y(qubit) ** (- 0.5)), ops.measure(qubit, key='z'))\n    results = sampler.run(circuit_y, repetitions=repetitions)\n    rho_01_re = (0.5 - np.mean(results.measurements['z']))\n    rho_01 = (rho_01_re + (1j * rho_01_im))\n    rho_10 = np.conj(rho_01)\n    rho = np.array([[rho_00, rho_01], [rho_10, rho_11]])\n    return TomographyResult(rho)", "docstring": "Single-qubit state tomography.\n\nThe density matrix of the output state of a circuit is measured by first\ndoing projective measurements in the z-basis, which determine the\ndiagonal elements of the matrix. A X/2 or Y/2 rotation is then added before\nthe z-basis measurement, which determines the imaginary and real parts of\nthe off-diagonal matrix elements, respectively.\n\nSee Vandersypen and Chuang, Rev. Mod. Phys. 76, 1037 for details.\n\nArgs:\nsampler: The quantum engine or simulator to run the circuits.\nqubit: The qubit under test.\ncircuit: The circuit to execute on the qubit before tomography.\nrepetitions: The number of measurements for each basis rotation.\n\nReturns:\nA TomographyResult object that stores and plots the density matrix.", "source": "codesearchnet"}
{"code": "def ParseZeitgeistEventRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    event_data = ZeitgeistActivityEventData()\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')\n    timestamp = self._GetRowValue(query_hash, row, 'timestamp')\n    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UNKNOWN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a zeitgeist event row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def from_file(cls, jss, filename):\n        \n        tree = ElementTree.parse(filename)\n        root = tree.getroot()\n        return cls(jss, root)", "docstring": "Create a new JSSObject from an external XML file.\n\nArgs:\njss: A JSS object.\nfilename: String path to an XML file.", "source": "juraj-google-style"}
{"code": "def _copy_delpoy_scripts(self, scripts):\n    if (not os.path.exists(self.paths.scripts())):\n        os.makedirs(self.paths.scripts())\n    new_scripts = []\n    for script in scripts:\n        script = os.path.expandvars(script)\n        if (not os.path.exists(script)):\n            raise RuntimeError(('Script %s does not exist' % script))\n        sanitized_name = script.replace('/', '_')\n        new_script_cur_path = os.path.expandvars(self.paths.scripts(sanitized_name))\n        shutil.copy(script, new_script_cur_path)\n        new_script_init_path = os.path.join('$LAGO_PREFIX_PATH', os.path.basename(self.paths.scripts()), sanitized_name)\n        new_scripts.append(new_script_init_path)\n    return new_scripts", "docstring": "Copy the given deploy scripts to the scripts dir in the prefix\n\nArgs:\nscripts(list of str): list of paths of the scripts to copy to the\nprefix\n\nReturns:\nlist of str: list with the paths to the copied scripts, with a\nprefixed with $LAGO_PREFIX_PATH so the full path is not\nhardcoded", "source": "codesearchnet"}
{"code": "def setup(template, version=None):\n    temple.check.is_git_ssh_path(template)\n    temple.check.not_in_git_repo()\n    repo_path = temple.utils.get_repo_path(template)\n    msg = 'You will be prompted for the parameters of your new project. Please read the docs at https:\n    print(msg)\n    (cc_repo_dir, config) = temple.utils.get_cookiecutter_config(template, version=version)\n    if (not version):\n        with temple.utils.cd(cc_repo_dir):\n            ret = temple.utils.shell('git rev-parse HEAD', stdout=subprocess.PIPE)\n            version = ret.stdout.decode('utf-8').strip()\n    _generate_files(repo_dir=cc_repo_dir, config=config, template=template, version=version)", "docstring": "Sets up a new project from a template\n\nNote that the `temple.constants.TEMPLE_ENV_VAR` is set to 'setup' during the duration\nof this function.\n\nArgs:\ntemplate (str): The git SSH path to a template\nversion (str, optional): The version of the template to use when updating. Defaults\nto the latest version", "source": "codesearchnet"}
{"code": "def create_bird_config_files(bird_configuration):\n    \n    for ip_version in bird_configuration:\n        \n        config_file = bird_configuration[ip_version]['config_file']\n        try:\n            touch(config_file)\n        except OSError as exc:\n            raise ValueError(\"failed to create {f}:{e}\"\n                             .format(f=config_file, e=exc))\n        if bird_configuration[ip_version]['keep_changes']:\n            history_dir = os.path.join(os.path.dirname(config_file), 'history')\n            try:\n                os.mkdir(history_dir)\n            except FileExistsError:\n                pass\n            except OSError as exc:\n                raise ValueError(\"failed to make directory {d} for keeping a \"\n                                 \"history of changes for {b}:{e}\"\n                                 .format(d=history_dir, b=config_file, e=exc))\n            else:\n                print(\"{d} is created\".format(d=history_dir))", "docstring": "Create bird configuration files per IP version.\n\nCreates bird configuration files if they don't exist. It also creates the\ndirectories where we store the history of changes, if this functionality is\nenabled.\n\nArguments:\nbird_configuration (dict): A dictionary with settings for bird.\n\nReturns:\nNone\n\nRaises:\nValueError if we can't create bird configuration files and the\ndirectory to store the history of changes in bird configuration file.", "source": "juraj-google-style"}
{"code": "def get_videos_for_course(course_id, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None):\n    \n    return _get_videos_for_filter(\n        {'courses__course_id': six.text_type(course_id), 'courses__is_hidden': False},\n        sort_field,\n        sort_dir,\n        pagination_conf,\n    )", "docstring": "Returns an iterator of videos for the given course id.\n\nArgs:\ncourse_id (String)\nsort_field (VideoSortField)\nsort_dir (SortDirection)\n\nReturns:\nA generator expression that contains the videos found, sorted by the\ngiven field and direction, with ties broken by edx_video_id to ensure a\ntotal order.", "source": "juraj-google-style"}
{"code": "def call(self, inputs, state):\n    original_shape = inputs.shape\n    if (len(original_shape) < 2):\n        inputs = tf.reshape(inputs, [1, (- 1)])\n    (out, state) = self.lstm_cell(inputs, state)\n    out = self.output_layer(out)\n    correct_shape = tf.concat((original_shape[:(- 1)], tf.shape(input=out)[(- 1):]), 0)\n    out = tf.reshape(out, correct_shape)\n    loc = out[(..., :self.dimensions)]\n    scale_diag = (tf.nn.softplus(out[(..., self.dimensions:)]) + 1e-05)\n    return (tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag), state)", "docstring": "Runs the model to generate a distribution for a single timestep.\n\nThis generates a batched MultivariateNormalDiag distribution using\nthe output of the recurrent model at the current timestep to\nparameterize the distribution.\n\nArgs:\ninputs: The sampled value of `z` at the previous timestep, i.e.,\n`z_{t-1}`, of shape [..., dimensions].\n`z_0` should be set to the empty matrix.\nstate: A tuple containing the (hidden, cell) state.\n\nReturns:\nA tuple of a MultivariateNormalDiag distribution, and the state of\nthe recurrent function at the end of the current timestep. The\ndistribution will have event shape [dimensions], batch shape\n[...], and sample shape [sample_shape, ..., dimensions].", "source": "codesearchnet"}
{"code": "def __init__(self, *rows, **kwargs):\n        \n        if not all([isinstance(r, Row) for r in rows]):\n            raise TypeError('All elements of Grid must be Row instances')\n        self.type = 'grid'\n        self.rows = rows", "docstring": "Init method.\n\nArgs:\n*rows (): the instances of Row.\n**kwargs (): not used.", "source": "juraj-google-style"}
{"code": "def get(self, url, params={}, headers={}, auth=(), certificate_path=None):\n    certificate_path = (certificate_path if certificate_path else False)\n    return self.session.get(url, params=params, headers=headers, verify=certificate_path, auth=auth, timeout=self.timeout)", "docstring": "Returns the response payload from the request to the given URL.\n\nArgs:\nurl (str): The URL for the WEB API that the request is being made too.\nparams (dict): Dictionary containing the query string parameters.\nheaders (dict): HTTP Headers that may be needed for the request.\nauth (tuple): User ID and password for Basic Auth\ncertificate_path (str): Path to the ssl certificate.\n\nReturns:\nresponse: (HttpResponse): Response object from requests.get api request", "source": "codesearchnet"}
{"code": "def from_csv(input_csv, headers=None, schema_file=None):\n    if (headers is not None):\n        names = headers\n    elif (schema_file is not None):\n        with _util.open_local_or_gcs(schema_file, mode='r') as f:\n            schema = json.load(f)\n        names = [x['name'] for x in schema]\n    else:\n        raise ValueError('Either headers or schema_file is needed')\n    all_files = _util.glob_files(input_csv)\n    all_df = []\n    for file_name in all_files:\n        with _util.open_local_or_gcs(file_name, mode='r') as f:\n            all_df.append(pd.read_csv(f, names=names))\n    df = pd.concat(all_df, ignore_index=True)\n    if (('target' not in df) or ('predicted' not in df)):\n        raise ValueError('Cannot find \"target\" or \"predicted\" column')\n    labels = sorted((set(df['target']) | set(df['predicted'])))\n    cm = confusion_matrix(df['target'], df['predicted'], labels=labels)\n    return ConfusionMatrix(cm, labels)", "docstring": "Create a ConfusionMatrix from a csv file.\n\nArgs:\ninput_csv: Path to a Csv file (with no header). Can be local or GCS path.\nheaders: Csv headers. If present, it must include 'target' and 'predicted'.\nschema_file: Path to a JSON file containing BigQuery schema. Used if \"headers\" is None.\nIf present, it must include 'target' and 'predicted' columns.\nReturns:\nA ConfusionMatrix that can be plotted.\nRaises:\nValueError if both headers and schema_file are None, or it does not include 'target'\nor 'predicted' columns.", "source": "codesearchnet"}
{"code": "def create_ingress_rule(self, app, rule):\n    if isinstance(rule, dict):\n        start_port = rule.get('start_port')\n        end_port = rule.get('end_port')\n        protocol = rule.get('protocol', 'tcp')\n        requested_cross_account = rule.get('env', self.env)\n        if (self.env == requested_cross_account):\n            cross_account_env = None\n            cross_account_vpc_id = None\n        else:\n            cross_account_env = requested_cross_account\n            cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)\n    else:\n        start_port = rule\n        end_port = rule\n        protocol = 'tcp'\n        cross_account_env = None\n        cross_account_vpc_id = None\n    created_rule = {'app': app, 'start_port': start_port, 'end_port': end_port, 'protocol': protocol, 'cross_account_env': cross_account_env, 'cross_account_vpc_id': cross_account_vpc_id}\n    self.log.debug('Normalized ingress rule: %s', created_rule)\n    return created_rule", "docstring": "Create a normalized ingress rule.\n\nArgs:\napp (str): Application name\nrule (dict or int): Allowed Security Group ports and protocols.\n\nReturns:\ndict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id", "source": "codesearchnet"}
{"code": "def GetUsername(self, event, default_username='-'):\n    username = getattr(event, 'username', None)\n    if (username and (username != '-')):\n        return username\n    session_identifier = event.GetSessionIdentifier()\n    if (session_identifier is None):\n        return default_username\n    user_sid = getattr(event, 'user_sid', None)\n    username = self._knowledge_base.GetUsernameByIdentifier(user_sid, session_identifier=session_identifier)\n    return (username or default_username)", "docstring": "Retrieves the username related to the event.\n\nArgs:\nevent (EventObject): event.\ndefault_username (Optional[str]): default username.\n\nReturns:\nstr: username.", "source": "codesearchnet"}
{"code": "def GetScripts(self, dest_dir):\n    \n    metadata_dict = self.watcher.GetMetadata() or {}\n\n    try:\n      instance_data = metadata_dict['instance']['attributes']\n    except KeyError:\n      instance_data = None\n      self.logger.warning('Instance attributes were not found.')\n\n    try:\n      project_data = metadata_dict['project']['attributes']\n    except KeyError:\n      project_data = None\n      self.logger.warning('Project attributes were not found.')\n\n    return (self._GetAttributeScripts(instance_data, dest_dir)\n            or self._GetAttributeScripts(project_data, dest_dir))", "docstring": "Retrieve the scripts to execute.\n\nArgs:\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\ndict, a dictionary mapping set metadata keys with associated scripts.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs):\n    if nest.is_sequence(inputs):\n        merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]\n        return nest.pack_sequence_as(inputs, merged_tensors)\n    return self._merge(inputs)", "docstring": "Connects the MergeDims module into the graph.\n\nArgs:\ninputs: Tensor or a nested list of Tensors to merge. Its rank must be\ngreater than or equal to `start` + `size`.\n\nReturns:\nThe merged Tensor or a nested list of merged Tensors.\n\nRaises:\nValueError: If any of the `inputs` tensors has insufficient rank.", "source": "codesearchnet"}
{"code": "def __init__(self, asynchronous_correlation_value=None):\n        \n        super(PollRequestPayload, self).__init__(\n            enums.Tags.REQUEST_PAYLOAD\n        )\n\n        self._asynchronous_correlation_value = None\n        self.asynchronous_correlation_value = asynchronous_correlation_value", "docstring": "Construct a Poll request payload struct.\n\nArgs:\nasynchronous_correlation_value (bytes): The ID of a pending\noperation to poll the status of, in bytes. Optional, defaults\nto None.", "source": "juraj-google-style"}
{"code": "def unpackVersion(ver):\n    major = ((ver >> (20 * 2)) & mask20)\n    minor = ((ver >> 20) & mask20)\n    patch = (ver & mask20)\n    return (major, minor, patch)", "docstring": "Unpack a system normalized integer representing a softare version into its component parts.\n\nArgs:\nver (int): System normalized integer value to unpack into a tuple.\n\nReturns:\n(int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.", "source": "codesearchnet"}
{"code": "def __init__(self, cache_folder, genome_build):\n        \n        \n        self.cache = EnsemblCache(cache_folder, genome_build)\n        \n        self.prior_time = time.time() - 1\n        self.rate_limit = 0.067\n        \n        server_dict = {\"grch37\": \"grch37.\", \"grch38\": \"\"}\n        \n        self.server = \"http:\n        \n        self.check_ensembl_api_version()", "docstring": "obtain the sequence for a transcript from ensembl\n\nArgs:\ncache_folder: path to folder for caching data requested from Ensembl\ngenome_build: string indicating the genome build (\"grch37\" or \"grch38\")", "source": "juraj-google-style"}
{"code": "def parse_cytoband(lines):\n    \n    cytobands = {}\n    for line in lines:\n        line = line.rstrip()\n        splitted_line = line.split('\\t')\n        chrom = splitted_line[0].lstrip('chr')\n        start = int(splitted_line[1])\n        stop = int(splitted_line[2])\n        name = splitted_line[3]\n        if chrom in cytobands:\n            \n            cytobands[chrom][start:stop] = name\n        else:\n            \n            new_tree = intervaltree.IntervalTree()\n            \n            new_tree[start:stop] = name\n            \n            cytobands[chrom] = new_tree\n    \n    return cytobands", "docstring": "Parse iterable with cytoband coordinates\n\n\nArgs:\nlines(iterable): Strings on format \"chr1\\t2300000\\t5400000\\tp36.32\\tgpos25\"\n\nReturns:\ncytobands(dict): Dictionary with chromosome names as keys and\ninterval trees as values", "source": "juraj-google-style"}
{"code": "def check_phonefy(self, query, kwargs={}):\n        \n        data = self.launchQueryForMode(query=query, mode=\"phonefy\")\n        if self._somethingFound(data, mode=\"phonefy\"):\n            return data\n        return None", "docstring": "Verifying a mailfy query in this platform.\n\nThis might be redefined in any class inheriting from Platform. The only\ncondition is that any of this should return a dictionary as defined.\n\nArgs:\n-----\nquery: The element to be searched.\nkwargs: Dictionary with extra parameters. Just in case.\n\nReturn:\n-------\nReturns the collected data if exists or None if not.", "source": "juraj-google-style"}
{"code": "def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):\n    strides = [1] + strides + [1]\n    rates = [1] + rates + [1]\n    with self.cached_session(use_gpu=use_gpu):\n        out_tensor = nn_ops.erosion2d(constant_op.constant(image), constant_op.constant(kernel), strides=strides, rates=rates, padding=padding, name='erosion2d')\n        self.assertAllClose(out, self.evaluate(out_tensor))", "docstring": "Verifies the output values of the erosion function.\n\nArgs:\nimage: Input tensor with shape: [batch, in_height, in_width, channels].\nkernel: Filter tensor with shape: [filter_height, filter_width, channels].\nstrides: Output strides, specified as [stride_height, stride_width].\nrates: Atrous rates, specified as [rate_height, rate_width].\npadding: Padding type.\nout: Expected output.\nuse_gpu: Whether we are running on GPU.", "source": "github-repos"}
{"code": "def export_as_file(self, file_path, cv_source):\n        \n        if os.path.exists(file_path):\n            raise exceptions.UserError('{} already exists'.format(file_path))\n\n        with open(file_path, 'wb') as f:\n            f.write(self.export_as_code(cv_source).encode('utf8'))", "docstring": "Export the ensemble as a single Python file and saves it to `file_path`.\n\nThis is EXPERIMENTAL as putting different modules together would probably wreak havoc\nespecially on modules that make heavy use of global variables.\n\nArgs:\nfile_path (str, unicode): Absolute/local path of place to save file in\n\ncv_source (str, unicode): String containing actual code for base learner\ncross-validation used to generate secondary meta-features.", "source": "juraj-google-style"}
{"code": "def iter_package_families(paths=None):\n    for path in (paths or config.packages_path):\n        repo = package_repository_manager.get_repository(path)\n        for resource in repo.iter_package_families():\n            (yield PackageFamily(resource))", "docstring": "Iterate over package families, in no particular order.\n\nNote that multiple package families with the same name can be returned.\nUnlike packages, families later in the searchpath are not hidden by earlier\nfamilies.\n\nArgs:\npaths (list of str, optional): paths to search for package families,\ndefaults to `config.packages_path`.\n\nReturns:\n`PackageFamily` iterator.", "source": "codesearchnet"}
{"code": "def add_table(self, table):\n        \n\n        \n        \n        import astropy\n        \n        table_array = table.__array__()\n        self.table_keys= table.keys()\n        table_columns= []\n        for i in range(0,len(table.columns[0])):\n            row_data = []\n\n            \n            \n            for item in table_array[i]:\n                if isinstance(item, bytes):\n                    row_data.append(item.decode('utf-8'))\n                else:\n                    row_data.append(item)\n            table_columns.append(row_data)\n\n        self.table_columns = table_columns\n        self.table_flag= not self.table_flag", "docstring": "load a VOTable -already accessible on the python side- into the widget\nArgs:\ntable: votable object", "source": "juraj-google-style"}
{"code": "def generate(self, model_len=None, model_width=None):\n        \n        if model_len is None:\n            model_len = Constant.MODEL_LEN\n        if model_width is None:\n            model_width = Constant.MODEL_WIDTH\n        if isinstance(model_width, list) and not len(model_width) == model_len:\n            raise ValueError(\"The length of 'model_width' does not match 'model_len'\")\n        elif isinstance(model_width, int):\n            model_width = [model_width] * model_len\n\n        graph = Graph(self.input_shape, False)\n        output_node_id = 0\n        n_nodes_prev_layer = self.input_shape[0]\n        for width in model_width:\n            output_node_id = graph.add_layer(\n                StubDense(n_nodes_prev_layer, width), output_node_id\n            )\n            output_node_id = graph.add_layer(\n                StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id\n            )\n            output_node_id = graph.add_layer(StubReLU(), output_node_id)\n            n_nodes_prev_layer = width\n\n        graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)\n        return graph", "docstring": "Generates a Multi-Layer Perceptron.\nArgs:\nmodel_len: An integer. Number of hidden layers.\nmodel_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the\nnumber of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this\nvalue.\nReturns:\nAn instance of the class Graph. Represents the neural architecture graph of the generated model.", "source": "juraj-google-style"}
{"code": "def delete_course_completion(self, user_id, payload):\n    return self._delete(urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.completion_status_api_path), payload, self.COMPLETION_PROVIDER_SCOPE)", "docstring": "Delete a completion status previously sent to the Degreed Completion Status endpoint\n\nArgs:\nuser_id: Unused.\npayload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)\ncontaining the required completion status fields for deletion per Degreed documentation.\n\nReturns:\nA tuple containing the status code and the body of the response.\nRaises:\nHTTPError: if we received a failure response code from Degreed", "source": "codesearchnet"}
{"code": "def _list_all_concrete_functions_for_serialization(self):\n    seen_signatures = []\n    if self.input_signature is not None:\n        seen_signatures.append((self.input_signature, {}))\n    else:\n        concrete_functions = self._list_all_concrete_functions()\n        for concrete_function in concrete_functions:\n            signature = concrete_function.structured_input_signature\n            flattened = nest.flatten(signature)\n            if any((isinstance(arg, func_graph_module.UnknownArgument) for arg in flattened)):\n                logging.info('Unsupported signature for serialization: %s.', signature)\n                continue\n            equal_to_signature = functools.partial(function_type_utils.is_same_structure, signature, check_values=True)\n            if not any((equal_to_signature(s) for s in seen_signatures)):\n                seen_signatures.append(signature)\n    concrete_functions = []\n    for args, kwargs in seen_signatures:\n        concrete_functions.append(self.get_concrete_function(*args, **kwargs))\n    return concrete_functions", "docstring": "Returns all concrete functions for serialization.\n\nReturns:\nA list of instances of `ConcreteFunction`.", "source": "github-repos"}
{"code": "def whoami(self) -> dict:\n        \n        if not self.access_token:\n            return {}\n        self._try_refresh_access_token()\n        return self.session.get(self.WHOAMI_URL).json()", "docstring": "Returns the basic information about the authenticated character.\n\nObviously doesn't do anything if this Preston instance is not\nauthenticated, so it returns an empty dict.\n\nArgs:\nNone\n\nReturns:\ncharacter info if authenticated, otherwise an empty dict", "source": "juraj-google-style"}
{"code": "def clean(self, force: bool=False):\n        \n        with (yield from self._lock):\n            for connection in tuple(self.ready):\n                if force or connection.closed():\n                    connection.close()\n                    self.ready.remove(connection)", "docstring": "Clean closed connections.\n\nArgs:\nforce: Clean connected and idle connections too.\n\nCoroutine.", "source": "juraj-google-style"}
{"code": "def markdown_to_safe_html(markdown_string):\n    warning = ''\n    if isinstance(markdown_string, six.binary_type):\n        markdown_string_decoded = markdown_string.decode('utf-8')\n        markdown_string = markdown_string_decoded.replace(u'\\x00', u'')\n        num_null_bytes = (len(markdown_string_decoded) - len(markdown_string))\n        if num_null_bytes:\n            warning = ('<!-- WARNING: discarded %d null bytes in markdown string after UTF-8 decoding -->\\n' % num_null_bytes)\n    string_html = markdown.markdown(markdown_string, extensions=['markdown.extensions.tables'])\n    string_sanitized = bleach.clean(string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES)\n    return (warning + string_sanitized)", "docstring": "Convert Markdown to HTML that's safe to splice into the DOM.\n\nArguments:\nmarkdown_string: A Unicode string or UTF-8--encoded bytestring\ncontaining Markdown source. Markdown tables are supported.\n\nReturns:\nA string containing safe HTML.", "source": "codesearchnet"}
{"code": "def __intervals_from_tops(self,\n                              tops,\n                              values,\n                              basis,\n                              components,\n                              field=None,\n                              ignore_nan=True):\n        \n        \n        length = float(basis.size)\n        start, stop = basis[0], basis[-1]\n        tops = [start + (p/(length-1)) * (stop-start) for p in tops]\n        bases = tops[1:] + [stop]\n\n        list_of_Intervals = []\n        for i, t in enumerate(tops):\n\n            v, c, d = values[i], [], {}\n\n            if ignore_nan and np.isnan(v):\n                continue\n\n            if (field is not None):\n                d = {field: v}\n\n            if components is not None:\n                try:\n                    c = [deepcopy(components[int(v)])]\n                except IndexError:\n                    c = []\n\n                if c and (c[0] is None):\n                    c = []\n\n            interval = Interval(t, bases[i], data=d, components=c)\n            list_of_Intervals.append(interval)\n\n        return list_of_Intervals", "docstring": "Private method. Take a sequence of tops in an arbitrary dimension,\nand provide a list of intervals from which a striplog can be made.\n\nThis is only intended to be used by ``from_image()``.\n\nArgs:\ntops (iterable). A list of floats.\nvalues (iterable). A list of values to look up.\nbasis (iterable). A list of components.\ncomponents (iterable). A list of Components.\n\nReturns:\nList. A list of Intervals.", "source": "juraj-google-style"}
{"code": "def __str__(self):\n        \n        name = self.__class__.__name__\n        return '%s(Handle %d, Address %d)' % (name, self.Handle, self.Addr)", "docstring": "Returns a formatted string describing the breakpoint.\n\nArgs:\nself (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance\n\nReturns:\nStirng representation of the breakpoint.", "source": "juraj-google-style"}
{"code": "def put(self, rid, data, raise_on_error=True):\n        \n        response_data = None\n        headers = {'Content-Type': 'application/json', 'DB-Method': 'PUT'}\n        url = '/v2/exchange/db/{}/{}/{}'.format(self.domain, self.data_type, rid)\n        r = self.tcex.session.post(url, json=data, headers=headers)\n        self.tcex.log.debug('datastore put status code: {}'.format(r.status_code))\n        if r.ok and 'application/json' in r.headers.get('content-type', ''):\n            response_data = r.json()\n        else:\n            error = r.text or r.reason\n            self.tcex.handle_error(805, ['put', r.status_code, error], raise_on_error)\n        return response_data", "docstring": "Update the data for the provided Id.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): A search query\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "juraj-google-style"}
{"code": "def sunrise(self, date=None, zenith=None):\n    return (segment.sunrise(date, zenith) for segment in self)", "docstring": "Calculate sunrise times for locations.\n\nArgs:\ndate (datetime.date): Calculate rise or set for given date\nzenith (str): Calculate sunrise events, or end of twilight\nReturns:\nlist of list of datetime.datetime: The time for the sunrise for\neach point in each segment", "source": "codesearchnet"}
{"code": "def clean_df(df, header=None, **read_csv_kwargs):\n    \n    df = read_csv(df, header=header, **read_csv_kwargs)\n    df = df.fillna(' ')\n    for col in df.columns:\n        df[col] = df[col].apply(unicode2ascii)\n    return df", "docstring": "Convert UTF8 characters in a CSV file or dataframe into ASCII\n\nArgs:\ndf (DataFrame or str): DataFrame or path or url to CSV", "source": "juraj-google-style"}
{"code": "def register_handler(self, handler, event_name, args):\n    if self.started:\n        raise IllegalStateError(\"Can't register service after polling is started\")\n    self.lock.acquire()\n    try:\n        if (event_name in self.handlers):\n            raise DuplicateError('A handler for {} already exists'.format(event_name))\n        self.handlers[event_name] = (handler, args)\n    finally:\n        self.lock.release()", "docstring": "Registers an event handler.\n\nOne type of event can only have one event handler associated with it.\n\nArgs:\nhandler: The event handler function to be registered.\nevent_name: Name of the event the handler is for.\nargs: User arguments to be passed to the handler when it's called.\n\nRaises:\nIllegalStateError: Raised if attempts to register a handler after\nthe dispatcher starts running.\nDuplicateError: Raised if attempts to register more than one\nhandler for one type of event.", "source": "codesearchnet"}
{"code": "def change_extension(self, filepath, new_extension):\n    (filename, ext) = os.path.splitext(filepath)\n    return '.'.join([filename, new_extension])", "docstring": "Change final filename extension.\n\nArgs:\nfilepath (str): A file path (relative or absolute).\nnew_extension (str): New extension name (without leading dot) to\napply.\n\nReturns:\nstr: Filepath with new extension.", "source": "codesearchnet"}
{"code": "def from_json(data):\n        \n        memfiles = InMemoryFiles()\n        memfiles.files = json.loads(data)\n        return memfiles", "docstring": "Convert JSON into a in memory file storage.\n\nArgs:\ndata (str): valid JSON with path and filenames and\nthe base64 encoding of the file content.\n\nReturns:\nInMemoryFiles: in memory file storage", "source": "juraj-google-style"}
{"code": "def write(self, name, **data):\n    data['name'] = name\n    if (not ('timestamp' in data)):\n        data['timestamp'] = datetime.utcnow()\n    try:\n        self.producer.send(topic=self.topic, value=data)\n        self.producer.flush()\n    except (KafkaTimeoutError, NoBrokersAvailable) as exc:\n        logger.warning('writing metric %r failure %r', data, exc)", "docstring": "Write the metric to kafka\n\nArgs:\nname (str): The name of the metric to write\ndata (dict): Additional data to store with the metric", "source": "codesearchnet"}
{"code": "def tap(self, locator, x_offset=None, y_offset=None, count=1):\n        \n        driver = self._current_application()\n        el = self._element_find(locator, True, True)\n        action = TouchAction(driver)\n        action.tap(el,x_offset,y_offset, count).perform()", "docstring": "Tap element identified by ``locator``.\n\nArgs:\n- ``x_offset`` - (optional) x coordinate to tap, relative to the top left corner of the element.\n- ``y_offset`` - (optional) y coordinate. If y is used, x must also be set, and vice versa\n- ``count`` - can be used for multiple times of tap on that element", "source": "juraj-google-style"}
{"code": "def subscribe_registration_ids_to_topic(self, registration_ids, topic_name):\n    url = 'https:\n    payload = {'to': ('/topics/' + topic_name), 'registration_tokens': registration_ids}\n    response = self.requests_session.post(url, json=payload)\n    if (response.status_code == 200):\n        return True\n    elif (response.status_code == 400):\n        error = response.json()\n        raise InvalidDataError(error['error'])\n    else:\n        raise FCMError()", "docstring": "Subscribes a list of registration ids to a topic\n\nArgs:\nregistration_ids (list): ids to be subscribed\ntopic_name (str): name of topic\n\nReturns:\nTrue: if operation succeeded\n\nRaises:\nInvalidDataError: data sent to server was incorrectly formatted\nFCMError: an error occured on the server", "source": "codesearchnet"}
{"code": "def to_dataframe(self, start_row=0, max_rows=None):\n    \n    fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)\n    count = 0\n    page_token = None\n    df = None\n    while True:\n      page_rows, page_token = fetcher(page_token, count)\n      if len(page_rows):\n        count += len(page_rows)\n        if df is None:\n          df = pandas.DataFrame.from_records(page_rows)\n        else:\n          df = df.append(page_rows, ignore_index=True)\n      if not page_token:\n        break\n\n    \n    ordered_fields = [field.name for field in self.schema]\n    return df[ordered_fields] if df is not None else pandas.DataFrame()", "docstring": "Exports the table to a Pandas dataframe.\n\nArgs:\nstart_row: the row of the table at which to start the export (default 0)\nmax_rows: an upper limit on the number of rows to export (default None)\nReturns:\nA Pandas dataframe containing the table data.", "source": "juraj-google-style"}
{"code": "def is_value_type_valid_for_exact_conditions(self, value):\n    if (isinstance(value, string_types) or isinstance(value, (numbers.Integral, float))):\n        return True\n    return False", "docstring": "Method to validate if the value is valid for exact match type evaluation.\n\nArgs:\nvalue: Value to validate.\n\nReturns:\nBoolean: True if value is a string, boolean, or number. Otherwise False.", "source": "codesearchnet"}
{"code": "def get_end_time_metric(result: PipelineResult, namespace: str, name: str) -> int:\n    distributions = result.metrics().query(MetricsFilter().with_namespace(namespace).with_name(name))['distributions']\n    max_list = list(map(lambda m: m.result.max, distributions))\n    return max(max_list) if len(max_list) > 0 else -1", "docstring": "get the end time out of all times recorded by the specified distribution\nmetric\n\nArgs:\nresult: the PipelineResult which metrics are read from\nnamespace: a string representing the namespace of wanted metric\nname: a string representing the  name of the wanted metric\n\nReturns:\nthe largest time in the metric or -1 if it doesn't exist", "source": "github-repos"}
{"code": "def as_tuning_range(self, name):\n        \n        return {'Name': name,\n                'MinValue': to_str(self.min_value),\n                'MaxValue': to_str(self.max_value),\n                'ScalingType': self.scaling_type}", "docstring": "Represent the parameter range as a dicionary suitable for a request to\ncreate an Amazon SageMaker hyperparameter tuning job.\n\nArgs:\nname (str): The name of the hyperparameter.\n\nReturns:\ndict[str, str]: A dictionary that contains the name and values of the hyperparameter.", "source": "juraj-google-style"}
{"code": "def clean_channel_worker_username(self):\n    channel_worker_username = self.cleaned_data['channel_worker_username'].strip()\n    try:\n        User.objects.get(username=channel_worker_username)\n    except User.DoesNotExist:\n        raise ValidationError(ValidationMessages.INVALID_CHANNEL_WORKER.format(channel_worker_username=channel_worker_username))\n    return channel_worker_username", "docstring": "Clean enterprise channel worker user form field\n\nReturns:\nstr: the cleaned value of channel user username for transmitting courses metadata.", "source": "codesearchnet"}
{"code": "def _build_trial_meta(cls, expr_dir):\n        \n        meta_file = os.path.join(expr_dir, EXPR_META_FILE)\n        meta = parse_json(meta_file)\n\n        if not meta:\n            job_id = expr_dir.split(\"/\")[-2]\n            trial_id = expr_dir[-8:]\n            params = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))\n            meta = {\n                \"trial_id\": trial_id,\n                \"job_id\": job_id,\n                \"status\": \"RUNNING\",\n                \"type\": \"TUNE\",\n                \"start_time\": os.path.getctime(expr_dir),\n                \"end_time\": None,\n                \"progress_offset\": 0,\n                \"result_offset\": 0,\n                \"params\": params\n            }\n\n        if not meta.get(\"start_time\", None):\n            meta[\"start_time\"] = os.path.getctime(expr_dir)\n\n        if isinstance(meta[\"start_time\"], float):\n            meta[\"start_time\"] = timestamp2date(meta[\"start_time\"])\n\n        if meta.get(\"end_time\", None):\n            meta[\"end_time\"] = timestamp2date(meta[\"end_time\"])\n\n        meta[\"params\"] = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))\n\n        return meta", "docstring": "Build meta file for trial.\n\nArgs:\nexpr_dir (str): Directory path of the experiment.\n\nReturn:\nA dict of trial meta info.", "source": "juraj-google-style"}
{"code": "def parse_expression(src):\n    src = STANDARD_PREAMBLE + src.strip()\n    node = parse(src, preamble_len=STANDARD_PREAMBLE_LEN, single_node=True)\n    if __debug__:\n        if not isinstance(node, gast.Expr):\n            raise ValueError('expected exactly one node of type Expr, got {}'.format(node))\n    return node.value", "docstring": "Returns the AST of given identifier.\n\nArgs:\nsrc: A piece of code that represents a single Python expression\nReturns:\nA gast.AST object.\nRaises:\nValueError: if src does not consist of a single Expression.", "source": "github-repos"}
{"code": "def transpose(vari):\n    \n    if isinstance(vari, Poly):\n        core = vari.A.copy()\n        for key in vari.keys:\n            core[key] = transpose(core[key])\n        return Poly(core, vari.dim, vari.shape[::-1], vari.dtype)\n\n    return numpy.transpose(vari)", "docstring": "Transpose a shapeable quantety.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nQuantety of interest.\n\nReturns:\n(chaospy.poly.base.Poly, numpy.ndarray):\nSame type as ``vari``.\n\nExamples:\n>>> P = chaospy.reshape(chaospy.prange(4), (2,2))\n>>> print(P)\n[[1, q0], [q0^2, q0^3]]\n>>> print(chaospy.transpose(P))\n[[1, q0^2], [q0, q0^3]]", "source": "juraj-google-style"}
{"code": "def read_nose(in_file):\n    suites = {}\n    doc_xml = minidom.parse(in_file)\n    suite_xml = doc_xml.getElementsByTagName('testsuite')[0]\n    for case_xml in suite_xml.getElementsByTagName('testcase'):\n        classname = case_xml.getAttribute('classname')\n        if (classname not in suites):\n            suites[classname] = []\n        case = {'name': case_xml.getAttribute('name'), 'time': float(case_xml.getAttribute('time'))}\n        skipped_xml = case_xml.getElementsByTagName('skipped')\n        if skipped_xml:\n            if skipped_xml[0].hasAttribute('type'):\n                type = skipped_xml[0].getAttribute('type')\n            else:\n                type = ''\n            case['skipped'] = {'type': type, 'message': skipped_xml[0].getAttribute('message'), 'text': ''.join([child.nodeValue for child in skipped_xml[0].childNodes])}\n        failure_xml = case_xml.getElementsByTagName('failure')\n        if failure_xml:\n            if failure_xml[0].hasAttribute('type'):\n                type = failure_xml[0].getAttribute('type')\n            else:\n                type = ''\n            case['failure'] = {'type': type, 'message': failure_xml[0].getAttribute('message'), 'text': ''.join([child.nodeValue for child in failure_xml[0].childNodes])}\n        error_xml = case_xml.getElementsByTagName('error')\n        if error_xml:\n            if error_xml[0].hasAttribute('type'):\n                type = error_xml[0].getAttribute('type')\n            else:\n                type = ''\n            case['error'] = {'type': type, 'message': error_xml[0].getAttribute('message'), 'text': ''.join([child.nodeValue for child in error_xml[0].childNodes])}\n        suites[classname].append(case)\n    return suites", "docstring": "Parse nose-style test reports into a `dict`\n\nArgs:\nin_file (:obj:`str`): path to nose-style test report\n\nReturns:\n:obj:`dict`: dictionary of test suites", "source": "codesearchnet"}
{"code": "def add(self, index):\n    if ((index - self.flush_at) < self.interval):\n        return\n    now = time.time()\n    elapsed = (now - self.lap)\n    elapsed_total = (now - self.start)\n    it = (index - self.flush_at)\n    self.lap = now\n    if self.verbose:\n        logger.info('iter={} {{{}}}={}[sec/{}iter] {}[sec]'.format(index, self.name, elapsed, it, elapsed_total))\n    if (self.fd is not None):\n        print('{} {} {} {}'.format(index, elapsed, it, elapsed_total), file=self.fd)\n    self.flush_at = index", "docstring": "Calculate time elapsed from the point previously called\nthis method or this object is created to this is called.\n\nArgs:\nindex (int): Index to be displayed, and be used to take intervals.", "source": "codesearchnet"}
{"code": "def _convert_and_export_metrics(self, convert_func, *args, **kwargs):\n    self._increase_conversion_attempt_metric()\n    self._save_conversion_params_metric()\n    start_time = time.process_time()\n    result = convert_func(self, *args, **kwargs)\n    elapsed_time_ms = (time.process_time() - start_time) * 1000\n    if result:\n        self._increase_conversion_success_metric()\n    self._set_conversion_latency_metric(round(elapsed_time_ms))\n    self._tflite_metrics.export_metrics()\n    if self.exclude_conversion_metadata or self._experimental_use_buffer_offset:\n        return result\n    model_object = flatbuffer_utils.convert_bytearray_to_object(result)\n    if _check_model_use_buffer_offset(model_object):\n        return result\n    sparsity_modes = _get_sparsity_modes(model_object)\n    model_hash = _get_model_hash(model_object)\n    self._metadata.options.modelOptimizationModes.extend(sparsity_modes)\n    self._metadata.environment.modelHash = model_hash\n    model_object = _populate_conversion_metadata(model_object, self._metadata)\n    return flatbuffer_utils.convert_object_to_bytearray(model_object)", "docstring": "Wraps around convert function to export metrics.\n\nArgs:\nconvert_func: The convert function to wrap.\n*args: Positional arguments of the convert function.\n**kwargs: The keyword arguments of the convert function.\n\nReturns:\nThe decorator to wrap the convert function.", "source": "github-repos"}
{"code": "def values(self, *args):\n        \n        return [dict(zip(args, values_list))\n                for values_list in self.values_list(flatten=False, *args)]", "docstring": "Returns list of dicts (field names as keys) for given fields.\n\nArgs:\n\\*args: List of fields to be returned as dict.\n\nReturns:\nlist of dicts for given fields.\n\nExample:\n>>> Person.objects.filter(age__gte=16, name__startswith='jo').values('name', 'lastname')", "source": "juraj-google-style"}
{"code": "def _get_path_for_op_id(self, id: str) -> Optional[str]:\n        \n        for path_key, path_value in self._get_spec()['paths'].items():\n            for method in self.METHODS:\n                if method in path_value:\n                    if self.OPERATION_ID_KEY in path_value[method]:\n                        if path_value[method][self.OPERATION_ID_KEY] == id:\n                            return path_key\n        return None", "docstring": "Searches the spec for a path matching the operation id.\n\nArgs:\nid: operation id\n\nReturns:\npath to the endpoint, or None if not found", "source": "juraj-google-style"}
{"code": "def guid(valu=None):\n    \n    if valu is None:\n        return binascii.hexlify(os.urandom(16)).decode('utf8')\n    \n    byts = s_msgpack.en(valu)\n    return hashlib.md5(byts).hexdigest()", "docstring": "Get a 16 byte guid value.\n\nBy default, this is a random guid value.\n\nArgs:\nvalu: Object used to construct the guid valu from.  This must be able\nto be msgpack'd.\n\nReturns:\nstr: 32 character, lowercase ascii string.", "source": "juraj-google-style"}
{"code": "def get_preprocessor(model_name: str) -> Optional[Union['AutoTokenizer', 'AutoFeatureExtractor', 'AutoProcessor']]:\n    from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer\n    try:\n        return AutoProcessor.from_pretrained(model_name)\n    except (ValueError, OSError, KeyError):\n        tokenizer, feature_extractor = (None, None)\n        try:\n            tokenizer = AutoTokenizer.from_pretrained(model_name)\n        except (OSError, KeyError):\n            pass\n        try:\n            feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\n        except (OSError, KeyError):\n            pass\n        if tokenizer is not None and feature_extractor is not None:\n            raise ValueError(f\"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor.\")\n        elif tokenizer is None and feature_extractor is None:\n            return None\n        elif tokenizer is not None:\n            return tokenizer\n        else:\n            return feature_extractor", "docstring": "Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`.\n\nArgs:\nmodel_name (`str`): Name of the model for which a preprocessor are loaded.\n\nReturns:\n`Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`:\nIf a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is\nreturned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns\n`None` if no preprocessor is found.", "source": "github-repos"}
{"code": "def _maybe_repeat(self, x):\n    \n    if isinstance(x, list):\n      assert len(x) == self.n\n      return x\n    else:\n      return [x] * self.n", "docstring": "Utility function for processing arguments that are singletons or lists.\n\nArgs:\nx: either a list of self.n elements, or not a list.\n\nReturns:\na list of self.n elements.", "source": "juraj-google-style"}
{"code": "def __set_unkown_effect(self, hgvs_string):\n        \n        \n        unknown_effect_list = ['?', '(=)', '=']  \n        if hgvs_string in unknown_effect_list:\n            self.unknown_effect = True\n        elif \"(\" in hgvs_string:\n            \n            self.unknown_effect = True\n        else:\n            self.unknown_effect = False\n\n        \n        \n        \n        if \"?\" in hgvs_string:\n            self.is_missing_info = True\n        else:\n            self.is_missing_info = False", "docstring": "Sets a flag for unkown effect according to HGVS syntax. The\nCOSMIC database also uses unconventional questionmarks to denote\nmissing information.\n\nArgs:\nhgvs_string (str): hgvs syntax with \"p.\" removed", "source": "juraj-google-style"}
{"code": "def CreateKey(self, private_key=None):\n    if (private_key is None):\n        private_key = bytes(Random.get_random_bytes(32))\n    key = KeyPair(priv_key=private_key)\n    self._keys[key.PublicKeyHash.ToBytes()] = key\n    return key", "docstring": "Create a KeyPair\n\nArgs:\nprivate_key (iterable_of_ints): (optional) 32 byte private key\n\nReturns:\nKeyPair: a KeyPair instance", "source": "codesearchnet"}
{"code": "def transform_tensor(self, tensor):\n        \n        dim = tensor.shape\n        rank = len(dim)\n        assert all([i == 3 for i in dim])\n        \n        lc = string.ascii_lowercase\n        indices = lc[:rank], lc[rank:2 * rank]\n        einsum_string = ','.join([a + i for a, i in zip(*indices)])\n        einsum_string += ',{}->{}'.format(*indices[::-1])\n        einsum_args = [self.rotation_matrix] * rank + [tensor]\n\n        return np.einsum(einsum_string, *einsum_args)", "docstring": "Applies rotation portion to a tensor. Note that tensor has to be in\nfull form, not the Voigt form.\n\nArgs:\ntensor (numpy array): a rank n tensor\n\nReturns:\nTransformed tensor.", "source": "juraj-google-style"}
{"code": "def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):\n    pids = []\n    for line in in_stream:\n        pid = line.strip()\n        if ((pid == '\n            continue\n    if (len(pids) < 2):\n        raise ValueError('Insufficient numbers of identifiers provided.')\n    logging.info('Read {} identifiers'.format(len(pids)))\n    ore = ResourceMap(base_url=base_url)\n    logging.info('ORE PID = {}'.format(pids[0]))\n    ore.initialize(pids[0])\n    logging.info('Metadata PID = {}'.format(pids[1]))\n    ore.addMetadataDocument(pids[1])\n    ore.addDataDocuments(pids[2:], pids[1])\n    return ore", "docstring": "Create a simple OAI-ORE Resource Map with one Science Metadata document and any\nnumber of Science Data objects, using a stream of PIDs.\n\nArgs:\nin_stream:\nThe first non-blank line is the PID of the resource map itself. Second line is\nthe science metadata PID and remaining lines are science data PIDs.\n\nExample stream contents:\n\n::\n\nPID_ORE_value\nsci_meta_pid_value\ndata_pid_1\ndata_pid_2\ndata_pid_3\n\nbase_url : str\nRoot of the DataONE environment in which the Resource Map will be used.\n\nReturns:\nResourceMap : OAI-ORE Resource Map", "source": "codesearchnet"}
{"code": "def from_rotation_and_translation_and_time_reversal(rotation_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), translation_vec=(0, 0, 0), time_reversal=1, tol=0.1):\n    symmop = SymmOp.from_rotation_and_translation(rotation_matrix=rotation_matrix, translation_vec=translation_vec, tol=tol)\n    return MagSymmOp.from_symmop(symmop, time_reversal)", "docstring": "Creates a symmetry operation from a rotation matrix, translation\nvector and time reversal operator.\n\nArgs:\nrotation_matrix (3x3 array): Rotation matrix.\ntranslation_vec (3x1 array): Translation vector.\ntime_reversal (int): Time reversal operator, +1 or -1.\ntol (float): Tolerance to determine if rotation matrix is valid.\n\nReturns:\nMagSymmOp object", "source": "codesearchnet"}
{"code": "def _ParsePropertiesXMLFile(self, xml_data):\n    xml_root = ElementTree.fromstring(xml_data)\n    properties = {}\n    for xml_element in xml_root.iter():\n        if (not xml_element.text):\n            continue\n        (_, _, name) = xml_element.tag.partition('}')\n        if (name == 'lpstr'):\n            continue\n        property_name = self._PROPERTY_NAMES.get(name, None)\n        if (not property_name):\n            property_name = self._FormatPropertyName(name)\n        properties[property_name] = xml_element.text\n    return properties", "docstring": "Parses a properties XML file.\n\nArgs:\nxml_data (bytes): data of a _rels/.rels XML file.\n\nReturns:\ndict[str, object]: properties.\n\nRaises:\nzipfile.BadZipfile: if the properties XML file cannot be read.", "source": "codesearchnet"}
{"code": "def __eq__(self, rhs):\n    \n\n    return (isinstance(rhs, MockMethod) and\n            self._name == rhs._name and\n            self._params == rhs._params and\n            self._named_params == rhs._named_params)", "docstring": "Test whether this MockMethod is equivalent to another MockMethod.\n\nArgs:\n# rhs: the right hand side of the test\nrhs: MockMethod", "source": "juraj-google-style"}
{"code": "def load_glossary(file_path: str, read_json=False) -> List[str]:\n    if read_json:\n        if file_path.endswith('.gz'):\n            return json.load(gzip.open(file_path))\n        return json.load(open(file_path))\n    return open(file_path).read().splitlines()", "docstring": "A glossary is a text file, one entry per line.\n\nArgs:\nfile_path (str): path to a text file containing a glossary.\nread_json (bool): set True if the glossary is in json format\nReturns: List of the strings in the glossary.", "source": "codesearchnet"}
{"code": "def wait(fs, timeout=None, return_when=ALL_COMPLETED):\n    with _AcquireFutures(fs):\n        done = set((f for f in fs if (f._state in [CANCELLED_AND_NOTIFIED, FINISHED])))\n        not_done = (set(fs) - done)\n        if ((return_when == FIRST_COMPLETED) and done):\n            return DoneAndNotDoneFutures(done, not_done)\n        elif ((return_when == FIRST_EXCEPTION) and done):\n            if any((f for f in done if ((not f.cancelled()) and (f.exception() is not None)))):\n                return DoneAndNotDoneFutures(done, not_done)\n        if (len(done) == len(fs)):\n            return DoneAndNotDoneFutures(done, not_done)\n        waiter = _create_and_install_waiters(fs, return_when)\n    waiter.event.wait(timeout)\n    for f in fs:\n        with f._condition:\n            f._waiters.remove(waiter)\n    done.update(waiter.finished_futures)\n    return DoneAndNotDoneFutures(done, (set(fs) - done))", "docstring": "Wait for the futures in the given sequence to complete.\n\nArgs:\nfs: The sequence of Futures (possibly created by different Executors) to\nwait upon.\ntimeout: The maximum number of seconds to wait. If None, then there\nis no limit on the wait time.\nreturn_when: Indicates when this function should return. The options\nare:\n\nFIRST_COMPLETED - Return when any future finishes or is\ncancelled.\nFIRST_EXCEPTION - Return when any future finishes by raising an\nexception. If no future raises an exception\nthen it is equivalent to ALL_COMPLETED.\nALL_COMPLETED -   Return when all futures finish or are cancelled.\n\nReturns:\nA named 2-tuple of sets. The first set, named 'done', contains the\nfutures that completed (is finished or cancelled) before the wait\ncompleted. The second set, named 'not_done', contains uncompleted\nfutures.", "source": "codesearchnet"}
{"code": "def ellipse_distance(item_a, time_a, item_b, time_b, max_value):\n    ts = np.array([0, np.pi])\n    ell_a = item_a.get_ellipse_model(time_a)\n    ell_b = item_b.get_ellipse_model(time_b)\n    ends_a = ell_a.predict_xy(ts)\n    ends_b = ell_b.predict_xy(ts)\n    distances = np.sqrt((((ends_a[(:, 0:1)] - ends_b[(:, 0:1)].T) ** 2) + ((ends_a[(:, 1:)] - ends_b[(:, 1:)].T) ** 2)))\n    return (np.minimum(distances[(0, 1)], max_value) / float(max_value))", "docstring": "Calculate differences in the properties of ellipses fitted to each object.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"}
{"code": "def get_subscribers(object_type: str) -> List[str]:\n    return DB.get_list(_keys.subscribers(object_type))", "docstring": "Get the list of subscribers to events of the object type.\n\nArgs:\nobject_type (str): Type of object.\n\nReturns:\nList[str], list of subscriber names.", "source": "codesearchnet"}
{"code": "def expand_recurring(number, repeat=5):\n    \n    if \"[\" in number:\n        pattern_index = number.index(\"[\")\n        pattern = number[pattern_index + 1:-1]\n        number = number[:pattern_index]\n        number = number + pattern * (repeat + 1)\n    return number", "docstring": "Expands a recurring pattern within a number.\n\nArgs:\nnumber(tuple): the number to process in the form:\n(int, int, int, ... \".\", ... , int int int)\nrepeat: the number of times to expand the pattern.\n\nReturns:\nThe original number with recurring pattern expanded.\n\nExample:\n>>> expand_recurring((1, \".\", 0, \"[\", 9, \"]\"), repeat=3)\n(1, '.', 0, 9, 9, 9, 9)", "source": "juraj-google-style"}
{"code": "def result_code(self, value):\n        \n        if value == self._defaults['resultCode'] and 'resultCode' in self._values:\n            del self._values['resultCode']\n        else:\n            self._values['resultCode'] = value", "docstring": "The result_code property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, number, aliases=None, description=None):\n    \n    super(EnumerationValue, self).__init__()\n    self.aliases = aliases or []\n    self.description = description\n    self.name = name\n    self.number = number", "docstring": "Initializes an enumeration value.\n\nArgs:\nname (str): name.\nnumber (int): number.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.", "source": "juraj-google-style"}
{"code": "def __delitem__(self, obj, sync=True):\n        \n        if self._is_item:\n            raise TypeError(\"This an item of the parent ListNode\")\n        list(self._generate_instances())\n        _lnk_key = None\n        if isinstance(obj, six.string_types):\n            _lnk_key = obj\n            _obj = self.node_dict[obj]\n        elif not isinstance(obj, self.__class__):\n            _lnk_key = obj.key\n            _obj = self.node_dict[obj.key]\n            del self.node_dict[obj.key]\n        else:\n            _obj = obj\n        self.node_stack.remove(_obj)\n        if _lnk_key and sync:\n            \n            \n            rel_name = \"%s.%s\" % (_obj.__class__.__name__,\n                                  _obj.get_link()['field'])\n            remote_node_name = self._root_node.get_link(field=rel_name)['reverse']\n            _lnk_obj = getattr(_obj, _obj.get_link()['field'])\n            getattr(_lnk_obj, remote_node_name).__delitem__(self._root_node.key, sync=False)\n            \n            self._root_node.on_save.append(_lnk_obj.save)", "docstring": "Allow usage of \"del\" statement on ListNodes with bracket notation.\n\nArgs:\nobj: ListNode item or relation key.\n\nRaises:\nTypeError: If it's called on a ListNode item (intstead of ListNode's itself)", "source": "juraj-google-style"}
{"code": "def parse_rfc3339_utc_string(rfc3339_utc_string):\n    m = re.match('(\\\\d{4})-(\\\\d{2})-(\\\\d{2})T(\\\\d{2}):(\\\\d{2}):(\\\\d{2}).?(\\\\d*)Z', rfc3339_utc_string)\n    if (not m):\n        return None\n    groups = m.groups()\n    if (len(groups[6]) not in (0, 3, 6, 9)):\n        return None\n    g = [int(val) for val in groups[:6]]\n    fraction = groups[6]\n    if (not fraction):\n        micros = 0\n    elif (len(fraction) == 3):\n        micros = (int(fraction) * 1000)\n    elif (len(fraction) == 6):\n        micros = int(fraction)\n    elif (len(fraction) == 9):\n        micros = int(round((int(fraction) / 1000)))\n    else:\n        assert False, 'Fraction length not 0, 6, or 9: {}'.len(fraction)\n    try:\n        return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)\n    except ValueError as e:\n        assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(rfc3339_utc_string, e)", "docstring": "Converts a datestamp from RFC3339 UTC to a datetime.\n\nArgs:\nrfc3339_utc_string: a datetime string in RFC3339 UTC \"Zulu\" format\n\nReturns:\nA datetime.", "source": "codesearchnet"}
{"code": "def output(self, filename):\n    info = 'Inheritance\\n'\n    if (not self.contracts):\n        return\n    info += (blue('Child_Contract -> ') + green('Immediate_Base_Contracts'))\n    info += green(' [Not_Immediate_Base_Contracts]')\n    for child in self.contracts:\n        info += blue(f)\n        if child.inheritance:\n            immediate = child.immediate_inheritance\n            not_immediate = [i for i in child.inheritance if (i not in immediate)]\n            info += (' -> ' + green(', '.join(map(str, immediate))))\n            if not_immediate:\n                info += ((', [' + green(', '.join(map(str, not_immediate)))) + ']')\n    info += (green('\\n\\nBase_Contract -> ') + blue('Immediate_Child_Contracts'))\n    info += blue(' [Not_Immediate_Child_Contracts]')\n    for base in self.contracts:\n        info += green(f)\n        children = list(self._get_child_contracts(base))\n        if children:\n            immediate = [child for child in children if (base in child.immediate_inheritance)]\n            not_immediate = [child for child in children if (not (child in immediate))]\n            info += (' -> ' + blue(', '.join(map(str, immediate))))\n            if not_immediate:\n                info += ((', [' + blue(', '.join(map(str, not_immediate)))) + ']')\n    self.info(info)", "docstring": "Output the inheritance relation\n\n_filename is not used\nArgs:\n_filename(string)", "source": "codesearchnet"}
{"code": "def build_sanitiser_node_dict(\n    cfg,\n    sinks_in_file\n):\n    \n    sanitisers = list()\n    for sink in sinks_in_file:\n        sanitisers.extend(sink.sanitisers)\n\n    sanitisers_in_file = list()\n    for sanitiser in sanitisers:\n        for cfg_node in cfg.nodes:\n            if sanitiser in cfg_node.label:\n                sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node))\n\n    sanitiser_node_dict = dict()\n    for sanitiser in sanitisers:\n        sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes(\n            sanitiser,\n            sanitisers_in_file\n        ))\n    return sanitiser_node_dict", "docstring": "Build a dict of string -> TriggerNode pairs, where the string\nis the sanitiser and the TriggerNode is a TriggerNode of the sanitiser.\n\nArgs:\ncfg(CFG): cfg to traverse.\nsinks_in_file(list[TriggerNode]): list of TriggerNodes containing\nthe sinks in the file.\n\nReturns:\nA string -> TriggerNode dict.", "source": "juraj-google-style"}
{"code": "def most_exposes(python_type):\n    _exposes = set()\n    try:\n        do_not_expose = set((python_type.__dir__(object) + ['__slots__', '__module__', '__weakref__']))\n        empty = python_type.__new__(python_type)\n    except AttributeError:\n        try:\n            _exposes = python_type.__slots__\n        except AttributeError:\n            pass\n    except TypeError:\n        for _workaround in storable_workarounds:\n            try:\n                _exposes = _workaround(python_type)\n            except (SystemExit, KeyboardInterrupt):\n                raise\n            except:\n                pass\n            else:\n                break\n    else:\n        all_members = empty.__dir__()\n        for attr in all_members:\n            if (attr in do_not_expose):\n                continue\n            try:\n                getattr(empty, attr)\n            except AttributeError as e:\n                if e.args:\n                    msg = e.args[0]\n                    if ((msg == attr) or msg.endswith(\"' object has no attribute '{}'\".format(attr))):\n                        _exposes.add(attr)\n            except (SystemExit, KeyboardInterrupt):\n                raise\n            except:\n                pass\n        for attr in ('__dict__',):\n            if (attr in all_members):\n                _exposes.add(attr)\n    return list(_exposes)", "docstring": "Core engine for the automatic generation of storable instances.\n\nFinds the attributes exposed by the objects of a given type.\n\nMostly Python3-only.\nDoes not handle types which `__new__` method requires extra arguments either.\n\nArguments:\n\npython_type (type): object type.\n\nReturns:\n\nlist: attributes exposed.", "source": "codesearchnet"}
{"code": "def InspectZipFile(self, parser_mediator, zip_file):\n    \n    try:\n      xml_data = zip_file.read('_rels/.rels')\n      property_files = self._ParseRelationshipsXMLFile(xml_data)\n    except (IndexError, IOError, KeyError, OverflowError, ValueError,\n            zipfile.BadZipfile) as exception:\n      parser_mediator.ProduceExtractionWarning((\n          'Unable to parse relationships XML file: _rels/.rels with error: '\n          '{0!s}').format(exception))\n      return\n\n    metadata = {}\n\n    for path in property_files:\n      try:\n        xml_data = zip_file.read(path)\n        properties = self._ParsePropertiesXMLFile(xml_data)\n      except (IndexError, IOError, KeyError, OverflowError, ValueError,\n              zipfile.BadZipfile) as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'Unable to parse properties XML file: {0:s} with error: '\n            '{1!s}').format(path, exception))\n        continue\n\n      metadata.update(properties)\n\n    event_data = OpenXMLEventData()\n    event_data.app_version = self._GetPropertyValue(\n        parser_mediator, metadata, 'app_version')\n    event_data.app_version = self._GetPropertyValue(\n        parser_mediator, metadata, 'app_version')\n    event_data.author = self._GetPropertyValue(\n        parser_mediator, metadata, 'author')\n    event_data.creating_app = self._GetPropertyValue(\n        parser_mediator, metadata, 'creating_app')\n    event_data.doc_security = self._GetPropertyValue(\n        parser_mediator, metadata, 'doc_security')\n    event_data.hyperlinks_changed = self._GetPropertyValue(\n        parser_mediator, metadata, 'hyperlinks_changed')\n    event_data.i4 = self._GetPropertyValue(\n        parser_mediator, metadata, 'i4')\n    event_data.last_saved_by = self._GetPropertyValue(\n        parser_mediator, metadata, 'last_saved_by')\n    event_data.links_up_to_date = self._GetPropertyValue(\n        parser_mediator, metadata, 'links_up_to_date')\n    event_data.number_of_characters = self._GetPropertyValue(\n        parser_mediator, metadata, 'number_of_characters')\n    event_data.number_of_characters_with_spaces = self._GetPropertyValue(\n        parser_mediator, metadata, 'number_of_characters_with_spaces')\n    event_data.number_of_lines = self._GetPropertyValue(\n        parser_mediator, metadata, 'number_of_lines')\n    event_data.number_of_pages = self._GetPropertyValue(\n        parser_mediator, metadata, 'number_of_pages')\n    event_data.number_of_paragraphs = self._GetPropertyValue(\n        parser_mediator, metadata, 'number_of_paragraphs')\n    event_data.number_of_words = self._GetPropertyValue(\n        parser_mediator, metadata, 'number_of_words')\n    event_data.revision_number = self._GetPropertyValue(\n        parser_mediator, metadata, 'revision_number')\n    event_data.scale_crop = self._GetPropertyValue(\n        parser_mediator, metadata, 'scale_crop')\n    event_data.shared_doc = self._GetPropertyValue(\n        parser_mediator, metadata, 'shared_doc')\n    event_data.template = self._GetPropertyValue(\n        parser_mediator, metadata, 'template')\n    event_data.total_time = self._GetPropertyValue(\n        parser_mediator, metadata, 'total_time')\n\n    self._ProduceEvent(\n        parser_mediator, event_data, metadata, 'created',\n        definitions.TIME_DESCRIPTION_CREATION, 'creation time')\n    self._ProduceEvent(\n        parser_mediator, event_data, metadata, 'modified',\n        definitions.TIME_DESCRIPTION_MODIFICATION, 'modification 
time')\n    self._ProduceEvent(\n        parser_mediator, event_data, metadata, 'last_printed',\n        definitions.TIME_DESCRIPTION_LAST_PRINTED, 'last printed time')", "docstring": "Parses an OXML file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nzip_file (zipfile.ZipFile): the zip file containing OXML content. It is\nnot be closed in this method, but will be closed by the parser logic\nin czip.py.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def GetUserByEmail(self, email):\n    \n    user = self.rpc_helper.GetAccountInfoByEmail(email)\n    return GitkitUser.FromApiResponse(user)", "docstring": "Gets user info by email.\n\nArgs:\nemail: string, the user email.\n\nReturns:\nGitkitUser, containing the user info.", "source": "juraj-google-style"}
{"code": "def deserialize_subject_info(subject_info_xml):\n    try:\n        return d1_common.xml.deserialize(subject_info_xml)\n    except ValueError as e:\n        raise d1_common.types.exceptions.InvalidToken(0, 'Could not deserialize SubjectInfo. subject_info=\"{}\", error=\"{}\"'.format(subject_info_xml, str(e)))", "docstring": "Deserialize SubjectInfo XML doc to native object.\n\nArgs:\nsubject_info_xml: str\nSubjectInfo XML doc\n\nReturns:\nSubjectInfo PyXB object", "source": "codesearchnet"}
{"code": "def url_is_project(url, default='not_a_func'):\n    \n    try:\n        u = resolve(url)\n        if u and u.func != default:\n            return True\n    except Resolver404:\n        static_url = settings.STATIC_URL\n        static_url_wd = static_url.lstrip('/')\n        if url.startswith(static_url):\n            url = url[len(static_url):]\n        elif url.startswith(static_url_wd):\n            url = url[len(static_url_wd):]\n        else:\n            return False\n        if finders.find(url):\n            return True\n    return False", "docstring": "Check if URL is part of the current project's URLs.\n\nArgs:\nurl (str): URL to check.\ndefault (callable): used to filter out some URLs attached to function.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def ParseLSQuarantineRow(\n      self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = LsQuarantineEventData()\n    event_data.agent = self._GetRowValue(query_hash, row, 'Agent')\n    event_data.data = self._GetRowValue(query_hash, row, 'Data')\n    event_data.query = query\n    event_data.url = self._GetRowValue(query_hash, row, 'URL')\n\n    timestamp = self._GetRowValue(query_hash, row, 'Time')\n    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a launch services quarantine event row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"}
{"code": "def get_tracks_for_album(self, artist, album, full_album_art_uri=False):\n    subcategories = [artist, album]\n    result = self.get_album_artists(full_album_art_uri=full_album_art_uri, subcategories=subcategories, complete_result=True)\n    result._metadata['search_type'] = 'tracks_for_album'\n    return result", "docstring": "Get the tracks of an artist's album.\n\nArgs:\nartist (str): an artist's name.\nalbum (str): an album name.\nfull_album_art_uri: whether the album art URI should be\nabsolute (i.e. including the IP address). Default `False`.\n\nReturns:\nA `SearchResult` instance.", "source": "codesearchnet"}
{"code": "def port(self, value):\n        \n\n        self._port = value\n        \n        if value is None:\n            try:\n                del self._connectionXML.attrib['port']\n            except KeyError:\n                pass\n        else:\n            self._connectionXML.set('port', value)", "docstring": "Set the connection's port property.\n\nArgs:\nvalue:  New port value. String.\n\nReturns:\nNothing.", "source": "juraj-google-style"}
{"code": "def _parse_query_key(self, key, val, is_escaped):\n    if key.endswith('__contains'):\n        key = key[:(- 10)]\n        val = self._parse_query_modifier('contains', val, is_escaped)\n    elif key.endswith('__range'):\n        key = key[:(- 7)]\n        val = self._parse_query_modifier('range', val, is_escaped)\n    elif key.endswith('__startswith'):\n        key = key[:(- 12)]\n        val = self._parse_query_modifier('startswith', val, is_escaped)\n    elif key.endswith('__endswith'):\n        key = key[:(- 10)]\n        val = self._parse_query_modifier('endswith', val, is_escaped)\n    elif key.endswith('__lt'):\n        key = key[:(- 4)]\n        val = self._parse_query_modifier('lt', val, is_escaped)\n    elif key.endswith('__gt'):\n        key = key[:(- 4)]\n        val = self._parse_query_modifier('gt', val, is_escaped)\n    elif key.endswith('__lte'):\n        key = key[:(- 5)]\n        val = self._parse_query_modifier('lte', val, is_escaped)\n    elif key.endswith('__gte'):\n        key = key[:(- 5)]\n        val = self._parse_query_modifier('gte', val, is_escaped)\n    elif ((key != 'NOKEY') and (not is_escaped)):\n        val = self._escape_query(val)\n    return (key, val)", "docstring": "Strips query modifier from key and call's the appropriate value modifier.\n\nArgs:\nkey (str): Query key\nval: Query value\n\nReturns:\nParsed query key and value.", "source": "codesearchnet"}
{"code": "def run(self):\n    while self.should_run:\n        try:\n            self.logger.debug(('Sending heartbeat, seq ' + last_sequence))\n            self.ws.send(json.dumps({'op': 1, 'd': last_sequence}))\n        except Exception as e:\n            self.logger.error(f'Got error in heartbeat: {str(e)}')\n        finally:\n            elapsed = 0.0\n            while ((elapsed < self.interval) and self.should_run):\n                time.sleep(self.TICK_INTERVAL)\n                elapsed += self.TICK_INTERVAL", "docstring": "Runs the thread\n\nThis method handles sending the heartbeat to the Discord websocket server, so the connection\ncan remain open and the bot remain online for those commands that require it to be.\n\nArgs:\nNone", "source": "codesearchnet"}
{"code": "def Erase(self, partition, timeout_ms=None):\n        \n        self._SimpleCommand(b'erase', arg=partition, timeout_ms=timeout_ms)", "docstring": "Erases the given partition.\n\nArgs:\npartition: Partition to clear.", "source": "juraj-google-style"}
{"code": "def stage_tc_batch_xid(xid_type, xid_value, owner):\n        \n        xid_string = '{}-{}-{}'.format(xid_type, xid_value, owner)\n        hash_object = hashlib.sha256(xid_string.encode('utf-8'))\n        return hash_object.hexdigest()", "docstring": "Create an xid for a batch job.\n\nArgs:\nxid_type (str): [description]\nxid_value (str): [description]\nowner (str): [description]\n\nReturns:\n[type]: [description]", "source": "juraj-google-style"}
{"code": "def set_save_handler(save_handler: Optional[Callable[..., Any]]) -> Optional[Callable[..., Any]]:\n    if save_handler and (not callable(save_handler)):\n        raise ValueError('`save_handler` must be callable.')\n    global _SAVE_HANDLER\n    old_handler = _SAVE_HANDLER\n    _SAVE_HANDLER = save_handler\n    return old_handler", "docstring": "Sets global save handler.\n\nArgs:\nsave_handler: A callable object that takes at least one argument as value to\nsave. `symbolic.save` method will pass through all arguments to this\nhandler and return its return value.\n\nReturns:\nPrevious global save handler.", "source": "github-repos"}
{"code": "def _SetHeader(self, new_values):\n        \n        row = self.row_class()\n        row.row = 0\n        for v in new_values:\n            row[v] = v\n        self._table[0] = row", "docstring": "Sets header of table to the given tuple.\n\nArgs:\nnew_values: Tuple of new header values.", "source": "juraj-google-style"}
{"code": "def estimate_motion(self, time, intensity_grid, max_u, max_v):\n    ti = np.where((time == self.times))[0][0]\n    mask_vals = np.where((self.masks[ti].ravel() == 1))\n    i_vals = self.i[ti].ravel()[mask_vals]\n    j_vals = self.j[ti].ravel()[mask_vals]\n    obj_vals = self.timesteps[ti].ravel()[mask_vals]\n    u_shifts = np.arange((- max_u), (max_u + 1))\n    v_shifts = np.arange((- max_v), (max_v + 1))\n    min_error = 99999999999.0\n    best_u = 0\n    best_v = 0\n    for u in u_shifts:\n        j_shift = (j_vals - u)\n        for v in v_shifts:\n            i_shift = (i_vals - v)\n            if np.all(((((0 <= i_shift) & (i_shift < intensity_grid.shape[0])) & (0 <= j_shift)) & (j_shift < intensity_grid.shape[1]))):\n                shift_vals = intensity_grid[(i_shift, j_shift)]\n            else:\n                shift_vals = np.zeros(i_shift.shape)\n            error = np.abs((shift_vals - obj_vals)).mean()\n            if (error < min_error):\n                min_error = error\n                best_u = (u * self.dx)\n                best_v = (v * self.dx)\n    self.u[ti] = best_u\n    self.v[ti] = best_v\n    return (best_u, best_v, min_error)", "docstring": "Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.\n\nArgs:\ntime: time being evaluated.\nintensity_grid: 2D array of intensities used in cross correlation.\nmax_u: Maximum x-component of motion. Used to limit search area.\nmax_v: Maximum y-component of motion. Used to limit search area\n\nReturns:\nu, v, and the minimum error.", "source": "codesearchnet"}
{"code": "def ipv4_is_defined(address):\n    \n\n    \n    query_ip = IPv4Address(str(address))\n\n    \n    results = namedtuple('ipv4_is_defined_results', 'is_defined, ietf_name, '\n                                                    'ietf_rfc')\n\n    \n    if query_ip in IPv4Network('0.0.0.0/8'):\n\n        return results(True, 'This Network', 'RFC 1122, Section 3.2.1.3')\n\n    \n    elif query_ip.is_loopback:\n\n        return results(True, 'Loopback', 'RFC 1122, Section 3.2.1.3')\n\n    \n    elif query_ip.is_link_local:\n\n        return results(True, 'Link Local', 'RFC 3927')\n\n    \n    elif query_ip in IPv4Network('192.0.0.0/24'):\n\n        return results(True, 'IETF Protocol Assignments', 'RFC 5736')\n\n    \n    elif query_ip in IPv4Network('192.0.2.0/24'):\n\n        return results(True, 'TEST-NET-1', 'RFC 5737')\n\n    \n    elif query_ip in IPv4Network('192.88.99.0/24'):\n\n        return results(True, '6to4 Relay Anycast', 'RFC 3068')\n\n    \n    elif query_ip in IPv4Network('198.18.0.0/15'):\n\n        return (results(True,\n                'Network Interconnect Device Benchmark Testing',\n                        'RFC 2544'))\n\n    \n    elif query_ip in IPv4Network('198.51.100.0/24'):\n\n        return results(True, 'TEST-NET-2', 'RFC 5737')\n\n    \n    elif query_ip in IPv4Network('203.0.113.0/24'):\n\n        return results(True, 'TEST-NET-3', 'RFC 5737')\n\n    \n    elif query_ip.is_multicast:\n\n        return results(True, 'Multicast', 'RFC 3171')\n\n    \n    elif query_ip in IPv4Network('255.255.255.255/32'):\n\n        return results(True, 'Limited Broadcast', 'RFC 919, Section 7')\n\n    \n    elif query_ip.is_private:\n\n        return results(True, 'Private-Use Networks', 'RFC 1918')\n\n    \n    \n    elif query_ip in IPv4Network('198.97.38.0/24'):\n\n        return results(True, 'IANA Reserved', '')\n\n    return results(False, '', '')", "docstring": "The function for checking if an IPv4 address is defined (does not need to\nbe resolved).\n\nArgs:\naddress (:obj:`str`): An IPv4 address.\n\nReturns:\nnamedtuple:\n\n:is_defined (bool): True if given address is defined, otherwise\nFalse\n:ietf_name (str): IETF assignment name if given address is\ndefined, otherwise ''\n:ietf_rfc (str): IETF assignment RFC if given address is defined,\notherwise ''", "source": "juraj-google-style"}
{"code": "def fit_gaussian(samples, ddof=0):\n    if (len(samples.shape) == 1):\n        return (np.mean(samples), np.std(samples, ddof=ddof))\n    return (np.mean(samples, axis=1), np.std(samples, axis=1, ddof=ddof))", "docstring": "Calculates the mean and the standard deviation of the given samples.\n\nArgs:\nsamples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all\nvalues. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.\nddof (int): the difference degrees of freedom in the std calculation. See numpy.", "source": "codesearchnet"}
{"code": "def timestamp(value, fmt=None):\n    if fmt:\n        return _timestamp_formats.get(fmt, (lambda v: timestamp_fmt(v, fmt)))(value)\n    l = len(value)\n    if ((19 <= l <= 24) and (value[3] == ' ')):\n        try:\n            return timestamp_d_b_Y_H_M_S(value)\n        except (KeyError, ValueError, OverflowError):\n            pass\n    if (30 <= l <= 31):\n        try:\n            return timestamp_a__d_b_Y_H_M_S_z(value)\n        except (KeyError, ValueError, OverflowError):\n            pass\n    if (l == 14):\n        try:\n            return timestamp_YmdHMS(value)\n        except (ValueError, OverflowError):\n            pass\n    try:\n        return timestamp_epoch(value)\n    except ValueError:\n        pass\n    return timestamp_any(value)", "docstring": "Parse a datetime to a unix timestamp.\n\nUses fast custom parsing for common datetime formats or the slow dateutil\nparser for other formats. This is a trade off between ease of use and speed\nand is very useful for fast parsing of timestamp strings whose format may\nstandard but varied or unknown prior to parsing.\n\nCommon formats include:\n1 Feb 2010 12:00:00 GMT\nMon, 1 Feb 2010 22:00:00 +1000\n20100201120000\n1383470155 (seconds since epoch)\n\nSee the other timestamp_*() functions for more details.\n\nArgs:\nvalue: A string representing a datetime.\nfmt: A timestamp format string like for time.strptime().\n\nReturns:\nThe time in seconds since epoch as and integer for the value specified.", "source": "codesearchnet"}
{"code": "def has_relationship(self, left_id, left_type, right_id, right_type,\n                         rel_type='Related To'):\n        \n        data = self.get_object(left_id, left_type)\n        if not data:\n            raise CRITsOperationalError('Crits Object not found with id {}'\n                                        'and type {}'.format(left_id,\n                                                             left_type))\n        if 'relationships' not in data:\n            return False\n        for relationship in data['relationships']:\n            if relationship['relationship'] != rel_type:\n                continue\n            if relationship['value'] != right_id:\n                continue\n            if relationship['type'] != right_type:\n                continue\n            return True\n        return False", "docstring": "Checks if the two objects are related\n\nArgs:\nleft_id: The CRITs ID of the first indicator\nleft_type: The CRITs TLO type of the first indicator\nright_id: The CRITs ID of the second indicator\nright_type: The CRITs TLO type of the second indicator\nrel_type: The relationships type (\"Related To\", etc)\nReturns:\nTrue or False if the relationship exists or not.", "source": "juraj-google-style"}
{"code": "def __init__(self, identifier):\n    \n    super(FormatSpecification, self).__init__()\n    self.identifier = identifier\n    self.signatures = []", "docstring": "Initializes a specification.\n\nArgs:\nidentifier (str): unique name for the format.", "source": "juraj-google-style"}
{"code": "class Mean(Metric):\n\n    def __init__(self, name='mean', dtype=None):\n        super().__init__(name=name, dtype=dtype)\n        self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total')\n        self.count = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='count')\n\n    def update_state(self, values, sample_weight=None):\n        values, sample_weight = reduce_to_samplewise_values(values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype)\n        self.total.assign_add(ops.sum(values))\n        if sample_weight is not None:\n            num_samples = ops.sum(sample_weight)\n        elif len(values.shape) >= 1:\n            num_samples = ops.shape(values)[0]\n        else:\n            num_samples = 1\n        self.count.assign_add(ops.cast(num_samples, dtype=self.dtype))\n\n    def reset_state(self):\n        self.total.assign(0)\n        self.count.assign(0)\n\n    def result(self):\n        return ops.divide_no_nan(self.total, ops.cast(self.count, dtype=self.dtype))", "docstring": "Compute the (weighted) mean of the given values.\n\nFor example, if values is `[1, 3, 5, 7]` then the mean is 4.\nIf `sample_weight` was specified as `[1, 1, 0, 0]` then the mean would be 2.\n\nThis metric creates two variables, `total` and `count`.\nThe mean value returned is simply `total` divided by `count`.\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = Mean()\n>>> m.update_state([1, 3, 5, 7])\n>>> m.result()\n4.0\n\n>>> m.reset_state()\n>>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])\n>>> m.result()\n2.0", "source": "github-repos"}
{"code": "def end_before(self, document_fields):\n    return self._cursor_helper(document_fields, before=True, start=False)", "docstring": "End query results before a particular document value.\n\nThe result set will **exclude** the document specified by\n``document_fields``.\n\nIf the current query already has specified an end cursor -- either\nvia this method or\n:meth:`~.firestore_v1beta1.query.Query.end_at` -- this will\noverwrite it.\n\nWhen the query is sent to the server, the ``document_fields`` will\nbe used in the order given by fields set by\n:meth:`~.firestore_v1beta1.query.Query.order_by`.\n\nArgs:\ndocument_fields (Union[~.firestore_v1beta1.\\\ndocument.DocumentSnapshot, dict, list, tuple]): a document\nsnapshot or a dictionary/list/tuple of fields representing a\nquery results cursor. A cursor is a collection of values that\nrepresent a position in a query result set.\n\nReturns:\n~.firestore_v1beta1.query.Query: A query with cursor. Acts as\na copy of the current query, modified with the newly added\n\"end before\" cursor.", "source": "codesearchnet"}
{"code": "def request(self, request):\n        \n\n        url = \"{}{}\".format(self._base_url, request.path)\n\n        timeout = self.poll_timeout\n\n        if request.stream is True:\n            timeout = self.stream_timeout\n\n        try:\n            http_response = self._session.request(\n                request.method,\n                url,\n                headers=self._headers,\n                params=request.params,\n                data=request.body,\n                stream=request.stream,\n                timeout=timeout\n            )\n        except requests.exceptions.ConnectionError:\n            raise V20ConnectionError(url)\n        except requests.exceptions.ConnectTimeout:\n            raise V20Timeout(url, \"connect\")\n        except requests.exceptions.ReadTimeout:\n            raise V20Timeout(url, \"read\")\n\n        request.headers = http_response.request.headers\n\n        response = Response(\n            request,\n            request.method,\n            http_response.url,\n            http_response.status_code,\n            http_response.reason,\n            http_response.headers\n        )\n\n        if request.stream:\n            response.set_line_parser(\n                request.line_parser\n            )\n\n            response.set_lines(\n                http_response.iter_lines(\n                    self.stream_chunk_size\n                )\n            )\n        else:\n            response.set_raw_body(http_response.text)\n\n        return response", "docstring": "Perform an HTTP request through the context\n\nArgs:\nrequest: A v20.request.Request object\n\nReturns:\nA v20.response.Response object", "source": "juraj-google-style"}
{"code": "def represent_as_tuple(string):\n    \n    keep = (\".\", \"[\", \"]\")\n    return tuple(str_digit_to_int(c) if c not in keep else c for c in string)", "docstring": "Represent a number-string in the form of a tuple of digits.\n\"868.0F\" -> (8, 6, 8, '.', 0, 15)\n\nArgs:\nstring - Number represented as a string of digits.\nReturns:\nNumber represented as an iterable container of digits\n\n>>> represent_as_tuple('868.0F')\n(8, 6, 8, '.', 0, 15)", "source": "juraj-google-style"}
{"code": "def _call_for_each_replica(distribution, fn, args, kwargs):\n    run_concurrently = False\n    if not context.executing_eagerly():\n        ops.get_default_graph().switch_to_thread_local()\n    coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))\n    shared_variable_store = {}\n    devices = distribution.extended.worker_devices\n    thread_local_callables = _get_thread_local_configuration_callable()\n    threads = []\n    for index in range(len(devices)):\n        variable_creator_fn = shared_variable_creator.make_fn(shared_variable_store, index)\n        t = _MirroredReplicaThread(distribution, coord, index, devices, variable_creator_fn, fn, distribute_utils.caching_scope_local, distribute_utils.select_replica(index, args), distribute_utils.select_replica(index, kwargs), thread_local_callables)\n        threads.append(t)\n    for t in threads:\n        t.start()\n    try:\n        with coord.stop_on_exception():\n            all_done = False\n            while not all_done and (not coord.should_stop()):\n                done = []\n                if run_concurrently:\n                    for t in threads:\n                        t.should_run.set()\n                    for t in threads:\n                        t.has_paused.wait()\n                        t.has_paused.clear()\n                        if coord.should_stop():\n                            return None\n                        done.append(t.done)\n                else:\n                    for t in threads:\n                        t.should_run.set()\n                        t.has_paused.wait()\n                        t.has_paused.clear()\n                        if coord.should_stop():\n                            return None\n                        done.append(t.done)\n                if coord.should_stop():\n                    return None\n                all_done = all(done)\n                if not all_done:\n                    if any(done):\n                        raise RuntimeError('Some replicas made a different number of replica_context().merge_call() calls.')\n                    merge_args = distribute_utils.regroup(tuple((t.merge_args for t in threads)))\n                    merge_kwargs = distribute_utils.regroup(tuple((t.merge_kwargs for t in threads)))\n                    mtt_captured_name_scope = threads[0].captured_name_scope\n                    mtt_captured_var_scope = threads[0].captured_var_scope\n                    mtt_captured_control_deps = set()\n                    for t in threads:\n                        mtt_captured_control_deps.update(t.captured_control_deps)\n                    with ops.name_scope(mtt_captured_name_scope), ops.control_dependencies(mtt_captured_control_deps), variable_scope.variable_scope(mtt_captured_var_scope), _maybe_enter_eager_mode(threads[0].merge_call_entered_in_eager):\n                        merge_result = threads[0].merge_fn(distribution, *merge_args, **merge_kwargs)\n                    for r, t in enumerate(threads):\n                        t.merge_result = distribute_utils.select_replica(r, merge_result)\n    finally:\n        for t in threads:\n            t.should_run.set()\n        coord.join(threads)\n    return distribute_utils.regroup(tuple((t.main_result for t in threads)))", "docstring": "Run `fn` in separate threads, once per replica/worker device.\n\nArgs:\ndistribution: the DistributionStrategy object.\nfn: function to run (will be run once per replica, each in its own thread).\nargs: positional arguments for 
`fn`\nkwargs: keyword arguments for `fn`.\n\nReturns:\nMerged return value of `fn` across all replicas.\n\nRaises:\nRuntimeError: If fn() calls get_replica_context().merge_call() a different\nnumber of times from the available devices.", "source": "github-repos"}
{"code": "def parse_vlq(self, segment):\n    values = []\n    (cur, shift) = (0, 0)\n    for c in segment:\n        val = B64[ord(c)]\n        (val, cont) = ((val & 31), (val >> 5))\n        cur += (val << shift)\n        shift += 5\n        if (not cont):\n            (cur, sign) = ((cur >> 1), (cur & 1))\n            if sign:\n                cur = (- cur)\n            values.append(cur)\n            (cur, shift) = (0, 0)\n    if (cur or shift):\n        raise SourceMapDecodeError('leftover cur/shift in vlq decode')\n    return values", "docstring": "Parse a string of VLQ-encoded data.\n\nReturns:\na list of integers.", "source": "codesearchnet"}
{"code": "def dump_package_data(data, buf, format_=FileFormat.py, skip_attributes=None):\n    \n    if format_ == FileFormat.txt:\n        raise ValueError(\"'txt' format not supported for packages.\")\n\n    data_ = dict((k, v) for k, v in data.iteritems() if v is not None)\n    data_ = package_serialise_schema.validate(data_)\n    skip = set(skip_attributes or [])\n\n    items = []\n    for key in package_key_order:\n        if key not in skip:\n            value = data_.pop(key, None)\n            if value is not None:\n                items.append((key, value))\n\n    \n    for key, value in data_.iteritems():\n        if key not in skip:\n            items.append((key, value))\n\n    dump_func = dump_functions[format_]\n    dump_func(items, buf)", "docstring": "Write package data to `buf`.\n\nArgs:\ndata (dict): Data source - must conform to `package_serialise_schema`.\nbuf (file-like object): Destination stream.\nformat_ (`FileFormat`): Format to dump data in.\nskip_attributes (list of str): List of attributes to not print.", "source": "juraj-google-style"}
{"code": "def _GetLoadConfigTimestamp(self, pefile_object):\n    if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG')):\n        return None\n    timestamp = getattr(pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)\n    return timestamp", "docstring": "Retrieves the timestamp from the Load Configuration directory.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\nint: load configuration timestamps or None if there are none present.", "source": "codesearchnet"}
{"code": "def on_connected(self, connection):\n    log.info('PikaClient: connected to RabbitMQ')\n    self.connected = True\n    self.in_channel = self.connection.channel(self.on_channel_open)", "docstring": "AMQP connection callback.\nCreates input channel.\n\nArgs:\nconnection: AMQP connection", "source": "codesearchnet"}
{"code": "def count(cls, cur, table: str, where_keys: list=None):\n    if where_keys:\n        (where_clause, values) = cls._get_where_clause_with_values(where_keys)\n        query = cls._count_query_where.format(table, where_clause)\n        (q, t) = (query, values)\n    else:\n        query = cls._count_query.format(table)\n        (q, t) = (query, ())\n    (yield from cur.execute(q, t))\n    result = (yield from cur.fetchone())\n    return int(result[0])", "docstring": "gives the number of records in the table\n\nArgs:\ntable: a string indicating the name of the table\n\nReturns:\nan integer indicating the number of records in the table", "source": "codesearchnet"}
{"code": "def encipher_vigenere(plaintext, plain_vocab, key):\n  \n  ciphertext = []\n  \n  layers = [\n      ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))\n  ]\n\n  for i, sentence in enumerate(plaintext):\n    cipher_sentence = []\n    for j, character in enumerate(sentence):\n      key_idx = key[j % len(key)]\n      encrypted_char = layers[key_idx].encrypt_character(character)\n      cipher_sentence.append(encrypted_char)\n    ciphertext.append(cipher_sentence)\n\n  return ciphertext", "docstring": "Encrypt plain text with given key.\n\nArgs:\nplaintext (list of list of Strings): a list of plain text to encrypt.\nplain_vocab (list of Integer): unique vocabularies being used.\nkey (list of Integer): key to encrypt cipher using Vigenere table.\n\nReturns:\nciphertext (list of Strings): encrypted plain text.", "source": "juraj-google-style"}
{"code": "def ValidateFeedStartAndExpirationDates(self, problems, first_date, last_date, first_date_origin, last_date_origin, today):\n    warning_cutoff = (today + datetime.timedelta(days=60))\n    if (last_date < warning_cutoff):\n        problems.ExpirationDate(time.mktime(last_date.timetuple()), last_date_origin)\n    if (first_date > today):\n        problems.FutureService(time.mktime(first_date.timetuple()), first_date_origin)", "docstring": "Validate the start and expiration dates of the feed.\nIssue a warning if it only starts in the future, or if\nit expires within 60 days.\n\nArgs:\nproblems: The problem reporter object\nfirst_date: A date object representing the first day the feed is active\nlast_date: A date object representing the last day the feed is active\ntoday: A date object representing the date the validation is being run on\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def tags(self, value):\n        \n        if value == self._defaults['tags'] and 'tags' in self._values:\n            del self._values['tags']\n        else:\n            self._values['tags'] = value", "docstring": "The tags property.\n\nArgs:\nvalue (hash). the property value.", "source": "juraj-google-style"}
{"code": "def console_set_char_background(con: tcod.console.Console, x: int, y: int, col: Tuple[(int, int, int)], flag: int=BKGND_SET) -> None:\n    lib.TCOD_console_set_char_background(_console(con), x, y, col, flag)", "docstring": "Change the background color of x,y to col using a blend mode.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\ncol (Union[Tuple[int, int, int], Sequence[int]]):\nAn (r, g, b) sequence or Color instance.\nflag (int): Blending mode to use, defaults to BKGND_SET.", "source": "codesearchnet"}
{"code": "def lattice_2_lmpbox(lattice, origin=(0, 0, 0)):\n    \n    a, b, c = lattice.abc\n    xlo, ylo, zlo = origin\n    xhi = a + xlo\n    m = lattice.matrix\n    xy = np.dot(m[1], m[0] / a)\n    yhi = np.sqrt(b ** 2 - xy ** 2) + ylo\n    xz = np.dot(m[2], m[0] / a)\n    yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo)\n    zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) + zlo\n    tilt = None if lattice.is_orthogonal else [xy, xz, yz]\n    rot_matrix = np.linalg.solve([[xhi - xlo, 0, 0],\n                                  [xy, yhi - ylo, 0],\n                                  [xz, yz, zhi - zlo]], m)\n    bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]\n    symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin)\n    return LammpsBox(bounds, tilt), symmop", "docstring": "Converts a lattice object to LammpsBox, and calculates the symmetry\noperation used.\n\nArgs:\nlattice (Lattice): Input lattice.\norigin: A (3,) array/list of floats setting lower bounds of\nsimulation box. Default to (0, 0, 0).\n\nReturns:\nLammpsBox, SymmOp", "source": "juraj-google-style"}
{"code": "def dilated_conv_stack(name, x, mid_channels, output_channels, dilation_rates, activation='relu', dropout=0.0):\n    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n        output = 0.0\n        for (dil_ind, dil_rate) in enumerate(dilation_rates):\n            curr_out = conv_stack(('dil_%d' % dil_ind), x, mid_channels=mid_channels, output_channels=output_channels, dilations=dil_rate, activation=activation, dropout=dropout)\n            output += curr_out\n        return output", "docstring": "Dilated convolutional stack.\n\nFeatures at different rates are computed independently using a 3 layer\nconvolutional stack and added.\n\nArgs:\nname: variable scope.\nx: 5-D Tensor.\nmid_channels: Number of output channels of the first layer in the conv\nstack.\noutput_channels: Number of output channels of the last layer.\ndilation_rates: A list of dilation rates.\nactivation: Can be either \"relu\" or \"gatu\"\ndropout: dropout.\nReturns:\noutput: 5-D Tensor.", "source": "codesearchnet"}
{"code": "def id_token_jwt_grant(request, token_uri, assertion):\n    body = {'assertion': assertion, 'grant_type': _JWT_GRANT_TYPE}\n    response_data = _token_endpoint_request(request, token_uri, body)\n    try:\n        id_token = response_data['id_token']\n    except KeyError as caught_exc:\n        new_exc = exceptions.RefreshError('No ID token in response.', response_data)\n        six.raise_from(new_exc, caught_exc)\n    payload = jwt.decode(id_token, verify=False)\n    expiry = datetime.datetime.utcfromtimestamp(payload['exp'])\n    return (id_token, expiry, response_data)", "docstring": "Implements the JWT Profile for OAuth 2.0 Authorization Grants, but\nrequests an OpenID Connect ID Token instead of an access token.\n\nThis is a variant on the standard JWT Profile that is currently unique\nto Google. This was added for the benefit of authenticating to services\nthat require ID Tokens instead of access tokens or JWT bearer tokens.\n\nArgs:\nrequest (google.auth.transport.Request): A callable used to make\nHTTP requests.\ntoken_uri (str): The OAuth 2.0 authorization server's token endpoint\nURI.\nassertion (str): JWT token signed by a service account. The token's\npayload must include a ``target_audience`` claim.\n\nReturns:\nTuple[str, Optional[datetime], Mapping[str, str]]:\nThe (encoded) Open ID Connect ID Token, expiration, and additional\ndata returned by the endpoint.\n\nRaises:\ngoogle.auth.exceptions.RefreshError: If the token endpoint returned\nan error.", "source": "codesearchnet"}
{"code": "def id_transcripts_by_gene(self, build='37'):\n        \n        hgnc_id_transcripts = {}\n        LOG.info(\"Fetching all id transcripts\")\n        for gene_obj in self.hgnc_collection.find({'build': build}):\n            hgnc_id = gene_obj['hgnc_id']\n            id_transcripts = self.get_id_transcripts(hgnc_id=hgnc_id, build=build)\n            hgnc_id_transcripts[hgnc_id] = id_transcripts\n\n        return hgnc_id_transcripts", "docstring": "Return a dictionary with hgnc_id as keys and a set of id transcripts as value\n\nArgs:\nbuild(str)\n\nReturns:\nhgnc_id_transcripts(dict)", "source": "juraj-google-style"}
{"code": "def _bfs_sort(self, start):\n        \n        pathstates = {}\n        \n        \n        queue = []\n        \n        queue.append([0, start])\n        pathstates[start.stateid] = 0\n        while queue:\n            \n            leaf = queue.pop(0)\n            node = leaf[1]\n            pathlen = leaf[0]\n            \n            \n            for arc in node.arcs:\n                next_state = self.mma[arc.nextstate]\n                if next_state.stateid not in pathstates:\n                    queue.append([pathlen + 1, next_state])\n                    pathstates[next_state.stateid] = pathlen + 1\n        orderedstatesdict = OrderedDict(\n            sorted(\n                pathstates.items(),\n                key=lambda x: x[1],\n                reverse=False))\n        for state in self.mma.states:\n            orderedstatesdict[state.stateid] = state\n        orderedstates = [x[1] for x in list(orderedstatesdict.items())]\n        return orderedstates", "docstring": "maintain a map of states distance using BFS\nArgs:\nstart (fst state): The initial DFA state\nReturns:\nlist: An ordered list of DFA states\nusing path distance", "source": "juraj-google-style"}
{"code": "def validate_id(tx_body):\n        \n        \n        \n        tx_body = rapidjson.loads(rapidjson.dumps(tx_body))\n\n        try:\n            proposed_tx_id = tx_body['id']\n        except KeyError:\n            raise InvalidHash('No transaction id found!')\n\n        tx_body['id'] = None\n\n        tx_body_serialized = Transaction._to_str(tx_body)\n        valid_tx_id = Transaction._to_hash(tx_body_serialized)\n\n        if proposed_tx_id != valid_tx_id:\n            err_msg = (\"The transaction's id '{}' isn't equal to \"\n                       \"the hash of its body, i.e. it's not valid.\")\n            raise InvalidHash(err_msg.format(proposed_tx_id))", "docstring": "Validate the transaction ID of a transaction\n\nArgs:\ntx_body (dict): The Transaction to be transformed.", "source": "juraj-google-style"}
{"code": "def markdown_to_safe_html(markdown_string):\n  \n  warning = ''\n  \n  if isinstance(markdown_string, six.binary_type):\n    markdown_string_decoded = markdown_string.decode('utf-8')\n    \n    \n    markdown_string = markdown_string_decoded.replace(u'\\x00', u'')\n    num_null_bytes = len(markdown_string_decoded) - len(markdown_string)\n    if num_null_bytes:\n      warning = ('<!-- WARNING: discarded %d null bytes in markdown string '\n                 'after UTF-8 decoding -->\\n') % num_null_bytes\n\n  string_html = markdown.markdown(\n      markdown_string, extensions=['markdown.extensions.tables'])\n  string_sanitized = bleach.clean(\n      string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES)\n  return warning + string_sanitized", "docstring": "Convert Markdown to HTML that's safe to splice into the DOM.\n\nArguments:\nmarkdown_string: A Unicode string or UTF-8--encoded bytestring\ncontaining Markdown source. Markdown tables are supported.\n\nReturns:\nA string containing safe HTML.", "source": "juraj-google-style"}
{"code": "def _get_example_from_properties(self, spec):\n        \n        local_spec = deepcopy(spec)\n\n        \n        \n        \n        additional_property = False\n        if 'additionalProperties' in local_spec:\n            additional_property = True\n            if 'properties' not in local_spec:\n                local_spec['properties'] = {}\n            local_spec['properties'].update({\n                'any_prop1': local_spec['additionalProperties'],\n                'any_prop2': local_spec['additionalProperties'],\n            })\n            del(local_spec['additionalProperties'])\n            required = local_spec.get('required', [])\n            required += ['any_prop1', 'any_prop2']\n            local_spec['required'] = required\n\n        example = {}\n        properties = local_spec.get('properties')\n        if properties is not None:\n            required = local_spec.get('required', properties.keys())\n\n            for inner_name, inner_spec in properties.items():\n                if inner_name not in required:\n                    continue\n                partial = self.get_example_from_prop_spec(inner_spec)\n                \n                \n                \n                if isinstance(partial, list):\n                    partial = partial[0]\n                example[inner_name] = partial\n\n        return example, additional_property", "docstring": "Get example from the properties of an object defined inline.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example for the given spec\nA boolean, whether we had additionalProperties in the spec, or not", "source": "juraj-google-style"}
{"code": "class WhisperProcessor(ProcessorMixin):\n    feature_extractor_class = 'WhisperFeatureExtractor'\n    tokenizer_class = 'WhisperTokenizer'\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n        self.current_processor = self.feature_extractor\n        self._in_target_context_manager = False\n\n    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):\n        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)\n\n    def __call__(self, *args, **kwargs):\n        \n        if self._in_target_context_manager:\n            return self.current_processor(*args, **kwargs)\n        audio = kwargs.pop('audio', None)\n        sampling_rate = kwargs.pop('sampling_rate', None)\n        text = kwargs.pop('text', None)\n        if len(args) > 0:\n            audio = args[0]\n            args = args[1:]\n        if audio is None and text is None:\n            raise ValueError('You need to specify either an `audio` or `text` input to process.')\n        if audio is not None:\n            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)\n        if text is not None:\n            encodings = self.tokenizer(text, **kwargs)\n        if text is None:\n            return inputs\n        elif audio is None:\n            return encodings\n        else:\n            inputs['labels'] = encodings['input_ids']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)\n\n    def get_prompt_ids(self, text: str, return_tensors='np'):\n        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)", "docstring": "Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single\nprocessor.\n\n[`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See\nthe [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information.\n\nArgs:\nfeature_extractor (`WhisperFeatureExtractor`):\nAn instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input.\ntokenizer (`WhisperTokenizer`):\nAn instance of [`WhisperTokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def add_read(\n        self,\n        read_tuple_id,\n        bases,\n        qualities,\n        segments,\n    ):\n        \n\n        assert type(bases) is str, \"Wrong type of bases: '{}'\".format(bases)\n        assert type(qualities) is str, \"Wrong type of qualities: '{}'\".format(qualities)\n        assert type(segments) is tuple or type(segments) is list\n\n        if self.current_read_tuple_id != read_tuple_id:\n            self.flush_read_tuple()\n        self.current_read_tuple_id = read_tuple_id\n\n        self.seqs_bases.append(bases)\n        self.seqs_qualities.append(qualities)\n        self.segments.extend(segments)", "docstring": "Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed.\n\nArgs:\nread_tuple_id (int): ID of the read tuple.\nbases (str): Sequence of bases.\nqualities (str): Sequence of FASTQ qualities.\nsegments (list of rnftools.rnfformat.segment): List of segments constituting the read.", "source": "juraj-google-style"}
{"code": "def bullet_base_pose_to_world_pose(self, pose_in_base):\n        \n        pose_in_base = T.pose2mat(pose_in_base)\n\n        base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])\n        base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])\n        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))\n\n        pose_in_world = T.pose_in_A_to_pose_in_B(\n            pose_A=pose_in_base, pose_A_in_B=base_pose_in_world\n        )\n        return T.mat2pose(pose_in_world)", "docstring": "Convert a pose in the base frame to a pose in the world frame.\n\nArgs:\npose_in_base: a (pos, orn) tuple.\n\nReturns:\npose_in world: a (pos, orn) tuple.", "source": "juraj-google-style"}
{"code": "def GetUpdates(self, source, search_base, search_filter, search_scope, since):\n    if self.conf.get('ad'):\n        self.attrs.append('whenChanged')\n    else:\n        self.attrs.append('modifyTimestamp')\n    if since is not None:\n        ts = self.FromTimestampToLdap(since)\n        if self.conf.get('ad'):\n            ts = int(ts.rstrip('.0Z')) + 1\n            ts = '%s.0Z' % ts\n            search_filter = '(&%s(whenChanged>=%s))' % (search_filter, ts)\n        else:\n            ts = int(ts.rstrip('Z')) + 1\n            ts = '%sZ' % ts\n            search_filter = '(&%s(modifyTimestamp>=%s))' % (search_filter, ts)\n    if search_scope == 'base':\n        search_scope = ldap.SCOPE_BASE\n    elif search_scope in ['one', 'onelevel']:\n        search_scope = ldap.SCOPE_ONELEVEL\n    elif search_scope in ['sub', 'subtree']:\n        search_scope = ldap.SCOPE_SUBTREE\n    else:\n        raise error.ConfigurationError('Invalid scope: %s' % search_scope)\n    source.Search(search_base=search_base, search_filter=search_filter, search_scope=search_scope, attrs=self.attrs)\n    max_ts = None\n    data_map = self.CreateMap()\n    for obj in source:\n        for field in self.essential_fields:\n            if field not in obj:\n                logging.warn('invalid object passed: %r not in %r', field, obj)\n                raise ValueError('Invalid object passed: %r', obj)\n        if self.conf.get('ad'):\n            obj_ts = self.FromLdapToTimestamp(obj['whenChanged'][0])\n        else:\n            try:\n                obj_ts = self.FromLdapToTimestamp(obj['modifyTimestamp'][0])\n            except KeyError:\n                obj_ts = 0\n        if max_ts is None or obj_ts > max_ts:\n            max_ts = obj_ts\n        try:\n            if not data_map.Add(self.Transform(obj)):\n                logging.info('could not add obj: %r', obj)\n        except AttributeError as e:\n            logging.warning('error %r, discarding malformed obj: %r', str(e), obj)\n    self.PostProcess(data_map, source, search_filter, search_scope)\n    data_map.SetModifyTimestamp(max_ts)\n    return data_map", "docstring": "Get updates from a source.\n\nArgs:\nsource: a data source\nsearch_base: the LDAP base of the tree\nsearch_filter: the LDAP object filter\nsearch_scope:  the LDAP scope filter, one of 'base', 'one', or 'sub'.\nsince: a timestamp to get updates since (None for 'get everything')\n\nReturns:\na tuple containing the map of updates and a maximum timestamp\n\nRaises:\nerror.ConfigurationError: scope is invalid\nValueError: an object in the source map is malformed", "source": "github-repos"}
{"code": "def hgnc_id(self, hgnc_symbol, build='37'):\n    query = {'hgnc_symbol': hgnc_symbol, 'build': build}\n    projection = {'hgnc_id': 1, '_id': 0}\n    res = self.hgnc_collection.find(query, projection)\n    if (res.count() > 0):\n        return res[0]['hgnc_id']\n    else:\n        return None", "docstring": "Query the genes with a hgnc symbol and return the hgnc id\n\nArgs:\nhgnc_symbol(str)\nbuild(str)\n\nReturns:\nhgnc_id(int)", "source": "codesearchnet"}
{"code": "def intrusion_set(self, name, **kwargs):\n        \n        group_obj = IntrusionSet(name, **kwargs)\n        return self._group(group_obj)", "docstring": "Add Intrusion Set data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of IntrusionSet.", "source": "juraj-google-style"}
{"code": "def copy_code(source: message.Message, target: message.Message) -> None:\n    if not fhir_types.is_type_or_profile_of_code(source.DESCRIPTOR):\n        raise fhir_errors.InvalidFhirError(f'Source: {source.DESCRIPTOR.full_name} is not type or profile of Code.')\n    if not fhir_types.is_type_or_profile_of_code(target.DESCRIPTOR):\n        raise fhir_errors.InvalidFhirError(f'Target: {target.DESCRIPTOR.full_name} is not type or profile of Code.')\n    if proto_utils.are_same_message_type(source.DESCRIPTOR, target.DESCRIPTOR):\n        target.CopyFrom(source)\n        return\n    source_value_field = source.DESCRIPTOR.fields_by_name.get('value')\n    target_value_field = target.DESCRIPTOR.fields_by_name.get('value')\n    if source_value_field is None or target_value_field is None:\n        raise fhir_errors.InvalidFhirError(f'Unable to copy code from {source.DESCRIPTOR.full_name} to {target.DESCRIPTOR.full_name}.')\n    proto_utils.copy_common_field(source, target, 'id')\n    proto_utils.copy_common_field(source, target, 'extension')\n    if source_value_field.type not in _CODE_TYPES or target_value_field.type not in _CODE_TYPES:\n        raise ValueError(f'Unable to copy from {source.DESCRIPTOR.full_name} to {target.DESCRIPTOR.full_name}. Must have a field of TYPE_ENUM or TYPE_STRING.')\n    source_value = proto_utils.get_value_at_field(source, source_value_field)\n    if source_value_field.type == target_value_field.type:\n        proto_utils.set_value_at_field(target, target_value_field, source_value)\n    elif source_value_field.type == descriptor.FieldDescriptor.TYPE_STRING:\n        source_enum_value = code_string_to_enum_value_descriptor(source_value, target_value_field.enum_type)\n        proto_utils.set_value_at_field(target, target_value_field, source_enum_value.number)\n    elif source_value_field.type == descriptor.FieldDescriptor.TYPE_ENUM:\n        source_string_value = enum_value_descriptor_to_code_string(source_value_field.enum_type.values_by_number[source_value])\n        proto_utils.set_value_at_field(target, target_value_field, source_string_value)\n    else:\n        raise ValueError(f'Unexpected generic value field type: {source_value_field.type}. Must be a field of TYPE_ENUM or TYPE_STRING in order to copy.')", "docstring": "Adds all fields from source to target.\n\nArgs:\nsource: The FHIR Code instance to copy from.\ntarget: The target FHIR Code instance to copy to.", "source": "github-repos"}
{"code": "def invoke_process_element(self, sdf_invoker, output_processor, element, restriction, watermark_estimator_state, *args, **kwargs):\n    assert isinstance(sdf_invoker, DoFnInvoker)\n\n    class CheckpointState(object):\n\n        def __init__(self):\n            self.checkpointed = None\n            self.residual_restriction = None\n    checkpoint_state = CheckpointState()\n\n    def initiate_checkpoint():\n        with self._checkpoint_lock:\n            if checkpoint_state.checkpointed:\n                return\n            checkpoint_state.checkpointed = object()\n        split = sdf_invoker.try_split(0)\n        if split:\n            _, checkpoint_state.residual_restriction = split\n        else:\n            checkpoint_state.checkpointed = None\n    output_processor.reset()\n    Timer(self._max_duration, initiate_checkpoint).start()\n    sdf_invoker.invoke_process(element, additional_args=args, restriction=restriction, watermark_estimator_state=watermark_estimator_state)\n    assert output_processor.output_iter is not None\n    output_count = 0\n    process_continuation = None\n    for output in output_processor.output_iter:\n        assert not process_continuation\n        if isinstance(output, ProcessContinuation):\n            initiate_checkpoint()\n            process_continuation = output\n            continue\n        yield output\n        output_count += 1\n        if self._max_num_outputs and output_count >= self._max_num_outputs:\n            initiate_checkpoint()\n    result = SDFProcessElementInvoker.Result(residual_restriction=checkpoint_state.residual_restriction) if checkpoint_state.residual_restriction else SDFProcessElementInvoker.Result()\n    yield result", "docstring": "Invokes `process()` method of a Splittable `DoFn` for a given element.\n\nArgs:\nsdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`.\nelement: the element to process\nReturns:\na `SDFProcessElementInvoker.Result` object.", "source": "github-repos"}
{"code": "def join(table1, table2, on=None, how='inner', name=None):\n    if (how not in ('inner', 'left')):\n        ItsdbError(\"Only 'inner' and 'left' join methods are allowed.\")\n    on = _join_pivot(on, table1, table2)\n    fields = _RelationJoin(table1.fields, table2.fields, on=on)\n    get_key = (lambda rec: tuple((rec.get(k) for k in on)))\n    key_indices = set((table2.fields.index(k) for k in on))\n    right = defaultdict(list)\n    for rec in table2:\n        right[get_key(rec)].append([c for (i, c) in enumerate(rec) if (i not in key_indices)])\n    rfill = [f.default_value() for f in table2.fields if (f.name not in on)]\n    joined = []\n    for lrec in table1:\n        k = get_key(lrec)\n        if ((how == 'left') or (k in right)):\n            joined.extend(((lrec + rrec) for rrec in right.get(k, [rfill])))\n    return Table(fields, joined)", "docstring": "Join two tables and return the resulting Table object.\n\nFields in the resulting table have their names prefixed with their\ncorresponding table name. For example, when joining `item` and\n`parse` tables, the `i-input` field of the `item` table will be\nnamed `item:i-input` in the resulting Table. Pivot fields (those\nin *on*) are only stored once without the prefix.\n\nBoth inner and left joins are possible by setting the *how*\nparameter to `inner` and `left`, respectively.\n\n.. warning::\n\nBoth *table2* and the resulting joined table will exist in\nmemory for this operation, so it is not recommended for very\nlarge tables on low-memory systems.\n\nArgs:\ntable1 (:class:`Table`): the left table to join\ntable2 (:class:`Table`): the right table to join\non (str): the shared key to use for joining; if `None`, find\nshared keys using the schemata of the tables\nhow (str): the method used for joining (`\"inner\"` or `\"left\"`)\nname (str): the name assigned to the resulting table", "source": "codesearchnet"}
{"code": "def sample(input_placeholder, logits, seed=None, max_length=1024, temperature=1.0):\n    assert (temperature > 0), 'Temperature must be greater than 0.'\n    if (not seed):\n        seed = chr((ord('A') + random.randint(0, 25)))\n    result = ''\n    recurrent_runner = pt.train.RecurrentRunner()\n    recurrent_runner.reset()\n    for c in seed[:(- 1)]:\n        recurrent_runner.run([logits], {input_placeholder: data_utils.convert_to_int(c)})\n        result += c\n    ci = ord(seed[(- 1)])\n    while ((len(result) < max_length) and (ci != data_utils.EOS)):\n        result += chr(ci)\n        logit_result = recurrent_runner.run([logits], {input_placeholder: ci})[0][0]\n        logit_result /= temperature\n        logit_result -= logit_result.max()\n        distribution = numpy.exp(logit_result)\n        distribution /= distribution.sum()\n        distribution -= 1e-08\n        ci = numpy.argmax(numpy.random.multinomial(1, distribution))\n    result += chr(ci)\n    return result", "docstring": "Samples from the LSTM model.\n\nSampling is done by first running either the seed or an arbitrary character\nthrough the model and then drawing the next character from the probability\ndistribution definted by `softmax`.\n\nArgs:\ninput_placeholder: A placeholder that expects a scalar feed.\nlogits: The logits.  This works with the logits so that it can apply the\ntemperature.\nseed: Either a string of characters to prime the network or None.\nmax_length: The maximum length to draw in case EOS is not reached.\ntemperature: A value that is used to renormalize the inputs.  A higher value\nselects less likely choices.\nReturns:\nA string that was sampled from the model.", "source": "codesearchnet"}
{"code": "def aside_view_declaration(self, view_name):\n    if (view_name in self._combined_asides):\n        return getattr(self, self._combined_asides[view_name])\n    else:\n        return None", "docstring": "Find and return a function object if one is an aside_view for the given view_name\n\nAside methods declare their view provision via @XBlockAside.aside_for(view_name)\nThis function finds those declarations for a block.\n\nArguments:\nview_name (string): the name of the view requested.\n\nReturns:\neither the function or None", "source": "codesearchnet"}
{"code": "def get_suffixes(arr):\n    \n    arr = tuple(arr)\n    return [arr]\n    return (arr[i:] for i in range(len(arr)))", "docstring": "Returns all possible suffixes of an array (lazy evaluated)\nArgs:\narr: input array\nReturns:\nArray of all possible suffixes (as tuples)", "source": "juraj-google-style"}
{"code": "def sql_column_like_drug(self, column_name: str) -> str:\n    clauses = ['{col} LIKE {fragment}'.format(col=column_name, fragment=sql_string_literal(f)) for f in self.sql_like_fragments]\n    return '({})'.format(' OR '.join(clauses))", "docstring": "Returns SQL like\n\n.. code-block:: sql\n\n(column_name LIKE '%drugname1%' OR\ncolumn_name LIKE '%drugname2%')\n\nfor the drug names that this Drug object knows about.\n\nArgs:\ncolumn_name: column name, pre-escaped if necessary\n\nReturns:\nSQL fragment as above", "source": "codesearchnet"}
{"code": "def main():\n    parser = argparse.ArgumentParser(description='Cherry picking automation.')\n    parser.add_argument('--version', help='<new_major_ver>.<new_minor_ver>.<new_patch_ver>', default='')\n    parser.add_argument('--nightly', help='disable the service provisioning step', action='store_true')\n    args = parser.parse_args()\n    check_all_files()\n    old_version = get_current_semver_version()\n    if args.nightly:\n        if args.version:\n            new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION)\n            new_version.set_identifier_string('-dev' + time.strftime('%Y%m%d'))\n        else:\n            new_version = Version(old_version.major, str(old_version.minor), old_version.patch, '-dev' + time.strftime('%Y%m%d'), NIGHTLY_VERSION)\n    else:\n        new_version = Version.parse_from_string(args.version, SNAPSHOT_VERSION)\n    update_tf_version_bzl(old_version, new_version)\n    update_bazelrc(old_version, new_version)\n    update_readme(old_version, new_version)\n    print('Major: %s -> %s' % (old_version.major, new_version.major))\n    print('Minor: %s -> %s' % (old_version.minor, new_version.minor))\n    print('Patch: %s -> %s\\n' % (old_version.patch, new_version.patch))\n    check_for_old_version(old_version, new_version)", "docstring": "This script updates all instances of version in the tensorflow directory.\n\nRequirements:\nversion: The version tag\nOR\nnightly: Create a nightly tag with current date\n\nRaises:\nRuntimeError: If the script is not being run from tf source dir", "source": "github-repos"}
{"code": "def validate(self, institute, case, user, link, variant, validate_type):\n        \n        if not validate_type in SANGER_OPTIONS:\n            LOG.warning(\"Invalid validation string: %s\", validate_type)\n            LOG.info(\"Validation options: %s\", ', '.join(SANGER_OPTIONS))\n            return\n\n        updated_variant = self.variant_collection.find_one_and_update(\n            {'_id': variant['_id']},\n            {'$set': {'validation': validate_type}},\n            return_document=pymongo.ReturnDocument.AFTER\n        )\n\n        self.create_event(\n            institute=institute,\n            case=case,\n            user=user,\n            link=link,\n            category='variant',\n            verb='validate',\n            variant=variant,\n            subject=variant['display_name'],\n        )\n        return updated_variant", "docstring": "Mark validation status for a variant.\n\nArguments:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (str): The url to be used in the event\nvariant (dict): A variant object\nvalidate_type(str): The outcome of validation.\nchoices=('True positive', 'False positive')\n\nReturns:\nupdated_variant(dict)", "source": "juraj-google-style"}
{"code": "def parse(cls, args):\n        \n        parsed = {}\n\n        try:\n            (options, args) = cls.optparser.parse_args(args)\n        except OptionParsingError as e:\n            raise ParseError(e.msg, cls.optparser.format_help())\n        except OptionParsingExit as e:\n            return None\n\n        parsed['label'] = options.label\n        parsed['can_notify'] = options.can_notify\n        parsed['name'] = options.name\n        parsed['tags'] = options.tags\n        parsed[\"command_type\"] = \"HadoopCommand\"\n        parsed['print_logs'] = options.print_logs\n        parsed['print_logs_live'] = options.print_logs_live\n        parsed['pool'] = options.pool\n\n        if len(args) < 2:\n            raise ParseError(\"Need at least two arguments\", cls.usage)\n\n        subcmd = args.pop(0)\n        if subcmd not in cls.subcmdlist:\n            raise ParseError(\"First argument must be one of <%s>\" %\n                             \"|\".join(cls.subcmdlist))\n\n        parsed[\"sub_command\"] = subcmd\n        parsed[\"sub_command_args\"] = \" \".join(\"'\" + str(a) + \"'\" for a in args)\n\n        return parsed", "docstring": "Parse command line arguments to construct a dictionary of command\nparameters that can be used to create a command\n\nArgs:\n`args`: sequence of arguments\n\nReturns:\nDictionary that can be used in create method\n\nRaises:\nParseError: when the arguments are not correct", "source": "juraj-google-style"}
{"code": "def filter_by_analysis_period(self, analysis_period):\n        \n        self._check_analysis_period(analysis_period)\n        _filtered_data = self.filter_by_doys(analysis_period.doys_int)\n        _filtered_data.header._analysis_period = analysis_period\n        return _filtered_data", "docstring": "Filter the Data Collection based on an analysis period.\n\nArgs:\nanalysis period: A Ladybug analysis period\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def from_data(cls, data):\n    obj = cls()\n    with contextlib.closing(BytesIO(data)) as file_handle:\n        obj.load_file(file_handle)\n    return obj", "docstring": "Load an FCS file from a bytes-like object.\n\nArgs:\ndata: buffer containing contents of an FCS file.\n\nReturns:\nFCSParser instance with data loaded", "source": "codesearchnet"}
{"code": "def restrict_with(self, expr: str, error_tag: str = None,\n                      error_message: str = None) -> None:\n        \n        def parse(x: str) -> Number:\n            res = self.parser(x)\n            if res is None:\n                raise InvalidArgument(expr)\n            return res\n\n        def simpl(rng: List[Number]) -> List[Number]:\n            return ([rng[0]] if rng[0] == rng[1] else rng)\n\n        def to_num(xs): return [parse(x) for x in xs]\n        lo = self.intervals[0][0]\n        hi = self.intervals[-1][-1]\n        ran = []\n        for p in [p.strip() for p in expr.split(\"|\")]:\n            r = [i.strip() for i in p.split(\"..\")]\n            if len(r) > 2:\n                raise InvalidArgument(expr)\n            ran.append(r)\n        if ran[0][0] != \"min\":\n            lo = parse(ran[0][0])\n        if ran[-1][-1] != \"max\":\n            hi = parse(ran[-1][-1])\n        self.intervals = (\n            [simpl([lo, hi])] if len(ran) == 1 else (\n                [simpl([lo, parse(ran[0][-1])])] +\n                [to_num(r) for r in ran[1:-1]] +\n                [simpl([parse(ran[-1][0]), hi])]))\n        if error_tag:\n            self.error_tag = error_tag\n        if error_message:\n            self.error_message = error_message", "docstring": "Combine the receiver with new intervals.\n\nArgs:\nexpr: \"range\" or \"length\" expression.\nerror_tag: error tag of the new expression.\nerror_message: error message for the new expression.\n\nRaises:\nInvalidArgument: If parsing of `expr` fails.", "source": "juraj-google-style"}
{"code": "def from_pandas(cls, df, block_partitions_cls):\n        \n        new_index = df.index\n        new_columns = df.columns\n        new_dtypes = df.dtypes\n        new_data = block_partitions_cls.from_pandas(df)\n        return cls(new_data, new_index, new_columns, dtypes=new_dtypes)", "docstring": "Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.\n\nArgs:\ncls: DataManger object to convert the DataFrame to.\ndf: Pandas DataFrame object.\nblock_partitions_cls: BlockParitions object to store partitions\n\nReturns:\nReturns DataManager containing data from the Pandas DataFrame.", "source": "juraj-google-style"}
{"code": "def sharded_filename(filename_tensor: tensor_lib.Tensor, shard: int, num_shards: tensor_lib.Tensor) -> tensor_lib.Tensor:\n    return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)", "docstring": "Append sharding information to a filename.\n\nArgs:\nfilename_tensor: A string tensor.\nshard: Integer.  The shard for the filename.\nnum_shards: An int Tensor for the number of shards.\n\nReturns:\nA string tensor.", "source": "github-repos"}
{"code": "def _contains_nd(nodes, point):\n    min_vals = np.min(nodes, axis=1)\n    if (not np.all((min_vals <= point))):\n        return False\n    max_vals = np.max(nodes, axis=1)\n    if (not np.all((point <= max_vals))):\n        return False\n    return True", "docstring": "r\"\"\"Predicate indicating if a point is within a bounding box.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): A set of points.\npoint (numpy.ndarray): A 1D NumPy array representing a point\nin the same dimension as ``nodes``.\n\nReturns:\nbool: Indicating containment.", "source": "codesearchnet"}
{"code": "def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):\n  \n  if is_training and keep_prob > 0:\n    with tf.name_scope(scope, 'Dropout', [inputs]):\n      return tf.nn.dropout(inputs, keep_prob)\n  else:\n    return inputs", "docstring": "Returns a dropout layer applied to the input.\n\nArgs:\ninputs: the tensor to pass to the Dropout layer.\nkeep_prob: the probability of keeping each input unit.\nis_training: whether or not the model is in training mode. If so, dropout is\napplied and values scaled. Otherwise, inputs is returned.\nscope: Optional scope for name_scope.\n\nReturns:\na tensor representing the output of the operation.", "source": "juraj-google-style"}
{"code": "def AddCustomJsonFieldMapping(message_type, python_name, json_name, package=None):\n    if (not issubclass(message_type, messages.Message)):\n        raise exceptions.TypecheckError(('Cannot set JSON field mapping for non-message \"%s\"' % message_type))\n    try:\n        _ = message_type.field_by_name(python_name)\n    except KeyError:\n        raise exceptions.InvalidDataError(('Field %s not recognized for type %s' % (python_name, message_type)))\n    field_mappings = _JSON_FIELD_MAPPINGS.setdefault(message_type, {})\n    _CheckForExistingMappings('field', message_type, python_name, json_name)\n    field_mappings[python_name] = json_name", "docstring": "Add a custom wire encoding for a given message field.\n\nThis is primarily used in generated code, to handle enum values\nwhich happen to be Python keywords.\n\nArgs:\nmessage_type: (messages.Message) A message type\npython_name: (basestring) Python name for this value.\njson_name: (basestring) JSON name to be used on the wire.\npackage: (NoneType, optional) No effect, exists for legacy compatibility.", "source": "codesearchnet"}
{"code": "def times_update(self, factor):\n    if (factor < 0):\n        raise ValueError('The factor must not be negative.')\n    elif (factor == 0):\n        self.clear()\n    else:\n        _elements = self._elements\n        for element in _elements:\n            _elements[element] *= factor\n        self._total *= factor", "docstring": "Update each this multiset by multiplying each element's multiplicity with the given scalar factor.\n\n>>> ms = Multiset('aab')\n>>> ms.times_update(2)\n>>> sorted(ms)\n['a', 'a', 'a', 'a', 'b', 'b']\n\nYou can also use the ``*=`` operator for the same effect:\n\n>>> ms = Multiset('ac')\n>>> ms *= 3\n>>> sorted(ms)\n['a', 'a', 'a', 'c', 'c', 'c']\n\nFor a variant of the operation which does not modify the multiset, but returns a new\nmultiset instead see :meth:`times`.\n\nArgs:\nfactor: The factor to multiply each multiplicity with.", "source": "codesearchnet"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return token_ids_0\n    return token_ids_0 + token_ids_1", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens.\n\nThis implementation does not add special tokens and this method should be overridden in a subclass.\n\nArgs:\ntoken_ids_0 (`List[int]`): The first tokenized sequence.\ntoken_ids_1 (`List[int]`, *optional*): The second tokenized sequence.\n\nReturns:\n`List[int]`: The model input with special tokens.", "source": "github-repos"}
{"code": "def channel_ready_future(channel):\n    fut = channel._loop.create_future()\n\n    def _set_result(state):\n        if ((not fut.done()) and (state is _grpc.ChannelConnectivity.READY)):\n            fut.set_result(None)\n    fut.add_done_callback((lambda f: channel.unsubscribe(_set_result)))\n    channel.subscribe(_set_result, try_to_connect=True)\n    return fut", "docstring": "Creates a Future that tracks when a Channel is ready.\n\nCancelling the Future does not affect the channel's state machine.\nIt merely decouples the Future from channel state machine.\n\nArgs:\nchannel: A Channel object.\n\nReturns:\nA Future object that matures when the channel connectivity is\nChannelConnectivity.READY.", "source": "codesearchnet"}
{"code": "def transitive_inputs(self, node_name, include_control=True, include_reversed_ref=False, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node inputs are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    input_lists = [self._debug_graphs[device_name].node_inputs]\n    if include_control:\n        input_lists.append(self._debug_graphs[device_name].node_ctrl_inputs)\n    if include_reversed_ref:\n        input_lists.append(self._debug_graphs[device_name].node_reversed_ref_inputs)\n    tracer = debug_graphs.DFSGraphTracer(input_lists, skip_node_names=self._get_merge_node_names(device_name))\n    tracer.trace(node_name)\n    return tracer.inputs()", "docstring": "Get the transitive inputs of given node according to partition graphs.\n\nArgs:\nnode_name: Name of the node.\ninclude_control: Include control inputs (True by default).\ninclude_reversed_ref: Whether a ref input, say from A to B, is to be also\nconsidered as an input from B to A. The rationale is that ref inputs\ngenerally let the recipient (e.g., B in this case) mutate the value of\nthe source (e.g., A in this case). So the reverse direction of the ref\nedge reflects the direction of information flow.\ndevice_name: (`str`) name of the device. If there is only one device or if\nnode_name exists on only one device, this argument is optional.\n\nReturns:\n(`list` of `str`) all transitive inputs to the node, as a list of node\nnames.\n\nRaises:\nLookupError: If node inputs and control inputs have not been loaded\nfrom partition graphs yet.", "source": "github-repos"}
{"code": "def handle_range(schema, field, validator, parent_schema):\n    if (not isinstance(field, fields.Number)):\n        return schema\n    if validator.min:\n        schema['minimum'] = validator.min\n        schema['exclusiveMinimum'] = True\n    else:\n        schema['minimum'] = 0\n        schema['exclusiveMinimum'] = False\n    if validator.max:\n        schema['maximum'] = validator.max\n        schema['exclusiveMaximum'] = True\n    return schema", "docstring": "Adds validation logic for ``marshmallow.validate.Range``, setting the\nvalues appropriately ``fields.Number`` and it's subclasses.\n\nArgs:\nschema (dict): The original JSON schema we generated. This is what we\nwant to post-process.\nfield (fields.Field): The field that generated the original schema and\nwho this post-processor belongs to.\nvalidator (marshmallow.validate.Length): The validator attached to the\npassed in field.\nparent_schema (marshmallow.Schema): The Schema instance that the field\nbelongs to.\n\nReturns:\ndict: A, possibly, new JSON Schema that has been post processed and\naltered.", "source": "codesearchnet"}
{"code": "def OpenFile(self, windows_path):\n    path_spec = self._path_resolver.ResolvePath(windows_path)\n    if (path_spec is None):\n        return None\n    return self._file_system.GetFileObjectByPathSpec(path_spec)", "docstring": "Opens the file specificed by the Windows path.\n\nArgs:\nwindows_path (str): Windows path to the file.\n\nReturns:\nFileIO: file-like object or None if the file does not exist.", "source": "codesearchnet"}
{"code": "def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None):\n    dtype = dtypes.as_dtype(dtype)\n    with ops.name_scope('random_uniform'):\n        samples = random_ops.random_uniform(shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)\n        if dtype.is_complex:\n            if seed is not None:\n                seed += 12345\n            more_samples = random_ops.random_uniform(shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)\n            samples = math_ops.complex(samples, more_samples)\n        return samples", "docstring": "Tensor with (possibly complex) Uniform entries.\n\nSamples are distributed like\n\n```\nUniform[minval, maxval], if dtype is real,\nX + iY,  where X, Y ~ Uniform[minval, maxval], if dtype is complex.\n```\n\nArgs:\nshape:  `TensorShape` or Python list.  Shape of the returned tensor.\nminval:  `0-D` `Tensor` giving the minimum values.\nmaxval:  `0-D` `Tensor` giving the maximum values.\ndtype:  `TensorFlow` `dtype` or Python dtype\nseed:  Python integer seed for the RNG.\n\nReturns:\n`Tensor` with desired shape and dtype.", "source": "github-repos"}
{"code": "def interpolate_beat_times(self, beat_times: numpy.ndarray, steps_per_beat: numpy.ndarray, n_extend: numpy.ndarray):\n    requires_backends(self, ['scipy'])\n    beat_times_function = scipy.interpolate.interp1d(np.arange(beat_times.size), beat_times, bounds_error=False, fill_value='extrapolate')\n    ext_beats = beat_times_function(np.linspace(0, beat_times.size + n_extend - 1, beat_times.size * steps_per_beat + n_extend))\n    return ext_beats", "docstring": "This method takes beat_times and then interpolates that using `scipy.interpolate.interp1d` and the output is\nthen used to convert raw audio to log-mel-spectrogram.\n\nArgs:\nbeat_times (`numpy.ndarray`):\nbeat_times is passed into `scipy.interpolate.interp1d` for processing.\nsteps_per_beat (`int`):\nused as an parameter to control the interpolation.\nn_extend (`int`):\nused as an parameter to control the interpolation.", "source": "github-repos"}
{"code": "def model_to_dot(model, show_shapes=False, show_dtype=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=200, subgraph=False, show_layer_activations=False, show_trainable=False, **kwargs):\n    from keras.src.ops.function import make_node_key\n    if not model.built:\n        raise ValueError('This model has not yet been built. Build the model first by calling `build()` or by calling the model on a batch of data.')\n    from keras.src.models import functional\n    from keras.src.models import sequential\n    if not check_pydot():\n        raise ImportError('You must install pydot (`pip install pydot`) for model_to_dot to work.')\n    if subgraph:\n        dot = pydot.Cluster(style='dashed', graph_name=model.name)\n        dot.set('label', model.name)\n        dot.set('labeljust', 'l')\n    else:\n        dot = pydot.Dot()\n        dot.set('rankdir', rankdir)\n        dot.set('concentrate', True)\n        dot.set('dpi', dpi)\n        dot.set('splines', 'ortho')\n        dot.set_node_defaults(shape='record')\n    if kwargs.pop('layer_range', None) is not None:\n        raise ValueError('Argument `layer_range` is no longer supported.')\n    if kwargs:\n        raise ValueError(f'Unrecognized keyword arguments: {kwargs}')\n    kwargs = {'show_layer_names': show_layer_names, 'show_layer_activations': show_layer_activations, 'show_dtype': show_dtype, 'show_shapes': show_shapes, 'show_trainable': show_trainable}\n    if isinstance(model, sequential.Sequential):\n        layers = model.layers\n    elif not isinstance(model, functional.Functional):\n        node = make_node(model, **kwargs)\n        dot.add_node(node)\n        return dot\n    else:\n        layers = model._operations\n    for i, layer in enumerate(layers):\n        if expand_nested and isinstance(layer, (functional.Functional, sequential.Sequential)):\n            submodel = model_to_dot(layer, show_shapes, show_dtype, show_layer_names, rankdir, expand_nested, subgraph=True, show_layer_activations=show_layer_activations, show_trainable=show_trainable)\n            dot.add_subgraph(submodel)\n        else:\n            node = make_node(layer, **kwargs)\n            dot.add_node(node)\n    if isinstance(model, sequential.Sequential):\n        if not expand_nested:\n            for i in range(len(layers) - 1):\n                add_edge(dot, layers[i], layers[i + 1])\n            return dot\n        else:\n            layers = model.layers[1:]\n    for layer in layers:\n        for inbound_index, inbound_node in enumerate(layer._inbound_nodes):\n            if isinstance(model, functional.Functional) and make_node_key(layer, inbound_index) not in model._nodes:\n                continue\n            for input_index, input_tensor in enumerate(inbound_node.input_tensors):\n                input_history = input_tensor._keras_history\n                if input_history.operation is None:\n                    continue\n                input_node = input_history.operation._inbound_nodes[input_history.node_index]\n                output_index = input_history.tensor_index\n                source = input_node.operation\n                destination = layer\n                if not expand_nested:\n                    add_edge(dot, source, layer)\n                    continue\n                while isinstance(source, (functional.Functional, sequential.Sequential)):\n                    source, _, output_index = source.outputs[output_index]._keras_history\n                while isinstance(destination, (functional.Functional, 
sequential.Sequential)):\n                    if isinstance(destination, functional.Functional):\n                        destination = destination.inputs[input_index]._keras_history.operation\n                    else:\n                        destination = destination.layers[0]\n                add_edge(dot, source, destination)\n    return dot", "docstring": "Convert a Keras model to dot format.\n\nArgs:\nmodel: A Keras model instance.\nshow_shapes: whether to display shape information.\nshow_dtype: whether to display layer dtypes.\nshow_layer_names: whether to display layer names.\nrankdir: `rankdir` argument passed to PyDot,\na string specifying the format of the plot: `\"TB\"`\ncreates a vertical plot; `\"LR\"` creates a horizontal plot.\nexpand_nested: whether to expand nested Functional models\ninto clusters.\ndpi: Image resolution in dots per inch.\nsubgraph: whether to return a `pydot.Cluster` instance.\nshow_layer_activations: Display layer activations (only for layers that\nhave an `activation` property).\nshow_trainable: whether to display if a layer is trainable.\n\nReturns:\nA `pydot.Dot` instance representing the Keras model or\na `pydot.Cluster` instance representing nested model if\n`subgraph=True`.", "source": "github-repos"}
{"code": "def plot_generated_images(images, fname):\n  \n  fig = plt.figure(figsize=(4, 4))\n  canvas = backend_agg.FigureCanvasAgg(fig)\n\n  for i, image in enumerate(images):\n    ax = fig.add_subplot(4, 4, i + 1)\n    plt.axis('off')\n    ax.set_xticklabels([])\n    ax.set_yticklabels([])\n    ax.imshow(image.reshape(IMAGE_SHAPE[:-1]), cmap='Greys_r')\n\n  fig.tight_layout()\n  plt.subplots_adjust(wspace=0.05, hspace=0.05)\n  canvas.print_figure(fname, format='png')", "docstring": "Save a synthetic image as a PNG file.\n\nArgs:\nimages: samples of synthetic images generated by the generative network.\nfname: Python `str`, filename to save the plot to.", "source": "juraj-google-style"}
{"code": "def compute_output_signature(self, input_signature):\n\n    def check_type_return_shape(s):\n        if not isinstance(s, tensor.TensorSpec):\n            raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))\n        return s.shape\n    input_shape = nest.map_structure(check_type_return_shape, input_signature)\n    output_shape = self.compute_output_shape(input_shape)\n    dtype = self._compute_dtype\n    if dtype is None:\n        input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n        dtype = input_dtypes[0]\n    return nest.map_structure(lambda s: tensor.TensorSpec(dtype=dtype, shape=s), output_shape)", "docstring": "Compute the output tensor signature of the layer based on the inputs.\n\nUnlike a TensorShape object, a TensorSpec object contains both shape\nand dtype information for a tensor. This method allows layers to provide\noutput dtype information if it is different from the input dtype.\nFor any layer that doesn't implement this function,\nthe framework will fall back to use `compute_output_shape`, and will\nassume that the output dtype matches the input dtype.\n\nArgs:\ninput_signature: Single TensorSpec or nested structure of TensorSpec\nobjects, describing a candidate input for the layer.\n\nReturns:\nSingle TensorSpec or nested structure of TensorSpec objects, describing\nhow the layer would transform the provided input.\n\nRaises:\nTypeError: If input_signature contains a non-TensorSpec object.", "source": "github-repos"}
{"code": "def filter(self, cls, recursive=False):\n        \n        source = self.walk_preorder if recursive else self._children\n        return [\n            codeobj\n            for codeobj in source()\n            if isinstance(codeobj, cls)\n        ]", "docstring": "Retrieves all descendants (including self) that are instances\nof a given class.\n\nArgs:\ncls (class): The class to use as a filter.\n\nKwargs:\nrecursive (bool): Whether to descend recursively down the tree.", "source": "juraj-google-style"}
{"code": "def opcode_to_name(model, op_code):\n    op = model.operatorCodes[op_code]\n    code = max(op.builtinCode, op.deprecatedBuiltinCode)\n    for name, value in vars(schema_fb.BuiltinOperator).items():\n        if value == code:\n            return name\n    return None", "docstring": "Converts a TFLite op_code to the human readable name.\n\nArgs:\nmodel: The input tflite model.\nop_code: The op_code to resolve to a readable name.\n\nReturns:\nA string containing the human readable op name, or None if not resolvable.", "source": "github-repos"}
{"code": "def pyc_load(fp):\n    magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)\n    magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)\n    internals = MAGIC_MAP.get(magic_1)\n    if (internals is None):\n        raise ValueError(('Invalid or unknown magic (%d).' % magic_1))\n    if (magic_2 != 2573):\n        raise ValueError(('Invalid secondary magic (%d).' % magic_2))\n    timestamp = datetime.datetime.fromtimestamp(U32(fp.read(4), target=MARSHAL_TARGET))\n    if (internals['version'] >= 33):\n        file_size = U32(fp.read(4))\n    else:\n        file_size = None\n    code_object = marshal_load(fp, internals)\n    return PycFile(magic_1, internals, timestamp, file_size, code_object)", "docstring": "Load a .pyc file from a file-like object.\n\nArguments:\nfp(file): The file-like object to read.\n\nReturns:\nPycFile: The parsed representation of the .pyc file.", "source": "codesearchnet"}
{"code": "def duration(self):\n    duration = 0.0\n    if (len(self.events) > 0):\n        first = datetime.fromtimestamp(self.events[0]['timestamp'])\n        last = datetime.fromtimestamp(self.events[(- 1)]['timestamp'])\n        duration = (last - first).total_seconds()\n    return duration", "docstring": "Calculate how long the stage took.\n\nReturns:\nfloat: (current) duration of the stage", "source": "codesearchnet"}
{"code": "def _prepare_socket_file(self, socket_path, default_prefix):\n    if (socket_path is not None):\n        if os.path.exists(socket_path):\n            raise Exception('Socket file {} exists!'.format(socket_path))\n        socket_dir = os.path.dirname(socket_path)\n        try_to_create_directory(socket_dir)\n        return socket_path\n    return self._make_inc_temp(prefix=default_prefix, directory_name=self._sockets_dir)", "docstring": "Prepare the socket file for raylet and plasma.\n\nThis method helps to prepare a socket file.\n1. Make the directory if the directory does not exist.\n2. If the socket file exists, raise exception.\n\nArgs:\nsocket_path (string): the socket file to prepare.", "source": "codesearchnet"}
{"code": "def _VerifyValues(self, input_sizes=None, filter_sizes=None, out_backprop_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NHWC', data_format_dst='NHWC', expected=None):\n    total_size_1 = np.prod(input_sizes)\n    total_size_2 = np.prod(out_backprop_sizes)\n    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)\n    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)\n    strides = [1] + strides + [1]\n    if dilations is not None:\n        dilations = [1] + dilations + [1]\n    expected = np.reshape(expected, filter_sizes)\n    x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n    x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)\n    input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst)\n    out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(out_backprop_sizes, data_format_src, data_format_dst)\n    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst)\n    if dilations is not None:\n        dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst)\n    with self.session() as sess:\n        t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n        t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)\n        with self.test_scope():\n            tensor = gen_nn_ops.conv2d_backprop_filter(input=t1, filter_sizes=filter_sizes, out_backprop=t2, strides=strides, dilations=dilations, padding=padding, data_format=data_format_dst)\n        value = sess.run(tensor, {t1: x1, t2: x2})\n        self.assertAllEqual(filter_sizes, value.shape)\n        self.assertAllClose(expected, value, 0.001)", "docstring": "Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.\n\nArgs:\ninput_sizes: Input tensor dimensions in\n[batch, input_rows, input_cols, input_depth].\nfilter_sizes: Filter tensor dimensions in\n[kernel_rows, kernel_cols, input_depth, output_depth].\nout_backprop_sizes: Output gradients tensor dimensions.\nstrides: Stride.\ndilations: Dilations.\npadding: Padding type.\ndata_format_src: Data format input is in.\ndata_format_dst: Data format verification will run and input is converted\nto.\nexpected: Expected output.", "source": "github-repos"}
{"code": "def check(cls, status):\n    assert (cls.trigger is not None), 'Invalid ErrorTrap, trigger not set'\n    assert (cls.error is not None), 'Invalid ErrorTrap, error not set'\n    if (status == cls.trigger):\n        raise cls.error()", "docstring": "Checks if a status enum matches the trigger originally set, and\nif so, raises the appropriate error.\n\nArgs:\nstatus (int, enum): A protobuf enum response status to check.\n\nRaises:\nAssertionError: If trigger or error were not set.\n_ApiError: If the statuses don't match. Do not catch. Will be\ncaught automatically and sent back to the client.", "source": "codesearchnet"}
{"code": "def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary):\n    if (m_dict['index'] is not None):\n        raise ValueError('Assignment of a list to a list index.')\n    elements = filter(None, re.split('[ ,]', m_dict['vals']))\n    if (name in results_dictionary):\n        raise _reuse_fail(name, values)\n    try:\n        results_dictionary[name] = [parse_fn(e) for e in elements]\n    except ValueError:\n        _parse_fail(name, var_type, m_dict['vals'], values)", "docstring": "Update results_dictionary from a list of values.\n\nUsed to update results_dictionary to be returned by parse_values when\nencountering a clause with a list RHS (e.g.  \"arr=[1,2,3]\".)\n\nMutates results_dictionary.\n\nArgs:\nname: Name of variable in assignment (\"arr\").\nparse_fn: Function for parsing individual values.\nvar_type: Type of named variable.\nm_dict: Dictionary constructed from regex parsing.\nm_dict['val']: RHS value (scalar)\nvalues: Full expression being parsed\nresults_dictionary: The dictionary being updated for return by the parsing\nfunction.\n\nRaises:\nValueError: If the name has an index or the values cannot be parsed.", "source": "codesearchnet"}
{"code": "def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'ts': ts})\n    return self.api_call('chat.delete', json=kwargs)", "docstring": "Deletes a message.\n\nArgs:\nchannel (str): Channel containing the message to be deleted. e.g. 'C1234567890'\nts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'", "source": "codesearchnet"}
{"code": "def __init__(self, dllpath=None):\n        \n        self._lib = None\n        self._winlib = None\n        self._path = None\n        self._windows = sys.platform.startswith('win')\n        self._cygwin = sys.platform.startswith('cygwin')\n        self._temp = None\n\n        if self._windows or self._cygwin:\n            self._sdk = self.get_appropriate_windows_sdk_name()\n        else:\n            self._sdk = self.JLINK_SDK_NAME\n\n        if dllpath is not None:\n            self.load(dllpath)\n        else:\n            self.load_default()", "docstring": "Initializes an instance of a ``Library``.\n\nLoads the default J-Link DLL if ``dllpath`` is ``None``, otherwise\nloads the DLL specified by the given ``dllpath``.\n\nArgs:\nself (Library): the ``Library`` instance\ndllpath (str): the DLL to load into the library\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def _init_from_converter(self, options: QuantizationDebugOptions, converter: TFLiteConverter, calibrated_model: Optional[bytes]=None, float_model: Optional[bytes]=None) -> None:\n    self.quant_model = convert.mlir_quantize(calibrated_model, disable_per_channel=converter._experimental_disable_per_channel, fully_quantize=options.fully_quantize, enable_numeric_verify=True, denylisted_ops=options.denylisted_ops, denylisted_nodes=options.denylisted_nodes)\n    self._quant_interpreter = _interpreter.Interpreter(model_content=self.quant_model)\n    self._float_interpreter = None\n    if float_model is not None:\n        self._float_interpreter = _interpreter.Interpreter(model_content=float_model)", "docstring": "Convert the model and apply options.\n\nConverts the quantized model and initializes a quantized model interpreter\nwith the quantized model. Returns a float model interpreter if float model\nis provided.\n\nArgs:\noptions: a QuantizationDebugOptions object.\nconverter: an initialized tf.lite.TFLiteConverter.\ncalibrated_model: Calibrated model bytes.\nfloat_model: Float model bytes.", "source": "github-repos"}
{"code": "def writeline(self, line, line_number):\n        \n        tmp_file = tempfile.TemporaryFile('w+')\n        if not line.endswith(os.linesep):\n\n            line += os.linesep\n        try:\n\n            with open(self.path, 'r') as file_handle:\n\n                for count, new_line in enumerate(file_handle):\n\n                    if count == line_number:\n\n                        new_line = line\n\n                    tmp_file.write(new_line)\n\n            tmp_file.seek(0)\n            with open(self.path, 'w') as file_handle:\n\n                for new_line in tmp_file:\n\n                    file_handle.write(new_line)\n        finally:\n\n            tmp_file.close()", "docstring": "Rewrite a single line in the file.\n\nArgs:\nline (str): The new text to write to the file.\nline_number (int): The line of the file to rewrite. Numbering\nstarts at 0.", "source": "juraj-google-style"}
{"code": "def unregister_peer(self, connection_id):\n        \n        public_key = self.peer_to_public_key(connection_id)\n        if public_key:\n            self._consensus_notifier.notify_peer_disconnected(public_key)\n\n        with self._lock:\n            if connection_id in self._peers:\n                del self._peers[connection_id]\n                LOGGER.debug(\"Removed connection_id %s, \"\n                             \"connected identities are now %s\",\n                             connection_id, self._peers)\n                self._topology.set_connection_status(connection_id,\n                                                     PeerStatus.TEMP)\n            else:\n                LOGGER.warning(\"Connection unregister failed as connection \"\n                               \"was not registered: %s\",\n                               connection_id)", "docstring": "Removes a connection_id from the registry.\n\nArgs:\nconnection_id (str): A unique identifier which identifies an\nconnection on the network server socket.", "source": "juraj-google-style"}
{"code": "class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):\n\n    def __init__(self, bos_token_id: int):\n        self.bos_token_id = bos_token_id\n\n    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n        new_scores = jnp.full(scores.shape, -float('inf'))\n        apply_penalty = 1 - jnp.bool_(cur_len - 1)\n        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)\n        return scores", "docstring": "[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.\n\nArgs:\nbos_token_id (`int`):\nThe id of the token to force as the first generated token.", "source": "github-repos"}
{"code": "def forward(self, inference_args, input_tangents):\n    if self._forward is None:\n        self._forward, self._forward_graph, self._backward, self._forwardprop_output_indices, self._num_forwardprop_outputs = self._forward_and_backward_functions(inference_args, input_tangents)\n    return self._forward", "docstring": "Construct or fetch a forward function with side-outputs.\n\nWhen graph building without a tape active, symbolic gradients rely on\nregenerating the backward function for higher-order gradients (to account\nfor new side outputs of the rewritten forward function call). Thus there is\nno fixed backward function for this case. However, when a tape is active\n(eager or graph building), we generate fixed backward and forward functions\nat forward function call time.\n\nThis difference between the tape and non-tape cases is to avoid building\nunneeded backward functions while graph building (where we may or may not\neventually need gradients).\n\nArgs:\ninference_args: A flat list of Tensors, arguments to the inference\nfunction.\ninput_tangents: A flat list of Tensors, jvps associated with\n`inference_args`.\n\nReturns:\nA forward atomic_function.AtomicFunction.", "source": "github-repos"}
{"code": "def _build(self, inputs, memory, treat_input_as_matrix=False):\n    \n    if treat_input_as_matrix:\n      inputs = basic.BatchFlatten(preserve_dims=2)(inputs)\n      inputs_reshape = basic.BatchApply(\n          basic.Linear(self._mem_size), n_dims=2)(inputs)\n    else:\n      inputs = basic.BatchFlatten()(inputs)\n      inputs = basic.Linear(self._mem_size)(inputs)\n      inputs_reshape = tf.expand_dims(inputs, 1)\n\n    memory_plus_input = tf.concat([memory, inputs_reshape], axis=1)\n    next_memory = self._attend_over_memory(memory_plus_input)\n\n    n = inputs_reshape.get_shape().as_list()[1]\n    next_memory = next_memory[:, :-n, :]\n\n    if self._gate_style == 'unit' or self._gate_style == 'memory':\n      self._input_gate, self._forget_gate = self._create_gates(\n          inputs_reshape, memory)\n      next_memory = self._input_gate * tf.tanh(next_memory)\n      next_memory += self._forget_gate * memory\n\n    output = basic.BatchFlatten()(next_memory)\n    return output, next_memory", "docstring": "Adds relational memory to the TensorFlow graph.\n\nArgs:\ninputs: Tensor input.\nmemory: Memory output from the previous time step.\ntreat_input_as_matrix: Optional, whether to treat `input` as a sequence\nof matrices. Defaulta to False, in which case the input is flattened\ninto a vector.\n\nReturns:\noutput: This time step's output.\nnext_memory: The next version of memory to use.", "source": "juraj-google-style"}
{"code": "def minimize_peak_memory(graph, scheduler_alg):\n    if (scheduler_alg == 'NAIVE'):\n        return _minimize_peak_memory_naive(graph)\n    elif (scheduler_alg == 'LIST'):\n        return _minimize_peak_memory_list(graph)\n    else:\n        raise NotImplementedError('{} is not a scheduler algorithm. It should be one of NAIVE or LIST.'.format(scheduler_alg))", "docstring": "Computes a schedule to minimize peak memory.\n\nArgs:\ngraph: an mtf.auto_mtf.graph_interface.GraphInterface.\nscheduler_alg: a string, one of 'NAIVE' or 'LIST'\n\nReturns:\nan iterable of integers representing the schedule.", "source": "codesearchnet"}
{"code": "def scaled_wulff(self, wulffshape, r):\n    r_ratio = (r / wulffshape.effective_radius)\n    miller_list = wulffshape.miller_energy_dict.keys()\n    se_list = np.array(list(wulffshape.miller_energy_dict.values()))\n    scaled_se = (se_list * r_ratio)\n    return WulffShape(wulffshape.lattice, miller_list, scaled_se, symprec=self.symprec)", "docstring": "Scales the Wulff shape with an effective radius r. Note that the resulting\nWulff does not neccesarily have the same effective radius as the one\nprovided. The Wulff shape is scaled by its surface energies where first\nthe surface energies are scale by the minimum surface energy and then\nmultiplied by the given effective radius.\n\nArgs:\nwulffshape (WulffShape): Initial, unscaled WulffShape\nr (float): Arbitrary effective radius of the WulffShape\n\nReturns:\nWulffShape (scaled by r)", "source": "codesearchnet"}
{"code": "def load_yaml_config(conf_file):\n    global g_config\n    with open(conf_file) as fp:\n        g_config = util.yaml_load(fp)\n        src_dir = get_path('src_dir', None)\n        if (src_dir is not None):\n            sys.path.insert(0, src_dir)\n        for cmd in get('commands', []):\n            _import(cmd)", "docstring": "Load a YAML configuration.\n\nThis will not update the configuration but replace it entirely.\n\nArgs:\nconf_file (str):\nPath to the YAML config. This function will not check the file name\nor extension and will just crash if the given file does not exist or\nis not a valid YAML file.", "source": "codesearchnet"}
{"code": "def _parse_description(html_chunk):\n    \n    description_tag = html_chunk.match(\n        [\"div\", {\"class\": \"kniha_detail_text\"}],\n        \"p\"\n    )\n\n    if not description_tag:\n        return None\n\n    description = get_first_content(description_tag)\n    description = description.replace(\"<br />\", \"\\n\")\n    description = description.replace(\"<br/>\", \"\\n\")\n\n    return dhtmlparser.removeTags(description).strip()", "docstring": "Parse description of the book.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nstr/None: Description as string or None if not found.", "source": "juraj-google-style"}
{"code": "def _subtoken_ids_to_tokens(self, subtokens):\n    \n    concatenated = \"\".join(\n        [self._subtoken_id_to_subtoken_string(s) for s in subtokens])\n    split = concatenated.split(\"_\")\n    ret = []\n    for t in split:\n      if t:\n        unescaped = _unescape_token(t + \"_\")\n        if unescaped:\n          ret.append(unescaped)\n    return ret", "docstring": "Converts a list of subtoken ids to a list of tokens.\n\nArgs:\nsubtokens: a list of integers in the range [0, vocab_size)\nReturns:\na list of strings.", "source": "juraj-google-style"}
{"code": "def default_filename(ext):\n    \n    if ext == \"py\":\n        raise RuntimeError(\"asked for a default filename with 'py' extension\")\n\n    filename = detect_current_filename()\n\n    if filename is None:\n        return temp_filename(ext)\n\n    basedir = dirname(filename) or getcwd()\n\n    if _no_access(basedir) or _shares_exec_prefix(basedir):\n        return temp_filename(ext)\n\n    name, _ = splitext(basename(filename))\n    return join(basedir, name + \".\" + ext)", "docstring": "Generate a default filename with a given extension, attempting to use\nthe filename of the currently running process, if possible.\n\nIf the filename of the current process is not available (or would not be\nwritable), then a temporary file with the given extension is returned.\n\nArgs:\next (str) : the desired extension for the filename\n\nReturns:\nstr\n\nRaises:\nRuntimeError\nIf the extensions requested is \".py\"", "source": "juraj-google-style"}
{"code": "class RealmScorerOutput(ModelOutput):\n    relevance_score: Optional[torch.FloatTensor] = None\n    query_score: Optional[torch.FloatTensor] = None\n    candidate_score: Optional[torch.FloatTensor] = None", "docstring": "Outputs of [`RealmScorer`] models.\n\nArgs:\nrelevance_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates)`):\nThe relevance score of document candidates (before softmax).\nquery_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`):\nQuery score derived from the query embedder.\ncandidate_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates, config.retriever_proj_size)`):\nCandidate score derived from the embedder.", "source": "github-repos"}
{"code": "def get_package_from_string(txt, paths=None):\n    o = VersionedObject(txt)\n    return get_package(o.name, o.version, paths=paths)", "docstring": "Get a package given a string.\n\nArgs:\ntxt (str): String such as 'foo', 'bah-1.3'.\npaths (list of str, optional): paths to search for package, defaults\nto `config.packages_path`.\n\nReturns:\n`Package` instance, or None if no package was found.", "source": "codesearchnet"}
{"code": "def _replace_sparse_with_values(value, sparse_list):\n    flat_vals = nest.flatten(value, expand_composites=False)\n    new_vals = []\n    for v in flat_vals:\n        if isinstance(v, sparse_tensor.SparseTensor):\n            sparse_list.append(v)\n            new_vals.append(v.values)\n        else:\n            new_vals.append(v)\n    return nest.pack_sequence_as(value, new_vals, expand_composites=False)", "docstring": "Replace `SparseTensor`s with their values in `value`\n\nEach `SparseTensor` in `value` is replaced by its `values` tensor, and\ncollects all `SparseTensor`s in `sparse_list`.\n\nArgs:\nvalue: A structure of `Tensor`s and `SparseTensor`s\nsparse_list: A list. Output parameter that collects all `SparseTensor`s in\n`value`.\n\nReturns:\n`value` with each SparseTensor replaced by its `.value` attribute.", "source": "github-repos"}
{"code": "def _project_TH2(self, hist: Hist) -> Any:\n        \n        if len(self.projection_axes) != 1:\n            raise ValueError(len(self.projection_axes), \"Invalid number of axes\")\n\n        \n        \n        \n        \n        \n        projection_func_map = {\n            TH1AxisType.x_axis.value: hist.ProjectionX,\n            TH1AxisType.y_axis.value: hist.ProjectionY\n        }\n\n        \n        \n        \n        try:\n            \n            axis_type = self.projection_axes[0].axis_type.value\n        except ValueError:\n            \n            axis_type = self.axis_type  \n\n        projection_func = projection_func_map[axis_type]\n\n        \n        logger.info(f\"Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}\")\n        projected_hist = projection_func()\n\n        return projected_hist", "docstring": "Perform the actual TH2 -> TH1 projection.\n\nThis projection can only be to 1D.\n\nArgs:\nhist (ROOT.TH2): Histogram from which the projections should be performed.\nReturns:\nROOT.TH1: The projected histogram.", "source": "juraj-google-style"}
{"code": "async def _connect_and_read(self):\n    while (not self._stopped):\n        try:\n            self._connection_attempts += 1\n            async with aiohttp.ClientSession(loop=self._event_loop, timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:\n                self._session = session\n                (url, data) = (await self._retreive_websocket_info())\n                async with session.ws_connect(url, heartbeat=self.ping_interval, ssl=self.ssl, proxy=self.proxy) as websocket:\n                    self._logger.debug('The Websocket connection has been opened.')\n                    self._websocket = websocket\n                    self._dispatch_event(event='open', data=data)\n                    (await self._read_messages())\n        except (client_err.SlackClientNotConnectedError, client_err.SlackApiError) as exception:\n            self._logger.debug(str(exception))\n            self._dispatch_event(event='error', data=exception)\n            if (self.auto_reconnect and (not self._stopped)):\n                (await self._wait_exponentially(exception))\n                continue\n            self._logger.exception('The Websocket encountered an error. Closing the connection...')\n            self._close_websocket()\n            raise", "docstring": "Retreives and connects to Slack's RTM API.\n\nMakes an authenticated call to Slack's RTM API to retrieve\na websocket URL. Then connects to the message server and\nreads event messages as they come in.\n\nIf 'auto_reconnect' is specified we\nretrieve a new url and reconnect any time the connection\nis lost unintentionally or an exception is thrown.\n\nRaises:\nSlackApiError: Unable to retreive RTM URL from Slack.\nwebsockets.exceptions: Errors thrown by the 'websockets' library.", "source": "codesearchnet"}
{"code": "def gather(self, indices, name=None):\n    return self._implementation.gather(indices, name=name)", "docstring": "Return selected values in the TensorArray as a packed `Tensor`.\n\nAll of selected values must have been written and their shapes\nmust all match.\n\nArgs:\nindices: A `1-D` `Tensor` taking values in `[0, max_value)`.  If the\n`TensorArray` is not dynamic, `max_value=size()`.\nname: A name for the operation (optional).\n\nReturns:\nThe tensors in the `TensorArray` selected by `indices`, packed into one\ntensor.", "source": "github-repos"}
{"code": "def get_time_of_day_description(self):\n    seconds_expression = self._expression_parts[0]\n    minute_expression = self._expression_parts[1]\n    hour_expression = self._expression_parts[2]\n    description = StringBuilder()\n    if ((any(((exp in minute_expression) for exp in self._special_characters)) is False) and (any(((exp in hour_expression) for exp in self._special_characters)) is False) and (any(((exp in seconds_expression) for exp in self._special_characters)) is False)):\n        description.append(_('At '))\n        description.append(self.format_time(hour_expression, minute_expression, seconds_expression))\n    elif (('-' in minute_expression) and (',' not in minute_expression) and (any(((exp in hour_expression) for exp in self._special_characters)) is False)):\n        minute_parts = minute_expression.split('-')\n        description.append(_('Every minute between {0} and {1}').format(self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))\n    elif ((',' in hour_expression) and ('-' not in hour_expression) and (any(((exp in minute_expression) for exp in self._special_characters)) is False)):\n        hour_parts = hour_expression.split(',')\n        description.append(_('At'))\n        for (i, hour_part) in enumerate(hour_parts):\n            description.append(' ')\n            description.append(self.format_time(hour_part, minute_expression))\n            if (i < (len(hour_parts) - 2)):\n                description.append(',')\n            if (i == (len(hour_parts) - 2)):\n                description.append(_(' and'))\n    else:\n        seconds_description = self.get_seconds_description()\n        minutes_description = self.get_minutes_description()\n        hours_description = self.get_hours_description()\n        description.append(seconds_description)\n        if description:\n            description.append(', ')\n        description.append(minutes_description)\n        if description:\n            description.append(', ')\n        description.append(hours_description)\n    return str(description)", "docstring": "Generates a description for only the TIMEOFDAY portion of the expression\n\nReturns:\nThe TIMEOFDAY description", "source": "codesearchnet"}
{"code": "def drive_enclosures(self):\n    if (not self.__drive_enclures):\n        self.__drive_enclures = DriveEnclosures(self.__connection)\n    return self.__drive_enclures", "docstring": "Gets the Drive Enclosures API client.\n\nReturns:\nDriveEnclosures:", "source": "codesearchnet"}
{"code": "def _hexdecode(hexstring):\n    \n    \n    \n    \n    \n\n    _checkString(hexstring, description='hexstring')\n\n    if len(hexstring) % 2 != 0:\n        raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring))\n\n    if sys.version_info[0] > 2:\n        by = bytes(hexstring, 'latin1')\n        try:\n            return str(binascii.unhexlify(by), encoding='latin1')\n        except binascii.Error as err:\n            new_error_message = 'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(err.args[0], hexstring)\n            raise TypeError(new_error_message)\n\n    else:\n        try:\n            return hexstring.decode('hex')\n        except TypeError as err:\n            raise TypeError('Hexdecode reported an error: {}. Input hexstring: {}'.format(err.message, hexstring))", "docstring": "Convert a hex encoded string to a byte string.\n\nFor example '4A' will return 'J', and '04' will return ``'\\\\x04'`` (which has length 1).\n\nArgs:\nhexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length.\nAllowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space).\n\nReturns:\nA string of half the length, with characters corresponding to all 0-255 values for each byte.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"}
{"code": "def str_to_mac(mac_string):\n    \n    sp = mac_string.split(':')\n    mac_string = ''.join(sp)\n    return binascii.unhexlify(mac_string)", "docstring": "Convert a readable string to a MAC address\n\nArgs:\nmac_string (str): a readable string (e.g. '01:02:03:04:05:06')\nReturns:\nstr: a MAC address in hex form", "source": "juraj-google-style"}
{"code": "def indicator_arrays(tc_entity_array):\n        \n        type_dict = {}\n        for ea in tc_entity_array:\n            type_dict.setdefault(ea['type'], []).append(ea['value'])\n        return type_dict", "docstring": "Convert TCEntityArray to Indicator Type dictionary.\n\nArgs:\ntc_entity_array (dictionary): The TCEntityArray to convert.\n\nReturns:\n(dictionary): Dictionary containing arrays of indicators for each indicator type.", "source": "juraj-google-style"}
{"code": "def ray_get_and_free(object_ids):\n    \n\n    global _last_free_time\n    global _to_free\n\n    result = ray.get(object_ids)\n    if type(object_ids) is not list:\n        object_ids = [object_ids]\n    _to_free.extend(object_ids)\n\n    \n    now = time.time()\n    if (len(_to_free) > MAX_FREE_QUEUE_SIZE\n            or now - _last_free_time > FREE_DELAY_S):\n        ray.internal.free(_to_free)\n        _to_free = []\n        _last_free_time = now\n\n    return result", "docstring": "Call ray.get and then queue the object ids for deletion.\n\nThis function should be used whenever possible in RLlib, to optimize\nmemory usage. The only exception is when an object_id is shared among\nmultiple readers.\n\nArgs:\nobject_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.\n\nReturns:\nThe result of ray.get(object_ids).", "source": "juraj-google-style"}
{"code": "def _BuildKeyHierarchy(self, subkeys, values):\n    \n    if subkeys:\n      for registry_key in subkeys:\n        name = registry_key.name.upper()\n        if name in self._subkeys:\n          continue\n        self._subkeys[name] = registry_key\n\n        \n        registry_key._key_path = key_paths.JoinKeyPath([\n            self._key_path, registry_key.name])\n\n    if values:\n      for registry_value in values:\n        name = registry_value.name.upper()\n        if name in self._values:\n          continue\n        self._values[name] = registry_value", "docstring": "Builds the Windows Registry key hierarchy.\n\nArgs:\nsubkeys (list[FakeWinRegistryKey]): list of subkeys.\nvalues (list[FakeWinRegistryValue]): list of values.", "source": "juraj-google-style"}
{"code": "def expect_no_raises(message=None, extras=None):\n    \n    try:\n        yield\n    except Exception as e:\n        e_record = records.ExceptionRecord(e)\n        if extras:\n            e_record.extras = extras\n        msg = message or 'Got an unexpected exception'\n        details = '%s: %s' % (msg, e_record.details)\n        logging.exception(details)\n        e_record.details = details\n        recorder.add_error(e_record)", "docstring": "Expects no exception is raised in a context.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nA default message is added to the exception `details`.\n\nArgs:\nmessage: string, custom message to add to exception's `details`.\nextras: An optional field for extra information to be included in test\nresult.", "source": "juraj-google-style"}
{"code": "def _compute_attention(self, query, key, value, attention_mask=None, training=None, return_attention_scores=False):\n    if self._flash_attention and return_attention_scores:\n        raise ValueError('Returning attention scores is not supported when flash attention is enabled. Please disable flash attention to access attention scores.')\n    use_dot_product_attention = not (self._dropout > 0.0 or return_attention_scores or len(query.shape) != 4)\n    if use_dot_product_attention:\n        if attention_mask is not None:\n            mask_expansion_axis = -len(self._attention_axes) * 2 - 1\n            len_attention_scores_shape = 4\n            for _ in range(len_attention_scores_shape - len(attention_mask.shape)):\n                attention_mask = ops.expand_dims(attention_mask, axis=mask_expansion_axis)\n            attention_mask = ops.cast(attention_mask, dtype='bool')\n        attention_output = ops.dot_product_attention(query=query, key=key, value=value, bias=None, mask=attention_mask, scale=self._inverse_sqrt_key_dim, is_causal=False, flash_attention=self._flash_attention)\n        return (attention_output, None)\n    query = ops.multiply(query, ops.cast(self._inverse_sqrt_key_dim, query.dtype))\n    attention_scores = ops.einsum(self._dot_product_equation, key, query)\n    attention_scores = self._masked_softmax(attention_scores, attention_mask)\n    if self._dropout > 0.0:\n        final_attn_scores = self._dropout_layer(attention_scores, training=training)\n    else:\n        final_attn_scores = attention_scores\n    attention_output = ops.einsum(self._combine_equation, final_attn_scores, value)\n    return (attention_output, attention_scores)", "docstring": "Applies Dot-product attention with query, key, value tensors.\n\nThis function defines the computation inside `call` with projected\nmulti-head Q, K, V inputs. Users can override this function for\ncustomized attention implementation.\n\nArgs:\nquery: Projected query tensor of shape `(B, T, N, key_dim)`.\nkey: Projected key tensor of shape `(B, S, N, key_dim)`.\nvalue: Projected value tensor of shape `(B, S, N, value_dim)`.\nattention_mask: a boolean mask of shape `(B, T, S)`, that prevents\nattention to certain positions. It is generally not needed if\nthe `query` and `value` (and/or `key`) are masked.\ntraining: Python boolean indicating whether the layer should behave\nin training mode (adding dropout) or in inference mode (doing\nnothing).\n\nReturns:\nattention_output: Multi-headed outputs of attention computation.\nattention_scores: Multi-headed attention weights.", "source": "github-repos"}
{"code": "def install_dependencies(package: str) -> None:\n    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])", "docstring": "Install Python dependencies\n\nArgs:\npackage (string): The package to install", "source": "github-repos"}
{"code": "def message_upperbound(self, tree, spins, subtheta):\n        \n        energy_sources = set()\n        for v, subtree in tree.items():\n\n            assert all(u in spins for u in self._ancestors[v])\n\n            \n            \n            \n            def energy_contributions():\n                yield subtheta.linear[v]\n\n                for u, bias in subtheta.adj[v].items():\n                    if u in spins:\n                        yield Times(limitReal(spins[u]), bias)\n\n            energy = Plus(energy_contributions())\n\n            \n            \n            if subtree:\n                spins[v] = 1.\n                plus = self.message_upperbound(subtree, spins, subtheta)\n                spins[v] = -1.\n                minus = self.message_upperbound(subtree, spins, subtheta)\n                del spins[v]\n            else:\n                plus = minus = limitReal(0.0)\n\n            \n            m = FreshSymbol(REAL)\n\n            self.assertions.update({LE(m, Plus(energy, plus)),\n                                    LE(m, Plus(Times(energy, limitReal(-1.)), minus))})\n\n            energy_sources.add(m)\n\n        return Plus(energy_sources)", "docstring": "Determine an upper bound on the energy of the elimination tree.\n\nArgs:\ntree (dict): The current elimination tree\nspins (dict): The current fixed spins\nsubtheta (dict): Theta with spins fixed.\n\nReturns:\nThe formula for the energy of the tree.", "source": "juraj-google-style"}
{"code": "def intersection(self, other):\n    if (not hasattr(other, '__iter__')):\n        other = [other]\n    bounds = self.bounds\n    for range in other:\n        bounds = self._intersection(bounds, range.bounds)\n        if (not bounds):\n            return None\n    range = VersionRange(None)\n    range.bounds = bounds\n    return range", "docstring": "AND together version ranges.\n\nCalculates the intersection of this range with one or more other ranges.\n\nArgs:\nother: VersionRange object (or list of) to AND with.\n\nReturns:\nNew VersionRange object representing the intersection, or None if\nno ranges intersect.", "source": "codesearchnet"}
{"code": "def results(self, use_cache=True, dialect=None, billing_tier=None):\n    return self._materialization.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)", "docstring": "Materialize the view synchronously.\n\nIf you require more control over the execution, use execute() or execute_async().\n\nArgs:\nuse_cache: whether to use cached results or not.\ndialect : {'legacy', 'standard'}, default 'legacy'\n'legacy' : Use BigQuery's legacy SQL dialect.\n'standard' : Use BigQuery's standard SQL (beta), which is\ncompliant with the SQL 2011 standard.\nbilling_tier: Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge). If unspecified, this\nwill be set to your project default. This can also be used to override your\nproject-wide default billing tier on a per-query basis.\nReturns:\nA QueryResultsTable containing the result set.\nRaises:\nException if the query could not be executed or query response was malformed.", "source": "codesearchnet"}
{"code": "def write(self, file_name):\n        \n        try:\n            assert file_name[-6:] == '.xhtml'\n        except (AssertionError, IndexError):\n            raise ValueError('filename must end with .xhtml')\n        with open(file_name, 'wb') as f:\n            f.write(self.content.encode('utf-8'))", "docstring": "Writes the chapter object to an xhtml file.\n\nArgs:\nfile_name (str): The full name of the xhtml file to save to.", "source": "juraj-google-style"}
{"code": "def register_write(self, reg_index, value):\n    res = self._dll.JLINKARM_WriteReg(reg_index, value)\n    if (res != 0):\n        raise errors.JLinkException(('Error writing to register %d' % reg_index))\n    return value", "docstring": "Writes into an ARM register.\n\nNote:\nThe data is not immediately written, but is cached before being\ntransferred to the CPU on CPU start.\n\nArgs:\nself (JLink): the ``JLink`` instance\nreg_index (int): the ARM register to write to\nvalue (int): the value to write to the register\n\nReturns:\nThe value written to the ARM register.\n\nRaises:\nJLinkException: on write error.", "source": "codesearchnet"}
{"code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_div(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Divide this variable by `tf.IndexedSlices`.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to divide this variable by.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def load_database(adapter, variant_file=None, sv_file=None, family_file=None, family_type='ped', skip_case_id=False, gq_treshold=None, case_id=None, max_window=3000, profile_file=None, hard_threshold=0.95, soft_threshold=0.9):\n    vcf_files = []\n    nr_variants = None\n    vcf_individuals = None\n    if variant_file:\n        vcf_info = check_vcf(variant_file)\n        nr_variants = vcf_info['nr_variants']\n        variant_type = vcf_info['variant_type']\n        vcf_files.append(variant_file)\n        vcf_individuals = vcf_info['individuals']\n    nr_sv_variants = None\n    sv_individuals = None\n    if sv_file:\n        vcf_info = check_vcf(sv_file, 'sv')\n        nr_sv_variants = vcf_info['nr_variants']\n        vcf_files.append(sv_file)\n        sv_individuals = vcf_info['individuals']\n    profiles = None\n    matches = None\n    if profile_file:\n        profiles = get_profiles(adapter, profile_file)\n        matches = profile_match(adapter, profiles, hard_threshold=hard_threshold, soft_threshold=soft_threshold)\n    for _vcf_file in vcf_files:\n        vcf = get_vcf(_vcf_file)\n        if gq_treshold:\n            if (not vcf.contains('GQ')):\n                LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))\n                raise SyntaxError('GQ is not defined in vcf header')\n    family = None\n    family_id = None\n    if family_file:\n        LOG.info('Loading family from %s', family_file)\n        with open(family_file, 'r') as family_lines:\n            family = get_case(family_lines=family_lines, family_type=family_type)\n            family_id = family.family_id\n    case_id = (case_id or family_id)\n    case_obj = build_case(case=family, case_id=case_id, vcf_path=variant_file, vcf_individuals=vcf_individuals, nr_variants=nr_variants, vcf_sv_path=sv_file, sv_individuals=sv_individuals, nr_sv_variants=nr_sv_variants, profiles=profiles, matches=matches, profile_path=profile_file)\n    load_case(adapter=adapter, case_obj=case_obj)\n    nr_inserted = 0\n    for file_type in ['vcf_path', 'vcf_sv_path']:\n        variant_type = 'snv'\n        if (file_type == 'vcf_sv_path'):\n            variant_type = 'sv'\n        if (case_obj.get(file_type) is None):\n            continue\n        vcf_obj = get_vcf(case_obj[file_type])\n        try:\n            nr_inserted += load_variants(adapter=adapter, vcf_obj=vcf_obj, case_obj=case_obj, skip_case_id=skip_case_id, gq_treshold=gq_treshold, max_window=max_window, variant_type=variant_type)\n        except Exception as err:\n            LOG.warning(err)\n            delete(adapter=adapter, case_obj=case_obj)\n            raise err\n    return nr_inserted", "docstring": "Load the database with a case and its variants\n\nArgs:\nadapter: Connection to database\nvariant_file(str): Path to variant file\nsv_file(str): Path to sv variant file\nfamily_file(str): Path to family file\nfamily_type(str): Format of family file\nskip_case_id(bool): If no case information should be added to variants\ngq_treshold(int): If only quality variants should be considered\ncase_id(str): If different case id than the one in family file should be used\nmax_window(int): Specify the max size for sv windows\ncheck_profile(bool): Does profile check if True\nhard_threshold(float): Rejects load if hamming distance above this is found\nsoft_threshold(float): Stores similar samples if hamming distance above this is found\n\nReturns:\nnr_inserted(int)", "source": "codesearchnet"}
{"code": "def multi_interpolation_basis(n_objectives=6, n_interp_steps=5, width=128, channels=3):\n    (N, M, W, Ch) = (n_objectives, n_interp_steps, width, channels)\n    const_term = sum([lowres_tensor([W, W, Ch], [(W \n    const_term = tf.reshape(const_term, [1, 1, 1, W, W, Ch])\n    example_interps = [sum([lowres_tensor([M, W, W, Ch], [2, (W \n    example_basis = []\n    for n in range(N):\n        col = []\n        for m in range(N):\n            interp = (example_interps[n] + example_interps[m][::(- 1)])\n            col.append(interp)\n        example_basis.append(col)\n    interp_basis = []\n    for n in range(N):\n        col = [interp_basis[m][(N - n)][::(- 1)] for m in range(n)]\n        col.append(tf.zeros([M, W, W, 3]))\n        for m in range((n + 1), N):\n            interp = sum([lowres_tensor([M, W, W, Ch], [M, (W \n            col.append(interp)\n        interp_basis.append(col)\n    basis = []\n    for n in range(N):\n        col_ex = tf.stack(example_basis[n])\n        col_in = tf.stack(interp_basis[n])\n        basis.append((col_ex + col_in))\n    basis = tf.stack(basis)\n    return (basis + const_term)", "docstring": "A paramaterization for interpolating between each pair of N objectives.\n\nSometimes you want to interpolate between optimizing a bunch of objectives,\nin a paramaterization that encourages images to align.\n\nArgs:\nn_objectives: number of objectives you want interpolate between\nn_interp_steps: number of interpolation steps\nwidth: width of intepolated images\nchannel\n\nReturns:\nA [n_objectives, n_objectives, n_interp_steps, width, width, channel]\nshaped tensor, t, where the final [width, width, channel] should be\nseen as images, such that the following properties hold:\n\nt[a, b]    = t[b, a, ::-1]\nt[a, i, 0] = t[a, j, 0] for all i, j\nt[a, a, i] = t[a, a, j] for all i, j\nt[a, b, i] = t[b, a, -i] for all i", "source": "codesearchnet"}
{"code": "def sort(self, cmp=None, key=None, reverse=False):\n        \n\n        def _DefaultKey(value):\n            \n            result = []\n            for key in self.header:\n                \n                try:\n                    result.append(float(value[key]))\n                except ValueError:\n                    result.append(value[key])\n            return result\n\n        key = key or _DefaultKey\n        \n        new_table = self._table[1:]\n\n        if cmp is not None:\n            key = cmp_to_key(cmp)\n\n        new_table.sort(key=key, reverse=reverse)\n\n        \n        self._table = [self.header]\n        self._table.extend(new_table)\n        \n        for index, row in enumerate(self._table):\n            row.row = index", "docstring": "Sorts rows in the texttable.\n\nArgs:\ncmp: func, non default sort algorithm to use.\nkey: func, applied to each element before sorting.\nreverse: bool, reverse order of sort.", "source": "juraj-google-style"}
{"code": "def reduce_by_device(parallelism, data, reduce_fn):\n    unique_devices = []\n    device_to_data = {}\n    for (dev, datum) in zip(parallelism.devices, data):\n        if (dev not in device_to_data):\n            unique_devices.append(dev)\n            device_to_data[dev] = [datum]\n        else:\n            device_to_data[dev].append(datum)\n    device_parallelism = Parallelism(unique_devices)\n    grouped_data = [device_to_data[dev] for dev in unique_devices]\n    return (device_parallelism, device_parallelism(reduce_fn, grouped_data))", "docstring": "Reduces data per device.\n\nThis can be useful, for example, if we want to all-reduce n tensors on k<n\ndevices (like during eval when we have only one device).  We call\nreduce_by_device() to first sum the tensors per device, then call our usual\nall-reduce operation to create one sum per device, followed by\nexpand_by_device, to create the appropriate number of pointers to these\nresults.  See all_reduce_ring() below for an example of how this is used.\n\nArgs:\nparallelism: a expert_utils.Parallelism object\ndata: a list of Tensors with length parallelism.n\nreduce_fn: a function taking a list of Tensors.  e.g. tf.add_n\n\nReturns:\ndevice_parallelism: a Parallelism object with each device listed only once.\nreduced_data: A list of Tensors, one per device.", "source": "codesearchnet"}
{"code": "def record_corrected_value(self, value, expected_interval, count=1):\n    while True:\n        if (not self.record_value(value, count)):\n            return False\n        if ((value <= expected_interval) or (expected_interval <= 0)):\n            return True\n        value -= expected_interval", "docstring": "Record a new value into the histogram and correct for\ncoordinated omission if needed\n\nArgs:\nvalue: the value to record (must be in the valid range)\nexpected_interval: the expected interval between 2 value samples\ncount: incremental count (defaults to 1)", "source": "codesearchnet"}
{"code": "def update_endpoint(self, endpoint_name, endpoint_config_name):\n    if (not _deployment_entity_exists((lambda : self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)))):\n        raise ValueError('Endpoint with name \"{}\" does not exist; please use an existing endpoint name'.format(endpoint_name))\n    self.sagemaker_client.update_endpoint(EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name)\n    return endpoint_name", "docstring": "Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request\n\nRaise an error if endpoint with endpoint_name does not exist.\n\nArgs:\nendpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.\nendpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.\n\nReturns:\nstr: Name of the Amazon SageMaker ``Endpoint`` being updated.", "source": "codesearchnet"}
{"code": "def plot(self):\n    plt.rcParams['xtick.major.pad'] = '6'\n    plt.rcParams['ytick.major.pad'] = '6'\n    plt.rcParams['axes.linewidth'] = 2\n    npoint = 1000\n    xs = np.linspace(0, 1, npoint)\n    xs_reverse_converted = InterfacialReactivity._reverse_convert(xs, self.factor1, self.factor2)\n    energies = [self._get_energy(x) for x in xs_reverse_converted]\n    plt.plot(xs, energies, 'k-')\n    kinks = self.get_kinks()\n    (_, x_kink, energy_kink, _, _) = zip(*kinks)\n    plt.scatter(x_kink, energy_kink, marker='o', c='blue', s=20)\n    plt.scatter(self.minimum()[0], self.minimum()[1], marker='*', c='red', s=300)\n    for (index, x, energy, _, _) in kinks:\n        plt.annotate(index, xy=(x, energy), xytext=(5, 30), textcoords='offset points', ha='right', va='bottom', arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')).draggable()\n    plt.xlim([(- 0.05), 1.05])\n    if self.norm:\n        plt.ylabel('Energy (eV/atom)')\n    else:\n        plt.ylabel('Energy (eV/f.u.)')\n    plt.xlabel('$x$ in $x$ {} + $(1-x)$ {}'.format(self.c1.reduced_formula, self.c2.reduced_formula))\n    return plt", "docstring": "Plots reaction energy as a function of mixing ratio x in\nself.c1 - self.c2 tie line using pylab.\n\nReturns:\nPylab object that plots reaction energy as a function of\nmixing ratio x.", "source": "codesearchnet"}
{"code": "def run_without_time_limit(self, cmd):\n    cmd = ([DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd)\n    logging.info('Docker command: %s', ' '.join(cmd))\n    start_time = time.time()\n    retval = subprocess.call(cmd)\n    elapsed_time_sec = int((time.time() - start_time))\n    logging.info('Elapsed time of attack: %d', elapsed_time_sec)\n    logging.info('Docker retval: %d', retval)\n    if (retval != 0):\n        logging.warning('Docker returned non-zero retval: %d', retval)\n        raise WorkerError(('Docker returned non-zero retval ' + str(retval)))\n    return elapsed_time_sec", "docstring": "Runs docker command without time limit.\n\nArgs:\ncmd: list with the command line arguments which are passed to docker\nbinary\n\nReturns:\nhow long it took to run submission in seconds\n\nRaises:\nWorkerError: if error occurred during execution of the submission", "source": "codesearchnet"}
{"code": "def matrix(self) -> np.ndarray:\n    num_qubits = self.num_qubits()\n    if (num_qubits is None):\n        raise ValueError('Unknown number of qubits')\n    num_dim = (2 ** num_qubits)\n    result = np.zeros((num_dim, num_dim), dtype=np.complex128)\n    for (gate, coefficient) in self.items():\n        result += (protocols.unitary(gate) * coefficient)\n    return result", "docstring": "Reconstructs matrix of self using unitaries of underlying gates.\n\nRaises:\nTypeError: if any of the gates in self does not provide a unitary.", "source": "codesearchnet"}
{"code": "def quality(self, tests, alias=None):\n    this_tests = ((((tests.get('each', []) + tests.get('Each', [])) + tests.get('EACH', [])) + tests.get(self.mnemonic, [])) + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)]))\n    this_tests = filter(None, this_tests)\n    if (not tests.get(self.mnemonic, 1)):\n        this_tests = []\n    return {test.__name__: test(self) for test in this_tests}", "docstring": "Run a series of tests and return the corresponding results.\n\nArgs:\ntests (list): a list of functions.\nalias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\nReturns:\nlist. The results. Stick to booleans (True = pass) or ints.", "source": "codesearchnet"}
{"code": "def update(self, iterable):\n        \n        for pair in pairwise_longest(iterable, fillvalue=_FILL):\n            self._edges.append(pair)\n            self._results = None", "docstring": "Update with an ordered iterable of items.\n\nArgs:\niterable: An ordered iterable of items. The relative\norder of the items in this iterable will be respected\nin the TopoSet (in the absence of cycles).", "source": "juraj-google-style"}
{"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n    vision_data = {}\n    if image_sizes is not None:\n        images_kwargs = AyaVisionProcessorKwargs._defaults.get('images_kwargs', {})\n        images_kwargs.update(kwargs)\n        num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]\n        token_per_patch = (self.img_size \n        num_image_tokens = [token_per_patch + 3 + sum((token_per_patch + 1 for _ in range(1, num_patches))) for num_patches in num_image_patches]\n        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n    return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"}
{"code": "def add_archive_as_dir(self, zip_file_obj):\n    BalancedDiscStorage._check_interface(zip_file_obj)\n    file_hash = self._get_hash(zip_file_obj)\n    dir_path = self._create_dir_path(file_hash)\n    full_path = os.path.join(dir_path, file_hash)\n    if os.path.exists(full_path):\n        shutil.rmtree(full_path)\n    os.mkdir(full_path)\n    try:\n        self._unpack_zip(zip_file_obj, full_path)\n    except Exception:\n        shutil.rmtree(full_path)\n        raise\n    return PathAndHash(path=full_path, hash=file_hash)", "docstring": "Add archive to the storage and unpack it.\n\nArgs:\nzip_file_obj (file): Opened file-like object.\n\nReturns:\nobj: Path where the `zip_file_obj` was unpacked wrapped in \\\n:class:`.PathAndHash` structure.\n\nRaises:\nValueError: If there is too many files in .zip archive. \\\nSee :attr:`._max_zipfiles` for details.\nAssertionError: If the `zip_file_obj` is not file-like object.", "source": "codesearchnet"}
{"code": "def generate_session_id(secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):\n    secret_key = _ensure_bytes(secret_key)\n    if signed:\n        base_id = _get_random_string(secret_key=secret_key)\n        return ((base_id + '-') + _signature(base_id, secret_key))\n    else:\n        return _get_random_string(secret_key=secret_key)", "docstring": "Generate a random session ID.\n\nTypically, each browser tab connected to a Bokeh application\nhas its own session ID.  In production deployments of a Bokeh\napp, session IDs should be random and unguessable - otherwise\nusers of the app could interfere with one another.\n\nIf session IDs are signed with a secret key, the server can\nverify that the generator of the session ID was \"authorized\"\n(the generator had to know the secret key). This can be used\nto have a separate process, such as another web application,\nwhich generates new sessions on a Bokeh server. This other\nprocess may require users to log in before redirecting them to\nthe Bokeh server with a valid session ID, for example.\n\nArgs:\nsecret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)\nsigned (bool, optional) : Whether to sign the session ID (default: value of\n'BOKEH_SIGN_SESSIONS' env var)", "source": "codesearchnet"}
{"code": "def make_action(self, fn, schema_parser, meta):\n        \n        validate_input = validate_output = None\n        if \"$input\" in meta:\n            with MarkKey(\"$input\"):\n                validate_input = schema_parser.parse(meta[\"$input\"])\n        if \"$output\" in meta:\n            with MarkKey(\"$output\"):\n                validate_output = schema_parser.parse(meta[\"$output\"])\n\n        def action(data):\n            if validate_input:\n                try:\n                    data = validate_input(data)\n                except Invalid as ex:\n                    return abort(400, \"InvalidData\", str(ex))\n                if isinstance(data, dict):\n                    rv = fn(**data)\n                else:\n                    rv = fn(data)\n            else:\n                rv = fn()\n            rv, status, headers = unpack(rv)\n            if validate_output:\n                try:\n                    rv = validate_output(rv)\n                except Invalid as ex:\n                    return abort(500, \"ServerError\", str(ex))\n            return rv, status, headers\n        return action", "docstring": "Make resource's method an action\n\nValidate input, output by schema in meta.\nIf no input schema, call fn without params.\nIf no output schema, will not validate return value.\n\nArgs:\nfn: resource's method\nschema_parser: for parsing schema in meta\nmeta: meta data of the action", "source": "juraj-google-style"}
{"code": "def unauthorized(cls, errors=None):\n    if cls.expose_status:\n        cls.response.content_type = 'application/json'\n        cls.response._status_line = '401 Unauthorized'\n    return cls(401, errors=errors).to_json", "docstring": "Shortcut API for HTTP 401 `Unauthorized` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "codesearchnet"}
{"code": "def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):\n    \n    gold = arraylike_to_numpy(gold)\n\n    \n    \n    if len(ignore_in_pred) > 0:\n        raise ValueError(\"ignore_in_pred not defined for ROC-AUC score.\")\n    keep = [x not in ignore_in_gold for x in gold]\n    gold = gold[keep]\n    probs = probs[keep, :]\n\n    \n    gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy()\n    return skm.roc_auc_score(gold_s, probs)", "docstring": "Compute the ROC AUC score, given the gold labels and predicted probs.\n\nArgs:\ngold: A 1d array-like of gold labels\nprobs: A 2d array-like of predicted probabilities\nignore_in_gold: A list of labels for which elements having that gold\nlabel will be ignored.\n\nReturns:\nroc_auc_score: The (float) roc_auc score", "source": "juraj-google-style"}
{"code": "def generate_nodes(tpm, cm, network_state, indices, node_labels=None):\n    if (node_labels is None):\n        node_labels = NodeLabels(None, indices)\n    node_state = utils.state_of(indices, network_state)\n    return tuple((Node(tpm, cm, index, state, node_labels) for (index, state) in zip(indices, node_state)))", "docstring": "Generate |Node| objects for a subsystem.\n\nArgs:\ntpm (np.ndarray): The system's TPM\ncm (np.ndarray): The corresponding CM.\nnetwork_state (tuple): The state of the network.\nindices (tuple[int]): Indices to generate nodes for.\n\nKeyword Args:\nnode_labels (|NodeLabels|): Textual labels for each node.\n\nReturns:\ntuple[Node]: The nodes of the system.", "source": "codesearchnet"}
{"code": "def run(self, tag=None, output=None, **kwargs):\n        \n        start = datetime.datetime.now()\n        count = 0\n        if tag:\n            tag = Uri(tag)\n            xml_generator = etree.iterparse(self.source,\n                                            \n                                            tag=tag.etree)\n        else:\n            xml_generator = etree.iterparse(self.source) \n                                            \n        i = 0\n        for event, element in xml_generator:\n            type_tags = element.findall(_RDF_TYPE_TAG)\n            rdf_types = [el.get(_RES_TAG)\n                         for el in type_tags\n                         if el.get(_RES_TAG)]\n            \n            if str(self.filter_val) in rdf_types:\n                pdb.set_trace()\n                \n                \n                \n                \n                count += 1\n            \n            \n            i += 1\n            element.clear()\n        print(\"Found '{}' items in {}\".format(count,\n                (datetime.datetime.now() - start)))", "docstring": "runs the extractor\n\nArgs:\n-----\noutput: ['filepath', None]", "source": "juraj-google-style"}
{"code": "def read_links(self, file, encoding=None):\n    return [item[0] for item in self.iter_text(file, encoding) if item[1]]", "docstring": "Return an iterator of links found in the document.\n\nArgs:\nfile: A file object containing the document.\nencoding (str): The encoding of the document.\n\nReturns:\niterable: str", "source": "codesearchnet"}
{"code": "def phenSpecificEffects(snps, pheno1, pheno2, K=None, covs=None, test='lrt'):\n    N = snps.shape[0]\n    if (K is None):\n        K = SP.eye(N)\n    assert (pheno1.shape[1] == pheno2.shape[1]), 'Only consider equal number of phenotype dimensions'\n    if (covs is None):\n        covs = SP.ones(N, 1)\n    assert ((pheno1.shape[1] == 1) and (pheno2.shape[1] == 1) and (pheno1.shape[0] == N) and (pheno2.shape[0] == N) and (K.shape[0] == N) and (K.shape[1] == N) and (covs.shape[0] == N)), 'shapes missmatch'\n    Inter = SP.zeros(((N * 2), 1))\n    Inter[(0:N, 0)] = 1\n    Inter0 = SP.ones(((N * 2), 1))\n    Yinter = SP.concatenate((pheno1, pheno2), 0)\n    Xinter = SP.tile(snps, (2, 1))\n    Covitner = SP.tile(covs(2, 1))\n    lm = simple_interaction(snps=Xinter, pheno=Yinter, covs=Covinter, Inter=Inter, Inter0=Inter0, test=test)\n    return lm", "docstring": "Univariate fixed effects interaction test for phenotype specific SNP effects\n\nArgs:\nsnps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)\npheno1: [N x 1] SP.array of 1 phenotype for N individuals\npheno2: [N x 1] SP.array of 1 phenotype for N individuals\nK:      [N x N] SP.array of LMM-covariance/kinship koefficients (optional)\nIf not provided, then linear regression analysis is performed\ncovs:   [N x D] SP.array of D covariates for N individuals\ntest:    'lrt' for likelihood ratio test (default) or 'f' for F-test\n\nReturns:\nlimix LMM object", "source": "codesearchnet"}
{"code": "def __init__(self, value):\n        \n        super().__init__(duration=0)\n\n        if abs(value) > 1:\n            raise PulseError(\"Absolute value of PV amplitude exceeds 1.\")\n\n        self._value = complex(value)", "docstring": "create new persistent value command.\n\nArgs:\nvalue (complex): Complex value to apply, bounded by an absolute value of 1.\nThe allowable precision is device specific.\nRaises:\nPulseError: when input value exceed 1.", "source": "juraj-google-style"}
{"code": "def get_usb_serial(self, port_num):\n    \n\n    port = self.port_map[str(port_num)]\n    arg = ''.join(['DEVICE INFO,', self._addr, '.', port])\n    cmd = (['esuit64', '-t', arg])\n    info = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n    serial = None\n    if \"SERIAL\" in info:\n      serial_info = info.split('SERIAL:')[1]\n      serial = serial_info.split('\\n')[0].strip()\n      use_info = info.split('BY')[1].split(' ')[1]\n      if use_info == 'NO':\n        cmd = (['esuit64', '-t', 'AUTO USE ALL'])\n        subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n        time.sleep(50.0/1000.0)\n    else:\n      raise ValueError('No USB device detected')\n    return serial", "docstring": "Get the device serial number\n\nArgs:\nport_num: port number on the Cambrionix unit\n\nReturn:\nusb device serial number", "source": "juraj-google-style"}
{"code": "def create_clusters(provider, context, **kwargs):\n    \n    conn = get_session(provider.region).client('ecs')\n\n    try:\n        clusters = kwargs[\"clusters\"]\n    except KeyError:\n        logger.error(\"setup_clusters hook missing \\\"clusters\\\" argument\")\n        return False\n\n    if isinstance(clusters, basestring):\n        clusters = [clusters]\n\n    cluster_info = {}\n    for cluster in clusters:\n        logger.debug(\"Creating ECS cluster: %s\", cluster)\n        r = conn.create_cluster(clusterName=cluster)\n        cluster_info[r[\"cluster\"][\"clusterName\"]] = r\n    return {\"clusters\": cluster_info}", "docstring": "Creates ECS clusters.\n\nExpects a \"clusters\" argument, which should contain a list of cluster\nnames to create.\n\nArgs:\nprovider (:class:`stacker.providers.base.BaseProvider`): provider\ninstance\ncontext (:class:`stacker.context.Context`): context instance\n\nReturns: boolean for whether or not the hook succeeded.", "source": "juraj-google-style"}
{"code": "def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT]):\n    self._base = base\n    self._env_vars = getattr(base, '_env_vars', {})", "docstring": "A ModelHandler that skips batching in RunInference.\n\nArgs:\nbase: An implementation of the underlying model handler.", "source": "github-repos"}
{"code": "def individual(self, ind_id=None):\n        \n        for ind_obj in self.individuals:\n            if ind_obj.ind_id == ind_id:\n                return ind_obj\n        return None", "docstring": "Return a individual object\n\nArgs:\nind_id (str): A individual id\n\nReturns:\nindividual (puzzle.models.individual)", "source": "juraj-google-style"}
{"code": "def encode_chunk(dataframe):\n    csv_buffer = six.StringIO()\n    dataframe.to_csv(csv_buffer, index=False, header=False, encoding='utf-8', float_format='%.15g', date_format='%Y-%m-%d %H:%M:%S.%f')\n    body = csv_buffer.getvalue()\n    if isinstance(body, bytes):\n        body = body.decode('utf-8')\n    body = body.encode('utf-8')\n    return six.BytesIO(body)", "docstring": "Return a file-like object of CSV-encoded rows.\n\nArgs:\ndataframe (pandas.DataFrame): A chunk of a dataframe to encode", "source": "codesearchnet"}
{"code": "def get_excitation_spectrum(self, width=0.1, npoints=2000):\n        \n        roots = self.parse_tddft()\n        data = roots[\"singlet\"]\n        en = np.array([d[\"energy\"] for d in data])\n        osc = np.array([d[\"osc_strength\"] for d in data])\n\n        epad = 20.0 * width\n        emin = en[0] - epad\n        emax = en[-1] + epad\n        de = (emax - emin) / npoints\n\n        \n        if width < 2 * de:\n            width = 2 * de\n\n        energies = [emin + ie * de for ie in range(npoints)]\n\n        cutoff = 20.0 * width\n        gamma = 0.5 * width\n        gamma_sqrd = gamma * gamma\n\n        de = (energies[-1] - energies[0]) / (len(energies) - 1)\n        prefac = gamma / np.pi * de\n\n        x = []\n        y = []\n        for energy in energies:\n            xx0 = energy - en\n            stot = osc / (xx0 * xx0 + gamma_sqrd)\n            t = np.sum(stot[np.abs(xx0) <= cutoff])\n            x.append(energy)\n            y.append(t * prefac)\n        return ExcitationSpectrum(x, y)", "docstring": "Generate an excitation spectra from the singlet roots of TDDFT\ncalculations.\n\nArgs:\nwidth (float): Width for Gaussian smearing.\nnpoints (int): Number of energy points. More points => smoother\ncurve.\n\nReturns:\n(ExcitationSpectrum) which can be plotted using\npymatgen.vis.plotters.SpectrumPlotter.", "source": "juraj-google-style"}
{"code": "def get_md5sum(fname, chunk_size=1024):\n    \n\n    def iter_chunks(f):\n        while True:\n            chunk = f.read(chunk_size)\n            if not chunk:\n                break\n            yield chunk\n\n    sig = hashlib.md5()\n\n    with open(fname, 'rb') as f:\n        for chunk in iter_chunks(f):\n            sig.update(chunk)\n\n        \n        \n\n    return sig.hexdigest()", "docstring": "Returns the MD5 checksum of a file.\n\nArgs:\nfname (str): Filename\nchunk_size (Optional[int]): Size (in Bytes) of the chunks that should be\nread in at once. Increasing chunk size reduces the number of reads\nrequired, but increases the memory usage. Defaults to 1024.\n\nReturns:\nThe MD5 checksum of the file, which is a string.", "source": "juraj-google-style"}
{"code": "def label(self, label, action='ADD', params=None):\n        \n\n        if params is None:\n            params = {}\n\n        if not label:\n            self._tcex.handle_error(925, ['label', 'Security Label', 'label', 'label', label])\n\n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        if action == 'GET':\n            return self.tc_requests.get_label(\n                self.api_type,\n                self.api_sub_type,\n                self.unique_id,\n                label,\n                owner=self.owner,\n                params=params,\n            )\n\n        if action == 'ADD':\n            return self.tc_requests.add_label(\n                self.api_type, self.api_sub_type, self.unique_id, label, owner=self.owner\n            )\n\n        if action == 'DELETE':\n            return self.tc_requests.delete_label(\n                self.api_type, self.api_sub_type, self.unique_id, label, owner=self.owner\n            )\n\n        self._tcex.handle_error(925, ['action', 'label', 'action', 'action', action])\n        return None", "docstring": "Adds a Security Label to a Indicator/Group or Victim\nArgs:\nparams:\nlabel: The name of the Security Label\naction:", "source": "juraj-google-style"}
{"code": "def load_library(library_location):\n    if os.path.exists(library_location):\n        if os.path.isdir(library_location):\n            directory_contents = os.listdir(library_location)\n            kernel_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)]\n        else:\n            kernel_libraries = [library_location]\n        for lib in kernel_libraries:\n            py_tf.TF_LoadLibrary(lib)\n    else:\n        raise OSError(errno.ENOENT, 'The file or folder to load kernel libraries from does not exist.', library_location)", "docstring": "Loads a TensorFlow plugin.\n\n\"library_location\" can be a path to a specific shared object, or a folder.\nIf it is a folder, all shared objects that are named \"libtfkernel*\" will be\nloaded. When the library is loaded, kernels registered in the library via the\n`REGISTER_*` macros are made available in the TensorFlow process.\n\nArgs:\nlibrary_location: Path to the plugin or the folder of plugins.\nRelative or absolute filesystem path to a dynamic library file or folder.\n\nReturns:\nNone\n\nRaises:\nOSError: When the file to be loaded is not found.\nRuntimeError: when unable to load the library.", "source": "github-repos"}
{"code": "def catchup_subscriber(self, connection_id):\n    with self._subscribers_cv:\n        subscriber = self._subscribers[connection_id]\n        last_known_block_id = subscriber.get_last_known_block_id()\n        subscriptions = subscriber.subscriptions\n    if (last_known_block_id is not None):\n        LOGGER.debug('Catching up Subscriber %s from %s', connection_id, last_known_block_id)\n        for block_id in self.get_catchup_block_ids(last_known_block_id):\n            events = self.get_events_for_block_id(block_id, subscriptions)\n            event_list = EventList(events=events)\n            self._send(connection_id, event_list.SerializeToString())", "docstring": "Send an event list with all events that are in the given\nsubscriptions from all blocks since that latest block in the current\nchain that is in the given last known block ids.\n\nRaises:\nPossibleForkDetectedError\nA possible fork was detected while building the event list\nNoKnownBlockError\nNone of the last known blocks were in the current chain\nKeyError\nUnknown connection_id", "source": "codesearchnet"}
{"code": "def dag_to_circuit(dag):\n    qregs = collections.OrderedDict()\n    for qreg in dag.qregs.values():\n        qreg_tmp = QuantumRegister(qreg.size, name=qreg.name)\n        qregs[qreg.name] = qreg_tmp\n    cregs = collections.OrderedDict()\n    for creg in dag.cregs.values():\n        creg_tmp = ClassicalRegister(creg.size, name=creg.name)\n        cregs[creg.name] = creg_tmp\n    name = (dag.name or None)\n    circuit = QuantumCircuit(*qregs.values(), *cregs.values(), name=name)\n    for node in dag.topological_op_nodes():\n        qubits = []\n        for qubit in node.qargs:\n            qubits.append(qregs[qubit[0].name][qubit[1]])\n        clbits = []\n        for clbit in node.cargs:\n            clbits.append(cregs[clbit[0].name][clbit[1]])\n        if (node.condition is None):\n            control = None\n        else:\n            control = (node.condition[0], node.condition[1])\n        inst = node.op.copy()\n        inst.control = control\n        circuit.append(inst, qubits, clbits)\n    return circuit", "docstring": "Build a ``QuantumCircuit`` object from a ``DAGCircuit``.\n\nArgs:\ndag (DAGCircuit): the input dag.\n\nReturn:\nQuantumCircuit: the circuit representing the input dag.", "source": "codesearchnet"}
{"code": "def SetProtocol(self, protocol):\n    \n    if protocol not in self.SUPPORTED_PROTOCOLS:\n      raise ValueError('Unsupported protocol: {0!s}'.format(protocol))\n\n    self._protocol = protocol", "docstring": "Sets the protocol that will be used to query Viper.\n\nArgs:\nprotocol (str): protocol to use to query Viper. Either 'http' or 'https'.\n\nRaises:\nValueError: if the protocol is not supported.", "source": "juraj-google-style"}
{"code": "def member_of(self, group):\n    if isinstance(group, Group):\n        group = group.name\n    return self.groups.filter(name=group).exists()", "docstring": "Returns whether a user is a member of a certain group.\n\nArgs:\ngroup\nThe name of a group (string) or a group object\n\nReturns:\nBoolean", "source": "codesearchnet"}
{"code": "def get_sub_category(alt_len, ref_len, category, svtype=None):\n    subcategory = ''\n    if (category in ('snv', 'indel', 'cancer')):\n        if (ref_len == alt_len):\n            subcategory = 'snv'\n        else:\n            subcategory = 'indel'\n    elif (category == 'sv'):\n        subcategory = svtype\n    return subcategory", "docstring": "Get the subcategory for a VCF variant\n\nThe sub categories are:\n'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv'\n\nArgs:\nalt_len(int)\nref_len(int)\ncategory(str)\nsvtype(str)\n\nReturns:\nsubcategory(str)", "source": "codesearchnet"}
{"code": "def choose_template(self, template):\n        \n        n1 = int(template)/10\n        n2 = int(template)%10\n        self.send('^TS'+'0'+str(n1)+str(n2))", "docstring": "Choose a template\n\nArgs:\ntemplate: String, choose which template you would like.\nReturns:\nNone\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, concentration1=None, concentration0=None, validate_args=False, allow_nan_stats=True, name='Beta'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[concentration1, concentration0]) as name:\n        self._concentration1 = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration1, name='concentration1'), validate_args)\n        self._concentration0 = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration0, name='concentration0'), validate_args)\n        check_ops.assert_same_float_dtype([self._concentration1, self._concentration0])\n        self._total_concentration = self._concentration1 + self._concentration0\n    super(Beta, self).__init__(dtype=self._total_concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration1, self._concentration0, self._total_concentration], name=name)", "docstring": "Initialize a batch of Beta distributions.\n\nArgs:\nconcentration1: Positive floating-point `Tensor` indicating mean\nnumber of successes; aka \"alpha\". Implies `self.dtype` and\n`self.batch_shape`, i.e.,\n`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.\nconcentration0: Positive floating-point `Tensor` indicating mean\nnumber of failures; aka \"beta\". Otherwise has same semantics as\n`concentration1`.\nvalidate_args: Python `bool`, default `False`. When `True` distribution\nparameters are checked for validity despite possibly degrading runtime\nperformance. When `False` invalid inputs may silently render incorrect\noutputs.\nallow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n(e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\nresult is undefined. When `False`, an exception is raised if one or\nmore of the statistic's batch members are undefined.\nname: Python `str` name prefixed to Ops created by this class.", "source": "github-repos"}
{"code": "def list_class_funcnames(fname, blank_pats=['    \n    \n    with open(fname, 'r') as file_:\n        lines = file_.readlines()\n    funcname_list = []\n\n    \n    for lx, line in enumerate(lines):\n        \n        if any([line.startswith(pat) for pat in blank_pats]):\n            funcname_list.append('')\n        if line.startswith('    def '):\n            def_x    = line.find('def')\n            rparen_x = line.find('(')\n            funcname = line[(def_x + 3):rparen_x]\n            \n            funcname_list.append(funcname)\n    return funcname_list", "docstring": "list_class_funcnames\n\nArgs:\nfname (str): filepath\nblank_pats (list): defaults to '    #'\n\nReturns:\nlist: funcname_list\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_inspect import *  # NOQA\n>>> fname = 'util_class.py'\n>>> blank_pats = ['    #']\n>>> funcname_list = list_class_funcnames(fname, blank_pats)\n>>> print(funcname_list)", "source": "juraj-google-style"}
{"code": "def convert_labels(Y, source, dest):\n    if (Y is None):\n        return Y\n    if isinstance(Y, np.ndarray):\n        Y = Y.copy()\n        assert isinstance(Y, int)\n    elif isinstance(Y, torch.Tensor):\n        Y = Y.clone()\n        assert (np.sum((Y.numpy() - Y.numpy().astype(int))) == 0.0)\n    else:\n        raise ValueError('Unrecognized label data type.')\n    negative_map = {'categorical': 2, 'plusminus': (- 1), 'onezero': 0}\n    Y[(Y == negative_map[source])] = negative_map[dest]\n    return Y", "docstring": "Convert a matrix from one label type to another\n\nArgs:\nY: A np.ndarray or torch.Tensor of labels (ints)\nsource: The convention the labels are currently expressed in\ndest: The convention to convert the labels to\n\nConventions:\n'categorical': [0: abstain, 1: positive, 2: negative]\n'plusminus': [0: abstain, 1: positive, -1: negative]\n'onezero': [0: negative, 1: positive]\n\nNote that converting to 'onezero' will combine abstain and negative labels.", "source": "codesearchnet"}
{"code": "def success(channel, title, datapacks):\n    gui = ui_embed.UI(channel, title, '', modulename=modulename, datapacks=datapacks)\n    return gui", "docstring": "Creates an embed UI containing the help message\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\ntitle (str): The title of the embed\ndatapacks (list): The hex value\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "codesearchnet"}
{"code": "def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):\n    \n    if len(fcoord_list) == 0:\n        return []\n    fcoords = np.tile(fcoord, (len(fcoord_list), 1))\n    fdist = fcoord_list - fcoords\n    fdist -= np.round(fdist)\n    return np.where(np.all(np.abs(fdist) < atol, axis=1))[0]", "docstring": "Get the indices of all points in a fractional coord list that are\nequal to a fractional coord (with a tolerance), taking into account\nperiodic boundary conditions.\n\nArgs:\nfcoord_list: List of fractional coords\nfcoord: A specific fractional coord to test.\natol: Absolute tolerance. Defaults to 1e-8.\n\nReturns:\nIndices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.", "source": "juraj-google-style"}
{"code": "def extract_subject_info_extension(cert_obj):\n    try:\n        subject_info_der = cert_obj.extensions.get_extension_for_oid(cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)).value.value\n        return str(pyasn1.codec.der.decoder.decode(subject_info_der)[0])\n    except Exception as e:\n        logging.debug('SubjectInfo not extracted. reason=\"{}\"'.format(e))", "docstring": "Extract DataONE SubjectInfo XML doc from certificate.\n\nCertificates issued by DataONE may include an embedded XML doc containing\nadditional information about the subject specified in the certificate DN. If\npresent, the doc is stored as an extension with an OID specified by DataONE and\nformatted as specified in the DataONE SubjectInfo schema definition.\n\nArgs:\ncert_obj: cryptography.Certificate\n\nReturns:\nstr : SubjectInfo XML doc if present, else None", "source": "codesearchnet"}
{"code": "def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None):\n    if (canvas.__module__ in SUPPORTED_CANVAS):\n        drawer = SUPPORTED_CANVAS[canvas.__module__]\n        drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()", "docstring": "Draw the tree on a canvas.\n\nArgs:\ncanvas (object): The canvas, you want to draw the tree on. Supported canvases: svgwrite.Drawing and PIL.Image (You can also add your custom libraries.)\nstem_color (tupel): Color or gradient for the stem of the tree.\nleaf_color (tupel): Color for the leaf (= the color for last iteration).\nthickness (int): The start thickness of the tree.", "source": "codesearchnet"}
{"code": "def Register(self, app_id, challenge, registered_keys):\n    client_data = model.ClientData(model.ClientData.TYP_REGISTRATION, challenge, self.origin)\n    challenge_param = self.InternalSHA256(client_data.GetJson())\n    app_param = self.InternalSHA256(app_id)\n    for key in registered_keys:\n        try:\n            if (key.version != u'U2F_V2'):\n                continue\n            resp = self.security_key.CmdAuthenticate(challenge_param, app_param, key.key_handle, True)\n            raise errors.HardwareError('Should Never Happen')\n        except errors.TUPRequiredError:\n            raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)\n        except errors.InvalidKeyHandleError as e:\n            pass\n        except errors.HardwareError as e:\n            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)\n    for _ in range(30):\n        try:\n            resp = self.security_key.CmdRegister(challenge_param, app_param)\n            return model.RegisterResponse(resp, client_data)\n        except errors.TUPRequiredError as e:\n            self.security_key.CmdWink()\n            time.sleep(0.5)\n        except errors.HardwareError as e:\n            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)\n    raise errors.U2FError(errors.U2FError.TIMEOUT)", "docstring": "Registers app_id with the security key.\n\nExecutes the U2F registration flow with the security key.\n\nArgs:\napp_id: The app_id to register the security key against.\nchallenge: Server challenge passed to the security key.\nregistered_keys: List of keys already registered for this app_id+user.\n\nReturns:\nRegisterResponse with key_handle and attestation information in it (\nencoded in FIDO U2F binary format within registration_data field).\n\nRaises:\nU2FError: There was some kind of problem with registration (e.g.\nthe device was already registered or there was a timeout waiting\nfor the test of user presence).", "source": "codesearchnet"}
{"code": "def _inverse_log_det_jacobian(self, y):\n    raise NotImplementedError('inverse_log_det_jacobian not implemented.')", "docstring": "Subclass implementation of `inverse_log_det_jacobian` public function.\n\nIn particular, this method differs from the public function, in that it\ndoes not take `event_ndims`. Thus, this implements the minimal Jacobian\ndeterminant calculation (i.e. over `inverse_min_event_ndims`).\n\nArgs:\ny: `Tensor`. The input to the \"inverse_log_det_jacobian\" evaluation.\nReturns:\ninverse_log_det_jacobian: `Tensor`, if this bijector is injective.\nIf not injective, returns the k-tuple containing jacobians for the\nunique `k` points `(x1, ..., xk)` such that `g(xi) = y`.", "source": "github-repos"}
{"code": "def run(app: web.Application):\n    \n    host = app['config']['host']\n    port = app['config']['port']\n\n    \n    web.run_app(app, host=host, port=port)", "docstring": "Runs the application in an async context.\nThis function will block indefinitely until the application is shut down.\n\nArgs:\napp (web.Application):\nThe Aiohttp Application as created by `create_app()`", "source": "juraj-google-style"}
{"code": "def _use_memcache(self, key, options=None):\n    \n    flag = ContextOptions.use_memcache(options)\n    if flag is None:\n      flag = self._memcache_policy(key)\n    if flag is None:\n      flag = ContextOptions.use_memcache(self._conn.config)\n    if flag is None:\n      flag = True\n    return flag", "docstring": "Return whether to use memcache for this key.\n\nArgs:\nkey: Key instance.\noptions: ContextOptions instance, or None.\n\nReturns:\nTrue if the key should be cached in memcache, False otherwise.", "source": "juraj-google-style"}
{"code": "def elaborate_borns_and_epsilon(ucell, borns, epsilon, primitive_matrix=None, supercell_matrix=None, is_symmetry=True, symmetrize_tensors=False, symprec=1e-05):\n    assert (len(borns) == ucell.get_number_of_atoms()), ('num_atom %d != len(borns) %d' % (ucell.get_number_of_atoms(), len(borns)))\n    if symmetrize_tensors:\n        (borns_, epsilon_) = symmetrize_borns_and_epsilon(borns, epsilon, ucell, symprec=symprec, is_symmetry=is_symmetry)\n    else:\n        borns_ = borns\n        epsilon_ = epsilon\n    (indeps_in_supercell, indeps_in_unitcell) = _extract_independent_atoms(ucell, primitive_matrix=primitive_matrix, supercell_matrix=supercell_matrix, is_symmetry=is_symmetry, symprec=symprec)\n    return (borns_[indeps_in_unitcell].copy(), epsilon_, indeps_in_supercell)", "docstring": "Symmetrize Born effective charges and dielectric constants and\nextract Born effective charges of symmetrically independent atoms\nfor primitive cell.\n\n\nArgs:\nucell (Atoms): Unit cell structure\nborns (np.array): Born effective charges of ucell\nepsilon (np.array): Dielectric constant tensor\n\nReturns:\n(np.array) Born effective charges of symmetrically independent atoms\nin primitive cell\n(np.array) Dielectric constant\n(np.array) Atomic index mapping table from supercell to primitive cell\nof independent atoms\n\nRaises:\nAssertionError: Inconsistency of number of atoms or Born effective\ncharges.\n\nWarning:\nBroken symmetry of Born effective charges", "source": "codesearchnet"}
{"code": "def register_token(self, token_class, regexp=None):\n    if (regexp is None):\n        regexp = token_class.regexp\n    self.tokens.register(token_class, regexp)", "docstring": "Register a token class.\n\nArgs:\ntoken_class (tdparser.Token): the token class to register\nregexp (optional str): the regexp for elements of that token.\nDefaults to the `regexp` attribute of the token class.", "source": "codesearchnet"}
{"code": "def run_local_server(self, host='localhost', port=8080, authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE, success_message=_DEFAULT_WEB_SUCCESS_MESSAGE, open_browser=True, **kwargs):\n    self.redirect_uri = 'http:\n    (auth_url, _) = self.authorization_url(**kwargs)\n    wsgi_app = _RedirectWSGIApp(success_message)\n    local_server = wsgiref.simple_server.make_server(host, port, wsgi_app, handler_class=_WSGIRequestHandler)\n    if open_browser:\n        webbrowser.open(auth_url, new=1, autoraise=True)\n    print(authorization_prompt_message.format(url=auth_url))\n    local_server.handle_request()\n    authorization_response = wsgi_app.last_request_uri.replace('http', 'https')\n    self.fetch_token(authorization_response=authorization_response)\n    return self.credentials", "docstring": "Run the flow using the server strategy.\n\nThe server strategy instructs the user to open the authorization URL in\ntheir browser and will attempt to automatically open the URL for them.\nIt will start a local web server to listen for the authorization\nresponse. Once authorization is complete the authorization server will\nredirect the user's browser to the local web server. The web server\nwill get the authorization code from the response and shutdown. The\ncode is then exchanged for a token.\n\nArgs:\nhost (str): The hostname for the local redirect server. This will\nbe served over http, not https.\nport (int): The port for the local redirect server.\nauthorization_prompt_message (str): The message to display to tell\nthe user to navigate to the authorization URL.\nsuccess_message (str): The message to display in the web browser\nthe authorization flow is complete.\nopen_browser (bool): Whether or not to open the authorization URL\nin the user's browser.\nkwargs: Additional keyword arguments passed through to\n:meth:`authorization_url`.\n\nReturns:\ngoogle.oauth2.credentials.Credentials: The OAuth 2.0 credentials\nfor the user.", "source": "codesearchnet"}
{"code": "def GetMessages(self, files):\n\n    def _GetAllMessageNames(desc):\n        'Walk a message Descriptor and recursively yields all message names.'\n        (yield desc.full_name)\n        for msg_desc in desc.nested_types:\n            for full_name in _GetAllMessageNames(msg_desc):\n                (yield full_name)\n    result = {}\n    for file_name in files:\n        file_desc = self.pool.FindFileByName(file_name)\n        for msg_desc in file_desc.message_types_by_name.values():\n            for full_name in _GetAllMessageNames(msg_desc):\n                try:\n                    result[full_name] = self._classes[full_name]\n                except KeyError:\n                    pass\n    return result", "docstring": "Gets all registered messages from a specified file.\n\nOnly messages already created and registered will be returned; (this is the\ncase for imported _pb2 modules)\nBut unlike MessageFactory, this version also returns already defined nested\nmessages, but does not register any message extensions.\n\nArgs:\nfiles: The file names to extract messages from.\n\nReturns:\nA dictionary mapping proto names to the message classes.\n\nRaises:\nKeyError: if a file could not be found.", "source": "codesearchnet"}
{"code": "def config_to_string(config):\n    \n    output = []\n    for section, section_content in config.items():\n        output.append(\"[{}]\".format(section))\n        for option, option_value in section_content.items():\n            output.append(\"{} = {}\".format(option, option_value))\n    return \"\\n\".join(output)", "docstring": "Nice output string for the config, which is a nested defaultdict.\n\nArgs:\nconfig (defaultdict(defaultdict)): The configuration information.\nReturns:\nstr: A human-readable output string detailing the contents of the config.", "source": "juraj-google-style"}
{"code": "def _get_best(values: List[float], losses: List[float],\n                  max_loss_div: float = 0.9, min_val_div: float = 10.0) -> float:\n        \n        assert len(values) == len(losses), \"lengths of values and losses should be equal\"\n        min_ind = np.argmin(losses)\n        for i in range(min_ind - 1, 0, -1):\n            if (losses[i] * max_loss_div > losses[min_ind]) or\\\n                    (values[i] * min_val_div < values[min_ind]):\n                return values[i + 1]\n        return values[min_ind] / min_val_div", "docstring": "Find the best value according to given losses\n\nArgs:\nvalues: list of considered values\nlosses: list of obtained loss values corresponding to `values`\nmax_loss_div: maximal divergence of loss to be considered significant\nmin_val_div: minimum divergence of loss to be considered significant\n\nReturns:\nbest value divided by `min_val_div`", "source": "juraj-google-style"}
{"code": "def _get_annotations(self, text, language=''):\n    \n    body = {\n        'document': {\n            'type': 'PLAIN_TEXT',\n            'content': text,\n        },\n        'features': {\n            'extract_syntax': True,\n        },\n        'encodingType': 'UTF32',\n    }\n    if language:\n      body['document']['language'] = language\n\n    request = self.service.documents().annotateText(body=body)\n    response = request.execute()\n    tokens = response.get('tokens', [])\n    language = response.get('language')\n\n    return {'tokens': tokens, 'language': language}", "docstring": "Returns the list of annotations retrieved from the given text.\n\nArgs:\ntext (str): Input text.\nlanguage (:obj:`str`, optional): Language code.\n\nReturns:\nResults in a dictionary. :code:`tokens` contains the list of annotations\nand :code:`language` contains the inferred language from the input.", "source": "juraj-google-style"}
{"code": "def set_params(self, **params):\n\t\t\n\t\tif 'bias' in params.keys():\n\t\t\tself.intercept_ = params['bias']\n\t\tif 'weights' in params.keys():\n\t\t\tself.coef_ = params['weights']\n\t\tfor key in params.keys():\n\t\t\tif 'b_' == key[:2]:\n\t\t\t\tself.B[int(key[2:])] = params[key]\n\n\t\treturn self", "docstring": "Set the parameters of the estimator.\n\nArgs:\nbias (array-like) : bias of the estimator. Also known as the intercept in a linear model.\nweights (array-like) : weights of the features. Also known as coeficients.\nNER biases (array-like) : NER entities infering column position on X and bias value. Ex: `b_4=10, b_5=6`.\n\nExample:\n>>> cls = VTT()\n>>> cls.set_params(b_4=10, b_5=6, b_6=8)", "source": "juraj-google-style"}
{"code": "def open(path, mode=gdalconst.GA_ReadOnly):\n    \n    path = getattr(path, 'name', path)\n    try:\n        return Raster(vsiprefix(path), mode)\n    except AttributeError:\n        try:\n            imgdata = path.read()\n        except AttributeError:\n            raise TypeError('Not a file-like object providing read()')\n        else:\n            imgio = MemFileIO(delete=False)\n            gdal.FileFromMemBuffer(imgio.name, imgdata)\n            return Raster(imgio, mode)\n    raise ValueError('Failed to open raster from \"%r\"' % path)", "docstring": "Returns a Raster instance.\n\nArguments:\npath -- local or remote path as str or file-like object\nKeyword args:\nmode -- gdal constant representing access mode", "source": "juraj-google-style"}
{"code": "def encode(self, label):\n    label = super().encode(label)\n    return torch.tensor(self.stoi.get(label, self.unknown_index))", "docstring": "Encodes a ``label``.\n\nArgs:\nlabel (object): Label to encode.\n\nReturns:\ntorch.Tensor: Encoding of the label.", "source": "codesearchnet"}
{"code": "def while_loop(self, context, step_method):\n        \n        logger.debug(\"starting\")\n\n        context['whileCounter'] = 0\n\n        if self.stop is None and self.max is None:\n            \n            \n            logger.error(f\"while decorator missing both max and stop.\")\n            raise PipelineDefinitionError(\"the while decorator must have \"\n                                          \"either max or stop, or both. \"\n                                          \"But not neither.\")\n\n        error_on_max = context.get_formatted_as_type(\n            self.error_on_max, out_type=bool)\n        sleep = context.get_formatted_as_type(self.sleep, out_type=float)\n        if self.max is None:\n            max = None\n            logger.info(f\"while decorator will loop until {self.stop} \"\n                        f\"evaluates to True at {sleep}s intervals.\")\n        else:\n            max = context.get_formatted_as_type(self.max, out_type=int)\n\n            if max < 1:\n                logger.info(\n                    f\"max {self.max} is {max}. while only runs when max > 0.\")\n                logger.debug(\"done\")\n                return\n\n            if self.stop is None:\n                logger.info(f\"while decorator will loop {max} times at \"\n                            f\"{sleep}s intervals.\")\n            else:\n                logger.info(f\"while decorator will loop {max} times, or \"\n                            f\"until {self.stop} evaluates to True at \"\n                            f\"{sleep}s intervals.\")\n\n        if not poll.while_until_true(interval=sleep,\n                                     max_attempts=max)(\n                self.exec_iteration)(context=context,\n                                     step_method=step_method):\n            \n            if error_on_max:\n                logger.error(f\"exhausted {max} iterations of while loop, \"\n                             \"and errorOnMax is True.\")\n                if self.stop and max:\n                    raise LoopMaxExhaustedError(\"while loop reached \"\n                                                f\"{max} and {self.stop} \"\n                                                \"never evaluated to True.\")\n                else:\n                    raise LoopMaxExhaustedError(f\"while loop reached {max}.\")\n            else:\n                if self.stop and max:\n                    logger.info(\n                        f\"while decorator looped {max} times, \"\n                        f\"and {self.stop} never evaluated to True.\")\n\n            logger.debug(\"while loop done\")\n        else:\n            logger.info(f\"while loop done, stop condition {self.stop} \"\n                        \"evaluated True.\")\n\n        logger.debug(\"done\")", "docstring": "Run step inside a while loop.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate - after method execution will contain the new\nupdated context.\nstep_method: (method/function) This is the method/function that\nwill execute on every loop iteration. Signature is:\nfunction(context)", "source": "juraj-google-style"}
{"code": "def daemon(args):\n    if os.environ.get(DVC_DAEMON):\n        logger.debug('skipping launching a new daemon.')\n        return\n    cmd = [sys.executable]\n    if (not is_binary()):\n        cmd += ['-m', 'dvc']\n    cmd += (['daemon', '-q'] + args)\n    env = fix_env()\n    file_path = os.path.abspath(inspect.stack()[0][1])\n    env[cast_bytes_py2('PYTHONPATH')] = cast_bytes_py2(os.path.dirname(os.path.dirname(file_path)))\n    env[cast_bytes_py2(DVC_DAEMON)] = cast_bytes_py2('1')\n    _spawn(cmd, env)", "docstring": "Launch a `dvc daemon` command in a detached process.\n\nArgs:\nargs (list): list of arguments to append to `dvc daemon` command.", "source": "codesearchnet"}
{"code": "def get_versions(self):\n    versions_response = self.repo.api.http_request('GET', ('%s/fcr:versions' % self.uri))\n    versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)\n    for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):\n        version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython()\n        self._affix_version(version_uri, version_label)", "docstring": "retrieves all versions of an object, and stores them at self.versions\n\nArgs:\nNone\n\nReturns:\nNone: appends instances", "source": "codesearchnet"}
{"code": "def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None):\n        \n        response = self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific)\n        return self._read_boolean_response(response)", "docstring": "See Also: updateNodeCapabilitiesResponse()\n\nArgs:\nnodeId:\nnode:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def _workflow_complete(workflow_stage_dict: dict):\n    complete_stages = []\n    for (_, stage_config) in workflow_stage_dict.items():\n        complete_stages.append((stage_config['status'] == 'complete'))\n    if all(complete_stages):\n        LOG.info('PB workflow complete!')\n        return True\n    return False", "docstring": "Check if the workflow is complete.\n\nThis function checks if the entire workflow is complete.\n\nThis function is used by `execute_processing_block`.\n\nArgs:\nworkflow_stage_dict (dict): Workflow metadata dictionary.\n\nReturns:\nbool, True if the workflow is complete, otherwise False.", "source": "codesearchnet"}
{"code": "def concat(self, axis, other, **kwargs):\n    return self._append_list_of_managers(other, axis, **kwargs)", "docstring": "Concatenates two objects together.\n\nArgs:\naxis: The axis index object to join (0 for columns, 1 for index).\nother: The other_index to concat with.\n\nReturns:\nConcatenated objects.", "source": "codesearchnet"}
{"code": "def from_files(path_dir, dos_spin=1):\n        \n        run_type, warning, efermi, gap, doping_levels = \\\n            BoltztrapAnalyzer.parse_outputtrans(path_dir)\n\n        vol = BoltztrapAnalyzer.parse_struct(path_dir)\n\n        intrans = BoltztrapAnalyzer.parse_intrans(path_dir)\n\n        if run_type == \"BOLTZ\":\n            dos, pdos = BoltztrapAnalyzer.parse_transdos(\n                path_dir, efermi, dos_spin=dos_spin, trim_dos=False)\n\n            mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \\\n            seebeck_doping, cond_doping, kappa_doping, hall_doping, \\\n            carrier_conc = BoltztrapAnalyzer. \\\n                parse_cond_and_hall(path_dir, doping_levels)\n\n            return BoltztrapAnalyzer(\n                gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,\n                mu_doping, seebeck_doping, cond_doping, kappa_doping,\n                hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)\n\n        elif run_type == \"DOS\":\n            trim = True if intrans[\"dos_type\"] == \"HISTO\" else False\n            dos, pdos = BoltztrapAnalyzer.parse_transdos(\n                path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)\n\n            return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,\n                                     warning=warning, vol=vol)\n\n        elif run_type == \"BANDS\":\n            bz_kpoints = np.loadtxt(\n                os.path.join(path_dir, \"boltztrap_band.dat\"))[:, -3:]\n            bz_bands = np.loadtxt(\n                os.path.join(path_dir, \"boltztrap_band.dat\"))[:, 1:-6]\n            return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,\n                                     warning=warning, vol=vol)\n\n        elif run_type == \"FERMI\":\n            \n\n            if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):\n                fs_data = read_cube_file(\n                    os.path.join(path_dir, 'boltztrap_BZ.cube'))\n            elif os.path.exists(os.path.join(path_dir, 'fort.30')):\n                fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))\n            else:\n                raise BoltztrapError(\"No data file found for fermi surface\")\n            return BoltztrapAnalyzer(fermi_surface_data=fs_data)\n\n        else:\n            raise ValueError(\"Run type: {} not recognized!\".format(run_type))", "docstring": "get a BoltztrapAnalyzer object from a set of files\n\nArgs:\npath_dir: directory where the boltztrap files are\ndos_spin: in DOS mode, set to 1 for spin up and -1 for spin down\n\nReturns:\na BoltztrapAnalyzer object", "source": "juraj-google-style"}
{"code": "def get_branch(profile, name):\n    ref = ('heads/' + name)\n    data = refs.get_ref(profile, ref)\n    return data", "docstring": "Fetch a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the branch to fetch.\n\nReturns:\nA dict with data baout the branch.", "source": "codesearchnet"}
{"code": "def get_config(self):\n    return {}", "docstring": "Returns the initializer's configuration as a JSON-serializable dict.\n\nReturns:\nA JSON-serializable Python dict.", "source": "github-repos"}
{"code": "def output(self, _filename):\n        \n\n        txt = ''\n        for c in self.contracts:\n            txt += \"\\nContract %s\\n\"%c.name\n            table = PrettyTable(['Variable', 'Dependencies'])\n            for v in c.state_variables:\n                table.add_row([v.name, _get(v, c)])\n\n            txt += str(table)\n\n            txt += \"\\n\"\n            for f in c.functions_and_modifiers_not_inherited:\n                txt += \"\\nFunction %s\\n\"%f.full_name\n                table = PrettyTable(['Variable', 'Dependencies'])\n                for v in f.variables:\n                    table.add_row([v.name, _get(v, f)])\n                for v in c.state_variables:\n                    table.add_row([v.canonical_name, _get(v, f)])\n                txt += str(table)\n            self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"}
{"code": "def __init__(self, filesystem):\n        \n        self._filesystem = filesystem\n        self.name = ''\n        self.path = ''\n        self._inode = None\n        self._islink = False\n        self._isdir = False\n        self._statresult = None\n        self._statresult_symlink = None", "docstring": "Initialize the dir entry with unset values.\n\nArgs:\nfilesystem: the fake filesystem used for implementation.", "source": "juraj-google-style"}
{"code": "def Evaluate(self, client_obj):\n    if (self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL):\n        quantifier = all\n    elif (self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY):\n        quantifier = any\n    else:\n        raise ValueError(('Unexpected match mode value: %s' % self.match_mode))\n    return quantifier((rule.Evaluate(client_obj) for rule in self.rules))", "docstring": "Evaluates rules held in the rule set.\n\nArgs:\nclient_obj: Either an aff4 client object or a client_info dict as returned\nby ReadFullInfoClient if the relational db is used for reading.\n\nReturns:\nA bool value of the evaluation.\n\nRaises:\nValueError: The match mode is of unknown value.", "source": "codesearchnet"}
{"code": "def create_raw(self, key, value):\n    data = None\n    if ((key is not None) and (value is not None)):\n        data = self.db.create(key.strip(), value)\n    else:\n        self.tcex.log.warning(u'The key or value field was None.')\n    return data", "docstring": "Create method of CRUD operation for raw data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"}
{"code": "def reorder_resources(self, resource_ids, hxl_update=True):\n        \n        \n        dataset_id = self.data.get('id')\n        if not dataset_id:\n            raise HDXError('Dataset has no id! It must be read, created or updated first.')\n        data = {'id': dataset_id,\n                'order': resource_ids}\n        self._write_to_hdx('reorder', data, 'package_id')\n        if hxl_update:\n            self.hxl_update()", "docstring": "Reorder resources in dataset according to provided list.\nIf only some resource ids are supplied then these are\nassumed to be first and the other resources will stay in\ntheir original order.\n\nArgs:\nresource_ids (List[str]): List of resource ids\nhxl_update (bool): Whether to call package_hxl_update. Defaults to True.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def add_listener(self, event, listener):\n        \n        self.emit('new_listener', event, listener)\n        self._listeners[event].append(listener)\n        self._check_limit(event)\n        return self", "docstring": "Bind a listener to a particular event.\n\nArgs:\nevent (str): The name of the event to listen for. This may be any\nstring value.\nlistener (def or async def): The callback to execute when the event\nfires. This may be a sync or async function.", "source": "juraj-google-style"}
{"code": "def package_info(pkg_name):\n    \n    indent = \"  \"\n    for config, _ in _iter_packages():\n        if pkg_name == config[\"name\"]:\n            print(\"Package:\", pkg_name)\n            print(indent, \"Platform:\", config[\"platform\"])\n            print(indent, \"Version:\", config[\"version\"])\n            print(indent, \"Path:\", config[\"path\"])\n            print(indent, \"Worlds:\")\n            for world in config[\"maps\"]:\n                world_info(world[\"name\"], world_config=world, initial_indent=\"    \")", "docstring": "Prints the information of a package.\n\nArgs:\npkg_name (str): The name of the desired package to get information", "source": "juraj-google-style"}
{"code": "def __init__(self, value, translator):\n    self.value = value\n    self.translator = translator", "docstring": "Creates a NestedValueProvider that wraps the provided ValueProvider.\n\nArgs:\nvalue: ValueProvider object to wrap\ntranslator: function that is applied to the ValueProvider\nRaises:\n``RuntimeValueProviderError``: if any of the provided objects are not\naccessible.", "source": "github-repos"}
{"code": "def remove_user(username):\n    \n    users = passwd_reader.load_users()\n\n    assert username in users, \"Username '%s' not found!\" % username\n\n    \n    del users[username]\n    passwd_reader.save_users(users)\n\n    \n    home_dir = settings.DATA_PATH + username\n    if os.path.exists(home_dir):\n        shutil.rmtree(home_dir)\n\n    reload_configuration()", "docstring": "Remove user, his home directory and so on..\n\nArgs:\nusername (str): User's name.", "source": "juraj-google-style"}
{"code": "def __init__(self, output_filename=\"std_err.txt\"):\n        \n        self.output_filename = output_filename\n        self.errors = set()\n        self.error_count = Counter()", "docstring": "Initializes the handler with the output file to check.\n\nArgs:\noutput_filename (str): This is the file where the stderr for vasp\nis being redirected. The error messages that are checked are\npresent in the stderr. Defaults to \"std_err.txt\", which is the\ndefault redirect used by :class:`custodian.vasp.jobs.VaspJob`.", "source": "juraj-google-style"}
{"code": "def human_timestamp_to_datetime(human_timestamp, to_utc=False):\n    settings = {}\n    if to_utc:\n        settings = {'TO_TIMEZONE': 'UTC'}\n    return dateparser.parse(human_timestamp, settings=settings)", "docstring": "Converts a human-readable timestamp into a Python ``DateTime`` object\n\nArgs:\nhuman_timestamp (str): A timestamp string\nto_utc (bool): Convert the timestamp to UTC\n\nReturns:\nDateTime: The converted timestamp", "source": "codesearchnet"}
{"code": "def process(self, element):\n    text_input, prediction_result = element\n    softmax = torch.nn.Softmax(dim=-1)(prediction_result.inference['logits']).detach().numpy()\n    return [{'input': text_input, 'softmax': softmax}]", "docstring": "Takes the input text and the prediction result, and returns a dictionary\nwith the input text and the softmax probabilities\n\nArgs:\nelement: The tuple of input text and the prediction result\n\nReturns:\nA list of dictionaries, each containing the input text\nand the softmax output.", "source": "github-repos"}
{"code": "def correction(self, word):\n    return max(self.candidates(word), key=self.word_probability)", "docstring": "The most probable correct spelling for the word\n\nArgs:\nword (str): The word to correct\nReturns:\nstr: The most likely candidate", "source": "codesearchnet"}
{"code": "def Uniform(cls,\n            low: 'TensorFluent', high: 'TensorFluent',\n            batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:\n        \n        if low.scope != high.scope:\n            raise ValueError('Uniform distribution: parameters must have same scope!')\n        dist = tf.distributions.Uniform(low.tensor, high.tensor)\n        batch = low.batch or high.batch\n        if not batch and batch_size is not None:\n            t = dist.sample(batch_size)\n            batch = True\n        else:\n            t = dist.sample()\n        scope = low.scope.as_list()\n        return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Uniform sampling op with given low and high parameters.\n\nArgs:\nlow: The low parameter of the Uniform distribution.\nhigh: The high parameter of the Uniform distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Uniform distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\nValueError: If parameters do not have the same scope.", "source": "juraj-google-style"}
{"code": "def get_upstream_artifacts_full_paths_per_task_id(context):\n    \n    upstream_artifacts = context.task['payload']['upstreamArtifacts']\n    task_ids_and_relative_paths = [\n        (artifact_definition['taskId'], artifact_definition['paths'])\n        for artifact_definition in upstream_artifacts\n    ]\n\n    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)\n\n    upstream_artifacts_full_paths_per_task_id = {}\n    failed_paths_per_task_id = {}\n    for task_id, paths in task_ids_and_relative_paths:\n        for path in paths:\n            try:\n                path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path)\n                add_enumerable_item_to_dict(\n                    dict_=upstream_artifacts_full_paths_per_task_id,\n                    key=task_id, item=path_to_add\n                )\n            except ScriptWorkerTaskException:\n                if path in optional_artifacts_per_task_id.get(task_id, []):\n                    log.warning('Optional artifact \"{}\" of task \"{}\" not found'.format(path, task_id))\n                    add_enumerable_item_to_dict(\n                        dict_=failed_paths_per_task_id,\n                        key=task_id, item=path\n                    )\n                else:\n                    raise\n\n    return upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id", "docstring": "List the downloaded upstream artifacts.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\n\nReturns:\ndict, dict: lists of the paths to upstream artifacts, sorted by task_id.\nFirst dict represents the existing upstream artifacts. The second one\nmaps the optional artifacts that couldn't be downloaded\n\nRaises:\nscriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.", "source": "juraj-google-style"}
{"code": "def block_diag(*blocks: np.ndarray) -> np.ndarray:\n    \n    for b in blocks:\n        if b.shape[0] != b.shape[1]:\n            raise ValueError('Blocks must be square.')\n\n    if not blocks:\n        return np.zeros((0, 0), dtype=np.complex128)\n\n    n = sum(b.shape[0] for b in blocks)\n    dtype = functools.reduce(_merge_dtypes, (b.dtype for b in blocks))\n\n    result = np.zeros(shape=(n, n), dtype=dtype)\n    i = 0\n    for b in blocks:\n        j = i + b.shape[0]\n        result[i:j, i:j] = b\n        i = j\n\n    return result", "docstring": "Concatenates blocks into a block diagonal matrix.\n\nArgs:\n*blocks: Square matrices to place along the diagonal of the result.\n\nReturns:\nA block diagonal matrix with the given blocks along its diagonal.\n\nRaises:\nValueError: A block isn't square.", "source": "juraj-google-style"}
{"code": "def metadata_matches(self, query={}):\n    result = (len(query.keys()) > 0)\n    for key in query.keys():\n        result = (result and (query[key] == self.metadata.get(key)))\n    return result", "docstring": "Returns key matches to metadata\n\nThis will check every key in query for a matching key in metadata\nreturning true if every key is in metadata.  query without keys\nreturn false.\n\nArgs:\nquery(object): metadata for matching\n\nReturns:\nbool:\nTrue: when key count in query is > 0 and all keys in query in\nself.metadata\nFalse: if key count in query is <= 0 or any key in query not\nfound in self.metadata", "source": "codesearchnet"}
{"code": "def replica_id_in_sync_group(self):\n    if tensor_util.is_tf_type(self._replica_id_in_sync_group):\n        return self._replica_id_in_sync_group\n    return constant_op.constant(self._replica_id_in_sync_group, dtypes.int32, name='replica_id_in_sync_group')", "docstring": "Returns the id of the replica.\n\nThis identifies the replica among all replicas that are kept in sync. The\nvalue of the replica id can range from 0 to\n`tf.distribute.ReplicaContext.num_replicas_in_sync` - 1.\n\nNOTE: This is not guaranteed to be the same ID as the XLA replica ID use\nfor low-level operations such as collective_permute.\n\nReturns:\na `Tensor`.", "source": "github-repos"}
{"code": "def load(config):\n    if config.sys_path:\n        logger.debug('Appending %s to sys.path.', config.sys_path)\n        sys.path.append(config.sys_path)\n        logger.debug('sys.path is now %s', sys.path)\n    if config.lookups:\n        for (key, handler) in config.lookups.items():\n            register_lookup_handler(key, handler)\n    return config", "docstring": "Loads a stacker configuration by modifying sys paths, loading lookups,\netc.\n\nArgs:\nconfig (:class:`Config`): the stacker config to load.\n\nReturns:\n:class:`Config`: the stacker config provided above.", "source": "codesearchnet"}
{"code": "def get_missing_services(self, services):\n        \n        required_services = set(services)\n        provided_services = set(self._services.keys())\n        missing_services = required_services.difference(provided_services)\n\n        return sorted(missing_services)", "docstring": "Check if all required services are provided\n\nArgs:\nservices: List with the service names which are required\nReturns:\nList with missing services", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, resolver_context, encoding_method=None, file_object=None):\n    \n    if file_object is not None and encoding_method is None:\n      raise ValueError(\n          'File-like object provided without corresponding encoding method.')\n\n    super(EncodedStream, self).__init__(resolver_context)\n    self._current_offset = 0\n    self._decoded_data = b''\n    self._decoded_data_offset = 0\n    self._decoded_data_size = 0\n    self._decoded_stream_size = None\n    self._decoder = None\n    self._encoded_data = b''\n    self._encoding_method = encoding_method\n    self._file_object = file_object\n    self._file_object_set_in_init = bool(file_object)\n    self._realign_offset = True", "docstring": "Initializes a file-like object.\n\nIf the file-like object is chained do not separately use the parent\nfile-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nencoding_method (Optional[str]): method used to the encode the data.\nfile_object (Optional[file]): parent file-like object.\n\nRaises:\nValueError: if file_object provided but encoding_method is not.", "source": "juraj-google-style"}
{"code": "def _maybe_cast_inputs(self, inputs):\n    compute_dtype = self._compute_dtype\n    if self._autocast and compute_dtype and dtypes.as_dtype(compute_dtype).is_floating:\n\n        def f(x):\n            \n            cast_types = (tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)\n            if isinstance(x, cast_types) and x.dtype.is_floating and (x.dtype.base_dtype.name != compute_dtype):\n                return math_ops.cast(x, compute_dtype)\n            elif isinstance(x, tensor.TensorSpec) and x.dtype.is_floating:\n                return tensor.TensorSpec(x.shape, compute_dtype, x.name)\n            else:\n                return x\n        return nest.map_structure(f, inputs)\n    else:\n        return inputs", "docstring": "Maybe casts the inputs to the compute dtype.\n\nIf self._compute_dtype is floating-point, and self_autocast is True,\nfloating-point inputs are casted to self._compute_dtype.\n\nArgs:\ninputs: Input tensor, or structure of input tensors.\n\nReturns:\n`inputs`, but tensors may have been casted to self._compute_dtype", "source": "github-repos"}
{"code": "def AddFile(self, filepath):\n    if (filepath not in self._files):\n        self._files.add(filepath)\n        return True\n    return False", "docstring": "Adds a file path as a source.\n\nArgs:\nfilepath: a string representing a path to the file.\n\nReturns:\nTrue if the file is not an already existing source.", "source": "codesearchnet"}
{"code": "def from_py_func(cls, code):\n    from bokeh.util.deprecation import deprecated\n    deprecated(\"'from_py_func' is deprecated and will be removed in an eventual 2.0 release. Use CustomJSHover directly instead.\")\n    if (not isinstance(code, FunctionType)):\n        raise ValueError('CustomJSHover.from_py_func only accepts function objects.')\n    pscript = import_required('pscript', ('To use Python functions for CustomJSHover, you need PScript ' + '(\"conda install -c conda-forge pscript\" or \"pip install pscript\")'))\n\n    def pscript_compile(code):\n        sig = signature(code)\n        (all_names, default_values) = get_param_info(sig)\n        if ((len(all_names) - len(default_values)) != 0):\n            raise ValueError('Function may only contain keyword arguments.')\n        if (default_values and (not any((isinstance(value, Model) for value in default_values)))):\n            raise ValueError('Default value must be a Bokeh Model.')\n        func_kwargs = dict(zip(all_names, default_values))\n        code = (pscript.py2js(code, 'transformer') + ('return transformer(%s);\\n' % ', '.join(all_names)))\n        return (code, func_kwargs)\n    (jsfunc, func_kwargs) = pscript_compile(code)\n    return cls(code=jsfunc, args=func_kwargs)", "docstring": "Create a ``CustomJSHover`` instance from a Python functions. The\nfunction is translated to JavaScript using PScript.\n\nThe python functions must have no positional arguments. It is\npossible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword\narguments to the functions.\n\nThe ``code`` function namespace will contain the variable ``value``\n(the untransformed value) at render time as well as ``format`` and\n``special_vars`` as described in the class description.\n\nArgs:\ncode (function) : a scalar function to transform a single ``value``\n\nReturns:\nCustomJSHover", "source": "codesearchnet"}
{"code": "def _other_wrapper(self, name, writing):\n    io_attr = getattr(self._io, name)\n\n    def other_wrapper(*args, **kwargs):\n        'Wrap all other calls to the stream Object.\\n\\n            We do this to track changes to the write pointer.  Anything that\\n            moves the write pointer in a file open for appending should move\\n            the read pointer as well.\\n\\n            Args:\\n                *args: Pass through args.\\n                **kwargs: Pass through kwargs.\\n\\n            Returns:\\n                Wrapped stream object method.\\n            '\n        write_seek = self._io.tell()\n        ret_value = io_attr(*args, **kwargs)\n        if (write_seek != self._io.tell()):\n            self._read_seek = self._io.tell()\n            self._read_whence = 0\n        if ((not writing) or (not IS_PY2)):\n            return ret_value\n    return other_wrapper", "docstring": "Wrap a stream attribute in an other_wrapper.\n\nArgs:\nname: the name of the stream attribute to wrap.\n\nReturns:\nother_wrapper which is described below.", "source": "codesearchnet"}
{"code": "def _dirint_bins(ktp, alt, w, dktp):\n    \n    it = range(len(ktp))\n\n    \n    ktp_bin = [-1] * len(ktp)\n    ktp_bin = [0 if ktp[i] >= 0 and ktp[i] < 0.24 else ktp_bin[i] for i in it]\n    ktp_bin = [1 if ktp[i] >= 0.24 and ktp[i] < 0.4 else ktp_bin[i] for i in it]\n    ktp_bin = [2 if ktp[i] >= 0.4 and ktp[i] < 0.56 else ktp_bin[i] for i in it]\n    ktp_bin = [3 if ktp[i] >= 0.56 and ktp[i] < 0.7 else ktp_bin[i] for i in it]\n    ktp_bin = [4 if ktp[i] >= 0.7 and ktp[i] < 0.8 else ktp_bin[i] for i in it]\n    ktp_bin = [5 if ktp[i] >= 0.8 and ktp[i] <= 1 else ktp_bin[i] for i in it]\n\n    \n    alt_bin = [-1] * len(alt)\n    alt_bin = [0 if alt[i] <= 90 and alt[i] > 65 else alt_bin[i] for i in it]\n    alt_bin = [1 if alt[i] <= 65 and alt[i] > 50 else alt_bin[i] for i in it]\n    alt_bin = [2 if alt[i] <= 50 and alt[i] > 35 else alt_bin[i] for i in it]\n    alt_bin = [3 if alt[i] <= 35 and alt[i] > 20 else alt_bin[i] for i in it]\n    alt_bin = [4 if alt[i] <= 20 and alt[i] > 10 else alt_bin[i] for i in it]\n    alt_bin = [5 if alt[i] <= 10 else alt_bin[i] for i in it]\n\n    \n    w_bin = [-1] * len(w)\n    w_bin = [0 if w[i] >= 0 and w[i] < 1 else w_bin[i] for i in it]\n    w_bin = [1 if w[i] >= 1 and w[i] < 2 else w_bin[i] for i in it]\n    w_bin = [2 if w[i] >= 2 and w[i] < 3 else w_bin[i] for i in it]\n    w_bin = [3 if w[i] >= 3 else w_bin[i] for i in it]\n    w_bin = [4 if w[i] == -1 else w_bin[i] for i in it]\n\n    \n    dktp_bin = [-1] * len(dktp)\n    dktp_bin = [0 if dktp[i] >= 0 and dktp[i] < 0.015 else dktp_bin[i] for i in it]\n    dktp_bin = [1 if dktp[i] >= 0.015 and dktp[i] < 0.035 else dktp_bin[i] for i in it]\n    dktp_bin = [2 if dktp[i] >= 0.035 and dktp[i] < 0.07 else dktp_bin[i] for i in it]\n    dktp_bin = [3 if dktp[i] >= 0.07 and dktp[i] < 0.15 else dktp_bin[i] for i in it]\n    dktp_bin = [4 if dktp[i] >= 0.15 and dktp[i] < 0.3 else dktp_bin[i] for i in it]\n    dktp_bin = [5 if dktp[i] >= 0.3 and dktp[i] <= 1 else dktp_bin[i] for i in it]\n    dktp_bin = [6 if dktp[i] == -1 else dktp_bin[i] for i in it]\n\n    return ktp_bin, alt_bin, w_bin, dktp_bin", "docstring": "Determine the bins for the DIRINT coefficients.\n\nArgs:\nktp : Altitude-independent clearness index\nalt : Solar altitude angle\nw : precipitable water estimated from surface dew-point temperature\ndktp : stability index\n\nReturns:\ntuple of ktp_bin, alt_bin, w_bin, dktp_bin", "source": "juraj-google-style"}
{"code": "def __sendCommand(self, cmd):\n        \n        logging.info('%s: sendCommand[%s]', self.port, cmd)\n        if self.logThreadStatus == self.logStatus['running']:\n            self.logThreadStatus = self.logStatus['pauseReq']\n            while self.logThreadStatus != self.logStatus['paused'] and self.logThreadStatus != self.logStatus['stop']:\n                pass\n\n        try:\n            \n            retry_times = 3\n            while retry_times > 0:\n                retry_times -= 1\n                try:\n                    self._sendline(cmd)\n                    self._expect(cmd)\n                except Exception as e:\n                    logging.exception('%s: failed to send command[%s]: %s', self.port, cmd, str(e))\n                    if retry_times == 0:\n                        raise\n                else:\n                    break\n\n            line = None\n            response = []\n            retry_times = 10\n            while retry_times > 0:\n                line = self._readline()\n                logging.info('%s: the read line is[%s]', self.port, line)\n                if line:\n                    response.append(line)\n                    if line == 'Done':\n                        break\n                else:\n                    retry_times -= 1\n                    time.sleep(0.2)\n            if line != 'Done':\n                raise Exception('%s: failed to find end of response' % self.port)\n            logging.info('%s: send command[%s] done!', self.port, cmd)\n            return response\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger(\"sendCommand() Error: \" + str(e))\n            raise", "docstring": "send specific command to reference unit over serial port\n\nArgs:\ncmd: OpenThread CLI string\n\nReturns:\nDone: successfully send the command to reference unit and parse it\nValue: successfully retrieve the desired value from reference unit\nError: some errors occur, indicates by the followed specific error number", "source": "juraj-google-style"}
{"code": "def number(digit):\n\t\n\n\tspoken = str(digit)\n\n\tif spoken.startswith(\"8\") or spoken[:len(spoken) % 3] == \"11\":\n\t\tarticle = \"an \"\n\telse:\n\t\tarticle = \"a \"\n\n\tif spoken.endswith(\"1\") and spoken != \"11\":\n\t\tsuffix = \"st\"\n\telif spoken.endswith(\"2\") and spoken != \"12\":\n\t\tsuffix = \"nd\"\n\telif spoken.endswith(\"3\") and spoken != \"13\":\n\t\tsuffix = \"rd\"\n\telse:\n\t\tsuffix = \"th\"\n\n\tif digit > 999:\n\t\tprefix = len(spoken) % 3\n\t\tseparated = spoken[:prefix]\n\t\tfor n in range(prefix, len(spoken), 3):\n\t\t\tseparated += \",\" + spoken[n : n + 3]\n\t\tspoken = separated\n\n\treturn article + spoken + suffix", "docstring": "Gets a spoken-word representation for a number.\n\nArguments:\ndigit (int): An integer to convert into spoken-word.\n\nReturns:\nA spoken-word representation for a digit,\nincluding an article ('a' or 'an') and a suffix,\ne.g. 1 -> 'a 1st', 11 -> \"an 11th\". Adittionally\ndelimits characters in pairs of three for values > 999.", "source": "juraj-google-style"}
{"code": "def _record_result_type(recorder, f):\n\n    def wrapper(*args, **kwargs):\n        res = f(*args, **kwargs)\n        res = recorder(args, kwargs, res)\n        return res\n    return wrapper", "docstring": "A decorator that records some information about the function.\n\nArgs:\nrecorder: a function of signature `(args, kwargs, res) -> res`.\nf: the original function.\n\nReturns:\nA transformed function that calls the original function and then the\nrecorder afterwards.", "source": "github-repos"}
{"code": "def _full_reduce(nodes):\n    \n    was_reduced, nodes = maybe_reduce(nodes)\n    while was_reduced:\n        was_reduced, nodes = maybe_reduce(nodes)\n    return nodes", "docstring": "Apply degree reduction to ``nodes`` until it can no longer be reduced.\n\n.. note::\n\nThere is also a Fortran implementation of this function, which\nwill be used if it can be built.\n\nArgs:\nnodes (numpy.ndarray): The nodes in the curve.\n\nReturns:\nnumpy.ndarray: The fully degree-reduced nodes.", "source": "juraj-google-style"}
{"code": "def __init__(self, filename, args, version):\n        \n        self.args = args\n        self.version = version\n\n        self.filename = filename\n\n        try:\n            with open(self.filename, 'rb') as file:\n                self.data = json.load(file)\n        except IOError:\n            self.data = {}", "docstring": "Args:\nfilename:\nFilename for database.\nargs:\nProgram arguments.\nversion:\nVersion of file.", "source": "juraj-google-style"}
{"code": "def run_command(self, command, arg=None, is_eval=False):\n    mode = ((is_eval and 'eval') or 'command')\n    if isinstance(arg, tuple):\n        (name, d) = arg\n    else:\n        (name, d) = (arg, {})\n    result = getattr(self.connection.admin, mode)(command, name, **d)\n    return result", "docstring": "run command on the server\n\nArgs:\ncommand - command string\narg - command argument\nis_eval - if True execute command as eval\n\nreturn command's result", "source": "codesearchnet"}
{"code": "def daemonize(pidfile=None):\n    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))\n    os.chdir('/')\n    os.umask(0)\n    pid = os.fork()\n    if (pid > 0):\n        os._exit(0)\n    os.setsid()\n    pid = os.fork()\n    if (pid > 0):\n        os._exit(0)\n\n    def terminate(signal, stack_frame):\n        msg = 'Terminating on signal {}'.format(signal)\n        logger.info(msg)\n        raise SystemExit(msg)\n    signal.signal(signal.SIGTERM, terminate)\n    streams = [sys.stdin, sys.stdout, sys.stderr]\n    for stream in streams:\n        devnull = os.open(os.devnull, os.O_RDWR)\n        os.dup2(devnull, stream.fileno())\n    for fd in [stream.fileno() for stream in streams]:\n        try:\n            os.close(fd)\n        except OSError as err:\n            if (err.errno == errno.EBADF):\n                pass\n    if ((pidfile is None) or (pidfile.strip() == '')):\n        logger.debug('Empty pidfile set')\n    else:\n        pid = os.getpid()\n        try:\n            with open(pidfile, 'w') as f:\n                f.write('{}\\n'.format(pid))\n                f.close()\n        except EnvironmentError:\n            logger.error('Failed to create pidfile at {}'.format(pidfile))\n\n        def remove_pid_file():\n            os.remove(pidfile)\n        atexit.register(remove_pid_file)\n    logger.debug('Process daemonized')", "docstring": "Turn the running process into a proper daemon according to PEP3143.\n\nArgs:\npidfile --The pidfile to create.", "source": "codesearchnet"}
{"code": "def __init__(self, instrumentation_key, telemetry_channel=None):\n        \n        if instrumentation_key:\n            if isinstance(instrumentation_key, channel.TelemetryChannel):\n                telemetry_channel = instrumentation_key\n                instrumentation_key = None\n        else:\n            raise Exception('Instrumentation key was required but not provided')\n        self._context = channel.TelemetryContext()\n        self._context.instrumentation_key = instrumentation_key\n        self._channel = telemetry_channel or channel.TelemetryChannel()\n        self._telemetry_processors = []", "docstring": "Initializes a new instance of the class.\n\nArgs:\ninstrumentation_key (str). the instrumentation key to use for this telemetry client.\\n\ntelemetry_channel (:class:`channel.TelemetryChannel`). the optional telemetry channel to be used instead of\nconstructing a default one.", "source": "juraj-google-style"}
{"code": "def _tensor_product(self, other, reverse=False):\n        \n        \n        if not isinstance(other, Kraus):\n            other = Kraus(other)\n\n        \n        ka_l, ka_r = self._data\n        kb_l, kb_r = other._data\n        if reverse:\n            input_dims = self.input_dims() + other.input_dims()\n            output_dims = self.output_dims() + other.output_dims()\n            kab_l = [np.kron(b, a) for a in ka_l for b in kb_l]\n        else:\n            input_dims = other.input_dims() + self.input_dims()\n            output_dims = other.output_dims() + self.output_dims()\n            kab_l = [np.kron(a, b) for a in ka_l for b in kb_l]\n        if ka_r is None and kb_r is None:\n            kab_r = None\n        else:\n            if ka_r is None:\n                ka_r = ka_l\n            if kb_r is None:\n                kb_r = kb_l\n            if reverse:\n                kab_r = [np.kron(b, a) for a in ka_r for b in kb_r]\n            else:\n                kab_r = [np.kron(a, b) for a in ka_r for b in kb_r]\n        data = (kab_l, kab_r)\n        return Kraus(data, input_dims, output_dims)", "docstring": "Return the tensor product channel.\n\nArgs:\nother (QuantumChannel): a quantum channel subclass.\nreverse (bool): If False return self ⊗ other, if True return\nif True return (other ⊗ self) [Default: False\nReturns:\nKraus: the tensor product channel as a Kraus object.\n\nRaises:\nQiskitError: if other cannot be converted to a channel.", "source": "juraj-google-style"}
{"code": "def cancel(self, consumers):\n        \n        for consumer in consumers:\n            del self._consumers[consumer.queue]\n            protocol = yield self.when_connected()\n            yield protocol.cancel(consumer)", "docstring": "Cancel a consumer that was previously started with consume.\n\nArgs:\nconsumer (list of fedora_messaging.api.Consumer): The consumers to cancel.", "source": "juraj-google-style"}
{"code": "def read_float(self, little_endian=True):\n    if little_endian:\n        endian = '<'\n    else:\n        endian = '>'\n    return self.unpack(('%sf' % endian), 4)", "docstring": "Read 4 bytes as a float value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nfloat:", "source": "codesearchnet"}
{"code": "def _process_new(self, feed_item):\n    lp = self.landing_page_dao.get(feed_item, required=True)\n    feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_ID] = lp['id']\n    feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_NAME] = lp['name']\n    return {'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None), 'name': feed_item.get(FieldMap.CAMPAIGN_NAME, None), 'startDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_START_DATE, None)), 'endDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_END_DATE, None)), 'defaultLandingPageId': lp['id']}", "docstring": "Creates a new campaign DCM object from a feed item representing a campaign from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\nfeed_item: Feed item representing the campaign from the Bulkdozer feed.\n\nReturns:\nA campaign object ready to be inserted in DCM through the API.", "source": "github-repos"}
{"code": "def update_(self, sct_dict, conf_arg=True):\n    for (opt, val) in sct_dict.items():\n        if (opt not in self.def_):\n            continue\n        if ((not conf_arg) or self.def_[opt].conf_arg):\n            self[opt] = val", "docstring": "Update values of configuration section with dict.\n\nArgs:\nsct_dict (dict): dict indexed with option names. Undefined\noptions are discarded.\nconf_arg (bool): if True, only options that can be set in a config\nfile are updated.", "source": "codesearchnet"}
{"code": "def destroy_record(client=None, found_record=None, record='', zone_id=''):\n    LOG.debug('Found DNS record: %s', found_record)\n    if (found_record['Name'].strip('.') == record):\n        dns_json = get_template(template_file='destroy/destroy_dns.json.j2', record=json.dumps(found_record))\n        dns_dict = json.loads(dns_json)\n        client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=dns_dict)\n        LOG.info('Destroyed \"%s\" in %s', found_record['Name'], zone_id)\n    else:\n        LOG.info('DNS record \"%s\" missing from %s.', record, zone_id)\n        LOG.debug(\"Found someone else's record: %s\", found_record['Name'])\n    return True", "docstring": "Destroy an individual DNS record.\n\nArgs:\nclient (botocore.client.Route53): Route 53 boto3 client.\nfound_record (dict): Route 53 record set::\n\n{'Name': 'unicorn.forrest.dev.example.com.',\n'ResourceRecords':\n[{'Value':\n'internal-unicornforrest-1777489395.us-east-1.elb.amazonaws.com'\n}],\n'TTL': 60,\n'Type': 'CNAME'}\n\nrecord (str): Application DNS record name. e.g.\nzone_id (str): Route 53 Hosted Zone ID, e.g. /hostedzone/ZSVGJWJ979WQD.\n\nReturns:\nbool: True upon successful completion.", "source": "codesearchnet"}
{"code": "def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):\n    if (n is None):\n        n = __READ_TAIL_N__\n    verbose = _rectify_verb_read(verbose)\n    if verbose:\n        print(('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n)))\n    try:\n        if (not util_path.checkpath(fpath, verbose=verbose, n=n)):\n            raise IOError('[io] * FILE DOES NOT EXIST!')\n        with open(fpath, 'rb') as file_:\n            if aslines:\n                if six.PY2:\n                    text = [line.decode('utf8', errors=errors) for line in file_.readlines()]\n                else:\n                    text = [line.decode('utf8', errors=errors) for line in file_.readlines()]\n            elif six.PY2:\n                text = file_.read().decode('utf8', errors=errors)\n            else:\n                text = file_.read().decode('utf8', errors=errors)\n        return text\n    except IOError as ex:\n        from utool import util_dbg\n        if (verbose or strict):\n            util_dbg.printex(ex, (' * Error reading fpath=%r' % util_path.tail(fpath, n=n)), '[io]')\n        if strict:\n            raise", "docstring": "r\"\"\" Reads text from a file. Automatically returns utf8.\n\nArgs:\nfpath (str): file path\naslines (bool): if True returns list of lines\nverbose (bool): verbosity flag\n\nReturns:\nstr: text from fpath (this is unicode)\n\nIgnore:\nx = b'''/whaleshark_003_fors\\xc3\\xb8g.wmv\" />\\r\\n'''\nut.writeto('foo.txt', x)\ny = ut.readfrom('foo.txt')\ny.encode('utf8') == x", "source": "codesearchnet"}
{"code": "def get_event(self, event_key):\n    \n\n    event = self.event_key_map.get(event_key)\n\n    if event:\n      return event\n\n    self.logger.error('Event \"%s\" is not in datafile.' % event_key)\n    self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR))\n    return None", "docstring": "Get event for the provided event key.\n\nArgs:\nevent_key: Event key for which event is to be determined.\n\nReturns:\nEvent corresponding to the provided event key.", "source": "juraj-google-style"}
{"code": "def machine_op(self, operation):\n        \n        operations = {'feed2start': 1,\n                      'feedone': 2,\n                      'cut': 3\n                      }\n        \n        if operation in operations:\n            self.send('^'+'O'+'P'+chr(operations[operation]))\n        else:\n            raise RuntimeError('Invalid operation.')", "docstring": "Perform machine operations\n\nArgs:\noperations: which operation you would like\nReturns:\nNone\nRaises:\nRuntimeError: Invalid operation", "source": "juraj-google-style"}
{"code": "def get_first_model_with_resource_name(cls, resource_name):\n        \n\n        models = cls.get_models_with_resource_name(resource_name)\n\n        if len(models) > 0:\n            return models[0]\n\n        return None", "docstring": "Get the first model corresponding to a resource_name\n\nArgs:\nresource_name: the resource name", "source": "juraj-google-style"}
{"code": "def config_init(config_file, json_config_obj, config_dirname=None):\n    \n    HOME = os.environ['HOME']\n    \n    if config_dirname:\n        dir_path = HOME + '/' + config_dirname\n        if not os.path.exists(dir_path):\n            os.mkdir(dir_path)\n            os.chmod(dir_path, 0o755)\n    else:\n        dir_path = HOME\n    \n    r = export_json_object(\n            dict_obj=json_config_obj,\n            filename=dir_path + '/' + config_file\n        )\n    return r", "docstring": "Summary:\nCreates local config from JSON seed template\nArgs:\n:config_file (str): filesystem object containing json dict of config values\n:json_config_obj (json):  data to be written to config_file\n:config_dirname (str):  dir name containing config_file\nReturns:\nTYPE: bool, Success | Failure", "source": "juraj-google-style"}
{"code": "def profile_df(df):\n  \n  \n  \n  return IPython.core.display.HTML(\n      pandas_profiling.ProfileReport(df).html.replace('bootstrap', 'nonexistent'))", "docstring": "Generate a profile of data in a dataframe.\n\nArgs:\ndf: the Pandas dataframe.", "source": "juraj-google-style"}
{"code": "class EncodecEncoderOutput(ModelOutput):\n    audio_codes: Optional[torch.LongTensor] = None\n    audio_scales: Optional[torch.FloatTensor] = None", "docstring": "Args:\naudio_codes (`torch.LongTensor`  of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):\nDiscret code embeddings computed using `model.encode`.\naudio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):\nScaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.", "source": "github-repos"}
{"code": "def run_simulations(self, param_list, show_progress=True):\n    if (self.runner is None):\n        raise Exception('No runner was ever specified for this CampaignManager.')\n    if (param_list == []):\n        return\n    desired_params = self.db.get_params()\n    for p in param_list:\n        passed = list(p.keys())\n        available = (['RngRun'] + desired_params)\n        if (set(passed) != set(available)):\n            raise ValueError(('Specified parameter combination does not match the supported parameters:\\nPassed: %s\\nSupported: %s' % (sorted(passed), sorted(available))))\n    if self.check_repo:\n        self.check_repo_ok()\n    self.runner.configure_and_build(skip_configuration=True)\n    shuffle(param_list)\n    results = self.runner.run_simulations(param_list, self.db.get_data_dir())\n    if show_progress:\n        result_generator = tqdm(results, total=len(param_list), unit='simulation', desc='Running simulations')\n    else:\n        result_generator = results\n    for result in result_generator:\n        self.db.insert_result(result)", "docstring": "Run several simulations specified by a list of parameter combinations.\n\nNote: this function does not verify whether we already have the\nrequired simulations in the database - it just runs all the parameter\ncombinations that are specified in the list.\n\nArgs:\nparam_list (list): list of parameter combinations to execute.\nItems of this list are dictionaries, with one key for each\nparameter, and a value specifying the parameter value (which\ncan be either a string or a number).\nshow_progress (bool): whether or not to show a progress bar with\npercentage and expected remaining time.", "source": "codesearchnet"}
{"code": "def build_hlo_module(root: testlib_base.HloInstruction, *instructions: testlib_base.HloInstruction, extra_computations: Sequence[testlib_base.HloComputation] | None=None) -> tuple[testlib_base.HloModule, testlib_base.BufferAssignment]:\n    hlo_module = testlib_base.HloModule(root.name())\n    hlo_module.add_entry_computation(testlib_base.build_hlo_computation(root, *instructions))\n    if extra_computations is not None:\n        for computation in extra_computations:\n            hlo_module.add_computation(computation)\n    return annotate_hlo_module(hlo_module)", "docstring": "Builds an HLO module from a root instruction and its dependencies.\n\nArgs:\nroot: The root instruction of the module.\n*instructions: The instructions that are dependencies of the root\ninstruction.\nextra_computations: Any extra computations that should be added to the\nmodule.\n\nReturns:\nA tuple containing the HLO module and its buffer assignment.", "source": "github-repos"}
{"code": "def to_pil_image(image: Union[np.ndarray, 'PIL.Image.Image', 'torch.Tensor', 'tf.Tensor', 'jnp.ndarray'], do_rescale: Optional[bool]=None, image_mode: Optional[str]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> 'PIL.Image.Image':\n    requires_backends(to_pil_image, ['vision'])\n    if isinstance(image, PIL.Image.Image):\n        return image\n    if is_torch_tensor(image) or is_tf_tensor(image):\n        image = image.numpy()\n    elif is_jax_tensor(image):\n        image = np.array(image)\n    elif not isinstance(image, np.ndarray):\n        raise ValueError(f'Input image type not supported: {type(image)}')\n    image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)\n    image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image\n    do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale\n    if do_rescale:\n        image = rescale(image, 255)\n    image = image.astype(np.uint8)\n    return PIL.Image.fromarray(image, mode=image_mode)", "docstring": "Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if\nneeded.\n\nArgs:\nimage (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):\nThe image to convert to the `PIL.Image` format.\ndo_rescale (`bool`, *optional*):\nWhether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default\nto `True` if the image type is a floating type and casting to `int` would result in a loss of precision,\nand `False` otherwise.\nimage_mode (`str`, *optional*):\nThe mode to use for the PIL image. If unset, will use the default mode for the input image type.\ninput_data_format (`ChannelDimension`, *optional*):\nThe channel dimension format of the input image. If unset, will use the inferred format from the input.\n\nReturns:\n`PIL.Image.Image`: The converted image.", "source": "github-repos"}
{"code": "def decode(self, encoded):\n        \n        if self.enforce_reversible:\n            self.enforce_reversible = False\n            if self.encode(self.decode(encoded)) != encoded:\n                raise ValueError('Decoding is not reversible for \"%s\"' % encoded)\n            self.enforce_reversible = True\n\n        return encoded", "docstring": "Decodes an object.\n\nArgs:\nobject_ (object): Encoded object.\n\nReturns:\nobject: Object decoded.", "source": "juraj-google-style"}
{"code": "def _set_update(self):\n    try:\n        self._updateStack = False\n        stack_name = self._config.get('environment', {}).get('stack_name', None)\n        response = self._cloudFormation.describe_stacks(StackName=stack_name)\n        stack = response['Stacks'][0]\n        if (stack['StackStatus'] == 'ROLLBACK_COMPLETE'):\n            logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')\n            del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)\n            logging.info('delete started for stack: {}'.format(stack_name))\n            logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))\n            stack_delete = self.poll_stack()\n            if (not stack_delete):\n                return False\n        if (stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE']):\n            self._updateStack = True\n    except:\n        self._updateStack = False\n    logging.info(('update_stack: ' + str(self._updateStack)))\n    return True", "docstring": "Determine if we are creating a new stack or updating and existing one.\nThe update member is set as you would expect at the end of this query.\n\nArgs:\nNone\n\nReturns:\nTrue", "source": "codesearchnet"}
{"code": "def __init__(\n        self,\n        deconvolution_layer_list,\n        opt_params=None,\n        learning_rate=1e-05,\n        verbose_mode=False\n    ):\n        \n        for deconvolution_layer in deconvolution_layer_list:\n            if isinstance(deconvolution_layer, DeconvolutionLayer) is False:\n                raise TypeError()\n\n        if opt_params is None:\n            opt_params = Adam()\n            opt_params.dropout_rate = 0.0\n        \n        if isinstance(opt_params, OptParams) is False:\n            raise TypeError()\n\n        logger = getLogger(\"pydbm\")\n        handler = StreamHandler()\n        if verbose_mode is True:\n            handler.setLevel(DEBUG)\n            logger.setLevel(DEBUG)\n        else:\n            handler.setLevel(ERROR)\n            logger.setLevel(ERROR)\n\n        logger.addHandler(handler)\n\n        self.__deconvolution_layer_list = deconvolution_layer_list\n        self.__learning_rate = learning_rate\n        self.__attenuate_epoch = 50\n        self.__opt_params = opt_params\n        self.__logger = logger", "docstring": "Init.\n\nArgs:\ndeconvolution_layer_list:   `list` of `DeconvolutionLayer`.\nopt_params:                 is-a `OptParams`. If `None`, this value will be `Adam`.\nlearning_rate:              Learning rate.\nverbose_mode:               Verbose mode or not.", "source": "juraj-google-style"}
{"code": "def indices2nodes(self, indices):\n        \n        if set(indices) - set(self.node_indices):\n            raise ValueError(\n                \"`indices` must be a subset of the Subsystem's indices.\")\n        return tuple(self._index2node[n] for n in indices)", "docstring": "Return |Nodes| for these indices.\n\nArgs:\nindices (tuple[int]): The indices in question.\n\nReturns:\ntuple[Node]: The |Node| objects corresponding to these indices.\n\nRaises:\nValueError: If requested indices are not in the subsystem.", "source": "juraj-google-style"}
{"code": "def enable_tracing(self):\n    if (not self.connected):\n        raise HardwareError('Cannot enable tracing if we are not in a connected state')\n    if (self._traces is not None):\n        _clear_queue(self._traces)\n        return self._traces\n    self._traces = queue.Queue()\n    self._loop.run_coroutine(self.adapter.open_interface(0, 'tracing'))\n    return self._traces", "docstring": "Open the tracing interface and accumulate traces in a queue.\n\nThis method is safe to call multiple times in a single device\nconnection. There is no way to check if the tracing interface is\nopened or to close it once it is opened (apart from disconnecting from\nthe device).\n\nThe first time this method is called, it will open the tracing\ninterface and return a queue that will be filled asynchronously with\nreports as they are received.  Subsequent calls will just empty the\nqueue and return the same queue without interacting with the device at\nall.\n\nReturns:\nqueue.Queue: A queue that will be filled with trace data from the device.\n\nThe trace data will be in disjoint bytes objects in the queue", "source": "codesearchnet"}
{"code": "def load_local(self, state, name):\n    var = self.block_env.get_local(self.frame.current_block, name)\n    if self.ctx.options.strict_undefined_checks and self.ctx.python_version >= (3, 10) and (not var):\n        raise KeyError()\n    return self.load_from(state, self.frame.f_locals, name)", "docstring": "Called when a local is loaded onto the stack.\n\nUses the name to retrieve the value from the current locals().\n\nArgs:\nstate: The current VM state.\nname: Name of the local\n\nReturns:\nA tuple of the state and the value (cfg.Variable)\n\nRaises:\nKeyError: If the name is determined to be undefined", "source": "github-repos"}
{"code": "def AddDescriptor(self, desc):\n    if (not isinstance(desc, descriptor.Descriptor)):\n        raise TypeError('Expected instance of descriptor.Descriptor.')\n    self._descriptors[desc.full_name] = desc\n    self._AddFileDescriptor(desc.file)", "docstring": "Adds a Descriptor to the pool, non-recursively.\n\nIf the Descriptor contains nested messages or enums, the caller must\nexplicitly register them. This method also registers the FileDescriptor\nassociated with the message.\n\nArgs:\ndesc: A Descriptor.", "source": "codesearchnet"}
{"code": "def vgg_layer(inputs, nout, kernel_size=3, activation=tf.nn.leaky_relu, padding='SAME', is_training=True, has_batchnorm=False, scope=None):\n    with tf.variable_scope(scope):\n        net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding, activation=None, name='conv')\n        if has_batchnorm:\n            net = tfl.batch_normalization(net, training=is_training, name='bn')\n        net = activation(net)\n    return net", "docstring": "A layer of VGG network with batch norm.\n\nArgs:\ninputs: image tensor\nnout: number of output channels\nkernel_size: size of the kernel\nactivation: activation function\npadding: padding of the image\nis_training: whether it is training mode or not\nhas_batchnorm: whether batchnorm is applied or not\nscope: variable scope of the op\nReturns:\nnet: output of layer", "source": "codesearchnet"}
{"code": "def put_archive(self, path, data):\n        \n        return self.client.api.put_archive(self.id, path, data)", "docstring": "Insert a file or folder in this container using a tar archive as\nsource.\n\nArgs:\npath (str): Path inside the container where the file(s) will be\nextracted. Must exist.\ndata (bytes): tar data to be extracted\n\nReturns:\n(bool): True if the call succeeds.\n\nRaises:\n:py:class:`~docker.errors.APIError` If an error occurs.", "source": "juraj-google-style"}
{"code": "def _set_dacl_inheritance(path, objectType, inheritance=True, copy=True, clear=False):\n    \n    ret = {'result': False,\n           'comment': '',\n           'changes': {}}\n\n    if path:\n        try:\n            sd = win32security.GetNamedSecurityInfo(path, objectType, win32security.DACL_SECURITY_INFORMATION)\n            tdacl = sd.GetSecurityDescriptorDacl()\n            if inheritance:\n                if clear:\n                    counter = 0\n                    removedAces = []\n                    while counter < tdacl.GetAceCount():\n                        tAce = tdacl.GetAce(counter)\n                        if (tAce[0][1] & win32security.INHERITED_ACE) != win32security.INHERITED_ACE:\n                            tdacl.DeleteAce(counter)\n                            removedAces.append(_ace_to_text(tAce, objectType))\n                        else:\n                            counter = counter + 1\n                    if removedAces:\n                        ret['changes']['Removed ACEs'] = removedAces\n                else:\n                    ret['changes']['Non-Inherited ACEs'] = 'Left in the DACL'\n                win32security.SetNamedSecurityInfo(\n                    path, objectType,\n                    win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION,\n                    None, None, tdacl, None)\n                ret['changes']['Inheritance'] = 'Enabled'\n            else:\n                if not copy:\n                    counter = 0\n                    inheritedAcesRemoved = []\n                    while counter < tdacl.GetAceCount():\n                        tAce = tdacl.GetAce(counter)\n                        if (tAce[0][1] & win32security.INHERITED_ACE) == win32security.INHERITED_ACE:\n                            tdacl.DeleteAce(counter)\n                            inheritedAcesRemoved.append(_ace_to_text(tAce, objectType))\n                        else:\n                            counter = counter + 1\n                    if inheritedAcesRemoved:\n                        ret['changes']['Removed ACEs'] = inheritedAcesRemoved\n                else:\n                    ret['changes']['Previously Inherited ACEs'] = 'Copied to the DACL'\n                win32security.SetNamedSecurityInfo(\n                    path, objectType,\n                    win32security.DACL_SECURITY_INFORMATION | win32security.PROTECTED_DACL_SECURITY_INFORMATION,\n                    None, None, tdacl, None)\n                ret['changes']['Inheritance'] = 'Disabled'\n            ret['result'] = True\n        except Exception as e:\n            ret['result'] = False\n            ret['comment'] = 'Error attempting to set the inheritance.  The error was {0}.'.format(e)\n\n    return ret", "docstring": "helper function to set the inheritance\nArgs:\n\npath (str): The path to the object\n\nobjectType (str): The type of object\n\ninheritance (bool): True enables inheritance, False disables\n\ncopy (bool): Copy inherited ACEs to the DACL before disabling\ninheritance\n\nclear (bool): Remove non-inherited ACEs from the DACL", "source": "juraj-google-style"}
{"code": "def get_index(fn, cols, names, sep):\n    if (not has_index(fn)):\n        return generate_index(fn, cols, names, sep)\n    file_index = read_index(get_index_fn(fn))\n    if (len((set(names) - (set(file_index.columns) - {'seek'}))) != 0):\n        raise ValueError('{}: missing index columns: reindex'.format(fn))\n    if ('seek' not in file_index.columns):\n        raise ValueError('{}: invalid index: reindex'.format(fn))\n    return file_index", "docstring": "Restores the index for a given file.\n\nArgs:\nfn (str): the name of the file.\ncols (list): a list containing column to keep (as int).\nnames (list): the name corresponding to the column to keep (as str).\nsep (str): the field separator.\n\nReturns:\npandas.DataFrame: the index.\n\nIf the index doesn't exist for the file, it is first created.", "source": "codesearchnet"}
{"code": "def deploy(target):\n    if (not os.getenv(CIRCLECI_ENV_VAR)):\n        raise EnvironmentError('Must be on CircleCI to run this script')\n    current_branch = os.getenv('CIRCLE_BRANCH')\n    if ((target == 'PROD') and (current_branch != 'master')):\n        raise EnvironmentError('Refusing to deploy to production from branch {current_branch!r}. Production deploys can only be made from master.'.format(current_branch=current_branch))\n    if (target in ('PROD', 'TEST')):\n        pypi_username = os.getenv('{target}_PYPI_USERNAME'.format(target=target))\n        pypi_password = os.getenv('{target}_PYPI_PASSWORD'.format(target=target))\n    else:\n        raise ValueError(\"Deploy target must be 'PROD' or 'TEST', got {target!r}.\".format(target=target))\n    if (not (pypi_username and pypi_password)):\n        raise EnvironmentError(\"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' environment variables. These are required to push to PyPI.\".format(target=target))\n    os.environ['TWINE_USERNAME'] = pypi_username\n    os.environ['TWINE_PASSWORD'] = pypi_password\n    _shell('git config --global user.email \"oss@cloverhealth.com\"')\n    _shell('git config --global user.name \"Circle CI\"')\n    _shell('git config push.default current')\n    ret = _shell('make version', stdout=subprocess.PIPE)\n    version = ret.stdout.decode('utf-8').strip()\n    print('Deploying version {version!r}...'.format(version=version))\n    _shell('git tag -f -a {version} -m \"Version {version}\"'.format(version=version))\n    _shell('sed -i.bak \"s/^__version__ = .*/__version__ = {version!r}/\" */version.py'.format(version=version))\n    _shell('python setup.py sdist bdist_wheel')\n    _shell('git add ChangeLog AUTHORS */version.py')\n    _shell('git commit --no-verify -m \"Merge autogenerated files [skip ci]\"')\n    _pypi_push('dist')\n    _shell('git push --follow-tags')\n    print('Deployment complete. Latest version is {version}.'.format(version=version))", "docstring": "Deploys the package and documentation.\n\nProceeds in the following steps:\n\n1. Ensures proper environment variables are set and checks that we are on Circle CI\n2. Tags the repository with the new version\n3. Creates a standard distribution and a wheel\n4. Updates version.py to have the proper version\n5. Commits the ChangeLog, AUTHORS, and version.py file\n6. Pushes to PyPI\n7. Pushes the tags and newly committed files\n\nRaises:\n`EnvironmentError`:\n- Not running on CircleCI\n- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables\nare missing\n- Attempting to deploy to production from a branch that isn't master", "source": "codesearchnet"}
{"code": "def __init__(self, date=None, year=None, season=None, day_of_season=None,\n                 *args, **kwargs):\n        \n\n        if year is not None and season is not None and \\\n           day_of_season is not None:\n            date = (datetime.datetime(year=year - 1166, month=1, day=1) +\n                    datetime.timedelta(days=(season * 73) + day_of_season - 1))\n        elif date is None or not hasattr(date, \"timetuple\"):\n            date = datetime.date.today()\n        self.date = date\n\n        time_tuple = self.date.timetuple()\n\n        \n        year = time_tuple.tm_year\n        self.year = year + 1166  \n\n        day_of_year = time_tuple.tm_yday - 1  \n        if is_leap_year(year) and day_of_year > 59:\n            day_of_year -= 1  \n\n        self.day_of_week = day_of_year % 5\n        self.day_of_season = day_of_year % 73 + 1  \n        self.season = int(day_of_year / 73)\n\n        if is_leap_year(year) and time_tuple.tm_yday == 60:\n            self.holiday = \"St. Tib's Day\"\n            self.day_of_week = None\n            self.day_of_season = None\n            self.season = None\n        elif self.day_of_season == 5:\n            self.holiday = self.HOLIDAYS[\"apostle\"][self.season]\n        elif self.day_of_season == 50:\n            self.holiday = self.HOLIDAYS[\"seasonal\"][self.season]\n        else:\n            self.holiday = None\n\n        super(DDate, self).__init__(*args, **kwargs)", "docstring": "Discordian date setup and mangling.\n\nNote: year, season and day_of_season are all required if any are used\n\nArgs:\ndate: optional date object with a timetuple method, or uses today\nyear: optional integer discordian year to create from\nseason: optional integer discodian season to create from\nday_of_season: optional int discordian day of season to create from", "source": "juraj-google-style"}
{"code": "def get_geno_marker(self, marker, return_index=False):\n    if (self._mode != 'r'):\n        raise UnsupportedOperation(\"not available in 'w' mode\")\n    if (marker not in self._bim.index):\n        raise ValueError('{}: marker not in BIM'.format(marker))\n    seek_index = self._bim.loc[(marker, 'i')]\n    self.seek(seek_index)\n    if return_index:\n        return (self._read_current_marker(), seek_index)\n    return self._read_current_marker()", "docstring": "Gets the genotypes for a given marker.\n\nArgs:\nmarker (str): The name of the marker.\nreturn_index (bool): Wether to return the marker's index or not.\n\nReturns:\nnumpy.ndarray: The genotypes of the marker (additive format).", "source": "codesearchnet"}
{"code": "def make_directory_writable(dirname):\n    retval = shell_call(['docker', 'run', '-v', '{0}:/output_dir'.format(dirname), 'busybox:1.27.2', 'chmod', '-R', 'a+rwx', '/output_dir'])\n    if (not retval):\n        logging.error('Failed to change permissions on directory: %s', dirname)\n    return retval", "docstring": "Makes directory readable and writable by everybody.\n\nArgs:\ndirname: name of the directory\n\nReturns:\nTrue if operation was successfull\n\nIf you run something inside Docker container and it writes files, then\nthese files will be written as root user with restricted permissions.\nSo to be able to read/modify these files outside of Docker you have to change\npermissions to be world readable and writable.", "source": "codesearchnet"}
{"code": "def bind_sockets(address, port):\n    \n    ss = netutil.bind_sockets(port=port or 0, address=address)\n    assert len(ss)\n    ports = {s.getsockname()[1] for s in ss}\n    assert len(ports) == 1, \"Multiple ports assigned??\"\n    actual_port = ports.pop()\n    if port:\n        assert actual_port == port\n    return ss, actual_port", "docstring": "Bind a socket to a port on an address.\n\nArgs:\naddress (str) :\nAn address to bind a port on, e.g. ``\"localhost\"``\n\nport (int) :\nA port number to bind.\n\nPass 0 to have the OS automatically choose a free port.\n\nThis function returns a 2-tuple with the new socket as the first element,\nand the port that was bound as the second. (Useful when passing 0 as a port\nnumber to bind any free port.)\n\nReturns:\n(socket, port)", "source": "juraj-google-style"}
{"code": "def getitem_row_array(self, key):\n        \n        \n        key = list(key)\n\n        def getitem(df, internal_indices=[]):\n            return df.iloc[internal_indices]\n\n        result = self.data.apply_func_to_select_indices(\n            1, getitem, key, keep_remaining=False\n        )\n        \n        \n        new_index = self.index[key]\n        return self.__constructor__(result, new_index, self.columns, self._dtype_cache)", "docstring": "Get row data for target labels.\n\nArgs:\nkey: Target numeric indices by which to retrieve data.\n\nReturns:\nA new QueryCompiler.", "source": "juraj-google-style"}
{"code": "def load_file(file_path, credentials=None):\n  \n  if file_path.startswith('gs:\n    return _load_file_from_gcs(file_path, credentials)\n  else:\n    return open(file_path, 'r')", "docstring": "Load a file from either local or gcs.\n\nArgs:\nfile_path: The target file path, which should have the prefix 'gs://' if\nto be loaded from gcs.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nA python File object if loading file from local or a StringIO object if\nloading from gcs.", "source": "juraj-google-style"}
{"code": "def format_filter_value(self, element, value):\n        \n        format_func = self.allowed_filter.get(element)\n        return format_func(value)", "docstring": "Calls the specific function to format value,\ndepending on the given element.\n\nArguments:\nelement (string): The element of the VT to be formatted.\nvalue (dictionary): The element value.\n\nReturns:\nReturns a formatted value.", "source": "juraj-google-style"}
{"code": "def loads(s, single=False):\n    \n    es = deserialize(s)\n    if single:\n        return next(es)\n    return es", "docstring": "Deserialize :class:`Eds` string representations\n\nArgs:\ns (str): Eds string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of :class:`Eds` objects (unless the *single* option\nis `True`)", "source": "juraj-google-style"}
{"code": "def del_node(self, node):\n        \n        for node_ in self.values():\n            if node in node_:\n                node_.pop(node)\n        return bool(self.pop(node))", "docstring": "Removes a **node object** from the ``DictGraph``. Returns ``True`` if a\n**node object** has been removed. If the **node object** is not in the\n``DictGraph`` raises a ``KeyError``.\n\nArguments:\n\n- node(``object``) **node object** to be removed. Any hashable Python\n``object``.", "source": "juraj-google-style"}
{"code": "def _write(self, file_prefix, options=None):\n    if options and options.experimental_enable_async_checkpoint:\n        self._checkpoint_options = options\n        if checkpoint_context.in_preemption_save_context():\n            if self._async_checkpointer_impl is not None:\n                self._async_checkpointer_impl.sync()\n            logging.warning('Switching to regular sync checkpoint for preemption checkpoint.')\n        elif context.executing_eagerly():\n            return self._async_checkpointer()._write(file_prefix, options)\n        else:\n            logging.warning('Saving async checkpoint in graph mode is currently not supported; switching to regular sync checkpoint instead.')\n    start_time = time.time()\n    options = options or checkpoint_options.CheckpointOptions()\n    output = self._saver.save(file_prefix=file_prefix, options=options)\n    output = _convert_file_name_tensor_to_string(output)\n    if options.experimental_write_callbacks:\n        _execute_callbacks(options.experimental_write_callbacks, output)\n    if context.executing_eagerly():\n        context.async_wait()\n    end_time = time.time()\n    if not checkpoint_context.in_async_metrics_context():\n        metrics.AddCheckpointWriteDuration(api_label=_CHECKPOINT_V2, microseconds=_get_duration_microseconds(start_time, end_time))\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        if not checkpoint_context.in_async_metrics_context():\n            metrics.AddTrainingTimeSaved(api_label=_CHECKPOINT_V2, microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n        if checkpoint_context.in_preemption_save_context():\n            _preemption_checkpoint_saved_time_usecs.get_cell().increase_by(_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n        _END_TIME_OF_LAST_WRITE = end_time\n    metrics.RecordCheckpointSize(api_label=_CHECKPOINT_V2, filesize=_get_checkpoint_size(output))\n    return output", "docstring": "Internal method that implements Checkpoint.write().\n\nArgs:\nfile_prefix: A prefix to use for the checkpoint filenames\n(/path/to/directory/and_a_prefix).\noptions: Optional `tf.train.CheckpointOptions` object.\n\nReturns:\nThe full path to the checkpoint (i.e. `file_prefix`).", "source": "github-repos"}
{"code": "def _query(cls, *args, **kwds):\n    \n    \n    if 'distinct' in kwds:\n      if 'group_by' in kwds:\n        raise TypeError(\n            'cannot use distinct= and group_by= at the same time')\n      projection = kwds.get('projection')\n      if not projection:\n        raise TypeError(\n            'cannot use distinct= without projection=')\n      if kwds.pop('distinct'):\n        kwds['group_by'] = projection\n\n    \n    from .query import Query  \n    qry = Query(kind=cls._get_kind(), **kwds)\n    qry = qry.filter(*cls._default_filters())\n    qry = qry.filter(*args)\n    return qry", "docstring": "Create a Query object for this class.\n\nArgs:\ndistinct: Optional bool, short hand for group_by = projection.\n*args: Used to apply an initial filter\n**kwds: are passed to the Query() constructor.\n\nReturns:\nA Query object.", "source": "juraj-google-style"}
{"code": "def ShlexSplit(string):\n    precondition.AssertType(string, Text)\n    if PY2:\n        string = string.encode('utf-8')\n    parts = shlex.split(string)\n    if PY2:\n        parts = [part.decode('utf-8') for part in parts]\n    return parts", "docstring": "A wrapper for `shlex.split` that works with unicode objects.\n\nArgs:\nstring: A unicode string to split.\n\nReturns:\nA list of unicode strings representing parts of the input string.", "source": "codesearchnet"}
{"code": "def __init__(self, autoconnect=True, password=None, db=0,\n                 **connection_kwargs):\n        \n        if 'read_callback' in connection_kwargs or \\\n                'close_callback' in connection_kwargs:\n            raise Exception(\"read_callback and close_callback are not allowed \"\n                            \"to be used here.\")\n        self.connection_kwargs = connection_kwargs\n        self.autoconnect = autoconnect\n        self.password = password\n        self.db = db\n        self.__connection = None\n        self.subscribed = False\n        self.__connection = None\n        self.__reader = None\n        \n        self.__callback_queue = None\n        \n        self._condition = tornado.locks.Condition()\n        self._reply_list = None", "docstring": "Constructor.\n\nArgs:\nautoconnect (boolean): True if the client is in autoconnect mode\n(and in autoreconnection mode) (default True).\npassword (string): the password to authenticate with.\ndb (int): database number.\n**connection_kwargs: :class:`Connection` object kwargs.", "source": "juraj-google-style"}
{"code": "def unzip_file(source_file, dest_dir=None, mkdir=False):\n    if (dest_dir is None):\n        (dest_dir, fname) = os.path.split(source_file)\n    elif (not os.path.isdir(dest_dir)):\n        if mkdir:\n            preparedir(dest_dir)\n        else:\n            created = preparedir(dest_dir, False)\n            if (not created):\n                raise ValueError(('Failed to find %s.' % dest_dir))\n    with zipfile.ZipFile(source_file) as zf:\n        for member in zf.infolist():\n            words = member.filename.split('\\\\')\n            for word in words[:(- 1)]:\n                (drive, word) = os.path.splitdrive(word)\n                (head, word) = os.path.split(word)\n                if (word in (os.curdir, os.pardir, '')):\n                    continue\n                dest_dir = os.path.join(dest_dir, word)\n            zf.extract(member, dest_dir)", "docstring": "Unzip a compressed file.\n\nArgs:\nsource_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)\ndest_dir: Target folder to extract to (e.g. c:/ladybug).\nDefault is set to the same directory as the source file.\nmkdir: Set to True to create the directory if doesn't exist (Default: False)", "source": "codesearchnet"}
{"code": "def beautify(self, string):\n\t\t\n\n\t\tif not string:\n\t\t\treturn string\n\n\t\t\n\t\tstring, phrases = self.parse(string)\n\n\t\tif not phrases:\n\t\t\treturn string\n\n\t\tif not self.positional and not self.always:\n\t\t\traise errors.ArgumentError(\"Found phrases, but no styles \"\n\t\t\t\t\t\t\t\t\t   \"were supplied!\")\n\n\t\treturn self.stringify(string, phrases)", "docstring": "Wraps together all actions needed to beautify a string, i.e.\nparse the string and then stringify the phrases (replace tags\nwith formatting codes).\n\nArguments:\nstring (str): The string to beautify/parse.\n\nReturns:\nThe parsed, stringified and ultimately beautified string.\n\nRaises:\nerrors.ArgumentError if phrases were found, but not a single style\n(flag combination) was supplied.", "source": "juraj-google-style"}
{"code": "def Validate(self, sections=None, parameters=None):\n    if isinstance(sections, string_types):\n        sections = [sections]\n    if (sections is None):\n        sections = []\n    if (parameters is None):\n        parameters = []\n    validation_errors = {}\n    for section in sections:\n        for descriptor in self.type_infos:\n            if descriptor.name.startswith((section + '.')):\n                try:\n                    self.Get(descriptor.name)\n                except (Error, ValueError) as e:\n                    validation_errors[descriptor.name] = e\n    for parameter in parameters:\n        for descriptor in self.type_infos:\n            if (parameter == descriptor.name):\n                try:\n                    self.Get(descriptor.name)\n                except (Error, ValueError) as e:\n                    validation_errors[descriptor.name] = e\n    return validation_errors", "docstring": "Validate sections or individual parameters.\n\nThe GRR configuration file contains several sections, used by different\ncomponents. Many of these components don't care about other sections. This\nmethod allows a component to declare in advance what sections and parameters\nit cares about, and have these validated.\n\nArgs:\nsections: A list of sections to validate. All parameters within the\nsection are validated.\nparameters: A list of specific parameters (in the format section.name) to\nvalidate.\n\nReturns:\ndict of {parameter: Exception}, where parameter is a section.name string.", "source": "codesearchnet"}
{"code": "def _generate_latex_source(circuit, filename=None, scale=0.7, style=None, reverse_bits=False, plot_barriers=True, justify=None):\n    (qregs, cregs, ops) = utils._get_layered_instructions(circuit, reverse_bits=reverse_bits, justify=justify)\n    qcimg = _latex.QCircuitImage(qregs, cregs, ops, scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits)\n    latex = qcimg.latex()\n    if filename:\n        with open(filename, 'w') as latex_file:\n            latex_file.write(latex)\n    return latex", "docstring": "Convert QuantumCircuit to LaTeX string.\n\nArgs:\ncircuit (QuantumCircuit): input circuit\nscale (float): image scaling\nfilename (str): optional filename to write latex\nstyle (dict or str): dictionary of style or file name of style file\nreverse_bits (bool): When set to True reverse the bit order inside\nregisters for the output visualization.\nplot_barriers (bool): Enable/disable drawing barriers in the output\ncircuit. Defaults to True.\njustify (str) : `left`, `right` or `none`. Defaults to `left`. Says how\nthe circuit should be justified.\n\nReturns:\nstr: Latex string appropriate for writing to file.", "source": "codesearchnet"}
{"code": "def generate_stack_policy_args(stack_policy=None):\n    \n\n    args = {}\n    if stack_policy:\n        logger.debug(\"Stack has a stack policy\")\n        if stack_policy.url:\n            \n            \n            \n            \n            \n            \n            raise NotImplementedError\n        else:\n            args[\"StackPolicyBody\"] = stack_policy.body\n    return args", "docstring": "Converts a stack policy object into keyword args.\n\nArgs:\nstack_policy (:class:`stacker.providers.base.Template`): A template\nobject representing a stack policy.\n\nReturns:\ndict: A dictionary of keyword arguments to be used elsewhere.", "source": "juraj-google-style"}
{"code": "def _find_channel_index(data_format):\n    for (i, c) in enumerate(data_format):\n        if (c == 'C'):\n            return i\n    raise ValueError('data_format requires a channel dimension. Got: {}'.format(data_format))", "docstring": "Returns the index of the channel dimension.\n\nArgs:\ndata_format: A string of characters corresponding to Tensor dimensionality.\n\nReturns:\nchannel_index: An integer indicating the channel dimension.\n\nRaises:\nValueError: If no channel dimension was found.", "source": "codesearchnet"}
{"code": "def format(self, *args, **kwargs):\n        \n        inplace = kwargs.pop(\"inplace\", False)\n        if not inplace:\n            return str(self).format(*args, **kwargs)\n        self._lines = str(self).format(*args, **kwargs).splitlines()", "docstring": "Format the string representation of the editor.\n\nArgs:\ninplace (bool): If True, overwrite editor's contents with formatted contents", "source": "juraj-google-style"}
{"code": "def get_snmp_configuration(self):\n    uri = '{}{}'.format(self.data['uri'], self.SNMP_CONFIGURATION_PATH)\n    return self._helper.do_get(uri)", "docstring": "Gets the SNMP configuration for a logical interconnect.\n\nReturns:\ndict: SNMP configuration.", "source": "codesearchnet"}
{"code": "def noisy_moment(self, moment: 'cirq.Moment', system_qubits: Sequence['cirq.Qid']) -> 'cirq.OP_TREE':\n    if (not hasattr(self.noisy_moments, '_not_overridden')):\n        return self.noisy_moments([moment], system_qubits)\n    if (not hasattr(self.noisy_operation, '_not_overridden')):\n        return [self.noisy_operation(op) for op in moment]\n    assert False, 'Should be unreachable.'", "docstring": "Adds noise to the operations from a moment.\n\nArgs:\nmoment: The moment to add noise to.\nsystem_qubits: A list of all qubits in the system.\n\nReturns:\nAn OP_TREE corresponding to the noisy operations for the moment.", "source": "codesearchnet"}
{"code": "def assert_no_new_tensors(f: _F) -> _F:\n\n    def decorator(self: 'TensorFlowTestCase', **kwargs):\n        \n\n        def _is_tensorflow_object(obj) -> bool:\n            try:\n                return isinstance(obj, (tensor_lib.Tensor, variables.Variable, tensor_shape.Dimension, tensor_shape.TensorShape))\n            except (ReferenceError, AttributeError):\n                return False\n        tensors_before = set((id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj)))\n        outside_executed_eagerly = cast(bool, context.executing_eagerly())\n        outside_graph_key = ops.get_default_graph()._graph_key\n        with ops.Graph().as_default():\n            ops.get_default_graph()._graph_key = outside_graph_key\n            if outside_executed_eagerly:\n                with context.eager_mode():\n                    result = f(self, **kwargs)\n            else:\n                result = f(self, **kwargs)\n        context.context()._clear_caches()\n        gc.collect()\n        tensors_after = [obj for obj in gc.get_objects() if _is_tensorflow_object(obj) and id(obj) not in tensors_before]\n        if tensors_after:\n            raise AssertionError('%d Tensors not deallocated after test: %s' % (len(tensors_after), str(tensors_after)))\n        return result\n    return tf_decorator.make_decorator(f, decorator)", "docstring": "Decorator for asserting that no new Tensors persist after a test.\n\nMainly useful for checking that code using the Python C API has correctly\nmanipulated reference counts.\n\nClears the caches that it knows about, runs the garbage collector, then checks\nthat there are no Tensor or Tensor-like objects still around. This includes\nTensors to which something still has a reference (e.g. from missing\nPy_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one\nof the objects has __del__ defined).\n\nArgs:\nf: The test case to run.\n\nReturns:\nThe decorated test case.", "source": "github-repos"}
{"code": "def headers_present(self, headers):\n        \n        headers = {name: re.compile('(.*)') for name in headers}\n        self.add_matcher(matcher('HeadersMatcher', headers))", "docstring": "Defines a list of headers that must be present in the\noutgoing request in order to satisfy the matcher, no matter what value\nthe headers hosts.\n\nHeader keys are case insensitive.\n\nArguments:\nheaders (list|tuple): header keys to match.\n\nReturns:\nself: current Mock instance.\n\nExample::\n\n(pook.get('server.com/api')\n.headers_present(['content-type', 'Authorization']))", "source": "juraj-google-style"}
{"code": "def create_row_token_type_ids_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:\n    table_row_ids = list(zip(*table_values))[2] if table_values else []\n    return [0] * (1 + len(query_ids) + 1) + list(table_row_ids)", "docstring": "Creates the row token type IDs according to the query token IDs and a list of table values.\n\nArgs:\nquery_ids (`List[int]`): list of token IDs corresponding to the ID.\ntable_values (`List[TableValue]`): lift of table values, which are named tuples containing the\ntoken value, the column ID and the row ID of said token.\n\nReturns:\n`List[int]`: List of ints containing the row token type IDs values.", "source": "github-repos"}
{"code": "def from_string(string_data, file_format=\"xyz\"):\n        \n        mols = pb.readstring(str(file_format), str(string_data))\n        return BabelMolAdaptor(mols.OBMol)", "docstring": "Uses OpenBabel to read a molecule from a string in all supported\nformats.\n\nArgs:\nstring_data: String containing molecule data.\nfile_format: String specifying any OpenBabel supported formats.\n\nReturns:\nBabelMolAdaptor object", "source": "juraj-google-style"}
{"code": "def detect_builtin_shadowing_definitions(self, contract):\n    result = []\n    for function in contract.functions:\n        if (function.contract == contract):\n            if self.is_builtin_symbol(function.name):\n                result.append((self.SHADOWING_FUNCTION, function, None))\n            result += self.detect_builtin_shadowing_locals(function)\n    for modifier in contract.modifiers:\n        if (modifier.contract == contract):\n            if self.is_builtin_symbol(modifier.name):\n                result.append((self.SHADOWING_MODIFIER, modifier, None))\n            result += self.detect_builtin_shadowing_locals(modifier)\n    for variable in contract.variables:\n        if (variable.contract == contract):\n            if self.is_builtin_symbol(variable.name):\n                result.append((self.SHADOWING_STATE_VARIABLE, variable, None))\n    for event in contract.events:\n        if (event.contract == contract):\n            if self.is_builtin_symbol(event.name):\n                result.append((self.SHADOWING_EVENT, event, None))\n    return result", "docstring": "Detects if functions, access modifiers, events, state variables, or local variables are named after built-in\nsymbols. Any such definitions are returned in a list.\n\nReturns:\nlist of tuple: (type, definition, [local variable parent])", "source": "codesearchnet"}
{"code": "def _fit(self, col):\n        \n\n        column = col[self.col_name].replace({np.nan: np.inf})\n        frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict()\n        \n        start = 0\n        end = 0\n        num_vals = len(col)\n\n        for val in frequencies:\n            prob = frequencies[val] / num_vals\n            end = start + prob\n            interval = (start, end)\n            mean = np.mean(interval)\n            std = prob / 6\n            self.probability_map[val] = (interval, mean, std)\n            start = end", "docstring": "Create a map of the empirical probability for each category.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.", "source": "juraj-google-style"}
{"code": "def multi(self, **kwargs):\n        \n        path = self._get_path('multi')\n\n        response = self._GET(path, kwargs)\n        self._set_attrs_to_values(response)\n        return response", "docstring": "Search the movie, tv show and person collections with a single query.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\ninclude_adult: (optional) Toggle the inclusion of adult titles.\nExpected value is True or False.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"}
{"code": "def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    scale = [1.0] * self.out_channel_size\n    offset = [0.5] * self.out_channel_size\n    mean, variance = (scale, offset)\n    out = nn_ops.conv2d(input_tensor, self.filters, strides=strides, dilations=dilations, padding=padding, data_format='NHWC')\n    if has_bias:\n        out = nn_ops.bias_add(out, self.bias, data_format='NHWC')\n    if has_batch_norm:\n        out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False)\n    if activation_fn is not None:\n        out = activation_fn(out)\n    return {'output': out}", "docstring": "Performs a 2D convolution operation.\n\nArgs:\ninput_tensor: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def getWhoisInfo(domain):\n    new = []\n    try:\n        emails = {}\n        emails['type'] = 'i3visio.alias'\n        emails['value'] = str(domain.split('.')[0])\n        emails['attributes'] = []\n        new.append(emails)\n    except:\n        pass\n    info = whois.whois(domain)\n    if (info.status == None):\n        raise Exception((('UnknownDomainError: ' + domain) + ' could not be resolved.'))\n    try:\n        emails = {}\n        emails['type'] = 'i3visio.email'\n        if (type(info.emails) is not list):\n            aux = [info.emails]\n            emails['value'] = json.dumps(aux)\n        else:\n            emails['value'] = json.dumps(info.emails)\n        emails['attributes'] = []\n        new.append(emails)\n    except:\n        pass\n    try:\n        tmp = {}\n        tmp['type'] = 'i3visio.location.country'\n        tmp['value'] = str(info.country)\n        tmp['attributes'] = []\n        new.append(tmp)\n    except:\n        pass\n    try:\n        tmp = {}\n        tmp['type'] = 'i3visio.registrar'\n        tmp['value'] = str(info.registrar)\n        tmp['attributes'] = []\n        new.append(tmp)\n    except:\n        pass\n    try:\n        tmp = {}\n        tmp['type'] = 'i3visio.fullname'\n        try:\n            tmp['value'] = str(info.name)\n        except:\n            tmp['value'] = info.name\n        tmp['attributes'] = []\n        new.append(tmp)\n    except:\n        pass\n    return new", "docstring": "Method that trie to recover the whois info from a domain.\n\nArgs:\n-----\ndomain: The domain to verify.\n\nReturns:\n--------\ndict: A dictionary containing the result as an i3visio entity with its\n`value`, `type` and `attributes`.", "source": "codesearchnet"}
{"code": "def func(self, w, *args):\n        \n        x0 = args[0]\n        x1 = args[1]\n\n        n0 = x0.shape[0]\n        n1 = x1.shape[0]\n\n        \n        n = max(n0, n1) * 10\n        idx0 = np.random.choice(range(n0), size=n)\n        idx1 = np.random.choice(range(n1), size=n)\n\n        \n        b0 = np.ones((n0, 1))\n        b1 = np.ones((n1, 1))\n        i1 = self.i + 1\n        h = self.h\n        h1 = h + 1\n\n        \n        \n        if sparse.issparse(x0):\n            p0 = np.hstack((sigm(sparse.hstack((x0, b0)).dot(w[:-h1].reshape(\n                               i1, h))), b0)).dot(w[-h1:].reshape(h1, 1))\n            p1 = np.hstack((sigm(sparse.hstack((x1, b1)).dot(w[:-h1].reshape(\n                               i1, h))), b1)).dot(w[-h1:].reshape(h1, 1))\n        else:\n            p0 = np.hstack((sigm(np.hstack((x0, b0)).dot(w[:-h1].reshape(\n                               i1, h))), b0)).dot(w[-h1:].reshape(h1, 1))\n            p1 = np.hstack((sigm(np.hstack((x1, b1)).dot(w[:-h1].reshape(\n                               i1, h))), b1)).dot(w[-h1:].reshape(h1, 1))\n\n        p0 = p0[idx0]\n        p1 = p1[idx1]\n\n        \n        \n        \n        \n        return .5 * (sum((1 - p1 + p0) ** 2) / n +\n                     self.l1 * sum(w[:-h1] ** 2) / (i1 * h) +\n                     self.l2 * sum(w[-h1:] ** 2) / h1)", "docstring": "Return the costs of the neural network for predictions.\n\nArgs:\nw (array of float): weight vectors such that:\nw[:-h1] -- weights between the input and h layers\nw[-h1:] -- weights between the h and output layers\nargs: features (args[0]) and target (args[1])\n\nReturns:\ncombined cost of RMSE, L1, and L2 regularization", "source": "juraj-google-style"}
{"code": "def get_gains_losses(changes):\n    \n    res = {'gains': [], 'losses': []}\n    for change in changes:\n        if change > 0:\n            res['gains'].append(change)\n        else:\n            res['losses'].append(change * -1)\n    logger.debug('Gains: {0}'.format(res['gains']))\n    logger.debug('Losses: {0}'.format(res['losses']))\n    return res", "docstring": "Categorizes changes into gains and losses\n\nArgs:\nchanges: List of floats of price changes between entries in JSON.\n\nReturns:\nDict of changes with keys 'gains' and 'losses'.\nAll values are positive.", "source": "juraj-google-style"}
{"code": "def create_pipeline_stage(self, pipeline_key, name, **kwargs):\n\t\t\n\t\t\n\t\tif not (pipeline_key and name):\n\t\t\treturn requests.codes.bad_request, None\n\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.pipelines_suffix,\n\t\t\t\t\t\tpipeline_key,\n\t\t\t\t\t\tself.stages_suffix])\n\t\t\n\t\tkwargs.update({'name':name})\n\n\t\tnew_box = StreakStage(**kwargs)\n\t\t\n\t\tcode, data = self._req('put', uri, new_box.to_dict(rw = True))\n\t\t\n\t\treturn code, data", "docstring": "Creates a pipeline stage with the provided attributes.\nArgs:\nname\trequired name string\nkwargs\t{..} see StreakStage object for details\nreturn\t(status code, stage dict)", "source": "juraj-google-style"}
{"code": "def _format_subscripts(self, subscripts, value, limit=10, indent=2):\n    lines = []\n    subscripts = np.transpose(subscripts)\n    prefix = ' ' * indent\n    if np.ndim(value) == 0:\n        return [prefix + '[0] : ' + str(value)]\n    for subscript in itertools.islice(subscripts, limit):\n        lines.append(prefix + str(subscript) + ' : ' + str(value[tuple(subscript)]))\n    if len(subscripts) > limit:\n        lines.append(prefix + '...')\n    return lines", "docstring": "Generate a summary of ndarray subscripts as a list of str.\n\nIf limit == N, this method will print up to the first N subscripts on\nseparate\nlines. A line of ellipses (...) will be appended at the end if the number of\nsubscripts exceeds N.\n\nArgs:\nsubscripts: The tensor (np.ndarray) subscripts, of the same format as\nnp_where()'s return value, i.e., a tuple of arrays with each array\ncorresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).\nvalue: (np.ndarray) value of the tensor.\nlimit: (int) The maximum number of indices to print.\nindent: (int) Number of characters to indent at the beginning of each\nline.\n\nReturns:\n(list of str) the multi-line representation of the subscripts and values,\npotentially with omission at the end.", "source": "github-repos"}
{"code": "def temporal_padding(x, padding=(1, 1)):\n    assert len(padding) == 2\n    pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n    return array_ops.pad(x, pattern)", "docstring": "Pads the middle dimension of a 3D tensor.\n\nArgs:\nx: Tensor or variable.\npadding: Tuple of 2 integers, how many zeros to\nadd at the start and end of dim 1.\n\nReturns:\nA padded 3D tensor.", "source": "github-repos"}
{"code": "def nodes(self, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('No partition graphs have been loaded.')\n    if device_name is None:\n        nodes = []\n        for device_name in self._debug_graphs:\n            nodes.extend(self._debug_graphs[device_name].node_inputs.keys())\n        return nodes\n    else:\n        if device_name not in self._debug_graphs:\n            raise ValueError('Invalid device name: %s' % device_name)\n        return self._debug_graphs[device_name].node_inputs.keys()", "docstring": "Get a list of all nodes from the partition graphs.\n\nArgs:\ndevice_name: (`str`) name of device. If None, all nodes from all available\ndevices will be included.\n\nReturns:\nAll nodes' names, as a list of str.\n\nRaises:\nLookupError: If no partition graphs have been loaded.\nValueError: If specified node name does not exist.", "source": "github-repos"}
{"code": "def detect_timezone():\n    if (sys.platform == 'win32'):\n        tz = _detect_timezone_windows()\n        if (tz is not None):\n            return tz\n    tz = _detect_timezone_environ()\n    if (tz is not None):\n        return tz\n    tz = _detect_timezone_etc_timezone()\n    if (tz is not None):\n        return tz\n    tz = _detect_timezone_etc_localtime()\n    if (tz is not None):\n        return tz\n    warnings.warn(\"Had to fall back to worst detection method (the 'PHP' method).\")\n    tz = _detect_timezone_php()\n    if (tz is not None):\n        return tz\n    raise pytz.UnknownTimeZoneError('Unable to detect your timezone!')", "docstring": "Try and detect the timezone that Python is currently running in.\n\nWe have a bunch of different methods for trying to figure this out (listed in\norder they are attempted).\n* In windows, use win32timezone.TimeZoneInfo.local()\n* Try TZ environment variable.\n* Try and find /etc/timezone file (with timezone name).\n* Try and find /etc/localtime file (with timezone data).\n* Try and match a TZ to the current dst/offset/shortname.\n\nReturns:\nThe detected local timezone as a tzinfo object\n\nRaises:\npytz.UnknownTimeZoneError: If it was unable to detect a timezone.", "source": "codesearchnet"}
{"code": "def convert_mass_to_atomic_fractions(mass_fractions):\n    \n    atomic_fractions = {}\n\n    for z, mass_fraction in mass_fractions.items():\n        atomic_fractions[z] = mass_fraction / pyxray.element_atomic_weight(z)\n\n    total_fraction = sum(atomic_fractions.values())\n\n    for z, fraction in atomic_fractions.items():\n        try:\n            atomic_fractions[z] = fraction / total_fraction\n        except ZeroDivisionError:\n            atomic_fractions[z] = 0.0\n\n    return atomic_fractions", "docstring": "Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`.\n\nArgs:\nmass_fractions (dict): mass fraction :class:`dict`.\nThe composition is specified by a dictionary.\nThe keys are atomic numbers and the values weight fractions.\nNo wildcard are accepted.", "source": "juraj-google-style"}
{"code": "def parse_date(date_string, ignoretz=True):\n    try:\n        return parser.parse(date_string, ignoretz=ignoretz)\n    except TypeError:\n        return None", "docstring": "Parse a string as a date. If the string fails to parse, `None` will be returned instead\n\n>>> parse_date('2017-08-15T18:24:31')\ndatetime.datetime(2017, 8, 15, 18, 24, 31)\n\nArgs:\ndate_string (`str`): Date in string format to parse\nignoretz (`bool`): If set ``True``, ignore time zones and return a naive :class:`datetime` object.\n\nReturns:\n`datetime`, `None`", "source": "codesearchnet"}
{"code": "def _ReadTableHeader(self, file_object, table_header_offset):\n    \n    data_type_map = self._GetDataTypeMap('keychain_table_header')\n\n    table_header, _ = self._ReadStructureFromFileObject(\n        file_object, table_header_offset, data_type_map)\n\n    return table_header", "docstring": "Reads the table header.\n\nArgs:\nfile_object (file): file-like object.\ntable_header_offset (int): offset of the tables header relative to\nthe start of the file.\n\nReturns:\nkeychain_table_header: table header.\n\nRaises:\nParseError: if the table header cannot be read.", "source": "juraj-google-style"}
{"code": "def argmin(input_, key=None):\n    \n    \n    \n    \n    \n    \n    \n    if isinstance(input, dict):\n        return list(input.keys())[argmin(list(input.values()), key=key)]\n    else:\n        if key is None:\n            def _key(item):\n                return item[1]\n        else:\n            def _key(item):\n                return key(item[1])\n        return min(enumerate(input), key=_key)[0]", "docstring": "Returns index / key of the item with the smallest value.\n\nArgs:\ninput_ (dict or list):\n\nNote:\na[argmin(a, key=key)] == min(a, key=key)", "source": "juraj-google-style"}
{"code": "def from_config(cls, config):\n    return cls(**config)", "docstring": "Creates a layer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same layer from the config\ndictionary. It does not handle layer connectivity\n(handled by Network), nor weights (handled by `set_weights`).\n\nArgs:\nconfig: A Python dictionary, typically the\noutput of get_config.\n\nReturns:\nA layer instance.", "source": "github-repos"}
{"code": "def bitwise_xor(x, y):\n    if any_symbolic_tensors((x, y)):\n        return BitwiseXor().symbolic_call(x, y)\n    return backend.numpy.bitwise_xor(x, y)", "docstring": "Compute the bit-wise XOR of two arrays element-wise.\n\nComputes the bit-wise XOR of the underlying binary representation of the\nintegers in the input arrays. This ufunc implements the C/Python operator\n`^`.\n\nArgs:\nx: Input integer tensor.\ny: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"}
{"code": "def insert(self, entity_id, property_uri, value):\n    if (not entity_id.startswith('http')):\n        entity_uri = urllib.parse.urljoin(self.base_url, entity_id)\n    else:\n        entity_uri = entity_id\n    if entity_uri.endswith('/'):\n        entity_uri = entity_uri[:(- 1)]\n    if (not entity_id.endswith('fcr:metadata')):\n        entity_uri = '/'.join([entity_uri, 'fcr:metadata'])\n    if (not self.exists(entity_id)):\n        self.create(entity_id)\n    sparql_template = Template('$prefix\\n        INSERT DATA {\\n             <$entity> $prop_uri $value_str ;\\n        }')\n    sparql = sparql_template.substitute(prefix=build_prefixes(self.namespaces), entity=entity_uri, prop_uri=property_uri, value_str=self.__value_format__(value))\n    update_request = urllib.request.Request(entity_uri, data=sparql.encode(), method='PATCH', headers={'Content-Type': 'application/sparql-update'})\n    try:\n        response = urllib.request.urlopen(update_request)\n    except urllib.error.HTTPError:\n        print('Error trying patch {}, sparql=\\n{}'.format(entity_uri, sparql))\n        return False\n    if (response.code < 400):\n        return True\n    return False", "docstring": "Method inserts a new entity's property in Fedora4 Repository\n\nArgs:\nentity_id(string): Unique ID of Fedora object\nproperty_uri(string): URI of property\nvalue: Value of the property, can be literal or URI reference\n\nReturns:\nboolean: True if successful changed in Fedora, False otherwise", "source": "codesearchnet"}
{"code": "def setKeepAliveTimeOut(self, iTimeOut):\n        \n        print '%s call setKeepAliveTimeOut' % self.port\n        print iTimeOut\n        try:\n            cmd = WPANCTL_CMD + 'setprop NCP:SleepyPollInterval %s' % str(iTimeOut*1000)\n            print cmd\n            return self.__sendCommand(cmd)[0] != 'Fail'\n        except Exception, e:\n            ModuleHelper.WriteIntoDebugLogger('setKeepAliveTimeOut() Error: ' + str(e))", "docstring": "set keep alive timeout for device\nhas been deprecated and also set SED polling rate\n\nArgs:\niTimeOut: data poll period for sleepy end device\n\nReturns:\nTrue: successful to set the data poll period for SED\nFalse: fail to set the data poll period for SED", "source": "juraj-google-style"}
{"code": "def _normalize_angle(angle, range, step):\n    \n    while angle <= range[0]:\n        angle += step\n    while angle >= range[1]:\n        angle -= step\n    return angle", "docstring": "Finds an angle that matches the given one modulo step.\n\nIncrements and decrements the given value with a given step.\n\nArgs:\nrange: a 2-tuple of min and max target values.\nstep: tuning step.\n\nReturns:\nNormalized value within a given range.", "source": "juraj-google-style"}
{"code": "def register_agent(self, host, sweep_id=None, project_name=None):\n        \n        mutation = gql()\n        if project_name is None:\n            project_name = self.settings('project')\n\n        \n        def no_retry_400(e):\n            if not isinstance(e, requests.HTTPError):\n                return True\n            if e.response.status_code != 400:\n                return True\n            body = json.loads(e.response.content)\n            raise UsageError(body['errors'][0]['message'])\n\n        response = self.gql(mutation, variable_values={\n            'host': host,\n            'entityName': self.settings(\"entity\"),\n            'projectName': project_name,\n            'sweep': sweep_id}, check_retry_fn=no_retry_400)\n        return response['createAgent']['agent']", "docstring": "Register a new agent\n\nArgs:\nhost (str): hostname\npersistent (bool): long running or oneoff\nsweep (str): sweep id\nproject_name: (str): model that contains sweep", "source": "juraj-google-style"}
{"code": "def _next_file(self):\n    while True:\n        if self._bucket_iter:\n            try:\n                return self._bucket_iter.next().filename\n            except StopIteration:\n                self._bucket_iter = None\n                self._bucket = None\n        if (self._index >= len(self._filenames)):\n            return\n        filename = self._filenames[self._index]\n        self._index += 1\n        if ((self._delimiter is None) or (not filename.endswith(self._delimiter))):\n            return filename\n        self._bucket = cloudstorage.listbucket(filename, delimiter=self._delimiter)\n        self._bucket_iter = iter(self._bucket)", "docstring": "Find next filename.\n\nself._filenames may need to be expanded via listbucket.\n\nReturns:\nNone if no more file is left. Filename otherwise.", "source": "codesearchnet"}
{"code": "def CalculateForecastStats(matched, available, possible=None):\n    if (matched > 0):\n        available_percent = ((float(available) / matched) * 100.0)\n    else:\n        available_percent = 0\n    if (possible is not None):\n        if (matched > 0):\n            possible_percent = ((possible / float(matched)) * 100.0)\n        else:\n            possible_percent = 0\n    else:\n        possible_percent = None\n    return (available_percent, possible_percent)", "docstring": "Calculate forecast percentage stats.\n\nArgs:\nmatched: The number of matched impressions.\navailable: The number of available impressions.\npossible: The optional number of possible impressions.\n\nReturns:\nThe percentage of impressions that are available and possible.", "source": "codesearchnet"}
{"code": "def stats(self):\n    per_utt_stats = self.stats_per_utterance()\n    return stats.DataStats.concatenate(per_utt_stats.values())", "docstring": "Return statistics calculated overall samples of all utterances in the corpus.\n\nReturns:\nDataStats: A DataStats object containing statistics overall samples in the corpus.", "source": "codesearchnet"}
{"code": "def safe_date(self, x):\n        \n\n        t = x[self.col_name]\n        if np.isnan(t):\n            return t\n\n        elif np.isposinf(t):\n            t = sys.maxsize\n\n        elif np.isneginf(t):\n            t = -sys.maxsize\n\n        tmp = time.localtime(float(t) / 1e9)\n        return time.strftime(self.date_format, tmp)", "docstring": "Transform x[self.col_name] into a date string.\n\nArgs:\nx(dict like / pandas.Series): Row containing data to cast safely.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def memory_read(self, addr, num_units, zone=None, nbits=None):\n    buf_size = num_units\n    buf = None\n    access = 0\n    if (nbits is None):\n        buf = (ctypes.c_uint8 * buf_size)()\n        access = 0\n    elif (nbits == 8):\n        buf = (ctypes.c_uint8 * buf_size)()\n        access = 1\n    elif (nbits == 16):\n        buf = (ctypes.c_uint16 * buf_size)()\n        access = 2\n        buf_size = (buf_size * access)\n    elif (nbits == 32):\n        buf = (ctypes.c_uint32 * buf_size)()\n        access = 4\n        buf_size = (buf_size * access)\n    else:\n        raise ValueError(('Given bit size is invalid: %s' % nbits))\n    args = [addr, buf_size, buf, access]\n    method = self._dll.JLINKARM_ReadMemEx\n    if (zone is not None):\n        method = self._dll.JLINKARM_ReadMemZonedEx\n        args.append(zone.encode())\n    units_read = method(*args)\n    if (units_read < 0):\n        raise errors.JLinkReadException(units_read)\n    return buf[:units_read]", "docstring": "Reads memory from a target system or specific memory zone.\n\nThe optional ``zone`` specifies a memory zone to access to read from,\ne.g. ``IDATA``, ``DDATA``, or ``CODE``.\n\nThe given number of bits, if provided, must be either ``8``, ``16``, or\n``32``.  If not provided, always reads ``num_units`` bytes.\n\nArgs:\nself (JLink): the ``JLink`` instance\naddr (int): start address to read from\nnum_units (int): number of units to read\nzone (str): optional memory zone name to access\nnbits (int): number of bits to use for each unit\n\nReturns:\nList of units read from the target system.\n\nRaises:\nJLinkException: if memory could not be read.\nValueError: if ``nbits`` is not ``None``, and not in ``8``, ``16``,\nor ``32``.", "source": "codesearchnet"}
{"code": "def upload_metric(self, dataset_name, table_name, run_id):\n    \n    expected_file = os.path.join(\n        self._logging_dir, logger.METRIC_LOG_FILE_NAME)\n    with tf.gfile.GFile(expected_file) as f:\n      lines = f.readlines()\n      metrics = []\n      for line in filter(lambda l: l.strip(), lines):\n        metric = json.loads(line)\n        metric[\"run_id\"] = run_id\n        metrics.append(metric)\n      table_ref = self._bq_client.dataset(dataset_name).table(table_name)\n      errors = self._bq_client.insert_rows_json(table_ref, metrics)\n      if errors:\n        tf.logging.error(\n            \"Failed to upload benchmark info to bigquery: {}\".format(errors))", "docstring": "Upload metric information to Bigquery.\n\nArgs:\ndataset_name: string, the name of bigquery dataset where the data will be\nuploaded.\ntable_name: string, the name of bigquery table under the dataset where\nthe metric data will be uploaded. This is different from the\nbenchmark_run table.\nrun_id: string, a unique ID that will be attached to the data, usually\nthis is a UUID4 format. This should be the same as the benchmark run_id.", "source": "juraj-google-style"}
{"code": "def storage_volume_attachments(self):\n    if (not self.__storage_volume_attachments):\n        self.__storage_volume_attachments = StorageVolumeAttachments(self.__connection)\n    return self.__storage_volume_attachments", "docstring": "Gets the StorageVolumeAttachments API client.\n\nReturns:\nStorageVolumeAttachments:", "source": "codesearchnet"}
{"code": "def market_if_touched(self, accountID, **kwargs):\n        \n        return self.create(\n            accountID,\n            order=MarketIfTouchedOrderRequest(**kwargs)\n        )", "docstring": "Shortcut to create a MarketIfTouched Order in an Account\n\nArgs:\naccountID : The ID of the Account\nkwargs : The arguments to create a MarketIfTouchedOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "juraj-google-style"}
{"code": "def pack_container(in_container, out_file):\n    \n    container_filename = local.path(out_file).basename\n    out_container = local.cwd / \"container-out\" / container_filename\n    out_dir = out_container.dirname\n\n    \n    with local.cwd(in_container):\n        tar(\"cjf\", out_container, \".\")\n    c_hash = download.update_hash(out_container)\n    if out_dir.exists():\n        mkdir(\"-p\", out_dir)\n    mv(out_container, out_file)\n    mv(out_container + \".hash\", out_file + \".hash\")\n\n    new_container = {\"path\": out_file, \"hash\": str(c_hash)}\n    CFG[\"container\"][\"known\"] += new_container", "docstring": "Pack a container image into a .tar.bz2 archive.\n\nArgs:\nin_container (str): Path string to the container image.\nout_file (str): Output file name.", "source": "juraj-google-style"}
{"code": "def get_course_details(self, course_id):\n    try:\n        return self.client.course(course_id).get()\n    except (SlumberBaseException, ConnectionError, Timeout) as exc:\n        LOGGER.exception('Failed to retrieve course enrollment details for course [%s] due to: [%s]', course_id, str(exc))\n        return {}", "docstring": "Query the Enrollment API for the course details of the given course_id.\n\nArgs:\ncourse_id (str): The string value of the course's unique identifier\n\nReturns:\ndict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)", "source": "codesearchnet"}
{"code": "class ClvpProcessor(ProcessorMixin):\n    feature_extractor_class = 'ClvpFeatureExtractor'\n    tokenizer_class = 'ClvpTokenizer'\n    model_input_names = ['input_ids', 'input_features', 'attention_mask']\n\n    def __init__(self, feature_extractor, tokenizer):\n        super().__init__(feature_extractor, tokenizer)\n\n    def __call__(self, *args, **kwargs):\n        \n        raw_speech = kwargs.pop('raw_speech', None)\n        sampling_rate = kwargs.pop('sampling_rate', None)\n        text = kwargs.pop('text', None)\n        if raw_speech is None and text is None:\n            raise ValueError('You need to specify either an `raw_speech` or `text` input to process.')\n        if raw_speech is not None:\n            inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)\n        if text is not None:\n            encodings = self.tokenizer(text, **kwargs)\n        if text is None:\n            return inputs\n        elif raw_speech is None:\n            return encodings\n        else:\n            inputs['input_ids'] = encodings['input_ids']\n            inputs['attention_mask'] = encodings['attention_mask']\n            return inputs\n\n    def batch_decode(self, *args, **kwargs):\n        \n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \n        return self.tokenizer.decode(*args, **kwargs)", "docstring": "Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.\n\n[`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the\n[`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.\n\nArgs:\nfeature_extractor (`ClvpFeatureExtractor`):\nAn instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.\ntokenizer (`ClvpTokenizer`):\nAn instance of [`ClvpTokenizer`]. The tokenizer is a required input.", "source": "github-repos"}
{"code": "def append(self, node):\n    if (not isinstance(node, grammar.STATEMENTS)):\n        raise ValueError\n    self.to_append[(- 1)].append(node)", "docstring": "Append a statement to the current statement.\n\nNote that multiple calls to append will result in the last statement to be\nappended to end up at the bottom.\n\nArgs:\nnode: The statement to append.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "codesearchnet"}
{"code": "def set_params(self, **params):\n    if ('bias' in params.keys()):\n        self.intercept_ = params['bias']\n    if ('weights' in params.keys()):\n        self.coef_ = params['weights']\n    for key in params.keys():\n        if ('b_' == key[:2]):\n            self.B[int(key[2:])] = params[key]\n    return self", "docstring": "Set the parameters of the estimator.\n\nArgs:\nbias (array-like) : bias of the estimator. Also known as the intercept in a linear model.\nweights (array-like) : weights of the features. Also known as coeficients.\nNER biases (array-like) : NER entities infering column position on X and bias value. Ex: `b_4=10, b_5=6`.\n\nExample:\n>>> cls = VTT()\n>>> cls.set_params(b_4=10, b_5=6, b_6=8)", "source": "codesearchnet"}
{"code": "def cidr_check(cidr, return_cidr=True):\n    \n    try:\n        if int(cidr) < 0 or int(cidr) > 32:\n            good_cidr = False\n        else:\n            good_cidr = True\n        if return_cidr:\n            while not good_cidr:\n                print(\"Sorry the CIDR value %s is not a valid value must be a value of 0 to 32.  Please try again.\"\n                      % (cidr,))\n                cidr = input(\"What is the mask for in CIDR format?: \")\n                if int(cidr) < 0 or int(cidr) > 32:\n                    good_cidr = False\n                else:\n                    good_cidr = True\n            return cidr\n        elif not return_cidr:\n            return good_cidr\n    except ValueError:\n        LOGGER.critical('Function cidr_check expected a number but got {item}'.format(item=cidr))\n        raise ValueError(\"The input needs to be a number!!\")", "docstring": "Function to verify a good CIDR value\nArgs:\ncidr: CIDR value 0 to 32\nreturn_cidr: Set to True it returns a CIDR value, set to False returns True or False\n\nReturns: see return_cidr for return options", "source": "juraj-google-style"}
{"code": "def _unconstrained_to_raw_svi(unconstrained_parameters):\n    b = tf.math.exp(unconstrained_parameters[..., 1])\n    rho = 2 * tf.math.sigmoid(unconstrained_parameters[..., 2]) - 1\n    m = unconstrained_parameters[..., 3]\n    sigma = tf.math.exp(unconstrained_parameters[..., 4])\n    a = tf.math.exp(unconstrained_parameters[..., 0]) - b * sigma * tf.math.sqrt(1 - rho ** 2)\n    return tf.transpose([a, b, rho, m, sigma])", "docstring": "Converts unconstrained optimizarion parameters to raw SVI ones.\n\nPerforms the inverse transformation of the internal unconstrained model\nparameters into the standard raw SVI parameters `a, b, rho, m, sigma`.\n\nArgs:\nunconstrained_parameters: A rank 2 real `Tensor` of shape [batch_size, 5],\nrepresenting SVI model's raw parameters.\n\nReturns:\nA rank 2 real `Tensor` of shape [batch_size, 5], representing the\nunconstrained parameters, used in internal optimization of the SVI model.", "source": "github-repos"}
{"code": "def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache):\n    if (not data_type_definition):\n        raise errors.FormatError('Missing data type definition')\n    members = getattr(data_type_definition, 'members', None)\n    if (not members):\n        raise errors.FormatError('Invalid data type definition missing members')\n    data_type_maps = []\n    members_data_size = 0\n    for member_definition in members:\n        if isinstance(member_definition, data_types.MemberDataTypeDefinition):\n            member_definition = member_definition.member_data_type_definition\n        if ((data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE) and (member_definition.byte_order == definitions.BYTE_ORDER_NATIVE)):\n            member_definition = copy.copy(member_definition)\n            member_definition.name = '_{0:s}_{1:s}'.format(data_type_definition.name, member_definition.name)\n            member_definition.byte_order = data_type_definition.byte_order\n        if (member_definition.name not in data_type_map_cache):\n            data_type_map = DataTypeMapFactory.CreateDataTypeMapByType(member_definition)\n            data_type_map_cache[member_definition.name] = data_type_map\n        data_type_map = data_type_map_cache[member_definition.name]\n        if (members_data_size is not None):\n            if (not isinstance(member_definition, data_types.PaddingDefinition)):\n                byte_size = member_definition.GetByteSize()\n            else:\n                (_, byte_size) = divmod(members_data_size, member_definition.alignment_size)\n                if (byte_size > 0):\n                    byte_size = (member_definition.alignment_size - byte_size)\n                data_type_map.byte_size = byte_size\n            if (byte_size is None):\n                members_data_size = None\n            else:\n                members_data_size += byte_size\n        data_type_maps.append(data_type_map)\n    return data_type_maps", "docstring": "Retrieves the member data type maps.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\ndata_type_map_cache (dict[str, DataTypeMap]): cached data type maps.\n\nReturns:\nlist[DataTypeMap]: member data type maps.\n\nRaises:\nFormatError: if the data type maps cannot be determined from the data\ntype definition.", "source": "codesearchnet"}
{"code": "def maybe_add_training_arg(original_call, wrapped_call, expects_training_arg, default_training_value):\n    if not expects_training_arg:\n        return (wrapped_call, None)\n\n    def wrap_with_training_arg(*args, **kwargs):\n        \n        training_arg_index = get_training_arg_index(original_call)\n        training = get_training_arg(training_arg_index, args, kwargs)\n        if training is None:\n            training = default_training_value or K.learning_phase()\n        args = list(args)\n        kwargs = kwargs.copy()\n\n        def replace_training_and_call(training):\n            set_training_arg(training, training_arg_index, args, kwargs)\n            return wrapped_call(*args, **kwargs)\n        return control_flow_util.smart_cond(training, lambda: replace_training_and_call(True), lambda: replace_training_and_call(False))\n    arg_spec = tf_inspect.getfullargspec(original_call)\n    defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []\n    kwonlyargs = arg_spec.kwonlyargs\n    kwonlydefaults = arg_spec.kwonlydefaults or {}\n    if 'training' not in arg_spec.args:\n        kwonlyargs.append('training')\n        kwonlydefaults['training'] = default_training_value\n    else:\n        index = arg_spec.args.index('training')\n        training_default_index = len(arg_spec.args) - index\n        if arg_spec.defaults and len(arg_spec.defaults) >= training_default_index and (defaults[-training_default_index] is None):\n            defaults[-training_default_index] = default_training_value\n    decorator_argspec = tf_inspect.FullArgSpec(args=arg_spec.args, varargs=arg_spec.varargs, varkw=arg_spec.varkw, defaults=defaults, kwonlyargs=kwonlyargs, kwonlydefaults=kwonlydefaults, annotations=arg_spec.annotations)\n    return (wrap_with_training_arg, decorator_argspec)", "docstring": "Decorate call and optionally adds training argument.\n\nIf a layer expects a training argument, this function ensures that 'training'\nis present in the layer args or kwonly args, with the default training value.\n\nArgs:\noriginal_call: Original call function.\nwrapped_call: Wrapped call function.\nexpects_training_arg: Whether to include 'training' argument.\ndefault_training_value: Default value of the training kwarg to include in\nthe arg spec. If `None`, the default is `K.learning_phase()`.\n\nReturns:\nTuple of (\nfunction that calls `wrapped_call` and sets the training arg,\nArgspec of returned function or `None` if the argspec is unchanged)", "source": "github-repos"}
{"code": "def Deserialize(self, reader):\n        \n        self.HashStart = reader.ReadSerializableArray('neocore.UInt256.UInt256')\n        self.HashStop = reader.ReadUInt256()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def _exclude_denylisted_ops(self, node_names):\n    return [node_name for node_name in node_names if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name)) not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST]", "docstring": "Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST.\n\nArgs:\nnode_names: An iterable of node or graph element names.\n\nReturns:\nA list of node names that are not denylisted.", "source": "github-repos"}
{"code": "def send_message(host, data, timeout=None, properties=None):\n    channel = _get_channel(host, timeout)\n    if (not properties):\n        properties = pika.BasicProperties(content_type='application/json', delivery_mode=2, headers={'UUID': str(uuid.uuid4())})\n    parameters = settings.get_amqp_settings()[host]\n    channel.basic_publish(exchange=parameters['exchange'], routing_key=parameters['in_key'], properties=properties, body=data)", "docstring": "Send message to given `host`.\n\nArgs:\nhost (str): Specified host: aleph/ftp/whatever available host.\ndata (str): JSON data.\ntimeout (int, default None): How much time wait for connection.", "source": "codesearchnet"}
{"code": "def get_discovery_doc(self, services, hostname=None):\n    if (not isinstance(services, (tuple, list))):\n        services = [services]\n    util.check_list_type(services, remote._ServiceClass, 'services', allow_none=False)\n    return self.__discovery_doc_descriptor(services, hostname=hostname)", "docstring": "JSON dict description of a protorpc.remote.Service in discovery format.\n\nArgs:\nservices: Either a single protorpc.remote.Service or a list of them\nthat implements an api/version.\nhostname: string, Hostname of the API, to override the value set on the\ncurrent service. Defaults to None.\n\nReturns:\ndict, The discovery document as a JSON dict.", "source": "codesearchnet"}
{"code": "def add(self, label):\n    label.label_list = self\n    self.label_tree.addi(label.start, label.end, label)", "docstring": "Add a label to the end of the list.\n\nArgs:\nlabel (Label): The label to add.", "source": "codesearchnet"}
{"code": "def ForceRemoveFileObject(self, path_spec):\n    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)\n    if (not cache_value):\n        return False\n    while (not cache_value.IsDereferenced()):\n        cache_value.vfs_object.close()\n    return True", "docstring": "Forces the removal of a file-like object based on a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nbool: True if the file-like object was cached.", "source": "codesearchnet"}
{"code": "def five_crop(img, size):\n    if isinstance(size, numbers.Number):\n        size = (int(size), int(size))\n    else:\n        assert (len(size) == 2), 'Please provide only two dimensions (h, w) for size.'\n    (w, h) = img.size\n    (crop_h, crop_w) = size\n    if ((crop_w > w) or (crop_h > h)):\n        raise ValueError('Requested crop size {} is bigger than input size {}'.format(size, (h, w)))\n    tl = img.crop((0, 0, crop_w, crop_h))\n    tr = img.crop(((w - crop_w), 0, w, crop_h))\n    bl = img.crop((0, (h - crop_h), crop_w, h))\n    br = img.crop(((w - crop_w), (h - crop_h), w, h))\n    center = center_crop(img, (crop_h, crop_w))\n    return (tl, tr, bl, br, center)", "docstring": "Crop the given PIL Image into four corners and the central crop.\n\n.. Note::\nThis transform returns a tuple of images and there may be a\nmismatch in the number of inputs and targets your ``Dataset`` returns.\n\nArgs:\nsize (sequence or int): Desired output size of the crop. If size is an\nint instead of sequence like (h, w), a square crop (size, size) is\nmade.\n\nReturns:\ntuple: tuple (tl, tr, bl, br, center)\nCorresponding top left, top right, bottom left, bottom right and center crop.", "source": "codesearchnet"}
{"code": "def run_calibration(self, saved_model_path: str, signature_keys: list[str], tags: set[str], force_graph_mode_calibration: bool, representative_dataset_file_map_serialized: dict[str, bytes]) -> Optional[bool]:\n    dataset_file_map = {}\n    for signature_key, dataset_file_serialized in representative_dataset_file_map_serialized.items():\n        dataset_file_map[signature_key] = quantization_options_pb2.RepresentativeDatasetFile.FromString(dataset_file_serialized)\n    return _call_and_return_none_on_error(func=functools.partial(_run_calibration, saved_model_path, signature_keys, tags, force_graph_mode_calibration, dataset_file_map), error_msg=f'Failed to run calibration on model \"{saved_model_path}\", signature_keys: {signature_keys}, tags: {tags}.')", "docstring": "Runs calibration and adds calibration statistics to exported model.\n\nArgs:\nsaved_model_path: Path to the SavedModel to run calibration.\nsignature_keys: List of signature keys corresponding to SignatureDefs to\nrun calibration on.\ntags: A set of tags that identify the MetaGraphDef.\nforce_graph_mode_calibration: If True, runs the calibration in graph mode.\nrepresentative_dataset_file_map_serialized: Signature key ->\n`RepresentativeDatasetFile` mapping for running the calibration step.\nEach dataset file stores the representative dataset for the function\nmatching the signature key.\n\nReturns:\nThe error message if the function raises and exception. `None` otherwise.", "source": "github-repos"}
{"code": "def read_header(self, return_idxs=False):\n        \n        self.header = sigproc.read_header(self.filename, return_idxs=return_idxs)\n        return self.header", "docstring": "Read blimpy header and return a Python dictionary of key:value pairs\n\nArgs:\nfilename (str): name of file to open\n\nOptional args:\nreturn_idxs (bool): Default False. If true, returns the file offset indexes\nfor values\n\nReturns:\nPython dict of key:value pairs, OR returns file offset indexes for values.", "source": "juraj-google-style"}
{"code": "def add_or_update(data, item, value):\n    data = data.splitlines()\n    data = map((lambda x: bytearray(x)), data)\n    conf = filter((lambda x: (x.strip() and (x.strip().split()[0] == item))), data)\n    if conf:\n        conf[0][:] = ((conf[0].strip().split()[0] + ' ') + value)\n    else:\n        comments = filter((lambda x: (x.strip().startswith('\n        if comments:\n            comments[0][:] = ((comments[0].split('\n        else:\n            data.append((((item + ' ') + value) + '\\n'))\n    return '\\n'.join(map((lambda x: str(x)), data))", "docstring": "Add or update value in configuration file format used by proftpd.\n\nArgs:\ndata (str): Configuration file as string.\nitem (str): What option will be added/updated.\nvalue (str): Value of option.\n\nReturns:\nstr: updated configuration", "source": "codesearchnet"}
{"code": "def set_number_of_atoms( self, n, selected_sites=None ):\n        \n        self.number_of_atoms = n\n        self.atoms = species.Species( self.lattice.populate_sites( self.number_of_atoms, selected_sites=selected_sites ) )", "docstring": "Set the number of atoms for the simulation, and populate the simulation lattice.\n\nArgs:\nn (Int): Number of atoms for this simulation.\nselected_sites (:obj:(List|Set|String), optional): Selects a subset of site types to be populated with atoms. Defaults to None.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def sparse_embedding_aggregate_slice(params, values_and_values_mask, combiner='mean', name='sparse_embedding_aggregate_slice'):\n    values, values_mask = values_and_values_mask\n    with ops.name_scope(name):\n        _, embedding_dimension = params.get_shape().as_list()\n        n_batch, n_indices_padded = values.get_shape().as_list()\n        if not n_batch:\n            n_batch = -1\n        emb_lookup = array_ops.reshape(embedding_ops.embedding_lookup(params, array_ops.reshape(values, [n_batch, n_indices_padded])), [n_batch, n_indices_padded, embedding_dimension])\n        values_mask_broadcast = array_ops.reshape(values_mask, [n_batch, n_indices_padded, 1])\n        aggregate_emb = math_ops.reduce_sum(emb_lookup * values_mask_broadcast, axis=1)\n        if combiner == 'sum':\n            return aggregate_emb\n        elif combiner == 'mean':\n            return aggregate_emb / math_ops.maximum(math_ops.reduce_sum(values_mask_broadcast, axis=1), 1.0)\n        else:\n            raise ValueError('Dense TPU Embedding does not support combiner other than sum and mean.')", "docstring": "Uses XLA's dynamic slice operations to perform embedding lookups.\n\nFrom third_party/cloud_tpu/models/movielens/tpu_embedding.py\n\nArgs:\nparams: Tensor of embedding table. Rank 2 (table_size x embedding dim)\nvalues_and_values_mask: is a two-tuple that contains: values - Tensor of\nembedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask\n/ weights. Rank 2 (batch x n_indices)\ncombiner: The combiner to use for the embedding lookup. Currently supports\n'sum' and 'mean'.\nname: Optional name scope for created ops\n\nReturns:\nRank 2 tensor of aggregated (per batch element) embedding vectors.\n\nRaises:\nValueError: Combiner is not supported.", "source": "github-repos"}
{"code": "def add_omim_info(genes, alias_genes, genemap_lines, mim2gene_lines):\n    \n    LOG.info(\"Add omim info\")\n    omim_genes = get_mim_genes(genemap_lines, mim2gene_lines)\n    \n    for hgnc_symbol in omim_genes:\n        omim_info = omim_genes[hgnc_symbol]\n        inheritance = omim_info.get('inheritance', set())\n        \n        for hgnc_id in get_correct_ids(hgnc_symbol, alias_genes):\n            gene_info = genes[hgnc_id]\n\n            \n            gene_info['omim_id'] = omim_info['mim_number']\n\n            gene_info['inheritance_models'] = list(inheritance)\n            gene_info['phenotypes'] = omim_info.get('phenotypes', [])", "docstring": "Add omim information\n\nWe collect information on what phenotypes that are associated with a gene,\nwhat inheritance models that are associated and the correct omim id.\n\nArgs:\ngenes(dict): Dictionary with all genes\nalias_genes(dict): Genes mapped to all aliases\ngenemap_lines(iterable): Iterable with raw omim info\nmim2gene_lines(iterable): Iterable with raw omim info", "source": "juraj-google-style"}
{"code": "def ParseLeakFilesTable(self, parser_mediator, database=None, table=None, **unused_kwargs):\n    if (database is None):\n        raise ValueError('Missing database value.')\n    if (table is None):\n        raise ValueError('Missing table value.')\n    for esedb_record in table.records:\n        if parser_mediator.abort:\n            break\n        record_values = self._GetRecordValues(parser_mediator, table.name, esedb_record)\n        event_data = MsieWebCacheLeakFilesEventData()\n        event_data.cached_filename = record_values.get('Filename', None)\n        event_data.leak_identifier = record_values.get('LeakId', None)\n        timestamp = record_values.get('CreationTime', None)\n        if timestamp:\n            date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses the LeakFiles table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.\n\nRaises:\nValueError: if the database or table value is missing.", "source": "codesearchnet"}
{"code": "def ExtractEvents(self, parser_mediator, registry_key, **kwargs):\n    \n    values_dict = {}\n    for registry_value in registry_key.GetValues():\n      if not registry_value.name or not registry_value.data:\n        continue\n\n      if registry_value.name == 'UpdateKey':\n        self._ParseUpdateKeyValue(\n            parser_mediator, registry_value, registry_key.path)\n      else:\n        values_dict[registry_value.name] = registry_value.GetDataAsObject()\n\n    event_data = windows_events.WindowsRegistryEventData()\n    event_data.key_path = registry_key.path\n    event_data.offset = registry_key.offset\n    event_data.regvalue = values_dict\n    event_data.source_append = self._SOURCE_APPEND\n    event_data.urls = self.URLS\n\n    event = time_events.DateTimeValuesEvent(\n        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts events from a Windows Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.", "source": "juraj-google-style"}
{"code": "def validate(obj, schema):\n    if isinstance(obj, str):\n        obj = json.loads(obj)\n    return JsonValidator(schema)._validate(obj)", "docstring": "Validate an object against a schema\n\nArgs:\nobj (dict):\nschema (dict):", "source": "codesearchnet"}
{"code": "def match_as_dict(self, film_sl_vectors, substrate_sl_vectors, film_vectors, substrate_vectors, match_area):\n    d = {}\n    d['film_sl_vecs'] = np.asarray(film_sl_vectors)\n    d['sub_sl_vecs'] = np.asarray(substrate_sl_vectors)\n    d['match_area'] = match_area\n    d['film_vecs'] = np.asarray(film_vectors)\n    d['sub_vecs'] = np.asarray(substrate_vectors)\n    return d", "docstring": "Returns dict which contains ZSL match\n\nArgs:\nfilm_miller(array)\nsubstrate_miller(array)", "source": "codesearchnet"}
{"code": "def get_by_name(self, name):\n        \n        managed_sans = self.get_all()\n        result = [x for x in managed_sans if x['name'] == name]\n\n        resource = result[0] if result else None\n        if resource:\n            resource = self.new(self._connection, resource)\n\n        return resource", "docstring": "Gets a Managed SAN by name.\n\nArgs:\nname: Name of the Managed SAN\n\nReturns:\ndict: Managed SAN.", "source": "juraj-google-style"}
{"code": "def add(self, virtual_bit, physical_bit=None):\n        \n        if physical_bit is None:\n            physical_candidate = len(self)\n            while physical_candidate in self._p2v:\n                physical_candidate += 1\n            physical_bit = physical_candidate\n        self[virtual_bit] = physical_bit", "docstring": "Adds a map element between `bit` and `physical_bit`. If `physical_bit` is not\ndefined, `bit` will be mapped to a new physical bit (extending the length of the\nlayout by one.)\nArgs:\nvirtual_bit (tuple): A (qu)bit. For example, (QuantumRegister(3, 'qr'), 2).\nphysical_bit (int): A physical bit. For example, 3.", "source": "juraj-google-style"}
{"code": "def DeserializeUnsigned(self, reader):\n        \n        self.Version = reader.ReadUInt32()\n        self.PrevHash = reader.ReadUInt256()\n        self.MerkleRoot = reader.ReadUInt256()\n        self.Timestamp = reader.ReadUInt32()\n        self.Index = reader.ReadUInt32()\n        self.ConsensusData = reader.ReadUInt64()\n        self.NextConsensus = reader.ReadUInt160()", "docstring": "Deserialize unsigned data only.\n\nArgs:\nreader (neo.IO.BinaryReader):", "source": "juraj-google-style"}
{"code": "def jsbuild_prompt():\n    print(BOKEHJS_BUILD_PROMPT)\n    mapping = {'1': True, '2': False}\n    value = input('Choice? ')\n    while (value not in mapping):\n        print((\"Input '%s' not understood. Valid choices: 1, 2\\n\" % value))\n        value = input('Choice? ')\n    return mapping[value]", "docstring": "Prompt users whether to build a new BokehJS or install an existing one.\n\nReturns:\nbool : True, if a new build is requested, False otherwise", "source": "codesearchnet"}
{"code": "def get_object_from_name(name):\n    \n\n    dot = name.rindex(\".\")\n    mod_name, property_name = name[:dot], name[dot + 1:]\n    __import__(mod_name)\n    return getattr(sys.modules[mod_name], property_name)", "docstring": "Returns the named object.\n\nArguments:\nname (str): A string of form `package.subpackage.etc.module.property`.\nThis function will import `package.subpackage.etc.module` and\nreturn `property` from that module.", "source": "juraj-google-style"}
{"code": "def add_showcases(self, showcases, showcases_to_check=None):\n        \n        \n        if showcases_to_check is None:\n            showcases_to_check = self.get_showcases()\n        allshowcasesadded = True\n        for showcase in showcases:\n            if not self.add_showcase(showcase, showcases_to_check=showcases_to_check):\n                allshowcasesadded = False\n        return allshowcasesadded", "docstring": "Add dataset to multiple showcases\n\nArgs:\nshowcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries\nshowcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.\n\nReturns:\nbool: True if all showcases added or False if any already present", "source": "juraj-google-style"}
{"code": "def cpu_halt_reasons(self):\n        \n        buf_size = self.MAX_NUM_MOES\n        buf = (structs.JLinkMOEInfo * buf_size)()\n        num_reasons = self._dll.JLINKARM_GetMOEs(buf, buf_size)\n        if num_reasons < 0:\n            raise errors.JLinkException(num_reasons)\n\n        return list(buf)[:num_reasons]", "docstring": "Retrives the reasons that the CPU was halted.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of ``JLInkMOEInfo`` instances specifying the reasons for which\nthe CPU was halted.  This list may be empty in the case that the CPU\nis not halted.\n\nRaises:\nJLinkException: on hardware error.", "source": "juraj-google-style"}
{"code": "def getmethodclass(m):\n    if not hasattr(m, '__name__') and hasattr(m, '__class__') and hasattr(m, '__call__'):\n        if isinstance(m.__class__, type):\n            return m.__class__\n    m_self = getattr(m, '__self__', None)\n    if m_self is not None:\n        if inspect.isclass(m_self):\n            return m_self\n        return m_self.__class__\n    owners = []\n    caller_frame = tf_inspect.currentframe().f_back\n    try:\n        for v in itertools.chain(caller_frame.f_locals.values(), caller_frame.f_globals.values()):\n            if hasattr(v, m.__name__):\n                candidate = getattr(v, m.__name__)\n                if hasattr(candidate, 'im_func'):\n                    candidate = candidate.im_func\n                if hasattr(m, 'im_func'):\n                    m = m.im_func\n                if candidate is m:\n                    owners.append(v)\n    finally:\n        del caller_frame\n    if owners:\n        if len(owners) == 1:\n            return owners[0]\n        owner_types = tuple((o if tf_inspect.isclass(o) else type(o) for o in owners))\n        for o in owner_types:\n            if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)):\n                return o\n        raise ValueError('Found too many owners of %s: %s' % (m, owners))\n    return None", "docstring": "Resolves a function's owner, e.g.\n\na method's class.\n\nNote that this returns the object that the function was retrieved from, not\nnecessarily the class where it was defined.\n\nThis function relies on Python stack frame support in the interpreter, and\nhas the same limitations that inspect.currentframe.\n\nLimitations. This function will only work correctly if the owned class is\nvisible in the caller's global or local variables.\n\nArgs:\nm: A user defined function\n\nReturns:\nThe class that this function was retrieved from, or None if the function\nis not an object or class method, or the class that owns the object or\nmethod is not visible to m.\n\nRaises:\nValueError: if the class could not be resolved for any unexpected reason.", "source": "github-repos"}
{"code": "def conformPadding(cls, chars):\n    pad = chars\n    if (pad and (pad[0] not in PAD_MAP)):\n        pad = cls.getPaddingChars(cls.getPaddingNum(pad))\n    return pad", "docstring": "Ensure alternate input padding formats are conformed\nto formats defined in PAD_MAP\n\nIf chars is already a format defined in PAD_MAP, then\nit is returned unmodified.\n\nExample::\n'#'    -> '#'\n'@@@@' -> '@@@@'\n'%04d' -> '#'\n\nArgs:\nchars (str): input padding chars\n\nReturns:\nstr: conformed padding chars\n\nRaises:\nValueError: If chars contains invalid padding characters", "source": "codesearchnet"}
{"code": "def upload(self, file_path, timeout=(- 1)):\n    return self._client.upload(file_path, timeout=timeout)", "docstring": "Upload an SPP ISO image file or a hotfix file to the appliance.\nThe API supports upload of one hotfix at a time into the system.\nFor the successful upload of a hotfix, ensure its original name and extension are not altered.\n\nArgs:\nfile_path: Full path to firmware.\ntimeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Information about the updated firmware bundle.", "source": "codesearchnet"}
{"code": "def download_and_extract(uri, name, path):  \n    \n    if not os.path.exists(path):\n        os.makedirs(path)\n    if not os.listdir(path):\n        with tmpdir() as tmp:\n            if uri.startswith('s3:\n                dst = os.path.join(tmp, 'tar_file')\n                s3_download(uri, dst)\n\n                with tarfile.open(name=dst, mode='r:gz') as t:\n                    t.extractall(path=path)\n\n            elif os.path.isdir(uri):\n                if uri == path:\n                    return\n                if os.path.exists(path):\n                    shutil.rmtree(path)\n                shutil.move(uri, path)\n            else:\n                shutil.copy2(uri, os.path.join(path, name))", "docstring": "Download, prepare and install a compressed tar file from S3 or local directory as an entry point.\n\nSageMaker Python SDK saves the user provided entry points as compressed tar files in S3\n\nArgs:\nname (str): name of the entry point.\nuri (str): the location of the entry point.\npath (bool): The path where the script will be installed. It will not download and install the\nif the path already has the user entry point.", "source": "juraj-google-style"}
{"code": "def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourcegroups/', rgname,\n                        '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name,\n                        '/listKeys',\n                        '?api-version=', COSMOSDB_API])\n    return do_post(endpoint, '', access_token)", "docstring": "Get the access keys for the specified Cosmos DB account.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the Cosmos DB account.\n\nReturns:\nHTTP response. JSON body of Cosmos DB account keys.", "source": "juraj-google-style"}
{"code": "def __init__(self, tensor_proto, initialized=True):\n    self._tensor_proto = tensor_proto\n    self._initialized = initialized", "docstring": "Constructor.\n\nArgs:\ntensor_proto: the `TensorProto` object that cannot be represented as a\n`np.ndarray` object.\ninitialized: (`bool`) whether the Tensor is initialized.", "source": "github-repos"}
{"code": "def HasTable(self, table_name):\n    \n    if not self._connection:\n      raise RuntimeError(\n          'Cannot determine if table exists database not opened.')\n\n    sql_query = self._HAS_TABLE_QUERY.format(table_name)\n\n    self._cursor.execute(sql_query)\n    if self._cursor.fetchone():\n      return True\n\n    return False", "docstring": "Determines if a specific table exists.\n\nArgs:\ntable_name (str): table name.\n\nReturns:\nbool: True if the table exists.\n\nRaises:\nRuntimeError: if the database is not opened.", "source": "juraj-google-style"}
{"code": "def set_documents(self, documents, fully_formed=False):\n\n    def add_id(document, id):\n\n        def make_id_tag(root, rel_path, max_depth):\n            if (max_depth < 0):\n                raise ParameterError('document_id_xpath too deep!')\n            if (not rel_path):\n                return root\n            else:\n                child = root.find(rel_path[0])\n                if (child is None):\n                    child = ET.Element(rel_path[0])\n                    root.append(child)\n                return make_id_tag(child, rel_path[1:], (max_depth - 1))\n        make_id_tag(document, doc_id_xpath, 10).text = str(id)\n    if fully_formed:\n        if (not isinstance(documents, list)):\n            documents = [documents]\n    else:\n        doc_root_tag = self.connection.document_root_xpath\n        doc_id_xpath = self.connection.document_id_xpath.split('/')\n        documents = dict([(id, to_etree((document if (document is not None) else query.term('', doc_root_tag)), doc_root_tag)) for (id, document) in documents.items()])\n        for (id, document) in documents.items():\n            if (document.tag != doc_root_tag):\n                documents[id] = ET.Element(doc_root_tag)\n                documents[id].append(document)\n        for (id, document) in documents.items():\n            add_id(document, id)\n        documents = documents.values()\n    self._documents = map(to_raw_xml, documents)", "docstring": "Wrap documents in the correct root tags, add id fields and convert them to xml strings.\n\nArgs:\ndocuments -- If fully_formed is False (default), accepts dict where keys are document ids and values can be ether\nxml string, etree.ElementTree or dict representation of an xml document (see dict_to_etree()).\nIf fully_formed is True, accepts list or single document where ids are integrated in document or\nnot needed and document has the right root tag.\n\nKeyword args:\nfully_formed  -- If documents are fully formed (contains the right root tags and id fields) set to True\nto avoid the owerhead of documets beeing parsed at all. If set to True only list of documents or\na single document can be pased as 'documents', not a dict of documents. Default is False.", "source": "codesearchnet"}
{"code": "def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):\n    \n    if book_id is None:\n        book_id = str(uuid.uuid4())\n\n    return os.path.join(prefix, book_id)", "docstring": "Return package path. Use uuid to generate package's directory name.\n\nArgs:\nbook_id (str, default None): UUID of the book.\nprefix (str, default settings.TEMP_DIR): Where the package will be\nstored. Default :attr:`settings.TEMP_DIR`.\n\nReturns:\nstr: Path to the root directory.", "source": "juraj-google-style"}
{"code": "def range_index_map(batch_shape, num_segments, name='range_index_map'):\n    batch_shape = tf.convert_to_tensor(batch_shape)\n    batch_shape.shape.assert_has_rank(1)\n    num_segments = tf.convert_to_tensor(num_segments)\n    num_segments.shape.assert_has_rank(0)\n    indices = tf.range(num_segments)\n    shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0)\n    indices = tf.reshape(indices, shape)\n    multiples = tf.concat([batch_shape, [1]], axis=0)\n    indices = tf.tile(indices, multiples)\n    return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0])", "docstring": "Constructs an index map equal to range(num_segments).\n\nArgs:\nbatch_shape (`tf.Tensor`):\nBatch shape\nnum_segments (`int`):\nNumber of segments\nname (`str`, *optional*, defaults to 'range_index_map'):\nName for the operation. Currently not used\n\nReturns:\n(`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).", "source": "github-repos"}
{"code": "def aggregate_gradients_using_copy_with_device_selection(tower_grads, avail_devices, use_mean=True, check_inf_nan=False):\n    agg_grads = []\n    has_nan_or_inf_list = []\n    for (i, single_grads) in enumerate(zip(*tower_grads)):\n        with tf.device(avail_devices[(i % len(avail_devices))]):\n            (grad_and_var, has_nan_or_inf) = aggregate_single_gradient(single_grads, use_mean, check_inf_nan)\n            agg_grads.append(grad_and_var)\n            has_nan_or_inf_list.append(has_nan_or_inf)\n    return agg_grads", "docstring": "Aggregate gradients, controlling device for the aggregation.\n\nArgs:\ntower_grads: List of lists of (gradient, variable) tuples. The outer list\nis over towers. The inner list is over individual gradients.\nuse_mean: if True, mean is taken, else sum of gradients is taken.\ncheck_inf_nan: If true, check grads for nans and infs.\n\nReturns:\nThe tuple ([(average_gradient, variable),], has_nan_or_inf) where the\ngradient has been averaged across all towers. The variable is chosen from\nthe first tower. The has_nan_or_inf indicates the grads has nan or inf.", "source": "codesearchnet"}
{"code": "def _build_update_ops(self, mean, variance, is_training):\n\n    def build_update_ops():\n        'Builds the exponential moving average update ops.'\n        update_mean_op = moving_averages.assign_moving_average(variable=self._moving_mean, value=tf.reshape(mean, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name='update_moving_mean').op\n        update_variance_op = moving_averages.assign_moving_average(variable=self._moving_variance, value=tf.reshape(variance, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name='update_moving_variance').op\n        return (update_mean_op, update_variance_op)\n\n    def build_no_ops():\n        return (tf.no_op(), tf.no_op())\n    is_training_const = utils.constant_value(is_training)\n    if ((is_training_const is None) or is_training_const):\n        (update_mean_op, update_variance_op) = utils.smart_cond(is_training, build_update_ops, build_no_ops)\n        return (update_mean_op, update_variance_op)\n    else:\n        return None", "docstring": "Builds the moving average update ops when using moving variance.\n\nArgs:\nmean: The mean value to update with.\nvariance: The variance value to update with.\nis_training: Boolean Tensor to indicate if we're currently in\ntraining mode.\n\nReturns:\nTuple of `(update_mean_op, update_variance_op)` when `is_training` is or\ncould be `True`. Returns `None` when `is_training=False`.", "source": "codesearchnet"}
{"code": "def check_python_import(package_or_module):\n    \n    logger = logging.getLogger(__name__)\n    logger.debug(\"Checking python import '%s'...\", package_or_module)\n    loader = pkgutil.get_loader(package_or_module)\n    found = loader is not None\n    if found:\n        logger.debug(\"Python %s '%s' found\",\n                     \"package\" if loader.is_package(package_or_module)\n                     else \"module\", package_or_module)\n    else:  \n        logger.debug(\"Python import '%s' not found\", package_or_module)\n    return found", "docstring": "Checks if a python package or module is importable.\nArguments:\npackage_or_module -- the package or module name to check\nReturns:\nTrue or False", "source": "juraj-google-style"}
{"code": "def get(self):\n        \n        parser = reqparse.RequestParser()\n        parser.add_argument('search', type=str, required=True)\n        parser.add_argument('limit', type=int)\n        args = parser.parse_args()\n\n        if not args['search']:\n            return make_error(400, 'text_search cannot be empty')\n        if not args['limit']:\n            \n            del args['limit']\n\n        pool = current_app.config['bigchain_pool']\n\n        with pool() as bigchain:\n            assets = bigchain.text_search(**args)\n\n        try:\n            \n            return list(assets)\n        except OperationError as e:\n            return make_error(\n                400,\n                '({}): {}'.format(type(e).__name__, e)\n            )", "docstring": "API endpoint to perform a text search on the assets.\n\nArgs:\nsearch (str): Text search string to query the text index\nlimit (int, optional): Limit the number of returned documents.\n\nReturn:\nA list of assets that match the query.", "source": "juraj-google-style"}
{"code": "def with_env_recursive(cmd, **envvars):\n    from plumbum.commands.base import BoundCommand, BoundEnvCommand\n    if isinstance(cmd, BoundCommand):\n        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)\n    elif isinstance(cmd, BoundEnvCommand):\n        cmd.envvars.update(envvars)\n        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)\n    return cmd", "docstring": "Recursively updates the environment of cmd and all its subcommands.\n\nArgs:\ncmd - A plumbum command-like object\n**envvars - The environment variables to update\n\nReturns:\nThe updated command.", "source": "codesearchnet"}
{"code": "def convert_to_scl(spec, scl_options):\n    \n    scl_options['skip_functions'] = scl_options['skip_functions'].split(',')\n    scl_options['meta_spec'] = None\n    convertor = SclConvertor(options=scl_options)\n    return str(convertor.convert(spec))", "docstring": "Convert spec into SCL-style spec file using `spec2scl`.\n\nArgs:\nspec: (str) a spec file\nscl_options: (dict) SCL options provided\nReturns:\nA converted spec file", "source": "juraj-google-style"}
{"code": "def set_representative_sequence(self, force_rerun=False):\n        \n\n        if len(self.sequences) == 0:\n            log.error('{}: no sequences mapped'.format(self.id))\n            return self.representative_sequence\n\n        kegg_mappings = self.filter_sequences(KEGGProp)\n        if len(kegg_mappings) > 0:\n            kegg_to_use = kegg_mappings[0]\n            if len(kegg_mappings) > 1:\n                log.warning('{}: multiple KEGG mappings found, using the first entry {}'.format(self.id, kegg_to_use.id))\n\n        uniprot_mappings = self.filter_sequences(UniProtProp)\n\n        \n        if self.representative_sequence and not force_rerun:\n            log.debug('{}: representative sequence already set'.format(self.id))\n\n        \n        elif len(kegg_mappings) > 0 and len(uniprot_mappings) == 0:\n            self.representative_sequence = kegg_to_use\n            log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id))\n\n        \n        elif len(kegg_mappings) == 0 and len(uniprot_mappings) > 0:\n            \n            \n            u_ranker = []\n            for u in uniprot_mappings:\n                u_ranker.append((u.id, u.ranking_score()))\n            sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)\n            best_u_id = sorted_by_second[0][0]\n\n            best_u = uniprot_mappings.get_by_id(best_u_id)\n            self.representative_sequence = best_u\n            log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id))\n\n        \n        elif len(kegg_mappings) > 0 and len(uniprot_mappings) > 0:\n            \n            if kegg_to_use.num_pdbs > 0 and not uniprot_mappings.has_id(kegg_to_use.uniprot):\n                self.representative_sequence = kegg_to_use\n                log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id))\n            else:\n                \n                u_ranker = []\n                for u in uniprot_mappings:\n                    u_ranker.append((u.id, u.ranking_score()))\n                sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)\n                best_u_id = sorted_by_second[0][0]\n\n                best_u = uniprot_mappings.get_by_id(best_u_id)\n                self.representative_sequence = best_u\n                log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id))\n\n        return self.representative_sequence", "docstring": "Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative\nsequence.\n\nManually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings\nexcept when KEGG mappings have PDBs associated with them and UniProt doesn't.\n\nArgs:\nforce_rerun (bool): Set to True to recheck stored sequences\n\nReturns:\nSeqProp: Which sequence was set as representative", "source": "juraj-google-style"}
{"code": "def __init__(self, host, cert, reason):\n    \n    httplib.HTTPException.__init__(self)\n    self.host = host\n    self.cert = cert\n    self.reason = reason", "docstring": "Constructor.\n\nArgs:\nhost: The hostname the connection was made to.\ncert: The SSL certificate (as a dictionary) the host returned.", "source": "juraj-google-style"}
{"code": "def destroy_sns_event(app_name, env, region):\n    \n    session = boto3.Session(profile_name=env, region_name=region)\n    sns_client = session.client('sns')\n\n    lambda_subscriptions = get_sns_subscriptions(app_name=app_name, env=env, region=region)\n\n    for subscription_arn in lambda_subscriptions:\n        sns_client.unsubscribe(SubscriptionArn=subscription_arn)\n\n    LOG.debug(\"Lambda SNS event deleted\")\n    return True", "docstring": "Destroy all Lambda SNS subscriptions.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\n\nReturns:\nboolean: True if subscription destroyed successfully", "source": "juraj-google-style"}
{"code": "def create_checksum_object_from_stream(\n    f, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM\n):\n    \n    checksum_str = calculate_checksum_on_stream(f, algorithm)\n    checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str)\n    checksum_pyxb.algorithm = algorithm\n    return checksum_pyxb", "docstring": "Calculate the checksum of a stream.\n\nArgs:\nf: file-like object\nOnly requirement is a ``read()`` method that returns ``bytes``.\n\nalgorithm: str\nChecksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.\n\nReturns:\nPopulated Checksum PyXB object.", "source": "juraj-google-style"}
{"code": "def is_applicable(self, trackable: base.Trackable) -> bool:", "docstring": "Returns whether the adapter is applicable to trackable for resharding.\n\nArgs:\ntrackable: A Trackable object that is being restored.\n\nReturns:\nA Boolean indicating if the checkpoint value for this Trackable should be\nresharded.", "source": "github-repos"}
{"code": "def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):\n    if ((kernel_size != 1) and (kernel_size != (1, 1)) and (nonpadding_mask is not None)):\n        while (nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims):\n            nonpadding_mask = tf.expand_dims(nonpadding_mask, (- 1))\n        return (inputs * nonpadding_mask)\n    return inputs", "docstring": "If necessary, zero out inputs to a conv for padding positions.\n\nArgs:\ninputs: a Tensor with shape [batch, length, ...]\nkernel_size: an integer or pair of integers\nnonpadding_mask: a Tensor with shape [batch, length]\n\nReturns:\nTensor of the same shape as inputs.", "source": "codesearchnet"}
{"code": "def __init__(self, opt, reduction=losses.Reduction.MEAN, name='CrossShardOptimizer', group_assignment=None):\n    accepted_reductions = (losses.Reduction.SUM, losses.Reduction.MEAN)\n    if reduction not in accepted_reductions:\n        raise ValueError(f'Argument `reduction` should be one of {accepted_reductions}. Received: {reduction}')\n    if not isinstance(opt, optimizer.Optimizer):\n        raise TypeError(f'CrossShardOptimizer only works with tf.training.Optimizer and not Keras Optimizer. Received: {opt}. If you are using TPUStrategy, Keras Optimizer will sum gradients across replicas.If you want to average your gradients, rescale your loss with: `loss /= global_batch_size`')\n    super(CrossShardOptimizer, self).__init__(False, name)\n    self._opt = opt\n    self._reduction = reduction\n    self._group_assignment = group_assignment", "docstring": "Construct a new cross-shard optimizer.\n\nArgs:\nopt: An existing `Optimizer` to encapsulate.\nreduction: The reduction to apply to the shard losses.\nname: Optional name prefix for the operations created when applying\ngradients. Defaults to \"CrossShardOptimizer\".\ngroup_assignment: Optional 2d int32 lists with shape\n[num_groups, num_replicas_per_group] which describles how to apply\noptimizer to subgroups.\n\nRaises:\nValueError: If reduction is not a valid cross-shard reduction.", "source": "github-repos"}
{"code": "def redact_event(self, room_id, event_id, reason=None, txn_id=None, timestamp=None):\n        \n        if not txn_id:\n            txn_id = self._make_txn_id()\n\n        path = '/rooms/%s/redact/%s/%s' % (\n            room_id, event_id, txn_id\n        )\n        content = {}\n        if reason:\n            content['reason'] = reason\n        params = {}\n        if timestamp:\n            params[\"ts\"] = timestamp\n        return self._send(\"PUT\", path, content, query_params=params)", "docstring": "Perform PUT /rooms/$room_id/redact/$event_id/$txn_id/\n\nArgs:\nroom_id(str): The room ID to redact the message event in.\nevent_id(str): The event id to redact.\nreason (str): Optional. The reason the message was redacted.\ntxn_id(int): Optional. The transaction ID to use.\ntimestamp(int): Optional. Set origin_server_ts (For application services only)", "source": "juraj-google-style"}
{"code": "def get_frame(self, index):\n    frame_num = self.frame_index[index]\n    onset = (float(frame_num) / self.fps)\n    if (index < (self.n_frames - 1)):\n        next_frame_num = self.frame_index[(index + 1)]\n        end = (float(next_frame_num) / self.fps)\n    else:\n        end = float(self.duration)\n    duration = ((end - onset) if (end > onset) else 0.0)\n    return VideoFrameStim(self, frame_num, data=self.clip.get_frame(onset), duration=duration)", "docstring": "Get video frame at the specified index.\n\nArgs:\nindex (int): Positional index of the desired frame.", "source": "codesearchnet"}
{"code": "def dot_product(t1, t2, keep_dims=False, name=None, reduction_dim=None):\n    with tf.name_scope(name, 'dot', [t1, t2]) as scope:\n        t1 = tf.convert_to_tensor(t1, name='t1')\n        t2 = tf.convert_to_tensor(t2, name='t2')\n        mul = tf.multiply(t1, t2)\n        if (not reduction_dim):\n            reduction_dim = _last_index(mul, 1)\n        return tf.reduce_sum(mul, reduction_dim, name=scope, keep_dims=keep_dims)", "docstring": "Computes the dot product of t1 and t2.\n\nArgs:\nt1: A rank 2 tensor.\nt2: A tensor that is the same size as t1.\nkeep_dims: If true, reduction does not change the rank of the input.\nname: Optional name for this op.\nreduction_dim: The dimension to reduce, by default choose the last one\nand if no shape is specified guess 1.\nReturns:\nThe dot product.", "source": "codesearchnet"}
{"code": "def from_string(contents):\n        \n        lines = [l.strip() for l in contents.split(\"\\n\")]\n\n        link0_patt = re.compile(r\"^(%.+)\\s*=\\s*(.+)\")\n        link0_dict = {}\n        for i, l in enumerate(lines):\n            if link0_patt.match(l):\n                m = link0_patt.match(l)\n                link0_dict[m.group(1).strip(\"=\")] = m.group(2)\n\n        route_patt = re.compile(r\"^\n        route = \"\"\n        route_index = None\n        for i, l in enumerate(lines):\n            if route_patt.match(l):\n                route += \" \" + l\n                route_index = i\n            \n            elif (l == \"\" or l.isspace()) and route_index:\n                break\n        functional, basis_set, route_paras, dieze_tag = read_route_line(route)\n        ind = 2\n        title = []\n        while lines[route_index + ind].strip():\n            title.append(lines[route_index + ind].strip())\n            ind += 1\n        title = ' '.join(title)\n        ind += 1\n        toks = re.split(r\"[,\\s]+\", lines[route_index + ind])\n        charge = int(toks[0])\n        spin_mult = int(toks[1])\n        coord_lines = []\n        spaces = 0\n        input_paras = {}\n        ind += 1\n        for i in range(route_index + ind, len(lines)):\n            if lines[i].strip() == \"\":\n                spaces += 1\n            if spaces >= 2:\n                d = lines[i].split(\"=\")\n                if len(d) == 2:\n                    input_paras[d[0]] = d[1]\n            else:\n                coord_lines.append(lines[i].strip())\n        mol = GaussianInput._parse_coords(coord_lines)\n        mol.set_charge_and_spin(charge, spin_mult)\n\n        return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,\n                             title=title, functional=functional,\n                             basis_set=basis_set,\n                             route_parameters=route_paras,\n                             input_parameters=input_paras,\n                             link0_parameters=link0_dict,\n                             dieze_tag=dieze_tag)", "docstring": "Creates GaussianInput from a string.\n\nArgs:\ncontents: String representing an Gaussian input file.\n\nReturns:\nGaussianInput object", "source": "juraj-google-style"}
{"code": "def __init__(self, *args, **kwargs):\n    super(UnionClusterResolver, self).__init__()\n    self._rpc_layer = kwargs.pop('rpc_layer', None)\n    self._task_type = kwargs.pop('task_type', None)\n    self._task_id = kwargs.pop('task_id', None)\n    if kwargs:\n        raise ValueError('Unexpected kwargs provided {!r}'.format(kwargs))\n    if not args:\n        raise ValueError('At least one ClusterResolver is required.')\n    for cluster_resolver in args:\n        if not isinstance(cluster_resolver, ClusterResolver):\n            raise TypeError('All arguments must be a sub-class of `ClusterResolver.`')\n    self._cluster_resolvers = args", "docstring": "Initializes a UnionClusterResolver with other ClusterResolvers.\n\nArgs:\n*args: `ClusterResolver` objects to be unionized.\n**kwargs:\nrpc_layer - (Optional) Override value for the RPC layer used by\nTensorFlow.\ntask_type - (Optional) Override value for the current task type.\ntask_id - (Optional) Override value for the current task index.\n\nRaises:\nTypeError: If any argument is not a subclass of `ClusterResolvers`.\nValueError: If there are no arguments passed.", "source": "github-repos"}
{"code": "def ragged_rank(self):\n    values_is_ragged = isinstance(self._values, RaggedTensor)\n    return self._values.ragged_rank + 1 if values_is_ragged else 1", "docstring": "The number of times the RaggedTensor's flat_values is partitioned.\n\nExamples:\n\n>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n>>> values.ragged_rank\n1\n\n>>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2)\n>>> rt.ragged_rank\n2\n\nReturns:\nA Python `int` indicating the number of times the underlying `flat_values`\nTensor has been partitioned to add a new dimension.\nI.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.", "source": "github-repos"}
{"code": "def __init__(self, name):\n    \n    super(MemberSectionDefinition, self).__init__()\n    self.name = name\n    self.members = []", "docstring": "Initializes a member section definition.\n\nArgs:\nname (str): name.", "source": "juraj-google-style"}
{"code": "def _GetRequestClass(self, method_descriptor):\n    if (method_descriptor.containing_service != self.descriptor):\n        raise RuntimeError('GetRequestClass() given method descriptor for wrong service type.')\n    return method_descriptor.input_type._concrete_class", "docstring": "Returns the class of the request protocol message.\n\nArgs:\nmethod_descriptor: Descriptor of the method for which to return the\nrequest protocol message class.\n\nReturns:\nA class that represents the input protocol message of the specified\nmethod.", "source": "codesearchnet"}
{"code": "def remove_object_from_list(self, obj, list_element):\n        \n        list_element = self._handle_location(list_element)\n\n        if isinstance(obj, JSSObject):\n            results = [item for item in list_element.getchildren() if\n                       item.findtext(\"id\") == obj.id]\n        elif isinstance(obj, (int, basestring)):\n            results = [item for item in list_element.getchildren() if\n                       item.findtext(\"id\") == str(obj) or\n                       item.findtext(\"name\") == obj]\n\n        if len(results) == 1:\n            list_element.remove(results[0])\n        elif len(results) > 1:\n            raise ValueError(\"There is more than one matching object at that \"\n                             \"path!\")", "docstring": "Remove an object from a list element.\n\nArgs:\nobj: Accepts JSSObjects, id's, and names\nlist_element: Accepts an Element or a string path to that\nelement", "source": "juraj-google-style"}
{"code": "def expression(value):\n        \n        if isinstance(value, Expression):\n            \n            \n            return Expression(value._type, value._value)\n        if hasattr(value, 'spl_json'):\n            sj = value.spl_json()\n            return Expression(sj['type'], sj['value'])\n        return Expression('splexpr', value)", "docstring": "Create an SPL expression.\n\nArgs:\nvalue: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value.\n\nReturns:\nExpression: SPL expression from `value`.", "source": "juraj-google-style"}
{"code": "def getColorHSV(name):\n    try:\n        x = getColorInfoList()[getColorList().index(name.upper())]\n    except:\n        return ((- 1), (- 1), (- 1))\n    r = (x[1] / 255.0)\n    g = (x[2] / 255.0)\n    b = (x[3] / 255.0)\n    cmax = max(r, g, b)\n    V = round((cmax * 100), 1)\n    cmin = min(r, g, b)\n    delta = (cmax - cmin)\n    if (delta == 0):\n        hue = 0\n    elif (cmax == r):\n        hue = (60.0 * (((g - b) / delta) % 6))\n    elif (cmax == g):\n        hue = (60.0 * (((b - r) / delta) + 2))\n    else:\n        hue = (60.0 * (((r - g) / delta) + 4))\n    H = int(round(hue))\n    if (cmax == 0):\n        sat = 0\n    else:\n        sat = (delta / cmax)\n    S = int(round((sat * 100)))\n    return (H, S, V)", "docstring": "Retrieve the hue, saturation, value triple of a color name.\n\nReturns:\na triple (degree, percent, percent). If not found (-1, -1, -1) is returned.", "source": "codesearchnet"}
{"code": "def to_lasio(self, keys=None, basis=None):\n    l = lasio.LASFile()\n    l.well.DATE = str(datetime.datetime.today())\n    for (obj, dic) in LAS_FIELDS.items():\n        if (obj == 'data'):\n            continue\n        for (attr, (sect, item)) in dic.items():\n            value = getattr(getattr(self, obj), attr, None)\n            try:\n                getattr(l, sect)[item].value = value\n            except:\n                h = lasio.HeaderItem(item, '', value, '')\n                getattr(l, sect)[item] = h\n    l.header['Curves'] = []\n    if (basis is None):\n        basis = self.survey_basis(keys=keys)\n    try:\n        l.add_curve('DEPT', basis)\n    except:\n        raise Exception('Please provide a depth basis.')\n    setattr(l.well, 'STRT', basis[0])\n    setattr(l.well, 'STOP', basis[(- 1)])\n    setattr(l.well, 'STEP', (basis[1] - basis[0]))\n    other = ''\n    if (keys is None):\n        keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)]\n    else:\n        keys = utils.flatten_list(keys)\n    for k in keys:\n        d = self.data[k]\n        if (getattr(d, 'null', None) is not None):\n            d[np.isnan(d)] = d.null\n        try:\n            new_data = np.copy(d.to_basis_like(basis))\n        except:\n            pass\n        try:\n            descr = getattr(d, 'description', '')\n            l.add_curve(k.upper(), new_data, unit=d.units, descr=descr)\n        except:\n            try:\n                other += ('{}\\n'.format(k.upper()) + d.to_csv())\n            except:\n                pass\n    if other:\n        l.other = other\n    return l", "docstring": "Makes a lasio object from the current well.\n\nArgs:\nbasis (ndarray): Optional. The basis to export the curves in. If\nyou don't specify one, it will survey all the curves with\n``survey_basis()``.\nkeys (list): List of strings: the keys of the data items to\ninclude, if not all of them. You can have nested lists, such\nas you might use for ``tracks`` in ``well.plot()``.\n\nReturns:\nlasio. The lasio object.", "source": "codesearchnet"}
{"code": "def save(self, file_prefix: tensor_lib.Tensor, options: 'checkpoint_options.CheckpointOptions | None'=None) -> ops.Operation:\n    options = options or checkpoint_options.CheckpointOptions()\n    with ops.device('CPU'):\n        sharded_suffix = array_ops.where(string_ops.regex_full_match(file_prefix, '^s3:\n        tmp_checkpoint_prefix = string_ops.string_join([file_prefix, sharded_suffix])\n        registered_paths = {saver_name: registered_saver_filename(file_prefix, saver_name) for saver_name in self._registered_savers}\n\n    def save_fn() -> ops.Operation:\n        saved_prefixes = []\n        for saver_name, (save_fn, _) in self._registered_savers.items():\n            maybe_saved_prefixes = save_fn(registered_paths[saver_name])\n            if maybe_saved_prefixes is not None:\n                flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes)\n                if not all((tensor_util.is_tf_type(x) and x.dtype == dtypes.string for x in flattened_saved_prefixes)):\n                    raise ValueError(f'Registered saver must return a (maybe empty) list of string type tensors. Got {maybe_saved_prefixes}.')\n                saved_prefixes.extend(flattened_saved_prefixes)\n        shards_by_task = self._get_shards_by_task(options.experimental_sharding_callback)\n        num_shards = sum([len(shards) for _, shards in shards_by_task])\n        metrics.AddNumCheckpointShardsWritten(num_shards=num_shards)\n        num_shards_tensor = constant_op.constant(num_shards, name='num_shards')\n        sharded_saves = []\n        shard_idx = 0\n        for task, shards in shards_by_task:\n            for shard in shards:\n                with ops.device(task):\n                    shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard_idx, num_shards_tensor)\n                    shard_idx += 1\n                saved_prefixes.append(shard_prefix)\n                sharded_saves.append(_single_shard_save(shard_prefix, shard, task, options))\n        with ops.control_dependencies(sharded_saves):\n            tensor_device_spec = list(self._shardable_tensors_by_task.keys())[-1]\n            merge_device_spec = options.experimental_io_device or saveable_object_util.set_cpu0(tensor_device_spec.to_string())\n            with ops.device(merge_device_spec):\n                return gen_io_ops.merge_v2_checkpoints(saved_prefixes, file_prefix, delete_old_dirs=True)\n    if context.executing_eagerly() and self._num_unique_tasks > 1:\n\n        @def_function.function(jit_compile=False)\n        def tf_function_save() -> None:\n            save_fn()\n        tf_function_save()\n    else:\n        return save_fn()", "docstring": "Save the saveable objects to a checkpoint with `file_prefix`.\n\nArgs:\nfile_prefix: A string or scalar string Tensor containing the prefix to\nsave under.\noptions: Optional `CheckpointOptions` object.\nReturns:\nAn `Operation`, or None when executing eagerly.", "source": "github-repos"}
{"code": "def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy\n    image_num_patches = [image_size_to_num_patches(image_size=imsize, grid_pinpoints=self.config.image_grid_pinpoints, patch_size=self.config.vision_config.image_size) for imsize in image_sizes]\n    if pixel_values.dim() == 5:\n        _pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]\n        pixel_values = torch.cat(_pixel_values_list, dim=0)\n    elif pixel_values.dim() != 4:\n        raise ValueError(f'pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions')\n    image_features = self.vision_tower(pixel_values, output_hidden_states=True)\n    if isinstance(vision_feature_layer, int):\n        selected_image_feature = image_features.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [image_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        selected_image_feature = torch.cat(hs_pool, dim=-1)\n    if vision_feature_select_strategy == 'default':\n        selected_image_feature = selected_image_feature[:, 1:]\n    elif vision_feature_select_strategy == 'full':\n        selected_image_feature = selected_image_feature\n    image_features = self.multi_modal_projector(selected_image_feature)\n    image_features = torch.split(image_features, image_num_patches, dim=0)\n    image_features, feature_lens = self.pack_image_features(image_features, image_sizes, vision_feature_select_strategy=vision_feature_select_strategy, image_newline=self.image_newline)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, num_patches, channels, height, width)`)\nThe tensors corresponding to the input images.\nimage_sizes (`torch.Tensor` of shape `(num_images, 2)`)\nActual image size of each images (H, W).\nvision_feature_layer (`Union[int, List[int]]`, *optional*):\nThe index of the layer to select the vision feature. If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`, *optional*):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nimage_features (List[`torch.Tensor`]): List of image feature tensor, each contains all the visual feature of all patches\nand are of shape `(num_patches, image_length, embed_dim)`).", "source": "github-repos"}
{"code": "def decode(obj, content_type):\n    try:\n        decoder = _decoders_map[content_type]\n        return decoder(obj)\n    except KeyError:\n        raise _errors.UnsupportedFormatError(content_type)", "docstring": "Decode an object ton a one of the default content types to a numpy array.\n\nArgs:\nobj (object): to be decoded.\ncontent_type (str): content type to be used.\n\nReturns:\nnp.array: decoded object.", "source": "codesearchnet"}
{"code": "def profile_setting_default_args(ij):\n    profile_default_args = OrderedDict()\n    profile_default_args['api_default_org'] = '$env.API_DEFAULT_ORG'\n    profile_default_args['api_access_id'] = '$env.API_ACCESS_ID'\n    profile_default_args['api_secret_key'] = '$envs.API_SECRET_KEY'\n    profile_default_args['tc_api_path'] = '$env.TC_API_PATH'\n    profile_default_args['tc_docker'] = False\n    profile_default_args['tc_in_path'] = 'log'\n    profile_default_args['tc_log_level'] = 'debug'\n    profile_default_args['tc_log_path'] = 'log'\n    profile_default_args['tc_log_to_api'] = False\n    profile_default_args['tc_out_path'] = 'log'\n    profile_default_args['tc_proxy_external'] = False\n    profile_default_args['tc_proxy_host'] = '$env.TC_PROXY_HOST'\n    profile_default_args['tc_proxy_port'] = '$env.TC_PROXY_PORT'\n    profile_default_args['tc_proxy_password'] = '$envs.TC_PROXY_PASSWORD'\n    profile_default_args['tc_proxy_tc'] = False\n    profile_default_args['tc_proxy_username'] = '$env.TC_PROXY_USERNAME'\n    profile_default_args['tc_temp_path'] = 'log'\n    if (ij.get('runtimeLevel') == 'Playbook'):\n        profile_default_args['tc_playbook_db_type'] = 'Redis'\n        profile_default_args['tc_playbook_db_context'] = str(uuid4())\n        profile_default_args['tc_playbook_db_path'] = '$env.DB_PATH'\n        profile_default_args['tc_playbook_db_port'] = '$env.DB_PORT'\n        profile_default_args['tc_playbook_out_variables'] = ''\n    return profile_default_args", "docstring": "Build the default args for this profile.\n\nArgs:\nij (dict): The install.json contents.\n\nReturns:\ndict: The default args for a Job or Playbook App.", "source": "codesearchnet"}
{"code": "def url(self):\n    base_url = 'https:\n    archived_at = self._get_archived_at()\n    if (archived_at and archived_at.startswith('<')):\n        archived_at = archived_at[1:]\n    if (archived_at and archived_at.endswith('>')):\n        archived_at = archived_at[:(- 1)]\n    if (archived_at and archived_at.startswith('http')):\n        return archived_at\n    elif archived_at:\n        return (base_url + archived_at)\n    else:\n        return None", "docstring": "An URL to the email in HyperKitty\n\nReturns:\nstr or None: A relevant URL.", "source": "codesearchnet"}
{"code": "def _find_executable_or_die(executable_name: str, executable_path: Optional[str]=None) -> str:\n    if executable_path:\n        return str(pathlib.Path(executable_path).resolve(strict=True))\n    resolved_path_to_exe = _find_executable(executable_name)\n    if resolved_path_to_exe is None:\n        raise RuntimeError(f'Could not find executable `{executable_name}`! Please change your $PATH or pass the path directly like`--{executable_name}_path=path/to/executable.')\n    logging.info('Found path to %s at %s', executable_name, resolved_path_to_exe)\n    return resolved_path_to_exe", "docstring": "Finds executable and resolves symlinks or raises RuntimeError.\n\nResolving symlinks is sometimes necessary for finding system headers.\n\nArgs:\nexecutable_name: The name of the executable that we want to find.\nexecutable_path: If not None, the path to the executable.\n\nReturns:\nThe path to the executable we are looking for, after symlinks are resolved.\nRaises:\nRuntimeError: if path to the executable cannot be found.", "source": "github-repos"}
{"code": "def __init__(self, file_pattern, interval=360.0, has_deduplication=True, start_timestamp=Timestamp.now(), stop_timestamp=MAX_TIMESTAMP, match_updated_files=False, apply_windowing=False, empty_match_treatment=EmptyMatchTreatment.ALLOW):\n    self.file_pattern = file_pattern\n    self.interval = interval\n    self.has_deduplication = has_deduplication\n    self.start_ts = start_timestamp\n    self.stop_ts = stop_timestamp\n    self.match_upd = match_updated_files\n    self.apply_windowing = apply_windowing\n    self.empty_match_treatment = empty_match_treatment\n    _LOGGER.warning('Matching Continuously is stateful, and can scale poorly. Consider using Pub/Sub Notifications (https:", "docstring": "Initializes a MatchContinuously transform.\n\nArgs:\nfile_pattern: The file path to read from.\ninterval: Interval at which to check for files in seconds.\nhas_deduplication: Whether files already read are discarded or not.\nstart_timestamp: Timestamp for start file checking.\nstop_timestamp: Timestamp after which no more files will be checked.\nmatch_updated_files: (When has_deduplication is set to True) whether match\nfile with timestamp changes.\napply_windowing: Whether each element should be assigned to\nindividual window. If false, all elements will reside in global window.", "source": "github-repos"}
{"code": "def load_weights_from_hdf5_group(f, layers):\n    if 'keras_version' in f.attrs:\n        original_keras_version = f.attrs['keras_version']\n        if hasattr(original_keras_version, 'decode'):\n            original_keras_version = original_keras_version.decode('utf8')\n    else:\n        original_keras_version = '1'\n    if 'backend' in f.attrs:\n        original_backend = f.attrs['backend']\n        if hasattr(original_backend, 'decode'):\n            original_backend = original_backend.decode('utf8')\n    else:\n        original_backend = None\n    filtered_layers = []\n    for layer in layers:\n        weights = _legacy_weights(layer)\n        if weights:\n            filtered_layers.append(layer)\n    layer_names = load_attributes_from_hdf5_group(f, 'layer_names')\n    filtered_layer_names = []\n    for name in layer_names:\n        g = f[name]\n        weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n        if weight_names:\n            filtered_layer_names.append(name)\n    layer_names = filtered_layer_names\n    if len(layer_names) != len(filtered_layers):\n        raise ValueError('You are trying to load a weight file containing ' + str(len(layer_names)) + ' layers into a model with ' + str(len(filtered_layers)) + ' layers.')\n    weight_value_tuples = []\n    for k, name in enumerate(layer_names):\n        g = f[name]\n        weight_names = load_attributes_from_hdf5_group(g, 'weight_names')\n        weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]\n        layer = filtered_layers[k]\n        symbolic_weights = _legacy_weights(layer)\n        weight_values = preprocess_weights_for_loading(layer, weight_values, original_keras_version, original_backend)\n        if len(weight_values) != len(symbolic_weights):\n            raise ValueError('Layer \n        weight_value_tuples += zip(symbolic_weights, weight_values)\n    backend.batch_set_value(weight_value_tuples)", "docstring": "Implements topological (order-based) weight loading.\n\nArgs:\nf: A pointer to a HDF5 group.\nlayers: a list of target layers.\n\nRaises:\nValueError: in case of mismatch between provided layers\nand weights file.", "source": "github-repos"}
{"code": "def remove(self, uids: Iterable[int]) -> None:\n    for uid in uids:\n        self._recent.discard(uid)\n        self._flags.pop(uid, None)", "docstring": "Remove any session flags for the given message.\n\nArgs:\nuids: The message UID values.", "source": "codesearchnet"}
{"code": "def set_value(self, text):\n    if self.single_line:\n        text = text.replace('\\n', '')\n    self.set_text(text)", "docstring": "Sets the text content.\n\nArgs:\ntext (str): The string content that have to be appended as standard child identified by the key 'text'", "source": "codesearchnet"}
{"code": "class CsvPipelineDataFormat(PipelineDataFormat):\n\n    def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False):\n        super().__init__(output_path, input_path, column, overwrite=overwrite)\n\n    def __iter__(self):\n        with open(self.input_path, 'r') as f:\n            reader = csv.DictReader(f)\n            for row in reader:\n                if self.is_multi_columns:\n                    yield {k: row[c] for k, c in self.column}\n                else:\n                    yield row[self.column[0]]\n\n    def save(self, data: List[dict]):\n        \n        with open(self.output_path, 'w') as f:\n            if len(data) > 0:\n                writer = csv.DictWriter(f, list(data[0].keys()))\n                writer.writeheader()\n                writer.writerows(data)", "docstring": "Support for pipelines using CSV data format.\n\nArgs:\noutput_path (`str`): Where to save the outgoing data.\ninput_path (`str`): Where to look for the input data.\ncolumn (`str`): The column to read.\noverwrite (`bool`, *optional*, defaults to `False`):\nWhether or not to overwrite the `output_path`.", "source": "github-repos"}
{"code": "def set_bias(self, bias):\n    self.x_offset += (bias - self._bias)\n    self._bias = bias\n    self._build_cdict()", "docstring": "Adjusts the image bias.\n\nBias determines where the color changes start.  At low bias, low\nintensities (i.e., low pixel values) will have non-zero color\ndifferences, while at high bias only high pixel values will have\nnon-zero differences\n\nArgs:\nbias: float\nA number between 0 and 1.  Note that upon initialization the\ncolormap has a default bias of 0.5.\n\nReturns: void", "source": "codesearchnet"}
{"code": "def get_numeric_feature_names(example):\n  \n  numeric_features = ('float_list', 'int64_list')\n  features = get_example_features(example)\n  return sorted([\n      feature_name for feature_name in features\n      if features[feature_name].WhichOneof('kind') in numeric_features\n  ])", "docstring": "Returns a list of feature names for float and int64 type features.\n\nArgs:\nexample: An example.\n\nReturns:\nA list of strings of the names of numeric features.", "source": "juraj-google-style"}
{"code": "def get_atom_map(structure):\n    syms = [site.specie.symbol for site in structure]\n    unique_pot_atoms = []\n    [unique_pot_atoms.append(i) for i in syms if (not unique_pot_atoms.count(i))]\n    atom_map = {}\n    for (i, atom) in enumerate(unique_pot_atoms):\n        atom_map[atom] = (i + 1)\n    return atom_map", "docstring": "Returns a dict that maps each atomic symbol to a unique integer starting\nfrom 1.\n\nArgs:\nstructure (Structure)\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def _convert_args(handler, args):\n    args = list(args)\n    params = inspect.signature(handler).parameters\n    for (i, (arg, name)) in enumerate(zip(args, params)):\n        default = params[name].default\n        annotation = params[name].annotation\n        if (annotation != inspect.Parameter.empty):\n            if (isinstance(annotation, type) and (annotation != str)):\n                args[i] = annotation(arg)\n        elif (default != inspect.Parameter.empty):\n            if ((default is not None) and (not isinstance(default, str))):\n                args[i] = type(default)(arg)\n    return args", "docstring": "Convert a list of command arguments to types specified by the handler.\n\nArgs:\nhandler: a command handler function.\nargs: the list of string arguments to pass to handler.\n\nReturns:\nA new list containing `args` that have been converted to the expected type\nfor `handler`. For each function parameter of `handler` that has either an\nexplicit type annotation or a non-None default value, the corresponding\nelement in `args` is converted to that type.", "source": "codesearchnet"}
{"code": "def cmPrecision(cm, average=True):\n    \n\n    \n    cm = cm.type(torch.float64)\n    precision = cm.diag() / (cm.sum(dim=0) + 1e-15)\n    if average:\n        return precision.mean()\n    return precision", "docstring": "Calculates precision using :class:`~ignite.metrics.ConfusionMatrix` metric.\nArgs:\ncm (ConfusionMatrix): instance of confusion matrix metric\naverage (bool, optional): if True metric value is averaged over all classes\nReturns:\nMetricsLambda", "source": "juraj-google-style"}
{"code": "def retrieve_all(self, subset=None):\n    get_object = self.factory.get_object\n    obj_class = self.obj_class\n    full_objects = [get_object(obj_class, list_obj.id, subset) for list_obj in self]\n    return JSSObjectList(self.factory, obj_class, full_objects)", "docstring": "Return a list of all JSSListData elements as full JSSObjects.\n\nThis can take a long time given a large number of objects,\nand depending on the size of each object. Subsetting to only\ninclude the data you need can improve performance.\n\nArgs:\nsubset: For objects which support it, a list of sub-tags to\nrequest, or an \"&\" delimited string, (e.g.\n\"general&purchasing\").  Default to None.", "source": "codesearchnet"}
{"code": "def get_start_time_metric(result: PipelineResult, namespace: str, name: str) -> int:\n    distributions = result.metrics().query(MetricsFilter().with_namespace(namespace).with_name(name))['distributions']\n    min_list = list(map(lambda m: m.result.min, distributions))\n    return min(min_list) if len(min_list) > 0 else -1", "docstring": "get the start time out of all times recorded by the specified distribution\nmetric\n\nArgs:\nresult: the PipelineResult which metrics are read from\nnamespace: a string representing the namespace of wanted metric\nname: a string representing the  name of the wanted metric\n\nReturns:\nthe smallest time in the metric or -1 if it doesn't exist", "source": "github-repos"}
{"code": "def run(self, row, **kwargs):\n    self.source = row\n    kwargs['output'] = self.__graph__()\n    super(CSVRowProcessor, self).run(**kwargs)\n    return kwargs['output']", "docstring": "Methods takes a row and depending if a dict or list,\nruns RML rules.\n\nArgs:\n-----\nrow(Dict, List): Row from CSV Reader", "source": "codesearchnet"}
{"code": "def _construct_w(self, inputs):\n    weight_shape = (self._kernel_shape + (1, 1))\n    if ('w' not in self._initializers):\n        self._initializers['w'] = create_weight_initializer(weight_shape[:2], dtype=inputs.dtype)\n    w = tf.get_variable('w', shape=weight_shape, dtype=inputs.dtype, initializer=self._initializers['w'], partitioner=self._partitioners.get('w', None), regularizer=self._regularizers.get('w', None))\n    return w", "docstring": "Construct the convolution weight matrix.\n\nFigures out the shape of the weight matrix, initialize it, and return it.\n\nArgs:\ninputs: A Tensor of shape `data_format` and of type `tf.float16`,\n`tf.bfloat16` or `tf.float32`.\n\nReturns:\nw: A weight matrix of the same type as `inputs` and of shape\n[kernel_shape, 1, 1].", "source": "codesearchnet"}
{"code": "def load_snippet(self, name, package):\n    if hasattr(self, name):\n        raise SnippetError(self, ('Attribute \"%s\" already exists, please use a different name.' % name))\n    self.services.snippets.add_snippet_client(name, package)", "docstring": "Starts the snippet apk with the given package name and connects.\n\nExamples:\n\n.. code-block:: python\n\nad.load_snippet(\nname='maps', package='com.google.maps.snippets')\nad.maps.activateZoom('3')\n\nArgs:\nname: string, the attribute name to which to attach the snippet\nclient. E.g. `name='maps'` attaches the snippet client to\n`ad.maps`.\npackage: string, the package name of the snippet apk to connect to.\n\nRaises:\nSnippetError: Illegal load operations are attempted.", "source": "codesearchnet"}
{"code": "def _make_pred_succ_maps(self, node):\n    pred_map = {e[2]['wire']: e[0] for e in self._multi_graph.in_edges(nbunch=node, data=True)}\n    succ_map = {e[2]['wire']: e[1] for e in self._multi_graph.out_edges(nbunch=node, data=True)}\n    return (pred_map, succ_map)", "docstring": "Return predecessor and successor dictionaries.\n\nArgs:\nnode (DAGNode): reference to multi_graph node\n\nReturns:\ntuple(dict): tuple(predecessor_map, successor_map)\nThese map from wire (Register, int) to predecessor (successor)\nnodes of n.", "source": "codesearchnet"}
{"code": "def write_file(self, filepath, filename=None, directory=None):\n        \n        arcname = None\n        if filename or directory:\n            directory = directory.rstrip(\"/\") + \"/\" if directory else \"\"\n            filename = filename or os.path.basename(filepath)\n            arcname = \"{}{}\".format(directory, filename)\n        self._copy_to_zipfile(filepath, arcname=arcname)\n        return arcname or filepath", "docstring": "write_file: Write local file to zip\nArgs:\nfilepath: (str) location to local file\ndirectory: (str) directory in zipfile to write file to (optional)\nReturns: path to file in zip\n\nNote: filepath must be a relative path", "source": "juraj-google-style"}
{"code": "def load_config(self, config):\n        \n        for k, v in config.items():\n            if hasattr(self, k):\n                raise DeviceError(\n                    self,\n                    ('Attribute %s already exists with value %s, cannot set '\n                     'again.') % (k, getattr(self, k)))\n            setattr(self, k, v)", "docstring": "Add attributes to the AndroidDevice object based on config.\n\nArgs:\nconfig: A dictionary representing the configs.\n\nRaises:\nError: The config is trying to overwrite an existing attribute.", "source": "juraj-google-style"}
{"code": "def Log(self, format_str, *args):\n    \n    log_entry = rdf_flow_objects.FlowLogEntry(\n        client_id=self.rdf_flow.client_id,\n        flow_id=self.rdf_flow.flow_id,\n        hunt_id=self.rdf_flow.parent_hunt_id,\n        message=format_str % args)\n    data_store.REL_DB.WriteFlowLogEntries([log_entry])\n    if self.rdf_flow.parent_hunt_id:\n      db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)", "docstring": "Logs the message using the flow's standard logging.\n\nArgs:\nformat_str: Format string\n*args: arguments to the format string", "source": "juraj-google-style"}
{"code": "def threw(self, error_type=None):\n        \n        if not error_type:\n            return True if len(self.exceptions) > 0 else False\n        else:\n            return uch.obj_in_list(self.exceptions, error_type)", "docstring": "Determining whether the exception is thrown\nArgs:\nerror_type:\nNone: checking without specified exception\nSpecified Exception\nReturn: Boolean", "source": "juraj-google-style"}
{"code": "def get_creation_date_tags(url, domain, as_dicts=False):\n    creation_date_tags = [mementoweb_api_tags(url), get_whois_tags(domain)]\n    creation_date_tags = sorted(sum(creation_date_tags, []), key=(lambda x: x.date))\n    if (not as_dicts):\n        return creation_date_tags\n    return [item._as_dict() for item in creation_date_tags]", "docstring": "Put together all data sources in this module and return it's output.\n\nArgs:\nurl (str): URL of the web. With relative paths and so on.\ndomain (str): Just the domain of the web.\nas_dicts (bool, default False): Convert output to dictionaries\ncompatible with :class:`.SourceString`?\n\nReturns:\nlist: Sorted list of :class:`TimeResource` objects or dicts.", "source": "codesearchnet"}
{"code": "def remove_collisions(self, min_dist=0.5):\n        \n        vfcoords = [v.frac_coords for v in self.vnodes]\n        sfcoords = self.structure.frac_coords\n        dist_matrix = self.structure.lattice.get_all_distances(vfcoords,\n                                                               sfcoords)\n        all_dist = np.min(dist_matrix, axis=1)\n        new_vnodes = []\n        for i, v in enumerate(self.vnodes):\n            if all_dist[i] > min_dist:\n                new_vnodes.append(v)\n        self.vnodes = new_vnodes", "docstring": "Remove vnodes that are too close to existing atoms in the structure\n\nArgs:\nmin_dist(float): The minimum distance that a vertex needs to be\nfrom existing atoms.", "source": "juraj-google-style"}
{"code": "def _cast_to_frameset(cls, other):\n    if isinstance(other, FrameSet):\n        return other\n    try:\n        return FrameSet(other)\n    except Exception:\n        return NotImplemented", "docstring": "Private method to simplify comparison operations.\n\nArgs:\nother (:class:`FrameSet` or set or frozenset or or iterable): item to be compared\n\nReturns:\n:class:`FrameSet`\n\nRaises:\n:class:`NotImplemented`: if a comparison is impossible", "source": "codesearchnet"}
{"code": "def load(fh, model):\n    \n    graphs = penman.load(fh, cls=XMRSCodec)\n    xs = [model.from_triples(g.triples()) for g in graphs]\n    return xs", "docstring": "Deserialize PENMAN graphs from a file (handle or filename)\n\nArgs:\nfh: filename or file object\nmodel: Xmrs subclass instantiated from decoded triples\nReturns:\na list of objects (of class *model*)", "source": "juraj-google-style"}
{"code": "def xresnet18(pretrained=False, **kwargs):\n    model = XResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['xresnet18']))\n    return model", "docstring": "Constructs a XResNet-18 model.\n\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "codesearchnet"}
{"code": "def __init__(self, dims):\n    if not isinstance(dims, tuple):\n        raise TypeError('The dimensions passed to DummyMultiDimensionalLSTM should be a tuple of ints.')\n    self._dims = dims\n    self._output_size = tensor_shape.TensorShape(self._dims)\n    self._state_size = (tensor_shape.TensorShape(self._dims), tensor_shape.TensorShape(self._dims))", "docstring": "Initialize the Multi-dimensional LSTM cell.\n\nArgs:\ndims: tuple that contains the dimensions of the output of the cell,\nwithout including 'Time' or 'Batch' dimensions.", "source": "github-repos"}
{"code": "def get_device_details(self, device):\n    if not isinstance(device, PhysicalDevice):\n        raise ValueError('device must be a tf.config.PhysicalDevice, but got: %s' % (device,))\n    if self._physical_device_to_index is None or device not in self._physical_device_to_index:\n        raise ValueError('The PhysicalDevice must be one obtained from calling `tf.config.list_physical_devices`, but got: %s' % (device,))\n    index = self._physical_device_to_index[device]\n    details = pywrap_tfe.TF_GetDeviceDetails(index)\n    if 'compute_capability' in details:\n        try:\n            major, minor = details['compute_capability'].split('.')\n            details['compute_capability'] = (int(major), int(minor))\n        except ValueError as exc:\n            raise RuntimeError('Device returned compute capability an in invalid format: %s' % details['compute_capability']) from exc\n    return details", "docstring": "Returns details about a physical devices.\n\nArgs:\ndevice: A `tf.config.PhysicalDevice` returned by\n`tf.config.list_physical_devices` or `tf.config.get_visible_devices`.\n\nReturns:\nA dict with string keys.", "source": "github-repos"}
{"code": "def _define_step(self, done, score, summary):\n    if (done.shape.ndims == 0):\n        done = done[None]\n    if (score.shape.ndims == 0):\n        score = score[None]\n    score_mean = streaming_mean.StreamingMean((), tf.float32)\n    with tf.control_dependencies([done, score, summary]):\n        done_score = tf.gather(score, tf.where(done)[(:, 0)])\n        submit_score = tf.cond(tf.reduce_any(done), (lambda : score_mean.submit(done_score)), tf.no_op)\n    with tf.control_dependencies([submit_score]):\n        mean_score = tf.cond(self._report, score_mean.clear, float)\n        steps_made = tf.shape(score)[0]\n        next_step = self._step.assign_add(steps_made)\n    with tf.control_dependencies([mean_score, next_step]):\n        return (tf.identity(summary), mean_score, next_step, steps_made)", "docstring": "Combine operations of a phase.\n\nKeeps track of the mean score and when to report it.\n\nArgs:\ndone: Tensor indicating whether current score can be used.\nscore: Tensor holding the current, possibly intermediate, score.\nsummary: Tensor holding summary string to write if not an empty string.\n\nReturns:\nTuple of summary tensor, mean score, and new global step. The mean score\nis zero for non reporting steps.", "source": "codesearchnet"}
{"code": "def UninstallDriver(bundle_name):\n  \n  km = objc.KextManager()\n\n  cf_bundle_name = km.PyStringToCFString(bundle_name)\n  status = km.iokit.KextManagerUnloadKextWithIdentifier(cf_bundle_name)\n  km.dll.CFRelease(cf_bundle_name)\n  return status", "docstring": "Calls into the IOKit to unload a kext by its name.\n\nArgs:\nbundle_name: The bundle identifier of the kernel extension as defined in\nInfo.plist field CFBundleIdentifier.\nReturns:\nThe error code from the library call. objc.OS_SUCCESS if successfull.", "source": "juraj-google-style"}
{"code": "def sg_summary_param(tensor, prefix=None, name=None):\n    r\n    \n    prefix = '' if prefix is None else prefix + '/'\n    \n    name = prefix + _pretty_name(tensor) if name is None else prefix + name\n    \n    _scalar(name + '/abs', tf.reduce_mean(tf.abs(tensor)))\n    _histogram(name + '/abs-h', tf.abs(tensor))", "docstring": "r\"\"\"Register `tensor` to summary report as `parameters`\n\nArgs:\ntensor: A `Tensor` to log as parameters\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def setScales(self,scales=None,term_num=None):\n        \n        if scales==None:\n            for term_i in range(self.n_terms):\n                n_scales = self.vd.getTerm(term_i).getNumberScales()\n                self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales)))\n        elif term_num==None:\n            assert scales.shape[0]==self.vd.getNumberScales(), 'incompatible shape'\n            index = 0\n            for term_i in range(self.n_terms):\n                index1 = index+self.vd.getTerm(term_i).getNumberScales()\n                self.vd.getTerm(term_i).setScales(scales[index:index1])\n                index = index1\n        else:\n            assert scales.shape[0]==self.vd.getTerm(term_num).getNumberScales(), 'incompatible shape'\n            self.vd.getTerm(term_num).setScales(scales)", "docstring": "get random initialization of variances based on the empirical trait variance\n\nArgs:\nscales:     if scales==None: set them randomly,\nelse: set scales to term_num (if term_num==None: set to all terms)\nterm_num:   set scales to term_num", "source": "juraj-google-style"}
{"code": "def embedding_lookup(self, x, means):\n    \n    x_means_hot = self.nearest_neighbor(x, means)\n    x_means_hot_flat = tf.reshape(\n        x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])\n    x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)\n    x_means = tf.transpose(x_means, [1, 0, 2])\n    q_loss = tf.reduce_mean(\n        tf.squared_difference(tf.stop_gradient(x), x_means))\n    e_loss = tf.reduce_mean(\n        tf.squared_difference(x, tf.stop_gradient(x_means)))\n    return x_means_hot, x_means, q_loss, e_loss", "docstring": "Compute nearest neighbors and loss for training the embeddings.\n\nArgs:\nx: Batch of encoder continuous latent states sliced/projected into\nshape\n[-1, num_blocks, block_dim].\nmeans: Embedding means.\n\nReturns:\nThe nearest neighbor in one hot form, the nearest neighbor\nitself, the\ncommitment loss, embedding training loss.", "source": "juraj-google-style"}
{"code": "def get_dataframe(self, md5, compress='lz4'):\n        \n        \n        sample = self.data_store.get_sample(md5)\n        if not sample:\n            raise WorkBench.DataNotFound(\"Could not find %s in the data store\", md5)\n        if not compress:\n            return sample['raw_bytes']\n        else:\n            compress_df = lz4.dumps(sample['raw_bytes'])\n            print 'Info: DataFrame compression %.0f%%' % (len(compress_df)*100.0/float(len(sample['raw_bytes'])))\n            return compress_df", "docstring": "Return a dataframe from the DataStore. This is just a convenience method\nthat uses get_sample internally.\nArgs:\nmd5: the md5 of the dataframe\ncompress: compression to use: (defaults to 'lz4' but can be set to None)\nReturns:\nA msgpack'd Pandas DataFrame\nRaises:\nWorkbench.DataNotFound if the dataframe is not found.", "source": "juraj-google-style"}
{"code": "def peek_native(make):\n\n    def peek(service, container, _stack=None):\n        return make(service.peekNative(container))\n    return peek", "docstring": "Deserializer factory for types which state can be natively serialized.\n\nArguments:\n\nmake (callable): type constructor.\n\nReturns:\n\ncallable: deserializer (`peek` routine)", "source": "codesearchnet"}
{"code": "def _get_params(self, validator_parameter, name_prefix):\n    params_validator = self.request.get(validator_parameter)\n    user_params = {}\n    for key in self.request.arguments():\n        if key.startswith(name_prefix):\n            values = self.request.get_all(key)\n            adjusted_key = key[len(name_prefix):]\n            if (len(values) == 1):\n                user_params[adjusted_key] = values[0]\n            else:\n                user_params[adjusted_key] = values\n    if params_validator:\n        resolved_validator = util.for_name(params_validator)\n        resolved_validator(user_params)\n    return user_params", "docstring": "Retrieves additional user-supplied params for the job and validates them.\n\nArgs:\nvalidator_parameter: name of the request parameter which supplies\nvalidator for this parameter set.\nname_prefix: common prefix for all parameter names in the request.\n\nRaises:\nAny exception raised by the 'params_validator' request parameter if\nthe params fail to validate.\n\nReturns:\nThe user parameters.", "source": "codesearchnet"}
{"code": "def _GetXY(fd):\n    try:\n        rc = struct.unpack(b'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'junk'))\n        return (rc[1], rc[0]) if rc else None\n    except:\n        return None", "docstring": "Returns the terminal (x,y) size for fd.\n\nArgs:\nfd: The terminal file descriptor.\n\nReturns:\nThe terminal (x,y) size for fd or None on error.", "source": "github-repos"}
{"code": "def parallel_concat(processor_list: Sequence[Processor]) -> Processor:\n    if not processor_list:\n        raise ValueError('processor_list is empty')\n    return _ParallelProcessor(processor_list)", "docstring": "Create a sequence of processors to be run in parallel.\n\nThe output is the concatenation of all processors, i.e.:\n\nparallel_concat([p1, p2])(stream) -> [p1(stream), p2(stream)]\n\nArgs:\nprocessor_list: list of processors.\n\nReturns:\nA processor consisting of the parallel run of all the processors in the\nlist. The execution is sequential from the first processor to the last and\nthe result of each processor is concatenated", "source": "github-repos"}
{"code": "def _update_step(self, sequence):\n    (observ, action, old_policy_params, reward, advantage) = sequence['sequence']\n    length = sequence['length']\n    old_policy = self._policy_type(**old_policy_params)\n    (value_loss, value_summary) = self._value_loss(observ, reward, length)\n    network = self._network(observ, length)\n    (policy_loss, policy_summary) = self._policy_loss(old_policy, network.policy, action, advantage, length)\n    network_loss = network.get('loss', 0.0)\n    loss = ((policy_loss + value_loss) + tf.reduce_mean(network_loss))\n    (gradients, variables) = zip(*self._optimizer.compute_gradients(loss))\n    optimize = self._optimizer.apply_gradients(zip(gradients, variables))\n    summary = tf.summary.merge([value_summary, policy_summary, tf.summary.histogram('network_loss', network_loss), tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)), tf.summary.scalar('gradient_norm', tf.global_norm(gradients)), utility.gradient_summaries(zip(gradients, variables))])\n    with tf.control_dependencies([optimize]):\n        return [tf.identity(x) for x in (value_loss, policy_loss, summary)]", "docstring": "Compute the current combined loss and perform a gradient update step.\n\nThe sequences must be a dict containing the keys `length` and `sequence`,\nwhere the latter is a tuple containing observations, actions, parameters of\nthe behavioral policy, rewards, and advantages.\n\nArgs:\nsequence: Sequences of episodes or chunks of episodes.\n\nReturns:\nTuple of value loss, policy loss, and summary tensor.", "source": "codesearchnet"}
{"code": "def approximate_gradient(f, variables, delta=0.1):\n\n    def var_gradient(var):\n        \n\n        def mapper_func(i):\n            \n            stencil = _five_point_stencil(f, var, i, delta)\n            inner_sum = tf.nest.map_structure(tf.math.reduce_sum, tf.nest.flatten(stencil))\n            outer_sum = tf.math.reduce_sum(tf.stack(inner_sum))\n            entry_derivative = tf.reduce_sum(outer_sum)\n            return entry_derivative\n        derivatives = tf.map_fn(mapper_func, tf.range(tf.size(var)), fn_output_signature=tf.float32)\n        return tf.reshape(derivatives, tf.shape(var))\n    return tf.nest.map_structure(var_gradient, variables)", "docstring": "Approximates the gradient of f using five point stencil.\n\nSuppose the input function returns a possibly nested structure `r` under\ngradient tape `t`.  Then this function returns an approximation to\n`t.gradient(r, variables, unconnected_gradients=tf.UnconnectedGradients.ZERO)`\n\nArgs:\nf: Callable taking no arguments and returning a possibly nested structure\nwhose atomic elements are `tf.Tensor`.\nvariables: Possibly nested structure of `tf.Variable` in which to\ndifferentiate `f`.\ndelta: Size of the fundamental perturbation in the stencil.\n\nReturns:\nThe approximate gradient.  Has the same structure as the return from a\ncorresponding call to `tf.GradientTape().gradient`.", "source": "github-repos"}
{"code": "def decode_field(self, field, value):\n    for decoder in _GetFieldCodecs(field, 'decoder'):\n        result = decoder(field, value)\n        value = result.value\n        if result.complete:\n            return value\n    if isinstance(field, messages.MessageField):\n        field_value = self.decode_message(field.message_type, json.dumps(value))\n    elif isinstance(field, messages.EnumField):\n        value = (GetCustomJsonEnumMapping(field.type, json_name=value) or value)\n        try:\n            field_value = super(_ProtoJsonApiTools, self).decode_field(field, value)\n        except messages.DecodeError:\n            if (not isinstance(value, six.string_types)):\n                raise\n            field_value = None\n    else:\n        field_value = super(_ProtoJsonApiTools, self).decode_field(field, value)\n    return field_value", "docstring": "Decode the given JSON value.\n\nArgs:\nfield: a messages.Field for the field we're decoding.\nvalue: a python value we'd like to decode.\n\nReturns:\nA value suitable for assignment to field.", "source": "codesearchnet"}
{"code": "def sigmoid(x):\n    if any_symbolic_tensors((x,)):\n        return Sigmoid().symbolic_call(x)\n    return backend.nn.sigmoid(x)", "docstring": "Sigmoid activation function.\n\nIt is defined as `f(x) = 1 / (1 + exp(-x))`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])\n>>> keras.ops.sigmoid(x)\narray([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)", "source": "github-repos"}
{"code": "def __init__(self, entry_list, weights=None):\n        \n        if weights is None:\n            self.weights = [1.0] * len(entry_list)\n        else:\n            self.weights = weights\n        self.entry_list = entry_list", "docstring": "Initializes a MultiEntry.\n\nArgs:\nentry_list ([PourbaixEntry]): List of component PourbaixEntries\nweights ([float]): Weights associated with each entry. Default is None", "source": "juraj-google-style"}
{"code": "def create_upload_url(success_path, max_bytes_per_blob=None, max_bytes_total=None, **options):\n    fut = create_upload_url_async(success_path, max_bytes_per_blob=max_bytes_per_blob, max_bytes_total=max_bytes_total, **options)\n    return fut.get_result()", "docstring": "Create upload URL for POST form.\n\nArgs:\nsuccess_path: Path within application to call when POST is successful\nand upload is complete.\nmax_bytes_per_blob: The maximum size in bytes that any one blob in the\nupload can be or None for no maximum size.\nmax_bytes_total: The maximum size in bytes that the aggregate sizes of all\nof the blobs in the upload can be or None for no maximum size.\n**options: Options for create_rpc().\n\nReturns:\nThe upload URL.\n\nRaises:\nTypeError: If max_bytes_per_blob or max_bytes_total are not integral types.\nValueError: If max_bytes_per_blob or max_bytes_total are not\npositive values.", "source": "codesearchnet"}
{"code": "def cluster_spec(self):\n    if self._override_client:\n        client = self._override_client\n    else:\n        from kubernetes import config as k8sconfig\n        from kubernetes import client as k8sclient\n        k8sconfig.load_kube_config()\n        client = k8sclient.CoreV1Api()\n    cluster_map = {}\n    for tf_job in self._job_to_label_mapping:\n        all_pods = []\n        for selector in self._job_to_label_mapping[tf_job]:\n            ret = client.list_pod_for_all_namespaces(label_selector=selector)\n            selected_pods = []\n            for pod in sorted(ret.items, key=lambda x: x.metadata.name):\n                if pod.status.phase == 'Running':\n                    selected_pods.append('%s:%s' % (pod.status.host_ip, self._tf_server_port))\n                else:\n                    raise RuntimeError('Pod \"%s\" is not running; phase: \"%s\"' % (pod.metadata.name, pod.status.phase))\n            all_pods.extend(selected_pods)\n        cluster_map[tf_job] = all_pods\n    return server_lib.ClusterSpec(cluster_map)", "docstring": "Returns a ClusterSpec object based on the latest info from Kubernetes.\n\nWe retrieve the information from the Kubernetes master every time this\nmethod is called.\n\nReturns:\nA ClusterSpec containing host information returned from Kubernetes.\n\nRaises:\nRuntimeError: If any of the pods returned by the master is not in the\n`Running` phase.", "source": "github-repos"}
{"code": "def check_configuration(ctx, base_key, needed_keys):\n    if (base_key not in ctx.keys()):\n        exit(\"[{}ERROR{}] missing configuration for '{}'\".format(ERROR_COLOR, RESET_COLOR, base_key))\n    if (ctx.releaser is None):\n        exit(\"[{}ERROR{}] empty configuration for '{}' found\".format(ERROR_COLOR, RESET_COLOR, base_key))\n    for my_key in needed_keys:\n        if (my_key not in ctx[base_key].keys()):\n            exit(\"[{}ERROR{}] missing configuration key '{}.{}'\".format(ERROR_COLOR, RESET_COLOR, base_key, my_key))", "docstring": "Confrim a valid configuration.\n\nArgs:\nctx (invoke.context):\nbase_key (str): the base configuration key everything is under.\nneeded_keys (list): sub-keys of the base key that are checked to make\nsure they exist.", "source": "codesearchnet"}
{"code": "def execute_before(self, sensor_graph, scope_stack):\n        \n\n        parent = scope_stack[-1]\n        new_scope = Scope(\"Configuration Scope\", sensor_graph, parent.allocator, parent)\n        new_scope.add_identifier('current_slot', self.slot)\n        scope_stack.append(new_scope)", "docstring": "Execute statement before children are executed.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph that we are building or\nmodifying\nscope_stack (list(Scope)): A stack of nested scopes that may influence\nhow this statement allocates clocks or other stream resources.", "source": "juraj-google-style"}
{"code": "def format_counts(counts, header=None):\n    counts_dict = {}\n    for (key, val) in counts.items():\n        key = format_counts_memory(key, header)\n        counts_dict[key] = val\n    return counts_dict", "docstring": "Format a single experiment result coming from backend to present\nto the Qiskit user.\n\nArgs:\ncounts (dict): counts histogram of multiple shots\nheader (dict): the experiment header dictionary containing\nuseful information for postprocessing.\n\nReturns:\ndict: a formatted counts", "source": "codesearchnet"}
{"code": "def to_frame(self, *args):\n    if (sys.version_info < (3, 6, 0)):\n        from collections import OrderedDict\n        impls = OrderedDict()\n        for (name, obj) in self.items():\n            impls[name] = obj._impl\n    else:\n        impls = get_impls(self)\n    return _to_frame_inner(impls, args)", "docstring": "Convert the cells in the view into a DataFrame object.\n\nIf ``args`` is not given, this method returns a DataFrame that\nhas an Index or a MultiIndex depending of the number of\ncells parameters and columns each of which corresponds to each\ncells included in the view.\n\n``args`` can be given to calculate cells values and limit the\nDataFrame indexes to the given arguments.\n\nThe cells in this view may have different number of parameters,\nbut parameters shared among multiple cells\nmust appear in the same position in all the parameter lists.\nFor example,\nHaving ``foo()``, ``bar(x)`` and ``baz(x, y=1)`` is okay\nbecause the shared parameter ``x`` is always the first parameter,\nbut this method does not work if the view has ``quz(x, z=2, y=1)``\ncells in addition to the first three cells, because ``y`` appears\nin different positions.\n\nArgs:\nargs(optional): multiple arguments,\nor an iterator of arguments to the cells.", "source": "codesearchnet"}
{"code": "def __init__(self, header, values):\n        \n        assert isinstance(header, Header), \\\n            'header must be a Ladybug Header object. Got {}'.format(type(header))\n        assert isinstance(header.analysis_period, AnalysisPeriod), \\\n            'header of {} must have an analysis_period.'.format(self.__class__.__name__)\n        assert header.analysis_period.st_hour == 0, \\\n            'analysis_period start hour of {} must be 0. Got {}'.format(\n                self.__class__.__name__, header.analysis_period.st_hour)\n        assert header.analysis_period.end_hour == 23, \\\n            'analysis_period end hour of {} must be 23. Got {}'.format(\n                self.__class__.__name__, header.analysis_period.end_hour)\n\n        self._header = header\n        self.values = values\n        self._datetimes = None\n        self._validated_a_period = True", "docstring": "Initialize hourly discontinuous collection.\n\nArgs:\nheader: A Ladybug Header object.  Note that this header\nmust have an AnalysisPeriod on it that aligns with the\nlist of values.\nvalues: A list of values.  Note that the length of this list\nmust align with the AnalysisPeriod on the header.", "source": "juraj-google-style"}
{"code": "def prepare_soap_body(self, method, parameters, namespace):\n        \n\n        tags = []\n        for name, value in parameters:\n            tag = \"<{name}>{value}</{name}>\".format(\n                name=name, value=escape(\"%s\" % value, {'\"': \"&quot;\"}))\n            \n            \n            tags.append(tag)\n\n        wrapped_params = \"\".join(tags)\n        \n        if namespace is not None:\n            soap_body = (\n                '<{method} xmlns=\"{namespace}\">'\n                '{params}'\n                '</{method}>'.format(\n                    method=method, params=wrapped_params,\n                    namespace=namespace\n                ))\n        else:\n            soap_body = (\n                '<{method}>'\n                '{params}'\n                '</{method}>'.format(\n                    method=method, params=wrapped_params\n                ))\n\n        return soap_body", "docstring": "Prepare the SOAP message body for sending.\n\nArgs:\nmethod (str): The name of the method to call.\nparameters (list): A list of (name, value) tuples containing\nthe parameters to pass to the method.\nnamespace (str): tThe XML namespace to use for the method.\n\nReturns:\nstr: A properly formatted SOAP Body.", "source": "juraj-google-style"}
{"code": "def __init__(self, group_type, name, **kwargs):\n        \n        self._utils = TcExUtils()\n        self._name = name\n        self._type = group_type\n        self._group_data = {'name': name, 'type': group_type}\n        \n        for arg, value in kwargs.items():\n            self.add_key_value(arg, value)\n        \n        if kwargs.get('xid') is None:\n            self._group_data['xid'] = str(uuid.uuid4())\n        self._attributes = []\n        self._labels = []\n        self._file_content = None\n        self._tags = []\n        self._processed = False", "docstring": "Initialize Class Properties.\n\nArgs:\ngroup_type (str): The ThreatConnect define Group type.\nname (str): The name for this Group.\nxid (str, kwargs): The external id for this Group.", "source": "juraj-google-style"}
{"code": "def _skip_parameter_matching(self) -> bool:\n    if self.signature.type_params:\n        return False\n    if self.ctx.options.analyze_annotated:\n        return False\n    return self.signature.has_return_annotation or self.full_name == '__init__'", "docstring": "Check whether we should skip parameter matching.\n\nThis is use to skip parameter matching for function calls in the context of\ninference (pyi generation). This is to optimize the case where we don't\nneed to match parameters in cases which the function has explicit type\nannotations, meaning that we don't need to infer the type.\n\nReturns:\nTrue if we should skip parameter matching.", "source": "github-repos"}
{"code": "def load_config(logdir):\n  \n  \n  config_path = logdir and os.path.join(logdir, 'config.yaml')\n  if not config_path or not tf.gfile.Exists(config_path):\n    message = (\n        'Cannot resume an existing run since the logging directory does not '\n        'contain a configuration file.')\n    raise IOError(message)\n  with tf.gfile.FastGFile(config_path, 'r') as file_:\n    config = yaml.load(file_, Loader=yaml.Loader)\n  message = 'Resume run and write summaries and checkpoints to {}.'\n  tf.logging.info(message.format(config.logdir))\n  return config", "docstring": "Load a configuration from the log directory.\n\nArgs:\nlogdir: The logging directory containing the configuration file.\n\nRaises:\nIOError: The logging directory does not contain a configuration file.\n\nReturns:\nConfiguration object.", "source": "juraj-google-style"}
{"code": "def obtain_capture_by_value_ops(dataset):\n\n    def capture_by_value(op):\n        return op.outputs[0].dtype in TENSOR_TYPES_ALLOWLIST or op.type in OP_TYPES_ALLOWLIST\n    return _traverse(dataset, capture_by_value)", "docstring": "Given an input dataset, finds all allowlisted ops used for construction.\n\nAllowlisted ops are stateful ops which are known to be safe to capture by\nvalue.\n\nArgs:\ndataset: Dataset to find allowlisted stateful ops for.\n\nReturns:\nA list of variant_tensor producing dataset ops used to construct this\ndataset.", "source": "github-repos"}
{"code": "def get_custom_objects():\n    return GLOBAL_CUSTOM_OBJECTS", "docstring": "Retrieves a live reference to the global dictionary of custom objects.\n\nCustom objects set using `custom_object_scope()` are not added to the\nglobal dictionary of custom objects, and will not appear in the returned\ndictionary.\n\nExample:\n\n```python\nget_custom_objects().clear()\nget_custom_objects()['MyObject'] = MyObject\n```\n\nReturns:\nGlobal dictionary mapping registered class names to classes.", "source": "github-repos"}
{"code": "def _assign_method(self, resource_class, method_type):\n        \n\n        \n        method_name = resource_class.get_method_name(\n            resource_class, method_type)\n        valid_status_codes = getattr(\n            resource_class.Meta,\n            'valid_status_codes',\n            DEFAULT_VALID_STATUS_CODES\n        )\n\n        \n        \n        def get(self, method_type=method_type, method_name=method_name,\n                valid_status_codes=valid_status_codes,\n                resource=resource_class, data=None, uid=None, **kwargs):\n            return self.call_api(\n                method_type, method_name,\n                valid_status_codes, resource,\n                data, uid=uid, **kwargs)\n\n        def put(self, method_type=method_type, method_name=method_name,\n                valid_status_codes=valid_status_codes,\n                resource=resource_class, data=None, uid=None, **kwargs):\n            return self.call_api(\n                method_type, method_name,\n                valid_status_codes, resource,\n                data, uid=uid, **kwargs)\n\n        def post(self, method_type=method_type, method_name=method_name,\n                 valid_status_codes=valid_status_codes,\n                 resource=resource_class, data=None, uid=None, **kwargs):\n            return self.call_api(\n                method_type, method_name,\n                valid_status_codes, resource,\n                data, uid=uid, **kwargs)\n\n        def patch(self, method_type=method_type, method_name=method_name,\n                  valid_status_codes=valid_status_codes,\n                  resource=resource_class, data=None, uid=None, **kwargs):\n            return self.call_api(\n                method_type, method_name,\n                valid_status_codes, resource,\n                data, uid=uid, **kwargs)\n\n        def delete(self, method_type=method_type, method_name=method_name,\n                   valid_status_codes=valid_status_codes,\n                   resource=resource_class, data=None, uid=None, **kwargs):\n            return self.call_api(\n                method_type, method_name,\n                valid_status_codes, resource,\n                data, uid=uid, **kwargs)\n\n        method_map = {\n            'GET': get,\n            'PUT': put,\n            'POST': post,\n            'PATCH': patch,\n            'DELETE': delete\n        }\n\n        setattr(\n            self, method_name,\n            types.MethodType(method_map[method_type], self)\n        )", "docstring": "Using reflection, assigns a new method to this class.\n\nArgs:\nresource_class: A resource class\nmethod_type: The HTTP method type", "source": "juraj-google-style"}
{"code": "def sendline(self, text):\n    logger.debug(\"Sending input '{0}' to '{1}'\".format(text, self.name))\n    try:\n        return self._spawn.sendline(text)\n    except pexpect.exceptions.EOF as e:\n        logger.debug('Raising termination exception.')\n        raise TerminationException(instance=self, real_exception=e, output=self.get_output())\n    except pexpect.exceptions.TIMEOUT as e:\n        logger.debug('Raising timeout exception.')\n        raise TimeoutException(instance=self, real_exception=e, output=self.get_output())\n    except Exception as e:\n        logger.debug(('Sending input failed: ' + str(e)))\n        raise NestedException(instance=self, real_exception=e, output=self.get_output())", "docstring": "Sends an input line to the running program, including os.linesep.\n\nArgs:\ntext (str): The input text to be send.\n\nRaises:\nTerminationException: The program terminated before / while / after sending the input.\nNestedException: An internal problem occured while waiting for the output.", "source": "codesearchnet"}
{"code": "def ModulePath(module_name):\n    module = importlib.import_module(module_name)\n    path = inspect.getfile(module)\n    if compatibility.PY2:\n        path = path.decode('utf-8')\n    if os.path.basename(path).startswith('__init__.'):\n        path = os.path.dirname(path)\n    if path.endswith('.pyc'):\n        path = (path[:(- 4)] + '.py')\n    return path", "docstring": "Computes a path to the specified module.\n\nArgs:\nmodule_name: A name of the module to get the path for.\n\nReturns:\nA path to the specified module.\n\nRaises:\nImportError: If specified module cannot be imported.", "source": "codesearchnet"}
{"code": "def next(self):\n    options = {}\n    if self._buffer_size:\n        options['read_buffer_size'] = self._buffer_size\n    if self._account_id:\n        options['_account_id'] = self._account_id\n    while True:\n        filename = self._next_file()\n        if (filename is None):\n            raise StopIteration()\n        if (self._path_filter and (not self._path_filter.accept(self._slice_ctx, filename))):\n            continue\n        try:\n            start_time = time.time()\n            handle = cloudstorage.open(filename, **options)\n            self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC, (int((time.time() - start_time)) * 1000))\n            self._slice_ctx.incr(self.COUNTER_FILE_READ)\n            return handle\n        except cloudstorage.NotFoundError:\n            logging.warning('File %s may have been removed. Skipping file.', filename)\n            self._slice_ctx.incr(self.COUNTER_FILE_MISSING)", "docstring": "Returns a handler to the next file.\n\nNon existent files will be logged and skipped. The file might have been\nremoved after input splitting.\n\nReturns:\nThe next input from this input reader in the form of a cloudstorage\nReadBuffer that supports a File-like interface (read, readline, seek,\ntell, and close). An error may be raised if the file can not be opened.\n\nRaises:\nStopIteration: The list of files has been exhausted.", "source": "codesearchnet"}
{"code": "def float_value_convert(dictin, dropfailedvalues=False):\n    \n    \n    return key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)", "docstring": "Convert values of dictionary to floats\n\nArgs:\ndictin (DictUpperBound): Input dictionary\ndropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.\n\nReturns:\nDict: Dictionary with values converted to floats", "source": "juraj-google-style"}
{"code": "def git_ls_remote(self, uri, ref):\n        \n        logger.debug(\"Invoking git to retrieve commit id for repo %s...\", uri)\n        lsremote_output = subprocess.check_output(['git',\n                                                   'ls-remote',\n                                                   uri,\n                                                   ref])\n        if b\"\\t\" in lsremote_output:\n            commit_id = lsremote_output.split(b\"\\t\")[0]\n            logger.debug(\"Matching commit id found: %s\", commit_id)\n            return commit_id\n        else:\n            raise ValueError(\"Ref \\\"%s\\\" not found for repo %s.\" % (ref, uri))", "docstring": "Determine the latest commit id for a given ref.\n\nArgs:\nuri (string): git URI\nref (string): git ref\n\nReturns:\nstr: A commit id", "source": "juraj-google-style"}
{"code": "def tell(self):\n    self._check_open()\n    return self.position", "docstring": "Tell the file's current offset.\n\nReturns:\ncurrent offset in reading this file.\n\nRaises:\n``ValueError``: When this stream is closed.", "source": "github-repos"}
{"code": "def read_stream(self, start_offset=0, byte_count=None):\n    \n    try:\n      return self._api.object_download(self._bucket, self._key,\n                                       start_offset=start_offset, byte_count=byte_count)\n    except Exception as e:\n      raise e", "docstring": "Reads the content of this object as text.\n\nArgs:\nstart_offset: the start offset of bytes to read.\nbyte_count: the number of bytes to read. If None, it reads to the end.\nReturns:\nThe text content within the object.\nRaises:\nException if there was an error requesting the object's content.", "source": "juraj-google-style"}
{"code": "def v_cross(u, v):\n    '\\n    i = u[1]*v[2] - u[2]*v[1]\\n    j = u[2]*v[0] - u[0]*v[2]\\n    k = u[0]*v[1] - u[1]*v[0]\\n    '\n    i = '(({u1})*({v2}) - ({u2})*({v1}))'.format(u1=u[1], u2=u[2], v1=v[1], v2=v[2])\n    j = '(({u2})*({v0}) - ({u0})*({v2}))'.format(u0=u[0], u2=u[2], v0=v[0], v2=v[2])\n    k = '(({u0})*({v1}) - ({u1})*({v0}))'.format(u0=u[0], u1=u[1], v0=v[0], v1=v[1])\n    return [i, j, k]", "docstring": "muparser cross product function\n\nCompute the cross product of two 3x1 vectors\n\nArgs:\nu (list or tuple of 3 strings): first vector\nv (list or tuple of 3 strings): second vector\nReturns:\nA list containing a muparser string of the cross product", "source": "codesearchnet"}
{"code": "def get_tensor_size(self, tensor_name, partial_layout=None,\n                      mesh_dimension_to_size=None):\n    \n    return (self.get_tensor_dtype(tensor_name).size *\n            self.get_tensor_num_entries(tensor_name, partial_layout,\n                                        mesh_dimension_to_size))", "docstring": "The size of a tensor in bytes.\n\nIf partial_layout is specified, then mesh_dimension_to_size must also be. In\nthis case, the size on a single device is returned.\n\nArgs:\ntensor_name: a string, name of a tensor in the graph.\npartial_layout: an optional {string: string}, from MTF dimension name to\nmesh dimension name.\nmesh_dimension_to_size: an optional {string: int}, from mesh dimension\nname to size.\n\nReturns:\nan integer", "source": "juraj-google-style"}
{"code": "def calculate_cidr(start_address, end_address):\n    \n\n    tmp_addrs = []\n\n    try:\n\n        tmp_addrs.extend(summarize_address_range(\n            ip_address(start_address),\n            ip_address(end_address)))\n\n    except (KeyError, ValueError, TypeError):  \n\n        try:\n\n            tmp_addrs.extend(summarize_address_range(\n                ip_network(start_address).network_address,\n                ip_network(end_address).network_address))\n\n        except AttributeError:  \n\n            tmp_addrs.extend(summarize_address_range(\n                ip_network(start_address).ip,\n                ip_network(end_address).ip))\n\n    return [i.__str__() for i in collapse_addresses(tmp_addrs)]", "docstring": "The function to calculate a CIDR range(s) from a start and end IP address.\n\nArgs:\nstart_address (:obj:`str`): The starting IP address.\nend_address (:obj:`str`): The ending IP address.\n\nReturns:\nlist of str: The calculated CIDR ranges.", "source": "juraj-google-style"}
{"code": "def _deserialize_audience(audience_map):\n    for audience in audience_map.values():\n        (condition_structure, condition_list) = condition_helper.loads(audience.conditions)\n        audience.__dict__.update({'conditionStructure': condition_structure, 'conditionList': condition_list})\n    return audience_map", "docstring": "Helper method to de-serialize and populate audience map with the condition list and structure.\n\nArgs:\naudience_map: Dict mapping audience ID to audience object.\n\nReturns:\nDict additionally consisting of condition list and structure on every audience object.", "source": "codesearchnet"}
{"code": "def get_default_backend_config(appdirs):\n    return {'store': 'sqlalchemy', 'day_start': datetime.time(5, 30, 0), 'fact_min_delta': 1, 'tmpfile_path': os.path.join(appdirs.user_data_dir, '{}.tmp'.format(appdirs.appname)), 'db_engine': 'sqlite', 'db_path': os.path.join(appdirs.user_data_dir, '{}.sqlite'.format(appdirs.appname))}", "docstring": "Return a default config dictionary.\n\nArgs:\nappdirs (HamsterAppDirs): ``HamsterAppDirs`` instance encapsulating the apps details.\n\nReturns:\ndict: Dictionary with a default configuration.\n\nNote:\nThose defaults are independent of the particular config-store.", "source": "codesearchnet"}
{"code": "def __init__(self, curriculum_obj, batch_size, max_len, ops, token_by_char):\n    \n    \n    self._vocab_dict = collections.defaultdict(lambda: 0)\n    self._vocab_dict[self.UNK] = 0\n    self._inv_vocab_dict = collections.defaultdict(lambda: self.UNK)\n\n    self.curriculum_obj = curriculum_obj\n    self._max_seq_length = max_len\n    self._ops = ops\n    self._token_by_char = token_by_char\n    self._batch_size = batch_size\n\n    \n    num_token_digits = 1 if token_by_char else curriculum_obj.max_length\n    token_list = get_tokens(10 ** num_token_digits)\n    self.vocab_size = 1\n    for token in self.DEFAULT_START_TOKENS + token_list:\n      if token not in self._vocab_dict:\n        self._vocab_dict[token] = self.vocab_size\n        self._inv_vocab_dict[self.vocab_size] = token\n        self.vocab_size += 1", "docstring": "Creates a TokenDataSource instance.\n\nArgs:\ncurriculum_obj: (LTECurriculum) determines sample complexity.\nbatch_size: (int) Batch size to generate.\nmax_len: (int) This is the maximum size of any given sample sequence.\nops: (list(CodeOp)). Task operations that inherit from CodeOp().\ntoken_by_char: (bool) Whether to tokenize by char (\"detokenized\") or by\nkeyword, literals and numbers.", "source": "juraj-google-style"}
{"code": "def get_note(self, noteid, version=None):\n        \n        \n        params_version = \"\"\n        if version is not None:\n            params_version = '/v/' + str(version)\n\n        params = '/i/%s%s' % (str(noteid), params_version)\n        request = Request(DATA_URL+params)\n        request.add_header(self.header, self.get_token())\n        try:\n            response = urllib2.urlopen(request)\n        except HTTPError as e:\n            if e.code == 401:\n                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n            else:\n                return e, -1\n        except IOError as e:\n            return e, -1\n        note = json.loads(response.read().decode('utf-8'))\n        note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get(\"X-Simperium-Version\")))\n        \n        \n        if \"tags\" in note:\n            note[\"tags\"] = sorted(note[\"tags\"])\n\n        return note, 0", "docstring": "Method to get a specific note\n\nArguments:\n- noteid (string): ID of the note to get\n- version (int): optional version of the note to get\n\nReturns:\nA tuple `(note, status)`\n\n- note (dict): note object\n- status (int): 0 on success and -1 otherwise", "source": "juraj-google-style"}
{"code": "def update(self):\n    (data_format, data) = RuuviTagSensor.get_data(self._mac, self._bt_device)\n    if (data == self._data):\n        return self._state\n    self._data = data\n    if (self._data is None):\n        self._state = {}\n    else:\n        self._state = get_decoder(data_format).decode_data(self._data)\n    return self._state", "docstring": "Get lates data from the sensor and update own state.\n\nReturns:\ndict: Latest state", "source": "codesearchnet"}
{"code": "def run(self, dag):\n        \n        cx_runs = dag.collect_runs([\"cx\"])\n        for cx_run in cx_runs:\n            \n            partition = []\n            chunk = []\n            for i in range(len(cx_run) - 1):\n                chunk.append(cx_run[i])\n\n                qargs0 = cx_run[i].qargs\n                qargs1 = cx_run[i + 1].qargs\n\n                if qargs0 != qargs1:\n                    partition.append(chunk)\n                    chunk = []\n            chunk.append(cx_run[-1])\n            partition.append(chunk)\n            \n            for chunk in partition:\n                if len(chunk) % 2 == 0:\n                    for n in chunk:\n                        dag.remove_op_node(n)\n                else:\n                    for n in chunk[1:]:\n                        dag.remove_op_node(n)\n        return dag", "docstring": "Run one pass of cx cancellation on the circuit\n\nArgs:\ndag (DAGCircuit): the directed acyclic graph to run on.\nReturns:\nDAGCircuit: Transformed DAG.", "source": "juraj-google-style"}
{"code": "def all(x, axis=None, keepdims=False):\n    x = math_ops.cast(x, dtypes_module.bool)\n    return math_ops.reduce_all(x, axis, keepdims)", "docstring": "Bitwise reduction (logical AND).\n\nArgs:\nx: Tensor or variable.\naxis: axis along which to perform the reduction.\nkeepdims: whether the drop or broadcast the reduction axes.\n\nReturns:\nA uint8 tensor (0s and 1s).", "source": "github-repos"}
{"code": "def _get_or_create_arg_by_name(state, name, is_kwarg=False):\n    for arg in state.args + state.kwargs:\n        if arg.name == name:\n            return arg\n    arg = Namespace()\n    arg.name = name\n    arg.type.lines = []\n    arg.description.lines = []\n    if is_kwarg:\n        state.kwargs.append(arg)\n    else:\n        state.args.append(arg)\n    return arg", "docstring": "Gets or creates a new Arg.\n\nThese Arg objects (Namespaces) are turned into the ArgInfo namedtuples\nreturned by parse. Each Arg object is used to collect the name, type, and\ndescription of a single argument to the docstring's function.\n\nArgs:\nstate: The state of the parser.\nname: The name of the arg to create.\nis_kwarg: A boolean representing whether the argument is a keyword arg.\nReturns:\nThe new Arg.", "source": "github-repos"}
{"code": "def _contiguous_groups(\n        length: int,\n        comparator: Callable[[int, int], bool]\n) -> List[Tuple[int, int]]:\n    \n    result = []\n    start = 0\n    while start < length:\n        past = start + 1\n        while past < length and comparator(start, past):\n            past += 1\n        result.append((start, past))\n        start = past\n    return result", "docstring": "Splits range(length) into approximate equivalence classes.\n\nArgs:\nlength: The length of the range to split.\ncomparator: Determines if two indices have approximately equal items.\n\nReturns:\nA list of (inclusive_start, exclusive_end) range endpoints. Each\ncorresponds to a run of approximately-equivalent items.", "source": "juraj-google-style"}
{"code": "def delete_public_ip(access_token, subscription_id, resource_group, public_ip_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name,\n                        '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete a public ip addresses associated with a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\npublic_ip_name (str): Name of the public ip address resource.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def _CheckParserCanProcessFileEntry(self, parser, file_entry):\n    for filter_object in parser.FILTERS:\n        if filter_object.Match(file_entry):\n            return True\n    return False", "docstring": "Determines if a parser can process a file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry.\nparser (BaseParser): parser.\n\nReturns:\nbool: True if the file entry can be processed by the parser object.", "source": "codesearchnet"}
{"code": "def _pretty_print(key_val, sep=': ', min_col_width=39, text_width=None):\n    \n    if text_width is None:\n        text_width = get_terminal_size().columns\n    if text_width < min_col_width:\n        min_col_width = text_width\n    ncols = (text_width + 1) \n    colw = (text_width + 1) \n    ncols = min(ncols, len(key_val))\n\n    wrapper = TextWrapper(width=colw)\n    lines = []\n    for key, val in key_val:\n        if len(key) + len(sep) >= colw \n            wrapper.subsequent_indent = ' '\n        else:\n            wrapper.subsequent_indent = ' ' * (len(key) + len(sep))\n        lines.extend(wrapper.wrap('{}{}{}'.format(key, sep, val)))\n\n    chunks = []\n    for rem_col in range(ncols, 1, -1):\n        isep = ceil(len(lines) / rem_col)\n        while isep < len(lines) and lines[isep][0] == ' ':\n            isep += 1\n        chunks.append(lines[:isep])\n        lines = lines[isep:]\n    chunks.append(lines)\n    lines = zip_longest(*chunks, fillvalue='')\n\n    fmt = '|'.join(['{{:{}}}'.format(colw)] * (ncols - 1))\n    fmt += '|{}' if ncols > 1 else '{}'\n    print(*(fmt.format(*line) for line in lines), sep='\\n')", "docstring": "Print a iterable of key/values\n\nArgs:\nkey_val (list of (str, str)): the pairs of section names and text.\nsep (str): separator between section names and text.\nmin_col_width (int): minimal acceptable column width\ntext_width (int): text width to use. If set to None, will try to infer\nthe size of the terminal.", "source": "juraj-google-style"}
{"code": "def _ExpectedKeysForEntry(self, entry):\n    return [entry.name]", "docstring": "Generate a list of expected cache keys for this type of map.\n\nArgs:\nentry: A GroupMapEntry\n\nReturns:\nA list of strings", "source": "github-repos"}
{"code": "def forward(self, seq_length=None, position=None):\n    if position is None and seq_length is None:\n        raise ValueError('Either position or seq_length must be provided')\n    if position is None:\n        position = torch.arange(seq_length, dtype=torch.float32, device=self.inv_timescales.device).unsqueeze(0)\n    elif position.ndim != 2:\n        raise ValueError(f'position must be 2-dimensional, got shape {position.shape}')\n    scaled_time = position.view(*position.shape, 1) * self.inv_timescales.view(1, 1, -1)\n    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)\n    signal = F.pad(signal, (0, 0, 0, self.embedding_dims % 2))\n    return signal", "docstring": "Generates a Tensor of sinusoids with different frequencies.\n\nArgs:\nseq_length: an optional Python int defining the output sequence length.\nif the `position` argument is specified.\nposition: [B, seq_length], optional position for each token in the\nsequence, only required when the sequence is packed.\n\nReturns:\n[B, seqlen, D] if `position` is specified, else [1, seqlen, D]", "source": "github-repos"}
{"code": "def _HasTable(self, table_name):\n    \n    query = self._HAS_TABLE_QUERY.format(table_name)\n\n    self._cursor.execute(query)\n    return bool(self._cursor.fetchone())", "docstring": "Determines if a specific table exists.\n\nArgs:\ntable_name (str): name of the table.\n\nReturns:\nbool: True if the table exists, false otherwise.", "source": "juraj-google-style"}
{"code": "def handle_simple_responses(\n      self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):\n    \n    return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)", "docstring": "Accepts normal responses from the device.\n\nArgs:\ntimeout_ms: Timeout in milliseconds to wait for each response.\ninfo_cb: Optional callback for text sent from the bootloader.\n\nReturns:\nOKAY packet's message.", "source": "juraj-google-style"}
{"code": "def get_spd_dos(self):\n    spd_dos = {}\n    for atom_dos in self.pdos.values():\n        for (orb, pdos) in atom_dos.items():\n            orbital_type = _get_orb_type(orb)\n            if (orbital_type not in spd_dos):\n                spd_dos[orbital_type] = pdos\n            else:\n                spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)\n    return {orb: Dos(self.efermi, self.energies, densities) for (orb, densities) in spd_dos.items()}", "docstring": "Get orbital projected Dos.\n\nReturns:\ndict of {orbital: Dos}, e.g. {\"s\": Dos object, ...}", "source": "codesearchnet"}
{"code": "def bbox_transpose(bbox, axis, rows, cols):\n    \n    x_min, y_min, x_max, y_max = bbox\n    if axis != 0 and axis != 1:\n        raise ValueError('Axis must be either 0 or 1.')\n    if axis == 0:\n        bbox = [y_min, x_min, y_max, x_max]\n    if axis == 1:\n        bbox = [1 - y_max, 1 - x_max, 1 - y_min, 1 - x_min]\n    return bbox", "docstring": "Transposes a bounding box along given axis.\n\nArgs:\nbbox (tuple): A tuple (x_min, y_min, x_max, y_max).\naxis (int): 0 - main axis, 1 - secondary axis.\nrows (int): Image rows.\ncols (int): Image cols.", "source": "juraj-google-style"}
{"code": "def cudnn_compatible_gru(units, n_hidden, n_layers=1, trainable_initial_states=False, seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):\n    with tf.variable_scope(name, reuse=reuse):\n        if trainable_initial_states:\n            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])\n            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))\n        else:\n            init_h = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])\n        initial_h = (input_initial_h or init_h)\n        with tf.variable_scope('cudnn_gru', reuse=reuse):\n\n            def single_cell():\n                return tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(n_hidden)\n            cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(n_layers)])\n            units = tf.transpose(units, (1, 0, 2))\n            (h, h_last) = tf.nn.dynamic_rnn(cell=cell, inputs=units, time_major=True, initial_state=tuple(tf.unstack(initial_h, axis=0)))\n            h = tf.transpose(h, (1, 0, 2))\n            h_last = h_last[(- 1)]\n            if (seq_lengths is not None):\n                indices = tf.stack([tf.range(tf.shape(h)[0]), (seq_lengths - 1)], axis=1)\n                h_last = tf.gather_nd(h, indices)\n            return (h, h_last)", "docstring": "CuDNN Compatible GRU implementation.\nIt should be used to load models saved with CudnnGRUCell to run on CPU.\n\nArgs:\nunits: tf.Tensor with dimensions [B x T x F], where\nB - batch size\nT - number of tokens\nF - features\n\nn_hidden: dimensionality of hidden state\ntrainable_initial_states: whether to create a special trainable variable\nto initialize the hidden states of the network or use just zeros\nseq_lengths: tensor of sequence lengths with dimension [B]\nn_layers: number of layers\ninput_initial_h: initial hidden state, tensor\nname: name of the variable scope to use\nreuse:whether to reuse already initialized variable\n\nReturns:\nh - all hidden states along T dimension,\ntf.Tensor with dimensionality [B x T x F]\nh_last - last hidden state, tf.Tensor with dimensionality [B x H]", "source": "codesearchnet"}
{"code": "def _get(self, feed_item):\n    return self._api().get(profileId=self.profile_id, id=str(feed_item[self._id_field])).execute()", "docstring": "Fetches an item from CM.\n\nArgs:\nfeed_item: Feed item from Bulkdozer feed representing the item to fetch\nfrom CM.", "source": "github-repos"}
{"code": "def _build_mask_ds(mask, mask_offset):\n  \n  mask_ds = tf.data.Dataset.from_tensor_slices(mask)\n  mask_ds = mask_ds.repeat()\n  mask_ds = mask_ds.skip(mask_offset)\n  return mask_ds", "docstring": "Build the mask dataset to indicate which element to skip.\n\nArgs:\nmask: `tf.Tensor`, binary mask to apply to all following elements. This\nmask should have a length 100.\nmask_offset: `tf.Tensor`, Integer specifying from how much the mask\nshould be shifted for the first element.\n\nReturns:\nmask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip\nand True for examples to keep.", "source": "juraj-google-style"}
{"code": "def decode_message(self, message_type, encoded_message):\n    encoded_message = six.ensure_str(encoded_message)\n    if (not encoded_message.strip()):\n        return message_type()\n    dictionary = json.loads(encoded_message)\n    message = self.__decode_dictionary(message_type, dictionary)\n    message.check_initialized()\n    return message", "docstring": "Merge JSON structure to Message instance.\n\nArgs:\nmessage_type: Message to decode data to.\nencoded_message: JSON encoded version of message.\n\nReturns:\nDecoded instance of message_type.\n\nRaises:\nValueError: If encoded_message is not valid JSON.\nmessages.ValidationError if merged message is not initialized.", "source": "codesearchnet"}
{"code": "def get_version():\n    if (not INSTALLED):\n        try:\n            with open('version.txt', 'r') as v_fh:\n                return v_fh.read()\n        except Exception:\n            warnings.warn('Unable to resolve package version until installed', UserWarning)\n            return '0.0.0'\n    return p_version.get_version(HERE)", "docstring": "find current version information\n\nReturns:\n(str): version information", "source": "codesearchnet"}
{"code": "def _assert_obj_type(pub, name='pub', obj_type=DBPublication):\n    if (not isinstance(pub, obj_type)):\n        raise InvalidType(('`%s` have to be instance of %s, not %s!' % (name, obj_type.__name__, pub.__class__.__name__)))", "docstring": "Make sure, that `pub` is instance of the `obj_type`.\n\nArgs:\npub (obj): Instance which will be checked.\nname (str): Name of the instance. Used in exception. Default `pub`.\nobj_type (class): Class of which the `pub` should be instance. Default\n:class:`.DBPublication`.\n\nRaises:\nInvalidType: When the `pub` is not instance of `obj_type`.", "source": "codesearchnet"}
{"code": "def GetUserinfo(credentials, http=None):  \n    \n    http = http or httplib2.Http()\n    url = _GetUserinfoUrl(credentials)\n    \n    \n    response, content = http.request(url)\n    if response.status == http_client.BAD_REQUEST:\n        credentials.refresh(http)\n        url = _GetUserinfoUrl(credentials)\n        response, content = http.request(url)\n    return json.loads(content or '{}')", "docstring": "Get the userinfo associated with the given credentials.\n\nThis is dependent on the token having either the userinfo.email or\nuserinfo.profile scope for the given token.\n\nArgs:\ncredentials: (oauth2client.client.Credentials) incoming credentials\nhttp: (httplib2.Http, optional) http instance to use\n\nReturns:\nThe email address for this token, or None if the required scopes\naren't available.", "source": "juraj-google-style"}
{"code": "def AddRoute(self, short_name, long_name, route_type, route_id=None):\n    if (route_id is None):\n        route_id = util.FindUniqueId(self.routes)\n    route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name, route_type=route_type, route_id=route_id)\n    route.agency_id = self.GetDefaultAgency().agency_id\n    self.AddRouteObject(route)\n    return route", "docstring": "Add a route to this schedule.\n\nArgs:\nshort_name: Short name of the route, such as \"71L\"\nlong_name: Full name of the route, such as \"NW 21st Ave/St Helens Rd\"\nroute_type: A type such as \"Tram\", \"Subway\" or \"Bus\"\nroute_id: id of the route or None, in which case a unique id is picked\nReturns:\nA new Route object", "source": "codesearchnet"}
{"code": "def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:\n    \n\n    jsonl_flag, json_flag, yaml_flag = False, False, False\n    if fn == \"-\" or \"jsonl\" in fn:\n        jsonl_flag = True\n    elif \"json\" in fn:\n        json_flag = True\n    elif re.search(\"ya?ml\", fn):\n        yaml_flag = True\n    else:\n        log.error(\"Do not recognize nanopub file format - neither json nor jsonl format.\")\n        return {}\n\n    try:\n        if re.search(\"gz$\", fn):\n            f = gzip.open(fn, \"rt\")\n        else:\n            try:\n                f = click.open_file(fn, mode=\"rt\")\n            except Exception as e:\n                log.info(f\"Can not open file {fn}  Error: {e}\")\n                quit()\n\n        if jsonl_flag:\n            for line in f:\n                yield json.loads(line)\n        elif json_flag:\n            nanopubs = json.load(f)\n            for nanopub in nanopubs:\n                yield nanopub\n        elif yaml_flag:\n            nanopubs = yaml.load(f, Loader=yaml.SafeLoader)\n            for nanopub in nanopubs:\n                yield nanopub\n\n    except Exception as e:\n        log.error(f\"Could not open file: {fn}\")", "docstring": "Read file and generate nanopubs\n\nIf filename has *.gz, will read as a gzip file\nIf filename has *.jsonl*, will parsed as a JSONLines file\nIF filename has *.json*, will be parsed as a JSON file\nIf filename has *.yaml* or *.yml*,  will be parsed as a YAML file\n\nArgs:\nfilename (str): filename to read nanopubs from\n\nReturns:\nGenerator[Mapping[str, Any]]: generator of nanopubs in nanopub_bel JSON Schema format", "source": "juraj-google-style"}
{"code": "def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):\n    trial_hp_overrides = scoped_overrides.values()\n    loop_hp = create_loop_hparams()\n    model_hp_name = trial_hp_overrides.get('loop.generative_model_params', loop_hp.generative_model_params)\n    model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)\n    base_algo_params_name = trial_hp_overrides.get('loop.base_algo_params', loop_hp.base_algo_params)\n    algo_hp = registry.hparams(base_algo_params_name)\n    combined_hp = merge_unscoped_hparams(zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))\n    combined_hp.override_from_dict(trial_hp_overrides)\n    (loop_hp, model_hp, algo_hp) = split_scoped_hparams(HP_SCOPES, combined_hp)\n    model_hp_name = ('model_hp_%s' % str(trial_id))\n    dynamic_register_hparams(model_hp_name, model_hp)\n    loop_hp.generative_model_params = model_hp_name\n    algo_hp_name = ('algo_hp_%s' % str(trial_id))\n    dynamic_register_hparams(algo_hp_name, algo_hp)\n    loop_hp.base_algo_params = algo_hp_name\n    return loop_hp", "docstring": "Create HParams suitable for training loop from scoped HParams.\n\nArgs:\nscoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These\nparameters are overrides for the base HParams created by\ncreate_loop_hparams.\ntrial_id: str, trial identifier. This is used to register unique HParams\nnames for the underlying model and ppo HParams.\n\nReturns:\nHParams suitable for passing to training_loop.", "source": "codesearchnet"}
{"code": "def __deepcopy__(self, memo):\n    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n        new_values = []\n        for value in self._values:\n            with ops.device(value.device):\n                new_values.append(copy.deepcopy(value, memo))\n    copied_variable = type(self)(strategy=self._distribute_strategy, values=new_values, aggregation=self._aggregation, var_policy=copy.deepcopy(self._policy, memo))\n    memo[id(self)] = copied_variable\n    return copied_variable", "docstring": "Perform a deepcopy of the `DistributedVariable`.\n\nUnlike the deepcopy of a regular tf.Variable, this keeps the original\nstrategy and devices of the `DistributedVariable`.  To avoid confusion\nwith the behavior of deepcopy on a regular `Variable` (which does\ncopy into new devices), we only allow a deepcopy of a `DistributedVariable`\nwithin its originating strategy scope.\n\nArgs:\nmemo: The memoization object for `deepcopy`.\n\nReturns:\nA deep copy of the current `DistributedVariable`.\n\nRaises:\nRuntimeError: If trying to deepcopy into a different strategy.", "source": "github-repos"}
{"code": "def create_room(self, alias=None, is_public=False, invitees=None):\n        \n        response = self.api.create_room(alias=alias,\n                                        is_public=is_public,\n                                        invitees=invitees)\n        return self._mkroom(response[\"room_id\"])", "docstring": "Create a new room on the homeserver.\n\nArgs:\nalias (str): The canonical_alias of the room.\nis_public (bool):  The public/private visibility of the room.\ninvitees (str[]): A set of user ids to invite into the room.\n\nReturns:\nRoom\n\nRaises:\nMatrixRequestError", "source": "juraj-google-style"}
{"code": "def load_feature_lists(self, feature_lists):\n        \n\n        column_names = []\n        feature_ranges = []\n        running_feature_count = 0\n\n        for list_id in feature_lists:\n            feature_list_names = load_lines(self.features_dir + 'X_train_{}.names'.format(list_id))\n            column_names.extend(feature_list_names)\n            start_index = running_feature_count\n            end_index = running_feature_count + len(feature_list_names) - 1\n            running_feature_count += len(feature_list_names)\n            feature_ranges.append([list_id, start_index, end_index])\n\n        X_train = np.hstack([\n            load(self.features_dir + 'X_train_{}.pickle'.format(list_id))\n            for list_id in feature_lists\n        ])\n        X_test = np.hstack([\n            load(self.features_dir + 'X_test_{}.pickle'.format(list_id))\n            for list_id in feature_lists\n        ])\n\n        df_train = pd.DataFrame(X_train, columns=column_names)\n        df_test = pd.DataFrame(X_test, columns=column_names)\n\n        return df_train, df_test, feature_ranges", "docstring": "Load pickled features for train and test sets, assuming they are saved\nin the `features` folder along with their column names.\n\nArgs:\nfeature_lists: A list containing the names of the feature lists to load.\n\nReturns:\nA tuple containing 3 items: train dataframe, test dataframe,\nand a list describing the index ranges for the feature lists.", "source": "juraj-google-style"}
{"code": "def foreach_loop(self, context):\n        \n        logger.debug(\"starting\")\n\n        \n        \n        foreach = context.get_formatted_iterable(self.foreach_items)\n\n        foreach_length = len(foreach)\n\n        logger.info(f\"foreach decorator will loop {foreach_length} times.\")\n\n        for i in foreach:\n            logger.info(f\"foreach: running step {i}\")\n            \n            context['i'] = i\n            \n            \n            self.run_conditional_decorators(context)\n            logger.debug(f\"foreach: done step {i}\")\n\n        logger.debug(f\"foreach decorator looped {foreach_length} times.\")\n        logger.debug(\"done\")", "docstring": "Run step once for each item in foreach_items.\n\nOn each iteration, the invoked step can use context['i'] to get the\ncurrent iterator value.\n\nArgs:\ncontext: (pypyr.context.Context) The pypyr context. This arg will\nmutate.", "source": "juraj-google-style"}
{"code": "def tryload(self, cfgstr=None, on_error='raise'):\n        \n        cfgstr = self._rectify_cfgstr(cfgstr)\n        if self.enabled:\n            try:\n                if self.verbose > 1:\n                    self.log('[cacher] tryload fname={}'.format(self.fname))\n                return self.load(cfgstr)\n            except IOError:\n                if self.verbose > 0:\n                    self.log('[cacher] ... {} cache miss'.format(self.fname))\n            except Exception:\n                if self.verbose > 0:\n                    self.log('[cacher] ... failed to load')\n                if on_error == 'raise':\n                    raise\n                elif on_error == 'clear':\n                    self.clear(cfgstr)\n                    return None\n                else:\n                    raise KeyError('Unknown method on_error={}'.format(on_error))\n        else:\n            if self.verbose > 1:\n                self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))\n        return None", "docstring": "Like load, but returns None if the load fails due to a cache miss.\n\nArgs:\non_error (str): How to handle non-io errors errors. Either raise,\nwhich re-raises the exception, or clear which deletes the cache\nand returns None.", "source": "juraj-google-style"}
{"code": "def GetPasswdMap(self, since=None):\n    return PasswdUpdateGetter(self.conf).GetUpdates(source=self, search_base=self.conf['base'], search_filter=self.conf['filter'], search_scope=self.conf['scope'], since=since)", "docstring": "Return the passwd map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of maps.PasswdMap", "source": "github-repos"}
{"code": "def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2):\n        \n        all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol)\n        if all_kpts is not None:\n            return len(all_kpts)", "docstring": "Returns degeneracy of a given k-point based on structure symmetry\nArgs:\nkpoint (1x3 array): coordinate of the k-point\ncartesian (bool): kpoint is in cartesian or fractional coordinates\ntol (float): tolerance below which coordinates are considered equal\n\nReturns:\n(int or None): degeneracy or None if structure is not available", "source": "juraj-google-style"}
{"code": "def weights(self):\n    return self._dedup_weights(self._undeduplicated_weights)", "docstring": "Returns the list of all layer variables/weights.\n\nNote: This will not track the weights of nested `tf.Modules` that are not\nthemselves Keras layers.\n\nReturns:\nA list of variables.", "source": "github-repos"}
{"code": "def get_features_for_wav(self, wav_filename, model_settings, sess):\n    desired_samples = model_settings['desired_samples']\n    input_dict = {self.wav_filename_placeholder_: wav_filename, self.time_shift_padding_placeholder_: [[0, 0], [0, 0]], self.time_shift_offset_placeholder_: [0, 0], self.background_data_placeholder_: np.zeros([desired_samples, 1]), self.background_volume_placeholder_: 0, self.foreground_volume_placeholder_: 1}\n    data_tensor = sess.run([self.output_], feed_dict=input_dict)\n    return data_tensor", "docstring": "Applies the feature transformation process to the input_wav.\n\nRuns the feature generation process (generally producing a spectrogram from\nthe input samples) on the WAV file. This can be useful for testing and\nverifying implementations being run on other platforms.\n\nArgs:\nwav_filename: The path to the input audio file.\nmodel_settings: Information about the current model being trained.\nsess: TensorFlow session that was active when processor was created.\n\nReturns:\nNumpy data array containing the generated features.", "source": "github-repos"}
{"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor:\n    name = name or self._name + '_price'\n    with tf.name_scope(name):\n        discount_curve = cashflow_streams.get_discount_curve(self._discount_curve_type, market, self._mask)\n        reference_curve = cashflow_streams.get_discount_curve(self._reference_curve_type, market, self._reference_mask)\n        daycount_fractions = tf.expand_dims(self._daycount_fractions, axis=-1)\n        fwd_rate = reference_curve.forward_rate(self._accrual_start_date.expand_dims(axis=-1), self._accrual_end_date.expand_dims(axis=-1), day_count_fraction=daycount_fractions)\n        discount_at_settlement = discount_curve.discount_factor(self._accrual_start_date.expand_dims(axis=-1))\n        discount_at_settlement = tf.where(daycount_fractions > 0.0, discount_at_settlement, tf.zeros_like(discount_at_settlement))\n        discount_at_settlement = tf.squeeze(discount_at_settlement, axis=-1)\n        fwd_rate = tf.squeeze(fwd_rate, axis=-1)\n        return self._short_position * discount_at_settlement * self._notional_amount * (fwd_rate - self._fixed_rate) * self._daycount_fractions / (1.0 + self._daycount_fractions * fwd_rate)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA `Tensor` of shape `batch_shape`  containing the modeled price of each\nFRA contract based on the input market data.", "source": "github-repos"}
{"code": "def _calc_dir_size(path):\n    dir_size = 0\n    for (root, dirs, files) in os.walk(path):\n        for fn in files:\n            full_fn = os.path.join(root, fn)\n            dir_size += os.path.getsize(full_fn)\n    return dir_size", "docstring": "Calculate size of all files in `path`.\n\nArgs:\npath (str): Path to the directory.\n\nReturns:\nint: Size of the directory in bytes.", "source": "codesearchnet"}
{"code": "def delete_s3_bucket(client, resource):\n    if dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False):\n        client.delete_bucket(Bucket=resource.id)\n    return (ActionStatus.SUCCEED, resource.metrics())", "docstring": "Delete an S3 bucket\n\nThis function will try to delete an S3 bucket\n\nArgs:\nclient (:obj:`boto3.session.Session.client`): A boto3 client object\nresource (:obj:`Resource`): The resource object to terminate\n\nReturns:\n`ActionStatus`", "source": "codesearchnet"}
{"code": "def _load_stdlib_versions(self):\n    lines = self._store.load_stdlib_versions()\n    versions = {}\n    for line in lines:\n        line2 = line.split('\n        if not line2:\n            continue\n        match = re.fullmatch('(.+): (\\\\d)\\\\.(\\\\d+)(?:-(?:(\\\\d)\\\\.(\\\\d+))?)?', line2)\n        assert match\n        module, min_major, min_minor, max_major, max_minor = match.groups()\n        minimum = (int(min_major), int(min_minor))\n        maximum = (int(max_major), int(max_minor)) if max_major is not None and max_minor is not None else None\n        versions[module] = (minimum, maximum)\n    return versions", "docstring": "Loads the contents of typeshed/stdlib/VERSIONS.\n\nVERSIONS lists the stdlib modules with the Python version in which they were\nfirst added, in the format `{module}: {min_major}.{min_minor}-` or\n`{module}: {min_major}.{min_minor}-{max_major}.{max_minor}`.\n\nReturns:\nA mapping from module name to version range in the format\n{name: ((min_major, min_minor), (max_major, max_minor))}\nThe max tuple can be `None`.", "source": "github-repos"}
{"code": "def fetch_task_to_run(self):\n    if all((task.is_completed for task in self)):\n        raise StopIteration('All tasks completed.')\n    for task in self:\n        if task.can_run:\n            return task\n    logger.warning('Possible deadlock in fetch_task_to_run!')\n    return None", "docstring": "Returns the first task that is ready to run or\nNone if no task can be submitted at present\"\n\nRaises:\n`StopIteration` if all tasks are done.", "source": "codesearchnet"}
{"code": "def user_is_sponsor(self, user):\n    sponsors = self.get_true_sponsors()\n    for sponsor in sponsors:\n        sp_user = sponsor.user\n        if (sp_user == user):\n            return True\n    return False", "docstring": "Return whether the given user is a sponsor of the activity.\n\nReturns:\nBoolean", "source": "codesearchnet"}
{"code": "def _readable_flags(transport):\n    if ('flags' not in transport):\n        return None\n    _flag_list = []\n    flags = transport['flags']\n    if (flags & dpkt.tcp.TH_SYN):\n        if (flags & dpkt.tcp.TH_ACK):\n            _flag_list.append('syn_ack')\n        else:\n            _flag_list.append('syn')\n    elif (flags & dpkt.tcp.TH_FIN):\n        if (flags & dpkt.tcp.TH_ACK):\n            _flag_list.append('fin_ack')\n        else:\n            _flag_list.append('fin')\n    elif (flags & dpkt.tcp.TH_RST):\n        _flag_list.append('rst')\n    elif (flags & dpkt.tcp.TH_PUSH):\n        _flag_list.append('psh')\n    return _flag_list", "docstring": "Method that turns bit flags into a human readable list\n\nArgs:\ntransport (dict): transport info, specifically needs a 'flags' key with bit_flags\nReturns:\nlist: a list of human readable flags (e.g. ['syn_ack', 'fin', 'rst', ...]", "source": "codesearchnet"}
{"code": "def write(self, data, echo=None):\n        \n\n        if echo or (echo is None and self.echo):\n            sys.stdout.write(data.decode('latin1'))\n            sys.stdout.flush()\n        self.channel.write(data)", "docstring": "Write data to channel.\n\nArgs:\ndata(bytes): The data to write to the channel.\necho(bool): Whether to echo the written data to stdout.\n\nRaises:\nEOFError: If the channel was closed before all data was sent.", "source": "juraj-google-style"}
{"code": "def _refresh(self, http):\n        \n        if not self.store:\n            self._do_refresh_request(http)\n        else:\n            self.store.acquire_lock()\n            try:\n                new_cred = self.store.locked_get()\n\n                if (new_cred and not new_cred.invalid and\n                        new_cred.access_token != self.access_token and\n                        not new_cred.access_token_expired):\n                    logger.info('Updated access_token read from Storage')\n                    self._updateFromCredential(new_cred)\n                else:\n                    self._do_refresh_request(http)\n            finally:\n                self.store.release_lock()", "docstring": "Refreshes the access_token.\n\nThis method first checks by reading the Storage object if available.\nIf a refresh is still needed, it holds the Storage lock until the\nrefresh is completed.\n\nArgs:\nhttp: an object to be used to make HTTP requests.\n\nRaises:\nHttpAccessTokenRefreshError: When the refresh fails.", "source": "juraj-google-style"}
{"code": "def _request(self, method, resource_uri, **kwargs):\n        \n        data = kwargs.get('data')\n        response = method(self.API_BASE_URL + resource_uri,\n                          json=data, headers=self.headers)\n        response.raise_for_status()\n        return response.json()", "docstring": "Perform a method on a resource.\n\nArgs:\nmethod: requests.`method`\nresource_uri: resource endpoint\nRaises:\nHTTPError\nReturns:\nJSON Response", "source": "juraj-google-style"}
{"code": "def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):\n\n    class OrderedUniqueLoader(loader):\n        '\\n        Subclasses the given pyYAML `loader` class.\\n\\n        Validates all sibling keys to insure no duplicates.\\n\\n        Returns an OrderedDict instead of a Dict.\\n        '\n        NO_DUPE_SIBLINGS = ['stacks', 'class_path']\n        NO_DUPE_CHILDREN = ['stacks']\n\n        def _error_mapping_on_dupe(self, node, node_name):\n            'check mapping node for dupe children keys.'\n            if isinstance(node, MappingNode):\n                mapping = {}\n                for n in node.value:\n                    a = n[0]\n                    b = mapping.get(a.value, None)\n                    if b:\n                        msg = '{} mapping cannot have duplicate keys {} {}'\n                        raise ConstructorError(msg.format(node_name, b.start_mark, a.start_mark))\n                    mapping[a.value] = a\n\n        def _validate_mapping(self, node, deep=False):\n            if (not isinstance(node, MappingNode)):\n                raise ConstructorError(None, None, ('expected a mapping node, but found %s' % node.id), node.start_mark)\n            mapping = OrderedDict()\n            for (key_node, value_node) in node.value:\n                key = self.construct_object(key_node, deep=deep)\n                try:\n                    hash(key)\n                except TypeError as exc:\n                    raise ConstructorError('while constructing a mapping', node.start_mark, ('found unhashable key (%s)' % exc), key_node.start_mark)\n                if ((key in mapping) and (key in self.NO_DUPE_SIBLINGS)):\n                    msg = '{} key cannot have duplicate siblings {} {}'\n                    raise ConstructorError(msg.format(key, node.start_mark, key_node.start_mark))\n                if (key in self.NO_DUPE_CHILDREN):\n                    self._error_mapping_on_dupe(value_node, key_node.value)\n                value = self.construct_object(value_node, deep=deep)\n                mapping[key] = value\n            return mapping\n\n        def construct_mapping(self, node, deep=False):\n            'Override parent method to use OrderedDict.'\n            if isinstance(node, MappingNode):\n                self.flatten_mapping(node)\n            return self._validate_mapping(node, deep=deep)\n\n        def construct_yaml_map(self, node):\n            data = OrderedDict()\n            (yield data)\n            value = self.construct_mapping(node)\n            data.update(value)\n    OrderedUniqueLoader.add_constructor(u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map)\n    return yaml.load(stream, OrderedUniqueLoader)", "docstring": "Provides yaml.load alternative with preserved dictionary order.\n\nArgs:\nstream (string): YAML string to load.\nloader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe\nload.\n\nReturns:\nOrderedDict: Parsed YAML.", "source": "codesearchnet"}
{"code": "def handle_document(self, item_session: ItemSession, filename: str) -> Actions:\n    self._waiter.reset()\n    action = self.handle_response(item_session)\n    if (action == Actions.NORMAL):\n        self._statistics.increment(item_session.response.body.size())\n        item_session.set_status(Status.done, filename=filename)\n    return action", "docstring": "Process a successful document response.\n\nReturns:\nA value from :class:`.hook.Actions`.", "source": "codesearchnet"}
{"code": "def run_build_model(self, num_runs=5, silent=False, force_rerun=False):\n        \n        \n        self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])\n        self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])\n\n        \n        foldx_build_model = 'foldx --command=BuildModel --pdb={} --mutant-file={} --numberOfRuns={}'.format(self.repaired_pdb_outfile,\n                                                                                                            op.basename(self.mutation_infile),\n                                                                                                            num_runs)\n\n        ssbio.utils.command_runner(shell_command=foldx_build_model, force_rerun_flag=force_rerun, silent=silent,\n                                   outfile_checker=self.mutation_ddG_avg_outfile, cwd=self.foldx_dir)", "docstring": "Run FoldX BuildModel command with a mutant file input.\n\nOriginal command::\n\nfoldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5\n\nArgs:\nnum_runs (int):\nsilent (bool): If FoldX output should be silenced from printing to the shell.\nforce_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists.", "source": "juraj-google-style"}
{"code": "def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta, extra_feed_dict):\n    if x.dtype == dtypes.bfloat16:\n        x = math_ops.cast(x, dtypes.float32)\n    if y.dtype == dtypes.bfloat16:\n        y = math_ops.cast(y, dtypes.float32)\n    if x_data.dtype == dtypes.bfloat16.as_numpy_dtype:\n        x_data = x_data.astype(np.float32)\n    x_size = _product(x_shape) * (2 if x.dtype.is_complex else 1)\n    y_size = _product(y_shape) * (2 if y.dtype.is_complex else 1)\n    x_dtype = x.dtype.real_dtype.as_numpy_dtype\n    y_dtype = y.dtype.real_dtype.as_numpy_dtype\n    x_data = numpy_compat.np_asarray(x_data, dtype=x.dtype.as_numpy_dtype)\n    scale = numpy_compat.np_asarray(2 * delta, dtype=y_dtype)[()]\n    jacobian = np.zeros((x_size, y_size), dtype=x_dtype)\n    for row in range(x_size):\n        x_pos = x_data.copy()\n        x_neg = x_data.copy()\n        x_pos.ravel().view(x_dtype)[row] += delta\n        y_pos = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_pos}))\n        x_neg.ravel().view(x_dtype)[row] -= delta\n        y_neg = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_neg}))\n        diff = (y_pos - y_neg) / scale\n        jacobian[row, :] = diff.ravel().view(y_dtype)\n    logging.vlog(1, 'Numeric Jacobian =\\n%s', jacobian)\n    return jacobian", "docstring": "Computes the numeric Jacobian for dy/dx.\n\nComputes the numeric Jacobian by slightly perturbing the inputs and\nmeasuring the differences on the output.\n\nArgs:\nx: the tensor \"x\".\nx_shape: the dimensions of x as a tuple or an array of ints.\nx_data: a numpy array as the input data for x\ny: the tensor \"y\".\ny_shape: the dimensions of y as a tuple or an array of ints.\ndelta: the amount of perturbation we give to the input\nextra_feed_dict: dict that allows fixing specified tensor values\nduring the jacobian calculation.\n\nReturns:\nA 2-d numpy array representing the Jacobian for dy/dx. It has \"x_size\" rows\nand \"y_size\" columns where \"x_size\" is the number of elements in x and\n\"y_size\" is the number of elements in y.", "source": "github-repos"}
{"code": "def destroy_cloudwatch_event(app='', env='dev', region=''):\n    session = boto3.Session(profile_name=env, region_name=region)\n    cloudwatch_client = session.client('events')\n    event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region)\n    for rule in event_rules:\n        cloudwatch_client.remove_targets(Rule=rule, Ids=[app])\n    return True", "docstring": "Destroy Cloudwatch event subscription.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\nReturns:\nbool: True upon successful completion.", "source": "codesearchnet"}
{"code": "def __init__(self, path, script, optimized=True):\n        \n\n        \n        self.path = path\n        self.script = script\n\n        if optimized:\n            \n            \n            \n            library_path = \"%s:%s\" % (\n                os.path.join(path, 'build/optimized'),\n                os.path.join(path, 'build/optimized/lib'))\n\n            \n            self.environment = {\n                'LD_LIBRARY_PATH': library_path,\n                'DYLD_LIBRARY_PATH': library_path}\n        else:\n            library_path = \"%s:%s\" % (os.path.join(path, 'build'),\n                                      os.path.join(path, 'build/lib'))\n            self.environment = {\n                'LD_LIBRARY_PATH': os.path.join(path, 'build'),\n                'DYLD_LIBRARY_PATH': os.path.join(path, 'build')}\n\n        \n        self.configure_and_build(path, optimized=optimized)\n\n        \n        \n        if optimized:\n            build_status_path = os.path.join(path,\n                                             'build/optimized/build-status.py')\n        else:\n            build_status_path = os.path.join(path,\n                                             'build/build-status.py')\n\n        \n        try:  \n            spec = importlib.util.spec_from_file_location('build_status',\n                                                          build_status_path)\n            build_status = importlib.util.module_from_spec(spec)\n            spec.loader.exec_module(build_status)\n        except (AttributeError):  \n            import imp\n            build_status = imp.load_source('build_status', build_status_path)\n\n        \n        \n        \n        \n        matches = [{'name': program,\n                    'path': os.path.abspath(os.path.join(path, program))} for\n                   program in build_status.ns3_runnable_programs if self.script\n                   in program]\n\n        if not matches:\n            raise ValueError(\"Cannot find %s script\" % self.script)\n\n        \n        \n        match_percentages = map(lambda x: {'name': x['name'],\n                                           'path': x['path'],\n                                           'percentage':\n                                           len(self.script)/len(x['name'])},\n                                matches)\n\n        self.script_executable = max(match_percentages,\n                                     key=lambda x: x['percentage'])['path']\n\n        if optimized and \"scratch\" in self.script_executable:\n            self.script_executable = os.path.abspath(\n                os.path.join(path, \"build/optimized/scratch\", self.script))", "docstring": "Initialization function.\n\nArgs:\npath (str): absolute path to the ns-3 installation this Runner\nshould lock on.\nscript (str): ns-3 script that will be used by this Runner.\noptimized (bool): whether this Runner should build ns-3 with the\noptimized profile.", "source": "juraj-google-style"}
{"code": "def to_string(cls, error_code):\n        \n        if error_code == cls.COMPARE_ERROR:\n            return 'Error comparing flash content to programming data.'\n        elif error_code == cls.PROGRAM_ERASE_ERROR:\n            return 'Error during program/erase phase.'\n        elif error_code == cls.VERIFICATION_ERROR:\n            return 'Error verifying programmed data.'\n        return super(JLinkFlashErrors, cls).to_string(error_code)", "docstring": "Returns the string message for the given ``error_code``.\n\nArgs:\ncls (JLinkFlashErrors): the ``JLinkFlashErrors`` class\nerror_code (int): error code to convert\n\nReturns:\nAn error string corresponding to the error code.\n\nRaises:\nValueError: if the error code is invalid.", "source": "juraj-google-style"}
{"code": "def stdout(self):\n    if (not self.id):\n        raise WorkflowError('Workflow is not running.  Cannot get stdout.')\n    if self.batch_values:\n        raise NotImplementedError('Query Each Workflow Id within the Batch Workflow for stdout.')\n    wf = self.workflow.get(self.id)\n    stdout_list = []\n    for task in wf['tasks']:\n        stdout_list.append({'id': task['id'], 'taskType': task['taskType'], 'name': task['name'], 'stdout': self.workflow.get_stdout(self.id, task['id'])})\n    return stdout_list", "docstring": "Get stdout from all the tasks of a workflow.\n\nReturns:\n(list): tasks with their stdout\n\nExample:\n>>> workflow.stdout\n[\n{\n\"id\": \"4488895771403082552\",\n\"taskType\": \"AOP_Strip_Processor\",\n\"name\": \"Task1\",\n\"stdout\": \"............\"\n}\n]", "source": "codesearchnet"}
{"code": "def to_dict(self, filter=True):\n    result = {}\n    for (k, v) in self:\n        r = _to_dict(v, filter)\n        if r:\n            result[k] = r\n    return result", "docstring": "Returns a dictionary with the values of the model. Note that the values\nof the leafs are evaluated to python types.\n\nArgs:\nfilter (bool): If set to ``True``, show only values that have been set.\n\nReturns:\ndict: A dictionary with the values of the model.\n\nExample:\n\n>>> pretty_print(config.to_dict(filter=True))\n>>> {\n>>>     \"interfaces\": {\n>>>         \"interface\": {\n>>>             \"et1\": {\n>>>                 \"config\": {\n>>>                     \"description\": \"My description\",\n>>>                     \"mtu\": 1500\n>>>                 },\n>>>                 \"name\": \"et1\"\n>>>             },\n>>>             \"et2\": {\n>>>                 \"config\": {\n>>>                     \"description\": \"Another description\",\n>>>                     \"mtu\": 9000\n>>>                 },\n>>>                 \"name\": \"et2\"\n>>>             }\n>>>         }\n>>>     }\n>>> }", "source": "codesearchnet"}
{"code": "def set_input(self, p_name, value):\n        \n        name = self.python_names.get(p_name)\n        if p_name is None or name not in self.get_input_names():\n            raise ValueError('Invalid input \"{}\"'.format(p_name))\n        self.step_inputs[name] = value", "docstring": "Set a Step's input variable to a certain value.\n\nThe value comes either from a workflow input or output of a previous\nstep.\n\nArgs:\nname (str): the name of the Step input\nvalue (str): the name of the output variable that provides the\nvalue for this input.\n\nRaises:\nValueError: The name provided is not a valid input name for this\nStep.", "source": "juraj-google-style"}
{"code": "def set_mac_address(self, mac_address=None, default=False, disable=False):\n    base_command = 'ip virtual-router mac-address'\n    if ((not default) and (not disable)):\n        if (mac_address is not None):\n            if (not re.match('(?:[a-f0-9]{2}:){5}[a-f0-9]{2}', mac_address)):\n                raise ValueError('mac_address must be formatted like:aa:bb:cc:dd:ee:ff')\n        else:\n            raise ValueError('mac_address must be a properly formatted address string')\n    if (default or (disable and (not mac_address))):\n        current_mac = self._parse_mac_address()\n        if current_mac['mac_address']:\n            base_command = ((base_command + ' ') + current_mac['mac_address'])\n    commands = self.command_builder(base_command, value=mac_address, default=default, disable=disable)\n    return self.configure(commands)", "docstring": "Sets the virtual-router mac address\n\nThis method will set the switch virtual-router mac address. If a\nvirtual-router mac address already exists it will be overwritten.\n\nArgs:\nmac_address (string): The mac address that will be assigned as\nthe virtual-router mac address. This should be in the format,\naa:bb:cc:dd:ee:ff.\ndefault (bool): Sets the virtual-router mac address to the system\ndefault (which is to remove the configuration line).\ndisable (bool): Negates the virtual-router mac address using\nthe system no configuration command\n\nReturns:\nTrue if the set operation succeeds otherwise False.", "source": "codesearchnet"}
{"code": "def count_up_to(self, limit):\n    raise NotImplementedError", "docstring": "Increments this variable until it reaches `limit`.\n\nWhen that Op is run it tries to increment the variable by `1`. If\nincrementing the variable would bring it above `limit` then the Op raises\nthe exception `OutOfRangeError`.\n\nIf no error is raised, the Op outputs the value of the variable before\nthe increment.\n\nThis is essentially a shortcut for `count_up_to(self, limit)`.\n\nArgs:\nlimit: value at which incrementing the variable raises an error.\n\nReturns:\nA `Tensor` that will hold the variable value before the increment. If no\nother Op modifies this variable, the values produced will all be\ndistinct.", "source": "github-repos"}
{"code": "def db(self, entity, query_filters=\"size=10\"):\n        \n        if self.entity_api_key == \"\":\n            return {'status': 'failure', 'response': 'No API key found in request'}\n\n        historic_url = self.base_url + \"api/0.1.0/historicData?\" + query_filters\n        historic_headers = {\n            \"apikey\": self.entity_api_key,\n            \"Content-Type\": \"application/json\"\n        }\n\n        historic_query_data = json.dumps({\n            \"query\": {\n                \"match\": {\n                    \"key\": entity\n                }\n            }\n        })\n\n        with self.no_ssl_verification():\n            r = requests.get(historic_url, data=historic_query_data, headers=historic_headers)\n        response = dict()\n        if \"No API key\" in str(r.content.decode(\"utf-8\")):\n            response[\"status\"] = \"failure\"\n        else:\n            r = r.content.decode(\"utf-8\")\n            response = r\n        return response", "docstring": "This function allows an entity to access the historic data.\n\nArgs:\nentity        (string): Name of the device to listen to\nquery_filters (string): Elastic search response format string\nexample, \"pretty=true&size=10\"", "source": "juraj-google-style"}
{"code": "def WritePathHashHistory(self, client_path, hash_entries):\n    \n    client_path_history = ClientPathHistory()\n    for timestamp, hash_entry in iteritems(hash_entries):\n      client_path_history.AddHashEntry(timestamp, hash_entry)\n\n    self.MultiWritePathHistory({client_path: client_path_history})", "docstring": "Writes a collection of `Hash` observed for particular path.\n\nArgs:\nclient_path: A `ClientPath` instance.\nhash_entries: A dictionary with timestamps as keys and `Hash` instances as\nvalues.", "source": "juraj-google-style"}
{"code": "def parse_arguments(argv):\n  \n  parser = argparse.ArgumentParser(\n      description='Runs Preprocessing on structured data.')\n  parser.add_argument('--output-dir',\n                      type=str,\n                      required=True,\n                      help='Google Cloud Storage which to place outputs.')\n\n  parser.add_argument('--schema-file',\n                      type=str,\n                      required=False,\n                      help=('BigQuery json schema file'))\n  parser.add_argument('--input-file-pattern',\n                      type=str,\n                      required=False,\n                      help='Input CSV file names. May contain a file pattern')\n\n  \n  \n  \n  parser.add_argument('--bigquery-table',\n                      type=str,\n                      required=False,\n                      help=('project:dataset.table_name'))\n\n  args = parser.parse_args(args=argv[1:])\n\n  if not args.output_dir.startswith('gs:\n    raise ValueError('--output-dir must point to a location on GCS')\n\n  if args.bigquery_table:\n    if args.schema_file or args.input_file_pattern:\n      raise ValueError('If using --bigquery-table, then --schema-file and '\n                       '--input-file-pattern, '\n                       'are not needed.')\n  else:\n    if not args.schema_file or not args.input_file_pattern:\n      raise ValueError('If not using --bigquery-table, then --schema-file and '\n                       '--input-file-pattern '\n                       'are required.')\n\n    if not args.input_file_pattern.startswith('gs:\n      raise ValueError('--input-file-pattern must point to files on GCS')\n\n  return args", "docstring": "Parse command line arguments.\n\nArgs:\nargv: list of command line arguments, includeing programe name.\n\nReturns:\nAn argparse Namespace object.\n\nRaises:\nValueError: for bad parameters", "source": "juraj-google-style"}
{"code": "def IsNotNone(*fields, default=None):\n    \n\n    when_clauses = [\n        expressions.When(\n            ~expressions.Q(**{field: None}),\n            then=expressions.F(field)\n        )\n        for field in reversed(fields)\n    ]\n\n    return expressions.Case(\n        *when_clauses,\n        default=expressions.Value(default),\n        output_field=CharField()\n    )", "docstring": "Selects whichever field is not None, in the specified order.\n\nArguments:\nfields:\nThe fields to attempt to get a value from,\nin order.\n\ndefault:\nThe value to return in case all values are None.\n\nReturns:\nA Case-When expression that tries each field and\nreturns the specified default value when all of\nthem are None.", "source": "juraj-google-style"}
{"code": "def _describe_bitmask(bits: int, table: Dict[(Any, str)], default: str='0') -> str:\n    result = []\n    for (bit, name) in table.items():\n        if (bit & bits):\n            result.append(name)\n    if (not result):\n        return default\n    return '|'.join(result)", "docstring": "Returns a bitmask in human readable form.\n\nThis is a private function, used internally.\n\nArgs:\nbits (int): The bitmask to be represented.\ntable (Dict[Any,str]): A reverse lookup table.\ndefault (Any): A default return value when bits is 0.\n\nReturns: str: A printable version of the bits variable.", "source": "codesearchnet"}
{"code": "def to_representation(self, instance):\n    updated_program = copy.deepcopy(instance)\n    enterprise_customer_catalog = self.context['enterprise_customer_catalog']\n    updated_program['enrollment_url'] = enterprise_customer_catalog.get_program_enrollment_url(updated_program['uuid'])\n    for course in updated_program['courses']:\n        course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(course['key'])\n        for course_run in course['course_runs']:\n            course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(course_run['key'])\n    return updated_program", "docstring": "Return the updated program data dictionary.\n\nArguments:\ninstance (dict): The program data.\n\nReturns:\ndict: The updated program data.", "source": "codesearchnet"}
{"code": "def plot_power_factor_mu(self, temp=600, output='eig',\n                             relaxation_time=1e-14, xlim=None):\n        \n        import matplotlib.pyplot as plt\n        plt.figure(figsize=(9, 7))\n        pf = self._bz.get_power_factor(relaxation_time=relaxation_time,\n                                       output=output, doping_levels=False)[\n            temp]\n        plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)\n        self._plot_bg_limits()\n        self._plot_doping(temp)\n        if output == 'eig':\n            plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])\n        if xlim is None:\n            plt.xlim(-0.5, self._bz.gap + 0.5)\n        else:\n            plt.xlim(xlim)\n        plt.ylabel(\"Power factor, ($\\\\mu$W/(mK$^2$))\", fontsize=30.0)\n        plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30.0)\n        plt.xticks(fontsize=25)\n        plt.yticks(fontsize=25)\n        plt.tight_layout()\n        return plt", "docstring": "Plot the power factor in function of Fermi level. Semi-log plot\n\nArgs:\ntemp: the temperature\nxlim: a list of min and max fermi energy by default (0, and band\ngap)\ntau: A relaxation time in s. By default none and the plot is by\nunits of relaxation time\n\nReturns:\na matplotlib object", "source": "juraj-google-style"}
{"code": "def bitwise_or(x, y):\n    if any_symbolic_tensors((x, y)):\n        return BitwiseOr().symbolic_call(x, y)\n    return backend.numpy.bitwise_or(x, y)", "docstring": "Compute the bit-wise OR of two arrays element-wise.\n\nComputes the bit-wise OR of the underlying binary representation of the\nintegers in the input arrays. This ufunc implements the C/Python operator\n`|`.\n\nArgs:\nx: Input integer tensor.\ny: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"}
{"code": "def random(cls, components, width=False, colour=None):\n    try:\n        list_of_Decors = [Decor.random(c) for c in [i[0] for i in components.unique if i[0]]]\n    except:\n        try:\n            list_of_Decors = [Decor.random(c) for c in components.copy()]\n        except:\n            list_of_Decors = [Decor.random(components)]\n    if (colour is not None):\n        for d in list_of_Decors:\n            d.colour = colour\n    if width:\n        for (i, d) in enumerate(list_of_Decors):\n            d.width = (i + 1)\n    return cls(list_of_Decors)", "docstring": "Generate a random legend for a given list of components.\n\nArgs:\ncomponents (list or Striplog): A list of components. If you pass\na Striplog, it will use the primary components. If you pass a\ncomponent on its own, you will get a random Decor.\nwidth (bool): Also generate widths for the components, based on the\norder in which they are encountered.\ncolour (str): If you want to give the Decors all the same colour,\nprovide a hex string.\nReturns:\nLegend or Decor: A legend (or Decor) with random colours.\nTODO:\nIt might be convenient to have a partial method to generate an\n'empty' legend. Might be an easy way for someone to start with a\ntemplate, since it'll have the components in it already.", "source": "codesearchnet"}
{"code": "def getLanguage(self, body, ):\n        \n\n        resourcePath = '/text/detect_language'\n        method = 'POST'\n\n        queryParams = {}\n        headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}\n        postData = None\n\n        postData = body\n        response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)\n        return language_rest.LanguageRest(**response.json())", "docstring": "Detect the language of a text\nArgs:\nbody, str: Your input text (UTF-8) (required)\nReturns: LanguageRest", "source": "juraj-google-style"}
{"code": "def wait_for_tx(self, tx, max_seconds=120):\n    tx_hash = None\n    if isinstance(tx, (str, UInt256)):\n        tx_hash = str(tx)\n    elif isinstance(tx, Transaction):\n        tx_hash = tx.Hash.ToString()\n    else:\n        raise AttributeError((\"Supplied tx is type '%s', but must be Transaction or UInt256 or str\" % type(tx)))\n    wait_event = Event()\n    time_start = time.time()\n    while True:\n        (_tx, height) = Blockchain.Default().GetTransaction(tx_hash)\n        if (height > (- 1)):\n            return True\n        wait_event.wait(3)\n        seconds_passed = (time.time() - time_start)\n        if (seconds_passed > max_seconds):\n            raise TxNotFoundInBlockchainError(('Transaction with hash %s not found after %s seconds' % (tx_hash, int(seconds_passed))))", "docstring": "Wait for tx to show up on blockchain\n\nArgs:\ntx (Transaction or UInt256 or str): Transaction or just the hash\nmax_seconds (float): maximum seconds to wait for tx to show up. default: 120\n\nReturns:\nTrue: if transaction was found\n\nRaises:\nAttributeError: if supplied tx is not Transaction or UInt256 or str\nTxNotFoundInBlockchainError: if tx is not found in blockchain after max_seconds", "source": "codesearchnet"}
{"code": "def fetch_support_file(name, timestamp_tuple):\n    stored_filename = os.path.join(_subpar_package, 'runtime', name)\n    content = pkgutil.get_data(_subpar_package, 'runtime/' + name)\n    if content is None:\n        raise error.Error(\"Internal error: Can't find runtime support file [%s]\" % name)\n    return stored_resource.StoredContent(stored_filename, timestamp_tuple, content)", "docstring": "Read a file from the runtime package\n\nArgs:\nname: filename in runtime package's directory\ntimestamp_tuple: Stored timestamp, as ZipInfo tuple\n\nReturns:\nA StoredResource representing the content of that file", "source": "github-repos"}
{"code": "def add_defaults_to_kwargs(defaults, **kwargs):\n    \n    defaults = dict(defaults)\n    defaults.update(kwargs)\n    return defaults", "docstring": "Updates `kwargs` with dict of `defaults`\n\nArgs:\ndefaults: A dictionary of keys and values\n**kwargs: The kwargs to update.\n\nReturns:\nThe updated kwargs.", "source": "juraj-google-style"}
{"code": "def get_pending_servermanager():\n    vname = 'CurrentRebootAttempts'\n    key = 'SOFTWARE\\\\Microsoft\\\\ServerManager'\n    reg_ret = __utils__['reg.read_value']('HKLM', key, vname)\n    if reg_ret['success']:\n        log.debug('Found key: %s', key)\n        try:\n            if (int(reg_ret['vdata']) > 0):\n                return True\n        except ValueError:\n            pass\n    else:\n        log.debug('Unable to access key: %s', key)\n    return False", "docstring": "Determine whether there are pending Server Manager tasks that require a\nreboot.\n\n.. versionadded:: 2016.11.0\n\nReturns:\nbool: ``True`` if there are pending Server Manager tasks, otherwise\n``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' system.get_pending_servermanager", "source": "codesearchnet"}
{"code": "def __call__(self, kl_fn):\n    if not callable(kl_fn):\n        raise TypeError('kl_fn must be callable, received: %s' % kl_fn)\n    if self._key in _DIVERGENCES:\n        raise ValueError('KL(%s || %s) has already been registered to: %s' % (self._key[0].__name__, self._key[1].__name__, _DIVERGENCES[self._key]))\n    _DIVERGENCES[self._key] = kl_fn\n    return kl_fn", "docstring": "Perform the KL registration.\n\nArgs:\nkl_fn: The function to use for the KL divergence.\n\nReturns:\nkl_fn\n\nRaises:\nTypeError: if kl_fn is not a callable.\nValueError: if a KL divergence function has already been registered for\nthe given argument classes.", "source": "github-repos"}
{"code": "def sine(w, A=1, phi=0, offset=0):\n    from math import sin\n\n    def f(i):\n        return ((A * sin(((w * i) + phi))) + offset)\n    return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can advance a sequence of sine values.\n\n.. code-block:: none\n\nvalue = A * sin(w*i + phi) + offset\n\nArgs:\nw (float) : a frequency for the sine driver\nA (float) : an amplitude for the sine driver\nphi (float) : a phase offset to start the sine driver with\noffset (float) : a global offset to add to the driver values", "source": "codesearchnet"}
{"code": "def prop(pode, prop):\n    form = pode[0][0]\n    if prop.startswith(form):\n        prop = prop[len(form):]\n    if (prop[0] == ':'):\n        prop = prop[1:]\n    return pode[1]['props'].get(prop)", "docstring": "Return the valu of a given property on the node.\n\nArgs:\npode (tuple): A packed node.\nprop (str): Property to retrieve.\n\nNotes:\nThe prop argument may be the full property name (foo:bar:baz), relative property name (:baz) , or the unadorned\nproperty name (baz).\n\nReturns:", "source": "codesearchnet"}
{"code": "def zip_cluster(data, k, init=None, max_iters=100):\n    (genes, cells) = data.shape\n    (init, new_assignments) = kmeans_pp((data + eps), k, centers=init)\n    centers = np.copy(init)\n    M = np.zeros(centers.shape)\n    assignments = new_assignments\n    for c in range(k):\n        (centers[(:, c)], M[(:, c)]) = zip_fit_params_mle(data[(:, (assignments == c))])\n    for it in range(max_iters):\n        lls = zip_ll(data, centers, M)\n        new_assignments = np.argmax(lls, 1)\n        if np.equal(assignments, new_assignments).all():\n            return (assignments, centers, M)\n        for c in range(k):\n            (centers[(:, c)], M[(:, c)]) = zip_fit_params_mle(data[(:, (assignments == c))])\n        assignments = new_assignments\n    return (assignments, centers, M)", "docstring": "Performs hard EM clustering using the zero-inflated Poisson distribution.\n\nArgs:\ndata (array): A 2d array- genes x cells\nk (int): Number of clusters\ninit (array, optional): Initial centers - genes x k array. Default: None, use kmeans++\nmax_iters (int, optional): Maximum number of iterations. Default: 100\n\nReturns:\nassignments (array): integer assignments of cells to clusters (length cells)\nL (array): Poisson parameter (genes x k)\nM (array): zero-inflation parameter (genes x k)", "source": "codesearchnet"}
{"code": "def get_email_message(self, message_uid, message_type='text/plain'):\n    self._mail.select('inbox')\n    result = self._mail.uid('fetch', message_uid, '(RFC822)')\n    msg = email.message_from_string(result[1][0][1])\n    try:\n        for part in msg.walk():\n            if (part.get_content_type() == message_type):\n                return part.get_payload(decode=True)\n    except:\n        return msg.get_payload(decode=True)", "docstring": "Fetch contents of email.\n\nArgs:\nmessage_uid (int): IMAP Message UID number.\n\nKwargs:\nmessage_type: Can be 'text' or 'html'", "source": "codesearchnet"}
{"code": "def config_updated_since(self, sentry_unit, filename, mtime, sleep_time=20, retry_count=30, retry_sleep_time=10):\n    unit_name = sentry_unit.info['unit_name']\n    self.log.debug(('Checking that %s updated since %s on %s' % (filename, mtime, unit_name)))\n    time.sleep(sleep_time)\n    file_mtime = None\n    tries = 0\n    while ((tries <= retry_count) and (not file_mtime)):\n        try:\n            file_mtime = self._get_file_mtime(sentry_unit, filename)\n            self.log.debug('Attempt {} to get {} file mtime on {} OK'.format(tries, filename, unit_name))\n        except IOError as e:\n            self.log.debug('Attempt {} to get {} file mtime on {} failed\\n{}'.format(tries, filename, unit_name, e))\n            time.sleep(retry_sleep_time)\n            tries += 1\n    if (not file_mtime):\n        self.log.warn('Could not determine file mtime, assuming file does not exist')\n        return False\n    if (file_mtime >= mtime):\n        self.log.debug(('File mtime is newer than provided mtime (%s >= %s) on %s (OK)' % (file_mtime, mtime, unit_name)))\n        return True\n    else:\n        self.log.warn(('File mtime is older than provided mtime(%s < on %s) on %s' % (file_mtime, mtime, unit_name)))\n        return False", "docstring": "Check if file was modified after a given time.\n\nArgs:\nsentry_unit (sentry): The sentry unit to check the file mtime on\nfilename (string): The file to check mtime of\nmtime (float): The epoch time to check against\nsleep_time (int): Initial sleep time (s) before looking for file\nretry_sleep_time (int): Time (s) to sleep between retries\nretry_count (int): If file is not found, how many times to retry\n\nReturns:\nbool: True if file was modified more recently than mtime, False if\nfile was modified before mtime, or if file not found.", "source": "codesearchnet"}
{"code": "def store_checksums(dataset_name, sizes_checksums):\n    path = _get_path(dataset_name)\n    original_data = _get_sizes_checksums(path)\n    new_data = original_data.copy()\n    new_data.update(sizes_checksums)\n    if (original_data == new_data):\n        return\n    with tf.io.gfile.GFile(path, 'w') as f:\n        for (url, (size, checksum)) in sorted(new_data.items()):\n            f.write(('%s %s %s\\n' % (url, size, checksum)))", "docstring": "Store given checksums and sizes for specific dataset.\n\nContent of file is never disgarded, only updated. This is to ensure that if\nprocess is killed right after first download finishes, checksums registered\nduring previous runs aren't lost.\n\nIt is the responsibility of the caller not to call function multiple times in\nparallel for a given dataset.\n\nOnly original file content is updated. This means the entire set of new sizes\nand checksums must be given at every call.\n\nArgs:\ndataset_name: string.\nsizes_checksums: dict, {url: (size_in_bytes, checksum)}.", "source": "codesearchnet"}
{"code": "def index_list_for_sort_order(x: List[Any], key: Callable[([Any], Any)]=None, reverse: bool=False) -> List[int]:\n\n    def key_with_user_func(idx_val: Tuple[(int, Any)]):\n        return key(idx_val[1])\n    if key:\n        sort_key = key_with_user_func\n    else:\n        sort_key = itemgetter(1)\n    index_value_list = sorted(enumerate(x), key=sort_key, reverse=reverse)\n    return [i for (i, _) in index_value_list]", "docstring": "Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED.\n\nArgs:\nx: data\nkey: function to be applied to the data to generate a sort key; this\nfunction is passed as the ``key=`` parameter to :func:`sorted`;\nthe default is ``itemgetter(1)``\nreverse: reverse the sort order?\n\nReturns:\nlist of integer index values\n\nExample:\n\n.. code-block:: python\n\nz = [\"a\", \"c\", \"b\"]\nindex_list_for_sort_order(z)  # [0, 2, 1]\nindex_list_for_sort_order(z, reverse=True)  # [1, 2, 0]\nq = [(\"a\", 9), (\"b\", 8), (\"c\", 7)]\nindex_list_for_sort_order(q, key=itemgetter(1))", "source": "codesearchnet"}
{"code": "def get_substrates(self, material_id, number=50, orient=None):\n    req = '/materials/{}/substrates?n={}'.format(material_id, number)\n    if orient:\n        req += '&orient={}'.format(' '.join(map(str, orient)))\n    return self._make_request(req)", "docstring": "Get a substrate list for a material id. The list is in order of\nincreasing elastic energy if a elastic tensor is available for\nthe material_id. Otherwise the list is in order of increasing\nmatching area.\n\nArgs:\nmaterial_id (str): Materials Project material_id, e.g. 'mp-123'.\norient (list) : substrate orientation to look for\nnumber (int) : number of substrates to return;\nn=0 returns all available matches\nReturns:\nlist of dicts with substrate matches", "source": "codesearchnet"}
{"code": "def MeasureCosts(self, item):\n    op_perf_bytes_list, run_time, step_stats_bytes = tf_cluster.TF_MeasureCosts(item.tf_item, self._tf_cluster, self._generate_timeline)\n    op_perfs = [op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes) for op_perf_bytes in op_perf_bytes_list]\n    return (op_perfs, run_time, step_stats_pb2.StepStats.FromString(step_stats_bytes))", "docstring": "Returns the cost of running the specified item.\n\nArgs:\nitem: The item for which to measure the costs.\nReturns: The triplet op_perfs, runtime, step_stats.", "source": "github-repos"}
{"code": "def frame(data, window_length, hop_length):\n    num_samples = data.shape[0]\n    num_frames = (1 + int(np.floor(((num_samples - window_length) / hop_length))))\n    shape = ((num_frames, window_length) + data.shape[1:])\n    strides = (((data.strides[0] * hop_length),) + data.strides)\n    return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)", "docstring": "Convert array into a sequence of successive possibly overlapping frames.\n\nAn n-dimensional array of shape (num_samples, ...) is converted into an\n(n+1)-D array of shape (num_frames, window_length, ...), where each frame\nstarts hop_length points after the preceding one.\n\nThis is accomplished using stride_tricks, so the original data is not\ncopied.  However, there is no zero-padding, so any incomplete frames at the\nend are not included.\n\nArgs:\ndata: np.array of dimension N >= 1.\nwindow_length: Number of samples in each frame.\nhop_length: Advance (in samples) between each window.\n\nReturns:\n(N+1)-D np.array with as many rows as there are complete frames that can be\nextracted.", "source": "codesearchnet"}
{"code": "def anchored_pairs(self, anchor):\n    pairs = OrderedDict()\n    for term in self.keys:\n        score = self.get_pair(anchor, term)\n        if score:\n            pairs[term] = score\n    return utils.sort_dict(pairs)", "docstring": "Get distances between an anchor term and all other terms.\n\nArgs:\nanchor (str): The anchor term.\n\nReturns:\nOrderedDict: The distances, in descending order.", "source": "codesearchnet"}
{"code": "def get_parent(self):\n    if (not isinstance(self.parent, Expression)):\n        raise FiqlObjectException(('Parent must be of %s not %s' % (Expression, type(self.parent))))\n    return self.parent", "docstring": "Get the parent ``Expression`` for this object.\n\nReturns:\nExpression: The ``Expression`` which contains this object.\n\nRaises:\nFiqlObjectException: Parent is ``None``.", "source": "codesearchnet"}
{"code": "def __init__(self, text, quiet=False):\n    \n    self.__text = text\n    self.reliable = True\n    \n    self.quiet = quiet\n    \n    self.detect(text)", "docstring": "Detector of the language used in `text`.\n\nArgs:\ntext (string): unicode string.", "source": "juraj-google-style"}
{"code": "def result_type(*dtypes):\n    if len(dtypes) == 0:\n        return config.floatx()\n    for dtype in dtypes:\n        if dtype in FLOAT8_TYPES:\n            raise ValueError(f'There is no implicit conversions from float8 dtypes to others. You must cast it internally. Received: {dtypes}')\n    return _lattice_result_type(*(config.floatx() if arg is None else arg for arg in dtypes))", "docstring": "Returns the type from applying the Keras type promotion rules.\n\nIn general, each argument is first parsed by `backend.standardize_dtype`,\nand the resulting dtype is determined by the least upper bound of the type\npromotion lattice.\n\nNote: This function attempts to match the result of `jnp.result_type`.\n\nArgs:\ndtypes: Input dtypes.\n\nReturns:\nThe result dtype.\n\nExamples:\n\n>>> x = keras.ops.ones((1,), dtype=\"bfloat16\")\n>>> keras.backend.result_type(x.dtype, int)\n\"bfloat16\"\n\n>>> x = keras.ops.ones((1,), dtype=\"int32\")\n>>> y = keras.ops.ones((1,), dtype=\"float32\")\n>>> keras.backend.result_type(x.dtype, y.dtype)\n\"float32\"\n\n>>> z= keras.ops.ones((1,), dtype='complex64')\n>>> keras.backend.result_type(z.dtype, int)\n\"float64\"", "source": "github-repos"}
{"code": "def _CheckIsSocket(self, file_entry):\n    if (definitions.FILE_ENTRY_TYPE_SOCKET not in self._file_entry_types):\n        return False\n    return file_entry.IsSocket()", "docstring": "Checks the is_socket find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"}
{"code": "def UpdateNumberOfWarnings(\n      self, number_of_consumed_warnings, number_of_produced_warnings):\n    \n    consumed_warnings_delta = 0\n    if number_of_consumed_warnings is not None:\n      if number_of_consumed_warnings < self.number_of_consumed_warnings:\n        raise ValueError(\n            'Number of consumed warnings smaller than previous update.')\n\n      consumed_warnings_delta = (\n          number_of_consumed_warnings - self.number_of_consumed_warnings)\n\n      self.number_of_consumed_warnings = number_of_consumed_warnings\n      self.number_of_consumed_warnings_delta = consumed_warnings_delta\n\n    produced_warnings_delta = 0\n    if number_of_produced_warnings is not None:\n      if number_of_produced_warnings < self.number_of_produced_warnings:\n        raise ValueError(\n            'Number of produced warnings smaller than previous update.')\n\n      produced_warnings_delta = (\n          number_of_produced_warnings - self.number_of_produced_warnings)\n\n      self.number_of_produced_warnings = number_of_produced_warnings\n      self.number_of_produced_warnings_delta = produced_warnings_delta\n\n    return consumed_warnings_delta > 0 or produced_warnings_delta > 0", "docstring": "Updates the number of warnings.\n\nArgs:\nnumber_of_consumed_warnings (int): total number of warnings consumed by\nthe process.\nnumber_of_produced_warnings (int): total number of warnings produced by\nthe process.\n\nReturns:\nbool: True if either number of warnings has increased.\n\nRaises:\nValueError: if the consumed or produced number of warnings is smaller\nthan the value of the previous update.", "source": "juraj-google-style"}
{"code": "def _build_path(self):\n    if (not self.path):\n        self.path = '/'\n    if self.uri_parameters:\n        self.path = ((self.path + ';') + requote_uri(self.uri_parameters))\n    if self.query:\n        self.path = ((self.path + '?') + self.query)\n    if self.params:\n        try:\n            if self.query:\n                self.path = (self.path + self._dict_to_query(self.params, base_query=True))\n            else:\n                self.path = (self.path + self._dict_to_query(self.params))\n        except AttributeError:\n            self.path = ((self.path + '?') + self.params)\n    self.path = requote_uri(self.path)\n    self.req_url = urlunparse((self.scheme, self.host, (self.path or ''), '', '', ''))", "docstring": "Constructs the actual request URL with accompanying query if any.\n\nReturns:\nNone: But does modify self.path, which contains the final\nrequest path sent to the server.", "source": "codesearchnet"}
{"code": "def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False,\n                     password=False):\n        \n        input_value = \"\"\n        question = (\"Insert the field using the pattern below:\"\n                    \"\\n{}\\n{}: \".format(pattern[0], field_name))\n\n        while not input_value:\n            input_value = getpass(question) if password else input(question)\n\n            if not (input_value or is_required):\n                break\n\n            if password:\n                confirm_password = getpass('Confirm your password: ')\n                if confirm_password != input_value:\n                    print(\"Password does not match\")\n                    input_value = \"\"\n\n            if not self.valid_attribute(input_value, pattern[1]):\n                error_message = \"The content must fit the pattern: {}\\n\"\n                print(error_message.format(pattern[0]))\n                input_value = \"\"\n\n        return input_value", "docstring": "Ask a question and get the input values.\n\nThis method will validade the input values.\nArgs:\nfield_name(string): Field name used to ask for input value.\npattern(tuple): Pattern to validate the input value.\nis_required(bool): Boolean value if the input value is required.\npassword(bool): Boolean value to get input password with mask.\nReturns:\ninput_value(string): Input value validated.", "source": "juraj-google-style"}
{"code": "def __init__(self, value=None):\n        \n        super(VendorIdentification, self).__init__(\n            value, Tags.VENDOR_IDENTIFICATION)", "docstring": "Construct a VendorIdentification object.\n\nArgs:\nvalue (str): A string describing a KMIP vendor. Optional, defaults\nto None.", "source": "juraj-google-style"}
{"code": "def GetMetaData(self, request):\n    if (request.timeout == 0):\n        raise ValueError(\"Requests library can't handle timeout of 0\")\n    result = requests.request('GET', request.url, headers=request.headers, timeout=request.timeout)\n    result.raise_for_status()\n    if (not result.ok):\n        raise requests.RequestException(response=result)\n    return rdf_cloud.CloudMetadataResponse(label=(request.label or request.url), text=result.text)", "docstring": "Get metadata from local metadata server.\n\nAny failed URL check will fail the whole action since our bios/service\nchecks may not always correctly identify cloud machines. We don't want to\nwait on multiple DNS timeouts.\n\nArgs:\nrequest: CloudMetadataRequest object\nReturns:\nrdf_cloud.CloudMetadataResponse object\nRaises:\nValueError: if request has a timeout of 0. This is a defensive\ncheck (we pass 1.0) because the requests library just times out and it's\nnot obvious why.", "source": "codesearchnet"}
{"code": "def create_with_claims(self, claims):\n    new_kwargs = dict(self._kwargs)\n    new_kwargs.update(claims)\n    result = self.__class__(self._service_account_email, self._signer, scopes=self._scopes, private_key_id=self._private_key_id, client_id=self.client_id, user_agent=self._user_agent, **new_kwargs)\n    result.token_uri = self.token_uri\n    result.revoke_uri = self.revoke_uri\n    result._private_key_pkcs8_pem = self._private_key_pkcs8_pem\n    result._private_key_pkcs12 = self._private_key_pkcs12\n    result._private_key_password = self._private_key_password\n    return result", "docstring": "Create credentials that specify additional claims.\n\nArgs:\nclaims: dict, key-value pairs for claims.\n\nReturns:\nServiceAccountCredentials, a copy of the current service account\ncredentials with updated claims to use when obtaining access\ntokens.", "source": "codesearchnet"}
{"code": "def _BuildFindSpecsFromRegistrySourceKey(self, key_path):\n    \n    find_specs = []\n    for key_path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(\n        key_path, '\\\\'):\n      logger.debug('building find spec from key path glob: {0:s}'.format(\n          key_path_glob))\n\n      key_path_glob_upper = key_path_glob.upper()\n      if key_path_glob_upper.startswith('HKEY_USERS\\\\%%USERS.SID%%'):\n        key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:])\n\n      find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob)\n      find_specs.append(find_spec)\n\n    return find_specs", "docstring": "Build find specifications from a Windows Registry source type.\n\nArgs:\nkey_path (str): Windows Registry key path defined by the source.\n\nReturns:\nlist[dfwinreg.FindSpec]: find specifications for the Windows Registry\nsource type.", "source": "juraj-google-style"}
{"code": "def _axis_gather(params, indices, axis):\n    if axis > 1:\n        if not isinstance(params, ragged_tensor.RaggedTensor):\n            params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n        return params.with_values(_gather(params.values, indices, axis - 1, 0))\n    if indices.shape.rank is None:\n        raise ValueError('rank(indices) must be known statically')\n    assert axis == 1\n    flat_params = _flatten_dims_0_and_1(params)\n    adjustments = _row_starts(params, indices.dtype)\n    adjustments = _increase_rank_to(adjustments, indices.shape.ndims + 1)\n    adjusted_indices = indices + adjustments\n    return _gather(flat_params, adjusted_indices, axis - 1, 0)", "docstring": "Helper that implements ragged gather when axis>0 and batch_dims==0.\n\nArgs:\nparams: The tensor from which to gather values.\nindices: The indices of values to gather.\naxis: The axis in `params` to gather `indices` from.\n\nReturns:\nA potentially ragged tensor.", "source": "github-repos"}
{"code": "def _update_repo(repo_config, store, tags_only):\n    \n    repo_path = store.clone(repo_config['repo'], repo_config['rev'])\n\n    cmd_output('git', 'fetch', cwd=repo_path)\n    tag_cmd = ('git', 'describe', 'origin/master', '--tags')\n    if tags_only:\n        tag_cmd += ('--abbrev=0',)\n    else:\n        tag_cmd += ('--exact',)\n    try:\n        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()\n    except CalledProcessError:\n        tag_cmd = ('git', 'rev-parse', 'origin/master')\n        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()\n\n    \n    if rev == repo_config['rev']:\n        return repo_config\n\n    try:\n        path = store.clone(repo_config['repo'], rev)\n        manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n    except InvalidManifestError as e:\n        raise RepositoryCannotBeUpdatedError(six.text_type(e))\n\n    \n    hooks = {hook['id'] for hook in repo_config['hooks']}\n    hooks_missing = hooks - {hook['id'] for hook in manifest}\n    if hooks_missing:\n        raise RepositoryCannotBeUpdatedError(\n            'Cannot update because the tip of master is missing these hooks:\\n'\n            '{}'.format(', '.join(sorted(hooks_missing))),\n        )\n\n    \n    new_config = repo_config.copy()\n    new_config['rev'] = rev\n    return new_config", "docstring": "Updates a repository to the tip of `master`.  If the repository cannot\nbe updated because a hook that is configured does not exist in `master`,\nthis raises a RepositoryCannotBeUpdatedError\n\nArgs:\nrepo_config - A config for a repository", "source": "juraj-google-style"}
{"code": "def to_string(self, format_, fps=None, **kwargs):\n    fp = io.StringIO()\n    self.to_file(fp, format_, fps=fps, **kwargs)\n    return fp.getvalue()", "docstring": "Get subtitle file as a string.\n\nSee :meth:`SSAFile.save()` for full description.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def log_every_n(level, msg, n, *args):\n    count = _GetNextLogCountPerToken(_GetFileAndLine())\n    log_if(level, msg, not count % n, *args)", "docstring": "Log 'msg % args' at level 'level' once per 'n' times.\n\nLogs the 1st call, (N+1)st call, (2N+1)st call,  etc.\nNot threadsafe.\n\nArgs:\nlevel: The level at which to log.\nmsg: The message to be logged.\nn: The number of times this should be called before it is logged.\n*args: The args to be substituted into the msg.", "source": "github-repos"}
{"code": "def verify_controller_module(module):\n    \n    required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')\n    for attr in required_attributes:\n        if not hasattr(module, attr):\n            raise signals.ControllerError(\n                'Module %s missing required controller module attribute'\n                ' %s.' % (module.__name__, attr))\n        if not getattr(module, attr):\n            raise signals.ControllerError(\n                'Controller interface %s in %s cannot be null.' %\n                (attr, module.__name__))", "docstring": "Verifies a module object follows the required interface for\ncontrollers.\n\nThe interface is explained in the docstring of\n`base_test.BaseTestClass.register_controller`.\n\nArgs:\nmodule: An object that is a controller module. This is usually\nimported with import statements or loaded by importlib.\n\nRaises:\nControllerError: if the module does not match the Mobly controller\ninterface, or one of the required members is null.", "source": "juraj-google-style"}
{"code": "def _validate_ids(self, resource_ids):\n        \n        for resource_id in resource_ids:\n            if self._id_regex.fullmatch(resource_id) is None:\n                LOGGER.debug('Invalid resource id requested: %s', resource_id)\n                raise _ResponseFailed(self._status.INVALID_ID)", "docstring": "Validates a list of ids, raising a ResponseFailed error if invalid.\n\nArgs:\nresource_id (list of str): The ids to validate\n\nRaises:\nResponseFailed: The id was invalid, and a status of INVALID_ID\nwill be sent with the response.", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = BytearrayStream()\n    if self._cryptographic_parameters:\n        self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version)\n    if self._initialization_vector:\n        self._initialization_vector.write(local_stream, kmip_version=kmip_version)\n    if self._derivation_data:\n        self._derivation_data.write(local_stream, kmip_version=kmip_version)\n    if self._salt:\n        self._salt.write(local_stream, kmip_version=kmip_version)\n    if self._iteration_count:\n        self._iteration_count.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(DerivationParameters, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the DerivationParameters struct to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"}
{"code": "def __init__(self, channel):\n    \n    self.Capabilities = channel.unary_unary(\n        '/gnmi.gNMI/Capabilities',\n        request_serializer=gnmi__pb2.CapabilityRequest.SerializeToString,\n        response_deserializer=gnmi__pb2.CapabilityResponse.FromString,\n        )\n    self.Get = channel.unary_unary(\n        '/gnmi.gNMI/Get',\n        request_serializer=gnmi__pb2.GetRequest.SerializeToString,\n        response_deserializer=gnmi__pb2.GetResponse.FromString,\n        )\n    self.Set = channel.unary_unary(\n        '/gnmi.gNMI/Set',\n        request_serializer=gnmi__pb2.SetRequest.SerializeToString,\n        response_deserializer=gnmi__pb2.SetResponse.FromString,\n        )\n    self.Subscribe = channel.stream_stream(\n        '/gnmi.gNMI/Subscribe',\n        request_serializer=gnmi__pb2.SubscribeRequest.SerializeToString,\n        response_deserializer=gnmi__pb2.SubscribeResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def __init__(self, sql, module=None):\n    \n    self._sql = sql\n    self._module = module", "docstring": "Initializes the SqlStatement.\n\nArgs:\nsql: a string containing a SQL query with optional variable references.\nmodule: if defined in a %%sql cell, the parent SqlModule object for the SqlStatement.", "source": "juraj-google-style"}
{"code": "def predict_dataset(self, x, **kwargs):\n        \n        printout = kwargs.get(\"printout\", None)\n        pred = []\n        res = []\n        x.columns = [\"A\", \"B\"]\n        for idx, row in x.iterrows():\n            a = scale(row['A'].reshape((len(row['A']), 1)))\n            b = scale(row['B'].reshape((len(row['B']), 1)))\n\n            pred.append(self.predict_proba(a, b, idx=idx))\n\n            if printout is not None:\n                res.append([row['SampleID'], pred[-1]])\n                DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(\n                    printout, index=False)\n        return pred", "docstring": "Generic dataset prediction function.\n\nRuns the score independently on all pairs.\n\nArgs:\nx (pandas.DataFrame): a CEPC format Dataframe.\nkwargs (dict): additional arguments for the algorithms\n\nReturns:\npandas.DataFrame: a Dataframe with the predictions.", "source": "juraj-google-style"}
{"code": "def inspect_network(self, net_id, verbose=None, scope=None):\n        \n        params = {}\n        if verbose is not None:\n            if version_lt(self._version, '1.28'):\n                raise InvalidVersion('verbose was introduced in API 1.28')\n            params['verbose'] = verbose\n        if scope is not None:\n            if version_lt(self._version, '1.31'):\n                raise InvalidVersion('scope was introduced in API 1.31')\n            params['scope'] = scope\n\n        url = self._url(\"/networks/{0}\", net_id)\n        res = self._get(url, params=params)\n        return self._result(res, json=True)", "docstring": "Get detailed information about a network.\n\nArgs:\nnet_id (str): ID of network\nverbose (bool): Show the service details across the cluster in\nswarm mode.\nscope (str): Filter the network by scope (``swarm``, ``global``\nor ``local``).", "source": "juraj-google-style"}
{"code": "def from_string(species_string: str):\n        \n        m = re.search(r\"([A-Z][a-z]*)([0-9.]*)([+\\-]*)(.*)\", species_string)\n        if m:\n            sym = m.group(1)\n            if m.group(2) == \"\" and m.group(3) == \"\":\n                oxi = 0\n            else:\n                oxi = 1 if m.group(2) == \"\" else float(m.group(2))\n                oxi = -oxi if m.group(3) == \"-\" else oxi\n            properties = None\n            if m.group(4):\n                toks = m.group(4).split(\"=\")\n                properties = {toks[0]: float(toks[1])}\n            return DummySpecie(sym, oxi, properties)\n        raise ValueError(\"Invalid DummySpecies String\")", "docstring": "Returns a Dummy from a string representation.\n\nArgs:\nspecies_string (str): A string representation of a dummy\nspecies, e.g., \"X2+\", \"X3+\".\n\nReturns:\nA DummySpecie object.\n\nRaises:\nValueError if species_string cannot be intepreted.", "source": "juraj-google-style"}
{"code": "def path_compute(\n    p: tcod.path.AStar, ox: int, oy: int, dx: int, dy: int\n) -> bool:\n    \n    return bool(lib.TCOD_path_compute(p._path_c, ox, oy, dx, dy))", "docstring": "Find a path from (ox, oy) to (dx, dy).  Return True if path is found.\n\nArgs:\np (AStar): An AStar instance.\nox (int): Starting x position.\noy (int): Starting y position.\ndx (int): Destination x position.\ndy (int): Destination y position.\nReturns:\nbool: True if a valid path was found.  Otherwise False.", "source": "juraj-google-style"}
{"code": "def _example_from_complex_def(self, prop_spec):\n    if ('schema' not in prop_spec):\n        return [{}]\n    elif ('type' not in prop_spec['schema']):\n        definition_name = self.get_definition_name_from_ref(prop_spec['schema']['$ref'])\n        if self.build_one_definition_example(definition_name):\n            return self.definitions_example[definition_name]\n    elif (prop_spec['schema']['type'] == 'array'):\n        if ('items' in prop_spec.keys()):\n            definition_name = self.get_definition_name_from_ref(prop_spec['items']['$ref'])\n        elif ('$ref' in prop_spec['schema']['items']):\n            definition_name = self.get_definition_name_from_ref(prop_spec['schema']['items']['$ref'])\n        else:\n            definition_name = self.get_definition_name_from_ref(prop_spec['schema']['items']['type'])\n            return [definition_name]\n        return [self.definitions_example[definition_name]]\n    else:\n        return self.get_example_from_prop_spec(prop_spec['schema'])", "docstring": "Get an example from a property specification.\n\nIn case there is no \"type\" key in the root of the dictionary.\n\nArgs:\nprop_spec: property specification you want an example of.\n\nReturns:\nAn example.", "source": "codesearchnet"}
{"code": "def as_dict(self):\n    out = {}\n    for prop in self:\n        propval = getattr(self, prop)\n        if hasattr(propval, 'for_json'):\n            out[prop] = propval.for_json()\n        elif isinstance(propval, list):\n            out[prop] = [getattr(x, 'for_json', (lambda : x))() for x in propval]\n        elif isinstance(propval, (ProtocolBase, LiteralValue)):\n            out[prop] = propval.as_dict()\n        elif (propval is not None):\n            out[prop] = propval\n    return out", "docstring": "Return a dictionary containing the current values\nof the object.\n\nReturns:\n(dict): The object represented as a dictionary", "source": "codesearchnet"}
{"code": "def list_documents(self, page_size=None):\n    (parent, _) = self._parent_info()\n    iterator = self._client._firestore_api.list_documents(parent, self.id, page_size=page_size, show_missing=True, metadata=self._client._rpc_metadata)\n    iterator.collection = self\n    iterator.item_to_value = _item_to_document_ref\n    return iterator", "docstring": "List all subdocuments of the current collection.\n\nArgs:\npage_size (Optional[int]]): The maximum number of documents\nin each page of results from this request. Non-positive values\nare ignored. Defaults to a sensible value set by the API.\n\nReturns:\nSequence[~.firestore_v1beta1.collection.DocumentReference]:\niterator of subdocuments of the current collection. If the\ncollection does not exist at the time of `snapshot`, the\niterator will be empty", "source": "codesearchnet"}
{"code": "def __init__(self, manager, obj_cls, _list):\n        \n        self.manager = manager\n        self._obj_cls = obj_cls\n        self._list = _list", "docstring": "Creates an objects list from a GitlabList.\n\nYou should not create objects of this type, but use managers list()\nmethods instead.\n\nArgs:\nmanager: the RESTManager to attach to the objects\nobj_cls: the class of the created objects\n_list: the GitlabList holding the data", "source": "juraj-google-style"}
{"code": "def get_by_resource(self, resource_uri):\n        \n        uri = self.URI + self.RESOURCES_PATH + '/' + resource_uri\n        return self._client.get(id_or_uri=uri)", "docstring": "Gets all the labels for the specified resource\n\nArgs:\nresource_uri: The resource URI\n\nReturns:\ndict: Resource Labels", "source": "juraj-google-style"}
{"code": "def minimum(station_code):\n    temp = None\n    fin = None\n    try:\n        fin = open(('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'ddy'))))\n    except IOError:\n        logger.info('File not found')\n        download_extract(_eere_url(station_code))\n        fin = open(('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'ddy'))))\n    for line in fin:\n        value = re.search('Max Drybulb=(-?\\\\d+\\\\.\\\\d*)', line)\n        if value:\n            temp = float(value.groups()[0])\n    if (not temp):\n        try:\n            fin = open(('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'stat'))))\n            for line in fin:\n                if (line.find('Minimum Dry Bulb') is not (- 1)):\n                    return float(line[37:(- 1)].split('°')[0])\n        except IOError:\n            pass\n    if temp:\n        return temp\n    else:\n        raise Exception('Error: Minimum Temperature not found')", "docstring": "Extreme Minimum Design Temperature for a location.\n\nDegrees in Celcius\n\nArgs:\nstation_code (str): Weather Station Code\n\nReturns:\nfloat degrees Celcius", "source": "codesearchnet"}
{"code": "def _url(self, url=None, parameters=None):\n    uri = (url or self._settings['url'])\n    if (url and self._settings['base_url']):\n        uri = ('%s/%s' % (self._settings['base_url'], url))\n    uri += '.json'\n    if parameters:\n        uri += ('?%s' % urllib.urlencode(parameters))\n    return uri", "docstring": "Build destination URL.\n\nKwargs:\nurl (str): Destination URL\nparameters (dict): Additional GET parameters to append to the URL\n\nReturns:\nstr. URL", "source": "codesearchnet"}
{"code": "def patchify_image(self, image: 'torch.Tensor', patch_size: Optional[Dict[str, int]]=None) -> 'torch.Tensor':\n    requires_backends(self, ['torch'])\n    patch_size = patch_size if patch_size is not None else self.patch_size\n    patch_height, patch_width = (patch_size['height'], patch_size['width'])\n    batch_size, channels, _, _ = image.shape\n    unfolded_along_height = image.unfold(2, patch_height, patch_height)\n    patches = unfolded_along_height.unfold(3, patch_width, patch_width)\n    patches = patches.contiguous()\n    patches = patches.view(batch_size, channels, -1, patch_height, patch_width)\n    patches = patches.permute(0, 2, 3, 4, 1)\n    patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)\n    return patches", "docstring": "Convert an image into a tensor of patches.\n\nArgs:\nimage (`torch.Tensor`):\nImage to convert. Shape: [batch, channels, height, width]\npatch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):\nDictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the patches.", "source": "github-repos"}
{"code": "def create_template(self, s, provider_name=None):\n    if (provider_name is None):\n        provider_name = self.supported_providers[0]\n    return template_exception_handler((lambda : self.get_provider(provider_name).create_template(s)), self.error_context)", "docstring": "Creates a template from the given string based on the specified provider or the provider with\nhighest precedence.\n\nArgs:\ns: The string to convert to a template.\nprovider_name: The name of the provider to use to create the template.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    evt_file = pyevt.file()\n    evt_file.set_ascii_codepage(parser_mediator.codepage)\n\n    try:\n      evt_file.open_file_object(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open file with error: {0!s}'.format(exception))\n      return\n\n    try:\n      self._ParseRecords(parser_mediator, evt_file)\n    finally:\n      evt_file.close()", "docstring": "Parses a Windows EventLog (EVT) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.", "source": "juraj-google-style"}
{"code": "async def ban_user(channel, user):\n    \n\n    data = datatools.get_data()\n    server_id = channel.server.id\n\n    try:\n        await client.ban(user)\n    except discord.errors.Forbidden:\n        await client.send_typing(channel)\n        embed = ui_embed.error(channel, \"Ban Error\", \"I do not have the permissions to ban that person.\")\n        await embed.send()\n        return\n\n    \n    if \"warnings\" in data[\"discord\"][\"servers\"][server_id][_data.modulename]:\n        if user.id in data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"]:\n            data[\"discord\"][\"servers\"][server_id][_data.modulename][\"warnings\"][user.id] = 0\n            datatools.write_data(data)\n\n    await client.send_typing(channel)\n    embed = ui_embed.user_ban(channel, user)\n    await embed.send()\n\n    try:\n        response = \"You have been banned from the server '{}' \" \\\n                   \"contact the owners to resolve this issue.\".format(channel.server.name)\n        await client.send_message(user, response)\n    except Exception as e:\n        logger.exception(e)", "docstring": "Bans a user from a server\n\nArgs:\nchannel: The channel to send the warning message in\nuser: The user to give the warning to", "source": "juraj-google-style"}
{"code": "def add_prefix(self, name, *args, **kwargs):\n        \n        if os.path.exists(self.join(name)):\n            raise LagoPrefixAlreadyExistsError(name, self.path)\n\n        self.prefixes[name] = self.prefix_class(\n            self.join(name), *args, **kwargs\n        )\n        self.prefixes[name].initialize()\n        if self.current is None:\n            self.set_current(name)\n\n        return self.prefixes[name]", "docstring": "Adds a new prefix to the workdir.\n\nArgs:\nname(str): Name of the new prefix to add\n*args: args to pass along to the prefix constructor\n*kwargs: kwargs to pass along to the prefix constructor\n\nReturns:\nThe newly created prefix\n\nRaises:\nLagoPrefixAlreadyExistsError: if prefix name already exists in the\nworkdir", "source": "juraj-google-style"}
{"code": "def collective_dr_squared( self ):\n        \n        return sum( np.square( sum( [ atom.dr for atom in self.atoms ] ) ) )", "docstring": "Squared sum of total displacements for these atoms.\n\nArgs:\nNone\n\nReturns:\n(Float): The square of the summed total displacements for these atoms.", "source": "juraj-google-style"}
{"code": "def dedent(self, node, dirty=True):\n    if (node.id not in self._subitems):\n        return\n    del self._subitems[node.id]\n    node.super_list_item_id = None\n    node.parent_item = None\n    if dirty:\n        node.touch(True)", "docstring": "Dedent an item. Does nothing if the target is not indented under this item.\n\nArgs:\nnode (gkeepapi.node.ListItem): Item to dedent.\ndirty (bool): Whether this node should be marked dirty.", "source": "codesearchnet"}
{"code": "def _get_data(filenames):\n    \n    if filenames:\n        data = \"\"\n        for filename in filenames:\n            with open(filename, \"rb\") as f:\n                data += f.read()\n    else:\n        data = sys.stdin.read()\n\n    return data", "docstring": "Read data from file(s) or STDIN.\n\nArgs:\nfilenames (list): List of files to read to get data. If empty or\nNone, read from STDIN.", "source": "juraj-google-style"}
{"code": "def fit(self, train_events, test_events, n_epoch=1):\n        \n        \n        for e in train_events:\n            self.__validate(e)\n            self.rec.users[e.user.index]['known_items'].add(e.item.index)\n            self.item_buffer.append(e.item.index)\n\n        \n        for e in test_events:\n            self.__validate(e)\n            self.item_buffer.append(e.item.index)\n\n        self.__batch_update(train_events, test_events, n_epoch)\n\n        \n        \n        for e in test_events:\n            self.rec.users[e.user.index]['known_items'].add(e.item.index)\n            self.rec.update(e)", "docstring": "Train a model using the first 30% positive events to avoid cold-start.\n\nEvaluation of this batch training is done by using the next 20% positive events.\nAfter the batch SGD training, the models are incrementally updated by using the 20% test events.\n\nArgs:\ntrain_events (list of Event): Positive training events (0-30%).\ntest_events (list of Event): Test events (30-50%).\nn_epoch (int): Number of epochs for the batch training.", "source": "juraj-google-style"}
{"code": "def create_position_ids_from_input_ids(input_ids: tf.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0) -> tf.Tensor:\n    mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=tf.int32)\n    incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask\n    return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding\nsymbols are ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\nx: tf.Tensor x:\nReturns: tf.Tensor", "source": "github-repos"}
{"code": "def _FormatMessages(self, format_string, short_format_string, event_values):\n    message_string = self._FormatMessage(format_string, event_values)\n    if short_format_string:\n        short_message_string = self._FormatMessage(short_format_string, event_values)\n    else:\n        short_message_string = message_string\n    if (len(short_message_string) > 80):\n        short_message_string = '{0:s}...'.format(short_message_string[:77])\n    return (message_string, short_message_string)", "docstring": "Determines the formatted message strings.\n\nArgs:\nformat_string (str): message format string.\nshort_format_string (str): short message format string.\nevent_values (dict[str, object]): event values.\n\nReturns:\ntuple(str, str): formatted message string and short message string.", "source": "codesearchnet"}
{"code": "def object_download(self, bucket, key, start_offset=0, byte_count=None):\n    \n    args = {'alt': 'media'}\n    headers = {}\n    if start_offset > 0 or byte_count is not None:\n      header = 'bytes=%d-' % start_offset\n      if byte_count is not None:\n        header += '%d' % byte_count\n      headers['Range'] = header\n    url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))\n    return google.datalab.utils.Http.request(url, args=args, headers=headers,\n                                             credentials=self._credentials, raw_response=True)", "docstring": "Reads the contents of an object as text.\n\nArgs:\nbucket: the name of the bucket containing the object.\nkey: the key of the object to be read.\nstart_offset: the start offset of bytes to read.\nbyte_count: the number of bytes to read. If None, it reads to the end.\nReturns:\nThe text content within the object.\nRaises:\nException if the object could not be read from.", "source": "juraj-google-style"}
{"code": "def dayname(year, month, day):\n    legal_date(year, month, day)\n    yearday = (((month - 1) * 28) + day)\n    if isleap(((year + YEAR_EPOCH) - 1)):\n        dname = data.day_names_leap[(yearday - 1)]\n    else:\n        dname = data.day_names[(yearday - 1)]\n    return (MONTHS[(month - 1)], dname)", "docstring": "Give the name of the month and day for a given date.\n\nReturns:\ntuple month_name, day_name", "source": "codesearchnet"}
{"code": "def path_get_origin(p: tcod.path.AStar) -> Tuple[int, int]:\n    \n    x = ffi.new(\"int *\")\n    y = ffi.new(\"int *\")\n    lib.TCOD_path_get_origin(p._path_c, x, y)\n    return x[0], y[0]", "docstring": "Get the current origin position.\n\nThis point moves when :any:`path_walk` returns the next x,y step.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nTuple[int, int]: An (x, y) point.", "source": "juraj-google-style"}
{"code": "def get_storage_account(access_token, subscription_id, rgname, account_name):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts/', account_name, '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "Get the properties for the named storage account.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new storage account.\n\nReturns:\nHTTP response. JSON body of storage account properties.", "source": "codesearchnet"}
{"code": "def info(self, user_id):\n    resp = self._rtm_client.get('v1/user.info?user_id={}'.format(user_id))\n    if resp.is_fail():\n        raise RTMServiceError('Failed to get user information', resp)\n    return resp.data['result']", "docstring": "Gets user information by user id\n\nArgs:\nuser_id(int): the id of user\n\nReturns:\nUser\n\nThrows:\nRTMServiceError when request failed", "source": "codesearchnet"}
{"code": "def safe_logit(p: Union[float, int]) -> Optional[float]:\n    r\n    if p > 1 or p < 0:\n        return None  \n    if p == 1:\n        return float(\"inf\")\n    if p == 0:\n        return float(\"-inf\")\n    return math.log(p / (1 - p))", "docstring": "r\"\"\"\nReturns the logit (log odds) of its input probability\n\n.. math::\n\n\\alpha = logit(p) = log(x / (1 - x))\n\nArgs:\np: :math:`p`\n\nReturns:\n:math:`\\alpha`, or ``None`` if ``x`` is not in the range [0, 1].", "source": "juraj-google-style"}
{"code": "def __init__(self, identifier=None, session_identifier=None):\n    \n    super(TaskStart, self).__init__()\n    self.identifier = identifier\n    self.session_identifier = session_identifier\n    self.timestamp = None", "docstring": "Initializes a task start attribute container.\n\nArgs:\nidentifier (Optional[str]): unique identifier of the task.\nThe identifier should match that of the corresponding\ntask completion information.\nsession_identifier (Optional[str]): identifier of the session the task\nis part of.", "source": "juraj-google-style"}
{"code": "def get_strip_metadata(self, catID):\n    self.logger.debug('Retrieving strip catalog metadata')\n    url = ('%(base_url)s/record/%(catID)s?includeRelationships=false' % {'base_url': self.base_url, 'catID': catID})\n    r = self.gbdx_connection.get(url)\n    if (r.status_code == 200):\n        return r.json()['properties']\n    elif (r.status_code == 404):\n        self.logger.debug(('Strip not found: %s' % catID))\n        r.raise_for_status()\n    else:\n        self.logger.debug(('There was a problem retrieving catid: %s' % catID))\n        r.raise_for_status()", "docstring": "Retrieves the strip catalog metadata given a cat ID.\n\nArgs:\ncatID (str): The source catalog ID from the platform catalog.\n\nReturns:\nmetadata (dict): A metadata dictionary .\n\nTODO: have this return a class object with interesting information exposed.", "source": "codesearchnet"}
{"code": "def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):\n    if dtype is not None:\n        dtype = dtypes.as_dtype(dtype)\n    if isinstance(value, SparseTensorValue):\n        value = SparseTensor.from_value(value)\n    if isinstance(value, SparseTensor):\n        if dtype and (not dtype.is_compatible_with(value.dtype)):\n            raise RuntimeError(f'Sparse dtype mismatch. Requested: {dtype.name},  Actual: {value.dtype.name}')\n        return value\n    return ops.convert_to_tensor(value, dtype=dtype, name=name)", "docstring": "Converts value to a `SparseTensor` or `Tensor`.\n\nArgs:\nvalue: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a\nregistered `Tensor` conversion function.\ndtype: Optional element type for the returned tensor. If missing, the type\nis inferred from the type of `value`.\nname: Optional name to use if a new `Tensor` is created.\n\nReturns:\nA `SparseTensor` or `Tensor` based on `value`.\n\nRaises:\nRuntimeError: If result type is incompatible with `dtype`.", "source": "github-repos"}
{"code": "def seat_slot(self):\n    if (self.type == EventType.TOUCH_FRAME):\n        raise AttributeError(_wrong_prop.format(self.type))\n    return self._libinput.libinput_event_touch_get_seat_slot(self._handle)", "docstring": "The seat slot of the touch event.\n\nA seat slot is a non-negative seat wide unique identifier of an active\ntouch point.\n\nEvents from single touch devices will be represented as one individual\ntouch point per device.\n\nFor events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`,\n:attr:`~libinput.constant.EventType.TOUCH_UP`,\n:attr:`~libinput.constant.EventType.TOUCH_MOTION` or\n:attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property\nraises :exc:`AssertionError`.\n\nReturns:\nint: The seat slot of the touch event.\nRaises:\nAssertionError", "source": "codesearchnet"}
{"code": "def reboot(self, target_mode=None, timeout_ms=None):\n    \n    return self._simple_command('reboot', arg=target_mode,\n                                timeout_ms=timeout_ms)", "docstring": "Reboots the device.\n\nArgs:\ntarget_mode: Normal reboot when unspecified (or None). Can specify\nother target modes, such as 'recovery' or 'bootloader'.\ntimeout_ms: Optional timeout in milliseconds to wait for a response.\nReturns:\nUsually the empty string. Depends on the bootloader and the target_mode.", "source": "juraj-google-style"}
{"code": "def export_mt_variants(variants, sample_id):\n    document_lines = []\n    for variant in variants:\n        line = []\n        position = variant.get('position')\n        change = '>'.join([variant.get('reference'), variant.get('alternative')])\n        line.append(position)\n        line.append(change)\n        line.append((str(position) + change))\n        genes = []\n        prot_effect = []\n        for gene in variant.get('genes'):\n            genes.append(gene.get('hgnc_symbol', ''))\n            for transcript in gene.get('transcripts'):\n                if (transcript.get('is_canonical') and transcript.get('protein_sequence_name')):\n                    prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))\n        line.append(','.join(prot_effect))\n        line.append(','.join(genes))\n        ref_ad = ''\n        alt_ad = ''\n        for sample in variant['samples']:\n            if (sample.get('sample_id') == sample_id):\n                ref_ad = sample['allele_depths'][0]\n                alt_ad = sample['allele_depths'][1]\n        line.append(ref_ad)\n        line.append(alt_ad)\n        document_lines.append(line)\n    return document_lines", "docstring": "Export mitochondrial variants for a case to create a MT excel report\n\nArgs:\nvariants(list): all MT variants for a case, sorted by position\nsample_id(str) : the id of a sample within the case\n\nReturns:\ndocument_lines(list): list of lines to include in the document", "source": "codesearchnet"}
{"code": "def _get_connection(self, cluster):\n    if ('connection' not in cluster):\n        cluster['connection'] = self._connection_class(socketTimeoutMS=self._network_timeout, w=1, j=self.j, **cluster['params'])\n    return cluster['connection']", "docstring": "Return a connection to a Cluster.\n\nReturn a MongoClient or a MongoReplicaSetClient for the given Cluster.\nThis is done in a lazy manner (if there is already a Client connected to\nthe Cluster, it is returned and no other Client is created).\n\nArgs:\ncluster: A dict containing information about a cluster.\n\nReturns:\nA MongoClient or MongoReplicaSetClient instance connected to the\ndesired cluster", "source": "codesearchnet"}
{"code": "def graph_structure(self, x1x2):\n        \n        with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),\n                      padding='valid', strides=2, kernel_size=3,\n                      data_format='channels_first'), \\\n            argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,\n                     data_format='channels_first', strides=2, kernel_size=4):\n\n            \n            x = tf.layers.conv2d(pad(x1x2, 3), 64, kernel_size=7, name='conv1')\n            conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2')\n            conv3 = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3')\n\n            conv2a, _ = tf.split(conv2, 2, axis=0)\n            conv3a, conv3b = tf.split(conv3, 2, axis=0)\n\n            corr = correlation(conv3a, conv3b,\n                               kernel_size=1,\n                               max_displacement=20,\n                               stride_1=1,\n                               stride_2=2,\n                               pad=20, data_format='NCHW')\n            corr = tf.nn.leaky_relu(corr, 0.1)\n\n            conv_redir = tf.layers.conv2d(conv3a, 32, kernel_size=1, strides=1, name='conv_redir')\n\n            in_conv3_1 = tf.concat([conv_redir, corr], axis=1, name='in_conv3_1')\n            conv3_1 = tf.layers.conv2d(pad(in_conv3_1, 1), 256, name='conv3_1', strides=1)\n\n            x = tf.layers.conv2d(pad(conv3_1, 1), 512, name='conv4')\n            conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)\n            x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')\n            conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)\n            x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')\n            conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)\n\n            flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)\n            flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5')\n            x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            \n            concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')\n            flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)\n            flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4')\n            x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')\n            flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)\n            flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3')\n            x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat3 = tf.concat([conv3_1, x, flow4_up], axis=1, name='concat3')\n            flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)\n            flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2')\n            x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))\n\n            concat2 = tf.concat([conv2a, x, flow3_up], axis=1, name='concat2')\n            flow2 
= tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)\n\n            return tf.identity(flow2, name='flow2')", "docstring": "Architecture of FlowNetCorr in Figure 2 of FlowNet 1.0.\nArgs:\nx: 2CHW.", "source": "juraj-google-style"}
{"code": "def get_flat_tensors_for_gradients(xs):\n    return nest.flatten([_get_tensors_for_gradient(x) for x in xs])", "docstring": "Returns a flat list of Tensors that should be differentiated for `xs`.\n\nArgs:\nxs: A list of `Tensor`s or `CompositeTensor`s.\n\nReturns:\nA flat list of `Tensor`s constructed from `xs`, where `Tensor` values are\nleft as-is, and `CompositeTensor`s are replaced with\n`_get_tensors_for_gradient(x)`.", "source": "github-repos"}
{"code": "def _parse_directory(self):\n    if self._parser.has_option('storage', 'directory'):\n        directory = self._parser.get('storage', 'directory')\n        if (directory == CUSTOM_APPS_DIR):\n            raise ConfigError('{} cannot be used as a storage directory.'.format(CUSTOM_APPS_DIR))\n    else:\n        directory = MACKUP_BACKUP_PATH\n    return str(directory)", "docstring": "Parse the storage directory in the config.\n\nReturns:\nstr", "source": "codesearchnet"}
{"code": "def _head_object(s3_conn, bucket, key):\n    try:\n        return s3_conn.head_object(Bucket=bucket, Key=key)\n    except botocore.exceptions.ClientError as e:\n        if (e.response['Error']['Code'] == '404'):\n            return None\n        else:\n            raise", "docstring": "Retrieve information about an object in S3 if it exists.\n\nArgs:\ns3_conn (botocore.client.S3): S3 connection to use for operations.\nbucket (str): name of the bucket containing the key.\nkey (str): name of the key to lookup.\n\nReturns:\ndict: S3 object information, or None if the object does not exist.\nSee the AWS documentation for explanation of the contents.\n\nRaises:\nbotocore.exceptions.ClientError: any error from boto3 other than key\nnot found is passed through.", "source": "codesearchnet"}
{"code": "def delete_nsg(access_token, subscription_id, resource_group, nsg_name):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/resourceGroups/', resource_group,\n                        '/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,\n                        '?api-version=', NETWORK_API])\n    return do_delete(endpoint, access_token)", "docstring": "Delete network security group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nnsg_name (str): Name of the NSG.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"}
{"code": "def check_for_replay(name, names_to_seq_id, msg, config, context=None):\n    prev_seq_id = names_to_seq_id.get(name, None)\n    cur_seq_id = msg.get('seq_id', None)\n    if ((prev_seq_id is None) or (cur_seq_id is None)):\n        return [msg]\n    if (cur_seq_id <= prev_seq_id):\n        return []\n    if ((cur_seq_id == (prev_seq_id + 1)) or (prev_seq_id < 0)):\n        ret = [msg]\n    else:\n        ret = list(get_replay(name, {'seq_id_range': (prev_seq_id, cur_seq_id)}, config, context))\n        if ((len(ret) == 0) or (ret[(- 1)]['seq_id'] < msg['seq_id'])):\n            ret.append(msg)\n    names_to_seq_id[name] = cur_seq_id\n    return ret", "docstring": "Check to see if messages need to be replayed.\n\nArgs:\nname (str): The consumer's name.\nnames_to_seq_id (dict): A dictionary that maps names to the last seen sequence ID.\nmsg (dict): The latest message that has arrived.\nconfig (dict): A configuration dictionary. This dictionary should contain, at a\nminimum, two keys. The first key, 'replay_endpoints', should be a dictionary\nthat maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an\ninteger used to initialize the ZeroMQ context.\ncontext (zmq.Context): The ZeroMQ context to use. If a context is not provided,\none will be created.\n\nReturns:\nlist: A list of message dictionaries.", "source": "codesearchnet"}
{"code": "def organize_models(self, outdir, force_rerun=False):\n        \n        uniprot_to_swissmodel = defaultdict(list)\n        for u, models in self.all_models.items():\n            for m in models:\n                original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])\n                file_path = op.join(self.metadata_dir,\n                                    u[:2], u[2:4], u[4:], 'swissmodel',\n                                    '{}.pdb'.format(original_filename))\n                if op.exists(file_path):\n                    new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])\n                    shutil.copy(file_path, op.join(outdir, new_filename))\n                    uniprot_to_swissmodel[u].append(new_filename)\n                else:\n                    log.warning('{}: no file {} found for model'.format(u, file_path))\n\n        return uniprot_to_swissmodel", "docstring": "Organize and rename SWISS-MODEL models to a single folder with a name containing template information.\n\nArgs:\noutdir (str): New directory to copy renamed models to\nforce_rerun (bool): If models should be copied again even if they already exist\n\nReturns:\ndict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values", "source": "juraj-google-style"}
{"code": "def build_model(self, token_encoder_model, sentence_encoder_model, trainable_embeddings=True, output_activation='softmax'):\n    if (not isinstance(token_encoder_model, SequenceEncoderBase)):\n        raise ValueError('`token_encoder_model` should be an instance of `{}`'.format(SequenceEncoderBase))\n    if (not isinstance(sentence_encoder_model, SequenceEncoderBase)):\n        raise ValueError('`sentence_encoder_model` should be an instance of `{}`'.format(SequenceEncoderBase))\n    if ((not sentence_encoder_model.allows_dynamic_length()) and (self.max_sents is None)):\n        raise ValueError(\"Sentence encoder model '{}' requires padding. You need to provide `max_sents`\")\n    if (self.embeddings_index is None):\n        embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings)\n    else:\n        embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights(self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings)\n    word_input = Input(shape=(self.max_tokens,), dtype='int32')\n    x = embedding_layer(word_input)\n    word_encoding = token_encoder_model(x)\n    token_encoder_model = Model(word_input, word_encoding, name='word_encoder')\n    doc_input = Input(shape=(self.max_sents, self.max_tokens), dtype='int32')\n    sent_encoding = TimeDistributed(token_encoder_model)(doc_input)\n    x = sentence_encoder_model(sent_encoding)\n    x = Dense(self.num_classes, activation=output_activation)(x)\n    return Model(doc_input, x)", "docstring": "Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by\n`sentence_encoder_model`.\n\nArgs:\ntoken_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model\nwill be applied across all sentences to create a sentence encoding.\nsentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by\n`token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification.\ntrainable_embeddings: Whether or not to fine tune embeddings.\noutput_activation: The output activation to use. (Default value: 'softmax')\nUse:\n- `softmax` for binary or multi-class.\n- `sigmoid` for multi-label classification.\n- `linear` for regression output.\n\nReturns:\nThe model output tensor.", "source": "codesearchnet"}
{"code": "def RegisterHasher(cls, hasher_class):\n    hasher_name = hasher_class.NAME.lower()\n    if (hasher_name in cls._hasher_classes):\n        raise KeyError('hasher class already set for name: {0:s}.'.format(hasher_class.NAME))\n    cls._hasher_classes[hasher_name] = hasher_class", "docstring": "Registers a hasher class.\n\nThe hasher classes are identified based on their lower case name.\n\nArgs:\nhasher_class (type): class object of the hasher.\n\nRaises:\nKeyError: if hasher class is already set for the corresponding name.", "source": "codesearchnet"}
{"code": "def Execute(self, message):\n    \n    self.message = message\n    if message:\n      self.require_fastpoll = message.require_fastpoll\n\n    args = None\n    try:\n      if self.message.args_rdf_name:\n        if not self.in_rdfvalue:\n          raise RuntimeError(\"Did not expect arguments, got %s.\" %\n                             self.message.args_rdf_name)\n\n        if self.in_rdfvalue.__name__ != self.message.args_rdf_name:\n          raise RuntimeError(\n              \"Unexpected arg type %s != %s.\" %\n              (self.message.args_rdf_name, self.in_rdfvalue.__name__))\n\n        args = self.message.payload\n\n      \n      if self._authentication_required and (\n          self.message.auth_state !=\n          rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):\n        raise RuntimeError(\"Message for %s was not Authenticated.\" %\n                           self.message.name)\n\n      self.cpu_start = self.proc.cpu_times()\n      self.cpu_limit = self.message.cpu_limit\n\n      if getattr(flags.FLAGS, \"debug_client_actions\", False):\n        pdb.set_trace()\n\n      try:\n        self.Run(args)\n\n      \n      finally:\n        used = self.proc.cpu_times()\n        self.cpu_used = (used.user - self.cpu_start.user,\n                         used.system - self.cpu_start.system)\n\n    except NetworkBytesExceededError as e:\n      self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED,\n                     \"%r: %s\" % (e, e), traceback.format_exc())\n\n    \n    \n    except Exception as e:  \n      self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,\n                     \"%r: %s\" % (e, e), traceback.format_exc())\n\n      if flags.FLAGS.pdb_post_mortem:\n        self.DisableNanny()\n        pdb.post_mortem()\n\n    if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK:\n      logging.info(\"Job Error (%s): %s\", self.__class__.__name__,\n                   self.status.error_message)\n\n      if self.status.backtrace:\n        logging.debug(self.status.backtrace)\n\n    if self.cpu_used:\n      self.status.cpu_time_used.user_cpu_time = self.cpu_used[0]\n      self.status.cpu_time_used.system_cpu_time = self.cpu_used[1]\n\n    \n    self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS)\n\n    self._RunGC()", "docstring": "This function parses the RDFValue from the server.\n\nThe Run method will be called with the specified RDFValue.\n\nArgs:\nmessage:     The GrrMessage that we are called to process.\n\nReturns:\nUpon return a callback will be called on the server to register\nthe end of the function and pass back exceptions.\nRaises:\nRuntimeError: The arguments from the server do not match the expected\nrdf type.", "source": "juraj-google-style"}
{"code": "def get_dsub_version():\n    filename = os.path.join(os.path.dirname(__file__), 'dsub/_dsub_version.py')\n    with open(filename, 'r') as versionfile:\n        for line in versionfile:\n            if line.startswith('DSUB_VERSION ='):\n                version = line.partition('=')[2]\n                return version.strip().strip('\\'\"')\n    raise ValueError('Could not find version.')", "docstring": "Get the dsub version out of the _dsub_version.py source file.\n\nSetup.py should not import dsub version from dsub directly since ambiguity in\nimport order could lead to an old version of dsub setting the version number.\nParsing the file directly is simpler than using import tools (whose interface\nvaries between python 2.7, 3.4, and 3.5).\n\nReturns:\nstring of dsub version.\n\nRaises:\nValueError: if the version is not found.", "source": "codesearchnet"}
{"code": "def variable_product_dict(variabledict: dict[str, cfg.Variable], limit: int=DEEP_VARIABLE_LIMIT):\n    return [dict(d) for d in _variable_product_items(variabledict.items(), ComplexityLimit(limit))]", "docstring": "Take the Cartesian product of variables in the values of a dict.\n\nThis Cartesian product is taken using the dict keys as the indices into the\ninput and output dicts. So:\nvariable_product_dict({\"x\": Variable(a, b), \"y\": Variable(c, d)})\n==\n[{\"x\": a, \"y\": c}, {\"x\": a, \"y\": d}, {\"x\": b, \"y\": c}, {\"x\": b, \"y\": d}]\nThis is exactly analogous to a traditional Cartesian product except that\ninstead of trying each possible value of a numbered position, we are trying\neach possible value of a named position.\n\nArgs:\nvariabledict: A dict with variable values.\nlimit: How many results to allow before aborting.\n\nReturns:\nA list of dicts with Value values.", "source": "github-repos"}
{"code": "def __init__(self, resolver_context):\n    \n    super(SQLiteBlobFileSystem, self).__init__(resolver_context)\n    self._file_object = None\n    self._number_of_rows = None", "docstring": "Initializes a file system.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def list_storage_accounts_rg(access_token, subscription_id, rgname):\n    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts', '?api-version=', STORAGE_API])\n    return do_get(endpoint, access_token)", "docstring": "List the storage accounts in the specified resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\n\nReturns:\nHTTP response. JSON body list of storage accounts.", "source": "codesearchnet"}
{"code": "def encode(self, sequence):\n    sequence = super().encode(sequence)\n    sequence = self.tokenize(sequence)\n    vector = [self.stoi.get(token, self.unknown_index) for token in sequence]\n    if self.append_eos:\n        vector.append(self.eos_index)\n    return torch.tensor(vector)", "docstring": "Encodes a ``sequence``.\n\nArgs:\nsequence (str): String ``sequence`` to encode.\n\nReturns:\ntorch.Tensor: Encoding of the ``sequence``.", "source": "codesearchnet"}
{"code": "def get_object_id_from_graph(access_token=None):\n    \n    if access_token is None:\n        access_token = get_graph_token_from_msi()\n\n    endpoint = 'https:\n    headers = {'Authorization': 'Bearer ' + access_token, 'Host': GRAPH_RESOURCE_HOST}\n    ret = requests.get(endpoint, headers=headers)\n    return ret.json()['id']", "docstring": "Return the object ID for the Graph user who owns the access token.\n\nArgs:\naccess_token (str): A Microsoft Graph access token. (Not an Azure access token.)\nIf not provided, attempt to get it from MSI_ENDPOINT.\n\nReturns:\nAn object ID string for a user or service principal.", "source": "juraj-google-style"}
{"code": "def verify(self):\n    if any(((not i) for i in (self.chat_uid, self.module_id))):\n        raise ValueError('Chat data is incomplete.')\n    if (not isinstance(self.chat_type, ChatType)):\n        raise ValueError('Invalid chat type.')\n    if (self.chat_type == ChatType.Group):\n        if any((((not isinstance(i, EFBChat)) or (not (i.chat_type == ChatType.User))) for i in self.members)):\n            raise ValueError('The group has an invalid member.')\n    if ((self.group is not None) and ((not isinstance(self.group, EFBChat)) or (not (self.group.chat_type == ChatType.Group)))):\n        raise ValueError('The member is in an invalid group.')", "docstring": "Verify the completeness of the data.\n\nRaises:\nValueError: When this chat is invalid.", "source": "codesearchnet"}
{"code": "def create_input_data_based_on_hw_requirement(num_chip, max_unique_ids_per_partition, per_sc_vocab_size, per_sc_sample_count, num_minibatches_per_physical_sparse_core):\n    num_sc_per_chip = 4\n    num_physical_replica = num_chip * num_sc_per_chip\n    col_ids = []\n    row_ids = []\n    gains = []\n    smallest_num_division = np.power(2, np.ceil(np.log2(num_minibatches_per_physical_sparse_core)))\n    division_size = (per_sc_vocab_size + smallest_num_division - 1) \n    assert division_size >= max_unique_ids_per_partition, f'The max_unique_ids_per_partition is set to {max_unique_ids_per_partition} and the number of minibatches per sparse core is set to {num_minibatches_per_physical_sparse_core}. But the vocab size per sparse core is {per_sc_vocab_size} which is not going to fit that many minibatches, consider setting the number of minibatches smaller.'\n    per_sc_per_minibatch_id_nums_for_each_replica = np.random.randint(max_unique_ids_per_partition * (num_minibatches_per_physical_sparse_core - 1) + 1, max_unique_ids_per_partition * num_minibatches_per_physical_sparse_core + 1, size=num_physical_replica)\n    per_chip_sample_count = per_sc_sample_count * num_sc_per_chip\n    for chip_id in range(num_chip):\n        for sc_id in range(num_sc_per_chip):\n            np.random.shuffle(per_sc_per_minibatch_id_nums_for_each_replica)\n            for physical_replica_id in range(num_physical_replica):\n                physical_replica_id_nums = per_sc_per_minibatch_id_nums_for_each_replica[physical_replica_id]\n                local_col_ids = np.array([])\n                for i in range(num_minibatches_per_physical_sparse_core):\n                    local_col_ids_minibatch_size = max_unique_ids_per_partition\n                    if i == num_minibatches_per_physical_sparse_core - 1:\n                        local_col_ids_minibatch_size = physical_replica_id_nums - i * max_unique_ids_per_partition\n                    local_col_ids = np.append(local_col_ids, np.random.choice(np.arange(division_size), size=local_col_ids_minibatch_size, replace=False) + i * division_size)\n                local_row_ids = np.random.randint(low=0, high=per_sc_sample_count, size=physical_replica_id_nums)\n                row_ids += list(local_row_ids + chip_id * per_chip_sample_count + sc_id * per_sc_sample_count)\n                col_ids += list(local_col_ids * num_physical_replica + physical_replica_id)\n                gains += list(np.random.random(size=physical_replica_id_nums))\n    return (np.array(row_ids), np.array(col_ids), np.array(gains))", "docstring": "Create the coo tensor based on hardware requirements.\n\nArgs:\nnum_chip: number of chips in the tpu system.\nmax_unique_ids_per_partition: max unique ids per physical replica\nper_sc_vocab_size: per sc shard of table size.\nper_sc_sample_count: per sc sample count.\nnum_minibatches_per_physical_sparse_core: per sc minibatch number.\n\nReturns:\nrow_ids, col_ids, gains and splits", "source": "github-repos"}
{"code": "def execute(self, try_limit=5, try_interval=0.5, timeout=30):\n    return Promise(no_error(self._execute), u'Executing {!r}'.format(self), try_limit=try_limit, try_interval=try_interval, timeout=timeout).fulfill()", "docstring": "Execute this query, retrying based on the supplied parameters.\n\nKeyword Args:\ntry_limit (int): The number of times to retry the query.\ntry_interval (float): The number of seconds to wait between each try (float).\ntimeout (float): The maximum number of seconds to spend retrying (float).\n\nReturns:\nThe transformed results of the query.\n\nRaises:\nBrokenPromise: The query did not execute without a Selenium error after one or more attempts.", "source": "codesearchnet"}
{"code": "class MaskFormerSwinPatchMerging(nn.Module):\n\n    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:\n        super().__init__()\n        self.input_resolution = input_resolution\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(4 * dim)\n\n    def maybe_pad(self, input_feature, height, width):\n        should_pad = height % 2 == 1 or width % 2 == 1\n        if should_pad:\n            pad_values = (0, 0, 0, width % 2, 0, height % 2)\n            input_feature = nn.functional.pad(input_feature, pad_values)\n        return input_feature\n\n    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:\n        height, width = input_dimensions\n        batch_size, dim, num_channels = input_feature.shape\n        input_feature = input_feature.view(batch_size, height, width, num_channels)\n        input_feature = self.maybe_pad(input_feature, height, width)\n        input_feature_0 = input_feature[:, 0::2, 0::2, :]\n        input_feature_1 = input_feature[:, 1::2, 0::2, :]\n        input_feature_2 = input_feature[:, 0::2, 1::2, :]\n        input_feature_3 = input_feature[:, 1::2, 1::2, :]\n        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)\n        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)\n        input_feature = self.norm(input_feature)\n        input_feature = self.reduction(input_feature)\n        return input_feature", "docstring": "Patch Merging Layer.\n\nArgs:\ninput_resolution (`Tuple[int]`):\nResolution of input feature.\ndim (`int`):\nNumber of input channels.\nnorm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def set_parent(self, node):\n        \n        self._parent = node\n\n        if node is None:\n            \n            self._depth = 0\n        else:\n            self._depth = node.get_depth() + 1", "docstring": "Attach node to its parent.\n\nArgs:\nnode: Parent node.\n\nNote:\n``node`` can be ``None``. In that case, the node is detached from its previous parent.", "source": "juraj-google-style"}
{"code": "def _deserialize(cls, serialization):\n    return cls(*serialization)", "docstring": "Reconstructs a TypeSpec from a value returned by `serialize`.\n\nArgs:\nserialization: A value returned by _serialize.  In some contexts,\n`namedtuple`s in `serialization` may not have the identical type that\nwas returned by `_serialize` (but its type will still be a `namedtuple`\ntype with the same type name and field names).  For example, the code\nthat loads a SavedModel does not have access to the original\n`namedtuple` type, so it dynamically creates a new `namedtuple` type\nwith the same type name and field names as the original one.  If\nnecessary, you can check `serialization` for these duck-typed\n`nametuple` types, and restore them to the original type. (E.g., this\nwould be necessary if you rely on type checks such as `isinstance` for\nthis `TypeSpec`'s member variables).\n\nReturns:\nA `TypeSpec` of type `cls`.", "source": "github-repos"}
{"code": "def set_invite_only(self, invite_only):\n        \n        join_rule = \"invite\" if invite_only else \"public\"\n        try:\n            self.client.api.set_join_rule(self.room_id, join_rule)\n            self.invite_only = invite_only\n            return True\n        except MatrixRequestError:\n            return False", "docstring": "Set how the room can be joined.\n\nArgs:\ninvite_only(bool): If True, users will have to be invited to join\nthe room. If False, anyone who knows the room link can join.\n\nReturns:\nTrue if successful, False if not", "source": "juraj-google-style"}
{"code": "def _GetInstanceAndProjectAttributes(self, metadata_dict):\n    metadata_dict = (metadata_dict or {})\n    try:\n        instance_data = metadata_dict['instance']['attributes']\n    except KeyError:\n        instance_data = {}\n        self.logger.warning('Instance attributes were not found.')\n    try:\n        project_data = metadata_dict['project']['attributes']\n    except KeyError:\n        project_data = {}\n        self.logger.warning('Project attributes were not found.')\n    return (instance_data, project_data)", "docstring": "Get dictionaries for instance and project attributes.\n\nArgs:\nmetadata_dict: json, the deserialized contents of the metadata server.\n\nReturns:\ntuple, two dictionaries for instance and project attributes.", "source": "codesearchnet"}
{"code": "def write_temporary_file(content, prefix='', suffix=''):\n    \n    temp = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, mode='w+t', delete=False)\n    temp.writelines(content)\n    temp.close()\n    return temp.name", "docstring": "Generating a temporary file with content.\n\nArgs:\ncontent (str): file content (usually a script, Dockerfile, playbook or config file)\nprefix (str): the filename starts with this prefix (default: no prefix)\nsuffix (str): the filename ends with this suffix (default: no suffix)\n\nReturns:\nstr: name of the temporary file\n\nNote:\nYou are responsible for the deletion of the file.", "source": "juraj-google-style"}
{"code": "def Exponential(cls,\n            mean: 'TensorFluent',\n            batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:\n        \n        rate = 1 / mean.tensor\n        dist = tf.distributions.Exponential(rate)\n        batch = mean.batch\n        if not batch and batch_size is not None:\n            t = dist.sample(batch_size)\n            batch = True\n        else:\n            t = dist.sample()\n        scope = mean.scope.as_list()\n        return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Exponential sampling op with given mean parameter.\n\nArgs:\nmean: The mean parameter of the Exponential distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Exponential distribution and a TensorFluent sample drawn from the distribution.", "source": "juraj-google-style"}
{"code": "def GetAnalyzerInstances(cls, analyzer_names):\n    \n    analyzer_instances = []\n    for analyzer_name, analyzer_class in iter(cls.GetAnalyzers()):\n      if analyzer_name in analyzer_names:\n        analyzer_instances.append(analyzer_class())\n\n    return analyzer_instances", "docstring": "Retrieves instances for all the specified analyzers.\n\nArgs:\nanalyzer_names (list[str]): names of the analyzers to retrieve.\n\nReturns:\nlist[BaseAnalyzer]: analyzer instances.", "source": "juraj-google-style"}
{"code": "def str_to_v1_str(xml_str):\n    \n    if str_is_v1(xml_str):\n        return xml_str\n    etree_obj = str_to_etree(xml_str)\n    strip_v2_elements(etree_obj)\n    etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v1.Namespace)\n    return etree_to_str(etree_obj)", "docstring": "Convert a API v2 XML doc to v1 XML doc.\n\nRemoves elements that are only valid for v2 and changes namespace to v1.\n\nIf doc is already v1, it is returned unchanged.\n\nArgs:\nxml_str : str\nAPI v2 XML doc. E.g.: ``SystemMetadata v2``.\n\nReturns:\nstr : API v1 XML doc. E.g.: ``SystemMetadata v1``.", "source": "juraj-google-style"}
{"code": "def method(cache_name, key_prefix=None):\n\n    def decorator(func):\n        if ((func.__name__ in ['cause_repertoire', 'effect_repertoire']) and (not config.CACHE_REPERTOIRES)):\n            return func\n\n        @wraps(func)\n        def wrapper(obj, *args, **kwargs):\n            cache = getattr(obj, cache_name)\n            key = cache.key(*args, _prefix=key_prefix, **kwargs)\n            value = cache.get(key)\n            if (value is None):\n                value = func(obj, *args, **kwargs)\n                cache.set(key, value)\n            return value\n        return wrapper\n    return decorator", "docstring": "Caching decorator for object-level method caches.\n\nCache key generation is delegated to the cache.\n\nArgs:\ncache_name (str): The name of the (already-instantiated) cache\non the decorated object which should be used to store results\nof this method.\n*key_prefix: A constant to use as part of the cache key in addition\nto the method arguments.", "source": "codesearchnet"}
{"code": "def inflate_plugin(self, identifier, definition=None, cls=None):\n        \n        cls = self.get_plugin(identifier, cls)\n        \n        \n        return cls(**definition or {})", "docstring": "Inflate a plugin thanks to it's identifier, definition and class.\n\nArgs:\nidentifier (str): the plugin identifier.\ndefinition (dict): the kwargs to instantiate the plugin with.\ncls (str): \"provider\", \"checker\", or None.\n\nReturns:\nProvider/Checker: instance of plugin.", "source": "juraj-google-style"}
{"code": "def __init__(self, params=None):\n        \n        super().__init__()\n        if params:\n\n            \n            \n            if (params.get(\"MAGMOM\") and isinstance(params[\"MAGMOM\"][0], (int, float))) \\\n                    and (params.get(\"LSORBIT\") or params.get(\"LNONCOLLINEAR\")):\n                val = []\n                for i in range(len(params[\"MAGMOM\"])\n                    val.append(params[\"MAGMOM\"][i*3:(i+1)*3])\n                params[\"MAGMOM\"] = val\n\n            self.update(params)", "docstring": "Creates an Incar object.\n\nArgs:\nparams (dict): A set of input parameters as a dictionary.", "source": "juraj-google-style"}
{"code": "def copy(self):\n    req = type(self)()\n    req.__dict__ = self.__dict__.copy()\n    req._headers = self.headers.copy()\n    return req", "docstring": "Copies the current Request object instance for side-effects purposes.\n\nReturns:\npook.Request: copy of the current Request instance.", "source": "codesearchnet"}
{"code": "def __neg__(self: EventSetOrNode) -> EventSetOrNode:\n    from temporian.core.operators.scalar import multiply_scalar\n    return multiply_scalar(input=self, value=-1)", "docstring": "Negates an [`EventSet`][temporian.EventSet] element-wise.\n\nExample:\n```python\n>>> a = tp.event_set(\n...     timestamps=[1, 2],\n...     features={\"M\": [1, -5], \"N\": [-1.0, 5.5]},\n... )\n>>> -a\nindexes: ...\n'M': [-1  5]\n'N': [ 1.  -5.5]\n...\n\n```\n\nReturns:\nNegated EventSet.", "source": "github-repos"}
{"code": "def create_resource(self, function, args=None, kwargs=None):\n    closure = ResourceClosure(function, self._cluster.resource_cancellation_mgr, args=args, kwargs=kwargs)\n    return self._register_and_schedule_resource_closure(closure)", "docstring": "Asynchronously creates a per-worker resource represented by a `RemoteValue`.\n\nArgs:\nfunction: the resource function to be run remotely. It should be a\n`tf.function`, a concrete function or a Python function.\nargs: positional arguments to be passed to the function.\nkwargs: keyword arguments to be passed to the function.\n\nReturns:\none or several RemoteValue objects depending on the function return\nvalues.", "source": "github-repos"}
{"code": "def update(self, **kwargs):\n    do_simple_update = kwargs.get('simple_update', True)\n    no_of_updates = 0\n    for model in self:\n        no_of_updates += 1\n        model._load_data(kwargs)\n        model.save(internal=True)\n    return no_of_updates", "docstring": "Updates the matching objects for specified fields.\n\nNote:\nPost/pre save hooks and signals will NOT triggered.\n\nUnlike RDBMS systems, this method makes individual save calls\nto backend DB store. So this is exists as more of a comfortable\nutility method and not a performance enhancement.\n\nKeyword Args:\n\\*\\*kwargs: Fields with their corresponding values to be updated.\n\nReturns:\nInt. Number of updated objects.\n\nExample:\n.. code-block:: python\n\nEntry.objects.filter(pub_date__lte=2014).update(comments_on=False)", "source": "codesearchnet"}
{"code": "def __init__(self, campfire, id):\n        \n        super(Room, self).__init__(campfire)\n        self._load(id)", "docstring": "Initialize.\n\nArgs:\ncampfire (:class:`Campfire`): Campfire instance\npassword (str): Room ID", "source": "juraj-google-style"}
{"code": "def get_edge_by_index(self, source_index: int, target_index: int) -> Optional[Edge]:\n        \n        edge = self._edges.get((source_index, target_index))\n        if edge is not None:\n            return edge\n        return self._edges.get((target_index, source_index))", "docstring": "Returns the edge connecting the nodes with the specified indices if such an edge exists.\n\nArguments:\nsource_index (int): The index of one of the endpoints of queried edge.\ntarget_index (int): The index of the other endpoint of the queried edge.\n\nReturns:\nThe edge connecting the nodes with the specified indices\nor `None` if no such node exists.", "source": "juraj-google-style"}
{"code": "def sqrt(cls, x: 'TensorFluent') -> 'TensorFluent':\n        \n        return cls._unary_op(x, tf.sqrt, tf.float32)", "docstring": "Returns a TensorFluent for the sqrt function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the sqrt function.", "source": "juraj-google-style"}
{"code": "def __hash__(self):\n    return hash(self.path)", "docstring": "Hash function.\n\nReturns:\nreturn the hash value of its path.\nNOTE(daiyip): KeyPath shares the same hash of its JSONPath representation\n(relative form), thus we can lookup a dict with KeyPath key by string,\nand vice versa.", "source": "github-repos"}
{"code": "def __init__(self, file_object, encoding='utf-8'):\n    \n    super(FileObjectOutputWriter, self).__init__(encoding=encoding)\n    self._errors = 'strict'\n    self._file_object = file_object", "docstring": "Initializes a file object command line interface output writer.\n\nArgs:\nfile_object (file): file-like object to read from.\nencoding (Optional[str]): output encoding.", "source": "juraj-google-style"}
{"code": "def get_diff_coeff(hvec, n=1):\n    hvec = np.array(hvec, dtype=np.float)\n    acc = len(hvec)\n    exp = np.column_stack(([np.arange(acc)] * acc))\n    a = (np.vstack(([hvec] * acc)) ** exp)\n    b = np.zeros(acc)\n    b[n] = factorial(n)\n    return np.linalg.solve(a, b)", "docstring": "Helper function to find difference coefficients of an\nderivative on an arbitrary mesh.\n\nArgs:\nhvec (1D array-like): sampling stencil\nn (int): degree of derivative to find", "source": "codesearchnet"}
{"code": "def modify(self, modification, obj):\n    for (action, settings) in modification.items():\n        if (action in self.supported_actions):\n            self.supported_actions[action].__call__(obj, settings)\n        elif self.strict:\n            raise ValueError('{} is not a supported action!'.format(action))", "docstring": "Note that modify makes actual in-place modifications. It does not\nreturn a copy.\n\nArgs:\nmodification (dict): Modification must be {action_keyword :\nsettings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}\nobj (dict/str/object): Object to modify depending on actions. For\nexample, for DictActions, obj will be a dict to be modified.\nFor FileActions, obj will be a string with a full pathname to a\nfile.", "source": "codesearchnet"}
{"code": "class RunOneDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, detector):\n        self._detector = detector\n\n    def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        model_id = getattr(self._detector, '_model_id', getattr(self._detector, '_key', 'unknown_model'))\n        model_uuid = f'{model_id}:{uuid.uuid4().hex[:6]}'\n        ret = input | beam.Reshuffle() | f'Score and Learn ({model_uuid})' >> RunScoreAndLearn(self._detector)\n        if self._detector._threshold_criterion:\n            ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._detector._threshold_criterion)\n        return ret", "docstring": "Runs a single anomaly detector on a PCollection of data.\n\nThis PTransform applies a single `AnomalyDetector` to the input data,\nincluding scoring, learning, and thresholding.\n\nArgs:\ndetector: The `AnomalyDetector` to run.", "source": "github-repos"}
{"code": "def sort_orbitals(element_pdos):\n    sorted_orbitals = ['s', 'p', 'py', 'pz', 'px', 'd', 'dxy', 'dyz', 'dz2', 'dxz', 'dx2', 'f', 'f_3', 'f_2', 'f_1', 'f_0', 'f1', 'f2', 'f3']\n    unsorted_keys = element_pdos.keys()\n    sorted_keys = []\n    for key in sorted_orbitals:\n        if (key in unsorted_keys):\n            sorted_keys.append(key)\n    return sorted_keys", "docstring": "Sort the orbitals of an element's projected density of states.\n\nSorts the orbitals based on a standard format. E.g. s < p < d.\nWill also sort lm decomposed orbitals. This is useful for plotting/saving.\n\nArgs:\nelement_pdos (dict): An element's pdos. Should be formatted as a\n:obj:`dict` of ``{orbital: dos}``. Where dos is a\n:obj:`~pymatgen.electronic_structure.dos.Dos` object. For example::\n\n{'s': dos, 'px': dos}\n\nReturns:\nlist: The sorted orbitals.", "source": "codesearchnet"}
{"code": "def merge_dicts(dicts, op=operator.add):\n    \n    a = None\n    for b in dicts:\n\n        if a is None:\n            a = b.copy()\n        else:\n            a = dict(a.items() + b.items() + [(k, op(a[k], b[k])) for k in set(b) & set(a)])\n    return a", "docstring": "Merge a list of dictionaries.\n\nArgs:\ndicts (list): a list of dictionary objects\nop (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.\n\nReturns:\ndict: the merged dictionary", "source": "juraj-google-style"}
{"code": "class TFSwinPatchMerging(keras.layers.Layer):\n\n    def __init__(self, input_resolution: Tuple[int, int], dim: int, norm_layer: Optional[Callable]=None, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.input_resolution = input_resolution\n        self.dim = dim\n        self.reduction = keras.layers.Dense(2 * dim, use_bias=False, name='reduction')\n        if norm_layer is None:\n            self.norm = keras.layers.LayerNormalization(epsilon=1e-05, name='norm')\n        else:\n            self.norm = norm_layer(name='norm')\n\n    def maybe_pad(self, input_feature: tf.Tensor, height: int, width: int) -> tf.Tensor:\n        should_pad = height % 2 == 1 or width % 2 == 1\n        if should_pad:\n            pad_values = ((0, 0), (0, height % 2), (0, width % 2), (0, 0))\n            input_feature = tf.pad(input_feature, pad_values)\n        return input_feature\n\n    def call(self, input_feature: tf.Tensor, input_dimensions: Tuple[int, int], training: bool=False) -> tf.Tensor:\n        height, width = input_dimensions\n        batch_size, _, num_channels = shape_list(input_feature)\n        input_feature = tf.reshape(input_feature, (batch_size, height, width, num_channels))\n        input_feature = self.maybe_pad(input_feature, height, width)\n        input_feature_0 = input_feature[:, 0::2, 0::2, :]\n        input_feature_1 = input_feature[:, 1::2, 0::2, :]\n        input_feature_2 = input_feature[:, 0::2, 1::2, :]\n        input_feature_3 = input_feature[:, 1::2, 1::2, :]\n        input_feature = tf.concat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)\n        input_feature = tf.reshape(input_feature, (batch_size, -1, 4 * num_channels))\n        input_feature = self.norm(input_feature, training=training)\n        input_feature = self.reduction(input_feature, training=training)\n        return input_feature\n\n    def build(self, input_shape=None):\n        if self.built:\n            return\n        self.built = True\n        if getattr(self, 'reduction', None) is not None:\n            with tf.name_scope(self.reduction.name):\n                self.reduction.build([None, None, 4 * self.dim])\n        if getattr(self, 'norm', None) is not None:\n            with tf.name_scope(self.norm.name):\n                self.norm.build([None, None, 4 * self.dim])", "docstring": "Patch Merging Layer.\n\nArgs:\ninput_resolution (`Tuple[int]`):\nResolution of input feature.\ndim (`int`):\nNumber of input channels.\nnorm_layer (`keras.layer.Layer`, *optional*, defaults to `keras.layers.LayerNormalization`):\nNormalization layer class.", "source": "github-repos"}
{"code": "def map(self, callback: Callable[[T], U]) -> 'Option[U]':\n        \n        return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)", "docstring": "Applies the ``callback`` with the contained value as its argument or\nreturns :py:data:`NONE`.\n\nArgs:\ncallback: The callback to apply to the contained value.\n\nReturns:\nThe ``callback`` result wrapped in an :class:`Option` if the\ncontained value is ``Some``, otherwise :py:data:`NONE`\n\nExamples:\n>>> Some(10).map(lambda x: x * x)\nSome(100)\n>>> NONE.map(lambda x: x * x)\nNONE", "source": "juraj-google-style"}
{"code": "def part_studio_stl(self, did, wid, eid):\n    req_headers = {'Accept': 'application/vnd.onshape.v1+octet-stream'}\n    return self._api.request('get', (((((('/api/partstudios/d/' + did) + '/w/') + wid) + '/e/') + eid) + '/stl'), headers=req_headers)", "docstring": "Exports STL export from a part studio\n\nArgs:\n- did (str): Document ID\n- wid (str): Workspace ID\n- eid (str): Element ID\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"}
{"code": "def metamodel_from_file(file_name, **kwargs):\n    \n    with codecs.open(file_name, 'r', 'utf-8') as f:\n        lang_desc = f.read()\n\n    metamodel = metamodel_from_str(lang_desc=lang_desc,\n                                   file_name=file_name,\n                                   **kwargs)\n\n    return metamodel", "docstring": "Creates new metamodel from the given file.\n\nArgs:\nfile_name(str): The name of the file with textX language description.\nother params: See metamodel_from_str.", "source": "juraj-google-style"}
{"code": "def ces_distance(C1, C2):\n    \n    if config.USE_SMALL_PHI_DIFFERENCE_FOR_CES_DISTANCE:\n        return round(small_phi_ces_distance(C1, C2), config.PRECISION)\n\n    concepts_only_in_C1 = [\n        c1 for c1 in C1 if not any(c1.emd_eq(c2) for c2 in C2)]\n    concepts_only_in_C2 = [\n        c2 for c2 in C2 if not any(c2.emd_eq(c1) for c1 in C1)]\n    \n    \n    if not concepts_only_in_C1 or not concepts_only_in_C2:\n        dist = _ces_distance_simple(C1, C2)\n    else:\n        dist = _ces_distance_emd(concepts_only_in_C1, concepts_only_in_C2)\n\n    return round(dist, config.PRECISION)", "docstring": "Return the distance between two cause-effect structures.\n\nArgs:\nC1 (CauseEffectStructure): The first |CauseEffectStructure|.\nC2 (CauseEffectStructure): The second |CauseEffectStructure|.\n\nReturns:\nfloat: The distance between the two cause-effect structures in concept\nspace.", "source": "juraj-google-style"}
{"code": "def _ip_string_from_prefix(self, prefixlen=None):\n    if (not prefixlen):\n        prefixlen = self._prefixlen\n    return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))", "docstring": "Turn a prefix length into a dotted decimal string.\n\nArgs:\nprefixlen: An integer, the netmask prefix length.\n\nReturns:\nA string, the dotted decimal netmask string.", "source": "codesearchnet"}
{"code": "def add(self, path, compress=None):\n        \n        if os.path.isdir(path):\n            self.add_dir(path, compress)\n        else:\n            self.add_file(path, compress)", "docstring": "Add `path` to the MAR file.\n\nIf `path` is a file, it will be added directly.\nIf `path` is a directory, it will be traversed recursively and all\nfiles inside will be added.\n\nArgs:\npath (str): path to file or directory on disk to add to this MAR\nfile\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.", "source": "juraj-google-style"}
{"code": "def from_string(cls, contents):\n        \n        mol = None\n        charge = None\n        spin_multiplicity = None\n        params = dict()\n        lines = contents.split('\\n')\n        parse_section = False\n        section_name = None\n        section_text = []\n        ghost_atoms = None\n        for line_num, line in enumerate(lines):\n            l = line.strip().lower()\n\n            if len(l) == 0:\n                continue\n            if (not parse_section) and (l == \"$end\" or not l.startswith(\"$\")):\n                raise ValueError(\"Format error, parsing failed\")\n            if parse_section and l != \"$end\":\n                section_text.append(line)\n            if l.startswith(\"$\") and not parse_section:\n                parse_section = True\n                section_name = l[1:]\n                available_sections = [\"comment\", \"molecule\", \"rem\"] + \\\n                    sorted(list(cls.optional_keywords_list))\n                if section_name not in available_sections:\n                    raise ValueError(\"Unrecognized keyword \" + line.strip() +\n                                     \" at line \" + str(line_num))\n                if section_name in params:\n                    raise ValueError(\"duplicated keyword \" + line.strip() +\n                                     \"at line \" + str(line_num))\n            if parse_section and l == \"$end\":\n                func_name = \"_parse_\" + section_name\n                if func_name not in QcTask.__dict__:\n                    raise Exception(func_name + \" is not implemented yet, \"\n                                    \"please implement it\")\n                parse_func = QcTask.__dict__[func_name].__get__(None, QcTask)\n                if section_name == \"molecule\":\n                    mol, charge, spin_multiplicity, ghost_atoms = parse_func(section_text)\n                else:\n                    d = parse_func(section_text)\n                    params[section_name] = d\n                parse_section = False\n                section_name = None\n                section_text = []\n        if parse_section:\n            raise ValueError(\"Format error. 
\" + section_name + \" is not \"\n                             \"terminated\")\n        jobtype = params[\"rem\"][\"jobtype\"]\n        title = params.get(\"comment\", None)\n        exchange = params[\"rem\"].get(\"exchange\", \"hf\")\n        method = params[\"rem\"].get(\"method\", None)\n        correlation = params[\"rem\"].get(\"correlation\", None)\n        basis_set = params[\"rem\"][\"basis\"]\n        aux_basis_set = params[\"rem\"].get(\"aux_basis\", None)\n        ecp = params[\"rem\"].get(\"ecp\", None)\n        optional_params = None\n        op_keys = set(params.keys()) - {\"comment\", \"rem\"}\n        if len(op_keys) > 0:\n            optional_params = dict()\n            for k in op_keys:\n                optional_params[k] = params[k]\n        return QcTask(molecule=mol, charge=charge,\n                      spin_multiplicity=spin_multiplicity,\n                      jobtype=jobtype, title=title,\n                      exchange=exchange, correlation=correlation,\n                      basis_set=basis_set, aux_basis_set=aux_basis_set,\n                      ecp=ecp, rem_params=params[\"rem\"],\n                      optional_params=optional_params,\n                      ghost_atoms=ghost_atoms,\n                      method=method)", "docstring": "Creates QcInput from a string.\n\nArgs:\ncontents: String representing a QChem input file.\n\nReturns:\nQcInput object", "source": "juraj-google-style"}
{"code": "def _broadcast_half(ac_0: _LayerBroadcaster, a_1: RowPartition) -> Tuple[_LayerBroadcaster, RowPartition]:\n    c_1 = ac_0.broadcast_row_partition(a_1)\n    old_value_rowids = array_ops.gather(ac_0.gather_index, c_1.value_rowids())\n    old_row_starts = array_ops.gather(a_1.row_splits(), old_value_rowids)\n    gather_index = old_row_starts + c_1.offsets_in_rows()\n    return [_LayerBroadcaster.from_gather_index(gather_index), c_1]", "docstring": "Does a NOOP broadcast of a_1.\n\n*-ac_0-->*\n|        |\na_1      c_1\n|        |\nV        V\n*-ac_1-->*\n\nNote that by definition this cannot fail: there is always a well-defined\nNOOP broadcast. This is usually intended as half of broadcasting two shapes\ntogether.\nArgs:\nac_0: previous LayerBroadcaster\na_1: previous RowPartition\n\nReturns:\n[ac_1, c_1] where ac_1 is the next LayerBroadcaster, and c_1 is the\nbroadcast RowPartition", "source": "github-repos"}
{"code": "def step(self, input_stream, value):\n    reading = IOTileReading(input_stream.encode(), self.tick_count, value)\n    self.sensor_graph.process_input(input_stream, reading, self.rpc_executor)", "docstring": "Step the sensor graph through one since input.\n\nThe internal tick count is not advanced so this function may\nbe called as many times as desired to input specific conditions\nwithout simulation time passing.\n\nArgs:\ninput_stream (DataStream): The input stream to push the\nvalue into\nvalue (int): The reading value to push as an integer", "source": "codesearchnet"}
{"code": "def element(self, using, value):\n    return self._execute(Command.FIND_ELEMENT, {'using': using, 'value': value})", "docstring": "Find an element in the current context.\n\nSupport:\nAndroid iOS Web(WebView)\n\nArgs:\nusing(str): The element location strategy.\nvalue(str): The value of the location strategy.\n\nReturns:\nWebElement Object.\n\nRaises:\nWebDriverException.", "source": "codesearchnet"}
{"code": "def Wait(self, context, *args):", "docstring": "Wait for and validate an interaction event.\n\nThis method should block and wait for a specific interaction. For example,\nthis method might wait for a specific message over a TCP connection.\n\nArgs:\ncontext: Context of this event.\ncontext.source: Source role for this event. Use the attributes of\nthe source role to validate where the event came from.\ncontext.target: Target role for this event. Use the attributes of\nthe target role to validate the event recipient.\n*args: Additional arguments for validating the event. These arguments\nshould be used to validate the incoming event.\n\nReturns:\nTrue if the event was successfully validated.", "source": "github-repos"}
{"code": "def __init__(self, env, keys=None):\n        \n        self.env = env\n\n        if keys is None:\n            assert self.env.use_object_obs, \"Object observations need to be enabled.\"\n            keys = [\"robot-state\", \"object-state\"]\n        self.keys = keys\n\n        \n        flat_ob = self._flatten_obs(self.env.reset(), verbose=True)\n        self.obs_dim = flat_ob.size\n        high = np.inf * np.ones(self.obs_dim)\n        low = -high\n        self.observation_space = spaces.Box(low=low, high=high)\n        low, high = self.env.action_spec\n        self.action_space = spaces.Box(low=low, high=high)", "docstring": "Initializes the Gym wrapper.\n\nArgs:\nenv (MujocoEnv instance): The environment to wrap.\nkeys (list of strings): If provided, each observation will\nconsist of concatenated keys from the wrapped environment's\nobservation dictionary. Defaults to robot-state and object-state.", "source": "juraj-google-style"}
{"code": "def submit_jobs(job_specs):\n    \n    gk = get_api_client()\n    jobs = []\n    try:\n        for site, job_spec in job_specs:\n            logger.info(\"Submitting %s on %s\" % (job_spec, site))\n            jobs.append(gk.sites[site].jobs.create(job_spec))\n    except Exception as e:\n        logger.error(\"An error occured during the job submissions\")\n        logger.error(\"Cleaning the jobs created\")\n        for job in jobs:\n            job.delete()\n        raise(e)\n\n    return jobs", "docstring": "Submit a job\n\nArgs:\njob_spec (dict): The job specifiation (see Grid'5000 API reference)", "source": "juraj-google-style"}
{"code": "def get_device(ads, **kwargs):\n    filtered = get_devices(ads, **kwargs)\n    if (len(filtered) == 1):\n        return filtered[0]\n    else:\n        serials = [ad.serial for ad in filtered]\n        raise Error(('More than one device matched: %s' % serials))", "docstring": "Finds a unique AndroidDevice instance from a list that has specific\nattributes of certain values.\n\nDeprecated, use `get_devices(ads, **kwargs)[0]` instead.\nThis method will be removed in 1.8.\n\nExample:\nget_device(android_devices, label='foo', phone_number='1234567890')\nget_device(android_devices, model='angler')\n\nArgs:\nads: A list of AndroidDevice instances.\nkwargs: keyword arguments used to filter AndroidDevice instances.\n\nReturns:\nThe target AndroidDevice instance.\n\nRaises:\nError: None or more than one device is matched.", "source": "codesearchnet"}
{"code": "def from_any_pb(pb_type, any_pb):\n    \n    msg = pb_type()\n\n    \n    if callable(getattr(pb_type, \"pb\", None)):\n        msg_pb = pb_type.pb(msg)\n    else:\n        msg_pb = msg\n\n    \n    if not any_pb.Unpack(msg_pb):\n        raise TypeError(\n            \"Could not convert {} to {}\".format(\n                any_pb.__class__.__name__, pb_type.__name__\n            )\n        )\n\n    \n    return msg", "docstring": "Converts an ``Any`` protobuf to the specified message type.\n\nArgs:\npb_type (type): the type of the message that any_pb stores an instance\nof.\nany_pb (google.protobuf.any_pb2.Any): the object to be converted.\n\nReturns:\npb_type: An instance of the pb_type message.\n\nRaises:\nTypeError: if the message could not be converted.", "source": "juraj-google-style"}
{"code": "def _create_job_info(self, job_dir):\n    meta = self._build_job_meta(job_dir)\n    self.logger.debug(('Create job: %s' % meta))\n    job_record = JobRecord.from_json(meta)\n    job_record.save()", "docstring": "Create information for given job.\n\nMeta file will be loaded if exists, and the job information will\nbe saved in db backend.\n\nArgs:\njob_dir (str): Directory path of the job.", "source": "codesearchnet"}
{"code": "def GetTSKFileByPathSpec(self, path_spec):\n    inode = getattr(path_spec, 'inode', None)\n    location = getattr(path_spec, 'location', None)\n    if (inode is not None):\n        tsk_file = self._tsk_file_system.open_meta(inode=inode)\n    elif (location is not None):\n        tsk_file = self._tsk_file_system.open(location)\n    else:\n        raise errors.PathSpecError('Path specification missing inode and location.')\n    return tsk_file", "docstring": "Retrieves the SleuthKit file object for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npytsk3.File: TSK file.\n\nRaises:\nPathSpecError: if the path specification is missing inode and location.", "source": "codesearchnet"}
{"code": "def set_tif(self, interface):\n        \n        if not ((1 << interface) & self.supported_tifs()):\n            raise errors.JLinkException('Unsupported target interface: %s' % interface)\n\n        \n        \n        res = self._dll.JLINKARM_TIF_Select(interface)\n        if res != 0:\n            return False\n\n        self._tif = interface\n        return True", "docstring": "Selects the specified target interface.\n\nNote that a restart must be triggered for this to take effect.\n\nArgs:\nself (Jlink): the ``JLink`` instance\ninterface (int): integer identifier of the interface\n\nReturns:\n``True`` if target was updated, otherwise ``False``.\n\nRaises:\nJLinkException: if the given interface is invalid or unsupported.", "source": "juraj-google-style"}
{"code": "def prepare_srcs(deps: list[str], deps_destinations: list[str], srcs_dir: str) -> None:\n    path_to_replace = {'external/local_xla/': 'tensorflow/compiler', 'external/local_tsl/': 'tensorflow'}\n    deps_mapping_dict = {}\n    for deps_destination in deps_destinations:\n        with open(deps_destination, 'r') as deps_destination_file:\n            deps_mapping_dict.update(json.load(deps_destination_file))\n    for file in deps:\n        for path, val in path_to_replace.items():\n            if path in file:\n                copy_file(file, os.path.join(srcs_dir, val), path)\n                break\n        else:\n            if 'external' not in file:\n                if file in deps_mapping_dict:\n                    dest = deps_mapping_dict[file]\n                    if dest:\n                        copy_file(file, srcs_dir, None, dest)\n                else:\n                    copy_file(file, srcs_dir, None, None)", "docstring": "Rearrange source files in target the target directory.\n\nExclude `external` files and move vendored xla/tsl files accordingly.\n\nArgs:\ndeps: a list of paths to files.\ndeps_destinations: a list of json files with mapping of deps to their\ndestinations for deps whose original path and path inside the wheel are\ndifferent.\nsrcs_dir: target directory where files are copied to.", "source": "github-repos"}
{"code": "async def run(self, login: LoginProtocol):\n        \n        self._print('%d +++| %s', bytes(socket_info.get()))\n        await self._do_greeting(login)\n        while True:\n            resp: Response\n            try:\n                cmd = await self._read_command()\n            except (ConnectionError, EOFError):\n                break\n            except NotParseable as exc:\n                resp = BadCommandResponse(exc)\n            else:\n                try:\n                    if isinstance(cmd, NoOpCommand):\n                        resp = NoOpResponse(cmd.tag)\n                    elif isinstance(cmd, LogoutCommand):\n                        resp = Response(Condition.BYE)\n                    elif isinstance(cmd, CapabilityCommand):\n                        resp = CapabilitiesResponse(self.capabilities)\n                    elif self._session is None:\n                        if isinstance(cmd, AuthenticateCommand):\n                            resp = await self._do_authenticate(login, cmd)\n                        elif isinstance(cmd, StartTLSCommand):\n                            resp = await self._do_starttls()\n                        else:\n                            resp = Response(Condition.NO, text='Bad command.')\n                    else:\n                        if isinstance(cmd, UnauthenticateCommand):\n                            resp = await self._do_unauthenticate()\n                        else:\n                            assert self._session.filter_set is not None\n                            state = FilterState(self._session.filter_set,\n                                                self.config)\n                            resp = await state.run(cmd)\n                except Exception:\n                    _log.exception('Unhandled exception')\n                    resp = Response(Condition.NO, text='Server error.')\n            await self._write_response(resp)\n            if resp.is_bye:\n                break\n        self._print('%d ---| %s', b'<disconnected>')", "docstring": "Start the socket communication with the server greeting, and then\nenter the command/response cycle.\n\nArgs:\nlogin: The login/authentication function.", "source": "juraj-google-style"}
{"code": "def __init__(self, layouts: Optional[sparse_core_layout_pb2.SparseCoreTableLayouts]=None):\n    self._checkpoint_layouts = {}\n    self._checkpoint_to_reshard_callback = {}\n    if layouts:\n        for layout in layouts.tables:\n            self._checkpoint_layouts[layout.table_name] = layout", "docstring": "An adapter for TPUEmbeddingV3 checkpoints.\n\nConstructs an adapter for TPUEmbeddingV3 to handle layout changes. between\ncheckpoint values and embedding object being restored.\n\nArgs:\nlayouts: The target layouts required.", "source": "github-repos"}
{"code": "def PreprocessSources(\n      self, artifacts_registry_object, source_path_specs,\n      resolver_context=None):\n    \n    detected_operating_systems = []\n    for source_path_spec in source_path_specs:\n      try:\n        file_system, mount_point = self.GetSourceFileSystem(\n            source_path_spec, resolver_context=resolver_context)\n      except (RuntimeError, dfvfs_errors.BackEndError) as exception:\n        logger.error(exception)\n        continue\n\n      try:\n        searcher = file_system_searcher.FileSystemSearcher(\n            file_system, mount_point)\n\n        operating_system = self._DetermineOperatingSystem(searcher)\n        if operating_system != definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN:\n          preprocess_manager.PreprocessPluginsManager.RunPlugins(\n              artifacts_registry_object, file_system, mount_point,\n              self.knowledge_base)\n\n          detected_operating_systems.append(operating_system)\n\n      finally:\n        file_system.Close()\n\n    if detected_operating_systems:\n      logger.info('Preprocessing detected operating systems: {0:s}'.format(\n          ', '.join(detected_operating_systems)))\n      self.knowledge_base.SetValue(\n          'operating_system', detected_operating_systems[0])", "docstring": "Preprocesses the sources.\n\nArgs:\nartifacts_registry_object (artifacts.ArtifactDefinitionsRegistry):\nartifact definitions registry.\nsource_path_specs (list[dfvfs.PathSpec]): path specifications of\nthe sources to process.\nresolver_context (Optional[dfvfs.Context]): resolver context.", "source": "juraj-google-style"}
{"code": "def Refresh(self):\n    with requests.Session() as session:\n        session.proxies = self.proxy_config.proxies\n        session.verify = (not self.proxy_config.disable_certificate_validation)\n        session.cert = self.proxy_config.cafile\n        self.creds.refresh(google.auth.transport.requests.Request(session=session))", "docstring": "Uses the Refresh Token to retrieve and set a new Access Token.\n\nRaises:\ngoogle.auth.exceptions.RefreshError: If the refresh fails.", "source": "codesearchnet"}
{"code": "def to_proto(self, export_scope=None):\n    if export_scope is None or self.queue.name.startswith(export_scope):\n        queue_runner_def = queue_runner_pb2.QueueRunnerDef()\n        queue_runner_def.queue_name = ops.strip_name_scope(self.queue.name, export_scope)\n        for enqueue_op in self.enqueue_ops:\n            queue_runner_def.enqueue_op_name.append(ops.strip_name_scope(enqueue_op.name, export_scope))\n        queue_runner_def.close_op_name = ops.strip_name_scope(self.close_op.name, export_scope)\n        queue_runner_def.cancel_op_name = ops.strip_name_scope(self.cancel_op.name, export_scope)\n        queue_runner_def.queue_closed_exception_types.extend([errors.error_code_from_exception_type(cls) for cls in self._queue_closed_exception_types])\n        return queue_runner_def\n    else:\n        return None", "docstring": "Converts this `QueueRunner` to a `QueueRunnerDef` protocol buffer.\n\nArgs:\nexport_scope: Optional `string`. Name scope to remove.\n\nReturns:\nA `QueueRunnerDef` protocol buffer, or `None` if the `Variable` is not in\nthe specified name scope.", "source": "github-repos"}
{"code": "def send(self, request):\n    if (self.call is None):\n        raise ValueError('Can not send() on an RPC that has never been open()ed.')\n    if self.call.is_active():\n        self._request_queue.put(request)\n    else:\n        next(self.call)", "docstring": "Queue a message to be sent on the stream.\n\nSend is non-blocking.\n\nIf the underlying RPC has been closed, this will raise.\n\nArgs:\nrequest (protobuf.Message): The request to send.", "source": "codesearchnet"}
{"code": "def make_gradients(dims=DEFAULT_DIMS):\n    return np.meshgrid(np.linspace(0.0, 1.0, dims[0]), np.linspace(0.0, 1.0, dims[1]))", "docstring": "Makes a pair of gradients to generate textures from numpy primitives.\n\nArgs:\ndims (pair): the dimensions of the surface to create\n\nReturns:\npair: A pair of surfaces.", "source": "codesearchnet"}
{"code": "def zsh_complete(self, path, cmd, *cmds, sourceable=False):\n    grouping = (internal.zsh_version() >= (5, 4))\n    path = pathlib.Path(path)\n    firstline = ['\n    firstline.extend(cmds)\n    subcmds = list(self.subcmds.keys())\n    with path.open('w') as zcf:\n        print(*firstline, end='\\n\\n', file=zcf)\n        print('function _{} {{'.format(cmd), file=zcf)\n        print('local line', file=zcf)\n        print('_arguments -C', end=BLK, file=zcf)\n        if subcmds:\n            substrs = [\"{}\\\\:'{}'\".format(sub, self.subcmds[sub].help) for sub in subcmds]\n            print('\"1:Commands:(({}))\"'.format(' '.join(substrs)), end=BLK, file=zcf)\n        self._zsh_comp_command(zcf, None, grouping)\n        if subcmds:\n            print(\"'*::arg:->args'\", file=zcf)\n            print('case $line[1] in', file=zcf)\n            for sub in subcmds:\n                print('{sub}) _{cmd}_{sub} ;;'.format(sub=sub, cmd=cmd), file=zcf)\n            print('esac', file=zcf)\n        print('}', file=zcf)\n        for sub in subcmds:\n            print('\\nfunction _{}_{} {{'.format(cmd, sub), file=zcf)\n            print('_arguments', end=BLK, file=zcf)\n            self._zsh_comp_command(zcf, sub, grouping)\n            print('}', file=zcf)\n        if sourceable:\n            print('\\ncompdef _{0} {0}'.format(cmd), *cmds, file=zcf)", "docstring": "Write zsh compdef script.\n\nArgs:\npath (path-like): desired path of the compdef script.\ncmd (str): command name that should be completed.\ncmds (str): extra command names that should be completed.\nsourceable (bool): if True, the generated file will contain an\nexplicit call to ``compdef``, which means it can be sourced\nto activate CLI completion.", "source": "codesearchnet"}
{"code": "def GetEntries(self, parser_mediator, cache=None, database=None, **kwargs):\n    if (database is None):\n        raise ValueError('Invalid database.')\n    for (table_name, callback_method) in iter(self._tables.items()):\n        if parser_mediator.abort:\n            break\n        if (not callback_method):\n            continue\n        callback = getattr(self, callback_method, None)\n        if (callback is None):\n            logger.warning('[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(self.NAME, callback_method, table_name))\n            continue\n        esedb_table = database.get_table_by_name(table_name)\n        if (not esedb_table):\n            logger.warning('[{0:s}] missing table: {1:s}'.format(self.NAME, table_name))\n            continue\n        callback(parser_mediator, cache=cache, database=database, table=esedb_table, **kwargs)", "docstring": "Extracts event objects from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache.\ndatabase (Optional[pyesedb.file]): ESE database.\n\nRaises:\nValueError: If the database attribute is not valid.", "source": "codesearchnet"}
{"code": "def actor_checkpoint_info(self, actor_id):\n        \n        self._check_connected()\n        message = self._execute_command(\n            actor_id,\n            \"RAY.TABLE_LOOKUP\",\n            ray.gcs_utils.TablePrefix.ACTOR_CHECKPOINT_ID,\n            \"\",\n            actor_id.binary(),\n        )\n        if message is None:\n            return None\n        gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(\n            message, 0)\n        entry = (\n            ray.gcs_utils.ActorCheckpointIdData.GetRootAsActorCheckpointIdData(\n                gcs_entry.Entries(0), 0))\n        checkpoint_ids_str = entry.CheckpointIds()\n        num_checkpoints = len(checkpoint_ids_str) \n        assert len(checkpoint_ids_str) % ID_SIZE == 0\n        checkpoint_ids = [\n            ray.ActorCheckpointID(\n                checkpoint_ids_str[(i * ID_SIZE):((i + 1) * ID_SIZE)])\n            for i in range(num_checkpoints)\n        ]\n        return {\n            \"ActorID\": ray.utils.binary_to_hex(entry.ActorId()),\n            \"CheckpointIds\": checkpoint_ids,\n            \"Timestamps\": [\n                entry.Timestamps(i) for i in range(num_checkpoints)\n            ],\n        }", "docstring": "Get checkpoint info for the given actor id.\nArgs:\nactor_id: Actor's ID.\nReturns:\nA dictionary with information about the actor's checkpoint IDs and\ntheir timestamps.", "source": "juraj-google-style"}
{"code": "def encode(self, spec, value, minimum_rank=0):\n    raise NotImplementedError(f'{type(self).__name__}.encode')", "docstring": "Encodes `value` as a nest of batchable `Tensor` or `CompositeTensor`.\n\nArgs:\nspec: The TypeSpec of the value to encode.\nvalue: A value compatible with `spec`.\nminimum_rank: The minimum rank for the returned Tensors, CompositeTensors,\nand ExtensionType values.  This can be used to ensure that the encoded\nvalues can be unbatched this number of times.   If `minimum_rank>0`,\nthen `t.shape[:minimum_rank]` must be compatible for all values `t`\nreturned by `encode`.\n\nReturns:\nA nest (as defined by `tf.nest`) of `tf.Tensor`s, batchable\n`tf.CompositeTensor`s, or `tf.ExtensionType`s.  Stacking, unstacking, or\nconcatenating these encoded values and then decoding the result must be\nequivalent to stacking, unstacking, or concatenating the original values.", "source": "github-repos"}
{"code": "def batch_shuffle(index_array, batch_size):\n    batch_count = int(len(index_array) / batch_size)\n    last_batch = index_array[batch_count * batch_size:]\n    index_array = index_array[:batch_count * batch_size]\n    index_array = index_array.reshape((batch_count, batch_size))\n    np.random.shuffle(index_array)\n    index_array = index_array.flatten()\n    return np.append(index_array, last_batch)", "docstring": "Shuffles an array in a batch-wise fashion.\n\nUseful for shuffling HDF5 arrays\n(where one cannot access arbitrary indices).\n\nArgs:\nindex_array: array of indices to be shuffled.\nbatch_size: integer.\n\nReturns:\nThe `index_array` array, shuffled in a batch-wise fashion.", "source": "github-repos"}
{"code": "def GetPasswdMap(self, since=None):\n    return PasswdUpdateGetter().GetUpdates(self, self.conf['passwd_url'], since)", "docstring": "Return the passwd map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of passwd.PasswdMap", "source": "github-repos"}
{"code": "def __init__(self, browser, **kwargs):\n        \n        if len(kwargs) > 1:\n            raise TypeError('BrowserQuery() takes at most 1 keyword argument.')\n\n        if not kwargs:\n            raise TypeError('Must pass a query keyword argument to BrowserQuery().')\n\n        query_name, query_value = list(kwargs.items())[0]\n\n        if query_name not in QUERY_TYPES:\n            raise TypeError(u'{} is not a supported query type for BrowserQuery()'.format(query_name))\n\n        def query_fn():  \n            return getattr(browser, QUERY_TYPES[query_name])(query_value)\n\n        super(BrowserQuery, self).__init__(\n            query_fn,\n            desc=u\"BrowserQuery({}={!r})\".format(query_name, query_value),\n        )\n        self.browser = browser", "docstring": "Generate a query over a browser.\n\nArgs:\nbrowser (selenium.webdriver): A Selenium-controlled browser.\n\nKeyword Args:\ncss (str): A CSS selector.\nxpath (str): An XPath selector.\n\nReturns:\nBrowserQuery\n\nRaises:\nTypeError: The query must be passed either a CSS or XPath selector, but not both.", "source": "juraj-google-style"}
{"code": "def __init__(self, tsk_attribute):\n    \n    super(TSKAttribute, self).__init__()\n    self._tsk_attribute = tsk_attribute", "docstring": "Initializes an attribute.\n\nArgs:\ntsk_attribute (pytsk3.Attribute): TSK attribute.", "source": "juraj-google-style"}
{"code": "def tabledata_insert_all(self, table_name, rows):\n    \n    url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) + \"/insertAll\"\n\n    data = {\n        'kind': 'bigquery\n        'rows': rows\n    }\n\n    return datalab.utils.Http.request(url, data=data, credentials=self._credentials)", "docstring": "Issues a request to insert data into a table.\n\nArgs:\ntable_name: the name of the table as a tuple of components.\nrows: the data to populate the table, as a list of dictionaries.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.GetVersion = channel.unary_unary(\n        '/versionpb.API/GetVersion',\n        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,\n        response_deserializer=client_dot_version_dot_versionpb_dot_version__pb2.Version.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def _load(self, dataset='train'):\n        \n        data, labels = None, None\n        if dataset is 'train':\n            files = [os.path.join(self.cifar10_dir, 'data_batch_%d' % i) for i in range(1, 6)]\n        else:\n            files = [os.path.join(self.cifar10_dir, 'test_batch')]\n\n        for file in files:\n            if not os.path.exists(file):\n                raise FileNotFoundError('Failed to find file: ' + file)\n\n        \n        for file in files:\n            with open(file, 'rb') as f:\n                cifar10 = pickle.load(f, encoding='latin1')\n\n            if labels is None:\n                labels = np.array(cifar10['labels'])\n            else:\n                labels = np.concatenate((labels, cifar10['labels']), axis=0)\n\n            if data is None:\n                data = cifar10['data']\n            else:\n                data = np.concatenate((data, cifar10['data']), axis=0)\n\n        \n        data = np.array(data, dtype=float) / 255.0\n        data = data.reshape([-1, self.num_channels, self.img_size, self.img_size])\n        data = data.transpose([0, 2, 3, 1])\n\n        \n        labels = np.eye(self.num_classes)[np.array(labels).reshape(-1)]\n\n        if dataset is 'train':\n            self._train_data, self._train_labels = data, labels\n        else:\n            self._test_data, self._test_labels = data, labels", "docstring": "Load the data in memory.\nArgs:\ndataset: string in ['train', 'test']", "source": "juraj-google-style"}
{"code": "def seek(self, relative_position):\n    self._player_interface.Seek(Int64(((1000.0 * 1000) * relative_position)))\n    self.seekEvent(self, relative_position)", "docstring": "Seek the video by `relative_position` seconds\n\nArgs:\nrelative_position (float): The position in seconds to seek to.", "source": "codesearchnet"}
{"code": "def adduser(name, username, **kwargs):\n    \n    try:\n        group_obj = _get_group_object(name)\n    except pywintypes.com_error as exc:\n        msg = 'Failed to access group {0}. {1}'.format(\n            name, win32api.FormatMessage(exc.excepinfo[5]))\n        log.error(msg)\n        return False\n\n    existing_members = [_get_username(x) for x in group_obj.members()]\n    username = salt.utils.win_functions.get_sam_name(username)\n\n    try:\n        if username not in existing_members:\n            group_obj.Add('WinNT:\n            log.info('Added user %s', username)\n        else:\n            log.warning('User %s is already a member of %s', username, name)\n            return False\n    except pywintypes.com_error as exc:\n        msg = 'Failed to add {0} to group {1}. {2}'.format(\n            username, name, win32api.FormatMessage(exc.excepinfo[5]))\n        log.error(msg)\n        return False\n\n    return True", "docstring": "Add a user to a group\n\nArgs:\n\nname (str):\nThe name of the group to modify\n\nusername (str):\nThe name of the user to add to the group\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.adduser foo username", "source": "juraj-google-style"}
{"code": "def get_summary_string(self):\n    from rez.plugin_managers import plugin_manager\n    txt = ('Rez %s' % __version__)\n    txt += ('\\n\\n%s' % plugin_manager.get_summary_string())\n    return txt", "docstring": "Get a string summarising the state of Rez as a whole.\n\nReturns:\nString.", "source": "codesearchnet"}
{"code": "def save(self, force=False):\n    if ((not self._success) and (not force)):\n        raise ConfigError('The config file appears to be corrupted:\\n\\n    {fname}\\n\\nBefore attempting to save the configuration, please either fix the config file manually, or overwrite it with a blank configuration as follows:\\n\\n    from dustmaps.config import config\\n    config.reset()\\n\\n'.format(fname=self.fname))\n    with open(self.fname, 'w') as f:\n        json.dump(self._options, f, indent=2)", "docstring": "Saves the configuration to a JSON, in the standard config location.\n\nArgs:\nforce (Optional[:obj:`bool`]): Continue writing, even if the original\nconfig file was not loaded properly. This is dangerous, because\nit could cause the previous configuration options to be lost.\nDefaults to :obj:`False`.\n\nRaises:\n:obj:`ConfigError`: if the configuration file was not successfully\nloaded on initialization of the class, and\n:obj:`force` is :obj:`False`.", "source": "codesearchnet"}
{"code": "def getline(self, lnum=None):\n    return (self._vim.current.buffer[lnum] if lnum else self._vim.current.line)", "docstring": "Get a line from the current buffer.\n\nArgs:\nlnum (Optional[str]): Number of the line to get, current if ``None``.\n\nTodo:\n- Give this more behavior of Vim ``getline()``?\n- ``buffer[index]`` is zero-based, this is probably too confusing", "source": "codesearchnet"}
{"code": "def forward(self, hidden_states):\n    forwarded_states = self.mlp(hidden_states)\n    output = hidden_states + self.norm(forwarded_states)\n    return output", "docstring": "Args:\nhidden_states (`torch.Tensor`) :\n[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\nReturns:\ntorch.Tensor[num_groups, tokens_per_group, hidden_dim]", "source": "github-repos"}
{"code": "def to_str(value):\n    if ((sys.version_info.major < 3) and isinstance(value, six.string_types)):\n        return value\n    return str(value)", "docstring": "Convert the input to a string, unless it is a unicode string in Python 2.\n\nUnicode strings are supported as native strings in Python 3, but ``str()`` cannot be\ninvoked on unicode strings in Python 2, so we need to check for that case when\nconverting user-specified values to strings.\n\nArgs:\nvalue: The value to convert to a string.\n\nReturns:\nstr or unicode: The string representation of the value or the unicode string itself.", "source": "codesearchnet"}
{"code": "def browse(self, folder, levels=None, prefix=None):\n    assert (isinstance(levels, int) or (levels is None))\n    assert (isinstance(prefix, string_types) or (prefix is None))\n    return self.get('browse', params={'folder': folder, 'levels': levels, 'prefix': prefix})", "docstring": "Returns the directory tree of the global model.\n\nDirectories are always JSON objects (map/dictionary), and files are\nalways arrays of modification time and size. The first integer is\nthe files modification time, and the second integer is the file\nsize.\n\nArgs:\nfolder (str): The root folder to traverse.\nlevels (int): How deep within the tree we want to dwell down.\n(0 based, defaults to unlimited depth)\nprefix (str): Defines a prefix within the tree where to start\nbuilding the structure.\n\nReturns:\ndict", "source": "codesearchnet"}
{"code": "def stop_gradient(input, name=None):\n    if isinstance(input, composite_tensor.CompositeTensor) and (not _pywrap_utils.IsResourceVariable(input)):\n        return nest.map_structure(stop_gradient, input, expand_composites=True)\n    with record.stop_recording():\n        return gen_array_ops.stop_gradient(input, name=name)", "docstring": "Stops gradient computation.\n\nNOTE: This docstring is patched out below. See\ntensorflow/core/api_def/base_api/api_def_StopGradient.pbtxt for the full\ndocstring. That file determines the public documentation page.\n\nArgs:\ninput: A `Tensor`.\nname: A name for this operation.\n\nReturns:\nA `Tensor`. Has the same dtype as `input`.", "source": "github-repos"}
{"code": "def add_gripper(self, arm_name, gripper):\n    if (arm_name in self.grippers):\n        raise ValueError('Attempts to add multiple grippers to one body')\n    arm_subtree = self.worldbody.find(\".\n    for actuator in gripper.actuator:\n        if (actuator.get('name') is None):\n            raise XMLError('Actuator has no name')\n        if (not actuator.get('name').startswith('gripper')):\n            raise XMLError(\"Actuator name {} does not have prefix 'gripper'\".format(actuator.get('name')))\n    for body in gripper.worldbody:\n        arm_subtree.append(body)\n    self.merge(gripper, merge_body=False)\n    self.grippers[arm_name] = gripper", "docstring": "Mounts gripper to arm.\n\nThrows error if robot already has a gripper or gripper type is incorrect.\n\nArgs:\narm_name (str): name of arm mount\ngripper (MujocoGripper instance): gripper MJCF model", "source": "codesearchnet"}
{"code": "def AddDirectory(self, path):\n    if self.file_system.FileEntryExistsByPath(path):\n        raise ValueError('Path: {0:s} already set.'.format(path))\n    self._AddParentDirectories(path)\n    self.file_system.AddFileEntry(path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)", "docstring": "Adds a directory to the fake file system.\n\nNote that this function will create parent directories if needed.\n\nArgs:\npath (str): path of the directory within the fake file system.\n\nRaises:\nValueError: if the path is already set.", "source": "codesearchnet"}
{"code": "def read_from_hdx(identifier, configuration=None):\n        \n        \n\n        if is_valid_uuid(identifier) is False:\n            raise HDXError('%s is not a valid resource id!' % identifier)\n        resource = Resource(configuration=configuration)\n        result = resource._load_from_hdx('resource', identifier)\n        if result:\n            return resource\n        return None", "docstring": "Reads the resource given by identifier from HDX and returns Resource object\n\nArgs:\nidentifier (str): Identifier of resource\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Resource]: Resource object if successful read, None if not", "source": "juraj-google-style"}
{"code": "def merge_dictionaries(dicts, merge_lists=False):\n    dict1 = dicts[0]\n    for other_dict in dicts[1:]:\n        merge_two_dictionaries(dict1, other_dict, merge_lists=merge_lists)\n    return dict1", "docstring": "Merges all dictionaries in dicts into a single dictionary and returns result\n\nArgs:\ndicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list\nmerge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.\n\nReturns:\nDictUpperBound: Merged dictionary", "source": "codesearchnet"}
{"code": "def get_fail_graph(self, failure_index=None):\n        \n        phase, _ = self._get_failed_phase(failure_index)\n        return phase.get_graph()", "docstring": "Returns a graph showing a solve failure.\n\nArgs:\nfailure_index: See `failure_reason`\n\nReturns:\nA pygraph.digraph object.", "source": "juraj-google-style"}
{"code": "def install(self, apk_path, destination_dir=None, timeout_ms=None):\n    \n    if not destination_dir:\n      destination_dir = '/data/local/tmp/'\n    basename = os.path.basename(apk_path)\n    destination_path = destination_dir + basename\n    self.push(apk_path, destination_path, timeout_ms=timeout_ms)\n    return self.Shell('pm install -r \"%s\"' % destination_path,\n                      timeout_ms=timeout_ms)", "docstring": "Install apk to device.\n\nDoesn't support verifier file, instead allows destination directory to be\noverridden.\n\nArguments:\napk_path: Local path to apk to install.\ndestination_dir: Optional destination directory. Use /system/app/ for\npersistent applications.\ntimeout_ms: Expected timeout for pushing and installing.\n\nReturns:\nThe pm install output.", "source": "juraj-google-style"}
{"code": "def delta_E(self):\n    site_delta_E = (self.final_site.energy - self.initial_site.energy)\n    if self.nearest_neighbour_energy:\n        site_delta_E += self.nearest_neighbour_delta_E()\n    if self.coordination_number_energy:\n        site_delta_E += self.coordination_number_delta_E()\n    return site_delta_E", "docstring": "The change in system energy if this jump were accepted.\n\nArgs:\nNone\n\nReturns:\n(Float): delta E", "source": "codesearchnet"}
{"code": "def send(self, message_type, message, connection_id, one_way=False):\n    try:\n        self._network.send(message_type, message, connection_id, one_way=one_way)\n    except ValueError:\n        LOGGER.debug('Connection %s is no longer valid. Removing from list of peers.', connection_id)\n        if (connection_id in self._peers):\n            del self._peers[connection_id]", "docstring": "Sends a message via the network.\n\nArgs:\nmessage_type (str): The type of the message.\nmessage (bytes): The message to be sent.\nconnection_id (str): The connection to send it to.", "source": "codesearchnet"}
{"code": "def when_matches(self, path, good_value, bad_values=None, timeout=None,\n                     event_timeout=None):\n        \n        future = self.when_matches_async(path, good_value, bad_values)\n        self.wait_all_futures(\n            future, timeout=timeout, event_timeout=event_timeout)", "docstring": "Resolve when an path value equals value\n\nArgs:\npath (list): The path to wait to\ngood_value (object): the value to wait for\nbad_values (list): values to raise an error on\ntimeout (float): time in seconds to wait for responses, wait\nforever if None\nevent_timeout: maximum time in seconds to wait between each response\nevent, wait forever if None", "source": "juraj-google-style"}
{"code": "def get(cls, session, record_id, endpoint_override=None):\n        \n        cls._check_implements('get')\n        try:\n            return cls(\n                endpoint_override or '/%s/%d.json' % (\n                    cls.__endpoint__, record_id,\n                ),\n                singleton=True,\n                session=session,\n            )\n        except HelpScoutRemoteException as e:\n            if e.status_code == 404:\n                return None\n            else:\n                raise", "docstring": "Return a specific record.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nrecord_id (int): The ID of the record to get.\nendpoint_override (str, optional): Override the default\nendpoint using this.\n\nReturns:\nhelpscout.BaseModel: A record singleton, if existing. Otherwise\n``None``.", "source": "juraj-google-style"}
{"code": "def GetNetgroupMap(self, since=None):\n    return NetgroupUpdateGetter().GetUpdates(self, self.conf['netgroup_url'], since)", "docstring": "Return the netgroup map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of netgroup.NetgroupMap", "source": "github-repos"}
{"code": "def derive_value(self, value):\n    return IonEvent(self.event_type, self.ion_type, value, self.field_name, self.annotations, self.depth)", "docstring": "Derives a new event from this one setting the ``value`` attribute.\n\nArgs:\nvalue: (any):\nThe value associated with the derived event.\n\nReturns:\nIonEvent: The newly generated non-thunk event.", "source": "codesearchnet"}
{"code": "def delete_files(file_paths):\n    if len(file_paths) == 0:\n        raise RuntimeError('Clean up failed. Invalid file path: %s.' % file_paths)\n    FileSystems.delete(file_paths)", "docstring": "A function to clean up files or directories using ``FileSystems``.\n\nGlob is supported in file path and directories will be deleted recursively.\n\nArgs:\nfile_paths: A list of strings contains file paths or directories.", "source": "github-repos"}
{"code": "def register(self, address, retry=True):\n        \n\n        logger.debug(\"<%s> Sending REGISTER request to: %s\" % (str(self.cuuid),\n                                                                str(address)))\n        if not self.listener.listening:\n            logger.warning(\"Neteria client is not listening.\")\n\n        \n        message = {\"method\": \"REGISTER\", \"cuuid\": str(self.cuuid)}\n\n        \n        \n        if self.encryption:\n            message[\"encryption\"] = [self.encryption.n, self.encryption.e]\n\n        \n        self.listener.send_datagram(\n            serialize_data(message, self.compression,\n                           encryption=False), address)\n\n        if retry:\n            \n            self.register_retries = 0\n\n        \n        \n        self.listener.call_later(\n            self.timeout, self.retransmit, {\"method\": \"REGISTER\",\n                                            \"address\": address})", "docstring": "This function will send a register packet to the discovered Neteria\nserver.\n\nArgs:\naddress (tuple): A tuple of the (address, port) to send the register\nrequest to.\nretry (boolean): Whether or not we want to reset the current number\nof registration retries to 0.\n\nReturns:\nNone\n\nExamples:\n>>> address\n('192.168.0.20', 40080)", "source": "juraj-google-style"}
{"code": "def CompileReport(self, mediator):\n    \n    report_text = 'Tagging plugin produced {0:d} tags.\\n'.format(\n        self._number_of_event_tags)\n    self._number_of_event_tags = 0\n    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)", "docstring": "Compiles an analysis report.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\n\nReturns:\nAnalysisReport: analysis report.", "source": "juraj-google-style"}
{"code": "def __init__(self, callback):\n        \n        self._callback = callback\n        self._brocade_tunnels = brocade_tunnels(callback=pynos.utilities.return_xml)", "docstring": "VCS init function\n\nArgs:\ncallback: Callback function that will be called for each action\n\nReturns:\nVCS Object\n\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def _getClassInstance(path, args=None):\n\t\t\n\t\tif not path.endswith(\".py\"):\n\t\t\treturn None\n\n\t\tif args is None:\n\t\t\targs = {}\n\n\t\tclassname = AtomShieldsScanner._getClassName(path)\n\t\tbasename = os.path.basename(path).replace(\".py\", \"\")\n\t\tsys.path.append(os.path.dirname(path))\n\t\ttry:\n\t\t\tmod = __import__(basename, globals(), locals(), [classname], -1)\n\t\t\tclass_ = getattr(mod, classname)\n\t\t\tinstance = class_(**args)\n\t\texcept Exception as e:\n\t\t\tAtomShieldsScanner._debug(\"[!] %s\" % e)\n\t\t\treturn None\n\t\tfinally:\n\t\t\tsys.path.remove(os.path.dirname(path))\n\t\treturn instance", "docstring": "Returns a class instance from a .py file.\n\nArgs:\npath (str): Absolute path to .py file\nargs (dict): Arguments passed via class constructor\n\nReturns:\nobject: Class instance or None", "source": "juraj-google-style"}
{"code": "def _ParseRecordString(self, record_strings_data, record_strings_data_offset, string_offset):\n    if (string_offset == 0):\n        return None\n    if (string_offset & self._STRING_OFFSET_MSB):\n        if ((string_offset >> 60) != 8):\n            raise errors.ParseError('Invalid inline record string flag.')\n        string_size = ((string_offset >> 56) & 15)\n        if (string_size >= 8):\n            raise errors.ParseError('Invalid inline record string size.')\n        string_data = bytes(bytearray([((string_offset >> (8 * byte_index)) & 255) for byte_index in range(6, (- 1), (- 1))]))\n        try:\n            return string_data[:string_size].decode('utf-8')\n        except UnicodeDecodeError as exception:\n            raise errors.ParseError('Unable to decode inline record string with error: {0!s}.'.format(exception))\n    data_offset = (string_offset - record_strings_data_offset)\n    record_string_map = self._GetDataTypeMap('asl_record_string')\n    try:\n        record_string = self._ReadStructureFromByteStream(record_strings_data[data_offset:], string_offset, record_string_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.ParseError('Unable to parse record string at offset: 0x{0:08x} with error: {1!s}'.format(string_offset, exception))\n    return record_string.string.rstrip('\\x00')", "docstring": "Parses a record string.\n\nArgs:\nrecord_strings_data (bytes): record strings data.\nrecord_strings_data_offset (int): offset of the record strings data\nrelative to the start of the file.\nstring_offset (int): offset of the string relative to the start of\nthe file.\n\nReturns:\nstr: record string or None if string offset is 0.\n\nRaises:\nParseError: if the record string cannot be parsed.", "source": "codesearchnet"}
{"code": "def __init__(self, declaration):\n        \n        self._address_space = None\n        self._type_qualifiers = []\n        self._basic_ctype = ''\n        self._vector_type_length = None\n        self._nmr_pointer_stars = 0\n        self._pointer_qualifiers = []\n        self._name = ''\n        self._array_sizes = []\n\n        param = self\n\n        class Semantics:\n\n            def type_qualifiers(self, ast):\n                if ast in param._type_qualifiers:\n                    raise ValueError('The pre-type qualifier \"{}\" is present multiple times.'.format(ast))\n                param._type_qualifiers.append(ast)\n                return ast\n\n            def address_space(self, ast):\n                param._address_space = ''.join(ast)\n                return ''.join(ast)\n\n            def basic_ctype(self, ast):\n                param._basic_ctype = ast\n                return ast\n\n            def vector_type_length(self, ast):\n                param._vector_type_length = int(ast)\n                return ast\n\n            def pointer_star(self, ast):\n                param._nmr_pointer_stars += 1\n                return ast\n\n            def pointer_qualifiers(self, ast):\n                if ast in param._pointer_qualifiers:\n                    raise ValueError('The pre-type qualifier \"{}\" is present multiple times.'.format(ast))\n                param._pointer_qualifiers.append(ast)\n                return ast\n\n            def name(self, ast):\n                param._name = ast\n                return ast\n\n            def array_size(self, ast):\n                param._array_sizes.append(int(ast[1:-1]))\n                return ast\n\n        _cl_data_type_parser.parse(declaration, semantics=Semantics())", "docstring": "Creates a new function parameter for the CL functions.\n\nArgs:\ndeclaration (str): the declaration of this parameter. For example ``global int foo``.", "source": "juraj-google-style"}
{"code": "def set_message(self, title, msg, typ, url=None):\n    return self.user.send_notification(title=title, message=msg, typ=typ, url=url)", "docstring": "Sets user notification message.\n\nArgs:\ntitle: Msg. title\nmsg:  Msg. text\ntyp: Msg. type\nurl: Additional URL (if exists)\n\nReturns:\nMessage ID.", "source": "codesearchnet"}
{"code": "def l2_loss(tensor, weight=1.0, scope=None):\n    with tf.name_scope(scope, 'L2Loss', [tensor]):\n        weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight')\n        loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')\n        tf.add_to_collection(LOSSES_COLLECTION, loss)\n        return loss", "docstring": "Define a L2Loss, useful for regularize, i.e. weight decay.\n\nArgs:\ntensor: tensor to regularize.\nweight: an optional weight to modulate the loss.\nscope: Optional scope for name_scope.\n\nReturns:\nthe L2 loss op.", "source": "codesearchnet"}
{"code": "def _update_cross_replica(self, update_fn, value, **kwargs):\n    values_util.mark_as_unsaveable()\n    return self.distribute_strategy.extended.update(self, update_fn, args=(value,), kwargs=kwargs, group=True)", "docstring": "Applies updates across replicas.\n\nArgs:\nupdate_fn: A callable to pass to `strategy.extended.update` to update the\nvariable. It should has the same signature as `Variable.assign()`.\nvalue: value to be passed to `update_fn`.\n**kwargs: remaining arguments to `update_fn`.\n\nReturns:\nUpdated variable or `tf.Operation`.", "source": "github-repos"}
{"code": "def get_name_scope(self) -> str:\n    return self._name_stack", "docstring": "Returns the current name scope.\n\nFor example:\n\n```python\nwith tf.name_scope('scope1'):\nwith tf.name_scope('scope2'):\nprint(tf.compat.v1.get_default_graph().get_name_scope())\n```\nwould print the string `scope1/scope2`.\n\nReturns:\nA string representing the current name scope.", "source": "github-repos"}
{"code": "def exec_python(attr, src, executable='python'):\n    import subprocess\n    if isinstance(src, basestring):\n        src = [src]\n    p = popen([executable, '-c', '; '.join(src)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    (out, err) = p.communicate()\n    if p.returncode:\n        from rez.exceptions import InvalidPackageError\n        raise InvalidPackageError((\"Error determining package attribute '%s':\\n%s\" % (attr, err)))\n    return out.strip()", "docstring": "Runs a python subproc to calculate a package attribute.\n\nArgs:\nattr (str): Name of package attribute being created.\nsrc (list of str): Python code to execute, will be converted into\nsemicolon-delimited single line of code.\n\nReturns:\nstr: Output of python process.", "source": "codesearchnet"}
{"code": "def register(config_class, feature_extractor_class, exist_ok=False):\n    FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)", "docstring": "Register a new feature extractor for this class.\n\nArgs:\nconfig_class ([`PretrainedConfig`]):\nThe configuration corresponding to the model to register.\nfeature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.", "source": "github-repos"}
{"code": "def __div__(self, other):\n    return self", "docstring": "DEPRECATED: Use `__floordiv__` via `x // y` instead.\n\nThis function exists only for backwards compatibility purposes; new code\nshould use `__floordiv__` via the syntax `x // y`.  Using `x // y`\ncommunicates clearly that the result rounds down, and is forward compatible\nto Python 3.\n\nArgs:\nother: Another `Dimension`.\n\nReturns:\nA `Dimension` whose value is the integer quotient of `self` and `other`.", "source": "github-repos"}
{"code": "def get_completed_task(self, task, timeout=-1):\n        \n        self.__wait_task_completion(task, timeout)\n\n        return self.get(task)", "docstring": "Waits until the task is completed and returns the task resource.\n\nArgs:\ntask: TaskResource\ntimeout: Timeout in seconds\n\nReturns:\ndict: TaskResource", "source": "juraj-google-style"}
{"code": "def subtract_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n    return self.add_business_days(date_tensor, -num_days, roll_convention)", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `subtract_period_and_roll` with\nPeriodType.DAY. For example, subtracting 5 business days from Friday gives\nthe previous Friday (unless there are holidays on this week or previous\nFriday). Subtracting 5 days and rolling means landing on Sunday and then\nrolling either to Monday or to Friday, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the subtraction. If `roll_convention`\nis `NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\ndate_tensor: `DateTensor` of dates to advance from.\nnum_days: Tensor of int32 type broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting `DateTensor`.", "source": "github-repos"}
{"code": "def is_for_driver_task(self):\n    return all(((len(x) == 0) for x in [self.module_name, self.class_name, self.function_name]))", "docstring": "See whether this function descriptor is for a driver or not.\n\nReturns:\nTrue if this function descriptor is for driver tasks.", "source": "codesearchnet"}
{"code": "def guess_file_type(kind, filepath=None, youtube_id=None, web_url=None, encoding=None):\n    \n    if youtube_id:\n        return FileTypes.YOUTUBE_VIDEO_FILE\n    elif web_url:\n        return FileTypes.WEB_VIDEO_FILE\n    elif encoding:\n        return FileTypes.BASE64_FILE\n    else:\n        ext = os.path.splitext(filepath)[1][1:].lower()\n        if kind in FILE_TYPE_MAPPING and ext in FILE_TYPE_MAPPING[kind]:\n            return FILE_TYPE_MAPPING[kind][ext]\n    return None", "docstring": "guess_file_class: determines what file the content is\nArgs:\nfilepath (str): filepath of file to check\nReturns: string indicating file's class", "source": "juraj-google-style"}
{"code": "def get_replicated_var_handle(self, name: Text, handle_id: Text, vars_: Union[List[core_types.Tensor], List[variables.Variable]], is_mirrored: bool=False, is_packed: bool=False) -> core_types.Tensor:\n    device_assignment = _enclosing_tpu_device_assignment()\n    handle = self._replicated_vars.get(handle_id)\n    if handle is not None:\n        return handle\n    if device_assignment is not None and (not is_packed):\n        job_name = pydev.DeviceSpec.from_string(vars_[0].device).job\n        devices_to_vars = {device_util.canonicalize(v.device): v for v in vars_}\n        replicated_vars = []\n        for replica_id in range(device_assignment.num_replicas):\n            for logical_core in range(device_assignment.num_cores_per_replica):\n                device = device_util.canonicalize(device_assignment.tpu_device(replica=replica_id, logical_core=logical_core, job=job_name))\n                if device in devices_to_vars:\n                    replicated_vars.append(devices_to_vars[device])\n                    break\n            else:\n                raise ValueError('Failed to find a variable on any device in replica {} for current device assignment'.format(replica_id))\n    else:\n        replicated_vars = vars_\n    _, graph = _enclosing_tpu_context_and_graph()\n    with graph.as_default():\n        if isinstance(replicated_vars[0], variables.Variable):\n            replicated_vars = [v.handle for v in replicated_vars]\n        saved_context = graph._get_control_flow_context()\n        graph._set_control_flow_context(self.outer_context)\n        handle = tpu_ops.tpu_replicated_input(replicated_vars, name=name + '/handle', is_mirrored_variable=is_mirrored, is_packed=is_packed)\n        graph._set_control_flow_context(saved_context)\n    self._replicated_vars[handle_id] = handle\n    return handle", "docstring": "Returns a variable handle for replicated TPU variable 'var'.\n\nThis is a method used by an experimental replicated variable implementation\nand is not intended as a public API.\n\nArgs:\nname: The common name of the variable.\nhandle_id: Unique ID of the variable handle, used as the cache key.\nvars_: The replicated TPU variables or handles.\nis_mirrored: Whether the variables are mirrored, which guarantees the\nvalues in each replica are always the same.\nis_packed: Whether the replicated variables are packed into one variable.\n\nReturns:\nThe handle of the TPU replicated input node.", "source": "github-repos"}
{"code": "def _build(self, *args, **kwargs):\n    flattened = nest.flatten([args, kwargs])\n    merged_flattened = [(merge_leading_dims(inp, self._n_dims) if (inp is not None) else None) for inp in flattened]\n    (merged_args, merged_kwargs) = nest.pack_sequence_as([args, kwargs], merged_flattened)\n    results = self._module(*merged_args, **merged_kwargs)\n    example_input = tf.convert_to_tensor(flattened[self._input_example_index])\n\n    def _split_to_original_leading_dims(result):\n        if (result is None):\n            return None\n        else:\n            return split_leading_dim(result, example_input, self._n_dims)\n    flat_results = nest.flatten(results)\n    flat_unmerged_results = [_split_to_original_leading_dims(result) for result in flat_results]\n    return nest.pack_sequence_as(results, flat_unmerged_results)", "docstring": "Connects the BatchApply module into the graph.\n\nArgs:\n*args: a Tensor or a nested list or dictionary of Tensors. The input\ntensors will have their first dimensions merged, then an op or a\nmodule will be called on the input. The first dimension of the output\ntensor(s) will be split again based on the leading dimensions of the\nfirst input tensor.\n**kwargs: Dictionary of named arguments; used in the same way as `*args`.\n\nReturns:\nA Tensor or nested list or dictionary of Tensors as a result of applying\nthe process above. (\"None\" return values are also supported.)", "source": "codesearchnet"}
{"code": "def __init__(self,\n                 *,\n                 dtype: Type[np.number] = np.complex64,\n                 noise: devices.NoiseModel = devices.NO_NOISE):\n        \n        if dtype not in {np.complex64, np.complex128}:\n            raise ValueError(\n                'dtype must be complex64 or complex128, was {}'.format(dtype))\n\n        self._dtype = dtype\n        self.noise = noise", "docstring": "Density matrix simulator.\n\nArgs:\ndtype: The `numpy.dtype` used by the simulation. One of\n`numpy.complex64` or `numpy.complex128`\nnoise: A noise model to apply while simulating.", "source": "juraj-google-style"}
{"code": "def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):\n    shift = np.array([(1 if i else 0) for i in is_shift])\n    (mapping, grid) = spglib.get_ir_reciprocal_mesh(np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)\n    results = []\n    for (i, count) in zip(*np.unique(mapping, return_counts=True)):\n        results.append((((grid[i] + (shift * (0.5, 0.5, 0.5))) / mesh), count))\n    return results", "docstring": "k-point mesh of the Brillouin zone generated taken into account\nsymmetry.The method returns the irreducible kpoints of the mesh\nand their weights\n\nArgs:\nmesh (3x1 array): The number of kpoint for the mesh needed in\neach direction\nis_shift (3x1 array): Whether to shift the kpoint grid. (1, 1,\n1) means all points are shifted by 0.5, 0.5, 0.5.\n\nReturns:\nA list of irreducible kpoints and their weights as a list of\ntuples [(ir_kpoint, weight)], with ir_kpoint given\nin fractional coordinates", "source": "codesearchnet"}
{"code": "def __init__(self, name=None, options=None):\n    del options\n    rr = gen_io_ops.lmdb_reader(name=name)\n    super(LMDBReader, self).__init__(rr)", "docstring": "Create a LMDBReader.\n\nArgs:\nname: A name for the operation (optional).\noptions: A LMDBRecordOptions object (optional).", "source": "github-repos"}
{"code": "def dump(self, content, entry_type):\n    new_content = copy.deepcopy(content)\n    new_content['Type'] = entry_type.value\n    with self._lock:\n        with io.open(self._path, 'a', encoding='utf-8') as f:\n            yaml.safe_dump(new_content, f, explicit_start=True, explicit_end=True, allow_unicode=True, indent=4)", "docstring": "Dumps a dictionary as a yaml document to the summary file.\n\nEach call to this method dumps a separate yaml document to the same\nsummary file associated with a test run.\n\nThe content of the dumped dictionary has an extra field `TYPE` that\nspecifies the type of each yaml document, which is the flag for parsers\nto identify each document.\n\nArgs:\ncontent: dictionary, the content to serialize and write.\nentry_type: a member of enum TestSummaryEntryType.\n\nRaises:\nrecoreds.Error: An invalid entry type is passed in.", "source": "github-repos"}
{"code": "def _head(self, client_kwargs):\n        \n        return _handle_http_errors(\n            self.client.request(\n                'HEAD', timeout=self._TIMEOUT, **client_kwargs)).headers", "docstring": "Returns object HTTP header.\n\nArgs:\nclient_kwargs (dict): Client arguments.\n\nReturns:\ndict: HTTP header.", "source": "juraj-google-style"}
{"code": "def same_dynamic_shape(a, b):\n    a = tf.convert_to_tensor(value=a, name='a')\n    b = tf.convert_to_tensor(value=b, name='b')\n\n    def all_shapes_equal():\n        return tf.reduce_all(input_tensor=tf.equal(tf.concat([tf.shape(input=a), tf.shape(input=b)], 0), tf.concat([tf.shape(input=b), tf.shape(input=a)], 0)))\n    return tf.cond(pred=tf.equal(tf.rank(a), tf.rank(b)), true_fn=all_shapes_equal, false_fn=(lambda : tf.constant(False)))", "docstring": "Returns whether a and b have the same dynamic shape.\n\nArgs:\na: `Tensor`\nb: `Tensor`\n\nReturns:\n`bool` `Tensor` representing if both tensors have the same shape.", "source": "codesearchnet"}
{"code": "def u2handlers(self):\n    return_handlers = suds.transport.http.HttpTransport.u2handlers(self)\n    return_handlers.extend(self.handlers)\n    return return_handlers", "docstring": "Get a collection of urllib2 handlers to be installed in the opener.\n\nReturns:\nA list of handlers to be installed to the OpenerDirector used by suds.", "source": "codesearchnet"}
{"code": "def parse(self, argument):\n    \n    if isinstance(argument, list):\n      return argument\n    elif not argument:\n      return []\n    else:\n      if self._comma_compat:\n        argument = argument.replace(',', ' ')\n      return argument.split()", "docstring": "Parses argument as whitespace-separated list of strings.\n\nIt also parses argument as comma-separated list of strings if requested.\n\nArgs:\nargument: string argument passed in the commandline.\n\nReturns:\n[str], the parsed flag value.", "source": "juraj-google-style"}
{"code": "def add_spectra(self, spectra_dict, key_sort_func=None):\n        \n        if key_sort_func:\n            keys = sorted(spectra_dict.keys(), key=key_sort_func)\n        else:\n            keys = spectra_dict.keys()\n        for label in keys:\n            self.add_spectra(label, spectra_dict[label])", "docstring": "Add a dictionary of doses, with an optional sorting function for the\nkeys.\n\nArgs:\ndos_dict: dict of {label: Dos}\nkey_sort_func: function used to sort the dos_dict keys.", "source": "juraj-google-style"}
{"code": "def __init__(\n        self,\n        app=None,\n        base_url='http:\n        namespaces=DEFAULT_NAMESPACES):\n        \n        self.app = app\n        self.namespaces = namespaces\n        self.base_url = None\n        if app is not None:\n            self.init_app(app)\n            if 'FEDORA_BASE_URL' in app.config:\n                self.base_url = app.config.get('FEDORA_BASE_URL')\n        if self.base_url is None:\n            self.base_url = base_url\n        \n        if self.base_url.endswith(\"/\"):\n            self.base_url = self.base_url[:-1]\n        self.transaction = []", "docstring": "Initializes a Repository object\n\nArgs:\napp(Flask): Flask app, default is None\nbase_url(str): Base url for Fedora Commons, defaults to\nlocalhost:8080.\nnamespaces(list): List of namespace tuples of prefix, uri for\neach namespace in Fedora", "source": "juraj-google-style"}
{"code": "def cancel_job_button(self, description=None):\n    if (not hasattr(self, 'jobId')):\n        return\n    try:\n        import ipywidgets as widgets\n        if (not description):\n            description = 'Cancel job: '\n            description += (self.name if hasattr(self, 'name') else self.job.name)\n        button = widgets.Button(description=description, button_style='danger', layout=widgets.Layout(width='40%'))\n        out = widgets.Output()\n        vb = widgets.VBox([button, out])\n\n        @out.capture(clear_output=True)\n        def _cancel_job_click(b):\n            b.disabled = True\n            print((('Cancelling job: id=' + str(self.job.id)) + ' ...\\n'), flush=True)\n            try:\n                rc = self.job.cancel()\n                out.clear_output()\n                if rc:\n                    print((((('Cancelled job: id=' + str(self.job.id)) + ' : ') + self.job.name) + '\\n'), flush=True)\n                else:\n                    print((((('Job already cancelled: id=' + str(self.job.id)) + ' : ') + self.job.name) + '\\n'), flush=True)\n            except:\n                b.disabled = False\n                out.clear_output()\n                raise\n        button.on_click(_cancel_job_click)\n        display(vb)\n    except:\n        pass", "docstring": "Display a button that will cancel the submitted job.\n\nUsed in a Jupyter IPython notebook to provide an interactive\nmechanism to cancel a job submitted from the notebook.\n\nOnce clicked the button is disabled unless the cancel fails.\n\nA job may be cancelled directly using::\n\nsubmission_result = submit(ctx_type, topology, config)\nsubmission_result.job.cancel()\n\nArgs:\n\ndescription(str): Text used as the button description, defaults to value based upon the job name.\n\n.. warning::\nBehavior when called outside a notebook is undefined.\n\n.. versionadded:: 1.12", "source": "codesearchnet"}
{"code": "def floodlight_report(config, task: dict, floodlight_id: int) -> int:\n    account_id, subaccount_id = parse_account(config, task['auth'], task['account'])\n    name = 'Floodlight Monitor %s %s ( StarThinker )' % (account_id, floodlight_id)\n    if config.verbose:\n        print('FLOODLIGHT MONITOR REPORT: ', name)\n    report = report_build(config, task['auth'], task['account'], {'kind': 'dfareporting\n    return report['id']", "docstring": "Create a report for a specific floodlight if it does not exist.\n\nArgs:\nfloodlight_id - the floodlight being monitored\n\nReturns:\nThe id of the created report.", "source": "github-repos"}
{"code": "def get_tensors(object_):\n    if torch.is_tensor(object_):\n        return [object_]\n    elif isinstance(object_, (str, float, int)):\n        return []\n    tensors = set()\n    if isinstance(object_, collections.abc.Mapping):\n        for value in object_.values():\n            tensors.update(get_tensors(value))\n    elif isinstance(object_, collections.abc.Iterable):\n        for value in object_:\n            tensors.update(get_tensors(value))\n    else:\n        members = [value for (key, value) in inspect.getmembers(object_) if (not isinstance(value, (collections.abc.Callable, type(None))))]\n        tensors.update(get_tensors(members))\n    return tensors", "docstring": "Get all tensors associated with ``object_``\n\nArgs:\nobject_ (any): Any object to look for tensors.\n\nReturns:\n(list of torch.tensor): List of tensors that are associated with ``object_``.", "source": "codesearchnet"}
{"code": "def _print_drift_report(self):\n        \n        try:\n            response = self._cloud_formation.describe_stack_resources(StackName=self._stack_name)\n            rows = []\n            for resource in response.get('StackResources', []):\n                row = []\n                row.append(resource.get('LogicalResourceId', 'unknown'))\n                row.append(resource.get('PhysicalResourceId', 'unknown'))\n                row.append(resource.get('ResourceStatus', 'unknown'))\n                row.append(resource.get('DriftInformation', {}).get('StackResourceDriftStatus', 'unknown'))\n                rows.append(row)\n\n            print('Drift Report:')\n            print(tabulate(rows, headers=[\n                'Logical ID',\n                'Physical ID',\n                'Resource Status',\n                'Drift Info'\n            ]))\n        except Exception as wtf:\n            logging.error(wtf, exc_info=True)\n            return False\n\n        return True", "docstring": "Report the drift of the stack.\n\nArgs:\nNone\n\nReturns:\nGood or Bad; True or False\n\nNote: not yet implemented", "source": "juraj-google-style"}
{"code": "def __init__(self, session, object_factory):\n        \n        check_type(session, RestSession)\n\n        super(PeopleAPI, self).__init__()\n\n        self._session = session\n        self._object_factory = object_factory", "docstring": "Initialize a new PeopleAPI object with the provided RestSession.\n\nArgs:\nsession(RestSession): The RESTful session object to be used for\nAPI calls to the Webex Teams service.\n\nRaises:\nTypeError: If the parameter types are incorrect.", "source": "juraj-google-style"}
{"code": "def liquid_precipitation_depth(self, value=999.0):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `liquid_precipitation_depth`'.format(value))\n\n        self._liquid_precipitation_depth = value", "docstring": "Corresponds to IDD Field `liquid_precipitation_depth`\n\nArgs:\nvalue (float): value for IDD Field `liquid_precipitation_depth`\nUnit: mm\nMissing value: 999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id):\n  \n  \n  feed_item_target_service = client.GetService(\n      'FeedItemTargetService', 'v201809')\n\n  \n  \n  ad_group_target = {\n      'xsi_type': 'FeedItemAdGroupTarget',\n      'feedId': feed_item['feedId'],\n      'feedItemId': feed_item['feedItemId'],\n      'adGroupId': adgroup_id\n  }\n\n  operation = {'operator': 'ADD', 'operand': ad_group_target}\n\n  response = feed_item_target_service.mutate([operation])\n  new_ad_group_target = response['value'][0]\n\n  print('Feed item target for feed ID %s and feed item ID %s was created to '\n        'restrict serving to ad group ID %s' %\n        (new_ad_group_target['feedId'],\n         new_ad_group_target['feedItemId'],\n         new_ad_group_target['adGroupId']))", "docstring": "Restricts the feed item to an ad group.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_item: The feed item.\nadgroup_id: The ad group ID.", "source": "juraj-google-style"}
{"code": "def log_metric(self, name, value, unit=None, global_step=None, extras=None):\n    if (not isinstance(value, numbers.Number)):\n        tf.logging.warning('Metric value to log should be a number. Got %s', type(value))\n        return\n    if extras:\n        extras = [{'name': k, 'value': v} for (k, v) in sorted(extras.items())]\n    else:\n        extras = []\n    with tf.gfile.GFile(os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), 'a') as f:\n        metric = {'name': name, 'value': float(value), 'unit': unit, 'global_step': global_step, 'timestamp': datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN), 'extras': extras}\n        try:\n            json.dump(metric, f)\n            f.write('\\n')\n        except (TypeError, ValueError) as e:\n            tf.logging.warning('Failed to dump metric to log file: name %s, value %s, error %s', name, value, e)", "docstring": "Log the benchmark metric information to local file.\n\nCurrently the logging is done in a synchronized way. This should be updated\nto log asynchronously.\n\nArgs:\nname: string, the name of the metric to log.\nvalue: number, the value of the metric. The value will not be logged if it\nis not a number type.\nunit: string, the unit of the metric, E.g \"image per second\".\nglobal_step: int, the global_step when the metric is logged.\nextras: map of string:string, the extra information about the metric.", "source": "codesearchnet"}
{"code": "def get(cls, resource_type):\n        \n        if isinstance(resource_type, str):\n            obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)\n\n        elif isinstance(resource_type, int):\n            obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)\n\n        elif isinstance(resource_type, cls):\n            return resource_type\n\n        else:\n            obj = None\n\n        if not obj:\n            obj = cls()\n            obj.resource_type = resource_type\n            db.session.add(obj)\n            db.session.commit()\n            db.session.refresh(obj)\n\n        return obj", "docstring": "Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will\nbe created in the database and returned\n\nArgs:\nresource_type (str): Resource type name\n\nReturns:\n:obj:`ResourceType`", "source": "juraj-google-style"}
{"code": "def run_step(context):\n    logger.debug('started')\n    context.assert_child_key_has_value('fileWriteYaml', 'path', __name__)\n    out_path = context.get_formatted_string(context['fileWriteYaml']['path'])\n    is_payload_specified = ('payload' in context['fileWriteYaml'])\n    yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context()\n    logger.debug(f'opening destination file for writing: {out_path}')\n    os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)\n    with open(out_path, 'w') as outfile:\n        if is_payload_specified:\n            payload = context['fileWriteYaml']['payload']\n            formatted_iterable = context.get_formatted_iterable(payload)\n        else:\n            formatted_iterable = context.get_formatted_iterable(context)\n        yaml_writer.dump(formatted_iterable, outfile)\n    logger.info(f'formatted context content and wrote to {out_path}')\n    logger.debug('done')", "docstring": "Write payload out to yaml file.\n\nArgs:\ncontext: pypyr.context.Context. Mandatory.\nThe following context keys expected:\n- fileWriteYaml\n- path. mandatory. path-like. Write output file to\nhere. Will create directories in path for you.\n- payload. optional. Write this to output file. If not\nspecified, output entire context.\n\nReturns:\nNone.\n\nRaises:\npypyr.errors.KeyNotInContextError: fileWriteYaml or\nfileWriteYaml['path'] missing in context.\npypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or\nfileWriteYaml['path'] exists but is None.", "source": "codesearchnet"}
{"code": "def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3):\n\n    class _elementInfo(object):\n\n        def __init__(self, el, pos, depth=0, max_list=3):\n            self.shape = ''\n            self.type = type(el).__name__\n            self.dtype = ''\n            self.range = ''\n            self.sub_elements = []\n            self.ident = (' ' * (depth * 2))\n            self.pos = pos\n            numpy_scalar_types = list(itertools.chain(*np.sctypes.values()))\n            if isinstance(el, (int, float, bool)):\n                self.range = ' with value {}'.format(el)\n            elif (type(el) is np.ndarray):\n                self.shape = ' of shape {}'.format(el.shape)\n                self.dtype = ':{}'.format(str(el.dtype))\n                self.range = ' in range [{}, {}]'.format(el.min(), el.max())\n            elif (type(el) in numpy_scalar_types):\n                self.range = ' with value {}'.format(el)\n            elif isinstance(el, list):\n                self.shape = ' of len {}'.format(len(el))\n                if (depth < max_depth):\n                    for (k, subel) in enumerate(el):\n                        if (k < max_list):\n                            self.sub_elements.append(_elementInfo(subel, k, (depth + 1), max_list))\n                        else:\n                            self.sub_elements.append(((' ' * ((depth + 1) * 2)) + '...'))\n                            break\n                elif (len(el) > 0):\n                    self.sub_elements.append(((' ' * ((depth + 1) * 2)) + ' ...'))\n\n        def __str__(self):\n            strings = []\n            vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range)\n            strings.append('{}{}: {}{}{}{}'.format(*vals))\n            for (k, el) in enumerate(self.sub_elements):\n                strings.append(str(el))\n            return '\\n'.join(strings)\n    return str(_elementInfo(entry, k, depth, max_list))", "docstring": "Gather useful debug information from a datapoint.\n\nArgs:\nentry: the datapoint component\nk (int): index of this component in current datapoint\ndepth (int, optional): recursion depth\nmax_depth, max_list: same as in :meth:`__init__`.\n\nReturns:\nstring: debug message", "source": "codesearchnet"}
{"code": "def _remove_string_from_commastring(self, field, string):\n        \n        \n        commastring = self.data.get(field, '')\n        if string in commastring:\n            self.data[field] = commastring.replace(string, '')\n            return True\n        return False", "docstring": "Remove a string from a comma separated list of strings\n\nArgs:\nfield (str): Field containing comma separated list\nstring (str): String to remove\n\nReturns:\nbool: True if string removed or False if not", "source": "juraj-google-style"}
{"code": "def size(self):\n    if (len(self.grouping_column_types) > 1):\n        index_type = WeldStruct([self.grouping_column_types])\n    else:\n        index_type = self.grouping_column_types[0]\n        index_name = self.grouping_column_names[0]\n    return SeriesWeld(grizzly_impl.groupby_size(self.columns, self.column_types, self.grouping_columns, self.grouping_column_types), WeldLong(), index_type=index_type, index_name=index_name)", "docstring": "Returns the sizes of the groups as series.\n\nReturns:\nTYPE: Description", "source": "codesearchnet"}
{"code": "def _get_required_params_for_conversion(self, event_key, event_tags):\n    snapshot = {}\n    event_dict = {self.EventParams.EVENT_ID: self.config.get_event(event_key).id, self.EventParams.TIME: self._get_time(), self.EventParams.KEY: event_key, self.EventParams.UUID: str(uuid.uuid4())}\n    if event_tags:\n        revenue_value = event_tag_utils.get_revenue_value(event_tags)\n        if (revenue_value is not None):\n            event_dict[event_tag_utils.REVENUE_METRIC_TYPE] = revenue_value\n        numeric_value = event_tag_utils.get_numeric_value(event_tags, self.config.logger)\n        if (numeric_value is not None):\n            event_dict[event_tag_utils.NUMERIC_METRIC_TYPE] = numeric_value\n        if (len(event_tags) > 0):\n            event_dict[self.EventParams.TAGS] = event_tags\n    snapshot[self.EventParams.EVENTS] = [event_dict]\n    return snapshot", "docstring": "Get parameters that are required for the conversion event to register.\n\nArgs:\nevent_key: Key representing the event which needs to be recorded.\nevent_tags: Dict representing metadata associated with the event.\n\nReturns:\nDict consisting of the decisions and events info for conversion event.", "source": "codesearchnet"}
{"code": "def read_saved_model(saved_model_dir):\n    path_to_pbtxt = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))\n    path_to_pb = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n    if not file_io.file_exists(path_to_pbtxt) and (not file_io.file_exists(path_to_pb)):\n        raise IOError('SavedModel file does not exist at: %s' % saved_model_dir)\n    saved_model = saved_model_pb2.SavedModel()\n    if file_io.file_exists(path_to_pb):\n        with file_io.FileIO(path_to_pb, 'rb') as f:\n            file_content = f.read()\n        try:\n            saved_model.ParseFromString(file_content)\n            return saved_model\n        except message.DecodeError as e:\n            raise IOError('Cannot parse proto file %s: %s.' % (path_to_pb, str(e)))\n    elif file_io.file_exists(path_to_pbtxt):\n        with file_io.FileIO(path_to_pbtxt, 'rb') as f:\n            file_content = f.read()\n        try:\n            text_format.Merge(file_content.decode('utf-8'), saved_model)\n            return saved_model\n        except text_format.ParseError as e:\n            raise IOError('Cannot parse pbtxt file %s: %s.' % (path_to_pbtxt, str(e)))\n    else:\n        raise IOError('SavedModel file does not exist at: %s/{%s|%s}' % (saved_model_dir, constants.SAVED_MODEL_FILENAME_PBTXT, constants.SAVED_MODEL_FILENAME_PB))", "docstring": "Reads the saved_model.pb or saved_model.pbtxt file containing `SavedModel`.\n\nArgs:\nsaved_model_dir: Directory containing the SavedModel file.\n\nReturns:\nA `SavedModel` protocol buffer.\n\nRaises:\nIOError: If the file does not exist, or cannot be successfully parsed.", "source": "github-repos"}
{"code": "def keypath(self, key):\n    return fs.path(self.path, self.escape_key(key))", "docstring": "Get the filesystem path for a key.\n\nArguments:\nkey: Key.\n\nReturns:\nstr: Absolute path.", "source": "codesearchnet"}
{"code": "def CreateSignatureContract(publicKey):\n    script = Contract.CreateSignatureRedeemScript(publicKey)\n    params = b'\\x00'\n    encoded = publicKey.encode_point(True)\n    pubkey_hash = Crypto.ToScriptHash(encoded, unhex=True)\n    return Contract(script, params, pubkey_hash)", "docstring": "Create a signature contract.\n\nArgs:\npublicKey (edcsa.Curve.point): e.g. KeyPair.PublicKey.\n\nReturns:\nneo.SmartContract.Contract: a Contract instance.", "source": "codesearchnet"}
{"code": "def status(self):\n    return BackendStatus(backend_name=self.name(), backend_version=__version__, operational=True, pending_jobs=0, status_msg='')", "docstring": "Return backend status.\n\nReturns:\nBackendStatus: the status of the backend.", "source": "codesearchnet"}
{"code": "def load_template(filename):\n    \n    \n    template_file = os.path.join(PKG_DIR, 'templates', filename)\n    with open(template_file) as fp:\n        return fp.read()", "docstring": "Load template from file.\n\nThe templates are part of the package and must be included as\n``package_data`` in project ``setup.py``.\n\nArgs:\nfilename (str):\nThe template path. Relative to `peltak` package directory.\n\nReturns:\nstr: The content of the chosen template.", "source": "juraj-google-style"}
{"code": "def get_central_coors(self, row, col):\n    if ((row < 0) or (row >= self.nRows) or (col < 0) or (col >= self.nCols)):\n        raise ValueError(('The row (%d) or col (%d) must be >=0 and less than nRows (%d) or nCols (%d)!' % (row, col, self.nRows, self.nCols)))\n    else:\n        tmpx = (self.xMin + ((col + 0.5) * self.dx))\n        tmpy = (self.yMax - ((row + 0.5) * self.dx))\n        return (tmpx, tmpy)", "docstring": "Get the coordinates of central grid.\n\nArgs:\nrow: row number, range from 0 to (nRows - 1).\ncol: col number, range from 0 to (nCols - 1).\n\nReturns:\nXY coordinates. If the row or col are invalid, raise ValueError.", "source": "codesearchnet"}
{"code": "def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank, inner_shape):\n    if ragged_tensor.is_ragged(pylist):\n        raise TypeError('pylist may not be a RaggedTensor or RaggedTensorValue.')\n    if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:\n        if ragged_rank is not None and ragged_rank != 0:\n            raise ValueError('Invalid pylist=%r: incompatible with ragged_rank=%d' % (pylist, ragged_rank))\n        if inner_shape is not None and inner_shape:\n            raise ValueError('Invalid pylist=%r: incompatible with dim(inner_shape)=%d' % (pylist, len(inner_shape)))\n        return inner_factory(pylist, dtype, ())\n    if ragged_rank is not None and ragged_rank < 0:\n        raise ValueError('Invalid ragged_rank=%r: must be nonnegative' % ragged_rank)\n    scalar_depth, max_depth = _find_scalar_and_max_depth(pylist)\n    if scalar_depth is not None:\n        if max_depth > scalar_depth:\n            raise ValueError('Invalid pylist=%r: empty list nesting is greater than scalar value nesting' % pylist)\n        if ragged_rank is not None and max_depth < ragged_rank:\n            raise ValueError(f'Invalid pylist={pylist}, max depth smaller than ragged_rank={ragged_rank}')\n    if inner_shape is not None and ragged_rank is not None:\n        expected_depth = ragged_rank + len(inner_shape) + 1\n        if scalar_depth is not None and expected_depth != scalar_depth or (scalar_depth is None and expected_depth < max_depth):\n            raise ValueError('Invalid pylist=%r: incompatible with ragged_rank=%d and dim(inner_shape)=%d' % (pylist, ragged_rank, len(inner_shape)))\n    if ragged_rank == 0 or (ragged_rank is None and (max_depth < 2 or (inner_shape is not None and max_depth - len(inner_shape) < 2))):\n        return inner_factory(pylist, dtype, inner_shape)\n    if inner_shape is None:\n        if ragged_rank is None:\n            inner_shape = ()\n        else:\n            inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank)\n    if ragged_rank is None:\n        if scalar_depth is None:\n            ragged_rank = max(1, max_depth - 1)\n        else:\n            ragged_rank = max(1, scalar_depth - 1 - len(inner_shape))\n    nested_splits = []\n    values = pylist\n    for dim in range(ragged_rank):\n        nested_splits.append([0])\n        concatenated_values = []\n        for row in values:\n            nested_splits[dim].append(nested_splits[dim][-1] + len(row))\n            concatenated_values.extend(row)\n        values = concatenated_values\n    values = inner_factory(values, dtype=dtype, shape=(len(values),) + inner_shape, name='values')\n    for row_splits in reversed(nested_splits):\n        values = ragged_factory(values, row_splits)\n    return values", "docstring": "Constructs a constant RaggedTensor or RaggedTensorValue.\n\nArgs:\nragged_factory: A factory function with the signature:\n`ragged_factory(values, row_splits)`\ninner_factory: A factory function with the signature: `inner_factory(pylist,\ndtype, shape, name)`\npylist: A nested `list`, `tuple` or `np.ndarray`.\ndtype: Data type for returned value.\nragged_rank: Ragged rank for returned value.\ninner_shape: Inner value shape for returned value.\n\nReturns:\nA value returned by `ragged_factory` or `inner_factory`.\n\nRaises:\nValueError: If the scalar values in `pylist` have inconsistent nesting\ndepth; or if ragged_rank or inner_shape are incompatible with `pylist`.", "source": "github-repos"}
{"code": "def _Open(self, path_spec, mode='rb'):\n    \n    if not path_spec.HasParent():\n      raise errors.PathSpecError(\n          'Unsupported path specification without parent.')\n\n    file_object = resolver.Resolver.OpenFileObject(\n        path_spec.parent, resolver_context=self._resolver_context)\n\n    try:\n      fsapfs_container = pyfsapfs.container()\n      fsapfs_container.open_file_object(file_object)\n    except:\n      file_object.close()\n      raise\n\n    self._file_object = file_object\n    self._fsapfs_container = fsapfs_container", "docstring": "Opens the file system defined by path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\nmode (Optional[str])): file access mode. The default is 'rb' read-only\nbinary.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file system object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"}
{"code": "def common_vector_root(vec1, vec2):\n    \n    root = []\n    for v1, v2 in zip(vec1, vec2):\n        if v1 == v2:\n            root.append(v1)\n        else:\n            return root\n\n    return root", "docstring": "Return common root of the two vectors.\n\nArgs:\nvec1 (list/tuple): First vector.\nvec2 (list/tuple): Second vector.\n\nUsage example::\n\n>>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])\n[1, 2]\n\nReturns:\nlist: Common part of two vectors or blank list.", "source": "juraj-google-style"}
{"code": "def __init__(\n      self, resolver_context, encryption_method=None, file_object=None):\n    \n    if file_object is not None and encryption_method is None:\n      raise ValueError(\n          'File-like object provided without corresponding encryption method.')\n\n    super(EncryptedStream, self).__init__(resolver_context)\n    self._current_offset = 0\n    self._decrypted_data = b''\n    self._decrypted_data_offset = 0\n    self._decrypted_data_size = 0\n    self._decrypted_stream_size = None\n    self._decrypter = None\n    self._encrypted_data = b''\n    self._encryption_method = encryption_method\n    self._file_object = file_object\n    self._file_object_set_in_init = bool(file_object)\n    self._path_spec = None\n    self._realign_offset = True", "docstring": "Initializes a file-like object.\n\nIf the file-like object is chained do not separately use the parent\nfile-like object.\n\nArgs:\nresolver_context (Context): resolver context.\nencryption_method (Optional[str]): method used to the encrypt the data.\nfile_object (Optional[FileIO]): parent file-like object.\n\nRaises:\nValueError: if file_object provided but encryption_method is not.", "source": "juraj-google-style"}
{"code": "def optimized_for_xmon(circuit: circuits.Circuit, new_device: Optional[xmon_device.XmonDevice]=None, qubit_map: Callable[([ops.Qid], devices.GridQubit)]=(lambda e: cast(devices.GridQubit, e)), allow_partial_czs: bool=False) -> circuits.Circuit:\n    copy = circuit.copy()\n    opts = (_OPTIMIZERS_PART_CZ if allow_partial_czs else _OPTIMIZERS)\n    for optimizer in opts:\n        optimizer(copy)\n    return circuits.Circuit.from_ops((op.transform_qubits(qubit_map) for op in copy.all_operations()), strategy=circuits.InsertStrategy.EARLIEST, device=(new_device or copy.device))", "docstring": "Optimizes a circuit with XmonDevice in mind.\n\nStarts by converting the circuit's operations to the xmon gate set, then\nbegins merging interactions and rotations, ejecting pi-rotations and phasing\noperations, dropping unnecessary operations, and pushing operations earlier.\n\nArgs:\ncircuit: The circuit to optimize.\nnew_device: The device the optimized circuit should be targeted at. If\nset to None, the circuit's current device is used.\nqubit_map: Transforms the qubits (e.g. so that they are GridQubits).\nallow_partial_czs: If true, the optimized circuit may contain partial CZ\ngates.  Otherwise all partial CZ gates will be converted to full CZ\ngates.  At worst, two CZ gates will be put in place of each partial\nCZ from the input.\n\nReturns:\nThe optimized circuit.", "source": "codesearchnet"}
{"code": "def list_adb_devices_by_usb_id():\n    out = adb.AdbProxy().devices(['-l'])\n    clean_lines = new_str(out, 'utf-8').strip().split('\\n')\n    results = []\n    for line in clean_lines:\n        tokens = line.strip().split()\n        if ((len(tokens) > 2) and (tokens[1] == 'device')):\n            results.append(tokens[2])\n    return results", "docstring": "List the usb id of all android devices connected to the computer that\nare detected by adb.\n\nReturns:\nA list of strings that are android device usb ids. Empty if there's\nnone.", "source": "codesearchnet"}
{"code": "def isanytargetmethod(object):\n    decorators, target = tf_decorator.unwrap(object)\n    for decorator in decorators:\n        if _inspect.ismethod(decorator.decorated_target):\n            return True\n    while isinstance(target, functools.partial):\n        target = target.func\n    return callable(target) and (not _inspect.isfunction(target))", "docstring": "Checks if `object` or a TF Decorator wrapped target contains self or cls.\n\nThis function could be used along with `tf_inspect.getfullargspec` to\ndetermine if the first argument of `object` argspec is self or cls. If the\nfirst argument is self or cls, it needs to be excluded from argspec when we\ncompare the argspec to the input arguments and, if provided, the tf.function\ninput_signature.\n\nLike `tf_inspect.getfullargspec` and python `inspect.getfullargspec`, it\ndoes not unwrap python decorators.\n\nArgs:\nobj: An method, function, or functool.partial, possibly decorated by\nTFDecorator.\n\nReturns:\nA bool indicates if `object` or any target along the chain of TF decorators\nis a method.", "source": "github-repos"}
{"code": "def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    cookie_name = self._GetRowValue(query_hash, row, 'name')\n    cookie_data = self._GetRowValue(query_hash, row, 'value')\n\n    hostname = self._GetRowValue(query_hash, row, 'host_key')\n    if hostname.startswith('.'):\n      hostname = hostname[1:]\n\n    httponly = self._GetRowValue(query_hash, row, 'httponly')\n    path = self._GetRowValue(query_hash, row, 'path')\n    persistent = self._GetRowValue(query_hash, row, 'persistent')\n    secure = self._GetRowValue(query_hash, row, 'secure')\n\n    if secure:\n      scheme = 'https'\n    else:\n      scheme = 'http'\n\n    url = '{0:s}:\n\n    event_data = ChromeCookieEventData()\n    event_data.cookie_name = cookie_name\n    event_data.data = cookie_data\n    event_data.host = hostname\n    event_data.httponly = bool(httponly)\n    event_data.path = path\n    event_data.persistent = bool(persistent)\n    event_data.query = query\n    event_data.secure = bool(secure)\n    event_data.url = url\n\n    timestamp = self._GetRowValue(query_hash, row, 'creation_utc')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'last_access_utc')\n    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    timestamp = self._GetRowValue(query_hash, row, 'expires_utc')\n    if timestamp:\n      date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    for plugin in self._cookie_plugins:\n      if cookie_name != plugin.COOKIE_NAME:\n        continue\n\n      try:\n        plugin.UpdateChainAndProcess(\n            parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name,\n            url=url)\n\n      except Exception as exception:  \n        parser_mediator.ProduceExtractionWarning(\n            'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(\n                plugin.NAME, exception))", "docstring": "Parses a cookie row.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from the query.", "source": "juraj-google-style"}
{"code": "def change_subscription(self, topics):\n        \n        if self._user_assignment:\n            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)\n\n        if isinstance(topics, six.string_types):\n            topics = [topics]\n\n        if self.subscription == set(topics):\n            log.warning(\"subscription unchanged by change_subscription(%s)\",\n                        topics)\n            return\n\n        for t in topics:\n            self._ensure_valid_topic_name(t)\n\n        log.info('Updating subscribed topics to: %s', topics)\n        self.subscription = set(topics)\n        self._group_subscription.update(topics)\n\n        \n        for tp in set(self.assignment.keys()):\n            if tp.topic not in self.subscription:\n                del self.assignment[tp]", "docstring": "Change the topic subscription.\n\nArguments:\ntopics (list of str): topics for subscription\n\nRaises:\nIllegalStateErrror: if assign_from_user has been used already\nTypeError: if a topic is None or a non-str\nValueError: if a topic is an empty string or\n- a topic name is '.' or '..' or\n- a topic name does not consist of ASCII-characters/'-'/'_'/'.'", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n        \n        self.ListGroups = channel.unary_unary(\n            \"/google.monitoring.v3.GroupService/ListGroups\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupsResponse.FromString,\n        )\n        self.GetGroup = channel.unary_unary(\n            \"/google.monitoring.v3.GroupService/GetGroup\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.GetGroupRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString,\n        )\n        self.CreateGroup = channel.unary_unary(\n            \"/google.monitoring.v3.GroupService/CreateGroup\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.CreateGroupRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString,\n        )\n        self.UpdateGroup = channel.unary_unary(\n            \"/google.monitoring.v3.GroupService/UpdateGroup\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.UpdateGroupRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__pb2.Group.FromString,\n        )\n        self.DeleteGroup = channel.unary_unary(\n            \"/google.monitoring.v3.GroupService/DeleteGroup\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.DeleteGroupRequest.SerializeToString,\n            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n        self.ListGroupMembers = channel.unary_unary(\n            \"/google.monitoring.v3.GroupService/ListGroupMembers\",\n            request_serializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersRequest.SerializeToString,\n            response_deserializer=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_group__service__pb2.ListGroupMembersResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def value_container(val):\n    container = None\n    if not isinstance(val, values_lib.DistributedVariable):\n        if hasattr(val, '_distributed_container'):\n            container = val._distributed_container()\n        elif isinstance(val, composite_tensor.CompositeTensor) and hasattr(val, 'handle') and hasattr(val.handle, '_distributed_container'):\n            container = val.handle._distributed_container()\n    return container if container is not None else val", "docstring": "Returns the container that this per-replica `value` belongs to.\n\nArgs:\nval: A value returned by `call_for_each_replica()` or a variable created in\n`scope()`.\n\nReturns:\nA container that `value` belongs to.\nIf value does not belong to any container (including the case of\ncontainer having been destroyed), returns the value itself.", "source": "github-repos"}
{"code": "def __and__(self, other):\n        \n        other = self._cast_to_frameset(other)\n        if other is NotImplemented:\n            return NotImplemented\n        return self.from_iterable(self.items & other.items, sort=True)", "docstring": "Overloads the ``&`` operator.\nReturns a new :class:`FrameSet` that holds only the\nframes `self` and `other` have in common.\n\nNote:\n\nThe order of operations is irrelevant:\n``(self & other) == (other & self)``\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\n:class:`FrameSet`:\n:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`", "source": "juraj-google-style"}
{"code": "def prepare_prop_defs(prop_defs, prop_name, cls_names):\n    \n    def get_def(prop_defs, def_fields, default_val=None):\n        \n        rtn_list = []\n        for fld in def_fields:\n            if prop_defs.get(fld):\n                rtn_list += prop_defs.get(fld)\n        if not rtn_list and default_val:\n            rtn_list.append(default_val)\n        elif rtn_list:\n            try:\n                rtn_list = list(set(rtn_list))\n            except TypeError as e:\n                \n                \n                \n                new_rtn = []\n                for item in rtn_list:\n                    if isinstance(item, MODULE.rdfclass.RdfClassBase):\n                        new_rtn.append(\\\n                                \"|\".join(merge_rdf_list(item['owl_unionOf'])))\n                    elif isinstance(item, list):\n                        new_rtn.append(\"|\".join(item))\n                    else:\n                        new_rtn.append(item)\n                rtn_list = list(set(new_rtn))\n                new_rtn = []\n                for item in rtn_list:\n                    if \"|\" in item:\n                        new_rtn.append([Uri(domain) \\\n                                        for domain in item.split(\"|\")])\n                    else:\n                        new_rtn.append(Uri(item))\n                rtn_list = new_rtn\n\n                \n        return rtn_list\n\n    required_def_defaults = {\n        Uri('kds_rangeDef'): [{}],\n        Uri('rdfs_range'): [Uri(\"xsd_string\")],\n        Uri('rdfs_domain'): cls_names,\n        Uri('rdfs_label'): [NSM.nouri(prop_name)],\n        Uri('kds_formDefault'): [{\n            Uri('kds:appliesToClass'): Uri('kdr:AllClasses'),\n            Uri('kds:formFieldName'): \"emailaddr\",\n            Uri('kds:formLabelName'): [NSM.nouri(prop_name)],\n            Uri('kds:formFieldHelp'): find_values(DESCRIPTION_FIELDS,\n                                                  prop_defs,\n                                                  None),\n            Uri('kds:fieldType'): {\n                Uri('rdf:type'): Uri('kdr:TextField')\n            }\n        }],\n        Uri('kds_propertyValidation'): [],\n        Uri('kds_propertySecurity'): [],\n        Uri('kds_propertyProcessing'): []\n    }\n    for prop in required_def_defaults:\n        if prop not in prop_defs.keys():\n            prop_defs[prop] = required_def_defaults[prop]\n    prop_defs['rdfs_domain'] = get_def(prop_defs, DOMAIN_FIELDS, cls_names)\n    prop_defs['rdfs_range'] = get_def(prop_defs, RANGE_FIELDS,\n                                      Uri('xsd_string'))\n\n    return prop_defs", "docstring": "Examines and adds any missing defs to the prop_defs dictionary for\nuse with the RdfPropertyMeta.__prepare__ method\n\nArgs:\n-----\nprop_defs: the defintions from the rdf vocabulary defintion\nprop_name: the property name\ncls_names: the name of the associated classes\n\nReturns:\n--------\nprop_defs", "source": "juraj-google-style"}
{"code": "def _items(self, cart_status, category=None):\n        \n\n        if not isinstance(cart_status, Iterable):\n            cart_status = [cart_status]\n\n        status_query = (\n            Q(productitem__cart__status=status) for status in cart_status\n        )\n\n        in_cart = Q(productitem__cart__user=self.user)\n        in_cart = in_cart & reduce(operator.__or__, status_query)\n\n        quantities_in_cart = When(\n            in_cart,\n            then=\"productitem__quantity\",\n        )\n\n        quantities_or_zero = Case(\n            quantities_in_cart,\n            default=Value(0),\n        )\n\n        products = inventory.Product.objects\n\n        if category:\n            products = products.filter(category=category)\n\n        products = products.select_related(\"category\")\n        products = products.annotate(quantity=Sum(quantities_or_zero))\n        products = products.filter(quantity__gt=0)\n\n        out = []\n        for prod in products:\n            out.append(ProductAndQuantity(prod, prod.quantity))\n        return out", "docstring": "Aggregates the items that this user has purchased.\n\nArguments:\ncart_status (int or Iterable(int)): etc\ncategory (Optional[models.inventory.Category]): the category\nof items to restrict to.\n\nReturns:\n[ProductAndQuantity, ...]: A list of product-quantity pairs,\naggregating like products from across multiple invoices.", "source": "juraj-google-style"}
{"code": "def add(self, name: str, path_or_url: str) -> Source:\n    logger.info('adding source: %s -> %s', name, path_or_url)\n    if (name in self.__sources):\n        logger.info('name already used by existing source: %s', name)\n        raise NameInUseError(name)\n    is_url = False\n    try:\n        scheme = urllib.parse.urlparse(path_or_url).scheme\n        is_url = (scheme in ['http', 'https'])\n        logger.debug('source determined to be remote: %s', path_or_url)\n    except ValueError:\n        logger.debug('source determined to be local: %s', path_or_url)\n    if is_url:\n        url = path_or_url\n        path = url.replace('https:\n        path = path.replace('/', '_')\n        path = path.replace('.', '_')\n        path = os.path.join(self.__path, path)\n        shutil.rmtree(path, ignore_errors=True)\n        try:\n            logger.debug('cloning repository %s to %s', url, path)\n            repo = git.Repo.clone_from(url, path)\n            logger.debug('cloned repository %s to %s', url, path)\n            sha = repo.head.object.hexsha\n            version = repo.git.rev_parse(sha, short=8)\n        except:\n            shutil.rmtree(path, ignore_errors=True)\n            logger.error('failed to download remote source to local: %s -> %s', url, path)\n            raise IOError(\"failed to download remote source to local installation: '{}' -> '{}'\".format(url, path))\n        source = RemoteSource(name, path, url, version)\n    else:\n        path = os.path.abspath(path_or_url)\n        if (not os.path.isdir(path)):\n            raise IOError('no directory found at path: {}'.format(path))\n        source = LocalSource(name, path)\n    self.load(source)\n    self.save()\n    logger.info('added source: %s', name)", "docstring": "Attempts to register a source provided by a given URL or local path\nunder a given name.\n\nReturns:\na description of the registered source.\n\nRaises:\nNameInUseError: if an existing source is already registered under\nthe given name.\nIOError: if no directory exists at the given path.\nIOError: if downloading the remote source failed. (FIXME)", "source": "codesearchnet"}
{"code": "def set_name(self, vid, name=None, default=False, disable=False):\n        \n        cmds = self.command_builder('name', value=name, default=default,\n                                    disable=disable)\n        return self.configure_vlan(vid, cmds)", "docstring": "Configures the VLAN name\n\nEosVersion:\n4.13.7M\n\nArgs:\nvid (str): The VLAN ID to Configures\nname (str): The value to configure the vlan name\ndefault (bool): Defaults the VLAN ID name\ndisable (bool): Negates the VLAN ID name\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "juraj-google-style"}
{"code": "def as_list(self, value):\n    if isinstance(value, tensor_lib.Tensor):\n        return [value]\n    elif isinstance(value, IndexedSlices):\n        return [value]\n    elif isinstance(value, value_lib.Mirrored):\n        return value.values\n    else:\n        raise ValueError('unwrap: unsupported input type: %s' % type(value))", "docstring": "An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.\n\nThe reason it exists is to provide a uniformed view of returned value of\n\"reduce\" calls, especially across tf.function boundaries. Returning\n`Mirrored` from a tf.function will only evaluate the primary value, which\nmakes collective ops of non-primary device being pruned, and will eventually\ncause hanging.\n\nArgs:\nvalue: the value to convert, can be one of `Mirrored`, `Tensor` and\n`IndexedSlices`.\n\nReturns:\nA list of `Tensor` or `IndexedSlices`.", "source": "github-repos"}
{"code": "def DisplayAccountTree(account, accounts, links, depth=0):\n  \n  prefix = '-' * depth * 2\n  print '%s%s, %s' % (prefix, account['customerId'], account['name'])\n  if account['customerId'] in links:\n    for child_link in links[account['customerId']]:\n      child_account = accounts[child_link['clientCustomerId']]\n      DisplayAccountTree(child_account, accounts, links, depth + 1)", "docstring": "Displays an account tree.\n\nArgs:\naccount: dict The account to display.\naccounts: dict Map from customerId to account.\nlinks: dict Map from customerId to child links.\ndepth: int Depth of the current account in the tree.", "source": "juraj-google-style"}
{"code": "def get_keys_from_shelve(file_name, file_location):\n    \n    temp_list = list()\n    file = __os.path.join(file_location, file_name)\n    shelve_store = __shelve.open(file)\n    for key in shelve_store:\n        temp_list.append(key)\n    shelve_store.close()\n    return temp_list", "docstring": "Function to retreive all keys in a shelve\nArgs:\nfile_name: Shelve storage file name\nfile_location: The location of the file, derive from the os module\n\nReturns:\na list of the keys", "source": "juraj-google-style"}
{"code": "def from_proto(cls, struct_def_proto: message.Message, backbone_element_path: Optional[str]=None) -> 'QuantityStructureDataType':\n    struct_type = StructureDataType.from_proto(struct_def_proto=struct_def_proto, backbone_element_path=backbone_element_path, parent_definitions=None)\n    return cls(structure_definition=struct_type.structure_definition, backbone_element_path=struct_type.backbone_element_path, base_type=struct_type.base_type, element_type=struct_type.element_type, _child_defs=struct_type._child_defs, _slices=struct_type._slices, _raw_url=struct_type._raw_url, root_element_definition=struct_type.root_element_definition, cardinality=struct_type.cardinality)", "docstring": "Creates a QuantityStructureDataType from a proto.\n\nArgs:\nstruct_def_proto: Proto containing information about the structure\ndefinition.\nbackbone_element_path: Optional path to the structure def.\n\nReturns:\nA QuantityStructureDataType.", "source": "github-repos"}
{"code": "def left_margin(self, margin):\n        \n        if margin <= 255 and margin >= 0:\n            self.send(chr(27)+'I'+chr(margin))\n        else:\n            raise RuntimeError('Invalid margin parameter.')", "docstring": "Specify the left margin.\n\nArgs:\nmargin: The left margin, in character width. Must be less than the media's width.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid margin parameter.", "source": "juraj-google-style"}
{"code": "def set_Tc(self, Tc, T=None):\n        \n        if isinstance(Tc, Iterable):\n            if len(Tc)==len(T):\n                x = np.concatenate(([-ttconf.BIG_NUMBER], T, [ttconf.BIG_NUMBER]))\n                y = np.concatenate(([Tc[0]], Tc, [Tc[-1]]))\n                self.Tc = interp1d(x,y)\n            else:\n                self.logger(\"need Tc values and Timepoints of equal length\",2,warn=True)\n                self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER], [1e-5, 1e-5])\n        else:\n            self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER],\n                               [Tc+ttconf.TINY_NUMBER, Tc+ttconf.TINY_NUMBER])\n        self.calc_integral_merger_rate()", "docstring": "initialize the merger model with a coalescent time\n\nArgs:\n- Tc:   a float or an iterable, if iterable another argument T of same shape is required\n- T:    an array like of same shape as Tc that specifies the time pivots corresponding to Tc\nReturns:\n- None", "source": "juraj-google-style"}
{"code": "def _decode_exp(self, access_token=None):\n    c = self.get_credentials()\n    jwt = (access_token or c.access_token)\n    x = self.decode_jwt_payload(jwt)\n    if ('exp' in x):\n        try:\n            exp = int(x['exp'])\n        except ValueError:\n            raise PanCloudError('Expiration time (exp) must be an integer')\n        else:\n            self.jwt_exp = exp\n            return exp\n    else:\n        raise PanCloudError('No exp field found in payload')", "docstring": "Extract exp field from access token.\n\nArgs:\naccess_token (str): Access token to decode. Defaults to ``None``.\n\nReturns:\nint: JWT expiration in epoch seconds.", "source": "codesearchnet"}
{"code": "def plot_wigner_seitz(lattice, ax=None, **kwargs):\n    \n    ax, fig, plt = get_ax3d_fig_plt(ax)\n\n    if \"color\" not in kwargs:\n        kwargs[\"color\"] = \"k\"\n    if \"linewidth\" not in kwargs:\n        kwargs[\"linewidth\"] = 1\n\n    bz = lattice.get_wigner_seitz_cell()\n    ax, fig, plt = get_ax3d_fig_plt(ax)\n    for iface in range(len(bz)):\n        for line in itertools.combinations(bz[iface], 2):\n            for jface in range(len(bz)):\n                if iface < jface and any(\n                        np.all(line[0] == x) for x in bz[jface]) \\\n                        and any(np.all(line[1] == x) for x in bz[jface]):\n                    ax.plot(*zip(line[0], line[1]), **kwargs)\n\n    return fig, ax", "docstring": "Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes\n\nArgs:\nlattice: Lattice object\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black\nand linewidth to 1.\n\nReturns:\nmatplotlib figure and matplotlib ax", "source": "juraj-google-style"}
{"code": "async def getNodeByBuid(self, buid):\n    node = self.livenodes.get(buid)\n    if (node is not None):\n        return node\n    props = {}\n    proplayr = {}\n    for layr in self.layers:\n        layerprops = (await layr.getBuidProps(buid))\n        props.update(layerprops)\n        proplayr.update({k: layr for k in layerprops})\n    node = s_node.Node(self, buid, props.items(), proplayr=proplayr)\n    (await asyncio.sleep(0))\n    if (node.ndef is None):\n        return None\n    self.buidcache.append(node)\n    self.livenodes[buid] = node\n    return node", "docstring": "Retrieve a node tuple by binary id.\n\nArgs:\nbuid (bytes): The binary ID for the node.\n\nReturns:\nOptional[s_node.Node]: The node object or None.", "source": "codesearchnet"}
{"code": "def _ParseCacheEntry(\n      self, parser_mediator, file_object, display_name, block_size):\n    \n    cache_entry, event_data = self._ReadCacheEntry(\n        file_object, display_name, block_size)\n\n    date_time = dfdatetime_posix_time.PosixTime(\n        timestamp=cache_entry.last_fetched_time)\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if cache_entry.last_modified_time:\n      date_time = dfdatetime_posix_time.PosixTime(\n          timestamp=cache_entry.last_modified_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n      parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    if cache_entry.expiration_time:\n      date_time = dfdatetime_posix_time.PosixTime(\n          timestamp=cache_entry.expiration_time)\n      event = time_events.DateTimeValuesEvent(\n          date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n      parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a cache entry.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\ndisplay_name (str): display name.\nblock_size (int): block size.", "source": "juraj-google-style"}
{"code": "def dframe(self, dimensions=None, multi_index=False):\n        \n        if dimensions is None:\n            dimensions = [d.name for d in self.dimensions()]\n        else:\n            dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]\n        df = self.interface.dframe(self, dimensions)\n        if multi_index:\n            df = df.set_index([d for d in dimensions if d in self.kdims])\n        return df", "docstring": "Convert dimension values to DataFrame.\n\nReturns a pandas dataframe of columns along each dimension,\neither completely flat or indexed by key dimensions.\n\nArgs:\ndimensions: Dimensions to return as columns\nmulti_index: Convert key dimensions to (multi-)index\n\nReturns:\nDataFrame of columns corresponding to each dimension", "source": "juraj-google-style"}
{"code": "def remove_child(self, child):\n    if (not isinstance(child, Node)):\n        raise TypeError('child must be a Node')\n    try:\n        self.children.remove(child)\n        child.parent = None\n    except:\n        raise RuntimeError('Attempting to remove non-existent child')", "docstring": "Remove child from ``Node`` object\n\nArgs:\n``child`` (``Node``): The child to remove", "source": "codesearchnet"}
{"code": "def encode(signer, payload, header=None, key_id=None):\n    \n    if header is None:\n        header = {}\n\n    if key_id is None:\n        key_id = signer.key_id\n\n    header.update({'typ': 'JWT', 'alg': 'RS256'})\n\n    if key_id is not None:\n        header['kid'] = key_id\n\n    segments = [\n        _helpers.unpadded_urlsafe_b64encode(\n            json.dumps(header).encode('utf-8')\n        ),\n        _helpers.unpadded_urlsafe_b64encode(\n            json.dumps(payload).encode('utf-8')\n        ),\n    ]\n\n    signing_input = b'.'.join(segments)\n    signature = signer.sign(signing_input)\n    segments.append(\n        _helpers.unpadded_urlsafe_b64encode(signature)\n    )\n\n    return b'.'.join(segments)", "docstring": "Make a signed JWT.\n\nArgs:\nsigner (google.auth.crypt.Signer): The signer used to sign the JWT.\npayload (Mapping[str, str]): The JWT payload.\nheader (Mapping[str, str]): Additional JWT header payload.\nkey_id (str): The key id to add to the JWT header. If the\nsigner has a key id it will be used as the default. If this is\nspecified it will override the signer's key id.\n\nReturns:\nbytes: The encoded JWT.", "source": "juraj-google-style"}
{"code": "def measure(self) -> np.ndarray:\n    probs = np.real(bk.evaluate(self.probabilities()))\n    indices = np.asarray(list(np.ndindex(*([2] * self.qubit_nb))))\n    res = np.random.choice(probs.size, p=probs.ravel())\n    res = indices[res]\n    return res", "docstring": "Measure the state in the computational basis.\n\nReturns:\nA [2]*bits array of qubit states, either 0 or 1", "source": "codesearchnet"}
{"code": "def deps_from_pydit_json(requires, runtime=True):\n    \n    parsed = []\n    for req in requires:\n        \n        \n        \n        name, specs = None, None\n        \n        reqs = req.split(' ')\n        name = reqs[0]\n        if len(reqs) == 2:\n            specs = reqs[1]\n            \n            specs = specs.split(\",\")\n            \n            specs = [re.sub('[()]', '', spec) for spec in specs]\n            \n            \n            specs = [re.split('([0-9])', spec, 1) for spec in specs]\n            \n            \n            \n            for spec in specs:\n                spec[1:3] = [''.join(spec[1:3])]\n        if specs:\n            for spec in specs:\n                if '!' in spec[0]:\n                    parsed.append(['Conflicts', name, '=', spec[1]])\n                elif specs[0] == '==':\n                    parsed.append(['Requires', name, '=', spec[1]])\n                else:\n                    parsed.append(['Requires', name, spec[0], spec[1]])\n        else:\n            parsed.append(['Requires', name])\n\n    if not runtime:\n        for pars in parsed:\n            pars[0] = 'Build' + pars[0]\n\n    return parsed", "docstring": "Parses dependencies returned by pydist.json, since versions\nuses brackets we can't use pkg_resources to parse and we need a separate\nmethod\nArgs:\nrequires: list of dependencies as written in pydist.json of the package\nruntime: are the dependencies runtime (True) or build time (False)\nReturns:\nList of semi-SPECFILE dependecies (see dependency_to_rpm for format)", "source": "juraj-google-style"}
{"code": "def _find_test_class():\n    try:\n        return utils.find_subclass_in_module(base_test.BaseTestClass, sys.modules['__main__'])\n    except ValueError:\n        logging.exception('Exactly one subclass of `base_test.BaseTestClass` should be in the main file.')\n        sys.exit(1)", "docstring": "Finds the test class in a test script.\n\nWalk through module members and find the subclass of BaseTestClass. Only\none subclass is allowed in a test script.\n\nReturns:\nThe test class in the test module.\n\nRaises:\nSystemExit: Raised if the number of test classes is not exactly one.", "source": "github-repos"}
{"code": "def center_text(text, width=80):\n    \n    centered = []\n    for line in text.splitlines():\n        centered.append(line.center(width))\n    return \"\\n\".join(centered)", "docstring": "Center all lines of the text.\n\nIt is assumed that all lines width is smaller then B{width}, because the\nline width will not be checked.\n\nArgs:\ntext (str): Text to wrap.\nwidth (int): Maximum number of characters per line.\n\nReturns:\nstr: Centered text.", "source": "juraj-google-style"}
{"code": "def isregex_expr(expr):\n    \n    if not isinstance(expr, str):\n        return False\n\n    return all([\n        len(expr) > 3,\n        expr.startswith('re/'),\n        expr.endswith('/')\n    ])", "docstring": "Returns ``True`` is the given expression value is a regular expression\nlike string with prefix ``re/`` and suffix ``/``, otherwise ``False``.\n\nArguments:\nexpr (mixed): expression value to test.\n\nReturns:\nbool", "source": "juraj-google-style"}
{"code": "def init_logger(level, printout=True):\n    \n    root_logger = logging.getLogger(\"boussole\")\n    root_logger.setLevel(level)\n\n    \n    if not printout:\n        from io import StringIO\n        dummystream = StringIO()\n        handler = logging.StreamHandler(dummystream)\n    \n    else:\n        handler = logging.StreamHandler()\n        handler.setFormatter(\n            colorlog.ColoredFormatter(\n                '%(asctime)s - %(log_color)s%(message)s',\n                datefmt=\"%H:%M:%S\"\n            )\n        )\n\n    root_logger.addHandler(handler)\n\n    return root_logger", "docstring": "Initialize app logger to configure its level/handler/formatter/etc..\n\nTodo:\n* A mean to raise click.Abort or sys.exit when CRITICAL is used;\n\nArgs:\nlevel (str): Level name (``debug``, ``info``, etc..).\n\nKeyword Arguments:\nprintout (bool): If False, logs will never be outputed.\n\nReturns:\nlogging.Logger: Application logger.", "source": "juraj-google-style"}
{"code": "def readpar(par_file, root):\n    par_nml = deepcopy(PAR_DEFAULT)\n    if PAR_DFLT_FILE.is_file():\n        _enrich_with_par(par_nml, PAR_DFLT_FILE)\n    else:\n        PAR_DFLT_FILE.parent.mkdir(exist_ok=True)\n        f90nml.write(par_nml, str(PAR_DFLT_FILE))\n    if (not par_file.is_file()):\n        raise NoParFileError(par_file)\n    par_main = f90nml.read(str(par_file))\n    if ('default_parameters_parfile' in par_main):\n        par_dflt = par_main['default_parameters_parfile'].get('par_name_defaultparameters', 'par_defaults')\n        par_dflt = (root / par_dflt)\n        if (not par_dflt.is_file()):\n            raise NoParFileError(par_dflt)\n        _enrich_with_par(par_nml, par_dflt)\n    _enrich_with_par(par_nml, par_file)\n    par_out = ((root / par_nml['ioin']['output_file_stem']) / '_parameters.dat')\n    if par_out.is_file():\n        _enrich_with_par(par_nml, par_out)\n    par_out = ((root / par_nml['ioin']['hdf5_output_folder']) / 'parameters.dat')\n    if par_out.is_file():\n        _enrich_with_par(par_nml, par_out)\n    return par_nml", "docstring": "Read StagYY par file.\n\nThe namelist is populated in chronological order with:\n\n- :data:`PAR_DEFAULT`, an internal dictionary defining defaults;\n- :data:`PAR_DFLT_FILE`, the global configuration par file;\n- ``par_name_defaultparameters`` if it is defined in ``par_file``;\n- ``par_file`` itself;\n- ``parameters.dat`` if it can be found in the StagYY output directories.\n\nArgs:\npar_file (:class:`pathlib.Path`): path of par file.\nroot (:class:`pathlib.Path`): path on which other paths are rooted.\nThis is usually par.parent.\nReturns:\n:class:`f90nml.namelist.Namelist`: case insensitive dict of dict of\nvalues with first key being the namelist and second key the variables'\nname.", "source": "codesearchnet"}
{"code": "def filter_by_months_per_hour(self, months_per_hour):\n        \n        _filt_values = []\n        _filt_datetimes = []\n        for i, d in enumerate(self.datetimes):\n            if d in months_per_hour:\n                _filt_datetimes.append(d)\n                _filt_values.append(self._values[i])\n        return MonthlyPerHourCollection(\n            self.header.duplicate(), _filt_values, _filt_datetimes)", "docstring": "Filter the Data Collection based on a list of months per hour (as strings).\n\nArgs:\nmonths_per_hour: A list of tuples representing months per hour.\nEach tuple should possess two values: the first is the month\nand the second is the hour. (eg. (12, 23) = December at 11 PM)\n\nReturn:\nA new Data Collection with filtered data", "source": "juraj-google-style"}
{"code": "def __init__(self, git, rev):\n        \n        self.git = git\n        self.rev = rev", "docstring": "Create GitTree instance\n\nArgs:\ngit (dvc.scm.Git):\nbranch:", "source": "juraj-google-style"}
{"code": "def run_inference(self, batch: Sequence[dict[str, Union[tf.Tensor, torch.Tensor]]], model: Union[AutoModel, TFAutoModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    inference_args = {} if not inference_args else inference_args\n    if self._inference_fn:\n        return self._inference_fn(batch, model, self._device, inference_args, self._model_uri)\n    if self._framework == 'tf':\n        return _run_inference_tensorflow_keyed_tensor(batch, model, self._device, inference_args, self._model_uri)\n    else:\n        return _run_inference_torch_keyed_tensor(batch, model, self._device, inference_args, self._model_uri)", "docstring": "Runs inferences on a batch of Keyed Tensors and returns an Iterable of\nTensors Predictions.\n\nThis method stacks the list of Tensors in a vectorized format to optimize\nthe inference call.\n\nArgs:\nbatch: A sequence of Keyed Tensors. These Tensors should be batchable,\nas this method will call `tf.stack()`/`torch.stack()` and pass in\nbatched Tensors with dimensions (batch_size, n_features, etc.) into\nthe model's predict() function.\nmodel: A Tensorflow/PyTorch model.\ninference_args: Non-batchable arguments required as inputs to the\nmodel's inference function. Unlike Tensors in `batch`,\nthese parameters will not be dynamically batched.\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def _get_connection(self):\n    if getattr(self, '_connection', None):\n        logger.debug('Connection to sqlite db already exists. Using existing one.')\n    else:\n        dsn = self._dsn\n        if (dsn == 'sqlite:\n            dsn = ':memory:'\n        else:\n            dsn = dsn.replace('sqlite:\n        logger.debug('Creating new apsw connection.\\n   dsn: {}, config_dsn: {}'.format(dsn, self._dsn))\n        self._connection = apsw.Connection(dsn)\n    return self._connection", "docstring": "Returns connection to sqlite db.\n\nReturns:\nconnection to the sqlite db who stores mpr data.", "source": "codesearchnet"}
{"code": "def delete_row_range(self, format_str, start_game, end_game):\n        \n        row_keys = make_single_array(\n            self.tf_table.keys_by_range_dataset(\n                format_str.format(start_game),\n                format_str.format(end_game)))\n        row_keys = list(row_keys)\n        if not row_keys:\n            utils.dbg('No rows left for games %d..%d' % (\n                start_game, end_game))\n            return\n        utils.dbg('Deleting %d rows:  %s..%s' % (\n            len(row_keys), row_keys[0], row_keys[-1]))\n\n        \n        \n        \n        \n        \n        row_keys.reverse()\n        total_keys = len(row_keys)\n        utils.dbg('Deleting total of %d keys' % total_keys)\n        concurrency = min(MAX_BT_CONCURRENCY,\n                          multiprocessing.cpu_count() * 2)\n        with multiprocessing.Pool(processes=concurrency) as pool:\n            batches = []\n            with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:\n                for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,\n                                           row_keys):\n                    pbar.update(len(b))\n                    batches.append((self.btspec, b))\n                    if len(batches) >= concurrency:\n                        pool.map(_delete_rows, batches)\n                        batches = []\n                pool.map(_delete_rows, batches)\n                batches = []", "docstring": "Delete rows related to the given game range.\n\nArgs:\nformat_str:  a string to `.format()` by the game numbers\nin order to create the row prefixes.\nstart_game:  the starting game number of the deletion.\nend_game:  the ending game number of the deletion.", "source": "juraj-google-style"}
{"code": "def DecompressMessageList(cls, packed_message_list):\n    \n    compression = packed_message_list.compression\n    if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:\n      data = packed_message_list.message_list\n\n    elif (compression ==\n          rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION):\n      try:\n        data = zlib.decompress(packed_message_list.message_list)\n      except zlib.error as e:\n        raise DecodingError(\"Failed to decompress: %s\" % e)\n    else:\n      raise DecodingError(\"Compression scheme not supported\")\n\n    try:\n      result = rdf_flows.MessageList.FromSerializedString(data)\n    except rdfvalue.DecodeError:\n      raise DecodingError(\"RDFValue parsing failed.\")\n\n    return result", "docstring": "Decompress the message data from packed_message_list.\n\nArgs:\npacked_message_list: A PackedMessageList rdfvalue with some data in it.\n\nReturns:\na MessageList rdfvalue.\n\nRaises:\nDecodingError: If decompression fails.", "source": "juraj-google-style"}
{"code": "def api_representation(self, content_type):\n    payload = dict(Subject=self.subject, Body=dict(ContentType=content_type, Content=self.body))\n    if (self.sender is not None):\n        payload.update(From=self.sender.api_representation())\n    if any((isinstance(item, str) for item in self.to)):\n        self.to = [Contact(email=email) for email in self.to]\n    recipients = [contact.api_representation() for contact in self.to]\n    payload.update(ToRecipients=recipients)\n    if self.cc:\n        if any((isinstance(email, str) for email in self.cc)):\n            self.cc = [Contact(email) for email in self.cc]\n        cc_recipients = [contact.api_representation() for contact in self.cc]\n        payload.update(CcRecipients=cc_recipients)\n    if self.bcc:\n        if any((isinstance(email, str) for email in self.bcc)):\n            self.bcc = [Contact(email) for email in self.bcc]\n        bcc_recipients = [contact.api_representation() for contact in self.bcc]\n        payload.update(BccRecipients=bcc_recipients)\n    if self._attachments:\n        payload.update(Attachments=[attachment.api_representation() for attachment in self._attachments])\n    payload.update(Importance=str(self.importance))\n    return dict(Message=payload)", "docstring": "Returns the JSON representation of this message required for making requests to the API.\n\nArgs:\ncontent_type (str): Either 'HTML' or 'Text'", "source": "codesearchnet"}
{"code": "def get_value_at_percentile(self, percentile):\n    count_at_percentile = self.get_target_count_at_percentile(percentile)\n    total = 0\n    for index in range(self.counts_len):\n        total += self.get_count_at_index(index)\n        if (total >= count_at_percentile):\n            value_at_index = self.get_value_from_index(index)\n            if percentile:\n                return self.get_highest_equivalent_value(value_at_index)\n            return self.get_lowest_equivalent_value(value_at_index)\n    return 0", "docstring": "Get the value for a given percentile\n\nArgs:\npercentile: a float in [0.0..100.0]\nReturns:\nthe value for the given percentile", "source": "codesearchnet"}
{"code": "def __init__(self, enum, value=None, tag=enums.Tags.DEFAULT):\n        \n        super(Enumeration, self).__init__(tag, enums.Types.ENUMERATION)\n\n        self.value = value\n        self.enum = enum\n        self.length = Enumeration.LENGTH\n\n        self.validate()", "docstring": "Create an Enumeration.\n\nArgs:\nenum (class): The enumeration class of which value is a member\n(e.g., Tags). Required.\nvalue (int): The value of the Enumeration, must be an integer\n(e.g., Tags.DEFAULT). Optional, defaults to None.\ntag (Tags): An enumeration defining the tag of the Enumeration.\nOptional, defaults to Tags.DEFAULT.", "source": "juraj-google-style"}
{"code": "def has_node_with_value(self, value):\n    for node in self.node_list:\n        if (node.value == value):\n            return True\n    else:\n        return False", "docstring": "Whether any node in ``self.node_list`` has the value ``value``.\n\nArgs:\nvalue (Any): The value to find in ``self.node_list``\n\nReturns: bool\n\nExample:\n>>> from blur.markov.node import Node\n>>> node_1 = Node('One')\n>>> graph = Graph([node_1])\n>>> graph.has_node_with_value('One')\nTrue\n>>> graph.has_node_with_value('Foo')\nFalse", "source": "codesearchnet"}
{"code": "def _add_dispatch(x, y, name=None):\n    if ops.is_auto_dtype_conversion_enabled():\n        return add(x, y, name=name)\n    if not isinstance(y, tensor_lib.Tensor) and (not isinstance(y, sparse_tensor.SparseTensor)):\n        y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name='y')\n    if x.dtype == dtypes.string:\n        return gen_math_ops.add(x, y, name=name)\n    else:\n        return gen_math_ops.add_v2(x, y, name=name)", "docstring": "The operation invoked by the `Tensor.__add__` operator.\n\nPurpose in the API:\n\nThis method is exposed in TensorFlow's API so that library developers\ncan register dispatching for `Tensor.__add__` to allow it to handle\ncustom composite tensors & other custom objects.\n\nThe API symbol is not intended to be called by users directly and does\nappear in TensorFlow's generated documentation.\n\nArgs:\nx: The left-hand side of the `+` operator.\ny: The right-hand side of the `+` operator.\nname: an optional name for the operation.\n\nReturns:\nThe result of the elementwise `+` operation.", "source": "github-repos"}
{"code": "def __init__(self, table, num_oov_buckets, hasher_spec=FastHashSpec, name=None, key_dtype=None):\n    if name:\n        name = name.rstrip('/')\n    if table:\n        if key_dtype is None:\n            key_dtype = table.key_dtype\n        supported_table_key_dtypes = (dtypes.int64, dtypes.string)\n        if table.key_dtype not in supported_table_key_dtypes:\n            raise TypeError(f'Invalid `key_dtype`, expected one of {supported_table_key_dtypes}, received {key_dtype}.')\n        if table.key_dtype.is_integer != key_dtype.is_integer:\n            raise TypeError('Invalid `key dtype`, expected %s but got %s.' % ('integer' if key_dtype.is_integer else 'non-integer', table.key_dtype))\n        if table.value_dtype != dtypes.int64:\n            raise TypeError('Invalid `value_dtype`: expected int64 but got %s.' % table.value_dtype)\n        self._table = table\n        name = name or self._table.name\n    else:\n        if num_oov_buckets <= 0:\n            raise ValueError('`oov_buckets` must be > 0 if no `table` is supplied.')\n        key_dtype = dtypes.string if key_dtype is None else key_dtype\n        self._table = None\n        name = name or 'hash_bucket'\n    if not key_dtype.is_integer and dtypes.string != key_dtype:\n        raise TypeError(f'Invalid `key_dtype`, expected integer or string, got {key_dtype}.')\n    self._num_oov_buckets = num_oov_buckets\n    if not isinstance(hasher_spec, HasherSpec):\n        raise TypeError(f'`hasher_spec` must be of type HasherSpec, got {type(hasher_spec)}.')\n    self._hasher_spec = hasher_spec\n    if name:\n        self._table_name = name.split('/')[-1]\n    else:\n        self._table_name = None\n    super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64)", "docstring": "Construct a `IdTableWithHashBuckets` object.\n\nArgs:\ntable: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.\nnum_oov_buckets: Number of buckets to use for out-of-vocabulary keys.\nhasher_spec: A `HasherSpec` to specify the hash function to use for\nassignation of out-of-vocabulary buckets  (optional).\nname: A name for the operation (optional).\nkey_dtype: Data type of keys passed to `lookup`. Defaults to\n`table.key_dtype` if `table` is specified, otherwise `tf.string`. Must\nbe string or integer, and must be castable to `table.key_dtype`.\n\nRaises:\nValueError: when `table` in None and `num_oov_buckets` is not positive.\nTypeError: when `hasher_spec` is invalid.", "source": "github-repos"}
{"code": "def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:\n        \n        self._validate_xoxp_token()\n        return self.api_call(\"users.setPhoto\", files={\"image\": image}, data=kwargs)", "docstring": "Set the user profile photo\n\nArgs:\nimage (str): Supply the path of the image you'd like to upload.\ne.g. 'myimage.png'", "source": "juraj-google-style"}
{"code": "def create_box_comments(self, box_key, message, **kwargs):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.boxes_suffix,\n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.comments_suffix\n\t\t\t\t\t\t])\n\n\t\tif not (box_key and message):\n\t\t\treturn requests.codes.bad_request, None\n\n\t\tkwargs.update({'message':message})\n\n\t\tnew_cmt = StreakComment(**kwargs)\n\t\t\n\t\t\n\t\t\n\t\tcode, r_data = self._req('put', uri, new_cmt.to_dict())\n\t\t\n\t\treturn code, r_data", "docstring": "Creates a comments in a box with the provided attributes.\nArgs:\nbox_key\t\t\tkey for box\nmessage\t\t\tmessage string\nkwargs\t\t\t{} see StreakComment object for more information\nreturn\t\t\t(status code, comment dict)", "source": "juraj-google-style"}
{"code": "def Reload(self):\n    with self._generator_mutex:\n        for event in self._generator.Load():\n            self._ProcessEvent(event)\n    return self", "docstring": "Loads all events added since the last call to `Reload`.\n\nIf `Reload` was never called, loads all events in the file.\n\nReturns:\nThe `EventAccumulator`.", "source": "codesearchnet"}
{"code": "def _cast_dict(self, data_dict):\n        \n        for key, value in data_dict.iteritems():\n            data_dict[key] = self._cast_value(value)\n\n        \n        if 'resp_body_data' in data_dict:\n            del data_dict['resp_body_data']\n\n        return data_dict", "docstring": "Internal method that makes sure any dictionary elements\nare properly cast into the correct types, instead of\njust treating everything like a string from the csv file.\n\nArgs:\ndata_dict: dictionary containing bro log data.\n\nReturns:\nCleaned Data dict.", "source": "juraj-google-style"}
{"code": "def create_sns_event(app_name, env, region, rules):\n    session = boto3.Session(profile_name=env, region_name=region)\n    sns_client = session.client('sns')\n    topic_name = rules.get('topic')\n    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)\n    topic_arn = get_sns_topic_arn(topic_name=topic_name, account=env, region=region)\n    protocol = 'lambda'\n    statement_id = '{}_sns_{}'.format(app_name, topic_name)\n    principal = 'sns.amazonaws.com'\n    add_lambda_permissions(function=lambda_alias_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=topic_arn, env=env, region=region)\n    sns_client.subscribe(TopicArn=topic_arn, Protocol=protocol, Endpoint=lambda_alias_arn)\n    LOG.debug('SNS Lambda event created')\n    LOG.info('Created SNS event subscription on topic %s', topic_name)", "docstring": "Create SNS lambda event from rules.\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\nrules (str): Trigger rules from the settings", "source": "codesearchnet"}
{"code": "def checkout(request, user_id=None):\n    if (user_id is not None):\n        if request.user.is_staff:\n            user = User.objects.get(id=int(user_id))\n        else:\n            raise Http404()\n    else:\n        user = request.user\n    current_cart = CartController.for_user(user)\n    if (('fix_errors' in request.GET) and (request.GET['fix_errors'] == 'true')):\n        current_cart.fix_simple_errors()\n    try:\n        current_invoice = InvoiceController.for_cart(current_cart.cart)\n    except ValidationError as ve:\n        return _checkout_errors(request, ve)\n    return redirect('invoice', current_invoice.invoice.id)", "docstring": "Runs the checkout process for the current cart.\n\nIf the query string contains ``fix_errors=true``, Registrasion will attempt\nto fix errors preventing the system from checking out, including by\ncancelling expired discounts and vouchers, and removing any unavailable\nproducts.\n\nArguments:\nuser_id (castable to int):\nIf the requesting user is staff, then the user ID can be used to\nrun checkout for another user.\nReturns:\nrender or redirect:\nIf the invoice is generated successfully, or there's already a\nvalid invoice for the current cart, redirect to ``invoice``.\nIf there are errors when generating the invoice, render\n``registrasion/checkout_errors.html`` with the following data::\n\n{\n\"error_list\", [str, ...]  # The errors to display.\n}", "source": "codesearchnet"}
{"code": "def diff_lineMode(self, text1, text2, deadline):\n    \n\n    \n    (text1, text2, linearray) = self.diff_linesToChars(text1, text2)\n\n    diffs = self.diff_main(text1, text2, False, deadline)\n\n    \n    self.diff_charsToLines(diffs, linearray)\n    \n    self.diff_cleanupSemantic(diffs)\n\n    \n    \n    diffs.append((self.DIFF_EQUAL, ''))\n    pointer = 0\n    count_delete = 0\n    count_insert = 0\n    text_delete = ''\n    text_insert = ''\n    while pointer < len(diffs):\n      if diffs[pointer][0] == self.DIFF_INSERT:\n        count_insert += 1\n        text_insert += diffs[pointer][1]\n      elif diffs[pointer][0] == self.DIFF_DELETE:\n        count_delete += 1\n        text_delete += diffs[pointer][1]\n      elif diffs[pointer][0] == self.DIFF_EQUAL:\n        \n        if count_delete >= 1 and count_insert >= 1:\n          \n          subDiff = self.diff_main(text_delete, text_insert, False, deadline)\n          diffs[pointer - count_delete - count_insert : pointer] = subDiff\n          pointer = pointer - count_delete - count_insert + len(subDiff)\n        count_insert = 0\n        count_delete = 0\n        text_delete = ''\n        text_insert = ''\n\n      pointer += 1\n\n    diffs.pop()  \n\n    return diffs", "docstring": "Do a quick line-level diff on both strings, then rediff the parts for\ngreater accuracy.\nThis speedup can produce non-minimal diffs.\n\nArgs:\ntext1: Old string to be diffed.\ntext2: New string to be diffed.\ndeadline: Time when the diff should be complete by.\n\nReturns:\nArray of changes.", "source": "juraj-google-style"}
{"code": "def __init__(self, instrumentation_key, wsgi_application, *args, **kwargs):\n        \n        if not instrumentation_key:\n            raise Exception('Instrumentation key was required but not provided')\n        if not wsgi_application:\n            raise Exception('WSGI application was required but not provided')\n        telemetry_channel = kwargs.pop('telemetry_channel', None)\n        if not telemetry_channel:\n            sender = applicationinsights.channel.AsynchronousSender()\n            queue = applicationinsights.channel.AsynchronousQueue(sender)\n            telemetry_channel = applicationinsights.channel.TelemetryChannel(None, queue)\n        self.client = applicationinsights.TelemetryClient(instrumentation_key, telemetry_channel)\n        self.client.context.device.type = \"PC\"\n        self._wsgi_application = wsgi_application\n        self._common_properties = kwargs.pop('common_properties', {})", "docstring": "Initialize a new instance of the class.\n\nArgs:\ninstrumentation_key (str). the instrumentation key to use while sending telemetry to the service.\\n\nwsgi_application (func). the WSGI application that we're wrapping.", "source": "juraj-google-style"}
{"code": "def __update(self, score, values, error):\n        \n\n        if self._minimize:\n            if self._best_score is None or score > self._best_score:\n                self._best_score = score\n                self._best_values = values.copy()\n                self._best_error = error\n                self._logger.log(\n                    'debug',\n                    'New best food source memorized: {}'.format(\n                        self._best_error\n                    )\n                )\n                return True\n        elif not self._minimize:\n            if self._best_score is None or score < self._best_score:\n                self._best_score = score\n                self._best_values = values.copy()\n                self._best_error = error\n                self._logger.log(\n                    'debug',\n                    'New best food source memorized: {}'.format(\n                        self._best_error\n                    )\n                )\n                return True\n        return False", "docstring": "Update the best score and values if the given score is better than\nthe current best score\n\nArgs:\nscore (float): new score to evaluate\nvalues (list): new value ranges to evaluate\nerror (float): new fitness function return value to evaluate\n\nReturns:\nbool: True if new score is better, False otherwise", "source": "juraj-google-style"}
{"code": "def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):\n    (h, w) = dst_img.shape[:2]\n    return imresize(img, (w, h), return_scale, interpolation)", "docstring": "Resize image to the same size of a given image.\n\nArgs:\nimg (ndarray): The input image.\ndst_img (ndarray): The target image.\nreturn_scale (bool): Whether to return `w_scale` and `h_scale`.\ninterpolation (str): Same as :func:`resize`.\n\nReturns:\ntuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or\n`resized_img`.", "source": "codesearchnet"}
{"code": "def ComponentsToPath(components):\n    precondition.AssertIterableType(components, Text)\n    for component in components:\n        if (not component):\n            raise ValueError('Empty path component in: {}'.format(components))\n        if ('/' in component):\n            raise ValueError(\"Path component with '/' in: {}\".format(components))\n    if components:\n        return ('/' + '/'.join(components))\n    else:\n        return ''", "docstring": "Converts a list of path components to a canonical path representation.\n\nArgs:\ncomponents: A sequence of path components.\n\nReturns:\nA canonical MySQL path representation.", "source": "codesearchnet"}
{"code": "def get_subscribers(object_type: str) -> List[str]:\n    \n    return DB.get_list(_keys.subscribers(object_type))", "docstring": "Get the list of subscribers to events of the object type.\n\nArgs:\nobject_type (str): Type of object.\n\nReturns:\nList[str], list of subscriber names.", "source": "juraj-google-style"}
{"code": "def __call__(self, request: beam.Row, *args, **kwargs):\n    try:\n        entity_id = request._asdict()[self.row_key]\n    except KeyError:\n        raise KeyError('Enrichment requests to Vertex AI Feature Store should contain a field: %s in the input `beam.Row` to join the input with fetched response. This is used as the `FeatureViewDataKey` to fetch feature values corresponding to this key.' % self.row_key)\n    try:\n        selector = aiplatform.gapic.FeatureSelector(id_matcher=aiplatform.gapic.IdMatcher(ids=self.feature_ids))\n        response = self.client.read_feature_values(request=aiplatform.gapic.ReadFeatureValuesRequest(entity_type=self.entity_type_path, entity_id=entity_id, feature_selector=selector))\n    except NotFound:\n        raise ValueError(_not_found_err_message(self.feature_store_id, self.entity_type_id, entity_id))\n    response_dict = {}\n    proto_to_dict = proto.Message.to_dict(response.entity_view)\n    for key, msg in zip(response.header.feature_descriptors, proto_to_dict['data']):\n        if msg and 'value' in msg:\n            response_dict[key.id] = list(msg['value'].values())[0]\n        elif self.exception_level == ExceptionLevel.RAISE:\n            raise ValueError(_not_found_err_message(self.feature_store_id, self.entity_type_id, entity_id))\n        elif self.exception_level == ExceptionLevel.WARN:\n            _LOGGER.warning(_not_found_err_message(self.feature_store_id, self.entity_type_id, entity_id))\n    return (request, beam.Row(**response_dict))", "docstring": "Fetches feature value for an entity-id from\nVertex AI Feature Store (Legacy).\n\nArgs:\nrequest: the input `beam.Row` to enrich.", "source": "github-repos"}
{"code": "def issorted(list_, op=operator.le):\n    return all((op(list_[ix], list_[(ix + 1)]) for ix in range((len(list_) - 1))))", "docstring": "Determines if a list is sorted\n\nArgs:\nlist_ (list):\nop (func): sorted operation (default=operator.le)\n\nReturns:\nbool : True if the list is sorted", "source": "codesearchnet"}
{"code": "def _is_current_explicit_device(device_type):\n    device_type = device_type.upper()\n    if device_type not in ['CPU', 'GPU']:\n        raise ValueError('`device_type` should be either \"CPU\" or \"GPU\".')\n    device = _get_current_tf_device()\n    return device is not None and device.device_type == device_type.upper()", "docstring": "Check if the current device is explicitly set on the device type specified.\n\nArgs:\ndevice_type: A string containing `GPU` or `CPU` (case-insensitive).\n\nReturns:\nA boolean indicating if the current device scope is explicitly set on the\ndevice type.\n\nRaises:\nValueError: If the `device_type` string indicates an unsupported device.", "source": "github-repos"}
{"code": "def glyph_has_ink(font: TTFont, name: Text) -> bool:\n  \n  if 'glyf' in font:\n    return ttf_glyph_has_ink(font, name)\n  elif ('CFF ' in font) or ('CFF2' in font):\n    return cff_glyph_has_ink(font, name)\n  else:\n    raise Exception(\"Could not find 'glyf', 'CFF ', or 'CFF2' table.\")", "docstring": "Checks if specified glyph has any ink.\n\nThat is, that it has at least one defined contour associated.\nComposites are considered to have ink if any of their components have ink.\nArgs:\nfont:       the font\nglyph_name: The name of the glyph to check for ink.\nReturns:\nTrue if the font has at least one contour associated with it.", "source": "juraj-google-style"}
{"code": "def sample_observed_state(self, s: pd.Series) -> Dict:\n    return {n[0]: {i.name: np.random.normal((s[n[0]] * i.mean), i.stdev) for i in n[1]['indicators'].values()} for n in self.nodes(data=True)}", "docstring": "Sample observed state vector. This is the implementation of the\nemission function.\n\nArgs:\ns: Latent state vector.\n\nReturns:\nObserved state vector.", "source": "codesearchnet"}
{"code": "def _ip_int_from_prefix(self, prefixlen=None):\n        \n        if prefixlen is None:\n            prefixlen = self._prefixlen\n        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)", "docstring": "Turn the prefix length netmask into a int for comparison.\n\nArgs:\nprefixlen: An integer, the prefix length.\n\nReturns:\nAn integer.", "source": "juraj-google-style"}
{"code": "def RegisterDecoder(cls, decoder):\n    \n    encoding_method = decoder.ENCODING_METHOD.lower()\n    if encoding_method in cls._decoders:\n      raise KeyError(\n          'Decoder for encoding method: {0:s} already set.'.format(\n              decoder.ENCODING_METHOD))\n\n    cls._decoders[encoding_method] = decoder", "docstring": "Registers a decoder for a specific encoding method.\n\nArgs:\ndecoder (type): decoder class.\n\nRaises:\nKeyError: if the corresponding decoder is already set.", "source": "juraj-google-style"}
{"code": "def take_bug_reports(ads, test_name=None, begin_time=None, destination=None):\n    if begin_time is None:\n        begin_time = mobly_logger.get_log_file_timestamp()\n    else:\n        begin_time = mobly_logger.sanitize_filename(str(begin_time))\n\n    def take_br(test_name, begin_time, ad, destination):\n        ad.take_bug_report(test_name=test_name, begin_time=begin_time, destination=destination)\n    args = [(test_name, begin_time, ad, destination) for ad in ads]\n    utils.concurrent_exec(take_br, args)", "docstring": "Takes bug reports on a list of android devices.\n\nIf you want to take a bug report, call this function with a list of\nandroid_device objects in on_fail. Bug reports will be taken on all the\ndevices in the list concurrently. Taking a bug report takes a relatively long\ntime, so use this cautiously.\n\nArgs:\nads: A list of AndroidDevice instances.\ntest_name: Name of the test method that triggered this bug report.\nIf None, the default name \"bugreport\" will be used.\nbegin_time: timestamp taken when the test started, can be either\nstring or int. If None, the current time will be used.\ndestination: string, path to the directory where the bugreport\nshould be saved.", "source": "github-repos"}
{"code": "def get_nn_images(self, structure, n):\n    return [e['image'] for e in self.get_nn_info(structure, n)]", "docstring": "Get image location of all near neighbors of site with index n in\nstructure.\n\nArgs:\nstructure (Structure): input structure.\nn (integer): index of site for which to determine the image\nlocation of near neighbors.\nReturns:\nimages (list of 3D integer array): image locations of\nnear neighbors.", "source": "codesearchnet"}
{"code": "def grid_reload_from_name(job_name):\n    gk = get_api_client()\n    sites = get_all_sites_obj()\n    jobs = []\n    for site in [s for s in sites if (s.uid not in gk.excluded_site)]:\n        logger.info(('Reloading %s from %s' % (job_name, site.uid)))\n        _jobs = site.jobs.list(name=job_name, state='waiting,launching,running')\n        if (len(_jobs) == 1):\n            logger.info(('Reloading %s from %s' % (_jobs[0].uid, site.uid)))\n            jobs.append(_jobs[0])\n        elif (len(_jobs) > 1):\n            raise EnosG5kDuplicateJobsError(site, job_name)\n    return jobs", "docstring": "Reload all running or pending jobs of Grid'5000 with a given name.\n\nBy default all the sites will be searched for jobs with the name\n``job_name``. Using EnOSlib there can be only one job per site with name\n``job_name``.\n\nNote that it honors the ``excluded_sites`` attribute of the client so the\nscan can be reduced.\n\nArgs:\njob_name (str): the job name\n\n\nReturns:\nThe list of the python-grid5000 jobs retrieved.\n\nRaises:\nEnosG5kDuplicateJobsError: if there are several jobs with the same name\non a site.", "source": "codesearchnet"}
{"code": "def get_relavent_units(self):\n    relavent_units = {}\n    for (location, unit) in self.units.items():\n        if self.unit_is_related(location, self.worksheet):\n            relavent_units[location] = unit\n    return relavent_units", "docstring": "Retrieves the relevant units for this data block.\n\nReturns:\nAll flags related to this block.", "source": "codesearchnet"}
{"code": "def _to_json_like(self, include_defaults):\n    all_attrs = self.properties_with_values(include_defaults=include_defaults)\n    subtype = getattr(self.__class__, '__subtype__', None)\n    if ((subtype is not None) and (subtype != self.__class__.__view_model__)):\n        attrs = {}\n        for (attr, value) in all_attrs.items():\n            if (attr in self.__class__.__dict__):\n                continue\n            else:\n                attrs[attr] = value\n    else:\n        attrs = all_attrs\n    for (k, v) in attrs.items():\n        if (isinstance(v, float) and (v == float('inf'))):\n            attrs[k] = None\n    return attrs", "docstring": "Returns a dictionary of the attributes of this object, in\na layout corresponding to what BokehJS expects at unmarshalling time.\n\nThis method does not convert \"Bokeh types\" into \"plain JSON types,\"\nfor example each child Model will still be a Model, rather\nthan turning into a reference, numpy isn't handled, etc.\nThat's what \"json like\" means.\n\nThis method should be considered \"private\" or \"protected\",\nfor use internal to Bokeh; use ``to_json()`` instead because\nit gives you only plain JSON-compatible types.\n\nArgs:\ninclude_defaults (bool) : whether to include attributes\nthat haven't been changed from the default.", "source": "codesearchnet"}
{"code": "class MgpstrTokenizer(PreTrainedTokenizer):\n    vocab_files_names = VOCAB_FILES_NAMES\n\n    def __init__(self, vocab_file, unk_token='[GO]', bos_token='[GO]', eos_token='[s]', pad_token='[GO]', **kwargs):\n        with open(vocab_file, encoding='utf-8') as vocab_handle:\n            self.vocab = json.load(vocab_handle)\n        self.decoder = {v: k for k, v in self.vocab.items()}\n        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)\n\n    @property\n    def vocab_size(self):\n        return len(self.vocab)\n\n    def get_vocab(self):\n        vocab = dict(self.vocab).copy()\n        vocab.update(self.added_tokens_encoder)\n        return vocab\n\n    def _tokenize(self, text):\n        \n        char_tokens = []\n        for s in text:\n            char_tokens.extend(s)\n        return char_tokens\n\n    def _convert_token_to_id(self, token):\n        \n        return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n    def _convert_id_to_token(self, index):\n        \n        return self.decoder.get(index)\n\n    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n        if not os.path.isdir(save_directory):\n            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))\n            return\n        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n        with open(vocab_file, 'w', encoding='utf-8') as f:\n            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\\n')\n        return (vocab_file,)", "docstring": "Construct a MGP-STR char tokenizer.\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to\nthis superclass for more information regarding those methods.\n\nArgs:\nvocab_file (`str`):\nPath to the vocabulary file.\nunk_token (`str`, *optional*, defaults to `\"[GO]\"`):\nThe unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\ntoken instead.\nbos_token (`str`, *optional*, defaults to `\"[GO]\"`):\nThe beginning of sequence token.\neos_token (`str`, *optional*, defaults to `\"[s]\"`):\nThe end of sequence token.\npad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"[GO]\"`):\nA special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by\nattention mechanisms or loss computation.", "source": "github-repos"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    display_name = parser_mediator.GetDisplayName()\n    if (not zipfile.is_zipfile(file_object)):\n        raise errors.UnableToParseFile('[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(self.NAME, display_name, 'Not a Zip file.'))\n    try:\n        zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)\n        self._ProcessZipFileWithPlugins(parser_mediator, zip_file)\n        zip_file.close()\n    except (zipfile.BadZipfile, struct.error) as exception:\n        raise errors.UnableToParseFile('[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(self.NAME, display_name, exception))", "docstring": "Parses a compound ZIP file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def split(self, bitindex):\n    if (bitindex < 0):\n        raise ValueError('bitindex must be larger or equal to 0.')\n    if (bitindex > len(self)):\n        raise ValueError((\"bitindex larger than the array's size. Len: %s; bitindex: %s\" % (len(self), bitindex)))\n    if (bitindex == 0):\n        return (None, self)\n    if (bitindex == len(self)):\n        return (self, None)\n    left = TDOPromise(self._chain, self._bitstart, bitindex, _parent=self)\n    right = TDOPromise(self._chain, 0, (len(self) - bitindex), _parent=self)\n    self._components = []\n    self._addsub(left, 0)\n    self._addsub(right, bitindex)\n    return (left, right)", "docstring": "Split a promise into two promises at the provided index.\n\nA common operation in JTAG is reading/writing to a\nregister. During the operation, the TMS pin must be low, but\nduring the writing of the last bit, the TMS pin must be\nhigh. Requiring all reads or writes to have full arbitrary\ncontrol over the TMS pin is unrealistic.\n\nSplitting a promise into two sub promises is a way to mitigate\nthis issue. The final read bit is its own subpromise that can\nbe associated with a different primitive than the 'rest' of\nthe subpromise.\n\nReturns:\nTwo TDOPromise instances: the 'Rest' and the 'Tail'.\nThe 'Rest' is the first chunk of the original promise.\nThe 'Tail' is a single bit sub promise for the final bit\nin the operation\n\nIf the 'Rest' would have a length of 0, None is returned", "source": "codesearchnet"}
{"code": "def HasDateExceptionOn(self, date, exception_type=_EXCEPTION_TYPE_ADD):\n    \n    if date in self.date_exceptions:\n      return exception_type == self.date_exceptions[date][0]\n    return False", "docstring": "Test if this service period has a date exception of the given type.\n\nArgs:\ndate: a string of form \"YYYYMMDD\"\nexception_type: the exception type the date should have. Defaults to\n_EXCEPTION_TYPE_ADD\n\nReturns:\nTrue iff this service has service exception of specified type at date.", "source": "juraj-google-style"}
{"code": "def random_brightness(x, brightness_range, scale=True):\n    if len(brightness_range) != 2:\n        raise ValueError(f'`brightness_range should be tuple or list of two floats. Received: {brightness_range}')\n    u = np.random.uniform(brightness_range[0], brightness_range[1])\n    return apply_brightness_shift(x, u, scale)", "docstring": "Performs a random brightness shift.\n\nDEPRECATED.\n\nArgs:\nx: Input tensor. Must be 3D.\nbrightness_range: Tuple of floats; brightness range.\nscale: Whether to rescale the image such that minimum and maximum values\nare 0 and 255 respectively. Default: True.\n\nReturns:\nNumpy image tensor.\n\nRaises:\nValueError if `brightness_range` isn't a tuple.", "source": "github-repos"}
{"code": "def restore(self, x):\n    with tf.name_scope('pad_reduce/restore'):\n        x = tf.scatter_nd(indices=self.nonpad_ids, updates=x, shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0))\n    return x", "docstring": "Add padding back to the given tensor.\n\nArgs:\nx (tf.Tensor): of shape [dim_compressed,...]\n\nReturns:\na tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The\ndim is restored from the original reference tensor", "source": "codesearchnet"}
{"code": "def _LinearFoldByteStream(self, mapped_value, **unused_kwargs):\n    \n    try:\n      return self._operation.WriteTo(mapped_value)\n\n    except Exception as exception:\n      error_string = (\n          'Unable to write: {0:s} to byte stream with error: {1!s}').format(\n              self._data_type_definition.name, exception)\n      raise errors.FoldingError(error_string)", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def add_get_parameters(url, parameters, percent_encode=True):\n    url_parts = list(parse.urlparse(url))\n    query = dict(parse.parse_qs(url_parts[4]))\n    query.update(parameters)\n    if percent_encode:\n        url_parts[4] = parse.urlencode(query)\n    else:\n        url_parts[4] = '&'.join([((key + '=') + value) for (key, value) in query.items()])\n    return parse.urlunparse(url_parts)", "docstring": "Utility function to add GET parameters to an existing URL.\n\nArgs:\nparameters\nA dictionary of the parameters that should be added.\npercent_encode\nWhether the query parameters should be percent encoded.\n\nReturns:\nThe updated URL.", "source": "codesearchnet"}
{"code": "def set_router_id(self, value=None, default=False, disable=False):\n    cmd = self.command_builder('router-id', value=value, default=default, disable=disable)\n    return self.configure_ospf(cmd)", "docstring": "Controls the router id property for the OSPF Process\n\nArgs:\nvalue (str): The router-id value\ndefault (bool): Controls the use of the default keyword\ndisable (bool): Controls the use of the no keyword\nReturns:\nbool: True if the commands are completed successfully", "source": "codesearchnet"}
{"code": "class DacDecoderOutput(ModelOutput):\n    audio_values: Optional[torch.FloatTensor] = None", "docstring": "Args:\naudio_values (`torch.FloatTensor`  of shape `(batch_size, input_length)`, *optional*):\nDecoded audio values, obtained using the decoder part of Dac.", "source": "github-repos"}
{"code": "def unique(self, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import unique_op\n    return unique_op._unique(self, name)", "docstring": "A transformation that discards duplicate elements of a `Dataset`.\n\nUse this transformation to produce a dataset that contains one instance of\neach unique element in the input. For example:\n\n>>> dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])\n>>> dataset = dataset.unique()\n>>> sorted([a.item() for a in dataset.as_numpy_iterator()])\n[1, 2, 37]\n\nNote: This transformation only supports datasets which fit into memory\nand have elements of either `tf.int32`, `tf.int64` or `tf.string` type.\n\nArgs:\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA new `Dataset` with the transformation applied as described above.", "source": "github-repos"}
{"code": "def CreateMock(self, class_to_mock):\n    new_mock = MockObject(class_to_mock)\n    self._mock_objects.append(new_mock)\n    return new_mock", "docstring": "Create a new mock object.\n\nArgs:\n# class_to_mock: the class to be mocked\nclass_to_mock: class\n\nReturns:\nMockObject that can be used as the class_to_mock would be.", "source": "codesearchnet"}
{"code": "def get_SZ(self, psd, geometry):\n    if ((self._S_table is None) or (self._Z_table is None)):\n        raise AttributeError('Initialize or load the scattering table first.')\n    if ((not isinstance(psd, PSD)) or (self._previous_psd != psd)):\n        self._S_dict = {}\n        self._Z_dict = {}\n        psd_w = psd(self._psd_D)\n        for geom in self.geometries:\n            self._S_dict[geom] = trapz((self._S_table[geom] * psd_w), self._psd_D)\n            self._Z_dict[geom] = trapz((self._Z_table[geom] * psd_w), self._psd_D)\n        self._previous_psd = psd\n    return (self._S_dict[geometry], self._Z_dict[geometry])", "docstring": "Compute the scattering matrices for the given PSD and geometries.\n\nReturns:\nThe new amplitude (S) and phase (Z) matrices.", "source": "codesearchnet"}
{"code": "def update(self, attributes=None):\n    resource_type = self._resource_type()\n    resource_path = self._resource_path()\n    session = self._session\n    singleton = self.is_singleton()\n    id = (None if singleton else self.id)\n    url = session._build_url(resource_path, id)\n    attributes = build_request_body(resource_type, self.id, attributes=attributes)\n    process = self._mk_one(session, singleton=singleton)\n    return session.patch(url, CB.json(200, process), json=attributes)", "docstring": "Update this resource.\n\nNot all aspects of a resource can be updated. If the server\nrejects updates an error will be thrown.\n\nKeyword Arguments:\n\nattributes(dict): Attributes that are to be updated\n\nReturns:\n\nResource: A new instance of this type of resource with the\nupdated attribute. On errors an exception is thrown.", "source": "codesearchnet"}
{"code": "def chgroups(name, groups, append=True):\n    if six.PY2:\n        name = _to_unicode(name)\n    if isinstance(groups, string_types):\n        groups = groups.split(',')\n    groups = [x.strip(' *') for x in groups]\n    if six.PY2:\n        groups = [_to_unicode(x) for x in groups]\n    ugrps = set(list_groups(name))\n    if (ugrps == set(groups)):\n        return True\n    name = _cmd_quote(name)\n    if (not append):\n        for group in ugrps:\n            group = _cmd_quote(group).lstrip(\"'\").rstrip(\"'\")\n            if (group not in groups):\n                cmd = 'net localgroup \"{0}\" {1} /delete'.format(group, name)\n                __salt__['cmd.run_all'](cmd, python_shell=True)\n    for group in groups:\n        if (group in ugrps):\n            continue\n        group = _cmd_quote(group).lstrip(\"'\").rstrip(\"'\")\n        cmd = 'net localgroup \"{0}\" {1} /add'.format(group, name)\n        out = __salt__['cmd.run_all'](cmd, python_shell=True)\n        if (out['retcode'] != 0):\n            log.error(out['stdout'])\n            return False\n    agrps = set(list_groups(name))\n    return (len((ugrps - agrps)) == 0)", "docstring": "Change the groups this user belongs to, add append=False to make the user a\nmember of only the specified groups\n\nArgs:\nname (str): The user name for which to change groups\n\ngroups (str, list): A single group or a list of groups to assign to the\nuser. For multiple groups this can be a comma delimited string or a\nlist.\n\nappend (bool, optional): True adds the passed groups to the user's\ncurrent groups. False sets the user's groups to the passed groups\nonly. Default is True.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.chgroups jsnuffy Administrators,Users True", "source": "codesearchnet"}
{"code": "def parse(self, buf: memoryview, params: Params) \\\n            -> Tuple[Command, memoryview]:\n        \n        try:\n            tag, buf = Tag.parse(buf, params)\n        except NotParseable as exc:\n            return InvalidCommand(params, exc), buf[0:0]\n        else:\n            params = params.copy(tag=tag.value)\n        cmd_parts: List[bytes] = []\n        while True:\n            try:\n                _, buf = Space.parse(buf, params)\n                atom, buf = Atom.parse(buf, params)\n                cmd_parts.append(atom.value.upper())\n            except NotParseable as exc:\n                return InvalidCommand(params, exc), buf[0:0]\n            command = b' '.join(cmd_parts)\n            cmd_type = self.commands.get(command)\n            if not cmd_type:\n                return InvalidCommand(params, None, command), buf[0:0]\n            elif not cmd_type.compound:\n                break\n        params = params.copy(command_name=command)\n        try:\n            return cmd_type.parse(buf, params)\n        except NotParseable as exc:\n            return InvalidCommand(params, exc, command, cmd_type), buf[0:0]", "docstring": "Parse the given bytes into a command. The basic syntax is a tag\nstring, a command name, possibly some arguments, and then an endline.\nIf the command has a complete structure but cannot be parsed, an\n:class:`InvalidCommand` is returned.\n\nArgs:\nbuf: The bytes to parse.\nparams: The parsing parameters.", "source": "juraj-google-style"}
{"code": "def within(self, other: 'Interval', inclusive: bool=True) -> bool:\n    if (not other):\n        return False\n    if inclusive:\n        return ((self.start >= other.start) and (self.end <= other.end))\n    else:\n        return ((self.start > other.start) and (self.end < other.end))", "docstring": "Is this interval contained within the other?\n\nArgs:\nother: the :class:`Interval` to check\ninclusive: use inclusive rather than exclusive range checks?", "source": "codesearchnet"}
{"code": "def worker_task(work_item, config):\n    global _workspace\n    _ensure_workspace(config)\n    result = worker(work_item.module_path, config.python_version, work_item.operator_name, work_item.occurrence, config.test_command, config.timeout)\n    return (work_item.job_id, result)", "docstring": "The celery task which performs a single mutation and runs a test suite.\n\nThis runs `cosmic-ray worker` in a subprocess and returns the results,\npassing `config` to it via stdin.\n\nArgs:\nwork_item: A dict describing a WorkItem.\nconfig: The configuration to use for the test execution.\n\nReturns: An updated WorkItem", "source": "codesearchnet"}
{"code": "def __call__(self, name, value):\n        \n        super(IterableTypeChecker, self).__call__(name, value)\n        if isinstance(self.item_type, type):\n            if not all(isinstance(o, self.item_type) for o in value):\n                raise ValueError(\"All elements of %s must be %s\" % (name, self.item_type))\n        if isinstance(self.min_length, int):\n            if len(value) < self.min_length:\n                raise ValueError(\"%s must be longer than %s (or equal)\" % (name, self.min_length))\n        if isinstance(self.max_length, int):\n            if len(value) > self.max_length:\n                raise ValueError(\"%s must be shorter than %s (or equal)\" % (name, self.max_length))\n        if len(value) == 0 and not self.empty:\n            raise ValueError(\"%s must not be empty\" % name)", "docstring": "Call method.\n\nArgs:\nname (str): the value's name.\nvalue (iterable): the value to check.\n\nRaises:\nValueError: if value is not type iter_type.\nValueError: if any item in value is not type item_type.\nValueError: if value's length is less than min_length.\nValueError: if value's length is more than max_length.\nValueError: if value's length is 0 and emptiness is not allowed.", "source": "juraj-google-style"}
{"code": "def trigger_if_changed(self, obj, old):\n    new_value = self.__get__(obj, obj.__class__)\n    if (not self.property.matches(old, new_value)):\n        self._trigger(obj, old, new_value)", "docstring": "Send a change event notification if the property is set to a\nvalue that is not equal to ``old``.\n\nArgs:\nobj (HasProps)\nThe object the property is being set on.\n\nold (obj) :\nThe previous value of the property to compare\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def test_fail(self, e=None):\n    self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)", "docstring": "To mark the test as failed in this record.\n\nOnly test_fail does instance check because we want 'assert xxx' to also\nfail the test same way assert_true does.\n\nArgs:\ne: An exception object. It can be an instance of AssertionError or\nmobly.base_test.TestFailure.", "source": "github-repos"}
{"code": "async def _retreive_websocket_info(self):\n    if (self._web_client is None):\n        self._web_client = WebClient(token=self.token, base_url=self.base_url, ssl=self.ssl, proxy=self.proxy, run_async=True, loop=self._event_loop, session=self._session)\n    self._logger.debug('Retrieving websocket info.')\n    if (self.connect_method in ['rtm.start', 'rtm_start']):\n        resp = (await self._web_client.rtm_start())\n    else:\n        resp = (await self._web_client.rtm_connect())\n    url = resp.get('url')\n    if (url is None):\n        msg = 'Unable to retreive RTM URL from Slack.'\n        raise client_err.SlackApiError(message=msg, response=resp)\n    return (url, resp.data)", "docstring": "Retrieves the WebSocket info from Slack.\n\nReturns:\nA tuple of websocket information.\ne.g.\n(\n\"wss://...\",\n{\n\"self\": {\"id\": \"U01234ABC\",\"name\": \"robotoverlord\"},\n\"team\": {\n\"domain\": \"exampledomain\",\n\"id\": \"T123450FP\",\n\"name\": \"ExampleName\"\n}\n}\n)\n\nRaises:\nSlackApiError: Unable to retreive RTM URL from Slack.", "source": "codesearchnet"}
{"code": "def file_config(filename=None):\n    \n    logger.debug('On entry into file_config(), filename = {}'.format(filename))\n\n    if filename is None:\n        filename = CONFIG_DEFAULT_PATH\n\n    logger.debug('file_config() will try to open `{}`'.format(filename))\n    with open(filename) as f:\n        try:\n            config = json.load(f)\n        except ValueError as err:\n            raise exceptions.ConfigurationError(\n                'Failed to parse the JSON configuration from `{}`, {}'.format(filename, err)\n            )\n\n        logger.info('Configuration loaded from `{}`'.format(filename))\n\n    return config", "docstring": "Returns the config values found in a configuration file.\n\nArgs:\nfilename (str): the JSON file with the configuration values.\nIf ``None``, CONFIG_DEFAULT_PATH will be used.\n\nReturns:\ndict: The config values in the specified config file (or the\nfile at CONFIG_DEFAULT_PATH, if filename == None)", "source": "juraj-google-style"}
{"code": "def __init__(self, header_handler, packer, version):\n    \n    self._header_handler = header_handler\n    self._packer = packer\n    self._version = version\n    self._method_proxies = {}", "docstring": "Initializes a SOAP service.\n\nArgs:\nheader_handler: A googleads.common.HeaderHandler instance used to set\nSOAP and HTTP headers.\npacker: A googleads.common.SoapPacker instance used to transform\nentities.\nversion: the version of the current API, e.g. 'v201811'", "source": "juraj-google-style"}
{"code": "def get_property(self, name):\n        \n        for prop in self.resource.properties:\n            if prop.name == name:\n                return prop\n\n        raise AttributeError(name)", "docstring": "Return a named property for a resource, if available. Will raise an `AttributeError` if the property\ndoes not exist\n\nArgs:\nname (str): Name of the property to return\n\nReturns:\n`ResourceProperty`", "source": "juraj-google-style"}
{"code": "def populate_request_data(self, request_args):\n    request_args['auth'] = HTTPBasicAuth(self._username, self._password)\n    return request_args", "docstring": "Add the authentication info to the supplied dictionary.\n\nWe use the `requests.HTTPBasicAuth` class as the `auth` param.\n\nArgs:\n`request_args`: The arguments that will be passed to the request.\nReturns:\nThe updated arguments for the request.", "source": "codesearchnet"}
{"code": "def has_relationship(self, left_id, left_type, right_id, right_type, rel_type='Related To'):\n    data = self.get_object(left_id, left_type)\n    if (not data):\n        raise CRITsOperationalError('Crits Object not found with id {} and type {}'.format(left_id, left_type))\n    if ('relationships' not in data):\n        return False\n    for relationship in data['relationships']:\n        if (relationship['relationship'] != rel_type):\n            continue\n        if (relationship['value'] != right_id):\n            continue\n        if (relationship['type'] != right_type):\n            continue\n        return True\n    return False", "docstring": "Checks if the two objects are related\n\nArgs:\nleft_id: The CRITs ID of the first indicator\nleft_type: The CRITs TLO type of the first indicator\nright_id: The CRITs ID of the second indicator\nright_type: The CRITs TLO type of the second indicator\nrel_type: The relationships type (\"Related To\", etc)\nReturns:\nTrue or False if the relationship exists or not.", "source": "codesearchnet"}
{"code": "def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu, name):\n    layer_components = [conv.Conv2D(output_channels, [3, 3], initializers=self._initializers, regularizers=self._regularizers, rate=dilation_rate, name=('dilated_conv_' + name))]\n    if apply_relu:\n        layer_components.append((lambda net: tf.nn.relu(net, name=('relu_' + name))))\n    return sequential.Sequential(layer_components, name=name)", "docstring": "Create a dilated convolution layer.\n\nArgs:\noutput_channels: int. Number of output channels for each pixel.\ndilation_rate: int. Represents how many pixels each stride offset will\nmove. A value of 1 indicates a standard convolution.\napply_relu: bool. If True, a ReLU non-linearity is added.\nname: string. Name for layer.\n\nReturns:\na sonnet Module for a dilated convolution.", "source": "codesearchnet"}
{"code": "def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):\n    \n    return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and\n            (self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %\n             self._mesh_dimension_name_to_size[mesh_dimension_name] == 0))", "docstring": "Whether this MTF dimension may be assigned to this mesh dimension.\n\nArgs:\nmtf_dimension_name: string, the name of a Mesh TensorFlow dimension.\nmesh_dimension_name: string, the name of a mesh dimension.\n\nReturns:\nA boolean indicating whether the assignment is valid.", "source": "juraj-google-style"}
{"code": "def load_file_to_base64_str(f_path):\n    path = abs_path(f_path)\n    with io.open(path, 'rb') as f:\n        f_bytes = f.read()\n        base64_str = base64.b64encode(f_bytes).decode('utf-8')\n        return base64_str", "docstring": "Loads the content of a file into a base64 string.\n\nArgs:\nf_path: full path to the file including the file name.\n\nReturns:\nA base64 string representing the content of the file in utf-8 encoding.", "source": "codesearchnet"}
{"code": "def get_paths(self, id_or_uri, path_id_or_uri=''):\n    if path_id_or_uri:\n        uri = self._client.build_uri(path_id_or_uri)\n        if ('/paths' not in uri):\n            uri = (((self._client.build_uri(id_or_uri) + '/paths') + '/') + path_id_or_uri)\n    else:\n        uri = (self._client.build_uri(id_or_uri) + '/paths')\n    return self._client.get(uri)", "docstring": "Gets all paths or a specific attachment path for the specified volume attachment.\n\nArgs:\nid_or_uri: Can be either the volume attachment id or the volume attachment uri.\npath_id_or_uri: Can be either the path id or the path uri.\n\nReturns:\ndict: Paths.", "source": "codesearchnet"}
{"code": "def thaw_parameter(self, name):\n    i = self.get_parameter_names(include_frozen=True).index(name)\n    self.unfrozen_mask[i] = True", "docstring": "Thaw a parameter by name\n\nArgs:\nname: The name of the parameter", "source": "codesearchnet"}
{"code": "def remove_cert_binding(name, site, hostheader='', ipaddress='*', port=443):\n    name = six.text_type(name).upper()\n    binding_info = _get_binding_info(hostheader, ipaddress, port)\n    ps_cmd = ['$Site = Get-ChildItem', '-Path', \"'IIS:\\\\Sites'\", '|', 'Where-Object', \" {{ $_.Name -Eq '{0}' }};\".format(site), '$Binding = $Site.Bindings.Collection', '| Where-Object { $_.bindingInformation', \"-Eq '{0}' }};\".format(binding_info), '$Binding.RemoveSslCertificate()']\n    current_cert_bindings = list_cert_bindings(site)\n    if (binding_info not in current_cert_bindings):\n        log.warning('Binding not found: %s', binding_info)\n        return True\n    if (name != current_cert_bindings[binding_info]['certificatehash']):\n        log.debug('Certificate binding already absent: %s', name)\n        return True\n    cmd_ret = _srvmgr(ps_cmd)\n    if (cmd_ret['retcode'] != 0):\n        msg = 'Unable to remove certificate binding: {0}\\nError: {1}'.format(name, cmd_ret['stderr'])\n        raise CommandExecutionError(msg)\n    new_cert_bindings = list_cert_bindings(site)\n    if (binding_info not in new_cert_bindings):\n        log.warning('Binding not found: %s', binding_info)\n        return True\n    if (name != new_cert_bindings[binding_info]['certificatehash']):\n        log.debug('Certificate binding removed successfully: %s', name)\n        return True\n    log.error('Unable to remove certificate binding: %s', name)\n    return False", "docstring": "Remove a certificate from an IIS Web Binding.\n\n.. versionadded:: 2016.11.0\n\n.. note::\n\nThis function only removes the certificate from the web binding. It does\nnot remove the web binding itself.\n\nArgs:\nname (str): The thumbprint of the certificate.\nsite (str): The IIS site name.\nhostheader (str): The host header of the binding.\nipaddress (str): The IP address of the binding.\nport (int): The TCP port of the binding.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.remove_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'", "source": "codesearchnet"}
{"code": "def delay(self, n, start_time):\n    \n    if (n > self.max_retries or\n        (n > self.min_retries and\n         time.time() - start_time > self.max_retry_period)):\n      return -1\n    return min(\n        math.pow(self.backoff_factor, n-1) * self.initial_delay,\n        self.max_delay)", "docstring": "Calculate delay before the next retry.\n\nArgs:\nn: the number of current attempt. The first attempt should be 1.\nstart_time: the time when retry started in unix time.\n\nReturns:\nNumber of seconds to wait before next retry. -1 if retry should give up.", "source": "juraj-google-style"}
{"code": "def RegisterImplementation(source):\n    global _source_implementations\n    if 'name' not in source.__dict__:\n        raise RuntimeError(\"'name' not defined in Source %r\" % (source,))\n    _source_implementations[source.name] = source", "docstring": "Register a Source implementation with the factory method.\n\nSources being registered are expected to have a name attribute,\nunique to themselves.\n\nChild modules are expected to call this method in the file-level\nscope.\n\nArgs:\nsource: A class type that is a subclass of Source\n\nReturns:\nNothing\n\nRaises:\nRuntimeError: no 'name' entry in this source.", "source": "github-repos"}
{"code": "def write(self, array_dict: Dict[str, np.ndarray]) -> None:\n    self._writer.write(_make_example(array_dict))", "docstring": "Writes a dictionary of arrays to the file.\n\nArgs:\narray_dict: A record to write. Should be a dictionary with string keys and\nnumpy array values.", "source": "github-repos"}
{"code": "def update_panel(adapter, panel_name, panel_version, new_version=None, new_date=None):\n    \n    panel_obj = adapter.gene_panel(panel_name, panel_version)\n\n    if not panel_obj:\n        raise IntegrityError(\"Panel %s version %s does not exist\" % (panel_name, panel_version))\n\n    updated_panel = adapter.update_panel(panel_obj, new_version, new_date)\n    \n    panel_id = updated_panel['_id']\n\n    \n    update = {'$set': {}}\n    if new_version:\n        update['$set']['panels.$.version'] = updated_panel['version']\n    if new_date:\n        update['$set']['panels.$.updated_at'] = updated_panel['date']\n    \n    LOG.info('Updating affected cases with {0}'.format(update))\n    \n    query = {'panels': { '$elemMatch': {'panel_name': panel_name}}}\n    adapter.case_collection.update_many(query, update)\n    \n    return updated_panel", "docstring": "Update a gene panel in the database\n\nWe need to update the actual gene panel and then all cases that refers to the panel.\n\nArgs:\nadapter(scout.adapter.MongoAdapter)\npanel_name(str): Unique name for a gene panel\npanel_version(float)\nnew_version(float)\nnew_date(datetime.datetime)\n\nReturns:\nupdated_panel(scout.models.GenePanel): The updated gene panel object", "source": "juraj-google-style"}
{"code": "def insert_query_m(data, table, conn, columns=None, db_type='mysql'):\n    if (len(data) > 10000):\n        _chunk_query(data, 10000, columns, conn, table, db_type)\n    else:\n        if (db_type == 'sqlite'):\n            type_sign = '?'\n        else:\n            type_sign = '%s'\n        type_com = (type_sign + ', ')\n        type = (type_com * (len(data[0]) - 1))\n        type = (type + type_sign)\n        if columns:\n            stmt = (((((('INSERT INTO ' + table) + '( ') + columns) + ') VALUES (') + type) + ')')\n        else:\n            stmt = (((('INSERT INTO ' + table) + ' VALUES (') + type) + ')')\n        cursor = conn.cursor()\n        cursor.executemany(stmt, data)\n        conn.commit()", "docstring": "Insert python list of tuples into SQL table\n\nArgs:\ndata (list): List of tuples\ntable (str): Name of database table\nconn (connection object): database connection object\ncolumns (str): String of column names to use if not assigned then all columns are presumed to be used [Optional]\ndb_type (str): If \"sqlite\" or \"mysql\"", "source": "codesearchnet"}
{"code": "def run_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> PluginScanResult:\n    plugin_class = self._plugins_repository.get_plugin_class_for_command(scan_command)\n    plugin = plugin_class()\n    return plugin.process_task(server_info, scan_command)", "docstring": "Run a single scan command against a server; will block until the scan command has been completed.\n\nArgs:\nserver_info: The server's connectivity information. The test_connectivity_to_server() method must have been\ncalled first to ensure that the server is online and accessible.\nscan_command: The scan command to run against this server.\n\nReturns:\nThe result of the scan command, which will be an instance of the scan command's\ncorresponding PluginScanResult subclass.", "source": "codesearchnet"}
{"code": "def elevation(self, value=0.0):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `elevation`'.format(value))\n        if (value < (- 1000.0)):\n            raise ValueError('value need to be greater or equal -1000.0 for field `elevation`')\n        if (value >= 9999.9):\n            raise ValueError('value need to be smaller 9999.9 for field `elevation`')\n    self._elevation = value", "docstring": "Corresponds to IDD Field `elevation`\n\nArgs:\nvalue (float): value for IDD Field `elevation`\nUnit: m\nDefault value: 0.0\nvalue >= -1000.0\nvalue < 9999.9\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def _PrintWarningCounters(self, storage_counters):\n    warnings_by_pathspec = storage_counters.get('warnings_by_path_spec', {})\n    warnings_by_parser_chain = storage_counters.get('warnings_by_parser_chain', {})\n    if (not warnings_by_parser_chain):\n        self._output_writer.Write('No warnings stored.\\n\\n')\n        return\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Warnings generated per parser', column_names=['Parser (plugin) name', 'Number of warnings'])\n    for (parser_chain, count) in warnings_by_parser_chain.items():\n        parser_chain = (parser_chain or '<No parser>')\n        table_view.AddRow([parser_chain, '{0:d}'.format(count)])\n    table_view.Write(self._output_writer)\n    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Pathspecs with most warnings', column_names=['Number of warnings', 'Pathspec'])\n    top_pathspecs = warnings_by_pathspec.most_common(10)\n    for (pathspec, count) in top_pathspecs:\n        for (path_index, line) in enumerate(pathspec.split('\\n')):\n            if (not line):\n                continue\n            if (path_index == 0):\n                table_view.AddRow(['{0:d}'.format(count), line])\n            else:\n                table_view.AddRow(['', line])\n    table_view.Write(self._output_writer)", "docstring": "Prints a summary of the warnings.\n\nArgs:\nstorage_counters (dict): storage counters.", "source": "codesearchnet"}
{"code": "def stop(self):\n    if (self._status is TaskStatus.STOPPED):\n        return\n    if (self._status is not TaskStatus.STARTED):\n        raise RuntimeError(('Cannot stop %s in state %s' % (self, self._status)))\n    self._stop()\n    STARTED_TASKS.remove(self)\n    self._status = TaskStatus.STOPPED", "docstring": "Stop a task immediately.\n\nRaises:\nRuntimeError: If the task hasn't been started or has already been\nstopped.", "source": "codesearchnet"}
{"code": "def run(self, args):\n        \n        jlink = self.create_jlink(args)\n        mcu = args.name[0].lower()\n        if pylink.unlock(jlink, mcu):\n            print('Successfully unlocked device!')\n        else:\n            print('Failed to unlock device!')", "docstring": "Unlocks the target device.\n\nArgs:\nself (UnlockCommand): the ``UnlockCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "juraj-google-style"}
{"code": "def write_buffers(self, conn, locked=True):\n    if (conn is None):\n        raise ValueError('Cannot write_buffers to connection None')\n    sent = 0\n    for (header, payload) in self._buffers:\n        (yield conn.write_message(header, locked=locked))\n        (yield conn.write_message(payload, binary=True, locked=locked))\n        sent += (len(header) + len(payload))\n    raise gen.Return(sent)", "docstring": "Write any buffer headers and payloads to the given connection.\n\nArgs:\nconn (object) :\nMay be any object with a ``write_message`` method. Typically,\na Tornado ``WSHandler`` or ``WebSocketClientConnection``\n\nlocked (bool) :\n\nReturns:\nint : number of bytes sent", "source": "codesearchnet"}
{"code": "def get_transition_chempots(self, element):\n        \n        if element not in self.elements:\n            raise ValueError(\"get_transition_chempots can only be called with \"\n                             \"elements in the phase diagram.\")\n\n        critical_chempots = []\n        for facet in self.facets:\n            chempots = self._get_facet_chempots(facet)\n            critical_chempots.append(chempots[element])\n\n        clean_pots = []\n        for c in sorted(critical_chempots):\n            if len(clean_pots) == 0:\n                clean_pots.append(c)\n            else:\n                if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:\n                    clean_pots.append(c)\n        clean_pots.reverse()\n        return tuple(clean_pots)", "docstring": "Get the critical chemical potentials for an element in the Phase\nDiagram.\n\nArgs:\nelement: An element. Has to be in the PD in the first place.\n\nReturns:\nA sorted sequence of critical chemical potentials, from less\nnegative to more negative.", "source": "juraj-google-style"}
{"code": "def NewCheckpointReader(filepattern):\n    try:\n        return CheckpointReader(compat.as_bytes(filepattern))\n    except RuntimeError as e:\n        error_translator(e)", "docstring": "A function that returns a CheckPointReader.\n\nArgs:\nfilepattern: The filename.\n\nReturns:\nA CheckpointReader object.", "source": "github-repos"}
{"code": "def assert_scalar_v2(tensor, message=None, name=None):\n    assert_scalar(tensor=tensor, message=message, name=name)", "docstring": "Asserts that the given `tensor` is a scalar.\n\nThis function raises `ValueError` unless it can be certain that the given\n`tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is\nunknown.\n\nThis is always checked statically, so this method returns nothing.\n\nArgs:\ntensor: A `Tensor`.\nmessage: A string to prefix to the default message.\nname:  A name for this operation. Defaults to \"assert_scalar\"\n\nRaises:\nValueError: If the tensor is not scalar (rank 0), or if its shape is\nunknown.", "source": "github-repos"}
{"code": "def IsNamedTuple(component):\n    if not isinstance(component, tuple):\n        return False\n    has_fields = bool(getattr(component, '_fields', None))\n    return has_fields", "docstring": "Return true if the component is a namedtuple.\n\nUnfortunately, Python offers no native way to check for a namedtuple type.\nInstead, we need to use a simple hack which should suffice for our case.\nnamedtuples are internally implemented as tuples, therefore we need to:\n1. Check if the component is an instance of tuple.\n2. Check if the component has a _fields attribute which regular tuples do\nnot have.\n\nArgs:\ncomponent: The component to analyze.\nReturns:\nTrue if the component is a namedtuple or False otherwise.", "source": "github-repos"}
{"code": "def alias_tensors(*args):\n\n    def alias_if_tensor(a):\n        return array_ops.identity(a) if isinstance(a, tensor.Tensor) else a\n    if len(args) > 1:\n        return (alias_if_tensor(a) for a in args)\n    elif len(args) == 1:\n        return alias_if_tensor(args[0])\n    raise ValueError('at least one argument required')", "docstring": "Wraps any Tensor arguments with an identity op.\n\nAny other argument, including Variables, is returned unchanged.\n\nArgs:\n*args: Any arguments. Must contain at least one element.\n\nReturns:\nSame as *args, with Tensor instances replaced as described.\n\nRaises:\nValueError: If args doesn't meet the requirements.", "source": "github-repos"}
{"code": "def apply_region_configs(env_config):\n    new_config = env_config.copy()\n    for region in env_config.get('regions', REGIONS):\n        if isinstance(env_config.get('regions'), dict):\n            region_specific_config = env_config['regions'][region]\n            new_config[region] = dict(DeepChainMap(region_specific_config, env_config))\n        else:\n            new_config[region] = env_config.copy()\n    LOG.debug('Region Specific Config:\\n%s', new_config)\n    return new_config", "docstring": "Override default env configs with region specific configs and nest\nall values under a region\n\nArgs:\nenv_config (dict): The environment specific config.\n\nReturn:\ndict: Newly updated dictionary with region overrides applied.", "source": "codesearchnet"}
{"code": "def imag(x):\n    if any_symbolic_tensors((x,)):\n        return Imag().symbolic_call(x)\n    return backend.numpy.imag(x)", "docstring": "Return the imaginary part of the complex argument.\n\nArgs:\nx: Input tensor.\n\nReturns:\nThe imaginary component of the complex argument.", "source": "github-repos"}
{"code": "def _batch_prepare_for_model(self, batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], batch_shape_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], batch_pronunciation_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n    batch_outputs = {}\n    for i, (first_ids, second_ids) in enumerate(batch_ids_pairs):\n        first_shape_ids, second_shape_ids = batch_shape_ids_pairs[i]\n        first_pronunciation_ids, second_pronunciation_ids = batch_pronunciation_ids_pairs[i]\n        outputs = self.prepare_for_model(first_ids, first_shape_ids, first_pronunciation_ids, pair_ids=second_ids, pair_shape_ids=second_shape_ids, pair_pronunciation_ids=second_pronunciation_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n        for key, value in outputs.items():\n            if key not in batch_outputs:\n                batch_outputs[key] = []\n            batch_outputs[key].append(value)\n    batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n    batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n    return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs\nbatch_shape_ids_pairs: list of tokenized input shape ids or input shape ids pairs\nbatch_pronunciation_ids_pairs: list of tokenized input pronunciation ids or input pronunciation ids pairs", "source": "github-repos"}
{"code": "def chop(array, epsilon=1e-10):\n    ret = np.array(array)\n    if np.isrealobj(ret):\n        ret[(abs(ret) < epsilon)] = 0.0\n    else:\n        ret.real[(abs(ret.real) < epsilon)] = 0.0\n        ret.imag[(abs(ret.imag) < epsilon)] = 0.0\n    return ret", "docstring": "Truncate small values of a complex array.\n\nArgs:\narray (array_like): array in which to truncate small values.\nepsilon (float): threshold.\n\nReturns:\nnp.array: A new operator with small values set to zero.", "source": "codesearchnet"}
{"code": "def _read_mptcp_remove(self, bits, size):\n    adid = []\n    for _ in range(size):\n        adid.append(self._read_unpack(1))\n    data = dict(subtype='REMOVE_ADDR', removeaddr=dict(addrid=(adid or None)))\n    return data", "docstring": "Read Remove Address option.\n\nPositional arguments:\n* bits - str, 4-bit data\n* size - int, length of option\n\nReturns:\n* dict -- extracted Remove Address (REMOVE_ADDR) option\n\nStructure of REMOVE_ADDR [RFC 6824]:\n1                   2                   3\n0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n+---------------+---------------+-------+-------+---------------+\n|     Kind      |  Length = 3+n |Subtype|(resvd)|   Address ID  | ...\n+---------------+---------------+-------+-------+---------------+\n(followed by n-1 Address IDs, if required)\n\nOctets      Bits        Name                            Description\n0           0     tcp.opt.kind                    Kind (30)\n1           8     tcp.opt.length                  Length\n2          16     tcp.opt.mp.subtype              Subtype (4)\n2          20     -                               Reserved (must be zero)\n3          24     tcp.opt.mp.removeaddr.addrid    Address ID (optional list)", "source": "codesearchnet"}
{"code": "def __init__(self, output, output_name, loss_fn, loss_weight=None, training_target=None, output_loss_metric=None, sample_weight=None, sample_weight_mode=None):\n    self._output = output\n    self._output_name = output_name\n    self._loss_fn = loss_fn\n    self._loss_weight = loss_weight\n    self._training_target = training_target\n    self._output_loss_metric = output_loss_metric\n    self._sample_weight = sample_weight\n    self._sample_weight_mode = sample_weight_mode", "docstring": "Initialize the _TrainingEndpoint.\n\nNote that the output and output_name should be stable as long as the model\nstructure doesn't change. The training_target suppose to be mutable since\nthe information is provided via `compile()`\n\nArgs:\noutput: the output tensor of the model.\noutput_name: the unique name of the output tensor.\nloss_fn: the loss function for the output tensor.\nloss_weight: float, the weights for the loss.\ntraining_target: the _TrainingTarget for the model.\noutput_loss_metric: the metric object for the loss function.\nsample_weight: the weights for how a sample is weighted during metric and\nloss calculation. Could be None.\nsample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for\nhow the sample_weight is populated.", "source": "github-repos"}
{"code": "def begin_stream(self, command: Command) -> Reply:\n        \n        yield from self._control_stream.write_command(command)\n        reply = yield from self._control_stream.read_reply()\n\n        self.raise_if_not_match(\n            'Begin stream',\n            (\n                ReplyCodes.file_status_okay_about_to_open_data_connection,\n                ReplyCodes.data_connection_already_open_transfer_starting,\n            ),\n            reply\n        )\n\n        return reply", "docstring": "Start sending content on the data stream.\n\nArgs:\ncommand: A command that tells the server to send data over the\ndata connection.\n\nCoroutine.\n\nReturns:\nThe begin reply.", "source": "juraj-google-style"}
{"code": "def __init__(self, topology: Topology, core_assignment: np.ndarray):\n    if not isinstance(topology, Topology):\n        raise ValueError('topology must be a Topology object, got {}'.format(type(topology)))\n    core_assignment = numpy_compat.np_asarray(core_assignment, dtype=np.int32)\n    self._topology = topology\n    if core_assignment.ndim != 3:\n        raise ValueError(f'core_assignment must be a rank 3 numpy array, got shape {core_assignment.shape}')\n    self._num_replicas = core_assignment.shape[0]\n    self._num_cores_per_replica = core_assignment.shape[1]\n    if core_assignment.shape[-1] != topology.mesh_rank:\n        raise ValueError(f'core_assignment.shape[-1] must have size equal to topology rank ({topology.mesh_rank}), got core_assignment.shape={core_assignment.shape}')\n    self._core_assignment = core_assignment\n    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(self._core_assignment, topology)", "docstring": "Constructs a `DeviceAssignment` object.\n\nArgs:\ntopology: A `Topology` object that describes the physical TPU topology.\ncore_assignment: A logical to physical core mapping, represented as a\nrank 3 numpy array. See the description of the `core_assignment`\nproperty for more details.\n\nRaises:\nValueError: If `topology` is not `Topology` object.\nValueError: If `core_assignment` is not a rank 3 numpy array.", "source": "github-repos"}
{"code": "def glob(*args):\n    \n    if len(args) == 1 and isinstance(args[0], list):\n        args = args[0]\n    matches = []\n    for pattern in args:\n        for item in glob2.glob(pattern):\n            if not os.path.isdir(item):\n                matches.append(item)\n    return matches", "docstring": "Returns list of paths matching one or more wildcard patterns.\n\nArgs:\n*args: One or more wildcard patterns, or a single list of patterns.\nDirectories are excluded from the results.", "source": "juraj-google-style"}
{"code": "def merge(profile, branch, merge_into):\n    \n    data = merges.merge(profile, branch, merge_into)\n    return data", "docstring": "Merge a branch into another branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch\nThe name of the branch to merge.\n\nmerge_into\nThe name of the branch you want to merge into.\n\nReturns:\nA dict with data about the merge.", "source": "juraj-google-style"}
{"code": "def __init__(self, xid=None, body_type=None, flags=0, body=b''):\n        \n        super().__init__(xid)\n        self.body_type = body_type\n        self.flags = flags\n        self.body = body", "docstring": "Create a StatsRequest with the optional parameters below.\n\nArgs:\nxid (int): xid to be used on the message header.\nbody_type (StatsType): One of the OFPST_* constants.\nflags (int): OFPSF_REQ_* flags (none yet defined).\nbody (BinaryData): Body of the request.", "source": "juraj-google-style"}
{"code": "def orthologize(ast, bo, species_id: str):\n    \n\n    \n    \n\n    if not species_id:\n        bo.validation_messages.append(\n            (\"WARNING\", \"No species id was provided for orthologization\")\n        )\n        return ast\n\n    if isinstance(ast, NSArg):\n        if ast.orthologs:\n            \n            if ast.orthologs.get(species_id, None):\n                orthologized_nsarg_val = ast.orthologs[species_id][\"decanonical\"]\n                ns, value = orthologized_nsarg_val.split(\":\")\n                ast.change_nsvalue(ns, value)\n                ast.canonical = ast.orthologs[species_id][\"canonical\"]\n                ast.decanonical = ast.orthologs[species_id][\"decanonical\"]\n                ast.orthologized = True\n                bo.ast.species.add(\n                    (species_id, ast.orthologs[species_id][\"species_label\"])\n                )\n            else:\n                bo.ast.species.add((ast.species_id, ast.species_label))\n                bo.validation_messages.append(\n                    (\"WARNING\", f\"No ortholog found for {ast.namespace}:{ast.value}\")\n                )\n        elif ast.species_id:\n            bo.ast.species.add((ast.species_id, ast.species_label))\n\n    \n    if hasattr(ast, \"args\"):\n        for arg in ast.args:\n            orthologize(arg, bo, species_id)\n\n    return ast", "docstring": "Recursively orthologize BEL Entities in BEL AST using API endpoint\n\nNOTE: - will take first ortholog returned in BEL.bio API result (which may return more than one ortholog)\n\nArgs:\nast (BEL): BEL AST\nendpoint (str): endpoint url with a placeholder for the term_id\n\nReturns:\nBEL: BEL AST", "source": "juraj-google-style"}
{"code": "def transpose(self, name=None):\n    \n    if name is None:\n      name = self.module_name + \"_transpose\"\n\n    if self._data_format == DATA_FORMAT_NWC:\n      stride = self._stride[1:-1]\n    else:  \n      stride = self._stride[2:]\n\n    return Conv1D(output_channels=lambda: self.input_channels,\n                  kernel_shape=self.kernel_shape,\n                  stride=stride,\n                  padding=self.padding,\n                  use_bias=self._use_bias,\n                  initializers=self.initializers,\n                  partitioners=self.partitioners,\n                  regularizers=self.regularizers,\n                  data_format=self._data_format,\n                  custom_getter=self._custom_getter,\n                  name=name)", "docstring": "Returns matching `Conv1D` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n`Conv1D` module.", "source": "juraj-google-style"}
{"code": "def clean_model_doc_toc(model_doc: List[dict]) -> List[dict]:\n    counts = defaultdict(int)\n    for doc in model_doc:\n        counts[doc['local']] += 1\n    duplicates = [key for key, value in counts.items() if value > 1]\n    new_doc = []\n    for duplicate_key in duplicates:\n        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})\n        if len(titles) > 1:\n            raise ValueError(f'{duplicate_key} is present several times in the documentation table of content at `docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the others.')\n        new_doc.append({'local': duplicate_key, 'title': titles[0]})\n    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])\n    return sorted(new_doc, key=lambda s: s['title'].lower())", "docstring": "Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates\nand sorting models alphabetically.\n\nArgs:\nmodel_doc (`List[dict]`):\nThe list of dictionaries extracted from the `_toctree.yml` file for this specific modality.\n\nReturns:\n`List[dict]`: List of dictionaries like the input, but cleaned up and sorted.", "source": "github-repos"}
{"code": "def __init__(self, filepath):\n    \n    self._filepath = filepath\n    self._subword_text_encoder = text_encoder.SubwordTextEncoder(filepath)", "docstring": "Create a T2tVocabulary.\n\nArgs:\nfilepath: a string", "source": "juraj-google-style"}
{"code": "def _make_static_axis_non_negative_list(axis, ndims):\n  \n  axis = distribution_util.make_non_negative_axis(axis, ndims)\n\n  axis_const = tf.get_static_value(axis)\n  if axis_const is None:\n    raise ValueError(\n        'Expected argument `axis` to be statically available.  Found: %s' %\n        axis)\n\n  \n  axis = axis_const + np.zeros([1], dtype=axis_const.dtype)\n\n  return list(int(dim) for dim in axis)", "docstring": "Convert possibly negatively indexed axis to non-negative list of ints.\n\nArgs:\naxis:  Integer Tensor.\nndims:  Number of dimensions into which axis indexes.\n\nReturns:\nA list of non-negative Python integers.\n\nRaises:\nValueError: If `axis` is not statically defined.", "source": "juraj-google-style"}
{"code": "def tokenize_sentence(input_dict):\n    text, uid = (input_dict['text'], input_dict['id'])\n    tokens = Tokenizer([text], padding=True, truncation=True, return_tensors='pt')\n    tokens = {key: torch.squeeze(val) for key, val in tokens.items()}\n    return ((text, uid), tokens)", "docstring": "It takes a dictionary with a text and an id, tokenizes the text, and\nreturns a tuple of the text and id and the tokenized text\n\nArgs:\ninput_dict: a dictionary with the text and id of the sentence\n\nReturns:\nA tuple of the text and id, and a dictionary of the tokens.", "source": "github-repos"}
{"code": "def dtime(sdat, tstart=None, tend=None):\n    \n    tseries = sdat.tseries_between(tstart, tend)\n    time = tseries['t'].values\n    return time[1:] - time[:-1], time[:-1]", "docstring": "Time increment dt.\n\nCompute dt as a function of time.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): time at which the computation should start. Use the\nbeginning of the time series data if set to None.\ntend (float): time at which the computation should end. Use the\nend of the time series data if set to None.\nReturns:\ntuple of :class:`numpy.array`: dt and time arrays.", "source": "juraj-google-style"}
{"code": "def play(self, **kwargs):\n        \n        path = '%s/%s/play' % (self.manager.path, self.get_id())\n        self.manager.gitlab.http_post(path)", "docstring": "Trigger a job explicitly.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabJobPlayError: If the job could not be triggered", "source": "juraj-google-style"}
{"code": "def GetPredefinedFile(stubs_subdir, module, extension='.pytd', as_package=False):\n    parts = module.split('.')\n    if as_package:\n        parts.append('__init__')\n    mod_path = path_utils.join(*parts) + extension\n    path = path_utils.join('stubs', stubs_subdir, mod_path)\n    return (path, pytype_source_utils.load_text_file(path))", "docstring": "Get the contents of a predefined PyTD, typically with a file name *.pytd.\n\nArguments:\nstubs_subdir: the directory, typically \"builtins\" or \"stdlib\"\nmodule: module name (e.g., \"sys\" or \"__builtins__\")\nextension: either \".pytd\" or \".py\"\nas_package: try the module as a directory with an __init__ file\n\nReturns:\nThe contents of the file\nRaises:\nIOError: if file not found", "source": "github-repos"}
{"code": "def add_whitespace_before(char, input_file, output_file):\n    \n    line_count = get_line_count(input_file)\n    input_file = open(input_file, 'r')\n    output_file = open(output_file, 'r+')\n    for line in range(line_count):\n        string = input_file.readline()\n        \n        if re.search(r'[a-zA-Z0-9]' + char, string) != None:\n            string = re.sub(char, ' ' + char, string)\n        output_file.write(string)\n    input_file.close()", "docstring": "Adds a space before a character if there's isn't one already.\n\nArgs:\nchar: string, character that needs a space before it.\n\ninput_file: string, path to file to parse.\n\noutput_file: string, path to destination file.\n\nReturns:\nNone.", "source": "juraj-google-style"}
{"code": "def retrieve_from_web(generate_csv=False):\n    url = 'https:\n    source = urllib.request.urlopen(url)\n    matches = []\n    while True:\n        line = source.readline()\n        if '</html>' in line:\n            break\n        else:\n            gpu = re.search('<a href=.*>([\\\\w\\\\S\\\\s\\\\d\\\\[\\\\]\\\\,]+[^*])</a>(<a href=.*)?.*', line)\n            capability = re.search('([\\\\d]+).([\\\\d]+)(/)?([\\\\d]+)?(.)?([\\\\d]+)?.*</td>.*', line)\n            if gpu:\n                matches.append(gpu.group(1))\n            elif capability:\n                if capability.group(3):\n                    capability_str = capability.group(4) + '.' + capability.group(6)\n                else:\n                    capability_str = capability.group(1) + '.' + capability.group(2)\n                matches.append(capability_str)\n    return create_gpu_capa_map(matches, generate_csv)", "docstring": "Retrieves list of all CUDA compute capability from NVIDIA webpage.\n\nArgs:\ngenerate_csv: Boolean for generating an output file containing\nthe results.\n\nReturns:\nOrderedDict that is a list of all CUDA compute capability listed on the\nNVIDIA page. Order goes from top to bottom of the webpage content (.html).", "source": "github-repos"}
{"code": "def num_memory_zones(self):\n    count = self._dll.JLINK_GetMemZones(0, 0)\n    if (count < 0):\n        raise errors.JLinkException(count)\n    return count", "docstring": "Returns the number of memory zones supported by the target.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nAn integer count of the number of memory zones supported by the\ntarget.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"}
{"code": "def gaussian_deriv(duration: int, amp: complex, sigma: float, name: str = None) -> SamplePulse:\n    r\n    center = duration/2\n    return _sampled_gaussian_deriv_pulse(duration, amp, center, sigma, name=name)", "docstring": "r\"\"\"Generates unnormalized gaussian derivative `SamplePulse`.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude at `center`.\nsigma: Width (standard deviation) of pulse.\nname: Name of pulse.", "source": "juraj-google-style"}
{"code": "def install(self, package: str, option: str='-r') -> None:\n    if (not os.path.isfile(package)):\n        raise FileNotFoundError(f'{package!r} does not exist.')\n    for i in option:\n        if (i not in '-lrtsdg'):\n            raise ValueError(f'There is no option named: {option!r}.')\n    self._execute('-s', self.device_sn, 'install', option, package)", "docstring": "Push package to the device and install it.\n\nArgs:\noption:\n-l: forward lock application\n-r: replace existing application\n-t: allow test packages\n-s: install application on sdcard\n-d: allow version code downgrade (debuggable packages only)\n-g: grant all runtime permissions", "source": "codesearchnet"}
{"code": "def get_slab_regions(slab, blength=3.5):\n    \n\n    fcoords, indices, all_indices = [], [], []\n    for site in slab:\n        \n        neighbors = slab.get_neighbors(site, blength, include_index=True,\n                                       include_image=True)\n        for nn in neighbors:\n            if nn[0].frac_coords[2] < 0:\n                \n                fcoords.append(nn[0].frac_coords[2])\n                indices.append(nn[-2])\n                if nn[-2] not in all_indices:\n                    all_indices.append(nn[-2])\n\n    if fcoords:\n        \n        \n        while fcoords:\n            last_fcoords = copy.copy(fcoords)\n            last_indices = copy.copy(indices)\n            site = slab[indices[fcoords.index(min(fcoords))]]\n            neighbors = slab.get_neighbors(site, blength, include_index=True,\n                                           include_image=True)\n            fcoords, indices = [], []\n            for nn in neighbors:\n                if 1 > nn[0].frac_coords[2] > 0 and \\\n                                nn[0].frac_coords[2] < site.frac_coords[2]:\n                    \n                    fcoords.append(nn[0].frac_coords[2])\n                    indices.append(nn[-2])\n                    if nn[-2] not in all_indices:\n                        all_indices.append(nn[-2])\n\n        \n        upper_fcoords = []\n        for site in slab:\n            if all([nn[-1] not in all_indices for nn in\n                    slab.get_neighbors(site, blength,\n                                       include_index=True)]):\n                upper_fcoords.append(site.frac_coords[2])\n        coords = copy.copy(last_fcoords) if not fcoords else copy.copy(fcoords)\n        min_top = slab[last_indices[coords.index(min(coords))]].frac_coords[2]\n        ranges = [[0, max(upper_fcoords)], [min_top, 1]]\n    else:\n        \n        \n        sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])\n        ranges = [[sorted_sites[0].frac_coords[2],\n                   sorted_sites[-1].frac_coords[2]]]\n\n    return ranges", "docstring": "Function to get the ranges of the slab regions. Useful for discerning where\nthe slab ends and vacuum begins if the slab is not fully within the cell\nArgs:\nslab (Structure): Structure object modelling the surface\nblength (float, Ang): The bondlength between atoms. You generally\nwant this value to be larger than the actual bondlengths in\norder to find atoms that are part of the slab", "source": "juraj-google-style"}
{"code": "def total_stored(self, wanted, slots=None):\n    if (slots is None):\n        slots = self.window.slots\n    wanted = make_slot_check(wanted)\n    return sum((slot.amount for slot in slots if wanted(slot)))", "docstring": "Calculates the total number of items of that type\nin the current window or given slot range.\n\nArgs:\nwanted: function(Slot) or Slot or itemID or (itemID, metadata)", "source": "codesearchnet"}
{"code": "class EnsembleAnomalyDetector(AnomalyDetector):\n\n    def __init__(self, sub_detectors: Optional[list[AnomalyDetector]]=None, aggregation_strategy: Optional[AggregationFn]=None, **kwargs):\n        if 'model_id' not in kwargs or kwargs['model_id'] is None:\n            kwargs['model_id'] = getattr(self, 'spec_type', lambda: 'custom')()\n        super().__init__(**kwargs)\n        self._aggregation_strategy = aggregation_strategy\n        self._sub_detectors = sub_detectors\n\n    def learn_one(self, x: beam.Row) -> None:\n        \n        raise NotImplementedError\n\n    def score_one(self, x: beam.Row) -> float:\n        \n        raise NotImplementedError", "docstring": "An abstract base class for an ensemble of anomaly (sub-)detectors.\n\nArgs:\nsub_detectors: A list of `AnomalyDetector` used in this ensemble model.\naggregation_strategy: An optional `AggregationFn` to apply to the\npredictions from all sub-detectors and yield an aggregated result.\nmodel_id: Inherited from `AnomalyDetector`.\nfeatures: Inherited from `AnomalyDetector`.\ntarget: Inherited from `AnomalyDetector`.\nthreshold_criterion: Inherited from `AnomalyDetector`.", "source": "github-repos"}
{"code": "def make_initializer(self, dataset, name=None):\n    with ops.name_scope(name, 'make_initializer') as name:\n        dataset_output_types = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), dataset.element_spec)\n        dataset_output_shapes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), dataset.element_spec)\n        dataset_output_classes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), dataset.element_spec)\n        nest.assert_same_structure(self.output_types, dataset_output_types)\n        nest.assert_same_structure(self.output_shapes, dataset_output_shapes)\n        for iterator_class, dataset_class in zip(nest.flatten(self.output_classes), nest.flatten(dataset_output_classes)):\n            if iterator_class is not dataset_class:\n                raise TypeError(f'Expected output classes {self.output_classes!r} but got dataset with output classes {dataset_output_classes!r}.')\n        for iterator_dtype, dataset_dtype in zip(nest.flatten(self.output_types), nest.flatten(dataset_output_types)):\n            if iterator_dtype != dataset_dtype:\n                raise TypeError(f'Expected output types {self.output_types!r} but got dataset with output types {dataset_output_types!r}.')\n        for iterator_shape, dataset_shape in zip(nest.flatten(self.output_shapes), nest.flatten(dataset_output_shapes)):\n            if not iterator_shape.is_compatible_with(dataset_shape):\n                raise TypeError(f'Expected output shapes compatible with {self.output_shapes!r} but got dataset with output shapes {dataset_output_shapes!r}.')\n    with ops.colocate_with(self._iterator_resource):\n        return gen_dataset_ops.make_iterator(dataset._variant_tensor, self._iterator_resource, name=name)", "docstring": "Returns a `tf.Operation` that initializes this iterator on `dataset`.\n\nArgs:\ndataset: A `Dataset` whose `element_spec` if compatible with this\niterator.\nname: (Optional.) A name for the created operation.\n\nReturns:\nA `tf.Operation` that can be run to initialize this iterator on the given\n`dataset`.\n\nRaises:\nTypeError: If `dataset` and this iterator do not have a compatible\n`element_spec`.", "source": "github-repos"}
{"code": "def train_auto_encoder(self, generative_model, a_logs_list):\n        \n        error_arr = generative_model.update()\n        if error_arr.ndim > 1:\n            error_arr = error_arr.mean()\n        a_logs_list.append(error_arr)\n\n        self.__logger.debug(\"The reconstruction error (mean): \" + str(error_arr))\n\n        return generative_model, a_logs_list", "docstring": "Train the generative model as the Auto-Encoder.\n\nArgs:\ngenerative_model:   Generator which draws samples from the `fake` distribution.\na_logs_list:        `list` of the reconstruction errors.\n\nReturns:\nThe tuple data. The shape is...\n- Generator which draws samples from the `fake` distribution.\n- `list` of the reconstruction errors.", "source": "juraj-google-style"}
{"code": "def access_vlan(self, inter_type, inter, vlan_id):\n    config = ET.Element('config')\n    interface = ET.SubElement(config, 'interface', xmlns='urn:brocade.com:mgmt:brocade-interface')\n    int_type = ET.SubElement(interface, inter_type)\n    name = ET.SubElement(int_type, 'name')\n    name.text = inter\n    switchport = ET.SubElement(int_type, 'switchport')\n    access = ET.SubElement(switchport, 'access')\n    accessvlan = ET.SubElement(access, 'accessvlan')\n    accessvlan.text = vlan_id\n    try:\n        self._callback(config)\n        return True\n    except Exception as error:\n        logging.error(error)\n        return False", "docstring": "Add a L2 Interface to a specific VLAN.\n\nArgs:\ninter_type: The type of interface you want to configure. Ex.\ntengigabitethernet, gigabitethernet, fortygigabitethernet.\ninter: The ID for the interface you want to configure. Ex. 1/0/1\nvlan_id: ID for the VLAN interface being modified. Value of 2-4096.\n\nReturns:\nTrue if command completes successfully or False if not.\n\nRaises:\nNone", "source": "codesearchnet"}
{"code": "def get_user_display_name(self, userid):\n        \n\n        user_info = self.slack_client.api_call('users.info', user=userid)\n        if user_info.get('ok'):\n            user = user_info.get('user')\n            if user.get('profile'):\n                return user.get('profile').get('display_name')\n            else:\n                return user.get('name')\n        else:\n            return userid", "docstring": "Given a Slack userid, grabs user display_name from api.\n\nArgs:\nuserid (string): the user id of the user being queried\nReturns:\ndict: a dictionary of the api response", "source": "juraj-google-style"}
{"code": "def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None):\n    \n    \n    if st is None:\n      \n      \n      st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0))\n      \n\n    mtime = time.localtime(st.st_mtime or time.time())\n    date_time = mtime[0:6]\n    \n    if arcname is None:\n      raise ValueError(\"An arcname must be provided.\")\n\n    zinfo = zipfile.ZipInfo(arcname, date_time)\n    zinfo.external_attr = (st[0] & 0xFFFF) << 16  \n\n    if compress_type is None:\n      zinfo.compress_type = self._compression\n    else:\n      zinfo.compress_type = compress_type\n\n    zinfo.file_size = 0\n    zinfo.compress_size = 0\n    zinfo.flag_bits = 0x08  \n    zinfo.CRC = 0x08074b50  \n    \n    \n    zinfo.extra = struct.pack(\n        \"<HHIIHH\",\n        0x5855,\n        12,\n        0,  \n        0,  \n        0,  \n        0)  \n    return zinfo", "docstring": "Generate ZipInfo instance for the given name, compression and stat.\n\nArgs:\narcname: The name in the archive this should take.\ncompress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)\nst: An optional stat object to be used for setting headers.\n\nReturns:\nZipInfo instance.\n\nRaises:\nValueError: If arcname is not provided.", "source": "juraj-google-style"}
{"code": "def get_conditional_uni(cls, left_parent, right_parent):\n    (left, right, _) = cls._identify_eds_ing(left_parent, right_parent)\n    left_u = (left_parent.U[0] if (left_parent.L == left) else left_parent.U[1])\n    right_u = (right_parent.U[0] if (right_parent.L == right) else right_parent.U[1])\n    return (left_u, right_u)", "docstring": "Identify pair univariate value from parents.\n\nArgs:\nleft_parent(Edge): left parent\nright_parent(Edge): right parent\n\nReturns:\ntuple[np.ndarray, np.ndarray]: left and right parents univariate.", "source": "codesearchnet"}
{"code": "def minimum(x, y):\n    return math_ops.minimum(x, y)", "docstring": "Element-wise minimum of two tensors.\n\nArgs:\nx: Tensor or variable.\ny: Tensor or variable.\n\nReturns:\nA tensor.", "source": "github-repos"}
{"code": "def delete(name, **kwargs):\n    \n    if info(name):\n        comp_obj = _get_computer_object()\n        try:\n            comp_obj.Delete('group', name)\n            log.info('Successfully removed group %s', name)\n        except pywintypes.com_error as exc:\n            msg = 'Failed to remove group {0}. {1}'.format(\n                name, win32api.FormatMessage(exc.excepinfo[5]))\n            log.error(msg)\n            return False\n    else:\n        log.warning('The group %s does not exists.', name)\n        return False\n\n    return True", "docstring": "Remove the named group\n\nArgs:\n\nname (str):\nThe name of the group to remove\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.delete foo", "source": "juraj-google-style"}
{"code": "def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):\n    if self.recurrent_dropout == 0:\n        return None\n    init_kwargs = dict(inputs=inputs, training=training, count=count)\n    return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)", "docstring": "Get the recurrent dropout mask for RNN cell.\n\nIt will create mask based on context if there isn't any existing cached\nmask. If a new mask is generated, it will update the cache in the cell.\n\nArgs:\ninputs: The input tensor whose shape will be used to generate dropout\nmask.\ntraining: Boolean tensor, whether its in training mode, dropout will be\nignored in non-training mode.\ncount: Int, how many dropout mask will be generated. It is useful for cell\nthat has internal weights fused together.\nReturns:\nList of mask tensor, generated or cached mask based on context.", "source": "github-repos"}
{"code": "def _find_config(self):\n    for search_path in self.config_paths:\n        for ext in self._fmt_to_ext.get(self.config_format):\n            path = os.path.abspath(os.path.join(search_path, (self.config_name + ext)))\n            if os.path.isfile(path):\n                self.config_file = path\n                return\n    raise BisonError('No file named {} found in search paths {}'.format(self.config_name, self.config_paths))", "docstring": "Searches through the configured `config_paths` for the `config_name`\nfile.\n\nIf there are no `config_paths` defined, this will raise an error, so the\ncaller should take care to check the value of `config_paths` first.\n\nReturns:\nstr: The fully qualified path to the configuration that was found.\n\nRaises:\nException: No paths are defined in `config_paths` or no file with\nthe `config_name` was found in any of the specified `config_paths`.", "source": "codesearchnet"}
{"code": "def version(self):\n    version = int(self._dll.JLINKARM_GetDLLVersion())\n    major = (version / 10000)\n    minor = ((version / 100) % 100)\n    rev = (version % 100)\n    rev = ('' if (rev == 0) else chr(((rev + ord('a')) - 1)))\n    return ('%d.%02d%s' % (major, minor, rev))", "docstring": "Returns the device's version.\n\nThe device's version is returned as a string of the format: M.mr where\n``M`` is major number, ``m`` is minor number, and ``r`` is revision\ncharacter.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nDevice version string.", "source": "codesearchnet"}
{"code": "def __init__(self, api_key: str, config: interfaces.Config=interfaces.Config()) -> None:\n    self._config = config\n    p_topic_generator = topic_generator.TopicGenerator(api_key=api_key, config=config)\n    p_topic_researcher = topic_researcher.TopicResearcher(api_key=api_key, config=config)\n    p_topic_verbalizer = topic_verbalizer.TopicVerbalizer(config=config)\n    p_genai_model = genai_model.GenaiModel(api_key=api_key, model_name=self._config.research_synthesizer_model_name)\n    p_preamble = preamble.Preamble(content=[ProcessorPart(prompts.SYNTHESIS_PREAMBLE), ProcessorPart('Research text: ')])\n    p_suffix = preamble.Suffix(content=[ProcessorPart('Your synthesized research: ')])\n    self._pipeline = p_topic_generator + p_topic_researcher + p_topic_verbalizer + p_preamble + p_suffix + p_genai_model", "docstring": "Initializes the Research Agent.\n\nArgs:\napi_key: The API key to use for the GenAI API.\nconfig: The configuration for the Research Agent.", "source": "github-repos"}
{"code": "def delete_submission(self, submission_id):\n        \n        LOG.info(\"Deleting clinvar submission %s\", submission_id)\n        submission_obj = self.clinvar_submission_collection.find_one({ '_id' : ObjectId(submission_id)})\n\n        submission_variants = submission_obj.get('variant_data')\n        submission_casedata = submission_obj.get('case_data')\n\n        submission_objects = []\n\n        if submission_variants and submission_casedata:\n            submission_objects = submission_variants + submission_casedata\n        elif submission_variants:\n            submission_objects = submission_variants\n        elif submission_casedata:\n            submission_objects = submission_casedata\n\n        \n        result = self.clinvar_collection.delete_many({'_id': { \"$in\": submission_objects} })\n        deleted_objects = result.deleted_count\n\n        \n        result = self.clinvar_submission_collection.delete_one({'_id': ObjectId(submission_id)})\n        deleted_submissions = result.deleted_count\n\n        \n        return deleted_objects,deleted_submissions", "docstring": "Deletes a Clinvar submission object, along with all associated clinvar objects (variants and casedata)\n\nArgs:\nsubmission_id(str): the ID of the submission to be deleted\n\nReturns:\ndeleted_objects(int): the number of associated objects removed (variants and/or casedata)\ndeleted_submissions(int): 1 if it's deleted, 0 if something went wrong", "source": "juraj-google-style"}
{"code": "def append_column(table, col_name, default_value=None):\n    \n    table[0].append(col_name.strip())\n    for row in table[1:]:\n        row.append(default_value)", "docstring": "Appends a column to the raw data without any integrity checks.\n\nArgs:\ndefault_value: The value which will assigned, not copied into each row", "source": "juraj-google-style"}
{"code": "def resolve_for(self, node):\n        \n\n        self.node = node\n        self.actual_text = normalize_text(\n            node.visible_text if self.query_type == \"visible\" else node.all_text)\n        self.count = len(re.findall(self.search_regexp, self.actual_text))\n\n        return self.count", "docstring": "Resolves this query relative to the given node.\n\nArgs:\nnode (node.Base): The node to be evaluated.\n\nReturns:\nint: The number of matches found.", "source": "juraj-google-style"}
{"code": "def add_evaluation_step(result_tensor, ground_truth_tensor):\n  \n  with tf.name_scope('accuracy'):\n    with tf.name_scope('correct_prediction'):\n      prediction = tf.argmax(result_tensor, 1)\n      correct_prediction = tf.equal(prediction, ground_truth_tensor)\n    with tf.name_scope('accuracy'):\n      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n  tf.summary.scalar('accuracy', evaluation_step)\n  return evaluation_step, prediction", "docstring": "Inserts the operations we need to evaluate the accuracy of our results.\n\nArgs:\nresult_tensor: The new final node that produces results.\nground_truth_tensor: The node we feed ground truth data\ninto.\n\nReturns:\nTuple of (evaluation step, prediction).", "source": "juraj-google-style"}
{"code": "def cast_to_str(obj):\n    if isinstance(obj, str):\n        return obj\n    if isinstance(obj, Seq):\n        return str(obj)\n    if isinstance(obj, SeqRecord):\n        return str(obj.seq)\n    else:\n        raise ValueError('Must provide a string, Seq, or SeqRecord object.')", "docstring": "Return a string representation of a Seq or SeqRecord.\n\nArgs:\nobj (str, Seq, SeqRecord): Biopython Seq or SeqRecord\n\nReturns:\nstr: String representation of the sequence", "source": "codesearchnet"}
{"code": "async def apply(self, sender: str, recipient: str, mailbox: str,\n                    append_msg: AppendMessage) \\\n            -> Tuple[Optional[str], AppendMessage]:\n        \n        ...", "docstring": "Run the filter and return the mailbox where it should be appended,\nor None to discard, and the message to be appended, which is usually\nthe same as ``append_msg``.\n\nArgs:\nsender: The envelope sender of the message.\nrecipient: The envelope recipient of the message.\nmailbox: The intended mailbox to append the message.\nappend_msg: The message to be appended.\n\nraises:\n:exc:`~pymap.exceptions.AppendFailure`", "source": "juraj-google-style"}
{"code": "def laplacian_pyramid_image(shape, n_levels=4, sd=None):\n    \n    batch_dims = shape[:-3]\n    w, h, ch = shape[-3:]\n    pyramid = 0\n    for n in range(n_levels):\n        k = 2 ** n\n        pyramid += lowres_tensor(shape, batch_dims + (w \n    return pyramid", "docstring": "Simple laplacian pyramid paramaterization of an image.\n\nFor more flexibility, use a sum of lowres_tensor()s.\n\nArgs:\nshape: shape of resulting image, [batch, width, height, channels].\nn_levels: number of levels of laplacian pyarmid.\nsd: standard deviation of param initialization.\n\nReturns:\ntensor with shape from first argument.", "source": "juraj-google-style"}
{"code": "def returns(desc=None, printer=None, data=True):\n    if (data is False):\n        raise ArgumentError('Specifying non data return type in returns is no longer supported')\n\n    def _returns(func):\n        annotated(func)\n        func.custom_returnvalue(printer, desc)\n        return func\n    return _returns", "docstring": "Specify how the return value of this function should be handled.\n\nArgs:\ndesc (str): A deprecated description of the return value\nprinter (callable): A callable function that can format this return value\ndata (bool): A deprecated parameter for specifying that this function\nreturns data.", "source": "codesearchnet"}
{"code": "def experimental_distribute_dataset(self, dataset, options=None):\n    return super(OneDeviceStrategy, self).experimental_distribute_dataset(dataset, options)", "docstring": "Distributes a tf.data.Dataset instance provided via dataset.\n\nIn this case, there is only one device, so this is only a thin wrapper\naround the input dataset. It will, however, prefetch the input data to the\nspecified device. The returned distributed dataset can be iterated over\nsimilar to how regular datasets can.\n\nNOTE: Currently, the user cannot add any more transformations to a\ndistributed dataset.\n\nExample:\n```\nstrategy = tf.distribute.OneDeviceStrategy()\ndataset = tf.data.Dataset.range(10).batch(2)\ndist_dataset = strategy.experimental_distribute_dataset(dataset)\nfor x in dist_dataset:\nprint(x)  # [0, 1], [2, 3],...\n```\nArgs:\ndataset: `tf.data.Dataset` to be prefetched to device.\noptions: `tf.distribute.InputOptions` used to control options on how this\ndataset is distributed.\nReturns:\nA \"distributed `Dataset`\" that the caller can iterate over.", "source": "github-repos"}
{"code": "def _to_df(self, result, handle_annotations=None):\n        \n        annotations = result._data\n        if handle_annotations == 'first':\n            annotations = [annotations[0]]\n\n        face_results = []\n        for i, annotation in enumerate(annotations):\n            data_dict = {}\n            for field, val in annotation.items():\n                if 'Confidence' in field:\n                    data_dict['face_' + field] = val\n                elif 'oundingPoly' in field:\n                    for j, vertex in enumerate(val['vertices']):\n                        for dim in ['x', 'y']:\n                            name = '%s_vertex%d_%s' % (field, j+1, dim)\n                            val = vertex[dim] if dim in vertex else np.nan\n                            data_dict[name] = val\n                elif field == 'landmarks':\n                    for lm in val:\n                        name = 'landmark_' + lm['type'] + '_%s'\n                        lm_pos = {name %\n                                  k: v for (k, v) in lm['position'].items()}\n                        data_dict.update(lm_pos)\n                else:\n                    data_dict[field] = val\n\n            face_results.append(data_dict)\n\n        return pd.DataFrame(face_results)", "docstring": "Converts a Google API Face JSON response into a Pandas Dataframe.\n\nArgs:\nresult (ExtractorResult): Result object from which to parse out a\nDataframe.\nhandle_annotations (str): How returned face annotations should be\nhandled in cases where there are multiple faces.\n'first' indicates to only use the first face JSON object, all\nother values will default to including every face.", "source": "juraj-google-style"}
{"code": "def std_dev(self, value):\n        \n        if value == self._defaults['stdDev'] and 'stdDev' in self._values:\n            del self._values['stdDev']\n        else:\n            self._values['stdDev'] = value", "docstring": "The std_dev property.\n\nArgs:\nvalue (float). the property value.", "source": "juraj-google-style"}
{"code": "def docx_process_simple_text(text: str, width: int) -> str:\n    if width:\n        return '\\n'.join(textwrap.wrap(text, width=width))\n    else:\n        return text", "docstring": "Word-wraps text.\n\nArgs:\ntext: text to process\nwidth: width to word-wrap to (or 0 to skip word wrapping)\n\nReturns:\nwrapped text", "source": "codesearchnet"}
{"code": "def _find_address_values_in_chain(self, base_contexts, addresses_to_find):\n        \n\n        contexts_in_chain = deque()\n        contexts_in_chain.extend(base_contexts)\n        reads = list(addresses_to_find)\n        address_values = []\n        context_ids_already_searched = []\n        context_ids_already_searched.extend(base_contexts)\n\n        \n        \n        \n\n        while reads:\n            try:\n                current_c_id = contexts_in_chain.popleft()\n            except IndexError:\n                \n                break\n            current_context = self._contexts[current_c_id]\n\n            \n            deleted_addresses = current_context.get_if_deleted(reads)\n            for address in deleted_addresses:\n                if address is not None:\n                    address_values.append((address, None))\n\n            reads = list(set(reads) - set(deleted_addresses))\n\n            \n            \n            \n\n            values = current_context.get_if_set(reads)\n            addresses_not_found = []\n            for address, value in zip(reads, values):\n                if value is not None:\n                    address_values.append((address, value))\n                else:\n                    addresses_not_found.append(address)\n            reads = addresses_not_found\n\n            \n            \n\n            addresses_in_inputs = [address for address in reads\n                                   if address in current_context]\n\n            values = current_context.get_if_not_set(addresses_in_inputs)\n\n            address_values.extend(list(zip(addresses_in_inputs, values)))\n\n            for add in addresses_in_inputs:\n                reads.remove(add)\n\n            for c_id in current_context.base_contexts:\n                if c_id not in context_ids_already_searched:\n                    contexts_in_chain.append(c_id)\n                    context_ids_already_searched.append(c_id)\n\n        return address_values, reads", "docstring": "Breadth first search through the chain of contexts searching for\nthe bytes values at the addresses in addresses_to_find.\n\nArgs:\nbase_contexts (list of str): The context ids to start with.\naddresses_to_find (list of str): Addresses to find values in the\nchain of contexts.\n\nReturns:\ntuple of found address_values and still not found addresses", "source": "juraj-google-style"}
{"code": "def scale(self, width: int, height: int) -> None:\n        \n        lib.TCOD_image_scale(self.image_c, width, height)\n        self.width, self.height = width, height", "docstring": "Scale this Image to the new width and height.\n\nArgs:\nwidth (int): The new width of the Image after scaling.\nheight (int): The new height of the Image after scaling.", "source": "juraj-google-style"}
{"code": "def _update_context_field_binary_composition(present_locations, expression):\n    \n    if not any((isinstance(expression.left, ContextField),\n                isinstance(expression.right, ContextField))):\n        raise AssertionError(u'Received a BinaryComposition {} without any ContextField '\n                             u'operands. This should never happen.'.format(expression))\n\n    if isinstance(expression.left, ContextField):\n        context_field = expression.left\n        location_name, _ = context_field.location.get_location_name()\n        if location_name not in present_locations:\n            return TrueLiteral\n\n    if isinstance(expression.right, ContextField):\n        context_field = expression.right\n        location_name, _ = context_field.location.get_location_name()\n        if location_name not in present_locations:\n            return TrueLiteral\n\n    return expression", "docstring": "Lower BinaryCompositions involving non-existent ContextFields to True.\n\nArgs:\npresent_locations: set of all locations in the current MatchQuery that have not been pruned\nexpression: BinaryComposition with at least one ContextField operand\n\nReturns:\nTrueLiteral iff either ContextField operand is not in `present_locations`,\nand the original expression otherwise", "source": "juraj-google-style"}
{"code": "def _distro_release_info(self):\n    if self.distro_release_file:\n        distro_info = self._parse_distro_release_file(self.distro_release_file)\n        basename = os.path.basename(self.distro_release_file)\n        match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)\n        if match:\n            distro_info['id'] = match.group(1)\n        return distro_info\n    else:\n        try:\n            basenames = os.listdir(_UNIXCONFDIR)\n            basenames.sort()\n        except OSError:\n            basenames = ['SuSE-release', 'arch-release', 'base-release', 'centos-release', 'fedora-release', 'gentoo-release', 'mageia-release', 'mandrake-release', 'mandriva-release', 'mandrivalinux-release', 'manjaro-release', 'oracle-release', 'redhat-release', 'sl-release', 'slackware-version']\n        for basename in basenames:\n            if (basename in _DISTRO_RELEASE_IGNORE_BASENAMES):\n                continue\n            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)\n            if match:\n                filepath = os.path.join(_UNIXCONFDIR, basename)\n                distro_info = self._parse_distro_release_file(filepath)\n                if ('name' in distro_info):\n                    self.distro_release_file = filepath\n                    distro_info['id'] = match.group(1)\n                    return distro_info\n        return {}", "docstring": "Get the information items from the specified distro release file.\n\nReturns:\nA dictionary containing all information items.", "source": "codesearchnet"}
{"code": "def _check_property(self, rest=None, require_indexed=True):\n    \n    if require_indexed and not self._indexed:\n      raise InvalidPropertyError('Property is unindexed %s' % self._name)\n    if rest:\n      raise InvalidPropertyError('Referencing subproperty %s.%s '\n                                 'but %s is not a structured property' %\n                                 (self._name, rest, self._name))", "docstring": "Internal helper to check this property for specific requirements.\n\nCalled by Model._check_properties().\n\nArgs:\nrest: Optional subproperty to check, of the form 'name1.name2...nameN'.\n\nRaises:\nInvalidPropertyError if this property does not meet the given\nrequirements or if a subproperty is specified.  (StructuredProperty\noverrides this method to handle subproperties.)", "source": "juraj-google-style"}
{"code": "def get_organization(self, **kwargs):\n    resp = self._get(self._u(self._ORGANIZATION_ENDPOINT_SUFFIX), **kwargs)\n    resp.raise_for_status()\n    return resp.json()", "docstring": "Get the organization to which the user belongs\n\nReturns:\ndictionary of the response", "source": "codesearchnet"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    super(EncryptResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n    local_stream = utils.BytearrayStream(input_stream.read(self.length))\n    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n        self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('invalid payload missing the unique identifier attribute')\n    if self.is_tag_next(enums.Tags.DATA, local_stream):\n        self._data = primitives.ByteString(tag=enums.Tags.DATA)\n        self._data.read(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('invalid payload missing the data attribute')\n    if self.is_tag_next(enums.Tags.IV_COUNTER_NONCE, local_stream):\n        self._iv_counter_nonce = primitives.ByteString(tag=enums.Tags.IV_COUNTER_NONCE)\n        self._iv_counter_nonce.read(local_stream, kmip_version=kmip_version)\n    self.is_oversized(local_stream)", "docstring": "Read the data encoding the Encrypt response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the unique_identifier or data attributes\nare missing from the encoded payload.", "source": "codesearchnet"}
{"code": "def SmartSet(self, obj, attr_name, new_attr):\n    _, obj = tf_decorator.unwrap(obj)\n    if tf_inspect.ismodule(obj) or (not tf_inspect.isclass(obj) and attr_name in obj.__dict__):\n        orig_obj = obj\n        orig_attr = getattr(obj, attr_name)\n    else:\n        if not tf_inspect.isclass(obj):\n            mro = list(tf_inspect.getmro(obj.__class__))\n        else:\n            mro = list(tf_inspect.getmro(obj))\n        mro.reverse()\n        orig_attr = None\n        found_attr = False\n        for cls in mro:\n            try:\n                orig_obj = cls\n                orig_attr = getattr(obj, attr_name)\n                found_attr = True\n            except AttributeError:\n                continue\n        if not found_attr:\n            raise AttributeError('Attribute not found.')\n    old_attribute = obj.__dict__.get(attr_name)\n    if old_attribute is not None and isinstance(old_attribute, staticmethod):\n        orig_attr = staticmethod(orig_attr)\n    self.stubs.append((orig_obj, attr_name, orig_attr))\n    setattr(orig_obj, attr_name, new_attr)", "docstring": "Replace obj.attr_name with new_attr.\n\nThis method is smart and works at the module, class, and instance level\nwhile preserving proper inheritance. It will not stub out C types however\nunless that has been explicitly allowed by the type.\n\nThis method supports the case where attr_name is a staticmethod or a\nclassmethod of obj.\n\nNotes:\n- If obj is an instance, then it is its class that will actually be\nstubbed. Note that the method Set() does not do that: if obj is\nan instance, it (and not its class) will be stubbed.\n- The stubbing is using the builtin getattr and setattr. So, the __get__\nand __set__ will be called when stubbing (TODO: A better idea would\nprobably be to manipulate obj.__dict__ instead of getattr() and\nsetattr()).\n\nArgs:\nobj: The object whose attributes we want to modify.\nattr_name: The name of the attribute to modify.\nnew_attr: The new value for the attribute.\n\nRaises:\nAttributeError: If the attribute cannot be found.", "source": "github-repos"}
{"code": "def rvs(self, size=1):\n        \n        return np.random.multivariate_normal(self.mean, self.cov, size)", "docstring": "Convenience method to sample from this distribution.\n\nArgs:\nsize (int or tuple): Shape of return value. Each element is drawn\nindependently from this distribution.", "source": "juraj-google-style"}
{"code": "def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False):\n    client_schema = client_obj.Schema\n    kb = client_obj.Get(client_schema.KNOWLEDGE_BASE)\n    if (not allow_uninitialized):\n        if (not kb):\n            raise artifact_utils.KnowledgeBaseUninitializedError(('KnowledgeBase empty for %s.' % client_obj.urn))\n        if (not kb.os):\n            raise artifact_utils.KnowledgeBaseAttributesMissingError(('KnowledgeBase missing OS for %s. Knowledgebase content: %s' % (client_obj.urn, kb)))\n    if (not kb):\n        kb = client_schema.KNOWLEDGE_BASE()\n        SetCoreGRRKnowledgeBaseValues(kb, client_obj)\n    if (kb.os == 'Windows'):\n        if ((not kb.environ_allusersappdata) and kb.environ_allusersprofile):\n            if (kb.os_major_version >= 6):\n                kb.environ_allusersappdata = u'c:\\\\programdata'\n                kb.environ_allusersprofile = u'c:\\\\programdata'\n            else:\n                kb.environ_allusersappdata = u'c:\\\\documents and settings\\\\All Users\\\\Application Data'\n                kb.environ_allusersprofile = u'c:\\\\documents and settings\\\\All Users'\n    return kb", "docstring": "This generates an artifact knowledge base from a GRR client.\n\nArgs:\nclient_obj: A GRRClient object which is opened for reading.\nallow_uninitialized: If True we accept an uninitialized knowledge_base.\n\nReturns:\nA KnowledgeBase semantic value.\n\nRaises:\nArtifactProcessingError: If called when the knowledge base has not been\ninitialized.\nKnowledgeBaseUninitializedError: If we failed to initialize the knowledge\nbase.\n\nThis is needed so that the artifact library has a standardized\ninterface to the data that is actually stored in the GRRClient object in\nthe GRR datastore.\n\nWe expect that the client KNOWLEDGE_BASE is already filled out through the,\nKnowledgeBaseInitialization flow, but attempt to make some intelligent\nguesses if things failed.", "source": "codesearchnet"}
{"code": "def swo_speed_info(self):\n        \n        info = structs.JLinkSWOSpeedInfo()\n        res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.GET_SPEED_INFO,\n                                             ctypes.byref(info))\n        if res < 0:\n            raise errors.JLinkException(res)\n\n        return info", "docstring": "Retrieves information about the supported SWO speeds.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA ``JLinkSWOSpeedInfo`` instance describing the target's supported\nSWO speeds.\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"}
{"code": "def from_spec(cls, spec):\n    dtype = dtypes.as_dtype(spec.dtype)\n    minimum = getattr(spec, 'minimum', dtype.min)\n    maximum = getattr(spec, 'maximum', dtype.max)\n    return BoundedTensorSpec(spec.shape, dtype, minimum, maximum, spec.name)", "docstring": "Returns a `TensorSpec` with the same shape and dtype as `spec`.\n\nIf `spec` is a `BoundedTensorSpec`, then the new spec's bounds are set to\n`spec.minimum` and `spec.maximum`; otherwise, the bounds are set to\n`spec.dtype.min` and `spec.dtype.max`.\n\n>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name=\"x\")\n>>> BoundedTensorSpec.from_spec(spec)\nBoundedTensorSpec(shape=(8, 3), dtype=tf.int32, name='x',\nminimum=array(-2147483648, dtype=int32),\nmaximum=array(2147483647, dtype=int32))\n\nArgs:\nspec: The `TypeSpec` used to create the new `BoundedTensorSpec`.", "source": "github-repos"}
{"code": "def _GatherReturnElements(requested_return_elements, graph, results):\n    return_outputs = c_api.TF_ImportGraphDefResultsReturnOutputs(results)\n    return_opers = c_api.TF_ImportGraphDefResultsReturnOperations(results)\n    combined_return_elements = []\n    outputs_idx = 0\n    opers_idx = 0\n    for name in requested_return_elements:\n        if ':' in name:\n            combined_return_elements.append(graph._get_tensor_by_tf_output(return_outputs[outputs_idx]))\n            outputs_idx += 1\n        else:\n            combined_return_elements.append(graph._get_operation_by_tf_operation(return_opers[opers_idx]))\n            opers_idx += 1\n    return combined_return_elements", "docstring": "Returns the requested return elements from results.\n\nArgs:\nrequested_return_elements: list of strings of operation and tensor names\ngraph: Graph\nresults: wrapped TF_ImportGraphDefResults\n\nReturns:\nlist of `Operation` and/or `Tensor` objects", "source": "github-repos"}
{"code": "def merge_ids(self, token, channel, ids, delete=False):\n        \n        url = self.url() + \"/merge/{}/\".format(','.join([str(i) for i in ids]))\n        req = self.remote_utils.get_url(url)\n        if req.status_code is not 200:\n            raise RemoteDataUploadError('Could not merge ids {}'.format(\n                                        ','.join([str(i) for i in ids])))\n        if delete:\n            self.delete_ramon(token, channel, ids[1:])\n        return True", "docstring": "Call the restful endpoint to merge two RAMON objects into one.\n\nArguments:\ntoken (str): The token to inspect\nchannel (str): The channel to inspect\nids (int[]): the list of the IDs to merge\ndelete (bool : False): Whether to delete after merging.\n\nReturns:\njson: The ID as returned by ndstore", "source": "juraj-google-style"}
{"code": "def notify_changes(self, changes):\n        \n        \n        ret = []\n        child_changes = {}\n        for change in changes:\n            \n            self._add_child_change(change, child_changes)\n\n        \n        if self.update_requests:\n            serialized = serialize_object(self.data)\n            for request in self.update_requests:\n                ret.append(request.update_response(serialized))\n\n        \n        if self.delta_requests:\n            for change in changes:\n                change[-1] = serialize_object(change[-1])\n            for request in self.delta_requests:\n                ret.append(request.delta_response(changes))\n\n        \n        for name, child_changes in child_changes.items():\n            ret += self.children[name].notify_changes(child_changes)\n        return ret", "docstring": "Set our data and notify anyone listening\n\nArgs:\nchanges (list): [[path, optional data]] where path is the path to\nwhat has changed, and data is the unserialized object that has\nchanged\n\nReturns:\nlist: [(callback, Response)] that need to be called", "source": "juraj-google-style"}
{"code": "def check_version_2(dataset):\n    if ((float(dataset.get('version')) >= 2.0) if dataset.get('version') else False):\n        return True\n    else:\n        return False", "docstring": "Checks if json-stat version attribute exists and is equal or greater \\\nthan 2.0 for a given dataset.\n\nArgs:\ndataset (OrderedDict): data in JSON-stat format, previously \\\ndeserialized to a python object by \\\njson.load() or json.loads(),\n\nReturns:\nbool: True if version exists and is equal or greater than 2.0, \\\nFalse otherwise. For datasets without the version attribute, \\\nalways return False.", "source": "codesearchnet"}
{"code": "def writeline(self, line=b'', sep=b'\\n', echo=None):\n        \n\n        self.writelines([line], sep, echo)", "docstring": "Write a byte sequences to the channel and terminate it with carriage\nreturn and line feed.\n\nArgs:\nline(bytes): The line to send.\nsep(bytes): The separator to use after each line.\necho(bool): Whether to echo the written data to stdout.\n\nRaises:\nEOFError: If the channel was closed before all data was sent.", "source": "juraj-google-style"}
{"code": "def get_tabular_stream(self, url, **kwargs):\n    self.close_response()\n    file_type = kwargs.get('file_type')\n    if (file_type is not None):\n        kwargs['format'] = file_type\n        del kwargs['file_type']\n    try:\n        self.response = tabulator.Stream(url, **kwargs)\n        self.response.open()\n        return self.response\n    except TabulatorException as e:\n        raisefrom(DownloadError, ('Getting tabular stream for %s failed!' % url), e)", "docstring": "Get Tabulator stream.\n\nArgs:\nurl (str): URL to download\n**kwargs:\nheaders (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers\nfile_type (Optional[str]): Type of file. Defaults to inferring.\ndelimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.\n\nReturns:\ntabulator.Stream: Tabulator Stream object", "source": "codesearchnet"}
{"code": "def _GetProcessedStorageFilePath(self, task):\n    \n    filename = '{0:s}.plaso'.format(task.identifier)\n    return os.path.join(self._processed_task_storage_path, filename)", "docstring": "Retrieves the path of a task storage file in the processed directory.\n\nArgs:\ntask (Task): task.\n\nReturns:\nstr: path of a task storage file in the processed directory.", "source": "juraj-google-style"}
{"code": "def get_vocabulary(preprocess_output_dir, name):\n  \n  vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)\n  if not file_io.file_exists(vocab_file):\n    raise ValueError('File %s not found in %s' %\n                     (CATEGORICAL_ANALYSIS % name, preprocess_output_dir))\n\n  labels = python_portable_string(\n      file_io.read_file_to_string(vocab_file)).split('\\n')\n  label_values = [x for x in labels if x]  \n\n  return label_values", "docstring": "Loads the vocabulary file as a list of strings.\n\nArgs:\npreprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.\nname: name of the csv column.\n\nReturns:\nList of strings.\n\nRaises:\nValueError: if file is missing.", "source": "juraj-google-style"}
{"code": "def load_tensor_from_event_file(event_file_path):\n    event = event_pb2.Event()\n    with gfile.Open(event_file_path, 'rb') as f:\n        event.ParseFromString(f.read())\n        return load_tensor_from_event(event)", "docstring": "Load a tensor from an event file.\n\nAssumes that the event file contains a `Event` protobuf and the `Event`\nprotobuf contains a `Tensor` value.\n\nArgs:\nevent_file_path: (`str`) path to the event file.\n\nReturns:\nThe tensor value loaded from the event file, as a `numpy.ndarray`. For\nuninitialized Tensors, returns `None`. For Tensors of data types that\ncannot be converted to `numpy.ndarray` (e.g., `tf.resource`), return\n`None`.", "source": "github-repos"}
{"code": "def unescape(inp, quote='\"'):\n    if (len(inp) < 2):\n        return inp\n    output = ''\n    unesc = False\n    for act in inp:\n        if ((act == quote) and unesc):\n            output = output[:(- 1)]\n        output += act\n        if (act == '\\\\'):\n            unesc = (not unesc)\n        else:\n            unesc = False\n    return output", "docstring": "Unescape `quote` in string `inp`.\n\nExample usage::\n\n>> unescape('hello \\\\\"')\n'hello \"'\n\nArgs:\ninp (str): String in which `quote` will be unescaped.\nquote (char, default \"): Specify which character will be unescaped.\n\nReturns:\nstr: Unescaped string.", "source": "codesearchnet"}
{"code": "def __init__(self, string_table):\n    self._string_table = string_table\n    self._node_name_to_sample = {}", "docstring": "Constructor.\n\nArgs:\nstring_table: A `StringTable` object.", "source": "github-repos"}
{"code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    if isinstance(self.categorical_column, SequenceCategoricalColumn):\n        raise ValueError('In indicator_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    return transformation_cache.get(self, state_manager)", "docstring": "Returns dense `Tensor` representing feature.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.\n\nReturns:\nDense `Tensor` created within `transform_feature`.\n\nRaises:\nValueError: If `categorical_column` is a `SequenceCategoricalColumn`.", "source": "github-repos"}
{"code": "def __init__(self, columns: list[str]) -> None:\n    super().__init__(columns)\n    if not columns:\n        raise RuntimeError('Columns are not specified. Please specify the column for the  op %s' % self.__class__.__name__)", "docstring": "Base Operation class for TFT data processing transformations.\nProcessing logic for the transformation is defined in the\napply_transform() method. If you have a custom transformation that is not\nsupported by the existing transforms, you can extend this class\nand implement the apply_transform() method.\nArgs:\ncolumns: List of column names to apply the transformation.", "source": "github-repos"}
{"code": "def on_fail(self, record):", "docstring": "A function that is executed upon a test failure.\n\nUser implementation is optional.\n\nArgs:\nrecord: records.TestResultRecord, a copy of the test record for\nthis test, containing all information of the test execution\nincluding exception objects.", "source": "github-repos"}
{"code": "def near(point, dist, points):\n    \n    for cmpt in points:\n        if haversine(point, cmpt) <= dist:\n            return True\n    return False", "docstring": "Determine if the given point is within dist of any of points.\n\nArgs:\npoint ((float,float)): A latitude, longitude float tuple.\ndist (int): A distance in mm ( base units )\npoints (list): A list of latitude, longitude float tuples to compare against.", "source": "juraj-google-style"}
{"code": "def ProcessBlocks(self, block_limit=1000):\n    self._lock.acquire()\n    try:\n        blockcount = 0\n        while ((self._current_height <= Blockchain.Default().Height) and ((block_limit == 0) or (blockcount < block_limit))):\n            block = Blockchain.Default().GetBlockByHeight(self._current_height)\n            if (block is not None):\n                self.ProcessNewBlock(block)\n            else:\n                self._current_height += 1\n            blockcount += 1\n        self.SaveStoredData('Height', self._current_height)\n    except Exception as e:\n        logger.warn(('Could not process ::: %s ' % e))\n    finally:\n        self._lock.release()", "docstring": "Method called on a loop to check the current height of the blockchain.  If the height of the blockchain\nis more than the current stored height in the wallet, we get the next block in line and\nprocesses it.\n\nIn the case that the wallet height is far behind the height of the blockchain, we do this 1000\nblocks at a time.\n\nArgs:\nblock_limit (int): the number of blocks to process synchronously. defaults to 1000. set to 0 to block until the wallet is fully rebuilt.", "source": "codesearchnet"}
{"code": "def predict_proba(self, X):\n    return collections.deque(self.iter_predict_proba(X), maxlen=1).pop()", "docstring": "Returns the predicted probabilities for ``X``.\n\nArguments:\nX (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.\nSparse matrices are accepted only if they are supported by the weak model.\n\nReturns:\narray of shape (n_samples, n_classes) containing the predicted probabilities.", "source": "codesearchnet"}
{"code": "def setup(self,\n            hosts, files, use_tsk,\n            reason, grr_server_url, grr_username, grr_password, approvers=None,\n            verify=True):\n    \n    super(GRRFileCollector, self).setup(\n        reason, grr_server_url, grr_username, grr_password,\n        approvers=approvers, verify=verify)\n\n    if files is not None:\n      self.files = [item.strip() for item in files.strip().split(',')]\n\n    self.hostnames = [item.strip() for item in hosts.strip().split(',')]\n    self.use_tsk = use_tsk", "docstring": "Initializes a GRR file collector.\n\nArgs:\nhosts: Comma-separated list of hostnames to launch the flow on.\nfiles: list of file paths.\nuse_tsk: toggle for use_tsk flag on GRR flow.\nreason: justification for GRR access.\ngrr_server_url: GRR server URL.\ngrr_username: GRR username.\ngrr_password: GRR password.\napprovers: list of GRR approval recipients.\nverify: boolean, whether to verify the GRR server's x509 certificate.", "source": "juraj-google-style"}
{"code": "def __init__( self, max_r, number_of_bins ):\n        \n        self.max_r = max_r\n        self.number_of_bins = number_of_bins\n        self.data = np.zeros( number_of_bins )\n        self.dr = max_r / number_of_bins", "docstring": "Initialise a Rdf object for manipulating radial distribution functions.\n\nArgs:\nmax_r (Float): the maximum r value stored for g(r).\nnumber_of_bins (Int): number of bins for storing data about g(r).\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def __init__(self, pb_class_from: Type[FROM], pb_class_to: Type[TO], field_names_to_ignore: Optional[List[str]]=None):\n    if field_names_to_ignore is None:\n        field_names_to_ignore = []\n    self._pb_class_from = pb_class_from\n    self._pb_class_to = pb_class_to\n    self._field_names_to_ignore = field_names_to_ignore\n    self._function_convert_field_names = []\n    self._convert_functions = []\n    self._assert_all_fields_are_handled()", "docstring": "Constructor for the ProtoConverter.\n\nArgs:\npb_class_from: the init method for the proto to convert from.\npb_class_to: the init method for the proto to convert to.\nfield_names_to_ignore: the fields from the source proto that will be\nignored by the converter.\n\nReturns:\nProtoConverter\n\nRaise:\nNotImplementedError: When creating the proto converter if there are\nfields not handled or ignored.", "source": "github-repos"}
{"code": "def ParsePageVisitRow(self, parser_mediator, query, row, **unused_kwargs):\n    query_hash = hash(query)\n    was_http_non_get = self._GetRowValue(query_hash, row, 'http_non_get')\n    event_data = SafariHistoryPageVisitedEventData()\n    event_data.offset = self._GetRowValue(query_hash, row, 'id')\n    event_data.query = query\n    event_data.title = self._GetRowValue(query_hash, row, 'title')\n    event_data.url = self._GetRowValue(query_hash, row, 'url')\n    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')\n    event_data.was_http_non_get = bool(was_http_non_get)\n    timestamp = self._GetRowValue(query_hash, row, 'visit_time')\n    date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a visited row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"}
{"code": "def ScanSource(self, source_path):\n    \n    \n    \n    if os.path.islink(source_path):\n      source_path = os.path.realpath(source_path)\n\n    if (not source_path.startswith('\\\\\\\\.\\\\') and\n        not os.path.exists(source_path)):\n      raise errors.SourceScannerError(\n          'No such device, file or directory: {0:s}.'.format(source_path))\n\n    scan_context = source_scanner.SourceScannerContext()\n    scan_context.OpenSourcePath(source_path)\n\n    try:\n      self._source_scanner.Scan(scan_context)\n    except (ValueError, dfvfs_errors.BackEndError) as exception:\n      raise errors.SourceScannerError(\n          'Unable to scan source with error: {0!s}.'.format(exception))\n\n    if scan_context.source_type not in (\n        scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,\n        scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE):\n      scan_node = scan_context.GetRootScanNode()\n      self._source_path_specs.append(scan_node.path_spec)\n      return scan_context\n\n    \n    scan_node = scan_context.GetRootScanNode()\n    while len(scan_node.sub_nodes) == 1:\n      scan_node = scan_node.sub_nodes[0]\n\n    base_path_specs = []\n    if scan_node.type_indicator != (\n        dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):\n      self._ScanVolume(scan_context, scan_node, base_path_specs)\n\n    else:\n      \n      partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node)\n      if not partition_identifiers:\n        raise errors.SourceScannerError('No partitions found.')\n\n      for partition_identifier in partition_identifiers:\n        location = '/{0:s}'.format(partition_identifier)\n        sub_scan_node = scan_node.GetSubNodeByLocation(location)\n        self._ScanVolume(scan_context, sub_scan_node, base_path_specs)\n\n    if not base_path_specs:\n      raise errors.SourceScannerError(\n          'No supported file system found in source.')\n\n    self._source_path_specs = base_path_specs\n\n    return scan_context", "docstring": "Scans the source path for volume and file systems.\n\nThis function sets the internal source path specification and source\ntype values.\n\nArgs:\nsource_path (str): path to the source.\n\nReturns:\ndfvfs.SourceScannerContext: source scanner context.\n\nRaises:\nSourceScannerError: if the format of or within the source is\nnot supported.", "source": "juraj-google-style"}
{"code": "def events_from_file(filepath):\n    records = list(tf_record.tf_record_iterator(filepath))\n    result = []\n    for r in records:\n        event = event_pb2.Event()\n        event.ParseFromString(r)\n        result.append(event)\n    return result", "docstring": "Returns all events in a single event file.\n\nArgs:\nfilepath: Path to the event file.\n\nReturns:\nA list of all tf.Event protos in the event file.", "source": "github-repos"}
{"code": "def remove_results(vcs, signature):\n    \n    results_directory = _get_results_directory(vcs, signature)\n    if not os.path.exists(results_directory):\n        raise ResultsNotFoundError\n    shutil.rmtree(results_directory)", "docstring": "Removed saved results for this signature\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\nsignature (str)\nRaises:\nResultsNotFoundError", "source": "juraj-google-style"}
{"code": "def right_shift(x, y):\n    if any_symbolic_tensors((x, y)):\n        return RightShift().symbolic_call(x, y)\n    return backend.numpy.right_shift(x, y)", "docstring": "Shift the bits of an integer to the right.\n\nBits are shifted to the right `y`. Because the internal representation of\nnumbers is in binary format, this operation is equivalent to dividing `x` by\n`2**y`.\n\nArgs:\nx: Input integer tensor.\ny: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"}
{"code": "def from_bigquery(sql):\n    if isinstance(sql, bq.Query):\n        sql = sql._expanded_sql()\n    parts = sql.split('.')\n    if ((len(parts) == 1) or (len(parts) > 3) or any(((' ' in x) for x in parts))):\n        sql = (('(' + sql) + ')')\n    else:\n        sql = (('`' + sql) + '`')\n    query = bq.Query(('SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql))\n    df = query.execute().result().to_dataframe()\n    labels = sorted((set(df['target']) | set(df['predicted'])))\n    labels_count = len(labels)\n    df['target'] = [labels.index(x) for x in df['target']]\n    df['predicted'] = [labels.index(x) for x in df['predicted']]\n    cm = [([0] * labels_count) for i in range(labels_count)]\n    for (index, row) in df.iterrows():\n        cm[row['target']][row['predicted']] = row['count']\n    return ConfusionMatrix(cm, labels)", "docstring": "Create a ConfusionMatrix from a BigQuery table or query.\n\nArgs:\nsql: Can be one of:\nA SQL query string.\nA Bigquery table string.\nA Query object defined with '%%bq query --name [query_name]'.\nThe query results or table must include \"target\", \"predicted\" columns.\nReturns:\nA ConfusionMatrix that can be plotted.\nRaises:\nValueError if query results or table does not include 'target' or 'predicted' columns.", "source": "codesearchnet"}
{"code": "def handle_error(self):\n    if (not self.tasks):\n        return\n    self.mark_parent_tasks_as_failed(self.cur_task, flush_logs=True)\n    for (index, task) in enumerate(self.tasks.values()):\n        if self.should_show_by_depth((index + 1)):\n            continue\n        start_task_header = logging.LogRecord('', logging.INFO, '', 0, '', [], None)\n        start_task_header.msg = ColorFormatter.colored('default', START_TASK_MSG)\n        start_task_header.task = task.name\n        self.pretty_emit(start_task_header, is_header=True, task_level=(index + 1))\n    for old_record in self.tasks[self.cur_task]:\n        self.pretty_emit(old_record)\n    self.tasks[self.cur_task].clear()", "docstring": "Handles an error log record that should be shown\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def of(seconds: TimestampTypes) -> 'Timestamp':\n    if isinstance(seconds, Timestamp):\n        return seconds\n    elif isinstance(seconds, (int, float)):\n        return Timestamp(seconds)\n    elif isinstance(seconds, datetime.datetime):\n        return Timestamp.from_utc_datetime(seconds)\n    else:\n        raise TypeError('Cannot interpret %s %s as Timestamp.' % (seconds, type(seconds)))", "docstring": "Return the Timestamp for the given number of seconds.\n\nIf the input is already a Timestamp, the input itself will be returned.\n\nArgs:\nseconds: Number of seconds as int, float, long, or Timestamp.\n\nReturns:\nCorresponding Timestamp object.", "source": "github-repos"}
{"code": "def add(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n    out = math_ops.add(input_tensor, input_tensor)\n    return {'output': out}", "docstring": "Performs an add operation.\n\nArgs:\ninput_tensor: Input tensor to perform add on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"}
{"code": "def validate(self, definition, version=None, strict=False):\n    if (not HAS_KUBERNETES_VALIDATE):\n        raise KubernetesValidateMissing()\n    errors = list()\n    warnings = list()\n    try:\n        if (version is None):\n            try:\n                version = self.version['kubernetes']['gitVersion']\n            except KeyError:\n                version = kubernetes_validate.latest_version()\n        kubernetes_validate.validate(definition, version, strict)\n    except kubernetes_validate.utils.ValidationError as e:\n        errors.append(('resource definition validation error at %s: %s' % ('.'.join([str(item) for item in e.path]), e.message)))\n    except VersionNotSupportedError as e:\n        errors.append(('Kubernetes version %s is not supported by kubernetes-validate' % version))\n    except kubernetes_validate.utils.SchemaNotFoundError as e:\n        warnings.append(('Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)' % (e.kind, e.api_version, e.version)))\n    return (warnings, errors)", "docstring": "validate checks a kubernetes resource definition\n\nArgs:\ndefinition (dict): resource definition\nversion (str): version of kubernetes to validate against\nstrict (bool): whether unexpected additional properties should be considered errors\n\nReturns:\nwarnings (list), errors (list): warnings are missing validations, errors are validation failures", "source": "codesearchnet"}
{"code": "def ranseed(seed=None):\n    if (seed is None):\n        seed = numpy.random.randint(1, int(2000000000.0), size=3)\n    try:\n        seed = tuple(seed)\n    except TypeError:\n        pass\n    numpy.random.seed(seed)\n    ranseed.seed = seed\n    return seed", "docstring": "Seed random number generators with tuple ``seed``.\n\nArgument ``seed`` is an integer or\na :class:`tuple` of integers that is used to seed\nthe random number generators used by :mod:`numpy` and\n:mod:`random` (and therefore by :mod:`gvar`). Reusing\nthe same ``seed`` results in the same set of random numbers.\n\n``ranseed`` generates its own seed when called without an argument\nor with ``seed=None``. This seed is stored in ``ranseed.seed`` and\nalso returned by the function. The seed can be used to regenerate\nthe same set of random numbers at a later time.\n\nArgs:\nseed (int, tuple, or None): Seed for generator. Generates a\nrandom tuple if ``None``.\nReturns:\nThe seed used to reseed the generator.", "source": "codesearchnet"}
{"code": "def contains(self, name):\n    try:\n        self._api.buckets_get(name)\n    except google.datalab.utils.RequestException as e:\n        if (e.status == 404):\n            return False\n        raise e\n    except Exception as e:\n        raise e\n    return True", "docstring": "Checks if the specified bucket exists.\n\nArgs:\nname: the name of the bucket to lookup.\nReturns:\nTrue if the bucket exists; False otherwise.\nRaises:\nException if there was an error requesting information about the bucket.", "source": "codesearchnet"}
{"code": "def prepare_locust_tests(path):\n    \n    tests_mapping = loader.load_tests(path)\n    testcases = parser.parse_tests(tests_mapping)\n\n    locust_tests = []\n\n    for testcase in testcases:\n        testcase_weight = testcase.get(\"config\", {}).pop(\"weight\", 1)\n        for _ in range(testcase_weight):\n            locust_tests.append(testcase)\n\n    return locust_tests", "docstring": "prepare locust testcases\n\nArgs:\npath (str): testcase file path.\n\nReturns:\nlist: locust tests data\n\n[\ntestcase1_dict,\ntestcase2_dict\n]", "source": "juraj-google-style"}
{"code": "class _ConfusionMatrixConditionCount(Metric):\n\n    def __init__(self, confusion_matrix_cond, thresholds=None, name=None, dtype=None):\n        super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)\n        self._confusion_matrix_cond = confusion_matrix_cond\n        self.init_thresholds = thresholds\n        self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=0.5)\n        self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds)\n        self.accumulator = self.add_weight('accumulator', shape=(len(self.thresholds),), initializer=init_ops.zeros_initializer)\n\n    def update_state(self, y_true, y_pred, sample_weight=None):\n        \n        return metrics_utils.update_confusion_matrix_variables({self._confusion_matrix_cond: self.accumulator}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight)\n\n    def result(self):\n        if len(self.thresholds) == 1:\n            result = self.accumulator[0]\n        else:\n            result = self.accumulator\n        return tensor_conversion.convert_to_tensor_v2_with_dispatch(result)\n\n    def reset_state(self):\n        num_thresholds = len(to_list(self.thresholds))\n        backend.batch_set_value([(v, np.zeros((num_thresholds,))) for v in self.variables])\n\n    def get_config(self):\n        config = {'thresholds': self.init_thresholds}\n        base_config = super(_ConfusionMatrixConditionCount, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))", "docstring": "Calculates the number of the given confusion matrix condition.\n\nArgs:\nconfusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.\nthresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple\nof float threshold values in [0, 1]. A threshold is compared with\nprediction values to determine the truth value of predictions (i.e., above\nthe threshold is `true`, below is `false`). One metric value is generated\nfor each threshold value.\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.", "source": "github-repos"}
{"code": "def set_column(self, X, column, value):\n    if isinstance(X, pd.DataFrame):\n        X.loc[(:, column)] = value\n    else:\n        X[(:, column)] = value\n    return X", "docstring": "Sets a column on the matrix X with the given value.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\ncolumn: `int` or `str`.\nvalue: `np.ndarray` with shape (1,)\n\nReturns:\n`np.ndarray` or `pandas.DataFrame` with the inserted column.", "source": "codesearchnet"}
{"code": "def __init__(self, context):\n    \n    self.logdir = context.logdir\n    self.multiplexer = context.multiplexer\n    self.plugin_logdir = plugin_asset_util.PluginDirectory(\n        self.logdir, PLUGIN_NAME)\n    self.stub = None\n    self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel\n\n    \n    \n    self._is_active = False\n    \n    self._is_active_lock = threading.Lock()", "docstring": "Constructs a profiler plugin for TensorBoard.\n\nThis plugin adds handlers for performance-related frontends.\n\nArgs:\ncontext: A base_plugin.TBContext instance.", "source": "juraj-google-style"}
{"code": "def get_protocol_version(protocol=None, target=None):\n    target = get_py_internals(target)\n    if (protocol is None):\n        protocol = target['pickle_default_protocol']\n    if (protocol > cPickle.HIGHEST_PROTOCOL):\n        warnings.warn(('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL))\n        protocol = cPickle.HIGHEST_PROTOCOL\n    target_highest_protocol = target['pickle_highest_protocol']\n    if (protocol > target_highest_protocol):\n        warnings.warn(('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol))\n        protocol = target_highest_protocol\n    return protocol", "docstring": "Return a suitable pickle protocol version for a given target.\n\nArguments:\ntarget: The internals description of the targeted python\nversion. If this is ``None`` the specification of the currently\nrunning python version will be used.\nprotocol(None or int): The requested protocol version (or None for the\ndefault of the target python version).\n\nReturns:\nint: A suitable pickle protocol version.", "source": "codesearchnet"}
{"code": "def set_hook_data(self, key, data):\n        \n\n        if not isinstance(data, collections.Mapping):\n            raise ValueError(\"Hook (key: %s) data must be an instance of \"\n                             \"collections.Mapping (a dictionary for \"\n                             \"example).\" % key)\n\n        if key in self.hook_data:\n            raise KeyError(\"Hook data for key %s already exists, each hook \"\n                           \"must have a unique data_key.\", key)\n\n        self.hook_data[key] = data", "docstring": "Set hook data for the given key.\n\nArgs:\nkey(str): The key to store the hook data in.\ndata(:class:`collections.Mapping`): A dictionary of data to store,\nas returned from a hook.", "source": "juraj-google-style"}
{"code": "def set_energy_upperbound(self, spins, offset=0):\n        \n        spin_energy = self.energy_upperbound(spins)\n        self.assertions.add(GE(spin_energy, self.gap + offset))", "docstring": "Upper bound the energy of Theta with spins fixed to be greater than (gap + offset).\n\nArgs:\nspins (dict): Spin values for a subset of the variables in Theta.\noffset (float): A value that is added to the upper bound. Default value is 0.\n\nNotes:\nAdd equality constraint to assertions.", "source": "juraj-google-style"}
{"code": "def on_test_batch_end(self, batch, logs=None):", "docstring": "Called at the end of a batch in `evaluate` methods.\n\nAlso called at the end of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict. Aggregated metric results up until this batch.", "source": "github-repos"}
{"code": "def _ParseLine(self, parser_mediator, structure):\n    (month, day_of_month, year, hours, minutes, seconds, milliseconds) = structure.date_time\n    year += 2000\n    time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds, milliseconds)\n    try:\n        date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple)\n    except ValueError:\n        parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time))\n        return\n    event_data = SkyDriveLogEventData()\n    event_data.detail = structure.detail.replace('\\n', ' ')\n    event_data.log_level = structure.log_level\n    event_data.module = structure.module\n    event_data.source_code = structure.source_code\n    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a logline and store appropriate attributes.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "codesearchnet"}
{"code": "def list(self, pattern='*'):\n    if (self._descriptors is None):\n        self._descriptors = self._client.list_resource_descriptors(filter_string=self._filter_string)\n    return [resource for resource in self._descriptors if fnmatch.fnmatch(resource.type, pattern)]", "docstring": "Returns a list of resource descriptors that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the descriptors. This can\ninclude Unix shell-style wildcards. E.g. ``\"aws*\"``, ``\"*cluster*\"``.\n\nReturns:\nA list of ResourceDescriptor objects that match the filters.", "source": "codesearchnet"}
{"code": "def _Dhcpcd(self, interfaces, logger):\n    \n    for interface in interfaces:\n      dhcpcd = ['/sbin/dhcpcd']\n      try:\n        subprocess.check_call(dhcpcd + ['-x', interface])\n      except subprocess.CalledProcessError:\n        \n        logger.info('Dhcpcd not yet running for interface %s.', interface)\n      try:\n        subprocess.check_call(dhcpcd + [interface])\n      except subprocess.CalledProcessError:\n        \n        logger.warning('Could not activate interface %s.', interface)", "docstring": "Use dhcpcd to activate the interfaces.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"}
{"code": "def unreduce_like(array, original_array, axis, keepdims):\n  \n  atype = type(array)\n  unreducer = unreducers[atype]\n  shape = shape_functions[atype]\n  return unreducer(array, shape(original_array), axis, keepdims)", "docstring": "Reverse summing over a dimension.\n\nArgs:\narray: The array that was reduced.\noriginal_array: An array whose shape to unreduce to.\naxis: The axis or axes that were summed.\nkeepdims: Whether these axes were kept as singleton axes.\n\nReturns:\nAn array with axes broadcast to match the shape of the original array.", "source": "juraj-google-style"}
{"code": "def ProcessConfigOverrides(filename):\n    abs_filename = os.path.abspath(filename)\n    cfg_filters = []\n    keep_looking = True\n    while keep_looking:\n        (abs_path, base_name) = os.path.split(abs_filename)\n        if (not base_name):\n            break\n        cfg_file = os.path.join(abs_path, 'CPPLINT.cfg')\n        abs_filename = abs_path\n        if (not os.path.isfile(cfg_file)):\n            continue\n        try:\n            with open(cfg_file) as file_handle:\n                for line in file_handle:\n                    (line, _, _) = line.partition('\n                    if (not line.strip()):\n                        continue\n                    (name, _, val) = line.partition('=')\n                    name = name.strip()\n                    val = val.strip()\n                    if (name == 'set noparent'):\n                        keep_looking = False\n                    elif (name == 'filter'):\n                        cfg_filters.append(val)\n                    elif (name == 'exclude_files'):\n                        if base_name:\n                            pattern = re.compile(val)\n                            if pattern.match(base_name):\n                                sys.stderr.write(('Ignoring \"%s\": file excluded by \"%s\". File path component \"%s\" matches pattern \"%s\"\\n' % (filename, cfg_file, base_name, val)))\n                                return False\n                    elif (name == 'linelength'):\n                        global _line_length\n                        try:\n                            _line_length = int(val)\n                        except ValueError:\n                            sys.stderr.write('Line length must be numeric.')\n                    else:\n                        sys.stderr.write(('Invalid configuration option (%s) in file %s\\n' % (name, cfg_file)))\n        except IOError:\n            sys.stderr.write((\"Skipping config file '%s': Can't open for reading\\n\" % cfg_file))\n            keep_looking = False\n    for filter in reversed(cfg_filters):\n        _AddFilters(filter)\n    return True", "docstring": "Loads the configuration files and processes the config overrides.\n\nArgs:\nfilename: The name of the file being processed by the linter.\n\nReturns:\nFalse if the current |filename| should not be processed further.", "source": "codesearchnet"}
{"code": "def confirm(message: Text, default: bool=True, qmark: Text=DEFAULT_QUESTION_PREFIX, style: Optional[Style]=None, **kwargs: Any) -> Question:\n    merged_style = merge_styles([DEFAULT_STYLE, style])\n    status = {'answer': None}\n\n    def get_prompt_tokens():\n        tokens = []\n        tokens.append(('class:qmark', qmark))\n        tokens.append(('class:question', ' {} '.format(message)))\n        if (status['answer'] is not None):\n            answer = ' {}'.format((YES if status['answer'] else NO))\n            tokens.append(('class:answer', answer))\n        else:\n            instruction = ' {}'.format((YES_OR_NO if default else NO_OR_YES))\n            tokens.append(('class:instruction', instruction))\n        return to_formatted_text(tokens)\n    bindings = KeyBindings()\n\n    @bindings.add(Keys.ControlQ, eager=True)\n    @bindings.add(Keys.ControlC, eager=True)\n    def _(event):\n        event.app.exit(exception=KeyboardInterrupt, style='class:aborting')\n\n    @bindings.add('n')\n    @bindings.add('N')\n    def key_n(event):\n        status['answer'] = False\n        event.app.exit(result=False)\n\n    @bindings.add('y')\n    @bindings.add('Y')\n    def key_y(event):\n        status['answer'] = True\n        event.app.exit(result=True)\n\n    @bindings.add(Keys.ControlM, eager=True)\n    def set_answer(event):\n        status['answer'] = default\n        event.app.exit(result=default)\n\n    @bindings.add(Keys.Any)\n    def other(event):\n        'Disallow inserting other text.'\n        pass\n    return Question(PromptSession(get_prompt_tokens, key_bindings=bindings, style=merged_style, **kwargs).app)", "docstring": "Prompt the user to confirm or reject.\n\nThis question type can be used to prompt the user for a confirmation\nof a yes-or-no question. If the user just hits enter, the default\nvalue will be returned.\n\nArgs:\nmessage: Question text\n\ndefault: Default value will be returned if the user just hits\nenter.\n\nqmark: Question prefix displayed in front of the question.\nBy default this is a `?`\n\nstyle: A custom color and style for the question parts. You can\nconfigure colors as well as font types for different elements.\n\nReturns:\nQuestion: Question instance, ready to be prompted (using `.ask()`).", "source": "codesearchnet"}
{"code": "def _validate_alias_file_content(alias_file_path, url=''):\n    alias_table = get_config_parser()\n    try:\n        alias_table.read(alias_file_path)\n        for (alias_name, alias_command) in reduce_alias_table(alias_table):\n            _validate_alias_name(alias_name)\n            _validate_alias_command(alias_command)\n            _validate_alias_command_level(alias_name, alias_command)\n            _validate_pos_args_syntax(alias_name, alias_command)\n    except Exception as exception:\n        error_msg = (CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception))\n        error_msg = error_msg.replace(alias_file_path, (url or alias_file_path))\n        raise CLIError(error_msg)", "docstring": "Make sure the alias name and alias command in the alias file is in valid format.\n\nArgs:\nThe alias file path to import aliases from.", "source": "codesearchnet"}
{"code": "class DetrEncoder(DetrPreTrainedModel):\n\n    def __init__(self, config: DetrConfig):\n        super().__init__(config)\n        self.dropout = config.dropout\n        self.layerdrop = config.encoder_layerdrop\n        self.layers = nn.ModuleList([DetrEncoderLayer(config) for _ in range(config.encoder_layers)])\n        self.post_init()\n\n    def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n        \n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n        hidden_states = inputs_embeds\n        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n        if attention_mask is not None:\n            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n        encoder_states = () if output_hidden_states else None\n        all_attentions = () if output_attentions else None\n        for i, encoder_layer in enumerate(self.layers):\n            if output_hidden_states:\n                encoder_states = encoder_states + (hidden_states,)\n            to_drop = False\n            if self.training:\n                dropout_probability = torch.rand([])\n                if dropout_probability < self.layerdrop:\n                    to_drop = True\n            if to_drop:\n                layer_outputs = (None, None)\n            else:\n                layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n                hidden_states = layer_outputs[0]\n            if output_attentions:\n                all_attentions = all_attentions + (layer_outputs[1],)\n        if output_hidden_states:\n            encoder_states = encoder_states + (hidden_states,)\n        if not return_dict:\n            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n[`DetrEncoderLayer`].\n\nThe encoder updates the flattened feature map through multiple self-attention layers.\n\nSmall tweak for DETR:\n\n- object_queries are added to the forward pass.\n\nArgs:\nconfig: DetrConfig", "source": "github-repos"}
{"code": "def addColumn(self, header, values=[]):\n        \n        if len(values) == 0:\n            self._impl.addColumn(header)\n        else:\n            assert len(values) == self.getNumRows()\n            if any(isinstance(value, basestring) for value in values):\n                values = list(map(str, values))\n                self._impl.addColumnStr(header, values)\n            elif all(isinstance(value, Real) for value in values):\n                values = list(map(float, values))\n                self._impl.addColumnDbl(header, values)\n            else:\n                raise NotImplementedError", "docstring": "Add a new column with the corresponding header and values to the\ndataframe.\n\nArgs:\nheader: The name of the new column.\n\nvalues: A list of size :func:`~amplpy.DataFrame.getNumRows` with\nall the values of the new column.", "source": "juraj-google-style"}
{"code": "def WriteSignedBinary(binary_urn, binary_content, private_key, public_key, chunk_size=1024, token=None):\n    if _ShouldUseLegacyDatastore():\n        collects.GRRSignedBlob.NewFromContent(binary_content, binary_urn, chunk_size=chunk_size, token=token, private_key=private_key, public_key=public_key)\n    if data_store.RelationalDBEnabled():\n        blob_references = rdf_objects.BlobReferences()\n        for chunk_offset in range(0, len(binary_content), chunk_size):\n            chunk = binary_content[chunk_offset:(chunk_offset + chunk_size)]\n            blob_rdf = rdf_crypto.SignedBlob()\n            blob_rdf.Sign(chunk, private_key, verify_key=public_key)\n            blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob_rdf.SerializeToString())\n            blob_references.items.Append(rdf_objects.BlobReference(offset=chunk_offset, size=len(chunk), blob_id=blob_id))\n        data_store.REL_DB.WriteSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn), blob_references)", "docstring": "Signs a binary and saves it to the datastore.\n\nIf a signed binary with the given URN already exists, its contents will get\noverwritten.\n\nArgs:\nbinary_urn: URN that should serve as a unique identifier for the binary.\nbinary_content: Contents of the binary, as raw bytes.\nprivate_key: Key that should be used for signing the binary contents.\npublic_key: Key that should be used to verify the signature generated using\nthe private key.\nchunk_size: Size, in bytes, of the individual blobs that the binary contents\nwill be split to before saving to the datastore.\ntoken: ACL token to use with the legacy (non-relational) datastore.", "source": "codesearchnet"}
{"code": "def _filter_pb(field_or_unary):\n    if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter):\n        return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary)\n    elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter):\n        return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary)\n    else:\n        raise ValueError('Unexpected filter type', type(field_or_unary), field_or_unary)", "docstring": "Convert a specific protobuf filter to the generic filter type.\n\nArgs:\nfield_or_unary (Union[google.cloud.proto.firestore.v1beta1.\\\nquery_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\\\nfirestore.v1beta1.query_pb2.StructuredQuery.FieldFilter]): A\nfield or unary filter to convert to a generic filter.\n\nReturns:\ngoogle.cloud.firestore_v1beta1.types.\\\nStructuredQuery.Filter: A \"generic\" filter.\n\nRaises:\nValueError: If ``field_or_unary`` is not a field or unary filter.", "source": "codesearchnet"}
{"code": "def pivot_samples(self, values, index='ID_REF'):\n    data = []\n    for gsm in self.gsms.values():\n        tmp_data = gsm.table.copy()\n        tmp_data['name'] = gsm.name\n        data.append(tmp_data)\n    ndf = concat(data).pivot(index=index, values=values, columns='name')\n    return ndf", "docstring": "Pivot samples by specified column.\n\nConstruct a table in which columns (names) are the samples, index\nis a specified column eg. ID_REF and values in the columns are of one\nspecified type.\n\nArgs:\nvalues (:obj:`str`): Column name present in all GSMs.\nindex (:obj:`str`, optional): Column name that will become an index in\npivoted table. Defaults to \"ID_REF\".\n\nReturns:\n:obj:`pandas.DataFrame`: Pivoted data", "source": "codesearchnet"}
{"code": "def listtransactions(self, user_id=\"\", count=10, start_at=0):\n        \n        txlist = self.rpc.call(\"listtransactions\", user_id, count, start_at)\n        self.logger.debug(\"Got transaction list for \" + str(user_id))\n        return txlist", "docstring": "List all transactions associated with this account.\n\nArgs:\nuser_id (str): this user's unique identifier\ncount (int): number of transactions to return (default=10)\nstart_at (int): start the list at this transaction (default=0)\n\nReturns:\nlist [dict]: transactions associated with this user's account", "source": "juraj-google-style"}
{"code": "def split_metrics_by_namespace_and_name(metrics, namespace, name):\n    matching_metrics = []\n    not_matching_metrics = []\n    for dist in metrics:\n        if dist.key.metric.namespace == namespace and dist.key.metric.name == name:\n            matching_metrics.append(dist)\n        else:\n            not_matching_metrics.append(dist)\n    return (matching_metrics, not_matching_metrics)", "docstring": "Splits metrics list namespace and name.\n\nArgs:\nmetrics: list of metrics from pipeline result\nnamespace(str): filter metrics by namespace\nname(str): filter metrics by name\n\nReturns:\ntwo lists - one of metrics which are matching filters\nand second of not matching", "source": "github-repos"}
{"code": "def get_id(date=None, project: str = 'sip',\n               instance_id: int = None) -> str:\n        \n        if date is None:\n            date = datetime.datetime.utcnow()\n\n        if isinstance(date, datetime.datetime):\n            date = date.strftime('%Y%m%d')\n\n        if instance_id is None:\n            instance_id = randint(0, 9999)\n\n        return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)", "docstring": "Get a SBI Identifier.\n\nArgs:\ndate (str or datetime.datetime, optional): UTC date of the SBI\nproject (str, optional ): Project Name\ninstance_id (int, optional): SBI instance identifier\n\nReturns:\nstr, Scheduling Block Instance (SBI) ID.", "source": "juraj-google-style"}
{"code": "def field_content_length(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> int:\n    if isinstance(field, str):\n        field = _field_descriptor_for_name(msg, field)\n    if field_is_repeated(field):\n        return len(getattr(msg, field.name))\n    return 1 if msg.HasField(field.name) else 0", "docstring": "Returns the size of the field.\n\nArgs:\nmsg: The Message whose fields to examine.\nfield: The FieldDescriptor or name of the field to examine.\n\nReturns:\nThe number of elements at the provided field. If field describes a singular\nprotobuf field, this will return 1. If the field is not set, returns 0.", "source": "github-repos"}
{"code": "def __write_error(self, status_code, error_message=None):\n    if (error_message is None):\n        error_message = httplib.responses[status_code]\n    status = ('%d %s' % (status_code, httplib.responses[status_code]))\n    message = EndpointsErrorMessage(state=EndpointsErrorMessage.State.APPLICATION_ERROR, error_message=error_message)\n    return (status, self.__PROTOJSON.encode_message(message))", "docstring": "Return the HTTP status line and body for a given error code and message.\n\nArgs:\nstatus_code: HTTP status code to be returned.\nerror_message: Error message to be returned.\n\nReturns:\nTuple (http_status, body):\nhttp_status: HTTP status line, e.g. 200 OK.\nbody: Body of the HTTP request.", "source": "codesearchnet"}
{"code": "def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names):\n    \n    print('Converting transposed convolution ...')\n\n    if names == 'short':\n        tf_name = 'C' + random_string(7)\n    elif names == 'keep':\n        tf_name = w_name\n    else:\n        tf_name = w_name + str(random.random())\n\n    bias_name = '{0}.bias'.format(w_name)\n    weights_name = '{0}.weight'.format(w_name)\n\n    if len(weights[weights_name].numpy().shape) == 4:\n        W = weights[weights_name].numpy().transpose(2, 3, 1, 0)\n        height, width, n_filters, channels = W.shape\n\n        n_groups = params['group']\n        if n_groups > 1:\n            raise AssertionError('Cannot convert conv1d with groups != 1')\n\n        if params['dilations'][0] > 1:\n            raise AssertionError('Cannot convert conv1d with dilation_rate != 1')\n\n        if bias_name in weights:\n            biases = weights[bias_name].numpy()\n            has_bias = True\n        else:\n            biases = None\n            has_bias = False\n\n        input_name = inputs[0]\n\n        if has_bias:\n            weights = [W, biases]\n        else:\n            weights = [W]\n\n        conv = keras.layers.Conv2DTranspose(\n            filters=n_filters,\n            kernel_size=(height, width),\n            strides=(params['strides'][0], params['strides'][1]),\n            padding='valid',\n            output_padding=0,\n            weights=weights,\n            use_bias=has_bias,\n            activation=None,\n            dilation_rate=params['dilations'][0],\n            bias_initializer='zeros', kernel_initializer='zeros',\n            name=tf_name\n        )\n\n        layers[scope_name] = conv(layers[input_name])\n\n        \n        \n        layers[scope_name].set_shape(layers[scope_name]._keras_shape)\n\n        pads = params['pads']\n        if pads[0] > 0:\n            assert(len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1]))\n\n            crop = keras.layers.Cropping2D(\n                pads[:2],\n                name=tf_name + '_crop'\n            )\n            layers[scope_name] = crop(layers[scope_name])\n    else:\n        raise AssertionError('Layer is not supported for now')", "docstring": "Convert transposed convolution layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"}
{"code": "def format_float(digit=0, is_pct=False):\n    if is_pct:\n        space = (' ' if (digit < 0) else '')\n        fmt = f'{{:{space}.{abs(int(digit))}%}}'\n        return (lambda vv: ('NaN' if np.isnan(vv) else fmt.format(vv)))\n    else:\n        return (lambda vv: ('NaN' if np.isnan(vv) else (f'{{:,.{digit}f}}'.format(vv) if vv else ('-' + (' ' * abs(digit))))))", "docstring": "Number display format for pandas\n\nArgs:\ndigit: number of digits to keep\nif negative, add one space in front of positive pct\nis_pct: % display\n\nReturns:\nlambda function to format floats\n\nExamples:\n>>> format_float(0)(1e5)\n'100,000'\n>>> format_float(1)(1e5)\n'100,000.0'\n>>> format_float(-1, True)(.2)\n' 20.0%'\n>>> format_float(-1, True)(-.2)\n'-20.0%'\n>>> pd.options.display.float_format = format_float(2)", "source": "codesearchnet"}
{"code": "def shutdown(self, vm_names=None, reboot=False):\n        \n        self.virt_env.shutdown(vm_names, reboot)", "docstring": "Shutdown this prefix\n\nArgs:\nvm_names(list of str): List of the vms to shutdown\nreboot(bool): If true, reboot the requested vms\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def _match_value_against_type(self, value: cfg.Binding, other_type: abstract.BaseValue, subst: _SubstType, view: _ViewType) -> _SubstType | None:\n    left = value.data\n    left = abstract_utils.unwrap_final(left)\n    other_type = abstract_utils.unwrap_final(other_type)\n    is_recursive = abstract_utils.is_recursive_annotation(other_type)\n    if is_recursive:\n        key = (left, other_type)\n        if key in self._recursive_annots_cache:\n            return subst if self._recursive_annots_cache[key] else None\n        self._recursive_annots_cache[key] = True\n    subst = self._match_nonfinal_value_against_type(left, value, other_type, subst, view)\n    if is_recursive:\n        self._recursive_annots_cache[key] = subst is not None\n    return subst", "docstring": "One-way unify value into pytd type given a substitution.\n\nArgs:\nvalue: A cfg.Binding.\nother_type: A BaseValue instance.\nsubst: The current substitution. This dictionary is not modified.\nview: A mapping of Variable to Value.\n\nReturns:\nA new (or unmodified original) substitution dict if the matching\nsucceeded, None otherwise.", "source": "github-repos"}
{"code": "def In(self, *values):\n    \n    self._awql = self._CreateMultipleValuesCondition(values, 'IN')\n    return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"in\".\n\nArgs:\n*values: The values to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"}
{"code": "def get_field(self, key: str) -> Optional[Field]:\n    if key in self._fields:\n        return self._fields[key]\n    if self._allow_nonconst_keys:\n        for key_spec, field in self._fields.items():\n            if key_spec.match(key):\n                return field\n    return None", "docstring": "Get field definition (Field) for a key.\n\nArgs:\nkey: string as input key.\n\nReturns:\nMatched field. A field is considered a match when:\n* Its key spec is a ConstStrKey that equals to the input key.\n* Or it's the first field whose key spec is a NonConstKey\nwhich matches the input key.", "source": "github-repos"}
{"code": "def forward_loss(self, pixel_values, pred, mask, interpolate_pos_encoding: bool=False):\n    target = self.patchify(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)\n    if self.config.norm_pix_loss:\n        mean = target.mean(dim=-1, keepdim=True)\n        var = target.var(dim=-1, keepdim=True)\n        target = (target - mean) / (var + 1e-06) ** 0.5\n    loss = (pred - target) ** 2\n    loss = loss.mean(dim=-1)\n    loss = (loss * mask).sum() / mask.sum()\n    return loss", "docstring": "Args:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\nPixel values.\npred (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\nPredicted pixel values.\nmask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\nTensor indicating which patches are masked (1) and which are not (0).\ninterpolate_pos_encoding (`bool`, *optional*, default `False`):\ninterpolation flag passed during the forward pass.\n\nReturns:\n`torch.FloatTensor`: Pixel reconstruction loss.", "source": "github-repos"}
{"code": "def monitor(self, job, event_monitor, result_monitor):\n    logging.info('starting to monitor the job')\n    last_active_ms = -1\n    perf = None\n    cancel_job = False\n    waiting_for_shutdown = False\n    while True:\n        now = int(time.time() * 1000)\n        logging.debug('now is %d', now)\n        curr_perf = NexmarkLauncher.get_performance(job, event_monitor, result_monitor)\n        if perf is None or curr_perf.has_progress(perf):\n            last_active_ms = now\n        if self.streaming and (not waiting_for_shutdown):\n            quiet_duration = (now - last_active_ms) \n            if curr_perf.event_count >= self.args.num_events and curr_perf.result_count >= 0 and (quiet_duration > self.DONE_DELAY):\n                logging.info('streaming query appears to have finished executing')\n                waiting_for_shutdown = True\n                cancel_job = True\n            elif quiet_duration > self.TERMINATE_DELAY:\n                logging.error('streaming query have been stuck for %d seconds', quiet_duration)\n                logging.error('canceling streaming job')\n                waiting_for_shutdown = True\n                cancel_job = True\n            elif quiet_duration > self.WARNING_DELAY:\n                logging.warning('streaming query have been stuck for %d seconds', quiet_duration)\n            if cancel_job:\n                job.cancel()\n        perf = curr_perf\n        stopped = PipelineState.is_terminal(job.state)\n        if stopped:\n            break\n        if not waiting_for_shutdown:\n            if last_active_ms == now:\n                logging.info('activity seen, new performance data extracted')\n            else:\n                logging.info('no activity seen')\n        else:\n            logging.info('waiting for shutdown')\n        time.sleep(self.PERF_DELAY)\n    return perf", "docstring": "keep monitoring the performance and progress of running job and cancel\nthe job if the job is stuck or seems to have finished running\n\nReturns:\nthe final performance if it is measured", "source": "github-repos"}
{"code": "def _set_shape(self, shape):\n    shape = tensor_shape.as_shape(shape)\n    if shape.rank is None:\n        return\n    shape = shape.as_list()\n    if shape[0] is not None:\n        self._row_partition._row_splits.set_shape(shape[0] + 1)\n    dtype = self._row_partition.dtype\n    for i, partition in enumerate(self._nested_row_partitions):\n        size = shape[i + 1]\n        if size is not None:\n            if partition._uniform_row_length is not None:\n                old_row_length = tensor_util.constant_value(partition._uniform_row_length)\n                if old_row_length is not None:\n                    if size == old_row_length:\n                        continue\n                    else:\n                        raise ValueError(f'Inconsistent size for axis {i + 1}: {old_row_length} vs. {size}.')\n            partition._uniform_row_length = ops.convert_to_tensor(size, dtype)\n            if partition._nrows is None:\n                partition._nrows = array_ops.size(partition._row_splits, out_type=dtype) - 1\n    if hasattr(self.flat_values, 'set_shape'):\n        flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])\n        self.flat_values.set_shape(flat_shape)", "docstring": "Updates the static shape of `self` to be `shape`.\n\n* If a dimension of `shape` has known rank, and is encoded via\npartitioning, then this will update the corresponding partition to\ndefine `_uniform_row_length` and `nrows`.\n* If a dimension of `shape` has a known rank, and is encoded as one\nof the `flat_values` dimensions, then `flat_values.set_shape()` will\nbe used to update its shape.\n\nWarning: Using this method to assert an incorrect shape for a RaggedTensor\n(i.e., one that's not consistent with its actual shape) can cause\nsegmentation faults and very difficult-to-diagnose behavior.  Only use this\nmethod if you are certain that the shape is correct.\n\nArgs:\nshape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.", "source": "github-repos"}
{"code": "def key_periods(ciphertext, max_key_period):\n    \n    if max_key_period <= 0:\n        raise ValueError(\"max_key_period must be a positive integer\")\n\n    key_scores = []\n    for period in range(1, min(max_key_period, len(ciphertext)) + 1):\n        score = abs(ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period)))\n        key_scores.append((period, score))\n\n    return [p[0] for p in sorted(key_scores, key=lambda x: x[1])]", "docstring": "Rank all key periods for ``ciphertext`` up to and including ``max_key_period``\n\nExample:\n>>> key_periods(ciphertext, 30)\n[2, 4, 8, 3, ...]\n\nArgs:\nciphertext (str): The text to analyze\nmax_key_period (int): The maximum period the key could be\n\nReturns:\nSorted list of keys\n\nRaises:\nValueError: If max_key_period is less than or equal to 0", "source": "juraj-google-style"}
{"code": "def to_matrix( xx, yy, zz, xy, yz, xz ):\n    \n    matrix = np.array( [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]] )\n    return matrix", "docstring": "Convert a list of matrix components to a symmetric 3x3 matrix.\nInputs should be in the order xx, yy, zz, xy, yz, xz.\n\nArgs:\nxx (float): xx component of the matrix.\nyy (float): yy component of the matrix.\nzz (float): zz component of the matrix.\nxy (float): xy component of the matrix.\nyz (float): yz component of the matrix.\nxz (float): xz component of the matrix.\n\nReturns:\n(np.array): The matrix, as a 3x3 numpy array.", "source": "juraj-google-style"}
{"code": "def _find_hstreaming():\n    global WARNED_HADOOP_HOME, HADOOP_STREAMING_PATH_CACHE\n    if HADOOP_STREAMING_PATH_CACHE:\n        return HADOOP_STREAMING_PATH_CACHE\n    try:\n        search_root = os.environ['HADOOP_HOME']\n    except KeyError:\n        search_root = '/'\n    cmd = ('find %s -name hadoop*streaming*.jar' % search_root)\n    p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    HADOOP_STREAMING_PATH_CACHE = p.communicate()[0].split('\\n')[0]\n    if ((search_root == '/') and (not WARNED_HADOOP_HOME)):\n        WARNED_HADOOP_HOME = True\n        hadoop_home = HADOOP_STREAMING_PATH_CACHE[:HADOOP_STREAMING_PATH_CACHE.rfind('/contrib/')]\n        logging.warn(('Set the HADOOP_HOME environmental variable to your hadoop path to improve performance. Put the following [export HADOOP_HOME=\"%s\"] in ~/.bashrc' % hadoop_home))\n    return HADOOP_STREAMING_PATH_CACHE", "docstring": "Finds the whole path to the hadoop streaming jar.\n\nIf the environmental var HADOOP_HOME is specified, then start the search\nfrom there.\n\nReturns:\nFull path to the hadoop streaming jar if found, else return an empty\nstring.", "source": "codesearchnet"}
{"code": "def import_subview(self, idx, subview):\n    subview.corpus = self\n    self._subviews[idx] = subview", "docstring": "Add the given subview to the corpus.\n\nArgs:\nidx (str): An idx that is unique in the corpus for identifying the subview.\nIf already a subview exists with the given id it will be overridden.\nsubview (Subview): The subview to add.", "source": "codesearchnet"}
{"code": "def batch_shape_tensor(self, name='batch_shape_tensor'):\n    with self._name_scope(name):\n        if self.batch_shape.is_fully_defined():\n            return ops.convert_to_tensor(self.batch_shape.as_list(), dtype=dtypes.int32, name='batch_shape')\n        return self._batch_shape_tensor()", "docstring": "Shape of a single sample from a single event index as a 1-D `Tensor`.\n\nThe batch dimensions are indexes into independent, non-identical\nparameterizations of this distribution.\n\nArgs:\nname: name to give to the op\n\nReturns:\nbatch_shape: `Tensor`.", "source": "github-repos"}
{"code": "def add_derivatives(self, path, **kwargs):\n    paths = listify(path)\n    deriv_dirs = []\n\n    def check_for_description(dir):\n        dd = os.path.join(dir, 'dataset_description.json')\n        return os.path.exists(dd)\n    for p in paths:\n        p = os.path.abspath(p)\n        if os.path.exists(p):\n            if check_for_description(p):\n                deriv_dirs.append(p)\n            else:\n                subdirs = [d for d in os.listdir(p) if os.path.isdir(os.path.join(p, d))]\n                for sd in subdirs:\n                    sd = os.path.join(p, sd)\n                    if check_for_description(sd):\n                        deriv_dirs.append(sd)\n    if (not deriv_dirs):\n        warnings.warn(\"Derivative indexing was enabled, but no valid derivatives datasets were found in any of the provided or default locations. Please make sure all derivatives datasets you intend to index contain a 'dataset_description.json' file, as described in the BIDS-derivatives specification.\")\n    for deriv in deriv_dirs:\n        dd = os.path.join(deriv, 'dataset_description.json')\n        with open(dd, 'r', encoding='utf-8') as ddfd:\n            description = json.load(ddfd)\n        pipeline_name = description.get('PipelineDescription', {}).get('Name')\n        if (pipeline_name is None):\n            raise ValueError('Every valid BIDS-derivatives dataset must have a PipelineDescription.Name field set inside dataset_description.json.')\n        if (pipeline_name in self.derivatives):\n            raise ValueError(\"Pipeline name '%s' has already been added to this BIDSLayout. Every added pipeline must have a unique name!\")\n        kwargs['config'] = (kwargs.get('config') or ['bids', 'derivatives'])\n        kwargs['sources'] = (kwargs.get('sources') or self)\n        self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)\n    for deriv in self.derivatives.values():\n        self.entities.update(deriv.entities)", "docstring": "Add BIDS-Derivatives datasets to tracking.\n\nArgs:\npath (str, list): One or more paths to BIDS-Derivatives datasets.\nEach path can point to either a derivatives/ directory\ncontaining one more more pipeline directories, or to a single\npipeline directory (e.g., derivatives/fmriprep).\nkwargs (dict): Optional keyword arguments to pass on to\nBIDSLayout() when initializing each of the derivative datasets.\n\nNote: Every derivatives directory intended for indexing MUST contain a\nvalid dataset_description.json file. See the BIDS-Derivatives\nspecification for details.", "source": "codesearchnet"}
{"code": "def connect(self,\n                fedora_url,\n                data=None,\n                method='Get'):\n        \n        if data is None:\n            data = {}\n        if not fedora_url.startswith(\"http\"):\n            fedora_url = urllib.parse.urljoin(self.base_url, fedora_url)\n        request = urllib.request.Request(fedora_url,\n                                         method=method)\n        request.add_header('Accept', 'text/turtle')\n        request.add_header('Content-Type', 'text/turtle')\n        if len(data) > 0:\n            request.data = data\n        try:\n            response = urllib.request.urlopen(request)\n        except urllib.error.URLError as err:\n            if hasattr(err, 'reason'):\n                print(\"failed to reach server at {} with {} method\".format(\n                    fedora_url,\n                    request.method))\n                print(\"Reason: \", err.reason)\n                print(\"Data: \", data)\n            elif hasattr(err, 'code'):\n                print(\"Server error {}\".format(err.code))\n            raise err\n        return response", "docstring": "Method attempts to connect to REST servers of the Fedora\nCommons repository using optional data parameter.\n\nArgs:\nfedora_url(string): Fedora URL\ndata(dict): Data to through to REST endpoint\nmethod(str): REST Method, defaults to GET\n\nReturns:\nresult(string): Response string from Fedora", "source": "juraj-google-style"}
{"code": "def _inchi_labels(mol):\n        \n        obconv = ob.OBConversion()\n        obconv.SetOutFormat(str(\"inchi\"))\n        obconv.AddOption(str(\"a\"), ob.OBConversion.OUTOPTIONS)\n        obconv.AddOption(str(\"X\"), ob.OBConversion.OUTOPTIONS, str(\"DoNotAddH\"))\n        inchi_text = obconv.WriteString(mol)\n        match = re.search(r\"InChI=(?P<inchi>.+)\\nAuxInfo=.+\"\n                          r\"/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9,\"\n                          r\";\\(\\)]*)/)?\", inchi_text)\n        inchi = match.group(\"inchi\")\n        label_text = match.group(\"labels\")\n        eq_atom_text = match.group(\"eq_atoms\")\n        heavy_atom_labels = tuple([int(i) for i in label_text.replace(\n            ';', ',').split(',')])\n        eq_atoms = []\n        if eq_atom_text is not None:\n            eq_tokens = re.findall(r'\\(((?:[0-9]+,)+[0-9]+)\\)', eq_atom_text\n                                   .replace(';', ','))\n            eq_atoms = tuple([tuple([int(i) for i in t.split(',')])\n                              for t in eq_tokens])\n        return heavy_atom_labels, eq_atoms, inchi", "docstring": "Get the inchi canonical labels of the heavy atoms in the molecule\n\nArgs:\nmol: The molecule. OpenBabel OBMol object\n\nReturns:\nThe label mappings. List of tuple of canonical label,\noriginal label\nList of equivalent atoms.", "source": "juraj-google-style"}
{"code": "def preprocessing_fn(inputs):\n    result = {'clicked': inputs['clicked']}\n    for name in _INTEGER_COLUMN_NAMES:\n        feature = inputs[name]\n        feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value=-1)\n        feature = tf.squeeze(feature, axis=1)\n        result[name] = feature\n        result[name + '_bucketized'] = tft.bucketize(feature, _NUM_BUCKETS)\n    for name in _CATEGORICAL_COLUMN_NAMES:\n        feature = inputs[name]\n        feature = tft.sparse_tensor_to_dense_with_shape(feature, [None, 1], default_value='')\n        feature = tf.squeeze(feature, axis=1)\n        result[get_transformed_categorical_column_name(name)] = tft.compute_and_apply_vocabulary(feature, frequency_threshold=frequency_threshold)\n    return result", "docstring": "User defined preprocessing function for criteo columns.\n\nArgs:\ninputs: dictionary of input `tensorflow_transform.Column`.\nReturns:\nA dictionary of `tensorflow_transform.Column` representing the transformed\ncolumns.", "source": "github-repos"}
{"code": "def _update_graph_variables(self, learning_rate: float = None, momentum: float = None):\n        \n        if learning_rate is not None:\n            K.set_value(self.get_learning_rate_variable(), learning_rate)\n            \n        if momentum is not None:\n            K.set_value(self.get_momentum_variable(), momentum)", "docstring": "Update graph variables setting giving `learning_rate` and `momentum`\n\nArgs:\nlearning_rate: learning rate value to be set in graph (set if not None)\nmomentum: momentum value to be set in graph (set if not None)\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        \n        super(DerivationParameters, self).read(\n            input_stream,\n            kmip_version=kmip_version\n        )\n        local_stream = BytearrayStream(input_stream.read(self.length))\n\n        if self.is_tag_next(\n                enums.Tags.CRYPTOGRAPHIC_PARAMETERS,\n                local_stream\n        ):\n            self._cryptographic_parameters = CryptographicParameters()\n            self._cryptographic_parameters.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.INITIALIZATION_VECTOR, local_stream):\n            self._initialization_vector = ByteString(\n                tag=enums.Tags.INITIALIZATION_VECTOR\n            )\n            self._initialization_vector.read(\n                local_stream,\n                kmip_version=kmip_version\n            )\n\n        if self.is_tag_next(enums.Tags.DERIVATION_DATA, local_stream):\n            self._derivation_data = ByteString(tag=enums.Tags.DERIVATION_DATA)\n            self._derivation_data.read(local_stream, kmip_version=kmip_version)\n\n        if self.is_tag_next(enums.Tags.SALT, local_stream):\n            self._salt = ByteString(tag=enums.Tags.SALT)\n            self._salt.read(local_stream, kmip_version=kmip_version)\n\n        if self.is_tag_next(Tags.ITERATION_COUNT, local_stream):\n            self._iteration_count = Integer(tag=Tags.ITERATION_COUNT)\n            self._iteration_count.read(local_stream, kmip_version=kmip_version)\n\n        self.is_oversized(local_stream)", "docstring": "Read the data encoding the DerivationParameters struct and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"}
{"code": "def save_data(X, y, path):\n    catalog = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5}\n    ext = os.path.splitext(path)[1]\n    func = catalog[ext]\n    if (y is None):\n        y = np.zeros((X.shape[0],))\n    func(X, y, path)", "docstring": "Save data as a CSV, LibSVM or HDF5 file based on the file extension.\n\nArgs:\nX (numpy or scipy sparse matrix): Data matrix\ny (numpy array): Target vector. If None, all zero vector will be saved.\npath (str): Path to the CSV, LibSVM or HDF5 file to save data.", "source": "codesearchnet"}
{"code": "def _CreateImage(media_service, opener, url):\n    image_data = opener.open(url).read().decode('utf-8')\n    image = {'type': 'IMAGE', 'data': image_data, 'xsi_type': 'Image'}\n    return media_service.upload(image)[0]", "docstring": "Creates an image and uploads it to the server.\n\nArgs:\nmedia_service: a SudsServiceProxy instance for AdWords's MediaService.\nopener: an OpenerDirector instance.\nurl: a str URL used to load image data.\n\nReturns:\nThe image that was successfully uploaded.", "source": "codesearchnet"}
{"code": "def convert(self, vroot, entry_variables):\n    for converter in self.converters:\n        vroot = converter.convert(vroot, entry_variables)\n    return vroot", "docstring": "Convert a given graph.\n\nConvert a given graph using the `converters` in the order of the registeration, i.e., sequentially.\n\nArgs:\nvroot (:obj:`Variable`): NNabla Variable\nentry_variables (:obj:`Variable`): Entry variable from which the conversion starts.", "source": "codesearchnet"}
{"code": "def BatchConvert(self, metadata_value_pairs, token=None):\n    \n\n    \n    msg_dict = {}\n    for metadata, msg in metadata_value_pairs:\n      msg_dict.setdefault(msg.source, []).append((metadata, msg))\n\n    metadata_objects = []\n    metadata_to_fetch = []\n\n    \n    for client_urn in msg_dict:\n      try:\n        metadata_objects.append(self.cached_metadata[client_urn])\n      except KeyError:\n        metadata_to_fetch.append(client_urn)\n\n    if metadata_to_fetch:\n      if data_store.RelationalDBEnabled():\n        client_ids = set(urn.Basename() for urn in metadata_to_fetch)\n        infos = data_store.REL_DB.MultiReadClientFullInfo(client_ids)\n\n        fetched_metadata = [\n            GetMetadata(client_id, info) for client_id, info in infos.items()\n        ]\n      else:\n        client_fds = aff4.FACTORY.MultiOpen(\n            metadata_to_fetch, mode=\"r\", token=token)\n\n        fetched_metadata = [\n            GetMetadataLegacy(client_fd, token=token)\n            for client_fd in client_fds\n        ]\n\n      for metadata in fetched_metadata:\n        self.cached_metadata[metadata.client_urn] = metadata\n      metadata_objects.extend(fetched_metadata)\n\n    data_by_type = {}\n    for metadata in metadata_objects:\n      try:\n        for original_metadata, message in msg_dict[metadata.client_urn]:\n          \n          \n          new_metadata = ExportedMetadata(metadata)\n          new_metadata.source_urn = original_metadata.source_urn\n          new_metadata.annotations = original_metadata.annotations\n          new_metadata.original_timestamp = message.payload.age\n          cls_name = message.payload.__class__.__name__\n\n          \n          \n          if cls_name not in data_by_type:\n            converters_classes = ExportConverter.GetConvertersByValue(\n                message.payload)\n            data_by_type[cls_name] = {\n                \"converters\": [cls(self.options) for cls in converters_classes],\n                \"batch_data\": [(new_metadata, message.payload)]\n            }\n          else:\n            data_by_type[cls_name][\"batch_data\"].append(\n                (new_metadata, message.payload))\n\n      except KeyError:\n        pass\n\n    \n    converted_batch = []\n    for dataset in itervalues(data_by_type):\n      for converter in dataset[\"converters\"]:\n        converted_batch.extend(\n            converter.BatchConvert(dataset[\"batch_data\"], token=token))\n\n    return converted_batch", "docstring": "Converts a batch of GrrMessages into a set of RDFValues at once.\n\nArgs:\nmetadata_value_pairs: a list or a generator of tuples (metadata, value),\nwhere metadata is ExportedMetadata to be used for conversion and value\nis a GrrMessage to be converted.\ntoken: Security token.\n\nReturns:\nResulting RDFValues. Empty list is a valid result and means that\nconversion wasn't possible.", "source": "juraj-google-style"}
{"code": "def business_days_in_period(self, date_tensor, period_tensor):\n    return self.business_days_between(date_tensor, date_tensor + period_tensor)", "docstring": "Calculates number of business days in a period.\n\nIncludes the dates in `date_tensor`, but excludes final dates resulting from\naddition of `period_tensor`.\n\nArgs:\ndate_tensor: DateTensor of starting dates.\nperiod_tensor: PeriodTensor, should be broadcastable to `date_tensor`.\n\nReturns:\nAn int32 Tensor with the number of business days in given periods that\nstart at given dates.", "source": "github-repos"}
{"code": "def check_num_tasks(chain, task_count):\n    \n    errors = []\n    \n    \n    min_decision_tasks = 1\n    if task_count['decision'] < min_decision_tasks:\n        errors.append(\"{} decision tasks; we must have at least {}!\".format(\n            task_count['decision'], min_decision_tasks\n        ))\n    raise_on_errors(errors)", "docstring": "Make sure there are a specific number of specific task types.\n\nCurrently we only check decision tasks.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\ntask_count (dict): mapping task type to the number of links.\n\nRaises:\nCoTError: on failure.", "source": "juraj-google-style"}
{"code": "def _rank(x):\n    rank = ops.convert_to_tensor(x).get_shape().ndims\n    if rank:\n        return (rank, True)\n    else:\n        return (array_ops.rank(x), False)", "docstring": "Helper function to retrieve the rank of a tensor.\n\nArgs:\nx: Something convertible to `Tensor`.\n\nReturns:\nEither a pair `(rank, True)` where `rank` is an integer or a pair\n`(rank, False)` where `rank` is an integer `Tensor`. In either case,\n`rank` is the rank of `x`.", "source": "github-repos"}
{"code": "def get_nodes_lines(self, **kwargs):\n        \n        \n        params = {'Nodes': util.ints_to_string(kwargs.get('nodes', []))}\n\n        \n        result = self.make_request('bus', 'get_nodes_lines', **params)\n\n        if not util.check_result(result):\n            return False, result.get('resultDescription', 'UNKNOWN ERROR')\n\n        \n        values = util.response_list(result, 'resultValues')\n        return True, [emtype.NodeLinesItem(**a) for a in values]", "docstring": "Obtain stop IDs, coordinates and line information.\n\nArgs:\nnodes (list[int] | int): nodes to query, may be empty to get\nall nodes.\n\nReturns:\nStatus boolean and parsed response (list[NodeLinesItem]), or message\nstring in case of error.", "source": "juraj-google-style"}
{"code": "def _CanProcessKeyWithPlugin(self, registry_key, plugin):\n    \n    for registry_key_filter in plugin.FILTERS:\n      \n      \n      if getattr(registry_key_filter, 'key_paths', []):\n        continue\n\n      if registry_key_filter.Match(registry_key):\n        return True\n\n    return False", "docstring": "Determines if a plugin can process a Windows Registry key or its values.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\nplugin (WindowsRegistryPlugin): Windows Registry plugin.\n\nReturns:\nbool: True if the Registry key can be processed with the plugin.", "source": "juraj-google-style"}
{"code": "def fetch_credential(self, credential=None, profile=None):\n        \n        q = self.db.get(self.query.profile == profile)\n        if q is not None:\n            return q.get(credential)", "docstring": "Fetch credential from credentials file.\n\nArgs:\ncredential (str): Credential to fetch.\nprofile (str): Credentials profile. Defaults to ``'default'``.\n\nReturns:\nstr, None: Fetched credential or ``None``.", "source": "juraj-google-style"}
{"code": "def fulfill_transaction(transaction, *, private_keys):\n    if (not isinstance(private_keys, (list, tuple))):\n        private_keys = [private_keys]\n    if isinstance(private_keys, tuple):\n        private_keys = list(private_keys)\n    transaction_obj = Transaction.from_dict(transaction)\n    try:\n        signed_transaction = transaction_obj.sign(private_keys)\n    except KeypairMismatchException as exc:\n        raise MissingPrivateKeyError('A private key is missing!') from exc\n    return signed_transaction.to_dict()", "docstring": "Fulfills the given transaction.\n\nArgs:\ntransaction (dict): The transaction to be fulfilled.\nprivate_keys (:obj:`str` | :obj:`list` | :obj:`tuple`): One or\nmore private keys to be used for fulfilling the\ntransaction.\n\nReturns:\ndict: The fulfilled transaction payload, ready to be sent to a\nBigchainDB federation.\n\nRaises:\n:exc:`~.exceptions.MissingPrivateKeyError`: If a private\nkey is missing.", "source": "codesearchnet"}
{"code": "def __init__(self, app):\n        \n\n        super(SendgridEmailAdapter, self).__init__(app)\n\n        sendgrid_api_key = app.config.get('SENDGRID_API_KEY')\n        if not sendgrid_api_key:\n            raise ConfigError(\n                \"The SENDGRID_API_KEY setting is missing. Set SENDGRID_API_KEY in your app config.\")\n\n        \n        try:\n            from sendgrid import SendGridAPIClient\n            self.sg = SendGridAPIClient(apikey=sendgrid_api_key)\n        except ImportError:\n            raise ConfigError(SENDGRID_IMPORT_ERROR_MESSAGE)", "docstring": "Check config settings and setup SendGrid Web API v3.\n\nArgs:\napp(Flask): The Flask application instance.", "source": "juraj-google-style"}
{"code": "def doc2id(self, doc):\n    doc = map(self.process_token, doc)\n    return [self.token_to_id(token) for token in doc]", "docstring": "Get the list of token_id given doc.\n\nArgs:\ndoc (list): document.\n\nReturns:\nlist: int id of doc.", "source": "codesearchnet"}
{"code": "def call(self, func, key, timeout=None):\n    result = self.get(key)\n    if (result == NONE_RESULT):\n        return None\n    if (result is None):\n        result = func()\n        self.set(key, (result if (result is not None) else NONE_RESULT), timeout)\n    return result", "docstring": "Wraps a function call with cache.\n\nArgs:\nfunc (function): the function to call.\nkey (str): the cache key for this call.\ntimeout (int): the cache timeout for the key (the\nunit of this parameter depends on\nthe cache class you use, for example,\nif you use the classes from werkzeug,\nthen timeout is in seconds.)\n\nReturns:\nThe return value of calling func", "source": "codesearchnet"}
{"code": "def compute_f(match_num, test_num, gold_num):\n    \n    if test_num == 0 or gold_num == 0:\n        return 0.00, 0.00, 0.00\n    precision = float(match_num) / float(test_num)\n    recall = float(match_num) / float(gold_num)\n    if (precision + recall) != 0:\n        f_score = 2 * precision * recall / (precision + recall)\n        if veryVerbose:\n            print(\"F-score:\", f_score, file=DEBUG_LOG)\n        return precision, recall, f_score\n    else:\n        if veryVerbose:\n            print(\"F-score:\", \"0.0\", file=DEBUG_LOG)\n        return precision, recall, 0.00", "docstring": "Compute the f-score based on the matching triple number,\ntriple number of AMR set 1,\ntriple number of AMR set 2\nArgs:\nmatch_num: matching triple number\ntest_num:  triple number of AMR 1 (test file)\ngold_num:  triple number of AMR 2 (gold file)\nReturns:\nprecision: match_num/test_num\nrecall: match_num/gold_num\nf_score: 2*precision*recall/(precision+recall)", "source": "juraj-google-style"}
{"code": "def assert_split_at_fraction_fails(source, num_items_to_read_before_split, split_fraction):\n    assert_split_at_fraction_behavior(source, num_items_to_read_before_split, split_fraction, ExpectedSplitOutcome.MUST_FAIL)", "docstring": "Asserts that dynamic work rebalancing at a given fraction fails.\n\nAsserts that trying to perform dynamic splitting after reading\n'num_items_to_read_before_split' items from the source fails.\n\nArgs:\nsource: source to perform dynamic splitting on.\nnum_items_to_read_before_split: number of items to read before splitting.\nsplit_fraction: fraction to split at.", "source": "github-repos"}
{"code": "def NewFromJSON(data):\n    return Comment(body=data.get('body', None), posted_at=data.get('posted_at', None), user=User.NewFromJSON(data.get('user', None)))", "docstring": "Create a new Comment instance from a JSON dict.\n\nArgs:\ndata (dict): JSON dictionary representing a Comment.\n\nReturns:\nA Comment instance.", "source": "codesearchnet"}
{"code": "def make_grid_texture(num_h_lines=10, num_v_lines=10, resolution=50):\n    (x_h, y_h) = make_lines_texture(num_h_lines, resolution)\n    (y_v, x_v) = make_lines_texture(num_v_lines, resolution)\n    return (np.concatenate([x_h, x_v]), np.concatenate([y_h, y_v]))", "docstring": "Makes a texture consisting of a grid of vertical and horizontal lines.\n\nArgs:\nnum_h_lines (int): the number of horizontal lines to draw\nnum_v_lines (int): the number of vertical lines to draw\nresolution (int): the number of midpoints to draw on each line\n\nReturns:\nA texture.", "source": "codesearchnet"}
{"code": "def format_cert_name(env='', account='', region='', certificate=None):\n    cert_name = None\n    if certificate:\n        if certificate.startswith('arn'):\n            LOG.info('Full ARN provided...skipping lookup.')\n            cert_name = certificate\n        else:\n            generated_cert_name = generate_custom_cert_name(env, region, account, certificate)\n            if generated_cert_name:\n                LOG.info('Found generated certificate %s from template', generated_cert_name)\n                cert_name = generated_cert_name\n            else:\n                LOG.info('Using default certificate name logic')\n                cert_name = 'arn:aws:iam::{account}:server-certificate/{name}'.format(account=account, name=certificate)\n    LOG.debug('Certificate name: %s', cert_name)\n    return cert_name", "docstring": "Format the SSL certificate name into ARN for ELB.\n\nArgs:\nenv (str): Account environment name\naccount (str): Account number for ARN\nregion (str): AWS Region.\ncertificate (str): Name of SSL certificate\n\nReturns:\nstr: Fully qualified ARN for SSL certificate\nNone: Certificate is not desired", "source": "codesearchnet"}
{"code": "def run_display_app_errors(self, err):\n        \n        if err is not None and err:\n            for e_ in err.decode('utf-8').split('\\n'):\n                print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, e_))\n                self.log.error('[tcrun] App error: {}'.format(e_))", "docstring": "Handle the exit code for the current run.\n\nArgs:\nerr (str): One or more lines of errors messages.", "source": "juraj-google-style"}
{"code": "def convertTime(self, time):\n    m_format = ''\n    if time.minute:\n        m_format = ':%M'\n    timeString = time.strftime((('%I' + m_format) + ' %p'))\n    if (not int(timeString[0])):\n        timeString = timeString[1:]\n    return timeString", "docstring": "Convert a datetime object representing a time into a human-ready\nstring that can be read, spoken aloud, etc.\n\nArgs:\ntime (datetime.date): A datetime object to be converted into text.\n\nReturns:\nA string representation of the input time, ignoring any day-related\ninformation.", "source": "codesearchnet"}
{"code": "def _decorate_ast_reference_data_types(node: _ast.AbstractSyntaxTree) -> None:\n    if isinstance(node, _ast.Identifier) and node.value == 'reference':\n        node.data_type = unittest.mock.Mock(spec=_fhir_path_data_types.StructureDataType, element_type='Reference')\n    for child in node.children or ():\n        _decorate_ast_reference_data_types(child)", "docstring": "Adds data types for reference nodes.\n\nSets the data_type for any identifier node named 'reference' to that of a\nReference type.\n\nArgs:\nnode: The root node of the AST to modify.", "source": "github-repos"}
{"code": "def _use_cache(self, key, options=None):\n    flag = ContextOptions.use_cache(options)\n    if (flag is None):\n        flag = self._cache_policy(key)\n    if (flag is None):\n        flag = ContextOptions.use_cache(self._conn.config)\n    if (flag is None):\n        flag = True\n    return flag", "docstring": "Return whether to use the context cache for this key.\n\nArgs:\nkey: Key instance.\noptions: ContextOptions instance, or None.\n\nReturns:\nTrue if the key should be cached, False otherwise.", "source": "codesearchnet"}
{"code": "def less(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Less().symbolic_call(x1, x2)\n    return backend.numpy.less(x1, x2)", "docstring": "Return the truth value of `x1 < x2` element-wise.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, element-wise comparison of `x1` and `x2`.", "source": "github-repos"}
{"code": "def gather(values, index, name='segmented_gather'):\n    return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name)", "docstring": "Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up\na value for that index in `values`. Two elements from the same segment always get assigned the same value.\n\nArgs:\nvalues: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values.\nindex: [B1, ..., Bn, I1, ..., Ik] IndexMap.\nname: Name for the TensorFlow operation.\n\nReturns:\n[B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values.", "source": "github-repos"}
{"code": "def keep_file(self, task, response, min_size=None, max_size=None):\n    try:\n        img = Image.open(BytesIO(response.content))\n    except (IOError, OSError):\n        return False\n    task['img_size'] = img.size\n    if (min_size and (not self._size_gt(img.size, min_size))):\n        return False\n    if (max_size and (not self._size_lt(img.size, max_size))):\n        return False\n    return True", "docstring": "Decide whether to keep the image\n\nCompare image size with ``min_size`` and ``max_size`` to decide.\n\nArgs:\nresponse (Response): response of requests.\nmin_size (tuple or None): minimum size of required images.\nmax_size (tuple or None): maximum size of required images.\nReturns:\nbool: whether to keep the image.", "source": "codesearchnet"}
{"code": "def MapByteStream(\n      self, byte_stream, byte_offset=0, context=None, **unused_kwargs):\n    \n    context_state = getattr(context, 'state', {})\n\n    size_hints = context_state.get('size_hints', {})\n\n    elements_data_size = self._CalculateElementsDataSize(context)\n    if elements_data_size is not None:\n      self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size)\n\n    elif not self._HasElementsTerminator():\n      raise errors.MappingError(\n          'Unable to determine elements data size and missing elements '\n          'terminator')\n\n    else:\n      byte_stream_size = len(byte_stream)\n\n      element_byte_size = self._element_data_type_definition.GetByteSize()\n      elements_data_offset = byte_offset\n      next_elements_data_offset = elements_data_offset + element_byte_size\n\n      elements_terminator = self._data_type_definition.elements_terminator\n      element_value = byte_stream[\n          elements_data_offset:next_elements_data_offset]\n\n      while byte_stream[elements_data_offset:]:\n        elements_data_offset = next_elements_data_offset\n        if element_value == elements_terminator:\n          elements_data_size = elements_data_offset - byte_offset\n          break\n\n        next_elements_data_offset += element_byte_size\n        element_value = byte_stream[\n            elements_data_offset:next_elements_data_offset]\n\n      if element_value != elements_terminator:\n        size_hints[self._data_type_definition.name] = DataTypeMapSizeHint(\n            byte_stream_size - byte_offset)\n\n        context_state['size_hints'] = size_hints\n\n        error_string = (\n            'Unable to read: {0:s} from byte stream at offset: {1:d} '\n            'with error: unable to find elements terminator').format(\n                self._data_type_definition.name, byte_offset)\n        raise errors.ByteStreamTooSmallError(error_string)\n\n    if context:\n      context.byte_size = elements_data_size\n\n      size_hints[self._data_type_definition.name] = DataTypeMapSizeHint(\n          elements_data_size, is_complete=True)\n\n      context_state['size_hints'] = size_hints\n\n    return byte_stream[byte_offset:byte_offset + elements_data_size]", "docstring": "Maps the data type on a byte stream.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (Optional[int]): offset into the byte stream where to start.\ncontext (Optional[DataTypeMapContext]): data type map context.\n\nReturns:\ntuple[object, ...]: mapped values.\n\nRaises:\nMappingError: if the data type definition cannot be mapped on\nthe byte stream.", "source": "juraj-google-style"}
{"code": "def _fqdn(o, oset=True, recheck=False, pmodule=None):\n    \n    if id(o) in _set_failures or o is None:\n        return None\n    \n    if recheck or not _safe_hasattr(o, \"__fqdn__\"):\n        import inspect\n        if not hasattr(o, \"__name__\"):\n            msg.warn(\"Skipped object {}: no __name__ attribute.\".format(o), 3)\n            return\n        \n        result = None\n        if hasattr(o, \"__acornext__\") and o.__acornext__ is not None:\n            otarget = o.__acornext__\n        else:\n            otarget = o\n            \n        omod = _safe_getmodule(otarget) or pmodule\n        if (omod is None and hasattr(otarget, \"__objclass__\") and\n            otarget.__objclass__ is not None): \n            omod = _safe_getmodule(otarget.__objclass__)\n            parts = (\"<unknown>\" if omod is None else omod.__name__,\n                     otarget.__objclass__.__name__,\n                     otarget.__name__)\n            \n            result = \"{}.{}.{}\".format(*parts)\n        elif (omod is None and hasattr(otarget, \"__class__\") and\n              otarget.__class__ is not None):\n            omod = _safe_getmodule(otarget.__class__)\n            parts = (\"<unknown>\" if omod is None else omod.__name__,\n                     otarget.__class__.__name__,\n                     otarget.__name__)\n            \n            result = \"{}.{}.{}\".format(*parts)\n        elif omod is not otarget:\n            parts = (_fqdn(omod, False), otarget.__name__)\n            \n            result = \"{}.{}\".format(*parts)\n        else:\n            result = otarget.__name__\n\n        if oset:\n            _safe_setattr(o, \"__fqdn__\", result)\n        return result\n\n    if _safe_hasattr(o, \"__fqdn__\"):\n        return o.__fqdn__", "docstring": "Returns the fully qualified name of the object.\n\nArgs:\no (type): instance of the object's type.\noset (bool): when True, the fqdn will also be set on the object as attribute\n`__fqdn__`.\nrecheck (bool): for sub-classes, sometimes the super class has already had\nits __fqdn__ attribute set; in that case, we want to recheck the\nobject's name. This usually only gets used during object extension.", "source": "juraj-google-style"}
{"code": "def __init__(self, locations=None, separation_char=os.sep):\n        \n        super(LocationDescriptor, self).__init__()\n\n        self._separation_char = separation_char\n\n        \n        if isinstance(locations, list):\n            self._locations_list = list(locations)\n        elif isinstance(locations, str) or isinstance(locations, unicode):\n            self._locations_list = locations.split(self._separation_char)\n        elif locations is None:\n            self._locations_list = list()\n        else:\n            raise TypeError(\"Argument in constructor not recognized.\")", "docstring": "Constructor.\n\nArgs:\nlocations: Can be either a string with sub-strings joined by the separation character or a list of strings,\neach giving a location.\nseparation_char: Separation character in the location string.\n\nRaises:\nTypeError: if argument is not recognized as either a string, a list of strings or ``None``.\n\nNotes:\nEmpty :class:`LocationDescriptor`s **are** allowed and empty locations are also allowed.", "source": "juraj-google-style"}
{"code": "def failure_message(description, options):\n    message = 'expected to find {}'.format(description)\n    if (options['count'] is not None):\n        message += ' {count} {times}'.format(count=options['count'], times=declension('time', 'times', options['count']))\n    elif (options['between'] is not None):\n        between = options['between']\n        if between:\n            (first, last) = (between[0], between[(- 1)])\n        else:\n            (first, last) = (None, None)\n        message += ' between {first} and {last} times'.format(first=first, last=last)\n    elif (options['maximum'] is not None):\n        message += ' at most {maximum} {times}'.format(maximum=options['maximum'], times=declension('time', 'times', options['maximum']))\n    elif (options['minimum'] is not None):\n        message += ' at least {minimum} {times}'.format(minimum=options['minimum'], times=declension('time', 'times', options['minimum']))\n    return message", "docstring": "Returns a expectation failure message for the given query description.\n\nArgs:\ndescription (str): A description of the failed query.\noptions (Dict[str, Any]): The query options.\n\nReturns:\nstr: A message describing the failure.", "source": "codesearchnet"}
{"code": "def find_response_component(self, api_id=None, signature_id=None):\n        \n        if not api_id and not signature_id:\n            raise ValueError('At least one of api_id and signature_id is required')\n\n        components = list()\n        \n        if self.response_data:\n            for component in self.response_data:\n                if (api_id and component['api_id']) == api_id or (signature_id and component['signature_id'] == signature_id):\n                    components.append(component)\n\n        return components", "docstring": "Find one or many repsonse components.\n\nArgs:\n\napi_id (str):           Api id associated with the component(s) to be retrieved.\n\nsignature_id (str):     Signature id associated with the component(s) to be retrieved.\n\nReturns:\nA list of dictionaries containing component data", "source": "juraj-google-style"}
{"code": "class ZoeDepthFeatureFusionLayer(nn.Module):\n\n    def __init__(self, config, align_corners=True):\n        super().__init__()\n        self.align_corners = align_corners\n        self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)\n        self.residual_layer1 = ZoeDepthPreActResidualLayer(config)\n        self.residual_layer2 = ZoeDepthPreActResidualLayer(config)\n\n    def forward(self, hidden_state, residual=None):\n        if residual is not None:\n            if hidden_state.shape != residual.shape:\n                residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False)\n            hidden_state = hidden_state + self.residual_layer1(residual)\n        hidden_state = self.residual_layer2(hidden_state)\n        hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=self.align_corners)\n        hidden_state = self.projection(hidden_state)\n        return hidden_state", "docstring": "Feature fusion layer, merges feature maps from different stages.\n\nArgs:\nconfig (`[ZoeDepthConfig]`):\nModel configuration class defining the model architecture.\nalign_corners (`bool`, *optional*, defaults to `True`):\nThe align_corner setting for bilinear upsample.", "source": "github-repos"}
{"code": "def call_backend(self, orig_request, start_response):\n    \n    method_config, params = self.lookup_rest_method(orig_request)\n    if not method_config:\n      cors_handler = self._create_cors_handler(orig_request)\n      return util.send_wsgi_not_found_response(start_response,\n                                               cors_handler=cors_handler)\n\n    \n    transformed_request = self.transform_request(\n        orig_request, params, method_config)\n\n    \n    \n    discovery = discovery_service.DiscoveryService(\n        self.config_manager, self._backend)\n    discovery_response = discovery.handle_discovery_request(\n        transformed_request.path, transformed_request, start_response)\n    if discovery_response:\n      return discovery_response\n\n    url = transformed_request.base_path + transformed_request.path\n    transformed_request.headers['Content-Type'] = 'application/json'\n    transformed_environ = self.prepare_backend_environ(\n        orig_request.server, 'POST', url, transformed_request.headers.items(),\n        transformed_request.body, transformed_request.source_ip,\n        orig_request.port)\n\n    \n    with util.StartResponseProxy() as start_response_proxy:\n      body_iter = self._backend(transformed_environ, start_response_proxy.Proxy)\n      status = start_response_proxy.response_status\n      headers = start_response_proxy.response_headers\n\n      \n      body = start_response_proxy.response_body\n      \n      if not body:\n        body = ''.join(body_iter)\n\n    return self.handle_backend_response(orig_request, transformed_request,\n                                        status, headers, body, method_config,\n                                        start_response)", "docstring": "Generate API call (from earlier-saved request).\n\nThis calls start_response and returns the response body.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body.", "source": "juraj-google-style"}
{"code": "def wrap_with_monitor(env, video_dir):\n  \n  env = ExtendToEvenDimentions(env)\n  env = RenderObservations(env)  \n  env = gym.wrappers.Monitor(env, video_dir, force=True,\n                             video_callable=lambda idx: True,\n                             write_upon_reset=True)\n  return env", "docstring": "Wrap environment with gym.Monitor.\n\nVideo recording provided by Monitor requires\n1) both height and width of observation to be even numbers.\n2) rendering of environment\n\nArgs:\nenv: environment.\nvideo_dir: video directory.\n\nReturns:\nwrapped environment.", "source": "juraj-google-style"}
{"code": "def getQueryEngineDescription(self, queryEngine, **kwargs):\n        \n        response = self.getQueryEngineDescriptionResponse(queryEngine, **kwargs)\n        return self._read_dataone_type_response(response, 'QueryEngineDescription')", "docstring": "See Also: getQueryEngineDescriptionResponse()\n\nArgs:\nqueryEngine:\n**kwargs:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def run_suite(test_classes, argv=None):\n    \n    \n    parser = argparse.ArgumentParser(description='Mobly Suite Executable.')\n    parser.add_argument(\n        '-c',\n        '--config',\n        nargs=1,\n        type=str,\n        required=True,\n        metavar='<PATH>',\n        help='Path to the test configuration file.')\n    parser.add_argument(\n        '--tests',\n        '--test_case',\n        nargs='+',\n        type=str,\n        metavar='[ClassA[.test_a] ClassB[.test_b] ...]',\n        help='A list of test classes and optional tests to execute.')\n    if not argv:\n        argv = sys.argv[1:]\n    args = parser.parse_args(argv)\n    \n    test_configs = config_parser.load_test_config_file(args.config[0])\n\n    \n    for test_class in test_classes:\n        if not issubclass(test_class, base_test.BaseTestClass):\n            logging.error('Test class %s does not extend '\n                          'mobly.base_test.BaseTestClass', test_class)\n            sys.exit(1)\n\n    \n    selected_tests = compute_selected_tests(test_classes, args.tests)\n\n    \n    ok = True\n    for config in test_configs:\n        runner = test_runner.TestRunner(config.log_path, config.test_bed_name)\n        for (test_class, tests) in selected_tests.items():\n            runner.add_test_class(config, test_class, tests)\n        try:\n            runner.run()\n            ok = runner.results.is_all_pass and ok\n        except signals.TestAbortAll:\n            pass\n        except:\n            logging.exception('Exception when executing %s.',\n                              config.test_bed_name)\n            ok = False\n    if not ok:\n        sys.exit(1)", "docstring": "Executes multiple test classes as a suite.\n\nThis is the default entry point for running a test suite script file\ndirectly.\n\nArgs:\ntest_classes: List of python classes containing Mobly tests.\nargv: A list that is then parsed as cli args. If None, defaults to cli\ninput.", "source": "juraj-google-style"}
{"code": "def update(self, value):\n    with tf.name_scope((self._name + '/update')):\n        if (value.shape.ndims == self._mean.shape.ndims):\n            value = value[(None, ...)]\n        count = tf.shape(value)[0]\n        with tf.control_dependencies([self._count.assign_add(count)]):\n            step = tf.cast(self._count, tf.float32)\n            mean_delta = tf.reduce_sum((value - self._mean[(None, ...)]), 0)\n            new_mean = (self._mean + (mean_delta / step))\n            new_mean = tf.cond((self._count > 1), (lambda : new_mean), (lambda : value[0]))\n            var_delta = ((value - self._mean[(None, ...)]) * (value - new_mean[(None, ...)]))\n            new_var_sum = (self._var_sum + tf.reduce_sum(var_delta, 0))\n        with tf.control_dependencies([new_mean, new_var_sum]):\n            update = (self._mean.assign(new_mean), self._var_sum.assign(new_var_sum))\n        with tf.control_dependencies(update):\n            if (value.shape.ndims == 1):\n                value = tf.reduce_mean(value)\n            return self._summary('value', tf.reduce_mean(value))", "docstring": "Update the mean and variance estimates.\n\nArgs:\nvalue: Batch or single value tensor.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"}
{"code": "def process(self, element):\n    \n    import apache_beam as beam\n    import six\n    import tensorflow as tf\n\n    \n    \n    tf.logging.set_verbosity(tf.logging.ERROR)\n    try:\n      clean_element = []\n      for line in element:\n        clean_element.append(line.rstrip())\n\n      \n      batch_result = self._session.run(\n          fetches=self._transformed_features,\n          feed_dict={self._input_placeholder_tensor: clean_element})\n\n      \n      \n      \n      \n      \n      \n\n      \n      for i in range(len(clean_element)):\n        transformed_features = {}\n        for name, value in six.iteritems(batch_result):\n          if isinstance(value, tf.SparseTensorValue):\n            batch_i_indices = value.indices[:, 0] == i\n            batch_i_values = value.values[batch_i_indices]\n            transformed_features[name] = batch_i_values.tolist()\n          else:\n            transformed_features[name] = value[i].tolist()\n\n        yield transformed_features\n\n    except Exception as e:  \n      yield beam.pvalue.TaggedOutput('errors', (str(e), element))", "docstring": "Run the transformation graph on batched input data\n\nArgs:\nelement: list of csv strings, representing one batch input to the TF graph.\n\nReturns:\ndict containing the transformed data. Results are un-batched. Sparse\ntensors are converted to lists.", "source": "juraj-google-style"}
{"code": "def GetDecompressor(cls, compression_method):\n    compression_method = compression_method.lower()\n    decompressor = cls._decompressors.get(compression_method, None)\n    if (not decompressor):\n        return None\n    return decompressor()", "docstring": "Retrieves the decompressor object for a specific compression method.\n\nArgs:\ncompression_method (str): compression method identifier.\n\nReturns:\nDecompressor: decompressor or None if the compression method does\nnot exists.", "source": "codesearchnet"}
{"code": "def save_data(self, filename):\n    with zopen(filename, 'wt') as f:\n        json.dump(list(self._data), f, cls=MontyEncoder)", "docstring": "Save the assimilated data to a file.\n\nArgs:\nfilename (str): filename to save the assimilated data to. Note\nthat if the filename ends with gz or bz2, the relevant gzip\nor bz2 compression will be applied.", "source": "codesearchnet"}
{"code": "def SetInputSourceConfiguration(self, configuration):\n    mount_path = configuration.mount_path\n    if (mount_path and mount_path.endswith(os.sep)):\n        mount_path = mount_path[:(- 1)]\n    self._mount_path = mount_path", "docstring": "Sets the input source configuration settings.\n\nArgs:\nconfiguration (InputSourceConfiguration): input source configuration.", "source": "codesearchnet"}
{"code": "def init_from_adversarial_batches_write_to_datastore(self, submissions,\n                                                       adv_batches):\n    \n    \n    idx = 0\n    for s_id in iterkeys(submissions.defenses):\n      for adv_id in iterkeys(adv_batches.data):\n        class_batch_id = CLASSIFICATION_BATCH_ID_PATTERN.format(idx)\n        idx += 1\n        self.data[class_batch_id] = {\n            'adversarial_batch_id': adv_id,\n            'submission_id': s_id,\n            'result_path': os.path.join(\n                self._round_name,\n                CLASSIFICATION_BATCHES_SUBDIR,\n                s_id + '_' + adv_id + '.csv')\n        }\n    \n    client = self._datastore_client\n    with client.no_transact_batch() as batch:\n      for key, value in iteritems(self.data):\n        entity = client.entity(client.key(KIND_CLASSIFICATION_BATCH, key))\n        entity.update(value)\n        batch.put(entity)", "docstring": "Populates data from adversarial batches and writes to datastore.\n\nArgs:\nsubmissions: instance of CompetitionSubmissions\nadv_batches: instance of AversarialBatches", "source": "juraj-google-style"}
{"code": "def _zeo_key(self, key, new_type=OOBTree):\n        \n        zeo_key = self.zeo.get(key, None)\n\n        if zeo_key is None:\n            zeo_key = new_type()\n            self.zeo[key] = zeo_key\n\n        return zeo_key", "docstring": "Get key from the :attr:`zeo` database root. If the key doesn't exist,\ncreate it by calling `new_type` argument.\n\nArgs:\nkey (str): Key in the root dict.\nnew_type (func/obj): Object/function returning the new instance.\n\nReturns:\nobj: Stored object, or `new_type`.", "source": "juraj-google-style"}
{"code": "def reindex_similar(self, other, n_sphere=4):\n\n    def make_subset_similar(m1, subset1, m2, subset2, index_dct):\n        'Changes index_dct INPLACE'\n        coords = ['x', 'y', 'z']\n        index1 = list(subset1)\n        for m1_i in index1:\n            dist_m2_to_m1_i = m2.get_distance_to(m1.loc[(m1_i, coords)], subset2, sort=True)\n            m2_i = dist_m2_to_m1_i.index[0]\n            dist_new = dist_m2_to_m1_i.loc[(m2_i, 'distance')]\n            m2_pos_i = dist_m2_to_m1_i.loc[(m2_i, coords)]\n            counter = itertools.count()\n            found = False\n            while (not found):\n                if (m2_i in index_dct.keys()):\n                    old_m1_pos = m1.loc[(index_dct[m2_i], coords)]\n                    if (dist_new < np.linalg.norm((m2_pos_i - old_m1_pos))):\n                        index1.append(index_dct[m2_i])\n                        index_dct[m2_i] = m1_i\n                        found = True\n                    else:\n                        m2_i = dist_m2_to_m1_i.index[next(counter)]\n                        dist_new = dist_m2_to_m1_i.loc[(m2_i, 'distance')]\n                        m2_pos_i = dist_m2_to_m1_i.loc[(m2_i, coords)]\n                else:\n                    index_dct[m2_i] = m1_i\n                    found = True\n        return index_dct\n    molecule1 = self.copy()\n    molecule2 = other.copy()\n    partition1 = molecule1.partition_chem_env(n_sphere)\n    partition2 = molecule2.partition_chem_env(n_sphere)\n    index_dct = {}\n    for key in partition1:\n        message = 'You have chemically different molecules, regarding the topology of their connectivity.'\n        assert (len(partition1[key]) == len(partition2[key])), message\n        index_dct = make_subset_similar(molecule1, partition1[key], molecule2, partition2[key], index_dct)\n    molecule2.index = [index_dct[i] for i in molecule2.index]\n    return molecule2.loc[molecule1.index]", "docstring": "Reindex ``other`` to be similarly indexed as ``self``.\n\nReturns a reindexed copy of ``other`` that minimizes the\ndistance for each atom to itself in the same chemical environemt\nfrom ``self`` to ``other``.\nRead more about the definition of the chemical environment in\n:func:`Cartesian.partition_chem_env`\n\n.. note:: It is necessary to align ``self`` and other before\napplying this method.\nThis can be done via :meth:`~Cartesian.align`.\n\n.. note:: It is probably necessary to improve the result using\n:meth:`~Cartesian.change_numbering()`.\n\nArgs:\nother (Cartesian):\nn_sphere (int): Wrapper around the argument for\n:meth:`~Cartesian.partition_chem_env`.\n\nReturns:\nCartesian: Reindexed version of other", "source": "codesearchnet"}
{"code": "def exportGurobiModel(self, gurobiDriver='gurobi', verbose=False):\n        \n        from gurobipy import GRB, read\n        from tempfile import mkdtemp\n        from shutil import rmtree\n        from os import path\n        import sys\n        if (sys.version_info > (3, 0)):\n            from io import StringIO\n        else:\n            from io import BytesIO as StringIO\n        tmp_dir = mkdtemp()\n        model_file = path.join(tmp_dir, 'model.mps')\n\n        previous = {\n            'solver': self.getOption('solver') or '',\n            'gurobi_auxfiles': self.getOption('auxfiles') or '',\n            'gurobi_options': self.getOption('gurobi_options') or '',\n        }\n        temporary = {\n            'solver': gurobiDriver,\n            'gurobi_auxfiles': 'rc',\n            'gurobi_options': .format(model_file)\n        }\n\n        for option in temporary:\n            self.setOption(option, temporary[option])\n\n        output = self.getOutput('solve;')\n        if not path.isfile(model_file):\n            raise RuntimeError(output)\n\n        for option in previous:\n            self.setOption(option, previous[option])\n\n        text_trap = StringIO()\n        stdout = sys.stdout\n        sys.stdout = text_trap\n        model = read(model_file)\n        sys.stdout = stdout\n        if verbose:\n            print(text_trap.getvalue())\n        if model_file.endswith('.mps'):\n            if not self.getCurrentObjective().minimization():\n                model.ModelSense = GRB.MAXIMIZE\n                model.setObjective(- model.getObjective())\n        model.update()\n        rmtree(tmp_dir)\n        return model", "docstring": "Export the model to Gurobi as a gurobipy.Model object.\n\nArgs:\ngurobiDriver: The name or the path of the Gurobi solver driver.\nverbose: Whether should generate verbose output.\n\nReturns:\nA :class:`gurobipy.Model` object with the model loaded.", "source": "juraj-google-style"}
{"code": "def write_config_file(config_instance, appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):\n    path = get_config_path(appdirs, file_name)\n    with open(path, 'w') as fobj:\n        config_instance.write(fobj)\n    return config_instance", "docstring": "Write a ConfigParser instance to file at the correct location.\n\nArgs:\nconfig_instance: Config instance to safe to file.\nappdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific\npath information.\nfile_name (text_type, optional): Name of the config file. Defaults to\n``DEFAULT_CONFIG_FILENAME``.\n\nReturns:\nSafeConfigParser: Instance written to file.", "source": "codesearchnet"}
{"code": "def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):\n    params = [('q', query_string.encode('utf-8')), ('format', 'json')]\n    if no_redirect:\n        params.append(('no_redirect', 1))\n    if no_html:\n        params.append(('no_html', 1))\n    if skip_disambig:\n        params.append(('skip_disambig', 1))\n    return ('/?' + urlencode(params))", "docstring": "Assembler of parameters for building request query.\n\nArgs:\nquery_string: Query to be passed to DuckDuckGo API.\nno_redirect: Skip HTTP redirects (for !bang commands). Default - False.\nno_html: Remove HTML from text, e.g. bold and italics. Default - False.\nskip_disambig: Skip disambiguation (D) Type. Default - False.\n\nReturns:\nA “percent-encoded” string which is used as a part of the query.", "source": "codesearchnet"}
{"code": "def submit_files(self, halt_on_error=True):\n        \n        \n        if self.halt_on_file_error is not None:\n            halt_on_error = self.halt_on_file_error\n\n        upload_status = []\n        for xid, content_data in self._files.items():\n            del self._files[xid]  \n            status = True\n\n            \n            if self.debug and xid in self.saved_xids:\n                self.tcex.log.debug('skipping previously saved file {}.'.format(xid))\n                continue\n\n            \n            content = content_data.get('fileContent')\n            if callable(content):\n                content = content_data.get('fileContent')(xid)\n            if content is None:\n                upload_status.append({'uploaded': False, 'xid': xid})\n                self.tcex.log.warning('File content was null for xid {}.'.format(xid))\n                continue\n            if content_data.get('type') == 'Document':\n                api_branch = 'documents'\n            elif content_data.get('type') == 'Report':\n                api_branch = 'reports'\n\n            \n            url = '/v2/groups/{}/{}/upload'.format(api_branch, xid)\n            headers = {'Content-Type': 'application/octet-stream'}\n            params = {'owner': self._owner}\n            r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)\n            if r.status_code == 401:\n                \n                self.tcex.log.info('Received 401 status code using POST. Trying PUT to update.')\n                r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)\n            self.tcex.log.debug('{} Upload URL: {}.'.format(content_data.get('type'), r.url))\n            if not r.ok:\n                status = False\n                self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)\n            elif self.debug:\n                self.saved_xids.append(xid)\n            self.tcex.log.info('Status {} for file upload with xid {}.'.format(r.status_code, xid))\n            upload_status.append({'uploaded': status, 'xid': xid})\n        return upload_status", "docstring": "Submit Files for Documents and Reports to ThreatConnect API.\n\nCritical Errors\n\n* There is insufficient document storage allocated to this account.\n\nArgs:\nhalt_on_error (bool, default:True): If True any exception will raise an error.\n\nReturns:\ndict: The upload status for each xid.", "source": "juraj-google-style"}
{"code": "def _ParseFieldsMetadata(self, structure):\n    fields = structure.fields.split(' ')\n    log_line_structure = pyparsing.Empty()\n    if ((fields[0] == 'date') and (fields[1] == 'time')):\n        log_line_structure += self.DATE_TIME.setResultsName('date_time')\n        fields = fields[2:]\n    for member in fields:\n        log_line_structure += self._LOG_LINE_STRUCTURES.get(member, self.URI)\n    updated_structures = []\n    for line_structure in self._line_structures:\n        if (line_structure[0] != 'logline'):\n            updated_structures.append(line_structure)\n    updated_structures.append(('logline', log_line_structure))\n    self._line_structures = updated_structures", "docstring": "Parses the fields metadata and updates the log line definition to match.\n\nArgs:\nstructure (pyparsing.ParseResults): structure parsed from the log file.", "source": "codesearchnet"}
{"code": "def str_to_inet(address):\n    \n    \n    try:\n        return socket.inet_pton(socket.AF_INET, address)\n    except socket.error:\n        return socket.inet_pton(socket.AF_INET6, address)", "docstring": "Convert an a string IP address to a inet struct\n\nArgs:\naddress (str): String representation of address\nReturns:\ninet: Inet network address", "source": "juraj-google-style"}
{"code": "def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):\n    \n    if ignore_index is not None:\n        assert tensor.size() == tensor_other.size()\n        mask_arr = tensor.ne(ignore_index)\n        tensor = tensor.masked_select(mask_arr)\n        tensor_other = tensor_other.masked_select(mask_arr)\n\n    return torch.equal(tensor, tensor_other)", "docstring": "Compute ``torch.equal`` with the optional mask parameter.\n\nArgs:\nignore_index (int, optional): Specifies a ``tensor`` index that is ignored.\n\nReturns:\n(bool) Returns ``True`` if target and prediction are equal.", "source": "juraj-google-style"}
{"code": "def read(cls, five9, external_id):\n        \n        results = cls.search(five9, {cls.__uid_field__: external_id})\n        if not results:\n            return None\n        return results[0]", "docstring": "Return a record singleton for the ID.\n\nArgs:\nfive9 (five9.Five9): The authenticated Five9 remote.\nexternal_id (mixed): The identified on Five9. This should be the\nvalue that is in the ``__uid_field__`` field on the record.\n\nReturns:\nBaseModel: The record, if found. Otherwise ``None``", "source": "juraj-google-style"}
{"code": "def mach53(msg):\n    d = hex2bin(data(msg))\n    if (d[23] == '0'):\n        return None\n    mach = (bin2int(d[24:33]) * 0.008)\n    return round(mach, 3)", "docstring": "MACH number, DBS 5,3 message\n\nArgs:\nmsg (String): 28 bytes hexadecimal message\n\nReturns:\nfloat: MACH number", "source": "codesearchnet"}
{"code": "def build_avatar_url(jid):\n        \n        digest = md5(str(jid).encode(\"utf-8\")).hexdigest()\n        return \"http:", "docstring": "Static method to build a gravatar url with the agent's JID\n\nArgs:\njid (aioxmpp.JID): an XMPP identifier\n\nReturns:\nstr: an URL for the gravatar", "source": "juraj-google-style"}
{"code": "def shift_and_pad(tensor, shift, axis=0):\n  \n  shape = tensor.shape\n  rank = len(shape)\n  assert 0 <= abs(axis) < rank\n\n  length = int(shape[axis])\n  assert 0 <= abs(shift) < length\n\n  paddings = [(0, 0)] * rank\n  begin = [0] * rank\n  size = [-1] * rank\n\n  if shift > 0:\n    paddings[axis] = (shift, 0)\n    size[axis] = length - shift\n  elif shift < 0:\n    paddings[axis] = (0, -shift)\n    begin[axis] = -shift\n\n  ret = tf.pad(tf.slice(tensor, begin, size), paddings)\n\n  return ret", "docstring": "Shifts and pads with zero along an axis.\n\nExample:\nshift_and_pad([1, 2, 3, 4], 2)  --> [0, 0, 1, 2]\nshift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]\n\nArgs:\ntensor: Tensor; to be shifted and padded.\nshift: int; number of positions to shift by.\naxis: int; along which axis to shift and pad.\n\nReturns:\nA Tensor with the same shape as the input tensor.", "source": "juraj-google-style"}
{"code": "def _validate_paths(self, settings, name, value):\n        \n        return [self._validate_path(settings, name, item)\n                for item in value]", "docstring": "Apply ``SettingsPostProcessor._validate_path`` to each element in\nlist.\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (list): List of paths to patch.\n\nRaises:\nboussole.exceptions.SettingsInvalidError: Once a path does not\nexists.\n\nReturns:\nlist: Validated paths.", "source": "juraj-google-style"}
{"code": "def _unknown_args(self, args):\n        \n        for u in args:\n            self.tcex.log.warning(u'Unsupported arg found ({}).'.format(u))", "docstring": "Log argparser unknown arguments.\n\nArgs:\nargs (list): List of unknown arguments", "source": "juraj-google-style"}
{"code": "def InitPathInfos(self, client_id, path_infos):\n    \n    self.ClearPathHistory(client_id, path_infos)\n    self.WritePathInfos(client_id, path_infos)", "docstring": "Initializes a collection of path info records for a client.\n\nUnlike `WritePathInfo`, this method clears stat and hash histories of paths\nassociated with path info records. This method is intended to be used only\nin the data migration scripts.\n\nArgs:\nclient_id: A client identifier for which the paths are to be initialized.\npath_infos: A list of `rdf_objects.PathInfo` objects to write.", "source": "juraj-google-style"}
{"code": "async def find_user(cls, config: Config, user: str) -> Tuple[(str, str)]:\n    with open(config.users_file, 'r') as users_file:\n        for line in users_file:\n            (this_user, user_dir, password) = line.split(':', 2)\n            if (user == this_user):\n                return (password.rstrip('\\r\\n'), (user_dir or user))\n    raise InvalidAuth()", "docstring": "If the given user ID exists, return its expected password and\nmailbox path. Override this method to implement custom login logic.\n\nArgs:\nconfig: The maildir config object.\nuser: The expected user ID.\n\nRaises:\nInvalidAuth: The user ID was not valid.", "source": "codesearchnet"}
{"code": "def write_to_fil(self, filename_out, *args, **kwargs):\n        \n\n        \n        t0 = time.time()\n\n        \n        self.__update_header()\n\n        if self.container.isheavy():\n            self.__write_to_fil_heavy(filename_out)\n        else:\n            self.__write_to_fil_light(filename_out)\n\n        t1 = time.time()\n        logger.info('Conversion time: %2.2fsec' % (t1- t0))", "docstring": "Write data to .fil file.\nIt check the file size then decides how to write the file.\n\nArgs:\nfilename_out (str): Name of output file", "source": "juraj-google-style"}
{"code": "def AsDict(self, dt=True):\n        \n        data = {}\n        if self.name:\n            data['name'] = self.name\n            data['mlkshk_url'] = self.mlkshk_url\n        if self.profile_image_url:\n            data['profile_image_url'] = self.profile_image_url\n        if self.id:\n            data['id'] = self.id\n        if self.about:\n            data['about'] = self.about\n        if self.website:\n            data['website'] = self.website\n        if self.shakes:\n            data['shakes'] = [shk.AsDict(dt=dt) for shk in self.shakes]\n        data['shake_count'] = self.shake_count\n        return data", "docstring": "A dict representation of this User instance.\n\nThe return value uses the same key names as the JSON representation.\n\nArgs:\ndt (bool): If True, return dates as python datetime objects. If\nFalse, return dates as ISO strings.\n\nReturn:\nA dict representing this User instance", "source": "juraj-google-style"}
{"code": "def _use_cache(self, key, options=None):\n    \n    flag = ContextOptions.use_cache(options)\n    if flag is None:\n      flag = self._cache_policy(key)\n    if flag is None:\n      flag = ContextOptions.use_cache(self._conn.config)\n    if flag is None:\n      flag = True\n    return flag", "docstring": "Return whether to use the context cache for this key.\n\nArgs:\nkey: Key instance.\noptions: ContextOptions instance, or None.\n\nReturns:\nTrue if the key should be cached, False otherwise.", "source": "juraj-google-style"}
{"code": "def run_suite(test_classes, argv=None):\n    parser = argparse.ArgumentParser(description='Mobly Suite Executable.')\n    parser.add_argument('-c', '--config', nargs=1, type=str, required=True, metavar='<PATH>', help='Path to the test configuration file.')\n    parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[ClassA[.test_a] ClassB[.test_b] ...]', help='A list of test classes and optional tests to execute.')\n    if (not argv):\n        argv = sys.argv[1:]\n    args = parser.parse_args(argv)\n    test_configs = config_parser.load_test_config_file(args.config[0])\n    for test_class in test_classes:\n        if (not issubclass(test_class, base_test.BaseTestClass)):\n            logging.error('Test class %s does not extend mobly.base_test.BaseTestClass', test_class)\n            sys.exit(1)\n    selected_tests = compute_selected_tests(test_classes, args.tests)\n    ok = True\n    for config in test_configs:\n        runner = test_runner.TestRunner(config.log_path, config.test_bed_name)\n        for (test_class, tests) in selected_tests.items():\n            runner.add_test_class(config, test_class, tests)\n        try:\n            runner.run()\n            ok = (runner.results.is_all_pass and ok)\n        except signals.TestAbortAll:\n            pass\n        except:\n            logging.exception('Exception when executing %s.', config.test_bed_name)\n            ok = False\n    if (not ok):\n        sys.exit(1)", "docstring": "Executes multiple test classes as a suite.\n\nThis is the default entry point for running a test suite script file\ndirectly.\n\nArgs:\ntest_classes: List of python classes containing Mobly tests.\nargv: A list that is then parsed as cli args. If None, defaults to cli\ninput.", "source": "codesearchnet"}
{"code": "def __init__(self, export_dir):\n    self._export_dir = export_dir\n    self._saved_model = loader.parse_saved_model(export_dir)", "docstring": "Creates an MethodNameUpdater object.\n\nArgs:\nexport_dir: Directory containing the SavedModel files.\n\nRaises:\nIOError: If the saved model file does not exist, or cannot be successfully\nparsed.", "source": "github-repos"}
{"code": "def assert_cardinality(expected_cardinality):\n\n    def _apply_fn(dataset):\n        return _AssertCardinalityDataset(dataset, expected_cardinality)\n    return _apply_fn", "docstring": "Asserts the cardinality of the input dataset.\n\nNOTE: The following assumes that \"examples.tfrecord\" contains 42 records.\n\n>>> dataset = tf.data.TFRecordDataset(\"examples.tfrecord\")\n>>> cardinality = tf.data.experimental.cardinality(dataset)\n>>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy())\nTrue\n>>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42))\n>>> print(tf.data.experimental.cardinality(dataset).numpy())\n42\n\nArgs:\nexpected_cardinality: The expected cardinality of the input dataset.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.\n\nRaises:\nFailedPreconditionError: The assertion is checked at runtime (when iterating\nthe dataset) and an error is raised if the actual and expected cardinality\ndiffer.", "source": "github-repos"}
{"code": "def __init__(self, app_or_name, registry=None):\n        \n        if isinstance(app_or_name, flask.Flask):\n            self.app = app_or_name\n        else:\n            \n            self.app = flask.Flask(app_or_name)\n            \n            \n            self.app.wsgi_app = ProxyFix(self.app.wsgi_app)\n\n        \n        self.blueprint = flask.Blueprint(\n            \"gourde\", __name__, template_folder=\"templates\"\n        )\n\n        self.host = \"0.0.0.0\"\n        self.port = 8080\n        self.debug = False\n        self.log_level = None\n        self.twisted = False\n        self.gunicorn = False\n        self.threads = None\n        self.metrics = None\n        self.is_setup = False\n\n        self.setup_blueprint()\n        self.setup_prometheus(registry)\n        self.setup_sentry(sentry_dsn=None)", "docstring": "Build a new Gourde.\n\nArgs:\nEither a flask.Flask or the name of the calling module.", "source": "juraj-google-style"}
{"code": "def dbmin05years(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type float '\n                                 'for field `dbmin05years`'.format(value))\n\n        self._dbmin05years = value", "docstring": "Corresponds to IDD Field `dbmin05years`\n5-year return period values for minimum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmin05years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def write_hex(fout, buf, offset, width=16):\n    \n\n    skipped_zeroes = 0\n    for i, chunk in enumerate(chunk_iter(buf, width)):\n        \n        if chunk == (b\"\\x00\" * width):\n            skipped_zeroes += 1\n            continue\n        elif skipped_zeroes != 0:\n            fout.write(\"  -- skipped zeroes: {}\\n\".format(skipped_zeroes))\n            skipped_zeroes = 0\n\n        \n        fout.write(\"{:016x}  \".format(i * width + offset))\n\n        \n        column = \"  \".join([\" \".join([\"{:02x}\".format(c) for c in subchunk])\n                            for subchunk in chunk_iter(chunk, 8)])\n        w = width * 2 + (width - 1) + ((width \n        if len(column) != w:\n            column += \" \" * (w - len(column))\n        fout.write(column)\n\n        \n        fout.write(\"  |\")\n        for c in chunk:\n            if c in PRINTABLE_CHARS:\n                fout.write(chr(c))\n            else:\n                fout.write(\".\")\n        if len(chunk) < width:\n            fout.write(\" \" * (width - len(chunk)))\n        fout.write(\"|\")\n\n        fout.write(\"\\n\")", "docstring": "Write the content of 'buf' out in a hexdump style\n\nArgs:\nfout: file object to write to\nbuf: the buffer to be pretty printed\noffset: the starting offset of the buffer\nwidth: how many bytes should be displayed per row", "source": "juraj-google-style"}
{"code": "def parse_headers(obj):\n    \n    if isinstance(obj, basestring):\n        obj = cStringIO.StringIO(obj)\n    hdrs = []\n    for line in obj:\n        hdr = parse_header(line)\n        if not hdr:\n            break\n        if isinstance(hdr, basestring):\n            if not hdrs:\n                raise ValueError(\"First header is a continuation\")\n            hdrs[-1] = (hdrs[-1][0], hdrs[-1][1] + hdr)\n            continue\n        hdrs.append(hdr)\n    return iodict.IODict(hdrs)", "docstring": "Parse a string a iterable object (including file like objects) to a\npython dictionary.\n\nArgs:\nobj: An iterable object including file-like objects.\n\nReturns:\nAn dictionary of headers. If a header is repeated then the last value\nfor that header is given.\n\nRaises:\nValueError: If the first line is a continuation line or the headers\ncannot be parsed.", "source": "juraj-google-style"}
{"code": "def endswith(self, search_str):\n    for entry in reversed(list(open(self._jrnl_file, 'r'))[(- 5):]):\n        if (search_str in entry):\n            return True\n    return False", "docstring": "Check whether the provided string exists in Journal file.\n\nOnly checks the last 5 lines of the journal file. This method is\nusually used when tracking a journal from an active Revit session.\n\nArgs:\nsearch_str (str): string to search for\n\nReturns:\nbool: if True the search string is found", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    file_offset = file_object.get_offset()\n    file_size = file_object.get_size()\n    while (file_offset < file_size):\n        try:\n            self._ParseRecord(parser_mediator, file_object)\n        except errors.ParseError as exception:\n            if (file_offset == 0):\n                raise errors.UnableToParseFile('Unable to parse first event record with error: {0!s}'.format(exception))\n        file_offset = file_object.get_offset()", "docstring": "Parses a BSM file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def calculate_weights(correlation_matrix, min_wt):\n    np.fill_diagonal(correlation_matrix.values, np.nan)\n    correlation_matrix = correlation_matrix.clip(lower=0)\n    raw_weights = correlation_matrix.mean(axis=1)\n    raw_weights = raw_weights.clip(lower=min_wt)\n    weights = (raw_weights / sum(raw_weights))\n    return (raw_weights.round(rounding_precision), weights.round(rounding_precision))", "docstring": "Calculate a weight for each profile based on its correlation to other\nreplicates. Negative correlations are clipped to 0, and weights are clipped\nto be min_wt at the least.\n\nArgs:\ncorrelation_matrix (pandas df): Correlations between all replicates\nmin_wt (float): Minimum raw weight when calculating weighted average\n\nReturns:\nraw weights (pandas series):  Mean correlation to other replicates\nweights (pandas series): raw_weights normalized such that they add to 1", "source": "codesearchnet"}
{"code": "def wrap_or_unwrap(value):\n    if isinstance(value, NoDependency):\n        return value.value\n    if isinstance(value, base.Trackable):\n        return value\n    elif type(value) == dict:\n        return _DictWrapper(value)\n    elif type(value) == collections.OrderedDict:\n        return _DictWrapper(value)\n    elif type(value) == list:\n        return ListWrapper(value)\n    elif isinstance(value, tuple) and _should_wrap_tuple(value):\n        return _TupleWrapper(value)\n    else:\n        return value", "docstring": "Wraps input value into trackable data structures.\n\nThis is mostly useful for containers like list, dict, etc, which could contain\ntrackable objects in it. Wrapped data structure will be tracked when\nassociated with a `tf.Module`, so that save model/checkpoint can properly\ntrack the dependency.\n\nIt will also unwrap NoDependency objects.\n\nArgs:\nvalue: the input object to be wrapped.\n\nReturns:\nWrapped trackable data structure.", "source": "github-repos"}
{"code": "def load(self, filepath, file_encoding=None):\n    with open(filepath, encoding=file_encoding) as inf:\n        for line in inf:\n            current_line = str(line).strip()\n            if current_line.startswith('@prefix'):\n                self._add_ttl_ns(current_line.replace('\\n', ''))\n            elif (len(current_line) > 10):\n                break\n    self.__make_dicts__", "docstring": "Reads the the beginning of a turtle file and sets the prefix's used\nin that file and sets the prefix attribute\n\nArgs:\nfilepath: the path to the turtle file\nfile_encoding: specify a specific encoding if necessary", "source": "codesearchnet"}
{"code": "def load(path):\n        \n        with open(path, \"r\") as fobj:\n            analytics = Analytics(info=json.load(fobj))\n        os.unlink(path)\n        return analytics", "docstring": "Loads analytics report from json file specified by path.\n\nArgs:\npath (str): path to json file with analytics report.", "source": "juraj-google-style"}
{"code": "def set_metadata(self, key: str, value: Any, per_trial: bool=True) -> None:", "docstring": "Sets metadata for current trial or current sampling.\n\nMetadata can be used in two use cases:\n\n* Worker processes that co-work on the same trial can use meta-data to\ncommunicate with each other.\n* Worker use metadata as a persistent store to save information for\ncurrent trial, which can be retrieved via `poll_result` method later.\n\nArgs:\nkey: A string as key to metadata.\nvalue: A value that can be serialized by `pg.to_json_str`.\nper_trial: If True, the key is set per current trial. Otherwise, it\nis set per current sampling loop.", "source": "github-repos"}
{"code": "def _TerminateProcess(self, process):\n    pid = process.pid\n    logger.warning('Terminating process: (PID: {0:d}).'.format(pid))\n    process.terminate()\n    process.join(timeout=self._PROCESS_JOIN_TIMEOUT)\n    if process.is_alive():\n        logger.warning('Killing process: (PID: {0:d}).'.format(pid))\n        self._KillProcess(pid)", "docstring": "Terminate a process.\n\nArgs:\nprocess (MultiProcessBaseProcess): process to terminate.", "source": "codesearchnet"}
{"code": "def update_state(world):\n    world_size = len(world)\n\n    def wrap(index):\n        'Wrap an index around the other end of the array'\n        return (index % world_size)\n    for x in range(world_size):\n        for y in range(world_size):\n            if (not world[x][y].allow_change.get()):\n                continue\n            live_neighbor_count = sum([world[wrap(x)][wrap((y + 1))].value, world[wrap((x + 1))][wrap((y + 1))].value, world[wrap((x + 1))][wrap(y)].value, world[wrap((x + 1))][wrap((y - 1))].value, world[wrap(x)][wrap((y - 1))].value, world[wrap((x - 1))][wrap((y - 1))].value, world[wrap((x - 1))][wrap(y)].value, world[wrap((x - 1))][wrap((y + 1))].value])\n            if world[x][y].value:\n                if (not ((live_neighbor_count == 2) or (live_neighbor_count == 3))):\n                    world[x][y].value = False\n            elif (live_neighbor_count == 3):\n                world[x][y].value = True", "docstring": "Increment the world state, determining which cells live, die, or appear.\n\nArgs:\nworld (list[list]): A square matrix of cells\n\nReturns: None", "source": "codesearchnet"}
{"code": "def delete(self, record_id):\n        \n        record_url = self.record_url(record_id)\n        return self._delete(record_url)", "docstring": "Deletes a record by its id\n\n>>> record = airtable.match('Employee Id', 'DD13332454')\n>>> airtable.delete(record['id'])\n\nArgs:\nrecord_id(``str``): Airtable record id\n\nReturns:\nrecord (``dict``): Deleted Record", "source": "juraj-google-style"}
{"code": "def _resolve_subkeys(key, separator='.'):\n    \n    subkey = None\n    if separator in key:\n        index = key.index(separator)\n        subkey = key[index + 1:]\n        key = key[:index]\n    return key, subkey", "docstring": "Given a key which may actually be a nested key, return the top level\nkey and any nested subkeys as separate values.\n\nArgs:\nkey (str): A string that may or may not contain the separator.\nseparator (str): The namespace separator. Defaults to `.`.\n\nReturns:\nTuple[str, str]: The key and subkey(s).", "source": "juraj-google-style"}
{"code": "def add_evaluation_step(result_tensor, ground_truth_tensor):\n    with tf.name_scope('accuracy'):\n        with tf.name_scope('correct_prediction'):\n            prediction = tf.argmax(result_tensor, 1)\n            correct_prediction = tf.equal(prediction, ground_truth_tensor)\n        with tf.name_scope('accuracy'):\n            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n    tf.summary.scalar('accuracy', evaluation_step)\n    return (evaluation_step, prediction)", "docstring": "Inserts the operations we need to evaluate the accuracy of our results.\n\nArgs:\nresult_tensor: The new final node that produces results.\nground_truth_tensor: The node we feed ground truth data\ninto.\n\nReturns:\nTuple of (evaluation step, prediction).", "source": "codesearchnet"}
{"code": "def AddWeight(self, path_segment_index, weight):\n    \n    if path_segment_index not in self._weight_per_index:\n      raise ValueError('Path segment index not set.')\n\n    self._weight_per_index[path_segment_index] += weight\n\n    if weight not in self._indexes_per_weight:\n      self._indexes_per_weight[weight] = []\n\n    self._indexes_per_weight[weight].append(path_segment_index)", "docstring": "Adds a weight for a specific path segment index.\n\nArgs:\npath_segment_index: an integer containing the path segment index.\nweight: an integer containing the weight.\n\nRaises:\nValueError: if the path segment weights do not contain\nthe path segment index.", "source": "juraj-google-style"}
{"code": "def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):\n    with tf.variable_scope('softmax_cross_entropy_one_hot', values=[logits, labels]):\n        del weights_fn\n        cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)\n        return (cross_entropy, tf.constant(1.0))", "docstring": "Calculate softmax cross entropy given one-hot labels and logits.\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\ncross-entropy (scalar), weights", "source": "codesearchnet"}
{"code": "def var(\n        self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n    ):\n        \n        axis = self._get_axis_number(axis) if axis is not None else 0\n        if numeric_only is not None and not numeric_only:\n            self._validate_dtypes(numeric_only=True)\n        return self._reduce_dimension(\n            self._query_compiler.var(\n                axis=axis,\n                skipna=skipna,\n                level=level,\n                ddof=ddof,\n                numeric_only=numeric_only,\n                **kwargs\n            )\n        )", "docstring": "Computes variance across the DataFrame.\n\nArgs:\naxis (int): The axis to take the variance on.\nskipna (bool): True to skip NA values, false otherwise.\nddof (int): degrees of freedom\n\nReturns:\nThe variance of the DataFrame.", "source": "juraj-google-style"}
{"code": "def first_seen(self, first_seen):\n    if (not self.can_update()):\n        self._tcex.handle_error(910, [self.type])\n    first_seen = self._utils.format_datetime(first_seen, date_format='%Y-%m-%dT%H:%M:%SZ')\n    self._data['firstSeen'] = first_seen\n    request = {'firstSeen': first_seen}\n    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Updates the campaign with the new first_seen date.\n\nArgs:\nfirst_seen: The first_seen date. Converted to %Y-%m-%dT%H:%M:%SZ date format\n\nReturns:", "source": "codesearchnet"}
{"code": "def fstat(self, file_des):\n    file_object = self.filesystem.get_open_file(file_des).get_object()\n    return file_object.stat_result.copy()", "docstring": "Return the os.stat-like tuple for the FakeFile object of file_des.\n\nArgs:\nfile_des: The file descriptor of filesystem object to retrieve.\n\nReturns:\nThe FakeStatResult object corresponding to entry_path.\n\nRaises:\nOSError: if the filesystem object doesn't exist.", "source": "codesearchnet"}
{"code": "def sparsemax(x, axis=-1):\n    if any_symbolic_tensors((x,)):\n        return Sparsemax(axis).symbolic_call(x)\n    return backend.nn.sparsemax(x, axis=axis)", "docstring": "Sparsemax activation function.\n\nFor each batch `i`, and class `j`,\nsparsemax activation function is defined as:\n\n`sparsemax(x)[i, j] = max(x[i, j] - τ(x[i, :]), 0).`\n\nArgs:\nx: Input tensor.\naxis: `int`, axis along which the sparsemax operation is applied.\n\nReturns:\nA tensor, output of sparsemax transformation. Has the same type and\nshape as `x`.\n\nExample:\n\n>>> x = np.array([-1., 0., 1.])\n>>> x_sparsemax = keras.ops.sparsemax(x)\n>>> print(x_sparsemax)\narray([0., 0., 1.], shape=(3,), dtype=float64)", "source": "github-repos"}
{"code": "def post_op(self, id: str, path_data: Union[(dict, None)], post_data: Any) -> dict:\n    path = self._get_path_for_op_id(id)\n    return self.post_path(path, path_data, post_data)", "docstring": "Modifies the ESI by looking up an operation id.\n\nArgs:\npath: raw ESI URL path\npath_data: data to format the path with (can be None)\npost_data: data to send to ESI\n\nReturns:\nESI data", "source": "codesearchnet"}
{"code": "def rotate(p, rad, o=(0, 0)):\n    \n    v = vector(o, p)\n    fx = lambda x, y, d: x * cos(d) - y * sin(d)\n    fy = lambda x, y, d: x * sin(d) + y * cos(d)\n    rv = fx(v[0], v[1], rad), fy(v[0], v[1], rad)\n    return translate(rv, o)", "docstring": "rotate vector\nArgs:\np: point (x, y)\nrad: angle(radian)\no: origin (x, y)", "source": "juraj-google-style"}
{"code": "async def process_message(self, message, wait=True):\n    to_check = deque([self._waiters])\n    ignored = True\n    while (len(to_check) > 0):\n        context = to_check.popleft()\n        waiters = context.get(OperationManager._LEAF, [])\n        for waiter in waiters:\n            if isinstance(waiter, asyncio.Future):\n                waiter.set_result(message)\n            else:\n                try:\n                    (await _wait_or_launch(self._loop, waiter, message, wait))\n                except:\n                    self._logger.warning('Error calling every_match callback, callback=%s, message=%s', waiter, message, exc_info=True)\n            ignored = False\n        for key in context:\n            if (key is OperationManager._LEAF):\n                continue\n            message_val = _get_key(message, key)\n            if (message_val is _MISSING):\n                continue\n            next_level = context[key]\n            if (message_val in next_level):\n                to_check.append(next_level[message_val])\n    return (not ignored)", "docstring": "Process a message to see if it wakes any waiters.\n\nThis will check waiters registered to see if they match the given\nmessage.  If so, they are awoken and passed the message.  All matching\nwaiters will be woken.\n\nThis method returns False if the message matched no waiters so it was\nignored.\n\nNormally you want to use wait=True (the default behavior) to guarantee\nthat all callbacks have finished before this method returns.  However,\nsometimes that can cause a deadlock if those callbacks would\nthemselves invoke behavior that requires whatever is waiting for this\nmethod to be alive.  In that case you can pass wait=False to ensure\nthat the caller of this method does not block.\n\nArgs:\nmessage (dict or object): The message that we should process\nwait (bool): Whether to block until all callbacks have finished\nor to return once the callbacks have been launched.\n\nReturns:\nbool: True if at least one waiter matched, otherwise False.", "source": "codesearchnet"}
{"code": "def get_statuses(self, batch_ids):\n    with self._lock:\n        return {b: self.get_status(b) for b in batch_ids}", "docstring": "Returns a statuses dict for the requested batches.\n\nArgs:\nbatch_ids (list of str): The ids of the batches to get statuses for\n\nReturns:\ndict: A dict with keys of batch ids, and values of status enums", "source": "codesearchnet"}
{"code": "def supported_cache_type(types):\n    \n    if isinstance(types, str):\n        types = [typ.strip() for typ in types.split(\",\")]\n    for typ in types:\n        if typ not in [\"reflink\", \"hardlink\", \"symlink\", \"copy\"]:\n            return False\n    return True", "docstring": "Checks if link type config option has a valid value.\n\nArgs:\ntypes (list/string): type(s) of links that dvc should try out.", "source": "juraj-google-style"}
{"code": "def _gen_sentence(self, assetid_body_tuple):\n        \n        asset_id, body = assetid_body_tuple\n        text = self._process(body)\n        sentence = LabeledSentence(text, labels=['DOC_%s' % str(asset_id)])\n        return sentence", "docstring": "Takes an assetid_body_tuple and returns a Doc2Vec LabeledSentence\n\nArgs:\nassetid_body_tuple (tuple): (assetid, bodytext) pair", "source": "juraj-google-style"}
{"code": "def _run_submission(self, metadata):\n    \n    if self._use_gpu:\n      docker_binary = 'nvidia-docker'\n      container_name = metadata['container_gpu']\n    else:\n      docker_binary = 'docker'\n      container_name = metadata['container']\n    if metadata['type'] == 'defense':\n      cmd = [docker_binary, 'run',\n             '--network=none',\n             '-m=24g',\n             '-v', '{0}:/input_images:ro'.format(self._sample_input_dir),\n             '-v', '{0}:/output_data'.format(self._sample_output_dir),\n             '-v', '{0}:/code'.format(self._extracted_submission_dir),\n             '-w', '/code',\n             container_name,\n             './' + metadata['entry_point'],\n             '/input_images',\n             '/output_data/result.csv']\n    else:\n      epsilon = np.random.choice(ALLOWED_EPS)\n      cmd = [docker_binary, 'run',\n             '--network=none',\n             '-m=24g',\n             '-v', '{0}:/input_images:ro'.format(self._sample_input_dir),\n             '-v', '{0}:/output_images'.format(self._sample_output_dir),\n             '-v', '{0}:/code'.format(self._extracted_submission_dir),\n             '-w', '/code',\n             container_name,\n             './' + metadata['entry_point'],\n             '/input_images',\n             '/output_images',\n             str(epsilon)]\n    logging.info('Command to run submission: %s', ' '.join(cmd))\n    return shell_call(cmd)", "docstring": "Runs submission inside Docker container.\n\nArgs:\nmetadata: dictionary with submission metadata\n\nReturns:\nTrue if status code of Docker command was success (i.e. zero),\nFalse otherwise.", "source": "juraj-google-style"}
{"code": "def update_qos_aggregated_configuration(self, qos_configuration, timeout=-1):\n        \n        uri = \"{}{}\".format(self.data[\"uri\"], self.QOS_AGGREGATED_CONFIGURATION)\n        return self._helper.update(qos_configuration, uri=uri, timeout=timeout)", "docstring": "Updates the QoS aggregated configuration for the logical interconnect.\n\nArgs:\nqos_configuration:\nQOS configuration.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Logical Interconnect.", "source": "juraj-google-style"}
{"code": "def _decode_socket_response_bytes(self, response):\n    try:\n        return str(response, encoding='utf8')\n    except UnicodeError:\n        self.log.error('Failed to decode socket response bytes using encoding utf8: %s', response)\n        raise", "docstring": "Returns a string decoded from the socket response bytes.\n\nArgs:\nresponse: bytes, the response to be decoded.\n\nReturns:\nThe string decoded from the given bytes.\n\nRaises:\nUnicodeError: if failed to decode the given bytes using encoding utf8.", "source": "github-repos"}
{"code": "def random(cls, num_qubits, seed=None):\n    if (seed is not None):\n        np.random.seed(seed)\n    z = np.random.randint(2, size=num_qubits).astype(np.bool)\n    x = np.random.randint(2, size=num_qubits).astype(np.bool)\n    return cls(z, x)", "docstring": "Return a random Pauli on number of qubits.\n\nArgs:\nnum_qubits (int): the number of qubits\nseed (int): Optional. To set a random seed.\nReturns:\nPauli: the random pauli", "source": "codesearchnet"}
{"code": "def to_csv(self):\n    header = []\n    component_header = []\n    for row in self:\n        for j in row.__dict__.keys():\n            if (j == '_colour'):\n                j = 'colour'\n            header.append(j)\n        for k in row.component.__dict__.keys():\n            component_header.append(k)\n    header = set(header)\n    component_header = set(component_header)\n    header.remove('component')\n    header_row = ''\n    if ('colour' in header):\n        header_row += 'colour,'\n        header.remove('colour')\n        has_colour = True\n    for item in header:\n        header_row += (item + ',')\n    for item in component_header:\n        header_row += (('component ' + item) + ',')\n    result = (header_row.strip(',') + '\\n')\n    for row in self:\n        if has_colour:\n            result += (row.__dict__.get('_colour', '') + ',')\n        for item in header:\n            result += (str(row.__dict__.get(item, '')) + ',')\n        for item in component_header:\n            result += (str(row.component.__dict__.get(item, '')) + ',')\n        result += '\\n'\n    return result", "docstring": "Renders a legend as a CSV string.\n\nNo arguments.\n\nReturns:\nstr: The legend as a CSV.", "source": "codesearchnet"}
{"code": "def add_output(self, *args, **kwargs):\n    return self._outputs.add(*args, **kwargs)", "docstring": "Add a wrapped output argument to the hint.\n\nArgs:\n*args: The output tensor.\n**kwargs:\n\"name\" label\n\"tag\" a tag to group multiple arguments that will be aggregated. I.e.\na string like 'cool_input'. Basically multiple inputs can be added\nto the same hint for parallel operations that will eventually be\ncombined. An example would be static_rnn which creates multiple copies\nof state or inputs.\n\"aggregate\" aggregation strategy that is valid only for tag non None.\nAcceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,\nand OpHint.AGGREGATE_STACK.\n\"index_override\" The global index to use. This corresponds to the\nargument order in the final stub that will be generated.\nReturns:\nThe wrapped output tensor.", "source": "github-repos"}
{"code": "def usufyToGmlExport(d, fPath):\n    \n    \n    try:\n        oldData=nx.read_gml(fPath)\n    except UnicodeDecodeError as e:\n        print(\"UnicodeDecodeError:\\t\" + str(e))\n        print(\"Something went wrong when reading the .gml file relating to the decoding of UNICODE.\")\n        import time as time\n        fPath+=\"_\" +str(time.time())\n        print(\"To avoid losing data, the output file will be renamed to use the timestamp as:\\n\" + fPath + \"_\" + str(time.time()))\n        print()\n        \n        oldData = nx.Graph()\n    except Exception as e:\n        \n        oldData = nx.Graph()\n\n    newGraph = _generateGraphData(d, oldData)\n\n    \n    nx.write_gml(newGraph,fPath)", "docstring": "Workaround to export data to a .gml file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "juraj-google-style"}
{"code": "def _build(self, inputs, prev_state):\n    next_state = self._model(prev_state)\n    return (next_state, next_state)", "docstring": "Connects the ModelRNN module into the graph.\n\nIf this is not the first time the module has been connected to the graph,\nthe Tensors provided as input_ and state must have the same final\ndimension, in order for the existing variables to be the correct size for\ntheir corresponding multiplications. The batch size may differ for each\nconnection.\n\nArgs:\ninputs: Tensor input to the ModelRNN (ignored).\nprev_state: Tensor of size `model.output_size`.\n\nReturns:\noutput: Tensor of size `model.output_size`.\nnext_state: Tensor of size `model.output_size`.", "source": "codesearchnet"}
{"code": "def to_proj4(self, as_dict=False):\n        \n        string = \"%s\" % self.proj.to_proj4()\n        string += \" %s\" % self.geogcs.to_proj4(toplevel=False)\n        string += \" \" + \" \".join(param.to_proj4() for param in self.params)\n        string += \" %s\" % self.unit.to_proj4()\n        string += \" +axis=\" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + \"u\" \n        string += \" +no_defs\"\n        \n        if as_dict:\n            return dict([\n                        entry.lstrip('+').split('=')\n                        for entry in string.split()\n                        if entry != \"+no_defs\"\n                         ])\n        else:\n            return string", "docstring": "Returns the CS as a proj4 formatted string or dict.\n\nArguments:\n\n- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).", "source": "juraj-google-style"}
{"code": "def create_opengl_context(surface_size=(640, 480)):\n    egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)\n    (major, minor) = (egl.EGLint(), egl.EGLint())\n    egl.eglInitialize(egl_display, pointer(major), pointer(minor))\n    config_attribs = [egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE]\n    config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs)\n    num_configs = egl.EGLint()\n    egl_cfg = egl.EGLConfig()\n    egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs))\n    (width, height) = surface_size\n    pbuffer_attribs = [egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE]\n    pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs)\n    egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs)\n    egl.eglBindAPI(egl.EGL_OPENGL_API)\n    egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None)\n    egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)", "docstring": "Create offscreen OpenGL context and make it current.\n\nUsers are expected to directly use EGL API in case more advanced\ncontext management is required.\n\nArgs:\nsurface_size: (width, height), size of the offscreen rendering surface.", "source": "codesearchnet"}
{"code": "def GetNumberOfRows(self, table_name):\n    \n    if not self._connection:\n      raise IOError('Not opened.')\n\n    self._cursor.execute(self._NUMBER_OF_ROWS_QUERY.format(table_name))\n    row = self._cursor.fetchone()\n    if not row:\n      raise IOError(\n          'Unable to retrieve number of rows of table: {0:s}'.format(\n              table_name))\n\n    number_of_rows = row[0]\n    if isinstance(number_of_rows, py2to3.STRING_TYPES):\n      try:\n        number_of_rows = int(number_of_rows, 10)\n      except ValueError as exception:\n        raise IOError((\n            'Unable to determine number of rows of table: {0:s} '\n            'with error: {1!s}').format(table_name, exception))\n\n    return number_of_rows", "docstring": "Retrieves the number of rows in the table.\n\nArgs:\ntable_name (str): name of the table.\n\nReturns:\nint: number of rows.\n\nRaises:\nIOError: if the file-like object has not been opened.\nOSError: if the file-like object has not been opened.", "source": "juraj-google-style"}
{"code": "def upload_file(self, url, file, callback=None, extra_headers={}):\n    extra_headers = extra_headers.copy()\n    response = None\n    if (os.stat(file.name).st_size == 0):\n        raise CommError(('%s is an empty file' % file.name))\n    try:\n        progress = Progress(file, callback=callback)\n        response = requests.put(url, data=progress, headers=extra_headers)\n        response.raise_for_status()\n    except requests.exceptions.RequestException as e:\n        total = progress.len\n        status = self._status_request(url, total)\n        if (status.status_code in (308, 408, 500, 502, 503, 504)):\n            util.sentry_reraise(retry.TransientException(exc=e))\n        else:\n            util.sentry_reraise(e)\n    return response", "docstring": "Uploads a file to W&B with failure resumption\n\nArgs:\nurl (str): The url to download\nfile (str): The path to the file you want to upload\ncallback (:obj:`func`, optional): A callback which is passed the number of\nbytes uploaded since the last time it was called, used to report progress\n\nReturns:\nThe requests library response object", "source": "codesearchnet"}
{"code": "def get_switch_macs(self, switch_ip=None, node=None, vlan=None, mac=None, port=None, verbose=0):\n    if (switch_ip == None):\n        if (node == None):\n            raise Exception('get_switch_macs() requires switch_ip or node parameter')\n            return None\n        switch_ip = node.get_ipaddr()\n    mac_obj = natlas_mac(self.config)\n    if (vlan == None):\n        macs = mac_obj.get_macs(switch_ip, verbose)\n    else:\n        macs = mac_obj.get_macs_for_vlan(switch_ip, vlan, verbose)\n    if ((mac == None) & (port == None)):\n        return (macs if macs else [])\n    ret = []\n    for m in macs:\n        if (mac != None):\n            if (re.match(mac, m.mac) == None):\n                continue\n        if (port != None):\n            if (re.match(port, m.port) == None):\n                continue\n        ret.append(m)\n    return ret", "docstring": "Get the CAM table from a switch.\n\nArgs:\nswitch_ip           IP address of the device\nnode                natlas_node from new_node()\nvlan                Filter results by VLAN\nMAC                 Filter results by MAC address (regex)\nport                Filter results by port (regex)\nverbose             Display progress to stdout\n\nswitch_ip or node is required\n\nReturn:\nArray of natlas_mac objects", "source": "codesearchnet"}
{"code": "def dict_get_path(data, path, default=None):\n    keys = path.split('.')\n    for k in keys:\n        if (type(data) == list):\n            found = False\n            for item in data:\n                name = item.get('name', item.get('type'))\n                if (name == k):\n                    found = True\n                    data = item\n                    break\n            if (not found):\n                return default\n        elif (type(data) == dict):\n            if (k in data):\n                data = data[k]\n            else:\n                return default\n        else:\n            return default\n    return data", "docstring": "Returns the value inside nested structure of data located\nat period delimited path\n\nWhen traversing a list, as long as that list is containing objects of\ntype dict, items in that list will have their \"name\" and \"type\" values\ntested against the current key in the path.\n\nArgs:\ndata (dict or list): data to traverse\npath (str): '.' delimited string\n\nKwargs:\ndefault: value to return if path does not exist", "source": "codesearchnet"}
{"code": "def import_tracks(self, import_tracks):\n        \n\n        if isinstance(import_tracks, tracks.Track):\n            import_tracks = [import_tracks]\n\n        idx_mapping = {}\n\n        for track in import_tracks:\n            idx_mapping[track.idx] = track\n\n            \n            if track.idx in self._tracks.keys():\n                track.idx = naming.index_name_if_in_list(track.idx, self._tracks.keys())\n\n            self._tracks[track.idx] = track\n\n        return idx_mapping", "docstring": "Add the given tracks/track to the corpus.\nIf any of the given track-ids already exists, a suffix is appended so it is unique.\n\nArgs:\nimport_tracks (list): Either a list of or a single :py:class:`audiomate.tracks.Track`.\n\nReturns:\ndict: A dictionary containing track-idx mappings (old-track-idx/track-instance).\nIf a track is imported, whose idx already exists this mapping can be used to check\nthe new id.", "source": "juraj-google-style"}
{"code": "def _set_save_spec(self, inputs, args=None, kwargs=None):\n    if self._saved_model_inputs_spec is not None:\n        return\n    inputs_spec = tree.map_structure(tf_utils.get_tensor_spec, inputs)\n    args_spec = tree.map_structure(tf_utils.get_tensor_spec, args or [])\n    kwargs_spec = {}\n    for key, kwarg in kwargs.items():\n        flat_kwarg = tree.flatten(kwarg)\n        flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]\n        if any((s is None for s in flat_specs)):\n            continue\n        kwargs_spec[key] = tree.pack_sequence_as(kwarg, flat_specs)\n    self._saved_model_inputs_spec = inputs_spec\n    self._saved_model_arg_spec = ([inputs_spec] + list(args_spec), kwargs_spec)", "docstring": "Defines the save spec so that serialization can trace layer calls.\n\nThe TensorSpecs of the call function `inputs`, `args`, and `kwargs` are\nsaved into a tuple of `([inputs] + args, kwargs)`.\n\nArgs:\ninputs: possibly nested inputs passed into the call function.\nargs: a list of positional arguments passed into call.\nkwargs: a dictionary of keyword arguments passed into call.", "source": "github-repos"}
{"code": "def version(self):\n        \n        version = int(self._dll.JLINKARM_GetDLLVersion())\n        major = version / 10000\n        minor = (version / 100) % 100\n        rev = version % 100\n        rev = '' if rev == 0 else chr(rev + ord('a') - 1)\n        return '%d.%02d%s' % (major, minor, rev)", "docstring": "Returns the device's version.\n\nThe device's version is returned as a string of the format: M.mr where\n``M`` is major number, ``m`` is minor number, and ``r`` is revision\ncharacter.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nDevice version string.", "source": "juraj-google-style"}
{"code": "def __init__(self, board_name, https=False, session=None):\n        \n        self._board_name = board_name\n        self._https = https\n        self._protocol = 'https:\n        self._url = Url(board_name=board_name, https=self._https)\n\n        self._requests_session = session or requests.session()\n        self._requests_session.headers['User-Agent'] = 'py-4chan/%s' % __version__\n\n        self._thread_cache = {}", "docstring": "Creates a :mod:`basc_py4chan.Board` object.\n\nArgs:\nboard_name (string): Name of the board, such as \"tg\" or \"etc\".\nhttps (bool): Whether to use a secure connection to 4chan.\nsession: Existing requests.session object to use instead of our current one.", "source": "juraj-google-style"}
{"code": "def start(self):\n    return (self.first.lineno, self.first.column)", "docstring": "The start of the logical line.\n\nReturns:\nA tuple of the starting line number and column.", "source": "github-repos"}
{"code": "def merge_lists(*args):\n    out = {}\n    for contacts in filter(None, args):\n        for contact in contacts:\n            out[contact.value] = contact\n    return list(out.values())", "docstring": "Merge an arbitrary number of lists into a single list and dedupe it\n\nArgs:\n*args: Two or more lists\n\nReturns:\nA deduped merged list of all the provided lists as a single list", "source": "codesearchnet"}
{"code": "def crop_image_to_patches(self, images: np.ndarray, min_patches: int, max_patches: int, use_thumbnail: bool=True, patch_size: Optional[Union[Tuple, int, dict]]=None, data_format: ChannelDimension=None):\n    if data_format is None:\n        data_format = infer_channel_dimension_format(images)\n    images = to_channel_dimension_format(images, ChannelDimension.FIRST, data_format)\n    patch_size_height, patch_size_width = (patch_size['height'], patch_size['width'])\n    original_height, original_width = images.shape[-2:]\n    num_columns, num_rows = get_optimal_tiled_canvas((original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches)\n    target_width = patch_size_width * num_columns\n    target_height = patch_size_height * num_rows\n    num_blocks = num_columns * num_rows\n    resized_image = self.resize(images, {'height': target_height, 'width': target_width}, data_format=ChannelDimension.FIRST, input_data_format=ChannelDimension.FIRST)\n    processed_images = []\n    for i in range(num_blocks):\n        column = i % num_columns\n        row = i \n        box = (column * patch_size_width, row * patch_size_height, (column + 1) * patch_size_width, (row + 1) * patch_size_height)\n        patch_image = resized_image[..., box[1]:box[3], box[0]:box[2]]\n        patch_image = to_channel_dimension_format(patch_image, data_format, ChannelDimension.FIRST)\n        processed_images.append(patch_image)\n    if use_thumbnail and len(processed_images) != 1:\n        thumbnail_img = self.resize(images, patch_size, data_format=data_format, input_data_format=ChannelDimension.FIRST)\n        processed_images.append(thumbnail_img)\n    return processed_images", "docstring": "Crop the image to patches and return a list of cropped images.\nThe number of patches and their grid arrangement are determined by the original image size,\nthe target patch size and the minimum and maximum number of patches.\nThe aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.\n\nArgs:\nimages (`np.ndarray`):\nThe image to be cropped.\nmin_patches (`int`):\nThe minimum number of patches to be extracted from the image.\nmax_patches (`int`):\nThe maximum number of patches to be extracted from the image.\nuse_thumbnail (`bool`, *optional*, defaults to `True`):\nWhether to add a thumbnail image to the list of cropped patches.\npatch_size (`int`, `Tuple[int, int]`, `dict`, *optional*):\nThe size of the output patches.\ndata_format (`ChannelDimension`, *optional*):\nThe format of the image data. If `None`, the format is inferred from the input image.\n\nReturns:\nList[`PIL.Image.Image`] or List[np.ndarray]: The list of cropped images.", "source": "github-repos"}
{"code": "def _commit(self):\n    if (not self.in_progress):\n        raise ValueError(_CANT_COMMIT)\n    commit_response = _commit_with_retry(self._client, self._write_pbs, self._id)\n    self._clean_up()\n    return list(commit_response.write_results)", "docstring": "Transactionally commit the changes accumulated.\n\nReturns:\nList[google.cloud.proto.firestore.v1beta1.\\\nwrite_pb2.WriteResult, ...]: The write results corresponding\nto the changes committed, returned in the same order as the\nchanges were applied to this transaction. A write result contains\nan ``update_time`` field.\n\nRaises:\nValueError: If no transaction is in progress.", "source": "codesearchnet"}
{"code": "def final_energy_from_outcar( filename='OUTCAR' ):\n    \n    with open( filename ) as f:\n        outcar = f.read()\n    energy_re = re.compile( \"energy\\(sigma->0\\) =\\s+([-\\d\\.]+)\" )\n    energy = float( energy_re.findall( outcar )[-1] )\n    return energy", "docstring": "Finds and returns the energy from a VASP OUTCAR file, by searching for the last `energy(sigma->0)` entry.\n\nArgs:\nfilename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\nReturns:\n(Float): The last energy read from the OUTCAR file.", "source": "juraj-google-style"}
{"code": "def FindEnumTypeByName(self, full_name):\n    full_name = _NormalizeFullyQualifiedName(full_name)\n    if (full_name not in self._enum_descriptors):\n        self._FindFileContainingSymbolInDb(full_name)\n    return self._enum_descriptors[full_name]", "docstring": "Loads the named enum descriptor from the pool.\n\nArgs:\nfull_name: The full name of the enum descriptor to load.\n\nReturns:\nThe enum descriptor for the named type.\n\nRaises:\nKeyError: if the enum cannot be found in the pool.", "source": "codesearchnet"}
{"code": "def _control_dependencies_for_inputs(self, input_ops) -> list[Operation]:\n    ret = []\n    for controller in self._control_dependencies_stack:\n        dominated = False\n        for op in input_ops:\n            if controller.op_in_group(op):\n                dominated = True\n                break\n        if not dominated:\n            ret.extend((c for c in controller.control_inputs if c not in input_ops))\n    return ret", "docstring": "For an op that takes `input_ops` as inputs, compute control inputs.\n\nThe returned control dependencies should yield an execution that\nis equivalent to adding all control inputs in\nself._control_dependencies_stack to a newly created op. However,\nthis function attempts to prune the returned control dependencies\nby observing that nodes created within the same `with\ncontrol_dependencies(...):` block may have data dependencies that make\nthe explicit approach redundant.\n\nArgs:\ninput_ops: The data input ops for an op to be created.\n\nReturns:\nA list of control inputs for the op to be created.", "source": "github-repos"}
{"code": "def ensuredir(path_, verbose=None, info=False, mode=1023):\n    if (verbose is None):\n        verbose = VERYVERBOSE\n    if isinstance(path_, (list, tuple)):\n        path_ = join(*path_)\n    if (HAVE_PATHLIB and isinstance(path_, pathlib.Path)):\n        path_ = str(path_)\n    if (not checkpath(path_, verbose=verbose, info=info)):\n        if verbose:\n            print(('[util_path] mkdir(%r)' % path_))\n        try:\n            os.makedirs(normpath(path_), mode=mode)\n        except OSError as ex:\n            util_dbg.printex(ex, 'check that the longest existing path is not a bad windows symlink.', keys=['path_'])\n            raise\n    return path_", "docstring": "r\"\"\"\nEnsures that directory will exist. creates new dir with sticky bits by\ndefault\n\nArgs:\npath (str): dpath to ensure. Can also be a tuple to send to join\ninfo (bool): if True prints extra information\nmode (int): octal mode of directory (default 0o1777)\n\nReturns:\nstr: path - the ensured directory", "source": "codesearchnet"}
{"code": "def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):\n    if (_CON_SYM_ in globals()):\n        if (not isinstance(globals()[_CON_SYM_], pdblp.BCon)):\n            del globals()[_CON_SYM_]\n    if ((_CON_SYM_ in globals()) and (not restart)):\n        con = globals()[_CON_SYM_]\n        if getattr(con, '_session').start():\n            con.start()\n        return (con, False)\n    else:\n        con = pdblp.BCon(port=port, timeout=timeout)\n        globals()[_CON_SYM_] = con\n        con.start()\n        return (con, True)", "docstring": "Create Bloomberg connection\n\nReturns:\n(Bloomberg connection, if connection is new)", "source": "codesearchnet"}
{"code": "def reconstruct_feature_maps(hidden_state: torch.Tensor, batch_size: int, padding: int, output_size: Tuple[float, float]) -> torch.Tensor:\n    features = reshape_features(hidden_state)\n    features = merge_patches(features, batch_size=batch_size, padding=padding)\n    features = F.interpolate(features, size=output_size, mode='bilinear', align_corners=False)\n    return features", "docstring": "Reconstructs feature maps from the hidden state produced by any of the encoder. Converts the hidden state of shape\n`(n_patches_per_batch * batch_size, seq_len, hidden_size)` to feature maps of shape\n`(batch_size, hidden_size, output_size[0], output_size[1])`.\n\nArgs:\nhidden_state (torch.Tensor): Input tensor of shape `(n_patches_per_batch * batch_size, seq_len, hidden_size)`\nrepresenting the encoded patches.\nbatch_size (int): The number of samples in a batch.\npadding (int): The amount of padding to be removed when merging patches.\noutput_size (Tuple[float, float]): The desired output size for the feature maps, specified as `(height, width)`.\n\nReturns:\ntorch.Tensor: Reconstructed feature maps of shape `(batch_size, hidden_size, output_size[0], output_size[1])`.", "source": "github-repos"}
{"code": "def parallactic_angles(times, antenna_positions, field_centre):\n    import pyrap.quanta as pq\n    try:\n        zenith = pm.direction('AZEL', '0deg', '90deg')\n    except AttributeError as e:\n        if (pm is None):\n            raise ImportError('python-casacore import failed')\n        raise\n    reference_positions = [pm.position('itrf', *(pq.quantity(x, 'm') for x in pos)) for pos in antenna_positions]\n    fc_rad = pm.direction('J2000', *(pq.quantity(f, 'rad') for f in field_centre))\n    return np.asarray([(pm.do_frame(pm.epoch('UTC', pq.quantity(t, 's'))) and [(pm.do_frame(rp) and pm.posangle(fc_rad, zenith).get_value('rad')) for rp in reference_positions]) for t in times])", "docstring": "Computes parallactic angles per timestep for the given\nreference antenna position and field centre.\n\nArguments:\ntimes: ndarray\nArray of unique times with shape (ntime,),\nobtained from TIME column of MS table\nantenna_positions: ndarray of shape (na, 3)\nAntenna positions, obtained from POSITION\ncolumn of MS ANTENNA sub-table\nfield_centre : ndarray of shape (2,)\nField centre, should be obtained from MS PHASE_DIR\n\nReturns:\nAn array of parallactic angles per time-step", "source": "codesearchnet"}
{"code": "def test_load_saved_model_with_no_variables(self, builder_cls):\n    with ops.Graph().as_default():\n        path = _get_export_dir('no_variable_saved_model')\n        with session.Session(graph=ops.Graph()) as sess:\n            x = variable_v1.VariableV1(5, name='x', collections=['not_global_variable'])\n            y = variable_v1.VariableV1(11, name='y', collections=['not_global_variable'])\n            self.assertFalse(variables._all_saveable_objects())\n            z = x + y\n            self.evaluate(variables.variables_initializer([x, y]))\n            foo_sig_def = signature_def_utils.build_signature_def({'foo_input': utils.build_tensor_info(x)}, {'foo_output': utils.build_tensor_info(z)})\n            builder = saved_model_builder.SavedModelBuilder(path)\n            builder.add_meta_graph_and_variables(sess, ['foo_graph'], {'foo': foo_sig_def}, saver=tf_saver.Saver([x, y]))\n            builder.save()\n        loader = loader_impl.SavedModelLoader(path)\n        with self.session(graph=ops.Graph()) as sess:\n            saver, _ = loader.load_graph(sess.graph, ['foo_graph'])\n            self.assertFalse(variables._all_saveable_objects())\n            self.assertIsNotNone(saver)\n        with self.session(graph=ops.Graph()) as sess:\n            loader.load(sess, ['foo_graph'])\n            self.assertEqual(5, sess.run(_tensor_name('x')))\n            self.assertEqual(11, sess.run(_tensor_name('y')))", "docstring": "Test that SavedModel runs saver when there appear to be no variables.\n\nWhen no variables are detected, this may mean that the variables were saved\nto different collections, or the collections weren't saved to the\nSavedModel. If the SavedModel MetaGraphDef contains a saver, it should still\nrun in either of these cases.\n\nArgs:\nbuilder_cls: SavedModelBuilder or _SavedModelBuilder class", "source": "github-repos"}
{"code": "def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):\n        \n        \n        for resource in filestore_resources:\n            for created_resource in self.data['resources']:\n                if resource['name'] == created_resource['name']:\n                    merge_two_dictionaries(resource.data, created_resource)\n                    del resource['url']\n                    resource.update_in_hdx()\n                    merge_two_dictionaries(created_resource, resource.data)\n                    break\n        self.init_resources()\n        self.separate_resources()\n        if create_default_views:\n            self.create_default_views()\n        if hxl_update:\n            self.hxl_update()", "docstring": "Helper method to create files in filestore by updating resources.\n\nArgs:\nfilestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)\ncreate_default_views (bool): Whether to call package_create_default_resource_views.\nhxl_update (bool): Whether to call package_hxl_update.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def stop_ec2_instance(client, resource):\n    instance = EC2Instance.get(resource.id)\n    if (instance.state in ('stopped', 'terminated')):\n        return (ActionStatus.IGNORED, {})\n    client.stop_instances(InstanceIds=[resource.id])\n    return (ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip})", "docstring": "Stop an EC2 Instance\n\nThis function will attempt to stop a running instance.\n\nArgs:\nclient (:obj:`boto3.session.Session.client`): A boto3 client object\nresource (:obj:`Resource`): The resource object to stop\n\nReturns:\n`ActionStatus`", "source": "codesearchnet"}
{"code": "def delete_panel(self, panel_obj):\n        \n        res = self.panel_collection.delete_one({'_id': panel_obj['_id']})\n        LOG.warning(\"Deleting panel %s, version %s\" % (panel_obj['panel_name'], panel_obj['version']))\n        return res", "docstring": "Delete a panel by '_id'.\n\nArgs:\npanel_obj(dict)\n\nReturns:\nres(pymongo.DeleteResult)", "source": "juraj-google-style"}
{"code": "def _MakeEnumValueDescriptor(self, value_proto, index):\n    return descriptor.EnumValueDescriptor(name=value_proto.name, index=index, number=value_proto.number, options=_OptionsOrNone(value_proto), type=None)", "docstring": "Creates a enum value descriptor object from a enum value proto.\n\nArgs:\nvalue_proto: The proto describing the enum value.\nindex: The index of the enum value.\n\nReturns:\nAn initialized EnumValueDescriptor object.", "source": "codesearchnet"}
{"code": "def split_last_dimension(x, n):\n  \n  x_shape = common_layers.shape_list(x)\n  m = x_shape[-1]\n  if isinstance(m, int) and isinstance(n, int):\n    assert m % n == 0\n  return tf.reshape(x, x_shape[:-1] + [n, m", "docstring": "Reshape x so that the last dimension becomes two dimensions.\n\nThe first of these two dimensions is n.\n\nArgs:\nx: a Tensor with shape [..., m]\nn: an integer.\n\nReturns:\na Tensor with shape [..., n, m/n]", "source": "juraj-google-style"}
{"code": "def wrap_callable(cls, uri, methods, callable_obj):\n        \n        if isinstance(callable_obj, HandlerMeta):\n            callable_obj.base_endpoint = uri\n            callable_obj.is_valid = True\n            return callable_obj\n\n        if isinstance(callable_obj, types.FunctionType):\n            return cls(uri=uri, methods=methods, callable_obj=callable_obj)\n\n        raise RouteError(\"Invalid handler type.\")", "docstring": "Wraps function-based callable_obj into a `Route` instance, else\nproxies a `bottle_neck.handlers.BaseHandler` subclass instance.\n\nArgs:\nuri (str):  The uri relative path.\nmethods (tuple): A tuple of valid method strings.\ncallable_obj (instance): The callable object.\n\nReturns:\nA route instance.\n\nRaises:\nRouteError for invalid callable object type.", "source": "juraj-google-style"}
{"code": "def visualize_embeddings(summary_writer, config):\n    logdir = summary_writer.get_logdir()\n    if (logdir is None):\n        raise ValueError('Summary writer must have a logdir')\n    config_pbtxt = _text_format.MessageToString(config)\n    path = os.path.join(logdir, _projector_plugin.PROJECTOR_FILENAME)\n    with tf.io.gfile.GFile(path, 'w') as f:\n        f.write(config_pbtxt)", "docstring": "Stores a config file used by the embedding projector.\n\nArgs:\nsummary_writer: The summary writer used for writing events.\nconfig: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`\nproto that holds the configuration for the projector such as paths to\ncheckpoint files and metadata files for the embeddings. If\n`config.model_checkpoint_path` is none, it defaults to the\n`logdir` used by the summary_writer.\n\nRaises:\nValueError: If the summary writer does not have a `logdir`.", "source": "codesearchnet"}
{"code": "def validate(self):\n    if (not isinstance(self.value, bytes)):\n        raise TypeError('key value must be bytes')\n    elif (not isinstance(self.cryptographic_algorithm, enums.CryptographicAlgorithm)):\n        raise TypeError('key algorithm must be a CryptographicAlgorithm enumeration')\n    elif (not isinstance(self.cryptographic_length, six.integer_types)):\n        raise TypeError('key length must be an integer')\n    elif (not isinstance(self.key_format_type, enums.KeyFormatType)):\n        raise TypeError('key format type must be a KeyFormatType enumeration')\n    elif (self.key_format_type not in self._valid_formats):\n        raise ValueError('key format type must be one of {0}'.format(self._valid_formats))\n    mask_count = len(self.cryptographic_usage_masks)\n    for i in range(mask_count):\n        mask = self.cryptographic_usage_masks[i]\n        if (not isinstance(mask, enums.CryptographicUsageMask)):\n            position = '({0} in list)'.format(i)\n            raise TypeError('key mask {0} must be a CryptographicUsageMask enumeration'.format(position))\n    name_count = len(self.names)\n    for i in range(name_count):\n        name = self.names[i]\n        if (not isinstance(name, six.string_types)):\n            position = '({0} in list)'.format(i)\n            raise TypeError('key name {0} must be a string'.format(position))", "docstring": "Verify that the contents of the PublicKey object are valid.\n\nRaises:\nTypeError: if the types of any PublicKey attributes are invalid.", "source": "codesearchnet"}
{"code": "def update_service(name, service_map):\n    if (name in service_map):\n        service = service_map[name]\n        data = service.update()\n        if (not data):\n            logger.warning('no data received for service: %s', name)\n        else:\n            data['service_name'] = service.service_name\n            CACHE[name] = dict(data=data, updated=datetime.now())\n    else:\n        logger.warning('service not found: %s', name)\n    if (name in CACHE):\n        return add_time(CACHE[name])\n    return {}", "docstring": "Get an update from the specified service.\n\nArguments:\nname (:py:class:`str`): The name of the service.\nservice_map (:py:class:`dict`): A mapping of service names to\n:py:class:`flash.service.core.Service` instances.\n\nReturns:\n:py:class:`dict`: The updated data.", "source": "codesearchnet"}
{"code": "def _FormatMessageShort(self, event):\n    (_, message_short) = self._output_mediator.GetFormattedMessages(event)\n    if (message_short is None):\n        data_type = getattr(event, 'data_type', 'UNKNOWN')\n        raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))\n    return message_short", "docstring": "Formats the short message.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: short message field.\n\nRaises:\nNoFormatterFound: if no event formatter can be found to match the data\ntype in the event.", "source": "codesearchnet"}
{"code": "def CheckForHeaderGuard(filename, clean_lines, error):\n  \n\n  \n  \n  \n  \n  \n  \n  raw_lines = clean_lines.lines_without_raw_strings\n  for i in raw_lines:\n    if Search(r'\n      return\n\n  \n  for i in raw_lines:\n    if Search(r'^\\s*\n      return\n\n  cppvar = GetHeaderGuardCPPVariable(filename)\n\n  ifndef = ''\n  ifndef_linenum = 0\n  define = ''\n  endif = ''\n  endif_linenum = 0\n  for linenum, line in enumerate(raw_lines):\n    linesplit = line.split()\n    if len(linesplit) >= 2:\n      \n      if not ifndef and linesplit[0] == '\n        \n        ifndef = linesplit[1]\n        ifndef_linenum = linenum\n      if not define and linesplit[0] == '\n        define = linesplit[1]\n    \n    if line.startswith('\n      endif = line\n      endif_linenum = linenum\n\n  if not ifndef or not define or ifndef != define:\n    error(filename, 0, 'build/header_guard', 5,\n          'No \n          cppvar)\n    return\n\n  \n  \n  if ifndef != cppvar:\n    error_level = 0\n    if ifndef != cppvar + '_':\n      error_level = 5\n\n    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,\n                            error)\n    error(filename, ifndef_linenum, 'build/header_guard', error_level,\n          '\n\n  \n  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,\n                          error)\n  match = Match(r'\n  if match:\n    if match.group(1) == '_':\n      \n      error(filename, endif_linenum, 'build/header_guard', 0,\n            '\n    return\n\n  \n  \n  \n  no_single_line_comments = True\n  for i in xrange(1, len(raw_lines) - 1):\n    line = raw_lines[i]\n    if Match(r'^(?:(?:\\'(?:\\.|[^\\'])*\\')|(?:\"(?:\\.|[^\"])*\")|[^\\'\"])*\n      no_single_line_comments = False\n      break\n\n  if no_single_line_comments:\n    match = Match(r'\n    if match:\n      if match.group(1) == '_':\n        \n        error(filename, endif_linenum, 'build/header_guard', 0,\n              '\n      return\n\n  \n  error(filename, endif_linenum, 'build/header_guard', 5,\n        '", "docstring": "Checks that the file contains a header guard.\n\nLogs an error if no #ifndef header guard is present.  For other\nheaders, checks that the full pathname is used.\n\nArgs:\nfilename: The name of the C++ header file.\nclean_lines: A CleansedLines instance containing the file.\nerror: The function to call with any errors found.", "source": "juraj-google-style"}
{"code": "def __init__(self, executor_type=None, config_proto=None):\n    self.config_proto_serialized = config_proto\n    self.executor_type = executor_type", "docstring": "Constructor.\n\nArgs:\nexecutor_type: (optional) name of the executor to be used to execute the\neager function. If None or an empty string, the default Tensorflow\nexecutor will be used.\nconfig_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized\nstring of that proto. The config used by Grappler when optimizing the\nfunction graph. Each concrete function is optimized the first time is\ncalled. Changing config_proto after the first call has no effect. If\nconfig_proto is None, an empty RewriterConfig will be used.", "source": "github-repos"}
{"code": "def merge_variables(variables, **kwargs):\n    var_dict = OrderedDict()\n    for v in variables:\n        if (v.name not in var_dict):\n            var_dict[v.name] = []\n        var_dict[v.name].append(v)\n    return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]", "docstring": "Concatenates Variables along row axis.\n\nArgs:\nvariables (list): List of Variables to merge. Variables can have\ndifferent names (and all Variables that share a name will be\nconcatenated together).\n\nReturns:\nA list of Variables.", "source": "codesearchnet"}
{"code": "def is_supported(cls, desc):\n    for m in cls:\n        if m.matches(desc):\n            return True\n    return False", "docstring": "Determines if the given metric descriptor is supported.\n\nArgs:\ndesc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the\nmetric descriptor to test\n\nReturn:\n`True` if desc is supported, otherwise `False`", "source": "codesearchnet"}
{"code": "def save(self, checkpoint_dir=None):\n    checkpoint_dir = os.path.join((checkpoint_dir or self.logdir), 'checkpoint_{}'.format(self._iteration))\n    if (not os.path.exists(checkpoint_dir)):\n        os.makedirs(checkpoint_dir)\n    checkpoint = self._save(checkpoint_dir)\n    saved_as_dict = False\n    if isinstance(checkpoint, string_types):\n        if ((not checkpoint.startswith(checkpoint_dir)) or (checkpoint == checkpoint_dir)):\n            raise ValueError('The returned checkpoint path must be within the given checkpoint dir {}: {}'.format(checkpoint_dir, checkpoint))\n        if (not os.path.exists(checkpoint)):\n            raise ValueError('The returned checkpoint path does not exist: {}'.format(checkpoint))\n        checkpoint_path = checkpoint\n    elif isinstance(checkpoint, dict):\n        saved_as_dict = True\n        checkpoint_path = os.path.join(checkpoint_dir, 'checkpoint')\n        with open(checkpoint_path, 'wb') as f:\n            pickle.dump(checkpoint, f)\n    else:\n        raise ValueError('`_save` must return a dict or string type: {}'.format(str(type(checkpoint))))\n    with open((checkpoint_path + '.tune_metadata'), 'wb') as f:\n        pickle.dump({'experiment_id': self._experiment_id, 'iteration': self._iteration, 'timesteps_total': self._timesteps_total, 'time_total': self._time_total, 'episodes_total': self._episodes_total, 'saved_as_dict': saved_as_dict}, f)\n    return checkpoint_path", "docstring": "Saves the current model state to a checkpoint.\n\nSubclasses should override ``_save()`` instead to save state.\nThis method dumps additional metadata alongside the saved path.\n\nArgs:\ncheckpoint_dir (str): Optional dir to place the checkpoint.\n\nReturns:\nCheckpoint path that may be passed to restore().", "source": "codesearchnet"}
{"code": "def category(msg):\n    \n\n    if common.typecode(msg) < 1 or common.typecode(msg) > 4:\n        raise RuntimeError(\"%s: Not a identification message\" % msg)\n\n    msgbin = common.hex2bin(msg)\n    return common.bin2int(msgbin[5:8])", "docstring": "Aircraft category number\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\nint: category number", "source": "juraj-google-style"}
{"code": "def update(self, friendly_name=None, description=None):\n    \n    self._get_info()\n\n    if self._info:\n      if friendly_name:\n        self._info['friendlyName'] = friendly_name\n      if description:\n        self._info['description'] = description\n      try:\n        self._api.datasets_update(self._name_parts, self._info)\n      except Exception as e:\n        raise e\n      finally:\n        self._info = None", "docstring": "Selectively updates Dataset information.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\n\nReturns:", "source": "juraj-google-style"}
{"code": "def load(self, path):\n        \n        \n        missing_files = self._check_for_missing_files(path)\n\n        if len(missing_files) > 0:\n            raise IOError('Invalid data set of type {}: files {} not found at {}'.format(\n                self.type(), ' '.join(missing_files), path))\n\n        return self._load(path)", "docstring": "Load and return the corpus from the given path.\n\nArgs:\npath (str): Path to the data set to load.\n\nReturns:\nCorpus: The loaded corpus\n\nRaises:\nIOError: When the data set is invalid, for example because required files (annotations, …) are missing.", "source": "juraj-google-style"}
{"code": "def _open_interface(self, conn_id, iface, callback):\n        \n\n        try:\n            context = self.conns.get_context(conn_id)\n        except ArgumentError:\n            callback(conn_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config('default_timeout'))\n\n        topics = context['topics']\n\n        open_iface_message = {'key': context['key'], 'type': 'command', 'operation': 'open_interface', 'client': self.name, 'interface': iface}\n        self.client.publish(topics.action, open_iface_message)", "docstring": "Open an interface on this device\n\nArgs:\nconn_id (int): the unique identifier for the connection\niface (string): the interface name to open\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def forward(self, hidden_states: torch.FloatTensor, rotary_pos_emb: torch.FloatTensor, attention_mask: torch.LongTensor, position_ids: torch.LongTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n    residual = hidden_states\n    hidden_states = self.input_rmsnorm(hidden_states)\n    attention_outputs = self.self_attn(hidden_states=hidden_states, rotary_pos_emb=rotary_pos_emb, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions)\n    hidden_states = attention_outputs[0]\n    hidden_states = residual + hidden_states\n    residual = hidden_states\n    hidden_states = self.post_attention_rmsnorm(hidden_states)\n    hidden_states = self.mlp(hidden_states)\n    hidden_states = residual + hidden_states\n    outputs = (hidden_states,)\n    if output_attentions:\n        outputs += (attention_outputs[-1],)\n    return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):\ninput to the layer.\nrotary_pos_emb (`torch.FloatTensor`):\nrotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module.\nattention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):\nattention mask where padding elements are indicated by very large negative values.\nposition_ids (`torch.LongTensor`):\nDenotes position ids of the input tokens.\noutput_attentions (`bool`, *optional*, defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def compare_files(path1, path2):\n    diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())\n    return [x for x in diff if (x[0] in ['-', '+', '?'])]", "docstring": "Returns the delta between two files using -, ?, + format excluding\nlines that are the same\n\nArgs:\npath1 (str): Path to first file\npath2 (str): Path to second file\n\nReturns:\nList[str]: Delta between the two files", "source": "codesearchnet"}
{"code": "def fib_list(n):\n    \n\n    \n    assert n >= 0, 'n must be a positive integer'\n\n    list_results = [0, 1]\n    for i in range(2, n+1):\n        list_results.append(list_results[i-1] + list_results[i-2])\n    return list_results[n]", "docstring": "[summary]\nThis algorithm computes the n-th fibbonacci number\nvery quick. approximate O(n)\nThe algorithm use dynamic programming.\n\nArguments:\nn {[int]} -- [description]\n\nReturns:\n[int] -- [description]", "source": "juraj-google-style"}
{"code": "def ensemble_center(self, site_list, indices, cartesian=True):\n        \n        if cartesian:\n            return np.average([site_list[i].coords for i in indices],\n                              axis=0)\n        else:\n            return np.average([site_list[i].frac_coords for i in indices],\n                              axis=0)", "docstring": "Finds the center of an ensemble of sites selected from\na list of sites.  Helper method for the find_adsorption_sites\nalgorithm.\n\nArgs:\nsite_list (list of sites): list of sites\nindices (list of ints): list of ints from which to select\nsites from site list\ncartesian (bool): whether to get average fractional or\ncartesian coordinate", "source": "juraj-google-style"}
{"code": "def list_nics(access_token, subscription_id):\n    \n    endpoint = ''.join([get_rm_endpoint(),\n                        '/subscriptions/', subscription_id,\n                        '/providers/Microsoft.Network/',\n                        '/networkInterfaces?api-version=', NETWORK_API])\n    return do_get(endpoint, access_token)", "docstring": "List the network interfaces in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of NICs list with properties.", "source": "juraj-google-style"}
{"code": "def __init__(self, input_ebm: ebm.EnergyInference, input_qnn: qnn.QuantumInference, name: Union[None, str]=None):\n    super().__init__(name=name)\n    self._e_inference = input_ebm\n    self._q_inference = input_qnn\n    self._modular_hamiltonian = hamiltonian.Hamiltonian(self.e_inference.energy, self.q_inference.circuit)", "docstring": "Initializes a QHBM.\n\nArgs:\ninput_ebm: Attends to density operator eigenvalues.\ninput_qnn: Attends to density operator eigenvectors.\nname: Optional name for the model.", "source": "github-repos"}
{"code": "def send_audio_file(self, audio_file, device_state, authentication_headers, dialog_request_id, distance_profile, audio_format):\n    payload = {'context': device_state, 'event': {'header': {'namespace': 'SpeechRecognizer', 'name': 'Recognize', 'messageId': self.generate_message_id(), 'dialogRequestId': dialog_request_id}, 'payload': {'profile': distance_profile, 'format': audio_format}}}\n    multipart_data = MultipartEncoder(fields=[('request', ('request', json.dumps(payload), 'application/json;', {'Content-Disposition': \"form-data; name='request'\"})), ('audio', ('audio', audio_file, 'application/octet-stream', {'Content-Disposition': \"form-data; name='audio'\"}))], boundary='boundary')\n    headers = {**authentication_headers, 'Content-Type': multipart_data.content_type}\n    stream_id = self.connection.request('POST', '/v20160207/events', headers=headers, body=multipart_data)\n    response = self.connection.get_response(stream_id)\n    return self.parse_response(response)", "docstring": "Send audio to AVS\n\nThe file-like object are steaming uploaded for improved latency.\n\nReturns:\nbytes -- wav audio bytes returned from AVS", "source": "codesearchnet"}
{"code": "def FromTrimmedData(byts):\n        \n        block = Block()\n        block.__is_trimmed = True\n        ms = StreamManager.GetStream(byts)\n        reader = BinaryReader(ms)\n\n        block.DeserializeUnsigned(reader)\n        reader.ReadByte()\n        witness = Witness()\n        witness.Deserialize(reader)\n        block.Script = witness\n\n        bc = GetBlockchain()\n        tx_list = []\n        for tx_hash in reader.ReadHashes():\n            tx = bc.GetTransaction(tx_hash)[0]\n            if not tx:\n                raise Exception(\"Could not find transaction!\\n Are you running code against a valid Blockchain instance?\\n Tests that accesses transactions or size of a block but inherit from NeoTestCase instead of BlockchainFixtureTestCase will not work.\")\n            tx_list.append(tx)\n\n        if len(tx_list) < 1:\n            raise Exception(\"Invalid block, no transactions found for block %s \" % block.Index)\n\n        block.Transactions = tx_list\n\n        StreamManager.ReleaseStream(ms)\n\n        return block", "docstring": "Deserialize a block from raw bytes.\n\nArgs:\nbyts:\n\nReturns:\nBlock:", "source": "juraj-google-style"}
{"code": "def encode(self):\n    slot = 0\n    match_op = self.KNOWN_MATCH_NAMES['match_controller']\n    if (not self.controller):\n        slot = self.slot\n        match_op = self.KNOWN_MATCH_NAMES['match_slot']\n    return struct.pack('<B6xB', slot, match_op)", "docstring": "Encode this slot identifier into a binary descriptor.\n\nReturns:\nbytes: The 8-byte encoded slot identifier", "source": "codesearchnet"}
{"code": "def _group_and_publish_tasks_statistics(self, result):\n        \n        for i in result:\n            executor_id = i['executor_id']\n            i['executor_id'] = executor_id[:executor_id.rfind('.')]\n            i['statistics']['instances_count'] = 1\n\n        r = {}\n        for i in result:\n            executor_id = i['executor_id']\n            r[executor_id] = r.get(executor_id, {})\n            r[executor_id]['framework_id'] = i['framework_id']\n            r[executor_id]['statistics'] = r[executor_id].get('statistics', {})\n            r[executor_id]['statistics'] = self._sum_statistics(\n                i['statistics'], r[executor_id]['statistics'])\n\n        self._add_cpu_usage(r)\n        self._add_cpu_percent(r)\n        self._add_mem_percent(r)\n        self._publish(r)", "docstring": "This function group statistics of same tasks by adding them.\nIt also add 'instances_count' statistic to get information about\nhow many instances is running on the server\n\nArgs:\nresult: result of mesos query. List of dictionaries with\n'executor_id', 'framework_id' as a strings and 'statistics'\nas dictionary of labeled numbers", "source": "juraj-google-style"}
{"code": "def assignee(self, assignee_id, action='ADD'):\n        \n        if not self.can_update():\n            self._tcex.handle_error(910, [self.type])\n\n        return self.tc_requests.assignee(\n            self.api_type, self.api_sub_type, self.unique_id, assignee_id, action=action\n        )", "docstring": "Adds a assignee to the task\n\nArgs:\nassignee_id: The id of the assignee to be added\naction:", "source": "juraj-google-style"}
{"code": "def serialize_to_normalized_pretty_json(py_obj):\n    \n    return json.dumps(py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes)", "docstring": "Serialize a native object to normalized, pretty printed JSON.\n\nThe JSON string is normalized by sorting any dictionary keys.\n\nArgs:\npy_obj: object\nAny object that can be represented in JSON. Some types, such as datetimes are\nautomatically converted to strings.\n\nReturns:\nstr: normalized, pretty printed JSON string.", "source": "juraj-google-style"}
{"code": "def GetMessages(self, formatter_mediator, event):\n    \n    if self.DATA_TYPE != event.data_type:\n      raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n          event.data_type))\n\n    event_values = event.CopyToDict()\n\n    visit_type = event_values.get('visit_type', 0)\n    transition = self._URL_TRANSITIONS.get(visit_type, None)\n    if transition:\n      transition_str = 'Transition: {0!s}'.format(transition)\n\n    extra = event_values.get('extra', None)\n    if extra:\n      if transition:\n        extra.append(transition_str)\n      event_values['extra_string'] = ' '.join(extra)\n\n    elif transition:\n      event_values['extra_string'] = transition_str\n\n    return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"}
{"code": "def psnr(x1, x2, max_val):\n    if any_symbolic_tensors((x1, x2)):\n        return PSNR(max_val).symbolic_call(x1, x2)\n    return backend.nn.psnr(x1, x2, max_val)", "docstring": "Peak Signal-to-Noise Ratio (PSNR) function.\n\nThis function computes the Peak Signal-to-Noise Ratio between two signals,\n`x1` and `x2`. PSNR is a measure of the quality of a reconstructed signal.\nThe higher the PSNR, the closer the reconstructed signal is to the original\nsignal. Note that it can become negative when the signal power is\nsmaller that the noise power.\n\nArgs:\nx1: The first input signal.\nx2: The second input signal. Must have the same shape as `x1`.\nmax_val: The maximum possible value in the signals.\n\nReturns:\nfloat: The PSNR value between `x1` and `x2`.\n\nExamples:\n\n>>> x1 = keras.random.normal((2, 4, 4, 3))\n>>> x2 = keras.random.normal((2, 4, 4, 3))\n>>> max_val = 1.0\n>>> keras.ops.nn.psnr(x1, x2, max_val)\n-3.1697404", "source": "github-repos"}
{"code": "def energy_upperbound(self, spins):\n        \n        subtheta = self.theta.copy()\n        subtheta.fix_variables(spins)\n\n        \n        trees = self._trees\n\n        if not trees:\n            \n            \n            assert not subtheta.linear and not subtheta.quadratic\n            return subtheta.offset\n\n        energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)\n\n        return energy", "docstring": "A formula for an upper bound on the energy of Theta with spins fixed.\n\nArgs:\nspins (dict): Spin values for a subset of the variables in Theta.\n\nReturns:\nFormula that upper bounds the energy with spins fixed.", "source": "juraj-google-style"}
{"code": "def __init__(self, tpu_cluster_resolver=None, steps_per_run=None, device_assignment=None):\n    super().__init__(TPUExtended(self, tpu_cluster_resolver, steps_per_run, device_assignment))\n    distribute_lib.distribution_strategy_gauge.get_cell('V1').set('TPUStrategy')\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended.num_hosts)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended.num_replicas_per_host)\n    self._enable_packed_variable_in_eager_mode = True", "docstring": "Initializes the TPUStrategy object.\n\nArgs:\ntpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\nwhich provides information about the TPU cluster.\nsteps_per_run: Number of steps to run on device before returning to the\nhost. Note that this can have side-effects on performance, hooks,\nmetrics, summaries etc.\nThis parameter is only used when Distribution Strategy is used with\nKeras.\ndevice_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to\nspecify the placement of replicas on the TPU cluster. Currently only\nsupports the usecase of using a single core within a TPU cluster.", "source": "github-repos"}
{"code": "def unregister(self, alias):\n        \n        if alias not in self._service_objects:\n            raise Error(self._device,\n                        'No service is registered with alias \"%s\".' % alias)\n        service_obj = self._service_objects.pop(alias)\n        if service_obj.is_alive:\n            with expects.expect_no_raises(\n                    'Failed to stop service instance \"%s\".' % alias):\n                service_obj.stop()", "docstring": "Unregisters a service instance.\n\nStops a service and removes it from the manager.\n\nArgs:\nalias: string, the alias of the service instance to unregister.", "source": "juraj-google-style"}
{"code": "def from_string(string):\n        \n        lines = list(clean_lines(string.splitlines()))\n\n        def input_mode(line):\n            if line[0] == \"&\":\n                return (\"sections\", line[1:].lower())\n            elif \"ATOMIC_SPECIES\" in line:\n                return (\"pseudo\", )\n            elif \"K_POINTS\" in line:\n                return (\"kpoints\", line.split(\"{\")[1][:-1])\n            elif \"CELL_PARAMETERS\" in line or \"ATOMIC_POSITIONS\" in line:\n                return (\"structure\", line.split(\"{\")[1][:-1])\n            elif line == \"/\":\n                return None\n            else:\n                return mode\n\n        sections = {\"control\": {}, \"system\": {}, \"electrons\": {}, \n                    \"ions\": {}, \"cell\":{}}\n        pseudo = {}\n        pseudo_index = 0\n        lattice = []\n        species = []\n        coords = []\n        structure = None\n        site_properties = {\"pseudo\":[]}\n        mode = None\n        for line in lines:\n            mode = input_mode(line)\n            if mode == None:\n                pass\n            elif mode[0] == \"sections\":\n                section = mode[1]\n                m = re.match(r'(\\w+)\\(?(\\d*?)\\)?\\s*=\\s*(.*)', line)\n                if m:\n                    key = m.group(1).strip()\n                    key_ = m.group(2).strip()\n                    val = m.group(3).strip()\n                    if key_ != \"\":\n                        if sections[section].get(key, None) == None:\n                            val_ = [0.0]*20 \n                            val_[int(key_)-1] = PWInput.proc_val(key, val)\n                            sections[section][key] = val_\n\n                            site_properties[key] = []\n                        else:\n                            sections[section][key][int(key_)-1] = PWInput.proc_val(key, val) \n                    else:\n                        sections[section][key] = PWInput.proc_val(key, val)\n\n            elif mode[0] == \"pseudo\":\n                m = re.match(r'(\\w+)\\s+(\\d*.\\d*)\\s+(.*)', line)\n                if m:\n                    pseudo[m.group(1).strip()] = {}\n                    pseudo[m.group(1).strip()][\"index\"] = pseudo_index\n                    pseudo[m.group(1).strip()][\"pseudopot\"] = m.group(3).strip()\n                    pseudo_index += 1\n            elif mode[0] == \"kpoints\":\n                m = re.match(r'(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)', line)\n                if m:\n                    kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))\n                    kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))\n                else:\n                    kpoints_mode = mode[1]\n            elif mode[0] == \"structure\":\n                m_l = re.match(r'(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)', line)\n                m_p = re.match(r'(\\w+)\\s+(-?\\d+\\.\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)', line)\n                if m_l:\n                    lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]\n                elif m_p:\n                    site_properties[\"pseudo\"].append(pseudo[m_p.group(1)][\"pseudopot\"])\n                    species += [pseudo[m_p.group(1)][\"pseudopot\"].split(\".\")[0]]\n                    coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]\n\n                    for k, v in site_properties.items():\n                    
    if k != \"pseudo\":\n                            site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)][\"index\"]])\n                if mode[1] == \"angstrom\":\n                    coords_are_cartesian = True\n                elif mode[1] == \"crystal\":\n                    coords_are_cartesian = False\n\n        structure = Structure(Lattice(lattice), species, coords, \n                              coords_are_cartesian=coords_are_cartesian,\n                              site_properties=site_properties)\n        return PWInput(structure=structure, control=sections[\"control\"],\n                       system=sections[\"system\"], electrons=sections[\"electrons\"], \n                       ions=sections[\"ions\"], cell=sections[\"cell\"], kpoints_mode=kpoints_mode,\n                       kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift)", "docstring": "Reads an PWInput object from a string.\n\nArgs:\nstring (str): PWInput string\n\nReturns:\nPWInput object", "source": "juraj-google-style"}
{"code": "def _apply_with_plugs(self, subplugs, error_on_unknown):\n    plugs_by_name = {plug.name: plug for plug in self.plugs}\n    new_plugs = dict(plugs_by_name)\n    for (name, sub_class) in six.iteritems(subplugs):\n        original_plug = plugs_by_name.get(name)\n        accept_substitute = True\n        if (original_plug is None):\n            if (not error_on_unknown):\n                continue\n            accept_substitute = False\n        elif isinstance(original_plug.cls, openhtf.plugs.PlugPlaceholder):\n            accept_substitute = issubclass(sub_class, original_plug.cls.base_class)\n        else:\n            accept_substitute = (('auto_placeholder' in original_plug.cls.__dict__) and original_plug.cls.auto_placeholder and issubclass(sub_class, original_plug.cls))\n        if (not accept_substitute):\n            raise openhtf.plugs.InvalidPlugError(('Could not find valid placeholder for substitute plug %s required for phase %s' % (name, self.name)))\n        new_plugs[name] = mutablerecords.CopyRecord(original_plug, cls=sub_class)\n    return mutablerecords.CopyRecord(self, plugs=list(new_plugs.values()), options=self.options.format_strings(**subplugs), measurements=[m.with_args(**subplugs) for m in self.measurements])", "docstring": "Substitute plugs for placeholders for this phase.\n\nArgs:\nsubplugs: dict of plug name to plug class, plug classes to replace.\nerror_on_unknown: bool, if True, then error when an unknown plug name is\nprovided.\n\nRaises:\nopenhtf.plugs.InvalidPlugError if for one of the plug names one of the\nfollowing is true:\n- error_on_unknown is True and the plug name is not registered.\n- The new plug subclass is not a subclass of the original.\n- The original plug class is not a placeholder or automatic placeholder.\n\nReturns:\nPhaseDescriptor with updated plugs.", "source": "codesearchnet"}
{"code": "def calculate_checksum_on_iterator(\n    itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM\n):\n    \n    checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)\n    for chunk in itr:\n        checksum_calc.update(chunk)\n    return checksum_calc.hexdigest()", "docstring": "Calculate the checksum of an iterator.\n\nArgs:\nitr: iterable\nObject which supports the iterator protocol.\n\nalgorithm: str\nChecksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.\n\nReturns:\nstr : Checksum as a hexadecimal string, with length decided by the algorithm.", "source": "juraj-google-style"}
{"code": "def apply_grads(self, grads, variables):\n    \n    ops = []\n    for grad, var in zip(grads, variables):\n      ops.extend(self.apply_grad(grad, var))\n    if not ops:\n      return ops\n    return variables[0].graph.combine_assignments(ops)", "docstring": "Apply gradients to variables.\n\nCall this function externally instead of apply_grad().  This causes the\noperations to be combined, which is necessary for stacking variables\nsee mtf.rewrite_stack_variables().\n\nArgs:\ngrads: a list of Tensor\nvariables: a list of Variables\nReturns:\na list of Operations", "source": "juraj-google-style"}
{"code": "def delete_merged_branches(self, **kwargs):\n    path = ('/projects/%s/repository/merged_branches' % self.get_id())\n    self.manager.gitlab.http_delete(path, **kwargs)", "docstring": "Delete merged branches.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server failed to perform the request", "source": "codesearchnet"}
{"code": "def add_datasets(self, datasets, datasets_to_check=None):\n        \n        \n        if datasets_to_check is None:\n            datasets_to_check = self.get_datasets()\n        alldatasetsadded = True\n        for dataset in datasets:\n            if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):\n                alldatasetsadded = False\n        return alldatasetsadded", "docstring": "Add multiple datasets\n\nArgs:\ndatasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries\ndatasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.\n\nReturns:\nbool: True if all datasets added or False if any already present", "source": "juraj-google-style"}
{"code": "def render_layout_form(form, layout=None, **kwargs):\n\t\n\tdef make_component(type_, *args):\n\t\t\n\t\tif type_ == \"Text\":\n\t\t\treturn \"\".join(args)\n\n\t\telif type_ == \"Field\":\n\t\t\tresult = \"\"\n\t\t\tfor c in args:\n\t\t\t\tif isinstance(c, tuple):\n\t\t\t\t\tresult += make_component(*c)\n\t\t\t\telif isinstance(c, str):\n\t\t\t\t\tresult += render_field(form.__getitem__(c), **kwargs)\n\t\t\treturn result\n\t\telse:\n\t\t\tif len(args) < 2:\n\t\t\t\treturn \"\"\n\n\t\t\tresult = \"\".join([make_component(*c) for c in args])\n\t\t\tif type_:\n\t\t\t\treturn \"<div class=\\\"%s\\\">%s</div>\" % (type_.lower(), result)\n\t\t\telse:\n\t\t\t\treturn result \n\n\treturn mark_safe(\"\".join([make_component(*component) for component in layout]))", "docstring": "Render an entire form with Semantic UI wrappers for each field with\na layout provided in the template or in the form class\n\nArgs:\nform (form): Django Form\nlayout (tuple): layout design\nkwargs (dict): other attributes will be passed to fields\n\nReturns:\nstring: HTML of Django Form fields with Semantic UI wrappers", "source": "juraj-google-style"}
{"code": "def from_string(key_pem, is_x509_cert):\n        \n        if is_x509_cert:\n            key_pem = _helpers._to_bytes(key_pem)\n            pemLines = key_pem.replace(b' ', b'').split()\n            certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[1:-1]))\n            certSeq = DerSequence()\n            certSeq.decode(certDer)\n            tbsSeq = DerSequence()\n            tbsSeq.decode(certSeq[0])\n            pubkey = RSA.importKey(tbsSeq[6])\n        else:\n            pubkey = RSA.importKey(key_pem)\n        return PyCryptoVerifier(pubkey)", "docstring": "Construct a Verified instance from a string.\n\nArgs:\nkey_pem: string, public key in PEM format.\nis_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\nis expected to be an RSA key in PEM format.\n\nReturns:\nVerifier instance.", "source": "juraj-google-style"}
{"code": "def construct_error_message(driver_id, error_type, message, timestamp):\n    builder = flatbuffers.Builder(0)\n    driver_offset = builder.CreateString(driver_id.binary())\n    error_type_offset = builder.CreateString(error_type)\n    message_offset = builder.CreateString(message)\n    ray.core.generated.ErrorTableData.ErrorTableDataStart(builder)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddDriverId(builder, driver_offset)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddType(builder, error_type_offset)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddErrorMessage(builder, message_offset)\n    ray.core.generated.ErrorTableData.ErrorTableDataAddTimestamp(builder, timestamp)\n    error_data_offset = ray.core.generated.ErrorTableData.ErrorTableDataEnd(builder)\n    builder.Finish(error_data_offset)\n    return bytes(builder.Output())", "docstring": "Construct a serialized ErrorTableData object.\n\nArgs:\ndriver_id: The ID of the driver that the error should go to. If this is\nnil, then the error will go to all drivers.\nerror_type: The type of the error.\nmessage: The error message.\ntimestamp: The time of the error.\n\nReturns:\nThe serialized object.", "source": "codesearchnet"}
{"code": "def __init__(self, max_edit_distance=0, match_threshold=0.0):\n        \n        self.root = TrieNode('root')\n        self.max_edit_distance = max_edit_distance\n        self.match_threshold = match_threshold", "docstring": "Init the Trie object and create root node.\n\nCreates an Trie object with a root node with the passed in\nmax_edit_distance and match_threshold.\n\nArgs:\nmax_edit_distance(int): ?\nmatch_threshold(int): ?\n\nNotes:\nThis never seems to get called with max_edit_distance or match_threshold", "source": "juraj-google-style"}
{"code": "def InteractiveShell(self, cmd=None, strip_cmd=True, delim=None, strip_delim=True):\n    conn = self._get_service_connection(b'shell:')\n    return self.protocol_handler.InteractiveShellCommand(conn, cmd=cmd, strip_cmd=strip_cmd, delim=delim, strip_delim=strip_delim)", "docstring": "Get stdout from the currently open interactive shell and optionally run a command\non the device, returning all output.\n\nArgs:\ncmd: Optional. Command to run on the target.\nstrip_cmd: Optional (default True). Strip command name from stdout.\ndelim: Optional. Delimiter to look for in the output to know when to stop expecting more output\n(usually the shell prompt)\nstrip_delim: Optional (default True): Strip the provided delimiter from the output\n\nReturns:\nThe stdout from the shell command.", "source": "codesearchnet"}
{"code": "def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None:\n        \n\n        self.mglo.bind_to_uniform_block(binding, offset, size)", "docstring": "Bind the buffer to a uniform block.\n\nArgs:\nbinding (int): The uniform block binding.\n\nKeyword Args:\noffset (int): The offset.\nsize (int): The size. Value ``-1`` means all.", "source": "juraj-google-style"}
{"code": "def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None):\n    \n\n    \n    if cid is not None:\n        assert type(cid) == list, \"cid must be a list. cid: {}\".format(cid)\n\n        cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if gctoo_col in cid]\n\n        \n        num_missing_cids = len(cid) - len(cols_to_keep)\n        if num_missing_cids != 0:\n            logger.info(\"{} cids were not found in the GCT.\".format(num_missing_cids))\n\n    \n    elif col_bool is not None:\n\n        assert len(col_bool) == gctoo.data_df.shape[1], (\n            \"col_bool must have length equal to gctoo.data_df.shape[1]. \" +\n            \"len(col_bool): {}, gctoo.data_df.shape[1]: {}\".format(\n                len(col_bool), gctoo.data_df.shape[1]))\n        cols_to_keep = gctoo.data_df.columns[col_bool].values\n\n    \n    elif cidx is not None:\n\n        assert type(cidx[0]) is int, (\n            \"cidx must be a list of integers. cidx[0]: {}, \" +\n            \"type(cidx[0]): {}\").format(cidx[0], type(cidx[0]))\n\n        assert max(cidx) <= gctoo.data_df.shape[1], (\n            \"cidx contains an integer larger than the number of columns in \" +\n            \"the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}\").format(\n                max(cidx), gctoo.data_df.shape[1])\n\n        cols_to_keep = gctoo.data_df.columns[cidx].values\n\n    \n    else:\n        cols_to_keep = gctoo.data_df.columns.values\n\n    \n    if exclude_cid is not None:\n\n        \n        cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid]\n\n    return cols_to_keep", "docstring": "Figure out based on the possible columns inputs which columns to keep.\n\nArgs:\ngctoo (GCToo object):\ncid (list of strings):\ncol_bool (boolean array):\ncidx (list of integers):\nexclude_cid (list of strings):\n\nReturns:\ncols_to_keep (list of strings): col ids to be kept", "source": "juraj-google-style"}
{"code": "def calcPF(pf):\n    pf_y = pf[:1]\n    pf_x = pf[1:]\n    result = 100\n    if (pf_y == CosTheta.CapacitiveLead):\n        result = (200 - int(pf_x))\n    elif (pf_y == CosTheta.InductiveLag):\n        result = int(pf_x)\n    return result", "docstring": "Simple wrap to calc legacy PF value\n\nArgs:\npf: meter power factor reading\n\nReturns:\nint: legacy push pf", "source": "codesearchnet"}
{"code": "def get_fail_graph(self, failure_index=None):\n    (phase, _) = self._get_failed_phase(failure_index)\n    return phase.get_graph()", "docstring": "Returns a graph showing a solve failure.\n\nArgs:\nfailure_index: See `failure_reason`\n\nReturns:\nA pygraph.digraph object.", "source": "codesearchnet"}
{"code": "def preprocess(self, raw_inputs):\n        \n        image_arrays = []\n        for raw_im in raw_inputs:\n            im = raw_im.convert('L')\n            im = im.resize(MNIST_DIM, Image.ANTIALIAS)\n            arr = np.array(im)\n            image_arrays.append(arr)\n\n        inputs = np.array(image_arrays)\n        return inputs.reshape(len(inputs),\n                              MNIST_DIM[0],\n                              MNIST_DIM[1], 1).astype('float32') / 255", "docstring": "Convert images into the format required by our model.\n\nOur model requires that inputs be grayscale (mode 'L'), be resized to\n`MNIST_DIM`, and be represented as float32 numpy arrays in range\n[0, 1].\n\nArgs:\nraw_inputs (list of Images): a list of PIL Image objects\n\nReturns:\narray (float32): num images * height * width * num channels", "source": "juraj-google-style"}
{"code": "def _to_boolean(operand: List[WorkSpaceMessage]) -> Optional[bool]:\n    if not operand:\n        return None\n    if len(operand) > 1:\n        raise ValueError('Expected a single boolean result but got multiple items.')\n    if not fhir_types.is_boolean(operand[0].message):\n        raise ValueError('Expected a boolean but got a non-boolean value.')\n    return proto_utils.get_value_at_field(operand[0].message, 'value')", "docstring": "Converts an evaluation result to a boolean value or None.\n\nArgs:\noperand: an expression operand result to convert to boolean.\n\nReturns:\nthe boolean value, or None if the operand was empty.\n\nRaises:\nValueError if it is not an empty result or a single, boolean value.", "source": "github-repos"}
{"code": "def get_version(tool_name, tool_command):\n    result = {}\n    for line in Bash(ShellConfig(script=tool_command, internal=True)).process():\n        if (line.find('command not found') >= 0):\n            VersionsCheck.LOGGER.error(\"Required tool '%s' not found (stopping pipeline)!\", tool_name)\n            sys.exit(1)\n        else:\n            version = list(re.findall('(\\\\d+(\\\\.\\\\d+)+)+', line))[0][0]\n            result = {tool_name: Version(str(version))}\n        break\n    return result", "docstring": "Get name and version of a tool defined by given command.\n\nArgs:\ntool_name (str): name of the tool.\ntool_command (str): Bash one line command to get the version of the tool.\n\nReturns:\ndict: tool name and version or empty when no line has been found", "source": "codesearchnet"}
{"code": "def create_from_json(cls, json_data):\n    block = Block()\n    block_info = json_data['block_info']\n    block.block_id = block_info['block_id']\n    block.num_bins = (block_info['num_bins'] if ('num_bins' in block_info) else None)\n    block.property_type = (block_info['property_type'] if ('property_type' in block_info) else None)\n    block.meta = (json_data['meta'] if ('meta' in json_data) else None)\n    block.component_results = _create_component_results(json_data, 'block_info')\n    return block", "docstring": "Deserialize block json data into a Block object\n\nArgs:\njson_data (dict): The json data for this block\n\nReturns:\nBlock object", "source": "codesearchnet"}
{"code": "def get_hostname_prefix():\n    parts = []\n    version = modules.get_current_version_name()\n    default_version = modules.get_default_version()\n    if (version != default_version):\n        parts.append(version)\n    module = modules.get_current_module_name()\n    if (module != 'default'):\n        parts.append(module)\n    if parts:\n        parts.append('')\n    return '-dot-'.join(parts)", "docstring": "Returns the hostname prefix of a running Endpoints service.\n\nThe prefix is the portion of the hostname that comes before the API name.\nFor example, if a non-default version and a non-default service are in use,\nthe returned result would be '{VERSION}-dot-{SERVICE}-'.\n\nReturns:\nstr, the hostname prefix.", "source": "codesearchnet"}
{"code": "def dtype(self) -> torch.dtype:\n    if self._rot_mats is not None:\n        return self._rot_mats.dtype\n    elif self._quats is not None:\n        return self._quats.dtype\n    else:\n        raise ValueError('Both rotations are None')", "docstring": "Returns the dtype of the underlying rotation.\n\nReturns:\nThe dtype of the underlying rotation", "source": "github-repos"}
{"code": "def marginalize_out(node_indices, tpm):\n    return (tpm.sum(tuple(node_indices), keepdims=True) / np.array(tpm.shape)[list(node_indices)].prod())", "docstring": "Marginalize out nodes from a TPM.\n\nArgs:\nnode_indices (list[int]): The indices of nodes to be marginalized out.\ntpm (np.ndarray): The TPM to marginalize the node out of.\n\nReturns:\nnp.ndarray: A TPM with the same number of dimensions, with the nodes\nmarginalized out.", "source": "codesearchnet"}
{"code": "def symmetry_reduce(tensors, structure, tol=1e-08, **kwargs):\n    sga = SpacegroupAnalyzer(structure, **kwargs)\n    symmops = sga.get_symmetry_operations(cartesian=True)\n    unique_mapping = TensorMapping([tensors[0]], [[]], tol=tol)\n    for tensor in tensors[1:]:\n        is_unique = True\n        for (unique_tensor, symmop) in itertools.product(unique_mapping, symmops):\n            if np.allclose(unique_tensor.transform(symmop), tensor, atol=tol):\n                unique_mapping[unique_tensor].append(symmop)\n                is_unique = False\n                break\n        if is_unique:\n            unique_mapping[tensor] = []\n    return unique_mapping", "docstring": "Function that converts a list of tensors corresponding to a structure\nand returns a dictionary consisting of unique tensor keys with symmop\nvalues corresponding to transformations that will result in derivative\ntensors from the original list\n\nArgs:\ntensors (list of tensors): list of Tensor objects to test for\nsymmetrically-equivalent duplicates\nstructure (Structure): structure from which to get symmetry\ntol (float): tolerance for tensor equivalence\nkwargs: keyword arguments for the SpacegroupAnalyzer\n\nreturns:\ndictionary consisting of unique tensors with symmetry operations\ncorresponding to those which will reconstruct the remaining\ntensors as values", "source": "codesearchnet"}
{"code": "def set_bpduguard(self, name, value=False, default=False, disable=False):\n    value = ('enable' if value else 'disable')\n    string = 'spanning-tree bpduguard'\n    cmds = self.command_builder(string, value=value, default=default, disable=disable)\n    return self.configure_interface(name, cmds)", "docstring": "Configures the bpduguard value for the specified interface\n\nArgs:\nname (string): The interface identifier to configure.  The name\nmust be the full interface name (eg Ethernet1, not Et1)\n\nvalue (bool): True if bpduguard is enabled otherwise False\n\ndefault (bool): Configures the bpduguard parameter to its default\nvalue using the EOS CLI default config command\n\ndisable (bool): Negates the bpduguard parameter using the EOS\nCLI no config command\n\nReturns:\nTrue if the command succeeds, otherwise False\n\nRaises:\nValueError: Rasied if an invalid interface name is specified\n\nTypeError: Raised if the value keyword argument does not evaluate\nto a valid boolean", "source": "codesearchnet"}
{"code": "def remove_triple(self, p, o, auto_refresh=True):\n    self.rdf.graph.remove((self.uri, p, self._handle_object(o)))\n    self._handle_triple_refresh(auto_refresh)", "docstring": "remove triple by supplying p,o\n\nArgs:\np (rdflib.term.URIRef): predicate\no (): object\nauto_refresh (bool): whether or not to update object-like self.rdf.triples\n\nReturns:\nNone: removes triple from self.rdf.graph", "source": "codesearchnet"}
{"code": "def __init__(self, xid=None, role=None, generation_id=None):\n        \n        super().__init__(xid)\n        self.role = role\n        self.generation_id = generation_id", "docstring": "Create a RoleBaseMessage with the optional parameters below.\n\nArgs:\nxid (int): OpenFlow xid to the header.\nrole (:class:`~.controller2switch.common.ControllerRole`): .\ngeneration_id (int): Master Election Generation Id.", "source": "juraj-google-style"}
{"code": "def create_forwarding_information_base(self, timeout=(- 1)):\n    uri = '{}{}'.format(self.data['uri'], self.FORWARDING_INFORMATION_PATH)\n    return self._helper.do_post(uri, None, timeout, None)", "docstring": "Generates the forwarding information base dump file for a logical interconnect.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns: Interconnect Forwarding Information Base DataInfo.", "source": "codesearchnet"}
{"code": "def delete(self, teamId):\n    check_type(teamId, basestring, may_be_none=False)\n    self._session.delete(((API_ENDPOINT + '/') + teamId))", "docstring": "Delete a team.\n\nArgs:\nteamId(basestring): The ID of the team to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"}
{"code": "def __init__(self, name, default_name=None, values=None) -> None:\n    self._name_scope = name_scope(name, default_name, values, skip_on_eager=False)\n    self._name = default_name if name is None else name", "docstring": "Initialize the context manager.\n\nArgs:\nname: The name argument that is passed to the op function.\ndefault_name: The default name to use if the `name` argument is `None`.\nvalues: The list of `Tensor` arguments that are passed to the op function.\n\nRaises:\nTypeError: if `default_name` is passed in but not a string.", "source": "github-repos"}
{"code": "def get_messages(self):\n    uri = '{}/messages'.format(self.data['uri'])\n    return self._helper.do_get(uri)", "docstring": "Retrieves the error or status messages associated with the specified profile.\n\nReturns:\ndict: Server Profile Health.", "source": "codesearchnet"}
{"code": "def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):\n    data_format = backend.standardize_data_format(data_format)\n    img = array_to_img(x, data_format=data_format, scale=scale)\n    if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):\n        warnings.warn('The JPG format does not support RGBA images, converting to RGB.')\n        img = img.convert('RGB')\n    img.save(path, format=file_format, **kwargs)", "docstring": "Saves an image stored as a NumPy array to a path or file object.\n\nArgs:\npath: Path or file object.\nx: NumPy array.\ndata_format: Image data format, either `\"channels_first\"` or\n`\"channels_last\"`.\nfile_format: Optional file format override. If omitted, the format to\nuse is determined from the filename extension. If a file object was\nused instead of a filename, this parameter should always be used.\nscale: Whether to rescale image values to be within `[0, 255]`.\n**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.", "source": "github-repos"}
{"code": "def properties(cls, with_bases=True):\n    if with_bases:\n        return accumulate_from_superclasses(cls, '__properties__')\n    else:\n        return set(cls.__properties__)", "docstring": "Collect the names of properties on this class.\n\nThis method *optionally* traverses the class hierarchy and includes\nproperties defined on any parent classes.\n\nArgs:\nwith_bases (bool, optional) :\nWhether to include properties defined on parent classes in\nthe results. (default: True)\n\nReturns:\nset[str] : property names", "source": "codesearchnet"}
{"code": "def dump(self):\n    out = []\n    out.append(self.filetype)\n    out.append('Format: {}'.format(self.version))\n    out.append('Type: ASCII')\n    out.append('')\n    for cmd in self.commands:\n        out.append(self.encode(cmd))\n    return ('\\n'.join(out) + '\\n')", "docstring": "Dump all commands in this object to a string.\n\nReturns:\nstr: An encoded list of commands separated by\n\\n characters suitable for saving to a file.", "source": "codesearchnet"}
{"code": "def data(self):\n        \n        if self._data_type == int:\n            if self._pb.HasField(\"int64_data\"):\n                return self._pb.int64_data\n            if self._pb.HasField(\"int32_data\"):\n                return self._pb.int32_data\n            if self._pb.HasField(\"uint64_data\"):\n                return self._pb.uint64_data\n            if self._pb.HasField(\"uint32_data\"):\n                return self._pb.uint32_data\n        elif self._data_type == float:\n            if self._pb.HasField(\"float32_data\"):\n                return self._pb.float32_data\n            if self._pb.HasField(\"float64_data\"):\n                return self._pb.float64_data\n        elif self._data_type == str:\n            return self._pb.string_data\n        elif self._data_type == bool:\n            return self._pb.bool_data\n        elif self._data_type == bytes:\n            return self._pb.bytes_data\n        return None", "docstring": "Metric data\n\nArgs:\nvalue (:obj:`bool` or :obj:`int` or :obj:`long` or :obj:`float`\nor :obj:`basestring` or :obj:`bytes`)\n\nReturns:\nvalue\n\nRaises:\n:obj:`TypeError`", "source": "juraj-google-style"}
{"code": "def extract_bundle(self, resource, timeout=(- 1)):\n    return self._client.update(resource, timeout=timeout, custom_headers={'Content-Type': 'text/plain'})", "docstring": "Extracts the existing bundle on the appliance and creates all the artifacts.\n\nArgs:\nresource (dict): Artifact Bundle to extract.\ntimeout:\nTimeout in seconds. Waits for task completion by default. The timeout does not abort the operation in\nOneView, it just stops waiting for its completion.\n\nReturns:\ndict: The Artifact Bundle.", "source": "codesearchnet"}
{"code": "def _validate(self, obj):\n        \n        report = ValidationReport()\n        if not self.validator.is_valid(obj):\n            for v in self.validator.iter_errors(obj):\n                report.add_error(\"[%s] %s\" % ('.'.join(str(vv) for vv in v.path), v.message))\n        return report", "docstring": "Do the actual validation\n\nArguments:\nobj (dict): object to validate\n\nReturns: ValidationReport", "source": "juraj-google-style"}
{"code": "def without_document_lock(func):\n\n    @wraps(func)\n    def wrapper(*args, **kw):\n        return func(*args, **kw)\n    wrapper.nolock = True\n    return wrapper", "docstring": "Wrap a callback function to execute without first obtaining the\ndocument lock.\n\nArgs:\nfunc (callable) : The function to wrap\n\nReturns:\ncallable : a function wrapped to execute without a |Document| lock.\n\nWhile inside an unlocked callback, it is completely *unsafe* to modify\n``curdoc()``. The value of ``curdoc()`` inside the callback will be a\nspecially wrapped version of |Document| that only allows safe operations,\nwhich are:\n\n* :func:`~bokeh.document.Document.add_next_tick_callback`\n* :func:`~bokeh.document.Document.remove_next_tick_callback`\n\nOnly these may be used safely without taking the document lock. To make\nother changes to the document, you must add a next tick callback and make\nyour changes to ``curdoc()`` from that second callback.\n\nAttempts to otherwise access or change the Document will result in an\nexception being raised.", "source": "codesearchnet"}
{"code": "def get_service_account_email(self, project=None):\n    if (project is None):\n        project = self.project\n    path = ('/projects/%s/serviceAccount' % (project,))\n    api_response = self._connection.api_request(method='GET', path=path)\n    return api_response['email']", "docstring": "Get the email address of the project's BigQuery service account\n\nNote:\nThis is the service account that BigQuery uses to manage tables\nencrypted by a key in KMS.\n\nArgs:\nproject (str, optional):\nProject ID to use for retreiving service account email.\nDefaults to the client's project.\n\nReturns:\nstr: service account email address\n\nExample:\n\n>>> from google.cloud import bigquery\n>>> client = bigquery.Client()\n>>> client.get_service_account_email()\nmy_service_account@my-project.iam.gserviceaccount.com", "source": "codesearchnet"}
{"code": "def is_number(s):\n    \n    try:\n        float(s)\n        return True\n    except ValueError:\n        pass\n\n    try:\n        import unicodedata\n        unicodedata.numeric(s)\n        return True\n    except (TypeError, ValueError):\n        pass\n\n    return False", "docstring": "Determines if the input is numeric\n\nArgs:\ns: The value to check.\nReturns:\nbool: ``True`` if the input is numeric, ``False`` otherwise.", "source": "juraj-google-style"}
{"code": "def SetDecodedStreamSize(self, decoded_stream_size):\n    \n    if self._is_open:\n      raise IOError('Already open.')\n\n    if decoded_stream_size < 0:\n      raise ValueError((\n          'Invalid decoded stream size: {0:d} value out of '\n          'bounds.').format(decoded_stream_size))\n\n    self._decoded_stream_size = decoded_stream_size", "docstring": "Sets the decoded stream size.\n\nThis function is used to set the decoded stream size if it can be\ndetermined separately.\n\nArgs:\ndecoded_stream_size (int): size of the decoded stream in bytes.\n\nRaises:\nIOError: if the file-like object is already open.\nOSError: if the file-like object is already open.\nValueError: if the decoded stream size is invalid.", "source": "juraj-google-style"}
{"code": "def str2tuple(str_in):\n    \n    tuple_out = safe_eval(str_in)\n    if not isinstance(tuple_out, tuple):\n        tuple_out = None\n    return tuple_out", "docstring": "Extracts a tuple from a string.\n\nArgs:\nstr_in (string) that contains python tuple\nReturns:\n(dict) or None if no valid tuple was found\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def delete_clinvar_object(self, object_id, object_type, submission_id):\n    LOG.info('Deleting clinvar object %s (%s)', object_id, object_type)\n    result = ''\n    if (object_type == 'variant_data'):\n        self.clinvar_submission_collection.find_one_and_update({'_id': ObjectId(submission_id)}, {'$pull': {'variant_data': object_id}})\n        variant_object = self.clinvar_collection.find_one({'_id': object_id})\n        linking_id = variant_object.get('linking_id')\n        result = self.clinvar_collection.delete_many({'linking_id': linking_id})\n    else:\n        result = self.clinvar_collection.delete_one({'_id': object_id})\n    self.clinvar_submission_collection.find_one_and_update({'_id': ObjectId(submission_id)}, {'$pull': {'case_data': object_id}})\n    updated_submission = self.clinvar_submission_collection.find_one_and_update({'_id': submission_id}, {'$set': {'updated_at': datetime.now()}}, return_document=pymongo.ReturnDocument.AFTER)\n    return updated_submission", "docstring": "Remove a variant object from clinvar database and update the relative submission object\n\nArgs:\nobject_id(str) : the id of an object to remove from clinvar_collection database collection (a variant of a case)\nobject_type(str) : either 'variant_data' or 'case_data'. It's a key in the clinvar_submission object.\nsubmission_id(str): the _id key of a clinvar submission\n\nReturns:\nupdated_submission(obj): an updated clinvar submission", "source": "codesearchnet"}
{"code": "def decoded(self):\n    logging.info('Decoding message: {0}'.format(self.message))\n    self.offset = (self.offset * (- 1))\n    return self.cipher()", "docstring": "Decodes message using Caesar shift cipher\n\nInverse operation of encoding, applies negative offset to Caesar shift\ncipher.\n\nReturns:\nString decoded with cipher.", "source": "codesearchnet"}
{"code": "def send_example_telemetry(example_name, *example_args, framework='pytorch'):\n    if is_offline_mode():\n        return\n    data = {'example': example_name, 'framework': framework}\n    for args in example_args:\n        args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith('_') and v is not None}\n        if 'model_name_or_path' in args_as_dict:\n            model_name = args_as_dict['model_name_or_path']\n            if not os.path.isdir(model_name):\n                data['model_name'] = args_as_dict['model_name_or_path']\n        if 'dataset_name' in args_as_dict:\n            data['dataset_name'] = args_as_dict['dataset_name']\n        elif 'task_name' in args_as_dict:\n            script_name = example_name.replace('tf_', '').replace('flax_', '').replace('run_', '')\n            script_name = script_name.replace('_no_trainer', '')\n            data['dataset_name'] = f'{script_name}-{args_as_dict['task_name']}'\n    send_telemetry(topic='examples', library_name='transformers', library_version=__version__, user_agent=http_user_agent(data))", "docstring": "Sends telemetry that helps tracking the examples use.\n\nArgs:\nexample_name (`str`): The name of the example.\n*example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only\ntry to extract the model and dataset name from those. Nothing else is tracked.\nframework (`str`, *optional*, defaults to `\"pytorch\"`): The framework for the example.", "source": "github-repos"}
{"code": "def __init__(self, location, resource_pool):\n        \n        self.location = location\n        self.pool = resource_pool", "docstring": "Create a package repository.\n\nArgs:\nlocation (str): A string specifying the location of the repository.\nThis could be a filesystem path, or a database uri, etc.\nresource_pool (`ResourcePool`): The pool used to manage package\nresources.", "source": "juraj-google-style"}
{"code": "def value(self):\n    raise NotImplementedError", "docstring": "Returns the last snapshot of this variable.\n\nYou usually do not need to call this method as all ops that need the value\nof the variable call it automatically through a `convert_to_tensor()` call.\n\nReturns a `Tensor` which holds the value of the variable.  You can not\nassign a new value to this tensor as it is not a reference to the variable.\n\nTo avoid copies, if the consumer of the returned value is on the same device\nas the variable, this actually returns the live value of the variable, not\na copy.  Updates to the variable are seen by the consumer.  If the consumer\nis on a different device it will get a copy of the variable.\n\nReturns:\nA `Tensor` containing the value of the variable.", "source": "github-repos"}
{"code": "def training_job_summaries(self, force_refresh=False):\n        \n        if force_refresh:\n            self.clear_cache()\n        if self._training_job_summaries is not None:\n            return self._training_job_summaries\n        output = []\n        next_args = {}\n        for count in range(100):\n            logging.debug(\"Calling list_training_jobs_for_hyper_parameter_tuning_job %d\" % count)\n            raw_result = self._sage_client.list_training_jobs_for_hyper_parameter_tuning_job(\n                HyperParameterTuningJobName=self.name, MaxResults=100, **next_args\n            )\n            new_output = raw_result['TrainingJobSummaries']\n            output.extend(new_output)\n            logging.debug(\"Got %d more TrainingJobs. Total so far: %d\" % (len(new_output), len(output)))\n            if ('NextToken' in raw_result) and (len(new_output) > 0):\n                next_args['NextToken'] = raw_result['NextToken']\n            else:\n                break\n        self._training_job_summaries = output\n        return output", "docstring": "A (paginated) list of everything from ``ListTrainingJobsForTuningJob``.\n\nArgs:\nforce_refresh (bool): Set to True to fetch the latest data from SageMaker API.\n\nReturns:\ndict: The Amazon SageMaker response for ``ListTrainingJobsForTuningJob``.", "source": "juraj-google-style"}
{"code": "def GetFormattedMessages(self, event):\n    event_formatter = self.GetEventFormatter(event)\n    if (not event_formatter):\n        return (None, None)\n    return event_formatter.GetMessages(self._formatter_mediator, event)", "docstring": "Retrieves the formatted messages related to the event.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\ntuple: containing:\n\nstr: full message string or None if no event formatter was found.\nstr: short message string or None if no event formatter was found.", "source": "codesearchnet"}
{"code": "def query(botcust2, message):\n    \n    logger.debug(\"Getting Mitsuku reply\")\n\n    \n    params = {\n        'botid': 'f6a012073e345a08',\n        'amp;skin': 'chat'\n    }\n    headers = {\n        'Accept-Encoding': 'gzip, deflate, br',\n        'Accept-Language': 'en-US,en;q=0.8',\n        'Cache-Control': 'max-age=0',\n        'Connection': 'keep-alive',\n        'Content-Length': str(len(message) + 34),\n        'Content-Type': 'application/x-www-form-urlencoded',\n        'Cookie': 'botcust2=' + botcust2,\n        'DNT': '1',\n        'Host': 'kakko.pandorabots.com',\n        'Origin': 'https:\n        'Referer': 'https:\n        'Upgrade-Insecure-Requests': '1',\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n                      'AppleWebKit/537.36 (KHTML, like Gecko) '\n                      'Chrome/58.0.3029.110 Safari/537.36'\n    }\n    data = {\n        'botcust2': botcust2,\n        'message': message\n    }\n\n    \n    logger.debug(\"Sending POST request\")\n    response = requests.post(\n        url,\n        params=params,\n        headers=headers,\n        data=data\n    )\n    logger.debug(\"POST response {}\".format(response))\n\n    \n    parsed = lxml.html.parse(io.StringIO(response.text)).getroot()\n    try:\n        result = parsed[1][2][0][2].tail[1:]\n        logger.debug(\"Getting botcust2 successful\")\n    except IndexError:\n        result = False\n        logger.critical(\"Getting botcust2 from html failed\")\n\n    return result", "docstring": "Sends a message to Mitsuku and retrieves the reply\n\nArgs:\nbotcust2 (str): The botcust2 identifier\nmessage (str): The message to send to Mitsuku\n\nReturns:\nreply (str): The message Mitsuku sent back", "source": "juraj-google-style"}
{"code": "def restore(cdiff, a):\n    left = (a.splitlines(1) if isinstance(a, string_types) else a)\n    lrest = []\n    iline = 0\n    for (i, line) in enumerate(left):\n        if (iline not in cdiff):\n            lrest.append(('  ' + line))\n            iline += 1\n        else:\n            cs = [l[0] for l in cdiff[iline]]\n            add = (cs.count('+') - cs.count('-'))\n            lrest.extend(cdiff[iline])\n            iline += (add + 1)\n    for i in sorted(cdiff.keys()):\n        if (i >= len(left)):\n            lrest.extend(cdiff[i])\n    from difflib import restore\n    return list(restore(lrest, 2))", "docstring": "Restores the full text of either the edited text using the\ncompressed diff.\n\nArgs:\ncdiff (dict): compressed diff returned by\n:func:`~acorn.logging.diff.compress`.\na (str or list): *original* string or list of strings to use as a\nreference to restore the edited version.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    file_offset = file_object.get_offset()\n    file_size = file_object.get_size()\n    while file_offset < file_size:\n      try:\n        self._ParseRecord(parser_mediator, file_object)\n      except errors.ParseError as exception:\n        if file_offset == 0:\n          raise errors.UnableToParseFile(\n              'Unable to parse first event record with error: {0!s}'.format(\n                  exception))\n\n        \n\n      file_offset = file_object.get_offset()", "docstring": "Parses a BSM file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"}
{"code": "def _countdown(self, waitTime=0, printString=\"Waiting %*d seconds...\", verbose=True):\n        \n        if waitTime <= 0:\n            waitTime = self.__retryDelay\n        for remaining in range(waitTime, 0, -1):\n            _vPrint(verbose, \"\\r\" + printString % (len(str(waitTime)), remaining), end=\"\", flush=True)\n            time.sleep(1)\n        if verbose:\n            _vPrint(verbose, \"\\r\" + printString % (len(str(waitTime)), 0))", "docstring": "Makes a pretty countdown.\n\nArgs:\ngitquery (str): The query or endpoint itself.\nExamples:\nquery: 'query { viewer { login } }'\nendpoint: '/user'\nprintString (Optional[str]): A counter message to display.\nDefaults to 'Waiting %*d seconds...'\nverbose (Optional[bool]): If False, all extra printouts will be\nsuppressed. Defaults to True.", "source": "juraj-google-style"}
{"code": "def _reload_config(self, reload_original_config):\n    if reload_original_config:\n        self.original_config = self.running_config\n        self.original_config.set_name('original')\n    paths = self.running_config.get_paths()\n    self.running_config = FortiConfig('running', vdom=self.vdom)\n    for path in paths:\n        self.load_config(path, empty_candidate=True)", "docstring": "This command will update the running config from the live device.\n\nArgs:\n* reload_original_config:\n* If ``True`` the original config will be loaded with the running config before reloading the\\\noriginal config.\n* If ``False`` the original config will remain untouched.", "source": "codesearchnet"}
{"code": "def create(cls, session, record, endpoint_override=None, out_type=None, **add_params):\n    cls._check_implements('create')\n    data = record.to_api()\n    params = {'reload': True}\n    params.update(**add_params)\n    data.update(params)\n    return cls((endpoint_override or ('/%s.json' % cls.__endpoint__)), data=data, request_type=RequestPaginator.POST, singleton=True, session=session, out_type=out_type)", "docstring": "Create an object on HelpScout.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nrecord (helpscout.BaseModel): The record to be created.\nendpoint_override (str, optional): Override the default\nendpoint using this.\nout_type (helpscout.BaseModel, optional): The type of record to\noutput. This should be provided by child classes, by calling\nsuper.\n**add_params (mixed): Add these to the request parameters.\n\nReturns:\nhelpscout.models.BaseModel: Newly created record. Will be of the", "source": "codesearchnet"}
{"code": "def chat_postEphemeral(self, *, channel: str, user: str, **kwargs) -> SlackResponse:\n    kwargs.update({'channel': channel, 'user': user})\n    return self.api_call('chat.postEphemeral', json=kwargs)", "docstring": "Sends an ephemeral message to a user in a channel.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\nuser (str): The id of user who should see the message. e.g. 'U0BPQUNTA'\ntext (str): The message you'd like to share. e.g. 'Hello world'\ntext is not required when presenting blocks.\nblocks (list): A dictionary list of blocks.\nBlocks are required when not presenting text.\ne.g. [{\"type\": \"section\", \"text\": {\"type\": \"plain_text\", \"text\": \"Hello world\"}}]", "source": "codesearchnet"}
{"code": "def __init__(self, callback):\n        \n        self._callback = callback\n        self._ras = brocade_ras(callback=pynos.utilities.return_xml)", "docstring": "RAS init method.\nArgs:\ncallback: Callback function that will be called for each action.\nReturns:\nRAS Object\nRaises:\nNone", "source": "juraj-google-style"}
{"code": "def get_configured_consensus_module(block_id, state_view):\n        \n        settings_view = SettingsView(state_view)\n\n        default_consensus = \\\n            'genesis' if block_id == NULL_BLOCK_IDENTIFIER else 'devmode'\n        consensus_module_name = settings_view.get_setting(\n            'sawtooth.consensus.algorithm', default_value=default_consensus)\n        return ConsensusFactory.get_consensus_module(\n            consensus_module_name)", "docstring": "Returns the consensus_module based on the consensus module set by\nthe \"sawtooth_settings\" transaction family.\n\nArgs:\nblock_id (str): the block id associated with the current state_view\nstate_view (:obj:`StateView`): the current state view to use for\nsetting values\nRaises:\nUnknownConsensusModuleError: Thrown when an invalid consensus\nmodule has been configured.", "source": "juraj-google-style"}
{"code": "def normalize_cell_value(value):\n    if (isinstance(value, dict) or isinstance(value, list)):\n        return json.dumps(value)\n    return value", "docstring": "Process value for writing into a cell.\n\nArgs:\nvalue: any type of variable\n\nReturns:\njson serialized value if value is list or dict, else value", "source": "codesearchnet"}
{"code": "def VerifyStructure(self, parser_mediator, lines):\n    return ((re.match(self._VERIFICATION_REGEX, lines) or re.match(self._CHROMEOS_VERIFICATION_REGEX, lines)) is not None)", "docstring": "Verifies that this is a syslog-formatted file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between\nparsers and other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "codesearchnet"}
{"code": "def __init__(self, resolver_context):\n    \n    super(FileIO, self).__init__()\n    self._is_cached = False\n    self._is_open = False\n    self._resolver_context = resolver_context", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"}
{"code": "def xslt_transformation(xml, template):\n    transformer = ET.XSLT(_read_template(template))\n    newdom = transformer(_read_marcxml(xml))\n    return ET.tostring(newdom, pretty_print=True, encoding='utf-8')", "docstring": "Transform `xml` using XSLT `template`.\n\nArgs:\nxml (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\ntemplate (str): Filename or XML string. Don't use ``\\\\n`` in case of\nfilename.\n\nReturns:\nstr: Transformed `xml` as string.", "source": "codesearchnet"}
{"code": "def dim_reduce_data(data, d):\n    \n    genes, cells = data.shape\n    distances = np.zeros((cells, cells))\n    for i in range(cells):\n        for j in range(cells):\n            distances[i,j] = poisson_dist(data[:,i], data[:,j])\n    \n    proximity = distances**2\n    J = np.eye(cells) - 1./cells\n    B = -0.5*np.dot(J, np.dot(proximity, J))\n    \n    e_val, e_vec = np.linalg.eigh(B)\n    \n    lam = np.diag(e_val[-d:])[::-1]\n    \n    E = e_vec[:,-d:][::-1]\n    X = np.dot(E, lam**0.5)\n    return X", "docstring": "Does a MDS on the data directly, not on the means.\n\nArgs:\ndata (array): genes x cells\nd (int): desired dimensionality\n\nReturns:\nX, a cells x d matrix", "source": "juraj-google-style"}
{"code": "def from_corpus(cls, corpus):\n    ds = Corpus()\n    tracks = copy.deepcopy(list(corpus.tracks.values()))\n    track_mapping = ds.import_tracks(tracks)\n    issuers = copy.deepcopy(list(corpus.issuers.values()))\n    issuer_mapping = ds.import_issuers(issuers)\n    utterances = copy.deepcopy(list(corpus.utterances.values()))\n    for utterance in utterances:\n        utterance.track = track_mapping[utterance.track.idx]\n        if (utterance.issuer is not None):\n            utterance.issuer = issuer_mapping[utterance.issuer.idx]\n    ds.import_utterances(utterances)\n    subviews = copy.deepcopy(corpus.subviews)\n    for (subview_idx, subview) in subviews.items():\n        ds.import_subview(subview_idx, subview)\n    for (feat_container_idx, feature_container) in corpus.feature_containers.items():\n        ds.new_feature_container(feat_container_idx, feature_container.path)\n    return ds", "docstring": "Create a new modifiable corpus from any other CorpusView.\nThis for example can be used to create a independent modifiable corpus from a subview.\n\nArgs:\ncorpus (CorpusView): The corpus to create a copy from.\n\nReturns:\nCorpus: A new corpus with the same data as the given one.", "source": "codesearchnet"}
{"code": "def test_string(self, string: str) -> bool:\n        \n        if self.input.startswith(string, self.offset):\n            self.offset += len(string)\n            return True\n        return False", "docstring": "If `string` comes next, return ``True`` and advance offset.\n\nArgs:\nstring: string to test", "source": "juraj-google-style"}
{"code": "def allocate(self, size, max_time_to_block_ms):\n        \n        with self._lock:\n            \n            if self._free:\n                return self._free.popleft()\n\n            elif self._poolable_size == 0:\n                return io.BytesIO()\n\n            else:\n                \n                buf = None\n                more_memory = threading.Condition(self._lock)\n                self._waiters.append(more_memory)\n                \n                \n                while buf is None:\n                    start_wait = time.time()\n                    more_memory.wait(max_time_to_block_ms / 1000.0)\n                    end_wait = time.time()\n                    if self.wait_time:\n                        self.wait_time.record(end_wait - start_wait)\n\n                    if self._free:\n                        buf = self._free.popleft()\n                    else:\n                        self._waiters.remove(more_memory)\n                        raise Errors.KafkaTimeoutError(\n                            \"Failed to allocate memory within the configured\"\n                            \" max blocking time\")\n\n                \n                \n                removed = self._waiters.popleft()\n                assert removed is more_memory, 'Wrong condition'\n\n                \n                \n                if self._free and self._waiters:\n                    self._waiters[0].notify()\n\n                \n                return buf", "docstring": "Allocate a buffer of the given size. This method blocks if there is not\nenough memory and the buffer pool is configured with blocking mode.\n\nArguments:\nsize (int): The buffer size to allocate in bytes [ignored]\nmax_time_to_block_ms (int): The maximum time in milliseconds to\nblock for buffer memory to be available\n\nReturns:\nio.BytesIO", "source": "juraj-google-style"}
{"code": "def sort_dict(d, desc=True):\n\n    \n\n    sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)\n    return OrderedDict(sort)", "docstring": "Sort an ordered dictionary by value, descending.\n\nArgs:\nd (OrderedDict): An ordered dictionary.\ndesc (bool): If true, sort desc.\n\nReturns:\nOrderedDict: The sorted dictionary.", "source": "juraj-google-style"}
{"code": "async def bootstrap(self, addrs):\n    log.debug('Attempting to bootstrap node with %i initial contacts', len(addrs))\n    cos = list(map(self.bootstrap_node, addrs))\n    gathered = (await asyncio.gather(*cos))\n    nodes = [node for node in gathered if (node is not None)]\n    spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha)\n    return (await spider.find())", "docstring": "Bootstrap the server by connecting to other known nodes in the network.\n\nArgs:\naddrs: A `list` of (ip, port) `tuple` pairs.  Note that only IP\naddresses are acceptable - hostnames will cause an error.", "source": "codesearchnet"}
{"code": "def _has_requirements(self):\n    self._closed()\n    return any([self.has_workflow_step, self.has_scatter_requirement, self.has_multiple_inputs])", "docstring": "Returns True if the workflow needs a requirements section.\n\nReturns:\nbool: True if the workflow needs a requirements section, False\notherwise.", "source": "codesearchnet"}
{"code": "def evaluate(nodes, x_val, y_val):\n    (_, num_nodes) = nodes.shape\n    if (num_nodes == 1):\n        raise ValueError('A point cannot be implicitized')\n    elif (num_nodes == 2):\n        return (((nodes[(0, 0)] - x_val) * (nodes[(1, 1)] - y_val)) - ((nodes[(0, 1)] - x_val) * (nodes[(1, 0)] - y_val)))\n    elif (num_nodes == 3):\n        (val_a, val_b, val_c) = (nodes[(0, :)] - x_val)\n        val_b *= 2\n        (val_d, val_e, val_f) = (nodes[(1, :)] - y_val)\n        val_e *= 2\n        sub1 = ((val_b * val_f) - (val_c * val_e))\n        sub2 = ((val_a * val_f) - (val_c * val_d))\n        sub_det_a = (((- val_e) * sub1) + (val_f * sub2))\n        sub_det_d = ((val_b * sub1) - (val_c * sub2))\n        return ((val_a * sub_det_a) + (val_d * sub_det_d))\n    elif (num_nodes == 4):\n        return _evaluate3(nodes, x_val, y_val)\n    else:\n        raise _helpers.UnsupportedDegree((num_nodes - 1), supported=(1, 2, 3))", "docstring": "r\"\"\"Evaluate the implicitized bivariate polynomial containing the curve.\n\nAssumes `algebraic curve`_ containing :math:`B(s, t)` is given by\n:math:`f(x, y) = 0`. This function evaluates :math:`f(x, y)`.\n\n.. note::\n\nThis assumes, but doesn't check, that ``nodes`` has 2 rows.\n\n.. note::\n\nThis assumes, but doesn't check, that ``nodes`` is not degree-elevated.\nIf it were degree-elevated, then the Sylvester matrix will always\nhave zero determinant.\n\nArgs:\nnodes (numpy.ndarray): ``2 x N`` array of nodes in a curve.\nx_val (float): ``x``-coordinate for evaluation.\ny_val (float): ``y``-coordinate for evaluation.\n\nReturns:\nfloat: The computed value of :math:`f(x, y)`.\n\nRaises:\nValueError: If the curve is a point.\n.UnsupportedDegree: If the degree is not 1, 2 or 3.", "source": "codesearchnet"}
{"code": "def __init__(self, max_tries=5, max_wait=10, *args, **kwargs):\n        \n        self._max_tries = max_tries\n        if self._max_tries < 1:\n            raise TypeError('max_tries must be a positive integer')\n        self._max_wait = max_wait\n        if self._max_wait < 1:\n            raise TypeError('max_wait must be >= 1')\n        super(NetworkType, self).__init__(*args, **kwargs)", "docstring": "Validation type for external resources\n\nAttempts to connect to the resource, backing off on failure.\n\nArgs:\nmax_tries: Max number of times to attempt a connection before failing\nmax_wait: Max number of seconds to wait between connection attempts. This can be\nused to cap the exponential backoff.", "source": "juraj-google-style"}
{"code": "def _stream_output(process):\n    \n    exit_code = None\n\n    while exit_code is None:\n        stdout = process.stdout.readline().decode(\"utf-8\")\n        sys.stdout.write(stdout)\n        exit_code = process.poll()\n\n    if exit_code != 0:\n        raise RuntimeError(\"Process exited with code: %s\" % exit_code)\n\n    return exit_code", "docstring": "Stream the output of a process to stdout\n\nThis function takes an existing process that will be polled for output. Only stdout\nwill be polled and sent to sys.stdout.\n\nArgs:\nprocess(subprocess.Popen): a process that has been started with\nstdout=PIPE and stderr=STDOUT\n\nReturns (int): process exit code", "source": "juraj-google-style"}
{"code": "def put(self, obj):\n    self._queue.put(obj, block=True, timeout=self._queue_put_timeout)\n    if (obj is _SHUTDOWNREQUEST):\n        return", "docstring": "Put request into queue.\n\nArgs:\nobj (cheroot.server.HTTPConnection): HTTP connection\nwaiting to be processed", "source": "codesearchnet"}
{"code": "def concat(self, axis, other_blocks):\n    if (type(other_blocks) is list):\n        other_blocks = [blocks.partitions for blocks in other_blocks]\n        return self.__constructor__(np.concatenate(([self.partitions] + other_blocks), axis=axis))\n    else:\n        return self.__constructor__(np.append(self.partitions, other_blocks.partitions, axis=axis))", "docstring": "Concatenate the blocks with another set of blocks.\n\nNote: Assumes that the blocks are already the same shape on the\ndimension being concatenated. A ValueError will be thrown if this\ncondition is not met.\n\nArgs:\naxis: The axis to concatenate to.\nother_blocks: the other blocks to be concatenated. This is a\nBaseFrameManager object.\n\nReturns:\nA new BaseFrameManager object, the type of object that called this.", "source": "codesearchnet"}
{"code": "def post_transform(self, args):\n    args = (args[1:] if (args and (args[0] == 'az')) else args)\n    post_transform_commands = []\n    for (i, arg) in enumerate(args):\n        if (is_alias_command(['create'], args) and (i > 0) and (args[(i - 1)] in ['-c', '--command'])):\n            post_transform_commands.append(arg)\n        else:\n            post_transform_commands.append(os.path.expandvars(arg))\n    AliasManager.write_alias_config_hash(self.alias_config_hash)\n    AliasManager.write_collided_alias(self.collided_alias)\n    return post_transform_commands", "docstring": "Inject environment variables, and write hash to alias hash file after transforming alias to commands.\n\nArgs:\nargs: A list of args to post-transform.", "source": "codesearchnet"}
{"code": "def as_dict(value):\n    return {field.name: getattr(value, field.name) for field in value._tf_extension_type_fields()}", "docstring": "Extracts the attributes of `value` and their values to a dict format.\n\nUnlike `dataclasses.asdict()`, this function is not recursive and in case of\nnested `ExtensionType` objects, only the top level object is converted to a\ndict.\n\nArgs:\nvalue: An `ExtensionType` object.\n\nReturns:\nA dict that contains the attributes of `value` and their values.", "source": "github-repos"}
{"code": "def load_from_dict(self, conf_dict=None):\n    self.set_to_default()\n    self._update_dict(self._config, conf_dict)\n    self._update_python_paths()", "docstring": "Load the configuration from a dictionary.\n\nArgs:\nconf_dict (dict): Dictionary with the configuration.", "source": "codesearchnet"}
{"code": "def recipe_sa_report(config, auth_sa, auth_bq, dataset, table, report, is_incremental_load):\n    sa(config, {'description': 'Create a dataset for bigquery tables.', 'auth': auth_sa, 'body': report, 'out': {'bigquery': {'auth': auth_bq, 'dataset': dataset, 'table': table, 'is_incremental_load': is_incremental_load, 'header': True}}})", "docstring": "Move SA360 report to BigQuery.\n\nArgs:\nauth_sa (authentication) - Credentials used for writing data.\nauth_bq (authentication) - Authorization used for writing data.\ndataset (string) - Existing BigQuery dataset.\ntable (string) - Table to create from this report.\nreport (json) - Body part of report request API call.\nis_incremental_load (boolean) - Clear data in destination table during this report's time period, then append report data to destination table.", "source": "github-repos"}
{"code": "def AddArguments(cls, argument_group):\n    \n    argument_group.add_argument(\n        '-o', '--output_format', '--output-format', metavar='FORMAT',\n        dest='output_format', default='dynamic', help=(\n            'The output format. Use \"-o list\" to see a list of available '\n            'output formats.'))\n\n    argument_group.add_argument(\n        '-w', '--write', metavar='OUTPUT_FILE', dest='write',\n        help='Output filename.')\n\n    \n    \n    arguments = sys.argv[1:]\n    argument_index = 0\n\n    if '-o' in arguments:\n      argument_index = arguments.index('-o') + 1\n    elif '--output_format' in arguments:\n      argument_index = arguments.index('--output_format') + 1\n    elif '--output-format' in arguments:\n      argument_index = arguments.index('--output-format') + 1\n\n    if 0 < argument_index < len(arguments):\n      names = [name.strip() for name in arguments[argument_index].split(',')]\n    else:\n      names = ['dynamic']\n\n    if names and names != ['list']:\n      manager.ArgumentHelperManager.AddCommandLineArguments(\n          argument_group, category='output', names=names)", "docstring": "Adds command line arguments to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"}
{"code": "def get_countries(is_legacy_xml=False):\n    countries = {}\n    if ((sys.platform == 'win32') and getattr(sys, 'frozen', False)):\n        data_dir = path.dirname(sys.executable)\n    else:\n        data_dir = path.dirname(__file__)\n    if is_legacy_xml:\n        log.debug('Opening country code legacy XML: {0}'.format((str(data_dir) + '/data/iso_3166-1_list_en.xml')))\n        f = io.open((str(data_dir) + '/data/iso_3166-1_list_en.xml'), 'r', encoding='ISO-8859-1')\n        data = f.read()\n        if (not data):\n            return {}\n        dom = parseString(data)\n        entries = dom.getElementsByTagName('ISO_3166-1_Entry')\n        for entry in entries:\n            code = entry.getElementsByTagName('ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data\n            name = entry.getElementsByTagName('ISO_3166-1_Country_name')[0].firstChild.data\n            countries[code] = name.title()\n    else:\n        log.debug('Opening country code CSV: {0}'.format((str(data_dir) + '/data/iso_3166-1_list_en.xml')))\n        f = io.open((str(data_dir) + '/data/iso_3166-1.csv'), 'r', encoding='utf-8')\n        csv_reader = csv.reader(f, delimiter=',', quotechar='\"')\n        for row in csv_reader:\n            code = row[0]\n            name = row[1]\n            countries[code] = name\n    return countries", "docstring": "The function to generate a dictionary containing ISO_3166-1 country codes\nto names.\n\nArgs:\nis_legacy_xml (:obj:`bool`): Whether to use the older country code\nlist (iso_3166-1_list_en.xml).\n\nReturns:\ndict: A mapping of country codes as the keys to the country names as\nthe values.", "source": "codesearchnet"}
{"code": "def _on_connection_close(self, connection, reply_code_or_reason, reply_text=None):\n        \n        self._channel = None\n\n        if isinstance(reply_code_or_reason, pika_errs.ConnectionClosed):\n            reply_code = reply_code_or_reason.reply_code\n            reply_text = reply_code_or_reason.reply_text\n        elif isinstance(reply_code_or_reason, int):\n            reply_code = reply_code_or_reason\n        else:\n            reply_code = 0\n            reply_text = str(reply_code_or_reason)\n\n        if reply_code == 200:\n            \n            _log.info(\"Server connection closed (%s), shutting down\", reply_text)\n            connection.ioloop.stop()\n        else:\n            _log.warning(\n                \"Connection to %s closed unexpectedly (%d): %s\",\n                connection.params.host,\n                reply_code,\n                reply_text,\n            )\n            self.call_later(1, self.reconnect)", "docstring": "Callback invoked when a previously-opened connection is closed.\n\nArgs:\nconnection (pika.connection.SelectConnection): The connection that\nwas just closed.\nreply_code_or_reason (int|Exception): The reason why the channel\nwas closed. In older versions of pika, this is the AMQP code.\nreply_text (str): The human-readable reason the connection was\nclosed (only in older versions of pika)", "source": "juraj-google-style"}
{"code": "def Evaluate(self, client_obj):\n    \n    if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL:\n      quantifier = all\n    elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY:\n      quantifier = any\n    else:\n      raise ValueError(\"Unexpected match mode value: %s\" % self.match_mode)\n\n    return quantifier(rule.Evaluate(client_obj) for rule in self.rules)", "docstring": "Evaluates rules held in the rule set.\n\nArgs:\nclient_obj: Either an aff4 client object or a client_info dict as returned\nby ReadFullInfoClient if the relational db is used for reading.\n\nReturns:\nA bool value of the evaluation.\n\nRaises:\nValueError: The match mode is of unknown value.", "source": "juraj-google-style"}
{"code": "def writelines(self, lines, sep=b'\\n', echo=None):\n        \n\n        self.write(sep.join(lines + [b'']), echo)", "docstring": "Write a list of byte sequences to the channel and terminate them\nwith a separator (line feed).\n\nArgs:\nlines(list of bytes): The lines to send.\nsep(bytes): The separator to use after each line.\necho(bool): Whether to echo the written data to stdout.\n\nRaises:\nEOFError: If the channel was closed before all data was sent.", "source": "juraj-google-style"}
{"code": "def slice_vec(expr, start, stop):\n    weld_obj = WeldObject(encoder_, decoder_)\n    expr_var = weld_obj.update(expr)\n    if isinstance(expr, WeldObject):\n        expr_var = expr.obj_id\n        weld_obj.dependencies[expr_var] = expr\n    weld_template = '\\n    slice(%(expr)s, %(start)sL, %(stop)sL)\\n    '\n    weld_obj.weld_code = (weld_template % {'expr': expr_var, 'start': start, 'stop': stop})\n    return weld_obj", "docstring": "Slices the vector.\n\nArgs:\nexpr (WeldObject)\nstart (Long)\nstop (Long)", "source": "codesearchnet"}
{"code": "def _radix_int_handler_factory(radix_indicators, charset, parse_func):\n\n    def assertion(c, ctx):\n        return ((c in radix_indicators) and (((len(ctx.value) == 1) and (ctx.value[0] == _ZERO)) or ((len(ctx.value) == 2) and (ctx.value[0] == _MINUS) and (ctx.value[1] == _ZERO))) and (ctx.ion_type == IonType.INT))\n    return _numeric_handler_factory(charset, (lambda prev, c, ctx, trans: _illegal_character(c, ctx)), assertion, radix_indicators, parse_func, illegal_at_end=radix_indicators)", "docstring": "Generates a handler co-routine which tokenizes a integer of a particular radix.\n\nArgs:\nradix_indicators (sequence): The set of ordinals of characters that indicate the radix of this int.\ncharset (sequence): Set of ordinals of legal characters for this radix.\nparse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a\nthunk that lazily parses the token.", "source": "codesearchnet"}
{"code": "def get_content_type(content_type):\n    \n    m = email.message.Message()\n    m['Content-Type'] = content_type\n    return m.get_content_type()", "docstring": "Extract the MIME type value from a content type string.\n\nRemoves any subtype and parameter values that may be present in the string.\n\nArgs:\ncontent_type: str\nString with content type and optional subtype and parameter fields.\n\nReturns:\nstr: String with only content type\n\nExample:\n\n::\n\nInput:   multipart/form-data; boundary=aBoundaryString\nReturns: multipart/form-data", "source": "juraj-google-style"}
{"code": "def GetAdGroups(self, client_customer_id, campaign_id):\n    \n    self.client.SetClientCustomerId(client_customer_id)\n    selector = {\n        'fields': ['Id', 'Name', 'Status'],\n        'predicates': [\n            {\n                'field': 'CampaignId',\n                'operator': 'EQUALS',\n                'values': [campaign_id]\n            },\n            {\n                'field': 'Status',\n                'operator': 'NOT_EQUALS',\n                'values': ['REMOVED']\n            }\n        ]\n    }\n    adgroups = self.client.GetService('AdGroupService').get(selector)\n\n    if int(adgroups['totalNumEntries']) > 0:\n      return adgroups['entries']\n    else:\n      return None", "docstring": "Retrieves all AdGroups for the given campaign that haven't been removed.\n\nArgs:\nclient_customer_id: str Client Customer Id being used in API request.\ncampaign_id: str id of the campaign for which to fetch ad groups.\n\nReturns:\nlist List of AdGroup data objects.", "source": "juraj-google-style"}
{"code": "def available_partitions_for_topic(self, topic):\n        \n        if topic not in self._partitions:\n            return None\n        return set([partition for partition, metadata\n                              in six.iteritems(self._partitions[topic])\n                              if metadata.leader != -1])", "docstring": "Return set of partitions with known leaders\n\nArguments:\ntopic (str): topic to check for partitions\n\nReturns:\nset: {partition (int), ...}\nNone if topic not found.", "source": "juraj-google-style"}
{"code": "def copy(self, dest):\n        \n        if os.path.isfile(self.path):\n            shutil.copy2(self.path, dest)\n        else:\n            shutil.copytree(self.path, dest, symlinks=False, ignore=None)", "docstring": "Copy item to the given `dest` path.\n\nArgs:\n* dest: destination path to copy.", "source": "juraj-google-style"}
{"code": "def get_voigt_dict(rank):\n    vdict = {}\n    for ind in itertools.product(*([range(3)] * rank)):\n        v_ind = ind[:(rank % 2)]\n        for j in range((rank \n            pos = ((rank % 2) + (2 * j))\n            v_ind += (reverse_voigt_map[ind[pos:(pos + 2)]],)\n        vdict[ind] = v_ind\n    return vdict", "docstring": "Returns a dictionary that maps indices in the tensor to those\nin a voigt representation based on input rank\n\nArgs:\nrank (int): Tensor rank to generate the voigt map", "source": "codesearchnet"}
{"code": "def search_by_age(cls, *, limit=100, page=1, accounts=None, locations=None, age=720, properties=None, include_disabled=False):\n    qry = cls.search(limit=limit, page=page, accounts=accounts, locations=locations, properties=properties, include_disabled=include_disabled, return_query=True)\n    age_alias = aliased(ResourceProperty)\n    qry = qry.join(age_alias, (Resource.resource_id == age_alias.resource_id)).filter((age_alias.name == 'launch_date'), (cast(func.JSON_UNQUOTE(age_alias.value), DATETIME) < (datetime.now() - timedelta(days=age))))\n    total = qry.count()\n    qry = qry.limit(limit)\n    qry = qry.offset((((page - 1) * limit) if (page > 1) else 0))\n    return (total, [cls(x) for x in qry.all()])", "docstring": "Search for resources based on the provided filters\n\nArgs:\nlimit (`int`): Number of results to return. Default: 100\npage (`int`): Pagination offset for results. Default: 1\naccounts (`list` of `int`): A list of account id's to limit the returned resources to\nlocations (`list` of `str`): A list of locations as strings to limit the search for\nage (`int`): Age of instances older than `age` days to return\nproperties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list\nof strings, in which case a boolean OR search is performed on the values\ninclude_disabled (`bool`): Include resources from disabled accounts. Default: False\n\nReturns:\n`list` of `Resource`", "source": "codesearchnet"}
{"code": "def handle_new_task(self, task_name, record):\n    record.msg = ColorFormatter.colored('default', START_TASK_MSG)\n    record.task = task_name\n    self.tasks[task_name] = Task(name=task_name, maxlen=self.buffer_size)\n    if self.should_show_by_depth():\n        self.pretty_emit(record, is_header=True)", "docstring": "Do everything needed when a task is starting\n\nParams:\ntask_name (str): name of the task that is starting\nrecord (logging.LogRecord): log record with all the info\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def add_prefix(self, name, *args, **kwargs):\n    if os.path.exists(self.join(name)):\n        raise LagoPrefixAlreadyExistsError(name, self.path)\n    self.prefixes[name] = self.prefix_class(self.join(name), *args, **kwargs)\n    self.prefixes[name].initialize()\n    if (self.current is None):\n        self.set_current(name)\n    return self.prefixes[name]", "docstring": "Adds a new prefix to the workdir.\n\nArgs:\nname(str): Name of the new prefix to add\n*args: args to pass along to the prefix constructor\n**kwargs: kwargs to pass along to the prefix constructor\n\nReturns:\nThe newly created prefix\n\nRaises:\nLagoPrefixAlreadyExistsError: if prefix name already exists in the\nworkdir", "source": "codesearchnet"}
{"code": "def set_all_ylims(self, ylim, dy, yscale, fontsize=None):\n    self._set_all_lims('y', ylim, dy, yscale, fontsize)\n    return", "docstring": "Set limits and ticks for y axis for whole figure.\n\nThis will set y axis limits and tick marks for the entire figure.\nIt can be overridden in the SinglePlot class.\n\nArgs:\nylim (len-2 list of floats): The limits for the axis.\ndy (float): Amount to increment by between the limits.\nyscale (str): Scale of the axis. Either `log` or `lin`.\nfontsize (int, optional): Set fontsize for y axis tick marks.\nDefault is None.", "source": "codesearchnet"}
{"code": "def filter_params(self, fn, override=None):\n        \n        override = override or {}\n        result = {}\n        for name, value in self.target_params.items():\n            if has_arg(fn, name):\n                result.update({name: value})\n        result.update(override)\n        return result", "docstring": "Filters `target_params` and return those in `fn`'s arguments.\nArgs:\nfn : arbitrary function\noverride: dict, values to override target_params\nReturns:\nresult : dict, dictionary containing variables\nin both target_params and fn's arguments.", "source": "juraj-google-style"}
{"code": "def page_format(self, topmargin, bottommargin):\n        \n        tL = topmargin%256\n        tH = topmargin//256\n        BL = bottommargin%256\n        BH = bottommargin//256\n        if (tL+tH*256) < (BL + BH*256):\n            self.send(chr(27)+'('+'c'+chr(4)+chr(0)+chr(tL)+chr(tH)+chr(BL)+chr(BH))\n        else:\n            raise RuntimeError('The top margin must be less than the bottom margin')", "docstring": "Specify settings for top and bottom margins. Physically printable area depends on media.\n\nArgs:\ntopmargin: the top margin, in dots. The top margin must be less than the bottom margin.\nbottommargin: the bottom margin, in dots. The bottom margin must be greater than the top margin.\nReturns:\nNone\nRaises:\nRuntimeError: If the top margin is not less than the bottom margin.", "source": "juraj-google-style"}
{"code": "def blocking_save(self, query_dict=None, meta=None, index_fields=None):\n    query_dict = (query_dict or {})\n    for query in query_dict:\n        self.setattr(query, query_dict[query])\n    self.save(meta=meta, index_fields=index_fields)\n    while (not self.objects.filter(key=self.key, **query_dict).count()):\n        time.sleep(0.3)\n    return self", "docstring": "Saves object to DB. Waits till the backend properly indexes the new object.\n\nArgs:\nquery_dict(dict) : contains keys - values of  the model fields\nmeta (dict): JSON serializable meta data for logging of save operation.\n{'lorem': 'ipsum', 'dolar': 5}\nindex_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').\nbin is used for string fields, int is used for integer fields.\n[('lorem','bin'),('dolar','int')]\n\n\nReturns:\nModel instance.", "source": "codesearchnet"}
{"code": "def add_alias(self, alias, index):\n    if (index >= len(self._datasets)):\n        raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n    self._aliases[alias] = index", "docstring": "Add an alias pointing to the specified index.\n\nArgs:\nalias (str): The alias that should point to the given index.\nindex (int): The index of the dataset for which an alias should be added.\n\nRaises:\nDataInvalidIndex: If the index does not represent a valid dataset.", "source": "codesearchnet"}
{"code": "def write_alias_config_hash(alias_config_hash='', empty_hash=False):\n    with open(GLOBAL_ALIAS_HASH_PATH, 'w') as alias_config_hash_file:\n        alias_config_hash_file.write(('' if empty_hash else alias_config_hash))", "docstring": "Write self.alias_config_hash to the alias hash file.\n\nArgs:\nempty_hash: True if we want to write an empty string into the file. Empty string in the alias hash file\nmeans that we have to perform a full load of the command table in the next run.", "source": "codesearchnet"}
{"code": "def Current():\n    if os.name == 'nt':\n        return OperatingSystem.WINDOWS\n    elif 'linux' in sys.platform:\n        return OperatingSystem.LINUX\n    elif 'darwin' in sys.platform:\n        return OperatingSystem.MACOSX\n    elif 'cygwin' in sys.platform:\n        return OperatingSystem.CYGWIN\n    elif 'msys' in sys.platform:\n        return OperatingSystem.MSYS\n    return None", "docstring": "Determines the current operating system.\n\nReturns:\nOperatingSystemTuple, One of the OperatingSystem constants or None if it\ncannot be determined.", "source": "github-repos"}
{"code": "def _create_query(node, context):\n    visited_nodes = [node]\n    output_columns = _get_output_columns(visited_nodes, context)\n    filters = _get_filters(visited_nodes, context)\n    selectable = sql_context_helpers.get_node_selectable(node, context)\n    query = select(output_columns).select_from(selectable).where(and_(*filters))\n    return query", "docstring": "Create a query from a SqlNode.\n\nArgs:\nnode: SqlNode, the current node.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nSelectable, selectable of the generated query.", "source": "codesearchnet"}
{"code": "def access(self, path, mode, dir_fd=None, follow_symlinks=None):\n    if ((follow_symlinks is not None) and (sys.version_info < (3, 3))):\n        raise TypeError(\"access() got an unexpected keyword argument 'follow_symlinks'\")\n    path = self._path_with_dir_fd(path, self.access, dir_fd)\n    try:\n        stat_result = self.stat(path, follow_symlinks=follow_symlinks)\n    except OSError as os_error:\n        if (os_error.errno == errno.ENOENT):\n            return False\n        raise\n    if is_root():\n        mode &= (~ os.W_OK)\n    return ((mode & ((stat_result.st_mode >> 6) & 7)) == mode)", "docstring": "Check if a file exists and has the specified permissions.\n\nArgs:\npath: (str) Path to the file.\nmode: (int) Permissions represented as a bitwise-OR combination of\nos.F_OK, os.R_OK, os.W_OK, and os.X_OK.\ndir_fd: If not `None`, the file descriptor of a directory, with\n`path` being relative to this directory.\nNew in Python 3.3.\nfollow_symlinks: (bool) If `False` and `path` points to a symlink,\nthe link itself is queried instead of the linked object.\nNew in Python 3.3.\n\nReturns:\nbool, `True` if file is accessible, `False` otherwise.", "source": "codesearchnet"}
{"code": "def to_number(result_type, value, default=None, minimum=None, maximum=None):\n    try:\n        return capped(result_type(value), minimum, maximum)\n    except (TypeError, ValueError):\n        return default", "docstring": "Cast `value` to numeric `result_type` if possible\n\nArgs:\nresult_type (type): Numerical type to convert to (one of: int, float, ...)\nvalue (str | unicode): Value to convert\ndefault (result_type.__class__ | None): Default to use `value` can't be turned into an int\nminimum (result_type.__class__ | None): If specified, result can't be below this minimum\nmaximum (result_type.__class__ | None): If specified, result can't be above this maximum\n\nReturns:\nCorresponding numeric value", "source": "codesearchnet"}
{"code": "def op(name, data, display_name=None, description=None, collections=None):\n    import tensorflow.compat.v1 as tf\n    if (display_name is None):\n        display_name = name\n    summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)\n    with tf.name_scope(name):\n        with tf.control_dependencies([tf.assert_scalar(data)]):\n            return tf.summary.tensor_summary(name='scalar_summary', tensor=tf.cast(data, tf.float32), collections=collections, summary_metadata=summary_metadata)", "docstring": "Create a legacy scalar summary op.\n\nArguments:\nname: A unique name for the generated summary node.\ndata: A real numeric rank-0 `Tensor`. Must have `dtype` castable\nto `float32`.\ndisplay_name: Optional name for this summary in TensorBoard, as a\nconstant `str`. Defaults to `name`.\ndescription: Optional long-form description for this summary, as a\nconstant `str`. Markdown is supported. Defaults to empty.\ncollections: Optional list of graph collections keys. The new\nsummary op is added to these collections. Defaults to\n`[Graph Keys.SUMMARIES]`.\n\nReturns:\nA TensorFlow summary op.", "source": "codesearchnet"}
{"code": "def _copy(src, dst, src_is_storage, dst_is_storage):\n    \n    \n    if src_is_storage and dst_is_storage:\n        system_src = get_instance(src)\n        system_dst = get_instance(dst)\n\n        \n        if system_src is system_dst:\n\n            \n            if system_src.relpath(src) == system_dst.relpath(dst):\n                raise same_file_error(\n                    \"'%s' and '%s' are the same file\" % (src, dst))\n\n            \n            try:\n                return system_dst.copy(src, dst)\n            except (UnsupportedOperation, ObjectException):\n                pass\n\n        \n        \n        for caller, called, method in (\n                (system_dst, system_src, 'copy_from_%s'),\n                (system_src, system_dst, 'copy_to_%s')):\n            if hasattr(caller, method % called.storage):\n                try:\n                    return getattr(caller, method % called.storage)(\n                        src, dst, called)\n                except (UnsupportedOperation, ObjectException):\n                    continue\n\n    \n    with cos_open(src, 'rb') as fsrc:\n        with cos_open(dst, 'wb') as fdst:\n\n            \n            for stream in (fsrc, fdst):\n                try:\n                    buffer_size = getattr(stream, '_buffer_size')\n                    break\n                except AttributeError:\n                    continue\n            else:\n                buffer_size = COPY_BUFSIZE\n\n            \n            copyfileobj(fsrc, fdst, buffer_size)", "docstring": "Copies file from source to destination\n\nArgs:\nsrc (str or file-like object): Source file.\ndst (str or file-like object): Destination file.\nsrc_is_storage (bool): Source is storage.\ndst_is_storage (bool): Destination is storage.", "source": "juraj-google-style"}
{"code": "def node_inputs(self, node_name, is_control=False, device_name=None):\n    if not self._debug_graphs:\n        raise LookupError('Node inputs are not loaded from partition graphs yet.')\n    device_name = self._infer_device_name(device_name, node_name)\n    if is_control:\n        return self._debug_graphs[device_name].node_ctrl_inputs[node_name]\n    else:\n        return self._debug_graphs[device_name].node_inputs[node_name]", "docstring": "Get the inputs of given node according to partition graphs.\n\nArgs:\nnode_name: Name of the node.\nis_control: (`bool`) Whether control inputs, rather than non-control\ninputs, are to be returned.\ndevice_name: (`str`) name of the device. If there is only one device or if\nnode_name exists on only one device, this argument is optional.\n\nReturns:\n(`list` of `str`) inputs to the node, as a list of node names.\n\nRaises:\nLookupError: If node inputs and control inputs have not been loaded\nfrom partition graphs yet.", "source": "github-repos"}
{"code": "def set_doc_ids(self, doc_ids):\n    if isinstance(doc_ids, list):\n        self.set_documents(dict.fromkeys(doc_ids))\n    else:\n        self.set_documents({doc_ids: None})", "docstring": "Build xml documents from a list of document ids.\n\nArgs:\ndoc_ids -- A document id or a lost of those.", "source": "codesearchnet"}
{"code": "def from_file(cls, weafile, timestep=1, is_leap_year=False):\n        \n        assert os.path.isfile(weafile), 'Failed to find {}'.format(weafile)\n        location = Location()\n        with open(weafile, readmode) as weaf:\n            first_line = weaf.readline()\n            assert first_line.startswith('place'), \\\n                'Failed to find place in header. ' \\\n                '{} is not a valid wea file.'.format(weafile)\n            location.city = ' '.join(first_line.split()[1:])\n            \n            location.latitude = float(weaf.readline().split()[-1])\n            location.longitude = -float(weaf.readline().split()[-1])\n            location.time_zone = -int(weaf.readline().split()[-1]) / 15\n            location.elevation = float(weaf.readline().split()[-1])\n            weaf.readline()  \n\n            \n            direct_normal_irradiance = []\n            diffuse_horizontal_irradiance = []\n            for line in weaf:\n                dirn, difh = [int(v) for v in line.split()[-2:]]\n                direct_normal_irradiance.append(dirn)\n                diffuse_horizontal_irradiance.append(difh)\n\n        return cls.from_values(location, direct_normal_irradiance,\n                               diffuse_horizontal_irradiance, timestep, is_leap_year)", "docstring": "Create wea object from a wea file.\n\nArgs:\nweafile:Full path to wea file.\ntimestep: An optional integer to set the number of time steps per hour.\nDefault is 1 for one value per hour. If the wea file has a time step\nsmaller than an hour adjust this input accordingly.\nis_leap_year: A boolean to indicate if values are representing a leap year.\nDefault is False.", "source": "juraj-google-style"}
{"code": "def get_extana_led(self, cached=True):\n        \n        if cached and self.led_state is not None:\n            return self.led_state\n\n        extana_led = self.get_characteristic_handle_from_uuid(UUID_EXTANA_LED)\n        if extana_led is None:\n            logger.warn('Failed to find handle for ExtAna LED')\n            return None\n\n        rgb = self.dongle._read_attribute(self.conn_handle, extana_led, israw=True)\n        if rgb is None:\n            return rgb\n\n        return list(map(lambda x: int(x * (LED_MAX / INT_LED_MAX)), struct.unpack('<HHH', rgb)))", "docstring": "Returns the current (R, G, B) colour of the SK8-ExtAna LED.\n\nArgs:\ncached (bool): if True, returns the locally cached state of the LED (based\non the last call to :meth:`set_extana_led`). Otherwise query the device\nfor the current state.\n\nReturns:\na 3-tuple (r, g, b) (all unsigned integers) in the range 0-255, or `None` on error.", "source": "juraj-google-style"}
{"code": "def depth_august_average_ground_temperature(self, value=None):\n        \n        if value is not None:\n            try:\n                value = float(value)\n            except ValueError:\n                raise ValueError(\n                    'value {} need to be of type float '\n                    'for field `depth_august_average_ground_temperature`'.format(value))\n\n        self._depth_august_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_august_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_august_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def split(self, amount):\n        \n        split_objs = list(self.all())\n        if not split_objs:\n            raise NoSplitsFoundForRecurringCost()\n\n        portions = [split_obj.portion for split_obj in split_objs]\n\n        split_amounts = ratio_split(amount, portions)\n        return [\n            (split_objs[i], split_amount)\n            for i, split_amount\n            in enumerate(split_amounts)\n        ]", "docstring": "Split the value given by amount according to the RecurringCostSplit's portions\n\nArgs:\namount (Decimal):\n\nReturns:\nlist[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)", "source": "juraj-google-style"}
{"code": "def process_action(resource, action, action_issuer='unknown'):\n    from cinq_collector_aws import AWSRegionCollector\n    func_action = action_mapper[resource.resource_type][action]\n    extra_info = {}\n    action_status = ActionStatus.UNKNOWN\n    if func_action:\n        if (action_mapper[resource.resource_type]['service_name'] == 'lambda'):\n            client = get_aws_session(AWSAccount.get(dbconfig.get('rds_collector_account', AWSRegionCollector.ns, ''))).client('lambda', dbconfig.get('rds_collector_region', AWSRegionCollector.ns, ''))\n        else:\n            client = get_aws_session(AWSAccount(resource.account)).client(action_mapper[resource.resource_type]['service_name'], region_name=resource.location)\n        try:\n            logger.info(f'Trying to {action} resource {resource.id} for account {resource.account.account_name} / region {resource.location}')\n            (action_status, extra_info) = func_action(client, resource)\n            Enforcement.create(resource.account.account_id, resource.id, action, datetime.now(), extra_info)\n        except Exception as ex:\n            action_status = ActionStatus.FAILED\n            logger.exception('Failed to apply action {} to {}: {}'.format(action, resource.id, ex))\n        finally:\n            auditlog(event='{}.{}.{}.{}'.format(action_issuer, resource.resource_type, action, action_status), actor=action_issuer, data={'resource_id': resource.id, 'account_name': resource.account.account_name, 'location': resource.location, 'info': extra_info})\n            return action_status\n    else:\n        logger.error('Failed to apply action {} to {}: Not supported'.format(action, resource.id))\n        return ActionStatus.FAILED", "docstring": "Process an audit action for a resource, if possible\n\nArgs:\nresource (:obj:`Resource`): A resource object to perform the action on\naction (`str`): Type of action to perform (`kill` or `stop`)\naction_issuer (`str`): The issuer of the action\nReturns:\n`ActionStatus`", "source": "codesearchnet"}
{"code": "def accept_confirm(self, text=None, wait=None):\n        \n\n        with self.driver.accept_modal(\"confirm\", text=text, wait=wait):\n            yield", "docstring": "Execute the wrapped code, accepting a confirm.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "juraj-google-style"}
{"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n    local_stream = utils.BytearrayStream()\n    if self._unique_identifier:\n        self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n    if self._cryptographic_parameters:\n        self._cryptographic_parameters.write(local_stream, kmip_version=kmip_version)\n    if self._data:\n        self._data.write(local_stream, kmip_version=kmip_version)\n    else:\n        raise ValueError('invalid payload missing the data attribute')\n    if self._iv_counter_nonce:\n        self._iv_counter_nonce.write(local_stream, kmip_version=kmip_version)\n    self.length = local_stream.length()\n    super(DecryptRequestPayload, self).write(output_stream, kmip_version=kmip_version)\n    output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Decrypt request payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"}
{"code": "def send_put(self, mri, attribute_name, value):\n        \n        q = Queue()\n        request = Put(\n            path=[mri, attribute_name, \"value\"],\n            value=value)\n        request.set_callback(q.put)\n        IOLoopHelper.call(self._send_request, request)\n        response = q.get()\n        if isinstance(response, Error):\n            raise response.message\n        else:\n            return response.value", "docstring": "Abstract method to dispatch a Put to the server\n\nArgs:\nmri (str): The mri of the Block\nattribute_name (str): The name of the Attribute within the Block\nvalue: The value to put", "source": "juraj-google-style"}
{"code": "def go_in(self, vertex):\n    if self.vertex_in:\n        self.vertex_in.edges_in.remove(self)\n    self.vertex_in = vertex\n    vertex.edges_in.add(self)", "docstring": "Tell the edge to go into this vertex.\n\nArgs:\nvertex (Vertex): vertex to go into.", "source": "codesearchnet"}
{"code": "def make_selector(value):\n    \n    if is_callable(value):\n        return value\n    if is_string(value):\n        return a_(value)\n    raise ValueError(\"Unable to create callable selector from '{0}'\".format(value))", "docstring": "Create a selector callable from the supplied value.\n\nArgs:\nvalue: If it is a callable, then it is returned unchanged.  If a string is used\nthen create an attribute selector. If an integer is used then\ncreate a key selector.\n\nReturns:\nA callable selector based on the supplied value.\n\nRaises:\nValueError: If a selector cannot be created from the value.", "source": "juraj-google-style"}
{"code": "def _parse_line(self, line_no, line):\n        \n\n        try:\n            matched = statement.parseString(line)\n        except ParseException as exc:\n            raise DataError(\"Error parsing line in TileBus file\", line_number=line_no, column=exc.col, contents=line)\n\n        if 'symbol' in matched:\n            self._parse_cmd(matched)\n        elif 'filename' in matched:\n            self._parse_include(matched)\n        elif 'variable' in matched:\n            self._parse_assignment(matched)\n        elif 'configvar' in matched:\n            self._parse_configvar(matched)", "docstring": "Parse a line in a TileBus file\n\nArgs:\nline_no (int): The line number for printing useful error messages\nline (string): The line that we are trying to parse", "source": "juraj-google-style"}
{"code": "def __init__(self, identifier, configuration):\n    \n    super(GuppyMemoryProfiler, self).__init__()\n    self._identifier = identifier\n    self._path = configuration.directory\n    self._profiling_sample = 0\n    self._profiling_sample_rate = configuration.sample_rate\n    self._heapy = None\n    self._sample_file = '{0!s}.hpy'.format(identifier)\n\n    if self._path:\n      self._sample_file = os.path.join(self._path, self._sample_file)\n\n    if hpy:\n      self._heapy = hpy()", "docstring": "Initializes a memory profiler.\n\nArgs:\nidentifier (str): unique name of the profile.\nconfiguration (ProfilingConfiguration): profiling configuration.", "source": "juraj-google-style"}
{"code": "def _close_rpc_interface(self, connection_id, callback):\n        \n\n        try:\n            context = self.connections.get_context(connection_id)\n        except ArgumentError:\n            callback(connection_id, self.id, False, \"Could not find connection information\")\n            return\n\n        self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))\n\n        try:\n            service = context['services'][TileBusService]\n            header_characteristic = service[ReceiveHeaderChar]\n            payload_characteristic = service[ReceivePayloadChar]\n        except KeyError:\n            self.connections.finish_operation(connection_id, False, \"Can't find characteristics to open rpc interface\")\n            return\n\n        self.bable.set_notification(\n            enabled=False,\n            connection_handle=context['connection_handle'],\n            characteristic=header_characteristic,\n            on_notification_set=[self._on_interface_closed, context, payload_characteristic],\n            timeout=1.0\n        )", "docstring": "Disable RPC interface for this IOTile device\n\nArgs:\nconnection_id (int): The unique identifier for the connection\ncallback (callback): Callback to be called when this command finishes\ncallback(conn_id, adapter_id, success, failure_reason)", "source": "juraj-google-style"}
{"code": "def rename(df, **kwargs):\n    \n\n    return df.rename(columns={v: k for k, v in kwargs.items()})", "docstring": "Renames columns, where keyword argument values are the current names\nof columns and keys are the new names.\n\nArgs:\ndf (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe.\n\nKwargs:\n**kwargs: key:value pairs where keys are new names for columns and\nvalues are current names of columns.", "source": "juraj-google-style"}
{"code": "def __init__(self, name, value):\n        \n        acceptable_types = [basestring, bool, list, int]\n        acceptable = False\n        for acceptable_type in acceptable_types:\n            if isinstance(value, acceptable_type):\n                acceptable = True\n                if acceptable_type == bool:\n                    logger.debug(\"Converting parameter %s boolean '%s' \"\n                                 \"to string.\", name, value)\n                    value = str(value).lower()\n                    break\n\n                if acceptable_type == int:\n                    logger.debug(\"Converting parameter %s integer '%s' \"\n                                 \"to string.\", name, value)\n                    value = str(value)\n                    break\n\n        if not acceptable:\n            raise ValueError(\n                \"CFNParameter (%s) value must be one of %s got: %s\" % (\n                    name, \"str, int, bool, or list\", value))\n\n        self.name = name\n        self.value = value", "docstring": "Wrapper around a value to indicate a CloudFormation Parameter.\n\nArgs:\nname (str): the name of the CloudFormation Parameter\nvalue (str, list, int or bool): the value we're going to submit as\na CloudFormation Parameter.", "source": "juraj-google-style"}
{"code": "def _RemoveIllegalXMLCharacters(self, xml_string):\n    if (not isinstance(xml_string, py2to3.STRING_TYPES)):\n        return xml_string\n    return self._ILLEGAL_XML_RE.sub('�', xml_string)", "docstring": "Removes illegal characters for XML.\n\nIf the input is not a string it will be returned unchanged.\n\nArgs:\nxml_string (str): XML with possible illegal characters.\n\nReturns:\nstr: XML where all illegal characters have been removed.", "source": "codesearchnet"}
{"code": "def __wizard(rho, epsilon=None):\n    if (epsilon is None):\n        epsilon = 0.0\n    dim = len(rho)\n    rho_wizard = np.zeros([dim, dim])\n    (v, w) = np.linalg.eigh(rho)\n    for j in range(dim):\n        if (v[j] < epsilon):\n            tmp = v[j]\n            v[j] = 0.0\n            x = 0.0\n            for k in range((j + 1), dim):\n                x += (tmp / (dim - (j + 1)))\n                v[k] = (v[k] + (tmp / (dim - (j + 1))))\n    for j in range(dim):\n        rho_wizard = (rho_wizard + (v[j] * outer(w[:, j])))\n    return rho_wizard", "docstring": "Returns the nearest positive semidefinite operator to an operator.\n\nThis method is based on reference [1]. It constrains positivity\nby setting negative eigenvalues to zero and rescaling the positive\neigenvalues.\n\nArgs:\nrho (array_like): the input operator.\nepsilon(float or None): threshold (>=0) for truncating small\neigenvalues to zero.\n\nReturns:\nnumpy.array: A positive semidefinite numpy array.", "source": "codesearchnet"}
{"code": "def send_graph_tracebacks(destinations, run_key, origin_stack, graph, send_source=True):\n    _send_call_tracebacks(destinations, origin_stack, is_eager_execution=False, call_key=run_key, graph=graph, send_source=send_source)", "docstring": "Send the tracebacks of a graph execution call to debug server(s).\n\nArgs:\ndestinations: gRPC destination addresses, a `str` or a `list` of `str`s,\ne.g., \"localhost:4242\". If a `list`, gRPC requests containing the same\n`CallTraceback` proto payload will be sent to all the destinations.\nrun_key: A string describing the feeds, fetches (and targets) names of the\n`tf.Session.run` call.\norigin_stack: The traceback of the `tf.Session.run()` invocation.\ngraph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),\nwhich contains op tracebacks.\nsend_source: Whether the source files involved in the op tracebacks but\noutside the TensorFlow library are to be sent.", "source": "github-repos"}
{"code": "def sample_shape_tensor(self, name=\"sample_shape_tensor\"):\n    \n    with tf.compat.v1.name_scope(name):\n      if isinstance(self._sample_shape, tf.Tensor):\n        return self._sample_shape\n      return tf.convert_to_tensor(\n          value=self.sample_shape.as_list(), dtype=tf.int32)", "docstring": "Sample shape of random variable as a 1-D `Tensor`.\n\nArgs:\nname: name to give to the op\n\nReturns:\nsample_shape: `Tensor`.", "source": "juraj-google-style"}
{"code": "def _GetPathSegmentIndexForSimilarityWeights(self, similarity_weights, occurrence_weights, value_weights):\n    largest_weight = similarity_weights.GetLargestWeight()\n    if (largest_weight > 0):\n        similarity_weight_indexes = similarity_weights.GetIndexesForWeight(largest_weight)\n        number_of_similarity_indexes = len(similarity_weight_indexes)\n    else:\n        number_of_similarity_indexes = 0\n    path_segment_index = None\n    if (number_of_similarity_indexes == 0):\n        path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(occurrence_weights, value_weights)\n    elif (number_of_similarity_indexes == 1):\n        path_segment_index = similarity_weight_indexes[0]\n    else:\n        largest_weight = 0\n        largest_value_weight = 0\n        for similarity_index in similarity_weight_indexes:\n            occurrence_weight = occurrence_weights.GetWeightForIndex(similarity_index)\n            if ((largest_weight > 0) and (largest_weight == occurrence_weight)):\n                value_weight = value_weights.GetWeightForIndex(similarity_index)\n                if (largest_value_weight < value_weight):\n                    largest_weight = 0\n            if ((not path_segment_index) or (largest_weight < occurrence_weight)):\n                largest_weight = occurrence_weight\n                path_segment_index = similarity_index\n                largest_value_weight = value_weights.GetWeightForIndex(similarity_index)\n    return path_segment_index", "docstring": "Retrieves the index of the path segment based on similarity weights.\n\nArgs:\nsimilarity_weights: the similarity weights object (instance of\n_PathSegmentWeights).\noccurrence_weights: the occurrence weights object (instance of\n_PathSegmentWeights).\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.", "source": "codesearchnet"}
{"code": "def __getitem__(self, key: Union[int, str]) -> Node:\n        \n        node: Node = None\n        if isinstance(key, int):\n            node = self._nodes.get(key)\n        if isinstance(key, str):\n            node = self._node_name_map.get(key)\n\n        if node is None:\n            raise IndexError(\"Invalid key.\")\n\n        return node", "docstring": "Returns the node corresponding to the given key.\n\nIf the given key is an integer, then the node with the given index will be returned.\n\nIf the given key is a string, then the node with the given name will be returned.\n\nArguments:\nkey (Union[int, str]): The key that identifies the node to return.\n\nRaises:\nIndexError: If the index is invalid or out of range.", "source": "juraj-google-style"}
{"code": "def log_coroutine(self, cor, *args, **kwargs):\n    if self.stopping:\n        raise LoopStoppingError(('Could not launch coroutine because loop is shutting down: %s' % cor))\n    self.start()\n    cor = _instaniate_coroutine(cor, args, kwargs)\n\n    def _run_and_log():\n        task = self.loop.create_task(cor)\n        task.add_done_callback((lambda x: _log_future_exception(x, self._logger)))\n    if self.inside_loop():\n        _run_and_log()\n    else:\n        self.loop.call_soon_threadsafe(_run_and_log)", "docstring": "Run a coroutine logging any exception raised.\n\nThis routine will not block until the coroutine is finished\nnor will it return any result.  It will just log if any\nexception is raised by the coroutine during operation.\n\nIt is safe to call from both inside and outside the event loop.\n\nThere is no guarantee on how soon the coroutine will be scheduled.\n\nArgs:\ncor (coroutine): The coroutine that we wish to run in the\nbackground and wait until it finishes.", "source": "codesearchnet"}
{"code": "def check_peft_version(min_version: str) -> None:\n    if not is_peft_available():\n        raise ValueError('PEFT is not installed. Please install it with `pip install peft`')\n    is_peft_version_compatible = version.parse(importlib.metadata.version('peft')) >= version.parse(min_version)\n    if not is_peft_version_compatible:\n        raise ValueError(f'The version of PEFT you are using is not compatible, please use a version that is greater than {min_version}')", "docstring": "Checks if the version of PEFT is compatible.\n\nArgs:\nversion (`str`):\nThe version of PEFT to check against.", "source": "github-repos"}
{"code": "def divide(x1, x2, output_shape=None, name=None):\n    output_shape = convert_to_shape(output_shape)\n    if (not isinstance(x2, Tensor)):\n        return ScalarMultiplyOperation(x1, (1.0 / x2)).outputs[0]\n    with tf.name_scope(name, default_name='divide'):\n        (x1, x2) = binary_arguments_to_tensors(x1, x2)\n        return multiply(x1, reciprocal(x2), output_shape=output_shape)", "docstring": "Binary division with broadcasting.\n\nArgs:\nx1: a Tensor\nx2: a Tensor\noutput_shape: an optional Shape\nname: an optional string\nReturns:\na Tensor", "source": "codesearchnet"}
{"code": "def Put(self, message, block=True, timeout=1000):\n    message = message.SerializeToString()\n    if (not block):\n        if self.Full():\n            raise queue.Full\n    else:\n        t0 = time.time()\n        while self.Full():\n            time.sleep(1)\n            self._heart_beat_cb()\n            if ((time.time() - t0) > timeout):\n                raise queue.Full\n    with self._lock:\n        self._queue.appendleft(message)\n        self._total_size += len(message)", "docstring": "Put a message on the queue, blocking if it is too full.\n\nBlocks when the queue contains more than the threshold.\n\nArgs:\nmessage: rdf_flows.GrrMessage The message to put.\nblock: bool If True, we block and wait for the queue to have more space.\nOtherwise, if the queue is full, we raise.\ntimeout: int Maximum time (in seconds, with 1 sec resolution) we spend\nwaiting on the queue.\n\nRaises:\nqueue.Full: if the queue is full and block is False, or\ntimeout is exceeded.", "source": "codesearchnet"}
{"code": "def create_room(self, alias=None, is_public=False, invitees=None):\n    response = self.api.create_room(alias=alias, is_public=is_public, invitees=invitees)\n    return self._mkroom(response['room_id'])", "docstring": "Create a new room on the homeserver.\n\nArgs:\nalias (str): The canonical_alias of the room.\nis_public (bool):  The public/private visibility of the room.\ninvitees (str[]): A set of user ids to invite into the room.\n\nReturns:\nRoom\n\nRaises:\nMatrixRequestError", "source": "codesearchnet"}
{"code": "def transition_block(x, reduction, name):\n    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_bn')(x)\n    x = layers.Activation('relu', name=name + '_relu')(x)\n    x = layers.Conv2D(int(x.shape[bn_axis] * reduction), 1, use_bias=False, name=name + '_conv')(x)\n    x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)\n    return x", "docstring": "A transition block.\n\nArgs:\nx: input tensor.\nreduction: float, compression rate at transition layers.\nname: string, block label.\n\nReturns:\nOutput tensor for the block.", "source": "github-repos"}
{"code": "def get_dimensions(js_dict, naming):\n    dimensions = []\n    dim_names = []\n    if check_version_2(js_dict):\n        dimension_dict = js_dict\n    else:\n        dimension_dict = js_dict['dimension']\n    for dim in dimension_dict['id']:\n        dim_name = js_dict['dimension'][dim]['label']\n        if (not dim_name):\n            dim_name = dim\n        if (naming == 'label'):\n            dim_label = get_dim_label(js_dict, dim)\n            dimensions.append(dim_label)\n            dim_names.append(dim_name)\n        else:\n            dim_index = get_dim_index(js_dict, dim)\n            dimensions.append(dim_index)\n            dim_names.append(dim)\n    return (dimensions, dim_names)", "docstring": "Get dimensions from input data.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\nnaming (string, optional): dimension naming. Possible values: 'label' \\\nor 'id'.\n\nReturns:\ndimensions (list): list of pandas data frames with dimension \\\ncategory data.\ndim_names (list): list of strings with dimension names.", "source": "codesearchnet"}
{"code": "def unpatchify(self, patchified_pixel_values, original_image_size: Optional[Tuple[int, int]]=None):\n    patch_size, num_channels = (self.config.patch_size, self.config.num_channels)\n    original_image_size = original_image_size if original_image_size is not None else (self.config.image_size, self.config.image_size)\n    original_height, original_width = original_image_size\n    num_patches_h = original_height // patch_size\n    num_patches_w = original_width // patch_size\n    tf.debugging.assert_equal(num_patches_h * num_patches_w, shape_list(patchified_pixel_values)[1], message=f'The number of patches in the patchified pixel values is {shape_list(patchified_pixel_values)[1]} does not match the patches of original image {num_patches_w}*{num_patches_h}')\n    batch_size = shape_list(patchified_pixel_values)[0]\n    patchified_pixel_values = tf.reshape(patchified_pixel_values, (batch_size, num_patches_h, num_patches_w, patch_size, patch_size, num_channels))\n    patchified_pixel_values = tf.einsum('nhwpqc->nhpwqc', patchified_pixel_values)\n    pixel_values = tf.reshape(patchified_pixel_values, (batch_size, num_patches_h * patch_size, num_patches_w * patch_size, num_channels))\n    return pixel_values", "docstring": "Args:\npatchified_pixel_values (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\nPatchified pixel values.\noriginal_image_size (`Tuple[int, int]`, *optional*):\nOriginal image size.\n\nReturns:\n`tf.Tensor` of shape `(batch_size, height, width, num_channels)`:\nPixel values.", "source": "github-repos"}
{"code": "def read_until(self, s, echo=None):\n    s_len = len(s)\n    buf = self.read(s_len, echo)\n    while (buf[(- s_len):] != s):\n        buf += self.read(1, echo)\n    return buf", "docstring": "Read until a certain string is encountered..\n\nArgs:\ns(bytes): The string to wait for.\necho(bool): Whether to write the read data to stdout.\n\nReturns:\nbytes: The data up to and including *s*.\n\nRaises:\nEOFError: If the channel was closed.", "source": "codesearchnet"}
{"code": "def _vec(A):\n    \n    N, m, n = A.shape\n    return A.reshape((N, m*n, 1), order='F')", "docstring": "Linear operator _vec() from Wiktorsson2001 p478\nArgs:\nA: a rank 3 array of shape N x m x n, giving a matrix A[j] for each\ninterval of time j in 0..N-1\nReturns:\narray of shape N x mn x 1, made by stacking the columns of matrix A[j] on\ntop of each other, for each j in 0..N-1", "source": "juraj-google-style"}
{"code": "def with_transform(self, transform: MLTransformProvider):\n    self._validate_transform(transform)\n    self.transforms.append(transform)\n    return self", "docstring": "Add a transform to the MLTransform pipeline.\nArgs:\ntransform: A BaseOperation instance.\nReturns:\nA MLTransform instance.", "source": "github-repos"}
{"code": "def __init__(self, channel):\n    \n    self.Log = channel.unary_unary(\n        '/pulumirpc.Engine/Log',\n        request_serializer=engine__pb2.LogRequest.SerializeToString,\n        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n        )\n    self.GetRootResource = channel.unary_unary(\n        '/pulumirpc.Engine/GetRootResource',\n        request_serializer=engine__pb2.GetRootResourceRequest.SerializeToString,\n        response_deserializer=engine__pb2.GetRootResourceResponse.FromString,\n        )\n    self.SetRootResource = channel.unary_unary(\n        '/pulumirpc.Engine/SetRootResource',\n        request_serializer=engine__pb2.SetRootResourceRequest.SerializeToString,\n        response_deserializer=engine__pb2.SetRootResourceResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def input_vars(self, transitive: bool=False) -> Set[str]:\n    input_vars = set()\n\n    def list_var_refs(k, v, p):\n        del k, p\n        if isinstance(v, Function):\n            return pg.TraverseAction.CONTINUE\n        if isinstance(v, SymbolReference):\n            input_vars.add(v.name)\n        return pg.TraverseAction.ENTER\n    pg.traverse(self, list_var_refs)\n    if transitive:\n        parent_func = self.parent_func()\n        if parent_func is not None:\n            unresolved_vars = input_vars.copy()\n            for i in reversed(range(self.line_number())):\n                line = parent_func.body[i]\n                line_output_vars = line.output_vars()\n                if line_output_vars & unresolved_vars:\n                    line_input_vars = line.input_vars()\n                    input_vars.update(line_input_vars)\n                    unresolved_vars -= line_output_vars\n                    unresolved_vars.update(line_input_vars)\n            assert unresolved_vars.issubset(set(parent_func.args)), unresolved_vars\n    return input_vars", "docstring": "Returns the input context from this code entity.\n\nArgs:\ntransitive: If True, transitive input context will be included.\n\nReturns:\nA set of context.", "source": "github-repos"}
{"code": "def check_config(config, path):\n    \n    messages = []\n\n    config_copy = get_frozen_copy(config)\n    missing_keys = set(DEFAULT_CONFIG.keys()) - set(config_copy.keys())\n    if missing_keys:\n        messages.append(\"Missing config keys {}!\".format(missing_keys))\n\n    for key, value in config_copy.items():\n        if key not in DEFAULT_CONFIG:\n            messages.append(\"Unknown key {} in {}!\".format(key, path))\n            continue\n        if value is None:\n            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))\n        else:\n            value_type = type(value)\n            if isinstance(DEFAULT_CONFIG[key], Mapping) and 'by-cot-product' in DEFAULT_CONFIG[key]:\n                default_type = type(DEFAULT_CONFIG[key]['by-cot-product'][config['cot_product']])\n            else:\n                default_type = type(DEFAULT_CONFIG[key])\n            if value_type is not default_type:\n                messages.append(\n                    \"{} {}: type {} is not {}!\".format(path, key, value_type, default_type)\n                )\n        if value in (\"...\", b\"...\"):\n            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))\n        if key in (\"provisioner_id\", \"worker_group\", \"worker_type\", \"worker_id\") and not _is_id_valid(value):\n            messages.append('{} doesn\\'t match \"{}\" (required by Taskcluster)'.format(key, _GENERIC_ID_REGEX.pattern))\n    return messages", "docstring": "Validate the config against DEFAULT_CONFIG.\n\nAny unknown keys or wrong types will add error messages.\n\nArgs:\nconfig (dict): the running config.\npath (str): the path to the config file, used in error messages.\n\nReturns:\nlist: the error messages found when validating the config.", "source": "juraj-google-style"}
{"code": "def rename(name, new_name):\n    \n    if six.PY2:\n        name = _to_unicode(name)\n        new_name = _to_unicode(new_name)\n\n    \n    current_info = info(name)\n    if not current_info:\n        raise CommandExecutionError('User \\'{0}\\' does not exist'.format(name))\n\n    \n    new_info = info(new_name)\n    if new_info:\n        raise CommandExecutionError(\n            'User \\'{0}\\' already exists'.format(new_name)\n        )\n\n    \n    \n    with salt.utils.winapi.Com():\n        c = wmi.WMI(find_classes=0)\n\n    \n    try:\n        user = c.Win32_UserAccount(Name=name)[0]\n    except IndexError:\n        raise CommandExecutionError('User \\'{0}\\' does not exist'.format(name))\n\n    \n    result = user.Rename(new_name)[0]\n\n    \n    if not result == 0:\n        \n        error_dict = {0: 'Success',\n                      1: 'Instance not found',\n                      2: 'Instance required',\n                      3: 'Invalid parameter',\n                      4: 'User not found',\n                      5: 'Domain not found',\n                      6: 'Operation is allowed only on the primary domain controller of the domain',\n                      7: 'Operation is not allowed on the last administrative account',\n                      8: 'Operation is not allowed on specified special groups: user, admin, local, or guest',\n                      9: 'Other API error',\n                      10: 'Internal error'}\n        raise CommandExecutionError(\n            'There was an error renaming \\'{0}\\' to \\'{1}\\'. Error: {2}'\n            .format(name, new_name, error_dict[result])\n        )\n\n    return info(new_name).get('name') == new_name", "docstring": "Change the username for a named user\n\nArgs:\nname (str): The user name to change\n\nnew_name (str): The new name for the current user\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' user.rename jsnuffy jshmoe", "source": "juraj-google-style"}
{"code": "def get(feature_name):\n    implementations = MetadataExtractor._implementations()\n    try:\n        return implementations[feature_name]\n    except KeyError:\n        raise UnsupportedFeatureException('no MetadataExtractor registered for feature \"{feature_name}\" (try any of the following: {supported_features})'.format(feature_name=feature_name, supported_features=', '.join(sorted(implementations))))", "docstring": "Returns the MetadataExtractor that can extract information about the\nprovided feature name.\n\nRaises:\nUnsupportedFeature: If no extractor exists for the feature name.", "source": "codesearchnet"}
{"code": "def pooled_sample_variance(sample1, sample2):\n    \n    deg_freedom = len(sample1) + len(sample2) - 2\n    mean1 = statistics.mean(sample1)\n    squares1 = ((x - mean1) ** 2 for x in sample1)\n    mean2 = statistics.mean(sample2)\n    squares2 = ((x - mean2) ** 2 for x in sample2)\n\n    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)", "docstring": "Find the pooled sample variance for two samples.\n\nArgs:\nsample1: one sample.\nsample2: the other sample.\n\nReturns:\nPooled sample variance, as a float.", "source": "juraj-google-style"}
{"code": "def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):\n    line = clean_lines.elided[linenum]\n    fncall = line\n    for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'):\n        match = Search(pattern, line)\n        if match:\n            fncall = match.group(1)\n            break\n    if ((not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall)) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall))):\n        if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):\n            error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call')\n        elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall):\n            error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (')\n        if (Search('\\w\\s+\\(', fncall) and (not Search('_{0,2}asm_{0,2}\\s+_{0,2}volatile_{0,2}\\s+\\(', fncall)) and (not Search('#\\s*define|typedef|using\\s+\\w+\\s*=', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall)) and (not Search('\\bcase\\s+\\(', fncall))):\n            if Search('\\boperator_*\\b', line):\n                error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call')\n            else:\n                error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call')\n        if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall):\n            if Search('^\\s+\\)', fncall):\n                error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line')\n            else:\n                error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')", "docstring": "Checks for the correctness of various spacing around function calls.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"}
{"code": "def __schemas_descriptor(self):\n    result = {}\n    for (schema_key, schema_value) in self.__parser.schemas().iteritems():\n        field_keys = schema_value.keys()\n        key_result = {}\n        if ('properties' in field_keys):\n            key_result['properties'] = schema_value['properties'].copy()\n            for (prop_key, prop_value) in schema_value['properties'].iteritems():\n                if ('enum' in prop_value):\n                    num_enums = len(prop_value['enum'])\n                    key_result['properties'][prop_key]['enumDescriptions'] = ([''] * num_enums)\n                elif ('default' in prop_value):\n                    if (prop_value.get('type') == 'boolean'):\n                        prop_value['default'] = ('true' if prop_value['default'] else 'false')\n                    else:\n                        prop_value['default'] = str(prop_value['default'])\n                key_result['properties'][prop_key].pop('required', None)\n        for key in ('type', 'id', 'description'):\n            if (key in field_keys):\n                key_result[key] = schema_value[key]\n        if key_result:\n            result[schema_key] = key_result\n    for schema_value in result.itervalues():\n        for field_value in schema_value.itervalues():\n            if isinstance(field_value, dict):\n                if ('$ref' in field_value):\n                    field_value['type'] = 'object'\n    return result", "docstring": "Describes the schemas section of the discovery document.\n\nReturns:\nDictionary describing the schemas of the document.", "source": "codesearchnet"}
{"code": "def validate(self, read_tuple_name):\n        \n        if reg_lrn.match(read_tuple_name) is None:\n            self.report_error(\n                read_tuple_name=read_tuple_name,\n                error_name=\"wrong_read_tuple_name_structure\",\n                message=\"'{}' is not matched\".format(reg_lrn),\n            )\n        else:\n            parts = read_tuple_name.split(\"__\")\n\n            if reg_prefix_part.match(parts[0]) is None:\n                self.report_error(\n                    read_tuple_name=read_tuple_name,\n                    error_name=\"wrong_prefix_part\",\n                    message=\"'{}' is not matched\".format(reg_prefix_part),\n                )\n\n            if reg_id_part.match(parts[1]) is None:\n                self.report_error(\n                    read_tuple_name=read_tuple_name,\n                    error_name=\"wrong_id_part\",\n                    message=\"'{}' is not matched\".format(reg_id_part),\n                )\n\n            if reg_segmental_part.match(parts[2]) is None:\n                self.report_error(\n                    read_tuple_name=read_tuple_name,\n                    error_name=\"wrong_segmental_part\",\n                    message=\"'{}' is not matched\".format(reg_segmental_part),\n                )\n\n            if reg_suffix_part.match(parts[3]) is None:\n                self.report_error(\n                    read_tuple_name=read_tuple_name,\n                    error_name=\"wrong_suffix_part\",\n                    message=\"'{}' is not matched\".format(reg_suffix_part),\n                )\n\n            if not self.rnf_profile.check(read_tuple_name):\n                self.report_error(\n                    read_tuple_name=read_tuple_name,\n                    error_name=\"wrong_profile\",\n                    message=\"Read has a wrong profile (wrong widths). It should be: {} but it is: {}.\".format(\n                        self.rnf_profile,\n                        rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),\n                    ),\n                    warning=True,\n                )", "docstring": "Check RNF validity of a read tuple.\n\nArgs:\nread_tuple_name (str): Read tuple name to be checked.s", "source": "juraj-google-style"}
{"code": "def __init__(self, validate_args=False, name=\"absolute_value\"):\n    \n    self._graph_parents = []\n    self._name = name\n\n    with self._name_scope(\"init\"):\n      super(AbsoluteValue, self).__init__(\n          forward_min_event_ndims=0,\n          validate_args=validate_args,\n          name=name)", "docstring": "Instantiates the `AbsoluteValue` bijector.\n\nArgs:\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness, in particular whether inputs to `inverse` and\n`inverse_log_det_jacobian` are non-negative.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"}
{"code": "def create(self, members=(), admins=()):\n    memberObjs = [{'id': '8:{0}'.format(self.skype.userId), 'role': 'Admin'}]\n    for id in members:\n        if (id == self.skype.userId):\n            continue\n        memberObjs.append({'id': '8:{0}'.format(id), 'role': ('Admin' if (id in admins) else 'User')})\n    resp = self.skype.conn('POST', '{0}/threads'.format(self.skype.conn.msgsHost), auth=SkypeConnection.Auth.RegToken, json={'members': memberObjs})\n    return self.chat(resp.headers['Location'].rsplit('/', 1)[1])", "docstring": "Create a new group chat with the given users.\n\nThe current user is automatically added to the conversation as an admin.  Any other admin identifiers must also\nbe present in the member list.\n\nArgs:\nmembers (str list): user identifiers to initially join the conversation\nadmins (str list): user identifiers to gain admin privileges", "source": "codesearchnet"}
{"code": "def _check_properties(cls, property_names, require_indexed=True):\n    \n    assert isinstance(property_names, (list, tuple)), repr(property_names)\n    for name in property_names:\n      assert isinstance(name, basestring), repr(name)\n      if '.' in name:\n        name, rest = name.split('.', 1)\n      else:\n        rest = None\n      prop = cls._properties.get(name)\n      if prop is None:\n        cls._unknown_property(name)\n      else:\n        prop._check_property(rest, require_indexed=require_indexed)", "docstring": "Internal helper to check the given properties exist and meet specified\nrequirements.\n\nCalled from query.py.\n\nArgs:\nproperty_names: List or tuple of property names -- each being a string,\npossibly containing dots (to address subproperties of structured\nproperties).\n\nRaises:\nInvalidPropertyError if one of the properties is invalid.\nAssertionError if the argument is not a list or tuple of strings.", "source": "juraj-google-style"}
{"code": "def attention_lm_decoder(decoder_input,\n                         decoder_self_attention_bias,\n                         hparams,\n                         name=\"decoder\"):\n  \n  x = decoder_input\n  with tf.variable_scope(name):\n    for layer in range(hparams.num_hidden_layers):\n      with tf.variable_scope(\"layer_%d\" % layer):\n        with tf.variable_scope(\"self_attention\"):\n          y = common_attention.multihead_attention(\n              common_layers.layer_preprocess(\n                  x, hparams), None, decoder_self_attention_bias,\n              hparams.attention_key_channels or hparams.hidden_size,\n              hparams.attention_value_channels or hparams.hidden_size,\n              hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)\n          x = common_layers.layer_postprocess(x, y, hparams)\n        with tf.variable_scope(\"ffn\"):\n          y = common_layers.conv_hidden_relu(\n              common_layers.layer_preprocess(x, hparams),\n              hparams.filter_size,\n              hparams.hidden_size,\n              dropout=hparams.relu_dropout)\n          x = common_layers.layer_postprocess(x, y, hparams)\n    return common_layers.layer_preprocess(x, hparams)", "docstring": "A stack of attention_lm layers.\n\nArgs:\ndecoder_input: a Tensor\ndecoder_self_attention_bias: bias Tensor for self-attention\n(see common_attention.attention_bias())\nhparams: hyperparameters for model\nname: a string\n\nReturns:\ny: a Tensors", "source": "juraj-google-style"}
{"code": "def store_to_file(self, filename):\n    \n    with tf.gfile.Open(filename, \"w\") as f:\n      for i in range(len(self._id_to_token)):\n        f.write(self._id_to_token[i] + \"\\n\")", "docstring": "Write vocab file to disk.\n\nVocab files have one token per line. The file ends in a newline. Reserved\ntokens are written to the vocab file as well.\n\nArgs:\nfilename: Full path of the file to store the vocab to.", "source": "juraj-google-style"}
{"code": "def cluster_spec(self):\n    tf_config = _load_tf_config(self._port)\n    if 'cluster' not in tf_config:\n        return ClusterSpec({})\n    return ClusterSpec(tf_config['cluster'])", "docstring": "Returns a ClusterSpec based on the SageMaker environment variables.\n\nReturns:\nA ClusterSpec with information from the SageMaker environment variables.", "source": "github-repos"}
{"code": "def __init__(self, req, config, section):\n    self.req = req\n    self.exclude = None\n    self.include = None\n    self.range = [None, None]\n    self.config = config\n    self._req_type = ''\n    self._section = section\n    self._initialized = None\n    self._error_message = []\n    self.parse_single_req()", "docstring": "Initializes a version or dependency requirement object.\n\nArgs:\nreq: List that contains individual supported versions or a single string\nthat contains `range` definition.\ne.g. [`range(1.0, 2.0) include(3.0) exclude(1.5)`]\ne.g. [`1.0`, `3.0`, `7.1`]\nconfig: String that is the configuration name.\ne.g. `platform`\nsection: String that is the section name from the `.ini` config file\nunder which the requirement is defined.\ne.g. `Required`, `Optional`, `Unsupported`, `Dependency`", "source": "github-repos"}
{"code": "def GetUser(self, sid=None, uid=None, username=None):\n    if sid:\n        for user in self.users:\n            if (user.sid == sid):\n                return user\n        return None\n    if uid:\n        for user in self.users:\n            if (user.uid == uid):\n                return user\n    if username:\n        for user in self.users:\n            if (user.username == username):\n                if (uid and user.uid and (user.uid != uid)):\n                    return None\n                else:\n                    return user", "docstring": "Retrieve a User based on sid, uid or username.\n\nOn windows we first get a SID and use it to find the username.  We want to\navoid combining users with name collisions, which occur when local users\nhave the same username as domain users (something like Admin is particularly\ncommon).  So if a SID is provided, don't also try to match by username.\n\nOn linux we first get a username, then use this to find the UID, so we want\nto combine these records or we end up with multiple partially-filled user\nrecords.\n\nTODO(user): this won't work at all well with a query for uid=0 because\nthat is also the default for User objects that don't have uid\nset.\n\nArgs:\nsid: Windows user sid\nuid: Linux/Darwin user id\nusername: string\n\nReturns:\nrdf_client.User or None", "source": "codesearchnet"}
{"code": "def ParseConversationRow(self, parser_mediator, query, row, **unused_kwargs):\n    \n    query_hash = hash(query)\n\n    event_data = TangoAndroidConversationEventData()\n    event_data.conversation_identifier = self._GetRowValue(\n        query_hash, row, 'conv_id')\n\n    \n    \n    \n\n    date_time = dfdatetime_semantic_time.NotSet()\n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n    parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a conversation row from the database.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row resulting from query.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.Activate = channel.unary_unary(\n        '/enterprise.API/Activate',\n        request_serializer=client_dot_enterprise_dot_enterprise__pb2.ActivateRequest.SerializeToString,\n        response_deserializer=client_dot_enterprise_dot_enterprise__pb2.ActivateResponse.FromString,\n        )\n    self.GetState = channel.unary_unary(\n        '/enterprise.API/GetState',\n        request_serializer=client_dot_enterprise_dot_enterprise__pb2.GetStateRequest.SerializeToString,\n        response_deserializer=client_dot_enterprise_dot_enterprise__pb2.GetStateResponse.FromString,\n        )\n    self.Deactivate = channel.unary_unary(\n        '/enterprise.API/Deactivate',\n        request_serializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateRequest.SerializeToString,\n        response_deserializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def requestA(self):\n    work_context = self.getContext()\n    self.setContext('request[v4A]')\n    self.m_serial_port.write((('2f3f'.decode('hex') + self.m_meter_address) + '3030210d0a'.decode('hex')))\n    self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())\n    unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)\n    self.convertData(unpacked_read_a, self.m_blk_a)\n    self.m_kwh_precision = int(self.m_blk_a[Field.kWh_Scale][MeterData.NativeValue])\n    self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)\n    self.setContext(work_context)\n    return self.m_a_crc", "docstring": "Issue an A read on V4 meter.\n\nReturns:\nbool: True if CRC match at end of call.", "source": "codesearchnet"}
{"code": "def pack_rpc_payload(arg_format, args):\n    code = _create_respcode(arg_format, args)\n    packed_result = struct.pack(code, *args)\n    unpacked_validation = struct.unpack(code, packed_result)\n    if (tuple(args) != unpacked_validation):\n        raise RPCInvalidArgumentsError('Passed values would be truncated, please validate the size of your string', code=code, args=args)\n    return packed_result", "docstring": "Pack an RPC payload according to arg_format.\n\nArgs:\narg_format (str): a struct format code (without the <) for the\nparameter format for this RPC.  This format code may include the final\ncharacter V, which means that it expects a variable length bytearray.\nargs (list): A list of arguments to pack according to arg_format.\n\nReturns:\nbytes: The packed argument buffer.", "source": "codesearchnet"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    fixed_section_data_map = self._GetDataTypeMap('job_fixed_length_data_section')\n    try:\n        (fixed_length_section, file_offset) = self._ReadStructureFromFileObject(file_object, 0, fixed_section_data_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse fixed-length data section with error: {0!s}'.format(exception))\n    if (not (fixed_length_section.product_version in self._PRODUCT_VERSIONS)):\n        raise errors.UnableToParseFile('Unsupported product version in: 0x{0:04x}'.format(fixed_length_section.product_version))\n    if (not (fixed_length_section.format_version == 1)):\n        raise errors.UnableToParseFile('Unsupported format version in: {0:d}'.format(fixed_length_section.format_version))\n    variable_section_data_map = self._GetDataTypeMap('job_variable_length_data_section')\n    try:\n        (variable_length_section, data_size) = self._ReadStructureFromFileObject(file_object, file_offset, variable_section_data_map)\n    except (ValueError, errors.ParseError) as exception:\n        raise errors.UnableToParseFile('Unable to parse variable-length data section with error: {0!s}'.format(exception))\n    file_offset += data_size\n    event_data = self._ParseEventData(variable_length_section)\n    date_time = self._ParseLastRunTime(parser_mediator, fixed_length_section)\n    if date_time:\n        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_RUN)\n        parser_mediator.ProduceEventWithEventData(event, event_data)\n    trigger_data_map = self._GetDataTypeMap('job_trigger')\n    for trigger_index in range(0, variable_length_section.number_of_triggers):\n        try:\n            (trigger, data_size) = self._ReadStructureFromFileObject(file_object, file_offset, trigger_data_map)\n        except (ValueError, errors.ParseError) as exception:\n            raise errors.UnableToParseFile('Unable to parse trigger: {0:d} with error: {2!s}'.format(trigger_index, exception))\n        file_offset += data_size\n        event_data.trigger_type = trigger.trigger_type\n        date_time = self._ParseTriggerStartTime(parser_mediator, trigger)\n        if date_time:\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START, time_zone=parser_mediator.timezone)\n            parser_mediator.ProduceEventWithEventData(event, event_data)\n        date_time = self._ParseTriggerEndTime(parser_mediator, trigger)\n        if date_time:\n            event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START, time_zone=parser_mediator.timezone)\n            parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Windows job file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"}
{"code": "def WriteFixedString(self, value, length):\n        \n        towrite = value.encode('utf-8')\n        slen = len(towrite)\n        if slen > length:\n            raise Exception(\"string longer than fixed length: %s \" % length)\n        self.WriteBytes(towrite)\n        diff = length - slen\n\n        while diff > 0:\n            self.WriteByte(0)\n            diff -= 1", "docstring": "Write a string value to the stream.\n\nArgs:\nvalue (str): value to write to the stream.\nlength (int): length of the string to write.", "source": "juraj-google-style"}
{"code": "def _ExportEvent(self, output_module, event, deduplicate_events=True):\n    \n    if event.timestamp != self._export_event_timestamp:\n      self._FlushExportBuffer(\n          output_module, deduplicate_events=deduplicate_events)\n      self._export_event_timestamp = event.timestamp\n\n    self._export_event_heap.PushEvent(event)", "docstring": "Exports an event using an output module.\n\nArgs:\noutput_module (OutputModule): output module.\nevent (EventObject): event.\ndeduplicate_events (Optional[bool]): True if events should be\ndeduplicated.", "source": "juraj-google-style"}
{"code": "def restore_walker(self, dumped_state):\n        \n\n        selector_string = dumped_state.get(u'selector')\n        if selector_string is None:\n            raise ArgumentError(\"Invalid stream walker state in restore_walker, missing 'selector' key\", state=dumped_state)\n\n        selector = DataStreamSelector.FromString(selector_string)\n\n        walker = self.create_walker(selector)\n        walker.restore(dumped_state)\n        return walker", "docstring": "Restore a stream walker that was previously serialized.\n\nSince stream walkers need to be tracked in an internal list for\nnotification purposes, we need to be careful with how we restore\nthem to make sure they remain part of the right list.\n\nArgs:\ndumped_state (dict): The dumped state of a stream walker\nfrom a previous call to StreamWalker.dump()\n\nReturns:\nStreamWalker: The correctly restored StreamWalker subclass.", "source": "juraj-google-style"}
{"code": "def _get(self, feed_item):\n    result = store.get(self._entity, feed_item.get(FieldMap.CREATIVE_ASSET_ID, None))\n    if not result:\n        result = {'id': feed_item.get(FieldMap.CREATIVE_ASSET_ID, None), 'assetIdentifier': {'name': feed_item.get(FieldMap.CREATIVE_ASSET_NAME, None), 'type': feed_item.get(FieldMap.CREATIVE_TYPE, None)}}\n        store.set(self._entity, [feed_item.get(FieldMap.CREATIVE_ASSET_ID, None)], result)\n    return result", "docstring": "Retrieves an item from DCM or the local cache.\n\nArgs:\nfeed_item: The feed item representing the creative asset from the\nBulkdozer feed.\n\nReturns:\nInstance of the DCM object either from the API or from the local cache.", "source": "github-repos"}
{"code": "def default_output_fn(prediction, accept):\n    return _worker.Response(response=_encoders.encode(prediction, accept), mimetype=accept)", "docstring": "Function responsible to serialize the prediction for the response.\n\nArgs:\nprediction (obj): prediction returned by predict_fn .\naccept (str): accept content-type expected by the client.\n\nReturns:\n(worker.Response): a Flask response object with the following args:\n\n* Args:\nresponse: the serialized data to return\naccept: the content-type that the data was transformed to.", "source": "codesearchnet"}
{"code": "def nvals(self):\n    return self._row_splits[-1]", "docstring": "Returns the number of values partitioned by this `RowPartition`.\n\nIf the sequence partitioned by this `RowPartition` is a tensor, then\n`nvals` is the size of that tensor's outermost dimension -- i.e.,\n`nvals == values.shape[0]`.\n\nReturns:\nscalar integer Tensor", "source": "github-repos"}
{"code": "def api(self, name, namespace='pyeapi.api'):\n    module = load_module('{}.{}'.format(namespace, name))\n    if hasattr(module, 'initialize'):\n        module.initialize(self)\n    if hasattr(module, 'instance'):\n        return module.instance(self)\n    return module", "docstring": "Loads the specified api module\n\nThis method is the API autoload mechanism that will load the API\nmodule specified by the name argument.  The API module will be loaded\nand look first for an initialize() function and secondly for an\ninstance() function.  In both cases, the node object is passed to\nthe module.\n\nArgs:\nname (str): The name of the module to load.  The name should be\nthe name of the python file to import\nnamespace (str): The namespace to use to load the module.  The\ndefault value is 'pyeapi.api'\n\nReturns:\nThe API module loaded with the node instance.", "source": "codesearchnet"}
{"code": "def threshold(self) -> float:\n    return self._tracker.get()", "docstring": "Returns the current quantile-based threshold value.\n\nReturns:\nfloat: The dynamically calculated threshold value based on the quantile\ntracker.", "source": "github-repos"}
{"code": "def _format_origin_stack(origin_stack, call_traceback_proto):\n    string_to_id = {}\n    string_to_id[None] = 0\n    for frame in origin_stack:\n        file_path, lineno, func_name, line_text = frame\n        call_traceback_proto.origin_stack.traces.add(file_id=_string_to_id(file_path, string_to_id), lineno=lineno, function_id=_string_to_id(func_name, string_to_id), line_id=_string_to_id(line_text, string_to_id))\n    id_to_string = call_traceback_proto.origin_id_to_string\n    for key, value in string_to_id.items():\n        id_to_string[value] = key if key is not None else ''", "docstring": "Format a traceback stack for a `CallTraceback` proto.\n\nArgs:\norigin_stack: The stack list as returned by `traceback.extract_stack()`.\ncall_traceback_proto: A `CallTraceback` proto whose fields are to be\npopulated.", "source": "github-repos"}
{"code": "def update_script_from_item(self, item):\n    (script, path_to_script, script_item) = item.get_script()\n    dictator = list(script_item.to_dict().values())[0]\n    for instrument in list(script.instruments.keys()):\n        script.instruments[instrument]['settings'] = dictator[instrument]['settings']\n        del dictator[instrument]\n    for sub_script_name in list(script.scripts.keys()):\n        sub_script_item = script_item.get_subscript(sub_script_name)\n        self.update_script_from_item(sub_script_item)\n        del dictator[sub_script_name]\n    script.update(dictator)\n    script.data_path = self.gui_settings['data_folder']", "docstring": "updates the script based on the information provided in item\n\nArgs:\nscript: script to be updated\nitem: B26QTreeItem that contains the new settings of the script", "source": "codesearchnet"}
{"code": "def gfortran_search_path(library_dirs):\n    cmd = ('gfortran', '-print-search-dirs')\n    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n    return_code = process.wait()\n    if (return_code != 0):\n        return library_dirs\n    cmd_output = process.stdout.read().decode('utf-8')\n    search_lines = cmd_output.strip().split('\\n')\n    library_lines = [line[len(FORTRAN_LIBRARY_PREFIX):] for line in search_lines if line.startswith(FORTRAN_LIBRARY_PREFIX)]\n    if (len(library_lines) != 1):\n        msg = GFORTRAN_MISSING_LIBS.format(cmd_output)\n        print(msg, file=sys.stderr)\n        return library_dirs\n    library_line = library_lines[0]\n    accepted = set(library_dirs)\n    for part in library_line.split(os.pathsep):\n        full_path = os.path.abspath(part.strip())\n        if os.path.isdir(full_path):\n            accepted.add(full_path)\n        else:\n            msg = GFORTRAN_BAD_PATH.format(full_path)\n            print(msg, file=sys.stderr)\n    return sorted(accepted)", "docstring": "Get the library directory paths for ``gfortran``.\n\nLooks for ``libraries: =`` in the output of ``gfortran -print-search-dirs``\nand then parses the paths. If this fails for any reason, this method will\nprint an error and return ``library_dirs``.\n\nArgs:\nlibrary_dirs (List[str]): Existing library directories.\n\nReturns:\nList[str]: The library directories for ``gfortran``.", "source": "codesearchnet"}
{"code": "def get(cls, **kwargs):\n    fields = {}\n    for field in cls.url_fields:\n        value = kwargs.pop(field, None)\n        if (value is None):\n            cls._handle_wrong_field(field, ATTR_TYPE_URL)\n        fields[field] = value\n    model = cls(**fields)\n    model._populate(**kwargs)\n    return model", "docstring": "Retrieve an object by making a GET request to Transifex.\n\nEach value in `kwargs` that corresponds to a field\ndefined in `self.url_fields` will be used in the URL path\nof the request, so that a particular entry of this model\nis identified and retrieved.\n\nRaises:\nAttributeError: if not all values for parameters in `url_fields`\nare passed as kwargs\ntxlib.http.exceptions.NotFoundError: if the object with these\nattributes is not found on the remote server\ntxlib.http.exceptions.ServerError subclass: depending on\nthe particular server response\n\nExample:\n# Note: also catch exceptions\n>>> obj = MyModel.get(attr1=value1, attr2=value2)", "source": "codesearchnet"}
{"code": "def prompt_for_password(url, user=None, default_user=None):\n    \n    if user is None:\n        default_user = default_user or getpass.getuser()\n        while user is None:\n            user = compat.console_input(\n                \"Enter username for {} [{}]: \".format(url, default_user)\n            )\n            if user.strip() == \"\" and default_user:\n                user = default_user\n    if user:\n        pw = getpass.getpass(\n            \"Enter password for {}@{} (Ctrl+C to abort): \".format(user, url)\n        )\n        if pw or pw == \"\":\n            return (user, pw)\n    return None", "docstring": "Prompt for username and password.\n\nIf a user name is passed, only prompt for a password.\nArgs:\nurl (str): hostname\nuser (str, optional):\nPass a valid name to skip prompting for a user name\ndefault_user (str, optional):\nPass a valid name that is used as default when prompting\nfor a user name\nRaises:\nKeyboardInterrupt if user hits Ctrl-C\nReturns:\n(username, password) or None", "source": "juraj-google-style"}
{"code": "async def undo_check_in(self):\n    res = (await self.connection('POST', 'tournaments/{}/participants/{}/undo_check_in'.format(self._tournament_id, self._id)))\n    self._refresh_from_json(res)", "docstring": "Undo the check in for this participant\n\n|methcoro|\n\nWarning:\n|unstable|\n\nRaises:\nAPIException", "source": "codesearchnet"}
{"code": "def get_json_type(obj):\n    if hasattr(obj, 'get_config'):\n        serialized = serialization.serialize_keras_object(obj)\n        serialized['__passive_serialization__'] = True\n        return serialized\n    if type(obj).__module__ == np.__name__:\n        if isinstance(obj, np.ndarray):\n            return obj.tolist()\n        else:\n            return obj.item()\n    if callable(obj):\n        return obj.__name__\n    if type(obj).__name__ == type.__name__:\n        return obj.__name__\n    if tf.available and isinstance(obj, tf.compat.v1.Dimension):\n        return obj.value\n    if tf.available and isinstance(obj, tf.TensorShape):\n        return obj.as_list()\n    if tf.available and isinstance(obj, tf.DType):\n        return obj.name\n    if isinstance(obj, collections.abc.Mapping):\n        return dict(obj)\n    if obj is Ellipsis:\n        return {'class_name': '__ellipsis__'}\n    if tf.available and isinstance(obj, tf.TypeSpec):\n        from tensorflow.python.framework import type_spec_registry\n        try:\n            type_spec_name = type_spec_registry.get_name(type(obj))\n            return {'class_name': 'TypeSpec', 'type_spec': type_spec_name, 'serialized': obj._serialize()}\n        except ValueError:\n            raise ValueError(f'Unable to serialize {obj} to JSON, because the TypeSpec class {type(obj)} has not been registered.')\n    if tf.available and isinstance(obj, tf.__internal__.CompositeTensor):\n        spec = tf.type_spec_from_value(obj)\n        tensors = []\n        for tensor in tf.nest.flatten(obj, expand_composites=True):\n            tensors.append((tensor.dtype.name, tensor.numpy().tolist()))\n        return {'class_name': 'CompositeTensor', 'spec': get_json_type(spec), 'tensors': tensors}\n    if isinstance(obj, enum.Enum):\n        return obj.value\n    if isinstance(obj, bytes):\n        return {'class_name': '__bytes__', 'value': obj.decode('utf-8')}\n    raise TypeError(f'Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}.')", "docstring": "Serializes any object to a JSON-serializable structure.\n\nArgs:\nobj: the object to serialize\n\nReturns:\nJSON-serializable structure representing `obj`.\n\nRaises:\nTypeError: if `obj` cannot be serialized.", "source": "github-repos"}
{"code": "def IsRaised(self):\n\n    class IsRaisedContext(_EmptySubject):\n        \n\n        def __init__(self, actual, get_actual_message):\n            super(IsRaisedContext, self).__init__(actual)\n            self._get_actual_message = get_actual_message\n\n        def __enter__(self):\n            return self\n\n        @asserts_truth\n        def __exit__(self, exc_type, exc, exc_tb):\n            if exc:\n                if issubclass(exc_type, type(self._actual)):\n                    if hasattr(self._actual, 'message'):\n                        AssertThat(exc).HasMessage(self._get_actual_message())\n                    AssertThat(exc).HasArgsThat().ContainsExactlyElementsIn(self._actual.args).InOrder()\n                else:\n                    self._FailWithSubject('should have been raised, but caught <{0!r}>'.format(exc))\n            else:\n                self._Resolve()\n                self._FailWithSubject('should have been raised, but was not')\n            return True\n    return IsRaisedContext(self._actual, self._GetActualMessage)", "docstring": "Asserts that an exception matching this subject is raised.\n\nThe raised exception must be the same type as (or a subclass of) this\nsubject's. The raised exception's \"message\" and \"args\" attributes must\nmatch this subject's exactly. As this is a fairly strict match,\n_ExceptionClassSubject.IsRaised() may be easier to use.\n\nReturns:\nA context within which an expected exception may be raised.", "source": "github-repos"}
{"code": "def date_clean(date, dashboard_style=False):\n    if dashboard_style:\n        dt = str(date)\n        out = ((((dt[4:6] + '/') + dt[6:]) + '/') + dt[:4])\n    else:\n        dt = str(date)\n        out = ((((dt[:4] + '-') + dt[4:6]) + '-') + dt[6:])\n    return out", "docstring": "Clean the numerical date value in order to present it.\n\nArgs:\nboo: numerical date (20160205)\nReturns:\nStringified version of the input date (\"2016-02-05\")", "source": "codesearchnet"}
{"code": "def _finished_callback(self, batch_fut, todo):\n    \n    self._running.remove(batch_fut)\n    err = batch_fut.get_exception()\n    if err is not None:\n      tb = batch_fut.get_traceback()\n      for (fut, _) in todo:\n        if not fut.done():\n          fut.set_exception(err, tb)", "docstring": "Passes exception along.\n\nArgs:\nbatch_fut: the batch future returned by running todo_tasklet.\ntodo: (fut, option) pair. fut is the future return by each add() call.\n\nIf the batch fut was successful, it has already called fut.set_result()\non other individual futs. This method only handles when the batch fut\nencountered an exception.", "source": "juraj-google-style"}
{"code": "def __eq__(self, other):\n        \n        if type(self) is type(other) and \\\n                self.kernel == other.kernel and \\\n                self.discriminator == other.discriminator:\n            return True\n        return False", "docstring": "Two Acquires are the same if they are of the same type\nand have the same kernel and discriminator.\n\nArgs:\nother (Acquire): Other Acquire\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"}
{"code": "class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput):\n    loss: Optional[Tuple[torch.FloatTensor]] = None\n    logits: Optional[Tuple[torch.FloatTensor]] = None\n    vision_outputs: Optional[torch.FloatTensor] = None\n    qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None\n    language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None\n\n    def to_tuple(self) -> Tuple[Any]:\n        return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Class defining the outputs of [`InstructBlipVideoForConditionalGeneration`].\n\nArgs:\nloss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):\nLanguage modeling loss from the language model.\nlogits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\nPrediction scores of the language modeling head of the language model.\nvision_outputs (`BaseModelOutputWithPooling`):\nOutputs of the vision encoder.\nqformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):\nOutputs of the Q-Former (Querying Transformer).\nlanguage_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):\nOutputs of the language model.", "source": "github-repos"}
{"code": "def convert_to_rgb(self, image: ImageInput) -> ImageInput:\n    return convert_to_rgb(image)", "docstring": "Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image\nas is.\nArgs:\nimage (ImageInput):\nThe image to convert.\n\nReturns:\nImageInput: The converted image.", "source": "github-repos"}
{"code": "def write_gff_file(self, outfile, force_rerun=False):\n    if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):\n        with open(outfile, 'w') as out_handle:\n            GFF.write([self], out_handle)\n    self.feature_path = outfile", "docstring": "Write a GFF file for the protein features, ``features`` will now load directly from this file.\n\nArgs:\noutfile (str): Path to new FASTA file to be written to\nforce_rerun (bool): If an existing file should be overwritten", "source": "codesearchnet"}
{"code": "def get_servo_torque(self):\n    data = []\n    data.append(9)\n    data.append(self.servoid)\n    data.append(RAM_READ_REQ)\n    data.append(PWM_RAM)\n    data.append(BYTE2)\n    send_data(data)\n    rxdata = []\n    try:\n        rxdata = SERPORT.read(13)\n        if (ord(rxdata[10]) <= 127):\n            return (((ord(rxdata[10]) & 3) << 8) | (ord(rxdata[9]) & 255))\n        else:\n            return ((((ord(rxdata[10]) - 255) * 255) + (ord(rxdata[9]) & 255)) - 255)\n    except HerkulexError:\n        raise HerkulexError('could not communicate with motors')", "docstring": "Gets the current torque of Herkulex\n\nGives the current load on the servo shaft.\nIt is actually the PWM value to the motors\n\nArgs:\nnone\n\nReturns:\nint: the torque on servo shaft. range from -1023 to 1023\n\nRaises:\nSerialException: Error occured while opening serial port", "source": "codesearchnet"}
{"code": "def __init__(self, model):\n        \n        self._model = model\n\n        \n        if self.ALLOWED_SETTINGS:\n            self.update_settings({setting: self.ALLOWED_SETTINGS[setting][0]\n                                  for setting in self.ALLOWED_SETTINGS})", "docstring": "Create a new instance of this visualization.\n\n`BaseVisualization` is an interface and should only be instantiated via\na subclass.\n\nArgs:\nmodel (:obj:`.models.model.BaseModel`): NN model to be\nvisualized.", "source": "juraj-google-style"}
{"code": "def __init__(self, location):\n        \n        super(MarkLocation, self).__init__(location)\n        self.location = location\n        self.validate()", "docstring": "Create a new MarkLocation at the specified Location.\n\nArgs:\nlocation: Location object, must not be at a property field in the query\n\nReturns:\nnew MarkLocation object", "source": "juraj-google-style"}
{"code": "def get_mail_keys(message, complete=True):\n    \n\n    if complete:\n        log.debug(\"Get all headers\")\n        all_headers_keys = {i.lower() for i in message.keys()}\n        all_parts = ADDRESSES_HEADERS | OTHERS_PARTS | all_headers_keys\n    else:\n        log.debug(\"Get only mains headers\")\n        all_parts = ADDRESSES_HEADERS | OTHERS_PARTS\n\n    log.debug(\"All parts to get: {}\".format(\", \".join(all_parts)))\n    return all_parts", "docstring": "Given an email.message.Message, return a set with all email parts to get\n\nArgs:\nmessage (email.message.Message): email message object\ncomplete (bool): if True returns all email headers\n\nReturns:\nset with all email parts", "source": "juraj-google-style"}
{"code": "def fermi_energy_from_outcar( filename='OUTCAR' ):\n    \n    outcar = open(filename, \"r\").read()\n    \n    fermi_energy = re.search(r\"E-fermi\\s*:\\s*([-.\\d]*)\", outcar)\n    \n    fermi_energy = float(fermi_energy.group(1))\n    return fermi_energy", "docstring": "Finds and returns the fermi energy.\nArgs:\n-filename: the name of the outcar file to be read\n\nReturns:\n(Float): The fermi energy as found in the OUTCAR", "source": "juraj-google-style"}
{"code": "def is_namedtuple(instance, strict=False):\n    return _pywrap_utils.IsNamedtuple(instance, strict)", "docstring": "Returns True iff `instance` is a `namedtuple`.\n\nArgs:\ninstance: An instance of a Python object.\nstrict: If True, `instance` is considered to be a `namedtuple` only if it is\na \"plain\" namedtuple. For instance, a class inheriting from a `namedtuple`\nwill be considered to be a `namedtuple` iff `strict=False`.\n\nReturns:\nTrue if `instance` is a `namedtuple`.", "source": "github-repos"}
{"code": "def DeleteAddress(self, script_hash):\n        \n        coin_keys_toremove = []\n        coins_to_remove = []\n        for key, coinref in self._coins.items():\n            if coinref.Output.ScriptHash.ToBytes() == script_hash.ToBytes():\n                coin_keys_toremove.append(key)\n                coins_to_remove.append(coinref)\n\n        for k in coin_keys_toremove:\n            del self._coins[k]\n\n        ok = False\n        if script_hash.ToBytes() in self._contracts.keys():\n            ok = True\n            del self._contracts[script_hash.ToBytes()]\n        elif script_hash in self._watch_only:\n            ok = True\n            self._watch_only.remove(script_hash)\n\n        return ok, coins_to_remove", "docstring": "Deletes an address from the wallet (includes watch-only addresses).\n\nArgs:\nscript_hash (UInt160): a bytearray (len 20) representing the public key.\n\nReturns:\ntuple:\nbool: True if address removed, False otherwise.\nlist: a list of any ``neo.Wallet.Coin`` objects to be removed from the wallet.", "source": "juraj-google-style"}
{"code": "def load_hgnc_bulk(self, gene_objs):\n        \n\n        LOG.info(\"Loading gene bulk with length %s\", len(gene_objs))\n        try:\n            result = self.hgnc_collection.insert_many(gene_objs)\n        except (DuplicateKeyError, BulkWriteError) as err:\n            raise IntegrityError(err)\n\n        return result", "docstring": "Load a bulk of hgnc gene objects\n\nRaises IntegrityError if there are any write concerns\n\nArgs:\ngene_objs(iterable(scout.models.hgnc_gene))\n\nReturns:\nresult (pymongo.results.InsertManyResult)", "source": "juraj-google-style"}
{"code": "def forward(self, index: int, output: torch.Tensor, multi_stage_features: List[torch.Tensor], multi_stage_positional_embeddings: List[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, query_embeddings: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n    level_index = index % self.num_feature_levels\n    attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False\n    output, cross_attn_weights = self.cross_attn(output, multi_stage_features[level_index], memory_mask=attention_mask, memory_key_padding_mask=None, pos=multi_stage_positional_embeddings[level_index], query_pos=query_embeddings)\n    output, self_attn_weights = self.self_attn(output, output_mask=None, output_key_padding_mask=None, query_pos=query_embeddings)\n    output = self.ffn(output)\n    outputs = (output,)\n    if output_attentions:\n        outputs += (self_attn_weights, cross_attn_weights)\n    return outputs", "docstring": "Args:\nindex (`int`): index of the layer in the Transformer decoder.\noutput (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)`\nmulti_stage_features (`List[torch.Tensor]`): the multi-scale features from the pixel decoder.\nmulti_stage_positional_embeddings (`List[torch.Tensor]`):\npositional embeddings for the multi_stage_features\nattention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer\nquery_embeddings (`torch.FloatTensor`, *optional*):\nposition embeddings that are added to the queries and keys in the self-attention layer.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"}
{"code": "def _ProcessDirectory(self, mediator, file_entry):\n    self.processing_status = definitions.STATUS_INDICATOR_COLLECTING\n    if self._processing_profiler:\n        self._processing_profiler.StartTiming('collecting')\n    for sub_file_entry in file_entry.sub_file_entries:\n        if self._abort:\n            break\n        try:\n            if (not sub_file_entry.IsAllocated()):\n                continue\n        except dfvfs_errors.BackEndError as exception:\n            warning_message = 'unable to process directory entry: {0:s} with error: {1!s}'.format(sub_file_entry.name, exception)\n            mediator.ProduceExtractionWarning(warning_message, path_spec=file_entry.path_spec)\n            continue\n        if (sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK):\n            if (file_entry.IsRoot() and (sub_file_entry.name == '$OrphanFiles')):\n                continue\n        event_source = event_sources.FileEntryEventSource(path_spec=sub_file_entry.path_spec)\n        stat_object = sub_file_entry.GetStat()\n        if stat_object:\n            event_source.file_entry_type = stat_object.type\n        mediator.ProduceEventSource(event_source)\n        self.last_activity_timestamp = time.time()\n    if self._processing_profiler:\n        self._processing_profiler.StopTiming('collecting')\n    self.processing_status = definitions.STATUS_INDICATOR_RUNNING", "docstring": "Processes a directory file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry of the directory.", "source": "codesearchnet"}
{"code": "def on_get(self, req, resp, handler=None, **kwargs):\n    self.handle((handler or self.retrieve), req, resp, **kwargs)", "docstring": "Respond on GET HTTP request assuming resource retrieval flow.\n\nThis request handler assumes that GET requests are associated with\nsingle resource instance retrieval. Thus default flow for such requests\nis:\n\n* Retrieve single resource instance of prepare its representation by\ncalling retrieve method handler.\n\nArgs:\nreq (falcon.Request): request object instance.\nresp (falcon.Response): response object instance to be modified\nhandler (method): list method handler to be called. Defaults\nto ``self.list``.\n**kwargs: additional keyword arguments retrieved from url template.", "source": "codesearchnet"}
{"code": "def _updated_config(self):\n    from tensorflow.python.keras import __version__ as keras_version\n    config = self.get_config()\n    model_config = {'class_name': self.__class__.__name__, 'config': config, 'keras_version': keras_version, 'backend': backend.backend()}\n    return model_config", "docstring": "Util shared between different serialization methods.\n\nReturns:\nModel config with Keras version information added.", "source": "github-repos"}
{"code": "def __subject_map__(self, map_iri):\n        \n        subject_map = SimpleNamespace()\n        subject_map_bnode = self.rml.value(\n            subject=map_iri,\n            predicate=NS_MGR.rr.subjectMap.rdflib)\n        if subject_map_bnode is None:\n            return\n        \n        subject_map.class_ = self.rml.value(\n            subject=subject_map_bnode,\n            predicate=getattr(NS_MGR.rr, \"class\").rdflib)\n        subject_map.template = self.rml.value(\n            subject=subject_map_bnode,\n            predicate=NS_MGR.rr.template.rdflib)\n        subject_map.termType = self.rml.value(\n            subject=subject_map_bnode,\n            predicate=NS_MGR.rr.termType.rdflib)\n        subject_map.deduplicate = self.rml.value(\n            subject=subject_map_bnode,\n            predicate=NS_MGR.kds.deduplicate.rdflib)\n        subject_map.reference = self.rml.value(\n            subject=subject_map_bnode,\n            predicate=NS_MGR.rr.reference.rdflib)\n        return subject_map", "docstring": "Creates a SimpleNamespace for the TripleMap's subjectMap and\npopulates properties from the RML RDF graph\n\nArgs:\n\n-----\nmap_iri: rdflib.URIRef,TripleMap IRI\n\nReturns:\n\n--------\nSimpleNamespace", "source": "juraj-google-style"}
{"code": "def _define_loop(graph, logdir, train_steps, eval_steps):\n  \n  loop = tools.Loop(\n      logdir, graph.step, graph.should_log, graph.do_report,\n      graph.force_reset)\n  loop.add_phase(\n      'train', graph.done, graph.score, graph.summary, train_steps,\n      report_every=train_steps,\n      log_every=train_steps \n      checkpoint_every=None,\n      feed={graph.is_training: True})\n  loop.add_phase(\n      'eval', graph.done, graph.score, graph.summary, eval_steps,\n      report_every=eval_steps,\n      log_every=eval_steps \n      checkpoint_every=10 * eval_steps,\n      feed={graph.is_training: False})\n  return loop", "docstring": "Create and configure a training loop with training and evaluation phases.\n\nArgs:\ngraph: Object providing graph elements via attributes.\nlogdir: Log directory for storing checkpoints and summaries.\ntrain_steps: Number of training steps per epoch.\neval_steps: Number of evaluation steps per epoch.\n\nReturns:\nLoop object.", "source": "juraj-google-style"}
{"code": "def mac_hex_to_ascii(mac_hex, inc_dots):\n        \n        v = mac_hex[2:]\n        ret = ''\n        for i in range(0, len(v), 4):\n            ret += v[i:i+4]\n            if ((inc_dots) & ((i+4) < len(v))):\n                ret += '.'\n\n        return ret", "docstring": "Format a hex MAC string to ASCII\n\nArgs:\nmac_hex:    Value from SNMP\ninc_dots:   1 to format as aabb.ccdd.eeff, 0 to format aabbccddeeff\n\nReturns:\nString representation of the mac_hex", "source": "juraj-google-style"}
{"code": "def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:\n    return missing_keys", "docstring": "Override this method if you want to adjust the `missing_keys`.\n\nArgs:\nmissing_keys (`List[str]`, *optional*):\nThe list of missing keys in the checkpoint compared to the state dict of the model", "source": "github-repos"}
{"code": "def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):\n    raise NotImplementedError('This method should be tested')\n    gswfk_file = os.path.absath(gswfk_file)\n    dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]\n    gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]\n    print(('Will merge %d 1WF files, %d GKK file in output %s' % (len(dfpt_files), len(gkk_files), out_gkk)))\n    if self.verbose:\n        for (i, f) in enumerate(dfpt_files):\n            print((' [%d] 1WF %s' % (i, f)))\n        for (i, f) in enumerate(gkk_files):\n            print((' [%d] GKK %s' % (i, f)))\n    (self.stdin_fname, self.stdout_fname, self.stderr_fname) = map(os.path.join, (3 * [workdir]), ['mrggkk.stdin', 'mrggkk.stdout', 'mrggkk.stderr'])\n    inp = StringIO()\n    inp.write((out_gkk + '\\n'))\n    inp.write((str(binascii) + '\\n'))\n    inp.write((gswfk_file + '\\n'))\n    dims = ' '.join([str(d) for d in dims])\n    inp.write((dims + '\\n'))\n    for fname in dfpt_files:\n        inp.write((fname + '\\n'))\n    for fname in gkk_files:\n        inp.write((fname + '\\n'))\n    self.stdin_data = [s for s in inp.getvalue()]\n    with open(self.stdin_fname, 'w') as fh:\n        fh.writelines(self.stdin_data)\n        fh.flush()\n        os.fsync(fh.fileno())\n    self.execute(workdir)\n    return out_gkk", "docstring": "Merge GGK files, return the absolute path of the new database.\n\nArgs:\ngswfk_file: Ground-state WFK filename\ndfpt_files: List of 1WFK files to merge.\ngkk_files: List of GKK files to merge.\nout_gkk: Name of the output GKK file\nbinascii: Integer flat. 0 --> binary output, 1 --> ascii formatted output", "source": "codesearchnet"}
{"code": "def replaceWith(self, el):\n    self.childs = el.childs\n    self.params = el.params\n    self.endtag = el.endtag\n    self.openertag = el.openertag\n    self._tagname = el.getTagName()\n    self._element = el.tagToString()\n    self._istag = el.isTag()\n    self._isendtag = el.isEndTag()\n    self._iscomment = el.isComment()\n    self._isnonpairtag = el.isNonPairTag()", "docstring": "Replace value in this element with values from `el`.\n\nThis useful when you don't want change all references to object.\n\nArgs:\nel (obj): :class:`HTMLElement` instance.", "source": "codesearchnet"}
{"code": "def post_state(self, name, state):\n    self.post_command(OPERATIONS.CMD_UPDATE_STATE, {'name': name, 'new_status': state})", "docstring": "Asynchronously try to update the state for a service.\n\nIf the update fails, nothing is reported because we don't wait for a\nresponse from the server.  This function will return immmediately and\nnot block.\n\nArgs:\nname (string): The name of the service\nstate (int): The new state of the service", "source": "codesearchnet"}
{"code": "def diff_packages(pkg1, pkg2=None):\n    \n    if pkg2 is None:\n        it = iter_packages(pkg1.name)\n        pkgs = [x for x in it if x.version < pkg1.version]\n        if not pkgs:\n            raise RezError(\"No package to diff with - %s is the earliest \"\n                           \"package version\" % pkg1.qualified_name)\n        pkgs = sorted(pkgs, key=lambda x: x.version)\n        pkg2 = pkgs[-1]\n\n    def _check_pkg(pkg):\n        if not (pkg.vcs and pkg.revision):\n            raise RezError(\"Cannot diff package %s: it is a legacy format \"\n                           \"package that does not contain enough information\"\n                           % pkg.qualified_name)\n\n    _check_pkg(pkg1)\n    _check_pkg(pkg2)\n    path = mkdtemp(prefix=\"rez-pkg-diff\")\n    paths = []\n\n    for pkg in (pkg1, pkg2):\n        print \"Exporting %s...\" % pkg.qualified_name\n        path_ = os.path.join(path, pkg.qualified_name)\n        vcs_cls_1 = plugin_manager.get_plugin_class(\"release_vcs\", pkg1.vcs)\n        vcs_cls_1.export(revision=pkg.revision, path=path_)\n        paths.append(path_)\n\n    difftool = config.difftool\n    print \"Opening diff viewer %s...\" % difftool\n    proc = Popen([difftool] + paths)\n    proc.wait()", "docstring": "Invoke a diff editor to show the difference between the source of two\npackages.\n\nArgs:\npkg1 (`Package`): Package to diff.\npkg2 (`Package`): Package to diff against. If None, the next most recent\npackage version is used.", "source": "juraj-google-style"}
{"code": "def find_log_dir(log_dir=None):\n    if log_dir:\n        dirs = [log_dir]\n    elif FLAGS['log_dir'].value:\n        dirs = [FLAGS['log_dir'].value]\n    else:\n        dirs = ['/tmp/', './']\n    for d in dirs:\n        if (os.path.isdir(d) and os.access(d, os.W_OK)):\n            return d\n    _absl_logger.fatal(\"Can't find a writable directory for logs, tried %s\", dirs)", "docstring": "Returns the most suitable directory to put log files into.\n\nArgs:\nlog_dir: str|None, if specified, the logfile(s) will be created in that\ndirectory.  Otherwise if the --log_dir command-line flag is provided,\nthe logfile will be created in that directory.  Otherwise the logfile\nwill be created in a standard location.", "source": "codesearchnet"}
{"code": "def append(self, species, coords, coords_are_cartesian=False, validate_proximity=False, properties=None):\n    return self.insert(len(self), species, coords, coords_are_cartesian=coords_are_cartesian, validate_proximity=validate_proximity, properties=properties)", "docstring": "Append a site to the structure.\n\nArgs:\nspecies: Species of inserted site\ncoords (3x1 array): Coordinates of inserted site\ncoords_are_cartesian (bool): Whether coordinates are cartesian.\nDefaults to False.\nvalidate_proximity (bool): Whether to check if inserted site is\ntoo close to an existing site. Defaults to False.\nproperties (dict): Properties of the site.\n\nReturns:\nNew structure with inserted site.", "source": "codesearchnet"}
{"code": "def kill_redis(self, check_alive=True):\n    self._kill_process_type(ray_constants.PROCESS_TYPE_REDIS_SERVER, check_alive=check_alive)", "docstring": "Kill the Redis servers.\n\nArgs:\ncheck_alive (bool): Raise an exception if any of the processes\nwere already dead.", "source": "codesearchnet"}
{"code": "def _get_definitions(source):\n    \n    \n    max_len = 0\n    descs = collections.OrderedDict()  \n    lines = (s.strip() for s in source.splitlines())\n    non_empty_lines = (s for s in lines if s)\n    for line in non_empty_lines:\n        if line:\n            arg, desc = re.split(r'\\s\\s+', line.strip())\n            arg_len = len(arg)\n            if arg_len > max_len:\n                max_len = arg_len\n            descs[arg] = desc\n    return descs, max_len", "docstring": "Extract a dictionary of arguments and definitions.\n\nArgs:\nsource: The source for a section of a usage string that contains\ndefinitions.\n\nReturns:\nA two-tuple containing a dictionary of all arguments and definitions as\nwell as the length of the longest argument.", "source": "juraj-google-style"}
{"code": "def ChangeScaleFactor(self, newfactor):\n    if ((float(newfactor) > 0) and (float(newfactor) < self._MAX_ZOOM)):\n        self._zoomfactor = newfactor", "docstring": "Changes the zoom of the graph manually.\n\n1.0 is the original canvas size.\n\nArgs:\n# float value between 0.0 and 5.0\nnewfactor: 0.7", "source": "codesearchnet"}
{"code": "def field(self, field_name, boost=1, extractor=None):\n    if ('/' in field_name):\n        raise ValueError('Field {} contains illegal character `/`')\n    self._fields[field_name] = Field(field_name, boost, extractor)", "docstring": "Adds a field to the list of document fields that will be indexed.\n\nEvery document being indexed should have this field. None values for\nthis field in indexed documents will not cause errors but will limit\nthe chance of that document being retrieved by searches.\n\nAll fields should be added before adding documents to the index. Adding\nfields after a document has been indexed will have no effect on already\nindexed documents.\n\nFields can be boosted at build time. This allows terms within that\nfield to have more importance on search results. Use a field boost to\nspecify that matches within one field are more important that other\nfields.\n\nArgs:\nfield_name (str): Name of the field to be added, must not include\na forward slash '/'.\nboost (int): Optional boost factor to apply to field.\nextractor (callable): Optional function to extract a field from\nthe document.\n\nRaises:\nValueError: If the field name contains a `/`.", "source": "codesearchnet"}
{"code": "def strip_cdata(text):\n    if (not is_cdata(text)):\n        return text\n    xml = '<e>{0}</e>'.format(text)\n    node = etree.fromstring(xml)\n    return node.text", "docstring": "Removes all CDATA blocks from `text` if it contains them.\n\nNote:\nIf the function contains escaped XML characters outside of a\nCDATA block, they will be unescaped.\n\nArgs:\nA string containing one or more CDATA blocks.\n\nReturns:\nAn XML unescaped string with CDATA block qualifiers removed.", "source": "codesearchnet"}
{"code": "def from_class(cls, target_class):\n    module_name = target_class.__module__\n    class_name = target_class.__name__\n    return cls(module_name, '__init__', class_name)", "docstring": "Create a FunctionDescriptor from a class.\n\nArgs:\ncls: Current class which is required argument for classmethod.\ntarget_class: the python class used to create the function\ndescriptor.\n\nReturns:\nThe FunctionDescriptor instance created according to the class.", "source": "codesearchnet"}
{"code": "def transmute_sites( self, old_site_label, new_site_label, n_sites_to_change ):\n        \n        selected_sites = self.select_sites( old_site_label )\n        for site in random.sample( selected_sites, n_sites_to_change ):\n            site.label = new_site_label\n        self.site_labels = set( [ site.label for site in self.sites ] )", "docstring": "Selects a random subset of sites with a specific label and gives them a different label.\n\nArgs:\nold_site_label (String or List(String)): Site label(s) of the sites to be modified..\nnew_site_label (String):                 Site label to be applied to the modified sites.\nn_sites_to_change (Int):                 Number of sites to modify.\n\nReturns:\nNone", "source": "juraj-google-style"}
{"code": "def open_remote_url(urls, **kwargs):\n    \n    if isinstance(urls, str):\n        urls = [urls]\n    for url in urls:\n        try:\n            web_file = requests.get(url, stream=True, **kwargs)\n            if 'html' in web_file.headers['content-type']:\n                raise ValueError(\"HTML source file retrieved.\")\n            return web_file\n        except Exception as ex:\n            logger.error('Fail to open remote url - {}'.format(ex))\n            continue", "docstring": "Open the url and check that it stores a file.\nArgs:\n:urls: Endpoint to take the file", "source": "juraj-google-style"}
{"code": "def load(cls, campaign_dir):\n        \n\n        \n        if not Path(campaign_dir).is_absolute():\n            raise ValueError(\"Path is not absolute\")\n\n        \n        if not Path(campaign_dir).exists():\n            raise ValueError(\"Directory does not exist\")\n\n        \n        filename = \"%s.json\" % os.path.split(campaign_dir)[1]\n        filepath = os.path.join(campaign_dir, filename)\n\n        try:\n            \n            tinydb = TinyDB(filepath)\n\n            \n            assert set(\n                tinydb.table('config').all()[0].keys()) == set(['script',\n                                                                'params',\n                                                                'commit'])\n        except:\n            \n            os.remove(filepath)\n            raise ValueError(\"Specified campaign directory seems corrupt\")\n\n        return cls(tinydb, campaign_dir)", "docstring": "Initialize from an existing database.\n\nIt is assumed that the database json file has the same name as its\ncontaining folder.\n\nArgs:\ncampaign_dir (str): The path to the campaign directory.", "source": "juraj-google-style"}
{"code": "def extract_output_file_path(args):\n    if args and args[-1].endswith('>'):\n        raise SyntaxError('Redirect file path is empty')\n    elif args and args[-1].startswith('>'):\n        try:\n            _parse_interval(args[-1])\n            if len(args) > 1 and args[-2].startswith('-'):\n                output_file_path = None\n            else:\n                output_file_path = args[-1][1:]\n                args = args[:-1]\n        except ValueError:\n            output_file_path = args[-1][1:]\n            args = args[:-1]\n    elif len(args) > 1 and args[-2] == '>':\n        output_file_path = args[-1]\n        args = args[:-2]\n    elif args and args[-1].count('>') == 1:\n        gt_index = args[-1].index('>')\n        if gt_index > 0 and args[-1][gt_index - 1] == '=':\n            output_file_path = None\n        else:\n            output_file_path = args[-1][gt_index + 1:]\n            args[-1] = args[-1][:gt_index]\n    elif len(args) > 1 and args[-2].endswith('>'):\n        output_file_path = args[-1]\n        args = args[:-1]\n        args[-1] = args[-1][:-1]\n    else:\n        output_file_path = None\n    return (args, output_file_path)", "docstring": "Extract output file path from command arguments.\n\nArgs:\nargs: (list of str) command arguments.\n\nReturns:\n(list of str) Command arguments with the output file path part stripped.\n(str or None) Output file path (if any).\n\nRaises:\nSyntaxError: If there is no file path after the last \">\" character.", "source": "github-repos"}
{"code": "def _send_script(self, client, uuid, chunk, key, chunk_status):\n    conn_id = self._validate_connection('send_script', uuid, key)\n    if (conn_id is None):\n        return\n    conn_data = self._connections[uuid]\n    conn_data['last_touch'] = monotonic()\n    slug = self._build_device_slug(uuid)\n    (index, count) = chunk_status\n    if (index == 0):\n        conn_data['script'] = bytes()\n    conn_data['script'] += chunk\n    if (index != (count - 1)):\n        return\n    conn_data['last_progress'] = None\n    try:\n        resp = (yield self._manager.send_script(conn_id, conn_data['script'], (lambda x, y: self._notify_progress_async(uuid, client, x, y))))\n        (yield None)\n        conn_data['script'] = bytes()\n    except Exception as exc:\n        self._logger.exception('Error in manager send_script')\n        resp = {'success': False, 'reason': ('Internal error: %s' % str(exc))}\n    payload = {'client': client, 'type': 'response', 'operation': 'send_script', 'success': resp['success']}\n    if (resp['success'] is False):\n        payload['failure_reason'] = resp['reason']\n    self._publish_response(slug, payload)", "docstring": "Send a script to the connected device.\n\nArgs:\nclient (string): The client that sent the rpc request\nuuid (int): The id of the device we're opening the interface on\nchunk (bytes): The binary script to send to the device\nkey (string): The key to authenticate the caller\nlast_chunk (tuple): the chunk index and count of chunks of this script\nso that we know to either accumulate it or send it on to the device\nimmediately.", "source": "codesearchnet"}
{"code": "def _check_create_file_writer_args(inside_function, **kwargs):\n    for arg_name, arg in kwargs.items():\n        if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tf_type(arg):\n            if inside_function:\n                raise ValueError(f\"Invalid graph Tensor argument '{arg_name}={arg}' to create_file_writer() inside an @tf.function. The create call will be lifted into the outer eager execution context, so it cannot consume graph tensors defined inside the function body.\")\n            else:\n                raise ValueError(f\"Invalid graph Tensor argument '{arg_name}={arg}' to eagerly executed create_file_writer().\")", "docstring": "Helper to check the validity of arguments to a create_file_writer() call.\n\nArgs:\ninside_function: whether the create_file_writer() call is in a tf.function\n**kwargs: the arguments to check, as kwargs to give them names.\n\nRaises:\nValueError: if the arguments are graph tensors.", "source": "github-repos"}
{"code": "def authenticate(self, request, email=None, password=None, username=None):\n    email = (email or username)\n    try:\n        email_instance = models.EmailAddress.objects.get(is_verified=True, email=email)\n    except models.EmailAddress.DoesNotExist:\n        return None\n    user = email_instance.user\n    if user.check_password(password):\n        return user\n    return None", "docstring": "Attempt to authenticate a set of credentials.\n\nArgs:\nrequest:\nThe request associated with the authentication attempt.\nemail:\nThe user's email address.\npassword:\nThe user's password.\nusername:\nAn alias for the ``email`` field. This is provided for\ncompatability with Django's built in authentication\nviews.\n\nReturns:\nThe user associated with the provided credentials if they\nare valid. Returns ``None`` otherwise.", "source": "codesearchnet"}
{"code": "async def _location_auth_protect(self, location):\n    netloc_sans_port = self.host.split(':')[0]\n    netloc_sans_port = netloc_sans_port.replace(re.match(_WWX_MATCH, netloc_sans_port)[0], '')\n    base_domain = '.'.join(netloc_sans_port.split('.')[(- 2):])\n    (l_scheme, l_netloc, _, _, _, _) = urlparse(location)\n    location_sans_port = l_netloc.split(':')[0]\n    location_sans_port = location_sans_port.replace(re.match(_WWX_MATCH, location_sans_port)[0], '')\n    location_domain = '.'.join(location_sans_port.split('.')[(- 2):])\n    if (base_domain == location_domain):\n        if (l_scheme < self.scheme):\n            return False\n        else:\n            return True", "docstring": "Checks to see if the new location is\n1. The same top level domain\n2. As or more secure than the current connection type\n\nReturns:\nTrue (bool): If the current top level domain is the same\nand the connection type is equally or more secure.\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def search(self, query_string):\n        \n        query = self.create_query()\n        \n        parser = QueryParser(query_string, query)\n        parser.parse()\n        return self.query(query)", "docstring": "Performs a search against the index using lunr query syntax.\n\nResults will be returned sorted by their score, the most relevant\nresults will be returned first.\n\nFor more programmatic querying use `lunr.Index.query`.\n\nArgs:\nquery_string (str): A string to parse into a Query.\n\nReturns:\ndict: Results of executing the query.", "source": "juraj-google-style"}
{"code": "def __init__(self, closure, type_spec):\n    self._closure = closure\n    self._type_spec = type_spec\n    self._values = None\n    self._has_fetched_to_local = False\n    self._has_fetched_to_local_lock = threading.Lock()\n    self._fetched_tensors = None\n    self._error = None\n    self._status_available_event = threading.Event()\n    self._status = remote_value.RemoteValueStatus.NOT_READY", "docstring": "Initializes a `RemoteValueImpl`.\n\nArgs:\nclosure: The closure from which the `RemoteValue` is created.\ntype_spec: The type spec for this `RemoteValue` which is used to trace\nfunctions that take this `RemoteValue` as input.", "source": "github-repos"}
{"code": "def strip_html_tags(text, allowed_tags=None):\n    \n    if text is None:\n        return\n    if allowed_tags is None:\n        allowed_tags = ALLOWED_TAGS\n    return bleach.clean(text, tags=allowed_tags, attributes=['id', 'class', 'style', 'href', 'title'], strip=True)", "docstring": "Strip all tags from a string except those tags provided in `allowed_tags` parameter.\n\nArgs:\ntext (str): string to strip html tags from\nallowed_tags (list): allowed list of html tags\n\nReturns: a string without html tags", "source": "juraj-google-style"}
{"code": "def _identify_eds_ing(first, second):\n    A = set([first.L, first.R])\n    A.update(first.D)\n    B = set([second.L, second.R])\n    B.update(second.D)\n    depend_set = (A & B)\n    (left, right) = sorted(list((A ^ B)))\n    return (left, right, depend_set)", "docstring": "Find nodes connecting adjacent edges.\n\nArgs:\nfirst(Edge): Edge object representing the first edge.\nsecond(Edge): Edge object representing the second edge.\n\nReturns:\ntuple[int, int, set[int]]: The first two values represent left and right node\nindicies of the new edge. The third value is the new dependence set.", "source": "codesearchnet"}
{"code": "def get_mysql_vars(mysql: str,\n                   host: str,\n                   port: int,\n                   user: str) -> Dict[str, str]:\n    \n    cmdargs = [\n        mysql,\n        \"-h\", host,\n        \"-P\", str(port),\n        \"-e\", \"SHOW VARIABLES; SHOW STATUS\",\n        \"-u\", user,\n        \"-p\"  \n    ]\n    log.info(\"Connecting to MySQL with user: {}\", user)\n    log.debug(cmdargs)\n    process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE)\n    out, err = process.communicate()\n    lines = out.decode(\"utf8\").splitlines()\n    mysqlvars = {}\n    for line in lines:\n        var, val = line.split(\"\\t\")\n        mysqlvars[var] = val\n    return mysqlvars", "docstring": "Asks MySQL for its variables and status.\n\nArgs:\nmysql: ``mysql`` executable filename\nhost: host name\nport: TCP/IP port number\nuser: username\n\nReturns:\ndictionary of MySQL variables/values", "source": "juraj-google-style"}
{"code": "def bot_intent(self) -> 'IntentAPI':\n    if (not self._bot_intent):\n        self._bot_intent = IntentAPI(self.bot_mxid, self, state_store=self.state_store, log=self.intent_log)\n    return self._bot_intent", "docstring": "Get the intent API for the appservice bot.\n\nReturns:\nThe IntentAPI for the appservice bot.", "source": "codesearchnet"}
{"code": "def radar_xsect(scatterer, h_pol=True):\n    Z = scatterer.get_Z()\n    if h_pol:\n        return ((2 * np.pi) * (((Z[(0, 0)] - Z[(0, 1)]) - Z[(1, 0)]) + Z[(1, 1)]))\n    else:\n        return ((2 * np.pi) * (((Z[(0, 0)] + Z[(0, 1)]) + Z[(1, 0)]) + Z[(1, 1)]))", "docstring": "Radar cross section for the current setup.\n\nArgs:\nscatterer: a Scatterer instance.\nh_pol: If True (default), use horizontal polarization.\nIf False, use vertical polarization.\n\nReturns:\nThe radar cross section.", "source": "codesearchnet"}
{"code": "def status_update(self, crits_id, crits_type, status):\n        \n        obj_type = self._type_translation(crits_type)\n        patch_url = \"{0}/{1}/{2}/\".format(self.url, obj_type, crits_id)\n        params = {\n            'api_key': self.api_key,\n            'username': self.username,\n        }\n\n        data = {\n            'action': 'status_update',\n            'value': status,\n        }\n\n        r = requests.patch(patch_url, params=params, data=data,\n                           verify=self.verify, proxies=self.proxies)\n        if r.status_code == 200:\n            log.debug('Object {} set to {}'.format(crits_id, status))\n            return True\n        else:\n            log.error('Attempted to set object id {} to '\n                      'Informational, but did not receive a '\n                      '200'.format(crits_id))\n            log.error('Error message was: {}'.format(r.text))\n        return False", "docstring": "Update the status of the TLO. By default, the options are:\n- New\n- In Progress\n- Analyzed\n- Deprecated\n\nArgs:\ncrits_id: The object id of the TLO\ncrits_type: The type of TLO. This must be 'Indicator', ''\nstatus: The status to change.\nReturns:\nTrue if the status was updated. False otherwise.\nRaises:\nCRITsInvalidTypeError", "source": "juraj-google-style"}
{"code": "def comments_2(self, value=None):\n        \n        if value is not None:\n            try:\n                value = str(value)\n            except ValueError:\n                raise ValueError('value {} need to be of type str '\n                                 'for field `comments_2`'.format(value))\n            if ',' in value:\n                raise ValueError('value should not contain a comma '\n                                 'for field `comments_2`')\n\n        self._comments_2 = value", "docstring": "Corresponds to IDD Field `comments_2`\n\nArgs:\nvalue (str): value for IDD Field `comments_2`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"}
{"code": "def error_message():\n    sys.stderr.write('valid commands:\\n')\n    for cmd in get_valid_commands():\n        sys.stderr.write(('\\t%s\\n' % cmd))\n    return (- 1)", "docstring": "Writes out error message specifying the valid commands.\n\nReturns:\nFailure code for system exit", "source": "codesearchnet"}
{"code": "def configure_plugin(self, name, options):\n        \n        url = self._url('/plugins/{0}/set', name)\n        data = options\n        if isinstance(data, dict):\n            data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]\n        res = self._post_json(url, data=data)\n        self._raise_for_status(res)\n        return True", "docstring": "Configure a plugin.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\noptions (dict): A key-value mapping of options\n\nReturns:\n``True`` if successful", "source": "juraj-google-style"}
{"code": "def concept_distance(c1, c2):\n    \n    \n    \n    \n    cause_purview = tuple(set(c1.cause.purview + c2.cause.purview))\n    effect_purview = tuple(set(c1.effect.purview + c2.effect.purview))\n    \n    return (repertoire_distance(c1.expand_cause_repertoire(cause_purview),\n                                c2.expand_cause_repertoire(cause_purview)) +\n            repertoire_distance(c1.expand_effect_repertoire(effect_purview),\n                                c2.expand_effect_repertoire(effect_purview)))", "docstring": "Return the distance between two concepts in concept space.\n\nArgs:\nc1 (Concept): The first concept.\nc2 (Concept): The second concept.\n\nReturns:\nfloat: The distance between the two concepts in concept space.", "source": "juraj-google-style"}
{"code": "def send(self, response):\n        \n        self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),\n                                        pickle.dumps(response))", "docstring": "Send a response back to the client that issued a request.\n\nArgs:\nresponse (Response): Reference to the response object that should be sent.", "source": "juraj-google-style"}
{"code": "def insert(self, iterable, index=0, data=None, weight=1.0):\n        \n        if index == len(iterable):\n            self.is_terminal = True\n            self.key = iterable\n            self.weight = weight\n            if data:\n                self.data.add(data)\n        else:\n            if iterable[index] not in self.children:\n                self.children[iterable[index]] = TrieNode()\n            self.children[iterable[index]].insert(iterable, index + 1, data)", "docstring": "Insert new node into tree\n\nArgs:\niterable(hashable): key used to find in the future.\ndata(object): data associated with the key\nindex(int): an index used for insertion.\nweight(float): the wait given for the item added.", "source": "juraj-google-style"}
{"code": "def isset(alias_name):\n    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n    raw_value = read(alias_name, allow_none=True)\n    if raw_value:\n        if re.compile('.+:\n            return True\n        else:\n            warnings.warn('\"{0}_PORT={1}\" does not look like a docker link.'.format(alias_name, raw_value), stacklevel=2)\n            return False\n    return False", "docstring": "Return a boolean if the docker link is set or not and is a valid looking docker link value.\n\nArgs:\nalias_name: The link alias name", "source": "codesearchnet"}
{"code": "def decompress(self, value: LocalizedValue) -> List[str]:\n        \n\n        result = []\n        for lang_code, _ in settings.LANGUAGES:\n            if value:\n                result.append(value.get(lang_code))\n            else:\n                result.append(None)\n\n        return result", "docstring": "Decompresses the specified value so\nit can be spread over the internal widgets.\n\nArguments:\nvalue:\nThe :see:LocalizedValue to display in this\nwidget.\n\nReturns:\nAll values to display in the inner widgets.", "source": "juraj-google-style"}
{"code": "def _wait_for_any_job(provider, job_ids, poll_interval):\n  \n  if not job_ids:\n    return\n  while True:\n    tasks = provider.lookup_job_tasks({'*'}, job_ids=job_ids)\n    running_jobs = set()\n    failed_jobs = set()\n    for t in tasks:\n      status = t.get_field('task-status')\n      job_id = t.get_field('job-id')\n      if status in ['FAILURE', 'CANCELED']:\n        failed_jobs.add(job_id)\n      if status == 'RUNNING':\n        running_jobs.add(job_id)\n    remaining_jobs = running_jobs.difference(failed_jobs)\n    if failed_jobs or len(remaining_jobs) != len(job_ids):\n      return remaining_jobs\n    SLEEP_FUNCTION(poll_interval)", "docstring": "Waits until any of the listed jobs is not running.\n\nIn particular, if any of the jobs sees one of its tasks fail,\nwe count the whole job as failing (but do not terminate the remaining\ntasks ourselves).\n\nArgs:\nprovider: job service provider\njob_ids: a list of job IDs (string) to wait for\npoll_interval: integer seconds to wait between iterations\n\nReturns:\nA set of the jobIDs with still at least one running task.", "source": "juraj-google-style"}
{"code": "def goto(directory, create=False):\n    \n\n    current = os.getcwd()\n    directory = os.path.abspath(directory)\n\n    if os.path.isdir(directory) or (create and mkdir(directory)):\n        logger.info(\"goto -> %s\", directory)\n        os.chdir(directory)\n        try:\n            yield True\n        finally:\n            logger.info(\"goto <- %s\", directory)\n            os.chdir(current)\n    else:\n        logger.info(\n            \"goto(%s) - directory does not exist, or cannot be \" \"created.\",\n            directory,\n        )\n        yield False", "docstring": "Context object for changing directory.\n\nArgs:\ndirectory (str): Directory to go to.\ncreate (bool): Create directory if it doesn't exists.\n\nUsage::\n\n>>> with goto(directory) as ok:\n...     if not ok:\n...         print 'Error'\n...     else:\n...         print 'All OK'", "source": "juraj-google-style"}
{"code": "def remove_child(self, child):\n    if ((child in self.children.values()) and hasattr(child, 'identifier')):\n        for k in self.children.keys():\n            if hasattr(self.children[k], 'identifier'):\n                if (self.children[k].identifier == child.identifier):\n                    if (k in self._render_children_list):\n                        self._render_children_list.remove(k)\n                    self.children.pop(k)\n                    break", "docstring": "Removes a child instance from the Tag's children.\n\nArgs:\nchild (Tag): The child to be removed.", "source": "codesearchnet"}
{"code": "def add_migrations(self, migrations):\n        \n        if self.__closed:\n            raise MigrationSessionError(\"Can't change applied session\")\n        self._to_apply.extend(migrations)", "docstring": "Add migrations to be applied.\n\nArgs:\nmigrations: a list of migrations to add of the form [(app, migration_name), ...]\nRaises:\nMigrationSessionError if called on a closed MigrationSession", "source": "juraj-google-style"}
{"code": "def booleans_processing(config, **kwargs):\n    final_booleans = {}\n    if 'output_attentions' in kwargs:\n        final_booleans['output_attentions'] = kwargs['output_attentions'] if kwargs['output_attentions'] is not None else config.output_attentions\n    final_booleans['output_hidden_states'] = kwargs['output_hidden_states'] if kwargs['output_hidden_states'] is not None else config.output_hidden_states\n    final_booleans['return_dict'] = kwargs['return_dict'] if kwargs['return_dict'] is not None else config.return_dict\n    if 'use_cache' in kwargs:\n        final_booleans['use_cache'] = kwargs['use_cache'] if kwargs['use_cache'] is not None else getattr(config, 'use_cache', None)\n    return final_booleans", "docstring": "Process the input booleans of each model.\n\nArgs:\nconfig ([`PretrainedConfig`]):\nThe config of the running model.\n**kwargs:\nThe boolean parameters\n\nReturns:\nA dictionary with the proper values for each boolean", "source": "github-repos"}
{"code": "def __init__(self, isbn):\n        \n        super(Isbn, self).__init__()\n        self._isbn = isbn\n        if len(isbn) in (9, 12):\n            self.isbn = _isbn_cleanse(isbn, False)\n        else:\n            self.isbn = _isbn_cleanse(isbn)", "docstring": "Initialise a new ``Isbn`` object.\n\nArgs:\nisbn (str): ISBN string", "source": "juraj-google-style"}
{"code": "def AddUserAccount(self, user_account, session_identifier=CURRENT_SESSION):\n    if (session_identifier not in self._user_accounts):\n        self._user_accounts[session_identifier] = {}\n    user_accounts = self._user_accounts[session_identifier]\n    if (user_account.identifier in user_accounts):\n        raise KeyError('User account: {0:s} already exists.'.format(user_account.identifier))\n    user_accounts[user_account.identifier] = user_account", "docstring": "Adds an user account.\n\nArgs:\nuser_account (UserAccountArtifact): user account artifact.\nsession_identifier (Optional[str])): session identifier, where\nCURRENT_SESSION represents the active session.\n\nRaises:\nKeyError: if the user account already exists.", "source": "codesearchnet"}
{"code": "def get_file_type(variant_source):\n    \n    file_type = 'unknown'\n    valid_vcf_suffixes = ('.vcf', '.vcf.gz')\n    if variant_source:\n        logger.debug(\"Check file type with file: {0}\".format(variant_source))\n        if variant_source.endswith('.db'):\n            file_type = 'gemini'\n            logger.debug(\"File {0} is a gemini database\".format(variant_source))\n        elif variant_source.endswith(valid_vcf_suffixes):\n            file_type = 'vcf'\n            logger.debug(\"File {0} is a vcf\".format(variant_source))\n        else:\n            logger.debug(\"File is in a unknown format\")\n    \n    return file_type", "docstring": "Check what kind of file variant source is\n\nArgs:\nvariant_source (str): Path to variant source\n\nReturns:\nfile_type (str): 'vcf', 'gemini' or 'unknown'", "source": "juraj-google-style"}
{"code": "def add_to_writer(self, writer: PdfFileWriter, start_recto: bool=True) -> None:\n    if self.is_html:\n        pdf = get_pdf_from_html(html=self.html, header_html=self.header_html, footer_html=self.footer_html, wkhtmltopdf_filename=self.wkhtmltopdf_filename, wkhtmltopdf_options=self.wkhtmltopdf_options)\n        append_memory_pdf_to_writer(pdf, writer, start_recto=start_recto)\n    elif self.is_filename:\n        if (start_recto and ((writer.getNumPages() % 2) != 0)):\n            writer.addBlankPage()\n        writer.appendPagesFromReader(PdfFileReader(open(self.filename, 'rb')))\n    else:\n        raise AssertionError(\"PdfPlan: shouldn't get here!\")", "docstring": "Add the PDF described by this class to a PDF writer.\n\nArgs:\nwriter: a :class:`PyPDF2.PdfFileWriter`\nstart_recto: start a new right-hand page?", "source": "codesearchnet"}
{"code": "def depth_soil_density(self, value=None):\n    if (value is not None):\n        try:\n            value = float(value)\n        except ValueError:\n            raise ValueError('value {} need to be of type float for field `depth_soil_density`'.format(value))\n    self._depth_soil_density = value", "docstring": "Corresponds to IDD Field `depth_soil_density`\n\nArgs:\nvalue (float): value for IDD Field `depth_soil_density`\nUnit: kg/m3\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"}
{"code": "def submit_evaluation(self, variant_obj, user_obj, institute_obj, case_obj, link, criteria):\n        \n        variant_specific = variant_obj['_id']\n        variant_id = variant_obj['variant_id']\n        user_id = user_obj['_id']\n        user_name = user_obj.get('name', user_obj['_id'])\n        institute_id = institute_obj['_id']\n        case_id = case_obj['_id']\n\n        evaluation_terms = [evluation_info['term'] for evluation_info in criteria]\n\n        classification = get_acmg(evaluation_terms)\n\n        evaluation_obj = build_evaluation(\n            variant_specific=variant_specific,\n            variant_id=variant_id,\n            user_id=user_id,\n            user_name=user_name,\n            institute_id=institute_id,\n            case_id=case_id,\n            classification=classification,\n            criteria=criteria\n        )\n\n        self._load_evaluation(evaluation_obj)\n\n        \n        self.update_acmg(institute_obj, case_obj, user_obj, link, variant_obj, classification)\n        return classification", "docstring": "Submit an evaluation to the database\n\nGet all the relevant information, build a evaluation_obj\n\nArgs:\nvariant_obj(dict)\nuser_obj(dict)\ninstitute_obj(dict)\ncase_obj(dict)\nlink(str): variant url\ncriteria(list(dict)):\n\n[\n{\n'term': str,\n'comment': str,\n'links': list(str)\n},\n.\n.\n]", "source": "juraj-google-style"}
{"code": "def WriteSerialized(cls, attribute_container):\n    \n    json_dict = cls.WriteSerializedDict(attribute_container)\n    return json.dumps(json_dict)", "docstring": "Writes an attribute container to serialized form.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.\n\nReturns:\nstr: A JSON string containing the serialized form.", "source": "juraj-google-style"}
{"code": "def _send_http_request(self, xml_request):\n    headers = {'Host': self._host, 'Content-Type': 'text/xml', 'Recipient': self._storage}\n    try:\n        self._connection.request('POST', self._selector_url, xml_request, headers)\n        response = self._connection.getresponse()\n    except (httplib.CannotSendRequest, httplib.BadStatusLine):\n        Debug.warn('\\nRestarting socket, resending message!')\n        self._open_connection()\n        self._connection.request('POST', self._selector_url, xml_request, headers)\n        response = self._connection.getresponse()\n    data = response.read()\n    return data", "docstring": "Send a request via HTTP protocol.\n\nArgs:\nxml_request -- A fully formed xml request string for the CPS.\n\nReturns:\nThe raw xml response string.", "source": "codesearchnet"}
{"code": "def label_count(self, label_list_ids=None):\n    count = collections.defaultdict(int)\n    for utterance in self.utterances.values():\n        for (label_value, utt_count) in utterance.label_count(label_list_ids=label_list_ids).items():\n            count[label_value] += utt_count\n    return count", "docstring": "Return a dictionary containing the number of times, every label-value in this corpus is occurring.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists with an id contained in this list\nare considered.\n\nReturns:\ndict: A dictionary containing the number of occurrences with the label-value as key.", "source": "codesearchnet"}
{"code": "def signature_to_callable(self, sig):\n    base_cls = self.ctx.convert.function_type\n    ret = sig.annotations.get('return', self.ctx.convert.unsolvable)\n    if not sig.kwonly_params and (self._detailed or sig.mandatory_param_count() == sig.maximum_param_count()):\n        args = [sig.annotations.get(name, self.ctx.convert.unsolvable) for name in sig.param_names]\n        params = {abstract_utils.ARGS: self.ctx.convert.merge_values(args), abstract_utils.RET: ret}\n        params.update(enumerate(args))\n        return abstract.CallableClass(base_cls, params, self.ctx)\n    else:\n        params = {abstract_utils.ARGS: self.ctx.convert.unsolvable, abstract_utils.RET: ret}\n        return abstract.ParameterizedClass(base_cls, params, self.ctx)", "docstring": "Converts a function.Signature object into a callable object.\n\nArgs:\nsig: The signature to convert.\n\nReturns:\nAn abstract.CallableClass representing the signature, or an\nabstract.ParameterizedClass if the signature has a variable number of\narguments.", "source": "github-repos"}
{"code": "def sspro_summary(self):\n    summary = {}\n    records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro)\n    for r in records:\n        seq_summary = {}\n        seq_summary['percent_H-sspro'] = (r.seq.count('H') / float(len(r)))\n        seq_summary['percent_E-sspro'] = (r.seq.count('E') / float(len(r)))\n        seq_summary['percent_C-sspro'] = (r.seq.count('C') / float(len(r)))\n        summary[r.id] = seq_summary\n    return summary", "docstring": "Parse the SSpro output file and return a summary of secondary structure composition.\n\nThe output file is just a FASTA formatted file, so you can get residue level\ninformation by parsing it like a normal sequence file.\n\nReturns:\ndict: Percentage of:\nH: helix\nE: strand\nC: the rest", "source": "codesearchnet"}
{"code": "def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None):\n    variable_type = entities.Variable.Type.STRING\n    return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)", "docstring": "Returns value for a certain string variable attached to a feature.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nString value of the variable. None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "codesearchnet"}
{"code": "def claim(self, file_readers):\n        \n        (prefix_to_reader,\n         unclaimed_readers) = self._find_strelka_files(file_readers)\n        prefix_by_patients = self._split_prefix_by_patient(prefix_to_reader)\n        self._validate_vcf_readers(prefix_by_patients)\n        vcf_readers = self._create_vcf_readers(prefix_to_reader)\n\n        return (unclaimed_readers, vcf_readers)", "docstring": "Recognizes and claims Strelka VCFs form the set of all input VCFs.\n\nEach defined caller has a chance to evaluate and claim all the incoming\nfiles as something that it can process.\n\nArgs:\nfile_readers: the collection of currently unclaimed files\n\nReturns:\nA tuple of unclaimed readers and StrelkaVcfReaders.", "source": "juraj-google-style"}
{"code": "def op_signature_def(op, key):\n    return build_signature_def(outputs={key: utils.build_tensor_info_from_op(op)})", "docstring": "Creates a signature def with the output pointing to an op.\n\nNote that op isn't strictly enforced to be an Op object, and may be a Tensor.\nIt is recommended to use the build_signature_def() function for Tensors.\n\nArgs:\nop: An Op (or possibly Tensor).\nkey: Key to graph element in the SignatureDef outputs.\n\nReturns:\nA SignatureDef with a single output pointing to the op.", "source": "github-repos"}
{"code": "def add_object_to_path(self, obj, location):\n    location = self._handle_location(location)\n    location.append(obj.as_list_data())\n    results = [item for item in location.getchildren() if (item.findtext('id') == obj.id)][0]\n    return results", "docstring": "Add an object of type JSSContainerObject to location.\n\nThis method determines the correct list representation of an\nobject and adds it to \"location\". For example, add a Computer to\na ComputerGroup. The ComputerGroup will not have a child\nComputers/Computer tag with subelements \"name\" and \"id\".\n\nArgs:\nobj: A JSSContainerObject subclass.\nlocation: Element or a string path argument to find()\n\nReturns:\nElement for the object just added.", "source": "codesearchnet"}
{"code": "def report(\n    vulnerabilities,\n    fileobj,\n    print_sanitised,\n):\n    \n    TZ_AGNOSTIC_FORMAT = \"%Y-%m-%dT%H:%M:%SZ\"\n    time_string = datetime.utcnow().strftime(TZ_AGNOSTIC_FORMAT)\n\n    machine_output = {\n        'generated_at': time_string,\n        'vulnerabilities': [\n            vuln.as_dict() for vuln in vulnerabilities\n            if print_sanitised or not isinstance(vuln, SanitisedVulnerability)\n        ]\n    }\n\n    result = json.dumps(\n        machine_output,\n        indent=4\n    )\n\n    with fileobj:\n        fileobj.write(result)", "docstring": "Prints issues in JSON format.\nArgs:\nvulnerabilities: list of vulnerabilities to report\nfileobj: The output file object, which may be sys.stdout", "source": "juraj-google-style"}
{"code": "def consume(self, callback, queue):\n        \n        self.consumers[queue] = callback\n        if self._client_ready.called:\n            return self.client.consume(callback, queue)", "docstring": "Register a new consumer.\n\nThis consumer will be configured for every protocol this factory\nproduces so it will be reconfigured on network failures. If a connection\nis already active, the consumer will be added to it.\n\nArgs:\ncallback (callable): The callback to invoke when a message arrives.\nqueue (str): The name of the queue to consume from.", "source": "juraj-google-style"}
{"code": "def from_json(cls, data):\n        \n        assert 'name' in data, 'Required keyword \"name\" is missing!'\n        assert 'data_type' in data, 'Required keyword \"data_type\" is missing!'\n        if cls._type_enumeration is None:\n            cls._type_enumeration = _DataTypeEnumeration(import_modules=False)\n\n        if data['data_type'] == 'GenericType':\n            assert 'base_unit' in data, \\\n                'Keyword \"base_unit\" is missing and is required for GenericType.'\n            return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])\n        elif data['data_type'] in cls._type_enumeration._TYPES:\n            clss = cls._type_enumeration._TYPES[data['data_type']]\n            if data['data_type'] == data['name'].title().replace(' ', ''):\n                return clss()\n            else:\n                instance = clss()\n                instance._name = data['name']\n                return instance\n        else:\n            raise ValueError(\n                'Data Type {} could not be recognized'.format(data['data_type']))", "docstring": "Create a data type from a dictionary.\n\nArgs:\ndata: Data as a dictionary.\n{\n\"name\": data type name of the data type as a string\n\"data_type\": the class name of the data type as a string\n\"base_unit\": the base unit of the data type\n}", "source": "juraj-google-style"}
{"code": "def get_vocabulary(self, include_special_tokens=True):\n    if self.lookup_table.size() == 0:\n        vocab, indices = ([], [])\n    else:\n        keys, values = self.lookup_table.export()\n        vocab, indices = (values, keys) if self.invert else (keys, values)\n        vocab, indices = (self._tensor_vocab_to_numpy(vocab), indices.numpy())\n    lookup = collections.defaultdict(lambda: self.oov_token, zip(indices, vocab))\n    vocab = [lookup[x] for x in range(self.vocabulary_size())]\n    if self.mask_token is not None and self.output_mode == 'int':\n        vocab[0] = self.mask_token\n    if not include_special_tokens:\n        vocab = vocab[self._token_start_index():]\n    if self.vocabulary_dtype == 'string':\n        return [i.decode('utf-8') if isinstance(i, bytes) else i for i in vocab]\n    else:\n        return vocab", "docstring": "Returns the current vocabulary of the layer.\n\nArgs:\ninclude_special_tokens: If `True`, the returned vocabulary\nwill include mask and OOV tokens,\nand a term's index in the vocabulary\nwill equal the term's index when calling the layer.\nIf `False`, the returned vocabulary will not include\nany mask or OOV tokens.", "source": "github-repos"}
{"code": "def occurs_in_type(v, type2):\n    pruned_type2 = prune(type2)\n    if (pruned_type2 == v):\n        return True\n    elif isinstance(pruned_type2, TypeOperator):\n        return occurs_in(v, pruned_type2.types)\n    return False", "docstring": "Checks whether a type variable occurs in a type expression.\n\nNote: Must be called with v pre-pruned\n\nArgs:\nv:  The TypeVariable to be tested for\ntype2: The type in which to search\n\nReturns:\nTrue if v occurs in type2, otherwise False", "source": "codesearchnet"}
{"code": "def _GenerateSshKey(self, key_type, key_dest):\n    with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp:\n        temp_key = temp.name\n    command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q']\n    try:\n        self.logger.info('Generating SSH key %s.', key_dest)\n        subprocess.check_call(command)\n    except subprocess.CalledProcessError:\n        self.logger.warning('Could not create SSH key %s.', key_dest)\n        return\n    shutil.move(temp_key, key_dest)\n    shutil.move(('%s.pub' % temp_key), ('%s.pub' % key_dest))\n    file_utils.SetPermissions(key_dest, mode=384)\n    file_utils.SetPermissions(('%s.pub' % key_dest), mode=420)", "docstring": "Generate a new SSH key.\n\nArgs:\nkey_type: string, the type of the SSH key.\nkey_dest: string, a file location to store the SSH key.", "source": "codesearchnet"}
{"code": "def assert_positive_definite(self, name='assert_positive_definite'):\n    with self._name_scope(name):\n        return self._assert_positive_definite()", "docstring": "Returns an `Op` that asserts this operator is positive definite.\n\nHere, positive definite means that the quadratic form `x^H A x` has positive\nreal part for all nonzero `x`.  Note that we do not require the operator to\nbe self-adjoint to be positive definite.\n\nArgs:\nname:  A name to give this `Op`.\n\nReturns:\nAn `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\nthe operator is not positive definite.", "source": "github-repos"}
{"code": "def createEditor(self, parent, option, index):\n        \n        editor = BigIntSpinbox(parent)\n        try:\n            editor.setMinimum(self.minimum)\n            editor.setMaximum(self.maximum)\n            editor.setSingleStep(self.singleStep)\n        except TypeError as err:\n            \n            pass\n        return editor", "docstring": "Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears.\n\nArgs:\nparent (QWidget): parent widget.\noption (QStyleOptionViewItem): controls how editor widget appears.\nindex (QModelIndex): model data index.", "source": "juraj-google-style"}
{"code": "def get_device_policy():\n    device_policy = context.context().device_policy\n    if device_policy == context.DEVICE_PLACEMENT_SILENT:\n        return 'silent'\n    elif device_policy == context.DEVICE_PLACEMENT_SILENT_FOR_INT32:\n        return 'silent_for_int32'\n    elif device_policy == context.DEVICE_PLACEMENT_WARN:\n        return 'warn'\n    elif device_policy == context.DEVICE_PLACEMENT_EXPLICIT:\n        return 'explicit'\n    else:\n        raise errors.InternalError(f'Got an invalid device policy: {device_policy!r}.')", "docstring": "Gets the current device policy.\n\nThe device policy controls how operations requiring inputs on a specific\ndevice (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).\n\nThis function only gets the device policy for the current thread. Any\nsubsequently started thread will again use the default policy.\n\nReturns:\nCurrent thread device policy", "source": "github-repos"}
{"code": "def plot_all_stability_map(self, max_r, increments=50, delu_dict=None, delu_default=0, plt=None, labels=None, from_sphere_area=False, e_units='keV', r_units='nanometers', normalize=False, scale_per_atom=False):\n    plt = (plt if plt else pretty_plot(width=8, height=7))\n    for (i, analyzer) in enumerate(self.se_analyzers):\n        label = (labels[i] if labels else '')\n        plt = self.plot_one_stability_map(analyzer, max_r, delu_dict, label=label, plt=plt, increments=increments, delu_default=delu_default, from_sphere_area=from_sphere_area, e_units=e_units, r_units=r_units, normalize=normalize, scale_per_atom=scale_per_atom)\n    return plt", "docstring": "Returns the plot of the formation energy of a particles\nof different polymorphs against its effect radius\n\nArgs:\nmax_r (float): The maximum radius of the particle to plot up to.\nincrements (int): Number of plot points\ndelu_dict (Dict): Dictionary of the chemical potentials to be set as\nconstant. Note the key should be a sympy Symbol object of the\nformat: Symbol(\"delu_el\") where el is the name of the element.\ndelu_default (float): Default value for all unset chemical potentials\nplt (pylab): Plot\nlabels (list): List of labels for each plot, corresponds to the\nlist of se_analyzers\nfrom_sphere_area (bool): There are two ways to calculate the bulk\nformation energy. Either by treating the volume and thus surface\narea of the particle as a perfect sphere, or as a Wulff shape.", "source": "codesearchnet"}
{"code": "def activate(self, uid=None):\n        \n        \n        if uid is not None:\n            if not isinstance(uid, six.string_types):\n                raise TypeError(\"uid must be a string\")\n\n        \n        result = self.proxy.activate(uid)\n\n        status = result.result_status.value\n        if status == enums.ResultStatus.SUCCESS:\n            return\n        else:\n            reason = result.result_reason.value\n            message = result.result_message.value\n            raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Activate a managed object stored by a KMIP appliance.\n\nArgs:\nuid (string): The unique ID of the managed object to activate.\nOptional, defaults to None.\n\nReturns:\nNone\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input argument is invalid", "source": "juraj-google-style"}
{"code": "def register_command_handler(self, prefix, handler, help_info, prefix_aliases=None):\n    self._command_handler_registry.register_command_handler(prefix, handler, help_info, prefix_aliases=prefix_aliases)\n    self._tab_completion_registry.extend_comp_items('', [prefix])\n    if prefix_aliases:\n        self._tab_completion_registry.extend_comp_items('', prefix_aliases)", "docstring": "A wrapper around CommandHandlerRegistry.register_command_handler().\n\nIn addition to calling the wrapped register_command_handler() method, this\nmethod also registers the top-level tab-completion context based on the\ncommand prefixes and their aliases.\n\nSee the doc string of the wrapped method for more details on the args.\n\nArgs:\nprefix: (str) command prefix.\nhandler: (callable) command handler.\nhelp_info: (str) help information.\nprefix_aliases: (list of str) aliases of the command prefix.", "source": "github-repos"}
{"code": "def to_csv(pipe: BeamEventSet, file_path_prefix: str, schema: Schema, timestamp_key: str='timestamp', **wargs):\n    header_values = [timestamp_key] + schema.index_names() + schema.feature_names()\n    header_string = io.StringIO()\n    header_writer = csv.writer(header_string)\n    header_writer.writerow(header_values)\n    return add_feature_idx_and_flatten(pipe) | 'Group by features' >> beam.GroupByKey() | 'Convert to csv' >> beam.Map(_convert_to_csv) | 'Write csv' >> beam.io.textio.WriteToText(file_path_prefix=file_path_prefix, header=header_string.getvalue(), append_trailing_newlines=False, **wargs)", "docstring": "Writes a Beam EventSet to a file or set of csv files.\n\nLimitation: Timestamps are always stored as numerical values.\nTODO: Support datetime timestamps.\n\nUsage example:\n\n```\ninput_node: tp.EventSetNode = ...\n( p\n| tpb.from_csv(\"/input.csv\", input_node.schema)\n| ... # processing\n| tpb.to_csv(\"/output.csv\", output_node.schema)\n)\n```\n\nArgs:\npipe: Beam pipe containing an EventSet.\nfile_path_prefix: Path or path matching expression compatible with\nWriteToText.\nschema: Schema of the data. If you have a Temporian node, the schema is\navailable with `node.schema`.\ntimestamp_key: Key containing the timestamps.\n**wargs: Arguments passed to `beam.io.textio.WriteToText`.", "source": "github-repos"}
{"code": "def sub_chempots(gamma_dict, chempots):\n    \n\n    coeffs = [gamma_dict[k] for k in gamma_dict.keys()]\n    chempot_vals = []\n    for k in gamma_dict.keys():\n        if k not in chempots.keys():\n            chempot_vals.append(k)\n        elif k == 1:\n            chempot_vals.append(1)\n        else:\n            chempot_vals.append(chempots[k])\n\n    return np.dot(coeffs, chempot_vals)", "docstring": "Uses dot product of numpy array to sub chemical potentials\ninto the surface grand potential. This is much faster\nthan using the subs function in sympy.\nArgs:\ngamma_dict (dict): Surface grand potential equation\nas a coefficient dictionary\nchempots (dict): Dictionary assigning each chemical\npotential (key) in gamma a value\nReturns:\nSurface energy as a float", "source": "juraj-google-style"}
{"code": "def get_cbm_vbm(self, tol=0.001, abs_tol=False, spin=None):\n        \n        \n        if spin is None:\n            tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1)\n        elif spin == Spin.up:\n            tdos = self.y[:, 0]\n        else:\n            tdos = self.y[:, 1]\n\n        if not abs_tol:\n            tol = tol * tdos.sum() / tdos.shape[0]\n\n        \n        i_fermi = 0\n        while self.x[i_fermi] <= self.efermi:\n            i_fermi += 1\n\n        \n        i_gap_start = i_fermi\n        while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:\n            i_gap_start -= 1\n\n        \n        i_gap_end = i_gap_start\n        while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:\n            i_gap_end += 1\n        i_gap_end -= 1\n        return self.x[i_gap_end], self.x[i_gap_start]", "docstring": "Expects a DOS object and finds the cbm and vbm.\n\nArgs:\ntol: tolerance in occupations for determining the gap\nabs_tol: An absolute tolerance (True) and a relative one (False)\nspin: Possible values are None - finds the gap in the summed\ndensities, Up - finds the gap in the up spin channel,\nDown - finds the gap in the down spin channel.\n\nReturns:\n(cbm, vbm): float in eV corresponding to the gap", "source": "juraj-google-style"}
{"code": "def get_layer_timing_signal_learned_1d(channels, layer, num_layers):\n  \n  shape = [num_layers, 1, 1, channels]\n  layer_embedding = (\n      tf.get_variable(\n          \"layer_embedding\",\n          shape,\n          initializer=tf.random_normal_initializer(0, channels**-0.5)) *\n      (channels**0.5))\n  return layer_embedding[layer, :, :, :]", "docstring": "get n-dimensional embedding as the layer (vertical) timing signal.\n\nAdds embeddings to represent the position of the layer in the tower.\n\nArgs:\nchannels: dimension of the timing signal\nlayer: layer num\nnum_layers: total number of layers\n\nReturns:\na Tensor of timing signals [1, 1, channels].", "source": "juraj-google-style"}
{"code": "def ready(self, node_id, metadata_priority=True):\n    self.maybe_connect(node_id)\n    return self.is_ready(node_id, metadata_priority=metadata_priority)", "docstring": "Check whether a node is connected and ok to send more requests.\n\nArguments:\nnode_id (int): the id of the node to check\nmetadata_priority (bool): Mark node as not-ready if a metadata\nrefresh is required. Default: True\n\nReturns:\nbool: True if we are ready to send to the given node", "source": "codesearchnet"}
{"code": "def as_object(obj):\n    \n    LOGGER.debug('as_object(%s)', obj)\n\n    if isinstance(obj, datetime.date):\n        return as_date(obj)\n\n    elif hasattr(obj, '__dict__'):\n\n        \n        out = {k: obj.__dict__[k] for k in obj.__dict__ if not k.startswith('_')}\n\n        \n        for k, v in (\n                (p, getattr(obj, p))\n                for p, _ in inspect.getmembers(\n                    obj.__class__,\n                    lambda x: isinstance(x, property))\n        ):\n            out[k] = v\n\n        return out", "docstring": "Return a JSON serializable type for ``o``.\n\nArgs:\nobj (:py:class:`object`): the object to be serialized.\n\nRaises:\n:py:class:`AttributeError`:\nwhen ``o`` is not a Python object.\n\nReturns:\n(dict): JSON serializable type for the given object.", "source": "juraj-google-style"}
{"code": "def map_seqprop_resnums_to_structprop_resnums(self, resnums, seqprop=None, structprop=None, chain_id=None, use_representatives=False):\n    resnums = ssbio.utils.force_list(resnums)\n    if use_representatives:\n        seqprop = self.representative_sequence\n        structprop = self.representative_structure\n        chain_id = self.representative_chain\n        if (not structprop):\n            raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')\n    elif ((not seqprop) or (not structprop) or (not chain_id)):\n        raise ValueError('Please specify sequence, structure, and chain ID')\n    mapping_to_repchain_index = self._map_seqprop_resnums_to_structprop_chain_index(resnums=resnums, seqprop=seqprop, structprop=structprop, chain_id=chain_id, use_representatives=use_representatives)\n    chain = structprop.chains.get_by_id(chain_id)\n    chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']\n    final_mapping = {}\n    for (k, v) in mapping_to_repchain_index.items():\n        k = int(k)\n        rn = chain_structure_resnum_mapping[v]\n        if (rn == float('Inf')):\n            log.warning('{}-{}, {}: structure file does not contain coordinates for this residue'.format(structprop.id, chain_id, k))\n        else:\n            rn = int(rn)\n            final_mapping[k] = rn\n            index_of_structure_resnum = chain_structure_resnum_mapping.index(rn)\n            format_data = {'seqprop_id': seqprop.id, 'seqprop_resid': seqprop[(k - 1)], 'seqprop_resnum': k, 'structprop_id': structprop.id, 'structprop_chid': chain_id, 'structprop_resid': chain.seq_record[index_of_structure_resnum], 'structprop_resnum': rn}\n            if (seqprop[(k - 1)] != chain.seq_record[index_of_structure_resnum]):\n                log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to structure {structprop_id}-{structprop_chid} residue {structprop_resid}{structprop_resnum}. NOTE: this may be due to structural differences'.format(**format_data))\n            else:\n                log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to structure {structprop_id}-{structprop_chid} residue {structprop_resid}{structprop_resnum}'.format(**format_data))\n    return final_mapping", "docstring": "Map a residue number in any SeqProp to the structure's residue number for a specified chain.\n\nArgs:\nresnums (int, list): Residue numbers in the sequence\nseqprop (SeqProp): SeqProp object\nstructprop (StructProp): StructProp object\nchain_id (str): Chain ID to map to\nuse_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,\nstructprop, and chain_id do not need to be defined.\n\nReturns:\ndict: Mapping of sequence residue numbers to structure residue numbers", "source": "codesearchnet"}
{"code": "def __init__(self, flag_desc, help):\n    \n    self.desc = flag_desc               \n    self.help = help                    \n    self.default = ''                   \n    self.tips = ''", "docstring": "Create the flag object.\nArgs:\nflag_desc  The command line forms this could take. (string)\nhelp       The help text (string)", "source": "juraj-google-style"}
{"code": "def scan_file(path):\n    \n    path = os.path.abspath(path)\n    if settings.USE_CLAMD:\n        return clamd.scan_file(path)\n    else:\n        return clamscan.scan_file(path)", "docstring": "Scan `path` for viruses using ``clamd`` or ``clamscan`` (depends on\n:attr:`settings.USE_CLAMD`.\n\nArgs:\npath (str): Relative or absolute path of file/directory you need to\nscan.\n\nReturns:\ndict: ``{filename: (\"FOUND\", \"virus type\")}`` or blank dict.\n\nRaises:\nValueError: When the server is not running.\nAssertionError: When the internal file doesn't exists.", "source": "juraj-google-style"}
{"code": "def type_check(type_constraint, datum, is_input):\n    datum_type = 'input' if is_input else 'output'\n    try:\n        check_constraint(type_constraint, datum)\n    except CompositeTypeHintError as e:\n        _, _, tb = sys.exc_info()\n        raise TypeCheckError(e.args[0]).with_traceback(tb)\n    except SimpleTypeHintError:\n        error_msg = \"According to type-hint expected %s should be of type %s. Instead, received '%s', an instance of type %s.\" % (datum_type, type_constraint, datum, type(datum))\n        _, _, tb = sys.exc_info()\n        raise TypeCheckError(error_msg).with_traceback(tb)", "docstring": "Typecheck a PTransform related datum according to a type constraint.\n\nThis function is used to optionally type-check either an input or an output\nto a PTransform.\n\nArgs:\ntype_constraint: An instance of a typehints.TypeContraint, one of the\nwhite-listed builtin Python types, or a custom user class.\ndatum: An instance of a Python object.\nis_input: True if 'datum' is an input to a PTransform's DoFn. False\notherwise.\n\nRaises:\nTypeError: If 'datum' fails to type-check according to 'type_constraint'.", "source": "github-repos"}
{"code": "def account_id(self, value):\n        \n        if value == self._defaults['ai.user.accountId'] and 'ai.user.accountId' in self._values:\n            del self._values['ai.user.accountId']\n        else:\n            self._values['ai.user.accountId'] = value", "docstring": "The account_id property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"}
{"code": "def _remove_duplicate_points(points, groups):\n    group_initial_ids = groups[(:, GPFIRST)]\n    to_be_reduced = np.zeros(len(group_initial_ids))\n    to_be_removed = []\n    for (ig, g) in enumerate(groups):\n        (iid, typ, pid) = (g[GPFIRST], g[GTYPE], g[GPID])\n        if ((pid != (- 1)) and (typ != 1) and (groups[pid][GTYPE] != 1)):\n            to_be_removed.append(iid)\n            to_be_reduced[(ig + 1):] += 1\n    groups[(:, GPFIRST)] = (groups[(:, GPFIRST)] - to_be_reduced)\n    points = np.delete(points, to_be_removed, axis=0)\n    return (points, groups)", "docstring": "Removes the duplicate points from the beginning of a section,\nif they are present in points-groups representation.\n\nReturns:\npoints, groups with unique points.", "source": "codesearchnet"}
{"code": "def get_chromosomes(self, sv=False):\n        \n        if sv:\n            res = self.db.structural_variant.distinct('chrom')\n        else:\n            res = self.db.variant.distinct('chrom')\n            \n        return res", "docstring": "Return a list of all chromosomes found in database\n\nArgs:\nsv(bool): if sv variants should be choosen\n\nReturns:\nres(iterable(str)): An iterable with all chromosomes in the database", "source": "juraj-google-style"}
{"code": "def sample(self, size=None):\n        \n        self._recompute()\n        if size is None:\n            n = np.random.randn(len(self._t))\n        else:\n            n = np.random.randn(len(self._t), size)\n        n = self.solver.dot_L(n)\n        if size is None:\n            return self.mean.get_value(self._t) + n[:, 0]\n        return self.mean.get_value(self._t)[None, :] + n.T", "docstring": "Sample from the prior distribution over datasets\n\nArgs:\nsize (Optional[int]): The number of samples to draw.\n\nReturns:\narray[n] or array[size, n]: The samples from the prior\ndistribution over datasets.", "source": "juraj-google-style"}
{"code": "def addon_name(self):\n    with self.selenium.context(self.selenium.CONTEXT_CHROME):\n        el = self.find_description()\n        return el.find_element(By.CSS_SELECTOR, 'b').text", "docstring": "Provide access to the add-on name.\n\nReturns:\nstr: Add-on name.", "source": "codesearchnet"}
{"code": "def plot_structures(self, structures, fontsize=6, **kwargs):\n    import matplotlib.pyplot as plt\n    nrows = len(structures)\n    (fig, axes) = plt.subplots(nrows=nrows, ncols=1, sharex=True, squeeze=False)\n    for (i, (ax, structure)) in enumerate(zip(axes.ravel(), structures)):\n        self.get_plot(structure, fontsize=fontsize, ax=ax, with_labels=(i == (nrows - 1)), **kwargs)\n        (spg_symbol, spg_number) = structure.get_space_group_info()\n        ax.set_title('{} {} ({}) '.format(structure.formula, spg_symbol, spg_number))\n    return fig", "docstring": "Plot diffraction patterns for multiple structures on the same figure.\n\nArgs:\nstructures (Structure): List of structures\ntwo_theta_range ([float of length 2]): Tuple for range of\ntwo_thetas to calculate in degrees. Defaults to (0, 90). Set to\nNone if you want all diffracted beams within the limiting\nsphere of radius 2 / wavelength.\nannotate_peaks (bool): Whether to annotate the peaks with plane\ninformation.\nfontsize: (int) fontsize for peak labels.", "source": "codesearchnet"}
{"code": "def _set_resultdir(name=None):\n    resultdir_name = (name or ('enos_' + datetime.today().isoformat()))\n    resultdir_path = os.path.abspath(resultdir_name)\n    if os.path.isfile(resultdir_path):\n        raise EnosFilePathError(resultdir_path, ('Result directory cannot be created due to existing file %s' % resultdir_path))\n    if (not os.path.isdir(resultdir_path)):\n        os.mkdir(resultdir_path)\n        logger.info(('Generate results directory %s' % resultdir_path))\n    link_path = SYMLINK_NAME\n    if os.path.lexists(link_path):\n        os.remove(link_path)\n    try:\n        os.symlink(resultdir_path, link_path)\n        logger.info(('Symlink %s to %s' % (resultdir_path, link_path)))\n    except OSError:\n        logger.warning(('Symlink %s to %s failed' % (resultdir_path, link_path)))\n    return resultdir_path", "docstring": "Set or get the directory to store experiment results.\n\n\nLooks at the `name` and create the directory if it doesn\"t exist\nor returns it in other cases. If the name is `None`, then the\nfunction generates an unique name for the results directory.\nFinally, it links the directory to `SYMLINK_NAME`.\n\nArgs:\nname (str): file path to an existing directory. It could be\nweather an absolute or a relative to the current working\ndirectory.\n\nReturns:\nthe file path of the results directory.", "source": "codesearchnet"}
{"code": "def _create_variables_and_slots(self) -> Dict[Text, Dict[Text, tf_variables.Variable]]:\n    variables = {}\n    for table in self._table_config:\n        variables[table.name] = self._create_variables(table, trainable=True)\n    return variables", "docstring": "Create variables for TPU embeddings.\n\nNote that this will always ensure that the variable is created under the\nTPUStrategy.\n\nReturns:\nA dict of dicts. The outer dict is keyed by the table names and the inner\ndicts are keyed by 'parameters' and the slot variable names.", "source": "github-repos"}
{"code": "def is_video(mime: str) -> bool:\n    return mime in INPUT_VIDEO_TYPES or mime.startswith('video/')", "docstring": "Returns whether the content is a video.\n\nArgs:\nmime: The mime string.\n\nReturns:\nTrue of it is a video, False otherwise.", "source": "github-repos"}
{"code": "def matches_filters(self, node):\n    visible = self.visible\n    if self.options['text']:\n        if isregex(self.options['text']):\n            regex = self.options['text']\n        elif (self.exact_text is True):\n            regex = re.compile('\\\\A{}\\\\Z'.format(re.escape(self.options['text'])))\n        else:\n            regex = toregex(self.options['text'])\n        text = normalize_text((node.all_text if (visible == 'all') else node.visible_text))\n        if (not regex.search(text)):\n            return False\n    if isinstance(self.exact_text, (bytes_, str_)):\n        regex = re.compile('\\\\A{}\\\\Z'.format(re.escape(self.exact_text)))\n        text = normalize_text((node.all_text if (visible == 'all') else node.visible_text))\n        if (not regex.search(text)):\n            return False\n    if (visible == 'visible'):\n        if (not node.visible):\n            return False\n    elif (visible == 'hidden'):\n        if node.visible:\n            return False\n    for (name, node_filter) in iter(self._node_filters.items()):\n        if (name in self.filter_options):\n            if (not node_filter.matches(node, self.filter_options[name])):\n                return False\n        elif node_filter.has_default:\n            if (not node_filter.matches(node, node_filter.default)):\n                return False\n    if (self.options['filter'] and (not self.options['filter'](node))):\n        return False\n    return True", "docstring": "Returns whether the given node matches all filters.\n\nArgs:\nnode (Element): The node to evaluate.\n\nReturns:\nbool: Whether the given node matches.", "source": "codesearchnet"}
{"code": "def no_results(channel):\n    gui = ui_embed.UI(channel, 'No results', ':c', modulename=modulename, colour=16746496)\n    return gui", "docstring": "Creates an embed UI for when there were no results\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\n\nReturns:\nui (ui_embed.UI): The embed UI object", "source": "codesearchnet"}
{"code": "def DeletePendingNotification(self, timestamp):\n    shown_notifications = self.Get(self.Schema.SHOWN_NOTIFICATIONS)\n    if (not shown_notifications):\n        shown_notifications = self.Schema.SHOWN_NOTIFICATIONS()\n    pending = self.Get(self.Schema.PENDING_NOTIFICATIONS)\n    if (not pending):\n        return\n    delete_count = 0\n    for idx in reversed(range(0, len(pending))):\n        if (pending[idx].timestamp == timestamp):\n            shown_notifications.Append(pending[idx])\n            pending.Pop(idx)\n            delete_count += 1\n    if (delete_count > 1):\n        raise UniqueKeyError(('Multiple notifications at %s' % timestamp))\n    self.Set(self.Schema.PENDING_NOTIFICATIONS, pending)\n    self.Set(self.Schema.SHOWN_NOTIFICATIONS, shown_notifications)", "docstring": "Deletes the pending notification with the given timestamp.\n\nArgs:\ntimestamp: The timestamp of the notification. Assumed to be unique.\n\nRaises:\nUniqueKeyError: Raised if multiple notifications have the timestamp.", "source": "codesearchnet"}
{"code": "def get(self, id):\n        \n        for obj in self.model.db:\n            if obj[\"id\"] == id:\n                return self._cast_model(obj)\n\n        return None", "docstring": "Get a object by id\nArgs:\nid (int): Object id\n\nReturns:\nObject: Object with specified id\nNone: If object not found", "source": "juraj-google-style"}
{"code": "def save(self, clean=True):\n        \n        ret = {}\n        if clean:\n            self._dirty = False\n        else:\n            ret['_dirty'] = self._dirty\n        return ret", "docstring": "Serialize into raw representation. Clears the dirty bit by default.\n\nArgs:\nclean (bool): Whether to clear the dirty bit.\n\nReturns:\ndict: Raw.", "source": "juraj-google-style"}
{"code": "def Run(self, conf, args):\n    try:\n        options, args = self.parser.parse_args(args)\n    except SystemExit as e:\n        return e.code\n    if options.maps:\n        self.log.info('Setting configured maps to %s', options.maps)\n        conf.maps = options.maps\n    for map_name in conf.maps:\n        if map_name == config.MAP_AUTOMOUNT:\n            value_list = self.GetAutomountMapMetadata(conf, epoch=options.epoch)\n            self.log.debug('Value list: %r', value_list)\n            for value_dict in value_list:\n                self.log.debug('Value dict: %r', value_dict)\n                output = options.automount_template % value_dict\n                print(output)\n        else:\n            for value_dict in self.GetSingleMapMetadata(map_name, conf, epoch=options.epoch):\n                self.log.debug('Value dict: %r', value_dict)\n                output = options.template % value_dict\n                print(output)\n    return os.EX_OK", "docstring": "Run the Status command.\n\nSee Command.Run() for full documentation on the Run() method.\n\nArgs:\nconf: nss_cache.config.Config object\nargs: list of arguments to be parsed by this command\n\nReturns:\nzero on success, nonzero on error", "source": "github-repos"}
{"code": "def _get_resource_hash(zone_name, record):\n        \n        record_data = defaultdict(int, record)\n        if type(record_data['GeoLocation']) == dict:\n            record_data['GeoLocation'] = \":\".join([\"{}={}\".format(k, v) for k, v in record_data['GeoLocation'].items()])\n\n        args = [\n            zone_name,\n            record_data['Name'],\n            record_data['Type'],\n            record_data['Weight'],\n            record_data['Region'],\n            record_data['GeoLocation'],\n            record_data['Failover'],\n            record_data['HealthCheckId'],\n            record_data['TrafficPolicyInstanceId']\n        ]\n\n        return get_resource_id('r53r', args)", "docstring": "Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique\nresource IDs\n\nArgs:\nzone_name (`str`): The name of the DNS Zone the record belongs to\nrecord (`dict`): A record dict to generate the hash from\n\nReturns:\n`str`", "source": "juraj-google-style"}
{"code": "def FindExecutableOnPath(executable, path=None, pathext=None, allow_extensions=False):\n    if not allow_extensions and os.path.splitext(executable)[1]:\n        raise ValueError('FindExecutableOnPath({0},...) failed because first argument must not have an extension.'.format(executable))\n    if os.path.dirname(executable):\n        raise ValueError('FindExecutableOnPath({0},...) failed because first argument must not have a path.'.format(executable))\n    if path is None:\n        effective_path = _GetSystemPath()\n    else:\n        effective_path = path\n    effective_pathext = pathext if pathext is not None else _PlatformExecutableExtensions(platforms.OperatingSystem.Current())\n    return _FindExecutableOnPath(executable, effective_path, effective_pathext)", "docstring": "Searches for `executable` in the directories listed in `path` or $PATH.\n\nExecutable must not contain a directory or an extension.\n\nArgs:\nexecutable: The name of the executable to find.\npath: A list of directories to search separated by 'os.pathsep'.  If None\nthen the system PATH is used.\npathext: An iterable of file name extensions to use.  If None then\nplatform specific extensions are used.\nallow_extensions: A boolean flag indicating whether extensions in the\nexecutable are allowed.\n\nReturns:\nThe path of 'executable' (possibly with a platform-specific extension) if\nfound and executable, None if not found.\n\nRaises:\nValueError: if executable has a path or an extension, and extensions are\nnot allowed, or if there's an internal error.", "source": "github-repos"}
{"code": "def _sign_of(money):\n    units = money.units\n    nanos = money.nanos\n    if units:\n        if (units > 0):\n            return 1\n        elif (units < 0):\n            return (- 1)\n    if nanos:\n        if (nanos > 0):\n            return 1\n        elif (nanos < 0):\n            return (- 1)\n    return 0", "docstring": "Determines the amount sign of a money instance\n\nArgs:\nmoney (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the\ninstance to test\n\nReturn:\nint: 1, 0 or -1", "source": "codesearchnet"}
{"code": "def get_html_titles(index_page):\n    \n    dom = dhtmlparser.parseString(index_page)\n\n    title_tags = dom.find(\"title\")\n\n    return [\n        SourceString(tag.getContent().strip(), \"HTML\")\n        for tag in title_tags\n        if tag.getContent().strip()\n    ]", "docstring": "Return list of titles parsed from HTML.\n\nArgs:\nindex_page (str): HTML content of the page you wish to analyze.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "juraj-google-style"}
{"code": "def _get_vep_transcript(self, transcript_info):\n        \n        transcript = Transcript(\n                hgnc_symbol = transcript_info.get('SYMBOL'),\n                transcript_id = transcript_info.get('Feature'),\n                ensembl_id = transcript_info.get('Gene'),\n                biotype = transcript_info.get('BIOTYPE'),\n                consequence = transcript_info.get('Consequence'),\n                strand = transcript_info.get('STRAND'),\n                sift = transcript_info.get('SIFT'),\n                polyphen = transcript_info.get('PolyPhen'),\n                exon = transcript_info.get('EXON'),\n                HGVSc = transcript_info.get('HGVSc'),\n                HGVSp = transcript_info.get('HGVSp'),\n                GMAF = transcript_info.get('GMAF'),\n                ExAC_MAF = transcript_info.get('ExAC_MAF')\n            )\n        return transcript", "docstring": "Create a Transcript based on the vep annotation\n\nArgs:\ntranscript_info (dict): A dict with vep info\n\nReturns:\ntranscript (puzzle.models.Transcript): A Transcripts", "source": "juraj-google-style"}
{"code": "def filterbanks(num_filter, coefficients, sampling_freq, low_freq=None, high_freq=None):\n    high_freq = (high_freq or (sampling_freq / 2))\n    low_freq = (low_freq or 300)\n    s = 'High frequency cannot be greater than half of the sampling frequency!'\n    assert (high_freq <= (sampling_freq / 2)), s\n    assert (low_freq >= 0), 'low frequency cannot be less than zero!'\n    mels = np.linspace(functions.frequency_to_mel(low_freq), functions.frequency_to_mel(high_freq), (num_filter + 2))\n    hertz = functions.mel_to_frequency(mels)\n    freq_index = np.floor((((coefficients + 1) * hertz) / sampling_freq)).astype(int)\n    filterbank = np.zeros([num_filter, coefficients])\n    for i in range(0, num_filter):\n        left = int(freq_index[i])\n        middle = int(freq_index[(i + 1)])\n        right = int(freq_index[(i + 2)])\n        z = np.linspace(left, right, num=((right - left) + 1))\n        filterbank[(i, left:(right + 1))] = functions.triangle(z, left=left, middle=middle, right=right)\n    return filterbank", "docstring": "Compute the Mel-filterbanks. Each filter will be stored in one rows.\nThe columns correspond to fft bins.\n\nArgs:\nnum_filter (int): the number of filters in the filterbank, default 20.\ncoefficients (int): (fftpoints//2 + 1). Default is 257.\nsampling_freq (float): the samplerate of the signal we are working\nwith. It affects mel spacing.\nlow_freq (float): lowest band edge of mel filters, default 0 Hz\nhigh_freq (float): highest band edge of mel filters,\ndefault samplerate/2\n\nReturns:\narray: A numpy array of size num_filter x (fftpoints//2 + 1)\nwhich are filterbank", "source": "codesearchnet"}
{"code": "def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):\n  \n  \n  _, _, _, num_channels = common_layers.shape_list(tensor1)\n  diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2))\n  _, feature_ranks = tf.math.top_k(diff_sq_sum, k=rank)\n  feature_rank = feature_ranks[-1]\n  channel_inds = tf.range(num_channels, dtype=tf.int32)\n  channel_mask = tf.equal(channel_inds, feature_rank)\n  ones_t = tf.ones(num_channels, dtype=tf.float32)\n  zeros_t = tf.zeros(num_channels, dtype=tf.float32)\n\n  interp_tensors = []\n  for coeff in coeffs:\n    curr_coeff = tf.where(channel_mask, coeff * ones_t, zeros_t)\n    interp_tensor = tensor1 + curr_coeff * (tensor2 - tensor1)\n    interp_tensors.append(interp_tensor)\n  return tf.concat(interp_tensors, axis=0)", "docstring": "Linearly interpolate channel at \"rank\" between two tensors.\n\nThe channels are ranked according to their L2 norm between tensor1[channel]\nand tensor2[channel].\n\nArgs:\ntensor1: 4-D Tensor, NHWC\ntensor2: 4-D Tensor, NHWC\ncoeffs: list of floats.\nrank: integer.\nReturns:\ninterp_latents: list of interpolated 4-D Tensors, shape=(NHWC)", "source": "juraj-google-style"}
{"code": "def _slice_single_param(param, param_event_ndims, slices, dist_batch_shape):\n  \n  \n  param_shape = tf.shape(input=param)\n  insert_ones = tf.ones(\n      [tf.size(input=dist_batch_shape) + param_event_ndims - tf.rank(param)],\n      dtype=param_shape.dtype)\n  new_param_shape = tf.concat([insert_ones, param_shape], axis=0)\n  full_batch_param = tf.reshape(param, new_param_shape)\n  param_slices = []\n  \n  \n  \n  param_dim_idx = 0\n  batch_dim_idx = 0\n  for slc in slices:\n    if slc is tf.newaxis:\n      param_slices.append(slc)\n      continue\n    if slc is Ellipsis:\n      if batch_dim_idx < 0:\n        raise ValueError('Found multiple `...` in slices {}'.format(slices))\n      param_slices.append(slc)\n      \n      num_remaining_non_newaxis_slices = sum(\n          [s is not tf.newaxis for s in slices[slices.index(Ellipsis) + 1:]])\n      batch_dim_idx = -num_remaining_non_newaxis_slices\n      param_dim_idx = batch_dim_idx - param_event_ndims\n      continue\n    \n    param_dim_size = new_param_shape[param_dim_idx]\n    batch_dim_size = dist_batch_shape[batch_dim_idx]\n    is_broadcast = batch_dim_size > param_dim_size\n    \n    if isinstance(slc, slice):\n      start, stop, step = slc.start, slc.stop, slc.step\n      if start is not None:\n        start = tf.where(is_broadcast, 0, start)\n      if stop is not None:\n        stop = tf.where(is_broadcast, 1, stop)\n      if step is not None:\n        step = tf.where(is_broadcast, 1, step)\n      param_slices.append(slice(start, stop, step))\n    else:  \n      param_slices.append(tf.where(is_broadcast, 0, slc))\n    param_dim_idx += 1\n    batch_dim_idx += 1\n  param_slices.extend([ALL_SLICE] * param_event_ndims)\n  return full_batch_param.__getitem__(param_slices)", "docstring": "Slices a single parameter of a distribution.\n\nArgs:\nparam: A `Tensor`, the original parameter to slice.\nparam_event_ndims: `int` event parameterization rank for this parameter.\nslices: A `tuple` of normalized slices.\ndist_batch_shape: The distribution's batch shape `Tensor`.\n\nReturns:\nnew_param: A `Tensor`, batch-sliced according to slices.", "source": "juraj-google-style"}
{"code": "def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':\n        \n        if data.expires_after < time.time():\n            del self.data[url]\n            data = None\n        return data", "docstring": "Checks the expiration time for data for a url.\n\nIf the data has expired, it is deleted from the cache.\n\nArgs:\nurl: url to check\ndata: page of data for that url\n\nReturns:\nvalue of either the passed data or None if it expired", "source": "juraj-google-style"}
{"code": "def install_event_handlers(self, categories=None, handlers=None):\n    if ((categories is not None) and (handlers is not None)):\n        raise ValueError('categories and handlers are mutually exclusive!')\n    from .events import get_event_handler_classes\n    if categories:\n        raise NotImplementedError()\n        handlers = [cls() for cls in get_event_handler_classes(categories=categories)]\n    else:\n        handlers = (handlers or [cls() for cls in get_event_handler_classes()])\n    self._event_handlers = handlers", "docstring": "Install the `EventHandlers for this `Node`. If no argument is provided\nthe default list of handlers is installed.\n\nArgs:\ncategories: List of categories to install e.g. base + can_change_physics\nhandlers: explicit list of :class:`EventHandler` instances.\nThis is the most flexible way to install handlers.\n\n.. note::\n\ncategories and handlers are mutually exclusive.", "source": "codesearchnet"}
{"code": "def _replica_ctx_all_reduce(self, reduce_op, value, options=None):\n    if options is None:\n        options = collective_util.Options()\n    replica_context = get_replica_context()\n    assert replica_context, '`StrategyExtended._replica_ctx_all_reduce` must be called in a replica context'\n\n    def merge_fn(_, flat_value):\n        return self.batch_reduce_to(reduce_op, [(v, v) for v in flat_value], options)\n    reduced = replica_context.merge_call(merge_fn, args=(nest.flatten(value),))\n    return nest.pack_sequence_as(value, reduced)", "docstring": "All-reduce `value` across all replicas so that all get the final result.\n\nIf `value` is a nested structure of tensors, all-reduces of these tensors\nwill be batched when possible. `options` can be set to hint the batching\nbehavior.\n\nThis API must be called in a replica context.\n\nArgs:\nreduce_op: A `tf.distribute.ReduceOp` value specifying how values should\nbe combined.\nvalue: Value to be reduced. A tensor or a nested structure of tensors.\noptions: A `tf.distribute.experimental.CommunicationOptions`. Options to\nperform collective operations. This overrides the default options if the\n`tf.distribute.Strategy` takes one in the constructor.\n\nReturns:\nA tensor or a nested structure of tensors with the reduced values. The\nstructure is the same as `value`.", "source": "github-repos"}
{"code": "class HfDeepSpeedConfig(DeepSpeedConfig):\n\n    def __init__(self, config_file_or_dict):\n        set_hf_deepspeed_config(self)\n        dep_version_check('accelerate')\n        dep_version_check('deepspeed')\n        super().__init__(config_file_or_dict)", "docstring": "This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.\n\nA `weakref` of this object is stored in the module's globals to be able to access the config from areas where\nthings like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore\nit's important that this object remains alive while the program is still running.\n\n[`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration\nwith values of [`TrainingArguments`] by replacing special placeholder values: `\"auto\"`. Without this special logic\nthe DeepSpeed configuration is not modified in any way.\n\nArgs:\nconfig_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.", "source": "github-repos"}
{"code": "def run_stages(self, stage_context: translations.TransformContext, stages: List[translations.Stage]) -> 'RunnerResult':\n    worker_handler_manager = WorkerHandlerManager(stage_context.components.environments, self._provision_info)\n    pipeline_metrics = MetricsContainer('')\n    pipeline_metrics.get_counter(MetricName(str(type(self)), self.NUM_FUSED_STAGES_COUNTER, urn='internal:' + self.NUM_FUSED_STAGES_COUNTER)).update(len(stages))\n    monitoring_infos_by_stage: MutableMapping[str, Iterable['metrics_pb2.MonitoringInfo']] = {}\n    runner_execution_context = execution.FnApiRunnerExecutionContext(stages, worker_handler_manager, stage_context.components, stage_context.safe_coders, stage_context.data_channel_coders, self._num_workers, split_managers=self._split_managers)\n    try:\n        with self.maybe_profile():\n            runner_execution_context.setup()\n            bundle_counter = 0\n            while len(runner_execution_context.queues.ready_inputs) > 0:\n                _LOGGER.debug('Remaining ready bundles: %s\\n\\tWatermark pending bundles: %s\\n\\tTime pending bundles: %s', len(runner_execution_context.queues.ready_inputs), len(runner_execution_context.queues.watermark_pending_inputs), len(runner_execution_context.queues.time_pending_inputs))\n                consuming_stage_name, bundle_input = runner_execution_context.queues.ready_inputs.deque()\n                stage = runner_execution_context.stages[consuming_stage_name]\n                bundle_context_manager = runner_execution_context.bundle_manager_for(stage)\n                _BUNDLE_LOGGER.debug('Running bundle for stage %s\\n\\tExpected outputs: %s timers: %s', bundle_context_manager.stage.name, bundle_context_manager.stage_data_outputs, bundle_context_manager.stage_timer_outputs)\n                assert consuming_stage_name == bundle_context_manager.stage.name\n                bundle_counter += 1\n                bundle_results = self._execute_bundle(runner_execution_context, bundle_context_manager, bundle_input)\n                if consuming_stage_name in monitoring_infos_by_stage:\n                    monitoring_infos_by_stage[consuming_stage_name] = consolidate_monitoring_infos(itertools.chain(bundle_results.process_bundle.monitoring_infos, monitoring_infos_by_stage[consuming_stage_name]))\n                else:\n                    assert isinstance(bundle_results.process_bundle.monitoring_infos, Iterable)\n                    monitoring_infos_by_stage[consuming_stage_name] = bundle_results.process_bundle.monitoring_infos\n                if '' not in monitoring_infos_by_stage:\n                    monitoring_infos_by_stage[''] = list(pipeline_metrics.to_runner_api_monitoring_infos('').values())\n                else:\n                    monitoring_infos_by_stage[''] = consolidate_monitoring_infos(itertools.chain(pipeline_metrics.to_runner_api_monitoring_infos('').values(), monitoring_infos_by_stage['']))\n                if len(runner_execution_context.queues.ready_inputs) == 0:\n                    self._schedule_ready_bundles(runner_execution_context)\n        assert len(runner_execution_context.queues.ready_inputs) == 0, 'A total of %d ready bundles did not execute.' % len(runner_execution_context.queues.ready_inputs)\n        assert len(runner_execution_context.queues.watermark_pending_inputs) == 0, 'A total of %d watermark-pending bundles did not execute.' 
% len(runner_execution_context.queues.watermark_pending_inputs)\n        assert len(runner_execution_context.queues.time_pending_inputs) == 0, 'A total of %d time-pending bundles did not execute.' % len(runner_execution_context.queues.time_pending_inputs)\n    finally:\n        worker_handler_manager.close_all()\n    return RunnerResult(runner.PipelineState.DONE, monitoring_infos_by_stage)", "docstring": "Run a list of topologically-sorted stages in batch mode.\n\nArgs:\nstage_context (translations.TransformContext)\nstages (list[fn_api_runner.translations.Stage])", "source": "github-repos"}
{"code": "def sg_summary_param(tensor, prefix=None, name=None):\n    prefix = ('' if (prefix is None) else (prefix + '/'))\n    name = ((prefix + _pretty_name(tensor)) if (name is None) else (prefix + name))\n    _scalar((name + '/abs'), tf.reduce_mean(tf.abs(tensor)))\n    _histogram((name + '/abs-h'), tf.abs(tensor))", "docstring": "r\"\"\"Register `tensor` to summary report as `parameters`\n\nArgs:\ntensor: A `Tensor` to log as parameters\nprefix: A `string`. A prefix to display in the tensor board web UI.\nname: A `string`. A name to display in the tensor board web UI.\n\nReturns:\nNone", "source": "codesearchnet"}
{"code": "def create_metadata(self, resource, keys_vals):\n        \n        self.metadata_service.set_auth(self._token_metadata)\n        self.metadata_service.create(resource, keys_vals)", "docstring": "Associates new key-value pairs with the given resource.\n\nWill attempt to add all key-value pairs even if some fail.\n\nArgs:\nresource (intern.resource.boss.BossResource)\nkeys_vals (dictionary): Collection of key-value pairs to assign to\ngiven resource.\n\nRaises:\nHTTPErrorList on failure.", "source": "juraj-google-style"}
{"code": "def __init__(self, channel):\n    \n    self.GetModelStatus = channel.unary_unary(\n        '/tensorflow.serving.ModelService/GetModelStatus',\n        request_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString,\n        response_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.FromString,\n        )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"}
{"code": "def create(self, python=None, system_site=False, always_copy=False):\n        \n        command = 'virtualenv'\n        if python:\n\n            command = '{0} --python={1}'.format(command, python)\n\n        if system_site:\n\n            command = '{0} --system-site-packages'.format(command)\n\n        if always_copy:\n\n            command = '{0} --always-copy'.format(command)\n\n        command = '{0} {1}'.format(command, self.path)\n        self._execute(command)", "docstring": "Create a new virtual environment.\n\nArgs:\npython (str): The name or path of a python interpreter to use while\ncreating the virtual environment.\nsystem_site (bool): Whether or not use use the system site packages\nwithin the virtual environment. Default is False.\nalways_copy (bool): Whether or not to force copying instead of\nsymlinking in the virtual environment. Default is False.", "source": "juraj-google-style"}
{"code": "def sagemaker_auth(overrides={}, path='.'):\n    api_key = overrides.get(env.API_KEY, Api().api_key)\n    if (api_key is None):\n        raise ValueError(\"Can't find W&B ApiKey, set the WANDB_API_KEY env variable or run `wandb login`\")\n    overrides[env.API_KEY] = api_key\n    with open(os.path.join(path, 'secrets.env'), 'w') as file:\n        for (k, v) in six.iteritems(overrides):\n            file.write('{}={}\\n'.format(k, v))", "docstring": "Write a secrets.env file with the W&B ApiKey and any additional secrets passed.\n\nArgs:\noverrides (dict, optional): Additional environment variables to write to secrets.env\npath (str, optional): The path to write the secrets file.", "source": "codesearchnet"}
{"code": "def get_settings(category='All'):\n    if (category.lower() in ['all', '*']):\n        category = '*'\n    elif (category.lower() not in [x.lower() for x in categories]):\n        raise KeyError('Invalid category: \"{0}\"'.format(category))\n    cmd = '/get /category:\"{0}\"'.format(category)\n    results = _auditpol_cmd(cmd)\n    ret = {}\n    for line in results[3:]:\n        if ('  ' in line.strip()):\n            ret.update(dict(list(zip(*([iter(re.split('\\\\s{2,}', line.strip()))] * 2)))))\n    return ret", "docstring": "Get the current configuration for all audit settings specified in the\ncategory\n\nArgs:\ncategory (str):\nOne of the nine categories to return. Can also be ``All`` to return\nthe settings for all categories. Valid options are:\n\n- Account Logon\n- Account Management\n- Detailed Tracking\n- DS Access\n- Logon/Logoff\n- Object Access\n- Policy Change\n- Privilege Use\n- System\n- All\n\nDefault value is ``All``\n\nReturns:\ndict: A dictionary containing all subcategories for the specified\ncategory along with their current configuration\n\nRaises:\nKeyError: On invalid category\nCommandExecutionError: If an error is encountered retrieving the settings\n\nUsage:\n\n.. code-block:: python\n\nimport salt.utils.win_lgpo_auditpol\n\n# Get current state of all audit settings\nsalt.utils.win_lgpo_auditpol.get_settings()\n\n# Get the current state of all audit settings in the \"Account Logon\"\n# category\nsalt.utils.win_lgpo_auditpol.get_settings(category=\"Account Logon\")", "source": "codesearchnet"}
{"code": "def register_filter(self, filter_name, filter_ref, force=False):\n        \n        if not force and (filter_name in self.filters_list()):\n            self.log_warning(\"Extension %s already exist, ignore redefinition.\" % ext_in)\n            return\n\n        self.__jinja2_environment.filters[filter_name] = filter_ref", "docstring": "Add/register one filter.\n\nArgs:\nfilter_name (str): Filter name used inside :program:`Jinja2` tags.\nfilter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.\nforce (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.\n\nNote:\nThe list of user added/registered filters can be retrieve with :mth:`registered_filters_list`", "source": "juraj-google-style"}
{"code": "def __init__(self, encoding_method=None, parent=None, **kwargs):\n    \n    if not encoding_method or not parent:\n      raise ValueError('Missing encoding method or parent value.')\n\n    super(EncodedStreamPathSpec, self).__init__(parent=parent, **kwargs)\n    self.encoding_method = encoding_method", "docstring": "Initializes a path specification.\n\nNote that the encoded stream path specification must have a parent.\n\nArgs:\nencoding_method (Optional[str]): method used to the encode the data.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when encoding method or parent are not set.", "source": "juraj-google-style"}
{"code": "def __tf_tensor__(self, dtype=None, name=None):\n    pass", "docstring": "Converts this object to a Tensor.\n\nArgs:\ndtype: data type for the returned Tensor\nname: a name for the operations which create the Tensor\nReturns:\nA Tensor.", "source": "github-repos"}
{"code": "def add_header(self, key, value, **params):\n        \n\n        key = self.escape(key)\n        ci_key = key.casefold()\n\n        def quoted_params(items):\n            for p in items:\n                param_name = self.escape(p[0])\n                param_val = self.de_quote(self.escape(p[1]))\n                yield param_name, param_val\n\n        sorted_items = sorted(params.items())\n\n        quoted_iter = ('%s=\"%s\"' % p for p in quoted_params(sorted_items))\n        param_str = ' '.join(quoted_iter)\n\n        if param_str:\n            value = \"%s; %s\" % (value, param_str)\n\n        self._header_data[ci_key] = (key, value)", "docstring": "Add a header to the collection, including potential parameters.\n\nArgs:\nkey (str): The name of the header\nvalue (str): The value to store under that key\nparams: Option parameters to be appended to the value,\nautomatically formatting them in a standard way", "source": "juraj-google-style"}
{"code": "def split_recursive(self, depth: int, min_width: int, min_height: int, max_horizontal_ratio: float, max_vertical_ratio: float, seed: Optional[tcod.random.Random]=None) -> None:\n    cdata = self._as_cdata()\n    lib.TCOD_bsp_split_recursive(cdata, (seed or ffi.NULL), depth, min_width, min_height, max_horizontal_ratio, max_vertical_ratio)\n    self._unpack_bsp_tree(cdata)", "docstring": "Divide this partition recursively.\n\nArgs:\ndepth (int): The maximum depth to divide this object recursively.\nmin_width (int): The minimum width of any individual partition.\nmin_height (int): The minimum height of any individual partition.\nmax_horizontal_ratio (float):\nPrevent creating a horizontal ratio more extreme than this.\nmax_vertical_ratio (float):\nPrevent creating a vertical ratio more extreme than this.\nseed (Optional[tcod.random.Random]):\nThe random number generator to use.", "source": "codesearchnet"}
{"code": "def get_latest_package(name, range_=None, paths=None, error=False):\n    \n    it = iter_packages(name, range_=range_, paths=paths)\n    try:\n        return max(it, key=lambda x: x.version)\n    except ValueError:  \n        if error:\n            \n            \n            raise PackageFamilyNotFoundError(\"No such package family %r\" % name)\n        return None", "docstring": "Get the latest package for a given package name.\n\nArgs:\nname (str): Package name.\nrange_ (`VersionRange`): Version range to search within.\npaths (list of str, optional): paths to search for package families,\ndefaults to `config.packages_path`.\nerror (bool): If True, raise an error if no package is found.\n\nReturns:\n`Package` object, or None if no package is found.", "source": "juraj-google-style"}
{"code": "def instrument(self, package, options=None, runner=None, handler=None) -> bytes:\n    if runner is None:\n        runner = DEFAULT_INSTRUMENTATION_RUNNER\n    if options is None:\n        options = {}\n    options_list = []\n    for option_key, option_value in options.items():\n        options_list.append('-e %s %s' % (option_key, option_value))\n    options_string = ' '.join(options_list)\n    instrumentation_command = 'am instrument -r -w %s %s/%s' % (options_string, package, runner)\n    logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, instrumentation_command)\n    if handler is None:\n        return self._exec_adb_cmd('shell', instrumentation_command, shell=False, timeout=None, stderr=None)\n    else:\n        return self._execute_adb_and_process_stdout('shell', instrumentation_command, shell=False, handler=handler)", "docstring": "Runs an instrumentation command on the device.\n\nThis is a convenience wrapper to avoid parameter formatting.\n\nExample:\n\n.. code-block:: python\n\ndevice.instrument(\n'com.my.package.test',\noptions = {\n'class': 'com.my.package.test.TestSuite',\n},\n)\n\nArgs:\npackage: string, the package of the instrumentation tests.\noptions: dict, the instrumentation options including the test\nclass.\nrunner: string, the test runner name, which defaults to\nDEFAULT_INSTRUMENTATION_RUNNER.\nhandler: optional func, when specified the function is used to parse\nthe instrumentation stdout line by line as the output is\ngenerated; otherwise, the stdout is simply returned once the\ninstrumentation is finished.\n\nReturns:\nThe stdout of instrumentation command or the stderr if the handler\nis set.", "source": "github-repos"}
{"code": "def add_residues_highlight_to_nglview(view, structure_resnums, chain, res_color='red'):\n    chain = ssbio.utils.force_list(chain)\n    if isinstance(structure_resnums, list):\n        structure_resnums = list(set(structure_resnums))\n    elif isinstance(structure_resnums, int):\n        structure_resnums = ssbio.utils.force_list(structure_resnums)\n    else:\n        raise ValueError('Input must either be a residue number of a list of residue numbers')\n    to_show_chains = '( '\n    for c in chain:\n        to_show_chains += ':{} or'.format(c)\n    to_show_chains = to_show_chains.strip(' or ')\n    to_show_chains += ' )'\n    to_show_res = '( '\n    for m in structure_resnums:\n        to_show_res += '{} or '.format(m)\n    to_show_res = to_show_res.strip(' or ')\n    to_show_res += ' )'\n    log.info('Selection: {} and not hydrogen and {}'.format(to_show_chains, to_show_res))\n    view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res), color=res_color)", "docstring": "Add a residue number or numbers to an NGLWidget view object.\n\nArgs:\nview (NGLWidget): NGLWidget view object\nstructure_resnums (int, list): Residue number(s) to highlight, structure numbering\nchain (str, list): Chain ID or IDs of which residues are a part of. If not provided, all chains in the\nmapped_chains attribute will be used. If that is also empty, and exception is raised.\nres_color (str): Color to highlight residues with", "source": "codesearchnet"}
{"code": "def stage(self, name, pipeline_counter=None):\n        \n        return Stage(\n            self.server,\n            pipeline_name=self.name,\n            stage_name=name,\n            pipeline_counter=pipeline_counter,\n        )", "docstring": "Helper to instantiate a :class:`gocd.api.stage.Stage` object\n\nArgs:\nname: The name of the stage\npipeline_counter:\n\nReturns:", "source": "juraj-google-style"}
{"code": "def create_customer(self, *, full_name, email):\n    payload = {'fullName': full_name, 'email': email}\n    return self.client._post((self.url + 'customers'), json=payload, headers=self.get_headers())", "docstring": "Creation of a customer in the system.\n\nArgs:\nfull_name: Customer's complete name.\nAlphanumeric. Max: 255.\n\nemail: Customer's email address.\nAlphanumeric. Max: 255.\n\nReturns:", "source": "codesearchnet"}
{"code": "def html_for_cgi_argument(argument, form):\n    value = (form[argument].value if (argument in form) else None)\n    return KEY_VALUE_TEMPLATE.format(argument, value)", "docstring": "Returns an HTML snippet for a CGI argument.\n\nArgs:\nargument: A string representing an CGI argument name in a form.\nform: A CGI FieldStorage object.\n\nReturns:\nString HTML representing the CGI value and variable.", "source": "codesearchnet"}
{"code": "def get_jwt_key_data():\n    global __jwt_data\n    if __jwt_data:\n        return __jwt_data\n    from cloud_inquisitor import config_path\n    from cloud_inquisitor.config import dbconfig\n    jwt_key_file = dbconfig.get('jwt_key_file_path', default='ssl/private.key')\n    if (not os.path.isabs(jwt_key_file)):\n        jwt_key_file = os.path.join(config_path, jwt_key_file)\n    with open(os.path.join(jwt_key_file), 'r') as f:\n        __jwt_data = f.read()\n    return __jwt_data", "docstring": "Returns the data for the JWT private key used for encrypting the user login token as a string object\n\nReturns:\n`str`", "source": "codesearchnet"}
{"code": "async def _perform_ping_timeout(self, delay: int):\n    (await sleep(delay))\n    error = TimeoutError('Ping timeout: no data received from server in {timeout} seconds.'.format(timeout=self.PING_TIMEOUT))\n    (await self.on_data_error(error))", "docstring": "Handle timeout gracefully.\n\nArgs:\ndelay (int): delay before raising the timeout (in seconds)", "source": "codesearchnet"}
{"code": "def event(self, name, **kwargs):\n    group_obj = Event(name, **kwargs)\n    return self._group(group_obj)", "docstring": "Add Event data to Batch object.\n\nArgs:\nname (str): The name for this Group.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nevent_date (str, kwargs): The event datetime expression for this Group.\nstatus (str, kwargs): The status for this Group.\nxid (str, kwargs): The external id for this Group.\n\nReturns:\nobj: An instance of Event.", "source": "codesearchnet"}
{"code": "def __init__(self, mac_addr):\n    \n    addr_info = mac_addr.lower().split(':')\n    if len(addr_info) < 6:\n      raise ValueError('Invalid mac address')\n\n    addr_info[2] = 'EtherSync'\n    self._addr = ''.join(addr_info[2:])", "docstring": "Construct a EtherSync object.\n\nArgs:\nmac_addr: mac address of the Cambrionix unit for EtherSync.", "source": "juraj-google-style"}
{"code": "def assignSchedule(self, schedule, period, hour, minute, tariff):\n        \n        if ((schedule not in range(Extents.Schedules)) or\n                (period not in range(Extents.Tariffs)) or\n                (hour < 0) or (hour > 23) or (minute < 0) or\n                (minute > 59) or (tariff < 0)):\n            ekm_log(\"Out of bounds in Schedule_\" + str(schedule + 1))\n            return False\n\n        period += 1\n        idx_min = \"Min_\" + str(period)\n        idx_hour = \"Hour_\" + str(period)\n        idx_rate = \"Tariff_\" + str(period)\n        if idx_min not in self.m_schedule_params:\n            ekm_log(\"Incorrect index: \" + idx_min)\n            return False\n        if idx_hour not in self.m_schedule_params:\n            ekm_log(\"Incorrect index: \" + idx_hour)\n            return False\n        if idx_rate not in self.m_schedule_params:\n            ekm_log(\"Incorrect index: \" + idx_rate)\n            return False\n\n        self.m_schedule_params[idx_rate] = tariff\n        self.m_schedule_params[idx_hour] = hour\n        self.m_schedule_params[idx_min] = minute\n        self.m_schedule_params['Schedule'] = schedule\n        return True", "docstring": "Assign one schedule tariff period to meter bufffer.\n\nArgs:\nschedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extents.Schedules).\ntariff (int): :class:`~ekmmeters.Tariffs` value or in range(Extents.Tariffs).\nhour (int): Hour from 0-23.\nminute (int): Minute from 0-59.\ntariff (int): Rate value.\n\nReturns:\nbool: True on completed assignment.", "source": "juraj-google-style"}
{"code": "def __init__(self, url):\n        \n        \n        if isinstance(url, Uri):\n            self.uri = url\n        else:\n            self.uri = Uri(url)", "docstring": "Connect to an assembly that points to the assembly specified with the url.\n\nArgs:\n- url (str): The url of the onshape item", "source": "juraj-google-style"}
{"code": "def add(name, beacon_data, **kwargs):\n    ret = {'comment': 'Failed to add beacon {0}.'.format(name), 'result': False}\n    if (name in list_(return_yaml=False, **kwargs)):\n        ret['comment'] = 'Beacon {0} is already configured.'.format(name)\n        return ret\n    if any((('beacon_module' in key) for key in beacon_data)):\n        res = next((value for value in beacon_data if ('beacon_module' in value)))\n        beacon_name = res['beacon_module']\n    else:\n        beacon_name = name\n    if (beacon_name not in list_available(return_yaml=False, **kwargs)):\n        ret['comment'] = 'Beacon \"{0}\" is not available.'.format(beacon_name)\n        return ret\n    if (('test' in kwargs) and kwargs['test']):\n        ret['result'] = True\n        ret['comment'] = 'Beacon: {0} would be added.'.format(name)\n    else:\n        try:\n            eventer = salt.utils.event.get_event('minion', opts=__opts__)\n            res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'validate_beacon'}, 'manage_beacons')\n            if res:\n                event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=kwargs.get('timeout', 30))\n                valid = event_ret['valid']\n                vcomment = event_ret['vcomment']\n            if (not valid):\n                ret['result'] = False\n                ret['comment'] = 'Beacon {0} configuration invalid, not adding.\\n{1}'.format(name, vcomment)\n                return ret\n        except KeyError:\n            ret['result'] = False\n            ret['comment'] = 'Event module not available. Beacon add failed.'\n            return ret\n        try:\n            res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'add'}, 'manage_beacons')\n            if res:\n                event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_add_complete', wait=kwargs.get('timeout', 30))\n                if (event_ret and event_ret['complete']):\n                    beacons = event_ret['beacons']\n                    if ((name in beacons) and (beacons[name] == beacon_data)):\n                        ret['result'] = True\n                        ret['comment'] = 'Added beacon: {0}.'.format(name)\n                elif event_ret:\n                    ret['result'] = False\n                    ret['comment'] = event_ret['comment']\n                else:\n                    ret['result'] = False\n                    ret['comment'] = 'Did not receive the manage event before the timeout of {0}s'.format(kwargs.get('timeout', 30))\n                return ret\n        except KeyError:\n            ret['result'] = False\n            ret['comment'] = 'Event module not available. Beacon add failed.'\n    return ret", "docstring": "Add a beacon on the minion\n\nArgs:\n\nname (str):\nName of the beacon to configure\n\nbeacon_data (dict):\nDictionary or list containing configuration for beacon.\n\nReturns:\ndict: Boolean and status message on success or failure of add.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' beacons.add ps \"[{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]\"", "source": "codesearchnet"}
{"code": "def _get_other_names(self, line):\n    m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE)\n    if m:\n        self.other_names.append(m.group(1).strip())", "docstring": "Parse and extract any other names that might be recorded for the compound\n\nArgs:\nline (str): line of the msp file", "source": "codesearchnet"}
{"code": "def build_backend(self, backend_node):\n        \n        proxy_name = backend_node.backend_header.proxy_name.text\n        config_block_lines = self.__build_config_block(\n            backend_node.config_block)\n        return config.Backend(name=proxy_name, config_block=config_block_lines)", "docstring": "parse `backend` sections\n\nArgs:\nbackend_node (TreeNode): Description\n\nReturns:\nconfig.Backend: an object", "source": "juraj-google-style"}
{"code": "def _FormatTag(self, event):\n    \n    tag = getattr(event, 'tag', None)\n\n    if not tag:\n      return '-'\n\n    return ' '.join(tag.labels)", "docstring": "Formats the event tag.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: event tag field.", "source": "juraj-google-style"}
{"code": "def scroll(self, direction='vertical', percent=0.6, duration=2.0):\n        \n\n        if direction not in ('vertical', 'horizontal'):\n            raise ValueError('Argument `direction` should be one of \"vertical\" or \"horizontal\". Got {}'\n                             .format(repr(direction)))\n\n        start = [0.5, 0.5]\n        half_distance = percent / 2\n        if direction == 'vertical':\n            start[1] += half_distance\n            direction = [0, -percent]\n        else:\n            start[0] += half_distance\n            direction = [-percent, 0]\n\n        return self.swipe(start, direction=direction, duration=duration)", "docstring": "Scroll from the lower part to the upper part of the entire screen.\n\nArgs:\ndirection (:py:obj:`str`): scrolling direction. \"vertical\" or \"horizontal\"\npercent (:py:obj:`float`): scrolling distance percentage of the entire screen height or width according to\ndirection\nduration (:py:obj:`float`): time interval in which the action is performed", "source": "juraj-google-style"}
{"code": "def __init__(self, path, **kwargs):\n        \n        self.error_context = kwargs.pop('error_context', None)\n        self.error_context = self.error_context or StatikErrorContext()\n\n        if 'config' in kwargs and isinstance(kwargs['config'], dict):\n            logger.debug(\"Loading project configuration from constructor arguments\")\n            self.config = kwargs['config']\n        else:\n            self.config = None\n\n        self.safe_mode = kwargs.pop('safe_mode', False)\n\n        self.path, self.config_file_path = get_project_config_file(path, StatikProject.CONFIG_FILE)\n        if (self.path is None or self.config_file_path is None) and self.config is None:\n            raise MissingProjectConfig(context=self.error_context)\n\n        logger.debug(\"Project path configured as: %s\", self.path)\n\n        self.models = {}\n        self.template_engine = None\n        self.views = {}\n        self.db = None\n        self.project_context = None", "docstring": "Constructor.\n\nArgs:\npath: The full filesystem path to the base of the project.", "source": "juraj-google-style"}
{"code": "def licenses(self):\n        \n        buf_size = self.MAX_BUF_SIZE\n        buf = (ctypes.c_char * buf_size)()\n        res = self._dll.JLINK_GetAvailableLicense(buf, buf_size)\n        if res < 0:\n            raise errors.JLinkException(res)\n        return ctypes.string_at(buf).decode()", "docstring": "Returns a string of the built-in licenses the J-Link has.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nString of the contents of the built-in licenses the J-Link has.", "source": "juraj-google-style"}
{"code": "def AddVSSProcessingOptions(self, argument_group):\n    argument_group.add_argument('--no_vss', '--no-vss', dest='no_vss', action='store_true', default=False, help='Do not scan for Volume Shadow Snapshots (VSS). This means that Volume Shadow Snapshots (VSS) are not processed.')\n    argument_group.add_argument('--vss_only', '--vss-only', dest='vss_only', action='store_true', default=False, help='Do not process the current volume if Volume Shadow Snapshots (VSS) have been selected.')\n    argument_group.add_argument('--vss_stores', '--vss-stores', dest='vss_stores', action='store', type=str, default=None, help='Define Volume Shadow Snapshots (VSS) (or stores that need to be processed. A range of stores can be defined as: \"3..5\". Multiple stores can be defined as: \"1,3,5\" (a list of comma separated values). Ranges and lists can also be combined as: \"1,3..5\". The first store is 1. All stores can be defined as: \"all\".')", "docstring": "Adds the VSS processing options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "codesearchnet"}
{"code": "def save(self, vleaf, fpath, cleanup=False, format=None):\n        \n        graph = self.create_graphviz_digraph(vleaf, format=format)\n        graph.render(fpath, cleanup=cleanup)", "docstring": "Save the graph to a given file path.\n\nArgs:\nvleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.\nfpath (`str`): The file path used to save.\ncleanup (`bool`): Clean up the source file after rendering. Default is False.\nformat (str):\nForce overwrite ``format`` (``'pdf', 'png', ...)``) configuration.", "source": "juraj-google-style"}
{"code": "def _CheckIsDirectory(self, file_entry):\n    if (definitions.FILE_ENTRY_TYPE_DIRECTORY not in self._file_entry_types):\n        return False\n    return file_entry.IsDirectory()", "docstring": "Checks the is_directory find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"}
{"code": "def convert_to_python_types(args):\n    if isinstance(args, dict):\n        return {k: convert_to_python_type(v) for k, v in args.items()}\n    else:\n        return [convert_to_python_type(v) for v in args]", "docstring": "Convert the given list or dictionary of args to python types.\n\nArgs:\nargs: Either an iterable of types, or a dictionary where the values are\ntypes.\n\nReturns:\nIf given an iterable, a list of converted types. If given a dictionary,\na dictionary with the same keys, and values which have been converted.", "source": "github-repos"}
{"code": "def copy_rec(source, dest):\n    \n\n    if os.path.isdir(source):\n        for child in os.listdir(source):\n            new_dest = os.path.join(dest, child)\n            os.makedirs(new_dest, exist_ok=True)\n            copy_rec(os.path.join(source, child), new_dest)\n\n    elif os.path.isfile(source):\n        logging.info(' Copy \"{}\" to \"{}\"'.format(source, dest))\n        shutil.copy(source, dest)\n\n    else:\n        logging.info(' Ignoring \"{}\"'.format(source))", "docstring": "Copy files between diferent directories.\n\nCopy one or more files to an existing directory. This function is\nrecursive, if the source is a directory, all its subdirectories are created\nin the destination. Existing files in destination are overwrited without\nany warning.\n\nArgs:\nsource (str): File or directory name.\ndest (str): Directory name.\n\nRaises:\nFileNotFoundError: Destination directory doesn't exist.", "source": "juraj-google-style"}
{"code": "def maximum(x1, x2):\n    if any_symbolic_tensors((x1, x2)):\n        return Maximum().symbolic_call(x1, x2)\n    return backend.numpy.maximum(x1, x2)", "docstring": "Element-wise maximum of `x1` and `x2`.\n\nArgs:\nx1: First tensor.\nx2: Second tensor.\n\nReturns:\nOutput tensor, element-wise maximum of `x1` and `x2`.", "source": "github-repos"}
{"code": "def count(self, val=True):\n    return sum((elem.count(val) for elem in self._iter_components()))", "docstring": "Get the number of bits in the array with the specified value.\n\nArgs:\nval: A boolean value to check against the array's value.\n\nReturns:\nAn integer of the number of bits in the array equal to val.", "source": "codesearchnet"}
{"code": "def auth_middleware(policy):\n    \n    assert isinstance(policy, AbstractAuthentication)\n\n    async def _auth_middleware_factory(app, handler):\n\n        async def _middleware_handler(request):\n            \n            request[POLICY_KEY] = policy\n\n            \n            response = await handler(request)\n\n            \n            await policy.process_response(request, response)\n\n            return response\n\n        return _middleware_handler\n\n    return _auth_middleware_factory", "docstring": "Returns a aiohttp_auth middleware factory for use by the aiohttp\napplication object.\n\nArgs:\npolicy: A authentication policy with a base class of\nAbstractAuthentication.", "source": "juraj-google-style"}
{"code": "def get_package_hashes(filename):\n    log.debug('Getting package hashes')\n    filename = os.path.abspath(filename)\n    with open(filename, 'rb') as f:\n        data = f.read()\n    _hash = hashlib.sha256(data).hexdigest()\n    log.debug('Hash for file %s: %s', filename, _hash)\n    return _hash", "docstring": "Provides hash of given filename.\n\nArgs:\n\nfilename (str): Name of file to hash\n\nReturns:\n\n(str): sha256 hash", "source": "codesearchnet"}
{"code": "def _prepare_headers(self, additional_headers=None, **kwargs):\n        \n        user_agent = \"pyseaweed/{version}\".format(version=__version__)\n        headers = {\"User-Agent\": user_agent}\n        if additional_headers is not None:\n            headers.update(additional_headers)\n        return headers", "docstring": "Prepare headers for http communication.\n\nReturn dict of header to be used in requests.\n\nArgs:\n.. versionadded:: 0.3.2\n**additional_headers**: (optional) Additional headers\nto be used with request\n\nReturns:\nHeaders dict. Key and values are string", "source": "juraj-google-style"}
{"code": "def _url_format(self, service):\n    base_service_url = '{base}{service}'.format(base=self.urlbase, service=service)\n    return base_service_url", "docstring": "Generate URL from urlbase and service.\n\nArgs:\nservice (str): The endpoint service to use, i.e. gradebook\nReturns:\nstr: URL to where the request should be made", "source": "codesearchnet"}
{"code": "def _get_non_space_email(self, doc) -> List:\n        \n        result_lst = []\n        for e in doc:\n            if \"mail:\" in e.text.lower():\n                idx = e.text.lower().index(\"mail:\") + 5\n                value = e.text[idx:]\n                tmp_doc = self._nlp(value)\n                tmp_email_matches = self._like_email_matcher(tmp_doc)\n                for match_id, start, end in tmp_email_matches:\n                    span = tmp_doc[start:end]\n                    if self._check_domain(self._tokenizer.tokenize(span.text)):\n                        result_lst.append((span.text, idx+e.idx, idx+e.idx+len(value)))\n\n        return result_lst", "docstring": "Deal with corner case that there is \"email\" string in text and no space around it\nArgs:\ndoc: List[Token]\n\nReturns: Bool", "source": "juraj-google-style"}
{"code": "def _page_streamable(page_descriptor):\n    \n\n    def inner(a_func, settings, request, **kwargs):\n        \n        page_iterator = gax.PageIterator(\n            a_func, page_descriptor, settings.page_token, request, **kwargs)\n        if settings.flatten_pages:\n            return gax.ResourceIterator(page_iterator)\n        else:\n            return page_iterator\n\n    return inner", "docstring": "Creates a function that yields an iterable to performs page-streaming.\n\nArgs:\npage_descriptor (:class:`PageDescriptor`): indicates the structure\nof page streaming to be performed.\n\nReturns:\nCallable: A function that returns an iterator.", "source": "juraj-google-style"}
{"code": "def replace_list(items, match, replacement):\n    \n    return [replace(item, match, replacement) for item in items]", "docstring": "Replaces occurrences of a match string in a given list of strings and returns\na list of new strings. The match string can be a regex expression.\n\nArgs:\nitems (list):       the list of strings to modify.\nmatch (str):        the search expression.\nreplacement (str):  the string to replace with.", "source": "juraj-google-style"}
{"code": "def search_users(self, user):\n    user_url = ('%s/%s/%s' % (self.url, 'user', user))\n    response = self.jss.get(user_url)\n    return LDAPUsersResults(self.jss, response)", "docstring": "Search for LDAP users.\n\nArgs:\nuser: User to search for. It is not entirely clear how the\nJSS determines the results- are regexes allowed, or\nglobbing?\n\nReturns:\nLDAPUsersResult object.\n\nRaises:\nWill raise a JSSGetError if no results are found.", "source": "codesearchnet"}
{"code": "def initialize_resources(resource_list, name='init'):\n    if resource_list:\n        return control_flow_ops.group(*[r.create for r in resource_list], name=name)\n    return control_flow_ops.no_op(name=name)", "docstring": "Initializes the resources in the given list.\n\nArgs:\nresource_list: list of resources to initialize.\nname: name of the initialization op.\n\nReturns:\nop responsible for initializing all resources.", "source": "github-repos"}
{"code": "def load_dictionary(self, filename, encoding=\"utf-8\"):\n        \n        with load_file(filename, encoding) as data:\n            self._dictionary.update(json.loads(data.lower(), encoding=encoding))\n            self._update_dictionary()", "docstring": "Load in a pre-built word frequency list\n\nArgs:\nfilename (str): The filepath to the json (optionally gzipped) \\\nfile to be loaded\nencoding (str): The encoding of the dictionary", "source": "juraj-google-style"}
{"code": "def testNoopElimination(self, init_dataset_fn, transformation, expected_name):\n    dataset = init_dataset_fn()\n    if expected_name:\n        dataset = dataset.apply(testing.assert_next([expected_name, 'FiniteTake']))\n    else:\n        dataset = dataset.apply(testing.assert_next(['FiniteTake']))\n    dataset = dataset.apply(transformation)\n    dataset = dataset.take(1)\n    options = options_lib.Options()\n    options.experimental_optimization.apply_default_optimizations = False\n    options.experimental_optimization.noop_elimination = True\n    dataset = dataset.with_options(options)\n    get_next = self.getNext(dataset)\n    self.evaluate(get_next())", "docstring": "Runs a noop elimination test case.\n\nArgs:\ninit_dataset_fn: Function to create the initial dataset\ntransformation: Transformation to apply\nexpected_name: Name of the transformation if it is not eliminated", "source": "github-repos"}
{"code": "def deref(value: base.Symbolic, recursive: bool=False) -> Any:\n    if isinstance(value, Ref):\n        value = value.value\n    if recursive:\n\n        def _deref(k, v, p):\n            del k, p\n            if isinstance(v, Ref):\n                return deref(v.value, recursive=True)\n            return v\n        return value.rebind(_deref, raise_on_no_change=False)\n    return value", "docstring": "Dereferences a symbolic value that may contain pg.Ref.\n\nArgs:\nvalue: The input symbolic value.\nrecursive: If True, dereference `pg.Ref` in the entire tree. Otherwise\nOnly dereference the root node.\n\nReturns:\nThe dereferenced root, or dereferenced tree if recursive is True.", "source": "github-repos"}
{"code": "def sample_rate(self, value):\n        \n        if value == self._defaults['sampleRate'] and 'sampleRate' in self._values:\n            del self._values['sampleRate']\n        else:\n            self._values['sampleRate'] = value", "docstring": "The sample_rate property.\n\nArgs:\nvalue (float). the property value.", "source": "juraj-google-style"}
{"code": "def _create_field(self, uri , name, field_type, **kwargs):\n\t\t\n\t\t\n\t\tif not (name and (field_type in ['TEXT_INPUT', 'DATE', 'PERSON'])):\n\t\t\treturn requests.codes.bad_request, {'success' : 'False', \n\t\t\t\t\t\t\t\t\t\t\t\t'error': 'name needs to be provided and field_type needs to be \\'TEXT_INPUT\\', \\'DATE\\' or \\'PERSON\\''}\n\n\t\tkwargs.update({'name':name, 'type':field_type})\n\n\t\tnew_box = StreakField(**kwargs)\n\t\t\n\t\t\n\t\t\n\t\tcode, data = self._req('put', uri, new_box.to_dict(rw = True))\n\t\t\n\t\treturn code, data", "docstring": "Creates a field with the provided attributes.\nArgs:\nuri\t\tbase uri for the field (pipeline or box uri)\nname\trequired name string\nfield_type\trequired type string [TEXT_INPUT, DATE or PERSON]\nkwargs\t{}\nreturn\t(status code, field dict)", "source": "juraj-google-style"}
{"code": "def get_value_by_xy(self, x, y):\n        \n        if x < self.xMin or x > self.xMax or y < self.yMin or y > self.yMax:\n            return None\n            \n        else:\n            row = self.nRows - int(numpy.ceil((y - self.yMin) / self.dx))\n            col = int(numpy.floor((x - self.xMin) / self.dx))\n            value = self.data[row][col]\n            if value == self.noDataValue:\n                return None\n            else:\n                return value", "docstring": "Get raster value by xy coordinates.\n\nArgs:\nx: X Coordinate.\ny: Y Coordinate.\n\nReturns:\nraster value, None if the input are invalid.", "source": "juraj-google-style"}
{"code": "def setZeroResettableKWH(self, password=\"00000000\"):\n        \n        result = False\n        self.setContext(\"setZeroResettableKWH\")\n        try:\n            if not self.requestA():\n                self.writeCmdMsg(\"Bad read CRC on setting\")\n            else:\n                if not self.serialCmdPwdAuth(password):\n                    self.writeCmdMsg(\"Password failure\")\n                else:\n                    req_str = \"0157310230304433282903\"\n                    req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n                    self.m_serial_port.write(req_str.decode(\"hex\"))\n                    if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n                        self.writeCmdMsg(\"Success: 06 returned.\")\n                        result = True\n            self.serialPostEnd()\n        except:\n            ekm_log(traceback.format_exc(sys.exc_info()))\n\n        self.setContext(\"\")\n        return result", "docstring": "Serial call to zero resettable kWh registers.\n\nArgs:\npassword (str): Optional password.\n\nReturns:\nbool: True on completion and ACK.", "source": "juraj-google-style"}
{"code": "def __init__(self, endpoint_name, sagemaker_session=None):\n        \n        super(TensorFlowPredictor, self).__init__(endpoint_name, sagemaker_session, tf_json_serializer,\n                                                  tf_json_deserializer)", "docstring": "Initialize an ``TensorFlowPredictor``.\n\nArgs:\nendpoint_name (str): The name of the endpoint to perform inference on.\nsagemaker_session (sagemaker.session.Session): Session object which manages interactions with\nAmazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one\nusing the default AWS configuration chain.", "source": "juraj-google-style"}
{"code": "def get_vep_info(vep_string, vep_header):\n    vep_annotations = [dict(zip(vep_header, vep_annotation.split('|'))) for vep_annotation in vep_string.split(',')]\n    return vep_annotations", "docstring": "Make the vep annotations into a dictionaries\n\nA vep dictionary will have the vep column names as keys and\nthe vep annotations as values.\nThe dictionaries are stored in a list\n\nArgs:\nvep_string (string): A string with the CSQ annotation\nvep_header (list): A list with the vep header\n\nReturn:\nvep_annotations (list): A list of vep dicts", "source": "codesearchnet"}
{"code": "def _rowwise_unsorted_segment_sum(values, indices, n):\n    (batch, k) = tf.unstack(tf.shape(indices), num=2)\n    indices_flat = (tf.reshape(indices, [(- 1)]) + (tf.div(tf.range((batch * k)), k) * n))\n    ret_flat = tf.unsorted_segment_sum(tf.reshape(values, [(- 1)]), indices_flat, (batch * n))\n    return tf.reshape(ret_flat, [batch, n])", "docstring": "UnsortedSegmentSum on each row.\n\nArgs:\nvalues: a `Tensor` with shape `[batch_size, k]`.\nindices: an integer `Tensor` with shape `[batch_size, k]`.\nn: an integer.\nReturns:\nA `Tensor` with the same type as `values` and shape `[batch_size, n]`.", "source": "codesearchnet"}
{"code": "def aggregate_and_return_name_for_input(self, out_graphdef):\n    del out_graphdef\n    raise RuntimeError('Unimplemented abstract method.')", "docstring": "This adds the node(s) to out_graphdef and returns the input node name.\n\nArgs:\nout_graphdef: A graphdef that is ready to have this input added.\n\nReturns:\nThe output that the stub should use as an input for this operand.\n\nRaises:\nRuntimeError: if the method is not implemented.", "source": "github-repos"}
{"code": "def read(self, index, name=None):\n    return self._implementation.read(index, name=name)", "docstring": "Read the value at location `index` in the TensorArray.\n\nArgs:\nindex: 0-D.  int32 tensor with the index to read from.\nname: A name for the operation (optional).\n\nReturns:\nThe tensor at index `index`.", "source": "github-repos"}
{"code": "def parse_keys(self, sn: \"DataNode\") -> Dict[InstanceName, ScalarValue]:\n        \n        res = {}\n        for k in self.keys:\n            knod = sn.get_data_child(*k)\n            if knod is None:\n                raise NonexistentSchemaNode(sn.qual_name, *k)\n            kval = knod.type.parse_value(self.keys[k])\n            if kval is None:\n                raise InvalidKeyValue(self.keys[k])\n            res[knod.iname()] = kval\n        return res", "docstring": "Parse key dictionary in the context of a schema node.\n\nArgs:\nsn: Schema node corresponding to a list.", "source": "juraj-google-style"}
{"code": "def render_pipeline_graph(self, pipeline_graph: 'PipelineGraph') -> str:\n    raise NotImplementedError", "docstring": "Renders the pipeline graph in HTML-compatible format.\n\nArgs:\npipeline_graph: (pipeline_graph.PipelineGraph) the graph to be rendererd.\n\nReturns:\nunicode, str or bytes that can be expressed as HTML.", "source": "github-repos"}
{"code": "def commutes(\n        m1: np.ndarray,\n        m2: np.ndarray,\n        *,\n        rtol: float = 1e-5,\n        atol: float = 1e-8) -> bool:\n    \n    return (m1.shape[0] == m1.shape[1] and\n            m1.shape == m2.shape and\n            np.allclose(m1.dot(m2), m2.dot(m1), rtol=rtol, atol=atol))", "docstring": "Determines if two matrices approximately commute.\n\nTwo matrices A and B commute if they are square and have the same size and\nAB = BA.\n\nArgs:\nm1: One of the matrices.\nm2: The other matrix.\nrtol: The per-matrix-entry relative tolerance on equality.\natol: The per-matrix-entry absolute tolerance on equality.\n\nReturns:\nWhether the two matrices have compatible sizes and a commutator equal\nto zero within tolerance.", "source": "juraj-google-style"}
{"code": "def decode_jpeg(image_buffer, scope=None):\n    with tf.name_scope(values=[image_buffer], name=scope, default_name='decode_jpeg'):\n        image = tf.image.decode_jpeg(image_buffer, channels=3)\n        image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n        return image", "docstring": "Decode a JPEG string into one 3-D float image Tensor.\n\nArgs:\nimage_buffer: scalar string Tensor.\nscope: Optional scope for name_scope.\nReturns:\n3-D float Tensor with values ranging from [0, 1).", "source": "codesearchnet"}
{"code": "def score_one(self, x: beam.Row) -> Optional[float]:\n    raise NotImplementedError", "docstring": "Scores a single data instance for anomalies.\n\nArgs:\nx: A `beam.Row` representing the data instance.\n\nReturns:\nThe outlier score as a float. None if an exception occurs during scoring,\nand NaN if the model is not ready.", "source": "github-repos"}
{"code": "def reverse_bettertransformer(self):\n    if not is_optimum_available():\n        raise ImportError('The package `optimum` is required to use Better Transformer.')\n    from optimum.version import __version__ as optimum_version\n    if version.parse(optimum_version) < version.parse('1.7.0'):\n        raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found.')\n    from optimum.bettertransformer import BetterTransformer\n    return BetterTransformer.reverse(self)", "docstring": "Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is\nused, for example in order to save the model.\n\nReturns:\n[`PreTrainedModel`]: The model converted back to the original modeling.", "source": "github-repos"}
{"code": "def not_evaluator(conditions, leaf_evaluator):\n    if (not (len(conditions) > 0)):\n        return None\n    result = evaluate(conditions[0], leaf_evaluator)\n    return (None if (result is None) else (not result))", "docstring": "Evaluates a list of conditions as if the evaluator had been applied\nto a single entry and NOT was applied to the result.\n\nArgs:\nconditions: List of conditions ex: [operand_1, operand_2].\nleaf_evaluator: Function which will be called to evaluate leaf condition values.\n\nReturns:\nBoolean:\n- True if the operand evaluates to False.\n- False if the operand evaluates to True.\nNone: if conditions is empty or condition couldn't be evaluated.", "source": "codesearchnet"}
{"code": "def context(name=None):\n    \n\n    def _context(cls):\n        annotated(cls, name)\n        cls.context = True\n\n        return cls\n\n    return _context", "docstring": "Declare that a class defines a context.\n\nContexts are for use with HierarchicalShell for discovering\nand using functionality from the command line.\n\nArgs:\nname (str): Optional name for this context if you don't want\nto just use the class name.", "source": "juraj-google-style"}
{"code": "def get_task(config):\n    path = os.path.join(config['work_dir'], 'task.json')\n    message = \"Can't read task from {}!\\n%(exc)s\".format(path)\n    contents = load_json_or_yaml(path, is_path=True, message=message)\n    return contents", "docstring": "Read the task.json from work_dir.\n\nArgs:\nconfig (dict): the running config, to find work_dir.\n\nReturns:\ndict: the contents of task.json\n\nRaises:\nScriptWorkerTaskException: on error.", "source": "codesearchnet"}
{"code": "def _ReadStructureFamilyDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):\n    if is_member:\n        error_message = 'data type not supported as member'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object = self._ReadLayoutDataTypeDefinition(definitions_registry, definition_values, data_types.StructureFamilyDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_STRUCTURE_FAMILY)\n    runtime = definition_values.get('runtime', None)\n    if (not runtime):\n        error_message = 'missing runtime'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    runtime_data_type_definition = definitions_registry.GetDefinitionByName(runtime)\n    if (not runtime_data_type_definition):\n        error_message = 'undefined runtime: {0:s}.'.format(runtime)\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    if runtime_data_type_definition.family_definition:\n        error_message = 'runtime: {0:s} already part of a family.'.format(runtime)\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    definition_object.AddRuntimeDefinition(runtime_data_type_definition)\n    members = definition_values.get('members', None)\n    if (not members):\n        error_message = 'missing members'\n        raise errors.DefinitionReaderError(definition_name, error_message)\n    for member in members:\n        member_data_type_definition = definitions_registry.GetDefinitionByName(member)\n        if (not member_data_type_definition):\n            error_message = 'undefined member: {0:s}.'.format(member)\n            raise errors.DefinitionReaderError(definition_name, error_message)\n        if member_data_type_definition.family_definition:\n            error_message = 'member: {0:s} already part of a family.'.format(member)\n            raise errors.DefinitionReaderError(definition_name, error_message)\n        definition_object.AddMemberDefinition(member_data_type_definition)\n    return definition_object", "docstring": "Reads a structure family data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndefinition_name (str): name of the definition.\nis_member (Optional[bool]): True if the data type definition is a member\ndata type definition.\n\nReturns:\nStructureDefinition: structure data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"}
{"code": "def predict_proba(self, a, b, device=None):\n        \n        device = SETTINGS.get_default(device=device)\n        if self.model is None:\n            print('Model has to be trained before doing any predictions')\n            raise ValueError\n        if len(np.array(a).shape) == 1:\n            a = np.array(a).reshape((-1, 1))\n            b = np.array(b).reshape((-1, 1))\n        m = np.hstack((a, b))\n        m = scale(m)\n        m = m.astype('float32')\n        m = th.from_numpy(m).t().unsqueeze(0)\n\n        if th.cuda.is_available():\n            m = m.cuda()\n\n        return (self.model(m).data.cpu().numpy()-.5) * 2", "docstring": "Infer causal directions using the trained NCC pairwise model.\n\nArgs:\na (numpy.ndarray): Variable 1\nb (numpy.ndarray): Variable 2\ndevice (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``)\n\nReturns:\nfloat: Causation score (Value : 1 if a->b and -1 if b->a)", "source": "juraj-google-style"}
{"code": "def ParseFileObject(self, parser_mediator, file_object):\n    \n    mft_metadata_file = pyfsntfs.mft_metadata_file()\n\n    try:\n      mft_metadata_file.open_file_object(file_object)\n    except IOError as exception:\n      parser_mediator.ProduceExtractionWarning(\n          'unable to open file with error: {0!s}'.format(exception))\n\n    for entry_index in range(0, mft_metadata_file.number_of_file_entries):\n      try:\n        mft_entry = mft_metadata_file.get_file_entry(entry_index)\n        self._ParseMFTEntry(parser_mediator, mft_entry)\n\n      except IOError as exception:\n        parser_mediator.ProduceExtractionWarning((\n            'unable to parse MFT entry: {0:d} with error: {1!s}').format(\n                entry_index, exception))\n\n    mft_metadata_file.close()", "docstring": "Parses a NTFS $MFT metadata file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"}
{"code": "def __user_location(__pkg: str, type_) -> str:\n    if (ALLOW_DARWIN and (sys.platform == 'darwin')):\n        user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])\n    else:\n        user_dir = getenv('XDG_{}_HOME'.format(type_.upper()), path.sep.join([getenv('HOME', ''), __LOCATIONS[type_][1]]))\n    return path.expanduser(path.sep.join([user_dir, __pkg]))", "docstring": "Utility function to look up XDG basedir locations\n\nArgs:\n__pkg: Package name\n__type: Location type", "source": "codesearchnet"}
{"code": "def __init__(self, paths, ignore_list, path_segment_separator='/'):\n    \n    super(_PathFilterTable, self).__init__()\n    self._path_segment_separator = path_segment_separator\n    self.path_segments_per_index = {}\n    self.paths = list(paths)\n\n    for path in self.paths:\n      self._AddPathSegments(path, ignore_list)", "docstring": "Initializes and builds the path filter table from a list of paths.\n\nArgs:\npaths: a list of strings containing the paths.\nignore_list: a list of path segment indexes to ignore, where 0 is the\nindex of the first path segment relative from the root.\npath_segment_separator: optional string containing the path segment\nseparator.", "source": "juraj-google-style"}
{"code": "def Columns(iterable):\n  \n  columns = sorted(iterable)\n  return \"({})\".format(\", \".join(\"`{}`\".format(col) for col in columns))", "docstring": "Returns a string of column names for MySQL INSERTs.\n\nTo account for Iterables with undefined order (dicts before Python 3.6),\nthis function sorts column names.\n\nExamples:\n>>> Columns({\"password\": \"foo\", \"name\": \"bar\"})\nu'(`name`, `password`)'\n\nArgs:\niterable: The iterable of strings to be used as column names.\nReturns: A string containing a tuple of sorted comma-separated column names.", "source": "juraj-google-style"}
{"code": "def ready_size(self, name=None):\n    if name is None:\n        name = '%s_BarrierReadySize' % self._name\n    return gen_data_flow_ops.barrier_ready_size(self._barrier_ref, name=name)", "docstring": "Compute the number of complete elements in the given barrier.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nA single-element tensor containing the number of complete elements in the\ngiven barrier.", "source": "github-repos"}
{"code": "def send_password_reset_link(self, username):\n        \n\n        response = self._post(self.rest_url + \"/user/mail/password\",\n                              params={\"username\": username})\n\n        if response.ok:\n            return True\n\n        return False", "docstring": "Sends the user a password reset link (by email)\n\nArgs:\nusername: The account username.\n\nReturns:\nTrue: Succeeded\nFalse: If unsuccessful", "source": "juraj-google-style"}
{"code": "def tf_preprocess(self, states, actions, reward):\n        \n        \n        for name in sorted(self.states_preprocessing):\n            states[name] = self.states_preprocessing[name].process(tensor=states[name])\n\n        \n        if self.reward_preprocessing is not None:\n            reward = self.reward_preprocessing.process(tensor=reward)\n\n        return states, actions, reward", "docstring": "Applies preprocessing ops to the raw states/action/reward inputs.\n\nArgs:\nstates (dict): Dict of raw state tensors.\nactions (dict): Dict or raw action tensors.\nreward: 1D (float) raw rewards tensor.\n\nReturns: The preprocessed versions of the input tensors.", "source": "juraj-google-style"}
{"code": "def runCmd(cls, cmd):\n        \n        cit.echo(cmd, \"command\")\n        result = os.system(cmd)\n        cls.checkResult(result)", "docstring": "run command and show if success or failed\n\nArgs:\ncmd: string\nReturns:\nbool: if this command run successfully", "source": "juraj-google-style"}
{"code": "def count(self, event_str, inc_int=1):\n        \n        self._event_dict.setdefault(event_str, 0)\n        self._event_dict[event_str] += inc_int", "docstring": "Count an event.\n\nArgs:\nevent_str:\nThe name of an event to count. Used as a key in the event dict. The same\nname will also be used in the summary.\n\ninc_int: int\nOptional argument to increase the count for the event by more than 1.", "source": "juraj-google-style"}
{"code": "def import_file(source, use_32bit_registry=False):\n    cache_path = __salt__['cp.cache_file'](source)\n    if (not cache_path):\n        error_msg = \"File/URL '{0}' probably invalid.\".format(source)\n        raise ValueError(error_msg)\n    if use_32bit_registry:\n        word_sz_txt = '32'\n    else:\n        word_sz_txt = '64'\n    cmd = 'reg import \"{0}\" /reg:{1}'.format(cache_path, word_sz_txt)\n    cmd_ret_dict = __salt__['cmd.run_all'](cmd, python_shell=True)\n    retcode = cmd_ret_dict['retcode']\n    if (retcode != 0):\n        raise CommandExecutionError('reg.exe import failed', info=cmd_ret_dict)\n    return True", "docstring": "Import registry settings from a Windows ``REG`` file by invoking ``REG.EXE``.\n\n.. versionadded:: 2018.3.0\n\nArgs:\n\nsource (str):\nThe full path of the ``REG`` file. This can be either a local file\npath or a URL type supported by salt (e.g. ``salt://salt_master_path``)\n\nuse_32bit_registry (bool):\nIf the value of this parameter is ``True`` then the ``REG`` file\nwill be imported into the Windows 32 bit registry. Otherwise the\nWindows 64 bit registry will be used.\n\nReturns:\nbool: True if successful, otherwise an error is raised\n\nRaises:\nValueError: If the value of ``source`` is an invalid path or otherwise\ncauses ``cp.cache_file`` to return ``False``\nCommandExecutionError: If ``reg.exe`` exits with a non-0 exit code\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg", "source": "codesearchnet"}
{"code": "def longest_one_seg_prefix(self, word):\n    for i in range(self.longest_seg, 0, (- 1)):\n        if (word[:i] in self.seg_dict):\n            return word[:i]\n    return ''", "docstring": "Return longest Unicode IPA prefix of a word\n\nArgs:\nword (unicode): input word as Unicode IPA string\n\nReturns:\nunicode: longest single-segment prefix of `word` in database", "source": "codesearchnet"}
{"code": "def map_part_function(fn: PartFn, match_fn: MatchFn | None=None) -> StreamFn:\n    match_fn = match_fn or (lambda _: True)\n    return functools.partial(_apply_part_function, (fn, match_fn))", "docstring": "Converts a part function to a function taking a stream of parts.\n\nAdds a context if missing to ensure error propagation.\n\nArgs:\nfn: a function that can be applied on a single part.\nmatch_fn: a function that returns True if the part should be processed by\nthe part function. When the part should not be processed, the part\nprocessor will not be called and the part will be passed as is.\n\nReturns:\nA function that is applied concurrently across the parts of the input\nstream.", "source": "github-repos"}
{"code": "def is_match(self, subject: Union[Expression, FlatTerm]) -> bool:\n        \n        try:\n            next(self.match(subject))\n        except StopIteration:\n            return False\n        return True", "docstring": "Check if the given subject matches any pattern in the net.\n\nArgs:\nsubject:\nThe subject that is matched. Must be constant.\n\nReturns:\nTrue, if any pattern matches the subject.", "source": "juraj-google-style"}
{"code": "def logsumexp(x, axis=None, keepdims=None):\n    return tf_np.asarray(math_ops.reduce_logsumexp(input_tensor=x, axis=axis, keepdims=keepdims))", "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))).\n\nReduces `x` along the dimensions given in `axis`.\nUnless `keepdims` is true, the rank of the tensor is reduced by 1 for each\nentry in `axis`. If `keepdims` is true, the reduced dimensions\nare retained with length 1.\nIf `axis` has no entries, all dimensions are reduced, and a\ntensor with a single element is returned.\nThis function is more numerically stable than log(sum(exp(input))). It avoids\noverflows caused by taking the exp of large inputs and underflows caused by\ntaking the log of small inputs.\n\nArgs:\nx: The tensor to reduce. Should have numeric type.\naxis: The dimensions to reduce. If `None` (the default), reduces all\ndimensions. Must be in the range `[-rank(x), rank(x))`.\nkeepdims: If true, retains reduced dimensions with length 1.\n\nReturns:\nThe reduced tensor.", "source": "github-repos"}
{"code": "def extractSchedule(self, schedule, period):\n        \n        ret = namedtuple(\"ret\", [\"Hour\", \"Min\", \"Tariff\", \"Period\", \"Schedule\"])\n        work_table = self.m_schd_1_to_4\n        if Schedules.Schedule_5 <= schedule <= Schedules.Schedule_6:\n            work_table = self.m_schd_5_to_6\n        period += 1\n        schedule += 1\n        ret.Period = str(period)\n        ret.Schedule = str(schedule)\n        if (schedule < 1) or (schedule > Extents.Schedules) or (period < 0) or (period > Extents.Periods):\n            ekm_log(\"Out of bounds: tariff \" + str(period) + \" for schedule \" + str(schedule))\n            ret.Hour = ret.Min = ret.Tariff = str(0)\n            return ret\n\n        idxhr = \"Schedule_\" + str(schedule) + \"_Period_\" + str(period) + \"_Hour\"\n        idxmin = \"Schedule_\" + str(schedule) + \"_Period_\" + str(period) + \"_Min\"\n        idxrate = \"Schedule_\" + str(schedule) + \"_Period_\" + str(period) + \"_Tariff\"\n\n        if idxhr not in work_table:\n            ekm_log(\"Incorrect index: \" + idxhr)\n            ret.Hour = ret.Min = ret.Tariff = str(0)\n            return ret\n\n        if idxmin not in work_table:\n            ekm_log(\"Incorrect index: \" + idxmin)\n            ret.Hour = ret.Min = ret.Tariff = str(0)\n            return ret\n\n        if idxrate not in work_table:\n            ekm_log(\"Incorrect index: \" + idxrate)\n            ret.Hour = ret.Min = ret.Tariff = str(0)\n            return ret\n\n        ret.Hour = work_table[idxhr][MeterData.StringValue]\n        ret.Min = work_table[idxmin][MeterData.StringValue].zfill(2)\n        ret.Tariff = work_table[idxrate][MeterData.StringValue]\n        return ret", "docstring": "Read a single schedule tariff from meter object buffer.\n\nArgs:\nschedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extent.Schedules).\ntariff (int): A :class:`~ekmmeters.Tariffs` value or in range(Extent.Tariffs).\n\nReturns:\nbool: True on completion.", "source": "juraj-google-style"}
{"code": "def build(self, client,\n              nobuild=False,\n              usecache=True,\n              pull=False):\n        \n        if not nobuild:\n            self.update_source_images(client,\n                                      usecache=usecache,\n                                      pull=pull)\n\n        width = utils.get_console_width()\n        cprint('\\n' + '='*width,\n               color='white', attrs=['bold'])\n\n        line = 'STARTING BUILD for \"%s\" (image definition \"%s\" from %s)\\n' % (\n            self.targetname, self.imagename, self.steps[-1].sourcefile)\n\n        cprint(_centered(line, width), color='blue', attrs=['bold'])\n\n        for istep, step in enumerate(self.steps):\n            print(colored('* Step','blue'),\n                  colored('%d/%d' % (istep+1, len(self.steps)), 'blue', attrs=['bold']),\n                  colored('for image', color='blue'),\n                  colored(self.imagename, color='blue', attrs=['bold']))\n\n            if not nobuild:\n                if step.bust_cache:\n                    stackkey = self._get_stack_key(istep)\n                    if stackkey in _rebuilt:\n                        step.bust_cache = False\n\n                step.build(client, usecache=usecache)\n                print(colored(\"* Created intermediate image\", 'green'),\n                      colored(step.buildname, 'green', attrs=['bold']),\n                      end='\\n\\n')\n\n                if step.bust_cache:\n                    _rebuilt.add(stackkey)\n\n        finalimage = step.buildname\n\n        if not nobuild:\n            self.finalizenames(client, finalimage)\n            line = 'FINISHED BUILDING \"%s\" (image definition \"%s\" from %s)'%(\n                self.targetname, self.imagename, self.steps[-1].sourcefile)\n            cprint(_centered(line, width),\n                   color='green', attrs=['bold'])\n            cprint('=' * width, color='white', attrs=['bold'], end='\\n\\n')", "docstring": "Drives the build of the final image - get the list of steps and execute them.\n\nArgs:\nclient (docker.Client): docker client object that will build the image\nnobuild (bool): just create dockerfiles, don't actually build the image\nusecache (bool): use docker cache, or rebuild everything from scratch?\npull (bool): try to pull new versions of repository images?", "source": "juraj-google-style"}
{"code": "def Webhook(self, request, global_params=None):\n    config = self.GetMethodConfig('Webhook')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "ReceiveTriggerWebhook [Experimental] is called when the API receives a webhook request targeted at a specific trigger.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsTriggersWebhookRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ReceiveTriggerWebhookResponse) The response message.", "source": "github-repos"}
{"code": "def sample(self, n):\n    \n\n    row_total_count = 0\n    row_counts = []\n    for file in self.files:\n      with _util.open_local_or_gcs(file, 'r') as f:\n        num_lines = sum(1 for line in f)\n        row_total_count += num_lines\n        row_counts.append(num_lines)\n\n    names = None\n    dtype = None\n    if self._schema:\n      _MAPPINGS = {\n        'FLOAT': np.float64,\n        'INTEGER': np.int64,\n        'TIMESTAMP': np.datetime64,\n        'BOOLEAN': np.bool,\n      }\n      names = [x['name'] for x in self._schema]\n      dtype = {x['name']: _MAPPINGS.get(x['type'], object) for x in self._schema}\n\n    skip_count = row_total_count - n\n    \n    \n    skip_all = sorted(random.sample(range(0, row_total_count), skip_count))\n    dfs = []\n    for file, row_count in zip(self.files, row_counts):\n      skip = [x for x in skip_all if x < row_count]\n      skip_all = [x - row_count for x in skip_all if x >= row_count]\n      with _util.open_local_or_gcs(file, 'r') as f:\n        dfs.append(pd.read_csv(f, skiprows=skip, names=names, dtype=dtype, header=None))\n    return pd.concat(dfs, axis=0, ignore_index=True)", "docstring": "Samples data into a Pandas DataFrame.\nArgs:\nn: number of sampled counts.\nReturns:\nA dataframe containing sampled data.\nRaises:\nException if n is larger than number of rows.", "source": "juraj-google-style"}
{"code": "def compile_state_cpfs(self,\n                           scope: Dict[str, TensorFluent],\n                           batch_size: Optional[int] = None,\n                           noise: Optional[Noise] = None) -> List[CPFPair]:\n        \n        next_state_fluents = []\n\n        with self.graph.as_default():\n            with tf.name_scope('state_cpfs'):\n\n                for cpf in self.rddl.domain.state_cpfs:\n                    cpf_noise = noise.get(cpf.name, None) if noise is not None else None\n\n                    name_scope = utils.identifier(cpf.name)\n                    with tf.name_scope(name_scope):\n                        t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)\n\n                    next_state_fluents.append((cpf.name, t))\n\n                key = lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0])\n                next_state_fluents = sorted(next_state_fluents, key=key)\n\n        return next_state_fluents", "docstring": "Compiles the next state fluent CPFs given the current `state` and `action` scope.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nA list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "juraj-google-style"}
{"code": "def evaluate(self, index):\n    \n\n    if self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE:\n      self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index)))\n      return None\n\n    condition_match = self.condition_data[index][3]\n    if condition_match is None:\n      condition_match = ConditionMatchTypes.EXACT\n\n    if condition_match not in self.EVALUATORS_BY_MATCH_TYPE:\n      self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index)))\n      return None\n\n    if condition_match != ConditionMatchTypes.EXISTS:\n      attribute_key = self.condition_data[index][0]\n      if attribute_key not in self.attributes:\n        self.logger.debug(audience_logs.MISSING_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))\n        return None\n\n      if self.attributes.get(attribute_key) is None:\n        self.logger.debug(audience_logs.NULL_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))\n        return None\n\n    return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index)", "docstring": "Given a custom attribute audience condition and user attributes, evaluate the\ncondition against the attributes.\n\nArgs:\nindex: Index of the condition to be evaluated.\n\nReturns:\nBoolean:\n- True if the user attributes match the given condition.\n- False if the user attributes don't match the given condition.\nNone: if the user attributes and condition can't be evaluated.", "source": "juraj-google-style"}
{"code": "def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool:\n    for slot in timeslots.timeslots:\n        for interval in self._table[slot.channel]:\n            if slot.interval.has_overlap(interval):\n                return False\n    return True", "docstring": "Return if self is mergeable with `timeslots`.\n\nArgs:\ntimeslots: TimeslotCollection to be checked", "source": "codesearchnet"}
{"code": "def mark_backward(output_tensor, used_node_names):\n    op = output_tensor.op\n    if (op.name in used_node_names):\n        return\n    used_node_names.add(op.name)\n    for input_tensor in op.inputs:\n        mark_backward(input_tensor, used_node_names)\n    for control_input_op in op.control_inputs:\n        used_node_names.add(control_input_op.name)\n        for input_tensor in control_input_op.inputs:\n            mark_backward(input_tensor, used_node_names)", "docstring": "Function to propagate backwards in the graph and mark nodes as used.\n\nTraverses recursively through the graph from the end tensor, through the op\nthat generates the tensor, and then to the input tensors that feed the op.\nNodes encountered are stored in used_node_names.\n\nArgs:\noutput_tensor: A Tensor which we start the propagation.\nused_node_names: A list of strings, stores the name of nodes we've marked as\nvisited.", "source": "codesearchnet"}
{"code": "def auto_to_manual_spmd_partition(tensor, manual_sharding, single_dim=-1, unspecified_dims=None):\n    return tf2xla.spmd_full_to_shard_shape(tensor, manual_sharding=manual_sharding, dim=single_dim, unspecified_dims=unspecified_dims or [])", "docstring": "Switches from automatic SPMD partitioning to manual partitioning.\n\nConverts a full-shaped tensor (to be automatically partitioned by SPMD\npartitioner) to a shard-shaped tensor to be consumed by manually partitioned\nops.\n\nArgs:\ntensor: A tf.Tensor in full shape.\nmanual_sharding: A serialized string of OpSharding to be used in manual\npartitioning.\nsingle_dim: If >= 0, the conversion will happen only on this dim in\nsubgroups.\nunspecified_dims: An optional list of dimensions unspecified.\n\nReturns:\nA shard-shaped tensor to be consumed by manually partitioned ops.", "source": "github-repos"}
{"code": "def get_equivalent_kpoints(self, index):\n        \n        \n        \n\n        if self.kpoints[index].label is None:\n            return [index]\n\n        list_index_kpoints = []\n        for i in range(len(self.kpoints)):\n            if self.kpoints[i].label == self.kpoints[index].label:\n                list_index_kpoints.append(i)\n\n        return list_index_kpoints", "docstring": "Returns the list of kpoint indices equivalent (meaning they are the\nsame frac coords) to the given one.\n\nArgs:\nindex: the kpoint index\n\nReturns:\na list of equivalent indices\n\nTODO: now it uses the label we might want to use coordinates instead\n(in case there was a mislabel)", "source": "juraj-google-style"}
{"code": "class DatasetInitializer(lookup_ops.TableInitializerBase):\n\n    def __init__(self, dataset):\n        \n        self.dataset = dataset\n        elem_spec = self.dataset.element_spec\n        _check_table_initializer_element_spec(elem_spec)\n        key_type = elem_spec[0].dtype\n        value_type = elem_spec[1].dtype\n        super(DatasetInitializer, self).__init__(key_type, value_type)\n\n    def initialize(self, table):\n        lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype)\n        init_op = ged_ops.initialize_table_from_dataset(table.resource_handle, self.dataset._variant_tensor)\n        ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n        return init_op", "docstring": "Creates a table initializer from a `tf.data.Dataset`.\n\nSample usage:\n\n>>> keys = tf.data.Dataset.range(100)\n>>> values = tf.data.Dataset.range(100).map(\n...     lambda x: tf.strings.as_string(x * 2))\n>>> ds = tf.data.Dataset.zip((keys, values))\n>>> init = tf.data.experimental.DatasetInitializer(ds)\n>>> table = tf.lookup.StaticHashTable(init, \"\")\n>>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy()\narray([b'0', b'2', b'4'], dtype=object)\n\nAttributes:\ndataset: A `tf.data.Dataset` object that produces tuples of scalars. The\nfirst scalar is treated as a key and the second as value.\nRaises: ValueError if `dataset` doesn't conform to specifications.", "source": "github-repos"}
{"code": "def halt(self):\n    res = int(self._dll.JLINKARM_Halt())\n    if (res == 0):\n        time.sleep(1)\n        return True\n    return False", "docstring": "Halts the CPU Core.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\n``True`` if halted, ``False`` otherwise.", "source": "codesearchnet"}
{"code": "def retransmit(self, data):\n    if (data['method'] == 'REGISTER'):\n        if ((not self.registered) and (self.register_retries < self.max_retries)):\n            logger.debug((('<%s> Timeout exceeded. ' % str(self.cuuid)) + 'Retransmitting REGISTER request.'))\n            self.register_retries += 1\n            self.register(data['address'], retry=False)\n        else:\n            logger.debug(('<%s> No need to retransmit.' % str(self.cuuid)))\n    if (data['method'] == 'EVENT'):\n        if (data['euuid'] in self.event_uuids):\n            self.event_uuids[data['euuid']]['retry'] += 1\n            if (self.event_uuids[data['euuid']]['retry'] > self.max_retries):\n                logger.debug(('<%s> Max retries exceeded. Timed out waiting for server for event: %s' % (data['cuuid'], data['euuid'])))\n                logger.debug(('<%s> <euuid:%s> Deleting event from currently processing event uuids' % (data['cuuid'], str(data['euuid']))))\n                del self.event_uuids[data['euuid']]\n            else:\n                self.listener.send_datagram(serialize_data(data, self.compression, self.encryption, self.server_key), self.server)\n                logger.debug(('<%s> <euuid:%s> Scheduling to retry in %s seconds' % (data['cuuid'], str(data['euuid']), str(self.timeout))))\n                self.listener.call_later(self.timeout, self.retransmit, data)\n        else:\n            logger.debug(('<%s> <euuid:%s> No need to retransmit.' % (str(self.cuuid), str(data['euuid']))))", "docstring": "Processes messages that have been delivered from the transport\nprotocol.\n\nArgs:\ndata (dict): A dictionary containing the packet data to resend.\n\nReturns:\nNone\n\nExamples:\n>>> data\n{'method': 'REGISTER', 'address': ('192.168.0.20', 40080)}", "source": "codesearchnet"}
{"code": "def _convert_to_sparse_tensor(sp_input):\n    if isinstance(sp_input, sparse_tensor.SparseTensorValue):\n        return sparse_tensor.SparseTensor.from_value(sp_input)\n    if not isinstance(sp_input, sparse_tensor.SparseTensor):\n        raise TypeError('Input must be a SparseTensor.')\n    return sp_input", "docstring": "Convert `sp_input` to `SparseTensor` and return it.\n\nArgs:\nsp_input: `SparseTensor` or `SparseTensorValue`.\n\nReturns:\n`sp_input` converted to `SparseTensor`.\n\nRaises:\nValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.", "source": "github-repos"}
{"code": "def not_evaluator(conditions, leaf_evaluator):\n  \n  if not len(conditions) > 0:\n    return None\n\n  result = evaluate(conditions[0], leaf_evaluator)\n  return None if result is None else not result", "docstring": "Evaluates a list of conditions as if the evaluator had been applied\nto a single entry and NOT was applied to the result.\n\nArgs:\nconditions: List of conditions ex: [operand_1, operand_2].\nleaf_evaluator: Function which will be called to evaluate leaf condition values.\n\nReturns:\nBoolean:\n- True if the operand evaluates to False.\n- False if the operand evaluates to True.\nNone: if conditions is empty or condition couldn't be evaluated.", "source": "juraj-google-style"}
{"code": "def getCard(self, name):\n    cards = self.projectCards\n    for card in cards:\n        if (card.name.upper() == name.upper()):\n            return card\n    return None", "docstring": "Retrieve card object for given card name.\n\nArgs:\nname (str): Name of card to be retrieved.\n\nReturns:\n:class:`.ProjectCard` or None: Project card object. Will return None if the card is not available.", "source": "codesearchnet"}
{"code": "def function_completions(\n    completion_text: str,\n    bel_spec: BELSpec,\n    function_list: list,\n    bel_fmt: str,\n    size: int,\n) -> list:\n    \n\n    \n    if isinstance(function_list, list):\n        if bel_fmt in [\"short\", \"medium\"]:\n            function_list = [\n                bel_spec[\"functions\"][\"to_short\"][fn] for fn in function_list\n            ]\n        else:\n            function_list = [\n                bel_spec[\"functions\"][\"to_long\"][fn] for fn in function_list\n            ]\n    elif bel_fmt in [\"short\", \"medium\"]:\n        function_list = bel_spec[\"functions\"][\"primary\"][\"list_short\"]\n    else:\n        function_list = bel_spec[\"functions\"][\"primary\"][\"list_long\"]\n\n    matches = []\n    for f in function_list:\n        escaped_completion_text = completion_text.replace(r\"(\", r\"\\(\").replace(\n            r\")\", r\"\\)\"\n        )\n        log.debug(f\"Completion match: {escaped_completion_text}  F: {f}\")\n        if re.match(escaped_completion_text, f):\n            matches.append(f)\n\n    replace_list = []\n    for match in matches:\n        if completion_text:\n            highlight = match.replace(completion_text, f\"<em>{completion_text}</em>\")\n        else:\n            highlight = completion_text\n\n        replace_list.append(\n            {\n                \"replacement\": match,\n                \"label\": f\"{match}()\",\n                \"highlight\": highlight,\n                \"type\": \"Function\",\n            }\n        )\n\n    return replace_list[:size]", "docstring": "Filter BEL functions by prefix\n\nArgs:\nprefix: completion string\nbel_fmt: short, medium, long BEL formats\nspec: BEL specification\n\nReturns:\nlist: list of BEL functions that match prefix", "source": "juraj-google-style"}
{"code": "def input_selector_schema(config_cls):\n    config_type = resolve_config_cls_arg(config_cls)\n    check.param_invariant(config_type.is_selector, 'config_cls')\n\n    def _wrap(func):\n\n        def _selector(context, config_value):\n            (selector_key, selector_value) = single_item(config_value)\n            return func(context, selector_key, selector_value)\n        return _create_input_schema(config_type, _selector)\n    return _wrap", "docstring": "A decorator for annotating a function that can take the selected properties\nfrom a ``config_value`` in to an instance of a custom type.\n\nArgs:\nconfig_cls (Selector)", "source": "codesearchnet"}
{"code": "def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):\n    try:\n        if (message is None):\n            raise ValueError('message was none')\n        encoded_message = bytes(message, 'utf-8')\n        if (encoded_message is None):\n            raise ValueError('utf-8 encoding of message failed')\n        if domain_name:\n            try:\n                UDP_IP = socket.gethostbyname(domain_name)\n            except Exception:\n                pass\n        if (UDP_IP is None):\n            raise Exception('UDP_IP is None')\n        if (UDP_PORT is None):\n            raise Exception('UDP_PORT is None')\n        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        sock.settimeout(sock_timeout)\n        sock.sendto(bytes(message, 'utf-8'), (UDP_IP, UDP_PORT))\n        sock.close()\n    except socket.timeout:\n        logger.debug('Failed to send usage tracking data: socket timeout')\n    except OSError as e:\n        logger.debug('Failed to send usage tracking data: OSError: {}'.format(e))\n    except Exception as e:\n        logger.debug('Failed to send usage tracking data: Exception: {}'.format(e))", "docstring": "Send UDP messages to usage tracker asynchronously\n\nThis multiprocessing based messenger was written to overcome the limitations\nof signalling/terminating a thread that is blocked on a system call. This\nmessenger is created as a separate process, and initialized with 2 queues,\nto_send to receive messages to be sent to the internet.\n\nArgs:\n- domain_name (str) : Domain name string\n- UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n- UDP_PORT (int) : UDP port to send out on\n- sock_timeout (int) : Socket timeout\n- to_send (multiprocessing.Queue) : Queue of outgoing messages to internet", "source": "codesearchnet"}
{"code": "def _encode_primitive_regex(self, builder: expressions.Builder, element: ElementDefinition) -> List[validation_pb2.SqlRequirement]:\n    name = builder.fhir_path.split('.')[-1]\n    if _is_disabled(element):\n        return []\n    if not _is_elem_supported(element):\n        return []\n    assert not builder.return_type.returns_polymorphic(), f'Polymorphic element builder {builder.fhir_path} not expected in _encode_primitive_regex.'\n    primitive_regex_info = self._get_regex_from_element(builder, element)\n    if primitive_regex_info is None:\n        return []\n    primitive_regex = primitive_regex_info.regex\n    regex_type_code = primitive_regex_info.type_code\n    constraint_key = f'{name}-matches-{regex_type_code}-regex'\n    if constraint_key in self._options.skip_keys:\n        return []\n    element_is_repeated = _utils.is_repeated_element(element)\n    fhir_path_builder = builder.matches(primitive_regex)\n    if regex_type_code == 'positiveInt':\n        fhir_path_builder = builder > 0\n    elif regex_type_code == 'unsignedInt':\n        fhir_path_builder = builder >= 0\n    if element_is_repeated:\n        fhir_path_builder = builder.all(fhir_path_builder)\n    context_builder = builder.get_parent_builder()\n    if context_builder.return_type.returns_polymorphic():\n        context_builder = context_builder.get_parent_builder()\n    result = self._encode_fhir_path_builder_constraint(fhir_path_builder, context_builder)\n    if result is None:\n        return []\n    constraint_key_column_name: str = _key_to_sql_column_name(_path_to_sql_column_name(constraint_key))\n    column_name_base: str = _path_to_sql_column_name(self._abs_path_invocation(builder.get_parent_builder()))\n    column_name = f'{column_name_base}_{constraint_key_column_name}'\n    if column_name in self._regex_columns_generated:\n        return []\n    self._regex_columns_generated.add(column_name)\n    return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_PRIMITIVE_REGEX, element_path=self._abs_path_invocation(context_builder), description=f'{name} needs to match regex of {regex_type_code}.', fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[name])]", "docstring": "Returns regex `SqlRequirement`s for primitive `ElementDefinition`.\n\nArgs:\nbuilder: The current builder to encode regexes for.\nelement: The ElementDefinition at this location\n\nReturns:\nA list of `SqlRequirement`s representing requirements generated from\nprimitive fields on the element that have regexes .", "source": "github-repos"}
{"code": "def index_library_datasets(self, tick_f=None):\n    dataset_n = 0\n    partition_n = 0\n\n    def tick(d, p):\n        if tick_f:\n            tick_f('datasets: {} partitions: {}'.format(d, p))\n    for dataset in self.library.datasets:\n        if self.backend.dataset_index.index_one(dataset):\n            dataset_n += 1\n            tick(dataset_n, partition_n)\n            for partition in dataset.partitions:\n                self.backend.partition_index.index_one(partition)\n                partition_n += 1\n                tick(dataset_n, partition_n)\n        else:\n            pass", "docstring": "Indexes all datasets of the library.\n\nArgs:\ntick_f (callable, optional): callable of one argument. Gets string with index state.", "source": "codesearchnet"}
{"code": "def parse_time_indices(s):\n    if (not s.startswith('[')):\n        s = (('[' + s) + ']')\n    parsed = command_parser._parse_slices(s)\n    if (len(parsed) != 1):\n        raise ValueError(('Invalid number of slicing objects in time indices (%d)' % len(parsed)))\n    else:\n        return parsed[0]", "docstring": "Parse a string as time indices.\n\nArgs:\ns: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10'\n\nReturns:\nA slice object.\n\nRaises:\nValueError: If `s` does not represent valid time indices.", "source": "codesearchnet"}
{"code": "def MakeCdfFromItems(items, name=''):\n    \n    runsum = 0\n    xs = []\n    cs = []\n\n    for value, count in sorted(items):\n        runsum += count\n        xs.append(value)\n        cs.append(runsum)\n\n    total = float(runsum)\n    ps = [c / total for c in cs]\n\n    cdf = Cdf(xs, ps, name)\n    return cdf", "docstring": "Makes a cdf from an unsorted sequence of (value, frequency) pairs.\n\nArgs:\nitems: unsorted sequence of (value, frequency) pairs\nname: string name for this CDF\n\nReturns:\ncdf: list of (value, fraction) pairs", "source": "juraj-google-style"}
{"code": "def rpm_name(self, name, python_version=None, pkg_name=False):\n        \n        if pkg_name:\n            return super(DandifiedNameConvertor, self).rpm_name(\n                name, python_version)\n        original_name = name\n        converted = super(DandifiedNameConvertor, self).rpm_name(\n            name, python_version)\n        python_query = self.query.filter(name__substr=[\n            'python', 'py', original_name, canonical_form(original_name)])\n        if converted in [pkg.name for pkg in python_query]:\n            logger.debug(\"Converted name exists\")\n            return converted\n\n        logger.debug(\"Converted name not found, searches for correct form\")\n\n        not_versioned_name = NameVariants(self.base_name(original_name), '')\n        versioned_name = NameVariants(self.base_name(original_name),\n                                      python_version)\n\n        if self.base_name(original_name).startswith(\"py\"):\n            nonpy_name = NameVariants(self.base_name(\n                original_name)[2:], python_version)\n\n        for pkg in python_query:\n            versioned_name.find_match(pkg.name)\n            not_versioned_name.find_match(pkg.name)\n            if 'nonpy_name' in locals():\n                nonpy_name.find_match(pkg.name)\n\n        if 'nonpy_name' in locals():\n            versioned_name = versioned_name.merge(nonpy_name)\n\n        correct_form = versioned_name.merge(not_versioned_name).best_matching\n        logger.debug(\"Most likely correct form of the name {0}.\".format(\n            correct_form))\n        return correct_form or converted", "docstring": "Checks if name converted using superclass rpm_name_method match name\nof package in the query. Searches for correct name if it doesn't.\nArgs:\nname: name to convert\npython_version: python version for which to retrieve the name of\nthe package\npkg_name: flag to perform conversion of rpm package name\n(foo -> python-foo)", "source": "juraj-google-style"}
{"code": "def read_graph_execution_traces_event(self, locator):\n    file_index, offset = locator\n    graph_execution_traces_path = self._graph_execution_traces_paths[file_index]\n    with self._reader_read_locks[graph_execution_traces_path]:\n        proto_string = self._get_reader(graph_execution_traces_path).read(offset)[0]\n    return debug_event_pb2.DebugEvent.FromString(proto_string)", "docstring": "Read DebugEvent at given offset from given .graph_execution_traces file.\n\nArgs:\nlocator: A (file_index, offset) tuple that locates the DebugEvent\ncontaining the graph execution trace.\n\nReturns:\nA DebugEventProto.\n\nRaises:\n`errors.DataLossError` if offset is at a wrong location.\n`IndexError` if offset is out of range of the file.", "source": "github-repos"}
{"code": "def create_iam_role(self, account):\n        \n        try:\n            iam = self.session.client('iam')\n            trust = get_template('vpc_flow_logs_iam_role_trust.json').render()\n            policy = get_template('vpc_flow_logs_role_policy.json').render()\n\n            newrole = iam.create_role(\n                Path='/',\n                RoleName=self.role_name,\n                AssumeRolePolicyDocument=trust\n            )['Role']['Arn']\n\n            \n            iam.put_role_policy(\n                RoleName=self.role_name,\n                PolicyName='VpcFlowPolicy',\n                PolicyDocument=policy\n            )\n\n            self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))\n            auditlog(\n                event='vpc_flow_logs.create_iam_role',\n                actor=self.ns,\n                data={\n                    'account': account.account_name,\n                    'roleName': self.role_name,\n                    'trustRelationship': trust,\n                    'inlinePolicy': policy\n                }\n            )\n            return newrole\n\n        except Exception:\n            self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))", "docstring": "Create a new IAM role. Returns the ARN of the newly created role\n\nArgs:\naccount (:obj:`Account`): Account where to create the IAM role\n\nReturns:\n`str`", "source": "juraj-google-style"}
{"code": "def combine(specs):\n        \n        new_specs = {}\n        for spec in specs:\n            if new_specs.get(spec, None) is None:\n                new_specs[spec] = spec\n            else:\n                new_specs[spec].add(spec)\n        return list(new_specs.values())", "docstring": "Combine package specifications' limitations.\n\nArgs:\nspecs (list of PackageSpec): the package specifications.\n\nReturns:\nlist of PackageSpec: the new, merged list of PackageSpec.", "source": "juraj-google-style"}
{"code": "def get_tensor_name(tensor):\n    parts = tensor.name.split(':')\n    if len(parts) > 2:\n        raise ValueError('Tensor name invalid. Expect 0 or 1 colon, got {0}'.format(len(parts) - 1))\n    if len(parts) > 1 and parts[1] != '0':\n        return tensor.name\n    return parts[0]", "docstring": "Returns name of the input tensor.\n\nArgs:\ntensor: tf.Tensor\n\nReturns:\nstr", "source": "github-repos"}
{"code": "def _logfile_sigterm_handler(*_):\n    logging.error('Received SIGTERM.')\n    write_logfile()\n    print('Received signal. Please see the log file for more information.', file=sys.stderr)\n    sys.exit(signal)", "docstring": "Handle exit signals and write out a log file.\n\nRaises:\nSystemExit: Contains the signal as the return code.", "source": "codesearchnet"}
{"code": "def probability_density(self, X):\n        \n        self.check_fit()\n\n        U, V = self.split_matrix(X)\n\n        if self.theta == 0:\n            return np.multiply(U, V)\n\n        else:\n            num = np.multiply(np.multiply(-self.theta, self._g(1)), 1 + self._g(np.add(U, V)))\n            aux = np.multiply(self._g(U), self._g(V)) + self._g(1)\n            den = np.power(aux, 2)\n            return num / den", "docstring": "Compute density function for given copula family.\n\nArgs:\nX: `np.ndarray`\n\nReturns:\nnp.array: probability density", "source": "juraj-google-style"}
{"code": "def RowWith(self, column, value):\n        \n        for row in self._table[1:]:\n            if row[column] == value:\n                return row\n        return None", "docstring": "Retrieves the first non header row with the column of the given value.\n\nArgs:\ncolumn: str, the name of the column to check.\nvalue: str, The value of the column to check.\n\nReturns:\nA Row() of the first row found, None otherwise.\n\nRaises:\nIndexError: The specified column does not exist.", "source": "juraj-google-style"}
{"code": "def appliance_device_snmp_v1_trap_destinations(self):\n    if (not self.__appliance_device_snmp_v1_trap_destinations):\n        self.__appliance_device_snmp_v1_trap_destinations = ApplianceDeviceSNMPv1TrapDestinations(self.__connection)\n    return self.__appliance_device_snmp_v1_trap_destinations", "docstring": "Gets the ApplianceDeviceSNMPv1TrapDestinations API client.\n\nReturns:\nApplianceDeviceSNMPv1TrapDestinations:", "source": "codesearchnet"}
{"code": "def _in_gce_environment():\n    if (SETTINGS.env_name is not None):\n        return (SETTINGS.env_name == 'GCE_PRODUCTION')\n    if ((NO_GCE_CHECK != 'True') and _detect_gce_environment()):\n        SETTINGS.env_name = 'GCE_PRODUCTION'\n        return True\n    return False", "docstring": "Detect if the code is running in the Compute Engine environment.\n\nReturns:\nTrue if running in the GCE environment, False otherwise.", "source": "codesearchnet"}
{"code": "def _InternalUnpackAny(msg):\n    from google.protobuf import symbol_database\n    factory = symbol_database.Default()\n    type_url = msg.type_url\n    if (not type_url):\n        return None\n    type_name = type_url.split('/')[(- 1)]\n    descriptor = factory.pool.FindMessageTypeByName(type_name)\n    if (descriptor is None):\n        return None\n    message_class = factory.GetPrototype(descriptor)\n    message = message_class()\n    message.ParseFromString(msg.value)\n    return message", "docstring": "Unpacks Any message and returns the unpacked message.\n\nThis internal method is different from public Any Unpack method which takes\nthe target message as argument. _InternalUnpackAny method does not have\ntarget message type and need to find the message type in descriptor pool.\n\nArgs:\nmsg: An Any message to be unpacked.\n\nReturns:\nThe unpacked message.", "source": "codesearchnet"}
{"code": "def _write_source_file_content(self, file_path):\n    if file_path in self._source_file_paths:\n        return self._source_file_paths.index(file_path)\n    with self._source_file_paths_lock:\n        if file_path not in self._source_file_paths:\n            lines = None\n            if source_utils.is_extension_uncompiled_python_source(file_path):\n                try:\n                    lines, _ = source_utils.load_source(file_path)\n                except IOError as e:\n                    logging.warn('Failed to read source code from path: %s. Reason: %s', file_path, e)\n            writer = self.get_writer()\n            writer.WriteSourceFile(debug_event_pb2.SourceFile(file_path=file_path, host_name=self._hostname, lines=lines))\n            self._source_file_paths.append(file_path)\n        return self._source_file_paths.index(file_path)", "docstring": "Send the content of a source file via debug-events writer.\n\nArgs:\nfile_path: Path to the source file.\n\nReturns:\nAn int index for the file.", "source": "github-repos"}
{"code": "def clean(decrypted: bytes) -> str:\n    last = decrypted[(- 1)]\n    if isinstance(last, int):\n        return decrypted[:(- last)].decode('utf8')\n    return decrypted[:(- ord(last))].decode('utf8')", "docstring": "r\"\"\"Strip padding from decrypted value.\n\nRemove number indicated by padding\ne.g. if last is '\\x0e' then ord('\\x0e') == 14, so take off 14.\n\nArgs:\ndecrypted: decrypted value\nReturns:\nDecrypted stripped of junk padding", "source": "codesearchnet"}
{"code": "def get_forward_rate(self, start_date, maturity_date, daycount_fraction=None):\n    start_date = dates.convert_to_date_tensor(start_date)\n    maturity_date = dates.convert_to_date_tensor(maturity_date)\n    if daycount_fraction is None:\n        daycount_fraction = dates.daycount_actual_365_fixed(start_date=start_date, end_date=maturity_date, dtype=self._dtype)\n    else:\n        daycount_fraction = tf.convert_to_tensor(daycount_fraction, self._dtype)\n    dfstart = self.get_discount_factor(start_date)\n    dfmaturity = self.get_discount_factor(maturity_date)\n    return (dfstart / dfmaturity - 1.0) / daycount_fraction", "docstring": "Returns the simply accrued forward rate between [start_dt, maturity_dt].\n\nArgs:\nstart_date: A `DateTensor` specifying the start of the accrual period\nfor the forward rate.\nmaturity_date: A `DateTensor` specifying the end of the accrual period\nfor the forward rate. The shape of `maturity_date` must be the same\nas the shape of the `DateTensor` `start_date`.\ndaycount_fraction: An optional `Tensor` of real dtype specifying the\ntime between `start_date` and `maturity_date` in years computed using\nthe forward rate's day count basis. The shape of the input should be\nthe same as that of `start_date` and `maturity_date`.\nDefault value: `None`, in which case the daycount fraction is computed\nusing `ACTUAL_365` convention.\n\nReturns:\nA real tensor of same shape as the inputs containing the simply compounded\nforward rate.", "source": "github-repos"}
{"code": "def launch_external_file(filename: str, raise_if_fails: bool=False) -> None:\n    log.info('Launching external file: {!r}', filename)\n    try:\n        if sys.platform.startswith('linux'):\n            cmdargs = ['xdg-open', filename]\n            subprocess.call(cmdargs)\n        else:\n            os.startfile(filename)\n    except Exception as e:\n        log.critical('Error launching {!r}: error was {}.\\n\\n{}', filename, str(e), traceback.format_exc())\n        if raise_if_fails:\n            raise", "docstring": "Launches a file using the operating system's standard launcher.\n\nArgs:\nfilename: file to launch\nraise_if_fails: raise any exceptions from\n``subprocess.call([\"xdg-open\", filename])`` (Linux)\nor ``os.startfile(filename)`` (otherwise)? If not, exceptions\nare suppressed.", "source": "codesearchnet"}
{"code": "def clean(exclude):\n    pretend = context.get('pretend', False)\n    exclude = (list(exclude) + conf.get('clean.exclude', []))\n    clean_patterns = conf.get('clean.patterns', ['*__pycache__*', '*.py[cod]', '*.swp'])\n    num_files = 0\n    with util.timed_block() as t:\n        files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)\n        for path in files:\n            try:\n                num_files += 1\n                if (not isdir(path)):\n                    log.info('  <91>[file] <90>{}', path)\n                    ((not pretend) and os.remove(path))\n                else:\n                    log.info('  <91>[dir]  <90>{}', path)\n                    ((not pretend) and rmtree(path))\n            except OSError:\n                log.info('<33>Failed to remove <90>{}', path)\n    if pretend:\n        msg = 'Would delete <33>{}<32> files. Took <33>{}<32>s'\n    else:\n        msg = 'Deleted <33>{}<32> files in <33>{}<32>s'\n    log.info(msg.format(num_files, t.elapsed_s))", "docstring": "Remove all unnecessary files.\n\nArgs:\npretend (bool):\nIf set to **True**, do not delete any files, just show what would be\ndeleted.\nexclude (list[str]):\nA list of path patterns to exclude from deletion.", "source": "codesearchnet"}
{"code": "def response_data_to_model_instance(self, response_data):\n    response_data['datetime_created'] = dateutil.parser.parse(response_data['datetime_created'])\n    if response_data['datetime_finished']:\n        response_data['datetime_finished'] = dateutil.parser.parse(response_data['datetime_finished'])\n    return super(BaseTaskInstanceManager, self).response_data_to_model_instance(response_data)", "docstring": "Convert response data to a task instance model.\n\nArgs:\nresponse_data (dict): The data from the request's response.\n\nReturns:\n:class:`saltant.models.base_task_instance.BaseTaskInstance`:\nA task instance model instance representing the task\ninstance from the reponse data.", "source": "codesearchnet"}
{"code": "def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n    output_size = size['shortest_edge']\n    return center_crop(image, size=(output_size, output_size), data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Center crop an image to `(size[\"height\"], size[\"width\"])`. If the input size is smaller than `crop_size` along\nany edge, the image is padded with 0's and then center cropped.\n\nArgs:\nimage (`np.ndarray`):\nImage to center crop.\nsize (`Dict[str, int]`):\nSize of the output image in the form `{\"height\": h, \"width\": w}`.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred from the input\nimage.", "source": "github-repos"}
{"code": "def GetOpeningBracket(node):\n    return getattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None)", "docstring": "Get opening bracket value from a node.\n\nArguments:\nnode: the node.\n\nReturns:\nThe opening bracket node or None if it couldn't find one.", "source": "github-repos"}
{"code": "def as_dict(self, verbosity=1, fmt=None, **kwargs):\n    if (fmt == 'abivars'):\n        'Returns a dictionary with the ABINIT variables.'\n        from pymatgen.io.abinit.abiobjects import structure_to_abivars\n        return structure_to_abivars(self, **kwargs)\n    latt_dict = self._lattice.as_dict(verbosity=verbosity)\n    del latt_dict['@module']\n    del latt_dict['@class']\n    d = {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'charge': self._charge, 'lattice': latt_dict, 'sites': []}\n    for site in self:\n        site_dict = site.as_dict(verbosity=verbosity)\n        del site_dict['lattice']\n        del site_dict['@module']\n        del site_dict['@class']\n        d['sites'].append(site_dict)\n    return d", "docstring": "Dict representation of Structure.\n\nArgs:\nverbosity (int): Verbosity level. Default of 1 includes both\ndirect and cartesian coordinates for all sites, lattice\nparameters, etc. Useful for reading and for insertion into a\ndatabase. Set to 0 for an extremely lightweight version\nthat only includes sufficient information to reconstruct the\nobject.\nfmt (str): Specifies a format for the dict. Defaults to None,\nwhich is the default format used in pymatgen. Other options\ninclude \"abivars\".\n**kwargs: Allow passing of other kwargs needed for certain\nformats, e.g., \"abivars\".\n\nReturns:\nJSON serializable dict representation.", "source": "codesearchnet"}
{"code": "def __init__(self, auth, **kwargs):\n        \n        self.auth = auth\n        self.protocol = kwargs.get('protocol', 'https')\n        self.domain = kwargs.get('domain', 'api.sumologic.com')\n        self.api = kwargs.get('api', '/api/v1')\n        api_path = '%s' % self.api\n        self.url = '%s:\n\n        \n        self._debug_mode = kwargs.get('debug', False)\n        self.log = logging.getLogger(__name__)\n        self.log.addHandler(logging.StreamHandler())\n        self.log.setLevel(get_logging_level(self._debug_mode))", "docstring": "Initializes Client object.\n\nArgs:\nauth (tuple): Authentication object\napi (str): Api endpath", "source": "juraj-google-style"}
{"code": "def remove_send_last_message(self, connection):\n        \n        if connection in self._send_last_message:\n            del self._send_last_message[connection]\n            LOGGER.debug(\"Removed send_last_message function \"\n                         \"for connection %s\", connection)\n        else:\n            LOGGER.warning(\"Attempted to remove send_last_message \"\n                           \"function for connection %s, but no \"\n                           \"send_last_message function was registered\",\n                           connection)", "docstring": "Removes a send_last_message function previously registered\nwith the Dispatcher.\n\nArgs:\nconnection (str): A locally unique identifier provided\nby the receiver of messages.", "source": "juraj-google-style"}
{"code": "def split(node, stack):\n    (node, defined, reaching) = _fix(node)\n    node = store_state(node, reaching, defined, stack)\n    anno.clearanno(node)\n    return node", "docstring": "Carry over the state from the primal to the adjoint.\n\nArgs:\nnode: A module with the primal and adjoint function definitions as returned\nby `reverse_ad`.\nstack: The stack node to use for storing and restoring state.\n\nReturns:\nfunc: A `Module` node with two function definitions containing the primal\nand adjoint respectively.", "source": "codesearchnet"}
{"code": "def gated_grpc_debug_watches(self):\n    return list(self._gated_grpc_debug_watches)", "docstring": "Get the list of debug watches with attribute gated_grpc=True.\n\nSince the server receives `GraphDef` from the debugged runtime, it can only\nreturn such debug watches that it has received so far.\n\nReturns:\nA `list` of `DebugWatch` `namedtuples` representing the debug watches with\ngated_grpc=True. Each `namedtuple` element has the attributes:\n`node_name` as a `str`,\n`output_slot` as an `int`,\n`debug_op` as a `str`.", "source": "github-repos"}
{"code": "def norm(self, limits=None):\n        \n        kwargs = {}\n        if limits is not None:\n            kwargs = {'min': limits[0], 'max': limits[1]}\n        return dim(self, norm, **kwargs)", "docstring": "Unity-based normalization to scale data into 0-1 range.\n\n(values - min) / (max - min)\n\nArgs:\nlimits: tuple of (min, max) defining the normalization range", "source": "juraj-google-style"}
{"code": "def _init_obj_attrs(self, obj, user=False):\n        \n        for attr in obj.__class__._tx_attrs.values():\n\n            if user:\n                \n                attr_name = \"_txa_%s\" % attr.name\n            else:\n                attr_name = attr.name\n\n            if attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]:\n                \n                setattr(obj, attr_name, [])\n            elif attr.cls.__name__ in BASE_TYPE_NAMES:\n                \n                if self.auto_init_attributes:\n                    setattr(obj, attr_name,\n                            python_type(attr.cls.__name__)())\n                else:\n                    \n                    if attr.bool_assignment:\n                        \n                        \n                        setattr(obj, attr_name, False)\n                    else:\n                        \n                        \n                        \n                        \n                        setattr(obj, attr_name, None)\n            else:\n                \n                setattr(obj, attr_name, None)", "docstring": "Initialize obj attributes.\nArgs:\nobj(object): A python object to set attributes to.\nuser(bool): If this object is a user object mangle attribute names.", "source": "juraj-google-style"}
{"code": "def conf(self):\n    return self.env.get_template('conf.py.j2').render(metadata=self.metadata, package=self.package)", "docstring": "Generate the Sphinx `conf.py` configuration file\n\nReturns:\n(str): the contents of the `conf.py` file.", "source": "codesearchnet"}
{"code": "def _on_join_leader(self, response):\n    try:\n        group_assignment = self._perform_assignment(response.leader_id, response.group_protocol, response.members)\n    except Exception as e:\n        return Future().failure(e)\n    version = (0 if (self.config['api_version'] < (0, 11, 0)) else 1)\n    request = SyncGroupRequest[version](self.group_id, self._generation.generation_id, self._generation.member_id, [(member_id, (assignment if isinstance(assignment, bytes) else assignment.encode())) for (member_id, assignment) in six.iteritems(group_assignment)])\n    log.debug('Sending leader SyncGroup for group %s to coordinator %s: %s', self.group_id, self.coordinator_id, request)\n    return self._send_sync_group_request(request)", "docstring": "Perform leader synchronization and send back the assignment\nfor the group via SyncGroupRequest\n\nArguments:\nresponse (JoinResponse): broker response to parse\n\nReturns:\nFuture: resolves to member assignment encoded-bytes", "source": "codesearchnet"}
{"code": "def find_common_root(elements):\n    if (not elements):\n        raise UserWarning(\"Can't find common root - no elements suplied.\")\n    root_path = el_to_path_vector(elements.pop())\n    for el in elements:\n        el_path = el_to_path_vector(el)\n        root_path = common_vector_root(root_path, el_path)\n        if (not root_path):\n            raise UserWarning(('Vectors without common root:\\n%s' % str(el_path)))\n    return root_path", "docstring": "Find root which is common for all `elements`.\n\nArgs:\nelements (list): List of double-linked HTMLElement objects.\n\nReturns:\nlist: Vector of HTMLElement containing path to common root.", "source": "codesearchnet"}
{"code": "def _L2LossGrad(op: ops.Operation, grad):\n    return op.inputs[0] * grad", "docstring": "Return the gradients for L2Loss.\n\nArgs:\nop: The L2LossOp for which we need to generate gradients.\ngrad: Tensor containing a single number.\n\nReturns:\nThe gradient, which is (x * grad).", "source": "github-repos"}
{"code": "def add_query(self, query, join_with=AND):\n    if (not isinstance(query, DomainCondition)):\n        query = DomainCondition.from_tuple(query)\n    if len(self.query):\n        self.query.append(join_with)\n    self.query.append(query)", "docstring": "Join a new query to existing queries on the stack.\n\nArgs:\nquery (tuple or list or DomainCondition): The condition for the\nquery. If a ``DomainCondition`` object is not provided, the\ninput should conform to the interface defined in\n:func:`~.domain.DomainCondition.from_tuple`.\njoin_with (str): The join string to apply, if other queries are\nalready on the stack.", "source": "codesearchnet"}
{"code": "def cleanup(context):\n    \n    for name in 'work_dir', 'artifact_dir', 'task_log_dir':\n        path = context.config[name]\n        if os.path.exists(path):\n            log.debug(\"rm({})\".format(path))\n            rm(path)\n        makedirs(path)", "docstring": "Clean up the work_dir and artifact_dir between task runs, then recreate.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An FNet sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def InjectString(self, codestring, wait_for_completion=True):\n    \n    if self.inferior.is_running and self.inferior.gdb.IsAttached():\n      try:\n        self.inferior.gdb.InjectString(\n            self.inferior.position,\n            codestring,\n            wait_for_completion=wait_for_completion)\n      except RuntimeError:\n        exc_type, exc_value, exc_traceback = sys.exc_info()\n        traceback.print_exception(exc_type, exc_value, exc_traceback)\n    else:\n      logging.error('Not attached to any process.')", "docstring": "Try to inject python code into current thread.\n\nArgs:\ncodestring: Python snippet to execute in inferior. (may contain newlines)\nwait_for_completion: Block until execution of snippet has completed.", "source": "juraj-google-style"}
{"code": "def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):\n    r\n    if n is None:\n        n = __READ_TAIL_N__\n    verbose = _rectify_verb_read(verbose)\n    if verbose:\n        print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))\n    try:\n        if not util_path.checkpath(fpath, verbose=verbose, n=n):\n            raise IOError('[io] * FILE DOES NOT EXIST!')\n        \n        with open(fpath, 'rb') as file_:\n            if aslines:\n                \n                if six.PY2:\n                    \n                    \n                    text = [line.decode('utf8', errors=errors)\n                            for line in file_.readlines()]\n                else:\n                    text = [line.decode('utf8', errors=errors)\n                            for line in file_.readlines()]\n                    \n            else:\n                \n                if six.PY2:\n                    text = file_.read().decode('utf8', errors=errors)\n                else:\n                    \n                    text = file_.read().decode('utf8', errors=errors)\n        return text\n    except IOError as ex:\n        from utool import util_dbg\n        if verbose or strict:\n            util_dbg.printex(ex, ' * Error reading fpath=%r' %\n                             util_path.tail(fpath, n=n), '[io]')\n        if strict:\n            raise", "docstring": "r\"\"\" Reads text from a file. Automatically returns utf8.\n\nArgs:\nfpath (str): file path\naslines (bool): if True returns list of lines\nverbose (bool): verbosity flag\n\nReturns:\nstr: text from fpath (this is unicode)\n\nIgnore:\nx = b'''/whaleshark_003_fors\\xc3\\xb8g.wmv\" />\\r\\n'''\nut.writeto('foo.txt', x)\ny = ut.readfrom('foo.txt')\ny.encode('utf8') == x", "source": "juraj-google-style"}
{"code": "def update_if_absent(self, **kwargs):\n    for arg in kwargs:\n        if hasattr(self, arg):\n            if (getattr(self, arg) is None):\n                setattr(self, arg, kwargs[arg])\n        else:\n            raise ValueError(('Invalid RayParams parameter in update_if_absent: %s' % arg))\n    self._check_usage()", "docstring": "Update the settings when the target fields are None.\n\nArgs:\nkwargs: The keyword arguments to set corresponding fields.", "source": "codesearchnet"}
{"code": "def libdmtx_function(fname, restype, *args):\n    \n    prototype = CFUNCTYPE(restype, *args)\n    return prototype((fname, load_libdmtx()))", "docstring": "Returns a foreign function exported by `libdmtx`.\n\nArgs:\nfname (:obj:`str`): Name of the exported function as string.\nrestype (:obj:): Return type - one of the `ctypes` primitive C data\ntypes.\n*args: Arguments - a sequence of `ctypes` primitive C data types.\n\nReturns:\ncddl.CFunctionType: A wrapper around the function.", "source": "juraj-google-style"}
{"code": "def is_module_function(obj, prop):\n    \n    python_version = sys.version_info[0]\n    if python_version == 3:\n        unicode = str\n\n    if prop and (isinstance(prop, str) or isinstance(prop, unicode)): \n        if prop in dir(obj):\n            if (\n                    isinstance(getattr(obj, prop), FunctionType)\n                    or isinstance(getattr(obj, prop), BuiltinFunctionType)\n                    or inspect.ismethod(getattr(obj, prop))\n            ):\n            \n            \n                return True\n            else:\n                ErrorHandler.prop_is_func_error(obj, prop)\n        else:\n            ErrorHandler.prop_in_obj_error(obj, prop)\n    elif prop:\n        ErrorHandler.prop_type_error(prop)\n    return False", "docstring": "Checking and setting type to MODULE_FUNCTION\nArgs:\nobj: ModuleType\nprop: FunctionType\nReturn:\nBoolean\nRaise:\nprop_type_error: When the type of prop is not valid\nprop_in_obj_error: When prop is not in the obj(module/class)\nprop_is_func_error: When prop is not a callable stuff", "source": "juraj-google-style"}
{"code": "def _kl_von_mises_von_mises(d1, d2, name=None):\n  \n  with tf.name_scope(name or \"kl_von_mises_von_mises\"):\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    i0e_concentration1 = tf.math.bessel_i0e(d1.concentration)\n    i1e_concentration1 = tf.math.bessel_i1e(d1.concentration)\n    i0e_concentration2 = tf.math.bessel_i0e(d2.concentration)\n    return ((d2.concentration - d1.concentration) +\n            tf.math.log(i0e_concentration2 / i0e_concentration1) +\n            (d1.concentration - d2.concentration * tf.cos(d1.loc - d2.loc)) *\n            (i1e_concentration1 / i0e_concentration1))", "docstring": "Batchwise KL divergence KL(d1 || d2) with d1 and d2 von Mises.\n\nArgs:\nd1: instance of a von Mises distribution object.\nd2: instance of a a von Mises distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_von_mises_von_mises\".\n\nReturns:\nBatchwise KL(d1 || d2)", "source": "juraj-google-style"}
{"code": "def normalize_name(name, overrides=None):\n    \n\n    normalized_name = name.title()\n\n    if overrides:\n        override_map = dict([(name.title(), name) for name in overrides])\n\n        return override_map.get(normalized_name, normalized_name)\n    else:\n        return normalized_name", "docstring": "Normalize the key name to title case.\n\nFor example, ``normalize_name('content-id')`` will become ``Content-Id``\n\nArgs:\nname (str): The name to normalize.\noverrides (set, sequence): A set or sequence containing keys that\nshould be cased to themselves. For example, passing\n``set('WARC-Type')`` will normalize any key named \"warc-type\" to\n``WARC-Type`` instead of the default ``Warc-Type``.\n\nReturns:\nstr", "source": "juraj-google-style"}
{"code": "def wait(self, **kwargs):\n    return self.client.api.wait(self.id, **kwargs)", "docstring": "Block until the container stops, then return its exit code. Similar to\nthe ``docker wait`` command.\n\nArgs:\ntimeout (int): Request timeout\ncondition (str): Wait until a container state reaches the given\ncondition, either ``not-running`` (default), ``next-exit``,\nor ``removed``\n\nReturns:\n(dict): The API's response as a Python dictionary, including\nthe container's exit code under the ``StatusCode`` attribute.\n\nRaises:\n:py:class:`requests.exceptions.ReadTimeout`\nIf the timeout is exceeded.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"}
{"code": "class IncSlidingStdevTracker(IncStdevTracker):\n\n    def __init__(self, window_size):\n        super().__init__(window_mode=WindowMode.SLIDING, window_size=window_size)", "docstring": "Sliding window standard deviation tracker using incremental calculation.\n\nArgs:\nwindow_size: The size of the sliding window.", "source": "github-repos"}
{"code": "def perform(self, agent_indices, observ):\n    \n    with tf.name_scope('perform/'):\n      observ = self._observ_filter.transform(observ)\n      if self._last_state is None:\n        state = None\n      else:\n        state = tools.nested.map(\n            lambda x: tf.gather(x, agent_indices), self._last_state)\n      with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):\n        output = self._network(\n            observ[:, None], tf.ones(observ.shape[0]), state)\n      action = tf.cond(\n          self._is_training, output.policy.sample, output.policy.mode)\n      logprob = output.policy.log_prob(action)[:, 0]\n      \n      summary = tf.cond(self._should_log, lambda: tf.summary.merge([\n          tf.summary.histogram('mode', output.policy.mode()[:, 0]),\n          tf.summary.histogram('action', action[:, 0]),\n          tf.summary.histogram('logprob', logprob)]), str)\n      \n      if self._last_state is None:\n        assign_state = tf.no_op()\n      else:\n        assign_state = utility.assign_nested_vars(\n            self._last_state, output.state, agent_indices)\n      remember_last_action = tf.scatter_update(\n          self._last_action, agent_indices, action[:, 0])\n      policy_params = tools.nested.filter(\n          lambda x: isinstance(x, tf.Tensor), output.policy.parameters)\n      assert policy_params, 'Policy has no parameters to store.'\n      remember_last_policy = tools.nested.map(\n          lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),\n          self._last_policy, policy_params, flatten=True)\n      with tf.control_dependencies((\n          assign_state, remember_last_action) + remember_last_policy):\n        return action[:, 0], tf.identity(summary)", "docstring": "Compute batch of actions and a summary for a batch of observation.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\nobserv: Tensor of a batch of observations for all agents.\n\nReturns:\nTuple of action batch tensor and summary tensor.", "source": "juraj-google-style"}
{"code": "def check_for_lane_permission(self):\n    if self.current.lane_permission:\n        log.debug(('HAS LANE PERM: %s' % self.current.lane_permission))\n        perm = self.current.lane_permission\n        if (not self.current.has_permission(perm)):\n            raise HTTPError(403, (\"You don't have required lane permission: %s\" % perm))\n    if self.current.lane_relations:\n        context = self.get_pool_context()\n        log.debug(('HAS LANE RELS: %s' % self.current.lane_relations))\n        try:\n            cond_result = eval(self.current.lane_relations, context)\n        except:\n            log.exception(('CONDITION EVAL ERROR : %s || %s' % (self.current.lane_relations, context)))\n            raise\n        if (not cond_result):\n            log.debug(('LANE RELATION ERR: %s %s' % (self.current.lane_relations, context)))\n            raise HTTPError(403, (\"You aren't qualified for this lane: %s\" % self.current.lane_relations))", "docstring": "One or more permissions can be associated with a lane\nof a workflow. In a similar way, a lane can be\nrestricted with relation to other lanes of the workflow.\n\nThis method called on lane changes and checks user has\nrequired permissions and relations.\n\nRaises:\nHTTPForbidden: if the current user hasn't got the\nrequired permissions and proper relations", "source": "codesearchnet"}
{"code": "def restore(self, state):\n    selector = DataStreamSelector.FromString(state.get(u'selector'))\n    if (selector != self.selector):\n        raise ArgumentError('Attempted to restore a BufferedStreamWalker with a different selector', selector=self.selector, serialized_data=state)\n    self.seek(state.get(u'offset'), target='offset')", "docstring": "Restore a previous state of this stream walker.\n\nRaises:\nArgumentError: If the state refers to a different selector or the\noffset is invalid.", "source": "codesearchnet"}
{"code": "def convert_sigmoid(params, w_name, scope_name, inputs, layers, weights, names):\n    print('Converting sigmoid ...')\n    if (names == 'short'):\n        tf_name = ('SIGM' + random_string(4))\n    elif (names == 'keep'):\n        tf_name = w_name\n    else:\n        tf_name = (w_name + str(random.random()))\n    sigmoid = keras.layers.Activation('sigmoid', name=tf_name)\n    layers[scope_name] = sigmoid(layers[inputs[0]])", "docstring": "Convert sigmoid layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"}
{"code": "def scatter_div(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n    return gen_state_ops.scatter_div(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", "docstring": "Divide this variable by `tf.IndexedSlices`.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to divide this variable by.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nA `Tensor` that will hold the new value of this variable after\nthe scattered division has completed.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"}
{"code": "def balance(self, as_of=None, raw=False, leg_query=None, **kwargs):\n        \n        balances = [\n            account.simple_balance(as_of=as_of, raw=raw, leg_query=leg_query, **kwargs)\n            for account in self.get_descendants(include_self=True)\n        ]\n        return sum(balances, Balance())", "docstring": "Get the balance for this account, including child accounts\n\nArgs:\nas_of (Date): Only include transactions on or before this date\nraw (bool): If true the returned balance should not have its sign\nadjusted for display purposes.\nkwargs (dict): Will be used to filter the transaction legs\n\nReturns:\nBalance\n\nSee Also:\n:meth:`simple_balance()`", "source": "juraj-google-style"}
{"code": "def compare_count(self):\n    if (self.query.options['count'] is not None):\n        count_opt = int(self.query.options['count'])\n        self._cache_at_least((count_opt + 1))\n        return cmp(len(self._result_cache), count_opt)\n    if (self.query.options['minimum'] is not None):\n        min_opt = int(self.query.options['minimum'])\n        if (not self._cache_at_least(min_opt)):\n            return (- 1)\n    if (self.query.options['maximum'] is not None):\n        max_opt = int(self.query.options['maximum'])\n        if self._cache_at_least((max_opt + 1)):\n            return 1\n    if (self.query.options['between'] is not None):\n        between = self.query.options['between']\n        (min_opt, max_opt) = (between[0], between[(- 1)])\n        if (not self._cache_at_least(min_opt)):\n            return (- 1)\n        if self._cache_at_least((max_opt + 1)):\n            return 1\n        return 0\n    return 0", "docstring": "Returns how the result count compares to the query options.\n\nThe return value is negative if too few results were found, zero if enough were found, and\npositive if too many were found.\n\nReturns:\nint: -1, 0, or 1.", "source": "codesearchnet"}
{"code": "def get_devices(ads, **kwargs):\n\n    def _get_device_filter(ad):\n        for k, v in kwargs.items():\n            if not hasattr(ad, k):\n                return False\n            elif getattr(ad, k) != v:\n                return False\n        return True\n    filtered = filter_devices(ads, _get_device_filter)\n    if not filtered:\n        raise Error('Could not find a target device that matches condition: %s.' % kwargs)\n    else:\n        return filtered", "docstring": "Finds a list of AndroidDevice instance from a list that has specific\nattributes of certain values.\n\nExample:\nget_devices(android_devices, label='foo', phone_number='1234567890')\nget_devices(android_devices, model='angler')\n\nArgs:\nads: A list of AndroidDevice instances.\nkwargs: keyword arguments used to filter AndroidDevice instances.\n\nReturns:\nA list of target AndroidDevice instances.\n\nRaises:\nError: No devices are matched.", "source": "github-repos"}
{"code": "def stack1d(*points):\n    result = np.empty((2, len(points)), order='F')\n    for (index, point) in enumerate(points):\n        result[(:, index)] = point\n    return result", "docstring": "Fill out the columns of matrix with a series of points.\n\nThis is because ``np.hstack()`` will just make another 1D vector\nout of them and ``np.vstack()`` will put them in the rows.\n\nArgs:\npoints (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.\narrays with shape ``(2,)``.\n\nReturns:\nnumpy.ndarray: The array with each point in ``points`` as its\ncolumns.", "source": "codesearchnet"}
{"code": "def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):\n    bvsum = 0\n    for (specie1, occu1) in site.species.items():\n        el1 = Element(specie1.symbol)\n        for (nn, dist) in nn_list:\n            for (specie2, occu2) in nn.species.items():\n                el2 = Element(specie2.symbol)\n                if (((el1 in ELECTRONEG) or (el2 in ELECTRONEG)) and (el1 != el2)):\n                    r1 = BV_PARAMS[el1]['r']\n                    r2 = BV_PARAMS[el2]['r']\n                    c1 = BV_PARAMS[el1]['c']\n                    c2 = BV_PARAMS[el2]['c']\n                    R = ((r1 + r2) - (((r1 * r2) * ((sqrt(c1) - sqrt(c2)) ** 2)) / ((c1 * r1) + (c2 * r2))))\n                    vij = exp(((R - (dist * scale_factor)) / 0.31))\n                    bvsum += (((occu1 * occu2) * vij) * (1 if (el1.X < el2.X) else (- 1)))\n    return bvsum", "docstring": "Calculates the BV sum of a site for unordered structures.\n\nArgs:\nsite:\nThe site\nnn_list:\nList of nearest neighbors in the format [(nn_site, dist), ...].\nscale_factor:\nA scale factor to be applied. This is useful for scaling distance,\nesp in the case of calculation-relaxed structures which may tend\nto under (GGA) or over bind (LDA).", "source": "codesearchnet"}
{"code": "def text(self, value):\n    self._text = value\n    self.timestamps.edited = datetime.datetime.utcnow()\n    self.touch(True)", "docstring": "Set the text value.\n\nArgs:\nvalue (str): Text value.", "source": "codesearchnet"}
{"code": "def update_parser(self, parser):\n        \n\n        self._parser = parser\n        ini_str = argparse_to_ini(parser)\n        configp = configparser.ConfigParser(allow_no_value=True)\n        configp.read_dict(self._config)\n        configp.read_string(ini_str)\n        self._config.update(\n            {s: dict(configp.items(s))\n             for s in configp.sections()}\n        )", "docstring": "Update config dictionary with declared arguments in an argparse.parser\nNew variables will be created, and existing ones overridden.\n\nArgs:\nparser (argparse.ArgumentParser): parser to read variables from", "source": "juraj-google-style"}
{"code": "def get_samples_live(self, sensor_id, last=None):\n    url = 'https:\n    headers = self.__gen_headers()\n    headers['Content-Type'] = 'application/json'\n    params = {'sensorId': sensor_id}\n    if last:\n        params['last'] = last\n    url = self.__append_url_params(url, params)\n    r = requests.get(url, headers=headers)\n    return r.json()", "docstring": "Get recent samples, one sample per second for up to the last 2 minutes.\n\nArgs:\nsensor_id (string): hexadecimal id of the sensor to query, e.g.\n``0x0013A20040B65FAD``\nlast (string): starting range, as ISO8601 timestamp\n\nReturns:\nlist: dictionary objects containing sample data", "source": "codesearchnet"}
{"code": "def import_global(name, modules=None, exceptions=DummyException, locals_=None, globals_=None, level=(- 1)):\n    frame = None\n    try:\n        if ((locals_ is None) or (globals_ is None)):\n            import inspect\n            frame = inspect.stack()[1][0]\n            if (locals_ is None):\n                locals_ = frame.f_locals\n            if (globals_ is None):\n                globals_ = frame.f_globals\n        try:\n            name = name.split('.')\n            if (not name[0]):\n                name = name[1:]\n                level = 1\n            module = __import__(name=(name[0] or '.'), globals=globals_, locals=locals_, fromlist=name[1:], level=max(level, 0))\n            try:\n                for attr in name[1:]:\n                    module = getattr(module, attr)\n            except AttributeError:\n                raise ImportError(('No module named ' + '.'.join(name)))\n            if (not modules):\n                modules = getattr(module, '__all__', dir(module))\n            else:\n                modules = set(modules).intersection(dir(module))\n            for k in set(dir(module)).intersection(modules):\n                if (k and (k[0] != '_')):\n                    globals_[k] = getattr(module, k)\n        except exceptions as e:\n            return e\n    finally:\n        del name, modules, exceptions, locals_, globals_, frame", "docstring": "Import the requested items into the global scope\n\nWARNING! this method _will_ overwrite your global scope\nIf you have a variable named \"path\" and you call import_global('sys')\nit will be overwritten with sys.path\n\nArgs:\nname (str): the name of the module to import, e.g. sys\nmodules (str): the modules to import, use None for everything\nexception (Exception): the exception to catch, e.g. ImportError\n`locals_`: the `locals()` method (in case you need a different scope)\n`globals_`: the `globals()` method (in case you need a different scope)\nlevel (int): the level to import from, this can be used for\nrelative imports", "source": "codesearchnet"}
{"code": "def get_content(url, headers={}, decoded=True):\n    logging.debug(('get_content: %s' % url))\n    req = request.Request(url, headers=headers)\n    if cookies:\n        cookies.add_cookie_header(req)\n        req.headers.update(req.unredirected_hdrs)\n    response = urlopen_with_retry(req)\n    data = response.read()\n    content_encoding = response.getheader('Content-Encoding')\n    if (content_encoding == 'gzip'):\n        data = ungzip(data)\n    elif (content_encoding == 'deflate'):\n        data = undeflate(data)\n    if decoded:\n        charset = match1(response.getheader('Content-Type', ''), 'charset=([\\\\w-]+)')\n        if (charset is not None):\n            data = data.decode(charset, 'ignore')\n        else:\n            data = data.decode('utf-8', 'ignore')\n    return data", "docstring": "Gets the content of a URL via sending a HTTP GET request.\n\nArgs:\nurl: A URL.\nheaders: Request headers used by the client.\ndecoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.\n\nReturns:\nThe content as a string.", "source": "codesearchnet"}
{"code": "def get_remote_info(url_id):\n    \n    try:\n        data = _send_request(url_id)\n    except Exception as e:\n        sys.stderr.write(\"Seeder GET error: \")  \n        sys.stderr.write(str(e.message))\n        return None\n\n    return _convert_to_wakat_format(data)", "docstring": "Download data and convert them to dict used in frontend.\n\nArgs:\nurl_id (str): ID used as identification in Seeder.\n\nReturns:\ndict: Dict with data for frontend or None in case of error.", "source": "juraj-google-style"}
{"code": "def write_compounds(self, stream, compounds, properties=None):\n    self._write_entries(stream, compounds, self.convert_compound_entry, properties)", "docstring": "Write iterable of compounds as YAML object to stream.\n\nArgs:\nstream: File-like object.\ncompounds: Iterable of compound entries.\nproperties: Set of compound properties to output (or None to output\nall).", "source": "codesearchnet"}
{"code": "def remove_species(self, species):\n    new_sites = []\n    species = [get_el_sp(sp) for sp in species]\n    for site in self._sites:\n        new_sp_occu = {sp: amt for (sp, amt) in site.species.items() if (sp not in species)}\n        if (len(new_sp_occu) > 0):\n            new_sites.append(Site(new_sp_occu, site.coords, properties=site.properties))\n    self._sites = new_sites", "docstring": "Remove all occurrences of a species from a molecule.\n\nArgs:\nspecies: Species to remove.", "source": "codesearchnet"}
{"code": "def make_new(self, rev):\n        \n        \n        return self.vcs.make_rev_options(rev, extra_args=self.extra_args)", "docstring": "Make a copy of the current instance, but with a new rev.\n\nArgs:\nrev: the name of the revision for the new object.", "source": "juraj-google-style"}
{"code": "def _assign_stablehlo_quantization_config_or_populate_default(self, args):\n    if self.experimental_stablehlo_quantizer_config is not None and Optimize.DEFAULT not in self.optimizations:\n        args['quantization_config'] = self.experimental_stablehlo_quantizer_config\n    elif Optimize.DEFAULT in self.optimizations and self.representative_dataset:\n        if len(self._saved_model_exported_names) != 1:\n            raise ValueError('StableHLO quantizer is only supported when converting from a SavedModel with one signature key.')\n        signature_key = self._saved_model_exported_names[0]\n        tfrecord_file_path = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)[1]\n        rd.TfRecordRepresentativeDatasetSaver({signature_key: tfrecord_file_path}).save({signature_key: self.representative_dataset()})\n        quantization_config = qc.QuantizationConfig(static_range_ptq_preset=qc.StaticRangePtqPreset(representative_datasets=[qc.RepresentativeDatasetConfig(tf_record=qc.TfRecordFile(path=tfrecord_file_path))], enable_per_channel_quantized_weight=True, enable_full_int_quantization=True), pipeline_config=qc.PipelineConfig(unpack_quantized_types=False))\n        args['quantization_config'] = quantization_config\n    else:\n        raise ValueError('StableHLO quantizer only supports static-range and weight-only PTQ.')", "docstring": "Assigns `QuantizationConfig` to `args` or populate default.\n\nArgs:\nargs: Dictionary of argument names and associated values.", "source": "github-repos"}
{"code": "def from_control_flow_context_def(context_def, import_scope=None):\n    if context_def.HasField('cond_ctxt'):\n        return CondContext.from_proto(context_def.cond_ctxt, import_scope=import_scope)\n    if context_def.HasField('while_ctxt'):\n        return WhileContext.from_proto(context_def.while_ctxt, import_scope=import_scope)\n    raise NotImplementedError('Unknown ControlFlowContextDef field: %s' % context_def.WhichOneof('ctxt'))", "docstring": "Deserializes `context_def` into the appropriate ControlFlowContext.\n\nArgs:\ncontext_def: ControlFlowContextDef proto\nimport_scope: Optional `string`. Name scope to add.\n\nReturns:\nA ControlFlowContext subclass", "source": "github-repos"}
{"code": "def CreateKey(self, private_key=None):\n        \n        if private_key is None:\n            private_key = bytes(Random.get_random_bytes(32))\n\n        key = KeyPair(priv_key=private_key)\n        self._keys[key.PublicKeyHash.ToBytes()] = key\n        return key", "docstring": "Create a KeyPair\n\nArgs:\nprivate_key (iterable_of_ints): (optional) 32 byte private key\n\nReturns:\nKeyPair: a KeyPair instance", "source": "juraj-google-style"}
{"code": "def read(self):\n    if self._cache:\n        img = self._cache.get(self._position)\n        if (img is not None):\n            ret = True\n        else:\n            if (self._position != self._get_real_position()):\n                self._set_real_position(self._position)\n            (ret, img) = self._vcap.read()\n            if ret:\n                self._cache.put(self._position, img)\n    else:\n        (ret, img) = self._vcap.read()\n    if ret:\n        self._position += 1\n    return img", "docstring": "Read the next frame.\n\nIf the next frame have been decoded before and in the cache, then\nreturn it directly, otherwise decode, cache and return it.\n\nReturns:\nndarray or None: Return the frame if successful, otherwise None.", "source": "codesearchnet"}
{"code": "def send(self, message, socket_):\n        \n        if not socket_:\n            raise TensorForceError(\"No socket given in call to `send`!\")\n        elif not isinstance(message, dict):\n            raise TensorForceError(\"Message to be sent must be a dict!\")\n        message = msgpack.packb(message)\n        len_ = len(message)\n        \n        socket_.send(bytes(\"{:08d}\".format(len_), encoding=\"ascii\") + message)", "docstring": "Sends a message (dict) to the socket. Message consists of a 8-byte len header followed by a msgpack-numpy\nencoded dict.\n\nArgs:\nmessage: The message dict (e.g. {\"cmd\": \"reset\"})\nsocket_: The python socket object to use.", "source": "juraj-google-style"}
{"code": "def Add(self, file_desc_proto):\n    \n    proto_name = file_desc_proto.name\n    if proto_name not in self._file_desc_protos_by_file:\n      self._file_desc_protos_by_file[proto_name] = file_desc_proto\n    elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:\n      raise DescriptorDatabaseConflictingDefinitionError(\n          '%s already added, but with different descriptor.' % proto_name)\n\n    \n    package = file_desc_proto.package\n    for message in file_desc_proto.message_type:\n      self._file_desc_protos_by_symbol.update(\n          (name, file_desc_proto) for name in _ExtractSymbols(message, package))\n    for enum in file_desc_proto.enum_type:\n      self._file_desc_protos_by_symbol[\n          '.'.join((package, enum.name))] = file_desc_proto\n    for extension in file_desc_proto.extension:\n      self._file_desc_protos_by_symbol[\n          '.'.join((package, extension.name))] = file_desc_proto\n    for service in file_desc_proto.service:\n      self._file_desc_protos_by_symbol[\n          '.'.join((package, service.name))] = file_desc_proto", "docstring": "Adds the FileDescriptorProto and its types to this database.\n\nArgs:\nfile_desc_proto: The FileDescriptorProto to add.\nRaises:\nDescriptorDatabaseConflictingDefinitionError: if an attempt is made to\nadd a proto with the same name but different definition than an\nexisiting proto in the database.", "source": "juraj-google-style"}
{"code": "def _generate_pickle_name(gt):\n    grammar_textfile_name = os.path.basename(gt)\n    head, tail = os.path.splitext(grammar_textfile_name)\n    if tail == '.txt':\n        tail = ''\n    cache_dir = user_cache_dir(appname='YAPF', appauthor='Google', version=yapf_version)\n    return cache_dir + os.sep + head + tail + '-py' + '.'.join(map(str, sys.version_info)) + '.pickle'", "docstring": "Get the filepath to write a pickle file to\ngiven the path of a grammar textfile.\n\nThe returned filepath should be in a user-specific cache directory.\n\nArgs:\ngt (str): path to grammar text file\n\nReturns:\nstr: path to pickle file", "source": "github-repos"}
{"code": "def to_json(self):\n    return {'name': self.name, 'segments': [segment.to_json() for segment in self.segments], 'meta': self.meta}", "docstring": "Converts track to a JSON serializable format\n\nReturns:\nMap with the name, and segments of the track.", "source": "codesearchnet"}
{"code": "def traverse_pagination(response, endpoint, content_filter_query, query_params):\n        \n        results = response.get('results', [])\n\n        page = 1\n        while response.get('next'):\n            page += 1\n            response = endpoint().post(content_filter_query, **dict(query_params, page=page))\n            results += response.get('results', [])\n\n        return results", "docstring": "Traverse a paginated API response and extracts and concatenates \"results\" returned by API.\n\nArguments:\nresponse (dict): API response object.\nendpoint (Slumber.Resource): API endpoint object.\ncontent_filter_query (dict): query parameters used to filter catalog results.\nquery_params (dict): query parameters used to paginate results.\n\nReturns:\nlist: all the results returned by the API.", "source": "juraj-google-style"}
{"code": "def _create_non_scalar_select(main_expr: _evaluation.ExpressionNode, other_expr: _evaluation.ExpressionNode, main_result: _sql_data_types.StandardSqlExpression, other_result: _sql_data_types.StandardSqlExpression, collection_check_func_name: str, sql_data_type: _sql_data_types.StandardSqlDataType, sql_alias: str):\n    if isinstance(other_expr, _evaluation.LiteralNode):\n        expression = _build_main_expr(main_expr)\n        sql_expr = f'ARRAY_CONTAINS({expression}, {other_expr})'\n        return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_expr, _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)\n    nested_query = f'ARRAY({other_result})' if isinstance(main_expr, _evaluation.LiteralNode) else f'ARRAY_AGG({main_result.sql_alias}) FROM ({main_result})'\n    sql_expr = f'ARRAY_EXCEPT((SELECT ARRAY({main_result.sql_alias})), (SELECT {nested_query}))'\n    return _sql_data_types.Select(select_part=_sql_data_types.FunctionCall(name=collection_check_func_name, params=[_sql_data_types.RawExpression(sql_expr, _sql_data_type=_sql_data_types.Int64), 'x -> x IS NOT NULL'], _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=f'(SELECT {main_result.as_operand()})', sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Construct a Spark SQL select statement for non-scalar values.\n\nArgs:\nmain_expr: The primary (either left or right) expression being evaluated.\nother_expr: The secondary (opposite of main) expression.\nmain_result: The result of evaluating the main expression.\nother_result: The result of evaluating the other expression.\ncollection_check_func_name: The function name for collection checking\n('EXISTS' or 'NOT EXISTS').\nsql_data_type: The SQL data type for the result.\nsql_alias: The SQL alias for the result.\n\nReturns:\nA compiled Spark SQL select statement.", "source": "github-repos"}
{"code": "def install_bootstrapped_files(nb_path=None, server_config=True, DEBUG=False):\n    \n\n    install_path = None\n    print('Starting hide_code.js install...')\n    current_dir = path.abspath(path.dirname(__file__))\n    config_dirs = j_path.jupyter_config_path()\n    notebook_module_path = Utils.get_notebook_module_dir()\n\n    \n    \n    for dir in config_dirs:\n        custom_dir = path.join(dir, \"custom\")\n        if path.isdir(custom_dir):\n            install_path = custom_dir\n            break\n\n    \n    if install_path == None:\n        print(\"No config directories contain \\\"custom\\\" folder. Trying Jupyter notebook module path...\")\n        install_path = path.join(notebook_module_path, \"static\", \"custom\")\n\n    if nb_path != None:\n        install_path = nb_path\n        print(\"Using argument supplied path: \" + install_path)\n\n    if DEBUG:\n        print(install_path)\n\n    \n    if path.isdir(install_path):\n        shutil.copyfile(path.join(current_dir, \"hide_code.js\"), path.join(install_path, \"hide_code.js\"))\n        print('Copying hide_code.js to ' + install_path)\n\n        \n        print(\"Attempting to configure custom.js to auto-load hide_code.js...\")\n        try:\n            with open(path.join(current_dir, \"auto-load.txt\")) as auto:\n                auto_load_txt = auto.read();\n                auto_loaded = False\n\n                \n                with open(path.join(install_path, \"custom.js\"), 'r') as customJS:\n                    if auto_load_txt in customJS.read():\n                        auto_loaded = True\n                        print(\"Custom.js already configured to auto-load hide_code.js.\")\n\n                if not auto_loaded:  \n                    with open(path.join(install_path, \"custom.js\"), 'a') as customJS:\n                        customJS.write(auto_load_txt)\n                        print(\"Configured custom.js to auto-load hide_code.js.\")\n        except:\n            print(\"Custom.js not in custom directory.\")\n    else:\n        print('Unable to install into ' + install_path)\n        print('Directory doesn\\'t exist.')\n        print('Make sure Jupyter is installed.')\n\n    if server_config:\n        print(\"Attempting to configure auto-loading for hide_code export handlers.\")\n        try:\n            \n            server_cm = ConfigManager(config_dir=j_path.jupyter_config_dir())\n            cfg = server_cm.get('jupyter_notebook_config')\n            server_extensions = (cfg.setdefault('NotebookApp', {})\n                                 .setdefault('server_extensions', [])\n                                 )\n            extension = 'hide_code.hide_code'\n            if extension not in server_extensions:\n                cfg['NotebookApp']['server_extensions'] += [extension]\n                server_cm.update('jupyter_notebook_config', cfg)\n                print('Configured jupyter to auto-load hide_code export handlers.')\n            else:\n                print(\"Jupyter already configured to auto-load export handlers.\")\n        except:\n            print('Unable to install server extension.')", "docstring": "Installs javascript and exporting server extensions in Jupyter notebook.\n\nArgs:\nnb_path (string): Path to notebook module.\nserver_config (boolean): Install exporting server extensions.\nDEBUG (boolean): Verbose mode.", "source": "juraj-google-style"}
{"code": "def accept_prompt(self, text=None, response=None, wait=None):\n        \n\n        with self.driver.accept_modal(\"prompt\", text=text, response=response, wait=wait):\n            yield", "docstring": "Execute the wrapped code, accepting a prompt, optionally responding to the prompt.\n\nArgs:\ntext (str | RegexObject, optional): Text to match against the text in the modal.\nresponse (str, optional): Response to provide to the prompt.\nwait (int | float, optional): Maximum time to wait for the modal to appear after\nexecuting the wrapped code.\n\nRaises:\nModalNotFound: If a modal dialog hasn't been found.", "source": "juraj-google-style"}
{"code": "def format_underline(s, char=\"=\", indents=0):\n    \n\n    n = len(s)\n    ind = \" \" * indents\n    return [\"{}{}\".format(ind, s), \"{}{}\".format(ind, char*n)]", "docstring": "Traces a dashed line below string\n\nArgs:\ns: string\nchar:\nindents: number of leading intenting spaces\n\nReturns: list\n\n>>> print(\"\\\\n\".join(format_underline(\"Life of João da Silva\", \"^\", 2)))\nLife of João da Silva\n^^^^^^^^^^^^^^^^^^^^^", "source": "juraj-google-style"}
{"code": "def valid_file(value):\n    \n    if not value:\n        raise argparse.ArgumentTypeError(\"'' is not a valid file path\")\n    elif not os.path.exists(value):\n        raise argparse.ArgumentTypeError(\n            \"%s is not a valid file path\" % value)\n    elif os.path.isdir(value):\n        raise argparse.ArgumentTypeError(\n            \"%s is a directory, not a regular file\" % value)\n    return value", "docstring": "Check if given file exists and is a regular file.\n\nArgs:\nvalue (str): path to the file.\n\nRaises:\nargparse.ArgumentTypeError: if not valid.\n\nReturns:\nstr: original value argument.", "source": "juraj-google-style"}
{"code": "def validate_variable_name(self, name):\n        \n        if not name:\n            raise SerializerError(\"Variable name is empty\".format(name))\n\n        if name[0] not in PROPERTY_ALLOWED_START:\n            msg = \"Variable name '{}' must starts with a letter\"\n            raise SerializerError(msg.format(name))\n\n        for item in name:\n            if item not in PROPERTY_ALLOWED_CHARS:\n                msg = (\"Invalid variable name '{}': it must only contains \"\n                       \"letters, numbers and '_' character\")\n                raise SerializerError(msg.format(name))\n\n        return True", "docstring": "Validate variable name.\n\nArguments:\nname (string): Property name.\n\nReturns:\nbool: ``True`` if variable name is valid.", "source": "juraj-google-style"}
{"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    if token_ids_1 is None:\n        return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. The special tokens depend on calling set_lang.\n\nAn MBART-50 sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `[src_lang_code] X [eos]`\n- `labels`: (for decoder) `[tgt_lang_code] X [eos]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"}
{"code": "def rapidfire(self, max_nlaunch=(- 1), max_loops=1, sleep_time=5):\n    (num_launched, do_exit, launched) = (0, False, [])\n    for count in range(max_loops):\n        if do_exit:\n            break\n        if (count > 0):\n            time.sleep(sleep_time)\n        tasks = self.fetch_tasks_to_run()\n        if any(((task in launched) for task in tasks)):\n            logger.critical(('numtasks %d already in launched list:\\n%s' % (len(tasks), launched)))\n        tasks = [t for t in tasks if (t not in launched)]\n        if (not tasks):\n            continue\n        for task in tasks:\n            fired = task.start()\n            if fired:\n                launched.append(task)\n                num_launched += 1\n            if (num_launched >= max_nlaunch > 0):\n                logger.info('num_launched >= max_nlaunch, going back to sleep')\n                do_exit = True\n                break\n    self.flow.pickle_dump()\n    return num_launched", "docstring": "Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.\n\nArgs:\nmax_nlaunch: Maximum number of launches. default: no limit.\nmax_loops: Maximum number of loops\nsleep_time: seconds to sleep between rapidfire loop iterations\n\nReturns:\nThe number of tasks launched.", "source": "codesearchnet"}
{"code": "def positions(self, account: str = '') -> List[Position]:\n        \n        if account:\n            return list(self.wrapper.positions[account].values())\n        else:\n            return [v for d in self.wrapper.positions.values()\n                    for v in d.values()]", "docstring": "List of positions for the given account,\nor of all accounts if account is left blank.\n\nArgs:\naccount: If specified, filter for this account name.", "source": "juraj-google-style"}
{"code": "def delete_template(self, template_id):\n    url = self.TEMPLATE_DELETE_URL\n    request = self._get_request()\n    response = request.post((url + template_id), get_json=False)\n    return response", "docstring": "Deletes the specified template\n\nArgs:\n\ntemplate_id (str): The id of the template to delete\n\nReturns:\nA status code", "source": "codesearchnet"}
{"code": "def Update(self, other, callback):\n    \n    self.conditions.update(other.conditions)\n    self._Register(other.conditions, callback)", "docstring": "Adds existing triggers to this set, optionally rebuilding the registry.\n\nUsed to aggregate trigger methods from Probes to Methods to Checks.\n\nArgs:\nother: Another Triggers object.\ncallback: Registers all the updated triggers to the specified function.", "source": "juraj-google-style"}
{"code": "def iplot_state_hinton(rho, figsize=None):\n    \n\n    \n    html_template = Template()\n\n    \n    javascript_template = Template()\n    rho = _validate_input_state(rho)\n    if figsize is None:\n        options = {}\n    else:\n        options = {'width': figsize[0], 'height': figsize[1]}\n\n    \n    div_number = str(time.time())\n    div_number = re.sub('[.]', '', div_number)\n\n    \n    real = []\n    imag = []\n    for xvalue in rho:\n        row_real = []\n        col_imag = []\n\n        for value_real in xvalue.real:\n            row_real.append(float(value_real))\n        real.append(row_real)\n\n        for value_imag in xvalue.imag:\n            col_imag.append(float(value_imag))\n        imag.append(col_imag)\n\n    html = html_template.substitute({\n        'divNumber': div_number\n    })\n\n    javascript = javascript_template.substitute({\n        'divNumber': div_number,\n        'executions': [{'data': real}, {'data': imag}],\n        'options': options\n    })\n\n    display(HTML(html + javascript))", "docstring": "Create a hinton representation.\n\nGraphical representation of the input array using a 2D city style\ngraph (hinton).\n\nArgs:\nrho (array): Density matrix\nfigsize (tuple): Figure size in pixels.", "source": "juraj-google-style"}
{"code": "def __init__(self, obj):\n        \n        if isinstance(obj, Stream) and obj.stream_dict.get(\"/Subtype\") != \"/Image\":\n            raise TypeError(\"can't construct PdfImage from non-image\")\n        self.obj = obj", "docstring": "Construct a PDF image from a Image XObject inside a PDF\n\n``pim = PdfImage(page.Resources.XObject['/ImageNN'])``\n\nArgs:\nobj (pikepdf.Object): an Image XObject", "source": "juraj-google-style"}
{"code": "def rotateInZMat(theta_deg):\n    ct = np.cos(np.radians(theta_deg))\n    st = np.sin(np.radians(theta_deg))\n    rMat = np.array([[ct, (- st), 0], [st, ct, 0], [0, 0, 1]])\n    return rMat", "docstring": "Rotate a vector theta degrees around the z-axis\n\nEquivalent to yaw left\n\nRotates the vector in the sense that the x-axis is rotated\ntowards the y-axis. If looking along the z-axis (which is\nnot the way you usually look at it), the vector rotates\nclockwise.\n\nIf sitting on the vector [1,0,0], the rotation is towards the left\n\nInput:\ntheta_deg   (float) Angle through which vectors should be\nrotated in degrees\n\nReturns:\nA matrix\n\nTo rotate a vector, premultiply by this matrix.\nTo rotate the coord sys underneath the vector, post multiply", "source": "codesearchnet"}
{"code": "def format(self, data: Iterable[_FormatArg]) -> bytes:\n    fix_arg = self._fix_format_arg\n    return (self.how % tuple((fix_arg(item) for item in data)))", "docstring": "String interpolation into the format string.\n\nArgs:\ndata: The data interpolated into the format string.\n\nExamples:\n::\n\nBytesFormat(b'Hello, %b!') % b'World'\nBytesFormat(b'%b, %b!') % (b'Hello', b'World')", "source": "codesearchnet"}
{"code": "def resolve(self, context, provider):\n    try:\n        self._value.resolve(context, provider)\n    except FailedLookup as e:\n        raise FailedVariableLookup(self.name, e.lookup, e.error)", "docstring": "Recursively resolve any lookups with the Variable.\n\nArgs:\ncontext (:class:`stacker.context.Context`): Current context for\nbuilding the stack\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of\nthe base provider", "source": "codesearchnet"}
{"code": "def __init__(self, request, file, *args, **kwargs):\n        \n        self.ranged_file = RangedFileReader(file)\n        super(RangedFileResponse, self).__init__(self.ranged_file, *args, **kwargs)\n\n        if 'HTTP_RANGE' in request.META:\n            self.add_range_headers(request.META['HTTP_RANGE'])", "docstring": "RangedFileResponse constructor also requires a request, which\nchecks whether range headers should be added to the response.\n\nArgs:\nrequest(WGSIRequest): The Django request object.\nfile (File): A file-like object.", "source": "juraj-google-style"}
{"code": "def save_chkpt_vars(dic, path):\n    \n    logger.info(\"Variables to save to {}:\".format(path))\n    keys = sorted(list(dic.keys()))\n    logger.info(pprint.pformat(keys))\n\n    assert not path.endswith('.npy')\n    if path.endswith('.npz'):\n        np.savez_compressed(path, **dic)\n    else:\n        with tf.Graph().as_default(), \\\n                tf.Session() as sess:\n            for k, v in six.iteritems(dic):\n                k = get_op_tensor_name(k)[0]\n                _ = tf.Variable(name=k, initial_value=v)    \n            sess.run(tf.global_variables_initializer())\n            saver = tf.train.Saver()\n            saver.save(sess, path, write_meta_graph=False)", "docstring": "Save variables in dic to path.\n\nArgs:\ndic: {name: value}\npath: save as npz if the name ends with '.npz', otherwise save as a checkpoint.", "source": "juraj-google-style"}
{"code": "def make_rsa_keypair(bits):\n    \n    private_key = rsa.generate_private_key(\n        public_exponent=65537,\n        key_size=bits,\n        backend=default_backend(),\n    )\n    private_pem = private_key.private_bytes(\n        encoding=serialization.Encoding.PEM,\n        format=serialization.PrivateFormat.TraditionalOpenSSL,\n        encryption_algorithm=serialization.NoEncryption(),\n    )\n    public_pem = private_key.public_key().public_bytes(\n        encoding=serialization.Encoding.PEM,\n        format=serialization.PublicFormat.SubjectPublicKeyInfo,\n    )\n    return private_pem, public_pem", "docstring": "Generate an RSA keypair.\n\nArgs:\nbits (int): number of bits to use for the key.\n\nReturns:\n(private_key, public_key) - both as PEM encoded strings", "source": "juraj-google-style"}
{"code": "def get_current_track_info(self):\n    response = self.avTransport.GetPositionInfo([('InstanceID', 0), ('Channel', 'Master')])\n    track = {'title': '', 'artist': '', 'album': '', 'album_art': '', 'position': ''}\n    track['playlist_position'] = response['Track']\n    track['duration'] = response['TrackDuration']\n    track['uri'] = response['TrackURI']\n    track['position'] = response['RelTime']\n    metadata = response['TrackMetaData']\n    track['metadata'] = metadata\n    if ((metadata != '') and (track['duration'] == '0:00:00')):\n        metadata = XML.fromstring(really_utf8(metadata))\n        trackinfo = (metadata.findtext('.\n        index = trackinfo.find(' - ')\n        if (index > (- 1)):\n            track['artist'] = trackinfo[:index]\n            track['title'] = trackinfo[(index + 3):]\n        else:\n            track['title'] = metadata.findtext('.\n            if (not track['title']):\n                track['title'] = trackinfo\n    elif (metadata not in ('', 'NOT_IMPLEMENTED', None)):\n        metadata = XML.fromstring(really_utf8(metadata))\n        md_title = metadata.findtext('.\n        md_artist = metadata.findtext('.\n        md_album = metadata.findtext('.\n        track['title'] = ''\n        if md_title:\n            track['title'] = md_title\n        track['artist'] = ''\n        if md_artist:\n            track['artist'] = md_artist\n        track['album'] = ''\n        if md_album:\n            track['album'] = md_album\n        album_art_url = metadata.findtext('.\n        if (album_art_url is not None):\n            track['album_art'] = self.music_library.build_album_art_full_uri(album_art_url)\n    return track", "docstring": "Get information about the currently playing track.\n\nReturns:\ndict: A dictionary containing information about the currently\nplaying track: playlist_position, duration, title, artist, album,\nposition and an album_art link.\n\nIf we're unable to return data for a field, we'll return an empty\nstring. This can happen for all kinds of reasons so be sure to check\nvalues. For example, a track may not have complete metadata and be\nmissing an album name. In this case track['album'] will be an empty\nstring.\n\n.. note:: Calling this method on a slave in a group will not\nreturn the track the group is playing, but the last track\nthis speaker was playing.", "source": "codesearchnet"}
{"code": "def transform(self, df):\n    for (name, function) in self.outputs:\n        df[name] = function(df)", "docstring": "Transforms a DataFrame in place. Computes all outputs of the DataFrame.\n\nArgs:\ndf (pandas.DataFrame): DataFrame to transform.", "source": "codesearchnet"}
{"code": "def preprocess_mel(self, audio: np.ndarray, beatstep: np.ndarray):\n    if audio is not None and len(audio.shape) != 1:\n        raise ValueError(f'Expected `audio` to be a single channel audio input of shape `(n, )` but found shape {audio.shape}.')\n    if beatstep[0] > 0.0:\n        beatstep = beatstep - beatstep[0]\n    num_steps = self.num_bars * 4\n    num_target_steps = len(beatstep)\n    extrapolated_beatstep = self.interpolate_beat_times(beat_times=beatstep, steps_per_beat=1, n_extend=(self.num_bars + 1) * 4 + 1)\n    sample_indices = []\n    max_feature_length = 0\n    for i in range(0, num_target_steps, num_steps):\n        start_idx = i\n        end_idx = min(i + num_steps, num_target_steps)\n        start_sample = int(extrapolated_beatstep[start_idx] * self.sampling_rate)\n        end_sample = int(extrapolated_beatstep[end_idx] * self.sampling_rate)\n        sample_indices.append((start_sample, end_sample))\n        max_feature_length = max(max_feature_length, end_sample - start_sample)\n    padded_batch = []\n    for start_sample, end_sample in sample_indices:\n        feature = audio[start_sample:end_sample]\n        padded_feature = np.pad(feature, ((0, max_feature_length - feature.shape[0]),), 'constant', constant_values=0)\n        padded_batch.append(padded_feature)\n    padded_batch = np.asarray(padded_batch)\n    return (padded_batch, extrapolated_beatstep)", "docstring": "Preprocessing for log-mel-spectrogram\n\nArgs:\naudio (`numpy.ndarray` of shape `(audio_length, )` ):\nRaw audio waveform to be processed.\nbeatstep (`numpy.ndarray`):\nInterpolated values of the raw audio. If beatstep[0] is greater than 0.0, then it will be shifted by\nthe value at beatstep[0].", "source": "github-repos"}
{"code": "def __init__(self, email, password):\n        \n        self.email = email\n        self.password = password\n        self.token = None\n        self.refresh_token = None\n        self.last_api_call = None\n        self.state = []\n        \n        self.authenticated = self._authenticate()", "docstring": "Create the EcoNet API interface object.\nArgs:\nemail (str): EcoNet account email address.\npassword (str): EcoNet account password.", "source": "juraj-google-style"}
{"code": "def determinize(self):\n        \n\n        \n        epsilon_closure = {}\n        for state in self.states:\n            sid = state.stateid\n            epsilon_closure[sid] = self._epsilon_closure(state)\n\n        \n        trans_table = {}\n        for state in self.states:\n            trans_table[state.stateid] = defaultdict(set)\n            for arc in state:\n                char = self.isyms.find(arc.ilabel)\n                trans_table[state.stateid][char].add(arc.nextstate)\n\n        \n        \n        \n        \n        is_final = lambda nfa_states, dfa_state: True \\\n            if sum([ int(nfa_states[x].final) for x in dfa_state ]) >= 1 \\\n            else False\n\n        \n        state_idx = 1\n        nfa_states = copy.deepcopy(self.states)\n        self.states = []\n        \n        self.add_state()\n        new_initial = epsilon_closure[nfa_states[0].stateid]\n        self.states[0].final = is_final(nfa_states, new_initial)\n\n        dfa_state_idx_map = { frozenset(new_initial) : 0 }\n        stack = [new_initial]\n        while True:\n            \n            if not stack:\n                break\n            \n            src_dfa_state = stack.pop()\n            src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)]\n            for char in self.alphabet:\n                \n                target_dfa_state = set([])\n                for nfa_state in src_dfa_state:\n                    next_states = \\\n                        set([y for x in trans_table[nfa_state][char] \\\n                             for y in epsilon_closure[x] ])\n                    target_dfa_state.update(next_states)\n                \n                \n                if frozenset(target_dfa_state) not in dfa_state_idx_map:\n                    self.add_state()\n                    dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx\n                    self.states[state_idx].final = is_final(nfa_states,\n                                                            target_dfa_state)\n                    state_idx += 1\n                    stack.append(target_dfa_state)\n\n                dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)]\n                self.add_arc(src_dfa_state_idx, dst_state_idx, char)\n        return self", "docstring": "Transforms a Non Deterministic DFA into a Deterministic\nArgs:\nNone\nReturns:\nDFA: The resulting DFA\n\nCreating an equivalent DFA is done using the standard algorithm.\nA nice description can be found in the book:\nHarry R. Lewis and Christos H. Papadimitriou. 1998.\nE\nprint target_dfa_statelements of the Theory of Computation.", "source": "juraj-google-style"}
{"code": "def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size):\n    \n    try:\n      byte_stream_size = len(byte_stream)\n\n    except Exception as exception:\n      raise errors.MappingError(exception)\n\n    if byte_stream_size - byte_offset < data_type_size:\n      raise errors.ByteStreamTooSmallError(\n          'Byte stream too small requested: {0:d} available: {1:d}'.format(\n              data_type_size, byte_stream_size))", "docstring": "Checks if the byte stream is large enough for the data type.\n\nArgs:\nbyte_stream (bytes): byte stream.\nbyte_offset (int): offset into the byte stream where to start.\ndata_type_size (int): data type size.\n\nRaises:\nByteStreamTooSmallError: if the byte stream is too small.\nMappingError: if the size of the byte stream cannot be determined.", "source": "juraj-google-style"}
{"code": "def resolves_for(self, node):\n        \n\n        self.actual_title = normalize_text(node.title)\n        return bool(self.search_regexp.search(self.actual_title))", "docstring": "Resolves this query relative to the given node.\n\nArgs:\nnode (node.Document): The node to be evaluated.\n\nReturns:\nbool: Whether the given node matches this query.", "source": "juraj-google-style"}
{"code": "def update_data(func):\n    \n    default = dict([\n        (param.name, param.default)\n        for param in inspect.signature(func).parameters.values()\n        if param.default != getattr(inspect, '_empty')\n    ])\n\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n\n        default.update(kwargs)\n        kwargs.update(default)\n        cur_mod = sys.modules[func.__module__]\n        logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream')\n\n        root_path = cur_mod.DATA_PATH\n        date_type = kwargs.pop('date_type', 'date')\n        save_static = kwargs.pop('save_static', True)\n        save_dynamic = kwargs.pop('save_dynamic', True)\n        symbol = kwargs.get('symbol')\n        file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type)\n        d_file = cache_file(has_date=True, **file_kw)\n        s_file = cache_file(has_date=False, **file_kw)\n\n        cached = kwargs.pop('cached', False)\n        if cached and save_static and files.exists(s_file):\n            logger.info(f'Reading data from {s_file} ...')\n            return pd.read_parquet(s_file)\n\n        data = func(*args, **kwargs)\n\n        if save_static:\n            files.create_folder(s_file, is_file=True)\n            save_data(data=data, file_fmt=s_file, append=False)\n            logger.info(f'Saved data file to {s_file} ...')\n\n        if save_dynamic:\n            drop_dups = kwargs.pop('drop_dups', None)\n            files.create_folder(d_file, is_file=True)\n            save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups)\n            logger.info(f'Saved data file to {d_file} ...')\n\n        return data\n\n    return wrapper", "docstring": "Decorator to save data more easily. Use parquet as data format\n\nArgs:\nfunc: function to load data from data source\n\nReturns:\nwrapped function", "source": "juraj-google-style"}
{"code": "def add(self, spec):\n        \n        for limit in spec.limit_to:\n            if limit not in self.limit_to:\n                self.limit_to.append(limit)", "docstring": "Add limitations of given spec to self's.\n\nArgs:\nspec (PackageSpec): another spec.", "source": "juraj-google-style"}
{"code": "def deep_variable_product(variables, limit: int=DEEP_VARIABLE_LIMIT):\n    return _deep_values_list_product([v.bindings for v in variables], set(), ComplexityLimit(limit))", "docstring": "Take the deep Cartesian product of a list of Variables.\n\nFor example:\nx1.children = {v2, v3}\nv1 = {x1, x2}\nv2 = {x3}\nv3 = {x4, x5}\nv4 = {x6}\nthen\ndeep_variable_product([v1, v4]) will return:\n[[x1, x3, x4, x6],\n[x1, x3, x5, x6],\n[x2, x6]]\n.\nArgs:\nvariables: A sequence of Variables.\nlimit: How many results we allow before aborting.\n\nReturns:\nA list of lists of Values, where each sublist has one Value from each\nof the corresponding Variables and the Variables of their Values' children.\n\nRaises:\nTooComplexError: If we expanded too many values.", "source": "github-repos"}
{"code": "def build_vep_string(vep_info, vep_columns):\n    \n    logger = getLogger(__name__)\n    logger.debug(\"Building vep string from {0}\".format(vep_info))\n    logger.debug(\"Found vep headers {0}\".format(vep_columns))\n    vep_strings = []\n    for vep_annotation in vep_info:\n        try:\n            vep_info_list = [\n                vep_annotation[vep_key] for vep_key in vep_columns\n            ]\n        except KeyError:\n            raise SyntaxError(\"Vep entry does not correspond to vep headers\")\n        \n        vep_strings.append('|'.join(vep_info_list))\n    return ','.join(vep_strings)", "docstring": "Build a vep string formatted string.\n\nTake a list with vep annotations and build a new vep string\n\nArgs:\nvep_info (list): A list with vep annotation dictionaries\nvep_columns (list): A list with the vep column names found in the\nheader of the vcf\n\nReturns:\nstring: A string with the proper vep annotations", "source": "juraj-google-style"}
{"code": "def delete_file_v2(path):\n    _pywrap_file_io.DeleteFile(compat.path_to_bytes(path))", "docstring": "Deletes the path located at 'path'.\n\nArgs:\npath: string, a path\n\nRaises:\nerrors.OpError: Propagates any errors reported by the FileSystem API.  E.g.,\n`NotFoundError` if the path does not exist.", "source": "github-repos"}
{"code": "def random_string(length=8, charset=None):\n    \n    if length < 1:\n        raise ValueError('Length must be > 0')\n    if not charset:\n        charset = string.letters + string.digits\n    return ''.join(random.choice(charset) for unused in xrange(length))", "docstring": "Generates a string with random characters. If no charset is specified, only\nletters and digits are used.\n\nArgs:\nlength (int) length of the returned string\ncharset (string) list of characters to choose from\nReturns:\n(str) with random characters from charset\nRaises:\n-", "source": "juraj-google-style"}
{"code": "def requirements(requirements_file):\n    return [str(pkg.req) for pkg in parse_requirements(requirements_file, session=pip_download.PipSession()) if (pkg.req is not None)]", "docstring": "Return packages mentioned in the given file.\n\nArgs:\nrequirements_file (str): path to the requirements file to be parsed.\n\nReturns:\n(list): 3rd-party package dependencies contained in the file.", "source": "codesearchnet"}
{"code": "def update_conversation(self, conversation):\n    new_state = conversation.self_conversation_state\n    old_state = self._conversation.self_conversation_state\n    self._conversation = conversation\n    if (not new_state.delivery_medium_option):\n        new_state.delivery_medium_option.extend(old_state.delivery_medium_option)\n    old_timestamp = old_state.self_read_state.latest_read_timestamp\n    new_timestamp = new_state.self_read_state.latest_read_timestamp\n    if (new_timestamp == 0):\n        new_state.self_read_state.latest_read_timestamp = old_timestamp\n    for new_entry in conversation.read_state:\n        tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp)\n        if (tstamp == 0):\n            continue\n        uid = parsers.from_participantid(new_entry.participant_id)\n        if ((uid not in self._watermarks) or (self._watermarks[uid] < tstamp)):\n            self._watermarks[uid] = tstamp", "docstring": "Update the internal state of the conversation.\n\nThis method is used by :class:`.ConversationList` to maintain this\ninstance.\n\nArgs:\nconversation: ``Conversation`` message.", "source": "codesearchnet"}
{"code": "def get_metadata(self, key) -> str:\n        \n        return self.metadata[key] if key in self.metadata else None", "docstring": "Get the value of a metadata. Returns None if metadata does not exist.\n\nArgs:\nkey (str): name of the metadata\n\nReturns:\nstr: the value of the metadata (or None)", "source": "juraj-google-style"}
{"code": "def _wait_for_any_event(events, timeout_s):\n  \n  def any_event_set():\n    return any(event.is_set() for event in events)\n\n  result = timeouts.loop_until_timeout_or_true(\n      timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)\n\n  return result or any_event_set()", "docstring": "Wait for any in a list of threading.Event's to be set.\n\nArgs:\nevents: List of threading.Event's.\ntimeout_s: Max duration in seconds to wait before returning.\n\nReturns:\nTrue if at least one event was set before the timeout expired, else False.", "source": "juraj-google-style"}
{"code": "def points_are_in_a_straight_line(points, tolerance=1e-07):\n    a = points[0]\n    b = points[1]\n    for c in points[2:]:\n        if (area_of_a_triangle_in_cartesian_space(a, b, c) > tolerance):\n            return False\n    return True", "docstring": "Check whether a set of points fall on a straight line.\nCalculates the areas of triangles formed by triplets of the points.\nReturns False is any of these areas are larger than the tolerance.\n\nArgs:\npoints (list(np.array)): list of Cartesian coordinates for each point.\ntolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.\n\nReturns:\n(bool): True if all points fall on a straight line (within the allowed tolerance).", "source": "codesearchnet"}
{"code": "def _parse_order_by(model, order_by):\n    out = []\n    for key in order_by:\n        key = key.strip()\n        if key.startswith('+'):\n            out.append(getattr(model, key[1:]))\n        elif key.startswith('-'):\n            out.append(getattr(model, key[1:]).desc())\n        else:\n            out.append(getattr(model, key))\n    return out", "docstring": "This function figures out the list of orderings for the given model and\nargument.\n\nArgs:\nmodel (nautilus.BaseModel): The model to compute ordering against\norder_by (list of str): the list of fields to order_by. If the field\nstarts with a `+` then the order is acending, if `-` descending,\nif no character proceeds the field, the ordering is assumed to be\nascending.\n\nReturns:\n(list of filters): the model filters to apply to the query", "source": "codesearchnet"}
{"code": "def iterable_source(iterable, target):\n    \n    it = iter(iterable)\n    for item in it:\n        try:\n            target.send(item)\n        except StopIteration:\n            return prepend(item, it)\n    return empty_iter()", "docstring": "Convert an iterable into a stream of events.\n\nArgs:\niterable: A series of items which will be sent to the target one by one.\ntarget: The target coroutine or sink.\n\nReturns:\nAn iterator over any remaining items.", "source": "juraj-google-style"}
{"code": "def onCall(self, n):\n    cond_oncall = (n + 1)\n    return _SinonStubCondition(copy=self._copy, oncall=cond_oncall, cond_args=self._cond_args, cond_kwargs=self._cond_kwargs)", "docstring": "Adds a condition for when the stub is called. When the condition is met, a special\nreturn value can be returned. Adds the specified call number into the condition\nlist.\n\nFor example, when the stub function is called the second time, it will return \"#\":\nstub.onCall(1).returns(\"#\")\n\nWithout returns/throws at the end of the chain of functions, nothing will happen.\nFor example, in this case, although 2 is in the condition list, nothing will happen:\nstub.onCall(2)\n\nArgs:\nn: integer, the call # for which we want a special return value.\nThe first call has an index of 0.\n\nReturn:\na SinonStub object (able to be chained)", "source": "codesearchnet"}
{"code": "def track_event(self, name, properties=None, measurements=None):\n        \n        data = channel.contracts.EventData()\n        data.name = name or NULL_CONSTANT_STRING\n        if properties:\n            data.properties = properties\n        if measurements:\n            data.measurements = measurements\n\n        self.track(data, self._context)", "docstring": "Send information about a single event that has occurred in the context of the application.\n\nArgs:\nname (str). the data to associate to this event.\\n\nproperties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\\n\nmeasurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)", "source": "juraj-google-style"}
{"code": "def load_attributes_from_hdf5_group(group, name):\n    if name in group.attrs:\n        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]\n    else:\n        data = []\n        chunk_id = 0\n        while f'{name}{chunk_id}' in group.attrs:\n            data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[f'{name}{chunk_id}']])\n            chunk_id += 1\n    return data", "docstring": "Loads attributes of the specified name from the HDF5 group.\n\nThis method deals with an inherent problem\nof HDF5 file which is not able to store\ndata larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\ngroup: A pointer to a HDF5 group.\nname: A name of the attributes to load.\n\nReturns:\ndata: Attributes data.", "source": "github-repos"}
{"code": "def fully_qualify_alias_labels(label, aliases):\n    for (alias, full_name) in aliases.items():\n        if (label == alias):\n            return full_name\n        elif label.startswith((alias + '.')):\n            return (full_name + label[len(alias):])\n    return label", "docstring": "Replace any aliases in label with the fully qualified name.\n\nArgs:\nlabel -- A label : str representing a name (e.g. myos.system)\naliases -- A dict of {alias: real_name} (e.g. {'myos': 'os'})\n\n>>> fully_qualify_alias_labels('myos.mycall', {'myos':'os'})\n'os.mycall'", "source": "codesearchnet"}
{"code": "def _ParseRecord(self, parser_mediator, file_object, record_offset):\n    \n    record_strings_data_offset = file_object.tell()\n    record_strings_data_size = record_offset - record_strings_data_offset\n\n    record_strings_data = self._ReadData(\n        file_object, record_strings_data_offset, record_strings_data_size)\n\n    record_map = self._GetDataTypeMap('asl_record')\n\n    try:\n      record, record_data_size = self._ReadStructureFromFileObject(\n          file_object, record_offset, record_map)\n    except (ValueError, errors.ParseError) as exception:\n      raise errors.UnableToParseFile((\n          'Unable to parse record at offset: 0x{0:08x} with error: '\n          '{1!s}').format(record_offset, exception))\n\n    hostname = self._ParseRecordString(\n        record_strings_data, record_strings_data_offset,\n        record.hostname_string_offset)\n\n    sender = self._ParseRecordString(\n        record_strings_data, record_strings_data_offset,\n        record.sender_string_offset)\n\n    facility = self._ParseRecordString(\n        record_strings_data, record_strings_data_offset,\n        record.facility_string_offset)\n\n    message = self._ParseRecordString(\n        record_strings_data, record_strings_data_offset,\n        record.message_string_offset)\n\n    file_offset = record_offset + record_data_size\n    additional_data_size = record.data_size + 6 - record_data_size\n\n    if additional_data_size % 8 != 0:\n      raise errors.ParseError(\n          'Invalid record additional data size: {0:d}.'.format(\n              additional_data_size))\n\n    additional_data = self._ReadData(\n        file_object, file_offset, additional_data_size)\n\n    extra_fields = {}\n    for additional_data_offset in range(0, additional_data_size - 8, 16):\n      record_extra_field = self._ParseRecordExtraField(\n          additional_data[additional_data_offset:], file_offset)\n\n      file_offset += 16\n\n      name = self._ParseRecordString(\n          record_strings_data, record_strings_data_offset,\n          record_extra_field.name_string_offset)\n\n      value = self._ParseRecordString(\n          record_strings_data, record_strings_data_offset,\n          record_extra_field.value_string_offset)\n\n      if name is not None:\n        extra_fields[name] = value\n\n    \n\n    event_data = ASLEventData()\n    event_data.computer_name = hostname\n    event_data.extra_information = ', '.join([\n        '{0:s}: {1:s}'.format(name, value)\n        for name, value in sorted(extra_fields.items())])\n    event_data.facility = facility\n    event_data.group_id = record.group_identifier\n    event_data.level = record.alert_level\n    event_data.message_id = record.message_identifier\n    event_data.message = message\n    event_data.pid = record.process_identifier\n    event_data.read_gid = record.real_group_identifier\n    event_data.read_uid = record.real_user_identifier\n    event_data.record_position = record_offset\n    event_data.sender = sender\n    \n    event_data.user_sid = '{0:d}'.format(record.user_identifier)\n\n    microseconds, _ = divmod(record.written_time_nanoseconds, 1000)\n    timestamp = (record.written_time * 1000000) + microseconds\n\n    \n    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n        timestamp=timestamp)\n    \n    event = time_events.DateTimeValuesEvent(\n        date_time, definitions.TIME_DESCRIPTION_CREATION)\n    parser_mediator.ProduceEventWithEventData(event, event_data)\n\n    return record.next_record_offset", "docstring": 
"Parses a record and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (file): file-like object.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\n\nReturns:\nint: next record offset.\n\nRaises:\nParseError: if the record cannot be parsed.", "source": "juraj-google-style"}
{"code": "def get_location_from_HDX_code(code, locations=None, configuration=None):\n    if (locations is None):\n        locations = Locations.validlocations(configuration)\n    for locdict in locations:\n        if (code.upper() == locdict['name'].upper()):\n            return locdict['title']\n    return None", "docstring": "Get location from HDX location code\n\nArgs:\ncode (str): code for which to get location name\nlocations (Optional[List[Dict]]): Valid locations list. Defaults to list downloaded from HDX.\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[str]: location name", "source": "codesearchnet"}
{"code": "def assert_same_float_dtype(tensors=None, dtype=None):\n    if tensors:\n        dtype = _assert_same_base_type(tensors, dtype)\n    if not dtype:\n        dtype = dtypes.float32\n    elif not dtype.is_floating:\n        raise ValueError('Expected floating point type, got %s.' % dtype)\n    return dtype", "docstring": "Validate and return float type based on `tensors` and `dtype`.\n\nFor ops such as matrix multiplication, inputs and weights must be of the\nsame float type. This function validates that all `tensors` are the same type,\nvalidates that type is `dtype` (if supplied), and returns the type. Type must\nbe a floating point type. If neither `tensors` nor `dtype` is supplied,\nthe function will return `dtypes.float32`.\n\nArgs:\ntensors: Tensors of input values. Can include `None` elements, which will be\nignored.\ndtype: Expected type.\n\nReturns:\nValidated type.\n\nRaises:\nValueError: if neither `tensors` nor `dtype` is supplied, or result is not\nfloat, or the common type of the inputs is not a floating point type.", "source": "github-repos"}
{"code": "def parse_datetime(__string: str) -> datetime.datetime:\n    if (not __string):\n        datetime_ = datetime.datetime.now(datetime.timezone.utc)\n    else:\n        datetime_ = ciso8601.parse_datetime(__string)\n    if (datetime_.tzinfo is None):\n        datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc)\n    return datetime_", "docstring": "Parse ISO-8601 datetime string.\n\nArgs:\n__string: Datetime string to parse\nReturns:\nParsed datetime object", "source": "codesearchnet"}
{"code": "def open_usb_handle(self, port_num):\n    \n    serial = self.get_usb_serial(port_num)\n    return local_usb.LibUsbHandle.open(serial_number=serial)", "docstring": "open usb port\n\nArgs:\nport_num: port number on the Cambrionix unit\n\nReturn:\nusb handle", "source": "juraj-google-style"}
{"code": "def decode(self, fp: TextIO) -> BioCCollection:\n    tree = etree.parse(fp)\n    collection = self.__parse_collection(tree.getroot())\n    collection.encoding = tree.docinfo.encoding\n    collection.standalone = tree.docinfo.standalone\n    collection.version = tree.docinfo.xml_version\n    return collection", "docstring": "Deserialize ``fp`` to a BioC collection object.\n\nArgs:\nfp: a ``.read()``-supporting file-like object containing a BioC collection\n\nReturns:\nan object of BioCollection", "source": "codesearchnet"}
{"code": "def as_int(self) -> int:\n    if len(self._messages) != 1:\n        raise ValueError('FHIRPath did not evaluate to a single integer.')\n    return proto_utils.get_value_at_field(self._messages[0], 'value')", "docstring": "Returns the result as an integer.\n\nRaises:\nValueError if the `EvaluationResult` is not a single integer.", "source": "github-repos"}
{"code": "def kick_user(self, user_id, reason=\"\"):\n        \n        try:\n            self.client.api.kick_user(self.room_id, user_id)\n            return True\n        except MatrixRequestError:\n            return False", "docstring": "Kick a user from this room.\n\n\nArgs:\nuser_id (str): The matrix user id of a user.\nreason  (str): A reason for kicking the user.\n\nReturns:\nboolean: Whether user was kicked.", "source": "juraj-google-style"}
{"code": "def setPulseInputRatio(self, line_in, new_cnst, password='00000000'):\n    result = False\n    self.setContext('setPulseInputRatio')\n    try:\n        if (not self.requestA()):\n            self.writeCmdMsg('Bad read CRC on setting')\n        elif (not self.serialCmdPwdAuth(password)):\n            self.writeCmdMsg('Password failure')\n        else:\n            req_const = binascii.hexlify(str(new_cnst).zfill(4))\n            line_const = binascii.hexlify(str((line_in - 1)))\n            req_str = (((('01573102303041' + line_const) + '28') + req_const) + '2903')\n            req_str += self.calc_crc16(req_str[2:].decode('hex'))\n            self.m_serial_port.write(req_str.decode('hex'))\n            if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n                self.writeCmdMsg('Success: 06 returned.')\n                result = True\n        self.serialPostEnd()\n    except:\n        ekm_log(traceback.format_exc(sys.exc_info()))\n    self.setContext('')\n    return result", "docstring": "Serial call to set pulse input ratio on a line.\n\nArgs:\nline_in (int): Member of :class:`~ekmmeters.Pulse`\nnew_cnst (int): New pulse input ratio\npassword (str): Optional password\n\nReturns:", "source": "codesearchnet"}
{"code": "def message_factory(msg_type, msg_types=MESSAGE_TYPES, *args, **kwargs):\n    try:\n        return msg_types[msg_type.lower()](*args, **kwargs)\n    except (UnknownProfileError, InvalidMessageInputError) as e:\n        err_exit('Unable to send message: ', e)\n    except KeyError:\n        raise UnsupportedMessageTypeError(msg_type, msg_types)", "docstring": "Factory function to return the specified message instance.\n\nArgs:\n:msg_type: (str) the type of message to send, i.e. 'Email'\n:msg_types: (str, list, or set) the supported message types\n:kwargs: (dict) keywords arguments that are required for the\nvarious message types.  See docstrings for each type.\ni.e. help(messages.Email), help(messages.Twilio), etc.", "source": "codesearchnet"}
{"code": "def get_file_handle(file_path):\n    \n    LOG.debug(\"Check if file end is correct\")\n\n    if not os.path.exists(file_path):\n        raise IOError(\"No such file:{0}\".format(file_path))\n\n    if not os.path.splitext(file_path)[-1] in VALID_ENDINGS:\n        raise IOError(\"Not a valid vcf file name: {}\".format(file_path))\n\n    vcf_obj = VCF(file_path)\n\n    return vcf_obj", "docstring": "Return cyvcf2 VCF object\n\nArgs:\nfile_path(str)\n\nReturns:\nvcf_obj(cyvcf2.VCF)", "source": "juraj-google-style"}
{"code": "def readinto(self, b):\n    self._checkClosed()\n    if self._position >= self._downloader.size:\n        return 0\n    start = self._position\n    end = min(self._position + len(b), self._downloader.size)\n    data = self._downloader.get_range(start, end)\n    self._position += len(data)\n    b[:len(data)] = data\n    return len(data)", "docstring": "Read up to len(b) bytes into b.\n\nReturns number of bytes read (0 for EOF).\n\nArgs:\nb: (bytearray/memoryview) Buffer to read into.", "source": "github-repos"}
{"code": "def lift_to_graph(tensors, graph, sources=None, disallowed_placeholders=None, add_sources=False, handle_captures=False, base_graph=None, op_map=None):\n    variable_init_tensors = []\n    init_tensors = []\n    for tensor in tensors:\n        if isinstance(tensor, resource_variable_ops.ResourceVariable):\n            variable_init_tensors.append(tensor)\n        else:\n            init_tensors.append(tensor)\n    base_graph = base_graph or init_tensors[0].graph\n    op_map = op_map or object_identity.ObjectIdentityDictionary()\n    sources = object_identity.ObjectIdentitySet(sources or [])\n    visited_ops = set((x.op for x in sources))\n    op_outputs = collections.defaultdict(set)\n    for init_tensor in init_tensors:\n        sources.update(op_selector.map_subgraph(init_tensor=init_tensor, sources=sources, disallowed_placeholders=disallowed_placeholders, visited_ops=visited_ops, op_outputs=op_outputs, add_sources=add_sources))\n    ops_to_copy = []\n    marked_ops = set([])\n    ops_to_visit = [_as_operation(t) for t in init_tensors if not op_outputs[_as_operation(t)]]\n    unvisited_ops = set(ops_to_visit)\n    while unvisited_ops:\n        while ops_to_visit:\n            op = ops_to_visit.pop()\n            if op in marked_ops:\n                continue\n            marked_ops.add(op)\n            ops_to_copy.append(op)\n            for inp in op_selector.graph_inputs(op):\n                if inp.type == 'TPUReplicateMetadata':\n                    continue\n                unvisited_ops.add(inp)\n                if all((x in marked_ops for x in op_outputs[inp])) and inp not in sources:\n                    ops_to_visit.append(inp)\n        unvisited_ops.difference_update(marked_ops)\n        if unvisited_ops:\n            ops_to_visit.append(next(iter(unvisited_ops)))\n    ops_to_copy.sort(key=lambda op: len(op_selector.graph_inputs(op)) == 0)\n    captures = []\n    inverse_captures = object_identity.ObjectIdentityDictionary()\n    internal_captures = []\n    if isinstance(base_graph, func_graph.FuncGraph) and isinstance(graph, func_graph.FuncGraph):\n        captures = base_graph.captures\n        for external_capture, internal_capture in captures:\n            inverse_captures[internal_capture] = external_capture\n        internal_captures = base_graph.internal_captures\n    with graph.as_default():\n        for i in variable_init_tensors:\n            op_map[i] = i\n        source_ops = set()\n        for s in internal_captures:\n            if s in sources:\n                sources.remove(s)\n                source_ops.add(s.op)\n                _copy_source(s=s, graph=graph, op_map=op_map, handle_captures=handle_captures, inverse_captures=inverse_captures, base_graph=base_graph)\n        for s in sources:\n            source_ops.add(s.op)\n            _copy_source(s=s, graph=graph, op_map=op_map, handle_captures=handle_captures, inverse_captures=inverse_captures, base_graph=base_graph)\n        input_mutations = []\n        control_mutations = []\n        for op in reversed(ops_to_copy):\n            if op in source_ops or op in op_map:\n                continue\n            new_input_mutations, new_control_mutations = _copy_non_source(op=op, graph=graph, op_map=op_map, base_graph=base_graph)\n            input_mutations.extend(new_input_mutations)\n            control_mutations.extend(new_control_mutations)\n        with graph._mutation_lock():\n            for mutation in input_mutations:\n                mutation.copied_op._update_input(mutation.input_index, 
op_map[mutation.old_graph_tensor])\n            for mutation in control_mutations:\n                if mutation.old_graph_op.type == 'TPUReplicateMetadata':\n                    continue\n                mutation.copied_op._add_control_input(op_map[mutation.old_graph_op])\n        return op_map", "docstring": "Copies the tensor and all its inputs recursively to the outer graph.\n\nArgs:\ntensors: The Tensors to lift.\ngraph: The graph to lift to.\nsources: Optional sequence of nodes to start from. If omitted the whole\nsubgraph which feeds into `init_tensor` is lifted.\ndisallowed_placeholders: An optional set of ops which may not appear in the\nlifted graph. Defaults to all placeholders.\nadd_sources: A boolean indicating whether placeholders which are not in\nsources should be allowed.\nhandle_captures: A boolean indicating whether to re-capture s in the new\ngraph or simply create a vanilla placeholder.\nbase_graph: The graph from which to lift ops. This will be inferred if not\nspecified.\nop_map: A map contains all the existing nodes that have been lifted to the\ndestination graph, so they won't be lifted and copied again.\n\nReturns:\nA mapping from ops in the current default graph to ops in `graph`.\n\nRaises:\nUnliftableError: If a placeholder blocks lifting.", "source": "github-repos"}
{"code": "def run_inference(self, batch: Sequence[dict[str, torch.Tensor]], model: torch.nn.Module, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n    inference_args = {} if not inference_args else inference_args\n    model_id = self._state_dict_path if not self._torch_script_model_path else self._torch_script_model_path\n    return self._inference_fn(batch, model, self._device, inference_args, model_id)", "docstring": "Runs inferences on a batch of Keyed Tensors and returns an Iterable of\nTensor Predictions.\n\nFor the same key across all examples, this will stack all Tensors values\nin a vectorized format to optimize the inference call.\n\nArgs:\nbatch: A sequence of keyed Tensors. These Tensors should be batchable,\nas this method will call `torch.stack()` and pass in batched Tensors\nwith dimensions (batch_size, n_features, etc.) into the model's\nforward() function.\nmodel: A PyTorch model.\ninference_args: Non-batchable arguments required as inputs to the model's\nforward() function. Unlike Tensors in `batch`, these parameters will\nnot be dynamically batched\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"}
{"code": "def error_handler(self, handler):\n    if (not self.opened()):\n        handler = (handler or util.noop)\n        self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n        self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)", "docstring": "Setter for the error handler function.\n\nIf the DLL is open, this function is a no-op, so it should be called\nprior to calling ``open()``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhandler (function): function to call on error messages\n\nReturns:\n``None``", "source": "codesearchnet"}
{"code": "def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key):\n    if signature_def_key not in meta_graph_def.signature_def:\n        raise ValueError(f'Could not find signature \"{signature_def_key}\". Please choose from: {', '.join(meta_graph_def.signature_def.keys())}')\n    return meta_graph_def.signature_def[signature_def_key].inputs", "docstring": "Gets TensorInfo for all inputs of the SignatureDef.\n\nReturns a dictionary that maps each input key to its TensorInfo for the given\nsignature_def_key in the meta_graph_def\n\nArgs:\nmeta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to\nlook up SignatureDef key.\nsignature_def_key: A SignatureDef key string.\n\nReturns:\nA dictionary that maps input tensor keys to TensorInfos.\n\nRaises:\nValueError if `signature_def_key` is not found in the MetaGraphDef.", "source": "github-repos"}
{"code": "def _bulk_cache_lookup(self, api_name, keys):\n    if self._cache:\n        responses = self._cache.bulk_lookup(api_name, keys)\n        missing_keys = [key for key in keys if (key not in responses.keys())]\n        return (responses, missing_keys)\n    return ({}, keys)", "docstring": "Performes a bulk cache lookup and returns a tuple with the results\nfound and the keys missing in the cache. If cached is not configured\nit will return an empty dictionary of found results and the initial\nlist of keys.\n\nArgs:\napi_name: a string name of the API.\nkeys: an enumerable of string keys.\nReturns:\nA tuple: (responses found, missing keys).", "source": "codesearchnet"}
